-rw-r--r--.b4-config14
-rw-r--r--.editorconfig13
-rw-r--r--.gitattributes5
-rw-r--r--.gitlab-ci.d/base.yml10
-rw-r--r--.gitlab-ci.d/buildtest-template.yml73
-rw-r--r--.gitlab-ci.d/buildtest.yml151
-rwxr-xr-x.gitlab-ci.d/check-dco.py10
-rwxr-xr-x.gitlab-ci.d/check-patch.py5
-rwxr-xr-x.gitlab-ci.d/check-units.py66
-rw-r--r--.gitlab-ci.d/cirrus.yml55
-rw-r--r--.gitlab-ci.d/cirrus/build.yml4
-rw-r--r--.gitlab-ci.d/cirrus/freebsd-13.vars16
-rw-r--r--.gitlab-ci.d/cirrus/freebsd-14.vars16
-rw-r--r--.gitlab-ci.d/cirrus/macos-13.vars16
-rw-r--r--.gitlab-ci.d/cirrus/macos-14.vars2
-rw-r--r--.gitlab-ci.d/container-cross.yml17
-rw-r--r--.gitlab-ci.d/containers.yml6
-rw-r--r--.gitlab-ci.d/crossbuild-template.yml56
-rw-r--r--.gitlab-ci.d/crossbuilds.yml20
-rw-r--r--.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml2
-rw-r--r--.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml2
-rw-r--r--.gitlab-ci.d/static_checks.yml46
-rw-r--r--.gitlab-ci.d/windows.yml29
-rw-r--r--.mailmap9
-rw-r--r--.travis.yml35
-rw-r--r--COPYING5
-rw-r--r--COPYING.LIB5
-rw-r--r--Kconfig1
-rw-r--r--Kconfig.host12
-rw-r--r--MAINTAINERS784
-rw-r--r--Makefile19
-rw-r--r--VERSION2
-rw-r--r--accel/Kconfig1
-rw-r--r--accel/accel-blocker.c3
-rw-r--r--accel/accel-common.c142
-rw-r--r--accel/accel-internal.h17
-rw-r--r--accel/accel-system.c11
-rw-r--r--accel/accel-system.h15
-rw-r--r--accel/accel-target.c137
-rw-r--r--accel/accel-user.c6
-rw-r--r--accel/dummy-cpus.c2
-rw-r--r--accel/hvf/hvf-accel-ops.c33
-rw-r--r--accel/hvf/hvf-all.c12
-rw-r--r--accel/kvm/kvm-accel-ops.c11
-rw-r--r--accel/kvm/kvm-all.c622
-rw-r--r--accel/kvm/kvm-cpus.h3
-rw-r--r--accel/kvm/trace-events14
-rw-r--r--accel/meson.build1
-rw-r--r--accel/qtest/qtest.c9
-rw-r--r--accel/stubs/hvf-stub.c12
-rw-r--r--accel/stubs/kvm-stub.c2
-rw-r--r--accel/stubs/meson.build1
-rw-r--r--accel/stubs/tcg-stub.c21
-rw-r--r--accel/stubs/xen-stub.c2
-rw-r--r--accel/tcg/atomic_common.c.inc13
-rw-r--r--accel/tcg/atomic_template.h82
-rw-r--r--accel/tcg/backend-ldst.h41
-rw-r--r--accel/tcg/cpu-exec-common.c36
-rw-r--r--accel/tcg/cpu-exec.c249
-rw-r--r--accel/tcg/cputlb.c391
-rw-r--r--accel/tcg/icount-common.c15
-rw-r--r--accel/tcg/internal-common.h89
-rw-r--r--accel/tcg/internal-target.h118
-rw-r--r--accel/tcg/ldst_atomicity.c.inc9
-rw-r--r--accel/tcg/ldst_common.c.inc373
-rw-r--r--accel/tcg/meson.build42
-rw-r--r--accel/tcg/monitor.c6
-rw-r--r--accel/tcg/plugin-gen.c32
-rw-r--r--accel/tcg/tb-hash.h5
-rw-r--r--accel/tcg/tb-internal.h55
-rw-r--r--accel/tcg/tb-maint.c109
-rw-r--r--accel/tcg/tcg-accel-ops-icount.c6
-rw-r--r--accel/tcg/tcg-accel-ops-mttcg.c7
-rw-r--r--accel/tcg/tcg-accel-ops-rr.c14
-rw-r--r--accel/tcg/tcg-accel-ops.c16
-rw-r--r--accel/tcg/tcg-accel-ops.h2
-rw-r--r--accel/tcg/tcg-all.c112
-rw-r--r--accel/tcg/tcg-runtime-gvec.c1
-rw-r--r--accel/tcg/tcg-runtime.c8
-rw-r--r--accel/tcg/tlb-bounds.h13
-rw-r--r--accel/tcg/trace-events12
-rw-r--r--accel/tcg/translate-all.c154
-rw-r--r--accel/tcg/translator.c143
-rw-r--r--accel/tcg/user-exec-stub.c11
-rw-r--r--accel/tcg/user-exec.c357
-rw-r--r--accel/tcg/user-retaddr.h28
-rw-r--r--accel/tcg/vcpu-state.h9
-rw-r--r--accel/tcg/watchpoint.c26
-rw-r--r--accel/xen/xen-all.c11
-rw-r--r--audio/alsaaudio.c2
-rw-r--r--audio/audio-hmp-cmds.c2
-rw-r--r--audio/audio.c33
-rw-r--r--audio/audio_int.h1
-rw-r--r--audio/audio_template.h12
-rw-r--r--audio/dbusaudio.c31
-rw-r--r--audio/mixeng.c83
-rw-r--r--audio/mixeng.h6
-rw-r--r--audio/pwaudio.c8
-rw-r--r--audio/trace-events2
-rw-r--r--authz/list.c4
-rw-r--r--authz/listfile.c8
-rw-r--r--authz/pamacct.c4
-rw-r--r--authz/simple.c4
-rw-r--r--backends/Kconfig4
-rw-r--r--backends/confidential-guest-support.c5
-rw-r--r--backends/cryptodev-builtin.c56
-rw-r--r--backends/cryptodev-hmp-cmds.c2
-rw-r--r--backends/cryptodev-lkcf.c43
-rw-r--r--backends/cryptodev-vhost-user.c15
-rw-r--r--backends/cryptodev-vhost.c6
-rw-r--r--backends/cryptodev.c24
-rw-r--r--backends/dbus-vmstate.c4
-rw-r--r--backends/host_iommu_device.c4
-rw-r--r--backends/hostmem-epc.c6
-rw-r--r--backends/hostmem-file.c6
-rw-r--r--backends/hostmem-memfd.c20
-rw-r--r--backends/hostmem-ram.c6
-rw-r--r--backends/hostmem-shm.c55
-rw-r--r--backends/hostmem.c8
-rw-r--r--backends/iommufd.c158
-rw-r--r--backends/meson.build8
-rw-r--r--backends/rng-builtin.c6
-rw-r--r--backends/rng-egd.c4
-rw-r--r--backends/rng-random.c6
-rw-r--r--backends/rng.c6
-rw-r--r--backends/spdm-socket.c216
-rw-r--r--backends/tpm/tpm_backend.c4
-rw-r--r--backends/tpm/tpm_emulator.c86
-rw-r--r--backends/tpm/tpm_int.h2
-rw-r--r--backends/tpm/tpm_ioctl.h13
-rw-r--r--backends/tpm/tpm_passthrough.c6
-rw-r--r--backends/tpm/tpm_util.c36
-rw-r--r--backends/tpm/trace-events2
-rw-r--r--backends/trace-events4
-rw-r--r--backends/vhost-user.c26
-rw-r--r--block.c305
-rw-r--r--block/accounting.c2
-rw-r--r--block/aio_task.c5
-rw-r--r--block/backup.c7
-rw-r--r--block/blkdebug.c14
-rw-r--r--block/blkio.c14
-rw-r--r--block/blklogwrites.c8
-rw-r--r--block/blkreplay.c2
-rw-r--r--block/blkverify.c6
-rw-r--r--block/block-backend.c146
-rw-r--r--block/block-copy.c44
-rw-r--r--block/block-ram-registrar.c4
-rw-r--r--block/commit.c124
-rw-r--r--block/copy-before-write.c27
-rw-r--r--block/copy-before-write.h2
-rw-r--r--block/copy-on-read.c2
-rw-r--r--block/coroutines.h6
-rw-r--r--block/crypto.c12
-rw-r--r--block/curl.c4
-rw-r--r--block/export/export.c33
-rw-r--r--block/export/fuse.c2
-rw-r--r--block/export/vduse-blk.c7
-rw-r--r--block/export/vhost-user-blk-server.c6
-rw-r--r--block/export/virtio-blk-handler.h2
-rw-r--r--block/file-posix.c244
-rw-r--r--block/file-win32.c4
-rw-r--r--block/gluster.c19
-rw-r--r--block/io.c170
-rw-r--r--block/io_uring.c27
-rw-r--r--block/iscsi.c14
-rw-r--r--block/linux-aio.c29
-rw-r--r--block/meson.build6
-rw-r--r--block/mirror.c203
-rw-r--r--block/monitor/block-hmp-cmds.c16
-rw-r--r--block/nbd.c6
-rw-r--r--block/nfs.c6
-rw-r--r--block/null.c12
-rw-r--r--block/nvme.c49
-rw-r--r--block/parallels-ext.c2
-rw-r--r--block/parallels.c18
-rw-r--r--block/qapi-sysemu.c574
-rw-r--r--block/qapi-system.c574
-rw-r--r--block/qapi.c13
-rw-r--r--block/qcow.c10
-rw-r--r--block/qcow2-snapshot.c2
-rw-r--r--block/qcow2.c32
-rw-r--r--block/qed.c11
-rw-r--r--block/quorum.c18
-rw-r--r--block/raw-format.c8
-rw-r--r--block/rbd.c21
-rw-r--r--block/replication.c17
-rw-r--r--block/reqlist.c2
-rw-r--r--block/snapshot-access.c6
-rw-r--r--block/snapshot.c35
-rw-r--r--block/ssh.c20
-rw-r--r--block/stream.c20
-rw-r--r--block/throttle-groups.c10
-rw-r--r--block/vdi.c14
-rw-r--r--block/vhdx.c4
-rw-r--r--block/vmdk.c16
-rw-r--r--block/vpc.c71
-rw-r--r--block/vvfat.c48
-rw-r--r--blockdev-nbd.c87
-rw-r--r--blockdev.c186
-rw-r--r--blockjob.c16
-rw-r--r--bsd-user/aarch64/signal.c137
-rw-r--r--bsd-user/aarch64/target.h20
-rw-r--r--bsd-user/aarch64/target_arch.h29
-rw-r--r--bsd-user/aarch64/target_arch_cpu.c31
-rw-r--r--bsd-user/aarch64/target_arch_cpu.h189
-rw-r--r--bsd-user/aarch64/target_arch_elf.h163
-rw-r--r--bsd-user/aarch64/target_arch_reg.h56
-rw-r--r--bsd-user/aarch64/target_arch_signal.h82
-rw-r--r--bsd-user/aarch64/target_arch_sigtramp.h48
-rw-r--r--bsd-user/aarch64/target_arch_sysarch.h42
-rw-r--r--bsd-user/aarch64/target_arch_thread.h61
-rw-r--r--bsd-user/aarch64/target_arch_vmparam.h74
-rw-r--r--bsd-user/aarch64/target_syscall.h51
-rw-r--r--bsd-user/arm/target_arch_cpu.h2
-rw-r--r--bsd-user/arm/target_arch_signal.h2
-rw-r--r--bsd-user/bsd-mem.h6
-rw-r--r--bsd-user/elfload.c6
-rw-r--r--bsd-user/freebsd/os-proc.c118
-rw-r--r--bsd-user/i386/target_arch_cpu.h2
-rw-r--r--bsd-user/i386/target_arch_signal.h2
-rw-r--r--bsd-user/main.c79
-rw-r--r--bsd-user/meson.build1
-rw-r--r--bsd-user/mmap.c57
-rw-r--r--bsd-user/plugin-api.c15
-rw-r--r--bsd-user/qemu.h22
-rw-r--r--bsd-user/riscv/signal.c170
-rw-r--r--bsd-user/riscv/target.h20
-rw-r--r--bsd-user/riscv/target_arch.h27
-rw-r--r--bsd-user/riscv/target_arch_cpu.c29
-rw-r--r--bsd-user/riscv/target_arch_cpu.h148
-rw-r--r--bsd-user/riscv/target_arch_elf.h42
-rw-r--r--bsd-user/riscv/target_arch_reg.h88
-rw-r--r--bsd-user/riscv/target_arch_signal.h75
-rw-r--r--bsd-user/riscv/target_arch_sigtramp.h41
-rw-r--r--bsd-user/riscv/target_arch_sysarch.h41
-rw-r--r--bsd-user/riscv/target_arch_thread.h47
-rw-r--r--bsd-user/riscv/target_arch_vmparam.h53
-rw-r--r--bsd-user/riscv/target_syscall.h38
-rw-r--r--bsd-user/signal-common.h1
-rw-r--r--bsd-user/signal.c35
-rw-r--r--bsd-user/x86_64/target_arch_cpu.h2
-rw-r--r--bsd-user/x86_64/target_arch_signal.h2
-rw-r--r--bsd-user/x86_64/target_arch_thread.h2
-rw-r--r--chardev/baum.c2
-rw-r--r--chardev/char-console.c2
-rw-r--r--chardev/char-fd.c4
-rw-r--r--chardev/char-fe.c15
-rw-r--r--chardev/char-file.c2
-rw-r--r--chardev/char-hmp-cmds.c2
-rw-r--r--chardev/char-hub.c301
-rw-r--r--chardev/char-mux.c93
-rw-r--r--chardev/char-null.c2
-rw-r--r--chardev/char-parallel.c2
-rw-r--r--chardev/char-pipe.c2
-rw-r--r--chardev/char-pty.c41
-rw-r--r--chardev/char-ringbuf.c2
-rw-r--r--chardev/char-serial.c2
-rw-r--r--chardev/char-socket.c84
-rw-r--r--chardev/char-stdio.c2
-rw-r--r--chardev/char-udp.c2
-rw-r--r--chardev/char-win-stdio.c7
-rw-r--r--chardev/char-win.c2
-rw-r--r--chardev/char.c112
-rw-r--r--chardev/chardev-internal.h67
-rw-r--r--chardev/meson.build1
-rw-r--r--chardev/msmouse.c4
-rw-r--r--chardev/spice.c6
-rw-r--r--chardev/testdev.c2
-rw-r--r--chardev/trace-events10
-rw-r--r--chardev/wctablet.c2
-rw-r--r--clippy.toml3
-rw-r--r--common-user/host/riscv/safe-syscall.inc.S4
-rw-r--r--common-user/plugin-api.c.inc43
-rw-r--r--configs/devices/aarch64-softmmu/default.mak2
-rw-r--r--configs/devices/arm-softmmu/default.mak7
-rw-r--r--configs/devices/cris-softmmu/default.mak4
-rw-r--r--configs/devices/i386-softmmu/default.mak2
-rw-r--r--configs/devices/microblaze-softmmu/default.mak2
-rw-r--r--configs/devices/microblazeel-softmmu/default.mak5
-rw-r--r--configs/devices/sh4-softmmu/default.mak3
-rw-r--r--configs/meson/emscripten.txt8
-rw-r--r--configs/targets/aarch64-bsd-user.mak4
-rw-r--r--configs/targets/aarch64-linux-user.mak3
-rw-r--r--configs/targets/aarch64-softmmu.mak2
-rw-r--r--configs/targets/aarch64_be-linux-user.mak5
-rw-r--r--configs/targets/alpha-linux-user.mak1
-rw-r--r--configs/targets/alpha-softmmu.mak2
-rw-r--r--configs/targets/arm-bsd-user.mak1
-rw-r--r--configs/targets/arm-linux-user.mak1
-rw-r--r--configs/targets/arm-softmmu.mak2
-rw-r--r--configs/targets/armeb-linux-user.mak1
-rw-r--r--configs/targets/avr-softmmu.mak1
-rw-r--r--configs/targets/cris-linux-user.mak1
-rw-r--r--configs/targets/cris-softmmu.mak1
-rw-r--r--configs/targets/hexagon-linux-user.mak3
-rw-r--r--configs/targets/hppa-linux-user.mak2
-rw-r--r--configs/targets/hppa-softmmu.mak2
-rw-r--r--configs/targets/i386-bsd-user.mak1
-rw-r--r--configs/targets/i386-linux-user.mak3
-rw-r--r--configs/targets/i386-softmmu.mak3
-rw-r--r--configs/targets/loongarch64-linux-user.mak3
-rw-r--r--configs/targets/loongarch64-softmmu.mak2
-rw-r--r--configs/targets/m68k-linux-user.mak1
-rw-r--r--configs/targets/m68k-softmmu.mak1
-rw-r--r--configs/targets/microblaze-linux-user.mak1
-rw-r--r--configs/targets/microblaze-softmmu.mak2
-rw-r--r--configs/targets/microblazeel-linux-user.mak1
-rw-r--r--configs/targets/microblazeel-softmmu.mak2
-rw-r--r--configs/targets/mips-linux-user.mak1
-rw-r--r--configs/targets/mips-softmmu.mak2
-rw-r--r--configs/targets/mips64-linux-user.mak1
-rw-r--r--configs/targets/mips64-softmmu.mak1
-rw-r--r--configs/targets/mips64el-linux-user.mak1
-rw-r--r--configs/targets/mips64el-softmmu.mak1
-rw-r--r--configs/targets/mipsel-linux-user.mak1
-rw-r--r--configs/targets/mipsel-softmmu.mak2
-rw-r--r--configs/targets/mipsn32-linux-user.mak1
-rw-r--r--configs/targets/mipsn32el-linux-user.mak1
-rw-r--r--configs/targets/or1k-linux-user.mak3
-rw-r--r--configs/targets/or1k-softmmu.mak2
-rw-r--r--configs/targets/ppc-linux-user.mak1
-rw-r--r--configs/targets/ppc-softmmu.mak1
-rw-r--r--configs/targets/ppc64-linux-user.mak1
-rw-r--r--configs/targets/ppc64-softmmu.mak2
-rw-r--r--configs/targets/ppc64le-linux-user.mak1
-rw-r--r--configs/targets/riscv32-linux-user.mak4
-rw-r--r--configs/targets/riscv32-softmmu.mak2
-rw-r--r--configs/targets/riscv64-bsd-user.mak5
-rw-r--r--configs/targets/riscv64-linux-user.mak4
-rw-r--r--configs/targets/riscv64-softmmu.mak4
-rw-r--r--configs/targets/rx-softmmu.mak1
-rw-r--r--configs/targets/s390x-linux-user.mak1
-rw-r--r--configs/targets/s390x-softmmu.mak2
-rw-r--r--configs/targets/sh4-linux-user.mak1
-rw-r--r--configs/targets/sh4-softmmu.mak1
-rw-r--r--configs/targets/sh4eb-linux-user.mak1
-rw-r--r--configs/targets/sh4eb-softmmu.mak1
-rw-r--r--configs/targets/sparc-linux-user.mak1
-rw-r--r--configs/targets/sparc-softmmu.mak2
-rw-r--r--configs/targets/sparc32plus-linux-user.mak1
-rw-r--r--configs/targets/sparc64-linux-user.mak1
-rw-r--r--configs/targets/sparc64-softmmu.mak2
-rw-r--r--configs/targets/tricore-softmmu.mak1
-rw-r--r--configs/targets/x86_64-bsd-user.mak1
-rw-r--r--configs/targets/x86_64-linux-user.mak3
-rw-r--r--configs/targets/x86_64-softmmu.mak3
-rw-r--r--configs/targets/xtensa-linux-user.mak1
-rw-r--r--configs/targets/xtensa-softmmu.mak2
-rw-r--r--configs/targets/xtensaeb-linux-user.mak1
-rw-r--r--configs/targets/xtensaeb-softmmu.mak2
-rwxr-xr-xconfigure269
-rw-r--r--contrib/elf2dmp/pdb.c4
-rw-r--r--contrib/plugins/Makefile69
-rw-r--r--contrib/plugins/bbv.c158
-rw-r--r--contrib/plugins/cache.c34
-rw-r--r--contrib/plugins/cflow.c393
-rw-r--r--contrib/plugins/execlog.c6
-rw-r--r--contrib/plugins/hotblocks.c33
-rw-r--r--contrib/plugins/hotpages.c10
-rw-r--r--contrib/plugins/howvec.c11
-rw-r--r--contrib/plugins/hwprofile.c35
-rw-r--r--contrib/plugins/ips.c55
-rw-r--r--contrib/plugins/lockstep.c25
-rw-r--r--contrib/plugins/meson.build30
-rw-r--r--contrib/plugins/stoptrigger.c157
-rw-r--r--contrib/systemd/qemu-vmsr-helper.service15
-rw-r--r--contrib/systemd/qemu-vmsr-helper.socket9
-rw-r--r--contrib/vhost-user-blk/vhost-user-blk.c2
-rwxr-xr-xcontrib/vmapple/uuid.sh12
-rw-r--r--cpu-common.c21
-rw-r--r--cpu-target.c401
-rw-r--r--crypto/afalg.c8
-rw-r--r--crypto/afalgpriv.h14
-rw-r--r--crypto/afsplit.c6
-rw-r--r--crypto/akcipher-gcrypt.c.inc46
-rw-r--r--crypto/akcipher-nettle.c.inc58
-rw-r--r--crypto/akcipher.c2
-rw-r--r--crypto/akcipherpriv.h2
-rw-r--r--crypto/block-luks.c131
-rw-r--r--crypto/block-qcow.c6
-rw-r--r--crypto/block.c8
-rw-r--r--crypto/blockpriv.h6
-rw-r--r--crypto/cipher-afalg.c36
-rw-r--r--crypto/cipher-builtin.c.inc303
-rw-r--r--crypto/cipher-gcrypt.c.inc56
-rw-r--r--crypto/cipher-gnutls.c.inc38
-rw-r--r--crypto/cipher-nettle.c.inc83
-rw-r--r--crypto/cipher-stub.c.inc30
-rw-r--r--crypto/cipher.c74
-rw-r--r--crypto/cipherpriv.h2
-rw-r--r--crypto/der.c13
-rw-r--r--crypto/der.h22
-rw-r--r--crypto/hash-afalg.c201
-rw-r--r--crypto/hash-gcrypt.c142
-rw-r--r--crypto/hash-glib.c113
-rw-r--r--crypto/hash-gnutls.c125
-rw-r--r--crypto/hash-nettle.c124
-rw-r--r--crypto/hash.c194
-rw-r--r--crypto/hashpriv.h13
-rw-r--r--crypto/hmac-gcrypt.c27
-rw-r--r--crypto/hmac-glib.c22
-rw-r--r--crypto/hmac-gnutls.c22
-rw-r--r--crypto/hmac-nettle.c33
-rw-r--r--crypto/hmac.c2
-rw-r--r--crypto/hmacpriv.h4
-rw-r--r--crypto/init.c15
-rw-r--r--crypto/ivgen.c18
-rw-r--r--crypto/ivgenpriv.h6
-rw-r--r--crypto/meson.build4
-rw-r--r--crypto/pbkdf-gcrypt.c44
-rw-r--r--crypto/pbkdf-gnutls.c38
-rw-r--r--crypto/pbkdf-nettle.c45
-rw-r--r--crypto/pbkdf-stub.c4
-rw-r--r--crypto/pbkdf.c64
-rw-r--r--crypto/rsakey-builtin.c.inc4
-rw-r--r--crypto/rsakey-nettle.c.inc4
-rw-r--r--crypto/secret.c2
-rw-r--r--crypto/secret_common.c18
-rw-r--r--crypto/secret_keyring.c2
-rw-r--r--crypto/tls-cipher-suites.c5
-rw-r--r--crypto/tlscreds.c2
-rw-r--r--crypto/tlscredsanon.c39
-rw-r--r--crypto/tlscredspsk.c39
-rw-r--r--crypto/tlscredsx509.c34
-rw-r--r--crypto/tlssession.c186
-rw-r--r--crypto/x509-utils.c76
-rw-r--r--disas/cris.c2863
-rw-r--r--disas/disas-common.c13
-rw-r--r--disas/disas-mon.c2
-rw-r--r--disas/hppa.c23
-rw-r--r--disas/meson.build1
-rw-r--r--disas/riscv.c116
-rw-r--r--disas/riscv.h6
-rw-r--r--docs/COLO-FT.txt4
-rw-r--r--docs/about/build-platforms.rst37
-rw-r--r--docs/about/deprecated.rst340
-rw-r--r--docs/about/emulation.rst658
-rw-r--r--docs/about/removed-features.rst180
-rw-r--r--docs/conf.py71
-rw-r--r--docs/devel/acpi-bits.rst167
-rw-r--r--docs/devel/atomics.rst6
-rw-r--r--docs/devel/blkdebug.txt162
-rw-r--r--docs/devel/blkverify.txt69
-rw-r--r--docs/devel/build-environment.rst118
-rw-r--r--docs/devel/build-system.rst27
-rw-r--r--docs/devel/ci-definitions.rst.inc121
-rw-r--r--docs/devel/ci-jobs.rst.inc190
-rw-r--r--docs/devel/ci.rst14
-rw-r--r--docs/devel/clocks.rst6
-rw-r--r--docs/devel/code-provenance.rst338
-rw-r--r--docs/devel/codebase.rst215
-rw-r--r--docs/devel/control-flow-integrity.rst2
-rw-r--r--docs/devel/crypto.rst10
-rw-r--r--docs/devel/decodetree.rst2
-rw-r--r--docs/devel/ebpf_rss.rst2
-rw-r--r--docs/devel/fuzzing.rst304
-rw-r--r--docs/devel/index-api.rst1
-rw-r--r--docs/devel/index-build.rst16
-rw-r--r--docs/devel/index-internals.rst6
-rw-r--r--docs/devel/index-process.rst2
-rw-r--r--docs/devel/index.rst2
-rw-r--r--docs/devel/kconfig.rst16
-rw-r--r--docs/devel/loads-stores.rst2
-rw-r--r--docs/devel/lockcnt.rst278
-rw-r--r--docs/devel/lockcnt.txt277
-rw-r--r--docs/devel/luks-detached-header.rst182
-rw-r--r--docs/devel/maintainers.rst4
-rw-r--r--docs/devel/memory.rst2
-rw-r--r--docs/devel/migration/CPR.rst184
-rw-r--r--docs/devel/migration/compatibility.rst5
-rw-r--r--docs/devel/migration/features.rst1
-rw-r--r--docs/devel/migration/main.rst8
-rw-r--r--docs/devel/migration/mapped-ram.rst4
-rw-r--r--docs/devel/migration/qatzip-compression.rst165
-rw-r--r--docs/devel/migration/uadk-compression.rst4
-rw-r--r--docs/devel/migration/vfio.rst45
-rw-r--r--docs/devel/multi-thread-tcg.rst9
-rw-r--r--docs/devel/multiple-iothreads.rst139
-rw-r--r--docs/devel/multiple-iothreads.txt130
-rw-r--r--docs/devel/nested-papr.txt119
-rw-r--r--docs/devel/qapi-code-gen.rst61
-rw-r--r--docs/devel/qapi-domain.rst716
-rw-r--r--docs/devel/qom.rst8
-rw-r--r--docs/devel/qtest.rst91
-rw-r--r--docs/devel/rcu.rst394
-rw-r--r--docs/devel/rcu.txt406
-rw-r--r--docs/devel/replay.rst3
-rw-r--r--docs/devel/reset.rst31
-rw-r--r--docs/devel/rust.rst478
-rw-r--r--docs/devel/style.rst20
-rw-r--r--docs/devel/submitting-a-patch.rst66
-rw-r--r--docs/devel/tcg-ops.rst247
-rw-r--r--docs/devel/tcg-plugins.rst496
-rw-r--r--docs/devel/testing.rst1529
-rw-r--r--docs/devel/testing/acpi-bits.rst155
-rw-r--r--docs/devel/testing/blkdebug.rst177
-rw-r--r--docs/devel/testing/blkverify.rst73
-rw-r--r--docs/devel/testing/ci-jobs.rst.inc189
-rw-r--r--docs/devel/testing/ci-runners.rst.inc (renamed from docs/devel/ci-runners.rst.inc)0
-rw-r--r--docs/devel/testing/ci.rst34
-rw-r--r--docs/devel/testing/functional.rst380
-rw-r--r--docs/devel/testing/fuzzing.rst305
-rw-r--r--docs/devel/testing/index.rst17
-rw-r--r--docs/devel/testing/main.rst1059
-rw-r--r--docs/devel/testing/qgraph.rst (renamed from docs/devel/qgraph.rst)0
-rw-r--r--docs/devel/testing/qtest.rst93
-rw-r--r--docs/devel/uefi-vars.rst68
-rw-r--r--docs/devel/virtio-backends.rst5
-rw-r--r--docs/glossary.rst280
-rw-r--r--docs/igd-assign.txt272
-rw-r--r--docs/index.rst3
-rw-r--r--docs/interop/bitmaps.rst2
-rw-r--r--docs/interop/firmware.json59
-rw-r--r--docs/interop/index.rst6
-rw-r--r--docs/interop/live-block-operations.rst4
-rw-r--r--docs/interop/nbd.rst89
-rw-r--r--docs/interop/nbd.txt72
-rw-r--r--docs/interop/parallels.rst240
-rw-r--r--docs/interop/parallels.txt232
-rw-r--r--docs/interop/prl-xml.rst192
-rw-r--r--docs/interop/prl-xml.txt158
-rw-r--r--docs/interop/qcow2.rst937
-rw-r--r--docs/interop/qcow2.txt906
-rw-r--r--docs/interop/qed_spec.rst219
-rw-r--r--docs/interop/qed_spec.txt138
-rw-r--r--docs/interop/qemu-ga-ref.rst5
-rw-r--r--docs/interop/qemu-ga.rst22
-rw-r--r--docs/interop/qemu-qmp-ref.rst4
-rw-r--r--docs/interop/qemu-storage-daemon-qmp-ref.rst4
-rw-r--r--docs/interop/vfio-user.rst1520
-rw-r--r--docs/interop/vhost-user.rst24
-rw-r--r--docs/meson.build6
-rw-r--r--docs/qcow2-cache.txt2
-rw-r--r--docs/specs/acpi_hest_ghes.rst6
-rw-r--r--docs/specs/acpi_hw_reduced_hotplug.rst3
-rw-r--r--docs/specs/aspeed-intc.rst136
-rw-r--r--docs/specs/fw_cfg.rst4
-rw-r--r--docs/specs/index.rst6
-rw-r--r--docs/specs/pci-ids.rst10
-rw-r--r--docs/specs/rapl-msr.rst154
-rw-r--r--docs/specs/riscv-aia.rst83
-rw-r--r--docs/specs/riscv-iommu.rst116
-rw-r--r--docs/specs/rocker.rst1015
-rw-r--r--docs/specs/rocker.txt1014
-rw-r--r--docs/specs/spdm.rst134
-rw-r--r--docs/specs/tpm.rst8
-rw-r--r--docs/sphinx-static/theme_overrides.css98
-rw-r--r--docs/sphinx/compat.py230
-rw-r--r--docs/sphinx/depfile.py5
-rw-r--r--docs/sphinx/qapi_domain.py1055
-rw-r--r--docs/sphinx/qapidoc.py936
-rw-r--r--docs/sphinx/qapidoc_legacy.py440
-rw-r--r--docs/sphinx/qmp_lexer.py2
-rw-r--r--docs/system/arm/aspeed.rst313
-rw-r--r--docs/system/arm/bananapi_m2u.rst11
-rw-r--r--docs/system/arm/cpu-features.rst7
-rw-r--r--docs/system/arm/cubieboard.rst1
-rw-r--r--docs/system/arm/emulation.rst16
-rw-r--r--docs/system/arm/exynos.rst9
-rw-r--r--docs/system/arm/fby35.rst52
-rw-r--r--docs/system/arm/gumstix.rst21
-rw-r--r--docs/system/arm/imx8mp-evk.rst62
-rw-r--r--docs/system/arm/mainstone.rst25
-rw-r--r--docs/system/arm/mcimx6ul-evk.rst5
-rw-r--r--docs/system/arm/mcimx7d-sabre.rst5
-rw-r--r--docs/system/arm/nseries.rst33
-rw-r--r--docs/system/arm/nuvoton.rst27
-rw-r--r--docs/system/arm/orangepi.rst16
-rw-r--r--docs/system/arm/palm.rst23
-rw-r--r--docs/system/arm/stm32.rst7
-rw-r--r--docs/system/arm/virt.rst30
-rw-r--r--docs/system/arm/vmapple.rst65
-rw-r--r--docs/system/arm/xlnx-versal-virt.rst3
-rw-r--r--docs/system/arm/xlnx-zcu102.rst19
-rw-r--r--docs/system/arm/xscale.rst35
-rw-r--r--docs/system/bootindex.rst7
-rw-r--r--docs/system/confidential-guest-support.rst1
-rw-r--r--docs/system/cpu-hotplug.rst54
-rw-r--r--docs/system/cpu-models-x86.rst.inc50
-rw-r--r--docs/system/device-emulation.rst2
-rw-r--r--docs/system/devices/cxl.rst18
-rw-r--r--docs/system/devices/igb.rst5
-rw-r--r--docs/system/devices/ivshmem-flat.rst33
-rw-r--r--docs/system/devices/net.rst100
-rw-r--r--docs/system/devices/nvme.rst7
-rw-r--r--docs/system/devices/vfio-user.rst26
-rw-r--r--docs/system/devices/virtio-gpu.rst11
-rw-r--r--docs/system/gdb.rst2
-rw-r--r--docs/system/i386/hyperv.rst43
-rw-r--r--docs/system/i386/nitro-enclave.rst78
-rw-r--r--docs/system/i386/tdx.rst161
-rw-r--r--docs/system/i386/xenpvh.rst49
-rw-r--r--docs/system/images.rst2
-rw-r--r--docs/system/index.rst1
-rw-r--r--docs/system/introduction.rst2
-rw-r--r--docs/system/linuxboot.rst6
-rw-r--r--docs/system/loongarch/virt.rst33
-rw-r--r--docs/system/ppc/amigang.rst17
-rw-r--r--docs/system/ppc/embedded.rst1
-rw-r--r--docs/system/ppc/powermac.rst4
-rw-r--r--docs/system/ppc/powernv.rst9
-rw-r--r--docs/system/ppc/pseries.rst17
-rw-r--r--docs/system/riscv/microblaze-v-generic.rst42
-rw-r--r--docs/system/riscv/microchip-icicle-kit.rst124
-rw-r--r--docs/system/riscv/virt.rst30
-rw-r--r--docs/system/s390x/bootdevices.rst53
-rw-r--r--docs/system/sriov.rst37
-rw-r--r--docs/system/target-arm.rst16
-rw-r--r--docs/system/target-i386.rst5
-rw-r--r--docs/system/target-loongarch.rst19
-rw-r--r--docs/system/target-mips.rst2
-rw-r--r--docs/system/target-riscv.rst1
-rw-r--r--docs/system/targets.rst1
-rw-r--r--docs/tools/index.rst2
-rw-r--r--docs/tools/qemu-nbd.rst7
-rw-r--r--docs/tools/qemu-storage-daemon.rst2
-rw-r--r--docs/tools/qemu-vmsr-helper.rst89
-rw-r--r--docs/tools/virtfs-proxy-helper.rst75
-rw-r--r--docs/user/main.rst26
-rw-r--r--dump/dump-hmp-cmds.c2
-rw-r--r--dump/dump.c18
-rw-r--r--dump/win_dump.c4
-rw-r--r--dump/win_dump.h2
-rw-r--r--ebpf/ebpf_rss-stub.c8
-rw-r--r--ebpf/ebpf_rss.c120
-rw-r--r--ebpf/ebpf_rss.h10
-rw-r--r--ebpf/trace-events8
-rw-r--r--event-loop-base.c9
-rw-r--r--fpu/meson.build2
-rw-r--r--fpu/softfloat-parts.c.inc327
-rw-r--r--fpu/softfloat-specialize.c.inc460
-rw-r--r--fpu/softfloat.c177
-rw-r--r--fsdev/9p-iov-marshal.c15
-rw-r--r--fsdev/file-op-9p.h5
-rw-r--r--fsdev/meson.build8
-rw-r--r--fsdev/qemu-fsdev.c19
-rw-r--r--fsdev/qemu-fsdev.h1
-rw-r--r--fsdev/virtfs-proxy-helper.c1193
-rw-r--r--gdb-xml/aarch64-core.xml52
-rw-r--r--gdb-xml/hexagon-core.xml6
-rw-r--r--gdb-xml/i386-32bit-linux.xml11
-rw-r--r--gdb-xml/i386-64bit-linux.xml11
-rw-r--r--gdbstub/gdbstub.c224
-rw-r--r--gdbstub/meson.build38
-rw-r--r--gdbstub/syscalls.c4
-rw-r--r--gdbstub/system.c45
-rw-r--r--gdbstub/user-target.c18
-rw-r--r--gdbstub/user.c173
-rw-r--r--hmp-commands-info.hx6
-rw-r--r--host/include/aarch64/host/atomic128-cas.h2
-rw-r--r--host/include/aarch64/host/atomic128-ldst.h.inc (renamed from host/include/aarch64/host/atomic128-ldst.h)0
-rw-r--r--host/include/generic/host/atomic128-cas.h.inc (renamed from host/include/generic/host/atomic128-cas.h)0
-rw-r--r--host/include/generic/host/atomic128-ldst.h.inc (renamed from host/include/generic/host/atomic128-ldst.h)0
-rw-r--r--host/include/i386/host/cpuinfo.h1
-rw-r--r--host/include/loongarch64/host/atomic128-ldst.h52
-rw-r--r--host/include/loongarch64/host/atomic128-ldst.h.inc52
-rw-r--r--host/include/loongarch64/host/bufferiszero.c.inc6
-rw-r--r--host/include/loongarch64/host/load-extract-al16-al8.h.inc2
-rw-r--r--host/include/riscv/host/cpuinfo.h5
-rw-r--r--host/include/x86_64/host/atomic128-ldst.h75
-rw-r--r--host/include/x86_64/host/atomic128-ldst.h.inc75
-rw-r--r--host/include/x86_64/host/load-extract-al16-al8.h.inc2
-rw-r--r--hw/9pfs/9p-local.c52
-rw-r--r--hw/9pfs/9p-proxy.c1279
-rw-r--r--hw/9pfs/9p-proxy.h101
-rw-r--r--hw/9pfs/9p-synth.c24
-rw-r--r--hw/9pfs/9p-util-generic.c50
-rw-r--r--hw/9pfs/9p-util.h34
-rw-r--r--hw/9pfs/9p.c124
-rw-r--r--hw/9pfs/9p.h2
-rw-r--r--hw/9pfs/codir.c7
-rw-r--r--hw/9pfs/cofile.c7
-rw-r--r--hw/9pfs/cofs.c37
-rw-r--r--hw/9pfs/coth.h4
-rw-r--r--hw/9pfs/meson.build2
-rw-r--r--hw/9pfs/trace-events4
-rw-r--r--hw/9pfs/virtio-9p-device.c7
-rw-r--r--hw/Kconfig5
-rw-r--r--hw/acpi/Kconfig5
-rw-r--r--hw/acpi/acpi-cpu-hotplug-stub.c12
-rw-r--r--hw/acpi/acpi-mem-hotplug-stub.c5
-rw-r--r--hw/acpi/acpi-nvdimm-stub.c1
-rw-r--r--hw/acpi/acpi-pci-hotplug-stub.c8
-rw-r--r--hw/acpi/acpi-stub.c8
-rw-r--r--hw/acpi/acpi_generic_initiator.c148
-rw-r--r--hw/acpi/aml-build.c108
-rw-r--r--hw/acpi/core.c7
-rw-r--r--hw/acpi/cpu.c67
-rw-r--r--hw/acpi/cpu_hotplug.c3
-rw-r--r--hw/acpi/erst.c15
-rw-r--r--hw/acpi/generic_event_device.c98
-rw-r--r--hw/acpi/ghes-stub.c2
-rw-r--r--hw/acpi/ghes.c240
-rw-r--r--hw/acpi/hmat.c2
-rw-r--r--hw/acpi/hmat.h2
-rw-r--r--hw/acpi/ich9.c40
-rw-r--r--hw/acpi/ich9_tco.c2
-rw-r--r--hw/acpi/ich9_timer.c93
-rw-r--r--hw/acpi/ipmi.c3
-rw-r--r--hw/acpi/meson.build4
-rw-r--r--hw/acpi/pci.c242
-rw-r--r--hw/acpi/pcihp.c2
-rw-r--r--hw/acpi/piix4.c17
-rw-r--r--hw/acpi/vmclock.c179
-rw-r--r--hw/acpi/vmgenid.c7
-rw-r--r--hw/adc/Kconfig3
-rw-r--r--hw/adc/aspeed_adc.c29
-rw-r--r--hw/adc/max111x.c236
-rw-r--r--hw/adc/meson.build1
-rw-r--r--hw/adc/npcm7xx_adc.c5
-rw-r--r--hw/adc/stm32f2xx_adc.c4
-rw-r--r--hw/adc/zynq-xadc.c4
-rw-r--r--hw/alpha/dp264.c5
-rw-r--r--hw/alpha/typhoon.c3
-rw-r--r--hw/arm/Kconfig174
-rw-r--r--hw/arm/allwinner-a10.c22
-rw-r--r--hw/arm/allwinner-h3.c19
-rw-r--r--hw/arm/allwinner-r40.c13
-rw-r--r--hw/arm/armsse.c27
-rw-r--r--hw/arm/armv7m.c28
-rw-r--r--hw/arm/aspeed.c332
-rw-r--r--hw/arm/aspeed_ast10x0.c9
-rw-r--r--hw/arm/aspeed_ast2400.c15
-rw-r--r--hw/arm/aspeed_ast2600.c25
-rw-r--r--hw/arm/aspeed_ast27x0-fc.c200
-rw-r--r--hw/arm/aspeed_ast27x0-ssp.c294
-rw-r--r--hw/arm/aspeed_ast27x0-tsp.c294
-rw-r--r--hw/arm/aspeed_ast27x0.c602
-rw-r--r--hw/arm/aspeed_soc_common.c14
-rw-r--r--hw/arm/b-l475e-iot01a.c4
-rw-r--r--hw/arm/bananapi_m2u.c3
-rw-r--r--hw/arm/bcm2835_peripherals.c4
-rw-r--r--hw/arm/bcm2836.c10
-rw-r--r--hw/arm/bcm2838.c2
-rw-r--r--hw/arm/bcm2838_peripherals.c2
-rw-r--r--hw/arm/boot.c73
-rw-r--r--hw/arm/collie.c4
-rw-r--r--hw/arm/cubieboard.c1
-rw-r--r--hw/arm/digic.c4
-rw-r--r--hw/arm/digic_boards.c4
-rw-r--r--hw/arm/exynos4210.c17
-rw-r--r--hw/arm/exynos4_boards.c8
-rw-r--r--hw/arm/fby35.c8
-rw-r--r--hw/arm/fsl-imx25.c9
-rw-r--r--hw/arm/fsl-imx31.c6
-rw-r--r--hw/arm/fsl-imx6.c74
-rw-r--r--hw/arm/fsl-imx6ul.c73
-rw-r--r--hw/arm/fsl-imx7.c74
-rw-r--r--hw/arm/fsl-imx8mp.c712
-rw-r--r--hw/arm/gumstix.c141
-rw-r--r--hw/arm/highbank.c22
-rw-r--r--hw/arm/imx25_pdk.c3
-rw-r--r--hw/arm/imx8mp-evk.c103
-rw-r--r--hw/arm/integratorcp.c16
-rw-r--r--hw/arm/kzm.c8
-rw-r--r--hw/arm/mainstone.c175
-rw-r--r--hw/arm/mcimx6ul-evk.c3
-rw-r--r--hw/arm/mcimx7d-sabre.c3
-rw-r--r--hw/arm/meson.build132
-rw-r--r--hw/arm/microbit.c8
-rw-r--r--hw/arm/mps2-tz.c32
-rw-r--r--hw/arm/mps2.c22
-rw-r--r--hw/arm/mps3r.c10
-rw-r--r--hw/arm/msf2-soc.c11
-rw-r--r--hw/arm/msf2-som.c4
-rw-r--r--hw/arm/musca.c12
-rw-r--r--hw/arm/musicpal.c32
-rw-r--r--hw/arm/netduino2.c2
-rw-r--r--hw/arm/netduinoplus2.c2
-rw-r--r--hw/arm/npcm7xx.c19
-rw-r--r--hw/arm/npcm7xx_boards.c23
-rw-r--r--hw/arm/npcm8xx.c859
-rw-r--r--hw/arm/npcm8xx_boards.c254
-rw-r--r--hw/arm/nrf51_soc.c23
-rw-r--r--hw/arm/nseries.c1473
-rw-r--r--hw/arm/olimex-stm32-h405.c2
-rw-r--r--hw/arm/omap1.c1210
-rw-r--r--hw/arm/omap2.c2715
-rw-r--r--hw/arm/omap_sx1.c16
-rw-r--r--hw/arm/orangepi.c3
-rw-r--r--hw/arm/palm.c324
-rw-r--r--hw/arm/pxa2xx.c2393
-rw-r--r--hw/arm/pxa2xx_gpio.c365
-rw-r--r--hw/arm/pxa2xx_pic.c359
-rw-r--r--hw/arm/raspi.c15
-rw-r--r--hw/arm/raspi4b.c5
-rw-r--r--hw/arm/realview.c25
-rw-r--r--hw/arm/sabrelite.c3
-rw-r--r--hw/arm/sbsa-ref.c34
-rw-r--r--hw/arm/smmu-common.c38
-rw-r--r--hw/arm/smmu-internal.h5
-rw-r--r--hw/arm/smmuv3-internal.h3
-rw-r--r--hw/arm/smmuv3.c48
-rw-r--r--hw/arm/spitz.c1284
-rw-r--r--hw/arm/stellaris.c140
-rw-r--r--hw/arm/stm32f100_soc.c6
-rw-r--r--hw/arm/stm32f205_soc.c6
-rw-r--r--hw/arm/stm32f405_soc.c18
-rw-r--r--hw/arm/stm32l4x5_soc.c14
-rw-r--r--hw/arm/stm32vldiscovery.c2
-rw-r--r--hw/arm/strongarm.c24
-rw-r--r--hw/arm/strongarm.h2
-rw-r--r--hw/arm/tosa.c327
-rw-r--r--hw/arm/trace-events16
-rw-r--r--hw/arm/versatilepb.c17
-rw-r--r--hw/arm/vexpress.c21
-rw-r--r--hw/arm/virt-acpi-build.c38
-rw-r--r--hw/arm/virt.c404
-rw-r--r--hw/arm/xen-pvh.c106
-rw-r--r--hw/arm/xen-stubs.c30
-rw-r--r--hw/arm/xen_arm.c267
-rw-r--r--hw/arm/xilinx_zynq.c128
-rw-r--r--hw/arm/xlnx-versal-virt.c9
-rw-r--r--hw/arm/xlnx-versal.c23
-rw-r--r--hw/arm/xlnx-zcu102.c5
-rw-r--r--hw/arm/xlnx-zynqmp.c58
-rw-r--r--hw/arm/z2.c355
-rw-r--r--hw/audio/ac97.c15
-rw-r--r--hw/audio/adlib.c5
-rw-r--r--hw/audio/asc.c15
-rw-r--r--hw/audio/cs4231.c4
-rw-r--r--hw/audio/cs4231a.c15
-rw-r--r--hw/audio/es1370.c13
-rw-r--r--hw/audio/gus.c7
-rw-r--r--hw/audio/hda-codec.c27
-rw-r--r--hw/audio/intel-hda.c20
-rw-r--r--hw/audio/marvell_88w8618.c6
-rw-r--r--hw/audio/pcspk.c5
-rw-r--r--hw/audio/pl041.c7
-rw-r--r--hw/audio/sb16.c7
-rw-r--r--hw/audio/soundhw.c3
-rw-r--r--hw/audio/trace-events1
-rw-r--r--hw/audio/via-ac97.c15
-rw-r--r--hw/audio/virtio-snd-pci.c5
-rw-r--r--hw/audio/virtio-snd.c45
-rw-r--r--hw/audio/wm8750.c5
-rw-r--r--hw/avr/arduino.c10
-rw-r--r--hw/avr/atmega.c59
-rw-r--r--hw/avr/atmega.h1
-rw-r--r--hw/avr/boot.c8
-rw-r--r--hw/block/Kconfig12
-rw-r--r--hw/block/block.c9
-rw-r--r--hw/block/dataplane/xen-block.c4
-rw-r--r--hw/block/dataplane/xen-block.h2
-rw-r--r--hw/block/ecc.c91
-rw-r--r--hw/block/fdc-isa.c19
-rw-r--r--hw/block/fdc-sysbus.c16
-rw-r--r--hw/block/fdc.c11
-rw-r--r--hw/block/hd-geometry.c2
-rw-r--r--hw/block/m25p80.c86
-rw-r--r--hw/block/m25p80_sfdp.c73
-rw-r--r--hw/block/m25p80_sfdp.h3
-rw-r--r--hw/block/meson.build4
-rw-r--r--hw/block/nand.c836
-rw-r--r--hw/block/onenand.c872
-rw-r--r--hw/block/pflash_cfi01.c35
-rw-r--r--hw/block/pflash_cfi02.c9
-rw-r--r--hw/block/swim.c11
-rw-r--r--hw/block/tc58128.c211
-rw-r--r--hw/block/vhost-user-blk.c64
-rw-r--r--hw/block/virtio-blk.c223
-rw-r--r--hw/block/xen-block.c34
-rw-r--r--hw/char/Kconfig18
-rw-r--r--hw/char/avr_usart.c9
-rw-r--r--hw/char/bcm2835_aux.c15
-rw-r--r--hw/char/cadence_uart.c5
-rw-r--r--hw/char/cmsdk-apb-uart.c7
-rw-r--r--hw/char/debugcon.c5
-rw-r--r--hw/char/digic-uart.c7
-rw-r--r--hw/char/diva-gsp.c295
-rw-r--r--hw/char/escc.c95
-rw-r--r--hw/char/etraxfs_ser.c267
-rw-r--r--hw/char/exynos4210_uart.c7
-rw-r--r--hw/char/goldfish_tty.c21
-rw-r--r--hw/char/grlib_apbuart.c7
-rw-r--r--hw/char/ibex_uart.c7
-rw-r--r--hw/char/imx_serial.c80
-rw-r--r--hw/char/ipoctal232.c9
-rw-r--r--hw/char/mcf_uart.c23
-rw-r--r--hw/char/mchp_pfsoc_mmuart.c4
-rw-r--r--hw/char/meson.build7
-rw-r--r--hw/char/nrf51_uart.c7
-rw-r--r--hw/char/omap_uart.c117
-rw-r--r--hw/char/parallel-isa.c2
-rw-r--r--hw/char/parallel.c11
-rw-r--r--hw/char/pl011.c241
-rw-r--r--hw/char/renesas_sci.c7
-rw-r--r--hw/char/riscv_htif.c47
-rw-r--r--hw/char/sclpconsole-lm.c9
-rw-r--r--hw/char/sclpconsole.c7
-rw-r--r--hw/char/serial-isa.c10
-rw-r--r--hw/char/serial-mm.c156
-rw-r--r--hw/char/serial-pci-multi.c30
-rw-r--r--hw/char/serial-pci.c15
-rw-r--r--hw/char/serial.c142
-rw-r--r--hw/char/sh_serial.c61
-rw-r--r--hw/char/shakti_uart.c7
-rw-r--r--hw/char/sifive_uart.c137
-rw-r--r--hw/char/spapr_vty.c5
-rw-r--r--hw/char/stm32f2xx_usart.c56
-rw-r--r--hw/char/stm32l4x5_usart.c28
-rw-r--r--hw/char/terminal3270.c5
-rw-r--r--hw/char/trace-events24
-rw-r--r--hw/char/virtio-console.c7
-rw-r--r--hw/char/virtio-serial-bus.c17
-rw-r--r--hw/char/xen_console.c63
-rw-r--r--hw/char/xilinx_uartlite.c41
-rw-r--r--hw/core/Kconfig4
-rw-r--r--hw/core/bus.c4
-rw-r--r--hw/core/clock.c38
-rw-r--r--hw/core/cpu-common.c143
-rw-r--r--hw/core/cpu-sysemu.c144
-rw-r--r--hw/core/cpu-system.c305
-rw-r--r--hw/core/cpu-user.c49
-rw-r--r--hw/core/eif.c709
-rw-r--r--hw/core/eif.h22
-rw-r--r--hw/core/generic-loader.c20
-rw-r--r--hw/core/gpio.c3
-rw-r--r--hw/core/guest-loader.c9
-rw-r--r--hw/core/irq.c42
-rw-r--r--hw/core/loader-fit.c42
-rw-r--r--hw/core/loader.c85
-rw-r--r--hw/core/machine-hmp-cmds.c4
-rw-r--r--hw/core/machine-qmp-cmds.c36
-rw-r--r--hw/core/machine-smp.c136
-rw-r--r--hw/core/machine.c297
-rw-r--r--hw/core/meson.build7
-rw-r--r--hw/core/null-machine.c3
-rw-r--r--hw/core/numa.c12
-rw-r--r--hw/core/or-irq.c7
-rw-r--r--hw/core/platform-bus.c10
-rw-r--r--hw/core/ptimer.c8
-rw-r--r--hw/core/qdev-clock.c86
-rw-r--r--hw/core/qdev-hotplug.c45
-rw-r--r--hw/core/qdev-properties-system.c242
-rw-r--r--hw/core/qdev-properties.c210
-rw-r--r--hw/core/qdev-user.c19
-rw-r--r--hw/core/qdev.c149
-rw-r--r--hw/core/register.c2
-rw-r--r--hw/core/reset.c9
-rw-r--r--hw/core/resetcontainer.c3
-rw-r--r--hw/core/resettable.c24
-rw-r--r--hw/core/split-irq.c5
-rw-r--r--hw/core/sysbus-fdt.c29
-rw-r--r--hw/core/sysbus.c75
-rw-r--r--hw/core/uboot_image.h2
-rw-r--r--hw/core/vm-change-state-handler.c20
-rw-r--r--hw/cpu/a15mpcore.c46
-rw-r--r--hw/cpu/a9mpcore.c44
-rw-r--r--hw/cpu/arm11mpcore.c26
-rw-r--r--hw/cpu/cluster.c5
-rw-r--r--hw/cpu/core.c2
-rw-r--r--hw/cpu/realview_mpcore.c32
-rw-r--r--hw/cris/Kconfig11
-rw-r--r--hw/cris/axis_dev88.c351
-rw-r--r--hw/cris/boot.c102
-rw-r--r--hw/cris/boot.h16
-rw-r--r--hw/cris/meson.build5
-rw-r--r--hw/cxl/cxl-component-utils.c9
-rw-r--r--hw/cxl/cxl-device-utils.c26
-rw-r--r--hw/cxl/cxl-events.c13
-rw-r--r--hw/cxl/cxl-host.c7
-rw-r--r--hw/cxl/cxl-mailbox-utils.c1678
-rw-r--r--hw/cxl/switch-mailbox-cci.c13
-rw-r--r--hw/display/Kconfig14
-rw-r--r--hw/display/apple-gfx-mmio.m285
-rw-r--r--hw/display/apple-gfx-pci.m157
-rw-r--r--hw/display/apple-gfx.h74
-rw-r--r--hw/display/apple-gfx.m880
-rw-r--r--hw/display/artist.c16
-rw-r--r--hw/display/ati.c9
-rw-r--r--hw/display/bcm2835_fb.c7
-rw-r--r--hw/display/blizzard.c1026
-rw-r--r--hw/display/bochs-display.c7
-rw-r--r--hw/display/cg3.c7
-rw-r--r--hw/display/cirrus_vga.c9
-rw-r--r--hw/display/cirrus_vga_isa.c5
-rw-r--r--hw/display/dm163.c6
-rw-r--r--hw/display/dpcd.c4
-rw-r--r--hw/display/edid-region.c2
-rw-r--r--hw/display/exynos4210_fimd.c7
-rw-r--r--hw/display/framebuffer.h2
-rw-r--r--hw/display/g364fb.c7
-rw-r--r--hw/display/i2c-ddc.c7
-rw-r--r--hw/display/jazz_led.c4
-rw-r--r--hw/display/macfb.c15
-rw-r--r--hw/display/meson.build6
-rw-r--r--hw/display/next-fb.c2
-rw-r--r--hw/display/omap_dss.c1093
-rw-r--r--hw/display/pl110.c5
-rw-r--r--hw/display/pxa2xx_lcd.c1451
-rw-r--r--hw/display/qxl-render.c2
-rw-r--r--hw/display/qxl.c48
-rw-r--r--hw/display/ramfb-standalone.c8
-rw-r--r--hw/display/ramfb.c2
-rw-r--r--hw/display/sii9022.c4
-rw-r--r--hw/display/sm501.c18
-rw-r--r--hw/display/ssd0303.c2
-rw-r--r--hw/display/ssd0323.c2
-rw-r--r--hw/display/tc6393xb.c568
-rw-r--r--hw/display/tcx.c8
-rw-r--r--hw/display/trace-events33
-rw-r--r--hw/display/vga-isa.c7
-rw-r--r--hw/display/vga-mmio.c7
-rw-r--r--hw/display/vga-pci.c16
-rw-r--r--hw/display/vga.c6
-rw-r--r--hw/display/vga_int.h4
-rw-r--r--hw/display/vhost-user-gpu.c38
-rw-r--r--hw/display/virtio-gpu-base.c6
-rw-r--r--hw/display/virtio-gpu-gl.c78
-rw-r--r--hw/display/virtio-gpu-pci-rutabaga.c2
-rw-r--r--hw/display/virtio-gpu-pci.c5
-rw-r--r--hw/display/virtio-gpu-rutabaga.c5
-rw-r--r--hw/display/virtio-gpu-udmabuf.c12
-rw-r--r--hw/display/virtio-gpu-virgl.c565
-rw-r--r--hw/display/virtio-gpu.c183
-rw-r--r--hw/display/virtio-vga.c5
-rw-r--r--hw/display/vmware_vga.c11
-rw-r--r--hw/display/xenfb.c2
-rw-r--r--hw/display/xlnx_dp.c7
-rw-r--r--hw/dma/bcm2835_dma.c4
-rw-r--r--hw/dma/etraxfs_dma.c781
-rw-r--r--hw/dma/i82374.c7
-rw-r--r--hw/dma/i8257.c9
-rw-r--r--hw/dma/meson.build2
-rw-r--r--hw/dma/omap_dma.c785
-rw-r--r--hw/dma/pl080.c7
-rw-r--r--hw/dma/pl330.c10
-rw-r--r--hw/dma/pxa2xx_dma.c591
-rw-r--r--hw/dma/rc4030.c8
-rw-r--r--hw/dma/sifive_pdma.c5
-rw-r--r--hw/dma/sparc32_dma.c14
-rw-r--r--hw/dma/xilinx_axidma.c18
-rw-r--r--hw/dma/xlnx-zdma.c7
-rw-r--r--hw/dma/xlnx-zynq-devcfg.c6
-rw-r--r--hw/dma/xlnx_csu_dma.c13
-rw-r--r--hw/dma/xlnx_dpdma.c4
-rw-r--r--hw/fsi/aspeed_apb2opb.c4
-rw-r--r--hw/fsi/cfam.c2
-rw-r--r--hw/fsi/fsi-master.c4
-rw-r--r--hw/fsi/fsi.c4
-rw-r--r--hw/fsi/lbus.c4
-rw-r--r--hw/gpio/Kconfig15
-rw-r--r--hw/gpio/aspeed_gpio.c444
-rw-r--r--hw/gpio/bcm2835_gpio.c4
-rw-r--r--hw/gpio/bcm2838_gpio.c5
-rw-r--r--hw/gpio/gpio_key.c4
-rw-r--r--hw/gpio/gpio_pwr.c2
-rw-r--r--hw/gpio/imx_gpio.c29
-rw-r--r--hw/gpio/max7310.c217
-rw-r--r--hw/gpio/meson.build3
-rw-r--r--hw/gpio/mpc8xxx.c26
-rw-r--r--hw/gpio/npcm7xx_gpio.c8
-rw-r--r--hw/gpio/nrf51_gpio.c5
-rw-r--r--hw/gpio/omap_gpio.c592
-rw-r--r--hw/gpio/pca9552.c11
-rw-r--r--hw/gpio/pca9554.c12
-rw-r--r--hw/gpio/pcf8574.c4
-rw-r--r--hw/gpio/pl061.c6
-rw-r--r--hw/gpio/sifive_gpio.c7
-rw-r--r--hw/gpio/stm32l4x5_gpio.c5
-rw-r--r--hw/gpio/trace-events5
-rw-r--r--hw/gpio/zaurus.c2
-rw-r--r--hw/hppa/Kconfig3
-rw-r--r--hw/hppa/hppa_hardware.h36
-rw-r--r--hw/hppa/machine.c151
-rw-r--r--hw/hyperv/hv-balloon-our_range_memslots.h2
-rw-r--r--hw/hyperv/hv-balloon.c27
-rw-r--r--hw/hyperv/hyperv.c15
-rw-r--r--hw/hyperv/hyperv_testdev.c9
-rw-r--r--hw/hyperv/meson.build11
-rw-r--r--hw/hyperv/syndbg.c15
-rw-r--r--hw/hyperv/vmbus.c33
-rw-r--r--hw/i2c/allwinner-i2c.c4
-rw-r--r--hw/i2c/aspeed_i2c.c366
-rw-r--r--hw/i2c/bcm2835_i2c.c4
-rw-r--r--hw/i2c/bitbang_i2c.c2
-rw-r--r--hw/i2c/core.c5
-rw-r--r--hw/i2c/exynos4210_i2c.c4
-rw-r--r--hw/i2c/i2c_mux_pca954x.c9
-rw-r--r--hw/i2c/imx_i2c.c36
-rw-r--r--hw/i2c/microbit_i2c.c4
-rw-r--r--hw/i2c/mpc_i2c.c41
-rw-r--r--hw/i2c/npcm7xx_smbus.c2
-rw-r--r--hw/i2c/omap_i2c.c185
-rw-r--r--hw/i2c/pm_smbus.c1
-rw-r--r--hw/i2c/pmbus_device.c2
-rw-r--r--hw/i2c/ppc4xx_i2c.c4
-rw-r--r--hw/i2c/smbus_eeprom.c23
-rw-r--r--hw/i2c/smbus_ich9.c4
-rw-r--r--hw/i2c/smbus_slave.c2
-rw-r--r--hw/i2c/trace-events10
-rw-r--r--hw/i386/Kconfig17
-rw-r--r--hw/i386/acpi-build.c209
-rw-r--r--hw/i386/acpi-build.h2
-rw-r--r--hw/i386/acpi-common.c2
-rw-r--r--hw/i386/acpi-microvm.c2
-rw-r--r--hw/i386/amd_iommu.c229
-rw-r--r--hw/i386/amd_iommu.h12
-rw-r--r--hw/i386/fw_cfg.c16
-rw-r--r--hw/i386/intel_iommu.c967
-rw-r--r--hw/i386/intel_iommu_internal.h134
-rw-r--r--hw/i386/kvm/apic.c13
-rw-r--r--hw/i386/kvm/clock.c16
-rw-r--r--hw/i386/kvm/i8254.c11
-rw-r--r--hw/i386/kvm/i8259.c6
-rw-r--r--hw/i386/kvm/ioapic.c9
-rw-r--r--hw/i386/kvm/xen-stubs.c13
-rw-r--r--hw/i386/kvm/xen_evtchn.c72
-rw-r--r--hw/i386/kvm/xen_evtchn.h2
-rw-r--r--hw/i386/kvm/xen_gnttab.c8
-rw-r--r--hw/i386/kvm/xen_overlay.c10
-rw-r--r--hw/i386/kvm/xen_primary_console.c6
-rw-r--r--hw/i386/kvm/xen_xenstore.c14
-rw-r--r--hw/i386/meson.build2
-rw-r--r--hw/i386/microvm-dt.c4
-rw-r--r--hw/i386/microvm.c94
-rw-r--r--hw/i386/monitor.c4
-rw-r--r--hw/i386/multiboot.c46
-rw-r--r--hw/i386/nitro_enclave.c353
-rw-r--r--hw/i386/pc.c150
-rw-r--r--hw/i386/pc_piix.c87
-rw-r--r--hw/i386/pc_q35.c78
-rw-r--r--hw/i386/pc_sysfw.c13
-rw-r--r--hw/i386/pc_sysfw_ovmf.c1
-rw-r--r--hw/i386/port92.c6
-rw-r--r--hw/i386/sgx-epc.c9
-rw-r--r--hw/i386/sgx-stub.c11
-rw-r--r--hw/i386/sgx.c54
-rw-r--r--hw/i386/tdvf-hob.c130
-rw-r--r--hw/i386/tdvf-hob.h26
-rw-r--r--hw/i386/tdvf.c189
-rw-r--r--hw/i386/trace-events1
-rw-r--r--hw/i386/vapic.c19
-rw-r--r--hw/i386/vmmouse.c7
-rw-r--r--hw/i386/vmport.c12
-rw-r--r--hw/i386/x86-common.c141
-rw-r--r--hw/i386/x86-cpu.c6
-rw-r--r--hw/i386/x86-iommu.c9
-rw-r--r--hw/i386/x86.c9
-rw-r--r--hw/i386/xen/meson.build1
-rw-r--r--hw/i386/xen/xen-hvm.c12
-rw-r--r--hw/i386/xen/xen-pvh.c125
-rw-r--r--hw/i386/xen/xen_apic.c4
-rw-r--r--hw/i386/xen/xen_platform.c12
-rw-r--r--hw/i386/xen/xen_pvdevice.c7
-rw-r--r--hw/ide/Kconfig10
-rw-r--r--hw/ide/ahci-allwinner.c4
-rw-r--r--hw/ide/ahci-internal.h1
-rw-r--r--hw/ide/ahci-sysbus.c90
-rw-r--r--hw/ide/ahci.c113
-rw-r--r--hw/ide/atapi.c4
-rw-r--r--hw/ide/cf.c5
-rw-r--r--hw/ide/cmd646.c11
-rw-r--r--hw/ide/core.c15
-rw-r--r--hw/ide/ich.c27
-rw-r--r--hw/ide/ide-bus.c8
-rw-r--r--hw/ide/ide-dev.c21
-rw-r--r--hw/ide/isa.c9
-rw-r--r--hw/ide/macio.c20
-rw-r--r--hw/ide/meson.build2
-rw-r--r--hw/ide/microdrive.c644
-rw-r--r--hw/ide/mmio.c9
-rw-r--r--hw/ide/pci.c11
-rw-r--r--hw/ide/piix.c8
-rw-r--r--hw/ide/sii3112.c4
-rw-r--r--hw/ide/via.c6
-rw-r--r--hw/input/Kconfig13
-rw-r--r--hw/input/adb-kbd.c4
-rw-r--r--hw/input/adb-mouse.c65
-rw-r--r--hw/input/adb.c4
-rw-r--r--hw/input/ads7846.c186
-rw-r--r--hw/input/lasips2.c10
-rw-r--r--hw/input/lm832x.c528
-rw-r--r--hw/input/meson.build5
-rw-r--r--hw/input/pckbd.c20
-rw-r--r--hw/input/pl050.c6
-rw-r--r--hw/input/ps2.c10
-rw-r--r--hw/input/pxa2xx_keypad.c331
-rw-r--r--hw/input/stellaris_gamepad.c5
-rw-r--r--hw/input/trace-events3
-rw-r--r--hw/input/tsc2005.c571
-rw-r--r--hw/input/tsc210x.c1241
-rw-r--r--hw/input/virtio-input-hid.c15
-rw-r--r--hw/input/virtio-input-host.c6
-rw-r--r--hw/input/virtio-input.c8
-rw-r--r--hw/intc/Kconfig14
-rw-r--r--hw/intc/allwinner-a10-pic.c6
-rw-r--r--hw/intc/apic.c7
-rw-r--r--hw/intc/apic_common.c9
-rw-r--r--hw/intc/arm_gic.c19
-rw-r--r--hw/intc/arm_gic_common.c10
-rw-r--r--hw/intc/arm_gic_kvm.c13
-rw-r--r--hw/intc/arm_gicv2m.c7
-rw-r--r--hw/intc/arm_gicv3.c2
-rw-r--r--hw/intc/arm_gicv3_common.c9
-rw-r--r--hw/intc/arm_gicv3_cpuif.c27
-rw-r--r--hw/intc/arm_gicv3_its.c49
-rw-r--r--hw/intc/arm_gicv3_its_common.c4
-rw-r--r--hw/intc/arm_gicv3_its_kvm.c9
-rw-r--r--hw/intc/arm_gicv3_kvm.c6
-rw-r--r--hw/intc/armv7m_nvic.c13
-rw-r--r--hw/intc/aspeed_intc.c1095
-rw-r--r--hw/intc/aspeed_vic.c4
-rw-r--r--hw/intc/bcm2835_ic.c4
-rw-r--r--hw/intc/bcm2836_control.c4
-rw-r--r--hw/intc/etraxfs_pic.c172
-rw-r--r--hw/intc/exynos4210_combiner.c7
-rw-r--r--hw/intc/exynos4210_gic.c5
-rw-r--r--hw/intc/goldfish_pic.c9
-rw-r--r--hw/intc/grlib_irqmp.c7
-rw-r--r--hw/intc/heathrow_pic.c4
-rw-r--r--hw/intc/i8259.c9
-rw-r--r--hw/intc/i8259_common.c7
-rw-r--r--hw/intc/imx_avic.c4
-rw-r--r--hw/intc/imx_gpcv2.c4
-rw-r--r--hw/intc/ioapic.c11
-rw-r--r--hw/intc/ioapic_common.c4
-rw-r--r--hw/intc/ioapic_internal.h2
-rw-r--r--hw/intc/loongarch_extioi.c224
-rw-r--r--hw/intc/loongarch_extioi_common.c248
-rw-r--r--hw/intc/loongarch_extioi_kvm.c140
-rw-r--r--hw/intc/loongarch_ipi.c230
-rw-r--r--hw/intc/loongarch_ipi_kvm.c85
-rw-r--r--hw/intc/loongarch_pch_msi.c24
-rw-r--r--hw/intc/loongarch_pch_pic.c474
-rw-r--r--hw/intc/loongarch_pic_common.c135
-rw-r--r--hw/intc/loongarch_pic_kvm.c89
-rw-r--r--hw/intc/loongson_ipi.c370
-rw-r--r--hw/intc/loongson_ipi_common.c362
-rw-r--r--hw/intc/m68k_irqc.c9
-rw-r--r--hw/intc/meson.build17
-rw-r--r--hw/intc/mips_gic.c12
-rw-r--r--hw/intc/omap_intc.c448
-rw-r--r--hw/intc/ompic.c7
-rw-r--r--hw/intc/openpic.c27
-rw-r--r--hw/intc/openpic_kvm.c9
-rw-r--r--hw/intc/pl190.c4
-rw-r--r--hw/intc/pnv_xive.c23
-rw-r--r--hw/intc/pnv_xive2.c789
-rw-r--r--hw/intc/pnv_xive2_regs.h108
-rw-r--r--hw/intc/ppc-uic.c7
-rw-r--r--hw/intc/realview_gic.c2
-rw-r--r--hw/intc/riscv_aclint.c10
-rw-r--r--hw/intc/riscv_aplic.c198
-rw-r--r--hw/intc/riscv_imsic.c108
-rw-r--r--hw/intc/rx_icu.c5
-rw-r--r--hw/intc/s390_flic.c33
-rw-r--r--hw/intc/s390_flic_kvm.c6
-rw-r--r--hw/intc/sifive_plic.c24
-rw-r--r--hw/intc/slavio_intctl.c6
-rw-r--r--hw/intc/spapr_xive.c19
-rw-r--r--hw/intc/spapr_xive_kvm.c10
-rw-r--r--hw/intc/trace-events39
-rw-r--r--hw/intc/xics.c30
-rw-r--r--hw/intc/xics_kvm.c2
-rw-r--r--hw/intc/xics_pnv.c2
-rw-r--r--hw/intc/xics_spapr.c4
-rw-r--r--hw/intc/xilinx_intc.c60
-rw-r--r--hw/intc/xive.c455
-rw-r--r--hw/intc/xive2.c1057
-rw-r--r--hw/intc/xlnx-pmu-iomod-intc.c7
-rw-r--r--hw/intc/xlnx-zynqmp-ipi.c4
-rw-r--r--hw/ipack/Kconfig4
-rw-r--r--hw/ipack/ipack.c10
-rw-r--r--hw/ipack/meson.build3
-rw-r--r--hw/ipack/tpci200.c10
-rw-r--r--hw/ipmi/ipmi.c9
-rw-r--r--hw/ipmi/ipmi_bmc_extern.c14
-rw-r--r--hw/ipmi/ipmi_bmc_sim.c123
-rw-r--r--hw/ipmi/ipmi_bt.c10
-rw-r--r--hw/ipmi/ipmi_kcs.c5
-rw-r--r--hw/ipmi/isa_ipmi_bt.c11
-rw-r--r--hw/ipmi/isa_ipmi_kcs.c20
-rw-r--r--hw/ipmi/pci_ipmi_bt.c54
-rw-r--r--hw/ipmi/pci_ipmi_kcs.c15
-rw-r--r--hw/ipmi/smbus_ipmi.c4
-rw-r--r--hw/isa/fdc37m81x-superio.c2
-rw-r--r--hw/isa/i82378.c4
-rw-r--r--hw/isa/isa-bus.c8
-rw-r--r--hw/isa/isa-superio.c6
-rw-r--r--hw/isa/lpc_ich9.c28
-rw-r--r--hw/isa/pc87312.c7
-rw-r--r--hw/isa/piix.c15
-rw-r--r--hw/isa/smc37c669-superio.c2
-rw-r--r--hw/isa/vt82c686.c41
-rw-r--r--hw/loongarch/Kconfig6
-rw-r--r--hw/loongarch/acpi-build.c661
-rw-r--r--hw/loongarch/boot.c187
-rw-r--r--hw/loongarch/fw_cfg.c2
-rw-r--r--hw/loongarch/meson.build8
-rw-r--r--hw/loongarch/virt-acpi-build.c742
-rw-r--r--hw/loongarch/virt-fdt-build.c534
-rw-r--r--hw/loongarch/virt.c988
-rw-r--r--hw/m68k/Kconfig1
-rw-r--r--hw/m68k/an5206.c4
-rw-r--r--hw/m68k/bootinfo.h30
-rw-r--r--hw/m68k/mcf5206.c11
-rw-r--r--hw/m68k/mcf5208.c24
-rw-r--r--hw/m68k/mcf_intc.c9
-rw-r--r--hw/m68k/next-cube.c1037
-rw-r--r--hw/m68k/next-kbd.c164
-rw-r--r--hw/m68k/q800-glue.c7
-rw-r--r--hw/m68k/q800.c19
-rw-r--r--hw/m68k/virt.c38
-rw-r--r--hw/mem/cxl_type3.c159
-rw-r--r--hw/mem/memory-device.c4
-rw-r--r--hw/mem/npcm7xx_mc.c2
-rw-r--r--hw/mem/nvdimm.c7
-rw-r--r--hw/mem/pc-dimm.c11
-rw-r--r--hw/mem/sparse-mem.c8
-rw-r--r--hw/meson.build5
-rw-r--r--hw/microblaze/Kconfig2
-rw-r--r--hw/microblaze/boot.c16
-rw-r--r--hw/microblaze/boot.h4
-rw-r--r--hw/microblaze/petalogix_ml605_mmu.c15
-rw-r--r--hw/microblaze/petalogix_s3adsp1800_mmu.c68
-rw-r--r--hw/microblaze/xlnx-zynqmp-pmu.c11
-rw-r--r--hw/mips/Kconfig10
-rw-r--r--hw/mips/boston.c36
-rw-r--r--hw/mips/cps.c13
-rw-r--r--hw/mips/fuloong2e.c12
-rw-r--r--hw/mips/jazz.c30
-rw-r--r--hw/mips/loongson3_bootp.c16
-rw-r--r--hw/mips/loongson3_bootp.h1
-rw-r--r--hw/mips/loongson3_virt.c89
-rw-r--r--hw/mips/malta.c42
-rw-r--r--hw/mips/meson.build5
-rw-r--r--hw/mips/mips_int.c2
-rw-r--r--hw/mips/mipssim.c27
-rw-r--r--hw/misc/Kconfig27
-rw-r--r--hw/misc/a9scu.c7
-rw-r--r--hw/misc/allwinner-a10-ccm.c4
-rw-r--r--hw/misc/allwinner-a10-dramc.c4
-rw-r--r--hw/misc/allwinner-cpucfg.c6
-rw-r--r--hw/misc/allwinner-h3-ccu.c6
-rw-r--r--hw/misc/allwinner-h3-dramc.c15
-rw-r--r--hw/misc/allwinner-h3-sysctrl.c7
-rw-r--r--hw/misc/allwinner-r40-ccu.c6
-rw-r--r--hw/misc/allwinner-r40-dramc.c19
-rw-r--r--hw/misc/allwinner-sid.c9
-rw-r--r--hw/misc/allwinner-sramc.c8
-rw-r--r--hw/misc/applesmc.c9
-rw-r--r--hw/misc/arm11scu.c5
-rw-r--r--hw/misc/arm_l2x0.c7
-rw-r--r--hw/misc/arm_sysctl.c11
-rw-r--r--hw/misc/armsse-cpu-pwrctrl.c4
-rw-r--r--hw/misc/armsse-cpuid.c5
-rw-r--r--hw/misc/armsse-mhu.c4
-rw-r--r--hw/misc/armv7m_ras.c2
-rw-r--r--hw/misc/aspeed_hace.c558
-rw-r--r--hw/misc/aspeed_i3c.c11
-rw-r--r--hw/misc/aspeed_lpc.c7
-rw-r--r--hw/misc/aspeed_peci.c4
-rw-r--r--hw/misc/aspeed_sbc.c9
-rw-r--r--hw/misc/aspeed_scu.c53
-rw-r--r--hw/misc/aspeed_sdmc.c17
-rw-r--r--hw/misc/aspeed_sli.c6
-rw-r--r--hw/misc/aspeed_xdma.c10
-rw-r--r--hw/misc/auxbus.c6
-rw-r--r--hw/misc/avr_power.c4
-rw-r--r--hw/misc/axp2xx.c6
-rw-r--r--hw/misc/bcm2835_cprman.c29
-rw-r--r--hw/misc/bcm2835_mbox.c4
-rw-r--r--hw/misc/bcm2835_mphi.c4
-rw-r--r--hw/misc/bcm2835_powermgt.c6
-rw-r--r--hw/misc/bcm2835_property.c98
-rw-r--r--hw/misc/bcm2835_rng.c4
-rw-r--r--hw/misc/bcm2835_thermal.c4
-rw-r--r--hw/misc/cbus.c619
-rw-r--r--hw/misc/debugexit.c7
-rw-r--r--hw/misc/djmemc.c2
-rw-r--r--hw/misc/eccmemctl.c7
-rw-r--r--hw/misc/edu.c23
-rw-r--r--hw/misc/empty_slot.c5
-rw-r--r--hw/misc/exynos4210_clk.c4
-rw-r--r--hw/misc/exynos4210_pmu.c6
-rw-r--r--hw/misc/exynos4210_rng.c4
-rw-r--r--hw/misc/grlib_ahb_apb_pnp.c4
-rw-r--r--hw/misc/i2c-echo.c12
-rw-r--r--hw/misc/imx25_ccm.c4
-rw-r--r--hw/misc/imx31_ccm.c4
-rw-r--r--hw/misc/imx6_ccm.c5
-rw-r--r--hw/misc/imx6_src.c27
-rw-r--r--hw/misc/imx6ul_ccm.c4
-rw-r--r--hw/misc/imx7_ccm.c8
-rw-r--r--hw/misc/imx7_gpr.c2
-rw-r--r--hw/misc/imx7_snvs.c10
-rw-r--r--hw/misc/imx7_src.c4
-rw-r--r--hw/misc/imx8mp_analog.c160
-rw-r--r--hw/misc/imx8mp_ccm.c175
-rw-r--r--hw/misc/imx_rngc.c4
-rw-r--r--hw/misc/iosb.c2
-rw-r--r--hw/misc/iotkit-secctl.c7
-rw-r--r--hw/misc/iotkit-sysctl.c9
-rw-r--r--hw/misc/iotkit-sysinfo.c5
-rw-r--r--hw/misc/ivshmem-flat.c458
-rw-r--r--hw/misc/ivshmem-pci.c1131
-rw-r--r--hw/misc/ivshmem.c1133
-rw-r--r--hw/misc/lasi.c8
-rw-r--r--hw/misc/led.c7
-rw-r--r--hw/misc/mac_via.c17
-rw-r--r--hw/misc/macio/cuda.c13
-rw-r--r--hw/misc/macio/gpio.c27
-rw-r--r--hw/misc/macio/mac_dbdma.c6
-rw-r--r--hw/misc/macio/macio.c14
-rw-r--r--hw/misc/macio/pmu.c13
-rw-r--r--hw/misc/macio/trace-events3
-rw-r--r--hw/misc/mchp_pfsoc_dmc.c5
-rw-r--r--hw/misc/mchp_pfsoc_ioscb.c2
-rw-r--r--hw/misc/mchp_pfsoc_sysreg.c9
-rw-r--r--hw/misc/meson.build18
-rw-r--r--hw/misc/mips_cmgcr.c7
-rw-r--r--hw/misc/mips_cpc.c9
-rw-r--r--hw/misc/mips_itu.c7
-rw-r--r--hw/misc/mos6522.c5
-rw-r--r--hw/misc/mps2-fpgaio.c9
-rw-r--r--hw/misc/mps2-scc.c7
-rw-r--r--hw/misc/msf2-sysreg.c7
-rw-r--r--hw/misc/mst_fpga.c269
-rw-r--r--hw/misc/npcm7xx_clk.c1088
-rw-r--r--hw/misc/npcm7xx_gcr.c265
-rw-r--r--hw/misc/npcm7xx_mft.c7
-rw-r--r--hw/misc/npcm7xx_pwm.c2
-rw-r--r--hw/misc/npcm7xx_rng.c2
-rw-r--r--hw/misc/npcm_clk.c1220
-rw-r--r--hw/misc/npcm_gcr.c482
-rw-r--r--hw/misc/nrf51_rng.c17
-rw-r--r--hw/misc/omap_clk.c995
-rw-r--r--hw/misc/omap_gpmc.c898
-rw-r--r--hw/misc/omap_l4.c162
-rw-r--r--hw/misc/omap_sdrc.c167
-rw-r--r--hw/misc/omap_tap.c117
-rw-r--r--hw/misc/pc-testdev.c2
-rw-r--r--hw/misc/pci-testdev.c23
-rw-r--r--hw/misc/pvpanic-isa.c9
-rw-r--r--hw/misc/pvpanic-mmio.c60
-rw-r--r--hw/misc/pvpanic-pci.c9
-rw-r--r--hw/misc/pvpanic.c2
-rw-r--r--hw/misc/sbsa_ec.c4
-rw-r--r--hw/misc/sifive_e_aon.c9
-rw-r--r--hw/misc/sifive_test.c4
-rw-r--r--hw/misc/sifive_u_otp.c9
-rw-r--r--hw/misc/sifive_u_prci.c4
-rw-r--r--hw/misc/slavio_misc.c6
-rw-r--r--hw/misc/stm32_rcc.c162
-rw-r--r--hw/misc/stm32f2xx_syscfg.c4
-rw-r--r--hw/misc/stm32f4xx_exti.c4
-rw-r--r--hw/misc/stm32f4xx_syscfg.c4
-rw-r--r--hw/misc/stm32l4x5_exti.c2
-rw-r--r--hw/misc/stm32l4x5_rcc.c41
-rw-r--r--hw/misc/stm32l4x5_syscfg.c21
-rw-r--r--hw/misc/trace-events53
-rw-r--r--hw/misc/tz-mpc.c9
-rw-r--r--hw/misc/tz-msc.c7
-rw-r--r--hw/misc/tz-ppc.c7
-rw-r--r--hw/misc/unimp.c5
-rw-r--r--hw/misc/virt_ctrl.c6
-rw-r--r--hw/misc/vmcoreinfo.c39
-rw-r--r--hw/misc/xlnx-versal-cframe-reg.c12
-rw-r--r--hw/misc/xlnx-versal-cfu.c26
-rw-r--r--hw/misc/xlnx-versal-crl.c2
-rw-r--r--hw/misc/xlnx-versal-pmc-iou-slcr.c3
-rw-r--r--hw/misc/xlnx-versal-trng.c26
-rw-r--r--hw/misc/xlnx-versal-xramc.c5
-rw-r--r--hw/misc/xlnx-zynqmp-apu-ctrl.c2
-rw-r--r--hw/misc/xlnx-zynqmp-crf.c2
-rw-r--r--hw/misc/zynq_slcr.c7
-rw-r--r--hw/net/Kconfig5
-rw-r--r--hw/net/allwinner-sun8i-emac.c12
-rw-r--r--hw/net/allwinner_emac.c11
-rw-r--r--hw/net/cadence_gem.c37
-rw-r--r--hw/net/can/can_kvaser_pci.c10
-rw-r--r--hw/net/can/can_mioe3680_pci.c10
-rw-r--r--hw/net/can/can_pcm3680_pci.c10
-rw-r--r--hw/net/can/can_sja1000.c2
-rw-r--r--hw/net/can/ctucan_core.c5
-rw-r--r--hw/net/can/ctucan_pci.c10
-rw-r--r--hw/net/can/xlnx-versal-canfd.c186
-rw-r--r--hw/net/can/xlnx-zynqmp-can.c5
-rw-r--r--hw/net/dp8393x.c9
-rw-r--r--hw/net/e1000.c110
-rw-r--r--hw/net/e1000e.c12
-rw-r--r--hw/net/e1000e_core.c6
-rw-r--r--hw/net/e1000x_regs.h2
-rw-r--r--hw/net/eepro100.c23
-rw-r--r--hw/net/etraxfs_eth.c688
-rw-r--r--hw/net/fsl_etsec/etsec.c34
-rw-r--r--hw/net/fsl_etsec/miim.c19
-rw-r--r--hw/net/ftgmac100.c19
-rw-r--r--hw/net/i82596.c44
-rw-r--r--hw/net/i82596.h4
-rw-r--r--hw/net/igb.c21
-rw-r--r--hw/net/igb_core.c6
-rw-r--r--hw/net/igb_regs.h2
-rw-r--r--hw/net/igbvf.c4
-rw-r--r--hw/net/imx_fec.c159
-rw-r--r--hw/net/lan9118.c147
-rw-r--r--hw/net/lan9118_phy.c222
-rw-r--r--hw/net/lance.c9
-rw-r--r--hw/net/lasi_i82596.c9
-rw-r--r--hw/net/mcf_fec.c10
-rw-r--r--hw/net/meson.build3
-rw-r--r--hw/net/mipsnet.c7
-rw-r--r--hw/net/msf2-emac.c7
-rw-r--r--hw/net/mv88w8618_eth.c7
-rw-r--r--hw/net/ne2000-isa.c7
-rw-r--r--hw/net/ne2000-pci.c9
-rw-r--r--hw/net/ne2000.c2
-rw-r--r--hw/net/net_rx_pkt.c16
-rw-r--r--hw/net/net_rx_pkt.h17
-rw-r--r--hw/net/net_tx_pkt.c4
-rw-r--r--hw/net/npcm7xx_emc.c12
-rw-r--r--hw/net/npcm_gmac.c14
-rw-r--r--hw/net/npcm_pcs.c410
-rw-r--r--hw/net/opencores_eth.c7
-rw-r--r--hw/net/pcnet-pci.c13
-rw-r--r--hw/net/pcnet.h2
-rw-r--r--hw/net/rocker/rocker-hmp-cmds.c2
-rw-r--r--hw/net/rocker/rocker.c14
-rw-r--r--hw/net/rocker/rocker.h15
-rw-r--r--hw/net/rocker/rocker_hw.h20
-rw-r--r--hw/net/rocker/rocker_of_dpa.c53
-rw-r--r--hw/net/rtl8139.c25
-rw-r--r--hw/net/smc91c111.c167
-rw-r--r--hw/net/spapr_llan.c7
-rw-r--r--hw/net/stellaris_enet.c9
-rw-r--r--hw/net/sungem.c11
-rw-r--r--hw/net/sunhme.c11
-rw-r--r--hw/net/trace-events30
-rw-r--r--hw/net/tulip.c13
-rw-r--r--hw/net/vhost_net.c172
-rw-r--r--hw/net/virtio-net.c320
-rw-r--r--hw/net/vmxnet3.c57
-rw-r--r--hw/net/vmxnet3.h4
-rw-r--r--hw/net/xen_nic.c20
-rw-r--r--hw/net/xgmac.c7
-rw-r--r--hw/net/xilinx_axienet.c16
-rw-r--r--hw/net/xilinx_ethlite.c410
-rw-r--r--hw/nubus/mac-nubus-bridge.c2
-rw-r--r--hw/nubus/nubus-bridge.c5
-rw-r--r--hw/nubus/nubus-bus.c2
-rw-r--r--hw/nubus/nubus-device.c12
-rw-r--r--hw/nubus/nubus-virtio-mmio.c4
-rw-r--r--hw/nvme/ctrl.c1218
-rw-r--r--hw/nvme/dif.c7
-rw-r--r--hw/nvme/nguid.c6
-rw-r--r--hw/nvme/ns.c80
-rw-r--r--hw/nvme/nvme.h31
-rw-r--r--hw/nvme/subsys.c15
-rw-r--r--hw/nvme/trace-events1
-rw-r--r--hw/nvram/bcm2835_otp.c2
-rw-r--r--hw/nvram/chrp_nvram.c2
-rw-r--r--hw/nvram/ds1225y.c5
-rw-r--r--hw/nvram/eeprom_at24c.c32
-rw-r--r--hw/nvram/fw_cfg-acpi.c2
-rw-r--r--hw/nvram/fw_cfg.c178
-rw-r--r--hw/nvram/mac_nvram.c11
-rw-r--r--hw/nvram/npcm7xx_otp.c6
-rw-r--r--hw/nvram/nrf51_nvm.c7
-rw-r--r--hw/nvram/spapr_nvram.c13
-rw-r--r--hw/nvram/xlnx-bbram.c22
-rw-r--r--hw/nvram/xlnx-efuse.c11
-rw-r--r--hw/nvram/xlnx-versal-efuse-cache.c6
-rw-r--r--hw/nvram/xlnx-versal-efuse-ctrl.c13
-rw-r--r--hw/nvram/xlnx-zynqmp-efuse.c19
-rw-r--r--hw/openrisc/Kconfig4
-rw-r--r--hw/openrisc/boot.c17
-rw-r--r--hw/openrisc/cputimer.c28
-rw-r--r--hw/openrisc/openrisc_sim.c46
-rw-r--r--hw/openrisc/virt.c32
-rw-r--r--hw/pci-bridge/Kconfig5
-rw-r--r--hw/pci-bridge/cxl_downstream.c28
-rw-r--r--hw/pci-bridge/cxl_root_port.c12
-rw-r--r--hw/pci-bridge/cxl_upstream.c19
-rw-r--r--hw/pci-bridge/gen_pcie_root_port.c5
-rw-r--r--hw/pci-bridge/i82801b11.c6
-rw-r--r--hw/pci-bridge/ioh3420.c2
-rw-r--r--hw/pci-bridge/meson.build2
-rw-r--r--hw/pci-bridge/pci_bridge_dev.c13
-rw-r--r--hw/pci-bridge/pci_expander_bridge.c55
-rw-r--r--hw/pci-bridge/pcie_pci_bridge.c12
-rw-r--r--hw/pci-bridge/pcie_root_port.c7
-rw-r--r--hw/pci-bridge/simba.c6
-rw-r--r--hw/pci-bridge/xio3130_downstream.c9
-rw-r--r--hw/pci-bridge/xio3130_upstream.c6
-rw-r--r--hw/pci-host/Kconfig3
-rw-r--r--hw/pci-host/articia.c10
-rw-r--r--hw/pci-host/astro.c68
-rw-r--r--hw/pci-host/bonito.c8
-rw-r--r--hw/pci-host/designware.c125
-rw-r--r--hw/pci-host/dino.c7
-rw-r--r--hw/pci-host/fsl_imx8m_phy.c98
-rw-r--r--hw/pci-host/gpex-acpi.c20
-rw-r--r--hw/pci-host/gpex.c52
-rw-r--r--hw/pci-host/grackle.c9
-rw-r--r--hw/pci-host/gt64120.c118
-rw-r--r--hw/pci-host/i440fx.c9
-rw-r--r--hw/pci-host/meson.build1
-rw-r--r--hw/pci-host/mv64361.c16
-rw-r--r--hw/pci-host/pnv_phb.c14
-rw-r--r--hw/pci-host/pnv_phb3.c11
-rw-r--r--hw/pci-host/pnv_phb3_msi.c4
-rw-r--r--hw/pci-host/pnv_phb3_pbcq.c4
-rw-r--r--hw/pci-host/pnv_phb4.c11
-rw-r--r--hw/pci-host/pnv_phb4_pec.c68
-rw-r--r--hw/pci-host/ppc440_pcix.c4
-rw-r--r--hw/pci-host/ppc4xx_pci.c8
-rw-r--r--hw/pci-host/ppce500.c61
-rw-r--r--hw/pci-host/q35.c24
-rw-r--r--hw/pci-host/raven.c94
-rw-r--r--hw/pci-host/remote.c4
-rw-r--r--hw/pci-host/sabre.c13
-rw-r--r--hw/pci-host/sh_pci.c6
-rw-r--r--hw/pci-host/uninorth.c30
-rw-r--r--hw/pci-host/versatile.c15
-rw-r--r--hw/pci-host/xen_igd_pt.c3
-rw-r--r--hw/pci-host/xilinx-pcie.c11
-rw-r--r--hw/pci/msi.c2
-rw-r--r--hw/pci/msix.c15
-rw-r--r--hw/pci/pci-hmp-cmds.c28
-rw-r--r--hw/pci/pci-stub.c6
-rw-r--r--hw/pci/pci.c530
-rw-r--r--hw/pci/pci_bridge.c12
-rw-r--r--hw/pci/pci_host.c11
-rw-r--r--hw/pci/pcie.c203
-rw-r--r--hw/pci/pcie_port.c26
-rw-r--r--hw/pci/pcie_sriov.c324
-rw-r--r--hw/pci/trace-events6
-rw-r--r--hw/pcmcia/Kconfig2
-rw-r--r--hw/pcmcia/meson.build2
-rw-r--r--hw/pcmcia/pcmcia.c24
-rw-r--r--hw/pcmcia/pxa2xx.c248
-rw-r--r--hw/ppc/Kconfig21
-rw-r--r--hw/ppc/amigaone.c290
-rw-r--r--hw/ppc/e500.c98
-rw-r--r--hw/ppc/e500.h6
-rw-r--r--hw/ppc/e500plat.c10
-rw-r--r--hw/ppc/mac_newworld.c27
-rw-r--r--hw/ppc/mac_oldworld.c24
-rw-r--r--hw/ppc/meson.build4
-rw-r--r--hw/ppc/mpc8544_guts.c34
-rw-r--r--hw/ppc/mpc8544ds.c6
-rw-r--r--hw/ppc/pef.c6
-rw-r--r--hw/ppc/pegasos2.c61
-rw-r--r--hw/ppc/pnv.c651
-rw-r--r--hw/ppc/pnv_adu.c217
-rw-r--r--hw/ppc/pnv_bmc.c30
-rw-r--r--hw/ppc/pnv_chiptod.c22
-rw-r--r--hw/ppc/pnv_core.c160
-rw-r--r--hw/ppc/pnv_homer.c245
-rw-r--r--hw/ppc/pnv_i2c.c9
-rw-r--r--hw/ppc/pnv_lpc.c186
-rw-r--r--hw/ppc/pnv_n1_chiplet.c4
-rw-r--r--hw/ppc/pnv_nest_pervasive.c6
-rw-r--r--hw/ppc/pnv_occ.c672
-rw-r--r--hw/ppc/pnv_pnor.c11
-rw-r--r--hw/ppc/pnv_psi.c23
-rw-r--r--hw/ppc/pnv_sbe.c6
-rw-r--r--hw/ppc/pnv_xscom.c11
-rw-r--r--hw/ppc/ppc.c25
-rw-r--r--hw/ppc/ppc405.h186
-rw-r--r--hw/ppc/ppc405_boards.c520
-rw-r--r--hw/ppc/ppc405_uc.c1217
-rw-r--r--hw/ppc/ppc440_bamboo.c43
-rw-r--r--hw/ppc/ppc440_uc.c7
-rw-r--r--hw/ppc/ppc4xx_devs.c20
-rw-r--r--hw/ppc/ppc4xx_sdram.c16
-rw-r--r--hw/ppc/ppc_booke.c15
-rw-r--r--hw/ppc/ppce500_spin.c36
-rw-r--r--hw/ppc/prep.c33
-rw-r--r--hw/ppc/prep_systemio.c9
-rw-r--r--hw/ppc/rs6000_mc.c11
-rw-r--r--hw/ppc/sam460ex.c65
-rw-r--r--hw/ppc/spapr.c556
-rw-r--r--hw/ppc/spapr_caps.c53
-rw-r--r--hw/ppc/spapr_cpu_core.c57
-rw-r--r--hw/ppc/spapr_drc.c51
-rw-r--r--hw/ppc/spapr_events.c7
-rw-r--r--hw/ppc/spapr_hcall.c41
-rw-r--r--hw/ppc/spapr_iommu.c11
-rw-r--r--hw/ppc/spapr_irq.c2
-rw-r--r--hw/ppc/spapr_nested.c136
-rw-r--r--hw/ppc/spapr_nvdimm.c14
-rw-r--r--hw/ppc/spapr_ovec.c3
-rw-r--r--hw/ppc/spapr_pci.c117
-rw-r--r--hw/ppc/spapr_pci_vfio.c6
-rw-r--r--hw/ppc/spapr_rng.c9
-rw-r--r--hw/ppc/spapr_rtas.c18
-rw-r--r--hw/ppc/spapr_rtc.c6
-rw-r--r--hw/ppc/spapr_tpm_proxy.c11
-rw-r--r--hw/ppc/spapr_vhyp_mmu.c21
-rw-r--r--hw/ppc/spapr_vio.c12
-rw-r--r--hw/ppc/spapr_vof.c4
-rw-r--r--hw/ppc/trace-events4
-rw-r--r--hw/ppc/virtex_ml507.c46
-rw-r--r--hw/ppc/vof.c6
-rw-r--r--hw/remote/iohub.c13
-rw-r--r--hw/remote/iommu.c4
-rw-r--r--hw/remote/machine.c6
-rw-r--r--hw/remote/memory.c2
-rw-r--r--hw/remote/message.c9
-rw-r--r--hw/remote/mpqemu-link.c4
-rw-r--r--hw/remote/proxy-memory-listener.c4
-rw-r--r--hw/remote/proxy.c11
-rw-r--r--hw/remote/remote-obj.c6
-rw-r--r--hw/remote/vfio-user-obj.c12
-rw-r--r--hw/riscv/Kconfig14
-rw-r--r--hw/riscv/boot.c159
-rw-r--r--hw/riscv/meson.build3
-rw-r--r--hw/riscv/microblaze-v-generic.c189
-rw-r--r--hw/riscv/microchip_pfsoc.c174
-rw-r--r--hw/riscv/numa.c2
-rw-r--r--hw/riscv/opentitan.c17
-rw-r--r--hw/riscv/riscv-iommu-bits.h468
-rw-r--r--hw/riscv/riscv-iommu-hpm.c381
-rw-r--r--hw/riscv/riscv-iommu-hpm.h33
-rw-r--r--hw/riscv/riscv-iommu-pci.c217
-rw-r--r--hw/riscv/riscv-iommu-sys.c248
-rw-r--r--hw/riscv/riscv-iommu.c2679
-rw-r--r--hw/riscv/riscv-iommu.h157
-rw-r--r--hw/riscv/riscv_hart.c105
-rw-r--r--hw/riscv/shakti_c.c21
-rw-r--r--hw/riscv/sifive_e.c11
-rw-r--r--hw/riscv/sifive_u.c40
-rw-r--r--hw/riscv/spike.c25
-rw-r--r--hw/riscv/trace-events26
-rw-r--r--hw/riscv/trace.h1
-rw-r--r--hw/riscv/virt-acpi-build.c278
-rw-r--r--hw/riscv/virt.c549
-rw-r--r--hw/rtc/Kconfig9
-rw-r--r--hw/rtc/allwinner-rtc.c17
-rw-r--r--hw/rtc/aspeed_rtc.c6
-rw-r--r--hw/rtc/ds1338.c32
-rw-r--r--hw/rtc/exynos4210_rtc.c6
-rw-r--r--hw/rtc/goldfish_rtc.c67
-rw-r--r--hw/rtc/ls7a_rtc.c10
-rw-r--r--hw/rtc/m41t80.c4
-rw-r--r--hw/rtc/m48t59-isa.c15
-rw-r--r--hw/rtc/m48t59.c24
-rw-r--r--hw/rtc/mc146818rtc.c35
-rw-r--r--hw/rtc/meson.build2
-rw-r--r--hw/rtc/pl031.c9
-rw-r--r--hw/rtc/rs5c372.c236
-rw-r--r--hw/rtc/sun4v-rtc.c2
-rw-r--r--hw/rtc/trace-events8
-rw-r--r--hw/rtc/twl92230.c882
-rw-r--r--hw/rtc/xlnx-zynqmp-rtc.c8
-rw-r--r--hw/rx/rx-gdbsim.c19
-rw-r--r--hw/rx/rx62n.c13
-rw-r--r--hw/s390x/3270-ccw.c7
-rw-r--r--hw/s390x/Kconfig2
-rw-r--r--hw/s390x/ap-bridge.c6
-rw-r--r--hw/s390x/ap-device.c2
-rw-r--r--hw/s390x/ap-stub.c21
-rw-r--r--hw/s390x/ccw-device.c56
-rw-r--r--hw/s390x/ccw-device.h7
-rw-r--r--hw/s390x/cpu-topology.c10
-rw-r--r--hw/s390x/css-bridge.c23
-rw-r--r--hw/s390x/css.c41
-rw-r--r--hw/s390x/event-facility.c45
-rw-r--r--hw/s390x/ipl.c313
-rw-r--r--hw/s390x/ipl.h134
-rw-r--r--hw/s390x/meson.build12
-rw-r--r--hw/s390x/s390-ccw.c4
-rw-r--r--hw/s390x/s390-hypercall.c85
-rw-r--r--hw/s390x/s390-hypercall.h25
-rw-r--r--hw/s390x/s390-pci-bus.c110
-rw-r--r--hw/s390x/s390-pci-inst.c184
-rw-r--r--hw/s390x/s390-pci-vfio.c33
-rw-r--r--hw/s390x/s390-skeys-kvm.c4
-rw-r--r--hw/s390x/s390-skeys.c78
-rw-r--r--hw/s390x/s390-stattrib-kvm.c75
-rw-r--r--hw/s390x/s390-stattrib.c16
-rw-r--r--hw/s390x/s390-virtio-ccw.c613
-rw-r--r--hw/s390x/s390-virtio-hcall.c41
-rw-r--r--hw/s390x/s390-virtio-hcall.h25
-rw-r--r--hw/s390x/sclp.c28
-rw-r--r--hw/s390x/sclpcpi.c212
-rw-r--r--hw/s390x/sclpcpu.c4
-rw-r--r--hw/s390x/sclpquiesce.c6
-rw-r--r--hw/s390x/tod-kvm.c4
-rw-r--r--hw/s390x/tod-tcg.c4
-rw-r--r--hw/s390x/tod.c8
-rw-r--r--hw/s390x/vhost-scsi-ccw.c5
-rw-r--r--hw/s390x/vhost-user-fs-ccw.c5
-rw-r--r--hw/s390x/vhost-vsock-ccw.c5
-rw-r--r--hw/s390x/virtio-ccw-9p.c5
-rw-r--r--hw/s390x/virtio-ccw-balloon.c5
-rw-r--r--hw/s390x/virtio-ccw-blk.c6
-rw-r--r--hw/s390x/virtio-ccw-crypto.c5
-rw-r--r--hw/s390x/virtio-ccw-gpu.c5
-rw-r--r--hw/s390x/virtio-ccw-input.c5
-rw-r--r--hw/s390x/virtio-ccw-md-stubs.c24
-rw-r--r--hw/s390x/virtio-ccw-md.c153
-rw-r--r--hw/s390x/virtio-ccw-md.h44
-rw-r--r--hw/s390x/virtio-ccw-mem.c225
-rw-r--r--hw/s390x/virtio-ccw-mem.h34
-rw-r--r--hw/s390x/virtio-ccw-net.c6
-rw-r--r--hw/s390x/virtio-ccw-rng.c5
-rw-r--r--hw/s390x/virtio-ccw-scsi.c5
-rw-r--r--hw/s390x/virtio-ccw-serial.c5
-rw-r--r--hw/s390x/virtio-ccw.c28
-rw-r--r--hw/s390x/virtio-ccw.h2
-rw-r--r--hw/scsi/esp-pci.c8
-rw-r--r--hw/scsi/esp.c49
-rw-r--r--hw/scsi/lsi53c895a.c14
-rw-r--r--hw/scsi/megasas.c55
-rw-r--r--hw/scsi/mptendian.c2
-rw-r--r--hw/scsi/mptsas.c13
-rw-r--r--hw/scsi/scsi-bus.c142
-rw-r--r--hw/scsi/scsi-disk.c245
-rw-r--r--hw/scsi/scsi-generic.c9
-rw-r--r--hw/scsi/spapr_vscsi.c5
-rw-r--r--hw/scsi/vhost-scsi-common.c13
-rw-r--r--hw/scsi/vhost-scsi.c21
-rw-r--r--hw/scsi/vhost-user-scsi.c28
-rw-r--r--hw/scsi/virtio-scsi-dataplane.c105
-rw-r--r--hw/scsi/virtio-scsi.c519
-rw-r--r--hw/scsi/vmw_pvscsi.c76
-rw-r--r--hw/scsi/vmw_pvscsi.h4
-rw-r--r--hw/sd/allwinner-sdhost.c26
-rw-r--r--hw/sd/aspeed_sdhci.c110
-rw-r--r--hw/sd/bcm2835_sdhost.c6
-rw-r--r--hw/sd/cadence_sdhci.c4
-rw-r--r--hw/sd/meson.build1
-rw-r--r--hw/sd/npcm7xx_sdhci.c4
-rw-r--r--hw/sd/omap_mmc.c309
-rw-r--r--hw/sd/pl181.c8
-rw-r--r--hw/sd/pxa2xx_mmci.c594
-rw-r--r--hw/sd/sd.c150
-rw-r--r--hw/sd/sdhci-internal.h2
-rw-r--r--hw/sd/sdhci-pci.c9
-rw-r--r--hw/sd/sdhci.c199
-rw-r--r--hw/sd/ssi-sd.c6
-rw-r--r--hw/sd/trace-events4
-rw-r--r--hw/sensor/adm1266.c2
-rw-r--r--hw/sensor/adm1272.c2
-rw-r--r--hw/sensor/dps310.c4
-rw-r--r--hw/sensor/emc141x.c8
-rw-r--r--hw/sensor/isl_pmbus_vr.c10
-rw-r--r--hw/sensor/lsm303dlhc_mag.c4
-rw-r--r--hw/sensor/max31785.c2
-rw-r--r--hw/sensor/max34451.c2
-rw-r--r--hw/sensor/tmp105.c75
-rw-r--r--hw/sensor/tmp421.c8
-rw-r--r--hw/sensor/trace-events6
-rw-r--r--hw/sensor/trace.h1
-rw-r--r--hw/sh4/Kconfig7
-rw-r--r--hw/sh4/meson.build1
-rw-r--r--hw/sh4/r2d.c45
-rw-r--r--hw/sh4/sh7750.c62
-rw-r--r--hw/sh4/shix.c86
-rw-r--r--hw/smbios/smbios.c16
-rw-r--r--hw/smbios/smbios_legacy.c2
-rw-r--r--hw/smbios/smbios_type_38.c7
-rw-r--r--hw/sparc/leon3.c8
-rw-r--r--hw/sparc/sun4m.c58
-rw-r--r--hw/sparc/sun4m_iommu.c14
-rw-r--r--hw/sparc64/Kconfig1
-rw-r--r--hw/sparc64/niagara.c10
-rw-r--r--hw/sparc64/sparc64.c2
-rw-r--r--hw/sparc64/sun4u.c52
-rw-r--r--hw/sparc64/sun4u_iommu.c9
-rw-r--r--hw/ssi/Kconfig8
-rw-r--r--hw/ssi/allwinner-a10-spi.c561
-rw-r--r--hw/ssi/aspeed_smc.c91
-rw-r--r--hw/ssi/bcm2835_spi.c4
-rw-r--r--hw/ssi/ibex_spi_host.c8
-rw-r--r--hw/ssi/imx_spi.c4
-rw-r--r--hw/ssi/meson.build3
-rw-r--r--hw/ssi/mss-spi.c4
-rw-r--r--hw/ssi/npcm7xx_fiu.c21
-rw-r--r--hw/ssi/npcm_pspi.c2
-rw-r--r--hw/ssi/omap_spi.c380
-rw-r--r--hw/ssi/pl022.c4
-rw-r--r--hw/ssi/pnv_spi.c1231
-rw-r--r--hw/ssi/sifive_spi.c7
-rw-r--r--hw/ssi/ssi.c7
-rw-r--r--hw/ssi/stm32f2xx_spi.c4
-rw-r--r--hw/ssi/trace-events31
-rw-r--r--hw/ssi/xilinx_spi.c39
-rw-r--r--hw/ssi/xilinx_spips.c22
-rw-r--r--hw/ssi/xlnx-versal-ospi.c7
-rw-r--r--hw/timer/Kconfig9
-rw-r--r--hw/timer/a9gtimer.c9
-rw-r--r--hw/timer/allwinner-a10-pit.c9
-rw-r--r--hw/timer/arm_mptimer.c7
-rw-r--r--hw/timer/arm_timer.c5
-rw-r--r--hw/timer/armv7m_systick.c4
-rw-r--r--hw/timer/aspeed_timer.c293
-rw-r--r--hw/timer/avr_timer16.c7
-rw-r--r--hw/timer/bcm2835_systmr.c4
-rw-r--r--hw/timer/cadence_ttc.c2
-rw-r--r--hw/timer/cmsdk-apb-dualtimer.c4
-rw-r--r--hw/timer/cmsdk-apb-timer.c4
-rw-r--r--hw/timer/digic-timer.c4
-rw-r--r--hw/timer/etraxfs_timer.c407
-rw-r--r--hw/timer/exynos4210_mct.c6
-rw-r--r--hw/timer/exynos4210_pwm.c4
-rw-r--r--hw/timer/grlib_gptimer.c7
-rw-r--r--hw/timer/hpet.c513
-rw-r--r--hw/timer/i8254.c4
-rw-r--r--hw/timer/i8254_common.c5
-rw-r--r--hw/timer/ibex_timer.c7
-rw-r--r--hw/timer/imx_epit.c4
-rw-r--r--hw/timer/imx_gpt.c51
-rw-r--r--hw/timer/meson.build7
-rw-r--r--hw/timer/mss-timer.c5
-rw-r--r--hw/timer/npcm7xx_timer.c2
-rw-r--r--hw/timer/nrf51_timer.c7
-rw-r--r--hw/timer/omap_gptimer.c512
-rw-r--r--hw/timer/omap_synctimer.c110
-rw-r--r--hw/timer/pxa2xx_timer.c116
-rw-r--r--hw/timer/renesas_cmt.c7
-rw-r--r--hw/timer/renesas_tmr.c7
-rw-r--r--hw/timer/sh_timer.c2
-rw-r--r--hw/timer/sifive_pwm.c7
-rw-r--r--hw/timer/slavio_timer.c7
-rw-r--r--hw/timer/sse-counter.c4
-rw-r--r--hw/timer/sse-timer.c7
-rw-r--r--hw/timer/stellaris-gptm.c2
-rw-r--r--hw/timer/stm32f2xx_timer.c7
-rw-r--r--hw/timer/trace-events12
-rw-r--r--hw/timer/xilinx_timer.c46
-rw-r--r--hw/tpm/tpm_crb.c17
-rw-r--r--hw/tpm/tpm_ppi.c2
-rw-r--r--hw/tpm/tpm_ppi.h2
-rw-r--r--hw/tpm/tpm_prop.h2
-rw-r--r--hw/tpm/tpm_spapr.c12
-rw-r--r--hw/tpm/tpm_tis.h2
-rw-r--r--hw/tpm/tpm_tis_common.c4
-rw-r--r--hw/tpm/tpm_tis_i2c.c13
-rw-r--r--hw/tpm/tpm_tis_isa.c9
-rw-r--r--hw/tpm/tpm_tis_sysbus.c12
-rw-r--r--hw/tricore/tc27x_soc.c9
-rw-r--r--hw/tricore/triboard.c10
-rw-r--r--hw/tricore/tricore_testboard.c2
-rw-r--r--hw/tricore/tricore_testdevice.c11
-rw-r--r--hw/uefi/Kconfig3
-rw-r--r--hw/uefi/LIMITATIONS.md7
-rw-r--r--hw/uefi/hardware-info.c31
-rw-r--r--hw/uefi/meson.build21
-rw-r--r--hw/uefi/trace-events17
-rw-r--r--hw/uefi/var-service-auth.c361
-rw-r--r--hw/uefi/var-service-core.c322
-rw-r--r--hw/uefi/var-service-guid.c99
-rw-r--r--hw/uefi/var-service-json.c257
-rw-r--r--hw/uefi/var-service-pkcs7-stub.c16
-rw-r--r--hw/uefi/var-service-pkcs7.c436
-rw-r--r--hw/uefi/var-service-policy.c370
-rw-r--r--hw/uefi/var-service-siglist.c212
-rw-r--r--hw/uefi/var-service-sysbus.c124
-rw-r--r--hw/uefi/var-service-utils.c241
-rw-r--r--hw/uefi/var-service-vars.c725
-rw-r--r--hw/ufs/lu.c9
-rw-r--r--hw/ufs/ufs.c126
-rw-r--r--hw/ufs/ufs.h3
-rw-r--r--hw/usb/Kconfig12
-rw-r--r--hw/usb/bus-stub.c2
-rw-r--r--hw/usb/bus.c16
-rw-r--r--hw/usb/canokey.c11
-rw-r--r--hw/usb/canokey.h4
-rw-r--r--hw/usb/ccid-card-emulated.c5
-rw-r--r--hw/usb/ccid-card-passthru.c5
-rw-r--r--hw/usb/chipidea.c2
-rw-r--r--hw/usb/dev-audio.c5
-rw-r--r--hw/usb/dev-hid.c17
-rw-r--r--hw/usb/dev-hub.c6
-rw-r--r--hw/usb/dev-mtp.c7
-rw-r--r--hw/usb/dev-network.c7
-rw-r--r--hw/usb/dev-serial.c16
-rw-r--r--hw/usb/dev-smartcard-reader.c13
-rw-r--r--hw/usb/dev-storage-bot.c2
-rw-r--r--hw/usb/dev-storage-classic.c9
-rw-r--r--hw/usb/dev-storage.c19
-rw-r--r--hw/usb/dev-uas.c6
-rw-r--r--hw/usb/dev-wacom.c2
-rw-r--r--hw/usb/hcd-dwc2.c5
-rw-r--r--hw/usb/hcd-dwc2.h2
-rw-r--r--hw/usb/hcd-dwc3.c12
-rw-r--r--hw/usb/hcd-ehci-pci.c15
-rw-r--r--hw/usb/hcd-ehci-sysbus.c139
-rw-r--r--hw/usb/hcd-ehci.c5
-rw-r--r--hw/usb/hcd-ehci.h2
-rw-r--r--hw/usb/hcd-musb.c1553
-rw-r--r--hw/usb/hcd-ohci-pci.c9
-rw-r--r--hw/usb/hcd-ohci-sysbus.c7
-rw-r--r--hw/usb/hcd-ohci.c2
-rw-r--r--hw/usb/hcd-ohci.h2
-rw-r--r--hw/usb/hcd-uhci.c142
-rw-r--r--hw/usb/hcd-uhci.h4
-rw-r--r--hw/usb/hcd-xhci-nec.c12
-rw-r--r--hw/usb/hcd-xhci-pci.c41
-rw-r--r--hw/usb/hcd-xhci-pci.h1
-rw-r--r--hw/usb/hcd-xhci-sysbus.c7
-rw-r--r--hw/usb/hcd-xhci.c50
-rw-r--r--hw/usb/hcd-xhci.h7
-rw-r--r--hw/usb/host-libusb.c9
-rw-r--r--hw/usb/imx-usb-phy.c4
-rw-r--r--hw/usb/libhw.c2
-rw-r--r--hw/usb/meson.build4
-rw-r--r--hw/usb/redirect.c9
-rw-r--r--hw/usb/tusb6010.c850
-rw-r--r--hw/usb/u2f-emulated.c5
-rw-r--r--hw/usb/u2f-passthru.c5
-rw-r--r--hw/usb/u2f.c2
-rw-r--r--hw/usb/xen-usb.c10
-rw-r--r--hw/usb/xlnx-usb-subsystem.c2
-rw-r--r--hw/usb/xlnx-versal-usb2-ctrl-regs.c2
-rw-r--r--hw/vfio-user/Kconfig7
-rw-r--r--hw/vfio-user/container.c370
-rw-r--r--hw/vfio-user/container.h23
-rw-r--r--hw/vfio-user/device.c441
-rw-r--r--hw/vfio-user/device.h24
-rw-r--r--hw/vfio-user/meson.build11
-rw-r--r--hw/vfio-user/pci.c475
-rw-r--r--hw/vfio-user/protocol.h242
-rw-r--r--hw/vfio-user/proxy.c1356
-rw-r--r--hw/vfio-user/proxy.h135
-rw-r--r--hw/vfio-user/trace-events20
-rw-r--r--hw/vfio-user/trace.h4
-rw-r--r--hw/vfio/Kconfig2
-rw-r--r--hw/vfio/amd-xgbe.c6
-rw-r--r--hw/vfio/ap.c142
-rw-r--r--hw/vfio/calxeda-xgmac.c6
-rw-r--r--hw/vfio/ccw.c97
-rw-r--r--hw/vfio/common.c1569
-rw-r--r--hw/vfio/container-base.c210
-rw-r--r--hw/vfio/container.c430
-rw-r--r--hw/vfio/cpr-legacy.c287
-rw-r--r--hw/vfio/cpr.c41
-rw-r--r--hw/vfio/device.c576
-rw-r--r--hw/vfio/display.c18
-rw-r--r--hw/vfio/helpers.c639
-rw-r--r--hw/vfio/igd.c751
-rw-r--r--hw/vfio/iommufd.c310
-rw-r--r--hw/vfio/listener.c1253
-rw-r--r--hw/vfio/meson.build31
-rw-r--r--hw/vfio/migration-multifd.c685
-rw-r--r--hw/vfio/migration-multifd.h34
-rw-r--r--hw/vfio/migration.c249
-rw-r--r--hw/vfio/pci-quirks.c117
-rw-r--r--hw/vfio/pci-quirks.h72
-rw-r--r--hw/vfio/pci.c1024
-rw-r--r--hw/vfio/pci.h51
-rw-r--r--hw/vfio/platform.c64
-rw-r--r--hw/vfio/region.c403
-rw-r--r--hw/vfio/spapr.c91
-rw-r--r--hw/vfio/trace-events74
-rw-r--r--hw/vfio/trace.h3
-rw-r--r--hw/vfio/vfio-display.h42
-rw-r--r--hw/vfio/vfio-helpers.h35
-rw-r--r--hw/vfio/vfio-iommufd.h34
-rw-r--r--hw/vfio/vfio-listener.h15
-rw-r--r--hw/vfio/vfio-migration-internal.h74
-rw-r--r--hw/virtio/Kconfig18
-rw-r--r--hw/virtio/cbor-helpers.c321
-rw-r--r--hw/virtio/iothread-vq-mapping.c131
-rw-r--r--hw/virtio/meson.build6
-rw-r--r--hw/virtio/trace-events3
-rw-r--r--hw/virtio/vdpa-dev-pci.c8
-rw-r--r--hw/virtio/vdpa-dev.c14
-rw-r--r--hw/virtio/vhost-iova-tree.c115
-rw-r--r--hw/virtio/vhost-iova-tree.h10
-rw-r--r--hw/virtio/vhost-scsi-pci.c5
-rw-r--r--hw/virtio/vhost-shadow-virtqueue.c79
-rw-r--r--hw/virtio/vhost-shadow-virtqueue.h5
-rw-r--r--hw/virtio/vhost-user-base.c25
-rw-r--r--hw/virtio/vhost-user-blk-pci.c5
-rw-r--r--hw/virtio/vhost-user-device-pci.c3
-rw-r--r--hw/virtio/vhost-user-device.c5
-rw-r--r--hw/virtio/vhost-user-fs-pci.c5
-rw-r--r--hw/virtio/vhost-user-fs.c32
-rw-r--r--hw/virtio/vhost-user-gpio-pci.c2
-rw-r--r--hw/virtio/vhost-user-gpio.c5
-rw-r--r--hw/virtio/vhost-user-i2c-pci.c2
-rw-r--r--hw/virtio/vhost-user-i2c.c5
-rw-r--r--hw/virtio/vhost-user-input.c5
-rw-r--r--hw/virtio/vhost-user-rng-pci.c5
-rw-r--r--hw/virtio/vhost-user-rng.c5
-rw-r--r--hw/virtio/vhost-user-scmi-pci.c2
-rw-r--r--hw/virtio/vhost-user-scmi.c34
-rw-r--r--hw/virtio/vhost-user-scsi-pci.c7
-rw-r--r--hw/virtio/vhost-user-snd-pci.c7
-rw-r--r--hw/virtio/vhost-user-snd.c23
-rw-r--r--hw/virtio/vhost-user-vsock-pci.c6
-rw-r--r--hw/virtio/vhost-user-vsock.c22
-rw-r--r--hw/virtio/vhost-user.c69
-rw-r--r--hw/virtio/vhost-vdpa.c160
-rw-r--r--hw/virtio/vhost-vsock-common.c17
-rw-r--r--hw/virtio/vhost-vsock-pci.c5
-rw-r--r--hw/virtio/vhost-vsock.c16
-rw-r--r--hw/virtio/vhost.c101
-rw-r--r--hw/virtio/virtio-9p-pci.c5
-rw-r--r--hw/virtio/virtio-acpi.c2
-rw-r--r--hw/virtio/virtio-balloon-pci.c14
-rw-r--r--hw/virtio/virtio-balloon.c58
-rw-r--r--hw/virtio/virtio-blk-pci.c5
-rw-r--r--hw/virtio/virtio-bus.c4
-rw-r--r--hw/virtio/virtio-crypto-pci.c5
-rw-r--r--hw/virtio/virtio-crypto.c54
-rw-r--r--hw/virtio/virtio-hmp-cmds.c2
-rw-r--r--hw/virtio/virtio-input-pci.c10
-rw-r--r--hw/virtio/virtio-iommu-pci.c5
-rw-r--r--hw/virtio/virtio-iommu.c118
-rw-r--r--hw/virtio/virtio-md-pci.c2
-rw-r--r--hw/virtio/virtio-mem-pci.c14
-rw-r--r--hw/virtio/virtio-mem.c203
-rw-r--r--hw/virtio/virtio-mmio.c13
-rw-r--r--hw/virtio/virtio-net-pci.c6
-rw-r--r--hw/virtio/virtio-nsm-pci.c73
-rw-r--r--hw/virtio/virtio-nsm.c1737
-rw-r--r--hw/virtio/virtio-pci.c119
-rw-r--r--hw/virtio/virtio-pmem-pci.c2
-rw-r--r--hw/virtio/virtio-pmem.c7
-rw-r--r--hw/virtio/virtio-qmp.c13
-rw-r--r--hw/virtio/virtio-rng-pci.c5
-rw-r--r--hw/virtio/virtio-rng.c19
-rw-r--r--hw/virtio/virtio-scsi-pci.c5
-rw-r--r--hw/virtio/virtio-serial-pci.c5
-rw-r--r--hw/virtio/virtio.c342
-rw-r--r--hw/vmapple/Kconfig34
-rw-r--r--hw/vmapple/aes.c581
-rw-r--r--hw/vmapple/bdif.c274
-rw-r--r--hw/vmapple/cfg.c195
-rw-r--r--hw/vmapple/meson.build7
-rw-r--r--hw/vmapple/trace-events21
-rw-r--r--hw/vmapple/trace.h2
-rw-r--r--hw/vmapple/virtio-blk.c205
-rw-r--r--hw/vmapple/vmapple.c618
-rw-r--r--hw/watchdog/allwinner-wdt.c10
-rw-r--r--hw/watchdog/cmsdk-apb-watchdog.c44
-rw-r--r--hw/watchdog/sbsa_gwdt.c12
-rw-r--r--hw/watchdog/spapr_watchdog.c2
-rw-r--r--hw/watchdog/watchdog.c6
-rw-r--r--hw/watchdog/wdt_aspeed.c39
-rw-r--r--hw/watchdog/wdt_diag288.c8
-rw-r--r--hw/watchdog/wdt_i6300esb.c8
-rw-r--r--hw/watchdog/wdt_ib700.c6
-rw-r--r--hw/watchdog/wdt_imx2.c10
-rw-r--r--hw/xen/meson.build4
-rw-r--r--hw/xen/trace-events6
-rw-r--r--hw/xen/xen-bus-helper.c37
-rw-r--r--hw/xen/xen-bus.c35
-rw-r--r--hw/xen/xen-hvm-common.c114
-rw-r--r--hw/xen/xen-legacy-backend.c50
-rw-r--r--hw/xen/xen-mapcache.c37
-rw-r--r--hw/xen/xen-pvh-common.c399
-rw-r--r--hw/xen/xen_devconfig.c12
-rw-r--r--hw/xen/xen_pt.c67
-rw-r--r--hw/xen/xen_pt_graphics.c4
-rw-r--r--hw/xen/xen_pvdev.c8
-rw-r--r--hw/xen/xen_stubs.c51
-rw-r--r--hw/xenpv/xen_machine_pv.c4
-rw-r--r--hw/xtensa/Kconfig2
-rw-r--r--hw/xtensa/bootparam.h1
-rw-r--r--hw/xtensa/pic_cpu.c1
-rw-r--r--hw/xtensa/sim.c9
-rw-r--r--hw/xtensa/virt.c6
-rw-r--r--hw/xtensa/xtensa_memory.c2
-rw-r--r--hw/xtensa/xtfpga.c44
-rw-r--r--include/accel/accel-cpu-target.h31
-rw-r--r--include/accel/accel-cpu.h23
-rw-r--r--include/accel/tcg/cpu-ldst-common.h122
-rw-r--r--include/accel/tcg/cpu-ldst.h505
-rw-r--r--include/accel/tcg/cpu-mmu-index.h42
-rw-r--r--include/accel/tcg/cpu-ops.h333
-rw-r--r--include/accel/tcg/getpc.h20
-rw-r--r--include/accel/tcg/helper-retaddr.h43
-rw-r--r--include/accel/tcg/iommu.h41
-rw-r--r--include/accel/tcg/probe.h122
-rw-r--r--include/accel/tcg/tb-cpu-state.h18
-rw-r--r--include/block/aio.h14
-rw-r--r--include/block/aio_task.h2
-rw-r--r--include/block/block-common.h13
-rw-r--r--include/block/block-copy.h1
-rw-r--r--include/block/block-global-state.h25
-rw-r--r--include/block/block-io.h4
-rw-r--r--include/block/block_int-common.h63
-rw-r--r--include/block/block_int-global-state.h6
-rw-r--r--include/block/block_int-io.h4
-rw-r--r--include/block/blockjob.h2
-rw-r--r--include/block/export.h3
-rw-r--r--include/block/graph-lock.h23
-rw-r--r--include/block/nbd.h24
-rw-r--r--include/block/nvme.h143
-rw-r--r--include/block/qdict.h2
-rw-r--r--include/block/raw-aio.h19
-rw-r--r--include/block/thread-pool.h62
-rw-r--r--include/block/ufs.h19
-rw-r--r--include/chardev/char-fe.h5
-rw-r--r--include/chardev/char-socket.h2
-rw-r--r--include/chardev/char.h1
-rw-r--r--include/crypto/afsplit.h8
-rw-r--r--include/crypto/block.h2
-rw-r--r--include/crypto/cipher.h18
-rw-r--r--include/crypto/hash.h189
-rw-r--r--include/crypto/hmac.h40
-rw-r--r--include/crypto/ivgen.h30
-rw-r--r--include/crypto/pbkdf.h14
-rw-r--r--include/crypto/tlssession.h79
-rw-r--r--include/crypto/x509-utils.h22
-rw-r--r--include/disas/capstone.h1
-rw-r--r--include/disas/dis-asm.h6
-rw-r--r--include/exec/address-spaces.h39
-rw-r--r--include/exec/confidential-guest-support.h99
-rw-r--r--include/exec/cpu-all.h375
-rw-r--r--include/exec/cpu-common.h73
-rw-r--r--include/exec/cpu-defs.h44
-rw-r--r--include/exec/cpu-interrupt.h70
-rw-r--r--include/exec/cpu_ldst.h382
-rw-r--r--include/exec/cputlb.h263
-rw-r--r--include/exec/exec-all.h599
-rw-r--r--include/exec/gdbstub.h14
-rw-r--r--include/exec/helper-head.h.inc14
-rw-r--r--include/exec/helper-proto-common.h2
-rw-r--r--include/exec/icount.h76
-rw-r--r--include/exec/ioport.h77
-rw-r--r--include/exec/memattrs.h29
-rw-r--r--include/exec/memop.h59
-rw-r--r--include/exec/memory-internal.h49
-rw-r--r--include/exec/memory.h3177
-rw-r--r--include/exec/memory_ldst.h.inc4
-rw-r--r--include/exec/memory_ldst_phys.h.inc5
-rw-r--r--include/exec/mmap-lock.h33
-rw-r--r--include/exec/page-vary.h9
-rw-r--r--include/exec/poison.h30
-rw-r--r--include/exec/ram_addr.h552
-rw-r--r--include/exec/ramblock.h94
-rw-r--r--include/exec/ramlist.h1
-rw-r--r--include/exec/target_page.h52
-rw-r--r--include/exec/tlb-common.h10
-rw-r--r--include/exec/tlb-flags.h86
-rw-r--r--include/exec/translate-all.h33
-rw-r--r--include/exec/translation-block.h59
-rw-r--r--include/exec/translator.h81
-rw-r--r--include/exec/tswap.h87
-rw-r--r--include/exec/vaddr.h16
-rw-r--r--include/exec/watchpoint.h41
-rw-r--r--include/fpu/softfloat-helpers.h89
-rw-r--r--include/fpu/softfloat-types.h225
-rw-r--r--include/fpu/softfloat.h107
-rw-r--r--include/gdbstub/commands.h21
-rw-r--r--include/gdbstub/helpers.h4
-rw-r--r--include/gdbstub/syscalls.h2
-rw-r--r--include/gdbstub/user.h2
-rw-r--r--include/glib-compat.h7
-rw-r--r--include/hw/acpi/acpi-defs.h7
-rw-r--r--include/hw/acpi/acpi.h5
-rw-r--r--include/hw/acpi/acpi_generic_initiator.h47
-rw-r--r--include/hw/acpi/aml-build.h9
-rw-r--r--include/hw/acpi/cpu.h7
-rw-r--r--include/hw/acpi/generic_event_device.h12
-rw-r--r--include/hw/acpi/ghes.h14
-rw-r--r--include/hw/acpi/ich9.h6
-rw-r--r--include/hw/acpi/ich9_tco.h2
-rw-r--r--include/hw/acpi/ich9_timer.h23
-rw-r--r--include/hw/acpi/pci.h3
-rw-r--r--include/hw/acpi/pcihp.h2
-rw-r--r--include/hw/acpi/tpm.h2
-rw-r--r--include/hw/acpi/vmclock.h34
-rw-r--r--include/hw/adc/aspeed_adc.h1
-rw-r--r--include/hw/adc/max111x.h56
-rw-r--r--include/hw/arm/allwinner-a10.h4
-rw-r--r--include/hw/arm/allwinner-h3.h2
-rw-r--r--include/hw/arm/allwinner-r40.h2
-rw-r--r--include/hw/arm/aspeed.h2
-rw-r--r--include/hw/arm/aspeed_soc.h47
-rw-r--r--include/hw/arm/boot.h7
-rw-r--r--include/hw/arm/bsa.h2
-rw-r--r--include/hw/arm/fsl-imx25.h2
-rw-r--r--include/hw/arm/fsl-imx31.h2
-rw-r--r--include/hw/arm/fsl-imx6.h6
-rw-r--r--include/hw/arm/fsl-imx6ul.h2
-rw-r--r--include/hw/arm/fsl-imx7.h4
-rw-r--r--include/hw/arm/fsl-imx8mp.h284
-rw-r--r--include/hw/arm/npcm7xx.h8
-rw-r--r--include/hw/arm/npcm8xx.h132
-rw-r--r--include/hw/arm/nrf51_soc.h2
-rw-r--r--include/hw/arm/omap.h905
-rw-r--r--include/hw/arm/pxa.h197
-rw-r--r--include/hw/arm/sharpsl.h2
-rw-r--r--include/hw/arm/smmu-common.h7
-rw-r--r--include/hw/arm/soc_dma.h4
-rw-r--r--include/hw/arm/stm32f405_soc.h2
-rw-r--r--include/hw/arm/stm32l4x5_soc.h2
-rw-r--r--include/hw/arm/virt.h12
-rw-r--r--include/hw/arm/xlnx-versal.h1
-rw-r--r--include/hw/arm/xlnx-zynqmp.h1
-rw-r--r--include/hw/block/flash.h32
-rw-r--r--include/hw/boards.h130
-rw-r--r--include/hw/char/escc.h3
-rw-r--r--include/hw/char/imx_serial.h2
-rw-r--r--include/hw/char/mchp_pfsoc_mmuart.h2
-rw-r--r--include/hw/char/parallel-isa.h2
-rw-r--r--include/hw/char/parallel.h2
-rw-r--r--include/hw/char/pl011.h6
-rw-r--r--include/hw/char/riscv_htif.h2
-rw-r--r--include/hw/char/serial-isa.h38
-rw-r--r--include/hw/char/serial-mm.h52
-rw-r--r--include/hw/char/serial.h32
-rw-r--r--include/hw/char/sifive_uart.h16
-rw-r--r--include/hw/clock.h8
-rw-r--r--include/hw/core/accel-cpu.h38
-rw-r--r--include/hw/core/cpu.h133
-rw-r--r--include/hw/core/resetcontainer.h2
-rw-r--r--include/hw/core/sysemu-cpu-ops.h10
-rw-r--r--include/hw/core/tcg-cpu-ops.h239
-rw-r--r--include/hw/cris/etraxfs.h54
-rw-r--r--include/hw/cris/etraxfs_dma.h36
-rw-r--r--include/hw/cxl/cxl_device.h127
-rw-r--r--include/hw/cxl/cxl_mailbox.h19
-rw-r--r--include/hw/display/blizzard.h21
-rw-r--r--include/hw/display/macfb.h2
-rw-r--r--include/hw/display/tc6393xb.h21
-rw-r--r--include/hw/dma/i8257.h2
-rw-r--r--include/hw/dma/xlnx-zdma.h2
-rw-r--r--include/hw/dma/xlnx_dpdma.h3
-rw-r--r--include/hw/fsi/aspeed_apb2opb.h2
-rw-r--r--include/hw/fsi/cfam.h2
-rw-r--r--include/hw/fsi/fsi-master.h2
-rw-r--r--include/hw/fsi/fsi.h2
-rw-r--r--include/hw/fsi/lbus.h2
-rw-r--r--include/hw/gpio/aspeed_gpio.h6
-rw-r--r--include/hw/gpio/npcm7xx_gpio.h2
-rw-r--r--include/hw/hw.h4
-rw-r--r--include/hw/hyperv/hyperv-proto.h12
-rw-r--r--include/hw/hyperv/hyperv.h3
-rw-r--r--include/hw/hyperv/vmbus.h4
-rw-r--r--include/hw/i2c/aspeed_i2c.h37
-rw-r--r--include/hw/i2c/npcm7xx_smbus.h2
-rw-r--r--include/hw/i2c/pm_smbus.h2
-rw-r--r--include/hw/i386/apic_internal.h2
-rw-r--r--include/hw/i386/hostmem-epc.h2
-rw-r--r--include/hw/i386/intel_iommu.h11
-rw-r--r--include/hw/i386/microvm.h2
-rw-r--r--include/hw/i386/nitro_enclave.h62
-rw-r--r--include/hw/i386/pc.h32
-rw-r--r--include/hw/i386/sgx-epc.h1
-rw-r--r--include/hw/i386/tdvf.h45
-rw-r--r--include/hw/i386/topology.h52
-rw-r--r--include/hw/i386/x86.h7
-rw-r--r--include/hw/ide/ahci-pci.h2
-rw-r--r--include/hw/ide/ahci.h4
-rw-r--r--include/hw/ide/ide-bus.h2
-rw-r--r--include/hw/ide/ide-dev.h2
-rw-r--r--include/hw/input/lm832x.h28
-rw-r--r--include/hw/input/tsc2xxx.h41
-rw-r--r--include/hw/intc/arm_gic.h3
-rw-r--r--include/hw/intc/arm_gic_common.h2
-rw-r--r--include/hw/intc/arm_gicv3_common.h54
-rw-r--r--include/hw/intc/armv7m_nvic.h14
-rw-r--r--include/hw/intc/aspeed_intc.h41
-rw-r--r--include/hw/intc/loongarch_extioi.h90
-rw-r--r--include/hw/intc/loongarch_extioi_common.h101
-rw-r--r--include/hw/intc/loongarch_ipi.h32
-rw-r--r--include/hw/intc/loongarch_pch_pic.h76
-rw-r--r--include/hw/intc/loongarch_pic_common.h81
-rw-r--r--include/hw/intc/loongson_ipi.h51
-rw-r--r--include/hw/intc/loongson_ipi_common.h79
-rw-r--r--include/hw/intc/riscv_aplic.h8
-rw-r--r--include/hw/ipack/ipack.h7
-rw-r--r--include/hw/ipmi/ipmi.h17
-rw-r--r--include/hw/irq.h29
-rw-r--r--include/hw/isa/apm.h2
-rw-r--r--include/hw/isa/isa.h4
-rw-r--r--include/hw/isa/superio.h2
-rw-r--r--include/hw/loader-fit.h21
-rw-r--r--include/hw/loader.h28
-rw-r--r--include/hw/loongarch/boot.h5
-rw-r--r--include/hw/loongarch/virt.h19
-rw-r--r--include/hw/m68k/q800.h2
-rw-r--r--include/hw/mem/npcm7xx_mc.h2
-rw-r--r--include/hw/mem/pc-dimm.h2
-rw-r--r--include/hw/mips/cps.h1
-rw-r--r--include/hw/mips/mips.h2
-rw-r--r--include/hw/misc/aspeed_hace.h17
-rw-r--r--include/hw/misc/aspeed_scu.h6
-rw-r--r--include/hw/misc/auxbus.h2
-rw-r--r--include/hw/misc/cbus.h31
-rw-r--r--include/hw/misc/imx8mp_analog.h81
-rw-r--r--include/hw/misc/imx8mp_ccm.h30
-rw-r--r--include/hw/misc/ivshmem-flat.h86
-rw-r--r--include/hw/misc/lasi.h2
-rw-r--r--include/hw/misc/mac_via.h2
-rw-r--r--include/hw/misc/mos6522.h2
-rw-r--r--include/hw/misc/npcm7xx_clk.h180
-rw-r--r--include/hw/misc/npcm7xx_gcr.h73
-rw-r--r--include/hw/misc/npcm7xx_mft.h2
-rw-r--r--include/hw/misc/npcm_clk.h195
-rw-r--r--include/hw/misc/npcm_gcr.h86
-rw-r--r--include/hw/misc/pvpanic.h3
-rw-r--r--include/hw/misc/stm32_rcc.h91
-rw-r--r--include/hw/misc/stm32l4x5_syscfg.h1
-rw-r--r--include/hw/misc/vmcoreinfo.h7
-rw-r--r--include/hw/misc/xlnx-versal-trng.h1
-rw-r--r--include/hw/net/dp8393x.h2
-rw-r--r--include/hw/net/imx_fec.h9
-rw-r--r--include/hw/net/lan9118_phy.h37
-rw-r--r--include/hw/net/mii.h6
-rw-r--r--include/hw/net/msf2-emac.h2
-rw-r--r--include/hw/net/npcm_pcs.h42
-rw-r--r--include/hw/nubus/nubus.h2
-rw-r--r--include/hw/nvram/fw_cfg.h45
-rw-r--r--include/hw/nvram/fw_cfg_acpi.h2
-rw-r--r--include/hw/nvram/mac_nvram.h2
-rw-r--r--include/hw/nvram/npcm7xx_otp.h2
-rw-r--r--include/hw/nvram/xlnx-bbram.h3
-rw-r--r--include/hw/nvram/xlnx-efuse.h2
-rw-r--r--include/hw/nvram/xlnx-versal-efuse.h1
-rw-r--r--include/hw/nvram/xlnx-zynqmp-efuse.h1
-rw-r--r--include/hw/openrisc/boot.h3
-rw-r--r--include/hw/pci-bridge/cxl_upstream_port.h4
-rw-r--r--include/hw/pci-host/astro.h6
-rw-r--r--include/hw/pci-host/designware.h8
-rw-r--r--include/hw/pci-host/dino.h4
-rw-r--r--include/hw/pci-host/fsl_imx8m_phy.h28
-rw-r--r--include/hw/pci-host/gpex.h7
-rw-r--r--include/hw/pci-host/ls7a.h9
-rw-r--r--include/hw/pci-host/pam.h2
-rw-r--r--include/hw/pci-host/pnv_phb4.h5
-rw-r--r--include/hw/pci-host/q35.h2
-rw-r--r--include/hw/pci-host/remote.h2
-rw-r--r--include/hw/pci-host/spapr.h5
-rw-r--r--include/hw/pci/msix.h1
-rw-r--r--include/hw/pci/pci.h350
-rw-r--r--include/hw/pci/pci_bridge.h7
-rw-r--r--include/hw/pci/pci_device.h41
-rw-r--r--include/hw/pci/pci_host.h1
-rw-r--r--include/hw/pci/pci_ids.h1
-rw-r--r--include/hw/pci/pcie.h17
-rw-r--r--include/hw/pci/pcie_doe.h5
-rw-r--r--include/hw/pci/pcie_host.h2
-rw-r--r--include/hw/pci/pcie_port.h1
-rw-r--r--include/hw/pci/pcie_regs.h8
-rw-r--r--include/hw/pci/pcie_sriov.h21
-rw-r--r--include/hw/pci/shpc.h2
-rw-r--r--include/hw/pcmcia.h66
-rw-r--r--include/hw/ppc/mac_dbdma.h8
-rw-r--r--include/hw/ppc/pnv.h14
-rw-r--r--include/hw/ppc/pnv_adu.h32
-rw-r--r--include/hw/ppc/pnv_chip.h13
-rw-r--r--include/hw/ppc/pnv_core.h31
-rw-r--r--include/hw/ppc/pnv_homer.h12
-rw-r--r--include/hw/ppc/pnv_lpc.h24
-rw-r--r--include/hw/ppc/pnv_occ.h11
-rw-r--r--include/hw/ppc/pnv_pnor.h7
-rw-r--r--include/hw/ppc/pnv_sbe.h2
-rw-r--r--include/hw/ppc/pnv_xscom.h17
-rw-r--r--include/hw/ppc/ppc.h7
-rw-r--r--include/hw/ppc/ppc4xx.h2
-rw-r--r--include/hw/ppc/spapr.h13
-rw-r--r--include/hw/ppc/spapr_cpu_core.h1
-rw-r--r--include/hw/ppc/spapr_drc.h2
-rw-r--r--include/hw/ppc/spapr_nested.h75
-rw-r--r--include/hw/ppc/spapr_vio.h2
-rw-r--r--include/hw/ppc/vof.h4
-rw-r--r--include/hw/ppc/xics.h2
-rw-r--r--include/hw/ppc/xive.h47
-rw-r--r--include/hw/ppc/xive2.h42
-rw-r--r--include/hw/ppc/xive2_regs.h51
-rw-r--r--include/hw/ppc/xive_regs.h70
-rw-r--r--include/hw/qdev-core.h95
-rw-r--r--include/hw/qdev-properties-system.h18
-rw-r--r--include/hw/qdev-properties.h23
-rw-r--r--include/hw/register.h2
-rw-r--r--include/hw/remote/iohub.h1
-rw-r--r--include/hw/remote/proxy-memory-listener.h2
-rw-r--r--include/hw/resettable.h17
-rw-r--r--include/hw/riscv/boot.h36
-rw-r--r--include/hw/riscv/boot_opensbi.h29
-rw-r--r--include/hw/riscv/iommu.h42
-rw-r--r--include/hw/riscv/microchip_pfsoc.h1
-rw-r--r--include/hw/riscv/numa.h2
-rw-r--r--include/hw/riscv/riscv_hart.h4
-rw-r--r--include/hw/riscv/virt.h7
-rw-r--r--include/hw/s390x/ap-bridge.h39
-rw-r--r--include/hw/s390x/cpu-topology.h6
-rw-r--r--include/hw/s390x/css-bridge.h1
-rw-r--r--include/hw/s390x/css.h10
-rw-r--r--include/hw/s390x/event-facility.h17
-rw-r--r--include/hw/s390x/ipl/qipl.h127
-rw-r--r--include/hw/s390x/s390-pci-bus.h3
-rw-r--r--include/hw/s390x/s390-pci-clp.h1
-rw-r--r--include/hw/s390x/s390-pci-inst.h2
-rw-r--r--include/hw/s390x/s390-virtio-ccw.h20
-rw-r--r--include/hw/s390x/s390_flic.h3
-rw-r--r--include/hw/s390x/storage-attributes.h1
-rw-r--r--include/hw/s390x/storage-keys.h18
-rw-r--r--include/hw/s390x/vfio-ccw.h2
-rw-r--r--include/hw/scsi/scsi.h8
-rw-r--r--include/hw/sd/aspeed_sdhci.h13
-rw-r--r--include/hw/sd/sd.h9
-rw-r--r--include/hw/sd/sdcard_legacy.h50
-rw-r--r--include/hw/sd/sdhci.h7
-rw-r--r--include/hw/sh4/sh.h19
-rw-r--r--include/hw/sh4/sh_intc.h2
-rw-r--r--include/hw/southbridge/ich9.h6
-rw-r--r--include/hw/ssi/allwinner-a10-spi.h57
-rw-r--r--include/hw/ssi/aspeed_smc.h1
-rw-r--r--include/hw/ssi/npcm7xx_fiu.h1
-rw-r--r--include/hw/ssi/pnv_spi.h72
-rw-r--r--include/hw/ssi/pnv_spi_regs.h133
-rw-r--r--include/hw/sysbus.h5
-rw-r--r--include/hw/timer/aspeed_timer.h4
-rw-r--r--include/hw/timer/hpet.h3
-rw-r--r--include/hw/timer/imx_gpt.h1
-rw-r--r--include/hw/timer/npcm7xx_timer.h2
-rw-r--r--include/hw/tricore/triboard.h4
-rw-r--r--include/hw/tricore/tricore.h2
-rw-r--r--include/hw/uefi/hardware-info.h35
-rw-r--r--include/hw/uefi/var-service-api.h48
-rw-r--r--include/hw/uefi/var-service-edk2.h227
-rw-r--r--include/hw/uefi/var-service.h191
-rw-r--r--include/hw/usb.h14
-rw-r--r--include/hw/usb/dwc2-regs.h4
-rw-r--r--include/hw/usb/hcd-dwc3.h2
-rw-r--r--include/hw/usb/hcd-musb.h49
-rw-r--r--include/hw/usb/uhci-regs.h11
-rw-r--r--include/hw/vfio/vfio-common.h296
-rw-r--r--include/hw/vfio/vfio-container-base.h122
-rw-r--r--include/hw/vfio/vfio-container.h38
-rw-r--r--include/hw/vfio/vfio-cpr.h57
-rw-r--r--include/hw/vfio/vfio-device.h286
-rw-r--r--include/hw/vfio/vfio-migration.h16
-rw-r--r--include/hw/vfio/vfio-platform.h4
-rw-r--r--include/hw/vfio/vfio-region.h48
-rw-r--r--include/hw/virtio/cbor-helpers.h45
-rw-r--r--include/hw/virtio/iothread-vq-mapping.h45
-rw-r--r--include/hw/virtio/vhost-backend.h2
-rw-r--r--include/hw/virtio/vhost-scsi-common.h2
-rw-r--r--include/hw/virtio/vhost-user.h1
-rw-r--r--include/hw/virtio/vhost-vdpa.h22
-rw-r--r--include/hw/virtio/vhost-vsock-common.h2
-rw-r--r--include/hw/virtio/vhost.h39
-rw-r--r--include/hw/virtio/virtio-acpi.h2
-rw-r--r--include/hw/virtio/virtio-balloon.h6
-rw-r--r--include/hw/virtio/virtio-blk.h17
-rw-r--r--include/hw/virtio/virtio-crypto.h4
-rw-r--r--include/hw/virtio/virtio-gpu.h56
-rw-r--r--include/hw/virtio/virtio-input.h2
-rw-r--r--include/hw/virtio/virtio-iommu.h3
-rw-r--r--include/hw/virtio/virtio-mem.h19
-rw-r--r--include/hw/virtio/virtio-net.h2
-rw-r--r--include/hw/virtio/virtio-nsm.h49
-rw-r--r--include/hw/virtio/virtio-pci.h16
-rw-r--r--include/hw/virtio/virtio-pmem.h2
-rw-r--r--include/hw/virtio/virtio-rng.h2
-rw-r--r--include/hw/virtio/virtio-scsi.h17
-rw-r--r--include/hw/virtio/virtio.h32
-rw-r--r--include/hw/vmapple/vmapple.h23
-rw-r--r--include/hw/xen/arch_hvm.h2
-rw-r--r--include/hw/xen/interface/io/blkif.h2
-rw-r--r--include/hw/xen/xen-block.h2
-rw-r--r--include/hw/xen/xen-bus-helper.h9
-rw-r--r--include/hw/xen/xen-bus.h4
-rw-r--r--include/hw/xen/xen-hvm-common.h17
-rw-r--r--include/hw/xen/xen-legacy-backend.h6
-rw-r--r--include/hw/xen/xen-pvh-common.h91
-rw-r--r--include/hw/xen/xen.h2
-rw-r--r--include/hw/xen/xen_native.h3
-rw-r--r--include/hw/xen/xen_pvdev.h5
-rw-r--r--include/hw/xtensa/mx_pic.h2
-rw-r--r--include/io/channel-socket.h13
-rw-r--r--include/io/channel-tls.h12
-rw-r--r--include/io/channel.h24
-rw-r--r--include/libdecnumber/dconfig.h5
-rw-r--r--include/libdecnumber/decContext.h5
-rw-r--r--include/libdecnumber/decDPD.h5
-rw-r--r--include/libdecnumber/decNumber.h5
-rw-r--r--include/libdecnumber/decNumberLocal.h5
-rw-r--r--include/libdecnumber/dpd/decimal128.h5
-rw-r--r--include/libdecnumber/dpd/decimal128Local.h5
-rw-r--r--include/libdecnumber/dpd/decimal32.h5
-rw-r--r--include/libdecnumber/dpd/decimal64.h5
-rw-r--r--include/migration/client-options.h4
-rw-r--r--include/migration/cpr.h39
-rw-r--r--include/migration/misc.h45
-rw-r--r--include/migration/register.h67
-rw-r--r--include/migration/vmstate.h15
-rw-r--r--include/net/checksum.h2
-rw-r--r--include/net/eth.h2
-rw-r--r--include/net/net.h4
-rw-r--r--include/net/queue.h4
-rw-r--r--include/qapi/compat-policy.h2
-rw-r--r--include/qapi/error-internal.h35
-rw-r--r--include/qapi/error.h14
-rw-r--r--include/qapi/qmp-registry.h67
-rw-r--r--include/qapi/qmp/dispatch.h67
-rw-r--r--include/qapi/qmp/qbool.h31
-rw-r--r--include/qapi/qmp/qdict.h71
-rw-r--r--include/qapi/qmp/qerror.h6
-rw-r--r--include/qapi/qmp/qlist.h69
-rw-r--r--include/qapi/qmp/qnull.h33
-rw-r--r--include/qapi/qmp/qnum.h75
-rw-r--r--include/qapi/qmp/qobject.h144
-rw-r--r--include/qapi/qmp/qstring.h33
-rw-r--r--include/qapi/util.h2
-rw-r--r--include/qapi/visitor-impl.h4
-rw-r--r--include/qapi/visitor.h12
-rw-r--r--include/qemu-main.h14
-rw-r--r--include/qemu/accel.h10
-rw-r--r--include/qemu/atomic.h20
-rw-r--r--include/qemu/atomic128.h5
-rw-r--r--include/qemu/bitmap.h8
-rw-r--r--include/qemu/bitops.h172
-rw-r--r--include/qemu/bswap.h5
-rw-r--r--include/qemu/cacheflush.h7
-rw-r--r--include/qemu/clang-tsa.h114
-rw-r--r--include/qemu/co-shared-resource.h7
-rw-r--r--include/qemu/compiler.h125
-rw-r--r--include/qemu/coroutine.h1
-rw-r--r--include/qemu/crc-ccitt.h2
-rw-r--r--include/qemu/cutils.h20
-rw-r--r--include/qemu/datadir.h11
-rw-r--r--include/qemu/envlist.h2
-rw-r--r--include/qemu/fifo8.h82
-rw-r--r--include/qemu/futex.h44
-rw-r--r--include/qemu/help-texts.h2
-rw-r--r--include/qemu/host-pci-mmio.h136
-rw-r--r--include/qemu/host-utils.h9
-rw-r--r--include/qemu/iov.h32
-rw-r--r--include/qemu/iova-tree.h49
-rw-r--r--include/qemu/job.h3
-rw-r--r--include/qemu/lockcnt.h130
-rw-r--r--include/qemu/log.h1
-rw-r--r--include/qemu/main-loop.h25
-rw-r--r--include/qemu/osdep.h48
-rw-r--r--include/qemu/plugin-memory.h1
-rw-r--r--include/qemu/plugin.h4
-rw-r--r--include/qemu/pmem.h1
-rw-r--r--include/qemu/qemu-plugin.h65
-rw-r--r--include/qemu/range.h4
-rw-r--r--include/qemu/rcu.h4
-rw-r--r--include/qemu/rcu_queue.h4
-rw-r--r--include/qemu/reserved-region.h2
-rw-r--r--include/qemu/s390x_pci_mmio.h24
-rw-r--r--include/qemu/sockets.h16
-rw-r--r--include/qemu/target-info-impl.h32
-rw-r--r--include/qemu/target-info.h41
-rw-r--r--include/qemu/thread-posix.h9
-rw-r--r--include/qemu/thread-win32.h6
-rw-r--r--include/qemu/thread.h133
-rw-r--r--include/qemu/timed-average.h4
-rw-r--r--include/qemu/timer.h31
-rw-r--r--include/qemu/typedefs.h7
-rw-r--r--include/qemu/userfaultfd.h1
-rw-r--r--include/qobject/json-parser.h (renamed from include/qapi/qmp/json-parser.h)0
-rw-r--r--include/qobject/json-writer.h (renamed from include/qapi/qmp/json-writer.h)0
-rw-r--r--include/qobject/qbool.h31
-rw-r--r--include/qobject/qdict.h71
-rw-r--r--include/qobject/qjson.h (renamed from include/qapi/qmp/qjson.h)0
-rw-r--r--include/qobject/qlist.h69
-rw-r--r--include/qobject/qlit.h (renamed from include/qapi/qmp/qlit.h)0
-rw-r--r--include/qobject/qnull.h33
-rw-r--r--include/qobject/qnum.h75
-rw-r--r--include/qobject/qobject.h144
-rw-r--r--include/qobject/qstring.h33
-rw-r--r--include/qom/object.h80
-rw-r--r--include/semihosting/console.h2
-rw-r--r--include/semihosting/semihost.h29
-rw-r--r--include/semihosting/syscalls.h3
-rw-r--r--include/semihosting/uaccess.h55
-rw-r--r--include/standard-headers/asm-x86/setup_data.h17
-rw-r--r--include/standard-headers/drm/drm_fourcc.h130
-rw-r--r--include/standard-headers/linux/const.h17
-rw-r--r--include/standard-headers/linux/ethtool.h391
-rw-r--r--include/standard-headers/linux/fuse.h110
-rw-r--r--include/standard-headers/linux/input-event-codes.h6
-rw-r--r--include/standard-headers/linux/pci_regs.h120
-rw-r--r--include/standard-headers/linux/virtio_balloon.h16
-rw-r--r--include/standard-headers/linux/virtio_crypto.h1
-rw-r--r--include/standard-headers/linux/virtio_gpu.h4
-rw-r--r--include/standard-headers/linux/virtio_net.h13
-rw-r--r--include/standard-headers/linux/virtio_pci.h146
-rw-r--r--include/standard-headers/linux/virtio_snd.h2
-rw-r--r--include/standard-headers/linux/vmclock-abi.h182
-rw-r--r--include/standard-headers/uefi/uefi.h187
-rw-r--r--include/sysemu/accel-blocker.h55
-rw-r--r--include/sysemu/accel-ops.h74
-rw-r--r--include/sysemu/arch_init.h33
-rw-r--r--include/sysemu/block-backend-global-state.h133
-rw-r--r--include/sysemu/block-backend-io.h230
-rw-r--r--include/sysemu/cpu-throttle.h68
-rw-r--r--include/sysemu/cpu-timers.h104
-rw-r--r--include/sysemu/cpus.h53
-rw-r--r--include/sysemu/cryptodev-vhost-user.h50
-rw-r--r--include/sysemu/cryptodev-vhost.h153
-rw-r--r--include/sysemu/cryptodev.h447
-rw-r--r--include/sysemu/device_tree.h215
-rw-r--r--include/sysemu/dma.h324
-rw-r--r--include/sysemu/dump.h225
-rw-r--r--include/sysemu/host_iommu_device.h109
-rw-r--r--include/sysemu/hostmem.h94
-rw-r--r--include/sysemu/hvf.h71
-rw-r--r--include/sysemu/hvf_int.h76
-rw-r--r--include/sysemu/hw_accel.h25
-rw-r--r--include/sysemu/iommufd.h55
-rw-r--r--include/sysemu/iothread.h67
-rw-r--r--include/sysemu/kvm.h548
-rw-r--r--include/sysemu/kvm_int.h145
-rw-r--r--include/sysemu/kvm_xen.h44
-rw-r--r--include/sysemu/numa.h114
-rw-r--r--include/sysemu/os-posix.h101
-rw-r--r--include/sysemu/os-win32.h277
-rw-r--r--include/sysemu/qtest.h39
-rw-r--r--include/sysemu/replay.h190
-rw-r--r--include/sysemu/reset.h126
-rw-r--r--include/sysemu/rtc.h58
-rw-r--r--include/sysemu/runstate.h111
-rw-r--r--include/sysemu/sysemu.h115
-rw-r--r--include/sysemu/tcg.h20
-rw-r--r--include/sysemu/tpm_backend.h216
-rw-r--r--include/sysemu/tpm_util.h72
-rw-r--r--include/sysemu/vhost-user-backend.h48
-rw-r--r--include/sysemu/xen-mapcache.h71
-rw-r--r--include/sysemu/xen.h54
-rw-r--r--include/system/accel-blocker.h55
-rw-r--r--include/system/accel-ops.h73
-rw-r--r--include/system/address-spaces.h35
-rw-r--r--include/system/arch_init.h30
-rw-r--r--include/system/balloon.h (renamed from include/sysemu/balloon.h)0
-rw-r--r--include/system/block-backend-common.h (renamed from include/sysemu/block-backend-common.h)0
-rw-r--r--include/system/block-backend-global-state.h124
-rw-r--r--include/system/block-backend-io.h237
-rw-r--r--include/system/block-backend.h (renamed from include/sysemu/block-backend.h)0
-rw-r--r--include/system/block-ram-registrar.h (renamed from include/sysemu/block-ram-registrar.h)0
-rw-r--r--include/system/blockdev.h (renamed from include/sysemu/blockdev.h)0
-rw-r--r--include/system/confidential-guest-support.h95
-rw-r--r--include/system/cpu-throttle.h82
-rw-r--r--include/system/cpu-timers-internal.h (renamed from include/sysemu/cpu-timers-internal.h)0
-rw-r--r--include/system/cpu-timers.h46
-rw-r--r--include/system/cpus.h49
-rw-r--r--include/system/cryptodev-vhost-user.h50
-rw-r--r--include/system/cryptodev-vhost.h153
-rw-r--r--include/system/cryptodev.h447
-rw-r--r--include/system/device_tree.h213
-rw-r--r--include/system/dirtylimit.h (renamed from include/sysemu/dirtylimit.h)0
-rw-r--r--include/system/dirtyrate.h (renamed from include/sysemu/dirtyrate.h)0
-rw-r--r--include/system/dma.h322
-rw-r--r--include/system/dump-arch.h (renamed from include/sysemu/dump-arch.h)0
-rw-r--r--include/system/dump.h225
-rw-r--r--include/system/event-loop-base.h (renamed from include/sysemu/event-loop-base.h)0
-rw-r--r--include/system/host_iommu_device.h125
-rw-r--r--include/system/hostmem.h99
-rw-r--r--include/system/hvf.h77
-rw-r--r--include/system/hvf_int.h80
-rw-r--r--include/system/hw_accel.h25
-rw-r--r--include/system/iommufd.h120
-rw-r--r--include/system/ioport.h75
-rw-r--r--include/system/iothread.h67
-rw-r--r--include/system/kvm.h589
-rw-r--r--include/system/kvm_int.h187
-rw-r--r--include/system/kvm_xen.h44
-rw-r--r--include/system/memory.h3267
-rw-r--r--include/system/memory_mapping.h (renamed from include/sysemu/memory_mapping.h)0
-rw-r--r--include/system/numa.h113
-rw-r--r--include/system/nvmm.h (renamed from include/sysemu/nvmm.h)0
-rw-r--r--include/system/os-posix.h101
-rw-r--r--include/system/os-wasm.h104
-rw-r--r--include/system/os-win32.h276
-rw-r--r--include/system/qtest.h36
-rw-r--r--include/system/ram_addr.h561
-rw-r--r--include/system/ramblock.h116
-rw-r--r--include/system/replay.h179
-rw-r--r--include/system/reset.h127
-rw-r--r--include/system/rng-random.h (renamed from include/sysemu/rng-random.h)0
-rw-r--r--include/system/rng.h (renamed from include/sysemu/rng.h)0
-rw-r--r--include/system/rtc.h58
-rw-r--r--include/system/runstate-action.h (renamed from include/sysemu/runstate-action.h)0
-rw-r--r--include/system/runstate.h119
-rw-r--r--include/system/seccomp.h (renamed from include/sysemu/seccomp.h)0
-rw-r--r--include/system/spdm-socket.h74
-rw-r--r--include/system/stats.h (renamed from include/sysemu/stats.h)0
-rw-r--r--include/system/system.h125
-rw-r--r--include/system/tcg.h28
-rw-r--r--include/system/tpm.h (renamed from include/sysemu/tpm.h)0
-rw-r--r--include/system/tpm_backend.h216
-rw-r--r--include/system/tpm_util.h72
-rw-r--r--include/system/vhost-user-backend.h48
-rw-r--r--include/system/watchdog.h (renamed from include/sysemu/watchdog.h)0
-rw-r--r--include/system/whpx.h (renamed from include/sysemu/whpx.h)0
-rw-r--r--include/system/xen-mapcache.h30
-rw-r--r--include/system/xen.h35
-rw-r--r--include/tcg/insn-start-words.h15
-rw-r--r--include/tcg/oversized-guest.h23
-rw-r--r--include/tcg/startup.h6
-rw-r--r--include/tcg/tcg-op-common.h5
-rw-r--r--include/tcg/tcg-op-gvec-common.h63
-rw-r--r--include/tcg/tcg-op.h22
-rw-r--r--include/tcg/tcg-opc.h396
-rw-r--r--include/tcg/tcg-temp-internal.h6
-rw-r--r--include/tcg/tcg.h264
-rw-r--r--include/ui/clipboard.h31
-rw-r--r--include/ui/console.h3
-rw-r--r--include/ui/dmabuf.h20
-rw-r--r--include/ui/egl-helpers.h9
-rw-r--r--include/ui/gtk.h2
-rw-r--r--include/ui/qemu-pixman.h28
-rw-r--r--include/ui/sdl2.h2
-rw-r--r--include/ui/surface.h14
-rw-r--r--include/user/abitypes.h7
-rw-r--r--include/user/cpu_loop.h88
-rw-r--r--include/user/guest-host.h76
-rw-r--r--include/user/mmap.h32
-rw-r--r--include/user/page-protection.h96
-rw-r--r--include/user/signal.h25
-rw-r--r--io/channel-buffer.c2
-rw-r--r--io/channel-command.c2
-rw-r--r--io/channel-file.c2
-rw-r--r--io/channel-null.c2
-rw-r--r--io/channel-socket.c41
-rw-r--r--io/channel-tls.c160
-rw-r--r--io/channel-websock.c4
-rw-r--r--io/channel.c22
-rw-r--r--io/dns-resolver.c21
-rw-r--r--io/trace-events5
-rw-r--r--iothread.c6
-rw-r--r--job.c6
-rw-r--r--libdecnumber/decContext.c5
-rw-r--r--libdecnumber/decNumber.c5
-rw-r--r--libdecnumber/dpd/decimal128.c5
-rw-r--r--libdecnumber/dpd/decimal32.c5
-rw-r--r--libdecnumber/dpd/decimal64.c5
-rw-r--r--linux-headers/asm-arm64/kvm.h29
-rw-r--r--linux-headers/asm-arm64/mman.h9
-rw-r--r--linux-headers/asm-arm64/unistd.h25
-rw-r--r--linux-headers/asm-arm64/unistd_64.h329
-rw-r--r--linux-headers/asm-generic/mman-common.h4
-rw-r--r--linux-headers/asm-generic/mman.h4
-rw-r--r--linux-headers/asm-generic/unistd.h19
-rw-r--r--linux-headers/asm-loongarch/kvm.h44
-rw-r--r--linux-headers/asm-loongarch/kvm_para.h22
-rw-r--r--linux-headers/asm-loongarch/unistd.h4
-rw-r--r--linux-headers/asm-loongarch/unistd_64.h325
-rw-r--r--linux-headers/asm-mips/mman.h3
-rw-r--r--linux-headers/asm-mips/unistd_n32.h5
-rw-r--r--linux-headers/asm-mips/unistd_n64.h5
-rw-r--r--linux-headers/asm-mips/unistd_o32.h5
-rw-r--r--linux-headers/asm-powerpc/kvm.h3
-rw-r--r--linux-headers/asm-powerpc/unistd_32.h5
-rw-r--r--linux-headers/asm-powerpc/unistd_64.h5
-rw-r--r--linux-headers/asm-riscv/kvm.h20
-rw-r--r--linux-headers/asm-riscv/unistd.h41
-rw-r--r--linux-headers/asm-riscv/unistd_32.h320
-rw-r--r--linux-headers/asm-riscv/unistd_64.h330
-rw-r--r--linux-headers/asm-s390/kvm.h3
-rw-r--r--linux-headers/asm-s390/unistd_32.h5
-rw-r--r--linux-headers/asm-s390/unistd_64.h5
-rw-r--r--linux-headers/asm-x86/kvm.h79
-rw-r--r--linux-headers/asm-x86/mman.h3
-rw-r--r--linux-headers/asm-x86/unistd_32.h5
-rw-r--r--linux-headers/asm-x86/unistd_64.h6
-rw-r--r--linux-headers/asm-x86/unistd_x32.h6
-rw-r--r--linux-headers/linux/bits.h11
-rw-r--r--linux-headers/linux/const.h17
-rw-r--r--linux-headers/linux/iommufd.h509
-rw-r--r--linux-headers/linux/kvm.h66
-rw-r--r--linux-headers/linux/mman.h1
-rw-r--r--linux-headers/linux/psci.h5
-rw-r--r--linux-headers/linux/psp-sev.h49
-rw-r--r--linux-headers/linux/stddef.h15
-rw-r--r--linux-headers/linux/vduse.h2
-rw-r--r--linux-headers/linux/vfio.h32
-rw-r--r--linux-headers/linux/vhost.h4
-rw-r--r--linux-user/aarch64/Makefile.vdso5
-rw-r--r--linux-user/aarch64/cpu_loop.c52
-rw-r--r--linux-user/aarch64/meson.build6
-rw-r--r--linux-user/aarch64/mte_user_helper.h2
-rw-r--r--linux-user/aarch64/syscall_64.tbl405
-rw-r--r--linux-user/aarch64/syscall_nr.h314
-rw-r--r--linux-user/aarch64/syscallhdr.sh28
-rw-r--r--linux-user/aarch64/target_signal.h2
-rwxr-xr-xlinux-user/aarch64/vdso-be.sobin3224 -> 3224 bytes
-rwxr-xr-xlinux-user/aarch64/vdso-le.sobin3224 -> 3224 bytes
-rw-r--r--linux-user/alpha/cpu_loop.c4
-rw-r--r--linux-user/alpha/syscall.tbl30
-rw-r--r--linux-user/alpha/syscallhdr.sh2
-rw-r--r--linux-user/alpha/target_proc.h2
-rw-r--r--linux-user/arm/Makefile.vdso11
-rw-r--r--linux-user/arm/cpu_loop.c50
-rw-r--r--linux-user/arm/meson.build13
-rw-r--r--linux-user/arm/nwfpe/fpa11.c23
-rw-r--r--linux-user/arm/syscall.tbl25
-rw-r--r--linux-user/arm/syscallhdr.sh2
-rw-r--r--linux-user/arm/target_signal.h2
-rwxr-xr-xlinux-user/arm/vdso-be.sobin2648 -> 0 bytes
-rwxr-xr-xlinux-user/arm/vdso-be32.sobin0 -> 2648 bytes
-rwxr-xr-xlinux-user/arm/vdso-be8.sobin0 -> 2648 bytes
-rwxr-xr-xlinux-user/arm/vdso-le.sobin2648 -> 2648 bytes
-rw-r--r--linux-user/cpu_loop-common.h31
-rw-r--r--linux-user/cris/cpu_loop.c95
-rw-r--r--linux-user/cris/signal.c194
-rw-r--r--linux-user/cris/sockbits.h1
-rw-r--r--linux-user/cris/syscall_nr.h367
-rw-r--r--linux-user/cris/target_cpu.h45
-rw-r--r--linux-user/cris/target_elf.h14
-rw-r--r--linux-user/cris/target_errno_defs.h7
-rw-r--r--linux-user/cris/target_fcntl.h11
-rw-r--r--linux-user/cris/target_mman.h13
-rw-r--r--linux-user/cris/target_prctl.h1
-rw-r--r--linux-user/cris/target_proc.h1
-rw-r--r--linux-user/cris/target_resource.h1
-rw-r--r--linux-user/cris/target_signal.h9
-rw-r--r--linux-user/cris/target_structs.h1
-rw-r--r--linux-user/cris/target_syscall.h46
-rw-r--r--linux-user/cris/termbits.h225
-rw-r--r--linux-user/elfload.c273
-rw-r--r--linux-user/fd-trans.c169
-rw-r--r--linux-user/flatload.c5
-rw-r--r--linux-user/gen-vdso-elfn.c.inc98
-rw-r--r--linux-user/gen-vdso.c67
-rw-r--r--linux-user/generic/signal.h1
-rw-r--r--linux-user/hexagon/cpu_loop.c8
-rw-r--r--linux-user/hexagon/meson.build6
-rw-r--r--linux-user/hexagon/syscall.tbl405
-rw-r--r--linux-user/hexagon/syscall_nr.h332
-rw-r--r--linux-user/hexagon/syscallhdr.sh28
-rw-r--r--linux-user/hppa/cpu_loop.c18
-rw-r--r--linux-user/hppa/syscall.tbl43
-rw-r--r--linux-user/hppa/syscallhdr.sh2
-rw-r--r--linux-user/i386/cpu_loop.c7
-rw-r--r--linux-user/i386/signal.c4
-rw-r--r--linux-user/i386/syscall_32.tbl35
-rw-r--r--linux-user/i386/syscallhdr.sh2
-rw-r--r--linux-user/i386/target_signal.h2
-rw-r--r--linux-user/loongarch64/Makefile.vdso3
-rw-r--r--linux-user/loongarch64/cpu_loop.c29
-rw-r--r--linux-user/loongarch64/meson.build7
-rw-r--r--linux-user/loongarch64/syscall.tbl405
-rw-r--r--linux-user/loongarch64/syscall_nr.h312
-rw-r--r--linux-user/loongarch64/syscallhdr.sh28
-rwxr-xr-xlinux-user/loongarch64/vdso.sobin3560 -> 3560 bytes
-rw-r--r--linux-user/m68k/cpu_loop.c4
-rw-r--r--linux-user/m68k/syscall.tbl24
-rw-r--r--linux-user/m68k/syscallhdr.sh2
-rw-r--r--linux-user/m68k/target_signal.h1
-rw-r--r--linux-user/main.c46
-rw-r--r--linux-user/meson.build3
-rw-r--r--linux-user/microblaze/cpu_loop.c4
-rw-r--r--linux-user/microblaze/syscall.tbl24
-rw-r--r--linux-user/microblaze/syscallhdr.sh2
-rw-r--r--linux-user/microblaze/target_signal.h2
-rw-r--r--linux-user/mips/cpu_loop.c4
-rw-r--r--linux-user/mips/syscall-args-o32.c.inc20
-rw-r--r--linux-user/mips/syscall_o32.tbl38
-rw-r--r--linux-user/mips/syscallhdr.sh2
-rw-r--r--linux-user/mips/target_elf.h3
-rw-r--r--linux-user/mips64/syscall_n32.tbl34
-rw-r--r--linux-user/mips64/syscall_n64.tbl22
-rw-r--r--linux-user/mips64/syscallhdr.sh2
-rw-r--r--linux-user/mips64/target_elf.h24
-rw-r--r--linux-user/mmap.c68
-rw-r--r--linux-user/openrisc/cpu_loop.c4
-rw-r--r--linux-user/openrisc/meson.build5
-rw-r--r--linux-user/openrisc/syscall.tbl405
-rw-r--r--linux-user/openrisc/syscall_nr.h334
-rw-r--r--linux-user/openrisc/syscallhdr.sh28
-rw-r--r--linux-user/plugin-api.c15
-rw-r--r--linux-user/ppc/Makefile.vdso6
-rw-r--r--linux-user/ppc/cpu_loop.c4
-rw-r--r--linux-user/ppc/signal.c2
-rw-r--r--linux-user/ppc/syscall.tbl73
-rw-r--r--linux-user/ppc/syscallhdr.sh2
-rw-r--r--linux-user/ppc/target_signal.h2
-rwxr-xr-xlinux-user/ppc/vdso-32.sobin3020 -> 3020 bytes
-rwxr-xr-xlinux-user/ppc/vdso-64.sobin3896 -> 3896 bytes
-rwxr-xr-xlinux-user/ppc/vdso-64le.sobin3896 -> 3896 bytes
-rw-r--r--linux-user/qemu.h20
-rw-r--r--linux-user/riscv/cpu_loop.c6
-rw-r--r--linux-user/riscv/meson.build6
-rw-r--r--linux-user/riscv/syscall.tbl405
-rw-r--r--linux-user/riscv/syscall32_nr.h308
-rw-r--r--linux-user/riscv/syscall64_nr.h314
-rw-r--r--linux-user/riscv/syscall_nr.h15
-rw-r--r--linux-user/riscv/syscallhdr.sh28
-rw-r--r--linux-user/s390x/cpu_loop.c4
-rw-r--r--linux-user/s390x/syscall.tbl36
-rwxr-xr-xlinux-user/s390x/syscallhdr.sh2
-rw-r--r--linux-user/s390x/target_signal.h2
-rw-r--r--linux-user/sh4/cpu_loop.c4
-rw-r--r--linux-user/sh4/syscall.tbl27
-rw-r--r--linux-user/sh4/syscallhdr.sh2
-rw-r--r--linux-user/sh4/target_signal.h2
-rw-r--r--linux-user/signal-common.h3
-rw-r--r--linux-user/signal.c136
-rw-r--r--linux-user/sparc/cpu_loop.c4
-rw-r--r--linux-user/sparc/syscall.tbl42
-rw-r--r--linux-user/sparc/syscallhdr.sh2
-rw-r--r--linux-user/sparc/target_proc.h20
-rw-r--r--linux-user/strace.c251
-rw-r--r--linux-user/strace.list16
-rw-r--r--linux-user/syscall.c338
-rw-r--r--linux-user/syscall_defs.h47
-rw-r--r--linux-user/user-internals.h4
-rw-r--r--linux-user/user-mmap.h19
-rw-r--r--linux-user/vm86.c201
-rw-r--r--linux-user/x86_64/syscall_64.tbl30
-rw-r--r--linux-user/x86_64/syscallhdr.sh2
-rw-r--r--linux-user/x86_64/target_signal.h2
-rw-r--r--linux-user/xtensa/cpu_loop.c4
-rw-r--r--linux-user/xtensa/signal.c1
-rw-r--r--linux-user/xtensa/syscall.tbl24
-rw-r--r--linux-user/xtensa/syscallhdr.sh2
-rw-r--r--linux-user/xtensa/target_signal.h2
-rw-r--r--meson.build865
-rw-r--r--meson_options.txt29
-rw-r--r--migration/block-active.c48
-rw-r--r--migration/block-dirty-bitmap.c4
-rw-r--r--migration/channel-block.c4
-rw-r--r--migration/channel.c9
-rw-r--r--migration/colo.c34
-rw-r--r--migration/cpr-transfer.c74
-rw-r--r--migration/cpr.c266
-rw-r--r--migration/cpu-throttle.c199
-rw-r--r--migration/dirtyrate.c28
-rw-r--r--migration/dirtyrate.h2
-rw-r--r--migration/fd.c27
-rw-r--r--migration/file.c7
-rw-r--r--migration/file.h2
-rw-r--r--migration/global_state.c2
-rw-r--r--migration/meson.build7
-rw-r--r--migration/migration-hmp-cmds.c198
-rw-r--r--migration/migration.c942
-rw-r--r--migration/migration.h87
-rw-r--r--migration/multifd-device-state.c212
-rw-r--r--migration/multifd-nocomp.c468
-rw-r--r--migration/multifd-qatzip.c395
-rw-r--r--migration/multifd-qpl.c92
-rw-r--r--migration/multifd-uadk.c108
-rw-r--r--migration/multifd-zero-page.c37
-rw-r--r--migration/multifd-zlib.c101
-rw-r--r--migration/multifd-zstd.c108
-rw-r--r--migration/multifd.c824
-rw-r--r--migration/multifd.h251
-rw-r--r--migration/options.c129
-rw-r--r--migration/options.h7
-rw-r--r--migration/postcopy-ram.c93
-rw-r--r--migration/qemu-file.c86
-rw-r--r--migration/qemu-file.h4
-rw-r--r--migration/ram.c396
-rw-r--r--migration/ram.h29
-rw-r--r--migration/rdma.c197
-rw-r--r--migration/rdma.h12
-rw-r--r--migration/savevm.c452
-rw-r--r--migration/savevm.h13
-rw-r--r--migration/socket.c18
-rw-r--r--migration/socket.h1
-rw-r--r--migration/target.c8
-rw-r--r--migration/tls.c5
-rw-r--r--migration/tls.h2
-rw-r--r--migration/trace-events35
-rw-r--r--migration/vmstate-types.c26
-rw-r--r--migration/vmstate.c168
-rw-r--r--monitor/fds.c2
-rw-r--r--monitor/hmp-cmds-target.c12
-rw-r--r--monitor/hmp-cmds.c12
-rw-r--r--monitor/hmp-target.c2
-rw-r--r--monitor/hmp.c6
-rw-r--r--monitor/monitor-internal.h6
-rw-r--r--monitor/monitor.c11
-rw-r--r--monitor/qemu-config-qmp.c2
-rw-r--r--monitor/qmp-cmds-control.c2
-rw-r--r--monitor/qmp-cmds.c32
-rw-r--r--monitor/qmp.c9
-rw-r--r--nbd/client-connection.c5
-rw-r--r--nbd/common.c26
-rw-r--r--nbd/nbd-internal.h7
-rw-r--r--nbd/server.c93
-rw-r--r--nbd/trace-events1
-rw-r--r--net/can/can_core.c4
-rw-r--r--net/can/can_host.c4
-rw-r--r--net/can/can_socketcan.c2
-rw-r--r--net/checksum.c4
-rw-r--r--net/colo-compare.c11
-rw-r--r--net/dump.c7
-rw-r--r--net/filter-buffer.c2
-rw-r--r--net/filter-mirror.c4
-rw-r--r--net/filter-replay.c4
-rw-r--r--net/filter-rewriter.c2
-rw-r--r--net/filter.c4
-rw-r--r--net/hub.c27
-rw-r--r--net/meson.build2
-rw-r--r--net/net-hmp-cmds.c2
-rw-r--r--net/net.c88
-rw-r--r--net/queue.c11
-rw-r--r--net/slirp.c29
-rw-r--r--net/socket.c2
-rw-r--r--net/stream.c36
-rw-r--r--net/tap-linux.c17
-rw-r--r--net/tap-win32.c15
-rw-r--r--net/tap.c36
-rw-r--r--net/vhost-user.c3
-rw-r--r--net/vhost-vdpa.c72
-rw-r--r--net/vmnet-common.m27
-rw-r--r--os-posix.c17
-rw-r--r--os-wasm.c119
-rw-r--r--os-win32.c2
-rw-r--r--page-target.c23
-rw-r--r--page-vary-target.c48
-rw-r--r--pc-bios/README31
-rw-r--r--pc-bios/ast27x0_bootrom.binbin0 -> 15552 bytes
-rw-r--r--pc-bios/bios-256k.binbin262144 -> 262144 bytes
-rw-r--r--pc-bios/bios-microvm.binbin131072 -> 131072 bytes
-rw-r--r--pc-bios/bios.binbin131072 -> 131072 bytes
-rw-r--r--pc-bios/descriptors/60-edk2-loongarch64.json31
-rw-r--r--pc-bios/descriptors/60-edk2-riscv64.json31
-rw-r--r--pc-bios/descriptors/60-edk2-x86_64.json1
-rw-r--r--pc-bios/descriptors/meson.build4
-rw-r--r--pc-bios/dtb/bamboo.dtb (renamed from pc-bios/bamboo.dtb)bin3211 -> 3211 bytes
-rw-r--r--pc-bios/dtb/bamboo.dts (renamed from pc-bios/bamboo.dts)0
-rw-r--r--pc-bios/dtb/canyonlands.dtb (renamed from pc-bios/canyonlands.dtb)bin9779 -> 9779 bytes
-rw-r--r--pc-bios/dtb/canyonlands.dts (renamed from pc-bios/canyonlands.dts)0
-rw-r--r--pc-bios/dtb/meson.build23
-rw-r--r--pc-bios/dtb/petalogix-ml605.dtb (renamed from pc-bios/petalogix-ml605.dtb)bin9882 -> 9882 bytes
-rw-r--r--pc-bios/dtb/petalogix-ml605.dts (renamed from pc-bios/petalogix-ml605.dts)0
-rw-r--r--pc-bios/dtb/petalogix-s3adsp1800.dtb (renamed from pc-bios/petalogix-s3adsp1800.dtb)bin8161 -> 8161 bytes
-rw-r--r--pc-bios/dtb/petalogix-s3adsp1800.dts (renamed from pc-bios/petalogix-s3adsp1800.dts)0
-rw-r--r--pc-bios/edk2-aarch64-code.fd.bz2bin1588976 -> 1565763 bytes
-rw-r--r--pc-bios/edk2-arm-code.fd.bz2bin1571639 -> 1570311 bytes
-rw-r--r--pc-bios/edk2-i386-code.fd.bz2bin1775230 -> 1780004 bytes
-rw-r--r--pc-bios/edk2-i386-secure-code.fd.bz2bin1877268 -> 1858666 bytes
-rw-r--r--pc-bios/edk2-loongarch64-code.fd.bz2bin0 -> 1148383 bytes
-rw-r--r--pc-bios/edk2-loongarch64-vars.fd.bz2bin0 -> 233 bytes
-rw-r--r--pc-bios/edk2-riscv-code.fd.bz2bin1289337 -> 1296526 bytes
-rw-r--r--pc-bios/edk2-x86_64-code.fd.bz2bin1892766 -> 1907255 bytes
-rw-r--r--pc-bios/edk2-x86_64-microvm.fd.bz2bin1785290 -> 1787244 bytes
-rw-r--r--pc-bios/edk2-x86_64-secure-code.fd.bz2bin1969096 -> 1962992 bytes
-rwxr-xr-xpc-bios/hppa-firmware.imgbin167820 -> 167644 bytes
-rwxr-xr-xpc-bios/hppa-firmware64.imgbin206024 -> 206104 bytes
-rw-r--r--pc-bios/keymaps/meson.build17
-rw-r--r--pc-bios/meson.build30
-rw-r--r--pc-bios/npcm7xx_bootrom.binbin768 -> 768 bytes
-rw-r--r--pc-bios/npcm8xx_bootrom.binbin0 -> 608 bytes
-rw-r--r--pc-bios/openbios-ppcbin677196 -> 677200 bytes
-rw-r--r--pc-bios/openbios-sparc32bin382080 -> 382080 bytes
-rw-r--r--pc-bios/openbios-sparc64bin1593408 -> 1593408 bytes
-rw-r--r--pc-bios/opensbi-riscv32-generic-fw_dynamic.binbin268312 -> 268312 bytes
-rw-r--r--pc-bios/opensbi-riscv64-generic-fw_dynamic.binbin272504 -> 272504 bytes
-rw-r--r--pc-bios/pnv-pnor.binbin0 -> 139264 bytes
-rw-r--r--pc-bios/s390-ccw.imgbin42608 -> 96000 bytes
-rw-r--r--pc-bios/s390-ccw/Makefile72
-rw-r--r--pc-bios/s390-ccw/bootmap.c458
-rw-r--r--pc-bios/s390-ccw/bootmap.h20
-rw-r--r--pc-bios/s390-ccw/cio.c81
-rw-r--r--pc-bios/s390-ccw/cio.h2
-rw-r--r--pc-bios/s390-ccw/dasd-ipl.c71
-rw-r--r--pc-bios/s390-ccw/dasd-ipl.h2
-rw-r--r--pc-bios/s390-ccw/iplb.h108
-rw-r--r--pc-bios/s390-ccw/jump2ipl.c22
-rw-r--r--pc-bios/s390-ccw/libc.c88
-rw-r--r--pc-bios/s390-ccw/libc.h89
-rw-r--r--pc-bios/s390-ccw/main.c99
-rw-r--r--pc-bios/s390-ccw/menu.c51
-rw-r--r--pc-bios/s390-ccw/netboot.mak62
-rw-r--r--pc-bios/s390-ccw/netmain.c90
-rw-r--r--pc-bios/s390-ccw/s390-ccw.h36
-rw-r--r--pc-bios/s390-ccw/sclp.c7
-rw-r--r--pc-bios/s390-ccw/start.S11
-rw-r--r--pc-bios/s390-ccw/virtio-blkdev.c12
-rw-r--r--pc-bios/s390-ccw/virtio-net.c14
-rw-r--r--pc-bios/s390-ccw/virtio-scsi.c160
-rw-r--r--pc-bios/s390-ccw/virtio.c74
-rw-r--r--pc-bios/s390-ccw/virtio.h5
-rw-r--r--pc-bios/s390-netboot.imgbin67232 -> 0 bytes
-rw-r--r--pc-bios/skiboot.lidbin2527328 -> 2592960 bytes
-rw-r--r--pc-bios/slof.binbin995000 -> 996184 bytes
-rw-r--r--pc-bios/vgabios-ati.binbin39424 -> 39424 bytes
-rw-r--r--pc-bios/vgabios-bochs-display.binbin28672 -> 28672 bytes
-rw-r--r--pc-bios/vgabios-cirrus.binbin38912 -> 39424 bytes
-rw-r--r--pc-bios/vgabios-qxl.binbin39424 -> 39424 bytes
-rw-r--r--pc-bios/vgabios-ramfb.binbin28672 -> 28672 bytes
-rw-r--r--pc-bios/vgabios-stdvga.binbin39424 -> 39424 bytes
-rw-r--r--pc-bios/vgabios-virtio.binbin39424 -> 39424 bytes
-rw-r--r--pc-bios/vgabios-vmware.binbin39424 -> 39424 bytes
-rw-r--r--pc-bios/vgabios.binbin38912 -> 38912 bytes
-rw-r--r--plugins/api-system.c131
-rw-r--r--plugins/api-user.c57
-rw-r--r--plugins/api.c223
-rw-r--r--plugins/core.c59
-rw-r--r--plugins/loader.c29
-rw-r--r--plugins/meson.build78
-rw-r--r--plugins/plugin.h7
-rw-r--r--plugins/qemu-plugins.symbols57
-rw-r--r--plugins/system.c24
-rw-r--r--plugins/user.c19
-rw-r--r--po/it.po2
-rw-r--r--python/Makefile8
-rw-r--r--python/scripts/mkvenv.py3
-rwxr-xr-xpython/scripts/vendor.py4
-rw-r--r--python/setup.cfg10
-rw-r--r--python/tests/minreqs.txt33
-rwxr-xr-xpython/tests/qapi-flake8.sh6
-rwxr-xr-xpython/tests/qapi-isort.sh8
-rwxr-xr-xpython/tests/qapi-mypy.sh4
-rwxr-xr-xpython/tests/qapi-pylint.sh8
-rw-r--r--python/wheels/meson-1.2.3-py3-none-any.whlbin964928 -> 0 bytes
-rw-r--r--python/wheels/meson-1.8.1-py3-none-any.whlbin0 -> 1013001 bytes
-rw-r--r--python/wheels/pycotap-1.3.1-py3-none-any.whlbin0 -> 5119 bytes
-rw-r--r--pythondeps.toml11
-rw-r--r--qapi/acpi.json2
-rw-r--r--qapi/audio.json34
-rw-r--r--qapi/block-core.json451
-rw-r--r--qapi/block-export.json86
-rw-r--r--qapi/block.json16
-rw-r--r--qapi/char.json133
-rw-r--r--qapi/common.json28
-rw-r--r--qapi/control.json16
-rw-r--r--qapi/crypto.json149
-rw-r--r--qapi/cryptodev.json21
-rw-r--r--qapi/cxl.json51
-rw-r--r--qapi/dump.json8
-rw-r--r--qapi/ebpf.json2
-rw-r--r--qapi/introspect.json28
-rw-r--r--qapi/job.json52
-rw-r--r--qapi/machine-common.json99
-rw-r--r--qapi/machine-s390x.json121
-rw-r--r--qapi/machine-target.json518
-rw-r--r--qapi/machine.json459
-rw-r--r--qapi/meson.build34
-rw-r--r--qapi/migration.json273
-rw-r--r--qapi/misc-arm.json49
-rw-r--r--qapi/misc-i386.json486
-rw-r--r--qapi/misc-target.json528
-rw-r--r--qapi/misc.json37
-rw-r--r--qapi/net.json98
-rw-r--r--qapi/pci.json3
-rw-r--r--qapi/pragma.json17
-rw-r--r--qapi/qapi-clone-visitor.c2
-rw-r--r--qapi/qapi-dealloc-visitor.c2
-rw-r--r--qapi/qapi-forward-visitor.c22
-rw-r--r--qapi/qapi-schema.json37
-rw-r--r--qapi/qapi-util.c6
-rw-r--r--qapi/qapi-visit-core.c12
-rw-r--r--qapi/qdev.json44
-rw-r--r--qapi/qmp-dispatch.c10
-rw-r--r--qapi/qmp-event.c6
-rw-r--r--qapi/qmp-registry.c6
-rw-r--r--qapi/qobject-input-visitor.c18
-rw-r--r--qapi/qobject-output-visitor.c18
-rw-r--r--qapi/qom.json141
-rw-r--r--qapi/rocker.json14
-rw-r--r--qapi/run-state.json55
-rw-r--r--qapi/sockets.json28
-rw-r--r--qapi/stats.json6
-rw-r--r--qapi/string-input-visitor.c2
-rw-r--r--qapi/transaction.json8
-rw-r--r--qapi/uefi.json64
-rw-r--r--qapi/ui.json48
-rw-r--r--qapi/vfio.json17
-rw-r--r--qapi/virtio.json22
-rw-r--r--qemu-img.c16
-rw-r--r--qemu-io-cmds.c4
-rw-r--r--qemu-io.c6
-rw-r--r--qemu-keymap.c10
-rw-r--r--qemu-nbd.c68
-rw-r--r--qemu-options.hx528
-rw-r--r--qemu.nsi6
-rw-r--r--qga/commands-bsd.c25
-rw-r--r--qga/commands-common.h9
-rw-r--r--qga/commands-linux.c1958
-rw-r--r--qga/commands-posix.c2052
-rw-r--r--qga/commands-win32.c225
-rw-r--r--qga/commands-windows-ssh.c2
-rw-r--r--qga/guest-agent-core.h12
-rw-r--r--qga/main.c302
-rw-r--r--qga/meson.build2
-rw-r--r--qga/qapi-schema.json294
-rw-r--r--qga/vss-win32/install.cpp6
-rw-r--r--qga/vss-win32/provider.cpp5
-rw-r--r--qga/vss-win32/requester.cpp8
-rw-r--r--qobject/block-qdict.c8
-rw-r--r--qobject/json-parser-int.h2
-rw-r--r--qobject/json-parser.c12
-rw-r--r--qobject/json-writer.c2
-rw-r--r--qobject/qbool.c2
-rw-r--r--qobject/qdict.c10
-rw-r--r--qobject/qjson.c16
-rw-r--r--qobject/qlist.c10
-rw-r--r--qobject/qlit.c16
-rw-r--r--qobject/qnull.c2
-rw-r--r--qobject/qnum.c14
-rw-r--r--qobject/qobject-internal.h2
-rw-r--r--qobject/qobject.c12
-rw-r--r--qobject/qstring.c2
-rw-r--r--qom/container.c27
-rw-r--r--qom/object.c177
-rw-r--r--qom/object_interfaces.c34
-rw-r--r--qom/qom-hmp-cmds.c4
-rw-r--r--qom/qom-qmp-cmds.c27
-rw-r--r--replay/replay-audio.c2
-rw-r--r--replay/replay-char.c2
-rw-r--r--replay/replay-debugging.c6
-rw-r--r--replay/replay-events.c11
-rw-r--r--replay/replay-input.c2
-rw-r--r--replay/replay-internal.c4
-rw-r--r--replay/replay-net.c2
-rw-r--r--replay/replay-random.c2
-rw-r--r--replay/replay-snapshot.c4
-rw-r--r--replay/replay-time.c2
-rw-r--r--replay/replay.c31
-rw-r--r--replay/stubs-system.c2
-rw-r--r--roms/Makefile11
m---------roms/edk20
-rw-r--r--roms/edk2-build.config13
-rw-r--r--roms/edk2-version4
m---------roms/openbios0
m---------roms/opensbi0
m---------roms/seabios0
m---------roms/seabios-hppa0
m---------roms/skiboot0
m---------roms/vbootrom0
-rw-r--r--rust/.gitignore3
-rw-r--r--rust/Cargo.lock177
-rw-r--r--rust/Cargo.toml101
-rw-r--r--rust/Kconfig1
-rw-r--r--rust/bits/Cargo.toml19
-rw-r--r--rust/bits/meson.build16
-rw-r--r--rust/bits/src/lib.rs443
-rw-r--r--rust/hw/Kconfig3
-rw-r--r--rust/hw/char/Kconfig2
-rw-r--r--rust/hw/char/meson.build1
-rw-r--r--rust/hw/char/pl011/Cargo.toml26
-rw-r--r--rust/hw/char/pl011/meson.build21
-rw-r--r--rust/hw/char/pl011/src/device.rs714
-rw-r--r--rust/hw/char/pl011/src/device_class.rs103
-rw-r--r--rust/hw/char/pl011/src/lib.rs22
-rw-r--r--rust/hw/char/pl011/src/registers.rs350
-rw-r--r--rust/hw/meson.build2
-rw-r--r--rust/hw/timer/Kconfig2
-rw-r--r--rust/hw/timer/hpet/Cargo.toml21
-rw-r--r--rust/hw/timer/hpet/meson.build18
-rw-r--r--rust/hw/timer/hpet/src/device.rs1050
-rw-r--r--rust/hw/timer/hpet/src/fw_cfg.rs68
-rw-r--r--rust/hw/timer/hpet/src/lib.rs13
-rw-r--r--rust/hw/timer/meson.build1
-rw-r--r--rust/meson.build39
-rw-r--r--rust/qemu-api-macros/Cargo.toml24
-rw-r--r--rust/qemu-api-macros/meson.build19
-rw-r--r--rust/qemu-api-macros/src/bits.rs229
-rw-r--r--rust/qemu-api-macros/src/lib.rs262
-rw-r--r--rust/qemu-api-macros/src/utils.rs26
-rw-r--r--rust/qemu-api/.gitignore2
-rw-r--r--rust/qemu-api/Cargo.toml28
-rw-r--r--rust/qemu-api/README.md19
-rw-r--r--rust/qemu-api/build.rs41
-rw-r--r--rust/qemu-api/meson.build114
-rw-r--r--rust/qemu-api/src/assertions.rs152
-rw-r--r--rust/qemu-api/src/bindings.rs56
-rw-r--r--rust/qemu-api/src/bitops.rs119
-rw-r--r--rust/qemu-api/src/callbacks.rs241
-rw-r--r--rust/qemu-api/src/cell.rs1101
-rw-r--r--rust/qemu-api/src/chardev.rs260
-rw-r--r--rust/qemu-api/src/errno.rs345
-rw-r--r--rust/qemu-api/src/error.rs416
-rw-r--r--rust/qemu-api/src/irq.rs115
-rw-r--r--rust/qemu-api/src/lib.rs170
-rw-r--r--rust/qemu-api/src/log.rs73
-rw-r--r--rust/qemu-api/src/memory.rs204
-rw-r--r--rust/qemu-api/src/module.rs43
-rw-r--r--rust/qemu-api/src/prelude.rs31
-rw-r--r--rust/qemu-api/src/qdev.rs410
-rw-r--r--rust/qemu-api/src/qom.rs950
-rw-r--r--rust/qemu-api/src/sysbus.rs122
-rw-r--r--rust/qemu-api/src/timer.rs125
-rw-r--r--rust/qemu-api/src/uninit.rs85
-rw-r--r--rust/qemu-api/src/vmstate.rs604
-rw-r--r--rust/qemu-api/src/zeroable.rs37
-rw-r--r--rust/qemu-api/tests/tests.rs180
-rw-r--r--rust/qemu-api/tests/vmstate_tests.rs505
-rw-r--r--rust/qemu-api/wrapper.h71
-rw-r--r--rust/rustfmt.toml7
-rw-r--r--scripts/analyze-inclusions2
-rwxr-xr-xscripts/analyze-migration.py151
-rwxr-xr-xscripts/archive-source.sh30
-rwxr-xr-xscripts/checkpatch.pl433
-rw-r--r--scripts/ci/gitlab-ci-section29
-rw-r--r--scripts/ci/setup/gitlab-runner.yml39
-rw-r--r--scripts/ci/setup/ubuntu/build-environment.yml2
-rw-r--r--scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml4
-rw-r--r--scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml4
-rwxr-xr-xscripts/clean-includes6
-rw-r--r--scripts/cocci-macro-file.h6
-rw-r--r--scripts/coccinelle/device-reset.cocci30
-rw-r--r--scripts/codeconverter/codeconverter/qom_type_info.py23
-rw-r--r--scripts/codeconverter/codeconverter/test_regexps.py16
-rw-r--r--scripts/coverity-scan/COMPONENTS.md7
-rwxr-xr-xscripts/device-crash-test3
-rwxr-xr-xscripts/gensyscalls.sh103
-rwxr-xr-xscripts/kernel-doc2
-rwxr-xr-xscripts/make-release39
-rw-r--r--scripts/meson-buildoptions.py10
-rw-r--r--scripts/meson-buildoptions.sh42
-rw-r--r--scripts/minikconf.py2
-rw-r--r--scripts/modinfo-collect.py23
-rw-r--r--scripts/mtest2make.py2
-rw-r--r--scripts/nsis.py12
-rw-r--r--scripts/probe-gdb-support.py76
-rw-r--r--scripts/qapi/.flake83
-rw-r--r--scripts/qapi/.isort.cfg7
-rw-r--r--scripts/qapi/backend.py65
-rw-r--r--scripts/qapi/commands.py11
-rw-r--r--scripts/qapi/common.py44
-rw-r--r--scripts/qapi/events.py2
-rw-r--r--scripts/qapi/features.py48
-rw-r--r--scripts/qapi/gen.py9
-rw-r--r--scripts/qapi/introspect.py14
-rw-r--r--scripts/qapi/main.py72
-rw-r--r--scripts/qapi/mypy.ini4
-rw-r--r--scripts/qapi/parser.py128
-rw-r--r--scripts/qapi/pylintrc2
-rw-r--r--scripts/qapi/schema.py42
-rw-r--r--scripts/qapi/source.py4
-rw-r--r--scripts/qapi/types.py23
-rw-r--r--scripts/qapi/visit.py21
-rwxr-xr-xscripts/qcow2-to-stdout.py449
-rwxr-xr-xscripts/qemu-binfmt-conf.sh78
-rw-r--r--scripts/qemu-gdb.py2
-rwxr-xr-xscripts/qemu-guest-agent/fsfreeze-hook36
-rwxr-xr-xscripts/qemu-plugin-symbols.py45
-rwxr-xr-xscripts/qemu-trace-stap6
-rw-r--r--scripts/qemugdb/coroutine.py102
-rw-r--r--scripts/qom-cast-macro-clean-cocci-gen.py7
-rwxr-xr-xscripts/rdma-migration-helper.sh109
-rwxr-xr-xscripts/replay-dump.py167
-rwxr-xr-xscripts/rust/rust_root_crate.sh13
-rw-r--r--scripts/rust/rustc_args.py232
-rw-r--r--scripts/symlink-install-tree.py3
-rw-r--r--scripts/tracetool/__init__.py15
-rw-r--r--scripts/tracetool/backend/ftrace.py4
-rw-r--r--scripts/tracetool/backend/log.py4
-rw-r--r--scripts/tracetool/backend/simple.py23
-rw-r--r--scripts/tracetool/backend/syslog.py4
-rwxr-xr-xscripts/update-linux-headers.sh10
-rwxr-xr-xscripts/update-syscalltbl.sh5
-rwxr-xr-xscripts/vmstate-static-checker.py2
-rw-r--r--scsi/pr-manager-helper.c2
-rw-r--r--scsi/pr-manager.c6
-rw-r--r--scsi/qemu-pr-helper.c2
-rw-r--r--scsi/utils.c13
-rw-r--r--semihosting/Kconfig1
-rw-r--r--semihosting/arm-compat-semi.c1
-rw-r--r--semihosting/console.c3
-rw-r--r--semihosting/meson.build12
-rw-r--r--semihosting/stubs-all.c6
-rw-r--r--semihosting/stubs-system.c6
-rw-r--r--semihosting/syscalls.c2
-rw-r--r--semihosting/uaccess.c5
-rw-r--r--semihosting/user.c21
-rw-r--r--stats/stats-hmp-cmds.c2
-rw-r--r--stats/stats-qmp-cmds.c2
-rw-r--r--storage-daemon/qapi/qapi-schema.json24
-rw-r--r--storage-daemon/qemu-storage-daemon.c6
-rw-r--r--stubs/blk-commit-all.c2
-rw-r--r--stubs/change-state-handler.c2
-rw-r--r--stubs/cpu-get-clock.c2
-rw-r--r--stubs/cpu-synchronize-state.c2
-rw-r--r--stubs/cpus-virtual-clock.c2
-rw-r--r--stubs/dump.c2
-rw-r--r--stubs/get-vm-name.c2
-rw-r--r--stubs/icount.c2
-rw-r--r--stubs/iothread-lock.c23
-rw-r--r--stubs/meson.build17
-rw-r--r--stubs/monitor-arm-gic.c12
-rw-r--r--stubs/monitor-cpu-s390x-kvm.c22
-rw-r--r--stubs/monitor-cpu-s390x.c23
-rw-r--r--stubs/monitor-cpu.c21
-rw-r--r--stubs/monitor-i386-rtc.c12
-rw-r--r--stubs/monitor-i386-sev.c36
-rw-r--r--stubs/monitor-i386-sgx.c17
-rw-r--r--stubs/monitor-i386-xen.c16
-rw-r--r--stubs/qemu-timer-notify-cb.c2
-rw-r--r--stubs/qmp-command-available.c2
-rw-r--r--stubs/qmp-quit.c2
-rw-r--r--stubs/qtest.c2
-rw-r--r--stubs/ram-block.c2
-rw-r--r--stubs/replay-mode.c2
-rw-r--r--stubs/replay-tools.c2
-rw-r--r--stubs/runstate-check.c2
-rw-r--r--stubs/vm-stop.c2
-rw-r--r--stubs/vmstate.c7
-rw-r--r--subprojects/.gitignore14
-rw-r--r--subprojects/anyhow-1-rs.wrap7
-rw-r--r--subprojects/arbitrary-int-1-rs.wrap10
-rw-r--r--subprojects/bilge-0.2-rs.wrap10
-rw-r--r--subprojects/bilge-impl-0.2-rs.wrap10
-rw-r--r--subprojects/either-1-rs.wrap10
-rw-r--r--subprojects/foreign-0.3-rs.wrap7
-rw-r--r--subprojects/itertools-0.11-rs.wrap10
-rw-r--r--subprojects/libc-0.2-rs.wrap7
-rw-r--r--subprojects/libvhost-user/libvhost-user.h6
-rw-r--r--subprojects/packagefiles/anyhow-1-rs/meson.build33
-rw-r--r--subprojects/packagefiles/arbitrary-int-1-rs/meson.build21
-rw-r--r--subprojects/packagefiles/bilge-0.2-rs/meson.build31
-rw-r--r--subprojects/packagefiles/bilge-impl-0.2-rs/meson.build47
-rw-r--r--subprojects/packagefiles/either-1-rs/meson.build26
-rw-r--r--subprojects/packagefiles/foreign-0.3-rs/meson.build26
-rw-r--r--subprojects/packagefiles/itertools-0.11-rs/meson.build32
-rw-r--r--subprojects/packagefiles/libc-0.2-rs/meson.build37
-rw-r--r--subprojects/packagefiles/proc-macro-error-1-rs/meson.build42
-rw-r--r--subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build34
-rw-r--r--subprojects/packagefiles/proc-macro2-1-rs/meson.build35
-rw-r--r--subprojects/packagefiles/quote-1-rs/meson.build31
-rw-r--r--subprojects/packagefiles/syn-2-rs/meson.build43
-rw-r--r--subprojects/packagefiles/unicode-ident-1-rs/meson.build22
-rw-r--r--subprojects/proc-macro-error-1-rs.wrap10
-rw-r--r--subprojects/proc-macro-error-attr-1-rs.wrap10
-rw-r--r--subprojects/proc-macro2-1-rs.wrap10
-rw-r--r--subprojects/quote-1-rs.wrap10
-rw-r--r--subprojects/syn-2-rs.wrap10
-rw-r--r--subprojects/unicode-ident-1-rs.wrap10
-rw-r--r--system/arch_init.c26
-rw-r--r--system/async-teardown.c37
-rw-r--r--system/balloon.c4
-rw-r--r--system/bootdevice.c4
-rw-r--r--system/cpu-throttle.c128
-rw-r--r--system/cpu-timers.c14
-rw-r--r--system/cpus.c55
-rw-r--r--system/datadir.c5
-rw-r--r--system/device_tree-stub.c5
-rw-r--r--system/device_tree.c26
-rw-r--r--system/dirtylimit.c15
-rw-r--r--system/dma-helpers.c16
-rw-r--r--system/globals-target.c24
-rw-r--r--system/globals.c18
-rw-r--r--system/ioport.c9
-rw-r--r--system/main.c57
-rw-r--r--system/memory-internal.h57
-rw-r--r--system/memory.c191
-rw-r--r--system/memory_ldst.c.inc18
-rw-r--r--system/memory_mapping.c10
-rw-r--r--system/meson.build18
-rw-r--r--system/physmem.c637
-rw-r--r--system/qdev-monitor.c201
-rw-r--r--system/qemu-seccomp.c6
-rw-r--r--system/qtest.c94
-rw-r--r--system/ram-block-attributes.c444
-rw-r--r--system/rtc.c8
-rw-r--r--system/runstate-action.c4
-rw-r--r--system/runstate-hmp-cmds.c2
-rw-r--r--system/runstate.c161
-rw-r--r--system/tpm.c4
-rw-r--r--system/trace-events11
-rw-r--r--system/vl.c393
-rw-r--r--system/watchpoint.c4
-rw-r--r--target-info-stub.c25
-rw-r--r--target-info.c31
-rw-r--r--target/Kconfig1
-rw-r--r--target/alpha/cpu-param.h8
-rw-r--r--target/alpha/cpu.c55
-rw-r--r--target/alpha/cpu.h19
-rw-r--r--target/alpha/fpu_helper.c14
-rw-r--r--target/alpha/gdbstub.c2
-rw-r--r--target/alpha/helper.c4
-rw-r--r--target/alpha/int_helper.c1
-rw-r--r--target/alpha/machine.c2
-rw-r--r--target/alpha/mem_helper.c3
-rw-r--r--target/alpha/sys_helper.c6
-rw-r--r--target/alpha/translate.c9
-rw-r--r--target/alpha/vax_helper.c1
-rw-r--r--target/arm/arch_dump.c9
-rw-r--r--target/arm/arm-powerctl.c2
-rw-r--r--target/arm/arm-qmp-cmds.c11
-rw-r--r--target/arm/cpregs.h124
-rw-r--r--target/arm/cpu-features.h76
-rw-r--r--target/arm/cpu-param.h18
-rw-r--r--target/arm/cpu-qom.h5
-rw-r--r--target/arm/cpu.c286
-rw-r--r--target/arm/cpu.h405
-rw-r--r--target/arm/cpu32-stubs.c26
-rw-r--r--target/arm/cpu64.c165
-rw-r--r--target/arm/debug_helper.c22
-rw-r--r--target/arm/gdbstub.c30
-rw-r--r--target/arm/gdbstub64.c42
-rw-r--r--target/arm/gtimer.h14
-rw-r--r--target/arm/helper.c2142
-rw-r--r--target/arm/helper.h1101
-rw-r--r--target/arm/hvf-stub.c20
-rw-r--r--target/arm/hvf/hvf.c432
-rw-r--r--target/arm/hvf/trace.h1
-rw-r--r--target/arm/hvf_arm.h5
-rw-r--r--target/arm/hyp_gdbstub.c15
-rw-r--r--target/arm/internals.h261
-rw-r--r--target/arm/kvm-stub.c97
-rw-r--r--target/arm/kvm.c201
-rw-r--r--target/arm/kvm_arm.h110
-rw-r--r--target/arm/machine.c19
-rw-r--r--target/arm/meson.build45
-rw-r--r--target/arm/ptw.c351
-rw-r--r--target/arm/tcg-stubs.c27
-rw-r--r--target/arm/tcg/a64.decode762
-rw-r--r--target/arm/tcg/arith_helper.c297
-rw-r--r--target/arm/tcg/cpu-v7m.c19
-rw-r--r--target/arm/tcg/cpu32.c44
-rw-r--r--target/arm/tcg/cpu64.c122
-rw-r--r--target/arm/tcg/crypto_helper.c6
-rw-r--r--target/arm/tcg/gengvec.c490
-rw-r--r--target/arm/tcg/helper-a64.c463
-rw-r--r--target/arm/tcg/helper-a64.h131
-rw-r--r--target/arm/tcg/helper-sme.h10
-rw-r--r--target/arm/tcg/helper-sve.h546
-rw-r--r--target/arm/tcg/helper.h1153
-rw-r--r--target/arm/tcg/hflags.c147
-rw-r--r--target/arm/tcg/iwmmxt_helper.c4
-rw-r--r--target/arm/tcg/m_helper.c11
-rw-r--r--target/arm/tcg/meson.build28
-rw-r--r--target/arm/tcg/mte_helper.c18
-rw-r--r--target/arm/tcg/mve_helper.c47
-rw-r--r--target/arm/tcg/neon-dp.decode6
-rw-r--r--target/arm/tcg/neon_helper.c209
-rw-r--r--target/arm/tcg/op_addsub.c.inc (renamed from target/arm/op_addsub.h)0
-rw-r--r--target/arm/tcg/op_helper.c111
-rw-r--r--target/arm/tcg/pauth_helper.c3
-rw-r--r--target/arm/tcg/psci.c2
-rw-r--r--target/arm/tcg/sme_helper.c153
-rw-r--r--target/arm/tcg/sve_helper.c520
-rw-r--r--target/arm/tcg/sve_ldst_internal.h2
-rw-r--r--target/arm/tcg/tlb-insns.c1306
-rw-r--r--target/arm/tcg/tlb_helper.c53
-rw-r--r--target/arm/tcg/translate-a64.c7956
-rw-r--r--target/arm/tcg/translate-a64.h15
-rw-r--r--target/arm/tcg/translate-neon.c559
-rw-r--r--target/arm/tcg/translate-sme.c42
-rw-r--r--target/arm/tcg/translate-sve.c502
-rw-r--r--target/arm/tcg/translate-vfp.c86
-rw-r--r--target/arm/tcg/translate.c180
-rw-r--r--target/arm/tcg/translate.h107
-rw-r--r--target/arm/tcg/vec_helper.c752
-rw-r--r--target/arm/tcg/vec_internal.h74
-rw-r--r--target/arm/tcg/vfp.decode12
-rw-r--r--target/arm/tcg/vfp_helper.c1370
-rw-r--r--target/arm/vfp_fpscr.c155
-rw-r--r--target/arm/vfp_helper.c1304
-rw-r--r--target/avr/cpu-param.h11
-rw-r--r--target/avr/cpu.c57
-rw-r--r--target/avr/cpu.h43
-rw-r--r--target/avr/disas.c21
-rw-r--r--target/avr/gdbstub.c4
-rw-r--r--target/avr/helper.c254
-rw-r--r--target/avr/helper.h3
-rw-r--r--target/avr/insn.decode7
-rw-r--r--target/avr/translate.c53
-rw-r--r--target/cris/Kconfig2
-rw-r--r--target/cris/cpu-param.h16
-rw-r--r--target/cris/cpu-qom.h32
-rw-r--r--target/cris/cpu.c323
-rw-r--r--target/cris/cpu.h286
-rw-r--r--target/cris/crisv10-decode.h112
-rw-r--r--target/cris/crisv32-decode.h133
-rw-r--r--target/cris/gdbstub.c127
-rw-r--r--target/cris/helper.c287
-rw-r--r--target/cris/helper.h23
-rw-r--r--target/cris/machine.c93
-rw-r--r--target/cris/meson.build17
-rw-r--r--target/cris/mmu.c356
-rw-r--r--target/cris/mmu.h22
-rw-r--r--target/cris/op_helper.c580
-rw-r--r--target/cris/opcode-cris.h355
-rw-r--r--target/cris/translate.c3252
-rw-r--r--target/cris/translate_v10.c.inc1262
-rw-r--r--target/hexagon/README9
-rw-r--r--target/hexagon/cpu-param.h3
-rw-r--r--target/hexagon/cpu-qom.h1
-rw-r--r--target/hexagon/cpu.c62
-rw-r--r--target/hexagon/cpu.h30
-rw-r--r--target/hexagon/cpu_bits.h23
-rw-r--r--target/hexagon/fma_emu.c470
-rw-r--r--target/hexagon/fma_emu.h3
-rw-r--r--target/hexagon/gdbstub.c27
-rwxr-xr-xtarget/hexagon/gen_analyze_funcs.py6
-rwxr-xr-xtarget/hexagon/gen_decodetree.py19
-rwxr-xr-xtarget/hexagon/gen_helper_funcs.py7
-rwxr-xr-xtarget/hexagon/gen_helper_protos.py7
-rw-r--r--target/hexagon/gen_idef_parser_funcs.py13
-rwxr-xr-xtarget/hexagon/gen_op_attribs.py11
-rwxr-xr-xtarget/hexagon/gen_opcodes_def.py11
-rwxr-xr-xtarget/hexagon/gen_printinsn.py11
-rw-r--r--target/hexagon/gen_tcg.h2
-rwxr-xr-xtarget/hexagon/gen_tcg_func_table.py11
-rwxr-xr-xtarget/hexagon/gen_tcg_funcs.py9
-rwxr-xr-xtarget/hexagon/gen_trans_funcs.py18
-rw-r--r--target/hexagon/genptr.c15
-rw-r--r--target/hexagon/helper.h3
-rwxr-xr-xtarget/hexagon/hex_common.py33
-rw-r--r--target/hexagon/idef-parser/README.rst4
-rw-r--r--target/hexagon/idef-parser/idef-parser.y1
-rw-r--r--target/hexagon/idef-parser/macros.h.inc (renamed from target/hexagon/idef-parser/macros.inc)0
-rw-r--r--target/hexagon/idef-parser/parser-helpers.c2
-rw-r--r--target/hexagon/internal.h11
-rw-r--r--target/hexagon/macros.h11
-rw-r--r--target/hexagon/meson.build6
-rw-r--r--target/hexagon/mmvec/macros.h38
-rw-r--r--target/hexagon/op_helper.c259
-rw-r--r--target/hexagon/translate.c84
-rw-r--r--target/hexagon/translate.h2
-rw-r--r--target/hppa/cpu-param.h12
-rw-r--r--target/hppa/cpu.c76
-rw-r--r--target/hppa/cpu.h62
-rw-r--r--target/hppa/fpu_helper.c51
-rw-r--r--target/hppa/helper.c28
-rw-r--r--target/hppa/helper.h1
-rw-r--r--target/hppa/insns.decode6
-rw-r--r--target/hppa/int_helper.c16
-rw-r--r--target/hppa/machine.c7
-rw-r--r--target/hppa/mem_helper.c65
-rw-r--r--target/hppa/op_helper.c9
-rw-r--r--target/hppa/sys_helper.c9
-rw-r--r--target/hppa/translate.c58
-rw-r--r--target/i386/arch_dump.c4
-rw-r--r--target/i386/arch_memory_mapping.c3
-rw-r--r--target/i386/confidential-guest.c2
-rw-r--r--target/i386/confidential-guest.h48
-rw-r--r--target/i386/cpu-apic.c10
-rw-r--r--target/i386/cpu-dump.c18
-rw-r--r--target/i386/cpu-internal.h2
-rw-r--r--target/i386/cpu-param.h7
-rw-r--r--target/i386/cpu-sysemu.c311
-rw-r--r--target/i386/cpu-system.c322
-rw-r--r--target/i386/cpu.c1667
-rw-r--r--target/i386/cpu.h348
-rw-r--r--target/i386/emulate/meson.build5
-rw-r--r--target/i386/emulate/panic.h45
-rw-r--r--target/i386/emulate/x86.h289
-rw-r--r--target/i386/emulate/x86_decode.c2173
-rw-r--r--target/i386/emulate/x86_decode.h326
-rw-r--r--target/i386/emulate/x86_emu.c1264
-rw-r--r--target/i386/emulate/x86_emu.h58
-rw-r--r--target/i386/emulate/x86_flags.c273
-rw-r--r--target/i386/emulate/x86_flags.h71
-rw-r--r--target/i386/gdbstub.c102
-rw-r--r--target/i386/helper.c11
-rw-r--r--target/i386/helper.h1
-rw-r--r--target/i386/host-cpu.c32
-rw-r--r--target/i386/host-cpu.h1
-rw-r--r--target/i386/hvf/hvf-cpu.c8
-rw-r--r--target/i386/hvf/hvf-i386.h4
-rw-r--r--target/i386/hvf/hvf.c355
-rw-r--r--target/i386/hvf/meson.build3
-rw-r--r--target/i386/hvf/vmx.h9
-rw-r--r--target/i386/hvf/x86.c8
-rw-r--r--target/i386/hvf/x86.h289
-rw-r--r--target/i386/hvf/x86_cpuid.c58
-rw-r--r--target/i386/hvf/x86_decode.c2196
-rw-r--r--target/i386/hvf/x86_decode.h325
-rw-r--r--target/i386/hvf/x86_descr.c8
-rw-r--r--target/i386/hvf/x86_descr.h8
-rw-r--r--target/i386/hvf/x86_emu.c1487
-rw-r--r--target/i386/hvf/x86_emu.h50
-rw-r--r--target/i386/hvf/x86_flags.c313
-rw-r--r--target/i386/hvf/x86_flags.h81
-rw-r--r--target/i386/hvf/x86_mmu.c32
-rw-r--r--target/i386/hvf/x86_task.c44
-rw-r--r--target/i386/hvf/x86_task.h2
-rw-r--r--target/i386/hvf/x86hvf.c4
-rw-r--r--target/i386/hvf/x86hvf.h3
-rw-r--r--target/i386/kvm/hyperv-proto.h12
-rw-r--r--target/i386/kvm/hyperv-stub.c5
-rw-r--r--target/i386/kvm/hyperv.c2
-rw-r--r--target/i386/kvm/hyperv.h2
-rw-r--r--target/i386/kvm/kvm-cpu.c10
-rw-r--r--target/i386/kvm/kvm.c1040
-rw-r--r--target/i386/kvm/kvm_i386.h37
-rw-r--r--target/i386/kvm/meson.build3
-rw-r--r--target/i386/kvm/tdx-quote-generator.c300
-rw-r--r--target/i386/kvm/tdx-quote-generator.h82
-rw-r--r--target/i386/kvm/tdx-stub.c28
-rw-r--r--target/i386/kvm/tdx.c1487
-rw-r--r--target/i386/kvm/tdx.h84
-rw-r--r--target/i386/kvm/vmsr_energy.c344
-rw-r--r--target/i386/kvm/vmsr_energy.h99
-rw-r--r--target/i386/kvm/xen-emu.c9
-rw-r--r--target/i386/machine.c35
-rw-r--r--target/i386/meson.build6
-rw-r--r--target/i386/monitor.c3
-rw-r--r--target/i386/nvmm/nvmm-accel-ops.c9
-rw-r--r--target/i386/nvmm/nvmm-accel-ops.h2
-rw-r--r--target/i386/nvmm/nvmm-all.c12
-rw-r--r--target/i386/ops_sse.h16
-rw-r--r--target/i386/sev-sysemu-stub.c73
-rw-r--r--target/i386/sev-system-stub.c41
-rw-r--r--target/i386/sev.c39
-rw-r--r--target/i386/sev.h29
-rw-r--r--target/i386/tcg/access.c32
-rw-r--r--target/i386/tcg/cc_helper.c69
-rw-r--r--target/i386/tcg/cc_helper_template.h.inc171
-rw-r--r--target/i386/tcg/decode-new.c.inc203
-rw-r--r--target/i386/tcg/decode-new.h19
-rw-r--r--target/i386/tcg/emit.c.inc559
-rw-r--r--target/i386/tcg/excp_helper.c3
-rw-r--r--target/i386/tcg/fpu_helper.c215
-rw-r--r--target/i386/tcg/helper-tcg.h13
-rw-r--r--target/i386/tcg/int_helper.c5
-rw-r--r--target/i386/tcg/mem_helper.c3
-rw-r--r--target/i386/tcg/meson.build2
-rw-r--r--target/i386/tcg/misc_helper.c2
-rw-r--r--target/i386/tcg/mpx_helper.c4
-rw-r--r--target/i386/tcg/seg_helper.c125
-rw-r--r--target/i386/tcg/seg_helper.h10
-rw-r--r--target/i386/tcg/sysemu/bpt_helper.c316
-rw-r--r--target/i386/tcg/sysemu/excp_helper.c644
-rw-r--r--target/i386/tcg/sysemu/fpu_helper.c63
-rw-r--r--target/i386/tcg/sysemu/misc_helper.c544
-rw-r--r--target/i386/tcg/sysemu/seg_helper.c253
-rw-r--r--target/i386/tcg/sysemu/smm_helper.c319
-rw-r--r--target/i386/tcg/sysemu/svm_helper.c926
-rw-r--r--target/i386/tcg/sysemu/tcg-cpu.c83
-rw-r--r--target/i386/tcg/system/bpt_helper.c316
-rw-r--r--target/i386/tcg/system/excp_helper.c662
-rw-r--r--target/i386/tcg/system/fpu_helper.c63
-rw-r--r--target/i386/tcg/system/meson.build (renamed from target/i386/tcg/sysemu/meson.build)0
-rw-r--r--target/i386/tcg/system/misc_helper.c544
-rw-r--r--target/i386/tcg/system/seg_helper.c253
-rw-r--r--target/i386/tcg/system/smm_helper.c319
-rw-r--r--target/i386/tcg/system/svm_helper.c926
-rw-r--r--target/i386/tcg/system/tcg-cpu.c84
-rw-r--r--target/i386/tcg/tcg-cpu.c86
-rw-r--r--target/i386/tcg/tcg-cpu.h6
-rw-r--r--target/i386/tcg/translate.c1129
-rw-r--r--target/i386/tcg/user/excp_helper.c1
-rw-r--r--target/i386/tcg/user/seg_helper.c3
-rw-r--r--target/i386/whpx/whpx-accel-ops.c9
-rw-r--r--target/i386/whpx/whpx-accel-ops.h2
-rw-r--r--target/i386/whpx/whpx-all.c23
-rw-r--r--target/i386/whpx/whpx-apic.c8
-rw-r--r--target/i386/xsave_helper.c1
-rw-r--r--target/loongarch/arch_dump.c163
-rw-r--r--target/loongarch/cpu-param.h3
-rw-r--r--target/loongarch/cpu.c312
-rw-r--r--target/loongarch/cpu.h75
-rw-r--r--target/loongarch/cpu_helper.c235
-rw-r--r--target/loongarch/csr.c129
-rw-r--r--target/loongarch/csr.h29
-rw-r--r--target/loongarch/gdbstub.c45
-rw-r--r--target/loongarch/helper.h718
-rw-r--r--target/loongarch/internals.h19
-rw-r--r--target/loongarch/kvm/kvm.c571
-rw-r--r--target/loongarch/kvm/kvm_loongarch.h2
-rw-r--r--target/loongarch/loongarch-qmp-cmds.c6
-rw-r--r--target/loongarch/machine.c32
-rw-r--r--target/loongarch/meson.build2
-rw-r--r--target/loongarch/tcg/csr_helper.c45
-rw-r--r--target/loongarch/tcg/fpu_helper.c18
-rw-r--r--target/loongarch/tcg/helper.h722
-rw-r--r--target/loongarch/tcg/insn_trans/trans_atomic.c.inc2
-rw-r--r--target/loongarch/tcg/insn_trans/trans_branch.c.inc4
-rw-r--r--target/loongarch/tcg/insn_trans/trans_extra.c.inc16
-rw-r--r--target/loongarch/tcg/insn_trans/trans_fcmp.c.inc25
-rw-r--r--target/loongarch/tcg/insn_trans/trans_privileged.c.inc162
-rw-r--r--target/loongarch/tcg/insn_trans/trans_vec.c.inc33
-rw-r--r--target/loongarch/tcg/iocsr_helper.c3
-rw-r--r--target/loongarch/tcg/op_helper.c5
-rw-r--r--target/loongarch/tcg/tcg_loongarch.h21
-rw-r--r--target/loongarch/tcg/tlb_helper.c230
-rw-r--r--target/loongarch/tcg/translate.c12
-rw-r--r--target/loongarch/tcg/vec_helper.c1
-rw-r--r--target/loongarch/translate.h1
-rw-r--r--target/m68k/Kconfig2
-rw-r--r--target/m68k/cpu-param.h5
-rw-r--r--target/m68k/cpu.c100
-rw-r--r--target/m68k/cpu.h24
-rw-r--r--target/m68k/fpu_helper.c12
-rw-r--r--target/m68k/gdbstub.c2
-rw-r--r--target/m68k/helper.c28
-rw-r--r--target/m68k/meson.build5
-rw-r--r--target/m68k/op_helper.c3
-rw-r--r--target/m68k/semihosting-stub.c18
-rw-r--r--target/m68k/softfloat.c47
-rw-r--r--target/m68k/translate.c20
-rw-r--r--target/meson.build1
-rw-r--r--target/microblaze/cpu-param.h7
-rw-r--r--target/microblaze/cpu.c83
-rw-r--r--target/microblaze/cpu.h19
-rw-r--r--target/microblaze/gdbstub.c5
-rw-r--r--target/microblaze/helper.c75
-rw-r--r--target/microblaze/helper.h22
-rw-r--r--target/microblaze/machine.c2
-rw-r--r--target/microblaze/mmu.c7
-rw-r--r--target/microblaze/op_helper.c107
-rw-r--r--target/microblaze/translate.c188
-rw-r--r--target/mips/Kconfig2
-rw-r--r--target/mips/cpu-defs.c.inc16
-rw-r--r--target/mips/cpu-param.h14
-rw-r--r--target/mips/cpu.c99
-rw-r--r--target/mips/cpu.h31
-rw-r--r--target/mips/fpu_helper.h48
-rw-r--r--target/mips/helper.h2
-rw-r--r--target/mips/internal.h14
-rw-r--r--target/mips/kvm.c15
-rw-r--r--target/mips/meson.build2
-rw-r--r--target/mips/mips-defs.h2
-rw-r--r--target/mips/msa.c36
-rw-r--r--target/mips/sysemu/cp0.c123
-rw-r--r--target/mips/sysemu/cp0_timer.c147
-rw-r--r--target/mips/sysemu/machine.c333
-rw-r--r--target/mips/sysemu/mips-qmp-cmds.c38
-rw-r--r--target/mips/sysemu/physaddr.c243
-rw-r--r--target/mips/system/addr.c (renamed from target/mips/sysemu/addr.c)0
-rw-r--r--target/mips/system/cp0.c123
-rw-r--r--target/mips/system/cp0_timer.c147
-rw-r--r--target/mips/system/machine.c336
-rw-r--r--target/mips/system/meson.build (renamed from target/mips/sysemu/meson.build)0
-rw-r--r--target/mips/system/mips-qmp-cmds.c48
-rw-r--r--target/mips/system/physaddr.c242
-rw-r--r--target/mips/tcg/exception.c2
-rw-r--r--target/mips/tcg/fpu_helper.c1
-rw-r--r--target/mips/tcg/godson2.decode27
-rw-r--r--target/mips/tcg/ldst_helper.c18
-rw-r--r--target/mips/tcg/loong-ext.decode28
-rw-r--r--target/mips/tcg/loong_translate.c271
-rw-r--r--target/mips/tcg/meson.build5
-rw-r--r--target/mips/tcg/micromips_translate.c.inc39
-rw-r--r--target/mips/tcg/mips16e_translate.c.inc118
-rw-r--r--target/mips/tcg/msa_helper.c57
-rw-r--r--target/mips/tcg/mxu_translate.c18
-rw-r--r--target/mips/tcg/nanomips_translate.c.inc162
-rw-r--r--target/mips/tcg/octeon_translate.c4
-rw-r--r--target/mips/tcg/op_helper.c1
-rw-r--r--target/mips/tcg/sysemu/cp0_helper.c1644
-rw-r--r--target/mips/tcg/sysemu/meson.build10
-rw-r--r--target/mips/tcg/sysemu/mips-semi.c361
-rw-r--r--target/mips/tcg/sysemu/special_helper.c173
-rw-r--r--target/mips/tcg/sysemu/tlb_helper.c1420
-rw-r--r--target/mips/tcg/sysemu_helper.h.inc185
-rw-r--r--target/mips/tcg/system/cp0_helper.c1633
-rw-r--r--target/mips/tcg/system/lcsr_helper.c (renamed from target/mips/tcg/sysemu/lcsr_helper.c)0
-rw-r--r--target/mips/tcg/system/meson.build12
-rw-r--r--target/mips/tcg/system/mips-semi.c377
-rw-r--r--target/mips/tcg/system/semihosting-stub.c16
-rw-r--r--target/mips/tcg/system/special_helper.c173
-rw-r--r--target/mips/tcg/system/tlb_helper.c1422
-rw-r--r--target/mips/tcg/system_helper.h.inc185
-rw-r--r--target/mips/tcg/tcg-internal.h4
-rw-r--r--target/mips/tcg/translate.c620
-rw-r--r--target/mips/tcg/translate.h21
-rw-r--r--target/mips/tcg/tx79_translate.c8
-rw-r--r--target/openrisc/cpu-param.h5
-rw-r--r--target/openrisc/cpu.c45
-rw-r--r--target/openrisc/cpu.h18
-rw-r--r--target/openrisc/exception.c1
-rw-r--r--target/openrisc/exception_helper.c1
-rw-r--r--target/openrisc/fpu_helper.c1
-rw-r--r--target/openrisc/gdbstub.c5
-rw-r--r--target/openrisc/interrupt.c1
-rw-r--r--target/openrisc/interrupt_helper.c1
-rw-r--r--target/openrisc/machine.c2
-rw-r--r--target/openrisc/mmu.c3
-rw-r--r--target/openrisc/sys_helper.c5
-rw-r--r--target/openrisc/translate.c12
-rw-r--r--target/ppc/arch_dump.c28
-rw-r--r--target/ppc/compat.c17
-rw-r--r--target/ppc/cpu-models.c5
-rw-r--r--target/ppc/cpu-models.h3
-rw-r--r--target/ppc/cpu-param.h7
-rw-r--r--target/ppc/cpu.c48
-rw-r--r--target/ppc/cpu.h118
-rw-r--r--target/ppc/cpu_init.c523
-rw-r--r--target/ppc/cpu_init.h91
-rw-r--r--target/ppc/dfp_helper.c8
-rw-r--r--target/ppc/excp_helper.c1105
-rw-r--r--target/ppc/fpu_helper.c64
-rw-r--r--target/ppc/helper.h102
-rw-r--r--target/ppc/helper_regs.c44
-rw-r--r--target/ppc/helper_regs.h2
-rw-r--r--target/ppc/insn32.decode98
-rw-r--r--target/ppc/int_helper.c22
-rw-r--r--target/ppc/internal.h11
-rw-r--r--target/ppc/kvm.c86
-rw-r--r--target/ppc/kvm_ppc.h17
-rw-r--r--target/ppc/machine.c84
-rw-r--r--target/ppc/mem_helper.c66
-rw-r--r--target/ppc/meson.build1
-rw-r--r--target/ppc/misc_helper.c179
-rw-r--r--target/ppc/mmu-book3s-v3.c2
-rw-r--r--target/ppc/mmu-book3s-v3.h43
-rw-r--r--target/ppc/mmu-hash32.c73
-rw-r--r--target/ppc/mmu-hash32.h58
-rw-r--r--target/ppc/mmu-hash64.c63
-rw-r--r--target/ppc/mmu-hash64.h3
-rw-r--r--target/ppc/mmu-radix64.c68
-rw-r--r--target/ppc/mmu-radix64.h53
-rw-r--r--target/ppc/mmu_common.c337
-rw-r--r--target/ppc/mmu_helper.c9
-rw-r--r--target/ppc/power8-pmu.c1
-rw-r--r--target/ppc/ppc-qmp-cmds.c12
-rw-r--r--target/ppc/spr_common.h4
-rw-r--r--target/ppc/tcg-excp_helper.c851
-rw-r--r--target/ppc/timebase_helper.c90
-rw-r--r--target/ppc/translate.c116
-rw-r--r--target/ppc/translate/vmx-impl.c.inc292
-rw-r--r--target/ppc/translate/vmx-ops.c.inc19
-rw-r--r--target/ppc/translate/vsx-impl.c.inc594
-rw-r--r--target/ppc/translate/vsx-ops.c.inc82
-rw-r--r--target/ppc/user_only_helper.c1
-rw-r--r--target/riscv/Kconfig4
-rw-r--r--target/riscv/arch_dump.c2
-rw-r--r--target/riscv/bitmanip_helper.c2
-rw-r--r--target/riscv/cpu-param.h14
-rw-r--r--target/riscv/cpu-qom.h8
-rw-r--r--target/riscv/cpu.c1271
-rw-r--r--target/riscv/cpu.h189
-rw-r--r--target/riscv/cpu_bits.h234
-rw-r--r--target/riscv/cpu_cfg.h156
-rw-r--r--target/riscv/cpu_cfg_fields.h.inc170
-rw-r--r--target/riscv/cpu_helper.c954
-rw-r--r--target/riscv/cpu_user.h1
-rw-r--r--target/riscv/crypto_helper.c1
-rw-r--r--target/riscv/csr.c1787
-rw-r--r--target/riscv/debug.c131
-rw-r--r--target/riscv/debug.h3
-rw-r--r--target/riscv/fpu_helper.c1
-rw-r--r--target/riscv/gdbstub.c29
-rw-r--r--target/riscv/helper.h3
-rw-r--r--target/riscv/insn16.decode4
-rw-r--r--target/riscv/insn32.decode49
-rw-r--r--target/riscv/insn_trans/trans_privileged.c.inc46
-rw-r--r--target/riscv/insn_trans/trans_rva.c.inc4
-rw-r--r--target/riscv/insn_trans/trans_rvbf16.c.inc9
-rw-r--r--target/riscv/insn_trans/trans_rvd.c.inc22
-rw-r--r--target/riscv/insn_trans/trans_rvf.c.inc8
-rw-r--r--target/riscv/insn_trans/trans_rvh.c.inc8
-rw-r--r--target/riscv/insn_trans/trans_rvi.c.inc148
-rw-r--r--target/riscv/insn_trans/trans_rvv.c.inc647
-rw-r--r--target/riscv/insn_trans/trans_rvvk.c.inc10
-rw-r--r--target/riscv/insn_trans/trans_rvzacas.c.inc4
-rw-r--r--target/riscv/insn_trans/trans_rvzce.c.inc21
-rw-r--r--target/riscv/insn_trans/trans_rvzfh.c.inc4
-rw-r--r--target/riscv/insn_trans/trans_rvzicfiss.c.inc131
-rw-r--r--target/riscv/insn_trans/trans_svinval.c.inc6
-rw-r--r--target/riscv/internals.h69
-rw-r--r--target/riscv/kvm/kvm-cpu.c471
-rw-r--r--target/riscv/kvm/kvm_riscv.h4
-rw-r--r--target/riscv/m128_helper.c1
-rw-r--r--target/riscv/machine.c87
-rw-r--r--target/riscv/monitor.c1
-rw-r--r--target/riscv/op_helper.c218
-rw-r--r--target/riscv/pmp.c171
-rw-r--r--target/riscv/pmp.h4
-rw-r--r--target/riscv/pmu.c10
-rw-r--r--target/riscv/riscv-qmp-cmds.c10
-rw-r--r--target/riscv/tcg/tcg-cpu.c358
-rw-r--r--target/riscv/tcg/tcg-cpu.h2
-rw-r--r--target/riscv/th_csr.c30
-rw-r--r--target/riscv/time_helper.c1
-rw-r--r--target/riscv/trace-events3
-rw-r--r--target/riscv/translate.c162
-rw-r--r--target/riscv/vcrypto_helper.c33
-rw-r--r--target/riscv/vector_helper.c826
-rw-r--r--target/riscv/vector_internals.c4
-rw-r--r--target/riscv/vector_internals.h13
-rw-r--r--target/riscv/zce_helper.c3
-rw-r--r--target/rx/cpu-param.h3
-rw-r--r--target/rx/cpu.c58
-rw-r--r--target/rx/cpu.h21
-rw-r--r--target/rx/helper.c8
-rw-r--r--target/rx/helper.h34
-rw-r--r--target/rx/op_helper.c7
-rw-r--r--target/rx/translate.c9
-rw-r--r--target/s390x/arch_dump.c2
-rw-r--r--target/s390x/cpu-dump.c2
-rw-r--r--target/s390x/cpu-param.h9
-rw-r--r--target/s390x/cpu-sysemu.c322
-rw-r--r--target/s390x/cpu-system.c325
-rw-r--r--target/s390x/cpu.c138
-rw-r--r--target/s390x/cpu.h49
-rw-r--r--target/s390x/cpu_features.c11
-rw-r--r--target/s390x/cpu_features.h1
-rw-r--r--target/s390x/cpu_features_def.h.inc94
-rw-r--r--target/s390x/cpu_models.c86
-rw-r--r--target/s390x/cpu_models.h3
-rw-r--r--target/s390x/cpu_models_sysemu.c424
-rw-r--r--target/s390x/cpu_models_system.c433
-rw-r--r--target/s390x/diag.c13
-rw-r--r--target/s390x/gdbstub.c40
-rw-r--r--target/s390x/gen-features.c195
-rw-r--r--target/s390x/helper.c8
-rw-r--r--target/s390x/interrupt.c13
-rw-r--r--target/s390x/ioinst.c14
-rw-r--r--target/s390x/kvm/kvm.c70
-rw-r--r--target/s390x/kvm/pv.c76
-rw-r--r--target/s390x/kvm/pv.h28
-rw-r--r--target/s390x/machine.c4
-rw-r--r--target/s390x/meson.build4
-rw-r--r--target/s390x/mmu_helper.c8
-rw-r--r--target/s390x/s390x-internal.h13
-rw-r--r--target/s390x/sigp.c18
-rw-r--r--target/s390x/tcg/cc_helper.c1
-rw-r--r--target/s390x/tcg/crypto_helper.c3
-rw-r--r--target/s390x/tcg/excp_helper.c6
-rw-r--r--target/s390x/tcg/fpu_helper.c9
-rw-r--r--target/s390x/tcg/insn-data.h.inc2
-rw-r--r--target/s390x/tcg/int_helper.c3
-rw-r--r--target/s390x/tcg/mem_helper.c122
-rw-r--r--target/s390x/tcg/misc_helper.c19
-rw-r--r--target/s390x/tcg/translate.c17
-rw-r--r--target/s390x/tcg/vec_fpu_helper.c13
-rw-r--r--target/s390x/tcg/vec_helper.c3
-rw-r--r--target/s390x/trace-events2
-rw-r--r--target/sh4/cpu-param.h5
-rw-r--r--target/sh4/cpu.c63
-rw-r--r--target/sh4/cpu.h23
-rw-r--r--target/sh4/helper.c7
-rw-r--r--target/sh4/op_helper.c3
-rw-r--r--target/sh4/translate.c46
-rw-r--r--target/sparc/cpu-param.h27
-rw-r--r--target/sparc/cpu.c152
-rw-r--r--target/sparc/cpu.h74
-rw-r--r--target/sparc/fop_helper.c32
-rw-r--r--target/sparc/gdbstub.c18
-rw-r--r--target/sparc/helper.c1
-rw-r--r--target/sparc/helper.h8
-rw-r--r--target/sparc/insns.decode25
-rw-r--r--target/sparc/int32_helper.c42
-rw-r--r--target/sparc/ldst_helper.c20
-rw-r--r--target/sparc/machine.c26
-rw-r--r--target/sparc/mmu_helper.c8
-rw-r--r--target/sparc/translate.c274
-rw-r--r--target/sparc/translate.h17
-rw-r--r--target/sparc/win_helper.c27
-rw-r--r--target/tricore/cpu-param.h3
-rw-r--r--target/tricore/cpu.c28
-rw-r--r--target/tricore/cpu.h21
-rw-r--r--target/tricore/fpu_helper.c6
-rw-r--r--target/tricore/gdbstub.c2
-rw-r--r--target/tricore/helper.c7
-rw-r--r--target/tricore/op_helper.c7
-rw-r--r--target/tricore/translate.c25
-rw-r--r--target/xtensa/Kconfig2
-rw-r--r--target/xtensa/core-dc232b/gdb-config.c.inc5
-rw-r--r--target/xtensa/core-dc232b/xtensa-modules.c.inc5
-rw-r--r--target/xtensa/core-fsf/xtensa-modules.c.inc5
-rw-r--r--target/xtensa/cpu-param.h4
-rw-r--r--target/xtensa/cpu.c113
-rw-r--r--target/xtensa/cpu.h82
-rw-r--r--target/xtensa/dbg_helper.c4
-rw-r--r--target/xtensa/exc_helper.c3
-rw-r--r--target/xtensa/fpu_helper.c37
-rw-r--r--target/xtensa/helper.c14
-rw-r--r--target/xtensa/mmu_helper.c10
-rw-r--r--target/xtensa/op_helper.c2
-rw-r--r--target/xtensa/translate.c43
-rw-r--r--target/xtensa/win_helper.c1
-rw-r--r--target/xtensa/xtensa-semi.c16
-rw-r--r--tcg/aarch64/tcg-target-con-set.h11
-rw-r--r--tcg/aarch64/tcg-target-has.h60
-rw-r--r--tcg/aarch64/tcg-target-mo.h12
-rw-r--r--tcg/aarch64/tcg-target-opc.h.inc15
-rw-r--r--tcg/aarch64/tcg-target.c.inc1593
-rw-r--r--tcg/aarch64/tcg-target.h128
-rw-r--r--tcg/aarch64/tcg-target.opc.h15
-rw-r--r--tcg/arm/tcg-target-con-set.h5
-rw-r--r--tcg/arm/tcg-target-has.h73
-rw-r--r--tcg/arm/tcg-target-mo.h13
-rw-r--r--tcg/arm/tcg-target-opc.h.inc16
-rw-r--r--tcg/arm/tcg-target.c.inc1679
-rw-r--r--tcg/arm/tcg-target.h86
-rw-r--r--tcg/arm/tcg-target.opc.h16
-rw-r--r--tcg/i386/tcg-target-con-set.h5
-rw-r--r--tcg/i386/tcg-target-con-str.h3
-rw-r--r--tcg/i386/tcg-target-has.h112
-rw-r--r--tcg/i386/tcg-target-mo.h19
-rw-r--r--tcg/i386/tcg-target-opc.h.inc37
-rw-r--r--tcg/i386/tcg-target.c.inc2455
-rw-r--r--tcg/i386/tcg-target.h162
-rw-r--r--tcg/i386/tcg-target.opc.h38
-rw-r--r--tcg/loongarch64/tcg-target-con-set.h14
-rw-r--r--tcg/loongarch64/tcg-target-con-str.h2
-rw-r--r--tcg/loongarch64/tcg-target-has.h57
-rw-r--r--tcg/loongarch64/tcg-target-mo.h12
-rw-r--r--tcg/loongarch64/tcg-target-opc.h.inc (renamed from tcg/loongarch64/tcg-target.opc.h)0
-rw-r--r--tcg/loongarch64/tcg-target.c.inc1399
-rw-r--r--tcg/loongarch64/tcg-target.h115
-rw-r--r--tcg/meson.build25
-rw-r--r--tcg/mips/tcg-target-con-set.h23
-rw-r--r--tcg/mips/tcg-target-con-str.h2
-rw-r--r--tcg/mips/tcg-target-has.h71
-rw-r--r--tcg/mips/tcg-target-mo.h13
-rw-r--r--tcg/mips/tcg-target-opc.h.inc1
-rw-r--r--tcg/mips/tcg-target.c.inc1751
-rw-r--r--tcg/mips/tcg-target.h130
-rw-r--r--tcg/optimize.c2059
-rw-r--r--tcg/perf.c7
-rw-r--r--tcg/ppc/tcg-target-con-set.h13
-rw-r--r--tcg/ppc/tcg-target-con-str.h1
-rw-r--r--tcg/ppc/tcg-target-has.h72
-rw-r--r--tcg/ppc/tcg-target-mo.h12
-rw-r--r--tcg/ppc/tcg-target-opc.h.inc32
-rw-r--r--tcg/ppc/tcg-target.c.inc2340
-rw-r--r--tcg/ppc/tcg-target.h126
-rw-r--r--tcg/ppc/tcg-target.opc.h32
-rw-r--r--tcg/region.c27
-rw-r--r--tcg/riscv/tcg-target-con-set.h18
-rw-r--r--tcg/riscv/tcg-target-con-str.h6
-rw-r--r--tcg/riscv/tcg-target-has.h72
-rw-r--r--tcg/riscv/tcg-target-mo.h12
-rw-r--r--tcg/riscv/tcg-target-opc.h.inc12
-rw-r--r--tcg/riscv/tcg-target.c.inc2287
-rw-r--r--tcg/riscv/tcg-target.h142
-rw-r--r--tcg/s390x/tcg-target-con-set.h9
-rw-r--r--tcg/s390x/tcg-target-con-str.h2
-rw-r--r--tcg/s390x/tcg-target-has.h80
-rw-r--r--tcg/s390x/tcg-target-mo.h12
-rw-r--r--tcg/s390x/tcg-target-opc.h.inc15
-rw-r--r--tcg/s390x/tcg-target.c.inc2141
-rw-r--r--tcg/s390x/tcg-target.h126
-rw-r--r--tcg/s390x/tcg-target.opc.h15
-rw-r--r--tcg/sparc64/tcg-target-con-set.h13
-rw-r--r--tcg/sparc64/tcg-target-con-str.h1
-rw-r--r--tcg/sparc64/tcg-target-has.h22
-rw-r--r--tcg/sparc64/tcg-target-mo.h12
-rw-r--r--tcg/sparc64/tcg-target-opc.h.inc1
-rw-r--r--tcg/sparc64/tcg-target.c.inc1327
-rw-r--r--tcg/sparc64/tcg-target.h94
-rw-r--r--tcg/tcg-common.c5
-rw-r--r--tcg/tcg-has.h54
-rw-r--r--tcg/tcg-internal.h22
-rw-r--r--tcg/tcg-ldst.c.inc65
-rw-r--r--tcg/tcg-op-gvec.c398
-rw-r--r--tcg/tcg-op-ldst.c209
-rw-r--r--tcg/tcg-op-vec.c13
-rw-r--r--tcg/tcg-op.c1441
-rw-r--r--tcg/tcg-pool.c.inc162
-rw-r--r--tcg/tcg.c2007
-rw-r--r--tcg/tci.c769
-rw-r--r--tcg/tci/tcg-target-has.h22
-rw-r--r--tcg/tci/tcg-target-mo.h17
-rw-r--r--tcg/tci/tcg-target-opc.h.inc15
-rw-r--r--tcg/tci/tcg-target.c.inc1238
-rw-r--r--tcg/tci/tcg-target.h95
-rw-r--r--tests/Makefile.include94
-rw-r--r--tests/avocado/README.rst10
-rw-r--r--tests/avocado/acpi-bits.py409
-rw-r--r--tests/avocado/avocado_qemu/__init__.py681
-rw-r--r--tests/avocado/boot_linux.py131
-rw-r--r--tests/avocado/boot_linux_console.py1547
-rw-r--r--tests/avocado/boot_xen.py116
-rw-r--r--tests/avocado/cpu_queries.py35
-rw-r--r--tests/avocado/empty_cpu_model.py19
-rw-r--r--tests/avocado/hotplug_blk.py69
-rw-r--r--tests/avocado/hotplug_cpu.py37
-rw-r--r--tests/avocado/info_usernet.py33
-rw-r--r--tests/avocado/intel_iommu.py123
-rw-r--r--tests/avocado/kvm_xen_guest.py171
-rw-r--r--tests/avocado/linux_initrd.py92
-rw-r--r--tests/avocado/linux_ssh_mips_malta.py205
-rw-r--r--tests/avocado/load_bflt.py54
-rw-r--r--tests/avocado/machine_aarch64_sbsaref.py236
-rw-r--r--tests/avocado/machine_aarch64_virt.py146
-rw-r--r--tests/avocado/machine_arm_canona1100.py35
-rw-r--r--tests/avocado/machine_arm_integratorcp.py99
-rw-r--r--tests/avocado/machine_arm_n8x0.py49
-rw-r--r--tests/avocado/machine_aspeed.py441
-rw-r--r--tests/avocado/machine_avr6.py50
-rw-r--r--tests/avocado/machine_loongarch.py58
-rw-r--r--tests/avocado/machine_m68k_nextcube.py70
-rw-r--r--tests/avocado/machine_microblaze.py61
-rw-r--r--tests/avocado/machine_mips_fuloong2e.py42
-rw-r--r--tests/avocado/machine_mips_loongson3v.py39
-rw-r--r--tests/avocado/machine_mips_malta.py164
-rw-r--r--tests/avocado/machine_rx_gdbsim.py77
-rw-r--r--tests/avocado/machine_s390_ccw_virtio.py277
-rw-r--r--tests/avocado/machine_sparc64_sun4u.py36
-rw-r--r--tests/avocado/machine_sparc_leon3.py37
-rw-r--r--tests/avocado/mem-addr-space-check.py355
-rw-r--r--tests/avocado/migration.py135
-rw-r--r--tests/avocado/multiprocess.py102
-rw-r--r--tests/avocado/netdev-ethtool.py101
-rw-r--r--tests/avocado/pc_cpu_hotplug_props.py35
-rw-r--r--tests/avocado/ppc_405.py36
-rw-r--r--tests/avocado/ppc_74xx.py136
-rw-r--r--tests/avocado/ppc_amiga.py38
-rw-r--r--tests/avocado/ppc_bamboo.py42
-rw-r--r--tests/avocado/ppc_hv_tests.py206
-rw-r--r--tests/avocado/ppc_mpc8544ds.py34
-rw-r--r--tests/avocado/ppc_powernv.py102
-rw-r--r--tests/avocado/ppc_prep_40p.py85
-rw-r--r--tests/avocado/ppc_pseries.py110
-rw-r--r--tests/avocado/ppc_virtex_ml507.py36
-rw-r--r--tests/avocado/replay_kernel.py550
-rw-r--r--tests/avocado/replay_linux.py196
-rw-r--r--tests/avocado/reverse_debugging.py276
-rw-r--r--tests/avocado/riscv_opensbi.py63
-rw-r--r--tests/avocado/s390_topology.py439
-rw-r--r--tests/avocado/smmu.py139
-rw-r--r--tests/avocado/tcg_plugins.py155
-rw-r--r--tests/avocado/tesseract_utils.py46
-rw-r--r--tests/avocado/tuxrun_baselines.py620
-rw-r--r--tests/avocado/version.py25
-rw-r--r--tests/avocado/virtio-gpu.py157
-rw-r--r--tests/avocado/virtio_version.py175
-rw-r--r--tests/avocado/virtiofs_submounts.py.data/cleanup.sh46
-rw-r--r--tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh30
-rw-r--r--tests/avocado/virtiofs_submounts.py.data/guest.sh138
-rw-r--r--tests/avocado/virtiofs_submounts.py.data/host.sh127
-rw-r--r--tests/avocado/vnc.py115
-rw-r--r--tests/avocado/x86_cpu_model_versions.py362
-rw-r--r--tests/bench/benchmark-crypto-akcipher.c30
-rw-r--r--tests/bench/benchmark-crypto-cipher.c22
-rw-r--r--tests/bench/benchmark-crypto-hash.c10
-rw-r--r--tests/bench/benchmark-crypto-hmac.c6
-rw-r--r--tests/bench/test_akcipher_keys.c.inc (renamed from tests/bench/test_akcipher_keys.inc)0
-rw-r--r--tests/data/acpi/aarch64/virt/DSDTbin5196 -> 5196 bytes
-rw-r--r--tests/data/acpi/aarch64/virt/DSDT.acpihmatvirtbin5282 -> 5282 bytes
-rw-r--r--tests/data/acpi/aarch64/virt/DSDT.memhpbin6557 -> 6557 bytes
-rw-r--r--tests/data/acpi/aarch64/virt/DSDT.pxbbin7679 -> 7679 bytes
-rw-r--r--tests/data/acpi/aarch64/virt/DSDT.topologybin5398 -> 5398 bytes
-rw-r--r--tests/data/acpi/aarch64/virt/SSDT.memhpbin1817 -> 1817 bytes
-rwxr-xr-xtests/data/acpi/disassemle-aml.sh2
-rw-r--r--tests/data/acpi/riscv64/virt/APICbin0 -> 116 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/DSDTbin0 -> 3576 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/FACPbin0 -> 276 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/MCFGbin0 -> 60 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/RHCTbin0 -> 400 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/SPCRbin0 -> 90 bytes
-rw-r--r--tests/data/acpi/riscv64/virt/SRAT.numamembin0 -> 108 bytes
-rw-r--r--tests/data/acpi/x86/microvm/DSDT.pciebin3023 -> 3023 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDTbin6830 -> 8611 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.acpierstbin6741 -> 8522 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.acpihmatbin8155 -> 9936 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.bridgebin13701 -> 15482 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.cphpbin7294 -> 9075 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.dimmpxmbin8484 -> 10265 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.hpbridgebin6781 -> 8562 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.hpbrrootbin3337 -> 5100 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.ipmikcsbin6902 -> 8683 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.memhpbin8189 -> 9970 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.nohpetbin6688 -> 8469 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.numamembin6836 -> 8617 bytes
-rw-r--r--tests/data/acpi/x86/pc/DSDT.roothpbin10623 -> 12404 bytes
-rw-r--r--tests/data/acpi/x86/q35/APIC.acpihmat-generic-xbin0 -> 136 bytes
-rw-r--r--tests/data/acpi/x86/q35/CEDT.acpihmat-generic-xbin0 -> 68 bytes
-rw-r--r--tests/data/acpi/x86/q35/DMAR.dmarbin120 -> 120 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDTbin8355 -> 8440 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.acpierstbin8372 -> 8457 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.acpihmatbin9680 -> 9765 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.acpihmat-generic-xbin0 -> 12650 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiatorbin8634 -> 8719 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.applesmcbin8401 -> 8486 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.bridgebin11968 -> 12053 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.core-countbin12913 -> 12998 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.core-count2bin33770 -> 33855 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.cphpbin8819 -> 8904 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.cxlbin9714 -> 13231 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.dimmpxmbin10009 -> 10094 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.ipmibtbin8430 -> 8515 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.ipmismbusbin8443 -> 8528 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.ivrsbin8372 -> 8457 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.memhpbin9714 -> 9799 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.mmio64bin9485 -> 9570 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.multi-bridgebin13208 -> 13293 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.noacpihpbin8235 -> 8302 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.nohpetbin8213 -> 8298 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.numamembin8361 -> 8446 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.pvpanic-isabin8456 -> 8541 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.thread-countbin12913 -> 12998 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.thread-count2bin33770 -> 33855 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.tis.tpm12bin8961 -> 9046 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.tis.tpm2bin8987 -> 9072 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.type4-countbin18589 -> 18674 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.viotbin9464 -> 14697 bytes
-rw-r--r--tests/data/acpi/x86/q35/DSDT.xapicbin35718 -> 35803 bytes
-rw-r--r--tests/data/acpi/x86/q35/HMAT.acpihmat-generic-xbin0 -> 360 bytes
-rw-r--r--tests/data/acpi/x86/q35/SRAT.acpihmat-generic-xbin0 -> 520 bytes
-rw-r--r--tests/data/qobject/qdict.txt6
-rw-r--r--tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2bin0 -> 12800 bytes
-rw-r--r--tests/docker/Makefile.include19
-rw-r--r--tests/docker/dockerfiles/alpine.docker9
-rw-r--r--tests/docker/dockerfiles/centos9.docker3
-rw-r--r--tests/docker/dockerfiles/debian-all-test-cross.docker3
-rw-r--r--tests/docker/dockerfiles/debian-amd64-cross.docker14
-rw-r--r--tests/docker/dockerfiles/debian-arm64-cross.docker14
-rw-r--r--tests/docker/dockerfiles/debian-armel-cross.docker178
-rw-r--r--tests/docker/dockerfiles/debian-armhf-cross.docker14
-rw-r--r--tests/docker/dockerfiles/debian-hexagon-cross.docker3
-rw-r--r--tests/docker/dockerfiles/debian-i686-cross.docker24
-rw-r--r--tests/docker/dockerfiles/debian-legacy-test-cross.docker3
-rw-r--r--tests/docker/dockerfiles/debian-loongarch-cross.docker7
-rwxr-xr-xtests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh8
-rw-r--r--tests/docker/dockerfiles/debian-mips64el-cross.docker24
-rw-r--r--tests/docker/dockerfiles/debian-mipsel-cross.docker24
-rw-r--r--tests/docker/dockerfiles/debian-ppc64el-cross.docker14
-rw-r--r--tests/docker/dockerfiles/debian-riscv64-cross.docker4
-rw-r--r--tests/docker/dockerfiles/debian-s390x-cross.docker14
-rw-r--r--tests/docker/dockerfiles/debian-toolchain.docker7
-rw-r--r--tests/docker/dockerfiles/debian-tricore-cross.docker5
-rw-r--r--tests/docker/dockerfiles/debian-xtensa-cross.docker3
-rw-r--r--tests/docker/dockerfiles/debian.docker7
-rw-r--r--tests/docker/dockerfiles/emsdk-wasm32-cross.docker145
-rw-r--r--tests/docker/dockerfiles/fedora-cris-cross.docker14
-rw-r--r--tests/docker/dockerfiles/fedora-rust-nightly.docker183
-rw-r--r--tests/docker/dockerfiles/fedora-win64-cross.docker6
-rw-r--r--tests/docker/dockerfiles/fedora.docker5
-rw-r--r--tests/docker/dockerfiles/opensuse-leap.docker10
-rw-r--r--tests/docker/dockerfiles/python.docker1
-rw-r--r--tests/docker/dockerfiles/ubuntu2204.docker11
-rwxr-xr-xtests/docker/test-debug4
-rwxr-xr-xtests/docker/test-rust21
-rw-r--r--tests/fp/fp-bench.c10
-rw-r--r--tests/fp/fp-test-log2.c2
-rw-r--r--tests/fp/fp-test.c9
-rw-r--r--tests/fp/meson.build16
-rw-r--r--tests/functional/acpi-bits/bits-config/bits-cfg.txt (renamed from tests/avocado/acpi-bits/bits-config/bits-cfg.txt)0
-rw-r--r--tests/functional/acpi-bits/bits-tests/smbios.py2 (renamed from tests/avocado/acpi-bits/bits-tests/smbios.py2)0
-rw-r--r--tests/functional/acpi-bits/bits-tests/smilatency.py2 (renamed from tests/avocado/acpi-bits/bits-tests/smilatency.py2)0
-rw-r--r--tests/functional/acpi-bits/bits-tests/testacpi.py2 (renamed from tests/avocado/acpi-bits/bits-tests/testacpi.py2)0
-rw-r--r--tests/functional/acpi-bits/bits-tests/testcpuid.py2 (renamed from tests/avocado/acpi-bits/bits-tests/testcpuid.py2)0
-rw-r--r--tests/functional/aspeed.py58
-rw-r--r--tests/functional/meson.build420
-rw-r--r--tests/functional/qemu_test/__init__.py20
-rw-r--r--tests/functional/qemu_test/archive.py117
-rw-r--r--tests/functional/qemu_test/asset.py230
-rw-r--r--tests/functional/qemu_test/cmd.py202
-rw-r--r--tests/functional/qemu_test/config.py48
-rw-r--r--tests/functional/qemu_test/decorators.py151
-rw-r--r--tests/functional/qemu_test/linuxkernel.py52
-rw-r--r--tests/functional/qemu_test/ports.py55
-rw-r--r--tests/functional/qemu_test/tesseract.py25
-rw-r--r--tests/functional/qemu_test/testcase.py400
-rw-r--r--tests/functional/qemu_test/tuxruntest.py136
-rw-r--r--tests/functional/qemu_test/uncompress.py107
-rw-r--r--tests/functional/qemu_test/utils.py39
-rw-r--r--tests/functional/replay_kernel.py84
-rw-r--r--tests/functional/reverse_debugging.py196
-rwxr-xr-xtests/functional/test_aarch64_aspeed_ast2700.py140
-rwxr-xr-xtests/functional/test_aarch64_aspeed_ast2700fc.py135
-rwxr-xr-xtests/functional/test_aarch64_imx8mp_evk.py67
-rwxr-xr-xtests/functional/test_aarch64_raspi3.py34
-rwxr-xr-xtests/functional/test_aarch64_raspi4.py96
-rwxr-xr-xtests/functional/test_aarch64_replay.py51
-rwxr-xr-xtests/functional/test_aarch64_reverse_debug.py38
-rwxr-xr-xtests/functional/test_aarch64_rme_sbsaref.py69
-rwxr-xr-xtests/functional/test_aarch64_rme_virt.py101
-rwxr-xr-xtests/functional/test_aarch64_sbsaref.py100
-rwxr-xr-xtests/functional/test_aarch64_sbsaref_alpine.py61
-rwxr-xr-xtests/functional/test_aarch64_sbsaref_freebsd.py62
-rwxr-xr-xtests/functional/test_aarch64_smmu.py205
-rwxr-xr-xtests/functional/test_aarch64_tcg_plugins.py118
-rwxr-xr-xtests/functional/test_aarch64_tuxrun.py50
-rwxr-xr-xtests/functional/test_aarch64_virt.py135
-rwxr-xr-xtests/functional/test_aarch64_virt_gpu.py140
-rwxr-xr-xtests/functional/test_aarch64_xen.py90
-rwxr-xr-xtests/functional/test_aarch64_xlnx_versal.py37
-rwxr-xr-xtests/functional/test_acpi_bits.py340
-rwxr-xr-xtests/functional/test_alpha_clipper.py34
-rwxr-xr-xtests/functional/test_alpha_replay.py29
-rwxr-xr-xtests/functional/test_arm_aspeed_ast1030.py73
-rwxr-xr-xtests/functional/test_arm_aspeed_ast2500.py56
-rwxr-xr-xtests/functional/test_arm_aspeed_ast2600.py140
-rw-r--r--tests/functional/test_arm_aspeed_bletchley.py25
-rwxr-xr-xtests/functional/test_arm_aspeed_palmetto.py25
-rwxr-xr-xtests/functional/test_arm_aspeed_rainier.py65
-rwxr-xr-xtests/functional/test_arm_aspeed_romulus.py25
-rw-r--r--tests/functional/test_arm_aspeed_witherspoon.py25
-rwxr-xr-xtests/functional/test_arm_bflt.py41
-rwxr-xr-xtests/functional/test_arm_bpim2u.py180
-rwxr-xr-xtests/functional/test_arm_canona1100.py37
-rwxr-xr-xtests/functional/test_arm_collie.py31
-rwxr-xr-xtests/functional/test_arm_cubieboard.py144
-rwxr-xr-xtests/functional/test_arm_emcraft_sf2.py52
-rwxr-xr-xtests/functional/test_arm_integratorcp.py95
-rwxr-xr-xtests/functional/test_arm_microbit.py31
-rwxr-xr-xtests/functional/test_arm_orangepi.py237
-rwxr-xr-xtests/functional/test_arm_quanta_gsj.py92
-rwxr-xr-xtests/functional/test_arm_raspi2.py92
-rwxr-xr-xtests/functional/test_arm_realview.py47
-rwxr-xr-xtests/functional/test_arm_replay.py69
-rwxr-xr-xtests/functional/test_arm_smdkc210.py51
-rwxr-xr-xtests/functional/test_arm_stellaris.py48
-rwxr-xr-xtests/functional/test_arm_sx1.py73
-rwxr-xr-xtests/functional/test_arm_tuxrun.py70
-rwxr-xr-xtests/functional/test_arm_vexpress.py26
-rwxr-xr-xtests/functional/test_arm_virt.py30
-rwxr-xr-xtests/functional/test_avr_mega2560.py49
-rwxr-xr-xtests/functional/test_avr_uno.py32
-rwxr-xr-xtests/functional/test_cpu_queries.py37
-rwxr-xr-xtests/functional/test_empty_cpu_model.py24
-rwxr-xr-xtests/functional/test_hppa_seabios.py35
-rwxr-xr-xtests/functional/test_i386_replay.py28
-rwxr-xr-xtests/functional/test_i386_tuxrun.py35
-rwxr-xr-xtests/functional/test_info_usernet.py34
-rwxr-xr-xtests/functional/test_intel_iommu.py155
-rwxr-xr-xtests/functional/test_linux_initrd.py95
-rwxr-xr-xtests/functional/test_loongarch64_virt.py62
-rwxr-xr-xtests/functional/test_m68k_mcf5208evb.py27
-rwxr-xr-xtests/functional/test_m68k_nextcube.py64
-rwxr-xr-xtests/functional/test_m68k_q800.py37
-rwxr-xr-xtests/functional/test_m68k_replay.py43
-rwxr-xr-xtests/functional/test_m68k_tuxrun.py34
-rwxr-xr-xtests/functional/test_mem_addr_space.py349
-rwxr-xr-xtests/functional/test_memlock.py79
-rwxr-xr-xtests/functional/test_microblaze_replay.py28
-rwxr-xr-xtests/functional/test_microblaze_s3adsp1800.py76
-rwxr-xr-xtests/functional/test_microblazeel_s3adsp1800.py26
-rwxr-xr-xtests/functional/test_migration.py98
-rwxr-xr-xtests/functional/test_mips64_malta.py35
-rwxr-xr-xtests/functional/test_mips64_tuxrun.py35
-rwxr-xr-xtests/functional/test_mips64el_fuloong2e.py64
-rwxr-xr-xtests/functional/test_mips64el_loongson3v.py36
-rwxr-xr-xtests/functional/test_mips64el_malta.py197
-rwxr-xr-xtests/functional/test_mips64el_replay.py56
-rwxr-xr-xtests/functional/test_mips64el_tuxrun.py35
-rwxr-xr-xtests/functional/test_mips_malta.py196
-rwxr-xr-xtests/functional/test_mips_replay.py55
-rwxr-xr-xtests/functional/test_mips_tuxrun.py36
-rwxr-xr-xtests/functional/test_mipsel_malta.py108
-rw-r--r--tests/functional/test_mipsel_replay.py54
-rwxr-xr-xtests/functional/test_mipsel_tuxrun.py36
-rwxr-xr-xtests/functional/test_multiprocess.py100
-rwxr-xr-xtests/functional/test_netdev_ethtool.py88
-rwxr-xr-xtests/functional/test_or1k_replay.py27
-rwxr-xr-xtests/functional/test_or1k_sim.py26
-rwxr-xr-xtests/functional/test_pc_cpu_hotplug_props.py37
-rwxr-xr-xtests/functional/test_ppc64_e500.py44
-rwxr-xr-xtests/functional/test_ppc64_hv.py165
-rwxr-xr-xtests/functional/test_ppc64_mac99.py44
-rwxr-xr-xtests/functional/test_ppc64_powernv.py118
-rwxr-xr-xtests/functional/test_ppc64_pseries.py91
-rwxr-xr-xtests/functional/test_ppc64_replay.py50
-rwxr-xr-xtests/functional/test_ppc64_reverse_debug.py41
-rwxr-xr-xtests/functional/test_ppc64_tuxrun.py113
-rwxr-xr-xtests/functional/test_ppc_40p.py94
-rwxr-xr-xtests/functional/test_ppc_74xx.py126
-rwxr-xr-xtests/functional/test_ppc_amiga.py43
-rwxr-xr-xtests/functional/test_ppc_bamboo.py44
-rwxr-xr-xtests/functional/test_ppc_mac.py36
-rwxr-xr-xtests/functional/test_ppc_mpc8544ds.py37
-rwxr-xr-xtests/functional/test_ppc_replay.py34
-rw-r--r--tests/functional/test_ppc_sam460ex.py38
-rwxr-xr-xtests/functional/test_ppc_tuxrun.py35
-rwxr-xr-xtests/functional/test_ppc_virtex_ml507.py39
-rwxr-xr-xtests/functional/test_riscv32_tuxrun.py38
-rwxr-xr-xtests/functional/test_riscv64_tuxrun.py51
-rwxr-xr-xtests/functional/test_riscv_opensbi.py36
-rwxr-xr-xtests/functional/test_rx_gdbsim.py76
-rwxr-xr-xtests/functional/test_s390x_ccw_virtio.py274
-rwxr-xr-xtests/functional/test_s390x_replay.py28
-rwxr-xr-xtests/functional/test_s390x_topology.py415
-rwxr-xr-xtests/functional/test_s390x_tuxrun.py35
-rwxr-xr-xtests/functional/test_sh4_r2d.py29
-rwxr-xr-xtests/functional/test_sh4_tuxrun.py49
-rwxr-xr-xtests/functional/test_sh4eb_r2d.py28
-rwxr-xr-xtests/functional/test_sparc64_sun4u.py38
-rwxr-xr-xtests/functional/test_sparc64_tuxrun.py35
-rwxr-xr-xtests/functional/test_sparc_replay.py27
-rwxr-xr-xtests/functional/test_sparc_sun4m.py24
-rwxr-xr-xtests/functional/test_version.py28
-rwxr-xr-xtests/functional/test_virtio_balloon.py178
-rwxr-xr-xtests/functional/test_virtio_gpu.py142
-rwxr-xr-xtests/functional/test_virtio_version.py177
-rwxr-xr-xtests/functional/test_vnc.py116
-rwxr-xr-xtests/functional/test_x86_64_hotplug_blk.py85
-rwxr-xr-xtests/functional/test_x86_64_hotplug_cpu.py71
-rwxr-xr-xtests/functional/test_x86_64_kvm_xen.py157
-rwxr-xr-xtests/functional/test_x86_64_replay.py58
-rwxr-xr-xtests/functional/test_x86_64_reverse_debug.py36
-rwxr-xr-xtests/functional/test_x86_64_tuxrun.py36
-rwxr-xr-xtests/functional/test_x86_cpu_model_versions.py335
-rwxr-xr-xtests/functional/test_xtensa_lx60.py26
-rwxr-xr-xtests/functional/test_xtensa_replay.py28
-rwxr-xr-xtests/guest-debug/run-test.py21
-rw-r--r--tests/guest-debug/test_gdbstub.py17
-rw-r--r--tests/include/meson.build2
m---------tests/lcitool/libvirt-ci0
-rw-r--r--tests/lcitool/mappings.yml15
-rw-r--r--tests/lcitool/projects/qemu.yml5
-rwxr-xr-xtests/lcitool/refresh68
-rw-r--r--tests/meson.build7
-rwxr-xr-xtests/migration-stress/guestperf-batch.py (renamed from tests/migration/guestperf-batch.py)0
-rwxr-xr-xtests/migration-stress/guestperf-plot.py (renamed from tests/migration/guestperf-plot.py)0
-rwxr-xr-xtests/migration-stress/guestperf.py (renamed from tests/migration/guestperf.py)0
-rw-r--r--tests/migration-stress/guestperf/__init__.py (renamed from tests/migration/guestperf/__init__.py)0
-rw-r--r--tests/migration-stress/guestperf/comparison.py174
-rw-r--r--tests/migration-stress/guestperf/engine.py536
-rw-r--r--tests/migration-stress/guestperf/hardware.py (renamed from tests/migration/guestperf/hardware.py)0
-rw-r--r--tests/migration-stress/guestperf/plot.py (renamed from tests/migration/guestperf/plot.py)0
-rw-r--r--tests/migration-stress/guestperf/progress.py (renamed from tests/migration/guestperf/progress.py)0
-rw-r--r--tests/migration-stress/guestperf/report.py118
-rw-r--r--tests/migration-stress/guestperf/scenario.py115
-rw-r--r--tests/migration-stress/guestperf/shell.py300
-rw-r--r--tests/migration-stress/guestperf/timings.py (renamed from tests/migration/guestperf/timings.py)0
-rwxr-xr-xtests/migration-stress/initrd-stress.sh (renamed from tests/migration/initrd-stress.sh)0
-rw-r--r--tests/migration-stress/meson.build (renamed from tests/migration/meson.build)0
-rw-r--r--tests/migration-stress/stress.c (renamed from tests/migration/stress.c)0
-rw-r--r--tests/migration/guestperf/comparison.py161
-rw-r--r--tests/migration/guestperf/engine.py505
-rw-r--r--tests/migration/guestperf/report.py98
-rw-r--r--tests/migration/guestperf/scenario.py112
-rw-r--r--tests/migration/guestperf/shell.py296
-rw-r--r--tests/migration/migration-test.h36
-rw-r--r--tests/plugin/inline.c302
-rw-r--r--tests/plugin/insn.c305
-rw-r--r--tests/plugin/mem.c139
-rw-r--r--tests/plugin/meson.build22
-rw-r--r--tests/plugin/syscall.c144
-rw-r--r--tests/qapi-schema/alternate-array.out1
-rw-r--r--tests/qapi-schema/comments.out1
-rw-r--r--tests/qapi-schema/doc-good.json6
-rw-r--r--tests/qapi-schema/doc-good.out16
-rw-r--r--tests/qapi-schema/doc-good.txt2
-rw-r--r--tests/qapi-schema/empty.out1
-rw-r--r--tests/qapi-schema/features-too-many.err2
-rw-r--r--tests/qapi-schema/features-too-many.json13
-rw-r--r--tests/qapi-schema/features-too-many.out0
-rw-r--r--tests/qapi-schema/include-repetition.out1
-rw-r--r--tests/qapi-schema/include-simple.out1
-rw-r--r--tests/qapi-schema/indented-expr.out1
-rw-r--r--tests/qapi-schema/meson.build1
-rw-r--r--tests/qapi-schema/qapi-schema-test.out1
-rwxr-xr-xtests/qapi-schema/test-qapi.py13
-rwxr-xr-xtests/qemu-iotests/0242
-rw-r--r--tests/qemu-iotests/024.out1
-rwxr-xr-xtests/qemu-iotests/0414
-rw-r--r--tests/qemu-iotests/051.pc.out2
-rwxr-xr-xtests/qemu-iotests/1061
-rwxr-xr-xtests/qemu-iotests/1252
-rwxr-xr-xtests/qemu-iotests/1654
-rw-r--r--tests/qemu-iotests/172.out60
-rwxr-xr-xtests/qemu-iotests/1751
-rw-r--r--tests/qemu-iotests/184.out2
-rw-r--r--tests/qemu-iotests/191.out16
-rwxr-xr-xtests/qemu-iotests/1947
-rw-r--r--tests/qemu-iotests/194.out5
-rw-r--r--tests/qemu-iotests/203.out1
-rw-r--r--tests/qemu-iotests/211.out6
-rwxr-xr-xtests/qemu-iotests/2211
-rw-r--r--tests/qemu-iotests/233.out12
-rw-r--r--tests/qemu-iotests/234.out2
-rwxr-xr-xtests/qemu-iotests/2402
-rw-r--r--tests/qemu-iotests/240.out4
-rwxr-xr-xtests/qemu-iotests/2505
-rwxr-xr-xtests/qemu-iotests/2531
-rw-r--r--tests/qemu-iotests/262.out1
-rw-r--r--tests/qemu-iotests/273.out5
-rw-r--r--tests/qemu-iotests/280.out1
-rwxr-xr-xtests/qemu-iotests/30219
-rwxr-xr-xtests/qemu-iotests/3085
-rwxr-xr-xtests/qemu-iotests/check2
-rw-r--r--tests/qemu-iotests/common.rc36
-rw-r--r--tests/qemu-iotests/fat16.py690
-rw-r--r--tests/qemu-iotests/iotests.py31
-rw-r--r--tests/qemu-iotests/pylintrc1
-rw-r--r--tests/qemu-iotests/testenv.py5
-rwxr-xr-xtests/qemu-iotests/tests/backup-discard-source39
-rwxr-xr-xtests/qemu-iotests/tests/commit-zero-blocks96
-rw-r--r--tests/qemu-iotests/tests/commit-zero-blocks.out54
-rwxr-xr-xtests/qemu-iotests/tests/copy-before-write98
-rw-r--r--tests/qemu-iotests/tests/copy-before-write.out4
-rwxr-xr-xtests/qemu-iotests/tests/graph-changes-while-io102
-rw-r--r--tests/qemu-iotests/tests/graph-changes-while-io.out4
-rwxr-xr-xtests/qemu-iotests/tests/inactive-node-nbd303
-rw-r--r--tests/qemu-iotests/tests/inactive-node-nbd.out239
-rwxr-xr-xtests/qemu-iotests/tests/migrate-bitmaps-test7
-rwxr-xr-xtests/qemu-iotests/tests/mirror-sparse128
-rw-r--r--tests/qemu-iotests/tests/mirror-sparse.out365
-rwxr-xr-xtests/qemu-iotests/tests/qcow2-encryption75
-rw-r--r--tests/qemu-iotests/tests/qcow2-encryption.out32
-rwxr-xr-xtests/qemu-iotests/tests/qsd-migrate140
-rw-r--r--tests/qemu-iotests/tests/qsd-migrate.out59
-rwxr-xr-xtests/qemu-iotests/tests/vvfat485
-rwxr-xr-xtests/qemu-iotests/tests/vvfat.out5
-rwxr-xr-xtests/qemu-iotests/tests/write-zeroes-unmap1
-rw-r--r--tests/qtest/acpi-utils.c1
-rw-r--r--tests/qtest/adm1266-test.c4
-rw-r--r--tests/qtest/adm1272-test.c4
-rw-r--r--tests/qtest/ahci-test.c3
-rw-r--r--tests/qtest/arm-cpu-features.c19
-rw-r--r--tests/qtest/aspeed-hace-utils.c646
-rw-r--r--tests/qtest/aspeed-hace-utils.h84
-rw-r--r--tests/qtest/aspeed-smc-utils.c686
-rw-r--r--tests/qtest/aspeed-smc-utils.h95
-rw-r--r--tests/qtest/aspeed_gpio-test.c2
-rw-r--r--tests/qtest/aspeed_hace-test.c577
-rw-r--r--tests/qtest/aspeed_smc-test.c778
-rw-r--r--tests/qtest/ast2700-gpio-test.c95
-rw-r--r--tests/qtest/ast2700-hace-test.c98
-rw-r--r--tests/qtest/ast2700-smc-test.c72
-rw-r--r--tests/qtest/bcm2835-i2c-test.c2
-rw-r--r--tests/qtest/bios-tables-test.c186
-rw-r--r--tests/qtest/boot-order-test.c13
-rw-r--r--tests/qtest/boot-serial-test.c25
-rw-r--r--tests/qtest/cdrom-test.c103
-rw-r--r--tests/qtest/cmsdk-apb-watchdog-test.c328
-rw-r--r--tests/qtest/cpu-plug-test.c28
-rw-r--r--tests/qtest/dbus-display-test.c72
-rw-r--r--tests/qtest/device-introspect-test.c6
-rw-r--r--tests/qtest/device-plug-test.c15
-rw-r--r--tests/qtest/dm163-test.c2
-rw-r--r--tests/qtest/drive_del-test.c11
-rw-r--r--tests/qtest/emc141x-test.c2
-rw-r--r--tests/qtest/fdc-test.c4
-rw-r--r--tests/qtest/fuzz/fuzz.c7
-rw-r--r--tests/qtest/fuzz/generic_fuzz.c8
-rw-r--r--tests/qtest/fuzz/qos_fuzz.c2
-rw-r--r--tests/qtest/fuzz/qtest_wrappers.c2
-rw-r--r--tests/qtest/fw_cfg-test.c6
-rw-r--r--tests/qtest/hd-geo-test.c82
-rw-r--r--tests/qtest/ide-test.c2
-rw-r--r--tests/qtest/intel-iommu-test.c64
-rw-r--r--tests/qtest/ipmi-bt-test.c4
-rw-r--r--tests/qtest/ipmi-kcs-test.c4
-rw-r--r--tests/qtest/isl_pmbus_vr-test.c4
-rw-r--r--tests/qtest/libqmp.c4
-rw-r--r--tests/qtest/libqmp.h2
-rw-r--r--tests/qtest/libqos/arm-imx25-pdk-machine.c5
-rw-r--r--tests/qtest/libqos/arm-n800-machine.c92
-rw-r--r--tests/qtest/libqos/fw_cfg.c202
-rw-r--r--tests/qtest/libqos/fw_cfg.h6
-rw-r--r--tests/qtest/libqos/generic-pcihost.c2
-rw-r--r--tests/qtest/libqos/i2c-imx.c4
-rw-r--r--tests/qtest/libqos/igb.c4
-rw-r--r--tests/qtest/libqos/libqos-malloc.c1
-rw-r--r--tests/qtest/libqos/libqos.c5
-rw-r--r--tests/qtest/libqos/meson.build9
-rw-r--r--tests/qtest/libqos/pci-pc.c2
-rw-r--r--tests/qtest/libqos/pci.c2
-rw-r--r--tests/qtest/libqos/qgraph.h2
-rw-r--r--tests/qtest/libqos/qos_external.c8
-rw-r--r--tests/qtest/libqos/riscv-iommu.c76
-rw-r--r--tests/qtest/libqos/riscv-iommu.h101
-rw-r--r--tests/qtest/libqos/virtio-9p-client.c52
-rw-r--r--tests/qtest/libqos/virtio-9p-client.h34
-rw-r--r--tests/qtest/libqos/virtio-pci-modern.c6
-rw-r--r--tests/qtest/libqos/virtio-pci.c6
-rw-r--r--tests/qtest/libqos/virtio-scmi.c2
-rw-r--r--tests/qtest/libqos/virtio.c48
-rw-r--r--tests/qtest/libqtest.c278
-rw-r--r--tests/qtest/libqtest.h78
-rw-r--r--tests/qtest/lsm303dlhc-mag-test.c2
-rw-r--r--tests/qtest/m48t59-test.c5
-rw-r--r--tests/qtest/machine-none-test.c3
-rw-r--r--tests/qtest/max34451-test.c4
-rw-r--r--tests/qtest/meson.build120
-rw-r--r--tests/qtest/migration-helpers.c533
-rw-r--r--tests/qtest/migration-helpers.h68
-rw-r--r--tests/qtest/migration-test.c4034
-rw-r--r--tests/qtest/migration/Makefile (renamed from tests/migration/Makefile)0
-rw-r--r--tests/qtest/migration/aarch64/Makefile (renamed from tests/migration/aarch64/Makefile)0
-rw-r--r--tests/qtest/migration/aarch64/a-b-kernel.S (renamed from tests/migration/aarch64/a-b-kernel.S)0
-rw-r--r--tests/qtest/migration/aarch64/a-b-kernel.h (renamed from tests/migration/aarch64/a-b-kernel.h)0
-rw-r--r--tests/qtest/migration/bootfile.c70
-rw-r--r--tests/qtest/migration/bootfile.h39
-rw-r--r--tests/qtest/migration/compression-tests.c226
-rw-r--r--tests/qtest/migration/cpr-tests.c136
-rw-r--r--tests/qtest/migration/file-tests.c341
-rw-r--r--tests/qtest/migration/framework.c1066
-rw-r--r--tests/qtest/migration/framework.h251
-rw-r--r--tests/qtest/migration/i386/Makefile (renamed from tests/migration/i386/Makefile)0
-rw-r--r--tests/qtest/migration/i386/a-b-bootblock.S (renamed from tests/migration/i386/a-b-bootblock.S)0
-rw-r--r--tests/qtest/migration/i386/a-b-bootblock.h (renamed from tests/migration/i386/a-b-bootblock.h)0
-rw-r--r--tests/qtest/migration/migration-qmp.c520
-rw-r--r--tests/qtest/migration/migration-qmp.h48
-rw-r--r--tests/qtest/migration/migration-util.c398
-rw-r--r--tests/qtest/migration/migration-util.h59
-rw-r--r--tests/qtest/migration/misc-tests.c297
-rw-r--r--tests/qtest/migration/postcopy-tests.c149
-rw-r--r--tests/qtest/migration/ppc64/Makefile (renamed from tests/migration/ppc64/Makefile)0
-rw-r--r--tests/qtest/migration/ppc64/a-b-kernel.S (renamed from tests/migration/ppc64/a-b-kernel.S)0
-rw-r--r--tests/qtest/migration/ppc64/a-b-kernel.h (renamed from tests/migration/ppc64/a-b-kernel.h)0
-rw-r--r--tests/qtest/migration/precopy-tests.c1337
-rw-r--r--tests/qtest/migration/s390x/Makefile (renamed from tests/migration/s390x/Makefile)0
-rw-r--r--tests/qtest/migration/s390x/a-b-bios.c (renamed from tests/migration/s390x/a-b-bios.c)0
-rw-r--r--tests/qtest/migration/s390x/a-b-bios.h (renamed from tests/migration/s390x/a-b-bios.h)0
-rw-r--r--tests/qtest/migration/tls-tests.c871
-rw-r--r--tests/qtest/netdev-socket.c4
-rw-r--r--tests/qtest/npcm7xx_adc-test.c2
-rw-r--r--tests/qtest/npcm7xx_emc-test.c4
-rw-r--r--tests/qtest/npcm7xx_pwm-test.c4
-rw-r--r--tests/qtest/npcm7xx_timer-test.c1
-rw-r--r--tests/qtest/npcm7xx_watchdog_timer-test.c2
-rw-r--r--tests/qtest/npcm_gmac-test.c85
-rw-r--r--tests/qtest/numa-test.c14
-rw-r--r--tests/qtest/pnv-host-i2c-test.c4
-rw-r--r--tests/qtest/pnv-spi-seeprom-test.c110
-rw-r--r--tests/qtest/pnv-xive2-common.c190
-rw-r--r--tests/qtest/pnv-xive2-common.h112
-rw-r--r--tests/qtest/pnv-xive2-flush-sync.c205
-rw-r--r--tests/qtest/pnv-xive2-nvpg_bar.c152
-rw-r--r--tests/qtest/pnv-xive2-test.c585
-rw-r--r--tests/qtest/pnv-xscom.h2
-rw-r--r--tests/qtest/pvpanic-pci-test.c2
-rw-r--r--tests/qtest/pvpanic-test.c2
-rw-r--r--tests/qtest/q35-test.c51
-rw-r--r--tests/qtest/qmp-cmd-test.c3
-rw-r--r--tests/qtest/qmp-test.c6
-rw-r--r--tests/qtest/qom-test.c15
-rw-r--r--tests/qtest/qos-test.c5
-rw-r--r--tests/qtest/readconfig-test.c6
-rw-r--r--tests/qtest/riscv-csr-test.c56
-rw-r--r--tests/qtest/riscv-iommu-test.c210
-rw-r--r--tests/qtest/rs5c372-test.c43
-rw-r--r--tests/qtest/rtl8139-test.c2
-rw-r--r--tests/qtest/stm32l4x5.h42
-rw-r--r--tests/qtest/stm32l4x5_gpio-test.c33
-rw-r--r--tests/qtest/stm32l4x5_syscfg-test.c32
-rw-r--r--tests/qtest/stm32l4x5_usart-test.c72
-rw-r--r--tests/qtest/tco-test.c2
-rw-r--r--tests/qtest/test-filter-mirror.c2
-rw-r--r--tests/qtest/test-filter-redirector.c2
-rw-r--r--tests/qtest/test-netfilter.c2
-rw-r--r--tests/qtest/test-x86-cpuid-compat.c41
-rw-r--r--tests/qtest/tmp105-test.c6
-rw-r--r--tests/qtest/tpm-emu.c4
-rw-r--r--tests/qtest/tpm-emu.h2
-rw-r--r--tests/qtest/tpm-tests.c2
-rw-r--r--tests/qtest/tpm-util.c2
-rw-r--r--tests/qtest/ufs-test.c760
-rw-r--r--tests/qtest/vhost-user-test.c9
-rw-r--r--tests/qtest/virtio-9p-test.c61
-rw-r--r--tests/qtest/virtio-balloon-test.c57
-rw-r--r--tests/qtest/virtio-iommu-test.c4
-rw-r--r--tests/qtest/virtio-net-failover.c17
-rw-r--r--tests/qtest/virtio-net-test.c2
-rw-r--r--tests/qtest/vmcoreinfo-test.c90
-rw-r--r--tests/qtest/vmgenid-test.c2
-rw-r--r--tests/qtest/wdt_ib700-test.c2
-rw-r--r--tests/tcg/Makefile.target29
-rw-r--r--tests/tcg/README23
-rw-r--r--tests/tcg/aarch64/Makefile.softmmu-target57
-rw-r--r--tests/tcg/aarch64/Makefile.target6
-rw-r--r--tests/tcg/aarch64/gdbstub/test-mte.py75
-rw-r--r--tests/tcg/aarch64/system/boot.S188
-rw-r--r--tests/tcg/aarch64/system/feat-xs.c27
-rw-r--r--tests/tcg/aarch64/system/kernel.ld33
-rw-r--r--tests/tcg/aarch64/system/mte.S109
-rw-r--r--tests/tcg/aarch64_be/Makefile.target17
-rw-r--r--tests/tcg/aarch64_be/hello.c35
-rw-r--r--tests/tcg/alpha/Makefile.softmmu-target4
-rw-r--r--tests/tcg/alpha/Makefile.target3
-rw-r--r--tests/tcg/arm/Makefile.softmmu-target2
-rw-r--r--tests/tcg/arm/Makefile.target10
-rw-r--r--tests/tcg/arm/README5
-rw-r--r--tests/tcg/arm/test-arm-iwmmxt.S49
-rw-r--r--tests/tcg/cris/.gdbinit11
-rw-r--r--tests/tcg/cris/Makefile.target62
-rw-r--r--tests/tcg/cris/README1
-rw-r--r--tests/tcg/cris/bare/check_addcv17.s65
-rw-r--r--tests/tcg/cris/bare/check_addi.s57
-rw-r--r--tests/tcg/cris/bare/check_addiv32.s62
-rw-r--r--tests/tcg/cris/bare/check_addm.s96
-rw-r--r--tests/tcg/cris/bare/check_addq.s47
-rw-r--r--tests/tcg/cris/bare/check_addr.s96
-rw-r--r--tests/tcg/cris/bare/check_addxc.s91
-rw-r--r--tests/tcg/cris/bare/check_addxm.s106
-rw-r--r--tests/tcg/cris/bare/check_addxr.s96
-rw-r--r--tests/tcg/cris/bare/check_andc.s80
-rw-r--r--tests/tcg/cris/bare/check_andm.s90
-rw-r--r--tests/tcg/cris/bare/check_andq.s46
-rw-r--r--tests/tcg/cris/bare/check_andr.s95
-rw-r--r--tests/tcg/cris/bare/check_asr.s230
-rw-r--r--tests/tcg/cris/bare/check_ba.s93
-rw-r--r--tests/tcg/cris/bare/check_bas.s102
-rw-r--r--tests/tcg/cris/bare/check_bcc.s197
-rw-r--r--tests/tcg/cris/bare/check_boundc.s101
-rw-r--r--tests/tcg/cris/bare/check_boundr.s125
-rw-r--r--tests/tcg/cris/bare/check_btst.s96
-rw-r--r--tests/tcg/cris/bare/check_clearfv32.s19
-rw-r--r--tests/tcg/cris/bare/check_clrjmp1.s36
-rw-r--r--tests/tcg/cris/bare/check_cmp-2.s15
-rw-r--r--tests/tcg/cris/bare/check_cmpc.s86
-rw-r--r--tests/tcg/cris/bare/check_cmpm.s96
-rw-r--r--tests/tcg/cris/bare/check_cmpq.s75
-rw-r--r--tests/tcg/cris/bare/check_cmpr.s102
-rw-r--r--tests/tcg/cris/bare/check_cmpxc.s92
-rw-r--r--tests/tcg/cris/bare/check_cmpxm.s106
-rw-r--r--tests/tcg/cris/bare/check_dstep.s42
-rw-r--r--tests/tcg/cris/bare/check_jsr.s85
-rw-r--r--tests/tcg/cris/bare/check_lapc.s78
-rw-r--r--tests/tcg/cris/bare/check_lsl.s217
-rw-r--r--tests/tcg/cris/bare/check_lsr.s218
-rw-r--r--tests/tcg/cris/bare/check_mcp.s49
-rw-r--r--tests/tcg/cris/bare/check_movdelsr1.s33
-rw-r--r--tests/tcg/cris/bare/check_movecr.s37
-rw-r--r--tests/tcg/cris/bare/check_movei.s50
-rw-r--r--tests/tcg/cris/bare/check_movemr.s78
-rw-r--r--tests/tcg/cris/bare/check_movemrv32.s96
-rw-r--r--tests/tcg/cris/bare/check_mover.s28
-rw-r--r--tests/tcg/cris/bare/check_moverm.s45
-rw-r--r--tests/tcg/cris/bare/check_movmp.s131
-rw-r--r--tests/tcg/cris/bare/check_movpmv32.s35
-rw-r--r--tests/tcg/cris/bare/check_movpr.s28
-rw-r--r--tests/tcg/cris/bare/check_movprv32.s21
-rw-r--r--tests/tcg/cris/bare/check_movscr.s29
-rw-r--r--tests/tcg/cris/bare/check_movsm.s44
-rw-r--r--tests/tcg/cris/bare/check_movsr.s46
-rw-r--r--tests/tcg/cris/bare/check_movucr.s33
-rw-r--r--tests/tcg/cris/bare/check_movum.s40
-rw-r--r--tests/tcg/cris/bare/check_movur.s45
-rw-r--r--tests/tcg/cris/bare/check_mulv32.s51
-rw-r--r--tests/tcg/cris/bare/check_mulx.s257
-rw-r--r--tests/tcg/cris/bare/check_neg.s104
-rw-r--r--tests/tcg/cris/bare/check_not.s31
-rw-r--r--tests/tcg/cris/bare/check_orc.s71
-rw-r--r--tests/tcg/cris/bare/check_orm.s75
-rw-r--r--tests/tcg/cris/bare/check_orq.s41
-rw-r--r--tests/tcg/cris/bare/check_orr.s84
-rw-r--r--tests/tcg/cris/bare/check_ret.s25
-rw-r--r--tests/tcg/cris/bare/check_scc.s95
-rw-r--r--tests/tcg/cris/bare/check_subc.s87
-rw-r--r--tests/tcg/cris/bare/check_subm.s96
-rw-r--r--tests/tcg/cris/bare/check_subq.s52
-rw-r--r--tests/tcg/cris/bare/check_subr.s102
-rw-r--r--tests/tcg/cris/bare/check_xarith.s72
-rw-r--r--tests/tcg/cris/bare/crt.s13
-rw-r--r--tests/tcg/cris/bare/sys.c63
-rw-r--r--tests/tcg/cris/bare/testutils.inc117
-rw-r--r--tests/tcg/cris/libc/check_abs.c40
-rw-r--r--tests/tcg/cris/libc/check_addc.c58
-rw-r--r--tests/tcg/cris/libc/check_addcm.c85
-rw-r--r--tests/tcg/cris/libc/check_addo.c125
-rw-r--r--tests/tcg/cris/libc/check_addoq.c44
-rw-r--r--tests/tcg/cris/libc/check_bound.c142
-rw-r--r--tests/tcg/cris/libc/check_ftag.c37
-rw-r--r--tests/tcg/cris/libc/check_gcctorture_pr28634-1.c15
-rw-r--r--tests/tcg/cris/libc/check_gcctorture_pr28634.c15
-rw-r--r--tests/tcg/cris/libc/check_glibc_kernelversion.c116
-rw-r--r--tests/tcg/cris/libc/check_hello.c7
-rw-r--r--tests/tcg/cris/libc/check_int64.c47
-rw-r--r--tests/tcg/cris/libc/check_lz.c49
-rw-r--r--tests/tcg/cris/libc/check_mapbrk.c39
-rw-r--r--tests/tcg/cris/libc/check_mmap1.c48
-rw-r--r--tests/tcg/cris/libc/check_mmap2.c48
-rw-r--r--tests/tcg/cris/libc/check_mmap3.c33
-rw-r--r--tests/tcg/cris/libc/check_moveq.c51
-rw-r--r--tests/tcg/cris/libc/check_openpf1.c38
-rw-r--r--tests/tcg/cris/libc/check_openpf2.c16
-rw-r--r--tests/tcg/cris/libc/check_openpf3.c49
-rw-r--r--tests/tcg/cris/libc/check_openpf5.c56
-rw-r--r--tests/tcg/cris/libc/check_settls1.c45
-rw-r--r--tests/tcg/cris/libc/check_sigalrm.c26
-rw-r--r--tests/tcg/cris/libc/check_stat1.c16
-rw-r--r--tests/tcg/cris/libc/check_stat2.c20
-rw-r--r--tests/tcg/cris/libc/check_stat3.c25
-rw-r--r--tests/tcg/cris/libc/check_stat4.c27
-rw-r--r--tests/tcg/cris/libc/check_swap.c76
-rw-r--r--tests/tcg/cris/libc/check_time2.c18
-rw-r--r--tests/tcg/cris/libc/crisutils.h76
-rw-r--r--tests/tcg/cris/libc/sys.h18
-rw-r--r--tests/tcg/hexagon/usr.c12
-rw-r--r--tests/tcg/i386/Makefile.softmmu-target2
-rw-r--r--tests/tcg/i386/Makefile.target2
-rw-r--r--tests/tcg/i386/test-avx.c2
-rw-r--r--tests/tcg/i386/test-i386-adcox.c6
-rw-r--r--tests/tcg/loongarch64/Makefile.softmmu-target4
-rw-r--r--tests/tcg/loongarch64/system/kernel.ld2
-rw-r--r--tests/tcg/loongarch64/system/regdef.h2
-rw-r--r--tests/tcg/multiarch/Makefile.target31
-rwxr-xr-xtests/tcg/multiarch/check-plugin-output.sh36
-rw-r--r--tests/tcg/multiarch/gdbstub/interrupt.py4
-rw-r--r--tests/tcg/multiarch/gdbstub/late-attach.py28
-rw-r--r--tests/tcg/multiarch/gdbstub/prot-none.py4
-rw-r--r--tests/tcg/multiarch/gdbstub/test-proc-mappings.py19
-rw-r--r--tests/tcg/multiarch/late-attach.c41
-rw-r--r--tests/tcg/multiarch/linux/linux-sigrtminmax.c74
-rw-r--r--tests/tcg/multiarch/linux/test-vma.c (renamed from tests/tcg/multiarch/test-vma.c)0
-rw-r--r--tests/tcg/multiarch/sigreturn-sigmask.c51
-rw-r--r--tests/tcg/multiarch/system/Makefile.softmmu-target6
-rw-r--r--tests/tcg/multiarch/system/memory.c122
-rwxr-xr-xtests/tcg/multiarch/system/validate-memory-counts.py130
-rw-r--r--tests/tcg/multiarch/test-plugin-mem-access.c177
-rw-r--r--tests/tcg/plugins/bb.c (renamed from tests/plugin/bb.c)0
-rw-r--r--tests/tcg/plugins/empty.c (renamed from tests/plugin/empty.c)0
-rw-r--r--tests/tcg/plugins/inline.c310
-rw-r--r--tests/tcg/plugins/insn.c303
-rw-r--r--tests/tcg/plugins/mem.c405
-rw-r--r--tests/tcg/plugins/meson.build23
-rw-r--r--tests/tcg/plugins/reset.c73
-rw-r--r--tests/tcg/plugins/syscall.c260
-rw-r--r--tests/tcg/ppc64/Makefile.target27
-rw-r--r--tests/tcg/riscv64/Makefile.softmmu-target2
-rw-r--r--tests/tcg/s390x/Makefile.softmmu-target16
-rw-r--r--tests/tcg/s390x/Makefile.target6
-rw-r--r--tests/tcg/s390x/console.c3
-rw-r--r--tests/tcg/s390x/ex-smc.c57
-rw-r--r--tests/tcg/s390x/float.h104
-rw-r--r--tests/tcg/s390x/fma.c233
-rw-r--r--tests/tcg/s390x/mvc-smc.c82
-rw-r--r--tests/tcg/s390x/vfminmax.c223
-rw-r--r--tests/tcg/x86_64/Makefile.softmmu-target2
-rw-r--r--tests/tcg/x86_64/Makefile.target6
-rw-r--r--tests/tcg/x86_64/cross-modifying-code.c80
-rw-r--r--tests/tcg/x86_64/fma.c116
-rw-r--r--tests/tcg/x86_64/test-2175.c24
-rw-r--r--tests/uefi-test-tools/Makefile5
-rw-r--r--tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc6
-rw-r--r--tests/uefi-test-tools/uefi-test-build.config10
-rw-r--r--tests/unit/check-block-qdict.c4
-rw-r--r--tests/unit/check-qdict.c6
-rw-r--r--tests/unit/check-qjson.c12
-rw-r--r--tests/unit/check-qlist.c4
-rw-r--r--tests/unit/check-qlit.c12
-rw-r--r--tests/unit/check-qnull.c2
-rw-r--r--tests/unit/check-qnum.c2
-rw-r--r--tests/unit/check-qobject.c12
-rw-r--r--tests/unit/check-qom-interface.c4
-rw-r--r--tests/unit/check-qom-proplist.c14
-rw-r--r--tests/unit/check-qstring.c2
-rw-r--r--tests/unit/crypto-tls-psk-helpers.c1
-rw-r--r--tests/unit/crypto-tls-x509-helpers.c19
-rw-r--r--tests/unit/crypto-tls-x509-helpers.h9
-rw-r--r--tests/unit/meson.build13
-rw-r--r--tests/unit/pkix_asn1_tab.c1105
-rw-r--r--tests/unit/pkix_asn1_tab.c.inc1102
-rw-r--r--tests/unit/ptimer-test.c33
-rw-r--r--tests/unit/socket-helpers.c1
-rw-r--r--tests/unit/test-aio-multithread.c6
-rw-r--r--tests/unit/test-bdrv-drain.c60
-rw-r--r--tests/unit/test-bdrv-graph-mod.c12
-rw-r--r--tests/unit/test-block-backend.c2
-rw-r--r--tests/unit/test-block-iothread.c8
-rw-r--r--tests/unit/test-blockjob-txn.c4
-rw-r--r--tests/unit/test-blockjob.c4
-rw-r--r--tests/unit/test-char.c437
-rw-r--r--tests/unit/test-crypto-afsplit.c10
-rw-r--r--tests/unit/test-crypto-akcipher.c54
-rw-r--r--tests/unit/test-crypto-block.c65
-rw-r--r--tests/unit/test-crypto-cipher.c79
-rw-r--r--tests/unit/test-crypto-hash.c111
-rw-r--r--tests/unit/test-crypto-hmac.c30
-rw-r--r--tests/unit/test-crypto-ivgen.c38
-rw-r--r--tests/unit/test-crypto-pbkdf.c73
-rw-r--r--tests/unit/test-crypto-secret.c28
-rw-r--r--tests/unit/test-crypto-tlssession.c42
-rw-r--r--tests/unit/test-fifo.c449
-rw-r--r--tests/unit/test-forward-visitor.c4
-rw-r--r--tests/unit/test-image-locking.c4
-rw-r--r--tests/unit/test-io-channel-socket.c6
-rw-r--r--tests/unit/test-keyval.c6
-rw-r--r--tests/unit/test-qdev-global-props.c33
-rw-r--r--tests/unit/test-qemu-opts.c4
-rw-r--r--tests/unit/test-qga.c21
-rw-r--r--tests/unit/test-qgraph.c1
-rw-r--r--tests/unit/test-qmp-cmds.c8
-rw-r--r--tests/unit/test-qmp-event.c10
-rw-r--r--tests/unit/test-qobject-input-visitor.c16
-rw-r--r--tests/unit/test-qobject-output-visitor.c16
-rw-r--r--tests/unit/test-replication.c4
-rw-r--r--tests/unit/test-resv-mem.c2
-rw-r--r--tests/unit/test-seccomp.c2
-rw-r--r--tests/unit/test-smp-parse.c23
-rw-r--r--tests/unit/test-thread-pool.c6
-rw-r--r--tests/unit/test-throttle.c2
-rw-r--r--tests/unit/test-timed-average.c2
-rw-r--r--tests/unit/test-util-sockets.c239
-rw-r--r--tests/unit/test-visitor-serialization.c4
-rw-r--r--tests/unit/test-xs-node.c4
-rw-r--r--tests/unit/test-yank.c2
-rw-r--r--tests/vm/Makefile.include29
-rw-r--r--tests/vm/README2
-rw-r--r--tests/vm/basevm.py14
-rwxr-xr-xtests/vm/freebsd6
-rw-r--r--tests/vm/generated/freebsd.json22
-rwxr-xr-xtests/vm/openbsd4
-rw-r--r--tools/i386/qemu-vmsr-helper.c537
-rw-r--r--tools/i386/rapl-msr-index.h28
-rw-r--r--trace-events7
-rw-r--r--trace/control-target.c3
-rw-r--r--trace/control.c1
-rw-r--r--trace/meson.build9
-rw-r--r--trace/simple.c2
-rw-r--r--trace/trace-hmp-cmds.c2
-rw-r--r--ui/clipboard.c68
-rw-r--r--ui/cocoa.m69
-rw-r--r--ui/console-vc.c168
-rw-r--r--ui/console.c95
-rw-r--r--ui/curses.c4
-rw-r--r--ui/cursor.c26
-rw-r--r--ui/dbus-chardev.c4
-rw-r--r--ui/dbus-clipboard.c6
-rw-r--r--ui/dbus-console.c31
-rw-r--r--ui/dbus-display1.xml110
-rw-r--r--ui/dbus-listener.c290
-rw-r--r--ui/dbus.c28
-rw-r--r--ui/dmabuf.c80
-rw-r--r--ui/egl-context.c2
-rw-r--r--ui/egl-headless.c2
-rw-r--r--ui/egl-helpers.c127
-rw-r--r--ui/gtk-clipboard.c13
-rw-r--r--ui/gtk-egl.c60
-rw-r--r--ui/gtk-gl-area.c55
-rw-r--r--ui/gtk.c266
-rw-r--r--ui/input-barrier.c6
-rw-r--r--ui/input-legacy.c37
-rw-r--r--ui/input-linux.c7
-rw-r--r--ui/input.c42
-rw-r--r--ui/meson.build7
-rw-r--r--ui/qemu-pixman.c87
-rw-r--r--ui/sdl2-gl.c10
-rw-r--r--ui/sdl2-input.c5
-rw-r--r--ui/sdl2.c77
-rw-r--r--ui/spice-app.c6
-rw-r--r--ui/spice-core.c6
-rw-r--r--ui/spice-display.c108
-rw-r--r--ui/trace-events6
-rw-r--r--ui/ui-hmp-cmds.c2
-rw-r--r--ui/vdagent.c216
-rw-r--r--ui/vnc-auth-sasl.c75
-rw-r--r--ui/vnc-enc-tight.c20
-rw-r--r--ui/vnc-enc-zrle.c2
-rw-r--r--ui/vnc-jobs.c2
-rw-r--r--ui/vnc.c38
-rw-r--r--ui/vnc.h9
-rw-r--r--ui/win32-kbd-hook.c2
-rw-r--r--util/aio-posix.c115
-rw-r--r--util/aio-posix.h1
-rw-r--r--util/aio-win32.c1
-rw-r--r--util/async.c12
-rw-r--r--util/block-helpers.c28
-rw-r--r--util/block-helpers.h3
-rw-r--r--util/cacheflush.c8
-rw-r--r--util/coroutine-wasm.c127
-rw-r--r--util/cpuinfo-aarch64.c9
-rw-r--r--util/cpuinfo-i386.c1
-rw-r--r--util/cpuinfo-ppc.c5
-rw-r--r--util/cpuinfo-riscv.c51
-rw-r--r--util/cutils.c5
-rw-r--r--util/envlist.c69
-rw-r--r--util/error.c31
-rw-r--r--util/event.c171
-rw-r--r--util/fdmon-epoll.c1
-rw-r--r--util/fifo8.c84
-rw-r--r--util/getauxval.c9
-rw-r--r--util/hbitmap.c2
-rw-r--r--util/hexdump.c18
-rw-r--r--util/iov.c30
-rw-r--r--util/iova-tree.c69
-rw-r--r--util/keyval.c6
-rw-r--r--util/lockcnt.c10
-rw-r--r--util/log.c2
-rw-r--r--util/main-loop.c8
-rw-r--r--util/memfd.c25
-rw-r--r--util/meson.build11
-rw-r--r--util/module.c4
-rw-r--r--util/oslib-posix.c221
-rw-r--r--util/oslib-win32.c6
-rw-r--r--util/qemu-co-shared-resource.c6
-rw-r--r--util/qemu-config.c4
-rw-r--r--util/qemu-coroutine.c2
-rw-r--r--util/qemu-option.c8
-rw-r--r--util/qemu-sockets.c363
-rw-r--r--util/qemu-thread-posix.c148
-rw-r--r--util/qemu-thread-win32.c129
-rw-r--r--util/qemu-timer.c27
-rw-r--r--util/qht.c1
-rw-r--r--util/rcu.c4
-rw-r--r--util/s390x_pci_mmio.c146
-rw-r--r--util/thread-context.c4
-rw-r--r--util/thread-pool.c184
-rw-r--r--util/timed-average.c4
-rw-r--r--util/trace-events6
-rw-r--r--util/userfaultfd.c49
-rw-r--r--util/vfio-helpers.c2
5151 files changed, 262511 insertions, 198748 deletions
diff --git a/.b4-config b/.b4-config
new file mode 100644
index 0000000..4b9b2fe
--- /dev/null
+++ b/.b4-config
@@ -0,0 +1,14 @@
+#
+# Common b4 settings that can be used to send patches to QEMU upstream.
+# https://b4.docs.kernel.org/
+#
+
+[b4]
+ send-series-to = qemu-devel@nongnu.org
+ send-auto-to-cmd = echo
+ send-auto-cc-cmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback
+ am-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback -
+ prep-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback -
+ searchmask = https://lore.kernel.org/qemu-devel/?x=m&t=1&q=%s
+ linkmask = https://lore.kernel.org/qemu-devel/%s
+ linktrailermask = Message-ID: <%s>
diff --git a/.editorconfig b/.editorconfig
index 7303759..a04cb90 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -47,3 +47,16 @@ emacs_mode = glsl
[*.json]
indent_style = space
emacs_mode = python
+
+# by default follow QEMU's style
+[*.pl]
+indent_style = space
+indent_size = 4
+emacs_mode = perl
+
+# but use kernel "style" for imported scripts
+[scripts/{kernel-doc,get_maintainer.pl,checkpatch.pl}]
+indent_style = tab
+indent_size = 8
+emacs_mode = perl
+
diff --git a/.gitattributes b/.gitattributes
index a217cb7..9ce7a19 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,3 +2,8 @@
*.h.inc diff=c
*.m diff=objc
*.py diff=python
+*.rs diff=rust
+*.rs.inc diff=rust
+Cargo.lock diff=toml merge=binary
+
+*.patch -text -whitespace
diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml
index bf3d8ef..60a24a9 100644
--- a/.gitlab-ci.d/base.yml
+++ b/.gitlab-ci.d/base.yml
@@ -69,10 +69,6 @@ variables:
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: never
- # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- when: never
-
#############################################################
# Stage 2: fine tune execution of jobs in specific scenarios
@@ -101,8 +97,8 @@ variables:
when: manual
allow_failure: true
- # Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
- - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
+ # Functional jobs can be manually started in forks
+ - if: '$QEMU_JOB_FUNCTIONAL && $QEMU_CI_FUNCTIONAL != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: manual
allow_failure: true
@@ -128,7 +124,7 @@ variables:
when: manual
# Jobs can run if any jobs they depend on were successful
- - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
+ - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml
index 8f7ebfa..038c3c9 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -8,7 +8,11 @@
key: "$CI_JOB_NAME"
when: always
before_script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start setup "Pre-script setup"
- JOBS=$(expr $(nproc) + 1)
+ - cat /packages.txt
+ - section_end setup
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
@@ -18,7 +22,9 @@
- mkdir build
- cd build
- ccache --zero-stats
+ - section_start configure "Running configure"
- ../configure --enable-werror --disable-docs --enable-fdt=system
+ --disable-debug-info
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
@@ -26,11 +32,16 @@
then
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
+ - section_end configure
+ - section_start build "Building QEMU"
- $MAKE -j"$JOBS"
+ - section_end build
+ - section_start test "Running tests"
- if test -n "$MAKE_CHECK_ARGS";
then
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
+ - section_end test
- ccache --show-stats
# We jump some hoops in common_test_job_template to avoid
@@ -53,12 +64,22 @@
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start buildenv "Setting up to run tests"
- scripts/git-submodule.sh update roms/SLOF
- - meson subprojects download $(cd build/subprojects && echo *)
+ - build/pyvenv/bin/meson subprojects download $(cd build/subprojects && echo *)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
- - $MAKE NINJA=":" $MAKE_CHECK_ARGS
+ # We also have to pre-cache the functional tests manually in this case
+ - if [ "x${QEMU_TEST_CACHE_DIR}" != "x" ]; then
+ $MAKE precache-functional ;
+ fi
+ - section_end buildenv
+ - section_start test "Running tests"
+ # doctests need all the compilation artifacts
+ - $MAKE NINJA=":" MTESTARGS="--no-suite doc" $MAKE_CHECK_ARGS
+ - section_end test
.native_test_job_template:
extends: .common_test_job_template
@@ -71,12 +92,12 @@
reports:
junit: build/meson-logs/testlog.junit.xml
-.avocado_test_job_template:
+.functional_test_job_template:
extends: .common_test_job_template
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- - ${CI_PROJECT_DIR}/avocado-cache
+ - ${CI_PROJECT_DIR}/functional-cache
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
@@ -85,21 +106,41 @@
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
+ - build/tests/functional/*/*/*.log
reports:
junit: build/tests/results/latest/results.xml
before_script:
- - mkdir -p ~/.config/avocado
- - echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
- - echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
- >> ~/.config/avocado/avocado.conf
- - echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
- >> ~/.config/avocado/avocado.conf
- - if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
- du -chs ${CI_PROJECT_DIR}/avocado-cache ;
- fi
- - export AVOCADO_ALLOW_UNTRUSTED_CODE=1
+ - export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1
+ - export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache
after_script:
- cd build
- - du -chs ${CI_PROJECT_DIR}/avocado-cache
+ - du -chs ${CI_PROJECT_DIR}/*-cache
variables:
- QEMU_JOB_AVOCADO: 1
+ QEMU_JOB_FUNCTIONAL: 1
+
+.wasm_build_job_template:
+ extends: .base_job_template
+ stage: build
+ image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
+ before_script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start setup "Pre-script setup"
+ - JOBS=$(expr $(nproc) + 1)
+ - section_end setup
+ script:
+ - du -sh .git
+ - mkdir build
+ - cd build
+ - section_start configure "Running configure"
+ - emconfigure ../configure --disable-docs
+ ${TARGETS:+--target-list="$TARGETS"}
+ $CONFIGURE_ARGS ||
+ { cat config.log meson-logs/meson-log.txt && exit 1; }
+ - if test -n "$LD_JOBS";
+ then
+ pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
+ fi || exit 1;
+ - section_end configure
+ - section_start build "Building QEMU"
+ - emmake make -j"$JOBS"
+ - section_end build
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index e3a0758..d888a60 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -22,15 +22,14 @@ check-system-alpine:
IMAGE: alpine
MAKE_CHECK_ARGS: check-unit check-qtest
-avocado-system-alpine:
- extends: .avocado_test_job_template
+functional-system-alpine:
+ extends: .functional_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
+ MAKE_CHECK_ARGS: check-functional
build-system-ubuntu:
extends:
@@ -40,9 +39,9 @@ build-system-ubuntu:
job: amd64-ubuntu2204-container
variables:
IMAGE: ubuntu2204
- CONFIGURE_ARGS: --enable-docs
+ CONFIGURE_ARGS: --enable-docs --enable-rust
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
- MAKE_CHECK_ARGS: check-build
+ MAKE_CHECK_ARGS: check-build check-doc
check-system-ubuntu:
extends: .native_test_job_template
@@ -53,15 +52,14 @@ check-system-ubuntu:
IMAGE: ubuntu2204
MAKE_CHECK_ARGS: check
-avocado-system-ubuntu:
- extends: .avocado_test_job_template
+functional-system-ubuntu:
+ extends: .functional_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2204
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el
+ MAKE_CHECK_ARGS: check-functional
build-system-debian:
extends:
@@ -71,7 +69,7 @@ build-system-debian:
job: amd64-debian-container
variables:
IMAGE: debian
- CONFIGURE_ARGS: --with-coroutine=sigaltstack
+ CONFIGURE_ARGS: --with-coroutine=sigaltstack --enable-rust
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
sparc-softmmu xtensa-softmmu
MAKE_CHECK_ARGS: check-build
@@ -85,15 +83,14 @@ check-system-debian:
IMAGE: debian
MAKE_CHECK_ARGS: check
-avocado-system-debian:
- extends: .avocado_test_job_template
+functional-system-debian:
+ extends: .functional_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
+ MAKE_CHECK_ARGS: check-functional
crash-test-debian:
extends: .native_test_job_template
@@ -115,10 +112,24 @@ build-system-fedora:
job: amd64-fedora-container
variables:
IMAGE: fedora
- CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
+ CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust
TARGETS: microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
- MAKE_CHECK_ARGS: check-build
+ MAKE_CHECK_ARGS: check-build check-doc
+
+build-system-fedora-rust-nightly:
+ extends:
+ - .native_build_job_template
+ - .native_build_artifact_template
+ needs:
+ job: amd64-fedora-rust-nightly-container
+ variables:
+ IMAGE: fedora-rust-nightly
+ CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints
+ TARGETS: aarch64-softmmu
+ MAKE_CHECK_ARGS: check-build check-doc
+
+ allow_failure: true
check-system-fedora:
extends: .native_test_job_template
@@ -129,16 +140,14 @@ check-system-fedora:
IMAGE: fedora
MAKE_CHECK_ARGS: check
-avocado-system-fedora:
- extends: .avocado_test_job_template
+functional-system-fedora:
+ extends: .functional_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
- arch:riscv32 arch:ppc arch:sparc64
+ MAKE_CHECK_ARGS: check-functional
crash-test-fedora:
extends: .native_test_job_template
@@ -174,12 +183,11 @@ build-previous-qemu:
when: on_success
expire_in: 2 days
paths:
- - build-previous
- exclude:
- - build-previous/**/*.p
- - build-previous/**/*.a.p
- - build-previous/**/*.c.o
- - build-previous/**/*.c.o.d
+ - build-previous/qemu-bundle
+ - build-previous/qemu-system-aarch64
+ - build-previous/qemu-system-x86_64
+ - build-previous/tests/qtest/migration-test
+ - build-previous/scripts
needs:
job: amd64-opensuse-leap-container
variables:
@@ -188,6 +196,12 @@ build-previous-qemu:
# Override the default flags as we need more to grab the old version
GIT_FETCH_EXTRA_FLAGS: --prune --quiet
before_script:
+ - source scripts/ci/gitlab-ci-section
+ # Skip if this series contains the release bump commit. During the
+ # release process there might be a window of commits when the
+ # version tag is not yet present in the remote and git fetch would
+ # fail.
+ - if grep -q "\.0$" VERSION; then exit 0; fi
- export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
- git remote add upstream https://gitlab.com/qemu-project/qemu
- git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
@@ -208,6 +222,9 @@ build-previous-qemu:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-build
script:
+ # Skip for round release numbers; this job is only relevant for
+ # testing a development tree.
+ - if grep -q "\.0$" VERSION; then exit 0; fi
# Use the migration-tests from the older QEMU tree. This avoids
# testing an old QEMU against new features/tests that it is not
# compatible with.
@@ -243,16 +260,14 @@ check-system-centos:
IMAGE: centos9
MAKE_CHECK_ARGS: check
-avocado-system-centos:
- extends: .avocado_test_job_template
+functional-system-centos:
+ extends: .functional_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos9
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx
- arch:sh4
+ MAKE_CHECK_ARGS: check-functional
build-system-opensuse:
extends:
@@ -274,15 +289,14 @@ check-system-opensuse:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check
-avocado-system-opensuse:
- extends: .avocado_test_job_template
+functional-system-opensuse:
+ extends: .functional_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
- MAKE_CHECK_ARGS: check-avocado
- AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
+ MAKE_CHECK_ARGS: check-functional
#
# Flaky tests. We don't run these by default and they are allow fail
@@ -302,18 +316,17 @@ build-system-flaky:
ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check-build
-avocado-system-flaky:
- extends: .avocado_test_job_template
+functional-system-flaky:
+ extends: .functional_test_job_template
needs:
- job: build-system-flaky
artifacts: true
allow_failure: true
variables:
IMAGE: debian
- MAKE_CHECK_ARGS: check-avocado
+ MAKE_CHECK_ARGS: check-functional
QEMU_JOB_OPTIONAL: 1
QEMU_TEST_FLAKY_TESTS: 1
- AVOCADO_TAGS: flaky
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
# the configure script. The container doesn't contain Xen headers so
@@ -345,6 +358,8 @@ build-tcg-disabled:
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
208 209 216 218 227 234 246 247 248 250 254 255 257 258
260 261 262 263 264 270 272 273 277 279 image-fleecing
+ - cd ../..
+ - make distclean
build-user:
extends: .native_build_job_template
@@ -428,9 +443,8 @@ clang-system:
job: amd64-fedora-container
variables:
IMAGE: fedora
- CONFIGURE_ARGS: --cc=clang --cxx=clang++
- --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
- --extra-cflags=-fno-sanitize=function
+ CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan
+ --extra-cflags=-fno-sanitize-recover=undefined
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-qtest check-tcg
@@ -441,9 +455,9 @@ clang-user:
timeout: 70m
variables:
IMAGE: debian-all-test-cross
- CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
+ CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system --enable-ubsan
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
- --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
+ --extra-cflags=-fno-sanitize-recover=undefined
MAKE_CHECK_ARGS: check-unit check-tcg
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
@@ -453,8 +467,8 @@ clang-user:
# Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with
# CFI builds, and thus have to disable it here.
#
-# Split in three sets of build/check/avocado to limit the execution time of each
-# job
+# Split in three sets of build/check/functional to limit the execution time
+# of each job
build-cfi-aarch64:
extends:
- .native_build_job_template
@@ -484,14 +498,14 @@ check-cfi-aarch64:
IMAGE: fedora
MAKE_CHECK_ARGS: check
-avocado-cfi-aarch64:
- extends: .avocado_test_job_template
+functional-cfi-aarch64:
+ extends: .functional_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
- MAKE_CHECK_ARGS: check-avocado
+ MAKE_CHECK_ARGS: check-functional
build-cfi-ppc64-s390x:
extends:
@@ -522,14 +536,14 @@ check-cfi-ppc64-s390x:
IMAGE: fedora
MAKE_CHECK_ARGS: check
-avocado-cfi-ppc64-s390x:
- extends: .avocado_test_job_template
+functional-cfi-ppc64-s390x:
+ extends: .functional_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
- MAKE_CHECK_ARGS: check-avocado
+ MAKE_CHECK_ARGS: check-functional
build-cfi-x86_64:
extends:
@@ -556,14 +570,14 @@ check-cfi-x86_64:
IMAGE: fedora
MAKE_CHECK_ARGS: check
-avocado-cfi-x86_64:
- extends: .avocado_test_job_template
+functional-cfi-x86_64:
+ extends: .functional_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
- MAKE_CHECK_ARGS: check-avocado
+ MAKE_CHECK_ARGS: check-functional
tsan-build:
extends: .native_build_job_template
@@ -618,12 +632,15 @@ build-oss-fuzz:
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
./scripts/oss-fuzz/build.sh
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
+ - failures=0
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
| grep -v slirp); do
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
echo Testing ${fuzzer} ... ;
- "${fuzzer}" -runs=1 -seed=1 || exit 1 ;
+ "${fuzzer}" -runs=1 -seed=1 || { echo "FAILED:"" ${fuzzer} exit code is $?"; failures=$(($failures+1)); };
done
+ - echo "Number of failures:"" $failures"
+ - test $failures = 0
build-tci:
extends: .native_build_job_template
@@ -650,9 +667,6 @@ build-tci:
- make check-tcg
# Check our reduced build configurations
-# requires libfdt: aarch64, arm, loongarch64, microblaze, microblazeel,
-# or1k, ppc64, riscv32, riscv64, rx
-# fails qtest without boards: i386, x86_64
build-without-defaults:
extends: .native_build_job_template
needs:
@@ -666,11 +680,7 @@ build-without-defaults:
--disable-pie
--disable-qom-cast-debug
--disable-strip
- TARGETS: alpha-softmmu avr-softmmu cris-softmmu hppa-softmmu m68k-softmmu
- mips-softmmu mips64-softmmu mipsel-softmmu mips64el-softmmu
- ppc-softmmu s390x-softmmu sh4-softmmu sh4eb-softmmu sparc-softmmu
- sparc64-softmmu tricore-softmmu xtensa-softmmu xtensaeb-softmmu
- hexagon-linux-user i386-linux-user s390x-linux-user
+ --target-list-exclude=aarch64-softmmu,microblaze-softmmu,mips64-softmmu,mipsel-softmmu,ppc64-softmmu,sh4el-softmmu,xtensa-softmmu,x86_64-softmmu
MAKE_CHECK_ARGS: check
build-libvhost-user:
@@ -776,3 +786,12 @@ coverity:
when: never
# Always manual on forks even if $QEMU_CI == "2"
- when: manual
+
+build-wasm:
+ extends: .wasm_build_job_template
+ timeout: 2h
+ needs:
+ job: wasm-emsdk-cross-container
+ variables:
+ IMAGE: emsdk-wasm32-cross
+ CONFIGURE_ARGS: --static --disable-tools --enable-debug --enable-tcg-interpreter
diff --git a/.gitlab-ci.d/check-dco.py b/.gitlab-ci.d/check-dco.py
index 632c8bc..2fd5668 100755
--- a/.gitlab-ci.d/check-dco.py
+++ b/.gitlab-ci.d/check-dco.py
@@ -19,10 +19,9 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
+print(f"adding upstream git repo @ {repourl}")
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
-subprocess.check_call(["git", "fetch", "check-dco", "master"],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL)
+subprocess.check_call(["git", "fetch", "--refetch", "check-dco", "master"])
ancestor = subprocess.check_output(["git", "merge-base",
"check-dco/master", "HEAD"],
@@ -79,7 +78,10 @@ of Origin 1.1 (DCO):
To indicate acceptance of the DCO every commit must have a tag
- Signed-off-by: REAL NAME <EMAIL>
+ Signed-off-by: YOUR NAME <EMAIL>
+
+where "YOUR NAME" is your commonly known identity in the context
+of the community.
This can be achieved by passing the "-s" flag to the "git commit" command.
diff --git a/.gitlab-ci.d/check-patch.py b/.gitlab-ci.d/check-patch.py
index 39e2b40..be13e6f 100755
--- a/.gitlab-ci.d/check-patch.py
+++ b/.gitlab-ci.d/check-patch.py
@@ -19,13 +19,12 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
+print(f"adding upstream git repo @ {repourl}")
# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
-subprocess.check_call(["git", "fetch", "check-patch", "master"],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL)
+subprocess.check_call(["git", "fetch", "--refetch", "check-patch", "master"])
ancestor = subprocess.check_output(["git", "merge-base",
"check-patch/master", "HEAD"],
diff --git a/.gitlab-ci.d/check-units.py b/.gitlab-ci.d/check-units.py
new file mode 100755
index 0000000..268a411
--- /dev/null
+++ b/.gitlab-ci.d/check-units.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+#
+# check-units.py: check the number of compilation units and identify
+# those that are rebuilt multiple times
+#
+# Copyright (C) 2025 Linaro Ltd.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from os import access, R_OK, path
+from sys import argv, exit
+import json
+from collections import Counter
+
+
+def extract_build_units(cc_path):
+ """
+ Extract the build units and their counts from the compile_commands.json file.
+
+ Returns:
+ Counter mapping each compilation unit to the number of times it is built
+ """
+
+ j = json.load(open(cc_path, 'r'))
+ files = [f['file'] for f in j]
+ build_units = Counter(files)
+
+ return build_units
+
+
+def analyse_units(build_units):
+ """
+ Analyse the build units and report stats, the 20 most rebuilt and the 10 least rebuilt units
+ """
+
+ print(f"Total source files: {len(build_units.keys())}")
+ print(f"Total build units: {sum(units.values())}")
+
+ # Create a sorted list by number of rebuilds
+ sorted_build_units = sorted(build_units.items(),
+ key=lambda item: item[1],
+ reverse=True)
+
+ print("Most rebuilt units:")
+ for unit, count in sorted_build_units[:20]:
+ print(f" {unit} built {count} times")
+
+ print("Least rebuilt units:")
+ for unit, count in sorted_build_units[-10:]:
+ print(f" {unit} built {count} times")
+
+
+if __name__ == "__main__":
+ if len(argv) != 2:
+ script_name = path.basename(argv[0])
+ print(f"Usage: {script_name} <path_to_compile_commands.json>")
+ exit(1)
+
+ cc_path = argv[1]
+ if path.isfile(cc_path) and access(cc_path, R_OK):
+ units = extract_build_units(cc_path)
+ analyse_units(units)
+ exit(0)
+ else:
+ print(f"{cc_path} doesn't exist or isn't readable")
+ exit(1)
diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml
index 75df127..75b6114 100644
--- a/.gitlab-ci.d/cirrus.yml
+++ b/.gitlab-ci.d/cirrus.yml
@@ -15,44 +15,29 @@
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
needs: []
+ allow_failure:
+ exit_codes: 3
# 20 mins larger than "timeout_in" in cirrus/build.yml
# as there's often a 5-10 minute delay before Cirrus CI
# actually starts the task
timeout: 80m
script:
+ - set -o allexport
- source .gitlab-ci.d/cirrus/$NAME.vars
- - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
- -e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
- -e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
- -e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g"
- -e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g"
- -e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g"
- -e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g"
- -e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g"
- -e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g"
- -e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
- -e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g"
- -e "s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g"
- -e "s|[@]PKGS@|$PKGS|g"
- -e "s|[@]MAKE@|$MAKE|g"
- -e "s|[@]PYTHON@|$PYTHON|g"
- -e "s|[@]PIP3@|$PIP3|g"
- -e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
- -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
- -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
- <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
+ - set +o allexport
+ - cirrus-vars <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
- cat .gitlab-ci.d/cirrus/$NAME.yml
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
variables:
QEMU_JOB_CIRRUS: 1
-x64-freebsd-13-build:
+x64-freebsd-14-build:
extends: .cirrus_build_job
variables:
- NAME: freebsd-13
+ NAME: freebsd-14
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
- CIRRUS_VM_IMAGE_NAME: freebsd-13-3
+ CIRRUS_VM_IMAGE_NAME: freebsd-14-2
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y
@@ -60,34 +45,16 @@ x64-freebsd-13-build:
CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu
TEST_TARGETS: check
-aarch64-macos-13-base-build:
- extends: .cirrus_build_job
- variables:
- NAME: macos-13
- CIRRUS_VM_INSTANCE_TYPE: macos_instance
- CIRRUS_VM_IMAGE_SELECTOR: image
- CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-ventura-base:latest
- CIRRUS_VM_CPUS: 12
- CIRRUS_VM_RAM: 24G
- UPDATE_COMMAND: brew update
- INSTALL_COMMAND: brew install
- PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
- PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
- CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
- TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
-
-aarch64-macos-14-base-build:
+aarch64-macos-build:
extends: .cirrus_build_job
variables:
NAME: macos-14
CIRRUS_VM_INSTANCE_TYPE: macos_instance
CIRRUS_VM_IMAGE_SELECTOR: image
- CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-sonoma-base:latest
- CIRRUS_VM_CPUS: 12
- CIRRUS_VM_RAM: 24G
+ CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-runner:sonoma
UPDATE_COMMAND: brew update
INSTALL_COMMAND: brew install
PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
+ CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
- QEMU_JOB_OPTIONAL: 1
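
The main change above replaces the long inline sed pipeline with the cirrus-vars helper, presumably provided by the cirrus-run container image this job already uses; wrapping the source of $NAME.vars in set -o allexport / set +o allexport exports those variables so the helper can read them from the environment. A minimal sketch of the equivalent @NAME@ substitution, assuming a simple token scheme (not the real cirrus-vars implementation):

#!/bin/sh
# Hedged sketch: expand every @NAME@ token in the Cirrus template from the
# environment, which is what the removed sed pipeline spelled out by hand.
# Assumes the variables were exported first (set -o allexport; source ...vars).
template=.gitlab-ci.d/cirrus/build.yml
out=.gitlab-ci.d/cirrus/"$NAME".yml
cp "$template" "$out"
for var in $(grep -o '@[A-Z_][A-Z0-9_]*@' "$template" | tr -d '@' | sort -u); do
    eval "val=\${$var}"
    # Values containing '|' or newlines would need extra escaping here.
    sed -i "s|[@]${var}[@]|${val}|g" "$out"
done
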
diff --git a/.gitlab-ci.d/cirrus/build.yml b/.gitlab-ci.d/cirrus/build.yml
index 43dd52d..41abd0b 100644
--- a/.gitlab-ci.d/cirrus/build.yml
+++ b/.gitlab-ci.d/cirrus/build.yml
@@ -8,7 +8,7 @@ env:
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
- PATH: "@PATH@"
+ PATH: "@PATH_EXTRA@:$PATH"
PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@"
PYTHON: "@PYTHON@"
MAKE: "@MAKE@"
@@ -26,7 +26,7 @@ build_task:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
- build_script:
+ step_script:
- mkdir build
- cd build
- ../configure --enable-werror $CONFIGURE_ARGS
diff --git a/.gitlab-ci.d/cirrus/freebsd-13.vars b/.gitlab-ci.d/cirrus/freebsd-13.vars
deleted file mode 100644
index 3785afc..0000000
--- a/.gitlab-ci.d/cirrus/freebsd-13.vars
+++ /dev/null
@@ -1,16 +0,0 @@
-# THIS FILE WAS AUTO-GENERATED
-#
-# $ lcitool variables freebsd-13 qemu
-#
-# https://gitlab.com/libvirt/libvirt-ci
-
-CCACHE='/usr/local/bin/ccache'
-CPAN_PKGS=''
-CROSS_PKGS=''
-MAKE='/usr/local/bin/gmake'
-NINJA='/usr/local/bin/ninja'
-PACKAGING_COMMAND='pkg'
-PIP3='/usr/local/bin/pip-3.8'
-PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-tomli py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
-PYPI_PKGS=''
-PYTHON='/usr/local/bin/python3'
diff --git a/.gitlab-ci.d/cirrus/freebsd-14.vars b/.gitlab-ci.d/cirrus/freebsd-14.vars
new file mode 100644
index 0000000..19ca0d3
--- /dev/null
+++ b/.gitlab-ci.d/cirrus/freebsd-14.vars
@@ -0,0 +1,16 @@
+# THIS FILE WAS AUTO-GENERATED
+#
+# $ lcitool variables freebsd-14 qemu
+#
+# https://gitlab.com/libvirt/libvirt-ci
+
+CCACHE='/usr/local/bin/ccache'
+CPAN_PKGS=''
+CROSS_PKGS=''
+MAKE='/usr/local/bin/gmake'
+NINJA='/usr/local/bin/ninja'
+PACKAGING_COMMAND='pkg'
+PIP3='/usr/local/bin/pip'
+PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache4 cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-pyyaml py311-sphinx py311-sphinx_rtd_theme py311-tomli python3 rpm2cpio rust rust-bindgen-cli sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 vulkan-tools xorriso zstd'
+PYPI_PKGS=''
+PYTHON='/usr/local/bin/python3'
diff --git a/.gitlab-ci.d/cirrus/macos-13.vars b/.gitlab-ci.d/cirrus/macos-13.vars
deleted file mode 100644
index 534f029..0000000
--- a/.gitlab-ci.d/cirrus/macos-13.vars
+++ /dev/null
@@ -1,16 +0,0 @@
-# THIS FILE WAS AUTO-GENERATED
-#
-# $ lcitool variables macos-13 qemu
-#
-# https://gitlab.com/libvirt/libvirt-ci
-
-CCACHE='/opt/homebrew/bin/ccache'
-CPAN_PKGS=''
-CROSS_PKGS=''
-MAKE='/opt/homebrew/bin/gmake'
-NINJA='/opt/homebrew/bin/ninja'
-PACKAGING_COMMAND='brew'
-PIP3='/opt/homebrew/bin/pip3'
-PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
-PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
-PYTHON='/opt/homebrew/bin/python3'
diff --git a/.gitlab-ci.d/cirrus/macos-14.vars b/.gitlab-ci.d/cirrus/macos-14.vars
index 43070f4..b039465 100644
--- a/.gitlab-ci.d/cirrus/macos-14.vars
+++ b/.gitlab-ci.d/cirrus/macos-14.vars
@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
-PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
+PKGS='bash bc bindgen bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libcbor libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio rust sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 vulkan-tools xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
PYTHON='/opt/homebrew/bin/python3'
diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml
index e310394..8d3be53 100644
--- a/.gitlab-ci.d/container-cross.yml
+++ b/.gitlab-ci.d/container-cross.yml
@@ -22,12 +22,6 @@ arm64-debian-cross-container:
variables:
NAME: debian-arm64-cross
-armel-debian-cross-container:
- extends: .container_job_template
- stage: containers
- variables:
- NAME: debian-armel-cross
-
armhf-debian-cross-container:
extends: .container_job_template
stage: containers
@@ -73,11 +67,8 @@ ppc64el-debian-cross-container:
riscv64-debian-cross-container:
extends: .container_job_template
stage: containers
- # as we are currently based on 'sid/unstable' we may break so...
- allow_failure: true
variables:
NAME: debian-riscv64-cross
- QEMU_JOB_OPTIONAL: 1
s390x-debian-cross-container:
extends: .container_job_template
@@ -96,12 +87,12 @@ xtensa-debian-cross-container:
variables:
NAME: debian-xtensa-cross
-cris-fedora-cross-container:
+win64-fedora-cross-container:
extends: .container_job_template
variables:
- NAME: fedora-cris-cross
+ NAME: fedora-win64-cross
-win64-fedora-cross-container:
+wasm-emsdk-cross-container:
extends: .container_job_template
variables:
- NAME: fedora-win64-cross
+ NAME: emsdk-wasm32-cross
diff --git a/.gitlab-ci.d/containers.yml b/.gitlab-ci.d/containers.yml
index ae79d4c..db9b4d5 100644
--- a/.gitlab-ci.d/containers.yml
+++ b/.gitlab-ci.d/containers.yml
@@ -27,3 +27,9 @@ python-container:
extends: .container_job_template
variables:
NAME: python
+
+amd64-fedora-rust-nightly-container:
+ extends: .container_job_template
+ variables:
+ NAME: fedora-rust-nightly
+ allow_failure: true
diff --git a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml
index d9f81b7..303943f 100644
--- a/.gitlab-ci.d/crossbuild-template.yml
+++ b/.gitlab-ci.d/crossbuild-template.yml
@@ -8,6 +8,12 @@
key: "$CI_JOB_NAME"
when: always
timeout: 80m
+ before_script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start setup "Pre-script setup"
+ - JOBS=$(expr $(nproc) + 1)
+ - cat /packages.txt
+ - section_end setup
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
@@ -16,18 +22,30 @@
- mkdir build
- cd build
- ccache --zero-stats
+ - section_start configure "Running configure"
- ../configure --enable-werror --disable-docs --enable-fdt=system
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
- --target-list-exclude="arm-softmmu cris-softmmu
+ --target-list-exclude="arm-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
- - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
+ - section_end configure
+ - section_start build "Building QEMU"
+ - make -j"$JOBS" all check-build
+ - section_end build
+ - section_start test "Running tests"
+ - if test -n "$MAKE_CHECK_ARGS";
+ then
+ $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
+ fi
+ - section_end test
+ - section_start installer "Building the installer"
- if grep -q "EXESUF=.exe" config-host.mak;
then make installer;
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi
+ - section_end installer
- ccache --show-stats
# Job to cross-build specific accelerators.
@@ -39,11 +57,14 @@
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
- timeout: 30m
+ timeout: 60m
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
+ before_script:
+ - source scripts/ci/gitlab-ci-section
+ - JOBS=$(expr $(nproc) + 1)
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
@@ -51,9 +72,19 @@
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
+ - section_start configure "Running configure"
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
- - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
+ - section_end configure
+ - section_start build "Building QEMU"
+ - make -j"$JOBS" all check-build
+ - section_end build
+ - section_start test "Running tests"
+ - if test -n "$MAKE_CHECK_ARGS";
+ then
+ $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
+ fi
+ - section_end test
.cross_user_build_job:
extends: .base_job_template
@@ -63,18 +94,31 @@
paths:
- ccache/
key: "$CI_JOB_NAME"
+ before_script:
+ - source scripts/ci/gitlab-ci-section
+ - JOBS=$(expr $(nproc) + 1)
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- mkdir build
- cd build
+ - section_start configure "Running configure"
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-system --target-list-exclude="aarch64_be-linux-user
- alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
+ alpha-linux-user m68k-linux-user microblazeel-linux-user
or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS"
- - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
+ - section_end configure
+ - section_start build "Building QEMU"
+ - make -j"$JOBS" all check-build
+ - section_end build
+ - section_start test "Running tests"
+ - if test -n "$MAKE_CHECK_ARGS";
+ then
+ $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
+ fi
+ - section_end test
# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
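
The section_start and section_end calls added to these templates come from scripts/ci/gitlab-ci-section, which each before_script now sources. A plausible sketch of such helpers, assuming the escape sequences GitLab documents for collapsible log sections (the real script may differ in details):

# Hedged sketch of gitlab-ci-section style helpers: emit the escape sequences
# GitLab CI uses to render collapsible, timestamped sections in the job log.
section_start() {
    name="$1"; shift
    printf '\033[0Ksection_start:%s:%s[collapsed=true]\r\033[0K%s\n' \
        "$(date +%s)" "$name" "$*"
}
section_end() {
    printf '\033[0Ksection_end:%s:%s\r\033[0K\n' "$(date +%s)" "$1"
}

Used as in the diff above: section_start configure "Running configure" opens a named, collapsed section and section_end configure closes it.
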
diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml
index cb499e4..3f76c90 100644
--- a/.gitlab-ci.d/crossbuilds.yml
+++ b/.gitlab-ci.d/crossbuilds.yml
@@ -1,13 +1,6 @@
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
-cross-armel-user:
- extends: .cross_user_build_job
- needs:
- job: armel-debian-cross-container
- variables:
- IMAGE: debian-armel-cross
-
cross-armhf-user:
extends: .cross_user_build_job
needs:
@@ -68,8 +61,12 @@ cross-i686-tci:
variables:
IMAGE: debian-i686-cross
ACCEL: tcg-interpreter
- EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
- MAKE_CHECK_ARGS: check check-tcg
+ EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,arm-softmmu,arm-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
+ # Force tests to run with reduced parallelism, to see whether this
+ # reduces the flakiness of this CI job. The CI
+ # environment by default shows us 8 CPUs and so we
+ # would otherwise be using a parallelism of 9.
+ MAKE_CHECK_ARGS: check check-tcg -j2
cross-mipsel-system:
extends: .cross_system_build_job
@@ -121,12 +118,8 @@ cross-ppc64el-kvm-only:
IMAGE: debian-ppc64el-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices
-# The riscv64 cross-builds currently use a 'sid' container to get
-# compilers and libraries. Until something more stable is found we
-# allow_failure so as not to block CI.
cross-riscv64-system:
extends: .cross_system_build_job
- allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
@@ -134,7 +127,6 @@ cross-riscv64-system:
cross-riscv64-user:
extends: .cross_user_build_job
- allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
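
Note on the cross-i686-tci change above: the shared template runs $MAKE -j"$JOBS" $MAKE_CHECK_ARGS whenever MAKE_CHECK_ARGS is set, so the trailing -j2 added here takes effect because GNU make honours the last -j option it sees. On an 8-CPU runner (JOBS=9), and assuming $MAKE is plain make, the test step expands roughly to:

# Illustrative expansion only; the later -j2 overrides the earlier -j9.
make -j9 check check-tcg -j2
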
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
index 263a3c2..ca2f140 100644
--- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
+++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
@@ -103,7 +103,7 @@ ubuntu-22.04-aarch64-clang:
script:
- mkdir build
- cd build
- - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
+ - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml
index 69ddd3e..ca374ac 100644
--- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml
+++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml
@@ -80,7 +80,7 @@ ubuntu-22.04-s390x-clang:
script:
- mkdir build
- cd build
- - ../configure --cc=clang --cxx=clang++ --enable-sanitizers
+ - ../configure --cc=clang --cxx=clang++ --enable-ubsan
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check
diff --git a/.gitlab-ci.d/static_checks.yml b/.gitlab-ci.d/static_checks.yml
index ad9f426..c3ed6de 100644
--- a/.gitlab-ci.d/static_checks.yml
+++ b/.gitlab-ci.d/static_checks.yml
@@ -46,3 +46,49 @@ check-python-tox:
QEMU_JOB_OPTIONAL: 1
needs:
job: python-container
+
+check-rust-tools-nightly:
+ extends: .base_job_template
+ stage: test
+ image: $CI_REGISTRY_IMAGE/qemu/fedora-rust-nightly:$QEMU_CI_CONTAINER_TAG
+ script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start test "Running Rust code checks"
+ - cd build
+ - pyvenv/bin/meson devenv -w ../rust ${CARGO-cargo} fmt --check
+ - make clippy
+ - make rustdoc
+ - section_end test
+ variables:
+ GIT_DEPTH: 1
+ allow_failure: true
+ needs:
+ - job: build-system-fedora-rust-nightly
+ artifacts: true
+ artifacts:
+ when: on_success
+ expire_in: 2 days
+ paths:
+ - rust/target/doc
+
+check-build-units:
+ extends: .base_job_template
+ stage: build
+ image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
+ needs:
+ job: amd64-debian-container
+ before_script:
+ - source scripts/ci/gitlab-ci-section
+ - section_start setup "Install Tools"
+ - apt install --assume-yes --no-install-recommends jq
+ - section_end setup
+ script:
+ - mkdir build
+ - cd build
+ - section_start configure "Running configure"
+ - ../configure
+ - cd ..
+ - section_end configure
+ - section_start analyse "Analyse"
+ - .gitlab-ci.d/check-units.py build/compile_commands.json
+ - section_end analyse
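
The new check-build-units job runs configure and then feeds build/compile_commands.json to .gitlab-ci.d/check-units.py. As a hedged illustration of the kind of query such a compilation database supports (not necessarily what check-units.py actually computes), the jq tool installed above could summarise how often each source file is compiled:

# compile_commands.json is a JSON array of {directory, command, file} entries;
# count how many compile commands reference each source file.
jq -r '.[].file' build/compile_commands.json | sort | uniq -c | sort -rn | head -n 20
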
diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml
index a83f23a..45ed0c9 100644
--- a/.gitlab-ci.d/windows.yml
+++ b/.gitlab-ci.d/windows.yml
@@ -17,12 +17,7 @@ msys2-64bit:
# This feature doesn't (currently) work with PowerShell, it stops
# the echo'ing of commands being run and doesn't show any timing
FF_SCRIPT_SECTIONS: 0
- # do not remove "--without-default-devices"!
- # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
- # changed to compile QEMU with the --without-default-devices switch
- # for this job, because otherwise the build could not complete within
- # the project timeout.
- CONFIGURE_ARGS: --target-list=sparc-softmmu --without-default-devices -Ddebug=false -Doptimization=0
+ CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0
# The Windows git is a bit older so override the default
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
artifacts:
@@ -81,35 +76,19 @@ msys2-64bit:
bison diffutils flex
git grep make sed
mingw-w64-x86_64-binutils
- mingw-w64-x86_64-capstone
mingw-w64-x86_64-ccache
mingw-w64-x86_64-curl
- mingw-w64-x86_64-cyrus-sasl
- mingw-w64-x86_64-dtc
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
- mingw-w64-x86_64-gnutls
- mingw-w64-x86_64-gtk3
- mingw-w64-x86_64-libgcrypt
- mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-libnfs
- mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
- mingw-w64-x86_64-libtasn1
- mingw-w64-x86_64-libusb
- mingw-w64-x86_64-lzo2
- mingw-w64-x86_64-nettle
mingw-w64-x86_64-ninja
mingw-w64-x86_64-pixman
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
- mingw-w64-x86_64-SDL2
- mingw-w64-x86_64-SDL2_image
- mingw-w64-x86_64-snappy
- mingw-w64-x86_64-spice
- mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd"
- Write-Output "Running build at $(Get-Date -Format u)"
+ - $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc)
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
@@ -120,8 +99,8 @@ msys2-64bit:
- mkdir build
- cd build
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
- - ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
- - ..\msys64\usr\bin\bash -lc "make"
+ - ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS"
+ - ..\msys64\usr\bin\bash -lc "make -j$env:JOBS"
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
- Write-Output "Finished build at $(Get-Date -Format u)"
diff --git a/.mailmap b/.mailmap
index ef1b8a5..e727185 100644
--- a/.mailmap
+++ b/.mailmap
@@ -67,6 +67,8 @@ Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-blo
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
# Next, replace old addresses by a more recent one.
+Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp> <akihiko.odaki@daynix.com>
+Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp> <akihiko.odaki@gmail.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@imgtec.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
@@ -75,6 +77,8 @@ Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
+Brian Cain <brian.cain@oss.qualcomm.com> <bcain@quicinc.com>
+Brian Cain <brian.cain@oss.qualcomm.com> <quic_bcain@quicinc.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
@@ -85,8 +89,9 @@ Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
-Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
-Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <quic_llindhol@quicinc.com>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif.lindholm@linaro.org>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif@nuviainc.com>
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
diff --git a/.travis.yml b/.travis.yml
index 8fc1ae0..0a634d7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -79,41 +79,6 @@ after_script:
jobs:
include:
- - name: "[aarch64] GCC check-tcg"
- arch: arm64
- addons:
- apt_packages:
- - libaio-dev
- - libattr1-dev
- - libbrlapi-dev
- - libcacard-dev
- - libcap-ng-dev
- - libfdt-dev
- - libgcrypt20-dev
- - libgnutls28-dev
- - libgtk-3-dev
- - libiscsi-dev
- - liblttng-ust-dev
- - libncurses5-dev
- - libnfs-dev
- - libpixman-1-dev
- - libpng-dev
- - librados-dev
- - libsdl2-dev
- - libseccomp-dev
- - liburcu-dev
- - libusb-1.0-0-dev
- - libvdeplug-dev
- - libvte-2.91-dev
- - ninja-build
- - python3-tomli
- # Tests dependencies
- - genisoimage
- env:
- - TEST_CMD="make check check-tcg V=1"
- - CONFIG="--disable-containers --enable-fdt=system
- --target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false"
-
- name: "[ppc64] Clang check-tcg"
arch: ppc64le
compiler: clang
diff --git a/COPYING b/COPYING
index 00ccfbb..8095135 100644
--- a/COPYING
+++ b/COPYING
@@ -2,7 +2,7 @@
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@@ -304,8 +304,7 @@ the "copyright" line and a pointer to where the full notice is found.
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ with this program; if not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
diff --git a/COPYING.LIB b/COPYING.LIB
index 4362b49..99f4757 100644
--- a/COPYING.LIB
+++ b/COPYING.LIB
@@ -2,7 +2,7 @@
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@@ -484,8 +484,7 @@ convey the exclusion of warranty; and each file should have at least the
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ License along with this library; if not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
diff --git a/Kconfig b/Kconfig
index fb6a24a..63ca7f4 100644
--- a/Kconfig
+++ b/Kconfig
@@ -4,3 +4,4 @@ source accel/Kconfig
source target/Kconfig
source hw/Kconfig
source semihosting/Kconfig
+source rust/Kconfig
diff --git a/Kconfig.host b/Kconfig.host
index 17f4050..933425c 100644
--- a/Kconfig.host
+++ b/Kconfig.host
@@ -5,6 +5,12 @@
config LINUX
bool
+config LIBCBOR
+ bool
+
+config GNUTLS
+ bool
+
config OPENGL
bool
@@ -52,3 +58,9 @@ config VFIO_USER_SERVER_ALLOWED
config HV_BALLOON_POSSIBLE
bool
+
+config HAVE_RUST
+ bool
+
+config MAC_PVG
+ bool
diff --git a/MAINTAINERS b/MAINTAINERS
index 7d98114..d1672fd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -72,11 +72,14 @@ R: Markus Armbruster <armbru@redhat.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
W: https://www.qemu.org/docs/master/devel/index.html
S: Odd Fixes
-F: docs/devel/style.rst
+F: docs/devel/build-environment.rst
F: docs/devel/code-of-conduct.rst
+F: docs/devel/codebase.rst
F: docs/devel/conflict-resolution.rst
+F: docs/devel/style.rst
F: docs/devel/submitting-a-patch.rst
F: docs/devel/submitting-a-pull-request.rst
+F: docs/glossary.rst
Responsible Disclosure, Reporting Security Issues
-------------------------------------------------
@@ -109,6 +112,7 @@ F: hw/intc/s390_flic.c
F: hw/intc/s390_flic_kvm.c
F: hw/s390x/
F: hw/vfio/ap.c
+F: hw/s390x/ap-stub.c
F: hw/vfio/ccw.c
F: hw/watchdog/wdt_diag288.c
F: include/hw/s390x/
@@ -118,7 +122,7 @@ F: pc-bios/s390-ccw.img
F: target/s390x/
F: docs/system/target-s390x.rst
F: docs/system/s390x/
-F: tests/migration/s390x/
+F: tests/qtest/migration/s390x/
K: ^Subject:.*(?i)s390x?
L: qemu-s390x@nongnu.org
@@ -132,6 +136,7 @@ F: configs/targets/mips*
X86 general architecture support
M: Paolo Bonzini <pbonzini@redhat.com>
+R: Zhao Liu <zhao1.liu@intel.com>
S: Maintained
F: configs/devices/i386-softmmu/default.mak
F: configs/targets/i386-softmmu.mak
@@ -140,6 +145,7 @@ F: docs/system/target-i386*
F: target/i386/*.[ch]
F: target/i386/Kconfig
F: target/i386/meson.build
+F: tools/i386/
Guest CPU cores (TCG)
---------------------
@@ -147,10 +153,7 @@ Overall TCG CPUs
M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
-F: system/cpus.c
F: system/watchpoint.c
-F: cpu-common.c
-F: cpu-target.c
F: page-vary-target.c
F: page-vary-common.c
F: accel/tcg/
@@ -160,17 +163,13 @@ F: util/cacheflush.c
F: scripts/decodetree.py
F: docs/devel/decodetree.rst
F: docs/devel/tcg*
-F: include/exec/cpu*.h
-F: include/exec/exec-all.h
F: include/exec/tb-flush.h
-F: include/exec/target_long.h
F: include/exec/helper*.h
F: include/exec/helper*.h.inc
F: include/exec/helper-info.c.inc
F: include/exec/page-protection.h
-F: include/sysemu/cpus.h
-F: include/sysemu/tcg.h
-F: include/hw/core/tcg-cpu-ops.h
+F: include/system/tcg.h
+F: include/accel/tcg/
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/
@@ -213,7 +212,7 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/smmu*
F: include/hw/arm/smmu*
-F: tests/avocado/smmu.py
+F: tests/functional/test_aarch64_smmu.py
AVR TCG CPUs
M: Michael Rolnik <mrolnik@gmail.com>
@@ -221,19 +220,10 @@ S: Maintained
F: docs/system/target-avr.rst
F: gdb-xml/avr-cpu.xml
F: target/avr/
-F: tests/avocado/machine_avr6.py
-
-CRIS TCG CPUs
-M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
-S: Maintained
-F: target/cris/
-F: hw/cris/
-F: include/hw/cris/
-F: tests/tcg/cris/
-F: disas/cris.c
+F: tests/functional/test_avr_*.py
Hexagon TCG CPUs
-M: Brian Cain <bcain@quicinc.com>
+M: Brian Cain <brian.cain@oss.qualcomm.com>
S: Supported
F: target/hexagon/
X: target/hexagon/idef-parser/
@@ -244,6 +234,7 @@ F: disas/hexagon.c
F: configs/targets/hexagon-linux-user/default.mak
F: docker/dockerfiles/debian-hexagon-cross.docker
F: gdb-xml/hexagon*.xml
+T: git https://github.com/quic/qemu.git hex-next
Hexagon idef-parser
M: Alessandro Di Federico <ale@rev.ng>
@@ -254,6 +245,7 @@ F: target/hexagon/gen_idef_parser_funcs.py
HPPA (PA-RISC) TCG CPUs
M: Richard Henderson <richard.henderson@linaro.org>
+M: Helge Deller <deller@gmx.de>
S: Maintained
F: target/hppa/
F: disas/hppa.c
@@ -264,7 +256,7 @@ M: Song Gao <gaosong@loongson.cn>
S: Maintained
F: target/loongarch/
F: tests/tcg/loongarch64/
-F: tests/avocado/machine_loongarch.py
+F: tests/functional/test_loongarch64_virt.py
M68K TCG CPUs
M: Laurent Vivier <laurent@vivier.eu>
@@ -316,11 +308,11 @@ F: configs/devices/ppc*
F: docs/system/ppc/embedded.rst
F: docs/system/target-ppc.rst
F: tests/tcg/ppc*/*
+F: tests/functional/test_ppc_74xx.py
RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <alistair.francis@wdc.com>
-M: Bin Meng <bmeng.cn@gmail.com>
R: Weiwei Li <liwei1518@gmail.com>
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
@@ -328,12 +320,17 @@ L: qemu-riscv@nongnu.org
S: Supported
F: configs/targets/riscv*
F: docs/system/target-riscv.rst
+F: docs/specs/riscv-iommu.rst
F: target/riscv/
+F: hw/char/riscv_htif.c
F: hw/riscv/
F: hw/intc/riscv*
+F: include/hw/char/riscv_htif.h
F: include/hw/riscv/
F: linux-user/host/riscv32/
F: linux-user/host/riscv64/
+F: common-user/host/riscv*
+F: tests/functional/test_riscv*
F: tests/tcg/riscv64/
RISC-V XThead* extensions
@@ -355,7 +352,7 @@ F: target/riscv/insn_trans/trans_xventanacondops.c.inc
F: disas/riscv-xventana*
RENESAS RX CPUs
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
S: Orphan
F: target/rx/
@@ -371,7 +368,7 @@ F: tests/tcg/s390x/
L: qemu-s390x@nongnu.org
SH4 TCG CPUs
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
S: Orphan
F: target/sh4/
F: hw/sh4/
@@ -438,7 +435,7 @@ F: */*/kvm*
F: accel/kvm/
F: accel/stubs/kvm-stub.c
F: include/hw/kvm/
-F: include/sysemu/kvm*.h
+F: include/system/kvm*.h
F: scripts/kvm/kvm_flightrecorder
ARM KVM CPUs
@@ -451,7 +448,7 @@ MIPS KVM CPUs
M: Huacai Chen <chenhuacai@kernel.org>
S: Odd Fixes
F: target/mips/kvm*
-F: target/mips/sysemu/
+F: target/mips/system/
PPC KVM CPUs
M: Nicholas Piggin <npiggin@gmail.com>
@@ -462,6 +459,8 @@ F: target/ppc/kvm.c
S390 KVM CPUs
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Eric Farman <farman@linux.ibm.com>
+R: Matthew Rosato <mjrosato@linux.ibm.com>
S: Supported
F: target/s390x/kvm/
F: target/s390x/machine.c
@@ -480,15 +479,16 @@ F: docs/system/i386/sgx.rst
F: target/i386/kvm/
F: target/i386/sev*
F: scripts/kvm/vmxcap
+F: tests/functional/test_x86_64_hotplug_cpu.py
Xen emulation on X86 KVM CPUs
M: David Woodhouse <dwmw2@infradead.org>
M: Paul Durrant <paul@xen.org>
S: Supported
-F: include/sysemu/kvm_xen.h
+F: include/system/kvm_xen.h
F: target/i386/kvm/xen*
F: hw/i386/kvm/xen*
-F: tests/avocado/kvm_xen_guest.py
+F: tests/functional/test_x86_64_kvm_xen.py
Guest CPU Cores (other accelerators)
------------------------------------
@@ -496,21 +496,29 @@ Overall
M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
+F: include/exec/cpu*.h
+F: include/exec/target_long.h
F: include/qemu/accel.h
-F: include/sysemu/accel-*.h
-F: include/hw/core/accel-cpu.h
-F: accel/accel-*.c
+F: include/system/accel-*.h
+F: include/system/cpus.h
+F: include/accel/accel-cpu*.h
+F: accel/accel-*.?
F: accel/Makefile.objs
F: accel/stubs/Makefile.objs
+F: cpu-common.c
+F: cpu-target.c
+F: system/cpus.c
Apple Silicon HVF CPUs
M: Alexander Graf <agraf@csgraf.de>
S: Maintained
F: target/arm/hvf/
+F: target/arm/hvf-stub.c
X86 HVF CPUs
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
+R: Phil Dennis-Jordan <phil@philjordan.eu>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: target/i386/hvf/
@@ -518,17 +526,27 @@ F: target/i386/hvf/
HVF
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
+R: Phil Dennis-Jordan <phil@philjordan.eu>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: accel/hvf/
-F: include/sysemu/hvf.h
-F: include/sysemu/hvf_int.h
+F: accel/stubs/hvf-stub.c
+F: include/system/hvf.h
+F: include/system/hvf_int.h
WHPX CPUs
M: Sunil Muthuswamy <sunilmut@microsoft.com>
S: Supported
F: target/i386/whpx/
-F: include/sysemu/whpx.h
+F: include/system/whpx.h
+
+X86 Instruction Emulator
+M: Cameron Esfahani <dirty@apple.com>
+M: Roman Bolshakov <rbolshakov@ddn.com>
+R: Phil Dennis-Jordan <phil@philjordan.eu>
+R: Wei Liu <wei.liu@kernel.org>
+S: Maintained
+F: target/i386/emulate/
Guest CPU Cores (Xen)
---------------------
@@ -554,16 +572,18 @@ F: hw/i386/xen/
F: hw/pci-host/xen_igd_pt.c
F: include/hw/block/dataplane/xen*
F: include/hw/xen/
-F: include/sysemu/xen.h
-F: include/sysemu/xen-mapcache.h
+F: include/system/xen.h
+F: include/system/xen-mapcache.h
F: stubs/xen-hw-stub.c
+F: docs/system/arm/xenpvh.rst
+F: docs/system/i386/xenpvh.rst
Guest CPU Cores (NVMM)
----------------------
NetBSD Virtual Machine Monitor (NVMM) CPU support
M: Reinoud Zandijk <reinoud@netbsd.org>
S: Maintained
-F: include/sysemu/nvmm.h
+F: include/system/nvmm.h
F: target/i386/nvmm/
Hosts
@@ -581,7 +601,7 @@ POSIX
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: os-posix.c
-F: include/sysemu/os-posix.h
+F: include/system/os-posix.h
F: util/*posix*.c
F: include/qemu/*posix*.h
@@ -613,6 +633,15 @@ F: .gitlab-ci.d/cirrus/macos-*
F: */*.m
F: scripts/entitlement.sh
+WebAssembly
+M: Kohei Tokunaga <ktokunaga.mail@gmail.com>
+S: Maintained
+F: include/system/os-wasm.h
+F: os-wasm.c
+F: util/coroutine-wasm.c
+F: configs/meson/emscripten.txt
+F: tests/docker/dockerfiles/emsdk-wasm32-cross.docker
+
Alpha Machines
--------------
M: Richard Henderson <richard.henderson@linaro.org>
@@ -620,6 +649,7 @@ S: Maintained
F: hw/alpha/
F: hw/isa/smc37c669-superio.c
F: tests/tcg/alpha/system/
+F: tests/functional/test_alpha_clipper.py
ARM Machines
------------
@@ -635,6 +665,7 @@ F: include/hw/*/allwinner*
F: hw/arm/cubieboard.c
F: docs/system/arm/cubieboard.rst
F: hw/misc/axp209.c
+F: tests/functional/test_arm_cubieboard.py
Allwinner-h3
M: Niek Linnenbank <nieklinnenbank@gmail.com>
@@ -644,6 +675,7 @@ F: hw/*/allwinner-h3*
F: include/hw/*/allwinner-h3*
F: hw/arm/orangepi.c
F: docs/system/arm/orangepi.rst
+F: tests/functional/test_arm_orangepi.py
ARM PrimeCell and CMSDK devices
M: Peter Maydell <peter.maydell@linaro.org>
@@ -705,6 +737,24 @@ F: include/hw/timer/armv7m_systick.h
F: include/hw/misc/armv7m_ras.h
F: tests/qtest/test-arm-mptimer.c
+Bananapi M2U
+M: Peter Maydell <peter.maydell@linaro.org>
+L: qemu-arm@nongnu.org
+S: Odd Fixes
+F: docs/system/arm/bananapi_m2u.rst
+F: hw/*/allwinner-r40*.c
+F: hw/arm/bananapi_m2u.c
+F: include/hw/*/allwinner-r40*.h
+F: tests/functional/test_arm_bpim2u.py
+
+B-L475E-IOT01A IoT Node
+M: Samuel Tardieu <sam@rfc1149.net>
+L: qemu-arm@nongnu.org
+S: Maintained
+F: hw/arm/b-l475e-iot01a.c
+F: hw/display/dm163.c
+F: tests/qtest/dm163-test.c
+
Exynos
M: Igor Mitsyanko <i.mitsyanko@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
@@ -712,6 +762,8 @@ L: qemu-arm@nongnu.org
S: Odd Fixes
F: hw/*/exynos*
F: include/hw/*/exynos*
+F: docs/system/arm/exynos.rst
+F: tests/functional/test_arm_smdkc210.py
Calxeda Highbank
M: Rob Herring <robh@kernel.org>
@@ -730,7 +782,7 @@ S: Odd Fixes
F: include/hw/arm/digic.h
F: hw/*/digic*
F: include/hw/*/digic*
-F: tests/avocado/machine_arm_canona1100.py
+F: tests/functional/test_arm_canona1100.py
F: docs/system/arm/digic.rst
Goldfish RTC
@@ -741,14 +793,6 @@ S: Maintained
F: hw/rtc/goldfish_rtc.c
F: include/hw/rtc/goldfish_rtc.h
-Gumstix
-M: Peter Maydell <peter.maydell@linaro.org>
-R: Philippe Mathieu-Daudé <philmd@linaro.org>
-L: qemu-arm@nongnu.org
-S: Odd Fixes
-F: hw/arm/gumstix.c
-F: docs/system/arm/gumstix.rst
-
i.MX25 PDK
M: Peter Maydell <peter.maydell@linaro.org>
R: Jean-Christophe Dubois <jcd@tribudubois.net>
@@ -777,11 +821,11 @@ F: docs/system/arm/kzm.rst
Integrator CP
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/integratorcp.c
F: hw/misc/arm_integrator_debug.c
F: include/hw/misc/arm_integrator_debug.h
-F: tests/avocado/machine_arm_integratorcp.py
+F: tests/functional/test_arm_integratorcp.py
F: docs/system/arm/integratorcp.rst
MCIMX6UL EVK / i.MX6ul
@@ -794,6 +838,7 @@ F: hw/arm/fsl-imx6ul.c
F: hw/misc/imx6ul_ccm.c
F: include/hw/arm/fsl-imx6ul.h
F: include/hw/misc/imx6ul_ccm.h
+F: docs/system/arm/mcimx6ul-evk.rst
MCIMX7D SABRE / i.MX7
M: Peter Maydell <peter.maydell@linaro.org>
@@ -807,6 +852,23 @@ F: include/hw/arm/fsl-imx7.h
F: include/hw/misc/imx7_*.h
F: hw/pci-host/designware.c
F: include/hw/pci-host/designware.h
+F: docs/system/arm/mcimx7d-sabre.rst
+
+MCIMX8MP-EVK / i.MX8MP
+M: Bernhard Beschow <shentey@gmail.com>
+L: qemu-arm@nongnu.org
+S: Maintained
+F: hw/arm/imx8mp-evk.c
+F: hw/arm/fsl-imx8mp.c
+F: hw/misc/imx8mp_*.c
+F: hw/pci-host/fsl_imx8m_phy.c
+F: hw/rtc/rs5c372.c
+F: include/hw/arm/fsl-imx8mp.h
+F: include/hw/misc/imx8mp_*.h
+F: include/hw/pci-host/fsl_imx8m_phy.h
+F: docs/system/arm/imx8mp-evk.rst
+F: tests/functional/test_aarch64_imx8mp_evk.py
+F: tests/qtest/rs5c372-test.c
MPS2 / MPS3
M: Peter Maydell <peter.maydell@linaro.org>
@@ -841,7 +903,7 @@ F: docs/system/arm/mps2.rst
Musca
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/musca.c
F: docs/system/arm/musca.rst
@@ -866,34 +928,10 @@ F: include/hw/*/npcm*
F: tests/qtest/npcm*
F: tests/qtest/adm1266-test.c
F: pc-bios/npcm7xx_bootrom.bin
+F: pc-bios/npcm8xx_bootrom.bin
F: roms/vbootrom
F: docs/system/arm/nuvoton.rst
-
-nSeries
-M: Peter Maydell <peter.maydell@linaro.org>
-L: qemu-arm@nongnu.org
-S: Odd Fixes
-F: hw/arm/nseries.c
-F: hw/display/blizzard.c
-F: hw/input/lm832x.c
-F: hw/input/tsc2005.c
-F: hw/misc/cbus.c
-F: hw/rtc/twl92230.c
-F: include/hw/display/blizzard.h
-F: include/hw/input/lm832x.h
-F: include/hw/input/tsc2xxx.h
-F: include/hw/misc/cbus.h
-F: tests/avocado/machine_arm_n8x0.py
-F: docs/system/arm/nseries.rst
-
-Palm
-M: Peter Maydell <peter.maydell@linaro.org>
-L: qemu-arm@nongnu.org
-S: Odd Fixes
-F: hw/arm/palm.c
-F: hw/input/tsc210x.c
-F: include/hw/input/tsc2xxx.h
-F: docs/system/arm/palm.rst
+F: tests/functional/test_arm_quanta_gsj.py
Raspberry Pi
M: Peter Maydell <peter.maydell@linaro.org>
@@ -906,38 +944,20 @@ F: hw/*/bcm283*
F: include/hw/arm/rasp*
F: include/hw/*/bcm283*
F: docs/system/arm/raspi.rst
+F: tests/functional/test_arm_raspi2.py
+F: tests/functional/test_aarch64_raspi3.py
+F: tests/functional/test_aarch64_raspi4.py
Real View
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/realview*
F: hw/cpu/realview_mpcore.c
F: hw/intc/realview_gic.c
F: include/hw/intc/realview_gic.h
F: docs/system/arm/realview.rst
-
-PXA2XX
-M: Peter Maydell <peter.maydell@linaro.org>
-L: qemu-arm@nongnu.org
-S: Odd Fixes
-F: hw/arm/mainstone.c
-F: hw/arm/spitz.c
-F: hw/arm/tosa.c
-F: hw/arm/z2.c
-F: hw/*/pxa2xx*
-F: hw/display/tc6393xb.c
-F: hw/gpio/max7310.c
-F: hw/gpio/zaurus.c
-F: hw/input/ads7846.c
-F: hw/misc/mst_fpga.c
-F: hw/adc/max111x.c
-F: include/hw/adc/max111x.h
-F: include/hw/arm/pxa.h
-F: include/hw/arm/sharpsl.h
-F: include/hw/display/tc6393xb.h
-F: docs/system/arm/xscale.rst
-F: docs/system/arm/mainstone.rst
+F: tests/functional/test_arm_realview.py
SABRELITE / i.MX6
M: Peter Maydell <peter.maydell@linaro.org>
@@ -958,8 +978,7 @@ F: include/hw/ssi/imx_spi.h
SBSA-REF
M: Radoslaw Biernacki <rad@semihalf.com>
M: Peter Maydell <peter.maydell@linaro.org>
-R: Leif Lindholm <quic_llindhol@quicinc.com>
-R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
+R: Leif Lindholm <leif.lindholm@oss.qualcomm.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/sbsa-ref.c
@@ -967,7 +986,7 @@ F: hw/misc/sbsa_ec.c
F: hw/watchdog/sbsa_gwdt.c
F: include/hw/watchdog/sbsa_gwdt.h
F: docs/system/arm/sbsa.rst
-F: tests/avocado/machine_aarch64_sbsaref.py
+F: tests/functional/test_aarch64_*sbsaref*.py
Sharp SL-5500 (Collie) PDA
M: Peter Maydell <peter.maydell@linaro.org>
@@ -975,17 +994,34 @@ L: qemu-arm@nongnu.org
S: Odd Fixes
F: hw/arm/collie.c
F: hw/arm/strongarm*
+F: hw/gpio/zaurus.c
+F: include/hw/arm/sharpsl.h
F: docs/system/arm/collie.rst
+F: tests/functional/test_arm_collie.py
Stellaris
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/stellaris*
F: hw/display/ssd03*
F: include/hw/input/gamepad.h
F: include/hw/timer/stellaris-gptm.h
F: docs/system/arm/stellaris.rst
+F: tests/functional/test_arm_stellaris.py
+
+STM32L4x5 SoC Family
+M: Samuel Tardieu <sam@rfc1149.net>
+L: qemu-arm@nongnu.org
+S: Maintained
+F: hw/arm/stm32l4x5_soc.c
+F: hw/char/stm32l4x5_usart.c
+F: hw/misc/stm32l4x5_exti.c
+F: hw/misc/stm32l4x5_syscfg.c
+F: hw/misc/stm32l4x5_rcc.c
+F: hw/gpio/stm32l4x5_gpio.c
+F: include/hw/*/stm32l4x5_*.h
+F: tests/qtest/stm32l4x5*
STM32VLDISCOVERY
M: Alexandre Iooss <erdnaxe@crans.org>
@@ -997,15 +1033,16 @@ F: docs/system/arm/stm32.rst
Versatile Express
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/vexpress.c
F: hw/display/sii9022.c
F: docs/system/arm/vexpress.rst
+F: tests/functional/test_arm_vexpress.py
Versatile PB
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/versatile*
F: hw/i2c/arm_sbcon_i2c.c
F: include/hw/i2c/arm_sbcon_i2c.h
@@ -1019,7 +1056,10 @@ S: Maintained
F: hw/arm/virt*
F: include/hw/arm/virt.h
F: docs/system/arm/virt.rst
-F: tests/avocado/machine_aarch64_virt.py
+F: tests/functional/test_aarch64_*virt*.py
+F: tests/functional/test_aarch64_tuxrun.py
+F: tests/functional/test_arm_tuxrun.py
+F: tests/functional/test_arm_virt.py
Xilinx Zynq
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@@ -1048,9 +1088,11 @@ F: include/hw/ssi/xilinx_spips.h
F: hw/display/dpcd.c
F: include/hw/display/dpcd.h
F: docs/system/arm/xlnx-versal-virt.rst
+F: docs/system/arm/xlnx-zcu102.rst
+F: tests/functional/test_aarch64_xlnx_versal.py
Xilinx Versal OSPI
-M: Francisco Iglesias <francisco.iglesias@xilinx.com>
+M: Francisco Iglesias <francisco.iglesias@amd.com>
S: Maintained
F: hw/ssi/xlnx-versal-ospi.c
F: include/hw/ssi/xlnx-versal-ospi.h
@@ -1092,6 +1134,8 @@ S: Maintained
F: hw/arm/stm32f405_soc.c
F: hw/misc/stm32f4xx_syscfg.c
F: hw/misc/stm32f4xx_exti.c
+F: hw/misc/stm32_rcc.c
+F: include/hw/misc/stm32_rcc.h
Netduino 2
M: Alistair Francis <alistair@alistair23.me>
@@ -1113,26 +1157,6 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/olimex-stm32-h405.c
-STM32L4x5 SoC Family
-M: Arnaud Minier <arnaud.minier@telecom-paris.fr>
-M: Inès Varhol <ines.varhol@telecom-paris.fr>
-L: qemu-arm@nongnu.org
-S: Maintained
-F: hw/arm/stm32l4x5_soc.c
-F: hw/char/stm32l4x5_usart.c
-F: hw/misc/stm32l4x5_exti.c
-F: hw/misc/stm32l4x5_syscfg.c
-F: hw/misc/stm32l4x5_rcc.c
-F: hw/gpio/stm32l4x5_gpio.c
-F: include/hw/*/stm32l4x5_*.h
-
-B-L475E-IOT01A IoT Node
-M: Arnaud Minier <arnaud.minier@telecom-paris.fr>
-M: Inès Varhol <ines.varhol@telecom-paris.fr>
-L: qemu-arm@nongnu.org
-S: Maintained
-F: hw/arm/b-l475e-iot01a.c
-
SmartFusion2
M: Subbaraya Sundeep <sundeep.lkml@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
@@ -1156,6 +1180,7 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/msf2-som.c
F: docs/system/arm/emcraft-sf2.rst
+F: tests/functional/test_arm_emcraft_sf2.py
ASPEED BMCs
M: Cédric Le Goater <clg@kaod.org>
@@ -1172,8 +1197,11 @@ F: include/hw/*/*aspeed*
F: hw/net/ftgmac100.c
F: include/hw/net/ftgmac100.h
F: docs/system/arm/aspeed.rst
+F: docs/system/arm/fby35.rst
F: tests/*/*aspeed*
+F: tests/*/*ast2700*
F: hw/arm/fby35.c
+F: pc-bios/ast27x0_bootrom.bin
NRF51
M: Joel Stanley <joel@jms.id.au>
@@ -1185,8 +1213,14 @@ F: hw/*/microbit*.c
F: include/hw/*/nrf51*.h
F: include/hw/*/microbit*.h
F: tests/qtest/microbit-test.c
+F: tests/functional/test_arm_microbit.py
F: docs/system/arm/nrf.rst
+ARM PL011 Rust device
+M: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+S: Maintained
+F: rust/hw/char/pl011/
+
AVR Machines
-------------
@@ -1206,22 +1240,16 @@ Arduino
M: Philippe Mathieu-Daudé <philmd@linaro.org>
S: Maintained
F: hw/avr/arduino.c
-
-CRIS Machines
--------------
-Axis Dev88
-M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
-S: Maintained
-F: hw/cris/axis_dev88.c
-F: hw/*/etraxfs_*.c
+F: tests/functional/test_avr_uno.py
HP-PARISC Machines
------------------
HP B160L, HP C3700
M: Richard Henderson <richard.henderson@linaro.org>
-R: Helge Deller <deller@gmx.de>
-S: Odd Fixes
+M: Helge Deller <deller@gmx.de>
+S: Maintained
F: configs/devices/hppa-softmmu/default.mak
+F: hw/char/diva-gsp.c
F: hw/display/artist.c
F: hw/hppa/
F: hw/input/lasips2.c
@@ -1236,11 +1264,13 @@ F: include/hw/pci-host/astro.h
F: include/hw/pci-host/dino.h
F: pc-bios/hppa-firmware.img
F: roms/seabios-hppa/
+F: tests/functional/test_hppa_seabios.py
LoongArch Machines
------------------
Virt
M: Song Gao <gaosong@loongson.cn>
+M: Bibo Mao <maobibo@loongson.cn>
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
S: Maintained
F: docs/system/loongarch/virt.rst
@@ -1249,9 +1279,9 @@ F: configs/devices/loongarch64-softmmu/default.mak
F: hw/loongarch/
F: include/hw/loongarch/virt.h
F: include/hw/intc/loongarch_*.h
-F: include/hw/intc/loongson_ipi.h
+F: include/hw/intc/loongson_ipi_common.h
F: hw/intc/loongarch_*.c
-F: hw/intc/loongson_ipi.c
+F: hw/intc/loongson_ipi_common.c
F: include/hw/pci-host/ls7a.h
F: hw/rtc/ls7a_rtc.c
F: gdb-xml/loongarch*.xml
@@ -1272,6 +1302,7 @@ F: hw/m68k/mcf_intc.c
F: hw/char/mcf_uart.c
F: hw/net/mcf_fec.c
F: include/hw/m68k/mcf*.h
+F: tests/functional/test_m68k_mcf5208evb.py
NeXTcube
M: Thomas Huth <huth@tuxfamily.org>
@@ -1279,6 +1310,7 @@ S: Odd Fixes
F: hw/m68k/next-*.c
F: hw/display/next-fb.c
F: include/hw/m68k/next-cube.h
+F: tests/functional/test_m68k_nextcube.py
q800
M: Laurent Vivier <laurent@vivier.eu>
@@ -1304,6 +1336,7 @@ F: include/hw/m68k/q800-glue.h
F: include/hw/misc/djmemc.h
F: include/hw/misc/iosb.h
F: include/hw/audio/asc.h
+F: tests/functional/test_m68k_q800.py
virt
M: Laurent Vivier <laurent@vivier.eu>
@@ -1318,6 +1351,7 @@ F: include/hw/intc/goldfish_pic.h
F: include/hw/intc/m68k_irqc.h
F: include/hw/misc/virt_ctrl.h
F: docs/specs/virt-ctlr.rst
+F: tests/functional/test_m68k_tuxrun.py
MicroBlaze Machines
-------------------
@@ -1326,7 +1360,7 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: hw/microblaze/petalogix_s3adsp1800_mmu.c
F: include/hw/char/xilinx_uartlite.h
-F: tests/avocado/machine_microblaze.py
+F: tests/functional/test_microblaze*.py
petalogix_ml605
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@@ -1362,8 +1396,8 @@ F: hw/acpi/piix4.c
F: hw/mips/malta.c
F: hw/pci-host/gt64120.c
F: include/hw/southbridge/piix.h
-F: tests/avocado/linux_ssh_mips_malta.py
-F: tests/avocado/machine_mips_malta.py
+F: tests/functional/test_mips*_malta.py
+F: tests/functional/test_mips*_tuxrun.py
Mipssim
R: Aleksandar Rikalo <arikalo@gmail.com>
@@ -1379,20 +1413,22 @@ S: Odd Fixes
F: hw/mips/fuloong2e.c
F: hw/pci-host/bonito.c
F: include/hw/pci-host/bonito.h
-F: tests/avocado/machine_mips_fuloong2e.py
+F: tests/functional/test_mips64el_fuloong2e.py
Loongson-3 virtual platforms
M: Huacai Chen <chenhuacai@kernel.org>
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
S: Maintained
+F: hw/intc/loongson_ipi_common.c
F: hw/intc/loongson_ipi.c
F: hw/intc/loongson_liointc.c
F: hw/mips/loongson3_bootp.c
F: hw/mips/loongson3_bootp.h
F: hw/mips/loongson3_virt.c
+F: include/hw/intc/loongson_ipi_common.h
F: include/hw/intc/loongson_ipi.h
F: include/hw/intc/loongson_liointc.h
-F: tests/avocado/machine_mips_loongson3v.py
+F: tests/functional/test_mips64el_loongson3v.py
Boston
M: Paul Burton <paulburton@kernel.org>
@@ -1411,25 +1447,21 @@ S: Maintained
F: docs/system/openrisc/or1k-sim.rst
F: hw/intc/ompic.c
F: hw/openrisc/openrisc_sim.c
+F: tests/functional/test_or1k_sim.py
PowerPC Machines
----------------
-405 (ref405ep)
-L: qemu-ppc@nongnu.org
-S: Orphan
-F: hw/ppc/ppc405*
-F: tests/avocado/ppc_405.py
-
Bamboo
L: qemu-ppc@nongnu.org
S: Orphan
F: hw/ppc/ppc440_bamboo.c
F: hw/pci-host/ppc4xx_pci.c
-F: tests/avocado/ppc_bamboo.py
+F: tests/functional/test_ppc_bamboo.py
e500
+M: Bernhard Beschow <shentey@gmail.com>
L: qemu-ppc@nongnu.org
-S: Orphan
+S: Odd Fixes
F: hw/ppc/e500*
F: hw/ppc/ppce500_spin.c
F: hw/gpio/mpc8xxx.c
@@ -1442,13 +1474,16 @@ F: pc-bios/u-boot.e500
F: hw/intc/openpic_kvm.c
F: include/hw/ppc/openpic_kvm.h
F: docs/system/ppc/ppce500.rst
+F: tests/functional/test_ppc64_e500.py
+F: tests/functional/test_ppc_tuxrun.py
mpc8544ds
+M: Bernhard Beschow <shentey@gmail.com>
L: qemu-ppc@nongnu.org
-S: Orphan
+S: Odd Fixes
F: hw/ppc/mpc8544ds.c
F: hw/ppc/mpc8544_guts.c
-F: tests/avocado/ppc_mpc8544ds.py
+F: tests/functional/test_ppc_mpc8544ds.py
New World (mac99)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
@@ -1470,6 +1505,8 @@ F: include/hw/ppc/mac_dbdma.h
F: include/hw/pci-host/uninorth.h
F: include/hw/input/adb*
F: pc-bios/qemu_vga.ndrv
+F: tests/functional/test_ppc_mac.py
+F: tests/functional/test_ppc64_mac99.py
Old World (g3beige)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
@@ -1485,6 +1522,7 @@ F: include/hw/intc/heathrow_pic.h
F: include/hw/input/adb*
F: include/hw/pci-host/grackle.h
F: pc-bios/qemu_vga.ndrv
+F: tests/functional/test_ppc_mac.py
PReP
M: Hervé Poussineau <hpoussin@reactos.org>
@@ -1501,12 +1539,11 @@ F: hw/dma/i82374.c
F: hw/rtc/m48t59-isa.c
F: include/hw/isa/pc87312.h
F: include/hw/rtc/m48t59.h
-F: tests/avocado/ppc_prep_40p.py
+F: tests/functional/test_ppc_40p.py
sPAPR (pseries)
M: Nicholas Piggin <npiggin@gmail.com>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
-R: David Gibson <david@gibson.dropbear.id.au>
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
@@ -1525,11 +1562,11 @@ F: tests/qtest/spapr*
F: tests/qtest/libqos/*spapr*
F: tests/qtest/rtas*
F: tests/qtest/libqos/rtas*
-F: tests/avocado/ppc_pseries.py
-F: tests/avocado/ppc_hv_tests.py
+F: tests/functional/test_ppc64_pseries.py
+F: tests/functional/test_ppc64_hv.py
+F: tests/functional/test_ppc64_tuxrun.py
PowerNV (Non-Virtualized)
-M: Cédric Le Goater <clg@kaod.org>
M: Nicholas Piggin <npiggin@gmail.com>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
@@ -1539,10 +1576,14 @@ F: hw/ppc/pnv*
F: hw/intc/pnv*
F: hw/intc/xics_pnv.c
F: hw/pci-host/pnv*
+F: hw/ssi/pnv_spi.c
F: include/hw/ppc/pnv*
F: include/hw/pci-host/pnv*
+F: include/hw/ssi/pnv_spi*
F: pc-bios/skiboot.lid
+F: pc-bios/pnv-pnor.bin
F: tests/qtest/pnv*
+F: tests/functional/test_ppc64_powernv.py
pca955x
M: Glenn Miles <milesg@linux.ibm.com>
@@ -1557,7 +1598,7 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/virtex_ml507.c
-F: tests/avocado/ppc_virtex_ml507.py
+F: tests/functional/test_ppc_virtex_ml507.py
sam460ex
M: BALATON Zoltan <balaton@eik.bme.hu>
@@ -1569,10 +1610,11 @@ F: hw/pci-host/ppc440_pcix.c
F: hw/display/sm501*
F: hw/ide/sii3112.c
F: hw/rtc/m41t80.c
-F: pc-bios/canyonlands.dt[sb]
+F: pc-bios/dtb/canyonlands.dt[sb]
F: pc-bios/u-boot-sam460ex-20100605.bin
F: roms/u-boot-sam460ex
F: docs/system/ppc/amigang.rst
+F: tests/functional/test_ppc_sam460ex.py
pegasos2
M: BALATON Zoltan <balaton@eik.bme.hu>
@@ -1590,10 +1632,10 @@ S: Maintained
F: hw/ppc/amigaone.c
F: hw/pci-host/articia.c
F: include/hw/pci-host/articia.h
+F: tests/functional/test_ppc_amiga.py
Virtual Open Firmware (VOF)
M: Alexey Kardashevskiy <aik@ozlabs.ru>
-R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/spapr_vof*
@@ -1614,7 +1656,6 @@ F: include/hw/riscv/opentitan.h
F: include/hw/*/ibex_*.h
Microchip PolarFire SoC Icicle Kit
-M: Bin Meng <bmeng.cn@gmail.com>
L: qemu-riscv@nongnu.org
S: Supported
F: docs/system/riscv/microchip-icicle-kit.rst
@@ -1641,7 +1682,6 @@ F: include/hw/char/shakti_uart.h
SiFive Machines
M: Alistair Francis <Alistair.Francis@wdc.com>
-M: Bin Meng <bmeng.cn@gmail.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
L: qemu-riscv@nongnu.org
S: Supported
@@ -1649,19 +1689,25 @@ F: docs/system/riscv/sifive_u.rst
F: hw/*/*sifive*.c
F: include/hw/*/*sifive*.h
+AMD Microblaze-V Generic Board
+M: Sai Pavan Boddu <sai.pavan.boddu@amd.com>
+S: Maintained
+F: hw/riscv/microblaze-v-generic.c
+F: docs/system/riscv/microblaze-v-generic.rst
+
RX Machines
-----------
rx-gdbsim
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
S: Orphan
F: docs/system/target-rx.rst
F: hw/rx/rx-gdbsim.c
-F: tests/avocado/machine_rx_gdbsim.py
+F: tests/functional/test_rx_gdbsim.py
SH4 Machines
------------
R2D
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
F: hw/char/sh_serial.c
@@ -1671,17 +1717,8 @@ F: hw/pci-host/sh_pci.c
F: hw/timer/sh_timer.c
F: include/hw/sh4/sh_intc.h
F: include/hw/timer/tmu012.h
-
-Shix
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
-R: Magnus Damm <magnus.damm@gmail.com>
-S: Odd Fixes
-F: hw/block/tc58128.c
-F: hw/char/sh_serial.c
-F: hw/sh4/shix.c
-F: hw/intc/sh_intc.c
-F: hw/timer/sh_timer.c
-F: include/hw/sh4/sh_intc.h
+F: tests/functional/test_sh4*_r2d.py
+F: tests/functional/test_sh4_tuxrun.py
SPARC Machines
--------------
@@ -1699,6 +1736,7 @@ F: include/hw/nvram/sun_nvram.h
F: include/hw/sparc/sparc32_dma.h
F: include/hw/sparc/sun4m_iommu.h
F: pc-bios/openbios-sparc32
+F: tests/functional/test_sparc_sun4m.py
Sun4u
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
@@ -1711,7 +1749,8 @@ F: include/hw/pci-host/sabre.h
F: hw/pci-bridge/simba.c
F: include/hw/pci-bridge/simba.h
F: pc-bios/openbios-sparc64
-F: tests/avocado/machine_sparc64_sun4u.py
+F: tests/functional/test_sparc64_sun4u.py
+F: tests/functional/test_sparc64_tuxrun.py
Sun4v
M: Artyom Tarasenko <atar4qemu@gmail.com>
@@ -1727,7 +1766,6 @@ S: Maintained
F: hw/sparc/leon3.c
F: hw/*/grlib*
F: include/hw/*/grlib*
-F: tests/avocado/machine_sparc_leon3.py
S390 Machines
-------------
@@ -1735,17 +1773,20 @@ S390 Virtio-ccw
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: Eric Farman <farman@linux.ibm.com>
+R: Matthew Rosato <mjrosato@linux.ibm.com>
S: Supported
F: hw/s390x/
F: include/hw/s390x/
F: configs/devices/s390x-softmmu/default.mak
-F: tests/avocado/machine_s390_ccw_virtio.py
+F: tests/functional/test_s390x_*
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
S390-ccw boot
M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: Thomas Huth <thuth@redhat.com>
+R: Jared Rossi <jrossi@linux.ibm.com>
+R: Zhuoying Cai <zycai@linux.ibm.com>
S: Supported
F: hw/s390x/ipl.*
F: pc-bios/s390-ccw/
@@ -1766,6 +1807,7 @@ S390 channel subsystem
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: Eric Farman <farman@linux.ibm.com>
+R: Farhan Ali <alifm@linux.ibm.com>
S: Supported
F: hw/s390x/ccw-device.[ch]
F: hw/s390x/css.c
@@ -1786,6 +1828,7 @@ L: qemu-s390x@nongnu.org
S390 SCLP-backed devices
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Jason Herne <jjherne@linux.ibm.com>
S: Supported
F: include/hw/s390x/event-facility.h
F: include/hw/s390x/sclp.h
@@ -1802,7 +1845,7 @@ F: hw/s390x/cpu-topology.c
F: target/s390x/kvm/stsi-topology.c
F: docs/devel/s390-cpu-topology.rst
F: docs/system/s390x/cpu-topology.rst
-F: tests/avocado/s390_topology.py
+F: tests/functional/test_s390x_topology.py
X86 Machines
------------
@@ -1830,6 +1873,12 @@ F: hw/isa/apm.c
F: include/hw/isa/apm.h
F: tests/unit/test-x86-topo.c
F: tests/qtest/test-x86-cpuid-compat.c
+F: tests/functional/test_i386_tuxrun.py
+F: tests/functional/test_linux_initrd.py
+F: tests/functional/test_mem_addr_space.py
+F: tests/functional/test_pc_cpu_hotplug_props.py
+F: tests/functional/test_x86_64_tuxrun.py
+F: tests/functional/test_x86_cpu_model_versions.py
PC Chipset
M: Michael S. Tsirkin <mst@redhat.com>
@@ -1874,14 +1923,25 @@ F: hw/i386/microvm.c
F: include/hw/i386/microvm.h
F: pc-bios/bios-microvm.bin
+nitro-enclave
+M: Alexander Graf <graf@amazon.com>
+M: Dorjoy Chowdhury <dorjoychy111@gmail.com>
+S: Maintained
+F: hw/core/eif.c
+F: hw/core/eif.h
+F: hw/i386/nitro_enclave.c
+F: include/hw/i386/nitro_enclave.h
+F: docs/system/i386/nitro-enclave.rst
+
Machine core
M: Eduardo Habkost <eduardo@habkost.net>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Yanan Wang <wangyanan55@huawei.com>
+R: Zhao Liu <zhao1.liu@intel.com>
S: Supported
F: hw/core/cpu-common.c
-F: hw/core/cpu-sysemu.c
+F: hw/core/cpu-system.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
F: hw/core/machine-smp.c
@@ -1890,14 +1950,22 @@ F: hw/core/numa.c
F: hw/cpu/cluster.c
F: qapi/machine.json
F: qapi/machine-common.json
-F: qapi/machine-target.json
F: include/hw/boards.h
F: include/hw/core/cpu.h
F: include/hw/cpu/cluster.h
-F: include/sysemu/numa.h
+F: include/system/numa.h
+F: tests/functional/test_cpu_queries.py
+F: tests/functional/test_empty_cpu_model.py
F: tests/unit/test-smp-parse.c
T: git https://gitlab.com/ehabkost/qemu.git machine-next
+TargetInfo API
+M: Pierrick Bouvier <pierrick.bouvier@linaro.org>
+M: Philippe Mathieu-Daudé <philmd@linaro.org>
+S: Supported
+F: include/qemu/target-info*.h
+F: target-info*.c
+
Xtensa Machines
---------------
sim
@@ -1916,6 +1984,7 @@ S: Maintained
F: hw/xtensa/xtfpga.c
F: hw/net/opencores_eth.c
F: include/hw/xtensa/mx_pic.h
+F: tests/functional/test_xtensa_lx60.py
Devices
-------
@@ -1930,8 +1999,8 @@ F: tests/qtest/intel-hda-test.c
F: tests/qtest/fuzz-sb16-test.c
Xilinx CAN
-M: Vikram Garhwal <vikram.garhwal@amd.com>
M: Francisco Iglesias <francisco.iglesias@amd.com>
+M: Vikram Garhwal <vikram.garhwal@bytedance.com>
S: Maintained
F: hw/net/can/xlnx-*
F: include/hw/net/xlnx-*
@@ -1988,10 +2057,11 @@ F: include/hw/hyperv/vmbus*.h
OMAP
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/omap*
F: include/hw/arm/omap.h
F: docs/system/arm/sx1.rst
+F: tests/functional/test_arm_sx1.py
IPack
M: Alberto Garcia <berto@igalia.com>
@@ -2010,6 +2080,7 @@ F: hw/pci-bridge/*
F: qapi/pci.json
F: docs/pci*
F: docs/specs/*pci*
+F: docs/system/sriov.rst
PCIE DOE
M: Huai-Cheng Kuo <hchkuo@avery-design.com.tw>
@@ -2057,13 +2128,13 @@ S: Supported
F: hw/acpi/viot.c
F: hw/acpi/viot.h
-ACPI/AVOCADO/BIOSBITS
+ACPI/FUNCTIONAL/BIOSBITS
M: Ani Sinha <anisinha@redhat.com>
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
-F: tests/avocado/acpi-bits/*
-F: tests/avocado/acpi-bits.py
-F: docs/devel/acpi-bits.rst
+F: tests/functional/acpi-bits/*
+F: tests/functional/test_acpi_bits.py
+F: docs/devel/testing/acpi-bits.rst
ACPI/HEST/GHES
R: Dongjiu Geng <gengdongjiu1@gmail.com>
@@ -2099,6 +2170,7 @@ S: Odd Fixes
F: hw/net/
F: include/hw/net/
F: tests/qtest/virtio-net-test.c
+F: tests/functional/test_info_usernet.py
F: docs/virtio-net-failover.rst
T: git https://github.com/jasowang/qemu.git net
@@ -2166,11 +2238,19 @@ M: Alex Williamson <alex.williamson@redhat.com>
M: CƩdric Le Goater <clg@redhat.com>
S: Supported
F: hw/vfio/*
+F: util/vfio-helpers.c
F: include/hw/vfio/
-F: docs/igd-assign.txt
F: docs/devel/migration/vfio.rst
F: qapi/vfio.json
+vfio-igd
+M: Alex Williamson <alex.williamson@redhat.com>
+M: CƩdric Le Goater <clg@redhat.com>
+M: Tomita Moeko <tomitamoeko@gmail.com>
+S: Supported
+F: hw/vfio/igd.c
+F: docs/igd-assign.txt
+
vfio-ccw
M: Eric Farman <farman@linux.ibm.com>
M: Matthew Rosato <mjrosato@linux.ibm.com>
@@ -2200,23 +2280,28 @@ M: Eric Auger <eric.auger@redhat.com>
M: Zhenzhong Duan <zhenzhong.duan@intel.com>
S: Supported
F: backends/iommufd.c
-F: include/sysemu/iommufd.h
+F: include/system/iommufd.h
F: backends/host_iommu_device.c
-F: include/sysemu/host_iommu_device.h
+F: include/system/host_iommu_device.h
F: include/qemu/chardev_open.h
F: util/chardev_open.c
F: docs/devel/vfio-iommufd.rst
vhost
M: Michael S. Tsirkin <mst@redhat.com>
+R: Stefano Garzarella <sgarzare@redhat.com>
S: Supported
F: hw/*/*vhost*
-F: docs/interop/vhost-user.json
-F: docs/interop/vhost-user.rst
+F: docs/interop/vhost-user*
+F: docs/system/devices/vhost-user*
F: contrib/vhost-user-*/
-F: backends/vhost-user.c
-F: include/sysemu/vhost-user-backend.h
+F: backends/*vhost*
+F: include/system/vhost-user-backend.h
+F: include/hw/virtio/vhost*
+F: include/*/vhost*
F: subprojects/libvhost-user/
+F: block/export/vhost-user*
+F: util/vhost-user-server.c
vhost-shadow-virtqueue
R: Eugenio PƩrez <eperezma@redhat.com>
@@ -2233,6 +2318,7 @@ F: net/vhost-user.c
F: include/hw/virtio/
F: docs/devel/virtio*
F: docs/devel/migration/virtio.rst
+F: tests/functional/test_virtio_version.py
virtio-balloon
M: Michael S. Tsirkin <mst@redhat.com>
@@ -2242,29 +2328,22 @@ F: docs/interop/virtio-balloon-stats.rst
F: hw/virtio/virtio-balloon*.c
F: include/hw/virtio/virtio-balloon.h
F: system/balloon.c
-F: include/sysemu/balloon.h
+F: include/system/balloon.h
+F: tests/qtest/virtio-balloon-test.c
+F: tests/functional/test_virtio_balloon.py
virtio-9p
-M: Greg Kurz <groug@kaod.org>
M: Christian Schoenebeck <qemu_oss@crudebyte.com>
+R: Greg Kurz <groug@kaod.org>
S: Maintained
W: https://wiki.qemu.org/Documentation/9p
F: hw/9pfs/
X: hw/9pfs/xen-9p*
-X: hw/9pfs/9p-proxy*
F: fsdev/
-X: fsdev/virtfs-proxy-helper.c
F: tests/qtest/virtio-9p-test.c
F: tests/qtest/libqos/virtio-9p*
-T: git https://gitlab.com/gkurz/qemu.git 9p-next
T: git https://github.com/cschoenebeck/qemu.git 9p.next
-virtio-9p-proxy
-F: hw/9pfs/9p-proxy*
-F: fsdev/virtfs-proxy-helper.c
-F: docs/tools/virtfs-proxy-helper.rst
-S: Obsolete
-
virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
@@ -2274,6 +2353,7 @@ F: hw/block/virtio-blk.c
F: hw/block/dataplane/*
F: include/hw/virtio/virtio-blk-common.h
F: tests/qtest/virtio-blk-test.c
+F: tests/functional/test_x86_64_hotplug_blk.py
T: git https://github.com/stefanha/qemu.git block
virtio-ccw
@@ -2331,10 +2411,20 @@ R: Amit Shah <amit@kernel.org>
S: Supported
F: hw/virtio/virtio-rng.c
F: include/hw/virtio/virtio-rng.h
-F: include/sysemu/rng*.h
+F: include/system/rng*.h
F: backends/rng*.c
F: tests/qtest/virtio-rng-test.c
+virtio-nsm
+M: Alexander Graf <graf@amazon.com>
+M: Dorjoy Chowdhury <dorjoychy111@gmail.com>
+S: Maintained
+F: hw/virtio/cbor-helpers.c
+F: hw/virtio/virtio-nsm.c
+F: hw/virtio/virtio-nsm-pci.c
+F: include/hw/virtio/cbor-helpers.h
+F: include/hw/virtio/virtio-nsm.h
+
vhost-user-stubs
M: Alex BennƩe <alex.bennee@linaro.org>
S: Maintained
@@ -2382,6 +2472,9 @@ F: include/hw/virtio/virtio-crypto.h
virtio based memory device
M: David Hildenbrand <david@redhat.com>
S: Supported
+F: hw/s390x/virtio-ccw-md.c
+F: hw/s390x/virtio-ccw-md.h
+F: hw/s390x/virtio-ccw-md-stubs.c
F: hw/virtio/virtio-md-pci.c
F: include/hw/virtio/virtio-md-pci.h
F: stubs/virtio-md-pci.c
@@ -2393,6 +2486,8 @@ W: https://virtio-mem.gitlab.io/
F: hw/virtio/virtio-mem.c
F: hw/virtio/virtio-mem-pci.h
F: hw/virtio/virtio-mem-pci.c
+F: hw/s390x/virtio-ccw-mem.c
+F: hw/s390x/virtio-ccw-mem.h
F: include/hw/virtio/virtio-mem.h
virtio-snd
@@ -2434,7 +2529,7 @@ F: tests/qtest/fuzz-megasas-test.c
Network packet abstractions
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Maintained
F: include/net/eth.h
F: net/eth.c
@@ -2460,17 +2555,17 @@ S: Maintained
F: hw/net/rocker/
F: qapi/rocker.json
F: tests/rocker/
-F: docs/specs/rocker.txt
+F: docs/specs/rocker.rst
e1000x
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Maintained
F: hw/net/e1000x*
e1000e
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Maintained
F: hw/net/e1000e*
F: tests/qtest/fuzz-e1000e-test.c
@@ -2478,12 +2573,12 @@ F: tests/qtest/e1000e-test.c
F: tests/qtest/libqos/e1000e.*
igb
-M: Akihiko Odaki <akihiko.odaki@daynix.com>
+M: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
R: Sriram Yagnaraman <sriram.yagnaraman@ericsson.com>
-S: Maintained
+S: Odd Fixes
F: docs/system/devices/igb.rst
F: hw/net/igb*
-F: tests/avocado/netdev-ethtool.py
+F: tests/functional/test_netdev_ethtool.py
F: tests/qtest/igb-test.c
F: tests/qtest/libqos/igb.c
@@ -2505,8 +2600,7 @@ F: hw/i2c/i2c_mux_pca954x.c
F: include/hw/i2c/i2c_mux_pca954x.h
pcf8574
-M: Dmitrii Sharikhin <d.sharikhin@yadro.com>
-S: Maintained
+S: Orphan
F: hw/gpio/pcf8574.c
F: include/gpio/pcf8574.h
@@ -2523,7 +2617,7 @@ M: Alex BennƩe <alex.bennee@linaro.org>
S: Maintained
F: hw/core/guest-loader.c
F: docs/system/guest-loader.rst
-F: tests/avocado/boot_xen.py
+F: tests/functional/test_aarch64_xen.py
Intel Hexadecimal Object File Loader
M: Su Hang <suhang16@mails.ucas.ac.cn>
@@ -2589,6 +2683,7 @@ F: hw/display/virtio-gpu*
F: hw/display/virtio-vga.*
F: include/hw/virtio/virtio-gpu.h
F: docs/system/devices/virtio-gpu.rst
+F: tests/functional/test_aarch64_virt_gpu.py
vhost-user-blk
M: Raphael Norwitz <raphael@enfabrica.net>
@@ -2624,6 +2719,11 @@ F: hw/display/edid*
F: include/hw/display/edid.h
F: qemu-edid.c
+macOS PV Graphics (apple-gfx)
+M: Phil Dennis-Jordan <phil@philjordan.eu>
+S: Maintained
+F: hw/display/apple-gfx*
+
PIIX4 South Bridge (i82371AB)
M: HervƩ Poussineau <hpoussin@reactos.org>
M: Philippe Mathieu-DaudƩ <philmd@linaro.org>
@@ -2654,16 +2754,16 @@ F: tests/qtest/fw_cfg-test.c
T: git https://github.com/philmd/qemu.git fw_cfg-next
XIVE
-M: CƩdric Le Goater <clg@kaod.org>
R: FrƩdƩric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/*xive*
F: include/hw/*/*xive*
+F: tests/qtest/*xive*
F: docs/*/*xive*
Renesas peripherals
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
F: hw/char/renesas_sci.c
@@ -2675,7 +2775,7 @@ F: include/hw/sh4/sh.h
F: include/hw/timer/renesas_*.h
Renesas RX peripherals
-R: Yoshinori Sato <ysato@users.sourceforge.jp>
+R: Yoshinori Sato <yoshinori.sato@nifty.com>
S: Orphan
F: hw/intc/rx_icu.c
F: hw/rx/
@@ -2684,7 +2784,8 @@ F: include/hw/rx/
CAN bus subsystem and hardware
M: Pavel Pisa <pisa@cmp.felk.cvut.cz>
-M: Vikram Garhwal <fnu.vikram@xilinx.com>
+M: Francisco Iglesias <francisco.iglesias@amd.com>
+M: Vikram Garhwal <vikram.garhwal@bytedance.com>
S: Maintained
W: https://canbus.pages.fel.cvut.cz/
F: net/can/*
@@ -2715,6 +2816,7 @@ F: include/hw/timer/mips_gictimer.h
S390 3270 device
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Collin Walling <walling@linux.ibm.com>
S: Odd fixes
F: include/hw/s390x/3270-ccw.h
F: hw/char/terminal3270.c
@@ -2724,6 +2826,7 @@ L: qemu-s390x@nongnu.org
S390 diag 288 watchdog
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Collin Walling <walling@linux.ibm.com>
S: Supported
F: hw/watchdog/wdt_diag288.c
F: include/hw/watchdog/wdt_diag288.h
@@ -2732,6 +2835,7 @@ L: qemu-s390x@nongnu.org
S390 storage key device
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Jason Herne <jjherne@linux.ibm.com>
S: Supported
F: hw/s390x/storage-keys.h
F: hw/s390x/s390-skeys*.c
@@ -2740,6 +2844,7 @@ L: qemu-s390x@nongnu.org
S390 storage attribute device
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Jason Herne <jjherne@linux.ibm.com>
S: Supported
F: hw/s390x/storage-attributes.h
F: hw/s390x/s390-stattrib*.c
@@ -2749,6 +2854,7 @@ S390 floating interrupt controller
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: David Hildenbrand <david@redhat.com>
+R: Jason Herne <jjherne@linux.ibm.com>
S: Supported
F: hw/intc/s390_flic*.c
F: include/hw/s390x/s390_flic.h
@@ -2770,6 +2876,27 @@ F: hw/hyperv/hv-balloon*.h
F: include/hw/hyperv/dynmem-proto.h
F: include/hw/hyperv/hv-balloon.h
+ivshmem-flat
+M: Gustavo Romero <gustavo.romero@linaro.org>
+S: Maintained
+F: hw/misc/ivshmem-flat.c
+F: include/hw/misc/ivshmem-flat.h
+F: docs/system/devices/ivshmem-flat.rst
+
+UEFI variable service
+M: Gerd Hoffmann <kraxel@redhat.com>
+S: Maintained
+F: hw/uefi/
+F: include/hw/uefi/
+
+VMapple
+M: Alexander Graf <agraf@csgraf.de>
+M: Phil Dennis-Jordan <phil@philjordan.eu>
+S: Maintained
+F: hw/vmapple/*
+F: include/hw/vmapple/*
+F: docs/system/arm/vmapple.rst
+
Subsystems
----------
Overall Audio backends
@@ -2778,7 +2905,7 @@ M: Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: audio/
X: audio/alsaaudio.c
-X: audio/coreaudio.c
+X: audio/coreaudio.m
X: audio/dsound*
X: audio/jackaudio.c
X: audio/ossaudio.c
@@ -2798,9 +2925,9 @@ Core Audio framework backend
M: Gerd Hoffmann <kraxel@redhat.com>
M: Philippe Mathieu-DaudƩ <philmd@linaro.org>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Odd Fixes
-F: audio/coreaudio.c
+F: audio/coreaudio.m
DSound Audio backend
M: Gerd Hoffmann <kraxel@redhat.com>
@@ -2846,7 +2973,7 @@ F: hw/block/
F: qapi/block*.json
F: qapi/transaction.json
F: include/block/
-F: include/sysemu/block-*.h
+F: include/system/block-*.h
F: qemu-img*
F: docs/tools/qemu-img.rst
F: qemu-io*
@@ -2920,6 +3047,16 @@ F: include/qemu/co-shared-resource.h
T: git https://gitlab.com/jsnow/qemu.git jobs
T: git https://gitlab.com/vsementsov/qemu.git block
+CheckPoint and Restart (CPR)
+R: Steve Sistare <steven.sistare@oracle.com>
+S: Supported
+F: hw/vfio/cpr*
+F: include/hw/vfio/vfio-cpr.h
+F: include/migration/cpr.h
+F: migration/cpr*
+F: tests/qtest/migration/cpr*
+F: docs/devel/migration/CPR.rst
+
Compute Express Link
M: Jonathan Cameron <jonathan.cameron@huawei.com>
R: Fan Ni <fan.ni@samsung.com>
@@ -2966,6 +3103,7 @@ S: Supported
F: include/qemu/option.h
F: tests/unit/test-keyval.c
F: tests/unit/test-qemu-opts.c
+F: tests/functional/test_version.py
F: util/keyval.c
F: util/qemu-option.c
@@ -2984,21 +3122,23 @@ M: Alistair Francis <alistair.francis@wdc.com>
R: David Gibson <david@gibson.dropbear.id.au>
S: Maintained
F: system/device_tree.c
-F: include/sysemu/device_tree.h
+F: include/system/device_tree.h
Dump
S: Supported
M: Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
+R: Ani Sinha <anisinha@redhat.com>
F: dump/
F: hw/misc/vmcoreinfo.c
F: include/hw/misc/vmcoreinfo.h
F: include/qemu/win_dump_defs
-F: include/sysemu/dump-arch.h
-F: include/sysemu/dump.h
+F: include/system/dump-arch.h
+F: include/system/dump.h
F: qapi/dump.json
F: scripts/dump-guest-memory.py
F: stubs/dump.c
F: docs/specs/vmcoreinfo.rst
+F: tests/qtest/vmcoreinfo-test.c
Error reporting
M: Markus Armbruster <armbru@redhat.com>
@@ -3027,6 +3167,7 @@ F: gdb-xml/
F: tests/tcg/multiarch/gdbstub/*
F: scripts/feature_to_c.py
F: scripts/probe-gdb-support.py
+T: git https://gitlab.com/stsquad/qemu gdbstub/next
Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
@@ -3034,18 +3175,19 @@ M: Peter Xu <peterx@redhat.com>
M: David Hildenbrand <david@redhat.com>
R: Philippe Mathieu-DaudƩ <philmd@linaro.org>
S: Supported
-F: include/exec/ioport.h
+F: include/system/ioport.h
F: include/exec/memop.h
-F: include/exec/memory.h
-F: include/exec/ram_addr.h
-F: include/exec/ramblock.h
-F: include/sysemu/memory_mapping.h
+F: include/system/memory.h
+F: include/system/ram_addr.h
+F: include/system/ramblock.h
+F: include/system/memory_mapping.h
F: system/dma-helpers.c
F: system/ioport.c
F: system/memory.c
F: system/memory_mapping.c
F: system/physmem.c
-F: include/exec/memory-internal.h
+F: system/memory-internal.h
+F: system/ram-block-attributes.c
F: scripts/coccinelle/memory-region-housekeeping.cocci
Memory devices
@@ -3080,11 +3222,12 @@ F: include/ui/
F: qapi/ui.json
F: util/drm.c
F: docs/devel/ui.rst
+F: tests/functional/test_vnc.py
Cocoa graphics
M: Peter Maydell <peter.maydell@linaro.org>
M: Philippe Mathieu-DaudƩ <philmd@linaro.org>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Odd Fixes
F: ui/cocoa.m
@@ -3092,8 +3235,8 @@ Main loop
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/qemu/main-loop.h
-F: include/sysemu/runstate.h
-F: include/sysemu/runstate-action.h
+F: include/system/runstate.h
+F: include/system/runstate-action.h
F: util/main-loop.c
F: util/qemu-timer*.c
F: system/vl.c
@@ -3107,11 +3250,13 @@ F: qapi/run-state.json
Read, Copy, Update (RCU)
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
-F: docs/devel/lockcnt.txt
-F: docs/devel/rcu.txt
+F: docs/devel/lockcnt.rst
+F: docs/devel/rcu.rst
F: include/qemu/rcu*.h
+F: include/qemu/lockcnt.h
F: tests/unit/rcutorture.c
F: tests/unit/test-rcu-*.c
+F: util/lockcnt.c
F: util/rcu.c
Human Monitor (HMP)
@@ -3154,7 +3299,7 @@ M: David Hildenbrand <david@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
S: Maintained
F: backends/hostmem*.c
-F: include/sysemu/hostmem.h
+F: include/system/hostmem.h
F: docs/system/vm-templating.rst
T: git https://gitlab.com/ehabkost/qemu.git machine-next
@@ -3162,14 +3307,13 @@ Cryptodev Backends
M: Gonglei <arei.gonglei@huawei.com>
M: zhenwei pi <pizhenwei@bytedance.com>
S: Maintained
-F: include/sysemu/cryptodev*.h
+F: include/system/cryptodev*.h
F: backends/cryptodev*.c
F: qapi/cryptodev.json
Python library
M: John Snow <jsnow@redhat.com>
M: Cleber Rosa <crosa@redhat.com>
-R: Beraldo Leal <bleal@redhat.com>
S: Maintained
F: python/
T: git https://gitlab.com/jsnow/qemu.git python
@@ -3201,8 +3345,6 @@ S: Supported
F: qapi/
X: qapi/*.json
F: include/qapi/
-X: include/qapi/qmp/
-F: include/qapi/qmp/dispatch.h
F: tests/qapi-schema/
F: tests/unit/test-*-visitor.c
F: tests/unit/test-qapi-*.c
@@ -3219,14 +3361,14 @@ M: Eric Blake <eblake@redhat.com>
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: qapi/*.json
+F: qga/qapi-schema.json
T: git https://repo.or.cz/qemu/armbru.git qapi-next
QObject
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: qobject/
-F: include/qapi/qmp/
-X: include/qapi/qmp/dispatch.h
+F: include/qobject/
F: scripts/coccinelle/qobject.cocci
F: tests/unit/check-qdict.c
F: tests/unit/check-qjson.c
@@ -3306,16 +3448,16 @@ F: tests/qtest/qmp-cmd-test.c
T: git https://repo.or.cz/qemu/armbru.git qapi-next
qtest
-M: Thomas Huth <thuth@redhat.com>
+M: Fabiano Rosas <farosas@suse.de>
M: Laurent Vivier <lvivier@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: system/qtest.c
-F: include/sysemu/qtest.h
+F: include/system/qtest.h
F: accel/qtest/
F: tests/qtest/
-F: docs/devel/qgraph.rst
-F: docs/devel/qtest.rst
+F: docs/devel/testing/qgraph.rst
+F: docs/devel/testing/qtest.rst
X: tests/qtest/bios-tables-test*
X: tests/qtest/migration-*
@@ -3324,7 +3466,7 @@ M: Alexander Bulekov <alxndr@bu.edu>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Bandan Das <bsd@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
-R: Thomas Huth <thuth@redhat.com>
+R: Fabiano Rosas <farosas@suse.de>
R: Darren Kenny <darren.kenny@oracle.com>
R: Qiuhao Li <Qiuhao.Li@outlook.com>
S: Maintained
@@ -3333,7 +3475,7 @@ F: tests/qtest/fuzz-*test.c
F: tests/docker/test-fuzz
F: scripts/oss-fuzz/
F: hw/mem/sparse-mem.c
-F: docs/devel/fuzzing.rst
+F: docs/devel/testing/fuzzing.rst
Register API
M: Alistair Francis <alistair@alistair23.me>
@@ -3342,6 +3484,18 @@ F: hw/core/register.c
F: include/hw/register.h
F: include/hw/registerfields.h
+Rust
+M: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+S: Maintained
+F: rust/qemu-api
+F: rust/qemu-api-macros
+F: rust/rustfmt.toml
+
+Rust-related patches CC here
+L: qemu-rust@nongnu.org
+F: tests/docker/test-rust
+F: rust/
+
SLIRP
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
@@ -3351,7 +3505,7 @@ T: git https://people.debian.org/~sthibault/qemu.git slirp
Stats
S: Orphan
-F: include/sysemu/stats.h
+F: include/system/stats.h
F: stats/
F: qapi/stats.json
@@ -3392,13 +3546,19 @@ S: Maintained
F: system/tpm*
F: hw/tpm/*
F: include/hw/acpi/tpm.h
-F: include/sysemu/tpm*
+F: include/system/tpm*
F: qapi/tpm.json
F: backends/tpm/
F: tests/qtest/*tpm*
F: docs/specs/tpm.rst
T: git https://github.com/stefanberger/qemu-tpm.git tpm-next
+SPDM
+M: Alistair Francis <alistair.francis@wdc.com>
+S: Maintained
+F: backends/spdm-socket.c
+F: include/system/spdm-socket.h
+
Checkpatch
S: Odd Fixes
F: scripts/checkpatch.pl
@@ -3413,11 +3573,13 @@ F: include/migration/
F: include/qemu/userfaultfd.h
F: migration/
F: scripts/vmstate-static-checker.py
+F: tests/functional/test_migration.py
F: tests/vmstate-static-checker-data/
+F: tests/qtest/migration/
F: tests/qtest/migration-*
F: docs/devel/migration/
F: qapi/migration.json
-F: tests/migration/
+F: tests/migration-stress/
F: util/userfaultfd.c
X: migration/rdma*
@@ -3426,21 +3588,23 @@ R: Li Zhijian <lizhijian@fujitsu.com>
R: Peter Xu <peterx@redhat.com>
S: Odd Fixes
F: migration/rdma*
+F: scripts/rdma-migration-helper.sh
Migration dirty limit and dirty page rate
M: Hyman Huang <yong.huang@smartx.com>
S: Maintained
F: system/dirtylimit.c
-F: include/sysemu/dirtylimit.h
+F: include/system/dirtylimit.h
F: migration/dirtyrate.c
F: migration/dirtyrate.h
-F: include/sysemu/dirtyrate.h
+F: include/system/dirtyrate.h
F: docs/devel/migration/dirty-limit.rst
Detached LUKS header
M: Hyman Huang <yong.huang@smartx.com>
S: Maintained
F: tests/qemu-iotests/tests/luks-detached-header
+F: docs/devel/luks-detached-header.rst
D-Bus
M: Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
@@ -3461,7 +3625,7 @@ Seccomp
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
F: system/qemu-seccomp.c
-F: include/sysemu/seccomp.h
+F: include/system/seccomp.h
F: tests/unit/test-seccomp.c
Cryptography
@@ -3474,7 +3638,7 @@ F: qapi/crypto.json
F: tests/unit/test-crypto-*
F: tests/bench/benchmark-crypto-*
F: tests/unit/crypto-tls-*
-F: tests/unit/pkix_asn1_tab.c
+F: tests/unit/pkix_asn1_tab.c.inc
F: qemu.sasl
Coroutines
@@ -3556,7 +3720,7 @@ F: include/migration/failover.h
F: docs/COLO-FT.txt
COLO Proxy
-M: Zhang Chen <chen.zhang@intel.com>
+M: Zhang Chen <zhangckid@gmail.com>
M: Li Zhijian <lizhijian@fujitsu.com>
S: Supported
F: docs/colo-proxy.txt
@@ -3566,21 +3730,20 @@ F: net/filter-mirror.c
F: tests/qtest/test-filter*
Record/replay
-M: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
R: Paolo Bonzini <pbonzini@redhat.com>
+R: Alex BennƩe <alex.bennee@linaro.org>
W: https://wiki.qemu.org/Features/record-replay
-S: Supported
+S: Odd Fixes
F: replay/*
F: block/blkreplay.c
F: net/filter-replay.c
F: include/exec/replay-core.h
-F: include/sysemu/replay.h
+F: include/system/replay.h
F: docs/devel/replay.rst
F: docs/system/replay.rst
F: stubs/replay.c
-F: tests/avocado/replay_kernel.py
-F: tests/avocado/replay_linux.py
-F: tests/avocado/reverse_debugging.py
+F: tests/functional/*reverse_debug*.py
+F: tests/functional/*replay*.py
F: qapi/replay.json
IOVA Tree
@@ -3591,7 +3754,7 @@ F: util/iova-tree.c
elf2dmp
M: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
-R: Akihiko Odaki <akihiko.odaki@daynix.com>
+R: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
S: Maintained
F: contrib/elf2dmp/
@@ -3658,17 +3821,21 @@ F: tests/uefi-test-tools/
VT-d Emulation
M: Michael S. Tsirkin <mst@redhat.com>
R: Jason Wang <jasowang@redhat.com>
+R: Yi Liu <yi.l.liu@intel.com>
+R: ClƩment Mathieu--Drif <clement.mathieu--drif@eviden.com>
S: Supported
F: hw/i386/intel_iommu.c
F: hw/i386/intel_iommu_internal.h
F: include/hw/i386/intel_iommu.h
+F: tests/functional/test_intel_iommu.py
+F: tests/qtest/intel-iommu-test.c
AMD-Vi Emulation
S: Orphan
F: hw/i386/amd_iommu.?
OpenSBI Firmware
-M: Bin Meng <bmeng.cn@gmail.com>
+L: qemu-riscv@nongnu.org
S: Supported
F: pc-bios/opensbi-*
F: .gitlab-ci.d/opensbi.yml
@@ -3690,7 +3857,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: include/hw/resettable.h
F: include/hw/core/resetcontainer.h
-F: include/sysemu/reset.h
+F: include/system/reset.h
F: hw/core/reset.c
F: hw/core/resettable.c
F: hw/core/resetcontainer.c
@@ -3701,6 +3868,7 @@ Overall usermode emulation
M: Riku Voipio <riku.voipio@iki.fi>
S: Maintained
F: accel/tcg/user-exec*.c
+F: hw/core/cpu-user.c
F: include/user/
F: common-user/
@@ -3721,7 +3889,7 @@ F: configs/targets/*linux-user.mak
F: scripts/qemu-binfmt-conf.sh
F: scripts/update-syscalltbl.sh
F: scripts/update-mips-syscall-args.sh
-F: scripts/gensyscalls.sh
+F: tests/functional/test_arm_bflt.py
Tiny Code Generator (TCG)
-------------------------
@@ -3733,15 +3901,17 @@ F: include/tcg/
TCG Plugins
M: Alex BennƩe <alex.bennee@linaro.org>
+T: git https://gitlab.com/stsquad/qemu plugins/next
R: Alexandre Iooss <erdnaxe@crans.org>
R: Mahmoud Mandour <ma.mandourr@gmail.com>
R: Pierrick Bouvier <pierrick.bouvier@linaro.org>
S: Maintained
F: docs/devel/tcg-plugins.rst
F: plugins/
-F: tests/plugin/
-F: tests/avocado/tcg_plugins.py
+F: tests/tcg/plugins/
+F: tests/functional/test_aarch64_tcg_plugins.py
F: contrib/plugins/
+F: scripts/qemu-plugin-symbols.py
AArch64 TCG target
M: Richard Henderson <richard.henderson@linaro.org>
@@ -3857,7 +4027,7 @@ F: nbd/
F: include/block/nbd*
F: qemu-nbd.*
F: blockdev-nbd.c
-F: docs/interop/nbd.txt
+F: docs/interop/nbd.rst
F: docs/tools/qemu-nbd.rst
F: tests/qemu-iotests/tests/*nbd*
T: git https://repo.or.cz/qemu/ericb.git nbd
@@ -3924,6 +4094,7 @@ M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blkverify.c
+F: docs/devel/testing/blkverify.rst
bochs
M: Stefan Hajnoczi <stefanha@redhat.com>
@@ -3950,7 +4121,8 @@ L: qemu-block@nongnu.org
S: Supported
F: block/parallels.c
F: block/parallels-ext.c
-F: docs/interop/parallels.txt
+F: docs/interop/parallels.rst
+F: docs/interop/prl-xml.rst
T: git https://src.openvz.org/scm/~den/qemu.git parallels
qed
@@ -3958,6 +4130,7 @@ M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qed.c
+F: docs/interop/qed_spec.rst
raw
M: Kevin Wolf <kwolf@redhat.com>
@@ -3986,7 +4159,7 @@ M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qcow2*
-F: docs/interop/qcow2.txt
+F: docs/interop/qcow2.rst
qcow
M: Kevin Wolf <kwolf@redhat.com>
@@ -4000,6 +4173,7 @@ M: Hanna Reitz <hreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blkdebug.c
+F: docs/devel/testing/blkdebug.rst
vpc
M: Kevin Wolf <kwolf@redhat.com>
@@ -4084,11 +4258,21 @@ F: hw/remote/proxy-memory-listener.c
F: include/hw/remote/proxy-memory-listener.h
F: hw/remote/iohub.c
F: include/hw/remote/iohub.h
-F: subprojects/libvfio-user
F: hw/remote/vfio-user-obj.c
F: include/hw/remote/vfio-user-obj.h
F: hw/remote/iommu.c
F: include/hw/remote/iommu.h
+F: tests/functional/test_multiprocess.py
+
+VFIO-USER:
+M: John Levon <john.levon@nutanix.com>
+M: Thanos Makatos <thanos.makatos@nutanix.com>
+S: Supported
+F: docs/interop/vfio-user.rst
+F: docs/system/devices/vfio-user.rst
+F: hw/vfio-user/*
+F: include/hw/vfio-user/*
+F: subprojects/libvfio-user
EBPF:
M: Jason Wang <jasowang@redhat.com>
@@ -4103,10 +4287,9 @@ Build and test automation
-------------------------
Build and test automation, general continuous integration
M: Alex BennƩe <alex.bennee@linaro.org>
+T: git https://gitlab.com/stsquad/qemu testing/next
M: Philippe Mathieu-DaudƩ <philmd@linaro.org>
M: Thomas Huth <thuth@redhat.com>
-R: Wainer dos Santos Moschetta <wainersm@redhat.com>
-R: Beraldo Leal <bleal@redhat.com>
S: Maintained
F: .github/workflows/lockdown.yml
F: .gitlab-ci.yml
@@ -4117,9 +4300,10 @@ F: scripts/ci/
F: tests/docker/
F: tests/vm/
F: tests/lcitool/
-F: tests/avocado/tuxrun_baselines.py
+F: tests/functional/test_*_tuxrun.py
F: scripts/archive-source.sh
-F: docs/devel/testing.rst
+F: docs/devel/testing/ci*
+F: docs/devel/testing/main.rst
W: https://gitlab.com/qemu-project/qemu/pipelines
W: https://travis-ci.org/qemu/qemu
@@ -4131,6 +4315,13 @@ F: .gitlab-ci.d/cirrus/freebsd*
F: tests/vm/freebsd
W: https://cirrus-ci.com/github/qemu/qemu
+Functional testing framework
+M: Thomas Huth <thuth@redhat.com>
+R: Philippe Mathieu-DaudƩ <philmd@linaro.org>
+R: Daniel P. Berrange <berrange@redhat.com>
+F: docs/devel/testing/functional.rst
+F: tests/functional/qemu_test/
+
Windows Hosted Continuous Integration
M: Yonggang Luo <luoyonggang@gmail.com>
S: Maintained
@@ -4142,15 +4333,6 @@ R: Philippe Mathieu-DaudƩ <philmd@linaro.org>
S: Maintained
F: tests/tcg/Makefile.target
-Integration Testing with the Avocado framework
-W: https://trello.com/b/6Qi1pxVn/avocado-qemu
-R: Cleber Rosa <crosa@redhat.com>
-R: Philippe Mathieu-DaudƩ <philmd@linaro.org>
-R: Wainer dos Santos Moschetta <wainersm@redhat.com>
-R: Beraldo Leal <bleal@redhat.com>
-S: Odd Fixes
-F: tests/avocado/
-
GitLab custom runner (Works On Arm Sponsored)
M: Alex BennƩe <alex.bennee@linaro.org>
M: Philippe Mathieu-DaudƩ <philmd@linaro.org>
@@ -4181,7 +4363,6 @@ Meson
M: Paolo Bonzini <pbonzini@redhat.com>
R: Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
R: Daniel P. Berrange <berrange@redhat.com>
-R: Thomas Huth <thuth@redhat.com>
R: Philippe Mathieu-DaudƩ <philmd@linaro.org>
S: Maintained
F: meson.build
@@ -4220,6 +4401,7 @@ S: Orphan
F: po/*.po
Sphinx documentation configuration and build machinery
+M: John Snow <jsnow@redhat.com>
M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: docs/conf.py
@@ -4228,6 +4410,16 @@ F: docs/sphinx/
F: docs/_templates/
F: docs/devel/docs.rst
+Rust build system integration
+M: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+L: qemu-rust@nongnu.org
+S: Maintained
+F: scripts/rust/
+F: rust/.gitignore
+F: rust/Kconfig
+F: rust/meson.build
+F: rust/wrapper.h
+
Miscellaneous
-------------
Performance Tools and Tests
diff --git a/Makefile b/Makefile
index 02a2575..c92a3cf 100644
--- a/Makefile
+++ b/Makefile
@@ -78,7 +78,8 @@ x := $(shell rm -rf meson-private meson-info meson-logs)
endif
# 1. ensure config-host.mak is up-to-date
-config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION
+config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \
+ $(SRC_PATH)/pythondeps.toml $(SRC_PATH)/VERSION
@echo config-host.mak is out-of-date, running configure
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
@@ -186,11 +187,6 @@ SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
$(SUBDIR_RULES):
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
-ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
-.PHONY: plugins
-plugins: contrib/plugins/all
-endif
-
.PHONY: recurse-all recurse-clean
recurse-all: $(addsuffix /all, $(SUBDIRS))
recurse-clean: $(addsuffix /clean, $(SUBDIRS))
@@ -211,10 +207,10 @@ clean: recurse-clean
VERSION = $(shell cat $(SRC_PATH)/VERSION)
-dist: qemu-$(VERSION).tar.bz2
+dist: qemu-$(VERSION).tar.xz
-qemu-%.tar.bz2:
- $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
+qemu-%.tar.xz:
+ $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.xz,%,$@)"
distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
@@ -306,11 +302,6 @@ help:
$(call print-help,cscope,Generate cscope index)
$(call print-help,sparse,Run sparse on the QEMU source)
@echo ''
-ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
- @echo 'Plugin targets:'
- $(call print-help,plugins,Build the example TCG plugins)
- @echo ''
-endif
@echo 'Cleaning targets:'
$(call print-help,clean,Remove most generated files but keep the config)
$(call print-help,distclean,Remove all generated files)
diff --git a/VERSION b/VERSION
index bc66ba6..54e6ccf 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-9.0.50
+10.0.50
diff --git a/accel/Kconfig b/accel/Kconfig
index 794e0d1..4263cab 100644
--- a/accel/Kconfig
+++ b/accel/Kconfig
@@ -16,4 +16,5 @@ config KVM
config XEN
bool
select FSDEV_9P if VIRTFS
+ select PCI_EXPRESS_GENERIC_BRIDGE
select XEN_BUS
diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c
index e083f24..51132d1 100644
--- a/accel/accel-blocker.c
+++ b/accel/accel-blocker.c
@@ -25,10 +25,11 @@
*/
#include "qemu/osdep.h"
+#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
-#include "sysemu/accel-blocker.h"
+#include "system/accel-blocker.h"
static QemuLockCnt accel_in_ioctl_lock;
static QemuEvent accel_in_ioctl_event;
diff --git a/accel/accel-common.c b/accel/accel-common.c
new file mode 100644
index 0000000..4894b98
--- /dev/null
+++ b/accel/accel-common.c
@@ -0,0 +1,142 @@
+/*
+ * QEMU accel class, components common to system emulation and user mode
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+#include "qemu/target-info.h"
+#include "accel/accel-cpu.h"
+#include "accel-internal.h"
+
+/* Lookup AccelClass from opt_name. Returns NULL if not found */
+AccelClass *accel_find(const char *opt_name)
+{
+ char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
+ AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
+ g_free(class_name);
+ return ac;
+}
+
+/* Return the name of the current accelerator */
+const char *current_accel_name(void)
+{
+ AccelClass *ac = ACCEL_GET_CLASS(current_accel());
+
+ return ac->name;
+}
+
+static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
+{
+ CPUClass *cc = CPU_CLASS(klass);
+ AccelCPUClass *accel_cpu = opaque;
+
+ /*
+ * The first callback allows accel-cpu to run initializations
+ * for the CPU, customizing CPU behavior according to the accelerator.
+ *
+ * The second one allows the CPU to customize the accel-cpu
+ * behavior according to the CPU.
+ *
+ * The second is currently only used by TCG, to specialize the
+ * TCGCPUOps depending on the CPU type.
+ */
+ cc->accel_cpu = accel_cpu;
+ if (accel_cpu->cpu_class_init) {
+ accel_cpu->cpu_class_init(cc);
+ }
+ if (cc->init_accel_cpu) {
+ cc->init_accel_cpu(accel_cpu, cc);
+ }
+}
+
+/* initialize the arch-specific accel CpuClass interfaces */
+static void accel_init_cpu_interfaces(AccelClass *ac)
+{
+ const char *ac_name; /* AccelClass name */
+ char *acc_name; /* AccelCPUClass name */
+ ObjectClass *acc; /* AccelCPUClass */
+ const char *cpu_resolving_type = target_cpu_type();
+
+ ac_name = object_class_get_name(OBJECT_CLASS(ac));
+ g_assert(ac_name != NULL);
+
+ acc_name = g_strdup_printf("%s-%s", ac_name, cpu_resolving_type);
+ acc = object_class_by_name(acc_name);
+ g_free(acc_name);
+
+ if (acc) {
+ object_class_foreach(accel_init_cpu_int_aux,
+ cpu_resolving_type, false, acc);
+ }
+}
+
+void accel_init_interfaces(AccelClass *ac)
+{
+ accel_init_ops_interfaces(ac);
+ accel_init_cpu_interfaces(ac);
+}
+
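+/* Run the accelerator's per-CPU instance-init hook, if the AccelCPUClass provides one. */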
+void accel_cpu_instance_init(CPUState *cpu)
+{
+ if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) {
+ cpu->cc->accel_cpu->cpu_instance_init(cpu);
+ }
+}
+
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+ /* target specific realization */
+ if (cpu->cc->accel_cpu
+ && cpu->cc->accel_cpu->cpu_target_realize
+ && !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+ return false;
+ }
+
+ /* generic realization */
+ if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+ return false;
+ }
+
+ return true;
+}
+
+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+ /* generic unrealization */
+ if (acc->cpu_common_unrealize) {
+ acc->cpu_common_unrealize(cpu);
+ }
+}
+
+int accel_supported_gdbstub_sstep_flags(void)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+ if (acc->gdbstub_supported_sstep_flags) {
+ return acc->gdbstub_supported_sstep_flags();
+ }
+ return 0;
+}
+
+static const TypeInfo accel_types[] = {
+ {
+ .name = TYPE_ACCEL,
+ .parent = TYPE_OBJECT,
+ .class_size = sizeof(AccelClass),
+ .instance_size = sizeof(AccelState),
+ .abstract = true,
+ },
+};
+
+DEFINE_TYPES(accel_types)
diff --git a/accel/accel-internal.h b/accel/accel-internal.h
new file mode 100644
index 0000000..d3a4422
--- /dev/null
+++ b/accel/accel-internal.h
@@ -0,0 +1,17 @@
+/*
+ * QEMU accel internal functions
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_INTERNAL_H
+#define ACCEL_INTERNAL_H
+
+#include "qemu/accel.h"
+
+void accel_init_ops_interfaces(AccelClass *ac);
+
+#endif /* ACCEL_INTERNAL_H */
diff --git a/accel/accel-system.c b/accel/accel-system.c
index f6c947d..a0f562a 100644
--- a/accel/accel-system.c
+++ b/accel/accel-system.c
@@ -26,9 +26,10 @@
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "hw/boards.h"
-#include "sysemu/cpus.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
#include "qemu/error-report.h"
-#include "accel-system.h"
+#include "accel-internal.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
{
@@ -62,7 +63,7 @@ void accel_setup_post(MachineState *ms)
}
/* initialize the arch-independent accel operation interfaces */
-void accel_system_init_ops_interfaces(AccelClass *ac)
+void accel_init_ops_interfaces(AccelClass *ac)
{
const char *ac_name;
char *ops_name;
@@ -73,19 +74,17 @@ void accel_system_init_ops_interfaces(AccelClass *ac)
g_assert(ac_name != NULL);
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
- ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
oc = module_object_class_by_name(ops_name);
if (!oc) {
error_report("fatal: could not load module for type '%s'", ops_name);
exit(1);
}
g_free(ops_name);
- ops = ACCEL_OPS_CLASS(oc);
/*
* all accelerators need to define ops, providing at least a mandatory
* non-NULL create_vcpu_thread operation.
*/
- g_assert(ops != NULL);
+ ops = ACCEL_OPS_CLASS(oc);
if (ops->ops_init) {
ops->ops_init(ops);
}
diff --git a/accel/accel-system.h b/accel/accel-system.h
deleted file mode 100644
index 2d37c73..0000000
--- a/accel/accel-system.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * QEMU System Emulation accel internal functions
- *
- * Copyright 2021 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef ACCEL_SYSTEM_H
-#define ACCEL_SYSTEM_H
-
-void accel_system_init_ops_interfaces(AccelClass *ac);
-
-#endif /* ACCEL_SYSTEM_H */
diff --git a/accel/accel-target.c b/accel/accel-target.c
index 08626c0..7fd392f 100644
--- a/accel/accel-target.c
+++ b/accel/accel-target.c
@@ -24,141 +24,7 @@
*/
#include "qemu/osdep.h"
-#include "qemu/accel.h"
-
-#include "cpu.h"
-#include "hw/core/accel-cpu.h"
-
-#ifndef CONFIG_USER_ONLY
-#include "accel-system.h"
-#endif /* !CONFIG_USER_ONLY */
-
-static const TypeInfo accel_type = {
- .name = TYPE_ACCEL,
- .parent = TYPE_OBJECT,
- .class_size = sizeof(AccelClass),
- .instance_size = sizeof(AccelState),
-};
-
-/* Lookup AccelClass from opt_name. Returns NULL if not found */
-AccelClass *accel_find(const char *opt_name)
-{
- char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
- AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
- g_free(class_name);
- return ac;
-}
-
-/* Return the name of the current accelerator */
-const char *current_accel_name(void)
-{
- AccelClass *ac = ACCEL_GET_CLASS(current_accel());
-
- return ac->name;
-}
-
-static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
-{
- CPUClass *cc = CPU_CLASS(klass);
- AccelCPUClass *accel_cpu = opaque;
-
- /*
- * The first callback allows accel-cpu to run initializations
- * for the CPU, customizing CPU behavior according to the accelerator.
- *
- * The second one allows the CPU to customize the accel-cpu
- * behavior according to the CPU.
- *
- * The second is currently only used by TCG, to specialize the
- * TCGCPUOps depending on the CPU type.
- */
- cc->accel_cpu = accel_cpu;
- if (accel_cpu->cpu_class_init) {
- accel_cpu->cpu_class_init(cc);
- }
- if (cc->init_accel_cpu) {
- cc->init_accel_cpu(accel_cpu, cc);
- }
-}
-
-/* initialize the arch-specific accel CpuClass interfaces */
-static void accel_init_cpu_interfaces(AccelClass *ac)
-{
- const char *ac_name; /* AccelClass name */
- char *acc_name; /* AccelCPUClass name */
- ObjectClass *acc; /* AccelCPUClass */
-
- ac_name = object_class_get_name(OBJECT_CLASS(ac));
- g_assert(ac_name != NULL);
-
- acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
- acc = object_class_by_name(acc_name);
- g_free(acc_name);
-
- if (acc) {
- object_class_foreach(accel_init_cpu_int_aux,
- CPU_RESOLVING_TYPE, false, acc);
- }
-}
-
-void accel_init_interfaces(AccelClass *ac)
-{
-#ifndef CONFIG_USER_ONLY
- accel_system_init_ops_interfaces(ac);
-#endif /* !CONFIG_USER_ONLY */
-
- accel_init_cpu_interfaces(ac);
-}
-
-void accel_cpu_instance_init(CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
- cc->accel_cpu->cpu_instance_init(cpu);
- }
-}
-
-bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
-
- /* target specific realization */
- if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
- && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
- return false;
- }
-
- /* generic realization */
- if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
- return false;
- }
-
- return true;
-}
-
-void accel_cpu_common_unrealize(CPUState *cpu)
-{
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
-
- /* generic unrealization */
- if (acc->cpu_common_unrealize) {
- acc->cpu_common_unrealize(cpu);
- }
-}
-
-int accel_supported_gdbstub_sstep_flags(void)
-{
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
- if (acc->gdbstub_supported_sstep_flags) {
- return acc->gdbstub_supported_sstep_flags();
- }
- return 0;
-}
+#include "accel/accel-cpu-target.h"
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
@@ -169,7 +35,6 @@ static const TypeInfo accel_cpu_type = {
static void register_accel_types(void)
{
- type_register_static(&accel_type);
type_register_static(&accel_cpu_type);
}
diff --git a/accel/accel-user.c b/accel/accel-user.c
index 22b6a1a..7d19230 100644
--- a/accel/accel-user.c
+++ b/accel/accel-user.c
@@ -9,6 +9,12 @@
#include "qemu/osdep.h"
#include "qemu/accel.h"
+#include "accel-internal.h"
+
+void accel_init_ops_interfaces(AccelClass *ac)
+{
+ /* nothing */
+}
AccelState *current_accel(void)
{
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index f32d8c8..8672761 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/rcu.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index ac08cfb..d60446b 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -50,21 +50,18 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "exec/address-spaces.h"
-#include "exec/exec-all.h"
+#include "system/address-spaces.h"
#include "gdbstub/enums.h"
-#include "sysemu/cpus.h"
-#include "sysemu/hvf.h"
-#include "sysemu/hvf_int.h"
-#include "sysemu/runstate.h"
+#include "hw/boards.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
+#include "system/hvf.h"
+#include "system/hvf_int.h"
+#include "system/runstate.h"
#include "qemu/guest-random.h"
HVFState *hvf_state;
-#ifdef __aarch64__
-#define HV_VM_DEFAULT NULL
-#endif
-
/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
@@ -323,8 +320,17 @@ static int hvf_accel_init(MachineState *ms)
int x;
hv_return_t ret;
HVFState *s;
+ int pa_range = 36;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+ if (mc->hvf_get_physical_address_range) {
+ pa_range = mc->hvf_get_physical_address_range(ms);
+ if (pa_range < 0) {
+ return -EINVAL;
+ }
+ }
- ret = hv_vm_create(HV_VM_DEFAULT);
+ ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
assert_hvf_ok(ret);
s = g_new0(HVFState, 1);
@@ -348,7 +354,7 @@ static inline int hvf_gdbstub_sstep_flags(void)
return SSTEP_ENABLE | SSTEP_NOIRQ;
}
-static void hvf_accel_class_init(ObjectClass *oc, void *data)
+static void hvf_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF";
@@ -360,6 +366,7 @@ static void hvf_accel_class_init(ObjectClass *oc, void *data)
static const TypeInfo hvf_accel_type = {
.name = TYPE_HVF_ACCEL,
.parent = TYPE_ACCEL,
+ .instance_size = sizeof(HVFState),
.class_init = hvf_accel_class_init,
};
@@ -571,7 +578,7 @@ static void hvf_remove_all_breakpoints(CPUState *cpu)
}
}
-static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
+static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c
index 6ca0850..8c387fd 100644
--- a/accel/hvf/hvf-all.c
+++ b/accel/hvf/hvf-all.c
@@ -10,8 +10,9 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/hvf.h"
-#include "sysemu/hvf_int.h"
+#include "system/hvf.h"
+#include "system/hvf_int.h"
+#include "hw/core/cpu.h"
const char *hvf_return_string(hv_return_t ret)
{
@@ -58,8 +59,13 @@ int hvf_sw_breakpoints_active(CPUState *cpu)
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
}
-int hvf_update_guest_debug(CPUState *cpu)
+static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
{
hvf_arch_update_guest_debug(cpu);
+}
+
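+/* Schedule the arch-specific debug update so it runs on the target vCPU's own thread. */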
+int hvf_update_guest_debug(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
return 0;
}
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index c239dfc..e5c1544 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -16,10 +16,11 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
+#include "system/accel-ops.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
@@ -89,7 +90,7 @@ static int kvm_update_guest_debug_ops(CPUState *cpu)
}
#endif
-static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
+static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 64bf47a..d095d1b 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -28,13 +28,14 @@
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
-#include "sysemu/kvm_int.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
-#include "sysemu/accel-blocker.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
+#include "system/accel-blocker.h"
#include "qemu/bswap.h"
-#include "exec/memory.h"
-#include "exec/ram_addr.h"
+#include "exec/tswap.h"
+#include "system/memory.h"
+#include "system/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
@@ -42,21 +43,26 @@
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qemu/guest-random.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
#include "kvm-cpus.h"
-#include "sysemu/dirtylimit.h"
+#include "system/dirtylimit.h"
#include "qemu/range.h"
#include "hw/boards.h"
-#include "sysemu/stats.h"
+#include "system/stats.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+# define KVM_HAVE_MCE_INJECTION 1
+#endif
+
+
/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
* need to use the real host PAGE_SIZE, as that's what KVM will use.
*/
@@ -69,6 +75,11 @@
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif
+/* Default num of memslots to be allocated when VM starts */
+#define KVM_MEMSLOTS_NR_ALLOC_DEFAULT 16
+/* Default max allowed memslots if kernel reported nothing */
+#define KVM_MEMSLOTS_NR_MAX_DEFAULT 32
+
struct KVMParkedVcpu {
unsigned long vcpu_id;
int kvm_fd;
@@ -88,6 +99,7 @@ bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
+bool kvm_pre_fault_memory_supported;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
@@ -165,11 +177,62 @@ void kvm_resample_fd_notify(int gsi)
}
}
+/**
+ * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
+ *
+ * @kml: The KVMMemoryListener whose slots[] array will be grown
+ * @nr_slots_new: The new size of the slots[] array
+ *
+ * Returns: True if the array grows larger, false otherwise.
+ */
+static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
+{
+ unsigned int i, cur = kml->nr_slots_allocated;
+ KVMSlot *slots;
+
+ if (nr_slots_new > kvm_state->nr_slots_max) {
+ nr_slots_new = kvm_state->nr_slots_max;
+ }
+
+ if (cur >= nr_slots_new) {
+ /* Big enough, no need to grow, or we reached max */
+ return false;
+ }
+
+ if (cur == 0) {
+ slots = g_new0(KVMSlot, nr_slots_new);
+ } else {
+ assert(kml->slots);
+ slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
+ /*
+ * g_renew() doesn't initialize the extended part of the buffer;
+ * however, kvm memslots require their fields to be zero-initialized,
+ * e.g. the pointers and the memory_size field.
+ */
+ memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
+ }
+
+ for (i = cur; i < nr_slots_new; i++) {
+ slots[i].slot = i;
+ }
+
+ kml->slots = slots;
+ kml->nr_slots_allocated = nr_slots_new;
+ trace_kvm_slots_grow(cur, nr_slots_new);
+
+ return true;
+}
+
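+/* Try to double the allocated slots[] array; kvm_slots_grow() caps it at nr_slots_max. */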
+static bool kvm_slots_double(KVMMemoryListener *kml)
+{
+ return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
+}
+
unsigned int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_accel());
- return s->nr_slots;
+ return s->nr_slots_max;
}
unsigned int kvm_get_free_memslots(void)
@@ -183,25 +246,36 @@ unsigned int kvm_get_free_memslots(void)
if (!s->as[i].ml) {
continue;
}
- used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
+ used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
}
kvm_slots_unlock();
- return s->nr_slots - used_slots;
+ return s->nr_slots_max - used_slots;
}
/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
- KVMState *s = kvm_state;
+ unsigned int n;
int i;
- for (i = 0; i < s->nr_slots; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
if (kml->slots[i].memory_size == 0) {
return &kml->slots[i];
}
}
+ /*
+ * If no free slots, try to grow first by doubling. Cache the old size
+ * here to avoid another round of search: if the grow succeeded, it
+ * means slots[] now must have the existing "n" slots occupied,
+ * followed by one or more free slots starting from slots[n].
+ */
+ n = kml->nr_slots_allocated;
+ if (kvm_slots_double(kml)) {
+ return &kml->slots[n];
+ }
+
return NULL;
}
@@ -222,10 +296,9 @@ static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr size)
{
- KVMState *s = kvm_state;
int i;
- for (i = 0; i < s->nr_slots; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
KVMSlot *mem = &kml->slots[i];
if (start_addr == mem->start_addr && size == mem->memory_size) {
@@ -267,7 +340,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
int i, ret = 0;
kvm_slots_lock();
- for (i = 0; i < s->nr_slots; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
KVMSlot *mem = &kml->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
@@ -340,14 +413,95 @@ err:
return ret;
}
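+/* Put the vCPU's KVM fd on the parked list so kvm_create_vcpu() can reuse it later. */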
+void kvm_park_vcpu(CPUState *cpu)
+{
+ struct KVMParkedVcpu *vcpu;
+
+ trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
+
+ vcpu = g_malloc0(sizeof(*vcpu));
+ vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
+ vcpu->kvm_fd = cpu->kvm_fd;
+ QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+}
+
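+/* Remove the parked vCPU matching vcpu_id from the list and return its fd, or -ENOENT. */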
+int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
+{
+ struct KVMParkedVcpu *cpu;
+ int kvm_fd = -ENOENT;
+
+ QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
+ if (cpu->vcpu_id == vcpu_id) {
+ QLIST_REMOVE(cpu, node);
+ kvm_fd = cpu->kvm_fd;
+ g_free(cpu);
+ break;
+ }
+ }
+
+ trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");
+
+ return kvm_fd;
+}
+
+static void kvm_reset_parked_vcpus(KVMState *s)
+{
+ struct KVMParkedVcpu *cpu;
+
+ QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
+ kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
+ }
+}
+
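+/* Bind a KVM vCPU fd to this CPUState, reusing a parked fd when one exists. */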
+int kvm_create_vcpu(CPUState *cpu)
+{
+ unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
+ KVMState *s = kvm_state;
+ int kvm_fd;
+
+ /* check if the KVM vCPU already exists but is parked */
+ kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
+ if (kvm_fd < 0) {
+ /* vCPU not parked: create a new KVM vCPU */
+ kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
+ if (kvm_fd < 0) {
+ error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
+ return kvm_fd;
+ }
+ }
+
+ cpu->kvm_fd = kvm_fd;
+ cpu->kvm_state = s;
+ if (!s->guest_state_protected) {
+ cpu->vcpu_dirty = true;
+ }
+ cpu->dirty_pages = 0;
+ cpu->throttle_us_per_full = 0;
+
+ trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);
+
+ return 0;
+}
+
+int kvm_create_and_park_vcpu(CPUState *cpu)
+{
+ int ret = 0;
+
+ ret = kvm_create_vcpu(cpu);
+ if (!ret) {
+ kvm_park_vcpu(cpu);
+ }
+
+ return ret;
+}
+
static int do_kvm_destroy_vcpu(CPUState *cpu)
{
KVMState *s = kvm_state;
- long mmap_size;
- struct KVMParkedVcpu *vcpu = NULL;
+ int mmap_size;
int ret = 0;
- trace_kvm_destroy_vcpu();
+ trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
ret = kvm_arch_destroy_vcpu(cpu);
if (ret < 0) {
@@ -373,10 +527,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
}
}
- vcpu = g_malloc0(sizeof(*vcpu));
- vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
- vcpu->kvm_fd = cpu->kvm_fd;
- QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+ kvm_park_vcpu(cpu);
err:
return ret;
}
@@ -389,44 +540,26 @@ void kvm_destroy_vcpu(CPUState *cpu)
}
}
-static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
-{
- struct KVMParkedVcpu *cpu;
-
- QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
- if (cpu->vcpu_id == vcpu_id) {
- int kvm_fd;
-
- QLIST_REMOVE(cpu, node);
- kvm_fd = cpu->kvm_fd;
- g_free(cpu);
- return kvm_fd;
- }
- }
-
- return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
-}
-
int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
KVMState *s = kvm_state;
- long mmap_size;
+ int mmap_size;
int ret;
trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
- ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
+ ret = kvm_arch_pre_create_vcpu(cpu, errp);
if (ret < 0) {
- error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
- kvm_arch_vcpu_id(cpu));
goto err;
}
- cpu->kvm_fd = ret;
- cpu->kvm_state = s;
- cpu->vcpu_dirty = true;
- cpu->dirty_pages = 0;
- cpu->throttle_us_per_full = 0;
+ ret = kvm_create_vcpu(cpu);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
+ kvm_arch_vcpu_id(cpu));
+ goto err;
+ }
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
@@ -1027,7 +1160,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
kvm_slots_lock();
- for (i = 0; i < s->nr_slots; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
mem = &kml->slots[i];
/* Discard slots that are empty or do not overlap the section */
if (!mem->memory_size ||
@@ -1168,7 +1301,7 @@ static void kvm_unpoison_all(void *param)
QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
QLIST_REMOVE(page, list);
- qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
+ qemu_ram_remap(page->ram_addr);
g_free(page);
}
}
@@ -1194,21 +1327,22 @@ bool kvm_hwpoisoned_mem(void)
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
-#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
- /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
- * endianness, but the memory core hands them in target endianness.
- * For example, PPC is always treated as big-endian even if running
- * on KVM and on PPC64LE. Correct here.
- */
- switch (size) {
- case 2:
- val = bswap16(val);
- break;
- case 4:
- val = bswap32(val);
- break;
+ if (target_needs_bswap()) {
+ /*
+ * The kernel expects ioeventfd values in HOST_BIG_ENDIAN
+ * endianness, but the memory core hands them in target endianness.
+ * For example, PPC is always treated as big-endian even if running
+ * on KVM and on PPC64LE. Correct here, swapping back.
+ */
+ switch (size) {
+ case 2:
+ val = bswap16(val);
+ break;
+ case 4:
+ val = bswap32(val);
+ break;
+ }
}
-#endif
return val;
}
@@ -1406,7 +1540,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
}
start_addr += slot_size;
size -= slot_size;
- kml->nr_used_slots--;
+ kml->nr_slots_used--;
} while (size);
return;
}
@@ -1445,7 +1579,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
ram_start_offset += slot_size;
ram += slot_size;
size -= slot_size;
- kml->nr_used_slots++;
+ kml->nr_slots_used++;
} while (size);
}
@@ -1481,11 +1615,7 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
r->reaper_iteration++;
}
- trace_kvm_dirty_ring_reaper("exit");
-
- rcu_unregister_thread();
-
- return NULL;
+ g_assert_not_reached();
}
static void kvm_dirty_ring_reaper_init(KVMState *s)
@@ -1675,12 +1805,8 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
/* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
kvm_dirty_ring_flush();
- /*
- * TODO: make this faster when nr_slots is big while there are
- * only a few used slots (small VMs).
- */
kvm_slots_lock();
- for (i = 0; i < s->nr_slots; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem);
@@ -1795,12 +1921,9 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
int i;
- kml->slots = g_new0(KVMSlot, s->nr_slots);
kml->as_id = as_id;
- for (i = 0; i < s->nr_slots; i++) {
- kml->slots[i].slot = i;
- }
+ kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
QSIMPLEQ_INIT(&kml->transaction_add);
QSIMPLEQ_INIT(&kml->transaction_del);
@@ -2311,7 +2434,7 @@ static int kvm_recommended_vcpus(KVMState *s)
static int kvm_max_vcpus(KVMState *s)
{
- int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
+ int ret = kvm_vm_check_extension(s, KVM_CAP_MAX_VCPUS);
return (ret) ? ret : kvm_recommended_vcpus(s);
}
@@ -2341,6 +2464,109 @@ uint32_t kvm_dirty_ring_size(void)
return kvm_state->kvm_dirty_ring_size;
}
+static int do_kvm_create_vm(MachineState *ms, int type)
+{
+ KVMState *s;
+ int ret;
+
+ s = KVM_STATE(ms->accelerator);
+
+ do {
+ ret = kvm_ioctl(s, KVM_CREATE_VM, type);
+ } while (ret == -EINTR);
+
+ if (ret < 0) {
+ error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
+
+#ifdef TARGET_S390X
+ if (ret == -EINVAL) {
+ error_printf("Host kernel setup problem detected."
+ " Please verify:\n");
+ error_printf("- for kernels supporting the"
+ " switch_amode or user_mode parameters, whether");
+ error_printf(" user space is running in primary address space\n");
+ error_printf("- for kernels supporting the vm.allocate_pgste"
+ " sysctl, whether it is enabled\n");
+ }
+#elif defined(TARGET_PPC)
+ if (ret == -EINVAL) {
+ error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
+ (type == 2) ? "pr" : "hv");
+ }
+#endif
+ }
+
+ return ret;
+}
+
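
do_kvm_create_vm() keeps the long-standing retry loop around KVM_CREATE_VM,
reissuing the ioctl for as long as it fails with -EINTR. The same pattern over
the raw ioctl() interface, which reports errors through errno rather than a
negative return value (illustration only, not QEMU code):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Issue an ioctl, restarting it if a signal interrupts the call, and
     * return the result or a negative errno value. */
    static long ioctl_retry_eintr(int fd, unsigned long request, unsigned long arg)
    {
        long ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret == -1 && errno == EINTR);

        return ret == -1 ? -errno : ret;
    }
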
+static int find_kvm_machine_type(MachineState *ms)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ int type;
+
+ if (object_property_find(OBJECT(current_machine), "kvm-type")) {
+ g_autofree char *kvm_type;
+ kvm_type = object_property_get_str(OBJECT(current_machine),
+ "kvm-type",
+ &error_abort);
+ type = mc->kvm_type(ms, kvm_type);
+ } else if (mc->kvm_type) {
+ type = mc->kvm_type(ms, NULL);
+ } else {
+ type = kvm_arch_get_default_type(ms);
+ }
+ return type;
+}
+
+static int kvm_setup_dirty_ring(KVMState *s)
+{
+ uint64_t dirty_log_manual_caps;
+ int ret;
+
+ /*
+ * Enable KVM dirty ring if supported, otherwise fall back to
+ * dirty logging mode
+ */
+ ret = kvm_dirty_ring_init(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /*
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
+ * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
+ * page is wr-protected initially, which is against how the kvm dirty ring is
+ * used - the kvm dirty ring requires all pages to be wr-protected at the very
+ * beginning. Enabling this feature for dirty ring causes data corruption.
+ *
+ * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
+ * we may expect a higher stall time when starting the migration. In the
+ * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
+ * instead of clearing dirty bit, it can be a way to explicitly wr-protect
+ * guest pages.
+ */
+ if (!s->kvm_dirty_ring_size) {
+ dirty_log_manual_caps =
+ kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+ dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+ KVM_DIRTY_LOG_INITIALLY_SET);
+ s->manual_dirty_log_protect = dirty_log_manual_caps;
+ if (dirty_log_manual_caps) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
+ dirty_log_manual_caps);
+ if (ret) {
+ warn_report("Trying to enable capability %"PRIu64" of "
+ "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
+ "Falling back to the legacy mode. ",
+ dirty_log_manual_caps);
+ s->manual_dirty_log_protect = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -2360,7 +2586,6 @@ static int kvm_init(MachineState *ms)
const KVMCapabilityInfo *missing_cap;
int ret;
int type;
- uint64_t dirty_log_manual_caps;
qemu_mutex_init(&kml_slots_lock);
@@ -2383,7 +2608,7 @@ static int kvm_init(MachineState *ms)
QLIST_INIT(&s->kvm_parked_vcpus);
s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
if (s->fd == -1) {
- fprintf(stderr, "Could not access KVM kernel module: %m\n");
+ error_report("Could not access KVM kernel module: %m");
ret = -errno;
goto err;
}
@@ -2393,84 +2618,43 @@ static int kvm_init(MachineState *ms)
if (ret >= 0) {
ret = -EINVAL;
}
- fprintf(stderr, "kvm version too old\n");
+ error_report("kvm version too old");
goto err;
}
if (ret > KVM_API_VERSION) {
ret = -EINVAL;
- fprintf(stderr, "kvm version not supported\n");
+ error_report("kvm version not supported");
goto err;
}
- kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
- kvm_guest_memfd_supported =
- kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
- kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
- (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
-
kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
- s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
+ s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
/* If unspecified, use the default value */
- if (!s->nr_slots) {
- s->nr_slots = 32;
- }
-
- s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
- if (s->nr_as <= 1) {
- s->nr_as = 1;
- }
- s->as = g_new0(struct KVMAs, s->nr_as);
-
- if (object_property_find(OBJECT(current_machine), "kvm-type")) {
- g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
- "kvm-type",
- &error_abort);
- type = mc->kvm_type(ms, kvm_type);
- } else if (mc->kvm_type) {
- type = mc->kvm_type(ms, NULL);
- } else {
- type = kvm_arch_get_default_type(ms);
+ if (!s->nr_slots_max) {
+ s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
}
+ type = find_kvm_machine_type(ms);
if (type < 0) {
ret = -EINVAL;
goto err;
}
- do {
- ret = kvm_ioctl(s, KVM_CREATE_VM, type);
- } while (ret == -EINTR);
-
+ ret = do_kvm_create_vm(ms, type);
if (ret < 0) {
- fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
- strerror(-ret));
-
-#ifdef TARGET_S390X
- if (ret == -EINVAL) {
- fprintf(stderr,
- "Host kernel setup problem detected. Please verify:\n");
- fprintf(stderr, "- for kernels supporting the switch_amode or"
- " user_mode parameters, whether\n");
- fprintf(stderr,
- " user space is running in primary address space\n");
- fprintf(stderr,
- "- for kernels supporting the vm.allocate_pgste sysctl, "
- "whether it is enabled\n");
- }
-#elif defined(TARGET_PPC)
- if (ret == -EINVAL) {
- fprintf(stderr,
- "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
- (type == 2) ? "pr" : "hv");
- }
-#endif
goto err;
}
s->vmfd = ret;
+ s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
+ if (s->nr_as <= 1) {
+ s->nr_as = 1;
+ }
+ s->as = g_new0(struct KVMAs, s->nr_as);
+
/* check the vcpu limits */
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
@@ -2482,9 +2666,9 @@ static int kvm_init(MachineState *ms)
nc->name, nc->num, soft_vcpus_limit);
if (nc->num > hard_vcpus_limit) {
- fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
- "the maximum cpus supported by KVM (%d)\n",
- nc->name, nc->num, hard_vcpus_limit);
+ error_report("Number of %s cpus requested (%d) exceeds "
+ "the maximum cpus supported by KVM (%d)",
+ nc->name, nc->num, hard_vcpus_limit);
exit(1);
}
}
@@ -2498,8 +2682,8 @@ static int kvm_init(MachineState *ms)
}
if (missing_cap) {
ret = -EINVAL;
- fprintf(stderr, "kvm does not support %s\n%s",
- missing_cap->name, upgrade_note);
+ error_report("kvm does not support %s", missing_cap->name);
+ error_printf("%s", upgrade_note);
goto err;
}
@@ -2507,47 +2691,11 @@ static int kvm_init(MachineState *ms)
s->coalesced_pio = s->coalesced_mmio &&
kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
- /*
- * Enable KVM dirty ring if supported, otherwise fall back to
- * dirty logging mode
- */
- ret = kvm_dirty_ring_init(s);
+ ret = kvm_setup_dirty_ring(s);
if (ret < 0) {
goto err;
}
- /*
- * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
- * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
- * page is wr-protected initially, which is against how kvm dirty ring is
- * usage - kvm dirty ring requires all pages are wr-protected at the very
- * beginning. Enabling this feature for dirty ring causes data corruption.
- *
- * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
- * we may expect a higher stall time when starting the migration. In the
- * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
- * instead of clearing dirty bit, it can be a way to explicitly wr-protect
- * guest pages.
- */
- if (!s->kvm_dirty_ring_size) {
- dirty_log_manual_caps =
- kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
- dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
- KVM_DIRTY_LOG_INITIALLY_SET);
- s->manual_dirty_log_protect = dirty_log_manual_caps;
- if (dirty_log_manual_caps) {
- ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
- dirty_log_manual_caps);
- if (ret) {
- warn_report("Trying to enable capability %"PRIu64" of "
- "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
- "Falling back to the legacy mode. ",
- dirty_log_manual_caps);
- s->manual_dirty_log_protect = 0;
- }
- }
- }
-
#ifdef KVM_CAP_VCPU_EVENTS
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
@@ -2559,7 +2707,7 @@ static int kvm_init(MachineState *ms)
}
kvm_readonly_mem_allowed =
- (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
+ (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
kvm_resamplefds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
@@ -2593,6 +2741,13 @@ static int kvm_init(MachineState *ms)
goto err;
}
+ kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
+ kvm_guest_memfd_supported =
+ kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
+ kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
+ (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
+ kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);
+
if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
@@ -2722,9 +2877,15 @@ void kvm_flush_coalesced_mmio_buffer(void)
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
- int ret = kvm_arch_get_registers(cpu);
+ Error *err = NULL;
+ int ret = kvm_arch_get_registers(cpu, &err);
if (ret) {
- error_report("Failed to get registers: %s", strerror(-ret));
+ if (err) {
+ error_reportf_err(err, "Failed to synchronize CPU state: ");
+ } else {
+ error_report("Failed to get registers: %s", strerror(-ret));
+ }
+
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
}
@@ -2742,9 +2903,15 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
- int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
+ Error *err = NULL;
+ int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
if (ret) {
- error_report("Failed to put registers after reset: %s", strerror(-ret));
+ if (err) {
+ error_reportf_err(err, "Restoring registers after reset: ");
+ } else {
+ error_report("Failed to put registers after reset: %s",
+ strerror(-ret));
+ }
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
}
@@ -2755,13 +2922,23 @@ static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg
void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+
+ if (cpu == first_cpu) {
+ kvm_reset_parked_vcpus(kvm_state);
+ }
}
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
- int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
+ Error *err = NULL;
+ int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
if (ret) {
- error_report("Failed to put registers after init: %s", strerror(-ret));
+ if (err) {
+ error_reportf_err(err, "Putting registers after init: ");
+ } else {
+ error_report("Failed to put registers after init: %s",
+ strerror(-ret));
+ }
exit(1);
}
@@ -2851,17 +3028,17 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
MemoryRegion *mr;
RAMBlock *rb;
void *addr;
- int ret = -1;
+ int ret = -EINVAL;
trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
!QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
- return -1;
+ return ret;
}
if (!size) {
- return -1;
+ return ret;
}
section = memory_region_find(get_system_memory(), start, size);
@@ -2879,7 +3056,7 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
if (!to_private) {
return 0;
}
- return -1;
+ return ret;
}
if (!memory_region_has_guest_memfd(mr)) {
@@ -2914,6 +3091,15 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
rb = qemu_ram_block_from_host(addr, false, &offset);
+ ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm),
+ offset, size, to_private);
+ if (ret) {
+ error_report("Failed to notify the listener of the state change of "
+ "(0x%"HWADDR_PRIx" + 0x%"HWADDR_PRIx") to %s",
+ start, size, to_private ? "private" : "shared");
+ goto out_unref;
+ }
+
if (to_private) {
if (rb->page_size != qemu_real_host_page_size()) {
/*
@@ -2951,10 +3137,15 @@ int kvm_cpu_exec(CPUState *cpu)
MemTxAttrs attrs;
if (cpu->vcpu_dirty) {
- ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
+ Error *err = NULL;
+ ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
if (ret) {
- error_report("Failed to put registers after init: %s",
- strerror(-ret));
+ if (err) {
+ error_reportf_err(err, "Putting registers after init: ");
+ } else {
+ error_report("Failed to put registers after init: %s",
+ strerror(-ret));
+ }
ret = -1;
break;
}
@@ -3126,7 +3317,7 @@ int kvm_cpu_exec(CPUState *cpu)
return ret;
}
-int kvm_ioctl(KVMState *s, int type, ...)
+int kvm_ioctl(KVMState *s, unsigned long type, ...)
{
int ret;
void *arg;
@@ -3144,7 +3335,7 @@ int kvm_ioctl(KVMState *s, int type, ...)
return ret;
}
-int kvm_vm_ioctl(KVMState *s, int type, ...)
+int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
{
int ret;
void *arg;
@@ -3164,7 +3355,7 @@ int kvm_vm_ioctl(KVMState *s, int type, ...)
return ret;
}
-int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
+int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
{
int ret;
void *arg;
@@ -3184,7 +3375,7 @@ int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
return ret;
}
-int kvm_device_ioctl(int fd, int type, ...)
+int kvm_device_ioctl(int fd, unsigned long type, ...)
{
int ret;
void *arg;
@@ -3745,6 +3936,21 @@ static void kvm_set_device(Object *obj,
s->device = g_strdup(value);
}
+static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
+{
+ KVMState *s = KVM_STATE(obj);
+ s->msr_energy.enable = value;
+}
+
+static void kvm_set_kvm_rapl_socket_path(Object *obj,
+ const char *str,
+ Error **errp)
+{
+ KVMState *s = KVM_STATE(obj);
+ g_free(s->msr_energy.socket_path);
+ s->msr_energy.socket_path = g_strdup(str);
+}
+
static void kvm_accel_instance_init(Object *obj)
{
KVMState *s = KVM_STATE(obj);
@@ -3764,6 +3970,7 @@ static void kvm_accel_instance_init(Object *obj)
s->xen_gnttab_max_frames = 64;
s->xen_evtchn_max_pirq = 256;
s->device = NULL;
+ s->msr_energy.enable = false;
}
/**
@@ -3777,7 +3984,7 @@ static int kvm_gdbstub_sstep_flags(void)
return kvm_sstep_flags;
}
-static void kvm_accel_class_init(ObjectClass *oc, void *data)
+static void kvm_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "KVM";
@@ -3808,6 +4015,17 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "device",
"Path to the device node to use (default: /dev/kvm)");
+ object_class_property_add_bool(oc, "rapl",
+ NULL,
+ kvm_set_kvm_rapl);
+ object_class_property_set_description(oc, "rapl",
+ "Allow energy-related MSRs for the RAPL interface in the guest");
+
+ object_class_property_add_str(oc, "rapl-helper-socket", NULL,
+ kvm_set_kvm_rapl_socket_path);
+ object_class_property_set_description(oc, "rapl-helper-socket",
+ "Socket path for communicating with the Virtual MSR helper daemon");
+
kvm_arch_accel_class_init(oc);
}
diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h
index ca40add..6885111 100644
--- a/accel/kvm/kvm-cpus.h
+++ b/accel/kvm/kvm-cpus.h
@@ -10,8 +10,6 @@
#ifndef KVM_CPUS_H
#define KVM_CPUS_H
-#include "sysemu/cpus.h"
-
int kvm_init_vcpu(CPUState *cpu, Error **errp);
int kvm_cpu_exec(CPUState *cpu);
void kvm_destroy_vcpu(CPUState *cpu);
@@ -22,5 +20,4 @@ bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
-
#endif /* KVM_CPUS_H */
diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
index 681ccb6..e43d18a 100644
--- a/accel/kvm/trace-events
+++ b/accel/kvm/trace-events
@@ -1,14 +1,18 @@
# See docs/devel/tracing.rst for syntax documentation.
# kvm-all.c
-kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
-kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p"
-kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p"
+kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
+kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
+kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p"
kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d"
-kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
+kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p"
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
+kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
kvm_irqchip_commit_routes(void) ""
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
@@ -25,7 +29,6 @@ kvm_dirty_ring_reaper(const char *s) "%s"
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
kvm_dirty_ring_flush(int finished) "%d"
-kvm_destroy_vcpu(void) ""
kvm_failed_get_vcpu_mmap_size(void) ""
kvm_cpu_exec(void) ""
kvm_interrupt_exit_request(void) ""
@@ -33,3 +36,4 @@ kvm_io_window_exit(void) ""
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64
+kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u"
diff --git a/accel/meson.build b/accel/meson.build
index 5eaeb68..5290931 100644
--- a/accel/meson.build
+++ b/accel/meson.build
@@ -1,3 +1,4 @@
+common_ss.add(files('accel-common.c'))
specific_ss.add(files('accel-target.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c'))
user_ss.add(files('accel-user.c'))
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index bf14032..92bed92 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -18,8 +18,9 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/accel.h"
-#include "sysemu/qtest.h"
-#include "sysemu/cpus.h"
+#include "system/accel-ops.h"
+#include "system/qtest.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
@@ -41,7 +42,7 @@ static int qtest_init_accel(MachineState *ms)
return 0;
}
-static void qtest_accel_class_init(ObjectClass *oc, void *data)
+static void qtest_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "QTest";
@@ -58,7 +59,7 @@ static const TypeInfo qtest_accel_type = {
};
module_obj(TYPE_QTEST_ACCEL);
-static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
+static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/accel/stubs/hvf-stub.c b/accel/stubs/hvf-stub.c
new file mode 100644
index 0000000..42eadc5
--- /dev/null
+++ b/accel/stubs/hvf-stub.c
@@ -0,0 +1,12 @@
+/*
+ * HVF stubs for QEMU
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/hvf.h"
+
+bool hvf_allowed;
diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index 8e0eb22..ecfd763 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/pci/msi.h"
KVMState *kvm_state;
diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build
index 91a2d21..8ca1a45 100644
--- a/accel/stubs/meson.build
+++ b/accel/stubs/meson.build
@@ -2,5 +2,6 @@ system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index dd890d6..3b76b8b 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -11,26 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/tb-flush.h"
-#include "exec/exec-all.h"
-
-void tb_flush(CPUState *cpu)
-{
-}
-
-int probe_access_flags(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t retaddr)
-{
- g_assert_not_reached();
-}
-
-void *probe_access(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-}
+#include "exec/cpu-common.h"
G_NORETURN void cpu_loop_exit(CPUState *cpu)
{
diff --git a/accel/stubs/xen-stub.c b/accel/stubs/xen-stub.c
index 7054965..cf929b6 100644
--- a/accel/stubs/xen-stub.c
+++ b/accel/stubs/xen-stub.c
@@ -6,7 +6,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/xen.h"
+#include "system/xen.h"
#include "qapi/qapi-commands-migration.h"
bool xen_allowed;
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 95a5c5f..6056598 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -14,9 +14,20 @@
*/
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
+ uint64_t read_value_low,
+ uint64_t read_value_high,
+ uint64_t write_value_low,
+ uint64_t write_value_high,
MemOpIdx oi)
{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
+ if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
+ read_value_low, read_value_high,
+ oi, QEMU_PLUGIN_MEM_R);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
+ write_value_low, write_value_high,
+ oi, QEMU_PLUGIN_MEM_W);
+ }
}
/*
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 1dc2151..08a475c 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -53,6 +53,14 @@
# error unsupported data size
#endif
+#if DATA_SIZE == 16
+# define VALUE_LOW(val) int128_getlo(val)
+# define VALUE_HIGH(val) int128_gethi(val)
+#else
+# define VALUE_LOW(val) val
+# define VALUE_HIGH(val) 0
+#endif
+
#if DATA_SIZE >= 4
# define ABI_TYPE DATA_TYPE
#else
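
With plugins enabled, the memory callback now receives the read and written
values as two 64-bit halves, and for 16-byte accesses the new VALUE_LOW/VALUE_HIGH
macros take those halves from QEMU's Int128 type. The same split expressed with
the GCC/Clang __int128 extension (illustration only, not the Int128 API):

    #include <stdint.h>

    /* Split a 128-bit value into the low and high 64-bit halves that a
     * two-argument callback expects. */
    static uint64_t value_low(unsigned __int128 v)  { return (uint64_t)v; }
    static uint64_t value_high(unsigned __int128 v) { return (uint64_t)(v >> 64); }
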
@@ -69,7 +77,7 @@
# define END _le
#endif
-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -83,12 +91,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
ATOMIC_MMU_CLEANUP;
- atomic_trace_rmw_post(env, addr, oi);
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(newv),
+ VALUE_HIGH(newv),
+ oi);
return ret;
}
#if DATA_SIZE < 16
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
@@ -97,19 +110,29 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
- atomic_trace_rmw_post(env, addr, oi);
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
return ret;
}
#define GEN_ATOMIC_HELPER(X) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
- atomic_trace_rmw_post(env, addr, oi); \
+ atomic_trace_rmw_post(env, addr, \
+ VALUE_LOW(ret), \
+ VALUE_HIGH(ret), \
+ VALUE_LOW(val), \
+ VALUE_HIGH(val), \
+ oi); \
return ret; \
}
@@ -133,7 +156,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
@@ -145,7 +168,12 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
} while (cmp != old); \
ATOMIC_MMU_CLEANUP; \
- atomic_trace_rmw_post(env, addr, oi); \
+ atomic_trace_rmw_post(env, addr, \
+ VALUE_LOW(old), \
+ VALUE_HIGH(old), \
+ VALUE_LOW(xval), \
+ VALUE_HIGH(xval), \
+ oi); \
return RET; \
}
@@ -174,7 +202,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
# define END _be
#endif
-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -188,12 +216,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
ATOMIC_MMU_CLEANUP;
- atomic_trace_rmw_post(env, addr, oi);
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(newv),
+ VALUE_HIGH(newv),
+ oi);
return BSWAP(ret);
}
#if DATA_SIZE < 16
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
@@ -202,19 +235,29 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
- atomic_trace_rmw_post(env, addr, oi);
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
return BSWAP(ret);
}
#define GEN_ATOMIC_HELPER(X) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
- atomic_trace_rmw_post(env, addr, oi); \
+ atomic_trace_rmw_post(env, addr, \
+ VALUE_LOW(ret), \
+ VALUE_HIGH(ret), \
+ VALUE_LOW(val), \
+ VALUE_HIGH(val), \
+ oi); \
return BSWAP(ret); \
}
@@ -235,7 +278,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
@@ -247,7 +290,12 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
} while (ldo != ldn); \
ATOMIC_MMU_CLEANUP; \
- atomic_trace_rmw_post(env, addr, oi); \
+ atomic_trace_rmw_post(env, addr, \
+ VALUE_LOW(old), \
+ VALUE_HIGH(old), \
+ VALUE_LOW(xval), \
+ VALUE_HIGH(xval), \
+ oi); \
return RET; \
}
@@ -281,3 +329,5 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
+#undef VALUE_LOW
+#undef VALUE_HIGH
diff --git a/accel/tcg/backend-ldst.h b/accel/tcg/backend-ldst.h
new file mode 100644
index 0000000..9c3a407
--- /dev/null
+++ b/accel/tcg/backend-ldst.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory barrier helpers for QEMU (target agnostic)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_BACKEND_LDST_H
+#define ACCEL_TCG_BACKEND_LDST_H
+
+#include "tcg-target-mo.h"
+
+/**
+ * tcg_req_mo:
+ * @guest_mo: Guest default memory order
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering. A non-zero
+ * result indicates that some barrier is required.
+ */
+#define tcg_req_mo(guest_mo, type) \
+ ((type) & guest_mo & ~TCG_TARGET_DEFAULT_MO)
+
+/**
+ * cpu_req_mo:
+ * @cpu: CPUState
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(cpu, type) \
+ do { \
+ if (tcg_req_mo(cpu->cc->tcg_ops->guest_default_memory_order, type)) { \
+ smp_mb(); \
+ } \
+ } while (0)
+
+#endif
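
The two macros above filter the ordering the guest's memory model asks for
against what the host already guarantees, and only issue a real fence for the
difference. A self-contained sketch with invented bit values (the real TCGBar
encoding and TCG_TARGET_DEFAULT_MO differ; not QEMU code):

    #include <stdio.h>

    /* Illustrative ordering bits; the real TCGBar encoding is different. */
    enum {
        MO_LD_LD = 1 << 0,
        MO_LD_ST = 1 << 1,
        MO_ST_LD = 1 << 2,
        MO_ST_ST = 1 << 3,
    };

    /* What the host gives for free, e.g. a TSO host orders everything
     * except stores against later loads. */
    #define HOST_DEFAULT_MO (MO_LD_LD | MO_LD_ST | MO_ST_ST)

    /* Non-zero result: the guest needs an ordering the host does not
     * provide, so a real barrier instruction must be emitted. */
    #define req_mo(guest_mo, type) ((type) & (guest_mo) & ~HOST_DEFAULT_MO)

    int main(void)
    {
        int guest_mo = MO_LD_LD | MO_LD_ST | MO_ST_LD | MO_ST_ST; /* strongly ordered guest */

        printf("ST->LD barrier needed: %s\n", req_mo(guest_mo, MO_ST_LD) ? "yes" : "no");
        printf("LD->LD barrier needed: %s\n", req_mo(guest_mo, MO_LD_LD) ? "yes" : "no");
        return 0;
    }
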
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index bc9b1a2..c5c513f 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -18,13 +18,45 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/cpus.h"
-#include "sysemu/tcg.h"
+#include "exec/log.h"
+#include "system/tcg.h"
#include "qemu/plugin.h"
#include "internal-common.h"
bool tcg_allowed;
+bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
+{
+ return cpu->tcg_cflags & flags;
+}
+
+void tcg_cflags_set(CPUState *cpu, uint32_t flags)
+{
+ cpu->tcg_cflags |= flags;
+}
+
+uint32_t curr_cflags(CPUState *cpu)
+{
+ uint32_t cflags = cpu->tcg_cflags;
+
+ /*
+ * Record gdb single-step. We should be exiting the TB by raising
+ * EXCP_DEBUG, but to simplify other tests, disable chaining too.
+ *
+ * For singlestep and -d nochain, suppress goto_tb so that
+ * we can log -d cpu,exec after every TB.
+ */
+ if (unlikely(cpu->singlestep_enabled)) {
+ cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
+ } else if (qatomic_read(&one_insn_per_tb)) {
+ cflags |= CF_NO_GOTO_TB | 1;
+ } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ cflags |= CF_NO_GOTO_TB;
+ }
+
+ return cflags;
+}
+
/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 9010dad..713bdb2 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -21,29 +21,30 @@
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "hw/core/cpu.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/helper-retaddr.h"
#include "trace.h"
#include "disas/disas.h"
-#include "exec/exec-all.h"
+#include "exec/cpu-common.h"
+#include "exec/cpu-interrupt.h"
+#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
+#include "exec/translation-block.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
-#include "exec/cpu-all.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "exec/replay-core.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
+#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
-#if defined(CONFIG_USER_ONLY)
-#include "user-retaddr.h"
-#endif
/* -icount align implementation. */
@@ -147,45 +148,10 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
}
#endif /* CONFIG USER ONLY */
-bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
-{
- return cpu->tcg_cflags & flags;
-}
-
-void tcg_cflags_set(CPUState *cpu, uint32_t flags)
-{
- cpu->tcg_cflags |= flags;
-}
-
-uint32_t curr_cflags(CPUState *cpu)
-{
- uint32_t cflags = cpu->tcg_cflags;
-
- /*
- * Record gdb single-step. We should be exiting the TB by raising
- * EXCP_DEBUG, but to simplify other tests, disable chaining too.
- *
- * For singlestep and -d nochain, suppress goto_tb so that
- * we can log -d cpu,exec after every TB.
- */
- if (unlikely(cpu->singlestep_enabled)) {
- cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
- } else if (qatomic_read(&one_insn_per_tb)) {
- cflags |= CF_NO_GOTO_TB | 1;
- } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- cflags |= CF_NO_GOTO_TB;
- }
-
- return cflags;
-}
-
struct tb_desc {
- vaddr pc;
- uint64_t cs_base;
+ TCGTBCPUState s;
CPUArchState *env;
tb_page_addr_t page_addr0;
- uint32_t flags;
- uint32_t cflags;
};
static bool tb_lookup_cmp(const void *p, const void *d)
@@ -193,11 +159,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
+ if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
- tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags &&
- tb_cflags(tb) == desc->cflags) {
+ tb->cs_base == desc->s.cs_base &&
+ tb->flags == desc->s.flags &&
+ tb_cflags(tb) == desc->s.cflags) {
/* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
if (tb_phys_page1 == -1) {
@@ -215,7 +181,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
- virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
+ virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc);
phys_page1 = get_page_addr_code(desc->env, virt_page1);
if (tb_phys_page1 == phys_page1) {
return true;
@@ -225,59 +191,65 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
+ desc.s = s;
desc.env = cpu_env(cpu);
- desc.cs_base = cs_base;
- desc.flags = flags;
- desc.cflags = cflags;
- desc.pc = pc;
- phys_pc = get_page_addr_code(desc.env, pc);
+ phys_pc = get_page_addr_code(desc.env, s.pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
- h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
- flags, cs_base, cflags);
+ h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc),
+ s.flags, s.cs_base, s.cflags);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
-/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+/**
+ * tb_lookup:
+ * @cpu: CPU that will execute the returned translation block
+ * @pc: guest PC
+ * @cs_base: arch-specific value associated with translation block
+ * @flags: arch-specific translation block flags
+ * @cflags: CF_* flags
+ *
+ * Look up a translation block inside the QHT using @pc, @cs_base, @flags and
+ * @cflags. Uses @cpu's tb_jmp_cache. Might cause an exception, so have a
+ * longjmp destination ready.
+ *
+ * Returns: an existing translation block or NULL.
+ */
+static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s)
{
TranslationBlock *tb;
CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
- tcg_debug_assert(!(cflags & CF_INVALID));
+ tcg_debug_assert(!(s.cflags & CF_INVALID));
- hash = tb_jmp_cache_hash_func(pc);
+ hash = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
- jc->array[hash].pc == pc &&
- tb->cs_base == cs_base &&
- tb->flags == flags &&
- tb_cflags(tb) == cflags)) {
+ jc->array[hash].pc == s.pc &&
+ tb->cs_base == s.cs_base &&
+ tb->flags == s.flags &&
+ tb_cflags(tb) == s.cflags)) {
goto hit;
}
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_htable_lookup(cpu, s);
if (tb == NULL) {
return NULL;
}
- jc->array[hash].pc = pc;
+ jc->array[hash].pc = s.pc;
qatomic_set(&jc->array[hash].tb, tb);
hit:
@@ -285,7 +257,7 @@ hit:
* As long as tb is not NULL, the contents are consistent. Therefore,
* the virtual PC has to match for non-CF_PCREL translations.
*/
- assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
+ assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc);
return tb;
}
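
tb_lookup() above is a two-level lookup: a small per-CPU direct-mapped jump
cache sits in front of the global QHT hash table, and the cache is refilled on
a slow-path hit. Reduced to that shape in plain C, with a single key standing in
for the full (pc, cs_base, flags, cflags) tuple (illustration only, not QEMU code):

    #include <stdint.h>

    #define CACHE_SIZE 4096                 /* power of two, like the TB jump cache */

    typedef struct { uint64_t key; void *value; } CacheEntry;

    static CacheEntry front_cache[CACHE_SIZE];

    /* Stands in for the global hash-table lookup (tb_htable_lookup). */
    static void *slow_lookup(uint64_t key)
    {
        (void)key;
        return NULL;                        /* stubbed out for the sketch */
    }

    static void *lookup(uint64_t key)
    {
        CacheEntry *e = &front_cache[key & (CACHE_SIZE - 1)];

        if (e->value && e->key == key) {
            return e->value;                /* fast path: direct-mapped cache hit */
        }

        void *v = slow_lookup(key);
        if (v) {
            e->key = key;                   /* refill the cache on a slow-path hit */
            e->value = v;
        }
        return v;
    }
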
@@ -302,14 +274,11 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
- int flags = 0;
+ int flags = CPU_DUMP_CCOP;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
-#if defined(TARGET_I386)
- flags |= CPU_DUMP_CCOP;
-#endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
@@ -405,9 +374,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
/*
* By definition we've just finished a TB, so I/O is OK.
@@ -417,25 +383,36 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
* The next TB, if we chain to it, will clear the flag again.
*/
cpu->neg.can_do_io = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- cflags = curr_cflags(cpu);
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
+
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
cpu_loop_exit(cpu);
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
- log_cpu_exec(pc, cpu, tb);
+ log_cpu_exec(s.pc, cpu, tb);
}
return tb->tc.ptr;
}
+/* Return the current PC from CPU, which may be cached in TB. */
+static vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
+{
+ if (tb_cflags(tb) & CF_PCREL) {
+ return cpu->cc->get_pc(cpu);
+ } else {
+ return tb->pc;
+ }
+}
+
/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
* Disable CFI checks.
@@ -570,11 +547,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -583,13 +556,13 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
- cflags = curr_cflags(cpu);
/* Execute in a serial context. */
- cflags &= ~CF_PARALLEL;
+ s.cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
- cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+ s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
@@ -597,16 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
* Any breakpoint for this insn will have been recognized earlier.
*/
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
}
cpu_exec_enter(cpu);
/* execute the generated code */
- trace_exec_tb(tb, pc);
+ trace_exec_tb(tb, s.pc);
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
@@ -674,7 +647,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
out_unlock_next:
qemu_spin_unlock(&tb_next->jmp_lock);
- return;
}
static inline bool cpu_handle_halt(CPUState *cpu)
@@ -740,10 +712,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
* If user mode only, we simulate a fake exception which will be
* handled outside the cpu execution loop.
*/
-#if defined(TARGET_I386)
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- tcg_ops->fake_user_interrupt(cpu);
-#endif /* TARGET_I386 */
+ if (tcg_ops->fake_user_interrupt) {
+ tcg_ops->fake_user_interrupt(cpu);
+ }
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -830,33 +802,22 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->exception_index = EXCP_HLT;
bql_unlock();
return true;
- }
-#if defined(TARGET_I386)
- else if (interrupt_request & CPU_INTERRUPT_INIT) {
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUArchState *env = &x86_cpu->env;
- replay_interrupt();
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
- do_cpu_init(x86_cpu);
- cpu->exception_index = EXCP_HALTED;
- bql_unlock();
- return true;
- }
-#else
- else if (interrupt_request & CPU_INTERRUPT_RESET) {
- replay_interrupt();
- cpu_reset(cpu);
- bql_unlock();
- return true;
- }
-#endif /* !TARGET_I386 */
- /* The target hook has 3 exit conditions:
- False when the interrupt isn't processed,
- True when it is, and we should restart on a new TB,
- and via longjmp via cpu_loop_exit. */
- else {
+ } else {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
+ replay_interrupt();
+ tcg_ops->cpu_exec_reset(cpu);
+ bql_unlock();
+ return true;
+ }
+
+ /*
+ * The target hook has 3 exit conditions:
+ * False when the interrupt isn't processed,
+ * True when it is, and we should restart on a new TB,
+ * and via longjmp via cpu_loop_exit.
+ */
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (!tcg_ops->need_replay_interrupt ||
tcg_ops->need_replay_interrupt(interrupt_request)) {
@@ -963,11 +924,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
-
- cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = cpu->cflags_next_tb;
/*
* When requested, use an exact setting for cflags for the next
@@ -976,33 +934,32 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* have CF_INVALID set, -1 is a convenient invalid value that
* does not require tcg headers for cpu_common_reset.
*/
- cflags = cpu->cflags_next_tb;
- if (cflags == -1) {
- cflags = curr_cflags(cpu);
+ if (s.cflags == -1) {
+ s.cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
break;
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
- h = tb_jmp_cache_hash_func(pc);
+ h = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
- jc->array[h].pc = pc;
+ jc->array[h].pc = s.pc;
qatomic_set(&jc->array[h].tb, tb);
}
@@ -1022,7 +979,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb_add_jump(last_tb, tb_exit, tb);
}
- cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+ cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks
if the guest is in advance */
@@ -1077,11 +1034,17 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
if (!tcg_target_initialized) {
/* Check mandatory TCGCPUOps handlers */
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
#ifndef CONFIG_USER_ONLY
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_halt);
+ assert(tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_reset);
+ assert(tcg_ops->pointer_wrap);
#endif /* !CONFIG_USER_ONLY */
- cpu->cc->tcg_ops->initialize();
+ assert(tcg_ops->translate_code);
+ assert(tcg_ops->get_tb_cpu_state);
+ assert(tcg_ops->mmu_index);
+ tcg_ops->initialize();
tcg_target_initialized = true;
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 117b516..87e14bd 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -19,15 +19,17 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
-#include "hw/core/tcg-cpu-ops.h"
-#include "exec/exec-all.h"
+#include "qemu/target-info.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/iommu.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
-#include "exec/memory.h"
-#include "exec/cpu_ldst.h"
+#include "system/memory.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "exec/mmu-access-type.h"
#include "exec/tlb-common.h"
#include "exec/vaddr.h"
@@ -35,18 +37,21 @@
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
+#include "exec/tlb-flags.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
-#include "exec/translate-all.h"
+#include "tb-internal.h"
#include "trace.h"
#include "tb-hash.h"
+#include "tb-internal.h"
+#include "tlb-bounds.h"
#include "internal-common.h"
-#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
-#include "tcg/oversized-guest.h"
+#include "backend-ldst.h"
+
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -104,26 +109,15 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
{
/* Do not rearrange the CPUTLBEntry structure members. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
- MMU_DATA_LOAD * sizeof(uint64_t));
+ MMU_DATA_LOAD * sizeof(uintptr_t));
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
- MMU_DATA_STORE * sizeof(uint64_t));
+ MMU_DATA_STORE * sizeof(uintptr_t));
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
- MMU_INST_FETCH * sizeof(uint64_t));
+ MMU_INST_FETCH * sizeof(uintptr_t));
-#if TARGET_LONG_BITS == 32
- /* Use qatomic_read, in case of addr_write; only care about low bits. */
- const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
- ptr += HOST_BIG_ENDIAN;
- return qatomic_read(ptr);
-#else
- const uint64_t *ptr = &entry->addr_idx[access_type];
-# if TCG_OVERSIZED_GUEST
- return *ptr;
-# else
+ const uintptr_t *ptr = &entry->addr_idx[access_type];
/* ofs might correspond to .addr_write, so use qatomic_read */
return qatomic_read(ptr);
-# endif
-#endif
}
static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
@@ -779,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
assert_cpu_is_self(cpu);
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx(cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx(cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@@ -817,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@@ -893,26 +887,17 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
*
* Called with tlb_c.lock held.
*/
-static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
uintptr_t start, uintptr_t length)
{
- uintptr_t addr = tlb_entry->addr_write;
-
- if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
- TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
- addr &= TARGET_PAGE_MASK;
- addr += tlb_entry->addend;
- if ((addr - start) < length) {
-#if TARGET_LONG_BITS == 32
- uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
- ptr_write += HOST_BIG_ENDIAN;
- qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#elif TCG_OVERSIZED_GUEST
- tlb_entry->addr_write |= TLB_NOTDIRTY;
-#else
- qatomic_set(&tlb_entry->addr_write,
- tlb_entry->addr_write | TLB_NOTDIRTY);
-#endif
+ const uintptr_t addr = ent->addr_write;
+ int flags = addr | full->slow_flags[MMU_DATA_STORE];
+
+ flags &= TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY;
+ if (flags == 0) {
+ uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
+ if ((host - start) < length) {
+ qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
}
}
}
@@ -931,23 +916,25 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
* We must take tlb_c.lock to avoid racing with another vCPU update. The only
* thing actually updated is the target TLB entry ->addr_write flags.
*/
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
{
int mmu_idx;
qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+ unsigned int n = tlb_n_entries(fast);
unsigned int i;
- unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
for (i = 0; i < n; i++) {
- tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
- start1, length);
+ tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
+ start, length);
}
for (i = 0; i < CPU_VTLB_SIZE; i++) {
- tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
- start1, length);
+ tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
+ start, length);
}
}
qemu_spin_unlock(&cpu->neg.tlb.c.lock);
@@ -1199,7 +1186,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, uint64_t size)
+ int mmu_idx, vaddr size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
@@ -1214,29 +1201,65 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, uint64_t size)
+ int mmu_idx, vaddr size)
{
tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
prot, mmu_idx, size);
}
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
+{
+ return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
+{
+ return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
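
In the two helpers above the low bits of a stored TLB entry double as flag bits,
so a set TLB_INVALID_MASK bit can never compare equal against a page-aligned
lookup address. The same test with made-up mask values (not the real
TARGET_PAGE_MASK/TLB_INVALID_MASK; not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK     (~UINT64_C(0xfff))   /* 4 KiB pages, for the sketch */
    #define INVALID_MASK  UINT64_C(0x1)        /* one of the low flag bits */

    /* A hit needs the page numbers to match and the invalid bit to be clear;
     * keeping INVALID_MASK in the comparison makes a set bit fail the test. */
    static bool tlb_hit_sketch(uint64_t tlb_addr, uint64_t addr)
    {
        return (addr & PAGE_MASK) == (tlb_addr & (PAGE_MASK | INVALID_MASK));
    }
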
/*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
*/
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+ int mmu_idx, MemOp memop, int size,
+ bool probe, uintptr_t ra)
{
- bool ok;
+ const TCGCPUOps *ops = cpu->cc->tcg_ops;
+ CPUTLBEntryFull full;
- /*
- * This is not a probe, so only valid return is success; failure
- * should result in exception + longjmp to the cpu loop.
- */
- ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
- access_type, mmu_idx, false, retaddr);
- assert(ok);
+ if (ops->tlb_fill_align) {
+ if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+ memop, size, probe, ra)) {
+ tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ return true;
+ }
+ } else {
+ /* Legacy behaviour is alignment before paging. */
+ if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+ ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+ }
+ if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+ return true;
+ }
+ }
+ assert(probe);
+ return false;
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1319,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
+ tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
/*
@@ -1351,22 +1374,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
- if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ 0, fault_size, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
return TLB_INVALID_MASK;
}
- /* TLB resize via tlb_fill may have moved the entry. */
+ /* TLB resize via tlb_fill_align may have moved the entry. */
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
- * to force the next access through tlb_fill. We've just
- * called tlb_fill, so we know that this entry *is* valid.
+ * to force the next access through tlb_fill_align. We've just
+ * called tlb_fill_align, so we know that this entry *is* valid.
*/
flags &= ~TLB_INVALID_MASK;
}
@@ -1491,7 +1514,7 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
return host;
}
-void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
MMUAccessType access_type, int mmu_idx)
{
CPUTLBEntryFull *full;
@@ -1607,16 +1630,17 @@ typedef struct MMULookupLocals {
* mmu_lookup1: translate one page
* @cpu: generic cpu state
* @data: lookup parameters
+ * @memop: memory operation for the access, or 0
* @mmu_idx: virtual address context
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
*
* Resolve the translation for the one page at @data.addr, filling in
* the rest of @data with the results. If the translation fails,
- * tlb_fill will longjmp out. Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
@@ -1631,7 +1655,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ memop, data->size, false, ra);
maybe_resized = true;
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1668,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
+ if (likely(!maybe_resized)) {
+ /* Alignment has not been checked by tlb_fill_align. */
+ int a_bits = memop_alignment_bits(memop);
+
+ /*
+ * This alignment check differs from the one above, in that this is
+ * based on the atomicity of the operation. The intended use case is
+ * the ARM memory type field of each PTE, where access to pages with
+ * Device memory type requires alignment.
+ */
+ if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+ int at_bits = memop_atomicity_bits(memop);
+ a_bits = MAX(a_bits, at_bits);
+ }
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+ }
+ }
+
data->full = full;
data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1743,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
- unsigned a_bits;
bool crosspage;
int flags;
@@ -1708,12 +1751,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
- /* Handle CPU specific unaligned behaviour */
- a_bits = get_alignment_bits(l->memop);
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
-
l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1758,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1736,12 +1773,15 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
l->page[1].size = l->page[0].size - size0;
l->page[0].size = size0;
+ l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx,
+ l->page[1].addr, addr);
+
/*
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
@@ -1760,31 +1800,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
- /*
- * This alignment check differs from the one above, in that this is
- * based on the atomicity of the operation. The intended use case is
- * the ARM memory type field of each PTE, where access to pages with
- * Device memory type require alignment.
- */
- if (unlikely(flags & TLB_CHECK_ALIGNED)) {
- MemOp size = l->memop & MO_SIZE;
-
- switch (l->memop & MO_ATOM_MASK) {
- case MO_ATOM_NONE:
- size = MO_8;
- break;
- case MO_ATOM_IFALIGN_PAIR:
- case MO_ATOM_WITHIN16_PAIR:
- size = size ? size - 1 : 0;
- break;
- default:
- break;
- }
- if (addr & ((1 << size) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
- }
-
return crosspage;
}
@@ -1797,34 +1812,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
+ bool did_tlb_fill = false;
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
- /* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
- /* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* Enforce qemu required alignment. */
- if (unlikely(addr & (size - 1))) {
- /* We get here if guest alignment was not requested,
- or was not enforced by cpu_unaligned_access above.
- We might widen the access and emulate, but for now
- mark an exception and exit the cpu loop. */
- goto stop_the_world;
- }
-
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
@@ -1833,8 +1832,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, size,
- MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+ mop, size, false, retaddr);
+ did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
}
@@ -1848,17 +1848,38 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+ 0, size, false, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
- * write, this shouldn't ever return. But just in case,
- * handle via stop-the-world.
+ * write, this shouldn't ever return.
+ */
+ g_assert_not_reached();
+ }
+
+ /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+ if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ /*
+ * We get here if guest alignment was not requested, or was not
+ * enforced by cpu_unaligned_access or tlb_fill_align above.
+ * We might widen the access and emulate, but for now
+ * mark an exception and exit the cpu loop.
*/
goto stop_the_world;
}
- /* Collect tlb flags for read. */
+
+ /* Finish collecting tlb flags for both read and write. */
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
tlb_addr |= tlbe->addr_read;
+ tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+ tlb_addr |= full->slow_flags[MMU_DATA_STORE];
+ tlb_addr |= full->slow_flags[MMU_DATA_LOAD];
/* Notice an IO access or a needs-MMU-lookup access */
if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -1868,13 +1889,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(cpu, addr, size, full, retaddr);
}
- if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+ if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
int wp_flags = 0;
if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
@@ -1883,10 +1903,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
wp_flags |= BP_MEM_READ;
}
- if (wp_flags) {
- cpu_check_watchpoint(cpu, addr, size,
- full->attrs, wp_flags, retaddr);
- }
+ cpu_check_watchpoint(cpu, addr, size,
+ full->attrs, wp_flags, retaddr);
}
return hostaddr;
@@ -2313,7 +2331,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
@@ -2328,7 +2346,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint16_t ret;
uint8_t a, b;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2352,7 +2370,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
bool crosspage;
uint32_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2373,7 +2391,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
bool crosspage;
uint64_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2396,7 +2414,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
Int128 ret;
int first;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2724,7 +2742,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
@@ -2738,7 +2756,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
bool crosspage;
uint8_t a, b;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2760,7 +2778,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2781,7 +2799,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2804,7 +2822,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
uint64_t a, b;
int first;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2889,54 +2907,45 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
/* Code access functions. */
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
- return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
- return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
- return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
- return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
+
+/*
+ * Common pointer_wrap implementations.
+ */
+
+/*
+ * To be used for strict alignment targets.
+ * Because no accesses are unaligned, none can cross a page, and so none wraps.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+ g_assert_not_reached();
+}
+
+/* To be used for strict 32-bit targets. */
+vaddr cpu_pointer_wrap_uint32(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+ return (uint32_t)res;
+}
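
For a cross-page access, mmu_lookup() now asks the target how the address of the second page wraps, and the two helpers above cover the common answers. The sketch below shows how a target might install the hook; only the .pointer_wrap field and cpu_pointer_wrap_uint32() come from this patch, while the ops-table name and the 56-bit mask are purely illustrative.

    /* A strict 32-bit target can reuse the common helper directly. */
    static const TCGCPUOps mycpu32_tcg_ops = {
        /* ...translate_code, tlb_fill, do_unaligned_access, ... */
        .pointer_wrap = cpu_pointer_wrap_uint32,
    };

    /* A target with a narrower VA space could mask explicitly. */
    static vaddr mycpu_pointer_wrap(CPUState *cs, int mmu_idx,
                                    vaddr result, vaddr base)
    {
        return result & MAKE_64BIT_MASK(0, 56);   /* hypothetical 56-bit VA */
    }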
diff --git a/accel/tcg/icount-common.c b/accel/tcg/icount-common.c
index 8d3d3a7..d647117 100644
--- a/accel/tcg/icount-common.c
+++ b/accel/tcg/icount-common.c
@@ -27,17 +27,16 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/cpus.h"
-#include "sysemu/qtest.h"
+#include "system/cpus.h"
+#include "system/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "hw/core/cpu.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/cpu-throttle.h"
-#include "sysemu/cpu-timers-internal.h"
+#include "exec/icount.h"
+#include "system/cpu-timers-internal.h"
/*
* ICOUNT: Instruction Counter
@@ -49,6 +48,8 @@ static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
+bool icount_align_option;
+
/* Do not count executed instructions */
ICountMode use_icount = ICOUNT_DISABLED;
diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h
index a8fc3db..1dbc45d 100644
--- a/accel/tcg/internal-common.h
+++ b/accel/tcg/internal-common.h
@@ -11,12 +11,16 @@
#include "exec/cpu-common.h"
#include "exec/translation-block.h"
+#include "exec/mmap-lock.h"
+#include "accel/tcg/tb-cpu-state.h"
extern int64_t max_delay;
extern int64_t max_advance;
extern bool one_insn_per_tb;
+extern bool icount_align_option;
+
/*
* Return true if CS is not running in parallel with other cpus, either
* because there are no other cpus or we are within an exclusive context.
@@ -43,9 +47,7 @@ static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
#endif
}
-TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- int cflags);
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
@@ -53,7 +55,88 @@ TranslationBlock *tb_link_page(TranslationBlock *tb);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
+/**
+ * tlb_init - initialize a CPU's TLB
+ * @cpu: CPU whose TLB should be initialized
+ */
+void tlb_init(CPUState *cpu);
+/**
+ * tlb_destroy - destroy a CPU's TLB
+ * @cpu: CPU whose TLB should be destroyed
+ */
+void tlb_destroy(CPUState *cpu);
+
bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
+/* current cflags for hashing/comparison */
+uint32_t curr_cflags(CPUState *cpu);
+
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+
+/**
+ * get_page_addr_code_hostp()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
+ *
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
+ *
+ * Note: this function can trigger an exception.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
+ void **hostp);
+
+/**
+ * get_page_addr_code()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * If we cannot translate and execute from the entire RAM page, or if
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
+ * ram_addr_t corresponding to the guest code at @addr.
+ *
+ * Note: this function can trigger an exception.
+ */
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ vaddr addr)
+{
+ return get_page_addr_code_hostp(env, addr, NULL);
+}
+
+/*
+ * Access to the various translation structures needs to be serialised
+ * via locks for consistency. In user-mode emulation, access to the
+ * memory-related structures is protected with mmap_lock.
+ * In !user-mode we use per-page locks.
+ */
+#ifdef CONFIG_USER_ONLY
+#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
+#else
+#define assert_memory_lock()
+#endif
+
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void) { }
+#endif
+
+#ifdef CONFIG_USER_ONLY
+static inline void page_table_config_init(void) { }
+#else
+void page_table_config_init(void);
+#endif
+
+#ifndef CONFIG_USER_ONLY
+G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+#endif /* CONFIG_USER_ONLY */
+
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
+
#endif
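
tb_gen_code() now takes its guest state as a single TCGTBCPUState value, declared in accel/tcg/tb-cpu-state.h and not shown in this patch, instead of separate pc/cs_base/flags/cflags arguments. A rough sketch of a call site follows, assuming the structure simply bundles those four parameters; the field names are an assumption drawn from the previous signature.

    static TranslationBlock *gen_one_tb(CPUState *cpu, vaddr pc,
                                        uint64_t cs_base, uint32_t flags)
    {
        /* Assumed field names, mirroring the old parameter list. */
        TCGTBCPUState s = {
            .pc = pc,
            .cs_base = cs_base,
            .flags = flags,
            .cflags = curr_cflags(cpu),
        };
        return tb_gen_code(cpu, s);
    }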
diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
deleted file mode 100644
index fe10972..0000000
--- a/accel/tcg/internal-target.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Internal execution defines for qemu (target specific)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-#ifndef ACCEL_TCG_INTERNAL_TARGET_H
-#define ACCEL_TCG_INTERNAL_TARGET_H
-
-#include "exec/exec-all.h"
-#include "exec/translate-all.h"
-
-/*
- * Access to the various translations structures need to be serialised
- * via locks for consistency. In user-mode emulation access to the
- * memory related structures are protected with mmap_lock.
- * In !user-mode we use per-page locks.
- */
-#ifdef CONFIG_USER_ONLY
-#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
-#else
-#define assert_memory_lock()
-#endif
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void) { }
-#endif
-
-#ifdef CONFIG_USER_ONLY
-static inline void page_table_config_init(void) { }
-#else
-void page_table_config_init(void);
-#endif
-
-#ifdef CONFIG_USER_ONLY
-/*
- * For user-only, page_protect sets the page read-only.
- * Since most execution is already on read-only pages, and we'd need to
- * account for other TBs on the same page, defer undoing any page protection
- * until we receive the write fault.
- */
-static inline void tb_lock_page0(tb_page_addr_t p0)
-{
- page_protect(p0);
-}
-
-static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
-{
- page_protect(p1);
-}
-
-static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
-static inline void tb_unlock_pages(TranslationBlock *tb) { }
-#else
-void tb_lock_page0(tb_page_addr_t);
-void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
-void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
-void tb_unlock_pages(TranslationBlock *);
-#endif
-
-#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr);
-G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-#endif /* CONFIG_SOFTMMU */
-
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
-
-/* Return the current PC from CPU, which may be cached in TB. */
-static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
-{
- if (tb_cflags(tb) & CF_PCREL) {
- return cpu->cc->get_pc(cpu);
- } else {
- return tb->pc;
- }
-}
-
-/**
- * tcg_req_mo:
- * @type: TCGBar
- *
- * Filter @type to the barrier that is required for the guest
- * memory ordering vs the host memory ordering. A non-zero
- * result indicates that some barrier is required.
- *
- * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
- * guest requires strict ordering.
- *
- * This is a macro so that it's constant even without optimization.
- */
-#ifdef TCG_GUEST_DEFAULT_MO
-# define tcg_req_mo(type) \
- ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
-#else
-# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
-#endif
-
-/**
- * cpu_req_mo:
- * @type: TCGBar
- *
- * If tcg_req_mo indicates a barrier for @type is required
- * for the guest memory model, issue a host memory barrier.
- */
-#define cpu_req_mo(type) \
- do { \
- if (tcg_req_mo(type)) { \
- smp_mb(); \
- } \
- } while (0)
-
-#endif /* ACCEL_TCG_INTERNAL_H */
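
The tcg_req_mo()/cpu_req_mo() pair deleted here is not simply dropped: the call sites in cputlb.c now pass the CPU, which implies the guest's default memory order is looked up at run time rather than fixed by TCG_GUEST_DEFAULT_MO at compile time. The replacement lives outside this hunk, so the following is only a sketch of the idea, and the guest_default_memory_order field name is an assumption.

    /*
     * Sketch: same barrier filtering as before, but the guest ordering
     * comes from the CPU instead of a per-target compile-time define.
     */
    #define tcg_req_mo(cpu, type) \
        ((type) & (cpu)->cc->tcg_ops->guest_default_memory_order \
                & ~TCG_TARGET_DEFAULT_MO)

    #define cpu_req_mo(cpu, type)        \
        do {                             \
            if (tcg_req_mo(cpu, type)) { \
                smp_mb();                \
            }                            \
        } while (0)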
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index 134da3c..c735add 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -168,6 +168,7 @@ static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
#endif
/* Ultimate fallback: re-execute in serial context. */
+ trace_load_atom8_or_exit_fallback(ra);
cpu_loop_exit_atomic(cpu, ra);
}
@@ -212,6 +213,7 @@ static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
}
/* Ultimate fallback: re-execute in serial context. */
+ trace_load_atom16_or_exit_fallback(ra);
cpu_loop_exit_atomic(cpu, ra);
}
@@ -519,6 +521,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
if (HAVE_al8) {
return load_atom_extract_al8x2(pv);
}
+ trace_load_atom8_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
@@ -563,6 +566,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
break;
case MO_64:
if (!HAVE_al8) {
+ trace_load_atom16_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
}
a = load_atomic8(pv);
@@ -570,6 +574,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
break;
case -MO_64:
if (!HAVE_al8) {
+ trace_load_atom16_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
}
a = load_atom_extract_al8x2(pv);
@@ -897,6 +902,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
g_assert_not_reached();
}
+ trace_store_atom2_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
}
@@ -961,6 +967,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
return;
}
}
+ trace_store_atom4_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
@@ -1029,6 +1036,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
default:
g_assert_not_reached();
}
+ trace_store_atom8_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
}
@@ -1107,5 +1115,6 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
default:
g_assert_not_reached();
}
+ trace_store_atom16_fallback(memop, ra);
cpu_loop_exit_atomic(cpu, ra);
}
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index 87ceb95..57f3e06 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -123,64 +123,69 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
* Load helpers for cpu_ldst.h
*/
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+static void plugin_load_cb(CPUArchState *env, vaddr addr,
+ uint64_t value_low,
+ uint64_t value_high,
+ MemOpIdx oi)
{
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
+ value_low, value_high,
+ oi, QEMU_PLUGIN_MEM_R);
}
}
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra)
{
uint8_t ret;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
+ plugin_load_cb(env, addr, ret, 0, oi);
return ret;
}
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint16_t ret;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
+ plugin_load_cb(env, addr, ret, 0, oi);
return ret;
}
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint32_t ret;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
+ plugin_load_cb(env, addr, ret, 0, oi);
return ret;
}
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint64_t ret;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
+ plugin_load_cb(env, addr, ret, 0, oi);
return ret;
}
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
Int128 ret;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
- plugin_load_cb(env, addr, oi);
+ plugin_load_cb(env, addr, int128_getlo(ret), int128_gethi(ret), oi);
return ret;
}
@@ -188,363 +193,53 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
* Store helpers for cpu_ldst.h
*/
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+static void plugin_store_cb(CPUArchState *env, vaddr addr,
+ uint64_t value_low,
+ uint64_t value_high,
+ MemOpIdx oi)
{
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
+ value_low, value_high,
+ oi, QEMU_PLUGIN_MEM_W);
}
}
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+void cpu_stb_mmu(CPUArchState *env, vaddr addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
helper_stb_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
+ plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+void cpu_stw_mmu(CPUArchState *env, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
+ plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+void cpu_stl_mmu(CPUArchState *env, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
+ plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+void cpu_stq_mmu(CPUArchState *env, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
+ plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-/*
- * Wrappers of the above
- */
-
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- return cpu_ldb_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
-}
-
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
-}
-
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
-}
-
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
-}
-
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- cpu_stb_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
-}
-
-/*--------------------------*/
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int8_t)cpu_ldub_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-/*--------------------------*/
-
-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldub_data_ra(env, addr, 0);
-}
-
-int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
-{
- return (int8_t)cpu_ldub_data(env, addr);
-}
-
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_lduw_be_data_ra(env, addr, 0);
-}
-
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
-{
- return (int16_t)cpu_lduw_be_data(env, addr);
-}
-
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldl_be_data_ra(env, addr, 0);
-}
-
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldq_be_data_ra(env, addr, 0);
-}
-
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_lduw_le_data_ra(env, addr, 0);
-}
-
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
-{
- return (int16_t)cpu_lduw_le_data(env, addr);
-}
-
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldl_le_data_ra(env, addr, 0);
-}
-
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldq_le_data_ra(env, addr, 0);
-}
-
-void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stb_data_ra(env, addr, val, 0);
-}
-
-void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stw_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stl_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
-{
- cpu_stq_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stw_le_data_ra(env, addr, val, 0);
-}
-
-void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stl_le_data_ra(env, addr, val, 0);
-}
-
-void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
-{
- cpu_stq_le_data_ra(env, addr, val, 0);
+ plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi);
}
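
With the accessed value now plumbed into qemu_plugin_vcpu_mem_cb() as a low/high pair, a TCG plugin can inspect it from its memory callback. The sketch below assumes the public plugin API exposes that value as qemu_plugin_mem_get_value(); that interface is not part of this patch.

    #include <qemu-plugin.h>

    /* Sketch of a plugin-side consumer of the newly plumbed value. */
    static void vcpu_mem_cb(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *udata)
    {
        /* Assumed API: returns the value of the access that fired this cb. */
        qemu_plugin_mem_value v = qemu_plugin_mem_get_value(info);

        if (qemu_plugin_mem_is_store(info) &&
            v.type == QEMU_PLUGIN_MEM_VALUE_U64 &&
            v.data.u64 == 0xdeadbeefdeadbeefull) {
            qemu_plugin_outs("poison value stored\n");
        }
    }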
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index aef80de..575e92b 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -1,36 +1,38 @@
-common_ss.add(when: 'CONFIG_TCG', if_true: files(
- 'cpu-exec-common.c',
-))
-tcg_specific_ss = ss.source_set()
-tcg_specific_ss.add(files(
- 'tcg-all.c',
+if not have_tcg
+ subdir_done()
+endif
+
+tcg_ss = ss.source_set()
+
+tcg_ss.add(files(
'cpu-exec.c',
- 'tb-maint.c',
- 'tcg-runtime-gvec.c',
+ 'cpu-exec-common.c',
'tcg-runtime.c',
+ 'tcg-runtime-gvec.c',
+ 'tb-maint.c',
+ 'tcg-all.c',
'translate-all.c',
'translator.c',
))
-tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
-tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
if get_option('plugins')
- tcg_specific_ss.add(files('plugin-gen.c'))
+ tcg_ss.add(files('plugin-gen.c'))
endif
-specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
- 'cputlb.c',
- 'watchpoint.c',
+user_ss.add_all(tcg_ss)
+system_ss.add_all(tcg_ss)
+
+user_ss.add(files(
+ 'user-exec.c',
+ 'user-exec-stub.c',
))
-system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+system_ss.add(files(
+ 'cputlb.c',
'icount-common.c',
'monitor.c',
-))
-
-tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'tcg-accel-ops.c',
- 'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-icount.c',
+ 'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-rr.c',
+ 'watchpoint.c',
))
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
index 093efe9..1c182b6 100644
--- a/accel/tcg/monitor.c
+++ b/accel/tcg/monitor.c
@@ -13,9 +13,9 @@
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
-#include "sysemu/cpus.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/tcg.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
+#include "system/tcg.h"
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index b6bae32..c1da753 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -22,13 +22,12 @@
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
-#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
-#include "tcg/tcg-op.h"
-#include "exec/exec-all.h"
+#include "tcg/tcg-op-common.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
enum plugin_gen_from {
PLUGIN_GEN_FROM_TB,
@@ -85,27 +84,33 @@ static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
len = insn->mem_cbs->len;
arr = g_array_sized_new(false, false,
sizeof(struct qemu_plugin_dyn_cb), len);
- memcpy(arr->data, insn->mem_cbs->data,
- len * sizeof(struct qemu_plugin_dyn_cb));
+ g_array_append_vals(arr, insn->mem_cbs->data, len);
qemu_plugin_add_dyn_cb_arr(arr);
tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
- offsetof(CPUState, neg.plugin_mem_cbs) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}
static void gen_disable_mem_helper(void)
{
tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
- offsetof(CPUState, neg.plugin_mem_cbs) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}
static TCGv_i32 gen_cpu_index(void)
{
+ /*
+ * Optimize when we run with a single vcpu. All values using cpu_index,
+ * including scoreboard index, will be optimized out.
+ * User-mode calls tb_flush when setting this flag. In system-mode, all
+ * vcpus are created before generating code.
+ */
+ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+ return tcg_constant_i32(current_cpu->cpu_index);
+ }
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
tcg_gen_ld_i32(cpu_index, tcg_env,
- -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
+ offsetof(CPUState, cpu_index) - sizeof(CPUState));
return cpu_index;
}
@@ -252,7 +257,6 @@ static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -277,7 +281,7 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
* that might be live within the existing opcode stream.
* The simplest solution is to release them all and create new.
*/
- memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));
+ tcg_temp_ebb_reset_freed(tcg_ctx);
QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
switch (op->opc) {
@@ -469,4 +473,8 @@ void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
/* inject the instrumentation at the appropriate places */
plugin_gen_inject(ptb);
+
+ /* reset plugin translation state (plugin_tb is reused between blocks) */
+ tcg_ctx->plugin_db = NULL;
+ tcg_ctx->plugin_insn = NULL;
}
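
The tcg_env-relative offsets above move from going through ArchCPU to a plain offsetof(CPUState, ...) - sizeof(CPUState). That only works because the per-target env member must sit immediately after the embedded CPUState, an invariant QEMU checks at build time; the struct names below are illustrative, only the relationship matters.

    /* Illustrative layout; MyArchCPU / CPUMyArchState stand in for a target. */
    struct MyArchCPU {
        CPUState parent_obj;      /* must be first ...                   */
        CPUMyArchState env;       /* ... and env must follow it directly */
    };

    QEMU_BUILD_BUG_ON(offsetof(struct MyArchCPU, env) != sizeof(CPUState));

    /*
     * tcg_env points at env, so any CPUState field is reachable from it
     * at offsetof(CPUState, field) - sizeof(CPUState), a negative offset.
     */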
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
index a0c61f2..f7b159f 100644
--- a/accel/tcg/tb-hash.h
+++ b/accel/tcg/tb-hash.h
@@ -20,8 +20,9 @@
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H
-#include "exec/cpu-defs.h"
-#include "exec/exec-all.h"
+#include "exec/vaddr.h"
+#include "exec/target_page.h"
+#include "exec/translation-block.h"
#include "qemu/xxhash.h"
#include "tb-jmp-cache.h"
diff --git a/accel/tcg/tb-internal.h b/accel/tcg/tb-internal.h
new file mode 100644
index 0000000..40439f0
--- /dev/null
+++ b/accel/tcg/tb-internal.h
@@ -0,0 +1,55 @@
+/*
+ * TranslationBlock internal declarations (target specific)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
+#define ACCEL_TCG_TB_INTERNAL_TARGET_H
+
+#include "exec/translation-block.h"
+
+/*
+ * The true return address will often point to a host insn that is part of
+ * the next translated guest insn. Adjust the address backward to point to
+ * the middle of the call insn. Subtracting one would do the job except for
+ * several compressed mode architectures (arm, mips) which set the low bit
+ * to indicate the compressed mode; subtracting two works around that. It
+ * is also the case that there are no host isas that contain a call insn
+ * smaller than 4 bytes, so we don't worry about special-casing this.
+ */
+#define GETPC_ADJ 2
+
+void tb_lock_page0(tb_page_addr_t);
+
+#ifdef CONFIG_USER_ONLY
+/*
+ * For user-only, page_protect sets the page read-only.
+ * Since most execution is already on read-only pages, and we'd need to
+ * account for other TBs on the same page, defer undoing any page protection
+ * until we receive the write fault.
+ */
+static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
+{
+ tb_lock_page0(p1);
+}
+
+static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
+static inline void tb_unlock_pages(TranslationBlock *tb) { }
+#else
+void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_pages(TranslationBlock *);
+#endif
+
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr,
+ unsigned size, uintptr_t retaddr);
+#endif /* CONFIG_SOFTMMU */
+
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc);
+
+#endif
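
GETPC_ADJ pairs with the GETPC() macro used by out-of-line helpers: the helper captures the host return address at its entry point, and the unwind machinery (see atomic_mmu_lookup() earlier in this patch) backs it up by GETPC_ADJ so it points inside the call instruction. A short sketch of the helper-side pattern, with a made-up helper name:

    /* Hypothetical target helper; the GETPC() usage is the point. */
    uint32_t HELPER(my_load32)(CPUArchState *env, uint64_t addr)
    {
        uintptr_t ra = GETPC();   /* host PC of the call into this helper */
        MemOpIdx oi = make_memop_idx(MO_TEUL,
                                     cpu_mmu_index(env_cpu(env), false));

        /* Any fault unwinds through ra to recover the guest PC. */
        return cpu_ldl_mmu(env, addr, oi, ra);
    }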
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index cc0f5af..0048316 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -22,16 +22,21 @@
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "exec/tb-flush.h"
-#include "exec/translate-all.h"
-#include "sysemu/tcg.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
+#include "tb-internal.h"
+#include "system/tcg.h"
#include "tcg/tcg.h"
#include "tb-hash.h"
#include "tb-context.h"
+#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
/* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -152,11 +157,7 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
/*
* In system mode we want L1_MAP to be based on ram offsets.
*/
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
-#endif
+#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
@@ -1005,7 +1006,8 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
@@ -1028,17 +1030,16 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr)
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
/*
* Called with mmap_lock held. If pc is not 0 then it indicates the
* host PC of the faulting store instruction that caused this invalidate.
- * Returns true if the caller needs to abort execution of the current
- * TB (because it was modified by this store and the guest CPU has
- * precise-SMC semantics).
+ * Returns true if the caller needs to abort execution of the current TB.
*/
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc)
{
TranslationBlock *current_tb;
bool current_tb_modified;
@@ -1050,10 +1051,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
-#ifndef TARGET_HAS_PRECISE_SMC
- pc = 0;
-#endif
- if (!pc) {
+ if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
tb_invalidate_phys_page(addr);
return false;
}
@@ -1076,15 +1074,14 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, pc);
+ cpu_restore_state_from_tb(cpu, current_tb, pc);
}
tb_phys_invalidate__locked(tb);
}
if (current_tb_modified) {
/* Force execution of one insn next time. */
- CPUState *cpu = current_cpu;
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
return true;
}
return false;
@@ -1093,23 +1090,28 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
/*
* @p must be non-NULL.
* Call with all @pages locked.
+ * (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context,
+ * in which case precise_smc need not be detected.
*/
static void
-tb_invalidate_phys_page_range__locked(struct page_collection *pages,
+tb_invalidate_phys_page_range__locked(CPUState *cpu,
+ struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
- TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+ TranslationBlock *current_tb = NULL;
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
+ if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
+ current_tb = tcg_tb_lookup(retaddr);
+ }
+
/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@@ -1127,8 +1129,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb == tb &&
+ if (unlikely(current_tb == tb) &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
* If we are modifying the current TB, we must stop
@@ -1138,9 +1139,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* restore the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr);
}
-#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate__locked(tb);
}
}
@@ -1150,15 +1150,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tlb_unprotect_code(start);
}
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb_modified) {
+ if (unlikely(current_tb_modified)) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
- cpu_loop_exit_noexc(current_cpu);
+ cpu_loop_exit_noexc(cpu);
}
-#endif
}
/*
@@ -1168,7 +1166,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
@@ -1187,44 +1186,30 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
- tb_invalidate_phys_page_range__locked(pages, pd,
+ tb_invalidate_phys_page_range__locked(cpu, pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}
/*
- * Call with all @pages in the range [@start, @start + len[ locked.
- */
-static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
- tb_page_addr_t start,
- unsigned len, uintptr_t ra)
-{
- PageDesc *p;
-
- p = page_find(start >> TARGET_PAGE_BITS);
- if (!p) {
- return;
- }
-
- assert_page_locked(p);
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
-}
-
-/*
* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
*/
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr)
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start,
+ unsigned len, uintptr_t ra)
{
- struct page_collection *pages;
+ PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
- pages = page_collection_lock(ram_addr, ram_addr + size - 1);
- tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
- page_collection_unlock(pages);
+ if (p) {
+ ram_addr_t last = start + len - 1;
+ struct page_collection *pages = page_collection_lock(start, last);
+
+ tb_invalidate_phys_page_range__locked(cpu, pages, p,
+ start, last, ra);
+ page_collection_unlock(pages);
+ }
}
#endif /* CONFIG_USER_ONLY */
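
The TARGET_HAS_PRECISE_SMC compile-time define is replaced by a flag the core reads through cpu->cc->tcg_ops->precise_smc, making the choice per CPU class at run time. A target that needs stores into the currently executing TB detected precisely (historically the x86 family) opts in from its ops table; the initializer below is a sketch, not the real x86 one.

    static const TCGCPUOps x86_tcg_ops_sketch = {
        /* ...translate_code, tlb_fill_align, do_unaligned_access, ... */
        .precise_smc = true,   /* stores into the current TB must restart it */
    };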
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index 9e1ae66..d0f7b41 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -24,11 +24,11 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
-#include "sysemu/cpu-timers.h"
+#include "system/replay.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
-#include "exec/exec-all.h"
+#include "hw/core/cpu.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 49814ec..dfcee30 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -24,13 +24,12 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "sysemu/cpu-timers.h"
+#include "system/tcg.h"
+#include "system/replay.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
-#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg/startup.h"
#include "tcg-accel-ops.h"
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 48c3871..6eec5c9 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -25,13 +25,13 @@
#include "qemu/osdep.h"
#include "qemu/lockable.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "sysemu/cpu-timers.h"
+#include "system/tcg.h"
+#include "system/replay.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
-#include "exec/exec-all.h"
+#include "exec/cpu-common.h"
#include "tcg/startup.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
@@ -109,7 +109,7 @@ static void rr_wait_io_event(void)
{
CPUState *cpu;
- while (all_cpu_threads_idle() && replay_can_wait()) {
+ while (all_cpu_threads_idle()) {
rr_stop_kick_timer();
qemu_cond_wait_bql(first_cpu->halt_cond);
}
@@ -302,9 +302,7 @@ static void *rr_cpu_thread_fn(void *arg)
rr_deal_with_unplugged_cpus();
}
- rcu_remove_force_rcu_notifier(&force_rcu);
- rcu_unregister_thread();
- return NULL;
+ g_assert_not_reached();
}
void rr_start_vcpu_thread(CPUState *cpu)
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 3c19e68..b24d6a7 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -26,15 +26,18 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "sysemu/cpu-timers.h"
+#include "system/accel-ops.h"
+#include "system/tcg.h"
+#include "system/replay.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
+#include "exec/translation-block.h"
+#include "exec/watchpoint.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"
@@ -119,10 +122,9 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
[GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
};
- CPUClass *cc = CPU_GET_CLASS(cpu);
int cputype = xlat[gdbtype];
- if (cc->gdb_stop_before_watchpoint) {
+ if (cpu->cc->gdb_stop_before_watchpoint) {
cputype |= BP_STOP_BEFORE_ACCESS;
}
return cputype;
@@ -222,7 +224,7 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}
-static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
+static void tcg_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h
index 44c4079..6feeb3f 100644
--- a/accel/tcg/tcg-accel-ops.h
+++ b/accel/tcg/tcg-accel-ops.h
@@ -12,7 +12,7 @@
#ifndef TCG_ACCEL_OPS_H
#define TCG_ACCEL_OPS_H
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
void tcg_cpu_destroy(CPUState *cpu);
int tcg_cpu_exec(CPUState *cpu);
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 2090907..6e5dc33 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -24,26 +24,29 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "exec/replay-core.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "tcg/startup.h"
-#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
+#include "qapi/qapi-types-common.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
-#if !defined(CONFIG_USER_ONLY)
+#include "qemu/target-info.h"
+#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif
+#include "accel/tcg/cpu-ops.h"
#include "internal-common.h"
+
struct TCGState {
AccelState parent_obj;
- bool mttcg_enabled;
+ OnOffAuto mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled;
unsigned long tb_size;
@@ -55,40 +58,18 @@ typedef struct TCGState TCGState;
DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
TYPE_TCG_ACCEL)
-/*
- * We default to false if we know other options have been enabled
- * which are currently incompatible with MTTCG. Otherwise when each
- * guest (target) has been updated to support:
- * - atomic instructions
- * - memory ordering primitives (barriers)
- * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
- *
- * Once a guest architecture has been converted to the new primitives
- * there is one remaining limitation to check:
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- */
-
-static bool default_mttcg_enabled(void)
+#ifndef CONFIG_USER_ONLY
+bool qemu_tcg_mttcg_enabled(void)
{
- if (icount_enabled() || TCG_OVERSIZED_GUEST) {
- return false;
- }
-#ifdef TARGET_SUPPORTS_MTTCG
-# ifndef TCG_GUEST_DEFAULT_MO
-# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
-# endif
- return true;
-#else
- return false;
-#endif
+ TCGState *s = TCG_STATE(current_accel());
+ return s->mttcg_enabled == ON_OFF_AUTO_ON;
}
+#endif /* !CONFIG_USER_ONLY */
static void tcg_accel_instance_init(Object *obj)
{
TCGState *s = TCG_STATE(obj);
- s->mttcg_enabled = default_mttcg_enabled();
-
/* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
s->splitwx_enabled = -1;
@@ -97,24 +78,57 @@ static void tcg_accel_instance_init(Object *obj)
#endif
}
-bool mttcg_enabled;
bool one_insn_per_tb;
static int tcg_init_machine(MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
-#ifdef CONFIG_USER_ONLY
- unsigned max_cpus = 1;
-#else
- unsigned max_cpus = ms->smp.max_cpus;
+ unsigned max_threads = 1;
+
+#ifndef CONFIG_USER_ONLY
+ CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type()));
+ bool mttcg_supported = cc->tcg_ops->mttcg_supported;
+
+ switch (s->mttcg_enabled) {
+ case ON_OFF_AUTO_AUTO:
+ /*
+ * We default to false if we know other options have been enabled
+ * which are currently incompatible with MTTCG. Otherwise when each
+ * guest (target) has been updated to support:
+ * - atomic instructions
+ * - memory ordering primitives (barriers)
+ * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
+ *
+ * Once a guest architecture has been converted to the new primitives
+ * there is one remaining limitation to check:
+ * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
+ */
+ if (mttcg_supported && !icount_enabled()) {
+ s->mttcg_enabled = ON_OFF_AUTO_ON;
+ max_threads = ms->smp.max_cpus;
+ } else {
+ s->mttcg_enabled = ON_OFF_AUTO_OFF;
+ }
+ break;
+ case ON_OFF_AUTO_ON:
+ if (!mttcg_supported) {
+ warn_report("Guest not yet converted to MTTCG - "
+ "you may get unexpected results");
+ }
+ max_threads = ms->smp.max_cpus;
+ break;
+ case ON_OFF_AUTO_OFF:
+ break;
+ default:
+ g_assert_not_reached();
+ }
#endif
tcg_allowed = true;
- mttcg_enabled = s->mttcg_enabled;
page_init();
tb_htable_init();
- tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_threads);
#if defined(CONFIG_SOFTMMU)
/*
@@ -124,6 +138,10 @@ static int tcg_init_machine(MachineState *ms)
tcg_prologue_init();
#endif
+#ifdef CONFIG_USER_ONLY
+ qdev_create_fake_machine();
+#endif
+
return 0;
}
@@ -131,7 +149,7 @@ static char *tcg_get_thread(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);
- return g_strdup(s->mttcg_enabled ? "multi" : "single");
+ return g_strdup(s->mttcg_enabled == ON_OFF_AUTO_ON ? "multi" : "single");
}
static void tcg_set_thread(Object *obj, const char *value, Error **errp)
@@ -139,19 +157,13 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
TCGState *s = TCG_STATE(obj);
if (strcmp(value, "multi") == 0) {
- if (TCG_OVERSIZED_GUEST) {
- error_setg(errp, "No MTTCG when guest word size > hosts");
- } else if (icount_enabled()) {
+ if (icount_enabled()) {
error_setg(errp, "No MTTCG when icount is enabled");
} else {
-#ifndef TARGET_SUPPORTS_MTTCG
- warn_report("Guest not yet converted to MTTCG - "
- "you may get unexpected results");
-#endif
- s->mttcg_enabled = true;
+ s->mttcg_enabled = ON_OFF_AUTO_ON;
}
} else if (strcmp(value, "single") == 0) {
- s->mttcg_enabled = false;
+ s->mttcg_enabled = ON_OFF_AUTO_OFF;
} else {
error_setg(errp, "Invalid 'thread' setting %s", value);
}
@@ -222,7 +234,7 @@ static int tcg_gdbstub_supported_sstep_flags(void)
}
}
-static void tcg_accel_class_init(ObjectClass *oc, void *data)
+static void tcg_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg";
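
The tcg-all.c hunks above turn mttcg_enabled from a plain bool into an OnOffAuto tri-state that is resolved at machine-init time against the per-target mttcg_supported flag from tcg_ops, replacing the old compile-time TARGET_SUPPORTS_MTTCG / TCG_OVERSIZED_GUEST checks. The sketch below shows the same resolution pattern in isolation; resolve_mttcg, its parameters and the locally defined enum are illustrative stand-ins, not QEMU API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { ON_OFF_AUTO_AUTO, ON_OFF_AUTO_OFF, ON_OFF_AUTO_ON } OnOffAuto;

    /* Resolve a tri-state request against what the target supports. */
    static OnOffAuto resolve_mttcg(OnOffAuto requested, bool mttcg_supported,
                                   bool icount_enabled)
    {
        switch (requested) {
        case ON_OFF_AUTO_AUTO:
            /* Default to on only when supported and icount is not in use. */
            return (mttcg_supported && !icount_enabled) ? ON_OFF_AUTO_ON
                                                        : ON_OFF_AUTO_OFF;
        case ON_OFF_AUTO_ON:
            if (!mttcg_supported) {
                fprintf(stderr, "warning: forcing MTTCG on an unconverted guest\n");
            }
            return ON_OFF_AUTO_ON;
        case ON_OFF_AUTO_OFF:
            return ON_OFF_AUTO_OFF;
        default:
            abort();
        }
    }

    int main(void)
    {
        printf("%d\n", resolve_mttcg(ON_OFF_AUTO_AUTO, true, false)); /* 2: on */
        printf("%d\n", resolve_mttcg(ON_OFF_AUTO_AUTO, true, true));  /* 1: off */
        return 0;
    }
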
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index afca89b..ff927c5 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
-#include "cpu.h"
#include "exec/helper-proto-common.h"
#include "tcg/tcg-gvec-desc.h"
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index 9fa539a..fa7ed97 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -23,13 +23,9 @@
*/
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
-#include "cpu.h"
+#include "exec/cpu-common.h"
#include "exec/helper-proto-common.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
-#include "disas/disas.h"
-#include "exec/log.h"
-#include "tcg/tcg.h"
+#include "accel/tcg/getpc.h"
#define HELPER_H "accel/tcg/tcg-runtime.h"
#include "exec/helper-info.c.inc"
diff --git a/accel/tcg/tlb-bounds.h b/accel/tcg/tlb-bounds.h
new file mode 100644
index 0000000..f83d9ac
--- /dev/null
+++ b/accel/tcg/tlb-bounds.h
@@ -0,0 +1,13 @@
+/*
+ * softmmu size bounds
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_TLB_BOUNDS_H
+#define ACCEL_TCG_TLB_BOUNDS_H
+
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+#endif /* ACCEL_TCG_TLB_BOUNDS_H */
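
For scale, the bounds in the new header translate into TLB entry counts as shown below. The snippet assumes 4 KiB target pages (TARGET_PAGE_BITS = 12); that value is an assumption for illustration, not something the header fixes.

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12                      /* assumed 4 KiB pages */
    #define CPU_TLB_DYN_MIN_BITS 6
    #define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
    #define CPU_TLB_DYN_DEFAULT_BITS 8

    int main(void)
    {
        /* A TLB sized at N bits holds 1 << N entries. */
        printf("min:     %u entries\n", 1u << CPU_TLB_DYN_MIN_BITS);     /* 64 */
        printf("default: %u entries\n", 1u << CPU_TLB_DYN_DEFAULT_BITS); /* 256 */
        printf("max:     %u entries\n", 1u << CPU_TLB_DYN_MAX_BITS);     /* 1048576 */
        return 0;
    }
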
diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events
index 4e9b450..14f6388 100644
--- a/accel/tcg/trace-events
+++ b/accel/tcg/trace-events
@@ -12,3 +12,15 @@ memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
# translate-all.c
translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
+
+# ldst_atomicity
+load_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+load_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+load_atom8_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR""
+load_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+load_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+load_atom16_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR""
+store_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+store_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+store_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
+store_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index fdf6d8a..d468667 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -21,48 +21,20 @@
#include "trace.h"
#include "disas/disas.h"
-#include "exec/exec-all.h"
#include "tcg/tcg.h"
-#if defined(CONFIG_USER_ONLY)
-#include "qemu.h"
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-#include <sys/param.h>
-#if __FreeBSD_version >= 700104
-#define HAVE_KINFO_GETVMMAP
-#define sigqueue sigqueue_freebsd /* avoid redefinition */
-#include <sys/proc.h>
-#include <machine/profile.h>
-#define _KERNEL
-#include <sys/user.h>
-#undef _KERNEL
-#undef sigqueue
-#include <libutil.h>
-#endif
-#endif
-#else
-#include "exec/ram_addr.h"
-#endif
-
-#include "exec/cputlb.h"
-#include "exec/translate-all.h"
-#include "exec/translator.h"
+#include "exec/mmap-lock.h"
+#include "tb-internal.h"
#include "exec/tb-flush.h"
-#include "qemu/bitmap.h"
-#include "qemu/qemu-print.h"
-#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
-#include "qemu/timer.h"
+#include "qemu/target-info.h"
#include "exec/log.h"
-#include "sysemu/cpus.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/tcg.h"
-#include "qapi/error.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "exec/icount.h"
+#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
+#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
@@ -105,7 +77,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
- if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
+ if (shift < 64 && (byte & 0x40)) {
val |= -(int64_t)1 << shift;
}
@@ -116,7 +88,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
- The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
+ The logical table consists of INSN_START_WORDS uint64_t's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
@@ -136,13 +108,13 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr;
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
- prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
+ prev = insn_data[(i - 1) * INSN_START_WORDS + j];
}
- curr = insn_data[i * TARGET_INSN_START_WORDS + j];
+ curr = insn_data[i * INSN_START_WORDS + j];
p = encode_sleb128(p, curr - prev);
}
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
@@ -174,7 +146,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
return -1;
}
- memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
+ memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
@@ -184,7 +156,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
* at which the end of the insn exceeds host_pc.
*/
for (i = 0; i < num_insns; ++i) {
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
data[j] += decode_sleb128(&p);
}
iter_pc += decode_sleb128(&p);
@@ -202,7 +174,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@@ -274,8 +246,10 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_func_start(tcg_ctx);
- tcg_ctx->cpu = env_cpu(env);
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+ CPUState *cs = env_cpu(env);
+ tcg_ctx->cpu = cs;
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@@ -284,9 +258,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
}
/* Called with mmap_lock held for user mode emulation. */
-TranslationBlock *tb_gen_code(CPUState *cpu,
- vaddr pc, uint64_t cs_base,
- uint32_t flags, int cflags)
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
{
CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb, *existing_tb;
@@ -299,14 +271,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
assert_memory_lock();
qemu_thread_jit_write();
- phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
+ phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc);
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
- cflags = (cflags & ~CF_COUNT_MASK) | 1;
+ s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1;
}
- max_insns = cflags & CF_COUNT_MASK;
+ max_insns = s.cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = TCG_MAX_INSNS;
}
@@ -326,12 +298,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
- if (!(cflags & CF_PCREL)) {
- tb->pc = pc;
+ if (!(s.cflags & CF_PCREL)) {
+ tb->pc = s.pc;
}
- tb->cs_base = cs_base;
- tb->flags = flags;
- tb->cflags = cflags;
+ tb->cs_base = s.cs_base;
+ tb->flags = s.flags;
+ tb->cflags = s.cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
@@ -339,30 +311,20 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
tcg_ctx->gen_tb = tb;
- tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
-#ifdef CONFIG_SOFTMMU
- tcg_ctx->page_bits = TARGET_PAGE_BITS;
- tcg_ctx->page_mask = TARGET_PAGE_MASK;
- tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
-#endif
- tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
-#ifdef TCG_GUEST_DEFAULT_MO
- tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
-#else
- tcg_ctx->guest_mo = TCG_MO_ALL;
-#endif
+ tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
+ tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;
restart_translate:
- trace_translate_block(tb, pc, tb->tc.ptr);
+ trace_translate_block(tb, s.pc, tb->tc.ptr);
- gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
+ gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti);
if (unlikely(gen_code_size < 0)) {
switch (gen_code_size) {
case -1:
/*
* Overflow of code_gen_buffer, or the current slice of it.
*
- * TODO: We don't need to re-do gen_intermediate_code, nor
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
* should we re-do the tcg optimization currently hidden
* inside tcg_gen_code. All that should be required is to
* flush the TBs, allocate a new TB, re-initialize it per
@@ -432,10 +394,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
- perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
+ perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf));
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
- qemu_log_in_addr_range(pc)) {
+ qemu_log_in_addr_range(s.pc)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
int code_size, data_size;
@@ -457,7 +419,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@@ -470,7 +432,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
chunk_start = chunk_end;
@@ -528,9 +490,25 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
/*
+ * Insert TB into the corresponding region tree before publishing it
+ * through QHT. Otherwise, rewinding that happens inside the TB might fail
+ * to look the TB up by host PC.
+ */
+ tcg_tb_insert(tb);
+
+ /*
* If the TB is not associated with a physical RAM page then it must be
- * a temporary one-insn TB, and we have nothing left to do. Return early
- * before attempting to link to other TBs or add to the lookup table.
+ * a temporary one-insn TB.
+ *
+ * Such TBs must be added to region trees in order to make sure that
+ * restore_state_to_opc() - which on some architectures is not limited to
+ * rewinding, but also affects exception handling! - is called when such a
+ * TB causes an exception.
+ *
+ * At the same time, temporary one-insn TBs must be executed at most once,
+ * because subsequent reads from, e.g., I/O memory may return different
+ * values. So return early before attempting to link to other TBs or add
+ * to the QHT.
*/
if (tb_page_addr0(tb) == -1) {
assert_no_pages_locked();
@@ -538,13 +516,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
/*
- * Insert TB into the corresponding region tree before publishing it
- * through QHT. Otherwise rewinding happened in the TB might fail to
- * lookup itself using host PC.
- */
- tcg_tb_insert(tb);
-
- /*
* No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state.
*/
@@ -579,15 +550,11 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu_env(cpu);
- vaddr pc;
- uint64_t cs_base;
- tb_page_addr_t addr;
- uint32_t flags;
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ tb_page_addr_t addr = get_page_addr_code(env, s.pc);
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- addr = get_page_addr_code(env, pc);
if (addr != -1) {
- tb_invalidate_phys_range(addr, addr);
+ tb_invalidate_phys_range(cpu, addr, addr);
}
}
}
@@ -618,7 +585,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* to account for the re-execution of the branch.
*/
n = 1;
- cc = CPU_GET_CLASS(cpu);
+ cc = cpu->cc;
if (cc->tcg_ops->io_recompile_replay_branch &&
cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
cpu->neg.icount_decr.u16.low++;
@@ -629,9 +596,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* Exit the loop and potentially generate a new TB executing just
* the I/O insns. We also limit instrumentation to memory
* operations only (which execute after completion) so we don't
- * double instrument the instruction.
+ * double instrument the instruction. Also don't let an IRQ sneak
+ * in before we execute it.
*/
- cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = cpu->cc->get_pc(cpu);
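
In the translate-all.c hunks, decode_sleb128() now sign-extends at 64 bits rather than TARGET_LONG_BITS, matching the switch of the insn-start table to uint64_t words. A self-contained round trip of the encoding is sketched below; the encoder is written here from the format rather than copied from the patch, so treat it as illustrative, and note it assumes arithmetic right shift of negative int64_t values, as QEMU's own encoder does.

    #include <stdint.h>
    #include <stdio.h>

    /* Encode a signed value as SLEB128; returns one past the last byte written. */
    static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
    {
        int more;

        do {
            uint8_t byte = val & 0x7f;
            val >>= 7;               /* arithmetic shift assumed for int64_t */
            more = !((val == 0 && !(byte & 0x40)) || (val == -1 && (byte & 0x40)));
            if (more) {
                byte |= 0x80;
            }
            *p++ = byte;
        } while (more);
        return p;
    }

    /* Decode, sign-extending at 64 bits as the patched decoder does. */
    static int64_t decode_sleb128(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        int64_t val = 0;
        int shift = 0;
        uint8_t byte;

        do {
            byte = *p++;
            val |= (int64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -(int64_t)1 << shift;
        }
        *pp = p;
        return val;
    }

    int main(void)
    {
        uint8_t buf[16];
        uint8_t *end = encode_sleb128(buf, -12345);
        const uint8_t *p = buf;

        printf("%lld in %d bytes\n", (long long)decode_sleb128(&p),
               (int)(end - buf));
        return 0;
    }
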
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 113edcf..034f2f3 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -8,23 +8,24 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/target_page.h"
#include "exec/translator.h"
-#include "exec/cpu_ldst.h"
#include "exec/plugin-gen.h"
-#include "exec/cpu_ldst.h"
#include "tcg/tcg-op-common.h"
-#include "internal-target.h"
+#include "internal-common.h"
#include "disas/disas.h"
+#include "tb-internal.h"
static void set_can_do_io(DisasContextBase *db, bool val)
{
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
- offsetof(ArchCPU, parent_obj.neg.can_do_io) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.can_do_io) - sizeof(CPUState));
}
bool translator_io_start(DisasContextBase *db)
@@ -47,8 +48,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
count = tcg_temp_new_i32();
tcg_gen_ld_i32(count, tcg_env,
- offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
- - offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.icount_decr.u32) -
+ sizeof(CPUState));
}
if (cflags & CF_USE_ICOUNT) {
@@ -77,8 +78,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
if (cflags & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, tcg_env,
- offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
- - offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.icount_decr.u16.low) -
+ sizeof(CPUState));
}
return icount_start_insn;
@@ -102,6 +103,11 @@ static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
}
}
+bool translator_is_same_page(const DisasContextBase *db, vaddr addr)
+{
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
+}
+
bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{
/* Suppress goto_tb if requested. */
@@ -110,7 +116,7 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
}
/* Check for the dest on the same page as the start of the TB. */
- return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
+ return translator_is_same_page(db, dest);
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
@@ -129,13 +135,13 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = *max_insns;
- db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->insn_start = NULL;
db->fake_insn = false;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
db->record_start = 0;
db->record_len = 0;
+ db->code_mmuidx = cpu_mmu_index(cpu, true);
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -259,12 +265,14 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
/* Entire read is from the first page. */
- memcpy(dest, host + (pc - base), len);
- return true;
+ goto do_read;
}
if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
- /* Read begins on the first page and extends to the second. */
+ /*
+ * Read begins on the first page and extends to the second.
+ * The unaligned read is never atomic.
+ */
size_t len0 = -(pc | TARGET_PAGE_MASK);
memcpy(dest, host + (pc - base), len0);
pc += len0;
@@ -323,7 +331,39 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
host = db->host_addr[1];
}
- memcpy(dest, host + (pc - base), len);
+ do_read:
+ /*
+ * Assume aligned reads should be atomic, if possible.
+ * We're not in a position to jump out with EXCP_ATOMIC.
+ */
+ host += pc - base;
+ switch (len) {
+ case 2:
+ if (QEMU_IS_ALIGNED(pc, 2)) {
+ uint16_t t = qatomic_read((uint16_t *)host);
+ stw_he_p(dest, t);
+ return true;
+ }
+ break;
+ case 4:
+ if (QEMU_IS_ALIGNED(pc, 4)) {
+ uint32_t t = qatomic_read((uint32_t *)host);
+ stl_he_p(dest, t);
+ return true;
+ }
+ break;
+#ifdef CONFIG_ATOMIC64
+ case 8:
+ if (QEMU_IS_ALIGNED(pc, 8)) {
+ uint64_t t = qatomic_read__nocheck((uint64_t *)host);
+ stq_he_p(dest, t);
+ return true;
+ }
+ break;
+#endif
+ }
+ /* Unaligned or partial read from the second page is not atomic. */
+ memcpy(dest, host, len);
return true;
}
@@ -417,55 +457,62 @@ bool translator_st(const DisasContextBase *db, void *dest,
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
{
- uint8_t raw;
+ uint8_t val;
- if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
- raw = cpu_ldub_code(env, pc);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx);
+ val = cpu_ldb_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
}
- return raw;
+ return val;
}
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint16_t raw, tgt;
+ uint16_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap16(raw);
- } else {
- tgt = cpu_lduw_code(env, pc);
- raw = tswap16(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx);
+ val = cpu_ldw_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
}
- return tgt;
+ if (endian & MO_BSWAP) {
+ val = bswap16(val);
+ }
+ return val;
}
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint32_t raw, tgt;
+ uint32_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap32(raw);
- } else {
- tgt = cpu_ldl_code(env, pc);
- raw = tswap32(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx);
+ val = cpu_ldl_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
+ }
+ if (endian & MO_BSWAP) {
+ val = bswap32(val);
}
- return tgt;
+ return val;
}
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint64_t raw, tgt;
+ uint64_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap64(raw);
- } else {
- tgt = cpu_ldq_code(env, pc);
- raw = tswap64(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx);
+ val = cpu_ldq_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
+ }
+ if (endian & MO_BSWAP) {
+ val = bswap64(val);
}
- return tgt;
+ return val;
}
void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
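
The translator_ld*_end() rewrites above read the code bytes once in host order and byte-swap only when the caller's MemOp carries MO_BSWAP, instead of always funnelling through tswap*(). A reduced version of that pattern follows; the MO_BSWAP value, bswap32() helper and the buffer argument are stand-ins for the QEMU equivalents.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MO_BSWAP (1u << 3)   /* stand-in flag: swap relative to host order */

    static uint32_t bswap32(uint32_t v)
    {
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
    }

    /* Fetch four code bytes in host order, then swap only if requested. */
    static uint32_t ldl_code_end(const uint8_t *code, unsigned endian)
    {
        uint32_t val;

        memcpy(&val, code, sizeof(val));    /* host-order read */
        if (endian & MO_BSWAP) {
            val = bswap32(val);
        }
        return val;
    }

    int main(void)
    {
        const uint8_t insn[4] = { 0x01, 0x02, 0x03, 0x04 };

        printf("0x%08x 0x%08x\n",
               ldl_code_end(insn, 0), ldl_code_end(insn, MO_BSWAP));
        return 0;
    }
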
diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c
index 4fbe2db..1d52f48 100644
--- a/accel/tcg/user-exec-stub.c
+++ b/accel/tcg/user-exec-stub.c
@@ -1,6 +1,7 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/replay-core.h"
+#include "internal-common.h"
void cpu_resume(CPUState *cpu)
{
@@ -18,6 +19,16 @@ void cpu_exec_reset_hold(CPUState *cpu)
{
}
+/* User mode emulation does not support softmmu yet. */
+
+void tlb_init(CPUState *cpu)
+{
+}
+
+void tlb_destroy(CPUState *cpu)
+{
+}
+
/* User mode emulation does not support record/replay yet. */
bool replay_exception(void)
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 80d2454..f25d80e 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -17,23 +17,30 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#include "disas/disas.h"
-#include "exec/exec-all.h"
+#include "exec/vaddr.h"
+#include "exec/tlb-flags.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
+#include "user/cpu_loop.h"
+#include "user/guest-host.h"
#include "qemu/main-loop.h"
-#include "exec/translate-all.h"
+#include "user/page-protection.h"
#include "exec/page-protection.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
#include "qemu/atomic128.h"
-#include "trace/trace-root.h"
+#include "qemu/bswap.h"
+#include "qemu/int128.h"
+#include "trace.h"
#include "tcg/tcg-ldst.h"
+#include "backend-ldst.h"
#include "internal-common.h"
-#include "internal-target.h"
-#include "user-retaddr.h"
+#include "tb-internal.h"
__thread uintptr_t helper_retaddr;
@@ -119,9 +126,9 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* guest, we'd end up in an infinite loop of retrying the faulting access.
*/
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
- uintptr_t host_pc, abi_ptr guest_addr)
+ uintptr_t host_pc, vaddr guest_addr)
{
- switch (page_unprotect(guest_addr, host_pc)) {
+ switch (page_unprotect(cpu, guest_addr, host_pc)) {
case 0:
/*
* Fault not caused by a page marked unwritable to protect
@@ -155,7 +162,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
-static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
+static PageFlagsNode *pageflags_find(vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -163,8 +170,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
-static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
- target_ulong last)
+static PageFlagsNode *pageflags_next(PageFlagsNode *p, vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -193,13 +199,22 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
return rc;
}
-static int dump_region(void *priv, target_ulong start,
- target_ulong end, unsigned long prot)
+static int dump_region(void *opaque, vaddr start, vaddr end, int prot)
{
- FILE *f = (FILE *)priv;
+ FILE *f = opaque;
+ uint64_t mask;
+ int width;
- fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
- start, end, end - start,
+ if (guest_addr_max <= UINT32_MAX) {
+ mask = UINT32_MAX, width = 8;
+ } else {
+ mask = UINT64_MAX, width = 16;
+ }
+
+ fprintf(f, "%0*" PRIx64 "-%0*" PRIx64 " %0*" PRIx64 " %c%c%c\n",
+ width, start & mask,
+ width, end & mask,
+ width, (end - start) & mask,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
((prot & PAGE_EXEC) ? 'x' : '-'));
@@ -209,14 +224,14 @@ static int dump_region(void *priv, target_ulong start,
/* dump memory mappings */
void page_dump(FILE *f)
{
- const int length = sizeof(target_ulong) * 2;
+ int width = guest_addr_max <= UINT32_MAX ? 8 : 16;
fprintf(f, "%-*s %-*s %-*s %s\n",
- length, "start", length, "end", length, "size", "prot");
+ width, "start", width, "end", width, "size", "prot");
walk_memory_regions(f, dump_region);
}
-int page_get_flags(target_ulong address)
+int page_get_flags(vaddr address)
{
PageFlagsNode *p = pageflags_find(address, address);
@@ -239,7 +254,7 @@ int page_get_flags(target_ulong address)
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
-static void pageflags_create(target_ulong start, target_ulong last, int flags)
+static void pageflags_create(vaddr start, vaddr last, int flags)
{
PageFlagsNode *p = g_new(PageFlagsNode, 1);
@@ -250,13 +265,13 @@ static void pageflags_create(target_ulong start, target_ulong last, int flags)
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
-static bool pageflags_unset(target_ulong start, target_ulong last)
+static bool pageflags_unset(vaddr start, vaddr last)
{
bool inval_tb = false;
while (true) {
PageFlagsNode *p = pageflags_find(start, last);
- target_ulong p_last;
+ vaddr p_last;
if (!p) {
break;
@@ -295,8 +310,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last)
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
*/
-static void pageflags_create_merge(target_ulong start, target_ulong last,
- int flags)
+static void pageflags_create_merge(vaddr start, vaddr last, int flags)
{
PageFlagsNode *next = NULL, *prev = NULL;
@@ -347,11 +361,11 @@ static void pageflags_create_merge(target_ulong start, target_ulong last,
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
-static bool pageflags_set_clear(target_ulong start, target_ulong last,
+static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
{
PageFlagsNode *p;
- target_ulong p_start, p_last;
+ vaddr p_start, p_last;
int p_flags, merge_flags;
bool inval_tb = false;
@@ -486,12 +500,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
return inval_tb;
}
-/*
- * Modify the flags of a page and invalidate the code if necessary.
- * The flag PAGE_WRITE_ORG is positioned automatically depending
- * on PAGE_WRITE. The mmap_lock should already be held.
- */
-void page_set_flags(target_ulong start, target_ulong last, int flags)
+void page_set_flags(vaddr start, vaddr last, int flags)
{
bool reset = false;
bool inval_tb = false;
@@ -500,7 +509,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start <= last);
- assert(last <= GUEST_ADDR_MAX);
+ assert(last <= guest_addr_max);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
@@ -527,13 +536,13 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
}
-bool page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(vaddr start, vaddr len, int flags)
{
- target_ulong last;
+ vaddr last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret;
@@ -582,7 +591,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
break;
}
/* Asking about writable, but has been protected: undo. */
- if (!page_unprotect(start, 0)) {
+ if (!page_unprotect(NULL, start, 0)) {
ret = false;
break;
}
@@ -609,20 +618,19 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
-bool page_check_range_empty(target_ulong start, target_ulong last)
+bool page_check_range_empty(vaddr start, vaddr last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
-target_ulong page_find_range_empty(target_ulong min, target_ulong max,
- target_ulong len, target_ulong align)
+vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align)
{
- target_ulong len_m1, align_m1;
+ vaddr len_m1, align_m1;
assert(min <= max);
- assert(max <= GUEST_ADDR_MAX);
+ assert(max <= guest_addr_max);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
@@ -657,10 +665,10 @@ target_ulong page_find_range_empty(target_ulong min, target_ulong max,
}
}
-void page_protect(tb_page_addr_t address)
+void tb_lock_page0(tb_page_addr_t address)
{
PageFlagsNode *p;
- target_ulong start, last;
+ vaddr start, last;
int host_page_size = qemu_real_host_page_size();
int prot;
@@ -702,11 +710,13 @@ void page_protect(tb_page_addr_t address)
* immediately exited. (We can only return 2 if the 'pc' argument is
* non-zero.)
*/
-int page_unprotect(target_ulong address, uintptr_t pc)
+int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc)
{
PageFlagsNode *p;
bool current_tb_invalidated;
+ assert((cpu == NULL) == (pc == 0));
+
/*
* Technically this isn't safe inside a signal handler. However we
* know this only ever happens in a synchronous SEGV handler, so in
@@ -729,15 +739,15 @@ int page_unprotect(target_ulong address, uintptr_t pc)
* this thread raced with another one which got here first and
* set the page to PAGE_WRITE and did the TB invalidate for us.
*/
-#ifdef TARGET_HAS_PRECISE_SMC
- TranslationBlock *current_tb = tcg_tb_lookup(pc);
- if (current_tb) {
- current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ if (pc && cpu->cc->tcg_ops->precise_smc) {
+ TranslationBlock *current_tb = tcg_tb_lookup(pc);
+ if (current_tb) {
+ current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ }
}
-#endif
} else {
int host_page_size = qemu_real_host_page_size();
- target_ulong start, len, i;
+ vaddr start, len, i;
int prot;
if (host_page_size <= TARGET_PAGE_SIZE) {
@@ -745,14 +755,15 @@ int page_unprotect(target_ulong address, uintptr_t pc)
len = TARGET_PAGE_SIZE;
prot = p->flags | PAGE_WRITE;
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
- current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
+ current_tb_invalidated =
+ tb_invalidate_phys_page_unwind(cpu, start, pc);
} else {
start = address & -host_page_size;
len = host_page_size;
prot = 0;
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
- target_ulong addr = start + i;
+ vaddr addr = start + i;
p = pageflags_find(addr, addr);
if (p) {
@@ -768,7 +779,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
* the corresponding translated code.
*/
current_tb_invalidated |=
- tb_invalidate_phys_page_unwind(addr, pc);
+ tb_invalidate_phys_page_unwind(cpu, addr, pc);
}
}
if (prot & PAGE_EXEC) {
@@ -806,7 +817,7 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) {
- if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
+ if (access_type != MMU_INST_FETCH
&& cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
return TLB_MMIO;
}
@@ -848,6 +859,12 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ return g2h(env_cpu(env), addr);
+}
+
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
@@ -862,7 +879,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
return addr;
}
-#ifdef TARGET_PAGE_DATA_SIZE
/*
* Allocate chunks of target data together. For the only current user,
* if we allocate one hunk per page, we have overhead of 40/128 or 40%.
@@ -878,10 +894,16 @@ typedef struct TargetPageDataNode {
} TargetPageDataNode;
static IntervalTreeRoot targetdata_root;
+static size_t target_page_data_size;
-void page_reset_target_data(target_ulong start, target_ulong last)
+void page_reset_target_data(vaddr start, vaddr last)
{
IntervalTreeNode *n, *next;
+ size_t size = target_page_data_size;
+
+ if (likely(size == 0)) {
+ return;
+ }
assert_memory_lock();
@@ -893,7 +915,7 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n != NULL;
n = next,
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
- target_ulong n_start, n_last, p_ofs, p_len;
+ vaddr n_start, n_last, p_ofs, p_len;
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
if (n->start >= start && n->last <= last) {
@@ -912,16 +934,21 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n_last = MIN(last, n->last);
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
- memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
- p_len * TARGET_PAGE_DATA_SIZE);
+ memset(t->data + p_ofs * size, 0, p_len * size);
}
}
-void *page_get_target_data(target_ulong address)
+void *page_get_target_data(vaddr address, size_t size)
{
IntervalTreeNode *n;
TargetPageDataNode *t;
- target_ulong page, region, p_ofs;
+ vaddr page, region, p_ofs;
+
+ /* Remember the size from the first call, and it should be constant. */
+ if (unlikely(target_page_data_size != size)) {
+ assert(target_page_data_size == 0);
+ target_page_data_size = size;
+ }
page = address & TARGET_PAGE_MASK;
region = address & TBD_MASK;
@@ -937,8 +964,7 @@ void *page_get_target_data(target_ulong address)
mmap_lock();
n = interval_tree_iter_first(&targetdata_root, page, page);
if (!n) {
- t = g_malloc0(sizeof(TargetPageDataNode)
- + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
+ t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size);
n = &t->itree;
n->start = region;
n->last = region | ~TBD_MASK;
@@ -949,18 +975,15 @@ void *page_get_target_data(target_ulong address)
t = container_of(n, TargetPageDataNode, itree);
p_ofs = (page - region) >> TARGET_PAGE_BITS;
- return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
+ return t->data + p_ofs * size;
}
-#else
-void page_reset_target_data(target_ulong start, target_ulong last) { }
-#endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
@@ -973,6 +996,85 @@ static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
return ret;
}
+/* physical memory access (slow version, mainly for debug) */
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write)
+{
+ int flags;
+ vaddr l, page;
+ uint8_t *buf = ptr;
+ ssize_t written;
+ int ret = -1;
+ int fd = -1;
+
+ mmap_lock();
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len) {
+ l = len;
+ }
+ flags = page_get_flags(page);
+ if (!(flags & PAGE_VALID)) {
+ goto out_close;
+ }
+ if (is_write) {
+ if (flags & PAGE_WRITE) {
+ memcpy(g2h(cpu, addr), buf, l);
+ } else {
+ /* Bypass the host page protection using ptrace. */
+ if (fd == -1) {
+ fd = open("/proc/self/mem", O_WRONLY);
+ if (fd == -1) {
+ goto out;
+ }
+ }
+ /*
+ * If there is a TranslationBlock and we weren't bypassing the
+ * host page protection, the memcpy() above would SEGV,
+ * ultimately leading to page_unprotect(). So invalidate the
+ * translations manually. Both invalidation and pwrite() must
+ * be under mmap_lock() in order to prevent the creation of
+ * another TranslationBlock in between.
+ */
+ tb_invalidate_phys_range(NULL, addr, addr + l - 1);
+ written = pwrite(fd, buf, l,
+ (off_t)(uintptr_t)g2h_untagged(addr));
+ if (written != l) {
+ goto out_close;
+ }
+ }
+ } else if (flags & PAGE_READ) {
+ memcpy(buf, g2h(cpu, addr), l);
+ } else {
+ /* Bypass the host page protection using ptrace. */
+ if (fd == -1) {
+ fd = open("/proc/self/mem", O_RDONLY);
+ if (fd == -1) {
+ goto out;
+ }
+ }
+ if (pread(fd, buf, l,
+ (off_t)(uintptr_t)g2h_untagged(addr)) != l) {
+ goto out_close;
+ }
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ ret = 0;
+out_close:
+ if (fd != -1) {
+ close(fd);
+ }
+out:
+ mmap_unlock();
+
+ return ret;
+}
+
#include "ldst_atomicity.c.inc"
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
@@ -981,7 +1083,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
void *haddr;
uint8_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
ret = ldub_p(haddr);
clear_helper_retaddr();
@@ -995,7 +1097,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint16_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_2(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1013,7 +1115,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint32_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_4(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1031,7 +1133,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint64_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_8(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1042,7 +1144,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}
-static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
@@ -1050,7 +1152,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
MemOp mop = get_memop(oi);
tcg_debug_assert((mop & MO_SIZE) == MO_128);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1066,7 +1168,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
{
void *haddr;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
@@ -1078,7 +1180,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1094,7 +1196,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1110,7 +1212,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1126,7 +1228,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
void *haddr;
MemOpIdx mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1136,101 +1238,28 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
clear_helper_retaddr();
}
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = ldub_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = lduw_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = ldl_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
-{
- uint64_t ret;
-
- set_helper_retaddr(1);
- ret = ldq_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint8_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = ldub_p(haddr);
- clear_helper_retaddr();
- return ret;
+ return do_ld1_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint16_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = lduw_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap16(ret);
- }
- return ret;
+ return do_ld2_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint32_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = ldl_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap32(ret);
- }
- return ret;
+ return do_ld4_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint64_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- ret = ldq_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap64(ret);
- }
- return ret;
+ return do_ld8_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
#include "ldst_common.c.inc"
@@ -1242,7 +1271,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
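
The new cpu_memory_rw_debug() above falls back to /proc/self/mem when the host page protection forbids a direct memcpy(), so the gdbstub path can still read or write such pages. A minimal Linux-only sketch of the read side of that trick follows (the write side is pwrite() plus the TB invalidation shown in the hunk); function and variable names here are illustrative.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Read len bytes at host address addr via /proc/self/mem instead of
     * dereferencing the pointer, so PROT_NONE mappings can still be read.
     */
    static ssize_t debug_read_bypassing_prot(void *dst, uintptr_t addr, size_t len)
    {
        int fd = open("/proc/self/mem", O_RDONLY);
        ssize_t n;

        if (fd < 0) {
            return -1;
        }
        n = pread(fd, dst, len, (off_t)addr);
        close(fd);
        return n;
    }

    int main(void)
    {
        static const char secret[] = "hello";
        char buf[sizeof(secret)] = { 0 };

        /* The page is readable here anyway; the point is the pread() path. */
        if (debug_read_bypassing_prot(buf, (uintptr_t)secret, sizeof(secret)) > 0) {
            printf("read back: %s\n", buf);
        }
        return 0;
    }
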
diff --git a/accel/tcg/user-retaddr.h b/accel/tcg/user-retaddr.h
deleted file mode 100644
index e0f57e1..0000000
--- a/accel/tcg/user-retaddr.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef ACCEL_TCG_USER_RETADDR_H
-#define ACCEL_TCG_USER_RETADDR_H
-
-#include "qemu/atomic.h"
-
-extern __thread uintptr_t helper_retaddr;
-
-static inline void set_helper_retaddr(uintptr_t ra)
-{
- helper_retaddr = ra;
- /*
- * Ensure that this write is visible to the SIGSEGV handler that
- * may be invoked due to a subsequent invalid memory operation.
- */
- signal_barrier();
-}
-
-static inline void clear_helper_retaddr(void)
-{
- /*
- * Ensure that previous memory operations have succeeded before
- * removing the data visible to the signal handler.
- */
- signal_barrier();
- helper_retaddr = 0;
-}
-
-#endif
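
The removed user-retaddr.h captured a small but useful pattern: publish a thread-local return address before touching guest memory so a synchronous SIGSEGV handler on the same thread can see it, with a compiler-level signal fence on each side. The same idea in generic C is sketched below; using __atomic_signal_fence for QEMU's signal_barrier() is an assumption about that macro.

    #include <stdint.h>

    static __thread uintptr_t helper_retaddr;

    /* Publish the return address for a SIGSEGV handler running on this thread. */
    static inline void set_helper_retaddr(uintptr_t ra)
    {
        helper_retaddr = ra;
        /* Make the store visible before any faulting access that follows. */
        __atomic_signal_fence(__ATOMIC_SEQ_CST);
    }

    static inline void clear_helper_retaddr(void)
    {
        /* Ensure the guarded accesses completed before dropping the marker. */
        __atomic_signal_fence(__ATOMIC_SEQ_CST);
        helper_retaddr = 0;
    }
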
diff --git a/accel/tcg/vcpu-state.h b/accel/tcg/vcpu-state.h
index e407d91..2e3464b 100644
--- a/accel/tcg/vcpu-state.h
+++ b/accel/tcg/vcpu-state.h
@@ -1,6 +1,11 @@
/*
- * SPDX-FileContributor: Philippe Mathieu-DaudƩ <philmd@linaro.org>
- * SPDX-FileCopyrightText: 2023 Linaro Ltd.
+ * TaskState helpers for QEMU
+ *
+ * Copyright (c) 2023 Linaro Ltd.
+ *
+ * Authors:
+ * Philippe Mathieu-DaudƩ
+ *
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef ACCEL_TCG_VCPU_STATE_H
diff --git a/accel/tcg/watchpoint.c b/accel/tcg/watchpoint.c
index d3aab11..cfb37a4 100644
--- a/accel/tcg/watchpoint.c
+++ b/accel/tcg/watchpoint.c
@@ -19,13 +19,15 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
-#include "qemu/error-report.h"
-#include "exec/exec-all.h"
-#include "exec/translate-all.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "exec/breakpoint.h"
+#include "exec/cpu-interrupt.h"
+#include "exec/page-protection.h"
+#include "exec/translation-block.h"
+#include "system/tcg.h"
+#include "system/replay.h"
+#include "accel/tcg/cpu-ops.h"
#include "hw/core/cpu.h"
+#include "internal-common.h"
/*
* Return true if this watchpoint address matches the specified
@@ -66,7 +68,6 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
@@ -82,9 +83,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
return;
}
- if (cc->tcg_ops->adjust_watchpoint_address) {
+ if (cpu->cc->tcg_ops->adjust_watchpoint_address) {
/* this is currently used only by ARM BE32 */
- addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
+ addr = cpu->cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
}
assert((flags & ~BP_MEM_ACCESS) == 0);
@@ -116,24 +117,21 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
wp->hitattrs = attrs;
if (wp->flags & BP_CPU
- && cc->tcg_ops->debug_check_watchpoint
- && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
+ && cpu->cc->tcg_ops->debug_check_watchpoint
+ && !cpu->cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
- mmap_lock();
/* This call also restores vCPU state */
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
- mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
- mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
} else {
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index 0bdefce..de52a8f 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -18,9 +18,10 @@
#include "hw/xen/xen_igd.h"
#include "chardev/char.h"
#include "qemu/accel.h"
-#include "sysemu/cpus.h"
-#include "sysemu/xen.h"
-#include "sysemu/runstate.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
+#include "system/xen.h"
+#include "system/runstate.h"
#include "migration/misc.h"
#include "migration/global_state.h"
#include "hw/boards.h"
@@ -115,7 +116,7 @@ static int xen_init(MachineState *ms)
return 0;
}
-static void xen_accel_class_init(ObjectClass *oc, void *data)
+static void xen_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
static GlobalProperty compat[] = {
@@ -146,7 +147,7 @@ static const TypeInfo xen_accel_type = {
.class_init = xen_accel_class_init,
};
-static void xen_accel_ops_class_init(ObjectClass *oc, void *data)
+static void xen_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c
index cacae1e..9b6c01c 100644
--- a/audio/alsaaudio.c
+++ b/audio/alsaaudio.c
@@ -899,7 +899,7 @@ static void alsa_enable_in(HWVoiceIn *hw, bool enable)
static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
{
if (!apdo->has_try_poll) {
- apdo->try_poll = true;
+ apdo->try_poll = false;
apdo->has_try_poll = true;
}
}
diff --git a/audio/audio-hmp-cmds.c b/audio/audio-hmp-cmds.c
index c9608b7..8774c09 100644
--- a/audio/audio-hmp-cmds.c
+++ b/audio/audio-hmp-cmds.c
@@ -27,7 +27,7 @@
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
diff --git a/audio/audio.c b/audio/audio.c
index af0ae33..89f091b 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -32,15 +32,15 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "ui/qemu-spice.h"
#include "trace.h"
@@ -905,6 +905,14 @@ size_t AUD_read(SWVoiceIn *sw, void *buf, size_t size)
int AUD_get_buffer_size_out(SWVoiceOut *sw)
{
+ if (!sw) {
+ return 0;
+ }
+
+ if (audio_get_pdo_out(sw->s->dev)->mixing_engine) {
+ return sw->resample_buf.size * sw->info.bytes_per_frame;
+ }
+
return sw->hw->samples * sw->hw->info.bytes_per_frame;
}
@@ -1884,7 +1892,8 @@ CaptureVoiceOut *AUD_add_capture(
cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame);
if (hw->info.is_float) {
- hw->clip = mixeng_clip_float[hw->info.nchannels == 2];
+ hw->clip = mixeng_clip_float[hw->info.nchannels == 2]
+ [hw->info.swap_endianness];
} else {
hw->clip = mixeng_clip
[hw->info.nchannels == 2]
@@ -2274,17 +2283,19 @@ size_t audio_rate_peek_bytes(RateCtl *rate, struct audio_pcm_info *info)
ticks = now - rate->start_ticks;
bytes = muldiv64(ticks, info->bytes_per_second, NANOSECONDS_PER_SECOND);
frames = (bytes - rate->bytes_sent) / info->bytes_per_frame;
- if (frames < 0 || frames > 65536) {
- AUD_log(NULL, "Resetting rate control (%" PRId64 " frames)\n", frames);
- audio_rate_start(rate);
- frames = 0;
- }
+ rate->peeked_frames = frames;
- return frames * info->bytes_per_frame;
+ return frames < 0 ? 0 : frames * info->bytes_per_frame;
}
void audio_rate_add_bytes(RateCtl *rate, size_t bytes_used)
{
+ if (rate->peeked_frames < 0 || rate->peeked_frames > 65536) {
+ AUD_log(NULL, "Resetting rate control (%" PRId64 " frames)\n",
+ rate->peeked_frames);
+ audio_rate_start(rate);
+ }
+
rate->bytes_sent += bytes_used;
}
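
The audio.c change above splits rate control in two: audio_rate_peek_bytes() only records how many frames the elapsed time would allow, and audio_rate_add_bytes() resets the clock when that recorded figure was implausible. A simplified standalone version of the same flow is sketched below, with CLOCK_MONOTONIC standing in for QEMU's clock, made-up field names, and none of the muldiv64-style overflow protection the real code uses.

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef struct RateCtl {
        int64_t start_ns;
        int64_t bytes_sent;
        int64_t peeked_frames;
    } RateCtl;

    static int64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000ll + ts.tv_nsec;
    }

    static void rate_start(RateCtl *rate)
    {
        rate->start_ns = now_ns();
        rate->bytes_sent = 0;
    }

    /* Bytes the stream may move right now, given its nominal byte rate. */
    static int64_t rate_peek_bytes(RateCtl *rate, int64_t bytes_per_second,
                                   int bytes_per_frame)
    {
        int64_t elapsed = now_ns() - rate->start_ns;
        int64_t budget = elapsed * bytes_per_second / 1000000000ll;
        int64_t frames = (budget - rate->bytes_sent) / bytes_per_frame;

        rate->peeked_frames = frames;
        return frames < 0 ? 0 : frames * bytes_per_frame;
    }

    static void rate_add_bytes(RateCtl *rate, int64_t bytes_used)
    {
        /* A wildly off peek means the stream stalled or jumped: start over. */
        if (rate->peeked_frames < 0 || rate->peeked_frames > 65536) {
            rate_start(rate);
        }
        rate->bytes_sent += bytes_used;
    }

    int main(void)
    {
        RateCtl rate;

        rate_start(&rate);
        /* 48 kHz stereo S16: 192000 bytes/s, 4 bytes per frame. */
        printf("%lld bytes allowed\n",
               (long long)rate_peek_bytes(&rate, 192000, 4));
        rate_add_bytes(&rate, 0);
        return 0;
    }
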
diff --git a/audio/audio_int.h b/audio/audio_int.h
index 2d079d0..f78ca05 100644
--- a/audio/audio_int.h
+++ b/audio/audio_int.h
@@ -255,6 +255,7 @@ const char *audio_application_name(void);
typedef struct RateCtl {
int64_t start_ticks;
int64_t bytes_sent;
+ int64_t peeked_frames;
} RateCtl;
void audio_rate_start(RateCtl *rate);
diff --git a/audio/audio_template.h b/audio/audio_template.h
index 7ccfec0..c29d79c 100644
--- a/audio/audio_template.h
+++ b/audio/audio_template.h
@@ -174,9 +174,11 @@ static int glue (audio_pcm_sw_init_, TYPE) (
if (sw->info.is_float) {
#ifdef DAC
- sw->conv = mixeng_conv_float[sw->info.nchannels == 2];
+ sw->conv = mixeng_conv_float[sw->info.nchannels == 2]
+ [sw->info.swap_endianness];
#else
- sw->clip = mixeng_clip_float[sw->info.nchannels == 2];
+ sw->clip = mixeng_clip_float[sw->info.nchannels == 2]
+ [sw->info.swap_endianness];
#endif
} else {
#ifdef DAC
@@ -303,9 +305,11 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
if (hw->info.is_float) {
#ifdef DAC
- hw->clip = mixeng_clip_float[hw->info.nchannels == 2];
+ hw->clip = mixeng_clip_float[hw->info.nchannels == 2]
+ [hw->info.swap_endianness];
#else
- hw->conv = mixeng_conv_float[hw->info.nchannels == 2];
+ hw->conv = mixeng_conv_float[hw->info.nchannels == 2]
+ [hw->info.swap_endianness];
#endif
} else {
#ifdef DAC
diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c
index 60fcf64..b44fdd1 100644
--- a/audio/dbusaudio.c
+++ b/audio/dbusaudio.c
@@ -43,9 +43,10 @@
#define DBUS_DISPLAY1_AUDIO_PATH DBUS_DISPLAY1_ROOT "/Audio"
-#define DBUS_AUDIO_NSAMPLES 1024 /* could be configured? */
+#define DBUS_DEFAULT_AUDIO_NSAMPLES 480
typedef struct DBusAudio {
+ Audiodev *dev;
GDBusObjectManagerServer *server;
bool p2p;
GDBusObjectSkeleton *audio;
@@ -105,7 +106,7 @@ static size_t dbus_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size)
assert(buf == vo->buf + vo->buf_pos && vo->buf_pos + size <= vo->buf_size);
vo->buf_pos += size;
- trace_dbus_audio_put_buffer_out(size);
+ trace_dbus_audio_put_buffer_out(vo->buf_pos, vo->buf_size);
if (vo->buf_pos < vo->buf_size) {
return size;
@@ -151,6 +152,18 @@ dbus_init_out_listener(QemuDBusDisplay1AudioOutListener *listener,
G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL);
}
+static guint
+dbus_audio_get_nsamples(DBusAudio *da)
+{
+ AudiodevDBusOptions *opts = &da->dev->u.dbus;
+
+ if (opts->has_nsamples && opts->nsamples) {
+ return opts->nsamples;
+ } else {
+ return DBUS_DEFAULT_AUDIO_NSAMPLES;
+ }
+}
+
static int
dbus_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
{
@@ -160,7 +173,7 @@ dbus_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
QemuDBusDisplay1AudioOutListener *listener = NULL;
audio_pcm_init_info(&hw->info, as);
- hw->samples = DBUS_AUDIO_NSAMPLES;
+ hw->samples = dbus_audio_get_nsamples(da);
audio_rate_start(&vo->rate);
g_hash_table_iter_init(&iter, da->out_listeners);
@@ -274,7 +287,7 @@ dbus_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
QemuDBusDisplay1AudioInListener *listener = NULL;
audio_pcm_init_info(&hw->info, as);
- hw->samples = DBUS_AUDIO_NSAMPLES;
+ hw->samples = dbus_audio_get_nsamples(da);
audio_rate_start(&vo->rate);
g_hash_table_iter_init(&iter, da->in_listeners);
@@ -399,6 +412,7 @@ dbus_audio_init(Audiodev *dev, Error **errp)
{
DBusAudio *da = g_new0(DBusAudio, 1);
+ da->dev = dev;
da->out_listeners = g_hash_table_new_full(g_str_hash, g_str_equal,
g_free, g_object_unref);
da->in_listeners = g_hash_table_new_full(g_str_hash, g_str_equal,
@@ -524,11 +538,17 @@ dbus_audio_register_listener(AudioState *s,
);
}
+ GDBusConnectionFlags flags =
+ G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER;
+#ifdef WIN32
+ flags |= G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_ALLOW_ANONYMOUS;
+#endif
+
listener_conn =
g_dbus_connection_new_sync(
G_IO_STREAM(socket_conn),
guid,
- G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER,
+ flags,
NULL, NULL, &err);
if (err) {
error_report("Failed to setup peer connection: %s", err->message);
@@ -646,6 +666,7 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
"swapped-signal::handle-register-out-listener",
dbus_audio_register_out_listener, s,
NULL);
+ qemu_dbus_display1_audio_set_nsamples(da->iface, dbus_audio_get_nsamples(da));
g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(da->audio),
G_DBUS_INTERFACE_SKELETON(da->iface));
diff --git a/audio/mixeng.c b/audio/mixeng.c
index 69f6549..703ee54 100644
--- a/audio/mixeng.c
+++ b/audio/mixeng.c
@@ -283,10 +283,15 @@ static const float float_scale_reciprocal = 1.f / ((int64_t)INT32_MAX + 1);
#endif
#endif
+#define F32_TO_F32S(v) \
+ bswap32((union { uint32_t i; float f; }){ .f = (v) }.i)
+#define F32S_TO_F32(v) \
+ ((union { uint32_t i; float f; }){ .i = bswap32(v) }.f)
+
static void conv_natural_float_to_mono(struct st_sample *dst, const void *src,
int samples)
{
- float *in = (float *)src;
+ const float *in = src;
while (samples--) {
dst->r = dst->l = CONV_NATURAL_FLOAT(*in++);
@@ -294,10 +299,21 @@ static void conv_natural_float_to_mono(struct st_sample *dst, const void *src,
}
}
+static void conv_swap_float_to_mono(struct st_sample *dst, const void *src,
+ int samples)
+{
+ const uint32_t *in_f32s = src;
+
+ while (samples--) {
+ dst->r = dst->l = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++));
+ dst++;
+ }
+}
+
static void conv_natural_float_to_stereo(struct st_sample *dst, const void *src,
int samples)
{
- float *in = (float *)src;
+ const float *in = src;
while (samples--) {
dst->l = CONV_NATURAL_FLOAT(*in++);
@@ -306,15 +322,33 @@ static void conv_natural_float_to_stereo(struct st_sample *dst, const void *src,
}
}
-t_sample *mixeng_conv_float[2] = {
- conv_natural_float_to_mono,
- conv_natural_float_to_stereo,
+static void conv_swap_float_to_stereo(struct st_sample *dst, const void *src,
+ int samples)
+{
+ const uint32_t *in_f32s = src;
+
+ while (samples--) {
+ dst->l = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++));
+ dst->r = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++));
+ dst++;
+ }
+}
+
+t_sample *mixeng_conv_float[2][2] = {
+ {
+ conv_natural_float_to_mono,
+ conv_swap_float_to_mono,
+ },
+ {
+ conv_natural_float_to_stereo,
+ conv_swap_float_to_stereo,
+ }
};
static void clip_natural_float_from_mono(void *dst, const struct st_sample *src,
int samples)
{
- float *out = (float *)dst;
+ float *out = dst;
while (samples--) {
*out++ = CLIP_NATURAL_FLOAT(src->l + src->r);
@@ -322,10 +356,21 @@ static void clip_natural_float_from_mono(void *dst, const struct st_sample *src,
}
}
+static void clip_swap_float_from_mono(void *dst, const struct st_sample *src,
+ int samples)
+{
+ uint32_t *out_f32s = dst;
+
+ while (samples--) {
+ *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->l + src->r));
+ src++;
+ }
+}
+
static void clip_natural_float_from_stereo(
void *dst, const struct st_sample *src, int samples)
{
- float *out = (float *)dst;
+ float *out = dst;
while (samples--) {
*out++ = CLIP_NATURAL_FLOAT(src->l);
@@ -334,9 +379,27 @@ static void clip_natural_float_from_stereo(
}
}
-f_sample *mixeng_clip_float[2] = {
- clip_natural_float_from_mono,
- clip_natural_float_from_stereo,
+static void clip_swap_float_from_stereo(
+ void *dst, const struct st_sample *src, int samples)
+{
+ uint32_t *out_f32s = dst;
+
+ while (samples--) {
+ *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->l));
+ *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->r));
+ src++;
+ }
+}
+
+f_sample *mixeng_clip_float[2][2] = {
+ {
+ clip_natural_float_from_mono,
+ clip_swap_float_from_mono,
+ },
+ {
+ clip_natural_float_from_stereo,
+ clip_swap_float_from_stereo,
+ }
};
void audio_sample_to_uint64(const void *samples, int pos,
diff --git a/audio/mixeng.h b/audio/mixeng.h
index a5f56d2..ead93ac 100644
--- a/audio/mixeng.h
+++ b/audio/mixeng.h
@@ -42,9 +42,9 @@ typedef void (f_sample) (void *dst, const struct st_sample *src, int samples);
extern t_sample *mixeng_conv[2][2][2][3];
extern f_sample *mixeng_clip[2][2][2][3];
-/* indices: [stereo] */
-extern t_sample *mixeng_conv_float[2];
-extern f_sample *mixeng_clip_float[2];
+/* indices: [stereo][swap endianness] */
+extern t_sample *mixeng_conv_float[2][2];
+extern f_sample *mixeng_clip_float[2][2];
void *st_rate_start (int inrate, int outrate);
void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf,
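Illustrative note, not part of the patch: mixeng_conv_float and mixeng_clip_float are now two-dimensional, indexed by channel count and by whether the stream endianness differs from the host. A selection sketch using only fields visible in this diff:

    /* Sketch: choose the float conversion routine the way the sw/hw init paths do. */
    static t_sample *pick_float_conv(const struct audio_pcm_info *info)
    {
        return mixeng_conv_float[info->nchannels == 2]
                                [info->swap_endianness];
    }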
diff --git a/audio/pwaudio.c b/audio/pwaudio.c
index 3b14e04..8e13b58 100644
--- a/audio/pwaudio.c
+++ b/audio/pwaudio.c
@@ -769,13 +769,15 @@ qpw_audio_init(Audiodev *dev, Error **errp)
pw->core = pw_context_connect(pw->context, NULL, 0);
if (pw->core == NULL) {
pw_thread_loop_unlock(pw->thread_loop);
- goto fail_error;
+ error_setg_errno(errp, errno, "Failed to connect to PipeWire instance");
+ goto fail;
}
if (pw_core_add_listener(pw->core, &pw->core_listener,
&core_events, pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
- goto fail_error;
+ error_setg(errp, "Failed to add PipeWire listener");
+ goto fail;
}
if (wait_resync(pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
@@ -785,8 +787,6 @@ qpw_audio_init(Audiodev *dev, Error **errp)
return g_steal_pointer(&pw);
-fail_error:
- error_setg(errp, "Failed to initialize PW context");
fail:
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
diff --git a/audio/trace-events b/audio/trace-events
index ab04f02..7e3f159 100644
--- a/audio/trace-events
+++ b/audio/trace-events
@@ -15,7 +15,7 @@ oss_version(int version) "OSS version = 0x%x"
# dbusaudio.c
dbus_audio_register(const char *s, const char *dir) "sender = %s, dir = %s"
-dbus_audio_put_buffer_out(size_t len) "len = %zu"
+dbus_audio_put_buffer_out(size_t pos, size_t size) "buf_pos = %zu, buf_size = %zu"
dbus_audio_read(size_t len) "len = %zu"
# pwaudio.c
diff --git a/authz/list.c b/authz/list.c
index 0e17eed..17aa0ef 100644
--- a/authz/list.c
+++ b/authz/list.c
@@ -116,7 +116,7 @@ qauthz_list_finalize(Object *obj)
static void
-qauthz_list_class_init(ObjectClass *oc, void *data)
+qauthz_list_class_init(ObjectClass *oc, const void *data)
{
QAuthZClass *authz = QAUTHZ_CLASS(oc);
@@ -253,7 +253,7 @@ static const TypeInfo qauthz_list_info = {
.instance_size = sizeof(QAuthZList),
.instance_finalize = qauthz_list_finalize,
.class_init = qauthz_list_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/authz/listfile.c b/authz/listfile.c
index 45a60e9..13741d5 100644
--- a/authz/listfile.c
+++ b/authz/listfile.c
@@ -28,8 +28,8 @@
#include "qemu/filemonitor.h"
#include "qom/object_interfaces.h"
#include "qapi/qapi-visit-authz.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qobject.h"
+#include "qobject/qjson.h"
+#include "qobject/qobject.h"
#include "qapi/qobject-input-visitor.h"
@@ -220,7 +220,7 @@ qauthz_list_file_finalize(Object *obj)
static void
-qauthz_list_file_class_init(ObjectClass *oc, void *data)
+qauthz_list_file_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
QAuthZClass *authz = QAUTHZ_CLASS(oc);
@@ -272,7 +272,7 @@ static const TypeInfo qauthz_list_file_info = {
.instance_size = sizeof(QAuthZListFile),
.instance_finalize = qauthz_list_file_finalize,
.class_init = qauthz_list_file_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/authz/pamacct.c b/authz/pamacct.c
index c862d9f..c0ad674 100644
--- a/authz/pamacct.c
+++ b/authz/pamacct.c
@@ -103,7 +103,7 @@ qauthz_pam_finalize(Object *obj)
static void
-qauthz_pam_class_init(ObjectClass *oc, void *data)
+qauthz_pam_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
QAuthZClass *authz = QAUTHZ_CLASS(oc);
@@ -136,7 +136,7 @@ static const TypeInfo qauthz_pam_info = {
.instance_size = sizeof(QAuthZPAM),
.instance_finalize = qauthz_pam_finalize,
.class_init = qauthz_pam_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/authz/simple.c b/authz/simple.c
index 0597dcd..f8f2b98 100644
--- a/authz/simple.c
+++ b/authz/simple.c
@@ -78,7 +78,7 @@ qauthz_simple_complete(UserCreatable *uc, Error **errp)
static void
-qauthz_simple_class_init(ObjectClass *oc, void *data)
+qauthz_simple_class_init(ObjectClass *oc, const void *data)
{
QAuthZClass *authz = QAUTHZ_CLASS(oc);
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -111,7 +111,7 @@ static const TypeInfo qauthz_simple_info = {
.instance_size = sizeof(QAuthZSimple),
.instance_finalize = qauthz_simple_finalize,
.class_init = qauthz_simple_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/backends/Kconfig b/backends/Kconfig
index 2cb23f6..d3dbe19 100644
--- a/backends/Kconfig
+++ b/backends/Kconfig
@@ -3,3 +3,7 @@ source tpm/Kconfig
config IOMMUFD
bool
depends on VFIO
+
+config SPDM_SOCKET
+ bool
+ default y
diff --git a/backends/confidential-guest-support.c b/backends/confidential-guest-support.c
index 052fde8..8ff7bfa 100644
--- a/backends/confidential-guest-support.c
+++ b/backends/confidential-guest-support.c
@@ -13,14 +13,15 @@
#include "qemu/osdep.h"
-#include "exec/confidential-guest-support.h"
+#include "system/confidential-guest-support.h"
OBJECT_DEFINE_ABSTRACT_TYPE(ConfidentialGuestSupport,
confidential_guest_support,
CONFIDENTIAL_GUEST_SUPPORT,
OBJECT)
-static void confidential_guest_support_class_init(ObjectClass *oc, void *data)
+static void confidential_guest_support_class_init(ObjectClass *oc,
+ const void *data)
{
}
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
index 940104e..0414c01 100644
--- a/backends/cryptodev-builtin.c
+++ b/backends/cryptodev-builtin.c
@@ -22,7 +22,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/cryptodev.h"
+#include "system/cryptodev.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_crypto.h"
@@ -64,11 +64,11 @@ static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend)
{
QCryptoAkCipherOptions opts;
- opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
- opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
+ opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA;
+ opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW;
if (qcrypto_akcipher_supports(&opts)) {
backend->conf.crypto_services |=
- (1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER);
+ (1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER);
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
}
}
@@ -93,9 +93,9 @@ static void cryptodev_builtin_init(
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
- 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
- 1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
- 1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
/*
@@ -138,18 +138,18 @@ cryptodev_builtin_get_aes_algo(uint32_t key_len, int mode, Error **errp)
int algo;
if (key_len == AES_KEYSIZE_128) {
- algo = QCRYPTO_CIPHER_ALG_AES_128;
+ algo = QCRYPTO_CIPHER_ALGO_AES_128;
} else if (key_len == AES_KEYSIZE_192) {
- algo = QCRYPTO_CIPHER_ALG_AES_192;
+ algo = QCRYPTO_CIPHER_ALGO_AES_192;
} else if (key_len == AES_KEYSIZE_256) { /* equals AES_KEYSIZE_128_XTS */
if (mode == QCRYPTO_CIPHER_MODE_XTS) {
- algo = QCRYPTO_CIPHER_ALG_AES_128;
+ algo = QCRYPTO_CIPHER_ALGO_AES_128;
} else {
- algo = QCRYPTO_CIPHER_ALG_AES_256;
+ algo = QCRYPTO_CIPHER_ALGO_AES_256;
}
} else if (key_len == AES_KEYSIZE_256_XTS) {
if (mode == QCRYPTO_CIPHER_MODE_XTS) {
- algo = QCRYPTO_CIPHER_ALG_AES_256;
+ algo = QCRYPTO_CIPHER_ALGO_AES_256;
} else {
goto err;
}
@@ -169,16 +169,16 @@ static int cryptodev_builtin_get_rsa_hash_algo(
{
switch (virtio_rsa_hash) {
case VIRTIO_CRYPTO_RSA_MD5:
- return QCRYPTO_HASH_ALG_MD5;
+ return QCRYPTO_HASH_ALGO_MD5;
case VIRTIO_CRYPTO_RSA_SHA1:
- return QCRYPTO_HASH_ALG_SHA1;
+ return QCRYPTO_HASH_ALGO_SHA1;
case VIRTIO_CRYPTO_RSA_SHA256:
- return QCRYPTO_HASH_ALG_SHA256;
+ return QCRYPTO_HASH_ALGO_SHA256;
case VIRTIO_CRYPTO_RSA_SHA512:
- return QCRYPTO_HASH_ALG_SHA512;
+ return QCRYPTO_HASH_ALGO_SHA512;
default:
error_setg(errp, "Unsupported rsa hash algo: %d", virtio_rsa_hash);
@@ -200,12 +200,12 @@ static int cryptodev_builtin_set_rsa_options(
return -1;
}
opt->hash_alg = hash_alg;
- opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1;
+ opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1;
return 0;
}
if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_RAW_PADDING) {
- opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
+ opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW;
return 0;
}
@@ -271,15 +271,15 @@ static int cryptodev_builtin_create_cipher_session(
break;
case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
mode = QCRYPTO_CIPHER_MODE_ECB;
- algo = QCRYPTO_CIPHER_ALG_3DES;
+ algo = QCRYPTO_CIPHER_ALGO_3DES;
break;
case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
mode = QCRYPTO_CIPHER_MODE_CBC;
- algo = QCRYPTO_CIPHER_ALG_3DES;
+ algo = QCRYPTO_CIPHER_ALGO_3DES;
break;
case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
mode = QCRYPTO_CIPHER_MODE_CTR;
- algo = QCRYPTO_CIPHER_ALG_3DES;
+ algo = QCRYPTO_CIPHER_ALGO_3DES;
break;
default:
error_setg(errp, "Unsupported cipher alg :%u",
@@ -318,7 +318,7 @@ static int cryptodev_builtin_create_akcipher_session(
switch (sess_info->algo) {
case VIRTIO_CRYPTO_AKCIPHER_RSA:
- opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
+ opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA;
if (cryptodev_builtin_set_rsa_options(sess_info->u.rsa.padding_algo,
sess_info->u.rsa.hash_algo, &opts.u.rsa, errp) != 0) {
return -1;
@@ -334,11 +334,11 @@ static int cryptodev_builtin_create_akcipher_session(
switch (sess_info->keytype) {
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
- type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+ type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC;
break;
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
- type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+ type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE;
break;
default:
@@ -549,7 +549,7 @@ static int cryptodev_builtin_operation(
CryptoDevBackendBuiltinSession *sess;
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
- QCryptodevBackendAlgType algtype = op_info->algtype;
+ QCryptodevBackendAlgoType algtype = op_info->algtype;
int status = -VIRTIO_CRYPTO_ERR;
Error *local_error = NULL;
@@ -561,11 +561,11 @@ static int cryptodev_builtin_operation(
}
sess = builtin->sessions[op_info->session_id];
- if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
+ if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) {
sym_op_info = op_info->u.sym_op_info;
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
&local_error);
- } else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) {
asym_op_info = op_info->u.asym_op_info;
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, &local_error);
@@ -608,7 +608,7 @@ static void cryptodev_builtin_cleanup(
}
static void
-cryptodev_builtin_class_init(ObjectClass *oc, void *data)
+cryptodev_builtin_class_init(ObjectClass *oc, const void *data)
{
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
diff --git a/backends/cryptodev-hmp-cmds.c b/backends/cryptodev-hmp-cmds.c
index 4f7220b..01396d2 100644
--- a/backends/cryptodev-hmp-cmds.c
+++ b/backends/cryptodev-hmp-cmds.c
@@ -14,7 +14,7 @@
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qapi-commands-cryptodev.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
void hmp_info_cryptodev(Monitor *mon, const QDict *qdict)
diff --git a/backends/cryptodev-lkcf.c b/backends/cryptodev-lkcf.c
index 45aba1f..bb7a81d 100644
--- a/backends/cryptodev-lkcf.c
+++ b/backends/cryptodev-lkcf.c
@@ -30,7 +30,7 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qom/object.h"
-#include "sysemu/cryptodev.h"
+#include "system/cryptodev.h"
#include "standard-headers/linux/virtio_crypto.h"
#include <keyutils.h>
@@ -133,20 +133,20 @@ static int cryptodev_lkcf_set_op_desc(QCryptoAkCipherOptions *opts,
Error **errp)
{
QCryptoAkCipherOptionsRSA *rsa_opt;
- if (opts->alg != QCRYPTO_AKCIPHER_ALG_RSA) {
+ if (opts->alg != QCRYPTO_AK_CIPHER_ALGO_RSA) {
error_setg(errp, "Unsupported alg: %u", opts->alg);
return -1;
}
rsa_opt = &opts->u.rsa;
- if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALG_PKCS1) {
+ if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALGO_PKCS1) {
snprintf(key_desc, desc_len, "enc=%s hash=%s",
- QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg),
- QCryptoHashAlgorithm_str(rsa_opt->hash_alg));
+ QCryptoRSAPaddingAlgo_str(rsa_opt->padding_alg),
+ QCryptoHashAlgo_str(rsa_opt->hash_alg));
} else {
snprintf(key_desc, desc_len, "enc=%s",
- QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg));
+ QCryptoRSAPaddingAlgo_str(rsa_opt->padding_alg));
}
return 0;
}
@@ -157,23 +157,23 @@ static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg,
Error **errp)
{
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) {
- opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1;
+ opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1;
switch (virtio_hash_alg) {
case VIRTIO_CRYPTO_RSA_MD5:
- opt->hash_alg = QCRYPTO_HASH_ALG_MD5;
+ opt->hash_alg = QCRYPTO_HASH_ALGO_MD5;
break;
case VIRTIO_CRYPTO_RSA_SHA1:
- opt->hash_alg = QCRYPTO_HASH_ALG_SHA1;
+ opt->hash_alg = QCRYPTO_HASH_ALGO_SHA1;
break;
case VIRTIO_CRYPTO_RSA_SHA256:
- opt->hash_alg = QCRYPTO_HASH_ALG_SHA256;
+ opt->hash_alg = QCRYPTO_HASH_ALGO_SHA256;
break;
case VIRTIO_CRYPTO_RSA_SHA512:
- opt->hash_alg = QCRYPTO_HASH_ALG_SHA512;
+ opt->hash_alg = QCRYPTO_HASH_ALGO_SHA512;
break;
default:
@@ -184,7 +184,7 @@ static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg,
}
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_RAW_PADDING) {
- opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
+ opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW;
return 0;
}
@@ -230,7 +230,7 @@ static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
- 1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
lkcf->running = true;
@@ -322,7 +322,7 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task)
* 2. generally, public key related computation is fast, just compute it with
* thread-pool.
*/
- if (session->keytype == QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE) {
+ if (session->keytype == QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE) {
if (qcrypto_akcipher_export_p8info(&session->akcipher_opts,
session->key, session->keylen,
&p8info, &p8info_len,
@@ -330,6 +330,8 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task)
cryptodev_lkcf_set_op_desc(&session->akcipher_opts, op_desc,
sizeof(op_desc), &local_error) != 0) {
error_report_err(local_error);
+ status = -VIRTIO_CRYPTO_ERR;
+ goto out;
} else {
key_id = add_key(KCTL_KEY_TYPE_PKEY, "lkcf-backend-priv-key",
p8info, p8info_len, KCTL_KEY_RING);
@@ -346,6 +348,7 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task)
session->key, session->keylen,
&local_error);
if (!akcipher) {
+ error_report_err(local_error);
status = -VIRTIO_CRYPTO_ERR;
goto out;
}
@@ -474,7 +477,7 @@ static int cryptodev_lkcf_operation(
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *sess;
- QCryptodevBackendAlgType algtype = op_info->algtype;
+ QCryptodevBackendAlgoType algtype = op_info->algtype;
CryptoDevLKCFTask *task;
if (op_info->session_id >= MAX_SESSIONS ||
@@ -485,7 +488,7 @@ static int cryptodev_lkcf_operation(
}
sess = lkcf->sess[op_info->session_id];
- if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) {
+ if (algtype != QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) {
error_report("algtype not supported: %u", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
@@ -518,7 +521,7 @@ static int cryptodev_lkcf_create_asym_session(
switch (sess_info->algo) {
case VIRTIO_CRYPTO_AKCIPHER_RSA:
- sess->akcipher_opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
+ sess->akcipher_opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA;
if (cryptodev_lkcf_set_rsa_opt(
sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo,
&sess->akcipher_opts.u.rsa, &local_error) != 0) {
@@ -534,11 +537,11 @@ static int cryptodev_lkcf_create_asym_session(
switch (sess_info->keytype) {
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
- sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+ sess->keytype = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC;
break;
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
- sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+ sess->keytype = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE;
break;
default:
@@ -616,7 +619,7 @@ static int cryptodev_lkcf_close_session(CryptoDevBackend *backend,
return 0;
}
-static void cryptodev_lkcf_class_init(ObjectClass *oc, void *data)
+static void cryptodev_lkcf_class_init(ObjectClass *oc, const void *data)
{
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c
index c3283ba..cb04e68 100644
--- a/backends/cryptodev-vhost-user.c
+++ b/backends/cryptodev-vhost-user.c
@@ -27,9 +27,9 @@
#include "qemu/error-report.h"
#include "hw/virtio/vhost-user.h"
#include "standard-headers/linux/virtio_crypto.h"
-#include "sysemu/cryptodev-vhost.h"
+#include "system/cryptodev-vhost.h"
#include "chardev/char-fe.h"
-#include "sysemu/cryptodev-vhost-user.h"
+#include "system/cryptodev-vhost-user.h"
#include "qom/object.h"
@@ -221,9 +221,9 @@ static void cryptodev_vhost_user_init(
cryptodev_vhost_user_event, NULL, s, NULL, true);
backend->conf.crypto_services =
- 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
- 1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
- 1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
@@ -281,8 +281,7 @@ static int cryptodev_vhost_user_create_session(
break;
default:
- error_setg(&local_error, "Unsupported opcode :%" PRIu32 "",
- sess_info->op_code);
+ error_report("Unsupported opcode :%" PRIu32 "", sess_info->op_code);
return -VIRTIO_CRYPTO_NOTSUPP;
}
@@ -394,7 +393,7 @@ static void cryptodev_vhost_user_finalize(Object *obj)
}
static void
-cryptodev_vhost_user_class_init(ObjectClass *oc, void *data)
+cryptodev_vhost_user_class_init(ObjectClass *oc, const void *data)
{
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c
index 9352373..943680a 100644
--- a/backends/cryptodev-vhost.c
+++ b/backends/cryptodev-vhost.c
@@ -24,13 +24,13 @@
#include "qemu/osdep.h"
#include "hw/virtio/virtio-bus.h"
-#include "sysemu/cryptodev-vhost.h"
+#include "system/cryptodev-vhost.h"
#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
-#include "sysemu/cryptodev-vhost-user.h"
+#include "system/cryptodev-vhost-user.h"
uint64_t
cryptodev_vhost_get_max_queues(
@@ -53,7 +53,7 @@ cryptodev_vhost_init(
CryptoDevBackendVhost *crypto;
Error *local_err = NULL;
- crypto = g_new(CryptoDevBackendVhost, 1);
+ crypto = g_new0(CryptoDevBackendVhost, 1);
crypto->dev.max_queues = 1;
crypto->dev.nvqs = 1;
crypto->dev.vqs = crypto->vqs;
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
index fff89fd..79f8882 100644
--- a/backends/cryptodev.c
+++ b/backends/cryptodev.c
@@ -22,8 +22,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/cryptodev.h"
-#include "sysemu/stats.h"
+#include "system/cryptodev.h"
+#include "system/stats.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-cryptodev.h"
#include "qapi/qapi-types-stats.h"
@@ -74,7 +74,7 @@ static int qmp_query_cryptodev_foreach(Object *obj, void *data)
backend = CRYPTODEV_BACKEND(obj);
services = backend->conf.crypto_services;
- for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) {
+ for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE_TYPE__MAX; i++) {
if (services & (1 << i)) {
QAPI_LIST_PREPEND(info->service, i);
}
@@ -97,7 +97,7 @@ static int qmp_query_cryptodev_foreach(Object *obj, void *data)
QCryptodevInfoList *qmp_query_cryptodev(Error **errp)
{
QCryptodevInfoList *list = NULL;
- Object *objs = container_get(object_get_root(), "/objects");
+ Object *objs = object_get_container("objects");
object_child_foreach(objs, qmp_query_cryptodev_foreach, &list);
@@ -185,10 +185,10 @@ static int cryptodev_backend_operation(
static int cryptodev_backend_account(CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
{
- enum QCryptodevBackendAlgType algtype = op_info->algtype;
+ enum QCryptodevBackendAlgoType algtype = op_info->algtype;
int len;
- if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
+ if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) {
CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
len = asym_op_info->src_len;
@@ -212,7 +212,7 @@ static int cryptodev_backend_account(CryptoDevBackend *backend,
default:
return -VIRTIO_CRYPTO_NOTSUPP;
}
- } else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
+ } else if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) {
CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
len = sym_op_info->src_len;
@@ -424,11 +424,11 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp)
}
services = backend->conf.crypto_services;
- if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
+ if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER)) {
backend->sym_stat = g_new0(CryptodevBackendSymStat, 1);
}
- if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
+ if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER)) {
backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1);
}
}
@@ -557,7 +557,7 @@ static void cryptodev_backend_stats_cb(StatsResultList **result,
switch (target) {
case STATS_TARGET_CRYPTODEV:
{
- Object *objs = container_get(object_get_root(), "/objects");
+ Object *objs = object_get_container("objects");
StatsArgs stats_args;
stats_args.result.stats = result;
stats_args.names = names;
@@ -608,7 +608,7 @@ static void cryptodev_backend_schemas_cb(StatsSchemaList **result,
}
static void
-cryptodev_backend_class_init(ObjectClass *oc, void *data)
+cryptodev_backend_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -641,7 +641,7 @@ static const TypeInfo cryptodev_backend_info = {
.instance_finalize = cryptodev_backend_finalize,
.class_size = sizeof(CryptoDevBackendClass),
.class_init = cryptodev_backend_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c
index be6c4d8..7d5b58b 100644
--- a/backends/dbus-vmstate.c
+++ b/backends/dbus-vmstate.c
@@ -485,7 +485,7 @@ dbus_vmstate_get_id(VMStateIf *vmif)
}
static void
-dbus_vmstate_class_init(ObjectClass *oc, void *data)
+dbus_vmstate_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
VMStateIfClass *vc = VMSTATE_IF_CLASS(oc);
@@ -505,7 +505,7 @@ static const TypeInfo dbus_vmstate_info = {
.instance_size = sizeof(DBusVMState),
.instance_finalize = dbus_vmstate_finalize,
.class_init = dbus_vmstate_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ TYPE_VMSTATE_IF },
{ }
diff --git a/backends/host_iommu_device.c b/backends/host_iommu_device.c
index 8f2dda1..f6965e4 100644
--- a/backends/host_iommu_device.c
+++ b/backends/host_iommu_device.c
@@ -10,14 +10,14 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/host_iommu_device.h"
+#include "system/host_iommu_device.h"
OBJECT_DEFINE_ABSTRACT_TYPE(HostIOMMUDevice,
host_iommu_device,
HOST_IOMMU_DEVICE,
OBJECT)
-static void host_iommu_device_class_init(ObjectClass *oc, void *data)
+static void host_iommu_device_class_init(ObjectClass *oc, const void *data)
{
}
diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c
index 6c024d6..ab20b18 100644
--- a/backends/hostmem-epc.c
+++ b/backends/hostmem-epc.c
@@ -14,7 +14,7 @@
#include <sys/ioctl.h>
#include "qom/object_interfaces.h"
#include "qapi/error.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "hw/i386/hostmem-epc.h"
static bool
@@ -36,7 +36,7 @@ sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
backend->aligned = true;
name = object_get_canonical_path(OBJECT(backend));
- ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED;
+ ram_flags = (backend->share ? RAM_SHARED : RAM_PRIVATE) | RAM_PROTECTED;
return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, fd, 0, errp);
}
@@ -50,7 +50,7 @@ static void sgx_epc_backend_instance_init(Object *obj)
m->dump = false;
}
-static void sgx_epc_backend_class_init(ObjectClass *oc, void *data)
+static void sgx_epc_backend_class_init(ObjectClass *oc, const void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c
index 7e5072e..8e3219c 100644
--- a/backends/hostmem-file.c
+++ b/backends/hostmem-file.c
@@ -15,7 +15,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/madvise.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qom/object_interfaces.h"
#include "qom/object.h"
#include "qapi/visitor.h"
@@ -82,7 +82,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
backend->aligned = true;
name = host_memory_backend_get_name(backend);
- ram_flags = backend->share ? RAM_SHARED : 0;
+ ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE;
ram_flags |= fb->readonly ? RAM_READONLY_FD : 0;
ram_flags |= fb->rom == ON_OFF_AUTO_ON ? RAM_READONLY : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
@@ -270,7 +270,7 @@ static void file_backend_unparent(Object *obj)
}
static void
-file_backend_class_init(ObjectClass *oc, void *data)
+file_backend_class_init(ObjectClass *oc, const void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c
index 6a3c89a..923239f 100644
--- a/backends/hostmem-memfd.c
+++ b/backends/hostmem-memfd.c
@@ -11,14 +11,13 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qom/object_interfaces.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qom/object.h"
-
-#define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd"
+#include "migration/cpr.h"
OBJECT_DECLARE_SIMPLE_TYPE(HostMemoryBackendMemfd, MEMORY_BACKEND_MEMFD)
@@ -35,15 +34,19 @@ static bool
memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
- g_autofree char *name = NULL;
+ g_autofree char *name = host_memory_backend_get_name(backend);
+ int fd = cpr_find_fd(name, 0);
uint32_t ram_flags;
- int fd;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return false;
}
+ if (fd >= 0) {
+ goto have_fd;
+ }
+
fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size,
m->hugetlb, m->hugetlbsize, m->seal ?
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0,
@@ -51,10 +54,11 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
if (fd == -1) {
return false;
}
+ cpr_save_fd(name, 0, fd);
+have_fd:
backend->aligned = true;
- name = host_memory_backend_get_name(backend);
- ram_flags = backend->share ? RAM_SHARED : 0;
+ ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
ram_flags |= backend->guest_memfd ? RAM_GUEST_MEMFD : 0;
return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
@@ -129,7 +133,7 @@ memfd_backend_instance_init(Object *obj)
}
static void
-memfd_backend_class_init(ObjectClass *oc, void *data)
+memfd_backend_class_init(ObjectClass *oc, const void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
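Illustrative note, not part of the patch: the memfd backend (and the shm backend below) now looks up a descriptor preserved across a CPR transfer before creating a new one, and registers newly created descriptors so the next transfer can find them. The pattern, as a sketch with a hypothetical create_backing_fd() allocator:

    /* Sketch of the reuse-or-create pattern added to the memory backends. */
    static int get_backend_fd(const char *name, Error **errp)
    {
        int fd = cpr_find_fd(name, 0);        /* preserved from a previous QEMU? */

        if (fd < 0) {
            fd = create_backing_fd(errp);     /* hypothetical allocator */
            if (fd < 0) {
                return -1;
            }
            cpr_save_fd(name, 0, fd);         /* make it visible to the next CPR */
        }
        return fd;
    }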
diff --git a/backends/hostmem-ram.c b/backends/hostmem-ram.c
index f7d81af..062b1ab 100644
--- a/backends/hostmem-ram.c
+++ b/backends/hostmem-ram.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
@@ -28,7 +28,7 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
}
name = host_memory_backend_get_name(backend);
- ram_flags = backend->share ? RAM_SHARED : 0;
+ ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
ram_flags |= backend->guest_memfd ? RAM_GUEST_MEMFD : 0;
return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend),
@@ -37,7 +37,7 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
}
static void
-ram_backend_class_init(ObjectClass *oc, void *data)
+ram_backend_class_init(ObjectClass *oc, const void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
diff --git a/backends/hostmem-shm.c b/backends/hostmem-shm.c
index 374edc3..f66211a 100644
--- a/backends/hostmem-shm.c
+++ b/backends/hostmem-shm.c
@@ -11,8 +11,9 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qapi/error.h"
+#include "migration/cpr.h"
#define TYPE_MEMORY_BACKEND_SHM "memory-backend-shm"
@@ -25,11 +26,9 @@ struct HostMemoryBackendShm {
static bool
shm_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
- g_autoptr(GString) shm_name = g_string_new(NULL);
- g_autofree char *backend_name = NULL;
+ g_autofree char *backend_name = host_memory_backend_get_name(backend);
uint32_t ram_flags;
- int fd, oflag;
- mode_t mode;
+ int fd = cpr_find_fd(backend_name, 0);
if (!backend->size) {
error_setg(errp, "can't create shm backend with size 0");
@@ -41,48 +40,18 @@ shm_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
return false;
}
- /*
- * Let's use `mode = 0` because we don't want other processes to open our
- * memory unless we share the file descriptor with them.
- */
- mode = 0;
- oflag = O_RDWR | O_CREAT | O_EXCL;
- backend_name = host_memory_backend_get_name(backend);
-
- /*
- * Some operating systems allow creating anonymous POSIX shared memory
- * objects (e.g. FreeBSD provides the SHM_ANON constant), but this is not
- * defined by POSIX, so let's create a unique name.
- *
- * From Linux's shm_open(3) man-page:
- * For portable use, a shared memory object should be identified
- * by a name of the form /somename;"
- */
- g_string_printf(shm_name, "/qemu-" FMT_pid "-shm-%s", getpid(),
- backend_name);
-
- fd = shm_open(shm_name->str, oflag, mode);
- if (fd < 0) {
- error_setg_errno(errp, errno,
- "failed to create POSIX shared memory");
- return false;
+ if (fd >= 0) {
+ goto have_fd;
}
- /*
- * We have the file descriptor, so we no longer need to expose the
- * POSIX shared memory object. However it will remain allocated as long as
- * there are file descriptors pointing to it.
- */
- shm_unlink(shm_name->str);
-
- if (ftruncate(fd, backend->size) == -1) {
- error_setg_errno(errp, errno,
- "failed to resize POSIX shared memory to %" PRIu64,
- backend->size);
- close(fd);
+ fd = qemu_shm_alloc(backend->size, errp);
+ if (fd < 0) {
return false;
}
+ cpr_save_fd(backend_name, 0, fd);
+have_fd:
+ /* Let's do the same as memory-backend-ram,share=on would do. */
ram_flags = RAM_SHARED;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
@@ -100,7 +69,7 @@ shm_backend_instance_init(Object *obj)
}
static void
-shm_backend_class_init(ObjectClass *oc, void *data)
+shm_backend_class_init(ObjectClass *oc, const void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
diff --git a/backends/hostmem.c b/backends/hostmem.c
index 4e5576a..35734d6 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
@@ -178,7 +178,7 @@ static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
return;
}
- if (!host_memory_backend_mr_inited(backend) &&
+ if (host_memory_backend_mr_inited(backend) &&
value != backend->merge) {
void *ptr = memory_region_get_ram_ptr(&backend->mr);
uint64_t sz = memory_region_size(&backend->mr);
@@ -501,7 +501,7 @@ host_memory_backend_set_use_canonical_path(Object *obj, bool value,
}
static void
-host_memory_backend_class_init(ObjectClass *oc, void *data)
+host_memory_backend_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -586,7 +586,7 @@ static const TypeInfo host_memory_backend_info = {
.instance_size = sizeof(HostMemoryBackend),
.instance_init = host_memory_backend_init,
.instance_post_init = host_memory_backend_post_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/backends/iommufd.c b/backends/iommufd.c
index cabd1b5..c2c47ab 100644
--- a/backends/iommufd.c
+++ b/backends/iommufd.c
@@ -11,13 +11,14 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/iommufd.h"
+#include "system/iommufd.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"
+#include "hw/vfio/vfio-device.h"
#include <sys/ioctl.h>
#include <linux/iommufd.h>
@@ -63,7 +64,7 @@ static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
return !be->users;
}
-static void iommufd_backend_class_init(ObjectClass *oc, void *data)
+static void iommufd_backend_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -166,8 +167,6 @@ int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
/* TODO: Not support mapping hardware PCI BAR region for now. */
if (errno == EFAULT) {
warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
- } else {
- error_report("IOMMU_IOAS_MAP failed: %m");
}
}
return ret;
@@ -202,14 +201,95 @@ int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
if (ret) {
ret = -errno;
- error_report("IOMMU_IOAS_UNMAP failed: %m");
}
return ret;
}
+bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id,
+ uint32_t pt_id, uint32_t flags,
+ uint32_t data_type, uint32_t data_len,
+ void *data_ptr, uint32_t *out_hwpt,
+ Error **errp)
+{
+ int ret, fd = be->fd;
+ struct iommu_hwpt_alloc alloc_hwpt = {
+ .size = sizeof(struct iommu_hwpt_alloc),
+ .flags = flags,
+ .dev_id = dev_id,
+ .pt_id = pt_id,
+ .data_type = data_type,
+ .data_len = data_len,
+ .data_uptr = (uintptr_t)data_ptr,
+ };
+
+ ret = ioctl(fd, IOMMU_HWPT_ALLOC, &alloc_hwpt);
+ trace_iommufd_backend_alloc_hwpt(fd, dev_id, pt_id, flags, data_type,
+ data_len, (uintptr_t)data_ptr,
+ alloc_hwpt.out_hwpt_id, ret);
+ if (ret) {
+ error_setg_errno(errp, errno, "Failed to allocate hwpt");
+ return false;
+ }
+
+ *out_hwpt = alloc_hwpt.out_hwpt_id;
+ return true;
+}
+
+bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be,
+ uint32_t hwpt_id, bool start,
+ Error **errp)
+{
+ int ret;
+ struct iommu_hwpt_set_dirty_tracking set_dirty = {
+ .size = sizeof(set_dirty),
+ .hwpt_id = hwpt_id,
+ .flags = start ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
+ };
+
+ ret = ioctl(be->fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set_dirty);
+ trace_iommufd_backend_set_dirty(be->fd, hwpt_id, start, ret ? errno : 0);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "IOMMU_HWPT_SET_DIRTY_TRACKING(hwpt_id %u) failed",
+ hwpt_id);
+ return false;
+ }
+
+ return true;
+}
+
+bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
+ uint32_t hwpt_id,
+ uint64_t iova, ram_addr_t size,
+ uint64_t page_size, uint64_t *data,
+ Error **errp)
+{
+ int ret;
+ struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap = {
+ .size = sizeof(get_dirty_bitmap),
+ .hwpt_id = hwpt_id,
+ .iova = iova,
+ .length = size,
+ .page_size = page_size,
+ .data = (uintptr_t)data,
+ };
+
+ ret = ioctl(be->fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get_dirty_bitmap);
+ trace_iommufd_backend_get_dirty_bitmap(be->fd, hwpt_id, iova, size,
+ page_size, ret ? errno : 0);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "IOMMU_HWPT_GET_DIRTY_BITMAP (iova: 0x%"HWADDR_PRIx
+ " size: 0x"RAM_ADDR_FMT") failed", iova, size);
+ return false;
+ }
+
+ return true;
+}
+
bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
uint32_t *type, void *data, uint32_t len,
- Error **errp)
+ uint64_t *caps, Error **errp)
{
struct iommu_hw_info info = {
.size = sizeof(info),
@@ -225,10 +305,68 @@ bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
g_assert(type);
*type = info.out_data_type;
+ g_assert(caps);
+ *caps = info.out_capabilities;
return true;
}
+bool iommufd_backend_invalidate_cache(IOMMUFDBackend *be, uint32_t id,
+ uint32_t data_type, uint32_t entry_len,
+ uint32_t *entry_num, void *data,
+ Error **errp)
+{
+ int ret, fd = be->fd;
+ uint32_t total_entries = *entry_num;
+ struct iommu_hwpt_invalidate cache = {
+ .size = sizeof(cache),
+ .hwpt_id = id,
+ .data_type = data_type,
+ .entry_len = entry_len,
+ .entry_num = total_entries,
+ .data_uptr = (uintptr_t)data,
+ };
+
+ ret = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cache);
+ trace_iommufd_backend_invalidate_cache(fd, id, data_type, entry_len,
+ total_entries, cache.entry_num,
+ (uintptr_t)data, ret ? errno : 0);
+ *entry_num = cache.entry_num;
+
+ if (ret) {
+ error_setg_errno(errp, errno, "IOMMU_HWPT_INVALIDATE failed:"
+ " total %d entries, processed %d entries",
+ total_entries, cache.entry_num);
+ } else if (total_entries != cache.entry_num) {
+ error_setg(errp, "IOMMU_HWPT_INVALIDATE succeed but with unprocessed"
+ " entries: total %d entries, processed %d entries."
+ " Kernel BUG?!", total_entries, cache.entry_num);
+ return false;
+ }
+
+ return !ret;
+}
+
+bool host_iommu_device_iommufd_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ uint32_t hwpt_id, Error **errp)
+{
+ HostIOMMUDeviceIOMMUFDClass *idevc =
+ HOST_IOMMU_DEVICE_IOMMUFD_GET_CLASS(idev);
+
+ g_assert(idevc->attach_hwpt);
+ return idevc->attach_hwpt(idev, hwpt_id, errp);
+}
+
+bool host_iommu_device_iommufd_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ Error **errp)
+{
+ HostIOMMUDeviceIOMMUFDClass *idevc =
+ HOST_IOMMU_DEVICE_IOMMUFD_GET_CLASS(idev);
+
+ g_assert(idevc->detach_hwpt);
+ return idevc->detach_hwpt(idev, errp);
+}
+
static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp)
{
HostIOMMUDeviceCaps *caps = &hiod->caps;
@@ -237,14 +375,14 @@ static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp)
case HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE:
return caps->type;
case HOST_IOMMU_DEVICE_CAP_AW_BITS:
- return caps->aw_bits;
+ return vfio_device_get_aw_bits(hiod->agent);
default:
error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
return -EINVAL;
}
}
-static void hiod_iommufd_class_init(ObjectClass *oc, void *data)
+static void hiod_iommufd_class_init(ObjectClass *oc, const void *data)
{
HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);
@@ -260,13 +398,15 @@ static const TypeInfo types[] = {
.instance_finalize = iommufd_backend_finalize,
.class_size = sizeof(IOMMUFDBackendClass),
.class_init = iommufd_backend_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
}, {
.name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD,
.parent = TYPE_HOST_IOMMU_DEVICE,
+ .instance_size = sizeof(HostIOMMUDeviceIOMMUFD),
+ .class_size = sizeof(HostIOMMUDeviceIOMMUFDClass),
.class_init = hiod_iommufd_class_init,
.abstract = true,
}
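Illustrative note, not part of the patch: the new iommufd helpers are intended to be called in sequence, allocating a hardware page table, enabling dirty tracking on it, and then reading the dirty bitmap. A condensed caller sketch; the IOMMU_HWPT_ALLOC_DIRTY_TRACKING flag and IOMMU_HWPT_DATA_NONE type are assumed to come from <linux/iommufd.h>:

    /* Sketch of the expected call order; error handling trimmed. */
    static bool track_dirty(IOMMUFDBackend *be, uint32_t dev_id, uint32_t ioas_id,
                            uint64_t iova, ram_addr_t size, uint64_t page_size,
                            uint64_t *bitmap, Error **errp)
    {
        uint32_t hwpt_id;

        if (!iommufd_backend_alloc_hwpt(be, dev_id, ioas_id,
                                        IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
                                        IOMMU_HWPT_DATA_NONE, 0, NULL,
                                        &hwpt_id, errp)) {
            return false;
        }
        if (!iommufd_backend_set_dirty_tracking(be, hwpt_id, true, errp)) {
            return false;
        }
        return iommufd_backend_get_dirty_bitmap(be, hwpt_id, iova, size,
                                                page_size, bitmap, errp);
    }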
diff --git a/backends/meson.build b/backends/meson.build
index 749b491..9b88d22 100644
--- a/backends/meson.build
+++ b/backends/meson.build
@@ -12,8 +12,10 @@ system_ss.add([files(
if host_os != 'windows'
system_ss.add(files('rng-random.c'))
- system_ss.add(files('hostmem-file.c'))
- system_ss.add([files('hostmem-shm.c'), rt])
+ if host_os != 'emscripten'
+ system_ss.add(files('hostmem-file.c'))
+ system_ss.add([files('hostmem-shm.c'), rt])
+ endif
endif
if host_os == 'linux'
system_ss.add(files('hostmem-memfd.c'))
@@ -33,4 +35,6 @@ endif
system_ss.add(when: gio, if_true: files('dbus-vmstate.c'))
system_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c'))
+system_ss.add(when: 'CONFIG_SPDM_SOCKET', if_true: files('spdm-socket.c'))
+
subdir('tpm')
diff --git a/backends/rng-builtin.c b/backends/rng-builtin.c
index f367eb6..41b7bfa 100644
--- a/backends/rng-builtin.c
+++ b/backends/rng-builtin.c
@@ -6,11 +6,11 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/rng.h"
+#include "system/rng.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qom/object.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
OBJECT_DECLARE_SIMPLE_TYPE(RngBuiltin, RNG_BUILTIN)
@@ -55,7 +55,7 @@ static void rng_builtin_finalize(Object *obj)
qemu_bh_delete(s->bh);
}
-static void rng_builtin_class_init(ObjectClass *klass, void *data)
+static void rng_builtin_class_init(ObjectClass *klass, const void *data)
{
RngBackendClass *rbc = RNG_BACKEND_CLASS(klass);
diff --git a/backends/rng-egd.c b/backends/rng-egd.c
index 684c3cf..9fd3393 100644
--- a/backends/rng-egd.c
+++ b/backends/rng-egd.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/rng.h"
+#include "system/rng.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
@@ -143,7 +143,7 @@ static void rng_egd_finalize(Object *obj)
g_free(s->chr_name);
}
-static void rng_egd_class_init(ObjectClass *klass, void *data)
+static void rng_egd_class_init(ObjectClass *klass, const void *data)
{
RngBackendClass *rbc = RNG_BACKEND_CLASS(klass);
diff --git a/backends/rng-random.c b/backends/rng-random.c
index 489c091..820bf48 100644
--- a/backends/rng-random.c
+++ b/backends/rng-random.c
@@ -11,8 +11,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/rng-random.h"
-#include "sysemu/rng.h"
+#include "system/rng-random.h"
+#include "system/rng.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
@@ -121,7 +121,7 @@ static void rng_random_finalize(Object *obj)
g_free(s->filename);
}
-static void rng_random_class_init(ObjectClass *klass, void *data)
+static void rng_random_class_init(ObjectClass *klass, const void *data)
{
RngBackendClass *rbc = RNG_BACKEND_CLASS(klass);
diff --git a/backends/rng.c b/backends/rng.c
index 9bbd0c7..ab94dfe 100644
--- a/backends/rng.c
+++ b/backends/rng.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/rng.h"
+#include "system/rng.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
@@ -99,7 +99,7 @@ static void rng_backend_finalize(Object *obj)
rng_backend_free_requests(s);
}
-static void rng_backend_class_init(ObjectClass *oc, void *data)
+static void rng_backend_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -119,7 +119,7 @@ static const TypeInfo rng_backend_info = {
.class_size = sizeof(RngBackendClass),
.class_init = rng_backend_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/backends/spdm-socket.c b/backends/spdm-socket.c
new file mode 100644
index 0000000..2c709c6
--- /dev/null
+++ b/backends/spdm-socket.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * QEMU SPDM socket support
+ *
+ * This is based on:
+ * https://github.com/DMTF/spdm-emu/blob/07c0a838bcc1c6207c656ac75885c0603e344b6f/spdm_emu/spdm_emu_common/command.c
+ * but has been re-written to match QEMU style
+ *
+ * Copyright (c) 2021, DMTF. All rights reserved.
+ * Copyright (c) 2023. Western Digital Corporation or its affiliates.
+ */
+
+#include "qemu/osdep.h"
+#include "system/spdm-socket.h"
+#include "qapi/error.h"
+
+static bool read_bytes(const int socket, uint8_t *buffer,
+ size_t number_of_bytes)
+{
+ ssize_t number_received = 0;
+ ssize_t result;
+
+ while (number_received < number_of_bytes) {
+ result = recv(socket, buffer + number_received,
+ number_of_bytes - number_received, 0);
+ if (result <= 0) {
+ return false;
+ }
+ number_received += result;
+ }
+ return true;
+}
+
+static bool read_data32(const int socket, uint32_t *data)
+{
+ bool result;
+
+ result = read_bytes(socket, (uint8_t *)data, sizeof(uint32_t));
+ if (!result) {
+ return result;
+ }
+ *data = ntohl(*data);
+ return true;
+}
+
+static bool read_multiple_bytes(const int socket, uint8_t *buffer,
+ uint32_t *bytes_received,
+ uint32_t max_buffer_length)
+{
+ uint32_t length;
+ bool result;
+
+ result = read_data32(socket, &length);
+ if (!result) {
+ return result;
+ }
+
+ if (length > max_buffer_length) {
+ return false;
+ }
+
+ if (bytes_received) {
+ *bytes_received = length;
+ }
+
+ if (length == 0) {
+ return true;
+ }
+
+ return read_bytes(socket, buffer, length);
+}
+
+static bool receive_platform_data(const int socket,
+ uint32_t transport_type,
+ uint32_t *command,
+ uint8_t *receive_buffer,
+ uint32_t *bytes_to_receive)
+{
+ bool result;
+ uint32_t response;
+ uint32_t bytes_received;
+
+ result = read_data32(socket, &response);
+ if (!result) {
+ return result;
+ }
+ *command = response;
+
+ result = read_data32(socket, &transport_type);
+ if (!result) {
+ return result;
+ }
+
+ bytes_received = 0;
+ result = read_multiple_bytes(socket, receive_buffer, &bytes_received,
+ *bytes_to_receive);
+ if (!result) {
+ return result;
+ }
+ *bytes_to_receive = bytes_received;
+
+ return result;
+}
+
+static bool write_bytes(const int socket, const uint8_t *buffer,
+ uint32_t number_of_bytes)
+{
+ ssize_t number_sent = 0;
+ ssize_t result;
+
+ while (number_sent < number_of_bytes) {
+ result = send(socket, buffer + number_sent,
+ number_of_bytes - number_sent, 0);
+ if (result == -1) {
+ return false;
+ }
+ number_sent += result;
+ }
+ return true;
+}
+
+static bool write_data32(const int socket, uint32_t data)
+{
+ data = htonl(data);
+ return write_bytes(socket, (uint8_t *)&data, sizeof(uint32_t));
+}
+
+static bool write_multiple_bytes(const int socket, const uint8_t *buffer,
+ uint32_t bytes_to_send)
+{
+ bool result;
+
+ result = write_data32(socket, bytes_to_send);
+ if (!result) {
+ return result;
+ }
+
+ return write_bytes(socket, buffer, bytes_to_send);
+}
+
+static bool send_platform_data(const int socket,
+ uint32_t transport_type, uint32_t command,
+ const uint8_t *send_buffer, size_t bytes_to_send)
+{
+ bool result;
+
+ result = write_data32(socket, command);
+ if (!result) {
+ return result;
+ }
+
+ result = write_data32(socket, transport_type);
+ if (!result) {
+ return result;
+ }
+
+ return write_multiple_bytes(socket, send_buffer, bytes_to_send);
+}
+
+int spdm_socket_connect(uint16_t port, Error **errp)
+{
+ int client_socket;
+ struct sockaddr_in server_addr;
+
+ client_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (client_socket < 0) {
+ error_setg(errp, "cannot create socket: %s", strerror(errno));
+ return -1;
+ }
+
+ memset((char *)&server_addr, 0, sizeof(server_addr));
+ server_addr.sin_family = AF_INET;
+ server_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ server_addr.sin_port = htons(port);
+
+
+ if (connect(client_socket, (struct sockaddr *)&server_addr,
+ sizeof(server_addr)) < 0) {
+ error_setg(errp, "cannot connect: %s", strerror(errno));
+ close(client_socket);
+ return -1;
+ }
+
+ return client_socket;
+}
+
+uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
+ void *req, uint32_t req_len,
+ void *rsp, uint32_t rsp_len)
+{
+ uint32_t command;
+ bool result;
+
+ result = send_platform_data(socket, transport_type,
+ SPDM_SOCKET_COMMAND_NORMAL,
+ req, req_len);
+ if (!result) {
+ return 0;
+ }
+
+ result = receive_platform_data(socket, transport_type, &command,
+ (uint8_t *)rsp, &rsp_len);
+ if (!result) {
+ return 0;
+ }
+
+ assert(command != 0);
+
+ return rsp_len;
+}
+
+void spdm_socket_close(const int socket, uint32_t transport_type)
+{
+ send_platform_data(socket, transport_type,
+ SPDM_SOCKET_COMMAND_SHUTDOWN, NULL, 0);
+}
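Illustrative note, not part of the patch: a device model using this backend connects once and then exchanges request/response pairs over the same socket. A minimal caller sketch; the SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE constant is assumed to be provided by the spdm-socket header:

    /* Sketch: one SPDM exchange against an external responder on localhost. */
    static uint32_t do_spdm_exchange(uint16_t port, void *req, uint32_t req_len,
                                     void *rsp, uint32_t rsp_len, Error **errp)
    {
        int fd = spdm_socket_connect(port, errp);
        uint32_t received;

        if (fd < 0) {
            return 0;
        }
        received = spdm_socket_rsp(fd, SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
                                   req, req_len, rsp, rsp_len);
        spdm_socket_close(fd, SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
        return received;    /* 0 on failure, otherwise bytes of response */
    }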
diff --git a/backends/tpm/tpm_backend.c b/backends/tpm/tpm_backend.c
index 485a20b..8cf8004 100644
--- a/backends/tpm/tpm_backend.c
+++ b/backends/tpm/tpm_backend.c
@@ -13,9 +13,9 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/tpm_backend.h"
+#include "system/tpm_backend.h"
#include "qapi/error.h"
-#include "sysemu/tpm.h"
+#include "system/tpm.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
index 5a8fba9..4a234ab 100644
--- a/backends/tpm/tpm_emulator.c
+++ b/backends/tpm/tpm_emulator.c
@@ -32,9 +32,9 @@
#include "qemu/sockets.h"
#include "qemu/lockable.h"
#include "io/channel-socket.h"
-#include "sysemu/runstate.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
+#include "system/runstate.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
#include "tpm_int.h"
#include "tpm_ioctl.h"
#include "migration/blocker.h"
@@ -72,7 +72,7 @@ struct TPMEmulator {
CharBackend ctrl_chr;
QIOChannel *data_ioc;
TPMVersion tpm_version;
- ptm_cap caps; /* capabilities of the TPM */
+ uint32_t caps; /* capabilities of the TPM */
uint8_t cur_locty_number; /* last set locality */
Error *migration_blocker;
@@ -123,15 +123,17 @@ static const char *tpm_emulator_strerror(uint32_t tpm_result)
}
static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg,
- size_t msg_len_in, size_t msg_len_out)
+ size_t msg_len_in, size_t msg_len_out_err,
+ size_t msg_len_out_total)
{
CharBackend *dev = &tpm->ctrl_chr;
uint32_t cmd_no = cpu_to_be32(cmd);
ssize_t n = sizeof(uint32_t) + msg_len_in;
- uint8_t *buf = NULL;
+ ptm_res res;
WITH_QEMU_LOCK_GUARD(&tpm->mutex) {
- buf = g_alloca(n);
+ g_autofree uint8_t *buf = g_malloc(n);
+
memcpy(buf, &cmd_no, sizeof(cmd_no));
memcpy(buf + sizeof(cmd_no), msg, msg_len_in);
@@ -140,8 +142,25 @@ static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg,
return -1;
}
- if (msg_len_out != 0) {
- n = qemu_chr_fe_read_all(dev, msg, msg_len_out);
+ if (msg_len_out_total > 0) {
+ assert(msg_len_out_total >= msg_len_out_err);
+
+ n = qemu_chr_fe_read_all(dev, (uint8_t *)msg, msg_len_out_err);
+ if (n <= 0) {
+ return -1;
+ }
+ if (msg_len_out_err == msg_len_out_total) {
+ return 0;
+ }
+ /* result error code is always in the first 4 bytes */
+ assert(sizeof(res) <= msg_len_out_err);
+ memcpy(&res, msg, sizeof(res));
+ if (res) {
+ return 0;
+ }
+
+ n = qemu_chr_fe_read_all(dev, (uint8_t *)msg + msg_len_out_err,
+ msg_len_out_total - msg_len_out_err);
if (n <= 0) {
return -1;
}
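/*
 * Illustrative sketch of the new calling convention (the helper name is
 * hypothetical): msg_len_out_err covers the leading ptm_res error code,
 * msg_len_out_total the full response; the remainder is only read when the
 * error code is TPM_SUCCESS.
 */
static int example_init_check(TPMEmulator *tpm_emu)
{
    ptm_init init;

    memset(&init, 0, sizeof(init));
    if (tpm_emulator_ctrlcmd(tpm_emu, CMD_INIT, &init, sizeof(init),
                             sizeof(init.u.resp.tpm_result), /* first read */
                             sizeof(init)) < 0) {            /* total size */
        return -1;
    }
    return be32_to_cpu(init.u.resp.tpm_result) == 0 ? 0 : -1;
}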
@@ -204,7 +223,8 @@ static int tpm_emulator_set_locality(TPMEmulator *tpm_emu, uint8_t locty_number,
memset(&loc, 0, sizeof(loc));
loc.u.req.loc = locty_number;
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_LOCALITY, &loc,
- sizeof(loc), sizeof(loc)) < 0) {
+ sizeof(loc), sizeof(loc.u.resp.tpm_result),
+ sizeof(loc)) < 0) {
error_setg(errp, "tpm-emulator: could not set locality : %s",
strerror(errno));
return -1;
@@ -239,13 +259,16 @@ static void tpm_emulator_handle_request(TPMBackend *tb, TPMBackendCmd *cmd,
static int tpm_emulator_probe_caps(TPMEmulator *tpm_emu)
{
- if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_CAPABILITY,
- &tpm_emu->caps, 0, sizeof(tpm_emu->caps)) < 0) {
+ ptm_cap_n cap_n;
+
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_CAPABILITY, &cap_n, 0,
+ sizeof(cap_n.u.resp.tpm_result),
+ sizeof(cap_n)) < 0) {
error_report("tpm-emulator: probing failed : %s", strerror(errno));
return -1;
}
- tpm_emu->caps = be64_to_cpu(tpm_emu->caps);
+ tpm_emu->caps = be32_to_cpu(cap_n.u.resp.caps);
trace_tpm_emulator_probe_caps(tpm_emu->caps);
@@ -254,7 +277,7 @@ static int tpm_emulator_probe_caps(TPMEmulator *tpm_emu)
static int tpm_emulator_check_caps(TPMEmulator *tpm_emu)
{
- ptm_cap caps = 0;
+ uint32_t caps = 0;
const char *tpm = NULL;
/* check for min. required capabilities */
@@ -290,7 +313,8 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb)
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
ptm_res res;
- if (tpm_emulator_ctrlcmd(tpm_emu, CMD_STOP, &res, 0, sizeof(res)) < 0) {
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_STOP, &res, 0,
+ sizeof(ptm_res), sizeof(res)) < 0) {
error_report("tpm-emulator: Could not stop TPM: %s",
strerror(errno));
return -1;
@@ -317,8 +341,9 @@ static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu)
/* give failing side 300 * 10ms time to release lock */
pls.u.req.retries = cpu_to_be32(300);
- if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls,
- sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) {
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls, sizeof(pls.u.req),
+ sizeof(pls.u.resp.tpm_result),
+ sizeof(pls.u.resp)) < 0) {
error_report("tpm-emulator: Could not lock storage within 3 seconds: "
"%s", strerror(errno));
return -1;
@@ -349,7 +374,8 @@ static int tpm_emulator_set_buffer_size(TPMBackend *tb,
psbs.u.req.buffersize = cpu_to_be32(wanted_size);
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_BUFFERSIZE, &psbs,
- sizeof(psbs.u.req), sizeof(psbs.u.resp)) < 0) {
+ sizeof(psbs.u.req), sizeof(psbs.u.resp.tpm_result),
+ sizeof(psbs.u.resp)) < 0) {
error_report("tpm-emulator: Could not set buffer size: %s",
strerror(errno));
return -1;
@@ -396,6 +422,7 @@ static int tpm_emulator_startup_tpm_resume(TPMBackend *tb, size_t buffersize,
}
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_INIT, &init, sizeof(init),
+ sizeof(init.u.resp.tpm_result),
sizeof(init)) < 0) {
error_report("tpm-emulator: could not send INIT: %s",
strerror(errno));
@@ -437,8 +464,9 @@ static bool tpm_emulator_get_tpm_established_flag(TPMBackend *tb)
return tpm_emu->established_flag;
}
- if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_TPMESTABLISHED, &est,
- 0, sizeof(est)) < 0) {
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_TPMESTABLISHED, &est, 0,
+ sizeof(est) /* always returns resp.bit */,
+ sizeof(est)) < 0) {
error_report("tpm-emulator: Could not get the TPM established flag: %s",
strerror(errno));
return false;
@@ -466,6 +494,7 @@ static int tpm_emulator_reset_tpm_established_flag(TPMBackend *tb,
reset_est.u.req.loc = tpm_emu->cur_locty_number;
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_RESET_TPMESTABLISHED,
&reset_est, sizeof(reset_est),
+ sizeof(reset_est.u.resp.tpm_result),
sizeof(reset_est)) < 0) {
error_report("tpm-emulator: Could not reset the establishment bit: %s",
strerror(errno));
@@ -497,7 +526,7 @@ static void tpm_emulator_cancel_cmd(TPMBackend *tb)
/* FIXME: make the function non-blocking, or it may block a VCPU */
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_CANCEL_TPM_CMD, &res, 0,
- sizeof(res)) < 0) {
+ sizeof(ptm_res), sizeof(res)) < 0) {
error_report("tpm-emulator: Could not cancel command: %s",
strerror(errno));
} else if (res != 0) {
@@ -527,8 +556,8 @@ static size_t tpm_emulator_get_buffer_size(TPMBackend *tb)
static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
{
Error *err = NULL;
- ptm_cap caps = PTM_CAP_GET_STATEBLOB | PTM_CAP_SET_STATEBLOB |
- PTM_CAP_STOP;
+ uint32_t caps = PTM_CAP_GET_STATEBLOB | PTM_CAP_SET_STATEBLOB |
+ PTM_CAP_STOP;
if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, caps)) {
error_setg(&tpm_emu->migration_blocker,
@@ -557,7 +586,7 @@ static int tpm_emulator_prepare_data_fd(TPMEmulator *tpm_emu)
qemu_chr_fe_set_msgfds(&tpm_emu->ctrl_chr, fds + 1, 1);
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_DATAFD, &res, 0,
- sizeof(res)) < 0 || res != 0) {
+ sizeof(ptm_res), sizeof(res)) < 0 || res != 0) {
error_report("tpm-emulator: Failed to send CMD_SET_DATAFD: %s",
strerror(errno));
goto err_exit;
@@ -704,6 +733,8 @@ static int tpm_emulator_get_state_blob(TPMEmulator *tpm_emu,
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_STATEBLOB,
&pgs, sizeof(pgs.u.req),
+ /* always returns up to resp.data */
+ offsetof(ptm_getstate, u.resp.data),
offsetof(ptm_getstate, u.resp.data)) < 0) {
error_report("tpm-emulator: could not get state blob type %d : %s",
type, strerror(errno));
@@ -806,7 +837,7 @@ static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu,
/* write the header only */
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_STATEBLOB, &pss,
- offsetof(ptm_setstate, u.req.data), 0) < 0) {
+ offsetof(ptm_setstate, u.req.data), 0, 0) < 0) {
error_report("tpm-emulator: could not set state blob type %d : %s",
type, strerror(errno));
return -1;
@@ -990,7 +1021,8 @@ static void tpm_emulator_shutdown(TPMEmulator *tpm_emu)
return;
}
- if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SHUTDOWN, &res, 0, sizeof(res)) < 0) {
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SHUTDOWN, &res, 0,
+ sizeof(ptm_res), sizeof(res)) < 0) {
error_report("tpm-emulator: Could not cleanly shutdown the TPM: %s",
strerror(errno));
} else if (res != 0) {
@@ -1024,7 +1056,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
vmstate_unregister(NULL, &vmstate_tpm_emulator, obj);
}
-static void tpm_emulator_class_init(ObjectClass *klass, void *data)
+static void tpm_emulator_class_init(ObjectClass *klass, const void *data)
{
TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass);
diff --git a/backends/tpm/tpm_int.h b/backends/tpm/tpm_int.h
index ba61093..2319a1c 100644
--- a/backends/tpm/tpm_int.h
+++ b/backends/tpm/tpm_int.h
@@ -13,7 +13,7 @@
#define BACKENDS_TPM_INT_H
#include "qemu/option.h"
-#include "sysemu/tpm.h"
+#include "system/tpm.h"
#define TPM_STANDARD_CMDLINE_OPTS \
{ \
diff --git a/backends/tpm/tpm_ioctl.h b/backends/tpm/tpm_ioctl.h
index 1933ab6..ee2dd15 100644
--- a/backends/tpm/tpm_ioctl.h
+++ b/backends/tpm/tpm_ioctl.h
@@ -29,6 +29,16 @@
typedef uint32_t ptm_res;
+/* PTM_GET_CAPABILITY: Get supported capabilities (ioctl's) */
+struct ptm_cap_n {
+ union {
+ struct {
+ ptm_res tpm_result; /* will always be TPM_SUCCESS (0) */
+ uint32_t caps;
+ } resp; /* response */
+ } u;
+};
+
/* PTM_GET_TPMESTABLISHED: get the establishment bit */
struct ptm_est {
union {
@@ -242,7 +252,8 @@ struct ptm_lockstorage {
} u;
};
-typedef uint64_t ptm_cap;
+typedef uint64_t ptm_cap; /* CUSE-only; use ptm_cap_n otherwise */
+typedef struct ptm_cap_n ptm_cap_n;
typedef struct ptm_est ptm_est;
typedef struct ptm_reset_est ptm_reset_est;
typedef struct ptm_loc ptm_loc;
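/*
 * Illustrative sketch (helper name is hypothetical): over the socket/UNIX
 * control channel the capability reply is the fixed-size ptm_cap_n above,
 * so the caps word is a 32-bit big-endian value rather than the 64-bit
 * CUSE-only ptm_cap.
 */
static uint32_t example_parse_caps(const ptm_cap_n *cap_n)
{
    /* tpm_result is documented above to always be TPM_SUCCESS (0) */
    return be32_to_cpu(cap_n->u.resp.caps);
}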
diff --git a/backends/tpm/tpm_passthrough.c b/backends/tpm/tpm_passthrough.c
index 179697a..b7c7074 100644
--- a/backends/tpm/tpm_passthrough.c
+++ b/backends/tpm/tpm_passthrough.c
@@ -26,8 +26,8 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/sockets.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
#include "tpm_int.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-tpm.h"
@@ -364,7 +364,7 @@ static void tpm_passthrough_inst_finalize(Object *obj)
qapi_free_TPMPassthroughOptions(tpm_pt->options);
}
-static void tpm_passthrough_class_init(ObjectClass *klass, void *data)
+static void tpm_passthrough_class_init(ObjectClass *klass, const void *data)
{
TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass);
diff --git a/backends/tpm/tpm_util.c b/backends/tpm/tpm_util.c
index cf13855..f2d1739 100644
--- a/backends/tpm/tpm_util.c
+++ b/backends/tpm/tpm_util.c
@@ -21,13 +21,14 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "tpm_int.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-properties.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
#include "trace.h"
/* tpm backend property */
@@ -46,7 +47,7 @@ static void get_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
TPMBackend *s, **be = object_field_prop_ptr(obj, prop);
char *str;
@@ -66,7 +67,7 @@ static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
static void release_tpm(Object *obj, const char *name, void *opaque)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
TPMBackend **be = object_field_prop_ptr(obj, prop);
if (*be) {
@@ -75,7 +76,7 @@ static void release_tpm(Object *obj, const char *name, void *opaque)
}
const PropertyInfo qdev_prop_tpm = {
- .name = "str",
+ .type = "str",
.description = "ID of a tpm to use as a backend",
.get = get_tpm,
.set = set_tpm,
@@ -336,8 +337,8 @@ void tpm_sized_buffer_reset(TPMSizedBuffer *tsb)
void tpm_util_show_buffer(const unsigned char *buffer,
size_t buffer_size, const char *string)
{
- size_t len, i;
- char *line_buffer, *p;
+ g_autoptr(GString) str = NULL;
+ size_t len, i, l;
if (!trace_event_get_state_backends(TRACE_TPM_UTIL_SHOW_BUFFER_CONTENT)) {
return;
@@ -345,19 +346,14 @@ void tpm_util_show_buffer(const unsigned char *buffer,
len = MIN(tpm_cmd_get_size(buffer), buffer_size);
trace_tpm_util_show_buffer_header(string, len);
- /*
- * allocate enough room for 3 chars per buffer entry plus a
- * newline after every 16 chars and a final null terminator.
- */
- line_buffer = g_malloc(len * 3 + (len / 16) + 1);
-
- for (i = 0, p = line_buffer; i < len; i++) {
- if (i && !(i % 16)) {
- p += sprintf(p, "\n");
+ for (i = 0; i < len; i += l) {
+ if (str) {
+ g_string_append_c(str, '\n');
}
- p += sprintf(p, "%.2X ", buffer[i]);
+ l = MIN(len - i, 16);
+ str = qemu_hexdump_line(str, buffer + i, l, 1, 0);
}
- trace_tpm_util_show_buffer_content(line_buffer);
- g_free(line_buffer);
+ g_string_ascii_up(str);
+ trace_tpm_util_show_buffer_content(str->str);
}
diff --git a/backends/tpm/trace-events b/backends/tpm/trace-events
index cb5cfa6..05e3053 100644
--- a/backends/tpm/trace-events
+++ b/backends/tpm/trace-events
@@ -16,7 +16,7 @@ tpm_util_show_buffer_content(const char *buf) "%s"
# tpm_emulator.c
tpm_emulator_set_locality(uint8_t locty) "setting locality to %d"
tpm_emulator_handle_request(void) "processing TPM command"
-tpm_emulator_probe_caps(uint64_t caps) "capabilities: 0x%"PRIx64
+tpm_emulator_probe_caps(uint32_t caps) "capabilities: 0x%x"
tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t maxsize) "buffer size: %u, min: %u, max: %u"
tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu"
tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d"
diff --git a/backends/trace-events b/backends/trace-events
index 211e6f3..7278214 100644
--- a/backends/trace-events
+++ b/backends/trace-events
@@ -14,4 +14,8 @@ iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size
iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas) " iommufd=%d ioas=%d"
+iommufd_backend_alloc_hwpt(int iommufd, uint32_t dev_id, uint32_t pt_id, uint32_t flags, uint32_t hwpt_type, uint32_t len, uint64_t data_ptr, uint32_t out_hwpt_id, int ret) " iommufd=%d dev_id=%u pt_id=%u flags=0x%x hwpt_type=%u len=%u data_ptr=0x%"PRIx64" out_hwpt=%u (%d)"
iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)"
+iommufd_backend_set_dirty(int iommufd, uint32_t hwpt_id, bool start, int ret) " iommufd=%d hwpt=%u enable=%d (%d)"
+iommufd_backend_get_dirty_bitmap(int iommufd, uint32_t hwpt_id, uint64_t iova, uint64_t size, uint64_t page_size, int ret) " iommufd=%d hwpt=%u iova=0x%"PRIx64" size=0x%"PRIx64" page_size=0x%"PRIx64" (%d)"
+iommufd_backend_invalidate_cache(int iommufd, uint32_t id, uint32_t data_type, uint32_t entry_len, uint32_t entry_num, uint32_t done_num, uint64_t data_ptr, int ret) " iommufd=%d id=%u data_type=%u entry_len=%u entry_num=%u done_num=%u data_ptr=0x%"PRIx64" (%d)"
diff --git a/backends/vhost-user.c b/backends/vhost-user.c
index 94c6a82..4284532 100644
--- a/backends/vhost-user.c
+++ b/backends/vhost-user.c
@@ -15,8 +15,8 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qom/object_interfaces.h"
-#include "sysemu/vhost-user-backend.h"
-#include "sysemu/kvm.h"
+#include "system/vhost-user-backend.h"
+#include "system/kvm.h"
#include "io/channel-command.h"
#include "hw/virtio/virtio-bus.h"
@@ -97,30 +97,28 @@ err_host_notifiers:
vhost_dev_disable_notifiers(&b->dev, b->vdev);
}
-void
+int
vhost_user_backend_stop(VhostUserBackend *b)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(b->vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- int ret = 0;
+ int ret;
if (!b->started) {
- return;
+ return 0;
}
- vhost_dev_stop(&b->dev, b->vdev, true);
+ ret = vhost_dev_stop(&b->dev, b->vdev, true);
- if (k->set_guest_notifiers) {
- ret = k->set_guest_notifiers(qbus->parent,
- b->dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
- }
+ if (k->set_guest_notifiers &&
+ k->set_guest_notifiers(qbus->parent, b->dev.nvqs, false) < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return -1;
}
- assert(ret >= 0);
vhost_dev_disable_notifiers(&b->dev, b->vdev);
b->started = false;
+ return ret;
}
static void set_chardev(Object *obj, const char *value, Error **errp)
@@ -163,7 +161,7 @@ static char *get_chardev(Object *obj, Error **errp)
return NULL;
}
-static void vhost_user_backend_class_init(ObjectClass *oc, void *data)
+static void vhost_user_backend_class_init(ObjectClass *oc, const void *data)
{
object_class_property_add_str(oc, "chardev", get_chardev, set_chardev);
}
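/*
 * Illustrative sketch (function name is hypothetical): with the new int
 * return value, callers of vhost_user_backend_stop() can propagate a failed
 * vhost_dev_stop() instead of silently ignoring it.
 */
static int example_device_stop(VhostUserBackend *b)
{
    int ret = vhost_user_backend_stop(b);

    if (ret < 0) {
        error_report("vhost-user backend did not stop cleanly: %d", ret);
    }
    return ret;
}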
diff --git a/block.c b/block.c
index c317de9..bfd4340 100644
--- a/block.c
+++ b/block.c
@@ -36,13 +36,13 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qnull.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/notify.h"
#include "qemu/option.h"
#include "qemu/coroutine.h"
@@ -106,9 +106,9 @@ static void bdrv_reopen_abort(BDRVReopenState *reopen_state);
static bool bdrv_backing_overridden(BlockDriverState *bs);
-static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp);
+static bool GRAPH_RDLOCK
+bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ GHashTable *visited, Transaction *tran, Error **errp);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
@@ -1226,9 +1226,10 @@ static int bdrv_child_cb_inactivate(BdrvChild *child)
return 0;
}
-static bool bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp)
+static bool GRAPH_RDLOCK
+bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp)
{
BlockDriverState *bs = child->opaque;
return bdrv_change_aio_context(bs, ctx, visited, tran, errp);
@@ -1573,6 +1574,10 @@ static void update_flags_from_options(int *flags, QemuOpts *opts)
if (qemu_opt_get_bool_del(opts, BDRV_OPT_AUTO_READ_ONLY, false)) {
*flags |= BDRV_O_AUTO_RDONLY;
}
+
+ if (!qemu_opt_get_bool_del(opts, BDRV_OPT_ACTIVE, true)) {
+ *flags |= BDRV_O_INACTIVE;
+ }
}
static void update_options_from_flags(QDict *options, int flags)
@@ -1716,12 +1721,14 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
open_failed:
bs->drv = NULL;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
if (bs->file != NULL) {
bdrv_unref_child(bs, bs->file);
assert(!bs->file);
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_free(bs->opaque);
bs->opaque = NULL;
@@ -1800,6 +1807,11 @@ QemuOptsList bdrv_runtime_opts = {
.help = "Ignore flush requests",
},
{
+ .name = BDRV_OPT_ACTIVE,
+ .type = QEMU_OPT_BOOL,
+ .help = "Node is activated",
+ },
+ {
.name = BDRV_OPT_READ_ONLY,
.type = QEMU_OPT_BOOL,
.help = "Node is opened in read-only mode",
@@ -3018,7 +3030,8 @@ static void GRAPH_WRLOCK bdrv_attach_child_common_abort(void *opaque)
bdrv_replace_child_noperm(s->child, NULL);
if (bdrv_get_aio_context(bs) != s->old_child_ctx) {
- bdrv_try_change_aio_context(bs, s->old_child_ctx, NULL, &error_abort);
+ bdrv_try_change_aio_context_locked(bs, s->old_child_ctx, NULL,
+ &error_abort);
}
if (bdrv_child_get_parent_aio_context(s->child) != s->old_parent_ctx) {
@@ -3060,6 +3073,9 @@ static TransactionActionDrv bdrv_attach_child_common_drv = {
*
* Both @parent_bs and @child_bs can move to a different AioContext in this
* function.
+ *
+ * All block nodes must be drained before this function is called until after
+ * the transaction is finalized.
*/
static BdrvChild * GRAPH_WRLOCK
bdrv_attach_child_common(BlockDriverState *child_bs,
@@ -3077,6 +3093,13 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
assert(child_class->get_parent_desc);
GLOBAL_STATE_CODE();
+ if (bdrv_is_inactive(child_bs) && (perm & ~BLK_PERM_CONSISTENT_READ)) {
+ g_autofree char *perm_names = bdrv_perm_names(perm);
+ error_setg(errp, "Permission '%s' unavailable on inactive node",
+ perm_names);
+ return NULL;
+ }
+
new_child = g_new(BdrvChild, 1);
*new_child = (BdrvChild) {
.bs = NULL,
@@ -3096,8 +3119,8 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
parent_ctx = bdrv_child_get_parent_aio_context(new_child);
if (child_ctx != parent_ctx) {
Error *local_err = NULL;
- int ret = bdrv_try_change_aio_context(child_bs, parent_ctx, NULL,
- &local_err);
+ int ret = bdrv_try_change_aio_context_locked(child_bs, parent_ctx, NULL,
+ &local_err);
if (ret < 0 && child_class->change_aio_ctx) {
Transaction *aio_ctx_tran = tran_new();
@@ -3137,7 +3160,7 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
* stop new requests from coming in. This is fine, we don't care about the
* old requests here, they are not for this child. If another place enters a
* drain section for the same parent, but wants it to be fully quiesced, it
- * will not run most of the the code in .drained_begin() again (which is not
+ * will not run most of the code in .drained_begin() again (which is not
* a problem, we already did this), but it will still poll until the parent
* is fully quiesced, so it will not be negatively affected either.
*/
@@ -3163,6 +3186,9 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
*
* After calling this function, the transaction @tran may only be completed
* while holding a writer lock for the graph.
+ *
+ * All block nodes must be drained before this function is called until after
+ * the transaction is finalized.
*/
static BdrvChild * GRAPH_WRLOCK
bdrv_attach_child_noperm(BlockDriverState *parent_bs,
@@ -3183,6 +3209,11 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs,
child_bs->node_name, child_name, parent_bs->node_name);
return NULL;
}
+ if (bdrv_is_inactive(child_bs) && !bdrv_is_inactive(parent_bs)) {
+ error_setg(errp, "Inactive '%s' can't be a %s child of active '%s'",
+ child_bs->node_name, child_name, parent_bs->node_name);
+ return NULL;
+ }
bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm);
bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL,
@@ -3199,6 +3230,8 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs,
*
* On failure NULL is returned, errp is set and the reference to
* child_bs is also dropped.
+ *
+ * All block nodes must be drained.
*/
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
const char *child_name,
@@ -3238,6 +3271,8 @@ out:
*
* On failure NULL is returned, errp is set and the reference to
* child_bs is also dropped.
+ *
+ * All block nodes must be drained.
*/
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@@ -3272,7 +3307,11 @@ out:
return ret < 0 ? NULL : child;
}
-/* Callers must ensure that child->frozen is false. */
+/*
+ * Callers must ensure that child->frozen is false.
+ *
+ * All block nodes must be drained.
+ */
void bdrv_root_unref_child(BdrvChild *child)
{
BlockDriverState *child_bs = child->bs;
@@ -3293,8 +3332,8 @@ void bdrv_root_unref_child(BdrvChild *child)
* When the parent requiring a non-default AioContext is removed, the
* node moves back to the main AioContext
*/
- bdrv_try_change_aio_context(child_bs, qemu_get_aio_context(), NULL,
- NULL);
+ bdrv_try_change_aio_context_locked(child_bs, qemu_get_aio_context(),
+ NULL, NULL);
}
bdrv_schedule_unref(child_bs);
@@ -3367,7 +3406,11 @@ bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child,
}
}
-/* Callers must ensure that child->frozen is false. */
+/*
+ * Callers must ensure that child->frozen is false.
+ *
+ * All block nodes must be drained.
+ */
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child)
{
GLOBAL_STATE_CODE();
@@ -3432,6 +3475,9 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs)
*
* After calling this function, the transaction @tran may only be completed
* while holding a writer lock for the graph.
+ *
+ * All block nodes must be drained before this function is called until after
+ * the transaction is finalized.
*/
static int GRAPH_WRLOCK
bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
@@ -3524,8 +3570,7 @@ out:
* Both @bs and @backing_hd can move to a different AioContext in this
* function.
*
- * If a backing child is already present (i.e. we're detaching a node), that
- * child node must be drained.
+ * All block nodes must be drained.
*/
int bdrv_set_backing_hd_drained(BlockDriverState *bs,
BlockDriverState *backing_hd,
@@ -3554,21 +3599,14 @@ out:
int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
Error **errp)
{
- BlockDriverState *drain_bs;
int ret;
GLOBAL_STATE_CODE();
- bdrv_graph_rdlock_main_loop();
- drain_bs = bs->backing ? bs->backing->bs : bs;
- bdrv_graph_rdunlock_main_loop();
-
- bdrv_ref(drain_bs);
- bdrv_drained_begin(drain_bs);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
bdrv_graph_wrunlock();
- bdrv_drained_end(drain_bs);
- bdrv_unref(drain_bs);
+ bdrv_drain_all_end();
return ret;
}
@@ -3759,10 +3797,12 @@ static BdrvChild *bdrv_open_child_common(const char *filename,
return NULL;
}
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return child;
}
@@ -4337,9 +4377,7 @@ bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child)
* returns a pointer to bs_queue, which is either the newly allocated
* bs_queue, or the existing bs_queue being used.
*
- * bs is drained here and undrained by bdrv_reopen_queue_free().
- *
- * To be called with bs->aio_context locked.
+ * bs must be drained.
*/
static BlockReopenQueue * GRAPH_RDLOCK
bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,
@@ -4358,12 +4396,7 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,
GLOBAL_STATE_CODE();
- /*
- * Strictly speaking, draining is illegal under GRAPH_RDLOCK. We know that
- * we've been called with bdrv_graph_rdlock_main_loop(), though, so it's ok
- * in practice.
- */
- bdrv_drained_begin(bs);
+ assert(bs->quiesce_counter > 0);
if (bs_queue == NULL) {
bs_queue = g_new0(BlockReopenQueue, 1);
@@ -4498,12 +4531,17 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,
return bs_queue;
}
-/* To be called with bs->aio_context locked */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
BlockDriverState *bs,
QDict *options, bool keep_old_opts)
{
GLOBAL_STATE_CODE();
+
+ if (bs_queue == NULL) {
+ /* Paired with bdrv_drain_all_end() in bdrv_reopen_queue_free(). */
+ bdrv_drain_all_begin();
+ }
+
GRAPH_RDLOCK_GUARD_MAINLOOP();
return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false,
@@ -4516,12 +4554,14 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue)
if (bs_queue) {
BlockReopenQueueEntry *bs_entry, *next;
QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
- bdrv_drained_end(bs_entry->state.bs);
qobject_unref(bs_entry->state.explicit_options);
qobject_unref(bs_entry->state.options);
g_free(bs_entry);
}
g_free(bs_queue);
+
+ /* Paired with bdrv_drain_all_begin() in bdrv_reopen_queue(). */
+ bdrv_drain_all_end();
}
}
@@ -4688,6 +4728,9 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* Return 0 on success, otherwise return < 0 and set @errp.
*
* @reopen_state->bs can move to a different AioContext in this function.
+ *
+ * All block nodes must be drained before this function is called until after
+ * the transaction is finalized.
*/
static int GRAPH_UNLOCKED
bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
@@ -4781,7 +4824,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
if (old_child_bs) {
bdrv_ref(old_child_bs);
- bdrv_drained_begin(old_child_bs);
+ assert(old_child_bs->quiesce_counter > 0);
}
bdrv_graph_rdunlock_main_loop();
@@ -4793,7 +4836,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bdrv_graph_wrunlock();
if (old_child_bs) {
- bdrv_drained_end(old_child_bs);
bdrv_unref(old_child_bs);
}
@@ -4822,6 +4864,9 @@ out_rdlock:
*
* After calling this function, the transaction @change_child_tran may only be
* completed while holding a writer lock for the graph.
+ *
+ * All block nodes must be drained before this function is called until after
+ * the transaction is finalized.
*/
static int GRAPH_UNLOCKED
bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
@@ -5135,6 +5180,7 @@ static void bdrv_close(BlockDriverState *bs)
bs->drv = NULL;
}
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
bdrv_unref_child(bs, child);
@@ -5143,6 +5189,7 @@ static void bdrv_close(BlockDriverState *bs)
assert(!bs->backing);
assert(!bs->file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_free(bs->opaque);
bs->opaque = NULL;
@@ -5468,9 +5515,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
assert(!bs_new->backing);
bdrv_graph_rdunlock_main_loop();
- bdrv_drained_begin(bs_top);
- bdrv_drained_begin(bs_new);
-
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
child = bdrv_attach_child_noperm(bs_new, bs_top, "backing",
@@ -5492,9 +5537,7 @@ out:
bdrv_refresh_limits(bs_top, NULL, NULL);
bdrv_graph_wrunlock();
-
- bdrv_drained_end(bs_top);
- bdrv_drained_end(bs_new);
+ bdrv_drain_all_end();
return ret;
}
@@ -6351,7 +6394,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
if (!*name) {
name = allocated_name = blk_get_attached_dev_id(blk);
}
- xdbg_graph_add_node(gr, blk, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_BACKEND,
+ xdbg_graph_add_node(gr, blk, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_BACKEND,
name);
g_free(allocated_name);
if (blk_root(blk)) {
@@ -6364,7 +6407,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
job = block_job_next_locked(job)) {
GSList *el;
- xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
+ xdbg_graph_add_node(gr, job, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
job->job.id);
for (el = job->nodes; el; el = el->next) {
xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
@@ -6373,7 +6416,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
}
QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
- xdbg_graph_add_node(gr, bs, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_DRIVER,
+ xdbg_graph_add_node(gr, bs, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_DRIVER,
bs->node_name);
QLIST_FOREACH(child, &bs->children, next) {
xdbg_graph_add_edge(gr, bs, child);
@@ -6824,6 +6867,10 @@ void bdrv_init_with_whitelist(void)
bdrv_init();
}
+bool bdrv_is_inactive(BlockDriverState *bs)
+{
+ return bs->open_flags & BDRV_O_INACTIVE;
+}
+
int bdrv_activate(BlockDriverState *bs, Error **errp)
{
BdrvChild *child, *parent;
@@ -6955,7 +7002,8 @@ bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
return false;
}
-static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
+static int GRAPH_RDLOCK
+bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level)
{
BdrvChild *child, *parent;
int ret;
@@ -6963,6 +7011,8 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
GLOBAL_STATE_CODE();
+ assert(bs->quiesce_counter > 0);
+
if (!bs->drv) {
return -ENOMEDIUM;
}
@@ -6973,7 +7023,14 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
return 0;
}
- assert(!(bs->open_flags & BDRV_O_INACTIVE));
+ /*
+ * Inactivating an already inactive node on user request is harmless, but if
+ * a child is already inactive before its parent, that's bad.
+ */
+ if (bs->open_flags & BDRV_O_INACTIVE) {
+ assert(top_level);
+ return 0;
+ }
/* Inactivate this node */
if (bs->drv->bdrv_inactivate) {
@@ -7010,7 +7067,7 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
/* Recursively inactivate children */
QLIST_FOREACH(child, &bs->children, next) {
- ret = bdrv_inactivate_recurse(child->bs);
+ ret = bdrv_inactivate_recurse(child->bs, false);
if (ret < 0) {
return ret;
}
@@ -7019,6 +7076,33 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
return 0;
}
+int bdrv_inactivate(BlockDriverState *bs, Error **errp)
+{
+ int ret;
+
+ GLOBAL_STATE_CODE();
+
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
+
+ if (bdrv_has_bds_parent(bs, true)) {
+ error_setg(errp, "Node has active parent node");
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = bdrv_inactivate_recurse(bs, true);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to inactivate node");
+ goto out;
+ }
+
+out:
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
+ return ret;
+}
+
int bdrv_inactivate_all(void)
{
BlockDriverState *bs = NULL;
@@ -7026,7 +7110,9 @@ int bdrv_inactivate_all(void)
int ret = 0;
GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
/* Nodes with BDS parents are covered by recursion from the last
@@ -7035,13 +7121,16 @@ int bdrv_inactivate_all(void)
if (bdrv_has_bds_parent(bs, false)) {
continue;
}
- ret = bdrv_inactivate_recurse(bs);
+ ret = bdrv_inactivate_recurse(bs, true);
if (ret < 0) {
bdrv_next_cleanup(&it);
break;
}
}
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
+
return ret;
}
@@ -7222,10 +7311,6 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
return true;
}
-/*
- * Must not be called while holding the lock of an AioContext other than the
- * current one.
- */
void bdrv_img_create(const char *filename, const char *fmt,
const char *base_filename, const char *base_fmt,
char *options, uint64_t img_size, int flags, bool quiet,
@@ -7512,10 +7597,21 @@ typedef struct BdrvStateSetAioContext {
BlockDriverState *bs;
} BdrvStateSetAioContext;
-static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx,
- GHashTable *visited,
- Transaction *tran,
- Error **errp)
+/*
+ * Changes the AioContext of @child to @ctx and recursively for the associated
+ * block nodes and all their children and parents. Returns true if the change is
+ * possible and the transaction @tran can be continued. Returns false and sets
+ * @errp if not and the transaction must be aborted.
+ *
+ * @visited will accumulate all visited BdrvChild objects. The caller is
+ * responsible for freeing the list afterwards.
+ *
+ * Must be called with the affected block nodes drained.
+ */
+static bool GRAPH_RDLOCK
+bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp)
{
GLOBAL_STATE_CODE();
if (g_hash_table_contains(visited, c)) {
@@ -7540,6 +7636,17 @@ static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx,
return true;
}
+/*
+ * Changes the AioContext of @c->bs to @ctx and recursively for all its children
+ * and parents. Returns true if the change is possible and the transaction @tran
+ * can be continued. Returns false and sets @errp if not and the transaction
+ * must be aborted.
+ *
+ * @visited will accumulate all visited BdrvChild objects. The caller is
+ * responsible for freeing the list afterwards.
+ *
+ * Must be called with the affected block nodes drained.
+ */
bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
GHashTable *visited, Transaction *tran,
Error **errp)
@@ -7555,10 +7662,6 @@ bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
static void bdrv_set_aio_context_clean(void *opaque)
{
BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque;
- BlockDriverState *bs = (BlockDriverState *) state->bs;
-
- /* Paired with bdrv_drained_begin in bdrv_change_aio_context() */
- bdrv_drained_end(bs);
g_free(state);
}
@@ -7586,10 +7689,12 @@ static TransactionActionDrv set_aio_context = {
*
* @visited will accumulate all visited BdrvChild objects. The caller is
* responsible for freeing the list afterwards.
+ *
+ * @bs must be drained.
*/
-static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp)
+static bool GRAPH_RDLOCK
+bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ GHashTable *visited, Transaction *tran, Error **errp)
{
BdrvChild *c;
BdrvStateSetAioContext *state;
@@ -7600,21 +7705,17 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
return true;
}
- bdrv_graph_rdlock_main_loop();
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (!bdrv_parent_change_aio_context(c, ctx, visited, tran, errp)) {
- bdrv_graph_rdunlock_main_loop();
return false;
}
}
QLIST_FOREACH(c, &bs->children, next) {
if (!bdrv_child_change_aio_context(c, ctx, visited, tran, errp)) {
- bdrv_graph_rdunlock_main_loop();
return false;
}
}
- bdrv_graph_rdunlock_main_loop();
state = g_new(BdrvStateSetAioContext, 1);
*state = (BdrvStateSetAioContext) {
@@ -7622,8 +7723,7 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
.bs = bs,
};
- /* Paired with bdrv_drained_end in bdrv_set_aio_context_clean() */
- bdrv_drained_begin(bs);
+ assert(bs->quiesce_counter > 0);
tran_add(tran, &set_aio_context, state);
@@ -7636,9 +7736,13 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
*
* If ignore_child is not NULL, that child (and its subgraph) will not
* be touched.
+ *
+ * Called with the graph lock held.
+ *
+ * Called while all bs are drained.
*/
-int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
- BdrvChild *ignore_child, Error **errp)
+int bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp)
{
Transaction *tran;
GHashTable *visited;
@@ -7647,9 +7751,9 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
/*
* Recursion phase: go through all nodes of the graph.
- * Take care of checking that all nodes support changing AioContext
- * and drain them, building a linear list of callbacks to run if everything
- * is successful (the transaction itself).
+ * Take care of checking that all nodes support changing AioContext,
+ * building a linear list of callbacks to run if everything is successful
+ * (the transaction itself).
*/
tran = tran_new();
visited = g_hash_table_new(NULL, NULL);
@@ -7676,6 +7780,29 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
return 0;
}
+/*
+ * Change bs's and recursively all of its parents' and children's AioContext
+ * to the given new context, returning an error if that isn't possible.
+ *
+ * If ignore_child is not NULL, that child (and its subgraph) will not
+ * be touched.
+ */
+int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp)
+{
+ int ret;
+
+ GLOBAL_STATE_CODE();
+
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
+ ret = bdrv_try_change_aio_context_locked(bs, ctx, ignore_child, errp);
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
+
+ return ret;
+}
+
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
@@ -8103,8 +8230,10 @@ char *bdrv_dirname(BlockDriverState *bs, Error **errp)
}
/*
- * Hot add/remove a BDS's child. So the user can take a child offline when
- * it is broken and take a new child online
+ * Hot add a BDS's child. Used in combination with bdrv_del_child, so the user
+ * can take a child offline when it is broken and take a new child online.
+ *
+ * All block nodes must be drained.
*/
void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs,
Error **errp)
@@ -8144,6 +8273,12 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs,
parent_bs->drv->bdrv_add_child(parent_bs, child_bs, errp);
}
+/*
+ * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the
+ * user can take a child offline when it is broken and take a new child online.
+ *
+ * All block nodes must be drained.
+ */
void bdrv_del_child(BlockDriverState *parent_bs, BdrvChild *child, Error **errp)
{
BdrvChild *tmp;
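/*
 * Illustrative sketch of the locking pattern this patch converges on for
 * graph-changing operations in block.c: drain all nodes first, then take
 * the graph writer lock, so the helpers called inside no longer need to
 * drain individual nodes themselves.
 */
bdrv_drain_all_begin();
bdrv_graph_wrlock();

/* ... attach/detach children, change AioContexts, ... */

bdrv_graph_wrunlock();
bdrv_drain_all_end();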
diff --git a/block/accounting.c b/block/accounting.c
index 2829745..3e46159 100644
--- a/block/accounting.c
+++ b/block/accounting.c
@@ -27,7 +27,7 @@
#include "block/accounting.h"
#include "block/block_int.h"
#include "qemu/timer.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
static QEMUClockType clock_type = QEMU_CLOCK_REALTIME;
static const int qtest_latency_ns = NANOSECONDS_PER_SECOND / 1000;
diff --git a/block/aio_task.c b/block/aio_task.c
index 9bd17ea..bb5c05f 100644
--- a/block/aio_task.c
+++ b/block/aio_task.c
@@ -119,8 +119,3 @@ int aio_task_pool_status(AioTaskPool *pool)
return pool->status;
}
-
-bool aio_task_pool_empty(AioTaskPool *pool)
-{
- return pool->busy_tasks == 0;
-}
diff --git a/block/backup.c b/block/backup.c
index 3dd2e22..909027c 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -23,7 +23,7 @@
#include "block/dirty-bitmap.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
@@ -361,6 +361,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BackupPerf *perf,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ OnCbwError on_cbw_error,
int creation_flags,
BlockCompletionFunc *cb, void *opaque,
JobTxn *txn, Error **errp)
@@ -458,7 +459,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
}
cbw = bdrv_cbw_append(bs, target, filter_node_name, discard_source,
- &bcs, errp);
+ perf->min_cluster_size, &bcs, on_cbw_error, errp);
if (!cbw) {
goto error;
}
@@ -497,10 +498,12 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
block_copy_set_speed(bcs, speed);
/* Required permissions are taken by copy-before-write filter target */
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return &job->common;
diff --git a/block/blkdebug.c b/block/blkdebug.c
index c95c818..c54aee0 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -33,11 +33,11 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qapi/qapi-visit-block-core.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
/* All APIs are thread-safe */
@@ -751,9 +751,9 @@ blkdebug_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
}
static int coroutine_fn GRAPH_RDLOCK
-blkdebug_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
- BlockDriverState **file)
+blkdebug_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file)
{
int err;
diff --git a/block/blkio.c b/block/blkio.c
index 3d9a2e7..4142673 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -11,15 +11,15 @@
#include "qemu/osdep.h"
#include <blkio.h>
#include "block/block_int.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "exec/memory.h" /* for ram_block_discard_disable() */
+#include "system/block-backend.h"
+#include "system/memory.h" /* for ram_block_discard_disable() */
#include "block/block-io.h"
@@ -899,8 +899,10 @@ static int blkio_open(BlockDriverState *bs, QDict *options, int flags,
}
bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF;
- bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP |
- BDRV_REQ_NO_FALLBACK;
+ bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
+#ifdef CONFIG_BLKIO_WRITE_ZEROS_FUA
+ bs->supported_zero_flags |= BDRV_REQ_FUA;
+#endif
qemu_mutex_init(&s->blkio_lock);
qemu_co_mutex_init(&s->bounce_lock);
diff --git a/block/blklogwrites.c b/block/blklogwrites.c
index ed38a93..70ac76f 100644
--- a/block/blklogwrites.c
+++ b/block/blklogwrites.c
@@ -14,8 +14,8 @@
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "block/block-io.h"
#include "block/block_int.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -281,9 +281,11 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
ret = 0;
fail_log:
if (ret < 0) {
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->log_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
s->log_file = NULL;
qemu_mutex_destroy(&s->mutex);
}
@@ -296,10 +298,12 @@ static void blk_log_writes_close(BlockDriverState *bs)
{
BDRVBlkLogWritesState *s = bs->opaque;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->log_file);
s->log_file = NULL;
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
qemu_mutex_destroy(&s->mutex);
}
diff --git a/block/blkreplay.c b/block/blkreplay.c
index 792d980..16d8b12 100644
--- a/block/blkreplay.c
+++ b/block/blkreplay.c
@@ -13,7 +13,7 @@
#include "qemu/module.h"
#include "block/block-io.h"
#include "block/block_int.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qapi/error.h"
typedef struct Request {
diff --git a/block/blkverify.c b/block/blkverify.c
index 5a9bf67..3a71f74 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -12,8 +12,8 @@
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "block/block-io.h"
#include "block/block_int.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -151,10 +151,12 @@ static void blkverify_close(BlockDriverState *bs)
{
BDRVBlkverifyState *s = bs->opaque;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->test_file);
s->test_file = NULL;
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
static int64_t coroutine_fn GRAPH_RDLOCK
diff --git a/block/block-backend.c b/block/block-backend.c
index db6f9b9..68209bb 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -11,15 +11,15 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/coroutines.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/runstate.h"
-#include "sysemu/replay.h"
+#include "system/blockdev.h"
+#include "system/runstate.h"
+#include "system/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
@@ -136,9 +136,9 @@ static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
-static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp);
+static bool GRAPH_RDLOCK
+blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, GHashTable *visited,
+ Transaction *tran, Error **errp);
static char *blk_root_get_parent_desc(BdrvChild *child)
{
@@ -253,7 +253,7 @@ static bool blk_can_inactivate(BlockBackend *blk)
* guest. For block job BBs that satisfy this, we can just allow
* it. This is the case for mirror job source, which is required
* by libvirt non-shared block migration. */
- if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
+ if (!(blk->perm & ~BLK_PERM_CONSISTENT_READ)) {
return true;
}
@@ -854,15 +854,6 @@ BlockBackendPublic *blk_get_public(BlockBackend *blk)
}
/*
- * Returns a BlockBackend given the associated @public fields.
- */
-BlockBackend *blk_by_public(BlockBackendPublic *public)
-{
- GLOBAL_STATE_CODE();
- return container_of(public, BlockBackend, public);
-}
-
-/*
* Disassociates the currently associated BlockDriverState from @blk.
*/
void blk_remove_bs(BlockBackend *blk)
@@ -898,9 +889,11 @@ void blk_remove_bs(BlockBackend *blk)
root = blk->root;
blk->root = NULL;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_root_unref_child(root);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
/*
@@ -909,15 +902,27 @@ void blk_remove_bs(BlockBackend *blk)
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+ uint64_t perm, shared_perm;
GLOBAL_STATE_CODE();
bdrv_ref(bs);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
+
+ if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) {
+ blk->disable_perm = true;
+ perm = 0;
+ shared_perm = BLK_PERM_ALL;
+ } else {
+ perm = blk->perm;
+ shared_perm = blk->shared_perm;
+ }
+
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
- blk->perm, blk->shared_perm,
- blk, errp);
+ perm, shared_perm, blk, errp);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
if (blk->root == NULL) {
return -EPERM;
}
@@ -1028,22 +1033,38 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk)
return blk->dev;
}
-/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
- * device attached to the BlockBackend. */
-char *blk_get_attached_dev_id(BlockBackend *blk)
+/*
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
+static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id)
{
DeviceState *dev = blk->dev;
IO_CODE();
if (!dev) {
return g_strdup("");
- } else if (dev->id) {
+ } else if (want_id && dev->id) {
return g_strdup(dev->id);
}
return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}
+char *blk_get_attached_dev_id(BlockBackend *blk)
+{
+ return blk_get_attached_dev_id_or_path(blk, true);
+}
+
+/*
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
+static char *blk_get_attached_dev_path(BlockBackend *blk)
+{
+ return blk_get_attached_dev_id_or_path(blk, false);
+}
+
/*
* Return the BlockBackend which has the device model @dev attached if it
* exists, else null.
@@ -1214,12 +1235,6 @@ BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
return blk->iostatus;
}
-void blk_iostatus_disable(BlockBackend *blk)
-{
- GLOBAL_STATE_CODE();
- blk->iostatus_enabled = false;
-}
-
void blk_iostatus_reset(BlockBackend *blk)
{
GLOBAL_STATE_CODE();
@@ -2137,9 +2152,10 @@ static void send_qmp_error_event(BlockBackend *blk,
{
IoOperationType optype;
BlockDriverState *bs = blk_bs(blk);
+ g_autofree char *path = blk_get_attached_dev_path(blk);
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
- qapi_event_send_block_io_error(blk_name(blk),
+ qapi_event_send_block_io_error(path, blk_name(blk),
bs ? bdrv_get_node_name(bs) : NULL, optype,
action, blk_iostatus_is_enabled(blk),
error == ENOSPC, strerror(error));
@@ -2228,28 +2244,6 @@ void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
blk->enable_write_cache = wce;
}
-void blk_activate(BlockBackend *blk, Error **errp)
-{
- BlockDriverState *bs = blk_bs(blk);
- GLOBAL_STATE_CODE();
-
- if (!bs) {
- error_setg(errp, "Device '%s' has no medium", blk->name);
- return;
- }
-
- /*
- * Migration code can call this function in coroutine context, so leave
- * coroutine context if necessary.
- */
- if (qemu_in_coroutine()) {
- bdrv_co_activate(bs, errp);
- } else {
- GRAPH_RDLOCK_GUARD_MAINLOOP();
- bdrv_activate(bs, errp);
- }
-}
-
bool coroutine_fn blk_co_is_inserted(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
@@ -2367,48 +2361,6 @@ void *blk_blockalign(BlockBackend *blk, size_t size)
return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
-{
- BlockDriverState *bs = blk_bs(blk);
- GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
-
- if (!bs) {
- return false;
- }
-
- return bdrv_op_is_blocked(bs, op, errp);
-}
-
-void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
-{
- BlockDriverState *bs = blk_bs(blk);
- GLOBAL_STATE_CODE();
-
- if (bs) {
- bdrv_op_unblock(bs, op, reason);
- }
-}
-
-void blk_op_block_all(BlockBackend *blk, Error *reason)
-{
- BlockDriverState *bs = blk_bs(blk);
- GLOBAL_STATE_CODE();
-
- if (bs) {
- bdrv_op_block_all(bs, reason);
- }
-}
-
-void blk_op_unblock_all(BlockBackend *blk, Error *reason)
-{
- BlockDriverState *bs = blk_bs(blk);
- GLOBAL_STATE_CODE();
-
- if (bs) {
- bdrv_op_unblock_all(bs, reason);
- }
-}
/**
* Return BB's current AioContext. Note that this context may change
@@ -2564,12 +2516,6 @@ void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
notifier_list_add(&blk->remove_bs_notifiers, notify);
}
-void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
-{
- GLOBAL_STATE_CODE();
- notifier_list_add(&blk->insert_bs_notifiers, notify);
-}
-
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
IO_CODE();
diff --git a/block/block-copy.c b/block/block-copy.c
index 7e3b378..1826c2e 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -20,7 +20,7 @@
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
@@ -310,6 +310,7 @@ void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
}
static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
+ int64_t min_cluster_size,
Error **errp)
{
int ret;
@@ -319,6 +320,9 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
+ min_cluster_size = MAX(min_cluster_size,
+ (int64_t)BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
+
target_does_cow = bdrv_backing_chain_next(target);
/*
@@ -329,13 +333,13 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
ret = bdrv_get_info(target, &bdi);
if (ret == -ENOTSUP && !target_does_cow) {
/* Cluster size is not defined */
- warn_report("The target block device doesn't provide "
- "information about the block size and it doesn't have a "
- "backing file. The default block size of %u bytes is "
- "used. If the actual block size of the target exceeds "
- "this default, the backup may be unusable",
- BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
- return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
+ warn_report("The target block device doesn't provide information about "
+ "the block size and it doesn't have a backing file. The "
+ "(default) block size of %" PRIi64 " bytes is used. If the "
+ "actual block size of the target exceeds this value, the "
+ "backup may be unusable",
+ min_cluster_size);
+ return min_cluster_size;
} else if (ret < 0 && !target_does_cow) {
error_setg_errno(errp, -ret,
"Couldn't determine the cluster size of the target image, "
@@ -345,16 +349,17 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
return ret;
} else if (ret < 0 && target_does_cow) {
/* Not fatal; just trudge on ahead. */
- return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
+ return min_cluster_size;
}
- return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
+ return MAX(min_cluster_size, bdi.cluster_size);
}
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
BlockDriverState *copy_bitmap_bs,
const BdrvDirtyBitmap *bitmap,
bool discard_source,
+ uint64_t min_cluster_size,
Error **errp)
{
ERRP_GUARD();
@@ -365,7 +370,18 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
GLOBAL_STATE_CODE();
- cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
+ if (min_cluster_size > INT64_MAX) {
+ error_setg(errp, "min-cluster-size too large: %" PRIu64 " > %" PRIi64,
+ min_cluster_size, INT64_MAX);
+ return NULL;
+ } else if (min_cluster_size && !is_power_of_2(min_cluster_size)) {
+ error_setg(errp, "min-cluster-size needs to be a power of 2");
+ return NULL;
+ }
+
+ cluster_size = block_copy_calculate_cluster_size(target->bs,
+ (int64_t)min_cluster_size,
+ errp);
if (cluster_size < 0) {
return NULL;
}
@@ -568,7 +584,7 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
BlockCopyState *s = t->s;
bool error_is_read = false;
BlockCopyMethod method = t->method;
- int ret;
+ int ret = -1;
WITH_GRAPH_RDLOCK_GUARD() {
ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
@@ -595,7 +611,9 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
if (s->discard_source && ret == 0) {
int64_t nbytes =
MIN(t->req.offset + t->req.bytes, s->len) - t->req.offset;
- bdrv_co_pdiscard(s->source, t->req.offset, nbytes);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ bdrv_co_pdiscard(s->source, t->req.offset, nbytes);
+ }
}
return ret;
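/*
 * Illustrative sketch (function name is hypothetical) of how the new
 * min-cluster-size value combines with the built-in default and the
 * target's reported cluster size in block_copy_calculate_cluster_size().
 */
static int64_t example_effective_cluster_size(int64_t min_cluster_size,
                                              int64_t target_cluster_size)
{
    int64_t min = MAX(min_cluster_size,
                      (int64_t)BLOCK_COPY_CLUSTER_SIZE_DEFAULT);

    /* target_cluster_size <= 0: the target did not report a cluster size */
    return target_cluster_size > 0 ? MAX(min, target_cluster_size) : min;
}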
diff --git a/block/block-ram-registrar.c b/block/block-ram-registrar.c
index 25dbafa..fcda2b8 100644
--- a/block/block-ram-registrar.c
+++ b/block/block-ram-registrar.c
@@ -5,8 +5,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/block-ram-registrar.h"
+#include "system/block-backend.h"
+#include "system/block-ram-registrar.h"
#include "qapi/error.h"
static void ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
diff --git a/block/commit.c b/block/commit.c
index 7c3fdcb..6c4b736 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -15,12 +15,14 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
+#include "block/block-common.h"
+#include "block/coroutines.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/memalign.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
enum {
/*
@@ -126,6 +128,84 @@ static void commit_clean(Job *job)
blk_unref(s->top);
}
+static int commit_iteration(CommitBlockJob *s, int64_t offset,
+ int64_t *requested_bytes, void *buf)
+{
+ BlockErrorAction action;
+ int64_t bytes = *requested_bytes;
+ int ret = 0;
+ bool error_in_source = true;
+
+ /* Copy if allocated above the base */
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_co_common_block_status_above(blk_bs(s->top),
+ s->base_overlay, true, true, offset, COMMIT_BUFFER_SIZE,
+ &bytes, NULL, NULL, NULL);
+ }
+
+ trace_commit_one_iteration(s, offset, bytes, ret);
+
+ if (ret < 0) {
+ goto fail;
+ }
+
+ if (ret & BDRV_BLOCK_ALLOCATED) {
+ if (ret & BDRV_BLOCK_ZERO) {
+ /*
+ * If the top (sub)clusters are smaller than the base
+ * (sub)clusters, this will not unmap unless the underlying device
+ * does some tracking of these requests. Ideally, we would find
+ * the maximal extent of the zero clusters.
+ */
+ ret = blk_co_pwrite_zeroes(s->base, offset, bytes,
+ BDRV_REQ_MAY_UNMAP);
+ if (ret < 0) {
+ error_in_source = false;
+ goto fail;
+ }
+ } else {
+ assert(bytes < SIZE_MAX);
+
+ ret = blk_co_pread(s->top, offset, bytes, buf, 0);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = blk_co_pwrite(s->base, offset, bytes, buf, 0);
+ if (ret < 0) {
+ error_in_source = false;
+ goto fail;
+ }
+ }
+
+ /*
+ * Whether zeroes actually end up on disk depends on the details of
+ * the underlying driver. Therefore, this might rate limit more than
+ * is necessary.
+ */
+ block_job_ratelimit_processed_bytes(&s->common, bytes);
+ }
+
+ /* Publish progress */
+
+ job_progress_update(&s->common.job, bytes);
+
+ *requested_bytes = bytes;
+
+ return 0;
+
+fail:
+ action = block_job_error_action(&s->common, s->on_error,
+ error_in_source, -ret);
+ if (action == BLOCK_ERROR_ACTION_REPORT) {
+ return ret;
+ }
+
+ *requested_bytes = 0;
+
+ return 0;
+}
+
static int coroutine_fn commit_run(Job *job, Error **errp)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
@@ -156,9 +236,6 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);
for (offset = 0; offset < len; offset += n) {
- bool copy;
- bool error_in_source = true;
-
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
@@ -166,38 +243,11 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
if (job_is_cancelled(&s->common.job)) {
break;
}
- /* Copy if allocated above the base */
- ret = blk_co_is_allocated_above(s->top, s->base_overlay, true,
- offset, COMMIT_BUFFER_SIZE, &n);
- copy = (ret > 0);
- trace_commit_one_iteration(s, offset, n, ret);
- if (copy) {
- assert(n < SIZE_MAX);
-
- ret = blk_co_pread(s->top, offset, n, buf, 0);
- if (ret >= 0) {
- ret = blk_co_pwrite(s->base, offset, n, buf, 0);
- if (ret < 0) {
- error_in_source = false;
- }
- }
- }
- if (ret < 0) {
- BlockErrorAction action =
- block_job_error_action(&s->common, s->on_error,
- error_in_source, -ret);
- if (action == BLOCK_ERROR_ACTION_REPORT) {
- return ret;
- } else {
- n = 0;
- continue;
- }
- }
- /* Publish progress */
- job_progress_update(&s->common.job, n);
- if (copy) {
- block_job_ratelimit_processed_bytes(&s->common, n);
+ ret = commit_iteration(s, offset, &n, buf);
+
+ if (ret < 0) {
+ return ret;
}
}
@@ -342,6 +392,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
* this is the responsibility of the interface (i.e. whoever calls
* commit_start()).
*/
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
s->base_overlay = bdrv_find_overlay(top, base);
assert(s->base_overlay);
@@ -374,18 +425,21 @@ void commit_start(const char *job_id, BlockDriverState *bs,
iter_shared_perms, errp);
if (ret < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
}
if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
s->chain_frozen = true;
ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
if (ret < 0) {
goto fail;
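
commit_iteration() above uses *requested_bytes as an in/out parameter: it is overwritten with the number of bytes actually handled, or with 0 when an error was ignored by the on_error policy. A hedged sketch of that caller contract (hypothetical loop, not the patch's exact code):

/* Illustration of the *requested_bytes contract. */
for (offset = 0; offset < len; offset += n) {
    ret = commit_iteration(s, offset, &n, buf);  /* n is written on return */
    if (ret < 0) {
        return ret;           /* on_error policy asked to report the error */
    }
    /* n == 0: the error was ignored, so the same offset is retried */
}
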
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
index 853e01a..36d5d3e 100644
--- a/block/copy-before-write.c
+++ b/block/copy-before-write.c
@@ -24,9 +24,9 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qjson.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "block/block_int.h"
@@ -66,7 +66,8 @@ typedef struct BDRVCopyBeforeWriteState {
/*
* @frozen_read_reqs: current read requests for fleecing user in bs->file
- * node. These areas must not be rewritten by guest.
+ * node. These areas must not be rewritten by guest. There can be multiple
+ * overlapping read requests.
*/
BlockReqList frozen_read_reqs;
@@ -290,8 +291,8 @@ cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
static int coroutine_fn GRAPH_RDLOCK
-cbw_co_snapshot_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset, int64_t bytes,
+cbw_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
@@ -417,6 +418,7 @@ static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp)
qdict_extract_subqdict(options, NULL, "bitmap");
qdict_del(options, "on-cbw-error");
qdict_del(options, "cbw-timeout");
+ qdict_del(options, "min-cluster-size");
out:
visit_free(v);
@@ -476,8 +478,10 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
bs->file->bs->supported_zero_flags);
s->discard_source = flags & BDRV_O_CBW_DISCARD_SOURCE;
+
s->bcs = block_copy_state_new(bs->file, s->target, bs, bitmap,
- flags & BDRV_O_CBW_DISCARD_SOURCE, errp);
+ flags & BDRV_O_CBW_DISCARD_SOURCE,
+ opts->min_cluster_size, errp);
if (!s->bcs) {
error_prepend(errp, "Cannot create block-copy-state: ");
return -EINVAL;
@@ -545,7 +549,9 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
bool discard_source,
+ uint64_t min_cluster_size,
BlockCopyState **bcs,
+ OnCbwError on_cbw_error,
Error **errp)
{
BDRVCopyBeforeWriteState *state;
@@ -563,6 +569,15 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
}
qdict_put_str(opts, "file", bdrv_get_node_name(source));
qdict_put_str(opts, "target", bdrv_get_node_name(target));
+ qdict_put_str(opts, "on-cbw-error", OnCbwError_str(on_cbw_error));
+
+ if (min_cluster_size > INT64_MAX) {
+ error_setg(errp, "min-cluster-size too large: %" PRIu64 " > %" PRIi64,
+ min_cluster_size, INT64_MAX);
+ qobject_unref(opts);
+ return NULL;
+ }
+ qdict_put_int(opts, "min-cluster-size", (int64_t)min_cluster_size);
top = bdrv_insert_node(source, opts, flags, errp);
if (!top) {
diff --git a/block/copy-before-write.h b/block/copy-before-write.h
index 01af0cd..eb93364 100644
--- a/block/copy-before-write.h
+++ b/block/copy-before-write.h
@@ -40,7 +40,9 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
bool discard_source,
+ uint64_t min_cluster_size,
BlockCopyState **bcs,
+ OnCbwError on_cbw_error,
Error **errp);
void bdrv_cbw_drop(BlockDriverState *bs);
diff --git a/block/copy-on-read.c b/block/copy-on-read.c
index c36f253..accf140 100644
--- a/block/copy-on-read.c
+++ b/block/copy-on-read.c
@@ -25,7 +25,7 @@
#include "block/block_int.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "block/copy-on-read.h"
diff --git a/block/coroutines.h b/block/coroutines.h
index f322668..892646b 100644
--- a/block/coroutines.h
+++ b/block/coroutines.h
@@ -28,7 +28,7 @@
#include "block/block_int.h"
/* For blk_bs() in generated block/block-gen.c */
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
/*
* I/O API functions. These functions are thread-safe.
@@ -47,7 +47,7 @@ int coroutine_fn GRAPH_RDLOCK
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base,
- bool want_zero,
+ unsigned int mode,
int64_t offset,
int64_t bytes,
int64_t *pnum,
@@ -78,7 +78,7 @@ int co_wrapper_mixed_bdrv_rdlock
bdrv_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base,
- bool want_zero,
+ unsigned int mode,
int64_t offset,
int64_t bytes,
int64_t *pnum,
diff --git a/block/crypto.c b/block/crypto.c
index 4eed3ff..d4226cc 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -22,7 +22,7 @@
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "crypto/block.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-visit-crypto.h"
@@ -682,7 +682,7 @@ err:
static int block_crypto_probe_luks(const uint8_t *buf,
int buf_size,
const char *filename) {
- return block_crypto_probe_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ return block_crypto_probe_generic(QCRYPTO_BLOCK_FORMAT_LUKS,
buf, buf_size, filename);
}
@@ -691,7 +691,7 @@ static int block_crypto_open_luks(BlockDriverState *bs,
int flags,
Error **errp)
{
- return block_crypto_open_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ return block_crypto_open_generic(QCRYPTO_BLOCK_FORMAT_LUKS,
&block_crypto_runtime_opts_luks,
bs, options, flags, errp);
}
@@ -724,7 +724,7 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
}
create_opts = (QCryptoBlockCreateOptions) {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = *qapi_BlockdevCreateOptionsLUKS_base(luks_opts),
};
@@ -889,7 +889,7 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp)
if (!info) {
return NULL;
}
- assert(info->format == Q_CRYPTO_BLOCK_FORMAT_LUKS);
+ assert(info->format == QCRYPTO_BLOCK_FORMAT_LUKS);
spec_info = g_new(ImageInfoSpecific, 1);
spec_info->type = IMAGE_INFO_SPECIFIC_KIND_LUKS;
@@ -1002,7 +1002,7 @@ coroutine_fn block_crypto_co_amend_luks(BlockDriverState *bs,
QCryptoBlockAmendOptions amend_opts;
amend_opts = (QCryptoBlockAmendOptions) {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = *qapi_BlockdevAmendOptionsLUKS_base(&opts->u.luks),
};
return block_crypto_amend_options_generic_luks(bs, &amend_opts,
diff --git a/block/curl.c b/block/curl.c
index 0fdb6d3..5467678 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -29,8 +29,8 @@
#include "qemu/option.h"
#include "block/block-io.h"
#include "block/block_int.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "crypto/secret.h"
#include <curl/curl.h>
#include "qemu/cutils.h"
diff --git a/block/export/export.c b/block/export/export.c
index 6d51ae8..f3bbf11 100644
--- a/block/export/export.c
+++ b/block/export/export.c
@@ -14,8 +14,8 @@
#include "qemu/osdep.h"
#include "block/block.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/iothread.h"
+#include "system/block-backend.h"
+#include "system/iothread.h"
#include "block/export.h"
#include "block/fuse.h"
#include "block/nbd.h"
@@ -75,6 +75,7 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type)
BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
{
bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread;
+ bool allow_inactive = export->has_allow_inactive && export->allow_inactive;
const BlockExportDriver *drv;
BlockExport *exp = NULL;
BlockDriverState *bs;
@@ -138,14 +139,25 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
}
}
- /*
- * Block exports are used for non-shared storage migration. Make sure
- * that BDRV_O_INACTIVE is cleared and the image is ready for write
- * access since the export could be available before migration handover.
- * ctx was acquired in the caller.
- */
bdrv_graph_rdlock_main_loop();
- bdrv_activate(bs, NULL);
+ if (allow_inactive) {
+ if (!drv->supports_inactive) {
+ error_setg(errp, "Export type does not support inactive exports");
+ bdrv_graph_rdunlock_main_loop();
+ goto fail;
+ }
+ } else {
+ /*
+ * Block exports are used for non-shared storage migration. Make sure
+ * that BDRV_O_INACTIVE is cleared and the image is ready for write
+ * access since the export could be available before migration handover.
+ */
+ ret = bdrv_activate(bs, errp);
+ if (ret < 0) {
+ bdrv_graph_rdunlock_main_loop();
+ goto fail;
+ }
+ }
bdrv_graph_rdunlock_main_loop();
perm = BLK_PERM_CONSISTENT_READ;
@@ -158,6 +170,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
if (!fixed_iothread) {
blk_set_allow_aio_context_change(blk, true);
}
+ if (allow_inactive) {
+ blk_set_force_allow_inactivate(blk);
+ }
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
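
With the change above, an inactive node is only exported when the driver opts in through its supports_inactive flag; otherwise blk_exp_add() still activates the node first. A hedged sketch of what such an opt-in could look like in a driver definition (only the supports_inactive field comes from this hunk; the rest is illustrative):

/* Hypothetical export driver that accepts inactive block nodes. */
static const BlockExportDriver blk_exp_example = {
    .type              = BLOCK_EXPORT_TYPE_NBD,   /* illustrative choice */
    .instance_size     = sizeof(BlockExport),
    .supports_inactive = true,   /* lets blk_exp_add() skip bdrv_activate() */
    /* .create, .delete, ... omitted */
};
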
diff --git a/block/export/fuse.c b/block/export/fuse.c
index 3307b64..465cc98 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -28,7 +28,7 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-block.h"
#include "qemu/main-loop.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include <fuse.h>
#include <fuse_lowlevel.h>
diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c
index 172f73c..bd852e5 100644
--- a/block/export/vduse-blk.c
+++ b/block/export/vduse-blk.c
@@ -273,7 +273,6 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
uint64_t logical_block_size = VIRTIO_BLK_SECTOR_SIZE;
uint16_t num_queues = VDUSE_DEFAULT_NUM_QUEUE;
uint16_t queue_size = VDUSE_DEFAULT_QUEUE_SIZE;
- Error *local_err = NULL;
struct virtio_blk_config config = { 0 };
uint64_t features;
int i, ret;
@@ -297,10 +296,8 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
if (vblk_opts->has_logical_block_size) {
logical_block_size = vblk_opts->logical_block_size;
- check_block_size(exp->id, "logical-block-size", logical_block_size,
- &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!check_block_size("logical-block-size", logical_block_size,
+ errp)) {
return -EINVAL;
}
}
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index 50c358e..d9d2014 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -319,7 +319,6 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
{
VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
- Error *local_err = NULL;
uint64_t logical_block_size;
uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;
@@ -330,10 +329,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
} else {
logical_block_size = VIRTIO_BLK_SECTOR_SIZE;
}
- check_block_size(exp->id, "logical-block-size", logical_block_size,
- &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!check_block_size("logical-block-size", logical_block_size, errp)) {
return -EINVAL;
}
diff --git a/block/export/virtio-blk-handler.h b/block/export/virtio-blk-handler.h
index 150d44c..cca1544 100644
--- a/block/export/virtio-blk-handler.h
+++ b/block/export/virtio-blk-handler.h
@@ -13,7 +13,7 @@
#ifndef VIRTIO_BLK_HANDLER_H
#define VIRTIO_BLK_HANDLER_H
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#define VIRTIO_BLK_SECTOR_BITS 9
#define VIRTIO_BLK_SECTOR_SIZE (1ULL << VIRTIO_BLK_SECTOR_BITS)
diff --git a/block/file-posix.c b/block/file-posix.c
index ff928b5..9b5f08c 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -36,11 +36,12 @@
#include "block/thread-pool.h"
#include "qemu/iov.h"
#include "block/raw-aio.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "scsi/pr-manager.h"
#include "scsi/constants.h"
+#include "scsi/utils.h"
#if defined(__APPLE__) && (__MACH__)
#include <sys/ioctl.h>
@@ -72,6 +73,7 @@
#include <linux/blkzoned.h>
#endif
#include <linux/cdrom.h>
+#include <linux/dm-ioctl.h>
#include <linux/fd.h>
#include <linux/fs.h>
#include <linux/hdreg.h>
@@ -110,6 +112,10 @@
#include <sys/diskslice.h>
#endif
+#ifdef EMSCRIPTEN
+#include <sys/ioctl.h>
+#endif
+
/* OS X does not have O_DSYNC */
#ifndef O_DSYNC
#ifdef O_SYNC
@@ -134,6 +140,22 @@
#define RAW_LOCK_PERM_BASE 100
#define RAW_LOCK_SHARED_BASE 200
+/*
+ * Multiple retries are mostly meant for two separate scenarios:
+ *
+ * - DM_MPATH_PROBE_PATHS returns success, but before SG_IO completes, another
+ * path goes down.
+ *
+ * - DM_MPATH_PROBE_PATHS failed all paths in the current path group, so we have
+ * to send another SG_IO to switch to another path group to probe the paths in
+ * it.
+ *
+ * Even if each path is in a separate path group (path_grouping_policy set to
+ * failover), it's rare to have more than eight path groups - and even then
+ * pretty unlikely that only bad path groups would be chosen in eight retries.
+ */
+#define SG_IO_MAX_RETRIES 8
+
typedef struct BDRVRawState {
int fd;
bool use_lock;
@@ -161,6 +183,7 @@ typedef struct BDRVRawState {
bool use_linux_aio:1;
bool has_laio_fdsync:1;
bool use_linux_io_uring:1;
+ bool use_mpath:1;
int page_cache_inconsistent; /* errno from fdatasync failure */
bool has_fallocate;
bool needs_alignment;
@@ -194,6 +217,7 @@ static int fd_open(BlockDriverState *bs)
}
static int64_t raw_getlength(BlockDriverState *bs);
+static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs);
typedef struct RawPosixAIOData {
BlockDriverState *bs;
@@ -780,17 +804,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
}
#endif
- if (S_ISBLK(st.st_mode)) {
-#ifdef __linux__
- /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
- * not rely on the contents of discarded blocks unless using O_DIRECT.
- * Same for BLKZEROOUT.
- */
- if (!(bs->open_flags & BDRV_O_NOCACHE)) {
- s->has_write_zeroes = false;
- }
-#endif
- }
#ifdef __FreeBSD__
if (S_ISCHR(st.st_mode)) {
/*
@@ -804,6 +817,13 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
#endif
s->needs_alignment = raw_needs_alignment(bs);
+ bs->supported_write_flags = BDRV_REQ_FUA;
+ if (s->use_linux_aio && !laio_has_fua()) {
+ bs->supported_write_flags &= ~BDRV_REQ_FUA;
+ } else if (s->use_linux_io_uring && !luring_has_fua()) {
+ bs->supported_write_flags &= ~BDRV_REQ_FUA;
+ }
+
bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
if (S_ISREG(st.st_mode)) {
/* When extending regular files, we get zeros from the OS */
@@ -1268,10 +1288,10 @@ static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned)
}
#endif /* defined(CONFIG_BLKZONED) */
+#ifdef CONFIG_LINUX
/*
* Get a sysfs attribute value as a long integer.
*/
-#ifdef CONFIG_LINUX
static long get_sysfs_long_val(struct stat *st, const char *attribute)
{
g_autofree char *str = NULL;
@@ -1291,6 +1311,30 @@ static long get_sysfs_long_val(struct stat *st, const char *attribute)
}
return ret;
}
+
+/*
+ * Get a sysfs attribute value as a uint32_t.
+ */
+static int get_sysfs_u32_val(struct stat *st, const char *attribute,
+ uint32_t *u32)
+{
+ g_autofree char *str = NULL;
+ const char *end;
+ unsigned int val;
+ int ret;
+
+ ret = get_sysfs_str_val(st, attribute, &str);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* The file ends with '\n'; pass 'end' to accept that. */
+ ret = qemu_strtoui(str, &end, 10, &val);
+ if (ret == 0 && end && *end == '\0') {
+ *u32 = val;
+ }
+ return ret;
+}
#endif
static int hdev_get_max_segments(int fd, struct stat *st)
@@ -1310,6 +1354,23 @@ static int hdev_get_max_segments(int fd, struct stat *st)
#endif
}
+/*
+ * Fills in *dalign with the discard alignment and returns 0 on success,
+ * -errno otherwise.
+ */
+static int hdev_get_pdiscard_alignment(struct stat *st, uint32_t *dalign)
+{
+#ifdef CONFIG_LINUX
+ /*
+ * Note that Linux "discard_granularity" is QEMU "discard_alignment". Linux
+ * "discard_alignment" is something else.
+ */
+ return get_sysfs_u32_val(st, "discard_granularity", dalign);
+#else
+ return -ENOTSUP;
+#endif
+}
+
#if defined(CONFIG_BLKZONED)
/*
* If the reset_all flag is true, then the wps of zone whose state is
@@ -1398,7 +1459,7 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
Error **errp)
{
BDRVRawState *s = bs->opaque;
- BlockZoneModel zoned;
+ BlockZoneModel zoned = BLK_Z_NONE;
int ret;
ret = get_sysfs_zoned_model(st, &zoned);
@@ -1519,6 +1580,30 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
}
}
+ if (S_ISBLK(st.st_mode)) {
+ uint32_t dalign = 0;
+ int ret;
+
+ ret = hdev_get_pdiscard_alignment(&st, &dalign);
+ if (ret == 0 && dalign != 0) {
+ uint32_t ralign = bs->bl.request_alignment;
+
+ /* Probably never happens, but handle it just in case */
+ if (dalign < ralign && (ralign % dalign == 0)) {
+ dalign = ralign;
+ }
+
+ /* The block layer requires a multiple of request_alignment */
+ if (dalign % ralign != 0) {
+ error_setg(errp, "Invalid pdiscard_alignment limit %u is not a "
+ "multiple of request_alignment %u", dalign, ralign);
+ return;
+ }
+
+ bs->bl.pdiscard_alignment = dalign;
+ }
+ }
+
raw_refresh_zoned_limits(bs, &st, errp);
}
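
The alignment handling above only accepts a discard granularity that is a multiple of the request alignment, bumping it up first when it is a smaller exact divisor. A few worked cases, with example values that are not taken from the patch:

/* dalign = sysfs discard_granularity, ralign = bs->bl.request_alignment
 *
 *  dalign 4096, ralign  512: 4096 % 512 == 0      -> pdiscard_alignment = 4096
 *  dalign  512, ralign 4096: bumped to 4096 first -> pdiscard_alignment = 4096
 *  dalign 3072, ralign 4096: no clean bump, 3072 % 4096 != 0 -> error is set
 */
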
@@ -2003,8 +2088,11 @@ static int handle_aiocb_write_zeroes_unmap(void *opaque)
}
#ifndef HAVE_COPY_FILE_RANGE
-static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
- off_t *out_off, size_t len, unsigned int flags)
+#ifndef EMSCRIPTEN
+static
+#endif
+ssize_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
+ off_t *out_off, size_t len, unsigned int flags)
{
#ifdef __NR_copy_file_range
return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
@@ -2477,7 +2565,8 @@ static inline bool raw_check_linux_aio(BDRVRawState *s)
#endif
static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
- uint64_t bytes, QEMUIOVector *qiov, int type)
+ uint64_t bytes, QEMUIOVector *qiov, int type,
+ int flags)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
@@ -2508,13 +2597,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
#ifdef CONFIG_LINUX_IO_URING
} else if (raw_check_linux_io_uring(s)) {
assert(qiov->size == bytes);
- ret = luring_co_submit(bs, s->fd, offset, qiov, type);
+ ret = luring_co_submit(bs, s->fd, offset, qiov, type, flags);
goto out;
#endif
#ifdef CONFIG_LINUX_AIO
} else if (raw_check_linux_aio(s)) {
assert(qiov->size == bytes);
- ret = laio_co_submit(s->fd, offset, qiov, type,
+ ret = laio_co_submit(s->fd, offset, qiov, type, flags,
s->aio_max_batch);
goto out;
#endif
@@ -2534,6 +2623,10 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
assert(qiov->size == bytes);
ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
+ if (ret == 0 && (flags & BDRV_REQ_FUA)) {
+ /* TODO Use pwritev2() instead if it's available */
+ ret = raw_co_flush_to_disk(bs);
+ }
goto out; /* Avoid the compiler err of unused label */
out:
@@ -2571,14 +2664,14 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
+ return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ, flags);
}
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
+ return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE, flags);
}
static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
@@ -2600,12 +2693,12 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
#ifdef CONFIG_LINUX_IO_URING
if (raw_check_linux_io_uring(s)) {
- return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
+ return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH, 0);
}
#endif
#ifdef CONFIG_LINUX_AIO
if (s->has_laio_fdsync && raw_check_linux_aio(s)) {
- return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0);
+ return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0, 0);
}
#endif
return raw_thread_pool_submit(handle_aiocb_flush, &acb);
@@ -3188,7 +3281,7 @@ static int find_allocation(BlockDriverState *bs, off_t start,
* well exceed it.
*/
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
- bool want_zero,
+ unsigned int mode,
int64_t offset,
int64_t bytes, int64_t *pnum,
int64_t *map,
@@ -3204,7 +3297,8 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
return ret;
}
- if (!want_zero) {
+ if (!(mode & BDRV_WANT_ZERO)) {
+ /* There is no backing file - all bytes are allocated in this file. */
*pnum = bytes;
*map = offset;
*file = bs;
@@ -3540,7 +3634,7 @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
}
trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
- return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
+ return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND, 0);
}
#endif
@@ -4178,15 +4272,105 @@ hdev_open_Mac_error:
/* Since this does ioctl the device must be already opened */
bs->sg = hdev_is_sg(bs);
+ /* sg devices aren't even block devices and can't use dm-mpath */
+ s->use_mpath = !bs->sg;
+
return ret;
}
#if defined(__linux__)
+#if defined(DM_MPATH_PROBE_PATHS)
+static bool coroutine_fn sgio_path_error(int ret, sg_io_hdr_t *io_hdr)
+{
+ if (ret < 0) {
+ switch (ret) {
+ case -ENODEV:
+ return true;
+ case -EAGAIN:
+ /*
+ * The device is probably suspended. This happens while the dm table
+ * is reloaded, e.g. because a path is added or removed. This is an
+ * operation that should complete within 1ms, so just wait a bit and
+ * retry.
+ *
+ * If the device was suspended for another reason, we'll wait and
+ * retry SG_IO_MAX_RETRIES times. This is a tolerable delay before
+ * we return an error and potentially stop the VM.
+ */
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ if (io_hdr->host_status != SCSI_HOST_OK) {
+ return true;
+ }
+
+ switch (io_hdr->status) {
+ case GOOD:
+ case CONDITION_GOOD:
+ case INTERMEDIATE_GOOD:
+ case INTERMEDIATE_C_GOOD:
+ case RESERVATION_CONFLICT:
+ case COMMAND_TERMINATED:
+ return false;
+ case CHECK_CONDITION:
+ return !scsi_sense_buf_is_guest_recoverable(io_hdr->sbp,
+ io_hdr->mx_sb_len);
+ default:
+ return true;
+ }
+}
+
+static bool coroutine_fn hdev_co_ioctl_sgio_retry(RawPosixAIOData *acb, int ret)
+{
+ BDRVRawState *s = acb->bs->opaque;
+ RawPosixAIOData probe_acb;
+
+ if (!s->use_mpath) {
+ return false;
+ }
+
+ if (!sgio_path_error(ret, acb->ioctl.buf)) {
+ return false;
+ }
+
+ probe_acb = (RawPosixAIOData) {
+ .bs = acb->bs,
+ .aio_type = QEMU_AIO_IOCTL,
+ .aio_fildes = s->fd,
+ .aio_offset = 0,
+ .ioctl = {
+ .buf = NULL,
+ .cmd = DM_MPATH_PROBE_PATHS,
+ },
+ };
+
+ ret = raw_thread_pool_submit(handle_aiocb_ioctl, &probe_acb);
+ if (ret == -ENOTTY) {
+ s->use_mpath = false;
+ } else if (ret == -EAGAIN) {
+ /* The device might be suspended for a table reload, worth retrying */
+ return true;
+ }
+
+ return ret == 0;
+}
+#else
+static bool coroutine_fn hdev_co_ioctl_sgio_retry(RawPosixAIOData *acb, int ret)
+{
+ return false;
+}
+#endif /* DM_MPATH_PROBE_PATHS */
+
static int coroutine_fn
hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
+ int retries = SG_IO_MAX_RETRIES;
int ret;
ret = fd_open(bs);
@@ -4214,7 +4398,11 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
},
};
- return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
+ do {
+ ret = raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
+ } while (req == SG_IO && retries-- && hdev_co_ioctl_sgio_retry(&acb, ret));
+
+ return ret;
}
#endif /* linux */
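
Taken together, the additions above form a bounded retry loop: an SG_IO request is reissued at most SG_IO_MAX_RETRIES times, and only while the failure looks like a path error and probing the multipath device (or waiting out a brief EAGAIN during a table reload) suggests another attempt may succeed. A condensed, illustration-only restatement with a hypothetical wrapper name:

static int coroutine_fn sg_io_with_mpath_retry(RawPosixAIOData *acb,
                                               unsigned long req)
{
    int retries = SG_IO_MAX_RETRIES;
    int ret;

    do {
        ret = raw_thread_pool_submit(handle_aiocb_ioctl, acb);
        /* Retry only SG_IO, and only while the failure looks path-related
         * and hdev_co_ioctl_sgio_retry() says another attempt is worthwhile. */
    } while (req == SG_IO && retries-- && hdev_co_ioctl_sgio_retry(acb, ret));

    return ret;
}
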
diff --git a/block/file-win32.c b/block/file-win32.c
index 7e1baa1..af9aea6 100644
--- a/block/file-win32.c
+++ b/block/file-win32.c
@@ -33,8 +33,8 @@
#include "trace.h"
#include "block/thread-pool.h"
#include "qemu/iov.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include <windows.h>
#include <winioctl.h>
diff --git a/block/gluster.c b/block/gluster.c
index f8b415f..89abd40 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -15,7 +15,7 @@
#include "block/block_int.h"
#include "block/qdict.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
@@ -514,7 +514,6 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
SocketAddressList **tail;
QDict *backing_options = NULL;
Error *local_err = NULL;
- char *str = NULL;
const char *ptr;
int i, type, num_servers;
@@ -547,7 +546,8 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
tail = &gconf->server;
for (i = 0; i < num_servers; i++) {
- str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
+ g_autofree char *str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.",
+ i);
qdict_extract_subqdict(options, &backing_options, str);
/* create opts info from runtime_type_opts list */
@@ -658,8 +658,6 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
qobject_unref(backing_options);
backing_options = NULL;
- g_free(str);
- str = NULL;
}
return 0;
@@ -668,7 +666,6 @@ out:
error_propagate(errp, local_err);
qapi_free_SocketAddress(gsconf);
qemu_opts_del(opts);
- g_free(str);
qobject_unref(backing_options);
errno = EINVAL;
return -errno;
@@ -809,6 +806,8 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
goto out;
}
+ warn_report_once("'gluster' is deprecated");
+
filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
@@ -973,8 +972,6 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
g_free(state->opaque);
state->opaque = NULL;
-
- return;
}
@@ -994,8 +991,6 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
g_free(state->opaque);
state->opaque = NULL;
-
- return;
}
#ifdef CONFIG_GLUSTERFS_ZEROFILL
@@ -1466,7 +1461,7 @@ exit:
* (Based on raw_co_block_status() from file-posix.c.)
*/
static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
- bool want_zero,
+ unsigned int mode,
int64_t offset,
int64_t bytes,
int64_t *pnum,
@@ -1483,7 +1478,7 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
return ret;
}
- if (!want_zero) {
+ if (!(mode & BDRV_WANT_ZERO)) {
*pnum = bytes;
*map = offset;
*file = bs;
diff --git a/block/io.c b/block/io.c
index 301514c..ac5c717 100644
--- a/block/io.c
+++ b/block/io.c
@@ -24,7 +24,7 @@
#include "qemu/osdep.h"
#include "trace.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
@@ -37,11 +37,15 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
+#include "qemu/units.h"
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
+/* Maximum read size for checking if data reads as zero, in bytes */
+#define MAX_ZERO_CHECK_BUFFER (128 * KiB)
+
static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);
@@ -409,7 +413,6 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
/* At this point, we should be always running in the main loop. */
GLOBAL_STATE_CODE();
assert(bs->quiesce_counter > 0);
- GLOBAL_STATE_CODE();
/* Re-enable things in child-to-parent order */
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
@@ -1058,6 +1061,10 @@ bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
return -ENOMEDIUM;
}
+ if (bs->open_flags & BDRV_O_NO_FLUSH) {
+ flags &= ~BDRV_REQ_FUA;
+ }
+
if ((flags & BDRV_REQ_FUA) &&
(~bs->supported_write_flags & BDRV_REQ_FUA)) {
flags &= ~BDRV_REQ_FUA;
@@ -2360,10 +2367,8 @@ int bdrv_flush_all(void)
* Drivers not implementing the functionality are assumed to not support
* backing files, hence all their sectors are reported as allocated.
*
- * If 'want_zero' is true, the caller is querying for mapping
- * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
- * _ZERO where possible; otherwise, the result favors larger 'pnum',
- * with a focus on accurate BDRV_BLOCK_ALLOCATED.
+ * 'mode' serves as a hint as to which results are favored; see the
+ * BDRV_WANT_* macros for details.
*
* If 'offset' is beyond the end of the disk image the return value is
* BDRV_BLOCK_EOF and 'pnum' is set to 0.
@@ -2383,7 +2388,7 @@ int bdrv_flush_all(void)
* set to the host mapping and BDS corresponding to the guest offset.
*/
static int coroutine_fn GRAPH_RDLOCK
-bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
+bdrv_co_do_block_status(BlockDriverState *bs, unsigned int mode,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
@@ -2472,7 +2477,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
local_file = bs;
local_map = aligned_offset;
} else {
- ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
+ ret = bs->drv->bdrv_co_block_status(bs, mode, aligned_offset,
aligned_bytes, pnum, &local_map,
&local_file);
@@ -2484,10 +2489,10 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
* the cache requires an RCU update, so double check here to avoid
* such an update if possible.
*
- * Check want_zero, because we only want to update the cache when we
+ * Check mode, because we only want to update the cache when we
* have accurate information about what is zero and what is data.
*/
- if (want_zero &&
+ if (mode == BDRV_WANT_PRECISE &&
ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
QLIST_EMPTY(&bs->children))
{
@@ -2544,7 +2549,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
- ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
+ ret = bdrv_co_do_block_status(local_file, mode, local_map,
*pnum, pnum, &local_map, &local_file);
goto out;
}
@@ -2556,7 +2561,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
if (!cow_bs) {
ret |= BDRV_BLOCK_ZERO;
- } else if (want_zero) {
+ } else if (mode == BDRV_WANT_PRECISE) {
int64_t size2 = bdrv_co_getlength(cow_bs);
if (size2 >= 0 && offset >= size2) {
@@ -2565,14 +2570,14 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
}
}
- if (want_zero && ret & BDRV_BLOCK_RECURSE &&
+ if (mode == BDRV_WANT_PRECISE && ret & BDRV_BLOCK_RECURSE &&
local_file && local_file != bs &&
(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
(ret & BDRV_BLOCK_OFFSET_VALID)) {
int64_t file_pnum;
int ret2;
- ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
+ ret2 = bdrv_co_do_block_status(local_file, mode, local_map,
*pnum, &file_pnum, NULL, NULL);
if (ret2 >= 0) {
/* Ignore errors. This is just providing extra information, it
@@ -2623,7 +2628,7 @@ int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base,
- bool want_zero,
+ unsigned int mode,
int64_t offset,
int64_t bytes,
int64_t *pnum,
@@ -2650,7 +2655,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
return 0;
}
- ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
+ ret = bdrv_co_do_block_status(bs, mode, offset, bytes, pnum,
map, file);
++*depth;
if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
@@ -2667,7 +2672,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
p = bdrv_filter_or_cow_bs(p))
{
- ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
+ ret = bdrv_co_do_block_status(p, mode, offset, bytes, pnum,
map, file);
++*depth;
if (ret < 0) {
@@ -2730,7 +2735,8 @@ int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
BlockDriverState **file)
{
IO_CODE();
- return bdrv_co_common_block_status_above(bs, base, false, true, offset,
+ return bdrv_co_common_block_status_above(bs, base, false,
+ BDRV_WANT_PRECISE, offset,
bytes, pnum, map, file, NULL);
}
@@ -2748,27 +2754,89 @@ int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
* by @offset and @bytes is known to read as zeroes.
* Return 1 if that is the case, 0 otherwise and -errno on error.
* This test is meant to be fast rather than accurate so returning 0
- * does not guarantee non-zero data.
+ * does not guarantee non-zero data; but a return of 1 is reliable.
*/
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
int64_t bytes)
{
int ret;
- int64_t pnum = bytes;
+ int64_t pnum;
IO_CODE();
- if (!bytes) {
- return 1;
+ while (bytes) {
+ ret = bdrv_co_common_block_status_above(bs, NULL, false,
+ BDRV_WANT_ZERO, offset, bytes,
+ &pnum, NULL, NULL, NULL);
+
+ if (ret < 0) {
+ return ret;
+ }
+ if (!(ret & BDRV_BLOCK_ZERO)) {
+ return 0;
+ }
+ offset += pnum;
+ bytes -= pnum;
}
- ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
- bytes, &pnum, NULL, NULL, NULL);
+ return 1;
+}
+
+/*
+ * Check @bs (and its backing chain) to see if the entire image is known
+ * to read as zeroes.
+ * Return 1 if that is the case, 0 otherwise and -errno on error.
+ * This test is meant to be fast rather than accurate so returning 0
+ * does not guarantee non-zero data; however, a return of 1 is reliable,
+ * and this function can report 1 in more cases than bdrv_co_is_zero_fast.
+ */
+int coroutine_fn bdrv_co_is_all_zeroes(BlockDriverState *bs)
+{
+ int ret;
+ int64_t pnum, bytes;
+ char *buf;
+ QEMUIOVector local_qiov;
+ IO_CODE();
+ bytes = bdrv_co_getlength(bs);
+ if (bytes < 0) {
+ return bytes;
+ }
+
+ /* First probe - see if the entire image reads as zero */
+ ret = bdrv_co_common_block_status_above(bs, NULL, false, BDRV_WANT_ZERO,
+ 0, bytes, &pnum, NULL, NULL,
+ NULL);
if (ret < 0) {
return ret;
}
+ if (ret & BDRV_BLOCK_ZERO) {
+ return bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
+ }
- return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
+ /*
+ * Because of the way 'blockdev-create' works, raw files tend to
+ * be created with a non-sparse region at the front to make
+ * alignment probing easier. If the block starts with only a
+ * small allocated region, it is still worth the effort to see if
+ * the rest of the image is still sparse, coupled with manually
+ * reading the first region to see if it reads zero after all.
+ */
+ if (pnum > MAX_ZERO_CHECK_BUFFER) {
+ return 0;
+ }
+ ret = bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
+ if (ret <= 0) {
+ return ret;
+ }
+ /* Only the head of the image is unknown, and it's small. Read it. */
+ buf = qemu_blockalign(bs, pnum);
+ qemu_iovec_init_buf(&local_qiov, buf, pnum);
+ ret = bdrv_driver_preadv(bs, 0, pnum, &local_qiov, 0, 0);
+ if (ret >= 0) {
+ ret = buffer_is_zero(buf, pnum);
+ }
+ qemu_vfree(buf);
+ return ret;
}
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
@@ -2778,9 +2846,9 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
int64_t dummy;
IO_CODE();
- ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
- bytes, pnum ? pnum : &dummy, NULL,
- NULL, NULL);
+ ret = bdrv_co_common_block_status_above(bs, bs, true, BDRV_WANT_ALLOCATED,
+ offset, bytes, pnum ? pnum : &dummy,
+ NULL, NULL, NULL);
if (ret < 0) {
return ret;
}
@@ -2813,7 +2881,8 @@ int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
int ret;
IO_CODE();
- ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
+ ret = bdrv_co_common_block_status_above(bs, base, include_base,
+ BDRV_WANT_ALLOCATED,
offset, bytes, pnum, NULL, NULL,
&depth);
if (ret < 0) {
@@ -3098,18 +3167,19 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
return 0;
}
- if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
+ if (!bs->drv->bdrv_co_pdiscard) {
return 0;
}
/* Invalidate the cached block-status data range if this discard overlaps */
bdrv_bsc_invalidate_range(bs, offset, bytes);
- /* Discard is advisory, but some devices track and coalesce
+ /*
+ * Discard is advisory, but some devices track and coalesce
* unaligned requests, so we must pass everything down rather than
- * round here. Still, most devices will just silently ignore
- * unaligned requests (by returning -ENOTSUP), so we must fragment
- * the request accordingly. */
+ * round here. Still, most devices reject unaligned requests with
+ * -EINVAL or -ENOTSUP, so we must fragment the request accordingly.
+ */
align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
assert(align % bs->bl.request_alignment == 0);
head = offset % align;
@@ -3157,27 +3227,15 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
ret = -ENOMEDIUM;
goto out;
}
- if (bs->drv->bdrv_co_pdiscard) {
- ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
- } else {
- BlockAIOCB *acb;
- CoroutineIOCompletion co = {
- .coroutine = qemu_coroutine_self(),
- };
-
- acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
- bdrv_co_io_em_complete, &co);
- if (acb == NULL) {
- ret = -EIO;
- goto out;
+
+ ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
+ if (ret && ret != -ENOTSUP) {
+ if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) {
+ /* Silently skip rejected unaligned head/tail requests */
} else {
- qemu_coroutine_yield();
- ret = co.ret;
+ goto out; /* bail out */
}
}
- if (ret && ret != -ENOTSUP) {
- goto out;
- }
offset += num;
bytes -= num;
@@ -3705,8 +3763,8 @@ bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
}
int coroutine_fn
-bdrv_co_snapshot_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset, int64_t bytes,
+bdrv_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
@@ -3724,7 +3782,7 @@ bdrv_co_snapshot_block_status(BlockDriverState *bs,
}
bdrv_inc_in_flight(bs);
- ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
+ ret = drv->bdrv_co_snapshot_block_status(bs, mode, offset, bytes,
pnum, map, file);
bdrv_dec_in_flight(bs);
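
bdrv_co_is_all_zeroes() above is deliberately a heuristic: it trusts block status wherever zeroes are already reported and only falls back to reading when the unknown region is a small allocated head, capped at MAX_ZERO_CHECK_BUFFER (128 KiB). A worked example of the three outcomes, with assumed sizes:

/* Assume a 10 GiB image whose first region is reported as allocated data.
 *
 *  head =  64 KiB, everything after it reports BDRV_BLOCK_ZERO:
 *      the tail passes bdrv_co_is_zero_fast(), the head is read and checked
 *      with buffer_is_zero(); the result is 1 only if it really reads zero.
 *
 *  head = 256 KiB (> MAX_ZERO_CHECK_BUFFER):
 *      reading is considered too expensive; return 0, which (as documented)
 *      does not guarantee non-zero data.
 *
 *  some later region reports data:
 *      bdrv_co_is_zero_fast() returns 0 and so does this function.
 */
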
diff --git a/block/io_uring.c b/block/io_uring.c
index d11b205..dd4f304 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -17,7 +17,7 @@
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "trace.h"
/* Only used for assertions. */
@@ -335,15 +335,24 @@ static void luring_deferred_fn(void *opaque)
*
*/
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
- uint64_t offset, int type)
+ uint64_t offset, int type, BdrvRequestFlags flags)
{
int ret;
struct io_uring_sqe *sqes = &luringcb->sqeq;
switch (type) {
case QEMU_AIO_WRITE:
+#ifdef HAVE_IO_URING_PREP_WRITEV2
+ {
+ int luring_flags = (flags & BDRV_REQ_FUA) ? RWF_DSYNC : 0;
+ io_uring_prep_writev2(sqes, fd, luringcb->qiov->iov,
+ luringcb->qiov->niov, offset, luring_flags);
+ }
+#else
+ assert(flags == 0);
io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
luringcb->qiov->niov, offset);
+#endif
break;
case QEMU_AIO_ZONE_APPEND:
io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
@@ -380,7 +389,8 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
}
int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
- QEMUIOVector *qiov, int type)
+ QEMUIOVector *qiov, int type,
+ BdrvRequestFlags flags)
{
int ret;
AioContext *ctx = qemu_get_current_aio_context();
@@ -393,7 +403,7 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
};
trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
type);
- ret = luring_do_submit(fd, &luringcb, s, offset, type);
+ ret = luring_do_submit(fd, &luringcb, s, offset, type, flags);
if (ret < 0) {
return ret;
@@ -448,3 +458,12 @@ void luring_cleanup(LuringState *s)
trace_luring_cleanup_state(s);
g_free(s);
}
+
+bool luring_has_fua(void)
+{
+#ifdef HAVE_IO_URING_PREP_WRITEV2
+ return true;
+#else
+ return false;
+#endif
+}
diff --git a/block/iscsi.c b/block/iscsi.c
index 979bf90..15b96ee 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -28,7 +28,7 @@
#include <poll.h>
#include <math.h>
#include <arpa/inet.h>
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
@@ -41,11 +41,11 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/uuid.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "crypto/secret.h"
#include "scsi/utils.h"
#include "trace.h"
@@ -694,9 +694,9 @@ out_unlock:
static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum,
- int64_t *map,
+ unsigned int mode,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
IscsiLun *iscsilun = bs->opaque;
diff --git a/block/linux-aio.c b/block/linux-aio.c
index e3b5ec9..c200e7a 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -16,7 +16,7 @@
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
/* Only used for assertions. */
#include "qemu/coroutine_int.h"
@@ -291,7 +291,7 @@ static void ioq_submit(LinuxAioState *s)
{
int ret, len;
struct qemu_laiocb *aiocb;
- struct iocb *iocbs[MAX_EVENTS];
+ QEMU_UNINITIALIZED struct iocb *iocbs[MAX_EVENTS];
QSIMPLEQ_HEAD(, qemu_laiocb) completed;
do {
@@ -368,7 +368,8 @@ static void laio_deferred_fn(void *opaque)
}
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
- int type, uint64_t dev_max_batch)
+ int type, BdrvRequestFlags flags,
+ uint64_t dev_max_batch)
{
LinuxAioState *s = laiocb->ctx;
struct iocb *iocbs = &laiocb->iocb;
@@ -376,7 +377,15 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
switch (type) {
case QEMU_AIO_WRITE:
+#ifdef HAVE_IO_PREP_PWRITEV2
+ {
+ int laio_flags = (flags & BDRV_REQ_FUA) ? RWF_DSYNC : 0;
+ io_prep_pwritev2(iocbs, fd, qiov->iov, qiov->niov, offset, laio_flags);
+ }
+#else
+ assert(flags == 0);
io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
+#endif
break;
case QEMU_AIO_ZONE_APPEND:
io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
@@ -409,7 +418,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
}
int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
- int type, uint64_t dev_max_batch)
+ int type, BdrvRequestFlags flags,
+ uint64_t dev_max_batch)
{
int ret;
AioContext *ctx = qemu_get_current_aio_context();
@@ -422,7 +432,7 @@ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
.qiov = qiov,
};
- ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
+ ret = laio_do_submit(fd, &laiocb, offset, type, flags, dev_max_batch);
if (ret < 0) {
return ret;
}
@@ -505,3 +515,12 @@ bool laio_has_fdsync(int fd)
io_destroy(ctx);
return (ret == -EINVAL) ? false : true;
}
+
+bool laio_has_fua(void)
+{
+#ifdef HAVE_IO_PREP_PWRITEV2
+ return true;
+#else
+ return false;
+#endif
+}
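
Both AIO backends follow the same pattern as the io_uring change: map BDRV_REQ_FUA to RWF_DSYNC when a pwritev2-style submission helper is available, and advertise that through laio_has_fua()/luring_has_fua() so file-posix can drop BDRV_REQ_FUA from bs->supported_write_flags and fall back to an explicit flush otherwise. A hedged sketch of how a caller might query the capability (the helper name is hypothetical; raw_open_common() performs the equivalent checks inline):

/* Illustration only: does the selected AIO engine write FUA natively? */
static bool backend_has_native_fua(const BDRVRawState *s)
{
    if (s->use_linux_aio) {
        return laio_has_fua();      /* io_prep_pwritev2() + RWF_DSYNC */
    }
    if (s->use_linux_io_uring) {
        return luring_has_fua();    /* io_uring_prep_writev2() + RWF_DSYNC */
    }
    /* Thread-pool writes have no native FUA; raw_co_prw() emulates it
     * with raw_co_flush_to_disk() after the write completes. */
    return false;
}
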
diff --git a/block/meson.build b/block/meson.build
index f1262ec..34b1b2a 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -154,8 +154,8 @@ block_gen_c = custom_target('block-gen.c',
'../include/block/dirty-bitmap.h',
'../include/block/block_int-io.h',
'../include/block/block-global-state.h',
- '../include/sysemu/block-backend-global-state.h',
- '../include/sysemu/block-backend-io.h',
+ '../include/system/block-backend-global-state.h',
+ '../include/system/block-backend-io.h',
'coroutines.h'
),
command: [wrapper_py, '@OUTPUT@', '@INPUT@'])
@@ -163,7 +163,7 @@ block_ss.add(block_gen_c)
block_ss.add(files('stream.c'))
-system_ss.add(files('qapi-sysemu.c'))
+system_ss.add(files('qapi-system.c'))
subdir('export')
subdir('monitor')
diff --git a/block/mirror.c b/block/mirror.c
index 61f0a71..6e8caf4 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -19,7 +19,7 @@
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
@@ -51,10 +51,10 @@ typedef struct MirrorBlockJob {
BlockDriverState *to_replace;
/* Used to block operations on the drive-mirror-replace target */
Error *replace_blocker;
- bool is_none_mode;
+ MirrorSyncMode sync_mode;
BlockMirrorBackingMode backing_mode;
- /* Whether the target image requires explicit zero-initialization */
- bool zero_target;
+ /* Whether the target should be assumed to be already zero initialized */
+ bool target_is_zero;
/*
     * To be accessed with atomics. Written only under the BQL (required by the
* current implementation of mirror_change()).
@@ -73,6 +73,7 @@ typedef struct MirrorBlockJob {
size_t buf_size;
int64_t bdev_length;
unsigned long *cow_bitmap;
+ unsigned long *zero_bitmap;
BdrvDirtyBitmap *dirty_bitmap;
BdrvDirtyBitmapIter *dbi;
uint8_t *buf;
@@ -108,9 +109,12 @@ struct MirrorOp {
int64_t offset;
uint64_t bytes;
- /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
- * mirror_co_discard() before yielding for the first time */
+ /*
+ * These pointers are set by mirror_co_read(), mirror_co_zero(), and
+ * mirror_co_discard() before yielding for the first time
+ */
int64_t *bytes_handled;
+ bool *io_skipped;
bool is_pseudo_op;
bool is_active_write;
@@ -349,7 +353,7 @@ static void coroutine_fn mirror_co_read(void *opaque)
MirrorOp *op = opaque;
MirrorBlockJob *s = op->s;
int nb_chunks;
- uint64_t ret;
+ int ret = -1;
uint64_t max_bytes;
max_bytes = s->granularity * s->max_iov;
@@ -408,15 +412,34 @@ static void coroutine_fn mirror_co_read(void *opaque)
static void coroutine_fn mirror_co_zero(void *opaque)
{
MirrorOp *op = opaque;
- int ret;
+ bool write_needed = true;
+ int ret = 0;
op->s->in_flight++;
op->s->bytes_in_flight += op->bytes;
*op->bytes_handled = op->bytes;
op->is_in_flight = true;
- ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
- op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
+ if (op->s->zero_bitmap) {
+ unsigned long end = DIV_ROUND_UP(op->offset + op->bytes,
+ op->s->granularity);
+ assert(QEMU_IS_ALIGNED(op->offset, op->s->granularity));
+ assert(QEMU_IS_ALIGNED(op->bytes, op->s->granularity) ||
+ op->offset + op->bytes == op->s->bdev_length);
+ if (find_next_zero_bit(op->s->zero_bitmap, end,
+ op->offset / op->s->granularity) == end) {
+ write_needed = false;
+ *op->io_skipped = true;
+ }
+ }
+ if (write_needed) {
+ ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
+ op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
+ }
+ if (ret >= 0 && op->s->zero_bitmap) {
+ bitmap_set(op->s->zero_bitmap, op->offset / op->s->granularity,
+ DIV_ROUND_UP(op->bytes, op->s->granularity));
+ }
mirror_write_complete(op, ret);
}
@@ -435,29 +458,43 @@ static void coroutine_fn mirror_co_discard(void *opaque)
}
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
- unsigned bytes, MirrorMethod mirror_method)
+ unsigned bytes, MirrorMethod mirror_method,
+ bool *io_skipped)
{
MirrorOp *op;
Coroutine *co;
int64_t bytes_handled = -1;
+ assert(QEMU_IS_ALIGNED(offset, s->granularity));
+ assert(QEMU_IS_ALIGNED(bytes, s->granularity) ||
+ offset + bytes == s->bdev_length);
op = g_new(MirrorOp, 1);
*op = (MirrorOp){
.s = s,
.offset = offset,
.bytes = bytes,
.bytes_handled = &bytes_handled,
+ .io_skipped = io_skipped,
};
qemu_co_queue_init(&op->waiting_requests);
switch (mirror_method) {
case MIRROR_METHOD_COPY:
+ if (s->zero_bitmap) {
+ bitmap_clear(s->zero_bitmap, offset / s->granularity,
+ DIV_ROUND_UP(bytes, s->granularity));
+ }
co = qemu_coroutine_create(mirror_co_read, op);
break;
case MIRROR_METHOD_ZERO:
+ /* s->zero_bitmap handled in mirror_co_zero */
co = qemu_coroutine_create(mirror_co_zero, op);
break;
case MIRROR_METHOD_DISCARD:
+ if (s->zero_bitmap) {
+ bitmap_clear(s->zero_bitmap, offset / s->granularity,
+ DIV_ROUND_UP(bytes, s->granularity));
+ }
co = qemu_coroutine_create(mirror_co_discard, op);
break;
default:
@@ -565,9 +602,10 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
while (nb_chunks > 0 && offset < s->bdev_length) {
- int ret;
+ int ret = -1;
int64_t io_bytes;
int64_t io_bytes_acct;
+ bool io_skipped = false;
MirrorMethod mirror_method = MIRROR_METHOD_COPY;
assert(!(offset % s->granularity));
@@ -611,8 +649,10 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
}
io_bytes = mirror_clip_bytes(s, offset, io_bytes);
- io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
- if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
+ io_bytes = mirror_perform(s, offset, io_bytes, mirror_method,
+ &io_skipped);
+ if (io_skipped ||
+ (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok)) {
io_bytes_acct = 0;
} else {
io_bytes_acct = io_bytes;
@@ -723,9 +763,10 @@ static int mirror_exit_common(Job *job)
&error_abort);
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
- BlockDriverState *backing = s->is_none_mode ? src : s->base;
+ BlockDriverState *backing;
BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);
+ backing = s->sync_mode == MIRROR_SYNC_MODE_NONE ? src : s->base;
if (bdrv_cow_bs(unfiltered_target) != backing) {
bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
if (local_err) {
@@ -841,15 +882,54 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
int64_t offset;
BlockDriverState *bs;
BlockDriverState *target_bs = blk_bs(s->target);
- int ret;
+ int ret = -EIO;
int64_t count;
+ bool punch_holes =
+ target_bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
+ bdrv_can_write_zeroes_with_unmap(target_bs);
+ int64_t bitmap_length = DIV_ROUND_UP(s->bdev_length, s->granularity);
+ /* Determine if the image is already zero, regardless of sync mode. */
+ s->zero_bitmap = bitmap_new(bitmap_length);
bdrv_graph_co_rdlock();
bs = s->mirror_top_bs->backing->bs;
+ if (s->target_is_zero) {
+ ret = 1;
+ } else {
+ ret = bdrv_co_is_all_zeroes(target_bs);
+ }
bdrv_graph_co_rdunlock();
- if (s->zero_target) {
- if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
+ /* Determine if a pre-zeroing pass is necessary. */
+ if (ret < 0) {
+ return ret;
+ } else if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
+ /*
+ * In TOP mode, there is no benefit to a pre-zeroing pass, but
+ * the zero bitmap can be set if the destination already reads
+ * as zero and we are not punching holes.
+ */
+ if (ret > 0 && !punch_holes) {
+ bitmap_set(s->zero_bitmap, 0, bitmap_length);
+ }
+ } else if (ret == 0 || punch_holes) {
+ /*
+ * Here, we are in FULL mode; our goal is to avoid writing
+ * zeroes if the destination already reads as zero, except
+ * when we are trying to punch holes. This is possible if
+ * zeroing happened externally (ret > 0) or if we have a fast
+ * way to pre-zero the image (the dirty bitmap will be
+ * populated later by the non-zero portions, the same as for
+ * TOP mode). If pre-zeroing is not fast, or we need to visit
+ * the entire image in order to punch holes even in the
+ * non-allocated regions of the source, then just mark the
+ * entire image dirty and leave the zero bitmap clear at this
+ * point in time. Otherwise, it can be faster to pre-zero the
+ * image now, even if we re-write the allocated portions of
+ * the disk later, and the pre-zero pass will populate the
+ * zero bitmap.
+ */
+ if (!bdrv_can_write_zeroes_with_unmap(target_bs) || punch_holes) {
bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
return 0;
}
@@ -858,6 +938,7 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
for (offset = 0; offset < s->bdev_length; ) {
int bytes = MIN(s->bdev_length - offset,
QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
+ bool ignored;
mirror_throttle(s);
@@ -873,12 +954,15 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
continue;
}
- mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
+ mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO, &ignored);
offset += bytes;
}
mirror_wait_for_all_io(s);
s->initial_zeroing_ongoing = false;
+ } else {
+ /* In FULL mode, and the image already reads as zero. */
+ bitmap_set(s->zero_bitmap, 0, bitmap_length);
}
/* First part, loop on the sectors and initialize the dirty bitmap. */
@@ -931,7 +1015,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
- BlockDeviceIoStatus iostatus;
+ BlockDeviceIoStatus iostatus = BLOCK_DEVICE_IO_STATUS__MAX;
int64_t length;
int64_t target_length;
BlockDriverInfo bdi;
@@ -1020,7 +1104,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
mirror_free_init(s);
s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
- if (!s->is_none_mode) {
+ if (s->sync_mode != MIRROR_SYNC_MODE_NONE) {
ret = mirror_dirty_init(s);
if (ret < 0 || job_is_cancelled(&s->common.job)) {
goto immediate_exit;
@@ -1163,6 +1247,7 @@ immediate_exit:
assert(s->in_flight == 0);
qemu_vfree(s->buf);
g_free(s->cow_bitmap);
+ g_free(s->zero_bitmap);
g_free(s->in_flight_bitmap);
bdrv_dirty_iter_free(s->dbi);
@@ -1341,7 +1426,8 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
{
int ret;
size_t qiov_offset = 0;
- int64_t bitmap_offset, bitmap_end;
+ int64_t dirty_bitmap_offset, dirty_bitmap_end;
+ int64_t zero_bitmap_offset, zero_bitmap_end;
if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
@@ -1385,31 +1471,54 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
}
/*
- * Tails are either clean or shrunk, so for bitmap resetting
- * we safely align the range down.
+ * Tails are either clean or shrunk, so for dirty bitmap resetting
+ * we safely align the range narrower. But for zero bitmap, round
+ * range wider for checking or clearing, and narrower for setting.
*/
- bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
- bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
- if (bitmap_offset < bitmap_end) {
- bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
- bitmap_end - bitmap_offset);
+ dirty_bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
+ dirty_bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
+ if (dirty_bitmap_offset < dirty_bitmap_end) {
+ bdrv_reset_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset,
+ dirty_bitmap_end - dirty_bitmap_offset);
}
+ zero_bitmap_offset = offset / job->granularity;
+ zero_bitmap_end = DIV_ROUND_UP(offset + bytes, job->granularity);
job_progress_increase_remaining(&job->common.job, bytes);
job->active_write_bytes_in_flight += bytes;
switch (method) {
case MIRROR_METHOD_COPY:
+ if (job->zero_bitmap) {
+ bitmap_clear(job->zero_bitmap, zero_bitmap_offset,
+ zero_bitmap_end - zero_bitmap_offset);
+ }
ret = blk_co_pwritev_part(job->target, offset, bytes,
qiov, qiov_offset, flags);
break;
case MIRROR_METHOD_ZERO:
+ if (job->zero_bitmap) {
+ if (find_next_zero_bit(job->zero_bitmap, zero_bitmap_end,
+ zero_bitmap_offset) == zero_bitmap_end) {
+ ret = 0;
+ break;
+ }
+ }
assert(!qiov);
ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
+ if (job->zero_bitmap && ret >= 0) {
+ bitmap_set(job->zero_bitmap, dirty_bitmap_offset / job->granularity,
+ (dirty_bitmap_end - dirty_bitmap_offset) /
+ job->granularity);
+ }
break;
case MIRROR_METHOD_DISCARD:
+ if (job->zero_bitmap) {
+ bitmap_clear(job->zero_bitmap, zero_bitmap_offset,
+ zero_bitmap_end - zero_bitmap_offset);
+ }
assert(!qiov);
ret = blk_co_pdiscard(job->target, offset, bytes);
break;
@@ -1430,10 +1539,10 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
* at function start, and they must be still dirty, as we've locked
* the region for in-flight op.
*/
- bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
- bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
- bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
- bitmap_end - bitmap_offset);
+ dirty_bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
+ dirty_bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
+ bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset,
+ dirty_bitmap_end - dirty_bitmap_offset);
qatomic_set(&job->actively_synced, false);
action = mirror_error_action(job, false, -ret);
@@ -1711,15 +1820,16 @@ static BlockJob *mirror_start_job(
int creation_flags, BlockDriverState *target,
const char *replaces, int64_t speed,
uint32_t granularity, int64_t buf_size,
+ MirrorSyncMode sync_mode,
BlockMirrorBackingMode backing_mode,
- bool zero_target,
+ bool target_is_zero,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap,
BlockCompletionFunc *cb,
void *opaque,
const BlockJobDriver *driver,
- bool is_none_mode, BlockDriverState *base,
+ BlockDriverState *base,
bool auto_complete, const char *filter_node_name,
bool is_mirror, MirrorCopyMode copy_mode,
bool base_ro,
@@ -1878,9 +1988,9 @@ static BlockJob *mirror_start_job(
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
s->on_target_error = on_target_error;
- s->is_none_mode = is_none_mode;
+ s->sync_mode = sync_mode;
s->backing_mode = backing_mode;
- s->zero_target = zero_target;
+ s->target_is_zero = target_is_zero;
qatomic_set(&s->copy_mode, copy_mode);
s->base = base;
s->base_overlay = bdrv_find_overlay(bs, base);
@@ -1904,6 +2014,7 @@ static BlockJob *mirror_start_job(
*/
bdrv_disable_dirty_bitmap(s->dirty_bitmap);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
@@ -1911,6 +2022,7 @@ static BlockJob *mirror_start_job(
errp);
if (ret < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
@@ -1956,16 +2068,19 @@ static BlockJob *mirror_start_job(
iter_shared_perms, errp);
if (ret < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
}
if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
QTAILQ_INIT(&s->ops_in_flight);
@@ -2009,13 +2124,12 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
int creation_flags, int64_t speed,
uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
- bool zero_target,
+ bool target_is_zero,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap, const char *filter_node_name,
MirrorCopyMode copy_mode, Error **errp)
{
- bool is_none_mode;
BlockDriverState *base;
GLOBAL_STATE_CODE();
@@ -2028,14 +2142,13 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
}
bdrv_graph_rdlock_main_loop();
- is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
bdrv_graph_rdunlock_main_loop();
mirror_start_job(job_id, bs, creation_flags, target, replaces,
- speed, granularity, buf_size, backing_mode, zero_target,
- on_source_error, on_target_error, unmap, NULL, NULL,
- &mirror_job_driver, is_none_mode, base, false,
+ speed, granularity, buf_size, mode, backing_mode,
+ target_is_zero, on_source_error, on_target_error, unmap,
+ NULL, NULL, &mirror_job_driver, base, false,
filter_node_name, true, copy_mode, false, errp);
}
@@ -2061,9 +2174,9 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
job = mirror_start_job(
job_id, bs, creation_flags, base, NULL, speed, 0, 0,
- MIRROR_LEAVE_BACKING_CHAIN, false,
+ MIRROR_SYNC_MODE_TOP, MIRROR_LEAVE_BACKING_CHAIN, false,
on_error, on_error, true, cb, opaque,
- &commit_active_job_driver, false, base, auto_complete,
+ &commit_active_job_driver, base, auto_complete,
filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
base_read_only, errp);
if (!job) {
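
The do_sync_target_write() hunk above applies two different rounding rules to the same byte range: the dirty bitmap is reset only over granules fully covered by the write, while the zero bitmap is cleared (or checked) over every granule the write touches and set only over fully covered granules. Below is a standalone C sketch of that arithmetic, using simplified stand-ins for the QEMU_ALIGN_* and DIV_ROUND_UP macros and made-up offsets; it is an illustration, not QEMU code.

    /* Illustration only: simplified macros, hypothetical offsets. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN(n, m)   ((n) / (m) * (m))
    #define ALIGN_UP(n, m)     ALIGN_DOWN((n) + (m) - 1, (m))
    #define DIV_ROUND_UP(n, m) (((n) + (m) - 1) / (m))

    int main(void)
    {
        int64_t granularity = 65536;              /* one bitmap bit per 64 KiB */
        int64_t offset = 100000, bytes = 200000;  /* an unaligned partial write */

        /* Dirty bitmap: shrink; only granules fully rewritten become clean. */
        int64_t dirty_start = ALIGN_UP(offset, granularity);
        int64_t dirty_end   = ALIGN_DOWN(offset + bytes, granularity);

        /* Zero bitmap: widen; any granule touched may lose "known zero". */
        int64_t zero_start = offset / granularity;
        int64_t zero_end   = DIV_ROUND_UP(offset + bytes, granularity);

        printf("dirty reset: bytes    [%" PRId64 ", %" PRId64 ")\n",
               dirty_start, dirty_end);
        printf("zero clear : granules [%" PRId64 ", %" PRId64 ")\n",
               zero_start, zero_end);
        return 0;
    }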
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
index d954bec..6919a49 100644
--- a/block/monitor/block-hmp-cmds.c
+++ b/block/monitor/block-hmp-cmds.c
@@ -37,11 +37,11 @@
#include "qemu/osdep.h"
#include "hw/boards.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qapi-commands-block-export.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/config-file.h"
@@ -49,7 +49,7 @@
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "monitor/monitor.h"
#include "monitor/hmp.h"
#include "block/nbd.h"
@@ -402,7 +402,8 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict)
goto exit;
}
- nbd_server_start(addr, NULL, NULL, 0, &local_err);
+ nbd_server_start(addr, NBD_DEFAULT_HANDSHAKE_MAX_SECS, NULL, NULL,
+ NBD_DEFAULT_MAX_CONNECTIONS, &local_err);
qapi_free_SocketAddress(addr);
if (local_err != NULL) {
goto exit;
@@ -629,11 +630,12 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
}
if (inserted) {
- monitor_printf(mon, ": %s (%s%s%s)\n",
+ monitor_printf(mon, ": %s (%s%s%s%s)\n",
inserted->file,
inserted->drv,
inserted->ro ? ", read-only" : "",
- inserted->encrypted ? ", encrypted" : "");
+ inserted->encrypted ? ", encrypted" : "",
+ inserted->active ? "" : ", inactive");
} else {
monitor_printf(mon, ": [not inserted]\n");
}
diff --git a/block/nbd.c b/block/nbd.c
index d464315..d5a2b21 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -36,7 +36,7 @@
#include "qemu/main-loop.h"
#include "qapi/qapi-visit-sockets.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qapi/clone-visitor.h"
#include "block/qdict.h"
@@ -1397,8 +1397,8 @@ nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
}
static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
- BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
- int64_t *pnum, int64_t *map, BlockDriverState **file)
+ BlockDriverState *bs, unsigned int mode, int64_t offset,
+ int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int ret, request_ret;
NBDExtent64 extent = { 0 };
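
This is the first of many drivers in the series (null, parallels, qcow, qcow2, qed, quorum and raw follow below) whose .bdrv_co_block_status callback changes from a bool want_zero to an unsigned int mode bitmask, so callers can express more than one kind of request. The sketch below only illustrates the idea of such a bitmask; the flag names used here are assumptions for illustration, not necessarily the exact QEMU constants.

    /* Illustration only: flag names are assumptions, not the QEMU macros. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    enum {
        WANT_ZERO    = 1u << 0,  /* caller cares whether the range reads as zero */
        WANT_PRECISE = 1u << 1,  /* caller wants an exact, not conservative, answer */
    };

    /* A toy block-status: without WANT_ZERO it may skip the expensive probe. */
    static int toy_block_status(unsigned int mode, int64_t offset, int64_t bytes,
                                int64_t *pnum)
    {
        (void)offset;
        *pnum = bytes;
        if (!(mode & WANT_ZERO)) {
            return 0;              /* cheap answer: "data", no zero detection */
        }
        /* ...probe allocation/zero state of [offset, offset + bytes) here... */
        return (mode & WANT_PRECISE) ? 1 : 0;
    }

    int main(void)
    {
        int64_t pnum;
        int ret = toy_block_status(WANT_ZERO | WANT_PRECISE, 0, 1 << 20, &pnum);
        printf("ret=%d pnum=%" PRId64 "\n", ret, pnum);
        return 0;
    }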
diff --git a/block/nfs.c b/block/nfs.c
index 0500f60..0a7d38d 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -39,10 +39,10 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qapi/qapi-visit-block-core.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include <nfsc/libnfs.h>
diff --git a/block/null.c b/block/null.c
index 4730acc..4e448d5 100644
--- a/block/null.c
+++ b/block/null.c
@@ -12,13 +12,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "block/block-io.h"
#include "block/block_int.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#define NULL_OPT_LATENCY "latency-ns"
#define NULL_OPT_ZEROES "read-zeroes"
@@ -227,9 +227,9 @@ static int null_reopen_prepare(BDRVReopenState *reopen_state,
}
static int coroutine_fn null_co_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum,
- int64_t *map,
+ unsigned int mode,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVNullState *s = bs->opaque;
diff --git a/block/nvme.c b/block/nvme.c
index 3b588b1..8df53ee 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -14,10 +14,11 @@
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
+#include "qemu/host-pci-mmio.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
@@ -26,8 +27,8 @@
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/replay.h"
+#include "system/block-backend.h"
+#include "system/replay.h"
#include "trace.h"
#include "block/nvme.h"
@@ -60,7 +61,7 @@ typedef struct {
uint8_t *queue;
uint64_t iova;
/* Hardware MMIO register */
- volatile uint32_t *doorbell;
+ uint32_t *doorbell;
} NVMeQueue;
typedef struct {
@@ -100,7 +101,7 @@ struct BDRVNVMeState {
QEMUVFIOState *vfio;
void *bar0_wo_map;
/* Memory mapped registers */
- volatile struct {
+ struct {
uint32_t sq_tail;
uint32_t cq_head;
} *doorbells;
@@ -292,7 +293,7 @@ static void nvme_kick(NVMeQueuePair *q)
assert(!(q->sq.tail & 0xFF00));
/* Fence the write to submission queue entry before notifying the device. */
smp_wmb();
- *q->sq.doorbell = cpu_to_le32(q->sq.tail);
+ host_pci_stl_le_p(q->sq.doorbell, q->sq.tail);
q->inflight += q->need_kick;
q->need_kick = 0;
}
@@ -441,7 +442,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
if (progress) {
/* Notify the device so it can post more completions. */
smp_mb_release();
- *q->cq.doorbell = cpu_to_le32(q->cq.head);
+ host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
nvme_wake_free_req_locked(q);
}
@@ -460,7 +461,7 @@ static void nvme_process_completion_bh(void *opaque)
* so notify the device that it has space to fill in more completions now.
*/
smp_mb_release();
- *q->cq.doorbell = cpu_to_le32(q->cq.head);
+ host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
nvme_wake_free_req_locked(q);
nvme_process_completion(q);
@@ -749,9 +750,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
int ret;
uint64_t cap;
uint32_t ver;
+ uint32_t cc;
uint64_t timeout_ms;
uint64_t deadline, now;
- volatile NvmeBar *regs = NULL;
+ NvmeBar *regs = NULL;
qemu_co_mutex_init(&s->dma_map_lock);
qemu_co_queue_init(&s->dma_flush_queue);
@@ -779,7 +781,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
/* Perform initialize sequence as described in NVMe spec "7.6.1
* Initialization". */
- cap = le64_to_cpu(regs->cap);
+ cap = host_pci_ldq_le_p(&regs->cap);
trace_nvme_controller_capability_raw(cap);
trace_nvme_controller_capability("Maximum Queue Entries Supported",
1 + NVME_CAP_MQES(cap));
@@ -805,16 +807,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
bs->bl.request_alignment = s->page_size;
timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
- ver = le32_to_cpu(regs->vs);
+ ver = host_pci_ldl_le_p(&regs->vs);
trace_nvme_controller_spec_version(extract32(ver, 16, 16),
extract32(ver, 8, 8),
extract32(ver, 0, 8));
/* Reset device to get a clean state. */
- regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
+ cc = host_pci_ldl_le_p(&regs->cc);
+ host_pci_stl_le_p(&regs->cc, cc & 0xFE);
/* Wait for CSTS.RDY = 0. */
deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
- while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
+ while (NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to reset (%"
PRId64 " ms)",
@@ -843,19 +846,21 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->queues[INDEX_ADMIN] = q;
s->queue_count = 1;
QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
- regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
- ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
- regs->asq = cpu_to_le64(q->sq.iova);
- regs->acq = cpu_to_le64(q->cq.iova);
+ host_pci_stl_le_p(&regs->aqa,
+ ((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
+ ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
+ host_pci_stq_le_p(&regs->asq, q->sq.iova);
+ host_pci_stq_le_p(&regs->acq, q->cq.iova);
/* After setting up all control registers we can enable device now. */
- regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
- (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
- CC_EN_MASK);
+ host_pci_stl_le_p(&regs->cc,
+ (ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
+ (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
+ CC_EN_MASK);
/* Wait for CSTS.RDY = 1. */
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
deadline = now + timeout_ms * SCALE_MS;
- while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
+ while (!NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to start (%"
PRId64 " ms)",
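
block/nvme.c stops poking volatile BAR fields with cpu_to_le32()/le32_to_cpu() and routes every doorbell and controller-register access through the host_pci_*_le_p() helpers from qemu/host-pci-mmio.h. The sketch below shows roughly what a 32-bit little-endian MMIO store has to guarantee (a single, correctly sized, byte-swapped access); it is a simplified illustration under those assumptions, not the actual helper implementation.

    #include <stdint.h>

    /*
     * Simplified stand-in for a 32-bit little-endian MMIO store: swap on
     * big-endian hosts, then issue exactly one 32-bit access that the
     * compiler may not merge, split, or elide (hence the volatile cast).
     */
    static inline void mmio_stl_le(uint32_t *reg, uint32_t val)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        val = __builtin_bswap32(val);
    #endif
        *(volatile uint32_t *)reg = val;
    }

    int main(void)
    {
        uint32_t fake_doorbell = 0;          /* stands in for a mapped register */
        mmio_stl_le(&fake_doorbell, 42);     /* e.g. a new submission queue tail */
        return fake_doorbell ? 0 : 1;
    }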
diff --git a/block/parallels-ext.c b/block/parallels-ext.c
index b4e14c8..778b8f6 100644
--- a/block/parallels-ext.c
+++ b/block/parallels-ext.c
@@ -206,7 +206,7 @@ parallels_parse_format_extension(BlockDriverState *bs, uint8_t *ext_cluster,
goto fail;
}
- ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5, (char *)pos, remaining,
+ ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_MD5, (char *)pos, remaining,
&hash, &hash_len, errp);
if (ret < 0) {
goto fail;
diff --git a/block/parallels.c b/block/parallels.c
index 9205a08..3a375e2 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -33,10 +33,10 @@
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/module.h"
#include "qemu/option.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qemu/bswap.h"
@@ -184,11 +184,11 @@ static int mark_used(BlockDriverState *bs, unsigned long *bitmap,
BDRVParallelsState *s = bs->opaque;
uint32_t cluster_index = host_cluster_index(s, off);
unsigned long next_used;
- if (cluster_index + count > bitmap_size) {
+ if ((uint64_t)cluster_index + count > bitmap_size) {
return -E2BIG;
}
next_used = find_next_bit(bitmap, bitmap_size, cluster_index);
- if (next_used < cluster_index + count) {
+ if (next_used < (uint64_t)cluster_index + count) {
return -EBUSY;
}
bitmap_set(bitmap, cluster_index, count);
@@ -416,9 +416,9 @@ parallels_co_flush_to_os(BlockDriverState *bs)
}
static int coroutine_fn GRAPH_RDLOCK
-parallels_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
- BlockDriverState **file)
+parallels_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file)
{
BDRVParallelsState *s = bs->opaque;
int count;
@@ -1298,6 +1298,10 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(errp, "Catalog too large");
return -EFBIG;
}
+ if (le64_to_cpu(ph.ext_off) >= (INT64_MAX >> BDRV_SECTOR_BITS)) {
+ error_setg(errp, "Invalid image: Too big offset");
+ return -EFBIG;
+ }
size = bat_entry_off(s->bat_size);
s->header_size = ROUND_UP(size, bdrv_opt_mem_align(bs->file->bs));
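
The mark_used() fix in block/parallels.c widens cluster_index to 64 bits before adding count, so a crafted image cannot make the 32-bit sum wrap around and slip past the bitmap_size bounds check. A small self-contained demonstration of the wrap being closed, with made-up values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cluster_index = 0xFFFFFFF0u;  /* near UINT32_MAX, attacker chosen */
        uint32_t count = 0x20;
        uint64_t bitmap_size = 0x1000;

        /* 32-bit addition wraps to 0x10, so the -E2BIG guard never triggers. */
        printf("wrapped sum: 0x%" PRIx32 " (guard triggers: %d)\n",
               cluster_index + count,
               cluster_index + count > bitmap_size);

        /* Widening first keeps the real value and the bounds check catches it. */
        printf("widened sum: 0x%" PRIx64 " (guard triggers: %d)\n",
               (uint64_t)cluster_index + count,
               (uint64_t)cluster_index + count > bitmap_size);
        return 0;
    }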
diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c
deleted file mode 100644
index e428263..0000000
--- a/block/qapi-sysemu.c
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * QMP command handlers specific to the system emulators
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-
-#include "block/block_int.h"
-#include "qapi/error.h"
-#include "qapi/qapi-commands-block.h"
-#include "qapi/qmp/qdict.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-
-static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id,
- Error **errp)
-{
- BlockBackend *blk;
-
- if (!blk_name == !qdev_id) {
- error_setg(errp, "Need exactly one of 'device' and 'id'");
- return NULL;
- }
-
- if (qdev_id) {
- blk = blk_by_qdev_id(qdev_id, errp);
- } else {
- blk = blk_by_name(blk_name);
- if (blk == NULL) {
- error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
- "Device '%s' not found", blk_name);
- }
- }
-
- return blk;
-}
-
-/*
- * Attempt to open the tray of @device.
- * If @force, ignore its tray lock.
- * Else, if the tray is locked, don't open it, but ask the guest to open it.
- * On error, store an error through @errp and return -errno.
- * If @device does not exist, return -ENODEV.
- * If it has no removable media, return -ENOTSUP.
- * If it has no tray, return -ENOSYS.
- * If the guest was asked to open the tray, return -EINPROGRESS.
- * Else, return 0.
- */
-static int do_open_tray(const char *blk_name, const char *qdev_id,
- bool force, Error **errp)
-{
- BlockBackend *blk;
- const char *device = qdev_id ?: blk_name;
- bool locked;
-
- blk = qmp_get_blk(blk_name, qdev_id, errp);
- if (!blk) {
- return -ENODEV;
- }
-
- if (!blk_dev_has_removable_media(blk)) {
- error_setg(errp, "Device '%s' is not removable", device);
- return -ENOTSUP;
- }
-
- if (!blk_dev_has_tray(blk)) {
- error_setg(errp, "Device '%s' does not have a tray", device);
- return -ENOSYS;
- }
-
- if (blk_dev_is_tray_open(blk)) {
- return 0;
- }
-
- locked = blk_dev_is_medium_locked(blk);
- if (locked) {
- blk_dev_eject_request(blk, force);
- }
-
- if (!locked || force) {
- blk_dev_change_media_cb(blk, false, &error_abort);
- }
-
- if (locked && !force) {
- error_setg(errp, "Device '%s' is locked and force was not specified, "
- "wait for tray to open and try again", device);
- return -EINPROGRESS;
- }
-
- return 0;
-}
-
-void qmp_blockdev_open_tray(const char *device,
- const char *id,
- bool has_force, bool force,
- Error **errp)
-{
- Error *local_err = NULL;
- int rc;
-
- if (!has_force) {
- force = false;
- }
- rc = do_open_tray(device, id, force, &local_err);
- if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
- error_propagate(errp, local_err);
- return;
- }
- error_free(local_err);
-}
-
-void qmp_blockdev_close_tray(const char *device,
- const char *id,
- Error **errp)
-{
- BlockBackend *blk;
- Error *local_err = NULL;
-
- blk = qmp_get_blk(device, id, errp);
- if (!blk) {
- return;
- }
-
- if (!blk_dev_has_removable_media(blk)) {
- error_setg(errp, "Device '%s' is not removable", device ?: id);
- return;
- }
-
- if (!blk_dev_has_tray(blk)) {
- /* Ignore this command on tray-less devices */
- return;
- }
-
- if (!blk_dev_is_tray_open(blk)) {
- return;
- }
-
- blk_dev_change_media_cb(blk, true, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-}
-
-static void GRAPH_UNLOCKED
-blockdev_remove_medium(const char *device, const char *id, Error **errp)
-{
- BlockBackend *blk;
- BlockDriverState *bs;
- bool has_attached_device;
-
- GLOBAL_STATE_CODE();
-
- blk = qmp_get_blk(device, id, errp);
- if (!blk) {
- return;
- }
-
- /* For BBs without a device, we can exchange the BDS tree at will */
- has_attached_device = blk_get_attached_dev(blk);
-
- if (has_attached_device && !blk_dev_has_removable_media(blk)) {
- error_setg(errp, "Device '%s' is not removable", device ?: id);
- return;
- }
-
- if (has_attached_device && blk_dev_has_tray(blk) &&
- !blk_dev_is_tray_open(blk))
- {
- error_setg(errp, "Tray of device '%s' is not open", device ?: id);
- return;
- }
-
- bs = blk_bs(blk);
- if (!bs) {
- return;
- }
-
- bdrv_graph_rdlock_main_loop();
- if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
- bdrv_graph_rdunlock_main_loop();
- return;
- }
- bdrv_graph_rdunlock_main_loop();
-
- blk_remove_bs(blk);
-
- if (!blk_dev_has_tray(blk)) {
- /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
- * called at all); therefore, the medium needs to be ejected here.
- * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
- * value passed here (i.e. false). */
- blk_dev_change_media_cb(blk, false, &error_abort);
- }
-}
-
-void qmp_blockdev_remove_medium(const char *id, Error **errp)
-{
- blockdev_remove_medium(NULL, id, errp);
-}
-
-static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
- BlockDriverState *bs, Error **errp)
-{
- Error *local_err = NULL;
- bool has_device;
- int ret;
-
- /* For BBs without a device, we can exchange the BDS tree at will */
- has_device = blk_get_attached_dev(blk);
-
- if (has_device && !blk_dev_has_removable_media(blk)) {
- error_setg(errp, "Device is not removable");
- return;
- }
-
- if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
- error_setg(errp, "Tray of the device is not open");
- return;
- }
-
- if (blk_bs(blk)) {
- error_setg(errp, "There already is a medium in the device");
- return;
- }
-
- ret = blk_insert_bs(blk, bs, errp);
- if (ret < 0) {
- return;
- }
-
- if (!blk_dev_has_tray(blk)) {
- /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
- * called at all); therefore, the medium needs to be pushed into the
- * slot here.
- * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
- * value passed here (i.e. true). */
- blk_dev_change_media_cb(blk, true, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- blk_remove_bs(blk);
- return;
- }
- }
-}
-
-static void blockdev_insert_medium(const char *device, const char *id,
- const char *node_name, Error **errp)
-{
- BlockBackend *blk;
- BlockDriverState *bs;
-
- GRAPH_RDLOCK_GUARD_MAINLOOP();
-
- blk = qmp_get_blk(device, id, errp);
- if (!blk) {
- return;
- }
-
- bs = bdrv_find_node(node_name);
- if (!bs) {
- error_setg(errp, "Node '%s' not found", node_name);
- return;
- }
-
- if (bdrv_has_blk(bs)) {
- error_setg(errp, "Node '%s' is already in use", node_name);
- return;
- }
-
- qmp_blockdev_insert_anon_medium(blk, bs, errp);
-}
-
-void qmp_blockdev_insert_medium(const char *id, const char *node_name,
- Error **errp)
-{
- blockdev_insert_medium(NULL, id, node_name, errp);
-}
-
-void qmp_blockdev_change_medium(const char *device,
- const char *id,
- const char *filename,
- const char *format,
- bool has_force, bool force,
- bool has_read_only,
- BlockdevChangeReadOnlyMode read_only,
- Error **errp)
-{
- BlockBackend *blk;
- BlockDriverState *medium_bs = NULL;
- int bdrv_flags;
- bool detect_zeroes;
- int rc;
- QDict *options = NULL;
- Error *err = NULL;
-
- blk = qmp_get_blk(device, id, errp);
- if (!blk) {
- goto fail;
- }
-
- if (blk_bs(blk)) {
- blk_update_root_state(blk);
- }
-
- bdrv_flags = blk_get_open_flags_from_root_state(blk);
- bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
- BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY);
-
- if (!has_read_only) {
- read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
- }
-
- switch (read_only) {
- case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
- break;
-
- case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
- bdrv_flags &= ~BDRV_O_RDWR;
- break;
-
- case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
- bdrv_flags |= BDRV_O_RDWR;
- break;
-
- default:
- abort();
- }
-
- options = qdict_new();
- detect_zeroes = blk_get_detect_zeroes_from_root_state(blk);
- qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off");
-
- if (format) {
- qdict_put_str(options, "driver", format);
- }
-
- medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
-
- if (!medium_bs) {
- goto fail;
- }
-
- rc = do_open_tray(device, id, force, &err);
- if (rc && rc != -ENOSYS) {
- error_propagate(errp, err);
- goto fail;
- }
- error_free(err);
- err = NULL;
-
- blockdev_remove_medium(device, id, &err);
- if (err) {
- error_propagate(errp, err);
- goto fail;
- }
-
- qmp_blockdev_insert_anon_medium(blk, medium_bs, &err);
- if (err) {
- error_propagate(errp, err);
- goto fail;
- }
-
- qmp_blockdev_close_tray(device, id, errp);
-
-fail:
- /* If the medium has been inserted, the device has its own reference, so
- * ours must be relinquished; and if it has not been inserted successfully,
- * the reference must be relinquished anyway */
- bdrv_unref(medium_bs);
-}
-
-void qmp_eject(const char *device, const char *id,
- bool has_force, bool force, Error **errp)
-{
- Error *local_err = NULL;
- int rc;
-
- if (!has_force) {
- force = false;
- }
-
- rc = do_open_tray(device, id, force, &local_err);
- if (rc && rc != -ENOSYS) {
- error_propagate(errp, local_err);
- return;
- }
- error_free(local_err);
-
- blockdev_remove_medium(device, id, errp);
-}
-
-/* throttling disk I/O limits */
-void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
-{
- ThrottleConfig cfg;
- BlockDriverState *bs;
- BlockBackend *blk;
-
- blk = qmp_get_blk(arg->device, arg->id, errp);
- if (!blk) {
- return;
- }
-
- bs = blk_bs(blk);
- if (!bs) {
- error_setg(errp, "Device has no medium");
- return;
- }
-
- throttle_config_init(&cfg);
- cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps;
- cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd;
- cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr;
-
- cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops;
- cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd;
- cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr;
-
- if (arg->has_bps_max) {
- cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max;
- }
- if (arg->has_bps_rd_max) {
- cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max;
- }
- if (arg->has_bps_wr_max) {
- cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max;
- }
- if (arg->has_iops_max) {
- cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max;
- }
- if (arg->has_iops_rd_max) {
- cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max;
- }
- if (arg->has_iops_wr_max) {
- cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max;
- }
-
- if (arg->has_bps_max_length) {
- cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length;
- }
- if (arg->has_bps_rd_max_length) {
- cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length;
- }
- if (arg->has_bps_wr_max_length) {
- cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length;
- }
- if (arg->has_iops_max_length) {
- cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length;
- }
- if (arg->has_iops_rd_max_length) {
- cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length;
- }
- if (arg->has_iops_wr_max_length) {
- cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length;
- }
-
- if (arg->has_iops_size) {
- cfg.op_size = arg->iops_size;
- }
-
- if (!throttle_is_valid(&cfg, errp)) {
- return;
- }
-
- if (throttle_enabled(&cfg)) {
- /* Enable I/O limits if they're not enabled yet, otherwise
- * just update the throttling group. */
- if (!blk_get_public(blk)->throttle_group_member.throttle_state) {
- blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id);
- } else if (arg->group) {
- blk_io_limits_update_group(blk, arg->group);
- }
- /* Set the new throttling configuration */
- blk_set_io_limits(blk, &cfg);
- } else if (blk_get_public(blk)->throttle_group_member.throttle_state) {
- /* If all throttling settings are set to 0, disable I/O limits */
- blk_io_limits_disable(blk);
- }
-}
-
-void qmp_block_latency_histogram_set(
- const char *id,
- bool has_boundaries, uint64List *boundaries,
- bool has_boundaries_read, uint64List *boundaries_read,
- bool has_boundaries_write, uint64List *boundaries_write,
- bool has_boundaries_append, uint64List *boundaries_append,
- bool has_boundaries_flush, uint64List *boundaries_flush,
- Error **errp)
-{
- BlockBackend *blk = qmp_get_blk(NULL, id, errp);
- BlockAcctStats *stats;
- int ret;
-
- if (!blk) {
- return;
- }
-
- stats = blk_get_stats(blk);
-
- if (!has_boundaries && !has_boundaries_read && !has_boundaries_write &&
- !has_boundaries_flush)
- {
- block_latency_histograms_clear(stats);
- return;
- }
-
- if (has_boundaries || has_boundaries_read) {
- ret = block_latency_histogram_set(
- stats, BLOCK_ACCT_READ,
- has_boundaries_read ? boundaries_read : boundaries);
- if (ret) {
- error_setg(errp, "Device '%s' set read boundaries fail", id);
- return;
- }
- }
-
- if (has_boundaries || has_boundaries_write) {
- ret = block_latency_histogram_set(
- stats, BLOCK_ACCT_WRITE,
- has_boundaries_write ? boundaries_write : boundaries);
- if (ret) {
- error_setg(errp, "Device '%s' set write boundaries fail", id);
- return;
- }
- }
-
- if (has_boundaries || has_boundaries_append) {
- ret = block_latency_histogram_set(
- stats, BLOCK_ACCT_ZONE_APPEND,
- has_boundaries_append ? boundaries_append : boundaries);
- if (ret) {
- error_setg(errp, "Device '%s' set append write boundaries fail", id);
- return;
- }
- }
-
- if (has_boundaries || has_boundaries_flush) {
- ret = block_latency_histogram_set(
- stats, BLOCK_ACCT_FLUSH,
- has_boundaries_flush ? boundaries_flush : boundaries);
- if (ret) {
- error_setg(errp, "Device '%s' set flush boundaries fail", id);
- return;
- }
- }
-}
diff --git a/block/qapi-system.c b/block/qapi-system.c
new file mode 100644
index 0000000..54b7409
--- /dev/null
+++ b/block/qapi-system.c
@@ -0,0 +1,574 @@
+/*
+ * QMP command handlers specific to the system emulators
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+
+#include "block/block_int.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-block.h"
+#include "qobject/qdict.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+
+static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id,
+ Error **errp)
+{
+ BlockBackend *blk;
+
+ if (!blk_name == !qdev_id) {
+ error_setg(errp, "Need exactly one of 'device' and 'id'");
+ return NULL;
+ }
+
+ if (qdev_id) {
+ blk = blk_by_qdev_id(qdev_id, errp);
+ } else {
+ blk = blk_by_name(blk_name);
+ if (blk == NULL) {
+ error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
+ "Device '%s' not found", blk_name);
+ }
+ }
+
+ return blk;
+}
+
+/*
+ * Attempt to open the tray of @device.
+ * If @force, ignore its tray lock.
+ * Else, if the tray is locked, don't open it, but ask the guest to open it.
+ * On error, store an error through @errp and return -errno.
+ * If @device does not exist, return -ENODEV.
+ * If it has no removable media, return -ENOTSUP.
+ * If it has no tray, return -ENOSYS.
+ * If the guest was asked to open the tray, return -EINPROGRESS.
+ * Else, return 0.
+ */
+static int do_open_tray(const char *blk_name, const char *qdev_id,
+ bool force, Error **errp)
+{
+ BlockBackend *blk;
+ const char *device = qdev_id ?: blk_name;
+ bool locked;
+
+ blk = qmp_get_blk(blk_name, qdev_id, errp);
+ if (!blk) {
+ return -ENODEV;
+ }
+
+ if (!blk_dev_has_removable_media(blk)) {
+ error_setg(errp, "Device '%s' is not removable", device);
+ return -ENOTSUP;
+ }
+
+ if (!blk_dev_has_tray(blk)) {
+ error_setg(errp, "Device '%s' does not have a tray", device);
+ return -ENOSYS;
+ }
+
+ if (blk_dev_is_tray_open(blk)) {
+ return 0;
+ }
+
+ locked = blk_dev_is_medium_locked(blk);
+ if (locked) {
+ blk_dev_eject_request(blk, force);
+ }
+
+ if (!locked || force) {
+ blk_dev_change_media_cb(blk, false, &error_abort);
+ }
+
+ if (locked && !force) {
+ error_setg(errp, "Device '%s' is locked and force was not specified, "
+ "wait for tray to open and try again", device);
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+void qmp_blockdev_open_tray(const char *device,
+ const char *id,
+ bool has_force, bool force,
+ Error **errp)
+{
+ Error *local_err = NULL;
+ int rc;
+
+ if (!has_force) {
+ force = false;
+ }
+ rc = do_open_tray(device, id, force, &local_err);
+ if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ error_free(local_err);
+}
+
+void qmp_blockdev_close_tray(const char *device,
+ const char *id,
+ Error **errp)
+{
+ BlockBackend *blk;
+ Error *local_err = NULL;
+
+ blk = qmp_get_blk(device, id, errp);
+ if (!blk) {
+ return;
+ }
+
+ if (!blk_dev_has_removable_media(blk)) {
+ error_setg(errp, "Device '%s' is not removable", device ?: id);
+ return;
+ }
+
+ if (!blk_dev_has_tray(blk)) {
+ /* Ignore this command on tray-less devices */
+ return;
+ }
+
+ if (!blk_dev_is_tray_open(blk)) {
+ return;
+ }
+
+ blk_dev_change_media_cb(blk, true, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+}
+
+static void GRAPH_UNLOCKED
+blockdev_remove_medium(const char *device, const char *id, Error **errp)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs;
+ bool has_attached_device;
+
+ GLOBAL_STATE_CODE();
+
+ blk = qmp_get_blk(device, id, errp);
+ if (!blk) {
+ return;
+ }
+
+ /* For BBs without a device, we can exchange the BDS tree at will */
+ has_attached_device = blk_get_attached_dev(blk);
+
+ if (has_attached_device && !blk_dev_has_removable_media(blk)) {
+ error_setg(errp, "Device '%s' is not removable", device ?: id);
+ return;
+ }
+
+ if (has_attached_device && blk_dev_has_tray(blk) &&
+ !blk_dev_is_tray_open(blk))
+ {
+ error_setg(errp, "Tray of device '%s' is not open", device ?: id);
+ return;
+ }
+
+ bs = blk_bs(blk);
+ if (!bs) {
+ return;
+ }
+
+ bdrv_graph_rdlock_main_loop();
+ if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
+ bdrv_graph_rdunlock_main_loop();
+ return;
+ }
+ bdrv_graph_rdunlock_main_loop();
+
+ blk_remove_bs(blk);
+
+ if (!blk_dev_has_tray(blk)) {
+ /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
+ * called at all); therefore, the medium needs to be ejected here.
+ * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
+ * value passed here (i.e. false). */
+ blk_dev_change_media_cb(blk, false, &error_abort);
+ }
+}
+
+void qmp_blockdev_remove_medium(const char *id, Error **errp)
+{
+ blockdev_remove_medium(NULL, id, errp);
+}
+
+static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
+ BlockDriverState *bs, Error **errp)
+{
+ Error *local_err = NULL;
+ bool has_device;
+ int ret;
+
+ /* For BBs without a device, we can exchange the BDS tree at will */
+ has_device = blk_get_attached_dev(blk);
+
+ if (has_device && !blk_dev_has_removable_media(blk)) {
+ error_setg(errp, "Device is not removable");
+ return;
+ }
+
+ if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
+ error_setg(errp, "Tray of the device is not open");
+ return;
+ }
+
+ if (blk_bs(blk)) {
+ error_setg(errp, "There already is a medium in the device");
+ return;
+ }
+
+ ret = blk_insert_bs(blk, bs, errp);
+ if (ret < 0) {
+ return;
+ }
+
+ if (!blk_dev_has_tray(blk)) {
+ /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
+ * called at all); therefore, the medium needs to be pushed into the
+ * slot here.
+ * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
+ * value passed here (i.e. true). */
+ blk_dev_change_media_cb(blk, true, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ blk_remove_bs(blk);
+ return;
+ }
+ }
+}
+
+static void blockdev_insert_medium(const char *device, const char *id,
+ const char *node_name, Error **errp)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs;
+
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ blk = qmp_get_blk(device, id, errp);
+ if (!blk) {
+ return;
+ }
+
+ bs = bdrv_find_node(node_name);
+ if (!bs) {
+ error_setg(errp, "Node '%s' not found", node_name);
+ return;
+ }
+
+ if (bdrv_has_blk(bs)) {
+ error_setg(errp, "Node '%s' is already in use", node_name);
+ return;
+ }
+
+ qmp_blockdev_insert_anon_medium(blk, bs, errp);
+}
+
+void qmp_blockdev_insert_medium(const char *id, const char *node_name,
+ Error **errp)
+{
+ blockdev_insert_medium(NULL, id, node_name, errp);
+}
+
+void qmp_blockdev_change_medium(const char *device,
+ const char *id,
+ const char *filename,
+ const char *format,
+ bool has_force, bool force,
+ bool has_read_only,
+ BlockdevChangeReadOnlyMode read_only,
+ Error **errp)
+{
+ BlockBackend *blk;
+ BlockDriverState *medium_bs = NULL;
+ int bdrv_flags;
+ bool detect_zeroes;
+ int rc;
+ QDict *options = NULL;
+ Error *err = NULL;
+
+ blk = qmp_get_blk(device, id, errp);
+ if (!blk) {
+ goto fail;
+ }
+
+ if (blk_bs(blk)) {
+ blk_update_root_state(blk);
+ }
+
+ bdrv_flags = blk_get_open_flags_from_root_state(blk);
+ bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
+ BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY);
+
+ if (!has_read_only) {
+ read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
+ }
+
+ switch (read_only) {
+ case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
+ break;
+
+ case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
+ bdrv_flags &= ~BDRV_O_RDWR;
+ break;
+
+ case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
+ bdrv_flags |= BDRV_O_RDWR;
+ break;
+
+ default:
+ abort();
+ }
+
+ options = qdict_new();
+ detect_zeroes = blk_get_detect_zeroes_from_root_state(blk);
+ qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off");
+
+ if (format) {
+ qdict_put_str(options, "driver", format);
+ }
+
+ medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
+
+ if (!medium_bs) {
+ goto fail;
+ }
+
+ rc = do_open_tray(device, id, force, &err);
+ if (rc && rc != -ENOSYS) {
+ error_propagate(errp, err);
+ goto fail;
+ }
+ error_free(err);
+ err = NULL;
+
+ blockdev_remove_medium(device, id, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto fail;
+ }
+
+ qmp_blockdev_insert_anon_medium(blk, medium_bs, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto fail;
+ }
+
+ qmp_blockdev_close_tray(device, id, errp);
+
+fail:
+ /* If the medium has been inserted, the device has its own reference, so
+ * ours must be relinquished; and if it has not been inserted successfully,
+ * the reference must be relinquished anyway */
+ bdrv_unref(medium_bs);
+}
+
+void qmp_eject(const char *device, const char *id,
+ bool has_force, bool force, Error **errp)
+{
+ Error *local_err = NULL;
+ int rc;
+
+ if (!has_force) {
+ force = false;
+ }
+
+ rc = do_open_tray(device, id, force, &local_err);
+ if (rc && rc != -ENOSYS) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ error_free(local_err);
+
+ blockdev_remove_medium(device, id, errp);
+}
+
+/* throttling disk I/O limits */
+void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
+{
+ ThrottleConfig cfg;
+ BlockDriverState *bs;
+ BlockBackend *blk;
+
+ blk = qmp_get_blk(arg->device, arg->id, errp);
+ if (!blk) {
+ return;
+ }
+
+ bs = blk_bs(blk);
+ if (!bs) {
+ error_setg(errp, "Device has no medium");
+ return;
+ }
+
+ throttle_config_init(&cfg);
+ cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps;
+ cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd;
+ cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr;
+
+ cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops;
+ cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd;
+ cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr;
+
+ if (arg->has_bps_max) {
+ cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max;
+ }
+ if (arg->has_bps_rd_max) {
+ cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max;
+ }
+ if (arg->has_bps_wr_max) {
+ cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max;
+ }
+ if (arg->has_iops_max) {
+ cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max;
+ }
+ if (arg->has_iops_rd_max) {
+ cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max;
+ }
+ if (arg->has_iops_wr_max) {
+ cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max;
+ }
+
+ if (arg->has_bps_max_length) {
+ cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length;
+ }
+ if (arg->has_bps_rd_max_length) {
+ cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length;
+ }
+ if (arg->has_bps_wr_max_length) {
+ cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length;
+ }
+ if (arg->has_iops_max_length) {
+ cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length;
+ }
+ if (arg->has_iops_rd_max_length) {
+ cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length;
+ }
+ if (arg->has_iops_wr_max_length) {
+ cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length;
+ }
+
+ if (arg->has_iops_size) {
+ cfg.op_size = arg->iops_size;
+ }
+
+ if (!throttle_is_valid(&cfg, errp)) {
+ return;
+ }
+
+ if (throttle_enabled(&cfg)) {
+ /* Enable I/O limits if they're not enabled yet, otherwise
+ * just update the throttling group. */
+ if (!blk_get_public(blk)->throttle_group_member.throttle_state) {
+ blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id);
+ } else if (arg->group) {
+ blk_io_limits_update_group(blk, arg->group);
+ }
+ /* Set the new throttling configuration */
+ blk_set_io_limits(blk, &cfg);
+ } else if (blk_get_public(blk)->throttle_group_member.throttle_state) {
+ /* If all throttling settings are set to 0, disable I/O limits */
+ blk_io_limits_disable(blk);
+ }
+}
+
+void qmp_block_latency_histogram_set(
+ const char *id,
+ bool has_boundaries, uint64List *boundaries,
+ bool has_boundaries_read, uint64List *boundaries_read,
+ bool has_boundaries_write, uint64List *boundaries_write,
+ bool has_boundaries_append, uint64List *boundaries_append,
+ bool has_boundaries_flush, uint64List *boundaries_flush,
+ Error **errp)
+{
+ BlockBackend *blk = qmp_get_blk(NULL, id, errp);
+ BlockAcctStats *stats;
+ int ret;
+
+ if (!blk) {
+ return;
+ }
+
+ stats = blk_get_stats(blk);
+
+ if (!has_boundaries && !has_boundaries_read && !has_boundaries_write &&
+ !has_boundaries_flush)
+ {
+ block_latency_histograms_clear(stats);
+ return;
+ }
+
+ if (has_boundaries || has_boundaries_read) {
+ ret = block_latency_histogram_set(
+ stats, BLOCK_ACCT_READ,
+ has_boundaries_read ? boundaries_read : boundaries);
+ if (ret) {
+ error_setg(errp, "Device '%s' set read boundaries fail", id);
+ return;
+ }
+ }
+
+ if (has_boundaries || has_boundaries_write) {
+ ret = block_latency_histogram_set(
+ stats, BLOCK_ACCT_WRITE,
+ has_boundaries_write ? boundaries_write : boundaries);
+ if (ret) {
+ error_setg(errp, "Device '%s' set write boundaries fail", id);
+ return;
+ }
+ }
+
+ if (has_boundaries || has_boundaries_append) {
+ ret = block_latency_histogram_set(
+ stats, BLOCK_ACCT_ZONE_APPEND,
+ has_boundaries_append ? boundaries_append : boundaries);
+ if (ret) {
+ error_setg(errp, "Device '%s' set append write boundaries fail", id);
+ return;
+ }
+ }
+
+ if (has_boundaries || has_boundaries_flush) {
+ ret = block_latency_histogram_set(
+ stats, BLOCK_ACCT_FLUSH,
+ has_boundaries_flush ? boundaries_flush : boundaries);
+ if (ret) {
+ error_setg(errp, "Device '%s' set flush boundaries fail", id);
+ return;
+ }
+ }
+}
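
do_open_tray() in the new block/qapi-system.c encodes several soft conditions in its return value (see the comment block above it), and its callers deliberately ignore some of them: qmp_blockdev_open_tray() swallows -ENOSYS and -EINPROGRESS, while qmp_eject() and qmp_blockdev_change_medium() swallow only -ENOSYS. A minimal sketch of that caller-side filtering, using a stub in place of the real helper:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stub standing in for do_open_tray(); returns -ENOSYS the way a
     * tray-less device would. Hypothetical, for illustration only. */
    static int do_open_tray(const char *device, const char *id, bool force)
    {
        (void)device; (void)id; (void)force;
        return -ENOSYS;
    }

    int main(void)
    {
        int rc = do_open_tray("ide0-cd0", NULL, false);

        /* Mirror the caller-side filtering: only hard errors are reported. */
        if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
            fprintf(stderr, "open-tray failed: %d\n", rc);
            return 1;
        }
        puts("soft case ignored, command reports success");
        return 0;
    }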
diff --git a/block/qapi.c b/block/qapi.c
index 2b5793f..2c50a6b 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -33,13 +33,13 @@
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/qemu-print.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
BlockDriverState *bs,
@@ -63,6 +63,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
info->file = g_strdup(bs->filename);
info->ro = bdrv_is_read_only(bs);
info->drv = g_strdup(bs->drv->format_name);
+ info->active = !bdrv_is_inactive(bs);
info->encrypted = bs->encrypted;
info->cache = g_new(BlockdevCacheInfo, 1);
diff --git a/block/qcow.c b/block/qcow.c
index c2f89db..8a3e759 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -27,15 +27,15 @@
#include "qemu/error-report.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/bswap.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include <zlib.h>
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto/block.h"
@@ -530,7 +530,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
}
static int coroutine_fn GRAPH_RDLOCK
-qcow_co_block_status(BlockDriverState *bs, bool want_zero,
+qcow_co_block_status(BlockDriverState *bs, unsigned int mode,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
@@ -831,7 +831,7 @@ qcow_co_create(BlockdevCreateOptions *opts, Error **errp)
}
if (qcow_opts->encrypt &&
- qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW)
+ qcow_opts->encrypt->format != QCRYPTO_BLOCK_FORMAT_QCOW)
{
error_setg(errp, "Unsupported encryption format");
return -EINVAL;
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 92e4797..1e8dc48 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -23,7 +23,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
diff --git a/block/qcow2.c b/block/qcow2.c
index 70b1973..45451a7 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -25,15 +25,15 @@
#include "qemu/osdep.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
@@ -1721,7 +1721,7 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto fail;
}
- } else if (!(flags & BDRV_O_NO_IO)) {
+ } else {
error_setg(errp, "Missing CRYPTO header for crypt method %d",
s->crypt_method_header);
ret = -EINVAL;
@@ -1895,7 +1895,9 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
g_free(s->image_data_file);
if (open_data_file && has_data_file(bs)) {
bdrv_graph_co_rdunlock();
+ bdrv_drain_all_begin();
bdrv_co_unref_child(bs, s->data_file);
+ bdrv_drain_all_end();
bdrv_graph_co_rdlock();
s->data_file = NULL;
}
@@ -1976,7 +1978,7 @@ static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
- if (bs->encrypted) {
+ if (s->crypto) {
/* Encryption works on a sector granularity */
bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
}
@@ -2141,9 +2143,9 @@ static void qcow2_join_options(QDict *options, QDict *old_options)
}
static int coroutine_fn GRAPH_RDLOCK
-qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
- int64_t count, int64_t *pnum, int64_t *map,
- BlockDriverState **file)
+qcow2_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t count, int64_t *pnum,
+ int64_t *map, BlockDriverState **file)
{
BDRVQcow2State *s = bs->opaque;
uint64_t host_offset;
@@ -2821,9 +2823,11 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
if (close_data_file && has_data_file(bs)) {
GLOBAL_STATE_CODE();
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->data_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
s->data_file = NULL;
bdrv_graph_rdlock_main_loop();
}
@@ -3214,10 +3218,10 @@ qcow2_set_up_encryption(BlockDriverState *bs,
int fmt, ret;
switch (cryptoopts->format) {
- case Q_CRYPTO_BLOCK_FORMAT_LUKS:
+ case QCRYPTO_BLOCK_FORMAT_LUKS:
fmt = QCOW_CRYPT_LUKS;
break;
- case Q_CRYPTO_BLOCK_FORMAT_QCOW:
+ case QCRYPTO_BLOCK_FORMAT_QCOW:
fmt = QCOW_CRYPT_AES;
break;
default:
@@ -5299,17 +5303,17 @@ qcow2_get_specific_info(BlockDriverState *bs, Error **errp)
} else {
/* if this assertion fails, this probably means a new version was
* added without having it covered here */
- assert(false);
+ g_assert_not_reached();
}
if (encrypt_info) {
ImageInfoSpecificQCow2Encryption *qencrypt =
g_new(ImageInfoSpecificQCow2Encryption, 1);
switch (encrypt_info->format) {
- case Q_CRYPTO_BLOCK_FORMAT_QCOW:
+ case QCRYPTO_BLOCK_FORMAT_QCOW:
qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES;
break;
- case Q_CRYPTO_BLOCK_FORMAT_LUKS:
+ case QCRYPTO_BLOCK_FORMAT_LUKS:
qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS;
qencrypt->u.luks = encrypt_info->u.luks;
break;
@@ -5948,7 +5952,7 @@ static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
return -EOPNOTSUPP;
}
- if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
+ if (qopts->encrypt->format != QCRYPTO_BLOCK_FORMAT_LUKS) {
error_setg(errp,
"Amend can't be used to change the qcow2 encryption format");
return -EOPNOTSUPP;
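
Several hunks in this series (mirror_start_job() above, qcow2_do_open()/qcow2_do_close() here, quorum_open()/quorum_close() below) bracket graph-changing calls such as bdrv_unref_child() with bdrv_drain_all_begin()/bdrv_drain_all_end(), taken outside the graph write lock where one is held, so that no request is in flight while node pointers change. The nesting order, reduced to a sketch with placeholder functions rather than the real QEMU APIs:

    #include <stdio.h>

    /* Placeholders standing in for bdrv_drain_all_begin/end and
     * bdrv_graph_wrlock/wrunlock; only the nesting order is the point. */
    static void drain_all_begin(void) { puts("quiesce all in-flight I/O"); }
    static void graph_wrlock(void)    { puts("take graph write lock"); }
    static void graph_wrunlock(void)  { puts("drop graph write lock"); }
    static void drain_all_end(void)   { puts("resume I/O"); }

    static void detach_child(void)    { puts("unref child, graph changes"); }

    int main(void)
    {
        /* Drain first so no request can observe the half-modified graph,
         * then take the write lock for the actual pointer surgery. */
        drain_all_begin();
        graph_wrlock();
        detach_child();
        graph_wrunlock();
        drain_all_end();
        return 0;
    }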
diff --git a/block/qed.c b/block/qed.c
index fa5bc11..4a36fb3 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -23,8 +23,8 @@
#include "qemu/memalign.h"
#include "trace.h"
#include "qed.h"
-#include "sysemu/block-backend.h"
-#include "qapi/qmp/qdict.h"
+#include "system/block-backend.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
@@ -353,6 +353,7 @@ static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
qed_cancel_need_check_timer(s);
timer_free(s->need_check_timer);
+ s->need_check_timer = NULL;
}
static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
@@ -832,9 +833,9 @@ fail:
}
static int coroutine_fn GRAPH_RDLOCK
-bdrv_qed_co_block_status(BlockDriverState *bs, bool want_zero, int64_t pos,
- int64_t bytes, int64_t *pnum, int64_t *map,
- BlockDriverState **file)
+bdrv_qed_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t pos, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file)
{
BDRVQEDState *s = bs->opaque;
size_t len = MIN(bytes, SIZE_MAX);
diff --git a/block/quorum.c b/block/quorum.c
index db8fe89..cc3bc5f 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -23,10 +23,10 @@
#include "block/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "crypto/hash.h"
#define HASH_LENGTH 32
@@ -393,7 +393,7 @@ static int quorum_compute_hash(QuorumAIOCB *acb, int i, QuorumVoteValue *hash)
/* XXX - would be nice if we could pass in the Error **
* and propagate that back, but this quorum code is
* restricted to just errno values currently */
- if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256,
+ if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256,
qiov->iov, qiov->niov,
&data, &len,
NULL) < 0) {
@@ -1037,6 +1037,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
close_exit:
/* cleanup on error */
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
for (i = 0; i < s->num_children; i++) {
if (!opened[i]) {
@@ -1045,6 +1046,7 @@ close_exit:
bdrv_unref_child(bs, s->children[i]);
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_free(s->children);
g_free(opened);
exit:
@@ -1057,11 +1059,13 @@ static void quorum_close(BlockDriverState *bs)
BDRVQuorumState *s = bs->opaque;
int i;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
for (i = 0; i < s->num_children; i++) {
bdrv_unref_child(bs, s->children[i]);
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_free(s->children);
}
@@ -1226,7 +1230,7 @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
* region contains zeroes, and BDRV_BLOCK_DATA otherwise.
*/
static int coroutine_fn GRAPH_RDLOCK
-quorum_co_block_status(BlockDriverState *bs, bool want_zero,
+quorum_co_block_status(BlockDriverState *bs, unsigned int mode,
int64_t offset, int64_t count,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
@@ -1238,7 +1242,7 @@ quorum_co_block_status(BlockDriverState *bs, bool want_zero,
for (i = 0; i < s->num_children; i++) {
int64_t bytes;
ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false,
- want_zero, offset, count,
+ mode, offset, count,
&bytes, NULL, NULL, NULL);
if (ret < 0) {
quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count,
@@ -1308,7 +1312,7 @@ static BlockDriver bdrv_quorum = {
static void bdrv_quorum_init(void)
{
- if (!qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA256)) {
+ if (!qcrypto_hash_supports(QCRYPTO_HASH_ALGO_SHA256)) {
/* SHA256 hash support is required for quorum device */
return;
}
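
A pattern that repeats throughout this series (qcow2 and quorum above; replication, snapshot, stream, vmdk, blockdev and blockjob below) is that graph-modifying calls such as bdrv_unref_child() are now wrapped in a drained section that is entered before the graph writer lock and left only after the lock is dropped. A minimal sketch of that ordering, using only functions that already appear in these hunks (illustrative only, not part of the patch):

    /* Sketch: drain first, then write-lock, then modify the graph. */
    static void detach_child_drained(BlockDriverState *bs, BdrvChild *child)
    {
        bdrv_drain_all_begin();       /* quiesce all in-flight I/O */
        bdrv_graph_wrlock();          /* exclusive access to the block graph */
        bdrv_unref_child(bs, child);  /* the actual graph modification */
        bdrv_graph_wrunlock();        /* drop the lock first ... */
        bdrv_drain_all_end();         /* ... then end the drained section */
    }
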
diff --git a/block/raw-format.c b/block/raw-format.c
index ac7e849..df16ac1 100644
--- a/block/raw-format.c
+++ b/block/raw-format.c
@@ -111,7 +111,7 @@ raw_apply_options(BlockDriverState *bs, BDRVRawState *s, uint64_t offset,
if (offset > real_size) {
error_setg(errp, "Offset (%" PRIu64 ") cannot be greater than "
"size of the containing file (%" PRId64 ")",
- s->offset, real_size);
+ offset, real_size);
return -EINVAL;
}
@@ -119,7 +119,7 @@ raw_apply_options(BlockDriverState *bs, BDRVRawState *s, uint64_t offset,
error_setg(errp, "The sum of offset (%" PRIu64 ") and size "
"(%" PRIu64 ") has to be smaller or equal to the "
" actual size of the containing file (%" PRId64 ")",
- s->offset, s->size, real_size);
+ offset, size, real_size);
return -EINVAL;
}
@@ -283,8 +283,8 @@ fail:
}
static int coroutine_fn GRAPH_RDLOCK
-raw_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
+raw_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVRawState *s = bs->opaque;
diff --git a/block/rbd.c b/block/rbd.c
index 9c0fd0c..951cd63 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -23,11 +23,11 @@
#include "block/qdict.h"
#include "crypto/secret.h"
#include "qemu/cutils.h"
-#include "sysemu/replay.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qlist.h"
+#include "system/replay.h"
+#include "qobject/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qlist.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
@@ -254,7 +254,6 @@ static void qemu_rbd_parse_filename(const char *filename, QDict *options,
done:
g_free(buf);
qobject_unref(keypairs);
- return;
}
static int qemu_rbd_set_auth(rados_t cluster, BlockdevOptionsRbd *opts,
@@ -367,11 +366,11 @@ static int qemu_rbd_convert_luks_create_options(
if (luks_opts->has_cipher_alg) {
switch (luks_opts->cipher_alg) {
- case QCRYPTO_CIPHER_ALG_AES_128: {
+ case QCRYPTO_CIPHER_ALGO_AES_128: {
*alg = RBD_ENCRYPTION_ALGORITHM_AES128;
break;
}
- case QCRYPTO_CIPHER_ALG_AES_256: {
+ case QCRYPTO_CIPHER_ALGO_AES_256: {
*alg = RBD_ENCRYPTION_ALGORITHM_AES256;
break;
}
@@ -1504,9 +1503,9 @@ static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len,
}
static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum,
- int64_t *map,
+ unsigned int mode,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVRBDState *s = bs->opaque;
diff --git a/block/replication.c b/block/replication.c
index 0415a5e..0879718 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -19,9 +19,9 @@
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/block_backup.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "block/replication.h"
typedef enum {
@@ -176,7 +176,6 @@ static void replication_child_perm(BlockDriverState *bs, BdrvChild *c,
*nshared = BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE
| BLK_PERM_WRITE_UNCHANGED;
- return;
}
static int64_t coroutine_fn GRAPH_RDLOCK
@@ -541,6 +540,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_ref(hidden_disk->bs);
@@ -550,6 +550,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (local_err) {
error_propagate(errp, local_err);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return;
}
@@ -560,6 +561,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (local_err) {
error_propagate(errp, local_err);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return;
}
@@ -572,20 +574,23 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
!check_top_bs(top_bs, bs)) {
error_setg(errp, "No top_bs or it is invalid");
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
reopen_backing_file(bs, false, NULL);
return;
}
bdrv_op_block_all(top_bs, s->blocker);
- bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, false,
NULL, &perf,
BLOCKDEV_ON_ERROR_REPORT,
- BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
+ BLOCKDEV_ON_ERROR_REPORT,
+ ON_CBW_ERROR_BREAK_GUEST_WRITE,
+ JOB_INTERNAL,
backup_job_completed, bs, NULL, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -651,12 +656,14 @@ static void replication_done(void *opaque, int ret)
if (ret == 0) {
s->stage = BLOCK_REPLICATION_DONE;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->secondary_disk);
s->secondary_disk = NULL;
bdrv_unref_child(bs, s->hidden_disk);
s->hidden_disk = NULL;
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
s->error = 0;
} else {
diff --git a/block/reqlist.c b/block/reqlist.c
index 08cb57c..098e807 100644
--- a/block/reqlist.c
+++ b/block/reqlist.c
@@ -20,8 +20,6 @@
void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset,
int64_t bytes)
{
- assert(!reqlist_find_conflict(reqs, offset, bytes));
-
*req = (BlockReq) {
.offset = offset,
.bytes = bytes,
diff --git a/block/snapshot-access.c b/block/snapshot-access.c
index 84d0d13..17ed240 100644
--- a/block/snapshot-access.c
+++ b/block/snapshot-access.c
@@ -22,7 +22,7 @@
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/cutils.h"
#include "block/block_int.h"
@@ -41,11 +41,11 @@ snapshot_access_co_preadv_part(BlockDriverState *bs,
static int coroutine_fn GRAPH_RDLOCK
snapshot_access_co_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset,
+ unsigned int mode, int64_t offset,
int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
- return bdrv_co_snapshot_block_status(bs->file->bs, want_zero, offset,
+ return bdrv_co_snapshot_block_status(bs->file->bs, mode, offset,
bytes, pnum, map, file);
}
diff --git a/block/snapshot.c b/block/snapshot.c
index 8fd1756..28c9c43 100644
--- a/block/snapshot.c
+++ b/block/snapshot.c
@@ -27,10 +27,10 @@
#include "block/block_int.h"
#include "block/qdict.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/option.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
QemuOptsList internal_snapshot_opts = {
.name = "snapshot",
@@ -291,11 +291,14 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
}
/* .bdrv_open() will re-attach it */
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, fallback);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
+ memset(bs->opaque, 0, drv->instance_size);
open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
qobject_unref(options);
if (open_ret < 0) {
@@ -326,7 +329,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
/**
* Delete an internal snapshot by @snapshot_id and @name.
- * @bs: block device used in the operation
+ * @bs: block device used in the operation, must be drained
* @snapshot_id: unique snapshot ID, or NULL
* @name: snapshot name, or NULL
* @errp: location to store error
@@ -357,6 +360,8 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
GLOBAL_STATE_CODE();
+ assert(bs->quiesce_counter > 0);
+
if (!drv) {
error_setg(errp, "Device '%s' has no medium",
bdrv_get_device_name(bs));
@@ -367,9 +372,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
return -EINVAL;
}
- /* drain all pending i/o before deleting snapshot */
- bdrv_drained_begin(bs);
-
if (drv->bdrv_snapshot_delete) {
ret = drv->bdrv_snapshot_delete(bs, snapshot_id, name, errp);
} else if (fallback_bs) {
@@ -381,7 +383,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
ret = -ENOTSUP;
}
- bdrv_drained_end(bs);
return ret;
}
@@ -570,19 +571,22 @@ int bdrv_all_delete_snapshot(const char *name,
ERRP_GUARD();
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ int ret = 0;
GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
- if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
- return -1;
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
+
+ ret = bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp);
+ if (ret < 0) {
+ goto out;
}
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
QEMUSnapshotInfo sn1, *snapshot = &sn1;
- int ret = 0;
if ((devices || bdrv_all_snapshots_includes_bs(bs)) &&
bdrv_snapshot_find(bs, snapshot, name) >= 0)
@@ -593,13 +597,16 @@ int bdrv_all_delete_snapshot(const char *name,
if (ret < 0) {
error_prepend(errp, "Could not delete snapshot '%s' on '%s': ",
name, bdrv_get_device_or_node_name(bs));
- return -1;
+ goto out;
}
iterbdrvs = iterbdrvs->next;
}
- return 0;
+out:
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
+ return ret;
}
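
Note the contract change above: bdrv_snapshot_delete() no longer drains internally and instead asserts bs->quiesce_counter > 0, so callers must provide the drained section themselves, as bdrv_all_delete_snapshot() now does. A hedged sketch of a direct caller under the new contract (the wrapper name is invented for illustration):

    /* Sketch: caller-side draining for bdrv_snapshot_delete() after this patch. */
    static int delete_snapshot_drained(BlockDriverState *bs, const char *id,
                                       const char *name, Error **errp)
    {
        int ret;

        bdrv_drain_all_begin();            /* satisfies the quiesce_counter assertion */
        bdrv_graph_rdlock_main_loop();
        ret = bdrv_snapshot_delete(bs, id, name, errp);
        bdrv_graph_rdunlock_main_loop();
        bdrv_drain_all_end();
        return ret;
    }
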
diff --git a/block/ssh.c b/block/ssh.c
index 27d582e..70fe7cf 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -39,8 +39,8 @@
#include "qemu/sockets.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-visit-block-core.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "trace.h"
@@ -364,7 +364,7 @@ static unsigned hex2decimal(char ch)
return 10 + (ch - 'A');
}
- return -1;
+ return UINT_MAX;
}
/* Compare the binary fingerprint (hash of host key) with the
@@ -376,13 +376,15 @@ static int compare_fingerprint(const unsigned char *fingerprint, size_t len,
unsigned c;
while (len > 0) {
+ unsigned c0, c1;
while (*host_key_check == ':')
host_key_check++;
- if (!qemu_isxdigit(host_key_check[0]) ||
- !qemu_isxdigit(host_key_check[1]))
+ c0 = hex2decimal(host_key_check[0]);
+ c1 = hex2decimal(host_key_check[1]);
+ if (c0 > 0xf || c1 > 0xf) {
return 1;
- c = hex2decimal(host_key_check[0]) * 16 +
- hex2decimal(host_key_check[1]);
+ }
+ c = c0 * 16 + c1;
if (c - *fingerprint != 0)
return c - *fingerprint;
fingerprint++;
@@ -474,7 +476,6 @@ static int check_host_key(BDRVSSHState *s, SshHostKeyCheck *hkc, Error **errp)
errp);
}
g_assert_not_reached();
- break;
case SSH_HOST_KEY_CHECK_MODE_KNOWN_HOSTS:
return check_host_key_knownhosts(s, errp);
default:
@@ -865,9 +866,6 @@ static int ssh_open(BlockDriverState *bs, QDict *options, int bdrv_flags,
goto err;
}
- /* Go non-blocking. */
- ssh_set_blocking(s->session, 0);
-
if (s->attrs->type == SSH_FILEXFER_TYPE_REGULAR) {
bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
}
diff --git a/block/stream.c b/block/stream.c
index 7031eef..f5441f2 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -16,9 +16,9 @@
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/ratelimit.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/copy-on-read.h"
enum {
@@ -80,11 +80,10 @@ static int stream_prepare(Job *job)
* may end up working with the wrong base node (or it might even have gone
* away by the time we want to use it).
*/
- bdrv_drained_begin(unfiltered_bs);
if (unfiltered_bs_cow) {
bdrv_ref(unfiltered_bs_cow);
- bdrv_drained_begin(unfiltered_bs_cow);
}
+ bdrv_drain_all_begin();
bdrv_graph_rdlock_main_loop();
base = bdrv_filter_or_cow_bs(s->above_base);
@@ -123,11 +122,10 @@ static int stream_prepare(Job *job)
}
out:
+ bdrv_drain_all_end();
if (unfiltered_bs_cow) {
- bdrv_drained_end(unfiltered_bs_cow);
bdrv_unref(unfiltered_bs_cow);
}
- bdrv_drained_end(unfiltered_bs);
return ret;
}
@@ -155,8 +153,8 @@ static void stream_clean(Job *job)
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
- BlockDriverState *unfiltered_bs;
- int64_t len;
+ BlockDriverState *unfiltered_bs = NULL;
+ int64_t len = -1;
int64_t offset = 0;
int error = 0;
int64_t n = 0; /* bytes */
@@ -177,7 +175,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
for ( ; offset < len; offset += n) {
bool copy;
- int ret;
+ int ret = -1;
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
@@ -373,10 +371,12 @@ void stream_start(const char *job_id, BlockDriverState *bs,
* already have our own plans. Also don't allow resize as the image size is
* queried only at the job start and then cached.
*/
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
if (block_job_add_bdrv(&s->common, "active node", bs, 0,
basic_flags | BLK_PERM_WRITE, errp)) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
@@ -397,10 +397,12 @@ void stream_start(const char *job_id, BlockDriverState *bs,
basic_flags, errp);
if (ret < 0) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
goto fail;
}
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
s->base_overlay = base_overlay;
s->above_base = above_base;
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index f5c0fac..66fdce9 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -23,13 +23,13 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/throttle-options.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-block-core.h"
#include "qom/object.h"
@@ -908,7 +908,6 @@ unlock:
qemu_mutex_unlock(&tg->lock);
qapi_free_ThrottleLimits(argp);
error_propagate(errp, local_err);
- return;
}
static void throttle_group_get_limits(Object *obj, Visitor *v,
@@ -934,7 +933,8 @@ static bool throttle_group_can_be_deleted(UserCreatable *uc)
return OBJECT(uc)->ref == 1;
}
-static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
+static void throttle_group_obj_class_init(ObjectClass *klass,
+ const void *class_data)
{
size_t i = 0;
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -967,7 +967,7 @@ static const TypeInfo throttle_group_info = {
.instance_size = sizeof(ThrottleGroup),
.instance_init = throttle_group_obj_init,
.instance_finalize = throttle_group_obj_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
},
diff --git a/block/vdi.c b/block/vdi.c
index 6363da0..3ddc62a 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -3,10 +3,12 @@
*
* Copyright (c) 2009, 2012 Stefan Weil
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
- * (at your option) version 3 or any later version.
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -56,7 +58,7 @@
#include "qapi/qapi-visit-block-core.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/bswap.h"
@@ -85,7 +87,7 @@
/* Command line option for static images. */
#define BLOCK_OPT_STATIC "static"
-#define SECTOR_SIZE 512
+#define SECTOR_SIZE 512ULL
#define DEFAULT_CLUSTER_SIZE 1048576
/* Note: can't use 1 * MiB, because it's passed to stringify() */
@@ -440,7 +442,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
} else if (header.sector_size != SECTOR_SIZE) {
error_setg(errp, "unsupported VDI image (sector size %" PRIu32
- " is not %u)", header.sector_size, SECTOR_SIZE);
+ " is not %llu)", header.sector_size, SECTOR_SIZE);
ret = -ENOTSUP;
goto fail;
} else if (header.block_size != DEFAULT_CLUSTER_SIZE) {
@@ -521,8 +523,8 @@ static int vdi_reopen_prepare(BDRVReopenState *state,
}
static int coroutine_fn GRAPH_RDLOCK
-vdi_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
+vdi_co_block_status(BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
diff --git a/block/vhdx.c b/block/vhdx.c
index 5aa1a13..b2a4b81 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -19,7 +19,7 @@
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/crc32c.h"
@@ -29,7 +29,7 @@
#include "vhdx.h"
#include "migration/blocker.h"
#include "qemu/uuid.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
diff --git a/block/vmdk.c b/block/vmdk.c
index 78f6433..89a7250 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -26,8 +26,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block_int.h"
-#include "sysemu/block-backend.h"
-#include "qapi/qmp/qdict.h"
+#include "system/block-backend.h"
+#include "qobject/qdict.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -271,6 +271,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
BDRVVmdkState *s = bs->opaque;
VmdkExtent *e;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
for (i = 0; i < s->num_extents; i++) {
e = &s->extents[i];
@@ -283,6 +284,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
}
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_free(s->extents);
}
@@ -1247,9 +1249,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_graph_rdlock_main_loop();
goto out;
}
@@ -1266,9 +1270,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
g_free(buf);
if (ret) {
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_graph_rdlock_main_loop();
goto out;
}
@@ -1277,9 +1283,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
if (ret) {
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_graph_rdlock_main_loop();
goto out;
}
@@ -1287,9 +1295,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_graph_rdlock_main_loop();
ret = -ENOTSUP;
goto out;
@@ -1777,7 +1787,7 @@ static inline uint64_t vmdk_find_offset_in_cluster(VmdkExtent *extent,
}
static int coroutine_fn GRAPH_RDLOCK
-vmdk_co_block_status(BlockDriverState *bs, bool want_zero,
+vmdk_co_block_status(BlockDriverState *bs, unsigned int mode,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
diff --git a/block/vpc.c b/block/vpc.c
index d95a204..801ff57 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -27,14 +27,14 @@
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "migration/blocker.h"
#include "qemu/bswap.h"
#include "qemu/uuid.h"
#include "qemu/memalign.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
@@ -216,6 +216,39 @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts,
}
}
+/*
+ * Microsoft Virtual PC and Microsoft Hyper-V produce and read
+ * VHD image sizes differently. VPC will rely on CHS geometry,
+ * while Hyper-V and disk2vhd use the size specified in the footer.
+ *
+ * We use a couple of approaches to try and determine the correct method:
+ * look at the Creator App field, and look for images that have CHS
+ * geometry that is the maximum value.
+ *
+ * If the CHS geometry is the maximum CHS geometry, then we assume that
+ * the size is the footer->current_size to avoid truncation. Otherwise,
+ * we follow the table based on footer->creator_app:
+ *
+ * Known creator apps:
+ * 'vpc ' : CHS Virtual PC (uses disk geometry)
+ * 'qemu' : CHS QEMU (uses disk geometry)
+ * 'qem2' : current_size QEMU (uses current_size)
+ * 'win ' : current_size Hyper-V
+ * 'd2v ' : current_size Disk2vhd
+ * 'tap\0' : current_size XenServer
+ * 'CTXS' : current_size XenConverter
+ * 'wa\0\0': current_size Azure
+ *
+ * The user can override the table values via drive options; however,
+ * even with an override we will still use current_size for images
+ * that have CHS geometry of the maximum size.
+ */
+static bool vpc_ignore_current_size(VHDFooter *footer)
+{
+ return !strncmp(footer->creator_app, "vpc ", 4) ||
+ !strncmp(footer->creator_app, "qemu", 4);
+}
+
static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@@ -304,36 +337,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
bs->total_sectors = (int64_t)
be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;
- /* Microsoft Virtual PC and Microsoft Hyper-V produce and read
- * VHD image sizes differently. VPC will rely on CHS geometry,
- * while Hyper-V and disk2vhd use the size specified in the footer.
- *
- * We use a couple of approaches to try and determine the correct method:
- * look at the Creator App field, and look for images that have CHS
- * geometry that is the maximum value.
- *
- * If the CHS geometry is the maximum CHS geometry, then we assume that
- * the size is the footer->current_size to avoid truncation. Otherwise,
- * we follow the table based on footer->creator_app:
- *
- * Known creator apps:
- * 'vpc ' : CHS Virtual PC (uses disk geometry)
- * 'qemu' : CHS QEMU (uses disk geometry)
- * 'qem2' : current_size QEMU (uses current_size)
- * 'win ' : current_size Hyper-V
- * 'd2v ' : current_size Disk2vhd
- * 'tap\0' : current_size XenServer
- * 'CTXS' : current_size XenConverter
- *
- * The user can override the table values via drive options, however
- * even with an override we will still use current_size for images
- * that have CHS geometry of the maximum size.
- */
- use_chs = (!!strncmp(footer->creator_app, "win ", 4) &&
- !!strncmp(footer->creator_app, "qem2", 4) &&
- !!strncmp(footer->creator_app, "d2v ", 4) &&
- !!strncmp(footer->creator_app, "CTXS", 4) &&
- !!memcmp(footer->creator_app, "tap", 4)) || s->force_use_chs;
+ /* Use CHS or current_size to determine the image size. */
+ use_chs = vpc_ignore_current_size(footer) || s->force_use_chs;
if (!use_chs || bs->total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) {
bs->total_sectors = be64_to_cpu(footer->current_size) /
@@ -721,7 +726,7 @@ fail:
}
static int coroutine_fn GRAPH_RDLOCK
-vpc_co_block_status(BlockDriverState *bs, bool want_zero,
+vpc_co_block_status(BlockDriverState *bs, unsigned int mode,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
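
The creator_app table moved into the comment above vpc_ignore_current_size() feeds a single decision in vpc_open(): only 'vpc ' and 'qemu' images fall back to CHS geometry, and even those use current_size when the CHS fields are maxed out or the user forces it. A hedged sketch condensing that decision into one helper (the wrapper name is invented; BDRVVPCState is assumed to be the driver's state type):

    /* Sketch: the image-size decision in vpc_open() after this patch, condensed. */
    static int64_t vpc_total_sectors_example(BDRVVPCState *s, VHDFooter *footer,
                                             int64_t chs_total_sectors)
    {
        bool use_chs = vpc_ignore_current_size(footer) || s->force_use_chs;

        if (!use_chs || chs_total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) {
            /* 'qem2', 'win ', 'd2v ', 'tap\0', 'CTXS', 'wa\0\0', maxed-out
             * CHS geometry, or an explicit override: trust current_size. */
            return be64_to_cpu(footer->current_size) / BDRV_SECTOR_SIZE;
        }
        return chs_total_sectors;   /* 'vpc ' and 'qemu' images keep CHS */
    }
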
diff --git a/block/vvfat.c b/block/vvfat.c
index 086fedf..814796d 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -34,8 +34,8 @@
#include "qemu/option.h"
#include "qemu/bswap.h"
#include "migration/blocker.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/ctype.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
@@ -403,7 +403,6 @@ static direntry_t *create_long_filename(BDRVVVFATState *s, const char *filename)
{
int number_of_entries, i;
glong length;
- direntry_t *entry;
gunichar2 *longname = g_utf8_to_utf16(filename, -1, NULL, &length, NULL);
if (!longname) {
@@ -414,24 +413,24 @@ static direntry_t *create_long_filename(BDRVVVFATState *s, const char *filename)
number_of_entries = DIV_ROUND_UP(length * 2, 26);
for(i=0;i<number_of_entries;i++) {
- entry=array_get_next(&(s->directory));
+ direntry_t *entry=array_get_next(&(s->directory));
entry->attributes=0xf;
entry->reserved[0]=0;
entry->begin=0;
entry->name[0]=(number_of_entries-i)|(i==0?0x40:0);
}
for(i=0;i<26*number_of_entries;i++) {
+ unsigned char *entry=array_get(&(s->directory),s->directory.next-1-(i/26));
int offset=(i%26);
if(offset<10) offset=1+offset;
else if(offset<22) offset=14+offset-10;
else offset=28+offset-22;
- entry=array_get(&(s->directory),s->directory.next-1-(i/26));
if (i >= 2 * length + 2) {
- entry->name[offset] = 0xff;
+ entry[offset] = 0xff;
} else if (i % 2 == 0) {
- entry->name[offset] = longname[i / 2] & 0xff;
+ entry[offset] = longname[i / 2] & 0xff;
} else {
- entry->name[offset] = longname[i / 2] >> 8;
+ entry[offset] = longname[i / 2] >> 8;
}
}
g_free(longname);
@@ -1369,8 +1368,9 @@ static int open_file(BDRVVVFATState* s,mapping_t* mapping)
return -1;
vvfat_close_current_file(s);
s->current_fd = fd;
- s->current_mapping = mapping;
}
+
+ s->current_mapping = mapping;
return 0;
}
@@ -1408,7 +1408,9 @@ read_cluster_directory:
assert(s->current_fd);
- offset=s->cluster_size*(cluster_num-s->current_mapping->begin)+s->current_mapping->info.file.offset;
+ offset = s->cluster_size *
+ ((cluster_num - s->current_mapping->begin)
+ + s->current_mapping->info.file.offset);
if(lseek(s->current_fd, offset, SEEK_SET)!=offset)
return -3;
s->cluster=s->cluster_buffer;
@@ -1878,7 +1880,6 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
uint32_t cluster_num = begin_of_direntry(direntry);
uint32_t offset = 0;
- int first_mapping_index = -1;
mapping_t* mapping = NULL;
const char* basename2 = NULL;
@@ -1929,8 +1930,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
(mapping->mode & MODE_DIRECTORY) == 0) {
/* was modified in qcow */
- if (offset != mapping->info.file.offset + s->cluster_size
- * (cluster_num - mapping->begin)) {
+ if (offset != s->cluster_size
+ * ((cluster_num - mapping->begin)
+ + mapping->info.file.offset)) {
/* offset of this cluster in file chain has changed */
abort();
copy_it = 1;
@@ -1939,14 +1941,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
if (strcmp(basename, basename2))
copy_it = 1;
- first_mapping_index = array_index(&(s->mapping), mapping);
- }
-
- if (mapping->first_mapping_index != first_mapping_index
- && mapping->info.file.offset > 0) {
- abort();
- copy_it = 1;
}
+ assert(mapping->first_mapping_index == -1
+ || mapping->info.file.offset > 0);
/* need to write out? */
if (!was_modified && is_file(direntry)) {
@@ -2404,7 +2401,7 @@ static int commit_mappings(BDRVVVFATState* s,
(mapping->end - mapping->begin);
} else
next_mapping->info.file.offset = mapping->info.file.offset +
- mapping->end - mapping->begin;
+ (mapping->end - mapping->begin);
mapping = next_mapping;
}
@@ -2525,8 +2522,9 @@ commit_one_file(BDRVVVFATState* s, int dir_index, uint32_t offset)
return -1;
}
- for (i = s->cluster_size; i < offset; i += s->cluster_size)
+ for (i = 0; i < offset; i += s->cluster_size) {
c = modified_fat_get(s, c);
+ }
fd = qemu_open_old(mapping->path, O_RDWR | O_CREAT | O_BINARY, 0666);
if (fd < 0) {
@@ -3136,9 +3134,9 @@ vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
static int coroutine_fn vvfat_co_block_status(BlockDriverState *bs,
- bool want_zero, int64_t offset,
- int64_t bytes, int64_t *n,
- int64_t *map,
+ unsigned int mode,
+ int64_t offset, int64_t bytes,
+ int64_t *n, int64_t *map,
BlockDriverState **file)
{
*n = bytes;
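
In create_long_filename() above, the i % 26 to byte-offset mapping follows the VFAT long-filename entry layout: each 32-byte directory entry carries 26 bytes (13 UTF-16 code units) of the name, split across three fields. A hedged sketch of that mapping as a standalone helper (the helper name and framing are illustrative):

    /* Sketch: byte offset of filename byte n (0..25) inside a 32-byte
     * VFAT long-filename directory entry.
     *   bytes  1..10 -> name1 (5 UTF-16 code units)
     *   bytes 14..25 -> name2 (6 code units)
     *   bytes 28..31 -> name3 (2 code units)
     */
    static int lfn_byte_offset(int n)
    {
        if (n < 10) {
            return 1 + n;
        } else if (n < 22) {
            return 14 + (n - 10);
        } else {
            return 28 + (n - 22);
        }
    }
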
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
index 2130124..1e3e634 100644
--- a/blockdev-nbd.c
+++ b/blockdev-nbd.c
@@ -10,8 +10,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/block-backend.h"
+#include "system/blockdev.h"
+#include "system/block-backend.h"
#include "hw/block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
@@ -21,12 +21,19 @@
#include "io/channel-socket.h"
#include "io/net-listener.h"
+typedef struct NBDConn {
+ QIOChannelSocket *cioc;
+ QLIST_ENTRY(NBDConn) next;
+} NBDConn;
+
typedef struct NBDServerData {
QIONetListener *listener;
+ uint32_t handshake_max_secs;
QCryptoTLSCreds *tlscreds;
char *tlsauthz;
uint32_t max_connections;
uint32_t connections;
+ QLIST_HEAD(, NBDConn) conns;
} NBDServerData;
static NBDServerData *nbd_server;
@@ -51,6 +58,14 @@ int nbd_server_max_connections(void)
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
{
+ NBDConn *conn = nbd_client_owner(client);
+
+ assert(qemu_in_main_thread() && nbd_server);
+
+ object_unref(OBJECT(conn->cioc));
+ QLIST_REMOVE(conn, next);
+ g_free(conn);
+
nbd_client_put(client);
assert(nbd_server->connections > 0);
nbd_server->connections--;
@@ -60,31 +75,55 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
gpointer opaque)
{
+ NBDConn *conn = g_new0(NBDConn, 1);
+
+ assert(qemu_in_main_thread() && nbd_server);
nbd_server->connections++;
+ object_ref(OBJECT(cioc));
+ conn->cioc = cioc;
+ QLIST_INSERT_HEAD(&nbd_server->conns, conn, next);
nbd_update_server_watch(nbd_server);
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
- nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz,
- nbd_blockdev_client_closed);
+ nbd_client_new(cioc, nbd_server->handshake_max_secs,
+ nbd_server->tlscreds, nbd_server->tlsauthz,
+ nbd_blockdev_client_closed, conn);
}
static void nbd_update_server_watch(NBDServerData *s)
{
- if (!s->max_connections || s->connections < s->max_connections) {
- qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL);
- } else {
- qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
+ if (s->listener) {
+ if (!s->max_connections || s->connections < s->max_connections) {
+ qio_net_listener_set_client_func(s->listener, nbd_accept, NULL,
+ NULL);
+ } else {
+ qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
+ }
}
}
static void nbd_server_free(NBDServerData *server)
{
+ NBDConn *conn, *tmp;
+
if (!server) {
return;
}
+ /*
+ * Forcefully close the listener socket, and any clients that have
+ * not yet disconnected on their own.
+ */
qio_net_listener_disconnect(server->listener);
object_unref(OBJECT(server->listener));
+ server->listener = NULL;
+ QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) {
+ qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH,
+ NULL);
+ }
+
+ AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0);
+
if (server->tlscreds) {
object_unref(OBJECT(server->tlscreds));
}
@@ -123,9 +162,9 @@ static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
}
-void nbd_server_start(SocketAddress *addr, const char *tls_creds,
- const char *tls_authz, uint32_t max_connections,
- Error **errp)
+void nbd_server_start(SocketAddress *addr, uint32_t handshake_max_secs,
+ const char *tls_creds, const char *tls_authz,
+ uint32_t max_connections, Error **errp)
{
if (nbd_server) {
error_setg(errp, "NBD server already running");
@@ -134,6 +173,7 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
nbd_server = g_new0(NBDServerData, 1);
nbd_server->max_connections = max_connections;
+ nbd_server->handshake_max_secs = handshake_max_secs;
nbd_server->listener = qio_net_listener_new();
qio_net_listener_set_name(nbd_server->listener,
@@ -168,19 +208,36 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
{
- nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz,
- arg->max_connections, errp);
+ if (!arg->has_max_connections) {
+ arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
+ }
+ if (!arg->has_handshake_max_seconds) {
+ arg->handshake_max_seconds = NBD_DEFAULT_HANDSHAKE_MAX_SECS;
+ }
+
+ nbd_server_start(arg->addr, arg->handshake_max_seconds, arg->tls_creds,
+ arg->tls_authz, arg->max_connections, errp);
}
-void qmp_nbd_server_start(SocketAddressLegacy *addr,
+void qmp_nbd_server_start(bool has_handshake_max_secs,
+ uint32_t handshake_max_secs,
const char *tls_creds,
const char *tls_authz,
bool has_max_connections, uint32_t max_connections,
+ SocketAddressLegacy *addr,
Error **errp)
{
SocketAddress *addr_flat = socket_address_flatten(addr);
- nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
+ if (!has_max_connections) {
+ max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
+ }
+ if (!has_handshake_max_secs) {
+ handshake_max_secs = NBD_DEFAULT_HANDSHAKE_MAX_SECS;
+ }
+
+ nbd_server_start(addr_flat, handshake_max_secs, tls_creds, tls_authz,
+ max_connections, errp);
qapi_free_SocketAddress(addr_flat);
}
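
The server start path above gains a per-client handshake timeout and fills in defaults for both it and the connection cap when the QMP caller omits them. A hedged sketch of calling the expanded nbd_server_start() signature directly (the wrapper name and the choice to pass the defaults explicitly are illustrative):

    /* Sketch: the expanded nbd_server_start() signature from this patch. */
    static void start_nbd_server_example(SocketAddress *addr, Error **errp)
    {
        nbd_server_start(addr,
                         NBD_DEFAULT_HANDSHAKE_MAX_SECS, /* handshake time limit */
                         NULL,                           /* tls-creds */
                         NULL,                           /* tls-authz */
                         NBD_DEFAULT_MAX_CONNECTIONS,    /* connection cap */
                         errp);
    }
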
diff --git a/blockdev.c b/blockdev.c
index 835064e..2e7fda6 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -31,8 +31,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "hw/block/block.h"
#include "block/blockjob.h"
#include "block/dirty-bitmap.h"
@@ -46,19 +46,19 @@
#include "qapi/qapi-commands-block.h"
#include "qapi/qapi-commands-transaction.h"
#include "qapi/qapi-visit-block-core.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qapi/qobject-output-visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/iothread.h"
+#include "system/system.h"
+#include "system/iothread.h"
#include "block/block_int.h"
#include "block/trace.h"
-#include "sysemu/runstate.h"
-#include "sysemu/replay.h"
+#include "system/runstate.h"
+#include "system/replay.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "qemu/main-loop.h"
@@ -1132,39 +1132,41 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
int ret;
GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
bs = qmp_get_root_bs(device, errp);
if (!bs) {
- return NULL;
+ goto error;
}
if (!id && !name) {
error_setg(errp, "Name or id must be provided");
- return NULL;
+ goto error;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
- return NULL;
+ goto error;
}
ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return NULL;
+ goto error;
}
if (!ret) {
error_setg(errp,
"Snapshot with id '%s' and name '%s' does not exist on "
"device '%s'",
STR_OR_NULL(id), STR_OR_NULL(name), device);
- return NULL;
+ goto error;
}
bdrv_snapshot_delete(bs, id, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return NULL;
+ goto error;
}
info = g_new0(SnapshotInfo, 1);
@@ -1180,6 +1182,9 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
info->has_icount = true;
}
+error:
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
return info;
}
@@ -1203,7 +1208,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
Error *local_err = NULL;
const char *device;
const char *name;
- BlockDriverState *bs;
+ BlockDriverState *bs, *check_bs;
QEMUSnapshotInfo old_sn, *sn;
bool ret;
int64_t rt;
@@ -1211,7 +1216,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
int ret1;
GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
+ bdrv_graph_rdlock_main_loop();
tran_add(tran, &internal_snapshot_drv, state);
@@ -1220,14 +1225,29 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
bs = qmp_get_root_bs(device, errp);
if (!bs) {
+ bdrv_graph_rdunlock_main_loop();
return;
}
state->bs = bs;
+ /* Need to drain while unlocked. */
+ bdrv_graph_rdunlock_main_loop();
/* Paired with .clean() */
bdrv_drained_begin(bs);
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ /* Make sure the root bs did not change with the drain. */
+ check_bs = qmp_get_root_bs(device, errp);
+ if (bs != check_bs) {
+ if (check_bs) {
+ error_setg(errp, "Block node of device '%s' unexpectedly changed",
+ device);
+ } /* else errp is already set */
+ return;
+ }
+
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
return;
}
@@ -1295,12 +1315,14 @@ static void internal_snapshot_abort(void *opaque)
Error *local_error = NULL;
GLOBAL_STATE_CODE();
- GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!state->created) {
return;
}
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
+
if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
error_reportf_err(local_error,
"Failed to delete snapshot with id '%s' and "
@@ -1308,6 +1330,8 @@ static void internal_snapshot_abort(void *opaque)
sn->id_str, sn->name,
bdrv_get_device_name(bs));
}
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
}
static void internal_snapshot_clean(void *opaque)
@@ -1353,9 +1377,10 @@ static void external_snapshot_action(TransactionAction *action,
const char *new_image_file;
ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1);
uint64_t perm, shared;
+ BlockDriverState *check_bs;
/* TODO We'll eventually have to take a writer lock in this function */
- GRAPH_RDLOCK_GUARD_MAINLOOP();
+ bdrv_graph_rdlock_main_loop();
tran_add(tran, &external_snapshot_drv, state);
@@ -1388,11 +1413,25 @@ static void external_snapshot_action(TransactionAction *action,
state->old_bs = bdrv_lookup_bs(device, node_name, errp);
if (!state->old_bs) {
+ bdrv_graph_rdunlock_main_loop();
return;
}
+ /* Need to drain while unlocked. */
+ bdrv_graph_rdunlock_main_loop();
/* Paired with .clean() */
bdrv_drained_begin(state->old_bs);
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ /* Make sure the associated bs did not change with the drain. */
+ check_bs = bdrv_lookup_bs(device, node_name, errp);
+ if (state->old_bs != check_bs) {
+ if (check_bs) {
+ error_setg(errp, "Block node of device '%s' unexpectedly changed",
+ device);
+ } /* else errp is already set */
+ return;
+ }
if (!bdrv_is_inserted(state->old_bs)) {
error_setg(errp, "Device '%s' has no medium",
@@ -1497,6 +1536,22 @@ static void external_snapshot_action(TransactionAction *action,
return;
}
+ /*
+ * Older QEMU versions have allowed adding an active parent node to an
+ * inactive child node. This is unsafe in the general case, but there is an
+ * important use case, which is taking a VM snapshot with migration to file
+ * and then adding an external snapshot while the VM is still stopped and
+ * images are inactive. Requiring the user to explicitly create the overlay
+ * as inactive would break compatibility, so just do it automatically here
+ * to keep this working.
+ */
+ if (bdrv_is_inactive(state->old_bs) && !bdrv_is_inactive(state->new_bs)) {
+ ret = bdrv_inactivate(state->new_bs, errp);
+ if (ret < 0) {
+ return;
+ }
+ }
+
ret = bdrv_append(state->new_bs, state->old_bs, errp);
if (ret < 0) {
return;
@@ -2625,6 +2680,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
BdrvDirtyBitmap *bmap = NULL;
BackupPerf perf = { .max_workers = 64 };
int job_flags = JOB_DEFAULT;
+ OnCbwError on_cbw_error = ON_CBW_ERROR_BREAK_GUEST_WRITE;
if (!backup->has_speed) {
backup->speed = 0;
@@ -2655,6 +2711,9 @@ static BlockJob *do_backup_common(BackupCommon *backup,
if (backup->x_perf->has_max_chunk) {
perf.max_chunk = backup->x_perf->max_chunk;
}
+ if (backup->x_perf->has_min_cluster_size) {
+ perf.min_cluster_size = backup->x_perf->min_cluster_size;
+ }
}
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
@@ -2726,6 +2785,10 @@ static BlockJob *do_backup_common(BackupCommon *backup,
job_flags |= JOB_MANUAL_DISMISS;
}
+ if (backup->has_on_cbw_error) {
+ on_cbw_error = backup->on_cbw_error;
+ }
+
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->bitmap_mode,
backup->compress, backup->discard_source,
@@ -2733,6 +2796,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
&perf,
backup->on_source_error,
backup->on_target_error,
+ on_cbw_error,
job_flags, NULL, NULL, txn, errp);
return job;
}
@@ -2779,7 +2843,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
const char *replaces,
enum MirrorSyncMode sync,
BlockMirrorBackingMode backing_mode,
- bool zero_target,
+ bool target_is_zero,
bool has_speed, int64_t speed,
bool has_granularity, uint32_t granularity,
bool has_buf_size, int64_t buf_size,
@@ -2846,10 +2910,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
return;
}
- if (!bdrv_backing_chain_next(bs) && sync == MIRROR_SYNC_MODE_TOP) {
- sync = MIRROR_SYNC_MODE_FULL;
- }
-
if (!replaces) {
/* We want to mirror from @bs, but keep implicit filters on top */
unfiltered_bs = bdrv_skip_implicit_filters(bs);
@@ -2890,11 +2950,10 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
/* pass the node name to replace to mirror start since it's loose coupling
* and will allow to check whether the node still exist at mirror completion
*/
- mirror_start(job_id, bs, target,
- replaces, job_flags,
- speed, granularity, buf_size, sync, backing_mode, zero_target,
- on_source_error, on_target_error, unmap, filter_node_name,
- copy_mode, errp);
+ mirror_start(job_id, bs, target, replaces, job_flags,
+ speed, granularity, buf_size, sync, backing_mode,
+ target_is_zero, on_source_error, on_target_error, unmap,
+ filter_node_name, copy_mode, errp);
}
void qmp_drive_mirror(DriveMirror *arg, Error **errp)
@@ -2908,7 +2967,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
int flags;
int64_t size;
const char *format = arg->format;
- bool zero_target;
+ bool target_is_zero;
int ret;
bs = qmp_get_root_bs(arg->device, errp);
@@ -3022,9 +3081,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
}
bdrv_graph_rdlock_main_loop();
- zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL &&
- (arg->mode == NEW_IMAGE_MODE_EXISTING ||
- !bdrv_has_zero_init(target_bs)));
+ target_is_zero = (arg->mode != NEW_IMAGE_MODE_EXISTING &&
+ bdrv_has_zero_init(target_bs));
bdrv_graph_rdunlock_main_loop();
@@ -3036,7 +3094,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
blockdev_mirror_common(arg->job_id, bs, target_bs,
arg->replaces, arg->sync,
- backing_mode, zero_target,
+ backing_mode, target_is_zero,
arg->has_speed, arg->speed,
arg->has_granularity, arg->granularity,
arg->has_buf_size, arg->buf_size,
@@ -3066,13 +3124,13 @@ void qmp_blockdev_mirror(const char *job_id,
bool has_copy_mode, MirrorCopyMode copy_mode,
bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss,
+ bool has_target_is_zero, bool target_is_zero,
Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_bs;
AioContext *aio_context;
BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
- bool zero_target;
int ret;
bs = qmp_get_root_bs(device, errp);
@@ -3085,8 +3143,6 @@ void qmp_blockdev_mirror(const char *job_id,
return;
}
- zero_target = (sync == MIRROR_SYNC_MODE_FULL);
-
aio_context = bdrv_get_aio_context(bs);
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
@@ -3096,7 +3152,8 @@ void qmp_blockdev_mirror(const char *job_id,
blockdev_mirror_common(job_id, bs, target_bs,
replaces, sync, backing_mode,
- zero_target, has_speed, speed,
+ has_target_is_zero && target_is_zero,
+ has_speed, speed,
has_granularity, granularity,
has_buf_size, buf_size,
has_on_source_error, on_source_error,
@@ -3452,6 +3509,38 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
bdrv_unref(bs);
}
+void qmp_blockdev_set_active(const char *node_name, bool active, Error **errp)
+{
+ int ret;
+
+ GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+ if (!node_name) {
+ if (active) {
+ bdrv_activate_all(errp);
+ } else {
+ ret = bdrv_inactivate_all();
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to inactivate all nodes");
+ }
+ }
+ } else {
+ BlockDriverState *bs = bdrv_find_node(node_name);
+ if (!bs) {
+ error_setg(errp, "Failed to find node with node-name='%s'",
+ node_name);
+ return;
+ }
+
+ if (active) {
+ bdrv_activate(bs, errp);
+ } else {
+ bdrv_inactivate(bs, errp);
+ }
+ }
+}
+
static BdrvChild * GRAPH_RDLOCK
bdrv_find_child(BlockDriverState *parent_bs, const char *child_name)
{
@@ -3472,6 +3561,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child,
BlockDriverState *parent_bs, *new_bs = NULL;
BdrvChild *p_child;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
parent_bs = bdrv_lookup_bs(parent, parent, errp);
@@ -3509,6 +3599,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child,
out:
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
@@ -3542,12 +3633,13 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
AioContext *new_context;
BlockDriverState *bs;
- GRAPH_RDLOCK_GUARD_MAINLOOP();
+ bdrv_drain_all_begin();
+ bdrv_graph_rdlock_main_loop();
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);
- return;
+ goto out;
}
/* Protects against accidents. */
@@ -3555,14 +3647,14 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
error_setg(errp, "Node %s is associated with a BlockBackend and could "
"be in use (use force=true to override this check)",
node_name);
- return;
+ goto out;
}
if (iothread->type == QTYPE_QSTRING) {
IOThread *obj = iothread_by_id(iothread->u.s);
if (!obj) {
error_setg(errp, "Cannot find iothread %s", iothread->u.s);
- return;
+ goto out;
}
new_context = iothread_get_aio_context(obj);
@@ -3570,7 +3662,11 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
new_context = qemu_get_aio_context();
}
- bdrv_try_change_aio_context(bs, new_context, NULL, errp);
+ bdrv_try_change_aio_context_locked(bs, new_context, NULL, errp);
+
+out:
+ bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
}
QemuOptsList qemu_common_drive_opts = {
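
Both snapshot transaction actions above now drop the main-loop reader lock before draining, then re-take it and look the node up again, since the graph may change while the lock is not held. A hedged sketch of that guard in isolation (the helper name is invented; the real actions keep the lock afterwards via GRAPH_RDLOCK_GUARD_MAINLOOP()):

    /* Sketch: drain without the graph lock, then verify the node is unchanged. */
    static BlockDriverState *acquire_root_bs_drained(const char *device, Error **errp)
    {
        BlockDriverState *bs, *check_bs;

        bdrv_graph_rdlock_main_loop();
        bs = qmp_get_root_bs(device, errp);
        bdrv_graph_rdunlock_main_loop();
        if (!bs) {
            return NULL;
        }

        bdrv_drained_begin(bs);        /* must not hold the graph lock here */

        bdrv_graph_rdlock_main_loop();
        check_bs = qmp_get_root_bs(device, errp);
        bdrv_graph_rdunlock_main_loop();

        if (bs != check_bs) {
            bdrv_drained_end(bs);
            if (check_bs) {
                error_setg(errp, "Block node of device '%s' unexpectedly changed",
                           device);
            } /* else errp is already set by qmp_get_root_bs() */
            return NULL;
        }
        return bs;                     /* caller ends the drained section */
    }
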
diff --git a/blockjob.c b/blockjob.c
index d5f29e1..e68181a 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -29,7 +29,7 @@
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
@@ -144,9 +144,9 @@ static TransactionActionDrv change_child_job_context = {
.clean = g_free,
};
-static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp)
+static bool GRAPH_RDLOCK
+child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, GHashTable *visited,
+ Transaction *tran, Error **errp)
{
BlockJob *job = c->opaque;
BdrvStateChildJobContext *s;
@@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
* one to make sure that such a concurrent access does not attempt
* to process an already freed BdrvChild.
*/
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
while (job->nodes) {
GSList *l = job->nodes;
@@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
g_slist_free_1(l);
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
@@ -496,6 +498,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
int ret;
GLOBAL_STATE_CODE();
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
if (job_id == NULL && !(flags & JOB_INTERNAL)) {
@@ -506,6 +509,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
flags, cb, opaque, errp);
if (job == NULL) {
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return NULL;
}
@@ -539,17 +543,17 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
goto fail;
}
- bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
-
if (!block_job_set_speed(job, speed, errp)) {
goto fail;
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
return job;
fail:
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
job_early_fail(&job->job);
return NULL;
}
diff --git a/bsd-user/aarch64/signal.c b/bsd-user/aarch64/signal.c
new file mode 100644
index 0000000..6bc73a7
--- /dev/null
+++ b/bsd-user/aarch64/signal.c
@@ -0,0 +1,137 @@
+/*
+ * ARM AArch64 specific signal definitions for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "qemu.h"
+
+/*
+ * Compare to sendsig() in sys/arm64/arm64/exec_machdep.c
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUARMState *regs, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /*
+ * Arguments to signal handler:
+ * x0 = signal number
+ * x1 = siginfo pointer
+ * x2 = ucontext pointer
+ * pc/elr = signal handler pointer
+ * sp = sigframe struct pointer
+ * lr = sigtramp at base of user stack
+ */
+
+ regs->xregs[0] = sig;
+ regs->xregs[1] = frame_addr +
+ offsetof(struct target_sigframe, sf_si);
+ regs->xregs[2] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+
+ regs->pc = ka->_sa_handler;
+ regs->xregs[TARGET_REG_SP] = frame_addr;
+ regs->xregs[TARGET_REG_LR] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
+
+ return 0;
+}
+
+/*
+ * Compare to get_mcontext() in arm64/arm64/machdep.c
+ * Assumes that the memory is locked if mcp points to user memory.
+ */
+abi_long get_mcontext(CPUARMState *regs, target_mcontext_t *mcp, int flags)
+{
+ int err = 0, i;
+ uint64_t *gr = mcp->mc_gpregs.gp_x;
+
+ mcp->mc_gpregs.gp_spsr = pstate_read(regs);
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
+ gr[0] = 0UL;
+ mcp->mc_gpregs.gp_spsr &= ~CPSR_C;
+ } else {
+ gr[0] = tswap64(regs->xregs[0]);
+ }
+
+ for (i = 1; i < 30; i++) {
+ gr[i] = tswap64(regs->xregs[i]);
+ }
+
+ mcp->mc_gpregs.gp_sp = tswap64(regs->xregs[TARGET_REG_SP]);
+ mcp->mc_gpregs.gp_lr = tswap64(regs->xregs[TARGET_REG_LR]);
+ mcp->mc_gpregs.gp_elr = tswap64(regs->pc);
+
+ /* XXX FP? */
+
+ return err;
+}
+
+/*
+ * Compare to arm64/arm64/exec_machdep.c sendsig()
+ * Assumes that the memory is locked if frame points to user memory.
+ */
+abi_long setup_sigframe_arch(CPUARMState *env, abi_ulong frame_addr,
+ struct target_sigframe *frame, int flags)
+{
+ target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext;
+
+ get_mcontext(env, mcp, flags);
+ return 0;
+}
+
+/*
+ * Compare to set_mcontext() in arm64/arm64/machdep.c
+ * Assumes that the memory is locked if frame points to user memory.
+ */
+abi_long set_mcontext(CPUARMState *regs, target_mcontext_t *mcp, int srflag)
+{
+ int err = 0, i;
+ const uint64_t *gr = mcp->mc_gpregs.gp_x;
+
+ for (i = 0; i < 30; i++) {
+ regs->xregs[i] = tswap64(gr[i]);
+ }
+
+ regs->xregs[TARGET_REG_SP] = tswap64(mcp->mc_gpregs.gp_sp);
+ regs->xregs[TARGET_REG_LR] = tswap64(mcp->mc_gpregs.gp_lr);
+ regs->pc = mcp->mc_gpregs.gp_elr;
+ pstate_write(regs, mcp->mc_gpregs.gp_spsr);
+
+ /* XXX FP? */
+
+ return err;
+}
+
+/* Compare to sys_sigreturn() in arm64/arm64/machdep.c */
+abi_long get_ucontext_sigreturn(CPUARMState *regs, abi_ulong target_sf,
+ abi_ulong *target_uc)
+{
+ uint32_t pstate = pstate_read(regs);
+
+ *target_uc = 0;
+
+ if ((pstate & PSTATE_M) != PSTATE_MODE_EL0t ||
+ (pstate & (PSTATE_F | PSTATE_I | PSTATE_A | PSTATE_D)) != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ *target_uc = target_sf;
+
+ return 0;
+}
diff --git a/bsd-user/aarch64/target.h b/bsd-user/aarch64/target.h
new file mode 100644
index 0000000..702aeb7
--- /dev/null
+++ b/bsd-user/aarch64/target.h
@@ -0,0 +1,20 @@
+/*
+ * Aarch64 general target stuff that's common to all aarch64 details
+ *
+ * Copyright (c) 2022 M. Warner Losh <imp@bsdimp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TARGET_H
+#define TARGET_H
+
+/*
+ * aarch64 ABI does not 'lump' the registers for 64-bit args.
+ */
+static inline bool regpairs_aligned(void *cpu_env)
+{
+ return false;
+}
+
+#endif /* TARGET_H */
diff --git a/bsd-user/aarch64/target_arch.h b/bsd-user/aarch64/target_arch.h
new file mode 100644
index 0000000..4815a56
--- /dev/null
+++ b/bsd-user/aarch64/target_arch.h
@@ -0,0 +1,29 @@
+/*
+ * ARM AArch64 specific prototypes for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_H
+#define TARGET_ARCH_H
+
+#include "qemu.h"
+#include "target/arm/cpu-features.h"
+
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls);
+target_ulong target_cpu_get_tls(CPUARMState *env);
+
+#endif /* TARGET_ARCH_H */
diff --git a/bsd-user/aarch64/target_arch_cpu.c b/bsd-user/aarch64/target_arch_cpu.c
new file mode 100644
index 0000000..b2fa59e
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_cpu.c
@@ -0,0 +1,31 @@
+/*
+ * ARM AArch64 specific CPU for bsd-user
+ *
+ * Copyright (c) 2015 Stacey Son
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "target_arch.h"
+
+/* See cpu_set_user_tls() in arm64/arm64/vm_machdep.c */
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls)
+{
+ env->cp15.tpidr_el[0] = newtls;
+}
+
+target_ulong target_cpu_get_tls(CPUARMState *env)
+{
+ return env->cp15.tpidr_el[0];
+}
diff --git a/bsd-user/aarch64/target_arch_cpu.h b/bsd-user/aarch64/target_arch_cpu.h
new file mode 100644
index 0000000..87fbf6d
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_cpu.h
@@ -0,0 +1,189 @@
+/*
+ * ARM AArch64 cpu init and loop
+ *
+ * Copyright (c) 2015 Stacey Son
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_CPU_H
+#define TARGET_ARCH_CPU_H
+
+#include "target_arch.h"
+#include "signal-common.h"
+#include "target/arm/syndrome.h"
+
+#define TARGET_DEFAULT_CPU_MODEL "any"
+
+static inline void target_cpu_init(CPUARMState *env,
+ struct target_pt_regs *regs)
+{
+ int i;
+
+ if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
+ fprintf(stderr, "The selected ARM CPU does not support 64 bit mode\n");
+ exit(1);
+ }
+ for (i = 0; i < 31; i++) {
+ env->xregs[i] = regs->regs[i];
+ }
+ env->pc = regs->pc;
+ env->xregs[31] = regs->sp;
+}
+
+static inline G_NORETURN void target_cpu_loop(CPUARMState *env)
+{
+ CPUState *cs = env_cpu(env);
+ int trapnr, ec, fsc, si_code, si_signo;
+ uint64_t code, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8;
+ abi_long ret;
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ switch (trapnr) {
+ case EXCP_SWI:
+ /* See arm64/arm64/trap.c cpu_fetch_syscall_args() */
+ code = env->xregs[8];
+ if (code == TARGET_FREEBSD_NR_syscall ||
+ code == TARGET_FREEBSD_NR___syscall) {
+ code = env->xregs[0];
+ arg1 = env->xregs[1];
+ arg2 = env->xregs[2];
+ arg3 = env->xregs[3];
+ arg4 = env->xregs[4];
+ arg5 = env->xregs[5];
+ arg6 = env->xregs[6];
+ arg7 = env->xregs[7];
+ arg8 = 0;
+ } else {
+ arg1 = env->xregs[0];
+ arg2 = env->xregs[1];
+ arg3 = env->xregs[2];
+ arg4 = env->xregs[3];
+ arg5 = env->xregs[4];
+ arg6 = env->xregs[5];
+ arg7 = env->xregs[6];
+ arg8 = env->xregs[7];
+ }
+ ret = do_freebsd_syscall(env, code, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8);
+ /*
+ * The carry bit is cleared for no error; set for error.
+ * See arm64/arm64/vm_machdep.c cpu_set_syscall_retval()
+ */
+ if (ret >= 0) {
+ env->CF = 0;
+ env->xregs[0] = ret;
+ } else if (ret == -TARGET_ERESTART) {
+ env->pc -= 4;
+ break;
+ } else if (ret != -TARGET_EJUSTRETURN) {
+ env->CF = 1;
+ env->xregs[0] = -ret;
+ }
+ break;
+
+ case EXCP_INTERRUPT:
+ /* Just indicate that signals should be handled ASAP. */
+ break;
+
+ case EXCP_UDEF:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc);
+ break;
+
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */
+ ec = syn_get_ec(env->exception.syndrome);
+ assert(ec == EC_DATAABORT || ec == EC_INSNABORT);
+
+ /* Both EC have the same format for FSC, or close enough. */
+ fsc = extract32(env->exception.syndrome, 0, 6);
+ switch (fsc) {
+ case 0x04 ... 0x07: /* Translation fault, level {0-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MAPERR;
+ break;
+ case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
+ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_ACCERR;
+ break;
+ case 0x11: /* Synchronous Tag Check Fault */
+ si_signo = TARGET_SIGSEGV;
+ si_code = /* TARGET_SEGV_MTESERR; */ TARGET_SEGV_ACCERR;
+ break;
+ case 0x21: /* Alignment fault */
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ force_sig_fault(si_signo, si_code, env->exception.vaddress);
+ break;
+
+ case EXCP_DEBUG:
+ case EXCP_BKPT:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ break;
+
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+
+ case EXCP_YIELD:
+ /* nothing to do here for user-mode, just resume guest code */
+ break;
+ default:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(cs, stderr, 0);
+ abort();
+ } /* switch() */
+ process_pending_signals(env);
+ /*
+ * Exception return on AArch64 always clears the exclusive
+ * monitor, so any return to running guest code implies this.
+ * A strex (successful or otherwise) also clears the monitor, so
+ * we don't need to specialcase EXCP_STREX.
+ */
+ env->exclusive_addr = -1;
+ } /* for (;;) */
+}
+
+/* See arm64/arm64/vm_machdep.c cpu_fork() */
+static inline void target_cpu_clone_regs(CPUARMState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->xregs[31] = newsp;
+ }
+ env->xregs[0] = 0;
+ env->xregs[1] = 0;
+ pstate_write(env, 0);
+}
+
+static inline void target_cpu_reset(CPUArchState *env)
+{
+}
+
+#endif /* TARGET_ARCH_CPU_H */
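The SYS_syscall / SYS___syscall special case above exists because, for the indirect forms, the real syscall number rides in x0 and every argument shifts down one register. A host-side sketch of that marshalling rule, with regs[] standing in for the guest's xregs[0..7] (hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

static void fetch_syscall_args(const uint64_t regs[8], uint64_t x8,
                               bool indirect, uint64_t *num, uint64_t args[8])
{
    int shift = indirect ? 1 : 0;   /* indirect: number in x0, args from x1 */

    *num = indirect ? regs[0] : x8;
    for (int i = 0; i < 8; i++) {
        args[i] = (i + shift < 8) ? regs[i + shift] : 0;  /* arg8 is 0 when indirect */
    }
}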
diff --git a/bsd-user/aarch64/target_arch_elf.h b/bsd-user/aarch64/target_arch_elf.h
new file mode 100644
index 0000000..cc87f47
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_elf.h
@@ -0,0 +1,163 @@
+/*
+ * ARM AArch64 ELF definitions for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_ELF_H
+#define TARGET_ARCH_ELF_H
+
+#define ELF_START_MMAP 0x80000000
+#define ELF_ET_DYN_LOAD_ADDR 0x100000
+
+#define elf_check_arch(x) ((x) == EM_AARCH64)
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_AARCH64
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+enum {
+ ARM_HWCAP_A64_FP = 1 << 0,
+ ARM_HWCAP_A64_ASIMD = 1 << 1,
+ ARM_HWCAP_A64_EVTSTRM = 1 << 2,
+ ARM_HWCAP_A64_AES = 1 << 3,
+ ARM_HWCAP_A64_PMULL = 1 << 4,
+ ARM_HWCAP_A64_SHA1 = 1 << 5,
+ ARM_HWCAP_A64_SHA2 = 1 << 6,
+ ARM_HWCAP_A64_CRC32 = 1 << 7,
+ ARM_HWCAP_A64_ATOMICS = 1 << 8,
+ ARM_HWCAP_A64_FPHP = 1 << 9,
+ ARM_HWCAP_A64_ASIMDHP = 1 << 10,
+ ARM_HWCAP_A64_CPUID = 1 << 11,
+ ARM_HWCAP_A64_ASIMDRDM = 1 << 12,
+ ARM_HWCAP_A64_JSCVT = 1 << 13,
+ ARM_HWCAP_A64_FCMA = 1 << 14,
+ ARM_HWCAP_A64_LRCPC = 1 << 15,
+ ARM_HWCAP_A64_DCPOP = 1 << 16,
+ ARM_HWCAP_A64_SHA3 = 1 << 17,
+ ARM_HWCAP_A64_SM3 = 1 << 18,
+ ARM_HWCAP_A64_SM4 = 1 << 19,
+ ARM_HWCAP_A64_ASIMDDP = 1 << 20,
+ ARM_HWCAP_A64_SHA512 = 1 << 21,
+ ARM_HWCAP_A64_SVE = 1 << 22,
+ ARM_HWCAP_A64_ASIMDFHM = 1 << 23,
+ ARM_HWCAP_A64_DIT = 1 << 24,
+ ARM_HWCAP_A64_USCAT = 1 << 25,
+ ARM_HWCAP_A64_ILRCPC = 1 << 26,
+ ARM_HWCAP_A64_FLAGM = 1 << 27,
+ ARM_HWCAP_A64_SSBS = 1 << 28,
+ ARM_HWCAP_A64_SB = 1 << 29,
+ ARM_HWCAP_A64_PACA = 1 << 30,
+ ARM_HWCAP_A64_PACG = 1UL << 31,
+
+ ARM_HWCAP2_A64_DCPODP = 1 << 0,
+ ARM_HWCAP2_A64_SVE2 = 1 << 1,
+ ARM_HWCAP2_A64_SVEAES = 1 << 2,
+ ARM_HWCAP2_A64_SVEPMULL = 1 << 3,
+ ARM_HWCAP2_A64_SVEBITPERM = 1 << 4,
+ ARM_HWCAP2_A64_SVESHA3 = 1 << 5,
+ ARM_HWCAP2_A64_SVESM4 = 1 << 6,
+ ARM_HWCAP2_A64_FLAGM2 = 1 << 7,
+ ARM_HWCAP2_A64_FRINT = 1 << 8,
+ ARM_HWCAP2_A64_SVEI8MM = 1 << 9,
+ ARM_HWCAP2_A64_SVEF32MM = 1 << 10,
+ ARM_HWCAP2_A64_SVEF64MM = 1 << 11,
+ ARM_HWCAP2_A64_SVEBF16 = 1 << 12,
+ ARM_HWCAP2_A64_I8MM = 1 << 13,
+ ARM_HWCAP2_A64_BF16 = 1 << 14,
+ ARM_HWCAP2_A64_DGH = 1 << 15,
+ ARM_HWCAP2_A64_RNG = 1 << 16,
+ ARM_HWCAP2_A64_BTI = 1 << 17,
+ ARM_HWCAP2_A64_MTE = 1 << 18,
+};
+
+#define ELF_HWCAP get_elf_hwcap()
+#define ELF_HWCAP2 get_elf_hwcap2()
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+static uint32_t get_elf_hwcap(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ hwcaps |= ARM_HWCAP_A64_FP;
+ hwcaps |= ARM_HWCAP_A64_ASIMD;
+ hwcaps |= ARM_HWCAP_A64_CPUID;
+
+ /* probe for the extra features */
+
+ GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+ GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+ GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+ GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+ GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+ GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+ GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+ GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+ GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+ GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+ GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+ GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
+ GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
+ GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
+ GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
+ GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
+ GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
+ GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
+ GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
+
+ return hwcaps;
+}
+
+static uint32_t get_elf_hwcap2(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
+ GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
+ GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
+ GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
+ GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
+ GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
+ GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
+ GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
+ GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
+ GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
+ GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
+ GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
+ GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
+ GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
+ GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
+ GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
+ GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
+ GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+
+ return hwcaps;
+}
+
+#undef GET_FEATURE_ID
+
+#endif /* TARGET_ARCH_ELF_H */
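The two functions above populate AT_HWCAP and AT_HWCAP2 in the guest's ELF auxiliary vector. For reference, a FreeBSD guest would normally read them back with elf_aux_info(3); a rough sketch (the header choices are an assumption, and the tested bit is ARM_HWCAP_A64_AES from the enum above):

#include <sys/types.h>
#include <sys/elf_common.h>   /* AT_HWCAP */
#include <sys/auxv.h>         /* elf_aux_info() */
#include <stdio.h>

int main(void)
{
    unsigned long hwcap = 0;

    if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0) {
        printf("AES instructions: %s\n",
               (hwcap & (1UL << 3)) ? "yes" : "no");   /* ARM_HWCAP_A64_AES */
    }
    return 0;
}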
diff --git a/bsd-user/aarch64/target_arch_reg.h b/bsd-user/aarch64/target_arch_reg.h
new file mode 100644
index 0000000..b53302e
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_reg.h
@@ -0,0 +1,56 @@
+/*
+ * FreeBSD arm64 register structures
+ *
+ * Copyright (c) 2015 Stacey Son
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_REG_H
+#define TARGET_ARCH_REG_H
+
+/* See sys/arm64/include/reg.h */
+typedef struct target_reg {
+ uint64_t x[30];
+ uint64_t lr;
+ uint64_t sp;
+ uint64_t elr;
+ uint64_t spsr;
+} target_reg_t;
+
+typedef struct target_fpreg {
+ Int128 fp_q[32];
+ uint32_t fp_sr;
+ uint32_t fp_cr;
+} target_fpreg_t;
+
+#define tswapreg(ptr) tswapal(ptr)
+
+static inline void target_copy_regs(target_reg_t *regs, CPUARMState *env)
+{
+ int i;
+
+ for (i = 0; i < 30; i++) {
+ regs->x[i] = tswapreg(env->xregs[i]);
+ }
+ regs->lr = tswapreg(env->xregs[30]);
+ regs->sp = tswapreg(env->xregs[31]);
+ regs->elr = tswapreg(env->pc);
+ regs->spsr = tswapreg(pstate_read(env));
+}
+
+#undef tswapreg
+
+#endif /* TARGET_ARCH_REG_H */
diff --git a/bsd-user/aarch64/target_arch_signal.h b/bsd-user/aarch64/target_arch_signal.h
new file mode 100644
index 0000000..b72ba7a
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_signal.h
@@ -0,0 +1,82 @@
+/*
+ * ARM AArch64 specific signal definitions for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SIGNAL_H
+#define TARGET_ARCH_SIGNAL_H
+
+#include "cpu.h"
+
+#define TARGET_REG_X0 0
+#define TARGET_REG_X30 30
+#define TARGET_REG_X31 31
+#define TARGET_REG_LR TARGET_REG_X30
+#define TARGET_REG_SP TARGET_REG_X31
+
+#define TARGET_INSN_SIZE 4 /* arm64 instruction size */
+
+/* Size of the signal trampoline code. See _sigtramp(). */
+#define TARGET_SZSIGCODE ((abi_ulong)(9 * TARGET_INSN_SIZE))
+
+/* compare to sys/arm64/include/_limits.h */
+#define TARGET_MINSIGSTKSZ (1024 * 4) /* min sig stack size */
+#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768) /* recommended size */
+
+/* struct __mcontext in sys/arm64/include/ucontext.h */
+
+struct target_gpregs {
+ uint64_t gp_x[30];
+ uint64_t gp_lr;
+ uint64_t gp_sp;
+ uint64_t gp_elr;
+ uint32_t gp_spsr;
+ uint32_t gp_pad;
+};
+
+struct target_fpregs {
+ Int128 fp_q[32];
+ uint32_t fp_sr;
+ uint32_t fp_cr;
+ uint32_t fp_flags;
+ uint32_t fp_pad;
+};
+
+struct target__mcontext {
+ struct target_gpregs mc_gpregs;
+ struct target_fpregs mc_fpregs;
+ uint32_t mc_flags;
+#define TARGET_MC_FP_VALID 0x1
+ uint32_t mc_pad;
+ uint64_t mc_spare[8];
+};
+
+typedef struct target__mcontext target_mcontext_t;
+
+#define TARGET_MCONTEXT_SIZE 880
+#define TARGET_UCONTEXT_SIZE 960
+
+#include "target_os_ucontext.h"
+
+struct target_sigframe {
+ target_siginfo_t sf_si; /* saved siginfo */
+ target_ucontext_t sf_uc; /* saved ucontext */
+};
+
+#define TARGET_SIGSTACK_ALIGN 16
+
+#endif /* TARGET_ARCH_SIGNAL_H */
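TARGET_MCONTEXT_SIZE and TARGET_UCONTEXT_SIZE are ABI constants copied from FreeBSD, so a build-time cross-check against the C layout is cheap insurance. A one-line sketch that could sit in code including this header (assumes C11 _Static_assert and that Int128 gives fp_q[] 16-byte alignment, which is what rounds the struct up to 880 bytes):

_Static_assert(sizeof(target_mcontext_t) == TARGET_MCONTEXT_SIZE,
               "struct target__mcontext must match the FreeBSD arm64 mcontext_t");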
diff --git a/bsd-user/aarch64/target_arch_sigtramp.h b/bsd-user/aarch64/target_arch_sigtramp.h
new file mode 100644
index 0000000..8cdd33b
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_sigtramp.h
@@ -0,0 +1,48 @@
+/*
+ * ARM AArch64 sigcode for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SIGTRAMP_H
+#define TARGET_ARCH_SIGTRAMP_H
+
+/* Compare to ENTRY(sigcode) in arm64/arm64/locore.S */
+static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc,
+ unsigned sys_sigreturn)
+{
+ int i;
+ uint32_t sys_exit = TARGET_FREEBSD_NR_exit;
+
+ uint32_t sigtramp_code[] = {
+ /* 1 */ 0x910003e0, /* mov x0, sp */
+ /* 2 */ 0x91000000 + (sigf_uc << 10), /* add x0, x0, #SIGF_UC */
+ /* 3 */ 0xd2800000 + (sys_sigreturn << 5) + 0x8, /* mov x8, #SYS_sigreturn */
+ /* 4 */ 0xd4000001, /* svc #0 */
+ /* 5 */ 0xd2800028 + (sys_exit << 5) + 0x8, /* mov x8, #SYS_exit */
+ /* 6 */ 0xd4000001, /* svc #0 */
+ /* 7 */ 0x17fffffc, /* b -4 */
+ /* 8 */ sys_sigreturn,
+ /* 9 */ sys_exit
+ };
+
+ for (i = 0; i < 9; i++) {
+ tswap32s(&sigtramp_code[i]);
+ }
+
+ return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE);
+}
+#endif /* TARGET_ARCH_SIGTRAMP_H */
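The literals in sigtramp_code[] are hand-assembled AArch64 instructions; entry 3, for instance, is MOVZ Xd, #imm16, whose encoding is 0xd2800000 | imm16 << 5 | Rd, which is exactly why the syscall number is shifted left by 5 and 0x8 selects x8. A hypothetical helper that reproduces that entry (illustrative only, not part of the patch):

#include <stdint.h>

/* MOVZ Xd, #imm16 with hw == 0 */
static uint32_t movz_x(unsigned rd, uint16_t imm16)
{
    return 0xd2800000u | ((uint32_t)imm16 << 5) | (rd & 0x1f);
}

/* movz_x(8, sys_sigreturn) matches entry 3, "mov x8, #SYS_sigreturn". */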
diff --git a/bsd-user/aarch64/target_arch_sysarch.h b/bsd-user/aarch64/target_arch_sysarch.h
new file mode 100644
index 0000000..b003015
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_sysarch.h
@@ -0,0 +1,42 @@
+/*
+ * ARM AArch64 sysarch() system call emulation for bsd-user.
+ *
+ * Copyright (c) 2015 <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SYSARCH_H
+#define TARGET_ARCH_SYSARCH_H
+
+#include "target_syscall.h"
+#include "target_arch.h"
+
+/* See sysarch() in sys/arm64/arm64/sys_machdep.c */
+static inline abi_long do_freebsd_arch_sysarch(CPUARMState *env, int op,
+ abi_ulong parms)
+{
+ int ret = -TARGET_EOPNOTSUPP;
+
+ fprintf(stderr, "sysarch");
+ return ret;
+}
+
+static inline void do_freebsd_arch_print_sysarch(
+ const struct syscallname *name, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
+{
+}
+
+#endif /* TARGET_ARCH_SYSARCH_H */
diff --git a/bsd-user/aarch64/target_arch_thread.h b/bsd-user/aarch64/target_arch_thread.h
new file mode 100644
index 0000000..4c911e6
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_thread.h
@@ -0,0 +1,61 @@
+/*
+ * ARM AArch64 thread support for bsd-user.
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_THREAD_H
+#define TARGET_ARCH_THREAD_H
+
+/* Compare to arm64/arm64/vm_machdep.c cpu_set_upcall_kse() */
+static inline void target_thread_set_upcall(CPUARMState *regs, abi_ulong entry,
+ abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size)
+{
+ abi_ulong sp;
+
+ /*
+ * Make sure the stack is properly aligned.
+ * arm64/include/param.h (STACKALIGN() macro)
+ */
+ sp = ROUND_DOWN(stack_base + stack_size, 16);
+
+ /* sp = stack base */
+ regs->xregs[31] = sp;
+ /* pc = start function entry */
+ regs->pc = entry;
+ /* x0 = arg */
+ regs->xregs[0] = arg;
+}
+
+static inline void target_thread_init(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ abi_long stack = infop->start_stack;
+
+ /*
+ * Make sure the stack is properly aligned.
+ * arm64/include/param.h (STACKALIGN() macro)
+ */
+
+ memset(regs, 0, sizeof(*regs));
+ regs->regs[0] = infop->start_stack;
+ regs->pc = infop->entry;
+ regs->sp = ROUND_DOWN(stack, 16);
+}
+
+#endif /* TARGET_ARCH_THREAD_H */
diff --git a/bsd-user/aarch64/target_arch_vmparam.h b/bsd-user/aarch64/target_arch_vmparam.h
new file mode 100644
index 0000000..0c35491
--- /dev/null
+++ b/bsd-user/aarch64/target_arch_vmparam.h
@@ -0,0 +1,74 @@
+/*
+ * ARM AArch64 VM parameters definitions for bsd-user.
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at FreeBSD>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_VMPARAM_H
+#define TARGET_ARCH_VMPARAM_H
+
+#include "cpu.h"
+
+/**
+ * FreeBSD/arm64 Address space layout.
+ *
+ * ARMv8 implements up to a 48 bit virtual address space. The address space is
+ * split into 2 regions at each end of the 64 bit address space, with an
+ * out of range "hole" in the middle.
+ *
+ * We limit the size of the two spaces to 39 bits each.
+ *
+ * Upper region: 0xffffffffffffffff
+ * 0xffffff8000000000
+ *
+ * Hole: 0xffffff7fffffffff
+ * 0x0000008000000000
+ *
+ * Lower region: 0x0000007fffffffff
+ * 0x0000000000000000
+ *
+ * The upper region is for the kernel, and the lower region is for userland.
+ */
+
+
+/* compare to sys/arm64/include/vmparam.h */
+#define TARGET_MAXTSIZ (1 * GiB) /* max text size */
+#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */
+#define TARGET_MAXDSIZ (1 * GiB) /* max data size */
+#define TARGET_DFLSSIZ (128 * MiB) /* initial stack size limit */
+#define TARGET_MAXSSIZ (1 * GiB) /* max stack size */
+#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */
+
+ /* KERNBASE - 512 MB */
+#define TARGET_VM_MAXUSER_ADDRESS (0x00007fffff000000ULL - (512 * MiB))
+#define TARGET_USRSTACK TARGET_VM_MAXUSER_ADDRESS
+
+static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
+{
+ return state->xregs[31]; /* sp */
+}
+
+static inline void set_second_rval(CPUARMState *state, abi_ulong retval2)
+{
+ state->xregs[1] = retval2; /* XXX not really used on 64-bit arch */
+}
+
+static inline abi_ulong get_second_rval(CPUARMState *state)
+{
+ return state->xregs[1];
+}
+
+#endif /* TARGET_ARCH_VMPARAM_H */
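For concreteness, the "KERNBASE - 512 MB" expression above works out to 0x00007fffdf000000; a compile-time check makes the arithmetic explicit (a sketch, assuming C11 _Static_assert):

_Static_assert((0x00007fffff000000ULL - (512ULL * 1024 * 1024)) ==
               0x00007fffdf000000ULL,
               "TARGET_VM_MAXUSER_ADDRESS is KERNBASE minus 512 MiB");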
diff --git a/bsd-user/aarch64/target_syscall.h b/bsd-user/aarch64/target_syscall.h
new file mode 100644
index 0000000..08ae913
--- /dev/null
+++ b/bsd-user/aarch64/target_syscall.h
@@ -0,0 +1,51 @@
+/*
+ * ARM AArch64 specific CPU for bsd-user
+ *
+ * Copyright (c) 2015 Stacey D. Son <sson at Freebsd>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BSD_USER_AARCH64_TARGET_SYSCALL_H
+#define BSD_USER_AARCH64_TARGET_SYSCALL_H
+
+/*
+ * The aarch64 registers are named:
+ *
+ * x0 through x30 - for 64-bit-wide access (w0 through w30 are 32-bit views
+ * of the same registers)
+ * Register '31' is one of two registers depending on the instruction context:
+ * For instructions dealing with the stack, it is the stack pointer, named sp
+ * For all other instructions, it is a "zero" register, which returns 0 when
+ * read and discards data when written - named xzr (wzr for 32-bit access)
+ *
+ * Usage during syscall/function call:
+ * x0-x7 are used for arguments and return values
+ * For syscalls, the syscall number is in x8
+ * x9-x15 are for temporary values (may get trampled)
+ * x16-x18 are used for intra-procedure-call and platform values (avoid)
+ * The called routine is expected to preserve x19-x28
+ * x29 and x30 are used as the frame pointer and link register (avoid)
+ * See the AArch64 Procedure Call Standard (AAPCS64) for details.
+ */
+struct target_pt_regs {
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t pstate;
+};
+
+#define TARGET_HW_MACHINE "arm64"
+#define TARGET_HW_MACHINE_ARCH "aarch64"
+
+#endif /* BSD_USER_AARCH64_TARGET_SYSCALL_H */
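Tying the register comment above to the dispatch code in target_arch_cpu.h: a FreeBSD/arm64 user binary places the syscall number in x8 and the arguments in x0-x7, then reads the result from x0 with the C flag signalling an error. A rough guest-side sketch using GCC/Clang extended asm (hypothetical helper, not something this patch adds):

static long raw_syscall1(long number, long arg0, int *error)
{
    register long x8 __asm__("x8") = number;
    register long x0 __asm__("x0") = arg0;
    long failed;

    __asm__ volatile(
        "svc #0\n\t"
        "cset %0, cs"                /* C flag set: x0 holds a positive errno */
        : "=r"(failed), "+r"(x0)
        : "r"(x8)
        : "x1", "memory", "cc");     /* x1 may carry a second return value */

    *error = failed ? (int)x0 : 0;
    return failed ? -1 : x0;
}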
diff --git a/bsd-user/arm/target_arch_cpu.h b/bsd-user/arm/target_arch_cpu.h
index 517d008..bc2eaa0 100644
--- a/bsd-user/arm/target_arch_cpu.h
+++ b/bsd-user/arm/target_arch_cpu.h
@@ -37,7 +37,7 @@ static inline void target_cpu_init(CPUARMState *env,
}
}
-static inline void target_cpu_loop(CPUARMState *env)
+static inline G_NORETURN void target_cpu_loop(CPUARMState *env)
{
int trapnr, si_signo, si_code;
CPUState *cs = env_cpu(env);
diff --git a/bsd-user/arm/target_arch_signal.h b/bsd-user/arm/target_arch_signal.h
index 02b2b33..10f96b8 100644
--- a/bsd-user/arm/target_arch_signal.h
+++ b/bsd-user/arm/target_arch_signal.h
@@ -86,4 +86,6 @@ struct target_sigframe {
target_mcontext_vfp_t sf_vfp; /* actual saved VFP context */
};
+#define TARGET_SIGSTACK_ALIGN 8
+
#endif /* TARGET_ARCH_SIGNAL_H */
diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h
index eef6b22..1be906c 100644
--- a/bsd-user/bsd-mem.h
+++ b/bsd-user/bsd-mem.h
@@ -56,7 +56,9 @@
#include <fcntl.h>
#include "qemu-bsd.h"
+#include "exec/mmap-lock.h"
#include "exec/page-protection.h"
+#include "user/page-protection.h"
extern struct bsd_shm_regions bsd_shm_regions[];
extern abi_ulong target_brk;
@@ -369,9 +371,11 @@ static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
if (shmaddr) {
host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
} else {
+ abi_ulong alignment;
abi_ulong mmap_start;
- mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
+ alignment = 0; /* alignment above page size not required */
+ mmap_start = mmap_find_vma(0, shm_info.shm_segsz, alignment);
if (mmap_start == -1) {
return -TARGET_ENOMEM;
diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c
index 833fa3b..3bca0cc 100644
--- a/bsd-user/elfload.c
+++ b/bsd-user/elfload.c
@@ -44,7 +44,7 @@ static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
memcpy(to, from, n);
}
-#ifdef BSWAP_NEEDED
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static void bswap_ehdr(struct elfhdr *ehdr)
{
bswap16s(&ehdr->e_type); /* Object file type */
@@ -111,7 +111,7 @@ static void bswap_note(struct elf_note *en)
bswap32s(&en->n_type);
}
-#else /* ! BSWAP_NEEDED */
+#else
static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
@@ -119,7 +119,7 @@ static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }
-#endif /* ! BSWAP_NEEDED */
+#endif /* HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN */
#include "elfcore.c"
diff --git a/bsd-user/freebsd/os-proc.c b/bsd-user/freebsd/os-proc.c
index e0203e2..bf993f1 100644
--- a/bsd-user/freebsd/os-proc.c
+++ b/bsd-user/freebsd/os-proc.c
@@ -27,64 +27,12 @@ struct kinfo_proc;
#include "qemu.h"
/*
- * Get the filename for the given file descriptor.
- * Note that this may return NULL (fail) if no longer cached in the kernel.
- */
-static char *
-get_filename_from_fd(pid_t pid, int fd, char *filename, size_t len)
-{
- char *ret = NULL;
- unsigned int cnt;
- struct procstat *procstat = NULL;
- struct kinfo_proc *kp = NULL;
- struct filestat_list *head = NULL;
- struct filestat *fst;
-
- procstat = procstat_open_sysctl();
- if (procstat == NULL) {
- goto out;
- }
-
- kp = procstat_getprocs(procstat, KERN_PROC_PID, pid, &cnt);
- if (kp == NULL) {
- goto out;
- }
-
- head = procstat_getfiles(procstat, kp, 0);
- if (head == NULL) {
- goto out;
- }
-
- STAILQ_FOREACH(fst, head, next) {
- if (fd == fst->fs_fd) {
- if (fst->fs_path != NULL) {
- (void)strlcpy(filename, fst->fs_path, len);
- ret = filename;
- }
- break;
- }
- }
-
-out:
- if (head != NULL) {
- procstat_freefiles(procstat, head);
- }
- if (kp != NULL) {
- procstat_freeprocs(procstat, kp);
- }
- if (procstat != NULL) {
- procstat_close(procstat);
- }
- return ret;
-}
-
-/*
* execve/fexecve
*/
abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
abi_ulong guest_envp, int do_fexec)
{
- char **argp, **envp, **qargp, **qarg1, **qarg0, **qargend;
+ char **argp, **envp, **qarg0;
int argc, envc;
abi_ulong gp;
abi_ulong addr;
@@ -117,9 +65,7 @@ abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
qarg0 = argp = g_new0(char *, argc + 9);
/* save the first argument for the emulator */
*argp++ = (char *)getprogname();
- qargp = argp;
*argp++ = (char *)getprogname();
- qarg1 = argp;
envp = g_new0(char *, envc + 1);
for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
if (get_user_ual(addr, gp)) {
@@ -137,7 +83,6 @@ abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
total_size += strlen(*q) + 1;
}
*q++ = NULL;
- qargend = q;
for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
if (get_user_ual(addr, gp)) {
@@ -166,71 +111,14 @@ abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
}
if (do_fexec) {
- if (((int)path_or_fd > 0 &&
- is_target_elf_binary((int)path_or_fd)) == 1) {
- char execpath[PATH_MAX];
-
- /*
- * The executable is an elf binary for the target
- * arch. execve() it using the emulator if we can
- * determine the filename path from the fd.
- */
- if (get_filename_from_fd(getpid(), (int)path_or_fd, execpath,
- sizeof(execpath)) != NULL) {
- memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
- qarg1[1] = qarg1[0];
- qarg1[0] = (char *)"-0";
- qarg1 += 2;
- qargend += 2;
- *qarg1 = execpath;
-#ifndef DONT_INHERIT_INTERP_PREFIX
- memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
- *qarg1++ = (char *)"-L";
- *qarg1++ = (char *)interp_prefix;
-#endif
- ret = get_errno(execve(qemu_proc_pathname, qargp, envp));
- } else {
- /* Getting the filename path failed. */
- ret = -TARGET_EBADF;
- goto execve_end;
- }
- } else {
- ret = get_errno(fexecve((int)path_or_fd, argp, envp));
- }
+ ret = get_errno(fexecve((int)path_or_fd, argp, envp));
} else {
- int fd;
-
p = lock_user_string(path_or_fd);
if (p == NULL) {
ret = -TARGET_EFAULT;
goto execve_end;
}
-
- /*
- * Check the header and see if it a target elf binary. If so
- * then execute using qemu user mode emulator.
- */
- fd = open(p, O_RDONLY | O_CLOEXEC);
- if (fd > 0 && is_target_elf_binary(fd) == 1) {
- close(fd);
- /* execve() as a target binary using emulator. */
- memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
- qarg1[1] = qarg1[0];
- qarg1[0] = (char *)"-0";
- qarg1 += 2;
- qargend += 2;
- *qarg1 = (char *)p;
-#ifndef DONT_INHERIT_INTERP_PREFIX
- memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
- *qarg1++ = (char *)"-L";
- *qarg1++ = (char *)interp_prefix;
-#endif
- ret = get_errno(execve(qemu_proc_pathname, qargp, envp));
- } else {
- close(fd);
- /* Execve() as a host native binary. */
- ret = get_errno(execve(p, argp, envp));
- }
+ ret = get_errno(execve(p, argp, envp));
unlock_user(p, path_or_fd, 0);
}
diff --git a/bsd-user/i386/target_arch_cpu.h b/bsd-user/i386/target_arch_cpu.h
index 9bf2c42..5d4c931 100644
--- a/bsd-user/i386/target_arch_cpu.h
+++ b/bsd-user/i386/target_arch_cpu.h
@@ -102,7 +102,7 @@ static inline void target_cpu_init(CPUX86State *env,
env->segs[R_FS].selector = 0;
}
-static inline void target_cpu_loop(CPUX86State *env)
+static inline G_NORETURN void target_cpu_loop(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
int trapnr;
diff --git a/bsd-user/i386/target_arch_signal.h b/bsd-user/i386/target_arch_signal.h
index 279dadc..2c14153 100644
--- a/bsd-user/i386/target_arch_signal.h
+++ b/bsd-user/i386/target_arch_signal.h
@@ -88,4 +88,6 @@ struct target_sigframe {
uint32_t __spare__[2];
};
+#define TARGET_SIGSTACK_ALIGN 8
+
#endif /* TARGET_ARCH_SIGNAL_H */
diff --git a/bsd-user/main.c b/bsd-user/main.c
index dcad266..7c0a059 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -35,8 +35,9 @@
#include "qemu/path.h"
#include "qemu/help_option.h"
#include "qemu/module.h"
-#include "exec/exec-all.h"
+#include "qemu/plugin.h"
#include "user/guest-base.h"
+#include "user/page-protection.h"
#include "tcg/startup.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
@@ -59,6 +60,7 @@ uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
static bool opt_one_insn_per_tb;
+static unsigned long opt_tb_size;
uintptr_t guest_base;
bool have_guest_base;
/*
@@ -87,10 +89,10 @@ bool have_guest_base;
#endif
unsigned long reserved_va;
+unsigned long guest_addr_max;
const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
-char qemu_proc_pathname[PATH_MAX]; /* full path to exeutable */
unsigned long target_maxtsiz = TARGET_MAXTSIZ; /* max text size */
unsigned long target_dfldsiz = TARGET_DFLDSIZ; /* initial data size limit */
@@ -104,8 +106,9 @@ unsigned long target_sgrowsiz = TARGET_SGROWSIZ; /* amount to grow stack */
void fork_start(void)
{
start_exclusive();
- cpu_list_lock();
mmap_fork_start();
+ cpu_list_lock();
+ qemu_plugin_user_prefork_lock();
gdbserver_fork_start();
}
@@ -113,31 +116,31 @@ void fork_end(pid_t pid)
{
bool child = pid == 0;
+ qemu_plugin_user_postfork(child);
+ mmap_fork_end(child);
if (child) {
CPUState *cpu, *next_cpu;
/*
- * Child processes created by fork() only have a single thread. Discard
- * information about the parent threads.
+ * Child processes created by fork() only have a single thread.
+ * Discard information about the parent threads.
*/
CPU_FOREACH_SAFE(cpu, next_cpu) {
if (cpu != thread_cpu) {
QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
}
}
- mmap_fork_end(child);
- /*
- * qemu_init_cpu_list() takes care of reinitializing the exclusive
- * state, so we don't need to end_exclusive() here.
- */
qemu_init_cpu_list();
get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id();
- gdbserver_fork_end(thread_cpu, pid);
} else {
- mmap_fork_end(child);
cpu_list_unlock();
- gdbserver_fork_end(thread_cpu, pid);
- end_exclusive();
}
+ gdbserver_fork_end(thread_cpu, pid);
+ /*
+ * qemu_init_cpu_list() reinitialized the child exclusive state, but we
+ * also need to keep current_cpu consistent, so call end_exclusive() for
+ * both child and parent.
+ */
+ end_exclusive();
}
void cpu_loop(CPUArchState *env)
@@ -168,9 +171,13 @@ static void usage(void)
" (use '-d help' for a list of log items)\n"
"-D logfile write logs to 'logfile' (default stderr)\n"
"-one-insn-per-tb run with one guest instruction per emulated TB\n"
+ "-tb-size size TCG translation block cache size\n"
"-strace log system calls\n"
"-trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
" specify tracing options\n"
+#ifdef CONFIG_PLUGIN
+ "-plugin [file=]<file>[,<argname>=<argvalue>]\n"
+#endif
"\n"
"Environment variables:\n"
"QEMU_STRACE Print system calls and arguments similar to the\n"
@@ -221,6 +228,8 @@ static void init_task_state(TaskState *ts)
};
}
+static QemuPluginList plugins = QTAILQ_HEAD_INITIALIZER(plugins);
+
void gemu_log(const char *fmt, ...)
{
va_list ap;
@@ -247,22 +256,6 @@ adjust_ssize(void)
setrlimit(RLIMIT_STACK, &rl);
}
-static void save_proc_pathname(char *argv0)
-{
- int mib[4];
- size_t len;
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_PATHNAME;
- mib[3] = -1;
-
- len = sizeof(qemu_proc_pathname);
- if (sysctl(mib, 4, qemu_proc_pathname, &len, NULL, 0)) {
- perror("sysctl");
- }
-}
-
int main(int argc, char **argv)
{
const char *filename;
@@ -292,7 +285,6 @@ int main(int argc, char **argv)
usage();
}
- save_proc_pathname(argv[0]);
error_init(argv[0]);
module_call_init(MODULE_INIT_TRACE);
@@ -320,6 +312,7 @@ int main(int argc, char **argv)
cpu_model = NULL;
qemu_add_opts(&qemu_trace_opts);
+ qemu_plugin_add_opts();
optind = 1;
for (;;) {
@@ -403,10 +396,20 @@ int main(int argc, char **argv)
seed_optarg = optarg;
} else if (!strcmp(r, "one-insn-per-tb")) {
opt_one_insn_per_tb = true;
+ } else if (!strcmp(r, "tb-size")) {
+ r = argv[optind++];
+ if (qemu_strtoul(r, NULL, 0, &opt_tb_size)) {
+ usage();
+ }
} else if (!strcmp(r, "strace")) {
do_strace = 1;
} else if (!strcmp(r, "trace")) {
trace_opt_parse(optarg);
+#ifdef CONFIG_PLUGIN
+ } else if (!strcmp(r, "plugin")) {
+ r = argv[optind++];
+ qemu_plugin_opt_parse(r, &plugins);
+#endif
} else if (!strcmp(r, "0")) {
argv0 = argv[optind++];
} else {
@@ -441,6 +444,7 @@ int main(int argc, char **argv)
exit(1);
}
trace_init_file();
+ qemu_plugin_load_list(&plugins, &error_fatal);
/* Zero out regs */
memset(regs, 0, sizeof(struct target_pt_regs));
@@ -468,6 +472,8 @@ int main(int argc, char **argv)
accel_init_interfaces(ac);
object_property_set_bool(OBJECT(accel), "one-insn-per-tb",
opt_one_insn_per_tb, &error_abort);
+ object_property_set_int(OBJECT(accel), "tb-size",
+ opt_tb_size, &error_abort);
ac->init_machine(NULL);
}
@@ -507,6 +513,13 @@ int main(int argc, char **argv)
/* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
reserved_va = max_reserved_va;
}
+ if (reserved_va != 0) {
+ guest_addr_max = reserved_va;
+ } else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) {
+ guest_addr_max = UINT32_MAX;
+ } else {
+ guest_addr_max = ~0ul;
+ }
if (getenv("QEMU_STRACE")) {
do_strace = 1;
@@ -617,6 +630,7 @@ int main(int argc, char **argv)
init_task_state(ts);
ts->info = info;
ts->bprm = &bprm;
+ ts->ts_tid = qemu_get_thread_id();
cpu->opaque = ts;
target_set_brk(info->brk);
@@ -633,8 +647,7 @@ int main(int argc, char **argv)
target_cpu_init(env, regs);
if (gdbstub) {
- gdbserver_start(gdbstub);
- gdb_handlesig(cpu, 0, NULL, NULL, 0);
+ gdbserver_start(gdbstub, &error_fatal);
}
cpu_loop(env);
/* never exits */
diff --git a/bsd-user/meson.build b/bsd-user/meson.build
index 39bad0a..37b7cd6 100644
--- a/bsd-user/meson.build
+++ b/bsd-user/meson.build
@@ -13,6 +13,7 @@ bsd_user_ss.add(files(
'elfload.c',
'main.c',
'mmap.c',
+ 'plugin-api.c',
'signal.c',
'strace.c',
'uaccess.c',
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index f3a4f17..47e3175 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -17,7 +17,9 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "exec/mmap-lock.h"
#include "exec/page-protection.h"
+#include "user/page-protection.h"
#include "qemu.h"
@@ -129,6 +131,40 @@ error:
}
/*
+ * Perform a pread on behalf of target_mmap. We can reach EOF, we can be
+ * interrupted by signals, and in general there's no good error return path.
+ * If @zero, zero the rest of the block at EOF.
+ * Return true on success.
+ */
+static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
+{
+ while (1) {
+ ssize_t r = pread(fd, p, len, offset);
+
+ if (likely(r == len)) {
+ /* Complete */
+ return true;
+ }
+ if (r == 0) {
+ /* EOF */
+ if (zero) {
+ memset(p, 0, len);
+ }
+ return true;
+ }
+ if (r > 0) {
+ /* Short read */
+ p += r;
+ len -= r;
+ offset += r;
+ } else if (errno != EINTR) {
+ /* Error */
+ return false;
+ }
+ }
+}
+
+/*
* map an incomplete host page
*
* mmap_frag can be called with a valid fd, if flags doesn't contain one of
@@ -190,7 +226,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
- if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
+ if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
return -1;
}
@@ -240,8 +276,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
* It must be called with mmap_lock() held.
* Return -1 if error.
*/
-static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
- abi_ulong alignment)
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment)
{
void *ptr, *prev;
abi_ulong addr;
@@ -360,11 +395,6 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
}
}
-abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
-{
- return mmap_find_vma_aligned(start, size, 0);
-}
-
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
int flags, int fd, off_t offset)
@@ -454,13 +484,12 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
* before we truncate the length for mapping files below.
*/
if (!(flags & MAP_FIXED)) {
+ abi_ulong alignment;
+
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
- if ((flags & MAP_ALIGNMENT_MASK) != 0)
- start = mmap_find_vma_aligned(real_start, host_len,
- (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
- else
- start = mmap_find_vma(real_start, host_len);
+ alignment = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
+ start = mmap_find_vma(real_start, host_len, alignment);
if (start == (abi_ulong)-1) {
errno = ENOMEM;
goto fail;
@@ -565,7 +594,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
-1, 0);
if (retaddr == -1)
goto fail;
- if (pread(fd, g2h_untagged(start), len, offset) == -1) {
+ if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
goto fail;
}
if (!(prot & PROT_WRITE)) {
diff --git a/bsd-user/plugin-api.c b/bsd-user/plugin-api.c
new file mode 100644
index 0000000..6ccef7e
--- /dev/null
+++ b/bsd-user/plugin-api.c
@@ -0,0 +1,15 @@
+/*
+ * QEMU Plugin API - bsd-user-mode only implementations
+ *
+ * Common user-mode only APIs are in plugins/api-user. These helpers
+ * are only specific to bsd-user.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "common-user/plugin-api.c.inc"
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index 9d2fc71..93388e7 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -17,16 +17,21 @@
#ifndef QEMU_H
#define QEMU_H
+#include <sys/param.h>
+
+#include "qemu/int128.h"
#include "cpu.h"
#include "qemu/units.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
#include "user/abitypes.h"
+#include "user/cpu_loop.h"
+#include "user/page-protection.h"
extern char **environ;
#include "user/thunk.h"
+#include "user/mmap.h"
#include "target_arch.h"
#include "syscall_defs.h"
#include "target_syscall.h"
@@ -35,7 +40,6 @@ extern char **environ;
#include "target.h"
#include "exec/gdbstub.h"
#include "exec/page-protection.h"
-#include "qemu/clang-tsa.h"
#include "accel/tcg/vcpu-state.h"
#include "qemu-os.h"
@@ -183,7 +187,6 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6);
void gemu_log(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
extern __thread CPUState *thread_cpu;
-void cpu_loop(CPUArchState *env);
char *target_strerror(int err);
int get_osversion(void);
void fork_start(void);
@@ -230,19 +233,8 @@ void print_taken_signal(int target_signum, const target_siginfo_t *tinfo);
extern int do_strace;
/* mmap.c */
-int target_mprotect(abi_ulong start, abi_ulong len, int prot);
-abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, off_t offset);
-int target_munmap(abi_ulong start, abi_ulong len);
-abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
- abi_ulong new_size, unsigned long flags,
- abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
-extern abi_ulong mmap_next_start;
-abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
void mmap_reserve(abi_ulong start, abi_ulong size);
-void TSA_NO_TSA mmap_fork_start(void);
-void TSA_NO_TSA mmap_fork_end(int child);
/* main.c */
extern char qemu_proc_pathname[];
diff --git a/bsd-user/riscv/signal.c b/bsd-user/riscv/signal.c
new file mode 100644
index 0000000..10c940c
--- /dev/null
+++ b/bsd-user/riscv/signal.c
@@ -0,0 +1,170 @@
+/*
+ * RISC-V signal definitions
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "qemu.h"
+
+/*
+ * Compare with sendsig() in riscv/riscv/exec_machdep.c
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long
+set_sigtramp_args(CPURISCVState *regs, int sig, struct target_sigframe *frame,
+ abi_ulong frame_addr, struct target_sigaction *ka)
+{
+ /*
+ * Arguments to signal handler:
+ * a0 (10) = signal number
+ * a1 (11) = siginfo pointer
+ * a2 (12) = ucontext pointer
+ * pc = signal handler pointer
+ * sp (2) = sigframe pointer
+ * ra (1) = sigtramp at base of user stack
+ */
+
+ regs->gpr[xA0] = sig;
+ regs->gpr[xA1] = frame_addr +
+ offsetof(struct target_sigframe, sf_si);
+ regs->gpr[xA2] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+ regs->pc = ka->_sa_handler;
+ regs->gpr[xSP] = frame_addr;
+ regs->gpr[xRA] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
+ return 0;
+}
+
+/*
+ * Compare to riscv/riscv/exec_machdep.c sendsig()
+ * Assumes that the memory is locked if frame points to user memory.
+ */
+abi_long setup_sigframe_arch(CPURISCVState *env, abi_ulong frame_addr,
+ struct target_sigframe *frame, int flags)
+{
+ target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext;
+
+ get_mcontext(env, mcp, flags);
+ return 0;
+}
+
+/*
+ * Compare with get_mcontext() in riscv/riscv/machdep.c
+ * Assumes that the memory is locked if mcp points to user memory.
+ */
+abi_long get_mcontext(CPURISCVState *regs, target_mcontext_t *mcp,
+ int flags)
+{
+
+ mcp->mc_gpregs.gp_t[0] = tswap64(regs->gpr[5]);
+ mcp->mc_gpregs.gp_t[1] = tswap64(regs->gpr[6]);
+ mcp->mc_gpregs.gp_t[2] = tswap64(regs->gpr[7]);
+ mcp->mc_gpregs.gp_t[3] = tswap64(regs->gpr[28]);
+ mcp->mc_gpregs.gp_t[4] = tswap64(regs->gpr[29]);
+ mcp->mc_gpregs.gp_t[5] = tswap64(regs->gpr[30]);
+ mcp->mc_gpregs.gp_t[6] = tswap64(regs->gpr[31]);
+
+ mcp->mc_gpregs.gp_s[0] = tswap64(regs->gpr[8]);
+ mcp->mc_gpregs.gp_s[1] = tswap64(regs->gpr[9]);
+ mcp->mc_gpregs.gp_s[2] = tswap64(regs->gpr[18]);
+ mcp->mc_gpregs.gp_s[3] = tswap64(regs->gpr[19]);
+ mcp->mc_gpregs.gp_s[4] = tswap64(regs->gpr[20]);
+ mcp->mc_gpregs.gp_s[5] = tswap64(regs->gpr[21]);
+ mcp->mc_gpregs.gp_s[6] = tswap64(regs->gpr[22]);
+ mcp->mc_gpregs.gp_s[7] = tswap64(regs->gpr[23]);
+ mcp->mc_gpregs.gp_s[8] = tswap64(regs->gpr[24]);
+ mcp->mc_gpregs.gp_s[9] = tswap64(regs->gpr[25]);
+ mcp->mc_gpregs.gp_s[10] = tswap64(regs->gpr[26]);
+ mcp->mc_gpregs.gp_s[11] = tswap64(regs->gpr[27]);
+
+ mcp->mc_gpregs.gp_a[0] = tswap64(regs->gpr[10]);
+ mcp->mc_gpregs.gp_a[1] = tswap64(regs->gpr[11]);
+ mcp->mc_gpregs.gp_a[2] = tswap64(regs->gpr[12]);
+ mcp->mc_gpregs.gp_a[3] = tswap64(regs->gpr[13]);
+ mcp->mc_gpregs.gp_a[4] = tswap64(regs->gpr[14]);
+ mcp->mc_gpregs.gp_a[5] = tswap64(regs->gpr[15]);
+ mcp->mc_gpregs.gp_a[6] = tswap64(regs->gpr[16]);
+ mcp->mc_gpregs.gp_a[7] = tswap64(regs->gpr[17]);
+
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
+ mcp->mc_gpregs.gp_a[0] = 0; /* a0 */
+ mcp->mc_gpregs.gp_a[1] = 0; /* a1 */
+ mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */
+ }
+
+ mcp->mc_gpregs.gp_ra = tswap64(regs->gpr[1]);
+ mcp->mc_gpregs.gp_sp = tswap64(regs->gpr[2]);
+ mcp->mc_gpregs.gp_gp = tswap64(regs->gpr[3]);
+ mcp->mc_gpregs.gp_tp = tswap64(regs->gpr[4]);
+ mcp->mc_gpregs.gp_sepc = tswap64(regs->pc);
+
+ return 0;
+}
+
+/* Compare with set_mcontext() in riscv/riscv/exec_machdep.c */
+abi_long set_mcontext(CPURISCVState *regs, target_mcontext_t *mcp,
+ int srflag)
+{
+
+ regs->gpr[5] = tswap64(mcp->mc_gpregs.gp_t[0]);
+ regs->gpr[6] = tswap64(mcp->mc_gpregs.gp_t[1]);
+ regs->gpr[7] = tswap64(mcp->mc_gpregs.gp_t[2]);
+ regs->gpr[28] = tswap64(mcp->mc_gpregs.gp_t[3]);
+ regs->gpr[29] = tswap64(mcp->mc_gpregs.gp_t[4]);
+ regs->gpr[30] = tswap64(mcp->mc_gpregs.gp_t[5]);
+ regs->gpr[31] = tswap64(mcp->mc_gpregs.gp_t[6]);
+
+ regs->gpr[8] = tswap64(mcp->mc_gpregs.gp_s[0]);
+ regs->gpr[9] = tswap64(mcp->mc_gpregs.gp_s[1]);
+ regs->gpr[18] = tswap64(mcp->mc_gpregs.gp_s[2]);
+ regs->gpr[19] = tswap64(mcp->mc_gpregs.gp_s[3]);
+ regs->gpr[20] = tswap64(mcp->mc_gpregs.gp_s[4]);
+ regs->gpr[21] = tswap64(mcp->mc_gpregs.gp_s[5]);
+ regs->gpr[22] = tswap64(mcp->mc_gpregs.gp_s[6]);
+ regs->gpr[23] = tswap64(mcp->mc_gpregs.gp_s[7]);
+ regs->gpr[24] = tswap64(mcp->mc_gpregs.gp_s[8]);
+ regs->gpr[25] = tswap64(mcp->mc_gpregs.gp_s[9]);
+ regs->gpr[26] = tswap64(mcp->mc_gpregs.gp_s[10]);
+ regs->gpr[27] = tswap64(mcp->mc_gpregs.gp_s[11]);
+
+ regs->gpr[10] = tswap64(mcp->mc_gpregs.gp_a[0]);
+ regs->gpr[11] = tswap64(mcp->mc_gpregs.gp_a[1]);
+ regs->gpr[12] = tswap64(mcp->mc_gpregs.gp_a[2]);
+ regs->gpr[13] = tswap64(mcp->mc_gpregs.gp_a[3]);
+ regs->gpr[14] = tswap64(mcp->mc_gpregs.gp_a[4]);
+ regs->gpr[15] = tswap64(mcp->mc_gpregs.gp_a[5]);
+ regs->gpr[16] = tswap64(mcp->mc_gpregs.gp_a[6]);
+ regs->gpr[17] = tswap64(mcp->mc_gpregs.gp_a[7]);
+
+ regs->gpr[1] = tswap64(mcp->mc_gpregs.gp_ra);
+ regs->gpr[2] = tswap64(mcp->mc_gpregs.gp_sp);
+ regs->gpr[3] = tswap64(mcp->mc_gpregs.gp_gp);
+ regs->gpr[4] = tswap64(mcp->mc_gpregs.gp_tp);
+ regs->pc = tswap64(mcp->mc_gpregs.gp_sepc);
+
+ return 0;
+}
+
+/* Compare with sys_sigreturn() in riscv/riscv/machdep.c */
+abi_long get_ucontext_sigreturn(CPURISCVState *regs,
+ abi_ulong target_sf, abi_ulong *target_uc)
+{
+
+ *target_uc = target_sf;
+ return 0;
+}
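The gpr[] indices used by get_mcontext() and set_mcontext() above follow the standard RISC-V ABI register names: t0-t6 are x5-x7 and x28-x31, s0-s11 are x8-x9 and x18-x27, a0-a7 are x10-x17. Spelling the mapping out as data, purely for illustration:

static const int riscv_t_regs[7]  = { 5, 6, 7, 28, 29, 30, 31 };           /* t0-t6  */
static const int riscv_s_regs[12] = { 8, 9, 18, 19, 20, 21, 22, 23,
                                      24, 25, 26, 27 };                    /* s0-s11 */
static const int riscv_a_regs[8]  = { 10, 11, 12, 13, 14, 15, 16, 17 };    /* a0-a7  */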
diff --git a/bsd-user/riscv/target.h b/bsd-user/riscv/target.h
new file mode 100644
index 0000000..036ddd1
--- /dev/null
+++ b/bsd-user/riscv/target.h
@@ -0,0 +1,20 @@
+/*
+ * Riscv64 general target stuff that's common to all riscv64 details
+ *
+ * Copyright (c) 2022 M. Warner Losh <imp@bsdimp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TARGET_H
+#define TARGET_H
+
+/*
+ * riscv64 ABI does not 'lump' the registers for 64-bit args.
+ */
+static inline bool regpairs_aligned(void *cpu_env)
+{
+ return false;
+}
+
+#endif /* TARGET_H */
diff --git a/bsd-user/riscv/target_arch.h b/bsd-user/riscv/target_arch.h
new file mode 100644
index 0000000..26ce07f
--- /dev/null
+++ b/bsd-user/riscv/target_arch.h
@@ -0,0 +1,27 @@
+/*
+ * RISC-V specific prototypes
+ *
+ * Copyright (c) 2019 Mark Corbin <mark.corbin@embecsom.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_H
+#define TARGET_ARCH_H
+
+#include "qemu.h"
+
+void target_cpu_set_tls(CPURISCVState *env, target_ulong newtls);
+
+#endif /* TARGET_ARCH_H */
diff --git a/bsd-user/riscv/target_arch_cpu.c b/bsd-user/riscv/target_arch_cpu.c
new file mode 100644
index 0000000..44e25d2
--- /dev/null
+++ b/bsd-user/riscv/target_arch_cpu.c
@@ -0,0 +1,29 @@
+/*
+ * RISC-V CPU related code
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "target_arch.h"
+
+#define TP_OFFSET 16
+
+/* Compare with cpu_set_user_tls() in riscv/riscv/vm_machdep.c */
+void target_cpu_set_tls(CPURISCVState *env, target_ulong newtls)
+{
+ env->gpr[xTP] = newtls + TP_OFFSET;
+}
diff --git a/bsd-user/riscv/target_arch_cpu.h b/bsd-user/riscv/target_arch_cpu.h
new file mode 100644
index 0000000..ef92f00
--- /dev/null
+++ b/bsd-user/riscv/target_arch_cpu.h
@@ -0,0 +1,148 @@
+/*
+ * RISC-V CPU init and loop
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_CPU_H
+#define TARGET_ARCH_CPU_H
+
+#include "target_arch.h"
+#include "signal-common.h"
+
+#define TARGET_DEFAULT_CPU_MODEL "max"
+
+static inline void target_cpu_init(CPURISCVState *env,
+ struct target_pt_regs *regs)
+{
+ int i;
+
+ for (i = 1; i < 32; i++) {
+ env->gpr[i] = regs->regs[i];
+ }
+
+ env->pc = regs->sepc;
+}
+
+static inline G_NORETURN void target_cpu_loop(CPURISCVState *env)
+{
+ CPUState *cs = env_cpu(env);
+ int trapnr;
+ abi_long ret;
+ unsigned int syscall_num;
+ int32_t signo, code;
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ signo = 0;
+
+ switch (trapnr) {
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+ case RISCV_EXCP_U_ECALL:
+ syscall_num = env->gpr[xT0];
+ env->pc += TARGET_INSN_SIZE;
+ /* Compare to cpu_fetch_syscall_args() in riscv/riscv/trap.c */
+ if (TARGET_FREEBSD_NR___syscall == syscall_num ||
+ TARGET_FREEBSD_NR_syscall == syscall_num) {
+ ret = do_freebsd_syscall(env,
+ env->gpr[xA0],
+ env->gpr[xA1],
+ env->gpr[xA2],
+ env->gpr[xA3],
+ env->gpr[xA4],
+ env->gpr[xA5],
+ env->gpr[xA6],
+ env->gpr[xA7],
+ 0);
+ } else {
+ ret = do_freebsd_syscall(env,
+ syscall_num,
+ env->gpr[xA0],
+ env->gpr[xA1],
+ env->gpr[xA2],
+ env->gpr[xA3],
+ env->gpr[xA4],
+ env->gpr[xA5],
+ env->gpr[xA6],
+ env->gpr[xA7]
+ );
+ }
+
+ /*
+ * Compare to cpu_set_syscall_retval() in
+ * riscv/riscv/vm_machdep.c
+ */
+ if (ret >= 0) {
+ env->gpr[xA0] = ret;
+ env->gpr[xT0] = 0;
+ } else if (ret == -TARGET_ERESTART) {
+ env->pc -= TARGET_INSN_SIZE;
+ } else if (ret != -TARGET_EJUSTRETURN) {
+ env->gpr[xA0] = -ret;
+ env->gpr[xT0] = 1;
+ }
+ break;
+ case RISCV_EXCP_ILLEGAL_INST:
+ signo = TARGET_SIGILL;
+ code = TARGET_ILL_ILLOPC;
+ break;
+ case RISCV_EXCP_BREAKPOINT:
+ signo = TARGET_SIGTRAP;
+ code = TARGET_TRAP_BRKPT;
+ break;
+ case EXCP_DEBUG:
+ signo = TARGET_SIGTRAP;
+ code = TARGET_TRAP_BRKPT;
+ break;
+ default:
+ fprintf(stderr, "qemu: unhandled CPU exception "
+ "0x%x - aborting\n", trapnr);
+ cpu_dump_state(cs, stderr, 0);
+ abort();
+ }
+
+ if (signo) {
+ force_sig_fault(signo, code, env->pc);
+ }
+
+ process_pending_signals(env);
+ }
+}
+
+static inline void target_cpu_clone_regs(CPURISCVState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->gpr[xSP] = newsp;
+ }
+
+ env->gpr[xA0] = 0;
+ env->gpr[xT0] = 0;
+}
+
+static inline void target_cpu_reset(CPUArchState *env)
+{
+}
+
+#endif /* TARGET_ARCH_CPU_H */
diff --git a/bsd-user/riscv/target_arch_elf.h b/bsd-user/riscv/target_arch_elf.h
new file mode 100644
index 0000000..4eb915e
--- /dev/null
+++ b/bsd-user/riscv/target_arch_elf.h
@@ -0,0 +1,42 @@
+/*
+ * RISC-V ELF definitions
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_ELF_H
+#define TARGET_ARCH_ELF_H
+
+#define elf_check_arch(x) ((x) == EM_RISCV)
+#define ELF_START_MMAP 0x80000000
+#define ELF_ET_DYN_LOAD_ADDR 0x100000
+#define ELF_CLASS ELFCLASS64
+
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_RISCV
+
+#define ELF_HWCAP get_elf_hwcap()
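+/*
+ * Presumably the hwcap value is the ISA extension bitmask (bit 0 for
+ * 'A', bit 1 for 'B', ...), which is exactly what misa_ext_mask holds.
+ */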
+static uint32_t get_elf_hwcap(void)
+{
+ RISCVCPU *cpu = RISCV_CPU(thread_cpu);
+
+ return cpu->env.misa_ext_mask;
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#endif /* TARGET_ARCH_ELF_H */
diff --git a/bsd-user/riscv/target_arch_reg.h b/bsd-user/riscv/target_arch_reg.h
new file mode 100644
index 0000000..12b1c96
--- /dev/null
+++ b/bsd-user/riscv/target_arch_reg.h
@@ -0,0 +1,88 @@
+/*
+ * RISC-V register structures
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_REG_H
+#define TARGET_ARCH_REG_H
+
+/* Compare with riscv/include/reg.h */
+typedef struct target_reg {
+ uint64_t ra; /* return address */
+ uint64_t sp; /* stack pointer */
+ uint64_t gp; /* global pointer */
+ uint64_t tp; /* thread pointer */
+ uint64_t t[7]; /* temporaries */
+ uint64_t s[12]; /* saved registers */
+ uint64_t a[8]; /* function arguments */
+ uint64_t sepc; /* exception program counter */
+ uint64_t sstatus; /* status register */
+} target_reg_t;
+
+typedef struct target_fpreg {
+ uint64_t fp_x[32][2]; /* Floating point registers */
+ uint64_t fp_fcsr; /* Floating point control reg */
+} target_fpreg_t;
+
+#define tswapreg(ptr) tswapal(ptr)
+
+/* Compare with struct trapframe in riscv/include/frame.h */
+static inline void target_copy_regs(target_reg_t *regs,
+ const CPURISCVState *env)
+{
+
+ regs->ra = tswapreg(env->gpr[1]);
+ regs->sp = tswapreg(env->gpr[2]);
+ regs->gp = tswapreg(env->gpr[3]);
+ regs->tp = tswapreg(env->gpr[4]);
+
+ regs->t[0] = tswapreg(env->gpr[5]);
+ regs->t[1] = tswapreg(env->gpr[6]);
+ regs->t[2] = tswapreg(env->gpr[7]);
+ regs->t[3] = tswapreg(env->gpr[28]);
+ regs->t[4] = tswapreg(env->gpr[29]);
+ regs->t[5] = tswapreg(env->gpr[30]);
+ regs->t[6] = tswapreg(env->gpr[31]);
+
+ regs->s[0] = tswapreg(env->gpr[8]);
+ regs->s[1] = tswapreg(env->gpr[9]);
+ regs->s[2] = tswapreg(env->gpr[18]);
+ regs->s[3] = tswapreg(env->gpr[19]);
+ regs->s[4] = tswapreg(env->gpr[20]);
+ regs->s[5] = tswapreg(env->gpr[21]);
+ regs->s[6] = tswapreg(env->gpr[22]);
+ regs->s[7] = tswapreg(env->gpr[23]);
+ regs->s[8] = tswapreg(env->gpr[24]);
+ regs->s[9] = tswapreg(env->gpr[25]);
+ regs->s[10] = tswapreg(env->gpr[26]);
+ regs->s[11] = tswapreg(env->gpr[27]);
+
+ regs->a[0] = tswapreg(env->gpr[10]);
+ regs->a[1] = tswapreg(env->gpr[11]);
+ regs->a[2] = tswapreg(env->gpr[12]);
+ regs->a[3] = tswapreg(env->gpr[13]);
+ regs->a[4] = tswapreg(env->gpr[14]);
+ regs->a[5] = tswapreg(env->gpr[15]);
+ regs->a[6] = tswapreg(env->gpr[16]);
+ regs->a[7] = tswapreg(env->gpr[17]);
+
+ regs->sepc = tswapreg(env->pc);
+}
+
+#undef tswapreg
+
+#endif /* TARGET_ARCH_REG_H */
diff --git a/bsd-user/riscv/target_arch_signal.h b/bsd-user/riscv/target_arch_signal.h
new file mode 100644
index 0000000..1a634b8
--- /dev/null
+++ b/bsd-user/riscv/target_arch_signal.h
@@ -0,0 +1,75 @@
+/*
+ * RISC-V signal definitions
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SIGNAL_H
+#define TARGET_ARCH_SIGNAL_H
+
+#include "cpu.h"
+
+
+#define TARGET_INSN_SIZE 4 /* riscv instruction size */
+
+/* Size of the signal trampoline code placed on the stack. */
+#define TARGET_SZSIGCODE ((abi_ulong)(7 * TARGET_INSN_SIZE))
+
+/* Compare with riscv/include/_limits.h */
+#define TARGET_MINSIGSTKSZ (1024 * 4)
+#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
+
+struct target_gpregs {
+ uint64_t gp_ra;
+ uint64_t gp_sp;
+ uint64_t gp_gp;
+ uint64_t gp_tp;
+ uint64_t gp_t[7];
+ uint64_t gp_s[12];
+ uint64_t gp_a[8];
+ uint64_t gp_sepc;
+ uint64_t gp_sstatus;
+};
+
+struct target_fpregs {
+ uint64_t fp_x[32][2];
+ uint64_t fp_fcsr;
+ uint32_t fp_flags;
+ uint32_t pad;
+};
+
+typedef struct target_mcontext {
+ struct target_gpregs mc_gpregs;
+ struct target_fpregs mc_fpregs;
+ uint32_t mc_flags;
+#define TARGET_MC_FP_VALID 0x01
+ uint32_t mc_pad;
+ uint64_t mc_spare[8];
+} target_mcontext_t;
+
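+/*
+ * These presumably match sizeof(mcontext_t) and sizeof(ucontext_t)
+ * on FreeBSD riscv64.
+ */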
+#define TARGET_MCONTEXT_SIZE 864
+#define TARGET_UCONTEXT_SIZE 936
+
+#include "target_os_ucontext.h"
+
+struct target_sigframe {
+ target_ucontext_t sf_uc; /* = *sf_ucontext */
+ target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case) */
+};
+
+#define TARGET_SIGSTACK_ALIGN 16
+
+#endif /* TARGET_ARCH_SIGNAL_H */
diff --git a/bsd-user/riscv/target_arch_sigtramp.h b/bsd-user/riscv/target_arch_sigtramp.h
new file mode 100644
index 0000000..dfe5076
--- /dev/null
+++ b/bsd-user/riscv/target_arch_sigtramp.h
@@ -0,0 +1,41 @@
+/*
+ * RISC-V sigcode
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SIGTRAMP_H
+#define TARGET_ARCH_SIGTRAMP_H
+
+/* Compare with sigcode() in riscv/riscv/locore.S */
+static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc,
+ unsigned sys_sigreturn)
+{
+ uint32_t sys_exit = TARGET_FREEBSD_NR_exit;
+
+ uint32_t sigtramp_code[] = {
+ /*1*/ const_le32(0x00010513), /*mv a0, sp*/
+ /*2*/ const_le32(0x00050513 + (sigf_uc << 20)), /*addi a0,a0,sigf_uc*/
+ /*3*/ const_le32(0x00000293 + (sys_sigreturn << 20)),/*li t0,sys_sigreturn*/
+ /*4*/ const_le32(0x00000073), /*ecall*/
+ /*5*/ const_le32(0x00000293 + (sys_exit << 20)), /*li t0,sys_exit*/
+ /*6*/ const_le32(0x00000073), /*ecall*/
+ /*7*/ const_le32(0xFF1FF06F) /*j -16*/
+ };
+
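+ /* TARGET_SZSIGCODE is 7 * TARGET_INSN_SIZE, matching the seven words above. */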
+ return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE);
+}
+#endif /* TARGET_ARCH_SIGTRAMP_H */
diff --git a/bsd-user/riscv/target_arch_sysarch.h b/bsd-user/riscv/target_arch_sysarch.h
new file mode 100644
index 0000000..9af4233
--- /dev/null
+++ b/bsd-user/riscv/target_arch_sysarch.h
@@ -0,0 +1,41 @@
+/*
+ * RISC-V sysarch() system call emulation
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_SYSARCH_H
+#define TARGET_ARCH_SYSARCH_H
+
+#include "target_syscall.h"
+#include "target_arch.h"
+
+static inline abi_long do_freebsd_arch_sysarch(CPURISCVState *env, int op,
+ abi_ulong parms)
+{
+
+ return -TARGET_EOPNOTSUPP;
+}
+
+static inline void do_freebsd_arch_print_sysarch(
+ const struct syscallname *name, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
+{
+
+ gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2);
+}
+
+#endif /* TARGET_ARCH_SYSARCH_H */
diff --git a/bsd-user/riscv/target_arch_thread.h b/bsd-user/riscv/target_arch_thread.h
new file mode 100644
index 0000000..95cd0b6
--- /dev/null
+++ b/bsd-user/riscv/target_arch_thread.h
@@ -0,0 +1,47 @@
+/*
+ * RISC-V thread support
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_THREAD_H
+#define TARGET_ARCH_THREAD_H
+
+/* Compare with cpu_set_upcall() in riscv/riscv/vm_machdep.c */
+static inline void target_thread_set_upcall(CPURISCVState *regs,
+ abi_ulong entry, abi_ulong arg, abi_ulong stack_base,
+ abi_ulong stack_size)
+{
+ abi_ulong sp;
+
+ sp = ROUND_DOWN(stack_base + stack_size, 16);
+
+ regs->gpr[xSP] = sp;
+ regs->pc = entry;
+ regs->gpr[xA0] = arg;
+}
+
+/* Compare with exec_setregs() in riscv/riscv/machdep.c */
+static inline void target_thread_init(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ regs->sepc = infop->entry;
+ regs->regs[xRA] = infop->entry;
+ regs->regs[xA0] = infop->start_stack;
+ regs->regs[xSP] = ROUND_DOWN(infop->start_stack, 16);
+}
+
+#endif /* TARGET_ARCH_THREAD_H */
diff --git a/bsd-user/riscv/target_arch_vmparam.h b/bsd-user/riscv/target_arch_vmparam.h
new file mode 100644
index 0000000..0f2486d
--- /dev/null
+++ b/bsd-user/riscv/target_arch_vmparam.h
@@ -0,0 +1,53 @@
+/*
+ * RISC-V VM parameters definitions
+ *
+ * Copyright (c) 2019 Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARCH_VMPARAM_H
+#define TARGET_ARCH_VMPARAM_H
+
+#include "cpu.h"
+
+/* Compare with riscv/include/vmparam.h */
+#define TARGET_MAXTSIZ (1 * GiB) /* max text size */
+#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */
+#define TARGET_MAXDSIZ (1 * GiB) /* max data size */
+#define TARGET_DFLSSIZ (128 * MiB) /* initial stack size limit */
+#define TARGET_MAXSSIZ (1 * GiB) /* max stack size */
+#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */
+
+#define TARGET_VM_MINUSER_ADDRESS (0x0000000000000000UL)
+#define TARGET_VM_MAXUSER_ADDRESS (0x0000004000000000UL)
+
+#define TARGET_USRSTACK (TARGET_VM_MAXUSER_ADDRESS - TARGET_PAGE_SIZE)
+
+static inline abi_ulong get_sp_from_cpustate(CPURISCVState *state)
+{
+ return state->gpr[xSP];
+}
+
+static inline void set_second_rval(CPURISCVState *state, abi_ulong retval2)
+{
+ state->gpr[xA1] = retval2;
+}
+
+static inline abi_ulong get_second_rval(CPURISCVState *state)
+{
+ return state->gpr[xA1];
+}
+
+#endif /* TARGET_ARCH_VMPARAM_H */
diff --git a/bsd-user/riscv/target_syscall.h b/bsd-user/riscv/target_syscall.h
new file mode 100644
index 0000000..e7e5231
--- /dev/null
+++ b/bsd-user/riscv/target_syscall.h
@@ -0,0 +1,38 @@
+/*
+ * RISC-V system call definitions
+ *
+ * Copyright (c) Mark Corbin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BSD_USER_RISCV_TARGET_SYSCALL_H
+#define BSD_USER_RISCV_TARGET_SYSCALL_H
+
+/*
+ * struct target_pt_regs defines the way the registers are stored on the stack
+ * during a system call.
+ */
+
+struct target_pt_regs {
+ abi_ulong regs[32];
+ abi_ulong sepc;
+};
+
+#define UNAME_MACHINE "riscv64"
+
+#define TARGET_HW_MACHINE "riscv"
+#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE
+
+#endif /* BSD_USER_RISCV_TARGET_SYSCALL_H */
diff --git a/bsd-user/signal-common.h b/bsd-user/signal-common.h
index 77d7c7a..4e634e0 100644
--- a/bsd-user/signal-common.h
+++ b/bsd-user/signal-common.h
@@ -42,7 +42,6 @@ void process_pending_signals(CPUArchState *env);
void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info);
void signal_init(void);
-int target_to_host_signal(int sig);
void target_to_host_sigset(sigset_t *d, const target_sigset_t *s);
/*
diff --git a/bsd-user/signal.c b/bsd-user/signal.c
index 8b6654b..dadcc03 100644
--- a/bsd-user/signal.c
+++ b/bsd-user/signal.c
@@ -21,12 +21,15 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu.h"
+#include "user/cpu_loop.h"
#include "exec/page-protection.h"
+#include "user/page-protection.h"
+#include "user/signal.h"
#include "user/tswap-target.h"
#include "gdbstub/user.h"
#include "signal-common.h"
#include "trace.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#include "host-signal.h"
/* target_siginfo_t must fit in gdbstub's siginfo save area. */
@@ -48,6 +51,8 @@ static inline int sas_ss_flags(TaskState *ts, unsigned long sp)
on_sig_stack(ts, sp) ? SS_ONSTACK : 0;
}
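+/*
+ * Host real-time signal reserved for interrupting the vCPU thread;
+ * it is excluded from the guest signal table set up in signal_init()
+ * below.
+ */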
+int host_interrupt_signal = SIGRTMAX;
+
/*
* The BSD ABIs use the same signal numbers across all the CPU architectures, so
* (unlike Linux) these functions are just the identity mapping. This might not
@@ -436,7 +441,6 @@ void queue_signal(CPUArchState *env, int sig, int si_type,
ts->sync_signal.pending = sig;
/* Signal that a new signal is pending. */
qatomic_set(&ts->signal_pending, 1);
- return;
}
static int fatal_signal(int sig)
@@ -488,6 +492,12 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
uintptr_t pc = 0;
bool sync_sig = false;
+ if (host_sig == host_interrupt_signal) {
+ ts->signal_pending = 1;
+ cpu_exit(thread_cpu);
+ return;
+ }
+
/*
* Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
* handling wrt signal blocking and unwinding.
@@ -728,14 +738,7 @@ static inline abi_ulong get_sigframe(struct target_sigaction *ka,
sp = ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
}
-/* TODO: make this a target_arch function / define */
-#if defined(TARGET_ARM)
- return (sp - frame_size) & ~7;
-#elif defined(TARGET_AARCH64)
- return (sp - frame_size) & ~15;
-#else
- return sp - frame_size;
-#endif
+ return ROUND_DOWN(sp - frame_size, TARGET_SIGSTACK_ALIGN);
}
/* compare to $M/$M/exec_machdep.c sendsig and sys/kern/kern_sig.c sigexit */
@@ -858,6 +861,9 @@ void signal_init(void)
for (i = 1; i <= TARGET_NSIG; i++) {
host_sig = target_to_host_signal(i);
+ if (host_sig == host_interrupt_signal) {
+ continue;
+ }
sigaction(host_sig, NULL, &oact);
if (oact.sa_sigaction == (void *)SIG_IGN) {
sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
@@ -876,6 +882,7 @@ void signal_init(void)
sigaction(host_sig, &act, NULL);
}
}
+ sigaction(host_interrupt_signal, &act, NULL);
}
static void handle_pending_signal(CPUArchState *env, int sig,
@@ -1023,10 +1030,10 @@ void process_pending_signals(CPUArchState *env)
ts->in_sigsuspend = false;
}
-void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
- const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->record_sigsegv) {
tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
@@ -1039,10 +1046,10 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
cpu_loop_exit_restore(cpu, ra);
}
-void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra)
{
- const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->record_sigbus) {
tcg_ops->record_sigbus(cpu, addr, access_type, ra);
diff --git a/bsd-user/x86_64/target_arch_cpu.h b/bsd-user/x86_64/target_arch_cpu.h
index 4094d61..f82042e 100644
--- a/bsd-user/x86_64/target_arch_cpu.h
+++ b/bsd-user/x86_64/target_arch_cpu.h
@@ -110,7 +110,7 @@ static inline void target_cpu_init(CPUX86State *env,
cpu_x86_load_seg(env, R_GS, 0);
}
-static inline void target_cpu_loop(CPUX86State *env)
+static inline G_NORETURN void target_cpu_loop(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
int trapnr;
diff --git a/bsd-user/x86_64/target_arch_signal.h b/bsd-user/x86_64/target_arch_signal.h
index ca24bf1..f833ee6 100644
--- a/bsd-user/x86_64/target_arch_signal.h
+++ b/bsd-user/x86_64/target_arch_signal.h
@@ -97,4 +97,6 @@ struct target_sigframe {
uint32_t __spare__[2];
};
+#define TARGET_SIGSTACK_ALIGN 16
+
#endif /* TARGET_ARCH_SIGNAL_H */
diff --git a/bsd-user/x86_64/target_arch_thread.h b/bsd-user/x86_64/target_arch_thread.h
index 52c2890..7739bb2 100644
--- a/bsd-user/x86_64/target_arch_thread.h
+++ b/bsd-user/x86_64/target_arch_thread.h
@@ -31,7 +31,7 @@ static inline void target_thread_init(struct target_pt_regs *regs,
struct image_info *infop)
{
regs->rax = 0;
- regs->rsp = infop->start_stack;
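+ /*
+ * Presumably mirrors FreeBSD's amd64 exec_setregs(): rsp ends up
+ * congruent to 8 modulo 16, i.e. as if a return address had just
+ * been pushed onto a 16-byte-aligned stack.
+ */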
+ regs->rsp = ((infop->start_stack - 8) & ~0xfUL) + 8;
regs->rip = infop->entry;
regs->rdi = infop->start_stack;
}
diff --git a/chardev/baum.c b/chardev/baum.c
index a1d9784..f3e8cd2 100644
--- a/chardev/baum.c
+++ b/chardev/baum.c
@@ -668,7 +668,7 @@ static void baum_chr_open(Chardev *chr,
qemu_set_fd_handler(baum->brlapi_fd, baum_chr_read, NULL, baum);
}
-static void char_braille_class_init(ObjectClass *oc, void *data)
+static void char_braille_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-console.c b/chardev/char-console.c
index 6c4ce5d..7e1bf64 100644
--- a/chardev/char-console.c
+++ b/chardev/char-console.c
@@ -34,7 +34,7 @@ static void qemu_chr_open_win_con(Chardev *chr,
win_chr_set_file(chr, GetStdHandle(STD_OUTPUT_HANDLE), true);
}
-static void char_console_class_init(ObjectClass *oc, void *data)
+static void char_console_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-fd.c b/chardev/char-fd.c
index d2c4923..6f03adf 100644
--- a/chardev/char-fd.c
+++ b/chardev/char-fd.c
@@ -50,7 +50,7 @@ static gboolean fd_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque)
Chardev *chr = CHARDEV(opaque);
FDChardev *s = FD_CHARDEV(opaque);
int len;
- uint8_t buf[CHR_READ_BUF_LEN];
+ QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN];
ssize_t ret;
len = sizeof(buf);
@@ -238,7 +238,7 @@ void qemu_chr_open_fd(Chardev *chr,
}
}
-static void char_fd_class_init(ObjectClass *oc, void *data)
+static void char_fd_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-fe.c b/chardev/char-fe.c
index b214ba3..158a5f4 100644
--- a/chardev/char-fe.c
+++ b/chardev/char-fe.c
@@ -24,7 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "chardev/char-fe.h"
#include "chardev/char-io.h"
@@ -191,22 +191,15 @@ bool qemu_chr_fe_backend_open(CharBackend *be)
bool qemu_chr_fe_init(CharBackend *b, Chardev *s, Error **errp)
{
- int tag = 0;
+ unsigned int tag = 0;
if (s) {
if (CHARDEV_IS_MUX(s)) {
MuxChardev *d = MUX_CHARDEV(s);
- if (d->mux_cnt >= MAX_MUX) {
- error_setg(errp,
- "too many uses of multiplexed chardev '%s'"
- " (maximum is " stringify(MAX_MUX) ")",
- s->label);
+ if (!mux_chr_attach_frontend(d, b, &tag, errp)) {
return false;
}
-
- d->backends[d->mux_cnt] = b;
- tag = d->mux_cnt++;
} else if (s->be) {
error_setg(errp, "chardev '%s' is already in use", s->label);
return false;
@@ -232,7 +225,7 @@ void qemu_chr_fe_deinit(CharBackend *b, bool del)
}
if (CHARDEV_IS_MUX(b->chr)) {
MuxChardev *d = MUX_CHARDEV(b->chr);
- d->backends[b->tag] = NULL;
+ mux_chr_detach_frontend(d, b->tag);
}
if (del) {
Object *obj = OBJECT(b->chr);
diff --git a/chardev/char-file.c b/chardev/char-file.c
index 263e6da..a9e8c5e 100644
--- a/chardev/char-file.c
+++ b/chardev/char-file.c
@@ -123,7 +123,7 @@ static void qemu_chr_parse_file_out(QemuOpts *opts, ChardevBackend *backend,
file->append = qemu_opt_get_bool(opts, "append", false);
}
-static void char_file_class_init(ObjectClass *oc, void *data)
+static void char_file_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-hmp-cmds.c b/chardev/char-hmp-cmds.c
index 287c2b1..8e9e1c1 100644
--- a/chardev/char-hmp-cmds.c
+++ b/chardev/char-hmp-cmds.c
@@ -19,7 +19,7 @@
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-char.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
diff --git a/chardev/char-hub.c b/chardev/char-hub.c
new file mode 100644
index 0000000..16ffee2
--- /dev/null
+++ b/chardev/char-hub.c
@@ -0,0 +1,301 @@
+/*
+ * QEMU Character Hub Device
+ *
+ * Author: Roman Penyaev <r.peniaev@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/option.h"
+#include "chardev/char.h"
+#include "chardev-internal.h"
+
+/*
+ * The character hub device aggregates input from multiple backend
+ * devices and forwards it to a single frontend device. Additionally,
+ * the hub device takes the output from the frontend device and sends
+ * it back to all of the connected backend devices.
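+ *
+ * A hypothetical invocation (IDs are examples, not taken from this
+ * patch):
+ *
+ *   -chardev pty,id=pty0 \
+ *   -chardev vc,id=vc0 \
+ *   -chardev hub,id=hub0,chardevs.0=pty0,chardevs.1=vc0 \
+ *   -serial chardev:hub0
+ *
+ * Up to MAX_HUB backends can be attached this way; the chardevs.N
+ * options are parsed in qemu_chr_parse_hub() below.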
+ */
+
+/*
+ * Write to all backends. Different backend devices accept data at
+ * different rates, so it is quite possible that one device accepts
+ * less than the others. In that case the minimum is returned to the
+ * caller, which is expected to repeat the operation soon. When the
+ * operation is repeated, writes to the devices that have already
+ * consumed the data must be skipped, so that no data is sent twice.
+ * Called with chr_write_lock held.
+ */
+static int hub_chr_write(Chardev *chr, const uint8_t *buf, int len)
+{
+ HubChardev *d = HUB_CHARDEV(chr);
+ int r, i, ret = len;
+ unsigned int written;
+
+ /* Invalidate index on every write */
+ d->be_eagain_ind = -1;
+
+ for (i = 0; i < d->be_cnt; i++) {
+ if (!d->backends[i].be.chr->be_open) {
+ /* Skip closed backend */
+ continue;
+ }
+ written = d->be_written[i] - d->be_min_written;
+ if (written) {
+ /* Already written in a previous call, so take that into account */
+ ret = MIN(written, ret);
+ continue;
+ }
+ r = qemu_chr_fe_write(&d->backends[i].be, buf, len);
+ if (r < 0) {
+ if (errno == EAGAIN) {
+ /* Set index and expect to be called soon on watch wake up */
+ d->be_eagain_ind = i;
+ }
+ return r;
+ }
+ d->be_written[i] += r;
+ ret = MIN(r, ret);
+ }
+ d->be_min_written += ret;
+
+ return ret;
+}
+
+static int hub_chr_can_read(void *opaque)
+{
+ HubCharBackend *backend = opaque;
+ CharBackend *fe = backend->hub->parent.be;
+
+ if (fe && fe->chr_can_read) {
+ return fe->chr_can_read(fe->opaque);
+ }
+
+ return 0;
+}
+
+static void hub_chr_read(void *opaque, const uint8_t *buf, int size)
+{
+ HubCharBackend *backend = opaque;
+ CharBackend *fe = backend->hub->parent.be;
+
+ if (fe && fe->chr_read) {
+ fe->chr_read(fe->opaque, buf, size);
+ }
+}
+
+static void hub_chr_event(void *opaque, QEMUChrEvent event)
+{
+ HubCharBackend *backend = opaque;
+ HubChardev *d = backend->hub;
+ CharBackend *fe = d->parent.be;
+
+ if (event == CHR_EVENT_OPENED) {
+ /*
+ * Catch up with what was already written while this backend
+ * was closed
+ */
+ d->be_written[backend->be_ind] = d->be_min_written;
+
+ if (d->be_event_opened_cnt++) {
+ /* Ignore subsequent open events from other backends */
+ return;
+ }
+ } else if (event == CHR_EVENT_CLOSED) {
+ if (!d->be_event_opened_cnt) {
+ /* Don't go below zero; arguably this should be an assert */
+ return;
+ }
+ if (--d->be_event_opened_cnt) {
+ /* Deliver only the last close event */
+ return;
+ }
+ }
+
+ if (fe && fe->chr_event) {
+ fe->chr_event(fe->opaque, event);
+ }
+}
+
+static GSource *hub_chr_add_watch(Chardev *s, GIOCondition cond)
+{
+ HubChardev *d = HUB_CHARDEV(s);
+ Chardev *chr;
+ ChardevClass *cc;
+
+ if (d->be_eagain_ind == -1) {
+ return NULL;
+ }
+
+ assert(d->be_eagain_ind < d->be_cnt);
+ chr = qemu_chr_fe_get_driver(&d->backends[d->be_eagain_ind].be);
+ cc = CHARDEV_GET_CLASS(chr);
+ if (!cc->chr_add_watch) {
+ return NULL;
+ }
+
+ return cc->chr_add_watch(chr, cond);
+}
+
+static bool hub_chr_attach_chardev(HubChardev *d, Chardev *chr,
+ Error **errp)
+{
+ bool ret;
+
+ if (d->be_cnt >= MAX_HUB) {
+ error_setg(errp, "hub: too many backend chardevs attached to '%s'"
+ " (maximum is " stringify(MAX_HUB) ")",
+ d->parent.label);
+ return false;
+ }
+ ret = qemu_chr_fe_init(&d->backends[d->be_cnt].be, chr, errp);
+ if (ret) {
+ d->backends[d->be_cnt].hub = d;
+ d->backends[d->be_cnt].be_ind = d->be_cnt;
+ d->be_cnt += 1;
+ }
+
+ return ret;
+}
+
+static void char_hub_finalize(Object *obj)
+{
+ HubChardev *d = HUB_CHARDEV(obj);
+ int i;
+
+ for (i = 0; i < d->be_cnt; i++) {
+ qemu_chr_fe_deinit(&d->backends[i].be, false);
+ }
+}
+
+static void hub_chr_update_read_handlers(Chardev *chr)
+{
+ HubChardev *d = HUB_CHARDEV(chr);
+ int i;
+
+ for (i = 0; i < d->be_cnt; i++) {
+ qemu_chr_fe_set_handlers_full(&d->backends[i].be,
+ hub_chr_can_read,
+ hub_chr_read,
+ hub_chr_event,
+ NULL,
+ &d->backends[i],
+ chr->gcontext, true, false);
+ }
+}
+
+static void qemu_chr_open_hub(Chardev *chr,
+ ChardevBackend *backend,
+ bool *be_opened,
+ Error **errp)
+{
+ ChardevHub *hub = backend->u.hub.data;
+ HubChardev *d = HUB_CHARDEV(chr);
+ strList *list = hub->chardevs;
+
+ d->be_eagain_ind = -1;
+
+ if (list == NULL) {
+ error_setg(errp, "hub: 'chardevs' list is not defined");
+ return;
+ }
+
+ while (list) {
+ Chardev *s;
+
+ s = qemu_chr_find(list->value);
+ if (s == NULL) {
+ error_setg(errp, "hub: chardev can't be found by id '%s'",
+ list->value);
+ return;
+ }
+ if (CHARDEV_IS_HUB(s) || CHARDEV_IS_MUX(s)) {
+ error_setg(errp, "hub: multiplexers and hub devices can't be "
+ "stacked; chardev '%s' should not be a hub device "
+ "or have 'mux=on' enabled",
+ list->value);
+ return;
+ }
+ if (!hub_chr_attach_chardev(d, s, errp)) {
+ return;
+ }
+ list = list->next;
+ }
+
+ /* Closed until an explicit event from backend */
+ *be_opened = false;
+}
+
+static void qemu_chr_parse_hub(QemuOpts *opts, ChardevBackend *backend,
+ Error **errp)
+{
+ ChardevHub *hub;
+ strList **tail;
+ int i;
+
+ backend->type = CHARDEV_BACKEND_KIND_HUB;
+ hub = backend->u.hub.data = g_new0(ChardevHub, 1);
+ qemu_chr_parse_common(opts, qapi_ChardevHub_base(hub));
+
+ tail = &hub->chardevs;
+
+ for (i = 0; i < MAX_HUB; i++) {
+ char optbuf[16];
+ const char *dev;
+
+ snprintf(optbuf, sizeof(optbuf), "chardevs.%u", i);
+ dev = qemu_opt_get(opts, optbuf);
+ if (!dev) {
+ break;
+ }
+
+ QAPI_LIST_APPEND(tail, g_strdup(dev));
+ }
+}
+
+static void char_hub_class_init(ObjectClass *oc, const void *data)
+{
+ ChardevClass *cc = CHARDEV_CLASS(oc);
+
+ cc->parse = qemu_chr_parse_hub;
+ cc->open = qemu_chr_open_hub;
+ cc->chr_write = hub_chr_write;
+ cc->chr_add_watch = hub_chr_add_watch;
+ /* We handle events from backends only */
+ cc->chr_be_event = NULL;
+ cc->chr_update_read_handler = hub_chr_update_read_handlers;
+}
+
+static const TypeInfo char_hub_type_info = {
+ .name = TYPE_CHARDEV_HUB,
+ .parent = TYPE_CHARDEV,
+ .class_init = char_hub_class_init,
+ .instance_size = sizeof(HubChardev),
+ .instance_finalize = char_hub_finalize,
+};
+
+static void register_types(void)
+{
+ type_register_static(&char_hub_type_info);
+}
+
+type_init(register_types);
diff --git a/chardev/char-mux.c b/chardev/char-mux.c
index ee2d47b..6b36290 100644
--- a/chardev/char-mux.c
+++ b/chardev/char-mux.c
@@ -26,8 +26,9 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/option.h"
+#include "qemu/bitops.h"
#include "chardev/char.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/qapi-commands-control.h"
#include "chardev-internal.h"
@@ -73,11 +74,11 @@ static int mux_chr_write(Chardev *chr, const uint8_t *buf, int len)
* qemu_chr_fe_write and background I/O callbacks */
qemu_chr_fe_write_all(&d->chr,
(uint8_t *)buf1, strlen(buf1));
- d->linestart = 0;
+ d->linestart = false;
}
ret += qemu_chr_fe_write(&d->chr, buf + i, 1);
if (buf[i] == '\n') {
- d->linestart = 1;
+ d->linestart = true;
}
}
}
@@ -124,7 +125,8 @@ static void mux_print_help(Chardev *chr)
}
}
-static void mux_chr_send_event(MuxChardev *d, int mux_nr, QEMUChrEvent event)
+static void mux_chr_send_event(MuxChardev *d, unsigned int mux_nr,
+ QEMUChrEvent event)
{
CharBackend *be = d->backends[mux_nr];
@@ -145,7 +147,7 @@ static void mux_chr_be_event(Chardev *chr, QEMUChrEvent event)
static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch)
{
if (d->term_got_escape) {
- d->term_got_escape = 0;
+ d->term_got_escape = false;
if (ch == term_escape_char) {
goto send_char;
}
@@ -167,19 +169,26 @@ static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch)
case 'b':
qemu_chr_be_event(chr, CHR_EVENT_BREAK);
break;
- case 'c':
- assert(d->mux_cnt > 0); /* handler registered with first fe */
+ case 'c': {
+ unsigned int bit;
+
+ /* Handler registered with first fe */
+ assert(d->mux_bitset != 0);
/* Switch to the next registered device */
- mux_set_focus(chr, (d->focus + 1) % d->mux_cnt);
+ bit = find_next_bit(&d->mux_bitset, MAX_MUX, d->focus + 1);
+ if (bit >= MAX_MUX) {
+ bit = find_next_bit(&d->mux_bitset, MAX_MUX, 0);
+ }
+ mux_set_focus(chr, bit);
break;
- case 't':
+ } case 't':
d->timestamps = !d->timestamps;
d->timestamps_start = -1;
- d->linestart = 0;
+ d->linestart = false;
break;
}
} else if (ch == term_escape_char) {
- d->term_got_escape = 1;
+ d->term_got_escape = true;
} else {
send_char:
return 1;
@@ -242,15 +251,16 @@ static void mux_chr_read(void *opaque, const uint8_t *buf, int size)
void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event)
{
MuxChardev *d = MUX_CHARDEV(chr);
- int i;
+ int bit;
if (!muxes_opened) {
return;
}
/* Send the event to all registered listeners */
- for (i = 0; i < d->mux_cnt; i++) {
- mux_chr_send_event(d, i, event);
+ bit = -1;
+ while ((bit = find_next_bit(&d->mux_bitset, MAX_MUX, bit + 1)) < MAX_MUX) {
+ mux_chr_send_event(d, bit, event);
}
}
@@ -275,14 +285,15 @@ static GSource *mux_chr_add_watch(Chardev *s, GIOCondition cond)
static void char_mux_finalize(Object *obj)
{
MuxChardev *d = MUX_CHARDEV(obj);
- int i;
+ int bit;
- for (i = 0; i < d->mux_cnt; i++) {
- CharBackend *be = d->backends[i];
- if (be) {
- be->chr = NULL;
- }
+ bit = -1;
+ while ((bit = find_next_bit(&d->mux_bitset, MAX_MUX, bit + 1)) < MAX_MUX) {
+ CharBackend *be = d->backends[bit];
+ be->chr = NULL;
+ d->backends[bit] = NULL;
}
+ d->mux_bitset = 0;
qemu_chr_fe_deinit(&d->chr, false);
}
@@ -300,12 +311,46 @@ static void mux_chr_update_read_handlers(Chardev *chr)
chr->gcontext, true, false);
}
-void mux_set_focus(Chardev *chr, int focus)
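+/*
+ * Frontends are tracked in mux_bitset rather than with a running
+ * counter, so a slot can be freed again by mux_chr_detach_frontend().
+ */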
+bool mux_chr_attach_frontend(MuxChardev *d, CharBackend *b,
+ unsigned int *tag, Error **errp)
+{
+ unsigned int bit;
+
+ QEMU_BUILD_BUG_ON(MAX_MUX > (sizeof(d->mux_bitset) * BITS_PER_BYTE));
+
+ bit = find_next_zero_bit(&d->mux_bitset, MAX_MUX, 0);
+ if (bit >= MAX_MUX) {
+ error_setg(errp,
+ "too many uses of multiplexed chardev '%s'"
+ " (maximum is " stringify(MAX_MUX) ")",
+ d->parent.label);
+ return false;
+ }
+
+ d->mux_bitset |= (1ul << bit);
+ d->backends[bit] = b;
+ *tag = bit;
+
+ return true;
+}
+
+bool mux_chr_detach_frontend(MuxChardev *d, unsigned int tag)
+{
+ if (!(d->mux_bitset & (1ul << tag))) {
+ return false;
+ }
+
+ d->mux_bitset &= ~(1ul << tag);
+ d->backends[tag] = NULL;
+
+ return true;
+}
+
+void mux_set_focus(Chardev *chr, unsigned int focus)
{
MuxChardev *d = MUX_CHARDEV(chr);
- assert(focus >= 0);
- assert(focus < d->mux_cnt);
+ assert(d->mux_bitset & (1ul << focus));
if (d->focus != -1) {
mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_OUT);
@@ -402,7 +447,7 @@ void resume_mux_open(void)
chardev_options_parsed_cb, NULL);
}
-static void char_mux_class_init(ObjectClass *oc, void *data)
+static void char_mux_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-null.c b/chardev/char-null.c
index 1c6a290..89cb85d 100644
--- a/chardev/char-null.c
+++ b/chardev/char-null.c
@@ -34,7 +34,7 @@ static void null_chr_open(Chardev *chr,
*be_opened = false;
}
-static void char_null_class_init(ObjectClass *oc, void *data)
+static void char_null_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-parallel.c b/chardev/char-parallel.c
index 78697d7..62a44b2 100644
--- a/chardev/char-parallel.c
+++ b/chardev/char-parallel.c
@@ -270,7 +270,7 @@ static void qemu_chr_parse_parallel(QemuOpts *opts, ChardevBackend *backend,
parallel->device = g_strdup(device);
}
-static void char_parallel_class_init(ObjectClass *oc, void *data)
+static void char_parallel_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-pipe.c b/chardev/char-pipe.c
index 5ad30bc..3d1b0ce 100644
--- a/chardev/char-pipe.c
+++ b/chardev/char-pipe.c
@@ -171,7 +171,7 @@ static void qemu_chr_parse_pipe(QemuOpts *opts, ChardevBackend *backend,
dev->device = g_strdup(device);
}
-static void char_pipe_class_init(ObjectClass *oc, void *data)
+static void char_pipe_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-pty.c b/chardev/char-pty.c
index cc2f761..674e9b3 100644
--- a/chardev/char-pty.c
+++ b/chardev/char-pty.c
@@ -29,6 +29,7 @@
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
+#include "qemu/option.h"
#include "qemu/qemu-print.h"
#include "chardev/char-io.h"
@@ -41,6 +42,7 @@ struct PtyChardev {
int connected;
GSource *timer_src;
+ char *path;
};
typedef struct PtyChardev PtyChardev;
@@ -152,7 +154,7 @@ static gboolean pty_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque)
Chardev *chr = CHARDEV(opaque);
PtyChardev *s = PTY_CHARDEV(opaque);
gsize len;
- uint8_t buf[CHR_READ_BUF_LEN];
+ QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN];
ssize_t ret;
len = sizeof(buf);
@@ -179,6 +181,9 @@ static void pty_chr_state(Chardev *chr, int connected)
if (!connected) {
remove_fd_in_watch(chr);
+ if (s->connected) {
+ qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
+ }
s->connected = 0;
/* (re-)connect poll interval for idle guests: once per second.
* We check more frequently in case the guests sends data to
@@ -204,10 +209,15 @@ static void char_pty_finalize(Object *obj)
Chardev *chr = CHARDEV(obj);
PtyChardev *s = PTY_CHARDEV(obj);
+ /* unlink symlink */
+ if (s->path) {
+ unlink(s->path);
+ g_free(s->path);
+ }
+
pty_chr_state(chr, 0);
object_unref(OBJECT(s->ioc));
pty_chr_timer_cancel(s);
- qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
}
#if defined HAVE_PTY_H
@@ -330,6 +340,7 @@ static void char_pty_open(Chardev *chr,
int master_fd, slave_fd;
char pty_name[PATH_MAX];
char *name;
+ char *path = backend->u.pty.data->path;
master_fd = qemu_openpty_raw(&slave_fd, pty_name);
if (master_fd < 0) {
@@ -354,12 +365,36 @@ static void char_pty_open(Chardev *chr,
g_free(name);
s->timer_src = NULL;
*be_opened = false;
+
+ /* create symbolic link */
+ if (path) {
+ int res = symlink(pty_name, path);
+
+ if (res != 0) {
+ error_setg_errno(errp, errno, "Failed to create PTY symlink");
+ } else {
+ s->path = g_strdup(path);
+ }
+ }
+}
+
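+/*
+ * Parse the optional "path" option, e.g. (hypothetical)
+ * -chardev pty,id=pty0,path=/tmp/guest-pty, which exposes the freshly
+ * allocated PTY under a stable symlink name.
+ */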
+static void char_pty_parse(QemuOpts *opts, ChardevBackend *backend,
+ Error **errp)
+{
+ const char *path = qemu_opt_get(opts, "path");
+ ChardevPty *pty;
+
+ backend->type = CHARDEV_BACKEND_KIND_PTY;
+ pty = backend->u.pty.data = g_new0(ChardevPty, 1);
+ qemu_chr_parse_common(opts, qapi_ChardevPty_base(pty));
+ pty->path = g_strdup(path);
}
-static void char_pty_class_init(ObjectClass *oc, void *data)
+static void char_pty_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
+ cc->parse = char_pty_parse;
cc->open = char_pty_open;
cc->chr_write = char_pty_chr_write;
cc->chr_update_read_handler = pty_chr_update_read_handler;
diff --git a/chardev/char-ringbuf.c b/chardev/char-ringbuf.c
index d40d21d..98aadb6 100644
--- a/chardev/char-ringbuf.c
+++ b/chardev/char-ringbuf.c
@@ -223,7 +223,7 @@ static void qemu_chr_parse_ringbuf(QemuOpts *opts, ChardevBackend *backend,
}
}
-static void char_ringbuf_class_init(ObjectClass *oc, void *data)
+static void char_ringbuf_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-serial.c b/chardev/char-serial.c
index 4b0b83d..0a68b4b 100644
--- a/chardev/char-serial.c
+++ b/chardev/char-serial.c
@@ -298,7 +298,7 @@ static void qemu_chr_parse_serial(QemuOpts *opts, ChardevBackend *backend,
serial->device = g_strdup(device);
}
-static void char_serial_class_init(ObjectClass *oc, void *data)
+static void char_serial_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-socket.c b/chardev/char-socket.c
index 812d7aa..1e83139 100644
--- a/chardev/char-socket.c
+++ b/chardev/char-socket.c
@@ -33,6 +33,7 @@
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-sockets.h"
#include "qemu/yank.h"
+#include "trace.h"
#include "chardev/char-io.h"
#include "chardev/char-socket.h"
@@ -73,7 +74,7 @@ static void qemu_chr_socket_restart_timer(Chardev *chr)
assert(!s->reconnect_timer);
name = g_strdup_printf("chardev-socket-reconnect-%s", chr->label);
s->reconnect_timer = qemu_chr_timeout_add_ms(chr,
- s->reconnect_time * 1000,
+ s->reconnect_time_ms,
socket_reconnect_timeout,
chr);
g_source_set_name(s->reconnect_timer, name);
@@ -126,6 +127,7 @@ static int tcp_chr_write(Chardev *chr, const uint8_t *buf, int len)
if (ret < 0 && errno != EAGAIN) {
if (tcp_chr_read_poll(chr) <= 0) {
/* Perform disconnect and return error. */
+ trace_chr_socket_poll_err(chr, chr->label);
tcp_chr_disconnect_locked(chr);
} /* else let the read handler finish it properly */
}
@@ -279,15 +281,16 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len)
size_t i;
int *msgfds = NULL;
size_t msgfds_num = 0;
+ Error *err = NULL;
if (qio_channel_has_feature(s->ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
ret = qio_channel_readv_full(s->ioc, &iov, 1,
&msgfds, &msgfds_num,
- 0, NULL);
+ 0, &err);
} else {
ret = qio_channel_readv_full(s->ioc, &iov, 1,
NULL, NULL,
- 0, NULL);
+ 0, &err);
}
if (msgfds_num) {
@@ -322,7 +325,11 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len)
errno = EAGAIN;
ret = -1;
} else if (ret == -1) {
+ trace_chr_socket_recv_err(chr, chr->label, error_get_pretty(err));
+ error_free(err);
errno = EIO;
+ } else if (ret == 0) {
+ trace_chr_socket_recv_eof(chr, chr->label);
}
return ret;
@@ -463,6 +470,7 @@ static void tcp_chr_disconnect_locked(Chardev *chr)
SocketChardev *s = SOCKET_CHARDEV(chr);
bool emit_close = s->state == TCP_CHARDEV_STATE_CONNECTED;
+ trace_chr_socket_disconnect(chr, chr->label);
tcp_chr_free_connection(chr);
if (s->listener) {
@@ -473,7 +481,7 @@ static void tcp_chr_disconnect_locked(Chardev *chr)
if (emit_close) {
qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
}
- if (s->reconnect_time && !s->reconnect_timer) {
+ if (s->reconnect_time_ms && !s->reconnect_timer) {
qemu_chr_socket_restart_timer(chr);
}
}
@@ -489,7 +497,7 @@ static gboolean tcp_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque)
{
Chardev *chr = CHARDEV(opaque);
SocketChardev *s = SOCKET_CHARDEV(opaque);
- uint8_t buf[CHR_READ_BUF_LEN];
+ QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN];
int len, size;
if ((s->state != TCP_CHARDEV_STATE_CONNECTED) ||
@@ -521,6 +529,7 @@ static gboolean tcp_chr_hup(QIOChannel *channel,
void *opaque)
{
Chardev *chr = CHARDEV(opaque);
+ trace_chr_socket_hangup(chr, chr->label);
tcp_chr_disconnect(chr);
return G_SOURCE_REMOVE;
}
@@ -562,9 +571,13 @@ static char *qemu_chr_compute_filename(SocketChardev *s)
switch (ss->ss_family) {
case AF_UNIX:
- return g_strdup_printf("unix:%s%s",
- ((struct sockaddr_un *)(ss))->sun_path,
- s->is_listen ? ",server=on" : "");
+ if (s->is_listen) {
+ return g_strdup_printf("unix:%s,server=on",
+ ((struct sockaddr_un *)(ss))->sun_path);
+ } else {
+ return g_strdup_printf("unix:%s",
+ ((struct sockaddr_un *)(ps))->sun_path);
+ }
case AF_INET6:
left = "[";
right = "]";
@@ -672,15 +685,18 @@ static gboolean tcp_chr_telnet_init_io(QIOChannel *ioc,
SocketChardev *s = user_data;
Chardev *chr = CHARDEV(s);
TCPChardevTelnetInit *init = s->telnet_init;
+ Error *err = NULL;
ssize_t ret;
assert(init);
- ret = qio_channel_write(ioc, init->buf, init->buflen, NULL);
+ ret = qio_channel_write(ioc, init->buf, init->buflen, &err);
if (ret < 0) {
if (ret == QIO_CHANNEL_ERR_BLOCK) {
ret = 0;
} else {
+ trace_chr_socket_write_err(chr, chr->label, error_get_pretty(err));
+ error_free(err);
tcp_chr_disconnect(chr);
goto end;
}
@@ -765,9 +781,9 @@ static void tcp_chr_websock_handshake(QIOTask *task, gpointer user_data)
Error *err = NULL;
if (qio_task_propagate_error(task, &err)) {
- error_reportf_err(err,
- "websock handshake of character device %s failed: ",
- chr->label);
+ trace_chr_socket_ws_handshake_err(chr, chr->label,
+ error_get_pretty(err));
+ error_free(err);
tcp_chr_disconnect(chr);
} else {
if (s->do_telnetopt) {
@@ -805,9 +821,9 @@ static void tcp_chr_tls_handshake(QIOTask *task,
Error *err = NULL;
if (qio_task_propagate_error(task, &err)) {
- error_reportf_err(err,
- "TLS handshake of character device %s failed: ",
- chr->label);
+ trace_chr_socket_tls_handshake_err(chr, chr->label,
+ error_get_pretty(err));
+ error_free(err);
tcp_chr_disconnect(chr);
} else {
if (s->is_websock) {
@@ -826,19 +842,22 @@ static void tcp_chr_tls_init(Chardev *chr)
SocketChardev *s = SOCKET_CHARDEV(chr);
QIOChannelTLS *tioc;
gchar *name;
+ Error *err = NULL;
if (s->is_listen) {
tioc = qio_channel_tls_new_server(
s->ioc, s->tls_creds,
s->tls_authz,
- NULL);
+ &err);
} else {
tioc = qio_channel_tls_new_client(
s->ioc, s->tls_creds,
s->addr->u.inet.host,
- NULL);
+ &err);
}
if (tioc == NULL) {
+ trace_chr_socket_tls_init_err(chr, chr->label, error_get_pretty(err));
+ error_free(err);
tcp_chr_disconnect(chr);
return;
}
@@ -1065,9 +1084,9 @@ static int tcp_chr_wait_connected(Chardev *chr, Error **errp)
} else {
Error *err = NULL;
if (tcp_chr_connect_client_sync(chr, &err) < 0) {
- if (s->reconnect_time) {
+ if (s->reconnect_time_ms) {
error_free(err);
- g_usleep(s->reconnect_time * 1000ULL * 1000ULL);
+ g_usleep(s->reconnect_time_ms * 1000ULL);
} else {
error_propagate(errp, err);
return -1;
@@ -1252,13 +1271,13 @@ skip_listen:
static int qmp_chardev_open_socket_client(Chardev *chr,
- int64_t reconnect,
+ int64_t reconnect_ms,
Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
- if (reconnect > 0) {
- s->reconnect_time = reconnect;
+ if (reconnect_ms > 0) {
+ s->reconnect_time_ms = reconnect_ms;
tcp_chr_connect_client_async(chr);
return 0;
} else {
@@ -1339,6 +1358,12 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock,
}
}
+ if (sock->has_reconnect_ms && sock->has_reconnect) {
+ error_setg(errp,
+ "'reconnect' and 'reconnect-ms' are mutually exclusive");
+ return false;
+ }
+
return true;
}
@@ -1356,7 +1381,7 @@ static void qmp_chardev_open_socket(Chardev *chr,
bool is_tn3270 = sock->has_tn3270 ? sock->tn3270 : false;
bool is_waitconnect = sock->has_wait ? sock->wait : false;
bool is_websock = sock->has_websocket ? sock->websocket : false;
- int64_t reconnect = sock->has_reconnect ? sock->reconnect : 0;
+ int64_t reconnect_ms = 0;
SocketAddress *addr;
s->is_listen = is_listen;
@@ -1428,7 +1453,13 @@ static void qmp_chardev_open_socket(Chardev *chr,
return;
}
} else {
- if (qmp_chardev_open_socket_client(chr, reconnect, errp) < 0) {
+ if (sock->has_reconnect) {
+ reconnect_ms = sock->reconnect * 1000ULL;
+ } else if (sock->has_reconnect_ms) {
+ reconnect_ms = sock->reconnect_ms;
+ }
+
+ if (qmp_chardev_open_socket_client(chr, reconnect_ms, errp) < 0) {
return;
}
}
@@ -1494,6 +1525,9 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
sock->wait = qemu_opt_get_bool(opts, "wait", true);
sock->has_reconnect = qemu_opt_find(opts, "reconnect");
sock->reconnect = qemu_opt_get_number(opts, "reconnect", 0);
+ sock->has_reconnect_ms = qemu_opt_find(opts, "reconnect-ms");
+ sock->reconnect_ms = qemu_opt_get_number(opts, "reconnect-ms", 0);
+
sock->tls_creds = g_strdup(qemu_opt_get(opts, "tls-creds"));
sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz"));
@@ -1547,7 +1581,7 @@ char_socket_get_connected(Object *obj, Error **errp)
return s->state == TCP_CHARDEV_STATE_CONNECTED;
}
-static void char_socket_class_init(ObjectClass *oc, void *data)
+static void char_socket_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-stdio.c b/chardev/char-stdio.c
index b960ddd..48db8d2 100644
--- a/chardev/char-stdio.c
+++ b/chardev/char-stdio.c
@@ -136,7 +136,7 @@ static void qemu_chr_parse_stdio(QemuOpts *opts, ChardevBackend *backend,
stdio->signal = qemu_opt_get_bool(opts, "signal", true);
}
-static void char_stdio_class_init(ObjectClass *oc, void *data)
+static void char_stdio_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-udp.c b/chardev/char-udp.c
index 3d9a2d5..572fab0 100644
--- a/chardev/char-udp.c
+++ b/chardev/char-udp.c
@@ -219,7 +219,7 @@ static void qmp_chardev_open_udp(Chardev *chr,
*be_opened = false;
}
-static void char_udp_class_init(ObjectClass *oc, void *data)
+static void char_udp_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-win-stdio.c b/chardev/char-win-stdio.c
index 1a18999..fb802a0 100644
--- a/chardev/char-win-stdio.c
+++ b/chardev/char-win-stdio.c
@@ -33,6 +33,7 @@
struct WinStdioChardev {
Chardev parent;
HANDLE hStdIn;
+ DWORD dwOldMode;
HANDLE hInputReadyEvent;
HANDLE hInputDoneEvent;
HANDLE hInputThread;
@@ -159,6 +160,7 @@ static void qemu_chr_open_stdio(Chardev *chr,
}
is_console = GetConsoleMode(stdio->hStdIn, &dwMode) != 0;
+ stdio->dwOldMode = dwMode;
if (is_console) {
if (qemu_add_wait_object(stdio->hStdIn,
@@ -221,6 +223,9 @@ static void char_win_stdio_finalize(Object *obj)
{
WinStdioChardev *stdio = WIN_STDIO_CHARDEV(obj);
+ if (stdio->hStdIn != INVALID_HANDLE_VALUE) {
+ SetConsoleMode(stdio->hStdIn, stdio->dwOldMode);
+ }
if (stdio->hInputReadyEvent != INVALID_HANDLE_VALUE) {
CloseHandle(stdio->hInputReadyEvent);
}
@@ -251,7 +256,7 @@ static int win_stdio_write(Chardev *chr, const uint8_t *buf, int len)
return len - len1;
}
-static void char_win_stdio_class_init(ObjectClass *oc, void *data)
+static void char_win_stdio_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char-win.c b/chardev/char-win.c
index d4fb44c..fef45e8 100644
--- a/chardev/char-win.c
+++ b/chardev/char-win.c
@@ -220,7 +220,7 @@ void win_chr_set_file(Chardev *chr, HANDLE file, bool keep_open)
s->file = file;
}
-static void char_win_class_init(ObjectClass *oc, void *data)
+static void char_win_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/char.c b/chardev/char.c
index 3c43fb1..bbebd24 100644
--- a/chardev/char.c
+++ b/chardev/char.c
@@ -33,7 +33,7 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-char.h"
#include "qapi/qmp/qerror.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qemu/help_option.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -48,7 +48,7 @@
Object *get_chardevs_root(void)
{
- return container_get(object_get_root(), "/chardevs");
+ return object_get_container("chardevs");
}
static void chr_be_event(Chardev *s, QEMUChrEvent event)
@@ -295,7 +295,7 @@ static int null_chr_write(Chardev *chr, const uint8_t *buf, int len)
return len;
}
-static void char_class_init(ObjectClass *oc, void *data)
+static void char_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -333,7 +333,7 @@ static bool qemu_chr_is_busy(Chardev *s)
{
if (CHARDEV_IS_MUX(s)) {
MuxChardev *d = MUX_CHARDEV(s);
- return d->mux_cnt >= 0;
+ return d->mux_bitset != 0;
} else {
return s->be != NULL;
}
@@ -428,6 +428,11 @@ QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename,
qemu_opt_set(opts, "path", p, &error_abort);
return opts;
}
+ if (strstart(filename, "pty:", &p)) {
+ qemu_opt_set(opts, "backend", "pty", &error_abort);
+ qemu_opt_set(opts, "path", p, &error_abort);
+ return opts;
+ }
if (strstart(filename, "tcp:", &p) ||
strstart(filename, "telnet:", &p) ||
strstart(filename, "tn3270:", &p) ||
@@ -615,11 +620,24 @@ ChardevBackend *qemu_chr_parse_opts(QemuOpts *opts, Error **errp)
return backend;
}
-Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
- Error **errp)
+static void qemu_chardev_set_replay(Chardev *chr, Error **errp)
+{
+ if (replay_mode != REPLAY_MODE_NONE) {
+ if (CHARDEV_GET_CLASS(chr)->chr_ioctl) {
+ error_setg(errp, "Replay: ioctl is not supported "
+ "for serial devices yet");
+ return;
+ }
+ qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_REPLAY);
+ replay_register_char_driver(chr);
+ }
+}
+
+static Chardev *do_qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
+ bool replay, Error **errp)
{
const ChardevClass *cc;
- Chardev *chr = NULL;
+ Chardev *base = NULL, *chr = NULL;
ChardevBackend *backend = NULL;
const char *name = qemu_opt_get(opts, "backend");
const char *id = qemu_opts_id(opts);
@@ -657,11 +675,11 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
chr = qemu_chardev_new(bid ? bid : id,
object_class_get_name(OBJECT_CLASS(cc)),
backend, context, errp);
-
if (chr == NULL) {
goto out;
}
+ base = chr;
if (bid) {
Chardev *mux;
qapi_free_ChardevBackend(backend);
@@ -681,11 +699,25 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
out:
qapi_free_ChardevBackend(backend);
g_free(bid);
+
+ if (replay && base) {
+ /* RR should be set on the base device, not the mux */
+ qemu_chardev_set_replay(base, errp);
+ }
+
return chr;
}
-Chardev *qemu_chr_new_noreplay(const char *label, const char *filename,
- bool permit_mux_mon, GMainContext *context)
+Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
+ Error **errp)
+{
+ /* XXX: should this really not record/replay? */
+ return do_qemu_chr_new_from_opts(opts, context, false, errp);
+}
+
+static Chardev *qemu_chr_new_from_name(const char *label, const char *filename,
+ bool permit_mux_mon,
+ GMainContext *context, bool replay)
{
const char *p;
Chardev *chr;
@@ -693,14 +725,22 @@ Chardev *qemu_chr_new_noreplay(const char *label, const char *filename,
Error *err = NULL;
if (strstart(filename, "chardev:", &p)) {
- return qemu_chr_find(p);
+ chr = qemu_chr_find(p);
+ if (replay && chr) {
+ qemu_chardev_set_replay(chr, &err);
+ if (err) {
+ error_report_err(err);
+ return NULL;
+ }
+ }
+ return chr;
}
opts = qemu_chr_parse_compat(label, filename, permit_mux_mon);
if (!opts)
return NULL;
- chr = qemu_chr_new_from_opts(opts, context, &err);
+ chr = do_qemu_chr_new_from_opts(opts, context, replay, &err);
if (!chr) {
error_report_err(err);
goto out;
@@ -722,24 +762,20 @@ out:
return chr;
}
+Chardev *qemu_chr_new_noreplay(const char *label, const char *filename,
+ bool permit_mux_mon, GMainContext *context)
+{
+ return qemu_chr_new_from_name(label, filename, permit_mux_mon, context,
+ false);
+}
+
static Chardev *qemu_chr_new_permit_mux_mon(const char *label,
const char *filename,
bool permit_mux_mon,
GMainContext *context)
{
- Chardev *chr;
- chr = qemu_chr_new_noreplay(label, filename, permit_mux_mon, context);
- if (chr) {
- if (replay_mode != REPLAY_MODE_NONE) {
- qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_REPLAY);
- }
- if (qemu_chr_replay(chr) && CHARDEV_GET_CLASS(chr)->chr_ioctl) {
- error_report("Replay: ioctl is not supported "
- "for serial devices yet");
- }
- replay_register_char_driver(chr);
- }
- return chr;
+ return qemu_chr_new_from_name(label, filename, permit_mux_mon, context,
+ true);
}
Chardev *qemu_chr_new(const char *label, const char *filename,
@@ -860,6 +896,9 @@ QemuOptsList qemu_chardev_opts = {
.name = "reconnect",
.type = QEMU_OPT_NUMBER,
},{
+ .name = "reconnect-ms",
+ .type = QEMU_OPT_NUMBER,
+ },{
.name = "telnet",
.type = QEMU_OPT_BOOL,
},{
@@ -904,7 +943,26 @@ QemuOptsList qemu_chardev_opts = {
},{
.name = "chardev",
.type = QEMU_OPT_STRING,
+ },
+ /*
+ * Multiplexer options. Follows QAPI array syntax.
+ * See MAX_HUB macro to obtain array capacity.
+ */
+ {
+ .name = "chardevs.0",
+ .type = QEMU_OPT_STRING,
+ },{
+ .name = "chardevs.1",
+ .type = QEMU_OPT_STRING,
},{
+ .name = "chardevs.2",
+ .type = QEMU_OPT_STRING,
+ },{
+ .name = "chardevs.3",
+ .type = QEMU_OPT_STRING,
+ },
+
+ {
.name = "append",
.type = QEMU_OPT_BOOL,
},{
@@ -1067,8 +1125,8 @@ ChardevReturn *qmp_chardev_change(const char *id, ChardevBackend *backend,
return NULL;
}
- if (CHARDEV_IS_MUX(chr)) {
- error_setg(errp, "Mux device hotswap not supported yet");
+ if (CHARDEV_IS_MUX(chr) || CHARDEV_IS_HUB(chr)) {
+ error_setg(errp, "For mux or hub device hotswap is not supported yet");
return NULL;
}
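
The char.c hunks above add a millisecond-granularity "reconnect-ms" option and the per-slot "chardevs.0".."chardevs.3" options (capacity given by MAX_HUB) to qemu_chardev_opts. Below is a minimal sketch of how those options could be assembled programmatically, assuming the new backend added by char-hub.c registers under the name "hub" and that qemu_chardev_opts is visible to the caller; error handling is elided and the helper name is hypothetical:

/* Hypothetical sketch: build a hub chardev over two existing backends. */
#include "qemu/osdep.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include "chardev/char.h"

static Chardev *sketch_create_hub(GMainContext *ctx)
{
    QemuOpts *opts = qemu_opts_create(&qemu_chardev_opts, "hub0",
                                      1, &error_abort);

    qemu_opt_set(opts, "backend", "hub", &error_abort);     /* assumed name */
    qemu_opt_set(opts, "chardevs.0", "s0", &error_abort);   /* 1st backend */
    qemu_opt_set(opts, "chardevs.1", "s1", &error_abort);   /* 2nd backend */

    return qemu_chr_new_from_opts(opts, ctx, &error_abort);
}

On the command line this would roughly correspond to
"-chardev hub,id=hub0,chardevs.0=s0,chardevs.1=s1" (again assuming the
backend name "hub").
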
diff --git a/chardev/chardev-internal.h b/chardev/chardev-internal.h
index 4e03af3..9752dd7 100644
--- a/chardev/chardev-internal.h
+++ b/chardev/chardev-internal.h
@@ -29,38 +29,89 @@
#include "chardev/char-fe.h"
#include "qom/object.h"
+#define MAX_HUB 4
#define MAX_MUX 4
#define MUX_BUFFER_SIZE 32 /* Must be a power of 2. */
#define MUX_BUFFER_MASK (MUX_BUFFER_SIZE - 1)
struct MuxChardev {
Chardev parent;
+ /* Linked frontends */
CharBackend *backends[MAX_MUX];
+ /* Linked backend */
CharBackend chr;
+ unsigned long mux_bitset;
int focus;
- int mux_cnt;
- int term_got_escape;
- int max_size;
+ bool term_got_escape;
/* Intermediate input buffer catches escape sequences even if the
currently active device is not accepting any input - but only until it
is full as well. */
unsigned char buffer[MAX_MUX][MUX_BUFFER_SIZE];
- int prod[MAX_MUX];
- int cons[MAX_MUX];
+ unsigned int prod[MAX_MUX];
+ unsigned int cons[MAX_MUX];
int timestamps;
/* Protected by the Chardev chr_write_lock. */
- int linestart;
+ bool linestart;
int64_t timestamps_start;
};
typedef struct MuxChardev MuxChardev;
+typedef struct HubChardev HubChardev;
+typedef struct HubCharBackend HubCharBackend;
+
+/*
+ * Back-pointer to the owning hub, the actual backend and its index in
+ * the `hub->backends` array
+ */
+struct HubCharBackend {
+ HubChardev *hub;
+ CharBackend be;
+ unsigned int be_ind;
+};
+
+struct HubChardev {
+ Chardev parent;
+ /* Linked backends */
+ HubCharBackend backends[MAX_HUB];
+ /*
+ * Number of backends attached to this hub. Once attached, a
+ * backend can't be detached, so the counter only increases.
+ * To safely remove a backend, the hub has to be removed first.
+ */
+ unsigned int be_cnt;
+ /*
+ * Number of CHR_EVENT_OPENED events from all backends. Needed to
+ * send CHR_EVENT_CLOSED only when the counter goes to zero.
+ */
+ unsigned int be_event_opened_cnt;
+ /*
+ * Counters of written bytes from a single frontend device
+ * to multiple backend devices.
+ */
+ unsigned int be_written[MAX_HUB];
+ unsigned int be_min_written;
+ /*
+ * Index of the backend device which got EAGAIN on the last write;
+ * -1 means no backend is pending.
+ */
+ int be_eagain_ind;
+};
+typedef struct HubChardev HubChardev;
DECLARE_INSTANCE_CHECKER(MuxChardev, MUX_CHARDEV,
TYPE_CHARDEV_MUX)
-#define CHARDEV_IS_MUX(chr) \
+DECLARE_INSTANCE_CHECKER(HubChardev, HUB_CHARDEV,
+ TYPE_CHARDEV_HUB)
+
+#define CHARDEV_IS_MUX(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_MUX)
+#define CHARDEV_IS_HUB(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_HUB)
-void mux_set_focus(Chardev *chr, int focus);
+bool mux_chr_attach_frontend(MuxChardev *d, CharBackend *b,
+ unsigned int *tag, Error **errp);
+bool mux_chr_detach_frontend(MuxChardev *d, unsigned int tag);
+void mux_set_focus(Chardev *chr, unsigned int focus);
void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event);
Object *get_chardevs_root(void);
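
In the header above, occupied frontend slots are now tracked in the mux_bitset bitmap instead of the old monotonically growing mux_cnt, and frontends are attached and detached through mux_chr_attach_frontend()/mux_chr_detach_frontend(), which hand back a slot tag. The real implementation lives in char-mux.c (not part of this section); the following is only a sketch of the bookkeeping the new prototypes imply, written with plain bit operations:

/* Hypothetical illustration of the mux_bitset slot bookkeeping. */
static bool sketch_mux_attach(MuxChardev *d, CharBackend *b, unsigned int *tag)
{
    for (unsigned int i = 0; i < MAX_MUX; i++) {
        if (!(d->mux_bitset & (1UL << i))) {
            d->mux_bitset |= 1UL << i;   /* claim slot i */
            d->backends[i] = b;
            *tag = i;
            return true;
        }
    }
    return false;                        /* all MAX_MUX slots taken */
}

static bool sketch_mux_detach(MuxChardev *d, unsigned int tag)
{
    if (tag >= MAX_MUX || !(d->mux_bitset & (1UL << tag))) {
        return false;                    /* nothing attached at this tag */
    }
    d->mux_bitset &= ~(1UL << tag);
    d->backends[tag] = NULL;
    return true;
}

This is also why qemu_chr_is_busy() above changed from "mux_cnt >= 0" to "mux_bitset != 0": a mux is busy only while at least one frontend slot is actually in use.
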
diff --git a/chardev/meson.build b/chardev/meson.build
index 70070a8..56ee39a 100644
--- a/chardev/meson.build
+++ b/chardev/meson.build
@@ -3,6 +3,7 @@ chardev_ss.add(files(
'char-file.c',
'char-io.c',
'char-mux.c',
+ 'char-hub.c',
'char-null.c',
'char-pipe.c',
'char-ringbuf.c',
diff --git a/chardev/msmouse.c b/chardev/msmouse.c
index a774c39..1a55755 100644
--- a/chardev/msmouse.c
+++ b/chardev/msmouse.c
@@ -81,7 +81,7 @@ static void msmouse_chr_accept_input(Chardev *chr)
const uint8_t *buf;
uint32_t size;
- buf = fifo8_pop_buf(&mouse->outbuf, MIN(len, avail), &size);
+ buf = fifo8_pop_bufptr(&mouse->outbuf, MIN(len, avail), &size);
qemu_chr_be_write(chr, buf, size);
len = qemu_chr_be_can_write(chr);
avail -= size;
@@ -267,7 +267,7 @@ static void msmouse_chr_open(Chardev *chr,
fifo8_create(&mouse->outbuf, MSMOUSE_BUF_SZ);
}
-static void char_msmouse_class_init(ObjectClass *oc, void *data)
+static void char_msmouse_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
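
The msmouse change above switches from fifo8_pop_buf() to fifo8_pop_bufptr(), which returns a pointer into the FIFO's own storage and may report fewer bytes than requested when the ring buffer wraps, so the caller loops on the returned size. A minimal sketch of that consumption pattern, mirroring msmouse_chr_accept_input(); the surrounding function is hypothetical:

/* Hypothetical drain loop built on fifo8_pop_bufptr(). */
#include "qemu/osdep.h"
#include "qemu/fifo8.h"
#include "chardev/char.h"

static void sketch_drain(Chardev *chr, Fifo8 *fifo)
{
    uint32_t len = qemu_chr_be_can_write(chr);
    uint32_t avail = fifo8_num_used(fifo);

    while (len > 0 && avail > 0) {
        uint32_t size;
        const uint8_t *buf = fifo8_pop_bufptr(fifo, MIN(len, avail), &size);

        /* size may be smaller than requested if the FIFO wrapped */
        qemu_chr_be_write(chr, buf, size);
        len = qemu_chr_be_can_write(chr);
        avail -= size;
    }
}
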
diff --git a/chardev/spice.c b/chardev/spice.c
index e843d96..db53b49 100644
--- a/chardev/spice.c
+++ b/chardev/spice.c
@@ -347,7 +347,7 @@ static void qemu_chr_parse_spice_port(QemuOpts *opts, ChardevBackend *backend,
spiceport->fqdn = g_strdup(name);
}
-static void char_spice_class_init(ObjectClass *oc, void *data)
+static void char_spice_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -366,7 +366,7 @@ static const TypeInfo char_spice_type_info = {
};
module_obj(TYPE_CHARDEV_SPICE);
-static void char_spicevmc_class_init(ObjectClass *oc, void *data)
+static void char_spicevmc_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -382,7 +382,7 @@ static const TypeInfo char_spicevmc_type_info = {
};
module_obj(TYPE_CHARDEV_SPICEVMC);
-static void char_spiceport_class_init(ObjectClass *oc, void *data)
+static void char_spiceport_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/testdev.c b/chardev/testdev.c
index a92caca..e91f4e8 100644
--- a/chardev/testdev.c
+++ b/chardev/testdev.c
@@ -110,7 +110,7 @@ static int testdev_chr_write(Chardev *chr, const uint8_t *buf, int len)
return orig_len;
}
-static void char_testdev_class_init(ObjectClass *oc, void *data)
+static void char_testdev_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/chardev/trace-events b/chardev/trace-events
index 027107b..7e97b8a 100644
--- a/chardev/trace-events
+++ b/chardev/trace-events
@@ -17,3 +17,13 @@ spice_vmc_register_interface(void *scd) "spice vmc registered interface %p"
spice_vmc_unregister_interface(void *scd) "spice vmc unregistered interface %p"
spice_vmc_event(int event) "spice vmc event %d"
+# char-socket.c
+chr_socket_poll_err(void *chrdev, const char *label) "chardev socket poll error %p (%s)"
+chr_socket_recv_err(void *chrdev, const char *label, const char *err) "chardev socket recv error %p (%s): %s"
+chr_socket_recv_eof(void *chrdev, const char *label) "chardev socket recv end-of-file %p (%s)"
+chr_socket_write_err(void *chrdev, const char *label, const char *err) "chardev socket write error %p (%s): %s"
+chr_socket_disconnect(void *chrdev, const char *label) "chardev socket disconnect %p (%s)"
+chr_socket_hangup(void *chrdev, const char *label) "chardev socket hangup %p (%s)"
+chr_socket_ws_handshake_err(void *chrdev, const char *label, const char *err) "chardev socket websock handshake error %p (%s): %s"
+chr_socket_tls_handshake_err(void *chrdev, const char *label, const char *err) "chardev socket TLS handshake error %p (%s): %s"
+chr_socket_tls_init_err(void *chrdev, const char *label, const char *err) "chardev socket TLS init error %p (%s): %s"
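
Each line added to chardev/trace-events above declares a trace point; the build generates a trace_<event-name>() helper taking the listed arguments, which char-socket.c can then call. A sketch of what a call site looks like, assuming the usual trace.h generation (the wrapper function here is purely illustrative):

/* Hypothetical call site for one of the events declared above. */
#include "qemu/osdep.h"
#include "chardev/char.h"
#include "trace.h"

static void sketch_report_recv_error(Chardev *chr, int saved_errno)
{
    /* chr_socket_recv_err(void *chrdev, const char *label, const char *err) */
    trace_chr_socket_recv_err(chr, chr->label, strerror(saved_errno));
}
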
diff --git a/chardev/wctablet.c b/chardev/wctablet.c
index f4008bf..0dc6ef0 100644
--- a/chardev/wctablet.c
+++ b/chardev/wctablet.c
@@ -342,7 +342,7 @@ static void wctablet_chr_open(Chardev *chr,
&wctablet_handler);
}
-static void wctablet_chr_class_init(ObjectClass *oc, void *data)
+static void wctablet_chr_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/clippy.toml b/clippy.toml
new file mode 100644
index 0000000..9016172
--- /dev/null
+++ b/clippy.toml
@@ -0,0 +1,3 @@
+doc-valid-idents = ["IrDA", "PrimeCell", ".."]
+allow-mixed-uninlined-format-args = false
+msrv = "1.77.0"
diff --git a/common-user/host/riscv/safe-syscall.inc.S b/common-user/host/riscv/safe-syscall.inc.S
index dfe83c3..c8b81e3 100644
--- a/common-user/host/riscv/safe-syscall.inc.S
+++ b/common-user/host/riscv/safe-syscall.inc.S
@@ -69,11 +69,11 @@ safe_syscall_end:
/* code path setting errno */
0: neg a0, a0
- j safe_syscall_set_errno_tail
+ tail safe_syscall_set_errno_tail
/* code path when we didn't execute the syscall */
2: li a0, QEMU_ERESTARTSYS
- j safe_syscall_set_errno_tail
+ tail safe_syscall_set_errno_tail
.cfi_endproc
.size safe_syscall_base, .-safe_syscall_base
diff --git a/common-user/plugin-api.c.inc b/common-user/plugin-api.c.inc
new file mode 100644
index 0000000..5b8a139
--- /dev/null
+++ b/common-user/plugin-api.c.inc
@@ -0,0 +1,43 @@
+/*
+ * QEMU Plugin API - *-user-mode only implementations
+ *
+ * Common user-mode only APIs are in plugins/api-user. These helpers
+ * are only specific to the *-user frontends.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/plugin.h"
+#include "qemu.h"
+
+/*
+ * Binary path, start and end locations. Host specific due to TaskState.
+ */
+const char *qemu_plugin_path_to_binary(void)
+{
+ TaskState *ts = get_task_state(current_cpu);
+ return g_strdup(ts->bprm->filename);
+}
+
+uint64_t qemu_plugin_start_code(void)
+{
+ TaskState *ts = get_task_state(current_cpu);
+ return ts->info->start_code;
+}
+
+uint64_t qemu_plugin_end_code(void)
+{
+ TaskState *ts = get_task_state(current_cpu);
+ return ts->info->end_code;
+}
+
+uint64_t qemu_plugin_entry_code(void)
+{
+ TaskState *ts = get_task_state(current_cpu);
+ return ts->info->entry;
+}
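
The helpers above provide the user-mode side of the public plugin API (binary path plus start/end/entry addresses). A minimal sketch of a TCG plugin consuming one of them, assuming the standard qemu-plugin.h entry points; the output text is illustrative only:

/* Hypothetical plugin snippet using qemu_plugin_path_to_binary(). */
#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    /* The helper g_strdup()s the path, so the plugin owns and frees it. */
    char *path = (char *)qemu_plugin_path_to_binary();

    qemu_plugin_outs("target binary: ");
    qemu_plugin_outs(path);
    qemu_plugin_outs("\n");
    g_free(path);
    return 0;
}
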
diff --git a/configs/devices/aarch64-softmmu/default.mak b/configs/devices/aarch64-softmmu/default.mak
index f82a04c..ad8028c 100644
--- a/configs/devices/aarch64-softmmu/default.mak
+++ b/configs/devices/aarch64-softmmu/default.mak
@@ -8,3 +8,5 @@ include ../arm-softmmu/default.mak
# CONFIG_XLNX_ZYNQMP_ARM=n
# CONFIG_XLNX_VERSAL=n
# CONFIG_SBSA_REF=n
+# CONFIG_NPCM8XX=n
+CONFIG_VMAPPLE=n
diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak
index 31f77c2..57ef1b8 100644
--- a/configs/devices/arm-softmmu/default.mak
+++ b/configs/devices/arm-softmmu/default.mak
@@ -18,9 +18,7 @@
# CONFIG_MUSICPAL=n
# CONFIG_MPS3R=n
# CONFIG_MUSCA=n
-# CONFIG_CHEETAH=n
# CONFIG_SX1=n
-# CONFIG_NSERIES=n
# CONFIG_STELLARIS=n
# CONFIG_STM32VLDISCOVERY=n
# CONFIG_B_L475E_IOT01A=n
@@ -28,11 +26,6 @@
# CONFIG_VERSATILE=n
# CONFIG_VEXPRESS=n
# CONFIG_ZYNQ=n
-# CONFIG_MAINSTONE=n
-# CONFIG_GUMSTIX=n
-# CONFIG_SPITZ=n
-# CONFIG_TOSA=n
-# CONFIG_Z2=n
# CONFIG_NPCM7XX=n
# CONFIG_COLLIE=n
# CONFIG_ASPEED_SOC=n
diff --git a/configs/devices/cris-softmmu/default.mak b/configs/devices/cris-softmmu/default.mak
deleted file mode 100644
index ff73cd4..0000000
--- a/configs/devices/cris-softmmu/default.mak
+++ /dev/null
@@ -1,4 +0,0 @@
-# Default configuration for cris-softmmu
-
-# Boards are selected by default, uncomment to keep out of the build.
-# CONFIG_AXIS=n
diff --git a/configs/devices/i386-softmmu/default.mak b/configs/devices/i386-softmmu/default.mak
index 448e3e3..bc0479a 100644
--- a/configs/devices/i386-softmmu/default.mak
+++ b/configs/devices/i386-softmmu/default.mak
@@ -18,6 +18,7 @@
#CONFIG_QXL=n
#CONFIG_SEV=n
#CONFIG_SGA=n
+#CONFIG_TDX=n
#CONFIG_TEST_DEVICES=n
#CONFIG_TPM_CRB=n
#CONFIG_TPM_TIS_ISA=n
@@ -29,3 +30,4 @@
# CONFIG_I440FX=n
# CONFIG_Q35=n
# CONFIG_MICROVM=n
+# CONFIG_NITRO_ENCLAVE=n
diff --git a/configs/devices/microblaze-softmmu/default.mak b/configs/devices/microblaze-softmmu/default.mak
index 583e395..7894106 100644
--- a/configs/devices/microblaze-softmmu/default.mak
+++ b/configs/devices/microblaze-softmmu/default.mak
@@ -2,5 +2,3 @@
# Boards are selected by default, uncomment to keep out of the build.
# CONFIG_PETALOGIX_S3ADSP1800=n
-# CONFIG_PETALOGIX_ML605=n
-# CONFIG_XLNX_ZYNQMP_PMU=n
diff --git a/configs/devices/microblazeel-softmmu/default.mak b/configs/devices/microblazeel-softmmu/default.mak
index 29f7f13..4c10864 100644
--- a/configs/devices/microblazeel-softmmu/default.mak
+++ b/configs/devices/microblazeel-softmmu/default.mak
@@ -1,3 +1,6 @@
# Default configuration for microblazeel-softmmu
-include ../microblaze-softmmu/default.mak
+# Boards are selected by default, uncomment to keep out of the build.
+# CONFIG_PETALOGIX_S3ADSP1800=n
+# CONFIG_PETALOGIX_ML605=n
+# CONFIG_XLNX_ZYNQMP_PMU=n
diff --git a/configs/devices/sh4-softmmu/default.mak b/configs/devices/sh4-softmmu/default.mak
index c06a427..efb401b 100644
--- a/configs/devices/sh4-softmmu/default.mak
+++ b/configs/devices/sh4-softmmu/default.mak
@@ -1,4 +1,4 @@
-# Default configuration for sh4eb-softmmu
+# Default configuration for sh4-softmmu
# Uncomment the following lines to disable these optional devices:
#
@@ -7,4 +7,3 @@
# Boards are selected by default, uncomment to keep out of the build.
# CONFIG_R2D=n
-# CONFIG_SHIX=n
diff --git a/configs/meson/emscripten.txt b/configs/meson/emscripten.txt
new file mode 100644
index 0000000..4230e88
--- /dev/null
+++ b/configs/meson/emscripten.txt
@@ -0,0 +1,8 @@
+[built-in options]
+c_args = ['-pthread']
+cpp_args = ['-pthread']
+objc_args = ['-pthread']
+# -sPROXY_TO_PTHREAD link time flag always requires -pthread even during
+# configuration so explicitly add the flag here.
+c_link_args = ['-pthread','-sASYNCIFY=1','-sPROXY_TO_PTHREAD=1','-sFORCE_FILESYSTEM','-sALLOW_TABLE_GROWTH','-sTOTAL_MEMORY=2GB','-sWASM_BIGINT','-sEXPORT_ES6=1','-sASYNCIFY_IMPORTS=ffi_call_js','-sEXPORTED_RUNTIME_METHODS=addFunction,removeFunction,TTY,FS']
+cpp_link_args = ['-pthread','-sASYNCIFY=1','-sPROXY_TO_PTHREAD=1','-sFORCE_FILESYSTEM','-sALLOW_TABLE_GROWTH','-sTOTAL_MEMORY=2GB','-sWASM_BIGINT','-sEXPORT_ES6=1','-sASYNCIFY_IMPORTS=ffi_call_js','-sEXPORTED_RUNTIME_METHODS=addFunction,removeFunction,TTY,FS']
diff --git a/configs/targets/aarch64-bsd-user.mak b/configs/targets/aarch64-bsd-user.mak
new file mode 100644
index 0000000..f99c733
--- /dev/null
+++ b/configs/targets/aarch64-bsd-user.mak
@@ -0,0 +1,4 @@
+TARGET_ARCH=aarch64
+TARGET_BASE_ARCH=arm
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak
index 8f0ed21..b779ac3 100644
--- a/configs/targets/aarch64-linux-user.mak
+++ b/configs/targets/aarch64-linux-user.mak
@@ -4,3 +4,6 @@ TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_SYSTBL_ABI=common,64,renameat,rlimit,memfd_secret
+TARGET_SYSTBL=syscall_64.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak
index 84cb32d..5dfeb35 100644
--- a/configs/targets/aarch64-softmmu.mak
+++ b/configs/targets/aarch64-softmmu.mak
@@ -1,7 +1,7 @@
TARGET_ARCH=aarch64
TARGET_BASE_ARCH=arm
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml
# needed by boot.c
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak
index acb5620..ef9be02 100644
--- a/configs/targets/aarch64_be-linux-user.mak
+++ b/configs/targets/aarch64_be-linux-user.mak
@@ -1,7 +1,10 @@
TARGET_ARCH=aarch64
TARGET_BASE_ARCH=arm
TARGET_BIG_ENDIAN=y
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml gdb-xml/aarch64-mte.xml
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_SYSTBL_ABI=common,64,renameat,rlimit,memfd_secret
+TARGET_SYSTBL=syscall_64.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak
index f7d3fb4..ef8e365 100644
--- a/configs/targets/alpha-linux-user.mak
+++ b/configs/targets/alpha-linux-user.mak
@@ -1,3 +1,4 @@
TARGET_ARCH=alpha
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak
index 9dbe160..5275076 100644
--- a/configs/targets/alpha-softmmu.mak
+++ b/configs/targets/alpha-softmmu.mak
@@ -1,2 +1,2 @@
TARGET_ARCH=alpha
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/arm-bsd-user.mak b/configs/targets/arm-bsd-user.mak
index cb143e6..472a4f9 100644
--- a/configs/targets/arm-bsd-user.mak
+++ b/configs/targets/arm-bsd-user.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=arm
TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/arm-linux-user.mak b/configs/targets/arm-linux-user.mak
index 7f5d657..bf35ded 100644
--- a/configs/targets/arm-linux-user.mak
+++ b/configs/targets/arm-linux-user.mak
@@ -5,3 +5,4 @@ TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/arm-softmmu.mak b/configs/targets/arm-softmmu.mak
index bf390b7..6a5a8ed 100644
--- a/configs/targets/arm-softmmu.mak
+++ b/configs/targets/arm-softmmu.mak
@@ -1,5 +1,5 @@
TARGET_ARCH=arm
-TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
# needed by boot.c
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/armeb-linux-user.mak b/configs/targets/armeb-linux-user.mak
index 943d0d8..35fa4d9 100644
--- a/configs/targets/armeb-linux-user.mak
+++ b/configs/targets/armeb-linux-user.mak
@@ -6,3 +6,4 @@ TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml
TARGET_HAS_BFLT=y
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/avr-softmmu.mak b/configs/targets/avr-softmmu.mak
index e3f921c..b6157fc 100644
--- a/configs/targets/avr-softmmu.mak
+++ b/configs/targets/avr-softmmu.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=avr
TARGET_XML_FILES= gdb-xml/avr-cpu.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/cris-linux-user.mak b/configs/targets/cris-linux-user.mak
deleted file mode 100644
index e483c42..0000000
--- a/configs/targets/cris-linux-user.mak
+++ /dev/null
@@ -1 +0,0 @@
-TARGET_ARCH=cris
diff --git a/configs/targets/cris-softmmu.mak b/configs/targets/cris-softmmu.mak
deleted file mode 100644
index e483c42..0000000
--- a/configs/targets/cris-softmmu.mak
+++ /dev/null
@@ -1 +0,0 @@
-TARGET_ARCH=cris
diff --git a/configs/targets/hexagon-linux-user.mak b/configs/targets/hexagon-linux-user.mak
index 2765a4c..aec1a04 100644
--- a/configs/targets/hexagon-linux-user.mak
+++ b/configs/targets/hexagon-linux-user.mak
@@ -1,2 +1,5 @@
TARGET_ARCH=hexagon
TARGET_XML_FILES=gdb-xml/hexagon-core.xml gdb-xml/hexagon-hvx.xml
+TARGET_SYSTBL=syscall.tbl
+TARGET_SYSTBL_ABI=common,32,hexagon,time32,stat64,rlimit,renameat
+TARGET_LONG_BITS=32
diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak
index 8e0a804..59190f63 100644
--- a/configs/targets/hppa-linux-user.mak
+++ b/configs/targets/hppa-linux-user.mak
@@ -3,3 +3,5 @@ TARGET_ABI32=y
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
+# Compromise to ease maintenance vs system mode
+TARGET_LONG_BITS=64
diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak
index a41662a..ea33110 100644
--- a/configs/targets/hppa-softmmu.mak
+++ b/configs/targets/hppa-softmmu.mak
@@ -1,3 +1,3 @@
TARGET_ARCH=hppa
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/i386-bsd-user.mak b/configs/targets/i386-bsd-user.mak
index 0283bb6..70e098d 100644
--- a/configs/targets/i386-bsd-user.mak
+++ b/configs/targets/i386-bsd-user.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=i386
TARGET_XML_FILES= gdb-xml/i386-32bit.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/i386-linux-user.mak b/configs/targets/i386-linux-user.mak
index 5b2546a..ea68a26 100644
--- a/configs/targets/i386-linux-user.mak
+++ b/configs/targets/i386-linux-user.mak
@@ -1,4 +1,5 @@
TARGET_ARCH=i386
TARGET_SYSTBL_ABI=i386
TARGET_SYSTBL=syscall_32.tbl
-TARGET_XML_FILES= gdb-xml/i386-32bit.xml
+TARGET_XML_FILES= gdb-xml/i386-32bit.xml gdb-xml/i386-32bit-linux.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/i386-softmmu.mak b/configs/targets/i386-softmmu.mak
index 2ac69d5..e9d89e8 100644
--- a/configs/targets/i386-softmmu.mak
+++ b/configs/targets/i386-softmmu.mak
@@ -1,4 +1,5 @@
TARGET_ARCH=i386
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
+TARGET_KVM_HAVE_RESET_PARKED_VCPU=y
TARGET_XML_FILES= gdb-xml/i386-32bit.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/loongarch64-linux-user.mak b/configs/targets/loongarch64-linux-user.mak
index ea9b7e8..249a26a 100644
--- a/configs/targets/loongarch64-linux-user.mak
+++ b/configs/targets/loongarch64-linux-user.mak
@@ -2,3 +2,6 @@
TARGET_ARCH=loongarch64
TARGET_BASE_ARCH=loongarch
TARGET_XML_FILES=gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml gdb-xml/loongarch-lsx.xml gdb-xml/loongarch-lasx.xml
+TARGET_SYSTBL=syscall.tbl
+TARGET_SYSTBL_ABI=common,64
+TARGET_LONG_BITS=64
diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak
index ce19ab6..fc44c54 100644
--- a/configs/targets/loongarch64-softmmu.mak
+++ b/configs/targets/loongarch64-softmmu.mak
@@ -1,7 +1,7 @@
TARGET_ARCH=loongarch64
TARGET_BASE_ARCH=loongarch
TARGET_KVM_HAVE_GUEST_DEBUG=y
-TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/loongarch-base32.xml gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml gdb-xml/loongarch-lsx.xml gdb-xml/loongarch-lasx.xml
# all boards require libfdt
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/m68k-linux-user.mak b/configs/targets/m68k-linux-user.mak
index 579b5d2..2d9bae2 100644
--- a/configs/targets/m68k-linux-user.mak
+++ b/configs/targets/m68k-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml
TARGET_HAS_BFLT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/m68k-softmmu.mak b/configs/targets/m68k-softmmu.mak
index bbcd0ba..bacc52e 100644
--- a/configs/targets/m68k-softmmu.mak
+++ b/configs/targets/m68k-softmmu.mak
@@ -1,3 +1,4 @@
TARGET_ARCH=m68k
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/microblaze-linux-user.mak b/configs/targets/microblaze-linux-user.mak
index 0a2322c..3772779 100644
--- a/configs/targets/microblaze-linux-user.mak
+++ b/configs/targets/microblaze-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_HAS_BFLT=y
TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/microblaze-softmmu.mak b/configs/targets/microblaze-softmmu.mak
index eea266d..bab7b49 100644
--- a/configs/targets/microblaze-softmmu.mak
+++ b/configs/targets/microblaze-softmmu.mak
@@ -1,6 +1,6 @@
TARGET_ARCH=microblaze
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
# needed by boot.c
TARGET_NEED_FDT=y
TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/microblazeel-linux-user.mak b/configs/targets/microblazeel-linux-user.mak
index 2707431..a51a054 100644
--- a/configs/targets/microblazeel-linux-user.mak
+++ b/configs/targets/microblazeel-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
TARGET_HAS_BFLT=y
TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/microblazeel-softmmu.mak b/configs/targets/microblazeel-softmmu.mak
index 77b968a..8aee7eb 100644
--- a/configs/targets/microblazeel-softmmu.mak
+++ b/configs/targets/microblazeel-softmmu.mak
@@ -1,5 +1,5 @@
TARGET_ARCH=microblaze
-TARGET_SUPPORTS_MTTCG=y
# needed by boot.c
TARGET_NEED_FDT=y
TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/mips-linux-user.mak b/configs/targets/mips-linux-user.mak
index b4569a9..69bdc45 100644
--- a/configs/targets/mips-linux-user.mak
+++ b/configs/targets/mips-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_ABI_MIPSO32=y
TARGET_SYSTBL_ABI=o32
TARGET_SYSTBL=syscall_o32.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/mips-softmmu.mak b/configs/targets/mips-softmmu.mak
index d34b408..c958806 100644
--- a/configs/targets/mips-softmmu.mak
+++ b/configs/targets/mips-softmmu.mak
@@ -1,3 +1,3 @@
TARGET_ARCH=mips
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/mips64-linux-user.mak b/configs/targets/mips64-linux-user.mak
index d2ff509..04e82b3 100644
--- a/configs/targets/mips64-linux-user.mak
+++ b/configs/targets/mips64-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_BASE_ARCH=mips
TARGET_SYSTBL_ABI=n64
TARGET_SYSTBL=syscall_n64.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/mips64-softmmu.mak b/configs/targets/mips64-softmmu.mak
index 12d9483..7202655 100644
--- a/configs/targets/mips64-softmmu.mak
+++ b/configs/targets/mips64-softmmu.mak
@@ -1,3 +1,4 @@
TARGET_ARCH=mips64
TARGET_BASE_ARCH=mips
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/mips64el-linux-user.mak b/configs/targets/mips64el-linux-user.mak
index f9efeec..27f4169 100644
--- a/configs/targets/mips64el-linux-user.mak
+++ b/configs/targets/mips64el-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_ABI_MIPSN64=y
TARGET_BASE_ARCH=mips
TARGET_SYSTBL_ABI=n64
TARGET_SYSTBL=syscall_n64.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/mips64el-softmmu.mak b/configs/targets/mips64el-softmmu.mak
index 3864daa..3ebeadb 100644
--- a/configs/targets/mips64el-softmmu.mak
+++ b/configs/targets/mips64el-softmmu.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=mips64
TARGET_BASE_ARCH=mips
+TARGET_LONG_BITS=64
diff --git a/configs/targets/mipsel-linux-user.mak b/configs/targets/mipsel-linux-user.mak
index e8d7241..8b7e86a 100644
--- a/configs/targets/mipsel-linux-user.mak
+++ b/configs/targets/mipsel-linux-user.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=mips
TARGET_ABI_MIPSO32=y
TARGET_SYSTBL_ABI=o32
TARGET_SYSTBL=syscall_o32.tbl
+TARGET_LONG_BITS=32
diff --git a/configs/targets/mipsel-softmmu.mak b/configs/targets/mipsel-softmmu.mak
index 0829659..90e09bd 100644
--- a/configs/targets/mipsel-softmmu.mak
+++ b/configs/targets/mipsel-softmmu.mak
@@ -1,2 +1,2 @@
TARGET_ARCH=mips
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/mipsn32-linux-user.mak b/configs/targets/mipsn32-linux-user.mak
index 206095d..39ae214 100644
--- a/configs/targets/mipsn32-linux-user.mak
+++ b/configs/targets/mipsn32-linux-user.mak
@@ -5,3 +5,4 @@ TARGET_BASE_ARCH=mips
TARGET_SYSTBL_ABI=n32
TARGET_SYSTBL=syscall_n32.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/mipsn32el-linux-user.mak b/configs/targets/mipsn32el-linux-user.mak
index ca2a3ed..d9b61d6 100644
--- a/configs/targets/mipsn32el-linux-user.mak
+++ b/configs/targets/mipsn32el-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_ABI32=y
TARGET_BASE_ARCH=mips
TARGET_SYSTBL_ABI=n32
TARGET_SYSTBL=syscall_n32.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/or1k-linux-user.mak b/configs/targets/or1k-linux-user.mak
index 39558f7..810567a 100644
--- a/configs/targets/or1k-linux-user.mak
+++ b/configs/targets/or1k-linux-user.mak
@@ -1,2 +1,5 @@
TARGET_ARCH=openrisc
TARGET_BIG_ENDIAN=y
+TARGET_SYSTBL_ABI=common,32,or1k,time32,stat64,rlimit,renameat
+TARGET_SYSTBL=syscall.tbl
+TARGET_LONG_BITS=32
diff --git a/configs/targets/or1k-softmmu.mak b/configs/targets/or1k-softmmu.mak
index 0341cb2..0e47d98 100644
--- a/configs/targets/or1k-softmmu.mak
+++ b/configs/targets/or1k-softmmu.mak
@@ -1,5 +1,5 @@
TARGET_ARCH=openrisc
-TARGET_SUPPORTS_MTTCG=y
TARGET_BIG_ENDIAN=y
# needed by boot.c and all boards
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/ppc-linux-user.mak b/configs/targets/ppc-linux-user.mak
index cc0439a..970d04a 100644
--- a/configs/targets/ppc-linux-user.mak
+++ b/configs/targets/ppc-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common,nospu,32
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/ppc-softmmu.mak b/configs/targets/ppc-softmmu.mak
index 53120da..9bfa7df 100644
--- a/configs/targets/ppc-softmmu.mak
+++ b/configs/targets/ppc-softmmu.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=ppc
TARGET_BIG_ENDIAN=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml
+TARGET_LONG_BITS=32
diff --git a/configs/targets/ppc64-linux-user.mak b/configs/targets/ppc64-linux-user.mak
index 4d81969..461f1c6 100644
--- a/configs/targets/ppc64-linux-user.mak
+++ b/configs/targets/ppc64-linux-user.mak
@@ -5,3 +5,4 @@ TARGET_SYSTBL_ABI=common,nospu,64
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/ppc64-softmmu.mak b/configs/targets/ppc64-softmmu.mak
index 40881d9..7457286 100644
--- a/configs/targets/ppc64-softmmu.mak
+++ b/configs/targets/ppc64-softmmu.mak
@@ -1,8 +1,8 @@
TARGET_ARCH=ppc64
TARGET_BASE_ARCH=ppc
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml
# all boards require libfdt
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/ppc64le-linux-user.mak b/configs/targets/ppc64le-linux-user.mak
index 426d5a2..cf9d8a4 100644
--- a/configs/targets/ppc64le-linux-user.mak
+++ b/configs/targets/ppc64le-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_ABI_DIR=ppc
TARGET_SYSTBL_ABI=common,nospu,64
TARGET_SYSTBL=syscall.tbl
TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/riscv32-linux-user.mak b/configs/targets/riscv32-linux-user.mak
index 9761618..a0ef03c 100644
--- a/configs/targets/riscv32-linux-user.mak
+++ b/configs/targets/riscv32-linux-user.mak
@@ -4,3 +4,7 @@ TARGET_ABI_DIR=riscv
TARGET_XML_FILES= gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-32bit-virtual.xml
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_SYSTBL_ABI=32
+TARGET_SYSTBL_ABI=common,32,riscv,memfd_secret
+TARGET_SYSTBL=syscall.tbl
+TARGET_LONG_BITS=32
diff --git a/configs/targets/riscv32-softmmu.mak b/configs/targets/riscv32-softmmu.mak
index 338182d..db55275 100644
--- a/configs/targets/riscv32-softmmu.mak
+++ b/configs/targets/riscv32-softmmu.mak
@@ -1,6 +1,6 @@
TARGET_ARCH=riscv32
TARGET_BASE_ARCH=riscv
-TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-32bit-virtual.xml
# needed by boot.c
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/riscv64-bsd-user.mak b/configs/targets/riscv64-bsd-user.mak
new file mode 100644
index 0000000..c6348a7
--- /dev/null
+++ b/configs/targets/riscv64-bsd-user.mak
@@ -0,0 +1,5 @@
+TARGET_ARCH=riscv64
+TARGET_BASE_ARCH=riscv
+TARGET_ABI_DIR=riscv
+TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/riscv64-linux-user.mak b/configs/targets/riscv64-linux-user.mak
index cfd1fd3..aac7568 100644
--- a/configs/targets/riscv64-linux-user.mak
+++ b/configs/targets/riscv64-linux-user.mak
@@ -4,3 +4,7 @@ TARGET_ABI_DIR=riscv
TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml
CONFIG_SEMIHOSTING=y
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
+TARGET_SYSTBL_ABI=64
+TARGET_SYSTBL_ABI=common,64,riscv,rlimit,memfd_secret
+TARGET_SYSTBL=syscall.tbl
+TARGET_LONG_BITS=64
diff --git a/configs/targets/riscv64-softmmu.mak b/configs/targets/riscv64-softmmu.mak
index 917980e..2bdd4a6 100644
--- a/configs/targets/riscv64-softmmu.mak
+++ b/configs/targets/riscv64-softmmu.mak
@@ -1,7 +1,7 @@
TARGET_ARCH=riscv64
TARGET_BASE_ARCH=riscv
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
-TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml
+TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-virtual.xml
# needed by boot.c
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/rx-softmmu.mak b/configs/targets/rx-softmmu.mak
index 706bbe6..1c250a6 100644
--- a/configs/targets/rx-softmmu.mak
+++ b/configs/targets/rx-softmmu.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=rx
TARGET_XML_FILES= gdb-xml/rx-core.xml
# all boards require libfdt
TARGET_NEED_FDT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/s390x-linux-user.mak b/configs/targets/s390x-linux-user.mak
index 24c04c8..68c2f28 100644
--- a/configs/targets/s390x-linux-user.mak
+++ b/configs/targets/s390x-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/s390x-softmmu.mak b/configs/targets/s390x-softmmu.mak
index b22218a..76dd5de 100644
--- a/configs/targets/s390x-softmmu.mak
+++ b/configs/targets/s390x-softmmu.mak
@@ -1,5 +1,5 @@
TARGET_ARCH=s390x
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/sh4-linux-user.mak b/configs/targets/sh4-linux-user.mak
index 9908887..d58c547 100644
--- a/configs/targets/sh4-linux-user.mak
+++ b/configs/targets/sh4-linux-user.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=sh4
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
TARGET_HAS_BFLT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sh4-softmmu.mak b/configs/targets/sh4-softmmu.mak
index f9d62d9..787d349 100644
--- a/configs/targets/sh4-softmmu.mak
+++ b/configs/targets/sh4-softmmu.mak
@@ -1 +1,2 @@
TARGET_ARCH=sh4
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sh4eb-linux-user.mak b/configs/targets/sh4eb-linux-user.mak
index 9db6b36..99007f0 100644
--- a/configs/targets/sh4eb-linux-user.mak
+++ b/configs/targets/sh4eb-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_HAS_BFLT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sh4eb-softmmu.mak b/configs/targets/sh4eb-softmmu.mak
index 226b1fc..cdea2c6 100644
--- a/configs/targets/sh4eb-softmmu.mak
+++ b/configs/targets/sh4eb-softmmu.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=sh4
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak
index abcfb8f..4ff4b72 100644
--- a/configs/targets/sparc-linux-user.mak
+++ b/configs/targets/sparc-linux-user.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak
index a5d9200..57801fa 100644
--- a/configs/targets/sparc-softmmu.mak
+++ b/configs/targets/sparc-softmmu.mak
@@ -1,3 +1,3 @@
TARGET_ARCH=sparc
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak
index 6cc8fa5..7a16934 100644
--- a/configs/targets/sparc32plus-linux-user.mak
+++ b/configs/targets/sparc32plus-linux-user.mak
@@ -5,3 +5,4 @@ TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak
index 52f05ec..64ea04e 100644
--- a/configs/targets/sparc64-linux-user.mak
+++ b/configs/targets/sparc64-linux-user.mak
@@ -4,3 +4,4 @@ TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak
index 36ca64e..2504e31 100644
--- a/configs/targets/sparc64-softmmu.mak
+++ b/configs/targets/sparc64-softmmu.mak
@@ -1,4 +1,4 @@
TARGET_ARCH=sparc64
TARGET_BASE_ARCH=sparc
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=64
diff --git a/configs/targets/tricore-softmmu.mak b/configs/targets/tricore-softmmu.mak
index 96b10af..781ce49 100644
--- a/configs/targets/tricore-softmmu.mak
+++ b/configs/targets/tricore-softmmu.mak
@@ -1 +1,2 @@
TARGET_ARCH=tricore
+TARGET_LONG_BITS=32
diff --git a/configs/targets/x86_64-bsd-user.mak b/configs/targets/x86_64-bsd-user.mak
index 799cd4a..d62d656 100644
--- a/configs/targets/x86_64-bsd-user.mak
+++ b/configs/targets/x86_64-bsd-user.mak
@@ -1,3 +1,4 @@
TARGET_ARCH=x86_64
TARGET_BASE_ARCH=i386
TARGET_XML_FILES= gdb-xml/i386-64bit.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/x86_64-linux-user.mak b/configs/targets/x86_64-linux-user.mak
index 9ceefbb..b093ab5 100644
--- a/configs/targets/x86_64-linux-user.mak
+++ b/configs/targets/x86_64-linux-user.mak
@@ -2,4 +2,5 @@ TARGET_ARCH=x86_64
TARGET_BASE_ARCH=i386
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall_64.tbl
-TARGET_XML_FILES= gdb-xml/i386-64bit.xml
+TARGET_XML_FILES= gdb-xml/i386-64bit.xml gdb-xml/i386-64bit-linux.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/x86_64-softmmu.mak b/configs/targets/x86_64-softmmu.mak
index e12ac3d..5619b2b 100644
--- a/configs/targets/x86_64-softmmu.mak
+++ b/configs/targets/x86_64-softmmu.mak
@@ -1,5 +1,6 @@
TARGET_ARCH=x86_64
TARGET_BASE_ARCH=i386
-TARGET_SUPPORTS_MTTCG=y
TARGET_KVM_HAVE_GUEST_DEBUG=y
+TARGET_KVM_HAVE_RESET_PARKED_VCPU=y
TARGET_XML_FILES= gdb-xml/i386-64bit.xml
+TARGET_LONG_BITS=64
diff --git a/configs/targets/xtensa-linux-user.mak b/configs/targets/xtensa-linux-user.mak
index 420b30a..cbec6e3 100644
--- a/configs/targets/xtensa-linux-user.mak
+++ b/configs/targets/xtensa-linux-user.mak
@@ -2,3 +2,4 @@ TARGET_ARCH=xtensa
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
TARGET_HAS_BFLT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/xtensa-softmmu.mak b/configs/targets/xtensa-softmmu.mak
index f075557..2a97973 100644
--- a/configs/targets/xtensa-softmmu.mak
+++ b/configs/targets/xtensa-softmmu.mak
@@ -1,2 +1,2 @@
TARGET_ARCH=xtensa
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/xtensaeb-linux-user.mak b/configs/targets/xtensaeb-linux-user.mak
index bce2d1d..f455b1c 100644
--- a/configs/targets/xtensaeb-linux-user.mak
+++ b/configs/targets/xtensaeb-linux-user.mak
@@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_HAS_BFLT=y
+TARGET_LONG_BITS=32
diff --git a/configs/targets/xtensaeb-softmmu.mak b/configs/targets/xtensaeb-softmmu.mak
index b02e11b..5204729 100644
--- a/configs/targets/xtensaeb-softmmu.mak
+++ b/configs/targets/xtensaeb-softmmu.mak
@@ -1,3 +1,3 @@
TARGET_ARCH=xtensa
TARGET_BIG_ENDIAN=y
-TARGET_SUPPORTS_MTTCG=y
+TARGET_LONG_BITS=32
diff --git a/configure b/configure
index 019fcbd..2b2b3d6 100755
--- a/configure
+++ b/configure
@@ -13,7 +13,7 @@ export CCACHE_RECACHE=yes
# make source path absolute
source_path=$(cd "$(dirname -- "$0")"; pwd)
-if test "$PWD" = "$source_path"
+if test "$PWD" -ef "$source_path"
then
echo "Using './build' as the directory for build output"
@@ -207,6 +207,10 @@ for opt do
;;
--objcc=*) objcc="$optarg"
;;
+ --rustc=*) RUSTC="$optarg"
+ ;;
+ --rustdoc=*) RUSTDOC="$optarg"
+ ;;
--cpu=*) cpu="$optarg"
;;
--extra-cflags=*)
@@ -252,6 +256,8 @@ python=
download="enabled"
skip_meson=no
use_containers="yes"
+rust="disabled"
+rust_target_triple=""
gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
gdb_arches=""
@@ -310,6 +316,7 @@ objcopy="${OBJCOPY-${cross_prefix}objcopy}"
ld="${LD-${cross_prefix}ld}"
ranlib="${RANLIB-${cross_prefix}ranlib}"
nm="${NM-${cross_prefix}nm}"
+readelf="${READELF-${cross_prefix}readelf}"
strip="${STRIP-${cross_prefix}strip}"
widl="${WIDL-${cross_prefix}widl}"
windres="${WINDRES-${cross_prefix}windres}"
@@ -317,6 +324,9 @@ windmc="${WINDMC-${cross_prefix}windmc}"
pkg_config="${PKG_CONFIG-${cross_prefix}pkg-config}"
sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}"
+rustc="${RUSTC-rustc}"
+rustdoc="${RUSTDOC-rustdoc}"
+
check_define() {
cat > $TMPC <<EOF
#if !defined($1)
@@ -353,6 +363,10 @@ elif check_define __NetBSD__; then
host_os=netbsd
elif check_define __APPLE__; then
host_os=darwin
+elif check_define EMSCRIPTEN ; then
+ host_os=emscripten
+ cpu=wasm32
+ cross_compile="yes"
else
# This is a fatal error, but don't report it yet, because we
# might be going to just print the --help text, or it might
@@ -388,7 +402,11 @@ elif check_define _ARCH_PPC ; then
cpu="ppc"
fi
elif check_define __mips__ ; then
- cpu="mips"
+ if check_define __mips64 ; then
+ cpu="mips64"
+ else
+ cpu="mips"
+ fi
elif check_define __s390__ ; then
if check_define __s390x__ ; then
cpu="s390x"
@@ -425,6 +443,7 @@ fi
# Please keep it sorted and synchronized with meson.build's host_arch.
host_arch=
linux_arch=
+raw_cpu=$cpu
case "$cpu" in
aarch64)
host_arch=aarch64
@@ -514,6 +533,9 @@ case "$cpu" in
linux_arch=x86
CPU_CFLAGS="-m64"
;;
+ wasm32)
+ CPU_CFLAGS="-m32"
+ ;;
esac
if test -n "$host_arch" && {
@@ -528,17 +550,17 @@ if test -n "$linux_arch" && ! test -d "$source_path/linux-headers/asm-$linux_arc
fi
check_py_version() {
- # We require python >= 3.8.
+ # We require python >= 3.9.
# NB: a True python conditional creates a non-zero return code (Failure)
- "$1" -c 'import sys; sys.exit(sys.version_info < (3,8))'
+ "$1" -c 'import sys; sys.exit(sys.version_info < (3,9))'
}
first_python=
if test -z "${PYTHON}"; then
# A bare 'python' is traditionally python 2.x, but some distros
# have it as python 3.x, so check in both places.
- for binary in python3 python python3.12 python3.11 \
- python3.10 python3.9 python3.8; do
+ for binary in python3 python python3.13 python3.12 python3.11 \
+ python3.10 python3.9 ; do
if has "$binary"; then
python=$(command -v "$binary")
if check_py_version "$python"; then
@@ -610,6 +632,9 @@ meson_option_parse() {
exit 1
fi
}
+has_meson_option() {
+ test "${meson_options#*"$1"}" != "$meson_options"
+}
meson_add_machine_file() {
if test "$cross_compile" = "yes"; then
@@ -636,6 +661,10 @@ for opt do
;;
--objcc=*)
;;
+ --rustc=*)
+ ;;
+ --rustdoc=*)
+ ;;
--make=*)
;;
--install=*)
@@ -755,8 +784,14 @@ for opt do
;;
--container-engine=*) container_engine="$optarg"
;;
+ --rust-target-triple=*) rust_target_triple="$optarg"
+ ;;
--gdb=*) gdb_bin="$optarg"
;;
+ --enable-rust) rust=enabled
+ ;;
+ --disable-rust) rust=disabled
+ ;;
# everything else has the same name in configure and meson
--*) meson_option_parse "$opt" "$optarg"
;;
@@ -859,6 +894,8 @@ Advanced options (experts only):
at build time [$host_cc]
--cxx=CXX use C++ compiler CXX [$cxx]
--objcc=OBJCC use Objective-C compiler OBJCC [$objcc]
+ --rustc=RUSTC use Rust compiler RUSTC [$rustc]
+ --rustdoc=RUSTDOC use rustdoc binary RUSTDOC [$rustdoc]
--extra-cflags=CFLAGS append extra C compiler flags CFLAGS
--extra-cxxflags=CXXFLAGS append extra C++ compiler flags CXXFLAGS
--extra-objcflags=OBJCFLAGS append extra Objective C compiler flags OBJCFLAGS
@@ -869,8 +906,9 @@ Advanced options (experts only):
--python=PYTHON use specified python [$python]
--ninja=NINJA use specified ninja [$ninja]
--static enable static build [$static]
- --without-default-features default all --enable-* options to "disabled"
- --without-default-devices do not include any device that is not needed to
+ --rust-target-triple=TRIPLE compilation target for Rust code [autodetect]
+ --without-default-features default all --enable-* options to "disabled"
+ --without-default-devices do not include any device that is not needed to
start the emulator (only use if you are including
desired devices in configs/devices/)
--with-devices-ARCH=NAME override default configs/devices
@@ -908,7 +946,7 @@ then
# If first_python is set, there was a binary somewhere even though
# it was not suitable. Use it for the error message.
if test -n "$first_python"; then
- error_exit "Cannot use '$first_python', Python >= 3.8 is required." \
+ error_exit "Cannot use '$first_python', Python >= 3.9 is required." \
"Use --python=/path/to/python to specify a supported Python."
else
error_exit "Python not found. Use --python=/path/to/python"
@@ -916,11 +954,11 @@ then
fi
if ! check_py_version "$python"; then
- error_exit "Cannot use '$python', Python >= 3.8 is required." \
+ error_exit "Cannot use '$python', Python >= 3.9 is required." \
"Use --python=/path/to/python to specify a supported Python." \
"Maybe try:" \
" openSUSE Leap 15.3+: zypper install python39" \
- " CentOS 8: dnf install python38"
+ " CentOS: dnf install python3.12"
fi
# Resolve PATH
@@ -1029,8 +1067,11 @@ if test "$static" = "yes" ; then
plugins="no"
fi
if test "$plugins" != "no"; then
- plugins=yes
- subdirs="$subdirs contrib/plugins"
+ if has_meson_option "-Dtcg_interpreter=true"; then
+ plugins="no"
+ else
+ plugins=yes
+ fi
fi
cat > $TMPC << EOF
@@ -1103,8 +1144,10 @@ fi
# gdb test
if test -n "$gdb_bin"; then
- gdb_version=$($gdb_bin --version | head -n 1)
- if version_ge ${gdb_version##* } 9.1; then
+ gdb_version_string=$($gdb_bin --version | head -n 1)
+ # Extract last field in the version string
+ gdb_version=${gdb_version_string##* }
+ if version_ge $gdb_version 9.1; then
gdb_arches=$($python "$source_path/scripts/probe-gdb-support.py" $gdb_bin)
else
gdb_bin=""
@@ -1139,6 +1182,140 @@ EOF
fi
##########################################
+# detect rust triple
+
+meson_version=$($meson --version)
+if test "$rust" != disabled && ! version_ge "$meson_version" 1.8.1; then
+ if test "$rust" = enabled; then
+ error_exit "Rust support needs Meson 1.8.1 or newer"
+ fi
+ echo "Rust needs Meson 1.8.1, disabling" 2>&1
+ rust=disabled
+fi
+if test "$rust" != disabled && has "$rustc" && $rustc -vV > "${TMPDIR1}/${TMPB}.out"; then
+ rust_host_triple=$(sed -n 's/^host: //p' "${TMPDIR1}/${TMPB}.out")
+else
+ if test "$rust" = enabled; then
+ error_exit "could not execute rustc binary \"$rustc\""
+ fi
+ rust=disabled
+fi
+if test "$rust" != disabled && test -z "$rust_target_triple"; then
+ # arch and os generally matches between meson and rust
+ rust_arch=$host_arch
+ rust_os=$host_os
+ rust_machine=unknown
+ rust_osvariant=
+
+ # tweak rust_os if needed; also, machine and variant depend on the OS
+ android=no
+ case "$host_os" in
+ darwin)
+ # e.g. aarch64-apple-darwin
+ rust_machine=apple
+ ;;
+
+ linux)
+ # detect android/glibc/musl
+ if check_define __ANDROID__; then
+ rust_osvariant=android
+ android=yes
+ else
+ cat > $TMPC << EOF
+#define _GNU_SOURCE
+#include <features.h>
+#ifndef __USE_GNU
+error using musl
+#endif
+EOF
+ if compile_object; then
+ rust_osvariant=gnu
+ else
+ rust_osvariant=musl
+ fi
+ fi
+
+ case "$cpu" in
+ arm)
+ # e.g. arm-unknown-linux-gnueabi, arm-unknown-linux-gnueabihf
+ write_c_skeleton
+ compile_object
+ if $readelf -A $TMPO | grep Tag_ABI_VFP_args: > /dev/null; then
+ rust_osvariant=${rust_osvariant}eabihf
+ else
+ rust_osvariant=${rust_osvariant}eabi
+ fi
+ ;;
+
+ mips64)
+ # e.g. mips64-unknown-linux-gnuabi64
+ rust_osvariant=${rust_osvariant}abi64
+ ;;
+ esac
+ ;;
+
+ netbsd)
+ # e.g. arm-unknown-netbsd-eabihf
+ test "$host_arch" = arm && rust_osvariant=eabihf
+ ;;
+
+ sunos)
+ rust_machine=pc
+ rust_os=solaris
+ ;;
+
+ windows)
+ # e.g. aarch64-pc-windows-gnullvm, x86_64-pc-windows-gnu (MSVC not supported)
+ rust_machine=pc
+ if test "$host_arch" = aarch64; then
+ rust_osvariant=gnullvm
+ else
+ rust_osvariant=gnu
+ fi
+ ;;
+ esac
+
+ # now tweak the architecture part, possibly based on pre-canonicalization --cpu
+ case "$host_arch" in
+ arm)
+ # preserve ISA version (armv7 etc.) from $raw_cpu if passed via --cpu
+ rust_arch=$raw_cpu
+ test "$rust_arch" = arm && test "$rust_os" != linux && rust_arch=armv7
+ ;;
+
+ mips)
+ # preserve ISA version (mipsisa64r6 etc.) and include endianness
+ rust_arch=${raw_cpu%el}
+ test "$bigendian" = no && rust_arch=${rust_arch}el
+ ;;
+
+ riscv32|riscv64)
+ # e.g. riscv64gc-unknown-linux-gnu, but riscv64-linux-android
+ test "$android" = no && rust_arch=${rust_arch}gc
+ ;;
+
+ sparc64)
+ if test "$rust_os" = solaris; then
+ rust_arch=sparcv9
+ rust_machine=sun
+ fi
+ ;;
+
+ x86_64)
+ # e.g. x86_64-unknown-linux-gnux32
+ test "$raw_cpu" = x32 && rust_osvariant=${rust_osvariant}x32
+ ;;
+ esac
+
+ if test "$android" = yes; then
+ # e.g. aarch64-linux-android
+ rust_target_triple=$rust_arch-$rust_os-$rust_osvariant
+ else
+ rust_target_triple=$rust_arch-$rust_machine-$rust_os${rust_osvariant:+-$rust_osvariant}
+ fi
+fi
+
+##########################################
# functions to probe cross compilers
container="no"
@@ -1246,9 +1423,9 @@ probe_target_compiler() {
target_arch=${1%%-*}
case $target_arch in
aarch64) container_hosts="x86_64 aarch64" ;;
+ aarch64_be) container_hosts="x86_64 aarch64" ;;
alpha) container_hosts=x86_64 ;;
arm) container_hosts="x86_64 aarch64" ;;
- cris) container_hosts=x86_64 ;;
hexagon) container_hosts=x86_64 ;;
hppa) container_hosts=x86_64 ;;
i386) container_hosts=x86_64 ;;
@@ -1276,6 +1453,10 @@ probe_target_compiler() {
case $target_arch in
# debian-all-test-cross architectures
+ aarch64_be)
+ container_image=debian-all-test-cross
+ container_cross_prefix=aarch64-linux-gnu-
+ ;;
hppa|m68k|mips|riscv64|sparc64)
container_image=debian-all-test-cross
;;
@@ -1307,9 +1488,6 @@ probe_target_compiler() {
container_image=debian-armhf-cross
container_cross_prefix=arm-linux-gnueabihf-
;;
- cris)
- container_image=fedora-cris-cross
- ;;
hexagon)
container_cross_prefix=hexagon-unknown-linux-musl-
container_cross_cc=${container_cross_prefix}clang
@@ -1326,7 +1504,7 @@ probe_target_compiler() {
container_cross_prefix=microblaze-linux-musl-
;;
mips64el)
- container_image=debian-mips64el-cross
+ container_image=debian-all-test-cross
container_cross_prefix=mips64el-linux-gnuabi64-
;;
tricore)
@@ -1528,10 +1706,9 @@ LINKS="$LINKS pc-bios/optionrom/Makefile"
LINKS="$LINKS pc-bios/s390-ccw/Makefile"
LINKS="$LINKS pc-bios/vof/Makefile"
LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit
-LINKS="$LINKS tests/avocado tests/data"
+LINKS="$LINKS tests/data"
LINKS="$LINKS tests/qemu-iotests/check tests/qemu-iotests/Makefile"
LINKS="$LINKS python"
-LINKS="$LINKS contrib/plugins/Makefile "
for f in $LINKS ; do
if [ -e "$source_path/$f" ]; then
symlink "$source_path/$f" "$f"
@@ -1604,6 +1781,9 @@ if test "$container" != no; then
echo "RUNC=$runc" >> $config_host_mak
fi
echo "SUBDIRS=$subdirs" >> $config_host_mak
+if test "$rust" != disabled; then
+ echo "RUST_TARGET_TRIPLE=$rust_target_triple" >> $config_host_mak
+fi
echo "PYTHON=$python" >> $config_host_mak
echo "MKVENV_ENSUREGROUP=$mkvenv ensuregroup $mkvenv_online_flag" >> $config_host_mak
echo "GENISOIMAGE=$genisoimage" >> $config_host_mak
@@ -1614,22 +1794,6 @@ if test "$default_targets" = "yes"; then
echo "CONFIG_DEFAULT_TARGETS=y" >> $config_host_mak
fi
-# contrib/plugins configuration
-echo "# Automatically generated by configure - do not modify" > contrib/plugins/$config_host_mak
-echo "SRC_PATH=$source_path/contrib/plugins" >> contrib/plugins/$config_host_mak
-echo "PKG_CONFIG=${pkg_config}" >> contrib/plugins/$config_host_mak
-echo "CC=$cc $CPU_CFLAGS" >> contrib/plugins/$config_host_mak
-echo "CFLAGS=${CFLAGS-$default_cflags} $EXTRA_CFLAGS" >> contrib/plugins/$config_host_mak
-if test "$host_os" = windows; then
- echo "DLLTOOL=$dlltool" >> contrib/plugins/$config_host_mak
-fi
-if test "$host_os" = darwin; then
- echo "CONFIG_DARWIN=y" >> contrib/plugins/$config_host_mak
-fi
-if test "$host_os" = windows; then
- echo "CONFIG_WIN32=y" >> contrib/plugins/$config_host_mak
-fi
-
# tests/tcg configuration
mkdir -p tests/tcg
echo "# Automatically generated by configure - do not modify" > tests/tcg/$config_host_mak
@@ -1673,10 +1837,15 @@ for target in $target_list; do
echo "GDB=$gdb_bin" >> $config_target_mak
fi
- if test "${arch}" = "aarch64" && version_ge ${gdb_version##* } 15.0; then
+ if test "${gdb_arches#*aarch64}" != "$gdb_arches" && version_ge $gdb_version 15.1; then
echo "GDB_HAS_MTE=y" >> $config_target_mak
fi
+ if test "${gdb_arches#*aarch64}" != "$gdb_arches" && version_ge $gdb_version 16.0; then
+ # GDB has to support MTE in baremetal to allow debugging MTE in QEMU system mode
+ echo "GDB_SUPPORTS_MTE_IN_BAREMETAL=y" >> $config_target_mak
+ fi
+
echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs
tcg_tests_targets="$tcg_tests_targets $target"
fi
@@ -1735,12 +1904,22 @@ if test "$skip_meson" = no; then
echo "c = [$(meson_quote $cc $CPU_CFLAGS)]" >> $cross
test -n "$cxx" && echo "cpp = [$(meson_quote $cxx $CPU_CFLAGS)]" >> $cross
test -n "$objcc" && echo "objc = [$(meson_quote $objcc $CPU_CFLAGS)]" >> $cross
+ if test "$rust" != disabled; then
+ if test "$rust_host_triple" != "$rust_target_triple"; then
+ echo "rust = [$(meson_quote $rustc --target "$rust_target_triple")]" >> $cross
+ echo "rustdoc = [$(meson_quote $rustdoc --target "$rust_target_triple")]" >> $cross
+ else
+ echo "rust = [$(meson_quote $rustc)]" >> $cross
+ echo "rustdoc = [$(meson_quote $rustdoc)]" >> $cross
+ fi
+ fi
echo "ar = [$(meson_quote $ar)]" >> $cross
echo "dlltool = [$(meson_quote $dlltool)]" >> $cross
echo "nm = [$(meson_quote $nm)]" >> $cross
echo "pkgconfig = [$(meson_quote $pkg_config)]" >> $cross
echo "pkg-config = [$(meson_quote $pkg_config)]" >> $cross
echo "ranlib = [$(meson_quote $ranlib)]" >> $cross
+ echo "readelf = [$(meson_quote $readelf)]" >> $cross
if has $sdl2_config; then
echo "sdl2-config = [$(meson_quote $sdl2_config)]" >> $cross
fi
@@ -1770,6 +1949,9 @@ if test "$skip_meson" = no; then
echo "# Automatically generated by configure - do not modify" > $native
echo "[binaries]" >> $native
echo "c = [$(meson_quote $host_cc)]" >> $native
+ if test "$rust" != disabled; then
+ echo "rust = [$(meson_quote $rustc)]" >> $cross
+ fi
mv $native config-meson.native
meson_option_add --native-file
meson_option_add config-meson.native
@@ -1788,6 +1970,7 @@ if test "$skip_meson" = no; then
test "$pie" = no && meson_option_add -Db_pie=false
# QEMU options
+ test "$rust" != "disabled" && meson_option_add "-Drust=$rust"
test "$cfi" != false && meson_option_add "-Dcfi=$cfi" "-Db_lto=$cfi"
test "$docs" != auto && meson_option_add "-Ddocs=$docs"
test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE"
@@ -1872,3 +2055,11 @@ echo ' "$@"' >>config.status
chmod +x config.status
rm -r "$TMPDIR1"
+
+if test "$rust" != disabled; then
+ echo
+ echo 'INFO: Rust bindings generation with `bindgen` might fail in some cases where'
+ echo 'the detected `libclang` does not match the expected `clang` version/target. In'
+ echo 'this case you must pass the path to `clang` and `libclang` to your build'
+ echo 'command invocation using the environment variables CLANG_PATH and LIBCLANG_PATH'
+fi
diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c
index 492aca4..47c5126 100644
--- a/contrib/elf2dmp/pdb.c
+++ b/contrib/elf2dmp/pdb.c
@@ -14,8 +14,8 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ * License along with this library; if not, see
+ * <https://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
deleted file mode 100644
index 449ead1..0000000
--- a/contrib/plugins/Makefile
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- Mode: makefile -*-
-#
-# This Makefile example is fairly independent from the main makefile
-# so users can take and adapt it for their build. We only really
-# include config-host.mak so we don't have to repeat probing for
-# programs that the main configure has already done for us.
-#
-
-include config-host.mak
-
-TOP_SRC_PATH = $(SRC_PATH)/../..
-
-VPATH += $(SRC_PATH)
-
-NAMES :=
-NAMES += execlog
-NAMES += hotblocks
-NAMES += hotpages
-NAMES += howvec
-
-# The lockstep example communicates using unix sockets,
-# and can't be easily made to work on windows.
-ifneq ($(CONFIG_WIN32),y)
-NAMES += lockstep
-endif
-
-NAMES += hwprofile
-NAMES += cache
-NAMES += drcov
-NAMES += ips
-
-ifeq ($(CONFIG_WIN32),y)
-SO_SUFFIX := .dll
-LDLIBS += $(shell $(PKG_CONFIG) --libs glib-2.0)
-else
-SO_SUFFIX := .so
-endif
-
-SONAMES := $(addsuffix $(SO_SUFFIX),$(addprefix lib,$(NAMES)))
-
-# The main QEMU uses Glib extensively so it's perfectly fine to use it
-# in plugins (which many example do).
-PLUGIN_CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0)
-PLUGIN_CFLAGS += -fPIC -Wall
-PLUGIN_CFLAGS += -I$(TOP_SRC_PATH)/include/qemu
-
-all: $(SONAMES)
-
-%.o: %.c
- $(CC) $(CFLAGS) $(PLUGIN_CFLAGS) -c -o $@ $<
-
-ifeq ($(CONFIG_WIN32),y)
-lib%$(SO_SUFFIX): %.o win32_linker.o ../../plugins/libqemu_plugin_api.a
- $(CC) -shared -o $@ $^ $(LDLIBS)
-else ifeq ($(CONFIG_DARWIN),y)
-lib%$(SO_SUFFIX): %.o
- $(CC) -bundle -Wl,-undefined,dynamic_lookup -o $@ $^ $(LDLIBS)
-else
-lib%$(SO_SUFFIX): %.o
- $(CC) -shared -o $@ $^ $(LDLIBS)
-endif
-
-
-clean:
- rm -f *.o *$(SO_SUFFIX) *.d
- rm -Rf .libs
-
-.PHONY: all clean
-.SECONDARY:
diff --git a/contrib/plugins/bbv.c b/contrib/plugins/bbv.c
new file mode 100644
index 0000000..b9da6f8
--- /dev/null
+++ b/contrib/plugins/bbv.c
@@ -0,0 +1,158 @@
+/*
+ * Generate basic block vectors for use with the SimPoint analysis tool.
+ * SimPoint: https://cseweb.ucsd.edu/~calder/simpoint/
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+typedef struct Bb {
+ uint64_t vaddr;
+ struct qemu_plugin_scoreboard *count;
+ unsigned int index;
+} Bb;
+
+typedef struct Vcpu {
+ uint64_t count;
+ FILE *file;
+} Vcpu;
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+static GHashTable *bbs;
+static GRWLock bbs_lock;
+static char *filename;
+static struct qemu_plugin_scoreboard *vcpus;
+static uint64_t interval = 100000000;
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ for (int i = 0; i < qemu_plugin_num_vcpus(); i++) {
+ fclose(((Vcpu *)qemu_plugin_scoreboard_find(vcpus, i))->file);
+ }
+
+ g_hash_table_unref(bbs);
+ g_free(filename);
+ qemu_plugin_scoreboard_free(vcpus);
+}
+
+static void free_bb(void *data)
+{
+ qemu_plugin_scoreboard_free(((Bb *)data)->count);
+ g_free(data);
+}
+
+static qemu_plugin_u64 count_u64(void)
+{
+ return qemu_plugin_scoreboard_u64_in_struct(vcpus, Vcpu, count);
+}
+
+static qemu_plugin_u64 bb_count_u64(Bb *bb)
+{
+ return qemu_plugin_scoreboard_u64(bb->count);
+}
+
+static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
+{
+ g_autofree gchar *vcpu_filename = NULL;
+ Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index);
+
+ vcpu_filename = g_strdup_printf("%s.%u.bb", filename, vcpu_index);
+ vcpu->file = fopen(vcpu_filename, "w");
+}
+
+static void vcpu_interval_exec(unsigned int vcpu_index, void *udata)
+{
+ Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index);
+ GHashTableIter iter;
+ void *value;
+
+ if (!vcpu->file) {
+ return;
+ }
+
+ vcpu->count -= interval;
+
+ fputc('T', vcpu->file);
+
+ g_rw_lock_reader_lock(&bbs_lock);
+ g_hash_table_iter_init(&iter, bbs);
+
+ while (g_hash_table_iter_next(&iter, NULL, &value)) {
+ Bb *bb = value;
+ uint64_t bb_count = qemu_plugin_u64_get(bb_count_u64(bb), vcpu_index);
+
+ if (!bb_count) {
+ continue;
+ }
+
+ fprintf(vcpu->file, ":%u:%" PRIu64 " ", bb->index, bb_count);
+ qemu_plugin_u64_set(bb_count_u64(bb), vcpu_index, 0);
+ }
+
+ g_rw_lock_reader_unlock(&bbs_lock);
+ fputc('\n', vcpu->file);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ uint64_t n_insns = qemu_plugin_tb_n_insns(tb);
+ uint64_t vaddr = qemu_plugin_tb_vaddr(tb);
+ Bb *bb;
+
+ g_rw_lock_writer_lock(&bbs_lock);
+ bb = g_hash_table_lookup(bbs, &vaddr);
+ if (!bb) {
+ bb = g_new(Bb, 1);
+ bb->vaddr = vaddr;
+ bb->count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
+ bb->index = g_hash_table_size(bbs) + 1;
+ g_hash_table_replace(bbs, &bb->vaddr, bb);
+ }
+ g_rw_lock_writer_unlock(&bbs_lock);
+
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64, count_u64(), n_insns);
+
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count_u64(bb), n_insns);
+
+ qemu_plugin_register_vcpu_tb_exec_cond_cb(
+ tb, vcpu_interval_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_GE, count_u64(), interval, NULL);
+}
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+ if (g_strcmp0(tokens[0], "interval") == 0) {
+ interval = g_ascii_strtoull(tokens[1], NULL, 10);
+ } else if (g_strcmp0(tokens[0], "outfile") == 0) {
+ filename = tokens[1];
+ tokens[1] = NULL;
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ if (!filename) {
+ fputs("outfile unspecified\n", stderr);
+ return -1;
+ }
+
+ bbs = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, free_bb);
+ vcpus = qemu_plugin_scoreboard_new(sizeof(Vcpu));
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+
+ return 0;
+}
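The bbv plugin keys its hash table on the address of the uint64_t vaddr field embedded in each Bb record, relying on g_int64_hash/g_int64_equal to dereference the key. A minimal standalone GLib sketch of that pattern, with illustrative names (Block, blocks) rather than the plugin's own:

    /* Standalone sketch, not part of the patch: keying a GHashTable on a
     * uint64_t that lives inside the value struct, as bbv.c does with
     * &bb->vaddr.  Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0)
     */
    #include <glib.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t vaddr;   /* the key lives inside the value */
        uint64_t count;
    } Block;

    int main(void)
    {
        /* g_int64_hash/g_int64_equal dereference the key as a 64-bit integer,
         * so the key pointer must stay valid as long as the entry does. */
        GHashTable *blocks = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                                   NULL, g_free);
        uint64_t pc = 0x400080;

        Block *b = g_hash_table_lookup(blocks, &pc);
        if (!b) {
            b = g_new0(Block, 1);
            b->vaddr = pc;
            g_hash_table_insert(blocks, &b->vaddr, b);  /* key == &value->vaddr */
        }
        b->count++;

        printf("blocks tracked: %u\n", g_hash_table_size(blocks));
        g_hash_table_unref(blocks);
        return 0;
    }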
diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c
index c5c8ac7..5650858 100644
--- a/contrib/plugins/cache.c
+++ b/contrib/plugins/cache.c
@@ -208,7 +208,7 @@ static int fifo_get_first_block(Cache *cache, int set)
static void fifo_update_on_miss(Cache *cache, int set, int blk_idx)
{
GQueue *q = cache->sets[set].fifo_queue;
- g_queue_push_head(q, GINT_TO_POINTER(blk_idx));
+ g_queue_push_head(q, (gpointer)(intptr_t) blk_idx);
}
static void fifo_destroy(Cache *cache)
@@ -471,13 +471,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
n_insns = qemu_plugin_tb_n_insns(tb);
for (i = 0; i < n_insns; i++) {
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
- uint64_t effective_addr;
-
- if (sys) {
- effective_addr = (uint64_t) qemu_plugin_insn_haddr(insn);
- } else {
- effective_addr = (uint64_t) qemu_plugin_insn_vaddr(insn);
- }
+ uint64_t effective_addr = sys ? (uintptr_t) qemu_plugin_insn_haddr(insn) :
+ qemu_plugin_insn_vaddr(insn);
/*
* Instructions might get translated multiple times, we do not create
@@ -485,14 +480,13 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
* entry from the hash table and register it for the callback again.
*/
g_mutex_lock(&hashtable_lock);
- data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr));
+ data = g_hash_table_lookup(miss_ht, &effective_addr);
if (data == NULL) {
data = g_new0(InsnData, 1);
data->disas_str = qemu_plugin_insn_disas(insn);
data->symbol = qemu_plugin_insn_symbol(insn);
data->addr = effective_addr;
- g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr),
- (gpointer) data);
+ g_hash_table_insert(miss_ht, &data->addr, data);
}
g_mutex_unlock(&hashtable_lock);
@@ -558,7 +552,7 @@ static void append_stats_line(GString *line,
" %-12" PRIu64 " %-11" PRIu64 " %10.4lf%%",
l2_access,
l2_misses,
- l2_access ? l2_miss_rate : 0.0);
+ l2_miss_rate);
}
g_string_append(line, "\n");
@@ -582,7 +576,7 @@ static void sum_stats(void)
}
}
-static int dcmp(gconstpointer a, gconstpointer b)
+static int dcmp(gconstpointer a, gconstpointer b, gpointer d)
{
InsnData *insn_a = (InsnData *) a;
InsnData *insn_b = (InsnData *) b;
@@ -590,7 +584,7 @@ static int dcmp(gconstpointer a, gconstpointer b)
return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1;
}
-static int icmp(gconstpointer a, gconstpointer b)
+static int icmp(gconstpointer a, gconstpointer b, gpointer d)
{
InsnData *insn_a = (InsnData *) a;
InsnData *insn_b = (InsnData *) b;
@@ -598,7 +592,7 @@ static int icmp(gconstpointer a, gconstpointer b)
return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1;
}
-static int l2_cmp(gconstpointer a, gconstpointer b)
+static int l2_cmp(gconstpointer a, gconstpointer b, gpointer d)
{
InsnData *insn_a = (InsnData *) a;
InsnData *insn_b = (InsnData *) b;
@@ -609,7 +603,7 @@ static int l2_cmp(gconstpointer a, gconstpointer b)
static void log_stats(void)
{
int i;
- Cache *icache, *dcache, *l2_cache;
+ Cache *icache, *dcache, *l2_cache = NULL;
g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses,"
" dmiss rate, insn accesses,"
@@ -651,7 +645,7 @@ static void log_top_insns(void)
InsnData *insn;
miss_insns = g_hash_table_get_values(miss_ht);
- miss_insns = g_list_sort(miss_insns, dcmp);
+ miss_insns = g_list_sort_with_data(miss_insns, dcmp, NULL);
g_autoptr(GString) rep = g_string_new("");
g_string_append_printf(rep, "%s", "address, data misses, instruction\n");
@@ -665,7 +659,7 @@ static void log_top_insns(void)
insn->l1_dmisses, insn->disas_str);
}
- miss_insns = g_list_sort(miss_insns, icmp);
+ miss_insns = g_list_sort_with_data(miss_insns, icmp, NULL);
g_string_append_printf(rep, "%s", "\naddress, fetch misses, instruction\n");
for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
@@ -682,7 +676,7 @@ static void log_top_insns(void)
goto finish;
}
- miss_insns = g_list_sort(miss_insns, l2_cmp);
+ miss_insns = g_list_sort_with_data(miss_insns, l2_cmp, NULL);
g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n");
for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
@@ -853,7 +847,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
- miss_ht = g_hash_table_new_full(NULL, g_direct_equal, NULL, insn_free);
+ miss_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, insn_free);
return 0;
}
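cache.c switches every comparator from g_list_sort() to g_list_sort_with_data(), whose GCompareDataFunc signature carries an extra user-data argument even when it goes unused. A small self-contained sketch of that comparator shape, with illustrative names (Stat, cmp_misses):

    /* Standalone sketch of the three-argument comparator expected by
     * g_list_sort_with_data(); the names here are illustrative only. */
    #include <glib.h>
    #include <stdio.h>

    typedef struct {
        const char *name;
        guint64 misses;
    } Stat;

    /* Descending by miss count; the trailing gpointer is unused here. */
    static gint cmp_misses(gconstpointer a, gconstpointer b, gpointer d)
    {
        const Stat *sa = a;
        const Stat *sb = b;
        return sa->misses < sb->misses ? 1 : sa->misses > sb->misses ? -1 : 0;
    }

    int main(void)
    {
        Stat s1 = { "loads", 10 }, s2 = { "stores", 42 };
        GList *l = NULL;
        l = g_list_prepend(l, &s1);
        l = g_list_prepend(l, &s2);

        l = g_list_sort_with_data(l, cmp_misses, NULL);
        for (GList *it = l; it; it = it->next) {
            const Stat *s = it->data;
            printf("%s: %" G_GUINT64_FORMAT "\n", s->name, s->misses);
        }
        g_list_free(l);
        return 0;
    }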
diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c
new file mode 100644
index 0000000..b5e33f2
--- /dev/null
+++ b/contrib/plugins/cflow.c
@@ -0,0 +1,393 @@
+/*
+ * Control Flow plugin
+ *
+ * This plugin will track changes to control flow and detect where
+ * instructions fault.
+ *
+ * Copyright (c) 2024 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <glib.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+typedef enum {
+ SORT_HOTTEST, /* hottest branch insn */
+ SORT_EXCEPTION, /* most early exits */
+ SORT_POPDEST, /* most destinations (usually ret's) */
+} ReportType;
+
+ReportType report = SORT_HOTTEST;
+int topn = 10;
+
+typedef struct {
+ uint64_t daddr;
+ uint64_t dcount;
+} DestData;
+
+/* A node is an address where we can go to multiple places */
+typedef struct {
+ GMutex lock;
+ /* address of the branch point */
+ uint64_t addr;
+ /* array of DestData */
+ GArray *dests;
+ /* early exit/fault count */
+ uint64_t early_exit;
+ /* jump destination count */
+ uint64_t dest_count;
+ /* instruction data */
+ char *insn_disas;
+ /* symbol? */
+ const char *symbol;
+ /* times translated as last in block? */
+ int last_count;
+ /* times translated in the middle of block? */
+ int mid_count;
+} NodeData;
+
+typedef enum {
+ /* last insn in block, expected flow control */
+ LAST_INSN = (1 << 0),
+ /* mid-block insn, can only be an exception */
+ EXCP_INSN = (1 << 1),
+ /* multiple disassembly, may have changed */
+ MULT_INSN = (1 << 2),
+} InsnTypes;
+
+typedef struct {
+ /* address of the branch point */
+ uint64_t addr;
+ /* disassembly */
+ char *insn_disas;
+ /* symbol? */
+ const char *symbol;
+ /* types */
+ InsnTypes type_flag;
+} InsnData;
+
+/* We use this to track the current execution state */
+typedef struct {
+ /* address of current translated block */
+ uint64_t tb_pc;
+ /* address of end of block */
+ uint64_t end_block;
+ /* next pc after end of block */
+ uint64_t pc_after_block;
+ /* address of last executed PC */
+ uint64_t last_pc;
+} VCPUScoreBoard;
+
+/* descriptors for accessing the above scoreboard */
+static qemu_plugin_u64 tb_pc;
+static qemu_plugin_u64 end_block;
+static qemu_plugin_u64 pc_after_block;
+static qemu_plugin_u64 last_pc;
+
+
+static GMutex node_lock;
+static GHashTable *nodes;
+struct qemu_plugin_scoreboard *state;
+
+/* SORT_HOTTEST */
+static gint hottest(gconstpointer a, gconstpointer b, gpointer d)
+{
+ NodeData *na = (NodeData *) a;
+ NodeData *nb = (NodeData *) b;
+
+ return na->dest_count > nb->dest_count ? -1 :
+ na->dest_count == nb->dest_count ? 0 : 1;
+}
+
+static gint exception(gconstpointer a, gconstpointer b, gpointer d)
+{
+ NodeData *na = (NodeData *) a;
+ NodeData *nb = (NodeData *) b;
+
+ return na->early_exit > nb->early_exit ? -1 :
+ na->early_exit == nb->early_exit ? 0 : 1;
+}
+
+static gint popular(gconstpointer a, gconstpointer b, gpointer d)
+{
+ NodeData *na = (NodeData *) a;
+ NodeData *nb = (NodeData *) b;
+
+ return na->dests->len > nb->dests->len ? -1 :
+ na->dests->len == nb->dests->len ? 0 : 1;
+}
+
+/* Filter out non-branches - returns true to remove entry */
+static gboolean filter_non_branches(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ NodeData *node = (NodeData *) value;
+
+ return node->dest_count == 0;
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) result = g_string_new("collected ");
+ GList *data;
+ GCompareDataFunc sort = &hottest;
+ int i = 0;
+
+ g_mutex_lock(&node_lock);
+ g_string_append_printf(result, "%d control flow nodes in the hash table\n",
+ g_hash_table_size(nodes));
+
+ /* remove all nodes that didn't branch */
+ g_hash_table_foreach_remove(nodes, filter_non_branches, NULL);
+
+ data = g_hash_table_get_values(nodes);
+
+ switch (report) {
+ case SORT_HOTTEST:
+ sort = &hottest;
+ break;
+ case SORT_EXCEPTION:
+ sort = &exception;
+ break;
+ case SORT_POPDEST:
+ sort = &popular;
+ break;
+ }
+
+ data = g_list_sort_with_data(data, sort, NULL);
+
+ for (GList *l = data;
+ l != NULL && i < topn;
+ l = l->next, i++) {
+ NodeData *n = l->data;
+ const char *type = n->mid_count ? "sync fault" : "branch";
+ g_string_append_printf(result, " addr: 0x%"PRIx64 " %s: %s (%s)\n",
+ n->addr, n->symbol, n->insn_disas, type);
+ if (n->early_exit) {
+ g_string_append_printf(result, " early exits %"PRId64"\n",
+ n->early_exit);
+ }
+ g_string_append_printf(result, " branches %"PRId64"\n",
+ n->dest_count);
+ for (int j = 0; j < n->dests->len; j++) {
+ DestData *dd = &g_array_index(n->dests, DestData, j);
+ g_string_append_printf(result, " to 0x%"PRIx64" (%"PRId64")\n",
+ dd->daddr, dd->dcount);
+ }
+ }
+
+ qemu_plugin_outs(result->str);
+
+ g_mutex_unlock(&node_lock);
+}
+
+static void plugin_init(void)
+{
+ g_mutex_init(&node_lock);
+ nodes = g_hash_table_new(g_int64_hash, g_int64_equal);
+ state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard));
+
+ /* score board declarations */
+ tb_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, tb_pc);
+ end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard,
+ end_block);
+ pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard,
+ pc_after_block);
+ last_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard,
+ last_pc);
+}
+
+static NodeData *create_node(uint64_t addr)
+{
+ NodeData *node = g_new0(NodeData, 1);
+ g_mutex_init(&node->lock);
+ node->addr = addr;
+ node->dests = g_array_new(true, true, sizeof(DestData));
+ return node;
+}
+
+static NodeData *fetch_node(uint64_t addr, bool create_if_not_found)
+{
+ NodeData *node = NULL;
+
+ g_mutex_lock(&node_lock);
+ node = (NodeData *) g_hash_table_lookup(nodes, &addr);
+ if (!node && create_if_not_found) {
+ node = create_node(addr);
+ g_hash_table_insert(nodes, &node->addr, node);
+ }
+ g_mutex_unlock(&node_lock);
+ return node;
+}
+
+/*
+ * Called when we detect a non-linear execution (pc !=
+ * pc_after_block). This could be due to a fault causing some sort of
+ * exit exception (if last_pc != block_end) or just a taken branch.
+ */
+static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
+{
+ uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
+ uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
+ uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
+ uint64_t pc = qemu_plugin_u64_get(tb_pc, cpu_index);
+
+ /* return early for address 0 */
+ if (!lpc) {
+ return;
+ }
+
+ NodeData *node = fetch_node(lpc, true);
+ DestData *data = NULL;
+ bool early_exit = (lpc != ebpc);
+ GArray *dests;
+
+ /* this condition should never be hit */
+ g_assert(pc != npc);
+
+ g_mutex_lock(&node->lock);
+
+ if (early_exit) {
+ fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64
+ " npc=%"PRIx64", lpc=%"PRIx64"\n",
+ __func__, pc, ebpc, npc, lpc);
+ node->early_exit++;
+ if (!node->mid_count) {
+ /* count now as we've only just allocated */
+ node->mid_count++;
+ }
+ }
+
+ dests = node->dests;
+ for (int i = 0; i < dests->len; i++) {
+ if (g_array_index(dests, DestData, i).daddr == pc) {
+ data = &g_array_index(dests, DestData, i);
+ }
+ }
+
+ /* we've never seen this before, allocate a new entry */
+ if (!data) {
+ DestData new_entry = { .daddr = pc };
+ g_array_append_val(dests, new_entry);
+ data = &g_array_index(dests, DestData, dests->len - 1);
+ g_assert(data->daddr == pc);
+ }
+
+ data->dcount++;
+ node->dest_count++;
+
+ g_mutex_unlock(&node->lock);
+}
+
+/*
+ * At the start of each block we need to resolve two things:
+ *
+ * - is last_pc == block_end, if not we had an early exit
+ * - is start of block last_pc + insn width, if not we jumped
+ *
+ * Once those are dealt with we can instrument the rest of the
+ * instructions for their execution.
+ *
+ */
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ uint64_t pc = qemu_plugin_tb_vaddr(tb);
+ size_t insns = qemu_plugin_tb_n_insns(tb);
+ struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);
+ struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);
+
+ /*
+ * check if we are executing linearly after the last block. We can
+ * handle both early block exits and normal branches in the
+ * callback if we hit it.
+ */
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_STORE_U64, tb_pc, pc);
+ qemu_plugin_register_vcpu_tb_exec_cond_cb(
+ tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_NE, pc_after_block, pc, NULL);
+
+ /*
+ * Now we can set start/end for this block so the next block can
+ * check where we are at. Do this on the first instruction and not
+ * the TB so we don't get mixed up with above.
+ */
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
+ QEMU_PLUGIN_INLINE_STORE_U64,
+ end_block, qemu_plugin_insn_vaddr(last_insn));
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
+ QEMU_PLUGIN_INLINE_STORE_U64,
+ pc_after_block,
+ qemu_plugin_insn_vaddr(last_insn) +
+ qemu_plugin_insn_size(last_insn));
+
+ for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
+ uint64_t ipc = qemu_plugin_insn_vaddr(insn);
+ /*
+ * If this is a potential branch point check if we could grab
+ * the disassembly for it. If it is the last instruction
+ * always create an entry.
+ */
+ NodeData *node = fetch_node(ipc, last_insn);
+ if (node) {
+ g_mutex_lock(&node->lock);
+ if (!node->insn_disas) {
+ node->insn_disas = qemu_plugin_insn_disas(insn);
+ }
+ if (!node->symbol) {
+ node->symbol = qemu_plugin_insn_symbol(insn);
+ }
+ if (last_insn == insn) {
+ node->last_count++;
+ } else {
+ node->mid_count++;
+ }
+ g_mutex_unlock(&node->lock);
+ }
+
+ /* Store the PC of what we are about to execute */
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn,
+ QEMU_PLUGIN_INLINE_STORE_U64,
+ last_pc, ipc);
+ }
+}
+
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+ int argc, char **argv)
+{
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+ if (g_strcmp0(tokens[0], "sort") == 0) {
+ if (g_strcmp0(tokens[1], "hottest") == 0) {
+ report = SORT_HOTTEST;
+ } else if (g_strcmp0(tokens[1], "early") == 0) {
+ report = SORT_EXCEPTION;
+ } else if (g_strcmp0(tokens[1], "exceptions") == 0) {
+ report = SORT_POPDEST;
+ } else {
+ fprintf(stderr, "failed to parse: %s\n", tokens[1]);
+ return -1;
+ }
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ plugin_init();
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
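For each branch point the plugin keeps a GArray of DestData counters, scanning for the destination address and appending a fresh element the first time it is seen. A standalone sketch of that lookup-or-append pattern under illustrative names (Dest, record_dest):

    /* Standalone sketch, names are illustrative; only demonstrates the
     * GArray bookkeeping, not the plugin callbacks. */
    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t daddr;
        uint64_t dcount;
    } Dest;

    static Dest *record_dest(GArray *dests, uint64_t pc)
    {
        for (guint i = 0; i < dests->len; i++) {
            Dest *d = &g_array_index(dests, Dest, i);
            if (d->daddr == pc) {
                d->dcount++;
                return d;
            }
        }
        /* first time we branch here: append a new counter */
        Dest new_entry = { .daddr = pc, .dcount = 1 };
        g_array_append_val(dests, new_entry);
        return &g_array_index(dests, Dest, dests->len - 1);
    }

    int main(void)
    {
        GArray *dests = g_array_new(TRUE, TRUE, sizeof(Dest));
        record_dest(dests, 0x1000);
        record_dest(dests, 0x1000);
        record_dest(dests, 0x2000);

        for (guint i = 0; i < dests->len; i++) {
            Dest *d = &g_array_index(dests, Dest, i);
            printf("0x%" PRIx64 ": %" PRIu64 "\n", d->daddr, d->dcount);
        }
        g_array_free(dests, TRUE);
        return 0;
    }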
diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c
index 371db97..d67d010 100644
--- a/contrib/plugins/execlog.c
+++ b/contrib/plugins/execlog.c
@@ -101,7 +101,7 @@ static void insn_check_regs(CPU *cpu)
GByteArray *temp = reg->last;
g_string_append_printf(cpu->last_exec, ", %s -> 0x", reg->name);
/* TODO: handle BE properly */
- for (int i = sz; i >= 0; i--) {
+ for (int i = sz - 1; i >= 0; i--) {
g_string_append_printf(cpu->last_exec, "%02x",
reg->new->data[i]);
}
@@ -181,8 +181,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
bool check_regs_this = rmatches;
bool check_regs_next = false;
- size_t n = qemu_plugin_tb_n_insns(tb);
- for (size_t i = 0; i < n; i++) {
+ size_t n_insns = qemu_plugin_tb_n_insns(tb);
+ for (size_t i = 0; i < n_insns; i++) {
char *insn_disas;
uint64_t insn_vaddr;
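The register-dump fix above tightens the loop bound from sz to sz - 1 so the byte at index sz is never read. A tiny illustration, assuming a little-endian byte buffer printed most-significant byte first:

    /* Illustrative example of the corrected loop bound. */
    #include <stdint.h>
    #include <stdio.h>

    static void print_hex_be(const uint8_t *data, int sz)
    {
        printf("0x");
        for (int i = sz - 1; i >= 0; i--) {   /* "i = sz" read one byte too far */
            printf("%02x", data[i]);
        }
        printf("\n");
    }

    int main(void)
    {
        uint8_t reg[4] = { 0x78, 0x56, 0x34, 0x12 };  /* LE encoding of 0x12345678 */
        print_hex_be(reg, sizeof(reg));               /* prints 0x12345678 */
        return 0;
    }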
diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c
index 02bc507..98404b6 100644
--- a/contrib/plugins/hotblocks.c
+++ b/contrib/plugins/hotblocks.c
@@ -29,7 +29,7 @@ static guint64 limit = 20;
*
* The internals of the TCG are not exposed to plugins so we can only
* get the starting PC for each block. We cheat this slightly by
- * xor'ing the number of instructions to the hash to help
+ * checking the number of instructions as well to help
* differentiate.
*/
typedef struct {
@@ -39,7 +39,7 @@ typedef struct {
unsigned long insns;
} ExecCount;
-static gint cmp_exec_count(gconstpointer a, gconstpointer b)
+static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d)
{
ExecCount *ea = (ExecCount *) a;
ExecCount *eb = (ExecCount *) b;
@@ -50,6 +50,20 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
return count_a > count_b ? -1 : 1;
}
+static guint exec_count_hash(gconstpointer v)
+{
+ const ExecCount *e = v;
+ return e->start_addr ^ e->insns;
+}
+
+static gboolean exec_count_equal(gconstpointer v1, gconstpointer v2)
+{
+ const ExecCount *ea = v1;
+ const ExecCount *eb = v2;
+ return (ea->start_addr == eb->start_addr) &&
+ (ea->insns == eb->insns);
+}
+
static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
{
ExecCount *cnt = value;
@@ -65,7 +79,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
g_string_append_printf(report, "%d entries in the hash table\n",
g_hash_table_size(hotblocks));
counts = g_hash_table_get_values(hotblocks);
- it = g_list_sort(counts, cmp_exec_count);
+ it = g_list_sort_with_data(counts, cmp_exec_count, NULL);
if (it) {
g_string_append_printf(report, "pc, tcount, icount, ecount\n");
@@ -91,7 +105,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
static void plugin_init(void)
{
- hotblocks = g_hash_table_new(NULL, g_direct_equal);
+ hotblocks = g_hash_table_new(exec_count_hash, exec_count_equal);
}
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
@@ -111,10 +125,15 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
ExecCount *cnt;
uint64_t pc = qemu_plugin_tb_vaddr(tb);
size_t insns = qemu_plugin_tb_n_insns(tb);
- uint64_t hash = pc ^ insns;
g_mutex_lock(&lock);
- cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
+ {
+ ExecCount e;
+ e.start_addr = pc;
+ e.insns = insns;
+ cnt = (ExecCount *) g_hash_table_lookup(hotblocks, &e);
+ }
+
if (cnt) {
cnt->trans_count++;
} else {
@@ -123,7 +142,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
cnt->trans_count = 1;
cnt->insns = insns;
cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
- g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
+ g_hash_table_insert(hotblocks, cnt, cnt);
}
g_mutex_unlock(&lock);
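hotblocks.c stops packing pc ^ insns into a pointer key and instead hashes the ExecCount record itself over (start_addr, insns), probing the table with a stack-allocated template. A self-contained sketch of that scheme with illustrative names (Count, count_hash):

    /* Standalone sketch; uses a destroy notifier for brevity, unlike the
     * plugin itself, and the names are illustrative. */
    #include <glib.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t start_addr;
        unsigned long insns;
        unsigned long trans_count;
    } Count;

    static guint count_hash(gconstpointer v)
    {
        const Count *c = v;
        return c->start_addr ^ c->insns;
    }

    static gboolean count_equal(gconstpointer a, gconstpointer b)
    {
        const Count *ca = a;
        const Count *cb = b;
        return ca->start_addr == cb->start_addr && ca->insns == cb->insns;
    }

    int main(void)
    {
        GHashTable *table = g_hash_table_new_full(count_hash, count_equal,
                                                  NULL, g_free);
        Count key = { .start_addr = 0x1000, .insns = 5 };

        Count *c = g_hash_table_lookup(table, &key);   /* probe with stack key */
        if (!c) {
            c = g_new0(Count, 1);
            c->start_addr = key.start_addr;
            c->insns = key.insns;
            g_hash_table_insert(table, c, c);          /* record is its own key */
        }
        c->trans_count++;

        printf("entries: %u\n", g_hash_table_size(table));
        g_hash_table_unref(table);
        return 0;
    }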
diff --git a/contrib/plugins/hotpages.c b/contrib/plugins/hotpages.c
index 8316ae5..9d48ac9 100644
--- a/contrib/plugins/hotpages.c
+++ b/contrib/plugins/hotpages.c
@@ -48,7 +48,7 @@ typedef struct {
static GMutex lock;
static GHashTable *pages;
-static gint cmp_access_count(gconstpointer a, gconstpointer b)
+static gint cmp_access_count(gconstpointer a, gconstpointer b, gpointer d)
{
PageCounters *ea = (PageCounters *) a;
PageCounters *eb = (PageCounters *) b;
@@ -83,7 +83,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
if (counts && g_list_next(counts)) {
GList *it;
- it = g_list_sort(counts, cmp_access_count);
+ it = g_list_sort_with_data(counts, cmp_access_count, NULL);
for (i = 0; i < limit && it->next; i++, it = it->next) {
PageCounters *rec = (PageCounters *) it->data;
@@ -103,7 +103,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
static void plugin_init(void)
{
page_mask = (page_size - 1);
- pages = g_hash_table_new(NULL, g_direct_equal);
+ pages = g_hash_table_new(g_int64_hash, g_int64_equal);
}
static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
@@ -130,12 +130,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
page &= ~page_mask;
g_mutex_lock(&lock);
- count = (PageCounters *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page));
+ count = (PageCounters *) g_hash_table_lookup(pages, &page);
if (!count) {
count = g_new0(PageCounters, 1);
count->page_address = page;
- g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) count);
+ g_hash_table_insert(pages, &count->page_address, count);
}
if (qemu_plugin_mem_is_store(meminfo)) {
count->writes++;
diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c
index 9be67f7..42bddb6 100644
--- a/contrib/plugins/howvec.c
+++ b/contrib/plugins/howvec.c
@@ -155,7 +155,7 @@ static ClassSelector class_tables[] = {
static InsnClassExecCount *class_table;
static int class_table_sz;
-static gint cmp_exec_count(gconstpointer a, gconstpointer b)
+static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d)
{
InsnExecCount *ea = (InsnExecCount *) a;
InsnExecCount *eb = (InsnExecCount *) b;
@@ -208,7 +208,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
counts = g_hash_table_get_values(insns);
if (counts && g_list_next(counts)) {
g_string_append_printf(report, "Individual Instructions:\n");
- counts = g_list_sort(counts, cmp_exec_count);
+ counts = g_list_sort_with_data(counts, cmp_exec_count, NULL);
for (i = 0; i < limit && g_list_next(counts);
i++, counts = g_list_next(counts)) {
@@ -253,6 +253,8 @@ static struct qemu_plugin_scoreboard *find_counter(
int i;
uint64_t *cnt = NULL;
uint32_t opcode = 0;
+ /* if opcode is greater than 32 bits, we should refactor insn hash table. */
+ G_STATIC_ASSERT(sizeof(opcode) == sizeof(uint32_t));
InsnClassExecCount *class = NULL;
/*
@@ -284,7 +286,7 @@ static struct qemu_plugin_scoreboard *find_counter(
g_mutex_lock(&lock);
icount = (InsnExecCount *) g_hash_table_lookup(insns,
- GUINT_TO_POINTER(opcode));
+ (gpointer)(intptr_t) opcode);
if (!icount) {
icount = g_new0(InsnExecCount, 1);
@@ -295,8 +297,7 @@ static struct qemu_plugin_scoreboard *find_counter(
qemu_plugin_scoreboard_new(sizeof(uint64_t));
icount->count = qemu_plugin_scoreboard_u64(score);
- g_hash_table_insert(insns, GUINT_TO_POINTER(opcode),
- (gpointer) icount);
+ g_hash_table_insert(insns, (gpointer)(intptr_t) opcode, icount);
}
g_mutex_unlock(&lock);
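howvec.c replaces GUINT_TO_POINTER with an explicit (gpointer)(intptr_t) cast for its 32-bit opcode keys and adds a compile-time guard on the key width. A short standalone sketch of that packing, assuming a direct-hash table and an illustrative opcode value:

    /* Standalone sketch of pointer-packed 32-bit keys; the opcode and value
     * are illustrative only. */
    #include <glib.h>
    #include <stdint.h>
    #include <stdio.h>

    /* An opcode wider than a pointer could not be packed this way. */
    G_STATIC_ASSERT(sizeof(uint32_t) <= sizeof(gpointer));

    int main(void)
    {
        GHashTable *insns = g_hash_table_new(NULL, g_direct_equal);
        uint32_t opcode = 0xd503201f;            /* e.g. an AArch64 NOP */

        g_hash_table_insert(insns, (gpointer)(intptr_t)opcode, (gpointer)"nop");
        const char *name = g_hash_table_lookup(insns, (gpointer)(intptr_t)opcode);
        printf("%08x -> %s\n", opcode, name);

        g_hash_table_destroy(insns);
        return 0;
    }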
diff --git a/contrib/plugins/hwprofile.c b/contrib/plugins/hwprofile.c
index 739ac0c..a9838cc 100644
--- a/contrib/plugins/hwprofile.c
+++ b/contrib/plugins/hwprofile.c
@@ -43,6 +43,8 @@ typedef struct {
static GMutex lock;
static GHashTable *devices;
+static struct qemu_plugin_scoreboard *source_pc_scoreboard;
+static qemu_plugin_u64 source_pc;
/* track the access pattern to a piece of HW */
static bool pattern;
@@ -69,7 +71,7 @@ static void plugin_init(void)
devices = g_hash_table_new(NULL, NULL);
}
-static gint sort_cmp(gconstpointer a, gconstpointer b)
+static gint sort_cmp(gconstpointer a, gconstpointer b, gpointer d)
{
DeviceCounts *ea = (DeviceCounts *) a;
DeviceCounts *eb = (DeviceCounts *) b;
@@ -77,7 +79,7 @@ static gint sort_cmp(gconstpointer a, gconstpointer b)
eb->totals.reads + eb->totals.writes ? -1 : 1;
}
-static gint sort_loc(gconstpointer a, gconstpointer b)
+static gint sort_loc(gconstpointer a, gconstpointer b, gpointer d)
{
IOLocationCounts *ea = (IOLocationCounts *) a;
IOLocationCounts *eb = (IOLocationCounts *) b;
@@ -124,13 +126,13 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
if (counts && g_list_next(counts)) {
GList *it;
- it = g_list_sort(counts, sort_cmp);
+ it = g_list_sort_with_data(counts, sort_cmp, NULL);
while (it) {
DeviceCounts *rec = (DeviceCounts *) it->data;
if (rec->detail) {
GList *accesses = g_hash_table_get_values(rec->detail);
- GList *io_it = g_list_sort(accesses, sort_loc);
+ GList *io_it = g_list_sort_with_data(accesses, sort_loc, NULL);
const char *prefix = pattern ? "off" : "pc";
g_string_append_printf(report, "%s @ 0x%"PRIx64"\n",
rec->name, rec->base);
@@ -159,7 +161,7 @@ static DeviceCounts *new_count(const char *name, uint64_t base)
count->name = name;
count->base = base;
if (pattern || source) {
- count->detail = g_hash_table_new(NULL, NULL);
+ count->detail = g_hash_table_new(g_int64_hash, g_int64_equal);
}
g_hash_table_insert(devices, (gpointer) name, count);
return count;
@@ -169,7 +171,7 @@ static IOLocationCounts *new_location(GHashTable *table, uint64_t off_or_pc)
{
IOLocationCounts *loc = g_new0(IOLocationCounts, 1);
loc->off_or_pc = off_or_pc;
- g_hash_table_insert(table, (gpointer) off_or_pc, loc);
+ g_hash_table_insert(table, &loc->off_or_pc, loc);
return loc;
}
@@ -224,12 +226,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
/* either track offsets or source of access */
if (source) {
- off = (uint64_t) udata;
+ off = qemu_plugin_u64_get(source_pc, cpu_index);
}
if (pattern || source) {
IOLocationCounts *io_count = g_hash_table_lookup(counts->detail,
- (gpointer) off);
+ &off);
if (!io_count) {
io_count = new_location(counts->detail, off);
}
@@ -247,10 +249,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
for (i = 0; i < n; i++) {
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
- gpointer udata = (gpointer) (source ? qemu_plugin_insn_vaddr(insn) : 0);
+ if (source) {
+ uint64_t pc = qemu_plugin_insn_vaddr(insn);
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, rw, QEMU_PLUGIN_INLINE_STORE_U64,
+ source_pc, pc);
+ }
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_haddr,
- QEMU_PLUGIN_CB_NO_REGS,
- rw, udata);
+ QEMU_PLUGIN_CB_NO_REGS, rw, NULL);
}
}
@@ -306,10 +312,9 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
return -1;
}
- /* Just warn about overflow */
- if (info->system.smp_vcpus > 64 ||
- info->system.max_vcpus > 64) {
- fprintf(stderr, "hwprofile: can only track up to 64 CPUs\n");
+ if (source) {
+ source_pc_scoreboard = qemu_plugin_scoreboard_new(sizeof(uint64_t));
+ source_pc = qemu_plugin_scoreboard_u64(source_pc_scoreboard);
}
plugin_init();
diff --git a/contrib/plugins/ips.c b/contrib/plugins/ips.c
index 29fa556..f110c56 100644
--- a/contrib/plugins/ips.c
+++ b/contrib/plugins/ips.c
@@ -129,20 +129,62 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata)
qemu_plugin_scoreboard_free(vcpus);
}
+typedef struct {
+ const char *suffix;
+ unsigned long multipler;
+} ScaleEntry;
+
+/* a bit like units.h but not binary */
+static const ScaleEntry scales[] = {
+ { "k", 1000 },
+ { "m", 1000 * 1000 },
+ { "g", 1000 * 1000 * 1000 },
+};
+
QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
const qemu_info_t *info, int argc,
char **argv)
{
+ bool ipq_set = false;
+
for (int i = 0; i < argc; i++) {
char *opt = argv[i];
g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
if (g_strcmp0(tokens[0], "ips") == 0) {
- max_insn_per_second = g_ascii_strtoull(tokens[1], NULL, 10);
+ char *endptr = NULL;
+ max_insn_per_second = g_ascii_strtoull(tokens[1], &endptr, 10);
if (!max_insn_per_second && errno) {
fprintf(stderr, "%s: couldn't parse %s (%s)\n",
__func__, tokens[1], g_strerror(errno));
return -1;
}
+
+ if (endptr && *endptr != 0) {
+ g_autofree gchar *lower = g_utf8_strdown(endptr, -1);
+ unsigned long scale = 0;
+
+ for (int j = 0; j < G_N_ELEMENTS(scales); j++) {
+ if (g_strcmp0(lower, scales[j].suffix) == 0) {
+ scale = scales[j].multipler;
+ break;
+ }
+ }
+
+ if (scale) {
+ max_insn_per_second *= scale;
+ } else {
+ fprintf(stderr, "bad suffix: %s\n", endptr);
+ return -1;
+ }
+ }
+ } else if (g_strcmp0(tokens[0], "ipq") == 0) {
+ max_insn_per_quantum = g_ascii_strtoull(tokens[1], NULL, 10);
+
+ if (!max_insn_per_quantum) {
+ fprintf(stderr, "bad ipq value: %s\n", tokens[0]);
+ return -1;
+ }
+ ipq_set = true;
} else {
fprintf(stderr, "option parsing failed: %s\n", opt);
return -1;
@@ -150,7 +192,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
}
vcpus = qemu_plugin_scoreboard_new(sizeof(vCPUTime));
- max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC;
+
+ if (!ipq_set) {
+ max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC;
+ }
+
+ if (max_insn_per_quantum == 0) {
+ fprintf(stderr, "minimum of %d instructions per second needed\n",
+ NUM_TIME_UPDATE_PER_SEC);
+ return -1;
+ }
time_handle = qemu_plugin_request_time_control();
g_assert(time_handle);
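The new ips= suffix handling relies on g_ascii_strtoull() reporting where parsing stopped through endptr, then mapping a trailing k/m/g onto a decimal multiplier. A standalone sketch of that parse, with an illustrative helper name (parse_rate):

    /* Standalone sketch of endptr-based suffix parsing; not the plugin's code. */
    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t parse_rate(const char *str)
    {
        char *endptr = NULL;
        uint64_t val = g_ascii_strtoull(str, &endptr, 10);

        if (endptr && *endptr != '\0') {
            g_autofree gchar *suffix = g_utf8_strdown(endptr, -1);
            if (g_strcmp0(suffix, "k") == 0) {
                val *= 1000;
            } else if (g_strcmp0(suffix, "m") == 0) {
                val *= 1000 * 1000;
            } else if (g_strcmp0(suffix, "g") == 0) {
                val *= 1000 * 1000 * 1000;
            } else {
                fprintf(stderr, "bad suffix: %s\n", endptr);
                return 0;
            }
        }
        return val;
    }

    int main(void)
    {
        printf("%" PRIu64 "\n", parse_rate("250m"));   /* prints 250000000 */
        return 0;
    }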
diff --git a/contrib/plugins/lockstep.c b/contrib/plugins/lockstep.c
index 6a7e9bb..62981d4 100644
--- a/contrib/plugins/lockstep.c
+++ b/contrib/plugins/lockstep.c
@@ -101,6 +101,31 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
plugin_cleanup(id);
}
+/*
+ * g_memdup has been deprecated in Glib since 2.68 and
+ * will complain about it if you try to use it. However until
+ * glib_req_ver for QEMU is bumped we make a copy of the glib-compat
+ * handler.
+ */
+static inline gpointer g_memdup2_qemu(gconstpointer mem, gsize byte_size)
+{
+#if GLIB_CHECK_VERSION(2, 68, 0)
+ return g_memdup2(mem, byte_size);
+#else
+ gpointer new_mem;
+
+ if (mem && byte_size != 0) {
+ new_mem = g_malloc(byte_size);
+ memcpy(new_mem, mem, byte_size);
+ } else {
+ new_mem = NULL;
+ }
+
+ return new_mem;
+#endif
+}
+#define g_memdup2(m, s) g_memdup2_qemu(m, s)
+
static void report_divergance(ExecState *us, ExecState *them)
{
DivergeState divrec = { log, 0 };
diff --git a/contrib/plugins/meson.build b/contrib/plugins/meson.build
new file mode 100644
index 0000000..1876bc7
--- /dev/null
+++ b/contrib/plugins/meson.build
@@ -0,0 +1,30 @@
+contrib_plugins = ['bbv', 'cache', 'cflow', 'drcov', 'execlog', 'hotblocks',
+ 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger']
+if host_os != 'windows'
+ # lockstep uses socket.h
+ contrib_plugins += 'lockstep'
+endif
+
+t = []
+if get_option('plugins')
+ foreach i : contrib_plugins
+ if host_os == 'windows'
+ t += shared_module(i, files(i + '.c') + 'win32_linker.c',
+ include_directories: '../../include/qemu',
+ link_depends: [win32_qemu_plugin_api_lib],
+ link_args: win32_qemu_plugin_api_link_flags,
+ dependencies: glib)
+ else
+ t += shared_module(i, files(i + '.c'),
+ include_directories: '../../include/qemu',
+ dependencies: glib)
+ endif
+ endforeach
+endif
+if t.length() > 0
+ alias_target('contrib-plugins', t)
+else
+ run_target('contrib-plugins', command: [python, '-c', ''])
+endif
+
+plugin_modules += t
diff --git a/contrib/plugins/stoptrigger.c b/contrib/plugins/stoptrigger.c
new file mode 100644
index 0000000..b3a6ed6
--- /dev/null
+++ b/contrib/plugins/stoptrigger.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2024, Simon Hamelin <simon.hamelin@grenoble-inp.org>
+ *
+ * Stop execution once a given address is reached or once the
+ * count of executed instructions reaches a specified limit
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <assert.h>
+#include <glib.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+/* Scoreboard to track executed instructions count */
+typedef struct {
+ uint64_t insn_count;
+ uint64_t current_pc;
+} InstructionsCount;
+static struct qemu_plugin_scoreboard *insn_count_sb;
+static qemu_plugin_u64 insn_count;
+static qemu_plugin_u64 current_pc;
+
+static uint64_t icount;
+static int icount_exit_code;
+
+static bool exit_on_icount;
+static bool exit_on_address;
+
+/* Map trigger addresses to exit code */
+static GHashTable *addrs_ht;
+
+typedef struct {
+ uint64_t exit_addr;
+ int exit_code;
+} ExitInfo;
+
+static void exit_emulation(int return_code, char *message)
+{
+ qemu_plugin_outs(message);
+ g_free(message);
+ exit(return_code);
+}
+
+static void exit_icount_reached(unsigned int cpu_index, void *udata)
+{
+ uint64_t insn_vaddr = qemu_plugin_u64_get(current_pc, cpu_index);
+ char *msg = g_strdup_printf("icount reached at 0x%" PRIx64 ", exiting\n",
+ insn_vaddr);
+ exit_emulation(icount_exit_code, msg);
+}
+
+static void exit_address_reached(unsigned int cpu_index, void *udata)
+{
+ ExitInfo *ei = udata;
+ g_assert(ei);
+ char *msg = g_strdup_printf("0x%" PRIx64 " reached, exiting\n", ei->exit_addr);
+ exit_emulation(ei->exit_code, msg);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ size_t tb_n = qemu_plugin_tb_n_insns(tb);
+ for (size_t i = 0; i < tb_n; i++) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
+ uint64_t insn_vaddr = qemu_plugin_insn_vaddr(insn);
+
+ if (exit_on_icount) {
+ /* Increment and check scoreboard for each instruction */
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_STORE_U64, current_pc, insn_vaddr);
+ qemu_plugin_register_vcpu_insn_exec_cond_cb(
+ insn, exit_icount_reached, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_EQ, insn_count, icount + 1, NULL);
+ }
+
+ if (exit_on_address) {
+ ExitInfo *ei = g_hash_table_lookup(addrs_ht, &insn_vaddr);
+ if (ei) {
+ /* Exit triggered by address */
+ qemu_plugin_register_vcpu_insn_exec_cb(
+ insn, exit_address_reached, QEMU_PLUGIN_CB_NO_REGS, ei);
+ }
+ }
+ }
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_hash_table_destroy(addrs_ht);
+ qemu_plugin_scoreboard_free(insn_count_sb);
+}
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info, int argc,
+ char **argv)
+{
+ addrs_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free);
+
+ insn_count_sb = qemu_plugin_scoreboard_new(sizeof(InstructionsCount));
+ insn_count = qemu_plugin_scoreboard_u64_in_struct(
+ insn_count_sb, InstructionsCount, insn_count);
+ current_pc = qemu_plugin_scoreboard_u64_in_struct(
+ insn_count_sb, InstructionsCount, current_pc);
+
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+ if (g_strcmp0(tokens[0], "icount") == 0) {
+ g_auto(GStrv) icount_tokens = g_strsplit(tokens[1], ":", 2);
+ icount = g_ascii_strtoull(icount_tokens[0], NULL, 0);
+ if (icount < 1 || g_strrstr(icount_tokens[0], "-") != NULL) {
+ fprintf(stderr,
+ "icount parsing failed: '%s' must be a positive "
+ "integer\n",
+ icount_tokens[0]);
+ return -1;
+ }
+ if (icount_tokens[1]) {
+ icount_exit_code = g_ascii_strtoull(icount_tokens[1], NULL, 0);
+ }
+ exit_on_icount = true;
+ } else if (g_strcmp0(tokens[0], "addr") == 0) {
+ g_auto(GStrv) addr_tokens = g_strsplit(tokens[1], ":", 2);
+ ExitInfo *ei = g_malloc(sizeof(ExitInfo));
+ ei->exit_addr = g_ascii_strtoull(addr_tokens[0], NULL, 0);
+ ei->exit_code = 0;
+ if (addr_tokens[1]) {
+ ei->exit_code = g_ascii_strtoull(addr_tokens[1], NULL, 0);
+ }
+ g_hash_table_insert(addrs_ht, &ei->exit_addr, ei);
+ exit_on_address = true;
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ if (!exit_on_icount && !exit_on_address) {
+ fprintf(stderr, "'icount' or 'addr' argument missing\n");
+ return -1;
+ }
+
+ /* Register translation block and exit callbacks */
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+
+ return 0;
+}
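The plugin accepts addr=ADDR[:CODE]; base 0 lets g_ascii_strtoull() take 0x-prefixed addresses and the optional second token selects the exit code. A minimal sketch of that argument split, assuming an illustrative input string:

    /* Standalone sketch of the ADDR[:CODE] split; the input is made up. */
    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const char *arg = "0x80000000:3";
        g_auto(GStrv) tok = g_strsplit(arg, ":", 2);

        uint64_t exit_addr = g_ascii_strtoull(tok[0], NULL, 0);  /* base 0: hex ok */
        int exit_code = tok[1] ? (int)g_ascii_strtoull(tok[1], NULL, 0) : 0;

        printf("stop at 0x%" PRIx64 ", exit(%d)\n", exit_addr, exit_code);
        return 0;
    }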
diff --git a/contrib/systemd/qemu-vmsr-helper.service b/contrib/systemd/qemu-vmsr-helper.service
new file mode 100644
index 0000000..8fd397b
--- /dev/null
+++ b/contrib/systemd/qemu-vmsr-helper.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Virtual RAPL MSR Daemon for QEMU
+
+[Service]
+WorkingDirectory=/tmp
+Type=simple
+ExecStart=/usr/bin/qemu-vmsr-helper
+PrivateTmp=yes
+ProtectSystem=strict
+ReadWritePaths=/var/run
+RestrictAddressFamilies=AF_UNIX
+Restart=always
+RestartSec=0
+
+[Install]
diff --git a/contrib/systemd/qemu-vmsr-helper.socket b/contrib/systemd/qemu-vmsr-helper.socket
new file mode 100644
index 0000000..183e830
--- /dev/null
+++ b/contrib/systemd/qemu-vmsr-helper.socket
@@ -0,0 +1,9 @@
+[Unit]
+Description=Virtual RAPL MSR helper for QEMU
+
+[Socket]
+ListenStream=/run/qemu-vmsr-helper.sock
+SocketMode=0600
+
+[Install]
+WantedBy=multi-user.target
diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c
index 9492146..6cc18a1 100644
--- a/contrib/vhost-user-blk/vhost-user-blk.c
+++ b/contrib/vhost-user-blk/vhost-user-blk.c
@@ -196,7 +196,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,
VubDev *vdev_blk = req->vdev_blk;
desc = buf;
uint64_t range[2] = { le64_to_cpu(desc->sector) << 9,
- le32_to_cpu(desc->num_sectors) << 9 };
+ (uint64_t)le32_to_cpu(desc->num_sectors) << 9 };
if (type == VIRTIO_BLK_T_DISCARD) {
if (ioctl(vdev_blk->blk_fd, BLKDISCARD, range) == 0) {
g_free(buf);
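The cast added above matters because le32_to_cpu() yields a 32-bit value, so num_sectors << 9 is evaluated in 32-bit arithmetic and wraps for ranges of 4 GiB and up unless one operand is widened first. A tiny illustration with an assumed sector count:

    /* Illustrative example of the shift-width fix; the sector count is made up. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t num_sectors = 0x900000;                /* 4.5 GiB worth of sectors */

        uint64_t wrong = num_sectors << 9;              /* shift wraps in 32 bits */
        uint64_t right = (uint64_t)num_sectors << 9;    /* widened before the shift */

        printf("wrong: 0x%" PRIx64 "\nright: 0x%" PRIx64 "\n", wrong, right);
        return 0;
    }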
diff --git a/contrib/vmapple/uuid.sh b/contrib/vmapple/uuid.sh
new file mode 100755
index 0000000..f563722
--- /dev/null
+++ b/contrib/vmapple/uuid.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+#
+# Used for converting a guest provisioned using Virtualization.framework
+# for use with the QEMU 'vmapple' aarch64 machine type.
+#
+# Extracts the Machine UUID from Virtualization.framework VM JSON file.
+# (as produced by 'macosvm', passed as command line argument)
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+plutil -extract machineId raw "$1" | base64 -d | plutil -extract ECID raw -
+
diff --git a/cpu-common.c b/cpu-common.c
index 7ae136f..ef5757d 100644
--- a/cpu-common.c
+++ b/cpu-common.c
@@ -21,7 +21,6 @@
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
-#include "sysemu/cpus.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"
@@ -57,14 +56,12 @@ void cpu_list_unlock(void)
qemu_mutex_unlock(&qemu_cpu_list_lock);
}
-static bool cpu_index_auto_assigned;
-static int cpu_get_free_index(void)
+int cpu_get_free_index(void)
{
CPUState *some_cpu;
int max_cpu_index = 0;
- cpu_index_auto_assigned = true;
CPU_FOREACH(some_cpu) {
if (some_cpu->cpu_index >= max_cpu_index) {
max_cpu_index = some_cpu->cpu_index + 1;
@@ -83,8 +80,11 @@ unsigned int cpu_list_generation_id_get(void)
void cpu_list_add(CPUState *cpu)
{
+ static bool cpu_index_auto_assigned;
+
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
+ cpu_index_auto_assigned = true;
cpu->cpu_index = cpu_get_free_index();
assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
} else {
@@ -193,6 +193,9 @@ void start_exclusive(void)
CPUState *other_cpu;
int running_cpus;
+ /* Ensure we are not running, or start_exclusive will be blocked. */
+ g_assert(!current_cpu->running);
+
if (current_cpu->exclusive_context_count) {
current_cpu->exclusive_context_count++;
return;
@@ -385,11 +388,10 @@ void process_queued_cpu_work(CPUState *cpu)
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
- if (cc->gdb_adjust_breakpoint) {
- pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ if (cpu->cc->gdb_adjust_breakpoint) {
+ pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
}
bp = g_malloc(sizeof(*bp));
@@ -415,11 +417,10 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
- if (cc->gdb_adjust_breakpoint) {
- pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ if (cpu->cc->gdb_adjust_breakpoint) {
+ pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
}
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
diff --git a/cpu-target.c b/cpu-target.c
index 499facf..1c90a30 100644
--- a/cpu-target.c
+++ b/cpu-target.c
@@ -18,305 +18,19 @@
*/
#include "qemu/osdep.h"
-#include "qapi/error.h"
-
-#include "exec/target_page.h"
-#include "exec/page-protection.h"
-#include "hw/qdev-core.h"
-#include "hw/qdev-properties.h"
-#include "qemu/error-report.h"
-#include "qemu/qemu-print.h"
-#include "migration/vmstate.h"
-#ifdef CONFIG_USER_ONLY
-#include "qemu.h"
-#else
-#include "hw/core/sysemu-cpu-ops.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-#endif
-#include "sysemu/cpus.h"
-#include "sysemu/tcg.h"
+#include "cpu.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
+#include "exec/cpu-common.h"
#include "exec/tswap.h"
#include "exec/replay-core.h"
-#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
-#include "exec/tb-flush.h"
-#include "exec/translate-all.h"
#include "exec/log.h"
-#include "hw/core/accel-cpu.h"
+#include "hw/core/cpu.h"
#include "trace/trace-root.h"
-#include "qemu/accel.h"
-
-#ifndef CONFIG_USER_ONLY
-static int cpu_common_post_load(void *opaque, int version_id)
-{
- CPUState *cpu = opaque;
-
- /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
- version_id is increased. */
- cpu->interrupt_request &= ~0x01;
- tlb_flush(cpu);
-
- /* loadvm has just updated the content of RAM, bypassing the
- * usual mechanisms that ensure we flush TBs for writes to
- * memory we've translated code from. So we must flush all TBs,
- * which will now be stale.
- */
- tb_flush(cpu);
-
- return 0;
-}
-
-static int cpu_common_pre_load(void *opaque)
-{
- CPUState *cpu = opaque;
-
- cpu->exception_index = -1;
-
- return 0;
-}
-
-static bool cpu_common_exception_index_needed(void *opaque)
-{
- CPUState *cpu = opaque;
-
- return tcg_enabled() && cpu->exception_index != -1;
-}
-
-static const VMStateDescription vmstate_cpu_common_exception_index = {
- .name = "cpu_common/exception_index",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = cpu_common_exception_index_needed,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(exception_index, CPUState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static bool cpu_common_crash_occurred_needed(void *opaque)
-{
- CPUState *cpu = opaque;
-
- return cpu->crash_occurred;
-}
-
-static const VMStateDescription vmstate_cpu_common_crash_occurred = {
- .name = "cpu_common/crash_occurred",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = cpu_common_crash_occurred_needed,
- .fields = (const VMStateField[]) {
- VMSTATE_BOOL(crash_occurred, CPUState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-const VMStateDescription vmstate_cpu_common = {
- .name = "cpu_common",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_load = cpu_common_pre_load,
- .post_load = cpu_common_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(halted, CPUState),
- VMSTATE_UINT32(interrupt_request, CPUState),
- VMSTATE_END_OF_LIST()
- },
- .subsections = (const VMStateDescription * const []) {
- &vmstate_cpu_common_exception_index,
- &vmstate_cpu_common_crash_occurred,
- NULL
- }
-};
-#endif
-
-bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
-{
- /* cache the cpu class for the hotpath */
- cpu->cc = CPU_GET_CLASS(cpu);
-
- if (!accel_cpu_common_realize(cpu, errp)) {
- return false;
- }
-
- /* Wait until cpu initialization complete before exposing cpu. */
- cpu_list_add(cpu);
-
-#ifdef CONFIG_USER_ONLY
- assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
- qdev_get_vmsd(DEVICE(cpu))->unmigratable);
-#else
- if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
- vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
- }
- if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
- vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
- }
-#endif /* CONFIG_USER_ONLY */
-
- return true;
-}
-
-void cpu_exec_unrealizefn(CPUState *cpu)
-{
-#ifndef CONFIG_USER_ONLY
- CPUClass *cc = CPU_GET_CLASS(cpu);
- if (cc->sysemu_ops->legacy_vmsd != NULL) {
- vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
- }
- if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
- vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
- }
-#endif
-
- cpu_list_remove(cpu);
- /*
- * Now that the vCPU has been removed from the RCU list, we can call
- * accel_cpu_common_unrealize, which may free fields using call_rcu.
- */
- accel_cpu_common_unrealize(cpu);
-}
-
-/*
- * This can't go in hw/core/cpu.c because that file is compiled only
- * once for both user-mode and system builds.
- */
-static Property cpu_common_props[] = {
-#ifdef CONFIG_USER_ONLY
- /*
- * Create a property for the user-only object, so users can
- * adjust prctl(PR_SET_UNALIGN) from the command-line.
- * Has no effect if the target does not support the feature.
- */
- DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
- prctl_unalign_sigbus, false),
-#else
- /*
- * Create a memory property for system CPU object, so users can
- * wire up its memory. The default if no link is set up is to use
- * the system address space.
- */
- DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
- MemoryRegion *),
-#endif
- DEFINE_PROP_END_OF_LIST(),
-};
-
-#ifndef CONFIG_USER_ONLY
-static bool cpu_get_start_powered_off(Object *obj, Error **errp)
-{
- CPUState *cpu = CPU(obj);
- return cpu->start_powered_off;
-}
-
-static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
-{
- CPUState *cpu = CPU(obj);
- cpu->start_powered_off = value;
-}
-#endif
-
-void cpu_class_init_props(DeviceClass *dc)
-{
-#ifndef CONFIG_USER_ONLY
- ObjectClass *oc = OBJECT_CLASS(dc);
-
- /*
- * We can't use DEFINE_PROP_BOOL in the Property array for this
- * property, because we want this to be settable after realize.
- */
- object_class_property_add_bool(oc, "start-powered-off",
- cpu_get_start_powered_off,
- cpu_set_start_powered_off);
-#endif
-
- device_class_set_props(dc, cpu_common_props);
-}
-
-void cpu_exec_initfn(CPUState *cpu)
-{
- cpu->as = NULL;
- cpu->num_ases = 0;
-
-#ifndef CONFIG_USER_ONLY
- cpu->memory = get_system_memory();
- object_ref(OBJECT(cpu->memory));
-#endif
-}
-
-char *cpu_model_from_type(const char *typename)
-{
- const char *suffix = "-" CPU_RESOLVING_TYPE;
-
- if (!object_class_by_name(typename)) {
- return NULL;
- }
-
- if (g_str_has_suffix(typename, suffix)) {
- return g_strndup(typename, strlen(typename) - strlen(suffix));
- }
-
- return g_strdup(typename);
-}
-
-const char *parse_cpu_option(const char *cpu_option)
-{
- ObjectClass *oc;
- CPUClass *cc;
- gchar **model_pieces;
- const char *cpu_type;
-
- model_pieces = g_strsplit(cpu_option, ",", 2);
- if (!model_pieces[0]) {
- error_report("-cpu option cannot be empty");
- exit(1);
- }
-
- oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
- if (oc == NULL) {
- error_report("unable to find CPU model '%s'", model_pieces[0]);
- g_strfreev(model_pieces);
- exit(EXIT_FAILURE);
- }
-
- cpu_type = object_class_get_name(oc);
- cc = CPU_CLASS(oc);
- cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
- g_strfreev(model_pieces);
- return cpu_type;
-}
-
-#ifndef cpu_list
-static void cpu_list_entry(gpointer data, gpointer user_data)
-{
- CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data));
- const char *typename = object_class_get_name(OBJECT_CLASS(data));
- g_autofree char *model = cpu_model_from_type(typename);
-
- if (cc->deprecation_note) {
- qemu_printf(" %s (deprecated)\n", model);
- } else {
- qemu_printf(" %s\n", model);
- }
-}
-
-static void cpu_list(void)
-{
- GSList *list;
-
- list = object_class_get_list_sorted(TYPE_CPU, false);
- qemu_printf("Available CPUs:\n");
- g_slist_foreach(list, cpu_list_entry, NULL);
- g_slist_free(list);
-}
-#endif
-
-void list_cpus(void)
-{
- cpu_list();
-}
+/* Validate correct placement of CPUArchState. */
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
/* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */
@@ -372,103 +86,8 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
abort();
}
-/* physical memory access (slow version, mainly for debug) */
-#if defined(CONFIG_USER_ONLY)
-int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
- void *ptr, size_t len, bool is_write)
-{
- int flags;
- vaddr l, page;
- void * p;
- uint8_t *buf = ptr;
- ssize_t written;
- int ret = -1;
- int fd = -1;
-
- while (len > 0) {
- page = addr & TARGET_PAGE_MASK;
- l = (page + TARGET_PAGE_SIZE) - addr;
- if (l > len)
- l = len;
- flags = page_get_flags(page);
- if (!(flags & PAGE_VALID)) {
- goto out_close;
- }
- if (is_write) {
- if (flags & PAGE_WRITE) {
- /* XXX: this code should not depend on lock_user */
- p = lock_user(VERIFY_WRITE, addr, l, 0);
- if (!p) {
- goto out_close;
- }
- memcpy(p, buf, l);
- unlock_user(p, addr, l);
- } else {
- /* Bypass the host page protection using ptrace. */
- if (fd == -1) {
- fd = open("/proc/self/mem", O_WRONLY);
- if (fd == -1) {
- goto out;
- }
- }
- /*
- * If there is a TranslationBlock and we weren't bypassing the
- * host page protection, the memcpy() above would SEGV,
- * ultimately leading to page_unprotect(). So invalidate the
- * translations manually. Both invalidation and pwrite() must
- * be under mmap_lock() in order to prevent the creation of
- * another TranslationBlock in between.
- */
- mmap_lock();
- tb_invalidate_phys_range(addr, addr + l - 1);
- written = pwrite(fd, buf, l,
- (off_t)(uintptr_t)g2h_untagged(addr));
- mmap_unlock();
- if (written != l) {
- goto out_close;
- }
- }
- } else if (flags & PAGE_READ) {
- /* XXX: this code should not depend on lock_user */
- p = lock_user(VERIFY_READ, addr, l, 1);
- if (!p) {
- goto out_close;
- }
- memcpy(buf, p, l);
- unlock_user(p, addr, 0);
- } else {
- /* Bypass the host page protection using ptrace. */
- if (fd == -1) {
- fd = open("/proc/self/mem", O_RDONLY);
- if (fd == -1) {
- goto out;
- }
- }
- if (pread(fd, buf, l,
- (off_t)(uintptr_t)g2h_untagged(addr)) != l) {
- goto out_close;
- }
- }
- len -= l;
- buf += l;
- addr += l;
- }
- ret = 0;
-out_close:
- if (fd != -1) {
- close(fd);
- }
-out:
- return ret;
-}
-#endif
-
-bool target_words_bigendian(void)
+#undef target_big_endian
+bool target_big_endian(void)
{
return TARGET_BIG_ENDIAN;
}
-
-const char *target_name(void)
-{
- return TARGET_NAME;
-}
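Editor's note: the two QEMU_BUILD_BUG_ON assertions added above pin down the layout contract the common CPU code relies on — every target's ArchCPU begins with the shared CPUState and places its CPUArchState ("env") immediately after it. A minimal standalone sketch of the same compile-time check, written with C11 static_assert instead of QEMU's macro; the struct contents here are hypothetical:

    #include <assert.h>
    #include <stddef.h>

    typedef struct CPUState { int running; } CPUState;
    typedef struct CPUArchState { int regs[16]; } CPUArchState;

    /* Hypothetical ArchCPU mirroring the layout the patch asserts. */
    typedef struct ArchCPU {
        CPUState parent_obj;    /* must be first ...                     */
        CPUArchState env;       /* ... and env must directly follow it   */
    } ArchCPU;

    static_assert(offsetof(ArchCPU, parent_obj) == 0,
                  "CPUState must be the first member");
    static_assert(offsetof(ArchCPU, env) == sizeof(CPUState),
                  "CPUArchState must directly follow CPUState");

    int main(void) { return 0; }
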
diff --git a/crypto/afalg.c b/crypto/afalg.c
index 52a491d..246d067 100644
--- a/crypto/afalg.c
+++ b/crypto/afalg.c
@@ -66,13 +66,13 @@ qcrypto_afalg_socket_bind(const char *type, const char *name,
return sbind;
}
-QCryptoAFAlg *
+QCryptoAFAlgo *
qcrypto_afalg_comm_alloc(const char *type, const char *name,
Error **errp)
{
- QCryptoAFAlg *afalg;
+ QCryptoAFAlgo *afalg;
- afalg = g_new0(QCryptoAFAlg, 1);
+ afalg = g_new0(QCryptoAFAlgo, 1);
/* initialize crypto API socket */
afalg->opfd = -1;
afalg->tfmfd = qcrypto_afalg_socket_bind(type, name, errp);
@@ -93,7 +93,7 @@ error:
return NULL;
}
-void qcrypto_afalg_comm_free(QCryptoAFAlg *afalg)
+void qcrypto_afalg_comm_free(QCryptoAFAlgo *afalg)
{
if (!afalg) {
return;
diff --git a/crypto/afalgpriv.h b/crypto/afalgpriv.h
index 5a2393f..3fdcc0f 100644
--- a/crypto/afalgpriv.h
+++ b/crypto/afalgpriv.h
@@ -30,9 +30,9 @@
#define ALG_OPTYPE_LEN 4
#define ALG_MSGIV_LEN(len) (sizeof(struct af_alg_iv) + (len))
-typedef struct QCryptoAFAlg QCryptoAFAlg;
+typedef struct QCryptoAFAlgo QCryptoAFAlgo;
-struct QCryptoAFAlg {
+struct QCryptoAFAlgo {
QCryptoCipher base;
int tfmfd;
@@ -46,22 +46,22 @@ struct QCryptoAFAlg {
* @type: the type of crypto operation
* @name: the name of crypto operation
*
- * Allocate a QCryptoAFAlg object and bind itself to
+ * Allocate a QCryptoAFAlgo object and bind itself to
* a AF_ALG socket.
*
* Returns:
- * a new QCryptoAFAlg object, or NULL in error.
+ * a new QCryptoAFAlgo object, or NULL in error.
*/
-QCryptoAFAlg *
+QCryptoAFAlgo *
qcrypto_afalg_comm_alloc(const char *type, const char *name,
Error **errp);
/**
* afalg_comm_free:
- * @afalg: the QCryptoAFAlg object
+ * @afalg: the QCryptoAFAlgo object
*
* Free the @afalg.
*/
-void qcrypto_afalg_comm_free(QCryptoAFAlg *afalg);
+void qcrypto_afalg_comm_free(QCryptoAFAlgo *afalg);
#endif
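Editor's note: the renamed QCryptoAFAlgo keeps the established pattern of embedding the generic QCryptoCipher handle as its "base" member, so the AF_ALG backend can hand callers a QCryptoCipher pointer and later recover its private state with container_of, as the cipher-afalg.c hunks further below do. A self-contained sketch of that embed-and-recover idiom, with hypothetical type names and a plain offsetof-based container_of:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct PublicCipher { const char *name; } PublicCipher;

    /* Hypothetical backend object embedding the public handle. */
    typedef struct BackendCipher {
        PublicCipher base;
        int tfmfd;              /* backend-private state */
    } BackendCipher;

    static void backend_op(PublicCipher *pub)
    {
        BackendCipher *priv = container_of(pub, BackendCipher, base);
        printf("backend fd = %d\n", priv->tfmfd);
    }

    int main(void)
    {
        BackendCipher c = { .base = { "demo" }, .tfmfd = 42 };
        backend_op(&c.base);
        return 0;
    }
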
diff --git a/crypto/afsplit.c b/crypto/afsplit.c
index b1a5a20..b2e383a 100644
--- a/crypto/afsplit.c
+++ b/crypto/afsplit.c
@@ -40,7 +40,7 @@ static void qcrypto_afsplit_xor(size_t blocklen,
}
-static int qcrypto_afsplit_hash(QCryptoHashAlgorithm hash,
+static int qcrypto_afsplit_hash(QCryptoHashAlgo hash,
size_t blocklen,
uint8_t *block,
Error **errp)
@@ -85,7 +85,7 @@ static int qcrypto_afsplit_hash(QCryptoHashAlgorithm hash,
}
-int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash,
+int qcrypto_afsplit_encode(QCryptoHashAlgo hash,
size_t blocklen,
uint32_t stripes,
const uint8_t *in,
@@ -117,7 +117,7 @@ int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash,
}
-int qcrypto_afsplit_decode(QCryptoHashAlgorithm hash,
+int qcrypto_afsplit_decode(QCryptoHashAlgo hash,
size_t blocklen,
uint32_t stripes,
const uint8_t *in,
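Editor's note: the afsplit hunks are a pure enum rename, but for orientation, qcrypto_afsplit_encode diffuses a secret across a number of stripes, XOR-accumulating and hashing between rounds so that erasing any stripe destroys the key material. A minimal standalone sketch of just the XOR accumulation step (the hash diffusion is omitted), using a hypothetical helper name:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* XOR-accumulate one stripe into the running block; the real encode
     * loop alternates this with a hash-based diffusion pass. */
    static void afsplit_xor(size_t blocklen, const uint8_t *in, uint8_t *block)
    {
        for (size_t i = 0; i < blocklen; i++) {
            block[i] ^= in[i];
        }
    }

    int main(void)
    {
        uint8_t block[4] = { 0 };
        const uint8_t stripe1[4] = { 1, 2, 3, 4 };
        const uint8_t stripe2[4] = { 1, 2, 3, 4 };

        afsplit_xor(sizeof(block), stripe1, block);
        afsplit_xor(sizeof(block), stripe2, block);   /* back to zeroes */
        printf("%u %u %u %u\n", block[0], block[1], block[2], block[3]);
        return 0;
    }
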
diff --git a/crypto/akcipher-gcrypt.c.inc b/crypto/akcipher-gcrypt.c.inc
index abb1fb2..bcf030f 100644
--- a/crypto/akcipher-gcrypt.c.inc
+++ b/crypto/akcipher-gcrypt.c.inc
@@ -26,14 +26,14 @@
#include "crypto/akcipher.h"
#include "crypto/random.h"
#include "qapi/error.h"
-#include "sysemu/cryptodev.h"
+#include "system/cryptodev.h"
#include "rsakey.h"
typedef struct QCryptoGcryptRSA {
QCryptoAkCipher akcipher;
gcry_sexp_t key;
- QCryptoRSAPaddingAlgorithm padding_alg;
- QCryptoHashAlgorithm hash_alg;
+ QCryptoRSAPaddingAlgo padding_alg;
+ QCryptoHashAlgo hash_alg;
} QCryptoGcryptRSA;
static void qcrypto_gcrypt_rsa_free(QCryptoAkCipher *akcipher)
@@ -59,7 +59,7 @@ QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts,
Error **errp)
{
switch (opts->alg) {
- case QCRYPTO_AKCIPHER_ALG_RSA:
+ case QCRYPTO_AK_CIPHER_ALGO_RSA:
return (QCryptoAkCipher *)qcrypto_gcrypt_rsa_new(
&opts->u.rsa, type, key, keylen, errp);
@@ -85,7 +85,7 @@ static int qcrypto_gcrypt_parse_rsa_private_key(
const uint8_t *key, size_t keylen, Error **errp)
{
g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse(
- QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp);
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, key, keylen, errp);
gcry_mpi_t n = NULL, e = NULL, d = NULL, p = NULL, q = NULL, u = NULL;
bool compute_mul_inv = false;
int ret = -1;
@@ -178,7 +178,7 @@ static int qcrypto_gcrypt_parse_rsa_public_key(QCryptoGcryptRSA *rsa,
{
g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse(
- QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp);
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, key, keylen, errp);
gcry_mpi_t n = NULL, e = NULL;
int ret = -1;
gcry_error_t err;
@@ -241,7 +241,7 @@ static int qcrypto_gcrypt_rsa_encrypt(QCryptoAkCipher *akcipher,
err = gcry_sexp_build(&data_sexp, NULL,
"(data (flags %s) (value %b))",
- QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg),
+ QCryptoRSAPaddingAlgo_str(rsa->padding_alg),
in_len, in);
if (gcry_err_code(err) != 0) {
error_setg(errp, "Failed to build plaintext: %s/%s",
@@ -263,7 +263,7 @@ static int qcrypto_gcrypt_rsa_encrypt(QCryptoAkCipher *akcipher,
goto cleanup;
}
- if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) {
+ if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) {
cipher_mpi = gcry_sexp_nth_mpi(cipher_sexp_item, 1, GCRYMPI_FMT_USG);
if (!cipher_mpi) {
error_setg(errp, "Invalid ciphertext result");
@@ -332,7 +332,7 @@ static int qcrypto_gcrypt_rsa_decrypt(QCryptoAkCipher *akcipher,
err = gcry_sexp_build(&cipher_sexp, NULL,
"(enc-val (flags %s) (rsa (a %b) ))",
- QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg),
+ QCryptoRSAPaddingAlgo_str(rsa->padding_alg),
in_len, in);
if (gcry_err_code(err) != 0) {
error_setg(errp, "Failed to build ciphertext: %s/%s",
@@ -348,7 +348,7 @@ static int qcrypto_gcrypt_rsa_decrypt(QCryptoAkCipher *akcipher,
}
/* S-expression of plaintext: (value plaintext) */
- if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) {
+ if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) {
data_mpi = gcry_sexp_nth_mpi(data_sexp, 1, GCRYMPI_FMT_USG);
if (!data_mpi) {
error_setg(errp, "Invalid plaintext result");
@@ -410,14 +410,14 @@ static int qcrypto_gcrypt_rsa_sign(QCryptoAkCipher *akcipher,
return ret;
}
- if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) {
+ if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALGO_PKCS1) {
error_setg(errp, "Invalid padding %u", rsa->padding_alg);
return ret;
}
err = gcry_sexp_build(&dgst_sexp, NULL,
"(data (flags pkcs1) (hash %s %b))",
- QCryptoHashAlgorithm_str(rsa->hash_alg),
+ QCryptoHashAlgo_str(rsa->hash_alg),
in_len, in);
if (gcry_err_code(err) != 0) {
error_setg(errp, "Failed to build dgst: %s/%s",
@@ -482,7 +482,7 @@ static int qcrypto_gcrypt_rsa_verify(QCryptoAkCipher *akcipher,
return ret;
}
- if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) {
+ if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALGO_PKCS1) {
error_setg(errp, "Invalid padding %u", rsa->padding_alg);
return ret;
}
@@ -497,7 +497,7 @@ static int qcrypto_gcrypt_rsa_verify(QCryptoAkCipher *akcipher,
err = gcry_sexp_build(&dgst_sexp, NULL,
"(data (flags pkcs1) (hash %s %b))",
- QCryptoHashAlgorithm_str(rsa->hash_alg),
+ QCryptoHashAlgo_str(rsa->hash_alg),
in2_len, in2);
if (gcry_err_code(err) != 0) {
error_setg(errp, "Failed to build dgst: %s/%s",
@@ -540,13 +540,13 @@ static QCryptoGcryptRSA *qcrypto_gcrypt_rsa_new(
rsa->akcipher.driver = &gcrypt_rsa;
switch (type) {
- case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE:
if (qcrypto_gcrypt_parse_rsa_private_key(rsa, key, keylen, errp) != 0) {
goto error;
}
break;
- case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC:
if (qcrypto_gcrypt_parse_rsa_public_key(rsa, key, keylen, errp) != 0) {
goto error;
}
@@ -568,17 +568,17 @@ error:
bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts)
{
switch (opts->alg) {
- case QCRYPTO_AKCIPHER_ALG_RSA:
+ case QCRYPTO_AK_CIPHER_ALGO_RSA:
switch (opts->u.rsa.padding_alg) {
- case QCRYPTO_RSA_PADDING_ALG_RAW:
+ case QCRYPTO_RSA_PADDING_ALGO_RAW:
return true;
- case QCRYPTO_RSA_PADDING_ALG_PKCS1:
+ case QCRYPTO_RSA_PADDING_ALGO_PKCS1:
switch (opts->u.rsa.hash_alg) {
- case QCRYPTO_HASH_ALG_MD5:
- case QCRYPTO_HASH_ALG_SHA1:
- case QCRYPTO_HASH_ALG_SHA256:
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_MD5:
+ case QCRYPTO_HASH_ALGO_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA512:
return true;
default:
diff --git a/crypto/akcipher-nettle.c.inc b/crypto/akcipher-nettle.c.inc
index 02699e6..1d4bd69 100644
--- a/crypto/akcipher-nettle.c.inc
+++ b/crypto/akcipher-nettle.c.inc
@@ -26,15 +26,15 @@
#include "crypto/akcipher.h"
#include "crypto/random.h"
#include "qapi/error.h"
-#include "sysemu/cryptodev.h"
+#include "system/cryptodev.h"
#include "rsakey.h"
typedef struct QCryptoNettleRSA {
QCryptoAkCipher akcipher;
struct rsa_public_key pub;
struct rsa_private_key priv;
- QCryptoRSAPaddingAlgorithm padding_alg;
- QCryptoHashAlgorithm hash_alg;
+ QCryptoRSAPaddingAlgo padding_alg;
+ QCryptoHashAlgo hash_alg;
} QCryptoNettleRSA;
static void qcrypto_nettle_rsa_free(QCryptoAkCipher *akcipher)
@@ -61,7 +61,7 @@ QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts,
Error **errp)
{
switch (opts->alg) {
- case QCRYPTO_AKCIPHER_ALG_RSA:
+ case QCRYPTO_AK_CIPHER_ALGO_RSA:
return qcrypto_nettle_rsa_new(&opts->u.rsa, type, key, keylen, errp);
default:
@@ -87,7 +87,7 @@ static int qcrypt_nettle_parse_rsa_private_key(QCryptoNettleRSA *rsa,
Error **errp)
{
g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse(
- QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp);
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, key, keylen, errp);
if (!rsa_key) {
return -1;
@@ -137,7 +137,7 @@ static int qcrypt_nettle_parse_rsa_public_key(QCryptoNettleRSA *rsa,
Error **errp)
{
g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse(
- QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp);
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, key, keylen, errp);
if (!rsa_key) {
return -1;
@@ -184,11 +184,11 @@ static int qcrypto_nettle_rsa_encrypt(QCryptoAkCipher *akcipher,
/* Nettle do not support RSA encryption without any padding */
switch (rsa->padding_alg) {
- case QCRYPTO_RSA_PADDING_ALG_RAW:
+ case QCRYPTO_RSA_PADDING_ALGO_RAW:
error_setg(errp, "RSA with raw padding is not supported");
break;
- case QCRYPTO_RSA_PADDING_ALG_PKCS1:
+ case QCRYPTO_RSA_PADDING_ALGO_PKCS1:
mpz_init(c);
if (rsa_encrypt(&rsa->pub, NULL, wrap_nettle_random_func,
data_len, (uint8_t *)data, c) != 1) {
@@ -223,11 +223,11 @@ static int qcrypto_nettle_rsa_decrypt(QCryptoAkCipher *akcipher,
}
switch (rsa->padding_alg) {
- case QCRYPTO_RSA_PADDING_ALG_RAW:
+ case QCRYPTO_RSA_PADDING_ALGO_RAW:
error_setg(errp, "RSA with raw padding is not supported");
break;
- case QCRYPTO_RSA_PADDING_ALG_PKCS1:
+ case QCRYPTO_RSA_PADDING_ALGO_PKCS1:
nettle_mpz_init_set_str_256_u(c, enc_len, enc);
if (!rsa_decrypt(&rsa->priv, &data_len, (uint8_t *)data, c)) {
error_setg(errp, "Failed to decrypt");
@@ -257,7 +257,7 @@ static int qcrypto_nettle_rsa_sign(QCryptoAkCipher *akcipher,
* The RSA algorithm cannot be used for signature/verification
* without padding.
*/
- if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) {
+ if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) {
error_setg(errp, "Try to make signature without padding");
return ret;
}
@@ -276,19 +276,19 @@ static int qcrypto_nettle_rsa_sign(QCryptoAkCipher *akcipher,
mpz_init(s);
switch (rsa->hash_alg) {
- case QCRYPTO_HASH_ALG_MD5:
+ case QCRYPTO_HASH_ALGO_MD5:
rv = rsa_md5_sign_digest(&rsa->priv, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA1:
rv = rsa_sha1_sign_digest(&rsa->priv, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA256:
rv = rsa_sha256_sign_digest(&rsa->priv, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_SHA512:
rv = rsa_sha512_sign_digest(&rsa->priv, data, s);
break;
@@ -324,7 +324,7 @@ static int qcrypto_nettle_rsa_verify(QCryptoAkCipher *akcipher,
* The RSA algorithm cannot be used for signature/verification
* without padding.
*/
- if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) {
+ if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) {
error_setg(errp, "Try to verify signature without padding");
return ret;
}
@@ -341,19 +341,19 @@ static int qcrypto_nettle_rsa_verify(QCryptoAkCipher *akcipher,
nettle_mpz_init_set_str_256_u(s, sig_len, sig);
switch (rsa->hash_alg) {
- case QCRYPTO_HASH_ALG_MD5:
+ case QCRYPTO_HASH_ALGO_MD5:
rv = rsa_md5_verify_digest(&rsa->pub, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA1:
rv = rsa_sha1_verify_digest(&rsa->pub, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA256:
rv = rsa_sha256_verify_digest(&rsa->pub, data, s);
break;
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_SHA512:
rv = rsa_sha512_verify_digest(&rsa->pub, data, s);
break;
@@ -397,13 +397,13 @@ static QCryptoAkCipher *qcrypto_nettle_rsa_new(
rsa_private_key_init(&rsa->priv);
switch (type) {
- case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE:
if (qcrypt_nettle_parse_rsa_private_key(rsa, key, keylen, errp) != 0) {
goto error;
}
break;
- case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC:
if (qcrypt_nettle_parse_rsa_public_key(rsa, key, keylen, errp) != 0) {
goto error;
}
@@ -425,21 +425,21 @@ error:
bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts)
{
switch (opts->alg) {
- case QCRYPTO_AKCIPHER_ALG_RSA:
+ case QCRYPTO_AK_CIPHER_ALGO_RSA:
switch (opts->u.rsa.padding_alg) {
- case QCRYPTO_RSA_PADDING_ALG_PKCS1:
+ case QCRYPTO_RSA_PADDING_ALGO_PKCS1:
switch (opts->u.rsa.hash_alg) {
- case QCRYPTO_HASH_ALG_MD5:
- case QCRYPTO_HASH_ALG_SHA1:
- case QCRYPTO_HASH_ALG_SHA256:
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_MD5:
+ case QCRYPTO_HASH_ALGO_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA512:
return true;
default:
return false;
}
- case QCRYPTO_RSA_PADDING_ALG_RAW:
+ case QCRYPTO_RSA_PADDING_ALGO_RAW:
default:
return false;
}
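Editor's note: the reworked qcrypto_akcipher_supports switch spells out which padding/hash combinations the nettle RSA backend accepts. A hedged usage fragment (not a standalone program; it assumes the relevant QEMU crypto headers are included) showing how a caller would probe support before constructing an akcipher:

    QCryptoAkCipherOptions opts = { .alg = QCRYPTO_AK_CIPHER_ALGO_RSA };

    opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1;
    opts.u.rsa.hash_alg = QCRYPTO_HASH_ALGO_SHA256;

    if (!qcrypto_akcipher_supports(&opts)) {
        /* e.g. raw padding, or a digest this backend cannot sign with */
    }
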
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index e4bbc6e..0a0576b 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -115,7 +115,7 @@ int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
Error **errp)
{
switch (opts->alg) {
- case QCRYPTO_AKCIPHER_ALG_RSA:
+ case QCRYPTO_AK_CIPHER_ALGO_RSA:
qcrypto_akcipher_rsakey_export_p8info(key, keylen, dst, dst_len);
return 0;
diff --git a/crypto/akcipherpriv.h b/crypto/akcipherpriv.h
index 739f639..3b33e54 100644
--- a/crypto/akcipherpriv.h
+++ b/crypto/akcipherpriv.h
@@ -27,7 +27,7 @@
typedef struct QCryptoAkCipherDriver QCryptoAkCipherDriver;
struct QCryptoAkCipher {
- QCryptoAkCipherAlgorithm alg;
+ QCryptoAkCipherAlgo alg;
QCryptoAkCipherKeyType type;
int max_plaintext_len;
int max_ciphertext_len;
diff --git a/crypto/block-luks.c b/crypto/block-luks.c
index 5b777c1..0926ad2 100644
--- a/crypto/block-luks.c
+++ b/crypto/block-luks.c
@@ -33,6 +33,7 @@
#include "qemu/uuid.h"
#include "qemu/bitmap.h"
+#include "qemu/range.h"
/*
* Reference for the LUKS format implemented here is
@@ -67,38 +68,38 @@ struct QCryptoBlockLUKSCipherNameMap {
static const QCryptoBlockLUKSCipherSizeMap
qcrypto_block_luks_cipher_size_map_aes[] = {
- { 16, QCRYPTO_CIPHER_ALG_AES_128 },
- { 24, QCRYPTO_CIPHER_ALG_AES_192 },
- { 32, QCRYPTO_CIPHER_ALG_AES_256 },
+ { 16, QCRYPTO_CIPHER_ALGO_AES_128 },
+ { 24, QCRYPTO_CIPHER_ALGO_AES_192 },
+ { 32, QCRYPTO_CIPHER_ALGO_AES_256 },
{ 0, 0 },
};
static const QCryptoBlockLUKSCipherSizeMap
qcrypto_block_luks_cipher_size_map_cast5[] = {
- { 16, QCRYPTO_CIPHER_ALG_CAST5_128 },
+ { 16, QCRYPTO_CIPHER_ALGO_CAST5_128 },
{ 0, 0 },
};
static const QCryptoBlockLUKSCipherSizeMap
qcrypto_block_luks_cipher_size_map_serpent[] = {
- { 16, QCRYPTO_CIPHER_ALG_SERPENT_128 },
- { 24, QCRYPTO_CIPHER_ALG_SERPENT_192 },
- { 32, QCRYPTO_CIPHER_ALG_SERPENT_256 },
+ { 16, QCRYPTO_CIPHER_ALGO_SERPENT_128 },
+ { 24, QCRYPTO_CIPHER_ALGO_SERPENT_192 },
+ { 32, QCRYPTO_CIPHER_ALGO_SERPENT_256 },
{ 0, 0 },
};
static const QCryptoBlockLUKSCipherSizeMap
qcrypto_block_luks_cipher_size_map_twofish[] = {
- { 16, QCRYPTO_CIPHER_ALG_TWOFISH_128 },
- { 24, QCRYPTO_CIPHER_ALG_TWOFISH_192 },
- { 32, QCRYPTO_CIPHER_ALG_TWOFISH_256 },
+ { 16, QCRYPTO_CIPHER_ALGO_TWOFISH_128 },
+ { 24, QCRYPTO_CIPHER_ALGO_TWOFISH_192 },
+ { 32, QCRYPTO_CIPHER_ALGO_TWOFISH_256 },
{ 0, 0 },
};
#ifdef CONFIG_CRYPTO_SM4
static const QCryptoBlockLUKSCipherSizeMap
qcrypto_block_luks_cipher_size_map_sm4[] = {
- { 16, QCRYPTO_CIPHER_ALG_SM4},
+ { 16, QCRYPTO_CIPHER_ALGO_SM4},
{ 0, 0 },
};
#endif
@@ -122,25 +123,25 @@ struct QCryptoBlockLUKS {
QCryptoBlockLUKSHeader header;
/* Main encryption algorithm used for encryption*/
- QCryptoCipherAlgorithm cipher_alg;
+ QCryptoCipherAlgo cipher_alg;
/* Mode of encryption for the selected encryption algorithm */
QCryptoCipherMode cipher_mode;
/* Initialization vector generation algorithm */
- QCryptoIVGenAlgorithm ivgen_alg;
+ QCryptoIVGenAlgo ivgen_alg;
/* Hash algorithm used for IV generation*/
- QCryptoHashAlgorithm ivgen_hash_alg;
+ QCryptoHashAlgo ivgen_hash_alg;
/*
* Encryption algorithm used for IV generation.
* Usually the same as main encryption algorithm
*/
- QCryptoCipherAlgorithm ivgen_cipher_alg;
+ QCryptoCipherAlgo ivgen_cipher_alg;
/* Hash algorithm used in pbkdf2 function */
- QCryptoHashAlgorithm hash_alg;
+ QCryptoHashAlgo hash_alg;
/* Name of the secret that was used to open the image */
char *secret;
@@ -178,7 +179,7 @@ static int qcrypto_block_luks_cipher_name_lookup(const char *name,
}
static const char *
-qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgorithm alg,
+qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgo alg,
Error **errp)
{
const QCryptoBlockLUKSCipherNameMap *map =
@@ -194,7 +195,7 @@ qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgorithm alg,
}
error_setg(errp, "Algorithm '%s' not supported",
- QCryptoCipherAlgorithm_str(alg));
+ QCryptoCipherAlgo_str(alg));
return NULL;
}
@@ -222,13 +223,13 @@ static int qcrypto_block_luks_name_lookup(const char *name,
#define qcrypto_block_luks_hash_name_lookup(name, errp) \
qcrypto_block_luks_name_lookup(name, \
- &QCryptoHashAlgorithm_lookup, \
+ &QCryptoHashAlgo_lookup, \
"Hash algorithm", \
errp)
#define qcrypto_block_luks_ivgen_name_lookup(name, errp) \
qcrypto_block_luks_name_lookup(name, \
- &QCryptoIVGenAlgorithm_lookup, \
+ &QCryptoIVGenAlgo_lookup, \
"IV generator", \
errp)
@@ -261,9 +262,9 @@ qcrypto_block_luks_has_format(const uint8_t *buf,
* the cipher since that gets a key length matching the digest
* size, not AES 128 with truncated digest as might be imagined
*/
-static QCryptoCipherAlgorithm
-qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher,
- QCryptoHashAlgorithm hash,
+static QCryptoCipherAlgo
+qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgo cipher,
+ QCryptoHashAlgo hash,
Error **errp)
{
size_t digestlen = qcrypto_hash_digest_len(hash);
@@ -273,54 +274,54 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher,
}
switch (cipher) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_AES_128)) {
- return QCRYPTO_CIPHER_ALG_AES_128;
+ QCRYPTO_CIPHER_ALGO_AES_128)) {
+ return QCRYPTO_CIPHER_ALGO_AES_128;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_AES_192)) {
- return QCRYPTO_CIPHER_ALG_AES_192;
+ QCRYPTO_CIPHER_ALGO_AES_192)) {
+ return QCRYPTO_CIPHER_ALGO_AES_192;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_AES_256)) {
- return QCRYPTO_CIPHER_ALG_AES_256;
+ QCRYPTO_CIPHER_ALGO_AES_256)) {
+ return QCRYPTO_CIPHER_ALGO_AES_256;
} else {
error_setg(errp, "No AES cipher with key size %zu available",
digestlen);
return 0;
}
break;
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_SERPENT_128)) {
- return QCRYPTO_CIPHER_ALG_SERPENT_128;
+ QCRYPTO_CIPHER_ALGO_SERPENT_128)) {
+ return QCRYPTO_CIPHER_ALGO_SERPENT_128;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_SERPENT_192)) {
- return QCRYPTO_CIPHER_ALG_SERPENT_192;
+ QCRYPTO_CIPHER_ALGO_SERPENT_192)) {
+ return QCRYPTO_CIPHER_ALGO_SERPENT_192;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_SERPENT_256)) {
- return QCRYPTO_CIPHER_ALG_SERPENT_256;
+ QCRYPTO_CIPHER_ALGO_SERPENT_256)) {
+ return QCRYPTO_CIPHER_ALGO_SERPENT_256;
} else {
error_setg(errp, "No Serpent cipher with key size %zu available",
digestlen);
return 0;
}
break;
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
- case QCRYPTO_CIPHER_ALG_TWOFISH_192:
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_192:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_TWOFISH_128)) {
- return QCRYPTO_CIPHER_ALG_TWOFISH_128;
+ QCRYPTO_CIPHER_ALGO_TWOFISH_128)) {
+ return QCRYPTO_CIPHER_ALGO_TWOFISH_128;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_TWOFISH_192)) {
- return QCRYPTO_CIPHER_ALG_TWOFISH_192;
+ QCRYPTO_CIPHER_ALGO_TWOFISH_192)) {
+ return QCRYPTO_CIPHER_ALGO_TWOFISH_192;
} else if (digestlen == qcrypto_cipher_get_key_len(
- QCRYPTO_CIPHER_ALG_TWOFISH_256)) {
- return QCRYPTO_CIPHER_ALG_TWOFISH_256;
+ QCRYPTO_CIPHER_ALGO_TWOFISH_256)) {
+ return QCRYPTO_CIPHER_ALGO_TWOFISH_256;
} else {
error_setg(errp, "No Twofish cipher with key size %zu available",
digestlen);
@@ -329,7 +330,7 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher,
break;
default:
error_setg(errp, "Cipher %s not supported with essiv",
- QCryptoCipherAlgorithm_str(cipher));
+ QCryptoCipherAlgo_str(cipher));
return 0;
}
}
@@ -572,7 +573,7 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks,
header_sectors,
slot2->stripes);
- if (start1 + len1 > start2 && start2 + len2 > start1) {
+ if (ranges_overlap(start1, len1, start2, len2)) {
error_setg(errp,
"Keyslots %zu and %zu are overlapping in the header",
i, j);
@@ -659,7 +660,7 @@ qcrypto_block_luks_parse_header(QCryptoBlockLUKS *luks, Error **errp)
return -1;
}
- if (luks->ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (luks->ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) {
if (!ivhash_name) {
error_setg(errp, "Missing IV generator hash specification");
return -1;
@@ -1321,20 +1322,20 @@ qcrypto_block_luks_create(QCryptoBlock *block,
luks_opts.iter_time = QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS;
}
if (!luks_opts.has_cipher_alg) {
- luks_opts.cipher_alg = QCRYPTO_CIPHER_ALG_AES_256;
+ luks_opts.cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256;
}
if (!luks_opts.has_cipher_mode) {
luks_opts.cipher_mode = QCRYPTO_CIPHER_MODE_XTS;
}
if (!luks_opts.has_ivgen_alg) {
- luks_opts.ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64;
+ luks_opts.ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64;
}
if (!luks_opts.has_hash_alg) {
- luks_opts.hash_alg = QCRYPTO_HASH_ALG_SHA256;
+ luks_opts.hash_alg = QCRYPTO_HASH_ALGO_SHA256;
}
- if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (luks_opts.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) {
if (!luks_opts.has_ivgen_hash_alg) {
- luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256;
+ luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALGO_SHA256;
luks_opts.has_ivgen_hash_alg = true;
}
}
@@ -1383,15 +1384,15 @@ qcrypto_block_luks_create(QCryptoBlock *block,
}
cipher_mode = QCryptoCipherMode_str(luks_opts.cipher_mode);
- ivgen_alg = QCryptoIVGenAlgorithm_str(luks_opts.ivgen_alg);
+ ivgen_alg = QCryptoIVGenAlgo_str(luks_opts.ivgen_alg);
if (luks_opts.has_ivgen_hash_alg) {
- ivgen_hash_alg = QCryptoHashAlgorithm_str(luks_opts.ivgen_hash_alg);
+ ivgen_hash_alg = QCryptoHashAlgo_str(luks_opts.ivgen_hash_alg);
cipher_mode_spec = g_strdup_printf("%s-%s:%s", cipher_mode, ivgen_alg,
ivgen_hash_alg);
} else {
cipher_mode_spec = g_strdup_printf("%s-%s", cipher_mode, ivgen_alg);
}
- hash_alg = QCryptoHashAlgorithm_str(luks_opts.hash_alg);
+ hash_alg = QCryptoHashAlgo_str(luks_opts.hash_alg);
if (strlen(cipher_alg) >= QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN) {
@@ -1410,7 +1411,7 @@ qcrypto_block_luks_create(QCryptoBlock *block,
goto error;
}
- if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (luks_opts.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) {
luks->ivgen_cipher_alg =
qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg,
luks_opts.ivgen_hash_alg,
@@ -1860,11 +1861,11 @@ qcrypto_block_luks_amend_options(QCryptoBlock *block,
QCryptoBlockAmendOptionsLUKS *opts_luks = &options->u.luks;
switch (opts_luks->state) {
- case Q_CRYPTO_BLOCKLUKS_KEYSLOT_STATE_ACTIVE:
+ case QCRYPTO_BLOCK_LUKS_KEYSLOT_STATE_ACTIVE:
return qcrypto_block_luks_amend_add_keyslot(block, readfunc,
writefunc, opaque,
opts_luks, force, errp);
- case Q_CRYPTO_BLOCKLUKS_KEYSLOT_STATE_INACTIVE:
+ case QCRYPTO_BLOCK_LUKS_KEYSLOT_STATE_INACTIVE:
return qcrypto_block_luks_amend_erase_keyslots(block, readfunc,
writefunc, opaque,
opts_luks, force, errp);
@@ -1885,7 +1886,7 @@ static int qcrypto_block_luks_get_info(QCryptoBlock *block,
info->u.luks.cipher_alg = luks->cipher_alg;
info->u.luks.cipher_mode = luks->cipher_mode;
info->u.luks.ivgen_alg = luks->ivgen_alg;
- if (info->u.luks.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (info->u.luks.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) {
info->u.luks.has_ivgen_hash_alg = true;
info->u.luks.ivgen_hash_alg = luks->ivgen_hash_alg;
}
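Editor's note: replacing the hand-written keyslot overlap test with ranges_overlap() from qemu/range.h keeps the same semantics — two ranges overlap iff each starts before the other ends. A standalone re-implementation of that check for illustration only (in-tree code should use QEMU's own helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for QEMU's helper; [a, a+al) and [b, b+bl) overlap
     * iff each range starts before the other one ends. */
    static bool ranges_overlap(uint64_t a, uint64_t al, uint64_t b, uint64_t bl)
    {
        return a + al > b && b + bl > a;
    }

    int main(void)
    {
        printf("%d\n", ranges_overlap(0, 10, 5, 10));    /* 1: overlapping     */
        printf("%d\n", ranges_overlap(0, 10, 10, 10));   /* 0: merely adjacent */
        return 0;
    }
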
diff --git a/crypto/block-qcow.c b/crypto/block-qcow.c
index 42e9556..054078b 100644
--- a/crypto/block-qcow.c
+++ b/crypto/block-qcow.c
@@ -62,16 +62,16 @@ qcrypto_block_qcow_init(QCryptoBlock *block,
memcpy(keybuf, password, MIN(len, sizeof(keybuf)));
g_free(password);
- block->niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128,
+ block->niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALGO_AES_128,
QCRYPTO_CIPHER_MODE_CBC);
- block->ivgen = qcrypto_ivgen_new(QCRYPTO_IVGEN_ALG_PLAIN64,
+ block->ivgen = qcrypto_ivgen_new(QCRYPTO_IV_GEN_ALGO_PLAIN64,
0, 0, NULL, 0, errp);
if (!block->ivgen) {
ret = -ENOTSUP;
goto fail;
}
- ret = qcrypto_block_init_cipher(block, QCRYPTO_CIPHER_ALG_AES_128,
+ ret = qcrypto_block_init_cipher(block, QCRYPTO_CIPHER_ALGO_AES_128,
QCRYPTO_CIPHER_MODE_CBC,
keybuf, G_N_ELEMENTS(keybuf),
errp);
diff --git a/crypto/block.c b/crypto/block.c
index 3bcc427..96c83e6 100644
--- a/crypto/block.c
+++ b/crypto/block.c
@@ -26,8 +26,8 @@
#include "block-luks.h"
static const QCryptoBlockDriver *qcrypto_block_drivers[] = {
- [Q_CRYPTO_BLOCK_FORMAT_QCOW] = &qcrypto_block_driver_qcow,
- [Q_CRYPTO_BLOCK_FORMAT_LUKS] = &qcrypto_block_driver_luks,
+ [QCRYPTO_BLOCK_FORMAT_QCOW] = &qcrypto_block_driver_qcow,
+ [QCRYPTO_BLOCK_FORMAT_LUKS] = &qcrypto_block_driver_luks,
};
@@ -267,7 +267,7 @@ static void qcrypto_block_push_cipher(QCryptoBlock *block,
int qcrypto_block_init_cipher(QCryptoBlock *block,
- QCryptoCipherAlgorithm alg,
+ QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key, size_t nkey,
Error **errp)
@@ -332,7 +332,7 @@ QCryptoIVGen *qcrypto_block_get_ivgen(QCryptoBlock *block)
}
-QCryptoHashAlgorithm qcrypto_block_get_kdf_hash(QCryptoBlock *block)
+QCryptoHashAlgo qcrypto_block_get_kdf_hash(QCryptoBlock *block)
{
return block->kdfhash;
}
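Editor's note: the QCRYPTO_BLOCK_FORMAT_* rename stays a two-line change because the driver table uses designated initializers keyed by the enum, so entries follow the constants rather than their positions. A self-contained sketch of that enum-indexed dispatch table, with illustrative names:

    #include <stdio.h>

    typedef enum { FORMAT_QCOW, FORMAT_LUKS, FORMAT__MAX } Format;

    typedef struct Driver { const char *name; } Driver;

    static const Driver driver_qcow = { "qcow" };
    static const Driver driver_luks = { "luks" };

    /* Designated initializers keyed by the enum keep the table correct
     * even if the enum constants are renamed or reordered. */
    static const Driver *drivers[FORMAT__MAX] = {
        [FORMAT_QCOW] = &driver_qcow,
        [FORMAT_LUKS] = &driver_luks,
    };

    int main(void)
    {
        printf("%s\n", drivers[FORMAT_LUKS]->name);
        return 0;
    }
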
diff --git a/crypto/blockpriv.h b/crypto/blockpriv.h
index b8f77cb..edf0b3a 100644
--- a/crypto/blockpriv.h
+++ b/crypto/blockpriv.h
@@ -33,7 +33,7 @@ struct QCryptoBlock {
void *opaque;
/* Cipher parameters */
- QCryptoCipherAlgorithm alg;
+ QCryptoCipherAlgo alg;
QCryptoCipherMode mode;
uint8_t *key;
size_t nkey;
@@ -44,7 +44,7 @@ struct QCryptoBlock {
QCryptoIVGen *ivgen;
QemuMutex mutex;
- QCryptoHashAlgorithm kdfhash;
+ QCryptoHashAlgo kdfhash;
size_t niv;
uint64_t payload_offset; /* In bytes */
uint64_t sector_size; /* In bytes */
@@ -132,7 +132,7 @@ int qcrypto_block_encrypt_helper(QCryptoBlock *block,
Error **errp);
int qcrypto_block_init_cipher(QCryptoBlock *block,
- QCryptoCipherAlgorithm alg,
+ QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key, size_t nkey,
Error **errp);
diff --git a/crypto/cipher-afalg.c b/crypto/cipher-afalg.c
index 3df8fc5..4980d41 100644
--- a/crypto/cipher-afalg.c
+++ b/crypto/cipher-afalg.c
@@ -18,7 +18,7 @@
static char *
-qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg,
+qcrypto_afalg_cipher_format_name(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
Error **errp)
{
@@ -27,22 +27,22 @@ qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg,
const char *mode_name;
switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
alg_name = "aes";
break;
- case QCRYPTO_CIPHER_ALG_CAST5_128:
+ case QCRYPTO_CIPHER_ALGO_CAST5_128:
alg_name = "cast5";
break;
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
alg_name = "serpent";
break;
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
- case QCRYPTO_CIPHER_ALG_TWOFISH_192:
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_192:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
alg_name = "twofish";
break;
@@ -60,12 +60,12 @@ qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg,
static const struct QCryptoCipherDriver qcrypto_cipher_afalg_driver;
QCryptoCipher *
-qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgorithm alg,
+qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key,
size_t nkey, Error **errp)
{
- QCryptoAFAlg *afalg;
+ QCryptoAFAlgo *afalg;
size_t expect_niv;
char *name;
@@ -119,7 +119,7 @@ qcrypto_afalg_cipher_setiv(QCryptoCipher *cipher,
const uint8_t *iv,
size_t niv, Error **errp)
{
- QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base);
+ QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base);
struct af_alg_iv *alg_iv;
size_t expect_niv;
@@ -143,7 +143,7 @@ qcrypto_afalg_cipher_setiv(QCryptoCipher *cipher,
}
static int
-qcrypto_afalg_cipher_op(QCryptoAFAlg *afalg,
+qcrypto_afalg_cipher_op(QCryptoAFAlgo *afalg,
const void *in, void *out,
size_t len, bool do_encrypt,
Error **errp)
@@ -202,7 +202,7 @@ qcrypto_afalg_cipher_encrypt(QCryptoCipher *cipher,
const void *in, void *out,
size_t len, Error **errp)
{
- QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base);
+ QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base);
return qcrypto_afalg_cipher_op(afalg, in, out, len, true, errp);
}
@@ -212,14 +212,14 @@ qcrypto_afalg_cipher_decrypt(QCryptoCipher *cipher,
const void *in, void *out,
size_t len, Error **errp)
{
- QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base);
+ QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base);
return qcrypto_afalg_cipher_op(afalg, in, out, len, false, errp);
}
static void qcrypto_afalg_comm_ctx_free(QCryptoCipher *cipher)
{
- QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base);
+ QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base);
qcrypto_afalg_comm_free(afalg);
}
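Editor's note: qcrypto_afalg_cipher_format_name maps the cipher enum and mode onto the Linux AF_ALG transform name; the kernel identifies skcipher transforms as "mode(algorithm)", e.g. "cbc(aes)". A small standalone sketch of that formatting step, assuming GLib for g_strdup_printf as the QEMU code does; the exact format string here is an assumption based on the AF_ALG naming convention, not a quote of the QEMU function:

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        const char *alg_name = "aes";
        const char *mode_name = "cbc";

        /* AF_ALG transform names look like "cbc(aes)"; illustrative only. */
        char *name = g_strdup_printf("%s(%s)", mode_name, alg_name);
        puts(name);
        g_free(name);
        return 0;
    }
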
diff --git a/crypto/cipher-builtin.c.inc b/crypto/cipher-builtin.c.inc
deleted file mode 100644
index b409089..0000000
--- a/crypto/cipher-builtin.c.inc
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * QEMU Crypto cipher built-in algorithms
- *
- * Copyright (c) 2015 Red Hat, Inc.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "crypto/aes.h"
-
-typedef struct QCryptoCipherBuiltinAESContext QCryptoCipherBuiltinAESContext;
-struct QCryptoCipherBuiltinAESContext {
- AES_KEY enc;
- AES_KEY dec;
-};
-
-typedef struct QCryptoCipherBuiltinAES QCryptoCipherBuiltinAES;
-struct QCryptoCipherBuiltinAES {
- QCryptoCipher base;
- QCryptoCipherBuiltinAESContext key;
- uint8_t iv[AES_BLOCK_SIZE];
-};
-
-
-static inline bool qcrypto_length_check(size_t len, size_t blocksize,
- Error **errp)
-{
- if (unlikely(len & (blocksize - 1))) {
- error_setg(errp, "Length %zu must be a multiple of block size %zu",
- len, blocksize);
- return false;
- }
- return true;
-}
-
-static void qcrypto_cipher_ctx_free(QCryptoCipher *cipher)
-{
- g_free(cipher);
-}
-
-static int qcrypto_cipher_no_setiv(QCryptoCipher *cipher,
- const uint8_t *iv, size_t niv,
- Error **errp)
-{
- error_setg(errp, "Setting IV is not supported");
- return -1;
-}
-
-static void do_aes_encrypt_ecb(const void *vctx,
- size_t len,
- uint8_t *out,
- const uint8_t *in)
-{
- const QCryptoCipherBuiltinAESContext *ctx = vctx;
-
- /* We have already verified that len % AES_BLOCK_SIZE == 0. */
- while (len) {
- AES_encrypt(in, out, &ctx->enc);
- in += AES_BLOCK_SIZE;
- out += AES_BLOCK_SIZE;
- len -= AES_BLOCK_SIZE;
- }
-}
-
-static void do_aes_decrypt_ecb(const void *vctx,
- size_t len,
- uint8_t *out,
- const uint8_t *in)
-{
- const QCryptoCipherBuiltinAESContext *ctx = vctx;
-
- /* We have already verified that len % AES_BLOCK_SIZE == 0. */
- while (len) {
- AES_decrypt(in, out, &ctx->dec);
- in += AES_BLOCK_SIZE;
- out += AES_BLOCK_SIZE;
- len -= AES_BLOCK_SIZE;
- }
-}
-
-static void do_aes_encrypt_cbc(const AES_KEY *key,
- size_t len,
- uint8_t *out,
- const uint8_t *in,
- uint8_t *ivec)
-{
- uint8_t tmp[AES_BLOCK_SIZE];
- size_t n;
-
- /* We have already verified that len % AES_BLOCK_SIZE == 0. */
- while (len) {
- for (n = 0; n < AES_BLOCK_SIZE; ++n) {
- tmp[n] = in[n] ^ ivec[n];
- }
- AES_encrypt(tmp, out, key);
- memcpy(ivec, out, AES_BLOCK_SIZE);
- len -= AES_BLOCK_SIZE;
- in += AES_BLOCK_SIZE;
- out += AES_BLOCK_SIZE;
- }
-}
-
-static void do_aes_decrypt_cbc(const AES_KEY *key,
- size_t len,
- uint8_t *out,
- const uint8_t *in,
- uint8_t *ivec)
-{
- uint8_t tmp[AES_BLOCK_SIZE];
- size_t n;
-
- /* We have already verified that len % AES_BLOCK_SIZE == 0. */
- while (len) {
- memcpy(tmp, in, AES_BLOCK_SIZE);
- AES_decrypt(in, out, key);
- for (n = 0; n < AES_BLOCK_SIZE; ++n) {
- out[n] ^= ivec[n];
- }
- memcpy(ivec, tmp, AES_BLOCK_SIZE);
- len -= AES_BLOCK_SIZE;
- in += AES_BLOCK_SIZE;
- out += AES_BLOCK_SIZE;
- }
-}
-
-static int qcrypto_cipher_aes_encrypt_ecb(QCryptoCipher *cipher,
- const void *in, void *out,
- size_t len, Error **errp)
-{
- QCryptoCipherBuiltinAES *ctx
- = container_of(cipher, QCryptoCipherBuiltinAES, base);
-
- if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) {
- return -1;
- }
- do_aes_encrypt_ecb(&ctx->key, len, out, in);
- return 0;
-}
-
-static int qcrypto_cipher_aes_decrypt_ecb(QCryptoCipher *cipher,
- const void *in, void *out,
- size_t len, Error **errp)
-{
- QCryptoCipherBuiltinAES *ctx
- = container_of(cipher, QCryptoCipherBuiltinAES, base);
-
- if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) {
- return -1;
- }
- do_aes_decrypt_ecb(&ctx->key, len, out, in);
- return 0;
-}
-
-static int qcrypto_cipher_aes_encrypt_cbc(QCryptoCipher *cipher,
- const void *in, void *out,
- size_t len, Error **errp)
-{
- QCryptoCipherBuiltinAES *ctx
- = container_of(cipher, QCryptoCipherBuiltinAES, base);
-
- if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) {
- return -1;
- }
- do_aes_encrypt_cbc(&ctx->key.enc, len, out, in, ctx->iv);
- return 0;
-}
-
-static int qcrypto_cipher_aes_decrypt_cbc(QCryptoCipher *cipher,
- const void *in, void *out,
- size_t len, Error **errp)
-{
- QCryptoCipherBuiltinAES *ctx
- = container_of(cipher, QCryptoCipherBuiltinAES, base);
-
- if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) {
- return -1;
- }
- do_aes_decrypt_cbc(&ctx->key.dec, len, out, in, ctx->iv);
- return 0;
-}
-
-static int qcrypto_cipher_aes_setiv(QCryptoCipher *cipher, const uint8_t *iv,
- size_t niv, Error **errp)
-{
- QCryptoCipherBuiltinAES *ctx
- = container_of(cipher, QCryptoCipherBuiltinAES, base);
-
- if (niv != AES_BLOCK_SIZE) {
- error_setg(errp, "IV must be %d bytes not %zu",
- AES_BLOCK_SIZE, niv);
- return -1;
- }
-
- memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
- return 0;
-}
-
-static const struct QCryptoCipherDriver qcrypto_cipher_aes_driver_ecb = {
- .cipher_encrypt = qcrypto_cipher_aes_encrypt_ecb,
- .cipher_decrypt = qcrypto_cipher_aes_decrypt_ecb,
- .cipher_setiv = qcrypto_cipher_no_setiv,
- .cipher_free = qcrypto_cipher_ctx_free,
-};
-
-static const struct QCryptoCipherDriver qcrypto_cipher_aes_driver_cbc = {
- .cipher_encrypt = qcrypto_cipher_aes_encrypt_cbc,
- .cipher_decrypt = qcrypto_cipher_aes_decrypt_cbc,
- .cipher_setiv = qcrypto_cipher_aes_setiv,
- .cipher_free = qcrypto_cipher_ctx_free,
-};
-
-bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
- QCryptoCipherMode mode)
-{
- switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
- switch (mode) {
- case QCRYPTO_CIPHER_MODE_ECB:
- case QCRYPTO_CIPHER_MODE_CBC:
- return true;
- default:
- return false;
- }
- break;
- default:
- return false;
- }
-}
-
-static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
- QCryptoCipherMode mode,
- const uint8_t *key,
- size_t nkey,
- Error **errp)
-{
- if (!qcrypto_cipher_validate_key_length(alg, mode, nkey, errp)) {
- return NULL;
- }
-
- switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
- {
- QCryptoCipherBuiltinAES *ctx;
- const QCryptoCipherDriver *drv;
-
- switch (mode) {
- case QCRYPTO_CIPHER_MODE_ECB:
- drv = &qcrypto_cipher_aes_driver_ecb;
- break;
- case QCRYPTO_CIPHER_MODE_CBC:
- drv = &qcrypto_cipher_aes_driver_cbc;
- break;
- default:
- goto bad_mode;
- }
-
- ctx = g_new0(QCryptoCipherBuiltinAES, 1);
- ctx->base.driver = drv;
-
- if (AES_set_encrypt_key(key, nkey * 8, &ctx->key.enc)) {
- error_setg(errp, "Failed to set encryption key");
- goto error;
- }
- if (AES_set_decrypt_key(key, nkey * 8, &ctx->key.dec)) {
- error_setg(errp, "Failed to set decryption key");
- goto error;
- }
-
- return &ctx->base;
-
- error:
- g_free(ctx);
- return NULL;
- }
-
- default:
- error_setg(errp,
- "Unsupported cipher algorithm %s",
- QCryptoCipherAlgorithm_str(alg));
- return NULL;
- }
-
- bad_mode:
- error_setg(errp, "Unsupported cipher mode %s",
- QCryptoCipherMode_str(mode));
- return NULL;
-}
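Editor's note: with the builtin AES backend deleted, its CBC helper above is a useful record of what the chaining did — each plaintext block is XOR-ed with the previous ciphertext block (or the IV) before the block encryption, and the IV buffer is updated for the next call. A standalone sketch of that chaining, with a toy single-block permutation standing in for AES_encrypt so it compiles without any crypto library:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLOCK 16

    /* Stand-in for AES_encrypt: any keyed block permutation would do here. */
    static void toy_encrypt_block(const uint8_t in[BLOCK], uint8_t out[BLOCK],
                                  const uint8_t key[BLOCK])
    {
        for (int i = 0; i < BLOCK; i++) {
            out[i] = in[i] ^ key[i];
        }
    }

    /* CBC chaining as in the removed do_aes_encrypt_cbc: len must be a
     * multiple of BLOCK, and ivec is updated so calls can be chained. */
    static void encrypt_cbc(const uint8_t key[BLOCK], size_t len,
                            uint8_t *out, const uint8_t *in, uint8_t *ivec)
    {
        uint8_t tmp[BLOCK];

        while (len) {
            for (int i = 0; i < BLOCK; i++) {
                tmp[i] = in[i] ^ ivec[i];
            }
            toy_encrypt_block(tmp, out, key);
            memcpy(ivec, out, BLOCK);
            len -= BLOCK;
            in += BLOCK;
            out += BLOCK;
        }
    }

    int main(void)
    {
        uint8_t key[BLOCK] = { 1 }, iv[BLOCK] = { 0 };
        uint8_t pt[BLOCK] = "fifteen bytes!!";
        uint8_t ct[BLOCK];

        encrypt_cbc(key, sizeof(pt), ct, pt, iv);
        printf("%02x%02x...\n", ct[0], ct[1]);
        return 0;
    }
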
diff --git a/crypto/cipher-gcrypt.c.inc b/crypto/cipher-gcrypt.c.inc
index 4a83147..12eb9dd 100644
--- a/crypto/cipher-gcrypt.c.inc
+++ b/crypto/cipher-gcrypt.c.inc
@@ -20,33 +20,33 @@
#include <gcrypt.h>
-static int qcrypto_cipher_alg_to_gcry_alg(QCryptoCipherAlgorithm alg)
+static int qcrypto_cipher_alg_to_gcry_alg(QCryptoCipherAlgo alg)
{
switch (alg) {
- case QCRYPTO_CIPHER_ALG_DES:
+ case QCRYPTO_CIPHER_ALGO_DES:
return GCRY_CIPHER_DES;
- case QCRYPTO_CIPHER_ALG_3DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
return GCRY_CIPHER_3DES;
- case QCRYPTO_CIPHER_ALG_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
return GCRY_CIPHER_AES128;
- case QCRYPTO_CIPHER_ALG_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
return GCRY_CIPHER_AES192;
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
return GCRY_CIPHER_AES256;
- case QCRYPTO_CIPHER_ALG_CAST5_128:
+ case QCRYPTO_CIPHER_ALGO_CAST5_128:
return GCRY_CIPHER_CAST5;
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
return GCRY_CIPHER_SERPENT128;
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
return GCRY_CIPHER_SERPENT192;
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
return GCRY_CIPHER_SERPENT256;
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
return GCRY_CIPHER_TWOFISH128;
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
return GCRY_CIPHER_TWOFISH;
#ifdef CONFIG_CRYPTO_SM4
- case QCRYPTO_CIPHER_ALG_SM4:
+ case QCRYPTO_CIPHER_ALGO_SM4:
return GCRY_CIPHER_SM4;
#endif
default:
@@ -70,23 +70,23 @@ static int qcrypto_cipher_mode_to_gcry_mode(QCryptoCipherMode mode)
}
}
-bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
+bool qcrypto_cipher_supports(QCryptoCipherAlgo alg,
QCryptoCipherMode mode)
{
switch (alg) {
- case QCRYPTO_CIPHER_ALG_DES:
- case QCRYPTO_CIPHER_ALG_3DES:
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
- case QCRYPTO_CIPHER_ALG_CAST5_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
+ case QCRYPTO_CIPHER_ALGO_CAST5_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
#ifdef CONFIG_CRYPTO_SM4
- case QCRYPTO_CIPHER_ALG_SM4:
+ case QCRYPTO_CIPHER_ALGO_SM4:
#endif
break;
default:
@@ -228,7 +228,7 @@ static const struct QCryptoCipherDriver qcrypto_gcrypt_ctr_driver = {
.cipher_free = qcrypto_gcrypt_ctx_free,
};
-static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
+static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key,
size_t nkey,
@@ -246,7 +246,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
gcryalg = qcrypto_cipher_alg_to_gcry_alg(alg);
if (gcryalg == GCRY_CIPHER_NONE) {
error_setg(errp, "Unsupported cipher algorithm %s",
- QCryptoCipherAlgorithm_str(alg));
+ QCryptoCipherAlgo_str(alg));
return NULL;
}
diff --git a/crypto/cipher-gnutls.c.inc b/crypto/cipher-gnutls.c.inc
index d3e231c..b9450d4 100644
--- a/crypto/cipher-gnutls.c.inc
+++ b/crypto/cipher-gnutls.c.inc
@@ -27,7 +27,7 @@
#define QEMU_GNUTLS_XTS
#endif
-bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
+bool qcrypto_cipher_supports(QCryptoCipherAlgo alg,
QCryptoCipherMode mode)
{
@@ -35,11 +35,11 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
case QCRYPTO_CIPHER_MODE_ECB:
case QCRYPTO_CIPHER_MODE_CBC:
switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
- case QCRYPTO_CIPHER_ALG_DES:
- case QCRYPTO_CIPHER_ALG_3DES:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
+ case QCRYPTO_CIPHER_ALGO_DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
return true;
default:
return false;
@@ -47,8 +47,8 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
#ifdef QEMU_GNUTLS_XTS
case QCRYPTO_CIPHER_MODE_XTS:
switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
return true;
default:
return false;
@@ -229,7 +229,7 @@ static struct QCryptoCipherDriver gnutls_driver = {
.cipher_free = qcrypto_gnutls_cipher_free,
};
-static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
+static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key,
size_t nkey,
@@ -244,10 +244,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
#ifdef QEMU_GNUTLS_XTS
case QCRYPTO_CIPHER_MODE_XTS:
switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
galg = GNUTLS_CIPHER_AES_128_XTS;
break;
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
galg = GNUTLS_CIPHER_AES_256_XTS;
break;
default:
@@ -259,19 +259,19 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
case QCRYPTO_CIPHER_MODE_ECB:
case QCRYPTO_CIPHER_MODE_CBC:
switch (alg) {
- case QCRYPTO_CIPHER_ALG_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
galg = GNUTLS_CIPHER_AES_128_CBC;
break;
- case QCRYPTO_CIPHER_ALG_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
galg = GNUTLS_CIPHER_AES_192_CBC;
break;
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
galg = GNUTLS_CIPHER_AES_256_CBC;
break;
- case QCRYPTO_CIPHER_ALG_DES:
+ case QCRYPTO_CIPHER_ALGO_DES:
galg = GNUTLS_CIPHER_DES_CBC;
break;
- case QCRYPTO_CIPHER_ALG_3DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
galg = GNUTLS_CIPHER_3DES_CBC;
break;
default:
@@ -284,7 +284,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
if (galg == GNUTLS_CIPHER_UNKNOWN) {
error_setg(errp, "Unsupported cipher algorithm %s with %s mode",
- QCryptoCipherAlgorithm_str(alg),
+ QCryptoCipherAlgo_str(alg),
QCryptoCipherMode_str(mode));
return NULL;
}
@@ -310,8 +310,8 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
}
}
- if (alg == QCRYPTO_CIPHER_ALG_DES ||
- alg == QCRYPTO_CIPHER_ALG_3DES)
+ if (alg == QCRYPTO_CIPHER_ALGO_DES ||
+ alg == QCRYPTO_CIPHER_ALGO_3DES)
ctx->blocksize = 8;
else
ctx->blocksize = 16;
diff --git a/crypto/cipher-nettle.c.inc b/crypto/cipher-nettle.c.inc
index 42b39e1..ae91363 100644
--- a/crypto/cipher-nettle.c.inc
+++ b/crypto/cipher-nettle.c.inc
@@ -454,24 +454,24 @@ DEFINE_ECB(qcrypto_nettle_sm4,
sm4_encrypt_native, sm4_decrypt_native)
#endif
-bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
+bool qcrypto_cipher_supports(QCryptoCipherAlgo alg,
QCryptoCipherMode mode)
{
switch (alg) {
- case QCRYPTO_CIPHER_ALG_DES:
- case QCRYPTO_CIPHER_ALG_3DES:
- case QCRYPTO_CIPHER_ALG_AES_128:
- case QCRYPTO_CIPHER_ALG_AES_192:
- case QCRYPTO_CIPHER_ALG_AES_256:
- case QCRYPTO_CIPHER_ALG_CAST5_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
- case QCRYPTO_CIPHER_ALG_TWOFISH_192:
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
+ case QCRYPTO_CIPHER_ALGO_CAST5_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_192:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
#ifdef CONFIG_CRYPTO_SM4
- case QCRYPTO_CIPHER_ALG_SM4:
+ case QCRYPTO_CIPHER_ALGO_SM4:
#endif
break;
default:
@@ -489,7 +489,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
}
}
-static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
+static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key,
size_t nkey,
@@ -510,7 +510,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
}
switch (alg) {
- case QCRYPTO_CIPHER_ALG_DES:
+ case QCRYPTO_CIPHER_ALGO_DES:
{
QCryptoNettleDES *ctx;
const QCryptoCipherDriver *drv;
@@ -525,8 +525,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
case QCRYPTO_CIPHER_MODE_CTR:
drv = &qcrypto_nettle_des_driver_ctr;
break;
- default:
+ case QCRYPTO_CIPHER_MODE_XTS:
goto bad_cipher_mode;
+ default:
+ g_assert_not_reached();
}
ctx = g_new0(QCryptoNettleDES, 1);
@@ -536,7 +538,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_3DES:
+ case QCRYPTO_CIPHER_ALGO_3DES:
{
QCryptoNettleDES3 *ctx;
const QCryptoCipherDriver *drv;
@@ -551,8 +553,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
case QCRYPTO_CIPHER_MODE_CTR:
drv = &qcrypto_nettle_des3_driver_ctr;
break;
- default:
+ case QCRYPTO_CIPHER_MODE_XTS:
goto bad_cipher_mode;
+ default:
+ g_assert_not_reached();
}
ctx = g_new0(QCryptoNettleDES3, 1);
@@ -561,7 +565,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_AES_128:
+ case QCRYPTO_CIPHER_ALGO_AES_128:
{
QCryptoNettleAES128 *ctx = g_new0(QCryptoNettleAES128, 1);
@@ -590,7 +594,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_AES_192:
+ case QCRYPTO_CIPHER_ALGO_AES_192:
{
QCryptoNettleAES192 *ctx = g_new0(QCryptoNettleAES192, 1);
@@ -619,7 +623,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_AES_256:
+ case QCRYPTO_CIPHER_ALGO_AES_256:
{
QCryptoNettleAES256 *ctx = g_new0(QCryptoNettleAES256, 1);
@@ -648,7 +652,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_CAST5_128:
+ case QCRYPTO_CIPHER_ALGO_CAST5_128:
{
QCryptoNettleCAST128 *ctx;
const QCryptoCipherDriver *drv;
@@ -663,8 +667,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
case QCRYPTO_CIPHER_MODE_CTR:
drv = &qcrypto_nettle_cast128_driver_ctr;
break;
- default:
+ case QCRYPTO_CIPHER_MODE_XTS:
goto bad_cipher_mode;
+ default:
+ g_assert_not_reached();
}
ctx = g_new0(QCryptoNettleCAST128, 1);
@@ -674,9 +680,9 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_SERPENT_128:
- case QCRYPTO_CIPHER_ALG_SERPENT_192:
- case QCRYPTO_CIPHER_ALG_SERPENT_256:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_128:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_192:
+ case QCRYPTO_CIPHER_ALGO_SERPENT_256:
{
QCryptoNettleSerpent *ctx = g_new0(QCryptoNettleSerpent, 1);
@@ -703,9 +709,9 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
- case QCRYPTO_CIPHER_ALG_TWOFISH_128:
- case QCRYPTO_CIPHER_ALG_TWOFISH_192:
- case QCRYPTO_CIPHER_ALG_TWOFISH_256:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_128:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_192:
+ case QCRYPTO_CIPHER_ALGO_TWOFISH_256:
{
QCryptoNettleTwofish *ctx = g_new0(QCryptoNettleTwofish, 1);
@@ -732,18 +738,25 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
return &ctx->base;
}
#ifdef CONFIG_CRYPTO_SM4
- case QCRYPTO_CIPHER_ALG_SM4:
+ case QCRYPTO_CIPHER_ALGO_SM4:
{
- QCryptoNettleSm4 *ctx = g_new0(QCryptoNettleSm4, 1);
+ QCryptoNettleSm4 *ctx;
+ const QCryptoCipherDriver *drv;
switch (mode) {
case QCRYPTO_CIPHER_MODE_ECB:
- ctx->base.driver = &qcrypto_nettle_sm4_driver_ecb;
+ drv = &qcrypto_nettle_sm4_driver_ecb;
break;
- default:
+ case QCRYPTO_CIPHER_MODE_CBC:
+ case QCRYPTO_CIPHER_MODE_CTR:
+ case QCRYPTO_CIPHER_MODE_XTS:
goto bad_cipher_mode;
+ default:
+ g_assert_not_reached();
}
+ ctx = g_new0(QCryptoNettleSm4, 1);
+ ctx->base.driver = drv;
sm4_set_encrypt_key(&ctx->key[0], key);
sm4_set_decrypt_key(&ctx->key[1], key);
@@ -753,7 +766,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg,
default:
error_setg(errp, "Unsupported cipher algorithm %s",
- QCryptoCipherAlgorithm_str(alg));
+ QCryptoCipherAlgo_str(alg));
return NULL;
}
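Editor's note: the nettle hunks replace "default: goto bad_cipher_mode" with an explicit case for the unsupported mode plus g_assert_not_reached() in default, separating "valid mode this backend does not handle" (clean error to the caller) from "value outside the enum" (programming bug). A self-contained sketch of that pattern, with assert(0) standing in for g_assert_not_reached():

    #include <assert.h>
    #include <stdio.h>

    typedef enum { MODE_ECB, MODE_CBC, MODE_CTR, MODE_XTS } Mode;

    /* Returns a driver name, or NULL for a mode that is valid but not
     * supported by this backend; values outside the enum are a bug. */
    static const char *pick_driver(Mode mode)
    {
        switch (mode) {
        case MODE_ECB:
            return "ecb-driver";
        case MODE_CBC:
            return "cbc-driver";
        case MODE_CTR:
            return "ctr-driver";
        case MODE_XTS:
            return NULL;        /* unsupported: caller reports a clean error */
        default:
            assert(0);          /* stands in for g_assert_not_reached() */
            return NULL;
        }
    }

    int main(void)
    {
        const char *drv = pick_driver(MODE_XTS);
        printf("%s\n", drv ? drv : "mode not supported");
        return 0;
    }
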
diff --git a/crypto/cipher-stub.c.inc b/crypto/cipher-stub.c.inc
new file mode 100644
index 0000000..1b7ea81
--- /dev/null
+++ b/crypto/cipher-stub.c.inc
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * QEMU Crypto cipher impl stub
+ *
+ * Copyright (c) 2025 Red Hat, Inc.
+ *
+ */
+
+bool qcrypto_cipher_supports(QCryptoCipherAlgo alg,
+ QCryptoCipherMode mode)
+{
+ return false;
+}
+
+static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg,
+ QCryptoCipherMode mode,
+ const uint8_t *key,
+ size_t nkey,
+ Error **errp)
+{
+ if (!qcrypto_cipher_validate_key_length(alg, mode, nkey, errp)) {
+ return NULL;
+ }
+
+ error_setg(errp,
+ "Unsupported cipher algorithm %s, no crypto library enabled in build",
+ QCryptoCipherAlgo_str(alg));
+ return NULL;
+}
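Editor's note: the new cipher-stub.c.inc makes cipher creation fail with an explicit message when QEMU is built without any crypto library, instead of silently falling back to the removed builtin AES code. A hedged caller-side fragment (not standalone; it assumes the usual QEMU error headers) showing the error path such a build now takes, with error_report_err() used for reporting as is common elsewhere in the tree:

    Error *err = NULL;
    QCryptoCipher *cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_256,
                                               QCRYPTO_CIPHER_MODE_XTS,
                                               key, nkey, &err);
    if (!cipher) {
        /* With the stub backend this reports "Unsupported cipher algorithm
         * ..., no crypto library enabled in build". */
        error_report_err(err);
        return -1;
    }
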
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 5f51276..229710f 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -25,39 +25,39 @@
#include "cipherpriv.h"
-static const size_t alg_key_len[QCRYPTO_CIPHER_ALG__MAX] = {
- [QCRYPTO_CIPHER_ALG_AES_128] = 16,
- [QCRYPTO_CIPHER_ALG_AES_192] = 24,
- [QCRYPTO_CIPHER_ALG_AES_256] = 32,
- [QCRYPTO_CIPHER_ALG_DES] = 8,
- [QCRYPTO_CIPHER_ALG_3DES] = 24,
- [QCRYPTO_CIPHER_ALG_CAST5_128] = 16,
- [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16,
- [QCRYPTO_CIPHER_ALG_SERPENT_192] = 24,
- [QCRYPTO_CIPHER_ALG_SERPENT_256] = 32,
- [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16,
- [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 24,
- [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 32,
+static const size_t alg_key_len[QCRYPTO_CIPHER_ALGO__MAX] = {
+ [QCRYPTO_CIPHER_ALGO_AES_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_AES_192] = 24,
+ [QCRYPTO_CIPHER_ALGO_AES_256] = 32,
+ [QCRYPTO_CIPHER_ALGO_DES] = 8,
+ [QCRYPTO_CIPHER_ALGO_3DES] = 24,
+ [QCRYPTO_CIPHER_ALGO_CAST5_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_192] = 24,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_256] = 32,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_192] = 24,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_256] = 32,
#ifdef CONFIG_CRYPTO_SM4
- [QCRYPTO_CIPHER_ALG_SM4] = 16,
+ [QCRYPTO_CIPHER_ALGO_SM4] = 16,
#endif
};
-static const size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = {
- [QCRYPTO_CIPHER_ALG_AES_128] = 16,
- [QCRYPTO_CIPHER_ALG_AES_192] = 16,
- [QCRYPTO_CIPHER_ALG_AES_256] = 16,
- [QCRYPTO_CIPHER_ALG_DES] = 8,
- [QCRYPTO_CIPHER_ALG_3DES] = 8,
- [QCRYPTO_CIPHER_ALG_CAST5_128] = 8,
- [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16,
- [QCRYPTO_CIPHER_ALG_SERPENT_192] = 16,
- [QCRYPTO_CIPHER_ALG_SERPENT_256] = 16,
- [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16,
- [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 16,
- [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 16,
+static const size_t alg_block_len[QCRYPTO_CIPHER_ALGO__MAX] = {
+ [QCRYPTO_CIPHER_ALGO_AES_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_AES_192] = 16,
+ [QCRYPTO_CIPHER_ALGO_AES_256] = 16,
+ [QCRYPTO_CIPHER_ALGO_DES] = 8,
+ [QCRYPTO_CIPHER_ALGO_3DES] = 8,
+ [QCRYPTO_CIPHER_ALGO_CAST5_128] = 8,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_192] = 16,
+ [QCRYPTO_CIPHER_ALGO_SERPENT_256] = 16,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_128] = 16,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_192] = 16,
+ [QCRYPTO_CIPHER_ALGO_TWOFISH_256] = 16,
#ifdef CONFIG_CRYPTO_SM4
- [QCRYPTO_CIPHER_ALG_SM4] = 16,
+ [QCRYPTO_CIPHER_ALGO_SM4] = 16,
#endif
};
@@ -69,21 +69,21 @@ static const bool mode_need_iv[QCRYPTO_CIPHER_MODE__MAX] = {
};
-size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg)
+size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgo alg)
{
assert(alg < G_N_ELEMENTS(alg_key_len));
return alg_block_len[alg];
}
-size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg)
+size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgo alg)
{
assert(alg < G_N_ELEMENTS(alg_key_len));
return alg_key_len[alg];
}
-size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg,
+size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgo alg,
QCryptoCipherMode mode)
{
if (alg >= G_N_ELEMENTS(alg_block_len)) {
@@ -101,20 +101,20 @@ size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg,
static bool
-qcrypto_cipher_validate_key_length(QCryptoCipherAlgorithm alg,
+qcrypto_cipher_validate_key_length(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
size_t nkey,
Error **errp)
{
- if ((unsigned)alg >= QCRYPTO_CIPHER_ALG__MAX) {
+ if ((unsigned)alg >= QCRYPTO_CIPHER_ALGO__MAX) {
error_setg(errp, "Cipher algorithm %d out of range",
alg);
return false;
}
if (mode == QCRYPTO_CIPHER_MODE_XTS) {
- if (alg == QCRYPTO_CIPHER_ALG_DES ||
- alg == QCRYPTO_CIPHER_ALG_3DES) {
+ if (alg == QCRYPTO_CIPHER_ALGO_DES ||
+ alg == QCRYPTO_CIPHER_ALGO_3DES) {
error_setg(errp, "XTS mode not compatible with DES/3DES");
return false;
}
@@ -145,10 +145,10 @@ qcrypto_cipher_validate_key_length(QCryptoCipherAlgorithm alg,
#elif defined CONFIG_GNUTLS_CRYPTO
#include "cipher-gnutls.c.inc"
#else
-#include "cipher-builtin.c.inc"
+#include "cipher-stub.c.inc"
#endif
-QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
+QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key, size_t nkey,
Error **errp)
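
The renamed lookup tables above drive the three length helpers. A minimal sketch of querying them with the new QCRYPTO_CIPHER_ALGO_* names (illustrative only, not part of the patch; the function name and the "crypto/cipher.h" include path are assumptions):

    #include "qemu/osdep.h"
    #include "crypto/cipher.h"

    static void aes_256_xts_sizes(void)
    {
        /* AES-256: 32-byte key, 16-byte block */
        size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALGO_AES_256);
        size_t nblock = qcrypto_cipher_get_block_len(QCRYPTO_CIPHER_ALGO_AES_256);
        /* modes that need an IV use the cipher's block size */
        size_t niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALGO_AES_256,
                                               QCRYPTO_CIPHER_MODE_XTS);

        g_assert(nkey == 32 && nblock == 16 && niv == 16);
    }
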
diff --git a/crypto/cipherpriv.h b/crypto/cipherpriv.h
index 3965278..64737ce 100644
--- a/crypto/cipherpriv.h
+++ b/crypto/cipherpriv.h
@@ -42,7 +42,7 @@ struct QCryptoCipherDriver {
#include "afalgpriv.h"
extern QCryptoCipher *
-qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgorithm alg,
+qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key,
size_t nkey, Error **errp);
diff --git a/crypto/der.c b/crypto/der.c
index ebbecfc..8136752 100644
--- a/crypto/der.c
+++ b/crypto/der.c
@@ -408,19 +408,6 @@ void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
qcrypto_der_encode_prim(ctx, tag, src, src_len);
}
-void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx)
-{
- uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
- QCRYPTO_DER_TAG_ENC_PRIM,
- QCRYPTO_DER_TYPE_TAG_OCT_STR);
- qcrypto_der_encode_cons_begin(ctx, tag);
-}
-
-void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx)
-{
- qcrypto_der_encode_cons_end(ctx);
-}
-
size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx)
{
return ctx->root.dlen;
diff --git a/crypto/der.h b/crypto/der.h
index f4ba6da..bcfa4a2 100644
--- a/crypto/der.h
+++ b/crypto/der.h
@@ -243,28 +243,6 @@ void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len);
/**
- * qcrypto_der_encode_octet_str_begin:
- * @ctx: the encode context.
- *
- * Start encoding a octet string, All fields between
- * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
- * are encoded as an octet string. This is useful when we need to encode a
- * encoded SEQUENCE as OCTET STRING.
- */
-void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx);
-
-/**
- * qcrypto_der_encode_octet_str_end:
- * @ctx: the encode context.
- *
- * Finish encoding a octet string, All fields between
- * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
- * are encoded as an octet string. This is useful when we need to encode a
- * encoded SEQUENCE as OCTET STRING.
- */
-void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx);
-
-/**
* qcrypto_der_encode_ctx_buffer_len:
* @ctx: the encode context.
*
diff --git a/crypto/hash-afalg.c b/crypto/hash-afalg.c
index 3ebea39..bd3fe3b 100644
--- a/crypto/hash-afalg.c
+++ b/crypto/hash-afalg.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto af_alg-backend hash/hmac support
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
@@ -20,7 +21,7 @@
#include "hmacpriv.h"
static char *
-qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg,
+qcrypto_afalg_hash_format_name(QCryptoHashAlgo alg,
bool is_hmac,
Error **errp)
{
@@ -28,25 +29,25 @@ qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg,
const char *alg_name;
switch (alg) {
- case QCRYPTO_HASH_ALG_MD5:
+ case QCRYPTO_HASH_ALGO_MD5:
alg_name = "md5";
break;
- case QCRYPTO_HASH_ALG_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA1:
alg_name = "sha1";
break;
- case QCRYPTO_HASH_ALG_SHA224:
+ case QCRYPTO_HASH_ALGO_SHA224:
alg_name = "sha224";
break;
- case QCRYPTO_HASH_ALG_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA256:
alg_name = "sha256";
break;
- case QCRYPTO_HASH_ALG_SHA384:
+ case QCRYPTO_HASH_ALGO_SHA384:
alg_name = "sha384";
break;
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_SHA512:
alg_name = "sha512";
break;
- case QCRYPTO_HASH_ALG_RIPEMD160:
+ case QCRYPTO_HASH_ALGO_RIPEMD160:
alg_name = "rmd160";
break;
@@ -58,18 +59,18 @@ qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg,
if (is_hmac) {
name = g_strdup_printf("hmac(%s)", alg_name);
} else {
- name = g_strdup_printf("%s", alg_name);
+ name = g_strdup(alg_name);
}
return name;
}
-static QCryptoAFAlg *
-qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgorithm alg,
+static QCryptoAFAlgo *
+qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
bool is_hmac, Error **errp)
{
- QCryptoAFAlg *afalg;
+ QCryptoAFAlgo *afalg;
char *name;
name = qcrypto_afalg_hash_format_name(alg, is_hmac, errp);
@@ -98,89 +99,160 @@ qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgorithm alg,
return afalg;
}
-static QCryptoAFAlg *
-qcrypto_afalg_hash_ctx_new(QCryptoHashAlgorithm alg,
+static QCryptoAFAlgo *
+qcrypto_afalg_hash_ctx_new(QCryptoHashAlgo alg,
Error **errp)
{
return qcrypto_afalg_hash_hmac_ctx_new(alg, NULL, 0, false, errp);
}
-QCryptoAFAlg *
-qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgorithm alg,
+QCryptoAFAlgo *
+qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
return qcrypto_afalg_hash_hmac_ctx_new(alg, key, nkey, true, errp);
}
-static int
-qcrypto_afalg_hash_hmac_bytesv(QCryptoAFAlg *hmac,
- QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov, uint8_t **result,
- size_t *resultlen,
- Error **errp)
+static
+QCryptoHash *qcrypto_afalg_hash_new(QCryptoHashAlgo alg, Error **errp)
{
- QCryptoAFAlg *afalg;
- struct iovec outv;
- int ret = 0;
- bool is_hmac = (hmac != NULL) ? true : false;
- const int expect_len = qcrypto_hash_digest_len(alg);
+ /* Check if hash algorithm is supported */
+ char *alg_name = qcrypto_afalg_hash_format_name(alg, false, NULL);
+ QCryptoHash *hash;
- if (*resultlen == 0) {
- *resultlen = expect_len;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != expect_len) {
- error_setg(errp,
- "Result buffer size %zu is not match hash %d",
- *resultlen, expect_len);
- return -1;
+ if (alg_name == NULL) {
+ error_setg(errp, "Unknown hash algorithm %d", alg);
+ return NULL;
}
- if (is_hmac) {
- afalg = hmac;
- } else {
- afalg = qcrypto_afalg_hash_ctx_new(alg, errp);
- if (!afalg) {
- return -1;
- }
+ g_free(alg_name);
+
+ hash = g_new(QCryptoHash, 1);
+ hash->alg = alg;
+ hash->opaque = qcrypto_afalg_hash_ctx_new(alg, errp);
+ if (!hash->opaque) {
+ g_free(hash);
+ return NULL;
}
+ return hash;
+}
+
+static
+void qcrypto_afalg_hash_free(QCryptoHash *hash)
+{
+ QCryptoAFAlgo *ctx = hash->opaque;
+
+ if (ctx) {
+ qcrypto_afalg_comm_free(ctx);
+ }
+
+ g_free(hash);
+}
+
+/**
+ * Send data to the kernel's crypto core.
+ *
+ * The more_data parameter is used to notify the crypto engine
+ * that this is an "update" operation, and that more data will
+ * be provided to calculate the final hash.
+ */
+static
+int qcrypto_afalg_send_to_kernel(QCryptoAFAlgo *afalg,
+ const struct iovec *iov,
+ size_t niov,
+ bool more_data,
+ Error **errp)
+{
+ int ret = 0;
+ int flags = (more_data ? MSG_MORE : 0);
+
/* send data to kernel's crypto core */
- ret = iov_send_recv(afalg->opfd, iov, niov,
- 0, iov_size(iov, niov), true);
+ ret = iov_send_recv_with_flags(afalg->opfd, flags, iov, niov,
+ 0, iov_size(iov, niov), true);
if (ret < 0) {
error_setg_errno(errp, errno, "Send data to afalg-core failed");
- goto out;
+ ret = -1;
+ } else {
+ /* No error, so return 0 */
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static
+int qcrypto_afalg_recv_from_kernel(QCryptoAFAlgo *afalg,
+ QCryptoHashAlgo alg,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ struct iovec outv;
+ int ret;
+ const int expected_len = qcrypto_hash_digest_len(alg);
+
+ if (*result_len == 0) {
+ *result_len = expected_len;
+ *result = g_new0(uint8_t, *result_len);
+ } else if (*result_len != expected_len) {
+ error_setg(errp,
+ "Result buffer size %zu is not match hash %d",
+ *result_len, expected_len);
+ return -1;
}
/* hash && get result */
outv.iov_base = *result;
- outv.iov_len = *resultlen;
+ outv.iov_len = *result_len;
ret = iov_send_recv(afalg->opfd, &outv, 1,
0, iov_size(&outv, 1), false);
if (ret < 0) {
error_setg_errno(errp, errno, "Recv result from afalg-core failed");
- } else {
- ret = 0;
+ return -1;
}
-out:
- if (!is_hmac) {
- qcrypto_afalg_comm_free(afalg);
- }
- return ret;
+ return 0;
+}
+
+static
+int qcrypto_afalg_hash_update(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ return qcrypto_afalg_send_to_kernel((QCryptoAFAlgo *) hash->opaque,
+ iov, niov, true, errp);
+}
+
+static
+int qcrypto_afalg_hash_finalize(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ return qcrypto_afalg_recv_from_kernel((QCryptoAFAlgo *) hash->opaque,
+ hash->alg, result, result_len, errp);
}
static int
-qcrypto_afalg_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov, uint8_t **result,
- size_t *resultlen,
- Error **errp)
+qcrypto_afalg_hash_hmac_bytesv(QCryptoAFAlgo *hmac,
+ QCryptoHashAlgo alg,
+ const struct iovec *iov,
+ size_t niov, uint8_t **result,
+ size_t *resultlen,
+ Error **errp)
{
- return qcrypto_afalg_hash_hmac_bytesv(NULL, alg, iov, niov, result,
- resultlen, errp);
+ int ret = 0;
+
+ ret = qcrypto_afalg_send_to_kernel(hmac, iov, niov, false, errp);
+ if (ret == 0) {
+ ret = qcrypto_afalg_recv_from_kernel(hmac, alg, result,
+ resultlen, errp);
+ }
+
+ return ret;
}
static int
@@ -197,14 +269,17 @@ qcrypto_afalg_hmac_bytesv(QCryptoHmac *hmac,
static void qcrypto_afalg_hmac_ctx_free(QCryptoHmac *hmac)
{
- QCryptoAFAlg *afalg;
+ QCryptoAFAlgo *afalg;
afalg = hmac->opaque;
qcrypto_afalg_comm_free(afalg);
}
QCryptoHashDriver qcrypto_hash_afalg_driver = {
- .hash_bytesv = qcrypto_afalg_hash_bytesv,
+ .hash_new = qcrypto_afalg_hash_new,
+ .hash_free = qcrypto_afalg_hash_free,
+ .hash_update = qcrypto_afalg_hash_update,
+ .hash_finalize = qcrypto_afalg_hash_finalize
};
QCryptoHmacDriver qcrypto_hmac_afalg_driver = {
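
The comment on qcrypto_afalg_send_to_kernel() above is the key to the split: updates are sent with MSG_MORE so the kernel keeps accumulating input, and the digest is only read back at finalize time. A rough sketch of driving the new callbacks directly (illustrative only; the function name is an assumption, and in practice qcrypto_hash_new() selects this driver when CONFIG_AF_ALG is enabled):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/hash.h"
    #include "hashpriv.h"

    static int afalg_sha256_example(const struct iovec *iov, size_t niov,
                                    uint8_t **digest, size_t *digestlen,
                                    Error **errp)
    {
        QCryptoHashDriver *drv = &qcrypto_hash_afalg_driver;
        /* pass *digestlen == 0 so hash_finalize allocates *digest */
        QCryptoHash *h = drv->hash_new(QCRYPTO_HASH_ALGO_SHA256, errp);

        if (!h) {
            return -1;
        }
        /* each update is sent with MSG_MORE; finalize reads the digest back */
        if (drv->hash_update(h, iov, niov, errp) < 0 ||
            drv->hash_finalize(h, digest, digestlen, errp) < 0) {
            drv->hash_free(h);
            return -1;
        }
        drv->hash_free(h);
        return 0;
    }
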
diff --git a/crypto/hash-gcrypt.c b/crypto/hash-gcrypt.c
index 829e482..af61c4e 100644
--- a/crypto/hash-gcrypt.c
+++ b/crypto/hash-gcrypt.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -25,92 +26,115 @@
#include "hashpriv.h"
-static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GCRY_MD_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GCRY_MD_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GCRY_MD_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GCRY_MD_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GCRY_MD_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GCRY_MD_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MD_RMD160,
+static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GCRY_MD_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MD_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MD_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MD_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3,
+#endif
};
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+gboolean qcrypto_hash_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) &&
qcrypto_hash_alg_map[alg] != GCRY_MD_NONE) {
- return true;
+ return gcry_md_test_algo(qcrypto_hash_alg_map[alg]) == 0;
}
return false;
}
-
-static int
-qcrypto_gcrypt_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov,
- uint8_t **result,
- size_t *resultlen,
- Error **errp)
+static
+QCryptoHash *qcrypto_gcrypt_hash_new(QCryptoHashAlgo alg, Error **errp)
{
- int i, ret;
- gcry_md_hd_t md;
- unsigned char *digest;
-
- if (!qcrypto_hash_supports(alg)) {
- error_setg(errp,
- "Unknown hash algorithm %d",
- alg);
- return -1;
- }
+ QCryptoHash *hash;
+ gcry_error_t ret;
- ret = gcry_md_open(&md, qcrypto_hash_alg_map[alg], 0);
+ hash = g_new(QCryptoHash, 1);
+ hash->alg = alg;
+ hash->opaque = g_new(gcry_md_hd_t, 1);
- if (ret < 0) {
+ ret = gcry_md_open((gcry_md_hd_t *) hash->opaque,
+ qcrypto_hash_alg_map[alg], 0);
+ if (ret != 0) {
error_setg(errp,
"Unable to initialize hash algorithm: %s",
gcry_strerror(ret));
- return -1;
+ g_free(hash->opaque);
+ g_free(hash);
+ return NULL;
}
+ return hash;
+}
- for (i = 0; i < niov; i++) {
- gcry_md_write(md, iov[i].iov_base, iov[i].iov_len);
+static
+void qcrypto_gcrypt_hash_free(QCryptoHash *hash)
+{
+ gcry_md_hd_t *ctx = hash->opaque;
+
+ if (ctx) {
+ gcry_md_close(*ctx);
+ g_free(ctx);
}
- ret = gcry_md_get_algo_dlen(qcrypto_hash_alg_map[alg]);
- if (ret <= 0) {
- error_setg(errp,
- "Unable to get hash length: %s",
- gcry_strerror(ret));
- goto error;
+ g_free(hash);
+}
+
+
+static
+int qcrypto_gcrypt_hash_update(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ gcry_md_hd_t *ctx = hash->opaque;
+
+ for (int i = 0; i < niov; i++) {
+ gcry_md_write(*ctx, iov[i].iov_base, iov[i].iov_len);
}
- if (*resultlen == 0) {
- *resultlen = ret;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != ret) {
- error_setg(errp,
- "Result buffer size %zu is smaller than hash %d",
- *resultlen, ret);
- goto error;
+
+ return 0;
+}
+
+static
+int qcrypto_gcrypt_hash_finalize(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ int ret;
+ unsigned char *digest;
+ gcry_md_hd_t *ctx = hash->opaque;
+
+ ret = gcry_md_get_algo_dlen(qcrypto_hash_alg_map[hash->alg]);
+ if (ret == 0) {
+ error_setg(errp, "Unable to get hash length");
+ return -1;
}
- digest = gcry_md_read(md, 0);
- if (!digest) {
+ if (*result_len == 0) {
+ *result_len = ret;
+ *result = g_new(uint8_t, *result_len);
+ } else if (*result_len != ret) {
error_setg(errp,
- "No digest produced");
- goto error;
+ "Result buffer size %zu is smaller than hash %d",
+ *result_len, ret);
+ return -1;
}
- memcpy(*result, digest, *resultlen);
- gcry_md_close(md);
+ /* Digest is freed by gcry_md_close(), copy it */
+ digest = gcry_md_read(*ctx, 0);
+ memcpy(*result, digest, *result_len);
return 0;
-
- error:
- gcry_md_close(md);
- return -1;
}
-
QCryptoHashDriver qcrypto_hash_lib_driver = {
- .hash_bytesv = qcrypto_gcrypt_hash_bytesv,
+ .hash_new = qcrypto_gcrypt_hash_new,
+ .hash_update = qcrypto_gcrypt_hash_update,
+ .hash_finalize = qcrypto_gcrypt_hash_finalize,
+ .hash_free = qcrypto_gcrypt_hash_free,
};
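
The gcrypt backend above is a thin wrapper over libgcrypt's streaming digest handle; the raw sequence it maps onto looks roughly like this (standard libgcrypt calls, error handling trimmed, function name illustrative):

    #include <string.h>
    #include <gcrypt.h>

    static void gcrypt_sha256_example(const uint8_t *buf, size_t len,
                                      uint8_t *out /* >= 32 bytes */)
    {
        gcry_md_hd_t md;

        gcry_md_open(&md, GCRY_MD_SHA256, 0);          /* hash_new      */
        gcry_md_write(md, buf, len);                   /* hash_update   */
        memcpy(out, gcry_md_read(md, 0),               /* hash_finalize */
               gcry_md_get_algo_dlen(GCRY_MD_SHA256));
        gcry_md_close(md);                             /* hash_free     */
    }
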
diff --git a/crypto/hash-glib.c b/crypto/hash-glib.c
index 82de9db..809cef9 100644
--- a/crypto/hash-glib.c
+++ b/crypto/hash-glib.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -24,17 +25,17 @@
#include "hashpriv.h"
-static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = G_CHECKSUM_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = G_CHECKSUM_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = -1,
- [QCRYPTO_HASH_ALG_SHA256] = G_CHECKSUM_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = -1,
- [QCRYPTO_HASH_ALG_SHA512] = G_CHECKSUM_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = -1,
+static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = G_CHECKSUM_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = G_CHECKSUM_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = -1,
+ [QCRYPTO_HASH_ALGO_SHA256] = G_CHECKSUM_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = G_CHECKSUM_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = G_CHECKSUM_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = -1,
};
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+gboolean qcrypto_hash_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) &&
qcrypto_hash_alg_map[alg] != -1) {
@@ -43,58 +44,78 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
return false;
}
-
-static int
-qcrypto_glib_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov,
- uint8_t **result,
- size_t *resultlen,
- Error **errp)
+static
+QCryptoHash *qcrypto_glib_hash_new(QCryptoHashAlgo alg,
+ Error **errp)
{
- int i, ret;
- GChecksum *cs;
+ QCryptoHash *hash;
- if (!qcrypto_hash_supports(alg)) {
- error_setg(errp,
- "Unknown hash algorithm %d",
- alg);
- return -1;
+ hash = g_new(QCryptoHash, 1);
+ hash->alg = alg;
+ hash->opaque = g_checksum_new(qcrypto_hash_alg_map[alg]);
+
+ return hash;
+}
+
+static
+void qcrypto_glib_hash_free(QCryptoHash *hash)
+{
+ if (hash->opaque) {
+ g_checksum_free(hash->opaque);
}
- cs = g_checksum_new(qcrypto_hash_alg_map[alg]);
+ g_free(hash);
+}
+
+
+static
+int qcrypto_glib_hash_update(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ GChecksum *ctx = hash->opaque;
- for (i = 0; i < niov; i++) {
- g_checksum_update(cs, iov[i].iov_base, iov[i].iov_len);
+ for (int i = 0; i < niov; i++) {
+ g_checksum_update(ctx, iov[i].iov_base, iov[i].iov_len);
}
- ret = g_checksum_type_get_length(qcrypto_hash_alg_map[alg]);
+ return 0;
+}
+
+static
+int qcrypto_glib_hash_finalize(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ int ret;
+ GChecksum *ctx = hash->opaque;
+
+ ret = g_checksum_type_get_length(qcrypto_hash_alg_map[hash->alg]);
if (ret < 0) {
- error_setg(errp, "%s",
- "Unable to get hash length");
- goto error;
+ error_setg(errp, "Unable to get hash length");
+ *result_len = 0;
+ return -1;
}
- if (*resultlen == 0) {
- *resultlen = ret;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != ret) {
+
+ if (*result_len == 0) {
+ *result_len = ret;
+ *result = g_new(uint8_t, *result_len);
+ } else if (*result_len != ret) {
error_setg(errp,
"Result buffer size %zu is smaller than hash %d",
- *resultlen, ret);
- goto error;
+ *result_len, ret);
+ return -1;
}
- g_checksum_get_digest(cs, *result, resultlen);
-
- g_checksum_free(cs);
+ g_checksum_get_digest(ctx, *result, result_len);
return 0;
-
- error:
- g_checksum_free(cs);
- return -1;
}
-
QCryptoHashDriver qcrypto_hash_lib_driver = {
- .hash_bytesv = qcrypto_glib_hash_bytesv,
+ .hash_new = qcrypto_glib_hash_new,
+ .hash_update = qcrypto_glib_hash_update,
+ .hash_finalize = qcrypto_glib_hash_finalize,
+ .hash_free = qcrypto_glib_hash_free,
};
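
The glib backend follows the same shape on top of GChecksum; roughly as below (note *outlen must hold the buffer size on entry, since g_checksum_get_digest() updates it; function name illustrative):

    #include <glib.h>

    static void glib_sha256_example(const guint8 *buf, gsize len,
                                    guint8 *out, gsize *outlen /* >= 32 */)
    {
        GChecksum *cs = g_checksum_new(G_CHECKSUM_SHA256);   /* hash_new      */

        g_checksum_update(cs, buf, len);                     /* hash_update   */
        g_checksum_get_digest(cs, out, outlen);              /* hash_finalize */
        g_checksum_free(cs);                                 /* hash_free     */
    }
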
diff --git a/crypto/hash-gnutls.c b/crypto/hash-gnutls.c
index 17911ac..99fbe82 100644
--- a/crypto/hash-gnutls.c
+++ b/crypto/hash-gnutls.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2021 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -25,17 +26,17 @@
#include "hashpriv.h"
-static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GNUTLS_DIG_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_DIG_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_DIG_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_DIG_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_DIG_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_DIG_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_DIG_RMD160,
+static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160,
};
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+gboolean qcrypto_hash_supports(QCryptoHashAlgo alg)
{
size_t i;
const gnutls_digest_algorithm_t *algs;
@@ -52,53 +53,93 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
return false;
}
-
-static int
-qcrypto_gnutls_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov,
- uint8_t **result,
- size_t *resultlen,
- Error **errp)
+static
+QCryptoHash *qcrypto_gnutls_hash_new(QCryptoHashAlgo alg, Error **errp)
{
- int i, ret;
- gnutls_hash_hd_t hash;
-
- if (!qcrypto_hash_supports(alg)) {
- error_setg(errp,
- "Unknown hash algorithm %d",
- alg);
- return -1;
- }
+ QCryptoHash *hash;
+ int ret;
- ret = gnutls_hash_get_len(qcrypto_hash_alg_map[alg]);
- if (*resultlen == 0) {
- *resultlen = ret;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != ret) {
- error_setg(errp,
- "Result buffer size %zu is smaller than hash %d",
- *resultlen, ret);
- return -1;
- }
+ hash = g_new(QCryptoHash, 1);
+ hash->alg = alg;
+ hash->opaque = g_new(gnutls_hash_hd_t, 1);
- ret = gnutls_hash_init(&hash, qcrypto_hash_alg_map[alg]);
+ ret = gnutls_hash_init(hash->opaque, qcrypto_hash_alg_map[alg]);
if (ret < 0) {
error_setg(errp,
"Unable to initialize hash algorithm: %s",
gnutls_strerror(ret));
- return -1;
+ g_free(hash->opaque);
+ g_free(hash);
+ return NULL;
}
- for (i = 0; i < niov; i++) {
- gnutls_hash(hash, iov[i].iov_base, iov[i].iov_len);
+ return hash;
+}
+
+static
+void qcrypto_gnutls_hash_free(QCryptoHash *hash)
+{
+ gnutls_hash_hd_t *ctx = hash->opaque;
+
+ gnutls_hash_deinit(*ctx, NULL);
+ g_free(ctx);
+ g_free(hash);
+}
+
+
+static
+int qcrypto_gnutls_hash_update(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ int ret = 0;
+ gnutls_hash_hd_t *ctx = hash->opaque;
+
+ for (int i = 0; i < niov; i++) {
+ ret = gnutls_hash(*ctx, iov[i].iov_base, iov[i].iov_len);
+ if (ret != 0) {
+ error_setg(errp, "Failed to hash data: %s",
+ gnutls_strerror(ret));
+ return -1;
+ }
}
- gnutls_hash_deinit(hash, *result);
return 0;
}
+static
+int qcrypto_gnutls_hash_finalize(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ gnutls_hash_hd_t *ctx = hash->opaque;
+ int ret;
+
+ ret = gnutls_hash_get_len(qcrypto_hash_alg_map[hash->alg]);
+ if (ret == 0) {
+ error_setg(errp, "Unable to get hash length");
+ return -1;
+ }
+
+ if (*result_len == 0) {
+ *result_len = ret;
+ *result = g_new(uint8_t, *result_len);
+ } else if (*result_len != ret) {
+ error_setg(errp,
+ "Result buffer size %zu is smaller than hash %d",
+ *result_len, ret);
+ return -1;
+ }
+
+ gnutls_hash_output(*ctx, *result);
+ return 0;
+}
QCryptoHashDriver qcrypto_hash_lib_driver = {
- .hash_bytesv = qcrypto_gnutls_hash_bytesv,
+ .hash_new = qcrypto_gnutls_hash_new,
+ .hash_update = qcrypto_gnutls_hash_update,
+ .hash_finalize = qcrypto_gnutls_hash_finalize,
+ .hash_free = qcrypto_gnutls_hash_free,
};
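
Likewise for GnuTLS, where the driver above maps onto gnutls_hash_init()/gnutls_hash()/gnutls_hash_output() (illustrative sketch, error handling trimmed):

    #include <gnutls/crypto.h>

    static int gnutls_sha256_example(const void *buf, size_t len,
                                     uint8_t *out /* >= 32 bytes */)
    {
        gnutls_hash_hd_t h;

        if (gnutls_hash_init(&h, GNUTLS_DIG_SHA256) < 0) {   /* hash_new      */
            return -1;
        }
        gnutls_hash(h, buf, len);                            /* hash_update   */
        gnutls_hash_output(h, out);                          /* hash_finalize */
        gnutls_hash_deinit(h, NULL);                         /* hash_free     */
        return 0;
    }
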
diff --git a/crypto/hash-nettle.c b/crypto/hash-nettle.c
index 1ca1a41..53f6830 100644
--- a/crypto/hash-nettle.c
+++ b/crypto/hash-nettle.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -25,6 +26,9 @@
#include <nettle/md5.h>
#include <nettle/sha.h>
#include <nettle/ripemd160.h>
+#ifdef CONFIG_CRYPTO_SM3
+#include <nettle/sm3.h>
+#endif
typedef void (*qcrypto_nettle_init)(void *ctx);
typedef void (*qcrypto_nettle_write)(void *ctx,
@@ -42,6 +46,9 @@ union qcrypto_hash_ctx {
struct sha384_ctx sha384;
struct sha512_ctx sha512;
struct ripemd160_ctx ripemd160;
+#ifdef CONFIG_CRYPTO_SM3
+ struct sm3_ctx sm3;
+#endif
};
struct qcrypto_hash_alg {
@@ -50,51 +57,59 @@ struct qcrypto_hash_alg {
qcrypto_nettle_result result;
size_t len;
} qcrypto_hash_alg_map[] = {
- [QCRYPTO_HASH_ALG_MD5] = {
+ [QCRYPTO_HASH_ALGO_MD5] = {
.init = (qcrypto_nettle_init)md5_init,
.write = (qcrypto_nettle_write)md5_update,
.result = (qcrypto_nettle_result)md5_digest,
.len = MD5_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA1] = {
+ [QCRYPTO_HASH_ALGO_SHA1] = {
.init = (qcrypto_nettle_init)sha1_init,
.write = (qcrypto_nettle_write)sha1_update,
.result = (qcrypto_nettle_result)sha1_digest,
.len = SHA1_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA224] = {
+ [QCRYPTO_HASH_ALGO_SHA224] = {
.init = (qcrypto_nettle_init)sha224_init,
.write = (qcrypto_nettle_write)sha224_update,
.result = (qcrypto_nettle_result)sha224_digest,
.len = SHA224_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA256] = {
+ [QCRYPTO_HASH_ALGO_SHA256] = {
.init = (qcrypto_nettle_init)sha256_init,
.write = (qcrypto_nettle_write)sha256_update,
.result = (qcrypto_nettle_result)sha256_digest,
.len = SHA256_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA384] = {
+ [QCRYPTO_HASH_ALGO_SHA384] = {
.init = (qcrypto_nettle_init)sha384_init,
.write = (qcrypto_nettle_write)sha384_update,
.result = (qcrypto_nettle_result)sha384_digest,
.len = SHA384_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA512] = {
+ [QCRYPTO_HASH_ALGO_SHA512] = {
.init = (qcrypto_nettle_init)sha512_init,
.write = (qcrypto_nettle_write)sha512_update,
.result = (qcrypto_nettle_result)sha512_digest,
.len = SHA512_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_RIPEMD160] = {
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = {
.init = (qcrypto_nettle_init)ripemd160_init,
.write = (qcrypto_nettle_write)ripemd160_update,
.result = (qcrypto_nettle_result)ripemd160_digest,
.len = RIPEMD160_DIGEST_SIZE,
},
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = {
+ .init = (qcrypto_nettle_init)sm3_init,
+ .write = (qcrypto_nettle_write)sm3_update,
+ .result = (qcrypto_nettle_result)sm3_digest,
+ .len = SM3_DIGEST_SIZE,
+ },
+#endif
};
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+gboolean qcrypto_hash_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) &&
qcrypto_hash_alg_map[alg].init != NULL) {
@@ -103,59 +118,72 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
return false;
}
+static
+QCryptoHash *qcrypto_nettle_hash_new(QCryptoHashAlgo alg, Error **errp)
+{
+ QCryptoHash *hash;
+
+ hash = g_new(QCryptoHash, 1);
+ hash->alg = alg;
+ hash->opaque = g_new(union qcrypto_hash_ctx, 1);
+
+ qcrypto_hash_alg_map[alg].init(hash->opaque);
+ return hash;
+}
-static int
-qcrypto_nettle_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov,
- uint8_t **result,
- size_t *resultlen,
- Error **errp)
+static
+void qcrypto_nettle_hash_free(QCryptoHash *hash)
{
- size_t i;
- union qcrypto_hash_ctx ctx;
+ union qcrypto_hash_ctx *ctx = hash->opaque;
- if (!qcrypto_hash_supports(alg)) {
- error_setg(errp,
- "Unknown hash algorithm %d",
- alg);
- return -1;
- }
+ g_free(ctx);
+ g_free(hash);
+}
- qcrypto_hash_alg_map[alg].init(&ctx);
-
- for (i = 0; i < niov; i++) {
- /* Some versions of nettle have functions
- * declared with 'int' instead of 'size_t'
- * so to be safe avoid writing more than
- * UINT_MAX bytes at a time
- */
- size_t len = iov[i].iov_len;
- uint8_t *base = iov[i].iov_base;
- while (len) {
- size_t shortlen = MIN(len, UINT_MAX);
- qcrypto_hash_alg_map[alg].write(&ctx, len, base);
- len -= shortlen;
- base += len;
- }
+static
+int qcrypto_nettle_hash_update(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ union qcrypto_hash_ctx *ctx = hash->opaque;
+
+ for (int i = 0; i < niov; i++) {
+ qcrypto_hash_alg_map[hash->alg].write(ctx,
+ iov[i].iov_len,
+ iov[i].iov_base);
}
- if (*resultlen == 0) {
- *resultlen = qcrypto_hash_alg_map[alg].len;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != qcrypto_hash_alg_map[alg].len) {
+ return 0;
+}
+
+static
+int qcrypto_nettle_hash_finalize(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ union qcrypto_hash_ctx *ctx = hash->opaque;
+ int ret = qcrypto_hash_alg_map[hash->alg].len;
+
+ if (*result_len == 0) {
+ *result_len = ret;
+ *result = g_new(uint8_t, *result_len);
+ } else if (*result_len != ret) {
error_setg(errp,
- "Result buffer size %zu is smaller than hash %zu",
- *resultlen, qcrypto_hash_alg_map[alg].len);
+ "Result buffer size %zu is smaller than hash %d",
+ *result_len, ret);
return -1;
}
- qcrypto_hash_alg_map[alg].result(&ctx, *resultlen, *result);
+ qcrypto_hash_alg_map[hash->alg].result(ctx, *result_len, *result);
return 0;
}
-
QCryptoHashDriver qcrypto_hash_lib_driver = {
- .hash_bytesv = qcrypto_nettle_hash_bytesv,
+ .hash_new = qcrypto_nettle_hash_new,
+ .hash_update = qcrypto_nettle_hash_update,
+ .hash_finalize = qcrypto_nettle_hash_finalize,
+ .hash_free = qcrypto_nettle_hash_free,
};
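
Nettle has no generic digest handle, which is why the driver above keeps a per-algorithm union and a table of init/write/digest pointers. For a single algorithm the underlying calls are simply (illustrative sketch):

    #include <nettle/sha.h>

    static void nettle_sha256_example(const uint8_t *buf, size_t len,
                                      uint8_t out[SHA256_DIGEST_SIZE])
    {
        struct sha256_ctx ctx;

        sha256_init(&ctx);                             /* hash_new      */
        sha256_update(&ctx, len, buf);                 /* hash_update   */
        sha256_digest(&ctx, SHA256_DIGEST_SIZE, out);  /* hash_finalize */
    }
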
diff --git a/crypto/hash.c b/crypto/hash.c
index b0f8228..7513769 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2015 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -19,53 +20,53 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi-types-crypto.h"
#include "crypto/hash.h"
#include "hashpriv.h"
-static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = 16,
- [QCRYPTO_HASH_ALG_SHA1] = 20,
- [QCRYPTO_HASH_ALG_SHA224] = 28,
- [QCRYPTO_HASH_ALG_SHA256] = 32,
- [QCRYPTO_HASH_ALG_SHA384] = 48,
- [QCRYPTO_HASH_ALG_SHA512] = 64,
- [QCRYPTO_HASH_ALG_RIPEMD160] = 20,
+static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = QCRYPTO_HASH_DIGEST_LEN_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = QCRYPTO_HASH_DIGEST_LEN_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = QCRYPTO_HASH_DIGEST_LEN_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = QCRYPTO_HASH_DIGEST_LEN_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = QCRYPTO_HASH_DIGEST_LEN_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = QCRYPTO_HASH_DIGEST_LEN_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = QCRYPTO_HASH_DIGEST_LEN_RIPEMD160,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = QCRYPTO_HASH_DIGEST_LEN_SM3,
+#endif
};
-size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg)
+size_t qcrypto_hash_digest_len(QCryptoHashAlgo alg)
{
assert(alg < G_N_ELEMENTS(qcrypto_hash_alg_size));
return qcrypto_hash_alg_size[alg];
}
-int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
+int qcrypto_hash_bytesv(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
uint8_t **result,
size_t *resultlen,
Error **errp)
{
-#ifdef CONFIG_AF_ALG
- int ret;
- /*
- * TODO:
- * Maybe we should treat some afalg errors as fatal
- */
- ret = qcrypto_hash_afalg_driver.hash_bytesv(alg, iov, niov,
- result, resultlen,
- NULL);
- if (ret == 0) {
- return ret;
+ g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp);
+
+ if (!ctx) {
+ return -1;
+ }
+
+ if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 ||
+ qcrypto_hash_finalize_bytes(ctx, result, resultlen, errp) < 0) {
+ return -1;
}
-#endif
- return qcrypto_hash_lib_driver.hash_bytesv(alg, iov, niov,
- result, resultlen,
- errp);
+ return 0;
}
-int qcrypto_hash_bytes(QCryptoHashAlgorithm alg,
+int qcrypto_hash_bytes(QCryptoHashAlgo alg,
const char *buf,
size_t len,
uint8_t **result,
@@ -77,33 +78,134 @@ int qcrypto_hash_bytes(QCryptoHashAlgorithm alg,
return qcrypto_hash_bytesv(alg, &iov, 1, result, resultlen, errp);
}
+int qcrypto_hash_updatev(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp)
+{
+ QCryptoHashDriver *drv = hash->driver;
+
+ return drv->hash_update(hash, iov, niov, errp);
+}
+
+int qcrypto_hash_update(QCryptoHash *hash,
+ const char *buf,
+ size_t len,
+ Error **errp)
+{
+ struct iovec iov = { .iov_base = (char *)buf, .iov_len = len };
+
+ return qcrypto_hash_updatev(hash, &iov, 1, errp);
+}
+
+QCryptoHash *qcrypto_hash_new(QCryptoHashAlgo alg, Error **errp)
+{
+ QCryptoHash *hash = NULL;
+
+ if (!qcrypto_hash_supports(alg)) {
+ error_setg(errp, "Unsupported hash algorithm %s",
+ QCryptoHashAlgo_str(alg));
+ return NULL;
+ }
+
+#ifdef CONFIG_AF_ALG
+ hash = qcrypto_hash_afalg_driver.hash_new(alg, NULL);
+ if (hash) {
+ hash->driver = &qcrypto_hash_afalg_driver;
+ return hash;
+ }
+#endif
+
+ hash = qcrypto_hash_lib_driver.hash_new(alg, errp);
+ if (!hash) {
+ return NULL;
+ }
+
+ hash->driver = &qcrypto_hash_lib_driver;
+ return hash;
+}
+
+void qcrypto_hash_free(QCryptoHash *hash)
+{
+ QCryptoHashDriver *drv;
+
+ if (hash) {
+ drv = hash->driver;
+ drv->hash_free(hash);
+ }
+}
+
+int qcrypto_hash_finalize_bytes(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp)
+{
+ QCryptoHashDriver *drv = hash->driver;
+
+ return drv->hash_finalize(hash, result, result_len, errp);
+}
+
static const char hex[] = "0123456789abcdef";
-int qcrypto_hash_digestv(QCryptoHashAlgorithm alg,
+int qcrypto_hash_finalize_digest(QCryptoHash *hash,
+ char **digest,
+ Error **errp)
+{
+ int ret;
+ g_autofree uint8_t *result = NULL;
+ size_t resultlen = 0;
+ size_t i;
+
+ ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen, errp);
+ if (ret == 0) {
+ *digest = g_new0(char, (resultlen * 2) + 1);
+ for (i = 0 ; i < resultlen ; i++) {
+ (*digest)[(i * 2)] = hex[(result[i] >> 4) & 0xf];
+ (*digest)[(i * 2) + 1] = hex[result[i] & 0xf];
+ }
+ (*digest)[resultlen * 2] = '\0';
+ }
+
+ return ret;
+}
+
+int qcrypto_hash_finalize_base64(QCryptoHash *hash,
+ char **base64,
+ Error **errp)
+{
+ int ret;
+ g_autofree uint8_t *result = NULL;
+ size_t resultlen = 0;
+
+ ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen, errp);
+ if (ret == 0) {
+ *base64 = g_base64_encode(result, resultlen);
+ }
+
+ return ret;
+}
+
+int qcrypto_hash_digestv(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
char **digest,
Error **errp)
{
- uint8_t *result = NULL;
- size_t resultlen = 0;
- size_t i;
+ g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp);
- if (qcrypto_hash_bytesv(alg, iov, niov, &result, &resultlen, errp) < 0) {
+ if (!ctx) {
return -1;
}
- *digest = g_new0(char, (resultlen * 2) + 1);
- for (i = 0 ; i < resultlen ; i++) {
- (*digest)[(i * 2)] = hex[(result[i] >> 4) & 0xf];
- (*digest)[(i * 2) + 1] = hex[result[i] & 0xf];
+ if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 ||
+ qcrypto_hash_finalize_digest(ctx, digest, errp) < 0) {
+ return -1;
}
- (*digest)[resultlen * 2] = '\0';
- g_free(result);
+
return 0;
}
-int qcrypto_hash_digest(QCryptoHashAlgorithm alg,
+int qcrypto_hash_digest(QCryptoHashAlgo alg,
const char *buf,
size_t len,
char **digest,
@@ -114,25 +216,27 @@ int qcrypto_hash_digest(QCryptoHashAlgorithm alg,
return qcrypto_hash_digestv(alg, &iov, 1, digest, errp);
}
-int qcrypto_hash_base64v(QCryptoHashAlgorithm alg,
+int qcrypto_hash_base64v(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
char **base64,
Error **errp)
{
- uint8_t *result = NULL;
- size_t resultlen = 0;
+ g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp);
+
+ if (!ctx) {
+ return -1;
+ }
- if (qcrypto_hash_bytesv(alg, iov, niov, &result, &resultlen, errp) < 0) {
+ if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 ||
+ qcrypto_hash_finalize_base64(ctx, base64, errp) < 0) {
return -1;
}
- *base64 = g_base64_encode(result, resultlen);
- g_free(result);
return 0;
}
-int qcrypto_hash_base64(QCryptoHashAlgorithm alg,
+int qcrypto_hash_base64(QCryptoHashAlgo alg,
const char *buf,
size_t len,
char **base64,
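
With the object-based API added to crypto/hash.c above, callers can feed data incrementally instead of assembling one iovec up front. A minimal usage sketch of the new functions (illustrative, not part of the patch; the helper name is an assumption):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/hash.h"

    static char *hash_two_chunks(const char *part1, size_t len1,
                                 const char *part2, size_t len2,
                                 Error **errp)
    {
        g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA256,
                                                      errp);
        char *digest = NULL;

        if (!ctx) {
            return NULL;
        }
        if (qcrypto_hash_update(ctx, part1, len1, errp) < 0 ||
            qcrypto_hash_update(ctx, part2, len2, errp) < 0 ||
            qcrypto_hash_finalize_digest(ctx, &digest, errp) < 0) {
            return NULL;
        }
        return digest;   /* lowercase hex string, caller must g_free() */
    }
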
diff --git a/crypto/hashpriv.h b/crypto/hashpriv.h
index cee26cc..83b9256 100644
--- a/crypto/hashpriv.h
+++ b/crypto/hashpriv.h
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash driver supports
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
@@ -15,15 +16,21 @@
#ifndef QCRYPTO_HASHPRIV_H
#define QCRYPTO_HASHPRIV_H
+#include "crypto/hash.h"
+
typedef struct QCryptoHashDriver QCryptoHashDriver;
struct QCryptoHashDriver {
- int (*hash_bytesv)(QCryptoHashAlgorithm alg,
+ QCryptoHash *(*hash_new)(QCryptoHashAlgo alg, Error **errp);
+ int (*hash_update)(QCryptoHash *hash,
const struct iovec *iov,
size_t niov,
- uint8_t **result,
- size_t *resultlen,
Error **errp);
+ int (*hash_finalize)(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *resultlen,
+ Error **errp);
+ void (*hash_free)(QCryptoHash *hash);
};
extern QCryptoHashDriver qcrypto_hash_lib_driver;
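
The reworked QCryptoHashDriver vtable is all a backend has to provide: allocate, update, finalize, free. A hypothetical skeleton backend, purely for illustration (the "null" names are invented and the digest logic is left as comments):

    static QCryptoHash *null_hash_new(QCryptoHashAlgo alg, Error **errp)
    {
        QCryptoHash *hash = g_new0(QCryptoHash, 1);

        hash->alg = alg;
        hash->opaque = NULL;          /* backend state would go here */
        return hash;
    }

    static int null_hash_update(QCryptoHash *hash, const struct iovec *iov,
                                size_t niov, Error **errp)
    {
        return 0;                     /* feed iov[] into the backend state */
    }

    static int null_hash_finalize(QCryptoHash *hash, uint8_t **result,
                                  size_t *resultlen, Error **errp)
    {
        *resultlen = qcrypto_hash_digest_len(hash->alg);
        *result = g_new0(uint8_t, *resultlen);
        return 0;                     /* write the digest into *result */
    }

    static void null_hash_free(QCryptoHash *hash)
    {
        g_free(hash);
    }

    QCryptoHashDriver qcrypto_hash_null_driver = {
        .hash_new = null_hash_new,
        .hash_update = null_hash_update,
        .hash_finalize = null_hash_finalize,
        .hash_free = null_hash_free,
    };
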
diff --git a/crypto/hmac-gcrypt.c b/crypto/hmac-gcrypt.c
index 0c6f979..5273086 100644
--- a/crypto/hmac-gcrypt.c
+++ b/crypto/hmac-gcrypt.c
@@ -18,14 +18,17 @@
#include "hmacpriv.h"
#include <gcrypt.h>
-static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GCRY_MAC_HMAC_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GCRY_MAC_HMAC_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GCRY_MAC_HMAC_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GCRY_MAC_HMAC_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GCRY_MAC_HMAC_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GCRY_MAC_HMAC_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MAC_HMAC_RMD160,
+static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GCRY_MAC_HMAC_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MAC_HMAC_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MAC_HMAC_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MAC_HMAC_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MAC_HMAC_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MAC_HMAC_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MAC_HMAC_RMD160,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = GCRY_MAC_HMAC_SM3,
+#endif
};
typedef struct QCryptoHmacGcrypt QCryptoHmacGcrypt;
@@ -33,17 +36,17 @@ struct QCryptoHmacGcrypt {
gcry_mac_hd_t handle;
};
-bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
+bool qcrypto_hmac_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
qcrypto_hmac_alg_map[alg] != GCRY_MAC_NONE) {
- return true;
+ return gcry_mac_test_algo(qcrypto_hmac_alg_map[alg]) == 0;
}
return false;
}
-void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
+void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
@@ -52,7 +55,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
if (!qcrypto_hmac_supports(alg)) {
error_setg(errp, "Unsupported hmac algorithm %s",
- QCryptoHashAlgorithm_str(alg));
+ QCryptoHashAlgo_str(alg));
return NULL;
}
diff --git a/crypto/hmac-glib.c b/crypto/hmac-glib.c
index 509bbc7..ea80c8d 100644
--- a/crypto/hmac-glib.c
+++ b/crypto/hmac-glib.c
@@ -17,14 +17,14 @@
#include "crypto/hmac.h"
#include "hmacpriv.h"
-static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = G_CHECKSUM_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = G_CHECKSUM_SHA1,
- [QCRYPTO_HASH_ALG_SHA256] = G_CHECKSUM_SHA256,
- [QCRYPTO_HASH_ALG_SHA512] = G_CHECKSUM_SHA512,
- [QCRYPTO_HASH_ALG_SHA224] = -1,
- [QCRYPTO_HASH_ALG_SHA384] = -1,
- [QCRYPTO_HASH_ALG_RIPEMD160] = -1,
+static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = G_CHECKSUM_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = G_CHECKSUM_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA256] = G_CHECKSUM_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA512] = G_CHECKSUM_SHA512,
+ [QCRYPTO_HASH_ALGO_SHA224] = -1,
+ [QCRYPTO_HASH_ALGO_SHA384] = -1,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = -1,
};
typedef struct QCryptoHmacGlib QCryptoHmacGlib;
@@ -32,7 +32,7 @@ struct QCryptoHmacGlib {
GHmac *ghmac;
};
-bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
+bool qcrypto_hmac_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
qcrypto_hmac_alg_map[alg] != -1) {
@@ -42,7 +42,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
return false;
}
-void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
+void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
@@ -50,7 +50,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
if (!qcrypto_hmac_supports(alg)) {
error_setg(errp, "Unsupported hmac algorithm %s",
- QCryptoHashAlgorithm_str(alg));
+ QCryptoHashAlgo_str(alg));
return NULL;
}
diff --git a/crypto/hmac-gnutls.c b/crypto/hmac-gnutls.c
index 24db383..8229955 100644
--- a/crypto/hmac-gnutls.c
+++ b/crypto/hmac-gnutls.c
@@ -20,14 +20,14 @@
#include "crypto/hmac.h"
#include "hmacpriv.h"
-static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GNUTLS_MAC_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_MAC_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_MAC_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_MAC_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_MAC_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_MAC_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_MAC_RMD160,
+static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_MAC_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_MAC_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_MAC_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_MAC_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_MAC_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_MAC_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_MAC_RMD160,
};
typedef struct QCryptoHmacGnutls QCryptoHmacGnutls;
@@ -35,7 +35,7 @@ struct QCryptoHmacGnutls {
gnutls_hmac_hd_t handle;
};
-bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
+bool qcrypto_hmac_supports(QCryptoHashAlgo alg)
{
size_t i;
const gnutls_digest_algorithm_t *algs;
@@ -52,7 +52,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
return false;
}
-void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
+void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
@@ -61,7 +61,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
if (!qcrypto_hmac_supports(alg)) {
error_setg(errp, "Unsupported hmac algorithm %s",
- QCryptoHashAlgorithm_str(alg));
+ QCryptoHashAlgo_str(alg));
return NULL;
}
diff --git a/crypto/hmac-nettle.c b/crypto/hmac-nettle.c
index 1ad6c4f..dd5b2ab 100644
--- a/crypto/hmac-nettle.c
+++ b/crypto/hmac-nettle.c
@@ -38,6 +38,9 @@ struct QCryptoHmacNettle {
struct hmac_sha256_ctx sha256_ctx; /* equals hmac_sha224_ctx */
struct hmac_sha512_ctx sha512_ctx; /* equals hmac_sha384_ctx */
struct hmac_ripemd160_ctx ripemd160_ctx;
+#ifdef CONFIG_CRYPTO_SM3
+ struct hmac_sm3_ctx ctx;
+#endif
} u;
};
@@ -46,52 +49,60 @@ struct qcrypto_nettle_hmac_alg {
qcrypto_nettle_hmac_update update;
qcrypto_nettle_hmac_digest digest;
size_t len;
-} qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = {
+} qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_md5_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_md5_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_md5_digest,
.len = MD5_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA1] = {
+ [QCRYPTO_HASH_ALGO_SHA1] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha1_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_sha1_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_sha1_digest,
.len = SHA1_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA224] = {
+ [QCRYPTO_HASH_ALGO_SHA224] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha224_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_sha224_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_sha224_digest,
.len = SHA224_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA256] = {
+ [QCRYPTO_HASH_ALGO_SHA256] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha256_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_sha256_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_sha256_digest,
.len = SHA256_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA384] = {
+ [QCRYPTO_HASH_ALGO_SHA384] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha384_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_sha384_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_sha384_digest,
.len = SHA384_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_SHA512] = {
+ [QCRYPTO_HASH_ALGO_SHA512] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha512_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_sha512_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_sha512_digest,
.len = SHA512_DIGEST_SIZE,
},
- [QCRYPTO_HASH_ALG_RIPEMD160] = {
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = {
.setkey = (qcrypto_nettle_hmac_setkey)hmac_ripemd160_set_key,
.update = (qcrypto_nettle_hmac_update)hmac_ripemd160_update,
.digest = (qcrypto_nettle_hmac_digest)hmac_ripemd160_digest,
.len = RIPEMD160_DIGEST_SIZE,
},
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = {
+ .setkey = (qcrypto_nettle_hmac_setkey)hmac_sm3_set_key,
+ .update = (qcrypto_nettle_hmac_update)hmac_sm3_update,
+ .digest = (qcrypto_nettle_hmac_digest)hmac_sm3_digest,
+ .len = SM3_DIGEST_SIZE,
+ },
+#endif
};
-bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
+bool qcrypto_hmac_supports(QCryptoHashAlgo alg)
{
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
qcrypto_hmac_alg_map[alg].setkey != NULL) {
@@ -101,7 +112,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
return false;
}
-void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
+void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
@@ -109,7 +120,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
if (!qcrypto_hmac_supports(alg)) {
error_setg(errp, "Unsupported hmac algorithm %s",
- QCryptoHashAlgorithm_str(alg));
+ QCryptoHashAlgo_str(alg));
return NULL;
}
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 4de7e8c..422e005 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -83,7 +83,7 @@ int qcrypto_hmac_digest(QCryptoHmac *hmac,
return qcrypto_hmac_digestv(hmac, &iov, 1, digest, errp);
}
-QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
+QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp)
{
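
qcrypto_hmac_new() now takes the renamed QCryptoHashAlgo; one-shot HMAC usage is otherwise unchanged. A sketch (the full qcrypto_hmac_digest() and qcrypto_hmac_free() prototypes come from crypto/hmac.h and are not visible in this hunk; the helper name is an assumption):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/hmac.h"

    static char *hmac_sha256_hex(const uint8_t *key, size_t nkey,
                                 const char *buf, size_t len,
                                 Error **errp)
    {
        QCryptoHmac *hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALGO_SHA256,
                                             key, nkey, errp);
        char *digest = NULL;

        if (!hmac) {
            return NULL;
        }
        if (qcrypto_hmac_digest(hmac, buf, len, &digest, errp) < 0) {
            qcrypto_hmac_free(hmac);
            return NULL;
        }
        qcrypto_hmac_free(hmac);
        return digest;   /* hex string, caller must g_free() */
    }
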
diff --git a/crypto/hmacpriv.h b/crypto/hmacpriv.h
index 62dfe82..f339596 100644
--- a/crypto/hmacpriv.h
+++ b/crypto/hmacpriv.h
@@ -28,7 +28,7 @@ struct QCryptoHmacDriver {
void (*hmac_free)(QCryptoHmac *hmac);
};
-void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg,
+void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp);
extern QCryptoHmacDriver qcrypto_hmac_lib_driver;
@@ -37,7 +37,7 @@ extern QCryptoHmacDriver qcrypto_hmac_lib_driver;
#include "afalgpriv.h"
-QCryptoAFAlg *qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgorithm alg,
+QCryptoAFAlgo *qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp);
extern QCryptoHmacDriver qcrypto_hmac_afalg_driver;
diff --git a/crypto/init.c b/crypto/init.c
index fb7f1bf..674d237 100644
--- a/crypto/init.c
+++ b/crypto/init.c
@@ -34,14 +34,11 @@
#include "crypto/random.h"
-/* #define DEBUG_GNUTLS */
-#ifdef DEBUG_GNUTLS
-static void qcrypto_gnutls_log(int level, const char *str)
-{
- fprintf(stderr, "%d: %s", level, str);
-}
-#endif
+/*
+ * To debug GNUTLS see env vars listed in
+ * https://gnutls.org/manual/html_node/Debugging-and-auditing.html
+ */
int qcrypto_init(Error **errp)
{
#ifdef CONFIG_GNUTLS
@@ -53,10 +50,6 @@ int qcrypto_init(Error **errp)
gnutls_strerror(ret));
return -1;
}
-#ifdef DEBUG_GNUTLS
- gnutls_global_set_log_level(10);
- gnutls_global_set_log_function(qcrypto_gnutls_log);
-#endif
#endif
#ifdef CONFIG_GCRYPT
diff --git a/crypto/ivgen.c b/crypto/ivgen.c
index 12822f8..6b7d24d 100644
--- a/crypto/ivgen.c
+++ b/crypto/ivgen.c
@@ -27,9 +27,9 @@
#include "ivgen-essiv.h"
-QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg,
- QCryptoCipherAlgorithm cipheralg,
- QCryptoHashAlgorithm hash,
+QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgo alg,
+ QCryptoCipherAlgo cipheralg,
+ QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
Error **errp)
{
@@ -40,13 +40,13 @@ QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg,
ivgen->hash = hash;
switch (alg) {
- case QCRYPTO_IVGEN_ALG_PLAIN:
+ case QCRYPTO_IV_GEN_ALGO_PLAIN:
ivgen->driver = &qcrypto_ivgen_plain;
break;
- case QCRYPTO_IVGEN_ALG_PLAIN64:
+ case QCRYPTO_IV_GEN_ALGO_PLAIN64:
ivgen->driver = &qcrypto_ivgen_plain64;
break;
- case QCRYPTO_IVGEN_ALG_ESSIV:
+ case QCRYPTO_IV_GEN_ALGO_ESSIV:
ivgen->driver = &qcrypto_ivgen_essiv;
break;
default:
@@ -73,19 +73,19 @@ int qcrypto_ivgen_calculate(QCryptoIVGen *ivgen,
}
-QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen)
+QCryptoIVGenAlgo qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen)
{
return ivgen->algorithm;
}
-QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen)
+QCryptoCipherAlgo qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen)
{
return ivgen->cipher;
}
-QCryptoHashAlgorithm qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen)
+QCryptoHashAlgo qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen)
{
return ivgen->hash;
}
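
With the renamed enums, creating an ESSIV generator for an AES-256 volume looks roughly like this (key material and helper name are illustrative; the "crypto/ivgen.h" include path is an assumption; qcrypto_ivgen_calculate() then produces per-sector IVs as before):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/ivgen.h"

    static QCryptoIVGen *make_essiv_ivgen(const uint8_t *key, size_t nkey,
                                          Error **errp)
    {
        return qcrypto_ivgen_new(QCRYPTO_IV_GEN_ALGO_ESSIV,
                                 QCRYPTO_CIPHER_ALGO_AES_256,
                                 QCRYPTO_HASH_ALGO_SHA256,
                                 key, nkey, errp);
    }
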
diff --git a/crypto/ivgenpriv.h b/crypto/ivgenpriv.h
index cecdbed..e3388d3 100644
--- a/crypto/ivgenpriv.h
+++ b/crypto/ivgenpriv.h
@@ -40,9 +40,9 @@ struct QCryptoIVGen {
QCryptoIVGenDriver *driver;
void *private;
- QCryptoIVGenAlgorithm algorithm;
- QCryptoCipherAlgorithm cipher;
- QCryptoHashAlgorithm hash;
+ QCryptoIVGenAlgo algorithm;
+ QCryptoCipherAlgo cipher;
+ QCryptoHashAlgo hash;
};
diff --git a/crypto/meson.build b/crypto/meson.build
index c46f9c2..735635d 100644
--- a/crypto/meson.build
+++ b/crypto/meson.build
@@ -24,6 +24,10 @@ crypto_ss.add(files(
'rsakey.c',
))
+if gnutls.found()
+ crypto_ss.add(files('x509-utils.c'))
+endif
+
if nettle.found()
crypto_ss.add(nettle, files('hash-nettle.c', 'hmac-nettle.c', 'pbkdf-nettle.c'))
if hogweed.found()
diff --git a/crypto/pbkdf-gcrypt.c b/crypto/pbkdf-gcrypt.c
index a8d8e64..e89b8b1 100644
--- a/crypto/pbkdf-gcrypt.c
+++ b/crypto/pbkdf-gcrypt.c
@@ -23,37 +23,43 @@
#include "qapi/error.h"
#include "crypto/pbkdf.h"
-bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash)
+bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash)
{
switch (hash) {
- case QCRYPTO_HASH_ALG_MD5:
- case QCRYPTO_HASH_ALG_SHA1:
- case QCRYPTO_HASH_ALG_SHA224:
- case QCRYPTO_HASH_ALG_SHA256:
- case QCRYPTO_HASH_ALG_SHA384:
- case QCRYPTO_HASH_ALG_SHA512:
- case QCRYPTO_HASH_ALG_RIPEMD160:
- return true;
+ case QCRYPTO_HASH_ALGO_MD5:
+ case QCRYPTO_HASH_ALGO_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA224:
+ case QCRYPTO_HASH_ALGO_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA384:
+ case QCRYPTO_HASH_ALGO_SHA512:
+ case QCRYPTO_HASH_ALGO_RIPEMD160:
+#ifdef CONFIG_CRYPTO_SM3
+ case QCRYPTO_HASH_ALGO_SM3:
+#endif
+ return qcrypto_hash_supports(hash);
default:
return false;
}
}
-int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
+int qcrypto_pbkdf2(QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
const uint8_t *salt, size_t nsalt,
uint64_t iterations,
uint8_t *out, size_t nout,
Error **errp)
{
- static const int hash_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GCRY_MD_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GCRY_MD_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GCRY_MD_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GCRY_MD_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GCRY_MD_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GCRY_MD_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MD_RMD160,
+ static const int hash_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GCRY_MD_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MD_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MD_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MD_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3,
+#endif
};
int ret;
@@ -68,7 +74,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
hash_map[hash] == GCRY_MD_NONE) {
error_setg_errno(errp, ENOSYS,
"PBKDF does not support hash algorithm %s",
- QCryptoHashAlgorithm_str(hash));
+ QCryptoHashAlgo_str(hash));
return -1;
}
diff --git a/crypto/pbkdf-gnutls.c b/crypto/pbkdf-gnutls.c
index 2dfbbd3..f34423f 100644
--- a/crypto/pbkdf-gnutls.c
+++ b/crypto/pbkdf-gnutls.c
@@ -23,37 +23,37 @@
#include "qapi/error.h"
#include "crypto/pbkdf.h"
-bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash)
+bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash)
{
switch (hash) {
- case QCRYPTO_HASH_ALG_MD5:
- case QCRYPTO_HASH_ALG_SHA1:
- case QCRYPTO_HASH_ALG_SHA224:
- case QCRYPTO_HASH_ALG_SHA256:
- case QCRYPTO_HASH_ALG_SHA384:
- case QCRYPTO_HASH_ALG_SHA512:
- case QCRYPTO_HASH_ALG_RIPEMD160:
- return true;
+ case QCRYPTO_HASH_ALGO_MD5:
+ case QCRYPTO_HASH_ALGO_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA224:
+ case QCRYPTO_HASH_ALGO_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA384:
+ case QCRYPTO_HASH_ALGO_SHA512:
+ case QCRYPTO_HASH_ALGO_RIPEMD160:
+ return qcrypto_hash_supports(hash);
default:
return false;
}
}
-int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
+int qcrypto_pbkdf2(QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
const uint8_t *salt, size_t nsalt,
uint64_t iterations,
uint8_t *out, size_t nout,
Error **errp)
{
- static const int hash_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GNUTLS_DIG_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_DIG_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_DIG_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_DIG_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_DIG_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_DIG_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_DIG_RMD160,
+ static const int hash_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160,
};
int ret;
const gnutls_datum_t gkey = { (unsigned char *)key, nkey };
@@ -70,7 +70,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
hash_map[hash] == GNUTLS_DIG_UNKNOWN) {
error_setg_errno(errp, ENOSYS,
"PBKDF does not support hash algorithm %s",
- QCryptoHashAlgorithm_str(hash));
+ QCryptoHashAlgo_str(hash));
return -1;
}
diff --git a/crypto/pbkdf-nettle.c b/crypto/pbkdf-nettle.c
index d6293c2..3ef9c1b 100644
--- a/crypto/pbkdf-nettle.c
+++ b/crypto/pbkdf-nettle.c
@@ -25,22 +25,25 @@
#include "crypto/pbkdf.h"
-bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash)
+bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash)
{
switch (hash) {
- case QCRYPTO_HASH_ALG_SHA1:
- case QCRYPTO_HASH_ALG_SHA224:
- case QCRYPTO_HASH_ALG_SHA256:
- case QCRYPTO_HASH_ALG_SHA384:
- case QCRYPTO_HASH_ALG_SHA512:
- case QCRYPTO_HASH_ALG_RIPEMD160:
+ case QCRYPTO_HASH_ALGO_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA224:
+ case QCRYPTO_HASH_ALGO_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA384:
+ case QCRYPTO_HASH_ALGO_SHA512:
+ case QCRYPTO_HASH_ALGO_RIPEMD160:
+#ifdef CONFIG_CRYPTO_SM3
+ case QCRYPTO_HASH_ALGO_SM3:
+#endif
return true;
default:
return false;
}
}
-int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
+int qcrypto_pbkdf2(QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
const uint8_t *salt, size_t nsalt,
uint64_t iterations,
@@ -55,6 +58,9 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
struct hmac_sha384_ctx sha384;
struct hmac_sha512_ctx sha512;
struct hmac_ripemd160_ctx ripemd160;
+#ifdef CONFIG_CRYPTO_SM3
+ struct hmac_sm3_ctx sm3;
+#endif
} ctx;
if (iterations > UINT_MAX) {
@@ -65,52 +71,59 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
}
switch (hash) {
- case QCRYPTO_HASH_ALG_MD5:
+ case QCRYPTO_HASH_ALGO_MD5:
hmac_md5_set_key(&ctx.md5, nkey, key);
PBKDF2(&ctx.md5, hmac_md5_update, hmac_md5_digest,
MD5_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_SHA1:
+ case QCRYPTO_HASH_ALGO_SHA1:
hmac_sha1_set_key(&ctx.sha1, nkey, key);
PBKDF2(&ctx.sha1, hmac_sha1_update, hmac_sha1_digest,
SHA1_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_SHA224:
+ case QCRYPTO_HASH_ALGO_SHA224:
hmac_sha224_set_key(&ctx.sha224, nkey, key);
PBKDF2(&ctx.sha224, hmac_sha224_update, hmac_sha224_digest,
SHA224_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_SHA256:
+ case QCRYPTO_HASH_ALGO_SHA256:
hmac_sha256_set_key(&ctx.sha256, nkey, key);
PBKDF2(&ctx.sha256, hmac_sha256_update, hmac_sha256_digest,
SHA256_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_SHA384:
+ case QCRYPTO_HASH_ALGO_SHA384:
hmac_sha384_set_key(&ctx.sha384, nkey, key);
PBKDF2(&ctx.sha384, hmac_sha384_update, hmac_sha384_digest,
SHA384_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_SHA512:
+ case QCRYPTO_HASH_ALGO_SHA512:
hmac_sha512_set_key(&ctx.sha512, nkey, key);
PBKDF2(&ctx.sha512, hmac_sha512_update, hmac_sha512_digest,
SHA512_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
- case QCRYPTO_HASH_ALG_RIPEMD160:
+ case QCRYPTO_HASH_ALGO_RIPEMD160:
hmac_ripemd160_set_key(&ctx.ripemd160, nkey, key);
PBKDF2(&ctx.ripemd160, hmac_ripemd160_update, hmac_ripemd160_digest,
RIPEMD160_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
break;
+#ifdef CONFIG_CRYPTO_SM3
+ case QCRYPTO_HASH_ALGO_SM3:
+ hmac_sm3_set_key(&ctx.sm3, nkey, key);
+ PBKDF2(&ctx.sm3, hmac_sm3_update, hmac_sm3_digest,
+ SM3_DIGEST_SIZE, iterations, nsalt, salt, nout, out);
+ break;
+#endif
default:
error_setg_errno(errp, ENOSYS,
"PBKDF does not support hash algorithm %s",
- QCryptoHashAlgorithm_str(hash));
+ QCryptoHashAlgo_str(hash));
return -1;
}
return 0;
diff --git a/crypto/pbkdf-stub.c b/crypto/pbkdf-stub.c
index 9c4622e..9f29d0e 100644
--- a/crypto/pbkdf-stub.c
+++ b/crypto/pbkdf-stub.c
@@ -22,12 +22,12 @@
#include "qapi/error.h"
#include "crypto/pbkdf.h"
-bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash G_GNUC_UNUSED)
+bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash G_GNUC_UNUSED)
{
return false;
}
-int qcrypto_pbkdf2(QCryptoHashAlgorithm hash G_GNUC_UNUSED,
+int qcrypto_pbkdf2(QCryptoHashAlgo hash G_GNUC_UNUSED,
const uint8_t *key G_GNUC_UNUSED,
size_t nkey G_GNUC_UNUSED,
const uint8_t *salt G_GNUC_UNUSED,
diff --git a/crypto/pbkdf.c b/crypto/pbkdf.c
index 8d198c1..2989fc0 100644
--- a/crypto/pbkdf.c
+++ b/crypto/pbkdf.c
@@ -19,6 +19,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/thread.h"
#include "qapi/error.h"
#include "crypto/pbkdf.h"
#ifndef _WIN32
@@ -85,12 +86,28 @@ static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms,
#endif
}
-uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash,
- const uint8_t *key, size_t nkey,
- const uint8_t *salt, size_t nsalt,
- size_t nout,
- Error **errp)
+typedef struct CountItersData {
+ QCryptoHashAlgo hash;
+ const uint8_t *key;
+ size_t nkey;
+ const uint8_t *salt;
+ size_t nsalt;
+ size_t nout;
+ uint64_t iterations;
+ Error **errp;
+} CountItersData;
+
+static void *threaded_qcrypto_pbkdf2_count_iters(void *data)
{
+ CountItersData *iters_data = (CountItersData *) data;
+ QCryptoHashAlgo hash = iters_data->hash;
+ const uint8_t *key = iters_data->key;
+ size_t nkey = iters_data->nkey;
+ const uint8_t *salt = iters_data->salt;
+ size_t nsalt = iters_data->nsalt;
+ size_t nout = iters_data->nout;
+ Error **errp = iters_data->errp;
+ size_t scaled = 0;
uint64_t ret = -1;
g_autofree uint8_t *out = g_new(uint8_t, nout);
uint64_t iterations = (1 << 15);
@@ -114,13 +131,27 @@ uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash,
delta_ms = end_ms - start_ms;
- if (delta_ms > 500) {
+ /*
+ * For very small 'iterations' values, CPU (or crypto
+ * accelerator) might be fast enough that the scheduler
+ * hasn't incremented getrusage() data, or incremented
+ * it by a very small amount, resulting in delta_ms == 0.
+ * Once we've scaled 'iterations' x10, 5 times, we really
+ * should be seeing delta_ms != 0, so sanity check at
+ * that point.
+ */
+ if (scaled > 5 &&
+ delta_ms == 0) { /* sanity check */
+ error_setg(errp, "Unable to get accurate CPU usage");
+ goto cleanup;
+ } else if (delta_ms > 500) {
break;
} else if (delta_ms < 100) {
iterations = iterations * 10;
} else {
iterations = (iterations * 1000 / delta_ms);
}
+ scaled++;
}
iterations = iterations * 1000 / delta_ms;
@@ -129,5 +160,24 @@ uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash,
cleanup:
memset(out, 0, nout);
- return ret;
+ iters_data->iterations = ret;
+ return NULL;
+}
+
+uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgo hash,
+ const uint8_t *key, size_t nkey,
+ const uint8_t *salt, size_t nsalt,
+ size_t nout,
+ Error **errp)
+{
+ CountItersData data = {
+ hash, key, nkey, salt, nsalt, nout, 0, errp
+ };
+ QemuThread thread;
+
+ qemu_thread_create(&thread, "pbkdf2", threaded_qcrypto_pbkdf2_count_iters,
+ &data, QEMU_THREAD_JOINABLE);
+ qemu_thread_join(&thread);
+
+ return data.iterations;
}
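
The hunk above moves the iteration-counting loop into a joinable QemuThread and passes its inputs and result through CountItersData. A generic sketch of that offload pattern, with hypothetical names that are not part of the patch:

#include "qemu/osdep.h"
#include "qemu/thread.h"

typedef struct ExampleWork {
    uint64_t input;
    uint64_t result;
} ExampleWork;

/* Worker body: stands in for the expensive PBKDF2 timing loop. */
static void *example_worker(void *opaque)
{
    ExampleWork *work = opaque;
    uint64_t acc = 0;

    for (uint64_t i = 0; i < work->input; i++) {
        acc += i;
    }
    work->result = acc;
    return NULL;
}

/* Run the worker in its own thread and collect the result on join. */
static uint64_t example_run_in_thread(uint64_t input)
{
    ExampleWork work = { .input = input };
    QemuThread thread;

    qemu_thread_create(&thread, "example-worker", example_worker,
                       &work, QEMU_THREAD_JOINABLE);
    qemu_thread_join(&thread);

    return work.result;
}
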
diff --git a/crypto/rsakey-builtin.c.inc b/crypto/rsakey-builtin.c.inc
index 46cc7af..6337b84 100644
--- a/crypto/rsakey-builtin.c.inc
+++ b/crypto/rsakey-builtin.c.inc
@@ -183,10 +183,10 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse(
size_t keylen, Error **errp)
{
switch (type) {
- case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE:
return qcrypto_builtin_rsa_private_key_parse(key, keylen, errp);
- case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC:
return qcrypto_builtin_rsa_public_key_parse(key, keylen, errp);
default:
diff --git a/crypto/rsakey-nettle.c.inc b/crypto/rsakey-nettle.c.inc
index cc49872..b7f34b0 100644
--- a/crypto/rsakey-nettle.c.inc
+++ b/crypto/rsakey-nettle.c.inc
@@ -145,10 +145,10 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse(
size_t keylen, Error **errp)
{
switch (type) {
- case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE:
return qcrypto_nettle_rsa_private_key_parse(key, keylen, errp);
- case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
+ case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC:
return qcrypto_nettle_rsa_public_key_parse(key, keylen, errp);
default:
diff --git a/crypto/secret.c b/crypto/secret.c
index 44eaff1..61a4584 100644
--- a/crypto/secret.c
+++ b/crypto/secret.c
@@ -117,7 +117,7 @@ qcrypto_secret_finalize(Object *obj)
}
static void
-qcrypto_secret_class_init(ObjectClass *oc, void *data)
+qcrypto_secret_class_init(ObjectClass *oc, const void *data)
{
QCryptoSecretCommonClass *sic = QCRYPTO_SECRET_COMMON_CLASS(oc);
sic->load_data = qcrypto_secret_load_data;
diff --git a/crypto/secret_common.c b/crypto/secret_common.c
index 3441c44..a5ecb87 100644
--- a/crypto/secret_common.c
+++ b/crypto/secret_common.c
@@ -71,7 +71,7 @@ static void qcrypto_secret_decrypt(QCryptoSecretCommon *secret,
return;
}
- aes = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_256,
+ aes = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_256,
QCRYPTO_CIPHER_MODE_CBC,
key, keylen,
errp);
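
The change above is only the QCRYPTO_CIPHER_ALGO_AES_256 constant rename. For context, a hypothetical usage sketch of that constant, assuming the usual crypto/cipher.h helpers (qcrypto_cipher_setiv, qcrypto_cipher_decrypt, qcrypto_cipher_free); none of this code is part of the patch:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/cipher.h"

static int example_aes256_cbc_decrypt(const uint8_t *key, size_t nkey,
                                      const uint8_t *iv, size_t niv,
                                      const void *in, void *out, size_t len,
                                      Error **errp)
{
    QCryptoCipher *aes;
    int ret = -1;

    aes = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_256,
                             QCRYPTO_CIPHER_MODE_CBC,
                             key, nkey, errp);
    if (!aes) {
        return -1;
    }

    if (qcrypto_cipher_setiv(aes, iv, niv, errp) < 0 ||
        qcrypto_cipher_decrypt(aes, in, out, len, errp) < 0) {
        goto cleanup;
    }
    ret = 0;

 cleanup:
    qcrypto_cipher_free(aes);
    return ret;
}
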
@@ -191,15 +191,6 @@ qcrypto_secret_complete(UserCreatable *uc, Error **errp)
}
-static bool
-qcrypto_secret_prop_get_loaded(Object *obj,
- Error **errp G_GNUC_UNUSED)
-{
- QCryptoSecretCommon *secret = QCRYPTO_SECRET_COMMON(obj);
- return secret->rawdata != NULL;
-}
-
-
static void
qcrypto_secret_prop_set_format(Object *obj,
int value,
@@ -272,15 +263,12 @@ qcrypto_secret_finalize(Object *obj)
}
static void
-qcrypto_secret_class_init(ObjectClass *oc, void *data)
+qcrypto_secret_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = qcrypto_secret_complete;
- object_class_property_add_bool(oc, "loaded",
- qcrypto_secret_prop_get_loaded,
- NULL);
object_class_property_add_enum(oc, "format",
"QCryptoSecretFormat",
&QCryptoSecretFormat_lookup,
@@ -387,7 +375,7 @@ static const TypeInfo qcrypto_secret_info = {
.class_size = sizeof(QCryptoSecretCommonClass),
.class_init = qcrypto_secret_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
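
The hunks above follow the updated QOM conventions: class_init takes "const void *data", the interface list is a const InterfaceInfo array, and the "loaded" property is dropped. A minimal, hypothetical type skeleton showing those conventions (names are illustrative only):

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"

#define TYPE_EXAMPLE_OBJECT "example-object"

static void example_object_class_init(ObjectClass *oc, const void *data)
{
    /* property and vtable setup goes here; data is now const */
}

static const TypeInfo example_object_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_EXAMPLE_OBJECT,
    .instance_size = sizeof(Object),
    .class_init = example_object_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void example_register_types(void)
{
    type_register_static(&example_object_info);
}

type_init(example_register_types);
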
diff --git a/crypto/secret_keyring.c b/crypto/secret_keyring.c
index 1b7edec..78d7f09 100644
--- a/crypto/secret_keyring.c
+++ b/crypto/secret_keyring.c
@@ -103,7 +103,7 @@ qcrypto_secret_prop_get_key(Object *obj, Visitor *v,
static void
-qcrypto_secret_keyring_class_init(ObjectClass *oc, void *data)
+qcrypto_secret_keyring_class_init(ObjectClass *oc, const void *data)
{
QCryptoSecretCommonClass *sic = QCRYPTO_SECRET_COMMON_CLASS(oc);
sic->load_data = qcrypto_secret_keyring_load_data;
diff --git a/crypto/tls-cipher-suites.c b/crypto/tls-cipher-suites.c
index d0df4ba..d9b61d0 100644
--- a/crypto/tls-cipher-suites.c
+++ b/crypto/tls-cipher-suites.c
@@ -102,7 +102,8 @@ static GByteArray *qcrypto_tls_cipher_suites_fw_cfg_gen_data(Object *obj,
errp);
}
-static void qcrypto_tls_cipher_suites_class_init(ObjectClass *oc, void *data)
+static void qcrypto_tls_cipher_suites_class_init(ObjectClass *oc,
+ const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(oc);
@@ -117,7 +118,7 @@ static const TypeInfo qcrypto_tls_cipher_suites_info = {
.instance_size = sizeof(QCryptoTLSCipherSuites),
.class_size = sizeof(QCryptoTLSCredsClass),
.class_init = qcrypto_tls_cipher_suites_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
{ }
diff --git a/crypto/tlscreds.c b/crypto/tlscreds.c
index 084ce0d..9e59594 100644
--- a/crypto/tlscreds.c
+++ b/crypto/tlscreds.c
@@ -223,7 +223,7 @@ qcrypto_tls_creds_prop_get_endpoint(Object *obj,
static void
-qcrypto_tls_creds_class_init(ObjectClass *oc, void *data)
+qcrypto_tls_creds_class_init(ObjectClass *oc, const void *data)
{
object_class_property_add_bool(oc, "verify-peer",
qcrypto_tls_creds_prop_get_verify,
diff --git a/crypto/tlscredsanon.c b/crypto/tlscredsanon.c
index c0d23a0..44af9e6 100644
--- a/crypto/tlscredsanon.c
+++ b/crypto/tlscredsanon.c
@@ -127,37 +127,6 @@ qcrypto_tls_creds_anon_complete(UserCreatable *uc, Error **errp)
}
-#ifdef CONFIG_GNUTLS
-
-
-static bool
-qcrypto_tls_creds_anon_prop_get_loaded(Object *obj,
- Error **errp G_GNUC_UNUSED)
-{
- QCryptoTLSCredsAnon *creds = QCRYPTO_TLS_CREDS_ANON(obj);
-
- if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) {
- return creds->data.server != NULL;
- } else {
- return creds->data.client != NULL;
- }
-}
-
-
-#else /* ! CONFIG_GNUTLS */
-
-
-static bool
-qcrypto_tls_creds_anon_prop_get_loaded(Object *obj G_GNUC_UNUSED,
- Error **errp G_GNUC_UNUSED)
-{
- return false;
-}
-
-
-#endif /* ! CONFIG_GNUTLS */
-
-
static void
qcrypto_tls_creds_anon_finalize(Object *obj)
{
@@ -168,15 +137,11 @@ qcrypto_tls_creds_anon_finalize(Object *obj)
static void
-qcrypto_tls_creds_anon_class_init(ObjectClass *oc, void *data)
+qcrypto_tls_creds_anon_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = qcrypto_tls_creds_anon_complete;
-
- object_class_property_add_bool(oc, "loaded",
- qcrypto_tls_creds_anon_prop_get_loaded,
- NULL);
}
@@ -187,7 +152,7 @@ static const TypeInfo qcrypto_tls_creds_anon_info = {
.instance_finalize = qcrypto_tls_creds_anon_finalize,
.class_size = sizeof(QCryptoTLSCredsAnonClass),
.class_init = qcrypto_tls_creds_anon_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/crypto/tlscredspsk.c b/crypto/tlscredspsk.c
index 546cad1..5b68a6b 100644
--- a/crypto/tlscredspsk.c
+++ b/crypto/tlscredspsk.c
@@ -206,43 +206,13 @@ qcrypto_tls_creds_psk_complete(UserCreatable *uc, Error **errp)
}
-#ifdef CONFIG_GNUTLS
-
-
-static bool
-qcrypto_tls_creds_psk_prop_get_loaded(Object *obj,
- Error **errp G_GNUC_UNUSED)
-{
- QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj);
-
- if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) {
- return creds->data.server != NULL;
- } else {
- return creds->data.client != NULL;
- }
-}
-
-
-#else /* ! CONFIG_GNUTLS */
-
-
-static bool
-qcrypto_tls_creds_psk_prop_get_loaded(Object *obj G_GNUC_UNUSED,
- Error **errp G_GNUC_UNUSED)
-{
- return false;
-}
-
-
-#endif /* ! CONFIG_GNUTLS */
-
-
static void
qcrypto_tls_creds_psk_finalize(Object *obj)
{
QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj);
qcrypto_tls_creds_psk_unload(creds);
+ g_free(creds->username);
}
static void
@@ -266,15 +236,12 @@ qcrypto_tls_creds_psk_prop_get_username(Object *obj,
}
static void
-qcrypto_tls_creds_psk_class_init(ObjectClass *oc, void *data)
+qcrypto_tls_creds_psk_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = qcrypto_tls_creds_psk_complete;
- object_class_property_add_bool(oc, "loaded",
- qcrypto_tls_creds_psk_prop_get_loaded,
- NULL);
object_class_property_add_str(oc, "username",
qcrypto_tls_creds_psk_prop_get_username,
qcrypto_tls_creds_psk_prop_set_username);
@@ -288,7 +255,7 @@ static const TypeInfo qcrypto_tls_creds_psk_info = {
.instance_finalize = qcrypto_tls_creds_psk_finalize,
.class_size = sizeof(QCryptoTLSCredsPSKClass),
.class_init = qcrypto_tls_creds_psk_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c
index d143139..63a72fe 100644
--- a/crypto/tlscredsx509.c
+++ b/crypto/tlscredsx509.c
@@ -695,33 +695,6 @@ qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp)
}
-#ifdef CONFIG_GNUTLS
-
-
-static bool
-qcrypto_tls_creds_x509_prop_get_loaded(Object *obj,
- Error **errp G_GNUC_UNUSED)
-{
- QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj);
-
- return creds->data != NULL;
-}
-
-
-#else /* ! CONFIG_GNUTLS */
-
-
-static bool
-qcrypto_tls_creds_x509_prop_get_loaded(Object *obj G_GNUC_UNUSED,
- Error **errp G_GNUC_UNUSED)
-{
- return false;
-}
-
-
-#endif /* ! CONFIG_GNUTLS */
-
-
static void
qcrypto_tls_creds_x509_prop_set_sanity(Object *obj,
bool value,
@@ -829,7 +802,7 @@ qcrypto_tls_creds_x509_finalize(Object *obj)
static void
-qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data)
+qcrypto_tls_creds_x509_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
QCryptoTLSCredsClass *ctcc = QCRYPTO_TLS_CREDS_CLASS(oc);
@@ -838,9 +811,6 @@ qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data)
ucc->complete = qcrypto_tls_creds_x509_complete;
- object_class_property_add_bool(oc, "loaded",
- qcrypto_tls_creds_x509_prop_get_loaded,
- NULL);
object_class_property_add_bool(oc, "sanity-check",
qcrypto_tls_creds_x509_prop_get_sanity,
qcrypto_tls_creds_x509_prop_set_sanity);
@@ -858,7 +828,7 @@ static const TypeInfo qcrypto_tls_creds_x509_info = {
.instance_finalize = qcrypto_tls_creds_x509_finalize,
.class_size = sizeof(QCryptoTLSCredsX509Class),
.class_init = qcrypto_tls_creds_x509_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/crypto/tlssession.c b/crypto/tlssession.c
index 1e98f44..6d8f8df 100644
--- a/crypto/tlssession.c
+++ b/crypto/tlssession.c
@@ -44,6 +44,13 @@ struct QCryptoTLSSession {
QCryptoTLSSessionReadFunc readFunc;
void *opaque;
char *peername;
+
+ /*
+ * Allow concurrent reads and writes, so track
+ * errors separately
+ */
+ Error *rerr;
+ Error *werr;
};
@@ -54,6 +61,9 @@ qcrypto_tls_session_free(QCryptoTLSSession *session)
return;
}
+ error_free(session->rerr);
+ error_free(session->werr);
+
gnutls_deinit(session->handle);
g_free(session->hostname);
g_free(session->peername);
@@ -67,13 +77,26 @@ static ssize_t
qcrypto_tls_session_push(void *opaque, const void *buf, size_t len)
{
QCryptoTLSSession *session = opaque;
+ ssize_t ret;
if (!session->writeFunc) {
errno = EIO;
return -1;
};
- return session->writeFunc(buf, len, session->opaque);
+ error_free(session->werr);
+ session->werr = NULL;
+
+ ret = session->writeFunc(buf, len, session->opaque, &session->werr);
+ if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) {
+ errno = EAGAIN;
+ return -1;
+ } else if (ret < 0) {
+ errno = EIO;
+ return -1;
+ } else {
+ return ret;
+ }
}
@@ -81,13 +104,26 @@ static ssize_t
qcrypto_tls_session_pull(void *opaque, void *buf, size_t len)
{
QCryptoTLSSession *session = opaque;
+ ssize_t ret;
if (!session->readFunc) {
errno = EIO;
return -1;
};
- return session->readFunc(buf, len, session->opaque);
+ error_free(session->rerr);
+ session->rerr = NULL;
+
+ ret = session->readFunc(buf, len, session->opaque, &session->rerr);
+ if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) {
+ errno = EAGAIN;
+ return -1;
+ } else if (ret < 0) {
+ errno = EIO;
+ return -1;
+ } else {
+ return ret;
+ }
}
#define TLS_PRIORITY_ADDITIONAL_ANON "+ANON-DH"
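
The push/pull changes above mean the transport callbacks now receive an Error ** and signal would-block with QCRYPTO_TLS_SESSION_ERR_BLOCK rather than via errno. A hypothetical pair of callbacks matching those call sites, using a plain socket purely for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/tlssession.h"

static ssize_t example_tls_push(const char *buf, size_t len,
                                void *opaque, Error **errp)
{
    int fd = *(int *)opaque;
    ssize_t ret = send(fd, buf, len, 0);

    if (ret < 0) {
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            return QCRYPTO_TLS_SESSION_ERR_BLOCK;
        }
        error_setg_errno(errp, errno, "Cannot write to transport socket");
        return -1;
    }
    return ret;
}

static ssize_t example_tls_pull(char *buf, size_t len,
                                void *opaque, Error **errp)
{
    int fd = *(int *)opaque;
    ssize_t ret = recv(fd, buf, len, 0);

    if (ret < 0) {
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            return QCRYPTO_TLS_SESSION_ERR_BLOCK;
        }
        error_setg_errno(errp, errno, "Cannot read from transport socket");
        return -1;
    }
    return ret;
}
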
@@ -441,23 +477,25 @@ qcrypto_tls_session_set_callbacks(QCryptoTLSSession *session,
ssize_t
qcrypto_tls_session_write(QCryptoTLSSession *session,
const char *buf,
- size_t len)
+ size_t len,
+ Error **errp)
{
ssize_t ret = gnutls_record_send(session->handle, buf, len);
if (ret < 0) {
- switch (ret) {
- case GNUTLS_E_AGAIN:
- errno = EAGAIN;
- break;
- case GNUTLS_E_INTERRUPTED:
- errno = EINTR;
- break;
- default:
- errno = EIO;
- break;
+ if (ret == GNUTLS_E_AGAIN) {
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
+ } else {
+ if (session->werr) {
+ error_propagate(errp, session->werr);
+ session->werr = NULL;
+ } else {
+ error_setg(errp,
+ "Cannot write to TLS channel: %s",
+ gnutls_strerror(ret));
+ }
+ return -1;
}
- ret = -1;
}
return ret;
@@ -467,26 +505,29 @@ qcrypto_tls_session_write(QCryptoTLSSession *session,
ssize_t
qcrypto_tls_session_read(QCryptoTLSSession *session,
char *buf,
- size_t len)
+ size_t len,
+ bool gracefulTermination,
+ Error **errp)
{
ssize_t ret = gnutls_record_recv(session->handle, buf, len);
if (ret < 0) {
- switch (ret) {
- case GNUTLS_E_AGAIN:
- errno = EAGAIN;
- break;
- case GNUTLS_E_INTERRUPTED:
- errno = EINTR;
- break;
- case GNUTLS_E_PREMATURE_TERMINATION:
- errno = ECONNABORTED;
- break;
- default:
- errno = EIO;
- break;
+ if (ret == GNUTLS_E_AGAIN) {
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
+ } else if ((ret == GNUTLS_E_PREMATURE_TERMINATION) &&
+ gracefulTermination){
+ return 0;
+ } else {
+ if (session->rerr) {
+ error_propagate(errp, session->rerr);
+ session->rerr = NULL;
+ } else {
+ error_setg(errp,
+ "Cannot read from TLS channel: %s",
+ gnutls_strerror(ret));
+ }
+ return -1;
}
- ret = -1;
}
return ret;
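
With the reworked return values above, callers check for QCRYPTO_TLS_SESSION_ERR_BLOCK instead of errno == EAGAIN, and hard errors arrive through Error **. A hypothetical caller-side sketch for the write path:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/tlssession.h"

static int example_send_all(QCryptoTLSSession *session,
                            const char *buf, size_t len, Error **errp)
{
    size_t done = 0;

    while (done < len) {
        ssize_t ret = qcrypto_tls_session_write(session, buf + done,
                                                len - done, errp);
        if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) {
            /* a real caller would wait for the transport to be writable */
            continue;
        }
        if (ret < 0) {
            return -1;  /* errp already populated */
        }
        done += ret;
    }
    return 0;
}
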
@@ -505,35 +546,69 @@ qcrypto_tls_session_handshake(QCryptoTLSSession *session,
Error **errp)
{
int ret = gnutls_handshake(session->handle);
- if (ret == 0) {
+ if (!ret) {
session->handshakeComplete = true;
+ return QCRYPTO_TLS_HANDSHAKE_COMPLETE;
+ }
+
+ if (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN) {
+ int direction = gnutls_record_get_direction(session->handle);
+ return direction ? QCRYPTO_TLS_HANDSHAKE_SENDING :
+ QCRYPTO_TLS_HANDSHAKE_RECVING;
+ }
+
+ if (session->rerr || session->werr) {
+ error_setg(errp, "TLS handshake failed: %s: %s",
+ gnutls_strerror(ret),
+ error_get_pretty(session->rerr ?
+ session->rerr : session->werr));
} else {
- if (ret == GNUTLS_E_INTERRUPTED ||
- ret == GNUTLS_E_AGAIN) {
- ret = 1;
- } else {
- error_setg(errp, "TLS handshake failed: %s",
- gnutls_strerror(ret));
- ret = -1;
- }
+ error_setg(errp, "TLS handshake failed: %s",
+ gnutls_strerror(ret));
}
- return ret;
+ error_free(session->rerr);
+ error_free(session->werr);
+ session->rerr = session->werr = NULL;
+
+ return -1;
}
-QCryptoTLSSessionHandshakeStatus
-qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *session)
+int
+qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp)
{
- if (session->handshakeComplete) {
- return QCRYPTO_TLS_HANDSHAKE_COMPLETE;
- } else if (gnutls_record_get_direction(session->handle) == 0) {
- return QCRYPTO_TLS_HANDSHAKE_RECVING;
+ int ret;
+
+ if (!session->handshakeComplete) {
+ return 0;
+ }
+
+ ret = gnutls_bye(session->handle, GNUTLS_SHUT_WR);
+
+ if (!ret) {
+ return QCRYPTO_TLS_BYE_COMPLETE;
+ }
+
+ if (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN) {
+ int direction = gnutls_record_get_direction(session->handle);
+ return direction ? QCRYPTO_TLS_BYE_SENDING : QCRYPTO_TLS_BYE_RECVING;
+ }
+
+ if (session->rerr || session->werr) {
+ error_setg(errp, "TLS termination failed: %s: %s", gnutls_strerror(ret),
+ error_get_pretty(session->rerr ?
+ session->rerr : session->werr));
} else {
- return QCRYPTO_TLS_HANDSHAKE_SENDING;
+ error_setg(errp, "TLS termination failed: %s", gnutls_strerror(ret));
}
-}
+ error_free(session->rerr);
+ error_free(session->werr);
+ session->rerr = session->werr = NULL;
+
+ return -1;
+}
int
qcrypto_tls_session_get_key_size(QCryptoTLSSession *session,
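
The handshake now reports its own state (COMPLETE/SENDING/RECVING) instead of requiring a separate qcrypto_tls_session_get_handshake_status() call, and the new qcrypto_tls_session_bye() follows the same shape for orderly shutdown. A hypothetical driver loop for the handshake; the wait step is left to the caller's event loop:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/tlssession.h"

static int example_run_handshake(QCryptoTLSSession *session, Error **errp)
{
    for (;;) {
        int status = qcrypto_tls_session_handshake(session, errp);

        switch (status) {
        case QCRYPTO_TLS_HANDSHAKE_COMPLETE:
            return 0;
        case QCRYPTO_TLS_HANDSHAKE_SENDING:
        case QCRYPTO_TLS_HANDSHAKE_RECVING:
            /* wait for the transport to be writable/readable, then retry */
            break;
        default:
            return -1;  /* errp already populated */
        }
    }
}
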
@@ -605,9 +680,10 @@ qcrypto_tls_session_set_callbacks(
ssize_t
qcrypto_tls_session_write(QCryptoTLSSession *sess,
const char *buf,
- size_t len)
+ size_t len,
+ Error **errp)
{
- errno = -EIO;
+ error_setg(errp, "TLS requires GNUTLS support");
return -1;
}
@@ -615,9 +691,11 @@ qcrypto_tls_session_write(QCryptoTLSSession *sess,
ssize_t
qcrypto_tls_session_read(QCryptoTLSSession *sess,
char *buf,
- size_t len)
+ size_t len,
+ bool gracefulTermination,
+ Error **errp)
{
- errno = -EIO;
+ error_setg(errp, "TLS requires GNUTLS support");
return -1;
}
@@ -638,10 +716,10 @@ qcrypto_tls_session_handshake(QCryptoTLSSession *sess,
}
-QCryptoTLSSessionHandshakeStatus
-qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *sess)
+int
+qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp)
{
- return QCRYPTO_TLS_HANDSHAKE_COMPLETE;
+ return QCRYPTO_TLS_BYE_COMPLETE;
}
diff --git a/crypto/x509-utils.c b/crypto/x509-utils.c
new file mode 100644
index 0000000..8bad00a
--- /dev/null
+++ b/crypto/x509-utils.c
@@ -0,0 +1,76 @@
+/*
+ * X.509 certificate related helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "crypto/x509-utils.h"
+#include <gnutls/gnutls.h>
+#include <gnutls/crypto.h>
+#include <gnutls/x509.h>
+
+static const int qcrypto_to_gnutls_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = {
+ [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160,
+};
+
+int qcrypto_get_x509_cert_fingerprint(uint8_t *cert, size_t size,
+ QCryptoHashAlgo alg,
+ uint8_t *result,
+ size_t *resultlen,
+ Error **errp)
+{
+ int ret = -1;
+ int hlen;
+ gnutls_x509_crt_t crt;
+ gnutls_datum_t datum = {.data = cert, .size = size};
+
+ if (alg >= G_N_ELEMENTS(qcrypto_to_gnutls_hash_alg_map)) {
+ error_setg(errp, "Unknown hash algorithm");
+ return -1;
+ }
+
+ if (result == NULL) {
+ error_setg(errp, "No valid buffer given");
+ return -1;
+ }
+
+ gnutls_x509_crt_init(&crt);
+
+ if (gnutls_x509_crt_import(crt, &datum, GNUTLS_X509_FMT_PEM) != 0) {
+ error_setg(errp, "Failed to import certificate");
+ goto cleanup;
+ }
+
+ hlen = gnutls_hash_get_len(qcrypto_to_gnutls_hash_alg_map[alg]);
+ if (*resultlen < hlen) {
+ error_setg(errp,
+ "Result buffer size %zu is smaller than hash %d",
+ *resultlen, hlen);
+ goto cleanup;
+ }
+
+ if (gnutls_x509_crt_get_fingerprint(crt,
+ qcrypto_to_gnutls_hash_alg_map[alg],
+ result, resultlen) != 0) {
+ error_setg(errp, "Failed to get fingerprint from certificate");
+ goto cleanup;
+ }
+
+ ret = 0;
+
+ cleanup:
+ gnutls_x509_crt_deinit(crt);
+ return ret;
+}
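
A hypothetical caller sketch for the new helper, assuming a PEM certificate already loaded into memory; the 64-byte buffer covers the largest digest in the lookup table above (SHA-512), and all names here are illustrative:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/x509-utils.h"

static int example_print_fingerprint(uint8_t *pem, size_t pem_size,
                                     Error **errp)
{
    uint8_t digest[64];
    size_t digest_len = sizeof(digest);
    size_t i;

    if (qcrypto_get_x509_cert_fingerprint(pem, pem_size,
                                          QCRYPTO_HASH_ALGO_SHA256,
                                          digest, &digest_len, errp) < 0) {
        return -1;
    }

    for (i = 0; i < digest_len; i++) {
        printf("%02x", digest[i]);
    }
    printf("\n");
    return 0;
}
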
diff --git a/disas/cris.c b/disas/cris.c
deleted file mode 100644
index 409a224..0000000
--- a/disas/cris.c
+++ /dev/null
@@ -1,2863 +0,0 @@
-/* Disassembler code for CRIS.
- Copyright 2000, 2001, 2002, 2004, 2005, 2006 Free Software Foundation, Inc.
- Contributed by Axis Communications AB, Lund, Sweden.
- Written by Hans-Peter Nilsson.
-
- This file is part of the GNU binutils and GDB, the GNU debugger.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2, or (at your option) any later
- version.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, see <http://www.gnu.org/licenses/>. */
-
-#include "qemu/osdep.h"
-#include "disas/dis-asm.h"
-#include "target/cris/opcode-cris.h"
-
-#define CONST_STRNEQ(STR1,STR2) (strncmp ((STR1), (STR2), sizeof (STR2) - 1) == 0)
-
-/* cris-opc.c -- Table of opcodes for the CRIS processor.
- Copyright 2000, 2001, 2004 Free Software Foundation, Inc.
- Contributed by Axis Communications AB, Lund, Sweden.
- Originally written for GAS 1.38.1 by Mikael Asker.
- Reorganized by Hans-Peter Nilsson.
-
-This file is part of GAS, GDB and the GNU binutils.
-
-GAS, GDB, and GNU binutils is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2, or (at your
-option) any later version.
-
-GAS, GDB, and GNU binutils are distributed in the hope that they will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, see <http://www.gnu.org/licenses/>. */
-
-#ifndef NULL
-#define NULL (0)
-#endif
-
-/* This table isn't used for CRISv32 and the size of immediate operands. */
-const struct cris_spec_reg
-cris_spec_regs[] =
-{
- {"bz", 0, 1, cris_ver_v32p, NULL},
- {"p0", 0, 1, 0, NULL},
- {"vr", 1, 1, 0, NULL},
- {"p1", 1, 1, 0, NULL},
- {"pid", 2, 1, cris_ver_v32p, NULL},
- {"p2", 2, 1, cris_ver_v32p, NULL},
- {"p2", 2, 1, cris_ver_warning, NULL},
- {"srs", 3, 1, cris_ver_v32p, NULL},
- {"p3", 3, 1, cris_ver_v32p, NULL},
- {"p3", 3, 1, cris_ver_warning, NULL},
- {"wz", 4, 2, cris_ver_v32p, NULL},
- {"p4", 4, 2, 0, NULL},
- {"ccr", 5, 2, cris_ver_v0_10, NULL},
- {"exs", 5, 4, cris_ver_v32p, NULL},
- {"p5", 5, 2, cris_ver_v0_10, NULL},
- {"p5", 5, 4, cris_ver_v32p, NULL},
- {"dcr0",6, 2, cris_ver_v0_3, NULL},
- {"eda", 6, 4, cris_ver_v32p, NULL},
- {"p6", 6, 2, cris_ver_v0_3, NULL},
- {"p6", 6, 4, cris_ver_v32p, NULL},
- {"dcr1/mof", 7, 4, cris_ver_v10p,
- "Register `dcr1/mof' with ambiguous size specified. Guessing 4 bytes"},
- {"dcr1/mof", 7, 2, cris_ver_v0_3,
- "Register `dcr1/mof' with ambiguous size specified. Guessing 2 bytes"},
- {"mof", 7, 4, cris_ver_v10p, NULL},
- {"dcr1",7, 2, cris_ver_v0_3, NULL},
- {"p7", 7, 4, cris_ver_v10p, NULL},
- {"p7", 7, 2, cris_ver_v0_3, NULL},
- {"dz", 8, 4, cris_ver_v32p, NULL},
- {"p8", 8, 4, 0, NULL},
- {"ibr", 9, 4, cris_ver_v0_10, NULL},
- {"ebp", 9, 4, cris_ver_v32p, NULL},
- {"p9", 9, 4, 0, NULL},
- {"irp", 10, 4, cris_ver_v0_10, NULL},
- {"erp", 10, 4, cris_ver_v32p, NULL},
- {"p10", 10, 4, 0, NULL},
- {"srp", 11, 4, 0, NULL},
- {"p11", 11, 4, 0, NULL},
- /* For disassembly use only. Accept at assembly with a warning. */
- {"bar/dtp0", 12, 4, cris_ver_warning,
- "Ambiguous register `bar/dtp0' specified"},
- {"nrp", 12, 4, cris_ver_v32p, NULL},
- {"bar", 12, 4, cris_ver_v8_10, NULL},
- {"dtp0",12, 4, cris_ver_v0_3, NULL},
- {"p12", 12, 4, 0, NULL},
- /* For disassembly use only. Accept at assembly with a warning. */
- {"dccr/dtp1",13, 4, cris_ver_warning,
- "Ambiguous register `dccr/dtp1' specified"},
- {"ccs", 13, 4, cris_ver_v32p, NULL},
- {"dccr",13, 4, cris_ver_v8_10, NULL},
- {"dtp1",13, 4, cris_ver_v0_3, NULL},
- {"p13", 13, 4, 0, NULL},
- {"brp", 14, 4, cris_ver_v3_10, NULL},
- {"usp", 14, 4, cris_ver_v32p, NULL},
- {"p14", 14, 4, cris_ver_v3p, NULL},
- {"usp", 15, 4, cris_ver_v10, NULL},
- {"spc", 15, 4, cris_ver_v32p, NULL},
- {"p15", 15, 4, cris_ver_v10p, NULL},
- {NULL, 0, 0, cris_ver_version_all, NULL}
-};
-
-/* Add version specifiers to this table when necessary.
- The (now) regular coding of register names suggests a simpler
- implementation. */
-const struct cris_support_reg cris_support_regs[] =
-{
- {"s0", 0},
- {"s1", 1},
- {"s2", 2},
- {"s3", 3},
- {"s4", 4},
- {"s5", 5},
- {"s6", 6},
- {"s7", 7},
- {"s8", 8},
- {"s9", 9},
- {"s10", 10},
- {"s11", 11},
- {"s12", 12},
- {"s13", 13},
- {"s14", 14},
- {"s15", 15},
- {NULL, 0}
-};
-
-/* All CRIS opcodes are 16 bits.
-
- - The match component is a mask saying which bits must match a
- particular opcode in order for an instruction to be an instance
- of that opcode.
-
- - The args component is a string containing characters symbolically
- matching the operands of an instruction. Used for both assembly
- and disassembly.
-
- Operand-matching characters:
- [ ] , space
- Verbatim.
- A The string "ACR" (case-insensitive).
- B Not really an operand. It causes a "BDAP -size,SP" prefix to be
- output for the PUSH alias-instructions and recognizes a push-
- prefix at disassembly. This letter isn't recognized for v32.
- Must be followed by a R or P letter.
- ! Non-match pattern, will not match if there's a prefix insn.
- b Non-matching operand, used for branches with 16-bit
- displacement. Only recognized by the disassembler.
- c 5-bit unsigned immediate in bits <4:0>.
- C 4-bit unsigned immediate in bits <3:0>.
- d At assembly, optionally (as in put other cases before this one)
- ".d" or ".D" at the start of the operands, followed by one space
- character. At disassembly, nothing.
- D General register in bits <15:12> and <3:0>.
- f List of flags in bits <15:12> and <3:0>.
- i 6-bit signed immediate in bits <5:0>.
- I 6-bit unsigned immediate in bits <5:0>.
- M Size modifier (B, W or D) for CLEAR instructions.
- m Size modifier (B, W or D) in bits <5:4>
- N A 32-bit dword, like in the difference between s and y.
- This has no effect on bits in the opcode. Can also be expressed
- as "[pc+]" in input.
- n As N, but PC-relative (to the start of the instruction).
- o [-128..127] word offset in bits <7:1> and <0>. Used by 8-bit
- branch instructions.
- O [-128..127] offset in bits <7:0>. Also matches a comma and a
- general register after the expression, in bits <15:12>. Used
- only for the BDAP prefix insn (in v32 the ADDOQ insn; same opcode).
- P Special register in bits <15:12>.
- p Indicates that the insn is a prefix insn. Must be first
- character.
- Q As O, but don't relax; force an 8-bit offset.
- R General register in bits <15:12>.
- r General register in bits <3:0>.
- S Source operand in bit <10> and a prefix; a 3-operand prefix
- without side-effect.
- s Source operand in bits <10> and <3:0>, optionally with a
- side-effect prefix, except [pc] (the name, not R15 as in ACR)
- isn't allowed for v32 and higher.
- T Support register in bits <15:12>.
- u 4-bit (PC-relative) unsigned immediate word offset in bits <3:0>.
- U Relaxes to either u or n, instruction is assumed LAPCQ or LAPC.
- Not recognized at disassembly.
- x Register-dot-modifier, for example "r5.w" in bits <15:12> and <5:4>.
- y Like 's' but do not allow an integer at assembly.
- Y The difference s-y; only an integer is allowed.
- z Size modifier (B or W) in bit <4>. */
-
-
-/* Please note the order of the opcodes in this table is significant.
- The assembler requires that all instances of the same mnemonic must
- be consecutive. If they aren't, the assembler might not recognize
- them, or may indicate an internal error.
-
- The disassembler should not normally care about the order of the
- opcodes, but will prefer an earlier alternative if the "match-score"
- (see cris-dis.c) is computed as equal.
-
- It should not be significant for proper execution that this table is
- in alphabetical order, but please follow that convention for an easy
- overview. */
-
-const struct cris_opcode
-cris_opcodes[] =
-{
- {"abs", 0x06B0, 0x0940, "r,R", 0, SIZE_NONE, 0,
- cris_abs_op},
-
- {"add", 0x0600, 0x09c0, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"add", 0x0A00, 0x01c0, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"add", 0x0A00, 0x01c0, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"add", 0x0a00, 0x05c0, "m S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"add", 0x0A00, 0x01c0, "m s,R", 0, SIZE_FIELD,
- cris_ver_v32p,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"addc", 0x0570, 0x0A80, "r,R", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"addc", 0x09A0, 0x0250, "s,R", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"addi", 0x0540, 0x0A80, "x,r,A", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_addi_op},
-
- {"addi", 0x0500, 0x0Ac0, "x,r", 0, SIZE_NONE, 0,
- cris_addi_op},
-
- /* This collates after "addo", but we want to disassemble as "addoq",
- not "addo". */
- {"addoq", 0x0100, 0x0E00, "Q,A", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"addo", 0x0940, 0x0280, "m s,R,A", 0, SIZE_FIELD_SIGNED,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- /* This must be located after the insn above, lest we misinterpret
- "addo.b -1,r0,acr" as "addo .b-1,r0,acr". FIXME: Sounds like a
- parser bug. */
- {"addo", 0x0100, 0x0E00, "O,A", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"addq", 0x0200, 0x0Dc0, "I,R", 0, SIZE_NONE, 0,
- cris_quick_mode_add_sub_op},
-
- {"adds", 0x0420, 0x0Bc0, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */
- {"adds", 0x0820, 0x03c0, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"adds", 0x0820, 0x03c0, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"adds", 0x0820, 0x07c0, "z S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"addu", 0x0400, 0x0be0, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"addu", 0x0800, 0x03e0, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"addu", 0x0800, 0x03e0, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"addu", 0x0800, 0x07e0, "z S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"and", 0x0700, 0x08C0, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"and", 0x0B00, 0x00C0, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"and", 0x0B00, 0x00C0, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"and", 0x0B00, 0x04C0, "m S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"andq", 0x0300, 0x0CC0, "i,R", 0, SIZE_NONE, 0,
- cris_quick_mode_and_cmp_move_or_op},
-
- {"asr", 0x0780, 0x0840, "m r,R", 0, SIZE_NONE, 0,
- cris_asr_op},
-
- {"asrq", 0x03a0, 0x0c40, "c,R", 0, SIZE_NONE, 0,
- cris_asrq_op},
-
- {"ax", 0x15B0, 0xEA4F, "", 0, SIZE_NONE, 0,
- cris_ax_ei_setf_op},
-
- /* FIXME: Should use branch #defines. */
- {"b", 0x0dff, 0x0200, "b", 1, SIZE_NONE, 0,
- cris_sixteen_bit_offset_branch_op},
-
- {"ba",
- BA_QUICK_OPCODE,
- 0x0F00+(0xF-CC_A)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- /* Needs to come after the usual "ba o", which might be relaxed to
- this one. */
- {"ba", BA_DWORD_OPCODE,
- 0xffff & (~BA_DWORD_OPCODE), "n", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"bas", 0x0EBF, 0x0140, "n,P", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"basc", 0x0EFF, 0x0100, "n,P", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"bcc",
- BRANCH_QUICK_OPCODE+CC_CC*0x1000,
- 0x0f00+(0xF-CC_CC)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bcs",
- BRANCH_QUICK_OPCODE+CC_CS*0x1000,
- 0x0f00+(0xF-CC_CS)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bdap",
- BDAP_INDIR_OPCODE, BDAP_INDIR_Z_BITS, "pm s,R", 0, SIZE_FIELD_SIGNED,
- cris_ver_v0_10,
- cris_bdap_prefix},
-
- {"bdap",
- BDAP_QUICK_OPCODE, BDAP_QUICK_Z_BITS, "pO", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_quick_mode_bdap_prefix},
-
- {"beq",
- BRANCH_QUICK_OPCODE+CC_EQ*0x1000,
- 0x0f00+(0xF-CC_EQ)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- /* This is deliberately put before "bext" to trump it, even though not
- in alphabetical order, since we don't do excluding version checks
- for v0..v10. */
- {"bwf",
- BRANCH_QUICK_OPCODE+CC_EXT*0x1000,
- 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE,
- cris_ver_v10,
- cris_eight_bit_offset_branch_op},
-
- {"bext",
- BRANCH_QUICK_OPCODE+CC_EXT*0x1000,
- 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE,
- cris_ver_v0_3,
- cris_eight_bit_offset_branch_op},
-
- {"bge",
- BRANCH_QUICK_OPCODE+CC_GE*0x1000,
- 0x0f00+(0xF-CC_GE)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bgt",
- BRANCH_QUICK_OPCODE+CC_GT*0x1000,
- 0x0f00+(0xF-CC_GT)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bhi",
- BRANCH_QUICK_OPCODE+CC_HI*0x1000,
- 0x0f00+(0xF-CC_HI)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bhs",
- BRANCH_QUICK_OPCODE+CC_HS*0x1000,
- 0x0f00+(0xF-CC_HS)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"biap", BIAP_OPCODE, BIAP_Z_BITS, "pm r,R", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_biap_prefix},
-
- {"ble",
- BRANCH_QUICK_OPCODE+CC_LE*0x1000,
- 0x0f00+(0xF-CC_LE)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"blo",
- BRANCH_QUICK_OPCODE+CC_LO*0x1000,
- 0x0f00+(0xF-CC_LO)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bls",
- BRANCH_QUICK_OPCODE+CC_LS*0x1000,
- 0x0f00+(0xF-CC_LS)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"blt",
- BRANCH_QUICK_OPCODE+CC_LT*0x1000,
- 0x0f00+(0xF-CC_LT)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bmi",
- BRANCH_QUICK_OPCODE+CC_MI*0x1000,
- 0x0f00+(0xF-CC_MI)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bmod", 0x0ab0, 0x0140, "s,R", 0, SIZE_FIX_32,
- cris_ver_sim_v0_10,
- cris_not_implemented_op},
-
- {"bmod", 0x0ab0, 0x0140, "S,D", 0, SIZE_NONE,
- cris_ver_sim_v0_10,
- cris_not_implemented_op},
-
- {"bmod", 0x0ab0, 0x0540, "S,R,r", 0, SIZE_NONE,
- cris_ver_sim_v0_10,
- cris_not_implemented_op},
-
- {"bne",
- BRANCH_QUICK_OPCODE+CC_NE*0x1000,
- 0x0f00+(0xF-CC_NE)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bound", 0x05c0, 0x0A00, "m r,R", 0, SIZE_NONE, 0,
- cris_two_operand_bound_op},
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"bound", 0x09c0, 0x0200, "m s,R", 0, SIZE_FIELD,
- cris_ver_v0_10,
- cris_two_operand_bound_op},
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"bound", 0x0dcf, 0x0200, "m Y,R", 0, SIZE_FIELD, 0,
- cris_two_operand_bound_op},
- {"bound", 0x09c0, 0x0200, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_two_operand_bound_op},
- {"bound", 0x09c0, 0x0600, "m S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_bound_op},
-
- {"bpl",
- BRANCH_QUICK_OPCODE+CC_PL*0x1000,
- 0x0f00+(0xF-CC_PL)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"break", 0xe930, 0x16c0, "C", 0, SIZE_NONE,
- cris_ver_v3p,
- cris_break_op},
-
- {"bsb",
- BRANCH_QUICK_OPCODE+CC_EXT*0x1000,
- 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE,
- cris_ver_v32p,
- cris_eight_bit_offset_branch_op},
-
- {"bsr", 0xBEBF, 0x4140, "n", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"bsrc", 0xBEFF, 0x4100, "n", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"bstore", 0x0af0, 0x0100, "s,R", 0, SIZE_FIX_32,
- cris_ver_warning,
- cris_not_implemented_op},
-
- {"bstore", 0x0af0, 0x0100, "S,D", 0, SIZE_NONE,
- cris_ver_warning,
- cris_not_implemented_op},
-
- {"bstore", 0x0af0, 0x0500, "S,R,r", 0, SIZE_NONE,
- cris_ver_warning,
- cris_not_implemented_op},
-
- {"btst", 0x04F0, 0x0B00, "r,R", 0, SIZE_NONE, 0,
- cris_btst_nop_op},
- {"btstq", 0x0380, 0x0C60, "c,R", 0, SIZE_NONE, 0,
- cris_btst_nop_op},
-
- {"bvc",
- BRANCH_QUICK_OPCODE+CC_VC*0x1000,
- 0x0f00+(0xF-CC_VC)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"bvs",
- BRANCH_QUICK_OPCODE+CC_VS*0x1000,
- 0x0f00+(0xF-CC_VS)*0x1000, "o", 1, SIZE_NONE, 0,
- cris_eight_bit_offset_branch_op},
-
- {"clear", 0x0670, 0x3980, "M r", 0, SIZE_NONE, 0,
- cris_reg_mode_clear_op},
-
- {"clear", 0x0A70, 0x3180, "M y", 0, SIZE_NONE, 0,
- cris_none_reg_mode_clear_test_op},
-
- {"clear", 0x0A70, 0x3180, "M S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_clear_test_op},
-
- {"clearf", 0x05F0, 0x0A00, "f", 0, SIZE_NONE, 0,
- cris_clearf_di_op},
-
- {"cmp", 0x06C0, 0x0900, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"cmp", 0x0Ac0, 0x0100, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"cmp", 0x0Ac0, 0x0100, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"cmpq", 0x02C0, 0x0D00, "i,R", 0, SIZE_NONE, 0,
- cris_quick_mode_and_cmp_move_or_op},
-
- /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */
- {"cmps", 0x08e0, 0x0300, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"cmps", 0x08e0, 0x0300, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"cmpu", 0x08c0, 0x0320, "z s,R" , 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"cmpu", 0x08c0, 0x0320, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"di", 0x25F0, 0xDA0F, "", 0, SIZE_NONE, 0,
- cris_clearf_di_op},
-
- {"dip", DIP_OPCODE, DIP_Z_BITS, "ps", 0, SIZE_FIX_32,
- cris_ver_v0_10,
- cris_dip_prefix},
-
- {"div", 0x0980, 0x0640, "m R,r", 0, SIZE_FIELD, 0,
- cris_not_implemented_op},
-
- {"dstep", 0x06f0, 0x0900, "r,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"ei", 0x25B0, 0xDA4F, "", 0, SIZE_NONE, 0,
- cris_ax_ei_setf_op},
-
- {"fidxd", 0x0ab0, 0xf540, "[r]", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"fidxi", 0x0d30, 0xF2C0, "[r]", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"ftagd", 0x1AB0, 0xE540, "[r]", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"ftagi", 0x1D30, 0xE2C0, "[r]", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"halt", 0xF930, 0x06CF, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"jas", 0x09B0, 0x0640, "r,P", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jas", 0x0DBF, 0x0240, "N,P", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jasc", 0x0B30, 0x04C0, "r,P", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jasc", 0x0F3F, 0x00C0, "N,P", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jbrc", 0x69b0, 0x9640, "r", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_reg_mode_jump_op},
-
- {"jbrc", 0x6930, 0x92c0, "s", 0, SIZE_FIX_32,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jbrc", 0x6930, 0x92c0, "S", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jir", 0xA9b0, 0x5640, "r", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_reg_mode_jump_op},
-
- {"jir", 0xA930, 0x52c0, "s", 0, SIZE_FIX_32,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jir", 0xA930, 0x52c0, "S", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jirc", 0x29b0, 0xd640, "r", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_reg_mode_jump_op},
-
- {"jirc", 0x2930, 0xd2c0, "s", 0, SIZE_FIX_32,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jirc", 0x2930, 0xd2c0, "S", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jsr", 0xB9b0, 0x4640, "r", 0, SIZE_NONE, 0,
- cris_reg_mode_jump_op},
-
- {"jsr", 0xB930, 0x42c0, "s", 0, SIZE_FIX_32,
- cris_ver_v0_10,
- cris_none_reg_mode_jump_op},
-
- {"jsr", 0xBDBF, 0x4240, "N", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"jsr", 0xB930, 0x42c0, "S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_jump_op},
-
- {"jsrc", 0x39b0, 0xc640, "r", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_reg_mode_jump_op},
-
- {"jsrc", 0x3930, 0xc2c0, "s", 0, SIZE_FIX_32,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jsrc", 0x3930, 0xc2c0, "S", 0, SIZE_NONE,
- cris_ver_v8_10,
- cris_none_reg_mode_jump_op},
-
- {"jsrc", 0xBB30, 0x44C0, "r", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jsrc", 0xBF3F, 0x40C0, "N", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_reg_mode_jump_op},
-
- {"jump", 0x09b0, 0xF640, "r", 0, SIZE_NONE, 0,
- cris_reg_mode_jump_op},
-
- {"jump",
- JUMP_INDIR_OPCODE, JUMP_INDIR_Z_BITS, "s", 0, SIZE_FIX_32,
- cris_ver_v0_10,
- cris_none_reg_mode_jump_op},
-
- {"jump",
- JUMP_INDIR_OPCODE, JUMP_INDIR_Z_BITS, "S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_jump_op},
-
- {"jump", 0x09F0, 0x060F, "P", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"jump",
- JUMP_PC_INCR_OPCODE_V32,
- (0xffff & ~JUMP_PC_INCR_OPCODE_V32), "N", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_none_reg_mode_jump_op},
-
- {"jmpu", 0x8930, 0x72c0, "s", 0, SIZE_FIX_32,
- cris_ver_v10,
- cris_none_reg_mode_jump_op},
-
- {"jmpu", 0x8930, 0x72c0, "S", 0, SIZE_NONE,
- cris_ver_v10,
- cris_none_reg_mode_jump_op},
-
- {"lapc", 0x0970, 0x0680, "U,R", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"lapc", 0x0D7F, 0x0280, "dn,R", 0, SIZE_FIX_32,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"lapcq", 0x0970, 0x0680, "u,R", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_addi_op},
-
- {"lsl", 0x04C0, 0x0B00, "m r,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"lslq", 0x03c0, 0x0C20, "c,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"lsr", 0x07C0, 0x0800, "m r,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"lsrq", 0x03e0, 0x0C00, "c,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"lz", 0x0730, 0x08C0, "r,R", 0, SIZE_NONE,
- cris_ver_v3p,
- cris_not_implemented_op},
-
- {"mcp", 0x07f0, 0x0800, "P,r", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"move", 0x0640, 0x0980, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"move", 0x0A40, 0x0180, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"move", 0x0A40, 0x0180, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"move", 0x0630, 0x09c0, "r,P", 0, SIZE_NONE, 0,
- cris_move_to_preg_op},
-
- {"move", 0x0670, 0x0980, "P,r", 0, SIZE_NONE, 0,
- cris_reg_mode_move_from_preg_op},
-
- {"move", 0x0BC0, 0x0000, "m R,y", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"move", 0x0BC0, 0x0000, "m D,S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"move",
- MOVE_M_TO_PREG_OPCODE, MOVE_M_TO_PREG_ZBITS,
- "s,P", 0, SIZE_SPEC_REG, 0,
- cris_move_to_preg_op},
-
- {"move", 0x0A30, 0x01c0, "S,P", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_move_to_preg_op},
-
- {"move", 0x0A70, 0x0180, "P,y", 0, SIZE_SPEC_REG, 0,
- cris_none_reg_mode_move_from_preg_op},
-
- {"move", 0x0A70, 0x0180, "P,S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_move_from_preg_op},
-
- {"move", 0x0B70, 0x0480, "r,T", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"move", 0x0F70, 0x0080, "T,r", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"movem", 0x0BF0, 0x0000, "R,y", 0, SIZE_FIX_32, 0,
- cris_move_reg_to_mem_movem_op},
-
- {"movem", 0x0BF0, 0x0000, "D,S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_move_reg_to_mem_movem_op},
-
- {"movem", 0x0BB0, 0x0040, "s,R", 0, SIZE_FIX_32, 0,
- cris_move_mem_to_reg_movem_op},
-
- {"movem", 0x0BB0, 0x0040, "S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_move_mem_to_reg_movem_op},
-
- {"moveq", 0x0240, 0x0D80, "i,R", 0, SIZE_NONE, 0,
- cris_quick_mode_and_cmp_move_or_op},
-
- {"movs", 0x0460, 0x0B80, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */
- {"movs", 0x0860, 0x0380, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"movs", 0x0860, 0x0380, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"movu", 0x0440, 0x0Ba0, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"movu", 0x0840, 0x03a0, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"movu", 0x0840, 0x03a0, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"mstep", 0x07f0, 0x0800, "r,R", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"muls", 0x0d00, 0x02c0, "m r,R", 0, SIZE_NONE,
- cris_ver_v10p,
- cris_muls_op},
-
- {"mulu", 0x0900, 0x06c0, "m r,R", 0, SIZE_NONE,
- cris_ver_v10p,
- cris_mulu_op},
-
- {"neg", 0x0580, 0x0A40, "m r,R", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"nop", NOP_OPCODE, NOP_Z_BITS, "", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_btst_nop_op},
-
- {"nop", NOP_OPCODE_V32, NOP_Z_BITS_V32, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_btst_nop_op},
-
- {"not", 0x8770, 0x7880, "r", 0, SIZE_NONE, 0,
- cris_dstep_logshift_mstep_neg_not_op},
-
- {"or", 0x0740, 0x0880, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"or", 0x0B40, 0x0080, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"or", 0x0B40, 0x0080, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"or", 0x0B40, 0x0480, "m S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"orq", 0x0340, 0x0C80, "i,R", 0, SIZE_NONE, 0,
- cris_quick_mode_and_cmp_move_or_op},
-
- {"pop", 0x0E6E, 0x0191, "!R", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"pop", 0x0e3e, 0x01c1, "!P", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_move_from_preg_op},
-
- {"push", 0x0FEE, 0x0011, "BR", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"push", 0x0E7E, 0x0181, "BP", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_move_to_preg_op},
-
- {"rbf", 0x3b30, 0xc0c0, "y", 0, SIZE_NONE,
- cris_ver_v10,
- cris_not_implemented_op},
-
- {"rbf", 0x3b30, 0xc0c0, "S", 0, SIZE_NONE,
- cris_ver_v10,
- cris_not_implemented_op},
-
- {"rfe", 0x2930, 0xD6CF, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"rfg", 0x4930, 0xB6CF, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"rfn", 0x5930, 0xA6CF, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- {"ret", 0xB67F, 0x4980, "", 1, SIZE_NONE,
- cris_ver_v0_10,
- cris_reg_mode_move_from_preg_op},
-
- {"ret", 0xB9F0, 0x460F, "", 1, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_move_from_preg_op},
-
- {"retb", 0xe67f, 0x1980, "", 1, SIZE_NONE,
- cris_ver_v0_10,
- cris_reg_mode_move_from_preg_op},
-
- {"rete", 0xA9F0, 0x560F, "", 1, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_move_from_preg_op},
-
- {"reti", 0xA67F, 0x5980, "", 1, SIZE_NONE,
- cris_ver_v0_10,
- cris_reg_mode_move_from_preg_op},
-
- {"retn", 0xC9F0, 0x360F, "", 1, SIZE_NONE,
- cris_ver_v32p,
- cris_reg_mode_move_from_preg_op},
-
- {"sbfs", 0x3b70, 0xc080, "y", 0, SIZE_NONE,
- cris_ver_v10,
- cris_not_implemented_op},
-
- {"sbfs", 0x3b70, 0xc080, "S", 0, SIZE_NONE,
- cris_ver_v10,
- cris_not_implemented_op},
-
- {"sa",
- 0x0530+CC_A*0x1000,
- 0x0AC0+(0xf-CC_A)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"ssb",
- 0x0530+CC_EXT*0x1000,
- 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_scc_op},
-
- {"scc",
- 0x0530+CC_CC*0x1000,
- 0x0AC0+(0xf-CC_CC)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"scs",
- 0x0530+CC_CS*0x1000,
- 0x0AC0+(0xf-CC_CS)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"seq",
- 0x0530+CC_EQ*0x1000,
- 0x0AC0+(0xf-CC_EQ)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"setf", 0x05b0, 0x0A40, "f", 0, SIZE_NONE, 0,
- cris_ax_ei_setf_op},
-
- {"sfe", 0x3930, 0xC6CF, "", 0, SIZE_NONE,
- cris_ver_v32p,
- cris_not_implemented_op},
-
- /* Need to have "swf" in front of "sext" so it is the one displayed in
- disassembly. */
- {"swf",
- 0x0530+CC_EXT*0x1000,
- 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE,
- cris_ver_v10,
- cris_scc_op},
-
- {"sext",
- 0x0530+CC_EXT*0x1000,
- 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE,
- cris_ver_v0_3,
- cris_scc_op},
-
- {"sge",
- 0x0530+CC_GE*0x1000,
- 0x0AC0+(0xf-CC_GE)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"sgt",
- 0x0530+CC_GT*0x1000,
- 0x0AC0+(0xf-CC_GT)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"shi",
- 0x0530+CC_HI*0x1000,
- 0x0AC0+(0xf-CC_HI)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"shs",
- 0x0530+CC_HS*0x1000,
- 0x0AC0+(0xf-CC_HS)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"sle",
- 0x0530+CC_LE*0x1000,
- 0x0AC0+(0xf-CC_LE)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"slo",
- 0x0530+CC_LO*0x1000,
- 0x0AC0+(0xf-CC_LO)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"sls",
- 0x0530+CC_LS*0x1000,
- 0x0AC0+(0xf-CC_LS)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"slt",
- 0x0530+CC_LT*0x1000,
- 0x0AC0+(0xf-CC_LT)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"smi",
- 0x0530+CC_MI*0x1000,
- 0x0AC0+(0xf-CC_MI)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"sne",
- 0x0530+CC_NE*0x1000,
- 0x0AC0+(0xf-CC_NE)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"spl",
- 0x0530+CC_PL*0x1000,
- 0x0AC0+(0xf-CC_PL)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"sub", 0x0680, 0x0940, "m r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"sub", 0x0a80, 0x0140, "m s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"sub", 0x0a80, 0x0140, "m S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"sub", 0x0a80, 0x0540, "m S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"subq", 0x0280, 0x0d40, "I,R", 0, SIZE_NONE, 0,
- cris_quick_mode_add_sub_op},
-
- {"subs", 0x04a0, 0x0b40, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */
- {"subs", 0x08a0, 0x0340, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"subs", 0x08a0, 0x0340, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"subs", 0x08a0, 0x0740, "z S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"subu", 0x0480, 0x0b60, "z r,R", 0, SIZE_NONE, 0,
- cris_reg_mode_add_sub_cmp_and_or_move_op},
-
- /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */
- {"subu", 0x0880, 0x0360, "z s,R", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"subu", 0x0880, 0x0360, "z S,D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op},
-
- {"subu", 0x0880, 0x0760, "z S,R,r", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_three_operand_add_sub_cmp_and_or_op},
-
- {"svc",
- 0x0530+CC_VC*0x1000,
- 0x0AC0+(0xf-CC_VC)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- {"svs",
- 0x0530+CC_VS*0x1000,
- 0x0AC0+(0xf-CC_VS)*0x1000, "r", 0, SIZE_NONE, 0,
- cris_scc_op},
-
- /* The insn "swapn" is the same as "not" and will be disassembled as
- such, but the swap* family of mnmonics are generally v8-and-higher
- only, so count it in. */
- {"swapn", 0x8770, 0x7880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapw", 0x4770, 0xb880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnw", 0xc770, 0x3880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapb", 0x2770, 0xd880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnb", 0xA770, 0x5880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapwb", 0x6770, 0x9880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnwb", 0xE770, 0x1880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapr", 0x1770, 0xe880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnr", 0x9770, 0x6880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapwr", 0x5770, 0xa880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnwr", 0xd770, 0x2880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapbr", 0x3770, 0xc880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnbr", 0xb770, 0x4880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapwbr", 0x7770, 0x8880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"swapnwbr", 0xf770, 0x0880, "r", 0, SIZE_NONE,
- cris_ver_v8p,
- cris_not_implemented_op},
-
- {"test", 0x0640, 0x0980, "m D", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_reg_mode_test_op},
-
- {"test", 0x0b80, 0xf040, "m y", 0, SIZE_FIELD, 0,
- cris_none_reg_mode_clear_test_op},
-
- {"test", 0x0b80, 0xf040, "m S", 0, SIZE_NONE,
- cris_ver_v0_10,
- cris_none_reg_mode_clear_test_op},
-
- {"xor", 0x07B0, 0x0840, "r,R", 0, SIZE_NONE, 0,
- cris_xor_op},
-
- {NULL, 0, 0, NULL, 0, 0, 0, cris_not_implemented_op}
-};
-
-/* Condition-names, indexed by the CC_* numbers as found in cris.h. */
-const char * const
-cris_cc_strings[] =
-{
- "hs",
- "lo",
- "ne",
- "eq",
- "vc",
- "vs",
- "pl",
- "mi",
- "ls",
- "hi",
- "ge",
- "lt",
- "gt",
- "le",
- "a",
- /* This is a placeholder. In v0, this would be "ext". In v32, this
- is "sb". */
- "wf"
-};
-
-/*
- * Local variables:
- * eval: (c-set-style "gnu")
- * indent-tabs-mode: t
- * End:
- */
-
-
-/* No instruction will be disassembled longer than this. In theory, and
- in silicon, address prefixes can be cascaded. In practice, cascading
- is not used by GCC, and not supported by the assembler. */
-#ifndef MAX_BYTES_PER_CRIS_INSN
-#define MAX_BYTES_PER_CRIS_INSN 8
-#endif
-
-/* Whether or not to decode prefixes, folding it into the following
- instruction. FIXME: Make this optional later. */
-#ifndef PARSE_PREFIX
-#define PARSE_PREFIX 1
-#endif
-
-/* Sometimes we prefix all registers with this character. */
-#define REGISTER_PREFIX_CHAR '$'
-
-/* Whether or not to trace the following sequence:
- sub* X,r%d
- bound* Y,r%d
- adds.w [pc+r%d.w],pc
-
- This is the assembly form of a switch-statement in C.
- The "sub is optional. If there is none, then X will be zero.
- X is the value of the first case,
- Y is the number of cases (including default).
-
- This results in case offsets printed in the form:
- case N: -> case_address
- where N is an estimate of the corresponding 'case' operand in C,
- and case_address is where execution of that case continues after the
- sequence presented above.
-
- The old style of output was to print the offsets as instructions,
- which made it hard to follow "case"-constructs in the disassembly,
- and caused a lot of annoying warnings about undefined instructions.
-
- FIXME: Make this optional later. */
-#ifndef TRACE_CASE
-#define TRACE_CASE (disdata->trace_case)
-#endif
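/* Illustration only, not from the original file: for a C switch such as

     switch (x) { case 3: ... case 7: ... default: ... }

   the compiler may emit roughly (registers, immediates and addresses are
   invented here)

     subq 3,$r10
     bound.b 5,$r10
     adds.w [$pc+$r10.w],$pc

   followed by a table of 16-bit offsets.  With TRACE_CASE enabled, those
   offsets are printed as

     case 3: -> 0x....
     ...
     case 8/default: -> 0x....

   rather than being disassembled as bogus instructions.  */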
-
-enum cris_disass_family
- { cris_dis_v0_v10, cris_dis_common_v10_v32, cris_dis_v32 };
-
-/* Stored in the disasm_info->private_data member. */
-struct cris_disasm_data
-{
- /* Whether to print something less confusing if we find something
- matching a switch-construct. */
- bfd_boolean trace_case;
-
- /* Whether this code is flagged as crisv32. FIXME: Should be an enum
- that includes "compatible". */
- enum cris_disass_family distype;
-};
-
-/* Value of first element in switch. */
-static long case_offset = 0;
-
-/* How many more case-offsets to print. */
-static long case_offset_counter = 0;
-
-/* Number of case offsets. */
-static long no_of_case_offsets = 0;
-
-/* Candidate for next case_offset. */
-static long last_immediate = 0;
-
-static int cris_constraint
- (const char *, unsigned, unsigned, struct cris_disasm_data *);
-
-/* Parse disassembler options and store state in info. FIXME: For the
- time being, we abuse static variables. */
-
-static void
-cris_parse_disassembler_options (struct cris_disasm_data *disdata,
- char *disassembler_options,
- enum cris_disass_family distype)
-{
- /* Default true. */
- disdata->trace_case
- = (disassembler_options == NULL
- || (strcmp (disassembler_options, "nocase") != 0));
-
- disdata->distype = distype;
-}
-
-static const struct cris_spec_reg *
-spec_reg_info (unsigned int sreg, enum cris_disass_family distype)
-{
- int i;
-
- for (i = 0; cris_spec_regs[i].name != NULL; i++)
- {
- if (cris_spec_regs[i].number == sreg)
- {
- if (distype == cris_dis_v32)
- switch (cris_spec_regs[i].applicable_version)
- {
- case cris_ver_warning:
- case cris_ver_version_all:
- case cris_ver_v3p:
- case cris_ver_v8p:
- case cris_ver_v10p:
- case cris_ver_v32p:
- /* No ambiguous sizes or register names with CRISv32. */
- if (cris_spec_regs[i].warning == NULL)
- return &cris_spec_regs[i];
- default:
- ;
- }
- else if (cris_spec_regs[i].applicable_version != cris_ver_v32p)
- return &cris_spec_regs[i];
- }
- }
-
- return NULL;
-}
-
-/* Return the number of bits in the argument. */
-
-static int
-number_of_bits (unsigned int val)
-{
- int bits;
-
- for (bits = 0; val != 0; val &= val - 1)
- bits++;
-
- return bits;
-}
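/* Illustration only, not from the original file: the "val &= val - 1" step
   clears the lowest set bit on each iteration, so the loop runs once per
   set bit; e.g. number_of_bits (0x0940) returns 3 (bits 6, 8 and 11).  */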
-
-/* Get an entry in the opcode-table. */
-
-static const struct cris_opcode *
-get_opcode_entry (unsigned int insn,
- unsigned int prefix_insn,
- struct cris_disasm_data *disdata)
-{
- /* For non-prefixed insns, we keep a table of pointers, indexed by the
- insn code. Each entry is initialized lazily, when first found to be NULL. */
- static const struct cris_opcode **opc_table = NULL;
-
- const struct cris_opcode *max_matchedp = NULL;
- const struct cris_opcode **prefix_opc_table = NULL;
-
- /* We hold a table for each prefix that needs to be handled differently. */
- static const struct cris_opcode **dip_prefixes = NULL;
- static const struct cris_opcode **bdapq_m1_prefixes = NULL;
- static const struct cris_opcode **bdapq_m2_prefixes = NULL;
- static const struct cris_opcode **bdapq_m4_prefixes = NULL;
- static const struct cris_opcode **rest_prefixes = NULL;
-
- /* Allocate and clear the opcode-table. */
- if (opc_table == NULL)
- {
- opc_table = g_new0(const struct cris_opcode *, 65536);
- dip_prefixes = g_new0(const struct cris_opcode *, 65536);
- bdapq_m1_prefixes = g_new0(const struct cris_opcode *, 65536);
- bdapq_m2_prefixes = g_new0(const struct cris_opcode *, 65536);
- bdapq_m4_prefixes = g_new0(const struct cris_opcode *, 65536);
- rest_prefixes = g_new0(const struct cris_opcode *, 65536);
- }
-
- /* Get the right table if this is a prefix.
- This code is connected to cris_constraints in that it knows what
- prefixes play a role in recognition of patterns; the necessary
- state is reflected by which table is used. If constraints
- involving match or non-match of prefix insns are changed, then this
- probably needs changing too. */
- if (prefix_insn != NO_CRIS_PREFIX)
- {
- const struct cris_opcode *popcodep
- = (opc_table[prefix_insn] != NULL
- ? opc_table[prefix_insn]
- : get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata));
-
- if (popcodep == NULL)
- return NULL;
-
- if (popcodep->match == BDAP_QUICK_OPCODE)
- {
- /* Since some offsets are recognized with "push" macros, we
- have to have different tables for them. */
- int offset = (prefix_insn & 255);
-
- if (offset > 127)
- offset -= 256;
-
- switch (offset)
- {
- case -4:
- prefix_opc_table = bdapq_m4_prefixes;
- break;
-
- case -2:
- prefix_opc_table = bdapq_m2_prefixes;
- break;
-
- case -1:
- prefix_opc_table = bdapq_m1_prefixes;
- break;
-
- default:
- prefix_opc_table = rest_prefixes;
- break;
- }
- }
- else if (popcodep->match == DIP_OPCODE)
- /* We don't allow postincrement when the prefix is DIP, so use a
- different table for DIP. */
- prefix_opc_table = dip_prefixes;
- else
- prefix_opc_table = rest_prefixes;
- }
-
- if (prefix_insn != NO_CRIS_PREFIX
- && prefix_opc_table[insn] != NULL)
- max_matchedp = prefix_opc_table[insn];
- else if (prefix_insn == NO_CRIS_PREFIX && opc_table[insn] != NULL)
- max_matchedp = opc_table[insn];
- else
- {
- const struct cris_opcode *opcodep;
- int max_level_of_match = -1;
-
- for (opcodep = cris_opcodes;
- opcodep->name != NULL;
- opcodep++)
- {
- int level_of_match;
-
- if (disdata->distype == cris_dis_v32)
- {
- switch (opcodep->applicable_version)
- {
- case cris_ver_version_all:
- break;
-
- case cris_ver_v0_3:
- case cris_ver_v0_10:
- case cris_ver_v3_10:
- case cris_ver_sim_v0_10:
- case cris_ver_v8_10:
- case cris_ver_v10:
- case cris_ver_warning:
- continue;
-
- case cris_ver_v3p:
- case cris_ver_v8p:
- case cris_ver_v10p:
- case cris_ver_v32p:
- break;
-
- case cris_ver_v8:
- abort ();
- default:
- abort ();
- }
- }
- else
- {
- switch (opcodep->applicable_version)
- {
- case cris_ver_version_all:
- case cris_ver_v0_3:
- case cris_ver_v3p:
- case cris_ver_v0_10:
- case cris_ver_v8p:
- case cris_ver_v8_10:
- case cris_ver_v10:
- case cris_ver_sim_v0_10:
- case cris_ver_v10p:
- case cris_ver_warning:
- break;
-
- case cris_ver_v32p:
- continue;
-
- case cris_ver_v8:
- abort ();
- default:
- abort ();
- }
- }
-
- /* We give a double lead for bits matching the template in
- cris_opcodes. Not even, because then "move p8,r10" would
- be given 2 bits lead over "clear.d r10". When there's a
- tie, the first entry in the table wins. This is
- deliberate, to avoid a more complicated recognition
- formula. */
- if ((opcodep->match & insn) == opcodep->match
- && (opcodep->lose & insn) == 0
- && ((level_of_match
- = cris_constraint (opcodep->args,
- insn,
- prefix_insn,
- disdata))
- >= 0)
- && ((level_of_match
- += 2 * number_of_bits (opcodep->match
- | opcodep->lose))
- > max_level_of_match))
- {
- max_matchedp = opcodep;
- max_level_of_match = level_of_match;
-
- /* If there was a full match, never mind looking
- further. */
- if (level_of_match >= 2 * 16)
- break;
- }
- }
- /* Fill in the new entry.
-
- If there are changes to the opcode-table involving prefixes, and
- disassembly then does not work correctly, try removing the
- else-clause below that fills in the prefix-table. If that
- helps, you need to change the prefix_opc_table setting above, or
- something related. */
- if (prefix_insn == NO_CRIS_PREFIX)
- opc_table[insn] = max_matchedp;
- else
- prefix_opc_table[insn] = max_matchedp;
- }
-
- return max_matchedp;
-}
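/* Sketch only, not part of the original file: a restatement of how the loop
   above scores one candidate.  Constraint confidence plus two points per bit
   pinned down by the match/lose templates; -1 rejects the candidate, and
   2 * 16 or more is treated as a full match by the caller.  */

static int
score_candidate (const struct cris_opcode *opcodep,
                 unsigned int insn,
                 unsigned int prefix_insn,
                 struct cris_disasm_data *disdata)
{
  int level;

  if ((opcodep->match & insn) != opcodep->match
      || (opcodep->lose & insn) != 0)
    return -1;

  level = cris_constraint (opcodep->args, insn, prefix_insn, disdata);
  if (level < 0)
    return -1;

  return level + 2 * number_of_bits (opcodep->match | opcodep->lose);
}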
-
-/* Return -1 if the constraints of a bitwise-matched instruction say
- that there is no match. Otherwise return a nonnegative number
- indicating the confidence in the match (higher is better). */
-
-static int
-cris_constraint (const char *cs,
- unsigned int insn,
- unsigned int prefix_insn,
- struct cris_disasm_data *disdata)
-{
- int retval = 0;
- int tmp;
- int prefix_ok = 0;
- const char *s;
-
- for (s = cs; *s; s++)
- switch (*s)
- {
- case '!':
- /* Do not recognize "pop" if there's a prefix, and recognize it only
- for v0..v10. */
- if (prefix_insn != NO_CRIS_PREFIX
- || disdata->distype != cris_dis_v0_v10)
- return -1;
- break;
-
- case 'U':
- /* Not recognized at disassembly. */
- return -1;
-
- case 'M':
- /* Size modifier for "clear", i.e. special register 0, 4 or 8.
- Check that it is one of them. Only special register 12 could
- be mismatched, but checking for matches is more logical than
- checking for mismatches when there are only a few cases. */
- tmp = ((insn >> 12) & 0xf);
- if (tmp != 0 && tmp != 4 && tmp != 8)
- return -1;
- break;
-
- case 'm':
- if ((insn & 0x30) == 0x30)
- return -1;
- break;
-
- case 'S':
- /* A prefix operand without side-effect. */
- if (prefix_insn != NO_CRIS_PREFIX && (insn & 0x400) == 0)
- {
- prefix_ok = 1;
- break;
- }
- else
- return -1;
-
- case 's':
- case 'y':
- case 'Y':
- /* If this is a prefixed insn with postincrement (side-effect),
- the prefix must not be DIP. */
- if (prefix_insn != NO_CRIS_PREFIX)
- {
- if (insn & 0x400)
- {
- const struct cris_opcode *prefix_opcodep
- = get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata);
-
- if (prefix_opcodep->match == DIP_OPCODE)
- return -1;
- }
-
- prefix_ok = 1;
- }
- break;
-
- case 'B':
- /* If we don't fall through, then the prefix is ok. */
- prefix_ok = 1;
-
- /* A "push" prefix. Check for valid "push" size.
- In case of special register, it may be != 4. */
- if (prefix_insn != NO_CRIS_PREFIX)
- {
- /* Match the prefix insn to BDAPQ. */
- const struct cris_opcode *prefix_opcodep
- = get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata);
-
- if (prefix_opcodep->match == BDAP_QUICK_OPCODE)
- {
- int pushsize = (prefix_insn & 255);
-
- if (pushsize > 127)
- pushsize -= 256;
-
- if (s[1] == 'P')
- {
- unsigned int spec_reg = (insn >> 12) & 15;
- const struct cris_spec_reg *sregp
- = spec_reg_info (spec_reg, disdata->distype);
-
- /* For a special-register, the "prefix size" must
- match the size of the register. */
- if (sregp && sregp->reg_size == (unsigned int) -pushsize)
- break;
- }
- else if (s[1] == 'R')
- {
- if ((insn & 0x30) == 0x20 && pushsize == -4)
- break;
- }
- /* FIXME: Should abort here; next constraint letter
- *must* be 'P' or 'R'. */
- }
- }
- return -1;
-
- case 'D':
- retval = (((insn >> 12) & 15) == (insn & 15));
- if (!retval)
- return -1;
- else
- retval += 4;
- break;
-
- case 'P':
- {
- const struct cris_spec_reg *sregp
- = spec_reg_info ((insn >> 12) & 15, disdata->distype);
-
- /* Since we match four bits, we will give a value of 4-1 = 3
- in a match. If there is a corresponding exact match of a
- special register in another pattern, it will get a value of
- 4, which will be higher. This should be correct in that an
- exact pattern would match better than a general pattern.
-
- Note that there is a reason for not returning zero; the
- pattern for "clear" is partly matched in the bit-pattern
- (the two lower bits must be zero), while the bit-pattern
- for a move from a special register is matched in the
- register constraint. */
-
- if (sregp != NULL)
- {
- retval += 3;
- break;
- }
- else
- return -1;
- }
- }
-
- if (prefix_insn != NO_CRIS_PREFIX && ! prefix_ok)
- return -1;
-
- return retval;
-}
-
-/* Format number as hex with a leading "0x" into outbuffer. */
-
-static char *
-format_hex (unsigned long number,
- char *outbuffer,
- struct cris_disasm_data *disdata)
-{
- /* Truncate negative numbers on >32-bit hosts. */
- number &= 0xffffffff;
-
- sprintf (outbuffer, "0x%lx", number);
-
- /* Save this value for the "case" support. */
- if (TRACE_CASE)
- last_immediate = number;
-
- return outbuffer + strlen (outbuffer);
-}
-
-/* Format number as decimal into outbuffer. Parameter signedp says
- whether the number should be formatted as signed (!= 0) or
- unsigned (== 0). */
-
-static char *
-format_dec (long number, char *outbuffer, size_t outsize, int signedp)
-{
- last_immediate = number;
- snprintf (outbuffer, outsize, signedp ? "%ld" : "%lu", number);
-
- return outbuffer + strlen (outbuffer);
-}
-
-/* Format the name of the general register regno into outbuffer. */
-
-static char *
-format_reg (struct cris_disasm_data *disdata,
- int regno,
- char *outbuffer_start,
- bfd_boolean with_reg_prefix)
-{
- char *outbuffer = outbuffer_start;
-
- if (with_reg_prefix)
- *outbuffer++ = REGISTER_PREFIX_CHAR;
-
- switch (regno)
- {
- case 15:
- /* For v32, there is no context in which we output PC. */
- if (disdata->distype == cris_dis_v32)
- strcpy (outbuffer, "acr");
- else
- strcpy (outbuffer, "pc");
- break;
-
- case 14:
- strcpy (outbuffer, "sp");
- break;
-
- default:
- sprintf (outbuffer, "r%d", regno);
- break;
- }
-
- return outbuffer_start + strlen (outbuffer_start);
-}
-
-/* Format the name of a support register into outbuffer. */
-
-static char *
-format_sup_reg (unsigned int regno,
- char *outbuffer_start,
- bfd_boolean with_reg_prefix)
-{
- char *outbuffer = outbuffer_start;
- int i;
-
- if (with_reg_prefix)
- *outbuffer++ = REGISTER_PREFIX_CHAR;
-
- for (i = 0; cris_support_regs[i].name != NULL; i++)
- if (cris_support_regs[i].number == regno)
- {
- sprintf (outbuffer, "%s", cris_support_regs[i].name);
- return outbuffer_start + strlen (outbuffer_start);
- }
-
- /* There are supposed to be register names covering all numbers, though
- some may be generic names. */
- sprintf (outbuffer, "format_sup_reg-BUG");
- return outbuffer_start + strlen (outbuffer_start);
-}
-
-/* Return the length of an instruction. */
-
-static unsigned
-bytes_to_skip (unsigned int insn,
- const struct cris_opcode *matchedp,
- enum cris_disass_family distype,
- const struct cris_opcode *prefix_matchedp)
-{
- /* Each insn is a word plus "immediate" operands. */
- unsigned to_skip = 2;
- const char *template = matchedp->args;
- const char *s;
-
- for (s = template; *s; s++)
- if ((*s == 's' || *s == 'N' || *s == 'Y')
- && (insn & 0x400) && (insn & 15) == 15
- && prefix_matchedp == NULL)
- {
- /* Immediate via [pc+], so we have to check the size of the
- operand. */
- int mode_size = 1 << ((insn >> 4) & (*template == 'z' ? 1 : 3));
-
- if (matchedp->imm_oprnd_size == SIZE_FIX_32)
- to_skip += 4;
- else if (matchedp->imm_oprnd_size == SIZE_SPEC_REG)
- {
- const struct cris_spec_reg *sregp
- = spec_reg_info ((insn >> 12) & 15, distype);
-
- /* FIXME: Improve error handling; should have been caught
- earlier. */
- if (sregp == NULL)
- return 2;
-
- /* PC is incremented by two, not one, for a byte. Except on
- CRISv32, where constants are always DWORD-size for
- special registers. */
- to_skip +=
- distype == cris_dis_v32 ? 4 : (sregp->reg_size + 1) & ~1;
- }
- else
- to_skip += (mode_size + 1) & ~1;
- }
- else if (*s == 'n')
- to_skip += 4;
- else if (*s == 'b')
- to_skip += 2;
-
- return to_skip;
-}
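/* Worked example (assumed encodings, illustration only): a plain
   "move.d 0x12345678,$r5" is a two-byte insn word followed by a four-byte
   [pc+] immediate, so the function above returns 6.  A byte-sized [pc+]
   immediate still costs two bytes because PC advances in multiples of two,
   so e.g. "cmp.b 0x80,$r5" yields 4.  */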
-
-/* Print condition code flags. */
-
-static char *
-print_flags (struct cris_disasm_data *disdata, unsigned int insn, char *cp)
-{
- /* Use the v8 (Etrax 100) flag definitions for disassembly.
- The differences with v0 (Etrax 1..4) vs. Svinto are:
- v0 'd' <=> v8 'm'
- v0 'e' <=> v8 'b'.
- FIXME: Emit v0..v3 flag names somehow. */
- static const char v8_fnames[] = "cvznxibm";
- static const char v32_fnames[] = "cvznxiup";
- const char *fnames
- = disdata->distype == cris_dis_v32 ? v32_fnames : v8_fnames;
-
- unsigned char flagbits = (((insn >> 8) & 0xf0) | (insn & 15));
- int i;
-
- for (i = 0; i < 8; i++)
- if (flagbits & (1 << i))
- *cp++ = fnames[i];
-
- return cp;
-}
-
-#define FORMAT_DEC(number, tp, signedp) \
- format_dec (number, tp, ({ \
- assert(tp >= temp && tp <= temp + sizeof(temp)); \
- temp + sizeof(temp) - tp; \
- }), signedp)
-
-/* Print out an insn with its operands, and update the info->insn_type
- fields. The prefix_opcodep and the rest hold a prefix insn that is
- supposed to be output as an address mode. */
-
-static void
-print_with_operands (const struct cris_opcode *opcodep,
- unsigned int insn,
- unsigned char *buffer,
- bfd_vma addr,
- disassemble_info *info,
- /* If a prefix insn was before this insn (and is supposed
- to be output as an address), here is a description of
- it. */
- const struct cris_opcode *prefix_opcodep,
- unsigned int prefix_insn,
- unsigned char *prefix_buffer,
- bfd_boolean with_reg_prefix)
-{
- /* Get a buffer of somewhat reasonable size where we store
- intermediate parts of the insn. */
- char temp[sizeof (".d [$r13=$r12-2147483648],$r10") * 2];
- char *tp = temp;
- static const char mode_char[] = "bwd?";
- const char *s;
- const char *cs;
- struct cris_disasm_data *disdata
- = (struct cris_disasm_data *) info->private_data;
-
- /* Print out the name first thing we do. */
- (*info->fprintf_func) (info->stream, "%s", opcodep->name);
-
- cs = opcodep->args;
- s = cs;
-
- /* Ignore any prefix indicator. */
- if (*s == 'p')
- s++;
-
- if (*s == 'm' || *s == 'M' || *s == 'z')
- {
- *tp++ = '.';
-
- /* Get the size-letter. */
- *tp++ = *s == 'M'
- ? (insn & 0x8000 ? 'd'
- : insn & 0x4000 ? 'w' : 'b')
- : mode_char[(insn >> 4) & (*s == 'z' ? 1 : 3)];
-
- /* Ignore the size and the space character that follows. */
- s += 2;
- }
-
- /* Add a space if this isn't a long-branch, because for those we will add
- the condition part of the name later. */
- if (opcodep->match != (BRANCH_PC_LOW + BRANCH_INCR_HIGH * 256))
- *tp++ = ' ';
-
- /* Fill in the insn-type if deducible from the name (and there's no
- better way). */
- if (opcodep->name[0] == 'j')
- {
- if (CONST_STRNEQ (opcodep->name, "jsr"))
- /* It's "jsr" or "jsrc". */
- info->insn_type = dis_jsr;
- else
- /* Any other jump-type insn is considered a branch. */
- info->insn_type = dis_branch;
- }
-
- /* We might know some more fields right now. */
- info->branch_delay_insns = opcodep->delayed;
-
- /* Handle operands. */
- for (; *s; s++)
- {
- switch (*s)
- {
- case 'T':
- tp = format_sup_reg ((insn >> 12) & 15, tp, with_reg_prefix);
- break;
-
- case 'A':
- if (with_reg_prefix)
- *tp++ = REGISTER_PREFIX_CHAR;
- *tp++ = 'a';
- *tp++ = 'c';
- *tp++ = 'r';
- break;
-
- case '[':
- case ']':
- case ',':
- *tp++ = *s;
- break;
-
- case '!':
- /* Ignore at this point; used at earlier stages to avoid
- recognition if there's a prefix at something that in other
- ways looks like a "pop". */
- break;
-
- case 'd':
- /* Ignore. This is an optional ".d " on the large one of
- relaxable insns. */
- break;
-
- case 'B':
- /* This was the prefix that made this a "push". We've already
- handled it by recognizing it, so signal that the prefix is
- handled by setting it to NULL. */
- prefix_opcodep = NULL;
- break;
-
- case 'D':
- case 'r':
- tp = format_reg (disdata, insn & 15, tp, with_reg_prefix);
- break;
-
- case 'R':
- tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix);
- break;
-
- case 'n':
- {
- /* Like N but pc-relative to the start of the insn. */
- uint32_t number
- = (buffer[2] + buffer[3] * 256 + buffer[4] * 65536
- + buffer[5] * 0x1000000 + addr);
-
- /* Finish off and output previous formatted bytes. */
- *tp = 0;
- if (temp[0])
- (*info->fprintf_func) (info->stream, "%s", temp);
- tp = temp;
-
- (*info->print_address_func) ((bfd_vma) number, info);
- }
- break;
-
- case 'u':
- {
- /* Like n but the offset is bits <3:0> in the instruction. */
- unsigned long number = (buffer[0] & 0xf) * 2 + addr;
-
- /* Finish off and output previous formatted bytes. */
- *tp = 0;
- if (temp[0])
- (*info->fprintf_func) (info->stream, "%s", temp);
- tp = temp;
-
- (*info->print_address_func) ((bfd_vma) number, info);
- }
- break;
-
- case 'N':
- case 'y':
- case 'Y':
- case 'S':
- case 's':
- /* Any "normal" memory operand. */
- if ((insn & 0x400) && (insn & 15) == 15 && prefix_opcodep == NULL)
- {
- /* We're looking at [pc+], i.e. we need to output an immediate
- number, where the size can depend on different things. */
- int32_t number;
- int signedp
- = ((*cs == 'z' && (insn & 0x20))
- || opcodep->match == BDAP_QUICK_OPCODE);
- int nbytes;
-
- if (opcodep->imm_oprnd_size == SIZE_FIX_32)
- nbytes = 4;
- else if (opcodep->imm_oprnd_size == SIZE_SPEC_REG)
- {
- const struct cris_spec_reg *sregp
- = spec_reg_info ((insn >> 12) & 15, disdata->distype);
-
- /* A NULL return should have been caught as a non-match earlier,
- so catch it as an internal error in the error-case
- below. */
- if (sregp == NULL)
- /* Whatever non-valid size. */
- nbytes = 42;
- else
- /* PC is always incremented by a multiple of two.
- For CRISv32, immediates are always 4 bytes for
- special registers. */
- nbytes = disdata->distype == cris_dis_v32
- ? 4 : (sregp->reg_size + 1) & ~1;
- }
- else
- {
- int mode_size = 1 << ((insn >> 4) & (*cs == 'z' ? 1 : 3));
-
- if (mode_size == 1)
- nbytes = 2;
- else
- nbytes = mode_size;
- }
-
- switch (nbytes)
- {
- case 1:
- number = buffer[2];
- if (signedp && number > 127)
- number -= 256;
- break;
-
- case 2:
- number = buffer[2] + buffer[3] * 256;
- if (signedp && number > 32767)
- number -= 65536;
- break;
-
- case 4:
- number
- = buffer[2] + buffer[3] * 256 + buffer[4] * 65536
- + buffer[5] * 0x1000000;
- break;
-
- default:
- strcpy (tp, "bug");
- tp += 3;
- number = 42;
- }
-
- if ((*cs == 'z' && (insn & 0x20))
- || (opcodep->match == BDAP_QUICK_OPCODE
- && (nbytes <= 2 || buffer[1 + nbytes] == 0)))
- tp = FORMAT_DEC (number, tp, signedp);
- else
- {
- unsigned int highbyte = (number >> 24) & 0xff;
-
- /* Either output this as an address or as a number. If it's
- a dword with the same high-byte as the address of the
- insn, assume it's an address, and also if it's a non-zero
- non-0xff high-byte. If this is a jsr or a jump, then
- it's definitely an address. */
- if (nbytes == 4
- && (highbyte == ((addr >> 24) & 0xff)
- || (highbyte != 0 && highbyte != 0xff)
- || info->insn_type == dis_branch
- || info->insn_type == dis_jsr))
- {
- /* Finish off and output previous formatted bytes. */
- *tp = 0;
- tp = temp;
- if (temp[0])
- (*info->fprintf_func) (info->stream, "%s", temp);
-
- (*info->print_address_func) ((bfd_vma) number, info);
-
- info->target = number;
- }
- else
- tp = format_hex (number, tp, disdata);
- }
- }
- else
- {
- /* Not an immediate number. Then this is a (possibly
- prefixed) memory operand. */
- if (info->insn_type != dis_nonbranch)
- {
- int mode_size
- = 1 << ((insn >> 4)
- & (opcodep->args[0] == 'z' ? 1 : 3));
- int size;
- info->insn_type = dis_dref;
- info->flags |= CRIS_DIS_FLAG_MEMREF;
-
- if (opcodep->imm_oprnd_size == SIZE_FIX_32)
- size = 4;
- else if (opcodep->imm_oprnd_size == SIZE_SPEC_REG)
- {
- const struct cris_spec_reg *sregp
- = spec_reg_info ((insn >> 12) & 15, disdata->distype);
-
- /* FIXME: Improve error handling; should have been caught
- earlier. */
- if (sregp == NULL)
- size = 4;
- else
- size = sregp->reg_size;
- }
- else
- size = mode_size;
-
- info->data_size = size;
- }
-
- *tp++ = '[';
-
- if (prefix_opcodep
- /* We don't match dip with a postincremented field
- as a side-effect address mode. */
- && ((insn & 0x400) == 0
- || prefix_opcodep->match != DIP_OPCODE))
- {
- if (insn & 0x400)
- {
- tp = format_reg (disdata, insn & 15, tp, with_reg_prefix);
- *tp++ = '=';
- }
-
-
- /* We mainly ignore the prefix format string when the
- address-mode syntax is output. */
- switch (prefix_opcodep->match)
- {
- case DIP_OPCODE:
- /* It's [r], [r+] or [pc+]. */
- if ((prefix_insn & 0x400) && (prefix_insn & 15) == 15)
- {
- /* It's [pc+]. This cannot possibly be anything
- but an address. */
- uint32_t number
- = prefix_buffer[2] + prefix_buffer[3] * 256
- + prefix_buffer[4] * 65536
- + prefix_buffer[5] * 0x1000000;
-
- info->target = (bfd_vma) number;
-
- /* Finish off and output previous formatted
- data. */
- *tp = 0;
- tp = temp;
- if (temp[0])
- (*info->fprintf_func) (info->stream, "%s", temp);
-
- (*info->print_address_func) ((bfd_vma) number, info);
- }
- else
- {
- /* For a memref in an address, we use target2.
- In this case, target is zero. */
- info->flags
- |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG
- | CRIS_DIS_FLAG_MEM_TARGET2_MEM);
-
- info->target2 = prefix_insn & 15;
-
- *tp++ = '[';
- tp = format_reg (disdata, prefix_insn & 15, tp,
- with_reg_prefix);
- if (prefix_insn & 0x400)
- *tp++ = '+';
- *tp++ = ']';
- }
- break;
-
- case BDAP_QUICK_OPCODE:
- {
- int number;
-
- number = prefix_buffer[0];
- if (number > 127)
- number -= 256;
-
- /* Output "reg+num" or, if num < 0, "reg-num". */
- tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp,
- with_reg_prefix);
- if (number >= 0)
- *tp++ = '+';
- tp = FORMAT_DEC (number, tp, 1);
-
- info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG;
- info->target = (prefix_insn >> 12) & 15;
- info->target2 = (bfd_vma) number;
- break;
- }
-
- case BIAP_OPCODE:
- /* Output "r+R.m". */
- tp = format_reg (disdata, prefix_insn & 15, tp,
- with_reg_prefix);
- *tp++ = '+';
- tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp,
- with_reg_prefix);
- *tp++ = '.';
- *tp++ = mode_char[(prefix_insn >> 4) & 3];
-
- info->flags
- |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG
- | CRIS_DIS_FLAG_MEM_TARGET_IS_REG
-
- | ((prefix_insn & 0x8000)
- ? CRIS_DIS_FLAG_MEM_TARGET2_MULT4
- : ((prefix_insn & 0x8000)
- ? CRIS_DIS_FLAG_MEM_TARGET2_MULT2 : 0)));
-
- /* Is it the casejump? It's a "adds.w [pc+r%d.w],pc". */
- if (insn == 0xf83f && (prefix_insn & ~0xf000) == 0x55f)
- /* Then start interpreting data as offsets. */
- case_offset_counter = no_of_case_offsets;
- break;
-
- case BDAP_INDIR_OPCODE:
- /* Output "r+s.m", or, if "s" is [pc+], "r+s" or
- "r-s". */
- tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp,
- with_reg_prefix);
-
- if ((prefix_insn & 0x400) && (prefix_insn & 15) == 15)
- {
- int32_t number;
- unsigned int nbytes;
-
- /* It's a value. Get its size. */
- int mode_size = 1 << ((prefix_insn >> 4) & 3);
-
- if (mode_size == 1)
- nbytes = 2;
- else
- nbytes = mode_size;
-
- switch (nbytes)
- {
- case 1:
- number = prefix_buffer[2];
- if (number > 127)
- number -= 256;
- break;
-
- case 2:
- number = prefix_buffer[2] + prefix_buffer[3] * 256;
- if (number > 32767)
- number -= 65536;
- break;
-
- case 4:
- number
- = prefix_buffer[2] + prefix_buffer[3] * 256
- + prefix_buffer[4] * 65536
- + prefix_buffer[5] * 0x1000000;
- break;
-
- default:
- strcpy (tp, "bug");
- tp += 3;
- number = 42;
- }
-
- info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG;
- info->target2 = (bfd_vma) number;
-
- /* If the size is dword, then assume it's an
- address. */
- if (nbytes == 4)
- {
- /* Finish off and output previous formatted
- bytes. */
- *tp++ = '+';
- *tp = 0;
- tp = temp;
- (*info->fprintf_func) (info->stream, "%s", temp);
-
- (*info->print_address_func) ((bfd_vma) number, info);
- }
- else
- {
- if (number >= 0)
- *tp++ = '+';
- tp = FORMAT_DEC (number, tp, 1);
- }
- }
- else
- {
- /* Output "r+[R].m" or "r+[R+].m". */
- *tp++ = '+';
- *tp++ = '[';
- tp = format_reg (disdata, prefix_insn & 15, tp,
- with_reg_prefix);
- if (prefix_insn & 0x400)
- *tp++ = '+';
- *tp++ = ']';
- *tp++ = '.';
- *tp++ = mode_char[(prefix_insn >> 4) & 3];
-
- info->flags
- |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG
- | CRIS_DIS_FLAG_MEM_TARGET2_MEM
- | CRIS_DIS_FLAG_MEM_TARGET_IS_REG
-
- | (((prefix_insn >> 4) == 2)
- ? 0
- : (((prefix_insn >> 4) & 3) == 1
- ? CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD
- : CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE)));
- }
- break;
-
- default:
- (*info->fprintf_func) (info->stream, "?prefix-bug");
- }
-
- /* To mark that the prefix is used, reset it. */
- prefix_opcodep = NULL;
- }
- else
- {
- tp = format_reg (disdata, insn & 15, tp, with_reg_prefix);
-
- info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG;
- info->target = insn & 15;
-
- if (insn & 0x400)
- *tp++ = '+';
- }
- *tp++ = ']';
- }
- break;
-
- case 'x':
- tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix);
- *tp++ = '.';
- *tp++ = mode_char[(insn >> 4) & 3];
- break;
-
- case 'I':
- tp = FORMAT_DEC (insn & 63, tp, 0);
- break;
-
- case 'b':
- {
- int where = buffer[2] + buffer[3] * 256;
-
- if (where > 32767)
- where -= 65536;
-
- where += addr + ((disdata->distype == cris_dis_v32) ? 0 : 4);
-
- if (insn == BA_PC_INCR_OPCODE)
- info->insn_type = dis_branch;
- else
- info->insn_type = dis_condbranch;
-
- info->target = (bfd_vma) where;
-
- *tp = 0;
- tp = temp;
- (*info->fprintf_func) (info->stream, "%s%s ",
- temp, cris_cc_strings[insn >> 12]);
-
- (*info->print_address_func) ((bfd_vma) where, info);
- }
- break;
-
- case 'c':
- tp = FORMAT_DEC (insn & 31, tp, 0);
- break;
-
- case 'C':
- tp = FORMAT_DEC (insn & 15, tp, 0);
- break;
-
- case 'o':
- {
- long offset = insn & 0xfe;
- bfd_vma target;
-
- if (insn & 1)
- offset |= ~0xff;
-
- if (opcodep->match == BA_QUICK_OPCODE)
- info->insn_type = dis_branch;
- else
- info->insn_type = dis_condbranch;
-
- target = addr + ((disdata->distype == cris_dis_v32) ? 0 : 2) + offset;
- info->target = target;
- *tp = 0;
- tp = temp;
- (*info->fprintf_func) (info->stream, "%s", temp);
- (*info->print_address_func) (target, info);
- }
- break;
-
- case 'Q':
- case 'O':
- {
- long number = buffer[0];
-
- if (number > 127)
- number = number - 256;
-
- tp = FORMAT_DEC (number, tp, 1);
- *tp++ = ',';
- tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix);
- }
- break;
-
- case 'f':
- tp = print_flags (disdata, insn, tp);
- break;
-
- case 'i':
- tp = FORMAT_DEC ((insn & 32) ? (insn & 31) | ~31L : insn & 31, tp, 1);
- break;
-
- case 'P':
- {
- const struct cris_spec_reg *sregp
- = spec_reg_info ((insn >> 12) & 15, disdata->distype);
-
- if (sregp == NULL || sregp->name == NULL)
- /* Should have been caught as a non-match earlier. */
- *tp++ = '?';
- else
- {
- if (with_reg_prefix)
- *tp++ = REGISTER_PREFIX_CHAR;
- strcpy (tp, sregp->name);
- tp += strlen (tp);
- }
- }
- break;
-
- default:
- strcpy (tp, "???");
- tp += 3;
- }
- }
-
- *tp = 0;
-
- if (prefix_opcodep)
- (*info->fprintf_func) (info->stream, " (OOPS unused prefix \"%s: %s\")",
- prefix_opcodep->name, prefix_opcodep->args);
-
- (*info->fprintf_func) (info->stream, "%s", temp);
-
- /* Get info for matching case-tables, if we don't have any active.
- We assume that the last constant seen is used; either in the insn
- itself or in a "move.d const,rN, sub.d rN,rM"-like sequence. */
- if (TRACE_CASE && case_offset_counter == 0)
- {
- if (CONST_STRNEQ (opcodep->name, "sub"))
- case_offset = last_immediate;
-
- /* It could also be an "add", if there are negative case-values. */
- else if (CONST_STRNEQ (opcodep->name, "add"))
- /* The first case is the negated operand to the add. */
- case_offset = -last_immediate;
-
- /* A bound insn will tell us the number of cases. */
- else if (CONST_STRNEQ (opcodep->name, "bound"))
- no_of_case_offsets = last_immediate + 1;
-
- /* A jump or jsr or branch breaks the chain of insns for a
- case-table, so assume default first-case again. */
- else if (info->insn_type == dis_jsr
- || info->insn_type == dis_branch
- || info->insn_type == dis_condbranch)
- case_offset = 0;
- }
-}
-
-
-/* Print the CRIS instruction at address memaddr on stream. Returns
- length of the instruction, in bytes. Prefix register names with `$' if
- WITH_REG_PREFIX. */
-
-static int
-print_insn_cris_generic (bfd_vma memaddr,
- disassemble_info *info,
- bfd_boolean with_reg_prefix)
-{
- int nbytes;
- unsigned int insn;
- const struct cris_opcode *matchedp;
- int advance = 0;
- struct cris_disasm_data *disdata
- = (struct cris_disasm_data *) info->private_data;
-
- /* No instruction will be disassembled as longer than this number of
- bytes; stacked prefixes will not be expanded. */
- unsigned char buffer[MAX_BYTES_PER_CRIS_INSN];
- unsigned char *bufp;
- int status = 0;
- bfd_vma addr;
-
- /* There will be an "out of range" error after the last instruction.
- We read as many bytes as are available, up to the maximum insn size,
- and hope that covers at least the amount that we will consume.
-
- If we can't get any data, or we do not get enough data, we print
- the error message. */
-
- nbytes = info->buffer_length ? info->buffer_length
- : MAX_BYTES_PER_CRIS_INSN;
- nbytes = MIN(nbytes, MAX_BYTES_PER_CRIS_INSN);
- status = (*info->read_memory_func) (memaddr, buffer, nbytes, info);
-
- /* If we did not get all we asked for, then clear the rest.
- Hopefully this makes a reproducible result in case of errors. */
- if (nbytes != MAX_BYTES_PER_CRIS_INSN)
- memset (buffer + nbytes, 0, MAX_BYTES_PER_CRIS_INSN - nbytes);
-
- addr = memaddr;
- bufp = buffer;
-
- /* Set some defaults for the insn info. */
- info->insn_info_valid = 1;
- info->branch_delay_insns = 0;
- info->data_size = 0;
- info->insn_type = dis_nonbranch;
- info->flags = 0;
- info->target = 0;
- info->target2 = 0;
-
- /* If we got any data, disassemble it. */
- if (nbytes != 0)
- {
- matchedp = NULL;
-
- insn = bufp[0] + bufp[1] * 256;
-
- /* If we're in a case-table, don't disassemble the offsets. */
- if (TRACE_CASE && case_offset_counter != 0)
- {
- info->insn_type = dis_noninsn;
- advance += 2;
-
- /* If to print data as offsets, then shortcut here. */
- (*info->fprintf_func) (info->stream, "case %ld%s: -> ",
- case_offset + no_of_case_offsets
- - case_offset_counter,
- case_offset_counter == 1 ? "/default" :
- "");
-
- (*info->print_address_func) ((bfd_vma)
- ((short) (insn)
- + (long) (addr
- - (no_of_case_offsets
- - case_offset_counter)
- * 2)), info);
- case_offset_counter--;
-
- /* The default case start (without a "sub" or "add") must be
- zero. */
- if (case_offset_counter == 0)
- case_offset = 0;
- }
- else if (insn == 0)
- {
- /* We're often called to disassemble zeroes. While this is a
- valid "bcc .+2" insn, it is also useless enough and enough
- of a nuisance that we will just output "bcc .+2" for it
- and signal it as a noninsn. */
- (*info->fprintf_func) (info->stream,
- disdata->distype == cris_dis_v32
- ? "bcc ." : "bcc .+2");
- info->insn_type = dis_noninsn;
- advance += 2;
- }
- else
- {
- const struct cris_opcode *prefix_opcodep = NULL;
- unsigned char *prefix_buffer = bufp;
- unsigned int prefix_insn = insn;
- int prefix_size = 0;
-
- matchedp = get_opcode_entry (insn, NO_CRIS_PREFIX, disdata);
-
- /* Check if we're supposed to write out prefixes as address
- modes and if this was a prefix. */
- if (matchedp != NULL && PARSE_PREFIX && matchedp->args[0] == 'p')
- {
- /* If it's a prefix, put it into the prefix vars and get the
- main insn. */
- prefix_size = bytes_to_skip (prefix_insn, matchedp,
- disdata->distype, NULL);
- prefix_opcodep = matchedp;
-
- insn = bufp[prefix_size] + bufp[prefix_size + 1] * 256;
- matchedp = get_opcode_entry (insn, prefix_insn, disdata);
-
- if (matchedp != NULL)
- {
- addr += prefix_size;
- bufp += prefix_size;
- advance += prefix_size;
- }
- else
- {
- /* The "main" insn wasn't valid, at least not when
- prefixed. Put back things enough to output the
- prefix insn only, as a normal insn. */
- matchedp = prefix_opcodep;
- insn = prefix_insn;
- prefix_opcodep = NULL;
- }
- }
-
- if (matchedp == NULL)
- {
- (*info->fprintf_func) (info->stream, "??0x%x", insn);
- advance += 2;
-
- info->insn_type = dis_noninsn;
- }
- else
- {
- advance
- += bytes_to_skip (insn, matchedp, disdata->distype,
- prefix_opcodep);
-
- /* The info_type and assorted fields will be set according
- to the operands. */
- print_with_operands (matchedp, insn, bufp, addr, info,
- prefix_opcodep, prefix_insn,
- prefix_buffer, with_reg_prefix);
- }
- }
- }
- else
- info->insn_type = dis_noninsn;
-
- /* If we read less than MAX_BYTES_PER_CRIS_INSN, i.e. we got an error
- status when reading that much, and the insn decoding indicated a
- length exceeding what we read, there is an error. */
- if (status != 0 && (nbytes == 0 || advance > nbytes))
- {
- (*info->memory_error_func) (status, memaddr, info);
- return -1;
- }
-
- /* Max supported insn size with one folded prefix insn. */
- info->bytes_per_line = MAX_BYTES_PER_CRIS_INSN;
-
- /* I would like to set this to a fixed value larger than the actual
- number of bytes to print in order to avoid spaces between bytes,
- but objdump.c (2.9.1) does not like that, so we print 16-bit
- chunks, which is the next choice. */
- info->bytes_per_chunk = 2;
-
- /* Printing bytes in order of increasing addresses makes sense,
- especially on a little-endian target.
- This is completely the opposite of what you think; setting this to
- BFD_ENDIAN_LITTLE will print bytes in order N..0 rather than the 0..N
- we want. */
- info->display_endian = BFD_ENDIAN_BIG;
-
- return advance;
-}
-
-/* Disassemble, prefixing register names with `$'. CRIS v0..v10. */
-static int
-print_insn_cris_with_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_v0_v10);
- return print_insn_cris_generic (vma, info, true);
-}
-/* Disassemble, prefixing register names with `$'. CRIS v32. */
-
-static int
-print_insn_crisv32_with_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_v32);
- return print_insn_cris_generic (vma, info, true);
-}
-
-#if 0
-/* Disassemble, prefixing register names with `$'.
- Common v10 and v32 subset. */
-
-static int
-print_insn_crisv10_v32_with_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_common_v10_v32);
- return print_insn_cris_generic (vma, info, true);
-}
-
-/* Disassemble, no prefixes on register names. CRIS v0..v10. */
-
-static int
-print_insn_cris_without_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_v0_v10);
- return print_insn_cris_generic (vma, info, false);
-}
-
-/* Disassemble, no prefixes on register names. CRIS v32. */
-
-static int
-print_insn_crisv32_without_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_v32);
- return print_insn_cris_generic (vma, info, false);
-}
-
-/* Disassemble, no prefixes on register names.
- Common v10 and v32 subset. */
-
-static int
-print_insn_crisv10_v32_without_register_prefix (bfd_vma vma,
- disassemble_info *info)
-{
- struct cris_disasm_data disdata;
- info->private_data = &disdata;
- cris_parse_disassembler_options (&disdata, info->disassembler_options,
- cris_dis_common_v10_v32);
- return print_insn_cris_generic (vma, info, false);
-}
-#endif
-
-int
-print_insn_crisv10 (bfd_vma vma,
- disassemble_info *info)
-{
- return print_insn_cris_with_register_prefix(vma, info);
-}
-
-int
-print_insn_crisv32 (bfd_vma vma,
- disassemble_info *info)
-{
- return print_insn_crisv32_with_register_prefix(vma, info);
-}
-
-/* Return a disassembler-function that prints registers with a `$' prefix,
- or one that prints registers without a prefix.
- FIXME: We should improve the solution to avoid the multitude of
- functions seen above. */
-#if 0
-disassembler_ftype
-cris_get_disassembler (bfd *abfd)
-{
- /* If there's no bfd in sight, we return what is valid as input in all
- contexts if fed back to the assembler: disassembly *with* register
- prefix. Unfortunately this will be totally wrong for v32. */
- if (abfd == NULL)
- return print_insn_cris_with_register_prefix;
-
- if (bfd_get_symbol_leading_char (abfd) == 0)
- {
- if (bfd_get_mach (abfd) == bfd_mach_cris_v32)
- return print_insn_crisv32_with_register_prefix;
- if (bfd_get_mach (abfd) == bfd_mach_cris_v10_v32)
- return print_insn_crisv10_v32_with_register_prefix;
-
- /* We default to v10. This may be specifically specified in the
- bfd mach, but is also the default setting. */
- return print_insn_cris_with_register_prefix;
- }
-
- if (bfd_get_mach (abfd) == bfd_mach_cris_v32)
- return print_insn_crisv32_without_register_prefix;
- if (bfd_get_mach (abfd) == bfd_mach_cris_v10_v32)
- return print_insn_crisv10_v32_without_register_prefix;
- return print_insn_cris_without_register_prefix;
-}
-#endif
-/* Local variables:
- eval: (c-set-style "gnu")
- indent-tabs-mode: t
- End: */
diff --git a/disas/disas-common.c b/disas/disas-common.c
index de61f6d..21c2f03 100644
--- a/disas/disas-common.c
+++ b/disas/disas-common.c
@@ -7,7 +7,6 @@
#include "disas/disas.h"
#include "disas/capstone.h"
#include "hw/core/cpu.h"
-#include "exec/tswap.h"
#include "disas-internal.h"
@@ -61,15 +60,11 @@ void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu)
s->cpu = cpu;
s->info.print_address_func = print_address;
- if (target_words_bigendian()) {
- s->info.endian = BFD_ENDIAN_BIG;
- } else {
- s->info.endian = BFD_ENDIAN_LITTLE;
- }
+ s->info.endian = BFD_ENDIAN_UNKNOWN;
- CPUClass *cc = CPU_GET_CLASS(cpu);
- if (cc->disas_set_info) {
- cc->disas_set_info(cpu, &s->info);
+ if (cpu->cc->disas_set_info) {
+ cpu->cc->disas_set_info(cpu, &s->info);
+ g_assert(s->info.endian != BFD_ENDIAN_UNKNOWN);
}
}
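With the hunk above, the common code no longer derives endianness from the target; a CPU's disas_set_info hook must now fill in info->endian itself or the new assertion fires. A minimal sketch of such a hook for a hypothetical little-endian target follows (the function name and the chosen print_insn entry are invented for the example):

static void mycpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    /* Must be set explicitly now; leaving BFD_ENDIAN_UNKNOWN trips the assert. */
    info->endian = BFD_ENDIAN_LITTLE;
    info->print_insn = print_insn_riscv32;   /* example disassembler entry */
}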
diff --git a/disas/disas-mon.c b/disas/disas-mon.c
index 37bf16a..9c69361 100644
--- a/disas/disas-mon.c
+++ b/disas/disas-mon.c
@@ -7,7 +7,7 @@
#include "qemu/osdep.h"
#include "disas-internal.h"
#include "disas/disas.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/core/cpu.h"
#include "monitor/monitor.h"
diff --git a/disas/hppa.c b/disas/hppa.c
index 49e2231..2b58434 100644
--- a/disas/hppa.c
+++ b/disas/hppa.c
@@ -606,7 +606,7 @@ struct pa_opcode
In the args field, the following characters are unused:
- ' " - / 34 6789:; '
+ ' " - / 34 678 :; '
'@ C M [\] '
'` e g } '
@@ -650,6 +650,7 @@ Also these:
| 6 bit field length at 19,27:31 (fixed extract/deposit)
A 13 bit immediate at 18 (to support the BREAK instruction)
^ like b, but describes a control register
+ 9 like b, but describes a diagnose register
! sar (cr11) register
D 26 bit immediate at 31 (to support the DIAG instruction)
$ 9 bit immediate at 28 (to support POPBTS)
@@ -1322,13 +1323,19 @@ static const struct pa_opcode pa_opcodes[] =
{ "fdce", 0x040012c0, 0xfc00ffdf, "cZx(b)", pa10, 0},
{ "fdce", 0x040012c0, 0xfc003fdf, "cZx(s,b)", pa10, 0},
{ "fice", 0x040002c0, 0xfc001fdf, "cZx(S,b)", pa10, 0},
-{ "diag", 0x14000000, 0xfc000000, "D", pa10, 0},
{ "idtlbt", 0x04001800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT},
{ "iitlbt", 0x04000800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT},
+/* completely undocumented, but used by ODE, HP-UX and Linux: */
+{ "mfcpu_pcxu", 0x140008a0, 0xfc9fffe0, "9,t", pa20, 0}, /* PCXU: mfdiag */
+{ "mtcpu_pcxu", 0x14001840, 0xfc00ffff, "x,9", pa20, 0},
+
/* These may be specific to certain versions of the PA. Joel claimed
they were 72000 (7200?) specific. However, I'm almost certain the
mtcpu/mfcpu were undocumented, but available in the older 700 machines. */
+{ "mfcpu_c", 0x14000600, 0xfc00ffff, "9,x", pa10, 0}, /* PCXL: for dr0 and dr8 only */
+{ "mfcpu_t", 0x14001400, 0xfc9fffe0, "9,t", pa10, 0}, /* PCXL: all dr except dr0 and dr8 */
+{ "mtcpu_pcxl", 0x14000240, 0xfc00ffff, "x,9", pa11, 0}, /* PCXL: mtcpu for dr0 and dr8 */
{ "mtcpu", 0x14001600, 0xfc00ffff, "x,^", pa10, 0},
{ "mfcpu", 0x14001A00, 0xfc00ffff, "^,x", pa10, 0},
{ "tocen", 0x14403600, 0xffffffff, "", pa10, 0},
@@ -1336,6 +1343,9 @@ static const struct pa_opcode pa_opcodes[] =
{ "shdwgr", 0x14402600, 0xffffffff, "", pa10, 0},
{ "grshdw", 0x14400620, 0xffffffff, "", pa10, 0},
+/* instead of showing D only, show all other registers too */
+{ "diag", 0x14000000, 0xfc000000, "D x,9,t", pa10, 0},
+
/* gfw and gfr are not in the HP PA 1.1 manual, but they are in either
the Timex FPU or the Mustang ERS (not sure which) manual. */
{ "gfw", 0x04001680, 0xfc00ffdf, "cZx(b)", pa11, 0},
@@ -1801,6 +1811,12 @@ fput_creg (unsigned reg, disassemble_info *info)
(*info->fprintf_func) (info->stream, "%s", control_reg[reg]);
}
+static void
+fput_dreg (unsigned reg, disassemble_info *info)
+{
+ (*info->fprintf_func) (info->stream, "dr%d", reg);
+}
+
/* Print constants with sign. */
static void
@@ -2007,6 +2023,9 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info)
case '^':
fput_creg (GET_FIELD (insn, 6, 10), info);
break;
+ case '9':
+ fput_dreg (GET_FIELD (insn, 6, 10), info);
+ break;
case 't':
fput_reg (GET_FIELD (insn, 27, 31), info);
break;
diff --git a/disas/meson.build b/disas/meson.build
index 20d6aef..bbfa119 100644
--- a/disas/meson.build
+++ b/disas/meson.build
@@ -1,5 +1,4 @@
common_ss.add(when: 'CONFIG_ALPHA_DIS', if_true: files('alpha.c'))
-common_ss.add(when: 'CONFIG_CRIS_DIS', if_true: files('cris.c'))
common_ss.add(when: 'CONFIG_HEXAGON_DIS', if_true: files('hexagon.c'))
common_ss.add(when: 'CONFIG_HPPA_DIS', if_true: files('hppa.c'))
common_ss.add(when: 'CONFIG_M68K_DIS', if_true: files('m68k.c'))
diff --git a/disas/riscv.c b/disas/riscv.c
index 5965574..85cd2a9 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -976,6 +976,14 @@ typedef enum {
rv_op_amocas_h = 945,
rv_op_wrs_sto = 946,
rv_op_wrs_nto = 947,
+ rv_op_lpad = 948,
+ rv_op_sspush = 949,
+ rv_op_sspopchk = 950,
+ rv_op_ssrdp = 951,
+ rv_op_ssamoswap_w = 952,
+ rv_op_ssamoswap_d = 953,
+ rv_op_c_sspush = 954,
+ rv_op_c_sspopchk = 955,
} rv_op;
/* register names */
@@ -1654,7 +1662,7 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
{ "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
{ "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
- { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
+ { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
{ "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
@@ -2206,11 +2214,11 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
- { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
- { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
- { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
- { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
- { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
@@ -2236,6 +2244,16 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "amocas.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "wrs.sto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "wrs.nto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
+ { "lpad", rv_codec_lp, rv_fmt_imm, NULL, 0, 0, 0 },
+ { "sspush", rv_codec_r, rv_fmt_rs2, NULL, 0, 0, 0 },
+ { "sspopchk", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 },
+ { "ssrdp", rv_codec_r, rv_fmt_rd, NULL, 0, 0, 0 },
+ { "ssamoswap.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
+ { "ssamoswap.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
+ { "c.sspush", rv_codec_cmop_ss, rv_fmt_rs2, NULL, rv_op_sspush,
+ rv_op_sspush, 0 },
+ { "c.sspopchk", rv_codec_cmop_ss, rv_fmt_rs1, NULL, rv_op_sspopchk,
+ rv_op_sspopchk, 0 },
};
/* CSR names */
@@ -2253,6 +2271,7 @@ static const char *csr_name(int csrno)
case 0x0009: return "vxsat";
case 0x000a: return "vxrm";
case 0x000f: return "vcsr";
+ case 0x0011: return "ssp";
case 0x0015: return "seed";
case 0x0017: return "jvt";
case 0x0040: return "uscratch";
@@ -2419,9 +2438,11 @@ static const char *csr_name(int csrno)
case 0x07a1: return "tdata1";
case 0x07a2: return "tdata2";
case 0x07a3: return "tdata3";
+ case 0x07a4: return "tinfo";
case 0x07b0: return "dcsr";
case 0x07b1: return "dpc";
- case 0x07b2: return "dscratch";
+ case 0x07b2: return "dscratch0";
+ case 0x07b3: return "dscratch1";
case 0x0b00: return "mcycle";
case 0x0b01: return "mtime";
case 0x0b02: return "minstret";
@@ -2592,10 +2613,16 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
break;
case 2: op = rv_op_c_li; break;
case 3:
- if (dec->cfg->ext_zcmop) {
+ if (dec->cfg && dec->cfg->ext_zcmop) {
if ((((inst >> 2) & 0b111111) == 0b100000) &&
(((inst >> 11) & 0b11) == 0b0)) {
- op = rv_c_mop_1 + ((inst >> 8) & 0b111);
+ unsigned int cmop_code = 0;
+ cmop_code = ((inst >> 8) & 0b111);
+ op = rv_c_mop_1 + cmop_code;
+ if (dec->cfg->ext_zicfiss) {
+ op = (cmop_code == 0) ? rv_op_c_sspush : op;
+ op = (cmop_code == 2) ? rv_op_c_sspopchk : op;
+ }
break;
}
}
@@ -2687,7 +2714,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
op = rv_op_c_sqsp;
} else {
op = rv_op_c_fsdsp;
- if (dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) {
+ if (dec->cfg && dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) {
switch ((inst >> 8) & 0b01111) {
case 8:
if (((inst >> 4) & 0b01111) >= 4) {
@@ -2713,7 +2740,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
} else {
switch ((inst >> 10) & 0b011) {
case 0:
- if (!dec->cfg->ext_zcmt) {
+ if (dec->cfg && !dec->cfg->ext_zcmt) {
break;
}
if (((inst >> 2) & 0xFF) >= 32) {
@@ -2723,7 +2750,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
}
break;
case 3:
- if (!dec->cfg->ext_zcmp) {
+ if (dec->cfg && !dec->cfg->ext_zcmp) {
break;
}
switch ((inst >> 5) & 0b011) {
@@ -2929,7 +2956,13 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 7: op = rv_op_andi; break;
}
break;
- case 5: op = rv_op_auipc; break;
+ case 5:
+ op = rv_op_auipc;
+ if (dec->cfg && dec->cfg->ext_zicfilp &&
+ (((inst >> 7) & 0b11111) == 0b00000)) {
+ op = rv_op_lpad;
+ }
+ break;
case 6:
switch ((inst >> 12) & 0b111) {
case 0: op = rv_op_addiw; break;
@@ -3073,6 +3106,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 66: op = rv_op_amoor_w; break;
case 67: op = rv_op_amoor_d; break;
case 68: op = rv_op_amoor_q; break;
+ case 74: op = rv_op_ssamoswap_w; break;
+ case 75: op = rv_op_ssamoswap_d; break;
case 96: op = rv_op_amoand_b; break;
case 97: op = rv_op_amoand_h; break;
case 98: op = rv_op_amoand_w; break;
@@ -4025,8 +4060,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 2: op = rv_op_csrrs; break;
case 3: op = rv_op_csrrc; break;
case 4:
- if (dec->cfg->ext_zimop) {
- int imm_mop5, imm_mop3;
+ if (dec->cfg && dec->cfg->ext_zimop) {
+ int imm_mop5, imm_mop3, reg_num;
if ((extract32(inst, 22, 10) & 0b1011001111)
== 0b1000000111) {
imm_mop5 = deposit32(deposit32(extract32(inst, 20, 2),
@@ -4034,11 +4069,36 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
extract32(inst, 26, 2)),
4, 1, extract32(inst, 30, 1));
op = rv_mop_r_0 + imm_mop5;
+ /* if zicfiss enabled and mop5 is shadow stack */
+ if (dec->cfg->ext_zicfiss &&
+ ((imm_mop5 & 0b11100) == 0b11100)) {
+ /* rs1=0 means ssrdp */
+ if ((inst & (0b011111 << 15)) == 0) {
+ op = rv_op_ssrdp;
+ }
+ /* rd=0 means sspopchk */
+ reg_num = (inst >> 15) & 0b011111;
+ if (((inst & (0b011111 << 7)) == 0) &&
+ ((reg_num == 1) || (reg_num == 5))) {
+ op = rv_op_sspopchk;
+ }
+ }
} else if ((extract32(inst, 25, 7) & 0b1011001)
== 0b1000001) {
imm_mop3 = deposit32(extract32(inst, 26, 2),
2, 1, extract32(inst, 30, 1));
op = rv_mop_rr_0 + imm_mop3;
+ /* if zicfiss enabled and mop3 is shadow stack */
+ if (dec->cfg->ext_zicfiss &&
+ ((imm_mop3 & 0b111) == 0b111)) {
+ /* rs1=0 and rd=0 means sspush */
+ reg_num = (inst >> 20) & 0b011111;
+ if (((inst & (0b011111 << 15)) == 0) &&
+ ((inst & (0b011111 << 7)) == 0) &&
+ ((reg_num == 1) || (reg_num == 5))) {
+ op = rv_op_sspush;
+ }
+ }
}
}
break;
@@ -4488,6 +4548,11 @@ static uint32_t operand_tbl_index(rv_inst inst)
return ((inst << 54) >> 56);
}
+static uint32_t operand_lpl(rv_inst inst)
+{
+ return inst >> 12;
+}
+
/* decode operands */
static void decode_inst_operands(rv_decode *dec, rv_isa isa)
@@ -4808,7 +4873,7 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa)
break;
case rv_codec_vsetivli:
dec->rd = operand_rd(inst);
- dec->imm = operand_vimm(inst);
+ dec->imm = extract32(inst, 15, 5);
dec->vzimm = operand_vzimm10(inst);
break;
case rv_codec_zcb_lb:
@@ -4875,6 +4940,14 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa)
dec->imm = sextract32(operand_rs2(inst), 0, 5);
dec->imm1 = operand_imm2(inst);
break;
+ case rv_codec_lp:
+ dec->imm = operand_lpl(inst);
+ break;
+ case rv_codec_cmop_ss:
+ dec->rd = rv_ireg_zero;
+ dec->rs1 = dec->rs2 = operand_crs1(inst);
+ dec->imm = 0;
+ break;
};
}
@@ -5041,28 +5114,28 @@ static GString *format_inst(size_t tab, rv_decode *dec)
g_string_append(buf, rv_ireg_name_sym[dec->rs2]);
break;
case '3':
- if (dec->cfg->ext_zfinx) {
+ if (dec->cfg && dec->cfg->ext_zfinx) {
g_string_append(buf, rv_ireg_name_sym[dec->rd]);
} else {
g_string_append(buf, rv_freg_name_sym[dec->rd]);
}
break;
case '4':
- if (dec->cfg->ext_zfinx) {
+ if (dec->cfg && dec->cfg->ext_zfinx) {
g_string_append(buf, rv_ireg_name_sym[dec->rs1]);
} else {
g_string_append(buf, rv_freg_name_sym[dec->rs1]);
}
break;
case '5':
- if (dec->cfg->ext_zfinx) {
+ if (dec->cfg && dec->cfg->ext_zfinx) {
g_string_append(buf, rv_ireg_name_sym[dec->rs2]);
} else {
g_string_append(buf, rv_freg_name_sym[dec->rs2]);
}
break;
case '6':
- if (dec->cfg->ext_zfinx) {
+ if (dec->cfg && dec->cfg->ext_zfinx) {
g_string_append(buf, rv_ireg_name_sym[dec->rs3]);
} else {
g_string_append(buf, rv_freg_name_sym[dec->rs3]);
@@ -5368,7 +5441,8 @@ static GString *disasm_inst(rv_isa isa, uint64_t pc, rv_inst inst,
const rv_opcode_data *opcode_data = decoders[i].opcode_data;
void (*decode_func)(rv_decode *, rv_isa) = decoders[i].decode_func;
- if (guard_func(cfg)) {
+ /* always_true_p does not dereference cfg */
+ if (((i == 0) || cfg) && guard_func(cfg)) {
dec.opcode_data = opcode_data;
decode_func(&dec, isa);
if (dec.op != rv_op_illegal)
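A condensed restatement of the shadow-stack aliasing added above, as a sketch that is not part of the patch; it assumes Zicfiss is enabled and uses the same field layout as the hunks in riscv.c:

/* mop.r.N: rs1 == x0 selects ssrdp; rd == x0 with rs1 in {x1, x5} selects
 * sspopchk.  mop.rr.N: rd == x0, rs1 == x0 and rs2 in {x1, x5} selects
 * sspush.  N must lie in the shadow-stack range in both cases. */
static rv_op zicfiss_alias_r(rv_op op, unsigned rd, unsigned rs1,
                             unsigned imm_mop5)
{
    if ((imm_mop5 & 0b11100) == 0b11100) {
        if (rs1 == 0) {
            return rv_op_ssrdp;
        }
        if (rd == 0 && (rs1 == 1 || rs1 == 5)) {
            return rv_op_sspopchk;
        }
    }
    return op;
}

static rv_op zicfiss_alias_rr(rv_op op, unsigned rd, unsigned rs1,
                              unsigned rs2, unsigned imm_mop3)
{
    if ((imm_mop3 & 0b111) == 0b111 &&
        rd == 0 && rs1 == 0 && (rs2 == 1 || rs2 == 5)) {
        return rv_op_sspush;
    }
    return op;
}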
diff --git a/disas/riscv.h b/disas/riscv.h
index 16a08e4..d211700 100644
--- a/disas/riscv.h
+++ b/disas/riscv.h
@@ -166,6 +166,8 @@ typedef enum {
rv_codec_r2_immhl,
rv_codec_r2_imm2_imm5,
rv_codec_fli,
+ rv_codec_lp,
+ rv_codec_cmop_ss,
} rv_codec;
/* structures */
@@ -223,11 +225,13 @@ enum {
#define rv_fmt_none "O\t"
#define rv_fmt_rs1 "O\t1"
+#define rv_fmt_rs2 "O\t2"
#define rv_fmt_offset "O\to"
#define rv_fmt_pred_succ "O\tp,s"
#define rv_fmt_rs1_rs2 "O\t1,2"
#define rv_fmt_rd_imm "O\t0,i"
#define rv_fmt_rd_uimm "O\t0,Ui"
+#define rv_fmt_imm "O\ti"
#define rv_fmt_rd_offset "O\t0,o"
#define rv_fmt_rd_uoffset "O\t0,Uo"
#define rv_fmt_rd_rs1_rs2 "O\t0,1,2"
@@ -290,7 +294,7 @@ enum {
#define rv_fmt_fd_vs2 "O\t3,F"
#define rv_fmt_vd_vm "O\tDm"
#define rv_fmt_vsetvli "O\t0,1,v"
-#define rv_fmt_vsetivli "O\t0,u,v"
+#define rv_fmt_vsetivli "O\t0,i,v"
#define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)"
#define rv_fmt_push_rlist "O\tx,-i"
#define rv_fmt_pop_rlist "O\tx,i"
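A short worked example of the new landing-pad decode, under the assumption that the label is printed as a plain decimal; the sample word below is illustrative, not taken from the patch:

/* 0x00007017 encodes "auipc x0, 0x7"; with Zicfilp enabled the decoder
 * re-labels it because rd is x0, and rv_codec_lp takes the label from
 * bits 31:12, so it is rendered as something like "lpad 7". */
uint32_t inst = 0x00007017;
bool is_lpad = (inst & 0x7f) == 0x17           /* AUIPC major opcode */
               && ((inst >> 7) & 0x1f) == 0;   /* rd == x0 */
uint32_t label = inst >> 12;                   /* == 7 */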
diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt
index 2e760a4..2283a09 100644
--- a/docs/COLO-FT.txt
+++ b/docs/COLO-FT.txt
@@ -193,8 +193,8 @@ any IP's here, except for the $primary_ip variable.
-device piix3-usb-uhci -device usb-tablet -name secondary \
-netdev tap,id=hn0,vhost=off,helper=/usr/lib/qemu/qemu-bridge-helper \
-device rtl8139,id=e0,netdev=hn0 \
- -chardev socket,id=red0,host=$primary_ip,port=9003,reconnect=1 \
- -chardev socket,id=red1,host=$primary_ip,port=9004,reconnect=1 \
+ -chardev socket,id=red0,host=$primary_ip,port=9003,reconnect-ms=1000 \
+ -chardev socket,id=red1,host=$primary_ip,port=9004,reconnect-ms=1000 \
-object filter-redirector,id=f1,netdev=hn0,queue=tx,indev=red0 \
-object filter-redirector,id=f2,netdev=hn0,queue=rx,outdev=red1 \
-object filter-rewriter,id=rew0,netdev=hn0,queue=all \
diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst
index 8fd7da1..8ecbd6b 100644
--- a/docs/about/build-platforms.rst
+++ b/docs/about/build-platforms.rst
@@ -29,6 +29,9 @@ The `Repology`_ site is a useful resource to identify
currently shipped versions of software in various operating systems,
though it does not cover all distros listed below.
+Instructions for installing build dependencies on different systems can be
+found on the :ref:`setup-build-env` page.
+
Supported host architectures
----------------------------
@@ -40,8 +43,8 @@ Those hosts are officially supported, with various accelerators:
* - CPU Architecture
- Accelerators
* - Arm
- - kvm (64 bit only), tcg, xen
- * - MIPS (little endian only)
+ - hvf (64 bit only), kvm (64 bit only), tcg, xen
+ * - MIPS (64 bit little endian only)
- kvm, tcg
* - PPC
- kvm, tcg
@@ -98,7 +101,7 @@ Python runtime
option of the ``configure`` script to point QEMU to a supported
version of the Python runtime.
- As of QEMU |version|, the minimum supported version of Python is 3.7.
+ As of QEMU |version|, the minimum supported version of Python is 3.9.
Python build dependencies
Some of QEMU's build dependencies are written in Python. Usually these
@@ -107,18 +110,34 @@ Python build dependencies
required, it may be necessary to fetch python modules from the Python
Package Index (PyPI) via ``pip``, in order to build QEMU.
+Rust build dependencies
+ QEMU is generally conservative in adding new Rust dependencies, and all
+ of them are included in the distributed tarballs. One exception is the
+ bindgen tool, which is too big to package and distribute. The minimum
+ supported version of bindgen is 0.60.x. For distributions that do not
+ include bindgen or have an older version, it is recommended to install
+ a newer version using ``cargo install bindgen-cli``.
+
+ QEMU requires Rust 1.77.0. This is available on all supported platforms
+ with one exception, namely the ``mips64el`` architecture on Debian bookworm.
+ For all other architectures, Debian bookworm provides a new-enough Rust
+ compiler in the ``rustc-web`` package.
+
+ Also, on Ubuntu 22.04 or 24.04 this requires the ``rustc-1.77``
+ (or newer) package. The path to ``rustc`` and ``rustdoc`` must be
+ provided manually to the configure script.
+
Optional build dependencies
- Build components whose absence does not affect the ability to build
- QEMU may not be available in distros, or may be too old for QEMU's
- requirements. Many of these, such as the Avocado testing framework
- or various linters, are written in Python and therefore can also
- be installed using ``pip``. Cross compilers are another example
+ Build components whose absence does not affect the ability to build QEMU
+ may not be available in distros, or may be too old for our requirements.
+ Many of these, such as additional modules for the functional testing
+ framework or various linters, are written in Python and therefore can
+ also be installed using ``pip``. Cross compilers are another example
of optional build-time dependency; in this case it is possible to
download them from repositories such as EPEL, to use container-based
cross compilation using ``docker`` or ``podman``, or to use pre-built
binaries distributed with QEMU.
-
Windows
-------
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 88f0f03..4203713 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -24,12 +24,6 @@ should exclusively use a non-deprecated machine type, with use of the most
recent version highly recommended. Non-versioned machine types follow the
general feature deprecation policy.
-Prior to the 2.10.0 release there was no official policy on how
-long features would be deprecated prior to their removal, nor
-any documented list of which features were deprecated. Thus
-any features deprecated prior to 2.10.0 will be treated as if
-they were first deprecated in the 2.10.0 release.
-
What follows is a list of all features currently marked as
deprecated.
@@ -74,11 +68,18 @@ configurations (e.g. -smp drawers=1,books=1,clusters=1 for x86 PC machine) is
marked deprecated since 9.0, users have to ensure that all the topology members
described with -smp are supported by the target machine.
-``-runas`` (since 9.1)
-----------------------
-
-Use ``-run-with user=..`` instead.
+``-old-param`` option for booting Arm kernels via param_struct (since 10.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+The ``-old-param`` command line option is specific to Arm targets:
+it is used when directly booting a guest kernel to pass it the
+command line and other information via the old ``param_struct`` ABI,
+rather than the newer ATAGS or DTB mechanisms. This option was only
+ever needed to support ancient kernels on some old board types
+like the ``akita`` or ``terrier``; it has been deprecated in the
+kernel since 2001. None of the board types QEMU supports need
+``param_struct`` support, so this option has been deprecated and will
+be removed in a future QEMU version.
User-mode emulator command line arguments
-----------------------------------------
@@ -147,32 +148,66 @@ options are removed in favor of using explicit ``blockdev-create`` and
``blockdev-add`` calls. See :doc:`/interop/live-block-operations` for
details.
-Incorrectly typed ``device_add`` arguments (since 6.2)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''
+``query-migrationthreads`` (since 9.2)
+''''''''''''''''''''''''''''''''''''''
+
+To be removed with no replacement, as it reports only a limited set of
+threads (for example, it only reports the source side of multifd threads,
+without reporting any destination threads or non-multifd source threads).
+For debugging purposes, please use ``-name $VM,debug-threads=on`` instead.
+
+``block-job-pause`` (since 10.1)
+''''''''''''''''''''''''''''''''
+
+Use ``job-pause`` instead. The only difference is that ``job-pause``
+always reports GenericError on failure, whereas ``block-job-pause``
+reports DeviceNotActive when the block job is not found.
+
+``block-job-resume`` (since 10.1)
+'''''''''''''''''''''''''''''''''
+
+Use ``job-resume`` instead. The only difference is that ``job-resume``
+always reports GenericError on failure, whereas ``block-job-resume``
+reports DeviceNotActive when the block job is not found.
+
+``block-job-complete`` (since 10.1)
+'''''''''''''''''''''''''''''''''''
-Due to shortcomings in the internal implementation of ``device_add``, QEMU
-incorrectly accepts certain invalid arguments: Any object or list arguments are
-silently ignored. Other argument types are not checked, but an implicit
-conversion happens, so that e.g. string values can be assigned to integer
-device properties or vice versa.
+Use ``job-complete`` instead. The only difference is that ``job-complete``
+always reports GenericError on failure, whereas ``block-job-complete``
+reports DeviceNotActive when the block job is not found.
-This is a bug in QEMU that will be fixed in the future so that previously
-accepted incorrect commands will return an error. Users should make sure that
-all arguments passed to ``device_add`` are consistent with the documented
-property types.
+``block-job-dismiss`` (since 10.1)
+''''''''''''''''''''''''''''''''''
+
+Use ``job-dismiss`` instead.
+
+``block-job-finalize`` (since 10.1)
+'''''''''''''''''''''''''''''''''''
+
+Use ``job-finalize`` instead.
+
+``migrate`` argument ``detach`` (since 10.1)
+''''''''''''''''''''''''''''''''''''''''''''
+
+This argument has always been ignored.
Host Architectures
------------------
-BE MIPS (since 7.2)
-'''''''''''''''''''
+Big endian MIPS since 7.2; 32-bit little endian MIPS since 9.2
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
As Debian 10 ("Buster") moved into LTS the big endian 32 bit version of
MIPS moved out of support making it hard to maintain our
cross-compilation CI tests of the architecture. As we no longer have
CI coverage support may bitrot away before the deprecation process
-completes. The little endian variants of MIPS (both 32 and 64 bit) are
-still a supported host architecture.
+completes.
+
+Likewise, the little endian variant of 32 bit MIPS is not supported by
+Debian 13 ("Trixie") and newer.
+
+64 bit little endian MIPS is still a supported host architecture.
System emulation on 32-bit x86 hosts (since 8.0)
''''''''''''''''''''''''''''''''''''''''''''''''
@@ -184,6 +219,53 @@ be an effective use of its limited resources, and thus intends to discontinue
it. Since all recent x86 hardware from the past >10 years is capable of the
64-bit x86 extensions, a corresponding 64-bit OS should be used instead.
+TCG Plugin support not enabled by default on 32-bit hosts (since 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+While it is still possible to enable TCG plugin support for 32-bit
+hosts, there are a number of potential pitfalls when instrumenting
+64-bit guests. The plugin APIs typically pass most addresses as
+uint64_t, but practices like encoding that address in a host pointer
+for passing as user-data will lose data. As most software analysis
+benefits from having plenty of host memory, it seems reasonable to
+encourage users to use 64-bit builds of QEMU for analysis work,
+whatever targets they are instrumenting.
+
+TCG Plugin support not enabled by default with TCI (since 9.2)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+While the TCG interpreter can interpret the TCG ops used by plugins,
+it is going to be so much slower that it wouldn't make sense for any
+serious instrumentation. Due to implementation differences there will
+also be anomalies in things like memory instrumentation.
+
+32-bit host operating systems (since 10.0)
+''''''''''''''''''''''''''''''''''''''''''
+
+Keeping 32-bit host support alive is a substantial burden for the
+QEMU project. Thus QEMU will drop support for all 32-bit host
+systems in a future release.
+
+linux-user mode CPUs
+--------------------
+
+iwMMXt emulation and the ``pxa`` CPUs (since 10.0)
+''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The ``pxa`` CPU family (``pxa250``, ``pxa255``, ``pxa260``,
+``pxa261``, ``pxa262``, ``pxa270-a0``, ``pxa270-a1``, ``pxa270``,
+``pxa270-b0``, ``pxa270-b1``, ``pxa270-c0``, ``pxa270-c5``) are no
+longer used in system emulation, because all the machine types which
+used these CPUs were removed in the QEMU 9.2 release. These CPUs can
+now only be used in linux-user mode, and to do that you would have to
+explicitly select one of these CPUs with the ``-cpu`` command line
+option or the ``QEMU_CPU`` environment variable.
+
+We don't believe that anybody is using the iwMMXt emulation, and we do
+not have any tests to validate it or any real hardware or similar
+known-good implementation to test against. GCC is in the process of
+dropping their support for iwMMXt codegen. These CPU types are
+therefore deprecated in QEMU, and will be removed in a future release.
System emulator CPUs
--------------------
@@ -206,17 +288,25 @@ in the QEMU object model anymore. ``Sun-UltraSparc-IIIi+`` and
but for consistency these will get removed in a future release, too.
Use ``Sun-UltraSparc-IIIi-plus`` and ``Sun-UltraSparc-IV-plus`` instead.
-CRIS CPU architecture (since 9.0)
-'''''''''''''''''''''''''''''''''
+PPC 405 CPUs (since 10.0)
+'''''''''''''''''''''''''
-The CRIS architecture was pulled from Linux in 4.17 and the compiler
-is no longer packaged in any distro making it harder to run the
-``check-tcg`` tests. Unless we can improve the testing situation there
-is a chance the code will bitrot without anyone noticing.
+The PPC 405 CPU has no known users and the ``ref405ep`` machine was
+removed in QEMU 10.0. Since the IBM POWER [8-11] processors use an
+embedded 405 for power management (OCC) and other internal tasks, it
+is theoretically possible to use QEMU to model them. Let's keep the
+CPU implementation for a while before removing all support.
System emulator machines
------------------------
+Versioned machine types (aarch64, arm, i386, m68k, ppc64, s390x, x86_64)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+In accordance with our versioned machine type deprecation policy, all machine
+types with version |VER_MACHINE_DEPRECATION_VERSION|, or older, have been
+deprecated.
+
Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@@ -225,57 +315,48 @@ deprecated; use the new name ``dtb-randomness`` instead. The new name
better reflects the way this property affects all random data within
the device tree blob, not just the ``kaslr-seed`` node.
-``pc-i440fx-2.4`` up to ``pc-i440fx-2.12`` (since 9.1)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''
+Mips ``mipssim`` machine (since 10.0)
+'''''''''''''''''''''''''''''''''''''
-These old machine types are quite neglected nowadays and thus might have
-various pitfalls with regards to live migration. Use a newer machine type
+Linux dropped support for this virtual machine type in kernel v3.7, and
+there does not seem to be anybody around who is still using this board
+in QEMU: Most former MIPS-related people are working on other architectures
+in their everyday job nowadays, and we are also not aware of anybody still
+using old binaries with this board (i.e. there is also no binary available
+online to check that this board has not completely bitrotted yet). It is
+recommended to use another MIPS machine for future MIPS code development
instead.
-``shix`` (since 9.0)
-''''''''''''''''''''
+RISC-V default machine option (since 10.0)
+''''''''''''''''''''''''''''''''''''''''''
-The machine is no longer in existence and has been long unmaintained
-in QEMU. This also holds for the TC51828 16MiB flash that it uses.
+RISC-V defines ``spike`` as the default machine if no machine option is
+given in the command line. This happens because ``spike`` is the first
+RISC-V machine implemented in QEMU and setting it as default was
+convenient at that time. Now we have 7 riscv64 and 6 riscv32 machines
+and having ``spike`` as a default is no longer justified. This default
+will also promote situations where users think they're running ``virt``
+(the most used RISC-V machine type in 10.0) when in fact they're
+running ``spike``.
-``pseries-2.1`` up to ``pseries-2.12`` (since 9.0)
-''''''''''''''''''''''''''''''''''''''''''''''''''
+Removing the default machine option forces users to always set the machine
+they want to use and avoids confusion. Existing users of the ``spike``
+machine must ensure that they're setting the ``spike`` machine in the
+command line (``-M spike``).
+
+
+System emulator binaries
+------------------------
+
+``qemu-system-microblazeel`` (since 10.1)
+'''''''''''''''''''''''''''''''''''''''''
+
+The ``qemu-system-microblaze`` binary can emulate little-endian machines
+now, too, so the separate binary ``qemu-system-microblazeel`` (with the
+``el`` suffix) for little-endian targets is not required anymore. The
+``petalogix-s3adsp1800`` machine can now be switched to little endian by
+setting its ``endianness`` property to ``little``.
-Older pseries machines before version 3.0 have undergone many changes
-to correct issues, mostly regarding migration compatibility. These are
-no longer maintained and removing them will make the code easier to
-read and maintain. Use versions 3.0 and above as a replacement.
-
-Arm machines ``akita``, ``borzoi``, ``cheetah``, ``connex``, ``mainstone``, ``n800``, ``n810``, ``spitz``, ``terrier``, ``tosa``, ``verdex``, ``z2`` (since 9.0)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-QEMU includes models of some machine types where the QEMU code that
-emulates their SoCs is very old and unmaintained. This code is now
-blocking our ability to move forward with various changes across
-the codebase, and over many years nobody has been interested in
-trying to modernise it. We don't expect any of these machines to have
-a large number of users, because they're all modelling hardware that
-has now passed away into history. We are therefore dropping support
-for all machine types using the PXA2xx and OMAP2 SoCs. We are also
-dropping the ``cheetah`` OMAP1 board, because we don't have any
-test images for it and don't know of anybody who does; the ``sx1``
-and ``sx1-v1`` OMAP1 machines remain supported for now.
-
-PPC 405 ``ref405ep`` machine (since 9.1)
-''''''''''''''''''''''''''''''''''''''''
-
-The ``ref405ep`` machine and PPC 405 CPU have no known users, firmware
-images are not available, OpenWRT dropped support in 2019, U-Boot in
-2017, Linux also is dropping support in 2024. It is time to let go of
-this ancient hardware and focus on newer CPUs and platforms.
-
-Arm ``tacoma-bmc`` machine (since 9.1)
-''''''''''''''''''''''''''''''''''''''''
-
-The ``tacoma-bmc`` machine was a board including an AST2600 SoC based
-BMC and a witherspoon like OpenPOWER system. It was used for bring up
-of the AST2600 SoC in labs. It can be easily replaced by the
-``rainier-bmc`` machine which is a real product.
Backend options
---------------
@@ -324,41 +405,6 @@ the addition of volatile memory support, it is now necessary to distinguish
between persistent and volatile memory backends. As such, memdev is deprecated
in favor of persistent-memdev.
-``-fsdev proxy`` and ``-virtfs proxy`` (since 8.1)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The 9p ``proxy`` filesystem backend driver has been deprecated and will be
-removed (along with its proxy helper daemon) in a future version of QEMU. Please
-use ``-fsdev local`` or ``-virtfs local`` for using the 9p ``local`` filesystem
-backend, or alternatively consider deploying virtiofsd instead.
-
-The 9p ``proxy`` backend was originally developed as an alternative to the 9p
-``local`` backend. The idea was to enhance security by dispatching actual low
-level filesystem operations from 9p server (QEMU process) over to a separate
-process (the virtfs-proxy-helper binary). However this alternative never gained
-momentum. The proxy backend is much slower than the local backend, hasn't seen
-any development in years, and showed to be less secure, especially due to the
-fact that its helper daemon must be run as root, whereas with the local backend
-QEMU is typically run as unprivileged user and allows to tighten behaviour by
-mapping permissions et al by using its 'mapped' security model option.
-
-Nowadays it would make sense to reimplement the ``proxy`` backend by using
-QEMU's ``vhost`` feature, which would eliminate the high latency costs under
-which the 9p ``proxy`` backend currently suffers. However as of to date nobody
-has indicated plans for such kind of reimplementation unfortunately.
-
-RISC-V 'any' CPU type ``-cpu any`` (since 8.2)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The 'any' CPU type was introduced back in 2018 and has been around since the
-initial RISC-V QEMU port. Its usage has always been unclear: users don't know
-what to expect from a CPU called 'any', and in fact the CPU does not do anything
-special that isn't already done by the default CPUs rv32/rv64.
-
-After the introduction of the 'max' CPU type, RISC-V now has a good coverage
-of generic CPUs: rv32 and rv64 as default CPUs and 'max' as a feature complete
-CPU for both 32 and 64 bit builds. Users are then discouraged to use the 'any'
-CPU type starting in 8.2.
RISC-V CPU properties which start with capital 'Z' (since 8.2)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -422,6 +468,15 @@ Specifying the iSCSI password in plain text on the command line using the
used instead, to refer to a ``--object secret...`` instance that provides
a password via a file, or encrypted.
+``gluster`` backend (since 9.2)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+According to https://marc.info/?l=fedora-devel-list&m=171934833215726
+GlusterFS development has effectively ended. Unless the development
+gains momentum again, the QEMU project will remove the gluster backend
+in a future release.
+
+
Character device options
''''''''''''''''''''''''
@@ -430,16 +485,49 @@ Backend ``memory`` (since 9.0)
``memory`` is a deprecated synonym for ``ringbuf``.
-CPU device properties
-'''''''''''''''''''''
+``reconnect`` (since 9.2)
+^^^^^^^^^^^^^^^^^^^^^^^^^
-``pcommit`` on x86 (since 9.1)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The ``reconnect`` option only allows specifying second-granularity timeouts,
+which is not enough for all types of use cases; use ``reconnect-ms`` instead.
-The PCOMMIT instruction was never included in any physical processor.
-It was implemented as a no-op instruction in TCG up to QEMU 9.0, but
-only with ``-cpu max`` (which does not guarantee migration compatibility
-across versions).
+
+Net device options
+''''''''''''''''''
+
+Stream ``reconnect`` (since 9.2)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``reconnect`` option only allows specifying second-granularity timeouts,
+which is not enough for all types of use cases; use ``reconnect-ms`` instead.
+
+VFIO device options
+'''''''''''''''''''
+
+``-device vfio-calxeda-xgmac`` (since 10.0)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The vfio-calxeda-xgmac device allows assigning a host Calxeda Highbank
+10Gb XGMAC Ethernet controller device ("calxeda,hb-xgmac" compatibility
+string) to a guest. Calxeda hardware has long since been e-wasted and
+there is no point in keeping that device.
+
+``-device vfio-amd-xgbe`` (since 10.0)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The vfio-amd-xgbe device allows assigning a host AMD 10GbE controller
+("amd,xgbe-seattle-v1a" compatibility string) to a guest. AMD "Seattle"
+is not supported anymore and there is no point in keeping that device.
+
+``-device vfio-platform`` (since 10.0)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The vfio-platform device allows assigning a host platform device
+to a guest in a generic manner. Integrating a new device into
+the vfio-platform infrastructure requires some adaptation at
+both the kernel and QEMU level. No such attempt has been made for
+years and the conclusion is that vfio-platform has not gained any
+traction. PCIe passthrough should be the mainline solution.
+
+CPU device properties
+'''''''''''''''''''''
``pmu-num=n`` on RISC-V CPUs (since 8.2)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -450,6 +538,14 @@ be calculated with ``((2 ^ n) - 1) << 3``. The least significant three bits
must be left clear.
+``pcommit`` on x86 (since 9.1)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The PCOMMIT instruction was never included in any physical processor.
+It was implemented as a no-op instruction in TCG up to QEMU 9.0, but
+only with ``-cpu max`` (which does not guarantee migration compatibility
+across versions).
+
Backwards compatibility
-----------------------
@@ -503,3 +599,9 @@ usage of providing a file descriptor to a plain file has been
deprecated in favor of explicitly using the ``file:`` URI with the
file descriptor being passed as an ``fdset``. Refer to the ``add-fd``
command documentation for details on the ``fdset`` usage.
+
+``zero-blocks`` capability (since 9.2)
+''''''''''''''''''''''''''''''''''''''
+
+The ``zero-blocks`` capability was part of the block migration which
+doesn't exist anymore since it was removed in QEMU v9.1.
diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst
index b5ff9c5..456d01d 100644
--- a/docs/about/emulation.rst
+++ b/docs/about/emulation.rst
@@ -26,10 +26,6 @@ depending on the guest architecture.
- :ref:`Yes<AVR-System-emulator>`
- No
- 8 bit micro controller, often used in maker projects
- * - Cris
- - Yes
- - Yes
- - Embedded RISC chip developed by AXIS
* - Hexagon
- No
- Yes
@@ -42,7 +38,7 @@ depending on the guest architecture.
- :ref:`Yes<QEMU-PC-System-emulator>`
- Yes
- The ubiquitous desktop PC CPU architecture, 32 and 64 bit.
- * - Loongarch
+ * - LoongArch
- Yes
- Yes
- A MIPS-like 64bit RISC architecture developed in China
@@ -95,9 +91,6 @@ depending on the guest architecture.
- Yes
- A configurable 32 bit soft core now owned by Cadence
-A number of features are only available when running under
-emulation including :ref:`Record/Replay<replay>` and :ref:`TCG Plugins`.
-
.. _Semihosting:
Semihosting
@@ -178,7 +171,654 @@ for that architecture.
- Unified Hosting Interface (MD01069)
* - RISC-V
- System and User-mode
- - https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
+ - https://github.com/riscv-non-isa/riscv-semihosting/blob/main/riscv-semihosting.adoc
* - Xtensa
- System
- Tensilica ISS SIMCALL
+
+.. _tcg-plugins:
+
+TCG Plugins
+-----------
+
+QEMU TCG plugins provide a way for users to run experiments taking
+advantage of the total system control emulation can have over a guest.
+It provides a mechanism for plugins to subscribe to events during
+translation and execution and optionally call back into the plugin
+during these events. TCG plugins are unable to change the system
+state; they can only monitor it passively. However, they can do this
+down to individual instruction granularity, including potentially
+subscribing to all load and store operations.
+
+See the developer section of the manual for details about
+:ref:`writing plugins<TCG Plugins>`.
+
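+To give a feel for the shape of a plugin, the following is a minimal
+sketch against the public ``qemu-plugin.h`` API (it is illustrative
+only and not one of the plugins shipped in the tree). A plugin is a
+shared object that exports ``qemu_plugin_install`` and registers
+callbacks for the events it is interested in::
+
+    #include <inttypes.h>
+    #include <stdint.h>
+    #include <glib.h>
+    #include <qemu-plugin.h>
+
+    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+    static uint64_t tb_count; /* not atomic; good enough for a sketch */
+
+    /* called each time a new translation block is translated */
+    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+    {
+        tb_count++;
+    }
+
+    /* called once when emulation finishes */
+    static void plugin_exit(qemu_plugin_id_t id, void *p)
+    {
+        g_autofree gchar *msg =
+            g_strdup_printf("translated blocks: %" PRIu64 "\n", tb_count);
+        qemu_plugin_outs(msg);
+    }
+
+    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+                                               const qemu_info_t *info,
+                                               int argc, char **argv)
+    {
+        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+        return 0;
+    }
+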
+Usage
+~~~~~
+
+Any QEMU binary with TCG support has plugins enabled by default.
+In earlier releases plugin support had to be explicitly enabled with::
+
+ configure --enable-plugins
+
+Once built, a program can be run with multiple plugins loaded, each
+with their own arguments::
+
+ $QEMU $OTHER_QEMU_ARGS \
+ -plugin contrib/plugins/libhowvec.so,inline=on,count=hint \
+ -plugin contrib/plugins/libhotblocks.so
+
+Arguments are plugin specific and can be used to modify their
+behaviour. In this case the howvec plugin is being asked to use inline
+ops to count and break down the hint instructions by type.
+
+Linux user-mode emulation also evaluates the environment variable
+``QEMU_PLUGIN``::
+
+ QEMU_PLUGIN="file=contrib/plugins/libhowvec.so,inline=on,count=hint" $QEMU
+
+QEMU plugins avoid writing directly to stdout/stderr and instead use the
+log provided by the API (see the ``qemu_plugin_outs`` function).
+To show that output, you may use this additional parameter::
+
+ $QEMU $OTHER_QEMU_ARGS \
+ -d plugin \
+ -plugin contrib/plugins/libhowvec.so,inline=on,count=hint
+
+Example Plugins
+~~~~~~~~~~~~~~~
+
+There are a number of plugins included with QEMU and you are
+encouraged to contribute your own plugins upstream. There is a
+``contrib/plugins`` directory where they can go. There are also some
+basic plugins in ``tests/tcg/plugins`` that are used to test and
+exercise the API during the ``make check-tcg`` target, but which are
+nevertheless useful for basic analysis.
+
+Empty
+.....
+
+``tests/tcg/plugins/empty.c``
+
+Purely a test plugin for measuring the overhead of the plugins system
+itself. Does no instrumentation.
+
+Basic Blocks
+............
+
+``tests/tcg/plugins/bb.c``
+
+A very basic plugin which will measure execution in coarse terms as
+each basic block is executed. By default the results are shown once
+execution finishes::
+
+ $ qemu-aarch64 -plugin tests/plugin/libbb.so \
+ -d plugin ./tests/tcg/aarch64-linux-user/sha1
+ SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
+ bb's: 2277338, insns: 158483046
+
+Behaviour can be tweaked with the following arguments:
+
+.. list-table:: Basic Block plugin arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - inline=true|false
+ - Use faster inline addition of a single counter.
+ * - idle=true|false
+ - Dump the current execution stats whenever the guest vCPU idles
+
+Basic Block Vectors
+...................
+
+``contrib/plugins/bbv.c``
+
+The bbv plugin allows you to generate basic block vectors for use with the
+`SimPoint <https://cseweb.ucsd.edu/~calder/simpoint/>`__ analysis tool.
+
+.. list-table:: Basic block vectors arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - interval=N
+ - The interval to generate a basic block vector specified by the number of
+ instructions (Default: N = 100000000)
+ * - outfile=PATH
+ - The path to output files.
+ It will be suffixed with ``.N.bb`` where ``N`` is a vCPU index.
+
+Example::
+
+ $ qemu-aarch64 \
+ -plugin contrib/plugins/libbbv.so,interval=100,outfile=sha1 \
+ tests/tcg/aarch64-linux-user/sha1
+ SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
+ $ du sha1.0.bb
+ 23128 sha1.0.bb
+
+Instruction
+...........
+
+``tests/tcg/plugins/insn.c``
+
+This is a basic instruction level instrumentation which can count the
+number of instructions executed on each core/thread::
+
+ $ qemu-aarch64 -plugin tests/plugin/libinsn.so \
+ -d plugin ./tests/tcg/aarch64-linux-user/threadcount
+ Created 10 threads
+ Done
+ cpu 0 insns: 46765
+ cpu 1 insns: 3694
+ cpu 2 insns: 3694
+ cpu 3 insns: 2994
+ cpu 4 insns: 1497
+ cpu 5 insns: 1497
+ cpu 6 insns: 1497
+ cpu 7 insns: 1497
+ total insns: 63135
+
+Behaviour can be tweaked with the following arguments:
+
+.. list-table:: Instruction plugin arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - inline=true|false
+ - Use faster inline addition of a single counter.
+ * - sizes=true|false
+ - Give a summary of the instruction sizes for the execution
+ * - match=<string>
+ - Only instrument instructions matching the string prefix
+
+The ``match`` option will show some basic stats including how many
+instructions have executed since the last execution. For
+example::
+
+ $ qemu-aarch64 -plugin tests/plugin/libinsn.so,match=bl \
+ -d plugin ./tests/tcg/aarch64-linux-user/sha512-vector
+ ...
+ 0x40069c, 'bl #0x4002b0', 10 hits, 1093 match hits, Δ+1257 since last match, 98 avg insns/match
+ 0x4006ac, 'bl #0x403690', 10 hits, 1094 match hits, Δ+47 since last match, 98 avg insns/match
+ 0x4037fc, 'bl #0x4002b0', 18 hits, 1095 match hits, Δ+22 since last match, 98 avg insns/match
+ 0x400720, 'bl #0x403690', 10 hits, 1096 match hits, Δ+58 since last match, 98 avg insns/match
+ 0x4037fc, 'bl #0x4002b0', 19 hits, 1097 match hits, Δ+22 since last match, 98 avg insns/match
+ 0x400730, 'bl #0x403690', 10 hits, 1098 match hits, Δ+33 since last match, 98 avg insns/match
+ 0x4037ac, 'bl #0x4002b0', 12 hits, 1099 match hits, Δ+20 since last match, 98 avg insns/match
+ ...
+
+For more detailed execution tracing see the ``execlog`` plugin for
+other options.
+
+Memory
+......
+
+``tests/tcg/plugins/mem.c``
+
+Basic instruction level memory instrumentation::
+
+ $ qemu-aarch64 -plugin tests/plugin/libmem.so,inline=true \
+ -d plugin ./tests/tcg/aarch64-linux-user/sha1
+ SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
+ inline mem accesses: 79525013
+
+Behaviour can be tweaked with the following arguments:
+
+.. list-table:: Memory plugin arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - inline=true|false
+ - Use faster inline addition of a single counter
+ * - callback=true|false
+ - Use callbacks on each memory instrumentation.
+ * - hwaddr=true|false
+ - Count IO accesses (only for system emulation)
+
+System Calls
+............
+
+``tests/tcg/plugins/syscall.c``
+
+A basic syscall tracing plugin. This only works for user-mode. By
+default it will give a summary of syscall stats at the end of the
+run::
+
+ $ qemu-aarch64 -plugin tests/plugin/libsyscall \
+ -d plugin ./tests/tcg/aarch64-linux-user/threadcount
+ Created 10 threads
+ Done
+ syscall no. calls errors
+ 226 12 0
+ 99 11 11
+ 115 11 0
+ 222 11 0
+ 93 10 0
+ 220 10 0
+ 233 10 0
+ 215 8 0
+ 214 4 0
+ 134 2 0
+ 64 2 0
+ 96 1 0
+ 94 1 0
+ 80 1 0
+ 261 1 0
+ 78 1 0
+ 160 1 0
+ 135 1 0
+
+Behaviour can be tweaked with the following arguments:
+
+.. list-table:: Syscall plugin arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - print=true|false
+ - Print the number of times each syscall is called
+ * - log_writes=true|false
+ - Log the buffer of each write syscall in hexdump format
+
+Test inline operations
+......................
+
+``tests/tcg/plugins/inline.c``
+
+This plugin is used for testing all inline operations, conditional callbacks and
+scoreboard. It prints a per-cpu summary of all events.
+
+
+Hot Blocks
+..........
+
+``contrib/plugins/hotblocks.c``
+
+The hotblocks plugin allows you to examine where the hot paths of
+execution are in your program. Once the program has finished you will
+get a sorted list of blocks reporting the starting PC, translation
+count, number of instructions and execution count. This will work best
+with linux-user execution as system emulation tends to generate
+re-translations as blocks from different programs get swapped in and
+out of system memory.
+
+Example::
+
+ $ qemu-aarch64 \
+ -plugin contrib/plugins/libhotblocks.so -d plugin \
+ ./tests/tcg/aarch64-linux-user/sha1
+ SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
+ collected 903 entries in the hash table
+ pc, tcount, icount, ecount
+ 0x0000000041ed10, 1, 5, 66087
+ 0x000000004002b0, 1, 4, 66087
+ ...
+
+
+Hot Pages
+.........
+
+``contrib/plugins/hotpages.c``
+
+Similar to hotblocks but this time tracks memory accesses::
+
+ $ qemu-aarch64 \
+ -plugin contrib/plugins/libhotpages.so -d plugin \
+ ./tests/tcg/aarch64-linux-user/sha1
+ SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
+ Addr, RCPUs, Reads, WCPUs, Writes
+ 0x000055007fe000, 0x0001, 31747952, 0x0001, 8835161
+ 0x000055007ff000, 0x0001, 29001054, 0x0001, 8780625
+ 0x00005500800000, 0x0001, 687465, 0x0001, 335857
+ 0x0000000048b000, 0x0001, 130594, 0x0001, 355
+ 0x0000000048a000, 0x0001, 1826, 0x0001, 11
+
+The hotpages plugin can be configured using the following arguments:
+
+.. list-table:: Hot pages arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - sortby=reads|writes|address
+ - Log the data sorted by either the number of reads, the number of writes, or
+ memory address. (Default: entries are sorted by the sum of reads and writes)
+ * - io=on
+ - Track IO addresses. Only relevant to full system emulation. (Default: off)
+ * - pagesize=N
+ - The page size used. (Default: N = 4096)
+
+Instruction Distribution
+........................
+
+``contrib/plugins/howvec.c``
+
+This is an instruction classifier, so it can be used to count different
+types of instructions. It has a number of options to refine which
+instructions get counted. You can give a value to the ``count``
+argument for a class of instructions to break it down fully, so for
+example to see all the system register accesses::
+
+ $ qemu-system-aarch64 $(QEMU_ARGS) \
+ -append "root=/dev/sda2 systemd.unit=benchmark.service" \
+ -smp 4 -plugin ./contrib/plugins/libhowvec.so,count=sreg -d plugin
+
+which will lead to a sorted list after the class breakdown::
+
+ Instruction Classes:
+ Class: UDEF not counted
+ Class: SVE (68 hits)
+ Class: PCrel addr (47789483 hits)
+ Class: Add/Sub (imm) (192817388 hits)
+ Class: Logical (imm) (93852565 hits)
+ Class: Move Wide (imm) (76398116 hits)
+ Class: Bitfield (44706084 hits)
+ Class: Extract (5499257 hits)
+ Class: Cond Branch (imm) (147202932 hits)
+ Class: Exception Gen (193581 hits)
+ Class: NOP not counted
+ Class: Hints (6652291 hits)
+ Class: Barriers (8001661 hits)
+ Class: PSTATE (1801695 hits)
+ Class: System Insn (6385349 hits)
+ Class: System Reg counted individually
+ Class: Branch (reg) (69497127 hits)
+ Class: Branch (imm) (84393665 hits)
+ Class: Cmp & Branch (110929659 hits)
+ Class: Tst & Branch (44681442 hits)
+ Class: AdvSimd ldstmult (736 hits)
+ Class: ldst excl (9098783 hits)
+ Class: Load Reg (lit) (87189424 hits)
+ Class: ldst noalloc pair (3264433 hits)
+ Class: ldst pair (412526434 hits)
+ Class: ldst reg (imm) (314734576 hits)
+ Class: Loads & Stores (2117774 hits)
+ Class: Data Proc Reg (223519077 hits)
+ Class: Scalar FP (31657954 hits)
+ Individual Instructions:
+ Instr: mrs x0, sp_el0 (2682661 hits) (op=0xd5384100/ System Reg)
+ Instr: mrs x1, tpidr_el2 (1789339 hits) (op=0xd53cd041/ System Reg)
+ Instr: mrs x2, tpidr_el2 (1513494 hits) (op=0xd53cd042/ System Reg)
+ Instr: mrs x0, tpidr_el2 (1490823 hits) (op=0xd53cd040/ System Reg)
+ Instr: mrs x1, sp_el0 (933793 hits) (op=0xd5384101/ System Reg)
+ Instr: mrs x2, sp_el0 (699516 hits) (op=0xd5384102/ System Reg)
+ Instr: mrs x4, tpidr_el2 (528437 hits) (op=0xd53cd044/ System Reg)
+ Instr: mrs x30, ttbr1_el1 (480776 hits) (op=0xd538203e/ System Reg)
+ Instr: msr ttbr1_el1, x30 (480713 hits) (op=0xd518203e/ System Reg)
+ Instr: msr vbar_el1, x30 (480671 hits) (op=0xd518c01e/ System Reg)
+ ...
+
+At the moment, to find the argument shorthand for a class you need to
+examine the source code of the plugin, specifically the ``*opt``
+argument in the InsnClassExecCount tables.
+
+Lockstep Execution
+..................
+
+``contrib/plugins/lockstep.c``
+
+This is a debugging tool for developers who want to find out when and
+where execution diverges after a subtle change to TCG code generation.
+It is not an exact science and results are likely to be mixed once
+asynchronous events are introduced. While the use of -icount can
+introduce determinism to the execution flow, it doesn't always follow
+that the translation sequence will be exactly the same. Typically this is
+caused by a timer firing to service the GUI causing a block to end
+early. However in some cases it has proved to be useful in pointing
+people at roughly where execution diverges. The only argument you need
+for the plugin is a path for the socket the two instances will
+communicate over::
+
+
+ $ qemu-system-sparc -monitor none -parallel none \
+ -net none -M SS-20 -m 256 -kernel day11/zImage.elf \
+ -plugin ./contrib/plugins/liblockstep.so,sockpath=lockstep-sparc.sock \
+ -d plugin,nochain
+
+which will eventually report::
+
+ qemu-system-sparc: warning: nic lance.0 has no peer
+ @ 0x000000ffd06678 vs 0x000000ffd001e0 (2/1 since last)
+ @ 0x000000ffd07d9c vs 0x000000ffd06678 (3/1 since last)
+ Δ insn_count @ 0x000000ffd07d9c (809900609) vs 0x000000ffd06678 (809900612)
+ previously @ 0x000000ffd06678/10 (809900609 insns)
+ previously @ 0x000000ffd001e0/4 (809900599 insns)
+ previously @ 0x000000ffd080ac/2 (809900595 insns)
+ previously @ 0x000000ffd08098/5 (809900593 insns)
+ previously @ 0x000000ffd080c0/1 (809900588 insns)
+
+
+Hardware Profile
+................
+
+``contrib/plugins/hwprofile.c``
+
+The hwprofile tool can only be used with system emulation and allows
+the user to see which hardware is accessed and how often. It has a number of options:
+
+.. list-table:: Hardware Profile arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - track=[read|write]
+ - By default the plugin tracks both reads and writes. You can use
+ this option to limit the tracking to just one class of accesses.
+ * - source
+ - Include a detailed breakdown of the guest PCs that made the
+ accesses. Not compatible with the pattern option. Example output::
+
+ cirrus-low-memory @ 0xfffffd00000a0000
+ pc:fffffc0000005cdc, 1, 256
+ pc:fffffc0000005ce8, 1, 256
+ pc:fffffc0000005cec, 1, 256
+
+ * - pattern
+ - Instead break down the accesses based on the offset into the HW
+ region. This can be useful for seeing the most used registers of
+ a device. Example output::
+
+ pci0-conf @ 0xfffffd01fe000000
+ off:00000004, 1, 1
+ off:00000010, 1, 3
+ off:00000014, 1, 3
+ off:00000018, 1, 2
+ off:0000001c, 1, 2
+ off:00000020, 1, 2
+ ...
+
+
+Execution Log
+.............
+
+``contrib/plugins/execlog.c``
+
+The execlog tool traces executed instructions along with their memory
+accesses. It can be used for debugging and security analysis purposes.
+Please be aware that this will generate a lot of output.
+
+The plugin can be run with no arguments::
+
+ $ qemu-system-arm $(QEMU_ARGS) \
+ -plugin ./contrib/plugins/libexeclog.so -d plugin
+
+which will output an execution trace following this structure::
+
+ # vCPU, vAddr, opcode, disassembly[, load/store, memory addr, device]...
+ 0, 0xa12, 0xf8012400, "movs r4, #0"
+ 0, 0xa14, 0xf87f42b4, "cmp r4, r6"
+ 0, 0xa16, 0xd206, "bhs #0xa26"
+ 0, 0xa18, 0xfff94803, "ldr r0, [pc, #0xc]", load, 0x00010a28, RAM
+ 0, 0xa1a, 0xf989f000, "bl #0xd30"
+ 0, 0xd30, 0xfff9b510, "push {r4, lr}", store, 0x20003ee0, RAM, store, 0x20003ee4, RAM
+ 0, 0xd32, 0xf9893014, "adds r0, #0x14"
+ 0, 0xd34, 0xf9c8f000, "bl #0x10c8"
+ 0, 0x10c8, 0xfff96c43, "ldr r3, [r0, #0x44]", load, 0x200000e4, RAM
+
+Please note that you need to configure QEMU with Capstone support to get disassembly.
+
+The output can be filtered to only track certain instructions or
+addresses using the ``ifilter`` or ``afilter`` options. You can stack the
+arguments if required::
+
+ $ qemu-system-arm $(QEMU_ARGS) \
+ -plugin ./contrib/plugins/libexeclog.so,ifilter=st1w,afilter=0x40001808 -d plugin
+
+This plugin can also dump registers when they change value. Specify the name of the
+registers with multiple ``reg`` options. You can also use glob style matching if you wish::
+
+ $ qemu-system-arm $(QEMU_ARGS) \
+ -plugin ./contrib/plugins/libexeclog.so,reg=\*_el2,reg=sp -d plugin
+
+Be aware that each additional register to check will slow down
+execution quite considerably. You can optimise the number of register
+checks done by using the rdisas option. This will only instrument
+instructions that mention the registers in question in disassembly.
+This is not foolproof as some instructions implicitly change registers.
+You can use the ifilter to catch these cases::
+
+ $ qemu-system-arm $(QEMU_ARGS) \
+ -plugin ./contrib/plugins/libexeclog.so,ifilter=msr,ifilter=blr,reg=x30,reg=\*_el1,rdisas=on
+
+Cache Modelling
+...............
+
+``contrib/plugins/cache.c``
+
+Cache modelling plugin that measures the performance of a given L1 cache
+configuration, and optionally a unified L2 per-core cache when a given working
+set is run::
+
+ $ qemu-x86_64 -plugin ./contrib/plugins/libcache.so \
+ -d plugin -D cache.log ./tests/tcg/x86_64-linux-user/float_convs
+
+will report the following::
+
+ core #, data accesses, data misses, dmiss rate, insn accesses, insn misses, imiss rate
+ 0 996695 508 0.0510% 2642799 18617 0.7044%
+
+ address, data misses, instruction
+ 0x424f1e (_int_malloc), 109, movq %rax, 8(%rcx)
+ 0x41f395 (_IO_default_xsputn), 49, movb %dl, (%rdi, %rax)
+ 0x42584d (ptmalloc_init.part.0), 33, movaps %xmm0, (%rax)
+ 0x454d48 (__tunables_init), 20, cmpb $0, (%r8)
+ ...
+
+ address, fetch misses, instruction
+ 0x4160a0 (__vfprintf_internal), 744, movl $1, %ebx
+ 0x41f0a0 (_IO_setb), 744, endbr64
+ 0x415882 (__vfprintf_internal), 744, movq %r12, %rdi
+ 0x4268a0 (__malloc), 696, andq $0xfffffffffffffff0, %rax
+ ...
+
+The plugin has a number of arguments, all of which are optional:
+
+.. list-table:: Cache modelling arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - limit=N
+ - Print top N icache and dcache thrashing instructions along with
+ their address, number of misses, and disassembly. (default: 32)
+ * - icachesize=N
+ iblksize=B
+ iassoc=A
+ - Instruction cache configuration arguments. They specify the
+ cache size, block size, and associativity of the instruction
+ cache, respectively. (default: N = 16384, B = 64, A = 8)
+ * - dcachesize=N
+ - Data cache size (default: 16384)
+ * - dblksize=B
+ - Data cache block size (default: 64)
+ * - dassoc=A
+ - Data cache associativity (default: 8)
+ * - evict=POLICY
+ - Sets the eviction policy to POLICY. Available policies are:
+ ``lru``, ``fifo``, and ``rand``. The plugin will use
+ the specified policy for both instruction and data caches.
+ (default: POLICY = ``lru``)
+ * - cores=N
+ - Sets the number of cores for which we maintain separate icache
+ and dcache. (default: for linux-user, N = 1, for full system
+ emulation: N = cores available to guest)
+ * - l2=on
+ - Simulates a unified L2 cache (stores blocks for both
+ instructions and data) using the default L2 configuration (cache
+ size = 2MB, associativity = 16-way, block size = 64B).
+ * - l2cachesize=N
+ - L2 cache size (default: 2097152 (2MB)), implies ``l2=on``
+ * - l2blksize=B
+ - L2 cache block size (default: 64), implies ``l2=on``
+ * - l2assoc=A
+ - L2 cache associativity (default: 16), implies ``l2=on``
+
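+As a rough guide to what the ``*cachesize``/``*blksize``/``*assoc``
+arguments mean, a set-associative model splits an address along these
+lines (a sketch of the general scheme using the default instruction
+cache geometry, not a quote of the plugin's code)::
+
+    /* cachesize = 16384, blksize = 64, assoc = 8  =>  32 sets */
+    uint64_t num_sets  = cachesize / (blksize * assoc);
+    uint64_t block_off = addr & (blksize - 1);            /* byte in block  */
+    uint64_t set_index = (addr / blksize) & (num_sets - 1);
+    uint64_t tag       = addr / (blksize * num_sets);     /* matched in set */
+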
+Stop on Trigger
+...............
+
+``contrib/plugins/stoptrigger.c``
+
+The stoptrigger plugin allows you to set up triggers to stop emulation.
+It can be used for research purposes to launch some code, stop it
+precisely, and understand where its execution flow went.
+
+Two types of triggers can be configured: a count of instructions to stop at,
+or an address to stop at. Multiple triggers can be set at once.
+
+By default, QEMU will exit with return code 0. A custom return code can be
+configured for each trigger using ``:CODE`` syntax.
+
+For example, to stop at the 20th instruction with return code 41, at
+address 0xd4 with return code 0, or at address 0xd8 with return code 42::
+
+ $ qemu-system-aarch64 $(QEMU_ARGS) \
+ -plugin ./contrib/plugins/libstoptrigger.so,icount=20:41,addr=0xd4,addr=0xd8:42 -d plugin
+
+The plugin will log the reason of exit, for example::
+
+ 0xd4 reached, exiting
+
+Limit instructions per second
+.............................
+
+This plugin can limit the number of Instructions Per Second that are executed::
+
+ # get number of instructions
+ $ num_insn=$(./build/qemu-x86_64 -plugin ./build/tests/plugin/libinsn.so -d plugin /bin/true |& grep total | sed -e 's/.*: //')
+ # limit speed to execute in 10 seconds
+ $ time ./build/qemu-x86_64 -plugin ./build/contrib/plugins/libips.so,ips=$(($num_insn/10)) /bin/true
+ real 10.000s
+
+
+.. list-table:: IPS arguments
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Option
+ - Description
+ * - ips=N
+ - Maximum number of instructions per cpu that can be executed in one second.
+ The plugin will sleep when the given number of instructions is reached.
+ * - ipq=N
+ - Instructions per quantum. How many instructions before we re-calculate time.
+ The lower the number the more accurate time will be, but the less efficient the plugin.
+ Defaults to ips/10
+
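+The throttling behind ``ips``/``ipq`` is plain rate arithmetic; roughly
+(an illustrative sketch, not the plugin's code; ``sleep_for_ns`` is a
+hypothetical helper)::
+
+    /* after each quantum of ipq instructions on a vCPU */
+    int64_t budget_ns  = (executed_insns * 1000000000LL) / ips;
+    int64_t elapsed_ns = now_ns - start_ns;
+    if (elapsed_ns < budget_ns) {
+        sleep_for_ns(budget_ns - elapsed_ns);   /* stay under the ips cap */
+    }
+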
+Other emulation features
+------------------------
+
+When running system emulation you can also enable deterministic
+execution, which allows for repeatable record/replay debugging. See
+:ref:`Record/Replay<replay>` for more details.
diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst
index fc7b28e..d7c2113 100644
--- a/docs/about/removed-features.rst
+++ b/docs/about/removed-features.rst
@@ -162,6 +162,12 @@ specified with ``-mem-path`` can actually provide the guest RAM configured with
The ``name`` parameter of the ``-net`` option was a synonym
for the ``id`` parameter, which should now be used instead.
+RISC-V firmware not booted by default (removed in 5.1)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default``
+for the RISC-V ``virt`` machine and ``sifive_u`` machine.
+
``-numa node,mem=...`` (removed in 5.1)
'''''''''''''''''''''''''''''''''''''''
@@ -324,12 +330,6 @@ devices. Drives the board doesn't pick up can no longer be used with
This option was undocumented and not used in the field.
Use ``-device usb-ccid`` instead.
-RISC-V firmware not booted by default (removed in 5.1)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default``
-for the RISC-V ``virt`` machine and ``sifive_u`` machine.
-
``-no-quit`` (removed in 7.0)
'''''''''''''''''''''''''''''
@@ -355,13 +355,13 @@ The ``-writeconfig`` option was not able to serialize the entire contents
of the QEMU command line. It is thus considered a failed experiment
and removed without a replacement.
-``loaded`` property of ``secret`` and ``secret_keyring`` objects (removed in 7.1)
-'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+``loaded`` property of secret and TLS credential objects (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
The ``loaded=on`` option in the command line or QMP ``object-add`` either had
no effect (if ``loaded`` was the last option) or caused options to be
effectively ignored as if they were not given. The property is therefore
-useless and should simply be removed.
+useless and has been removed.
``opened`` property of ``rng-*`` objects (removed in 7.1)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@@ -403,13 +403,13 @@ Sound card devices should be created using ``-device`` or ``-audio``.
The exception is ``pcspk`` which can be activated using ``-machine
pcspk-audiodev=<name>``.
-``-watchdog`` (since 7.2)
-'''''''''''''''''''''''''
+``-watchdog`` (removed in 7.2)
+''''''''''''''''''''''''''''''
Use ``-device`` instead.
-Hexadecimal sizes with scaling multipliers (since 8.0)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''
+Hexadecimal sizes with scaling multipliers (removed in 8.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Input parameters that take a size value should only use a size suffix
(such as 'k' or 'M') when the base is written in decimal, and not when
@@ -510,13 +510,56 @@ than zero.
Removed along with the ``compression`` migration capability.
-``-device virtio-blk,scsi=on|off`` (since 9.1)
-''''''''''''''''''''''''''''''''''''''''''''''
+``-device virtio-blk,scsi=on|off`` (removed in 9.1)
+'''''''''''''''''''''''''''''''''''''''''''''''''''
The virtio-blk SCSI passthrough feature is a legacy VIRTIO feature. VIRTIO 1.0
and later do not support it because the virtio-scsi device was introduced for
full SCSI support. Use virtio-scsi instead when SCSI passthrough is required.
+``-fsdev proxy`` and ``-virtfs proxy`` (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The 9p ``proxy`` filesystem backend driver was originally developed to
+enhance security by dispatching low level filesystem operations from 9p
+server (QEMU process) over to a separate process (the virtfs-proxy-helper
+binary). However the proxy backend was much slower than the local backend,
+had not seen any development in years, and proved to be less secure,
+especially because its helper daemon had to be run as root.
+
+Use ``local``, possibly mapping permissions et al by using its 'mapped'
+security model option, or switch to ``virtiofs``. The virtiofs daemon
+``virtiofsd`` uses vhost to eliminate the high latency costs of the 9p
+``proxy`` backend.
+
+``-portrait`` and ``-rotate`` (removed in 9.2)
+''''''''''''''''''''''''''''''''''''''''''''''
+
+The ``-portrait`` and ``-rotate`` options were documented as only
+working with the PXA LCD device, and all the machine types using
+that display device were removed in 9.2, so these options also
+have been dropped.
+
+These options were intended to simulate a mobile device being
+rotated by the user, and had three effects:
+
+* the display output was rotated by 90, 180 or 270 degrees
+* the mouse/trackpad input was rotated the opposite way
+* the machine model would signal to the guest about its
+ orientation
+
+Of these three things, the input-rotation was coded without being
+restricted to boards which supported the full set of device-rotation
+handling, so in theory the options were usable on other machine models
+to produce an odd effect (rotating input but not display output). But
+this was never intended or documented behaviour, so we have dropped
+the options along with the machine models they were intended for.
+
+``-runas`` (removed in 10.0)
+''''''''''''''''''''''''''''
+
+Use ``-run-with user=..`` instead.
+
User-mode emulator command line arguments
-----------------------------------------
@@ -679,6 +722,15 @@ Use ``multifd-channels`` instead.
Use ``multifd-compression`` instead.
+Incorrectly typed ``device_add`` arguments (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Due to shortcomings in the internal implementation of ``device_add``,
+QEMU used to incorrectly accept certain invalid arguments. Any object
+or list arguments were silently ignored. Other argument types were not
+checked, but an implicit conversion happened, so that e.g. string
+values could be assigned to integer device properties or vice versa.
+
QEMU Machine Protocol (QMP) events
----------------------------------
@@ -815,6 +867,15 @@ QEMU. Since all recent x86 hardware from the past >10 years is
capable of the 64-bit x86 extensions, a corresponding 64-bit OS should
be used instead.
+32-bit hosts for 64-bit guests (removed in 10.0)
+''''''''''''''''''''''''''''''''''''''''''''''''
+
+In general, 32-bit hosts cannot support the memory space or atomicity
+requirements of 64-bit guests. Prior to 10.0, QEMU attempted to
+work around the atomicity issues in system mode by running all vCPUs
+in a single thread context; in user mode atomicity was simply broken.
+From 10.0, QEMU has disabled configuration of 64-bit guests on 32-bit hosts.
+
Guest Emulator ISAs
-------------------
@@ -889,6 +950,21 @@ Nios II CPU (removed in 9.1)
QEMU Nios II architecture was orphan; Intel has EOL'ed the Nios II
processor IP (see `Intel discontinuance notification`_).
+CRIS CPU architecture (removed in 9.2)
+''''''''''''''''''''''''''''''''''''''
+
+The CRIS architecture was pulled from Linux in 4.17 and the compiler
+was no longer packaged in any distro, making it harder to run the
+``check-tcg`` tests.
+
+RISC-V 'any' CPU type ``-cpu any`` (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The 'any' CPU type was introduced back in 2018 and had been around since the
+initial RISC-V QEMU port. Its usage was always unclear: users didn't know
+what to expect from a CPU called 'any', and in fact the CPU did not do anything
+special that wasn't already done by the default CPUs rv32/rv64.
+
System accelerators
-------------------
@@ -899,21 +975,28 @@ Userspace local APIC with KVM (x86, removed in 8.0)
a local APIC. The ``split`` setting is supported, as is using ``-M
kernel-irqchip=off`` when the CPU does not have a local APIC.
-HAXM (``-accel hax``) (removed in 8.2)
-''''''''''''''''''''''''''''''''''''''
-
-The HAXM project has been retired (see https://github.com/intel/haxm#status).
-Use "whpx" (on Windows) or "hvf" (on macOS) instead.
-
MIPS "Trap-and-Emulate" KVM support (removed in 8.0)
''''''''''''''''''''''''''''''''''''''''''''''''''''
The MIPS "Trap-and-Emulate" KVM host and guest support was removed
from Linux in 2021, and is not supported anymore by QEMU either.
+HAXM (``-accel hax``) (removed in 8.2)
+''''''''''''''''''''''''''''''''''''''
+
+The HAXM project has been retired (see https://github.com/intel/haxm#status).
+Use "whpx" (on Windows) or "hvf" (on macOS) instead.
+
System emulator machines
------------------------
+Versioned machine types (aarch64, arm, i386, m68k, ppc64, s390x, x86_64)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+In accordance with our versioned machine type deprecation policy, all machine
+types with version |VER_MACHINE_DELETION_VERSION|, or older, have been
+removed.
+
``s390-virtio`` (removed in 2.6)
''''''''''''''''''''''''''''''''
@@ -948,12 +1031,6 @@ mips ``fulong2e`` machine alias (removed in 6.0)
This machine has been renamed ``fuloong2e``.
-``pc-0.10`` up to ``pc-i440fx-2.3`` (removed in 4.0 up to 9.0)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-These machine types were very old and likely could not be used for live
-migration from old QEMU versions anymore. Use a newer machine type instead.
-
Raspberry Pi ``raspi2`` and ``raspi3`` machines (removed in 6.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@@ -978,6 +1055,51 @@ Nios II ``10m50-ghrd`` and ``nios2-generic-nommu`` machines (removed in 9.1)
The Nios II architecture was orphan.
+``shix`` (removed in 9.2)
+'''''''''''''''''''''''''
+
+The machine was unmaintained.
+
+Arm machines ``akita``, ``borzoi``, ``cheetah``, ``connex``, ``mainstone``, ``n800``, ``n810``, ``spitz``, ``terrier``, ``tosa``, ``verdex``, ``z2`` (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+QEMU included models of some machine types where the QEMU code that
+emulated their SoCs was very old and unmaintained. This code was
+blocking our ability to move forward with various changes across
+the codebase, and over many years nobody has been interested in
+trying to modernise it. We don't expect any of these machines to have
+a large number of users, because they're all modelling hardware that
+has now passed away into history. We are therefore dropping support
+for all machine types using the PXA2xx and OMAP2 SoCs. We are also
+dropping the ``cheetah`` OMAP1 board, because we don't have any
+test images for it and don't know of anybody who does.
+
+Aspeed ``tacoma-bmc`` machine (removed in 10.0)
+'''''''''''''''''''''''''''''''''''''''''''''''
+
+The ``tacoma-bmc`` machine was removed because it added little compared
+to the ``rainier-bmc`` machine. Also, the ``tacoma-bmc`` was a board
+used for bring-up of the AST2600 SoC that never left the labs. It can
+easily be replaced by the ``rainier-bmc`` machine, which was the actual
+final product, or by the ``ast2600-evb`` with some tweaks.
+
+ppc ``ref405ep`` machine (removed in 10.0)
+''''''''''''''''''''''''''''''''''''''''''
+
+This machine was removed because the PPC 405 CPUs have no known users,
+firmware images are not available, OpenWRT dropped support in 2019,
+U-Boot in 2017, and Linux in 2024.
+
+Big-Endian variants of ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines (removed in 10.1)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Both the MicroBlaze ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines
+were added for little-endian CPUs. Big-endian support was never tested
+and likely never worked. Starting with QEMU v10.1, these machines are
+only available as little-endian machines.
+
+
linux-user mode CPUs
--------------------
@@ -1006,8 +1128,8 @@ processor IP (see `Intel discontinuance notification`_).
TCG introspection features
--------------------------
-TCG trace-events (since 6.2)
-''''''''''''''''''''''''''''
+TCG trace-events (removed in 7.0)
+'''''''''''''''''''''''''''''''''
The ability to add new TCG trace points had bit rotted and as the
feature can be replicated with TCG plugins it was removed. If
diff --git a/docs/conf.py b/docs/conf.py
index 876f676..f892a6e 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -60,7 +60,14 @@ needs_sphinx = '3.4.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc']
+extensions = [
+ 'depfile',
+ 'hxtool',
+ 'kerneldoc',
+ 'qapi_domain',
+ 'qapidoc',
+ 'qmp_lexer',
+]
if sphinx.version_info[:3] > (4, 0, 0):
tags.add('sphinx4')
@@ -87,7 +94,7 @@ default_role = 'any'
# General information about the project.
project = u'QEMU'
-copyright = u'2024, The QEMU Project Developers'
+copyright = u'2025, The QEMU Project Developers'
author = u'The QEMU Project Developers'
# The version info for the project you're documenting, acts as replacement for
@@ -110,6 +117,32 @@ finally:
else:
version = release = "unknown version"
+bits = version.split(".")
+
+major = int(bits[0])
+minor = int(bits[1])
+micro = int(bits[2])
+
+# Check for a dev snapshot, so we can adjust to next
+# predicted release version.
+#
+# This assumes we do 3 releases per year, so must bump
+# major if minor == 2
+if micro >= 50:
+ micro = 0
+ if minor == 2:
+ major += 1
+ minor = 0
+ else:
+ minor += 1
+
+# These thresholds must match the constants
+# MACHINE_VER_DELETION_MAJOR & MACHINE_VER_DEPRECATION_MAJOR
+# defined in include/hw/boards.h and the introductory text in
+# docs/about/deprecated.rst
+ver_machine_deprecation_version = "%d.%d.0" % (major - 3, minor)
+ver_machine_deletion_version = "%d.%d.0" % (major - 6, minor)
+
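To make the arithmetic concrete, the following standalone sketch mirrors the
prediction logic above; the version strings in the comments are hypothetical
examples, not values taken from this tree::

    def predicted_machine_versions(version):
        major, minor, micro = (int(x) for x in version.split(".")[:3])
        # A development snapshot (micro >= 50) is treated as the next
        # release; with three releases per year, the major is bumped
        # after the x.2 release.
        if micro >= 50:
            if minor == 2:
                major, minor = major + 1, 0
            else:
                minor += 1
        return "%d.%d.0" % (major - 3, minor), "%d.%d.0" % (major - 6, minor)

    # e.g. a snapshot "10.0.50" predicts release 10.1.0, giving a
    # deprecation baseline of 7.1.0 and a deletion baseline of 4.1.0
    assert predicted_machine_versions("10.0.50") == ("7.1.0", "4.1.0")
    # e.g. a snapshot "9.2.50" rolls over to 10.0.0
    assert predicted_machine_versions("9.2.50") == ("7.0.0", "4.0.0")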
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
@@ -138,7 +171,18 @@ suppress_warnings = ["ref.option"]
# environment variable is not set is for the benefit of readthedocs
# style document building; our Makefile always sets the variable.
confdir = os.getenv('CONFDIR', "/etc/qemu")
-rst_epilog = ".. |CONFDIR| replace:: ``" + confdir + "``\n"
+
+vars = {
+ "CONFDIR": confdir,
+ "VER_MACHINE_DEPRECATION_VERSION": ver_machine_deprecation_version,
+ "VER_MACHINE_DELETION_VERSION": ver_machine_deletion_version,
+}
+
+rst_epilog = "".join([
+ ".. |" + key + "| replace:: ``" + vars[key] + "``\n"
+ for key in vars.keys()
+])
+
# We slurp in the defs.rst.inc and literally include it into rst_epilog,
# because Sphinx's include:: directive doesn't work with absolute paths
# and there isn't any one single relative path that will work for all
@@ -146,6 +190,22 @@ rst_epilog = ".. |CONFDIR| replace:: ``" + confdir + "``\n"
with open(os.path.join(qemu_docdir, 'defs.rst.inc')) as f:
rst_epilog += f.read()
+
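For example, with ``confdir`` at its default of ``/etc/qemu`` and a
(hypothetical) predicted release of 10.1.0, the loop above emits substitution
definitions equivalent to::

    .. |CONFDIR| replace:: ``/etc/qemu``
    .. |VER_MACHINE_DEPRECATION_VERSION| replace:: ``7.1.0``
    .. |VER_MACHINE_DELETION_VERSION| replace:: ``4.1.0``

which is what lets pages such as ``docs/about/removed-features.rst`` refer to
``|VER_MACHINE_DELETION_VERSION|`` directly.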
+# Normally, the QAPI domain is picky about what field lists you use to
+# describe a QAPI entity. If you'd like to use arbitrary additional
+# fields in source documentation, add them here.
+qapi_allowed_fields = {
+ "see also",
+}
+
+# Due to a limitation in Sphinx, we need to know which indices to
+# generate in advance. Adding a namespace here allows that generation.
+qapi_namespaces = {
+ "QGA",
+ "QMP",
+ "QSD",
+}
+
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
@@ -186,7 +246,7 @@ html_js_files = [
]
html_context = {
- "display_gitlab": True,
+ "source_url_prefix": "https://gitlab.com/qemu-project/qemu/-/blob/master/docs/",
"gitlab_user": "qemu-project",
"gitlab_repo": "qemu",
"gitlab_version": "master",
@@ -275,9 +335,6 @@ man_pages = [
('tools/qemu-trace-stap', 'qemu-trace-stap',
'QEMU SystemTap trace tool',
[], 1),
- ('tools/virtfs-proxy-helper', 'virtfs-proxy-helper',
- 'QEMU 9p virtfs proxy filesystem helper',
- ['M. Mohan Kumar'], 1),
]
man_make_section_directory = False
diff --git a/docs/devel/acpi-bits.rst b/docs/devel/acpi-bits.rst
deleted file mode 100644
index 1ec394f..0000000
--- a/docs/devel/acpi-bits.rst
+++ /dev/null
@@ -1,167 +0,0 @@
-=============================================================================
-ACPI/SMBIOS avocado tests using biosbits
-=============================================================================
-************
-Introduction
-************
-Biosbits is a software written by Josh Triplett that can be downloaded
-from https://biosbits.org/. The github codebase can be found
-`here <https://github.com/biosbits/bits/tree/master>`__. It is a software that
-executes the bios components such as acpi and smbios tables directly through
-acpica bios interpreter (a freely available C based library written by Intel,
-downloadable from https://acpica.org/ and is included with biosbits) without an
-operating system getting involved in between. Bios-bits has python integration
-with grub so actual routines that executes bios components can be written in
-python instead of bash-ish (grub's native scripting language).
-There are several advantages to directly testing the bios in a real physical
-machine or in a VM as opposed to indirectly discovering bios issues through the
-operating system (the OS). Operating systems tend to bypass bios problems and
-hide them from the end user. We have more control of what we wanted to test and
-how by being as close to the bios on a running system as possible without a
-complicated software component such as an operating system coming in between.
-Another issue is that we cannot exercise bios components such as ACPI and
-SMBIOS without being in the highest hardware privilege level, ring 0 for
-example in case of x86. Since the OS executes from ring 0 whereas normal user
-land software resides in unprivileged ring 3, operating system must be modified
-in order to write our test routines that exercise and test the bios. This is
-not possible in all cases. Lastly, test frameworks and routines are preferably
-written using a high level scripting language such as python. OSes and
-OS modules are generally written using low level languages such as C and
-low level assembly machine language. Writing test routines in a low level
-language makes things more cumbersome. These and other reasons makes using
-bios-bits very attractive for testing bioses. More details on the inspiration
-for developing biosbits and its real life uses can be found in [#a]_ and [#b]_.
-
-For QEMU, we maintain a fork of bios bits in gitlab along with all the
-dependent submodules `here <https://gitlab.com/qemu-project/biosbits-bits>`__.
-This fork contains numerous fixes, a newer acpica and changes specific to
-running this avocado QEMU tests using bits. The author of this document
-is the sole maintainer of the QEMU fork of bios bits repository. For more
-information, please see author's `FOSDEM talk on this bios-bits based test
-framework <https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/>`__.
-
-*********************************
-Description of the test framework
-*********************************
-
-Under the directory ``tests/avocado/``, ``acpi-bits.py`` is a QEMU avocado
-test that drives all this.
-
-A brief description of the various test files follows.
-
-Under ``tests/avocado/`` as the root we have:
-
-::
-
- ā”œā”€ā”€ acpi-bits
- │ ā”œā”€ā”€ bits-config
- │ │ └── bits-cfg.txt
- │ ā”œā”€ā”€ bits-tests
- │ ā”œā”€ā”€ smbios.py2
- │ ā”œā”€ā”€ testacpi.py2
- │ └── testcpuid.py2
- ā”œā”€ā”€ acpi-bits.py
-
-* ``tests/avocado``:
-
- ``acpi-bits.py``:
- This is the main python avocado test script that generates a
- biosbits iso. It then spawns a QEMU VM with it, collects the log and reports
- test failures. This is the script one would be interested in if they wanted
- to add or change some component of the log parsing, add a new command line
- to alter how QEMU is spawned etc. Test writers typically would not need to
- modify this script unless they wanted to enhance or change the log parsing
- for their tests. In order to enable debugging, you can set **V=1**
- environment variable. This enables verbose mode for the test and also dumps
- the entire log from bios bits and more information in case failure happens.
- You can also set **BITS_DEBUG=1** to turn on debug mode. It will enable
- verbose logs and also retain the temporary work directory the test used for
- you to inspect and run the specific commands manually.
-
- In order to run this test, please perform the following steps from the QEMU
- build directory:
- ::
-
- $ make check-venv (needed only the first time to create the venv)
- $ ./pyvenv/bin/avocado run -t acpi tests/avocado
-
- The above will run all acpi avocado tests including this one.
- In order to run the individual tests, perform the following:
- ::
-
- $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py --tap -
-
- The above will produce output in tap format. You can omit "--tap -" in the
- end and it will produce output like the following:
- ::
-
- $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py
- Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits
- JOB ID : eab225724da7b64c012c65705dc2fa14ab1defef
- JOB LOG : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log
- (1/1) tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits: PASS (33.09 s)
- RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
- JOB TIME : 39.22 s
-
- You can inspect the log file for more information about the run or in order
- to diagnoze issues. If you pass V=1 in the environment, more diagnostic logs
- would be found in the test log.
-
-* ``tests/avocado/acpi-bits/bits-config``:
-
- This location contains biosbits configuration files that determine how the
- software runs the tests.
-
- ``bits-config.txt``:
- This is the biosbits config file that determines what tests
- or actions are performed by bits. The description of the config options are
- provided in the file itself.
-
-* ``tests/avocado/acpi-bits/bits-tests``:
-
- This directory contains biosbits python based tests that are run from within
- the biosbits environment in the spawned VM. New additions of test cases can
- be made in the appropriate test file. For example, new acpi tests can go
- into testacpi.py2 and one would call testsuite.add_test() to register the new
- test so that it gets executed as a part of the ACPI tests.
- It might be occasionally necessary to disable some subtests or add a new
- test that belongs to a test suite not already present in this directory. To
- do this, please clone the bits source from
- https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits.
- Note that this is the "qemu-bits" branch and not the "bits" branch of the
- repository. "qemu-bits" is the branch where we have made all the QEMU
- specific enhancements and we must use the source from this branch only.
- Copy the test suite/script that needs modification (addition of new tests
- or disabling them) from python directory into this directory. For
- example, in order to change cpuid related tests, copy the following
- file into this directory and rename it with .py2 extension:
- https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py
- Then make your additions and changes here. Therefore, the steps are:
-
- (a) Copy unmodified test script to this directory from bits source.
- (b) Add a SPDX license header.
- (c) Perform modifications to the test.
-
- Commits (a), (b) and (c) preferably should go under separate commits so that
- the original test script and the changes we have made are separated and
- clear. (a) and (b) can sometimes be combined into a single step.
-
- The test framework will then use your modified test script to run the test.
- No further changes would be needed. Please check the logs to make sure that
- appropriate changes have taken effect.
-
- The tests have an extension .py2 in order to indicate that:
-
- (a) They are python2.7 based scripts and not python 3 scripts.
- (b) They are run from within the bios bits VM and is not subjected to QEMU
- build/test python script maintenance and dependency resolutions.
- (c) They need not be loaded by avocado framework when running tests.
-
-
-Author: Ani Sinha <anisinha@redhat.com>
-
-References:
------------
-.. [#a] https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf
-.. [#b] https://www.youtube.com/watch?v=36QIepyUuhg
-.. [#c] https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/
diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst
index b77c6e1..95c7b77 100644
--- a/docs/devel/atomics.rst
+++ b/docs/devel/atomics.rst
@@ -204,7 +204,7 @@ They come in six kinds:
before the second with respect to the other components of the system.
Therefore, unlike ``smp_rmb()`` or ``qatomic_load_acquire()``,
``smp_read_barrier_depends()`` can be just a compiler barrier on
- weakly-ordered architectures such as Arm or PPC[#]_.
+ weakly-ordered architectures such as Arm or PPC\ [#alpha]_.
Note that the first load really has to have a _data_ dependency and not
a control dependency. If the address for the second load is dependent
@@ -212,7 +212,7 @@ They come in six kinds:
than actually loading the address itself, then it's a _control_
dependency and a full read barrier or better is required.
-.. [#] The DEC Alpha is an exception, because ``smp_read_barrier_depends()``
+.. [#alpha] The DEC Alpha is an exception, because ``smp_read_barrier_depends()``
needs a processor barrier. On strongly-ordered architectures such
as x86 or s390, ``smp_rmb()`` and ``qatomic_load_acquire()`` can
also be compiler barriers only.
@@ -295,7 +295,7 @@ Acquire/release pairing and the *synchronizes-with* relation
------------------------------------------------------------
Atomic operations other than ``qatomic_set()`` and ``qatomic_read()`` have
-either *acquire* or *release* semantics [#rmw]_. This has two effects:
+either *acquire* or *release* semantics\ [#rmw]_. This has two effects:
.. [#rmw] Read-modify-write operations can have both---acquire applies to the
read part, and release to the write.
diff --git a/docs/devel/blkdebug.txt b/docs/devel/blkdebug.txt
deleted file mode 100644
index 0b0c128..0000000
--- a/docs/devel/blkdebug.txt
+++ /dev/null
@@ -1,162 +0,0 @@
-Block I/O error injection using blkdebug
-----------------------------------------
-Copyright (C) 2014-2015 Red Hat Inc
-
-This work is licensed under the terms of the GNU GPL, version 2 or later. See
-the COPYING file in the top-level directory.
-
-The blkdebug block driver is a rule-based error injection engine. It can be
-used to exercise error code paths in block drivers including ENOSPC (out of
-space) and EIO.
-
-This document gives an overview of the features available in blkdebug.
-
-Background
-----------
-Block drivers have many error code paths that handle I/O errors. Image formats
-are especially complex since metadata I/O errors during cluster allocation or
-while updating tables happen halfway through request processing and require
-discipline to keep image files consistent.
-
-Error injection allows test cases to trigger I/O errors at specific points.
-This way, all error paths can be tested to make sure they are correct.
-
-Rules
------
-The blkdebug block driver takes a list of "rules" that tell the error injection
-engine when to fail an I/O request.
-
-Each I/O request is evaluated against the rules. If a rule matches the request
-then its "action" is executed.
-
-Rules can be placed in a configuration file; the configuration file
-follows the same .ini-like format used by QEMU's -readconfig option, and
-each section of the file represents a rule.
-
-The following configuration file defines a single rule:
-
- $ cat blkdebug.conf
- [inject-error]
- event = "read_aio"
- errno = "28"
-
-This rule fails all aio read requests with ENOSPC (28). Note that the errno
-value depends on the host. On Linux, see
-/usr/include/asm-generic/errno-base.h for errno values.
-
-Invoke QEMU as follows:
-
- $ qemu-system-x86_64
- -drive if=none,cache=none,file=blkdebug:blkdebug.conf:test.img,id=drive0 \
- -device virtio-blk-pci,drive=drive0,id=virtio-blk-pci0
-
-Rules support the following attributes:
-
- event - which type of operation to match (e.g. read_aio, write_aio,
- flush_to_os, flush_to_disk). See the "Events" section for
- information on events.
-
- state - (optional) the engine must be in this state number in order for this
- rule to match. See the "State transitions" section for information
- on states.
-
- errno - the numeric errno value to return when a request matches this rule.
- The errno values depend on the host since the numeric values are not
- standardized in the POSIX specification.
-
- sector - (optional) a sector number that the request must overlap in order to
- match this rule
-
- once - (optional, default "off") only execute this action on the first
- matching request
-
- immediately - (optional, default "off") return a NULL BlockAIOCB
- pointer and fail without an errno instead. This
- exercises the code path where BlockAIOCB fails and the
- caller's BlockCompletionFunc is not invoked.
-
-Events
-------
-Block drivers provide information about the type of I/O request they are about
-to make so rules can match specific types of requests. For example, the qcow2
-block driver tells blkdebug when it accesses the L1 table so rules can match
-only L1 table accesses and not other metadata or guest data requests.
-
-The core events are:
-
- read_aio - guest data read
-
- write_aio - guest data write
-
- flush_to_os - write out unwritten block driver state (e.g. cached metadata)
-
- flush_to_disk - flush the host block device's disk cache
-
-See qapi/block-core.json:BlkdebugEvent for the full list of events.
-You may need to grep block driver source code to understand the
-meaning of specific events.
-
-State transitions
------------------
-There are cases where more power is needed to match a particular I/O request in
-a longer sequence of requests. For example:
-
- write_aio
- flush_to_disk
- write_aio
-
-How do we match the 2nd write_aio but not the first? This is where state
-transitions come in.
-
-The error injection engine has an integer called the "state" that always starts
-initialized to 1. The state integer is internal to blkdebug and cannot be
-observed from outside but rules can interact with it for powerful matching
-behavior.
-
-Rules can be conditional on the current state and they can transition to a new
-state.
-
-When a rule's "state" attribute is non-zero then the current state must equal
-the attribute in order for the rule to match.
-
-For example, to match the 2nd write_aio:
-
- [set-state]
- event = "write_aio"
- state = "1"
- new_state = "2"
-
- [inject-error]
- event = "write_aio"
- state = "2"
- errno = "5"
-
-The first write_aio request matches the set-state rule and transitions from
-state 1 to state 2. Once state 2 has been entered, the set-state rule no
-longer matches since it requires state 1. But the inject-error rule now
-matches the next write_aio request and injects EIO (5).
-
-State transition rules support the following attributes:
-
- event - which type of operation to match (e.g. read_aio, write_aio,
- flush_to_os, flush_to_disk). See the "Events" section for
- information on events.
-
- state - (optional) the engine must be in this state number in order for this
- rule to match
-
- new_state - transition to this state number
-
-Suspend and resume
-------------------
-Exercising code paths in block drivers may require specific ordering amongst
-concurrent requests. The "breakpoint" feature allows requests to be halted on
-a blkdebug event and resumed later. This makes it possible to achieve
-deterministic ordering when multiple requests are in flight.
-
-Breakpoints on blkdebug events are associated with a user-defined "tag" string.
-This tag serves as an identifier by which the request can be resumed at a later
-point.
-
-See the qemu-io(1) break, resume, remove_break, and wait_break commands for
-details.
diff --git a/docs/devel/blkverify.txt b/docs/devel/blkverify.txt
deleted file mode 100644
index aca826c..0000000
--- a/docs/devel/blkverify.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-= Block driver correctness testing with blkverify =
-
-== Introduction ==
-
-This document describes how to use the blkverify protocol to test that a block
-driver is operating correctly.
-
-It is difficult to test and debug block drivers against real guests. Often
-processes inside the guest will crash because corrupt sectors were read as part
-of the executable. Other times obscure errors are raised by a program inside
-the guest. These issues are extremely hard to trace back to bugs in the block
-driver.
-
-Blkverify solves this problem by catching data corruption inside QEMU the first
-time bad data is read and reporting the disk sector that is corrupted.
-
-== How it works ==
-
-The blkverify protocol has two child block devices, the "test" device and the
-"raw" device. Read/write operations are mirrored to both devices so their
-state should always be in sync.
-
-The "raw" device is a raw image, a flat file, that has identical starting
-contents to the "test" image. The idea is that the "raw" device will handle
-read/write operations correctly and not corrupt data. It can be used as a
-reference for comparison against the "test" device.
-
-After a mirrored read operation completes, blkverify will compare the data and
-raise an error if it is not identical. This makes it possible to catch the
-first instance where corrupt data is read.
-
-== Example ==
-
-Imagine raw.img has 0xcd repeated throughout its first sector:
-
- $ ./qemu-io -c 'read -v 0 512' raw.img
- 00000000: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
- 00000010: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
- [...]
- 000001e0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
- 000001f0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
- read 512/512 bytes at offset 0
- 512.000000 bytes, 1 ops; 0.0000 sec (97.656 MiB/sec and 200000.0000 ops/sec)
-
-And test.img is corrupt, its first sector is zeroed when it shouldn't be:
-
- $ ./qemu-io -c 'read -v 0 512' test.img
- 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
- 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
- [...]
- 000001e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
- 000001f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
- read 512/512 bytes at offset 0
- 512.000000 bytes, 1 ops; 0.0000 sec (81.380 MiB/sec and 166666.6667 ops/sec)
-
-This error is caught by blkverify:
-
- $ ./qemu-io -c 'read 0 512' blkverify:a.img:b.img
- blkverify: read sector_num=0 nb_sectors=4 contents mismatch in sector 0
-
-A more realistic scenario is verifying the installation of a guest OS:
-
- $ ./qemu-img create raw.img 16G
- $ ./qemu-img create -f qcow2 test.qcow2 16G
- $ ./qemu-system-x86_64 -cdrom debian.iso \
- -drive file=blkverify:raw.img:test.qcow2
-
-If the installation is aborted when blkverify detects corruption, use qemu-io
-to explore the contents of the disk image at the sector in question.
diff --git a/docs/devel/build-environment.rst b/docs/devel/build-environment.rst
new file mode 100644
index 0000000..661f6ea
--- /dev/null
+++ b/docs/devel/build-environment.rst
@@ -0,0 +1,118 @@
+
+.. _setup-build-env:
+
+Setup build environment
+=======================
+
+QEMU has a lot of dependencies on the host system. glib2 is used everywhere in
+the code base, and most of the other dependencies are optional.
+
+We present here simple instructions to enable native builds on most popular
+systems.
+
+You can find additional instructions on the `QEMU wiki <https://wiki.qemu.org/>`_:
+
+- `Linux <https://wiki.qemu.org/Hosts/Linux>`_
+- `MacOS <https://wiki.qemu.org/Hosts/Mac>`_
+- `Windows <https://wiki.qemu.org/Hosts/W32>`_
+- `BSD <https://wiki.qemu.org/Hosts/BSD>`_
+
+Note: Installing build dependencies through your package manager may miss
+dependencies that have been newly introduced in qemu.git, as well as
+dependencies that the distribution has decided to exclude.
+
+Linux
+-----
+
+Fedora
+++++++
+
+::
+
+ sudo dnf update && sudo dnf builddep qemu
+
+Debian/Ubuntu
++++++++++++++
+
+You first need to enable `Sources List <https://wiki.debian.org/SourcesList>`_.
+Then, use apt to install dependencies:
+
+::
+
+ sudo apt update && sudo apt build-dep qemu
+
+MacOS
+-----
+
+You first need to install `Homebrew <https://brew.sh/>`_. Then, use it to
+install dependencies:
+
+::
+
+ brew update && brew install $(brew deps --include-build qemu)
+
+Windows
+-------
+
+You first need to install `MSYS2 <https://www.msys2.org/>`_.
+MSYS2 offers `different environments <https://www.msys2.org/docs/environments/>`_.
+x86_64 environments are based on GCC, while aarch64 is based on Clang.
+
+We recommend using MINGW64 for windows-x86_64 and CLANGARM64 for
+windows-aarch64 (the latter is only available on windows-aarch64 hosts).
+
+Then, you can open a Windows shell and enter the MSYS2 environment using:
+
+::
+
+ c:/msys64/msys2_shell.cmd -defterm -here -no-start -mingw64
+  # Replace -mingw64 with -clangarm64 or -ucrt64 for other environments.
+
+MSYS2 package manager does not offer a built-in way to install build
+dependencies. You can start with this list of packages using pacman:
+
+Note: Dependencies need to be installed again if you use a different MSYS2
+environment.
+
+::
+
+ # update MSYS2 itself, you need to reopen your shell at the end.
+ pacman -Syu
+ pacman -S \
+ base-devel binutils bison diffutils flex git grep make sed \
+ ${MINGW_PACKAGE_PREFIX}-toolchain \
+ ${MINGW_PACKAGE_PREFIX}-glib2 \
+ ${MINGW_PACKAGE_PREFIX}-gtk3 \
+ ${MINGW_PACKAGE_PREFIX}-libnfs \
+ ${MINGW_PACKAGE_PREFIX}-libssh \
+ ${MINGW_PACKAGE_PREFIX}-ninja \
+ ${MINGW_PACKAGE_PREFIX}-pixman \
+ ${MINGW_PACKAGE_PREFIX}-pkgconf \
+ ${MINGW_PACKAGE_PREFIX}-python \
+ ${MINGW_PACKAGE_PREFIX}-SDL2 \
+ ${MINGW_PACKAGE_PREFIX}-zstd
+
+If you want to install all dependencies, it's possible to use the recipe used
+to build QEMU in MSYS2 itself.
+
+::
+
+ pacman -S wget base-devel git
+ wget https://raw.githubusercontent.com/msys2/MINGW-packages/refs/heads/master/mingw-w64-qemu/PKGBUILD
+  # Some packages may be missing for your environment; the installation
+  # will still proceed, though.
+ makepkg --syncdeps --nobuild PKGBUILD || true
+
+Build on windows-aarch64
+++++++++++++++++++++++++
+
+When trying to cross compile for x86_64 using the UCRT64 or MINGW64
+environment, configure will run into an error because the CPU detected by
+Meson is not correct.
+
+Meson detects the x86_64 processes as emulated, so you need to set the CPU
+manually and force a cross compilation (with an empty prefix).
+
+::
+
+ ./configure --cpu=x86_64 --cross-prefix=
+
diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst
index 79eceb1..2c88419 100644
--- a/docs/devel/build-system.rst
+++ b/docs/devel/build-system.rst
@@ -134,7 +134,7 @@ in how the build process runs Python code.
At this stage, ``configure`` also queries the chosen Python interpreter
about QEMU's build dependencies. Note that the build process does *not*
-look for ``meson``, ``sphinx-build`` or ``avocado`` binaries in the PATH;
+look for ``meson`` or ``sphinx-build`` binaries in the PATH;
likewise, there are no options such as ``--meson`` or ``--sphinx-build``.
This avoids a potential mismatch, where Meson and Sphinx binaries on the
PATH might operate in a different Python environment than the one chosen
@@ -145,13 +145,13 @@ was installed in the ``site-packages`` directory of another interpreter,
or with the wrong ``pip`` program.
If a package is available for the chosen interpreter, ``configure``
-prepares a small script that invokes it from the venv itself[#distlib]_.
+prepares a small script that invokes it from the venv itself\ [#distlib]_.
If not, ``configure`` can also optionally install dependencies in the
virtual environment with ``pip``, either from wheels in ``python/wheels``
or by downloading the package with PyPI. Downloading can be disabled with
``--disable-download``; and anyway, it only happens when a ``configure``
option (currently, only ``--enable-docs``) is explicitly enabled but
-the dependencies are not present[#pip]_.
+the dependencies are not present.
.. [#distlib] The scripts are created based on the package's metadata,
specifically the ``console_script`` entry points. This is the
@@ -164,15 +164,11 @@ the dependencies are not present[#pip]_.
because the Python Packaging Authority provides a package
``distlib.scripts`` to perform this task.
-.. [#pip] ``pip`` might also be used when running ``make check-avocado``
- if downloading is enabled, to ensure that Avocado is
- available.
-
The required versions of the packages are stored in a configuration file
``pythondeps.toml``. The format is custom to QEMU, but it is documented
at the top of the file itself and it should be easy to understand. The
requirements should make it possible to use the version that is packaged
-that is provided by supported distros.
+by QEMU's supported distros.
When dependencies are downloaded, instead, ``configure`` uses a "known
good" version that is also listed in ``pythondeps.toml``. In this
@@ -260,7 +256,7 @@ Target-dependent emulator sourcesets:
Each emulator also includes sources for files in the ``hw/`` and ``target/``
subdirectories. The subdirectory used for each emulator comes
from the target's definition of ``TARGET_BASE_ARCH`` or (if missing)
- ``TARGET_ARCH``, as found in ``default-configs/targets/*.mak``.
+ ``TARGET_ARCH``, as found in ``configs/targets/*.mak``.
Each subdirectory in ``hw/`` adds one sourceset to the ``hw_arch`` dictionary,
for example::
@@ -317,8 +313,8 @@ Utility sourcesets:
The following files concur in the definition of which files are linked
into each emulator:
-``default-configs/devices/*.mak``
- The files under ``default-configs/devices/`` control the boards and devices
+``configs/devices/*.mak``
+ The files under ``configs/devices/`` control the boards and devices
that are built into each QEMU system emulation targets. They merely contain
a list of config variable definitions such as::
@@ -327,13 +323,13 @@ into each emulator:
CONFIG_XLNX_VERSAL=y
``*/Kconfig``
- These files are processed together with ``default-configs/devices/*.mak`` and
+ These files are processed together with ``configs/devices/*.mak`` and
describe the dependencies between various features, subsystems and
device models. They are described in :ref:`kconfig`
-``default-configs/targets/*.mak``
+``configs/targets/*.mak``
These files mostly define symbols that appear in the ``*-config-target.h``
- file for each emulator [#cfgtarget]_. However, the ``TARGET_ARCH``
+ file for each emulator\ [#cfgtarget]_. However, the ``TARGET_ARCH``
and ``TARGET_BASE_ARCH`` will also be used to select the ``hw/`` and
``target/`` subdirectories that are compiled into each target.
@@ -497,8 +493,7 @@ number of dynamically created files listed later.
``pyvenv/bin``, and calling ``pip`` to install dependencies.
``tests/Makefile.include``
- Rules for external test harnesses. These include the TCG tests
- and the Avocado-based integration tests.
+ Rules for external test harnesses like the TCG tests.
``tests/docker/Makefile.include``
Rules for Docker tests. Like ``tests/Makefile.include``, this file is
diff --git a/docs/devel/ci-definitions.rst.inc b/docs/devel/ci-definitions.rst.inc
deleted file mode 100644
index 6d5c6fd..0000000
--- a/docs/devel/ci-definitions.rst.inc
+++ /dev/null
@@ -1,121 +0,0 @@
-Definition of terms
-===================
-
-This section defines the terms used in this document and correlates them with
-what is currently used on QEMU.
-
-Automated tests
----------------
-
-An automated test is written on a test framework using its generic test
-functions/classes. The test framework can run the tests and report their
-success or failure [1]_.
-
-An automated test has essentially three parts:
-
-1. The test initialization of the parameters, where the expected parameters,
- like inputs and expected results, are set up;
-2. The call to the code that should be tested;
-3. An assertion, comparing the result from the previous call with the expected
- result set during the initialization of the parameters. If the result
- matches the expected result, the test has been successful; otherwise, it has
- failed.
-
-Unit testing
-------------
-
-A unit test is responsible for exercising individual software components as a
-unit, like interfaces, data structures, and functionality, uncovering errors
-within the boundaries of a component. The verification effort is in the
-smallest software unit and focuses on the internal processing logic and data
-structures. A test case of unit tests should be designed to uncover errors due
-to erroneous computations, incorrect comparisons, or improper control flow [2]_.
-
-On QEMU, unit testing is represented by the 'check-unit' target from 'make'.
-
-Functional testing
-------------------
-
-A functional test focuses on the functional requirement of the software.
-Deriving sets of input conditions, the functional tests should fully exercise
-all the functional requirements for a program. Functional testing is
-complementary to other testing techniques, attempting to find errors like
-incorrect or missing functions, interface errors, behavior errors, and
-initialization and termination errors [3]_.
-
-On QEMU, functional testing is represented by the 'check-qtest' target from
-'make'.
-
-System testing
---------------
-
-System tests ensure all application elements mesh properly while the overall
-functionality and performance are achieved [4]_. Some or all system components
-are integrated to create a complete system to be tested as a whole. System
-testing ensures that components are compatible, interact correctly, and
-transfer the right data at the right time across their interfaces. As system
-testing focuses on interactions, use case-based testing is a practical approach
-to system testing [5]_. Note that, in some cases, system testing may require
-interaction with third-party software, like operating system images, databases,
-networks, and so on.
-
-On QEMU, system testing is represented by the 'check-avocado' target from
-'make'.
-
-Flaky tests
------------
-
-A flaky test is defined as a test that exhibits both a passing and a failing
-result with the same code on different runs. Some usual reasons for an
-intermittent/flaky test are async wait, concurrency, and test order dependency
-[6]_.
-
-Gating
-------
-
-A gate restricts the move of code from one stage to another on a
-test/deployment pipeline. The step move is granted with approval. The approval
-can be a manual intervention or a set of tests succeeding [7]_.
-
-On QEMU, the gating process happens during the pull request. The approval is
-done by the project leader running its own set of tests. The pull request gets
-merged when the tests succeed.
-
-Continuous Integration (CI)
----------------------------
-
-Continuous integration (CI) requires the builds of the entire application and
-the execution of a comprehensive set of automated tests every time there is a
-need to commit any set of changes [8]_. The automated tests can be composed of
-the unit, functional, system, and other tests.
-
-Keynotes about continuous integration (CI) [9]_:
-
-1. System tests may depend on external software (operating system images,
- firmware, database, network).
-2. It may take a long time to build and test. It may be impractical to build
- the system being developed several times per day.
-3. If the development platform is different from the target platform, it may
- not be possible to run system tests in the developer’s private workspace.
- There may be differences in hardware, operating system, or installed
- software. Therefore, more time is required for testing the system.
-
-References
-----------
-
-.. [1] Sommerville, Ian (2016). Software Engineering. p. 233.
-.. [2] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering,
- A Practitioner’s Approach. p. 48, 376, 378, 381.
-.. [3] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering,
- A Practitioner’s Approach. p. 388.
-.. [4] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering,
- A Practitioner’s Approach. Software Engineering, p. 377.
-.. [5] Sommerville, Ian (2016). Software Engineering. p. 59, 232, 240.
-.. [6] Luo, Qingzhou, et al. An empirical analysis of flaky tests.
- Proceedings of the 22nd ACM SIGSOFT International Symposium on
- Foundations of Software Engineering. 2014.
-.. [7] Humble, Jez & Farley, David (2010). Continuous Delivery:
- Reliable Software Releases Through Build, Test, and Deployment, p. 122.
-.. [8] Humble, Jez & Farley, David (2010). Continuous Delivery:
- Reliable Software Releases Through Build, Test, and Deployment, p. 55.
-.. [9] Sommerville, Ian (2016). Software Engineering. p. 743.
diff --git a/docs/devel/ci-jobs.rst.inc b/docs/devel/ci-jobs.rst.inc
deleted file mode 100644
index 3756bbe..0000000
--- a/docs/devel/ci-jobs.rst.inc
+++ /dev/null
@@ -1,190 +0,0 @@
-.. _ci_var:
-
-Custom CI/CD variables
-======================
-
-QEMU CI pipelines can be tuned by setting some CI environment variables.
-
-Set variable globally in the user's CI namespace
-------------------------------------------------
-
-Variables can be set globally in the user's CI namespace setting.
-
-For further information about how to set these variables, please refer to::
-
- https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project
-
-Set variable manually when pushing a branch or tag to the user's repository
----------------------------------------------------------------------------
-
-Variables can be set manually when pushing a branch or tag, using
-git-push command line arguments.
-
-Example setting the QEMU_CI_EXAMPLE_VAR variable:
-
-.. code::
-
- git push -o ci.variable="QEMU_CI_EXAMPLE_VAR=value" myrepo mybranch
-
-For further information about how to set these variables, please refer to::
-
- https://docs.gitlab.com/ee/user/project/push_options.html#push-options-for-gitlab-cicd
-
-Setting aliases in your git config
-----------------------------------
-
-You can use aliases to make it easier to push branches with different
-CI configurations. For example define an alias for triggering CI:
-
-.. code::
-
- git config --local alias.push-ci "push -o ci.variable=QEMU_CI=1"
- git config --local alias.push-ci-now "push -o ci.variable=QEMU_CI=2"
-
-Which lets you run:
-
-.. code::
-
- git push-ci
-
-to create the pipeline, or:
-
-.. code::
-
- git push-ci-now
-
-to create and run the pipeline
-
-
-Variable naming and grouping
-----------------------------
-
-The variables used by QEMU's CI configuration are grouped together
-in a handful of namespaces
-
- * QEMU_JOB_nnnn - variables to be defined in individual jobs
- or templates, to influence the shared rules defined in the
- .base_job_template.
-
- * QEMU_CI_nnn - variables to be set by contributors in their
- repository CI settings, or as git push variables, to influence
- which jobs get run in a pipeline
-
- * QEMU_CI_CONTAINER_TAG - the tag used to publish containers
- in stage 1, for use by build jobs in stage 2. Defaults to
- 'latest', but if running pipelines for different branches
- concurrently, it should be overridden per pipeline.
-
- * QEMU_CI_UPSTREAM - gitlab namespace that is considered to be
- the 'upstream'. This defaults to 'qemu-project'. Contributors
- may choose to override this if they are modifying rules in
- base.yml and need to validate how they will operate when in
- an upstream context, as opposed to their fork context.
-
- * nnn - other misc variables not falling into the above
- categories, or using different names for historical reasons
- and not yet converted.
-
-Maintainer controlled job variables
------------------------------------
-
-The following variables may be set when defining a job in the
-CI configuration file.
-
-QEMU_JOB_CIRRUS
-~~~~~~~~~~~~~~~
-
-The job makes use of Cirrus CI infrastructure, requiring the
-configuration setup for cirrus-run to be present in the repository
-
-QEMU_JOB_OPTIONAL
-~~~~~~~~~~~~~~~~~
-
-The job is expected to be successful in general, but is not run
-by default due to need to conserve limited CI resources. It is
-available to be started manually by the contributor in the CI
-pipelines UI.
-
-QEMU_JOB_ONLY_FORKS
-~~~~~~~~~~~~~~~~~~~
-
-The job results are only of interest to contributors prior to
-submitting code. They are not required as part of the gating
-CI pipeline.
-
-QEMU_JOB_SKIPPED
-~~~~~~~~~~~~~~~~
-
-The job is not reliably successful in general, so is not
-currently suitable to be run by default. Ideally this should
-be a temporary marker until the problems can be addressed, or
-the job permanently removed.
-
-QEMU_JOB_PUBLISH
-~~~~~~~~~~~~~~~~
-
-The job is for publishing content after a branch has been
-merged into the upstream default branch.
-
-QEMU_JOB_AVOCADO
-~~~~~~~~~~~~~~~~
-
-The job runs the Avocado integration test suite
-
-Contributor controlled runtime variables
-----------------------------------------
-
-The following variables may be set by contributors to control
-job execution
-
-QEMU_CI
-~~~~~~~
-
-By default, no pipelines will be created on contributor forks
-in order to preserve CI credits
-
-Set this variable to 1 to create the pipelines, but leave all
-the jobs to be manually started from the UI
-
-Set this variable to 2 to create the pipelines and run all
-the jobs immediately, as was the historical behaviour
-
-QEMU_CI_AVOCADO_TESTING
-~~~~~~~~~~~~~~~~~~~~~~~
-By default, tests using the Avocado framework are not run automatically in
-the pipelines (because multiple artifacts have to be downloaded, and if
-these artifacts are not already cached, downloading them make the jobs
-reach the timeout limit). Set this variable to have the tests using the
-Avocado framework run automatically.
-
-Other misc variables
---------------------
-
-These variables are primarily to control execution of jobs on
-private runners
-
-AARCH64_RUNNER_AVAILABLE
-~~~~~~~~~~~~~~~~~~~~~~~~
-If you've got access to an aarch64 host that can be used as a gitlab-CI
-runner, you can set this variable to enable the tests that require this
-kind of host. The runner should be tagged with "aarch64".
-
-AARCH32_RUNNER_AVAILABLE
-~~~~~~~~~~~~~~~~~~~~~~~~
-If you've got access to an armhf host or an arch64 host that can run
-aarch32 EL0 code to be used as a gitlab-CI runner, you can set this
-variable to enable the tests that require this kind of host. The
-runner should be tagged with "aarch32".
-
-S390X_RUNNER_AVAILABLE
-~~~~~~~~~~~~~~~~~~~~~~
-If you've got access to an IBM Z host that can be used as a gitlab-CI
-runner, you can set this variable to enable the tests that require this
-kind of host. The runner should be tagged with "s390x".
-
-CCACHE_DISABLE
-~~~~~~~~~~~~~~
-The jobs are configured to use "ccache" by default since this typically
-reduces compilation time, at the cost of increased storage. If the
-use of "ccache" is suspected to be hurting the overall job execution
-time, setting the "CCACHE_DISABLE=1" env variable to disable it.
diff --git a/docs/devel/ci.rst b/docs/devel/ci.rst
deleted file mode 100644
index ed88a20..0000000
--- a/docs/devel/ci.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. _ci:
-
-==
-CI
-==
-
-Most of QEMU's CI is run on GitLab's infrastructure although a number
-of other CI services are used for specialised purposes. The most up to
-date information about them and their status can be found on the
-`project wiki testing page <https://wiki.qemu.org/Testing/CI>`_.
-
-.. include:: ci-definitions.rst.inc
-.. include:: ci-jobs.rst.inc
-.. include:: ci-runners.rst.inc
diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst
index 177ee1c..3f744f2 100644
--- a/docs/devel/clocks.rst
+++ b/docs/devel/clocks.rst
@@ -358,6 +358,12 @@ humans (for instance in debugging), use ``clock_display_freq()``,
which returns a prettified string-representation, e.g. "33.3 MHz".
The caller must free the string with g_free() after use.
+It's also possible to retrieve the clock period from a QTest by
+accessing the QOM property ``qtest-clock-period`` using a QMP command.
+This property is only present when the device is being run under
+the ``qtest`` accelerator; it is not available when QEMU is
+being run normally.
+
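As an illustration only (nothing below is mandated by the clock API), the
property can be read with the generic ``qom-get`` QMP command. This minimal
Python sketch assumes QEMU was started under the ``qtest`` accelerator with a
QMP socket, e.g. ``-qmp unix:/tmp/qmp.sock,server,wait=off``, and the QOM
path of the clock is a purely hypothetical placeholder::

    import json
    import socket

    def qmp(chan, execute, **arguments):
        # Send one QMP command and skip any asynchronous events until the
        # matching reply (a "return" or "error" object) arrives.
        chan.write(json.dumps({"execute": execute,
                               "arguments": arguments}) + "\r\n")
        chan.flush()
        while True:
            reply = json.loads(chan.readline())
            if "return" in reply or "error" in reply:
                return reply

    with socket.socket(socket.AF_UNIX) as sock:
        sock.connect("/tmp/qmp.sock")
        chan = sock.makefile("rw")
        json.loads(chan.readline())        # discard the greeting banner
        qmp(chan, "qmp_capabilities")      # leave capabilities negotiation
        reply = qmp(chan, "qom-get",
                    path="/machine/soc/clk-in",   # hypothetical clock path
                    property="qtest-clock-period")
        print(reply)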
Calculating expiry deadlines
----------------------------
diff --git a/docs/devel/code-provenance.rst b/docs/devel/code-provenance.rst
new file mode 100644
index 0000000..b5aae2e
--- /dev/null
+++ b/docs/devel/code-provenance.rst
@@ -0,0 +1,338 @@
+.. _code-provenance:
+
+Code provenance
+===============
+
+Certifying patch submissions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The QEMU community **mandates** all contributors to certify provenance of
+patch submissions they make to the project. To put it another way,
+contributors must indicate that they are legally permitted to contribute to
+the project.
+
+Certification is achieved with a low overhead by adding a single line to the
+bottom of every git commit::
+
+ Signed-off-by: YOUR NAME <YOUR@EMAIL>
+
+The addition of this line asserts that the author of the patch is contributing
+in accordance with the clauses specified in the
+`Developer's Certificate of Origin <https://developercertificate.org>`__:
+
+.. _dco:
+
+ Developer's Certificate of Origin 1.1
+
+ By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+The name used with "Signed-off-by" does not need to be your legal name, nor
+birth name, nor appear on any government ID. It is the identity you choose to
+be known by in the community, but should not be anonymous, nor misrepresent
+who you are.
+
+It is generally expected that the name and email address used in one of the
+``Signed-off-by`` lines match those of the git commit ``Author`` field.
+It's okay if you subscribe or contribute to the list via more than one
+address, but using multiple addresses in one commit just confuses
+things.
+
+If the person sending the mail is not one of the patch authors, they are
+nonetheless expected to add their own ``Signed-off-by`` to comply with the
+DCO clause (c).
+
+Multiple authorship
+~~~~~~~~~~~~~~~~~~~
+
+It is not uncommon for a patch to have contributions from multiple authors. In
+this scenario, git commits will usually be expected to have a ``Signed-off-by``
+line for each contributor involved in creation of the patch. Some edge cases:
+
+ * The non-primary author's contributions were so trivial that they can be
+ considered not subject to copyright. In this case the secondary authors
+ need not include a ``Signed-off-by``.
+
+ This case most commonly applies where QEMU reviewers give short snippets
+ of code as suggested fixes to a patch. The reviewers don't need to have
+ their own ``Signed-off-by`` added unless their code suggestion was
+ unusually large, but it is common to add ``Suggested-by`` as a credit
+ for non-trivial code.
+
+ * Both contributors work for the same employer and the employer requires
+ copyright assignment.
+
+ It can be said that in this case a ``Signed-off-by`` is indicating that
+ the person has permission to contribute from their employer who is the
+ copyright holder. It is nonetheless still preferable to include a
+ ``Signed-off-by`` for each contributor, as in some countries employees are
+ not able to assign copyright to their employer, and it also covers any
+ time invested outside working hours.
+
+When multiple ``Signed-off-by`` tags are present, they should be strictly kept
+in order of authorship, from oldest to newest.
+
+Other commit tags
+~~~~~~~~~~~~~~~~~
+
+While the ``Signed-off-by`` tag is mandatory, there are a number of other tags
+that are commonly used during QEMU development:
+
+ * **``Reviewed-by``**: when a QEMU community member reviews a patch on the
+ mailing list, if they consider the patch acceptable, they should send an
+ email reply containing a ``Reviewed-by`` tag. Subsystem maintainers who
+ review a patch should add this even if they are also adding their
+ ``Signed-off-by`` to the same commit.
+
+ * **``Acked-by``**: when a QEMU subsystem maintainer approves a patch that
+ touches their subsystem, but intends to allow a different maintainer to
+   queue it and send a pull request, they would send a mail containing an
+ ``Acked-by`` tag. Where a patch touches multiple subsystems, ``Acked-by``
+ only implies review of the maintainers' own areas of responsibility. If a
+ maintainer wants to indicate they have done a full review they should use
+ a ``Reviewed-by`` tag.
+
+ * **``Tested-by``**: when a QEMU community member has functionally tested the
+ behaviour of the patch in some manner, they should send an email reply
+ containing a ``Tested-by`` tag.
+
+ * **``Reported-by``**: when a QEMU community member reports a problem via the
+ mailing list, or some other informal channel that is not the issue tracker,
+ it is good practice to credit them by including a ``Reported-by`` tag on
+ any patch fixing the issue. When the problem is reported via the GitLab
+ issue tracker, however, it is sufficient to just include a link to the
+ issue.
+
+ * **``Suggested-by``**: when a reviewer or other 3rd party makes non-trivial
+ suggestions for how to change a patch, it is good practice to credit them
+ by including a ``Suggested-by`` tag.
+
+Subsystem maintainer requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a subsystem maintainer accepts a patch from a contributor, in addition to
+the normal code review points, they are expected to validate the presence of
+suitable ``Signed-off-by`` tags.
+
+At the time they queue the patch in their subsystem tree, the maintainer
+**must** also then add their own ``Signed-off-by`` to indicate that they have
+done the aforementioned validation. This is in addition to any of their own
+``Reviewed-by`` tags the subsystem maintainer may wish to include.
+
+When the maintainer modifies the patch after pulling into their tree, they
+should record their contribution. This is typically done via a note in the
+commit message, just prior to the maintainer's ``Signed-off-by``::
+
+ Signed-off-by: Cory Contributor <cory.contributor@example.com>
+ [Comment rephrased for clarity]
+ Signed-off-by: Mary Maintainer <mary.maintainer@mycorp.test>
+
+
+Tools for adding ``Signed-off-by``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a variety of ways tools can support adding ``Signed-off-by`` tags
+for patches, avoiding the need for contributors to manually type in this
+repetitive text each time.
+
+git commands
+^^^^^^^^^^^^
+
+When creating, or amending, a commit the ``-s`` flag to ``git commit`` will
+append a suitable line matching the configured git author details.
+
+If preparing patches using the ``git format-patch`` tool, the ``-s`` flag can
+be used to append a suitable line in the emails it creates, without modifying
+the local commits. Alternatively to modify all the local commits on a branch::
+
+ git rebase master -x 'git commit --amend --no-edit -s'
+
+emacs
+^^^^^
+
+In the file ``$HOME/.emacs.d/abbrev_defs`` add:
+
+.. code:: elisp
+
+ (define-abbrev-table 'global-abbrev-table
+ '(
+ ("8rev" "Reviewed-by: YOUR NAME <your@email.addr>" nil 1)
+ ("8ack" "Acked-by: YOUR NAME <your@email.addr>" nil 1)
+ ("8test" "Tested-by: YOUR NAME <your@email.addr>" nil 1)
+ ("8sob" "Signed-off-by: YOUR NAME <your@email.addr>" nil 1)
+ ))
+
+With this change, if you type (for example) ``8rev`` followed by ``<space>``
+or ``<enter>`` it will expand to the whole phrase.
+
+vim
+^^^
+
+In the file ``$HOME/.vimrc`` add::
+
+ iabbrev 8rev Reviewed-by: YOUR NAME <your@email.addr>
+ iabbrev 8ack Acked-by: YOUR NAME <your@email.addr>
+ iabbrev 8test Tested-by: YOUR NAME <your@email.addr>
+ iabbrev 8sob Signed-off-by: YOUR NAME <your@email.addr>
+
+With this change, if you type (for example) ``8rev`` followed by ``<space>``
+or ``<enter>`` it will expand to the whole phrase.
+
+Re-starting abandoned work
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For a variety of reasons there are some patches that get submitted to QEMU but
+never merged. An unrelated contributor may decide (months or years later) to
+continue working from the abandoned patch and re-submit it with extra changes.
+
+The general principles when picking up abandoned work are:
+
+ * Continue to credit the original author for their work, by maintaining their
+ original ``Signed-off-by``
+ * Indicate where the original patch was obtained from (mailing list, bug
+ tracker, author's git repo, etc) when sending it for review
+ * Acknowledge the extra work of the new contributor by including their
+   ``Signed-off-by`` in the patch in addition to the original author's
+ * Indicate who is responsible for what parts of the patch. This is typically
+ done via a note in the commit message, just prior to the new contributor's
+ ``Signed-off-by``::
+
+ Signed-off-by: Some Person <some.person@example.com>
+ [Rebased and added support for 'foo']
+ Signed-off-by: New Person <new.person@mycorp.test>
+
+In complicated cases, or if otherwise unsure, ask for advice on the project
+mailing list.
+
+It is also recommended to attempt to contact the original author to let them
+know you are interested in taking over their work, in case they still intended
+to return to the work, or had any suggestions about the best way to continue.
+
+Inclusion of generated files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Files in patches contributed to QEMU are generally expected to be provided
+only in the preferred format for making modifications. The implication of
+this is that the output of code generators or compilers is usually not
+appropriate to contribute to QEMU.
+
+For reasons of practicality there are some exceptions to this rule, where
+generated code is permitted, provided it is also accompanied by the
+corresponding preferred source format. This is done where it is impractical
+to expect those building QEMU to run the code generation or compilation
+process. A non-exhaustive list of examples is:
+
+ * Images: where a bitmap image is created from a vector file it is common
+ to include the rendered bitmaps at desired resolution(s), since subtle
+ changes in the rasterization process / tools may affect quality. The
+ original vector file is expected to accompany any generated bitmaps.
+
+ * Firmware: QEMU includes pre-compiled binary ROMs for a variety of guest
+ firmwares. When such binary ROMs are contributed, the corresponding source
+ must also be provided, either directly, or through a git submodule link.
+
+ * Dockerfiles: the majority of the dockerfiles are automatically generated
+ from a canonical list of build dependencies maintained in tree, together
+ with the libvirt-ci git submodule link. The generated dockerfiles are
+ included in tree because it is desirable to be able to directly build
+ container images from a clean git checkout.
+
+ * eBPF: QEMU includes some generated eBPF machine code, since the required
+   eBPF compilation tools are not broadly available on all targeted OS
+   distributions. The corresponding eBPF C code for the binary is also
+   provided. This is a time-limited exception until the eBPF toolchain is
+   sufficiently broadly available in distros.
+
+In all cases above, the existence of generated files must be acknowledged
+and justified in the commit that introduces them.
+
+Tools which perform changes to existing code with deterministic algorithmic
+manipulation, driven by user-specified inputs, are not generally considered
+to be "generators".
+
+For instance, using Coccinelle to convert code from one pattern to another,
+fixing documentation typos with a spell checker, or transforming code using
+sed / awk / etc., are not considered to be acts of code generation. Where an
+automated manipulation is performed on code, however, this should be declared
+in the commit message.
+
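+For example, a hypothetical commit performing such a mechanical rename could
+record the exact command used; the subsystem, names and command below are
+illustrative only::
+
+    qom: rename FOO to BAR
+
+    Mechanical conversion done with:
+
+      sed -i 's/\bFOO\b/BAR/g' $(git grep -l FOO)
+
+    Signed-off-by: Some Person <some.person@example.com>
+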
+At times contributors may use or create scripts/tools to generate an initial
+boilerplate code template which is then filled in to produce the final patch.
+The output of such a tool would still be considered the "preferred format",
+since it is intended to be a foundation for further human authored changes.
+Such tools are acceptable to use, provided there is clearly defined copyright
+and licensing for their output. Note in particular the caveats applying to AI
+content generators below.
+
+Use of AI content generators
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+TL;DR:
+
+ **Current QEMU project policy is to DECLINE any contributions which are
+ believed to include or derive from AI generated content. This includes
+ ChatGPT, Claude, Copilot, Llama and similar tools.**
+
+The increasing prevalence of AI-assisted software development results in a
+number of difficult legal questions and risks for software projects, including
+QEMU. Of particular concern is content generated by `Large Language Models
+<https://en.wikipedia.org/wiki/Large_language_model>`__ (LLMs).
+
+The QEMU community requires that contributors certify their patch submissions
+are made in accordance with the rules of the `Developer's Certificate of
+Origin (DCO) <dco>`.
+
+To satisfy the DCO, the patch contributor has to fully understand the
+copyright and license status of content they are contributing to QEMU. With AI
+content generators, the copyright and license status of the output is
+ill-defined with no generally accepted, settled legal foundation.
+
+Where the training material is known, it is common for it to include large
+volumes of material under restrictive licensing/copyright terms. Even where
+the training material is all known to be under open source licenses, it is
+likely to be under a variety of terms, not all of which will be compatible
+with QEMU's licensing requirements.
+
+How contributors could comply with DCO terms (b) or (c) for the output of AI
+content generators commonly available today is unclear. The QEMU project is
+not willing or able to accept the legal risks of non-compliance.
+
+The QEMU project thus requires that contributors refrain from using AI content
+generators on patches intended to be submitted to the project, and will
+decline any contribution if use of AI is either known or suspected.
+
+This policy does not apply to other uses of AI, such as researching APIs or
+algorithms, static analysis, or debugging, provided their output is not to be
+included in contributions.
+
+Examples of tools impacted by this policy include GitHub's Copilot, OpenAI's
+ChatGPT, Anthropic's Claude, Meta's Code Llama, and code/content generation
+agents built on top of such tools.
+
+This policy may evolve as AI tools mature and the legal situation is
+clarified. In the meantime, requests for exceptions to this policy will be
+evaluated by the QEMU project on a case-by-case basis. To be granted an
+exception, a contributor will need to demonstrate clarity of the license and
+copyright status for the tool's output in relation to its training model and
+code, to the satisfaction of the project maintainers.
diff --git a/docs/devel/codebase.rst b/docs/devel/codebase.rst
new file mode 100644
index 0000000..2a31437
--- /dev/null
+++ b/docs/devel/codebase.rst
@@ -0,0 +1,215 @@
+========
+Codebase
+========
+
+This section presents the various parts of QEMU and how the codebase is
+organized.
+
+Beyond giving succinct descriptions, the goal is to offer links to various
+parts of the documentation/codebase.
+
+Subsystems
+----------
+
+An exhaustive list of subsystems and associated files can be found in the
+`MAINTAINERS <https://gitlab.com/qemu-project/qemu/-/blob/master/MAINTAINERS>`_
+file.
+
+Some of the main QEMU subsystems are:
+
+- `Accelerators<Accelerators>`
+- Block devices and `disk images<disk images>` support
+- `CI<ci>` and `Tests<testing>`
+- `Devices<device-emulation>` & Board models
+- `Documentation <documentation-root>`
+- `GDB support<GDB usage>`
+- :ref:`Migration<migration>`
+- `Monitor<QEMU monitor>`
+- :ref:`QOM (QEMU Object Model)<qom>`
+- `System mode<System emulation>`
+- :ref:`TCG (Tiny Code Generator)<tcg>`
+- `User mode<user-mode>` (`Linux<linux-user-mode>` & `BSD<bsd-user-mode>`)
+- User Interfaces
+
+More documentation on QEMU subsystems can be found on the
+:ref:`internal-subsystem` page.
+
+The Grand tour
+--------------
+
+Here we briefly present what every folder in the top directory of the
+codebase contains. Hop on!
+
+The folder name links here will take you to that folder in our gitlab
+repository. Other links will take you to more detailed documentation for that
+subsystem, where we have it. Unfortunately, not every subsystem has
+documentation yet, so sometimes the source code is all you have.
+
+* `accel <https://gitlab.com/qemu-project/qemu/-/tree/master/accel>`_:
+ Infrastructure and architecture agnostic code related to the various
+ `accelerators <Accelerators>` supported by QEMU
+ (TCG, KVM, hvf, whpx, xen, nvmm).
+ Contains interfaces for operations that will be implemented per
+ `target <https://gitlab.com/qemu-project/qemu/-/tree/master/target>`_.
+* `audio <https://gitlab.com/qemu-project/qemu/-/tree/master/audio>`_:
+ Audio (host) support.
+* `authz <https://gitlab.com/qemu-project/qemu/-/tree/master/authz>`_:
+ `QEMU Authorization framework<client authorization>`.
+* `backends <https://gitlab.com/qemu-project/qemu/-/tree/master/backends>`_:
+ Various backends that are used to access resources on the host (e.g. for
+ random number generation, memory backing or cryptographic functions).
+* `block <https://gitlab.com/qemu-project/qemu/-/tree/master/block>`_:
+ Block devices and `image formats<disk images>` implementation.
+* `bsd-user <https://gitlab.com/qemu-project/qemu/-/tree/master/bsd-user>`_:
+ `BSD User mode<bsd-user-mode>`.
+* build: Where the built code goes by default. You can tell the QEMU build
+  system to put the built code anywhere else you like.
+* `chardev <https://gitlab.com/qemu-project/qemu/-/tree/master/chardev>`_:
+ Various backends used by char devices.
+* `common-user <https://gitlab.com/qemu-project/qemu/-/tree/master/common-user>`_:
+ User-mode assembly code for dealing with signals occurring during syscalls.
+* `configs <https://gitlab.com/qemu-project/qemu/-/tree/master/configs>`_:
+ Makefiles defining configurations to build QEMU.
+* `contrib <https://gitlab.com/qemu-project/qemu/-/tree/master/contrib>`_:
+ Community contributed devices/plugins/tools.
+* `crypto <https://gitlab.com/qemu-project/qemu/-/tree/master/crypto>`_:
+ Cryptographic algorithms used in QEMU.
+* `disas <https://gitlab.com/qemu-project/qemu/-/tree/master/disas>`_:
+ Disassembly functions used by QEMU target code.
+* `docs <https://gitlab.com/qemu-project/qemu/-/tree/master/docs>`_:
+ QEMU Documentation.
+* `dump <https://gitlab.com/qemu-project/qemu/-/tree/master/dump>`_:
+ Code to dump memory of a running VM.
+* `ebpf <https://gitlab.com/qemu-project/qemu/-/tree/master/ebpf>`_:
+ eBPF program support in QEMU. `virtio-net RSS<ebpf-rss>` uses it.
+* `fpu <https://gitlab.com/qemu-project/qemu/-/tree/master/fpu>`_:
+ Floating-point software emulation.
+* `fsdev <https://gitlab.com/qemu-project/qemu/-/tree/master/fsdev>`_:
+ `VirtFS <https://www.linux-kvm.org/page/VirtFS>`_ support.
+* `gdbstub <https://gitlab.com/qemu-project/qemu/-/tree/master/gdbstub>`_:
+ `GDB <GDB usage>` support.
+* `gdb-xml <https://gitlab.com/qemu-project/qemu/-/tree/master/gdb-xml>`_:
+ Set of XML files describing architectures and used by `gdbstub <GDB usage>`.
+* `host <https://gitlab.com/qemu-project/qemu/-/tree/master/host>`_:
+ Various architecture specific header files (crypto, atomic, memory
+ operations).
+* `linux-headers <https://gitlab.com/qemu-project/qemu/-/tree/master/linux-headers>`_:
+  A subset of headers imported from the Linux kernel, used for implementing
+  KVM support and user-mode.
+* `linux-user <https://gitlab.com/qemu-project/qemu/-/tree/master/linux-user>`_:
+ `User mode <user-mode>` implementation. Contains one folder per target
+ architecture.
+* `.gitlab-ci.d <https://gitlab.com/qemu-project/qemu/-/tree/master/.gitlab-ci.d>`_:
+ `CI <ci>` yaml and scripts.
+* `include <https://gitlab.com/qemu-project/qemu/-/tree/master/include>`_:
+  All headers associated with the different subsystems in QEMU. The hierarchy
+  mirrors the source code organization and naming.
+* `hw <https://gitlab.com/qemu-project/qemu/-/tree/master/hw>`_:
+  `Devices <device-emulation>` and board emulation. Devices are categorized by
+  type/protocol/architecture and located in the associated subfolders.
+* `io <https://gitlab.com/qemu-project/qemu/-/tree/master/io>`_:
+ QEMU `I/O channels <https://lists.gnu.org/archive/html/qemu-devel/2015-11/msg04208.html>`_.
+* `libdecnumber <https://gitlab.com/qemu-project/qemu/-/tree/master/libdecnumber>`_:
+  Import of the gcc library, used to implement decimal number arithmetic.
+* `migration <https://gitlab.com/qemu-project/qemu/-/tree/master/migration>`__:
+ :ref:`Migration framework <migration>`.
+* `monitor <https://gitlab.com/qemu-project/qemu/-/tree/master/monitor>`_:
+ `Monitor <QEMU monitor>` implementation (HMP & QMP).
+* `nbd <https://gitlab.com/qemu-project/qemu/-/tree/master/nbd>`_:
+ QEMU NBD (Network Block Device) server.
+* `net <https://gitlab.com/qemu-project/qemu/-/tree/master/net>`_:
+ Network (host) support.
+* `pc-bios <https://gitlab.com/qemu-project/qemu/-/tree/master/pc-bios>`_:
+ Contains pre-built firmware binaries and boot images, ready to use in
+ QEMU without compilation.
+* `plugins <https://gitlab.com/qemu-project/qemu/-/tree/master/plugins>`_:
+ :ref:`TCG plugins <tcg-plugins>` core implementation. Plugins can be found in
+ `tests <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/tcg/plugins>`__
+ and `contrib <https://gitlab.com/qemu-project/qemu/-/tree/master/contrib/plugins>`__
+ folders.
+* `po <https://gitlab.com/qemu-project/qemu/-/tree/master/po>`_:
+ Translation files.
+* `python <https://gitlab.com/qemu-project/qemu/-/tree/master/python>`_:
+ Python part of our build/test system.
+* `qapi <https://gitlab.com/qemu-project/qemu/-/tree/master/qapi>`_:
+ `QAPI <qapi>` implementation.
+* `qobject <https://gitlab.com/qemu-project/qemu/-/tree/master/qobject>`_:
+ QEMU Object implementation.
+* `qga <https://gitlab.com/qemu-project/qemu/-/tree/master/qga>`_:
+ QEMU `Guest agent <qemu-ga>` implementation.
+* `qom <https://gitlab.com/qemu-project/qemu/-/tree/master/qom>`_:
+ QEMU :ref:`Object model <qom>` implementation, with monitor associated commands.
+* `replay <https://gitlab.com/qemu-project/qemu/-/tree/master/replay>`_:
+ QEMU :ref:`Record/replay <replay>` implementation.
+* `roms <https://gitlab.com/qemu-project/qemu/-/tree/master/roms>`_:
+ Contains source code for various firmware and ROMs, which can be compiled if
+ custom or updated versions are needed.
+* `rust <https://gitlab.com/qemu-project/qemu/-/tree/master/rust>`_:
+  Rust integration in QEMU. It contains the newly defined Rust interfaces and
+  the devices that use them.
+* `scripts <https://gitlab.com/qemu-project/qemu/-/tree/master/scripts>`_:
+  Collection of scripts used in the build and test systems, and various tools
+  for the QEMU codebase and execution traces.
+* `scsi <https://gitlab.com/qemu-project/qemu/-/tree/master/scsi>`_:
+ Code related to SCSI support, used by SCSI devices.
+* `semihosting <https://gitlab.com/qemu-project/qemu/-/tree/master/semihosting>`_:
+ QEMU `Semihosting <Semihosting>` implementation.
+* `stats <https://gitlab.com/qemu-project/qemu/-/tree/master/stats>`_:
+ `Monitor <QEMU monitor>` stats commands implementation.
+* `storage-daemon <https://gitlab.com/qemu-project/qemu/-/tree/master/storage-daemon>`_:
+ QEMU `Storage daemon <storage-daemon>` implementation.
+* `stubs <https://gitlab.com/qemu-project/qemu/-/tree/master/stubs>`_:
+ Various stubs (empty functions) used to compile QEMU with specific
+ configurations.
+* `subprojects <https://gitlab.com/qemu-project/qemu/-/tree/master/subprojects>`_:
+  QEMU submodules used by the QEMU build system.
+* `system <https://gitlab.com/qemu-project/qemu/-/tree/master/system>`_:
+ QEMU `system mode <System emulation>` implementation (cpu, mmu, boot support).
+* `target <https://gitlab.com/qemu-project/qemu/-/tree/master/target>`_:
+ Contains code for all target architectures supported (one subfolder
+ per arch). For every architecture, you can find accelerator specific
+ implementations.
+* `tcg <https://gitlab.com/qemu-project/qemu/-/tree/master/tcg>`_:
+ :ref:`TCG <tcg>` related code.
+ Contains one subfolder per host supported architecture.
+* `tests <https://gitlab.com/qemu-project/qemu/-/tree/master/tests>`_:
+ QEMU `test <testing>` suite
+
+ - `data <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/data>`_:
+ Data for various tests.
+ - `decode <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/decode>`_:
+ Testsuite for :ref:`decodetree <decodetree>` implementation.
+ - `docker <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/docker>`_:
+ Code and scripts to create `containers <container-ref>` used in `CI <ci>`.
+ - `fp <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/fp>`_:
+ QEMU testsuite for soft float implementation.
+ - `functional <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/functional>`_:
+ `Functional tests <checkfunctional-ref>` (full VM boot).
+ - `lcitool <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/lcitool>`_:
+ Generate dockerfiles for CI containers.
+ - `migration <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/migration>`_:
+ Test scripts and data for :ref:`Migration framework <migration>`.
+ - `multiboot <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/multiboot>`_:
+ Test multiboot functionality for x86_64/i386.
+ - `qapi-schema <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/qapi-schema>`_:
+ Test scripts and data for `QAPI <qapi-tests>`.
+ - `qemu-iotests <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/qemu-iotests>`_:
+ `Disk image and block tests <qemu-iotests>`.
+ - `qtest <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/qtest>`_:
+ `Device emulation testing <qtest>`.
+ - `tcg <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/tcg>`__:
+ `TCG related tests <checktcg-ref>`. Contains code per architecture
+ (subfolder) and multiarch tests as well.
+ - `tsan <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/tsan>`_:
+ `Suppressions <tsan-suppressions>` for thread sanitizer.
+ - `uefi-test-tools <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/uefi-test-tools>`_:
+ Test tool for UEFI support.
+ - `unit <https://gitlab.com/qemu-project/qemu/-/tree/master/tests/unit>`_:
+ QEMU `Unit tests <unit-tests>`.
+* `trace <https://gitlab.com/qemu-project/qemu/-/tree/master/trace>`_:
+  :ref:`Tracing framework <tracing>`. Used to print information associated
+  with various events during execution.
+* `ui <https://gitlab.com/qemu-project/qemu/-/tree/master/ui>`_:
+ QEMU User interfaces.
+* `util <https://gitlab.com/qemu-project/qemu/-/tree/master/util>`_:
+ Utility code used by other parts of QEMU.
diff --git a/docs/devel/control-flow-integrity.rst b/docs/devel/control-flow-integrity.rst
index e6b73a4..3d5702f 100644
--- a/docs/devel/control-flow-integrity.rst
+++ b/docs/devel/control-flow-integrity.rst
@@ -1,3 +1,5 @@
+.. _cfi:
+
============================
Control-Flow Integrity (CFI)
============================
diff --git a/docs/devel/crypto.rst b/docs/devel/crypto.rst
new file mode 100644
index 0000000..39b1c91
--- /dev/null
+++ b/docs/devel/crypto.rst
@@ -0,0 +1,10 @@
+.. _crypto-ref:
+
+====================
+Cryptography in QEMU
+====================
+
+.. toctree::
+ :maxdepth: 2
+
+ luks-detached-header
diff --git a/docs/devel/decodetree.rst b/docs/devel/decodetree.rst
index e3392aa..98ad33a 100644
--- a/docs/devel/decodetree.rst
+++ b/docs/devel/decodetree.rst
@@ -1,3 +1,5 @@
+.. _decodetree:
+
========================
Decodetree Specification
========================
diff --git a/docs/devel/ebpf_rss.rst b/docs/devel/ebpf_rss.rst
index 4a68682..ed5d337 100644
--- a/docs/devel/ebpf_rss.rst
+++ b/docs/devel/ebpf_rss.rst
@@ -1,3 +1,5 @@
+.. _ebpf-rss:
+
===========================
eBPF RSS virtio-net support
===========================
diff --git a/docs/devel/fuzzing.rst b/docs/devel/fuzzing.rst
deleted file mode 100644
index 3bfcb33..0000000
--- a/docs/devel/fuzzing.rst
+++ /dev/null
@@ -1,304 +0,0 @@
-========
-Fuzzing
-========
-
-This document describes the virtual-device fuzzing infrastructure in QEMU and
-how to use it to implement additional fuzzers.
-
-Basics
-------
-
-Fuzzing operates by passing inputs to an entry point/target function. The
-fuzzer tracks the code coverage triggered by the input. Based on these
-findings, the fuzzer mutates the input and repeats the fuzzing.
-
-To fuzz QEMU, we rely on libfuzzer. Unlike other fuzzers such as AFL, libfuzzer
-is an *in-process* fuzzer. For the developer, this means that it is their
-responsibility to ensure that state is reset between fuzzing-runs.
-
-Building the fuzzers
---------------------
-
-To build the fuzzers, install a recent version of clang:
-Configure with (substitute the clang binaries with the version you installed).
-Here, enable-sanitizers, is optional but it allows us to reliably detect bugs
-such as out-of-bounds accesses, use-after-frees, double-frees etc.::
-
- CC=clang-8 CXX=clang++-8 /path/to/configure --enable-fuzzing \
- --enable-sanitizers
-
-Fuzz targets are built similarly to system targets::
-
- make qemu-fuzz-i386
-
-This builds ``./qemu-fuzz-i386``
-
-The first option to this command is: ``--fuzz-target=FUZZ_NAME``
-To list all of the available fuzzers run ``qemu-fuzz-i386`` with no arguments.
-
-For example::
-
- ./qemu-fuzz-i386 --fuzz-target=virtio-scsi-fuzz
-
-Internally, libfuzzer parses all arguments that do not begin with ``"--"``.
-Information about these is available by passing ``-help=1``
-
-Now the only thing left to do is wait for the fuzzer to trigger potential
-crashes.
-
-Useful libFuzzer flags
-----------------------
-
-As mentioned above, libFuzzer accepts some arguments. Passing ``-help=1`` will
-list the available arguments. In particular, these arguments might be helpful:
-
-* ``CORPUS_DIR/`` : Specify a directory as the last argument to libFuzzer.
- libFuzzer stores each "interesting" input in this corpus directory. The next
- time you run libFuzzer, it will read all of the inputs from the corpus, and
- continue fuzzing from there. You can also specify multiple directories.
- libFuzzer loads existing inputs from all specified directories, but will only
- write new ones to the first one specified.
-
-* ``-max_len=4096`` : specify the maximum byte-length of the inputs libFuzzer
- will generate.
-
-* ``-close_fd_mask={1,2,3}`` : close, stderr, or both. Useful for targets that
- trigger many debug/error messages, or create output on the serial console.
-
-* ``-jobs=4 -workers=4`` : These arguments configure libFuzzer to run 4 fuzzers in
- parallel (4 fuzzing jobs in 4 worker processes). Alternatively, with only
- ``-jobs=N``, libFuzzer automatically spawns a number of workers less than or equal
- to half the available CPU cores. Replace 4 with a number appropriate for your
- machine. Make sure to specify a ``CORPUS_DIR``, which will allow the parallel
- fuzzers to share information about the interesting inputs they find.
-
-* ``-use_value_profile=1`` : For each comparison operation, libFuzzer computes
- ``(caller_pc&4095) | (popcnt(Arg1 ^ Arg2) << 12)`` and places this in the
- coverage table. Useful for targets with "magic" constants. If Arg1 came from
- the fuzzer's input and Arg2 is a magic constant, then each time the Hamming
- distance between Arg1 and Arg2 decreases, libFuzzer adds the input to the
- corpus.
-
-* ``-shrink=1`` : Tries to make elements of the corpus "smaller". Might lead to
- better coverage performance, depending on the target.
-
-Note that libFuzzer's exact behavior will depend on the version of
-clang and libFuzzer used to build the device fuzzers.
-
-Generating Coverage Reports
----------------------------
-
-Code coverage is a crucial metric for evaluating a fuzzer's performance.
-libFuzzer's output provides a "cov: " column that provides a total number of
-unique blocks/edges covered. To examine coverage on a line-by-line basis we
-can use Clang coverage:
-
- 1. Configure libFuzzer to store a corpus of all interesting inputs (see
- CORPUS_DIR above)
- 2. ``./configure`` the QEMU build with ::
-
- --enable-fuzzing \
- --extra-cflags="-fprofile-instr-generate -fcoverage-mapping"
-
- 3. Re-run the fuzzer. Specify $CORPUS_DIR/* as an argument, telling libfuzzer
- to execute all of the inputs in $CORPUS_DIR and exit. Once the process
- exits, you should find a file, "default.profraw" in the working directory.
- 4. Execute these commands to generate a detailed HTML coverage-report::
-
- llvm-profdata merge -output=default.profdata default.profraw
- llvm-cov show ./path/to/qemu-fuzz-i386 -instr-profile=default.profdata \
- --format html -output-dir=/path/to/output/report
-
-Adding a new fuzzer
--------------------
-
-Coverage over virtual devices can be improved by adding additional fuzzers.
-Fuzzers are kept in ``tests/qtest/fuzz/`` and should be added to
-``tests/qtest/fuzz/meson.build``
-
-Fuzzers can rely on both qtest and libqos to communicate with virtual devices.
-
-1. Create a new source file. For example ``tests/qtest/fuzz/foo-device-fuzz.c``.
-
-2. Write the fuzzing code using the libqtest/libqos API. See existing fuzzers
- for reference.
-
-3. Add the fuzzer to ``tests/qtest/fuzz/meson.build``.
-
-Fuzzers can be more-or-less thought of as special qtest programs which can
-modify the qtest commands and/or qtest command arguments based on inputs
-provided by libfuzzer. Libfuzzer passes a byte array and length. Commonly the
-fuzzer loops over the byte-array interpreting it as a list of qtest commands,
-addresses, or values.
-
-The Generic Fuzzer
-------------------
-
-Writing a fuzz target can be a lot of effort (especially if a device driver has
-not be built-out within libqos). Many devices can be fuzzed to some degree,
-without any device-specific code, using the generic-fuzz target.
-
-The generic-fuzz target is capable of fuzzing devices over their PIO, MMIO,
-and DMA input-spaces. To apply the generic-fuzz to a device, we need to define
-two env-variables, at minimum:
-
-* ``QEMU_FUZZ_ARGS=`` is the set of QEMU arguments used to configure a machine, with
- the device attached. For example, if we want to fuzz the virtio-net device
- attached to a pc-i440fx machine, we can specify::
-
- QEMU_FUZZ_ARGS="-M pc -nodefaults -netdev user,id=user0 \
- -device virtio-net,netdev=user0"
-
-* ``QEMU_FUZZ_OBJECTS=`` is a set of space-delimited strings used to identify
- the MemoryRegions that will be fuzzed. These strings are compared against
- MemoryRegion names and MemoryRegion owner names, to decide whether each
- MemoryRegion should be fuzzed. These strings support globbing. For the
- virtio-net example, we could use one of ::
-
- QEMU_FUZZ_OBJECTS='virtio-net'
- QEMU_FUZZ_OBJECTS='virtio*'
- QEMU_FUZZ_OBJECTS='virtio* pcspk' # Fuzz the virtio devices and the speaker
- QEMU_FUZZ_OBJECTS='*' # Fuzz the whole machine``
-
-The ``"info mtree"`` and ``"info qom-tree"`` monitor commands can be especially
-useful for identifying the ``MemoryRegion`` and ``Object`` names used for
-matching.
-
-As a generic rule-of-thumb, the more ``MemoryRegions``/Devices we match, the
-greater the input-space, and the smaller the probability of finding crashing
-inputs for individual devices. As such, it is usually a good idea to limit the
-fuzzer to only a few ``MemoryRegions``.
-
-To ensure that these env variables have been configured correctly, we can use::
-
- ./qemu-fuzz-i386 --fuzz-target=generic-fuzz -runs=0
-
-The output should contain a complete list of matched MemoryRegions.
-
-OSS-Fuzz
---------
-QEMU is continuously fuzzed on `OSS-Fuzz
-<https://github.com/google/oss-fuzz>`_. By default, the OSS-Fuzz build
-will try to fuzz every fuzz-target. Since the generic-fuzz target
-requires additional information provided in environment variables, we
-pre-define some generic-fuzz configs in
-``tests/qtest/fuzz/generic_fuzz_configs.h``. Each config must specify:
-
-- ``.name``: To identify the fuzzer config
-
-- ``.args`` OR ``.argfunc``: A string or pointer to a function returning a
- string. These strings are used to specify the ``QEMU_FUZZ_ARGS``
- environment variable. ``argfunc`` is useful when the config relies on e.g.
- a dynamically created temp directory, or a free tcp/udp port.
-
-- ``.objects``: A string that specifies the ``QEMU_FUZZ_OBJECTS`` environment
- variable.
-
-To fuzz additional devices/device configuration on OSS-Fuzz, send patches for
-either a new device-specific fuzzer or a new generic-fuzz config.
-
-Build details:
-
-- The Dockerfile that sets up the environment for building QEMU's
- fuzzers on OSS-Fuzz can be fund in the OSS-Fuzz repository
- __(https://github.com/google/oss-fuzz/blob/master/projects/qemu/Dockerfile)
-
-- The script responsible for building the fuzzers can be found in the
- QEMU source tree at ``scripts/oss-fuzz/build.sh``
-
-Building Crash Reproducers
------------------------------------------
-When we find a crash, we should try to create an independent reproducer, that
-can be used on a non-fuzzer build of QEMU. This filters out any potential
-false-positives, and improves the debugging experience for developers.
-Here are the steps for building a reproducer for a crash found by the
-generic-fuzz target.
-
-- Ensure the crash reproduces::
-
- qemu-fuzz-i386 --fuzz-target... ./crash-...
-
-- Gather the QTest output for the crash::
-
- QEMU_FUZZ_TIMEOUT=0 QTEST_LOG=1 FUZZ_SERIALIZE_QTEST=1 \
- qemu-fuzz-i386 --fuzz-target... ./crash-... &> /tmp/trace
-
-- Reorder and clean-up the resulting trace::
-
- scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py /tmp/trace > /tmp/reproducer
-
-- Get the arguments needed to start qemu, and provide a path to qemu::
-
- less /tmp/trace # The args should be logged at the top of this file
- export QEMU_ARGS="-machine ..."
- export QEMU_PATH="path/to/qemu-system"
-
-- Ensure the crash reproduces in qemu-system::
-
- $QEMU_PATH $QEMU_ARGS -qtest stdio < /tmp/reproducer
-
-- From the crash output, obtain some string that identifies the crash. This
- can be a line in the stack-trace, for example::
-
- export CRASH_TOKEN="hw/usb/hcd-xhci.c:1865"
-
-- Minimize the reproducer::
-
- scripts/oss-fuzz/minimize_qtest_trace.py -M1 -M2 \
- /tmp/reproducer /tmp/reproducer-minimized
-
-- Confirm that the minimized reproducer still crashes::
-
- $QEMU_PATH $QEMU_ARGS -qtest stdio < /tmp/reproducer-minimized
-
-- Create a one-liner reproducer that can be sent over email::
-
- ./scripts/oss-fuzz/output_reproducer.py -bash /tmp/reproducer-minimized
-
-- Output the C source code for a test case that will reproduce the bug::
-
- ./scripts/oss-fuzz/output_reproducer.py -owner "John Smith <john@smith.com>"\
- -name "test_function_name" /tmp/reproducer-minimized
-
-- Report the bug and send a patch with the C reproducer upstream
-
-Implementation Details / Fuzzer Lifecycle
------------------------------------------
-
-The fuzzer has two entrypoints that libfuzzer calls. libfuzzer provides it's
-own ``main()``, which performs some setup, and calls the entrypoints:
-
-``LLVMFuzzerInitialize``: called prior to fuzzing. Used to initialize all of the
-necessary state
-
-``LLVMFuzzerTestOneInput``: called for each fuzzing run. Processes the input and
-resets the state at the end of each run.
-
-In more detail:
-
-``LLVMFuzzerInitialize`` parses the arguments to the fuzzer (must start with two
-dashes, so they are ignored by libfuzzer ``main()``). Currently, the arguments
-select the fuzz target. Then, the qtest client is initialized. If the target
-requires qos, qgraph is set up and the QOM/LIBQOS modules are initialized.
-Then the QGraph is walked and the QEMU cmd_line is determined and saved.
-
-After this, the ``vl.c:main`` is called to set up the guest. There are
-target-specific hooks that can be called before and after main, for
-additional setup(e.g. PCI setup, or VM snapshotting).
-
-``LLVMFuzzerTestOneInput``: Uses qtest/qos functions to act based on the fuzz
-input. It is also responsible for manually calling ``main_loop_wait`` to ensure
-that bottom halves are executed and any cleanup required before the next input.
-
-Since the same process is reused for many fuzzing runs, QEMU state needs to
-be reset at the end of each run. For example, this can be done by rebooting the
-VM, after each run.
-
- - *Pros*: Straightforward and fast for simple fuzz targets.
-
- - *Cons*: Depending on the device, does not reset all device state. If the
- device requires some initialization prior to being ready for fuzzing (common
- for QOS-based targets), this initialization needs to be done after each
- reboot.
-
- - *Example target*: ``i440fx-qtest-reboot-fuzz``
diff --git a/docs/devel/index-api.rst b/docs/devel/index-api.rst
index fe01b2b..1c487c1 100644
--- a/docs/devel/index-api.rst
+++ b/docs/devel/index-api.rst
@@ -9,6 +9,7 @@ generated from in-code annotations to function prototypes.
bitops
loads-stores
+ lockcnt
memory
modules
pci
diff --git a/docs/devel/index-build.rst b/docs/devel/index-build.rst
index 90b406c..3f3cb21 100644
--- a/docs/devel/index-build.rst
+++ b/docs/devel/index-build.rst
@@ -1,20 +1,16 @@
-QEMU Build and Test System
---------------------------
+QEMU Build System
+-----------------
-Details about how QEMU's build system works and how it is integrated
-into our testing infrastructure. You will need to understand some of
-the basics if you are adding new files and targets to the build.
+Details about how QEMU's build system works. You will need to understand
+some of the basics if you are adding new files and targets to the build.
.. toctree::
:maxdepth: 3
build-system
+ build-environment
kconfig
docs
- testing
- acpi-bits
- qtest
- ci
qapi-code-gen
- fuzzing
+ qapi-domain
control-flow-integrity
diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst
index 5636e9c..7a0678c 100644
--- a/docs/devel/index-internals.rst
+++ b/docs/devel/index-internals.rst
@@ -1,3 +1,5 @@
+.. _internal-subsystem:
+
Internal Subsystem Information
------------------------------
@@ -8,6 +10,7 @@ Details about QEMU's various subsystems including how to add features to them.
qom
atomics
+ rcu
block-coroutine-wrapper
clocks
ebpf_rss
@@ -17,6 +20,9 @@ Details about QEMU's various subsystems including how to add features to them.
s390-cpu-topology
s390-dasd-ipl
tracing
+ uefi-vars
vfio-iommufd
writing-monitor-commands
virtio-backends
+ crypto
+ multiple-iothreads
diff --git a/docs/devel/index-process.rst b/docs/devel/index-process.rst
index 362f97e..5807752 100644
--- a/docs/devel/index-process.rst
+++ b/docs/devel/index-process.rst
@@ -13,7 +13,9 @@ Notes about how to interact with the community and how and where to submit patch
maintainers
style
submitting-a-patch
+ code-provenance
trivial-patches
stable-process
submitting-a-pull-request
secure-coding-practices
+ rust
diff --git a/docs/devel/index.rst b/docs/devel/index.rst
index abf6045..29f032d 100644
--- a/docs/devel/index.rst
+++ b/docs/devel/index.rst
@@ -31,6 +31,8 @@ the :ref:`tcg_internals`.
index-process
index-build
+ testing/index
index-api
index-internals
index-tcg
+ codebase
diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst
index 52d4b90..493b76c 100644
--- a/docs/devel/kconfig.rst
+++ b/docs/devel/kconfig.rst
@@ -38,7 +38,7 @@ originated in the Linux kernel, though it was heavily simplified and
the handling of dependencies is stricter in QEMU.
Unlike Linux, there is no user interface to edit the configuration, which
-is instead specified in per-target files under the ``default-configs/``
+is instead specified in per-target files under the ``configs/``
directory of the QEMU source tree. This is because, unlike Linux,
configuration and dependencies can be treated as a black box when building
QEMU; the default configuration that QEMU ships with should be okay in
@@ -103,7 +103,7 @@ directives can be included:
**default value**: ``default <value> [if <expr>]``
Default values are assigned to the config symbol if no other value was
- set by the user via ``default-configs/*.mak`` files, and only if
+ set by the user via ``configs/*.mak`` files, and only if
``select`` or ``depends on`` directives do not force the value to true
or false respectively. ``<value>`` can be ``y`` or ``n``; it cannot
be an arbitrary Boolean expression. However, a condition for applying
@@ -119,7 +119,7 @@ directives can be included:
This is similar to ``select`` as it applies a lower limit of ``y``
to another symbol. However, the lower limit is only a default
and the "implied" symbol's value may still be set to ``n`` from a
- ``default-configs/*.mak`` files. The following two examples are
+ ``configs/*.mak`` files. The following two examples are
equivalent::
config FOO
@@ -146,7 +146,7 @@ declares its dependencies in different ways:
bool
Subsystems always default to false (they have no ``default`` directive)
- and are never visible in ``default-configs/*.mak`` files. It's
+ and are never visible in ``configs/*.mak`` files. It's
up to other symbols to ``select`` whatever subsystems they require.
They sometimes have ``select`` directives to bring in other required
@@ -238,7 +238,7 @@ declares its dependencies in different ways:
include libraries (such as ``FDT``) or ``TARGET_BIG_ENDIAN``
(possibly negated).
- Boards are listed for convenience in the ``default-configs/*.mak``
+ Boards are listed for convenience in the ``configs/*.mak``
for the target they apply to.
**internal elements**
@@ -251,18 +251,18 @@ declares its dependencies in different ways:
Internal elements group code that is useful in several boards or
devices. They are usually enabled with ``select`` and in turn select
- other elements; they are never visible in ``default-configs/*.mak``
+ other elements; they are never visible in ``configs/*.mak``
files, and often not even in the Makefile.
Writing and modifying default configurations
--------------------------------------------
In addition to the Kconfig files under hw/, each target also includes
-a file called ``default-configs/TARGETNAME-softmmu.mak``. These files
+a file called ``configs/TARGETNAME-softmmu.mak``. These files
initialize some Kconfig variables to non-default values and provide the
starting point to turn on devices and subsystems.
-A file in ``default-configs/`` looks like the following example::
+A file in ``configs/`` looks like the following example::
# Default configuration for alpha-softmmu
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
index ec627aa..9471bac 100644
--- a/docs/devel/loads-stores.rst
+++ b/docs/devel/loads-stores.rst
@@ -95,7 +95,7 @@ guest CPU state in case of a guest CPU exception. This is passed
to ``cpu_restore_state()``. Therefore the value should either be 0,
to indicate that the guest CPU state is already synchronized, or
the result of ``GETPC()`` from the top level ``HELPER(foo)``
-function, which is a return address into the generated code [#gpc]_.
+function, which is a return address into the generated code\ [#gpc]_.
.. [#gpc] Note that ``GETPC()`` should be used with great care: calling
it in other functions that are *not* the top level
diff --git a/docs/devel/lockcnt.rst b/docs/devel/lockcnt.rst
new file mode 100644
index 0000000..8b43578
--- /dev/null
+++ b/docs/devel/lockcnt.rst
@@ -0,0 +1,278 @@
+Locked Counters (aka ``QemuLockCnt``)
+=====================================
+
+QEMU often uses reference counts to track data structures that are being
+accessed and should not be freed. For example, a loop that invokes
+callbacks like this is not safe::
+
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+
+``QLIST_FOREACH_SAFE`` protects against deletion of the current node (``ioh``)
+by stashing away its ``next`` pointer. However, ``ioh->fd_write`` could
+actually delete the next node from the list. The simplest way to
+avoid this is to mark the node as deleted, and remove it from the
+list in the above loop::
+
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+
+If however this loop must also be reentrant, i.e. it is possible that
+``ioh->fd_write`` invokes the loop again, some kind of counting is needed::
+
+ walking_handlers++;
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ if (walking_handlers == 1) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+ walking_handlers--;
+
+One may think of using the RCU primitives, ``rcu_read_lock()`` and
+``rcu_read_unlock()``; effectively, the RCU nesting count would take
+the place of the walking_handlers global variable. Indeed,
+reference counting and RCU have similar purposes, but their usage in
+general is complementary:
+
+- reference counting is fine-grained and limited to a single data
+ structure; RCU delays reclamation of *all* RCU-protected data
+ structures;
+
+- reference counting works even in the presence of code that keeps
+ a reference for a long time; RCU critical sections in principle
+ should be kept short;
+
+- reference counting is often applied to code that is not thread-safe
+ but is reentrant; in fact, usage of reference counting in QEMU predates
+ the introduction of threads by many years. RCU is generally used to
+ protect readers from other threads freeing memory after concurrent
+ modifications to a data structure.
+
+- reclaiming data can be done by a separate thread in the case of RCU;
+ this can improve performance, but also delay reclamation undesirably.
+ With reference counting, reclamation is deterministic.
+
+This file documents ``QemuLockCnt``, an abstraction for using reference
+counting in code that has to be both thread-safe and reentrant.
+
+
+``QemuLockCnt`` concepts
+------------------------
+
+A ``QemuLockCnt`` comprises both a counter and a mutex; it has primitives
+to increment and decrement the counter, and to take and release the
+mutex. The counter notes how many visits to the data structures are
+taking place (the visits could be from different threads, or there could
+be multiple reentrant visits from the same thread). The basic rules
+governing the counter/mutex pair then are the following:
+
+- Data protected by the QemuLockCnt must not be freed unless the
+ counter is zero and the mutex is taken.
+
+- A new visit cannot be started while the counter is zero and the
+ mutex is taken.
+
+Most of the time, the mutex protects all writes to the data structure,
+not just frees, though there could be cases where this is not necessary.
+
+Reads, instead, can be done without taking the mutex, as long as the
+readers and writers use the same macros that are used for RCU, for
+example ``qatomic_rcu_read``, ``qatomic_rcu_set``, ``QLIST_FOREACH_RCU``,
+etc. This is because the reads are done outside a lock, and a set or
+``QLIST_INSERT_HEAD`` can happen concurrently with the read. The RCU
+API ensures that the
+processor and the compiler see all required memory barriers.
+
+This could be implemented simply by protecting the counter with the
+mutex, for example::
+
+ // (1)
+ qemu_mutex_lock(&walking_handlers_mutex);
+ walking_handlers++;
+ qemu_mutex_unlock(&walking_handlers_mutex);
+
+ ...
+
+ // (2)
+ qemu_mutex_lock(&walking_handlers_mutex);
+ if (--walking_handlers == 0) {
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ }
+ }
+ qemu_mutex_unlock(&walking_handlers_mutex);
+
+Here, no frees can happen in the code represented by the ellipsis.
+If another thread is executing critical section (2), that part of
+the code cannot be entered, because the thread will not be able
+to increment the ``walking_handlers`` variable. And of course
+during the visit any other thread will see a nonzero value for
+``walking_handlers``, as in the single-threaded code.
+
+Note that it is possible for multiple concurrent accesses to delay
+the cleanup arbitrarily; in other words, for the ``walking_handlers``
+counter to never become zero. For this reason, this technique is
+more easily applicable if concurrent access to the structure is rare.
+
+However, critical sections are easy to forget since you have to do
+them for each modification of the counter. ``QemuLockCnt`` ensures that
+all modifications of the counter take the lock appropriately, and it
+can also be more efficient in two ways:
+
+- it avoids taking the lock for many operations (for example
+ incrementing the counter while it is non-zero);
+
+- on some platforms, one can implement ``QemuLockCnt`` to hold the counter
+ and the mutex in a single word, making the fast path no more expensive
+ than simply managing a counter using atomic operations (see
+ :doc:`atomics`). This can be very helpful if concurrent access to
+ the data structure is expected to be rare.
+
+
+Using the same mutex for frees and writes can still incur some small
+inefficiencies; for example, a visit can never start if the counter is
+zero and the mutex is taken -- even if the mutex is taken by a write,
+which in principle need not block a visit of the data structure.
+However, these are usually not a problem if any of the following
+assumptions are valid:
+
+- concurrent access is possible but rare
+
+- writes are rare
+
+- writes are frequent, but this kind of write (e.g. appending to a
+ list) has a very small critical section.
+
+For example, QEMU uses ``QemuLockCnt`` to manage an ``AioContext``'s list of
+bottom halves and file descriptor handlers. Modifications to the list
+of file descriptor handlers are rare. Creation of a new bottom half is
+frequent and can happen on a fast path; however: 1) it is almost never
+concurrent with a visit to the list of bottom halves; 2) it only has
+three instructions in the critical path, two assignments and a ``smp_wmb()``.
+
+
+``QemuLockCnt`` API
+-------------------
+
+.. kernel-doc:: include/qemu/lockcnt.h
+
+
+``QemuLockCnt`` usage
+---------------------
+
+This section explains the typical usage patterns for ``QemuLockCnt`` functions.
+
+Setting a variable to a non-NULL value can be done between
+``qemu_lockcnt_lock`` and ``qemu_lockcnt_unlock``::
+
+ qemu_lockcnt_lock(&xyz_lockcnt);
+ if (!xyz) {
+ new_xyz = g_new(XYZ, 1);
+ ...
+ qatomic_rcu_set(&xyz, new_xyz);
+ }
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+
+Accessing the value can be done between ``qemu_lockcnt_inc`` and
+``qemu_lockcnt_dec``::
+
+ qemu_lockcnt_inc(&xyz_lockcnt);
+ if (xyz) {
+ XYZ *p = qatomic_rcu_read(&xyz);
+ ...
+ /* Accesses can now be done through "p". */
+ }
+ qemu_lockcnt_dec(&xyz_lockcnt);
+
+Freeing the object can similarly use ``qemu_lockcnt_lock`` and
+``qemu_lockcnt_unlock``, but you also need to ensure that the count
+is zero (i.e. there is no concurrent visit). Because ``qemu_lockcnt_inc``
+takes the ``QemuLockCnt``'s lock, the count cannot become non-zero while
+the object is being freed. Freeing an object looks like this::
+
+ qemu_lockcnt_lock(&xyz_lockcnt);
+ if (!qemu_lockcnt_count(&xyz_lockcnt)) {
+ g_free(xyz);
+ xyz = NULL;
+ }
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+
+If an object has to be freed right after a visit, you can combine
+the decrement, the locking and the check on count as follows::
+
+ qemu_lockcnt_inc(&xyz_lockcnt);
+ if (xyz) {
+ XYZ *p = qatomic_rcu_read(&xyz);
+ ...
+ /* Accesses can now be done through "p". */
+ }
+ if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) {
+ g_free(xyz);
+ xyz = NULL;
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+ }
+
+``QemuLockCnt`` can also be used to access a list as follows::
+
+ qemu_lockcnt_inc(&io_handlers_lockcnt);
+ QLIST_FOREACH_RCU(ioh, &io_handlers, pioh) {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+
+ if (qemu_lockcnt_dec_and_lock(&io_handlers_lockcnt)) {
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ }
+ qemu_lockcnt_unlock(&io_handlers_lockcnt);
+ }
+
+Again, the RCU primitives are used because new items can be added to the
+list during the walk. ``QLIST_FOREACH_RCU`` ensures that the processor and
+the compiler see the appropriate memory barriers.
+
+An alternative pattern uses ``qemu_lockcnt_dec_if_lock``::
+
+ qemu_lockcnt_inc(&io_handlers_lockcnt);
+ QLIST_FOREACH_SAFE_RCU(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ if (qemu_lockcnt_dec_if_lock(&io_handlers_lockcnt)) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ qemu_lockcnt_inc_and_unlock(&io_handlers_lockcnt);
+ }
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+ qemu_lockcnt_dec(&io_handlers_lockcnt);
+
+Here you can use ``qemu_lockcnt_dec`` instead of ``qemu_lockcnt_dec_and_lock``,
+because there is no special task to do if the count goes from 1 to 0.
diff --git a/docs/devel/lockcnt.txt b/docs/devel/lockcnt.txt
deleted file mode 100644
index a3fb3bc..0000000
--- a/docs/devel/lockcnt.txt
+++ /dev/null
@@ -1,277 +0,0 @@
-DOCUMENTATION FOR LOCKED COUNTERS (aka QemuLockCnt)
-===================================================
-
-QEMU often uses reference counts to track data structures that are being
-accessed and should not be freed. For example, a loop that invoke
-callbacks like this is not safe:
-
- QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
- if (ioh->revents & G_IO_OUT) {
- ioh->fd_write(ioh->opaque);
- }
- }
-
-QLIST_FOREACH_SAFE protects against deletion of the current node (ioh)
-by stashing away its "next" pointer. However, ioh->fd_write could
-actually delete the next node from the list. The simplest way to
-avoid this is to mark the node as deleted, and remove it from the
-list in the above loop:
-
- QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
- if (ioh->deleted) {
- QLIST_REMOVE(ioh, next);
- g_free(ioh);
- } else {
- if (ioh->revents & G_IO_OUT) {
- ioh->fd_write(ioh->opaque);
- }
- }
- }
-
-If however this loop must also be reentrant, i.e. it is possible that
-ioh->fd_write invokes the loop again, some kind of counting is needed:
-
- walking_handlers++;
- QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
- if (ioh->deleted) {
- if (walking_handlers == 1) {
- QLIST_REMOVE(ioh, next);
- g_free(ioh);
- }
- } else {
- if (ioh->revents & G_IO_OUT) {
- ioh->fd_write(ioh->opaque);
- }
- }
- }
- walking_handlers--;
-
-One may think of using the RCU primitives, rcu_read_lock() and
-rcu_read_unlock(); effectively, the RCU nesting count would take
-the place of the walking_handlers global variable. Indeed,
-reference counting and RCU have similar purposes, but their usage in
-general is complementary:
-
-- reference counting is fine-grained and limited to a single data
- structure; RCU delays reclamation of *all* RCU-protected data
- structures;
-
-- reference counting works even in the presence of code that keeps
- a reference for a long time; RCU critical sections in principle
- should be kept short;
-
-- reference counting is often applied to code that is not thread-safe
- but is reentrant; in fact, usage of reference counting in QEMU predates
- the introduction of threads by many years. RCU is generally used to
- protect readers from other threads freeing memory after concurrent
- modifications to a data structure.
-
-- reclaiming data can be done by a separate thread in the case of RCU;
- this can improve performance, but also delay reclamation undesirably.
- With reference counting, reclamation is deterministic.
-
-This file documents QemuLockCnt, an abstraction for using reference
-counting in code that has to be both thread-safe and reentrant.
-
-
-QemuLockCnt concepts
---------------------
-
-A QemuLockCnt comprises both a counter and a mutex; it has primitives
-to increment and decrement the counter, and to take and release the
-mutex. The counter notes how many visits to the data structures are
-taking place (the visits could be from different threads, or there could
-be multiple reentrant visits from the same thread). The basic rules
-governing the counter/mutex pair then are the following:
-
-- Data protected by the QemuLockCnt must not be freed unless the
- counter is zero and the mutex is taken.
-
-- A new visit cannot be started while the counter is zero and the
- mutex is taken.
-
-Most of the time, the mutex protects all writes to the data structure,
-not just frees, though there could be cases where this is not necessary.
-
-Reads, instead, can be done without taking the mutex, as long as the
-readers and writers use the same macros that are used for RCU, for
-example qatomic_rcu_read, qatomic_rcu_set, QLIST_FOREACH_RCU, etc. This is
-because the reads are done outside a lock and a set or QLIST_INSERT_HEAD
-can happen concurrently with the read. The RCU API ensures that the
-processor and the compiler see all required memory barriers.
-
-This could be implemented simply by protecting the counter with the
-mutex, for example:
-
- // (1)
- qemu_mutex_lock(&walking_handlers_mutex);
- walking_handlers++;
- qemu_mutex_unlock(&walking_handlers_mutex);
-
- ...
-
- // (2)
- qemu_mutex_lock(&walking_handlers_mutex);
- if (--walking_handlers == 0) {
- QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
- if (ioh->deleted) {
- QLIST_REMOVE(ioh, next);
- g_free(ioh);
- }
- }
- }
- qemu_mutex_unlock(&walking_handlers_mutex);
-
-Here, no frees can happen in the code represented by the ellipsis.
-If another thread is executing critical section (2), that part of
-the code cannot be entered, because the thread will not be able
-to increment the walking_handlers variable. And of course
-during the visit any other thread will see a nonzero value for
-walking_handlers, as in the single-threaded code.
-
-Note that it is possible for multiple concurrent accesses to delay
-the cleanup arbitrarily; in other words, for the walking_handlers
-counter to never become zero. For this reason, this technique is
-more easily applicable if concurrent access to the structure is rare.
-
-However, critical sections are easy to forget since you have to do
-them for each modification of the counter. QemuLockCnt ensures that
-all modifications of the counter take the lock appropriately, and it
-can also be more efficient in two ways:
-
-- it avoids taking the lock for many operations (for example
- incrementing the counter while it is non-zero);
-
-- on some platforms, one can implement QemuLockCnt to hold the lock
- and the mutex in a single word, making the fast path no more expensive
- than simply managing a counter using atomic operations (see
- docs/devel/atomics.rst). This can be very helpful if concurrent access to
- the data structure is expected to be rare.
-
-
-Using the same mutex for frees and writes can still incur some small
-inefficiencies; for example, a visit can never start if the counter is
-zero and the mutex is taken---even if the mutex is taken by a write,
-which in principle need not block a visit of the data structure.
-However, these are usually not a problem if any of the following
-assumptions are valid:
-
-- concurrent access is possible but rare
-
-- writes are rare
-
-- writes are frequent, but this kind of write (e.g. appending to a
- list) has a very small critical section.
-
-For example, QEMU uses QemuLockCnt to manage an AioContext's list of
-bottom halves and file descriptor handlers. Modifications to the list
-of file descriptor handlers are rare. Creation of a new bottom half is
-frequent and can happen on a fast path; however: 1) it is almost never
-concurrent with a visit to the list of bottom halves; 2) it only has
-three instructions in the critical path, two assignments and a smp_wmb().
-
-
-QemuLockCnt API
----------------
-
-The QemuLockCnt API is described in include/qemu/thread.h.
-
-
-QemuLockCnt usage
------------------
-
-This section explains the typical usage patterns for QemuLockCnt functions.
-
-Setting a variable to a non-NULL value can be done between
-qemu_lockcnt_lock and qemu_lockcnt_unlock:
-
- qemu_lockcnt_lock(&xyz_lockcnt);
- if (!xyz) {
- new_xyz = g_new(XYZ, 1);
- ...
- qatomic_rcu_set(&xyz, new_xyz);
- }
- qemu_lockcnt_unlock(&xyz_lockcnt);
-
-Accessing the value can be done between qemu_lockcnt_inc and
-qemu_lockcnt_dec:
-
- qemu_lockcnt_inc(&xyz_lockcnt);
- if (xyz) {
- XYZ *p = qatomic_rcu_read(&xyz);
- ...
- /* Accesses can now be done through "p". */
- }
- qemu_lockcnt_dec(&xyz_lockcnt);
-
-Freeing the object can similarly use qemu_lockcnt_lock and
-qemu_lockcnt_unlock, but you also need to ensure that the count
-is zero (i.e. there is no concurrent visit). Because qemu_lockcnt_inc
-takes the QemuLockCnt's lock, the count cannot become non-zero while
-the object is being freed. Freeing an object looks like this:
-
- qemu_lockcnt_lock(&xyz_lockcnt);
- if (!qemu_lockcnt_count(&xyz_lockcnt)) {
- g_free(xyz);
- xyz = NULL;
- }
- qemu_lockcnt_unlock(&xyz_lockcnt);
-
-If an object has to be freed right after a visit, you can combine
-the decrement, the locking and the check on count as follows:
-
- qemu_lockcnt_inc(&xyz_lockcnt);
- if (xyz) {
- XYZ *p = qatomic_rcu_read(&xyz);
- ...
- /* Accesses can now be done through "p". */
- }
- if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) {
- g_free(xyz);
- xyz = NULL;
- qemu_lockcnt_unlock(&xyz_lockcnt);
- }
-
-QemuLockCnt can also be used to access a list as follows:
-
- qemu_lockcnt_inc(&io_handlers_lockcnt);
- QLIST_FOREACH_RCU(ioh, &io_handlers, pioh) {
- if (ioh->revents & G_IO_OUT) {
- ioh->fd_write(ioh->opaque);
- }
- }
-
- if (qemu_lockcnt_dec_and_lock(&io_handlers_lockcnt)) {
- QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
- if (ioh->deleted) {
- QLIST_REMOVE(ioh, next);
- g_free(ioh);
- }
- }
- qemu_lockcnt_unlock(&io_handlers_lockcnt);
- }
-
-Again, the RCU primitives are used because new items can be added to the
-list during the walk. QLIST_FOREACH_RCU ensures that the processor and
-the compiler see the appropriate memory barriers.
-
-An alternative pattern uses qemu_lockcnt_dec_if_lock:
-
- qemu_lockcnt_inc(&io_handlers_lockcnt);
- QLIST_FOREACH_SAFE_RCU(ioh, &io_handlers, next, pioh) {
- if (ioh->deleted) {
- if (qemu_lockcnt_dec_if_lock(&io_handlers_lockcnt)) {
- QLIST_REMOVE(ioh, next);
- g_free(ioh);
- qemu_lockcnt_inc_and_unlock(&io_handlers_lockcnt);
- }
- } else {
- if (ioh->revents & G_IO_OUT) {
- ioh->fd_write(ioh->opaque);
- }
- }
- }
- qemu_lockcnt_dec(&io_handlers_lockcnt);
-
-Here you can use qemu_lockcnt_dec instead of qemu_lockcnt_dec_and_lock,
-because there is no special task to do if the count goes from 1 to 0.
diff --git a/docs/devel/luks-detached-header.rst b/docs/devel/luks-detached-header.rst
new file mode 100644
index 0000000..94ec285
--- /dev/null
+++ b/docs/devel/luks-detached-header.rst
@@ -0,0 +1,182 @@
+================================
+LUKS volume with detached header
+================================
+
+Introduction
+============
+
+This document gives an overview of the design of a LUKS volume with a detached
+header and how to use it.
+
+Background
+==========
+
+The LUKS format has the ability to store the header in a separate volume from
+the payload. We could extend the LUKS driver in QEMU to support this use
+case.
+
+Normally a LUKS volume has the following layout:
+
+::
+
+ +-----------------------------------------------+
+ | | | |
+ disk | header | key material | disk payload data |
+ | | | |
+ +-----------------------------------------------+
+
+With a detached LUKS header, you need two disks, laid out as follows:
+
+::
+
+ +--------------------------+
+ disk1 | header | key material |
+ +--------------------------+
+ +---------------------+
+ disk2 | disk payload data |
+ +---------------------+
+
+There are a variety of benefits to doing this:
+
+ * Secrecy - disk2 cannot be identified as containing a LUKS
+ volume since there's no header
+ * Control - if access to disk1 is restricted, then even
+ if someone has access to disk2 they can't unlock
+ it. Might be useful if you have disks on NFS but
+ want to restrict which host can launch a VM
+ instance from it, by dynamically providing access
+ to the header to a designated host
+ * Flexibility - your application data volume may be a given
+ size and it is inconvenient to resize it to
+ add encryption. You can store the LUKS header
+ separately and use the existing storage
+ volume for payload
+ * Recovery - corruption of a bit in the header may make the
+ entire payload inaccessible. It might be
+ convenient to take backups of the header. If
+ your primary disk header becomes corrupt, you
+ can still unlock the data by pointing to the
+ backup detached header
+
+Architecture
+============
+
+Take the qcow2 encryption, for example. The architecture of the
+LUKS volume with detached header is shown in the diagram below.
+
+There are two children of the root node: a file and a header.
+Data from the disk payload is stored in the file node. The
+LUKS header and key material are located in the header node,
+as previously mentioned.
+
+::
+
+ +-----------------------------+
+ Root node | foo[luks] |
+ +-----------------------------+
+ | |
+ file | header |
+ | |
+ +---------------------+ +------------------+
+ Child node |payload-format[qcow2]| |header-format[raw]|
+ +---------------------+ +------------------+
+ | |
+ file | file |
+ | |
+ +----------------------+ +---------------------+
+ Child node |payload-protocol[file]| |header-protocol[file]|
+ +----------------------+ +---------------------+
+ | |
+ | |
+ | |
+ Host storage Host storage
+
+Usage
+=====
+
+Create a LUKS disk with a detached header using qemu-img
+--------------------------------------------------------
+
+Shell commandline::
+
+ # qemu-img create --object secret,id=sec0,data=abc123 -f luks \
+ -o cipher-alg=aes-256,cipher-mode=xts -o key-secret=sec0 \
+ -o detached-header=true test-header.img
+ # qemu-img create -f qcow2 test-payload.qcow2 200G
+ # qemu-img info 'json:{"driver":"luks","file":{"filename": \
+ "test-payload.img"},"header":{"filename":"test-header.img"}}'
+
+Set up a VM's LUKS volume with a detached header
+------------------------------------------------
+
+Qemu commandline::
+
+ # qemu-system-x86_64 ... \
+ -object '{"qom-type":"secret","id":"libvirt-3-format-secret", \
+ "data":"abc123"}' \
+ -blockdev '{"driver":"file","filename":"/path/to/test-header.img", \
+ "node-name":"libvirt-1-storage"}' \
+ -blockdev '{"node-name":"libvirt-1-format","read-only":false, \
+ "driver":"raw","file":"libvirt-1-storage"}' \
+ -blockdev '{"driver":"file","filename":"/path/to/test-payload.qcow2", \
+ "node-name":"libvirt-2-storage"}' \
+ -blockdev '{"node-name":"libvirt-2-format","read-only":false, \
+ "driver":"qcow2","file":"libvirt-2-storage"}' \
+ -blockdev '{"node-name":"libvirt-3-format","driver":"luks", \
+ "file":"libvirt-2-format","header":"libvirt-1-format","key-secret": \
+ "libvirt-3-format-secret"}' \
+ -device '{"driver":"virtio-blk-pci","bus":XXX,"addr":YYY,"drive": \
+ "libvirt-3-format","id":"virtio-disk1"}'
+
+Add LUKS volume to a VM with a detached header
+----------------------------------------------
+
+1. object-add the secret for decrypting the cipher stored in
+ the LUKS header above::
+
+ # virsh qemu-monitor-command vm '{"execute":"object-add", \
+ "arguments":{"qom-type":"secret", "id": \
+ "libvirt-4-format-secret", "data":"abc123"}}'
+
+2. block-add the protocol node for the LUKS header::
+
+ # virsh qemu-monitor-command vm '{"execute":"blockdev-add", \
+ "arguments":{"node-name":"libvirt-1-storage", "driver":"file", \
+ "filename": "/path/to/test-header.img" }}'
+
+3. block-add the raw format node for the LUKS header::
+
+ # virsh qemu-monitor-command vm '{"execute":"blockdev-add", \
+ "arguments":{"node-name":"libvirt-1-format", "driver":"raw", \
+ "file":"libvirt-1-storage"}}'
+
+4. block-add the protocol node for the disk payload image::
+
+ # virsh qemu-monitor-command vm '{"execute":"blockdev-add", \
+ "arguments":{"node-name":"libvirt-2-storage", "driver":"file", \
+ "filename":"/path/to/test-payload.qcow2"}}'
+
+5. block-add the qcow2 format node for the disk payload data::
+
+ # virsh qemu-monitor-command vm '{"execute":"blockdev-add", \
+ "arguments":{"node-name":"libvirt-2-format", "driver":"qcow2", \
+ "file":"libvirt-2-storage"}}'
+
+6. block-add the luks format node to link the qcow2 disk
+ with the LUKS header by specifying the field "header"::
+
+ # virsh qemu-monitor-command vm '{"execute":"blockdev-add", \
+ "arguments":{"node-name":"libvirt-3-format", "driver":"luks", \
+ "file":"libvirt-2-format", "header":"libvirt-1-format", \
+ "key-secret":"libvirt-2-format-secret"}}'
+
+7. Finally, hot-plug the virtio-blk device::
+
+ # virsh qemu-monitor-command vm '{"execute":"device_add", \
+ "arguments": {"driver":"virtio-blk-pci", \
+ "drive": "libvirt-3-format", "id":"virtio-disk2"}}
+
+TODO
+====
+
+1. Support the shared detached LUKS header within the VM.
diff --git a/docs/devel/maintainers.rst b/docs/devel/maintainers.rst
index 5c907d9..88a613e 100644
--- a/docs/devel/maintainers.rst
+++ b/docs/devel/maintainers.rst
@@ -99,9 +99,9 @@ members of the QEMU community, you should make arrangements to attend
a `KeySigningParty <https://wiki.qemu.org/KeySigningParty>`__ (for
example at KVM Forum) or make alternative arrangements to have your
key signed by an attendee. Key signing requires meeting another
-community member **in person** [#]_ so please make appropriate
+community member **in person**\ [#2020]_ so please make appropriate
arrangements.
-.. [#] In recent pandemic times we have had to exercise some
+.. [#2020] In recent pandemic times we have had to exercise some
flexibility here. Maintainers still need to sign their pull
requests though.
diff --git a/docs/devel/memory.rst b/docs/devel/memory.rst
index 69c5e3f..57fb2ae 100644
--- a/docs/devel/memory.rst
+++ b/docs/devel/memory.rst
@@ -369,4 +369,4 @@ callbacks are called:
API Reference
-------------
-.. kernel-doc:: include/exec/memory.h
+.. kernel-doc:: include/system/memory.h
diff --git a/docs/devel/migration/CPR.rst b/docs/devel/migration/CPR.rst
index 63c3647..7897873 100644
--- a/docs/devel/migration/CPR.rst
+++ b/docs/devel/migration/CPR.rst
@@ -5,7 +5,7 @@ CPR is the umbrella name for a set of migration modes in which the
VM is migrated to a new QEMU instance on the same host. It is
intended for use when the goal is to update host software components
that run the VM, such as QEMU or even the host kernel. At this time,
-cpr-reboot is the only available mode.
+the cpr-reboot and cpr-transfer modes are available.
Because QEMU is restarted on the same host, with access to the same
local devices, CPR is allowed in certain cases where normal migration
@@ -53,7 +53,7 @@ RAM is copied to the migration URI.
Outgoing:
* Set the migration mode parameter to ``cpr-reboot``.
* Set the ``x-ignore-shared`` capability if desired.
- * Issue the ``migrate`` command. It is recommended the the URI be a
+ * Issue the ``migrate`` command. It is recommended the URI be a
``file`` type, but one can use other types such as ``exec``,
provided the command captures all the data from the outgoing side,
and provides all the data to the incoming side.
@@ -145,3 +145,183 @@ Caveats
cpr-reboot mode may not be used with postcopy, background-snapshot,
or COLO.
+
+cpr-transfer mode
+-----------------
+
+This mode allows the user to transfer a guest to a new QEMU instance
+on the same host with minimal guest pause time, by preserving guest
+RAM in place, albeit with new virtual addresses in new QEMU. Devices
+and their pinned memory pages will also be preserved in a future QEMU
+release.
+
+The user starts new QEMU on the same host as old QEMU, with command-
+line arguments to create the same machine, plus the ``-incoming``
+option for the main migration channel, like normal live migration.
+In addition, the user adds a second ``-incoming`` option with channel
+type ``cpr``. This CPR channel must support file descriptor transfer
+with SCM_RIGHTS, i.e. it must be a UNIX domain socket.
+
+To initiate CPR, the user issues a migrate command to old QEMU,
+adding a second migration channel of type ``cpr`` in the channels
+argument. Old QEMU stops the VM, saves state to the migration
+channels, and enters the postmigrate state. Execution resumes in
+new QEMU.
+
+New QEMU reads the CPR channel before opening a monitor, hence
+the CPR channel cannot be specified in the list of channels for a
+migrate-incoming command. It may only be specified on the command
+line.
+
+Usage
+^^^^^
+
+Memory backend objects must have the ``share=on`` attribute.
+
+The VM must be started with the ``-machine aux-ram-share=on``
+option. This causes implicit RAM blocks (those not described by
+a memory-backend object) to be allocated by mmap'ing a memfd.
+Examples include VGA and ROM.
+
+Outgoing:
+ * Set the migration mode parameter to ``cpr-transfer``.
+ * Issue the ``migrate`` command, containing a main channel and
+ a cpr channel.
+
+Incoming:
+ * Start new QEMU with two ``-incoming`` options.
+ * If the VM was running when the outgoing ``migrate`` command was
+ issued, then QEMU automatically resumes VM execution.
+
+Caveats
+^^^^^^^
+
+cpr-transfer mode may not be used with postcopy, background-snapshot,
+or COLO.
+
+memory-backend-epc is not supported.
+
+The main incoming migration channel address cannot be a file type.
+
+If the main incoming channel address is an inet socket, then the port
+cannot be 0 (meaning dynamically choose a port).
+
+When using ``-incoming defer``, you must issue the migrate command to
+old QEMU before issuing any monitor commands to new QEMU, because new
+QEMU blocks waiting to read from the cpr channel before starting its
+monitor, and old QEMU does not write to the channel until the migrate
+command is issued. However, new QEMU does not open and read the
+main migration channel until you issue the migrate incoming command.
+
+Example 1: incoming channel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In these examples, we simply restart the same version of QEMU, but
+in a real scenario one would start new QEMU on the incoming side.
+Note that new QEMU does not print the monitor prompt until old QEMU
+has issued the migrate command. The outgoing side uses QMP because
+HMP cannot specify a CPR channel. Some QMP responses are omitted for
+brevity.
+
+::
+
+ Outgoing: Incoming:
+
+ # qemu-kvm -qmp stdio
+ -object memory-backend-file,id=ram0,size=4G,
+ mem-path=/dev/shm/ram0,share=on -m 4G
+ -machine memory-backend=ram0
+ -machine aux-ram-share=on
+ ...
+ # qemu-kvm -monitor stdio
+ -incoming tcp:0:44444
+ -incoming '{"channel-type": "cpr",
+ "addr": { "transport": "socket",
+ "type": "unix", "path": "cpr.sock"}}'
+ ...
+ {"execute":"qmp_capabilities"}
+
+ {"execute": "query-status"}
+ {"return": {"status": "running",
+ "running": true}}
+
+ {"execute":"migrate-set-parameters",
+ "arguments":{"mode":"cpr-transfer"}}
+
+ {"execute": "migrate", "arguments": { "channels": [
+ {"channel-type": "main",
+ "addr": { "transport": "socket", "type": "inet",
+ "host": "0", "port": "44444" }},
+ {"channel-type": "cpr",
+ "addr": { "transport": "socket", "type": "unix",
+ "path": "cpr.sock" }}]}}
+
+ QEMU 10.0.50 monitor
+ (qemu) info status
+ VM status: running
+
+ {"execute": "query-status"}
+ {"return": {"status": "postmigrate",
+ "running": false}}
+
+Example 2: incoming defer
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This example uses ``-incoming defer`` to hot plug a device before
+accepting the main migration channel. Again note you must issue the
+migrate command to old QEMU before you can issue any monitor
+commands to new QEMU.
+
+
+::
+
+ Outgoing: Incoming:
+
+ # qemu-kvm -monitor stdio
+ -object memory-backend-file,id=ram0,size=4G,
+ mem-path=/dev/shm/ram0,share=on -m 4G
+ -machine memory-backend=ram0
+ -machine aux-ram-share=on
+ ...
+ # qemu-kvm -monitor stdio
+ -incoming defer
+ -incoming '{"channel-type": "cpr",
+ "addr": { "transport": "socket",
+ "type": "unix", "path": "cpr.sock"}}'
+ ...
+ {"execute":"qmp_capabilities"}
+
+ {"execute": "device_add",
+ "arguments": {"driver": "pcie-root-port"}}
+
+ {"execute":"migrate-set-parameters",
+ "arguments":{"mode":"cpr-transfer"}}
+
+ {"execute": "migrate", "arguments": { "channels": [
+ {"channel-type": "main",
+ "addr": { "transport": "socket", "type": "inet",
+ "host": "0", "port": "44444" }},
+ {"channel-type": "cpr",
+ "addr": { "transport": "socket", "type": "unix",
+ "path": "cpr.sock" }}]}}
+
+ QEMU 10.0.50 monitor
+ (qemu) info status
+ VM status: paused (inmigrate)
+ (qemu) device_add pcie-root-port
+ (qemu) migrate_incoming tcp:0:44444
+ (qemu) info status
+ VM status: running
+
+ {"execute": "query-status"}
+ {"return": {"status": "postmigrate",
+ "running": false}}
+
+Futures
+^^^^^^^
+
+cpr-transfer mode is based on a capability to transfer open file
+descriptors from old to new QEMU. In the future, descriptors for
+vfio, iommufd, vhost, and char devices could be transferred,
+preserving those devices and their kernel state without interruption,
+even if they do not explicitly support live migration.
diff --git a/docs/devel/migration/compatibility.rst b/docs/devel/migration/compatibility.rst
index 5a5417e..ecb887e 100644
--- a/docs/devel/migration/compatibility.rst
+++ b/docs/devel/migration/compatibility.rst
@@ -198,7 +198,7 @@ was done::
The relevant parts for migration are::
- @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = {
+ @@ -1281,7 +1284,8 @@ static const Property virtio_blk_properties[] = {
#endif
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
true),
@@ -395,13 +395,12 @@ the old behaviour or the new behaviour::
index 8a87ccc8b0..5153ad63d6 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
- @@ -79,6 +79,8 @@ static Property pci_props[] = {
+ @@ -79,6 +79,8 @@ static const Property pci_props[] = {
DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
failover_pair_id),
DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
+ DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
+ QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
- DEFINE_PROP_END_OF_LIST()
};
Notice that we enable the feature for new machine types.
diff --git a/docs/devel/migration/features.rst b/docs/devel/migration/features.rst
index 58f8fd9..8f431d5 100644
--- a/docs/devel/migration/features.rst
+++ b/docs/devel/migration/features.rst
@@ -14,3 +14,4 @@ Migration has plenty of features to support different use cases.
CPR
qpl-compression
uadk-compression
+ qatzip-compression
diff --git a/docs/devel/migration/main.rst b/docs/devel/migration/main.rst
index 784c899..cdd4f4a 100644
--- a/docs/devel/migration/main.rst
+++ b/docs/devel/migration/main.rst
@@ -1,3 +1,5 @@
+.. _migration:
+
===================
Migration framework
===================
@@ -465,6 +467,12 @@ Examples of such API functions are:
- portio_list_set_address()
- portio_list_set_enabled()
+Since the order of device save/restore is not defined, you must
+avoid accessing or changing any other device's state in one of these
+callbacks. (For instance, don't do anything that calls ``update_irq()``
+in a ``post_load`` hook.) Otherwise, restore will not be deterministic,
+and this will break execution record/replay.
+
Iterative device migration
--------------------------
diff --git a/docs/devel/migration/mapped-ram.rst b/docs/devel/migration/mapped-ram.rst
index d352b54..b08c2b4 100644
--- a/docs/devel/migration/mapped-ram.rst
+++ b/docs/devel/migration/mapped-ram.rst
@@ -44,7 +44,7 @@ Use-cases
The mapped-ram feature was designed for use cases where the migration
stream will be directed to a file in the filesystem and not
-immediately restored on the destination VM [#]_. These could be
+immediately restored on the destination VM\ [#alternatives]_. These could be
thought of as snapshots. We can further categorize them into live and
non-live.
@@ -70,7 +70,7 @@ mapped-ram in this scenario is portability since background-snapshot
depends on async dirty tracking (KVM_GET_DIRTY_LOG) which is not
supported outside of Linux.
-.. [#] While this same effect could be obtained with the usage of
+.. [#alternatives] While this same effect could be obtained with the usage of
snapshots or the ``file:`` migration alone, mapped-ram provides
a performance increase for VMs with larger RAM sizes (10s to
100s of GiBs), specially if the VM has been stopped beforehand.
diff --git a/docs/devel/migration/qatzip-compression.rst b/docs/devel/migration/qatzip-compression.rst
new file mode 100644
index 0000000..862b383
--- /dev/null
+++ b/docs/devel/migration/qatzip-compression.rst
@@ -0,0 +1,165 @@
+==================
+QATzip Compression
+==================
+In scenarios with limited network bandwidth, the ``QATzip`` solution can help
+users save a lot of host CPU resources by accelerating compression and
+decompression through the Intel QuickAssist Technology (``QAT``) hardware.
+
+
+The following test was conducted using 8 multifd channels and 10Gbps network
+bandwidth. The results show that, compared to zstd, ``QATzip`` significantly
+saves CPU resources on the sender and reduces migration time. Compared to the
+uncompressed solution, ``QATzip`` greatly improves the dirty page processing
+capability, indicated by the Pages per Second metric, and also reduces the
+total migration time.
+
+::
+
+ VM Configuration: 16 vCPU and 64G memory
+ VM Workload: all vCPUs are idle and 54G memory is filled with Silesia data.
+ QAT Devices: 4
+ |-----------|--------|---------|----------|----------|------|------|
+ |8 Channels |Total |down |throughput|pages per | send | recv |
+ | |time(ms)|time(ms) |(mbps) |second | cpu %| cpu% |
+ |-----------|--------|---------|----------|----------|------|------|
+ |qatzip | 16630| 28| 10467| 2940235| 160| 360|
+ |-----------|--------|---------|----------|----------|------|------|
+ |zstd | 20165| 24| 8579| 2391465| 810| 340|
+ |-----------|--------|---------|----------|----------|------|------|
+ |none | 46063| 40| 10848| 330240| 45| 85|
+ |-----------|--------|---------|----------|----------|------|------|
+
+
+QATzip Compression Framework
+============================
+
+``QATzip`` is a user space library which builds on top of the Intel QuickAssist
+Technology to provide extended accelerated compression and decompression
+services.
+
+For an introduction to ``QATzip``, please refer to `QATzip Introduction
+<https://github.com/intel/QATzip?tab=readme-ov-file#introductionl>`_
+
+::
+
+ +----------------+
+ | MultiFd Thread |
+ +-------+--------+
+ |
+ | compress/decompress
+ +-------+--------+
+ | QATzip library |
+ +-------+--------+
+ |
+ +-------+--------+
+ | QAT library |
+ +-------+--------+
+ | user space
+ --------+---------------------
+ | kernel space
+ +------+-------+
+ | QAT Driver |
+ +------+-------+
+ |
+ +------+-------+
+ | QAT Devices |
+ +--------------+
+
+
+QATzip Installation
+-------------------
+
+The ``QATzip`` installation package has been integrated into some Linux
+distributions and can be installed directly. For example, on Ubuntu Server
+24.04 LTS it can be installed using the command below:
+
+.. code-block:: shell
+
+ #apt search qatzip
+ libqatzip-dev/noble 1.2.0-0ubuntu3 amd64
+ Intel QuickAssist user space library development files
+
+ libqatzip3/noble 1.2.0-0ubuntu3 amd64
+ Intel QuickAssist user space library
+
+ qatzip/noble,now 1.2.0-0ubuntu3 amd64 [installed]
+ Compression user-space tool for Intel QuickAssist Technology
+
+ #sudo apt install libqatzip-dev libqatzip3 qatzip
+
+If your system does not support the ``QATzip`` installation package, you can
+build and install it from source; please refer to `QATzip source code installation
+<https://github.com/intel/QATzip?tab=readme-ov-file#build-intel-quickassist-technology-driver>`_
+
+QAT Hardware Deployment
+-----------------------
+
+``QAT`` supports physical functions (PFs) and virtual functions (VFs) for
+deployment, and users can configure ``QAT`` resources for migration according
+to actual needs. For more details about ``QAT`` deployment, please refer to
+`Intel QuickAssist Technology Documentation
+<https://intel.github.io/quickassist/index.html>`_
+
+For more information about the ``QAT`` hardware, please refer to `intel-quick-assist-technology-overview
+<https://www.intel.com/content/www/us/en/architecture-and-technology/intel-quick-assist-technology-overview.html>`_
+
+How To Use QATzip Compression
+=============================
+
+1 - Install the ``QATzip`` library
+
+2 - Build ``QEMU`` with the ``--enable-qatzip`` parameter
+
+ E.g. configure --target-list=x86_64-softmmu --enable-kvm ``--enable-qatzip``
+
+3 - Set ``migrate_set_parameter multifd-compression qatzip``
+
+4 - Set ``migrate_set_parameter multifd-qatzip-level comp_level``. The default
+comp_level value is 1, and levels from 1 to 9 are supported.
+
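+The equivalent settings can also be applied through QMP with
+``migrate-set-parameters``. The following is a minimal sketch for the
+outgoing side (assuming the ``multifd`` capability is enabled as usual;
+the level shown is illustrative)::
+
+  -> { "execute": "migrate-set-parameters",
+       "arguments": { "multifd-compression": "qatzip",
+                      "multifd-qatzip-level": 1 } }
+  <- { "return": {} }
+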
+QAT Memory Requirements
+=======================
+
+The user needs to reserve system memory for the QAT memory management to
+allocate DMA memory. The size of the reserved system memory depends on the
+number of devices used for migration and the number of multifd channels.
+
+Because memory usage depends on QAT configuration, please refer to `QAT Memory
+Driver Queries
+<https://intel.github.io/quickassist/PG/infrastructure_debugability.html?highlight=memory>`_
+for memory usage calculation.
+
+.. list-table:: An example of a PF used for migration
+ :header-rows: 1
+
+ * - Number of channels
+ - Sender memory usage
+ - Receiver memory usage
+ * - 2
+ - 10M
+ - 10M
+ * - 4
+ - 12M
+ - 14M
+ * - 8
+ - 16M
+ - 20M
+
+How To Choose Between QATzip and QPL
+====================================
+Starting with 4th Gen Intel Xeon Scalable processors, codenamed Sapphire
+Rapids (``SPR``), multiple built-in accelerators are supported, including
+``QAT`` and ``IAA``. The former can accelerate ``QATzip`` and the latter is
+used to accelerate ``QPL``.
+
+Here are some suggestions:
+
+1 - If the live migration scenario is limited by network bandwidth and the
+``QAT`` hardware resources exceed those of ``IAA``, use the ``QATzip`` method,
+which can save a lot of host CPU resources for compression.
+
+2 - If the system cannot support shared virtual memory (SVM) technology, use
+the ``QATzip`` method because ``QPL`` performance is not good without SVM
+support.
+
+3 - For other scenarios, use the ``QPL`` method first.
diff --git a/docs/devel/migration/uadk-compression.rst b/docs/devel/migration/uadk-compression.rst
index 3f73345..64cadeb 100644
--- a/docs/devel/migration/uadk-compression.rst
+++ b/docs/devel/migration/uadk-compression.rst
@@ -114,7 +114,7 @@ Make sure all these above kernel configurations are selected.
Accelerator dev node permissions
--------------------------------
-Harware accelerators(eg: HiSilicon Kunpeng Zip accelerator) gets registered to
+Hardware accelerators (eg: HiSilicon Kunpeng Zip accelerator) gets registered to
UADK and char devices are created in dev directory. In order to access resources
on hardware accelerator devices, write permission should be provided to user.
@@ -134,7 +134,7 @@ How To Use UADK Compression In QEMU Migration
Set ``migrate_set_parameter multifd-compression uadk``
Since UADK uses Shared Virtual Addressing(SVA) and device access virtual memory
-directly it is possible that SMMUv3 may enounter page faults while walking the
+directly it is possible that SMMUv3 may encounter page faults while walking the
IO page tables. This may impact the performance. In order to mitigate this,
please make sure to specify ``-mem-prealloc`` parameter to the destination VM
boot parameters.
diff --git a/docs/devel/migration/vfio.rst b/docs/devel/migration/vfio.rst
index c49482e..673e354 100644
--- a/docs/devel/migration/vfio.rst
+++ b/docs/devel/migration/vfio.rst
@@ -67,15 +67,35 @@ VFIO implements the device hooks for the iterative approach as follows:
* A ``switchover_ack_needed`` function that checks if the VFIO device uses
"switchover-ack" migration capability when this capability is enabled.
-* A ``save_state`` function to save the device config space if it is present.
+* A ``switchover_start`` function that in the multifd mode starts a thread that
+ reassembles the multifd received data and loads it in-order into the device.
+ In the non-multifd mode this function is a NOP.
+
+* A ``save_state`` function to save the device config space if it is present
+ in the non-multifd mode.
+ In the multifd mode it just emits a dummy EOS marker.
* A ``save_live_complete_precopy`` function that sets the VFIO device in
_STOP_COPY state and iteratively copies the data for the VFIO device until
the vendor driver indicates that no data remains.
+ In the multifd mode it just emits a dummy EOS marker.
+
+* A ``save_live_complete_precopy_thread`` function that in the multifd mode
+ provides a thread handler performing the multifd device state transfer.
+ It sets the VFIO device to _STOP_COPY state, iteratively reads the data
+ from the VFIO device and queues it for multifd transmission until the vendor
+ driver indicates that no data remains.
+ After that, it saves the device config space and queues it for multifd
+ transfer too.
+ In the non-multifd mode this thread is a NOP.
* A ``load_state`` function that loads the config section and the data
sections that are generated by the save functions above.
+* A ``load_state_buffer`` function that loads the device state and the device
+ config that arrived via multifd channels.
+ It's used only in the multifd mode.
+
* ``cleanup`` functions for both save and load that perform any migration
related cleanup.
@@ -176,8 +196,11 @@ Live migration save path
Then the VFIO device is put in _STOP_COPY state
(FINISH_MIGRATE, _ACTIVE, _STOP_COPY)
.save_live_complete_precopy() is called for each active device
- For the VFIO device, iterate in .save_live_complete_precopy() until
+ For the VFIO device: in the non-multifd mode iterate in
+ .save_live_complete_precopy() until
pending data is 0
+ In the multifd mode this iteration is done in
+ .save_live_complete_precopy_thread() instead.
|
(POSTMIGRATE, _COMPLETED, _STOP_COPY)
Migraton thread schedules cleanup bottom half and exits
@@ -194,6 +217,9 @@ Live migration resume path
(RESTORE_VM, _ACTIVE, _STOP)
|
For each device, .load_state() is called for that device section data
+ transmitted via the main migration channel.
+ For data transmitted via multifd channels .load_state_buffer() is called
+ instead.
(RESTORE_VM, _ACTIVE, _RESUMING)
|
At the end, .load_cleanup() is called for each device and vCPUs are started
@@ -206,3 +232,18 @@ Postcopy
========
Postcopy migration is currently not supported for VFIO devices.
+
+Multifd
+=======
+
+Starting from QEMU version 10.0 it is possible to transfer VFIO device
+_STOP_COPY state via multifd channels. This helps reduce downtime - especially
+with multiple VFIO devices or with devices having a large migration state.
+As an additional benefit, setting the VFIO device to _STOP_COPY state and
+saving its config space is also parallelized (run in a separate thread) in
+such migration mode.
+
+The multifd VFIO device state transfer is controlled by
+"x-migration-multifd-transfer" VFIO device property. This property defaults to
+AUTO, which means that VFIO device state transfer via multifd channels is
+attempted in configurations that otherwise support it.
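+
+As a minimal sketch, a command line that explicitly enables multifd
+device state transfer for an assigned device could look like the
+following (the host PCI address is illustrative)::
+
+  -device vfio-pci,host=0000:3b:00.1,x-migration-multifd-transfer=on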
diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst
index d706c27..da9a153 100644
--- a/docs/devel/multi-thread-tcg.rst
+++ b/docs/devel/multi-thread-tcg.rst
@@ -4,6 +4,8 @@
This work is licensed under the terms of the GNU GPL, version 2 or
later. See the COPYING file in the top-level directory.
+.. _mttcg:
+
==================
Multi-threaded TCG
==================
@@ -26,16 +28,15 @@ vCPU Scheduling
We introduce a new running mode where each vCPU will run on its own
user-space thread. This is enabled by default for all FE/BE
combinations where the host memory model is able to accommodate the
-guest (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO is zero) and the
-guest has had the required work done to support this safely
-(TARGET_SUPPORTS_MTTCG).
+guest (TCGCPUOps::guest_default_memory_order & ~TCG_TARGET_DEFAULT_MO is zero)
+and the guest has had the required work done to support this safely
+(TCGCPUOps::mttcg_supported).
System emulation will fall back to the original round robin approach
if:
* forced by --accel tcg,thread=single
* enabling --icount mode
-* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST)
In the general case of running translated code there should be no
inter-vCPU dependencies and all vCPUs should be able to run at full
diff --git a/docs/devel/multiple-iothreads.rst b/docs/devel/multiple-iothreads.rst
new file mode 100644
index 0000000..d1f3fc4
--- /dev/null
+++ b/docs/devel/multiple-iothreads.rst
@@ -0,0 +1,139 @@
+Using Multiple ``IOThread``\ s
+==============================
+
+..
+ Copyright (c) 2014-2017 Red Hat Inc.
+
+ This work is licensed under the terms of the GNU GPL, version 2 or later. See
+ the COPYING file in the top-level directory.
+
+
+This document explains the ``IOThread`` feature and how to write code that runs
+outside the BQL.
+
+The main loop and ``IOThread``\ s
+---------------------------------
+QEMU is an event-driven program that can do several things at once using an
+event loop. The VNC server and the QMP monitor are both processed from the
+same event loop, which monitors their file descriptors until they become
+readable and then invokes a callback.
+
+The default event loop is called the main loop (see ``main-loop.c``). It is
+possible to create additional event loop threads using
+``-object iothread,id=my-iothread``.
+
+Side note: The main loop and ``IOThread`` are both event loops but their code is
+not shared completely. Sometimes it is useful to remember that although they
+are conceptually similar they are currently not interchangeable.
+
+Why ``IOThread``\ s are useful
+------------------------------
+``IOThread``\ s allow the user to control the placement of work. The main loop is a
+scalability bottleneck on hosts with many CPUs. Work can be spread across
+several ``IOThread``\ s instead of just one main loop. When set up correctly this
+can improve I/O latency and reduce jitter seen by the guest.
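+
+As a minimal sketch, a disk's I/O processing can be placed into a
+dedicated event loop by creating an ``IOThread`` object and attaching
+it to a ``virtio-blk`` device (the object, drive and image names here
+are illustrative)::
+
+  -object iothread,id=iothread0 \
+  -drive if=none,id=drive0,file=disk.img,format=raw \
+  -device virtio-blk-pci,drive=drive0,iothread=iothread0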
+
+The main loop is also deeply associated with the BQL, which is a
+scalability bottleneck in itself. vCPU threads and the main loop use the BQL
+to serialize execution of QEMU code. This mutex is necessary because a lot of
+QEMU's code historically was not thread-safe.
+
+The fact that all I/O processing is done in a single main loop and that the
+BQL is contended by all vCPU threads and the main loop explains
+why it is desirable to place work into ``IOThread``\ s.
+
+The experimental ``virtio-blk`` data-plane implementation has been benchmarked and
+shows these effects:
+ftp://public.dhe.ibm.com/linux/pdfs/KVM_Virtualized_IO_Performance_Paper.pdf
+
+.. _how-to-program:
+
+How to program for ``IOThread``\ s
+----------------------------------
+The main difference between legacy code and new code that can run in an
+``IOThread`` is dealing explicitly with the event loop object, ``AioContext``
+(see ``include/block/aio.h``). Code that only works in the main loop
+implicitly uses the main loop's ``AioContext``. Code that supports running
+in ``IOThread``\ s must be aware of its ``AioContext``.
+
+``AioContext`` supports the following services:
+ * File descriptor monitoring (read/write/error on POSIX hosts)
+ * Event notifiers (inter-thread signalling)
+ * Timers
+ * Bottom Halves (BH) deferred callbacks
+
+There are several old APIs that use the main loop ``AioContext``:
+ * LEGACY ``qemu_aio_set_fd_handler()`` - monitor a file descriptor
+ * LEGACY ``qemu_aio_set_event_notifier()`` - monitor an event notifier
+ * LEGACY ``timer_new_ms()`` - create a timer
+ * LEGACY ``qemu_bh_new()`` - create a BH
+ * LEGACY ``qemu_bh_new_guarded()`` - create a BH with a device re-entrancy guard
+ * LEGACY ``qemu_aio_wait()`` - run an event loop iteration
+
+Since they implicitly work on the main loop they cannot be used in code that
+runs in an ``IOThread``. They might cause a crash or deadlock if called from an
+``IOThread`` since the BQL is not held.
+
+Instead, use the ``AioContext`` functions directly (see ``include/block/aio.h``):
+ * ``aio_set_fd_handler()`` - monitor a file descriptor
+ * ``aio_set_event_notifier()`` - monitor an event notifier
+ * ``aio_timer_new()`` - create a timer
+ * ``aio_bh_new()`` - create a BH
+ * ``aio_bh_new_guarded()`` - create a BH with a device re-entrancy guard
+ * ``aio_poll()`` - run an event loop iteration
+
+The ``qemu_bh_new_guarded``/``aio_bh_new_guarded`` APIs accept a
+``MemReentrancyGuard``
+argument, which is used to check for and prevent re-entrancy problems. For
+BHs associated with devices, the reentrancy-guard is contained in the
+corresponding ``DeviceState`` and named ``mem_reentrancy_guard``.
+
+The ``AioContext`` can be obtained from the ``IOThread`` using
+``iothread_get_aio_context()`` or for the main loop using
+``qemu_get_aio_context()``. Code that takes an ``AioContext`` argument
+works both in ``IOThread``\ s and the main loop, depending on which ``AioContext``
+instance the caller passes in.
+
+How to synchronize with an ``IOThread``
+---------------------------------------
+Variables that can be accessed by multiple threads require some form of
+synchronization such as ``qemu_mutex_lock()``, ``rcu_read_lock()``, etc.
+
+``AioContext`` functions like ``aio_set_fd_handler()``,
+``aio_set_event_notifier()``, ``aio_bh_new()``, and ``aio_timer_new()``
+are thread-safe. They can be used to trigger activity in an ``IOThread``.
+
+Side note: the best way to schedule a function call across threads is to call
+``aio_bh_schedule_oneshot()``.
+
+The main loop thread can wait synchronously for a condition using
+``AIO_WAIT_WHILE()``.
+
+``AioContext`` and the block layer
+----------------------------------
+The ``AioContext`` originates from the QEMU block layer, even though nowadays
+``AioContext`` is a generic event loop that can be used by any QEMU subsystem.
+
+The block layer has support for ``AioContext`` integrated. Each
+``BlockDriverState`` is associated with an ``AioContext`` using
+``bdrv_try_change_aio_context()`` and ``bdrv_get_aio_context()``.
+This allows block layer code to process I/O inside the
+right ``AioContext``. Other subsystems may wish to follow a similar approach.
+
+Block layer code must therefore expect to run in an ``IOThread`` and avoid using
+old APIs that implicitly use the main loop. See
+`How to program for IOThreads`_ for information on how to do that.
+
+Code running in the monitor typically needs to ensure that past
+requests from the guest are completed. When a block device is running
+in an ``IOThread``, the ``IOThread`` can also process requests from the guest
+(via ioeventfd). To achieve both objectives, wrap the code between
+``bdrv_drained_begin()`` and ``bdrv_drained_end()``, thus creating a "drained
+section".
+
+Long-running jobs (usually in the form of coroutines) are often scheduled in
+the ``BlockDriverState``'s ``AioContext``. The functions
+``bdrv_add``/``remove_aio_context_notifier``, or alternatively
+``blk_add``/``remove_aio_context_notifier`` if you use ``BlockBackends``,
+can be used to get a notification whenever ``bdrv_try_change_aio_context()``
+moves a ``BlockDriverState`` to a different ``AioContext``.
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
deleted file mode 100644
index de85767..0000000
--- a/docs/devel/multiple-iothreads.txt
+++ /dev/null
@@ -1,130 +0,0 @@
-Copyright (c) 2014-2017 Red Hat Inc.
-
-This work is licensed under the terms of the GNU GPL, version 2 or later. See
-the COPYING file in the top-level directory.
-
-
-This document explains the IOThread feature and how to write code that runs
-outside the BQL.
-
-The main loop and IOThreads
----------------------------
-QEMU is an event-driven program that can do several things at once using an
-event loop. The VNC server and the QMP monitor are both processed from the
-same event loop, which monitors their file descriptors until they become
-readable and then invokes a callback.
-
-The default event loop is called the main loop (see main-loop.c). It is
-possible to create additional event loop threads using -object
-iothread,id=my-iothread.
-
-Side note: The main loop and IOThread are both event loops but their code is
-not shared completely. Sometimes it is useful to remember that although they
-are conceptually similar they are currently not interchangeable.
-
-Why IOThreads are useful
-------------------------
-IOThreads allow the user to control the placement of work. The main loop is a
-scalability bottleneck on hosts with many CPUs. Work can be spread across
-several IOThreads instead of just one main loop. When set up correctly this
-can improve I/O latency and reduce jitter seen by the guest.
-
-The main loop is also deeply associated with the BQL, which is a
-scalability bottleneck in itself. vCPU threads and the main loop use the BQL
-to serialize execution of QEMU code. This mutex is necessary because a lot of
-QEMU's code historically was not thread-safe.
-
-The fact that all I/O processing is done in a single main loop and that the
-BQL is contended by all vCPU threads and the main loop explain
-why it is desirable to place work into IOThreads.
-
-The experimental virtio-blk data-plane implementation has been benchmarked and
-shows these effects:
-ftp://public.dhe.ibm.com/linux/pdfs/KVM_Virtualized_IO_Performance_Paper.pdf
-
-How to program for IOThreads
-----------------------------
-The main difference between legacy code and new code that can run in an
-IOThread is dealing explicitly with the event loop object, AioContext
-(see include/block/aio.h). Code that only works in the main loop
-implicitly uses the main loop's AioContext. Code that supports running
-in IOThreads must be aware of its AioContext.
-
-AioContext supports the following services:
- * File descriptor monitoring (read/write/error on POSIX hosts)
- * Event notifiers (inter-thread signalling)
- * Timers
- * Bottom Halves (BH) deferred callbacks
-
-There are several old APIs that use the main loop AioContext:
- * LEGACY qemu_aio_set_fd_handler() - monitor a file descriptor
- * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
- * LEGACY timer_new_ms() - create a timer
- * LEGACY qemu_bh_new() - create a BH
- * LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
- * LEGACY qemu_aio_wait() - run an event loop iteration
-
-Since they implicitly work on the main loop they cannot be used in code that
-runs in an IOThread. They might cause a crash or deadlock if called from an
-IOThread since the BQL is not held.
-
-Instead, use the AioContext functions directly (see include/block/aio.h):
- * aio_set_fd_handler() - monitor a file descriptor
- * aio_set_event_notifier() - monitor an event notifier
- * aio_timer_new() - create a timer
- * aio_bh_new() - create a BH
- * aio_bh_new_guarded() - create a BH with a device re-entrancy guard
- * aio_poll() - run an event loop iteration
-
-The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
-argument, which is used to check for and prevent re-entrancy problems. For
-BHs associated with devices, the reentrancy-guard is contained in the
-corresponding DeviceState and named "mem_reentrancy_guard".
-
-The AioContext can be obtained from the IOThread using
-iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
-Code that takes an AioContext argument works both in IOThreads or the main
-loop, depending on which AioContext instance the caller passes in.
-
-How to synchronize with an IOThread
------------------------------------
-Variables that can be accessed by multiple threads require some form of
-synchronization such as qemu_mutex_lock(), rcu_read_lock(), etc.
-
-AioContext functions like aio_set_fd_handler(), aio_set_event_notifier(),
-aio_bh_new(), and aio_timer_new() are thread-safe. They can be used to trigger
-activity in an IOThread.
-
-Side note: the best way to schedule a function call across threads is to call
-aio_bh_schedule_oneshot().
-
-The main loop thread can wait synchronously for a condition using
-AIO_WAIT_WHILE().
-
-AioContext and the block layer
-------------------------------
-The AioContext originates from the QEMU block layer, even though nowadays
-AioContext is a generic event loop that can be used by any QEMU subsystem.
-
-The block layer has support for AioContext integrated. Each BlockDriverState
-is associated with an AioContext using bdrv_try_change_aio_context() and
-bdrv_get_aio_context(). This allows block layer code to process I/O inside the
-right AioContext. Other subsystems may wish to follow a similar approach.
-
-Block layer code must therefore expect to run in an IOThread and avoid using
-old APIs that implicitly use the main loop. See the "How to program for
-IOThreads" above for information on how to do that.
-
-Code running in the monitor typically needs to ensure that past
-requests from the guest are completed. When a block device is running
-in an IOThread, the IOThread can also process requests from the guest
-(via ioeventfd). To achieve both objects, wrap the code between
-bdrv_drained_begin() and bdrv_drained_end(), thus creating a "drained
-section".
-
-Long-running jobs (usually in the form of coroutines) are often scheduled in
-the BlockDriverState's AioContext. The functions
-bdrv_add/remove_aio_context_notifier, or alternatively
-blk_add/remove_aio_context_notifier if you use BlockBackends, can be used to
-get a notification whenever bdrv_try_change_aio_context() moves a
-BlockDriverState to a different AioContext.
diff --git a/docs/devel/nested-papr.txt b/docs/devel/nested-papr.txt
deleted file mode 100644
index 9094365..0000000
--- a/docs/devel/nested-papr.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-Nested PAPR API (aka KVM on PowerVM)
-====================================
-
-This API aims at providing support to enable nested virtualization with
-KVM on PowerVM. While the existing support for nested KVM on PowerNV was
-introduced with cap-nested-hv option, however, with a slight design change,
-to enable this on papr/pseries, a new cap-nested-papr option is added. eg:
-
- qemu-system-ppc64 -cpu POWER10 -machine pseries,cap-nested-papr=true ...
-
-Work by:
- Michael Neuling <mikey@neuling.org>
- Vaibhav Jain <vaibhav@linux.ibm.com>
- Jordan Niethe <jniethe5@gmail.com>
- Harsh Prateek Bora <harshpb@linux.ibm.com>
- Shivaprasad G Bhat <sbhat@linux.ibm.com>
- Kautuk Consul <kconsul@linux.vnet.ibm.com>
-
-Below taken from the kernel documentation:
-
-Introduction
-============
-
-This document explains how a guest operating system can act as a
-hypervisor and run nested guests through the use of hypercalls, if the
-hypervisor has implemented them. The terms L0, L1, and L2 are used to
-refer to different software entities. L0 is the hypervisor mode entity
-that would normally be called the "host" or "hypervisor". L1 is a
-guest virtual machine that is directly run under L0 and is initiated
-and controlled by L0. L2 is a guest virtual machine that is initiated
-and controlled by L1 acting as a hypervisor. A significant design change
-wrt existing API is that now the entire L2 state is maintained within L0.
-
-Existing Nested-HV API
-======================
-
-Linux/KVM has had support for Nesting as an L0 or L1 since 2018
-
-The L0 code was added::
-
- commit 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce
- Author: Paul Mackerras <paulus@ozlabs.org>
- Date: Mon Oct 8 16:31:03 2018 +1100
- KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization
-
-The L1 code was added::
-
- commit 360cae313702cdd0b90f82c261a8302fecef030a
- Author: Paul Mackerras <paulus@ozlabs.org>
- Date: Mon Oct 8 16:31:04 2018 +1100
- KVM: PPC: Book3S HV: Nested guest entry via hypercall
-
-This API works primarily using a signal hcall h_enter_nested(). This
-call made by the L1 to tell the L0 to start an L2 vCPU with the given
-state. The L0 then starts this L2 and runs until an L2 exit condition
-is reached. Once the L2 exits, the state of the L2 is given back to
-the L1 by the L0. The full L2 vCPU state is always transferred from
-and to L1 when the L2 is run. The L0 doesn't keep any state on the L2
-vCPU (except in the short sequence in the L0 on L1 -> L2 entry and L2
--> L1 exit).
-
-The only state kept by the L0 is the partition table. The L1 registers
-it's partition table using the h_set_partition_table() hcall. All
-other state held by the L0 about the L2s is cached state (such as
-shadow page tables).
-
-The L1 may run any L2 or vCPU without first informing the L0. It
-simply starts the vCPU using h_enter_nested(). The creation of L2s and
-vCPUs is done implicitly whenever h_enter_nested() is called.
-
-In this document, we call this existing API the v1 API.
-
-New PAPR API
-===============
-
-The new PAPR API changes from the v1 API such that the creating L2 and
-associated vCPUs is explicit. In this document, we call this the v2
-API.
-
-h_enter_nested() is replaced with H_GUEST_VCPU_RUN(). Before this can
-be called the L1 must explicitly create the L2 using h_guest_create()
-and any associated vCPUs() created with h_guest_create_vCPU(). Getting
-and setting vCPU state can also be performed using h_guest_{g|s}et
-hcall.
-
-The basic execution flow is for an L1 to create an L2, run it, and
-delete it is:
-
-- L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES()
- (normally at L1 boot time).
-
-- L1 requests the L0 to create an L2 with H_GUEST_CREATE() and receives a token
-
-- L1 requests the L0 to create an L2 vCPU with H_GUEST_CREATE_VCPU()
-
-- L1 and L0 communicate the vCPU state using the H_GUEST_{G,S}ET() hcall
-
-- L1 requests the L0 to run the vCPU using H_GUEST_RUN_VCPU() hcall
-
-- L1 deletes L2 with H_GUEST_DELETE()
-
-For more details, please refer:
-
-[1] Linux Kernel documentation (upstream documentation commit):
-
-commit 476652297f94a2e5e5ef29e734b0da37ade94110
-Author: Michael Neuling <mikey@neuling.org>
-Date: Thu Sep 14 13:06:00 2023 +1000
-
- docs: powerpc: Document nested KVM on POWER
-
- Document support for nested KVM on POWER using the existing API as well
- as the new PAPR API. This includes the new HCALL interface and how it
- used by KVM.
-
- Signed-off-by: Michael Neuling <mikey@neuling.org>
- Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
- Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
- Link: https://msgid.link/20230914030600.16993-12-jniethe5@gmail.com
diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst
index 583207a..231cc0f 100644
--- a/docs/devel/qapi-code-gen.rst
+++ b/docs/devel/qapi-code-gen.rst
@@ -9,6 +9,7 @@ How to use the QAPI code generator
This work is licensed under the terms of the GNU GPL, version 2 or
later. See the COPYING file in the top-level directory.
+.. _qapi:
Introduction
============
@@ -228,7 +229,8 @@ These are of the form PREFIX_NAME, where PREFIX is derived from the
enumeration type's name, and NAME from the value's name. For the
example above, the generator maps 'MyEnum' to MY_ENUM and 'value1' to
VALUE1, resulting in the enumeration constant MY_ENUM_VALUE1. The
-optional 'prefix' member overrides PREFIX.
+optional 'prefix' member overrides PREFIX. This is rarely necessary,
+and should be used with restraint.
The generated C enumeration constants have values 0, 1, ..., N-1 (in
QAPI schema order), where N is the number of values. There is an
@@ -761,8 +763,8 @@ Names beginning with ``x-`` used to signify "experimental". This
convention has been replaced by special feature "unstable".
Pragmas ``command-name-exceptions`` and ``member-name-exceptions`` let
-you violate naming rules. Use for new code is strongly discouraged. See
-`Pragma directives`_ for details.
+you violate naming rules. Use for new code is strongly discouraged.
+See `Pragma directives`_ for details.
Downstream extensions
@@ -1011,7 +1013,7 @@ like this::
document the success and the error response, respectively.
"Errors" sections should be formatted as an rST list, each entry
-detailing a relevant error condition. For example::
+detailing a relevant error condition. For example::
# Errors:
# - If @device does not exist, DeviceNotFound
@@ -1024,31 +1026,28 @@ definition.
QMP). In other sections, the text is formatted, and rST markup can be
used.
-QMP Examples can be added by using the ``.. qmp-example::``
-directive. In its simplest form, this can be used to contain a single
-QMP code block which accepts standard JSON syntax with additional server
-directionality indicators (``->`` and ``<-``), and elisions (``...``).
+QMP Examples can be added by using the ``.. qmp-example::`` directive.
+In its simplest form, this can be used to contain a single QMP code
+block which accepts standard JSON syntax with additional server
+directionality indicators (``->`` and ``<-``), and elisions. An
+elision is commonly ``...``, but it can also be a pair of ``...``
+with text in between.
Optionally, a plaintext title may be provided by using the ``:title:``
-directive option. If the title is omitted, the example title will
+directive option. If the title is omitted, the example title will
default to "Example:".
A simple QMP example::
# .. qmp-example::
- # :title: Using query-block
#
- # -> { "execute": "query-block" }
- # <- { ... }
+ # -> { "execute": "query-name" }
+ # <- { "return": { "name": "Fred" } }
-More complex or multi-step examples where exposition is needed before or
-between QMP code blocks can be created by using the ``:annotated:``
-directive option. When using this option, nested QMP code blocks must be
-entered explicitly with rST's ``::`` syntax.
-
-Highlighting in non-QMP languages can be accomplished by using the
-``.. code-block:: lang`` directive, and non-highlighted text can be
-achieved by omitting the language argument.
+More complex or multi-step examples where exposition is needed before
+or between QMP code blocks can be created by using the ``:annotated:``
+directive option. When using this option, nested QMP code blocks must
+be entered explicitly with rST's ``::`` syntax.
For example::
@@ -1059,11 +1058,21 @@ For example::
# This is a more complex example that can use
# ``arbitrary rST syntax`` in its exposition::
#
- # -> { "execute": "query-block" }
- # <- { ... }
+ # -> { "execute": "query-block" }
+ # <- { "return": [
+ # {
+ # "device": "ide0-hd0",
+ # ...
+ # }
+ # ... more ...
+ # ] }
#
# Above, lengthy output has been omitted for brevity.
+Highlighting in non-QMP languages can be accomplished by using the
+``.. code-block:: lang`` directive, and non-highlighted text can be
+achieved by omitting the language argument.
+
Examples of complete definition documentation::
@@ -1464,7 +1473,9 @@ As an example, we'll use the following schema, which describes a
single complex user-defined type, along with command which takes a
list of that type as a parameter, and returns a single element of that
type. The user is responsible for writing the implementation of
-qmp_my_command(); everything else is produced by the generator. ::
+qmp_my_command(); everything else is produced by the generator.
+
+::
$ cat example-schema.json
{ 'struct': 'UserDefOne',
@@ -1854,7 +1865,7 @@ Example::
#ifndef EXAMPLE_QAPI_INIT_COMMANDS_H
#define EXAMPLE_QAPI_INIT_COMMANDS_H
- #include "qapi/qmp/dispatch.h"
+ #include "qapi/qmp-registry.h"
void example_qmp_init_marshal(QmpCommandList *cmds);
@@ -1985,7 +1996,7 @@ Example::
#ifndef EXAMPLE_QAPI_INTROSPECT_H
#define EXAMPLE_QAPI_INTROSPECT_H
- #include "qapi/qmp/qlit.h"
+ #include "qobject/qlit.h"
extern const QLitObject example_qmp_schema_qlit;
diff --git a/docs/devel/qapi-domain.rst b/docs/devel/qapi-domain.rst
new file mode 100644
index 0000000..1123872
--- /dev/null
+++ b/docs/devel/qapi-domain.rst
@@ -0,0 +1,716 @@
+======================
+The Sphinx QAPI Domain
+======================
+
+An extension to the `rST syntax
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_
+in Sphinx is provided by the QAPI Domain, located in
+``docs/sphinx/qapi_domain.py``. This extension is analogous to the
+`Python Domain
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html>`_
+included with Sphinx, but provides special directives and roles
+specifically for annotating and documenting QAPI definitions.
+
+A `Domain
+<https://www.sphinx-doc.org/en/master/usage/domains/index.html>`_
+provides a set of special rST directives and cross-referencing roles to
+Sphinx for understanding rST markup written to document a specific
+language. By itself, this QAPI extension is only sufficient to parse rST
+markup written by hand; the `autodoc
+<https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html>`_
+functionality is provided elsewhere, in ``docs/sphinx/qapidoc.py``, by
+the "Transmogrifier".
+
+It is not expected that any developer or documentation writer will
+ever need to write *or* read these special rST forms. However, in the
+event that something needs to be debugged, knowing the syntax of the
+domain is quite handy. This reference may also be useful as a guide for
+understanding the QAPI Domain extension code itself. Although most of
+these forms will not be needed for documentation writing purposes,
+understanding the cross-referencing syntax *will* be helpful when
+writing rST documentation elsewhere, or for enriching the body of
+QAPIDoc blocks themselves.
+
+
+Concepts
+========
+
+The QAPI Domain itself provides no mechanisms for reading the QAPI
+Schema or generating documentation from code that exists. It is merely
+the rST syntax used to describe things. For instance, the Sphinx Python
+domain adds syntax like ``:py:func:`` for describing Python functions in
+documentation, but it's the autodoc module that is responsible for
+reading Python code and generating such syntax. QAPI is analogous here:
+qapidoc.py is responsible for reading the QAPI Schema and generating rST
+syntax, and qapi_domain.py is responsible for translating that special
+syntax and providing APIs for Sphinx internals.
+
+In other words:
+
+qapi_domain.py adds syntax like ``.. qapi:command::`` to Sphinx, and
+qapidoc.py transforms the documentation in ``qapi/*.json`` into rST
+using directives defined by the domain.
+
+Or even shorter:
+
+``:py:`` is to ``:qapi:`` as *autodoc* is to *qapidoc*.
+
+
+Info Field Lists
+================
+
+`Field lists
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#field-lists>`_
+are a standard syntax in reStructuredText. Sphinx `extends that syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#info-field-lists>`_
+to give certain field list entries special meaning and parsing, for
+example to add cross-references. The QAPI Domain takes advantage of this
+field list extension to document things like Arguments, Members, Values,
+and so on.
+
+The special parsing and handling of info field lists in Sphinx is provided by
+three main classes: Field, GroupedField, and TypedField. The behavior
+and formatting for each configured field list entry in the domain
+changes depending on which class is used.
+
+Field:
+ * Creates an ungrouped field: i.e., each entry will create its own
+ section and they will not be combined.
+ * May *optionally* support an argument.
+ * May apply cross-reference roles to *either* the argument *or* the
+ content body, both, or neither.
+
+This is used primarily for entries which are not expected to be
+repeated, i.e., items that may show up at most once. The QAPI
+domain uses this class for the "Errors" section.
+
+GroupedField:
+ * Creates a grouped field: i.e. multiple adjacent entries will be
+ merged into one section, and the content will form a bulleted list.
+ * *Must* take an argument.
+ * May optionally apply a cross-reference role to the argument, but not
+ the body.
+ * Can be configured to remove the bulleted list if there is only a
+ single entry.
+ * All items will be generated with the form: "argument -- body"
+
+This is used for entries which are expected to be repeated, but aren't
+expected to have two arguments, i.e. types without names, or names
+without types. The QAPI domain uses this class for features, returns,
+and enum values.
+
+TypedField:
+ * Creates a grouped, typed field. Multiple adjacent entries will be
+ merged into one section, and the content will form a bulleted list.
+ * *Must* take at least one argument, but supports up to two --
+ nominally, a name and a type.
+ * May optionally apply a cross-reference role to the type or the name
+ argument, but not the body.
+ * Can be configured to remove the bulleted list if there is only a
+ single entry.
+ * All items will be generated with the form "name (type) -- body"
+
+This is used for entries that are expected to be repeated and will have
+a name, a type, and a description. The QAPI domain uses this class for
+arguments, alternatives, and members. Wherever type names are referenced
+below, they must be a valid, documented type that will be
+cross-referenced in the HTML output, or one of the built-in JSON types
+(string, number, int, boolean, null, value, q_empty).
+
+
+``:feat:``
+----------
+
+Document a feature attached to a QAPI definition.
+
+:availability: This field list is available in the body of Command,
+ Event, Enum, Object and Alternate directives.
+:syntax: ``:feat name: Lorem ipsum, dolor sit amet...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+Example::
+
+ .. qapi:object:: BlockdevOptionsVirtioBlkVhostVdpa
+ :since: 7.2
+ :ifcond: CONFIG_BLKIO
+
+ Driver specific block device options for the virtio-blk-vhost-vdpa
+ backend.
+
+ :memb string path: path to the vhost-vdpa character device.
+ :feat fdset: Member ``path`` supports the special "/dev/fdset/N" path
+ (since 8.1)
+
+
+``:arg:``
+---------
+
+Document an argument to a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:arg type name: description``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+
+Example::
+
+ .. qapi:command:: job-pause
+ :since: 3.0
+
+ Pause an active job.
+
+ This command returns immediately after marking the active job for
+ pausing. Pausing an already paused job is an error.
+
+ The job will pause as soon as possible, which means transitioning
+ into the PAUSED state if it was RUNNING, or into STANDBY if it was
+ READY. The corresponding JOB_STATUS_CHANGE event will be emitted.
+
+ Cancelling a paused job automatically resumes it.
+
+ :arg string id: The job identifier.
+
+
+``:error:``
+-----------
+
+Document the error condition(s) of a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:error: Lorem ipsum dolor sit amet ...``
+:type: `sphinx.util.docfields.Field
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.Field.html?private=1>`_
+
+The format of the ``:error:`` field list description is free-form rST.
+The alternative spelling ``:errors:`` is also permitted, and is
+strictly analogous.
+
+Example::
+
+ .. qapi:command:: block-job-set-speed
+ :since: 1.1
+
+ Set maximum speed for a background block operation.
+
+ This command can only be issued when there is an active block job.
+
+ Throttling can be disabled by setting the speed to 0.
+
+ :arg string device: The job identifier. This used to be a device
+ name (hence the name of the parameter), but since QEMU 2.7 it
+ can have other values.
+ :arg int speed: the maximum speed, in bytes per second, or 0 for
+ unlimited. Defaults to 0.
+ :error:
+ - If no background operation is active on this device,
+ DeviceNotActive
+
+
+``:return:``
+-------------
+
+Document the return type(s) and value(s) of a QAPI command.
+
+:availability: This field list is only available in the body of the
+ Command directive.
+:syntax: ``:return type: Lorem ipsum dolor sit amet ...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+
+Example::
+
+ .. qapi:command:: query-replay
+ :since: 5.2
+
+ Retrieve the record/replay information. It includes current
+ instruction count which may be used for ``replay-break`` and
+ ``replay-seek`` commands.
+
+ :return ReplayInfo: record/replay information.
+
+ .. qmp-example::
+
+ -> { "execute": "query-replay" }
+ <- { "return": {
+ "mode": "play", "filename": "log.rr", "icount": 220414 }
+ }
+
+
+``:value:``
+-----------
+
+Document a possible value for a QAPI enum.
+
+:availability: This field list is only available in the body of the Enum
+ directive.
+:syntax: ``:value name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.GroupedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.GroupedField.html?private=1>`_
+
+Example::
+
+ .. qapi:enum:: QapiErrorClass
+ :since: 1.2
+
+ QEMU error classes
+
+ :value GenericError: this is used for errors that don't require a specific
+ error class. This should be the default case for most errors
+ :value CommandNotFound: the requested command has not been found
+   :value DeviceNotActive: a device has failed to become active
+ :value DeviceNotFound: the requested device has not been found
+ :value KVMMissingCap: the requested operation can't be fulfilled because a
+ required KVM capability is missing
+
+
+``:alt:``
+------------
+
+Document a possible branch for a QAPI alternate.
+
+:availability: This field list is only available in the body of the
+ Alternate directive.
+:syntax: ``:alt type name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+As a limitation of Sphinx, we must document the "name" of the branch in
+addition to the type, even though this information is not visible on the
+wire in the QMP protocol format. This limitation *may* be lifted at a
+future date.
+
+Example::
+
+ .. qapi:alternate:: StrOrNull
+ :since: 2.10
+
+ This is a string value or the explicit lack of a string (null
+ pointer in C). Intended for cases when 'optional absent' already
+ has a different meaning.
+
+ :alt string s: the string value
+ :alt null n: no string value
+
+
+``:memb:``
+----------
+
+Document a member of an Event or Object.
+
+:availability: This field list is available in the body of Event or
+ Object directives.
+:syntax: ``:memb type name: Lorem ipsum, dolor sit amet ...``
+:type: `sphinx.util.docfields.TypedField
+ <https://pydoc.dev/sphinx/latest/sphinx.util.docfields.TypedField.html?private=1>`_
+
+This is fundamentally the same as ``:arg:`` and ``:alt:``, but uses the
+"Members" phrasing for Events and Objects (Structs and Unions).
+
+Example::
+
+ .. qapi:event:: JOB_STATUS_CHANGE
+ :since: 3.0
+
+ Emitted when a job transitions to a different status.
+
+ :memb string id: The job identifier
+ :memb JobStatus status: The new job status
+
+
+Arbitrary field lists
+---------------------
+
+Other field list names, while valid rST syntax, are prohibited inside of
+QAPI directives to help prevent accidental misspellings of info field
+list names. If you want to add a new arbitrary "non-value-added" field
+list to QAPI documentation, you must add the field name to the allow
+list in ``docs/conf.py``.
+
+For example::
+
+ qapi_allowed_fields = {
+ "see also",
+ }
+
+This will allow you to add arbitrary field lists in QAPI directives::
+
+ .. qapi:command:: x-fake-command
+
+ :see also: Lorem ipsum, dolor sit amet ...
+
+
+Cross-references
+================
+
+Cross-reference `roles
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html>`_
+in the QAPI domain are modeled closely after the `Python
+cross-referencing syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#cross-referencing-python-objects>`_.
+
+QAPI definitions can be referenced using the standard `any
+<https://www.sphinx-doc.org/en/master/usage/referencing.html#role-any>`_
+role cross-reference syntax, such as with ```query-blockstats```. In
+the event that disambiguation is needed, cross-references can also be
+written using a number of explicit cross-reference roles:
+
+* ``:qapi:mod:`block-core``` -- Reference a QAPI module. The link will
+ take you to the beginning of that section in the documentation.
+* ``:qapi:cmd:`query-block``` -- Reference a QAPI command.
+* ``:qapi:event:`JOB_STATUS_CHANGE``` -- Reference a QAPI event.
+* ``:qapi:enum:`QapiErrorClass``` -- Reference a QAPI enum.
+* ``:qapi:obj:`BlockdevOptionsVirtioBlkVhostVdpa``` -- Reference a QAPI
+  object (struct or union).
+* ``:qapi:alt:`StrOrNull``` -- Reference a QAPI alternate.
+* ``:qapi:type:`BlockDirtyInfo``` -- Reference *any* QAPI type; this
+ excludes modules, commands, and events.
+* ``:qapi:any:`block-job-set-speed``` -- Reference absolutely any QAPI entity.
+
+Type arguments in info field lists are converted into references as if
+you had used the ``:qapi:type:`` role. All of the special syntax below
+applies to both info field lists and standalone explicit
+cross-references.
+
+
+Type decorations
+----------------
+
+Type names in references can be surrounded by brackets, like
+``[typename]``, to indicate an array of that type. The cross-reference
+will apply only to the type name between the brackets. For example,
+``:qapi:type:`[Qcow2BitmapInfoFlags]``` renders to:
+:qapi:type:`[QMP:Qcow2BitmapInfoFlags]`
+
+To indicate an optional argument/member in a field list, the type name
+can be suffixed with ``?``. The cross-reference will be transformed to
+"type, Optional" with the link applying only to the type name. For
+example, ``:qapi:type:`BitmapSyncMode?``` renders to:
+:qapi:type:`QMP:BitmapSyncMode?`
+
+
+Namespaces
+----------
+
+Mimicking the `Python domain target specification syntax
+<https://www.sphinx-doc.org/en/master/usage/domains/python.html#target-specification>`_,
+QAPI allows you to specify the fully qualified path for a data
+type.
+
+* A namespace can be explicitly provided;
+  e.g. ``:qapi:type:`QMP:BitmapSyncMode```
+* A module can be explicitly provided;
+ ``:qapi:type:`QMP:block-core.BitmapSyncMode``` will render to:
+ :qapi:type:`QMP:block-core.BitmapSyncMode`
+* If you don't want to display the "fully qualified" name, it can be
+ prefixed with a tilde; ``:qapi:type:`~QMP:block-core.BitmapSyncMode```
+ will render to: :qapi:type:`~QMP:block-core.BitmapSyncMode`
+
+
+Target resolution
+-----------------
+
+Any cross-reference to a QAPI type, whether using the ```any``` style of
+reference or the more explicit ```:qapi:any:`target``` syntax, allows
+for the presence or absence of either the namespace or module
+information.
+
+When absent, their value will be inferred from context by the presence
+of any ``qapi:namespace`` or ``qapi:module`` directives preceding the
+cross-reference.
+
+If no results are found when using the inferred values, other
+namespaces/modules will be searched as a last resort; but any explicitly
+provided values must always match in order to succeed.
+
+This allows for efficient cross-referencing with a minimum of syntax in
+the large majority of cases, but additional context or namespace markup
+may be required outside of the QAPI reference documents when linking to
+items that share a name across multiple documented QAPI schema.
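+
+For example, consider a document that has already declared a namespace
+and a module (reusing the ``QMP`` and ``block-core`` names from the
+examples elsewhere in this document)::
+
+   .. qapi:namespace:: QMP
+
+   .. qapi:module:: block-core
+
+In this context, a bare reference such as
+``:qapi:type:`BitmapSyncMode``` is first resolved as
+``QMP:block-core.BitmapSyncMode``; other namespaces and modules are
+searched only if that lookup finds nothing.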
+
+
+Custom link text
+----------------
+
+The name of a cross-reference link can be explicitly overridden like
+`most stock Sphinx references
+<https://www.sphinx-doc.org/en/master/usage/referencing.html#syntax>`_
+using the ``custom text <target>`` syntax.
+
+For example, ``:qapi:cmd:`Merge dirty bitmaps
+<block-dirty-bitmap-merge>``` will render as: :qapi:cmd:`Merge dirty
+bitmaps <QMP:block-dirty-bitmap-merge>`
+
+
+Directives
+==========
+
+The QAPI domain adds a number of custom directives for documenting
+various QAPI/QMP entities. The syntax is plain rST, and follows this
+general format::
+
+ .. qapi:directive:: argument
+ :option:
+ :another-option: with an argument
+
+ Content body, arbitrary rST is allowed here.
+
+
+Sphinx standard options
+-----------------------
+
+All QAPI directives inherit a number of `standard options
+<https://www.sphinx-doc.org/en/master/usage/domains/index.html#basic-markup>`_
+from Sphinx's ObjectDescription class.
+
+The dashed spellings of the options below were added in Sphinx 7.2; the
+undashed spellings are currently retained as aliases, but will be
+removed in a future version.
+
+* ``:no-index:`` and ``:noindex:`` -- Do not add this item into the
+ Index, and do not make it available for cross-referencing.
+* ``:no-index-entry:`` and ``:noindexentry:`` -- Do not add this item
+ into the Index, but allow it to be cross-referenced.
+* ``:no-contents-entry:`` and ``:nocontentsentry:`` -- Exclude this item
+ from the Table of Contents.
+* ``:no-typesetting:`` -- Create TOC, Index and cross-referencing
+ entities, but don't actually display the content.
+
+
+QAPI standard options
+---------------------
+
+All QAPI directives -- *except* for namespace and module -- support
+these common options.
+
+* ``:namespace: name`` -- This option allows you to override the
+ namespace association of a given definition.
+* ``:module: modname`` -- Borrowed from the Python domain, this option allows
+ you to override the module association of a given definition.
+* ``:since: x.y`` -- Allows the documenting of "Since" information, which is
+ displayed in the signature bar.
+* ``:ifcond: CONDITION`` -- Allows the documenting of conditional availability
+ information, which is displayed in an eyecatch just below the
+ signature bar.
+* ``:deprecated:`` -- Adds an eyecatch just below the signature bar that
+ advertises that this definition is deprecated and should be avoided.
+* ``:unstable:`` -- Adds an eyecatch just below the signature bar that
+ advertises that this definition is unstable and should not be used in
+ production code.
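+
+For example, the following (using the same placeholder
+``x-fake-command`` as other examples in this document) pins a
+definition to an explicit namespace and module::
+
+   .. qapi:command:: x-fake-command
+      :namespace: QMP
+      :module: block-core
+      :since: 42.0
+
+      This command is fake, so it can't hurt you!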
+
+
+qapi:namespace
+--------------
+
+The ``qapi:namespace`` directive marks the start of a QAPI namespace. It
+does not take a content body, nor any options. All subsequent QAPI
+directives are associated with the most recent namespace. This affects
+the definition's "fully qualified name", allowing two different
+namespaces to create an otherwise identically named definition.
+
+This directive also influences how reference resolution works for any
+references that do not explicitly specify a namespace, so this directive
+can be used to nudge references into preferring targets from within that
+namespace.
+
+Example::
+
+ .. qapi:namespace:: QMP
+
+
+This directive has no visible effect.
+
+
+qapi:module
+-----------
+
+The ``qapi:module`` directive marks the start of a QAPI module. It may have
+a content body, but it can be omitted. All subsequent QAPI directives
+are associated with the most recent module; this affects their "fully
+qualified" name, but has no other effect.
+
+Example::
+
+ .. qapi:module:: block-core
+
+ Welcome to the block-core module!
+
+Will be rendered as:
+
+.. qapi:module:: block-core
+ :noindex:
+
+ Welcome to the block-core module!
+
+
+qapi:command
+------------
+
+This directive documents a QMP command. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:arg:``, ``:feat:``, ``:error:``, or ``:return:`` info field list
+entries.
+
+Example::
+
+ .. qapi:command:: x-fake-command
+ :since: 42.0
+ :unstable:
+
+ This command is fake, so it can't hurt you!
+
+ :arg int foo: Your favorite number.
+ :arg string? bar: Your favorite season.
+ :return [string]: A lovely computer-written poem for you.
+
+
+Will be rendered as:
+
+ .. qapi:command:: x-fake-command
+ :noindex:
+ :since: 42.0
+ :unstable:
+
+ This command is fake, so it can't hurt you!
+
+ :arg int foo: Your favorite number.
+ :arg string? bar: Your favorite season.
+ :return [string]: A lovely computer-written poem for you.
+
+
+qapi:event
+----------
+
+This directive documents a QMP event. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:memb:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:event:: COMPUTER_IS_RUINED
+ :since: 0.1
+ :deprecated:
+
+ This event is emitted when your computer is *extremely* ruined.
+
+ :memb string reason: Diagnostics as to what caused your computer to
+ be ruined.
+ :feat sadness: When present, the diagnostic message will also
+ explain how sad the computer is as a result of your wrongdoings.
+
+Will be rendered as:
+
+.. qapi:event:: COMPUTER_IS_RUINED
+ :noindex:
+ :since: 0.1
+ :deprecated:
+
+ This event is emitted when your computer is *extremely* ruined.
+
+ :memb string reason: Diagnostics as to what caused your computer to
+ be ruined.
+ :feat sadness: When present, the diagnostic message will also explain
+ how sad the computer is as a result of your wrongdoings.
+
+
+qapi:enum
+---------
+
+This directive documents a QAPI enum. It may use any of the standard
+Sphinx or QAPI options, and the documentation body may contain
+``:value:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:enum:: Mood
+ :ifcond: LIB_PERSONALITY
+
+ This enum represents your virtual machine's current mood!
+
+ :value Happy: Your VM is content and well-fed.
+ :value Hungry: Your VM needs food.
+ :value Melancholic: Your VM is experiencing existential angst.
+ :value Petulant: Your VM is throwing a temper tantrum.
+
+Will be rendered as:
+
+.. qapi:enum:: Mood
+ :noindex:
+ :ifcond: LIB_PERSONALITY
+
+ This enum represents your virtual machine's current mood!
+
+ :value Happy: Your VM is content and well-fed.
+ :value Hungry: Your VM needs food.
+ :value Melancholic: Your VM is experiencing existential angst.
+ :value Petulant: Your VM is throwing a temper tantrum.
+
+
+qapi:object
+-----------
+
+This directive documents a QAPI structure or union and represents a QMP
+object. It may use any of the standard Sphinx or QAPI options, and the
+documentation body may contain ``:memb:`` or ``:feat:`` info field list
+entries.
+
+Example::
+
+ .. qapi:object:: BigBlobOfStuff
+
+ This object has a bunch of disparate and unrelated things in it.
+
+ :memb int Birthday: Your birthday, represented in seconds since the
+ UNIX epoch.
+ :memb [string] Fav-Foods: A list of your favorite foods.
+ :memb boolean? Bizarre-Docs: True if the documentation reference
+ should be strange.
+
+Will be rendered as:
+
+.. qapi:object:: BigBlobOfStuff
+ :noindex:
+
+ This object has a bunch of disparate and unrelated things in it.
+
+ :memb int Birthday: Your birthday, represented in seconds since the
+ UNIX epoch.
+ :memb [string] Fav-Foods: A list of your favorite foods.
+ :memb boolean? Bizarre-Docs: True if the documentation reference
+ should be strange.
+
+
+qapi:alternate
+--------------
+
+This directive documents a QAPI alternate. It may use any of the
+standard Sphinx or QAPI options, and the documentation body may contain
+``:alt:`` or ``:feat:`` info field list entries.
+
+Example::
+
+ .. qapi:alternate:: ErrorCode
+
+ This alternate represents an Error Code from the VM.
+
+ :alt int ec: An error code, like the type you're used to.
+ :alt string em: An expletive-laced error message, if your
+ computer is feeling particularly cranky and tired of your
+ antics.
+
+Will be rendered as:
+
+.. qapi:alternate:: ErrorCode
+ :noindex:
+
+ This alternate represents an Error Code from the VM.
+
+ :alt int ec: An error code, like the type you're used to.
+ :alt string em: An expletive-laced error message, if your
+ computer is feeling particularly cranky and tired of your
+ antics.
diff --git a/docs/devel/qom.rst b/docs/devel/qom.rst
index 0889ca9..5870745 100644
--- a/docs/devel/qom.rst
+++ b/docs/devel/qom.rst
@@ -147,7 +147,7 @@ to introduce an overridden virtual function:
#include "qdev.h"
- void my_device_class_init(ObjectClass *klass, void *class_data)
+ void my_device_class_init(ObjectClass *klass, const void *class_data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->reset = my_device_reset;
@@ -249,7 +249,7 @@ class, which someone might choose to change at some point.
// do something
}
- static void my_class_init(ObjectClass *oc, void *data)
+ static void my_class_init(ObjectClass *oc, const void *data)
{
MyClass *mc = MY_CLASS(oc);
@@ -279,7 +279,7 @@ class, which someone might choose to change at some point.
// do something else here
}
- static void derived_class_init(ObjectClass *oc, void *data)
+ static void derived_class_init(ObjectClass *oc, const void *data)
{
MyClass *mc = MY_CLASS(oc);
DerivedClass *dc = DERIVED_CLASS(oc);
@@ -363,7 +363,7 @@ This is equivalent to the following:
:caption: Expansion from defining a simple type
static void my_device_finalize(Object *obj);
- static void my_device_class_init(ObjectClass *oc, void *data);
+ static void my_device_class_init(ObjectClass *oc, const void *data);
static void my_device_init(Object *obj);
static const TypeInfo my_device_info = {
diff --git a/docs/devel/qtest.rst b/docs/devel/qtest.rst
deleted file mode 100644
index c5b8546..0000000
--- a/docs/devel/qtest.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-========================================
-QTest Device Emulation Testing Framework
-========================================
-
-.. toctree::
-
- qgraph
-
-QTest is a device emulation testing framework. It can be very useful to test
-device models; it could also control certain aspects of QEMU (such as virtual
-clock stepping), with a special purpose "qtest" protocol. Refer to
-:ref:`qtest-protocol` for more details of the protocol.
-
-QTest cases can be executed with
-
-.. code::
-
- make check-qtest
-
-The QTest library is implemented by ``tests/qtest/libqtest.c`` and the API is
-defined in ``tests/qtest/libqtest.h``.
-
-Consider adding a new QTest case when you are introducing a new virtual
-hardware, or extending one if you are adding functionalities to an existing
-virtual device.
-
-On top of libqtest, a higher level library, ``libqos``, was created to
-encapsulate common tasks of device drivers, such as memory management and
-communicating with system buses or devices. Many virtual device tests use
-libqos instead of directly calling into libqtest.
-Libqos also offers the Qgraph API to increase each test coverage and
-automate QEMU command line arguments and devices setup.
-Refer to :ref:`qgraph` for Qgraph explanation and API.
-
-Steps to add a new QTest case are:
-
-1. Create a new source file for the test. (More than one file can be added as
- necessary.) For example, ``tests/qtest/foo-test.c``.
-
-2. Write the test code with the glib and libqtest/libqos API. See also existing
- tests and the library headers for reference.
-
-3. Register the new test in ``tests/qtest/meson.build``. Add the test
- executable name to an appropriate ``qtests_*`` variable. There is
- one variable per architecture, plus ``qtests_generic`` for tests
- that can be run for all architectures. For example::
-
- qtests_generic = [
- ...
- 'foo-test',
- ...
- ]
-
-4. If the test has more than one source file or needs to be linked with any
- dependency other than ``qemuutil`` and ``qos``, list them in the ``qtests``
- dictionary. For example a test that needs to use the ``QIO`` library
- will have an entry like::
-
- {
- ...
- 'foo-test': [io],
- ...
- }
-
-Debugging a QTest failure is slightly harder than the unit test because the
-tests look up QEMU program names in the environment variables, such as
-``QTEST_QEMU_BINARY`` and ``QTEST_QEMU_IMG``, and also because it is not easy
-to attach gdb to the QEMU process spawned from the test. But manual invoking
-and using gdb on the test is still simple to do: find out the actual command
-from the output of
-
-.. code::
-
- make check-qtest V=1
-
-which you can run manually.
-
-
-.. _qtest-protocol:
-
-QTest Protocol
---------------
-
-.. kernel-doc:: system/qtest.c
- :doc: QTest Protocol
-
-
-libqtest API reference
-----------------------
-
-.. kernel-doc:: tests/qtest/libqtest.h
diff --git a/docs/devel/rcu.rst b/docs/devel/rcu.rst
new file mode 100644
index 0000000..dd07c1d
--- /dev/null
+++ b/docs/devel/rcu.rst
@@ -0,0 +1,394 @@
+Using RCU (Read-Copy-Update) for synchronization
+================================================
+
+Read-copy update (RCU) is a synchronization mechanism that is used to
+protect read-mostly data structures. RCU is very efficient and scalable
+on the read side (it is wait-free), and thus can make the read paths
+extremely fast.
+
+RCU supports concurrency between a single writer and multiple readers,
+thus it is not used alone. Typically, the write-side will use a lock to
+serialize multiple updates, but other approaches are possible (e.g.,
+restricting updates to a single task). In QEMU, when a lock is used,
+this will often be the "iothread mutex", also known as the "big QEMU
+lock" (BQL). Also, restricting updates to a single task is done in
+QEMU using the "bottom half" API.
+
+RCU is fundamentally a "wait-to-finish" mechanism. The read side marks
+sections of code with "critical sections", and the update side will wait
+for the execution of all *currently running* critical sections before
+proceeding, or before asynchronously executing a callback.
+
+The key point here is that only the currently running critical sections
+are waited for; critical sections that are started **after** the beginning
+of the wait do not extend the wait, despite running concurrently with
+the updater. This is the reason why RCU is more scalable than,
+for example, reader-writer locks. It is so much more scalable that
+the system will have a single instance of the RCU mechanism; a single
+mechanism can be used for an arbitrary number of "things", without
+having to worry about things such as contention or deadlocks.
+
+How is this possible? The basic idea is to split updates in two phases,
+"removal" and "reclamation". During removal, we ensure that subsequent
+readers will not be able to get a reference to the old data. After
+removal has completed, a critical section will not be able to access
+the old data. Therefore, critical sections that begin after removal
+do not matter; as soon as all previous critical sections have finished,
+there cannot be any readers who hold references to the data structure,
+and these can now be safely reclaimed (e.g., freed or unref'ed).
+
+Here is a picture::
+
+ thread 1 thread 2 thread 3
+ ------------------- ------------------------ -------------------
+ enter RCU crit.sec.
+ | finish removal phase
+ | begin wait
+ | | enter RCU crit.sec.
+ exit RCU crit.sec | |
+ complete wait |
+ begin reclamation phase |
+ exit RCU crit.sec.
+
+
+Note how thread 3 is still executing its critical section when thread 2
+starts reclaiming data. This is possible, because the old version of the
+data structure was not accessible at the time thread 3 began executing
+that critical section.
+
+
+RCU API
+-------
+
+The core RCU API is small:
+
+``void rcu_read_lock(void);``
+ Used by a reader to inform the reclaimer that the reader is
+ entering an RCU read-side critical section.
+
+``void rcu_read_unlock(void);``
+ Used by a reader to inform the reclaimer that the reader is
+ exiting an RCU read-side critical section. Note that RCU
+ read-side critical sections may be nested and/or overlapping.
+
+``void synchronize_rcu(void);``
+ Blocks until all pre-existing RCU read-side critical sections
+ on all threads have completed. This marks the end of the removal
+   phase and the beginning of the reclamation phase.
+
+ Note that it would be valid for another update to come while
+ ``synchronize_rcu`` is running. Because of this, it is better that
+ the updater releases any locks it may hold before calling
+ ``synchronize_rcu``. If this is not possible (for example, because
+ the updater is protected by the BQL), you can use ``call_rcu``.
+
+``void call_rcu1(struct rcu_head * head, void (*func)(struct rcu_head *head));``
+ This function invokes ``func(head)`` after all pre-existing RCU
+ read-side critical sections on all threads have completed. This
+   marks the end of the removal phase, with ``func`` asynchronously
+   taking care of the reclamation phase.
+
+ The ``foo`` struct needs to have an ``rcu_head`` structure added,
+ perhaps as follows::
+
+ struct foo {
+ struct rcu_head rcu;
+ int a;
+ char b;
+ long c;
+ };
+
+ so that the reclaimer function can fetch the ``struct foo`` address
+ and free it::
+
+ call_rcu1(&foo.rcu, foo_reclaim);
+
+ void foo_reclaim(struct rcu_head *rp)
+ {
+ struct foo *fp = container_of(rp, struct foo, rcu);
+ g_free(fp);
+ }
+
+ ``call_rcu1`` is typically used via either the ``call_rcu`` or
+ ``g_free_rcu`` macros, which handle the common case where the
+   ``rcu_head`` member is the first field of the struct.
+
+``void call_rcu(T *p, void (*func)(T *p), field-name);``
+ If the ``struct rcu_head`` is the first field in the struct, you can
+ use this macro instead of ``call_rcu1``.
+
+``void g_free_rcu(T *p, field-name);``
+ This is a special-case version of ``call_rcu`` where the callback
+ function is ``g_free``.
+ In the example given in ``call_rcu1``, one could have written simply::
+
+ g_free_rcu(&foo, rcu);
+
+``typeof(*p) qatomic_rcu_read(p);``
+ ``qatomic_rcu_read()`` is similar to ``qatomic_load_acquire()``, but
+ it makes some assumptions on the code that calls it. This allows a
+ more optimized implementation.
+
+ ``qatomic_rcu_read`` assumes that whenever a single RCU critical
+ section reads multiple shared data, these reads are either
+ data-dependent or need no ordering. This is almost always the
+ case when using RCU, because read-side critical sections typically
+ navigate one or more pointers (the pointers that are changed on
+ every update) until reaching a data structure of interest,
+ and then read from there.
+
+ RCU read-side critical sections must use ``qatomic_rcu_read()`` to
+ read data, unless concurrent writes are prevented by another
+ synchronization mechanism.
+
+ Furthermore, RCU read-side critical sections should traverse the
+ data structure in a single direction, opposite to the direction
+ in which the updater initializes it.
+
+``void qatomic_rcu_set(p, typeof(*p) v);``
+ ``qatomic_rcu_set()`` is similar to ``qatomic_store_release()``,
+ though it also makes assumptions on the code that calls it in
+ order to allow a more optimized implementation.
+
+ In particular, ``qatomic_rcu_set()`` suffices for synchronization
+ with readers, if the updater never mutates a field within a
+ data item that is already accessible to readers. This is the
+ case when initializing a new copy of the RCU-protected data
+ structure; just ensure that initialization of ``*p`` is carried out
+ before ``qatomic_rcu_set()`` makes the data item visible to readers.
+ If this rule is observed, writes will happen in the opposite
+ order as reads in the RCU read-side critical sections (or if
+ there is just one update), and there will be no need for other
+ synchronization mechanism to coordinate the accesses.
+
+The following APIs must be used before RCU is used in a thread:
+
+``void rcu_register_thread(void);``
+ Mark a thread as taking part in the RCU mechanism. Such a thread
+ will have to report quiescent points regularly, either manually
+ or through the ``QemuCond``/``QemuSemaphore``/``QemuEvent`` APIs.
+
+``void rcu_unregister_thread(void);``
+ Mark a thread as not taking part anymore in the RCU mechanism.
+ It is not a problem if such a thread reports quiescent points,
+ either manually or by using the
+ ``QemuCond``/``QemuSemaphore``/``QemuEvent`` APIs.
+
+Note that these APIs are relatively heavyweight, and should **not** be
+nested.
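+
+For example, a helper thread that uses RCU might be structured as
+follows (a minimal sketch; ``my_worker_thread`` is a hypothetical
+function)::
+
+    static void *my_worker_thread(void *opaque)
+    {
+        rcu_register_thread();
+
+        /* RCU read-side critical sections may be used from here on. */
+
+        rcu_unregister_thread();
+        return NULL;
+    }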
+
+Convenience macros
+------------------
+
+Two macros are provided that automatically release the read lock at the
+end of the scope.
+
+``RCU_READ_LOCK_GUARD()``
+ Takes the lock and will release it at the end of the block it's
+ used in.
+
+``WITH_RCU_READ_LOCK_GUARD() { code }``
+ Is used at the head of a block to protect the code within the block.
+
+Note that a ``goto`` out of the guarded block will also drop the lock.
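+
+For example (a minimal sketch, assuming a hypothetical global
+``struct foo *global_foo``, with ``struct foo`` as defined above)::
+
+    static int read_foo_a(void)
+    {
+        int val;
+
+        WITH_RCU_READ_LOCK_GUARD() {
+            struct foo *fp = qatomic_rcu_read(&global_foo);
+            val = fp->a;
+        }
+        /* The RCU read lock has already been dropped here. */
+        return val;
+    }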
+
+Differences with Linux
+----------------------
+
+- Waiting on a mutex is possible, though discouraged, within an RCU critical
+ section. This is because spinlocks are rarely (if ever) used in userspace
+ programming; not allowing this would prevent upgrading an RCU read-side
+ critical section to become an updater.
+
+- ``qatomic_rcu_read`` and ``qatomic_rcu_set`` replace ``rcu_dereference`` and
+ ``rcu_assign_pointer``. They take a **pointer** to the variable being accessed.
+
+- ``call_rcu`` is a macro that has an extra argument (the name of the first
+ field in the struct, which must be a struct ``rcu_head``), and expects the
+ type of the callback's argument to be the type of the first argument.
+ ``call_rcu1`` is the same as Linux's ``call_rcu``.
+
+
+RCU Patterns
+------------
+
+Many patterns using reader-writer locks translate directly to RCU, with
+the advantages of higher scalability and deadlock immunity.
+
+In general, RCU can be used whenever it is possible to create a new
+"version" of a data structure every time the updater runs. This may
+sound like a very strict restriction, however:
+
+- the updater does not mean "everything that writes to a data structure",
+ but rather "everything that involves a reclamation step". See the
+  array example below.
+
+- in some cases, creating a new version of a data structure may actually
+ be very cheap. For example, modifying the "next" pointer of a singly
+ linked list is effectively creating a new version of the list.
+
+Here are some frequently-used RCU idioms that are worth noting.
+
+
+RCU list processing
+^^^^^^^^^^^^^^^^^^^
+
+TBD (not yet used in QEMU)
+
+
+RCU reference counting
+^^^^^^^^^^^^^^^^^^^^^^
+
+Because grace periods are not allowed to complete while there is an RCU
+read-side critical section in progress, the RCU read-side primitives
+may be used as a restricted reference-counting mechanism. For example,
+consider the following code fragment::
+
+ rcu_read_lock();
+ p = qatomic_rcu_read(&foo);
+ /* do something with p. */
+ rcu_read_unlock();
+
+The RCU read-side critical section ensures that the value of ``p`` remains
+valid until after the ``rcu_read_unlock()``. In some sense, it is acquiring
+a reference to ``p`` that is later released when the critical section ends.
+The write side looks simply like this (with appropriate locking)::
+
+ qemu_mutex_lock(&foo_mutex);
+ old = foo;
+ qatomic_rcu_set(&foo, new);
+ qemu_mutex_unlock(&foo_mutex);
+ synchronize_rcu();
+ free(old);
+
+If the processing cannot be done purely within the critical section, it
+is possible to combine this idiom with a "real" reference count::
+
+ rcu_read_lock();
+ p = qatomic_rcu_read(&foo);
+ foo_ref(p);
+ rcu_read_unlock();
+ /* do something with p. */
+ foo_unref(p);
+
+The write side can be like this::
+
+ qemu_mutex_lock(&foo_mutex);
+ old = foo;
+ qatomic_rcu_set(&foo, new);
+ qemu_mutex_unlock(&foo_mutex);
+ synchronize_rcu();
+ foo_unref(old);
+
+or with ``call_rcu``::
+
+ qemu_mutex_lock(&foo_mutex);
+ old = foo;
+ qatomic_rcu_set(&foo, new);
+ qemu_mutex_unlock(&foo_mutex);
+ call_rcu(foo_unref, old, rcu);
+
+In both cases, the write side only performs removal. Reclamation
+happens when the last reference to a ``foo`` object is dropped.
+Using ``synchronize_rcu()`` is undesirably expensive, because the
+last reference may be dropped on the read side. Hence you can
+use ``call_rcu()`` instead::
+
+ foo_unref(struct foo *p) {
+ if (qatomic_fetch_dec(&p->refcount) == 1) {
+ call_rcu(foo_destroy, p, rcu);
+ }
+ }
+
+
+Note that the same idioms would be possible with reader/writer
+locks::
+
+ read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock);
+ p = foo; p = foo;
+ /* do something with p. */ foo = new;
+ read_unlock(&foo_rwlock); free(p);
+ write_mutex_unlock(&foo_rwlock);
+ free(p);
+
+ ------------------------------------------------------------------
+
+ read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock);
+ p = foo; old = foo;
+ foo_ref(p); foo = new;
+ read_unlock(&foo_rwlock); foo_unref(old);
+ /* do something with p. */ write_mutex_unlock(&foo_rwlock);
+ read_lock(&foo_rwlock);
+ foo_unref(p);
+ read_unlock(&foo_rwlock);
+
+``foo_unref`` could use a mechanism such as bottom halves to move deallocation
+out of the write-side critical section.
+
+
+RCU resizable arrays
+^^^^^^^^^^^^^^^^^^^^
+
+Resizable arrays can be used with RCU. The expensive RCU synchronization
+(or ``call_rcu``) only needs to take place when the array is resized.
+The two items to take care of are:
+
+- ensuring that the old version of the array is available between removal
+ and reclamation;
+
+- avoiding mismatches in the read side between the array data and the
+ array size.
+
+The first problem is avoided simply by not using ``realloc``. Instead,
+each resize will allocate a new array and copy the old data into it.
+The second problem would arise if the size and the data pointers were
+two members of a larger struct::
+
+ struct mystuff {
+ ...
+ int data_size;
+ int data_alloc;
+ T *data;
+ ...
+ };
+
+Instead, we store the size of the array with the array itself::
+
+ struct arr {
+ int size;
+ int alloc;
+ T data[];
+ };
+ struct arr *global_array;
+
+ read side:
+ rcu_read_lock();
+ struct arr *array = qatomic_rcu_read(&global_array);
+ x = i < array->size ? array->data[i] : -1;
+ rcu_read_unlock();
+ return x;
+
+ write side (running under a lock):
+ if (global_array->size == global_array->alloc) {
+ /* Creating a new version. */
+ new_array = g_malloc(sizeof(struct arr) +
+ global_array->alloc * 2 * sizeof(T));
+ new_array->size = global_array->size;
+ new_array->alloc = global_array->alloc * 2;
+ memcpy(new_array->data, global_array->data,
+ global_array->alloc * sizeof(T));
+
+ /* Removal phase. */
+ old_array = global_array;
+ qatomic_rcu_set(&global_array, new_array);
+ synchronize_rcu();
+
+ /* Reclamation phase. */
+ free(old_array);
+ }
+
+
+References
+----------
+
+* The `Linux kernel RCU documentation <https://docs.kernel.org/RCU/>`__
diff --git a/docs/devel/rcu.txt b/docs/devel/rcu.txt
deleted file mode 100644
index 2e6cc60..0000000
--- a/docs/devel/rcu.txt
+++ /dev/null
@@ -1,406 +0,0 @@
-Using RCU (Read-Copy-Update) for synchronization
-================================================
-
-Read-copy update (RCU) is a synchronization mechanism that is used to
-protect read-mostly data structures. RCU is very efficient and scalable
-on the read side (it is wait-free), and thus can make the read paths
-extremely fast.
-
-RCU supports concurrency between a single writer and multiple readers,
-thus it is not used alone. Typically, the write-side will use a lock to
-serialize multiple updates, but other approaches are possible (e.g.,
-restricting updates to a single task). In QEMU, when a lock is used,
-this will often be the "iothread mutex", also known as the "big QEMU
-lock" (BQL). Also, restricting updates to a single task is done in
-QEMU using the "bottom half" API.
-
-RCU is fundamentally a "wait-to-finish" mechanism. The read side marks
-sections of code with "critical sections", and the update side will wait
-for the execution of all *currently running* critical sections before
-proceeding, or before asynchronously executing a callback.
-
-The key point here is that only the currently running critical sections
-are waited for; critical sections that are started _after_ the beginning
-of the wait do not extend the wait, despite running concurrently with
-the updater. This is the reason why RCU is more scalable than,
-for example, reader-writer locks. It is so much more scalable that
-the system will have a single instance of the RCU mechanism; a single
-mechanism can be used for an arbitrary number of "things", without
-having to worry about things such as contention or deadlocks.
-
-How is this possible? The basic idea is to split updates in two phases,
-"removal" and "reclamation". During removal, we ensure that subsequent
-readers will not be able to get a reference to the old data. After
-removal has completed, a critical section will not be able to access
-the old data. Therefore, critical sections that begin after removal
-do not matter; as soon as all previous critical sections have finished,
-there cannot be any readers who hold references to the data structure,
-and these can now be safely reclaimed (e.g., freed or unref'ed).
-
-Here is a picture:
-
- thread 1 thread 2 thread 3
- ------------------- ------------------------ -------------------
- enter RCU crit.sec.
- | finish removal phase
- | begin wait
- | | enter RCU crit.sec.
- exit RCU crit.sec | |
- complete wait |
- begin reclamation phase |
- exit RCU crit.sec.
-
-
-Note how thread 3 is still executing its critical section when thread 2
-starts reclaiming data. This is possible, because the old version of the
-data structure was not accessible at the time thread 3 began executing
-that critical section.
-
-
-RCU API
-=======
-
-The core RCU API is small:
-
- void rcu_read_lock(void);
-
- Used by a reader to inform the reclaimer that the reader is
- entering an RCU read-side critical section.
-
- void rcu_read_unlock(void);
-
- Used by a reader to inform the reclaimer that the reader is
- exiting an RCU read-side critical section. Note that RCU
- read-side critical sections may be nested and/or overlapping.
-
- void synchronize_rcu(void);
-
- Blocks until all pre-existing RCU read-side critical sections
- on all threads have completed. This marks the end of the removal
- phase and the beginning of reclamation phase.
-
- Note that it would be valid for another update to come while
- synchronize_rcu is running. Because of this, it is better that
- the updater releases any locks it may hold before calling
- synchronize_rcu. If this is not possible (for example, because
- the updater is protected by the BQL), you can use call_rcu.
-
- void call_rcu1(struct rcu_head * head,
- void (*func)(struct rcu_head *head));
-
- This function invokes func(head) after all pre-existing RCU
- read-side critical sections on all threads have completed. This
- marks the end of the removal phase, with func taking care
- asynchronously of the reclamation phase.
-
- The foo struct needs to have an rcu_head structure added,
- perhaps as follows:
-
- struct foo {
- struct rcu_head rcu;
- int a;
- char b;
- long c;
- };
-
- so that the reclaimer function can fetch the struct foo address
- and free it:
-
- call_rcu1(&foo.rcu, foo_reclaim);
-
- void foo_reclaim(struct rcu_head *rp)
- {
- struct foo *fp = container_of(rp, struct foo, rcu);
- g_free(fp);
- }
-
- For the common case where the rcu_head member is the first of the
- struct, you can use the following macro.
-
- void call_rcu(T *p,
- void (*func)(T *p),
- field-name);
- void g_free_rcu(T *p,
- field-name);
-
- call_rcu1 is typically used through these macro, in the common case
- where the "struct rcu_head" is the first field in the struct. If
- the callback function is g_free, in particular, g_free_rcu can be
- used. In the above case, one could have written simply:
-
- g_free_rcu(&foo, rcu);
-
- typeof(*p) qatomic_rcu_read(p);
-
- qatomic_rcu_read() is similar to qatomic_load_acquire(), but it makes
- some assumptions on the code that calls it. This allows a more
- optimized implementation.
-
- qatomic_rcu_read assumes that whenever a single RCU critical
- section reads multiple shared data, these reads are either
- data-dependent or need no ordering. This is almost always the
- case when using RCU, because read-side critical sections typically
- navigate one or more pointers (the pointers that are changed on
- every update) until reaching a data structure of interest,
- and then read from there.
-
- RCU read-side critical sections must use qatomic_rcu_read() to
- read data, unless concurrent writes are prevented by another
- synchronization mechanism.
-
- Furthermore, RCU read-side critical sections should traverse the
- data structure in a single direction, opposite to the direction
- in which the updater initializes it.
-
- void qatomic_rcu_set(p, typeof(*p) v);
-
- qatomic_rcu_set() is similar to qatomic_store_release(), though it also
- makes assumptions on the code that calls it in order to allow a more
- optimized implementation.
-
- In particular, qatomic_rcu_set() suffices for synchronization
- with readers, if the updater never mutates a field within a
- data item that is already accessible to readers. This is the
- case when initializing a new copy of the RCU-protected data
- structure; just ensure that initialization of *p is carried out
- before qatomic_rcu_set() makes the data item visible to readers.
- If this rule is observed, writes will happen in the opposite
- order as reads in the RCU read-side critical sections (or if
- there is just one update), and there will be no need for other
- synchronization mechanism to coordinate the accesses.
-
-The following APIs must be used before RCU is used in a thread:
-
- void rcu_register_thread(void);
-
- Mark a thread as taking part in the RCU mechanism. Such a thread
- will have to report quiescent points regularly, either manually
- or through the QemuCond/QemuSemaphore/QemuEvent APIs.
-
- void rcu_unregister_thread(void);
-
- Mark a thread as not taking part anymore in the RCU mechanism.
- It is not a problem if such a thread reports quiescent points,
- either manually or by using the QemuCond/QemuSemaphore/QemuEvent
- APIs.
-
-Note that these APIs are relatively heavyweight, and should _not_ be
-nested.
-
-Convenience macros
-==================
-
-Two macros are provided that automatically release the read lock at the
-end of the scope.
-
- RCU_READ_LOCK_GUARD()
-
- Takes the lock and will release it at the end of the block it's
- used in.
-
- WITH_RCU_READ_LOCK_GUARD() { code }
-
- Is used at the head of a block to protect the code within the block.
-
-Note that 'goto'ing out of the guarded block will also drop the lock.
-
-DIFFERENCES WITH LINUX
-======================
-
-- Waiting on a mutex is possible, though discouraged, within an RCU critical
- section. This is because spinlocks are rarely (if ever) used in userspace
- programming; not allowing this would prevent upgrading an RCU read-side
- critical section to become an updater.
-
-- qatomic_rcu_read and qatomic_rcu_set replace rcu_dereference and
- rcu_assign_pointer. They take a _pointer_ to the variable being accessed.
-
-- call_rcu is a macro that has an extra argument (the name of the first
- field in the struct, which must be a struct rcu_head), and expects the
- type of the callback's argument to be the type of the first argument.
- call_rcu1 is the same as Linux's call_rcu.
-
-
-RCU PATTERNS
-============
-
-Many patterns using read-writer locks translate directly to RCU, with
-the advantages of higher scalability and deadlock immunity.
-
-In general, RCU can be used whenever it is possible to create a new
-"version" of a data structure every time the updater runs. This may
-sound like a very strict restriction, however:
-
-- the updater does not mean "everything that writes to a data structure",
- but rather "everything that involves a reclamation step". See the
- array example below
-
-- in some cases, creating a new version of a data structure may actually
- be very cheap. For example, modifying the "next" pointer of a singly
- linked list is effectively creating a new version of the list.
-
-Here are some frequently-used RCU idioms that are worth noting.
-
-
-RCU list processing
--------------------
-
-TBD (not yet used in QEMU)
-
-
-RCU reference counting
-----------------------
-
-Because grace periods are not allowed to complete while there is an RCU
-read-side critical section in progress, the RCU read-side primitives
-may be used as a restricted reference-counting mechanism. For example,
-consider the following code fragment:
-
- rcu_read_lock();
- p = qatomic_rcu_read(&foo);
- /* do something with p. */
- rcu_read_unlock();
-
-The RCU read-side critical section ensures that the value of "p" remains
-valid until after the rcu_read_unlock(). In some sense, it is acquiring
-a reference to p that is later released when the critical section ends.
-The write side looks simply like this (with appropriate locking):
-
- qemu_mutex_lock(&foo_mutex);
- old = foo;
- qatomic_rcu_set(&foo, new);
- qemu_mutex_unlock(&foo_mutex);
- synchronize_rcu();
- free(old);
-
-If the processing cannot be done purely within the critical section, it
-is possible to combine this idiom with a "real" reference count:
-
- rcu_read_lock();
- p = qatomic_rcu_read(&foo);
- foo_ref(p);
- rcu_read_unlock();
- /* do something with p. */
- foo_unref(p);
-
-The write side can be like this:
-
- qemu_mutex_lock(&foo_mutex);
- old = foo;
- qatomic_rcu_set(&foo, new);
- qemu_mutex_unlock(&foo_mutex);
- synchronize_rcu();
- foo_unref(old);
-
-or with call_rcu:
-
- qemu_mutex_lock(&foo_mutex);
- old = foo;
- qatomic_rcu_set(&foo, new);
- qemu_mutex_unlock(&foo_mutex);
- call_rcu(foo_unref, old, rcu);
-
-In both cases, the write side only performs removal. Reclamation
-happens when the last reference to a "foo" object is dropped.
-Using synchronize_rcu() is undesirably expensive, because the
-last reference may be dropped on the read side. Hence you can
-use call_rcu() instead:
-
- foo_unref(struct foo *p) {
- if (qatomic_fetch_dec(&p->refcount) == 1) {
- call_rcu(foo_destroy, p, rcu);
- }
- }
-
-
-Note that the same idioms would be possible with reader/writer
-locks:
-
- read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock);
- p = foo; p = foo;
- /* do something with p. */ foo = new;
- read_unlock(&foo_rwlock); free(p);
- write_mutex_unlock(&foo_rwlock);
- free(p);
-
- ------------------------------------------------------------------
-
- read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock);
- p = foo; old = foo;
- foo_ref(p); foo = new;
- read_unlock(&foo_rwlock); foo_unref(old);
- /* do something with p. */ write_mutex_unlock(&foo_rwlock);
- read_lock(&foo_rwlock);
- foo_unref(p);
- read_unlock(&foo_rwlock);
-
-foo_unref could use a mechanism such as bottom halves to move deallocation
-out of the write-side critical section.
-
-
-RCU resizable arrays
---------------------
-
-Resizable arrays can be used with RCU. The expensive RCU synchronization
-(or call_rcu) only needs to take place when the array is resized.
-The two items to take care of are:
-
-- ensuring that the old version of the array is available between removal
- and reclamation;
-
-- avoiding mismatches in the read side between the array data and the
- array size.
-
-The first problem is avoided simply by not using realloc. Instead,
-each resize will allocate a new array and copy the old data into it.
-The second problem would arise if the size and the data pointers were
-two members of a larger struct:
-
- struct mystuff {
- ...
- int data_size;
- int data_alloc;
- T *data;
- ...
- };
-
-Instead, we store the size of the array with the array itself:
-
- struct arr {
- int size;
- int alloc;
- T data[];
- };
- struct arr *global_array;
-
- read side:
- rcu_read_lock();
- struct arr *array = qatomic_rcu_read(&global_array);
- x = i < array->size ? array->data[i] : -1;
- rcu_read_unlock();
- return x;
-
- write side (running under a lock):
- if (global_array->size == global_array->alloc) {
- /* Creating a new version. */
- new_array = g_malloc(sizeof(struct arr) +
- global_array->alloc * 2 * sizeof(T));
- new_array->size = global_array->size;
- new_array->alloc = global_array->alloc * 2;
- memcpy(new_array->data, global_array->data,
- global_array->alloc * sizeof(T));
-
- /* Removal phase. */
- old_array = global_array;
- qatomic_rcu_set(&global_array, new_array);
- synchronize_rcu();
-
- /* Reclamation phase. */
- free(old_array);
- }
-
-
-SOURCES
-=======
-
-* Documentation/RCU/ from the Linux kernel
diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst
index effd856..40f58d9 100644
--- a/docs/devel/replay.rst
+++ b/docs/devel/replay.rst
@@ -202,6 +202,9 @@ into the log.
Saving/restoring the VM state
-----------------------------
+Record/replay relies on VM state save and restore being complete and
+deterministic.
+
All fields in the device state structure (including virtual timers)
should be restored by loadvm to the same values they had before savevm.
diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst
index 9746a4e..c02fe0a 100644
--- a/docs/devel/reset.rst
+++ b/docs/devel/reset.rst
@@ -44,6 +44,26 @@ The Resettable interface handles reset types with an enum ``ResetType``:
value on each cold reset, such as RNG seed information, and which they
must not reinitialize on a snapshot-load reset.
+``RESET_TYPE_WAKEUP``
+ If the machine supports waking up from a suspended state and needs to reset
+ its devices during wake-up (from the ``MachineClass::wakeup()`` method), this
+ reset type should be used for such a request. Devices can utilize this reset
+ type to differentiate the reset requested during machine wake-up from other
+ reset requests. For example, RAM content must not be lost during wake-up, and
+ memory devices like virtio-mem that provide additional RAM must not reset
+ such state during wake-ups, but might do so during cold resets. However, this
+ reset type should not be used for wake-up detection, as not every machine
+ type issues a device reset request during wake-up.
+
+``RESET_TYPE_S390_CPU_NORMAL``
+ This is only used for S390 CPU objects; it clears interrupts, stops
+ processing, and clears the TLB, but does not touch register contents.
+
+``RESET_TYPE_S390_CPU_INITIAL``
+ This is only used for S390 CPU objects; it does everything
+ ``RESET_TYPE_S390_CPU_NORMAL`` does and also clears the PSW, prefix,
+ FPC, timer and control registers. It does not touch gprs, fprs or acrs.
+
Devices which implement reset methods must treat any unknown ``ResetType``
as equivalent to ``RESET_TYPE_COLD``; this will reduce the amount of
existing code we need to change if we add more types in future.
@@ -123,6 +143,11 @@ The *exit* phase is executed only when the last reset operation ends. Therefore
the object does not need to care how many of reset controllers it has and how
many of them have started a reset.
+DMA capable devices are expected to cancel all outstanding DMA operations
+during either 'enter' or 'hold' phases. IOMMUs are expected to reset during
+the 'exit' phase and this sequencing makes sure no outstanding DMA request
+will fault.
+
Handling reset in a resettable object
-------------------------------------
@@ -191,7 +216,7 @@ in reset.
ResettablePhases parent_phases;
} MyDevClass;
- static void mydev_class_init(ObjectClass *class, void *data)
+ static void mydev_class_init(ObjectClass *class, const void *data)
{
MyDevClass *myclass = MYDEV_CLASS(class);
ResettableClass *rc = RESETTABLE_CLASS(class);
@@ -266,8 +291,8 @@ every reset child of the given resettable object. All children must be
resettable too. Additional parameters (a reset type and an opaque pointer) must
be passed to the callback too.
-In ``DeviceClass`` and ``BusClass`` the ``ResettableState`` is located
-``DeviceState`` and ``BusState`` structure. ``child_foreach()`` is implemented
+In ``DeviceClass`` and ``BusClass`` the ``ResettableState`` is located in the
+``DeviceState`` and ``BusState`` structures. ``child_foreach()`` is implemented
to follow the bus hierarchy; for a bus, it calls the function on every child
device; for a device, it calls the function on every bus child. When we reset
the main system bus, we reset the whole machine bus tree.
diff --git a/docs/devel/rust.rst b/docs/devel/rust.rst
new file mode 100644
index 0000000..dc8c441
--- /dev/null
+++ b/docs/devel/rust.rst
@@ -0,0 +1,478 @@
+.. |msrv| replace:: 1.63.0
+
+Rust in QEMU
+============
+
+Rust in QEMU is a project to enable using the Rust programming language
+to add new functionality to QEMU.
+
+Right now, the focus is on making it possible to write devices that inherit
+from ``SysBusDevice`` in `*safe*`__ Rust. Later, it may become possible
+to write other kinds of devices (e.g. PCI devices that can do DMA),
+complete boards, or backends (e.g. block device formats).
+
+__ https://doc.rust-lang.org/nomicon/meet-safe-and-unsafe.html
+
+Building the Rust in QEMU code
+------------------------------
+
+The Rust in QEMU code is included in the emulators via Meson. Meson
+invokes rustc directly, building static libraries that are then linked
+together with the C code. This is completely automatic when you run
+``make`` or ``ninja``.
+
+However, QEMU's build system also tries to be easy to use for people who
+are accustomed to the more "normal" Cargo-based development workflow.
+In particular:
+
+* the set of warnings and lints that are used to build QEMU always
+ comes from the ``rust/Cargo.toml`` workspace file
+
+* it is also possible to use ``cargo`` for common Rust-specific coding
+ tasks, in particular to invoke ``clippy``, ``rustfmt`` and ``rustdoc``.
+
+To this end, QEMU includes a ``build.rs`` build script that picks up
+generated sources from QEMU's build directory and puts them in Cargo's
+output directory (typically ``rust/target/``). A vanilla invocation
+of Cargo will complain that it cannot find the generated sources,
+which can be fixed in different ways:
+
+* by using Makefile targets, provided by Meson, that run ``clippy`` or
+ ``rustdoc``::
+
+ make clippy
+ make rustdoc
+
+A target for ``rustfmt`` is also declared in ``rust/meson.build``::
+
+ make rustfmt
+
+* by invoking ``cargo`` through the Meson `development environment`__
+ feature::
+
+ pyvenv/bin/meson devenv -w ../rust cargo clippy --tests
+ pyvenv/bin/meson devenv -w ../rust cargo fmt
+
+ If you are going to use ``cargo`` repeatedly, ``pyvenv/bin/meson devenv``
+ will enter a shell where commands like ``cargo fmt`` just work.
+
+__ https://mesonbuild.com/Commands.html#devenv
+
+* by pointing the ``MESON_BUILD_ROOT`` to the top of your QEMU build
+ tree. This third method is useful if you are using ``rust-analyzer``;
+ you can set the environment variable through the
+ ``rust-analyzer.cargo.extraEnv`` setting.
+
+As shown above, you can use the ``--tests`` option as usual to operate on test
+code. Note however that you cannot *build* or run tests via ``cargo``, because
+they need supporting C code from QEMU that Cargo does not know about. Tests can
+be run via ``meson test`` or ``make``::
+
+ make check-rust
+
+Note that doctests require all ``.o`` files from the build to be available.
+
+Supported tools
+'''''''''''''''
+
+QEMU supports rustc version 1.77.0 and newer. Notably, the following features
+are missing:
+
+* inline const expression (stable in 1.79.0), currently worked around with
+ associated constants in the ``FnCall`` trait.
+
+* associated constants have to be explicitly marked ``'static`` (`changed in
+ 1.81.0`__)
+
+* ``&raw`` (stable in 1.82.0). Use ``addr_of!`` and ``addr_of_mut!`` instead,
+ though hopefully the need for raw pointers will go down over time.
+
+* ``new_uninit`` (stable in 1.82.0). This is used internally by the ``pinned_init``
+ crate, which is planned for inclusion in QEMU, but it can be easily patched
+ out.
+
+* referencing statics in constants (stable in 1.83.0). For now use a const
+ function; this is an important limitation for QEMU's migration stream
+ architecture (VMState). Right now, VMState lacks type safety because
+ it is hard to place the ``VMStateField`` definitions in traits.
+
+* NUL-terminated file names with ``#[track_caller]`` are scheduled for
+ inclusion as ``#![feature(location_file_nul)]``, but it will be a while
+ before QEMU can use them. For now, there is special code in
+ ``util/error.c`` to support non-NUL-terminated file names.
+
+* associated const equality would be nice to have for some users of
+ ``callbacks::FnCall``, but is still experimental. ``ASSERT_IS_SOME``
+ replaces it.
+
+__ https://github.com/rust-lang/rust/pull/125258
+
+QEMU also supports version 0.60.x of bindgen, which is missing option
+``--generate-cstr``. This option requires version 0.66.x and will
+be adopted as soon as supporting these older versions is not necessary
+anymore.
+
+Writing Rust code in QEMU
+-------------------------
+
+QEMU includes four crates:
+
+* ``qemu_api`` for bindings to C code and useful functionality
+
+* ``qemu_api_macros`` defines several procedural macros that are useful when
+ writing Rust code
+
+* ``pl011`` (under ``rust/hw/char/pl011``) and ``hpet`` (under ``rust/hw/timer/hpet``)
+ are sample devices that demonstrate ``qemu_api`` and ``qemu_api_macros``, and are
+ used to further develop them. These two crates are functional\ [#issues]_ replacements
+ for the ``hw/char/pl011.c`` and ``hw/timer/hpet.c`` files.
+
+.. [#issues] The ``pl011`` crate is synchronized with ``hw/char/pl011.c``
+ as of commit 3e0f118f82. The ``hpet`` crate is synchronized as of
+ commit 1433e38cc8. Both are lacking tracing functionality.
+
+This section explains how to work with them.
+
+Status
+''''''
+
+Modules of ``qemu_api`` can be categorized as follows:
+
+- *complete*: ready for use in new devices; if applicable, the API supports the
+ full functionality available in C
+
+- *stable*: ready for production use, the API is safe and should not undergo
+ major changes
+
+- *proof of concept*: the API is subject to change but allows working with safe
+ Rust
+
+- *initial*: the API is in its initial stages; it requires a large amount of
+ unsafe code; it might have soundness or type-safety issues
+
+The status of the modules is as follows:
+
+================ ======================
+module status
+================ ======================
+``assertions`` stable
+``bitops`` complete
+``callbacks`` complete
+``cell`` stable
+``errno`` complete
+``error`` stable
+``irq`` complete
+``log`` proof of concept
+``memory`` stable
+``module`` complete
+``qdev`` stable
+``qom`` stable
+``sysbus`` stable
+``timer`` stable
+``vmstate`` proof of concept
+``zeroable`` stable
+================ ======================
+
+.. note::
+ API stability is not a promise, not least because the C APIs are not a stable
+ interface either. Also, ``unsafe`` interfaces may be replaced by safe interfaces
+ later.
+
+Naming convention
+'''''''''''''''''
+
+C function names usually are prefixed according to the data type that they
+apply to, for example ``timer_mod`` or ``sysbus_connect_irq``. Furthermore,
+both functions and structs sometimes have a ``qemu_`` or ``QEMU`` prefix.
+Generally speaking, these are all removed in the corresponding Rust functions:
+``QEMUTimer`` becomes ``timer::Timer``, ``timer_mod`` becomes ``Timer::modify``,
+``sysbus_connect_irq`` becomes ``SysBusDeviceMethods::connect_irq``.
+
+Sometimes however a name appears multiple times in the QOM class hierarchy,
+and the only difference is in the prefix. An example is ``qdev_realize`` and
+``sysbus_realize``. In such cases, whenever a name is not unique in
+the hierarchy, always add the prefix to the classes that are lower in
+the hierarchy; for the top class, decide on a case by case basis.
+
+For example:
+
+========================== =========================================
+``device_cold_reset()`` ``DeviceMethods::cold_reset()``
+``pci_device_reset()`` ``PciDeviceMethods::pci_device_reset()``
+``pci_bridge_reset()`` ``PciBridgeMethods::pci_bridge_reset()``
+========================== =========================================
+
+Here, the name is not exactly the same, but nevertheless ``PciDeviceMethods``
+adds the prefix to avoid confusion, because the functionality of
+``device_cold_reset()`` and ``pci_device_reset()`` is subtly different.
+
+In this case, however, no prefix is needed:
+
+========================== =========================================
+``device_realize()`` ``DeviceMethods::realize()``
+``sysbus_realize()`` ``SysBusDeviceMethods::sysbus_realize()``
+``pci_realize()`` ``PciDeviceMethods::pci_realize()``
+========================== =========================================
+
+Here, the lower classes do not add any functionality, and mostly
+provide extra compile-time checking; the basic *realize* functionality
+is the same for all devices. Therefore, ``DeviceMethods`` does not
+add the prefix.
+
+Conversely, whenever a name is unique in the hierarchy, you should
+always remove the class name prefix.
+
+Common pitfalls
+'''''''''''''''
+
+Rust has very strict rules with respect to how you get an exclusive (``&mut``)
+reference; failure to respect those rules is a source of undefined behavior.
+In particular, even if a value is loaded from a raw mutable pointer (``*mut``),
+it *cannot* be cast to ``&mut`` unless the value was stored to the ``*mut``
+from a mutable reference. Furthermore, it is undefined behavior if any
+shared reference was created between the store to the ``*mut`` and the load::
+
+ let mut p: u32 = 42;
+ let p_mut = &mut p; // 1
+ let p_raw = p_mut as *mut u32; // 2
+
+ // p_raw keeps the mutable reference "alive"
+
+ let p_shared = &p; // 3
+ println!("access from &u32: {}", *p_shared);
+
+ // Bring back the mutable reference, its lifetime overlaps
+ // with that of a shared reference.
+ let p_mut = unsafe { &mut *p_raw }; // 4
+ println!("access from &mut u32: {}", *p_mut);
+
+ println!("access from &u32: {}", *p_shared); // 5
+
+These rules can be tested with `MIRI`__, for example.
+
+__ https://github.com/rust-lang/miri
+
+Almost all Rust code in QEMU will involve QOM objects, and pointers to these
+objects are *shared*, for example because they are part of the QOM composition
+tree. This creates exactly the above scenario:
+
+1. a QOM object is created
+
+2. a ``*mut`` is created, for example as the opaque value for a ``MemoryRegion``
+
+3. the QOM object is placed in the composition tree
+
+4. a memory access dereferences the opaque value to a ``&mut``
+
+5. but the shared reference is still present in the composition tree
+
+Because of this, QOM objects should almost always use ``&self`` instead
+of ``&mut self``; access to internal fields must use *interior mutability*
+to go from a shared reference to a ``&mut``.
+
+Whenever C code provides you with an opaque ``void *``, avoid converting it
+to a Rust mutable reference, and use a shared reference instead. The
+``qemu_api::cell`` module provides wrappers that can be used to tell the
+Rust compiler about interior mutability, and optionally to enforce locking
+rules for the "Big QEMU Lock". In the future, similar cell types might
+also be provided for ``AioContext``-based locking.
+
+In particular, device code will usually rely on the ``BqlRefCell`` and
+``BqlCell`` types to ensure that data is accessed correctly under the
+"Big QEMU Lock". These cell types are also known to the ``vmstate``
+crate, which is able to "look inside" them when building an in-memory
+representation of a ``struct``'s layout. Note that the same is not true
+of a ``RefCell`` or ``Mutex``.
+
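+As a minimal sketch (the device and field names below are made up for
+illustration and are not taken from an existing crate), a device could keep a
+counter in a ``BqlCell`` and update it through a shared reference::
+
+    use qemu_api::cell::BqlCell;
+
+    pub struct CounterState {
+        // ... parent object, MemoryRegion, etc. omitted ...
+        interrupts_seen: BqlCell<u64>,
+    }
+
+    impl CounterState {
+        // &self is enough: BqlCell provides interior mutability and,
+        // as described above, ensures correct access under the Big QEMU Lock.
+        fn record_interrupt(&self) {
+            self.interrupts_seen.set(self.interrupts_seen.get() + 1);
+        }
+    }
+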
+Bindings code instead will usually use the ``Opaque`` type, which hides
+the contents of the underlying struct and can be easily converted to
+a raw pointer, for use in calls to C functions. It can be used for
+example as follows::
+
+ #[repr(transparent)]
+ #[derive(Debug, qemu_api_macros::Wrapper)]
+ pub struct Object(Opaque<bindings::Object>);
+
+where the special ``derive`` macro provides useful methods such as
+``from_raw``, ``as_ptr``, ``as_mut_ptr`` and ``raw_get``. The bindings will
+then manually check for the big QEMU lock with assertions, which allows
+the wrapper to be declared thread-safe::
+
+ unsafe impl Send for Object {}
+ unsafe impl Sync for Object {}
+
+Writing bindings to C code
+''''''''''''''''''''''''''
+
+Here are some things to keep in mind when working on the ``qemu_api`` crate.
+
+**Look at existing code**
+ Very often, similar idioms in C code correspond to similar tricks in
+ Rust bindings. If the C code uses ``offsetof``, look at qdev properties
+ or ``vmstate``. If the C code has a complex const struct, look at
+ ``MemoryRegion``. Reuse existing patterns for handling lifetimes;
+ for example use ``&T`` for QOM objects that do not need a reference
+ count (including those that can be embedded in other objects) and
+ ``Owned<T>`` for those that need it.
+
+**Use the type system**
+ Bindings often need to access information that is specific to a type
+ (either a built-in one or a user-defined one) in order to pass it to C
+ functions. Put this information in a trait and access it through generic
+ parameters. The ``vmstate`` module has examples of how to retrieve type
+ information for the fields of a Rust ``struct``; a small sketch is also
+ shown after this list.
+
+**Prefer unsafe traits to unsafe functions**
+ Unsafe traits are much easier to prove correct than unsafe functions.
+ They are an excellent place to store metadata that can later be accessed
+ by generic functions. C code usually places metadata in global variables;
+ in Rust, it can be stored in traits and then turned into ``static``
+ variables. Often, unsafe traits can be generated by procedural macros.
+
+**Document limitations due to old Rust versions**
+ If you need to settle for an inferior solution because of the currently
+ supported set of Rust versions, document it in the source and in this
+ file. This ensures that it can be fixed when the minimum supported
+ version is bumped.
+
+**Keep locking in mind**
+ When marking a type ``Sync``, be careful of whether it needs the big
+ QEMU lock. Use ``BqlCell`` and ``BqlRefCell`` for interior data,
+ or assert ``bql_locked()``.
+
+**Don't be afraid of complexity, but document and isolate it**
+ It's okay to be tricky; device code is written more often than bindings
+ code and it's important that it is idiomatic. However, you should strive
+ to isolate any tricks in a place (for example a ``struct``, a trait
+ or a macro) where it can be documented and tested. If needed, include
+ toy versions of the code in the documentation.
+
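+The following sketch illustrates the *Use the type system* and *Prefer unsafe
+traits to unsafe functions* advice above; the trait and function names are
+invented for this example and are not part of ``qemu_api``::
+
+    /// # Safety
+    ///
+    /// Implementors promise that the metadata below matches what the
+    /// (hypothetical) C side expects for this type.
+    unsafe trait CTypeInfo {
+        const C_NAME: &'static str;
+        const C_SIZE: usize;
+    }
+
+    // SAFETY: the values match the imaginary C definition of uint32.
+    unsafe impl CTypeInfo for u32 {
+        const C_NAME: &'static str = "uint32";
+        const C_SIZE: usize = 4;
+    }
+
+    /// Generic, safe code can consume the metadata without any
+    /// further `unsafe` blocks.
+    fn describe<T: CTypeInfo>() -> (&'static str, usize) {
+        (T::C_NAME, T::C_SIZE)
+    }
+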
+Writing procedural macros
+'''''''''''''''''''''''''
+
+By convention, procedural macros are split into two functions: one
+returning ``Result<proc_macro2::TokenStream, MacroError>`` with the body of
+the procedural macro, and the second returning ``proc_macro::TokenStream``,
+which is the actual procedural macro. The former's name is the same as
+the latter's with the ``_or_error`` suffix. The code for the latter is more
+or less fixed; it follows this template, which varies only in the type
+after ``as`` in the invocation of ``parse_macro_input!``::
+
+ #[proc_macro_derive(Object)]
+ pub fn derive_object(input: TokenStream) -> TokenStream {
+ let input = parse_macro_input!(input as DeriveInput);
+ let expanded = derive_object_or_error(input).unwrap_or_else(Into::into);
+
+ TokenStream::from(expanded)
+ }
+
+The ``qemu_api_macros`` crate has utility functions to examine a
+``DeriveInput`` and perform common checks (e.g. looking for a struct
+with named fields). These functions return ``Result<..., MacroError>``
+and can be used easily in the procedural macro function::
+
+ fn derive_object_or_error(input: DeriveInput) ->
+ Result<proc_macro2::TokenStream, MacroError>
+ {
+ is_c_repr(&input, "#[derive(Object)]")?;
+
+ let name = &input.ident;
+ let parent = &get_fields(&input, "#[derive(Object)]")?[0].ident;
+ ...
+ }
+
+Use procedural macros with care. They are mostly useful for two purposes:
+
+* Performing consistency checks; for example ``#[derive(Object)]`` checks
+ that the structure has ``#[repr(C)]`` and that the type of the first field
+ is consistent with the ``ObjectType`` declaration.
+
+* Extracting information from Rust source code into traits, typically based
+ on types and attributes. For example, ``#[derive(TryInto)]`` builds an
+ implementation of ``TryFrom``, and it uses the ``#[repr(...)]`` attribute
+ as the ``TryFrom`` source and error types, as sketched below.
+
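+Based on the description above, the second purpose can be pictured as follows
+(a hedged sketch of *using* the derive, not of the code it generates; the
+``u32`` repr is an arbitrary choice for the example)::
+
+    use qemu_api_macros::TryInto;
+
+    #[derive(TryInto)]
+    #[repr(u32)]
+    enum Mode {
+        Off = 0,
+        On = 1,
+    }
+
+    fn parse(raw: u32) -> Option<Mode> {
+        // Per the bullet above, the derive produces roughly
+        // `impl TryFrom<u32> for Mode` with `u32` as the error type.
+        Mode::try_from(raw).ok()
+    }
+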
+Procedural macros can be hard to debug and test; if the code generation
+exceeds a few lines of code, it may be worthwhile to delegate work to
+"regular" declarative (``macro_rules!``) macros and write unit tests for
+those instead.
+
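+For instance, a procedural macro could emit an invocation of a declarative
+helper like the following (an illustrative sketch, not an existing QEMU macro),
+and the helper itself can then be unit-tested directly::
+
+    /// Declarative helper that does the bulk of the expansion.
+    macro_rules! impl_named {
+        ($ty:ty, $name:expr) => {
+            impl $ty {
+                pub const NAME: &'static str = $name;
+            }
+        };
+    }
+
+    struct Dummy;
+    impl_named!(Dummy, "dummy");
+
+    #[cfg(test)]
+    mod tests {
+        use super::*;
+
+        #[test]
+        fn name_is_generated() {
+            assert_eq!(Dummy::NAME, "dummy");
+        }
+    }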
+
+Coding style
+''''''''''''
+
+Code should pass clippy and be formatted with rustfmt.
+
+Right now, only the nightly version of ``rustfmt`` is supported. This
+might change in the future. While CI checks for correct formatting via
+``cargo fmt --check``, maintainers can fix this for you when applying patches.
+
+It is expected that ``qemu_api`` provides full ``rustdoc`` documentation for
+bindings that are in their final shape or close to it.
+
+Adding dependencies
+-------------------
+
+Generally, the set of dependent crates is kept small. Think twice before
+adding a new external crate, especially if it comes with a large set of
+dependencies itself. Sometimes QEMU only needs a small subset of the
+functionality; see for example QEMU's ``assertions`` module.
+
+On top of this recommendation, adding external crates to QEMU is a
+slightly complicated process, mostly due to the need to teach Meson how
+to build them. While Meson has initial support for parsing ``Cargo.lock``
+files, it is still highly experimental and is therefore not used.
+
+Therefore, external crates must be added as subprojects for Meson to
+learn how to build them, as well as to the relevant ``Cargo.toml`` files.
+The versions specified in ``rust/Cargo.lock`` must be the same as those of the
+subprojects; note that the ``rust/`` directory forms a Cargo `workspace`__,
+and therefore there is a single lock file for the whole build.
+
+__ https://doc.rust-lang.org/cargo/reference/workspaces.html#virtual-workspace
+
+Choose a version of the crate that works with QEMU's minimum supported
+Rust version (|msrv|).
+
+Second, a new ``wrap`` file must be added to teach Meson how to download the
+crate. The wrap file must be named ``NAME-SEMVER-rs.wrap``, where ``NAME``
+is the name of the crate and ``SEMVER`` is the version up to and including the
+first non-zero number. For example, a crate with version ``0.2.3`` will use
+``0.2`` for its ``SEMVER``, while a crate with version ``1.0.84`` will use ``1``.
+
+Third, the Meson rules to build the crate must be added at
+``subprojects/NAME-SEMVER-rs/meson.build``. Generally this includes:
+
+* ``subproject`` and ``dependency`` lines for all dependent crates
+
+* a ``static_library`` or ``rust.proc_macro`` line to perform the actual build
+
+* ``declare_dependency`` and ``meson.override_dependency`` lines to expose
+ the result to QEMU and to other subprojects
+
+Remember to add ``native: true`` to ``dependency``, ``static_library`` and
+``meson.override_dependency`` for dependencies of procedural macros.
+If a crate is needed in both procedural macros and QEMU binaries, everything
+apart from ``subproject`` must be duplicated to build both native and
+non-native versions of the crate.
+
+It's important to specify the right compiler options. These include:
+
+* the language edition (which can be found in the ``Cargo.toml`` file)
+
+* the ``--cfg`` flags (which have to be "reverse engineered" from the ``build.rs``
+ file of the crate).
+
+* usually, a ``--cap-lints allow`` argument to hide warnings from rustc
+ or clippy.
+
+After every change to the ``meson.build`` file you have to update the patched
+version with ``meson subprojects update --reset NAME-SEMVER-rs``. This might
+be automated in the future.
+
+Also, after every change to the ``meson.build`` file it is strongly suggested to
+do a dummy change to the ``.wrap`` file (for example adding a comment like
+``# version 2``), which will help Meson notice that the subproject is out of date.
+
+As a last step, add the new subproject to ``scripts/archive-source.sh``,
+``scripts/make-release`` and ``subprojects/.gitignore``.
diff --git a/docs/devel/style.rst b/docs/devel/style.rst
index 2f68b50..d025933 100644
--- a/docs/devel/style.rst
+++ b/docs/devel/style.rst
@@ -416,6 +416,26 @@ definitions instead of typedefs in headers and function prototypes; this
avoids problems with duplicated typedefs and reduces the need to include
headers from other headers.
+Bitfields
+---------
+
+C bitfields can be a cause of non-portability issues, especially under Windows
+where `MSVC has a different way to lay them out than GCC
+<https://gcc.gnu.org/onlinedocs/gcc/x86-Type-Attributes.html>`_, or where
+endianness matters.
+
+For this reason, we disallow usage of bitfields in packed structures and in any
+structures which are supposed to exactly match a specific layout in guest
+memory. Some existing code may use them, and we carefully ensured the layout was
+the one expected.
+
+We also suggest avoiding bitfields even in structures where the exact
+layout does not matter, unless you can show that they provide a significant
+usability benefit.
+
+We encourage the usage of ``include/hw/registerfields.h`` as a safe replacement
+for bitfields.
+
Reserved namespaces in C and POSIX
----------------------------------
diff --git a/docs/devel/submitting-a-patch.rst b/docs/devel/submitting-a-patch.rst
index 83e9092..f7917b8 100644
--- a/docs/devel/submitting-a-patch.rst
+++ b/docs/devel/submitting-a-patch.rst
@@ -18,7 +18,7 @@ one-shot fix, the bare minimum we ask is that:
* - Check
- Reason
- * - Patches contain Signed-off-by: Real Name <author@email>
+ * - Patches contain Signed-off-by: Your Name <author@email>
- States you are legally able to contribute the code. See :ref:`patch_emails_must_include_a_signed_off_by_line`
* - Sent as patch emails to ``qemu-devel@nongnu.org``
- The project uses an email list based workflow. See :ref:`submitting_your_patches`
@@ -235,6 +235,31 @@ to another list.) ``git send-email`` (`step-by-step setup guide
works best for delivering the patch without mangling it, but
attachments can be used as a last resort on a first-time submission.
+.. _use_git_publish:
+
+Use git-publish
+~~~~~~~~~~~~~~~
+
+If you already configured git send-email, you can simply use `git-publish
+<https://github.com/stefanha/git-publish>`__ to send your series.
+
+::
+
+ $ git checkout master -b my-feature
+ $ # work on new commits, add your 'Signed-off-by' lines to each
+ $ git publish
+ $ ... more work, rebase on master, ...
+ $ git publish # will send a v2
+
+Each time you post a series, git-publish will create a local tag with the format
+``<branchname>-v<version>`` to record the patch series.
+
+When sending patch emails, 'git publish' will consult the output of
+'scripts/get_maintainers.pl' and automatically CC anyone listed as maintainers
+of the affected code. Generally you should accept the suggested CC list, but
+there may sometimes be scenarios where it is appropriate to cut it down (e.g. on
+certain large tree-wide cleanups), or augment it with other interested people.
+
.. _if_you_cannot_send_patch_emails:
If you cannot send patch emails
@@ -252,10 +277,7 @@ patches to the QEMU mailing list by following these steps:
#. Send your patches to the QEMU mailing list using the web-based
``git-send-email`` UI at https://git.sr.ht/~USERNAME/qemu/send-email
-`This video
-<https://spacepub.space/videos/watch/ad258d23-0ac6-488c-83fc-2bacf578de3a>`__
-shows the web-based ``git-send-email`` workflow. Documentation is
-available `here
+Documentation for sourcehut is available `here
<https://man.sr.ht/git.sr.ht/#sending-patches-upstream>`__.
.. _cc_the_relevant_maintainer:
@@ -322,23 +344,9 @@ Patch emails must include a ``Signed-off-by:`` line
Your patches **must** include a Signed-off-by: line. This is a hard
requirement because it's how you say "I'm legally okay to contribute
-this and happy for it to go into QEMU". The process is modelled after
-the `Linux kernel
-<http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/SubmittingPatches?id=f6f94e2ab1b33f0082ac22d71f66385a60d8157f#n297>`__
-policy.
-
-If you wrote the patch, make sure your "From:" and "Signed-off-by:"
-lines use the same spelling. It's okay if you subscribe or contribute to
-the list via more than one address, but using multiple addresses in one
-commit just confuses things. If someone else wrote the patch, git will
-include a "From:" line in the body of the email (different from your
-envelope From:) that will give credit to the correct author; but again,
-that author's Signed-off-by: line is mandatory, with the same spelling.
-
-There are various tooling options for automatically adding these tags
-include using ``git commit -s`` or ``git format-patch -s``. For more
-information see `SubmittingPatches 1.12
-<http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/SubmittingPatches?id=f6f94e2ab1b33f0082ac22d71f66385a60d8157f#n297>`__.
+this and happy for it to go into QEMU". For full guidance, read the
+:ref:`code-provenance` documentation.
+
.. _include_a_meaningful_cover_letter:
@@ -406,6 +414,20 @@ For more details on how QEMU's stable process works, refer to the
.. _participating_in_code_review:
+Retrieve an existing series
+---------------------------
+
+If you want to apply an existing series on top of your tree, you can simply use
+`b4 <https://github.com/mricon/b4>`__.
+
+::
+
+ b4 shazam $msg-id
+
+The message ID identifies the patch series that has been sent to the mailing
+list. You need to retrieve the "Message-Id:" header from one of the patches; any
+of them can be used, and b4 will apply the whole series.
+
Participating in Code Review
----------------------------
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index d46b625..f26b837 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -239,7 +239,7 @@ Jumps/Labels
- | Jump to label.
- * - brcond_i32/i64 *t0*, *t1*, *cond*, *label*
+ * - brcond *t0*, *t1*, *cond*, *label*
- | Conditional jump if *t0* *cond* *t1* is true. *cond* can be:
|
@@ -261,98 +261,117 @@ Arithmetic
.. list-table::
- * - add_i32/i64 *t0*, *t1*, *t2*
+ * - add *t0*, *t1*, *t2*
- | *t0* = *t1* + *t2*
- * - sub_i32/i64 *t0*, *t1*, *t2*
+ * - sub *t0*, *t1*, *t2*
- | *t0* = *t1* - *t2*
- * - neg_i32/i64 *t0*, *t1*
+ * - neg *t0*, *t1*
- | *t0* = -*t1* (two's complement)
- * - mul_i32/i64 *t0*, *t1*, *t2*
+ * - mul *t0*, *t1*, *t2*
- | *t0* = *t1* * *t2*
- * - div_i32/i64 *t0*, *t1*, *t2*
+ * - divs *t0*, *t1*, *t2*
- | *t0* = *t1* / *t2* (signed)
| Undefined behavior if division by zero or overflow.
- * - divu_i32/i64 *t0*, *t1*, *t2*
+ * - divu *t0*, *t1*, *t2*
- | *t0* = *t1* / *t2* (unsigned)
| Undefined behavior if division by zero.
- * - rem_i32/i64 *t0*, *t1*, *t2*
+ * - rems *t0*, *t1*, *t2*
- | *t0* = *t1* % *t2* (signed)
| Undefined behavior if division by zero or overflow.
- * - remu_i32/i64 *t0*, *t1*, *t2*
+ * - remu *t0*, *t1*, *t2*
- | *t0* = *t1* % *t2* (unsigned)
| Undefined behavior if division by zero.
+ * - divs2 *q*, *r*, *nl*, *nh*, *d*
+
+ - | *q* = *nh:nl* / *d* (signed)
+ | *r* = *nh:nl* % *d*
+ | Undefined behaviour if division by zero, or the double-word
+ numerator divided by the single-word divisor does not fit
+ within the single-word quotient. The code generator will
+ pass *nh* as a simple sign-extension of *nl*, so the only
+ overflow should be *INT_MIN* / -1.
+
+ * - divu2 *q*, *r*, *nl*, *nh*, *d*
+
+ - | *q* = *nh:nl* / *d* (unsigned)
+ | *r* = *nh:nl* % *d*
+ | Undefined behaviour if division by zero, or the double-word
+ numerator divided by the single-word divisor does not fit
+ within the single-word quotient. The code generator will
+ pass 0 to *nh* to make a simple zero-extension of *nl*,
+ so overflow should never occur.
Logical
-------
.. list-table::
- * - and_i32/i64 *t0*, *t1*, *t2*
+ * - and *t0*, *t1*, *t2*
- | *t0* = *t1* & *t2*
- * - or_i32/i64 *t0*, *t1*, *t2*
+ * - or *t0*, *t1*, *t2*
- | *t0* = *t1* | *t2*
- * - xor_i32/i64 *t0*, *t1*, *t2*
+ * - xor *t0*, *t1*, *t2*
- | *t0* = *t1* ^ *t2*
- * - not_i32/i64 *t0*, *t1*
+ * - not *t0*, *t1*
- | *t0* = ~\ *t1*
- * - andc_i32/i64 *t0*, *t1*, *t2*
+ * - andc *t0*, *t1*, *t2*
- | *t0* = *t1* & ~\ *t2*
- * - eqv_i32/i64 *t0*, *t1*, *t2*
+ * - eqv *t0*, *t1*, *t2*
- | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*
- * - nand_i32/i64 *t0*, *t1*, *t2*
+ * - nand *t0*, *t1*, *t2*
- | *t0* = ~(*t1* & *t2*)
- * - nor_i32/i64 *t0*, *t1*, *t2*
+ * - nor *t0*, *t1*, *t2*
- | *t0* = ~(*t1* | *t2*)
- * - orc_i32/i64 *t0*, *t1*, *t2*
+ * - orc *t0*, *t1*, *t2*
- | *t0* = *t1* | ~\ *t2*
- * - clz_i32/i64 *t0*, *t1*, *t2*
+ * - clz *t0*, *t1*, *t2*
- | *t0* = *t1* ? clz(*t1*) : *t2*
- * - ctz_i32/i64 *t0*, *t1*, *t2*
+ * - ctz *t0*, *t1*, *t2*
- | *t0* = *t1* ? ctz(*t1*) : *t2*
- * - ctpop_i32/i64 *t0*, *t1*
+ * - ctpop *t0*, *t1*
- | *t0* = number of bits set in *t1*
|
- | With *ctpop* short for "count population", matching
- | the function name used in ``include/qemu/host-utils.h``.
+ | The name *ctpop* is short for "count population", and matches
+ the function name used in ``include/qemu/host-utils.h``.
Shifts/Rotates
@@ -360,30 +379,30 @@ Shifts/Rotates
.. list-table::
- * - shl_i32/i64 *t0*, *t1*, *t2*
+ * - shl *t0*, *t1*, *t2*
- | *t0* = *t1* << *t2*
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+ | Unspecified behavior for negative or out-of-range shifts.
- * - shr_i32/i64 *t0*, *t1*, *t2*
+ * - shr *t0*, *t1*, *t2*
- | *t0* = *t1* >> *t2* (unsigned)
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+ | Unspecified behavior for negative or out-of-range shifts.
- * - sar_i32/i64 *t0*, *t1*, *t2*
+ * - sar *t0*, *t1*, *t2*
- | *t0* = *t1* >> *t2* (signed)
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+ | Unspecified behavior for negative or out-of-range shifts.
- * - rotl_i32/i64 *t0*, *t1*, *t2*
+ * - rotl *t0*, *t1*, *t2*
- | Rotation of *t2* bits to the left
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+ | Unspecified behavior for negative or out-of-range shifts.
- * - rotr_i32/i64 *t0*, *t1*, *t2*
+ * - rotr *t0*, *t1*, *t2*
- | Rotation of *t2* bits to the right.
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+ | Unspecified behavior for negative or out-of-range shifts.
Misc
@@ -391,26 +410,12 @@ Misc
.. list-table::
- * - mov_i32/i64 *t0*, *t1*
+ * - mov *t0*, *t1*
- | *t0* = *t1*
- | Move *t1* to *t0* (both operands must have the same type).
-
- * - ext8s_i32/i64 *t0*, *t1*
-
- ext8u_i32/i64 *t0*, *t1*
-
- ext16s_i32/i64 *t0*, *t1*
-
- ext16u_i32/i64 *t0*, *t1*
+ | Move *t1* to *t0*.
- ext32s_i64 *t0*, *t1*
-
- ext32u_i64 *t0*, *t1*
-
- - | 8, 16 or 32 bit sign/zero extension (both operands must have the same type)
-
- * - bswap16_i32/i64 *t0*, *t1*, *flags*
+ * - bswap16 *t0*, *t1*, *flags*
- | 16 bit byte swap on the low bits of a 32/64 bit input.
|
@@ -420,24 +425,24 @@ Misc
|
| If neither ``TCG_BSWAP_OZ`` nor ``TCG_BSWAP_OS`` are set, then the bits of *t0* above bit 15 may contain any value.
- * - bswap32_i64 *t0*, *t1*, *flags*
-
- - | 32 bit byte swap on a 64-bit value. The flags are the same as for bswap16,
- except they apply from bit 31 instead of bit 15.
+ * - bswap32 *t0*, *t1*, *flags*
- * - bswap32_i32 *t0*, *t1*, *flags*
+ - | 32 bit byte swap. The flags are the same as for bswap16, except
+ they apply from bit 31 instead of bit 15. On TCG_TYPE_I32, the
+ flags should be zero.
- bswap64_i64 *t0*, *t1*, *flags*
+ * - bswap64 *t0*, *t1*, *flags*
- - | 32/64 bit byte swap. The flags are ignored, but still present
- for consistency with the other bswap opcodes.
+ - | 64 bit byte swap. The flags are ignored, but still present
+ for consistency with the other bswap opcodes. For future
+ compatibility, the flags should be zero.
* - discard_i32/i64 *t0*
- | Indicate that the value of *t0* won't be used later. It is useful to
force dead code elimination.
- * - deposit_i32/i64 *dest*, *t1*, *t2*, *pos*, *len*
+ * - deposit *dest*, *t1*, *t2*, *pos*, *len*
- | Deposit *t2* as a bitfield into *t1*, placing the result in *dest*.
|
@@ -446,14 +451,16 @@ Misc
| *len* - the length of the bitfield
| *pos* - the position of the first bit, counting from the LSB
|
- | For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field
+ | For example, "deposit dest, t1, t2, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
+ |
+ | on TCG_TYPE_I32.
- * - extract_i32/i64 *dest*, *t1*, *pos*, *len*
+ * - extract *dest*, *t1*, *pos*, *len*
- sextract_i32/i64 *dest*, *t1*, *pos*, *len*
+ sextract *dest*, *t1*, *pos*, *len*
- | Extract a bitfield from *t1*, placing the result in *dest*.
|
@@ -462,16 +469,16 @@ Misc
to the left with zeros; for sextract_*, the result will be extended
to the left with copies of the bitfield sign bit at *pos* + *len* - 1.
|
- | For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
+ | For example, "sextract dest, t1, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* << 20) >> 28
|
- | (using an arithmetic right shift).
+ | (using an arithmetic right shift) on TCG_TYPE_I32.
- * - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
+ * - extract2 *dest*, *t1*, *t2*, *pos*
- - | For N = {32,64}, extract an N-bit quantity from the concatenation
+ - | For TCG_TYPE_I{N}, extract an N-bit quantity from the concatenation
of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
accepts 0 <= *pos* <= N as inputs. The backend code generator will
not see either 0 or N as inputs for these opcodes.
@@ -494,19 +501,19 @@ Conditional moves
.. list-table::
- * - setcond_i32/i64 *dest*, *t1*, *t2*, *cond*
+ * - setcond *dest*, *t1*, *t2*, *cond*
- | *dest* = (*t1* *cond* *t2*)
|
| Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
- * - negsetcond_i32/i64 *dest*, *t1*, *t2*, *cond*
+ * - negsetcond *dest*, *t1*, *t2*, *cond*
- | *dest* = -(*t1* *cond* *t2*)
|
| Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
- * - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
+ * - movcond *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
- | *dest* = (*c1* *cond* *c2* ? *v1* : *v2*)
|
@@ -586,26 +593,79 @@ Multiword arithmetic support
.. list-table::
- * - add2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
+ * - addco *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* + *t2* and in addition output to the
+ carry bit provided by the host architecture.
+
+ * - addci *t0*, *t1*, *t2*
- sub2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
+ - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the
+ input carry bit provided by the host architecture.
+ The output carry bit need not be computed.
- - | Similar to add/sub, except that the double-word inputs *t1* and *t2* are
- formed from two single-word arguments, and the double-word output *t0*
- is returned in two single-word outputs.
+ * - addcio *t0*, *t1*, *t2*
- * - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
+ - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the
+ input carry bit provided by the host architecture,
+ and also compute the output carry bit.
+
+ * - addc1o *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* + *t2* + 1, and in addition output to the
+ carry bit provided by the host architecture. This is akin to
+ *addcio* with a fixed carry-in value of 1.
+ | This is intended to be used by the optimization pass,
+ intermediate to complete folding of the addition chain.
+ In some cases complete folding is not possible and this
+ opcode will remain until output. If this happens, the
+ code generator will use ``tcg_out_set_carry`` and then
+ the output routine for *addcio*.
+
+ * - subbo *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* - *t2* and in addition output to the
+ borrow bit provided by the host architecture.
+ | Depending on the host architecture, the carry bit may or may not be
+ identical to the borrow bit. Thus the addc\* and subb\*
+ opcodes must not be mixed.
+
+ * - subbi *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the
+ input borrow bit provided by the host architecture.
+ The output borrow bit need not be computed.
+
+ * - subbio *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the
+ input borrow bit provided by the host architecture,
+ and also compute the output borrow bit.
+
+ * - subb1o *t0*, *t1*, *t2*
+
+ - | Compute *t0* = *t1* - *t2* - 1, and in addition output to the
+ borrow bit provided by the host architecture. This is akin to
+ *subbio* with a fixed borrow-in value of 1.
+ | This is intended to be used by the optimization pass,
+ intermediate to complete folding of the subtraction chain.
+ In some cases complete folding is not possible and this
+ opcode will remain until output. If this happens, the
+ code generator will use ``tcg_out_set_borrow`` and then
+ the output routine for *subbio*.
+
+ * - mulu2 *t0_low*, *t0_high*, *t1*, *t2*
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
double-word product *t0*. The latter is returned in two single-word outputs.
- * - muls2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
+ * - muls2 *t0_low*, *t0_high*, *t1*, *t2*
- | Similar to mulu2, except the two inputs *t1* and *t2* are signed.
- * - mulsh_i32/i64 *t0*, *t1*, *t2*
+ * - mulsh *t0*, *t1*, *t2*
- muluh_i32/i64 *t0*, *t1*, *t2*
+ muluh *t0*, *t1*, *t2*
- | Provide the high part of a signed or unsigned multiply, respectively.
|
@@ -684,8 +744,6 @@ QEMU specific operations
qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx*
- qemu_st8_i32 *t0*, *t1*, *flags*, *memidx*
-
- | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest
address *t1*. The _i32/_i64/_i128 size applies to the size of the input/output
register *t0* only. The address *t1* is always sized according to the guest,
@@ -703,19 +761,14 @@ QEMU specific operations
64-bit memory access specified in *flags*.
|
| For qemu_ld/st_i128, these are only supported for a 64-bit host.
- |
- | For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
- the memory operation is known to be 8-bit. This allows the backend to
- provide a different set of register constraints.
Host vector operations
----------------------
-All of the vector ops have two parameters, ``TCGOP_VECL`` & ``TCGOP_VECE``.
-The former specifies the length of the vector in log2 64-bit units; the
-latter specifies the length of the element (if applicable) in log2 8-bit units.
-E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
+All of the vector ops have two parameters, ``TCGOP_TYPE`` & ``TCGOP_VECE``.
+The former specifies the length of the vector as a TCGType; the latter
+specifies the length of the element (if applicable) in log2 8-bit units.
.. list-table::
@@ -729,7 +782,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
* - dup_vec *v0*, *r1*
- - | Duplicate the low N bits of *r1* into VECL/VECE copies across *v0*.
+ - | Duplicate the low N bits of *r1* into TYPE/VECE copies across *v0*.
* - dupi_vec *v0*, *c*
@@ -738,7 +791,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
* - dup2_vec *v0*, *r1*, *r2*
- - | Duplicate *r2*:*r1* into VECL/64 copies across *v0*. This opcode is
+ - | Duplicate *r2*:*r1* into TYPE/64 copies across *v0*. This opcode is
only present for 32-bit hosts.
* - add_vec *v0*, *v1*, *v2*
@@ -810,7 +863,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
.. code-block:: c
- for (i = 0; i < VECL/VECE; ++i) {
+ for (i = 0; i < TYPE/VECE; ++i) {
v0[i] = v1[i] << s2;
}
@@ -832,7 +885,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
.. code-block:: c
- for (i = 0; i < VECL/VECE; ++i) {
+ for (i = 0; i < TYPE/VECE; ++i) {
v0[i] = v1[i] << v2[i];
}
@@ -885,9 +938,9 @@ Assumptions
The target word size (``TCG_TARGET_REG_BITS``) is expected to be 32 bit or
64 bit. It is expected that the pointer has the same size as the word.
-On a 32 bit target, all 64 bit operations are converted to 32 bits. A
-few specific operations must be implemented to allow it (see add2_i32,
-sub2_i32, brcond2_i32).
+On a 32 bit target, all 64 bit operations are converted to 32 bits.
+A few specific operations must be implemented to allow it
+(see brcond2_i32, setcond2_i32).
On a 64 bit target, the values are transferred between 32 and 64-bit
registers using the following ops:
@@ -928,7 +981,9 @@ operation uses a constant input constraint which does not allow all
constants, it must also accept registers in order to have a fallback.
The constraint '``i``' is defined generically to accept any constant.
The constraint '``r``' is not defined generically, but is consistently
-used by each backend to indicate all registers.
+used by each backend to indicate all registers. If ``TCG_REG_ZERO``
+is defined by the backend, the constraint '``z``' is defined generically
+to map constant 0 to the hardware zero register.
The movi_i32 and movi_i64 operations must accept any constants.
diff --git a/docs/devel/tcg-plugins.rst b/docs/devel/tcg-plugins.rst
index f7d7b9e..9463692 100644
--- a/docs/devel/tcg-plugins.rst
+++ b/docs/devel/tcg-plugins.rst
@@ -8,38 +8,6 @@
QEMU TCG Plugins
================
-QEMU TCG plugins provide a way for users to run experiments taking
-advantage of the total system control emulation can have over a guest.
-It provides a mechanism for plugins to subscribe to events during
-translation and execution and optionally callback into the plugin
-during these events. TCG plugins are unable to change the system state
-only monitor it passively. However they can do this down to an
-individual instruction granularity including potentially subscribing
-to all load and store operations.
-
-Usage
------
-
-Any QEMU binary with TCG support has plugins enabled by default.
-Earlier releases needed to be explicitly enabled with::
-
- configure --enable-plugins
-
-Once built a program can be run with multiple plugins loaded each with
-their own arguments::
-
- $QEMU $OTHER_QEMU_ARGS \
- -plugin contrib/plugin/libhowvec.so,inline=on,count=hint \
- -plugin contrib/plugin/libhotblocks.so
-
-Arguments are plugin specific and can be used to modify their
-behaviour. In this case the howvec plugin is being asked to use inline
-ops to count and break down the hint instructions by type.
-
-Linux user-mode emulation also evaluates the environment variable
-``QEMU_PLUGIN``::
-
- QEMU_PLUGIN="file=contrib/plugins/libhowvec.so,inline=on,count=hint" $QEMU
Writing plugins
---------------
@@ -93,11 +61,14 @@ translation event the plugin has an option to enumerate the
instructions in a block of instructions and optionally register
callbacks to some or all instructions when they are executed.
-There is also a facility to add an inline event where code to
-increment a counter can be directly inlined with the translation.
-Currently only a simple increment is supported. This is not atomic so
-can miss counts. If you want absolute precision you should use a
-callback which can then ensure atomicity itself.
+There is also a facility to add inline instructions doing various operations,
+like adding or storing an immediate value. It is also possible to execute a
+callback conditionally, with the condition being evaluated inline. All these
+inline operations are associated with a ``scoreboard``, a thread-local storage
+area that is automatically expanded when new cores/threads are created and that
+can be accessed/modified in a thread-safe way without any locking. Combining
+inline operations and conditional callbacks offers a more efficient way to
+instrument binaries than classic callbacks.
Finally when QEMU exits all the registered *atexit* callbacks are
invoked.
@@ -191,457 +162,6 @@ which means callbacks may still occur after the uninstall operation is
requested. The plugin isn't completely uninstalled until the safe work
has executed while all vCPUs are quiescent.
-Example Plugins
-===============
-
-There are a number of plugins included with QEMU and you are
-encouraged to contribute your own plugins plugins upstream. There is a
-``contrib/plugins`` directory where they can go. There are also some
-basic plugins that are used to test and exercise the API during the
-``make check-tcg`` target in ``tests\plugins``.
-
-- tests/plugins/empty.c
-
-Purely a test plugin for measuring the overhead of the plugins system
-itself. Does no instrumentation.
-
-- tests/plugins/bb.c
-
-A very basic plugin which will measure execution in course terms as
-each basic block is executed. By default the results are shown once
-execution finishes::
-
- $ qemu-aarch64 -plugin tests/plugin/libbb.so \
- -d plugin ./tests/tcg/aarch64-linux-user/sha1
- SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
- bb's: 2277338, insns: 158483046
-
-Behaviour can be tweaked with the following arguments:
-
- * inline=true|false
-
- Use faster inline addition of a single counter. Not per-cpu and not
- thread safe.
-
- * idle=true|false
-
- Dump the current execution stats whenever the guest vCPU idles
-
-- tests/plugins/insn.c
-
-This is a basic instruction level instrumentation which can count the
-number of instructions executed on each core/thread::
-
- $ qemu-aarch64 -plugin tests/plugin/libinsn.so \
- -d plugin ./tests/tcg/aarch64-linux-user/threadcount
- Created 10 threads
- Done
- cpu 0 insns: 46765
- cpu 1 insns: 3694
- cpu 2 insns: 3694
- cpu 3 insns: 2994
- cpu 4 insns: 1497
- cpu 5 insns: 1497
- cpu 6 insns: 1497
- cpu 7 insns: 1497
- total insns: 63135
-
-Behaviour can be tweaked with the following arguments:
-
- * inline=true|false
-
- Use faster inline addition of a single counter. Not per-cpu and not
- thread safe.
-
- * sizes=true|false
-
- Give a summary of the instruction sizes for the execution
-
- * match=<string>
-
- Only instrument instructions matching the string prefix. Will show
- some basic stats including how many instructions have executed since
- the last execution. For example::
-
- $ qemu-aarch64 -plugin tests/plugin/libinsn.so,match=bl \
- -d plugin ./tests/tcg/aarch64-linux-user/sha512-vector
- ...
- 0x40069c, 'bl #0x4002b0', 10 hits, 1093 match hits, Ī”+1257 since last match, 98 avg insns/match
- 0x4006ac, 'bl #0x403690', 10 hits, 1094 match hits, Ī”+47 since last match, 98 avg insns/match
- 0x4037fc, 'bl #0x4002b0', 18 hits, 1095 match hits, Ī”+22 since last match, 98 avg insns/match
- 0x400720, 'bl #0x403690', 10 hits, 1096 match hits, Ī”+58 since last match, 98 avg insns/match
- 0x4037fc, 'bl #0x4002b0', 19 hits, 1097 match hits, Ī”+22 since last match, 98 avg insns/match
- 0x400730, 'bl #0x403690', 10 hits, 1098 match hits, Ī”+33 since last match, 98 avg insns/match
- 0x4037ac, 'bl #0x4002b0', 12 hits, 1099 match hits, Ī”+20 since last match, 98 avg insns/match
- ...
-
-For more detailed execution tracing see the ``execlog`` plugin for
-other options.
-
-- tests/plugins/mem.c
-
-Basic instruction level memory instrumentation::
-
- $ qemu-aarch64 -plugin tests/plugin/libmem.so,inline=true \
- -d plugin ./tests/tcg/aarch64-linux-user/sha1
- SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
- inline mem accesses: 79525013
-
-Behaviour can be tweaked with the following arguments:
-
- * inline=true|false
-
- Use faster inline addition of a single counter. Not per-cpu and not
- thread safe.
-
- * callback=true|false
-
- Use callbacks on each memory instrumentation.
-
- * hwaddr=true|false
-
- Count IO accesses (only for system emulation)
-
-- tests/plugins/syscall.c
-
-A basic syscall tracing plugin. This only works for user-mode. By
-default it will give a summary of syscall stats at the end of the
-run::
-
- $ qemu-aarch64 -plugin tests/plugin/libsyscall \
- -d plugin ./tests/tcg/aarch64-linux-user/threadcount
- Created 10 threads
- Done
- syscall no. calls errors
- 226 12 0
- 99 11 11
- 115 11 0
- 222 11 0
- 93 10 0
- 220 10 0
- 233 10 0
- 215 8 0
- 214 4 0
- 134 2 0
- 64 2 0
- 96 1 0
- 94 1 0
- 80 1 0
- 261 1 0
- 78 1 0
- 160 1 0
- 135 1 0
-
-- contrib/plugins/hotblocks.c
-
-The hotblocks plugin allows you to examine the where hot paths of
-execution are in your program. Once the program has finished you will
-get a sorted list of blocks reporting the starting PC, translation
-count, number of instructions and execution count. This will work best
-with linux-user execution as system emulation tends to generate
-re-translations as blocks from different programs get swapped in and
-out of system memory.
-
-If your program is single-threaded you can use the ``inline`` option for
-slightly faster (but not thread safe) counters.
-
-Example::
-
- $ qemu-aarch64 \
- -plugin contrib/plugins/libhotblocks.so -d plugin \
- ./tests/tcg/aarch64-linux-user/sha1
- SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
- collected 903 entries in the hash table
- pc, tcount, icount, ecount
- 0x0000000041ed10, 1, 5, 66087
- 0x000000004002b0, 1, 4, 66087
- ...
-
-- contrib/plugins/hotpages.c
-
-Similar to hotblocks but this time tracks memory accesses::
-
- $ qemu-aarch64 \
- -plugin contrib/plugins/libhotpages.so -d plugin \
- ./tests/tcg/aarch64-linux-user/sha1
- SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6
- Addr, RCPUs, Reads, WCPUs, Writes
- 0x000055007fe000, 0x0001, 31747952, 0x0001, 8835161
- 0x000055007ff000, 0x0001, 29001054, 0x0001, 8780625
- 0x00005500800000, 0x0001, 687465, 0x0001, 335857
- 0x0000000048b000, 0x0001, 130594, 0x0001, 355
- 0x0000000048a000, 0x0001, 1826, 0x0001, 11
-
-The hotpages plugin can be configured using the following arguments:
-
- * sortby=reads|writes|address
-
- Log the data sorted by either the number of reads, the number of writes, or
- memory address. (Default: entries are sorted by the sum of reads and writes)
-
- * io=on
-
- Track IO addresses. Only relevant to full system emulation. (Default: off)
-
- * pagesize=N
-
- The page size used. (Default: N = 4096)
-
-- contrib/plugins/howvec.c
-
-This is an instruction classifier so can be used to count different
-types of instructions. It has a number of options to refine which get
-counted. You can give a value to the ``count`` argument for a class of
-instructions to break it down fully, so for example to see all the system
-registers accesses::
-
- $ qemu-system-aarch64 $(QEMU_ARGS) \
- -append "root=/dev/sda2 systemd.unit=benchmark.service" \
- -smp 4 -plugin ./contrib/plugins/libhowvec.so,count=sreg -d plugin
-
-which will lead to a sorted list after the class breakdown::
-
- Instruction Classes:
- Class: UDEF not counted
- Class: SVE (68 hits)
- Class: PCrel addr (47789483 hits)
- Class: Add/Sub (imm) (192817388 hits)
- Class: Logical (imm) (93852565 hits)
- Class: Move Wide (imm) (76398116 hits)
- Class: Bitfield (44706084 hits)
- Class: Extract (5499257 hits)
- Class: Cond Branch (imm) (147202932 hits)
- Class: Exception Gen (193581 hits)
- Class: NOP not counted
- Class: Hints (6652291 hits)
- Class: Barriers (8001661 hits)
- Class: PSTATE (1801695 hits)
- Class: System Insn (6385349 hits)
- Class: System Reg counted individually
- Class: Branch (reg) (69497127 hits)
- Class: Branch (imm) (84393665 hits)
- Class: Cmp & Branch (110929659 hits)
- Class: Tst & Branch (44681442 hits)
- Class: AdvSimd ldstmult (736 hits)
- Class: ldst excl (9098783 hits)
- Class: Load Reg (lit) (87189424 hits)
- Class: ldst noalloc pair (3264433 hits)
- Class: ldst pair (412526434 hits)
- Class: ldst reg (imm) (314734576 hits)
- Class: Loads & Stores (2117774 hits)
- Class: Data Proc Reg (223519077 hits)
- Class: Scalar FP (31657954 hits)
- Individual Instructions:
- Instr: mrs x0, sp_el0 (2682661 hits) (op=0xd5384100/ System Reg)
- Instr: mrs x1, tpidr_el2 (1789339 hits) (op=0xd53cd041/ System Reg)
- Instr: mrs x2, tpidr_el2 (1513494 hits) (op=0xd53cd042/ System Reg)
- Instr: mrs x0, tpidr_el2 (1490823 hits) (op=0xd53cd040/ System Reg)
- Instr: mrs x1, sp_el0 (933793 hits) (op=0xd5384101/ System Reg)
- Instr: mrs x2, sp_el0 (699516 hits) (op=0xd5384102/ System Reg)
- Instr: mrs x4, tpidr_el2 (528437 hits) (op=0xd53cd044/ System Reg)
- Instr: mrs x30, ttbr1_el1 (480776 hits) (op=0xd538203e/ System Reg)
- Instr: msr ttbr1_el1, x30 (480713 hits) (op=0xd518203e/ System Reg)
- Instr: msr vbar_el1, x30 (480671 hits) (op=0xd518c01e/ System Reg)
- ...
-
-To find the argument shorthand for the class you need to examine the
-source code of the plugin at the moment, specifically the ``*opt``
-argument in the InsnClassExecCount tables.
-
-- contrib/plugins/lockstep.c
-
-This is a debugging tool for developers who want to find out when and
-where execution diverges after a subtle change to TCG code generation.
-It is not an exact science and results are likely to be mixed once
-asynchronous events are introduced. While the use of -icount can
-introduce determinism to the execution flow it doesn't always follow
-the translation sequence will be exactly the same. Typically this is
-caused by a timer firing to service the GUI causing a block to end
-early. However in some cases it has proved to be useful in pointing
-people at roughly where execution diverges. The only argument you need
-for the plugin is a path for the socket the two instances will
-communicate over::
-
-
- $ qemu-system-sparc -monitor none -parallel none \
- -net none -M SS-20 -m 256 -kernel day11/zImage.elf \
- -plugin ./contrib/plugins/liblockstep.so,sockpath=lockstep-sparc.sock \
- -d plugin,nochain
-
-which will eventually report::
-
- qemu-system-sparc: warning: nic lance.0 has no peer
- @ 0x000000ffd06678 vs 0x000000ffd001e0 (2/1 since last)
- @ 0x000000ffd07d9c vs 0x000000ffd06678 (3/1 since last)
- Ī” insn_count @ 0x000000ffd07d9c (809900609) vs 0x000000ffd06678 (809900612)
- previously @ 0x000000ffd06678/10 (809900609 insns)
- previously @ 0x000000ffd001e0/4 (809900599 insns)
- previously @ 0x000000ffd080ac/2 (809900595 insns)
- previously @ 0x000000ffd08098/5 (809900593 insns)
- previously @ 0x000000ffd080c0/1 (809900588 insns)
-
-- contrib/plugins/hwprofile.c
-
-The hwprofile tool can only be used with system emulation and allows
-the user to see what hardware is accessed how often. It has a number of options:
-
- * track=read or track=write
-
- By default the plugin tracks both reads and writes. You can use one
- of these options to limit the tracking to just one class of accesses.
-
- * source
-
- Will include a detailed break down of what the guest PC that made the
- access was. Not compatible with the pattern option. Example output::
-
- cirrus-low-memory @ 0xfffffd00000a0000
- pc:fffffc0000005cdc, 1, 256
- pc:fffffc0000005ce8, 1, 256
- pc:fffffc0000005cec, 1, 256
-
- * pattern
-
- Instead break down the accesses based on the offset into the HW
- region. This can be useful for seeing the most used registers of a
- device. Example output::
-
- pci0-conf @ 0xfffffd01fe000000
- off:00000004, 1, 1
- off:00000010, 1, 3
- off:00000014, 1, 3
- off:00000018, 1, 2
- off:0000001c, 1, 2
- off:00000020, 1, 2
- ...
-
-- contrib/plugins/execlog.c
-
-The execlog tool traces executed instructions with memory access. It can be used
-for debugging and security analysis purposes.
-Please be aware that this will generate a lot of output.
-
-The plugin needs default argument::
-
- $ qemu-system-arm $(QEMU_ARGS) \
- -plugin ./contrib/plugins/libexeclog.so -d plugin
-
-which will output an execution trace following this structure::
-
- # vCPU, vAddr, opcode, disassembly[, load/store, memory addr, device]...
- 0, 0xa12, 0xf8012400, "movs r4, #0"
- 0, 0xa14, 0xf87f42b4, "cmp r4, r6"
- 0, 0xa16, 0xd206, "bhs #0xa26"
- 0, 0xa18, 0xfff94803, "ldr r0, [pc, #0xc]", load, 0x00010a28, RAM
- 0, 0xa1a, 0xf989f000, "bl #0xd30"
- 0, 0xd30, 0xfff9b510, "push {r4, lr}", store, 0x20003ee0, RAM, store, 0x20003ee4, RAM
- 0, 0xd32, 0xf9893014, "adds r0, #0x14"
- 0, 0xd34, 0xf9c8f000, "bl #0x10c8"
- 0, 0x10c8, 0xfff96c43, "ldr r3, [r0, #0x44]", load, 0x200000e4, RAM
-
-Please note that you need to configure QEMU with Capstone support to get disassembly.
-
-The output can be filtered to only track certain instructions or
-addresses using the ``ifilter`` or ``afilter`` options. You can stack the
-arguments if required::
-
- $ qemu-system-arm $(QEMU_ARGS) \
- -plugin ./contrib/plugins/libexeclog.so,ifilter=st1w,afilter=0x40001808 -d plugin
-
-This plugin can also dump registers when they change value. Specify the name of the
-registers with multiple ``reg`` options. You can also use glob style matching if you wish::
-
- $ qemu-system-arm $(QEMU_ARGS) \
- -plugin ./contrib/plugins/libexeclog.so,reg=\*_el2,reg=sp -d plugin
-
-Be aware that each additional register to check will slow down
-execution quite considerably. You can optimise the number of register
-checks done by using the rdisas option. This will only instrument
-instructions that mention the registers in question in disassembly.
-This is not foolproof as some instructions implicitly change
-instructions. You can use the ifilter to catch these cases:
-
- $ qemu-system-arm $(QEMU_ARGS) \
- -plugin ./contrib/plugins/libexeclog.so,ifilter=msr,ifilter=blr,reg=x30,reg=\*_el1,rdisas=on
-
-- contrib/plugins/cache.c
-
-Cache modelling plugin that measures the performance of a given L1 cache
-configuration, and optionally a unified L2 per-core cache when a given working
-set is run::
-
- $ qemu-x86_64 -plugin ./contrib/plugins/libcache.so \
- -d plugin -D cache.log ./tests/tcg/x86_64-linux-user/float_convs
-
-will report the following::
-
- core #, data accesses, data misses, dmiss rate, insn accesses, insn misses, imiss rate
- 0 996695 508 0.0510% 2642799 18617 0.7044%
-
- address, data misses, instruction
- 0x424f1e (_int_malloc), 109, movq %rax, 8(%rcx)
- 0x41f395 (_IO_default_xsputn), 49, movb %dl, (%rdi, %rax)
- 0x42584d (ptmalloc_init.part.0), 33, movaps %xmm0, (%rax)
- 0x454d48 (__tunables_init), 20, cmpb $0, (%r8)
- ...
-
- address, fetch misses, instruction
- 0x4160a0 (__vfprintf_internal), 744, movl $1, %ebx
- 0x41f0a0 (_IO_setb), 744, endbr64
- 0x415882 (__vfprintf_internal), 744, movq %r12, %rdi
- 0x4268a0 (__malloc), 696, andq $0xfffffffffffffff0, %rax
- ...
-
-The plugin has a number of arguments, all of them are optional:
-
- * limit=N
-
-  Print the top N icache and dcache thrashing instructions along with
-  their addresses, number of misses, and their disassembly. (default: 32)
-
- * icachesize=N
- * iblksize=B
- * iassoc=A
-
- Instruction cache configuration arguments. They specify the cache size, block
- size, and associativity of the instruction cache, respectively.
- (default: N = 16384, B = 64, A = 8)
-
- * dcachesize=N
- * dblksize=B
- * dassoc=A
-
- Data cache configuration arguments. They specify the cache size, block size,
- and associativity of the data cache, respectively.
- (default: N = 16384, B = 64, A = 8)
-
- * evict=POLICY
-
- Sets the eviction policy to POLICY. Available policies are: :code:`lru`,
- :code:`fifo`, and :code:`rand`. The plugin will use the specified policy for
- both instruction and data caches. (default: POLICY = :code:`lru`)
-
- * cores=N
-
- Sets the number of cores for which we maintain separate icache and dcache.
- (default: for linux-user, N = 1, for full system emulation: N = cores
- available to guest)
-
- * l2=on
-
- Simulates a unified L2 cache (stores blocks for both instructions and data)
- using the default L2 configuration (cache size = 2MB, associativity = 16-way,
- block size = 64B).
-
- * l2cachesize=N
- * l2blksize=B
- * l2assoc=A
-
- L2 cache configuration arguments. They specify the cache size, block size, and
- associativity of the L2 cache, respectively. Setting any of the L2
- configuration arguments implies ``l2=on``.
- (default: N = 2097152 (2MB), B = 64, A = 16)
-
Plugin API
==========
diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst
deleted file mode 100644
index 23d3f44..0000000
--- a/docs/devel/testing.rst
+++ /dev/null
@@ -1,1529 +0,0 @@
-.. _testing:
-
-Testing in QEMU
-===============
-
-This document describes the testing infrastructure in QEMU.
-
-Testing with "make check"
--------------------------
-
-The "make check" testing family includes most of the C based tests in QEMU. For
-a quick help, run ``make check-help`` from the source tree.
-
-The usual way to run these tests is:
-
-.. code::
-
- make check
-
-which includes QAPI schema tests, unit tests, QTests and some iotests.
-Different sub-types of "make check" tests will be explained below.
-
-Before running tests, it is best to build QEMU programs first. Some tests
-expect the executables to exist and will fail with obscure messages if they
-cannot find them.
-
-Unit tests
-~~~~~~~~~~
-
-Unit tests, which can be invoked with ``make check-unit``, are simple C tests
-that typically link to individual QEMU object files and exercise them by
-calling exported functions.
-
-If you are writing new code in QEMU, consider adding a unit test, especially
-for utility modules that are relatively stateless or have few dependencies. To
-add a new unit test:
-
-1. Create a new source file. For example, ``tests/unit/foo-test.c``.
-
-2. Write the test. Normally you would include the header file which exports
- the module API, then verify the interface behaves as expected from your
- test. The test code should be organized with the glib testing framework.
- Copying and modifying an existing test is usually a good idea.
-
-3. Add the test to ``tests/unit/meson.build``. The unit tests are listed in a
- dictionary called ``tests``. The values are any additional sources and
- dependencies to be linked with the test. For a simple test whose source
- is in ``tests/unit/foo-test.c``, it is enough to add an entry like::
-
- {
- ...
- 'foo-test': [],
- ...
- }
-
-Since unit tests don't require environment variables, the simplest way to debug
-a unit test failure is often directly invoking it or even running it under
-``gdb``. However, there can still be differences in behavior between ``make``
-invocations and your manual run, due to the ``$MALLOC_PERTURB_`` environment
-variable (which affects memory reclamation and catches invalid pointers better)
-and gtester options. If necessary, you can run
-
-.. code::
-
- make check-unit V=1
-
-and copy the actual command line which executes the unit test, then run
-it from the command line.
-
-QTest
-~~~~~
-
-QTest is a device emulation testing framework. It can be very useful to test
-device models; it can also control certain aspects of QEMU (such as virtual
-clock stepping) using a special purpose "qtest" protocol. Refer to
-:doc:`qtest` for more details.
-
-QTest cases can be executed with
-
-.. code::
-
- make check-qtest
-
-Writing portable test cases
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Both unit tests and qtests can run on POSIX hosts as well as Windows hosts.
-Care must be taken when writing portable test cases that can be built and run
-successfully on various hosts. The following list shows some best practices:
-
-* Use portable APIs from glib whenever necessary, e.g.: g_setenv(),
- g_mkdtemp(), g_mkdir().
-* Avoid using a hardcoded /tmp as the temporary file directory.
-  Use g_get_tmp_dir() instead.
-* Bear in mind that Windows has different special string representations for
-  stdin/stdout/stderr and null devices. For example, if your test case uses
-  "/dev/fd/2" and "/dev/null" on Linux, remember to use "2" and "nul" on
-  Windows instead. Also, IO redirection does not work on Windows, so avoid
-  relying on constructs such as "2>nul".
-* If your test case uses the blkdebug feature, use relative paths to pass
-  the config and image file paths on the command line, as a Windows absolute
-  path contains the delimiter ":", which will confuse the blkdebug parser.
-* Use double quotes in your extra QEMU command line in your test cases
-  instead of single quotes, as Windows does not drop single quotes when
-  passing the command line to QEMU.
-* Windows opens a file in text mode by default, while a POSIX compliant
-  implementation treats text files and binary files the same. So if your
-  test case opens a file to write some data and later wants to compare the
-  written data with the original, be sure to pass the letter 'b' as part
-  of the mode string to fopen(), or the O_BINARY flag to the open() call.
-* If a certain test case can only run on POSIX or Linux hosts, use a proper
-  #ifdef in the code. If the whole test suite cannot run on Windows, disable
-  the build in the meson.build file.
-
-QAPI schema tests
-~~~~~~~~~~~~~~~~~
-
-The QAPI schema tests validate the QAPI parser used by QMP, by feeding
-predefined input to the parser and comparing the result with the reference
-output.
-
-The input/output data is managed under the ``tests/qapi-schema`` directory.
-Each test case includes four files that have a common base name:
-
- * ``${casename}.json`` - the file contains the JSON input for feeding the
- parser
- * ``${casename}.out`` - the file contains the expected stdout from the parser
- * ``${casename}.err`` - the file contains the expected stderr from the parser
- * ``${casename}.exit`` - the expected error code
-
-Consider adding a new QAPI schema test when you are making a change to the QAPI
-parser (either fixing a bug or extending/modifying the syntax). To do this:
-
-1. Add four files for the new case as explained above. For example:
-
- ``$EDITOR tests/qapi-schema/foo.{json,out,err,exit}``.
-
-2. Add the new test in ``tests/Makefile.include``. For example:
-
- ``qapi-schema += foo.json``
-
-check-block
-~~~~~~~~~~~
-
-``make check-block`` runs a subset of the block layer iotests (the tests that
-are in the "auto" group).
-See the "QEMU iotests" section below for more information.
-
-QEMU iotests
-------------
-
-QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing
-framework widely used to test block layer related features. It is higher level
-than "make check" tests and 99% of the code is written in bash or Python
-scripts. The testing success criteria is golden output comparison, and the
-test files are named with numbers.
-
-To run iotests, make sure QEMU is built successfully, then switch to the
-``tests/qemu-iotests`` directory under the build directory, and run ``./check``
-with desired arguments from there.
-
-By default, "raw" format and "file" protocol is used; all tests will be
-executed, except the unsupported ones. You can override the format and protocol
-with arguments:
-
-.. code::
-
- # test with qcow2 format
- ./check -qcow2
- # or test a different protocol
- ./check -nbd
-
-It's also possible to list test numbers explicitly:
-
-.. code::
-
- # run selected cases with qcow2 format
- ./check -qcow2 001 030 153
-
-Cache mode can be selected with the "-c" option, which may help reveal bugs
-that are specific to a certain cache mode.
-
-More options are supported by the ``./check`` script, run ``./check -h`` for
-help.
-
-Writing a new test case
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Consider writing a test case when you are making any changes to the block
-layer. An iotest is usually the right choice for that. There are already many
-test cases, so it is possible that extending one of them may achieve the goal
-and save the boilerplate of creating a new one. (Unfortunately, there isn't a 100%
-reliable way to find a related one out of hundreds of tests. One approach is
-using ``git grep``.)
-
-Usually an iotest case consists of two files. One is an executable that
-produces output to stdout and stderr, the other is the expected reference
-output. They are given the same number in their file names, e.g. test script
-``055`` and reference output ``055.out``.
-
-In rare cases, when outputs differ between cache mode ``none`` and others, a
-``.out.nocache`` file is added. In other cases, when outputs differ between
-image formats, more than one ``.out`` file is created, each ending with the
-respective format name, e.g. ``178.out.qcow2`` and ``178.out.raw``.
-
-There isn't a hard rule about how to write a test script, but a new test is
-usually a (copy and) modification of an existing case. There are a few
-commonly used ways to create a test:
-
-* A Bash script. It will make use of several environment variables related
-  to the testing procedure, and could source a group of ``common.*`` libraries
-  for some common helper routines.
-
-* A Python unittest script. Import ``iotests``, create a subclass of
-  ``iotests.QMPTestCase``, then call the ``iotests.main`` method. The downside
-  of this approach is that the output is quite terse, which makes the script
-  harder to debug. (A minimal sketch is shown below.)
-
-* A simple Python script without using the unittest module. This could also
-  import ``iotests`` for launching QEMU and other utilities, but it doesn't
-  inherit from ``iotests.QMPTestCase`` and therefore doesn't use the Python
-  unittest execution. This combines elements of the previous two approaches.
-
-Pick the language per your preference since both Bash and Python have
-comparable library support for invoking and interacting with QEMU programs. If
-you opt for Python, it is strongly recommended to write Python 3 compatible
-code.
-
-Both Python and Bash frameworks in iotests provide helpers to manage test
-images. They can be used to create and clean up images under the test
-directory. If no I/O or any protocol specific feature is needed, it is often
-more convenient to use the pseudo block driver, ``null-co://``, as the test
-image, which doesn't require image creation or cleaning up. Avoid system-wide
-devices or files whenever possible, such as ``/dev/null`` or ``/dev/zero``.
-Otherwise, image locking implications have to be considered. For example,
-another application on the host may have locked the file, possibly leading to a
-test failure. If using such devices is explicitly desired, consider adding the
-``locking=off`` option to disable image locking.
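-
-For illustration, here is a minimal sketch of the Python unittest style
-(a hypothetical test, not one from the tree; helper names such as
-``iotests.VM``, ``add_drive`` and ``assert_qmp`` follow current ``iotests``
-conventions but may differ between QEMU versions):
-
-.. code::
-
-  #!/usr/bin/env python3
-  # group: quick
-  #
-  # Hypothetical example, for illustration only.
-
-  import os
-  import iotests
-  from iotests import qemu_img
-
-  test_img = os.path.join(iotests.test_dir, 'test.img')
-
-  class TestExample(iotests.QMPTestCase):
-      def setUp(self):
-          # create a scratch image and start a VM with it attached
-          qemu_img('create', '-f', iotests.imgfmt, test_img, '1M')
-          self.vm = iotests.VM().add_drive(test_img)
-          self.vm.launch()
-
-      def tearDown(self):
-          self.vm.shutdown()
-          os.remove(test_img)
-
-      def test_device_present(self):
-          result = self.vm.qmp('query-block')
-          self.assert_qmp(result, 'return[0]/device', 'drive0')
-
-  if __name__ == '__main__':
-      iotests.main(supported_fmts=['qcow2'],
-                   supported_protocols=['file'])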
-
-Debugging a test case
-~~~~~~~~~~~~~~~~~~~~~
-
-The following options to the ``check`` script can be useful when debugging
-a failing test:
-
-* ``-gdb`` wraps every QEMU invocation in a ``gdbserver``, which waits for a
- connection from a gdb client. The options given to ``gdbserver`` (e.g. the
- address on which to listen for connections) are taken from the ``$GDB_OPTIONS``
- environment variable. By default (if ``$GDB_OPTIONS`` is empty), it listens on
- ``localhost:12345``.
- It is possible to connect to it for example with
- ``gdb -iex "target remote $addr"``, where ``$addr`` is the address
- ``gdbserver`` listens on.
- If the ``-gdb`` option is not used, ``$GDB_OPTIONS`` is ignored,
- regardless of whether it is set or not.
-
-* ``-valgrind`` attaches a valgrind instance to QEMU. If it detects
- warnings, it will print and save the log in
- ``$TEST_DIR/<valgrind_pid>.valgrind``.
- The final command line will be ``valgrind --log-file=$TEST_DIR/
- <valgrind_pid>.valgrind --error-exitcode=99 $QEMU ...``
-
-* ``-d`` (debug) just increases the logging verbosity, showing
- for example the QMP commands and answers.
-
-* ``-p`` (print) redirects QEMU’s stdout and stderr to the test output,
- instead of saving it into a log file in
- ``$TEST_DIR/qemu-machine-<random_string>``.
-
-Test case groups
-~~~~~~~~~~~~~~~~
-
-"Tests may belong to one or more test groups, which are defined in the form
-of a comment in the test source file. By convention, test groups are listed
-in the second line of the test file, after the "#!/..." line, like this:
-
-.. code::
-
- #!/usr/bin/env python3
- # group: auto quick
- #
- ...
-
-Another way of defining groups is to create the tests/qemu-iotests/group.local
-file. This should be used only downstream (the file should never appear
-upstream). It may be used for defining downstream test groups
-or for temporarily disabling tests, like this:
-
-.. code::
-
- # groups for some company downstream process
- #
- # ci - tests to run on build
- # down - our downstream tests, not for upstream
- #
- # Format of each line is:
- # TEST_NAME TEST_GROUP [TEST_GROUP ]...
-
- 013 ci
- 210 disabled
- 215 disabled
- our-ugly-workaround-test down ci
-
-Note that the following group names have a special meaning:
-
-- quick: Tests in this group should finish within a few seconds.
-
-- auto: Tests in this group are used during "make check" and should be
- runnable in any case. That means they should run with every QEMU binary
- (also non-x86), with every QEMU configuration (i.e. must not fail if
- an optional feature is not compiled in - but reporting a "skip" is ok),
- work at least with the qcow2 file format, work with all kind of host
- filesystems and users (e.g. "nobody" or "root") and must not take too
- much memory and disk space (since CI pipelines tend to fail otherwise).
-
-- disabled: Tests in this group are disabled and ignored by check.
-
-.. _container-ref:
-
-Container based tests
----------------------
-
-Introduction
-~~~~~~~~~~~~
-
-The container testing framework in QEMU utilizes public images to
-build and test QEMU in predefined and widely accessible Linux
-environments. This makes it possible to expand the test coverage
-across distros, toolchain flavors and library versions. The support
-was originally written for Docker although we also support Podman as
-an alternative container runtime. Although many of the target
-names and scripts are prefixed with "docker", the system will
-automatically run on whichever runtime is configured.
-
-The container images are also used to augment the generation of tests
-for testing TCG. See :ref:`checktcg-ref` for more details.
-
-Docker Prerequisites
-~~~~~~~~~~~~~~~~~~~~
-
-Install "docker" with the system package manager and start the Docker service
-on your development machine, then make sure you have the privilege to run
-Docker commands. Typically this means setting up a passwordless ``sudo docker``
-command or logging in as root. For example:
-
-.. code::
-
- $ sudo yum install docker
- $ # or `apt-get install docker` for Ubuntu, etc.
- $ sudo systemctl start docker
- $ sudo docker ps
-
-The last command should print an empty table, to verify the system is ready.
-
-An alternative method to set up permissions is by adding the current user to
-the "docker" group and making the docker daemon socket file (by default
-``/var/run/docker.sock``) accessible to the group:
-
-.. code::
-
- $ sudo groupadd docker
- $ sudo usermod $USER -a -G docker
- $ sudo chown :docker /var/run/docker.sock
-
-Note that any one of the above configurations makes it possible for the user to
-exploit the whole host with Docker bind mounting or other privileged
-operations, so only do this on development machines.
-
-Podman Prerequisites
-~~~~~~~~~~~~~~~~~~~~
-
-Install "podman" with the system package manager.
-
-.. code::
-
- $ sudo dnf install podman
- $ podman ps
-
-The last command should print an empty table, to verify the system is ready.
-
-Quickstart
-~~~~~~~~~~
-
-From the source tree, type ``make docker-help`` to see the help. Testing
-can be started without configuring or building QEMU (``configure`` and
-``make`` are done in the container, with parameters defined by the
-make target):
-
-.. code::
-
- make docker-test-build@debian
-
-This will create a container instance using the ``debian`` image (the image
-is downloaded and initialized automatically), in which the ``test-build`` job
-is executed.
-
-Registry
-~~~~~~~~
-
-The QEMU project has a container registry hosted by GitLab at
-``registry.gitlab.com/qemu-project/qemu`` which will automatically be
-used to pull in pre-built layers. This avoids unnecessary strain on
-the distro archives created by multiple developers running the same
-container build steps over and over again. This can be overridden
-locally by using the ``NOCACHE`` build option:
-
-.. code::
-
- make docker-image-debian-arm64-cross NOCACHE=1
-
-Images
-~~~~~~
-
-Along with many other images, the ``debian`` image is defined in a Dockerfile
-in ``tests/docker/dockerfiles/``, called ``debian.docker``. The
-``make docker-help`` command lists all the available images.
-
-A ``.pre`` script can be added beside the ``.docker`` file, which will be
-executed before building the image under the build context directory. This is
-mainly used to do necessary host side setup, such as configuring ``binfmt_misc``
-to make qemu-user powered cross build containers work.
-
-Most of the existing Dockerfiles were written by hand, simply by creating
-a new ``.docker`` file under the ``tests/docker/dockerfiles/`` directory.
-This has led to an inconsistent set of packages being present across the
-different containers.
-
-Thus going forward, QEMU is aiming to automatically generate the Dockerfiles
-using the ``lcitool`` program provided by the ``libvirt-ci`` project:
-
- https://gitlab.com/libvirt/libvirt-ci
-
-``libvirt-ci`` contains an ``lcitool`` program as well as a list of
-mappings to distribution package names for a wide variety of third
-party projects. ``lcitool`` applies the mappings to a list of build
-pre-requisites in ``tests/lcitool/projects/qemu.yml``, determines the
-list of native packages to install on each distribution, and uses them
-to generate build environments (dockerfiles and Cirrus CI variable files)
-that are consistent across OS distributions.
-
-
-Adding new build pre-requisites
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-When preparing a patch series that adds a new build
-pre-requisite to QEMU, the prerequisite should be added to
-``tests/lcitool/projects/qemu.yml`` in order to make the dependency
-available in the CI build environments.
-
-In the simple case where the pre-requisite is already known to ``libvirt-ci``,
-the following steps are needed (a sketch of the edit follows the list):
-
- * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite
-
- * Run ``make lcitool-refresh`` to re-generate all relevant build environment
- manifests
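-
-As a sketch, assuming the file keeps its current layout (a single
-``packages:`` list) and using a hypothetical ``libfoo`` dependency, the
-edit is just one more list entry:
-
-.. code::
-
-  packages:
-    # ... existing entries ...
-    - libfoo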
-
-It may be that ``libvirt-ci`` does not know about the new pre-requisite.
-If that is the case, some extra preparation steps will be required
-first to contribute the mapping to the ``libvirt-ci`` project:
-
- * Fork the ``libvirt-ci`` project on gitlab
-
- * Add an entry for the new build prerequisite to
- ``lcitool/facts/mappings.yml``, listing its native package name on as
- many OS distros as practical. Run ``python -m pytest --regenerate-output``
- and check that the changes are correct.
-
- * Commit the ``mappings.yml`` change together with the regenerated test
- files, and submit a merge request to the ``libvirt-ci`` project.
- Please note in the description that this is a new build pre-requisite
- desired for use with QEMU.
-
- * CI pipeline will run to validate that the changes to ``mappings.yml``
- are correct, by attempting to install the newly listed package on
- all OS distributions supported by ``libvirt-ci``.
-
- * Once the merge request is accepted, go back to QEMU and update
- the ``tests/lcitool/libvirt-ci`` submodule to point to a commit that
- contains the ``mappings.yml`` update. Then add the prerequisite and
- run ``make lcitool-refresh``.
-
- * Please also trigger gitlab container generation pipelines on your change
- for as many OS distros as practical to make sure that there are no
- obvious breakages when adding the new pre-requisite. Please see
- `CI <https://www.qemu.org/docs/master/devel/ci.html>`__ documentation
- page on how to trigger gitlab CI pipelines on your change.
-
-For enterprise distros that default to old, end-of-life versions of the
-Python runtime, QEMU uses a separate set of mappings that work with more
-recent versions. These can be found in ``tests/lcitool/mappings.yml``.
-Modifying this file should not be necessary unless the new pre-requisite
-is a Python library or tool.
-
-
-Adding new OS distros
-^^^^^^^^^^^^^^^^^^^^^
-
-In some cases ``libvirt-ci`` will not know about the OS distro that is
-desired to be tested. Before adding a new OS distro, discuss the proposed
-addition:
-
- * Send a mail to qemu-devel, copying people listed in the
- MAINTAINERS file for ``Build and test automation``.
-
- There are limited CI compute resources available to QEMU, so the
- cost/benefit tradeoff of adding new OS distros needs to be considered.
-
- * File an issue at https://gitlab.com/libvirt/libvirt-ci/-/issues
- pointing to the qemu-devel mail thread in the archives.
-
- This alerts other people who might be interested in the work
- to avoid duplication, as well as to get feedback from libvirt-ci
- maintainers on any tips to ease the addition
-
-Assuming there is agreement to add a new OS distro, then:
-
- * Fork the ``libvirt-ci`` project on gitlab
-
- * Add metadata under ``lcitool/facts/targets/`` for the new OS
- distro. There might be code changes required if the OS distro
- uses a package format not currently known. The ``libvirt-ci``
- maintainers can advise on this when the issue is filed.
-
- * Edit ``lcitool/facts/mappings.yml`` to add entries for
- the new OS, listing the native package names for as many packages
- as practical. Run ``python -m pytest --regenerate-output`` and
- check that the changes are correct.
-
- * Commit the changes to ``lcitool/facts`` and the regenerated test
- files, and submit a merge request to the ``libvirt-ci`` project.
-   Please note in the description that this is a new OS distro
-   desired for use with QEMU.
-
- * CI pipeline will run to validate that the changes to ``mappings.yml``
- are correct, by attempting to install the newly listed package on
- all OS distributions supported by ``libvirt-ci``.
-
- * Once the merge request is accepted, go back to QEMU and update
- the ``libvirt-ci`` submodule to point to a commit that contains
- the ``mappings.yml`` update.
-
-
-Tests
-~~~~~
-
-Different tests are added to cover various configurations to build and test
-QEMU. Docker tests are the executables under ``tests/docker`` named
-``test-*``. They are typically shell scripts and are built on top of a shell
-library, ``tests/docker/common.rc``, which provides helpers to find the QEMU
-source and build it.
-
-The full list of tests is printed in the ``make docker-help`` help.
-
-Debugging a Docker test failure
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When a CI task, a maintainer or you yourself report a Docker test failure,
-follow the steps below to debug it:
-
-1. Locally reproduce the failure with the reported command line. E.g. run
- ``make docker-test-mingw@fedora-win64-cross J=8``.
-2. Add "V=1" to the command line, try again, to see the verbose output.
-3. Further add "DEBUG=1" to the command line. This will pause in a shell prompt
- in the container right before testing starts. You could either manually
- build QEMU and run tests from there, or press Ctrl-D to let the Docker
- testing continue.
-4. If you press Ctrl-D, the same building and testing procedure will begin, and
- will hopefully run into the error again. After that, you will be dropped to
- the prompt for debug.
-
-Options
-~~~~~~~
-
-Various options can be used to affect how Docker tests are done. The full
-list is in the ``make docker`` help text. The frequently used ones are:
-
-* ``V=1``: the same as in top level ``make``. It will be propagated to the
- container and enable verbose output.
-* ``J=$N``: the number of parallel tasks in make commands in the container,
- similar to the ``-j $N`` option in top level ``make``. (The ``-j`` option in
- top level ``make`` will not be propagated into the container.)
-* ``DEBUG=1``: enables debug. See the previous "Debugging a Docker test
- failure" section.
-
-Thread Sanitizer
-----------------
-
-Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports
-building and testing with this tool.
-
-For more information on TSan:
-
-https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual
-
-Thread Sanitizer in Docker
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-TSan is currently supported in the ubuntu2204 docker image.
-
-The test-tsan test will build QEMU using TSan and then run ``make check``.
-
-.. code::
-
- make docker-test-tsan@ubuntu2204
-
-TSan warnings under docker are placed in files located at build/tsan/.
-
-We recommend using DEBUG=1 to allow launching the test from inside the docker,
-and to allow review of the warnings generated by TSan.
-
-Building and Testing with TSan
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-It is possible to build and test with TSan, with a few additional steps.
-These steps are normally done automatically in the docker.
-
-At this time, a one-time patch is needed for clang-9 or clang-10:
-
-.. code::
-
- sed -i 's/^const/static const/g' \
- /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h
-
-To configure the build for TSan:
-
-.. code::
-
- ../configure --enable-tsan --cc=clang-10 --cxx=clang++-10 \
- --disable-werror --extra-cflags="-O0"
-
-The runtime behavior of TSAN is controlled by the TSAN_OPTIONS environment
-variable.
-
-More information on the TSAN_OPTIONS can be found here:
-
-https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags
-
-For example:
-
-.. code::
-
- export TSAN_OPTIONS=suppressions=<path to qemu>/tests/tsan/suppressions.tsan \
- detect_deadlocks=false history_size=7 exitcode=0 \
- log_path=<build path>/tsan/tsan_warning
-
-The above exitcode=0 has TSan continue without error if any warnings are found.
-This allows for running the test and then checking the warnings afterwards.
-If you want TSan to stop and exit with error on warnings, use exitcode=66.
-
-TSan Suppressions
-~~~~~~~~~~~~~~~~~
-Keep in mind that a data race reported by TSan does not necessarily
-indicate an actual bug. TSan provides several
-different mechanisms for suppressing warnings. In general it is recommended
-to fix the code if possible to eliminate the data race rather than suppress
-the warning.
-
-A few important files for suppressing warnings are:
-
-tests/tsan/suppressions.tsan - Has TSan warnings we wish to suppress at runtime.
-The comment on each suppression will typically indicate why we are
-suppressing it. More information on the file format can be found here:
-
-https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions
-
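-Each suppression is a ``type:pattern`` line in the format described at the
-link above, for example (with a hypothetical function name):
-
-.. code::
-
-  # Suspected benign race in foo_update()
-  race:foo_update
-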
-tests/tsan/ignore.tsan - Has TSan warnings we wish to disable
-at compile time for test or debug.
-Add flags to configure to enable:
-
-"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
-
-More information on the file format can be found here under "Blacklist Format":
-
-https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags
-
-TSan Annotations
-~~~~~~~~~~~~~~~~
-include/qemu/tsan.h defines annotations. See this file for more descriptions
-of the annotations themselves. Annotations can be used to suppress
-TSan warnings or give TSan more information so that it can detect proper
-relationships between accesses of data.
-
-Annotation examples can be found here:
-
-https://github.com/llvm/llvm-project/tree/master/compiler-rt/test/tsan/
-
-Good files to start with are: annotate_happens_before.cpp and ignore_race.cpp
-
-The full set of annotations can be found here:
-
-https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
-
-docker-binfmt-image-debian-% targets
-------------------------------------
-
-It is possible to combine Debian's bootstrap scripts with a configured
-``binfmt_misc`` to bootstrap a number of Debian's distros including
-experimental ports not yet supported by a released OS. This can
-simplify setting up a rootfs by using docker to contain the foreign
-rootfs rather than manually invoking chroot.
-
-Setting up ``binfmt_misc``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the script ``qemu-binfmt-conf.sh`` to configure a QEMU
-user binary to automatically run binaries for the foreign
-architecture. While the script will try its best to work with
-dynamically linked QEMU binaries, a statically linked one will present
-fewer potential complications when copied into the docker image. Modern
-kernels support the ``F`` (fix binary) flag, which opens the QEMU
-executable at setup time and avoids the need to find and re-open it in
-the chroot environment. This is triggered with the ``--persistent`` flag.
-
-Example invocation
-~~~~~~~~~~~~~~~~~~
-
-For example, to set up the HPPA port builds of Debian::
-
- make docker-binfmt-image-debian-sid-hppa \
- DEB_TYPE=sid DEB_ARCH=hppa \
- DEB_URL=http://ftp.ports.debian.org/debian-ports/ \
- DEB_KEYRING=/usr/share/keyrings/debian-ports-archive-keyring.gpg \
-    EXECUTABLE=$(pwd)/qemu-hppa V=1
-
-The ``DEB_`` variables are substitutions used by
-``debian-bootstrap.pre`` which is called to do the initial debootstrap
-of the rootfs before it is copied into the container. The second stage
-is run as part of the build. The final image will be tagged as
-``qemu/debian-sid-hppa``.
-
-VM testing
-----------
-
-This test suite contains scripts that bootstrap various guest images that have
-the necessary packages to build QEMU. The basic usage is documented in the
-``Makefile`` help, which is displayed with ``make vm-help``.
-
-Quickstart
-~~~~~~~~~~
-
-Run ``make vm-help`` to list available make targets. Invoke a specific make
-command to run a build test in an image. For example, ``make vm-build-freebsd``
-will build the source tree in the FreeBSD image. The command can be executed
-from either the source tree or the build dir; if the former, ``./configure`` is
-not needed. The command will then generate the test image in ``./tests/vm/``
-under the working directory.
-
-Note: images created by the scripts accept a well-known RSA key pair for SSH
-access, so they SHOULD NOT be exposed to external interfaces if you are
-concerned about attackers taking control of the guest and potentially
-exploiting a QEMU security bug to compromise the host.
-
-QEMU binaries
-~~~~~~~~~~~~~
-
-By default, ``qemu-system-x86_64`` is searched for in $PATH to run the guest. If
-there isn't one, or if it is older than 2.10, the test won't work. In this case,
-provide the QEMU binary in an environment variable: ``QEMU=/path/to/qemu-2.10+``.
-
-Likewise, the path to ``qemu-img`` can be set in the QEMU_IMG environment variable.
-
-Make jobs
-~~~~~~~~~
-
-The ``-j$X`` option in the make command line is not propagated into the VM;
-specify ``J=$X`` to control the make jobs in the guest.
-
-Debugging
-~~~~~~~~~
-
-Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive
-debugging and verbose output. If this is not enough, see the next section.
-``V=1`` will be propagated down into the make jobs in the guest.
-
-Manual invocation
-~~~~~~~~~~~~~~~~~
-
-Each guest script is an executable script with the same command line options.
-For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``:
-
-.. code::
-
- $ cd $QEMU_SRC/tests/vm
-
- # To bootstrap the image
- $ ./netbsd --build-image --image /var/tmp/netbsd.img
- <...>
-
- # To run an arbitrary command in guest (the output will not be echoed unless
- # --debug is added)
- $ ./netbsd --debug --image /var/tmp/netbsd.img uname -a
-
- # To build QEMU in guest
- $ ./netbsd --debug --image /var/tmp/netbsd.img --build-qemu $QEMU_SRC
-
- # To get to an interactive shell
- $ ./netbsd --interactive --image /var/tmp/netbsd.img sh
-
-Adding new guests
-~~~~~~~~~~~~~~~~~
-
-Please look at existing guest scripts for how to add new guests.
-
-Most importantly, create a subclass of BaseVM, implement the ``build_image()``
-method, define ``BUILD_SCRIPT``, and finally call ``basevm.main()`` from
-the script's ``main()``. (A minimal sketch follows the list below.)
-
-* Usually in ``build_image()``, a template image is downloaded from a
- predefined URL. ``BaseVM._download_with_cache()`` takes care of the cache and
- the checksum, so consider using it.
-
-* Once the image is downloaded, users, SSH server and QEMU build deps should
- be set up:
-
- - Root password set to ``BaseVM.ROOT_PASS``
- - User ``BaseVM.GUEST_USER`` is created, and password set to
- ``BaseVM.GUEST_PASS``
- - SSH service is enabled and started on boot,
- ``$QEMU_SRC/tests/keys/id_rsa.pub`` is added to ssh's ``authorized_keys``
- file of both root and the normal user
- - DHCP client service is enabled and started on boot, so that it can
- automatically configure the virtio-net-pci NIC and communicate with QEMU
- user net (10.0.2.2)
- - Necessary packages are installed to untar the source tarball and build
- QEMU
-
-* Write a proper ``BUILD_SCRIPT`` template: a shell script that untars the
-  QEMU source tree tarball from a raw virtio-blk block device and then
-  configures and builds it. Running "make check" is also recommended.
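-
-A heavily condensed sketch of such a guest script is shown below. The class
-name, URL, checksum and device node are hypothetical; the format keys in
-``BUILD_SCRIPT`` and the helper names mirror the existing scripts under
-``tests/vm/`` (e.g. ``tests/vm/netbsd``), which should be used as the real
-reference:
-
-.. code::
-
-  #!/usr/bin/env python3
-  import sys
-  import basevm
-
-  EXAMPLE_SHA256 = "0" * 64   # placeholder checksum
-
-  class ExampleVM(basevm.BaseVM):
-      name = "example"
-      arch = "x86_64"
-
-      # Shell run inside the guest: untar the QEMU source tarball that is
-      # presented as a raw virtio-blk device, then configure and build it.
-      BUILD_SCRIPT = """
-          set -e;
-          cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
-          tar -xf /dev/vtbd1;
-          ./configure {configure_opts};
-          make --output-sync -j{jobs} {target} {verbose};
-      """
-
-      def build_image(self, img):
-          # The helper handles download caching and checksum verification.
-          cimg = self._download_with_cache(
-              "https://example.org/guest-image.img.xz",  # hypothetical URL
-              sha256sum=EXAMPLE_SHA256)
-          # ... decompress cimg, boot it once to create users, enable sshd
-          # and DHCP, install build dependencies, then save the result
-          # as 'img' ...
-
-  if __name__ == "__main__":
-      sys.exit(basevm.main(ExampleVM))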
-
-Image fuzzer testing
---------------------
-
-An image fuzzer was added to exercise format drivers. Currently only qcow2 is
-supported. To start the fuzzer, run
-
-.. code::
-
- tests/image-fuzzer/runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2
-
-Alternatively, a command other than ``qemu-img info`` can be tested by
-changing the ``-c`` option.
-
-Integration tests using the Avocado Framework
----------------------------------------------
-
-The ``tests/avocado`` directory hosts integration tests. They're usually
-higher level tests, and may interact with external resources and with
-various guest operating systems.
-
-These tests are written using the Avocado Testing Framework (which must
-be installed separately) in conjunction with the ``avocado_qemu.Test``
-class, implemented at ``tests/avocado/avocado_qemu``.
-
-Tests based on ``avocado_qemu.Test`` can easily:
-
- * Customize the command line arguments given to the convenience
- ``self.vm`` attribute (a QEMUMachine instance)
-
- * Interact with the QEMU monitor, send QMP commands and check
- their results
-
- * Interact with the guest OS, using the convenience console device
- (which may be useful to assert the effectiveness and correctness of
- command line arguments or QMP commands)
-
- * Interact with external data files that accompany the test itself
- (see ``self.get_data()``)
-
- * Download (and cache) remote data files, such as firmware and kernel
- images
-
- * Have access to a library of guest OS images (by means of the
- ``avocado.utils.vmimage`` library)
-
- * Make use of various other test related utilities available at the
- test class itself and at the utility library:
-
- - http://avocado-framework.readthedocs.io/en/latest/api/test/avocado.html#avocado.Test
- - http://avocado-framework.readthedocs.io/en/latest/api/utils/avocado.utils.html
-
-Running tests
-~~~~~~~~~~~~~
-
-You can run the avocado tests simply by executing:
-
-.. code::
-
- make check-avocado
-
-This involves the automatic installation, from PyPI, of all the
-necessary avocado-framework dependencies into the QEMU venv within the
-build tree (at ``./pyvenv``). Test results are also saved within the
-build tree (at ``tests/results``).
-
-Note: the build environment must be using a Python 3 stack, and have
-the ``venv`` and ``pip`` packages installed. If necessary, make sure
-``configure`` is called with ``--python=`` and that those modules are
-available. On Debian and Ubuntu based systems, depending on the
-specific version, they may be on packages named ``python3-venv`` and
-``python3-pip``.
-
-It is also possible to run tests based on tags using the
-``make check-avocado`` command and the ``AVOCADO_TAGS`` environment
-variable:
-
-.. code::
-
- make check-avocado AVOCADO_TAGS=quick
-
-Note that tags separated with commas have an AND behavior, while tags
-separated by spaces have an OR behavior. For more information on Avocado
-tags, see:
-
- https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/tags.html
-
-To run a single test file, a couple of them, or a test within a file
-using the ``make check-avocado`` command, set the ``AVOCADO_TESTS``
-environment variable with the test files or test names. To run all
-tests from a single file, use:
-
- .. code::
-
- make check-avocado AVOCADO_TESTS=$FILEPATH
-
-The same is valid to run tests from multiple test files:
-
- .. code::
-
- make check-avocado AVOCADO_TESTS='$FILEPATH1 $FILEPATH2'
-
-To run a single test within a file, use:
-
- .. code::
-
- make check-avocado AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME
-
-The same is valid to run single tests from multiple test files:
-
- .. code::
-
- make check-avocado AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2'
-
-The scripts installed inside the virtual environment may be used
-without an "activation". For instance, the Avocado test runner
-may be invoked by running:
-
- .. code::
-
- pyvenv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/
-
-Note that if ``make check-avocado`` was not executed before, it is
-possible to create the Python virtual environment with the dependencies
-needed by running:
-
- .. code::
-
- make check-venv
-
-It is also possible to run tests from a single file or a single test within
-a test file. To run tests from a single file within the build tree, use:
-
- .. code::
-
- pyvenv/bin/avocado run tests/avocado/$TESTFILE
-
-To run a single test within a test file, use:
-
- .. code::
-
- pyvenv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME
-
-Valid test names are visible in the output from any previous execution
-of Avocado or ``make check-avocado``, and can also be queried using:
-
- .. code::
-
- pyvenv/bin/avocado list tests/avocado
-
-Manual Installation
-~~~~~~~~~~~~~~~~~~~
-
-To manually install Avocado and its dependencies, run:
-
-.. code::
-
- pip install --user avocado-framework
-
-Alternatively, follow the instructions on this link:
-
- https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/installing.html
-
-Overview
-~~~~~~~~
-
-The ``tests/avocado/avocado_qemu`` directory provides the
-``avocado_qemu`` Python module, containing the ``avocado_qemu.Test``
-class. Here's a simple usage example:
-
-.. code::
-
- from avocado_qemu import QemuSystemTest
-
-
- class Version(QemuSystemTest):
- """
- :avocado: tags=quick
- """
- def test_qmp_human_info_version(self):
- self.vm.launch()
- res = self.vm.cmd('human-monitor-command',
- command_line='info version')
- self.assertRegex(res, r'^(\d+\.\d+\.\d)')
-
-To execute your test, run:
-
-.. code::
-
- avocado run version.py
-
-Tests may be classified according to a convention by using docstring
-directives such as ``:avocado: tags=TAG1,TAG2``. To run all tests
-in the current directory, tagged as "quick", run:
-
-.. code::
-
- avocado run -t quick .
-
-The ``avocado_qemu.Test`` base test class
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``avocado_qemu.Test`` class has a number of characteristics that
-are worth mentioning right away.
-
-First of all, it attempts to give each test a ready to use QEMUMachine
-instance, available at ``self.vm``. Because many tests will tweak the
-QEMU command line, launching the QEMUMachine (by using ``self.vm.launch()``)
-is left to the test writer.
-
-The base test class also has support for tests with more than one
-QEMUMachine. The way to get machines is through the ``self.get_vm()``
-method, which will return a QEMUMachine instance. The ``self.get_vm()``
-method accepts arguments that will be passed to the QEMUMachine creation,
-as well as an optional ``name`` attribute so you can identify a specific
-machine and get it more than once through the test's methods. A simple
-and hypothetical example follows:
-
-.. code::
-
- from avocado_qemu import QemuSystemTest
-
-
- class MultipleMachines(QemuSystemTest):
- def test_multiple_machines(self):
- first_machine = self.get_vm()
- second_machine = self.get_vm()
- self.get_vm(name='third_machine').launch()
-
- first_machine.launch()
- second_machine.launch()
-
- first_res = first_machine.cmd(
- 'human-monitor-command',
- command_line='info version')
-
- second_res = second_machine.cmd(
- 'human-monitor-command',
- command_line='info version')
-
- third_res = self.get_vm(name='third_machine').cmd(
- 'human-monitor-command',
- command_line='info version')
-
- self.assertEqual(first_res, second_res, third_res)
-
-At test "tear down", ``avocado_qemu.Test`` handles all the QEMUMachines
-shutdown.
-
-The ``avocado_qemu.LinuxTest`` base test class
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``avocado_qemu.LinuxTest`` class is a further specialization of the
-``avocado_qemu.Test`` class, so it contains all the characteristics of
-the latter plus some extra features.
-
-First of all, this base class is intended for tests that need to
-interact with a fully booted and operational Linux guest. At this
-time, it uses a Fedora 31 guest image. The most basic example looks
-like this:
-
-.. code::
-
- from avocado_qemu import LinuxTest
-
-
- class SomeTest(LinuxTest):
-
- def test(self):
- self.launch_and_wait()
- self.ssh_command('some_command_to_be_run_in_the_guest')
-
-Please refer to tests that use ``avocado_qemu.LinuxTest`` under
-``tests/avocado`` for more examples.
-
-QEMUMachine
-~~~~~~~~~~~
-
-The QEMUMachine API is already widely used in the Python iotests,
-device-crash-test and other Python scripts. It's a wrapper around the
-execution of a QEMU binary, giving its users the following (a short usage
-sketch appears after this list):
-
- * the ability to set command line arguments to be given to the QEMU
- binary
-
- * a ready to use QMP connection and interface, which can be used to
- send commands and inspect its results, as well as asynchronous
- events
-
- * convenience methods to set commonly used command line arguments in
- a more succinct and intuitive way
-
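-A minimal standalone sketch of direct QEMUMachine usage follows. The import
-path and the method names reflect current in-tree usage (the ``qemu`` package
-lives under ``python/``) but may change between QEMU versions:
-
-.. code::
-
-  from qemu.machine import QEMUMachine
-
-  vm = QEMUMachine('./qemu-system-x86_64')
-  vm.add_args('-M', 'q35', '-m', '512')   # extra command line arguments
-  vm.launch()
-  try:
-      # send a QMP command and inspect its result
-      status = vm.cmd('query-status')
-      print(status['status'])             # e.g. 'running'
-  finally:
-      vm.shutdown()
-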
-QEMU binary selection
-^^^^^^^^^^^^^^^^^^^^^
-
-The QEMU binary used for the ``self.vm`` QEMUMachine instance will
-primarily depend on the value of the ``qemu_bin`` parameter. If it's
-not explicitly set, its default value will be the result of a dynamic
-probe in the same source tree. A suitable binary will be one that
-targets the architecture matching the host machine.
-
-Based on this description, test writers will usually rely on one of
-the following approaches:
-
-1) Set ``qemu_bin``, and use the given binary
-
-2) Do not set ``qemu_bin``, and use a QEMU binary named like
- "qemu-system-${arch}", either in the current
- working directory, or in the current source tree.
-
-The resulting ``qemu_bin`` value will be preserved in the
-``avocado_qemu.Test`` as an attribute with the same name.
-
-Attribute reference
-~~~~~~~~~~~~~~~~~~~
-
-Test
-^^^^
-
-Besides the attributes and methods that are part of the base
-``avocado.Test`` class, the following attributes are available on any
-``avocado_qemu.Test`` instance.
-
-vm
-''
-
-A QEMUMachine instance, initially configured according to the given
-``qemu_bin`` parameter.
-
-arch
-''''
-
-The architecture can be used on different levels of the stack, e.g. by
-the framework or by the test itself. At the framework level, it will
-currently influence the selection of a QEMU binary (when one is not
-explicitly given).
-
-Tests are also free to use this attribute value, for their own needs.
-A test may, for instance, use the same value when selecting the
-architecture of a kernel or disk image to boot a VM with.
-
-The ``arch`` attribute will be set to the test parameter of the same
-name. If one is not given explicitly, it will either be set to
-``None``, or, if the test is tagged with one (and only one)
-``:avocado: tags=arch:VALUE`` tag, it will be set to ``VALUE``.
-
-cpu
-'''
-
-The cpu model that will be set to all QEMUMachine instances created
-by the test.
-
-The ``cpu`` attribute will be set to the test parameter of the same
-name. If one is not given explicitly, it will either be set to
-``None``, or, if the test is tagged with one (and only one)
-``:avocado: tags=cpu:VALUE`` tag, it will be set to ``VALUE``.
-
-machine
-'''''''
-
-The machine type that will be set to all QEMUMachine instances created
-by the test.
-
-The ``machine`` attribute will be set to the test parameter of the same
-name. If one is not given explicitly, it will either be set to
-``None``, or, if the test is tagged with one (and only one)
-``:avocado: tags=machine:VALUE`` tag, it will be set to ``VALUE``.
-
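-For instance, a hypothetical test relying on tags for all three values
-could be declared like this (shown only to illustrate the tag syntax):
-
-.. code::
-
-  from avocado_qemu import QemuSystemTest
-
-  class Aarch64VirtBoot(QemuSystemTest):
-      """
-      :avocado: tags=arch:aarch64
-      :avocado: tags=machine:virt
-      :avocado: tags=cpu:cortex-a57
-      """
-      def test_boot(self):
-          # with the tags above, self.vm targets qemu-system-aarch64
-          # with the 'virt' machine and 'cortex-a57' CPU, unless
-          # overridden by test parameters
-          self.vm.launch()
-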
-qemu_bin
-''''''''
-
-The preserved value of the ``qemu_bin`` parameter or the result of the
-dynamic probe for a QEMU binary in the current working directory or
-source tree.
-
-LinuxTest
-^^^^^^^^^
-
-Besides the attributes present on the ``avocado_qemu.Test`` base
-class, the ``avocado_qemu.LinuxTest`` adds the following attributes:
-
-distro
-''''''
-
-The name of the Linux distribution used as the guest image for the
-test. The name should match the **Provider** column on the list
-of images supported by the avocado.utils.vmimage library:
-
-https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images
-
-distro_version
-''''''''''''''
-
-The version of the Linux distribution as the guest image for the
-test. The name should match the **Version** column on the list
-of images supported by the avocado.utils.vmimage library:
-
-https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images
-
-distro_checksum
-'''''''''''''''
-
-The sha256 hash of the guest image file used for the test.
-
-If this value is not set in the code or by a test parameter (with the
-same name), no validation on the integrity of the image will be
-performed.
-
-Parameter reference
-~~~~~~~~~~~~~~~~~~~
-
-To understand how Avocado parameters are accessed by tests, and how
-they can be passed to tests, please refer to::
-
- https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#accessing-test-parameters
-
-Parameter values can be easily seen in the log files, and will look
-like the following:
-
-.. code::
-
-  PARAMS (key=qemu_bin, path=*, default=./qemu-system-x86_64) => './qemu-system-x86_64'
-
-Test
-^^^^
-
-arch
-''''
-
-The architecture that will influence the selection of a QEMU binary
-(when one is not explicitly given).
-
-Tests are also free to use this parameter value, for their own needs.
-A test may, for instance, use the same value when selecting the
-architecture of a kernel or disk image to boot a VM with.
-
-This parameter has a direct relation with the ``arch`` attribute. If
-not given, it will default to None.
-
-cpu
-'''
-
-The cpu model that will be set to all QEMUMachine instances created
-by the test.
-
-machine
-'''''''
-
-The machine type that will be set to all QEMUMachine instances created
-by the test.
-
-qemu_bin
-''''''''
-
-The exact QEMU binary to be used on QEMUMachine.
-
-LinuxTest
-^^^^^^^^^
-
-Besides the parameters present on the ``avocado_qemu.Test`` base
-class, the ``avocado_qemu.LinuxTest`` adds the following parameters:
-
-distro
-''''''
-
-The name of the Linux distribution used as the guest image for the
-test. The name should match the **Provider** column on the list
-of images supported by the avocado.utils.vmimage library:
-
-https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images
-
-distro_version
-''''''''''''''
-
-The version of the Linux distribution as the guest image for the
-test. The name should match the **Version** column on the list
-of images supported by the avocado.utils.vmimage library:
-
-https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images
-
-distro_checksum
-'''''''''''''''
-
-The sha256 hash of the guest image file used for the test.
-
-If this value is not set in the code or by this parameter, no
-validation of the integrity of the image will be performed.
-
-Skipping tests
-~~~~~~~~~~~~~~
-
-The Avocado framework provides Python decorators which make it easy to skip
-tests under certain conditions, for example, when a binary is missing on
-the test system or when the running environment is a CI system. For further
-information about those decorators, please refer to::
-
- https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#skipping-tests
-
-While the conditions for skipping tests are often specific to each test, there
-are recurring scenarios identified by the QEMU developers, and the use of
-environment variables has become a standard way to enable/disable tests.
-
-Here is a list of the most used variables:
-
-AVOCADO_ALLOW_LARGE_STORAGE
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Tests which are going to fetch or produce assets considered *large* are not
-going to run unless ``AVOCADO_ALLOW_LARGE_STORAGE=1`` is exported in
-the environment.
-
-The definition of *large* is a bit arbitrary here, but it usually means an
-asset which occupies at least 1GB on disk when uncompressed.
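-
-A hypothetical guard for such a test could look like this, following the
-same pattern as the ``QEMU_TEST_FLAKY_TESTS`` example shown later in this
-section:
-
-.. code::
-
-  import os
-
-  from avocado import skipUnless
-  from avocado_qemu import QemuSystemTest
-
-  class BigDiskBoot(QemuSystemTest):
-      @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'),
-                  'storage limited')
-      def test_boot_large_image(self):
-          # the >1GB asset download/decompression would happen here
-          self.vm.launch()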
-
-SPEED
-^^^^^
-Tests which have a long runtime will not be run unless ``SPEED=slow`` is
-exported in the environment.
-
-The definition of *long* is a bit arbitrary here, and it depends on the
-usefulness of the test too. A unique test is worth spending more time on;
-small variations on existing tests perhaps less so. As a rough guide,
-consider a test or set of similar tests which take more than 100 seconds
-to complete as *long*.
-
-AVOCADO_ALLOW_UNTRUSTED_CODE
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-There are tests which will boot a kernel image or firmware that can be
-considered not safe to run on the developer's workstation, thus they are
-skipped by default. The definition of *not safe* is also arbitrary, but
-usually it means a blob whose source or build process is not publicly
-available.
-
-You should export ``AVOCADO_ALLOW_UNTRUSTED_CODE=1`` in the environment in
-order to allow tests which make use of those kinds of assets.
-
-AVOCADO_TIMEOUT_EXPECTED
-^^^^^^^^^^^^^^^^^^^^^^^^
-The Avocado framework has a timeout mechanism which interrupts tests to avoid the
-test suite getting stuck. The timeout value can be set via a test parameter or a
-property defined in the test class; for further details, see::
-
- https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#setting-a-test-timeout
-
-Even though the timeout can be set by the test developer, there are some tests
-that may not have a well-defined limit of time to finish under certain
-conditions. For example, tests that take longer to execute when QEMU is
-compiled with debug flags. Therefore, the ``AVOCADO_TIMEOUT_EXPECTED`` variable
-has been used to determine whether those tests should run or not.
-
-QEMU_TEST_FLAKY_TESTS
-^^^^^^^^^^^^^^^^^^^^^
-Some tests do not work reliably and thus are disabled by default.
-This includes tests that don't run reliably on GitLab's CI, which
-usually expose real issues that are rarely seen on developer machines
-due to the constraints of the CI environment. If you encounter a
-similar situation, raise a bug and then mark the test as shown in
-the code snippet below:
-
-.. code::
-
- # See https://gitlab.com/qemu-project/qemu/-/issues/nnnn
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test(self):
- do_something()
-
-You can also add ``:avocado: tags=flaky`` to the test meta-data so
-only the flaky tests can be run as a group:
-
-.. code::
-
- env QEMU_TEST_FLAKY_TESTS=1 ./pyvenv/bin/avocado \
-    run tests/avocado --filter-by-tags=flaky
-
-Tests should not live in this state forever and should either be fixed
-or eventually removed.
-
-
-Uninstalling Avocado
-~~~~~~~~~~~~~~~~~~~~
-
-If you've followed the manual installation instructions above, you can
-easily uninstall Avocado. Start by listing the packages you have
-installed::
-
- pip list --user
-
-And remove any package you want with::
-
- pip uninstall <package_name>
-
-If you've used ``make check-avocado``, the Python virtual environment where
-Avocado is installed will be cleaned up as part of ``make check-clean``.
-
-.. _checktcg-ref:
-
-Testing with "make check-tcg"
------------------------------
-
-The check-tcg tests are intended for simple smoke tests of both
-linux-user and softmmu TCG functionality. However, to build test
-programs for guest targets you need to have cross compilers available.
-If your distribution supports cross compilers, you can do something as
-simple as::
-
- apt install gcc-aarch64-linux-gnu
-
-The configure script will automatically pick up their presence.
-Sometimes compilers have slightly odd names, so their availability can
-be indicated by passing the appropriate configure option for the
-architecture in question, for example::
-
- $(configure) --cross-cc-aarch64=aarch64-cc
-
-There is also a ``--cross-cc-cflags-ARCH`` flag in case additional
-compiler flags are needed to build for a given target.
-
-If you have the ability to run containers as your user, the build system
-will automatically use them where no system compiler is available. For
-architectures where we also support building QEMU, we will generally
-use the same container to build tests. However, there are a number of
-additional containers defined that have a minimal cross-build
-environment that is only suitable for building test cases. Sometimes
-we may use a bleeding edge distribution for compiler features needed
-for test cases that aren't yet in the LTS distros we support for QEMU
-itself.
-
-See :ref:`container-ref` for more details.
-
-Running subset of tests
-~~~~~~~~~~~~~~~~~~~~~~~
-
-You can build the tests for one architecture::
-
- make build-tcg-tests-$TARGET
-
-And run with::
-
- make run-tcg-tests-$TARGET
-
-Adding ``V=1`` to the invocation will show the details of how to
-invoke QEMU for the test which is useful for debugging tests.
-
-TCG test dependencies
-~~~~~~~~~~~~~~~~~~~~~
-
-The TCG tests are deliberately very light on dependencies and are
-either totally bare with minimal gcc lib support (for system-mode tests)
-or just glibc (for linux-user tests). This is because getting a cross
-compiler to work with additional libraries can be challenging.
-
-Other TCG Tests
----------------
-
-There are a number of out-of-tree test suites that are used for more
-extensive testing of processor features.
-
-KVM Unit Tests
-~~~~~~~~~~~~~~
-
-The KVM unit tests are designed to run as a Guest OS under KVM, but
-there is no reason why they can't exercise TCG as well. The suite
-provides a minimal OS kernel with hooks for enabling the MMU as well
-as reporting test results via a special device::
-
- https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
-
-Linux Test Project
-~~~~~~~~~~~~~~~~~~
-
-The LTP is focused on exercising the syscall interface of a Linux
-kernel. It checks that syscalls behave as documented and strives to
-exercise as many corner cases as possible. It is a useful test suite
-to run to exercise QEMU's linux-user code::
-
- https://linux-test-project.github.io/
-
-GCC gcov support
-----------------
-
-``gcov`` is a GCC tool to analyze the testing coverage by
-instrumenting the tested code. To use it, configure QEMU with
-``--enable-gcov`` option and build. Then run the tests as usual.
-
-If you want to gather coverage information on a single test the ``make
-clean-gcda`` target can be used to delete any existing coverage
-information before running a single test.
-
-You can generate a HTML coverage report by executing ``make
-coverage-html`` which will create
-``meson-logs/coveragereport/index.html``.
-
-Further analysis can be conducted by running the ``gcov`` command
-directly on the various .gcda output files. Please read the ``gcov``
-documentation for more information.
diff --git a/docs/devel/testing/acpi-bits.rst b/docs/devel/testing/acpi-bits.rst
new file mode 100644
index 0000000..9a4d716
--- /dev/null
+++ b/docs/devel/testing/acpi-bits.rst
@@ -0,0 +1,155 @@
+==================================
+ACPI/SMBIOS testing using biosbits
+==================================
+************
+Introduction
+************
+Biosbits is software written by Josh Triplett that can be downloaded
+from https://biosbits.org/. The github codebase can be found
+`here <https://github.com/biosbits/bits/tree/master>`__. It exercises
+BIOS components such as the ACPI and SMBIOS tables directly through the
+ACPICA BIOS interpreter (a freely available C based library written by
+Intel, downloadable from https://acpica.org/ and included with biosbits)
+without an operating system getting involved in between. Biosbits has
+python integration with grub, so the routines that exercise the BIOS
+components can be written in python instead of bash-ish (grub's native
+scripting language).
+
+There are several advantages to testing the BIOS directly, whether on a
+real physical machine or in a VM, as opposed to indirectly discovering
+BIOS issues through the operating system (the OS). Operating systems
+tend to bypass BIOS problems and hide them from the end user. By staying
+as close to the BIOS on a running system as possible, without a
+complicated software component such as an operating system in between,
+we have more control over what is tested and how. Another issue is that
+BIOS components such as ACPI and SMBIOS cannot be exercised without
+running at the highest hardware privilege level, for example ring 0 on
+x86. Since the OS executes in ring 0 whereas normal user land software
+resides in unprivileged ring 3, the operating system would have to be
+modified in order to host test routines that exercise and test the BIOS,
+which is not possible in all cases. Lastly, test frameworks and routines
+are preferably written in a high level scripting language such as
+python, whereas OSes and OS modules are generally written in low level
+languages such as C and assembly, which makes writing test routines more
+cumbersome. These and other reasons make bios-bits very attractive for
+testing BIOSes. More details on the inspiration for developing biosbits
+and its real life uses were presented `at Plumbers in 2011 <Plumbers_>`__
+and `at Linux.conf.au in 2012 <Linux.conf.au_>`__.
+
+For QEMU, we maintain a fork of bios bits in `gitlab`_, along with all
+the dependent submodules. This fork contains numerous fixes, a newer
+acpica, and changes specific to running these functional QEMU tests
+using bits. The author of this document is the current maintainer of the
+QEMU fork of the bios-bits repository. For more information, please see
+`the author's FOSDEM presentation <FOSDEM_>`__ on this bios-bits based
+test framework.
+
+.. _Plumbers: https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf
+.. _Linux.conf.au: https://www.youtube.com/watch?v=36QIepyUuhg
+.. _gitlab: https://gitlab.com/qemu-project/biosbits-bits
+.. _FOSDEM: https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/
+
+*********************************
+Description of the test framework
+*********************************
+
+Under the directory ``tests/functional/``, ``test_acpi_bits.py`` is a QEMU
+functional test that drives all this.
+
+A brief description of the various test files follows.
+
+Under ``tests/functional/`` as the root we have:
+
+::
+
+ ā”œā”€ā”€ acpi-bits
+ │ ā”œā”€ā”€ bits-config
+ │ │ └── bits-cfg.txt
+ │ ā”œā”€ā”€ bits-tests
+ │ ā”œā”€ā”€ smbios.py2
+ │ ā”œā”€ā”€ testacpi.py2
+ │ └── testcpuid.py2
+ ā”œā”€ā”€ test_acpi_bits.py
+
+* ``tests/functional``:
+
+ ``test_acpi_bits.py``:
+ This is the main python functional test script that generates a
+ biosbits iso. It then spawns a QEMU VM with it, collects the log and
+ reports test failures. This is the script to look at if you want to add
+ or change some component of the log parsing, add a new command line
+ option to alter how QEMU is spawned, etc. Test writers typically would
+ not need to modify this script unless they want to enhance or change the
+ log parsing for their tests. In order to enable debugging, you can set
+ the **V=1** environment variable. This enables verbose mode for the test
+ and also dumps the entire log from bios bits and more information in
+ case a failure happens. You can also set **BITS_DEBUG=1** to turn on
+ debug mode. It will enable verbose logs and also retain the temporary
+ work directory the test used, so that you can inspect it and run the
+ specific commands manually.
+
+ In order to run this test, please perform the following steps from the QEMU
+ build directory (assuming that the sources are in ".."):
+ ::
+
+ $ export PYTHONPATH=../python:../tests/functional
+ $ export QEMU_TEST_QEMU_BINARY=$PWD/qemu-system-x86_64
+ $ python3 ../tests/functional/test_acpi_bits.py
+
+ The above will run all acpi-bits functional tests (producing output in
+ TAP format).
+
+ You can inspect the log files in tests/functional/x86_64/test_acpi_bits.*/
+ for more information about the run or in order to diagnose issues.
+ If you pass V=1 in the environment, more diagnostic logs will be put into
+ the test log.
+
+* ``tests/functional/acpi-bits/bits-config``:
+
+ This location contains biosbits configuration files that determine how the
+ software runs the tests.
+
+ ``bits-cfg.txt``:
+ This is the biosbits config file that determines what tests
+ or actions are performed by bits. The description of the config options is
+ provided in the file itself.
+
+* ``tests/functional/acpi-bits/bits-tests``:
+
+ This directory contains biosbits python based tests that are run from within
+ the biosbits environment in the spawned VM. New additions of test cases can
+ be made in the appropriate test file. For example, new acpi tests can go
+ into testacpi.py2 and one would call testsuite.add_test() to register the new
+ test so that it gets executed as a part of the ACPI tests.
+ It might occasionally be necessary to disable some subtests or add a new
+ test that belongs to a test suite not already present in this directory. To
+ do this, please clone the bits source from
+ https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits.
+ Note that this is the "qemu-bits" branch and not the "bits" branch of the
+ repository. "qemu-bits" is the branch where we have made all the QEMU
+ specific enhancements and we must use the source from this branch only.
+ Copy the test suite/script that needs modification (addition of new tests
+ or disabling them) from the python directory into this directory. For
+ example, in order to change cpuid related tests, copy the following
+ file into this directory and rename it with a .py2 extension:
+ https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py
+ Then make your additions and changes here. Therefore, the steps are:
+
+ (a) Copy unmodified test script to this directory from bits source.
+ (b) Add a SPDX license header.
+ (c) Perform modifications to the test.
+
+ Steps (a), (b) and (c) should preferably go into separate commits so that
+ the original test script and the changes we have made are kept separate
+ and clear. (a) and (b) can sometimes be combined into a single step.
+
+ The test framework will then use your modified test script to run the test.
+ No further changes would be needed. Please check the logs to make sure that
+ appropriate changes have taken effect.
+
+ The tests have a .py2 extension in order to indicate that:
+
+ (a) They are python2.7 based scripts and not python 3 scripts.
+ (b) They are run from within the bios bits VM and are not subject to the
+ QEMU build/test python script maintenance and dependency resolution.
+ (c) They must not be loaded by the test framework by accident when running
+ tests.
+
+
+Author: Ani Sinha <anisinha@redhat.com>
+
diff --git a/docs/devel/testing/blkdebug.rst b/docs/devel/testing/blkdebug.rst
new file mode 100644
index 0000000..63887c9
--- /dev/null
+++ b/docs/devel/testing/blkdebug.rst
@@ -0,0 +1,177 @@
+Block I/O error injection using ``blkdebug``
+============================================
+
+..
+ Copyright (C) 2014-2015 Red Hat Inc
+
+ This work is licensed under the terms of the GNU GPL, version 2 or later. See
+ the COPYING file in the top-level directory.
+
+The ``blkdebug`` block driver is a rule-based error injection engine. It can be
+used to exercise error code paths in block drivers including ``ENOSPC`` (out of
+space) and ``EIO``.
+
+This document gives an overview of the features available in ``blkdebug``.
+
+Background
+----------
+Block drivers have many error code paths that handle I/O errors. Image formats
+are especially complex since metadata I/O errors during cluster allocation or
+while updating tables happen halfway through request processing and require
+discipline to keep image files consistent.
+
+Error injection allows test cases to trigger I/O errors at specific points.
+This way, all error paths can be tested to make sure they are correct.
+
+Rules
+-----
+The ``blkdebug`` block driver takes a list of "rules" that tell the error injection
+engine when to fail an I/O request.
+
+Each I/O request is evaluated against the rules. If a rule matches the request
+then its "action" is executed.
+
+Rules can be placed in a configuration file; the configuration file
+follows the same .ini-like format used by QEMU's ``-readconfig`` option, and
+each section of the file represents a rule.
+
+The following configuration file defines a single rule::
+
+ $ cat blkdebug.conf
+ [inject-error]
+ event = "read_aio"
+ errno = "28"
+
+This rule fails all aio read requests with ``ENOSPC`` (28). Note that the errno
+value depends on the host. On Linux, see
+``/usr/include/asm-generic/errno-base.h`` for errno values.
+
+Invoke QEMU as follows::
+
+ $ qemu-system-x86_64 \
+ -drive if=none,cache=none,file=blkdebug:blkdebug.conf:test.img,id=drive0 \
+ -device virtio-blk-pci,drive=drive0,id=virtio-blk-pci0
+
+Rules support the following attributes:
+
+``event``
+ which type of operation to match (e.g. ``read_aio``, ``write_aio``,
+ ``flush_to_os``, ``flush_to_disk``). See `Events`_ for
+ information on events.
+
+``state``
+ (optional) the engine must be in this state number in order for this
+ rule to match. See `State transitions`_ for information
+ on states.
+
+``errno``
+ the numeric errno value to return when a request matches this rule.
+ The errno values depend on the host since the numeric values are not
+ standardized in the POSIX specification.
+
+``sector``
+ (optional) a sector number that the request must overlap in order to
+ match this rule
+
+``once``
+ (optional, default ``off``) only execute this action on the first
+ matching request
+
+``immediately``
+ (optional, default ``off``) return a NULL ``BlockAIOCB``
+ pointer and fail without an errno instead. This
+ exercises the code path where ``BlockAIOCB`` fails and the
+ caller's ``BlockCompletionFunc`` is not invoked.
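+
+As an illustration (a hypothetical rule combining several of the attributes
+above, not taken from an existing test), the following fails only the first
+read request that overlaps sector 2048, returning ``EIO`` (5)::
+
+ [inject-error]
+ event = "read_aio"
+ errno = "5"
+ sector = "2048"
+ once = "on"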
+
+Events
+------
+Block drivers provide information about the type of I/O request they are about
+to make so rules can match specific types of requests. For example, the ``qcow2``
+block driver tells ``blkdebug`` when it accesses the L1 table so rules can match
+only L1 table accesses and not other metadata or guest data requests.
+
+The core events are:
+
+``read_aio``
+ guest data read
+
+``write_aio``
+ guest data write
+
+``flush_to_os``
+ write out unwritten block driver state (e.g. cached metadata)
+
+``flush_to_disk``
+ flush the host block device's disk cache
+
+See ``qapi/block-core.json:BlkdebugEvent`` for the full list of events.
+You may need to grep block driver source code to understand the
+meaning of specific events.
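+
+For example, to find where the ``qcow2`` driver emits the ``l1_update``
+event (events are referenced in the code via corresponding ``BLKDBG_*``
+constants, hence the case-insensitive match)::
+
+ $ git grep -i l1_update block/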
+
+State transitions
+-----------------
+There are cases where more power is needed to match a particular I/O request in
+a longer sequence of requests. For example::
+
+ write_aio
+ flush_to_disk
+ write_aio
+
+How do we match the 2nd ``write_aio`` but not the first? This is where state
+transitions come in.
+
+The error injection engine has an integer called the "state" that always starts
+initialized to 1. The state integer is internal to ``blkdebug`` and cannot be
+observed from outside but rules can interact with it for powerful matching
+behavior.
+
+Rules can be conditional on the current state and they can transition to a new
+state.
+
+When a rule's "state" attribute is non-zero then the current state must equal
+the attribute in order for the rule to match.
+
+For example, to match the 2nd write_aio::
+
+ [set-state]
+ event = "write_aio"
+ state = "1"
+ new_state = "2"
+
+ [inject-error]
+ event = "write_aio"
+ state = "2"
+ errno = "5"
+
+The first ``write_aio`` request matches the ``set-state`` rule and transitions from
+state 1 to state 2. Once state 2 has been entered, the ``set-state`` rule no
+longer matches since it requires state 1. But the ``inject-error`` rule now
+matches the next ``write_aio`` request and injects ``EIO`` (5).
+
+State transition rules support the following attributes:
+
+``event``
+ which type of operation to match (e.g. ``read_aio``, ``write_aio``,
+ ``flush_to_os``, ``flush_to_disk``). See `Events`_ for
+ information on events.
+
+``state``
+ (optional) the engine must be in this state number in order for this
+ rule to match
+
+``new_state``
+ transition to this state number
+
+Suspend and resume
+------------------
+Exercising code paths in block drivers may require specific ordering amongst
+concurrent requests. The "breakpoint" feature allows requests to be halted on
+a ``blkdebug`` event and resumed later. This makes it possible to achieve
+deterministic ordering when multiple requests are in flight.
+
+Breakpoints on ``blkdebug`` events are associated with a user-defined ``tag`` string.
+This tag serves as an identifier by which the request can be resumed at a later
+point.
+
+See the ``qemu-io(1)`` ``break``, ``resume``, ``remove_break``, and ``wait_break``
+commands for details.
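+
+As a rough sketch (the image name and tag ``A`` below are placeholders, and
+the exact flow depends on what the test wants to order), an interactive
+``qemu-io`` session might look like::
+
+ $ ./qemu-io blkdebug::test.img
+ qemu-io> break read_aio A
+ qemu-io> aio_read 0 512
+ qemu-io> wait_break A
+ qemu-io> resume A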
diff --git a/docs/devel/testing/blkverify.rst b/docs/devel/testing/blkverify.rst
new file mode 100644
index 0000000..2a71778
--- /dev/null
+++ b/docs/devel/testing/blkverify.rst
@@ -0,0 +1,73 @@
+Block driver correctness testing with ``blkverify``
+===================================================
+
+Introduction
+------------
+
+This document describes how to use the ``blkverify`` protocol to test that a block
+driver is operating correctly.
+
+It is difficult to test and debug block drivers against real guests. Often
+processes inside the guest will crash because corrupt sectors were read as part
+of the executable. Other times obscure errors are raised by a program inside
+the guest. These issues are extremely hard to trace back to bugs in the block
+driver.
+
+``blkverify`` solves this problem by catching data corruption inside QEMU the first
+time bad data is read and reporting the disk sector that is corrupted.
+
+How it works
+------------
+
+The ``blkverify`` protocol has two child block devices, the "test" device and the
+"raw" device. Read/write operations are mirrored to both devices so their
+state should always be in sync.
+
+The "raw" device is a raw image, a flat file, that has identical starting
+contents to the "test" image. The idea is that the "raw" device will handle
+read/write operations correctly and not corrupt data. It can be used as a
+reference for comparison against the "test" device.
+
+After a mirrored read operation completes, ``blkverify`` will compare the data and
+raise an error if it is not identical. This makes it possible to catch the
+first instance where corrupt data is read.
+
+Example
+-------
+
+Imagine raw.img has 0xcd repeated throughout its first sector::
+
+ $ ./qemu-io -c 'read -v 0 512' raw.img
+ 00000000: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
+ 00000010: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
+ [...]
+ 000001e0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
+ 000001f0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................
+ read 512/512 bytes at offset 0
+ 512.000000 bytes, 1 ops; 0.0000 sec (97.656 MiB/sec and 200000.0000 ops/sec)
+
+And test.img is corrupt; its first sector is zeroed when it shouldn't be::
+
+ $ ./qemu-io -c 'read -v 0 512' test.img
+ 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ [...]
+ 000001e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 000001f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ read 512/512 bytes at offset 0
+ 512.000000 bytes, 1 ops; 0.0000 sec (81.380 MiB/sec and 166666.6667 ops/sec)
+
+This error is caught by ``blkverify``::
+
+ $ ./qemu-io -c 'read 0 512' blkverify:raw.img:test.img
+ blkverify: read sector_num=0 nb_sectors=4 contents mismatch in sector 0
+
+A more realistic scenario is verifying the installation of a guest OS::
+
+ $ ./qemu-img create raw.img 16G
+ $ ./qemu-img create -f qcow2 test.qcow2 16G
+ $ ./qemu-system-x86_64 -cdrom debian.iso \
+ -drive file=blkverify:raw.img:test.qcow2
+
+If the installation is aborted when ``blkverify`` detects corruption, use ``qemu-io``
+to explore the contents of the disk image at the sector in question.
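+
+For example, to re-read the start of the suspect area of the test image from
+the scenario above (substitute the offset reported by ``blkverify``)::
+
+ $ ./qemu-io -c 'read -v 0 512' test.qcow2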
diff --git a/docs/devel/testing/ci-jobs.rst.inc b/docs/devel/testing/ci-jobs.rst.inc
new file mode 100644
index 0000000..f1c541c
--- /dev/null
+++ b/docs/devel/testing/ci-jobs.rst.inc
@@ -0,0 +1,189 @@
+.. _ci_var:
+
+Custom CI/CD variables
+======================
+
+QEMU CI pipelines can be tuned by setting some CI environment variables.
+
+Set variable globally in the user's CI namespace
+------------------------------------------------
+
+Variables can be set globally in the user's CI namespace setting.
+
+For further information about how to set these variables, please refer to::
+
+ https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project
+
+Set variable manually when pushing a branch or tag to the user's repository
+---------------------------------------------------------------------------
+
+Variables can be set manually when pushing a branch or tag, using
+git-push command line arguments.
+
+Example setting the QEMU_CI_EXAMPLE_VAR variable:
+
+.. code::
+
+ git push -o ci.variable="QEMU_CI_EXAMPLE_VAR=value" myrepo mybranch
+
+For further information about how to set these variables, please refer to::
+
+ https://docs.gitlab.com/ee/user/project/push_options.html#push-options-for-gitlab-cicd
+
+Setting aliases in your git config
+----------------------------------
+
+You can use aliases to make it easier to push branches with different
+CI configurations. For example define an alias for triggering CI:
+
+.. code::
+
+ git config --local alias.push-ci "push -o ci.variable=QEMU_CI=1"
+ git config --local alias.push-ci-now "push -o ci.variable=QEMU_CI=2"
+
+Which lets you run:
+
+.. code::
+
+ git push-ci
+
+to create the pipeline, or:
+
+.. code::
+
+ git push-ci-now
+
+to create and run the pipeline.
+
+
+Variable naming and grouping
+----------------------------
+
+The variables used by QEMU's CI configuration are grouped together
+in a handful of namespaces:
+
+ * QEMU_JOB_nnnn - variables to be defined in individual jobs
+ or templates, to influence the shared rules defined in the
+ .base_job_template.
+
+ * QEMU_CI_nnn - variables to be set by contributors in their
+ repository CI settings, or as git push variables, to influence
+ which jobs get run in a pipeline.
+
+ * QEMU_CI_CONTAINER_TAG - the tag used to publish containers
+ in stage 1, for use by build jobs in stage 2. Defaults to
+ 'latest', but if running pipelines for different branches
+ concurrently, it should be overridden per pipeline.
+
+ * QEMU_CI_UPSTREAM - gitlab namespace that is considered to be
+ the 'upstream'. This defaults to 'qemu-project'. Contributors
+ may choose to override this if they are modifying rules in
+ base.yml and need to validate how they will operate when in
+ an upstream context, as opposed to their fork context.
+
+ * nnn - other misc variables not falling into the above
+ categories, or using different names for historical reasons
+ and not yet converted.
+
+Maintainer controlled job variables
+-----------------------------------
+
+The following variables may be set when defining a job in the
+CI configuration file.
+
+QEMU_JOB_CIRRUS
+~~~~~~~~~~~~~~~
+
+The job makes use of Cirrus CI infrastructure, requiring the
+configuration setup for cirrus-run to be present in the repository
+
+QEMU_JOB_OPTIONAL
+~~~~~~~~~~~~~~~~~
+
+The job is expected to be successful in general, but is not run
+by default due to need to conserve limited CI resources. It is
+available to be started manually by the contributor in the CI
+pipelines UI.
+
+QEMU_JOB_ONLY_FORKS
+~~~~~~~~~~~~~~~~~~~
+
+The job results are only of interest to contributors prior to
+submitting code. They are not required as part of the gating
+CI pipeline.
+
+QEMU_JOB_SKIPPED
+~~~~~~~~~~~~~~~~
+
+The job is not reliably successful in general, so is not
+currently suitable to be run by default. Ideally this should
+be a temporary marker until the problems can be addressed, or
+the job permanently removed.
+
+QEMU_JOB_PUBLISH
+~~~~~~~~~~~~~~~~
+
+The job is for publishing content after a branch has been
+merged into the upstream default branch.
+
+QEMU_JOB_FUNCTIONAL
+~~~~~~~~~~~~~~~~~~~
+
+The job runs the functional test suite
+
+Contributor controlled runtime variables
+----------------------------------------
+
+The following variables may be set by contributors to control
+job execution
+
+QEMU_CI
+~~~~~~~
+
+By default, no pipelines will be created on contributor forks
+in order to preserve CI credits.
+
+Set this variable to 1 to create the pipelines, but leave all
+the jobs to be manually started from the UI.
+
+Set this variable to 2 to create the pipelines and run all
+the jobs immediately, as was the historical behaviour.
+
+QEMU_CI_FUNCTIONAL
+~~~~~~~~~~~~~~~~~~
+By default, tests using the functional framework are not run automatically
+in the pipelines (because multiple artifacts have to be downloaded, which
+might cause a lot of network traffic). Set this variable to have the tests
+using the functional framework run automatically.
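+
+For example, to create a pipeline that runs everything, including the
+functional tests, in one push (reusing the push-option syntax and the
+``myrepo``/``mybranch`` placeholders shown earlier):
+
+.. code::
+
+ git push -o ci.variable="QEMU_CI=2" \
+ -o ci.variable="QEMU_CI_FUNCTIONAL=1" myrepo mybranch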
+
+Other misc variables
+--------------------
+
+These variables are primarily to control execution of jobs on
+private runners
+
+AARCH64_RUNNER_AVAILABLE
+~~~~~~~~~~~~~~~~~~~~~~~~
+If you've got access to an aarch64 host that can be used as a gitlab-CI
+runner, you can set this variable to enable the tests that require this
+kind of host. The runner should be tagged with "aarch64".
+
+AARCH32_RUNNER_AVAILABLE
+~~~~~~~~~~~~~~~~~~~~~~~~
+If you've got access to an armhf host or an aarch64 host that can run
+aarch32 EL0 code to be used as a gitlab-CI runner, you can set this
+variable to enable the tests that require this kind of host. The
+runner should be tagged with "aarch32".
+
+S390X_RUNNER_AVAILABLE
+~~~~~~~~~~~~~~~~~~~~~~
+If you've got access to an IBM Z host that can be used as a gitlab-CI
+runner, you can set this variable to enable the tests that require this
+kind of host. The runner should be tagged with "s390x".
+
+CCACHE_DISABLE
+~~~~~~~~~~~~~~
+The jobs are configured to use "ccache" by default since this typically
+reduces compilation time, at the cost of increased storage. If the
+use of "ccache" is suspected to be hurting the overall job execution
+time, set the "CCACHE_DISABLE=1" env variable to disable it.
diff --git a/docs/devel/ci-runners.rst.inc b/docs/devel/testing/ci-runners.rst.inc
index 67b23d3..67b23d3 100644
--- a/docs/devel/ci-runners.rst.inc
+++ b/docs/devel/testing/ci-runners.rst.inc
diff --git a/docs/devel/testing/ci.rst b/docs/devel/testing/ci.rst
new file mode 100644
index 0000000..e21d39d
--- /dev/null
+++ b/docs/devel/testing/ci.rst
@@ -0,0 +1,34 @@
+.. _ci:
+
+Continuous Integration (CI)
+===========================
+
+Continuous integration (CI) requires building the entire application and
+executing a comprehensive set of automated tests every time a set of
+changes needs to be committed [1]_. The automated tests are composed
+of unit, functional and other tests.
+
+Most of QEMU's CI is run on GitLab's infrastructure although a number
+of other CI services are used for specialised purposes. The most up to
+date information about them and their status can be found on the
+`project wiki testing page <https://wiki.qemu.org/Testing/CI>`_.
+
+These tests are also used as gating tests before merging pull requests.
+A gating test restricts the move of code from one stage to another on a
+test/deployment pipeline. The step move is granted with approval. The approval
+can be a manual intervention or a set of tests succeeding [2]_.
+
+In QEMU, the gating process happens during the pull request. The approval is
+done by the project leader running their own set of tests. The pull request
+gets merged when the tests succeed.
+
+.. include:: ci-jobs.rst.inc
+.. include:: ci-runners.rst.inc
+
+References
+----------
+
+.. [1] Humble, Jez & Farley, David (2010). Continuous Delivery:
+ Reliable Software Releases Through Build, Test, and Deployment, p. 55.
+.. [2] Humble, Jez & Farley, David (2010). Continuous Delivery:
+ Reliable Software Releases Through Build, Test, and Deployment, p. 122.
diff --git a/docs/devel/testing/functional.rst b/docs/devel/testing/functional.rst
new file mode 100644
index 0000000..9e56dd1
--- /dev/null
+++ b/docs/devel/testing/functional.rst
@@ -0,0 +1,380 @@
+.. _checkfunctional-ref:
+
+Functional testing with Python
+==============================
+
+The ``tests/functional`` directory hosts functional tests written in
+Python. They are usually higher level tests, and may interact with
+external resources and with various guest operating systems.
+
+The tests should be written in the style of the Python `unittest`_ framework,
+using stdio for the TAP protocol. The folder ``tests/functional/qemu_test``
+provides classes (e.g. the ``QemuBaseTest``, ``QemuUserTest`` and the
+``QemuSystemTest`` classes) and utility functions that help to get your test
+into the right shape, e.g. by replacing the 'stdout' python object to redirect
+the normal output of your test to stderr instead.
+
+Note that if you don't use one of the QemuBaseTest based classes for your
+test, or if you spawn subprocesses from your test, you have to make sure
+that there is no TAP-incompatible output written to stdio, e.g. either by
+prefixing every line with a "# " to mark the output as a TAP comment, or
+e.g. by capturing the stdout output of subprocesses (redirecting it to
+stderr is OK).
+
+Tests based on ``qemu_test.QemuSystemTest`` can easily:
+
+ * Customize the command line arguments given to the convenience
+ ``self.vm`` attribute (a QEMUMachine instance)
+
+ * Interact with the QEMU monitor, send QMP commands and check
+ their results
+
+ * Interact with the guest OS, using the convenience console device
+ (which may be useful to assert the effectiveness and correctness of
+ command line arguments or QMP commands)
+
+ * Download (and cache) remote data files, such as firmware and kernel
+ images
+
+Running tests
+-------------
+
+You can run the functional tests simply by executing:
+
+.. code::
+
+ make check-functional
+
+It is also possible to run tests for a certain target only; for example,
+the following line will only run the tests for the x86_64 target:
+
+.. code::
+
+ make check-functional-x86_64
+
+To run a single test file without the meson test runner, you can also
+execute the file directly by specifying two environment variables first,
+the PYTHONPATH that has to include the python folder and the tests/functional
+folder of the source tree, and QEMU_TEST_QEMU_BINARY that has to point
+to the QEMU binary that should be used for the test. The current working
+directory should be your build folder. For example::
+
+ $ export PYTHONPATH=../python:../tests/functional
+ $ export QEMU_TEST_QEMU_BINARY=$PWD/qemu-system-x86_64
+ $ pyvenv/bin/python3 ../tests/functional/test_file.py
+
+The test framework will automatically purge any scratch files created during
+the tests. If you need to debug a failed test, it is possible to keep these
+files around on disk by setting ``QEMU_TEST_KEEP_SCRATCH=1`` as an env
+variable. Any preserved files will be deleted the next time the test is run
+without this variable set.
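+
+For example, reusing the hypothetical ``test_file.py`` invocation from above::
+
+ $ QEMU_TEST_KEEP_SCRATCH=1 ./pyvenv/bin/python3 ../tests/functional/test_file.py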
+
+Logging
+-------
+
+The framework collects log files for each test in the build directory
+in the following subfolder::
+
+ <builddir>/tests/functional/<arch>/<fileid>.<classid>.<testname>/
+
+There are usually three log files:
+
+* ``base.log`` contains the generic logging information that is written
+ by the calls to the logging functions in the test code (e.g. by calling
+ the ``self.log.info()`` or ``self.log.debug()`` functions).
+* ``console.log`` contains the output of the serial console of the guest.
+* ``default.log`` contains the output of QEMU. This file could be named
+ differently if the test chooses to use a different identifier for
+ the guest VM (e.g. when the test spins up multiple VMs).
+
+Introduction to writing tests
+-----------------------------
+
+The ``tests/functional/qemu_test`` directory provides the ``qemu_test``
+Python module, containing the ``qemu_test.QemuSystemTest`` class.
+Here is a simple usage example:
+
+.. code::
+
+ #!/usr/bin/env python3
+
+ from qemu_test import QemuSystemTest
+
+ class Version(QemuSystemTest):
+
+ def test_qmp_human_info_version(self):
+ self.vm.launch()
+ res = self.vm.cmd('human-monitor-command',
+ command_line='info version')
+ self.assertRegex(res, r'^(\d+\.\d+\.\d)')
+
+ if __name__ == '__main__':
+ QemuSystemTest.main()
+
+By providing the "hash bang" line at the beginning of the script, marking
+the file as executable and by calling into QemuSystemTest.main(), the test
+can also be run stand-alone, without a test runner. OTOH when run via a test
+runner, the QemuSystemTest.main() function takes care of running the test
+functions in the right fassion (e.g. with TAP output that is required by the
+meson test runner).
+
+The ``qemu_test.QemuSystemTest`` base test class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``qemu_test.QemuSystemTest`` class has a number of characteristics
+that are worth being mentioned.
+
+First of all, it attempts to give each test a ready to use QEMUMachine
+instance, available at ``self.vm``. Because many tests will tweak the
+QEMU command line, launching the QEMUMachine (by using ``self.vm.launch()``)
+is left to the test writer.
+
+The base test class also has support for tests with more than one
+QEMUMachine. The way to get machines is through the ``self.get_vm()``
+method, which will return a QEMUMachine instance. The ``self.get_vm()``
+method accepts arguments that will be passed to the QEMUMachine creation
+and also an optional ``name`` attribute so you can identify a specific
+machine and get it more than once throughout the test's methods. A simple
+and hypothetical example follows:
+
+.. code::
+
+ from qemu_test import QemuSystemTest
+
+ class MultipleMachines(QemuSystemTest):
+ def test_multiple_machines(self):
+ first_machine = self.get_vm()
+ second_machine = self.get_vm()
+ self.get_vm(name='third_machine').launch()
+
+ first_machine.launch()
+ second_machine.launch()
+
+ first_res = first_machine.cmd(
+ 'human-monitor-command',
+ command_line='info version')
+
+ second_res = second_machine.cmd(
+ 'human-monitor-command',
+ command_line='info version')
+
+ third_res = self.get_vm(name='third_machine').cmd(
+ 'human-monitor-command',
+ command_line='info version')
+
+ self.assertEqual(first_res, second_res)
+ self.assertEqual(first_res, third_res)
+
+At test "tear down", ``qemu_test.QemuSystemTest`` handles all the QEMUMachines
+shutdown.
+
+QEMUMachine
+-----------
+
+The QEMUMachine API is already widely used in the Python iotests,
+device-crash-test and other Python scripts. It's a wrapper around the
+execution of a QEMU binary, giving its users:
+
+ * the ability to set command line arguments to be given to the QEMU
+ binary
+
+ * a ready to use QMP connection and interface, which can be used to
+ send commands and inspect its results, as well as asynchronous
+ events
+
+ * convenience methods to set commonly used command line arguments in
+ a more succinct and intuitive way
+
+QEMU binary selection
+^^^^^^^^^^^^^^^^^^^^^
+
+The QEMU binary used for the ``self.vm`` QEMUMachine instance will
+primarily depend on the value of the ``qemu_bin`` instance attribute.
+If it is not explicitly set by the test code, its default value will
+be taken from the QEMU_TEST_QEMU_BINARY environment variable.
+
+Debugging hung QEMU
+^^^^^^^^^^^^^^^^^^^
+
+When test cases go wrong it may be helpful to debug a stalled QEMU
+process. While the QEMUMachine class owns the primary QMP monitor
+socket, it is possible to request a second QMP monitor be created
+by setting the ``QEMU_TEST_QMP_BACKDOOR`` env variable to refer
+to a UNIX socket name. The ``qmp-shell`` command can then be
+attached to the stalled QEMU to examine its live state.
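+
+As a rough sketch of the workflow (the socket path and test name below are
+placeholders, and the sources are assumed to live in ``..`` as in the earlier
+examples)::
+
+ $ QEMU_TEST_QMP_BACKDOOR=/tmp/qmp-backdoor.sock \
+ ./pyvenv/bin/python3 ../tests/functional/test_file.py
+ $ ../scripts/qmp/qmp-shell /tmp/qmp-backdoor.sock # from a second terminal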
+
+Attribute reference
+-------------------
+
+QemuBaseTest
+^^^^^^^^^^^^
+
+The following attributes are available on any ``qemu_test.QemuBaseTest``
+instance.
+
+arch
+""""
+
+The target architecture of the QEMU binary.
+
+Tests are also free to use this attribute value, for their own needs.
+A test may, for instance, use this value when selecting the architecture
+of a kernel or disk image to boot a VM with.
+
+qemu_bin
+""""""""
+
+The preserved value of the ``QEMU_TEST_QEMU_BINARY`` environment
+variable.
+
+QemuUserTest
+^^^^^^^^^^^^
+
+The QemuUserTest class can be used for running an executable via the
+usermode emulation binaries.
+
+QemuSystemTest
+^^^^^^^^^^^^^^
+
+The QemuSystemTest class can be used for running tests via one of the
+qemu-system-* binaries.
+
+vm
+""
+
+A QEMUMachine instance, initially configured according to the given
+``qemu_bin`` parameter.
+
+cpu
+"""
+
+The cpu model that will be set to all QEMUMachine instances created
+by the test.
+
+machine
+"""""""
+
+The machine type that will be set to all QEMUMachine instances created
+by the test. If you use the set_machine() function of the QemuSystemTest
+class to set this attribute, it will automatically be checked whether the
+machine is available, and the test will be skipped in case it is not
+built into the QEMU binary.
+
+Asset handling
+--------------
+
+Many functional tests download assets (e.g. Linux kernels, initrds,
+firmware images, etc.) from the internet to be able to run tests with
+them. This imposes additional challenges to the test framework.
+
+First there is the problem that some people might not have an
+unconstrained internet connection, so such tests should not be run by
+default when running ``make check``. To handle this situation,
+the tests that download files should only be added to the "thorough"
+speed mode in the meson.build file, while the "quick" speed mode is
+fine for functional tests that can be run without downloading files.
+``make check`` then only runs the quick functional tests along with
+the other quick tests from the other test suites. If you choose to
+run only ``make check-functional``, the "thorough" tests will be
+executed, too. And to run all functional tests along with the others,
+you can use something like::
+
+ make -j$(nproc) check SPEED=thorough
+
+The second problem with downloading files from the internet is time
+constraints. The time for downloading files should not be taken into
+account when the test is running and the timeout of the test is ticking
+(since downloading can be very slow, depending on the network bandwidth).
+This problem is solved by downloading the assets ahead of time, before
+the tests are run. This pre-caching is done with the qemu_test.Asset
+class. To use it in your test, declare an asset in your test class with
+its URL and SHA256 checksum like this::
+
+ from qemu_test import Asset
+
+ ASSET_somename = Asset(
+ ('https://www.qemu.org/assets/images/qemu_head_200.png'),
+ '34b74cad46ea28a2966c1d04e102510daf1fd73e6582b6b74523940d5da029dd')
+
+In your test function, you can then get the file name of the cached
+asset like this::
+
+ def test_function(self):
+ file_path = self.ASSET_somename.fetch()
+
+The pre-caching will be done automatically when running
+``make check-functional`` (but not when running e.g.
+``make check-functional-<target>``). In case you just want to download
+the assets without running the tests, you can do so by running::
+
+ make precache-functional
+
+The cache is populated in the ``~/.cache/qemu/download`` directory by
+default, but the location can be changed by setting the
+``QEMU_TEST_CACHE_DIR`` environment variable.
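+
+For example, to pre-populate a custom cache location (the path here is just
+a placeholder)::
+
+ QEMU_TEST_CACHE_DIR=/path/to/cache make precache-functional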
+
+Skipping tests
+--------------
+
+Since the test framework is based on the common Python unittest framework,
+you can use the usual Python decorators which allow for easily skipping
+tests running under certain conditions, for example, on the lack of a binary
+on the test system or when the running environment is a CI system. For further
+information about those decorators, please refer to:
+
+ https://docs.python.org/3/library/unittest.html#skipping-tests-and-expected-failures
+
+While the conditions for skipping tests are often specific to each test, there
+are recurring scenarios identified by the QEMU developers, and the use of
+environment variables became a kind of standard way to enable/disable tests.
+
+Here is a list of the most used variables:
+
+QEMU_TEST_ALLOW_LARGE_STORAGE
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Tests which are going to fetch or produce assets considered *large* are not
+going to run unless ``QEMU_TEST_ALLOW_LARGE_STORAGE=1`` is exported in
+the environment.
+
+The definition of *large* is a bit arbitrary here, but it usually means an
+asset which occupies at least 1GB on disk when uncompressed.
+
+QEMU_TEST_ALLOW_UNTRUSTED_CODE
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+There are tests which will boot a kernel image or firmware that can be
+considered not safe to run on the developer's workstation, thus they are
+skipped by default. The definition of *not safe* is also arbitrary, but
+usually it means a blob whose source or build process is not publicly
+available.
+
+You should export ``QEMU_TEST_ALLOW_UNTRUSTED_CODE=1`` in the environment in
+order to allow tests which make use of those kinds of assets.
+
+QEMU_TEST_FLAKY_TESTS
+^^^^^^^^^^^^^^^^^^^^^
+Some tests are not working reliably and thus are disabled by default.
+This includes tests that don't run reliably on GitLab's CI which
+usually expose real issues that are rarely seen on developer machines
+due to the constraints of the CI environment. If you encounter a
+similar situation then raise a bug and then mark the test as shown on
+the code snippet below:
+
+.. code::
+
+ # See https://gitlab.com/qemu-project/qemu/-/issues/nnnn
+ @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
+ def test(self):
+ do_something()
+
+Tests should not live in this state forever and should either be fixed
+or eventually removed.
+
+QEMU_TEST_ALLOW_SLOW
+^^^^^^^^^^^^^^^^^^^^
+Some tests have a very long runtime and might run into timeout issues,
+e.g. if the QEMU binary has been compiled with debugging options enabled.
+To avoid these timeout issues and to save some precious CPU cycles during
+normal testing, such tests are disabled by default unless the
+QEMU_TEST_ALLOW_SLOW environment variable has been set.
+
+
+.. _unittest: https://docs.python.org/3/library/unittest.html
diff --git a/docs/devel/testing/fuzzing.rst b/docs/devel/testing/fuzzing.rst
new file mode 100644
index 0000000..c3ac084
--- /dev/null
+++ b/docs/devel/testing/fuzzing.rst
@@ -0,0 +1,305 @@
+========
+Fuzzing
+========
+
+This document describes the virtual-device fuzzing infrastructure in QEMU and
+how to use it to implement additional fuzzers.
+
+Basics
+------
+
+Fuzzing operates by passing inputs to an entry point/target function. The
+fuzzer tracks the code coverage triggered by the input. Based on these
+findings, the fuzzer mutates the input and repeats the fuzzing.
+
+To fuzz QEMU, we rely on libfuzzer. Unlike other fuzzers such as AFL, libfuzzer
+is an *in-process* fuzzer. For the developer, this means that it is their
+responsibility to ensure that state is reset between fuzzing-runs.
+
+Building the fuzzers
+--------------------
+
+To build the fuzzers, install a recent version of clang and configure QEMU
+with it (substitute the clang binaries with the version you installed).
+Here, ``--enable-asan`` and ``--enable-ubsan`` are optional, but they allow
+us to reliably detect bugs such as out-of-bounds accesses, use-after-free,
+double-free etc.::
+
+ CC=clang-8 CXX=clang++-8 /path/to/configure \
+ --enable-fuzzing --enable-asan --enable-ubsan
+
+Fuzz targets are built similarly to system targets::
+
+ make qemu-fuzz-i386
+
+This builds ``./qemu-fuzz-i386``.
+
+The first option to this command is ``--fuzz-target=FUZZ_NAME``.
+To list all of the available fuzzers, run ``qemu-fuzz-i386`` with no arguments.
+
+For example::
+
+ ./qemu-fuzz-i386 --fuzz-target=virtio-scsi-fuzz
+
+Internally, libfuzzer parses all arguments that do not begin with ``"--"``.
+Information about these is available by passing ``-help=1``.
+
+Now the only thing left to do is wait for the fuzzer to trigger potential
+crashes.
+
+Useful libFuzzer flags
+----------------------
+
+As mentioned above, libFuzzer accepts some arguments. Passing ``-help=1`` will
+list the available arguments. In particular, these arguments might be helpful:
+
+* ``CORPUS_DIR/`` : Specify a directory as the last argument to libFuzzer.
+ libFuzzer stores each "interesting" input in this corpus directory. The next
+ time you run libFuzzer, it will read all of the inputs from the corpus, and
+ continue fuzzing from there. You can also specify multiple directories.
+ libFuzzer loads existing inputs from all specified directories, but will only
+ write new ones to the first one specified.
+
+* ``-max_len=4096`` : specify the maximum byte-length of the inputs libFuzzer
+ will generate.
+
+* ``-close_fd_mask={1,2,3}`` : close stdout, stderr, or both. Useful for targets
+ that trigger many debug/error messages, or create output on the serial console.
+
+* ``-jobs=4 -workers=4`` : These arguments configure libFuzzer to run 4 fuzzers in
+ parallel (4 fuzzing jobs in 4 worker processes). Alternatively, with only
+ ``-jobs=N``, libFuzzer automatically spawns a number of workers less than or equal
+ to half the available CPU cores. Replace 4 with a number appropriate for your
+ machine. Make sure to specify a ``CORPUS_DIR``, which will allow the parallel
+ fuzzers to share information about the interesting inputs they find.
+
+* ``-use_value_profile=1`` : For each comparison operation, libFuzzer computes
+ ``(caller_pc&4095) | (popcnt(Arg1 ^ Arg2) << 12)`` and places this in the
+ coverage table. Useful for targets with "magic" constants. If Arg1 came from
+ the fuzzer's input and Arg2 is a magic constant, then each time the Hamming
+ distance between Arg1 and Arg2 decreases, libFuzzer adds the input to the
+ corpus.
+
+* ``-shrink=1`` : Tries to make elements of the corpus "smaller". Might lead to
+ better coverage performance, depending on the target.
+
+Note that libFuzzer's exact behavior will depend on the version of
+clang and libFuzzer used to build the device fuzzers.
+
+Generating Coverage Reports
+---------------------------
+
+Code coverage is a crucial metric for evaluating a fuzzer's performance.
+libFuzzer's output provides a "cov: " column that provides a total number of
+unique blocks/edges covered. To examine coverage on a line-by-line basis we
+can use Clang coverage:
+
+ 1. Configure libFuzzer to store a corpus of all interesting inputs (see
+ CORPUS_DIR above)
+ 2. ``./configure`` the QEMU build with ::
+
+ --enable-fuzzing \
+ --extra-cflags="-fprofile-instr-generate -fcoverage-mapping"
+
+ 3. Re-run the fuzzer. Specify $CORPUS_DIR/* as an argument, telling libfuzzer
+ to execute all of the inputs in $CORPUS_DIR and exit. Once the process
+ exits, you should find a file, "default.profraw" in the working directory.
+ 4. Execute these commands to generate a detailed HTML coverage-report::
+
+ llvm-profdata merge -output=default.profdata default.profraw
+ llvm-cov show ./path/to/qemu-fuzz-i386 -instr-profile=default.profdata \
+ --format html -output-dir=/path/to/output/report
+
+Adding a new fuzzer
+-------------------
+
+Coverage over virtual devices can be improved by adding additional fuzzers.
+Fuzzers are kept in ``tests/qtest/fuzz/`` and should be added to
+``tests/qtest/fuzz/meson.build``
+
+Fuzzers can rely on both qtest and libqos to communicate with virtual devices.
+
+1. Create a new source file. For example ``tests/qtest/fuzz/foo-device-fuzz.c``.
+
+2. Write the fuzzing code using the libqtest/libqos API. See existing fuzzers
+ for reference.
+
+3. Add the fuzzer to ``tests/qtest/fuzz/meson.build``.
+
+Fuzzers can be more-or-less thought of as special qtest programs which can
+modify the qtest commands and/or qtest command arguments based on inputs
+provided by libfuzzer. Libfuzzer passes a byte array and length. Commonly the
+fuzzer loops over the byte-array interpreting it as a list of qtest commands,
+addresses, or values.
+
+The Generic Fuzzer
+------------------
+
+Writing a fuzz target can be a lot of effort (especially if a device driver has
+not been built out within libqos). Many devices can be fuzzed to some degree,
+without any device-specific code, using the generic-fuzz target.
+
+The generic-fuzz target is capable of fuzzing devices over their PIO, MMIO,
+and DMA input-spaces. To apply the generic-fuzz to a device, we need to define
+two env-variables, at minimum:
+
+* ``QEMU_FUZZ_ARGS=`` is the set of QEMU arguments used to configure a machine, with
+ the device attached. For example, if we want to fuzz the virtio-net device
+ attached to a pc-i440fx machine, we can specify::
+
+ QEMU_FUZZ_ARGS="-M pc -nodefaults -netdev user,id=user0 \
+ -device virtio-net,netdev=user0"
+
+* ``QEMU_FUZZ_OBJECTS=`` is a set of space-delimited strings used to identify
+ the MemoryRegions that will be fuzzed. These strings are compared against
+ MemoryRegion names and MemoryRegion owner names, to decide whether each
+ MemoryRegion should be fuzzed. These strings support globbing. For the
+ virtio-net example, we could use one of ::
+
+ QEMU_FUZZ_OBJECTS='virtio-net'
+ QEMU_FUZZ_OBJECTS='virtio*'
+ QEMU_FUZZ_OBJECTS='virtio* pcspk' # Fuzz the virtio devices and the speaker
+ QEMU_FUZZ_OBJECTS='*' # Fuzz the whole machine
+
+The ``"info mtree"`` and ``"info qom-tree"`` monitor commands can be especially
+useful for identifying the ``MemoryRegion`` and ``Object`` names used for
+matching.
+
+As a generic rule-of-thumb, the more ``MemoryRegions``/Devices we match, the
+greater the input-space, and the smaller the probability of finding crashing
+inputs for individual devices. As such, it is usually a good idea to limit the
+fuzzer to only a few ``MemoryRegions``.
+
+To ensure that these env variables have been configured correctly, we can use::
+
+ ./qemu-fuzz-i386 --fuzz-target=generic-fuzz -runs=0
+
+The output should contain a complete list of matched MemoryRegions.
+
+OSS-Fuzz
+--------
+QEMU is continuously fuzzed on `OSS-Fuzz
+<https://github.com/google/oss-fuzz>`_. By default, the OSS-Fuzz build
+will try to fuzz every fuzz-target. Since the generic-fuzz target
+requires additional information provided in environment variables, we
+pre-define some generic-fuzz configs in
+``tests/qtest/fuzz/generic_fuzz_configs.h``. Each config must specify:
+
+- ``.name``: To identify the fuzzer config
+
+- ``.args`` OR ``.argfunc``: A string or pointer to a function returning a
+ string. These strings are used to specify the ``QEMU_FUZZ_ARGS``
+ environment variable. ``argfunc`` is useful when the config relies on e.g.
+ a dynamically created temp directory, or a free tcp/udp port.
+
+- ``.objects``: A string that specifies the ``QEMU_FUZZ_OBJECTS`` environment
+ variable.
+
+To fuzz additional devices/device configuration on OSS-Fuzz, send patches for
+either a new device-specific fuzzer or a new generic-fuzz config.
+
+Build details:
+
+- The Dockerfile that sets up the environment for building QEMU's
+ fuzzers on OSS-Fuzz can be found in the OSS-Fuzz repository at
+ https://github.com/google/oss-fuzz/blob/master/projects/qemu/Dockerfile
+
+- The script responsible for building the fuzzers can be found in the
+ QEMU source tree at ``scripts/oss-fuzz/build.sh``
+
+Building Crash Reproducers
+-----------------------------------------
+When we find a crash, we should try to create an independent reproducer that
+can be used on a non-fuzzer build of QEMU. This filters out any potential
+false-positives, and improves the debugging experience for developers.
+Here are the steps for building a reproducer for a crash found by the
+generic-fuzz target.
+
+- Ensure the crash reproduces::
+
+ qemu-fuzz-i386 --fuzz-target... ./crash-...
+
+- Gather the QTest output for the crash::
+
+ QEMU_FUZZ_TIMEOUT=0 QTEST_LOG=1 FUZZ_SERIALIZE_QTEST=1 \
+ qemu-fuzz-i386 --fuzz-target... ./crash-... &> /tmp/trace
+
+- Reorder and clean-up the resulting trace::
+
+ scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py /tmp/trace > /tmp/reproducer
+
+- Get the arguments needed to start qemu, and provide a path to qemu::
+
+ less /tmp/trace # The args should be logged at the top of this file
+ export QEMU_ARGS="-machine ..."
+ export QEMU_PATH="path/to/qemu-system"
+
+- Ensure the crash reproduces in qemu-system::
+
+ $QEMU_PATH $QEMU_ARGS -qtest stdio < /tmp/reproducer
+
+- From the crash output, obtain some string that identifies the crash. This
+ can be a line in the stack-trace, for example::
+
+ export CRASH_TOKEN="hw/usb/hcd-xhci.c:1865"
+
+- Minimize the reproducer::
+
+ scripts/oss-fuzz/minimize_qtest_trace.py -M1 -M2 \
+ /tmp/reproducer /tmp/reproducer-minimized
+
+- Confirm that the minimized reproducer still crashes::
+
+ $QEMU_PATH $QEMU_ARGS -qtest stdio < /tmp/reproducer-minimized
+
+- Create a one-liner reproducer that can be sent over email::
+
+ ./scripts/oss-fuzz/output_reproducer.py -bash /tmp/reproducer-minimized
+
+- Output the C source code for a test case that will reproduce the bug::
+
+ ./scripts/oss-fuzz/output_reproducer.py -owner "John Smith <john@smith.com>"\
+ -name "test_function_name" /tmp/reproducer-minimized
+
+- Report the bug and send a patch with the C reproducer upstream
+
+Implementation Details / Fuzzer Lifecycle
+-----------------------------------------
+
+The fuzzer has two entrypoints that libfuzzer calls. libfuzzer provides its
+own ``main()``, which performs some setup, and calls the entrypoints:
+
+``LLVMFuzzerInitialize``: called prior to fuzzing. Used to initialize all of the
+necessary state
+
+``LLVMFuzzerTestOneInput``: called for each fuzzing run. Processes the input and
+resets the state at the end of each run.
+
+In more detail:
+
+``LLVMFuzzerInitialize`` parses the arguments to the fuzzer (must start with two
+dashes, so they are ignored by libfuzzer ``main()``). Currently, the arguments
+select the fuzz target. Then, the qtest client is initialized. If the target
+requires qos, qgraph is set up and the QOM/LIBQOS modules are initialized.
+Then the QGraph is walked and the QEMU cmd_line is determined and saved.
+
+After this, ``vl.c:main`` is called to set up the guest. There are
+target-specific hooks that can be called before and after main, for
+additional setup (e.g. PCI setup, or VM snapshotting).
+
+``LLVMFuzzerTestOneInput``: Uses qtest/qos functions to act based on the fuzz
+input. It is also responsible for manually calling ``main_loop_wait`` to ensure
+that bottom halves are executed, and for performing any cleanup required before
+the next input.
+
+Since the same process is reused for many fuzzing runs, QEMU state needs to
+be reset at the end of each run. For example, this can be done by rebooting the
+VM, after each run.
+
+ - *Pros*: Straightforward and fast for simple fuzz targets.
+
+ - *Cons*: Depending on the device, does not reset all device state. If the
+ device requires some initialization prior to being ready for fuzzing (common
+ for QOS-based targets), this initialization needs to be done after each
+ reboot.
+
+ - *Example target*: ``i440fx-qtest-reboot-fuzz``
diff --git a/docs/devel/testing/index.rst b/docs/devel/testing/index.rst
new file mode 100644
index 0000000..ccc2fc6
--- /dev/null
+++ b/docs/devel/testing/index.rst
@@ -0,0 +1,17 @@
+Testing QEMU
+------------
+
+Details about how to test QEMU and how it is integrated into our CI
+testing infrastructure.
+
+.. toctree::
+ :maxdepth: 3
+
+ main
+ qtest
+ functional
+ acpi-bits
+ ci
+ fuzzing
+ blkdebug
+ blkverify
diff --git a/docs/devel/testing/main.rst b/docs/devel/testing/main.rst
new file mode 100644
index 0000000..6b18ed8
--- /dev/null
+++ b/docs/devel/testing/main.rst
@@ -0,0 +1,1059 @@
+.. _testing:
+
+Testing in QEMU
+===============
+
+QEMU's testing infrastructure is fairly complex as it covers
+everything from unit testing and exercising specific sub-systems all
+the way to full blown functional tests. To get an overview of the
+tests you can run ``make check-help`` from either the source or build
+tree.
+
+Most (but not all) tests are also integrated as automated tests into
+the meson build system, so they can be run directly from the build tree,
+for example::
+
+ [./pyvenv/bin/]meson test --suite qemu:softfloat
+
+will run just the softfloat tests.
+
+An automated test is written with one of the test frameworks using its
+generic test functions/classes. The test framework can run the tests and
+report their success or failure [1]_.
+
+An automated test has essentially three parts:
+
+1. The test initialization of the parameters, where the expected parameters,
+ like inputs and expected results, are set up;
+2. The call to the code that should be tested;
+3. An assertion, comparing the result from the previous call with the expected
+ result set during the initialization of the parameters. If the result
+ matches the expected result, the test has been successful; otherwise, it has
+ failed.
+
+The rest of this document will cover the details for specific test
+groups.
+
+Testing with "make check"
+-------------------------
+
+The "make check" testing family includes most of the C based tests in QEMU.
+
+The usual way to run these tests is:
+
+.. code::
+
+ make check
+
+which includes QAPI schema tests, unit tests, QTests and some iotests.
+Different sub-types of "make check" tests will be explained below.
+
+Before running tests, it is best to build QEMU programs first. Some tests
+expect the executables to exist and will fail with obscure messages if they
+cannot find them.
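+
+A typical sequence is therefore to build first and then run the tests:
+
+.. code::
+
+ make -j$(nproc)
+ make check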
+
+.. _unit-tests:
+
+Unit tests
+~~~~~~~~~~
+
+A unit test is responsible for exercising individual software components as a
+unit, like interfaces, data structures, and functionality, uncovering errors
+within the boundaries of a component. The verification effort is in the
+smallest software unit and focuses on the internal processing logic and data
+structures. A test case of unit tests should be designed to uncover errors
+due to erroneous computations, incorrect comparisons, or improper control
+flow [2]_.
+
+In QEMU, unit tests can be invoked with ``make check-unit``. They are
+simple C tests that typically link to individual QEMU object files and
+exercise them by calling exported functions.
+
+If you are writing new code in QEMU, consider adding a unit test, especially
+for utility modules that are relatively stateless or have few dependencies. To
+add a new unit test:
+
+1. Create a new source file. For example, ``tests/unit/foo-test.c``.
+
+2. Write the test. Normally you would include the header file which exports
+   the module API, then verify the interface behaves as expected from your
+   test. The test code should be organized with the glib testing framework
+   (see the sketch after this list). Copying and modifying an existing test
+   is usually a good idea.
+
+3. Add the test to ``tests/unit/meson.build``. The unit tests are listed in a
+ dictionary called ``tests``. The values are any additional sources and
+ dependencies to be linked with the test. For a simple test whose source
+ is in ``tests/unit/foo-test.c``, it is enough to add an entry like::
+
+ {
+ ...
+ 'foo-test': [],
+ ...
+ }
+
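+For illustration, a minimal test following the three-part structure described
+earlier might look like the sketch below. The file name, the ``foo_add()``
+function and the test path are hypothetical and do not refer to an existing
+QEMU module:
+
+.. code::
+
+   /* tests/unit/foo-test.c -- hypothetical sketch, not an existing test */
+   #include "qemu/osdep.h"
+
+   /* Stand-in for a function exported by the module under test. */
+   static int foo_add(int a, int b)
+   {
+       return a + b;
+   }
+
+   static void test_foo_add(void)
+   {
+       /* 1. set up inputs, 2. call the code under test, 3. assert on the result */
+       g_assert_cmpint(foo_add(2, 3), ==, 5);
+   }
+
+   int main(int argc, char **argv)
+   {
+       g_test_init(&argc, &argv, NULL);
+       g_test_add_func("/foo/add", test_foo_add);
+       return g_test_run();
+   }
+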
+Since unit tests don't require environment variables, the simplest way to debug
+a unit test failure is often to invoke it directly or even run it under
+``gdb``. However, there can still be differences in behavior between ``make``
+invocations and your manual run, due to the ``$MALLOC_PERTURB_`` environment
+variable (which affects memory reclamation and catches invalid pointers better)
+and gtester options. If necessary, you can run
+
+.. code::
+
+ make check-unit V=1
+
+and copy the actual command line which executes the unit test, then run
+it from the command line.
+
+QTest
+~~~~~
+
+QTest is a device emulation testing framework. It can be very useful to test
+device models; it can also control certain aspects of QEMU (such as virtual
+clock stepping) with a special purpose "qtest" protocol. Refer to
+:doc:`qtest` for more details.
+
+QTest cases can be executed with
+
+.. code::
+
+ make check-qtest
+
+Writing portable test cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Both unit tests and qtests can run on POSIX hosts as well as Windows hosts.
+Care must be taken when writing portable test cases that can be built and run
+successfully on various hosts. The following list shows some best practices
+(a short sketch illustrating a few of them follows the list):
+
+* Use portable APIs from glib whenever possible, e.g.: g_setenv(),
+  g_mkdtemp(), g_mkdir().
+* Avoid hardcoding /tmp as the temporary file directory.
+  Use g_get_tmp_dir() instead.
+* Bear in mind that Windows has different special string representations for
+  stdin/stdout/stderr and the null device. For example, if your test case uses
+  "/dev/fd/2" and "/dev/null" on Linux, remember to use "2" and "nul" on
+  Windows instead. Also, IO redirection does not work on Windows, so avoid
+  constructs like "2>nul" whenever possible.
+* If your test case uses the blkdebug feature, use relative paths to pass
+  the config and image file paths on the command line, as a Windows absolute
+  path contains the delimiter ":", which will confuse the blkdebug parser.
+* Use double quotes in your extra QEMU command line in your test cases
+  instead of single quotes, as Windows does not drop single quotes when
+  passing the command line to QEMU.
+* Windows opens a file in text mode by default, while a POSIX compliant
+  implementation treats text files and binary files the same. So if your
+  test case opens a file to write some data and later wants to compare the
+  written data with the original, be sure to pass the letter 'b' as
+  part of the mode string to fopen(), or the O_BINARY flag to the open() call.
+* If a certain test case can only run on POSIX or Linux hosts, use a proper
+  #ifdef in the code. If the whole test suite cannot run on Windows, disable
+  the build in the meson.build file.
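+
+As a brief illustration of the temporary-directory and binary-mode points, the
+following sketch (the file and function names are hypothetical) writes and
+re-reads a small file in a way that behaves the same on POSIX and Windows
+hosts:
+
+.. code::
+
+   /* hypothetical excerpt from a portable test case */
+   #include "qemu/osdep.h"
+
+   static void test_portable_tmpfile(void)
+   {
+       /* Build the path under the platform's temp directory, not /tmp. */
+       g_autofree char *path = g_build_filename(g_get_tmp_dir(),
+                                                "portable-test.bin", NULL);
+       const char data[] = "\x01\x02\x03";
+       char buf[sizeof(data)];
+
+       /* Use "wb"/"rb" so Windows does not translate the bytes. */
+       FILE *f = fopen(path, "wb");
+       g_assert_nonnull(f);
+       g_assert_cmpuint(fwrite(data, 1, sizeof(data), f), ==, sizeof(data));
+       fclose(f);
+
+       f = fopen(path, "rb");
+       g_assert_nonnull(f);
+       g_assert_cmpuint(fread(buf, 1, sizeof(buf), f), ==, sizeof(buf));
+       g_assert_cmpmem(buf, sizeof(buf), data, sizeof(data));
+       fclose(f);
+       g_assert_cmpint(remove(path), ==, 0);
+   }
+
+   int main(int argc, char **argv)
+   {
+       g_test_init(&argc, &argv, NULL);
+       g_test_add_func("/portability/tmpfile", test_portable_tmpfile);
+       return g_test_run();
+   }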
+
+.. _qapi-tests:
+
+QAPI schema tests
+~~~~~~~~~~~~~~~~~
+
+The QAPI schema tests validate the QAPI parser used by QMP, by feeding
+predefined input to the parser and comparing the result with the reference
+output.
+
+The input/output data is managed under the ``tests/qapi-schema`` directory.
+Each test case includes four files that have a common base name:
+
+ * ``${casename}.json`` - the file contains the JSON input for feeding the
+ parser
+ * ``${casename}.out`` - the file contains the expected stdout from the parser
+ * ``${casename}.err`` - the file contains the expected stderr from the parser
+ * ``${casename}.exit`` - the expected error code
+
+Consider adding a new QAPI schema test when you are making a change on the QAPI
+parser (either fixing a bug or extending/modifying the syntax). To do this:
+
+1. Add four files for the new case as explained above. For example:
+
+ ``$EDITOR tests/qapi-schema/foo.{json,out,err,exit}``.
+
+2. Add the new test in ``tests/Makefile.include``. For example:
+
+ ``qapi-schema += foo.json``
+
+check-block
+~~~~~~~~~~~
+
+``make check-block`` runs a subset of the block layer iotests (the tests that
+are in the "auto" group).
+See the "QEMU iotests" section below for more information.
+
+.. _qemu-iotests:
+
+QEMU iotests
+------------
+
+QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing
+framework widely used to test block layer related features. It is higher level
+than "make check" tests and 99% of the code is written in bash or Python
+scripts. The testing success criterion is golden output comparison, and the
+test files are named with numbers.
+
+To run iotests, make sure QEMU is built successfully, then switch to the
+``tests/qemu-iotests`` directory under the build directory, and run ``./check``
+with desired arguments from there.
+
+By default, "raw" format and "file" protocol is used; all tests will be
+executed, except the unsupported ones. You can override the format and protocol
+with arguments:
+
+.. code::
+
+ # test with qcow2 format
+ ./check -qcow2
+ # or test a different protocol
+ ./check -nbd
+
+It's also possible to list test numbers explicitly:
+
+.. code::
+
+ # run selected cases with qcow2 format
+ ./check -qcow2 001 030 153
+
+Cache mode can be selected with the "-c" option, which may help reveal bugs
+that are specific to a certain cache mode.
+
+More options are supported by the ``./check`` script, run ``./check -h`` for
+help.
+
+Writing a new test case
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Consider writing a test case when you are making any changes to the block
+layer. An iotest case is usually the choice for that. There are already many
+test cases, so it is possible that extending one of them may achieve the goal
+and save the boilerplate of creating a new one. (Unfortunately, there isn't a
+100% reliable way to find a related one out of hundreds of tests. One approach
+is using ``git grep``.)
+
+Usually an iotest case consists of two files. One is an executable that
+produces output to stdout and stderr; the other is the expected reference
+output. They are given the same number in their file names, e.g. test script
+``055`` and reference output ``055.out``.
+
+In rare cases, when outputs differ between cache mode ``none`` and others, a
+``.out.nocache`` file is added. In other cases, when outputs differ between
+image formats, more than one ``.out`` file is created, ending with the
+respective format names, e.g. ``178.out.qcow2`` and ``178.out.raw``.
+
+There isn't a hard rule about how to write a test script, but a new test is
+usually a (copy and) modification of an existing case. There are a few
+commonly used ways to create a test:
+
+* A Bash script. It will make use of several environmental variables related
+ to the testing procedure, and could source a group of ``common.*`` libraries
+ for some common helper routines.
+
+* A Python unittest script. Import ``iotests`` and create a subclass of
+ ``iotests.QMPTestCase``, then call ``iotests.main`` method. The downside of
+ this approach is that the output is too scarce, and the script is considered
+ harder to debug.
+
+* A simple Python script without using the unittest module. This could also
+  import ``iotests`` for launching QEMU and utilities etc., but it doesn't
+  inherit from ``iotests.QMPTestCase`` and therefore doesn't use the Python
+  unittest execution. This is a combination of the first two approaches.
+
+Pick the language per your preference since both Bash and Python have
+comparable library support for invoking and interacting with QEMU programs. If
+you opt for Python, it is strongly recommended to write Python 3 compatible
+code.
+
+Both Python and Bash frameworks in iotests provide helpers to manage test
+images. They can be used to create and clean up images under the test
+directory. If no I/O or any protocol specific feature is needed, it is often
+more convenient to use the pseudo block driver, ``null-co://``, as the test
+image, which doesn't require image creation or cleaning up. Avoid system-wide
+devices or files whenever possible, such as ``/dev/null`` or ``/dev/zero``.
+Otherwise, image locking implications have to be considered. For example,
+another application on the host may have locked the file, possibly leading to a
+test failure. If using such devices is explicitly desired, consider adding the
+``locking=off`` option to disable image locking.
+
+Debugging a test case
+~~~~~~~~~~~~~~~~~~~~~
+
+The following options to the ``check`` script can be useful when debugging
+a failing test:
+
+* ``-gdb`` wraps every QEMU invocation in a ``gdbserver``, which waits for a
+ connection from a gdb client. The options given to ``gdbserver`` (e.g. the
+ address on which to listen for connections) are taken from the ``$GDB_OPTIONS``
+ environment variable. By default (if ``$GDB_OPTIONS`` is empty), it listens on
+ ``localhost:12345``.
+ It is possible to connect to it for example with
+ ``gdb -iex "target remote $addr"``, where ``$addr`` is the address
+ ``gdbserver`` listens on.
+ If the ``-gdb`` option is not used, ``$GDB_OPTIONS`` is ignored,
+ regardless of whether it is set or not.
+
+* ``-valgrind`` attaches a valgrind instance to QEMU. If it detects
+ warnings, it will print and save the log in
+ ``$TEST_DIR/<valgrind_pid>.valgrind``.
+ The final command line will be ``valgrind --log-file=$TEST_DIR/
+ <valgrind_pid>.valgrind --error-exitcode=99 $QEMU ...``
+
+* ``-d`` (debug) just increases the logging verbosity, showing
+ for example the QMP commands and answers.
+
+* ``-p`` (print) redirects QEMU’s stdout and stderr to the test output,
+ instead of saving it into a log file in
+ ``$TEST_DIR/qemu-machine-<random_string>``.
+
+Test case groups
+~~~~~~~~~~~~~~~~
+
+"Tests may belong to one or more test groups, which are defined in the form
+of a comment in the test source file. By convention, test groups are listed
+in the second line of the test file, after the "#!/..." line, like this:
+
+.. code::
+
+ #!/usr/bin/env python3
+ # group: auto quick
+ #
+ ...
+
+Another way of defining groups is to create the ``tests/qemu-iotests/group.local``
+file. This should be used only for downstream purposes (the file should never
+appear upstream). It may be used for defining downstream test groups
+or for temporarily disabling tests, like this:
+
+.. code::
+
+ # groups for some company downstream process
+ #
+ # ci - tests to run on build
+ # down - our downstream tests, not for upstream
+ #
+ # Format of each line is:
+ # TEST_NAME TEST_GROUP [TEST_GROUP ]...
+
+ 013 ci
+ 210 disabled
+ 215 disabled
+ our-ugly-workaround-test down ci
+
+Note that the following group names have a special meaning:
+
+- quick: Tests in this group should finish within a few seconds.
+
+- auto: Tests in this group are used during "make check" and should be
+  runnable in any case. That means they should run with every QEMU binary
+  (also non-x86), with every QEMU configuration (i.e. must not fail if
+  an optional feature is not compiled in - but reporting a "skip" is ok),
+  work at least with the qcow2 file format, work with all kinds of host
+  filesystems and users (e.g. "nobody" or "root") and must not take too
+  much memory and disk space (since CI pipelines tend to fail otherwise).
+
+- disabled: Tests in this group are disabled and ignored by check.
+
+.. _container-ref:
+
+Container based tests
+---------------------
+
+Introduction
+~~~~~~~~~~~~
+
+The container testing framework in QEMU utilizes public images to
+build and test QEMU in predefined and widely accessible Linux
+environments. This makes it possible to expand the test coverage
+across distros, toolchain flavors and library versions. The support
+was originally written for Docker although we also support Podman as
+an alternative container runtime. Although many of the target
+names and scripts are prefixed with "docker", the system will
+automatically run on whichever is configured.
+
+The container images are also used to augment the generation of tests
+for testing TCG. See :ref:`checktcg-ref` for more details.
+
+Docker Prerequisites
+~~~~~~~~~~~~~~~~~~~~
+
+Install "docker" with the system package manager and start the Docker service
+on your development machine, then make sure you have the privilege to run
+Docker commands. Typically this means setting up a passwordless ``sudo docker``
+command or logging in as root. For example:
+
+.. code::
+
+ $ sudo yum install docker
+ $ # or `apt-get install docker` for Ubuntu, etc.
+ $ sudo systemctl start docker
+ $ sudo docker ps
+
+The last command should print an empty table, to verify the system is ready.
+
+An alternative method to set up permissions is by adding the current user to
+"docker" group and making the docker daemon socket file (by default
+``/var/run/docker.sock``) accessible to the group:
+
+.. code::
+
+ $ sudo groupadd docker
+ $ sudo usermod $USER -a -G docker
+ $ sudo chown :docker /var/run/docker.sock
+
+Note that any one of the above configurations makes it possible for the user to
+exploit the whole host with Docker bind mounting or other privileged
+operations. So only do it on development machines.
+
+Podman Prerequisites
+~~~~~~~~~~~~~~~~~~~~
+
+Install "podman" with the system package manager.
+
+.. code::
+
+ $ sudo dnf install podman
+ $ podman ps
+
+The last command should print an empty table, to verify the system is ready.
+
+Quickstart
+~~~~~~~~~~
+
+From the source tree, type ``make docker-help`` to see the help. Testing
+can be started without configuring or building QEMU (``configure`` and
+``make`` are done in the container, with parameters defined by the
+make target):
+
+.. code::
+
+ make docker-test-build@debian
+
+This will create a container instance using the ``debian`` image (the image
+is downloaded and initialized automatically), in which the ``test-build`` job
+is executed.
+
+Registry
+~~~~~~~~
+
+The QEMU project has a container registry hosted by GitLab at
+``registry.gitlab.com/qemu-project/qemu`` which will automatically be
+used to pull in pre-built layers. This avoids unnecessary strain on
+the distro archives created by multiple developers running the same
+container build steps over and over again. This can be overridden
+locally by using the ``NOCACHE`` build option:
+
+.. code::
+
+ make docker-image-debian-arm64-cross NOCACHE=1
+
+Images
+~~~~~~
+
+Along with many other images, the ``debian`` image is defined in a Dockerfile
+in ``tests/docker/dockerfiles/``, called ``debian.docker``. The
+``make docker-help`` command will list all the available images.
+
+A ``.pre`` script can be added beside the ``.docker`` file, which will be
+executed before building the image under the build context directory. This is
+mainly used to do necessary host side setup. One such setup is ``binfmt_misc``,
+for example, to make qemu-user powered cross build containers work.
+
+Most of the existing Dockerfiles were written by hand, simply by creating
+a new ``.docker`` file under the ``tests/docker/dockerfiles/`` directory.
+This has led to an inconsistent set of packages being present across the
+different containers.
+
+Thus going forward, QEMU is aiming to automatically generate the Dockerfiles
+using the ``lcitool`` program provided by the ``libvirt-ci`` project:
+
+ https://gitlab.com/libvirt/libvirt-ci
+
+``libvirt-ci`` contains an ``lcitool`` program as well as a list of
+mappings to distribution package names for a wide variety of third
+party projects. ``lcitool`` applies the mappings to a list of build
+pre-requisites in ``tests/lcitool/projects/qemu.yml``, determines the
+list of native packages to install on each distribution, and uses them
+to generate build environments (dockerfiles and Cirrus CI variable files)
+that are consistent across OS distributions.
+
+
+Adding new build pre-requisites
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When preparing a patch series that adds a new build
+pre-requisite to QEMU, the prerequisites should be added to
+``tests/lcitool/projects/qemu.yml`` in order to make the dependency
+available in the CI build environments.
+
+In the simple case where the pre-requisite is already known to ``libvirt-ci``
+the following steps are needed:
+
+ * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite
+
+ * Run ``make lcitool-refresh`` to re-generate all relevant build environment
+ manifests
+
+It may be that ``libvirt-ci`` does not know about the new pre-requisite.
+If that is the case, some extra preparation steps will be required
+first to contribute the mapping to the ``libvirt-ci`` project:
+
+ * Fork the ``libvirt-ci`` project on gitlab
+
+ * Add an entry for the new build prerequisite to
+ ``lcitool/facts/mappings.yml``, listing its native package name on as
+ many OS distros as practical. Run ``python -m pytest --regenerate-output``
+ and check that the changes are correct.
+
+ * Commit the ``mappings.yml`` change together with the regenerated test
+ files, and submit a merge request to the ``libvirt-ci`` project.
+ Please note in the description that this is a new build pre-requisite
+ desired for use with QEMU.
+
+ * CI pipeline will run to validate that the changes to ``mappings.yml``
+ are correct, by attempting to install the newly listed package on
+ all OS distributions supported by ``libvirt-ci``.
+
+ * Once the merge request is accepted, go back to QEMU and update
+ the ``tests/lcitool/libvirt-ci`` submodule to point to a commit that
+ contains the ``mappings.yml`` update. Then add the prerequisite and
+ run ``make lcitool-refresh``.
+
+ * Please also trigger gitlab container generation pipelines on your change
+ for as many OS distros as practical to make sure that there are no
+ obvious breakages when adding the new pre-requisite. Please see
+ `CI <https://www.qemu.org/docs/master/devel/ci.html>`__ documentation
+ page on how to trigger gitlab CI pipelines on your change.
+
+For enterprise distros that default to old, end-of-life versions of the
+Python runtime, QEMU uses a separate set of mappings that work with more
+recent versions. These can be found in ``tests/lcitool/mappings.yml``.
+Modifying this file should not be necessary unless the new pre-requisite
+is a Python library or tool.
+
+
+Adding new OS distros
+^^^^^^^^^^^^^^^^^^^^^
+
+In some cases ``libvirt-ci`` will not know about the OS distro that is
+desired to be tested. Before adding a new OS distro, discuss the proposed
+addition:
+
+ * Send a mail to qemu-devel, copying people listed in the
+ MAINTAINERS file for ``Build and test automation``.
+
+ There are limited CI compute resources available to QEMU, so the
+ cost/benefit tradeoff of adding new OS distros needs to be considered.
+
+ * File an issue at https://gitlab.com/libvirt/libvirt-ci/-/issues
+ pointing to the qemu-devel mail thread in the archives.
+
+ This alerts other people who might be interested in the work
+ to avoid duplication, as well as to get feedback from libvirt-ci
+   maintainers on any tips to ease the addition.
+
+Assuming there is agreement to add a new OS distro, the next steps are:
+
+ * Fork the ``libvirt-ci`` project on gitlab
+
+ * Add metadata under ``lcitool/facts/targets/`` for the new OS
+ distro. There might be code changes required if the OS distro
+ uses a package format not currently known. The ``libvirt-ci``
+ maintainers can advise on this when the issue is filed.
+
+ * Edit ``lcitool/facts/mappings.yml`` to add entries for
+ the new OS, listing the native package names for as many packages
+ as practical. Run ``python -m pytest --regenerate-output`` and
+ check that the changes are correct.
+
+ * Commit the changes to ``lcitool/facts`` and the regenerated test
+ files, and submit a merge request to the ``libvirt-ci`` project.
+   Please note in the description that this is a new OS distro
+   desired for use with QEMU.
+
+ * CI pipeline will run to validate that the changes to ``mappings.yml``
+ are correct, by attempting to install the newly listed package on
+ all OS distributions supported by ``libvirt-ci``.
+
+ * Once the merge request is accepted, go back to QEMU and update
+ the ``libvirt-ci`` submodule to point to a commit that contains
+ the ``mappings.yml`` update.
+
+
+Tests
+~~~~~
+
+Different tests are added to cover various configurations to build and test
+QEMU. Docker tests are the executables under ``tests/docker`` named
+``test-*``. They are typically shell scripts and are built on top of a shell
+library, ``tests/docker/common.rc``, which provides helpers to find the QEMU
+source and build it.
+
+The full list of tests is printed in the ``make docker-help`` help.
+
+Debugging a Docker test failure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a CI task, a maintainer or you yourself report a Docker test failure,
+follow the steps below to debug it:
+
+1. Locally reproduce the failure with the reported command line. E.g. run
+ ``make docker-test-mingw@fedora-win64-cross J=8``.
+2. Add "V=1" to the command line, try again, to see the verbose output.
+3. Further add "DEBUG=1" to the command line. This will pause in a shell prompt
+ in the container right before testing starts. You could either manually
+ build QEMU and run tests from there, or press Ctrl-D to let the Docker
+ testing continue.
+4. If you press Ctrl-D, the same building and testing procedure will begin, and
+ will hopefully run into the error again. After that, you will be dropped to
+ the prompt for debug.
+
+Options
+~~~~~~~
+
+Various options can be used to affect how Docker tests are done. The full
+list is in the ``make docker`` help text. The frequently used ones are:
+
+* ``V=1``: the same as in top level ``make``. It will be propagated to the
+ container and enable verbose output.
+* ``J=$N``: the number of parallel tasks in make commands in the container,
+ similar to the ``-j $N`` option in top level ``make``. (The ``-j`` option in
+ top level ``make`` will not be propagated into the container.)
+* ``DEBUG=1``: enables debug. See the previous "Debugging a Docker test
+ failure" section.
+
+Thread Sanitizer
+----------------
+
+Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports
+building and testing with this tool.
+
+For more information on TSan:
+
+https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual
+
+Thread Sanitizer in Docker
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+TSan is currently supported in the ubuntu2204 docker image.
+
+The ``test-tsan`` test will build QEMU with TSan enabled and then run ``make check``.
+
+.. code::
+
+ make docker-test-tsan@ubuntu2204
+
+TSan warnings under docker are placed in files located at ``build/tsan/``.
+
+We recommend using ``DEBUG=1`` so you can launch the tests from inside the
+container and review the warnings generated by TSan.
+
+Building and Testing with TSan
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to build and test with TSan, with a few additional steps.
+These steps are normally done automatically in the docker.
+
+TSan is supported for clang and gcc.
+One particularity of sanitizers is that all the code, including shared object
+dependencies, should be built with it.
+Otherwise, in the case of TSan, any synchronization primitive from glib
+(GMutex for instance) will not be recognized and will lead to false positives.
+
+To build a TSan-instrumented version of glib:
+
+.. code::
+
+ $ git clone --depth=1 --branch=2.81.0 https://github.com/GNOME/glib.git
+ $ cd glib
+ $ CFLAGS="-O2 -g -fsanitize=thread" meson build
+ $ ninja -C build
+
+To configure the build for TSan:
+
+.. code::
+
+ ../configure --enable-tsan \
+ --disable-werror --extra-cflags="-O0"
+
+When executing qemu, don't forget to point to tsan glib:
+
+.. code::
+
+ $ glib_dir=/path/to/glib
+ $ export LD_LIBRARY_PATH=$glib_dir/build/gio:$glib_dir/build/glib:$glib_dir/build/gmodule:$glib_dir/build/gobject:$glib_dir/build/gthread
+ # check correct version is used
+ $ ldd build/qemu-x86_64 | grep glib
+ $ qemu-system-x86_64 ...
+
+The runtime behavior of TSan is controlled by the ``TSAN_OPTIONS`` environment
+variable.
+
+More information on ``TSAN_OPTIONS`` can be found here:
+
+https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags
+
+For example:
+
+.. code::
+
+ export TSAN_OPTIONS=suppressions=<path to qemu>/tests/tsan/suppressions.tsan \
+ detect_deadlocks=false history_size=7 exitcode=0 \
+ log_path=<build path>/tsan/tsan_warning
+
+The above exitcode=0 has TSan continue without error if any warnings are found.
+This allows for running the test and then checking the warnings afterwards.
+If you want TSan to stop and exit with error on warnings, use exitcode=66.
+
+.. _tsan-suppressions:
+
+TSan Suppressions
+~~~~~~~~~~~~~~~~~
+Keep in mind that a data race warning reported by TSan does not necessarily
+indicate an actual bug. TSan provides several
+different mechanisms for suppressing warnings. In general it is recommended
+to fix the code if possible to eliminate the data race rather than suppress
+the warning.
+
+A few important files for suppressing warnings are:
+
+tests/tsan/suppressions.tsan - Has TSan warnings we wish to suppress at runtime.
+The comment on each suppression will typically indicate why we are
+suppressing it. More information on the file format can be found here:
+
+https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions
+
+tests/tsan/ignore.tsan - Has TSan warnings we wish to disable
+at compile time for test or debug.
+Add flags to configure to enable:
+
+"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
+
+More information on the file format can be found here under "Blacklist Format":
+
+https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags
+
+TSan Annotations
+~~~~~~~~~~~~~~~~
+``include/qemu/tsan.h`` defines annotations. See this file for more descriptions
+of the annotations themselves. Annotations can be used to suppress
+TSan warnings or give TSan more information so that it can detect proper
+relationships between accesses of data.
+
+Annotation examples can be found here:
+
+https://github.com/llvm/llvm-project/tree/master/compiler-rt/test/tsan/
+
+Good files to start with are: annotate_happens_before.cpp and ignore_race.cpp.
+
+The full set of annotations can be found here:
+
+https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+
+docker-binfmt-image-debian-% targets
+------------------------------------
+
+It is possible to combine Debian's bootstrap scripts with a configured
+``binfmt_misc`` to bootstrap a number of Debian distros, including
+experimental ports not yet supported by a released OS. This can
+simplify setting up a rootfs by using docker to contain the foreign
+rootfs rather than manually invoking chroot.
+
+Setting up ``binfmt_misc``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the script ``qemu-binfmt-conf.sh`` to configure a QEMU
+user binary to automatically run binaries for the foreign
+architecture. While the scripts will try their best to work with
+dynamically linked QEMU binaries, a statically linked one will present
+fewer potential complications when copied into the docker image. Modern
+kernels support the ``F`` (fix binary) flag, which opens the QEMU
+executable on setup and avoids the need to find and re-open it in the
+chroot environment. This is triggered with the ``--persistent`` flag.
+
+Example invocation
+~~~~~~~~~~~~~~~~~~
+
+For example, to set up the HPPA ports build of Debian::
+
+ make docker-binfmt-image-debian-sid-hppa \
+ DEB_TYPE=sid DEB_ARCH=hppa \
+ DEB_URL=http://ftp.ports.debian.org/debian-ports/ \
+ DEB_KEYRING=/usr/share/keyrings/debian-ports-archive-keyring.gpg \
+    EXECUTABLE=$(pwd)/qemu-hppa V=1
+
+The ``DEB_`` variables are substitutions used by
+``debian-bootstrap.pre`` which is called to do the initial debootstrap
+of the rootfs before it is copied into the container. The second stage
+is run as part of the build. The final image will be tagged as
+``qemu/debian-sid-hppa``.
+
+VM testing
+----------
+
+This test suite contains scripts that bootstrap various guest images that have
+the necessary packages to build QEMU. The basic usage is documented in the
+``Makefile`` help, which is displayed with ``make vm-help``.
+
+Quickstart
+~~~~~~~~~~
+
+Run ``make vm-help`` to list available make targets. Invoke a specific make
+command to run a build test in an image. For example, ``make vm-build-freebsd``
+will build the source tree in the FreeBSD image. The command can be executed
+from either the source tree or the build dir; if the former, ``./configure`` is
+not needed. The command will then generate the test image in ``./tests/vm/``
+under the working directory.
+
+Note: images created by the scripts accept a well-known RSA key pair for SSH
+access, so they SHOULD NOT be exposed to external interfaces if you are
+concerned about attackers taking control of the guest and potentially
+exploiting a QEMU security bug to compromise the host.
+
+QEMU binaries
+~~~~~~~~~~~~~
+
+By default, ``qemu-system-x86_64`` is searched for in $PATH to run the guest. If
+there isn't one, or if it is older than 2.10, the test won't work. In this case,
+provide the QEMU binary in the ``QEMU`` environment variable:
+``QEMU=/path/to/qemu-2.10+``.
+
+Likewise, the path to ``qemu-img`` can be set in the ``QEMU_IMG`` environment variable.
+
+Make jobs
+~~~~~~~~~
+
+The ``-j$X`` option in the make command line is not propagated into the VM;
+specify ``J=$X`` to control the make jobs in the guest.
+
+Debugging
+~~~~~~~~~
+
+Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive
+debugging and verbose output. If this is not enough, see the next section.
+``V=1`` will be propagated down into the make jobs in the guest.
+
+Manual invocation
+~~~~~~~~~~~~~~~~~
+
+Each guest script is an executable script with the same command line options.
+For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``:
+
+.. code::
+
+ $ cd $QEMU_SRC/tests/vm
+
+ # To bootstrap the image
+ $ ./netbsd --build-image --image /var/tmp/netbsd.img
+ <...>
+
+ # To run an arbitrary command in guest (the output will not be echoed unless
+ # --debug is added)
+ $ ./netbsd --debug --image /var/tmp/netbsd.img uname -a
+
+ # To build QEMU in guest
+ $ ./netbsd --debug --image /var/tmp/netbsd.img --build-qemu $QEMU_SRC
+
+ # To get to an interactive shell
+ $ ./netbsd --interactive --image /var/tmp/netbsd.img sh
+
+Adding new guests
+~~~~~~~~~~~~~~~~~
+
+Please look at existing guest scripts for how to add new guests.
+
+Most importantly, create a subclass of BaseVM, implement the ``build_image()``
+method and define ``BUILD_SCRIPT``, then finally call ``basevm.main()`` from
+the script's ``main()``.
+
+* Usually in ``build_image()``, a template image is downloaded from a
+ predefined URL. ``BaseVM._download_with_cache()`` takes care of the cache and
+ the checksum, so consider using it.
+
+* Once the image is downloaded, users, SSH server and QEMU build deps should
+ be set up:
+
+ - Root password set to ``BaseVM.ROOT_PASS``
+ - User ``BaseVM.GUEST_USER`` is created, and password set to
+ ``BaseVM.GUEST_PASS``
+ - SSH service is enabled and started on boot,
+ ``$QEMU_SRC/tests/keys/id_rsa.pub`` is added to ssh's ``authorized_keys``
+ file of both root and the normal user
+ - DHCP client service is enabled and started on boot, so that it can
+ automatically configure the virtio-net-pci NIC and communicate with QEMU
+ user net (10.0.2.2)
+ - Necessary packages are installed to untar the source tarball and build
+ QEMU
+
+* Write a proper ``BUILD_SCRIPT`` template, which should be a shell script that
+  untars the QEMU source tarball from a raw virtio-blk block device, then
+  configures and builds it. Running "make check" is also recommended.
+
+Image fuzzer testing
+--------------------
+
+An image fuzzer was added to exercise format drivers. Currently only qcow2 is
+supported. To start the fuzzer, run
+
+.. code::
+
+ tests/image-fuzzer/runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2
+
+Alternatively, a command other than ``qemu-img info`` can be tested by
+changing the ``-c`` option.
+
+Functional tests using Python
+-----------------------------
+
+A functional test focuses on the functional requirement of the software,
+attempting to find errors like incorrect functions, interface errors,
+behavior errors, and initialization and termination errors [3]_.
+
+The ``tests/functional`` directory hosts functional tests written in
+Python. You can run the functional tests simply by executing:
+
+.. code::
+
+ make check-functional
+
+See :ref:`checkfunctional-ref` for more details.
+
+.. _checktcg-ref:
+
+Testing with "make check-tcg"
+-----------------------------
+
+The check-tcg tests are intended for simple smoke tests of both
+linux-user and softmmu TCG functionality. However, to build test
+programs for guest targets you need to have cross compilers available.
+If your distribution supports cross compilers you can do something as
+simple as::
+
+ apt install gcc-aarch64-linux-gnu
+
+The configure script will automatically pick up their presence.
+Sometimes compilers have slightly odd names, so they can be specified
+explicitly by passing the appropriate configure option for the
+architecture in question, for example::
+
+ $(configure) --cross-cc-aarch64=aarch64-cc
+
+There is also a ``--cross-cc-cflags-ARCH`` flag in case additional
+compiler flags are needed to build for a given target.
+
+If you have the ability to run containers as your user, the build system
+will automatically use them where no system compiler is available. For
+architectures where we also support building QEMU we will generally
+use the same container to build tests. However there are a number of
+additional containers defined that have a minimal cross-build
+environment that is only suitable for building test cases. Sometimes
+we may use a bleeding edge distribution for compiler features needed
+for test cases that aren't yet in the LTS distros we support for QEMU
+itself.
+
+See :ref:`container-ref` for more details.
+
+Running subset of tests
+~~~~~~~~~~~~~~~~~~~~~~~
+
+You can build the tests for one architecture::
+
+ make build-tcg-tests-$TARGET
+
+And run with::
+
+ make run-tcg-tests-$TARGET
+
+Adding ``V=1`` to the invocation will show the details of how to
+invoke QEMU for the test which is useful for debugging tests.
+
+Running individual tests
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tests can also be run directly from the test build directory. If you
+run ``make help`` from the test build directory you will get a list of
+all the tests that can be run. Please note that the same binaries are used
+in multiple tests, for example::
+
+ make run-plugin-test-mmap-with-libinline.so
+
+will run the mmap test with the ``libinline.so`` TCG plugin. The
+gdbstub tests also re-use the test binaries while exercising gdb.
+
+TCG test dependencies
+~~~~~~~~~~~~~~~~~~~~~
+
+The TCG tests are deliberately very light on dependencies and are
+either totally bare with minimal gcc lib support (for system-mode tests)
+or just glibc (for linux-user tests). This is because getting a cross
+compiler to work with additional libraries can be challenging.
+
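+As an illustration, a linux-user TCG test can be as simple as the following
+sketch (the file name is hypothetical); it only needs libc, so any working
+cross compiler can build it:
+
+.. code::
+
+   /* hypothetical tests/tcg/multiarch/hello-sketch.c */
+   #include <unistd.h>
+
+   int main(void)
+   {
+       static const char msg[] = "hello from the guest\n";
+
+       /* Only libc and a syscall, so no extra libraries are required. */
+       if (write(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0) {
+           return 1;
+       }
+       return 0;
+   }
+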
+Other TCG Tests
+---------------
+
+There are a number of out-of-tree test suites that are used for more
+extensive testing of processor features.
+
+KVM Unit Tests
+~~~~~~~~~~~~~~
+
+The KVM unit tests are designed to run as a Guest OS under KVM but
+there is no reason why they can't exercise TCG as well. The suite
+provides a minimal OS kernel with hooks for enabling the MMU as well
+as reporting test results via a special device::
+
+ https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
+
+Linux Test Project
+~~~~~~~~~~~~~~~~~~
+
+The LTP is focused on exercising the syscall interface of a Linux
+kernel. It checks that syscalls behave as documented and strives to
+exercise as many corner cases as possible. It is a useful test suite
+to run to exercise QEMU's linux-user code::
+
+ https://linux-test-project.github.io/
+
+GCC gcov support
+----------------
+
+``gcov`` is a GCC tool to analyze the testing coverage by
+instrumenting the tested code. To use it, configure QEMU with
+``--enable-gcov`` option and build. Then run the tests as usual.
+
+If you want to gather coverage information on a single test, the ``make
+clean-gcda`` target can be used to delete any existing coverage
+information before running it.
+
+You can generate an HTML coverage report by executing ``make
+coverage-html`` which will create
+``meson-logs/coveragereport/index.html``.
+
+Further analysis can be conducted by running the ``gcov`` command
+directly on the various .gcda output files. Please read the ``gcov``
+documentation for more information.
+
+Flaky tests
+-----------
+
+A flaky test is defined as a test that exhibits both a passing and a failing
+result with the same code on different runs. Some usual reasons for an
+intermittent/flaky test are async wait, concurrency, and test order dependency
+[4]_.
+
+In QEMU, tests that are identified as flaky are normally disabled by
+default. Set the ``QEMU_TEST_FLAKY_TESTS`` environment variable before running
+the tests to enable them.
+
+References
+----------
+
+.. [1] Sommerville, Ian (2016). Software Engineering. p. 233.
+.. [2] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering,
+ A Practitioner’s Approach. p. 48, 376, 378, 381.
+.. [3] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering,
+ A Practitioner’s Approach. p. 388.
+.. [4] Luo, Qingzhou, et al. An empirical analysis of flaky tests.
+ Proceedings of the 22nd ACM SIGSOFT International Symposium on
+ Foundations of Software Engineering. 2014.
diff --git a/docs/devel/qgraph.rst b/docs/devel/testing/qgraph.rst
index 43342d9..43342d9 100644
--- a/docs/devel/qgraph.rst
+++ b/docs/devel/testing/qgraph.rst
diff --git a/docs/devel/testing/qtest.rst b/docs/devel/testing/qtest.rst
new file mode 100644
index 0000000..73ef770
--- /dev/null
+++ b/docs/devel/testing/qtest.rst
@@ -0,0 +1,93 @@
+.. _qtest:
+
+========================================
+QTest Device Emulation Testing Framework
+========================================
+
+.. toctree::
+
+ qgraph
+
+QTest is a device emulation testing framework. It can be very useful to test
+device models; it can also control certain aspects of QEMU (such as virtual
+clock stepping) with a special purpose "qtest" protocol. Refer to
+:ref:`qtest-protocol` for more details of the protocol.
+
+QTest cases can be executed with
+
+.. code::
+
+ make check-qtest
+
+The QTest library is implemented by ``tests/qtest/libqtest.c`` and the API is
+defined in ``tests/qtest/libqtest.h``.
+
+Consider adding a new QTest case when you are introducing a new piece of
+virtual hardware, or extending an existing test if you are adding
+functionality to an existing virtual device.
+
+On top of libqtest, a higher level library, ``libqos``, was created to
+encapsulate common tasks of device drivers, such as memory management and
+communicating with system buses or devices. Many virtual device tests use
+libqos instead of directly calling into libqtest.
+Libqos also offers the Qgraph API to increase test coverage and to
+automate the setup of QEMU command line arguments and devices.
+Refer to :ref:`qgraph` for the Qgraph explanation and API.
+
+Steps to add a new QTest case are:
+
+1. Create a new source file for the test. (More than one file can be added as
+ necessary.) For example, ``tests/qtest/foo-test.c``.
+
+2. Write the test code with the glib and libqtest/libqos API. See also existing
+   tests and the library headers for reference (a short sketch follows these
+   steps).
+
+3. Register the new test in ``tests/qtest/meson.build``. Add the test
+ executable name to an appropriate ``qtests_*`` variable. There is
+ one variable per architecture, plus ``qtests_generic`` for tests
+ that can be run for all architectures. For example::
+
+ qtests_generic = [
+ ...
+ 'foo-test',
+ ...
+ ]
+
+4. If the test has more than one source file or needs to be linked with any
+ dependency other than ``qemuutil`` and ``qos``, list them in the ``qtests``
+ dictionary. For example a test that needs to use the ``QIO`` library
+ will have an entry like::
+
+ {
+ ...
+ 'foo-test': [io],
+ ...
+ }
+
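+As an illustration of step 2, a very small libqtest-based test might look like
+the following sketch. The device name, register address and expected value are
+made up for illustration and do not refer to a real QEMU device:
+
+.. code::
+
+   /* tests/qtest/foo-test.c -- hypothetical sketch, not an existing test */
+   #include "qemu/osdep.h"
+   #include "libqtest.h"
+
+   static void test_foo_reset_value(void)
+   {
+       /* Spawn a QEMU instance with the (hypothetical) device under test. */
+       QTestState *qts = qtest_init("-machine q35 -device foo-dev");
+
+       /* Read a (hypothetical) MMIO register and check its reset value. */
+       g_assert_cmphex(qtest_readl(qts, 0xfed00000), ==, 0x12345678);
+
+       qtest_quit(qts);
+   }
+
+   int main(int argc, char **argv)
+   {
+       g_test_init(&argc, &argv, NULL);
+       qtest_add_func("/foo/reset-value", test_foo_reset_value);
+       return g_test_run();
+   }
+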
+Debugging a QTest failure is slightly harder than debugging a unit test because
+the tests look up QEMU program names in the environment variables, such as
+``QTEST_QEMU_BINARY`` and ``QTEST_QEMU_IMG``, and also because it is not easy
+to attach gdb to the QEMU process spawned from the test. But manually invoking
+the test and using gdb on it is still simple to do: find out the actual command
+from the output of
+
+.. code::
+
+ make check-qtest V=1
+
+which you can run manually.
+
+
+.. _qtest-protocol:
+
+QTest Protocol
+--------------
+
+.. kernel-doc:: system/qtest.c
+ :doc: QTest Protocol
+
+
+libqtest API reference
+----------------------
+
+.. kernel-doc:: tests/qtest/libqtest.h
diff --git a/docs/devel/uefi-vars.rst b/docs/devel/uefi-vars.rst
new file mode 100644
index 0000000..0151a26
--- /dev/null
+++ b/docs/devel/uefi-vars.rst
@@ -0,0 +1,68 @@
+==============
+UEFI variables
+==============
+
+Guest UEFI variable management
+==============================
+
+The traditional approach for UEFI Variable storage in qemu guests is
+to work as closely as possible to physical hardware. That means
+providing pflash as storage and leaving the management of variables
+and flash to the guest.
+
+Secure boot support comes with the requirement that the UEFI variable
+storage must be protected against direct access by the OS. All update
+requests must pass the sanity checks. (Parts of) the firmware must
+run with a higher privilege level than the OS so this can be enforced
+by the firmware. On x86 this has been implemented using System
+Management Mode (SMM) in qemu and kvm, which again is the same
+approach taken by physical hardware. Only privileged code running in
+SMM mode is allowed to access flash storage.
+
+Communication with the firmware code running in SMM mode works by
+serializing the requests to a shared buffer, then trapping into SMM
+mode via SMI. The SMM code processes the request, stores the reply in
+the same buffer and returns.
+
+Host UEFI variable service
+==========================
+
+Instead of running the privileged code inside the guest we can run it
+on the host. The serialization protocol can be reused. The
+communication with the host uses a virtual device, which essentially
+configures the shared buffer location and size, and traps to the host
+to process the requests.
+
+The ``uefi-vars`` device implements the UEFI virtual device. It comes
+in ``uefi-vars-x86`` and ``uefi-vars-sysbus`` flavours. The device
+reimplements the handlers needed, specifically
+``EfiSmmVariableProtocol`` and ``VarCheckPolicyLibMmiHandler``. It
+also consumes events (``EfiEndOfDxeEventGroup``,
+``EfiEventReadyToBoot`` and ``EfiEventExitBootServices``).
+
+The advantage of this approach is that we do not need a special
+privilege level for the firmware to protect itself, i.e. it does not
+depend on SMM emulation on x64, which allows the removal of a large
+amount of complex SMM emulation code from the Linux kernel
+(CONFIG_KVM_SMM=n). It also allows support for secure boot on Arm
+without implementing secure world (EL3) emulation in kvm.
+
+Of course there are also downsides. The added device increases the
+attack surface of the host, and we are adding some code duplication
+because we have to reimplement some edk2 functionality in qemu.
+
+Usage on x86_64
+---------------
+
+.. code::
+
+ qemu-system-x86_64 \
+ -device uefi-vars-x86,jsonfile=/path/to/vars.json
+
+Usage on aarch64
+----------------
+
+.. code::
+
+ qemu-system-aarch64 -M virt \
+ -device uefi-vars-sysbus,jsonfile=/path/to/vars.json
diff --git a/docs/devel/virtio-backends.rst b/docs/devel/virtio-backends.rst
index 9ff092e..ebddc3b 100644
--- a/docs/devel/virtio-backends.rst
+++ b/docs/devel/virtio-backends.rst
@@ -101,13 +101,12 @@ manually instantiated:
VirtIOBlock vdev;
};
- static Property virtio_blk_pci_properties[] = {
+ static const Property virtio_blk_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -120,7 +119,7 @@ manually instantiated:
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
- static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
+ static void virtio_blk_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/docs/glossary.rst b/docs/glossary.rst
new file mode 100644
index 0000000..4fa044b
--- /dev/null
+++ b/docs/glossary.rst
@@ -0,0 +1,280 @@
+.. _Glossary:
+
+--------
+Glossary
+--------
+
+This section of the manual presents brief definitions of acronyms and terms used
+by QEMU developers.
+
+Accelerator
+-----------
+
+A specific API used to accelerate execution of guest instructions. It can be
+hardware-based, through a virtualization API provided by the host OS (kvm, hvf,
+whpx, ...), or software-based (tcg). See this description of `supported
+accelerators<Accelerators>`.
+
+Board
+-----
+
+Another name for :ref:`machine`.
+
+Block
+-----
+
+Block drivers are the available `disk formats and front-ends
+<block-drivers>`, and block devices `(see the Block device section on the
+options page)<sec_005finvocation>` use them to implement disks for a
+virtual machine.
+
+CFI
+---
+
+Control Flow Integrity is a hardening technique used to prevent exploits
+targeting QEMU by detecting unexpected branches during execution. QEMU `actively
+supports<cfi>` being compiled with CFI enabled.
+
+Device
+------
+
+In QEMU, a device is a piece of hardware visible to the guest. Examples include
+UARTs, PCI controllers, PCI cards, VGA controllers, and many more.
+
+QEMU is able to emulate a CPU, and all the hardware interacting with it,
+including `many devices<device-emulation>`. When QEMU runs a virtual machine
+using a hardware-based accelerator, it is responsible for emulating, using
+software, all devices.
+
+EDK2
+----
+
+EDK2, also known as `TianoCore <https://www.tianocore.org/>`_, is an open source
+implementation of the UEFI standard. QEMU virtual machines that boot a UEFI firmware
+usually use EDK2.
+
+gdbstub
+-------
+
+QEMU implements a `gdb server <GDB usage>`, allowing gdb to attach to it and
+debug a running virtual machine, or a program in user-mode. This allows
+debugging the guest code that is running inside QEMU.
+
+glib2
+-----
+
+`GLib2 <https://docs.gtk.org/glib/>`_ is one of the most important libraries we
+are using throughout the codebase. It provides many data structures, macros,
+string and thread utilities and portable functions across different OSes. It's
+required to build QEMU.
+
+Guest agent
+-----------
+
+The `QEMU Guest Agent <qemu-ga>` is a daemon intended to be run within virtual
+machines. It provides various services to help QEMU interact with the guest.
+
+.. _guest:
+
+Guest
+-----
+
+The guest is the architecture of the virtual machine being emulated.
+See also :ref:`host`.
+
+Sometimes this is called the :ref:`target` architecture, but that term
+can be ambiguous.
+
+.. _host:
+
+Host
+----
+
+The host is the native architecture on which QEMU is running.
+See also :ref:`guest`.
+
+Hypervisor
+----------
+
+The formal definition of a hypervisor is a program or API that can be used to
+manage a virtual machine. QEMU is a virtualizer that interacts with various
+hypervisors.
+
+In the context of QEMU, a hypervisor is an API, provided by the host OS,
+that allows the execution of virtual machines. The Linux implementation is KVM
+(and Xen is supported as well). On macOS, it's HVF. Windows defines WHPX. And
+NetBSD provides NVMM.
+
+.. _machine:
+
+Machine
+-------
+
+QEMU's system emulation models many different types of hardware. A machine model
+(sometimes called a board model) is the model of a complete virtual system with
+RAM, one or more CPUs, and various devices. It can be selected with the option
+``-machine`` of qemu-system. Our machine models can be found on this `page
+<system-targets-ref>`.
+
+Migration
+---------
+
+QEMU can save and restore the execution of a virtual machine between different
+host systems. This is provided by the :ref:`Migration framework<migration>`.
+
+NBD
+---
+
+The `QEMU Network Block Device server <qemu-nbd>` is a tool that can be used to
+mount and access QEMU images, providing functionality similar to a loop device.
+
+Mailing List
+------------
+
+This is `where <https://wiki.qemu.org/Contribute/MailingLists>`_ all the
+development happens! Changes are posted as series, which all developers can
+review and give feedback on.
+
+For reporting issues, our `GitLab
+<https://gitlab.com/qemu-project/qemu/-/issues>`_ tracker is the best place.
+
+.. _softmmu:
+
+MMU / softmmu
+-------------
+
+The Memory Management Unit is responsible for translating virtual addresses to
+physical addresses and managing memory protection. QEMU system mode is named
+"softmmu" precisely because it implements this in software, including a TLB
+(Translation lookaside buffer), for the guest virtual machine.
+
+QEMU user-mode does not implement a full software MMU, but "simply" translates
+virtual addresses by adding a specific offset, relying on the host MMU/OS
+instead.
+
+Monitor / QMP / HMP
+-------------------
+
+The `QEMU Monitor <QEMU monitor>` is a text interface which can be used to interact
+with a running virtual machine.
+
+QMP stands for QEMU Monitor Protocol and is a JSON-based interface.
+HMP stands for Human Monitor Protocol and is a set of text commands available
+for users who prefer natural language to JSON.
+
+MTTCG
+-----
+
+Multiple CPU support was first implemented using a round-robin algorithm
+running on a single thread. Later on, `Multi-threaded TCG <mttcg>` was developed
+to benefit from multiple cores to speed up execution.
+
+Plugins
+-------
+
+`TCG Plugins <TCG Plugins>` is an API used to instrument guest code, in system
+and user mode. The end goal is to have a similar set of functionality compared
+to `DynamoRIO <https://dynamorio.org/>`_ or `valgrind <https://valgrind.org/>`_.
+
+One key advantage of QEMU plugins is that they can be used to perform
+architecture agnostic instrumentation.
+
+Patchew
+-------
+
+`Patchew <https://patchew.org/QEMU/>`_ is a website that tracks patches on the
+Mailing List.
+
+PR
+--
+
+Once a series is reviewed and accepted by a subsystem maintainer, it will be
+included in a PR (Pull Request) that the project maintainer will merge into the
+QEMU main branch, after running tests.
+
+The QEMU project doesn't currently expect most developers to directly submit
+pull requests.
+
+QCOW2
+-----
+
+QEMU Copy On Write is a disk format developed by QEMU. It provides transparent
+compression, automatic extension, and many other advantages over a raw image.
+
+qcow2 is the recommended format to use.
+
+QEMU
+----
+
+`QEMU (Quick Emulator) <https://www.qemu.org/>`_ is a generic and open source
+machine emulator and virtualizer.
+
+QOM
+---
+
+:ref:`QEMU Object Model <qom>` is an object oriented API used to define
+various devices and hardware in the QEMU codebase.
+
+Record/replay
+-------------
+
+:ref:`Record/replay <replay>` is a feature of QEMU allowing
+deterministic and reproducible execution of a virtual machine.
+
+Rust
+----
+
+`A new programming language <https://www.rust-lang.org/>`_, memory safe by
+default. There is work in progress to integrate it into the QEMU codebase for
+various subsystems.
+
+System mode
+-----------
+
+QEMU System mode provides a virtual model of an entire machine (CPU, memory and
+emulated devices) to run a guest OS. In this mode the CPU may be fully emulated,
+or it may work with a hypervisor such as KVM, Xen or Hypervisor.Framework to
+allow the guest to run directly on the host CPU.
+
+QEMU System mode is called :ref:`softmmu <softmmu>` as well.
+
+.. _target:
+
+Target
+------
+
+The term "target" can be ambiguous. In most places in QEMU it is used as a
+synonym for :ref:`guest`. For example the code for emulating Arm CPUs is in
+``target/arm/``. However in the :ref:`TCG subsystem <tcg>` "target" refers to the
+architecture which QEMU is running on, i.e. the :ref:`host`.
+
+TCG
+---
+
+TCG is the QEMU `Tiny Code Generator <tcg>`. It is the JIT (just-in-time)
+compiler we use to emulate a guest CPU in software.
+
+It is one of the accelerators supported by QEMU, and supports a lot of
+guest/host architectures.
+
+User mode
+---------
+
+QEMU User mode can launch processes compiled for one CPU on another CPU. In this
+mode the CPU is always emulated, and QEMU translates system calls from the
+guest to the host kernel. It is available for Linux and BSD.
+
+VirtIO
+------
+
+VirtIO is an open standard used to define and implement virtual devices with a
+minimal overhead, defining a set of data structures and hypercalls (similar to
+system calls, but targeting a hypervisor, which happens to be QEMU in our
+case). It's designed to be more efficient than emulating a real device, by
+minimizing the amount of interactions between a guest VM and its hypervisor.
+
+vhost-user
+----------
+
+`Vhost-user <vhost_user>` is an interface used to implement VirtIO devices
+outside of QEMU itself.
diff --git a/docs/igd-assign.txt b/docs/igd-assign.txt
index e17bb50..af4e839 100644
--- a/docs/igd-assign.txt
+++ b/docs/igd-assign.txt
@@ -1,44 +1,70 @@
Intel Graphics Device (IGD) assignment with vfio-pci
====================================================
-IGD has two different modes for assignment using vfio-pci:
-
-1) Universal Pass-Through (UPT) mode:
-
- In this mode the IGD device is added as a *secondary* (ie. non-primary)
- graphics device in combination with an emulated primary graphics device.
- This mode *requires* guest driver support to remove the external
- dependencies generally associated with IGD (see below). Those guest
- drivers only support this mode for Broadwell and newer IGD, according to
- Intel. Additionally, this mode by default, and as officially supported
- by Intel, does not support direct video output. The intention is to use
- this mode either to provide hardware acceleration to the emulated graphics
- or to use this mode in combination with guest-based remote access software,
- for example VNC (see below for optional output support). This mode
- theoretically has no device specific handling dependencies on vfio-pci or
- the VM firmware.
-
-2) "Legacy" mode:
-
- In this mode the IGD device is intended to be the primary and exclusive
- graphics device in the VM[1], as such QEMU does not facilitate any sort
- of remote graphics to the VM in this mode. A connected physical monitor
- is the intended output device for IGD. This mode includes several
- requirements and restrictions:
-
- * IGD must be given address 02.0 on the PCI root bus in the VM
- * The host kernel must support vfio extensions for IGD (v4.6)
- * vfio VGA support very likely needs to be enabled in the host kernel
- * The VM firmware must support specific fw_cfg enablers for IGD
- * The VM machine type must support a PCI host bridge at 00.0 (standard)
- * The VM machine type must provide or allow to be created a special
- ISA/LPC bridge device (vfio-pci-igd-lpc-bridge) on the root bus at
- PCI address 1f.0.
- * The IGD device must have a VGA ROM, either provided via the romfile
- option or loaded automatically through vfio (standard). rombar=0
- will disable legacy mode support.
- * Hotplug of the IGD device is not supported.
- * The IGD device must be a SandyBridge or newer model device.
+Using vfio-pci, an Intel Graphics Device (IGD) can be passed through to a
+guest, either serving as the primary and exclusive graphics adapter, or used
+in combination with an emulated primary graphics device, depending on the
+configuration and guest driver support. However, IGD devices are not "clean"
+PCI devices; they use extra memory regions other than BARs. Special handling
+is required to make them work properly, including:
+
+* OpRegion for accessing Virtual BIOS Table (VBT) that contains display output
+ information.
+* Data Stolen Memory (DSM) region used as VRAM at early stage (BIOS/UEFI)
+
+Certain guest software also depends on the following conditions to work:
+(*-Required by)
+
+| Condition | Linux | Windows | VBIOS | EFI GOP |
+|---------------------------------------------|-------|---------|-------|---------|
+| #1 IGD has a valid OpRegion containing VBT | * ^1 | * | * | * |
+| #2 VID/DID of LPC bridge at 00:1f.0 matches | | | * | * |
+| #3 IGD is assigned to BDF 00:02.0 | | | * | * |
+| #4 IGD has VGA controller device class | | | * | * |
+| #5 Host's VGA ranges are mapped to IGD | | | * | |
+| #6 Guest has valid VBIOS or UEFI Option ROM | | | * | * |
+
+^1 Though the i915 driver is able to mock an OpRegion, it is still recommended
+   to use the VBT copied from the host OpRegion to prevent incorrect
+   configuration.
+
+For #1, the "x-igd-opregion=on" option exposes a copy of the host IGD OpRegion
+to the guest via fw_cfg, where the guest firmware can set up the guest
+OpRegion with it.
+
+For #2, the "x-igd-lpc=on" option copies the IDs of the host LPC bridge and
+host bridge to the guest. Currently this is only supported on i440fx machines,
+as q35 machines already have an ICH9 LPC bridge and overwriting its IDs may
+lead to unexpected behavior.
+
+For #3, "addr=2.0" assigns IGD to 00:02.0.
+
+For #4, the primary display must be set to IGD in host BIOS.
+
+For #5, "x-vga=on" enables guest access to standard VGA IO/MMIO ranges.
+
+For #6, a ROM is needed, provided either via the ROM BAR or the romfile=
+option. The Intel document [1] shows how to dump the VBIOS to a file. For the
+UEFI Option ROM, see the "Guest firmware" section.
+
+QEMU also provides a "Legacy" mode that implicitly enables full functionality
+on IGD. It is automatically enabled when:
+* IGD generation is 6 to 9 (Sandy Bridge to Comet Lake)
+* Machine type is i440fx
+* IGD is assigned to guest BDF 00:02.0
+* ROM BAR or romfile is present
+
+In "Legacy" mode, QEMU will automatically set up the OpRegion, LPC bridge IDs
+and VGA range access, which is equivalent to:
+  x-igd-opregion=on,x-igd-lpc=on,x-vga=on
+
+By default, "Legacy" mode won't fail; it continues on error. Users can set
+"x-igd-legacy-mode=on" to force-enable legacy mode, which also checks whether
+the conditions above for legacy mode are met; if any error occurs, QEMU will
+fail immediately. Users can also set "x-igd-legacy-mode=off" to disable legacy
+mode.
+
+In legacy mode, as the guest VGA ranges are assigned to the IGD device, all
+other graphics devices should be removed. This can be done using "-nographic"
+or "-vga none" or "-nodefaults", along with adding the device using vfio-pci.
For either mode, depending on the host kernel, the i915 driver in the host
may generate faults and errors upon re-binding to an IGD device after it
@@ -73,31 +99,39 @@ DVI, or DisplayPort) may be unsupported in some use cases. In the author's
experience, even DP to VGA adapters can be troublesome while adapters between
digital formats work well.
-Usage
-=====
-The intention is for IGD assignment to be transparent for users and thus for
-management tools like libvirt. To make use of legacy mode, simply remove all
-other graphics options and use "-nographic" and either "-vga none" or
-"-nodefaults", along with adding the device using vfio-pci:
- -device vfio-pci,host=00:02.0,id=hostdev0,bus=pci.0,addr=0x2
+Options
+=======
+* x-igd-opregion=[*on*|off]
+  Copies the host IGD OpRegion and exposes it to the guest via fw_cfg
+
+* x-igd-lpc=[on|*off*]
+  Creates a dummy LPC bridge at 00:1f.0 with host VID/DID (i440fx only)
+
+* x-igd-legacy-mode=[on|off|*auto*]
+  Enables/Disables legacy mode
+
+* x-igd-gms=[hex, default 0]
+  Overrides the DSM region size in the GGC register; 0 means use the host
+  value. Use this only when the DSM size cannot be changed through the
+  'DVMT Pre-Allocated' option in host BIOS.
+
-For UPT mode, retain the default emulated graphics and simply add the vfio-pci
-device making use of any other bus address other than 02.0. libvirt will
-default to assigning the device a UPT compatible address while legacy mode
-users will need to manually edit the XML if using a tool like virt-manager
-where the VM device address is not expressly specified.
+Examples
+========
+* Adding IGD with automatic legacy mode support
+ -device vfio-pci,host=00:02.0,id=hostdev0,addr=2.0
-An experimental vfio-pci option also exists to enable OpRegion, and thus
-external monitor support, for UPT mode. This can be enabled by adding
-"x-igd-opregion=on" to the vfio-pci device options for the IGD device. As
-with legacy mode, this requires the host to support features introduced in
-the v4.6 kernel. If Intel chooses to embrace this support, the option may
-be made non-experimental in the future, opening it to libvirt support.
+* Adding IGD with OpRegion and LPC ID hack, but without VGA ranges
+ (For UEFI guests)
+ -device vfio-pci,host=00:02.0,id=hostdev0,addr=2.0,x-igd-legacy-mode=off,x-igd-lpc=on,romfile=efi_oprom.rom
-Developer ABI
-=============
-Legacy mode IGD support imposes two fw_cfg requirements on the VM firmware:
+
+Guest firmware
+==============
+Guest firmware is responsible for setting up the OpRegion and the Base of Data
+Stolen Memory (BDSM) in the guest address space. IGD passthrough support
+imposes two fw_cfg requirements on the VM firmware:
1) "etc/igd-opregion"
@@ -117,17 +151,117 @@ Legacy mode IGD support imposes two fw_cfg requirements on the VM firmware:
Firmware must allocate a reserved memory below 4GB with required 1MB
alignment equal to this size. Additionally the base address of this
reserved region must be written to the dword BDSM register in PCI config
- space of the IGD device at offset 0x5C. As this support is related to
- running the IGD ROM, which has other dependencies on the device appearing
- at guest address 00:02.0, it's expected that this fw_cfg file is only
- relevant to a single PCI class VGA device with Intel vendor ID, appearing
- at PCI bus address 00:02.0.
+ space of the IGD device at offset 0x5C (or 0xC0 for Gen 11+ devices using
+ 64-bit BDSM). As this support is related to running the IGD ROM, which
+ has other dependencies on the device appearing at guest address 00:02.0,
+ it's expected that this fw_cfg file is only relevant to a single PCI
+ class VGA device with Intel vendor ID, appearing at PCI bus address 00:02.0.
+
+   Starting from Meteor Lake, IGD devices access stolen memory via their MMIO
+   BAR2 (LMEMBAR) and no longer have the BDSM register in config space. There
+   is no need for guest firmware to allocate data stolen memory in the guest
+   address space and write it to the BDSM register. The value of this fw_cfg
+   file is 0 in that case.
+
+Upstream Seabios has OpRegion and BDSM (pre-Gen11 devices only) support.
+However, that support has not been accepted by upstream EDK2/OVMF. A
+recommended solution is to create a virtual OpRom with the following DXE
+drivers:
+
+* IgdAssignmentDxe: Set up OpRegion and BDSM according to fw_cfg (must)
+* IntelGopDriver: Closed-source Intel GOP driver
+* PlatformGopPolicy: Protocol required by IntelGopDriver
+
+IntelGopDriver and PlatformGopPolicy are only required when enabling GOP on IGD.
+
+The original IgdAssignmentDxe can be found at [3]. An Intel-maintained version
+with PlatformGopPolicy for industrial computing is at [4]. There is also an
+unofficially maintained version with newer Gen11+ device support at [5].
+You need to build them with EDK2.
+
+Intel has never released the IntelGopDriver to the public. You may contact
+Intel support to get one, as [4] suggests, if you are an Intel Premier Support
+customer, or you can try extracting it from your host firmware using
+"UEFI BIOS Updater" [6].
+
+Once you have all the required DXE drivers, an Option ROM can be generated
+with the EfiRom utility in EDK2, using
+ EfiRom -f 0x8086 -i <Device ID of your IGD> -o output.rom \
+ -e IgdAssignmentDxe.efi PlatformGOPPolicy.efi IntelGopDriver.efi
+
+
+Known issues
+============
+When using OVMF as guest firmware, you may encounter the following warning:
+warning: vfio_container_dma_map(0x55fab36ce610, 0x380010000000, 0x108000, 0x7fd336000000) = -22 (Invalid argument)
+
+Solution:
+Set the host physical address bits to the IOMMU address width using
+ -cpu host,host-phys-bits-limit=<IOMMU address width>
+Or in libvirt XML with
+ <cpu>
+ <maxphysaddr mode='passthrough' limit='<IOMMU address width>'/>
+ </cpu>
+The IOMMU address width can be determined with
+ echo $(( ((0x$(cat /sys/devices/virtual/iommu/dmar0/intel-iommu/cap) & 0x3F0000) >> 16) + 1 ))
+Refer to https://edk2.groups.io/g/devel/topic/patch_v1/102359124 for more details.
+
+
+Memory View
+===========
+IGD has its own address space. To use system RAM as VRAM, a single-level page
+table named Global Graphics Translation Table (GTT) is used for the address
+translation. Each page table entry points to a 4KB page. The illustration
+below shows the translation flow on IGD with 64-bit GTT PTEs.
+
+(PTE_SIZE == 8) +-------------+---+
+ | Address | V | V: Valid Bit
+ +-------------+---+
+ | ... | |
+IGD:0x01ae9010 0xd740| 0x70ffc000 | 1 | Mem:0x42ba3e010^
+-----------------------> 0xd748| 0x42ba3e000 | 1 +------------------>
+(addr >> 12) * PTE_SIZE 0xd750| 0x42ba3f000 | 1 |
+ | ... | |
+ +-------------+---+
+^ The address may be remapped by IOMMU
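+
+The translation arithmetic in the illustration can be reproduced with a short
+Python sketch (illustrative only; PTE_SIZE, the page size and the example
+values are taken from the figure above):
+
+  PAGE_SHIFT = 12            # 4KB pages
+  PTE_SIZE = 8               # 64-bit GTT PTEs
+
+  def gtt_pte_offset(igd_addr):
+      # Byte offset of the PTE for a given IGD address
+      return (igd_addr >> PAGE_SHIFT) * PTE_SIZE
+
+  def translate(igd_addr, pte_page_addr):
+      # Combine the page address from the PTE with the offset in the page
+      return pte_page_addr | (igd_addr & ((1 << PAGE_SHIFT) - 1))
+
+  assert gtt_pte_offset(0x01ae9010) == 0xd748
+  assert translate(0x01ae9010, 0x42ba3e000) == 0x42ba3e010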
+
+The memory region storing the GTT is called GTT Stolen Memory (GSM); it is
+located right below the Data Stolen Memory (DSM). Accessing this region
+directly is not allowed; any access will immediately freeze the whole system.
+The only way to access it is through the second half of MMIO BAR0.
+
+The Data Stolen Memory is reserved by firmware and acts as the VRAM in pre-OS
+environments. In QEMU, guest firmware (Seabios/OVMF) is responsible for
+reserving a contiguous region, programming its base address into the BDSM
+register, and then letting the VBIOS/GOP driver initialize this region. The
+illustration below shows how DSM is mapped.
+
+ IGD Addr Space Host Addr Space Guest Addr Space
+ +-------------+ +-------------+ +-------------+
+ | | | | | |
+ | | | | | |
+ | | +-------------+ +-------------+
+ | | | Data Stolen | | Data Stolen |
+ | | | (Guest) | | (Guest) |
+ | | +------------>+-------------+<------->+-------------+<--Guest BDSM
+ | | | Passthrough | | EPT | | Emulated by QEMU
+DSMSIZE+-------------+ | with IOMMU | | Mapping | | Programmed by guest FW
+ | | | | | | |
+ | | | | | | |
+ 0+-------------+--+ | | | |
+ | +-------------+ | |
+ | | Data Stolen | +-------------+
+ | | (Host) |
+ +------------>+-------------+<--Host BDSM
+ Non- | | "real" one in HW
+ Passthrough | | Programmed by host FW
+ +-------------+
Footnotes
=========
-[1] Nothing precludes adding additional emulated or assigned graphics devices
- as non-primary, other than the combination typically not working. I only
- intend to set user expectations, others are welcome to find working
- combinations or fix whatever issues prevent this from working in the common
- case.
+[1] https://www.intel.com/content/www/us/en/docs/graphics-for-linux/developer-reference/1-0/dump-video-bios.html
[2] # echo "vfio-pci" > /sys/bus/pci/devices/0000:00:02.0/driver_override
+[3] https://web.archive.org/web/20240827012422/https://bugzilla.tianocore.org/show_bug.cgi?id=935
+ Tianocore bugzilla was down since Jan 2025 :(
+[4] https://eci.intel.com/docs/3.3/components/kvm-hypervisor.html, Patch 0001-0004
+[5] https://github.com/tomitamoeko/VfioIgdPkg
+[6] https://winraid.level1techs.com/t/tool-guide-news-uefi-bios-updater-ubu/30357
diff --git a/docs/index.rst b/docs/index.rst
index 0b9ee99..5665de8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,6 +3,8 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
+.. _documentation-root:
+
================================
Welcome to QEMU's documentation!
================================
@@ -18,3 +20,4 @@ Welcome to QEMU's documentation!
interop/index
specs/index
devel/index
+ glossary
diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst
index ddf8947..7536f0b 100644
--- a/docs/interop/bitmaps.rst
+++ b/docs/interop/bitmaps.rst
@@ -97,7 +97,7 @@ time.
- Persistent storage formats may impose their own requirements on bitmap names
and namespaces. Presently, only qcow2 supports persistent bitmaps. See
- docs/interop/qcow2.txt for more details on restrictions. Notably:
+ :doc:`qcow2` for more details on restrictions. Notably:
- qcow2 bitmap names are limited to between 1 and 1023 bytes long.
diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json
index 54a1fc6..745d21d 100644
--- a/docs/interop/firmware.json
+++ b/docs/interop/firmware.json
@@ -14,8 +14,10 @@
# = Firmware
##
-{ 'include' : 'machine.json' }
-{ 'include' : 'block-core.json' }
+{ 'pragma': {
+ 'member-name-exceptions': [
+ 'FirmwareArchitecture' # x86_64
+ ] } }
##
# @FirmwareOSInterface:
@@ -61,6 +63,27 @@
'data' : [ 'flash', 'kernel', 'memory' ] }
##
+# @FirmwareArchitecture:
+#
+# Enumeration of architectures for which QEMU uses additional
+# firmware files.
+#
+# @aarch64: 64-bit Arm.
+#
+# @arm: 32-bit Arm.
+#
+# @i386: 32-bit x86.
+#
+# @loongarch64: 64-bit LoongArch. (since: 7.1)
+#
+# @x86_64: 64-bit x86.
+#
+# Since: 3.0
+##
+{ 'enum' : 'FirmwareArchitecture',
+ 'data' : [ 'aarch64', 'arm', 'i386', 'loongarch64', 'x86_64' ] }
+
+##
# @FirmwareTarget:
#
# Defines the machine types that firmware may execute on.
@@ -81,7 +104,7 @@
# Since: 3.0
##
{ 'struct' : 'FirmwareTarget',
- 'data' : { 'architecture' : 'SysEmuTarget',
+ 'data' : { 'architecture' : 'FirmwareArchitecture',
'machines' : [ 'str' ] } }
##
@@ -191,16 +214,40 @@
# PL011 UART. @verbose-static is mutually exclusive
# with @verbose-dynamic.
#
+# @host-uefi-vars: The firmware expects the host to provide a UEFI
+#     variable store.  QEMU supports that via
+#     "uefi-vars-sysbus" (aarch64, riscv64, loongarch64)
+#     or "uefi-vars-x64" (x86_64) devices.  The firmware
+#     will not use flash for nvram.  When loading the
+#     firmware into flash the 'stateless' setup should be
+#     used.  It is recommended to load the firmware into
+#     memory though.
+#
# Since: 3.0
##
{ 'enum' : 'FirmwareFeature',
'data' : [ 'acpi-s3', 'acpi-s4',
'amd-sev', 'amd-sev-es', 'amd-sev-snp',
'intel-tdx',
- 'enrolled-keys', 'requires-smm', 'secure-boot',
+ 'enrolled-keys', 'requires-smm',
+ 'secure-boot', 'host-uefi-vars',
'verbose-dynamic', 'verbose-static' ] }
##
+# @FirmwareFormat:
+#
+# Formats that are supported for firmware images.
+#
+# @raw: Raw disk image format.
+#
+# @qcow2: The QCOW2 image format.
+#
+# Since: 3.0
+##
+{ 'enum': 'FirmwareFormat',
+ 'data': [ 'raw', 'qcow2' ] }
+
+##
# @FirmwareFlashFile:
#
# Defines common properties that are necessary for loading a firmware
@@ -219,7 +266,7 @@
##
{ 'struct' : 'FirmwareFlashFile',
'data' : { 'filename' : 'str',
- 'format' : 'BlockdevDriver' } }
+ 'format' : 'FirmwareFormat' } }
##
@@ -433,7 +480,7 @@
#
# Since: 3.0
#
-# Examples:
+# .. qmp-example::
#
# {
# "description": "SeaBIOS",
diff --git a/docs/interop/index.rst b/docs/interop/index.rst
index ed65395..d830c5c 100644
--- a/docs/interop/index.rst
+++ b/docs/interop/index.rst
@@ -14,12 +14,18 @@ are useful for making QEMU interoperate with other software.
dbus-vmstate
dbus-display
live-block-operations
+ nbd
+ parallels
+ prl-xml
+ qcow2
+ qed_spec
pr-helper
qmp-spec
qemu-ga
qemu-ga-ref
qemu-qmp-ref
qemu-storage-daemon-qmp-ref
+ vfio-user
vhost-user
vhost-user-gpu
vhost-vdpa
diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst
index 691429c..6b549ed 100644
--- a/docs/interop/live-block-operations.rst
+++ b/docs/interop/live-block-operations.rst
@@ -931,8 +931,8 @@ Shutdown the guest, by issuing the ``quit`` QMP command::
}
-Live disk backup --- ``blockdev-backup`` and the deprecated``drive-backup``
----------------------------------------------------------------------------
+Live disk backup --- ``blockdev-backup`` and the deprecated ``drive-backup``
+----------------------------------------------------------------------------
The ``blockdev-backup`` (and the deprecated ``drive-backup``) allows
you to create a point-in-time snapshot.
diff --git a/docs/interop/nbd.rst b/docs/interop/nbd.rst
new file mode 100644
index 0000000..de079d3
--- /dev/null
+++ b/docs/interop/nbd.rst
@@ -0,0 +1,89 @@
+QEMU NBD protocol support
+=========================
+
+QEMU supports the NBD protocol, and has an internal NBD client (see
+``block/nbd.c``), an internal NBD server (see ``blockdev-nbd.c``), and an
+external NBD server tool (see ``qemu-nbd.c``). The common code is placed
+in ``nbd/*``.
+
+The NBD protocol is specified here:
+https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
+
+The following paragraphs describe some specific properties of NBD
+protocol realization in QEMU.
+
+Metadata namespaces
+-------------------
+
+QEMU supports the ``base:allocation`` metadata context as defined in the
+NBD protocol specification, and also defines an additional metadata
+namespace ``qemu``.
+
+``qemu`` namespace
+------------------
+
+The ``qemu`` namespace currently contains two available metadata context
+types. The first is related to exposing the contents of a dirty
+bitmap alongside the associated disk contents. That metadata context
+is named with the following form::
+
+ qemu:dirty-bitmap:<dirty-bitmap-export-name>
+
+Each dirty-bitmap metadata context defines only one flag for extents
+in reply for ``NBD_CMD_BLOCK_STATUS``:
+
+bit 0:
+ ``NBD_STATE_DIRTY``, set when the extent is "dirty"
+
+The second is related to exposing the source of various extents within
+the image, with a single metadata context named::
+
+ qemu:allocation-depth
+
+In the allocation depth context, the entire 32-bit value represents a
+depth of which layer in a thin-provisioned backing chain provided the
+data (0 for unallocated, 1 for the active layer, 2 for the first
+backing layer, and so forth).
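+
+As a non-normative sketch, a client could interpret the 32-bit value returned
+for this context as follows::
+
+  def describe_allocation_depth(value):
+      # 0 means unallocated; n > 0 counts layers from the top,
+      # with the active layer being 1.
+      if value == 0:
+          return "unallocated"
+      if value == 1:
+          return "active layer"
+      return "backing layer %d" % (value - 1)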
+
+For ``NBD_OPT_LIST_META_CONTEXT`` the following queries are supported
+in addition to the specific ``qemu:allocation-depth`` and
+``qemu:dirty-bitmap:<dirty-bitmap-export-name>``:
+
+``qemu:``
+ returns list of all available metadata contexts in the namespace
+``qemu:dirty-bitmap:``
+ returns list of all available dirty-bitmap metadata contexts
+
+Features by version
+-------------------
+
+The following list documents which qemu version first implemented
+various features (both as a server exposing the feature, and as a
+client taking advantage of the feature when present), to make it
+easier to plan for cross-version interoperability. Note that in
+several cases, the initial release containing a feature may require
+additional patches from the corresponding stable branch to fix bugs in
+the operation of that feature.
+
+2.6
+ ``NBD_OPT_STARTTLS`` with TLS X.509 Certificates
+2.8
+ ``NBD_CMD_WRITE_ZEROES``
+2.10
+ ``NBD_OPT_GO``, ``NBD_INFO_BLOCK``
+2.11
+ ``NBD_OPT_STRUCTURED_REPLY``
+2.12
+ ``NBD_CMD_BLOCK_STATUS`` for ``base:allocation``
+3.0
+ ``NBD_OPT_STARTTLS`` with TLS Pre-Shared Keys (PSK),
+ ``NBD_CMD_BLOCK_STATUS`` for ``qemu:dirty-bitmap:``, ``NBD_CMD_CACHE``
+4.2
+ ``NBD_FLAG_CAN_MULTI_CONN`` for shareable read-only exports,
+ ``NBD_CMD_FLAG_FAST_ZERO``
+5.2
+ ``NBD_CMD_BLOCK_STATUS`` for ``qemu:allocation-depth``
+7.1
+ ``NBD_FLAG_CAN_MULTI_CONN`` for shareable writable exports
+8.2
+ ``NBD_OPT_EXTENDED_HEADERS``, ``NBD_FLAG_BLOCK_STATUS_PAYLOAD``
diff --git a/docs/interop/nbd.txt b/docs/interop/nbd.txt
deleted file mode 100644
index 18efb25..0000000
--- a/docs/interop/nbd.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-QEMU supports the NBD protocol, and has an internal NBD client (see
-block/nbd.c), an internal NBD server (see blockdev-nbd.c), and an
-external NBD server tool (see qemu-nbd.c). The common code is placed
-in nbd/*.
-
-The NBD protocol is specified here:
-https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
-
-The following paragraphs describe some specific properties of NBD
-protocol realization in QEMU.
-
-= Metadata namespaces =
-
-QEMU supports the "base:allocation" metadata context as defined in the
-NBD protocol specification, and also defines an additional metadata
-namespace "qemu".
-
-== "qemu" namespace ==
-
-The "qemu" namespace currently contains two available metadata context
-types. The first is related to exposing the contents of a dirty
-bitmap alongside the associated disk contents. That metadata context
-is named with the following form:
-
- qemu:dirty-bitmap:<dirty-bitmap-export-name>
-
-Each dirty-bitmap metadata context defines only one flag for extents
-in reply for NBD_CMD_BLOCK_STATUS:
-
- bit 0: NBD_STATE_DIRTY, set when the extent is "dirty"
-
-The second is related to exposing the source of various extents within
-the image, with a single metadata context named:
-
- qemu:allocation-depth
-
-In the allocation depth context, the entire 32-bit value represents a
-depth of which layer in a thin-provisioned backing chain provided the
-data (0 for unallocated, 1 for the active layer, 2 for the first
-backing layer, and so forth).
-
-For NBD_OPT_LIST_META_CONTEXT the following queries are supported
-in addition to the specific "qemu:allocation-depth" and
-"qemu:dirty-bitmap:<dirty-bitmap-export-name>":
-
-* "qemu:" - returns list of all available metadata contexts in the
- namespace.
-* "qemu:dirty-bitmap:" - returns list of all available dirty-bitmap
- metadata contexts.
-
-= Features by version =
-
-The following list documents which qemu version first implemented
-various features (both as a server exposing the feature, and as a
-client taking advantage of the feature when present), to make it
-easier to plan for cross-version interoperability. Note that in
-several cases, the initial release containing a feature may require
-additional patches from the corresponding stable branch to fix bugs in
-the operation of that feature.
-
-* 2.6: NBD_OPT_STARTTLS with TLS X.509 Certificates
-* 2.8: NBD_CMD_WRITE_ZEROES
-* 2.10: NBD_OPT_GO, NBD_INFO_BLOCK
-* 2.11: NBD_OPT_STRUCTURED_REPLY
-* 2.12: NBD_CMD_BLOCK_STATUS for "base:allocation"
-* 3.0: NBD_OPT_STARTTLS with TLS Pre-Shared Keys (PSK),
-NBD_CMD_BLOCK_STATUS for "qemu:dirty-bitmap:", NBD_CMD_CACHE
-* 4.2: NBD_FLAG_CAN_MULTI_CONN for shareable read-only exports,
-NBD_CMD_FLAG_FAST_ZERO
-* 5.2: NBD_CMD_BLOCK_STATUS for "qemu:allocation-depth"
-* 7.1: NBD_FLAG_CAN_MULTI_CONN for shareable writable exports
-* 8.2: NBD_OPT_EXTENDED_HEADERS, NBD_FLAG_BLOCK_STATUS_PAYLOAD
diff --git a/docs/interop/parallels.rst b/docs/interop/parallels.rst
new file mode 100644
index 0000000..7b328a4
--- /dev/null
+++ b/docs/interop/parallels.rst
@@ -0,0 +1,240 @@
+Parallels Expandable Image File Format
+======================================
+
+..
+ Copyright (c) 2015 Denis Lunev
+ Copyright (c) 2015 Vladimir Sementsov-Ogievskiy
+
+ This work is licensed under the terms of the GNU GPL, version 2 or later.
+ See the COPYING file in the top-level directory.
+
+
+A Parallels expandable image file consists of three consecutive parts:
+
+* header
+* BAT
+* data area
+
+All numbers in a Parallels expandable image are stored in little-endian byte
+order.
+
+
+Definitions
+-----------
+
+Sector
+ A 512-byte data chunk.
+
+Cluster
+ A data chunk of the size specified in the image header.
+ Currently, the default size is 1MiB (2048 sectors). In previous
+ versions, cluster sizes of 63 sectors, 256 and 252 kilobytes were used.
+
+BAT
+ Block Allocation Table, an entity that contains information for
+ guest-to-host I/O data address translation.
+
+Header
+------
+
+The header is placed at the start of an image and contains the following
+fields::
+
+ Bytes:
+ 0 - 15: magic
+ Must contain "WithoutFreeSpace" or "WithouFreSpacExt".
+
+ 16 - 19: version
+ Must be 2.
+
+ 20 - 23: heads
+ Disk geometry parameter for guest.
+
+ 24 - 27: cylinders
+ Disk geometry parameter for guest.
+
+ 28 - 31: tracks
+ Cluster size, in sectors.
+
+ 32 - 35: nb_bat_entries
+ Disk size, in clusters (BAT size).
+
+ 36 - 43: nb_sectors
+ Disk size, in sectors.
+
+ For "WithoutFreeSpace" images:
+ Only the lowest 4 bytes are used. The highest 4 bytes must be
+ cleared in this case.
+
+ For "WithouFreSpacExt" images, there are no such
+ restrictions.
+
+ 44 - 47: in_use
+ Set to 0x746F6E59 when the image is opened by software in R/W
+ mode; set to 0x312e3276 when the image is closed.
+
+ A zero in this field means that the image was opened by an old
+ version of the software that doesn't support Format Extension
+ (see below).
+
+ Other values are not allowed.
+
+ 48 - 51: data_off
+ An offset, in sectors, from the start of the file to the start of
+ the data area.
+
+ For "WithoutFreeSpace" images:
+ - If data_off is zero, the offset is calculated as the end of BAT
+ table plus some padding to ensure sector size alignment.
+ - If data_off is non-zero, the offset should be aligned to sector
+ size. However it is recommended to align it to cluster size for
+ newly created images.
+
+ For "WithouFreSpacExt" images:
+ data_off must be non-zero and aligned to cluster size.
+
+ 52 - 55: flags
+ Miscellaneous flags.
+
+ Bit 0: Empty Image bit. If set, the image should be
+ considered clear.
+
+ Bits 1-31: Unused.
+
+ 56 - 63: ext_off
+ Format Extension offset, an offset, in sectors, from the start of
+ the file to the start of the Format Extension Cluster.
+
+ ext_off must meet the same requirements as cluster offsets
+ defined by BAT entries (see below).
+
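+As a non-normative illustration, the 64-byte header described above can be
+unpacked with Python's ``struct`` module (little-endian; the function and key
+names are only for this sketch)::
+
+  import struct
+
+  HEADER_FMT = "<16s5IQ3IQ"   # 64 bytes
+
+  def parse_header(buf):
+      (magic, version, heads, cylinders, tracks, nb_bat_entries,
+       nb_sectors, in_use, data_off, flags, ext_off) = \
+          struct.unpack_from(HEADER_FMT, buf)
+      assert magic in (b"WithoutFreeSpace", b"WithouFreSpacExt")
+      assert version == 2
+      return {
+          "cluster_size": tracks * 512,   # tracks = cluster size in sectors
+          "bat_entries": nb_bat_entries,
+          "disk_sectors": nb_sectors,
+          "data_off": data_off,
+          "ext_off": ext_off,
+      }
+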
+BAT
+---
+
+BAT is placed immediately after the image header. In the file, BAT is a
+contiguous array of 32-bit unsigned little-endian integers with
+``(bat_entries * 4)`` bytes size.
+
+Each BAT entry contains an offset from the start of the file to the
+corresponding cluster. The offset is expressed in clusters for
+``WithouFreSpacExt`` images and in sectors for ``WithoutFreeSpace`` images.
+
+If a BAT entry is zero, the corresponding cluster is not allocated and should
+be considered as filled with zeroes.
+
+Cluster offsets specified by BAT entries must meet the following requirements:
+
+- the value must not be lower than data offset (provided by ``header.data_off``
+ or calculated as specified above)
+- the value must be lower than the desired file size
+- the value must be unique among all BAT entries
+- the result of ``(cluster offset - data offset)`` must be aligned to
+ cluster size
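+
+A minimal sketch of the guest-to-file offset translation performed through the
+BAT (reusing the header fields parsed in the sketch above)::
+
+  def guest_to_file_offset(hdr, bat, guest_offset, extended):
+      # extended is True for "WithouFreSpacExt" images (BAT entries are in
+      # clusters) and False for "WithoutFreeSpace" images (entries in sectors)
+      cluster_size = hdr["cluster_size"]
+      entry = bat[guest_offset // cluster_size]
+      if entry == 0:
+          return None                     # unallocated, reads as zeroes
+      unit = cluster_size if extended else 512
+      return entry * unit + guest_offset % cluster_size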
+
+Data Area
+---------
+
+The data area is an area from the data offset (provided by ``header.data_off``
+or calculated as specified above) to the end of the file. It represents a
+contiguous array of clusters. Most of them are allocated by the BAT, some may
+be allocated by the ``ext_off`` field in the header while other may be
+allocated by extensions. All clusters allocated by ``ext_off`` and extensions
+should meet the same requirements as clusters specified by BAT entries.
+
+
+Format Extension
+----------------
+
+The Format Extension is an area 1 cluster in size that provides additional
+format features. This cluster is addressed by the ext_off field in the header.
+The format of the Format Extension area is the following::
+
+ 0 - 7: magic
+ Must be 0xAB234CEF23DCEA87
+
+ 8 - 23: m_CheckSum
+ The MD5 checksum of the entire Header Extension cluster except
+ the first 24 bytes.
+
+The above are followed by feature sections or "extensions". The last
+extension must be "End of features" (see below).
+
+Each feature section has the following format::
+
+ 0 - 7: magic
+ The identifier of the feature:
+ 0x0000000000000000 - End of features
+ 0x20385FAE252CB34A - Dirty bitmap
+
+ 8 - 15: flags
+ External flags for extension:
+
+ Bit 0: NECESSARY
+ If the software cannot load the extension (due to an
+ unknown magic number or error), the file should not be
+ changed. If this flag is unset and there is an error on
+ loading the extension, said extension should be dropped.
+
+ Bit 1: TRANSIT
+ If there is an unknown extension with this flag set,
+ said extension should be left as is.
+
+ If neither NECESSARY nor TRANSIT are set, the extension should be
+ dropped.
+
+ 16 - 19: data_size
+ The size of the following feature data, in bytes.
+
+ 20 - 23: unused32
+ Align header to 8 bytes boundary.
+
+ variable: data (data_size bytes)
+
+The above is followed by padding to the next 8 bytes boundary, then the
+next extension starts.
+
+The last extension must be "End of features" with all the fields set to 0.
+
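+As a non-normative sketch, walking the feature sections of the Format
+Extension cluster could look like this (names are only for illustration)::
+
+  import struct
+
+  def iter_extensions(cluster):
+      # Skip the 24-byte extension header (magic + MD5 checksum)
+      pos = 24
+      while True:
+          magic, flags, data_size, _unused = \
+              struct.unpack_from("<QQII", cluster, pos)
+          if magic == 0:                   # "End of features"
+              break
+          yield magic, flags, cluster[pos + 24 : pos + 24 + data_size]
+          # Data is padded so the next section starts on an 8-byte boundary
+          pos += 24 + (data_size + 7) // 8 * 8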
+
+Dirty bitmaps feature
+---------------------
+
+This feature provides a way of storing dirty bitmaps in the image. The fields
+of its data area are::
+
+ 0 - 7: size
+ The bitmap size, should be equal to disk size in sectors.
+
+ 8 - 23: id
+ An identifier for backup consistency checking.
+
+ 24 - 27: granularity
+ Bitmap granularity, in sectors. I.e., the number of sectors
+ corresponding to one bit of the bitmap. Granularity must be
+ a power of 2.
+
+ 28 - 31: l1_size
+ The number of entries in the L1 table of the bitmap.
+
+ variable: L1 offset table (l1_table), size: 8 * l1_size bytes
+
+The dirty bitmap described by this feature extension is stored in a set of
+clusters inside the Parallels image file. The offsets of these clusters are
+saved in the L1 offset table specified by the feature extension. Each L1 table
+entry is a 64 bit integer as described below:
+
+Given an offset in bytes into the bitmap data, the corresponding L1 entry is::
+
+ l1_table[offset / cluster_size]
+
+If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap
+are assumed to be 0.
+
+If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap
+are assumed to be 1.
+
+If an L1 table entry is not 0 or 1, it contains the corresponding cluster
+offset (in 512b sectors). Given an offset in bytes into the bitmap data the
+offset in bytes into the image file can be obtained as follows::
+
+ offset = l1_table[offset / cluster_size] * 512 + (offset % cluster_size)
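+
+As a non-normative sketch, resolving a byte of bitmap data to its location in
+the image file (``cluster_size`` in bytes, ``l1_table`` as read from the
+feature data) could look like this::
+
+  def bitmap_file_offset(l1_table, cluster_size, offset):
+      # Returns the file offset of the byte at 'offset' into the bitmap
+      # data, or None if the cluster is virtual (L1 entry 0 = all zeroes,
+      # L1 entry 1 = all ones).
+      entry = l1_table[offset // cluster_size]
+      if entry in (0, 1):
+          return None
+      return entry * 512 + offset % cluster_size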
diff --git a/docs/interop/parallels.txt b/docs/interop/parallels.txt
deleted file mode 100644
index bb3fadf..0000000
--- a/docs/interop/parallels.txt
+++ /dev/null
@@ -1,232 +0,0 @@
-= License =
-
-Copyright (c) 2015 Denis Lunev
-Copyright (c) 2015 Vladimir Sementsov-Ogievskiy
-
-This work is licensed under the terms of the GNU GPL, version 2 or later.
-See the COPYING file in the top-level directory.
-
-= Parallels Expandable Image File Format =
-
-A Parallels expandable image file consists of three consecutive parts:
- * header
- * BAT
- * data area
-
-All numbers in a Parallels expandable image are stored in little-endian byte
-order.
-
-
-== Definitions ==
-
- Sector A 512-byte data chunk.
-
- Cluster A data chunk of the size specified in the image header.
- Currently, the default size is 1MiB (2048 sectors). In previous
- versions, cluster sizes of 63 sectors, 256 and 252 kilobytes were
- used.
-
- BAT Block Allocation Table, an entity that contains information for
- guest-to-host I/O data address translation.
-
-
-== Header ==
-
-The header is placed at the start of an image and contains the following
-fields:
-
-Bytes:
- 0 - 15: magic
- Must contain "WithoutFreeSpace" or "WithouFreSpacExt".
-
- 16 - 19: version
- Must be 2.
-
- 20 - 23: heads
- Disk geometry parameter for guest.
-
- 24 - 27: cylinders
- Disk geometry parameter for guest.
-
- 28 - 31: tracks
- Cluster size, in sectors.
-
- 32 - 35: nb_bat_entries
- Disk size, in clusters (BAT size).
-
- 36 - 43: nb_sectors
- Disk size, in sectors.
-
- For "WithoutFreeSpace" images:
- Only the lowest 4 bytes are used. The highest 4 bytes must be
- cleared in this case.
-
- For "WithouFreSpacExt" images, there are no such
- restrictions.
-
- 44 - 47: in_use
- Set to 0x746F6E59 when the image is opened by software in R/W
- mode; set to 0x312e3276 when the image is closed.
-
- A zero in this field means that the image was opened by an old
- version of the software that doesn't support Format Extension
- (see below).
-
- Other values are not allowed.
-
- 48 - 51: data_off
- An offset, in sectors, from the start of the file to the start of
- the data area.
-
- For "WithoutFreeSpace" images:
- - If data_off is zero, the offset is calculated as the end of BAT
- table plus some padding to ensure sector size alignment.
- - If data_off is non-zero, the offset should be aligned to sector
- size. However it is recommended to align it to cluster size for
- newly created images.
-
- For "WithouFreSpacExt" images:
- data_off must be non-zero and aligned to cluster size.
-
- 52 - 55: flags
- Miscellaneous flags.
-
- Bit 0: Empty Image bit. If set, the image should be
- considered clear.
-
- Bits 1-31: Unused.
-
- 56 - 63: ext_off
- Format Extension offset, an offset, in sectors, from the start of
- the file to the start of the Format Extension Cluster.
-
- ext_off must meet the same requirements as cluster offsets
- defined by BAT entries (see below).
-
-
-== BAT ==
-
-BAT is placed immediately after the image header. In the file, BAT is a
-contiguous array of 32-bit unsigned little-endian integers with
-(bat_entries * 4) bytes size.
-
-Each BAT entry contains an offset from the start of the file to the
-corresponding cluster. The offset set in clusters for "WithouFreSpacExt" images
-and in sectors for "WithoutFreeSpace" images.
-
-If a BAT entry is zero, the corresponding cluster is not allocated and should
-be considered as filled with zeroes.
-
-Cluster offsets specified by BAT entries must meet the following requirements:
- - the value must not be lower than data offset (provided by header.data_off
- or calculated as specified above),
- - the value must be lower than the desired file size,
- - the value must be unique among all BAT entries,
- - the result of (cluster offset - data offset) must be aligned to cluster
- size.
-
-
-== Data Area ==
-
-The data area is an area from the data offset (provided by header.data_off or
-calculated as specified above) to the end of the file. It represents a
-contiguous array of clusters. Most of them are allocated by the BAT, some may
-be allocated by the ext_off field in the header while other may be allocated by
-extensions. All clusters allocated by ext_off and extensions should meet the
-same requirements as clusters specified by BAT entries.
-
-
-== Format Extension ==
-
-The Format Extension is an area 1 cluster in size that provides additional
-format features. This cluster is addressed by the ext_off field in the header.
-The format of the Format Extension area is the following:
-
- 0 - 7: magic
- Must be 0xAB234CEF23DCEA87
-
- 8 - 23: m_CheckSum
- The MD5 checksum of the entire Header Extension cluster except
- the first 24 bytes.
-
- The above are followed by feature sections or "extensions". The last
- extension must be "End of features" (see below).
-
-Each feature section has the following format:
-
- 0 - 7: magic
- The identifier of the feature:
- 0x0000000000000000 - End of features
- 0x20385FAE252CB34A - Dirty bitmap
-
- 8 - 15: flags
- External flags for extension:
-
- Bit 0: NECESSARY
- If the software cannot load the extension (due to an
- unknown magic number or error), the file should not be
- changed. If this flag is unset and there is an error on
- loading the extension, said extension should be dropped.
-
- Bit 1: TRANSIT
- If there is an unknown extension with this flag set,
- said extension should be left as is.
-
- If neither NECESSARY nor TRANSIT are set, the extension should be
- dropped.
-
- 16 - 19: data_size
- The size of the following feature data, in bytes.
-
- 20 - 23: unused32
- Align header to 8 bytes boundary.
-
- variable: data (data_size bytes)
-
- The above is followed by padding to the next 8 bytes boundary, then the
- next extension starts.
-
- The last extension must be "End of features" with all the fields set to 0.
-
-
-=== Dirty bitmaps feature ===
-
-This feature provides a way of storing dirty bitmaps in the image. The fields
-of its data area are:
-
- 0 - 7: size
- The bitmap size, should be equal to disk size in sectors.
-
- 8 - 23: id
- An identifier for backup consistency checking.
-
- 24 - 27: granularity
- Bitmap granularity, in sectors. I.e., the number of sectors
- corresponding to one bit of the bitmap. Granularity must be
- a power of 2.
-
- 28 - 31: l1_size
- The number of entries in the L1 table of the bitmap.
-
- variable: L1 offset table (l1_table), size: 8 * l1_size bytes
-
-The dirty bitmap described by this feature extension is stored in a set of
-clusters inside the Parallels image file. The offsets of these clusters are
-saved in the L1 offset table specified by the feature extension. Each L1 table
-entry is a 64 bit integer as described below:
-
-Given an offset in bytes into the bitmap data, corresponding L1 entry is
-
- l1_table[offset / cluster_size]
-
-If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap
-are assumed to be 0.
-
-If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap
-are assumed to be 1.
-
-If an L1 table entry is not 0 or 1, it contains the corresponding cluster
-offset (in 512b sectors). Given an offset in bytes into the bitmap data the
-offset in bytes into the image file can be obtained as follows:
-
- offset = l1_table[offset / cluster_size] * 512 + (offset % cluster_size)
diff --git a/docs/interop/prl-xml.rst b/docs/interop/prl-xml.rst
new file mode 100644
index 0000000..5bb63bb
--- /dev/null
+++ b/docs/interop/prl-xml.rst
@@ -0,0 +1,192 @@
+Parallels Disk Format
+=====================
+
+..
+ Copyright (c) 2015-2017, Virtuozzo, Inc.
+ Authors:
+ 2015 Denis Lunev <den@openvz.org>
+ 2015 Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ 2016-2017 Klim Kireev <klim.kireev@virtuozzo.com>
+ 2016-2017 Edgar Kaziakhmedov <edgar.kaziakhmedov@virtuozzo.com>
+
+ This work is licensed under the terms of the GNU GPL, version 2 or later.
+ See the COPYING file in the top-level directory.
+
+This specification contains minimal information about the Parallels Disk
+Format, which is enough to work properly with QEMU. Nevertheless, Parallels
+Cloud Server and Parallels Desktop are able to add some unspecified nodes to
+the xml and use them, but they are for internal work and don't affect
+functionality. Also, an auxiliary xml file, ``Snapshot.xml``, allows storage
+of optional snapshot information, but it doesn't influence open/read/write
+functionality. QEMU and other software should not use fields not covered in
+this document or the ``Snapshot.xml`` file, and must leave them as is.
+
+A Parallels disk consists of two parts: the set of snapshots and the disk
+descriptor file, which stores information about all files and snapshots.
+
+Definitions
+-----------
+
+Snapshot
+ a record of the contents captured at a particular time, capable
+ of storing current state. A snapshot has a UUID and a parent UUID.
+
+Snapshot image
+ an overlay representing the difference between this
+ snapshot and some earlier snapshot.
+
+Overlay
+ an image storing the different sectors between two captured states.
+
+Root image
+ a snapshot image with no parent, the root of the snapshot tree.
+
+Storage
+ the backing storage for a subset of the virtual disk. When
+ there is more than one storage in a Parallels disk then that
+ is referred to as a split image. In this case every storage
+ covers a specific address space area of the disk and has its
+ particular root image. Split images are not considered here
+ and are not supported. Each storage consists of disk
+ parameters and a list of images. The list of images always
+ contains a root image and may also contain overlays. The
+ root image can be an expandable Parallels image file or
+ plain. Overlays must be expandable.
+
+Description file
+ ``DiskDescriptor.xml`` stores information about disk parameters,
+ snapshots, and storages.
+
+Top Snapshot
+ The overlay between actual state and some previous snapshot.
+ It is not a snapshot in the classical sense because it
+ serves as the active image that the guest writes to.
+
+Sector
+ a 512-byte data chunk.
+
+Description file
+----------------
+
+All information is placed in a single XML element
+``Parallels_disk_image``.
+The element has only one attribute, ``Version``, which must be ``1.0``.
+
+The schema of ``DiskDescriptor.xml``::
+
+ <Parallels_disk_image Version="1.0">
+ <Disk_Parameters>
+ ...
+ </Disk_Parameters>
+ <StorageData>
+ ...
+ </StorageData>
+ <Snapshots>
+ ...
+ </Snapshots>
+ </Parallels_disk_image>
+
+``Disk_Parameters`` element
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``Disk_Parameters`` element describes the physical layout of the
+virtual disk and some general settings.
+
+The ``Disk_Parameters`` element MUST contain the following child elements:
+
+* ``Disk_size`` - number of sectors in the disk,
+ desired size of the disk.
+* ``Cylinders`` - number of the disk cylinders.
+* ``Heads`` - number of the disk heads.
+* ``Sectors`` - number of the disk sectors per cylinder
+ (sector size is 512 bytes)
+ Limitation: The product of the ``Heads``, ``Sectors`` and ``Cylinders``
+ values MUST be equal to the value of the Disk_size parameter.
+* ``Padding`` - must be 0. Parallels Cloud Server and Parallels Desktop may
+ use padding set to 1; however this case is not covered
+ by this specification. QEMU and other software should not open
+ such disks and should not create them.
+
+``StorageData`` element
+^^^^^^^^^^^^^^^^^^^^^^^
+
+This element of the file describes the root image and all snapshot images.
+
+The ``StorageData`` element consists of the ``Storage`` child element,
+as shown below::
+
+ <StorageData>
+ <Storage>
+ ...
+ </Storage>
+ </StorageData>
+
+A ``Storage`` element has the following child elements:
+
+* ``Start`` - start sector of the storage; in the case of non-split storage,
+  it equals 0.
+* ``End`` - number of the sector following the last sector; in the case of
+  non-split storage, it equals ``Disk_size``.
+* ``Blocksize`` - storage cluster size, number of sectors per one cluster.
+ The cluster size for each "Compressed" (see below) image in
+ a parallels disk must be equal to this field. Note: the cluster
+ size for a Parallels Expandable Image is in the ``tracks`` field of
+ its header (see :doc:`parallels`).
+* Several ``Image`` child elements.
+
+Each ``Image`` element has the following child elements:
+
+* ``GUID`` - image identifier, UUID in curly brackets.
+  For instance, ``{12345678-9abc-def1-2345-6789abcdef12}``.
+  The GUID is used by the ``Snapshots`` element to reference images
+  (see below).
+* ``Type`` - image type of the element. It can be:
+
+ * ``Plain`` for raw files.
+ * ``Compressed`` for expanding disks.
+
+* ``File`` - path to image file. The path can be relative to
+ ``DiskDescriptor.xml`` or absolute.
+
+``Snapshots`` element
+^^^^^^^^^^^^^^^^^^^^^
+
+The ``Snapshots`` element describes the snapshot relations with the snapshot tree.
+
+The element contains the set of ``Shot`` child elements, as shown below::
+
+ <Snapshots>
+ <TopGUID> ... </TopGUID> /* Optional child element */
+ <Shot>
+ ...
+ </Shot>
+ <Shot>
+ ...
+ </Shot>
+ ...
+ </Snapshots>
+
+Each ``Shot`` element contains the following child elements:
+
+* ``GUID`` - an image GUID.
+* ``ParentGUID`` - GUID of the image of the parent snapshot.
+
+The software may traverse snapshots from child to parent using the
+``<ParentGUID>`` field as reference. The ``ParentGUID`` of the root
+snapshot is ``{00000000-0000-0000-0000-000000000000}``.
+There should be only one root snapshot.
+
+The Top snapshot can be described in one of two ways: via the ``TopGUID``
+child element of the ``Snapshots`` element, or via the predefined GUID
+``{5fbaabe3-6958-40ff-92a7-860e329aab41}``. If ``TopGUID`` is defined,
+the predefined GUID is interpreted as a normal GUID. All snapshot images
+(except the Top Snapshot) should be opened read-only.
+
+There is another predefined GUID,
+``BackupID = {704718e1-2314-44c8-9087-d78ed36b0f4e}``, which is used by the
+original and some third-party software for backup. QEMU and other
+software may operate with images with ``GUID = BackupID`` as usual.
+However, it is not recommended to use this GUID for new disks.
+The Top snapshot cannot have this GUID.
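+
+As a non-normative sketch, the snapshot chain described above can be walked
+from the Top snapshot to the root image with Python's ``xml.etree`` (element
+and GUID names as defined in this document)::
+
+  import xml.etree.ElementTree as ET
+
+  ROOT_PARENT = "{00000000-0000-0000-0000-000000000000}"
+  DEFAULT_TOP = "{5fbaabe3-6958-40ff-92a7-860e329aab41}"
+
+  def snapshot_chain(descriptor_path):
+      snaps = ET.parse(descriptor_path).getroot().find("Snapshots")
+      top = snaps.findtext("TopGUID", default=DEFAULT_TOP)
+      parents = {s.findtext("GUID"): s.findtext("ParentGUID")
+                 for s in snaps.findall("Shot")}
+      chain = []
+      guid = top
+      while guid is not None and guid != ROOT_PARENT:
+          chain.append(guid)
+          guid = parents.get(guid)
+      return chain          # Top snapshot first, root image last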
diff --git a/docs/interop/prl-xml.txt b/docs/interop/prl-xml.txt
deleted file mode 100644
index cf9b3fb..0000000
--- a/docs/interop/prl-xml.txt
+++ /dev/null
@@ -1,158 +0,0 @@
-= License =
-
-Copyright (c) 2015-2017, Virtuozzo, Inc.
-Authors:
- 2015 Denis Lunev <den@openvz.org>
- 2015 Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
- 2016-2017 Klim Kireev <klim.kireev@virtuozzo.com>
- 2016-2017 Edgar Kaziakhmedov <edgar.kaziakhmedov@virtuozzo.com>
-
-This work is licensed under the terms of the GNU GPL, version 2 or later.
-See the COPYING file in the top-level directory.
-
-This specification contains minimal information about Parallels Disk Format,
-which is enough to proper work with QEMU. Nevertheless, Parallels Cloud Server
-and Parallels Desktop are able to add some unspecified nodes to xml and use
-them, but they are for internal work and don't affect functionality. Also it
-uses auxiliary xml "Snapshot.xml", which allows to store optional snapshot
-information, but it doesn't influence open/read/write functionality. QEMU and
-other software should not use fields not covered in this document and
-Snapshot.xml file and must leave them as is.
-
-= Parallels Disk Format =
-
-Parallels disk consists of two parts: the set of snapshots and the disk
-descriptor file, which stores information about all files and snapshots.
-
-== Definitions ==
- Snapshot a record of the contents captured at a particular time,
- capable of storing current state. A snapshot has UUID and
- parent UUID.
-
- Snapshot image an overlay representing the difference between this
- snapshot and some earlier snapshot.
-
- Overlay an image storing the different sectors between two captured
- states.
-
- Root image snapshot image with no parent, the root of snapshot tree.
-
- Storage the backing storage for a subset of the virtual disk. When
- there is more than one storage in a Parallels disk then that
- is referred to as a split image. In this case every storage
- covers specific address space area of the disk and has its
- particular root image. Split images are not considered here
- and are not supported. Each storage consists of disk
- parameters and a list of images. The list of images always
- contains a root image and may also contain overlays. The
- root image can be an expandable Parallels image file or
- plain. Overlays must be expandable.
-
- Description DiskDescriptor.xml stores information about disk parameters,
- file snapshots, storages.
-
- Top The overlay between actual state and some previous snapshot.
- Snapshot It is not a snapshot in the classical sense because it
- serves as the active image that the guest writes to.
-
- Sector a 512-byte data chunk.
-
-== Description file ==
-All information is placed in a single XML element Parallels_disk_image.
-The element has only one attribute "Version", that must be 1.0.
-Schema of DiskDescriptor.xml:
-
-<Parallels_disk_image Version="1.0">
- <Disk_Parameters>
- ...
- </Disk_Parameters>
- <StorageData>
- ...
- </StorageData>
- <Snapshots>
- ...
- </Snapshots>
-</Parallels_disk_image>
-
-== Disk_Parameters element ==
-The Disk_Parameters element describes the physical layout of the virtual disk
-and some general settings.
-
-The Disk_Parameters element MUST contain the following child elements:
- * Disk_size - number of sectors in the disk,
- desired size of the disk.
- * Cylinders - number of the disk cylinders.
- * Heads - number of the disk heads.
- * Sectors - number of the disk sectors per cylinder
- (sector size is 512 bytes)
- Limitation: Product of the Heads, Sectors and Cylinders
- values MUST be equal to the value of the Disk_size parameter.
- * Padding - must be 0. Parallels Cloud Server and Parallels Desktop may
- use padding set to 1, however this case is not covered
- by this spec, QEMU and other software should not open
- such disks and should not create them.
-
-== StorageData element ==
-This element of the file describes the root image and all snapshot images.
-
-The StorageData element consists of the Storage child element, as shown below:
-<StorageData>
- <Storage>
- ...
- </Storage>
-</StorageData>
-
-A Storage element has following child elements:
- * Start - start sector of the storage, in case of non split storage
- equals to 0.
- * End - number of sector following the last sector, in case of non
- split storage equals to Disk_size.
- * Blocksize - storage cluster size, number of sectors per one cluster.
- Cluster size for each "Compressed" (see below) image in
- parallels disk must be equal to this field. Note: cluster
- size for Parallels Expandable Image is in 'tracks' field of
- its header (see docs/interop/parallels.txt).
- * Several Image child elements.
-
-Each Image element has following child elements:
- * GUID - image identifier, UUID in curly brackets.
- For instance, {12345678-9abc-def1-2345-6789abcdef12}.
- The GUID is used by the Snapshots element to reference images
- (see below)
- * Type - image type of the element. It can be:
- "Plain" for raw files.
- "Compressed" for expanding disks.
- * File - path to image file. Path can be relative to DiskDescriptor.xml or
- absolute.
-
-== Snapshots element ==
-The Snapshots element describes the snapshot relations with the snapshot tree.
-
-The element contains the set of Shot child elements, as shown below:
-<Snapshots>
- <TopGUID> ... </TopGUID> /* Optional child element */
- <Shot>
- ...
- </Shot>
- <Shot>
- ...
- </Shot>
- ...
-</Snapshots>
-
-Each Shot element contains the following child elements:
- * GUID - an image GUID.
- * ParentGUID - GUID of the image of the parent snapshot.
-
-The software may traverse snapshots from child to parent using <ParentGUID>
-field as reference. ParentGUID of root snapshot is
-{00000000-0000-0000-0000-000000000000}. There should be only one root
-snapshot. Top snapshot could be described via two ways: via TopGUID child
-element of the Snapshots element or via predefined GUID
-{5fbaabe3-6958-40ff-92a7-860e329aab41}. If TopGUID is defined, predefined GUID is
-interpreted as usual GUID. All snapshot images (except Top Snapshot) should be
-opened read-only. There is another predefined GUID,
-BackupID = {704718e1-2314-44c8-9087-d78ed36b0f4e}, which is used by original and
-some third-party software for backup, QEMU and other software may operate with
-images with GUID = BackupID as usual, however, it is not recommended to use this
-GUID for new disks. Top snapshot cannot have this GUID.
diff --git a/docs/interop/qcow2.rst b/docs/interop/qcow2.rst
new file mode 100644
index 0000000..5948591
--- /dev/null
+++ b/docs/interop/qcow2.rst
@@ -0,0 +1,937 @@
+=======================
+Qcow2 Image File Format
+=======================
+
+A ``qcow2`` image file is organized in units of constant size, which are called
+(host) clusters. A cluster is the unit in which all allocations are done,
+both for actual guest data and for image metadata.
+
+Likewise, the virtual disk as seen by the guest is divided into (guest)
+clusters of the same size.
+
+All numbers in qcow2 are stored in Big Endian byte order.
+
+Header
+------
+
+The first cluster of a qcow2 image contains the file header::
+
+ Byte 0 - 3: magic
+ QCOW magic string ("QFI\xfb")
+
+ 4 - 7: version
+ Version number (valid values are 2 and 3)
+
+ 8 - 15: backing_file_offset
+ Offset into the image file at which the backing file name
+ is stored (NB: The string is not null terminated). 0 if the
+ image doesn't have a backing file.
+
+ Note: backing files are incompatible with raw external data
+ files (auto-clear feature bit 1).
+
+ 16 - 19: backing_file_size
+ Length of the backing file name in bytes. Must not be
+ longer than 1023 bytes. Undefined if the image doesn't have
+ a backing file.
+
+ 20 - 23: cluster_bits
+ Number of bits that are used for addressing an offset
+ within a cluster (1 << cluster_bits is the cluster size).
+ Must not be less than 9 (i.e. 512 byte clusters).
+
+ Note: QEMU as of today has an implementation limit of 2 MB
+ as the maximum cluster size and won't be able to open images
+ with larger cluster sizes.
+
+ Note: if the image has Extended L2 Entries then cluster_bits
+ must be at least 14 (i.e. 16384 byte clusters).
+
+ 24 - 31: size
+ Virtual disk size in bytes.
+
+ Note: QEMU has an implementation limit of 32 MB as
+ the maximum L1 table size. With a 2 MB cluster
+ size, it is unable to populate a virtual cluster
+ beyond 2 EB (61 bits); with a 512 byte cluster
+ size, it is unable to populate a virtual size
+ larger than 128 GB (37 bits). Meanwhile, L1/L2
+ table layouts limit an image to no more than 64 PB
+ (56 bits) of populated clusters, and an image may
+ hit other limits first (such as a file system's
+ maximum size).
+
+ 32 - 35: crypt_method
+ 0 for no encryption
+ 1 for AES encryption
+ 2 for LUKS encryption
+
+ 36 - 39: l1_size
+ Number of entries in the active L1 table
+
+ 40 - 47: l1_table_offset
+ Offset into the image file at which the active L1 table
+ starts. Must be aligned to a cluster boundary.
+
+ 48 - 55: refcount_table_offset
+ Offset into the image file at which the refcount table
+ starts. Must be aligned to a cluster boundary.
+
+ 56 - 59: refcount_table_clusters
+ Number of clusters that the refcount table occupies
+
+ 60 - 63: nb_snapshots
+ Number of snapshots contained in the image
+
+ 64 - 71: snapshots_offset
+ Offset into the image file at which the snapshot table
+ starts. Must be aligned to a cluster boundary.
+
+For version 2, the header is exactly 72 bytes in length, and finishes here.
+For version 3 or higher, the header length is at least 104 bytes, including
+the next fields through ``header_length``.
+::
+
+ 72 - 79: incompatible_features
+ Bitmask of incompatible features. An implementation must
+ fail to open an image if an unknown bit is set.
+
+ Bit 0: Dirty bit. If this bit is set then refcounts
+ may be inconsistent, make sure to scan L1/L2
+ tables to repair refcounts before accessing the
+ image.
+
+ Bit 1: Corrupt bit. If this bit is set then any data
+ structure may be corrupt and the image must not
+ be written to (unless for regaining
+ consistency).
+
+ Bit 2: External data file bit. If this bit is set, an
+ external data file is used. Guest clusters are
+ then stored in the external data file. For such
+ images, clusters in the external data file are
+ not refcounted. The offset field in the
+ Standard Cluster Descriptor must match the
+ guest offset and neither compressed clusters
+ nor internal snapshots are supported.
+
+ An External Data File Name header extension may
+ be present if this bit is set.
+
+ Bit 3: Compression type bit. If this bit is set,
+ a non-default compression is used for compressed
+ clusters. The compression_type field must be
+ present and not zero.
+
+ Bit 4: Extended L2 Entries. If this bit is set then
+ L2 table entries use an extended format that
+ allows subcluster-based allocation. See the
+ Extended L2 Entries section for more details.
+
+ Bits 5-63: Reserved (set to 0)
+
+ 80 - 87: compatible_features
+ Bitmask of compatible features. An implementation can
+ safely ignore any unknown bits that are set.
+
+ Bit 0: Lazy refcounts bit. If this bit is set then
+ lazy refcount updates can be used. This means
+ marking the image file dirty and postponing
+ refcount metadata updates.
+
+ Bits 1-63: Reserved (set to 0)
+
+ 88 - 95: autoclear_features
+ Bitmask of auto-clear features. An implementation may only
+ write to an image with unknown auto-clear features if it
+ clears the respective bits from this field first.
+
+ Bit 0: Bitmaps extension bit
+ This bit indicates consistency for the bitmaps
+ extension data.
+
+ It is an error if this bit is set without the
+ bitmaps extension present.
+
+ If the bitmaps extension is present but this
+ bit is unset, the bitmaps extension data must be
+ considered inconsistent.
+
+ Bit 1: Raw external data bit
+ If this bit is set, the external data file can
+ be read as a consistent standalone raw image
+ without looking at the qcow2 metadata.
+
+ Setting this bit has a performance impact for
+ some operations on the image (e.g. writing
+ zeros requires writing to the data file instead
+ of only setting the zero flag in the L2 table
+ entry) and conflicts with backing files.
+
+ This bit may only be set if the External Data
+ File bit (incompatible feature bit 1) is also
+ set.
+
+ Bits 2-63: Reserved (set to 0)
+
+ 96 - 99: refcount_order
+ Describes the width of a reference count block entry (width
+ in bits: refcount_bits = 1 << refcount_order). For version 2
+ images, the order is always assumed to be 4
+ (i.e. refcount_bits = 16).
+ This value may not exceed 6 (i.e. refcount_bits = 64).
+
+ 100 - 103: header_length
+ Length of the header structure in bytes. For version 2
+ images, the length is always assumed to be 72 bytes.
+ For version 3 it's at least 104 bytes and must be a multiple
+ of 8.
+
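+As a non-normative sketch, the fixed part of the header described above can be
+unpacked with Python's ``struct`` module (all fields big-endian; the names are
+only for this illustration)::
+
+  import struct
+
+  V2_FMT = ">4sIQIIQIIQQIIQ"      # bytes 0..71
+  V3_EXTRA_FMT = ">QQQII"         # bytes 72..103
+
+  def parse_qcow2_header(buf):
+      (magic, version, backing_file_offset, backing_file_size,
+       cluster_bits, size, crypt_method, l1_size, l1_table_offset,
+       refcount_table_offset, refcount_table_clusters, nb_snapshots,
+       snapshots_offset) = struct.unpack_from(V2_FMT, buf)
+      assert magic == b"QFI\xfb" and version in (2, 3)
+      hdr = {"version": version, "cluster_size": 1 << cluster_bits,
+             "size": size, "l1_size": l1_size}
+      if version >= 3:
+          (incompatible, compatible, autoclear,
+           refcount_order, header_length) = \
+              struct.unpack_from(V3_EXTRA_FMT, buf, 72)
+          hdr.update(incompatible_features=incompatible,
+                     refcount_bits=1 << refcount_order,
+                     header_length=header_length)
+      return hdr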
+
+Additional fields (version 3 and higher)
+----------------------------------------
+
+In general, these fields are optional and may be safely ignored by the
+software. They may also be filled with zeros (which is equivalent to the field
+being absent) if software needs to set a later field B but does not care about
+an earlier field A that precedes it. More formally, additional fields have the
+following compatibility rules:
+
+1. If the value of the additional field must not be ignored for correct
+ handling of the file, it will be accompanied by a corresponding incompatible
+ feature bit.
+
+2. If there are no unrecognized incompatible feature bits set, an unknown
+ additional field may be safely ignored other than preserving its value when
+ rewriting the image header.
+
+.. _ref_rules_3:
+
+3. An explicit value of 0 will have the same behavior as when the field is not
+ present*, if not altered by a specific incompatible bit.
+
+(*) A field is considered not present when ``header_length`` is less than or equal
+to the field's offset. Also, all additional fields are not present for
+version 2.
+
+::
+
+ 104: compression_type
+
+ Defines the compression method used for compressed clusters.
+ All compressed clusters in an image use the same compression
+ type.
+
+ If the incompatible bit "Compression type" is set: the field
+ must be present and non-zero (which means non-deflate
+ compression type). Otherwise, this field must not be present
+ or must be zero (which means deflate).
+
+ Available compression type values:
+ - 0: deflate <https://www.ietf.org/rfc/rfc1951.txt>
+ - 1: zstd <http://github.com/facebook/zstd>
+
+ The deflate compression type is called "zlib"
+ <https://www.zlib.net/> in QEMU. However, clusters with the
+ deflate compression type do not have zlib headers.
+
+ 105 - 111: Padding, contents defined below.
+
+Header padding
+--------------
+
+``header_length`` must be a multiple of 8, which means that if the end of the last
+additional field is not aligned, some padding is needed. This padding must be
+zeroed, so that if some existing (or future) additional field falls into the
+padding, it is interpreted according to point `[3.] <#ref_rules_3>`_ above,
+i.e. in the same manner as when the field is not present.
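+
+As a small illustration (a sketch only, with a hypothetical
+``end_of_last_field`` offset of the byte following the last additional field),
+the padded header length can be computed as:
+
+.. code:: python
+
+    def padded_header_length(end_of_last_field):
+        # Round up to the next multiple of 8; the padding bytes must be zero.
+        return (end_of_last_field + 7) & ~7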
+
+
+Header extensions
+-----------------
+
+Directly after the image header, optional sections called header extensions can
+be stored. Each extension has a structure like the following::
+
+ Byte 0 - 3: Header extension type:
+ 0x00000000 - End of the header extension area
+ 0xe2792aca - Backing file format name string
+ 0x6803f857 - Feature name table
+ 0x23852875 - Bitmaps extension
+ 0x0537be77 - Full disk encryption header pointer
+ 0x44415441 - External data file name string
+ other - Unknown header extension, can be safely
+ ignored
+
+ 4 - 7: Length of the header extension data
+
+ 8 - n: Header extension data
+
+ n - m: Padding to round up the header extension size to the next
+ multiple of 8.
+
+Unless stated otherwise, each header extension type shall appear at most once
+in the same image.
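+
+As an illustration, the following is a minimal sketch of walking the header
+extension area. It assumes ``f`` is a binary file object positioned directly
+after the image header; the function name is hypothetical and error handling
+is omitted.
+
+.. code:: python
+
+    import struct
+
+    def read_header_extensions(f):
+        """Yield (type, data) pairs until the end-of-extensions marker."""
+        while True:
+            ext_type, length = struct.unpack(">II", f.read(8))
+            if ext_type == 0x00000000:   # end of the header extension area
+                return
+            data = f.read(length)
+            f.read(-length % 8)          # skip padding to a multiple of 8
+            yield ext_type, data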
+
+If the image has a backing file then the backing file name should be stored in
+the remaining space between the end of the header extension area and the end of
+the first cluster. It is not allowed to store other data here, so that an
+implementation can safely modify the header and add extensions without harming
+data of compatible features that it doesn't support. Compatible features that
+need space for additional data can use a header extension.
+
+
+String header extensions
+------------------------
+
+Some header extensions (such as the backing file format name and the external
+data file name) are just a single string. In this case, the header extension
+length is the string length and the string is not ``\0`` terminated. (The header
+extension padding can make it look like a string is ``\0`` terminated, but
+neither is padding always necessary nor is there a guarantee that zero bytes
+are used for padding.)
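+
+For example, decoding such a string extension can be sketched as follows
+(``data`` and ``length`` are assumed to come from the generic extension
+structure above; the encoding is not specified by this document, UTF-8 is
+used here only for illustration):
+
+.. code:: python
+
+    def decode_string_extension(data, length):
+        # Use only the declared length; do not rely on a trailing NUL byte
+        # or on the padding bytes being zero.
+        return data[:length].decode("utf-8")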
+
+
+Feature name table
+------------------
+
+The feature name table is an optional header extension that contains the name
+for features used by the image. It can be used by applications that don't know
+the respective feature (e.g. because the feature was introduced only later) to
+display a useful error message.
+
+The number of entries in the feature name table is determined by the length of
+the header extension data. Each entry looks like this::
+
+ Byte 0: Type of feature (select feature bitmap)
+ 0: Incompatible feature
+ 1: Compatible feature
+ 2: Autoclear feature
+
+ 1: Bit number within the selected feature bitmap (valid
+ values: 0-63)
+
+ 2 - 47: Feature name (padded with zeros, but not necessarily null
+ terminated if it has full length)
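+
+A sketch of decoding the feature name table follows. ``data`` is assumed to be
+the raw header extension data described above; each entry is 48 bytes, and the
+UTF-8 decoding is only an illustrative assumption.
+
+.. code:: python
+
+    def parse_feature_name_table(data):
+        features = []
+        for i in range(0, len(data), 48):
+            feature_type = data[i]          # 0, 1 or 2 (see above)
+            bit_number = data[i + 1]
+            name = data[i + 2:i + 48].rstrip(b"\0").decode("utf-8")
+            features.append((feature_type, bit_number, name))
+        return features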
+
+
+Bitmaps extension
+-----------------
+
+The bitmaps extension is an optional header extension. It provides the ability
+to store bitmaps related to a virtual disk. For now, there is only one bitmap
+type: the dirty tracking bitmap, which tracks virtual disk changes from some
+point in time.
+
+The data of the extension should be considered consistent only if the
+corresponding auto-clear feature bit is set, see ``autoclear_features`` above.
+
+The fields of the bitmaps extension are::
+
+ Byte 0 - 3: nb_bitmaps
+ The number of bitmaps contained in the image. Must be
+ greater than or equal to 1.
+
+ Note: QEMU currently only supports up to 65535 bitmaps per
+ image.
+
+ 4 - 7: Reserved, must be zero.
+
+ 8 - 15: bitmap_directory_size
+ Size of the bitmap directory in bytes. It is the cumulative
+ size of all (nb_bitmaps) bitmap directory entries.
+
+ 16 - 23: bitmap_directory_offset
+ Offset into the image file at which the bitmap directory
+ starts. Must be aligned to a cluster boundary.
+
+Full disk encryption header pointer
+-----------------------------------
+
+The full disk encryption header must be present if, and only if, the
+``crypt_method`` header requires metadata. Currently this is only true
+of the ``LUKS`` crypt method. The header extension must be absent for
+other methods.
+
+This header provides the offset at which the crypt method can store
+its additional data, as well as the length of such data.
+::
+
+ Byte 0 - 7: Offset into the image file at which the encryption
+ header starts in bytes. Must be aligned to a cluster
+ boundary.
+ Byte 8 - 15: Length of the written encryption header in bytes.
+ Note actual space allocated in the qcow2 file may
+ be larger than this value, since it will be rounded
+ to the nearest multiple of the cluster size. Any
+ unused bytes in the allocated space will be initialized
+ to 0.
+
+For the LUKS crypt method, the encryption header works as follows.
+
+The first 592 bytes of the header clusters will contain the LUKS
+partition header. This is then followed by the key material data areas.
+The size of the key material data areas is determined by the number of
+stripes in the key slot and key size. Refer to the LUKS format
+specification (``docs/on-disk-format.pdf`` in the cryptsetup source
+package) for details of the LUKS partition header format.
+
+In the LUKS partition header, the ``payload-offset`` field will be
+calculated as normal for the LUKS spec, i.e. the size of the LUKS
+header, plus key material regions, plus padding, relative to the
+start of the LUKS header. This offset value is not required to be
+qcow2 cluster aligned. Its value is currently never used in the
+context of qcow2, since the qcow2 file format itself defines where
+the real payload offset is, but none the less a valid payload offset
+should always be present.
+
+In the LUKS key slots header, the ``key-material-offset`` is relative
+to the start of the LUKS header clusters in the qcow2 container,
+not the start of the qcow2 file.
+
+Logically the layout looks like
+::
+
+ +-----------------------------+
+ | QCow2 header |
+ | QCow2 header extension X |
+ | QCow2 header extension FDE |
+ | QCow2 header extension ... |
+ | QCow2 header extension Z |
+ +-----------------------------+
+ | ....other QCow2 tables.... |
+ . .
+ . .
+ +-----------------------------+
+ | +-------------------------+ |
+ | | LUKS partition header | |
+ | +-------------------------+ |
+ | | LUKS key material 1 | |
+ | +-------------------------+ |
+ | | LUKS key material 2 | |
+ | +-------------------------+ |
+ | | LUKS key material ... | |
+ | +-------------------------+ |
+ | | LUKS key material 8 | |
+ | +-------------------------+ |
+ +-----------------------------+
+ | QCow2 cluster payload |
+ . .
+ . .
+ . .
+ | |
+ +-----------------------------+
+
+Data encryption
+---------------
+
+When an encryption method is requested in the header, the image payload
+data must be encrypted/decrypted on every write/read. The image headers
+and metadata are never encrypted.
+
+The algorithms used for encryption vary depending on the method:
+
+ - ``AES``:
+
+ The AES cipher, in CBC mode, with 256 bit keys.
+
+ Initialization vectors are generated using the plain64 method, with
+ the virtual disk sector as the input tweak.
+
+ This format is no longer supported in QEMU system emulators, due
+ to a number of design flaws affecting its security. It is only
+ supported in the command line tools for the sake of backwards compatibility
+ and data liberation.
+
+ - ``LUKS``:
+
+ The algorithms are specified in the LUKS header.
+
+ Initialization vectors are generated using the method specified
+ in the LUKS header, with the physical disk sector as the
+ input tweak.
+
+Host cluster management
+-----------------------
+
+qcow2 manages the allocation of host clusters by maintaining a reference count
+for each host cluster. A refcount of 0 means that the cluster is free, 1 means
+that it is used, and >= 2 means that it is used and any write access must
+perform a COW (copy on write) operation.
+
+The refcounts are managed in a two-level table. The first level is called the
+refcount table and has a variable size (which is stored in the header). The
+refcount table can cover multiple clusters; however, it needs to be contiguous
+in the image file.
+
+It contains pointers to the second level structures which are called refcount
+blocks and are exactly one cluster in size.
+
+Although a large enough refcount table can reserve clusters past 64 PB
+(56 bits) (assuming the underlying protocol can even be sized that
+large), note that some qcow2 metadata such as L1/L2 tables must point
+to clusters prior to that point.
+
+.. note::
+ QEMU has an implementation limit of 8 MB as the maximum refcount
+ table size. With a 2 MB cluster size and a default refcount_order of
+ 4, it is unable to reference host resources beyond 2 EB (61 bits); in
+ the worst case, with a 512 byte cluster size and refcount_order of 6, it is
+ unable to access beyond 32 GB (35 bits).
+
+Given an offset into the image file, the refcount of its cluster can be
+obtained as follows::
+
+ refcount_block_entries = (cluster_size * 8 / refcount_bits)
+
+ refcount_block_index = (offset / cluster_size) % refcount_block_entries
+ refcount_table_index = (offset / cluster_size) / refcount_block_entries
+
+ refcount_block = load_cluster(refcount_table[refcount_table_index]);
+ return refcount_block[refcount_block_index];
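+
+The same lookup, written as a minimal Python sketch that derives the
+parameters from ``cluster_bits`` and ``refcount_order`` (``refcount_table``
+and ``load_cluster`` are assumed to exist as in the pseudocode above):
+
+.. code:: python
+
+    def cluster_refcount(offset, cluster_bits, refcount_order,
+                         refcount_table, load_cluster):
+        cluster_size = 1 << cluster_bits
+        refcount_bits = 1 << refcount_order
+        refcount_block_entries = cluster_size * 8 // refcount_bits
+
+        cluster_index = offset // cluster_size
+        refcount_block_index = cluster_index % refcount_block_entries
+        refcount_table_index = cluster_index // refcount_block_entries
+
+        refcount_block = load_cluster(refcount_table[refcount_table_index])
+        return refcount_block[refcount_block_index]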
+
+Refcount table entry::
+
+ Bit 0 - 8: Reserved (set to 0)
+
+ 9 - 63: Bits 9-63 of the offset into the image file at which the
+ refcount block starts. Must be aligned to a cluster
+ boundary.
+
+ If this is 0, the corresponding refcount block has not yet
+ been allocated. All refcounts managed by this refcount block
+ are 0.
+
+Refcount block entry ``(x = refcount_bits - 1)``::
+
+ Bit 0 - x: Reference count of the cluster. If refcount_bits implies a
+ sub-byte width, note that bit 0 means the least significant
+ bit in this context.
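+
+Because refcount entries can be narrower than a byte, the indexing above
+deserves an example. The following sketch extracts one entry from the raw
+bytes of a refcount block (``block`` is assumed to be a ``bytes`` object of
+one cluster; ``refcount_bits`` is a power of two between 1 and 64):
+
+.. code:: python
+
+    def read_refcount(block, index, refcount_bits):
+        bit = index * refcount_bits
+        if refcount_bits >= 8:
+            # Entries of one or more bytes are stored big-endian.
+            start = bit // 8
+            return int.from_bytes(block[start:start + refcount_bits // 8],
+                                  "big")
+        # Sub-byte entries: bit 0 is the least significant bit of its byte.
+        shift = bit % 8
+        return (block[bit // 8] >> shift) & ((1 << refcount_bits) - 1)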
+
+
+Cluster mapping
+---------------
+
+Just as for refcounts, qcow2 uses a two-level structure for the mapping of
+guest clusters to host clusters. They are called the L1 and L2 tables.
+
+The L1 table has a variable size (stored in the header) and may use multiple
+clusters, however it must be contiguous in the image file. L2 tables are
+exactly one cluster in size.
+
+The L1 and L2 tables have implications on the maximum virtual file
+size; for a given L1 table size, a larger cluster size is required for
+the guest to have access to more space. Furthermore, a virtual
+cluster must currently map to a host offset below 64 PB (56 bits)
+(although this limit could be relaxed by putting reserved bits into
+use). Additionally, as cluster size increases, the maximum host
+offset for a compressed cluster is reduced (a 2M cluster size requires
+compressed clusters to reside below 512 TB (49 bits), and this limit
+cannot be relaxed without an incompatible layout change).
+
+Given an offset into the virtual disk, the offset into the image file can be
+obtained as follows::
+
+ l2_entries = (cluster_size / sizeof(uint64_t)) [*]
+
+ l2_index = (offset / cluster_size) % l2_entries
+ l1_index = (offset / cluster_size) / l2_entries
+
+ l2_table = load_cluster(l1_table[l1_index]);
+ cluster_offset = l2_table[l2_index];
+
+ return cluster_offset + (offset % cluster_size)
+
+ [*] this changes if Extended L2 Entries are enabled, see next section
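+
+A slightly more concrete sketch of the same lookup for standard (uncompressed)
+clusters, masking the descriptor bits that are defined below (``l1_table`` and
+``load_cluster`` are assumed as in the pseudocode; error and COW handling are
+omitted):
+
+.. code:: python
+
+    OFFSET_MASK = 0x00fffffffffffe00    # bits 9-55 of an entry
+    COMPRESSED_FLAG = 1 << 62
+
+    def guest_to_host(offset, cluster_bits, l1_table, load_cluster):
+        cluster_size = 1 << cluster_bits
+        l2_entries = cluster_size // 8          # 8 == sizeof(uint64_t)
+
+        l2_index = (offset // cluster_size) % l2_entries
+        l1_index = (offset // cluster_size) // l2_entries
+
+        l2_table = load_cluster(l1_table[l1_index] & OFFSET_MASK)
+        l2_entry = l2_table[l2_index]
+        assert not (l2_entry & COMPRESSED_FLAG)  # standard clusters only
+        return (l2_entry & OFFSET_MASK) + (offset % cluster_size)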
+
+L1 table entry::
+
+ Bit 0 - 8: Reserved (set to 0)
+
+ 9 - 55: Bits 9-55 of the offset into the image file at which the L2
+ table starts. Must be aligned to a cluster boundary. If the
+ offset is 0, the L2 table and all clusters described by this
+ L2 table are unallocated.
+
+ 56 - 62: Reserved (set to 0)
+
+ 63: 0 for an L2 table that is unused or requires COW, 1 if its
+ refcount is exactly one. This information is only accurate
+ in the active L1 table.
+
+L2 table entry::
+
+ Bit 0 - 61: Cluster descriptor
+
+ 62: 0 for standard clusters
+ 1 for compressed clusters
+
+ 63: 0 for clusters that are unused, compressed or require COW.
+ 1 for standard clusters whose refcount is exactly one.
+ This information is only accurate in L2 tables
+ that are reachable from the active L1 table.
+
+ With external data files, all guest clusters have an
+ implicit refcount of 1 (because of the fixed host = guest
+ mapping for guest cluster offsets), so this bit should be 1
+ for all allocated clusters.
+
+Standard Cluster Descriptor::
+
+ Bit 0: If set to 1, the cluster reads as all zeros. The host
+ cluster offset can be used to describe a preallocation,
+ but it won't be used for reading data from this cluster,
+ nor is data read from the backing file if the cluster is
+ unallocated.
+
+ With version 2 or with extended L2 entries (see the next
+ section), this is always 0.
+
+ 1 - 8: Reserved (set to 0)
+
+ 9 - 55: Bits 9-55 of host cluster offset. Must be aligned to a
+ cluster boundary. If the offset is 0 and bit 63 is clear,
+ the cluster is unallocated. The offset may only be 0 with
+ bit 63 set (indicating a host cluster offset of 0) when an
+ external data file is used.
+
+ 56 - 61: Reserved (set to 0)
+
+
+Compressed Clusters Descriptor ``(x = 62 - (cluster_bits - 8))``::
+
+ Bit 0 - x-1: Host cluster offset. This is usually _not_ aligned to a
+ cluster or sector boundary! If cluster_bits is
+ small enough that this field includes bits beyond
+ 55, those upper bits must be set to 0.
+
+ x - 61: Number of additional 512-byte sectors used for the
+ compressed data, beyond the sector containing the offset
+ in the previous field. Some of these sectors may reside
+ in the next contiguous host cluster.
+
+ Note that the compressed data does not necessarily occupy
+ all of the bytes in the final sector; rather, decompression
+ stops when it has produced a cluster of data.
+
+ Another compressed cluster may map to the tail of the final
+ sector used by this compressed cluster.
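+
+A sketch of splitting a compressed cluster descriptor into its two fields
+(``descriptor`` is assumed to already be masked down to bits 0-61 of the L2
+entry; ``cluster_bits`` comes from the header):
+
+.. code:: python
+
+    def decode_compressed_descriptor(descriptor, cluster_bits):
+        x = 62 - (cluster_bits - 8)
+        host_offset = descriptor & ((1 << x) - 1)   # bits 0 .. x-1
+        additional_sectors = descriptor >> x        # bits x .. 61
+        return host_offset, additional_sectors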
+
+If a cluster is unallocated, read requests shall read the data from the backing
+file (except if bit 0 in the Standard Cluster Descriptor is set). If there is
+no backing file or the backing file is smaller than the image, they shall read
+zeros for all parts that are not covered by the backing file.
+
+Extended L2 Entries
+-------------------
+
+An image uses Extended L2 Entries if bit 4 is set on the incompatible_features
+field of the header.
+
+In these images, standard data clusters are divided into 32 subclusters of the
+same size. They are contiguous and start from the beginning of the cluster.
+Subclusters can be allocated independently, and the L2 entry contains
+information indicating the status of each one of them. Compressed data clusters
+don't have subclusters, so they are treated the same as in images without this
+feature.
+
+The size of an extended L2 entry is 128 bits so the number of entries per table
+is calculated using this formula:
+
+.. code::
+
+ l2_entries = (cluster_size / (2 * sizeof(uint64_t)))
+
+The first 64 bits have the same format as the standard L2 table entry described
+in the previous section, with the exception of bit 0 of the standard cluster
+descriptor.
+
+The last 64 bits contain a subcluster allocation bitmap with this format:
+
+Subcluster Allocation Bitmap (for standard clusters)::
+
+ Bit 0 - 31: Allocation status (one bit per subcluster)
+
+ 1: the subcluster is allocated. In this case the
+ host cluster offset field must contain a valid
+ offset.
+ 0: the subcluster is not allocated. In this case
+ read requests shall go to the backing file or
+ return zeros if there is no backing file data.
+
+ Bits are assigned starting from the least significant
+ one (i.e. bit x is used for subcluster x).
+
+ 32 - 63 Subcluster reads as zeros (one bit per subcluster)
+
+ 1: the subcluster reads as zeros. In this case the
+ allocation status bit must be unset. The host
+ cluster offset field may or may not be set.
+ 0: no effect.
+
+ Bits are assigned starting from the least significant
+ one (i.e. bit x is used for subcluster x - 32).
+
+Subcluster Allocation Bitmap (for compressed clusters)::
+
+ Bit 0 - 63: Reserved (set to 0)
+ Compressed clusters don't have subclusters,
+ so this field is not used.
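+
+A sketch of reading the status of one guest offset from the subcluster
+allocation bitmap of a standard cluster (``bitmap`` is the 64-bit value
+described above; the function name is hypothetical):
+
+.. code:: python
+
+    def subcluster_status(offset, cluster_bits, bitmap):
+        cluster_size = 1 << cluster_bits
+        subcluster_size = cluster_size // 32
+        sc = (offset % cluster_size) // subcluster_size  # subcluster index
+
+        allocated = bool(bitmap & (1 << sc))             # bits 0-31
+        reads_zero = bool(bitmap & (1 << (32 + sc)))     # bits 32-63
+        return allocated, reads_zero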
+
+Snapshots
+---------
+
+qcow2 supports internal snapshots. Their basic principle of operation is to
+switch the active L1 table, so that a different set of host clusters is
+exposed to the guest.
+
+When creating a snapshot, the L1 table should be copied and the refcount of all
+L2 tables and clusters reachable from this L1 table must be increased, so that
+a write causes a COW and isn't visible in other snapshots.
+
+When loading a snapshot, bit 63 of all entries in the new active L1 table and
+all L2 tables referenced by it must be reconstructed from the refcount table
+as it doesn't need to be accurate in inactive L1 tables.
+
+A directory of all snapshots is stored in the snapshot table, a contiguous area
+in the image file, whose starting offset and length are given by the header
+fields ``snapshots_offset`` and ``nb_snapshots``. The entries of the snapshot
+table have variable length, depending on the length of ID, name and extra data.
+
+Snapshot table entry::
+
+ Byte 0 - 7: Offset into the image file at which the L1 table for the
+ snapshot starts. Must be aligned to a cluster boundary.
+
+ 8 - 11: Number of entries in the L1 table of the snapshots
+
+ 12 - 13: Length of the unique ID string describing the snapshot
+
+ 14 - 15: Length of the name of the snapshot
+
+ 16 - 19: Time at which the snapshot was taken in seconds since the
+ Epoch
+
+ 20 - 23: Subsecond part of the time at which the snapshot was taken
+ in nanoseconds
+
+ 24 - 31: Time that the guest was running until the snapshot was
+ taken in nanoseconds
+
+ 32 - 35: Size of the VM state in bytes. 0 if no VM state is saved.
+ If there is VM state, it starts at the first cluster
+ described by the first L1 table entry that doesn't describe a
+ regular guest cluster (i.e. VM state is stored like guest
+ disk content, except that it is stored at offsets that are
+ larger than the virtual disk presented to the guest)
+
+ 36 - 39: Size of extra data in the table entry (used for future
+ extensions of the format)
+
+ variable: Extra data for future extensions. Unknown fields must be
+ ignored. Currently defined are (offset relative to snapshot
+ table entry):
+
+ Byte 40 - 47: Size of the VM state in bytes. 0 if no VM
+ state is saved. If this field is present,
+ the 32-bit value in bytes 32-35 is ignored.
+
+ Byte 48 - 55: Virtual disk size of the snapshot in bytes
+
+ Byte 56 - 63: icount value which corresponds to
+ the record/replay instruction count
+ when the snapshot was taken. Set to -1
+ if icount was disabled
+
+ Version 3 images must include extra data at least up to
+ byte 55.
+
+ variable: Unique ID string for the snapshot (not null terminated)
+
+ variable: Name of the snapshot (not null terminated)
+
+ variable: Padding to round up the snapshot table entry size to the
+ next multiple of 8.
+
+
+Bitmaps
+-------
+
+As mentioned above, the bitmaps extension provides the ability to store bitmaps
+related to a virtual disk. This section describes how these bitmaps are stored.
+
+All stored bitmaps are related to the virtual disk stored in the same image, so
+each bitmap size is equal to the virtual disk size.
+
+Each bit of the bitmap is responsible for a strictly defined range of the
+virtual disk. For bit number ``bit_nr`` the corresponding range (in bytes) is:
+
+.. code::
+
+ [bit_nr * bitmap_granularity .. (bit_nr + 1) * bitmap_granularity - 1]
+
+Granularity is a property of the concrete bitmap, see below.
+
+
+Bitmap directory
+----------------
+
+Each bitmap saved in the image is described in a bitmap directory entry. The
+bitmap directory is a contiguous area in the image file, whose starting offset
+and length are given by the header extension fields ``bitmap_directory_offset`` and
+``bitmap_directory_size``. The entries of the bitmap directory have variable
+length, depending on the lengths of the bitmap name and extra data.
+
+Structure of a bitmap directory entry::
+
+ Byte 0 - 7: bitmap_table_offset
+ Offset into the image file at which the bitmap table
+ (described below) for the bitmap starts. Must be aligned to
+ a cluster boundary.
+
+ 8 - 11: bitmap_table_size
+ Number of entries in the bitmap table of the bitmap.
+
+ 12 - 15: flags
+ Bit
+ 0: in_use
+ The bitmap was not saved correctly and may be
+ inconsistent. Although the bitmap metadata is still
+ well-formed from a qcow2 perspective, the metadata
+ (such as the auto flag or bitmap size) or data
+ contents may be outdated.
+
+ 1: auto
+ The bitmap must reflect all changes of the virtual
+ disk by any application that would write to this qcow2
+ file (including writes, snapshot switching, etc.). The
+ type of this bitmap must be 'dirty tracking bitmap'.
+
+ 2: extra_data_compatible
+ This flag is meaningful when the extra data is
+ unknown to the software (currently any extra data is
+ unknown to QEMU).
+ If it is set, the bitmap may be used as expected, and the
+ extra data must be left as is.
+ If it is not set, the bitmap must not be used, but both
+ it and its extra data must be left as is.
+
+ Bits 3 - 31 are reserved and must be 0.
+
+ 16: type
+ This field describes the sort of the bitmap.
+ Values:
+ 1: Dirty tracking bitmap
+
+ Values 0, 2 - 255 are reserved.
+
+ 17: granularity_bits
+ Granularity bits. Valid values: 0 - 63.
+
+ Note: QEMU currently supports only values 9 - 31.
+
+ Granularity is calculated as
+ granularity = 1 << granularity_bits
+
+ A bitmap's granularity is how many bytes of the image
+ account for one bit of the bitmap.
+
+ 18 - 19: name_size
+ Size of the bitmap name. Must be non-zero.
+
+ Note: QEMU currently doesn't support values greater than
+ 1023.
+
+ 20 - 23: extra_data_size
+ Size of type-specific extra data.
+
+ For now, as no extra data is defined, extra_data_size is
+ reserved and should be zero. If it is non-zero the
+ behavior is defined by the extra_data_compatible flag.
+
+ variable: extra_data
+ Extra data for the bitmap, occupying extra_data_size bytes.
+ Extra data must never contain references to clusters or in
+ some other way allocate additional clusters.
+
+ variable: name
+ The name of the bitmap (not null terminated), occupying
+ name_size bytes. Must be unique among all bitmap names
+ within the bitmaps extension.
+
+ variable: Padding to round up the bitmap directory entry size to the
+ next multiple of 8. All bytes of the padding must be zero.
+
+
+Bitmap table
+------------
+
+Each bitmap is stored using a one-level structure (as opposed to two-level
+structures like for refcounts and guest clusters mapping) for the mapping of
+bitmap data to host clusters. This structure is called the bitmap table.
+
+Each bitmap table has a variable size (stored in the bitmap directory entry)
+and may use multiple clusters, however, it must be contiguous in the image
+file.
+
+Structure of a bitmap table entry::
+
+ Bit 0: Reserved and must be zero if bits 9 - 55 are non-zero.
+ If bits 9 - 55 are zero:
+ 0: Cluster should be read as all zeros.
+ 1: Cluster should be read as all ones.
+
+ 1 - 8: Reserved and must be zero.
+
+ 9 - 55: Bits 9 - 55 of the host cluster offset. Must be aligned to
+ a cluster boundary. If the offset is 0, the cluster is
+ unallocated; in that case, bit 0 determines how this
+ cluster should be treated during reads.
+
+ 56 - 63: Reserved and must be zero.
+
+
+Bitmap data
+-----------
+
+As noted above, bitmap data is stored in separate clusters, described by the
+bitmap table. Given an offset (in bytes) into the bitmap data, the offset into
+the image file can be obtained as follows::
+
+ image_offset(bitmap_data_offset) =
+ bitmap_table[bitmap_data_offset / cluster_size] +
+ (bitmap_data_offset % cluster_size)
+
+This offset is not defined if bits 9 - 55 of the bitmap table entry are zero (see
+above).
+
+Given an offset byte_nr into the virtual disk and the bitmap's granularity, the
+bit offset into the image file to the corresponding bit of the bitmap can be
+calculated like this::
+
+ bit_offset(byte_nr) =
+ image_offset(byte_nr / granularity / 8) * 8 +
+ (byte_nr / granularity) % 8
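+
+Combining the two formulas, a minimal sketch that locates the bit belonging to
+a given virtual disk offset (``bitmap_table``, ``cluster_size`` and
+``granularity`` are assumed to be loaded already; the table entry is assumed
+to have a non-zero host cluster offset):
+
+.. code:: python
+
+    OFFSET_MASK = 0x00fffffffffffe00    # bits 9-55 of a bitmap table entry
+
+    def image_offset(bitmap_data_offset, bitmap_table, cluster_size):
+        entry = bitmap_table[bitmap_data_offset // cluster_size]
+        return (entry & OFFSET_MASK) + bitmap_data_offset % cluster_size
+
+    def bit_offset(byte_nr, bitmap_table, cluster_size, granularity):
+        bit_nr = byte_nr // granularity
+        return (image_offset(bit_nr // 8, bitmap_table, cluster_size) * 8
+                + bit_nr % 8)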
+
+If the size of the bitmap data is not a multiple of the cluster size then the
+last cluster of the bitmap data contains some unused tail bits. These bits must
+be zero.
+
+
+Dirty tracking bitmaps
+----------------------
+
+Bitmaps with the ``type`` field equal to one are dirty tracking bitmaps.
+
+When the virtual disk is in use, a dirty tracking bitmap may be ``enabled`` or
+``disabled``. While the bitmap is ``enabled``, all writes to the virtual disk
+should be reflected in the bitmap. A set bit in the bitmap means that the
+corresponding range of the virtual disk (see above) was written to while the
+bitmap was ``enabled``. An unset bit means that this range was not written to.
+
+The software doesn't have to sync the bitmap in the image file with its
+representation in RAM after each write or metadata change. The ``in_use`` flag
+should be set while the bitmap is not synced.
+
+In the image file the ``enabled`` state is reflected by the ``auto`` flag. If this
+flag is set, the software must consider the bitmap as ``enabled`` and start
+tracking virtual disk changes to this bitmap from the first write to the
+virtual disk. If this flag is not set then the bitmap is disabled.
diff --git a/docs/interop/qcow2.txt b/docs/interop/qcow2.txt
deleted file mode 100644
index 2c46183..0000000
--- a/docs/interop/qcow2.txt
+++ /dev/null
@@ -1,906 +0,0 @@
-== General ==
-
-A qcow2 image file is organized in units of constant size, which are called
-(host) clusters. A cluster is the unit in which all allocations are done,
-both for actual guest data and for image metadata.
-
-Likewise, the virtual disk as seen by the guest is divided into (guest)
-clusters of the same size.
-
-All numbers in qcow2 are stored in Big Endian byte order.
-
-
-== Header ==
-
-The first cluster of a qcow2 image contains the file header:
-
- Byte 0 - 3: magic
- QCOW magic string ("QFI\xfb")
-
- 4 - 7: version
- Version number (valid values are 2 and 3)
-
- 8 - 15: backing_file_offset
- Offset into the image file at which the backing file name
- is stored (NB: The string is not null terminated). 0 if the
- image doesn't have a backing file.
-
- Note: backing files are incompatible with raw external data
- files (auto-clear feature bit 1).
-
- 16 - 19: backing_file_size
- Length of the backing file name in bytes. Must not be
- longer than 1023 bytes. Undefined if the image doesn't have
- a backing file.
-
- 20 - 23: cluster_bits
- Number of bits that are used for addressing an offset
- within a cluster (1 << cluster_bits is the cluster size).
- Must not be less than 9 (i.e. 512 byte clusters).
-
- Note: qemu as of today has an implementation limit of 2 MB
- as the maximum cluster size and won't be able to open images
- with larger cluster sizes.
-
- Note: if the image has Extended L2 Entries then cluster_bits
- must be at least 14 (i.e. 16384 byte clusters).
-
- 24 - 31: size
- Virtual disk size in bytes.
-
- Note: qemu has an implementation limit of 32 MB as
- the maximum L1 table size. With a 2 MB cluster
- size, it is unable to populate a virtual cluster
- beyond 2 EB (61 bits); with a 512 byte cluster
- size, it is unable to populate a virtual size
- larger than 128 GB (37 bits). Meanwhile, L1/L2
- table layouts limit an image to no more than 64 PB
- (56 bits) of populated clusters, and an image may
- hit other limits first (such as a file system's
- maximum size).
-
- 32 - 35: crypt_method
- 0 for no encryption
- 1 for AES encryption
- 2 for LUKS encryption
-
- 36 - 39: l1_size
- Number of entries in the active L1 table
-
- 40 - 47: l1_table_offset
- Offset into the image file at which the active L1 table
- starts. Must be aligned to a cluster boundary.
-
- 48 - 55: refcount_table_offset
- Offset into the image file at which the refcount table
- starts. Must be aligned to a cluster boundary.
-
- 56 - 59: refcount_table_clusters
- Number of clusters that the refcount table occupies
-
- 60 - 63: nb_snapshots
- Number of snapshots contained in the image
-
- 64 - 71: snapshots_offset
- Offset into the image file at which the snapshot table
- starts. Must be aligned to a cluster boundary.
-
-For version 2, the header is exactly 72 bytes in length, and finishes here.
-For version 3 or higher, the header length is at least 104 bytes, including
-the next fields through header_length.
-
- 72 - 79: incompatible_features
- Bitmask of incompatible features. An implementation must
- fail to open an image if an unknown bit is set.
-
- Bit 0: Dirty bit. If this bit is set then refcounts
- may be inconsistent, make sure to scan L1/L2
- tables to repair refcounts before accessing the
- image.
-
- Bit 1: Corrupt bit. If this bit is set then any data
- structure may be corrupt and the image must not
- be written to (unless for regaining
- consistency).
-
- Bit 2: External data file bit. If this bit is set, an
- external data file is used. Guest clusters are
- then stored in the external data file. For such
- images, clusters in the external data file are
- not refcounted. The offset field in the
- Standard Cluster Descriptor must match the
- guest offset and neither compressed clusters
- nor internal snapshots are supported.
-
- An External Data File Name header extension may
- be present if this bit is set.
-
- Bit 3: Compression type bit. If this bit is set,
- a non-default compression is used for compressed
- clusters. The compression_type field must be
- present and not zero.
-
- Bit 4: Extended L2 Entries. If this bit is set then
- L2 table entries use an extended format that
- allows subcluster-based allocation. See the
- Extended L2 Entries section for more details.
-
- Bits 5-63: Reserved (set to 0)
-
- 80 - 87: compatible_features
- Bitmask of compatible features. An implementation can
- safely ignore any unknown bits that are set.
-
- Bit 0: Lazy refcounts bit. If this bit is set then
- lazy refcount updates can be used. This means
- marking the image file dirty and postponing
- refcount metadata updates.
-
- Bits 1-63: Reserved (set to 0)
-
- 88 - 95: autoclear_features
- Bitmask of auto-clear features. An implementation may only
- write to an image with unknown auto-clear features if it
- clears the respective bits from this field first.
-
- Bit 0: Bitmaps extension bit
- This bit indicates consistency for the bitmaps
- extension data.
-
- It is an error if this bit is set without the
- bitmaps extension present.
-
- If the bitmaps extension is present but this
- bit is unset, the bitmaps extension data must be
- considered inconsistent.
-
- Bit 1: Raw external data bit
- If this bit is set, the external data file can
- be read as a consistent standalone raw image
- without looking at the qcow2 metadata.
-
- Setting this bit has a performance impact for
- some operations on the image (e.g. writing
- zeros requires writing to the data file instead
- of only setting the zero flag in the L2 table
- entry) and conflicts with backing files.
-
- This bit may only be set if the External Data
- File bit (incompatible feature bit 1) is also
- set.
-
- Bits 2-63: Reserved (set to 0)
-
- 96 - 99: refcount_order
- Describes the width of a reference count block entry (width
- in bits: refcount_bits = 1 << refcount_order). For version 2
- images, the order is always assumed to be 4
- (i.e. refcount_bits = 16).
- This value may not exceed 6 (i.e. refcount_bits = 64).
-
- 100 - 103: header_length
- Length of the header structure in bytes. For version 2
- images, the length is always assumed to be 72 bytes.
- For version 3 it's at least 104 bytes and must be a multiple
- of 8.
-
-
-=== Additional fields (version 3 and higher) ===
-
-In general, these fields are optional and may be safely ignored by the software,
-as well as filled by zeros (which is equal to field absence), if software needs
-to set field B, but does not care about field A which precedes B. More
-formally, additional fields have the following compatibility rules:
-
-1. If the value of the additional field must not be ignored for correct
-handling of the file, it will be accompanied by a corresponding incompatible
-feature bit.
-
-2. If there are no unrecognized incompatible feature bits set, an unknown
-additional field may be safely ignored other than preserving its value when
-rewriting the image header.
-
-3. An explicit value of 0 will have the same behavior as when the field is not
-present*, if not altered by a specific incompatible bit.
-
-*. A field is considered not present when header_length is less than or equal
-to the field's offset. Also, all additional fields are not present for
-version 2.
-
- 104: compression_type
-
- Defines the compression method used for compressed clusters.
- All compressed clusters in an image use the same compression
- type.
-
- If the incompatible bit "Compression type" is set: the field
- must be present and non-zero (which means non-deflate
- compression type). Otherwise, this field must not be present
- or must be zero (which means deflate).
-
- Available compression type values:
- 0: deflate <https://www.ietf.org/rfc/rfc1951.txt>
- 1: zstd <http://github.com/facebook/zstd>
-
- The deflate compression type is called "zlib"
- <https://www.zlib.net/> in QEMU. However, clusters with the
- deflate compression type do not have zlib headers.
-
- 105 - 111: Padding, contents defined below.
-
-=== Header padding ===
-
-@header_length must be a multiple of 8, which means that if the end of the last
-additional field is not aligned, some padding is needed. This padding must be
-zeroed, so that if some existing (or future) additional field will fall into
-the padding, it will be interpreted accordingly to point [3.] of the previous
-paragraph, i.e. in the same manner as when this field is not present.
-
-
-=== Header extensions ===
-
-Directly after the image header, optional sections called header extensions can
-be stored. Each extension has a structure like the following:
-
- Byte 0 - 3: Header extension type:
- 0x00000000 - End of the header extension area
- 0xe2792aca - Backing file format name string
- 0x6803f857 - Feature name table
- 0x23852875 - Bitmaps extension
- 0x0537be77 - Full disk encryption header pointer
- 0x44415441 - External data file name string
- other - Unknown header extension, can be safely
- ignored
-
- 4 - 7: Length of the header extension data
-
- 8 - n: Header extension data
-
- n - m: Padding to round up the header extension size to the next
- multiple of 8.
-
-Unless stated otherwise, each header extension type shall appear at most once
-in the same image.
-
-If the image has a backing file then the backing file name should be stored in
-the remaining space between the end of the header extension area and the end of
-the first cluster. It is not allowed to store other data here, so that an
-implementation can safely modify the header and add extensions without harming
-data of compatible features that it doesn't support. Compatible features that
-need space for additional data can use a header extension.
-
-
-== String header extensions ==
-
-Some header extensions (such as the backing file format name and the external
-data file name) are just a single string. In this case, the header extension
-length is the string length and the string is not '\0' terminated. (The header
-extension padding can make it look like a string is '\0' terminated, but
-neither is padding always necessary nor is there a guarantee that zero bytes
-are used for padding.)
-
-
-== Feature name table ==
-
-The feature name table is an optional header extension that contains the name
-for features used by the image. It can be used by applications that don't know
-the respective feature (e.g. because the feature was introduced only later) to
-display a useful error message.
-
-The number of entries in the feature name table is determined by the length of
-the header extension data. Each entry look like this:
-
- Byte 0: Type of feature (select feature bitmap)
- 0: Incompatible feature
- 1: Compatible feature
- 2: Autoclear feature
-
- 1: Bit number within the selected feature bitmap (valid
- values: 0-63)
-
- 2 - 47: Feature name (padded with zeros, but not necessarily null
- terminated if it has full length)
-
-
-== Bitmaps extension ==
-
-The bitmaps extension is an optional header extension. It provides the ability
-to store bitmaps related to a virtual disk. For now, there is only one bitmap
-type: the dirty tracking bitmap, which tracks virtual disk changes from some
-point in time.
-
-The data of the extension should be considered consistent only if the
-corresponding auto-clear feature bit is set, see autoclear_features above.
-
-The fields of the bitmaps extension are:
-
- Byte 0 - 3: nb_bitmaps
- The number of bitmaps contained in the image. Must be
- greater than or equal to 1.
-
- Note: QEMU currently only supports up to 65535 bitmaps per
- image.
-
- 4 - 7: Reserved, must be zero.
-
- 8 - 15: bitmap_directory_size
- Size of the bitmap directory in bytes. It is the cumulative
- size of all (nb_bitmaps) bitmap directory entries.
-
- 16 - 23: bitmap_directory_offset
- Offset into the image file at which the bitmap directory
- starts. Must be aligned to a cluster boundary.
-
-== Full disk encryption header pointer ==
-
-The full disk encryption header must be present if, and only if, the
-'crypt_method' header requires metadata. Currently this is only true
-of the 'LUKS' crypt method. The header extension must be absent for
-other methods.
-
-This header provides the offset at which the crypt method can store
-its additional data, as well as the length of such data.
-
- Byte 0 - 7: Offset into the image file at which the encryption
- header starts in bytes. Must be aligned to a cluster
- boundary.
- Byte 8 - 15: Length of the written encryption header in bytes.
- Note actual space allocated in the qcow2 file may
- be larger than this value, since it will be rounded
- to the nearest multiple of the cluster size. Any
- unused bytes in the allocated space will be initialized
- to 0.
-
-For the LUKS crypt method, the encryption header works as follows.
-
-The first 592 bytes of the header clusters will contain the LUKS
-partition header. This is then followed by the key material data areas.
-The size of the key material data areas is determined by the number of
-stripes in the key slot and key size. Refer to the LUKS format
-specification ('docs/on-disk-format.pdf' in the cryptsetup source
-package) for details of the LUKS partition header format.
-
-In the LUKS partition header, the "payload-offset" field will be
-calculated as normal for the LUKS spec. ie the size of the LUKS
-header, plus key material regions, plus padding, relative to the
-start of the LUKS header. This offset value is not required to be
-qcow2 cluster aligned. Its value is currently never used in the
-context of qcow2, since the qcow2 file format itself defines where
-the real payload offset is, but none the less a valid payload offset
-should always be present.
-
-In the LUKS key slots header, the "key-material-offset" is relative
-to the start of the LUKS header clusters in the qcow2 container,
-not the start of the qcow2 file.
-
-Logically the layout looks like
-
- +-----------------------------+
- | QCow2 header |
- | QCow2 header extension X |
- | QCow2 header extension FDE |
- | QCow2 header extension ... |
- | QCow2 header extension Z |
- +-----------------------------+
- | ....other QCow2 tables.... |
- . .
- . .
- +-----------------------------+
- | +-------------------------+ |
- | | LUKS partition header | |
- | +-------------------------+ |
- | | LUKS key material 1 | |
- | +-------------------------+ |
- | | LUKS key material 2 | |
- | +-------------------------+ |
- | | LUKS key material ... | |
- | +-------------------------+ |
- | | LUKS key material 8 | |
- | +-------------------------+ |
- +-----------------------------+
- | QCow2 cluster payload |
- . .
- . .
- . .
- | |
- +-----------------------------+
-
-== Data encryption ==
-
-When an encryption method is requested in the header, the image payload
-data must be encrypted/decrypted on every write/read. The image headers
-and metadata are never encrypted.
-
-The algorithms used for encryption vary depending on the method
-
- - AES:
-
- The AES cipher, in CBC mode, with 256 bit keys.
-
- Initialization vectors generated using plain64 method, with
- the virtual disk sector as the input tweak.
-
- This format is no longer supported in QEMU system emulators, due
- to a number of design flaws affecting its security. It is only
- supported in the command line tools for the sake of back compatibility
- and data liberation.
-
- - LUKS:
-
- The algorithms are specified in the LUKS header.
-
- Initialization vectors generated using the method specified
- in the LUKS header, with the physical disk sector as the
- input tweak.
-
-== Host cluster management ==
-
-qcow2 manages the allocation of host clusters by maintaining a reference count
-for each host cluster. A refcount of 0 means that the cluster is free, 1 means
-that it is used, and >= 2 means that it is used and any write access must
-perform a COW (copy on write) operation.
-
-The refcounts are managed in a two-level table. The first level is called
-refcount table and has a variable size (which is stored in the header). The
-refcount table can cover multiple clusters, however it needs to be contiguous
-in the image file.
-
-It contains pointers to the second level structures which are called refcount
-blocks and are exactly one cluster in size.
-
-Although a large enough refcount table can reserve clusters past 64 PB
-(56 bits) (assuming the underlying protocol can even be sized that
-large), note that some qcow2 metadata such as L1/L2 tables must point
-to clusters prior to that point.
-
-Note: qemu has an implementation limit of 8 MB as the maximum refcount
-table size. With a 2 MB cluster size and a default refcount_order of
-4, it is unable to reference host resources beyond 2 EB (61 bits); in
-the worst case, with a 512 cluster size and refcount_order of 6, it is
-unable to access beyond 32 GB (35 bits).
-
-Given an offset into the image file, the refcount of its cluster can be
-obtained as follows:
-
- refcount_block_entries = (cluster_size * 8 / refcount_bits)
-
- refcount_block_index = (offset / cluster_size) % refcount_block_entries
- refcount_table_index = (offset / cluster_size) / refcount_block_entries
-
- refcount_block = load_cluster(refcount_table[refcount_table_index]);
- return refcount_block[refcount_block_index];
-
-Refcount table entry:
-
- Bit 0 - 8: Reserved (set to 0)
-
- 9 - 63: Bits 9-63 of the offset into the image file at which the
- refcount block starts. Must be aligned to a cluster
- boundary.
-
- If this is 0, the corresponding refcount block has not yet
- been allocated. All refcounts managed by this refcount block
- are 0.
-
-Refcount block entry (x = refcount_bits - 1):
-
- Bit 0 - x: Reference count of the cluster. If refcount_bits implies a
- sub-byte width, note that bit 0 means the least significant
- bit in this context.
-
-
-== Cluster mapping ==
-
-Just as for refcounts, qcow2 uses a two-level structure for the mapping of
-guest clusters to host clusters. They are called L1 and L2 table.
-
-The L1 table has a variable size (stored in the header) and may use multiple
-clusters, however it must be contiguous in the image file. L2 tables are
-exactly one cluster in size.
-
-The L1 and L2 tables have implications on the maximum virtual file
-size; for a given L1 table size, a larger cluster size is required for
-the guest to have access to more space. Furthermore, a virtual
-cluster must currently map to a host offset below 64 PB (56 bits)
-(although this limit could be relaxed by putting reserved bits into
-use). Additionally, as cluster size increases, the maximum host
-offset for a compressed cluster is reduced (a 2M cluster size requires
-compressed clusters to reside below 512 TB (49 bits), and this limit
-cannot be relaxed without an incompatible layout change).
-
-Given an offset into the virtual disk, the offset into the image file can be
-obtained as follows:
-
- l2_entries = (cluster_size / sizeof(uint64_t)) [*]
-
- l2_index = (offset / cluster_size) % l2_entries
- l1_index = (offset / cluster_size) / l2_entries
-
- l2_table = load_cluster(l1_table[l1_index]);
- cluster_offset = l2_table[l2_index];
-
- return cluster_offset + (offset % cluster_size)
-
- [*] this changes if Extended L2 Entries are enabled, see next section
-
-L1 table entry:
-
- Bit 0 - 8: Reserved (set to 0)
-
- 9 - 55: Bits 9-55 of the offset into the image file at which the L2
- table starts. Must be aligned to a cluster boundary. If the
- offset is 0, the L2 table and all clusters described by this
- L2 table are unallocated.
-
- 56 - 62: Reserved (set to 0)
-
- 63: 0 for an L2 table that is unused or requires COW, 1 if its
- refcount is exactly one. This information is only accurate
- in the active L1 table.
-
-L2 table entry:
-
- Bit 0 - 61: Cluster descriptor
-
- 62: 0 for standard clusters
- 1 for compressed clusters
-
- 63: 0 for clusters that are unused, compressed or require COW.
- 1 for standard clusters whose refcount is exactly one.
- This information is only accurate in L2 tables
- that are reachable from the active L1 table.
-
- With external data files, all guest clusters have an
- implicit refcount of 1 (because of the fixed host = guest
- mapping for guest cluster offsets), so this bit should be 1
- for all allocated clusters.
-
-Standard Cluster Descriptor:
-
- Bit 0: If set to 1, the cluster reads as all zeros. The host
- cluster offset can be used to describe a preallocation,
- but it won't be used for reading data from this cluster,
- nor is data read from the backing file if the cluster is
- unallocated.
-
- With version 2 or with extended L2 entries (see the next
- section), this is always 0.
-
- 1 - 8: Reserved (set to 0)
-
- 9 - 55: Bits 9-55 of host cluster offset. Must be aligned to a
- cluster boundary. If the offset is 0 and bit 63 is clear,
- the cluster is unallocated. The offset may only be 0 with
- bit 63 set (indicating a host cluster offset of 0) when an
- external data file is used.
-
- 56 - 61: Reserved (set to 0)
-
-
-Compressed Clusters Descriptor (x = 62 - (cluster_bits - 8)):
-
- Bit 0 - x-1: Host cluster offset. This is usually _not_ aligned to a
- cluster or sector boundary! If cluster_bits is
- small enough that this field includes bits beyond
- 55, those upper bits must be set to 0.
-
- x - 61: Number of additional 512-byte sectors used for the
- compressed data, beyond the sector containing the offset
- in the previous field. Some of these sectors may reside
- in the next contiguous host cluster.
-
- Note that the compressed data does not necessarily occupy
- all of the bytes in the final sector; rather, decompression
- stops when it has produced a cluster of data.
-
- Another compressed cluster may map to the tail of the final
- sector used by this compressed cluster.
-
-If a cluster is unallocated, read requests shall read the data from the backing
-file (except if bit 0 in the Standard Cluster Descriptor is set). If there is
-no backing file or the backing file is smaller than the image, they shall read
-zeros for all parts that are not covered by the backing file.
-
-== Extended L2 Entries ==
-
-An image uses Extended L2 Entries if bit 4 is set on the incompatible_features
-field of the header.
-
-In these images standard data clusters are divided into 32 subclusters of the
-same size. They are contiguous and start from the beginning of the cluster.
-Subclusters can be allocated independently and the L2 entry contains information
-indicating the status of each one of them. Compressed data clusters don't have
-subclusters so they are treated the same as in images without this feature.
-
-The size of an extended L2 entry is 128 bits so the number of entries per table
-is calculated using this formula:
-
- l2_entries = (cluster_size / (2 * sizeof(uint64_t)))
-
-The first 64 bits have the same format as the standard L2 table entry described
-in the previous section, with the exception of bit 0 of the standard cluster
-descriptor.
-
-The last 64 bits contain a subcluster allocation bitmap with this format:
-
-Subcluster Allocation Bitmap (for standard clusters):
-
- Bit 0 - 31: Allocation status (one bit per subcluster)
-
- 1: the subcluster is allocated. In this case the
- host cluster offset field must contain a valid
- offset.
- 0: the subcluster is not allocated. In this case
- read requests shall go to the backing file or
- return zeros if there is no backing file data.
-
- Bits are assigned starting from the least significant
- one (i.e. bit x is used for subcluster x).
-
- 32 - 63 Subcluster reads as zeros (one bit per subcluster)
-
- 1: the subcluster reads as zeros. In this case the
- allocation status bit must be unset. The host
- cluster offset field may or may not be set.
- 0: no effect.
-
- Bits are assigned starting from the least significant
- one (i.e. bit x is used for subcluster x - 32).
-
-Subcluster Allocation Bitmap (for compressed clusters):
-
- Bit 0 - 63: Reserved (set to 0)
- Compressed clusters don't have subclusters,
- so this field is not used.
-
-== Snapshots ==
-
-qcow2 supports internal snapshots. Their basic principle of operation is to
-switch the active L1 table, so that a different set of host clusters are
-exposed to the guest.
-
-When creating a snapshot, the L1 table should be copied and the refcount of all
-L2 tables and clusters reachable from this L1 table must be increased, so that
-a write causes a COW and isn't visible in other snapshots.
-
-When loading a snapshot, bit 63 of all entries in the new active L1 table and
-all L2 tables referenced by it must be reconstructed from the refcount table
-as it doesn't need to be accurate in inactive L1 tables.
-
-A directory of all snapshots is stored in the snapshot table, a contiguous area
-in the image file, whose starting offset and length are given by the header
-fields snapshots_offset and nb_snapshots. The entries of the snapshot table
-have variable length, depending on the length of ID, name and extra data.
-
-Snapshot table entry:
-
- Byte 0 - 7: Offset into the image file at which the L1 table for the
- snapshot starts. Must be aligned to a cluster boundary.
-
- 8 - 11: Number of entries in the L1 table of the snapshots
-
- 12 - 13: Length of the unique ID string describing the snapshot
-
- 14 - 15: Length of the name of the snapshot
-
- 16 - 19: Time at which the snapshot was taken in seconds since the
- Epoch
-
- 20 - 23: Subsecond part of the time at which the snapshot was taken
- in nanoseconds
-
- 24 - 31: Time that the guest was running until the snapshot was
- taken in nanoseconds
-
- 32 - 35: Size of the VM state in bytes. 0 if no VM state is saved.
- If there is VM state, it starts at the first cluster
- described by first L1 table entry that doesn't describe a
- regular guest cluster (i.e. VM state is stored like guest
- disk content, except that it is stored at offsets that are
- larger than the virtual disk presented to the guest)
-
- 36 - 39: Size of extra data in the table entry (used for future
- extensions of the format)
-
- variable: Extra data for future extensions. Unknown fields must be
- ignored. Currently defined are (offset relative to snapshot
- table entry):
-
- Byte 40 - 47: Size of the VM state in bytes. 0 if no VM
- state is saved. If this field is present,
- the 32-bit value in bytes 32-35 is ignored.
-
- Byte 48 - 55: Virtual disk size of the snapshot in bytes
-
- Byte 56 - 63: icount value which corresponds to
- the record/replay instruction count
- when the snapshot was taken. Set to -1
- if icount was disabled
-
- Version 3 images must include extra data at least up to
- byte 55.
-
- variable: Unique ID string for the snapshot (not null terminated)
-
- variable: Name of the snapshot (not null terminated)
-
- variable: Padding to round up the snapshot table entry size to the
- next multiple of 8.
-
-
-== Bitmaps ==
-
-As mentioned above, the bitmaps extension provides the ability to store bitmaps
-related to a virtual disk. This section describes how these bitmaps are stored.
-
-All stored bitmaps are related to the virtual disk stored in the same image, so
-each bitmap size is equal to the virtual disk size.
-
-Each bit of the bitmap is responsible for strictly defined range of the virtual
-disk. For bit number bit_nr the corresponding range (in bytes) will be:
-
- [bit_nr * bitmap_granularity .. (bit_nr + 1) * bitmap_granularity - 1]
-
-Granularity is a property of the concrete bitmap, see below.
-
-
-=== Bitmap directory ===
-
-Each bitmap saved in the image is described in a bitmap directory entry. The
-bitmap directory is a contiguous area in the image file, whose starting offset
-and length are given by the header extension fields bitmap_directory_offset and
-bitmap_directory_size. The entries of the bitmap directory have variable
-length, depending on the lengths of the bitmap name and extra data.
-
-Structure of a bitmap directory entry:
-
- Byte 0 - 7: bitmap_table_offset
- Offset into the image file at which the bitmap table
- (described below) for the bitmap starts. Must be aligned to
- a cluster boundary.
-
- 8 - 11: bitmap_table_size
- Number of entries in the bitmap table of the bitmap.
-
- 12 - 15: flags
- Bit
- 0: in_use
- The bitmap was not saved correctly and may be
- inconsistent. Although the bitmap metadata is still
- well-formed from a qcow2 perspective, the metadata
- (such as the auto flag or bitmap size) or data
- contents may be outdated.
-
- 1: auto
- The bitmap must reflect all changes of the virtual
- disk by any application that would write to this qcow2
- file (including writes, snapshot switching, etc.). The
- type of this bitmap must be 'dirty tracking bitmap'.
-
- 2: extra_data_compatible
- This flags is meaningful when the extra data is
- unknown to the software (currently any extra data is
- unknown to QEMU).
- If it is set, the bitmap may be used as expected, extra
- data must be left as is.
- If it is not set, the bitmap must not be used, but
- both it and its extra data be left as is.
-
- Bits 3 - 31 are reserved and must be 0.
-
- 16: type
- This field describes the sort of the bitmap.
- Values:
- 1: Dirty tracking bitmap
-
- Values 0, 2 - 255 are reserved.
-
- 17: granularity_bits
- Granularity bits. Valid values: 0 - 63.
-
- Note: QEMU currently supports only values 9 - 31.
-
- Granularity is calculated as
- granularity = 1 << granularity_bits
-
- A bitmap's granularity is how many bytes of the image
- accounts for one bit of the bitmap.
-
- 18 - 19: name_size
- Size of the bitmap name. Must be non-zero.
-
- Note: QEMU currently doesn't support values greater than
- 1023.
-
- 20 - 23: extra_data_size
- Size of type-specific extra data.
-
- For now, as no extra data is defined, extra_data_size is
- reserved and should be zero. If it is non-zero the
- behavior is defined by extra_data_compatible flag.
-
- variable: extra_data
- Extra data for the bitmap, occupying extra_data_size bytes.
- Extra data must never contain references to clusters or in
- some other way allocate additional clusters.
-
- variable: name
- The name of the bitmap (not null terminated), occupying
- name_size bytes. Must be unique among all bitmap names
- within the bitmaps extension.
-
- variable: Padding to round up the bitmap directory entry size to the
- next multiple of 8. All bytes of the padding must be zero.
-
-
-=== Bitmap table ===
-
-Each bitmap is stored using a one-level structure (as opposed to two-level
-structures like for refcounts and guest clusters mapping) for the mapping of
-bitmap data to host clusters. This structure is called the bitmap table.
-
-Each bitmap table has a variable size (stored in the bitmap directory entry)
-and may use multiple clusters, however, it must be contiguous in the image
-file.
-
-Structure of a bitmap table entry:
-
- Bit 0: Reserved and must be zero if bits 9 - 55 are non-zero.
- If bits 9 - 55 are zero:
- 0: Cluster should be read as all zeros.
- 1: Cluster should be read as all ones.
-
- 1 - 8: Reserved and must be zero.
-
- 9 - 55: Bits 9 - 55 of the host cluster offset. Must be aligned to
- a cluster boundary. If the offset is 0, the cluster is
- unallocated; in that case, bit 0 determines how this
- cluster should be treated during reads.
-
- 56 - 63: Reserved and must be zero.
-
-
-=== Bitmap data ===
-
-As noted above, bitmap data is stored in separate clusters, described by the
-bitmap table. Given an offset (in bytes) into the bitmap data, the offset into
-the image file can be obtained as follows:
-
- image_offset(bitmap_data_offset) =
- bitmap_table[bitmap_data_offset / cluster_size] +
- (bitmap_data_offset % cluster_size)
-
-This offset is not defined if bits 9 - 55 of bitmap table entry are zero (see
-above).
-
-Given an offset byte_nr into the virtual disk and the bitmap's granularity, the
-bit offset into the image file to the corresponding bit of the bitmap can be
-calculated like this:
-
- bit_offset(byte_nr) =
- image_offset(byte_nr / granularity / 8) * 8 +
- (byte_nr / granularity) % 8
-
-If the size of the bitmap data is not a multiple of the cluster size then the
-last cluster of the bitmap data contains some unused tail bits. These bits must
-be zero.
-
-
-=== Dirty tracking bitmaps ===
-
-Bitmaps with 'type' field equal to one are dirty tracking bitmaps.
-
-When the virtual disk is in use dirty tracking bitmap may be 'enabled' or
-'disabled'. While the bitmap is 'enabled', all writes to the virtual disk
-should be reflected in the bitmap. A set bit in the bitmap means that the
-corresponding range of the virtual disk (see above) was written to while the
-bitmap was 'enabled'. An unset bit means that this range was not written to.
-
-The software doesn't have to sync the bitmap in the image file with its
-representation in RAM after each write or metadata change. Flag 'in_use'
-should be set while the bitmap is not synced.
-
-In the image file the 'enabled' state is reflected by the 'auto' flag. If this
-flag is set, the software must consider the bitmap as 'enabled' and start
-tracking virtual disk changes to this bitmap from the first write to the
-virtual disk. If this flag is not set then the bitmap is disabled.
diff --git a/docs/interop/qed_spec.rst b/docs/interop/qed_spec.rst
new file mode 100644
index 0000000..cd6c7d9
--- /dev/null
+++ b/docs/interop/qed_spec.rst
@@ -0,0 +1,219 @@
+===================================
+QED Image File Format Specification
+===================================
+
+The file format looks like this::
+
+ +----------+----------+----------+-----+
+ | cluster0 | cluster1 | cluster2 | ... |
+ +----------+----------+----------+-----+
+
+The first cluster begins with the ``header``. The header contains information
+about where regular clusters start; this allows the header to be extensible and
+store extra information about the image file. A regular cluster may be
+a ``data cluster``, an ``L2``, or an ``L1 table``. L1 and L2 tables are composed
+of one or more contiguous clusters.
+
+Normally the file size will be a multiple of the cluster size. If the file size
+is not a multiple, extra information after the last cluster may not be preserved
+if data is written. Legitimate extra information should use space between the header
+and the first regular cluster.
+
+All fields are little-endian.
+
+Header
+------
+
+::
+
+ Header {
+ uint32_t magic; /* QED\0 */
+
+ uint32_t cluster_size; /* in bytes */
+ uint32_t table_size; /* for L1 and L2 tables, in clusters */
+ uint32_t header_size; /* in clusters */
+
+ uint64_t features; /* format feature bits */
+ uint64_t compat_features; /* compat feature bits */
+ uint64_t autoclear_features; /* self-resetting feature bits */
+
+ uint64_t l1_table_offset; /* in bytes */
+ uint64_t image_size; /* total logical image size, in bytes */
+
+ /* if (features & QED_F_BACKING_FILE) */
+ uint32_t backing_filename_offset; /* in bytes from start of header */
+ uint32_t backing_filename_size; /* in bytes */
+ }
+
+Field descriptions:
+~~~~~~~~~~~~~~~~~~~
+
+- ``cluster_size`` must be a power of 2 in range [2^12, 2^26].
+- ``table_size`` must be a power of 2 in range [1, 16].
+- ``header_size`` is the number of clusters used by the header and any additional
+ information stored before regular clusters.
+- ``features``, ``compat_features``, and ``autoclear_features`` are file format
+ extension bitmaps. They work as follows:
+
+ - An image with unknown ``features`` bits enabled must not be opened. File format
+ changes that are not backwards-compatible must use ``features`` bits.
+ - An image with unknown ``compat_features`` bits enabled can be opened safely.
+ The unknown features are simply ignored and represent backwards-compatible
+ changes to the file format.
+  - An image with unknown ``autoclear_features`` bits enabled can be opened safely
+ after clearing the unknown bits. This allows for backwards-compatible changes
+ to the file format which degrade gracefully and can be re-enabled again by a
+ new program later.
+- ``l1_table_offset`` is the offset of the first byte of the L1 table in the image
+ file and must be a multiple of ``cluster_size``.
+- ``image_size`` is the block device size seen by the guest and must be a multiple
+ of 512 bytes.
+- ``backing_filename_offset`` and ``backing_filename_size`` describe a string in
+ (byte offset, byte size) form. It is not NUL-terminated and has no alignment constraints.
+ The string must be stored within the first ``header_size`` clusters. The backing filename
+ may be an absolute path or relative to the image file.
+
+Feature bits:
+~~~~~~~~~~~~~
+
+- ``QED_F_BACKING_FILE = 0x01``. The image uses a backing file.
+- ``QED_F_NEED_CHECK = 0x02``. The image needs a consistency check before use.
+- ``QED_F_BACKING_FORMAT_NO_PROBE = 0x04``. The backing file is a raw disk image
+ and no file format autodetection should be attempted. This should be used to
+ ensure that raw backing files are never detected as an image format if they happen
+ to contain magic constants.
+
+There are currently no defined ``compat_features`` or ``autoclear_features`` bits.
+
+Fields predicated on a feature bit are only used when that feature is set.
+The fields always take up header space, regardless of whether or not the feature
+bit is set.
+
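+The constraints above lend themselves to mechanical checking. The following is
+a non-normative Python sketch of header parsing and validation; the field
+offsets follow the ``Header`` structure above and the helper is illustrative
+rather than a reference implementation::
+
+  import struct
+
+  QED_MAGIC = 0x00444551   # 'Q', 'E', 'D', '\0' read as a little-endian uint32
+  QED_F_BACKING_FILE = 0x01
+  QED_F_NEED_CHECK = 0x02
+  QED_F_BACKING_FORMAT_NO_PROBE = 0x04
+  QED_FEATURE_MASK = (QED_F_BACKING_FILE | QED_F_NEED_CHECK |
+                      QED_F_BACKING_FORMAT_NO_PROBE)
+
+  def parse_qed_header(buf):
+      # Bytes 0-55: magic, cluster_size, table_size, header_size, features,
+      # compat_features, autoclear_features, l1_table_offset, image_size.
+      (magic, cluster_size, table_size, header_size,
+       features, compat_features, autoclear_features,
+       l1_table_offset, image_size) = struct.unpack_from('<4I5Q', buf, 0)
+
+      assert magic == QED_MAGIC
+      assert 2**12 <= cluster_size <= 2**26
+      assert cluster_size & (cluster_size - 1) == 0    # power of 2
+      assert 1 <= table_size <= 16
+      assert table_size & (table_size - 1) == 0        # power of 2
+      assert l1_table_offset % cluster_size == 0
+      assert image_size % 512 == 0
+      # Unknown 'features' bits are fatal; unknown 'compat_features' bits are
+      # ignored; unknown 'autoclear_features' bits must be cleared on open.
+      assert features & ~QED_FEATURE_MASK == 0
+      return cluster_size, table_size, features, l1_table_offset, image_size
+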
+Tables
+------
+
+Tables provide the translation from logical offsets in the block device to cluster
+offsets in the file.
+
+::
+
+ #define TABLE_NOFFSETS (table_size * cluster_size / sizeof(uint64_t))
+
+ Table {
+ uint64_t offsets[TABLE_NOFFSETS];
+ }
+
+The tables are organized as follows::
+
+ +----------+
+ | L1 table |
+ +----------+
+ ,------' | '------.
+ +----------+ | +----------+
+ | L2 table | ... | L2 table |
+ +----------+ +----------+
+ ,------' | '------.
+ +----------+ | +----------+
+ | Data | ... | Data |
+ +----------+ +----------+
+
+A table is made up of one or more contiguous clusters. The ``table_size`` header
+field determines table size for an image file. For example, ``cluster_size=64 KB``
+and ``table_size=4`` results in 256 KB tables.
+
+The logical image size must be less than or equal to the maximum possible size of
+clusters rooted by the L1 table:
+
+.. code::
+
+ header.image_size <= TABLE_NOFFSETS * TABLE_NOFFSETS * header.cluster_size
+
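+As a non-normative illustration, the example values above give::
+
+  cluster_size = 64 * 1024                 # 64 KB clusters
+  table_size = 4                           # clusters per L1/L2 table
+  table_bytes = table_size * cluster_size  # 256 KB per table
+  table_noffsets = table_bytes // 8        # 32768 offsets per table
+  max_image_size = table_noffsets * table_noffsets * cluster_size
+  # 32768 * 32768 * 65536 = 2**46 bytes, i.e. a 64 TB maximum image size
+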
+L1, L2, and data cluster offsets must be aligned to ``header.cluster_size``.
+The following offsets have special meanings:
+
+L2 table offsets
+~~~~~~~~~~~~~~~~
+
+- 0 - unallocated. The L2 table is not yet allocated.
+
+Data cluster offsets
+~~~~~~~~~~~~~~~~~~~~
+
+- 0 - unallocated. The data cluster is not yet allocated.
+- 1 - zero. The data cluster contents are all zeroes and no cluster is allocated.
+
+Future format extensions may wish to store per-offset information. The least
+significant 12 bits of an offset are reserved for this purpose and must be set
+to zero. Image files with ``cluster_size`` > 2^12 will have more unused bits
+which should also be zeroed.
+
+Unallocated L2 tables and data clusters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Reads to an unallocated area of the image file access the backing file. If there
+is no backing file, then zeroes are produced. The backing file may be smaller
+than the image file and reads of unallocated areas beyond the end of the backing
+file produce zeroes.
+
+Writes to an unallocated area cause a new data cluster to be allocated, and a new
+L2 table if that is also unallocated. The new data cluster is populated with data
+from the backing file (or zeroes if no backing file) and the data being written.
+
+Zero data clusters
+~~~~~~~~~~~~~~~~~~
+
+Zero data clusters are a space-efficient way of storing zeroed regions of the image.
+
+Reads to a zero data cluster produce zeroes.
+
+.. note::
+ The difference between an unallocated and a zero data cluster is that zero data
+ clusters stop the reading of contents from the backing file.
+
+Writes to a zero data cluster cause a new data cluster to be allocated. The new
+data cluster is populated with zeroes and the data being written.
+
+Logical offset translation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Logical offsets are translated into cluster offsets as follows::
+
+ table_bits table_bits cluster_bits
+ <--------> <--------> <--------------->
+ +----------+----------+-----------------+
+ | L1 index | L2 index | byte offset |
+ +----------+----------+-----------------+
+
+ Structure of a logical offset
+
+ offset_mask = ~(cluster_size - 1) # mask for the image file byte offset
+
+ def logical_to_cluster_offset(l1_index, l2_index, byte_offset):
+ l2_offset = l1_table[l1_index]
+ l2_table = load_table(l2_offset)
+ cluster_offset = l2_table[l2_index] & offset_mask
+ return cluster_offset + byte_offset
+
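+The indices used above can be derived from a logical byte offset. This
+non-normative sketch assumes ``cluster_size`` and ``table_size`` come from the
+header and are powers of two, as required::
+
+  def split_logical_offset(offset, cluster_size, table_size):
+      table_noffsets = table_size * cluster_size // 8   # TABLE_NOFFSETS
+      cluster_bits = cluster_size.bit_length() - 1
+      table_bits = table_noffsets.bit_length() - 1
+
+      byte_offset = offset & (cluster_size - 1)
+      l2_index = (offset >> cluster_bits) & (table_noffsets - 1)
+      l1_index = offset >> (cluster_bits + table_bits)
+      return l1_index, l2_index, byte_offset
+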
+Consistency checking
+--------------------
+
+This section is informational and included to provide background on the use
+of the ``QED_F_NEED_CHECK`` bit in the ``features`` field.
+
+The ``QED_F_NEED_CHECK`` bit is used to mark an image as dirty before starting
+an operation that could leave the image in an inconsistent state if interrupted
+by a crash or power failure. A dirty image must be checked on open because its
+metadata may not be consistent.
+
+Consistency check includes the following invariants:
+
+- Each cluster is referenced once and only once. It is an inconsistency to have
+ a cluster referenced more than once by L1 or L2 tables. A cluster has been leaked
+ if it has no references.
+- Offsets must be within the image file size and must be ``cluster_size`` aligned.
+- Table offsets must be at least ``table_size`` * ``cluster_size`` bytes from the end
+ of the image file so that there is space for the entire table.
+
+The consistency check process starts from ``l1_table_offset`` and scans all L2 tables.
+After the check completes with no other errors besides leaks, the ``QED_F_NEED_CHECK``
+bit can be cleared and the image can be accessed.
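+
+As an informational illustration only, a check pass could be structured as
+follows; ``load_table()`` is the same helper used in the translation
+pseudo-code above, and ``header``, ``file_size`` and ``l1_table`` are assumed
+to have been read already::
+
+  def check_image(header, file_size, l1_table):
+      offset_mask = ~(header.cluster_size - 1)
+      table_bytes = header.table_size * header.cluster_size
+      refcounts = {}                     # cluster offset -> reference count
+
+      def check_offset(offset, length):
+          assert offset % header.cluster_size == 0
+          assert offset + length <= file_size
+          refcounts[offset] = refcounts.get(offset, 0) + 1
+
+      for l2_offset in l1_table:
+          if l2_offset == 0:
+              continue                   # unallocated L2 table
+          check_offset(l2_offset, table_bytes)
+          for entry in load_table(l2_offset):
+              if entry in (0, 1):
+                  continue               # unallocated or zero data cluster
+              check_offset(entry & offset_mask, header.cluster_size)
+
+      # Every referenced cluster must be referenced exactly once; clusters
+      # that are never referenced are merely leaked, which is not fatal.
+      assert all(count == 1 for count in refcounts.values())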
diff --git a/docs/interop/qed_spec.txt b/docs/interop/qed_spec.txt
deleted file mode 100644
index 7982e05..0000000
--- a/docs/interop/qed_spec.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-=Specification=
-
-The file format looks like this:
-
- +----------+----------+----------+-----+
- | cluster0 | cluster1 | cluster2 | ... |
- +----------+----------+----------+-----+
-
-The first cluster begins with the '''header'''. The header contains information about where regular clusters start; this allows the header to be extensible and store extra information about the image file. A regular cluster may be a '''data cluster''', an '''L2''', or an '''L1 table'''. L1 and L2 tables are composed of one or more contiguous clusters.
-
-Normally the file size will be a multiple of the cluster size. If the file size is not a multiple, extra information after the last cluster may not be preserved if data is written. Legitimate extra information should use space between the header and the first regular cluster.
-
-All fields are little-endian.
-
-==Header==
- Header {
- uint32_t magic; /* QED\0 */
-
- uint32_t cluster_size; /* in bytes */
- uint32_t table_size; /* for L1 and L2 tables, in clusters */
- uint32_t header_size; /* in clusters */
-
- uint64_t features; /* format feature bits */
- uint64_t compat_features; /* compat feature bits */
- uint64_t autoclear_features; /* self-resetting feature bits */
-
- uint64_t l1_table_offset; /* in bytes */
- uint64_t image_size; /* total logical image size, in bytes */
-
- /* if (features & QED_F_BACKING_FILE) */
- uint32_t backing_filename_offset; /* in bytes from start of header */
- uint32_t backing_filename_size; /* in bytes */
- }
-
-Field descriptions:
-* ''cluster_size'' must be a power of 2 in range [2^12, 2^26].
-* ''table_size'' must be a power of 2 in range [1, 16].
-* ''header_size'' is the number of clusters used by the header and any additional information stored before regular clusters.
-* ''features'', ''compat_features'', and ''autoclear_features'' are file format extension bitmaps. They work as follows:
-** An image with unknown ''features'' bits enabled must not be opened. File format changes that are not backwards-compatible must use ''features'' bits.
-** An image with unknown ''compat_features'' bits enabled can be opened safely. The unknown features are simply ignored and represent backwards-compatible changes to the file format.
-** An image with unknown ''autoclear_features'' bits enable can be opened safely after clearing the unknown bits. This allows for backwards-compatible changes to the file format which degrade gracefully and can be re-enabled again by a new program later.
-* ''l1_table_offset'' is the offset of the first byte of the L1 table in the image file and must be a multiple of ''cluster_size''.
-* ''image_size'' is the block device size seen by the guest and must be a multiple of 512 bytes.
-* ''backing_filename_offset'' and ''backing_filename_size'' describe a string in (byte offset, byte size) form. It is not NUL-terminated and has no alignment constraints. The string must be stored within the first ''header_size'' clusters. The backing filename may be an absolute path or relative to the image file.
-
-Feature bits:
-* QED_F_BACKING_FILE = 0x01. The image uses a backing file.
-* QED_F_NEED_CHECK = 0x02. The image needs a consistency check before use.
-* QED_F_BACKING_FORMAT_NO_PROBE = 0x04. The backing file is a raw disk image and no file format autodetection should be attempted. This should be used to ensure that raw backing files are never detected as an image format if they happen to contain magic constants.
-
-There are currently no defined ''compat_features'' or ''autoclear_features'' bits.
-
-Fields predicated on a feature bit are only used when that feature is set. The fields always take up header space, regardless of whether or not the feature bit is set.
-
-==Tables==
-
-Tables provide the translation from logical offsets in the block device to cluster offsets in the file.
-
- #define TABLE_NOFFSETS (table_size * cluster_size / sizeof(uint64_t))
-
- Table {
- uint64_t offsets[TABLE_NOFFSETS];
- }
-
-The tables are organized as follows:
-
- +----------+
- | L1 table |
- +----------+
- ,------' | '------.
- +----------+ | +----------+
- | L2 table | ... | L2 table |
- +----------+ +----------+
- ,------' | '------.
- +----------+ | +----------+
- | Data | ... | Data |
- +----------+ +----------+
-
-A table is made up of one or more contiguous clusters. The table_size header field determines table size for an image file. For example, cluster_size=64 KB and table_size=4 results in 256 KB tables.
-
-The logical image size must be less than or equal to the maximum possible size of clusters rooted by the L1 table:
- header.image_size <= TABLE_NOFFSETS * TABLE_NOFFSETS * header.cluster_size
-
-L1, L2, and data cluster offsets must be aligned to header.cluster_size. The following offsets have special meanings:
-
-===L2 table offsets===
-* 0 - unallocated. The L2 table is not yet allocated.
-
-===Data cluster offsets===
-* 0 - unallocated. The data cluster is not yet allocated.
-* 1 - zero. The data cluster contents are all zeroes and no cluster is allocated.
-
-Future format extensions may wish to store per-offset information. The least significant 12 bits of an offset are reserved for this purpose and must be set to zero. Image files with cluster_size > 2^12 will have more unused bits which should also be zeroed.
-
-===Unallocated L2 tables and data clusters===
-Reads to an unallocated area of the image file access the backing file. If there is no backing file, then zeroes are produced. The backing file may be smaller than the image file and reads of unallocated areas beyond the end of the backing file produce zeroes.
-
-Writes to an unallocated area cause a new data clusters to be allocated, and a new L2 table if that is also unallocated. The new data cluster is populated with data from the backing file (or zeroes if no backing file) and the data being written.
-
-===Zero data clusters===
-Zero data clusters are a space-efficient way of storing zeroed regions of the image.
-
-Reads to a zero data cluster produce zeroes. Note that the difference between an unallocated and a zero data cluster is that zero data clusters stop the reading of contents from the backing file.
-
-Writes to a zero data cluster cause a new data cluster to be allocated. The new data cluster is populated with zeroes and the data being written.
-
-===Logical offset translation===
-Logical offsets are translated into cluster offsets as follows:
-
- table_bits table_bits cluster_bits
- <--------> <--------> <--------------->
- +----------+----------+-----------------+
- | L1 index | L2 index | byte offset |
- +----------+----------+-----------------+
-
- Structure of a logical offset
-
- offset_mask = ~(cluster_size - 1) # mask for the image file byte offset
-
- def logical_to_cluster_offset(l1_index, l2_index, byte_offset):
- l2_offset = l1_table[l1_index]
- l2_table = load_table(l2_offset)
- cluster_offset = l2_table[l2_index] & offset_mask
- return cluster_offset + byte_offset
-
-==Consistency checking==
-
-This section is informational and included to provide background on the use of the QED_F_NEED_CHECK ''features'' bit.
-
-The QED_F_NEED_CHECK bit is used to mark an image as dirty before starting an operation that could leave the image in an inconsistent state if interrupted by a crash or power failure. A dirty image must be checked on open because its metadata may not be consistent.
-
-Consistency check includes the following invariants:
-# Each cluster is referenced once and only once. It is an inconsistency to have a cluster referenced more than once by L1 or L2 tables. A cluster has been leaked if it has no references.
-# Offsets must be within the image file size and must be ''cluster_size'' aligned.
-# Table offsets must at least ''table_size'' * ''cluster_size'' bytes from the end of the image file so that there is space for the entire table.
-
-The consistency check process starts by from ''l1_table_offset'' and scans all L2 tables. After the check completes with no other errors besides leaks, the QED_F_NEED_CHECK bit can be cleared and the image can be accessed.
diff --git a/docs/interop/qemu-ga-ref.rst b/docs/interop/qemu-ga-ref.rst
index 032d492..25f6e24 100644
--- a/docs/interop/qemu-ga-ref.rst
+++ b/docs/interop/qemu-ga-ref.rst
@@ -1,7 +1,6 @@
QEMU Guest Agent Protocol Reference
===================================
-.. contents::
- :depth: 3
-
.. qapi-doc:: qga/qapi-schema.json
+ :transmogrify:
+ :namespace: QGA
diff --git a/docs/interop/qemu-ga.rst b/docs/interop/qemu-ga.rst
index 72fb75a..d16cc1b 100644
--- a/docs/interop/qemu-ga.rst
+++ b/docs/interop/qemu-ga.rst
@@ -1,3 +1,5 @@
+.. _qemu-ga:
+
QEMU Guest Agent
================
@@ -28,11 +30,30 @@ configuration options on the command line. For the same key, the last
option wins, but the lists accumulate (see below for configuration
file format).
+If an allowed RPCs list is defined in the configuration, then all
+RPCs will be blocked by default, except for the allowed list.
+
+If a blocked RPCs list is defined in the configuration, then all
+RPCs will be allowed by default, except for the blocked list.
+
+If both allowed and blocked RPCs lists are defined in the configuration,
+then all RPCs will be blocked by default, then the allowed list will
+be applied, followed by the blocked list.
+
+While filesystems are frozen, all except for a designated safe set
+of RPCs will be blocked, regardless of what the general configuration
+declares.
+
Options
-------
.. program:: qemu-ga
+.. option:: -c, --config=PATH
+
+ Configuration file path (the default is |CONFDIR|\ ``/qemu-ga.conf``,
+ unless overridden by the QGA_CONF environment variable)
+
.. option:: -m, --method=METHOD
Transport method: one of ``unix-listen``, ``virtio-serial``, or
@@ -131,6 +152,7 @@ fsfreeze-hook string
statedir string
verbose boolean
block-rpcs string list
+allow-rpcs string list
============= ===========
See also
diff --git a/docs/interop/qemu-qmp-ref.rst b/docs/interop/qemu-qmp-ref.rst
index f94614a..3bc1ca1 100644
--- a/docs/interop/qemu-qmp-ref.rst
+++ b/docs/interop/qemu-qmp-ref.rst
@@ -4,6 +4,8 @@ QEMU QMP Reference Manual
=========================
.. contents::
- :depth: 3
+ :local:
.. qapi-doc:: qapi/qapi-schema.json
+ :transmogrify:
+ :namespace: QMP
diff --git a/docs/interop/qemu-storage-daemon-qmp-ref.rst b/docs/interop/qemu-storage-daemon-qmp-ref.rst
index 9fed681..dc7bde2 100644
--- a/docs/interop/qemu-storage-daemon-qmp-ref.rst
+++ b/docs/interop/qemu-storage-daemon-qmp-ref.rst
@@ -2,6 +2,8 @@ QEMU Storage Daemon QMP Reference Manual
========================================
.. contents::
- :depth: 3
+ :local:
.. qapi-doc:: storage-daemon/qapi/qapi-schema.json
+ :transmogrify:
+ :namespace: QSD
diff --git a/docs/interop/vfio-user.rst b/docs/interop/vfio-user.rst
new file mode 100644
index 0000000..0b06f02
--- /dev/null
+++ b/docs/interop/vfio-user.rst
@@ -0,0 +1,1520 @@
+.. include:: <isonum.txt>
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+================================
+vfio-user Protocol Specification
+================================
+
+.. contents:: Table of Contents
+
+Introduction
+============
+vfio-user is a protocol that allows a device to be emulated in a separate
+process outside of a Virtual Machine Monitor (VMM). vfio-user devices consist
+of a generic VFIO device type, living inside the VMM, which we call the client,
+and the core device implementation, living outside the VMM, which we call the
+server.
+
+The vfio-user specification is partly based on the
+`Linux VFIO ioctl interface <https://www.kernel.org/doc/html/latest/driver-api/vfio.html>`_.
+
+VFIO is a mature and stable API, backed by an extensively used framework. The
+existing VFIO client implementation in QEMU (``qemu/hw/vfio/``) can be largely
+re-used, though there is nothing in this specification that requires that
+particular implementation. None of the VFIO kernel modules are required for
+supporting the protocol, on either the client or server side. Some source
+definitions in VFIO are re-used for vfio-user.
+
+The main idea is to allow a virtual device to function in a separate process in
+the same host over a UNIX domain socket. A UNIX domain socket (``AF_UNIX``) is
+chosen because file descriptors can be trivially sent over it, which in turn
+allows:
+
+* Sharing of client memory for DMA with the server.
+* Sharing of server memory with the client for fast MMIO.
+* Efficient sharing of eventfd's for triggering interrupts.
+
+Other socket types could be used which allow the server to run in a separate
+guest in the same host (``AF_VSOCK``) or remotely (``AF_INET``). Theoretically
+the underlying transport does not have to be a socket; however, we do not
+examine such alternatives. In this protocol version we focus on using a UNIX
+domain socket and introduce basic support for the other two types of sockets
+without considering performance implications.
+
+While passing of file descriptors is desirable for performance reasons, support
+is not necessary for either the client or the server in order to implement the
+protocol. There is always an in-band, message-passing fallback mechanism.
+
+Overview
+========
+
+VFIO is a framework that allows a physical device to be securely passed through
+to a user space process; the device-specific kernel driver does not drive the
+device at all. Typically, the user space process is a VMM and the device is
+passed through to it in order to achieve high performance. VFIO provides an API
+and the required functionality in the kernel. QEMU has adopted VFIO to allow a
+guest to directly access physical devices, instead of emulating them in
+software.
+
+vfio-user reuses the core VFIO concepts defined in its API, but implements them
+as messages to be sent over a socket. It does not change the kernel-based VFIO
+in any way, in fact none of the VFIO kernel modules need to be loaded to use
+vfio-user. It is also possible for the client to concurrently use the current
+kernel-based VFIO for one device, and vfio-user for another device.
+
+VFIO Device Model
+-----------------
+
+A device under VFIO presents a standard interface to the user process. Many of
+the VFIO operations in the existing interface use the ``ioctl()`` system call, and
+references to the existing interface are called the ``ioctl()`` implementation in
+this document.
+
+The following sections describe the set of messages that implement the vfio-user
+interface over a socket. In many cases, the messages are analogous to data
+structures used in the ``ioctl()`` implementation. Messages derived from the
+``ioctl()`` will have a name derived from the ``ioctl()`` command name. E.g., the
+``VFIO_DEVICE_GET_INFO`` ``ioctl()`` command becomes a
+``VFIO_USER_DEVICE_GET_INFO`` message. The purpose of this reuse is to share as
+much code as feasible with the ``ioctl()`` implementation.
+
+Connection Initiation
+^^^^^^^^^^^^^^^^^^^^^
+
+After the client connects to the server, the initial client message is
+``VFIO_USER_VERSION`` to propose a protocol version and set of capabilities to
+apply to the session. The server replies with a compatible version and set of
+capabilities it supports, or closes the connection if it cannot support the
+advertised version.
+
+Device Information
+^^^^^^^^^^^^^^^^^^
+
+The client uses a ``VFIO_USER_DEVICE_GET_INFO`` message to query the server for
+information about the device. This information includes:
+
+* The device type and whether it supports reset (``VFIO_DEVICE_FLAGS_``),
+* the number of device regions, and
+* the number of interrupt types the device supports.
+
+Region Information
+^^^^^^^^^^^^^^^^^^
+
+The client uses ``VFIO_USER_DEVICE_GET_REGION_INFO`` messages to query the
+server for information about the device's regions. This information describes:
+
+* Read and write permissions, whether it can be memory mapped, and whether it
+ supports additional capabilities (``VFIO_REGION_INFO_CAP_``).
+* Region index, size, and offset.
+
+When a device region can be mapped by the client, the server provides a file
+descriptor which the client can ``mmap()``. The server is responsible for
+polling for client updates to memory mapped regions.
+
+Region Capabilities
+"""""""""""""""""""
+
+Some regions have additional capabilities that cannot be described adequately
+by the region info data structure. These capabilities are returned in the
+region info reply in a list similar to PCI capabilities in a PCI device's
+configuration space.
+
+Sparse Regions
+""""""""""""""
+A region can be memory-mappable in whole or in part. When only a subset of a
+region can be mapped by the client, a ``VFIO_REGION_INFO_CAP_SPARSE_MMAP``
+capability is included in the region info reply. This capability describes
+which portions can be mapped by the client.
+
+.. Note::
+ For example, in a virtual NVMe controller, sparse regions can be used so
+ that accesses to the NVMe registers (found in the beginning of BAR0) are
+ trapped (an infrequent event), while allowing direct access to the doorbells
+ (an extremely frequent event as every I/O submission requires a write to
+ BAR0), found in the next page after the NVMe registers in BAR0.
+
+Device-Specific Regions
+"""""""""""""""""""""""
+
+A device can define regions additional to the standard ones (e.g. PCI indexes
+0-8). This is achieved by including a ``VFIO_REGION_INFO_CAP_TYPE`` capability
+in the region info reply of a device-specific region. Such regions are reflected
+in ``struct vfio_user_device_info.num_regions``. Thus, for PCI devices this
+value can be equal to, or higher than, ``VFIO_PCI_NUM_REGIONS``.
+
+Region I/O via file descriptors
+-------------------------------
+
+For unmapped regions, region I/O from the client is done via
+``VFIO_USER_REGION_READ/WRITE``. As an optimization, ioeventfds or ioregionfds
+may be configured for sub-regions of some regions. A client may request
+information on these sub-regions via ``VFIO_USER_DEVICE_GET_REGION_IO_FDS``; by
+configuring the returned file descriptors as ioeventfds or ioregionfds, the
+server can be directly notified of I/O (for example, by KVM) without taking a
+trip through the client.
+
+Interrupts
+^^^^^^^^^^
+
+The client uses ``VFIO_USER_DEVICE_GET_IRQ_INFO`` messages to query the server
+for the device's interrupt types. The interrupt types are specific to the bus
+the device is attached to, and the client is expected to know the capabilities
+of each interrupt type. The server can signal an interrupt by directly injecting
+interrupts into the guest via an event file descriptor. The client configures
+how the server signals an interrupt with ``VFIO_USER_SET_IRQS`` messages.
+
+Device Read and Write
+^^^^^^^^^^^^^^^^^^^^^
+
+When the guest executes load or store operations to an unmapped device region,
+the client forwards these operations to the server with
+``VFIO_USER_REGION_READ`` or ``VFIO_USER_REGION_WRITE`` messages. The server
+will reply with data from the device on read operations or an acknowledgement on
+write operations. See `Read and Write Operations`_.
+
+Client memory access
+--------------------
+
+The client uses ``VFIO_USER_DMA_MAP`` and ``VFIO_USER_DMA_UNMAP`` messages to
+inform the server of the valid DMA ranges that the server can access on behalf
+of a device (typically, VM guest memory). DMA memory may be accessed by the
+server via ``VFIO_USER_DMA_READ`` and ``VFIO_USER_DMA_WRITE`` messages over the
+socket. In this case, the "DMA" part of the naming is a misnomer.
+
+Actual direct memory access of client memory from the server is possible if the
+client provides file descriptors the server can ``mmap()``. Note that ``mmap()``
+privileges cannot be revoked by the client, therefore file descriptors should
+only be exported in environments where the client trusts the server not to
+corrupt guest memory.
+
+See `Read and Write Operations`_.
+
+Client/server interactions
+==========================
+
+Socket
+------
+
+A server can serve:
+
+1) one or more clients, and/or
+2) one or more virtual devices, belonging to one or more clients.
+
+The current protocol specification requires a dedicated socket per
+client/server connection. It is a server-side implementation detail whether a
+single server handles multiple virtual devices from the same or multiple
+clients. The location of the socket is implementation-specific. Multiplexing
+clients, devices, and servers over the same socket is not supported in this
+version of the protocol.
+
+Authentication
+--------------
+
+For ``AF_UNIX``, we rely on OS mandatory access controls on the socket files,
+therefore it is up to the management layer to set up the socket as required.
+Socket types that span guests or hosts will require a proper authentication
+mechanism. Defining that mechanism is deferred to a future version of the
+protocol.
+
+Command Concurrency
+-------------------
+
+A client may pipeline multiple commands without waiting for previous command
+replies. The server will process commands in the order they are received. A
+consequence of this is that if a client issues a command with the *No_reply* bit
+set, then subsequently issues a command without *No_reply*, the older command will
+have been processed before the reply to the younger command is sent by the
+server. The client must be aware of the device's capability to process
+concurrent commands if pipelining is used. For example, pipelining allows
+multiple client threads to concurrently access device regions; the client must
+ensure these accesses obey device semantics.
+
+An example is a frame buffer device, where the device may allow concurrent
+access to different areas of video memory, but may have indeterminate behavior
+if concurrent accesses are performed to command or status registers.
+
+Note that unrelated messages sent from the server to the client can appear in
+between a client to server request/reply and vice versa.
+
+Implementers should be prepared for certain commands to exhibit potentially
+unbounded latencies. For example, ``VFIO_USER_DEVICE_RESET`` may take an
+arbitrarily long time to complete; clients should take care not to block
+unnecessarily.
+
+Socket Disconnection Behavior
+-----------------------------
+The server and the client can disconnect from each other, either intentionally
+or unexpectedly. Both the client and the server need to know how to handle such
+events.
+
+Server Disconnection
+^^^^^^^^^^^^^^^^^^^^
+A server disconnecting from the client may indicate that:
+
+1) A virtual device has been restarted, either intentionally (e.g. because of a
+ device update) or unintentionally (e.g. because of a crash).
+2) A virtual device has been shut down with no intention to be restarted.
+
+It is impossible for the client to know whether or not a failure is
+intermittent or innocuous and should be retried, therefore the client should
+reset the VFIO device when it detects the socket has been disconnected.
+Error recovery will be driven by the guest's device error handling
+behavior.
+
+Client Disconnection
+^^^^^^^^^^^^^^^^^^^^
+The client disconnecting from the server primarily means that the client
+has exited. Currently, this means that the guest is shut down, so the device is
+no longer needed and the server can automatically exit. However, there
+can be cases where a client disconnection should not result in a server exit:
+
+1) A single server serving multiple clients.
+2) A multi-process QEMU upgrading itself step by step, which is not yet
+ implemented.
+
+Therefore in order for the protocol to be forward compatible, the server should
+respond to a client disconnection as follows:
+
+ - all client memory regions are unmapped and cleaned up (including closing any
+ passed file descriptors)
+ - all IRQ file descriptors passed from the old client are closed
+ - the device state should otherwise be retained
+
+The expectation is that when a client reconnects, it will re-establish IRQ and
+client memory mappings.
+
+If anything happens to the client (such as QEMU actually exiting), the control
+stack will know about it and can clean up resources accordingly.
+
+Security Considerations
+-----------------------
+
+Speaking generally, vfio-user clients should not trust servers, and vice versa.
+Standard tools and mechanisms should be used on both sides to validate input and
+protect against denial of service scenarios, buffer overflows, etc.
+
+Request Retry and Response Timeout
+----------------------------------
+A failed command is a command that has been successfully sent and has been
+responded to with an error code. Failure to send the command in the first place
+(e.g. because the socket is disconnected) is a different type of error examined
+earlier in the disconnect section.
+
+.. Note::
+ QEMU's VFIO retries certain operations if they fail. While this makes sense
+ for real HW, we don't know for sure whether it makes sense for virtual
+ devices.
+
+Defining a retry and timeout scheme is deferred to a future version of the
+protocol.
+
+Message sizes
+-------------
+
+Some requests have an ``argsz`` field. In a request, it defines the maximum
+expected reply payload size, which should be at least the size of the fixed
+reply payload headers defined here. The *request* payload size is defined by the
+usual ``msg_size`` field in the header, not the ``argsz`` field.
+
+In a reply, the server sets the ``argsz`` field to the size needed for the full
+reply payload. This may be less than the requested maximum size. If it is larger
+than the requested maximum size, the full payload is not included in the reply;
+instead, the ``argsz`` field in the reply indicates the needed size, allowing a
+client to allocate a larger buffer for holding the reply before trying again.
+
+In addition, during negotiation (see `Version`_), the client and server may
+each specify a ``max_data_xfer_size`` value; this defines the maximum data that
+may be read or written via one of the ``VFIO_USER_DMA/REGION_READ/WRITE``
+messages; see `Read and Write Operations`_.
+
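+The intended client pattern can be illustrated with a non-normative sketch,
+where ``send_command()`` and ``reply_argsz()`` are placeholders for whatever
+transport and decoding a client implementation provides::
+
+  def query_with_argsz(command, payload, initial_argsz):
+      # Ask for up to 'argsz' bytes of reply payload; if the server reports
+      # that the full payload is larger, retry with the reported size.
+      argsz = initial_argsz
+      while True:
+          reply = send_command(command, payload, argsz)
+          needed = reply_argsz(reply)
+          if needed <= argsz:
+              return reply               # the full payload was included
+          argsz = needed                 # allocate a larger buffer and retry
+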
+Protocol Specification
+======================
+
+To distinguish from the base VFIO symbols, all vfio-user symbols are prefixed
+with ``vfio_user`` or ``VFIO_USER``. In this revision, all data is in the
+endianness of the host system, although this may be relaxed in future
+revisions in cases where the client and server run on different hosts
+with different endianness.
+
+Unless otherwise specified, all sizes should be presumed to be in bytes.
+
+.. _Commands:
+
+Commands
+--------
+The following table lists the VFIO message command IDs, and whether the
+message command is sent from the client or the server.
+
+====================================== ========= =================
+Name Command Request Direction
+====================================== ========= =================
+``VFIO_USER_VERSION`` 1 client -> server
+``VFIO_USER_DMA_MAP`` 2 client -> server
+``VFIO_USER_DMA_UNMAP`` 3 client -> server
+``VFIO_USER_DEVICE_GET_INFO`` 4 client -> server
+``VFIO_USER_DEVICE_GET_REGION_INFO`` 5 client -> server
+``VFIO_USER_DEVICE_GET_REGION_IO_FDS`` 6 client -> server
+``VFIO_USER_DEVICE_GET_IRQ_INFO`` 7 client -> server
+``VFIO_USER_DEVICE_SET_IRQS`` 8 client -> server
+``VFIO_USER_REGION_READ`` 9 client -> server
+``VFIO_USER_REGION_WRITE`` 10 client -> server
+``VFIO_USER_DMA_READ`` 11 server -> client
+``VFIO_USER_DMA_WRITE`` 12 server -> client
+``VFIO_USER_DEVICE_RESET`` 13 client -> server
+``VFIO_USER_REGION_WRITE_MULTI`` 15 client -> server
+====================================== ========= =================
+
+Header
+------
+
+All messages, both command messages and reply messages, are preceded by a
+16-byte header that contains basic information about the message. The header is
+followed by message-specific data described in the sections below.
+
++----------------+--------+-------------+
+| Name | Offset | Size |
++================+========+=============+
+| Message ID | 0 | 2 |
++----------------+--------+-------------+
+| Command | 2 | 2 |
++----------------+--------+-------------+
+| Message size | 4 | 4 |
++----------------+--------+-------------+
+| Flags | 8 | 4 |
++----------------+--------+-------------+
+| | +-----+------------+ |
+| | | Bit | Definition | |
+| | +=====+============+ |
+| | | 0-3 | Type | |
+| | +-----+------------+ |
+| | | 4 | No_reply | |
+| | +-----+------------+ |
+| | | 5 | Error | |
+| | +-----+------------+ |
++----------------+--------+-------------+
+| Error | 12 | 4 |
++----------------+--------+-------------+
+| <message data> | 16 | variable |
++----------------+--------+-------------+
+
+* *Message ID* identifies the message, and is echoed in the command's reply
+ message. Message IDs belong entirely to the sender, can be re-used (even
+ concurrently) and the receiver must not make any assumptions about their
+ uniqueness.
+* *Command* specifies the command to be executed, listed in Commands_. It is
+ also set in the reply header.
+* *Message size* contains the size of the entire message, including the header.
+* *Flags* contains attributes of the message:
+
+ * The *Type* bits indicate the message type.
+
+ * *Command* (value 0x0) indicates a command message.
+ * *Reply* (value 0x1) indicates a reply message acknowledging a previous
+ command with the same message ID.
+ * *No_reply* in a command message indicates that no reply is needed for this
+ command. This is commonly used when multiple commands are sent, and only
+ the last needs acknowledgement.
+ * *Error* in a reply message indicates the command being acknowledged had
+ an error. In this case, the *Error* field will be valid.
+
+* *Error* in a reply message is an optional UNIX errno value. It may be zero
+ even if the Error bit is set in Flags. It is reserved in a command message.
+
+Each command message in Commands_ must be replied to with a reply message,
+unless the message sets the *No_reply* bit. The reply consists of the header
+with the *Reply* bit set, plus any additional data.
+
+If an error occurs, the reply message must only include the reply header.
+
+As the header is standard in both requests and replies, it is not included in
+the command-specific specifications below; each message definition should be
+appended to the standard header, and the offsets are given from the end of the
+standard header.
+
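+The header can be expressed compactly in code. The following non-normative
+Python sketch follows the field layout and flag bits above; per this
+specification, fields use the host's endianness::
+
+  import struct
+
+  HEADER_FORMAT = '=HHIII'    # message ID, command, message size, flags, error
+  HEADER_SIZE = struct.calcsize(HEADER_FORMAT)    # 16 bytes
+
+  FLAG_TYPE_COMMAND = 0x0     # Type bits 0-3
+  FLAG_TYPE_REPLY = 0x1
+  FLAG_NO_REPLY = 1 << 4
+  FLAG_ERROR = 1 << 5
+
+  def pack_header(msg_id, command, payload_len,
+                  flags=FLAG_TYPE_COMMAND, error=0):
+      # The message size covers the 16-byte header plus the payload.
+      return struct.pack(HEADER_FORMAT, msg_id, command,
+                         HEADER_SIZE + payload_len, flags, error)
+
+  def unpack_header(buf):
+      return struct.unpack_from(HEADER_FORMAT, buf, 0)
+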
+``VFIO_USER_VERSION``
+---------------------
+
+.. _Version:
+
+This is the initial message sent by the client after the socket connection is
+established; the same format is used for the server's reply.
+
+Upon establishing a connection, the client must send a ``VFIO_USER_VERSION``
+message proposing a protocol version and a set of capabilities. The server
+compares these with the versions and capabilities it supports and sends a
+``VFIO_USER_VERSION`` reply according to the following rules.
+
+* The major version in the reply must be the same as proposed. If the client
+ does not support the proposed major, it closes the connection.
+* The minor version in the reply must be equal to or less than the minor
+ version proposed.
+* The capability list must be a subset of those proposed. If the server
+ requires a capability the client did not include, it closes the connection.
+
+The protocol major version will only change when incompatible protocol changes
+are made, such as changing the message format. The minor version may change
+when compatible changes are made, such as adding new messages or capabilities.
+Both the client and server must support all minor versions less than the
+maximum minor version they support. E.g., an implementation that supports
+version 1.3 must also support 1.0 through 1.2.
+
+When making a change to this specification, the protocol version number must
+be included in the form "added in version X.Y".
+
+Request
+^^^^^^^
+
+============== ====== ====
+Name Offset Size
+============== ====== ====
+version major 0 2
+version minor 2 2
+version data 4 variable (including terminating NUL). Optional.
+============== ====== ====
+
+The version data is an optional UTF-8 encoded JSON byte array with the following
+format:
+
++--------------+--------+-----------------------------------+
+| Name | Type | Description |
++==============+========+===================================+
+| capabilities | object | Contains common capabilities that |
+| | | the sender supports. Optional. |
++--------------+--------+-----------------------------------+
+
+Capabilities:
+
++--------------------+---------+------------------------------------------------+
+| Name | Type | Description |
++====================+=========+================================================+
+| max_msg_fds | number | Maximum number of file descriptors that can be |
+| | | received by the sender in one message. |
+| | | Optional. If not specified then the receiver |
+| | | must assume a value of ``1``. |
++--------------------+---------+------------------------------------------------+
+| max_data_xfer_size | number | Maximum ``count`` for data transfer messages; |
+| | | see `Read and Write Operations`_. Optional, |
+| | | with a default value of 1048576 bytes. |
++--------------------+---------+------------------------------------------------+
+| pgsizes | number | Page sizes supported in DMA map operations |
+| | | or'ed together. Optional, with a default value |
+| | | of supporting only 4k pages. |
++--------------------+---------+------------------------------------------------+
+| max_dma_maps       | number  | Maximum number of DMA map windows that can be  |
+|                    |         | valid simultaneously. Optional, with a default |
+|                    |         | value of 65535 (64k-1).                        |
++--------------------+---------+------------------------------------------------+
+| migration | object | Migration capability parameters. If missing |
+| | | then migration is not supported by the sender. |
++--------------------+---------+------------------------------------------------+
+| write_multiple | boolean | ``VFIO_USER_REGION_WRITE_MULTI`` messages |
+| | | are supported if the value is ``true``. |
++--------------------+---------+------------------------------------------------+
+
+The migration capability contains the following name/value pairs:
+
++-----------------+--------+--------------------------------------------------+
+| Name | Type | Description |
++=================+========+==================================================+
+| pgsize | number | Page size of dirty pages bitmap. The smallest |
+| | | between the client and the server is used. |
++-----------------+--------+--------------------------------------------------+
+| max_bitmap_size | number | Maximum bitmap size in ``VFIO_USER_DIRTY_PAGES`` |
+| | | and ``VFIO_DMA_UNMAP`` messages. Optional, |
+| | | with a default value of 256MB. |
++-----------------+--------+--------------------------------------------------+
+
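+Tying the header and version payload together, a non-normative sketch of
+building the request is shown below; it reuses ``pack_header()`` from the
+header sketch above, and the version numbers and capability values are
+placeholders rather than recommendations::
+
+  import json
+  import struct
+
+  VFIO_USER_VERSION = 1
+
+  def build_version_request(msg_id, major, minor, capabilities=None):
+      data = b''
+      if capabilities is not None:
+          # Optional UTF-8 JSON blob, including the terminating NUL.
+          data = (json.dumps({'capabilities': capabilities}).encode('utf-8')
+                  + b'\0')
+      payload = struct.pack('=HH', major, minor) + data
+      return pack_header(msg_id, VFIO_USER_VERSION, len(payload)) + payload
+
+  request = build_version_request(
+      msg_id=1, major=0, minor=1,
+      capabilities={'max_msg_fds': 8, 'max_data_xfer_size': 1048576})
+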
+Reply
+^^^^^
+
+The same message format is used in the server's reply with the semantics
+described above.
+
+``VFIO_USER_DMA_MAP``
+---------------------
+
+This command message is sent by the client to the server to inform it of the
+memory regions the server can access. It must be sent before the server can
+perform any DMA to the client. It is normally sent directly after the version
+handshake is completed, but may also occur when memory is added to the client,
+or if the client uses a vIOMMU.
+
+Request
+^^^^^^^
+
+The request payload for this message is a structure of the following format:
+
++-------------+--------+-------------+
+| Name | Offset | Size |
++=============+========+=============+
+| argsz | 0 | 4 |
++-------------+--------+-------------+
+| flags | 4 | 4 |
++-------------+--------+-------------+
+| | +-----+------------+ |
+| | | Bit | Definition | |
+| | +=====+============+ |
+| | | 0 | readable | |
+| | +-----+------------+ |
+| | | 1 | writeable | |
+| | +-----+------------+ |
++-------------+--------+-------------+
+| offset | 8 | 8 |
++-------------+--------+-------------+
+| address | 16 | 8 |
++-------------+--------+-------------+
+| size | 24 | 8 |
++-------------+--------+-------------+
+
+* *argsz* is the size of the above structure. Note there is no reply payload,
+ so this field differs from other message types.
+* *flags* contains the following region attributes:
+
+ * *readable* indicates that the region can be read from.
+
+ * *writeable* indicates that the region can be written to.
+
+* *offset* is the file offset of the region with respect to the associated file
+  descriptor, or zero if the region is not mappable.
+* *address* is the base DMA address of the region.
+* *size* is the size of the region.
+
+This structure is 32 bytes in size, so the message size is 16 + 32 bytes.
+
+If the DMA region being added can be directly mapped by the server, a file
+descriptor must be sent as part of the message meta-data. The region can be
+mapped via the mmap() system call. On ``AF_UNIX`` sockets, the file descriptor
+must be passed as ``SCM_RIGHTS`` type ancillary data. Otherwise, if the DMA
+region cannot be directly mapped by the server, no file descriptor must be sent
+as part of the message meta-data and the DMA region can be accessed by the
+server using ``VFIO_USER_DMA_READ`` and ``VFIO_USER_DMA_WRITE`` messages,
+explained in `Read and Write Operations`_. A command to map over an existing
+region must be failed by the server with ``EEXIST`` set in the error field of
+the reply.
+
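+A non-normative sketch of packing this payload is shown below; it reuses
+``pack_header()`` from the header sketch above, the constant names are local
+to the sketch, and passing the file descriptor as ``SCM_RIGHTS`` ancillary
+data is not shown::
+
+  import struct
+
+  VFIO_USER_DMA_MAP = 2
+  DMA_MAP_READABLE = 1 << 0    # flags bit 0
+  DMA_MAP_WRITEABLE = 1 << 1   # flags bit 1
+
+  def build_dma_map(msg_id, address, size, offset=0,
+                    flags=DMA_MAP_READABLE | DMA_MAP_WRITEABLE):
+      payload = struct.pack('=IIQQQ',
+                            32,          # argsz: the size of this structure
+                            flags, offset, address, size)
+      return pack_header(msg_id, VFIO_USER_DMA_MAP, len(payload)) + payload
+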
+Reply
+^^^^^
+
+There is no payload in the reply message.
+
+``VFIO_USER_DMA_UNMAP``
+-----------------------
+
+This command message is sent by the client to the server to inform it that a
+DMA region, previously made available via a ``VFIO_USER_DMA_MAP`` command
+message, is no longer available for DMA. It typically occurs when memory is
+subtracted from the client or if the client uses a vIOMMU. The DMA region is
+described by the following structure:
+
+Request
+^^^^^^^
+
+The request payload for this message is a structure of the following format:
+
++--------------+--------+------------------------+
+| Name | Offset | Size |
++==============+========+========================+
+| argsz | 0 | 4 |
++--------------+--------+------------------------+
+| flags | 4 | 4 |
++--------------+--------+------------------------+
+| address | 8 | 8 |
++--------------+--------+------------------------+
+| size | 16 | 8 |
++--------------+--------+------------------------+
+
+* *argsz* is the maximum size of the reply payload.
+* *flags* is unused in this version.
+* *address* is the base DMA address of the DMA region.
+* *size* is the size of the DMA region.
+
+The address and size of the DMA region being unmapped must match exactly a
+previous mapping.
+
+Reply
+^^^^^
+
+Upon receiving a ``VFIO_USER_DMA_UNMAP`` command, if the file descriptor is
+mapped then the server must release all references to that DMA region before
+replying, which potentially includes in-flight DMA transactions.
+
+The server responds with the original DMA entry in the request.
+
+
+``VFIO_USER_DEVICE_GET_INFO``
+-----------------------------
+
+This command message is sent by the client to the server to query for basic
+information about the device.
+
+Request
+^^^^^^^
+
++-------------+--------+--------------------------+
+| Name | Offset | Size |
++=============+========+==========================+
+| argsz | 0 | 4 |
++-------------+--------+--------------------------+
+| flags | 4 | 4 |
++-------------+--------+--------------------------+
+| | +-----+-------------------------+ |
+| | | Bit | Definition | |
+| | +=====+=========================+ |
+| | | 0 | VFIO_DEVICE_FLAGS_RESET | |
+| | +-----+-------------------------+ |
+| | | 1 | VFIO_DEVICE_FLAGS_PCI | |
+| | +-----+-------------------------+ |
++-------------+--------+--------------------------+
+| num_regions | 8 | 4 |
++-------------+--------+--------------------------+
+| num_irqs | 12 | 4 |
++-------------+--------+--------------------------+
+
+* *argsz* is the maximum size of the reply payload.
+* all other fields must be zero.
+
+Reply
+^^^^^
+
++-------------+--------+--------------------------+
+| Name | Offset | Size |
++=============+========+==========================+
+| argsz | 0 | 4 |
++-------------+--------+--------------------------+
+| flags | 4 | 4 |
++-------------+--------+--------------------------+
+| | +-----+-------------------------+ |
+| | | Bit | Definition | |
+| | +=====+=========================+ |
+| | | 0 | VFIO_DEVICE_FLAGS_RESET | |
+| | +-----+-------------------------+ |
+| | | 1 | VFIO_DEVICE_FLAGS_PCI | |
+| | +-----+-------------------------+ |
++-------------+--------+--------------------------+
+| num_regions | 8 | 4 |
++-------------+--------+--------------------------+
+| num_irqs | 12 | 4 |
++-------------+--------+--------------------------+
+
+* *argsz* is the size required for the full reply payload (16 bytes today)
+* *flags* contains the following device attributes.
+
+ * ``VFIO_DEVICE_FLAGS_RESET`` indicates that the device supports the
+ ``VFIO_USER_DEVICE_RESET`` message.
+ * ``VFIO_DEVICE_FLAGS_PCI`` indicates that the device is a PCI device.
+
+* *num_regions* is the number of memory regions that the device exposes.
+* *num_irqs* is the number of distinct interrupt types that the device supports.
+
+This version of the protocol only supports PCI devices. Additional devices may
+be supported in future versions.
+
+``VFIO_USER_DEVICE_GET_REGION_INFO``
+------------------------------------
+
+This command message is sent by the client to the server to query for
+information about device regions. The VFIO region info structure is defined in
+``<linux/vfio.h>`` (``struct vfio_region_info``).
+
+Request
+^^^^^^^
+
++------------+--------+------------------------------+
+| Name | Offset | Size |
++============+========+==============================+
+| argsz | 0 | 4 |
++------------+--------+------------------------------+
+| flags | 4 | 4 |
++------------+--------+------------------------------+
+| index | 8 | 4 |
++------------+--------+------------------------------+
+| cap_offset | 12 | 4 |
++------------+--------+------------------------------+
+| size | 16 | 8 |
++------------+--------+------------------------------+
+| offset | 24 | 8 |
++------------+--------+------------------------------+
+
+* *argsz* is the maximum size of the reply payload.
+* *index* is the index of the memory region being queried; it is the only field
+  that is required to be set in the command message.
+* all other fields must be zero.
+
+Reply
+^^^^^
+
++------------+--------+------------------------------+
+| Name | Offset | Size |
++============+========+==============================+
+| argsz | 0 | 4 |
++------------+--------+------------------------------+
+| flags | 4 | 4 |
++------------+--------+------------------------------+
+| | +-----+-----------------------------+ |
+| | | Bit | Definition | |
+| | +=====+=============================+ |
+| | | 0 | VFIO_REGION_INFO_FLAG_READ | |
+| | +-----+-----------------------------+ |
+| | | 1 | VFIO_REGION_INFO_FLAG_WRITE | |
+| | +-----+-----------------------------+ |
+| | | 2 | VFIO_REGION_INFO_FLAG_MMAP | |
+| | +-----+-----------------------------+ |
+| | | 3 | VFIO_REGION_INFO_FLAG_CAPS | |
+| | +-----+-----------------------------+ |
++------------+--------+------------------------------+
+| index | 8 | 4 |
++------------+--------+------------------------------+
+| cap_offset | 12 | 4 |
++------------+--------+------------------------------+
+| size | 16 | 8 |
++------------+--------+------------------------------+
+| offset | 24 | 8 |
++------------+--------+------------------------------+
+
+* *argsz* is the size required for the full reply payload (region info structure
+ plus the size of any region capabilities)
+* *flags* are attributes of the region:
+
+ * ``VFIO_REGION_INFO_FLAG_READ`` allows client read access to the region.
+ * ``VFIO_REGION_INFO_FLAG_WRITE`` allows client write access to the region.
+ * ``VFIO_REGION_INFO_FLAG_MMAP`` specifies the client can mmap() the region.
+ When this flag is set, the reply will include a file descriptor in its
+ meta-data. On ``AF_UNIX`` sockets, the file descriptors will be passed as
+ ``SCM_RIGHTS`` type ancillary data.
+ * ``VFIO_REGION_INFO_FLAG_CAPS`` indicates additional capabilities found in the
+ reply.
+
+* *index* is the index of the memory region being queried; it is the only field
+  that is required to be set in the command message.
+* *cap_offset* describes where additional region capabilities can be found.
+ cap_offset is relative to the beginning of the VFIO region info structure.
+ The data structure it points to is a VFIO cap header defined in
+ ``<linux/vfio.h>``.
+* *size* is the size of the region.
+* *offset* is the offset that should be given to the mmap() system call for
+ regions with the MMAP attribute. It is also used as the base offset when
+ mapping a VFIO sparse mmap area, described below.
+
+VFIO region capabilities
+""""""""""""""""""""""""
+
+The VFIO region information can also include a capabilities list. This list is
+similar to a PCI capability list - each entry has a common header that
+identifies a capability and where the next capability in the list can be found.
+The VFIO capability header format is defined in ``<linux/vfio.h>`` (``struct
+vfio_info_cap_header``).
+
+VFIO cap header format
+""""""""""""""""""""""
+
++---------+--------+------+
+| Name | Offset | Size |
++=========+========+======+
+| id | 0 | 2 |
++---------+--------+------+
+| version | 2 | 2 |
++---------+--------+------+
+| next | 4 | 4 |
++---------+--------+------+
+
+* *id* is the capability identity.
+* *version* is a capability-specific version number.
+* *next* specifies the offset of the next capability in the capability list. It
+ is relative to the beginning of the VFIO region info structure.
+
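+Putting *cap_offset* and *next* together, a minimal sketch of how a client
+might walk the capability list of a region info reply, using the definitions
+from ``<linux/vfio.h>``, is:
+
+.. code-block:: c
+
+   #include <linux/vfio.h>
+   #include <stddef.h>
+   #include <stdint.h>
+
+   /*
+    * Find a capability by id in a VFIO_USER_DEVICE_GET_REGION_INFO reply.
+    * All offsets are relative to the start of the region info structure;
+    * an offset of 0 terminates the chain.
+    */
+   static struct vfio_info_cap_header *
+   find_region_cap(struct vfio_region_info *info, uint16_t id)
+   {
+       uint32_t off = (info->flags & VFIO_REGION_INFO_FLAG_CAPS) ?
+                      info->cap_offset : 0;
+
+       while (off != 0) {
+           struct vfio_info_cap_header *hdr =
+               (struct vfio_info_cap_header *)((uint8_t *)info + off);
+
+           if (hdr->id == id) {
+               return hdr;
+           }
+           off = hdr->next;
+       }
+       return NULL;
+   }
+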
+VFIO sparse mmap cap header
+"""""""""""""""""""""""""""
+
++------------------+----------------------------------+
+| Name | Value |
++==================+==================================+
+| id | VFIO_REGION_INFO_CAP_SPARSE_MMAP |
++------------------+----------------------------------+
+| version | 0x1 |
++------------------+----------------------------------+
+| next | <next> |
++------------------+----------------------------------+
+| sparse mmap info | VFIO region info sparse mmap |
++------------------+----------------------------------+
+
+This capability is defined when only a subrange of the region supports
+direct access by the client via mmap(). The VFIO sparse mmap area is defined in
+``<linux/vfio.h>`` (``struct vfio_region_sparse_mmap_area`` and ``struct
+vfio_region_info_cap_sparse_mmap``).
+
+VFIO region info cap sparse mmap
+""""""""""""""""""""""""""""""""
+
++----------+--------+------+
+| Name | Offset | Size |
++==========+========+======+
+| nr_areas | 0 | 4 |
++----------+--------+------+
+| reserved | 4 | 4 |
++----------+--------+------+
+| offset | 8 | 8 |
++----------+--------+------+
+| size | 16 | 8 |
++----------+--------+------+
+| ... | | |
++----------+--------+------+
+
+* *nr_areas* is the number of sparse mmap areas in the region.
+* *offset* and *size* describe a single area that can be mapped by the client.
+ There will be *nr_areas* pairs of offset and size. The offset will be added to
+ the base offset given in the ``VFIO_USER_DEVICE_GET_REGION_INFO`` reply to form
+ the offset argument of the subsequent mmap() call.
+
+The VFIO sparse mmap area is defined in ``<linux/vfio.h>`` (``struct
+vfio_region_info_cap_sparse_mmap``).
+
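+A minimal sketch of how a client might map one sparse area, assuming it has
+already received the region's file descriptor in the reply's ancillary data
+and parsed the capability (the helper and its arguments are illustrative):
+
+.. code-block:: c
+
+   #include <stdint.h>
+   #include <sys/mman.h>
+
+   /*
+    * Map one sparse mmap area. 'region_offset' is the 'offset' field from the
+    * VFIO_USER_DEVICE_GET_REGION_INFO reply, 'area_offset' and 'area_size'
+    * come from one (offset, size) pair of the sparse mmap capability, and
+    * 'fd' is the file descriptor passed with the reply.
+    */
+   static void *map_sparse_area(int fd, uint64_t region_offset,
+                                uint64_t area_offset, uint64_t area_size)
+   {
+       return mmap(NULL, area_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                   fd, region_offset + area_offset);
+   }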
+
+``VFIO_USER_DEVICE_GET_REGION_IO_FDS``
+--------------------------------------
+
+Clients can access regions via ``VFIO_USER_REGION_READ/WRITE`` or, if available,
+by ``mmap()`` of a file descriptor provided by the server.
+
+``VFIO_USER_DEVICE_GET_REGION_IO_FDS`` provides an alternative access mechanism via
+file descriptors. This is an optional feature intended for performance
+improvements where an underlying sub-system (such as KVM) supports communication
+across such file descriptors to the vfio-user server, without needing to
+round-trip through the client.
+
+The server returns an array of sub-regions for the requested region. Each
+sub-region describes a span (offset and size) of a region, along with the
+requested file descriptor notification mechanism to use. Each sub-region in the
+response message may choose to use a different method, as defined below. The
+two mechanisms supported in this specification are ioeventfds and ioregionfds.
+
+The server in addition returns a file descriptor in the ancillary data; clients
+are expected to configure each sub-region's file descriptor with the requested
+notification method. For example, a client could configure KVM with the
+requested ioeventfd via a ``KVM_IOEVENTFD`` ``ioctl()``.
+
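+For example (this is a sketch only, not part of the protocol), a client built
+on KVM might register one ioeventfd sub-region roughly as follows, where
+``gpa`` is the guest physical address at which the client exposed the
+sub-region and ``event_fd`` is the descriptor taken from the ancillary data:
+
+.. code-block:: c
+
+   #include <linux/kvm.h>
+   #include <stdint.h>
+   #include <sys/ioctl.h>
+
+   /*
+    * Ask KVM to signal 'event_fd' directly on guest writes to
+    * [gpa, gpa + len), bypassing a round-trip through the client.
+    */
+   static int setup_ioeventfd(int vm_fd, int event_fd, uint64_t gpa,
+                              uint32_t len, uint64_t datamatch, uint32_t flags)
+   {
+       struct kvm_ioeventfd kick = {
+           .datamatch = datamatch,
+           .addr      = gpa,
+           .len       = len,
+           .fd        = event_fd,
+           .flags     = flags, /* e.g. KVM_IOEVENTFD_FLAG_DATAMATCH */
+       };
+
+       return ioctl(vm_fd, KVM_IOEVENTFD, &kick);
+   }
+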
+Request
+^^^^^^^
+
++-------------+--------+------+
+| Name | Offset | Size |
++=============+========+======+
+| argsz | 0 | 4 |
++-------------+--------+------+
+| flags | 4 | 4 |
++-------------+--------+------+
+| index | 8 | 4 |
++-------------+--------+------+
+| count | 12 | 4 |
++-------------+--------+------+
+
+* *argsz* is the maximum size of the reply payload
+* *index* is the index of the memory region being queried
+* all other fields must be zero
+
+The client must set ``flags`` to zero and specify the region being queried in
+the ``index``.
+
+Reply
+^^^^^
+
++-------------+--------+------+
+| Name | Offset | Size |
++=============+========+======+
+| argsz | 0 | 4 |
++-------------+--------+------+
+| flags | 4 | 4 |
++-------------+--------+------+
+| index | 8 | 4 |
++-------------+--------+------+
+| count | 12 | 4 |
++-------------+--------+------+
+| sub-regions | 16 | ... |
++-------------+--------+------+
+
+* *argsz* is the size of the region IO FD info structure plus the
+ total size of the sub-region array. Thus, each array entry "i" is at offset
+ i * ((argsz - 32) / count). Note that currently this is 40 bytes for both IO
+ FD types, but this is not to be relied on. As elsewhere, this indicates the
+ full reply payload size needed.
+* *flags* must be zero
+* *index* is the index of memory region being queried
+* *count* is the number of sub-regions in the array
+* *sub-regions* is the array of Sub-Region IO FD info structures
+
+The reply message will additionally include at least one file descriptor in the
+ancillary data. Note that more than one sub-region may share the same file
+descriptor.
+
+Note that it is the client's responsibility to verify the requested values (for
+example, that the requested offset does not exceed the region's bounds).
+
+Each sub-region given in the response has one of two possible structures,
+depending on whether *type* is ``VFIO_USER_IO_FD_TYPE_IOEVENTFD`` or
+``VFIO_USER_IO_FD_TYPE_IOREGIONFD``:
+
+Sub-Region IO FD info format (ioeventfd)
+""""""""""""""""""""""""""""""""""""""""
+
++-----------+--------+------+
+| Name | Offset | Size |
++===========+========+======+
+| offset | 0 | 8 |
++-----------+--------+------+
+| size | 8 | 8 |
++-----------+--------+------+
+| fd_index | 16 | 4 |
++-----------+--------+------+
+| type | 20 | 4 |
++-----------+--------+------+
+| flags | 24 | 4 |
++-----------+--------+------+
+| padding | 28 | 4 |
++-----------+--------+------+
+| datamatch | 32 | 8 |
++-----------+--------+------+
+
+* *offset* is the offset of the start of the sub-region within the region
+ requested ("physical address offset" for the region)
+* *size* is the length of the sub-region. This may be zero if the access size is
+ not relevant, which may allow for optimizations
+* *fd_index* is the index in the ancillary data of the FD to use for ioeventfd
+ notification; it may be shared.
+* *type* is ``VFIO_USER_IO_FD_TYPE_IOEVENTFD``
+* *flags* is any of:
+
+ * ``KVM_IOEVENTFD_FLAG_DATAMATCH``
+ * ``KVM_IOEVENTFD_FLAG_PIO``
+ * ``KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY`` (FIXME: makes sense?)
+
+* *datamatch* is the datamatch value if needed
+
+See https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt, *4.59
+KVM_IOEVENTFD* for further context on the ioeventfd-specific fields.
+
+Sub-Region IO FD info format (ioregionfd)
+"""""""""""""""""""""""""""""""""""""""""
+
++-----------+--------+------+
+| Name | Offset | Size |
++===========+========+======+
+| offset | 0 | 8 |
++-----------+--------+------+
+| size | 8 | 8 |
++-----------+--------+------+
+| fd_index | 16 | 4 |
++-----------+--------+------+
+| type | 20 | 4 |
++-----------+--------+------+
+| flags | 24 | 4 |
++-----------+--------+------+
+| padding | 28 | 4 |
++-----------+--------+------+
+| user_data | 32 | 8 |
++-----------+--------+------+
+
+* *offset* is the offset of the start of the sub-region within the region
+ requested ("physical address offset" for the region)
+* *size* is the length of the sub-region. This may be zero if the access size is
+ not relevant, which may allow for optimizations; ``KVM_IOREGION_POSTED_WRITES``
+ must be set in *flags* in this case
+* *fd_index* is the index in the ancillary data of the FD to use for ioregionfd
+ messages; it may be shared
+* *type* is ``VFIO_USER_IO_FD_TYPE_IOREGIONFD``
+* *flags* is any of:
+
+ * ``KVM_IOREGION_PIO``
+ * ``KVM_IOREGION_POSTED_WRITES``
+
+* *user_data* is an opaque value passed back to the server via a message on the
+ file descriptor
+
+For further information on the ioregionfd-specific fields, see:
+https://lore.kernel.org/kvm/cover.1613828726.git.eafanasova@gmail.com/
+
+(FIXME: update with final API docs.)
+
+``VFIO_USER_DEVICE_GET_IRQ_INFO``
+---------------------------------
+
+This command message is sent by the client to the server to query for
+information about device interrupt types. The VFIO IRQ info structure is
+defined in ``<linux/vfio.h>`` (``struct vfio_irq_info``).
+
+Request
+^^^^^^^
+
++-------+--------+---------------------------+
+| Name | Offset | Size |
++=======+========+===========================+
+| argsz | 0 | 4 |
++-------+--------+---------------------------+
+| flags | 4 | 4 |
++-------+--------+---------------------------+
+| | +-----+--------------------------+ |
+| | | Bit | Definition | |
+| | +=====+==========================+ |
+| | | 0 | VFIO_IRQ_INFO_EVENTFD | |
+| | +-----+--------------------------+ |
+| | | 1 | VFIO_IRQ_INFO_MASKABLE | |
+| | +-----+--------------------------+ |
+| | | 2 | VFIO_IRQ_INFO_AUTOMASKED | |
+| | +-----+--------------------------+ |
+| | | 3 | VFIO_IRQ_INFO_NORESIZE | |
+| | +-----+--------------------------+ |
++-------+--------+---------------------------+
+| index | 8 | 4 |
++-------+--------+---------------------------+
+| count | 12 | 4 |
++-------+--------+---------------------------+
+
+* *argsz* is the maximum size of the reply payload (16 bytes today)
+* *index* is the index of the IRQ type being queried (e.g. ``VFIO_PCI_MSIX_IRQ_INDEX``)
+* all other fields must be zero
+
+Reply
+^^^^^
+
++-------+--------+---------------------------+
+| Name | Offset | Size |
++=======+========+===========================+
+| argsz | 0 | 4 |
++-------+--------+---------------------------+
+| flags | 4 | 4 |
++-------+--------+---------------------------+
+| | +-----+--------------------------+ |
+| | | Bit | Definition | |
+| | +=====+==========================+ |
+| | | 0 | VFIO_IRQ_INFO_EVENTFD | |
+| | +-----+--------------------------+ |
+| | | 1 | VFIO_IRQ_INFO_MASKABLE | |
+| | +-----+--------------------------+ |
+| | | 2 | VFIO_IRQ_INFO_AUTOMASKED | |
+| | +-----+--------------------------+ |
+| | | 3 | VFIO_IRQ_INFO_NORESIZE | |
+| | +-----+--------------------------+ |
++-------+--------+---------------------------+
+| index | 8 | 4 |
++-------+--------+---------------------------+
+| count | 12 | 4 |
++-------+--------+---------------------------+
+
+* *argsz* is the size required for the full reply payload (16 bytes today)
+* *flags* defines IRQ attributes:
+
+ * ``VFIO_IRQ_INFO_EVENTFD`` indicates the IRQ type can support server eventfd
+ signalling.
+ * ``VFIO_IRQ_INFO_MASKABLE`` indicates that the IRQ type supports the ``MASK``
+ and ``UNMASK`` actions in a ``VFIO_USER_DEVICE_SET_IRQS`` message.
+ * ``VFIO_IRQ_INFO_AUTOMASKED`` indicates the IRQ type masks itself after being
+ triggered, and the client must send an ``UNMASK`` action to receive new
+ interrupts.
+ * ``VFIO_IRQ_INFO_NORESIZE`` indicates that ``VFIO_USER_DEVICE_SET_IRQS`` operations
+ configure interrupts as a set, and new sub-indexes cannot be enabled without
+ disabling the entire type.
+* *index* is the index of the IRQ type being queried
+* *count* describes the number of interrupts of the queried type.
+
+``VFIO_USER_DEVICE_SET_IRQS``
+-----------------------------
+
+This command message is sent by the client to the server to set actions for
+device interrupt types. The VFIO IRQ set structure is defined in
+``<linux/vfio.h>`` (``struct vfio_irq_set``).
+
+Request
+^^^^^^^
+
++-------+--------+------------------------------+
+| Name | Offset | Size |
++=======+========+==============================+
+| argsz | 0 | 4 |
++-------+--------+------------------------------+
+| flags | 4 | 4 |
++-------+--------+------------------------------+
+| | +-----+-----------------------------+ |
+| | | Bit | Definition | |
+| | +=====+=============================+ |
+| | | 0 | VFIO_IRQ_SET_DATA_NONE | |
+| | +-----+-----------------------------+ |
+| | | 1 | VFIO_IRQ_SET_DATA_BOOL | |
+| | +-----+-----------------------------+ |
+| | | 2 | VFIO_IRQ_SET_DATA_EVENTFD | |
+| | +-----+-----------------------------+ |
+| | | 3 | VFIO_IRQ_SET_ACTION_MASK | |
+| | +-----+-----------------------------+ |
+| | | 4 | VFIO_IRQ_SET_ACTION_UNMASK | |
+| | +-----+-----------------------------+ |
+| | | 5 | VFIO_IRQ_SET_ACTION_TRIGGER | |
+| | +-----+-----------------------------+ |
++-------+--------+------------------------------+
+| index | 8 | 4 |
++-------+--------+------------------------------+
+| start | 12 | 4 |
++-------+--------+------------------------------+
+| count | 16 | 4 |
++-------+--------+------------------------------+
+| data | 20 | variable |
++-------+--------+------------------------------+
+
+* *argsz* is the size of the VFIO IRQ set request payload, including any *data*
+ field. Note there is no reply payload, so this field differs from other
+ message types.
+* *flags* defines the action performed on the interrupt range. The ``DATA``
+ flags describe the data field sent in the message; the ``ACTION`` flags
+ describe the action to be performed. The flags are mutually exclusive for
+ both sets.
+
+ * ``VFIO_IRQ_SET_DATA_NONE`` indicates there is no data field in the command.
+ The action is performed unconditionally.
+ * ``VFIO_IRQ_SET_DATA_BOOL`` indicates the data field is an array of boolean
+ bytes. The action is performed if the corresponding boolean is true.
+ * ``VFIO_IRQ_SET_DATA_EVENTFD`` indicates an array of event file descriptors
+ was sent in the message meta-data. These descriptors will be signalled when
+ the action defined by the action flags occurs. In ``AF_UNIX`` sockets, the
+ descriptors are sent as ``SCM_RIGHTS`` type ancillary data.
+ If no file descriptors are provided, this de-assigns the specified
+ previously configured interrupts.
+ * ``VFIO_IRQ_SET_ACTION_MASK`` indicates a masking event. It can be used with
+ ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to mask an interrupt,
+ or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the guest masks
+ the interrupt.
+ * ``VFIO_IRQ_SET_ACTION_UNMASK`` indicates an unmasking event. It can be used
+ with ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to unmask an
+ interrupt, or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the
+ guest unmasks the interrupt.
+ * ``VFIO_IRQ_SET_ACTION_TRIGGER`` indicates a triggering event. It can be used
+ with ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to trigger an
+ interrupt, or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the
+ server triggers the interrupt.
+
+* *index* is the index of the IRQ type being set up.
+* *start* is the start of the sub-index being set.
+* *count* describes the number of sub-indexes being set. As a special case, a
+ count (and start) of 0, with data flags of ``VFIO_IRQ_SET_DATA_NONE`` disables
+ all interrupts of the index.
+* *data* is an optional field included when the
+ ``VFIO_IRQ_SET_DATA_BOOL`` flag is present. It contains an array of booleans
+ that specify whether the action is to be performed on the corresponding
+ index. It's used when the action is only performed on a subset of the range
+ specified.
+
+Not all interrupt types support every combination of data and action flags.
+The client must know the capabilities of the device and IRQ index before it
+sends a ``VFIO_USER_DEVICE_SET_IRQS`` message.
+
+In typical operation, a specific IRQ may operate as follows:
+
+1. The client sends a ``VFIO_USER_DEVICE_SET_IRQS`` message with
+ ``flags=(VFIO_IRQ_SET_DATA_EVENTFD|VFIO_IRQ_SET_ACTION_TRIGGER)`` along
+ with an eventfd. This associates the IRQ with a particular eventfd on the
+ server side, as sketched in the example after this list.
+
+#. The client may send a ``VFIO_USER_DEVICE_SET_IRQS`` message with
+ ``flags=(VFIO_IRQ_SET_DATA_EVENTFD|VFIO_IRQ_SET_ACTION_MASK/UNMASK)`` along
+ with another eventfd. This associates the given eventfd with the
+ mask/unmask state on the server side.
+
+#. The server may trigger the IRQ by writing 1 to the eventfd.
+
+#. The server may mask/unmask an IRQ which will write 1 to the corresponding
+ mask/unmask eventfd, if there is one.
+
+5. A client may trigger a device IRQ itself by sending a
+ ``VFIO_USER_DEVICE_SET_IRQS`` message with
+ ``flags=(VFIO_IRQ_SET_DATA_NONE/BOOL|VFIO_IRQ_SET_ACTION_TRIGGER)``.
+
+6. A client may mask or unmask the IRQ by sending a
+ ``VFIO_USER_DEVICE_SET_IRQS`` message with
+ ``flags=(VFIO_IRQ_SET_DATA_NONE/BOOL|VFIO_IRQ_SET_ACTION_MASK/UNMASK)``.
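+
+A sketch of step 1 above: the client fills the fixed part of the payload and
+sends the eventfd as ``SCM_RIGHTS`` ancillary data on the same message (the
+message header and socket handling are not shown):
+
+.. code-block:: c
+
+   #include <linux/vfio.h>
+   #include <stdint.h>
+   #include <string.h>
+
+   /*
+    * Prepare a VFIO_USER_DEVICE_SET_IRQS payload that associates one eventfd
+    * with sub-index 0 of 'index'. With VFIO_IRQ_SET_DATA_EVENTFD the eventfd
+    * itself is not carried in the payload; it travels as ancillary data.
+    */
+   static void build_irq_trigger_set(struct vfio_irq_set *set, uint32_t index)
+   {
+       memset(set, 0, sizeof(*set));
+       set->argsz = sizeof(*set);
+       set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+       set->index = index;
+       set->start = 0;
+       set->count = 1;
+   }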
+
+Reply
+^^^^^
+
+There is no payload in the reply.
+
+.. _Read and Write Operations:
+
+Note that all of these operations must be supported by the client and/or server,
+even if the corresponding memory or device region has been shared as mappable.
+
+The ``count`` field must not exceed the value of ``max_data_xfer_size`` of the
+peer, for both reads and writes.
+
+``VFIO_USER_REGION_READ``
+-------------------------
+
+If a device region is not mappable, it's not directly accessible by the client
+via ``mmap()`` of the underlying file descriptor. In this case, a client can
+read from a device region with this message.
+
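+An illustrative declaration of the request payload described below (the type
+name is not part of the protocol):
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* VFIO_USER_REGION_READ / VFIO_USER_REGION_WRITE payload layout. */
+   struct vfio_user_region_access {
+       uint64_t offset;  /* offset into the region */
+       uint32_t region;  /* region index */
+       uint32_t count;   /* number of bytes to transfer */
+       /* 'count' bytes of data follow: in the reply for reads,
+        * in the request for writes. */
+   };
+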
+Request
+^^^^^^^
+
++--------+--------+----------+
+| Name | Offset | Size |
++========+========+==========+
+| offset | 0 | 8 |
++--------+--------+----------+
+| region | 8 | 4 |
++--------+--------+----------+
+| count | 12 | 4 |
++--------+--------+----------+
+
+* *offset* into the region being accessed.
+* *region* is the index of the region being accessed.
+* *count* is the size of the data to be transferred.
+
+Reply
+^^^^^
+
++--------+--------+----------+
+| Name | Offset | Size |
++========+========+==========+
+| offset | 0 | 8 |
++--------+--------+----------+
+| region | 8 | 4 |
++--------+--------+----------+
+| count | 12 | 4 |
++--------+--------+----------+
+| data | 16 | variable |
++--------+--------+----------+
+
+* *offset* into the region accessed.
+* *region* is the index of the region accessed.
+* *count* is the size of the data transferred.
+* *data* is the data that was read from the device region.
+
+``VFIO_USER_REGION_WRITE``
+--------------------------
+
+If a device region is not mappable, it's not directly accessible by the client
+via mmap() of the underlying fd. In this case, a client can write to a device
+region with this message.
+
+Request
+^^^^^^^
+
++--------+--------+----------+
+| Name | Offset | Size |
++========+========+==========+
+| offset | 0 | 8 |
++--------+--------+----------+
+| region | 8 | 4 |
++--------+--------+----------+
+| count | 12 | 4 |
++--------+--------+----------+
+| data | 16 | variable |
++--------+--------+----------+
+
+* *offset* into the region being accessed.
+* *region* is the index of the region being accessed.
+* *count* is the size of the data to be transferred.
+* *data* is the data to write
+
+Reply
+^^^^^
+
++--------+--------+----------+
+| Name | Offset | Size |
++========+========+==========+
+| offset | 0 | 8 |
++--------+--------+----------+
+| region | 8 | 4 |
++--------+--------+----------+
+| count | 12 | 4 |
++--------+--------+----------+
+
+* *offset* into the region accessed.
+* *region* is the index of the region accessed.
+* *count* is the size of the data transferred.
+
+``VFIO_USER_DMA_READ``
+-----------------------
+
+If the client has not shared mappable memory, the server can use this message to
+read from guest memory.
+
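+An illustrative declaration of the payload used by ``VFIO_USER_DMA_READ`` and
+``VFIO_USER_DMA_WRITE`` (the type name is not part of the protocol):
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* VFIO_USER_DMA_READ / VFIO_USER_DMA_WRITE payload layout. */
+   struct vfio_user_dma_access {
+       uint64_t address; /* client DMA address, previously exported with VFIO_USER_DMA_MAP */
+       uint64_t count;   /* number of bytes to transfer */
+       /* 'count' bytes of data follow: in the reply for reads,
+        * in the request for writes. */
+   };
+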
+Request
+^^^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| address | 0 | 8 |
++---------+--------+----------+
+| count | 8 | 8 |
++---------+--------+----------+
+
+* *address* is the client DMA memory address being accessed. This address must have
+ been previously exported to the server with a ``VFIO_USER_DMA_MAP`` message.
+* *count* is the size of the data to be transferred.
+
+Reply
+^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| address | 0 | 8 |
++---------+--------+----------+
+| count | 8 | 8 |
++---------+--------+----------+
+| data | 16 | variable |
++---------+--------+----------+
+
+* *address* is the client DMA memory address being accessed.
+* *count* is the size of the data transferred.
+* *data* is the data read.
+
+``VFIO_USER_DMA_WRITE``
+-----------------------
+
+If the client has not shared mappable memory, the server can use this message to
+write to guest memory.
+
+Request
+^^^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| address | 0 | 8 |
++---------+--------+----------+
+| count | 8 | 8 |
++---------+--------+----------+
+| data | 16 | variable |
++---------+--------+----------+
+
+* *address* is the client DMA memory address being accessed. This address must have
+ been previously exported to the server with a ``VFIO_USER_DMA_MAP`` message.
+* *count* is the size of the data to be transferred.
+* *data* is the data to write
+
+Reply
+^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| address | 0 | 8 |
++---------+--------+----------+
+| count | 8 | 4 |
++---------+--------+----------+
+
+* *address* is the client DMA memory address being accessed.
+* *count* is the size of the data transferred.
+
+``VFIO_USER_DEVICE_RESET``
+--------------------------
+
+This command message is sent from the client to the server to reset the device.
+Neither the request nor the reply has a payload.
+
+``VFIO_USER_REGION_WRITE_MULTI``
+--------------------------------
+
+This message can be used to coalesce multiple device write operations
+into a single message. It is only used as an optimization when the
+outgoing message queue is relatively full.
+
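+An illustrative declaration of the request payload described below (the type
+names are not part of the protocol):
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* One coalesced write; 'count' is limited to 8 bytes. */
+   struct vfio_user_single_write {
+       uint64_t offset;  /* offset into the region */
+       uint32_t region;  /* region index */
+       uint32_t count;   /* 1 to 8 bytes */
+       uint8_t  data[8];
+   };
+
+   /* VFIO_USER_REGION_WRITE_MULTI request payload. */
+   struct vfio_user_write_multi {
+       uint64_t wr_cnt;                     /* number of valid entries in wrs[] */
+       struct vfio_user_single_write wrs[]; /* wr_cnt entries */
+   };
+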
+Request
+^^^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| wr_cnt | 0 | 8 |
++---------+--------+----------+
+| wrs | 8 | variable |
++---------+--------+----------+
+
+* *wr_cnt* is the number of device writes coalesced in the message
+* *wrs* is an array of device writes defined below
+
+Single Device Write Format
+""""""""""""""""""""""""""
+
++--------+--------+----------+
+| Name | Offset | Size |
++========+========+==========+
+| offset | 0 | 8 |
++--------+--------+----------+
+| region | 8 | 4 |
++--------+--------+----------+
+| count | 12 | 4 |
++--------+--------+----------+
+| data | 16 | 8 |
++--------+--------+----------+
+
+* *offset* into the region being accessed.
+* *region* is the index of the region being accessed.
+* *count* is the size of the data to be transferred. This format can
+ only describe writes of 8 bytes or less.
+* *data* is the data to write.
+
+Reply
+^^^^^
+
++---------+--------+----------+
+| Name | Offset | Size |
++=========+========+==========+
+| wr_cnt | 0 | 8 |
++---------+--------+----------+
+
+* *wr_cnt* is the number of device writes completed.
+
+
+Appendices
+==========
+
+Unused VFIO ``ioctl()`` commands
+--------------------------------
+
+The following VFIO commands do not have an equivalent vfio-user command:
+
+* ``VFIO_GET_API_VERSION``
+* ``VFIO_CHECK_EXTENSION``
+* ``VFIO_SET_IOMMU``
+* ``VFIO_GROUP_GET_STATUS``
+* ``VFIO_GROUP_SET_CONTAINER``
+* ``VFIO_GROUP_UNSET_CONTAINER``
+* ``VFIO_GROUP_GET_DEVICE_FD``
+* ``VFIO_IOMMU_GET_INFO``
+
+However, once support for live migration of VFIO devices is finalized, some
+of the above commands may have to be handled by the client in their
+corresponding vfio-user form. This will be addressed in a future protocol
+version.
+
+VFIO groups and containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current VFIO implementation includes group and container idioms that
+describe how a device relates to the host IOMMU. In the vfio-user
+implementation, the IOMMU is implemented in software by the client, and is not
+visible to the server. The simplest approach is for the client to put each
+device into its own group and container.
+
+Backend Program Conventions
+---------------------------
+
+vfio-user backend program conventions are based on the vhost-user ones.
+
+* The backend program must not daemonize itself.
+* No assumptions must be made as to what access the backend program has on the
+ system.
+* File descriptors 0, 1 and 2 must exist, must have regular
+ stdin/stdout/stderr semantics, and can be redirected.
+* The backend program must honor the SIGTERM signal.
+* The backend program must accept the following command line options:
+
+ * ``--socket-path=PATH``: path to UNIX domain socket,
+ * ``--fd=FDNUM``: file descriptor for UNIX domain socket, incompatible with
+ ``--socket-path``
+* The backend program must be accompanied by a JSON file stored under
+ ``/usr/share/vfio-user``.
+
+TODO add schema similar to docs/interop/vhost-user.json.
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
index d8419fd..2e50f2d 100644
--- a/docs/interop/vhost-user.rst
+++ b/docs/interop/vhost-user.rst
@@ -167,6 +167,8 @@ A vring address description
Note that a ring address is an IOVA if ``VIRTIO_F_IOMMU_PLATFORM`` has
been negotiated. Otherwise it is a user address.
+.. _memory_region_description:
+
Memory region description
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -180,7 +182,7 @@ Memory region description
:user address: a 64-bit user address
-:mmap offset: 64-bit offset where region starts in the mapped memory
+:mmap offset: a 64-bit offset where region starts in the mapped memory
When the ``VHOST_USER_PROTOCOL_F_XEN_MMAP`` protocol feature has been
successfully negotiated, the memory region description contains two extra
@@ -190,7 +192,7 @@ fields at the end.
| guest address | size | user address | mmap offset | xen mmap flags | domid |
+---------------+------+--------------+-------------+----------------+-------+
-:xen mmap flags: 32-bit bit field
+:xen mmap flags: a 32-bit bit field
- Bit 0 is set for Xen foreign memory mapping.
- Bit 1 is set for Xen grant memory mapping.
@@ -211,7 +213,7 @@ Single memory region description
:padding: 64-bit
-A region is represented by Memory region description.
+:region: region is represented by :ref:`Memory region description <memory_region_description>`.
Multiple Memory regions description
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -224,7 +226,7 @@ Multiple Memory regions description
:padding: 32-bit
-A region is represented by Memory region description.
+:regions: regions field contains 8 regions of type :ref:`Memory region description <memory_region_description>`.
Log description
^^^^^^^^^^^^^^^
@@ -233,9 +235,9 @@ Log description
| log size | log offset |
+----------+------------+
-:log size: size of area used for logging
+:log size: a 64-bit size of area used for logging
-:log offset: offset from start of supplied file descriptor where
+:log offset: a 64-bit offset from start of supplied file descriptor where
logging starts (i.e. where guest address 0 would be
logged)
@@ -382,7 +384,7 @@ the kernel implementation.
The communication consists of the *front-end* sending message requests and
the *back-end* sending message replies. Most of the requests don't require
-replies. Here is a list of the ones that do:
+replies, except for the following requests:
* ``VHOST_USER_GET_FEATURES``
* ``VHOST_USER_GET_PROTOCOL_FEATURES``
@@ -1239,11 +1241,11 @@ Front-end message types
(*a vring descriptor index for split virtqueues* vs. *vring descriptor
indices for packed virtqueues*).
- When and as long as all of a device’s vrings are stopped, it is
+ When and as long as all of a device's vrings are stopped, it is
*suspended*, see :ref:`Suspended device state
<suspended_device_state>`.
- The request payload’s *num* field is currently reserved and must be
+ The request payload's *num* field is currently reserved and must be
set to 0.
``VHOST_USER_SET_VRING_KICK``
@@ -1662,7 +1664,7 @@ Front-end message types
:reply payload: ``u64``
Front-end and back-end negotiate a channel over which to transfer the
- back-end’s internal state during migration. Either side (front-end or
+ back-end's internal state during migration. Either side (front-end or
back-end) may create the channel. The nature of this channel is not
restricted or defined in this document, but whichever side creates it
must create a file descriptor that is provided to the respectively
@@ -1714,7 +1716,7 @@ Front-end message types
:request payload: N/A
:reply payload: ``u64``
- After transferring the back-end’s internal state during migration (see
+ After transferring the back-end's internal state during migration (see
the :ref:`Migrating back-end state <migrating_backend_state>`
section), check whether the back-end was able to successfully fully
process the state.
diff --git a/docs/meson.build b/docs/meson.build
index 9040f86..3676f81 100644
--- a/docs/meson.build
+++ b/docs/meson.build
@@ -54,7 +54,6 @@ if build_docs
'qemu-pr-helper.8': (have_tools ? 'man8' : ''),
'qemu-storage-daemon.1': (have_tools ? 'man1' : ''),
'qemu-trace-stap.1': (stap.found() ? 'man1' : ''),
- 'virtfs-proxy-helper.1': (have_virtfs_proxy_helper ? 'man1' : ''),
'qemu.1': 'man1',
'qemu-block-drivers.7': 'man7',
'qemu-cpu-models.7': 'man7'
@@ -99,3 +98,8 @@ if build_docs
alias_target('html', sphinxdocs)
alias_target('man', sphinxmans)
endif
+
+test('QAPI firmware.json regression tests', qapi_gen,
+ args: ['-o', meson.current_build_dir() / 'qapi',
+ meson.current_source_dir() / 'interop/firmware.json'],
+ suite: ['qapi-schema', 'qapi-interop'])
diff --git a/docs/qcow2-cache.txt b/docs/qcow2-cache.txt
index 5f763aa..204a574 100644
--- a/docs/qcow2-cache.txt
+++ b/docs/qcow2-cache.txt
@@ -15,7 +15,7 @@ not a straightforward operation.
This document attempts to give an overview of the L2 and refcount
caches, and how to configure them.
-Please refer to the docs/interop/qcow2.txt file for an in-depth
+Please refer to the docs/interop/qcow2.rst file for an in-depth
technical description of the qcow2 file format.
diff --git a/docs/specs/acpi_hest_ghes.rst b/docs/specs/acpi_hest_ghes.rst
index 68f1fbe..c3e9f8d 100644
--- a/docs/specs/acpi_hest_ghes.rst
+++ b/docs/specs/acpi_hest_ghes.rst
@@ -67,8 +67,10 @@ Design Details
(3) The address registers table contains N Error Block Address entries
and N Read Ack Register entries. The size for each entry is 8-byte.
The Error Status Data Block table contains N Error Status Data Block
- entries. The size for each entry is 4096(0x1000) bytes. The total size
- for the "etc/hardware_errors" fw_cfg blob is (N * 8 * 2 + N * 4096) bytes.
+ entries. The size for each entry is defined in the source code as
+ ACPI_GHES_MAX_RAW_DATA_LENGTH (currently 1024 bytes). The total size
+ for the "etc/hardware_errors" fw_cfg blob is
+ (N * 8 * 2 + N * ACPI_GHES_MAX_RAW_DATA_LENGTH) bytes.
N is the number of the kinds of hardware error sources.
(4) QEMU generates the ACPI linker/loader script for the firmware. The
diff --git a/docs/specs/acpi_hw_reduced_hotplug.rst b/docs/specs/acpi_hw_reduced_hotplug.rst
index 0bd3f93..3acd6fc 100644
--- a/docs/specs/acpi_hw_reduced_hotplug.rst
+++ b/docs/specs/acpi_hw_reduced_hotplug.rst
@@ -64,7 +64,8 @@ GED IO interface (4 byte access)
0: Memory hotplug event
1: System power down event
2: NVDIMM hotplug event
- 3-31: Reserved
+ 3: CPU hotplug event
+ 4-31: Reserved
**write_access:**
diff --git a/docs/specs/aspeed-intc.rst b/docs/specs/aspeed-intc.rst
new file mode 100644
index 0000000..9cefd7f
--- /dev/null
+++ b/docs/specs/aspeed-intc.rst
@@ -0,0 +1,136 @@
+===========================
+ASPEED Interrupt Controller
+===========================
+
+AST2700
+-------
+There are a total of 480 interrupt sources in AST2700. Because the processor
+supports a limited number of interrupt lines, interrupt sources numbered above
+127 are merged in groups of 32.
+
+There are two levels of interrupt controllers, INTC (CPU Die) and INTCIO
+(I/O Die).
+
+Interrupt Mapping
+-----------------
+- INTC: Handles interrupt sources 0 - 127 and integrates signals from INTCIO.
+- INTCIO: Handles interrupt sources 128 - 319 independently.
+
+QEMU Support
+------------
+Currently, only GIC 192 to 201 are supported. Their interrupt sources come
+from INTCIO and are connected to INTC at input pin 0, whose output pins 0 to 9
+drive GIC 192-201.
+
+Design for GICINT 196
+---------------------
+The OR gate has interrupt sources ranging from 0 to 31, with its output pin
+connected to INTCIO "T0 GICINT_196". That output pin is then connected to INTC
+"GIC_192_201" at bit 4, and the bit 4 output pin is connected to GIC 196.
+
+INTC GIC_192_201 Output Pin Mapping
+-----------------------------------
+The INTC GIC_192_201 design has 10 output pins, mapped as follows:
+
+==== ====
+Bit GIC
+==== ====
+0 192
+1 193
+2 194
+3 195
+4 196
+5 197
+6 198
+7 199
+8 200
+9 201
+==== ====
+
+AST2700 A0
+----------
+It has only one INTC controller, and currently only GIC 128-136 are supported.
+To support both AST2700 A1 and AST2700 A0, there are 10 OR gates in the INTC,
+with gates 1 to 9 supporting GIC 128-136.
+
+Design for GICINT 132
+---------------------
+The OR gate has interrupt sources ranging from 0 to 31, with its output pin
+connected to INTC. That output pin is then connected to GIC 132.
+
+Block Diagram of GICINT 196 for AST2700 A1 and GICINT 132 for AST2700 A0
+------------------------------------------------------------------------
+
+.. code-block::
+
+ |-------------------------------------------------------------------------------------------------------|
+ | AST2700 A1 Design |
+ | To GICINT196 |
+ | |
+ | ETH1 |-----------| |--------------------------| |--------------| |
+ | -------->|0 | | INTCIO | | orgates[0] | |
+ | ETH2 | 4| orgates[0]------>|inpin[0]-------->outpin[0]|------->| 0 | |
+ | -------->|1 5| orgates[1]------>|inpin[1]-------->outpin[1]|------->| 1 | |
+ | ETH3 | 6| orgates[2]------>|inpin[2]-------->outpin[2]|------->| 2 | |
+ | -------->|2 19| orgates[3]------>|inpin[3]-------->outpin[3]|------->| 3 OR[0:9] |-----| |
+ | UART0 | 20|-->orgates[4]------>|inpin[4]-------->outpin[4]|------->| 4 | | |
+ | -------->|7 21| orgates[5]------>|inpin[5]-------->outpin[5]|------->| 5 | | |
+ | UART1 | 22| orgates[6]------>|inpin[6]-------->outpin[6]|------->| 6 | | |
+ | -------->|8 23| orgates[7]------>|inpin[7]-------->outpin[7]|------->| 7 | | |
+ | UART2 | 24| orgates[8]------>|inpin[8]-------->outpin[8]|------->| 8 | | |
+ | -------->|9 25| orgates[9]------>|inpin[9]-------->outpin[9]|------->| 9 | | |
+ | UART3 | 26| |--------------------------| |--------------| | |
+ | ---------|10 27| | |
+ | UART5 | 28| | |
+ | -------->|11 29| | |
+ | UART6 | | | |
+ | -------->|12 30| |-----------------------------------------------------------------------| |
+ | UART7 | 31| | |
+ | -------->|13 | | |
+ | UART8 | OR[0:31] | | |------------------------------| |----------| |
+ | -------->|14 | | | INTC | | GIC | |
+ | UART9 | | | |inpin[0:0]--------->outpin[0] |---------->|192 | |
+ | -------->|15 | | |inpin[0:1]--------->outpin[1] |---------->|193 | |
+ | UART10 | | | |inpin[0:2]--------->outpin[2] |---------->|194 | |
+ | -------->|16 | | |inpin[0:3]--------->outpin[3] |---------->|195 | |
+ | UART11 | | |--------------> |inpin[0:4]--------->outpin[4] |---------->|196 | |
+ | -------->|17 | |inpin[0:5]--------->outpin[5] |---------->|197 | |
+ | UART12 | | |inpin[0:6]--------->outpin[6] |---------->|198 | |
+ | -------->|18 | |inpin[0:7]--------->outpin[7] |---------->|199 | |
+ | |-----------| |inpin[0:8]--------->outpin[8] |---------->|200 | |
+ | |inpin[0:9]--------->outpin[9] |---------->|201 | |
+ |-------------------------------------------------------------------------------------------------------|
+ |-------------------------------------------------------------------------------------------------------|
+ | ETH1 |-----------| orgates[1]------->|inpin[1]----------->outpin[10]|---------->|128 | |
+ | -------->|0 | orgates[2]------->|inpin[2]----------->outpin[11]|---------->|129 | |
+ | ETH2 | 4| orgates[3]------->|inpin[3]----------->outpin[12]|---------->|130 | |
+ | -------->|1 5| orgates[4]------->|inpin[4]----------->outpin[13]|---------->|131 | |
+ | ETH3 | 6|---->orgates[5]------->|inpin[5]----------->outpin[14]|---------->|132 | |
+ | -------->|2 19| orgates[6]------->|inpin[6]----------->outpin[15]|---------->|133 | |
+ | UART0 | 20| orgates[7]------->|inpin[7]----------->outpin[16]|---------->|134 | |
+ | -------->|7 21| orgates[8]------->|inpin[8]----------->outpin[17]|---------->|135 | |
+ | UART1 | 22| orgates[9]------->|inpin[9]----------->outpin[18]|---------->|136 | |
+ | -------->|8 23| |------------------------------| |----------| |
+ | UART2 | 24| |
+ | -------->|9 25| AST2700 A0 Design |
+ | UART3 | 26| |
+ | -------->|10 27| |
+ | UART5 | 28| |
+ | -------->|11 29| GICINT132 |
+ | UART6 | | |
+ | -------->|12 30| |
+ | UART7 | 31| |
+ | -------->|13 | |
+ | UART8 | OR[0:31] | |
+ | -------->|14 | |
+ | UART9 | | |
+ | -------->|15 | |
+ | UART10 | | |
+ | -------->|16 | |
+ | UART11 | | |
+ | -------->|17 | |
+ | UART12 | | |
+ | -------->|18 | |
+ | |-----------| |
+ | |
+ |-------------------------------------------------------------------------------------------------------|
diff --git a/docs/specs/fw_cfg.rst b/docs/specs/fw_cfg.rst
index 5ad47a9..31ae315 100644
--- a/docs/specs/fw_cfg.rst
+++ b/docs/specs/fw_cfg.rst
@@ -54,11 +54,11 @@ Data Register
-------------
* Read/Write (writes ignored as of QEMU v2.4, but see the DMA interface)
-* Location: platform dependent (IOport [#]_ or MMIO)
+* Location: platform dependent (IOport\ [#placement]_ or MMIO)
* Width: 8-bit (if IOport), 8/16/32/64-bit (if MMIO)
* Endianness: string-preserving
-.. [#]
+.. [#placement]
On platforms where the data register is exposed as an IOport, its
port number will always be one greater than the port number of the
selector register. In other words, the two ports overlap, and can not
diff --git a/docs/specs/index.rst b/docs/specs/index.rst
index 1484e3e..f19d73c 100644
--- a/docs/specs/index.rst
+++ b/docs/specs/index.rst
@@ -29,7 +29,13 @@ guest hardware that is specific to QEMU.
edu
ivshmem-spec
pvpanic
+ spdm
standard-vga
virt-ctlr
vmcoreinfo
vmgenid
+ rapl-msr
+ rocker
+ riscv-iommu
+ riscv-aia
+ aspeed-intc
diff --git a/docs/specs/pci-ids.rst b/docs/specs/pci-ids.rst
index c0a3dec..261b0f3 100644
--- a/docs/specs/pci-ids.rst
+++ b/docs/specs/pci-ids.rst
@@ -77,13 +77,17 @@ PCI devices (other than virtio):
1b36:0008
PCIe host bridge
1b36:0009
- PCI Expander Bridge (-device pxb)
+ PCI Expander Bridge (``-device pxb``)
1b36:000a
PCI-PCI bridge (multiseat)
1b36:000b
- PCIe Expander Bridge (-device pxb-pcie)
+ PCIe Expander Bridge (``-device pxb-pcie``)
+1b36:000c
+ PCIe Root Port (``-device pcie-root-port``)
1b36:000d
PCI xhci usb host adapter
+1b36:000e
+ PCIe-to-PCI bridge (``-device pcie-pci-bridge``)
1b36:000f
mdpy (mdev sample device), ``linux/samples/vfio-mdev/mdpy.c``
1b36:0010
@@ -94,6 +98,8 @@ PCI devices (other than virtio):
PCI ACPI ERST device (``-device acpi-erst``)
1b36:0013
PCI UFS device (``-device ufs``)
+1b36:0014
+ PCI RISC-V IOMMU device
All these devices are documented in :doc:`index`.
diff --git a/docs/specs/rapl-msr.rst b/docs/specs/rapl-msr.rst
new file mode 100644
index 0000000..aaf0db9
--- /dev/null
+++ b/docs/specs/rapl-msr.rst
@@ -0,0 +1,154 @@
+================
+RAPL MSR support
+================
+
+The RAPL (Running Average Power Limit) interface advertises the accumulated
+energy consumption of various power domains (e.g. CPU packages, DRAM, etc.).
+
+The consumption is reported via MSRs (model specific registers) like
+MSR_PKG_ENERGY_STATUS for the CPU package power domain. These MSRs are 64-bit
+registers that report the accumulated energy consumption in micro-joules.
+
+Thanks to KVM's `MSR filtering <msr-filter-patch_>`__ functionality,
+not all MSRs are handled by KVM. Some of them can now be handled by the
+userspace (QEMU); a list of MSRs is given at VM creation time to KVM, and
+a userspace exit occurs when they are accessed.
+
+.. _msr-filter-patch: https://patchwork.kernel.org/project/kvm/patch/20200916202951.23760-7-graf@amazon.com/
+
+At the moment the following MSRs are involved:
+
+.. code:: C
+
+ #define MSR_RAPL_POWER_UNIT 0x00000606
+ #define MSR_PKG_POWER_LIMIT 0x00000610
+ #define MSR_PKG_ENERGY_STATUS 0x00000611
+ #define MSR_PKG_POWER_INFO 0x00000614
+
+The ``*_POWER_UNIT``, ``*_POWER_LIMIT`` and ``*_POWER_INFO`` MSRs are part of the
+RAPL spec: they specify the power limit of the package, provide parameter ranges
+(min power, max power, ...) and give the multiplier used to scale the energy
+counter. Those MSRs are populated once at start-up by reading the host CPU MSRs
+and are given back to the guest 1:1 when requested.
+
+The MSR_PKG_ENERGY_STATUS is a counter; it represents the total amount of
+energy consumed since the last time the register was cleared. If you multiply
+it by the unit provided above you get the energy in micro-joules. This
+counter always increases, faster or slower depending on the consumption of
+the package, and it is expected to overflow at some point.
+
+Each core belonging to the same package that reads MSR_PKG_ENERGY_STATUS (i.e.
+"rdmsr 0x611") will retrieve the same value: the value represents the energy
+for the whole package. A core that belongs to PKG-0 will not be able to get the
+value of PKG-1 and vice versa.
+
+High level implementation
+-------------------------
+
+In order to update the value of the virtual MSR, a QEMU thread is created.
+The thread is basically just an infinite loop that does the following:
+
+1. Snapshot of the time metrics of all QEMU threads (Time spent scheduled in
+ Userspace and System)
+
+2. Snapshot of the actual MSR_PKG_ENERGY_STATUS counter of all packages that
+ the QEMU threads are running on.
+
+3. Sleep for 1 second - During this pause the vcpu and other non-vcpu threads
+ will do what they have to do and so the energy counter will increase.
+
+4. Repeat 2. and 3. and calculate the delta of every metric representing the
+ time spent scheduled for each QEMU thread *and* the energy spent by the
+ packages during the pause.
+
+5. Filter the vcpu threads and the non-vcpu threads.
+
+6. Retrieve the topology of the Virtual Machine. This helps identify which
+ vCPU is running on which virtual package.
+
+7. The total energy spent by the non-vcpu threads is divided by the number
+ of vcpu threads so that each vcpu thread will get an equal part of the
+ energy spent by the QEMU workers.
+
+8. Calculate the ratio of energy spent per vcpu threads.
+
+9. Calculate the energy for each virtual package.
+
+10. The virtual MSRs are updated for each virtual package. Each vCPU that
+ belongs to the same package will return the same value when accessing the
+ MSR.
+
+11. Loop back to 1.
+
+Ratio calculation
+-----------------
+
+In Linux, a process has an execution time associated with it. The scheduler
+divides time into clock ticks; the number of clock ticks per second can be
+found with the sysconf() system call. A typical value is 100 clock ticks per
+second, so a core can run a process for at most 100 ticks per second. If a
+package has 4 cores, a maximum of 400 ticks can be scheduled on all the cores
+of the package over a period of 1 second.
+
+`/proc/[pid]/stat <stat_>`__ is a procfs file that can give the executed
+time of a process with the [pid] as the process ID. It gives the amount
+of ticks the process has been scheduled in userspace (utime) and kernel
+space (stime).
+
+.. _stat: https://man7.org/linux/man-pages/man5/proc.5.html
+
+By reading those metrics for a thread, one can calculate the ratio of time the
+package has spent executing the thread.
+
+Example:
+
+A 4-core package can schedule a maximum of 400 ticks per second, with 100 ticks
+per second per core. If a thread was scheduled for 100 ticks during one second
+on this package, the thread has been scheduled on 1/4 of the package. The energy
+spent by the thread on this package during that second is therefore 1/4 of the
+total energy spent by the package.
+
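+The per-thread energy attribution sketched above boils down to a simple
+proportion; for illustration (names and types are not taken from the QEMU
+sources):
+
+.. code:: C
+
+   #include <stdint.h>
+
+   /*
+    * Energy attributed to one thread over the sampling period:
+    * thread_ticks  - utime + stime delta of the thread,
+    * pkg_ticks     - package capacity for the period (cores * ticks/second),
+    * pkg_energy_uj - MSR_PKG_ENERGY_STATUS delta, in micro-joules.
+    */
+   static uint64_t thread_energy_uj(uint64_t thread_ticks, uint64_t pkg_ticks,
+                                    uint64_t pkg_energy_uj)
+   {
+       if (pkg_ticks == 0) {
+           return 0;
+       }
+       return pkg_energy_uj * thread_ticks / pkg_ticks;
+   }
+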
+Usage
+-----
+
+Currently this feature only works on an Intel CPU that has the RAPL driver
+mounted and available in sysfs; if not, QEMU fails at start-up.
+
+This feature is activated with
+``-accel kvm,rapl=true,rapl-helper-socket=/path/sock.sock``.
+
+It is important that the socket path is the same as the one
+:program:`qemu-vmsr-helper` is listening to.
+
+qemu-vmsr-helper
+----------------
+
+The qemu-vmsr-helper works very much like the qemu-pr-helper. Instead of
+making persistent reservations, qemu-vmsr-helper is here to overcome
+CVE-2020-8694, which removed user access to the RAPL MSR attributes.
+
+A socket communication is established between QEMU processes that have RAPL
+MSR support activated and the qemu-vmsr-helper. A systemd service and socket
+activation are provided in contrib/systemd/qemu-vmsr-helper.(service/socket).
+
+The systemd socket uses mode 600, like contrib/systemd/qemu-pr-helper.socket.
+The socket can be passed via SCM_RIGHTS by libvirt, or its permissions can be
+changed (e.g. 660 and root:kvm on a Debian system). Libvirt could also start a
+separate helper if needed. All in all, the policy is left to the user.
+
+See the qemu-pr-helper documentation or manpage for further details.
+
+Current Limitations
+-------------------
+
+- Works only on Intel host CPUs, because AMD CPUs use different MSR
+ addresses.
+
+- Only the Package Power-Plane (MSR_PKG_ENERGY_STATUS) is reported at the
+ moment.
+
diff --git a/docs/specs/riscv-aia.rst b/docs/specs/riscv-aia.rst
new file mode 100644
index 0000000..8097e2f
--- /dev/null
+++ b/docs/specs/riscv-aia.rst
@@ -0,0 +1,83 @@
+.. _riscv-aia:
+
+RISC-V AIA support for RISC-V machines
+======================================
+
+AIA (Advanced Interrupt Architecture) support is implemented in the ``virt``
+RISC-V machine for TCG and KVM accelerators.
+
+The support consists of two main modes:
+
+- "aia=aplic": adds one or more APLIC (Advanced Platform Level Interrupt Controller)
+ devices
+- "aia=aplic-imsic": adds one or more APLIC device and an IMSIC (Incoming MSI
+ Controller) device for each CPU
+
+From a user standpoint, these modes behave the same regardless of the accelerator
+used. From a developer standpoint, the accelerator settings change what is being
+emulated in userspace versus what is being emulated by an in-kernel irqchip.
+
+When running TCG, all controllers are emulated in userspace, including machine mode
+(m-mode) APLIC and IMSIC (when applicable).
+
+When running KVM:
+
+- no m-mode is provided, so there is no m-mode APLIC or IMSIC emulation regardless of
+ the AIA mode chosen
+- with "aia=aplic", s-mode APLIC will be emulated by userspace
+- with "aia=aplic-imsic" there are two possibilities. If no additional KVM option
+ is provided there will be no APLIC or IMSIC emulation in userspace, and the virtual
+ machine will use the provided in-kernel APLIC and IMSIC controllers. If the user
+ chooses to use the irqchip in split mode via "-accel kvm,kernel-irqchip=split",
+ s-mode APLIC will be emulated while using the s-mode IMSIC from the irqchip
+
+The following table summarizes how the AIA and accelerator options define what
+is emulated in userspace:
+
+
+.. list-table:: How AIA and accel options change controller emulation
+ :widths: 25 25 25 25 25 25 25
+ :header-rows: 1
+
+ * - Accel
+ - Accel props
+ - AIA type
+ - APLIC m-mode
+ - IMSIC m-mode
+ - APLIC s-mode
+ - IMSIC s-mode
+ * - tcg
+ - ---
+ - aplic
+ - emul
+ - n/a
+ - emul
+ - n/a
+ * - tcg
+ - ---
+ - aplic-imsic
+ - emul
+ - emul
+ - emul
+ - emul
+ * - kvm
+ - ---
+ - aplic
+ - n/a
+ - n/a
+ - emul
+ - n/a
+ * - kvm
+ - none
+ - aplic-imsic
+ - n/a
+ - n/a
+ - in-kernel
+ - in-kernel
+ * - kvm
+ - irqchip=split
+ - aplic-imsic
+ - n/a
+ - n/a
+ - emul
+ - in-kernel
diff --git a/docs/specs/riscv-iommu.rst b/docs/specs/riscv-iommu.rst
new file mode 100644
index 0000000..991d376
--- /dev/null
+++ b/docs/specs/riscv-iommu.rst
@@ -0,0 +1,116 @@
+.. _riscv-iommu:
+
+RISC-V IOMMU support for RISC-V machines
+========================================
+
+QEMU implements a RISC-V IOMMU emulation based on the RISC-V IOMMU spec
+version 1.0 `iommu1.0.0`_.
+
+The emulation includes a PCI reference device (riscv-iommu-pci) and a platform
+bus device (riscv-iommu-sys) that QEMU RISC-V boards can use. The 'virt'
+RISC-V machine is compatible with both devices.
+
+riscv-iommu-pci reference device
+--------------------------------
+
+This device implements the RISC-V IOMMU emulation as recommended by the section
+"Integrating an IOMMU as a PCIe device" of `iommu1.0.0`_: a PCI device with base
+class 08h, sub-class 06h and programming interface 00h.
+
+As a reference device it doesn't implement anything outside of the specification,
+so it uses a generic default PCI ID given by QEMU: 1b36:0014.
+
+To include the device in the 'virt' machine:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M virt -device riscv-iommu-pci,[optional_pci_opts] (...)
+
+This will add a RISC-V IOMMU PCI device in the board following any additional
+PCI parameters (like PCI bus address). The behavior of the RISC-V IOMMU is
+defined by the spec but its operation is OS dependent.
+
+As of this writing the existing Linux kernel support `linux-v8`_, not yet merged,
+does not have support for features like VFIO passthrough. The IOMMU emulation
+was tested using a public Ventana Micro Systems kernel repository at
+`ventana-linux`_. This kernel is based on `linux-v8`_ with additional patches that
+enable features like KVM VFIO passthrough with irqbypass. Until the kernel support
+is feature complete, feel free to use the kernel available in the Ventana Micro
+Systems mirror.
+
+The current Linux kernel support will use the IOMMU device to create IOMMU groups
+with any eligible cards available in the system, regardless of factors such as the
+order in which the devices are added in the command line.
+
+This means that these command lines are equivalent as far as the current
+IOMMU kernel driver behaves:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 \
+ -M virt,aia=aplic-imsic,aia-guests=5 \
+ -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \
+ -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \
+ -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \
+ (...)
+
+ $ qemu-system-riscv64 \
+ -M virt,aia=aplic-imsic,aia-guests=5 \
+ -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \
+ -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \
+ -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \
+ (...)
+
+Both will create iommu groups for the two e1000e cards.
+
+Another thing to notice about `linux-v8`_ and `ventana-linux`_ is that the kernel
+driver only recognizes an IOMMU identified as a Rivos device, i.e. one that uses the
+Rivos vendor ID. To use the riscv-iommu-pci device with the existing kernel support
+we need to emulate a Rivos PCI IOMMU by setting 'vendor-id' and 'device-id':
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M virt \
+ -device riscv-iommu-pci,vendor-id=0x1efd,device-id=0xedf1 (...)
+
+Several options are available to control the capabilities of the device, namely:
+
+- "bus": the bus that the IOMMU device uses
+- "ioatc-limit": size of the Address Translation Cache (default to 2Mb)
+- "intremap": enable/disable MSI support
+- "ats": enable ATS support
+- "off" (Out-of-reset translation mode: 'on' for DMA disabled, 'off' for 'BARE' (passthrough))
+- "s-stage": enable s-stage support
+- "g-stage": enable g-stage support
+- "hpm-counters": number of hardware performance counters available. Maximum value is 31.
+ Default value is 31. Use 0 (zero) to disable HPM support
+
+riscv-iommu-sys device
+----------------------
+
+This device implements the RISC-V IOMMU emulation as a platform bus device that
+RISC-V boards can use.
+
+For the 'virt' board the device is disabled by default. To enable it use the
+'iommu-sys' machine option:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M virt,iommu-sys=on (...)
+
+There are no options to configure the capabilities of this device in the 'virt'
+board using the QEMU command line. The device is configured with the following
+riscv-iommu options:
+
+- "ioatc-limit": default value (2Mb)
+- "intremap": enabled
+- "ats": enabled
+- "off": on (DMA disabled)
+- "s-stage": enabled
+- "g-stage": enabled
+
+.. _iommu1.0.0: https://github.com/riscv-non-isa/riscv-iommu/releases/download/v1.0.0/riscv-iommu.pdf
+
+.. _linux-v8: https://lore.kernel.org/linux-riscv/cover.1718388908.git.tjeznach@rivosinc.com/
+
+.. _ventana-linux: https://github.com/ventanamicro/linux/tree/dev-upstream
diff --git a/docs/specs/rocker.rst b/docs/specs/rocker.rst
new file mode 100644
index 0000000..3a7fc6a
--- /dev/null
+++ b/docs/specs/rocker.rst
@@ -0,0 +1,1015 @@
+Rocker Network Switch Register Programming Guide
+************************************************
+
+..
+ Copyright (c) Scott Feldman <sfeldma@gmail.com>
+ Copyright (c) Neil Horman <nhorman@tuxdriver.com>
+ Version 0.11, 12/29/2014
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+Introduction
+============
+
+Overview
+--------
+
+This document describes the hardware/software interface for the Rocker switch
+device. The intended audience is authors of OS drivers and device emulation
+software.
+
+Notations and Conventions
+-------------------------
+
+* In register descriptions, [n:m] indicates a range from bit n to bit m,
+ inclusive.
+* Use of leading 0x indicates a hexadecimal number.
+* Use of leading 0b indicates a binary number.
+* The use of RSVD or Reserved indicates that a bit or field is reserved for
+ future use.
+* Field width is in bytes, unless otherwise noted.
+* Registers are (R) read-only, (R/W) read/write, (W) write-only, or (COR) clear
+ on read.
+* TLV values in network-byte-order are designated with (N).
+
+
+PCI Configuration Registers
+===========================
+
+PCI Configuration Space
+-----------------------
+
+Each switch instance registers as a PCI device with PCI configuration space::
+
+ offset width description value
+ ---------------------------------------------
+ 0x0 2 Vendor ID 0x1b36
+ 0x2 2 Device ID 0x0006
+ 0x4 4 Command/Status
+ 0x8 1 Revision ID 0x01
+ 0x9 3 Class code 0x2800
+ 0xC 1 Cache line size
+ 0xD 1 Latency timer
+ 0xE 1 Header type
+ 0xF 1 Built-in self test
+ 0x10 4 Base address low
+ 0x14 4 Base address high
+ 0x18-28 Reserved
+ 0x2C 2 Subsystem vendor ID *
+ 0x2E 2 Subsystem ID *
+ 0x30-38 Reserved
+ 0x3C 1 Interrupt line
+ 0x3D 1 Interrupt pin 0x00
+ 0x3E 1 Min grant 0x00
+ 0x3D 1 Max latency 0x00
+ 0x40 1 TRDY timeout
+ 0x41 1 Retry count
+ 0x42 2 Reserved
+
+ * Assigned by sub-system implementation
+
+Memory-Mapped Register Space
+============================
+
+There are two memory-mapped BARs. BAR0 maps device register space and is
+0x2000 in size. BAR1 maps MSI-X vector and PBA tables and is also 0x2000 in
+size, allowing for 256 MSI-X vectors.
+
+All registers are 4 or 8 bytes long. It is assumed host software will access 4
+byte registers with one 4-byte access, and 8 byte registers with either two
+4-byte accesses or a single 8-byte access. In the case of two 4-byte accesses,
+access must be lower and then upper 4-bytes, in that order.
+
+BAR0 device register space is organized as follows::
+
+ offset description
+ ------------------------------------------------------
+ 0x0000-0x000f Bogus registers to catch misbehaving
+ drivers. Writes do nothing. Reads
+ back as 0xDEADBABE.
+ 0x0010-0x00ff Test registers
+ 0x0300-0x03ff General purpose registers
+ 0x1000-0x1fff Descriptor control
+
+Holes in register space are reserved. Writes to reserved registers do nothing.
+Reads to reserved registers read back as 0.
+
+No fancy stuff like write-combining is enabled on any of the registers.
+
+BAR1 MSI-X register space is organized as follows::
+
+ offset description
+ ------------------------------------------------------
+ 0x0000-0x0fff MSI-X vector table (256 vectors total)
+ 0x1000-0x1fff MSI-X PBA table
+
+
+Interrupts, DMA, and Endianness
+===============================
+
+PCI Interrupts
+--------------
+
+The device supports only MSI-X interrupts. BAR1 memory-mapped region contains
+the MSI-X vector and PBA tables, with support for up to 256 MSI-X vectors.
+
+The vector assignment is::
+
+ vector description
+ -----------------------------------------------------
+ 0 Command descriptor ring completion
+ 1 Event descriptor ring completion
+ 2 Test operation completion
+ 3 RSVD
+ 4-255 Tx and Rx descriptor ring completion
+ Tx vector is even
+ Rx vector is odd
+
+A MSI-X vector table entry is 16 bytes::
+
+ field offset width description
+ -------------------------------------------------------------
+ lower_addr 0x0 4 [31:2] message address[31:2]
+ [1:0] Rsvd (4 byte alignment
+ required)
+ upper_addr 0x4 4 [31:15] Rsvd
+ [14:0] message address[46:32]
+ data 0x8 4 message data[31:0]
+ control 0xc 4 [31:1] Rsvd
+ [0] mask (0 = enable,
+ 1 = masked)
+
+Software should install the Interrupt Service Routine (ISR) before any ports
+are enabled or any commands are issued on the command ring.
+
+DMA Operations
+--------------
+
+DMA operations are used for packet DMA to/from the CPU, command and event
+processing. Command processing includes statistical counters and table dumps,
+table insertion/deletion, and more. Event processing provides an async
+notification method for device-originating events. Each DMA operation has a
+set of control registers to manage a descriptor ring. The descriptor rings are
+allocated from contiguous host DMA-able memory, and registers specify the
+ring's base address, size, and current head and tail indices. Software always
+writes the head, and hardware always writes the tail.
+
+The higher-order bit of DMA_DESC_COMP_ERR is used to mark hardware completion
+of a descriptor. Software will clear this bit when posting a descriptor to the
+ring, and hardware will set this bit when the descriptor is complete.
+
+Descriptor ring sizes must be a power of 2 and range from 2 to 64K entries.
+Descriptor rings' base address must be 8-byte aligned. Descriptors must be
+packed within the ring. Each descriptor in each ring must also be aligned on an
+8-byte boundary. Each descriptor ring will have these registers::
+
+ DMA_DESC_xxx_BASE_ADDR, offset 0x1000 + (x * 32), 64-bit, (R/W)
+ DMA_DESC_xxx_SIZE, offset 0x1008 + (x * 32), 32-bit, (R/W)
+ DMA_DESC_xxx_HEAD, offset 0x100c + (x * 32), 32-bit, (R/W)
+ DMA_DESC_xxx_TAIL, offset 0x1010 + (x * 32), 32-bit, (R)
+ DMA_DESC_xxx_CTRL, offset 0x1014 + (x * 32), 32-bit, (W)
+ DMA_DESC_xxx_CREDITS, offset 0x1018 + (x * 32), 32-bit, (R/W)
+ DMA_DESC_xxx_RSVD1, offset 0x101c + (x * 32), 32-bit, (R/W)
+
+Where x is descriptor ring index::
+
+ index ring
+ --------------------
+ 0 CMD
+ 1 EVENT
+ 2 TX (port 0)
+ 3 RX (port 0)
+ 4 TX (port 1)
+ 5 RX (port 1)
+ .
+ .
+ .
+ 124 TX (port 61)
+ 125 RX (port 61)
+ 126 Resv
+ 127 Resv
+
+Writing BASE_ADDR or SIZE will reset HEAD and TAIL to zero. HEAD cannot be
+written past TAIL. To do so would wrap the ring. An empty ring is when HEAD
+== TAIL. A full ring is when HEAD is one position behind TAIL. Both HEAD and
+TAIL increment and modulo wrap at the ring size.
+
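+The per-ring register offsets and the HEAD/TAIL wrap rules can be written down
+directly from the tables above; the following C sketch is illustrative only and
+is not a required driver interface::
+
+  #include <stdint.h>
+
+  /* per-ring register offsets, where x is the descriptor ring index above */
+  #define DMA_DESC_REG(x, r)     (0x1000 + (x) * 32 + (r))
+  #define DMA_DESC_BASE_ADDR(x)  DMA_DESC_REG(x, 0x00)   /* 64-bit */
+  #define DMA_DESC_SIZE(x)       DMA_DESC_REG(x, 0x08)   /* 32-bit */
+  #define DMA_DESC_HEAD(x)       DMA_DESC_REG(x, 0x0c)   /* 32-bit */
+  #define DMA_DESC_TAIL(x)       DMA_DESC_REG(x, 0x10)   /* 32-bit */
+
+  /* HEAD and TAIL wrap modulo the (power-of-2) ring size */
+  static inline uint32_t ring_next(uint32_t index, uint32_t size)
+  {
+      return (index + 1) & (size - 1);
+  }
+
+  static inline int ring_empty(uint32_t head, uint32_t tail)
+  {
+      return head == tail;
+  }
+
+  static inline int ring_full(uint32_t head, uint32_t tail, uint32_t size)
+  {
+      return ring_next(head, size) == tail; /* HEAD one position behind TAIL */
+  }
+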
+CTRL register bits::
+
+ bit name description
+ ------------------------------------------------------------------------
+ [0] CTRL_RESET Reset the descriptor ring
+ [1:31] Reserved
+
+All descriptor types share some common fields::
+
+ field width description
+ -------------------------------------------------------------------
+ DMA_DESC_BUF_ADDR 8 Phys addr of desc payload, 8-byte
+ aligned
+ DMA_DESC_COOKIE 8 Desc cookie for completion matching,
+ upper-most bit is reserved
+ DMA_DESC_BUF_SIZE 2 Desc payload size in bytes
+ DMA_DESC_TLV_SIZE 2 Desc payload total size in bytes
+ used for TLVs. Must be <=
+ DMA_DESC_BUF_SIZE.
+ DMA_DESC_COMP_ERR 2 Completion status of associated
+ desc payload. High order bit is
+ clear on new descs, toggled by
+ hw for completed items.
+
+To support forward- and backward-compatibility, descriptor and completion
+payloads are specified in TLV format. Fields are packed with Type=field name,
+Length=field length, and Value=field value. Software will ignore unknown fields
+filled in by the switch. Likewise, the switch will ignore unknown fields
+filled in by software.
+
+Descriptor payload buffer is 8-byte aligned and TLVs are 8-byte aligned. The
+value within a TLV is also 8-byte aligned. The (packed, 8 byte) TLV header is::
+
+ field width description
+ -----------------------------
+ type 4 TLV type
+ len 2 TLV value length
+ pad 2 Reserved
+
+The alignment requirements for descriptors and TLVs are to avoid unaligned
+access exceptions in software. Note that the payload for each TLV is also
+8 byte aligned.
+
+Figure 1 shows an example descriptor buffer with two TLVs::
+
+ <------- 8 bytes ------->
+
+ 8-byte +––––+ +–––––––––––+–––––+–––––+ +–+
+ align | type | len | pad | TLV#1 hdr |
+ +–––––––––––+–––––+–––––+ (len=22) |
+ | | |
+ | value | TLV#1 value |
+ | | (padded to 8-byte |
+ | +–––––+ alignment) |
+ | |/////| |
+ 8-byte +––––+ +–––––––––––+–––––––––––+ |
+ align | type | len | pad | TLV#2 hdr DESC_BUF_SIZE
+ +–––––+–––––+–––––+–––––+ (len=2) |
+ |value|/////////////////| TLV#2 value |
+ +–––––+/////////////////| |
+ |///////////////////////| |
+ |///////////////////////| |
+ |///////////////////////| |
+ |////////unused/////////| |
+ |////////space//////////| |
+ |///////////////////////| |
+ |///////////////////////| |
+ |///////////////////////| |
+ +–––––––––––––––––––––––+ +–+
+
+ fig. 1
+
+TLVs can be nested within the NEST TLV type.
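+
+For illustration, the header maps onto a small C structure and append helper
+such as the sketch below; this is not a required driver interface, and it
+assumes a little-endian host so no conversion of the type and len fields is
+shown::
+
+  #include <stdint.h>
+  #include <string.h>
+
+  struct rocker_tlv {
+      uint32_t type;   /* TLV type */
+      uint16_t len;    /* length of the value, excluding header and padding */
+      uint16_t pad;    /* reserved */
+  };                   /* naturally 8 bytes, keeping values 8-byte aligned */
+
+  #define TLV_ALIGN(len)  (((len) + 7) & ~7u)
+
+  /* append one TLV to 'buf' at byte offset '*off'; returns 0 on success */
+  static int tlv_put(uint8_t *buf, size_t buf_size, size_t *off,
+                     uint32_t type, const void *value, uint16_t len)
+  {
+      struct rocker_tlv *tlv = (struct rocker_tlv *)(buf + *off);
+      size_t need = sizeof(*tlv) + TLV_ALIGN(len);
+
+      if (*off + need > buf_size)
+          return -1;
+      tlv->type = type;
+      tlv->len = len;
+      tlv->pad = 0;
+      memcpy(tlv + 1, value, len);
+      /* zero the pad-to-8 tail so the next TLV starts 8-byte aligned */
+      memset((uint8_t *)(tlv + 1) + len, 0, TLV_ALIGN(len) - len);
+      *off += need;
+      return 0;
+  }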
+
+Interrupt credits
+^^^^^^^^^^^^^^^^^
+
+MSI-X vectors used for descriptor ring completions use a credit mechanism for
+efficient device, PCIe bus, OS and driver operations. Each descriptor ring has
+a credit count which represents the number of outstanding descriptors to be
+processed by the driver. As the device marks descriptors complete, the credit
+count is incremented. As the driver processes those outstanding descriptors,
+it returns credits back to the device. This way, the device knows the driver's
+progress and can make decisions about when to fire the next interrupt or not.
+When the credit count is zero, and the first descriptors are posted for the
+driver, a single interrupt is fired. Once the interrupt is fired, the
+interrupt is disabled (auto-masked*). In response to the interrupt, the driver
+will process descriptors and PIO write a returned credit value for that
+descriptor ring. If the driver returns all credits (the driver caught up with
+the device and there is no outstanding work), then the interrupt is unmasked,
+but not fired. If only partial credits are returned, the interrupt remains
+masked but the device generates an interrupt, signaling the driver that more
+outstanding work is available.
+
+(* this masking is unrelated to the MSI-X interrupt mask register)
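+
+For illustration, a driver's completion path for descriptor ring x might
+return credits with a single register write, as in this sketch
+(``reg_write32`` is an assumed MMIO helper, not something defined by this
+specification)::
+
+  #include <stdint.h>
+
+  #define DMA_DESC_CREDITS(x)  (0x1000 + (x) * 32 + 0x18)
+
+  /* assumed MMIO helper, not defined by this specification */
+  void reg_write32(volatile void *bar0, uint32_t off, uint32_t val);
+
+  /* called after servicing completions on descriptor ring 'ring' */
+  static void ring_return_credits(volatile void *bar0, int ring,
+                                  uint32_t descs_processed)
+  {
+      /*
+       * Returning the number of descriptors just processed tells the device
+       * how far the driver has caught up.  If all outstanding credits come
+       * back, the vector is unmasked; otherwise the device interrupts again
+       * to signal the remaining work.
+       */
+      reg_write32(bar0, DMA_DESC_CREDITS(ring), descs_processed);
+  }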
+
+Endianness
+----------
+
+Device registers are hard-coded to little-endian (LE). The driver should
+convert to/from host endianness to LE for device register accesses.
+
+Descriptors are LE. Descriptor buffer TLVs will have LE type and length
+fields, but the value field can either be LE or network-byte-order, depending
+on context. TLV values containing network packet data will be in network-byte
+order. A TLV value containing a field or mask used to compare against network
+packet data is network-byte order. For example, flow match fields (and masks)
+are network-byte-order since they're matched directly, byte-by-byte, against
+network packet data. All non-network-packet TLV multi-byte values will be LE.
+
+TLV values in network-byte-order are designated with (N).
+
+
+Test Registers
+==============
+
+Rocker has several test registers to support troubleshooting register access,
+interrupt generation, and DMA operations::
+
+ TEST_REG, offset 0x0010, 32-bit (R/W)
+ TEST_REG64, offset 0x0018, 64-bit (R/W)
+ TEST_IRQ, offset 0x0020, 32-bit (R/W)
+ TEST_DMA_ADDR, offset 0x0028, 64-bit (R/W)
+ TEST_DMA_SIZE, offset 0x0030, 32-bit (R/W)
+ TEST_DMA_CTRL, offset 0x0034, 32-bit (R/W)
+
+Reads to TEST_REG and TEST_REG64 will read a value equal to twice the last
+value written to the register. The 32-bit and 64-bit versions are for testing
+32-bit and 64-bit host accesses.
+
+A vector can be written to TEST_IRQ and the device will generate an interrupt
+for that vector.
+
+To test basic DMA operations, allocate a DMA-able host buffer and put the
+buffer address into TEST_DMA_ADDR and size into TEST_DMA_SIZE. Then, write to
+TEST_DMA_CTRL to manipulate the buffer contents. TEST_DMA_CTRL operations are::
+
+ operation value description
+ -----------------------------------------------------------
+ TEST_DMA_CTRL_CLEAR 1 clear buffer
+ TEST_DMA_CTRL_FILL 2 fill buffer bytes with 0x96
+ TEST_DMA_CTRL_INVERT 4 invert bytes in buffer
+
+Various buffer addresses and sizes should be tested to verify that no address
+boundary issues exist. In particular, buffers that start on an odd 8-byte
+boundary and/or span multiple pages should be tested.
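+
+An illustrative self-test sketch using these registers follows; the ``reg_*``
+accessors are assumed MMIO helpers, and ``dma_buf``/``dma_addr`` stand for an
+already-allocated DMA-able buffer and its bus address::
+
+  #include <stdint.h>
+
+  #define TEST_REG            0x0010
+  #define TEST_DMA_ADDR       0x0028
+  #define TEST_DMA_SIZE       0x0030
+  #define TEST_DMA_CTRL       0x0034
+  #define TEST_DMA_CTRL_FILL  2
+
+  /* assumed MMIO helpers, not defined by this specification */
+  uint32_t reg_read32(volatile void *bar0, uint32_t off);
+  void reg_write32(volatile void *bar0, uint32_t off, uint32_t val);
+  void reg_write64(volatile void *bar0, uint32_t off, uint64_t val);
+
+  static int rocker_basic_test(volatile void *bar0, uint8_t *dma_buf,
+                               uint64_t dma_addr, uint32_t size)
+  {
+      uint32_t i;
+
+      /* TEST_REG reads back twice the last value written */
+      reg_write32(bar0, TEST_REG, 0x1234);
+      if (reg_read32(bar0, TEST_REG) != 2 * 0x1234)
+          return -1;
+
+      /* point the device at the buffer and ask it to fill it with 0x96 */
+      reg_write64(bar0, TEST_DMA_ADDR, dma_addr);
+      reg_write32(bar0, TEST_DMA_SIZE, size);
+      reg_write32(bar0, TEST_DMA_CTRL, TEST_DMA_CTRL_FILL);
+
+      /* ... wait for the test-completion interrupt (vector 2), then check */
+      for (i = 0; i < size; i++) {
+          if (dma_buf[i] != 0x96)
+              return -1;
+      }
+      return 0;
+  }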
+
+
+Ports
+=====
+
+Physical and Logical Ports
+------------------------------------
+
+The switch supports up to 62 physical (front-panel) ports. Register
+PORT_PHYS_COUNT returns the actual number of physical ports available::
+
+ PORT_PHYS_COUNT, offset 0x0304, 32-bit, (R)
+
+In addition to front-panel ports, the switch supports logical ports for
+tunnels.
+
+Front-panel ports and logical tunnel ports are mapped into a single 32-bit port
+space. A special CPU port is assigned port 0. The front-panel ports are
+mapped to ports 1-62. A special loopback port is assigned port 63. Logical
+tunnel ports are assigned ports 0x00010000-0x0001ffff.
+To summarize the port assignments::
+
+ port mapping
+ -------------------------------------------------------
+ 0 CPU port (for packets to/from host CPU)
+ 1-62 front-panel physical ports
+ 63 loopback port
+ 64-0x0000ffff RSVD
+ 0x00010000-0x0001ffff logical tunnel ports
+ 0x00020000-0xffffffff RSVD
+
+Physical Port Mode
+------------------
+
+Switch front-panel ports operate in a configurable mode. Currently, the only
+mode is OF-DPA. OF-DPA[1] mode is based on the OpenFlow Data Plane Abstraction
+(OF-DPA) Abstract Switch Specification, Version 1.0, from Broadcom Corporation.
+To set/get the mode for front-panel ports, see port settings, below.
+
+Port Settings
+-------------
+
+Link status for all front-panel ports is available via PORT_PHYS_LINK_STATUS::
+
+ PORT_PHYS_LINK_STATUS, offset 0x0310, 64-bit, (R)
+
+ Value is port bitmap. Bits 0 and 63 always read 0. Bits 1-62
+ read 1 for link UP and 0 for link DOWN for respective front-panel ports.
+
+Other properties for front-panel ports are available via DMA CMD descriptors::
+
+ Get PORT_SETTINGS descriptor:
+
+ field width description
+ ----------------------------------------------
+ PORT_SETTINGS 2 CMD_GET
+ PPORT 4 Physical port #
+
+ Get PORT_SETTINGS completion:
+
+ field width description
+ ----------------------------------------------
+ PPORT 4 Physical port #
+ SPEED 4 Current port interface speed, in Mbps
+ DUPLEX 1 1 = Full, 0 = Half
+ AUTONEG 1 1 = enabled, 0 = disabled
+ MACADDR 6 Port MAC address
+ MODE 1 0 = OF-DPA
+ LEARNING 1 MAC address learning on port
+ 1 = enabled
+ 0 = disabled
+ PHYS_NAME <var> Physical port name (string)
+
+ Set PORT_SETTINGS descriptor:
+
+ field width description
+ ----------------------------------------------
+ PORT_SETTINGS 2 CMD_SET
+ PPORT 4 Physical port #
+ SPEED 4 Port interface speed, in Mbps
+ DUPLEX 1 1 = Full, 0 = Half
+ AUTONEG 1 1 = enabled, 0 = disabled
+ MACADDR 6 Port MAC address
+ MODE 1 0 = OF-DPA
+
+Port Enable
+-----------
+
+Front-panel ports are initially disabled, which means port ingress and egress
+packets will be dropped. To enable or disable a port, use PORT_PHYS_ENABLE::
+
+ PORT_PHYS_ENABLE: offset 0x0318, 64-bit, (R/W)
+
+ Value is bitmap of first 64 ports. Bits 0 and 63 are ignored
+ and always read as 0. Write 1 to enable port; write 0 to disable it.
+ Default is 0.
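+
+For example, a driver could enable a single port with a 64-bit
+read-modify-write of the bitmap, as in this sketch (the ``reg_read64`` and
+``reg_write64`` accessors are assumed helpers, not part of this
+specification)::
+
+  #include <stdint.h>
+
+  #define PORT_PHYS_ENABLE  0x0318
+
+  /* assumed MMIO helpers, not defined by this specification */
+  uint64_t reg_read64(volatile void *bar0, uint32_t off);
+  void reg_write64(volatile void *bar0, uint32_t off, uint64_t val);
+
+  /* enable or disable front-panel port 'pport' (valid range is 1-62) */
+  static void rocker_port_set_enable(volatile void *bar0, unsigned int pport,
+                                     int enable)
+  {
+      uint64_t bitmap = reg_read64(bar0, PORT_PHYS_ENABLE);
+
+      if (enable)
+          bitmap |= UINT64_C(1) << pport;
+      else
+          bitmap &= ~(UINT64_C(1) << pport);
+      reg_write64(bar0, PORT_PHYS_ENABLE, bitmap);
+  }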
+
+
+Switch Control
+==============
+
+This section covers switch-wide register settings.
+
+Control
+-------
+
+This register is used for low level control of the switch::
+
+ CONTROL: offset 0x0300, 32-bit, (W)
+
+ bit name description
+ ------------------------------------------------------------------------
+ [0] CONTROL_RESET If set, device will perform reset
+ [1:31] Reserved
+
+Switch ID
+---------
+
+The switch has a SWITCH_ID to be used by software to uniquely identify the
+switch::
+
+ SWITCH_ID: offset 0x0320, 64-bit, (R)
+
+ Value is opaque to switch software and no special encoding is implied.
+
+
+Events
+======
+
+The host is notified of non-I/O asynchronous events from the device using the
+event ring. The TLV structure for events is::
+
+ field width description
+ ---------------------------------------------------
+ TYPE 4 Event type, one of:
+ 1: LINK_CHANGED
+ 2: MAC_VLAN_SEEN
+ INFO <nest> Event info (details below)
+
+Link Changed Event
+------------------
+
+When link status changes on a physical port, this event is generated::
+
+ field width description
+ ---------------------------------------------------
+ INFO <nest>
+ PPORT 4 Physical port
+ LINKUP 1 Link status:
+ 0: down
+ 1: up
+
+MAC VLAN Seen Event
+-------------------
+
+When a packet ingresses on a port and the source MAC/VLAN isn't known to the
+device, the device will generate this event. In response to the event, the
+driver should install the MAC/VLAN for that port into the device's bridge
+table. Once installed, the MAC/VLAN is known on the port and this event will
+no longer be generated.
+
+::
+
+ field width description
+ ---------------------------------------------------
+ INFO <nest>
+ PPORT 4 Physical port
+ MAC 6 MAC address
+ VLAN 2 VLAN ID
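+
+An illustrative dispatch on the event TYPE is sketched below; only the TYPE
+values come from this specification, and the TLV parsing that extracts them is
+left to an assumed, unspecified helper::
+
+  #include <stdint.h>
+
+  enum {
+      ROCKER_EVENT_LINK_CHANGED  = 1,   /* TYPE values from the table above */
+      ROCKER_EVENT_MAC_VLAN_SEEN = 2,
+  };
+
+  /* 'type' and 'info' are assumed to have been pulled out of the event
+   * descriptor's TYPE and INFO TLVs by a TLV parser (not shown) */
+  static void rocker_handle_event(uint32_t type, const void *info)
+  {
+      (void)info;   /* placeholder */
+
+      switch (type) {
+      case ROCKER_EVENT_LINK_CHANGED:
+          /* parse PPORT and LINKUP from the INFO nest, update carrier state */
+          break;
+      case ROCKER_EVENT_MAC_VLAN_SEEN:
+          /* parse PPORT, MAC and VLAN, then install the MAC/VLAN into the
+           * bridging table so this event stops being generated for it */
+          break;
+      default:
+          /* unknown event types are ignored for forward compatibility */
+          break;
+      }
+  }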
+
+
+CPU Packet Processing
+=====================
+
+Ingress packets directed to the host CPU for further processing are delivered
+in the DMA RX ring. Likewise, host CPU originating packets destined to egress
+on switch ports are scheduled by software using the DMA TX ring.
+
+Tx Packet Processing
+--------------------
+
+Software schedules packets for egress on switch ports using the DMA TX ring. A
+TX descriptor buffer describes the packet location and size in host DMA-able
+memory, the destination port, and any hardware-offload functions (such as L3
+payload checksum offload). Software then bumps the descriptor head to signal
+hardware of new Tx work. In response, hardware will DMA read Tx descriptors up
+to head, DMA read descriptor buffer and packet data, perform offloading
+functions, and finally frame the packet on the wire. Once packet processing
+is complete, hardware will write back status to the descriptor(s) to signal to
+software that Tx is complete and software resources (e.g. skb) backing packet
+can be released.
+
+Figure 2 shows an example 3-fragment packet queued with one Tx descriptor. A
+TLV is used for each packet fragment::
+
+ pkt frag 1
+ +–––––––+ +–+
+ +–––+ | |
+ desc buf | | | |
+ +––––––––+ | | | |
+ Tx ring +–––+ +–––––+ | | |
+ +–––––––––+ | | TLVs | +–––––––+ |
+ | +–––+ +––––––––+ pkt frag 2 |
+ | desc 0 | | +–––––+ +–––––––+ |
+ +–––––––––+ | TLVs | +–––+ | |
+ head+–+ | +––––––––+ | | |
+ | desc 1 | | +–––––+ +–––––––+ |pkt
+ +–––––––––+ | TLVs | | |
+ | | +––––––––+ | pkt frag 3 |
+ | | | +–––––––+ |
+ +–––––––––+ +–––+ | |
+ | | | | |
+ | | | | |
+ +–––––––––+ | | |
+ | | | | |
+ | | | | |
+ +–––––––––+ | | |
+ | | +–––––––+ +–+
+ | |
+ +–––––––––+
+
+ fig 2.
+
+The TLVs for Tx descriptor buffer are::
+
+ field width description
+ ---------------------------------------------------------------------
+ PPORT 4 Destination physical port #
+ TX_OFFLOAD 1 Hardware offload modes:
+ 0: no offload
+ 1: insert IP csum (ipv4 only)
+ 2: insert TCP/UDP csum
+ 3: L3 csum calc and insert
+ into csum offset (TX_L3_CSUM_OFF)
+ 16-bit 1's complement csum value.
+ IPv4 pseudo-header and IP
+ already calculated by OS
+ and inserted.
+ 4: TSO (TCP Segmentation Offload)
+ TX_L3_CSUM_OFF 2 For L3 csum offload mode, the offset,
+ from the beginning of the packet,
+ of the csum field in the L3 header
+ TX_TSO_MSS 2 For TSO offload mode, the
+ Maximum Segment Size in bytes
+ TX_TSO_HDR_LEN 2 For TSO offload mode, the
+ length of ethernet, IP, and
+ TCP/UDP headers, including IP
+ and TCP options.
+ TX_FRAGS <array> Packet fragments
+ TX_FRAG <nest> Packet fragment
+ TX_FRAG_ADDR 8 DMA address of packet fragment
+ TX_FRAG_LEN 2 Packet fragment length
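+
+As a worked example of TX_L3_CSUM_OFF, for an IPv4/TCP packet the offset
+points at the TCP checksum field; the sketch below assumes an untagged 14-byte
+Ethernet header and is purely illustrative::
+
+  #include <stdint.h>
+
+  #define ETH_HDR_LEN      14   /* untagged Ethernet header (no VLAN) */
+  #define TCP_CSUM_OFFSET  16   /* checksum offset within the TCP header */
+  #define UDP_CSUM_OFFSET   6   /* checksum offset within the UDP header */
+
+  /* ip_hdr_len is the IPv4 header length in bytes (IHL * 4) */
+  static uint16_t tx_l3_csum_off_tcp(uint16_t ip_hdr_len)
+  {
+      return ETH_HDR_LEN + ip_hdr_len + TCP_CSUM_OFFSET;
+  }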
+
+Possible status return codes in descriptor on completion are::
+
+ DESC_COMP_ERR reason
+ --------------------------------------------------------------------
+ 0 OK
+ -ROCKER_ENXIO address or data read err on desc buf or packet
+ fragment
+ -ROCKER_EINVAL bad pport or TSO or csum offloading error
+ -ROCKER_ENOMEM no memory for internal staging tx fragment
+
+Rx Packet Processing
+--------------------
+
+Packets ingressing on switch ports that are not forwarded by the switch, but
+rather directed to the host CPU for further processing, are delivered in the
+DMA RX ring. Rx descriptor buffers are allocated by software and placed on the
+ring. Hardware will fill Rx descriptor buffers with packet data, write the
+completion, and signal to software that a new packet is ready. Since Rx packet
+size is not known a-priori, the Rx descriptor buffer must be allocated for
+worst-case packet size. A single Rx descriptor will contain the entire Rx
+packet data in one RX_FRAG. Other Rx TLVs describe any hardware offloads
+performed on the packet, such as checksum validation.
+
+The TLVs for Rx descriptor buffer are::
+
+ field width description
+ ---------------------------------------------------
+ PPORT 4 Source physical port #
+ RX_FLAGS 2 Packet parsing flags:
+ (1 << 0): IPv4 packet
+ (1 << 1): IPv6 packet
+ (1 << 2): csum calculated
+ (1 << 3): IPv4 csum good
+ (1 << 4): IP fragment
+ (1 << 5): TCP packet
+ (1 << 6): UDP packet
+ (1 << 7): TCP/UDP csum good
+ (1 << 8): Offload forward
+ RX_CSUM 2 IP calculated checksum:
+ IPv4: IP payload csum
+ IPv6: header and payload csum
+ (Only valid if RX_FLAGS:csum calc is set)
+ RX_FRAG_ADDR 8 DMA address of packet fragment
+ RX_FRAG_MAX_LEN 2 Packet maximum fragment length
+ RX_FRAG_LEN 2 Actual packet fragment length after receive
+
+Offload forward RX_FLAG indicates the device has already forwarded the packet
+so the host CPU should not also forward the packet.
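+
+An illustrative driver-side check of RX_FLAGS is sketched below; the flag
+values are taken from the table above, and everything else is a placeholder::
+
+  #include <stdint.h>
+
+  #define ROCKER_RX_FLAG_IPV4            (1 << 0)
+  #define ROCKER_RX_FLAG_CSUM_CALC       (1 << 2)
+  #define ROCKER_RX_FLAG_IPV4_CSUM_GOOD  (1 << 3)
+  #define ROCKER_RX_FLAG_L4_CSUM_GOOD    (1 << 7)   /* TCP/UDP csum good */
+  #define ROCKER_RX_FLAG_FWD_OFFLOAD     (1 << 8)   /* offload forward */
+
+  static void rocker_rx_complete(uint16_t rx_flags)
+  {
+      if ((rx_flags & ROCKER_RX_FLAG_CSUM_CALC) &&
+          (rx_flags & ROCKER_RX_FLAG_L4_CSUM_GOOD)) {
+          /* the device already verified the L4 checksum; skip software csum */
+      }
+      if (rx_flags & ROCKER_RX_FLAG_FWD_OFFLOAD) {
+          /* the device already forwarded this packet; deliver it locally
+           * but do not forward it again from the host */
+      }
+  }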
+
+Possible status return codes in descriptor on completion are::
+
+ DESC_COMP_ERR reason
+ --------------------------------------------------------------------
+ 0 OK
+ -ROCKER_ENXIO address or data read err on desc buf
+ -ROCKER_ENOMEM no memory for internal staging desc buf
+ -ROCKER_EMSGSIZE Rx descriptor buffer wasn't big enough to contain
+ packet data TLV and other TLVs.
+
+
+OF-DPA Mode
+===========
+
+OF-DPA mode allows the switch to offload flow packet processing functions to
+hardware. An OpenFlow controller would communicate with an OpenFlow agent
+installed on the switch. The OpenFlow agent would (directly or indirectly)
+communicate with the Rocker switch driver, which in turn would program switch
+hardware with flow functionality, as defined in OF-DPA. The block diagram is::
+
+ +–––––––––––––––----–––+
+ | OF |
+ | Remote Controller |
+ +––––––––+––----–––––––+
+ |
+ |
+ +––––––––+–––––––––+
+ | OF |
+ | Local Agent |
+ +––––––––––––––––––+
+ | |
+ | Rocker Driver |
+ +––––––––––––––––––+
+ <this spec>
+ +––––––––––––––––––+
+ | |
+ | Rocker Switch |
+ +––––––––––––––––––+
+
+To participate in flow functions, ports must be configured for OF-DPA mode
+during switch initialization.
+
+OF-DPA Flow Table Interface
+---------------------------
+
+There are commands to add, modify, delete, and get stats of flow table entries.
+The commands are issued using the DMA CMD descriptor ring. The following
+commands are defined::
+
+ CMD_ADD: add an entry to flow table
+ CMD_MOD: modify an entry in flow table
+ CMD_DEL: delete an entry from flow table
+ CMD_GET_STATS: get stats for flow entry
+
+TLVs for add and modify commands are::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_CMD 2 CMD_[ADD|MOD]
+ OF_DPA_TBL 2 Flow table ID
+ 0: ingress port
+ 10: vlan
+ 20: termination mac
+ 30: unicast routing
+ 40: multicast routing
+ 50: bridging
+ 60: ACL policy
+ OF_DPA_PRIORITY 4 Flow priority
+ OF_DPA_HARDTIME 4 Hard timeout for flow
+ OF_DPA_IDLETIME 4 Idle timeout for flow
+ OF_DPA_COOKIE 8 Cookie
+
+Additional TLVs based on flow table ID:
+
+Table ID 0: ingress port::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_IN_PPORT 4 ingress physical port number
+ OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
+
+Table ID 10: vlan::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_IN_PPORT 4 ingress physical port number
+ OF_DPA_VLAN_ID 2 (N) vlan ID
+ OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
+ OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
+ OF_DPA_NEW_VLAN_ID 2 (N) new vlan ID
+
+Table ID 20: termination mac::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_IN_PPORT 4 ingress physical port number
+ OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask
+ OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
+ OF_DPA_DST_MAC 6 (N) destination MAC
+ OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
+ OF_DPA_VLAN_ID 2 (N) vlan ID
+ OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
+ OF_DPA_GOTO_TBL 2 only acceptable values are
+ unicast or multicast routing
+ table IDs
+ OF_DPA_OUT_PPORT 2 if specified, must be
+ controller, set zero otherwise
+
+Table ID 30: unicast routing::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
+ OF_DPA_DST_IP 4 (N) destination IPv4 address.
+ Must be unicast address
+ OF_DPA_DST_IP_MASK 4 (N) IP mask. Must be prefix mask
+ OF_DPA_DST_IPV6 16 (N) destination IPv6 address.
+ Must be unicast address
+ OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask. Must be prefix mask
+ OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
+ OF_DPA_GROUP_ID 4 data for GROUP action must
+ be an L3 Unicast group entry
+
+Table ID 40: multicast routing::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
+ OF_DPA_VLAN_ID 2 (N) vlan ID
+ OF_DPA_SRC_IP 4 (N) source IPv4. Optional,
+ can contain IPv4 address,
+ must be completely masked
+ if not used
+ OF_DPA_SRC_IP_MASK 4 (N) IP Mask
+ OF_DPA_DST_IP 4 (N) destination IPv4 address.
+ Must be multicast address
+ OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional.
+ Can contain IPv6 address,
+ must be completely masked
+ if not used
+ OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask.
+ OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must
+ be multicast address
+ OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
+ OF_DPA_GROUP_ID 4 data for GROUP action must
+ be an L3 multicast group entry
+
+Table ID 50: bridging::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_VLAN_ID 2 (N) vlan ID
+ OF_DPA_TUNNEL_ID 4 tunnel ID
+ OF_DPA_DST_MAC 6 (N) destination MAC
+ OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
+ OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
+ OF_DPA_GROUP_ID 4 data for GROUP action must
+ be a L2 Interface, L2
+ Multicast, L2 Flood,
+ or L2 Overlay group entry
+ as appropriate
+ OF_DPA_TUNNEL_LPORT 4 unicast Tenant Bridging
+ flows specify a tunnel
+ logical port ID
+ OF_DPA_OUT_PPORT 2 data for OUTPUT action,
+ restricted to CONTROLLER,
+ set to 0 otherwise
+
+Table ID 60: acl policy::
+
+ field width description
+ ----------------------------------------------------
+ OF_DPA_IN_PPORT 4 ingress physical port number
+ OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask
+ OF_DPA_ETHERTYPE 2 (N) ethertype
+ OF_DPA_VLAN_ID 2 (N) vlan ID
+ OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
+ OF_DPA_VLAN_PCP 2 (N) vlan Priority Code Point
+ OF_DPA_VLAN_PCP_MASK 2 (N) vlan Priority Code Point mask
+ OF_DPA_SRC_MAC 6 (N) source MAC
+ OF_DPA_SRC_MAC_MASK 6 (N) source MAC mask
+ OF_DPA_DST_MAC 6 (N) destination MAC
+ OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
+ OF_DPA_TUNNEL_ID 4 tunnel ID
+ OF_DPA_SRC_IP 4 (N) source IPv4. Optional,
+ can contain IPv4 address,
+ must be completely masked
+ if not used
+ OF_DPA_SRC_IP_MASK 4 (N) IP Mask
+ OF_DPA_DST_IP 4 (N) destination IPv4 address.
+ Must be multicast address
+ OF_DPA_DST_IP_MASK 4 (N) IP Mask
+ OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional.
+ Can contain IPv6 address,
+ must be completely masked
+ if not used
+ OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask
+ OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must
+ be multicast address.
+ OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask
+ OF_DPA_SRC_ARP_IP 4 (N) source IPv4 address in the ARP
+ payload. Only used if ethertype
+ == 0x0806.
+ OF_DPA_SRC_ARP_IP_MASK 4 (N) IP Mask
+ OF_DPA_IP_PROTO 1 IP protocol
+ OF_DPA_IP_PROTO_MASK 1 IP protocol mask
+ OF_DPA_IP_DSCP 1 DSCP
+ OF_DPA_IP_DSCP_MASK 1 DSCP mask
+ OF_DPA_IP_ECN 1 ECN
+ OF_DPA_IP_ECN_MASK 1 ECN mask
+ OF_DPA_L4_SRC_PORT 2 (N) L4 source port, only for
+ TCP, UDP, or SCTP
+ OF_DPA_L4_SRC_PORT_MASK 2 (N) L4 source port mask
+ OF_DPA_L4_DST_PORT 2 (N) L4 destination port, only for
+ TCP, UDP, or SCTP
+ OF_DPA_L4_DST_PORT_MASK 2 (N) L4 destination port mask
+ OF_DPA_ICMP_TYPE 1 ICMP type, only if IP
+ protocol is 1
+ OF_DPA_ICMP_TYPE_MASK 1 ICMP type mask
+ OF_DPA_ICMP_CODE 1 ICMP code
+ OF_DPA_ICMP_CODE_MASK 1 ICMP code mask
+ OF_DPA_IPV6_LABEL 4 (N) IPv6 flow label
+ OF_DPA_IPV6_LABEL_MASK 4 (N) IPv6 flow label mask
+ OF_DPA_GROUP_ID 4 data for GROUP action
+ OF_DPA_QUEUE_ID_ACTION 1 write the queue ID
+ OF_DPA_NEW_QUEUE_ID 1 queue ID
+ OF_DPA_VLAN_PCP_ACTION 1 write the VLAN priority
+ OF_DPA_NEW_VLAN_PCP 1 VLAN priority
+ OF_DPA_IP_DSCP_ACTION 1 write the DSCP
+ OF_DPA_NEW_IP_DSCP 1 new DSCP
+ OF_DPA_TUNNEL_LPORT 4 restrict to valid tunnel
+ logical port, set to 0
+ otherwise.
+ OF_DPA_OUT_PPORT 2 data for OUTPUT action,
+ restricted to CONTROLLER,
+ set to 0 otherwise
+ OF_DPA_CLEAR_ACTIONS 4 if 1 packets matching flow are
+ dropped (all other instructions
+ ignored)
+
+TLVs for flow delete and get stats command are::
+
+ field width description
+ ---------------------------------------------------
+ OF_DPA_CMD 2 CMD_[DEL|GET_STATS]
+ OF_DPA_COOKIE 8 Cookie
+
+On completion of get stats command, the descriptor buffer is written back with
+the following TLVs::
+
+ field width description
+ ---------------------------------------------------
+ OF_DPA_STAT_DURATION 4 Flow duration
+ OF_DPA_STAT_RX_PKTS 8 Received packets
+ OF_DPA_STAT_TX_PKTS 8 Transmit packets
+
+Possible status return codes in descriptor on completion are::
+
+ DESC_COMP_ERR command reason
+ --------------------------------------------------------------------
+ 0 all OK
+ -ROCKER_EFAULT all head or tail index outside
+ of ring
+ -ROCKER_ENXIO all address or data read err on
+ desc buf
+ -ROCKER_EMSGSIZE GET_STATS cmd descriptor buffer wasn't
+ big enough to contain write-back
+ TLVs
+ -ROCKER_EINVAL all invalid parameters passed in
+ -ROCKER_EEXIST ADD entry already exists
+ -ROCKER_ENOSPC ADD no space left in flow table
+ -ROCKER_ENOENT MOD|DEL|GET_STATS cookie invalid
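+
+For reference, the flow table IDs above could be captured in driver code as in
+the illustrative enum below; packets move from table to table via
+OF_DPA_GOTO_TBL, with zero meaning drop::
+
+  enum rocker_of_dpa_table_id {
+      ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT      = 0,
+      ROCKER_OF_DPA_TABLE_ID_VLAN              = 10,
+      ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC   = 20,
+      ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING   = 30,
+      ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
+      ROCKER_OF_DPA_TABLE_ID_BRIDGING          = 50,
+      ROCKER_OF_DPA_TABLE_ID_ACL_POLICY        = 60,
+  };
+
+  /*
+   * For example, an ingress port flow passing all packets from a physical
+   * port on to the VLAN table would carry OF_DPA_IN_PPORT = <pport> and
+   * OF_DPA_GOTO_TBL = ROCKER_OF_DPA_TABLE_ID_VLAN; an OF_DPA_GOTO_TBL of
+   * zero drops the packet instead.
+   */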
+
+Group Table Interface
+---------------------
+
+There are commands to add, modify, delete, and get stats of group table
+entries. The commands are issued using the DMA CMD descriptor ring. The
+following commands are defined::
+
+ CMD_ADD: add an entry to group table
+ CMD_MOD: modify an entry in group table
+ CMD_DEL: delete an entry from group table
+ CMD_GET_STATS: get stats for group entry
+
+TLVs for add and modify commands are::
+
+ field width description
+ -----------------------------------------------------------
+ FLOW_GROUP_CMD 2 CMD_[ADD|MOD]
+ FLOW_GROUP_ID 2 Flow group ID
+ FLOW_GROUP_TYPE 1 Group type:
+ 0: L2 interface
+ 1: L2 rewrite
+ 2: L3 unicast
+ 3: L2 multicast
+ 4: L2 flood
+ 5: L3 interface
+ 6: L3 multicast
+ 7: L3 ECMP
+ 8: L2 overlay
+ FLOW_VLAN_ID 2 Vlan ID (types 0, 3, 4, 6)
+ FLOW_L2_PORT 2 Port (types 0)
+ FLOW_INDEX 4 Index (all types but 0)
+ FLOW_OVERLAY_TYPE 1 Overlay sub-type (type 8):
+ 0: Flood unicast tunnel
+ 1: Flood multicast tunnel
+ 2: Multicast unicast tunnel
+ 3: Multicast multicast tunnel
+ FLOW_GROUP_ACTION nest
+ FLOW_GROUP_ID 2 next group ID in chain (all
+ types except 0)
+ FLOW_OUT_PORT 4 egress port (types 0, 8)
+ FLOW_POP_VLAN_TAG 1 strip outer VLAN tag (type 1
+ only)
+ FLOW_VLAN_ID 2 (types 1, 5)
+ FLOW_SRC_MAC 6 (types 1, 2, 5)
+ FLOW_DST_MAC 6 (types 1, 2)
+
+TLVs for flow delete and get stats command are::
+
+ field width description
+ -----------------------------------------------------------
+ FLOW_GROUP_CMD 2 CMD_[DEL|GET_STATS]
+ FLOW_GROUP_ID 2 Flow group ID
+
+On completion of get stats command, the descriptor buffer is written back with
+the following TLVs::
+
+ field width description
+ ---------------------------------------------------
+ FLOW_GROUP_ID 2 Flow group ID
+ FLOW_STAT_DURATION 4 Flow duration
+ FLOW_STAT_REF_COUNT 4 Flow reference count
+ FLOW_STAT_BUCKET_COUNT 4 Flow bucket count
+
+Possible status return codes in descriptor on completion are::
+
+ DESC_COMP_ERR command reason
+ --------------------------------------------------------------------
+ 0 all OK
+ -ROCKER_EFAULT all head or tail index outside
+ of ring
+ -ROCKER_ENXIO all address or data read err on
+ desc buf
+ -ROCKER_ENOSPC GET_STATS cmd descriptor buffer wasn't
+ big enough to contain write-back
+ TLVs
+ -ROCKER_EINVAL ADD|MOD invalid parameters passed in
+ -ROCKER_EEXIST ADD entry already exists
+ -ROCKER_ENOSPC ADD no space left in flow table
+ -ROCKER_ENOENT MOD|DEL|GET_STATS group ID invalid
+ -ROCKER_EBUSY DEL group reference count non-zero
+ -ROCKER_ENODEV ADD next group ID doesn't exist
+
+
+
+References
+==========
+
+[1] OpenFlow Data Plane Abstraction (OF-DPA) Abstract Switch Specification,
+Version 1.0, from Broadcom Corporation, February 21, 2014.
diff --git a/docs/specs/rocker.txt b/docs/specs/rocker.txt
deleted file mode 100644
index 1857b31..0000000
--- a/docs/specs/rocker.txt
+++ /dev/null
@@ -1,1014 +0,0 @@
-Rocker Network Switch Register Programming Guide
-Copyright (c) Scott Feldman <sfeldma@gmail.com>
-Copyright (c) Neil Horman <nhorman@tuxdriver.com>
-Version 0.11, 12/29/2014
-
-LICENSE
-=======
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-SECTION 1: Introduction
-=======================
-
-Overview
---------
-
-This document describes the hardware/software interface for the Rocker switch
-device. The intended audience is authors of OS drivers and device emulation
-software.
-
-Notations and Conventions
--------------------------
-
-o In register descriptions, [n:m] indicates a range from bit n to bit m,
-inclusive.
-o Use of leading 0x indicates a hexadecimal number.
-o Use of leading 0b indicates a binary number.
-o The use of RSVD or Reserved indicates that a bit or field is reserved for
-future use.
-o Field width is in bytes, unless otherwise noted.
-o Register are (R) read-only, (R/W) read/write, (W) write-only, or (COR) clear
-on read
-o TLV values in network-byte-order are designated with (N).
-
-
-SECTION 2: PCI Configuration Registers
-======================================
-
-PCI Configuration Space
------------------------
-
-Each switch instance registers as a PCI device with PCI configuration space:
-
- offset width description value
- ---------------------------------------------
- 0x0 2 Vendor ID 0x1b36
- 0x2 2 Device ID 0x0006
- 0x4 4 Command/Status
- 0x8 1 Revision ID 0x01
- 0x9 3 Class code 0x2800
- 0xC 1 Cache line size
- 0xD 1 Latency timer
- 0xE 1 Header type
- 0xF 1 Built-in self test
- 0x10 4 Base address low
- 0x14 4 Base address high
- 0x18-28 Reserved
- 0x2C 2 Subsystem vendor ID *
- 0x2E 2 Subsystem ID *
- 0x30-38 Reserved
- 0x3C 1 Interrupt line
- 0x3D 1 Interrupt pin 0x00
- 0x3E 1 Min grant 0x00
- 0x3D 1 Max latency 0x00
- 0x40 1 TRDY timeout
- 0x41 1 Retry count
- 0x42 2 Reserved
-
-
-* Assigned by sub-system implementation
-
-SECTION 3: Memory-Mapped Register Space
-=======================================
-
-There are two memory-mapped BARs. BAR0 maps device register space and is
-0x2000 in size. BAR1 maps MSI-X vector and PBA tables and is also 0x2000 in
-size, allowing for 256 MSI-X vectors.
-
-All registers are 4 or 8 bytes long. It is assumed host software will access 4
-byte registers with one 4-byte access, and 8 byte registers with either two
-4-byte accesses or a single 8-byte access. In the case of two 4-byte accesses,
-access must be lower and then upper 4-bytes, in that order.
-
-BAR0 device register space is organized as follows:
-
- offset description
- ------------------------------------------------------
- 0x0000-0x000f Bogus registers to catch misbehaving
- drivers. Writes do nothing. Reads
- back as 0xDEADBABE.
- 0x0010-0x00ff Test registers
- 0x0300-0x03ff General purpose registers
- 0x1000-0x1fff Descriptor control
-
-Holes in register space are reserved. Writes to reserved registers do nothing.
-Reads to reserved registers read back as 0.
-
-No fancy stuff like write-combining is enabled on any of the registers.
-
-BAR1 MSI-X register space is organized as follows:
-
- offset description
- ------------------------------------------------------
- 0x0000-0x0fff MSI-X vector table (256 vectors total)
- 0x1000-0x1fff MSI-X PBA table
-
-
-SECTION 4: Interrupts, DMA, and Endianness
-==========================================
-
-PCI Interrupts
---------------
-
-The device supports only MSI-X interrupts. BAR1 memory-mapped region contains
-the MSI-X vector and PBA tables, with support for up to 256 MSI-X vectors.
-
-The vector assignment is:
-
- vector description
- -----------------------------------------------------
- 0 Command descriptor ring completion
- 1 Event descriptor ring completion
- 2 Test operation completion
- 3 RSVD
- 4-255 Tx and Rx descriptor ring completion
- Tx vector is even
- Rx vector is odd
-
-A MSI-X vector table entry is 16 bytes:
-
- field offset width description
- -------------------------------------------------------------
- lower_addr 0x0 4 [31:2] message address[31:2]
- [1:0] Rsvd (4 byte alignment
- required)
- upper_addr 0x4 4 [31:19] Rsvd
- [14:0] message address[46:32]
- data 0x8 4 message data[31:0]
- control 0xc 4 [31:1] Rsvd
- [0] mask (0 = enable,
- 1 = masked)
-
-Software should install the Interrupt Service Routine (ISR) before any ports
-are enabled or any commands are issued on the command ring.
-
-DMA Operations
---------------
-
-DMA operations are used for packet DMA to/from the CPU, command and event
-processing. Command processing includes statistical counters and table dumps,
-table insertion/deletion, and more. Event processing provides an async
-notification method for device-originating events. Each DMA operation has a
-set of control registers to manage a descriptor ring. The descriptor rings are
-allocated from contiguous host DMA-able memory and registers specify the rings
-base address, size and current head and tail indices. Software always writes
-the head, and hardware always writes the tail.
-
-The higher-order bit of DMA_DESC_COMP_ERR is used to mark hardware completion
-of a descriptor. Software will clear this bit when posting a descriptor to the
-ring, and hardware will set this bit when the descriptor is complete.
-
-Descriptor ring sizes must be a power of 2 and range from 2 to 64K entries.
-Descriptor rings' base address must be 8-byte aligned. Descriptors must be
-packed within ring. Each descriptor in each ring must also be aligned on an 8
-byte boundary. Each descriptor ring will have these registers:
-
- DMA_DESC_xxx_BASE_ADDR, offset 0x1000 + (x * 32), 64-bit, (R/W)
- DMA_DESC_xxx_SIZE, offset 0x1008 + (x * 32), 32-bit, (R/W)
- DMA_DESC_xxx_HEAD, offset 0x100c + (x * 32), 32-bit, (R/W)
- DMA_DESC_xxx_TAIL, offset 0x1010 + (x * 32), 32-bit, (R)
- DMA_DESC_xxx_CTRL, offset 0x1014 + (x * 32), 32-bit, (W)
- DMA_DESC_xxx_CREDITS, offset 0x1018 + (x * 32), 32-bit, (R/W)
- DMA_DESC_xxx_RSVD1, offset 0x101c + (x * 32), 32-bit, (R/W)
-
-Where x is descriptor ring index:
-
- index ring
- --------------------
- 0 CMD
- 1 EVENT
- 2 TX (port 0)
- 3 RX (port 0)
- 4 TX (port 1)
- 5 RX (port 1)
- .
- .
- .
- 124 TX (port 61)
- 125 RX (port 61)
- 126 Resv
- 127 Resv
-
-Writing BASE_ADDR or SIZE will reset HEAD and TAIL to zero. HEAD cannot be
-written past TAIL. To do so would wrap the ring. An empty ring is when HEAD
-== TAIL. A full ring is when HEAD is one position behind TAIL. Both HEAD and
-TAIL increment and modulo wrap at the ring size.
-
-CTRL register bits:
-
- bit name description
- ------------------------------------------------------------------------
- [0] CTRL_RESET Reset the descriptor ring
- [1:31] Reserved
-
-All descriptor types share some common fields:
-
- field width description
- -------------------------------------------------------------------
- DMA_DESC_BUF_ADDR 8 Phys addr of desc payload, 8-byte
- aligned
- DMA_DESC_COOKIE 8 Desc cookie for completion matching,
- upper-most bit is reserved
- DMA_DESC_BUF_SIZE 2 Desc payload size in bytes
- DMA_DESC_TLV_SIZE 2 Desc payload total size in bytes
- used for TLVs. Must be <=
- DMA_DESC_BUF_SIZE.
- DMA_DESC_COMP_ERR 2 Completion status of associated
- desc payload. High order bit is
- clear on new descs, toggled by
- hw for completed items.
-
-To support forward- and backward-compatibility, descriptor and completion
-payloads are specified in TLV format. Fields are packed with Type=field name,
-Length=field length, and Value=field value. Software will ignore unknown fields
-filled in by the switch. Likewise, the switch will ignore unknown fields
-filled in by software.
-
-Descriptor payload buffer is 8-byte aligned and TLVs are 8-byte aligned. The
-value within a TLV is also 8-byte aligned. The (packed, 8 byte) TLV header is:
-
- field width description
- -----------------------------
- type 4 TLV type
- len 2 TLV value length
- pad 2 Reserved
-
-The alignment requirements for descriptors and TLVs are to avoid unaligned
-access exceptions in software. Note that the payload for each TLV is also
-8 byte aligned.
-
-Figure 1 shows an example descriptor buffer with two TLVs.
-
- <------- 8 bytes ------->
-
- 8-byte +––––+ +–––––––––––+–––––+–––––+ +–+
- align | type | len | pad | TLV#1 hdr |
- +–––––––––––+–––––+–––––+ (len=22) |
- | | |
- | value | TVL#1 value |
- | | (padded to 8-byte |
- | +–––––+ alignment) |
- | |/////| |
- 8-byte +––––+ +–––––––––––+–––––––––––+ |
- align | type | len | pad | TLV#2 hdr DESC_BUF_SIZE
- +–––––+–––––+–––––+–––––+ (len=2) |
- |value|/////////////////| TLV#2 value |
- +–––––+/////////////////| |
- |///////////////////////| |
- |///////////////////////| |
- |///////////////////////| |
- |////////unused/////////| |
- |////////space//////////| |
- |///////////////////////| |
- |///////////////////////| |
- |///////////////////////| |
- +–––––––––––––––––––––––+ +–+
-
- fig. 1
-
-TLVs can be nested within the NEST TLV type.
-
-Interrupt credits
-^^^^^^^^^^^^^^^^^
-
-MSI-X vectors used for descriptor ring completions use a credit mechanism for
-efficient device, PCIe bus, OS and driver operations. Each descriptor ring has
-a credit count which represents the number of outstanding descriptors to be
-processed by the driver. As the device marks descriptors complete, the credit
-count is incremented. As the driver processes those outstanding descriptors,
-it returns credits back to the device. This way, the device knows the driver's
-progress and can make decisions about when to fire the next interrupt or not.
-When the credit count is zero, and the first descriptors are posted for the
-driver, a single interrupt is fired. Once the interrupt is fired, the
-interrupt is disabled (auto-masked*). In response to the interrupt, the driver
-will process descriptors and PIO write a returned credit value for that
-descriptor ring. If the driver returns all credits (the driver caught up with
-the device and there is no outstanding work), then the interrupt is unmasked,
-but not fired. If only partial credits are returned, the interrupt remains
-masked but the device generates an interrupt, signaling the driver that more
-outstanding work is available.
-
-(* this masking is unrelated to the MSI-X interrupt mask register)
-
-Endianness
-----------
-
-Device registers are hard-coded to little-endian (LE). The driver should
-convert to/from host endianness to LE for device register accesses.
-
-Descriptors are LE. Descriptor buffer TLVs will have LE type and length
-fields, but the value field can either be LE or network-byte-order, depending
-on context. TLV values containing network packet data will be in network-byte
-order. A TLV value containing a field or mask used to compare against network
-packet data is network-byte order. For example, flow match fields (and masks)
-are network-byte-order since they're matched directly, byte-by-byte, against
-network packet data. All non-network-packet TLV multi-byte values will be LE.
-
-TLV values in network-byte-order are designated with (N).
-
-
-SECTION 5: Test Registers
-=========================
-
-Rocker has several test registers to support troubleshooting register access,
-interrupt generation, and DMA operations:
-
- TEST_REG, offset 0x0010, 32-bit (R/W)
- TEST_REG64, offset 0x0018, 64-bit (R/W)
- TEST_IRQ, offset 0x0020, 32-bit (R/W)
- TEST_DMA_ADDR, offset 0x0028, 64-bit (R/W)
- TEST_DMA_SIZE, offset 0x0030, 32-bit (R/W)
- TEST_DMA_CTRL, offset 0x0034, 32-bit (R/W)
-
-Reads to TEST_REG and TEST_REG64 will read a value equal to twice the last
-value written to the register. The 32-bit and 64-bit versions are for testing
-32-bit and 64-bit host accesses.
-
-A vector can be written to TEST_IRQ and the device will generate an interrupt
-for that vector.
-
-To test basic DMA operations, allocate a DMA-able host buffer and put the
-buffer address into TEST_DMA_ADDR and size into TEST_DMA_SIZE. Then, write to
-TEST_DMA_CTRL to manipulate the buffer contents. TEST_DMA_CTRL operations are:
-
- operation value description
- -----------------------------------------------------------
- TEST_DMA_CTRL_CLEAR 1 clear buffer
- TEST_DMA_CTRL_FILL 2 fill buffer bytes with 0x96
- TEST_DMA_CTRL_INVERT 4 invert bytes in buffer
-
-Various buffer address and sizes should be tested to verify no address boundary
-issue exists. In particular, buffers that start on odd-8-byte boundary and/or
-span multiple PAGE sizes should be tested.
-
-
-SECTION 6: Ports
-================
-
-Physical and Logical Ports
-------------------------------------
-
-The switch supports up to 62 physical (front-panel) ports. Register
-PORT_PHYS_COUNT returns the actual number of physical ports available:
-
- PORT_PHYS_COUNT, offset 0x0304, 32-bit, (R)
-
-In addition to front-panel ports, the switch supports logical ports for
-tunnels.
-
-Front-panel ports and logical tunnel ports are mapped into a single 32-bit port
-space. A special CPU port is assigned port 0. The front-panel ports are
-mapped to ports 1-62. A special loopback port is assigned port 63. Logical
-tunnel ports are assigned ports 0x0001000-0x0001ffff.
-To summarize the port assignments:
-
- port mapping
- -------------------------------------------------------
- 0 CPU port (for packets to/from host CPU)
- 1-62 front-panel physical ports
- 63 loopback port
- 64-0x0000ffff RSVD
- 0x00010000-0x0001ffff logical tunnel ports
- 0x00020000-0xffffffff RSVD
-
-Physical Port Mode
-------------------
-
-Switch front-panel ports operate in a mode. Currently, the only mode is
-OF-DPA. OF-DPA[1] mode is based on OpenFlow Data Plane Abstraction (OF-DPA)
-Abstract Switch Specification, Version 1.0, from Broadcom Corporation. To
-set/get the mode for front-panel ports, see port settings, below.
-
-Port Settings
--------------
-
-Link status for all front-panel ports is available via PORT_PHYS_LINK_STATUS:
-
- PORT_PHYS_LINK_STATUS, offset 0x0310, 64-bit, (R)
-
- Value is port bitmap. Bits 0 and 63 always read 0. Bits 1-62
- read 1 for link UP and 0 for link DOWN for respective front-panel ports.
-
-Other properties for front-panel ports are available via DMA CMD descriptors:
-
- Get PORT_SETTINGS descriptor:
-
- field width description
- ----------------------------------------------
- PORT_SETTINGS 2 CMD_GET
- PPORT 4 Physical port #
-
- Get PORT_SETTINGS completion:
-
- field width description
- ----------------------------------------------
- PPORT 4 Physical port #
- SPEED 4 Current port interface speed, in Mbps
- DUPLEX 1 1 = Full, 0 = Half
- AUTONEG 1 1 = enabled, 0 = disabled
- MACADDR 6 Port MAC address
- MODE 1 0 = OF-DPA
- LEARNING 1 MAC address learning on port
- 1 = enabled
- 0 = disabled
- PHYS_NAME <var> Physical port name (string)
-
- Set PORT_SETTINGS descriptor:
-
- field width description
- ----------------------------------------------
- PORT_SETTINGS 2 CMD_SET
- PPORT 4 Physical port #
- SPEED 4 Port interface speed, in Mbps
- DUPLEX 1 1 = Full, 0 = Half
- AUTONEG 1 1 = enabled, 0 = disabled
- MACADDR 6 Port MAC address
- MODE 1 0 = OF-DPA
-
-Port Enable
------------
-
-Front-panel ports are initially disabled, which means port ingress and egress
-packets will be dropped. To enable or disable a port, use PORT_PHYS_ENABLE:
-
- PORT_PHYS_ENABLE: offset 0x0318, 64-bit, (R/W)
-
- Value is bitmap of first 64 ports. Bits 0 and 63 are ignored
- and always read as 0. Write 1 to enable port; write 0 to disable it.
- Default is 0.
-
-
-SECTION 7: Switch Control
-=========================
-
-This section covers switch-wide register settings.
-
-Control
--------
-
-This register is used for low level control of the switch.
-
- CONTROL: offset 0x0300, 32-bit, (W)
-
- bit name description
- ------------------------------------------------------------------------
- [0] CONTROL_RESET If set, device will perform reset
- [1:31] Reserved
-
-Switch ID
----------
-
-The switch has a SWITCH_ID to be used by software to uniquely identify the
-switch:
-
- SWITCH_ID: offset 0x0320, 64-bit, (R)
-
- Value is opaque to switch software and no special encoding is implied.
-
-
-SECTION 8: Events
-=================
-
-Non-I/O asynchronous events from the device are notified to the host using the
-event ring. The TLV structure for events is:
-
- field width description
- ---------------------------------------------------
- TYPE 4 Event type, one of:
- 1: LINK_CHANGED
- 2: MAC_VLAN_SEEN
- INFO <nest> Event info (details below)
-
-Link Changed Event
-------------------
-
-When link status changes on a physical port, this event is generated.
-
- field width description
- ---------------------------------------------------
- INFO <nest>
- PPORT 4 Physical port
- LINKUP 1 Link status:
- 0: down
- 1: up
-
-MAC VLAN Seen Event
--------------------
-
-When a packet ingresses on a port and the source MAC/VLAN isn't known to the
-device, the device will generate this event. In response to the event, the
-driver should install to the device the MAC/VLAN on the port into the bridge
-table. Once installed, the MAC/VLAN is known on the port and this event will
-no longer be generated.
-
- field width description
- ---------------------------------------------------
- INFO <nest>
- PPORT 4 Physical port
- MAC 6 MAC address
- VLAN 2 VLAN ID
-
-
-SECTION 9: CPU Packet Processing
-================================
-
-Ingress packets directed to the host CPU for further processing are delivered
-in the DMA RX ring. Likewise, host CPU originating packets destined to egress
-on switch ports are scheduled by software using the DMA TX ring.
-
-Tx Packet Processing
---------------------
-
-Software schedules packets for egress on switch ports using the DMA TX ring. A
-TX descriptor buffer describes the packet location and size in host DMA-able
-memory, the destination port, and any hardware-offload functions (such as L3
-payload checksum offload). Software then bumps the descriptor head to signal
-hardware of new Tx work. In response, hardware will DMA read Tx descriptors up
-to head, DMA read descriptor buffer and packet data, perform offloading
-functions, and finally frame packet on wire (network). Once packet processing
-is complete, hardware will writeback status to descriptor(s) to signal to
-software that Tx is complete and software resources (e.g. skb) backing packet
-can be released.
-
-Figure 2 shows an example 3-fragment packet queued with one Tx descriptor. A
-TLV is used for each packet fragment.
-
- pkt frag 1
- +–––––––+ +–+
- +–––+ | |
- desc buf | | | |
- +––––––––+ | | | |
- Tx ring +–––+ +–––––+ | | |
- +–––––––––+ | | TLVs | +–––––––+ |
- | +–––+ +––––––––+ pkt frag 2 |
- | desc 0 | | +–––––+ +–––––––+ |
- +–––––––––+ | TLVs | +–––+ | |
- head+–+ | +––––––––+ | | |
- | desc 1 | | +–––––+ +–––––––+ |pkt
- +–––––––––+ | TLVs | | |
- | | +––––––––+ | pkt frag 3 |
- | | | +–––––––+ |
- +–––––––––+ +–––+ | |
- | | | | |
- | | | | |
- +–––––––––+ | | |
- | | | | |
- | | | | |
- +–––––––––+ | | |
- | | +–––––––+ +–+
- | |
- +–––––––––+
-
- fig 2.
-
-The TLVs for Tx descriptor buffer are:
-
- field width description
- ---------------------------------------------------------------------
- PPORT 4 Destination physical port #
- TX_OFFLOAD 1 Hardware offload modes:
- 0: no offload
- 1: insert IP csum (ipv4 only)
- 2: insert TCP/UDP csum
- 3: L3 csum calc and insert
- into csum offset (TX_L3_CSUM_OFF)
- 16-bit 1's complement csum value.
- IPv4 pseudo-header and IP
- already calculated by OS
- and inserted.
- 4: TSO (TCP Segmentation Offload)
- TX_L3_CSUM_OFF 2 For L3 csum offload mode, the offset,
- from the beginning of the packet,
- of the csum field in the L3 header
- TX_TSO_MSS 2 For TSO offload mode, the
- Maximum Segment Size in bytes
- TX_TSO_HDR_LEN 2 For TSO offload mode, the
- length of ethernet, IP, and
- TCP/UDP headers, including IP
- and TCP options.
- TX_FRAGS <array> Packet fragments
- TX_FRAG <nest> Packet fragment
- TX_FRAG_ADDR 8 DMA address of packet fragment
- TX_FRAG_LEN 2 Packet fragment length
-
-Possible status return codes in descriptor on completion are:
-
- DESC_COMP_ERR reason
- --------------------------------------------------------------------
- 0 OK
- -ROCKER_ENXIO address or data read err on desc buf or packet
- fragment
- -ROCKER_EINVAL bad pport or TSO or csum offloading error
- -ROCKER_ENOMEM no memory for internal staging tx fragment
-
-Rx Packet Processing
---------------------
-
-For packets ingressing on switch ports that are not forwarded by the switch but
-rather directed to the host CPU for further processing are delivered in the DMA
-RX ring. Rx descriptor buffers are allocated by software and placed on the
-ring. Hardware will fill Rx descriptor buffers with packet data, write the
-completion, and signal to software that a new packet is ready. Since Rx packet
-size is not known a-priori, the Rx descriptor buffer must be allocated for
-worst-case packet size. A single Rx descriptor will contain the entire Rx
-packet data in one RX_FRAG. Other Rx TLVs describe and hardware offloads
-performed on the packet, such as checksum validation.
-
-The TLVs for Rx descriptor buffer are:
-
- field width description
- ---------------------------------------------------
- PPORT 4 Source physical port #
- RX_FLAGS 2 Packet parsing flags:
- (1 << 0): IPv4 packet
- (1 << 1): IPv6 packet
- (1 << 2): csum calculated
- (1 << 3): IPv4 csum good
- (1 << 4): IP fragment
- (1 << 5): TCP packet
- (1 << 6): UDP packet
- (1 << 7): TCP/UDP csum good
- (1 << 8): Offload forward
- RX_CSUM 2 IP calculated checksum:
- IPv4: IP payload csum
- IPv6: header and payload csum
- (Only valid is RX_FLAGS:csum calc is set)
- RX_FRAG_ADDR 8 DMA address of packet fragment
- RX_FRAG_MAX_LEN 2 Packet maximum fragment length
- RX_FRAG_LEN 2 Actual packet fragment length after receive
-
-Offload forward RX_FLAG indicates the device has already forwarded the packet
-so the host CPU should not also forward the packet.
-
-Possible status return codes in descriptor on completion are:
-
- DESC_COMP_ERR reason
- --------------------------------------------------------------------
- 0 OK
- -ROCKER_ENXIO address or data read err on desc buf
- -ROCKER_ENOMEM no memory for internal staging desc buf
- -ROCKER_EMSGSIZE Rx descriptor buffer wasn't big enough to contain
- packet data TLV and other TLVs.
-
-
-SECTION 10: OF-DPA Mode
-======================
-
-OF-DPA mode allows the switch to offload flow packet processing functions to
-hardware. An OpenFlow controller would communicate with an OpenFlow agent
-installed on the switch. The OpenFlow agent would (directly or indirectly)
-communicate with the Rocker switch driver, which in turn would program switch
-hardware with flow functionality, as defined in OF-DPA. The block diagram is:
-
- +–––––––––––––––----–––+
- | OF |
- | Remote Controller |
- +––––––––+––----–––––––+
- |
- |
- +––––––––+–––––––––+
- | OF |
- | Local Agent |
- +––––––––––––––––––+
- | |
- | Rocker Driver |
- +––––––––––––––––––+
- <this spec>
- +––––––––––––––––––+
- | |
- | Rocker Switch |
- +––––––––––––––––––+
-
-To participate in flow functions, ports must be configure for OF-DPA mode
-during switch initialization.
-
-OF-DPA Flow Table Interface
----------------------------
-
-There are commands to add, modify, delete, and get stats of flow table entries.
-The commands are issued using the DMA CMD descriptor ring. The following
-commands are defined:
-
- CMD_ADD: add an entry to flow table
- CMD_MOD: modify an entry in flow table
- CMD_DEL: delete an entry from flow table
- CMD_GET_STATS: get stats for flow entry
-
-TLVs for add and modify commands are:
-
- field width description
- ----------------------------------------------------
- OF_DPA_CMD 2 CMD_[ADD|MOD]
- OF_DPA_TBL 2 Flow table ID
- 0: ingress port
- 10: vlan
- 20: termination mac
- 30: unicast routing
- 40: multicast routing
- 50: bridging
- 60: ACL policy
- OF_DPA_PRIORITY 4 Flow priority
- OF_DPA_HARDTIME 4 Hard timeout for flow
- OF_DPA_IDLETIME 4 Idle timeout for flow
- OF_DPA_COOKIE 8 Cookie
-
-Additional TLVs based on flow table ID:
-
-Table ID 0: ingress port
-
- field width description
- ----------------------------------------------------
- OF_DPA_IN_PPORT 4 ingress physical port number
- OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
-
-Table ID 10: vlan
-
- field width description
- ----------------------------------------------------
- OF_DPA_IN_PPORT 4 ingress physical port number
- OF_DPA_VLAN_ID 2 (N) vlan ID
- OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
- OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
- OF_DPA_NEW_VLAN_ID 2 (N) new vlan ID
-
-Table ID 20: termination mac
-
- field width description
- ----------------------------------------------------
- OF_DPA_IN_PPORT 4 ingress physical port number
- OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask
- OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
- OF_DPA_DST_MAC 6 (N) destination MAC
- OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
- OF_DPA_VLAN_ID 2 (N) vlan ID
- OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
- OF_DPA_GOTO_TBL 2 only acceptable values are
- unicast or multicast routing
- table IDs
- OF_DPA_OUT_PPORT 2 if specified, must be
- controller, set zero otherwise
-
-Table ID 30: unicast routing
-
- field width description
- ----------------------------------------------------
- OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
- OF_DPA_DST_IP 4 (N) destination IPv4 address.
- Must be unicast address
- OF_DPA_DST_IP_MASK 4 (N) IP mask. Must be prefix mask
- OF_DPA_DST_IPV6 16 (N) destination IPv6 address.
- Must be unicast address
- OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask. Must be prefix mask
- OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
- OF_DPA_GROUP_ID 4 data for GROUP action must
- be an L3 Unicast group entry
-
-Table ID 40: multicast routing
-
- field width description
- ----------------------------------------------------
- OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd
- OF_DPA_VLAN_ID 2 (N) vlan ID
- OF_DPA_SRC_IP 4 (N) source IPv4. Optional,
- can contain IPv4 address,
- must be completely masked
- if not used
- OF_DPA_SRC_IP_MASK 4 (N) IP Mask
- OF_DPA_DST_IP 4 (N) destination IPv4 address.
- Must be multicast address
- OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional.
- Can contain IPv6 address,
- must be completely masked
- if not used
- OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask.
- OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must
- be multicast address
- OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
- OF_DPA_GROUP_ID 4 data for GROUP action must
- be an L3 multicast group entry
-
-Table ID 50: bridging
-
- field width description
- ----------------------------------------------------
- OF_DPA_VLAN_ID 2 (N) vlan ID
- OF_DPA_TUNNEL_ID 4 tunnel ID
- OF_DPA_DST_MAC 6 (N) destination MAC
- OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
- OF_DPA_GOTO_TBL 2 goto table ID; zero to drop
- OF_DPA_GROUP_ID 4 data for GROUP action must
- be a L2 Interface, L2
- Multicast, L2 Flood,
- or L2 Overlay group entry
- as appropriate
- OF_DPA_TUNNEL_LPORT 4 unicast Tenant Bridging
- flows specify a tunnel
- logical port ID
- OF_DPA_OUT_PPORT 2 data for OUTPUT action,
- restricted to CONTROLLER,
- set to 0 otherwise
-
-Table ID 60: acl policy
-
- field width description
- ----------------------------------------------------
- OF_DPA_IN_PPORT 4 ingress physical port number
- OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask
- OF_DPA_ETHERTYPE 2 (N) ethertype
- OF_DPA_VLAN_ID 2 (N) vlan ID
- OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask
- OF_DPA_VLAN_PCP 2 (N) vlan Priority Code Point
- OF_DPA_VLAN_PCP_MASK 2 (N) vlan Priority Code Point mask
- OF_DPA_SRC_MAC 6 (N) source MAC
- OF_DPA_SRC_MAC_MASK 6 (N) source MAC mask
- OF_DPA_DST_MAC 6 (N) destination MAC
- OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask
- OF_DPA_TUNNEL_ID 4 tunnel ID
- OF_DPA_SRC_IP 4 (N) source IPv4. Optional,
- can contain IPv4 address,
- must be completely masked
- if not used
- OF_DPA_SRC_IP_MASK 4 (N) IP Mask
- OF_DPA_DST_IP 4 (N) destination IPv4 address.
- Must be multicast address
- OF_DPA_DST_IP_MASK 4 (N) IP Mask
- OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional.
- Can contain IPv6 address,
- must be completely masked
- if not used
- OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask
- OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must
- be multicast address.
- OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask
- OF_DPA_SRC_ARP_IP 4 (N) source IPv4 address in the ARP
- payload. Only used if ethertype
- == 0x0806.
- OF_DPA_SRC_ARP_IP_MASK 4 (N) IP Mask
- OF_DPA_IP_PROTO 1 IP protocol
- OF_DPA_IP_PROTO_MASK 1 IP protocol mask
- OF_DPA_IP_DSCP 1 DSCP
- OF_DPA_IP_DSCP_MASK 1 DSCP mask
- OF_DPA_IP_ECN 1 ECN
- OF_DPA_IP_ECN_MASK 1 ECN mask
- OF_DPA_L4_SRC_PORT 2 (N) L4 source port, only for
- TCP, UDP, or SCTP
- OF_DPA_L4_SRC_PORT_MASK 2 (N) L4 source port mask
- OF_DPA_L4_DST_PORT 2 (N) L4 destination port, only for
- TCP, UDP, or SCTP
- OF_DPA_L4_DST_PORT_MASK 2 (N) L4 destination port mask
- OF_DPA_ICMP_TYPE 1 ICMP type, only if IP
- protocol is 1
- OF_DPA_ICMP_TYPE_MASK 1 ICMP type mask
- OF_DPA_ICMP_CODE 1 ICMP code
- OF_DPA_ICMP_CODE_MASK 1 ICMP code mask
- OF_DPA_IPV6_LABEL 4 (N) IPv6 flow label
- OF_DPA_IPV6_LABEL_MASK 4 (N) IPv6 flow label mask
- OF_DPA_GROUP_ID 4 data for GROUP action
- OF_DPA_QUEUE_ID_ACTION 1 write the queue ID
- OF_DPA_NEW_QUEUE_ID 1 queue ID
- OF_DPA_VLAN_PCP_ACTION 1 write the VLAN priority
- OF_DPA_NEW_VLAN_PCP 1 VLAN priority
- OF_DPA_IP_DSCP_ACTION 1 write the DSCP
- OF_DPA_NEW_IP_DSCP 1 new DSCP
- OF_DPA_TUNNEL_LPORT 4 restrict to valid tunnel
- logical port, set to 0
- otherwise.
- OF_DPA_OUT_PPORT 2 data for OUTPUT action,
- restricted to CONTROLLER,
- set to 0 otherwise
- OF_DPA_CLEAR_ACTIONS 4 if 1 packets matching flow are
- dropped (all other instructions
- ignored)
-
-TLVs for flow delete and get stats command are:
-
- field width description
- ---------------------------------------------------
- OF_DPA_CMD 2 CMD_[DEL|GET_STATS]
- OF_DPA_COOKIE 8 Cookie
-
-On completion of get stats command, the descriptor buffer is written back with
-the following TLVs:
-
- field width description
- ---------------------------------------------------
- OF_DPA_STAT_DURATION 4 Flow duration
- OF_DPA_STAT_RX_PKTS 8 Received packets
- OF_DPA_STAT_TX_PKTS 8 Transmit packets
-
-Possible status return codes in descriptor on completion are:
-
- DESC_COMP_ERR command reason
- --------------------------------------------------------------------
- 0 all OK
- -ROCKER_EFAULT all head or tail index outside
- of ring
- -ROCKER_ENXIO all address or data read err on
- desc buf
- -ROCKER_EMSGSIZE GET_STATS cmd descriptor buffer wasn't
- big enough to contain write-back
- TLVs
- -ROCKER_EINVAL all invalid parameters passed in
- -ROCKER_EEXIST ADD entry already exists
- -ROCKER_ENOSPC ADD no space left in flow table
- -ROCKER_ENOENT MOD|DEL|GET_STATS cookie invalid
-
-Group Table Interface
----------------------
-
-There are commands to add, modify, delete, and get stats of group table
-entries. The commands are issued using the DMA CMD descriptor ring. The
-following commands are defined:
-
- CMD_ADD: add an entry to group table
- CMD_MOD: modify an entry in group table
- CMD_DEL: delete an entry from group table
- CMD_GET_STATS: get stats for group entry
-
-TLVs for add and modify commands are:
-
- field width description
- -----------------------------------------------------------
- FLOW_GROUP_CMD 2 CMD_[ADD|MOD]
- FLOW_GROUP_ID 2 Flow group ID
- FLOW_GROUP_TYPE 1 Group type:
- 0: L2 interface
- 1: L2 rewrite
- 2: L3 unicast
- 3: L2 multicast
- 4: L2 flood
- 5: L3 interface
- 6: L3 multicast
- 7: L3 ECMP
- 8: L2 overlay
- FLOW_VLAN_ID 2 Vlan ID (types 0, 3, 4, 6)
- FLOW_L2_PORT 2 Port (types 0)
- FLOW_INDEX 4 Index (all types but 0)
- FLOW_OVERLAY_TYPE 1 Overlay sub-type (type 8):
- 0: Flood unicast tunnel
- 1: Flood multicast tunnel
- 2: Multicast unicast tunnel
- 3: Multicast multicast tunnel
- FLOW_GROUP_ACTION nest
- FLOW_GROUP_ID 2 next group ID in chain (all
- types except 0)
- FLOW_OUT_PORT 4 egress port (types 0, 8)
- FLOW_POP_VLAN_TAG 1 strip outer VLAN tag (type 1
- only)
- FLOW_VLAN_ID 2 (types 1, 5)
- FLOW_SRC_MAC 6 (types 1, 2, 5)
- FLOW_DST_MAC 6 (types 1, 2)
-
-TLVs for flow delete and get stats command are:
-
- field width description
- -----------------------------------------------------------
- FLOW_GROUP_CMD 2 CMD_[DEL|GET_STATS]
- FLOW_GROUP_ID 2 Flow group ID
-
-On completion of get stats command, the descriptor buffer is written back with
-the following TLVs:
-
- field width description
- ---------------------------------------------------
- FLOW_GROUP_ID 2 Flow group ID
- FLOW_STAT_DURATION 4 Flow duration
- FLOW_STAT_REF_COUNT 4 Flow reference count
- FLOW_STAT_BUCKET_COUNT 4 Flow bucket count
-
-Possible status return codes in descriptor on completion are:
-
- DESC_COMP_ERR command reason
- --------------------------------------------------------------------
- 0 all OK
- -ROCKER_EFAULT all head or tail index outside
- of ring
- -ROCKER_ENXIO all address or data read err on
- desc buf
- -ROCKER_ENOSPC GET_STATS cmd descriptor buffer wasn't
- big enough to contain write-back
- TLVs
- -ROCKER_EINVAL ADD|MOD invalid parameters passed in
- -ROCKER_EEXIST ADD entry already exists
- -ROCKER_ENOSPC ADD no space left in flow table
- -ROCKER_ENOENT MOD|DEL|GET_STATS group ID invalid
- -ROCKER_EBUSY DEL group reference count non-zero
- -ROCKER_ENODEV ADD next group ID doesn't exist
-
-
-
-References
-==========
-
-[1] OpenFlow Data Plane Abstraction (OF-DPA) Abstract Switch Specification,
-Version 1.0, from Broadcom Corporation, February 21, 2014.
diff --git a/docs/specs/spdm.rst b/docs/specs/spdm.rst
new file mode 100644
index 0000000..f7de080
--- /dev/null
+++ b/docs/specs/spdm.rst
@@ -0,0 +1,134 @@
+======================================================
+QEMU Security Protocols and Data Models (SPDM) Support
+======================================================
+
+SPDM enables authentication, attestation and key exchange to assist in
+providing infrastructure security enablement. It's a standard published
+by the `DMTF`_.
+
+QEMU supports connecting to a SPDM responder implementation. This allows an
+external application to emulate the SPDM responder logic for an SPDM device.
+
+Setting up a SPDM server
+========================
+
+When using QEMU with SPDM devices, QEMU will connect to a server which
+implements the SPDM functionality.
+
+SPDM-Utils
+----------
+
+You can use `SPDM Utils`_ to emulate a responder. This is the simplest method.
+
+SPDM-Utils is a Linux application for managing, testing and developing devices
+supporting the DMTF Security Protocol and Data Model (SPDM). It is written in
+Rust and utilises libspdm.
+
+To use SPDM-Utils you will need to complete the following steps. Details are
+included in the SPDM-Utils README.
+
+ 1. `Build libspdm`_
+ 2. `Build SPDM Utils`_
+ 3. `Run it as a server`_
+
+spdm-emu
+--------
+
+You can use `spdm emu`_ to model the
+SPDM responder.
+
+.. code-block:: shell
+
+ $ cd spdm-emu
+ $ git submodule init; git submodule update --recursive
+ $ mkdir build; cd build
+ $ cmake -DARCH=x64 -DTOOLCHAIN=GCC -DTARGET=Debug -DCRYPTO=openssl ..
+ $ make -j32
+ $ make copy_sample_key # Build certificates, required for SPDM authentication.
+
+Note that the certificates should be in compliance with PCIe r6.1 sec
+6.31.3. This means you will need to add the following to openssl.cnf:
+
+.. code-block::
+
+ subjectAltName = otherName:2.23.147;UTF8:Vendor=1b36:Device=0010:CC=010802:REV=02:SSVID=1af4:SSID=1100
+ 2.23.147 = ASN1:OID:2.23.147
+
+and then manually regenerate some certificates with:
+
+.. code-block:: shell
+
+ $ openssl req -nodes -newkey ec:param.pem -keyout end_responder.key \
+ -out end_responder.req -sha384 -batch \
+ -subj "/CN=DMTF libspdm ECP384 responder cert"
+
+ $ openssl x509 -req -in end_responder.req -out end_responder.cert \
+ -CA inter.cert -CAkey inter.key -sha384 -days 3650 -set_serial 3 \
+ -extensions v3_end -extfile ../openssl.cnf
+
+ $ openssl asn1parse -in end_responder.cert -out end_responder.cert.der
+
+ $ cat ca.cert.der inter.cert.der end_responder.cert.der > bundle_responder.certchain.der
+
+You can use SPDM-Utils instead as it will generate the correct certificates
+automatically.
+
+The responder can then be launched with:
+
+.. code-block:: shell
+
+ $ cd bin
+ $ ./spdm_responder_emu --trans PCI_DOE
+
+Connecting an SPDM NVMe device
+==============================
+
+Once a SPDM server is running, we can start QEMU and connect to the server.
+
+For an NVMe device, first let's set up a block backing file we can use:
+
+.. code-block:: shell
+
+ $ cd qemu-spdm/linux/image
+ $ dd if=/dev/zero of=blknvme bs=1M count=2096 # 2GB NVMe Drive
+
+Then you can add this to your QEMU command line:
+
+.. code-block:: shell
+
+ -drive file=blknvme,if=none,id=mynvme,format=raw \
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+
+QEMU will then try to connect to the SPDM server.
+
+Note that if using x86-64 you will want to use the q35 machine instead
+of the default. So the entire QEMU command might look like this:
+
+.. code-block:: shell
+
+ qemu-system-x86_64 -M q35 \
+ --kernel bzImage \
+ -drive file=rootfs.ext2,if=virtio,format=raw \
+ -append "root=/dev/vda console=ttyS0" \
+ -net none -nographic \
+ -drive file=blknvme,if=none,id=mynvme,format=raw \
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+
+.. _DMTF:
+ https://www.dmtf.org/standards/SPDM
+
+.. _SPDM Utils:
+ https://github.com/westerndigitalcorporation/spdm-utils
+
+.. _spdm emu:
+ https://github.com/dmtf/spdm-emu
+
+.. _Build libspdm:
+ https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-libspdm
+
+.. _Build SPDM Utils:
+ https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-the-binary
+
+.. _Run it as a server:
+ https://github.com/westerndigitalcorporation/spdm-utils#qemu-spdm-device-emulation
diff --git a/docs/specs/tpm.rst b/docs/specs/tpm.rst
index 1ad36ad..b630a35 100644
--- a/docs/specs/tpm.rst
+++ b/docs/specs/tpm.rst
@@ -205,8 +205,8 @@ to be used with the passthrough backend or the swtpm backend.
QEMU files related to TPM backends:
- ``backends/tpm.c``
- - ``include/sysemu/tpm.h``
- - ``include/sysemu/tpm_backend.h``
+ - ``include/system/tpm.h``
+ - ``include/system/tpm_backend.h``
The QEMU TPM passthrough device
-------------------------------
@@ -240,7 +240,7 @@ PCRs.
QEMU files related to the TPM passthrough device:
- ``backends/tpm/tpm_passthrough.c``
- ``backends/tpm/tpm_util.c``
- - ``include/sysemu/tpm_util.h``
+ - ``include/system/tpm_util.h``
Command line to start QEMU with the TPM passthrough device using the host's
@@ -301,7 +301,7 @@ command.
QEMU files related to the TPM emulator device:
- ``backends/tpm/tpm_emulator.c``
- ``backends/tpm/tpm_util.c``
- - ``include/sysemu/tpm_util.h``
+ - ``include/system/tpm_util.h``
The following commands start the swtpm with a UnixIO control channel over
a socket interface. They do not need to be run as root.
diff --git a/docs/sphinx-static/theme_overrides.css b/docs/sphinx-static/theme_overrides.css
index 965ecac..b225bf7 100644
--- a/docs/sphinx-static/theme_overrides.css
+++ b/docs/sphinx-static/theme_overrides.css
@@ -18,8 +18,8 @@ h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend {
.rst-content dl:not(.docutils) dt {
border-top: none;
- border-left: solid 3px #ccc;
- background-color: #f0f0f0;
+ border-left: solid 5px #bcc6d2;
+ background-color: #eaedf1;
color: black;
}
@@ -208,3 +208,97 @@ div[class^="highlight"] pre {
color: inherit;
}
}
+
+/* QAPI domain theming */
+
+/* most content in a QAPI object definition should not exceed about
+ 80ch, but nested field lists are explicitly exempt due to their
+ two-column nature */
+.qapi dd *:not(dl) {
+ max-width: 80ch;
+}
+
+/* but the content column itself should still be less than ~80ch. */
+.qapi .field-list dd {
+ max-width: 80ch;
+}
+
+.qapi-infopips {
+ margin-bottom: 1em;
+}
+
+.qapi-infopip {
+ display: inline-block;
+ padding: 0em 0.5em 0em 0.5em;
+ margin: 0.25em;
+}
+
+.qapi-deprecated,.qapi-unstable {
+ background-color: #fffef5;
+ border: solid #fff176 6px;
+ font-weight: bold;
+ padding: 8px;
+ border-radius: 15px;
+ margin: 5px;
+}
+
+.qapi-unstable::before {
+ content: '🚧 ';
+}
+
+.qapi-deprecated::before {
+ content: 'āš ļø ';
+}
+
+.qapi-ifcond::before {
+ /* gaze ye into the crystal ball to determine feature availability */
+ content: 'šŸ”® ';
+}
+
+.qapi-ifcond {
+ background-color: #f9f5ff;
+ border: solid #dac2ff 6px;
+ padding: 8px;
+ border-radius: 15px;
+ margin: 5px;
+}
+
+/* code blocks */
+.qapi div[class^="highlight"] {
+ width: fit-content;
+ background-color: #fffafd;
+ border: 2px solid #ffe1f3;
+}
+
+/* note, warning, etc. */
+.qapi .admonition {
+ width: fit-content;
+}
+
+/* pad the top of the field-list so the text doesn't start directly at
+ the top border; primarily for the field list labels, but adjust the
+ field bodies as well for parity. */
+dl.field-list > dt:first-of-type, dl.field-list > dd:first-of-type {
+ padding-top: 0.3em;
+}
+
+dl.field-list > dt:last-of-type, dl.field-list > dd:last-of-type {
+ padding-bottom: 0.3em;
+}
+
+/* pad the field list labels so they don't crash into the border */
+dl.field-list > dt {
+ padding-left: 0.5em;
+ padding-right: 0.5em;
+}
+
+/* Add a little padding between field list sections */
+dl.field-list > dd:not(:last-child) {
+ padding-bottom: 1em;
+}
+
+/* Sphinx 3.x: unresolved xrefs */
+.rst-content *:not(a) > code.xref {
+ font-weight: 400;
+ color: #333333;
+}
diff --git a/docs/sphinx/compat.py b/docs/sphinx/compat.py
new file mode 100644
index 0000000..9cf7fe0
--- /dev/null
+++ b/docs/sphinx/compat.py
@@ -0,0 +1,230 @@
+"""
+Sphinx cross-version compatibility goop
+"""
+
+import re
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Optional,
+ Type,
+)
+
+from docutils import nodes
+from docutils.nodes import Element, Node, Text
+from docutils.statemachine import StringList
+
+import sphinx
+from sphinx import addnodes, util
+from sphinx.directives import ObjectDescription
+from sphinx.environment import BuildEnvironment
+from sphinx.roles import XRefRole
+from sphinx.util import docfields
+from sphinx.util.docutils import (
+ ReferenceRole,
+ SphinxDirective,
+ switch_source_input,
+)
+from sphinx.util.typing import TextlikeNode
+
+
+MAKE_XREF_WORKAROUND = sphinx.version_info[:3] < (4, 1, 0)
+
+
+SpaceNode: Callable[[str], Node]
+KeywordNode: Callable[[str, str], Node]
+
+if sphinx.version_info[:3] >= (4, 0, 0):
+ SpaceNode = addnodes.desc_sig_space
+ KeywordNode = addnodes.desc_sig_keyword
+else:
+ SpaceNode = Text
+ KeywordNode = addnodes.desc_annotation
+
+
+def nested_parse_with_titles(
+ directive: SphinxDirective, content_node: Element
+) -> None:
+ """
+ This helper preserves error parsing context across sphinx versions.
+ """
+
+ # necessary so that the child nodes get the right source/line set
+ content_node.document = directive.state.document
+
+ try:
+ # Modern sphinx (6.2.0+) supports proper offsetting for
+ # nested parse error context management
+ util.nodes.nested_parse_with_titles(
+ directive.state,
+ directive.content,
+ content_node,
+ content_offset=directive.content_offset,
+ )
+ except TypeError:
+ # No content_offset argument. Fall back to SSI method.
+ with switch_source_input(directive.state, directive.content):
+ util.nodes.nested_parse_with_titles(
+ directive.state, directive.content, content_node
+ )
+
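+# A typical call site, from inside a directive's run() method, might look
+# roughly like this (illustrative sketch only):
+#
+#     content_node = nodes.section()
+#     nested_parse_with_titles(self, content_node)
+#     return content_node.children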
+
+# ###########################################
+# xref compatibility hacks for Sphinx < 4.1 #
+# ###########################################
+
+# When we require >= Sphinx 4.1, the following function and the
+# subsequent 3 compatibility classes can be removed. Anywhere in
+# qapi_domain that uses one of these Compat* types can be switched to
+# using the garden-variety lib-provided classes with no trickery.
+
+
+def _compat_make_xref( # pylint: disable=unused-argument
+ self: sphinx.util.docfields.Field,
+ rolename: str,
+ domain: str,
+ target: str,
+ innernode: Type[TextlikeNode] = addnodes.literal_emphasis,
+ contnode: Optional[Node] = None,
+ env: Optional[BuildEnvironment] = None,
+ inliner: Any = None,
+ location: Any = None,
+) -> Node:
+ """
+ Compatibility workaround for Sphinx versions prior to 4.1.0.
+
+ Older sphinx versions do not use the domain's XRefRole for parsing
+ and formatting cross-references, so we need to perform this magick
+ ourselves to avoid needing to write the parser/formatter in two
+ separate places.
+
+ This workaround isn't brick-for-brick compatible with modern Sphinx
+ versions, because we do not have access to the parent directive's
+ state during this parsing like we do in more modern versions.
+
+ It's no worse than what pre-Sphinx 4.1.0 does, so... oh well!
+ """
+
+ # Yes, this function is gross. Pre-4.1 support is a miracle.
+ # pylint: disable=too-many-locals
+
+ assert env
+ # Note: Sphinx's own code ignores the type warning here, too.
+ if not rolename:
+ return contnode or innernode(target, target) # type: ignore[call-arg]
+
+ # Get the role instance, but don't *execute it* - we lack the
+ # correct state to do so. Instead, we'll just use its public
+ # methods to do our reference formatting, and emulate the rest.
+ role = env.get_domain(domain).roles[rolename]
+ assert isinstance(role, XRefRole)
+
+ # XRefRole features not supported by this compatibility shim;
+ # these were not supported in Sphinx 3.x either, so nothing of
+ # value is really lost.
+ assert not target.startswith("!")
+ assert not re.match(ReferenceRole.explicit_title_re, target)
+ assert not role.lowercase
+ assert not role.fix_parens
+
+ # Code below based mostly on sphinx.roles.XRefRole; run() and
+ # create_xref_node()
+ options = {
+ "refdoc": env.docname,
+ "refdomain": domain,
+ "reftype": rolename,
+ "refexplicit": False,
+ "refwarn": role.warn_dangling,
+ }
+ refnode = role.nodeclass(target, **options)
+ title, target = role.process_link(env, refnode, False, target, target)
+ refnode["reftarget"] = target
+ classes = ["xref", domain, f"{domain}-{rolename}"]
+ refnode += role.innernodeclass(target, title, classes=classes)
+
+ # This is the very gross part of the hack. Normally,
+ # result_nodes takes a document object to which we would pass
+ # self.inliner.document. Prior to Sphinx 4.1, we don't *have* an
+ # inliner to pass, so we have nothing to pass here. However, the
+ # actual implementation of role.result_nodes in this case
+ # doesn't actually use that argument, so this winds up being
+ # ... fine. Rest easy at night knowing this code only runs under
+ # old versions of Sphinx, so at least it won't change in the
+ # future on us and lead to surprising new failures.
+ # Gross, I know.
+ result_nodes, _messages = role.result_nodes(
+ None, # type: ignore
+ env,
+ refnode,
+ is_ref=True,
+ )
+ return nodes.inline(target, "", *result_nodes)
+
+
+class CompatField(docfields.Field):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+class CompatGroupedField(docfields.GroupedField):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+class CompatTypedField(docfields.TypedField):
+ if MAKE_XREF_WORKAROUND:
+ make_xref = _compat_make_xref
+
+
+# ################################################################
+# Nested parsing error location fix for Sphinx 5.3.0 < x < 6.2.0 #
+# ################################################################
+
+# When we require Sphinx 4.x, the TYPE_CHECKING hack where we avoid
+# subscripting ObjectDescription at runtime can be removed in favor of
+# just always subscripting the class.
+
+# When we require Sphinx > 6.2.0, the rest of this compatibility hack
+# can be dropped and QAPIObject can just inherit directly from
+# ObjectDescription[Signature].
+
+SOURCE_LOCATION_FIX = (5, 3, 0) <= sphinx.version_info[:3] < (6, 2, 0)
+
+Signature = str
+
+
+if TYPE_CHECKING:
+ _BaseClass = ObjectDescription[Signature]
+else:
+ _BaseClass = ObjectDescription
+
+
+class ParserFix(_BaseClass):
+
+ _temp_content: StringList
+ _temp_offset: int
+ _temp_node: Optional[addnodes.desc_content]
+
+ def before_content(self) -> None:
+ # Work around a sphinx bug and parse the content ourselves.
+ self._temp_content = self.content
+ self._temp_offset = self.content_offset
+ self._temp_node = None
+
+ if SOURCE_LOCATION_FIX:
+ self._temp_node = addnodes.desc_content()
+ self.state.nested_parse(
+ self.content, self.content_offset, self._temp_node
+ )
+ # Sphinx will try to parse the content block itself;
+ # give it nothing to parse instead.
+ self.content = StringList()
+ self.content_offset = 0
+
+ def transform_content(self, content_node: addnodes.desc_content) -> None:
+ # Sphinx workaround: Inject our parsed content and restore state.
+ if self._temp_node:
+ content_node += self._temp_node.children
+ self.content = self._temp_content
+ self.content_offset = self._temp_offset
diff --git a/docs/sphinx/depfile.py b/docs/sphinx/depfile.py
index afdcbce..d3c774d 100644
--- a/docs/sphinx/depfile.py
+++ b/docs/sphinx/depfile.py
@@ -19,7 +19,7 @@ __version__ = '1.0'
def get_infiles(env):
for x in env.found_docs:
- yield env.doc2path(x)
+ yield str(env.doc2path(x))
yield from ((os.path.join(env.srcdir, dep)
for dep in env.dependencies[x]))
for mod in sys.modules.values():
@@ -31,6 +31,9 @@ def get_infiles(env):
for path in Path(static_path).rglob('*'):
yield str(path)
+ # also include kdoc script
+ yield str(env.config.kerneldoc_bin[1])
+
def write_depfile(app, exception):
if exception:
diff --git a/docs/sphinx/qapi_domain.py b/docs/sphinx/qapi_domain.py
new file mode 100644
index 0000000..ebc46a7
--- /dev/null
+++ b/docs/sphinx/qapi_domain.py
@@ -0,0 +1,1055 @@
+"""
+QAPI domain extension.
+"""
+
+# The best laid plans of mice and men, ...
+# pylint: disable=too-many-lines
+
+from __future__ import annotations
+
+import re
+import types
+from typing import (
+ TYPE_CHECKING,
+ List,
+ NamedTuple,
+ Tuple,
+ Type,
+ cast,
+)
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+from sphinx import addnodes
+from sphinx.directives import ObjectDescription
+from sphinx.domains import (
+ Domain,
+ Index,
+ IndexEntry,
+ ObjType,
+)
+from sphinx.locale import _, __
+from sphinx.roles import XRefRole
+from sphinx.util import logging
+from sphinx.util.docutils import SphinxDirective
+from sphinx.util.nodes import make_id, make_refnode
+
+from compat import (
+ CompatField,
+ CompatGroupedField,
+ CompatTypedField,
+ KeywordNode,
+ ParserFix,
+ Signature,
+ SpaceNode,
+)
+
+
+if TYPE_CHECKING:
+ from typing import (
+ AbstractSet,
+ Any,
+ Dict,
+ Iterable,
+ Optional,
+ Union,
+ )
+
+ from docutils.nodes import Element, Node
+ from sphinx.addnodes import desc_signature, pending_xref
+ from sphinx.application import Sphinx
+ from sphinx.builders import Builder
+ from sphinx.environment import BuildEnvironment
+ from sphinx.util.typing import OptionSpec
+
+
+logger = logging.getLogger(__name__)
+
+
+def _unpack_field(
+ field: nodes.Node,
+) -> Tuple[nodes.field_name, nodes.field_body]:
+ """
+ docutils helper: unpack a field node in a type-safe manner.
+ """
+ assert isinstance(field, nodes.field)
+ assert len(field.children) == 2
+ assert isinstance(field.children[0], nodes.field_name)
+ assert isinstance(field.children[1], nodes.field_body)
+ return (field.children[0], field.children[1])
+
+
+class ObjectEntry(NamedTuple):
+ docname: str
+ node_id: str
+ objtype: str
+ aliased: bool
+
+
+class QAPIXRefRole(XRefRole):
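+ """
+ Cross-reference role for QAPI definitions.
+
+ Target syntax handled by process_link() below (the names are illustrative):
+ ``BlockdevOptions`` is a plain reference, ``~block-core.Foo`` shows only
+ "Foo" in the rendered title, ``[Foo]`` is rendered with surrounding
+ brackets to mark an array type, and ``Foo?`` is rendered with a trailing
+ "optional" marker.
+ """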
+
+ def process_link(
+ self,
+ env: BuildEnvironment,
+ refnode: Element,
+ has_explicit_title: bool,
+ title: str,
+ target: str,
+ ) -> tuple[str, str]:
+ refnode["qapi:namespace"] = env.ref_context.get("qapi:namespace")
+ refnode["qapi:module"] = env.ref_context.get("qapi:module")
+
+ # Cross-references that begin with a tilde adjust the title to
+ # only show the reference without a leading module, even if one
+ # was provided. This is a Sphinx-standard syntax; give it
+ # priority over QAPI-specific type markup below.
+ hide_module = False
+ if target.startswith("~"):
+ hide_module = True
+ target = target[1:]
+
+ # Type names that end with "?" are considered optional
+ # arguments and should be documented as such, but it's not
+ # part of the xref itself.
+ if target.endswith("?"):
+ refnode["qapi:optional"] = True
+ target = target[:-1]
+
+ # Type names wrapped in brackets denote lists. Strip the
+ # brackets and remember to add them back later.
+ if target.startswith("[") and target.endswith("]"):
+ refnode["qapi:array"] = True
+ target = target[1:-1]
+
+ if has_explicit_title:
+ # Don't mess with the title at all if it was explicitly set.
+ # Explicit title syntax for references is e.g.
+ # :qapi:type:`target <explicit title>`
+ # and this explicit title overrides everything else here.
+ return title, target
+
+ title = target
+ if hide_module:
+ title = target.split(".")[-1]
+
+ return title, target
+
+ def result_nodes(
+ self,
+ document: nodes.document,
+ env: BuildEnvironment,
+ node: Element,
+ is_ref: bool,
+ ) -> Tuple[List[nodes.Node], List[nodes.system_message]]:
+
+ # node here is the pending_xref node (or whatever nodeclass was
+ # configured at XRefRole class instantiation time).
+ results: List[nodes.Node] = [node]
+
+ if node.get("qapi:array"):
+ results.insert(0, nodes.literal("[", "["))
+ results.append(nodes.literal("]", "]"))
+
+ if node.get("qapi:optional"):
+ results.append(nodes.Text(", "))
+ results.append(nodes.emphasis("?", "optional"))
+
+ return results, []
+
+
+class QAPIDescription(ParserFix):
+ """
+ Generic QAPI description.
+
+ This is meant to be an abstract class, not instantiated
+ directly. This class handles the abstract details of indexing, the
+ TOC, and reference targets for QAPI descriptions.
+ """
+
+ def handle_signature(self, sig: str, signode: desc_signature) -> Signature:
+ # pylint: disable=unused-argument
+
+ # Do nothing. The return value here is the "name" of the entity
+ # being documented; for QAPI, this is the same as the
+ # "signature", which is just a name.
+
+ # Normally this method must also populate signode with nodes to
+ # render the signature; here we do nothing instead - the
+ # subclasses will handle this.
+ return sig
+
+ def get_index_text(self, name: Signature) -> Tuple[str, str]:
+ """Return the text for the index entry of the object."""
+
+ # NB: this is used for the global index, not the QAPI index.
+ return ("single", f"{name} (QMP {self.objtype})")
+
+ def _get_context(self) -> Tuple[str, str]:
+ namespace = self.options.get(
+ "namespace", self.env.ref_context.get("qapi:namespace", "")
+ )
+ modname = self.options.get(
+ "module", self.env.ref_context.get("qapi:module", "")
+ )
+
+ return namespace, modname
+
+ def _get_fqn(self, name: Signature) -> str:
+ namespace, modname = self._get_context()
+
+ # If we're documenting a module, don't include the module as
+ # part of the FQN; we ARE the module!
+ if self.objtype == "module":
+ modname = ""
+
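+ # Illustrative example: namespace "QMP", module "block-core" and name
+ # "query-block" combine into the FQN "QMP:block-core.query-block".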
+ if modname:
+ name = f"{modname}.{name}"
+ if namespace:
+ name = f"{namespace}:{name}"
+ return name
+
+ def add_target_and_index(
+ self, name: Signature, sig: str, signode: desc_signature
+ ) -> None:
+ # pylint: disable=unused-argument
+
+ # name is the return value of handle_signature.
+ # sig is the original, raw text argument to handle_signature.
+ # For QAPI, these are identical, currently.
+
+ assert self.objtype
+
+ if not (fullname := signode.get("fullname", "")):
+ fullname = self._get_fqn(name)
+
+ node_id = make_id(
+ self.env, self.state.document, self.objtype, fullname
+ )
+ signode["ids"].append(node_id)
+
+ self.state.document.note_explicit_target(signode)
+ domain = cast(QAPIDomain, self.env.get_domain("qapi"))
+ domain.note_object(fullname, self.objtype, node_id, location=signode)
+
+ if "no-index-entry" not in self.options:
+ arity, indextext = self.get_index_text(name)
+ assert self.indexnode is not None
+ if indextext:
+ self.indexnode["entries"].append(
+ (arity, indextext, node_id, "", None)
+ )
+
+ @staticmethod
+ def split_fqn(name: str) -> Tuple[str, str, str]:
+ if ":" in name:
+ ns, name = name.split(":")
+ else:
+ ns = ""
+
+ if "." in name:
+ module, name = name.split(".")
+ else:
+ module = ""
+
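+ # Illustrative example: "QMP:block-core.query-block" splits back into
+ # ("QMP", "block-core", "query-block").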
+ return (ns, module, name)
+
+ def _object_hierarchy_parts(
+ self, sig_node: desc_signature
+ ) -> Tuple[str, ...]:
+ if "fullname" not in sig_node:
+ return ()
+ return self.split_fqn(sig_node["fullname"])
+
+ def _toc_entry_name(self, sig_node: desc_signature) -> str:
+ # This controls the name in the TOC and on the sidebar.
+
+ # This is the return type of _object_hierarchy_parts().
+ toc_parts = cast(Tuple[str, ...], sig_node.get("_toc_parts", ()))
+ if not toc_parts:
+ return ""
+
+ config = self.env.app.config
+ namespace, modname, name = toc_parts
+
+ if config.toc_object_entries_show_parents == "domain":
+ ret = name
+ if modname and modname != self.env.ref_context.get(
+ "qapi:module", ""
+ ):
+ ret = f"{modname}.{name}"
+ if namespace and namespace != self.env.ref_context.get(
+ "qapi:namespace", ""
+ ):
+ ret = f"{namespace}:{ret}"
+ return ret
+ if config.toc_object_entries_show_parents == "hide":
+ return name
+ if config.toc_object_entries_show_parents == "all":
+ return sig_node.get("fullname", name)
+ return ""
+
+
+class QAPIObject(QAPIDescription):
+ """
+ Description of a generic QAPI object.
+
+ It's not used directly, but is instead subclassed by specific directives.
+ """
+
+ # Inherit some standard options from Sphinx's ObjectDescription
+ option_spec: OptionSpec = ( # type:ignore[misc]
+ ObjectDescription.option_spec.copy()
+ )
+ option_spec.update(
+ {
+ # Context overrides:
+ "namespace": directives.unchanged,
+ "module": directives.unchanged,
+ # These are QAPI originals:
+ "since": directives.unchanged,
+ "ifcond": directives.unchanged,
+ "deprecated": directives.flag,
+ "unstable": directives.flag,
+ }
+ )
+
+ doc_field_types = [
+ # :feat name: descr
+ CompatGroupedField(
+ "feature",
+ label=_("Features"),
+ names=("feat",),
+ can_collapse=False,
+ ),
+ ]
+
+ def get_signature_prefix(self) -> List[nodes.Node]:
+ """Return a prefix to put before the object name in the signature."""
+ assert self.objtype
+ return [
+ KeywordNode("", self.objtype.title()),
+ SpaceNode(" "),
+ ]
+
+ def get_signature_suffix(self) -> List[nodes.Node]:
+ """Return a suffix to put after the object name in the signature."""
+ ret: List[nodes.Node] = []
+
+ if "since" in self.options:
+ ret += [
+ SpaceNode(" "),
+ addnodes.desc_sig_element(
+ "", f"(Since: {self.options['since']})"
+ ),
+ ]
+
+ return ret
+
+ def handle_signature(self, sig: str, signode: desc_signature) -> Signature:
+ """
+ Transform a QAPI definition name into RST nodes.
+
+ This method was originally intended for handling function
+ signatures. In the QAPI domain, however, we only pass the
+ definition name as the directive argument and handle everything
+ else in the content body with field lists.
+
+ As such, the only argument here is "sig", which is just the QAPI
+ definition name.
+ """
+ # No module or domain info allowed in the signature!
+ assert ":" not in sig
+ assert "." not in sig
+
+ namespace, modname = self._get_context()
+ signode["fullname"] = self._get_fqn(sig)
+ signode["namespace"] = namespace
+ signode["module"] = modname
+
+ sig_prefix = self.get_signature_prefix()
+ if sig_prefix:
+ signode += addnodes.desc_annotation(
+ str(sig_prefix), "", *sig_prefix
+ )
+ signode += addnodes.desc_name(sig, sig)
+ signode += self.get_signature_suffix()
+
+ return sig
+
+ def _add_infopips(self, contentnode: addnodes.desc_content) -> None:
+ # Add various eye-catches and things that go below the signature
+ # bar, but precede the user-defined content.
+ infopips = nodes.container()
+ infopips.attributes["classes"].append("qapi-infopips")
+
+ def _add_pip(
+ source: str, content: Union[str, List[nodes.Node]], classname: str
+ ) -> None:
+ node = nodes.container(source)
+ if isinstance(content, str):
+ node.append(nodes.Text(content))
+ else:
+ node.extend(content)
+ node.attributes["classes"].extend(["qapi-infopip", classname])
+ infopips.append(node)
+
+ if "deprecated" in self.options:
+ _add_pip(
+ ":deprecated:",
+ f"This {self.objtype} is deprecated.",
+ "qapi-deprecated",
+ )
+
+ if "unstable" in self.options:
+ _add_pip(
+ ":unstable:",
+ f"This {self.objtype} is unstable/experimental.",
+ "qapi-unstable",
+ )
+
+ if self.options.get("ifcond", ""):
+ ifcond = self.options["ifcond"]
+ _add_pip(
+ f":ifcond: {ifcond}",
+ [
+ nodes.emphasis("", "Availability"),
+ nodes.Text(": "),
+ nodes.literal(ifcond, ifcond),
+ ],
+ "qapi-ifcond",
+ )
+
+ if infopips.children:
+ contentnode.insert(0, infopips)
+
+ def _validate_field(self, field: nodes.field) -> None:
+ """Validate field lists in this QAPI Object Description."""
+ name, _ = _unpack_field(field)
+ allowed_fields = set(self.env.app.config.qapi_allowed_fields)
+
+ field_label = name.astext()
+ if field_label in allowed_fields:
+ # Explicitly allowed field list name, OK.
+ return
+
+ try:
+ # split into field type and argument (if provided)
+ # e.g. `:arg type name: descr` is
+ # field_type = "arg", field_arg = "type name".
+ field_type, field_arg = field_label.split(None, 1)
+ except ValueError:
+ # No arguments provided
+ field_type = field_label
+ field_arg = ""
+
+ typemap = self.get_field_type_map()
+ if field_type in typemap:
+ # This is a special docfield, yet-to-be-processed. Catch
+ # correct names, but incorrect arguments. This mismatch WILL
+ # cause Sphinx to render this field incorrectly (without a
+ # warning), which is never what we want.
+ typedesc = typemap[field_type][0]
+ if typedesc.has_arg != bool(field_arg):
+ msg = f"docfield field list type {field_type!r} "
+ if typedesc.has_arg:
+ msg += "requires an argument."
+ else:
+ msg += "takes no arguments."
+ logger.warning(msg, location=field)
+ else:
+ # This is unrecognized entirely. It's valid rST to use
+ # arbitrary fields, but let's ensure the documentation
+ # writer has done this intentionally.
+ valid = ", ".join(sorted(set(typemap) | allowed_fields))
+ msg = (
+ f"Unrecognized field list name {field_label!r}.\n"
+ f"Valid fields for qapi:{self.objtype} are: {valid}\n"
+ "\n"
+ "If this usage is intentional, please add it to "
+ "'qapi_allowed_fields' in docs/conf.py."
+ )
+ logger.warning(msg, location=field)
+
+ def transform_content(self, content_node: addnodes.desc_content) -> None:
+ # This hook runs after before_content and the nested parse, but
+ # before the DocFieldTransformer is executed.
+ super().transform_content(content_node)
+
+ self._add_infopips(content_node)
+
+ # Validate field lists.
+ for child in content_node:
+ if isinstance(child, nodes.field_list):
+ for field in child.children:
+ assert isinstance(field, nodes.field)
+ self._validate_field(field)
+
+
+class SpecialTypedField(CompatTypedField):
+ def make_field(self, *args: Any, **kwargs: Any) -> nodes.field:
+ ret = super().make_field(*args, **kwargs)
+
+ # Look for the characteristic " -- " text node that Sphinx
+ # inserts for each TypedField entry ...
+ for node in ret.traverse(lambda n: str(n) == " -- "):
+ par = node.parent
+ if par.children[0].astext() != "q_dummy":
+ continue
+
+ # If the first node's text is q_dummy, this is a dummy
+ # field we want to strip down to just its contents.
+ del par.children[:-1]
+
+ return ret
+
+
+class QAPICommand(QAPIObject):
+ """Description of a QAPI Command."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :arg TypeName ArgName: descr
+ SpecialTypedField(
+ "argument",
+ label=_("Arguments"),
+ names=("arg",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ # :error: descr
+ CompatField(
+ "error",
+ label=_("Errors"),
+ names=("error", "errors"),
+ has_arg=False,
+ ),
+ # :return TypeName: descr
+ CompatGroupedField(
+ "returnvalue",
+ label=_("Return"),
+ rolename="type",
+ names=("return",),
+ can_collapse=True,
+ ),
+ ]
+ )
+
+
+class QAPIEnum(QAPIObject):
+ """Description of a QAPI Enum."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :value name: descr
+ CompatGroupedField(
+ "value",
+ label=_("Values"),
+ names=("value",),
+ can_collapse=False,
+ )
+ ]
+ )
+
+
+class QAPIAlternate(QAPIObject):
+ """Description of a QAPI Alternate."""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :alt type name: descr
+ CompatTypedField(
+ "alternative",
+ label=_("Alternatives"),
+ names=("alt",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ ]
+ )
+
+
+class QAPIObjectWithMembers(QAPIObject):
+ """Base class for Events/Structs/Unions"""
+
+ doc_field_types = QAPIObject.doc_field_types.copy()
+ doc_field_types.extend(
+ [
+ # :member type name: descr
+ SpecialTypedField(
+ "member",
+ label=_("Members"),
+ names=("memb",),
+ typerolename="type",
+ can_collapse=False,
+ ),
+ ]
+ )
+
+
+class QAPIEvent(QAPIObjectWithMembers):
+ # pylint: disable=too-many-ancestors
+ """Description of a QAPI Event."""
+
+
+class QAPIJSONObject(QAPIObjectWithMembers):
+ # pylint: disable=too-many-ancestors
+ """Description of a QAPI Object: structs and unions."""
+
+
+class QAPIModule(QAPIDescription):
+ """
+ Directive to mark description of a new module.
+
+ This directive doesn't generate any special formatting, and is just
+ a pass-through for the content body. Named section titles are
+ allowed in the content body.
+
+ Use this directive to create entries for the QAPI module in the
+ global index and the QAPI index; as well as to associate subsequent
+ definitions with the module they are defined in for purposes of
+ search and QAPI index organization.
+
+ :arg: The name of the module.
+ :opt no-index: Don't add cross-reference targets or index entries.
+ :opt no-typesetting: Don't render the content body (but preserve any
+ cross-reference target IDs in the squelched output.)
+
+ Example::
+
+ .. qapi:module:: block-core
+ :no-index:
+ :no-typesetting:
+
+ Lorem ipsum, dolor sit amet ...
+ """
+
+ def run(self) -> List[Node]:
+ modname = self.arguments[0].strip()
+ self.env.ref_context["qapi:module"] = modname
+ ret = super().run()
+
+ # ObjectDescription always creates a visible signature bar. We
+ # want module items to be "invisible", however.
+
+ # Extract the content body of the directive:
+ assert isinstance(ret[-1], addnodes.desc)
+ desc_node = ret.pop(-1)
+ assert isinstance(desc_node.children[1], addnodes.desc_content)
+ ret.extend(desc_node.children[1].children)
+
+ # Re-home node_ids so anchor refs still work:
+ node_ids: List[str]
+ if node_ids := [
+ node_id
+ for el in desc_node.children[0].traverse(nodes.Element)
+ for node_id in cast(List[str], el.get("ids", ()))
+ ]:
+ target_node = nodes.target(ids=node_ids)
+ ret.insert(1, target_node)
+
+ return ret
+
+
+class QAPINamespace(SphinxDirective):
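+ """
+ Directive to set the namespace context for subsequent QAPI definitions.
+
+ Example, with an illustrative namespace name::
+
+ .. qapi:namespace:: QMP
+ """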
+ has_content = False
+ required_arguments = 1
+
+ def run(self) -> List[Node]:
+ namespace = self.arguments[0].strip()
+ self.env.ref_context["qapi:namespace"] = namespace
+
+ return []
+
+
+class QAPIIndex(Index):
+ """
+ Index subclass to provide the QAPI definition index.
+ """
+
+ # pylint: disable=too-few-public-methods
+
+ name = "index"
+ localname = _("QAPI Index")
+ shortname = _("QAPI Index")
+ namespace = ""
+
+ def generate(
+ self,
+ docnames: Optional[Iterable[str]] = None,
+ ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
+ assert isinstance(self.domain, QAPIDomain)
+ content: Dict[str, List[IndexEntry]] = {}
+ collapse = False
+
+ for objname, obj in self.domain.objects.items():
+ if docnames and obj.docname not in docnames:
+ continue
+
+ ns, _mod, name = QAPIDescription.split_fqn(objname)
+
+ if self.namespace != ns:
+ continue
+
+ # Add an alphabetical entry:
+ entries = content.setdefault(name[0].upper(), [])
+ entries.append(
+ IndexEntry(
+ name, 0, obj.docname, obj.node_id, obj.objtype, "", ""
+ )
+ )
+
+ # Add a categorical entry:
+ category = obj.objtype.title() + "s"
+ entries = content.setdefault(category, [])
+ entries.append(
+ IndexEntry(name, 0, obj.docname, obj.node_id, "", "", "")
+ )
+
+ # Sort entries within each category alphabetically
+ for category in content:
+ content[category] = sorted(content[category])
+
+ # Sort the categories themselves; type names first, ABC entries last.
+ sorted_content = sorted(
+ content.items(),
+ key=lambda x: (len(x[0]) == 1, x[0]),
+ )
+ return sorted_content, collapse
+
+
+class QAPIDomain(Domain):
+ """QAPI language domain."""
+
+ name = "qapi"
+ label = "QAPI"
+
+ # This table associates cross-reference object types (key) with an
+ # ObjType instance, which defines the valid cross-reference roles
+ # for each object type.
+ #
+ # e.g., the :qapi:type: cross-reference role can refer to enum,
+ # struct, union, or alternate objects; but :qapi:obj: can refer to
+ # anything. Each object also gets its own targeted cross-reference role.
+ object_types: Dict[str, ObjType] = {
+ "module": ObjType(_("module"), "mod", "any"),
+ "command": ObjType(_("command"), "cmd", "any"),
+ "event": ObjType(_("event"), "event", "any"),
+ "enum": ObjType(_("enum"), "enum", "type", "any"),
+ "object": ObjType(_("object"), "obj", "type", "any"),
+ "alternate": ObjType(_("alternate"), "alt", "type", "any"),
+ }
+
+ # Each of these provides a rST directive,
+ # e.g. .. qapi:module:: block-core
+ directives = {
+ "namespace": QAPINamespace,
+ "module": QAPIModule,
+ "command": QAPICommand,
+ "event": QAPIEvent,
+ "enum": QAPIEnum,
+ "object": QAPIJSONObject,
+ "alternate": QAPIAlternate,
+ }
+
+ # These are all cross-reference roles; e.g.
+ # :qapi:cmd:`query-block`. The keys correlate to the names used in
+ # the object_types table values above.
+ roles = {
+ "mod": QAPIXRefRole(),
+ "cmd": QAPIXRefRole(),
+ "event": QAPIXRefRole(),
+ "enum": QAPIXRefRole(),
+ "obj": QAPIXRefRole(), # specifically structs and unions.
+ "alt": QAPIXRefRole(),
+ # reference any data type (excludes modules, commands, events)
+ "type": QAPIXRefRole(),
+ "any": QAPIXRefRole(), # reference *any* type of QAPI object.
+ }
+
+ # Moved into the data property at runtime;
+ # this is the internal index of reference-able objects.
+ initial_data: Dict[str, Dict[str, Tuple[Any]]] = {
+ "objects": {}, # fullname -> ObjectEntry
+ }
+
+ # Index pages to generate; each entry is an Index class.
+ indices = [
+ QAPIIndex,
+ ]
+
+ @property
+ def objects(self) -> Dict[str, ObjectEntry]:
+ ret = self.data.setdefault("objects", {})
+ return ret # type: ignore[no-any-return]
+
+ def setup(self) -> None:
+ namespaces = set(self.env.app.config.qapi_namespaces)
+ for namespace in namespaces:
+ new_index: Type[QAPIIndex] = types.new_class(
+ f"{namespace}Index", bases=(QAPIIndex,)
+ )
+ new_index.name = f"{namespace.lower()}-index"
+ new_index.localname = _(f"{namespace} Index")
+ new_index.shortname = _(f"{namespace} Index")
+ new_index.namespace = namespace
+
+ self.indices.append(new_index)
+
+ super().setup()
+
+ def note_object(
+ self,
+ name: str,
+ objtype: str,
+ node_id: str,
+ aliased: bool = False,
+ location: Any = None,
+ ) -> None:
+ """Note a QAPI object for cross reference."""
+ if name in self.objects:
+ other = self.objects[name]
+ if other.aliased and aliased is False:
+ # The original definition found. Override it!
+ pass
+ elif other.aliased is False and aliased:
+ # The original definition is already registered.
+ return
+ else:
+ # duplicated
+ logger.warning(
+ __(
+ "duplicate object description of %s, "
+ "other instance in %s, use :no-index: for one of them"
+ ),
+ name,
+ other.docname,
+ location=location,
+ )
+ self.objects[name] = ObjectEntry(
+ self.env.docname, node_id, objtype, aliased
+ )
+
+ def clear_doc(self, docname: str) -> None:
+ for fullname, obj in list(self.objects.items()):
+ if obj.docname == docname:
+ del self.objects[fullname]
+
+ def merge_domaindata(
+ self, docnames: AbstractSet[str], otherdata: Dict[str, Any]
+ ) -> None:
+ for fullname, obj in otherdata["objects"].items():
+ if obj.docname in docnames:
+ # Sphinx's own python domain doesn't appear to bother to
+ # check for collisions. Assert they don't happen and
+ # we'll fix it if/when the case arises.
+ assert fullname not in self.objects, (
+ "bug - collision on merge?"
+ f" {fullname=} {obj=} {self.objects[fullname]=}"
+ )
+ self.objects[fullname] = obj
+
+ def find_obj(
+ self, namespace: str, modname: str, name: str, typ: Optional[str]
+ ) -> List[Tuple[str, ObjectEntry]]:
+ """
+ Find a QAPI object for "name", maybe using contextual information.
+
+ Returns a list of (name, object entry) tuples.
+
+ :param namespace: The current namespace context (if any!) under
+ which we are searching.
+ :param modname: The current module context (if any!) under
+ which we are searching.
+ :param name: The name of the x-ref to resolve; may or may not
+ include leading context.
+ :param type: The role name of the x-ref we're resolving, if
+ provided. This is absent for "any" role lookups.
+ """
+ if not name:
+ return []
+
+ # ##
+ # what to search for
+ # ##
+
+ parts = list(QAPIDescription.split_fqn(name))
+ explicit = tuple(bool(x) for x in parts)
+
+ # Fill in the blanks where possible:
+ if namespace and not parts[0]:
+ parts[0] = namespace
+ if modname and not parts[1]:
+ parts[1] = modname
+
+ implicit_fqn = ""
+ if all(parts):
+ implicit_fqn = f"{parts[0]}:{parts[1]}.{parts[2]}"
+
+ if typ is None:
+ # :any: lookup, search everything:
+ objtypes: List[str] = list(self.object_types)
+ else:
+ # type is specified and will be a role (e.g. obj, mod, cmd)
+ # convert this to eligible object types (e.g. command, module)
+ # using the QAPIDomain.object_types table.
+ objtypes = self.objtypes_for_role(typ, [])
+
+ # ##
+ # search!
+ # ##
+
+ def _search(needle: str) -> List[str]:
+ if (
+ needle
+ and needle in self.objects
+ and self.objects[needle].objtype in objtypes
+ ):
+ return [needle]
+ return []
+
+ if found := _search(name):
+ # Exact match!
+ pass
+ elif found := _search(implicit_fqn):
+ # Exact match using contextual information to fill in the gaps.
+ pass
+ else:
+ # No exact hits, perform applicable fuzzy searches.
+ searches = []
+
+ esc = tuple(re.escape(s) for s in parts)
+
+ # Try searching for ns:*.name or ns:name
+ if explicit[0] and not explicit[1]:
+ searches.append(f"^{esc[0]}:([^\\.]+\\.)?{esc[2]}$")
+ # Try searching for *:module.name or module.name
+ if explicit[1] and not explicit[0]:
+ searches.append(f"(^|:){esc[1]}\\.{esc[2]}$")
+ # Try searching for context-ns:*.name or context-ns:name
+ if parts[0] and not (explicit[0] or explicit[1]):
+ searches.append(f"^{esc[0]}:([^\\.]+\\.)?{esc[2]}$")
+ # Try searching for *:context-mod.name or context-mod.name
+ if parts[1] and not (explicit[0] or explicit[1]):
+ searches.append(f"(^|:){esc[1]}\\.{esc[2]}$")
+ # Try searching for *:name, *.name, or name
+ if not (explicit[0] or explicit[1]):
+ searches.append(f"(^|:|\\.){esc[2]}$")
+
+ for search in searches:
+ if found := [
+ oname
+ for oname in self.objects
+ if re.search(search, oname)
+ and self.objects[oname].objtype in objtypes
+ ]:
+ break
+
+ matches = [(oname, self.objects[oname]) for oname in found]
+ if len(matches) > 1:
+ matches = [m for m in matches if not m[1].aliased]
+ return matches
+
+ def resolve_xref(
+ self,
+ env: BuildEnvironment,
+ fromdocname: str,
+ builder: Builder,
+ typ: str,
+ target: str,
+ node: pending_xref,
+ contnode: Element,
+ ) -> nodes.reference | None:
+ namespace = node.get("qapi:namespace")
+ modname = node.get("qapi:module")
+ matches = self.find_obj(namespace, modname, target, typ)
+
+ if not matches:
+ # Normally, we could pass warn_dangling=True to QAPIXRefRole(),
+ # but that will trigger on references to these built-in types,
+ # which we'd like to ignore instead.
+
+ # Take care of that warning here instead, so long as the
+ # reference isn't to one of our built-in core types.
+ if target not in (
+ "string",
+ "number",
+ "int",
+ "boolean",
+ "null",
+ "value",
+ "q_empty",
+ ):
+ logger.warning(
+ __("qapi:%s reference target not found: %r"),
+ typ,
+ target,
+ type="ref",
+ subtype="qapi",
+ location=node,
+ )
+ return None
+
+ if len(matches) > 1:
+ logger.warning(
+ __("more than one target found for cross-reference %r: %s"),
+ target,
+ ", ".join(match[0] for match in matches),
+ type="ref",
+ subtype="qapi",
+ location=node,
+ )
+
+ name, obj = matches[0]
+ return make_refnode(
+ builder, fromdocname, obj.docname, obj.node_id, contnode, name
+ )
+
+ def resolve_any_xref(
+ self,
+ env: BuildEnvironment,
+ fromdocname: str,
+ builder: Builder,
+ target: str,
+ node: pending_xref,
+ contnode: Element,
+ ) -> List[Tuple[str, nodes.reference]]:
+ results: List[Tuple[str, nodes.reference]] = []
+ matches = self.find_obj(
+ node.get("qapi:namespace"), node.get("qapi:module"), target, None
+ )
+ for name, obj in matches:
+ rolename = self.role_for_objtype(obj.objtype)
+ assert rolename is not None
+ role = f"qapi:{rolename}"
+ refnode = make_refnode(
+ builder, fromdocname, obj.docname, obj.node_id, contnode, name
+ )
+ results.append((role, refnode))
+ return results
+
+
+def setup(app: Sphinx) -> Dict[str, Any]:
+ app.setup_extension("sphinx.directives")
+ app.add_config_value(
+ "qapi_allowed_fields",
+ set(),
+ "env", # Setting impacts parsing phase
+ types=set,
+ )
+ app.add_config_value(
+ "qapi_namespaces",
+ set(),
+ "env",
+ types=set,
+ )
+ app.add_domain(QAPIDomain)
+
+ return {
+ "version": "1.0",
+ "env_version": 1,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
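+
+
+# Minimal conf.py wiring for this domain (an illustrative sketch; the
+# option values shown are assumptions, not requirements):
+#
+# extensions = ["qapi_domain"]
+# qapi_allowed_fields = {"example"}
+# qapi_namespaces = {"QMP"}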
diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py
index 738b245..8011ac9 100644
--- a/docs/sphinx/qapidoc.py
+++ b/docs/sphinx/qapidoc.py
@@ -2,6 +2,7 @@
#
# QEMU qapidoc QAPI file parsing extension
#
+# Copyright (c) 2024-2025 Red Hat
# Copyright (c) 2020 Linaro
#
# This work is licensed under the terms of the GNU GPLv2 or later.
@@ -24,446 +25,438 @@ The Sphinx documentation on writing extensions is at:
https://www.sphinx-doc.org/en/master/development/index.html
"""
+from __future__ import annotations
+
+
+__version__ = "2.0"
+
+from contextlib import contextmanager
import os
+from pathlib import Path
import re
import sys
-import textwrap
-from typing import List
+from typing import TYPE_CHECKING
from docutils import nodes
-from docutils.parsers.rst import Directive, directives
-from docutils.statemachine import ViewList
-from qapi.error import QAPIError, QAPISemError
-from qapi.gen import QAPISchemaVisitor
-from qapi.schema import QAPISchema
-
+from docutils.parsers.rst import directives
+from docutils.statemachine import StringList
+from qapi.error import QAPIError
+from qapi.parser import QAPIDoc
+from qapi.schema import (
+ QAPISchema,
+ QAPISchemaArrayType,
+ QAPISchemaCommand,
+ QAPISchemaDefinition,
+ QAPISchemaEnumMember,
+ QAPISchemaEvent,
+ QAPISchemaFeature,
+ QAPISchemaMember,
+ QAPISchemaObjectType,
+ QAPISchemaObjectTypeMember,
+ QAPISchemaType,
+ QAPISchemaVisitor,
+)
+from qapi.source import QAPISourceInfo
from sphinx import addnodes
from sphinx.directives.code import CodeBlock
from sphinx.errors import ExtensionError
-from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+from sphinx.util.docutils import SphinxDirective, switch_source_input
from sphinx.util.nodes import nested_parse_with_titles
+from qapidoc_legacy import QAPISchemaGenRSTVisitor # type: ignore
-__version__ = "1.0"
+if TYPE_CHECKING:
+ from typing import (
+ Any,
+ Generator,
+ List,
+ Optional,
+ Sequence,
+ Union,
+ )
-def dedent(text: str) -> str:
- # Adjust indentation to make description text parse as paragraph.
+ from sphinx.application import Sphinx
+ from sphinx.util.typing import ExtensionMetadata
- lines = text.splitlines(True)
- if re.match(r"\s+", lines[0]):
- # First line is indented; description started on the line after
- # the name. dedent the whole block.
- return textwrap.dedent(text)
- # Descr started on same line. Dedent line 2+.
- return lines[0] + textwrap.dedent("".join(lines[1:]))
+logger = logging.getLogger(__name__)
-# Disable black auto-formatter until re-enabled:
-# fmt: off
+class Transmogrifier:
+ # pylint: disable=too-many-public-methods
+ # Field names used for different entity types:
+ field_types = {
+ "enum": "value",
+ "struct": "memb",
+ "union": "memb",
+ "event": "memb",
+ "command": "arg",
+ "alternate": "alt",
+ }
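+ # e.g. a command's arguments are emitted as ":arg <type> <name>:" field
+ # list entries and an enum's values as ":value <name>:" entries, matching
+ # the docfields declared by the QAPI Sphinx domain.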
-class QAPISchemaGenRSTVisitor(QAPISchemaVisitor):
- """A QAPI schema visitor which generates docutils/Sphinx nodes
-
- This class builds up a tree of docutils/Sphinx nodes corresponding
- to documentation for the various QAPI objects. To use it, first
- create a QAPISchemaGenRSTVisitor object, and call its
- visit_begin() method. Then you can call one of the two methods
- 'freeform' (to add documentation for a freeform documentation
- chunk) or 'symbol' (to add documentation for a QAPI symbol). These
- will cause the visitor to build up the tree of document
- nodes. Once you've added all the documentation via 'freeform' and
- 'symbol' method calls, you can call 'get_document_nodes' to get
- the final list of document nodes (in a form suitable for returning
- from a Sphinx directive's 'run' method).
- """
- def __init__(self, sphinx_directive):
- self._cur_doc = None
- self._sphinx_directive = sphinx_directive
- self._top_node = nodes.section()
- self._active_headings = [self._top_node]
-
- def _make_dlitem(self, term, defn):
- """Return a dlitem node with the specified term and definition.
-
- term should be a list of Text and literal nodes.
- defn should be one of:
- - a string, which will be handed to _parse_text_into_node
- - a list of Text and literal nodes, which will be put into
- a paragraph node
- """
- dlitem = nodes.definition_list_item()
- dlterm = nodes.term('', '', *term)
- dlitem += dlterm
- if defn:
- dldef = nodes.definition()
- if isinstance(defn, list):
- dldef += nodes.paragraph('', '', *defn)
- else:
- self._parse_text_into_node(defn, dldef)
- dlitem += dldef
- return dlitem
-
- def _make_section(self, title):
- """Return a section node with optional title"""
- section = nodes.section(ids=[self._sphinx_directive.new_serialno()])
- if title:
- section += nodes.title(title, title)
- return section
-
- def _nodes_for_ifcond(self, ifcond, with_if=True):
- """Return list of Text, literal nodes for the ifcond
-
- Return a list which gives text like ' (If: condition)'.
- If with_if is False, we don't return the "(If: " and ")".
- """
-
- doc = ifcond.docgen()
- if not doc:
- return []
- doc = nodes.literal('', doc)
- if not with_if:
- return [doc]
+ def __init__(self) -> None:
+ self._curr_ent: Optional[QAPISchemaDefinition] = None
+ self._result = StringList()
+ self.indent = 0
- nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')]
- nodelist.append(doc)
- nodelist.append(nodes.Text(')'))
- return nodelist
+ @property
+ def result(self) -> StringList:
+ return self._result
- def _nodes_for_one_member(self, member):
- """Return list of Text, literal nodes for this member
+ @property
+ def entity(self) -> QAPISchemaDefinition:
+ assert self._curr_ent is not None
+ return self._curr_ent
- Return a list of doctree nodes which give text like
- 'name: type (optional) (If: ...)' suitable for use as the
- 'term' part of a definition list item.
- """
- term = [nodes.literal('', member.name)]
- if member.type.doc_type():
- term.append(nodes.Text(': '))
- term.append(nodes.literal('', member.type.doc_type()))
- if member.optional:
- term.append(nodes.Text(' (optional)'))
- if member.ifcond.is_present():
- term.extend(self._nodes_for_ifcond(member.ifcond))
- return term
-
- def _nodes_for_variant_when(self, branches, variant):
- """Return list of Text, literal nodes for variant 'when' clause
-
- Return a list of doctree nodes which give text like
- 'when tagname is variant (If: ...)' suitable for use in
- the 'branches' part of a definition list.
- """
- term = [nodes.Text(' when '),
- nodes.literal('', branches.tag_member.name),
- nodes.Text(' is '),
- nodes.literal('', '"%s"' % variant.name)]
- if variant.ifcond.is_present():
- term.extend(self._nodes_for_ifcond(variant.ifcond))
- return term
-
- def _nodes_for_members(self, doc, what, base=None, branches=None):
- """Return list of doctree nodes for the table of members"""
- dlnode = nodes.definition_list()
- for section in doc.args.values():
- term = self._nodes_for_one_member(section.member)
- # TODO drop fallbacks when undocumented members are outlawed
- if section.text:
- defn = dedent(section.text)
- else:
- defn = [nodes.Text('Not documented')]
+ @property
+ def member_field_type(self) -> str:
+ return self.field_types[self.entity.meta]
- dlnode += self._make_dlitem(term, defn)
+ # General-purpose rST generation functions
- if base:
- dlnode += self._make_dlitem([nodes.Text('The members of '),
- nodes.literal('', base.doc_type())],
- None)
+ def get_indent(self) -> str:
+ return " " * self.indent
- if branches:
- for v in branches.variants:
- if v.type.name == 'q_empty':
- continue
- assert not v.type.is_implicit()
- term = [nodes.Text('The members of '),
- nodes.literal('', v.type.doc_type())]
- term.extend(self._nodes_for_variant_when(branches, v))
- dlnode += self._make_dlitem(term, None)
-
- if not dlnode.children:
- return []
-
- section = self._make_section(what)
- section += dlnode
- return [section]
-
- def _nodes_for_enum_values(self, doc):
- """Return list of doctree nodes for the table of enum values"""
- seen_item = False
- dlnode = nodes.definition_list()
- for section in doc.args.values():
- termtext = [nodes.literal('', section.member.name)]
- if section.member.ifcond.is_present():
- termtext.extend(self._nodes_for_ifcond(section.member.ifcond))
- # TODO drop fallbacks when undocumented members are outlawed
- if section.text:
- defn = dedent(section.text)
- else:
- defn = [nodes.Text('Not documented')]
-
- dlnode += self._make_dlitem(termtext, defn)
- seen_item = True
-
- if not seen_item:
- return []
-
- section = self._make_section('Values')
- section += dlnode
- return [section]
-
- def _nodes_for_arguments(self, doc, arg_type):
- """Return list of doctree nodes for the arguments section"""
- if arg_type and not arg_type.is_implicit():
- assert not doc.args
- section = self._make_section('Arguments')
- dlnode = nodes.definition_list()
- dlnode += self._make_dlitem(
- [nodes.Text('The members of '),
- nodes.literal('', arg_type.name)],
- None)
- section += dlnode
- return [section]
-
- return self._nodes_for_members(doc, 'Arguments')
-
- def _nodes_for_features(self, doc):
- """Return list of doctree nodes for the table of features"""
- seen_item = False
- dlnode = nodes.definition_list()
- for section in doc.features.values():
- dlnode += self._make_dlitem(
- [nodes.literal('', section.member.name)], dedent(section.text))
- seen_item = True
-
- if not seen_item:
- return []
-
- section = self._make_section('Features')
- section += dlnode
- return [section]
-
- def _nodes_for_example(self, exampletext):
- """Return list of doctree nodes for a code example snippet"""
- return [nodes.literal_block(exampletext, exampletext)]
-
- def _nodes_for_sections(self, doc):
- """Return list of doctree nodes for additional sections"""
- nodelist = []
- for section in doc.sections:
- if section.tag and section.tag == 'TODO':
- # Hide TODO: sections
- continue
-
- if not section.tag:
- # Sphinx cannot handle sectionless titles;
- # Instead, just append the results to the prior section.
- container = nodes.container()
- self._parse_text_into_node(section.text, container)
- nodelist += container.children
- continue
-
- snode = self._make_section(section.tag)
- if section.tag.startswith('Example'):
- snode += self._nodes_for_example(dedent(section.text))
- else:
- self._parse_text_into_node(dedent(section.text), snode)
- nodelist.append(snode)
- return nodelist
-
- def _nodes_for_if_section(self, ifcond):
- """Return list of doctree nodes for the "If" section"""
- nodelist = []
- if ifcond.is_present():
- snode = self._make_section('If')
- snode += nodes.paragraph(
- '', '', *self._nodes_for_ifcond(ifcond, with_if=False)
- )
- nodelist.append(snode)
- return nodelist
-
- def _add_doc(self, typ, sections):
- """Add documentation for a command/object/enum...
-
- We assume we're documenting the thing defined in self._cur_doc.
- typ is the type of thing being added ("Command", "Object", etc)
+ @contextmanager
+ def indented(self) -> Generator[None]:
+ self.indent += 1
+ try:
+ yield
+ finally:
+ self.indent -= 1
- sections is a list of nodes for sections to add to the definition.
- """
+ def add_line_raw(self, line: str, source: str, *lineno: int) -> None:
+ """Append one line of generated reST to the output."""
- doc = self._cur_doc
- snode = nodes.section(ids=[self._sphinx_directive.new_serialno()])
- snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol),
- nodes.Text(' (' + typ + ')')])
- self._parse_text_into_node(doc.body.text, snode)
- for s in sections:
- if s is not None:
- snode += s
- self._add_node_to_current_heading(snode)
-
- def visit_enum_type(self, name, info, ifcond, features, members, prefix):
- doc = self._cur_doc
- self._add_doc('Enum',
- self._nodes_for_enum_values(doc)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_object_type(self, name, info, ifcond, features,
- base, members, branches):
- doc = self._cur_doc
- if base and base.is_implicit():
- base = None
- self._add_doc('Object',
- self._nodes_for_members(doc, 'Members', base, branches)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_alternate_type(self, name, info, ifcond, features,
- alternatives):
- doc = self._cur_doc
- self._add_doc('Alternate',
- self._nodes_for_members(doc, 'Members')
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_command(self, name, info, ifcond, features, arg_type,
- ret_type, gen, success_response, boxed, allow_oob,
- allow_preconfig, coroutine):
- doc = self._cur_doc
- self._add_doc('Command',
- self._nodes_for_arguments(doc, arg_type)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def visit_event(self, name, info, ifcond, features, arg_type, boxed):
- doc = self._cur_doc
- self._add_doc('Event',
- self._nodes_for_arguments(doc, arg_type)
- + self._nodes_for_features(doc)
- + self._nodes_for_sections(doc)
- + self._nodes_for_if_section(ifcond))
-
- def symbol(self, doc, entity):
- """Add documentation for one symbol to the document tree
-
- This is the main entry point which causes us to add documentation
- nodes for a symbol (which could be a 'command', 'object', 'event',
- etc). We do this by calling 'visit' on the schema entity, which
- will then call back into one of our visit_* methods, depending
- on what kind of thing this symbol is.
- """
- self._cur_doc = doc
- entity.visit(self)
- self._cur_doc = None
+ # NB: Sphinx uses zero-indexed lines; subtract one.
+ lineno = tuple((n - 1 for n in lineno))
- def _start_new_heading(self, heading, level):
- """Start a new heading at the specified heading level
+ if line.strip():
+ # not a blank line
+ self._result.append(
+ self.get_indent() + line.rstrip("\n"), source, *lineno
+ )
+ else:
+ self._result.append("", source, *lineno)
+
+ def add_line(self, content: str, info: QAPISourceInfo) -> None:
+ # NB: We *require* an info object; this works out OK because we
+ # don't document built-in objects that don't have
+ # one. Everything else should.
+ self.add_line_raw(content, info.fname, info.line)
+
+ def add_lines(
+ self,
+ content: str,
+ info: QAPISourceInfo,
+ ) -> None:
+ lines = content.splitlines(True)
+ for i, line in enumerate(lines):
+ self.add_line_raw(line, info.fname, info.line + i)
+
+ def ensure_blank_line(self) -> None:
+ # Empty document -- no blank line required.
+ if not self._result:
+ return
+
+ # Last line isn't blank, add one.
+ if self._result[-1].strip(): # pylint: disable=no-member
+ fname, line = self._result.info(-1)
+ assert isinstance(line, int)
+ # New blank line is credited to one-after the current last line.
+ # +2: correct for zero/one index, then increment by one.
+ self.add_line_raw("", fname, line + 2)
+
+ def add_field(
+ self,
+ kind: str,
+ name: str,
+ body: str,
+ info: QAPISourceInfo,
+ typ: Optional[str] = None,
+ ) -> None:
+ if typ:
+ text = f":{kind} {typ} {name}: {body}"
+ else:
+ text = f":{kind} {name}: {body}"
+ self.add_lines(text, info)
+
+ def format_type(
+        self, ent: Union[QAPISchemaDefinition, QAPISchemaMember]
+ ) -> Optional[str]:
+ if isinstance(ent, (QAPISchemaEnumMember, QAPISchemaFeature)):
+ return None
+
+ qapi_type = ent
+ optional = False
+ if isinstance(ent, QAPISchemaObjectTypeMember):
+ qapi_type = ent.type
+ optional = ent.optional
+
+ if isinstance(qapi_type, QAPISchemaArrayType):
+ ret = f"[{qapi_type.element_type.doc_type()}]"
+ else:
+ assert isinstance(qapi_type, QAPISchemaType)
+ tmp = qapi_type.doc_type()
+ assert tmp
+ ret = tmp
+ if optional:
+ ret += "?"
+
+ return ret
+
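+    # Illustration (not taken from a real schema): for an optional struct
+    # member ``tags`` whose QAPI type is an array of ``str``, format_type()
+    # above yields "[str]?", and generate_field() below then emits a field
+    # list line such as ``:memb [str]? tags: List of tags.``.
+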
+ def generate_field(
+ self,
+ kind: str,
+ member: QAPISchemaMember,
+ body: str,
+ info: QAPISourceInfo,
+ ) -> None:
+ typ = self.format_type(member)
+ self.add_field(kind, member.name, body, info, typ)
+
+ # Transmogrification helpers
+
+ def visit_paragraph(self, section: QAPIDoc.Section) -> None:
+ # Squelch empty paragraphs.
+ if not section.text:
+ return
+
+ self.ensure_blank_line()
+ self.add_lines(section.text, section.info)
+ self.ensure_blank_line()
+
+ def visit_member(self, section: QAPIDoc.ArgSection) -> None:
+ # FIXME: ifcond for members
+ # TODO: features for members (documented at entity-level,
+ # but sometimes defined per-member. Should we add such
+ # information to member descriptions when we can?)
+ assert section.member
+ self.generate_field(
+ self.member_field_type,
+ section.member,
+ # TODO drop fallbacks when undocumented members are outlawed
+ section.text if section.text else "Not documented",
+ section.info,
+ )
- Create a new section whose title is 'heading' and which is placed
- in the docutils node tree as a child of the most recent level-1
- heading. Subsequent document sections (commands, freeform doc chunks,
- etc) will be placed as children of this new heading section.
+ def visit_feature(self, section: QAPIDoc.ArgSection) -> None:
+ # FIXME - ifcond for features is not handled at all yet!
+ # Proposal: decorate the right-hand column with some graphical
+ # element to indicate conditional availability?
+ assert section.text # Guaranteed by parser.py
+ assert section.member
+
+ self.generate_field("feat", section.member, section.text, section.info)
+
+ def visit_returns(self, section: QAPIDoc.Section) -> None:
+ assert isinstance(self.entity, QAPISchemaCommand)
+ rtype = self.entity.ret_type
+ # q_empty can produce None, but we won't be documenting anything
+ # without an explicit return statement in the doc block, and we
+ # should not have any such explicit statements when there is no
+ # return value.
+ assert rtype
+
+ typ = self.format_type(rtype)
+ assert typ
+ assert section.text
+ self.add_field("return", typ, section.text, section.info)
+
+ def visit_errors(self, section: QAPIDoc.Section) -> None:
+ # FIXME: the formatting for errors may be inconsistent and may
+ # or may not require different newline placement to ensure
+ # proper rendering as a nested list.
+ self.add_lines(f":error:\n{section.text}", section.info)
+
+ def preamble(self, ent: QAPISchemaDefinition) -> None:
"""
- if len(self._active_headings) < level:
- raise QAPISemError(self._cur_doc.info,
- 'Level %d subheading found outside a '
- 'level %d heading'
- % (level, level - 1))
- snode = self._make_section(heading)
- self._active_headings[level - 1] += snode
- self._active_headings = self._active_headings[:level]
- self._active_headings.append(snode)
-
- def _add_node_to_current_heading(self, node):
- """Add the node to whatever the current active heading is"""
- self._active_headings[-1] += node
-
- def freeform(self, doc):
- """Add a piece of 'freeform' documentation to the document tree
-
- A 'freeform' document chunk doesn't relate to any particular
- symbol (for instance, it could be an introduction).
-
- If the freeform document starts with a line of the form
- '= Heading text', this is a section or subsection heading, with
- the heading level indicated by the number of '=' signs.
+ Generate option lines for QAPI entity directives.
"""
+ if ent.doc and ent.doc.since:
+ assert ent.doc.since.kind == QAPIDoc.Kind.SINCE
+ # Generated from the entity's docblock; info location is exact.
+ self.add_line(f":since: {ent.doc.since.text}", ent.doc.since.info)
+
+ if ent.ifcond.is_present():
+ doc = ent.ifcond.docgen()
+ assert ent.info
+ # Generated from entity definition; info location is approximate.
+ self.add_line(f":ifcond: {doc}", ent.info)
+
+ # Hoist special features such as :deprecated: and :unstable:
+ # into the options block for the entity. If, in the future, new
+ # special features are added, qapi-domain will chirp about
+ # unrecognized options and fail until they are handled in
+ # qapi-domain.
+ for feat in ent.features:
+ if feat.is_special():
+ # FIXME: handle ifcond if present. How to display that
+ # information is TBD.
+ # Generated from entity def; info location is approximate.
+ assert feat.info
+ self.add_line(f":{feat.name}:", feat.info)
+
+ self.ensure_blank_line()
+
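+    # Sketch of what preamble() above emits for a hypothetical conditional,
+    # deprecated entity (names and values are illustrative only):
+    #
+    #   :since: 7.2
+    #   :ifcond: CONFIG_FOO
+    #   :deprecated:
+    #
+    # followed by a blank line.
+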
+ def _insert_member_pointer(self, ent: QAPISchemaDefinition) -> None:
+
+ def _get_target(
+ ent: QAPISchemaDefinition,
+ ) -> Optional[QAPISchemaDefinition]:
+ if isinstance(ent, (QAPISchemaCommand, QAPISchemaEvent)):
+ return ent.arg_type
+ if isinstance(ent, QAPISchemaObjectType):
+ return ent.base
+ return None
+
+ target = _get_target(ent)
+ if target is not None and not target.is_implicit():
+ assert ent.info
+ self.add_field(
+ self.member_field_type,
+ "q_dummy",
+ f"The members of :qapi:type:`{target.name}`.",
+ ent.info,
+ "q_dummy",
+ )
- # QAPIDoc documentation says free-form documentation blocks
- # must have only a body section, nothing else.
- assert not doc.sections
- assert not doc.args
- assert not doc.features
- self._cur_doc = doc
-
- text = doc.body.text
- if re.match(r'=+ ', text):
- # Section/subsection heading (if present, will always be
- # the first line of the block)
- (heading, _, text) = text.partition('\n')
- (leader, _, heading) = heading.partition(' ')
- self._start_new_heading(heading, len(leader))
- if text == '':
- return
-
- node = self._make_section(None)
- self._parse_text_into_node(text, node)
- self._add_node_to_current_heading(node)
- self._cur_doc = None
-
- def _parse_text_into_node(self, doctext, node):
- """Parse a chunk of QAPI-doc-format text into the node
-
- The doc comment can contain most inline rST markup, including
- bulleted and enumerated lists.
- As an extra permitted piece of markup, @var will be turned
- into ``var``.
- """
+ if isinstance(ent, QAPISchemaObjectType) and ent.branches is not None:
+ for variant in ent.branches.variants:
+ if variant.type.name == "q_empty":
+ continue
+ assert ent.info
+ self.add_field(
+ self.member_field_type,
+ "q_dummy",
+ f" When ``{ent.branches.tag_member.name}`` is "
+ f"``{variant.name}``: "
+ f"The members of :qapi:type:`{variant.type.name}`.",
+ ent.info,
+ "q_dummy",
+ )
+
+ def visit_sections(self, ent: QAPISchemaDefinition) -> None:
+ sections = ent.doc.all_sections if ent.doc else []
+
+ # Determine the index location at which we should generate
+ # documentation for "The members of ..." pointers. This should
+ # go at the end of the members section(s) if any. Note that
+ # index 0 is assumed to be a plain intro section, even if it is
+ # empty; and that a members section if present will always
+ # immediately follow the opening PLAIN section.
+ gen_index = 1
+ if len(sections) > 1:
+ while sections[gen_index].kind == QAPIDoc.Kind.MEMBER:
+ gen_index += 1
+ if gen_index >= len(sections):
+ break
+
+ # Add sections in source order:
+ for i, section in enumerate(sections):
+ # @var is translated to ``var``:
+ section.text = re.sub(r"@([\w-]+)", r"``\1``", section.text)
+
+ if section.kind == QAPIDoc.Kind.PLAIN:
+ self.visit_paragraph(section)
+ elif section.kind == QAPIDoc.Kind.MEMBER:
+ assert isinstance(section, QAPIDoc.ArgSection)
+ self.visit_member(section)
+ elif section.kind == QAPIDoc.Kind.FEATURE:
+ assert isinstance(section, QAPIDoc.ArgSection)
+ self.visit_feature(section)
+ elif section.kind in (QAPIDoc.Kind.SINCE, QAPIDoc.Kind.TODO):
+ # Since is handled in preamble, TODO is skipped intentionally.
+ pass
+ elif section.kind == QAPIDoc.Kind.RETURNS:
+ self.visit_returns(section)
+ elif section.kind == QAPIDoc.Kind.ERRORS:
+ self.visit_errors(section)
+ else:
+ assert False
+
+ # Generate "The members of ..." entries if necessary:
+ if i == gen_index - 1:
+ self._insert_member_pointer(ent)
+
+ self.ensure_blank_line()
+
+ # Transmogrification core methods
+
+ def visit_module(self, path: str) -> None:
+ name = Path(path).stem
+ # module directives are credited to the first line of a module file.
+ self.add_line_raw(f".. qapi:module:: {name}", path, 1)
+ self.ensure_blank_line()
+
+ def visit_freeform(self, doc: QAPIDoc) -> None:
+ # TODO: Once the old qapidoc transformer is deprecated, freeform
+        # sections can be updated to pure rST, and this transformation removed.
+ #
+ # For now, translate our micro-format into rST. Code adapted
+ # from Peter Maydell's freeform().
+
+ assert len(doc.all_sections) == 1, doc.all_sections
+ body = doc.all_sections[0]
+ text = body.text
+ info = doc.info
+
+ if re.match(r"=+ ", text):
+ # Section/subsection heading (if present, will always be the
+ # first line of the block)
+ (heading, _, text) = text.partition("\n")
+ (leader, _, heading) = heading.partition(" ")
+ # Implicit +1 for heading in the containing .rst doc
+ level = len(leader) + 1
+
+ # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections
+ markers = ' #*=_^"'
+ overline = level <= 2
+ marker = markers[level]
+
+ self.ensure_blank_line()
+ # This credits all 2 or 3 lines to the single source line.
+ if overline:
+ self.add_line(marker * len(heading), info)
+ self.add_line(heading, info)
+ self.add_line(marker * len(heading), info)
+ self.ensure_blank_line()
+
+ # Eat blank line(s) and advance info
+            trimmed = text.lstrip("\n")
+            info = info.next_line(len(text) - len(trimmed) + 1)
+            text = trimmed
+
+ self.add_lines(text, info)
+ self.ensure_blank_line()
+
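+    # Worked example for the heading translation in visit_freeform() above
+    # (illustrative input, not from a real schema): a block starting with
+    # "== Device emulation" has leader "==", so level = 3, marker '=' and
+    # no overline; the emitted rST is:
+    #
+    #   Device emulation
+    #   ================
+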
+ def visit_entity(self, ent: QAPISchemaDefinition) -> None:
+ assert ent.info
- # Handle the "@var means ``var`` case
- doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext)
-
- rstlist = ViewList()
- for line in doctext.splitlines():
- # The reported line number will always be that of the start line
- # of the doc comment, rather than the actual location of the error.
- # Being more precise would require overhaul of the QAPIDoc class
- # to track lines more exactly within all the sub-parts of the doc
- # comment, as well as counting lines here.
- rstlist.append(line, self._cur_doc.info.fname,
- self._cur_doc.info.line)
- # Append a blank line -- in some cases rST syntax errors get
- # attributed to the line after one with actual text, and if there
- # isn't anything in the ViewList corresponding to that then Sphinx
- # 1.6's AutodocReporter will then misidentify the source/line location
- # in the error message (usually attributing it to the top-level
- # .rst file rather than the offending .json file). The extra blank
- # line won't affect the rendered output.
- rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line)
- self._sphinx_directive.do_parse(rstlist, node)
-
- def get_document_nodes(self):
- """Return the list of docutils nodes which make up the document"""
- return self._top_node.children
-
-
-# Turn the black formatter on for the rest of the file.
-# fmt: on
+ try:
+ self._curr_ent = ent
+
+ # Squish structs and unions together into an "object" directive.
+ meta = ent.meta
+ if meta in ("struct", "union"):
+ meta = "object"
+
+ # This line gets credited to the start of the /definition/.
+ self.add_line(f".. qapi:{meta}:: {ent.name}", ent.info)
+ with self.indented():
+ self.preamble(ent)
+ self.visit_sections(ent)
+ finally:
+ self._curr_ent = None
+
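+    # End-to-end sketch of the rST produced by visit_entity() above for a
+    # hypothetical command (entity name, fields and values are illustrative):
+    #
+    #   .. qapi:command:: frobnicate
+    #      :since: 9.1
+    #
+    #      Frobnicates the target device.
+    #
+    #      :arg str device: Which device to frobnicate.
+    #      :return FrobStatus: Status of the operation.
+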
+ def set_namespace(self, namespace: str, source: str, lineno: int) -> None:
+ self.add_line_raw(
+ f".. qapi:namespace:: {namespace}", source, lineno + 1
+ )
+ self.ensure_blank_line()
class QAPISchemaGenDepVisitor(QAPISchemaVisitor):
@@ -474,22 +467,22 @@ class QAPISchemaGenDepVisitor(QAPISchemaVisitor):
schema file associated with each module in the QAPI input.
"""
- def __init__(self, env, qapidir):
+ def __init__(self, env: Any, qapidir: str) -> None:
self._env = env
self._qapidir = qapidir
- def visit_module(self, name):
+ def visit_module(self, name: str) -> None:
if name != "./builtin":
qapifile = self._qapidir + "/" + name
self._env.note_dependency(os.path.abspath(qapifile))
super().visit_module(name)
-class NestedDirective(Directive):
- def run(self):
+class NestedDirective(SphinxDirective):
+ def run(self) -> Sequence[nodes.Node]:
raise NotImplementedError
- def do_parse(self, rstlist, node):
+ def do_parse(self, rstlist: StringList, node: nodes.Node) -> None:
"""
Parse rST source lines and add them to the specified node
@@ -508,18 +501,110 @@ class QAPIDocDirective(NestedDirective):
required_argument = 1
optional_arguments = 1
- option_spec = {"qapifile": directives.unchanged_required}
+ option_spec = {
+ "qapifile": directives.unchanged_required,
+ "namespace": directives.unchanged,
+ "transmogrify": directives.flag,
+ }
has_content = False
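+    # Example use of the directive with the new options (the schema path
+    # and namespace value are illustrative):
+    #
+    #   .. qapi-doc:: qapi/qapi-schema.json
+    #      :namespace: QMP
+    #      :transmogrify:
+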
- def new_serialno(self):
+ def new_serialno(self) -> str:
"""Return a unique new ID string suitable for use as a node's ID"""
env = self.state.document.settings.env
return "qapidoc-%d" % env.new_serialno("qapidoc")
- def run(self):
+ def transmogrify(self, schema: QAPISchema) -> nodes.Element:
+ logger.info("Transmogrifying QAPI to rST ...")
+ vis = Transmogrifier()
+ modules = set()
+
+ if "namespace" in self.options:
+ vis.set_namespace(
+ self.options["namespace"], *self.get_source_info()
+ )
+
+ for doc in schema.docs:
+ module_source = doc.info.fname
+ if module_source not in modules:
+ vis.visit_module(module_source)
+ modules.add(module_source)
+
+ if doc.symbol:
+ ent = schema.lookup_entity(doc.symbol)
+ assert isinstance(ent, QAPISchemaDefinition)
+ vis.visit_entity(ent)
+ else:
+ vis.visit_freeform(doc)
+
+ logger.info("Transmogrification complete.")
+
+ contentnode = nodes.section()
+ content = vis.result
+ titles_allowed = True
+
+ logger.info("Transmogrifier running nested parse ...")
+ with switch_source_input(self.state, content):
+ if titles_allowed:
+ node: nodes.Element = nodes.section()
+ node.document = self.state.document
+ nested_parse_with_titles(self.state, content, contentnode)
+ else:
+ node = nodes.paragraph()
+ node.document = self.state.document
+ self.state.nested_parse(content, 0, contentnode)
+ logger.info("Transmogrifier's nested parse completed.")
+
+ if self.env.app.verbosity >= 2 or os.environ.get("DEBUG"):
+ argname = "_".join(Path(self.arguments[0]).parts)
+ name = Path(argname).stem + ".ir"
+ self.write_intermediate(content, name)
+
+ sys.stdout.flush()
+ return contentnode
+
+ def write_intermediate(self, content: StringList, filename: str) -> None:
+ logger.info(
+ "writing intermediate rST for '%s' to '%s'",
+ self.arguments[0],
+ filename,
+ )
+
+ srctree = Path(self.env.app.config.qapidoc_srctree).resolve()
+ outlines = []
+ lcol_width = 0
+
+ for i, line in enumerate(content):
+ src, lineno = content.info(i)
+ srcpath = Path(src).resolve()
+ srcpath = srcpath.relative_to(srctree)
+
+ lcol = f"{srcpath}:{lineno:04d}"
+ lcol_width = max(lcol_width, len(lcol))
+ outlines.append((lcol, line))
+
+ with open(filename, "w", encoding="UTF-8") as outfile:
+ for lcol, rcol in outlines:
+ outfile.write(lcol.rjust(lcol_width))
+ outfile.write(" |")
+ if rcol:
+ outfile.write(f" {rcol}")
+ outfile.write("\n")
+
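+    # The intermediate file written above is a two-column dump, e.g.
+    # (file name and content are illustrative):
+    #
+    #   qapi/block-core.json:0042 | :arg str device: Name of the device.
+    #   qapi/block-core.json:0043 |
+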
+ def legacy(self, schema: QAPISchema) -> nodes.Element:
+ vis = QAPISchemaGenRSTVisitor(self)
+ vis.visit_begin(schema)
+ for doc in schema.docs:
+ if doc.symbol:
+ vis.symbol(doc, schema.lookup_entity(doc.symbol))
+ else:
+ vis.freeform(doc)
+ return vis.get_document_node() # type: ignore
+
+ def run(self) -> Sequence[nodes.Node]:
env = self.state.document.settings.env
qapifile = env.config.qapidoc_srctree + "/" + self.arguments[0]
qapidir = os.path.dirname(qapifile)
+ transmogrify = "transmogrify" in self.options
try:
schema = QAPISchema(qapifile)
@@ -527,20 +612,18 @@ class QAPIDocDirective(NestedDirective):
# First tell Sphinx about all the schema files that the
# output documentation depends on (including 'qapifile' itself)
schema.visit(QAPISchemaGenDepVisitor(env, qapidir))
-
- vis = QAPISchemaGenRSTVisitor(self)
- vis.visit_begin(schema)
- for doc in schema.docs:
- if doc.symbol:
- vis.symbol(doc, schema.lookup_entity(doc.symbol))
- else:
- vis.freeform(doc)
- return vis.get_document_nodes()
except QAPIError as err:
# Launder QAPI parse errors into Sphinx extension errors
# so they are displayed nicely to the user
raise ExtensionError(str(err)) from err
+ if transmogrify:
+ contentnode = self.transmogrify(schema)
+ else:
+ contentnode = self.legacy(schema)
+
+ return contentnode.children
+
class QMPExample(CodeBlock, NestedDirective):
"""
@@ -591,7 +674,7 @@ class QMPExample(CodeBlock, NestedDirective):
)
return node
- def admonition_wrap(self, *content) -> List[nodes.Node]:
+ def admonition_wrap(self, *content: nodes.Node) -> List[nodes.Node]:
title = "Example:"
if "title" in self.options:
title = f"{title} {self.options['title']}"
@@ -637,8 +720,9 @@ class QMPExample(CodeBlock, NestedDirective):
return self.admonition_wrap(*content_nodes)
-def setup(app):
+def setup(app: Sphinx) -> ExtensionMetadata:
"""Register qapi-doc directive with Sphinx"""
+ app.setup_extension("qapi_domain")
app.add_config_value("qapidoc_srctree", None, "env")
app.add_directive("qapi-doc", QAPIDocDirective)
app.add_directive("qmp-example", QMPExample)
diff --git a/docs/sphinx/qapidoc_legacy.py b/docs/sphinx/qapidoc_legacy.py
new file mode 100644
index 0000000..13520f4
--- /dev/null
+++ b/docs/sphinx/qapidoc_legacy.py
@@ -0,0 +1,440 @@
+# coding=utf-8
+# type: ignore
+#
+# QEMU qapidoc QAPI file parsing extension
+#
+# Copyright (c) 2020 Linaro
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
+
+"""
+qapidoc is a Sphinx extension that implements the qapi-doc directive
+
+The purpose of this extension is to read the documentation comments
+in QAPI schema files, and insert them all into the current document.
+
+It implements one new rST directive, "qapi-doc::".
+Each qapi-doc:: directive takes one argument, which is the
+pathname of the schema file to process, relative to the source tree.
+
+The docs/conf.py file must set the qapidoc_srctree config value to
+the root of the QEMU source tree.
+
+The Sphinx documentation on writing extensions is at:
+https://www.sphinx-doc.org/en/master/development/index.html
+"""
+
+import re
+import textwrap
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from qapi.error import QAPISemError
+from qapi.gen import QAPISchemaVisitor
+from qapi.parser import QAPIDoc
+
+
+def dedent(text: str) -> str:
+ # Adjust indentation to make description text parse as paragraph.
+
+ lines = text.splitlines(True)
+ if re.match(r"\s+", lines[0]):
+ # First line is indented; description started on the line after
+ # the name. dedent the whole block.
+ return textwrap.dedent(text)
+
+ # Descr started on same line. Dedent line 2+.
+ return lines[0] + textwrap.dedent("".join(lines[1:]))
+
+
+class QAPISchemaGenRSTVisitor(QAPISchemaVisitor):
+ """A QAPI schema visitor which generates docutils/Sphinx nodes
+
+ This class builds up a tree of docutils/Sphinx nodes corresponding
+ to documentation for the various QAPI objects. To use it, first
+ create a QAPISchemaGenRSTVisitor object, and call its
+ visit_begin() method. Then you can call one of the two methods
+ 'freeform' (to add documentation for a freeform documentation
+ chunk) or 'symbol' (to add documentation for a QAPI symbol). These
+ will cause the visitor to build up the tree of document
+ nodes. Once you've added all the documentation via 'freeform' and
+    'symbol' method calls, you can call 'get_document_node' to get
+    the root document node (its children are in a form suitable for
+    returning from a Sphinx directive's 'run' method).
+ """
+ def __init__(self, sphinx_directive):
+ self._cur_doc = None
+ self._sphinx_directive = sphinx_directive
+ self._top_node = nodes.section()
+ self._active_headings = [self._top_node]
+
+ def _make_dlitem(self, term, defn):
+ """Return a dlitem node with the specified term and definition.
+
+ term should be a list of Text and literal nodes.
+ defn should be one of:
+ - a string, which will be handed to _parse_text_into_node
+ - a list of Text and literal nodes, which will be put into
+ a paragraph node
+ """
+ dlitem = nodes.definition_list_item()
+ dlterm = nodes.term('', '', *term)
+ dlitem += dlterm
+ if defn:
+ dldef = nodes.definition()
+ if isinstance(defn, list):
+ dldef += nodes.paragraph('', '', *defn)
+ else:
+ self._parse_text_into_node(defn, dldef)
+ dlitem += dldef
+ return dlitem
+
+ def _make_section(self, title):
+ """Return a section node with optional title"""
+ section = nodes.section(ids=[self._sphinx_directive.new_serialno()])
+ if title:
+ section += nodes.title(title, title)
+ return section
+
+ def _nodes_for_ifcond(self, ifcond, with_if=True):
+ """Return list of Text, literal nodes for the ifcond
+
+ Return a list which gives text like ' (If: condition)'.
+ If with_if is False, we don't return the "(If: " and ")".
+ """
+
+ doc = ifcond.docgen()
+ if not doc:
+ return []
+ doc = nodes.literal('', doc)
+ if not with_if:
+ return [doc]
+
+ nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')]
+ nodelist.append(doc)
+ nodelist.append(nodes.Text(')'))
+ return nodelist
+
+ def _nodes_for_one_member(self, member):
+ """Return list of Text, literal nodes for this member
+
+ Return a list of doctree nodes which give text like
+ 'name: type (optional) (If: ...)' suitable for use as the
+ 'term' part of a definition list item.
+ """
+ term = [nodes.literal('', member.name)]
+ if member.type.doc_type():
+ term.append(nodes.Text(': '))
+ term.append(nodes.literal('', member.type.doc_type()))
+ if member.optional:
+ term.append(nodes.Text(' (optional)'))
+ if member.ifcond.is_present():
+ term.extend(self._nodes_for_ifcond(member.ifcond))
+ return term
+
+ def _nodes_for_variant_when(self, branches, variant):
+ """Return list of Text, literal nodes for variant 'when' clause
+
+ Return a list of doctree nodes which give text like
+ 'when tagname is variant (If: ...)' suitable for use in
+ the 'branches' part of a definition list.
+ """
+ term = [nodes.Text(' when '),
+ nodes.literal('', branches.tag_member.name),
+ nodes.Text(' is '),
+ nodes.literal('', '"%s"' % variant.name)]
+ if variant.ifcond.is_present():
+ term.extend(self._nodes_for_ifcond(variant.ifcond))
+ return term
+
+ def _nodes_for_members(self, doc, what, base=None, branches=None):
+ """Return list of doctree nodes for the table of members"""
+ dlnode = nodes.definition_list()
+ for section in doc.args.values():
+ term = self._nodes_for_one_member(section.member)
+ # TODO drop fallbacks when undocumented members are outlawed
+ if section.text:
+ defn = dedent(section.text)
+ else:
+ defn = [nodes.Text('Not documented')]
+
+ dlnode += self._make_dlitem(term, defn)
+
+ if base:
+ dlnode += self._make_dlitem([nodes.Text('The members of '),
+ nodes.literal('', base.doc_type())],
+ None)
+
+ if branches:
+ for v in branches.variants:
+ if v.type.name == 'q_empty':
+ continue
+ assert not v.type.is_implicit()
+ term = [nodes.Text('The members of '),
+ nodes.literal('', v.type.doc_type())]
+ term.extend(self._nodes_for_variant_when(branches, v))
+ dlnode += self._make_dlitem(term, None)
+
+ if not dlnode.children:
+ return []
+
+ section = self._make_section(what)
+ section += dlnode
+ return [section]
+
+ def _nodes_for_enum_values(self, doc):
+ """Return list of doctree nodes for the table of enum values"""
+ seen_item = False
+ dlnode = nodes.definition_list()
+ for section in doc.args.values():
+ termtext = [nodes.literal('', section.member.name)]
+ if section.member.ifcond.is_present():
+ termtext.extend(self._nodes_for_ifcond(section.member.ifcond))
+ # TODO drop fallbacks when undocumented members are outlawed
+ if section.text:
+ defn = dedent(section.text)
+ else:
+ defn = [nodes.Text('Not documented')]
+
+ dlnode += self._make_dlitem(termtext, defn)
+ seen_item = True
+
+ if not seen_item:
+ return []
+
+ section = self._make_section('Values')
+ section += dlnode
+ return [section]
+
+ def _nodes_for_arguments(self, doc, arg_type):
+ """Return list of doctree nodes for the arguments section"""
+ if arg_type and not arg_type.is_implicit():
+ assert not doc.args
+ section = self._make_section('Arguments')
+ dlnode = nodes.definition_list()
+ dlnode += self._make_dlitem(
+ [nodes.Text('The members of '),
+ nodes.literal('', arg_type.name)],
+ None)
+ section += dlnode
+ return [section]
+
+ return self._nodes_for_members(doc, 'Arguments')
+
+ def _nodes_for_features(self, doc):
+ """Return list of doctree nodes for the table of features"""
+ seen_item = False
+ dlnode = nodes.definition_list()
+ for section in doc.features.values():
+ dlnode += self._make_dlitem(
+ [nodes.literal('', section.member.name)], dedent(section.text))
+ seen_item = True
+
+ if not seen_item:
+ return []
+
+ section = self._make_section('Features')
+ section += dlnode
+ return [section]
+
+ def _nodes_for_sections(self, doc):
+ """Return list of doctree nodes for additional sections"""
+ nodelist = []
+ for section in doc.sections:
+ if section.kind == QAPIDoc.Kind.TODO:
+ # Hide TODO: sections
+ continue
+
+ if section.kind == QAPIDoc.Kind.PLAIN:
+ # Sphinx cannot handle sectionless titles;
+ # Instead, just append the results to the prior section.
+ container = nodes.container()
+ self._parse_text_into_node(section.text, container)
+ nodelist += container.children
+ continue
+
+ snode = self._make_section(section.kind.name.title())
+ self._parse_text_into_node(dedent(section.text), snode)
+ nodelist.append(snode)
+ return nodelist
+
+ def _nodes_for_if_section(self, ifcond):
+ """Return list of doctree nodes for the "If" section"""
+ nodelist = []
+ if ifcond.is_present():
+ snode = self._make_section('If')
+ snode += nodes.paragraph(
+ '', '', *self._nodes_for_ifcond(ifcond, with_if=False)
+ )
+ nodelist.append(snode)
+ return nodelist
+
+ def _add_doc(self, typ, sections):
+ """Add documentation for a command/object/enum...
+
+ We assume we're documenting the thing defined in self._cur_doc.
+ typ is the type of thing being added ("Command", "Object", etc)
+
+ sections is a list of nodes for sections to add to the definition.
+ """
+
+ doc = self._cur_doc
+ snode = nodes.section(ids=[self._sphinx_directive.new_serialno()])
+ snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol),
+ nodes.Text(' (' + typ + ')')])
+ self._parse_text_into_node(doc.body.text, snode)
+ for s in sections:
+ if s is not None:
+ snode += s
+ self._add_node_to_current_heading(snode)
+
+ def visit_enum_type(self, name, info, ifcond, features, members, prefix):
+ doc = self._cur_doc
+ self._add_doc('Enum',
+ self._nodes_for_enum_values(doc)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_object_type(self, name, info, ifcond, features,
+ base, members, branches):
+ doc = self._cur_doc
+ if base and base.is_implicit():
+ base = None
+ self._add_doc('Object',
+ self._nodes_for_members(doc, 'Members', base, branches)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_alternate_type(self, name, info, ifcond, features,
+ alternatives):
+ doc = self._cur_doc
+ self._add_doc('Alternate',
+ self._nodes_for_members(doc, 'Members')
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_command(self, name, info, ifcond, features, arg_type,
+ ret_type, gen, success_response, boxed, allow_oob,
+ allow_preconfig, coroutine):
+ doc = self._cur_doc
+ self._add_doc('Command',
+ self._nodes_for_arguments(doc, arg_type)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def visit_event(self, name, info, ifcond, features, arg_type, boxed):
+ doc = self._cur_doc
+ self._add_doc('Event',
+ self._nodes_for_arguments(doc, arg_type)
+ + self._nodes_for_features(doc)
+ + self._nodes_for_sections(doc)
+ + self._nodes_for_if_section(ifcond))
+
+ def symbol(self, doc, entity):
+ """Add documentation for one symbol to the document tree
+
+ This is the main entry point which causes us to add documentation
+ nodes for a symbol (which could be a 'command', 'object', 'event',
+ etc). We do this by calling 'visit' on the schema entity, which
+ will then call back into one of our visit_* methods, depending
+ on what kind of thing this symbol is.
+ """
+ self._cur_doc = doc
+ entity.visit(self)
+ self._cur_doc = None
+
+ def _start_new_heading(self, heading, level):
+ """Start a new heading at the specified heading level
+
+ Create a new section whose title is 'heading' and which is placed
+ in the docutils node tree as a child of the most recent level-1
+ heading. Subsequent document sections (commands, freeform doc chunks,
+ etc) will be placed as children of this new heading section.
+ """
+ if len(self._active_headings) < level:
+ raise QAPISemError(self._cur_doc.info,
+ 'Level %d subheading found outside a '
+ 'level %d heading'
+ % (level, level - 1))
+ snode = self._make_section(heading)
+ self._active_headings[level - 1] += snode
+ self._active_headings = self._active_headings[:level]
+ self._active_headings.append(snode)
+ return snode
+
+ def _add_node_to_current_heading(self, node):
+ """Add the node to whatever the current active heading is"""
+ self._active_headings[-1] += node
+
+ def freeform(self, doc):
+ """Add a piece of 'freeform' documentation to the document tree
+
+ A 'freeform' document chunk doesn't relate to any particular
+ symbol (for instance, it could be an introduction).
+
+ If the freeform document starts with a line of the form
+ '= Heading text', this is a section or subsection heading, with
+ the heading level indicated by the number of '=' signs.
+ """
+
+ # QAPIDoc documentation says free-form documentation blocks
+ # must have only a body section, nothing else.
+ assert not doc.sections
+ assert not doc.args
+ assert not doc.features
+ self._cur_doc = doc
+
+ text = doc.body.text
+ if re.match(r'=+ ', text):
+ # Section/subsection heading (if present, will always be
+ # the first line of the block)
+ (heading, _, text) = text.partition('\n')
+ (leader, _, heading) = heading.partition(' ')
+ node = self._start_new_heading(heading, len(leader))
+ if text == '':
+ return
+ else:
+ node = nodes.container()
+
+ self._parse_text_into_node(text, node)
+ self._cur_doc = None
+
+ def _parse_text_into_node(self, doctext, node):
+ """Parse a chunk of QAPI-doc-format text into the node
+
+ The doc comment can contain most inline rST markup, including
+ bulleted and enumerated lists.
+ As an extra permitted piece of markup, @var will be turned
+ into ``var``.
+ """
+
+        # Handle the "@var means ``var``" case
+ doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext)
+
+ rstlist = ViewList()
+ for line in doctext.splitlines():
+ # The reported line number will always be that of the start line
+ # of the doc comment, rather than the actual location of the error.
+ # Being more precise would require overhaul of the QAPIDoc class
+ # to track lines more exactly within all the sub-parts of the doc
+ # comment, as well as counting lines here.
+ rstlist.append(line, self._cur_doc.info.fname,
+ self._cur_doc.info.line)
+ # Append a blank line -- in some cases rST syntax errors get
+ # attributed to the line after one with actual text, and if there
+ # isn't anything in the ViewList corresponding to that then Sphinx
+ # 1.6's AutodocReporter will then misidentify the source/line location
+ # in the error message (usually attributing it to the top-level
+ # .rst file rather than the offending .json file). The extra blank
+ # line won't affect the rendered output.
+ rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line)
+ self._sphinx_directive.do_parse(rstlist, node)
+
+ def get_document_node(self):
+ """Return the root docutils node which makes up the document"""
+ return self._top_node
diff --git a/docs/sphinx/qmp_lexer.py b/docs/sphinx/qmp_lexer.py
index a59de8a..7b3b808 100644
--- a/docs/sphinx/qmp_lexer.py
+++ b/docs/sphinx/qmp_lexer.py
@@ -24,7 +24,7 @@ class QMPExampleMarkersLexer(RegexLexer):
'root': [
(r'-> ', token.Generic.Prompt),
(r'<- ', token.Generic.Prompt),
- (r' ?\.{3} ?', token.Generic.Prompt),
+ (r'\.{3}( .* \.{3})?', token.Comment.Multiline),
]
}
diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
index cd9559e..43d27d8 100644
--- a/docs/system/arm/aspeed.rst
+++ b/docs/system/arm/aspeed.rst
@@ -1,12 +1,11 @@
-Aspeed family boards (``*-bmc``, ``ast2500-evb``, ``ast2600-evb``, ``ast2700-evb``)
-===================================================================================
+Aspeed family boards (``ast2500-evb``, ``ast2600-evb``, ``ast2700-evb``, ``bletchley-bmc``, ``fuji-bmc``, ``fby35-bmc``, ``fp5280g2-bmc``, ``g220a-bmc``, ``palmetto-bmc``, ``qcom-dc-scm-v1-bmc``, ``qcom-firework-bmc``, ``quanta-q71l-bmc``, ``rainier-bmc``, ``romulus-bmc``, ``sonorapass-bmc``, ``supermicrox11-bmc``, ``supermicrox11spi-bmc``, ``tiogapass-bmc``, ``witherspoon-bmc``, ``yosemitev2-bmc``)
+=================================================================================================================================================================================================================================================================================================================================================================================================================================
The QEMU Aspeed machines model BMCs of various OpenPOWER systems and
Aspeed evaluation boards. They are based on different releases of the
Aspeed SoC : the AST2400 integrating an ARM926EJ-S CPU (400MHz), the
AST2500 with an ARM1176JZS CPU (800MHz), the AST2600
-with dual cores ARM Cortex-A7 CPUs (1.2GHz) and more recently the AST2700
-with quad cores ARM Cortex-A35 64 bits CPUs (1.6GHz)
+with dual-core ARM Cortex-A7 CPUs (1.2GHz).
The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, USB, SPI, I2C,
etc.
@@ -15,7 +14,8 @@ AST2400 SoC based machines :
- ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC
- ``quanta-q71l-bmc`` OpenBMC Quanta BMC
-- ``supermicrox11-bmc`` Supermicro X11 BMC
+- ``supermicrox11-bmc`` Supermicro X11 BMC (ARM926EJ-S)
+- ``supermicrox11spi-bmc`` Supermicro X11 SPI BMC (ARM1176)
AST2500 SoC based machines :
@@ -31,7 +31,6 @@ AST2500 SoC based machines :
AST2600 SoC based machines :
- ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex-A7)
-- ``tacoma-bmc`` OpenPOWER Witherspoon POWER9 AST2600 BMC
- ``rainier-bmc`` IBM Rainier POWER10 BMC
- ``fuji-bmc`` Facebook Fuji BMC
- ``bletchley-bmc`` Facebook Bletchley BMC
@@ -39,10 +38,6 @@ AST2600 SoC based machines :
- ``qcom-dc-scm-v1-bmc`` Qualcomm DC-SCM V1 BMC
- ``qcom-firework-bmc`` Qualcomm Firework BMC
-AST2700 SoC based machines :
-
-- ``ast2700-evb`` Aspeed AST2700 Evaluation board (Cortex-A35)
-
Supported devices
-----------------
@@ -105,6 +100,9 @@ or directly from the ASPEED Forked OpenBMC GitHub release repository :
https://github.com/AspeedTech-BMC/openbmc/releases
+Booting from a kernel image
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
To boot a kernel directly from a Linux build tree:
.. code-block:: bash
@@ -114,14 +112,10 @@ To boot a kernel directly from a Linux build tree:
-dtb arch/arm/boot/dts/aspeed-ast2600-evb.dtb \
-initrd rootfs.cpio
-To boot the machine from the flash image, use an MTD drive :
-
-.. code-block:: bash
-
- $ qemu-system-arm -M romulus-bmc -nic user \
- -drive file=obmc-phosphor-image-romulus.static.mtd,format=raw,if=mtd -nographic
+Booting from a flash image
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Options specific to Aspeed machines are :
+The machine options specific to Aspeed machines for booting from a flash image are :
* ``execute-in-place`` which emulates the boot from the CE0 flash
device by using the FMC controller to load the instructions, and
@@ -132,10 +126,12 @@ Options specific to Aspeed machines are :
* ``spi-model`` to change the default SPI Flash model.
- * ``bmc-console`` to change the default console device. Most of the
- machines use the ``UART5`` device for a boot console, which is
- mapped on ``/dev/ttyS4`` under Linux, but it is not always the
- case.
+To boot the machine from the flash image, use an MTD drive :
+
+.. code-block:: bash
+
+ $ qemu-system-arm -M romulus-bmc -nic user \
+ -drive file=obmc-phosphor-image-romulus.static.mtd,format=raw,if=mtd -nographic
To use other flash models, for instance a different FMC chip and a
bigger (64M) SPI for the ``ast2500-evb`` machine, run :
@@ -167,6 +163,78 @@ In that case, the machine boots fetching instructions from the FMC0
device. It is slower to start but closer to what HW does. Using the
machine option ``execute-in-place`` has a similar effect.
+Booting from an eMMC image
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The machine options specific to Aspeed machines for booting from an eMMC
+image are :
+
+ * ``boot-emmc`` to set or unset boot from eMMC (AST2600).
+
+Only the ``ast2600-evb`` and ``rainier-bmc`` machines support booting
+from an eMMC device. In this case, the machine assumes that the
+eMMC image includes special boot partitions. Such an image can be
+built this way :
+
+.. code-block:: bash
+
+ $ dd if=/dev/zero of=mmc-bootarea.img count=2 bs=1M
+ $ dd if=u-boot-spl.bin of=mmc-bootarea.img conv=notrunc
+ $ dd if=u-boot.bin of=mmc-bootarea.img conv=notrunc count=64 bs=1K
+ $ cat mmc-bootarea.img obmc-phosphor-image.wic > mmc.img
+ $ truncate --size 16GB mmc.img
+
+Boot the ``rainier-bmc`` machine with :
+
+.. code-block:: bash
+
+ $ qemu-system-arm -M rainier-bmc \
+ -drive file=mmc.img,format=raw,if=sd,index=2 \
+ -nographic
+
+The ``boot-emmc`` option can be set or unset to change the default
+boot mode of the machine: SPI or eMMC. This can be useful to boot the
+``ast2600-evb`` machine from an eMMC device (default being SPI) or to
+boot the ``rainier-bmc`` machine from a flash device (default being
+eMMC).
+
+As an example, here is how to boot the ``rainier-bmc`` machine from
+the flash device with ``boot-emmc=false`` and let the machine use an
+eMMC image :
+
+.. code-block:: bash
+
+ $ qemu-system-arm -M rainier-bmc,boot-emmc=false \
+ -drive file=flash.img,format=raw,if=mtd \
+ -drive file=mmc.img,format=raw,if=sd,index=2 \
+ -nographic
+
+It should be noted that in this case the eMMC device must not have
+boot partitions, otherwise the contents will not be accessible to the
+machine. This limitation is due to the use of the ``-drive``
+interface.
+
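+Conversely, a sketch of booting the ``ast2600-evb`` machine from an
+eMMC image (assuming an ``mmc.img`` built as above and attached on the
+same SD index as in the Rainier example) :
+
+.. code-block:: bash
+
+   $ qemu-system-arm -M ast2600-evb,boot-emmc=true \
+        -drive file=mmc.img,format=raw,if=sd,index=2 \
+        -nographic
+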
+Ideally, one should be able to define the eMMC device and the
+associated backend directly on the command line, such as :
+
+.. code-block:: bash
+
+ -blockdev node-name=emmc0,driver=file,filename=mmc.img \
+ -device emmc,bus=sdhci-bus.2,drive=emmc0,boot-partition-size=1048576,boot-config=8
+
+This is not yet supported (as of QEMU-10.0). Work is needed to
+refactor the sdhci bus model.
+
+Other booting options
+^^^^^^^^^^^^^^^^^^^^^
+
+Other machine options specific to Aspeed machines are :
+
+ * ``bmc-console`` to change the default console device. Most of the
+ machines use the ``UART5`` device for a boot console, which is
+ mapped on ``/dev/ttyS4`` under Linux, but it is not always the
+ case.
+
To change the boot console and use device ``UART3`` (``/dev/ttyS2``
under Linux), use :
@@ -174,8 +242,78 @@ under Linux), use :
-M ast2500-evb,bmc-console=uart3
+Aspeed 2700 family boards (``ast2700-evb``)
+==================================================================
+
+The QEMU Aspeed machines model BMCs of Aspeed evaluation boards.
+They are based on different releases of the Aspeed SoC :
+the AST2700 with quad-core 64-bit ARM Cortex-A35 CPUs (1.6GHz).
+
+The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, USB, SPI, I2C,
+etc.
+
+AST2700 SoC based machines :
+
+- ``ast2700-evb`` Aspeed AST2700 Evaluation board (Cortex-A35)
+- ``ast2700fc`` Aspeed AST2700 Evaluation board (Cortex-A35 + Cortex-M4)
+
+Supported devices
+-----------------
+ * Interrupt Controller
+ * Timer Controller
+ * RTC Controller
+ * I2C Controller
+ * System Control Unit (SCU)
+ * SRAM mapping
+ * X-DMA Controller (basic interface)
+ * Static Memory Controller (SMC or FMC) - Only SPI Flash support
+ * SPI Memory Controller
+ * USB 2.0 Controller
+ * SD/MMC storage controllers
+ * SDRAM controller (dummy interface for basic settings and training)
+ * Watchdog Controller
+ * GPIO Controller (Master only)
+ * UART
+ * Ethernet controllers
+ * Front LEDs (PCA9552 on I2C bus)
+ * LPC Peripheral Controller (a subset of subdevices are supported)
+ * Hash/Crypto Engine (HACE) - Hash support only. TODO: Crypto
+ * ADC
+ * eMMC Boot Controller (dummy)
+ * PECI Controller (minimal)
+ * I3C Controller
+ * Internal Bridge Controller (SLI dummy)
+
+Missing devices
+---------------
+ * PWM and Fan Controller
+ * Slave GPIO Controller
+ * Super I/O Controller
+ * PCI-Express 1 Controller
+ * Graphic Display Controller
+ * MCTP Controller
+ * Mailbox Controller
+ * Virtual UART
+ * eSPI Controller
+
+Boot options
+------------
+
+Images can be downloaded from the ASPEED Forked OpenBMC GitHub release repository :
-Boot the AST2700 machine from the flash image, use an MTD drive :
+ https://github.com/AspeedTech-BMC/openbmc/releases
+
+Booting the ast2700-evb machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two supported methods for booting the AST2700 machine from a
+flash image:
+
+Manual boot using ``-device loader``:
+
+This method causes all four CPU cores to start execution from address ``0x430000000``, which
+corresponds to the BL31 image load address.
.. code-block:: bash
@@ -195,6 +333,89 @@ Boot the AST2700 machine from the flash image, use an MTD drive :
-drive file=${IMGDIR}/image-bmc,format=raw,if=mtd \
-nographic
+Boot using a virtual boot ROM (``-bios``):
+
+If users do not specify the ``-bios`` option, QEMU will attempt to load the
+default vbootrom image ``ast27x0_bootrom.bin`` from either the current working
+directory or the ``pc-bios`` directory within the QEMU source tree.
+
+.. code-block:: bash
+
+ $ qemu-system-aarch64 -M ast2700-evb \
+ -drive file=image-bmc,format=raw,if=mtd \
+ -nographic
+
+The ``-bios`` option allows users to specify a custom path for the vbootrom
+image to be loaded during boot. For example, the following loads the
+vbootrom image from a path under the ``${HOME}`` directory :
+
+.. code-block:: bash
+
+ -bios ${HOME}/ast27x0_bootrom.bin
+
+Booting the ast2700fc machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The AST2700 features four Cortex-A35 primary processors and two Cortex-M4 coprocessors.
+The **ast2700-evb** machine focuses on emulating the four Cortex-A35 primary processors,
+while **ast2700fc** extends **ast2700-evb** by adding support for the two Cortex-M4 coprocessors.
+
+Steps to boot the ``ast2700fc`` machine:
+
+1. Ensure you have the following AST2700A1 binaries available in a directory
+
+ * u-boot-nodtb.bin
+ * u-boot.dtb
+ * bl31.bin
+ * optee/tee-raw.bin
+ * image-bmc
+ * zephyr-aspeed-ssp.elf (for SSP firmware, CPU 5)
+ * zephyr-aspeed-tsp.elf (for TSP firmware, CPU 6)
+
+2. Execute the following command to start the ``ast2700fc`` machine:
+
+.. code-block:: bash
+
+ IMGDIR=ast2700-default
+ UBOOT_SIZE=$(stat --format=%s -L ${IMGDIR}/u-boot-nodtb.bin)
+
+ $ qemu-system-aarch64 -M ast2700fc \
+ -device loader,force-raw=on,addr=0x400000000,file=${IMGDIR}/u-boot-nodtb.bin \
+ -device loader,force-raw=on,addr=$((0x400000000 + ${UBOOT_SIZE})),file=${IMGDIR}/u-boot.dtb \
+ -device loader,force-raw=on,addr=0x430000000,file=${IMGDIR}/bl31.bin \
+ -device loader,force-raw=on,addr=0x430080000,file=${IMGDIR}/optee/tee-raw.bin \
+ -device loader,cpu-num=0,addr=0x430000000 \
+ -device loader,cpu-num=1,addr=0x430000000 \
+ -device loader,cpu-num=2,addr=0x430000000 \
+ -device loader,cpu-num=3,addr=0x430000000 \
+ -drive file=${IMGDIR}/image-bmc,if=mtd,format=raw \
+ -device loader,file=${IMGDIR}/zephyr-aspeed-ssp.elf,cpu-num=4 \
+ -device loader,file=${IMGDIR}/zephyr-aspeed-tsp.elf,cpu-num=5 \
+ -serial pty -serial pty -serial pty \
+ -snapshot \
+ -S -nographic
+
+After launching QEMU, serial devices will be automatically redirected.
+Example output:
+
+.. code-block:: bash
+
+ char device redirected to /dev/pts/55 (label serial0)
+ char device redirected to /dev/pts/56 (label serial1)
+ char device redirected to /dev/pts/57 (label serial2)
+
+- serial0: Console for the four Cortex-A35 primary processors.
+- serial1 and serial2: Consoles for the two Cortex-M4 coprocessors.
+
+Use ``tio`` or another terminal emulator to connect to the consoles:
+
+.. code-block:: bash
+
+ $ tio /dev/pts/55
+ $ tio /dev/pts/56
+ $ tio /dev/pts/57
+
+
Aspeed minibmc family boards (``ast1030-evb``)
==================================================================
@@ -255,51 +476,3 @@ To boot a kernel directly from a Zephyr build tree:
$ qemu-system-arm -M ast1030-evb -nographic \
-kernel zephyr.elf
-
-Facebook Yosemite v3.5 Platform and CraterLake Server (``fby35``)
-==================================================================
-
-Facebook has a series of multi-node compute server designs named
-Yosemite. The most recent version released was
-`Yosemite v3 <https://www.opencompute.org/documents/ocp-yosemite-v3-platform-design-specification-1v16-pdf>`__.
-
-Yosemite v3.5 is an iteration on this design, and is very similar: there's a
-baseboard with a BMC, and 4 server slots. The new server board design termed
-"CraterLake" includes a Bridge IC (BIC), with room for expansion boards to
-include various compute accelerators (video, inferencing, etc). At the moment,
-only the first server slot's BIC is included.
-
-Yosemite v3.5 is itself a sled which fits into a 40U chassis, and 3 sleds
-can be fit into a chassis. See `here <https://www.opencompute.org/products/423/wiwynn-yosemite-v3-server>`__
-for an example.
-
-In this generation, the BMC is an AST2600 and each BIC is an AST1030. The BMC
-runs `OpenBMC <https://github.com/facebook/openbmc>`__, and the BIC runs
-`OpenBIC <https://github.com/facebook/openbic>`__.
-
-Firmware images can be retrieved from the Github releases or built from the
-source code, see the README's for instructions on that. This image uses the
-"fby35" machine recipe from OpenBMC, and the "yv35-cl" target from OpenBIC.
-Some reference images can also be found here:
-
-.. code-block:: bash
-
- $ wget https://github.com/facebook/openbmc/releases/download/openbmc-e2294ff5d31d/fby35.mtd
- $ wget https://github.com/peterdelevoryas/OpenBIC/releases/download/oby35-cl-2022.13.01/Y35BCL.elf
-
-Since this machine has multiple SoC's, each with their own serial console, the
-recommended way to run it is to allocate a pseudoterminal for each serial
-console and let the monitor use stdio. Also, starting in a paused state is
-useful because it allows you to attach to the pseudoterminals before the boot
-process starts.
-
-.. code-block:: bash
-
- $ qemu-system-arm -machine fby35 \
- -drive file=fby35.mtd,format=raw,if=mtd \
- -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \
- -serial pty -serial pty -serial mon:stdio \
- -display none -S
- $ screen /dev/tty0 # In a separate TMUX pane, terminal window, etc.
- $ screen /dev/tty1
- $ (qemu) c # Start the boot process once screen is setup.
diff --git a/docs/system/arm/bananapi_m2u.rst b/docs/system/arm/bananapi_m2u.rst
index 587b488..03cc561 100644
--- a/docs/system/arm/bananapi_m2u.rst
+++ b/docs/system/arm/bananapi_m2u.rst
@@ -125,16 +125,15 @@ And then boot it.
$ qemu-system-arm -M bpim2u -nographic -sd sd.img
-Banana Pi M2U integration tests
-"""""""""""""""""""""""""""""""
+Banana Pi M2U functional tests
+""""""""""""""""""""""""""""""
-The Banana Pi M2U machine has several integration tests included.
+The Banana Pi M2U machine has several functional tests included.
To run the whole set of tests, build QEMU from source and simply
provide the following command:
.. code-block:: bash
$ cd qemu-build-dir
- $ AVOCADO_ALLOW_LARGE_STORAGE=yes tests/venv/bin/avocado \
- --verbose --show=app,console run -t machine:bpim2u \
- ../tests/avocado/boot_linux_console.py
+ $ QEMU_TEST_ALLOW_LARGE_STORAGE=1 \
+ pyvenv/bin/meson test --suite thorough func-arm-arm_bpim2u
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
index a5fb929..37d5dfd 100644
--- a/docs/system/arm/cpu-features.rst
+++ b/docs/system/arm/cpu-features.rst
@@ -219,8 +219,11 @@ Below is the list of TCG VCPU features and their descriptions.
``pauth-qarma3``
When ``pauth`` is enabled, select the architected QARMA3 algorithm.
-Without either ``pauth-impdef`` or ``pauth-qarma3`` enabled,
-the architected QARMA5 algorithm is used. The architected QARMA5
+``pauth-qarma5``
+ When ``pauth`` is enabled, select the architected QARMA5 algorithm.
+
+Without ``pauth-impdef``, ``pauth-qarma3`` or ``pauth-qarma5`` enabled,
+the QEMU impdef algorithm is used. The architected QARMA5
and QARMA3 algorithms have good cryptographic properties, but can
be quite slow to emulate. The impdef algorithm used by QEMU is
non-cryptographic but significantly faster.
diff --git a/docs/system/arm/cubieboard.rst b/docs/system/arm/cubieboard.rst
index 58c4a2d..90d24c7 100644
--- a/docs/system/arm/cubieboard.rst
+++ b/docs/system/arm/cubieboard.rst
@@ -15,4 +15,5 @@ Emulated devices:
- USB controller
- SATA controller
- TWI (I2C) controller
+- SPI controller
- Watchdog timer
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 3ab6e72..78c2fd2 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -3,8 +3,8 @@
A-profile CPU architecture support
==================================
-QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7 and
-Armv8 versions of the A-profile architecture. It also has support for
+QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7,
+Armv8 and Armv9 versions of the A-profile architecture. It also has support for
the following architecture extensions:
- FEAT_AA32BF16 (AArch32 BFloat16 instructions)
@@ -20,12 +20,14 @@ the following architecture extensions:
- FEAT_AA64EL3 (Support for AArch64 at EL3)
- FEAT_AdvSIMD (Advanced SIMD Extension)
- FEAT_AES (AESD and AESE instructions)
+- FEAT_AFP (Alternate floating-point behavior)
- FEAT_Armv9_Crypto (Armv9 Cryptographic Extension)
- FEAT_ASID16 (16 bit ASID)
- FEAT_BBM at level 2 (Translation table break-before-make levels)
- FEAT_BF16 (AArch64 BFloat16 instructions)
- FEAT_BTI (Branch Target Identification)
- FEAT_CCIDX (Extended cache index)
+- FEAT_CMOW (Control for cache maintenance permission)
- FEAT_CRC32 (CRC32 instructions)
- FEAT_Crypto (Cryptographic Extension)
- FEAT_CSV2 (Cache speculation variant 2)
@@ -36,6 +38,7 @@ the following architecture extensions:
- FEAT_CSV3 (Cache speculation variant 3)
- FEAT_DGH (Data gathering hint)
- FEAT_DIT (Data Independent Timing instructions)
+- FEAT_DoubleLock (Double Lock)
- FEAT_DPB (DC CVAP instruction)
- FEAT_DPB2 (DC CVADP instruction)
- FEAT_Debugv8p1 (Debug with VHE)
@@ -45,6 +48,7 @@ the following architecture extensions:
- FEAT_DotProd (Advanced SIMD dot product instructions)
- FEAT_DoubleFault (Double Fault Extension)
- FEAT_E0PD (Preventing EL0 access to halves of address maps)
+- FEAT_EBF16 (AArch64 Extended BFloat16 instructions)
- FEAT_ECV (Enhanced Counter Virtualization)
- FEAT_EL0 (Support for execution at EL0)
- FEAT_EL1 (Support for execution at EL1)
@@ -86,12 +90,13 @@ the following architecture extensions:
- FEAT_LSE2 (Large System Extensions v2)
- FEAT_LVA (Large Virtual Address space)
- FEAT_MixedEnd (Mixed-endian support)
-- FEAT_MixdEndEL0 (Mixed-endian support at EL0)
+- FEAT_MixedEndEL0 (Mixed-endian support at EL0)
- FEAT_MOPS (Standardization of memory operations)
- FEAT_MTE (Memory Tagging Extension)
- FEAT_MTE2 (Memory Tagging Extension)
- FEAT_MTE3 (MTE Asymmetric Fault Handling)
- FEAT_MTE_ASYM_FAULT (Memory tagging asymmetric faults)
+- FEAT_MTE_ASYNC (Asynchronous reporting of Tag Check Fault)
- FEAT_NMI (Non-maskable Interrupt)
- FEAT_NV (Nested Virtualization)
- FEAT_NV2 (Enhanced nested virtualization support)
@@ -113,6 +118,7 @@ the following architecture extensions:
- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
- FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental)
- FEAT_RNG (Random number generator)
+- FEAT_RPRES (Increased precision of FRECPE and FRSQRTE)
- FEAT_S2FWB (Stage 2 forced Write-Back)
- FEAT_SB (Speculation Barrier)
- FEAT_SEL2 (Secure EL2)
@@ -135,6 +141,7 @@ the following architecture extensions:
- FEAT_SVE2 (Scalable Vector Extension version 2)
- FEAT_SPECRES (Speculation restriction instructions)
- FEAT_SSBS (Speculative Store Bypass Safe)
+- FEAT_SSBS2 (MRS and MSR instructions for SSBS version 2)
- FEAT_TGran16K (Support for 16KB memory translation granule size at stage 1)
- FEAT_TGran4K (Support for 4KB memory translation granule size at stage 1)
- FEAT_TGran64K (Support for 64KB memory translation granule size at stage 1)
@@ -149,9 +156,10 @@ the following architecture extensions:
- FEAT_VMID16 (16-bit VMID)
- FEAT_WFxT (WFE and WFI instructions with timeout)
- FEAT_XNX (Translation table stage 2 Unprivileged Execute-never)
+- FEAT_XS (XS attribute)
For information on the specifics of these extensions, please refer
-to the `Armv8-A Arm Architecture Reference Manual
+to the `Arm Architecture Reference Manual for A-profile architecture
<https://developer.arm.com/documentation/ddi0487/latest>`_.
When a specific named CPU is being emulated, only those features which
diff --git a/docs/system/arm/exynos.rst b/docs/system/arm/exynos.rst
new file mode 100644
index 0000000..86894bc
--- /dev/null
+++ b/docs/system/arm/exynos.rst
@@ -0,0 +1,9 @@
+Exynos4 boards (``nuri``, ``smdkc210``)
+=======================================
+
+These machines use the Samsung Exynos4210 SoC, which has Cortex-A9 CPUs.
+
+``nuri`` models the Samsung NURI board.
+
+``smdkc210`` models the Samsung SMDKC210 board.
+
diff --git a/docs/system/arm/fby35.rst b/docs/system/arm/fby35.rst
new file mode 100644
index 0000000..e19274e
--- /dev/null
+++ b/docs/system/arm/fby35.rst
@@ -0,0 +1,52 @@
+Facebook Yosemite v3.5 Platform and CraterLake Server (``fby35``)
+==================================================================
+
+Facebook has a series of multi-node compute server designs named
+Yosemite. The most recent version released was
+`Yosemite v3 <https://www.opencompute.org/documents/ocp-yosemite-v3-platform-design-specification-1v16-pdf>`__.
+
+Yosemite v3.5 is an iteration on this design, and is very similar: there's a
+baseboard with a BMC, and 4 server slots. The new server board design termed
+"CraterLake" includes a Bridge IC (BIC), with room for expansion boards to
+include various compute accelerators (video, inferencing, etc). At the moment,
+only the first server slot's BIC is included.
+
+Yosemite v3.5 is itself a sled which fits into a 40U chassis, and 3 sleds
+can be fit into a chassis. See `here <https://www.opencompute.org/products-chiplets/237/wiwynn-yosemite-v3-server>`__
+for an example.
+
+In this generation, the BMC is an AST2600 and each BIC is an AST1030. The BMC
+runs `OpenBMC <https://github.com/facebook/openbmc>`__, and the BIC runs
+`OpenBIC <https://github.com/facebook/openbic>`__.
+
+Firmware images can be retrieved from the GitHub releases or built from the
+source code; see the READMEs for instructions. This image uses the
+"fby35" machine recipe from OpenBMC, and the "yv35-cl" target from OpenBIC.
+Some reference images can also be found here:
+
+.. code-block:: bash
+
+ $ wget https://github.com/facebook/openbmc/releases/download/openbmc-e2294ff5d31d/fby35.mtd
+ $ wget https://github.com/peterdelevoryas/OpenBIC/releases/download/oby35-cl-2022.13.01/Y35BCL.elf
+
+Since this machine has multiple SoCs, each with its own serial console, the
+recommended way to run it is to allocate a pseudoterminal for each serial
+console and let the monitor use stdio. Also, starting in a paused state is
+useful because it allows you to attach to the pseudoterminals before the boot
+process starts.
+
+.. code-block:: bash
+
+ $ qemu-system-arm -machine fby35 \
+ -drive file=fby35.mtd,format=raw,if=mtd \
+ -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \
+ -serial pty -serial pty -serial mon:stdio \
+ -display none -S
+ $ screen /dev/tty0 # In a separate TMUX pane, terminal window, etc.
+ $ screen /dev/tty1
+ $ (qemu) c             # Start the boot process once screen is set up.
+
+This machine model supports emulating boot from the CE0 flash device by
+setting the ``execute-in-place`` option. When this option is used, the CPU
+fetches instructions by reading CE0 directly rather than from a preloaded ROM
+initialized at machine init time. As a result, execution is slower.
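+
+For example, a sketch re-using the invocation above with the option enabled
+(the image and firmware files are the same as in the previous example):
+
+.. code-block:: bash
+
+ $ qemu-system-arm -machine fby35,execute-in-place=true \
+ -drive file=fby35.mtd,format=raw,if=mtd \
+ -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \
+ -serial pty -serial pty -serial mon:stdio \
+ -display none -S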
diff --git a/docs/system/arm/gumstix.rst b/docs/system/arm/gumstix.rst
deleted file mode 100644
index cb37313..0000000
--- a/docs/system/arm/gumstix.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-Gumstix Connex and Verdex (``connex``, ``verdex``)
-==================================================
-
-These machines model the Gumstix Connex and Verdex boards.
-The Connex has a PXA255 CPU and the Verdex has a PXA270.
-
-Implemented devices:
-
- * NOR flash
- * SMC91C111 ethernet
- * Interrupt controller
- * DMA
- * Timer
- * GPIO
- * MMC/SD card
- * Fast infra-red communications port (FIR)
- * LCD controller
- * Synchronous serial ports (SPI)
- * PCMCIA interface
- * I2C
- * I2S
diff --git a/docs/system/arm/imx8mp-evk.rst b/docs/system/arm/imx8mp-evk.rst
new file mode 100644
index 0000000..b2f7d29
--- /dev/null
+++ b/docs/system/arm/imx8mp-evk.rst
@@ -0,0 +1,62 @@
+NXP i.MX 8M Plus Evaluation Kit (``imx8mp-evk``)
+================================================
+
+The ``imx8mp-evk`` machine models the i.MX 8M Plus Evaluation Kit, based on an
+i.MX 8M Plus SoC.
+
+Supported devices
+-----------------
+
+The ``imx8mp-evk`` machine implements the following devices:
+
+ * Up to 4 Cortex-A53 cores
+ * Generic Interrupt Controller (GICv3)
+ * 4 UARTs
+ * 3 USDHC Storage Controllers
+ * 1 Designware PCI Express Controller
+ * 1 Ethernet Controller
+ * 2 Designware USB 3 Controllers
+ * 5 GPIO Controllers
+ * 6 I2C Controllers
+ * 3 SPI Controllers
+ * 3 Watchdogs
+ * 6 General Purpose Timers
+ * Secure Non-Volatile Storage (SNVS) including an RTC
+ * Clock Tree
+
+Boot options
+------------
+
+The ``imx8mp-evk`` machine can start a Linux kernel directly using the standard
+``-kernel`` functionality.
+
+Direct Linux Kernel Boot
+''''''''''''''''''''''''
+
+Probably the easiest way to get started with a whole Linux system on the machine
+is to generate an image with Buildroot. Version 2024.11.1 was tested at the time
+of writing; the process involves two steps. First, run the following commands in
+the top-level directory of the Buildroot source tree:
+
+.. code-block:: bash
+
+ $ make freescale_imx8mpevk_defconfig
+ $ make
+
+Once finished successfully, there is an ``output/images`` subfolder. Navigate
+into it and resize the SD card image to a power of two:
+
+.. code-block:: bash
+
+ $ qemu-img resize sdcard.img 256M
+
+Now that everything is prepared, the machine can be started as follows:
+
+.. code-block:: bash
+
+ $ qemu-system-aarch64 -M imx8mp-evk -smp 4 -m 3G \
+ -display none -serial null -serial stdio \
+ -kernel Image \
+ -dtb imx8mp-evk.dtb \
+ -append "root=/dev/mmcblk2p2" \
+ -drive file=sdcard.img,if=sd,bus=2,format=raw,id=mmcblk2
diff --git a/docs/system/arm/mainstone.rst b/docs/system/arm/mainstone.rst
deleted file mode 100644
index 05310f4..0000000
--- a/docs/system/arm/mainstone.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-Intel Mainstone II board (``mainstone``)
-========================================
-
-The ``mainstone`` board emulates the Intel Mainstone II development
-board, which uses a PXA270 CPU.
-
-Emulated devices:
-
-- Flash memory
-- Keypad
-- MMC controller
-- 91C111 ethernet
-- PIC
-- Timer
-- DMA
-- GPIO
-- FIR
-- Serial
-- LCD controller
-- SSP
-- USB controller
-- RTC
-- PCMCIA
-- I2C
-- I2S
diff --git a/docs/system/arm/mcimx6ul-evk.rst b/docs/system/arm/mcimx6ul-evk.rst
new file mode 100644
index 0000000..8871138
--- /dev/null
+++ b/docs/system/arm/mcimx6ul-evk.rst
@@ -0,0 +1,5 @@
+NXP MCIMX6UL-EVK (``mcimx6ul-evk``)
+===================================
+
+The ``mcimx6ul-evk`` machine models the NXP i.MX6UltraLite Evaluation Kit
+MCIMX6UL-EVK development board. It has a single Cortex-A7 CPU.
diff --git a/docs/system/arm/mcimx7d-sabre.rst b/docs/system/arm/mcimx7d-sabre.rst
new file mode 100644
index 0000000..c5d35af
--- /dev/null
+++ b/docs/system/arm/mcimx7d-sabre.rst
@@ -0,0 +1,5 @@
+NXP MCIMX7D Sabre (``mcimx7d-sabre``)
+=====================================
+
+The ``mcimx7d-sabre`` machine models the NXP SABRE Board MCIMX7SABRE,
+based on an i.MX7Dual SoC.
diff --git a/docs/system/arm/nseries.rst b/docs/system/arm/nseries.rst
deleted file mode 100644
index cd9edf5..0000000
--- a/docs/system/arm/nseries.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-Nokia N800 and N810 tablets (``n800``, ``n810``)
-================================================
-
-Nokia N800 and N810 internet tablets (known also as RX-34 and RX-44 /
-48) emulation supports the following elements:
-
-- Texas Instruments OMAP2420 System-on-chip (ARM1136 core)
-
-- RAM and non-volatile OneNAND Flash memories
-
-- Display connected to EPSON remote framebuffer chip and OMAP on-chip
- display controller and a LS041y3 MIPI DBI-C controller
-
-- TI TSC2301 (in N800) and TI TSC2005 (in N810) touchscreen
- controllers driven through SPI bus
-
-- National Semiconductor LM8323-controlled qwerty keyboard driven
- through |I2C| bus
-
-- Secure Digital card connected to OMAP MMC/SD host
-
-- Three OMAP on-chip UARTs and on-chip STI debugging console
-
-- Mentor Graphics \"Inventra\" dual-role USB controller embedded in a
- TI TUSB6010 chip - only USB host mode is supported
-
-- TI TMP105 temperature sensor driven through |I2C| bus
-
-- TI TWL92230C power management companion with an RTC on
- |I2C| bus
-
-- Nokia RETU and TAHVO multi-purpose chips with an RTC, connected
- through CBUS
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index 0424cae..e4827fb 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -1,12 +1,13 @@
-Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
-================================================================
+Nuvoton iBMC boards (``kudo-bmc``, ``mori-bmc``, ``npcm750-evb``, ``quanta-gbs-bmc``, ``quanta-gsj``, ``npcm845-evb``)
+======================================================================================================================
-The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
+The `Nuvoton iBMC`_ chips are a family of Arm-based SoCs that are
designed to be used as Baseboard Management Controllers (BMCs) in various
-servers. They all feature one or two ARM Cortex-A9 CPU cores, as well as an
-assortment of peripherals targeted for either Enterprise or Data Center /
-Hyperscale applications. The former is a superset of the latter, so NPCM750 has
-all the peripherals of NPCM730 and more.
+servers. Currently there are two families: the NPCM7XX series and the
+NPCM8XX series. The NPCM7XX series features one or two Arm Cortex-A9 CPU cores,
+while the NPCM8XX series features four Arm Cortex-A35 CPU cores. Both series
+contain a different assortment of peripherals targeted at either Enterprise or
+Data Center / Hyperscale applications.
.. _Nuvoton iBMC: https://www.nuvoton.com/products/cloud-computing/ibmc/
@@ -27,6 +28,11 @@ There are also two more SoCs, NPCM710 and NPCM705, which are single-core
variants of NPCM750 and NPCM730, respectively. These are currently not
supported by QEMU.
+The NPCM8xx SoC is the successor of the NPCM7xx SoC. It has four Cortex-A35 cores.
+The following machines are based on this chip:
+
+- ``npcm845-evb`` Nuvoton NPCM845 Evaluation board
+
Supported devices
-----------------
@@ -62,6 +68,8 @@ Missing devices
* System Wake-up Control (SWC)
* Shared memory (SHM)
* eSPI slave interface
+ * Block-transfer interface (8XX only)
+ * Virtual UART (8XX only)
* Ethernet controller (GMAC)
* USB device (USBD)
@@ -76,6 +84,11 @@ Missing devices
* Video capture
* Encoding compression engine
* Security features
+ * I3C buses (8XX only)
+ * Temperature sensor interface (8XX only)
+ * Virtual UART (8XX only)
+ * Flash monitor (8XX only)
+ * JTAG master (8XX only)
Boot options
------------
diff --git a/docs/system/arm/orangepi.rst b/docs/system/arm/orangepi.rst
index 9afa542..d81f6c3 100644
--- a/docs/system/arm/orangepi.rst
+++ b/docs/system/arm/orangepi.rst
@@ -119,7 +119,7 @@ Orange Pi PC images
Note that the mainline kernel does not have a root filesystem. You may provide it
with an official Orange Pi PC image from the official website:
- http://www.orangepi.org/downloadresources/
+ http://www.orangepi.org/html/serviceAndSupport/index.html
Another possibility is to run an Armbian image for Orange Pi PC which
can be downloaded from:
@@ -213,7 +213,7 @@ including the Orange Pi PC. NetBSD 9.0 is known to work best for the Orange Pi P
board and provides a fully working system with serial console, networking and storage.
For the Orange Pi PC machine, get the 'evbarm-earmv7hf' based image from:
- https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.0/evbarm-earmv7hf/binary/gzimg/armv7.img.gz
+ https://archive.netbsd.org/pub/NetBSD-archive/NetBSD-9.0/evbarm-earmv7hf/binary/gzimg/armv7.img.gz
The image requires manually installing U-Boot in the image. Build U-Boot with
the orangepi_pc_defconfig configuration as described in the previous section.
@@ -252,14 +252,14 @@ and set the following environment variables before booting:
Optionally you may save the environment variables to SD card with 'saveenv'.
To continue booting simply give the 'boot' command and NetBSD boots.
-Orange Pi PC integration tests
-""""""""""""""""""""""""""""""
+Orange Pi PC functional tests
+"""""""""""""""""""""""""""""
-The Orange Pi PC machine has several integration tests included.
+The Orange Pi PC machine has several functional tests included.
To run the whole set of tests, build QEMU from source and simply
-provide the following command:
+provide the following command from the build directory:
.. code-block:: bash
- $ AVOCADO_ALLOW_LARGE_STORAGE=yes avocado --show=app,console run \
- -t machine:orangepi-pc tests/avocado/boot_linux_console.py
+ $ QEMU_TEST_ALLOW_LARGE_STORAGE=1 \
+ pyvenv/bin/meson test --suite thorough func-arm-arm_orangepi
diff --git a/docs/system/arm/palm.rst b/docs/system/arm/palm.rst
deleted file mode 100644
index 61bc8d3..0000000
--- a/docs/system/arm/palm.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-Palm Tungsten|E PDA (``cheetah``)
-=================================
-
-The Palm Tungsten|E PDA (codename \"Cheetah\") emulation includes the
-following elements:
-
-- Texas Instruments OMAP310 System-on-chip (ARM925T core)
-
-- ROM and RAM memories (ROM firmware image can be loaded with
- -option-rom)
-
-- On-chip LCD controller
-
-- On-chip Real Time Clock
-
-- TI TSC2102i touchscreen controller / analog-digital converter /
- Audio CODEC, connected through MicroWire and |I2S| buses
-
-- GPIO-connected matrix keypad
-
-- Secure Digital card connected to OMAP MMC/SD host
-
-- Three on-chip UARTs
diff --git a/docs/system/arm/stm32.rst b/docs/system/arm/stm32.rst
index 3b640f3..511e3eb 100644
--- a/docs/system/arm/stm32.rst
+++ b/docs/system/arm/stm32.rst
@@ -1,5 +1,5 @@
-STMicroelectronics STM32 boards (``netduino2``, ``netduinoplus2``, ``stm32vldiscovery``)
-========================================================================================
+STMicroelectronics STM32 boards (``netduino2``, ``netduinoplus2``, ``olimex-stm32-h405``, ``stm32vldiscovery``)
+===============================================================================================================
The `STM32`_ chips are a family of 32-bit ARM-based microcontroller by
STMicroelectronics.
@@ -36,6 +36,7 @@ Supported devices
* SPI controller
* System configuration (SYSCFG)
* Timer controller (TIMER)
+ * Reset and Clock Controller (RCC) (STM32F4 only, reset and enable only)
Missing devices
---------------
@@ -53,7 +54,7 @@ Missing devices
* Power supply configuration (PWR)
* Random Number Generator (RNG)
* Real-Time Clock (RTC) controller
- * Reset and Clock Controller (RCC)
+ * Reset and Clock Controller (RCC) (features other than reset and enable)
* Secure Digital Input/Output (SDIO) interface
* USB OTG
* Watchdog controller (IWDG, WWDG)
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
index e67e7f0..6a719b9 100644
--- a/docs/system/arm/virt.rst
+++ b/docs/system/arm/virt.rst
@@ -1,3 +1,5 @@
+.. _arm-virt:
+
'virt' generic virtual platform (``virt``)
==========================================
@@ -19,6 +21,10 @@ of the 5.0 release and ``virt-5.0`` of the 5.1 release. Migration
is not guaranteed to work between different QEMU releases for
the non-versioned ``virt`` machine type.
+VM migration is not guaranteed when using ``-cpu max``, as the set of
+supported features may change between QEMU versions. To ensure your VM can be
+migrated, it is recommended to use a named CPU model instead.
+
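+For example, a migration-friendly invocation pins a named CPU model
+(``neoverse-n1`` is chosen here purely for illustration; any model from the
+supported CPU types listed below works):
+
+.. code-block:: bash
+
+ $ qemu-system-aarch64 -M virt -cpu neoverse-n1 [...]
+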
Supported devices
"""""""""""""""""
@@ -64,11 +70,11 @@ Supported guest CPU types:
- ``cortex-a76`` (64-bit)
- ``cortex-a710`` (64-bit)
- ``a64fx`` (64-bit)
-- ``host`` (with KVM only)
+- ``host`` (with KVM and HVF only)
- ``neoverse-n1`` (64-bit)
- ``neoverse-v1`` (64-bit)
- ``neoverse-n2`` (64-bit)
-- ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
+- ``max`` (same as ``host`` for KVM and HVF; best possible emulation with TCG)
Note that the default is ``cortex-a15``, so for an AArch64 guest you must
specify a CPU type.
@@ -138,6 +144,10 @@ highmem-mmio
Set ``on``/``off`` to enable/disable the high memory region for PCI MMIO.
The default is ``on``.
+highmem-mmio-size
+ Set the high memory region size for PCI MMIO. Must be a power of 2 and
+ greater than or equal to the default size (512G).
+
gic-version
Specify the version of the Generic Interrupt Controller (GIC) to provide.
Valid values are:
@@ -167,10 +177,18 @@ iommu
``smmuv3``
Create an SMMUv3
+default-bus-bypass-iommu
+ Set ``on``/``off`` to enable/disable `bypass_iommu
+ <https://gitlab.com/qemu-project/qemu/-/blob/master/docs/bypass-iommu.txt>`_
+ for default root bus.
+
ras
Set ``on``/``off`` to enable/disable reporting host memory errors to a guest
using ACPI and guest external abort exceptions. The default is off.
+acpi
+ Set ``on``/``off``/``auto`` to enable/disable ACPI.
+
dtb-randomness
Set ``on``/``off`` to pass random seeds via the guest DTB
rng-seed and kaslr-seed nodes (in both "/chosen" and
@@ -184,6 +202,14 @@ dtb-randomness
dtb-kaslr-seed
A deprecated synonym for dtb-randomness.
+x-oem-id
+ Set string (up to 6 bytes) to override the default value of field OEMID in ACPI
+ table header.
+
+x-oem-table-id
+ Set string (up to 8 bytes) to override the default value of field OEM Table ID
+ in ACPI table header.
+
Linux guest kernel configuration
""""""""""""""""""""""""""""""""
diff --git a/docs/system/arm/vmapple.rst b/docs/system/arm/vmapple.rst
new file mode 100644
index 0000000..35c329e
--- /dev/null
+++ b/docs/system/arm/vmapple.rst
@@ -0,0 +1,65 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+VMApple machine emulation
+========================================================================================
+
+VMApple is the device model that the macOS built-in hypervisor called "Virtualization.framework"
+exposes to Apple Silicon macOS guests. The "vmapple" machine model in QEMU implements the same
+device model, but does not use any code from Virtualization.Framework.
+
+Prerequisites
+-------------
+
+To run the vmapple machine model, you need to
+
+ * Run on Apple Silicon
+ * Run on macOS 12.0 or above
+ * Have an already installed copy of a Virtualization.Framework macOS 12 virtual
+ machine. Note that versions newer than 12.x are currently NOT supported on
+ the guest side. This guide assumes that you installed it using the
+ `macosvm <https://github.com/s-u/macosvm>`__ CLI.
+
+First, we need to extract the UUID from the virtual machine that you installed. You can do this
+by running the shell script in contrib/vmapple/uuid.sh on the macosvm.json file.
+
+.. code-block:: bash
+ :caption: uuid.sh script to extract the UUID from a macosvm.json file
+
+ $ contrib/vmapple/uuid.sh "path/to/macosvm.json"
+
+Now we also need to trim the aux partition. It contains metadata that we can just discard:
+
+.. code-block:: bash
+ :caption: Command to trim the aux file
+
+ $ dd if="aux.img" of="aux.img.trimmed" bs=$(( 0x4000 )) skip=1
+
+How to run
+----------
+
+Then, we can launch QEMU with the Virtualization.Framework pre-boot environment and the readily
+installed target disk images. It is recommended to forward the VM's SSH and VNC ports to the host
+to get better interactive access to the target system:
+
+.. code-block:: bash
+ :caption: Example execution command line
+
+ $ UUID="$(contrib/vmapple/uuid.sh 'macosvm.json')"
+ $ AVPBOOTER="/System/Library/Frameworks/Virtualization.framework/Resources/AVPBooter.vmapple2.bin"
+ $ AUX="aux.img.trimmed"
+ $ DISK="disk.img"
+ $ qemu-system-aarch64 \
+ -serial mon:stdio \
+ -m 4G \
+ -accel hvf \
+ -M vmapple,uuid="$UUID" \
+ -bios "$AVPBOOTER" \
+ -drive file="$AUX",if=pflash,format=raw \
+ -drive file="$DISK",if=pflash,format=raw \
+ -drive file="$AUX",if=none,id=aux,format=raw \
+ -drive file="$DISK",if=none,id=root,format=raw \
+ -device vmapple-virtio-blk-pci,variant=aux,drive=aux \
+ -device vmapple-virtio-blk-pci,variant=root,drive=root \
+ -netdev user,id=net0,ipv6=off,hostfwd=tcp::2222-:22,hostfwd=tcp::5901-:5900 \
+ -device virtio-net-pci,netdev=net0
+
diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst
index 0bafc76..c5f35f2 100644
--- a/docs/system/arm/xlnx-versal-virt.rst
+++ b/docs/system/arm/xlnx-versal-virt.rst
@@ -178,6 +178,9 @@ Run the following at the U-Boot prompt:
fdt set /chosen/dom0 reg <0x00000000 0x40000000 0x0 0x03100000>
booti 30000000 - 20000000
+It is possible to change the emulated OSPI flash model by using the machine
+option ``ospi-flash``.
+
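+For example (a sketch only; ``mt35xu02g`` is an assumed value and must be a
+flash model supported by QEMU's m25p80 emulation):
+
+.. code-block:: bash
+
+ $ qemu-system-aarch64 -M xlnx-versal-virt,ospi-flash=mt35xu02g [...]
+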
BBRAM File Backend
""""""""""""""""""
BBRAM can have an optional file backend, which must be a seekable
diff --git a/docs/system/arm/xlnx-zcu102.rst b/docs/system/arm/xlnx-zcu102.rst
new file mode 100644
index 0000000..534cd1d
--- /dev/null
+++ b/docs/system/arm/xlnx-zcu102.rst
@@ -0,0 +1,19 @@
+Xilinx ZynqMP ZCU102 (``xlnx-zcu102``)
+======================================
+
+The ``xlnx-zcu102`` board models the Xilinx ZynqMP ZCU102 board.
+This board has 4 Cortex-A53 CPUs and 2 Cortex-R5F CPUs.
+
+Machine-specific options
+""""""""""""""""""""""""
+
+The following machine-specific options are supported:
+
+secure
+ Set ``on``/``off`` to enable/disable emulating a guest CPU which implements the
+ Arm Security Extensions (TrustZone). The default is ``off``.
+
+virtualization
+ Set ``on``/``off`` to enable/disable emulating a guest CPU which implements the
+ Arm Virtualization Extensions. The default is ``off``.
+
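+For example, a minimal sketch enabling both options (all other options, kernel
+and firmware files are left as placeholders to be filled in for your setup):
+
+.. code-block:: bash
+
+ $ qemu-system-aarch64 -M xlnx-zcu102,secure=on,virtualization=on [...]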
diff --git a/docs/system/arm/xscale.rst b/docs/system/arm/xscale.rst
deleted file mode 100644
index e239136..0000000
--- a/docs/system/arm/xscale.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Sharp XScale-based PDA models (``akita``, ``borzoi``, ``spitz``, ``terrier``, ``tosa``)
-=======================================================================================
-
-The Sharp Zaurus are PDAs based on XScale, able to run Linux ('SL series').
-
-The SL-6000 (\"Tosa\"), released in 2005, uses a PXA255 System-on-chip.
-
-The SL-C3000 (\"Spitz\"), SL-C1000 (\"Akita\"), SL-C3100 (\"Borzoi\") and
-SL-C3200 (\"Terrier\") use a PXA270.
-
-The clamshell PDA models emulation includes the following peripherals:
-
-- Intel PXA255/PXA270 System-on-chip (ARMv5TE core)
-
-- NAND Flash memory - not in \"Tosa\"
-
-- IBM/Hitachi DSCM microdrive in a PXA PCMCIA slot - not in \"Akita\"
-
-- On-chip OHCI USB controller - not in \"Tosa\"
-
-- On-chip LCD controller
-
-- On-chip Real Time Clock
-
-- TI ADS7846 touchscreen controller on SSP bus
-
-- Maxim MAX1111 analog-digital converter on |I2C| bus
-
-- GPIO-connected keyboard controller and LEDs
-
-- Secure Digital card connected to PXA MMC/SD host
-
-- Three on-chip UARTs
-
-- WM8750 audio CODEC on |I2C| and |I2S| buses
diff --git a/docs/system/bootindex.rst b/docs/system/bootindex.rst
index 8b057f8..5e1b33e 100644
--- a/docs/system/bootindex.rst
+++ b/docs/system/bootindex.rst
@@ -49,10 +49,11 @@ Limitations
-----------
Some firmware has limitations on which devices can be considered for
-booting. For instance, the PC BIOS boot specification allows only one
-disk to be bootable. If boot from disk fails for some reason, the BIOS
+booting. For instance, the x86 PC BIOS boot specification allows only one
+disk to be bootable. If boot from disk fails for some reason, the x86 BIOS
won't retry booting from other disk. It can still try to boot from
-floppy or net, though.
+floppy or net, though. In the case of the s390x BIOS, it will try up to
+8 devices in total, any number of which may be disks or virtio-net devices.
Sometimes, firmware cannot map the device path QEMU wants firmware to
boot from to a boot method. It doesn't happen for devices the firmware
diff --git a/docs/system/confidential-guest-support.rst b/docs/system/confidential-guest-support.rst
index 0c490db..66129fb 100644
--- a/docs/system/confidential-guest-support.rst
+++ b/docs/system/confidential-guest-support.rst
@@ -38,6 +38,7 @@ Supported mechanisms
Currently supported confidential guest mechanisms are:
* AMD Secure Encrypted Virtualization (SEV) (see :doc:`i386/amd-memory-encryption`)
+* Intel Trust Domain Extension (TDX) (see :doc:`i386/tdx`)
* POWER Protected Execution Facility (PEF) (see :ref:`power-papr-protected-execution-facility-pef`)
* s390x Protected Virtualization (PV) (see :doc:`s390x/protvirt`)
diff --git a/docs/system/cpu-hotplug.rst b/docs/system/cpu-hotplug.rst
index 015ce2b..cc50937 100644
--- a/docs/system/cpu-hotplug.rst
+++ b/docs/system/cpu-hotplug.rst
@@ -33,23 +33,23 @@ vCPU hotplug
{
"return": [
{
- "type": "IvyBridge-IBRS-x86_64-cpu",
- "vcpus-count": 1,
"props": {
- "socket-id": 1,
- "core-id": 0,
+ "core-id": 1,
+ "socket-id": 0,
"thread-id": 0
- }
+ },
+ "type": "IvyBridge-IBRS-x86_64-cpu",
+ "vcpus-count": 1
},
{
- "qom-path": "/machine/unattached/device[0]",
- "type": "IvyBridge-IBRS-x86_64-cpu",
- "vcpus-count": 1,
"props": {
- "socket-id": 0,
"core-id": 0,
+ "socket-id": 0,
"thread-id": 0
- }
+ },
+ "qom-path": "/machine/unattached/device[0]",
+ "type": "IvyBridge-IBRS-x86_64-cpu",
+ "vcpus-count": 1
}
]
}
@@ -58,18 +58,18 @@ vCPU hotplug
(4) The ``query-hotpluggable-cpus`` command returns an object for CPUs
that are present (containing a "qom-path" member) or which may be
hot-plugged (no "qom-path" member). From its output in step (3), we
- can see that ``IvyBridge-IBRS-x86_64-cpu`` is present in socket 0,
- while hot-plugging a CPU into socket 1 requires passing the listed
+ can see that ``IvyBridge-IBRS-x86_64-cpu`` is present in socket 0 core 0,
+ while hot-plugging a CPU into socket 0 core 1 requires passing the listed
properties to QMP ``device_add``::
- (QEMU) device_add id=cpu-2 driver=IvyBridge-IBRS-x86_64-cpu socket-id=1 core-id=0 thread-id=0
+ (QEMU) device_add id=cpu-2 driver=IvyBridge-IBRS-x86_64-cpu socket-id=0 core-id=1 thread-id=0
{
"execute": "device_add",
"arguments": {
- "socket-id": 1,
+ "core-id": 1,
"driver": "IvyBridge-IBRS-x86_64-cpu",
"id": "cpu-2",
- "core-id": 0,
+ "socket-id": 0,
"thread-id": 0
}
}
@@ -83,34 +83,32 @@ vCPU hotplug
(QEMU) query-cpus-fast
{
- "execute": "query-cpus-fast",
"arguments": {}
+ "execute": "query-cpus-fast",
}
{
"return": [
{
- "qom-path": "/machine/unattached/device[0]",
- "target": "x86_64",
- "thread-id": 11534,
"cpu-index": 0,
"props": {
- "socket-id": 0,
"core-id": 0,
+ "socket-id": 0,
"thread-id": 0
},
- "arch": "x86"
+ "qom-path": "/machine/unattached/device[0]",
+ "target": "x86_64",
+ "thread-id": 28957
},
{
- "qom-path": "/machine/peripheral/cpu-2",
- "target": "x86_64",
- "thread-id": 12106,
"cpu-index": 1,
"props": {
- "socket-id": 1,
- "core-id": 0,
+ "core-id": 1,
+ "socket-id": 0,
"thread-id": 0
},
- "arch": "x86"
+ "qom-path": "/machine/peripheral/cpu-2",
+ "target": "x86_64",
+ "thread-id": 29095
}
]
}
@@ -123,10 +121,10 @@ From the 'qmp-shell', invoke the QMP ``device_del`` command::
(QEMU) device_del id=cpu-2
{
- "execute": "device_del",
"arguments": {
"id": "cpu-2"
}
+ "execute": "device_del",
}
{
"return": {}
diff --git a/docs/system/cpu-models-x86.rst.inc b/docs/system/cpu-models-x86.rst.inc
index ba27b56..6a770ca 100644
--- a/docs/system/cpu-models-x86.rst.inc
+++ b/docs/system/cpu-models-x86.rst.inc
@@ -71,6 +71,16 @@ mixture of host CPU models between machines, if live migration
compatibility is required, use the newest CPU model that is compatible
across all desired hosts.
+``ClearwaterForest``
+ Intel Xeon Processor (ClearwaterForest, 2025)
+
+``SierraForest``, ``SierraForest-v2``
+ Intel Xeon Processor (SierraForest, 2024). SierraForest-v2 mitigates
+ the GDS and RFDS vulnerabilities with stepping 3.
+
+``GraniteRapids``, ``GraniteRapids-v2``
+ Intel Xeon Processor (GraniteRapids, 2024)
+
``Cascadelake-Server``, ``Cascadelake-Server-noTSX``
Intel Xeon Processor (Cascade Lake, 2019), with "stepping" levels 6
or 7 only. (The Cascade Lake Xeon processor with *stepping 5 is
@@ -181,7 +191,7 @@ features are included if using "Host passthrough" or "Host model".
CVE-2018-12127, [MSBDS] CVE-2018-12126).
This is an MSR (Model-Specific Register) feature rather than a CPUID feature,
- so it will not appear in the Linux ``/proc/cpuinfo`` in the host or
+ therefore it will not appear in the Linux ``/proc/cpuinfo`` in the host or
guest. Instead, the host kernel uses it to populate the MDS
vulnerability file in ``sysfs``.
@@ -189,10 +199,10 @@ features are included if using "Host passthrough" or "Host model".
affected} in the ``/sys/devices/system/cpu/vulnerabilities/mds`` file.
``taa-no``
- Recommended to inform that the guest that the host is ``not``
+ Recommended to inform the guest that the host is ``not``
vulnerable to CVE-2019-11135, TSX Asynchronous Abort (TAA).
- This too is an MSR feature, so it does not show up in the Linux
+ This is also an MSR feature, therefore it does not show up in the Linux
``/proc/cpuinfo`` in the host or guest.
It should only be enabled for VMs if the host reports ``Not affected``
@@ -214,7 +224,7 @@ features are included if using "Host passthrough" or "Host model".
By disabling TSX, KVM-based guests can avoid paying the price of
mitigating TSX-based attacks.
- Note that ``tsx-ctrl`` too is an MSR feature, so it does not show
+ Note that ``tsx-ctrl`` is also an MSR feature, therefore it does not show
up in the Linux ``/proc/cpuinfo`` in the host or guest.
To validate that Intel TSX is indeed disabled for the guest, there are
@@ -223,6 +233,38 @@ features are included if using "Host passthrough" or "Host model".
``/sys/devices/system/cpu/vulnerabilities/tsx_async_abort`` file in
the guest should report ``Mitigation: TSX disabled``.
+``bhi-no``
+ Recommended to inform the guest that the host is ``not``
+ vulnerable to CVE-2022-0001, Branch History Injection (BHI).
+
+ This is also an MSR feature, therefore it does not show up in the Linux
+ ``/proc/cpuinfo`` in the host or guest.
+
+ It should only be enabled for VMs if the host reports
+ ``BHI: Not affected`` in the
+ ``/sys/devices/system/cpu/vulnerabilities/spectre_v2`` file.
+
+``gds-no``
+ Recommended to inform the guest that the host is ``not``
+ vulnerable to CVE-2022-40982, Gather Data Sampling (GDS).
+
+ This is also an MSR feature, therefore it does not show up in the Linux
+ ``/proc/cpuinfo`` in the host or guest.
+
+ It should only be enabled for VMs if the host reports ``Not affected``
+ in the ``/sys/devices/system/cpu/vulnerabilities/gather_data_sampling``
+ file.
+
+``rfds-no``
+ Recommended to inform the guest that the host is ``not``
+ vulnerable to CVE-2023-28746, Register File Data Sampling (RFDS).
+
+ This is also an MSR feature, therefore it does not show up in the Linux
+ ``/proc/cpuinfo`` in the host or guest.
+
+ It should only be enabled for VMs if the host reports ``Not affected``
+ in the ``/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling``
+ file.
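+
+As an illustration only (the CPU model below is arbitrary, and each flag should
+be set only when the host reports ``Not affected`` as described above), these
+bits are passed as additional ``-cpu`` properties:
+
+.. code-block:: shell
+
+ $ qemu-system-x86_64 -accel kvm -cpu Cascadelake-Server,bhi-no=on,gds-no=on,rfds-no=on [...]
+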
Preferred CPU models for AMD x86 hosts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst
index f197774..9113816 100644
--- a/docs/system/device-emulation.rst
+++ b/docs/system/device-emulation.rst
@@ -85,7 +85,9 @@ Emulated Devices
devices/can.rst
devices/ccid.rst
devices/cxl.rst
+ devices/vfio-user.rst
devices/ivshmem.rst
+ devices/ivshmem-flat.rst
devices/keyboard.rst
devices/net.rst
devices/nvme.rst
diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst
index 882b036..e307caf 100644
--- a/docs/system/devices/cxl.rst
+++ b/docs/system/devices/cxl.rst
@@ -308,7 +308,7 @@ A very simple setup with just one directly attached CXL Type 3 Persistent Memory
-object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \
-device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
-device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
- -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
+ -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0,sn=0x1 \
-M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
A very simple setup with just one directly attached CXL Type 3 Volatile Memory device::
@@ -349,13 +349,13 @@ the CXL Type3 device directly attached (no switches).::
-device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
-device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \
-device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
- -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \
+ -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0,sn=0x1 \
-device cxl-rp,port=1,bus=cxl.1,id=root_port14,chassis=0,slot=3 \
- -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \
+ -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1,sn=0x2 \
-device cxl-rp,port=0,bus=cxl.2,id=root_port15,chassis=0,slot=5 \
- -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \
+ -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2,sn=0x3 \
-device cxl-rp,port=1,bus=cxl.2,id=root_port16,chassis=0,slot=6 \
- -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \
+ -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3,sn=0x4 \
-M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k
An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
@@ -375,13 +375,13 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
-device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
-device cxl-upstream,bus=root_port0,id=us0 \
-device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
- -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \
+ -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,sn=0x1 \
-device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \
- -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \
+ -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,sn=0x2 \
-device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \
- -device cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \
+ -device cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,sn=0x3 \
-device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \
- -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \
+ -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,sn=0x4 \
-M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
Deprecations
diff --git a/docs/system/devices/igb.rst b/docs/system/devices/igb.rst
index 04e79df..71f31cb 100644
--- a/docs/system/devices/igb.rst
+++ b/docs/system/devices/igb.rst
@@ -57,11 +57,12 @@ directory:
meson test qtest-x86_64/qos-test
ethtool can test register accesses, interrupts, etc. It is automated as an
-Avocado test and can be ran with the following command:
+functional test and can be run from the build directory with the following
+command:
.. code:: shell
- make check-avocado AVOCADO_TESTS=tests/avocado/netdev-ethtool.py
+ pyvenv/bin/meson test --suite thorough func-x86_64-netdev_ethtool
References
==========
diff --git a/docs/system/devices/ivshmem-flat.rst b/docs/system/devices/ivshmem-flat.rst
new file mode 100644
index 0000000..1f97052
--- /dev/null
+++ b/docs/system/devices/ivshmem-flat.rst
@@ -0,0 +1,33 @@
+Inter-VM Shared Memory Flat Device
+----------------------------------
+
+The ivshmem-flat device is meant to be used on machines that lack a PCI bus,
+making them unsuitable for the use of the traditional ivshmem device modeled as
+a PCI device. Machines like those with a Cortex-M MCU are good candidates to use
+the ivshmem-flat device. Also, since the flat version maps the control and
+status registers directly into memory, it requires only a tiny "device
+driver" to interact with other VMs, which is useful for RTOSes such as
+Zephyr that usually run on resource-constrained targets.
+
+Similar to the ivshmem device, the ivshmem-flat device supports both peer
+notification via HW interrupts and Inter-VM shared memory. This allows the
+device to be used together with the traditional ivshmem, enabling communication
+between, for instance, an aarch64 VM (using the traditional ivshmem device and
+running Linux), and an arm VM (using the ivshmem-flat device and running Zephyr
+instead).
+
+The ivshmem-flat device does not support the use of a ``memdev`` option (see
+ivshmem.rst for more details). It relies on the ivshmem server to create and
+distribute the proper shared memory file descriptor and the eventfd(s) to notify
+(interrupt) the peers. Therefore, to use this device, it is always necessary to
+have an ivshmem server up and running for proper device creation.
+
+Although the ivshmem-flat device supports both peer notification (interrupts)
+and shared memory, the interrupt mechanism is optional. If no input IRQ is
+specified for the device, it is disabled, preventing the VM from notifying or
+being notified by other VMs (a warning is displayed to inform the user that
+the IRQ mechanism is disabled). The shared memory region is always present.
+
+The offsets of the MMRs (INTRMASK, INTRSTATUS, IVPOSITION, and DOORBELL
+registers) within the MMR region, and their functions, follow the ivshmem
+spec, so they work exactly as in the ivshmem PCI device (see ./specs/ivshmem-spec.txt).
diff --git a/docs/system/devices/net.rst b/docs/system/devices/net.rst
index 2ab516d..a3efbdc 100644
--- a/docs/system/devices/net.rst
+++ b/docs/system/devices/net.rst
@@ -77,6 +77,106 @@ When using the ``'-netdev user,hostfwd=...'`` option, TCP or UDP
connections can be redirected from the host to the guest. It allows for
example to redirect X11, telnet or SSH connections.
+Using passt as the user mode network stack
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+passt_ can be used as a simple replacement for SLIRP (``-net user``).
+passt doesn't require any capability or privilege. passt has
+better performance than ``-net user``, full IPv6 support and better security,
+as it is a daemon that does not run in the QEMU process context.
+
+passt can be connected to QEMU either by using a socket
+(``-netdev stream``) or using the vhost-user interface (``-netdev vhost-user``).
+See `passt(1)`_ for more details on passt.
+
+.. _passt: https://passt.top/
+.. _passt(1): https://passt.top/builds/latest/web/passt.1.html
+
+To use socket based passt interface:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Start passt as a daemon::
+
+ passt --socket ~/passt.socket
+
+If ``--socket`` is not provided, passt will print the path of the UNIX domain
+socket QEMU can connect to (``/tmp/passt_1.socket``, ``/tmp/passt_2.socket``,
+...). Then you can connect your QEMU instance to passt:
+
+.. parsed-literal::
+ |qemu_system| [...OPTIONS...] -device virtio-net-pci,netdev=netdev0 -netdev stream,id=netdev0,server=off,addr.type=unix,addr.path=~/passt.socket
+
+Where ``~/passt.socket`` is the UNIX socket created by passt to
+communicate with QEMU.
+
+To use vhost-based interface:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Start passt with ``--vhost-user``::
+
+ passt --vhost-user --socket ~/passt.socket
+
+Then to connect QEMU:
+
+.. parsed-literal::
+ |qemu_system| [...OPTIONS...] -m $RAMSIZE -chardev socket,id=chr0,path=~/passt.socket -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0
+
+Where ``$RAMSIZE`` is the memory size of your VM ``-m`` and ``-object memory-backend-memfd,size=`` must match.
+
+Migration of passt:
+^^^^^^^^^^^^^^^^^^^
+
+When passt is connected to QEMU using the vhost-user interface it can
+be migrated with QEMU and the network connections are not interrupted.
+
+As passt runs with no privileges, it relies on passt-repair to save and
+load the TCP connections state, using the TCP_REPAIR socket option.
+The passt-repair helper needs to have the CAP_NET_ADMIN capability, or run as
+root. If passt-repair is not available, TCP connections will not be preserved.
+
+Example of migration of a guest on the same host
+________________________________________________
+
+Before being able to run passt-repair, the CAP_NET_ADMIN capability must be set
+on the file, run as root::
+
+ setcap cap_net_admin+eip ./passt-repair
+
+Start passt for the source side::
+
+ passt --vhost-user --socket ~/passt_src.socket --repair-path ~/passt-repair_src.socket
+
+Where ``~/passt-repair_src.socket`` is the UNIX socket created by passt to
+communicate with passt-repair. The default value is the ``--socket`` path
+appended with ``.repair``.
+
+Start passt-repair::
+
+ passt-repair ~/passt-repair_src.socket
+
+Start source side QEMU with a monitor to be able to send the migrate command:
+
+.. parsed-literal::
+ |qemu_system| [...OPTIONS...] [...VHOST USER OPTIONS...] -monitor stdio
+
+Start passt for the destination side::
+
+ passt --vhost-user --socket ~/passt_dst.socket --repair-path ~/passt-repair_dst.socket
+
+Start passt-repair::
+
+ passt-repair ~/passt-repair_dst.socket
+
+Start QEMU with the ``-incoming`` parameter:
+
+.. parsed-literal::
+ |qemu_system| [...OPTIONS...] [...VHOST USER OPTIONS...] -incoming tcp:localhost:4444
+
+Then in the source guest monitor the migration can be started::
+
+ (qemu) migrate tcp:localhost:4444
+
+A separate passt-repair instance must be started for every migration. In the
+case of a failed migration, passt-repair also needs to be restarted before
+trying again.
+
Hubs
~~~~
diff --git a/docs/system/devices/nvme.rst b/docs/system/devices/nvme.rst
index d2b1ca9..6509b35 100644
--- a/docs/system/devices/nvme.rst
+++ b/docs/system/devices/nvme.rst
@@ -53,6 +53,13 @@ parameters.
Vendor ID. Set this to ``on`` to revert to the unallocated Intel ID
previously used.
+``ocp`` (default: ``off``)
+ The Open Compute Project defines the Datacenter NVMe SSD Specification that
+ sits on top of NVMe. It describes additional commands and NVMe behaviors
+ specific to the Datacenter. When this option is ``on``, OCP features such as
+ the SMART / Health information extended log become available in the
+ controller. We emulate version 5 of this log page.
+
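+For example (a sketch, assuming a backing drive ``nvm`` set up as in the basic
+example earlier in this document):
+
+.. code-block:: console
+
+ -drive file=nvm.img,if=none,id=nvm
+ -device nvme,serial=deadbeef,drive=nvm,ocp=on
+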
Additional Namespaces
---------------------
diff --git a/docs/system/devices/vfio-user.rst b/docs/system/devices/vfio-user.rst
new file mode 100644
index 0000000..b6dcaa5
--- /dev/null
+++ b/docs/system/devices/vfio-user.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+=========
+vfio-user
+=========
+
+QEMU includes a ``vfio-user`` client. The ``vfio-user`` specification allows for
+implementing (PCI) devices in userspace outside of QEMU; it is similar to
+``vhost-user`` in this respect (see :doc:`vhost-user`), but can emulate arbitrary
+PCI devices, not just ``virtio``. Whereas ``vfio`` is handled by the host
+kernel, ``vfio-user``, while similar in implementation, is handled entirely in
+userspace.
+
+For example, SPDK includes a virtual PCI NVMe controller implementation; by
+setting up a ``vfio-user`` UNIX socket between QEMU and SPDK, a VM can send NVMe
+I/O to the SPDK process.
+
+Presuming a suitable ``vfio-user`` server has opened a socket at
+``/tmp/vfio-user.sock``, a device can be configured, for example, with:
+
+.. code-block:: console
+
+  -device '{"driver": "vfio-user-pci","socket": {"path": "/tmp/vfio-user.sock", "type": "unix"}}'
+
+See `libvfio-user <https://github.com/nutanix/libvfio-user/>`_ for further
+information.
diff --git a/docs/system/devices/virtio-gpu.rst b/docs/system/devices/virtio-gpu.rst
index cb73dd7..b7eb0fc 100644
--- a/docs/system/devices/virtio-gpu.rst
+++ b/docs/system/devices/virtio-gpu.rst
@@ -71,6 +71,17 @@ representation back to OpenGL API calls.
.. _Gallium3D: https://www.freedesktop.org/wiki/Software/gallium/
.. _virglrenderer: https://gitlab.freedesktop.org/virgl/virglrenderer/
+Translation of Vulkan API calls is supported since the release of `virglrenderer`_
+v1.0.0 using the `venus`_ protocol. The ``Venus`` virtio-gpu capability set ("capset")
+requires host blob support (``hostmem`` and ``blob`` fields) and should
+be enabled using the ``venus`` field. The ``hostmem`` field specifies the size
+of the virtio-gpu host memory window. This is typically between 256M and 8G.
+
+.. parsed-literal::
+ -device virtio-gpu-gl,hostmem=8G,blob=true,venus=true
+
+.. _venus: https://gitlab.freedesktop.org/virgl/venus-protocol/
+
virtio-gpu rutabaga
-------------------
diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst
index 4228cb5..d50470b 100644
--- a/docs/system/gdb.rst
+++ b/docs/system/gdb.rst
@@ -20,7 +20,7 @@ connection, use the ``-gdb dev`` option instead of ``-s``. See
.. parsed-literal::
- |qemu_system| -s -S -kernel bzImage -hda rootdisk.img -append "root=/dev/hda"
+ |qemu_system| -s -S -kernel bzImage -drive file=rootdisk.img,format=raw -append "root=/dev/sda"
QEMU will launch but will silently wait for gdb to connect.
diff --git a/docs/system/i386/hyperv.rst b/docs/system/i386/hyperv.rst
index 2505dc4..1c1de77 100644
--- a/docs/system/i386/hyperv.rst
+++ b/docs/system/i386/hyperv.rst
@@ -262,14 +262,19 @@ Supplementary features
``hv-passthrough``
In some cases (e.g. during development) it may make sense to use QEMU in
'pass-through' mode and give Windows guests all enlightenments currently
- supported by KVM. This pass-through mode is enabled by "hv-passthrough" CPU
- flag.
+ supported by KVM.
Note: ``hv-passthrough`` flag only enables enlightenments which are known to QEMU
(have corresponding 'hv-' flag) and copies ``hv-spinlocks`` and ``hv-vendor-id``
values from KVM to QEMU. ``hv-passthrough`` overrides all other 'hv-' settings on
- the command line. Also, enabling this flag effectively prevents migration as the
- list of enabled enlightenments may differ between target and destination hosts.
+ the command line.
+
+ Note: ``hv-passthrough`` does not enable ``hv-syndbg`` which can prevent certain
+ Windows guests from booting when used without proper configuration. If needed,
+ ``hv-syndbg`` can be enabled additionally.
+
+ Note: ``hv-passthrough`` effectively prevents migration as the list of enabled
+ enlightenments may differ between target and destination hosts.
``hv-enforce-cpuid``
By default, KVM allows the guest to use all currently supported Hyper-V
@@ -278,6 +283,36 @@ Supplementary features
feature alters this behavior and only allows the guest to use exposed Hyper-V
enlightenments.
+Recommendations
+---------------
+
+To achieve the best performance of Windows and Hyper-V guests, and unless there
+are any specific requirements (e.g. migration to older QEMU/KVM versions,
+emulating a specific Hyper-V version, ...), it is recommended to enable all
+currently implemented Hyper-V enlightenments with the following exceptions
+(an example command line is sketched after the list):
+
+- ``hv-syndbg``, ``hv-passthrough``, ``hv-enforce-cpuid`` should not be enabled
+ in production configurations as these are debugging/development features.
+- ``hv-reset`` can be avoided as modern Hyper-V versions don't expose it.
+- ``hv-evmcs`` can (and should) be enabled on Intel CPUs only. While the feature
+ is only used in nested configurations (Hyper-V, WSL2), enabling it for regular
+ Windows guests should not have any negative effects.
+- ``hv-no-nonarch-coresharing`` must only be enabled if vCPUs are properly pinned
+ so no non-architectural core sharing is possible.
+- ``hv-vendor-id``, ``hv-version-id-build``, ``hv-version-id-major``,
+ ``hv-version-id-minor``, ``hv-version-id-spack``, ``hv-version-id-sbranch``,
+ ``hv-version-id-snumber`` can be left unchanged, guests are not supposed to
+ behave differently when a different Hyper-V version is presented to them.
+- ``hv-crash`` must only be enabled if the crash information is consumed via
+ QAPI by higher levels of the virtualization stack. Enabling this feature
+ effectively prevents Windows from creating dumps upon crashes.
+- ``hv-reenlightenment`` can only be used on hardware which supports TSC
+ scaling or when guest migration is not needed.
+- ``hv-spinlocks`` should be set to e.g. 0xfff when host CPUs are overcommitted
+ (meaning there are other scheduled tasks or guests) and can be left unchanged
+ from the default value (0xffffffff) otherwise.
+- ``hv-avic``/``hv-apicv`` should not be enabled if the hardware does not
+ support APIC virtualization (Intel APICv, AMD AVIC).
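+
+As a starting point, and following the guidance above, a KVM guest might enable
+a commonly used subset of enlightenments along these lines (a sketch only;
+adjust the flag list and the ``hv-spinlocks`` value to your host and workload):
+
+.. code-block:: bash
+
+ $ qemu-system-x86_64 -accel kvm [...] -cpu host,hv-relaxed,hv-vapic,hv-time,hv-vpindex,hv-synic,hv-stimer,hv-frequencies,hv-runtime,hv-tlbflush,hv-ipi,hv-spinlocks=0xfff
+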
Useful links
------------
diff --git a/docs/system/i386/nitro-enclave.rst b/docs/system/i386/nitro-enclave.rst
new file mode 100644
index 0000000..7317f54
--- /dev/null
+++ b/docs/system/i386/nitro-enclave.rst
@@ -0,0 +1,78 @@
+'nitro-enclave' virtual machine (``nitro-enclave``)
+===================================================
+
+``nitro-enclave`` is a machine type which emulates an *AWS nitro enclave*
+virtual machine. `AWS nitro enclaves`_ is an Amazon EC2 feature that allows
+creating isolated execution environments, called enclaves, from Amazon EC2
+instances which are used for processing highly sensitive data. Enclaves have
+no persistent storage and no external networking. The enclave VMs are based
+on Firecracker microvm with a vhost-vsock device for communication with the
+parent EC2 instance that spawned it and a Nitro Secure Module (NSM) device
+for cryptographic attestation. The parent instance VM always has CID 3 while
+the enclave VM gets a dynamic CID. Enclaves use an EIF (`Enclave Image Format`_)
+file which contains the necessary kernel, cmdline and ramdisk(s) to boot.
+
+In QEMU, ``nitro-enclave`` is a machine type based on ``microvm``, similar to how
+AWS nitro enclaves are based on a `Firecracker`_ microvm. This is useful for
+local testing of EIF files using QEMU instead of running real AWS Nitro Enclaves,
+which can be difficult to debug due to their security-focused design. The vsock
+device emulation is done using vhost-user-vsock, which means another process that
+does the userspace emulation, such as `vhost-device-vsock`_ from the rust-vmm
+crate, must run alongside nitro-enclave for the vsock communication to work.
+
+``libcbor`` and ``gnutls`` are required dependencies for building QEMU from
+source with nitro-enclave machine support.
+
+.. _AWS nitro enclaves: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html
+.. _Enclave Image Format: https://github.com/aws/aws-nitro-enclaves-image-format
+.. _vhost-device-vsock: https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock
+.. _Firecracker: https://firecracker-microvm.github.io
+
+Using the nitro-enclave machine type
+------------------------------------
+
+Machine-specific options
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``nitro-enclave`` machine supports the following machine-specific options:
+
+- nitro-enclave.vsock=string (required) (Id of the chardev from '-chardev' option that vhost-user-vsock device will use)
+- nitro-enclave.id=string (optional) (Set enclave identifier)
+- nitro-enclave.parent-role=string (optional) (Set parent instance IAM role ARN)
+- nitro-enclave.parent-id=string (optional) (Set parent instance identifier)
+
+
+Running a nitro-enclave VM
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First, run `vhost-device-vsock`__ (or a similar tool that supports vhost-user-vsock).
+The ``forward-cid`` option below, with value 1, forwards all connections from the
+enclave VM to the host machine, and the ``forward-listen`` option (port numbers
+separated by '+') forwards connections from the host machine to the enclave VM::
+
+ $ vhost-device-vsock \
+ --vm guest-cid=4,forward-cid=1,forward-listen=9001+9002,socket=/tmp/vhost4.socket
+
+__ https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock#using-the-vsock-backend
+
+Now run the necessary applications on the host machine so that the nitro-enclave VM
+applications' vsock communication works. For example, the nitro-enclave VM's init
+process connects to CID 3 and sends a single-byte hello heartbeat (0xB7) to let the
+parent VM know that it booted, and it expects the same heartbeat byte (0xB7) in
+response. So you must run an AF_VSOCK server on the host machine that listens on
+port 9000 and sends the heartbeat back after receiving it, for the enclave VM to
+boot successfully. You should run all the applications on the host machine that
+would typically be running in the parent EC2 VM for successful communication with
+the enclave VM.
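+
+As an illustration only, such a heartbeat responder can be sketched with
+``socat``, assuming a socat build with AF_VSOCK support and the
+``vsock_loopback`` module loaded on the host (0xB7 is 0267 in octal)::
+
+  $ socat "VSOCK-LISTEN:9000,fork" SYSTEM:'head -c1 >/dev/null; printf "\267"'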
+
+Then run the nitro-enclave VM using the following command, where ``hello-world.eif``
+is an EIF file you would use to spawn a real AWS nitro enclave virtual machine::
+
+ $ qemu-system-x86_64 -M nitro-enclave,vsock=c,id=hello-world \
+ -kernel hello-world.eif -nographic -m 4G --enable-kvm -cpu host \
+ -chardev socket,id=c,path=/tmp/vhost4.socket
+
+In this example, the nitro-enclave VM has CID 4. If there are applications that
+connect to the enclave VM, run them on the host machine after enclave VM starts.
+You need to modify the applications to connect to CID 1 (instead of the enclave
+VM's CID) and use the forward-listen (e.g., 9001+9002) option of vhost-device-vsock
+to forward the ports they connect to.
diff --git a/docs/system/i386/tdx.rst b/docs/system/i386/tdx.rst
new file mode 100644
index 0000000..8131750
--- /dev/null
+++ b/docs/system/i386/tdx.rst
@@ -0,0 +1,161 @@
+Intel Trusted Domain eXtension (TDX)
+====================================
+
+Intel Trusted Domain eXtensions (TDX) refers to an Intel technology that extends
+Virtual Machine Extensions (VMX) and Multi-Key Total Memory Encryption (MKTME)
+with a new kind of virtual machine guest called a Trust Domain (TD). A TD runs
+in a CPU mode that is designed to protect the confidentiality of its memory
+contents and its CPU state from any other software, including the hosting
+Virtual Machine Monitor (VMM), unless explicitly shared by the TD itself.
+
+Prerequisites
+-------------
+
+To run a TD, the physical machine needs to have the TDX module loaded and
+initialized, and the KVM hypervisor needs to support TDX and have it enabled.
+If those requirements are met, ``KVM_CAP_VM_TYPES`` will report support for
+``KVM_X86_TDX_VM``.
+
+Trust Domain Virtual Firmware (TDVF)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Trust Domain Virtual Firmware (TDVF) is required to provide TD services and to
+boot the TD guest OS. TDVF needs to be copied to guest private memory and
+measured before the TD boots.
+
+KVM vcpu ioctl ``KVM_TDX_INIT_MEM_REGION`` can be used to populate the TDVF
+content into its private memory.
+
+Since TDX doesn't support read-only memslots, TDVF cannot be mapped as a pflash
+device; it actually works as RAM. The ``-bios`` option is used to load TDVF.
+
+OVMF is the open source firmware that implements TDVF support. Thus the
+command line to specify and load TDVF is ``-bios OVMF.fd``.
+
+Feature Configuration
+---------------------
+
+Unlike a non-TDX VM, the CPU features (enumerated by CPUID or MSR) of a TD are
+not under full control of the VMM. The VMM can only configure a subset of the
+features of a TD via the ``KVM_TDX_INIT_VM`` command of the VM-scope
+``MEMORY_ENCRYPT_OP`` ioctl.
+
+The configurable features have three types:
+
+- Attributes:
+ - PKS (bit 30) controls whether Supervisor Protection Keys is exposed to TD,
+ which determines related CPUID bit and CR4 bit;
+ - PERFMON (bit 63) controls whether PMU is exposed to TD.
+
+- XSAVE related features (XFAM):
+ XFAM is a 64b mask, which has the same format as XCR0 or IA32_XSS MSR. It
+ determines the set of extended features available for use by the guest TD.
+
+- CPUID features:
+ Only some bits of some CPUID leaves are directly configurable by VMM.
+
+What features can be configured is reported via TDX capabilities.
+
+TDX capabilities
+~~~~~~~~~~~~~~~~
+
+The VM scope ``MEMORY_ENCRYPT_OP`` ioctl provides command ``KVM_TDX_CAPABILITIES``
+to get the TDX capabilities from KVM. It returns a data structure of
+``struct kvm_tdx_capabilities``, which tells the supported configuration of
+attributes, XFAM and CPUIDs.
+
+TD attributes
+~~~~~~~~~~~~~
+
+QEMU supports configuring the raw 64-bit TD attributes directly via the
+"attributes" property of the "tdx-guest" object. Note that it is the user's
+responsibility to provide a valid value, because some bits may not be
+supported by current QEMU or KVM yet.
+
+QEMU also supports the configuration of individual attribute bits that it
+knows about, via properties of the "tdx-guest" object, e.g.,
+"sept-ve-disable" (bit 28).
+
+MSR based features
+~~~~~~~~~~~~~~~~~~
+
+Current KVM doesn't support MSR-based feature (e.g., MSR_IA32_ARCH_CAPABILITIES)
+configuration for TDX; enabling it in QEMU is future work for when KVM adds
+support for it.
+
+Feature check
+~~~~~~~~~~~~~
+
+QEMU checks whether the final (CPU) features, determined by the given CPU model
+and explicit feature adjustments of "+featureA/-featureB", can be supported.
+It can produce a "feature not supported" warning like
+
+ "warning: host doesn't support requested feature: CPUID.07H:EBX.intel-pt [bit 25]"
+
+It can also produce a warning like
+
+ "warning: TDX forcibly sets the feature: CPUID.80000007H:EDX.invtsc [bit 8]"
+
+if a fixed-1 feature is explicitly requested to be disabled. This warning is
+newly added to QEMU for TDX because TDX has fixed-1 features that are forcibly
+enabled by the TDX module and that the VMM cannot disable.
+
+Launching a TD (TDX VM)
+-----------------------
+
+To launch a TD, the necessary command line options are tdx-guest object and
+split kernel-irqchip, as below:
+
+.. parsed-literal::
+
+ |qemu_system_x86| \\
+ -accel kvm \\
+ -cpu host \\
+ -object tdx-guest,id=tdx0 \\
+ -machine ...,confidential-guest-support=tdx0 \\
+ -bios OVMF.fd \\
+
+Restrictions
+------------
+
+ - kernel-irqchip must be split;
+
+ This is set by default for TDX guest if kernel-irqchip is left on its default
+ 'auto' setting.
+
+ - No readonly support for private memory;
+
+ - No SMM support: SMM support requires manipulating the guest register states
+ which is not allowed;
+
+Debugging
+---------
+
+Bit 0 of the TD attributes is the DEBUG bit, which decides whether the TD runs
+in off-TD debug mode. When in off-TD debug mode, the TD's VCPU state and private
+memory are accessible via dedicated SEAMCALLs. This requires KVM to expose APIs
+to invoke those SEAMCALLs and corresponding QEMU changes.
+
+It's targeted as future work.
+
+TD attestation
+--------------
+
+In a TD guest, the attestation process is used to verify the TDX guest's
+trustworthiness to other entities before provisioning secrets to the guest.
+
+TD attestation is initiated by calling TDG.MR.REPORT inside the TD to get the
+REPORT. The REPORT data then needs to be converted into a remotely verifiable
+Quote by the SGX Quoting Enclave (QE).
+
+Adding support for TD attestation in QEMU is future work, since it lacks
+support in current KVM.
+
+Live Migration
+--------------
+
+Future work.
+
+References
+----------
+
+- `TDX Homepage <https://www.intel.com/content/www/us/en/developer/articles/technical/intel-trust-domain-extensions.html>`__
+
+- `SGX QE <https://github.com/intel/SGXDataCenterAttestationPrimitives/tree/master/QuoteGeneration>`__
diff --git a/docs/system/i386/xenpvh.rst b/docs/system/i386/xenpvh.rst
new file mode 100644
index 0000000..354250f
--- /dev/null
+++ b/docs/system/i386/xenpvh.rst
@@ -0,0 +1,49 @@
+Xen PVH machine (``xenpvh``)
+=========================================
+
+Xen supports a spectrum of types of guests that vary in how they depend
+on HW virtualization features, emulation models and paravirtualization.
+PVH is a mode that uses HW virtualization features (like HVM) but tries
+to avoid emulation models and instead uses passthrough or
+paravirtualized devices.
+
+QEMU can be used to provide PV virtio devices on an emulated PCIe controller.
+That is the purpose of this minimal machine.
+
+Supported devices
+-----------------
+
+The x86 Xen PVH QEMU machine provides the following devices:
+
+- RAM
+- GPEX host bridge
+- virtio-pci devices
+
+The idea is to only connect virtio-pci devices, but in theory any compatible
+PCI device model will work depending on Xen and guest support.
+
+Running
+-------
+
+The Xen tools will typically construct a command-line and launch QEMU
+for you when needed. But here's an example of what it can look like in
+case you need to construct one manually:
+
+.. code-block:: console
+
+ qemu-system-i386 -xen-domid 3 -no-shutdown \
+ -chardev socket,id=libxl-cmd,path=/var/run/xen/qmp-libxl-3,server=on,wait=off \
+ -mon chardev=libxl-cmd,mode=control \
+ -chardev socket,id=libxenstat-cmd,path=/var/run/xen/qmp-libxenstat-3,server=on,wait=off \
+ -mon chardev=libxenstat-cmd,mode=control \
+ -nodefaults \
+ -no-user-config \
+ -xen-attach -name g0 \
+ -vnc none \
+ -display none \
+ -device virtio-net-pci,id=nic0,netdev=net0,mac=00:16:3e:5c:81:78 \
+ -netdev type=tap,id=net0,ifname=vif3.0-emu,br=xenbr0,script=no,downscript=no \
+ -smp 4,maxcpus=4 \
+ -nographic \
+ -machine xenpvh,ram-low-base=0,ram-low-size=2147483648,ram-high-base=4294967296,ram-high-size=2147483648,pci-ecam-base=824633720832,pci-ecam-size=268435456,pci-mmio-base=4026531840,pci-mmio-size=33554432,pci-mmio-high-base=824902156288,pci-mmio-high-size=68719476736 \
+ -m 4096
diff --git a/docs/system/images.rst b/docs/system/images.rst
index d000bd6..a555117 100644
--- a/docs/system/images.rst
+++ b/docs/system/images.rst
@@ -82,4 +82,6 @@ VM snapshots currently have the following known limitations:
- A few device drivers still have incomplete snapshot support so their
state is not saved or restored properly (in particular USB).
+.. _block-drivers:
+
.. include:: qemu-block-drivers.rst.inc
diff --git a/docs/system/index.rst b/docs/system/index.rst
index c21065e..718e9d3 100644
--- a/docs/system/index.rst
+++ b/docs/system/index.rst
@@ -39,3 +39,4 @@ or Hypervisor.Framework.
multi-process
confidential-guest-support
vm-templating
+ sriov
diff --git a/docs/system/introduction.rst b/docs/system/introduction.rst
index 746707e..338d374 100644
--- a/docs/system/introduction.rst
+++ b/docs/system/introduction.rst
@@ -169,7 +169,7 @@ would default to it anyway.
.. code::
- -cpu max,pauth-impdef=on \
+ -cpu max \
-smp 4 \
-accel tcg \
diff --git a/docs/system/linuxboot.rst b/docs/system/linuxboot.rst
index 5db2e56..2328b4a 100644
--- a/docs/system/linuxboot.rst
+++ b/docs/system/linuxboot.rst
@@ -11,7 +11,7 @@ The syntax is:
.. parsed-literal::
- |qemu_system| -kernel bzImage -hda rootdisk.img -append "root=/dev/hda"
+ |qemu_system| -kernel bzImage -drive file=rootdisk.img,format=raw -append "root=/dev/sda"
Use ``-kernel`` to provide the Linux kernel image and ``-append`` to
give the kernel command line arguments. The ``-initrd`` option can be
@@ -23,8 +23,8 @@ virtual serial port and the QEMU monitor to the console with the
.. parsed-literal::
- |qemu_system| -kernel bzImage -hda rootdisk.img \
- -append "root=/dev/hda console=ttyS0" -nographic
+ |qemu_system| -kernel bzImage -drive file=rootdisk.img,format=raw \
+ -append "root=/dev/sda console=ttyS0" -nographic
Use Ctrl-a c to switch between the serial console and the monitor (see
:ref:`GUI_keys`).
diff --git a/docs/system/loongarch/virt.rst b/docs/system/loongarch/virt.rst
index 06d034b..7845878 100644
--- a/docs/system/loongarch/virt.rst
+++ b/docs/system/loongarch/virt.rst
@@ -12,14 +12,15 @@ Supported devices
-----------------
The ``virt`` machine supports:
-- Gpex host bridge
-- Ls7a RTC device
-- Ls7a IOAPIC device
-- ACPI GED device
-- Fw_cfg device
-- PCI/PCIe devices
-- Memory device
-- CPU device. Type: la464.
+
+* Gpex host bridge
+* Ls7a RTC device
+* Ls7a IOAPIC device
+* ACPI GED device
+* Fw_cfg device
+* PCI/PCIe devices
+* Memory device
+* CPU device. Type: la464.
CPU and machine Type
--------------------
@@ -39,13 +40,7 @@ can be accessed by following steps.
.. code-block:: bash
- ./configure --disable-rdma --prefix=/usr \
- --target-list="loongarch64-softmmu" \
- --disable-libiscsi --disable-libnfs --disable-libpmem \
- --disable-glusterfs --enable-libusb --enable-usb-redir \
- --disable-opengl --disable-xen --enable-spice \
- --enable-debug --disable-capstone --disable-kvm \
- --enable-profiler
+ ./configure --target-list="loongarch64-softmmu"
make -j8
(2) Set cross tools:
@@ -53,9 +48,7 @@ can be accessed by following steps.
.. code-block:: bash
wget https://github.com/loongson/build-tools/releases/download/2022.09.06/loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz
-
tar -vxf loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz -C /opt
-
export PATH=/opt/cross-tools/bin:$PATH
export LD_LIBRARY_PATH=/opt/cross-tools/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/opt/cross-tools/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH
@@ -64,7 +57,7 @@ Note: You need get the latest cross-tools at https://github.com/loongson/build-t
(3) Build BIOS:
- See: https://github.com/tianocore/edk2-platforms/tree/master/Platform/Loongson/LoongArchQemuPkg#readme
+ See: https://github.com/tianocore/edk2/tree/master/OvmfPkg/LoongArchVirt#readme
Note: To build the release version of the bios, set --buildtarget=RELEASE,
the bios file path: Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd
@@ -74,13 +67,9 @@ Note: To build the release version of the bios, set --buildtarget=RELEASE,
.. code-block:: bash
git clone https://github.com/loongson/linux.git
-
cd linux
-
git checkout loongarch-next
-
make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- loongson3_defconfig
-
make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- -j32
Note: The branch of linux source code is loongarch-next.
diff --git a/docs/system/ppc/amigang.rst b/docs/system/ppc/amigang.rst
index e2c9cb7..21bb14e 100644
--- a/docs/system/ppc/amigang.rst
+++ b/docs/system/ppc/amigang.rst
@@ -21,6 +21,7 @@ Emulated devices
* VIA VT82C686B south bridge
* PCI VGA compatible card (guests may need other card instead)
* PS/2 keyboard and mouse
+ * 4 KiB NVRAM (use ``-drive if=mtd,format=raw,file=nvram.bin`` to keep contents persistent)
Firmware
--------
@@ -54,14 +55,14 @@ To boot the system run:
-cdrom "A1 Linux Net Installer.iso" \
-device ati-vga,model=rv100,romfile=VGABIOS-lgpl-latest.bin
-From the firmware menu that appears select ``Boot sequence`` →
-``Amiga Multiboot Options`` and set ``Boot device 1`` to
-``Onboard VIA IDE CDROM``. Then hit escape until the main screen appears again,
-hit escape once more and from the exit menu that appears select either
-``Save settings and exit`` or ``Use settings for this session only``. It may
-take a long time loading the kernel into memory but eventually it boots and the
-installer becomes visible. The ``ati-vga`` RV100 emulation is not
-complete yet so only frame buffer works, DRM and 3D is not available.
+If a firmware menu appears, select ``Boot sequence`` → ``Amiga Multiboot Options``
+and set ``Boot device 1`` to ``Onboard VIA IDE CDROM``. Then hit escape until
+the main screen appears again, hit escape once more and from the exit menu that
+appears select either ``Save settings and exit`` or ``Use settings for this
+session only``. It may take a long time loading the kernel into memory but
+eventually it boots and the installer becomes visible. The ``ati-vga`` RV100
+emulation is not complete yet so only frame buffer works, DRM and 3D is not
+available.
Genesi/bPlan Pegasos II (``pegasos2``)
======================================
diff --git a/docs/system/ppc/embedded.rst b/docs/system/ppc/embedded.rst
index af3b3d9..5cb7d98 100644
--- a/docs/system/ppc/embedded.rst
+++ b/docs/system/ppc/embedded.rst
@@ -4,6 +4,5 @@ Embedded family boards
- ``bamboo`` bamboo
- ``mpc8544ds`` mpc8544ds
- ``ppce500`` generic paravirt e500 platform
-- ``ref405ep`` ref405ep
- ``sam460ex`` aCube Sam460ex
- ``virtex-ml507`` Xilinx Virtex ML507 reference design
diff --git a/docs/system/ppc/powermac.rst b/docs/system/ppc/powermac.rst
index 04334ba..3eac81c 100644
--- a/docs/system/ppc/powermac.rst
+++ b/docs/system/ppc/powermac.rst
@@ -4,8 +4,8 @@ PowerMac family boards (``g3beige``, ``mac99``)
Use the executable ``qemu-system-ppc`` to simulate a complete PowerMac
PowerPC system.
-- ``g3beige`` Heathrow based PowerMAC
-- ``mac99`` Mac99 based PowerMAC
+- ``g3beige`` Heathrow based PowerMac
+- ``mac99`` Mac99 based PowerMac
Supported devices
-----------------
diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst
index 09f3965..f3ec2cc 100644
--- a/docs/system/ppc/powernv.rst
+++ b/docs/system/ppc/powernv.rst
@@ -181,7 +181,7 @@ connected to a remote QEMU machine acting as BMC, using these options
.. code-block:: bash
- -chardev socket,id=ipmi0,host=localhost,port=9002,reconnect=10 \
+ -chardev socket,id=ipmi0,host=localhost,port=9002,reconnect-ms=10000 \
-device ipmi-bmc-extern,id=bmc0,chardev=ipmi0 \
-device isa-ipmi-bt,bmc=bmc0,irq=10 \
-nodefaults
@@ -195,6 +195,13 @@ Use a MTD drive to add a PNOR to the machine, and get a NVRAM :
-drive file=./witherspoon.pnor,format=raw,if=mtd
+If no mtd drive is provided, the powernv platform will create a default
+PNOR device using a tiny formatted PNOR in pc-bios/pnv-pnor.bin opened
+read-only (PNOR changes will be persistent across reboots but not across
+invocations of QEMU). If no defaults are used, an erased 128MB PNOR is
+provided (which skiboot will probably not recognize since it is not
+formatted).
+
Maintainer contact information
------------------------------
diff --git a/docs/system/ppc/pseries.rst b/docs/system/ppc/pseries.rst
index a876d89..bbc51aa 100644
--- a/docs/system/ppc/pseries.rst
+++ b/docs/system/ppc/pseries.rst
@@ -14,10 +14,19 @@ virtualization capabilities.
Supported devices
=================
- * Multi processor support for many Power processors generations: POWER7,
- POWER7+, POWER8, POWER8NVL, POWER9, and Power10. Support for POWER5+ exists,
- but its state is unknown.
- * Interrupt Controller, XICS (POWER8) and XIVE (POWER9 and Power10)
+ * Multi processor support for many Power processors generations:
+ - POWER7, POWER7+
+ - POWER8, POWER8NVL
+ - POWER9
+ - Power10
+ - Power11
+ - Support for POWER5+ also exists, works with correct kernel/userspace
+ * Interrupt Controller
+ - XICS (POWER8)
+ - XIVE (Supported by below:)
+ - POWER9
+ - Power10
+ - Power11
* vPHB PCIe Host bridge.
* vscsi and vnet devices, compatible with the same devices available on a
PowerVM hypervisor with VIOS managing LPARs.
diff --git a/docs/system/riscv/microblaze-v-generic.rst b/docs/system/riscv/microblaze-v-generic.rst
new file mode 100644
index 0000000..5606f88
--- /dev/null
+++ b/docs/system/riscv/microblaze-v-generic.rst
@@ -0,0 +1,42 @@
+Microblaze-V generic board (``amd-microblaze-v-generic``)
+=========================================================
+The AMD MicroBlaze™ V processor is a soft-core RISC-V processor IP for AMD
+adaptive SoCs and FPGAs. The MicroBlaze™ V processor is based on the 32-bit (or
+64-bit) RISC-V instruction set architecture (ISA) and contains interfaces
+compatible with the classic MicroBlaze™ V processor (i.e. it is a drop-in
+replacement for the classic MicroBlaze™ processor in existing RTL designs).
+More information can be found in the document below.
+
+https://docs.amd.com/r/en-US/ug1629-microblaze-v-user-guide/MicroBlaze-V-Architecture
+
+The MicroBlaze™ V generic board in QEMU has the following supported devices:
+
+ - timer
+ - uartlite
+ - uart16550
+ - emaclite
+ - timer2
+ - axi emac
+ - axi dma
+
+The MicroBlaze™ V core in QEMU has the following configuration:
+
+ - RV32I base integer instruction set
+ - "Zicsr" Control and Status register instructions
+ - "Zifencei" instruction-fetch
+ - Extensions: m, a, f, c
+
+Running
+"""""""
+Below is an example command line for launching mainline U-Boot
+(xilinx_mbv32_defconfig) on the Microblaze-V generic board.
+
+.. code-block:: bash
+
+ $ qemu-system-riscv32 -M amd-microblaze-v-generic \
+ -display none \
+ -device loader,addr=0x80000000,file=u-boot-spl.bin,cpu-num=0 \
+ -device loader,addr=0x80200000,file=u-boot.img \
+ -serial mon:stdio \
+ -device loader,addr=0x83000000,file=system.dtb \
+ -m 2g
diff --git a/docs/system/riscv/microchip-icicle-kit.rst b/docs/system/riscv/microchip-icicle-kit.rst
index 40798b1..9809e94 100644
--- a/docs/system/riscv/microchip-icicle-kit.rst
+++ b/docs/system/riscv/microchip-icicle-kit.rst
@@ -5,10 +5,10 @@ Microchip PolarFire SoC Icicle Kit integrates a PolarFire SoC, with one
SiFive's E51 plus four U54 cores and many on-chip peripherals and an FPGA.
For more details about Microchip PolarFire SoC, please see:
-https://www.microsemi.com/product-directory/soc-fpgas/5498-polarfire-soc-fpga
+https://www.microchip.com/en-us/products/fpgas-and-plds/system-on-chip-fpgas/polarfire-soc-fpgas
The Icicle Kit board information can be found here:
-https://www.microsemi.com/existing-parts/parts/152514
+https://www.microchip.com/en-us/development-tool/mpfs-icicle-kit-es
Supported devices
-----------------
@@ -26,95 +26,48 @@ The ``microchip-icicle-kit`` machine supports the following devices:
* 2 GEM Ethernet controllers
* 1 SDHC storage controller
+The memory is set to 1537 MiB by default. A sanity check on the RAM size is
+performed in the machine init routine to prompt the user to increase the RAM
+size to more than 1537 MiB when less than 1537 MiB of RAM is detected.
+
Boot options
------------
-The ``microchip-icicle-kit`` machine can start using the standard -bios
-functionality for loading its BIOS image, aka Hart Software Services (HSS_).
-HSS loads the second stage bootloader U-Boot from an SD card. Then a kernel
-can be loaded from U-Boot. It also supports direct kernel booting via the
--kernel option along with the device tree blob via -dtb. When direct kernel
-boot is used, the OpenSBI fw_dynamic BIOS image is used to boot a payload
-like U-Boot or OS kernel directly.
-
-The user provided DTB should have the following requirements:
-
-* The /cpus node should contain at least one subnode for E51 and the number
- of subnodes should match QEMU's ``-smp`` option
-* The /memory reg size should match QEMU’s selected ram_size via ``-m``
-* Should contain a node for the CLINT device with a compatible string
- "riscv,clint0"
-
-QEMU follows below truth table to select which payload to execute:
-
-===== ========== ========== =======
--bios -kernel -dtb payload
-===== ========== ========== =======
- N N don't care HSS
- Y don't care don't care HSS
- N Y Y kernel
-===== ========== ========== =======
-
-The memory is set to 1537 MiB by default which is the minimum required high
-memory size by HSS. A sanity check on ram size is performed in the machine
-init routine to prompt user to increase the RAM size to > 1537 MiB when less
-than 1537 MiB ram is detected.
-
-Running HSS
------------
-
-HSS 2020.12 release is tested at the time of writing. To build an HSS image
-that can be booted by the ``microchip-icicle-kit`` machine, type the following
-in the HSS source tree:
-
-.. code-block:: bash
-
- $ export CROSS_COMPILE=riscv64-linux-
- $ cp boards/mpfs-icicle-kit-es/def_config .config
- $ make BOARD=mpfs-icicle-kit-es
-
-Download the official SD card image released by Microchip and prepare it for
-QEMU usage:
-
-.. code-block:: bash
-
- $ wget ftp://ftpsoc.microsemi.com/outgoing/core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
- $ gunzip core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
- $ qemu-img resize core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic 4G
-
-Then we can boot the machine by:
-
-.. code-block:: bash
-
- $ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 \
- -bios path/to/hss.bin -sd path/to/sdcard.img \
- -nic user,model=cadence_gem \
- -nic tap,ifname=tap,model=cadence_gem,script=no \
- -display none -serial stdio \
- -chardev socket,id=serial1,path=serial1.sock,server=on,wait=on \
- -serial chardev:serial1
+The ``microchip-icicle-kit`` machine provides some options to run a firmware
+(BIOS) or a kernel image. QEMU follows the truth table below to select the
+firmware:
-With above command line, current terminal session will be used for the first
-serial port. Open another terminal window, and use ``minicom`` to connect the
-second serial port.
+============= =========== ======================================
+-bios -kernel firmware
+============= =========== ======================================
+none N this is an error
+none Y the kernel image
+NULL, default N hss.bin
+NULL, default Y opensbi-riscv64-generic-fw_dynamic.bin
+other don't care the BIOS image
+============= =========== ======================================
-.. code-block:: bash
+Direct Kernel Boot
+------------------
- $ minicom -D unix\#serial1.sock
+Use the ``-kernel`` option to directly run a kernel image. When a direct
+kernel boot is requested, a device tree blob may be specified via the ``-dtb``
+option. Unlike other QEMU machines, this machine does not generate a device
+tree for the kernel. It shall be provided by the user. The user-provided DTB
+should meet the following requirements:
-HSS output is on the first serial port (stdio) and U-Boot outputs on the
-second serial port. U-Boot will automatically load the Linux kernel from
-the SD card image.
+* The ``/cpus`` node should contain at least one subnode for E51 and the number
+ of subnodes should match QEMU's ``-smp`` option.
-Direct Kernel Boot
-------------------
+* The ``/memory`` reg size should match QEMU’s selected RAM size via the ``-m``
+ option.
-Sometimes we just want to test booting a new kernel, and transforming the
-kernel image to the format required by the HSS bootflow is tedious. We can
-use '-kernel' for direct kernel booting just like other RISC-V machines do.
+* It should contain a node for the CLINT device with a compatible string
+ "riscv,clint0".
-In this mode, the OpenSBI fw_dynamic BIOS image for 'generic' platform is
-used to boot an S-mode payload like U-Boot or OS kernel directly.
+When ``-bios`` is not specified or set to ``default``, the OpenSBI
+``fw_dynamic`` BIOS image for the ``generic`` platform is used to boot an
+S-mode payload like U-Boot or OS kernel directly.
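+
+Once a payload is available (see the U-Boot build example below), a direct
+kernel boot invocation might look like the following sketch, where the payload
+and DTB file names are placeholders:
+
+.. code-block:: bash
+
+  $ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 -m 2G \
+      -display none -serial stdio \
+      -kernel u-boot.bin -dtb mpfs-icicle-kit.dtb
+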
For example, the following commands show building a U-Boot image from U-Boot
mainline v2021.07 for the Microchip Icicle Kit board:
@@ -146,4 +99,13 @@ CAVEATS:
``u-boot.bin`` has to be used which does contain one. To use the ELF image,
we need to change to CONFIG_OF_EMBED or CONFIG_OF_PRIOR_STAGE.
+Running HSS
+-----------
+
+The ``microchip-icicle-kit`` machine used to run the Hart Software Services
+(HSS_); however, HSS development has progressed and the QEMU machine
+implementation lags behind. Running the HSS currently no longer works:
+support is missing in the clock and memory controller devices. In
+particular, reading from the SD card does not work.
+
.. _HSS: https://github.com/polarfire-soc/hart-software-services
diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst
index 9a06f95..6085097 100644
--- a/docs/system/riscv/virt.rst
+++ b/docs/system/riscv/virt.rst
@@ -84,6 +84,25 @@ none``, as in
Firmware images used for pflash must be exactly 32 MiB in size.
+riscv-iommu support
+-------------------
+
+The board supports the riscv-iommu-pci device, which can be added with the
+following command line:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M virt -device riscv-iommu-pci (...)
+
+It also has support for the riscv-iommu-sys platform device:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M virt,iommu-sys=on (...)
+
+Refer to :ref:`riscv-iommu` for more information on how the RISC-V IOMMU support
+works.
+
Machine-specific options
------------------------
@@ -110,12 +129,23 @@ The following machine-specific options are supported:
MSIs. When not specified, this option is assumed to be "none" which selects
SiFive PLIC to handle wired interrupts.
+ This option also interacts with '-accel kvm'. When using "aia=aplic-imsic"
+ with KVM, it is possible to set the use of the kernel irqchip in split mode
+ by using "-accel kvm,kernel-irqchip=split". In this case the ``virt`` machine
+ will emulate the APLIC controller instead of using the APLIC controller from
+ the irqchip. See :ref:`riscv-aia` for more details on all available AIA
+ modes.
+
- aia-guests=nnn
The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest
having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified,
the default number of per-HART VS-level AIA IMSIC pages is 0.
+- iommu-sys=[on|off]
+
+ Enables the riscv-iommu-sys platform device. Defaults to 'off'.
+
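+For example, the ``aia=aplic-imsic`` configuration described above can be
+combined with KVM's split irqchip as follows (a sketch; other machine options
+omitted):
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -M virt,aia=aplic-imsic -accel kvm,kernel-irqchip=split (...)
+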
Running Linux kernel
--------------------
diff --git a/docs/system/s390x/bootdevices.rst b/docs/system/s390x/bootdevices.rst
index 1a7a18b..97b3914 100644
--- a/docs/system/s390x/bootdevices.rst
+++ b/docs/system/s390x/bootdevices.rst
@@ -6,9 +6,7 @@ Booting with bootindex parameter
For classical mainframe guests (i.e. LPAR or z/VM installations), you always
have to explicitly specify the disk where you want to boot from (or "IPL" from,
-in s390x-speak -- IPL means "Initial Program Load"). In particular, there can
-also be only one boot device according to the architecture specification, thus
-specifying multiple boot devices is not possible (yet).
+in s390x-speak -- IPL means "Initial Program Load").
So for booting an s390x guest in QEMU, you should always mark the
device where you want to boot from with the ``bootindex`` property, for
@@ -17,6 +15,11 @@ example::
qemu-system-s390x -drive if=none,id=dr1,file=guest.qcow2 \
-device virtio-blk,drive=dr1,bootindex=1
+Multiple devices may have a bootindex. The device with the lowest bootindex is
+IPLed first. If IPL fails for the first device, the device with the second
+lowest bootindex will be tried, and so on, until IPL succeeds or there are no
+remaining boot devices to try.
+
For booting from a CD-ROM ISO image (which needs to include El-Torito boot
information in order to be bootable), it is recommended to specify a ``scsi-cd``
device, for example like this::
@@ -76,29 +79,45 @@ The second way to use this parameter is to use a number in the range from 0
to 31. The numbers that can be used here correspond to the numbers that are
shown when using the ``PROMPT`` option, and the s390-ccw bios will then try
to automatically boot the kernel that is associated with the given number.
-Note that ``0`` can be used to boot the default entry.
+Note that ``0`` can be used to boot the default entry. If the machine
+``loadparm`` is not assigned a value, then the default entry is used.
+
+By default, the machine ``loadparm`` applies to all boot devices. If multiple
+devices are assigned a ``bootindex`` and the ``loadparm`` is to be different
+between them, an independent ``loadparm`` may be assigned on a per-device basis.
+
+An example guest using per-device ``loadparm``::
+
+ qemu-system-s390x -drive if=none,id=dr1,file=primary.qcow2 \
+ -device virtio-blk,drive=dr1,bootindex=1 \
+ -drive if=none,id=dr2,file=secondary.qcow2 \
+ -device virtio-blk,drive=dr2,bootindex=2,loadparm=3
+
+In this case, the primary boot device will attempt to IPL using the default
+entry (because no ``loadparm`` is specified for this device or for the
+machine). If that device fails to boot, the secondary device will attempt to
+IPL using entry number 3.
+
+If a ``loadparm`` is specified on both the machine and a device, the per-device
+value will supersede the machine value. Per-device ``loadparm`` values are
+only used for devices with an assigned ``bootindex``. The machine ``loadparm``
+is used when attempting to boot without a ``bootindex``.
Booting from a network device
-----------------------------
-Beside the normal guest firmware (which is loaded from the file ``s390-ccw.img``
-in the data directory of QEMU, or via the ``-bios`` option), QEMU ships with
-a small TFTP network bootloader firmware for virtio-net-ccw devices, too. This
-firmware is loaded from a file called ``s390-netboot.img`` in the QEMU data
-directory. In case you want to load it from a different filename instead,
-you can specify it via the ``-global s390-ipl.netboot_fw=filename``
-command line option.
-
-The ``bootindex`` property is especially important for booting via the network.
-If you don't specify the ``bootindex`` property here, the network bootloader
-firmware code won't get loaded into the guest memory so that the network boot
-will fail. For a successful network boot, try something like this::
+The firmware that ships with QEMU includes a small TFTP network bootloader
+for virtio-net-ccw devices. The ``bootindex`` property is especially
+important for booting via the network. If you don't specify the ``bootindex``
+property here, the network bootloader won't be taken into consideration and
+the network boot will fail. For a successful network boot, try something
+like this::
qemu-system-s390x -netdev user,id=n1,tftp=...,bootfile=... \
-device virtio-net-ccw,netdev=n1,bootindex=1
-The network bootloader firmware also has basic support for pxelinux.cfg-style
+The network bootloader also has basic support for pxelinux.cfg-style
configuration files. See the `PXELINUX Configuration page
<https://wiki.syslinux.org/wiki/index.php?title=PXELINUX#Configuration>`__
for details how to set up the configuration file on your TFTP server.
diff --git a/docs/system/sriov.rst b/docs/system/sriov.rst
new file mode 100644
index 0000000..d12178f
--- /dev/null
+++ b/docs/system/sriov.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Composable SR-IOV device
+========================
+
+SR-IOV (Single Root I/O Virtualization) is an optional extended capability of a
+PCI Express device. It allows a single physical function (PF) to appear as
+multiple virtual functions (VFs) for the main purpose of eliminating software
+overhead in I/O from virtual machines.
+
+There are devices with predefined SR-IOV configurations, but it is also possible
+to compose an SR-IOV device yourself. Composing an SR-IOV device is currently
+only supported by virtio-net-pci.
+
+Users can configure an SR-IOV-capable virtio-net device by adding
+virtio-net-pci functions to a bus. Below is a command line example:
+
+.. code-block:: shell
+
+ -netdev user,id=n -netdev user,id=o
+ -netdev user,id=p -netdev user,id=q
+ -device pcie-root-port,id=b
+ -device virtio-net-pci,bus=b,addr=0x0.0x3,netdev=q,sriov-pf=f
+ -device virtio-net-pci,bus=b,addr=0x0.0x2,netdev=p,sriov-pf=f
+ -device virtio-net-pci,bus=b,addr=0x0.0x1,netdev=o,sriov-pf=f
+ -device virtio-net-pci,bus=b,addr=0x0.0x0,netdev=n,id=f
+
+The VFs specify the paired PF with the ``sriov-pf`` property. The PF must be
+added after all VFs. It is the user's responsibility to ensure that VFs have
+function numbers larger than that of the PF, and that the function numbers
+have a consistent stride. Both the PF and VFs are ARI-capable, so you can have
+at most 255 VFs.
+
+You may also need to perform additional steps to activate the SR-IOV feature on
+your guest. For Linux, refer to [1]_.
+
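+For example, on a Linux guest the VFs can typically be instantiated through the
+PF's ``sriov_numvfs`` sysfs attribute (the PCI address below is only a
+placeholder; see [1]_ for details):
+
+.. code-block:: shell
+
+    echo 3 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+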
+.. [1] https://docs.kernel.org/PCI/pci-iov-howto.html
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index 7b99272..b96a05a 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -63,10 +63,6 @@ large amounts of RAM. It also supports 64-bit CPUs.
Board-specific documentation
============================
-Unfortunately many of the Arm boards QEMU supports are currently
-undocumented; you can get a complete list by running
-``qemu-system-aarch64 --machine help``.
-
..
This table of contents should be kept sorted alphabetically
by the title text of each file, which isn't the same ordering
@@ -90,26 +86,28 @@ undocumented; you can get a complete list by running
arm/digic
arm/cubieboard
arm/emcraft-sf2
+ arm/exynos
+ arm/fby35
arm/musicpal
- arm/gumstix
- arm/mainstone
arm/kzm
- arm/nseries
arm/nrf
arm/nuvoton
arm/imx25-pdk
+ arm/mcimx6ul-evk
+ arm/mcimx7d-sabre
+ arm/imx8mp-evk
arm/orangepi
- arm/palm
arm/raspi
- arm/xscale
arm/collie
arm/sx1
arm/stellaris
arm/stm32
arm/virt
+ arm/vmapple
arm/xenpvh
arm/xlnx-versal-virt
arm/xlnx-zynq
+ arm/xlnx-zcu102
Emulated CPU architecture support
=================================
diff --git a/docs/system/target-i386.rst b/docs/system/target-i386.rst
index 1b8a1f2..43b09c7 100644
--- a/docs/system/target-i386.rst
+++ b/docs/system/target-i386.rst
@@ -14,8 +14,9 @@ Board-specific documentation
.. toctree::
:maxdepth: 1
- i386/microvm
i386/pc
+ i386/microvm
+ i386/nitro-enclave
Architectural features
~~~~~~~~~~~~~~~~~~~~~~
@@ -26,9 +27,11 @@ Architectural features
i386/cpu
i386/hyperv
i386/xen
+ i386/xenpvh
i386/kvm-pv
i386/sgx
i386/amd-memory-encryption
+ i386/tdx
OS requirements
~~~~~~~~~~~~~~~
diff --git a/docs/system/target-loongarch.rst b/docs/system/target-loongarch.rst
new file mode 100644
index 0000000..316c604
--- /dev/null
+++ b/docs/system/target-loongarch.rst
@@ -0,0 +1,19 @@
+.. _LoongArch-System-emulator:
+
+LoongArch System emulator
+-------------------------
+
+QEMU can emulate LoongArch 64-bit systems via the
+``qemu-system-loongarch64`` binary. Only one machine type, ``virt``, is
+supported.
+
+When using KVM as the accelerator, QEMU can emulate the la464 CPU model. When
+using the default CPU model with TCG as the accelerator, QEMU will emulate a
+subset of la464 CPU features that should be enough to run distributions
+built for the la464.
+
+Board-specific documentation
+============================
+
+.. toctree::
+ loongarch/virt
diff --git a/docs/system/target-mips.rst b/docs/system/target-mips.rst
index 83239fb..9028c3b 100644
--- a/docs/system/target-mips.rst
+++ b/docs/system/target-mips.rst
@@ -112,5 +112,5 @@ https://mipsdistros.mips.com/LinuxDistro/nanomips/kernels/v4.15.18-432-gb2eb9a8b
Start system emulation of Malta board with nanoMIPS I7200 CPU::
qemu-system-mipsel -cpu I7200 -kernel <kernel_image_file> \
- -M malta -serial stdio -m <memory_size> -hda <disk_image_file> \
+ -M malta -serial stdio -m <memory_size> -drive file=<disk_image_file>,format=raw \
-append "mem=256m@0x0 rw console=ttyS0 vga=cirrus vesa=0x111 root=/dev/sda"
diff --git a/docs/system/target-riscv.rst b/docs/system/target-riscv.rst
index ba195f1..95457af 100644
--- a/docs/system/target-riscv.rst
+++ b/docs/system/target-riscv.rst
@@ -66,6 +66,7 @@ undocumented; you can get a complete list by running
.. toctree::
:maxdepth: 1
+ riscv/microblaze-v-generic
riscv/microchip-icicle-kit
riscv/shakti-c
riscv/sifive_u
diff --git a/docs/system/targets.rst b/docs/system/targets.rst
index 224fada..38e2418 100644
--- a/docs/system/targets.rst
+++ b/docs/system/targets.rst
@@ -18,6 +18,7 @@ Contents:
target-arm
target-avr
+ target-loongarch
target-m68k
target-mips
target-ppc
diff --git a/docs/tools/index.rst b/docs/tools/index.rst
index 8e65ce0..1e88ae4 100644
--- a/docs/tools/index.rst
+++ b/docs/tools/index.rst
@@ -15,4 +15,4 @@ command line utilities and other standalone programs.
qemu-nbd
qemu-pr-helper
qemu-trace-stap
- virtfs-proxy-helper
+ qemu-vmsr-helper
diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst
index 329f44d..f82ea5f 100644
--- a/docs/tools/qemu-nbd.rst
+++ b/docs/tools/qemu-nbd.rst
@@ -1,3 +1,5 @@
+.. _qemu-nbd:
+
=====================================
QEMU Disk Network Block Device Server
=====================================
@@ -154,6 +156,11 @@ driver options if :option:`--image-opts` is specified.
Set the NBD volume export description, as a human-readable
string.
+.. option:: --handshake-limit=N
+
+ Set the timeout for a client to successfully complete its handshake
+ to N seconds (default 10), or 0 for no limit.
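+
+  For example, to give clients 30 seconds to complete the handshake when
+  exporting a raw image (the image name is only a placeholder)::
+
+    qemu-nbd --handshake-limit=30 -f raw disk.img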
+
.. option:: -L, --list
Connect as a client and list all details about the exports exposed by
diff --git a/docs/tools/qemu-storage-daemon.rst b/docs/tools/qemu-storage-daemon.rst
index ea00149..35ab2d7 100644
--- a/docs/tools/qemu-storage-daemon.rst
+++ b/docs/tools/qemu-storage-daemon.rst
@@ -1,3 +1,5 @@
+.. _storage-daemon:
+
===================
QEMU Storage Daemon
===================
diff --git a/docs/tools/qemu-vmsr-helper.rst b/docs/tools/qemu-vmsr-helper.rst
new file mode 100644
index 0000000..9ce10b9
--- /dev/null
+++ b/docs/tools/qemu-vmsr-helper.rst
@@ -0,0 +1,89 @@
+==================================
+QEMU virtual RAPL MSR helper
+==================================
+
+Synopsis
+--------
+
+**qemu-vmsr-helper** [*OPTION*]
+
+Description
+-----------
+
+Implements the virtual RAPL MSR helper for QEMU.
+
+Accessing the RAPL (Running Average Power Limit) MSR enables the RAPL powercap
+driver to advertise and monitor the power consumption or accumulated energy
+consumption of different power domains, such as CPU packages, DRAM, and other
+components when available.
+
+However, those registers are only accessible with privileged access (CAP_SYS_RAWIO).
+QEMU can use an external helper to access those privileged registers.
+
+:program:`qemu-vmsr-helper` is that external helper; it creates a listener
+socket which will accept incoming connections for communication with QEMU.
+
+If you want to run VMs in a setup like this, this helper should be started as a
+system service, and you should read the QEMU manual section on "RAPL MSR
+support" to find out how to configure QEMU to connect to the socket created by
+:program:`qemu-vmsr-helper`.
+
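+A manual invocation and the matching QEMU side could look like the following
+sketch (the socket path is arbitrary; the ``rapl`` accelerator properties shown
+here are assumptions to be checked against the "RAPL MSR support" section)::
+
+  # run the helper with enough privilege to read the RAPL MSRs
+  qemu-vmsr-helper --socket /run/qemu-vmsr-helper.sock
+
+  # point QEMU at the helper's socket
+  qemu-system-x86_64 -accel kvm,rapl=true,rapl-helper-socket=/run/qemu-vmsr-helper.sock ...
+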
+After connecting to the socket, :program:`qemu-vmsr-helper` can
+optionally drop root privileges, except for those capabilities that
+are needed for its operation.
+
+:program:`qemu-vmsr-helper` can also use the systemd socket activation
+protocol. In this case, the systemd socket unit should specify a
+Unix stream socket, like this::
+
+ [Socket]
+ ListenStream=/var/run/qemu-vmsr-helper.sock
+
+Options
+-------
+
+.. program:: qemu-vmsr-helper
+
+.. option:: -d, --daemon
+
+ run in the background (and create a PID file)
+
+.. option:: -q, --quiet
+
+ decrease verbosity
+
+.. option:: -v, --verbose
+
+ increase verbosity
+
+.. option:: -f, --pidfile=PATH
+
+ PID file when running as a daemon. By default the PID file
+ is created in the system runtime state directory, for example
+ :file:`/var/run/qemu-vmsr-helper.pid`.
+
+.. option:: -k, --socket=PATH
+
+ path to the socket. By default the socket is created in
+ the system runtime state directory, for example
+ :file:`/var/run/qemu-vmsr-helper.sock`.
+
+.. option:: -T, --trace [[enable=]PATTERN][,events=FILE][,file=FILE]
+
+ .. include:: ../qemu-option-trace.rst.inc
+
+.. option:: -u, --user=USER
+
+ user to drop privileges to
+
+.. option:: -g, --group=GROUP
+
+ group to drop privileges to
+
+.. option:: -h, --help
+
+ Display a help message and exit.
+
+.. option:: -V, --version
+
+ Display version information and exit.
diff --git a/docs/tools/virtfs-proxy-helper.rst b/docs/tools/virtfs-proxy-helper.rst
deleted file mode 100644
index bd310eb..0000000
--- a/docs/tools/virtfs-proxy-helper.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-QEMU 9p virtfs proxy filesystem helper
-======================================
-
-Synopsis
---------
-
-**virtfs-proxy-helper** [*OPTIONS*]
-
-Description
------------
-
-NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be
-removed, along with this daemon, in a future version of QEMU!
-
-Pass-through security model in QEMU 9p server needs root privilege to do
-few file operations (like chown, chmod to any mode/uid:gid). There are two
-issues in pass-through security model:
-
-- TOCTTOU vulnerability: Following symbolic links in the server could
- provide access to files beyond 9p export path.
-
-- Running QEMU with root privilege could be a security issue.
-
-To overcome above issues, following approach is used: A new filesystem
-type 'proxy' is introduced. Proxy FS uses chroot + socket combination
-for securing the vulnerability known with following symbolic links.
-Intention of adding a new filesystem type is to allow qemu to run
-in non-root mode, but doing privileged operations using socket IO.
-
-Proxy helper (a stand alone binary part of qemu) is invoked with
-root privileges. Proxy helper chroots into 9p export path and creates
-a socket pair or a named socket based on the command line parameter.
-QEMU and proxy helper communicate using this socket. QEMU proxy fs
-driver sends filesystem request to proxy helper and receives the
-response from it.
-
-The proxy helper is designed so that it can drop root privileges except
-for the capabilities needed for doing filesystem operations.
-
-Options
--------
-
-The following options are supported:
-
-.. program:: virtfs-proxy-helper
-
-.. option:: -h
-
- Display help and exit
-
-.. option:: -p, --path PATH
-
- Path to export for proxy filesystem driver
-
-.. option:: -f, --fd SOCKET_ID
-
- Use given file descriptor as socket descriptor for communicating with
- qemu proxy fs drier. Usually a helper like libvirt will create
- socketpair and pass one of the fds as parameter to this option.
-
-.. option:: -s, --socket SOCKET_FILE
-
- Creates named socket file for communicating with qemu proxy fs driver
-
-.. option:: -u, --uid UID
-
- uid to give access to named socket file; used in combination with -g.
-
-.. option:: -g, --gid GID
-
- gid to give access to named socket file; used in combination with -u.
-
-.. option:: -n, --nodaemon
-
- Run as a normal program. By default program will run in daemon mode
diff --git a/docs/user/main.rst b/docs/user/main.rst
index e04bc2c..9a1c604 100644
--- a/docs/user/main.rst
+++ b/docs/user/main.rst
@@ -1,3 +1,5 @@
+.. _user-mode:
+
QEMU User space emulator
========================
@@ -42,6 +44,8 @@ QEMU was conceived so that ultimately it can emulate itself. Although it
is not very useful, it is an important test to show the power of the
emulator.
+.. _linux-user-mode:
+
Linux User space emulator
-------------------------
@@ -50,7 +54,7 @@ Command line options
::
- qemu-i386 [-h] [-d] [-L path] [-s size] [-cpu model] [-g port] [-B offset] [-R size] program [arguments...]
+ qemu-i386 [-h] [-d] [-L path] [-s size] [-cpu model] [-g endpoint] [-B offset] [-R size] program [arguments...]
``-h``
Print the help
@@ -87,8 +91,18 @@ Debug options:
Activate logging of the specified items (use '-d help' for a list of
log items)
-``-g port``
- Wait gdb connection to port
+``-g endpoint``
+ Wait gdb connection to a port (e.g., ``1234``) or a unix socket (e.g.,
+ ``/tmp/qemu.sock``).
+
+ If a unix socket path contains single ``%d`` placeholder (e.g.,
+ ``/tmp/qemu-%d.sock``), it is replaced by the emulator PID, which is useful
+ when passing this option via the ``QEMU_GDB`` environment variable to a
+ multi-process application.
+
+ If the endpoint address is followed by ``,suspend=n`` (e.g.,
+ ``1234,suspend=n``), then the emulated program starts without waiting for a
+ connection, which can be established at any later point in time.
``-one-insn-per-tb``
Run the emulation with one guest instruction per translation block.
@@ -130,10 +144,6 @@ Other binaries
The binary format is detected automatically.
-- user mode (Cris)
-
- * ``qemu-cris`` TODO.
-
- user mode (i386)
* ``qemu-i386`` TODO.
@@ -179,6 +189,8 @@ Other binaries
* ``qemu-sparc64`` can execute some Sparc64 (Sparc64 CPU, 64 bit ABI) and
SPARC32PLUS binaries (Sparc64 CPU, 32 bit ABI).
+.. _bsd-user-mode:
+
BSD User space emulator
-----------------------
diff --git a/dump/dump-hmp-cmds.c b/dump/dump-hmp-cmds.c
index d934042..21023db 100644
--- a/dump/dump-hmp-cmds.c
+++ b/dump/dump-hmp-cmds.c
@@ -10,7 +10,7 @@
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict)
{
diff --git a/dump/dump.c b/dump/dump.c
index 84064d8..15bbcc0 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -17,9 +17,9 @@
#include "qemu/bswap.h"
#include "exec/target_page.h"
#include "monitor/monitor.h"
-#include "sysemu/dump.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
+#include "system/dump.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
@@ -30,6 +30,7 @@
#include "migration/blocker.h"
#include "hw/core/cpu.h"
#include "win_dump.h"
+#include "qemu/range.h"
#include <zlib.h>
#ifdef CONFIG_LZO
@@ -574,8 +575,10 @@ static void get_offset_range(hwaddr phys_addr,
QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
if (dump_has_filter(s)) {
- if (block->target_start >= s->filter_area_begin + s->filter_area_length ||
- block->target_end <= s->filter_area_begin) {
+ if (!ranges_overlap(block->target_start,
+ block->target_end - block->target_start,
+ s->filter_area_begin,
+ s->filter_area_length)) {
/* This block is out of the range */
continue;
}
@@ -734,8 +737,9 @@ int64_t dump_filtered_memblock_start(GuestPhysBlock *block,
{
if (filter_area_length) {
/* return -1 if the block is not within filter area */
- if (block->target_start >= filter_area_start + filter_area_length ||
- block->target_end <= filter_area_start) {
+ if (!ranges_overlap(block->target_start,
+ block->target_end - block->target_start,
+ filter_area_start, filter_area_length)) {
return -1;
}
diff --git a/dump/win_dump.c b/dump/win_dump.c
index 0e4fe69..3162e8b 100644
--- a/dump/win_dump.c
+++ b/dump/win_dump.c
@@ -9,7 +9,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "exec/cpu-defs.h"
@@ -476,8 +476,6 @@ out_free:
g_free(saved_ctx);
out_cr3:
first_x86_cpu->env.cr[3] = saved_cr3;
-
- return;
}
#else /* !TARGET_X86_64 */
diff --git a/dump/win_dump.h b/dump/win_dump.h
index c9b49f8..9d6cfa4 100644
--- a/dump/win_dump.h
+++ b/dump/win_dump.h
@@ -11,7 +11,7 @@
#ifndef WIN_DUMP_H
#define WIN_DUMP_H
-#include "sysemu/dump.h"
+#include "system/dump.h"
/* Check Windows dump availability for the current target */
bool win_dump_available(Error **errp);
diff --git a/ebpf/ebpf_rss-stub.c b/ebpf/ebpf_rss-stub.c
index 8d7fae2..d0e7f99 100644
--- a/ebpf/ebpf_rss-stub.c
+++ b/ebpf/ebpf_rss-stub.c
@@ -23,19 +23,21 @@ bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx)
return false;
}
-bool ebpf_rss_load(struct EBPFRSSContext *ctx)
+bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp)
{
return false;
}
bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
- int config_fd, int toeplitz_fd, int table_fd)
+ int config_fd, int toeplitz_fd, int table_fd,
+ Error **errp)
{
return false;
}
bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config,
- uint16_t *indirections_table, uint8_t *toeplitz_key)
+ uint16_t *indirections_table, uint8_t *toeplitz_key,
+ Error **errp)
{
return false;
}
diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c
index 87f0714..e793786 100644
--- a/ebpf/ebpf_rss.c
+++ b/ebpf/ebpf_rss.c
@@ -47,34 +47,37 @@ bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx)
return ctx != NULL && (ctx->obj != NULL || ctx->program_fd != -1);
}
-static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx)
+static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx, Error **errp)
{
- if (!ebpf_rss_is_loaded(ctx)) {
- return false;
- }
-
ctx->mmap_configuration = mmap(NULL, qemu_real_host_page_size(),
PROT_READ | PROT_WRITE, MAP_SHARED,
ctx->map_configuration, 0);
if (ctx->mmap_configuration == MAP_FAILED) {
- trace_ebpf_error("eBPF RSS", "can not mmap eBPF configuration array");
+ trace_ebpf_rss_mmap_error(ctx, "configuration");
+ error_setg(errp, "Unable to map eBPF configuration array");
return false;
}
ctx->mmap_toeplitz_key = mmap(NULL, qemu_real_host_page_size(),
PROT_READ | PROT_WRITE, MAP_SHARED,
ctx->map_toeplitz_key, 0);
if (ctx->mmap_toeplitz_key == MAP_FAILED) {
- trace_ebpf_error("eBPF RSS", "can not mmap eBPF toeplitz key");
+ trace_ebpf_rss_mmap_error(ctx, "toeplitz key");
+ error_setg(errp, "Unable to map eBPF toeplitz array");
goto toeplitz_fail;
}
ctx->mmap_indirections_table = mmap(NULL, qemu_real_host_page_size(),
PROT_READ | PROT_WRITE, MAP_SHARED,
ctx->map_indirections_table, 0);
if (ctx->mmap_indirections_table == MAP_FAILED) {
- trace_ebpf_error("eBPF RSS", "can not mmap eBPF indirection table");
+ trace_ebpf_rss_mmap_error(ctx, "indirections table");
+ error_setg(errp, "Unable to map eBPF indirection array");
goto indirection_fail;
}
+ trace_ebpf_rss_mmap(ctx,
+ ctx->mmap_configuration,
+ ctx->mmap_toeplitz_key,
+ ctx->mmap_indirections_table);
return true;
indirection_fail:
@@ -90,10 +93,6 @@ toeplitz_fail:
static void ebpf_rss_munmap(struct EBPFRSSContext *ctx)
{
- if (!ebpf_rss_is_loaded(ctx)) {
- return;
- }
-
munmap(ctx->mmap_indirections_table, qemu_real_host_page_size());
munmap(ctx->mmap_toeplitz_key, qemu_real_host_page_size());
munmap(ctx->mmap_configuration, qemu_real_host_page_size());
@@ -103,7 +102,7 @@ static void ebpf_rss_munmap(struct EBPFRSSContext *ctx)
ctx->mmap_indirections_table = NULL;
}
-bool ebpf_rss_load(struct EBPFRSSContext *ctx)
+bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp)
{
struct rss_bpf *rss_bpf_ctx;
@@ -113,14 +112,16 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx)
rss_bpf_ctx = rss_bpf__open();
if (rss_bpf_ctx == NULL) {
- trace_ebpf_error("eBPF RSS", "can not open eBPF RSS object");
+ trace_ebpf_rss_open_error(ctx);
+ error_setg(errp, "Unable to open eBPF RSS object");
goto error;
}
bpf_program__set_type(rss_bpf_ctx->progs.tun_rss_steering_prog, BPF_PROG_TYPE_SOCKET_FILTER);
if (rss_bpf__load(rss_bpf_ctx)) {
- trace_ebpf_error("eBPF RSS", "can not load RSS program");
+ trace_ebpf_rss_load_error(ctx);
+ error_setg(errp, "Unable to load eBPF program");
goto error;
}
@@ -134,7 +135,12 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx)
ctx->map_toeplitz_key = bpf_map__fd(
rss_bpf_ctx->maps.tap_rss_map_toeplitz_key);
- if (!ebpf_rss_mmap(ctx)) {
+ trace_ebpf_rss_load(ctx,
+ ctx->program_fd,
+ ctx->map_configuration,
+ ctx->map_indirections_table,
+ ctx->map_toeplitz_key);
+ if (!ebpf_rss_mmap(ctx, errp)) {
goto error;
}
@@ -151,13 +157,28 @@ error:
}
bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
- int config_fd, int toeplitz_fd, int table_fd)
+ int config_fd, int toeplitz_fd, int table_fd,
+ Error **errp)
{
if (ebpf_rss_is_loaded(ctx)) {
+ error_setg(errp, "eBPF program is already loaded");
return false;
}
- if (program_fd < 0 || config_fd < 0 || toeplitz_fd < 0 || table_fd < 0) {
+ if (program_fd < 0) {
+ error_setg(errp, "eBPF program FD is not open");
+ return false;
+ }
+ if (config_fd < 0) {
+ error_setg(errp, "eBPF config FD is not open");
+ return false;
+ }
+ if (toeplitz_fd < 0) {
+ error_setg(errp, "eBPF toeplitz FD is not open");
+ return false;
+ }
+ if (table_fd < 0) {
+ error_setg(errp, "eBPF indirection FD is not open");
return false;
}
@@ -166,7 +187,13 @@ bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
ctx->map_toeplitz_key = toeplitz_fd;
ctx->map_indirections_table = table_fd;
- if (!ebpf_rss_mmap(ctx)) {
+ trace_ebpf_rss_load(ctx,
+ ctx->program_fd,
+ ctx->map_configuration,
+ ctx->map_indirections_table,
+ ctx->map_toeplitz_key);
+
+ if (!ebpf_rss_mmap(ctx, errp)) {
ctx->program_fd = -1;
ctx->map_configuration = -1;
ctx->map_toeplitz_key = -1;
@@ -177,25 +204,22 @@ bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
return true;
}
-static bool ebpf_rss_set_config(struct EBPFRSSContext *ctx,
+static void ebpf_rss_set_config(struct EBPFRSSContext *ctx,
struct EBPFRSSConfig *config)
{
- if (!ebpf_rss_is_loaded(ctx)) {
- return false;
- }
-
memcpy(ctx->mmap_configuration, config, sizeof(*config));
- return true;
}
static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx,
uint16_t *indirections_table,
- size_t len)
+ size_t len,
+ Error **errp)
{
char *cursor = ctx->mmap_indirections_table;
- if (!ebpf_rss_is_loaded(ctx) || indirections_table == NULL ||
- len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
+ if (len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
+ error_setg(errp, "Indirections table length %zu exceeds limit %d",
+ len, VIRTIO_NET_RSS_MAX_TABLE_LEN);
return false;
}
@@ -207,43 +231,51 @@ static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx,
return true;
}
-static bool ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx,
+static void ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx,
uint8_t *toeplitz_key)
{
/* prepare toeplitz key */
uint8_t toe[VIRTIO_NET_RSS_MAX_KEY_SIZE] = {};
- if (!ebpf_rss_is_loaded(ctx) || toeplitz_key == NULL) {
- return false;
- }
memcpy(toe, toeplitz_key, VIRTIO_NET_RSS_MAX_KEY_SIZE);
*(uint32_t *)toe = ntohl(*(uint32_t *)toe);
memcpy(ctx->mmap_toeplitz_key, toe, VIRTIO_NET_RSS_MAX_KEY_SIZE);
- return true;
}
bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config,
- uint16_t *indirections_table, uint8_t *toeplitz_key)
+ uint16_t *indirections_table, uint8_t *toeplitz_key,
+ Error **errp)
{
- if (!ebpf_rss_is_loaded(ctx) || config == NULL ||
- indirections_table == NULL || toeplitz_key == NULL) {
+ if (!ebpf_rss_is_loaded(ctx)) {
+ error_setg(errp, "eBPF program is not loaded");
return false;
}
-
- if (!ebpf_rss_set_config(ctx, config)) {
+ if (config == NULL) {
+ error_setg(errp, "eBPF config table is NULL");
return false;
}
-
- if (!ebpf_rss_set_indirections_table(ctx, indirections_table,
- config->indirections_len)) {
+ if (indirections_table == NULL) {
+ error_setg(errp, "eBPF indirections table is NULL");
+ return false;
+ }
+ if (toeplitz_key == NULL) {
+ error_setg(errp, "eBPF toeplitz key is NULL");
return false;
}
- if (!ebpf_rss_set_toepliz_key(ctx, toeplitz_key)) {
+ ebpf_rss_set_config(ctx, config);
+
+ if (!ebpf_rss_set_indirections_table(ctx, indirections_table,
+ config->indirections_len,
+ errp)) {
return false;
}
+ ebpf_rss_set_toepliz_key(ctx, toeplitz_key);
+
+ trace_ebpf_rss_set_data(ctx, config, indirections_table, toeplitz_key);
+
return true;
}
@@ -253,6 +285,8 @@ void ebpf_rss_unload(struct EBPFRSSContext *ctx)
return;
}
+ trace_ebpf_rss_unload(ctx);
+
ebpf_rss_munmap(ctx);
if (ctx->obj) {
@@ -271,4 +305,4 @@ void ebpf_rss_unload(struct EBPFRSSContext *ctx)
ctx->map_indirections_table = -1;
}
-ebpf_binary_init(EBPF_PROGRAMID_RSS, rss_bpf__elf_bytes)
+ebpf_binary_init(EBPF_PROGRAM_ID_RSS, rss_bpf__elf_bytes)
diff --git a/ebpf/ebpf_rss.h b/ebpf/ebpf_rss.h
index 239242b..86a5787 100644
--- a/ebpf/ebpf_rss.h
+++ b/ebpf/ebpf_rss.h
@@ -14,6 +14,8 @@
#ifndef QEMU_EBPF_RSS_H
#define QEMU_EBPF_RSS_H
+#include "qapi/error.h"
+
#define EBPF_RSS_MAX_FDS 4
struct EBPFRSSContext {
@@ -41,13 +43,15 @@ void ebpf_rss_init(struct EBPFRSSContext *ctx);
bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx);
-bool ebpf_rss_load(struct EBPFRSSContext *ctx);
+bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp);
bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
- int config_fd, int toeplitz_fd, int table_fd);
+ int config_fd, int toeplitz_fd, int table_fd,
+ Error **errp);
bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config,
- uint16_t *indirections_table, uint8_t *toeplitz_key);
+ uint16_t *indirections_table, uint8_t *toeplitz_key,
+ Error **errp);
void ebpf_rss_unload(struct EBPFRSSContext *ctx);
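Illustrative caller sketch (not part of this diff): with the new Error ** parameters, a caller can surface the precise failure reason instead of a bare boolean. The sketch assumes QEMU's usual Error API, including error_report_err() from qemu/error-report.h:

    static bool example_attach_rss(struct EBPFRSSContext *ctx)
    {
        Error *local_err = NULL;

        if (!ebpf_rss_load(ctx, &local_err)) {
            /* Report and free the error propagated out of ebpf_rss_load() */
            error_report_err(local_err);
            return false;
        }
        return true;
    }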
diff --git a/ebpf/trace-events b/ebpf/trace-events
index b3ad1a3..bf3d9b6 100644
--- a/ebpf/trace-events
+++ b/ebpf/trace-events
@@ -1,4 +1,10 @@
# See docs/devel/tracing.rst for syntax documentation.
# ebpf-rss.c
-ebpf_error(const char *s1, const char *s2) "error in %s: %s"
+ebpf_rss_load(void *ctx, int progfd, int cfgfd, int toepfd, int indirfd) "ctx=%p program-fd=%d config-fd=%d toeplitz-fd=%d indirection-fd=%d"
+ebpf_rss_load_error(void *ctx) "ctx=%p"
+ebpf_rss_mmap(void *ctx, void *cfgptr, void *toepptr, void *indirptr) "ctx=%p config-ptr=%p toeplitz-ptr=%p indirection-ptr=%p"
+ebpf_rss_mmap_error(void *ctx, const char *object) "ctx=%p object=%s"
+ebpf_rss_open_error(void *ctx) "ctx=%p"
+ebpf_rss_set_data(void *ctx, void *cfgptr, void *toepptr, void *indirptr) "ctx=%p config-ptr=%p toeplitz-ptr=%p indirection-ptr=%p"
+ebpf_rss_unload(void *ctx) "rss unload ctx=%p"
diff --git a/event-loop-base.c b/event-loop-base.c
index d5be4dc..8ca143b 100644
--- a/event-loop-base.c
+++ b/event-loop-base.c
@@ -15,7 +15,7 @@
#include "qom/object_interfaces.h"
#include "qapi/error.h"
#include "block/thread-pool.h"
-#include "sysemu/event-loop-base.h"
+#include "system/event-loop-base.h"
typedef struct {
const char *name;
@@ -73,8 +73,6 @@ static void event_loop_base_set_param(Object *obj, Visitor *v,
if (bc->update_params) {
bc->update_params(base, errp);
}
-
- return;
}
static void event_loop_base_complete(UserCreatable *uc, Error **errp)
@@ -99,7 +97,8 @@ static bool event_loop_base_can_be_deleted(UserCreatable *uc)
return true;
}
-static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
+static void event_loop_base_class_init(ObjectClass *klass,
+ const void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
ucc->complete = event_loop_base_complete;
@@ -127,7 +126,7 @@ static const TypeInfo event_loop_base_info = {
.class_size = sizeof(EventLoopBaseClass),
.class_init = event_loop_base_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/fpu/meson.build b/fpu/meson.build
index 1a9992d..646c76f 100644
--- a/fpu/meson.build
+++ b/fpu/meson.build
@@ -1 +1 @@
-specific_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c'))
+common_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c'))
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index a44649f..171bfd0 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -39,65 +39,152 @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s)
static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
float_status *s)
{
+ bool have_snan = false;
+ FloatPartsN *ret;
+ int cmp;
+
if (is_snan(a->cls) || is_snan(b->cls)) {
float_raise(float_flag_invalid | float_flag_invalid_snan, s);
+ have_snan = true;
}
if (s->default_nan_mode) {
parts_default_nan(a, s);
- } else {
- int cmp = frac_cmp(a, b);
- if (cmp == 0) {
- cmp = a->sign < b->sign;
- }
+ return a;
+ }
- if (pickNaN(a->cls, b->cls, cmp > 0, s)) {
- a = b;
+ switch (s->float_2nan_prop_rule) {
+ case float_2nan_prop_s_ab:
+ if (have_snan) {
+ ret = is_snan(a->cls) ? a : b;
+ break;
+ }
+ /* fall through */
+ case float_2nan_prop_ab:
+ ret = is_nan(a->cls) ? a : b;
+ break;
+ case float_2nan_prop_s_ba:
+ if (have_snan) {
+ ret = is_snan(b->cls) ? b : a;
+ break;
}
+ /* fall through */
+ case float_2nan_prop_ba:
+ ret = is_nan(b->cls) ? b : a;
+ break;
+ case float_2nan_prop_x87:
+ /*
+ * This implements x87 NaN propagation rules:
+ * SNaN + QNaN => return the QNaN
+ * two SNaNs => return the one with the larger significand, silenced
+ * two QNaNs => return the one with the larger significand
+ * SNaN and a non-NaN => return the SNaN, silenced
+ * QNaN and a non-NaN => return the QNaN
+ *
+ * If we get down to comparing significands and they are the same,
+ * return the NaN with the positive sign bit (if any).
+ */
if (is_snan(a->cls)) {
- parts_silence_nan(a, s);
+ if (!is_snan(b->cls)) {
+ ret = is_qnan(b->cls) ? b : a;
+ break;
+ }
+ } else if (is_qnan(a->cls)) {
+ if (is_snan(b->cls) || !is_qnan(b->cls)) {
+ ret = a;
+ break;
+ }
+ } else {
+ ret = b;
+ break;
+ }
+ cmp = frac_cmp(a, b);
+ if (cmp == 0) {
+ cmp = a->sign < b->sign;
}
+ ret = cmp > 0 ? a : b;
+ break;
+ default:
+ g_assert_not_reached();
}
- return a;
+
+ if (is_snan(ret->cls)) {
+ parts_silence_nan(ret, s);
+ }
+ return ret;
}
static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
FloatPartsN *c, float_status *s,
int ab_mask, int abc_mask)
{
- int which;
+ bool infzero = (ab_mask == float_cmask_infzero);
+ bool have_snan = (abc_mask & float_cmask_snan);
+ FloatPartsN *ret;
- if (unlikely(abc_mask & float_cmask_snan)) {
+ if (unlikely(have_snan)) {
float_raise(float_flag_invalid | float_flag_invalid_snan, s);
}
- which = pickNaNMulAdd(a->cls, b->cls, c->cls,
- ab_mask == float_cmask_infzero, s);
+ if (infzero &&
+ !(s->float_infzeronan_rule & float_infzeronan_suppress_invalid)) {
+ /* This is (0 * inf) + NaN or (inf * 0) + NaN */
+ float_raise(float_flag_invalid | float_flag_invalid_imz, s);
+ }
- if (s->default_nan_mode || which == 3) {
+ if (s->default_nan_mode) {
/*
- * Note that this check is after pickNaNMulAdd so that function
- * has an opportunity to set the Invalid flag for infzero.
+ * We guarantee not to require the target to tell us how to
+ * pick a NaN if we're always returning the default NaN.
+ * But if we're not in default-NaN mode then the target must
+ * specify.
*/
- parts_default_nan(a, s);
- return a;
+ goto default_nan;
+ } else if (infzero) {
+ /*
+ * Inf * 0 + NaN -- some implementations return the
+ * default NaN here, and some return the input NaN.
+ */
+ switch (s->float_infzeronan_rule & ~float_infzeronan_suppress_invalid) {
+ case float_infzeronan_dnan_never:
+ break;
+ case float_infzeronan_dnan_always:
+ goto default_nan;
+ case float_infzeronan_dnan_if_qnan:
+ if (is_qnan(c->cls)) {
+ goto default_nan;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ ret = c;
+ } else {
+ FloatPartsN *val[R_3NAN_1ST_MASK + 1] = { a, b, c };
+ Float3NaNPropRule rule = s->float_3nan_prop_rule;
+
+ assert(rule != float_3nan_prop_none);
+ if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
+ /* We have at least one SNaN input and should prefer it */
+ do {
+ ret = val[rule & R_3NAN_1ST_MASK];
+ rule >>= R_3NAN_1ST_LENGTH;
+ } while (!is_snan(ret->cls));
+ } else {
+ do {
+ ret = val[rule & R_3NAN_1ST_MASK];
+ rule >>= R_3NAN_1ST_LENGTH;
+ } while (!is_nan(ret->cls));
+ }
}
- switch (which) {
- case 0:
- break;
- case 1:
- a = b;
- break;
- case 2:
- a = c;
- break;
- default:
- g_assert_not_reached();
- }
- if (is_snan(a->cls)) {
- parts_silence_nan(a, s);
+ if (is_snan(ret->cls)) {
+ parts_silence_nan(ret, s);
}
+ return ret;
+
+ default_nan:
+ parts_default_nan(a, s);
return a;
}
@@ -108,18 +195,37 @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
const FloatFmt *fmt)
{
+ /*
+ * It's target-dependent how to handle the case of exponent 0
+ * and Integer bit set. Intel calls these "pseudodenormals",
+ * and treats them as if the integer bit was 0, and never
+ * produces them on output. This is the default behaviour for QEMU.
+ * For m68k, the integer bit is considered validly part of the
+ * input value when the exponent is 0, and may be 0 or 1,
+ * giving extra range. They may also be generated as outputs.
+ * (The m68k manual actually calls these values part of the
+ * normalized number range, not the denormalized number range,
+ * but that distinction is not important for us, because
+ * m68k doesn't care about the input_denormal_used status flag.)
+ * floatx80_pseudo_denormal_valid selects the m68k behaviour,
+ * which changes both how we canonicalize such a value and
+ * how we uncanonicalize results.
+ */
+ bool has_pseudo_denormals = fmt->has_explicit_bit &&
+ (status->floatx80_behaviour & floatx80_pseudo_denormal_valid);
+
if (unlikely(p->exp == 0)) {
if (likely(frac_eqz(p))) {
p->cls = float_class_zero;
} else if (status->flush_inputs_to_zero) {
- float_raise(float_flag_input_denormal, status);
+ float_raise(float_flag_input_denormal_flushed, status);
p->cls = float_class_zero;
frac_clear(p);
} else {
int shift = frac_normalize(p);
- p->cls = float_class_normal;
+ p->cls = float_class_denormal;
p->exp = fmt->frac_shift - fmt->exp_bias
- - shift + !fmt->m68k_denormal;
+ - shift + !has_pseudo_denormals;
}
} else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
p->cls = float_class_normal;
@@ -155,6 +261,9 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
int exp, flags = 0;
switch (s->float_rounding_mode) {
+ case float_round_nearest_even_max:
+ overflow_norm = true;
+ /* fall through */
case float_round_nearest_even:
if (N > 64 && frac_lsb == 0) {
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
@@ -244,20 +353,23 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
p->frac_lo &= ~round_mask;
}
frac_shr(p, frac_shift);
- } else if (s->flush_to_zero) {
- flags |= float_flag_output_denormal;
+ } else if (s->flush_to_zero &&
+ s->ftz_detection == float_ftz_before_rounding) {
+ flags |= float_flag_output_denormal_flushed;
p->cls = float_class_zero;
exp = 0;
frac_clear(p);
} else {
bool is_tiny = s->tininess_before_rounding || exp < 0;
+ bool has_pseudo_denormals = fmt->has_explicit_bit &&
+ (s->floatx80_behaviour & floatx80_pseudo_denormal_valid);
if (!is_tiny) {
FloatPartsN discard;
is_tiny = !frac_addi(&discard, p, inc);
}
- frac_shrjam(p, !fmt->m68k_denormal - exp);
+ frac_shrjam(p, !has_pseudo_denormals - exp);
if (p->frac_lo & round_mask) {
/* Need to recompute round-to-even/round-to-odd. */
@@ -288,14 +400,22 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
p->frac_lo &= ~round_mask;
}
- exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal;
+ exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !has_pseudo_denormals;
frac_shr(p, frac_shift);
- if (is_tiny && (flags & float_flag_inexact)) {
- flags |= float_flag_underflow;
- }
- if (exp == 0 && frac_eqz(p)) {
- p->cls = float_class_zero;
+ if (is_tiny) {
+ if (s->flush_to_zero) {
+ assert(s->ftz_detection == float_ftz_after_rounding);
+ flags |= float_flag_output_denormal_flushed;
+ p->cls = float_class_zero;
+ exp = 0;
+ frac_clear(p);
+ } else if (flags & float_flag_inexact) {
+ flags |= float_flag_underflow;
+ }
+ if (exp == 0 && frac_eqz(p)) {
+ p->cls = float_class_zero;
+ }
}
}
p->exp = exp;
@@ -305,7 +425,7 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
static void partsN(uncanon)(FloatPartsN *p, float_status *s,
const FloatFmt *fmt)
{
- if (likely(p->cls == float_class_normal)) {
+ if (likely(is_anynorm(p->cls))) {
parts_uncanon_normal(p, s, fmt);
} else {
switch (p->cls) {
@@ -343,9 +463,18 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
bool b_sign = b->sign ^ subtract;
int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
+ /*
+ * For addition and subtraction, we will consume an
+ * input denormal unless the other input is a NaN.
+ */
+ if ((ab_mask & (float_cmask_denormal | float_cmask_anynan)) ==
+ float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
if (a->sign != b_sign) {
/* Subtraction */
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
if (parts_sub_normal(a, b)) {
return a;
}
@@ -378,7 +507,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
}
} else {
/* Addition */
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
parts_add_normal(a, b);
return a;
}
@@ -398,12 +527,12 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
}
if (b->cls == float_class_zero) {
- g_assert(a->cls == float_class_normal);
+ g_assert(is_anynorm(a->cls));
return a;
}
g_assert(a->cls == float_class_zero);
- g_assert(b->cls == float_class_normal);
+ g_assert(is_anynorm(b->cls));
return_b:
b->sign = b_sign;
return b;
@@ -423,9 +552,13 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
bool sign = a->sign ^ b->sign;
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
FloatPartsW tmp;
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
frac_mulw(&tmp, a, b);
frac_truncjam(a, &tmp);
@@ -451,6 +584,10 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
}
/* Multiply by 0 or Inf */
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
if (ab_mask & float_cmask_inf) {
a->cls = float_class_inf;
a->sign = sign;
@@ -476,8 +613,9 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
* Requires A and C extracted into a double-sized structure to provide the
* extra space for the widening multiply.
*/
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
- FloatPartsN *c, int flags, float_status *s)
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
+ FloatPartsN *c, int scale,
+ int flags, float_status *s)
{
int ab_mask, abc_mask;
FloatPartsW p_widen, c_widen;
@@ -505,7 +643,7 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
a->sign ^= 1;
}
- if (unlikely(ab_mask != float_cmask_normal)) {
+ if (unlikely(!cmask_is_only_normals(ab_mask))) {
if (unlikely(ab_mask == float_cmask_infzero)) {
float_raise(float_flag_invalid | float_flag_invalid_imz, s);
goto d_nan;
@@ -520,12 +658,14 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
}
g_assert(ab_mask & float_cmask_zero);
- if (c->cls == float_class_normal) {
+ if (is_anynorm(c->cls)) {
*a = *c;
goto return_normal;
}
if (c->cls == float_class_zero) {
- if (a->sign != c->sign) {
+ if (flags & float_muladd_suppress_add_product_zero) {
+ a->sign = c->sign;
+ } else if (a->sign != c->sign) {
goto return_sub_zero;
}
goto return_zero;
@@ -566,13 +706,21 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
a->exp = p_widen.exp;
return_normal:
- if (flags & float_muladd_halve_result) {
- a->exp -= 1;
- }
+ a->exp += scale;
finish_sign:
if (flags & float_muladd_negate_result) {
a->sign ^= 1;
}
+
+ /*
+ * All result types except for "return the default NaN
+ * because this is an Invalid Operation" go through here;
+ * this matches the set of cases where we consumed a
+ * denormal input.
+ */
+ if (abc_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
return a;
return_sub_zero:
@@ -601,7 +749,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b,
int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
bool sign = a->sign ^ b->sign;
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
a->sign = sign;
a->exp -= b->exp + frac_div(a, b);
return a;
@@ -622,6 +773,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b,
return parts_pick_nan(a, b, s);
}
+ if ((ab_mask & float_cmask_denormal) && b->cls != float_class_zero) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
a->sign = sign;
/* Inf / X */
@@ -659,7 +814,10 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b,
{
int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
frac_modrem(a, b, mod_quot);
return a;
}
@@ -680,6 +838,10 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b,
return a;
}
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
/* N % Inf; 0 % N */
g_assert(b->cls == float_class_inf || a->cls == float_class_zero);
return a;
@@ -709,6 +871,12 @@ static void partsN(sqrt)(FloatPartsN *a, float_status *status,
if (unlikely(a->cls != float_class_normal)) {
switch (a->cls) {
+ case float_class_denormal:
+ if (!a->sign) {
+ /* -ve denormal will be InvalidOperation */
+ float_raise(float_flag_input_denormal_used, status);
+ }
+ break;
case float_class_snan:
case float_class_qnan:
parts_return_nan(a, status);
@@ -1039,6 +1207,7 @@ static void partsN(round_to_int)(FloatPartsN *a, FloatRoundMode rmode,
case float_class_inf:
break;
case float_class_normal:
+ case float_class_denormal:
if (parts_round_to_int_normal(a, rmode, scale, fmt->frac_size)) {
float_raise(float_flag_inexact, s);
}
@@ -1083,6 +1252,7 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode,
return 0;
case float_class_normal:
+ case float_class_denormal:
/* TODO: N - 2 is frac_size for rounding; could use input fmt. */
if (parts_round_to_int_normal(p, rmode, scale, N - 2)) {
flags = float_flag_inexact;
@@ -1150,6 +1320,7 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode,
return 0;
case float_class_normal:
+ case float_class_denormal:
/* TODO: N - 2 is frac_size for rounding; could use input fmt. */
if (parts_round_to_int_normal(p, rmode, scale, N - 2)) {
flags = float_flag_inexact;
@@ -1213,6 +1384,7 @@ static int64_t partsN(float_to_sint_modulo)(FloatPartsN *p,
return 0;
case float_class_normal:
+ case float_class_denormal:
/* TODO: N - 2 is frac_size for rounding; could use input fmt. */
if (parts_round_to_int_normal(p, rmode, 0, N - 2)) {
flags = float_flag_inexact;
@@ -1334,6 +1506,9 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b,
if ((flags & (minmax_isnum | minmax_isnumber))
&& !(ab_mask & float_cmask_snan)
&& (ab_mask & ~float_cmask_qnan)) {
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
return is_nan(a->cls) ? b : a;
}
@@ -1358,12 +1533,17 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b,
return parts_pick_nan(a, b, s);
}
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
a_exp = a->exp;
b_exp = b->exp;
- if (unlikely(ab_mask != float_cmask_normal)) {
+ if (unlikely(!cmask_is_only_normals(ab_mask))) {
switch (a->cls) {
case float_class_normal:
+ case float_class_denormal:
break;
case float_class_inf:
a_exp = INT16_MAX;
@@ -1373,10 +1553,10 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b,
break;
default:
g_assert_not_reached();
- break;
}
switch (b->cls) {
case float_class_normal:
+ case float_class_denormal:
break;
case float_class_inf:
b_exp = INT16_MAX;
@@ -1386,7 +1566,6 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -1424,9 +1603,13 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b,
{
int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
- if (likely(ab_mask == float_cmask_normal)) {
+ if (likely(cmask_is_only_normals(ab_mask))) {
FloatRelation cmp;
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
if (a->sign != b->sign) {
goto a_sign;
}
@@ -1452,6 +1635,10 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b,
return float_relation_unordered;
}
+ if (ab_mask & float_cmask_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
+
if (ab_mask & float_cmask_zero) {
if (ab_mask == float_cmask_zero) {
return float_relation_equal;
@@ -1491,6 +1678,9 @@ static void partsN(scalbn)(FloatPartsN *a, int n, float_status *s)
case float_class_zero:
case float_class_inf:
break;
+ case float_class_denormal:
+ float_raise(float_flag_input_denormal_used, s);
+ /* fall through */
case float_class_normal:
a->exp += MIN(MAX(n, -0x10000), 0x10000);
break;
@@ -1510,6 +1700,12 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt)
if (unlikely(a->cls != float_class_normal)) {
switch (a->cls) {
+ case float_class_denormal:
+ if (!a->sign) {
+ /* -ve denormal will be InvalidOperation */
+ float_raise(float_flag_input_denormal_used, s);
+ }
+ break;
case float_class_snan:
case float_class_qnan:
parts_return_nan(a, s);
@@ -1526,9 +1722,8 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt)
}
return;
default:
- break;
+ g_assert_not_reached();
}
- g_assert_not_reached();
}
if (unlikely(a->sign)) {
goto d_nan;
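Illustrative only (not part of this diff): with pickNaN()/pickNaNMulAdd() folded into partsN(pick_nan) and partsN(pick_nan_muladd), the per-target behaviour is selected through float_status fields rather than TARGET_* ifdefs. A sketch of how a target might configure the rules at FP reset, assuming setter helpers along the lines of include/fpu/softfloat-helpers.h (set_float_3nan_prop_rule and float_3nan_prop_s_cab are this sketch's assumptions, not shown in the hunks above):

    static void example_fp_status_reset(float_status *s)
    {
        /* Two-operand ops: prefer an SNaN, scanning operand a then b */
        set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
        /* Fused multiply-add: scan c, a, b, SNaNs first */
        set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);
        /* Inf * 0 + qNaN yields the default NaN */
        set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
    }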
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index 8f3b97d..ba4fa08 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -85,11 +85,7 @@ this code that are retained.
*/
static inline bool no_signaling_nans(float_status *status)
{
-#if defined(TARGET_XTENSA)
return status->no_signaling_nans;
-#else
- return false;
-#endif
}
/* Define how the architecture discriminates signaling NaNs.
@@ -97,17 +93,10 @@ static inline bool no_signaling_nans(float_status *status)
* In IEEE 754-1985 this was implementation defined, but in IEEE 754-2008
* the msb must be zero. MIPS is (so far) unique in supporting both the
* 2008 revision and backward compatibility with their original choice.
- * Thus for MIPS we must make the choice at runtime.
*/
static inline bool snan_bit_is_one(float_status *status)
{
-#if defined(TARGET_MIPS)
return status->snan_bit_is_one;
-#elif defined(TARGET_HPPA) || defined(TARGET_SH4)
- return 1;
-#else
- return 0;
-#endif
}
/*----------------------------------------------------------------------------
@@ -133,35 +122,17 @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
{
bool sign = 0;
uint64_t frac;
+ uint8_t dnan_pattern = status->default_nan_pattern;
-#if defined(TARGET_SPARC) || defined(TARGET_M68K)
- /* !snan_bit_is_one, set all bits */
- frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1;
-#elif defined(TARGET_I386) || defined(TARGET_X86_64) \
- || defined(TARGET_MICROBLAZE)
- /* !snan_bit_is_one, set sign and msb */
- frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1);
- sign = 1;
-#elif defined(TARGET_HPPA)
- /* snan_bit_is_one, set msb-1. */
- frac = 1ULL << (DECOMPOSED_BINARY_POINT - 2);
-#elif defined(TARGET_HEXAGON)
- sign = 1;
- frac = ~0ULL;
-#else
+ assert(dnan_pattern != 0);
+
+ sign = dnan_pattern >> 7;
/*
- * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V,
- * S390, SH4, TriCore, and Xtensa. Our other supported targets,
- * such CRIS, do not have floating-point.
+ * Place default_nan_pattern [6:0] into bits [62:56],
+ * and replicate bit [0] down into [55:0]
*/
- if (snan_bit_is_one(status)) {
- /* set all bits other than msb */
- frac = (1ULL << (DECOMPOSED_BINARY_POINT - 1)) - 1;
- } else {
- /* set msb */
- frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1);
- }
-#endif
+ frac = deposit64(0, DECOMPOSED_BINARY_POINT - 7, 7, dnan_pattern);
+ frac = deposit64(frac, 0, DECOMPOSED_BINARY_POINT - 7, -(dnan_pattern & 1));
*p = (FloatParts64) {
.cls = float_class_qnan,
@@ -227,17 +198,17 @@ static void parts128_silence_nan(FloatParts128 *p, float_status *status)
floatx80 floatx80_default_nan(float_status *status)
{
floatx80 r;
+ /*
+ * Extrapolate from the choices made by parts64_default_nan to fill
+ * in the floatx80 format. We assume that floatx80's explicit
+ * integer bit is always set (this is true for i386 and m68k,
+ * which are the only real users of this format).
+ */
+ FloatParts64 p64;
+ parts64_default_nan(&p64, status);
- /* None of the targets that have snan_bit_is_one use floatx80. */
- assert(!snan_bit_is_one(status));
-#if defined(TARGET_M68K)
- r.low = UINT64_C(0xFFFFFFFFFFFFFFFF);
- r.high = 0x7FFF;
-#else
- /* X86 */
- r.low = UINT64_C(0xC000000000000000);
- r.high = 0xFFFF;
-#endif
+ r.high = 0x7FFF | (p64.sign << 15);
+ r.low = (1ULL << DECOMPOSED_BINARY_POINT) | p64.frac;
return r;
}
@@ -245,15 +216,15 @@ floatx80 floatx80_default_nan(float_status *status)
| The pattern for a default generated extended double-precision inf.
*----------------------------------------------------------------------------*/
-#define floatx80_infinity_high 0x7FFF
-#if defined(TARGET_M68K)
-#define floatx80_infinity_low UINT64_C(0x0000000000000000)
-#else
-#define floatx80_infinity_low UINT64_C(0x8000000000000000)
-#endif
-
-const floatx80 floatx80_infinity
- = make_floatx80_init(floatx80_infinity_high, floatx80_infinity_low);
+floatx80 floatx80_default_inf(bool zSign, float_status *status)
+{
+ /*
+ * Whether the Integer bit is set in the default Infinity is
+ * target dependent.
+ */
+ bool z = status->floatx80_behaviour & floatx80_default_inf_int_bit_is_zero;
+ return packFloatx80(zSign, 0x7fff, z ? 0 : (1ULL << 63));
+}
/*----------------------------------------------------------------------------
| Returns 1 if the half-precision floating-point value `a' is a quiet
@@ -371,331 +342,6 @@ bool float32_is_signaling_nan(float32 a_, float_status *status)
}
/*----------------------------------------------------------------------------
-| Select which NaN to propagate for a two-input operation.
-| IEEE754 doesn't specify all the details of this, so the
-| algorithm is target-specific.
-| The routine is passed various bits of information about the
-| two NaNs and should return 0 to select NaN a and 1 for NaN b.
-| Note that signalling NaNs are always squashed to quiet NaNs
-| by the caller, by calling floatXX_silence_nan() before
-| returning them.
-|
-| aIsLargerSignificand is only valid if both a and b are NaNs
-| of some kind, and is true if a has the larger significand,
-| or if both a and b have the same significand but a is
-| positive but b is negative. It is only needed for the x87
-| tie-break rule.
-*----------------------------------------------------------------------------*/
-
-static int pickNaN(FloatClass a_cls, FloatClass b_cls,
- bool aIsLargerSignificand, float_status *status)
-{
-#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) || \
- defined(TARGET_LOONGARCH64) || defined(TARGET_S390X)
- /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take
- * the first of:
- * 1. A if it is signaling
- * 2. B if it is signaling
- * 3. A (quiet)
- * 4. B (quiet)
- * A signaling NaN is always quietened before returning it.
- */
- /* According to MIPS specifications, if one of the two operands is
- * a sNaN, a new qNaN has to be generated. This is done in
- * floatXX_silence_nan(). For qNaN inputs the specifications
- * says: "When possible, this QNaN result is one of the operand QNaN
- * values." In practice it seems that most implementations choose
- * the first operand if both operands are qNaN. In short this gives
- * the following rules:
- * 1. A if it is signaling
- * 2. B if it is signaling
- * 3. A (quiet)
- * 4. B (quiet)
- * A signaling NaN is always silenced before returning it.
- */
- if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
-#elif defined(TARGET_PPC) || defined(TARGET_M68K)
- /* PowerPC propagation rules:
- * 1. A if it sNaN or qNaN
- * 2. B if it sNaN or qNaN
- * A signaling NaN is always silenced before returning it.
- */
- /* M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL
- * 3.4 FLOATING-POINT INSTRUCTION DETAILS
- * If either operand, but not both operands, of an operation is a
- * nonsignaling NaN, then that NaN is returned as the result. If both
- * operands are nonsignaling NaNs, then the destination operand
- * nonsignaling NaN is returned as the result.
- * If either operand to an operation is a signaling NaN (SNaN), then the
- * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit
- * is set in the FPCR ENABLE byte, then the exception is taken and the
- * destination is not modified. If the SNaN exception enable bit is not
- * set, setting the SNaN bit in the operand to a one converts the SNaN to
- * a nonsignaling NaN. The operation then continues as described in the
- * preceding paragraph for nonsignaling NaNs.
- */
- if (is_nan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
-#elif defined(TARGET_SPARC)
- /* Prefer SNaN over QNaN, order B then A. */
- if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_qnan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
-#elif defined(TARGET_XTENSA)
- /*
- * Xtensa has two NaN propagation modes.
- * Which one is active is controlled by float_status::use_first_nan.
- */
- if (status->use_first_nan) {
- if (is_nan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
- } else {
- if (is_nan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
- }
-#else
- /* This implements x87 NaN propagation rules:
- * SNaN + QNaN => return the QNaN
- * two SNaNs => return the one with the larger significand, silenced
- * two QNaNs => return the one with the larger significand
- * SNaN and a non-NaN => return the SNaN, silenced
- * QNaN and a non-NaN => return the QNaN
- *
- * If we get down to comparing significands and they are the same,
- * return the NaN with the positive sign bit (if any).
- */
- if (is_snan(a_cls)) {
- if (is_snan(b_cls)) {
- return aIsLargerSignificand ? 0 : 1;
- }
- return is_qnan(b_cls) ? 1 : 0;
- } else if (is_qnan(a_cls)) {
- if (is_snan(b_cls) || !is_qnan(b_cls)) {
- return 0;
- } else {
- return aIsLargerSignificand ? 0 : 1;
- }
- } else {
- return 1;
- }
-#endif
-}
-
-/*----------------------------------------------------------------------------
-| Select which NaN to propagate for a three-input operation.
-| For the moment we assume that no CPU needs the 'larger significand'
-| information.
-| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN
-*----------------------------------------------------------------------------*/
-static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
- bool infzero, float_status *status)
-{
-#if defined(TARGET_ARM)
- /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns
- * the default NaN
- */
- if (infzero && is_qnan(c_cls)) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 3;
- }
-
- /* This looks different from the ARM ARM pseudocode, because the ARM ARM
- * puts the operands to a fused mac operation (a*b)+c in the order c,a,b.
- */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
-#elif defined(TARGET_MIPS)
- if (snan_bit_is_one(status)) {
- /*
- * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
- * case sets InvalidOp and returns the default NaN
- */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 3;
- }
- /* Prefer sNaN over qNaN, in the a, b, c order. */
- if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else if (is_qnan(b_cls)) {
- return 1;
- } else {
- return 2;
- }
- } else {
- /*
- * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
- * case sets InvalidOp and returns the input value 'c'
- */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 2;
- }
- /* Prefer sNaN over qNaN, in the c, a, b order. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
- }
-#elif defined(TARGET_LOONGARCH64)
- /*
- * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
- * case sets InvalidOp and returns the input value 'c'
- */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 2;
- }
- /* Prefer sNaN over qNaN, in the c, a, b order. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
-#elif defined(TARGET_PPC)
- /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
- * to return an input NaN if we have one (ie c) rather than generating
- * a default NaN
- */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 2;
- }
-
- /* If fRA is a NaN return it; otherwise if fRB is a NaN return it;
- * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
- */
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(c_cls)) {
- return 2;
- } else {
- return 1;
- }
-#elif defined(TARGET_RISCV)
- /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- }
- return 3; /* default NaN */
-#elif defined(TARGET_SPARC)
- /* For (inf,0,nan) return c. */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 2;
- }
- /* Prefer SNaN over QNaN, order C, B, A. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
-#elif defined(TARGET_XTENSA)
- /*
- * For Xtensa, the (inf,zero,nan) case sets InvalidOp and returns
- * an input NaN if we have one (ie c).
- */
- if (infzero) {
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
- return 2;
- }
- if (status->use_first_nan) {
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(b_cls)) {
- return 1;
- } else {
- return 2;
- }
- } else {
- if (is_nan(c_cls)) {
- return 2;
- } else if (is_nan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
- }
-#else
- /* A default implementation: prefer a to b to c.
- * This is unlikely to actually match any real implementation.
- */
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(b_cls)) {
- return 1;
- } else {
- return 2;
- }
-#endif
-}
-
-/*----------------------------------------------------------------------------
| Returns 1 if the double-precision floating-point value `a' is a quiet
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
@@ -799,58 +445,6 @@ floatx80 floatx80_silence_nan(floatx80 a, float_status *status)
}
/*----------------------------------------------------------------------------
-| Takes two extended double-precision floating-point values `a' and `b', one
-| of which is a NaN, and returns the appropriate NaN result. If either `a' or
-| `b' is a signaling NaN, the invalid exception is raised.
-*----------------------------------------------------------------------------*/
-
-floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status)
-{
- bool aIsLargerSignificand;
- FloatClass a_cls, b_cls;
-
- /* This is not complete, but is good enough for pickNaN. */
- a_cls = (!floatx80_is_any_nan(a)
- ? float_class_normal
- : floatx80_is_signaling_nan(a, status)
- ? float_class_snan
- : float_class_qnan);
- b_cls = (!floatx80_is_any_nan(b)
- ? float_class_normal
- : floatx80_is_signaling_nan(b, status)
- ? float_class_snan
- : float_class_qnan);
-
- if (is_snan(a_cls) || is_snan(b_cls)) {
- float_raise(float_flag_invalid, status);
- }
-
- if (status->default_nan_mode) {
- return floatx80_default_nan(status);
- }
-
- if (a.low < b.low) {
- aIsLargerSignificand = 0;
- } else if (b.low < a.low) {
- aIsLargerSignificand = 1;
- } else {
- aIsLargerSignificand = (a.high < b.high) ? 1 : 0;
- }
-
- if (pickNaN(a_cls, b_cls, aIsLargerSignificand, status)) {
- if (is_snan(b_cls)) {
- return floatx80_silence_nan(b, status);
- }
- return b;
- } else {
- if (is_snan(a_cls)) {
- return floatx80_silence_nan(a, status);
- }
- return a;
- }
-}
-
-/*----------------------------------------------------------------------------
| Returns 1 if the quadruple-precision floating-point value `a' is a quiet
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
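A short worked example of the new default_nan_pattern encoding (editorial illustration, derived from the removed ifdef block above): bit 7 gives the sign, bits [6:0] land in the top seven fraction bits, and bit 0 is then replicated through the remaining fraction bits.

    /*
     * 0b11000000 -> sign = 1, frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1)
     *               (the former TARGET_I386/X86_64/MICROBLAZE default NaN)
     * 0b01111111 -> sign = 0, frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1
     *               (the former TARGET_SPARC/M68K all-ones payload)
     * 0b01000000 -> sign = 0, frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1)
     *               (the IEEE-style "set msb" pattern most targets use)
     */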
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 027a8e5..34c962d 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -79,9 +79,6 @@ this code that are retained.
* version 2 or later. See the COPYING file in the top-level directory.
*/
-/* softfloat (and in particular the code in softfloat-specialize.h) is
- * target-dependent and needs the TARGET_* macros.
- */
#include "qemu/osdep.h"
#include <math.h>
#include "qemu/bitops.h"
@@ -132,7 +129,7 @@ this code that are retained.
if (unlikely(soft_t ## _is_denormal(*a))) { \
*a = soft_t ## _set_sign(soft_t ## _zero, \
soft_t ## _is_neg(*a)); \
- float_raise(float_flag_input_denormal, s); \
+ float_raise(float_flag_input_denormal_flushed, s); \
} \
}
@@ -220,11 +217,9 @@ GEN_INPUT_FLUSH3(float64_input_flush3, float64)
* the use of hardfloat, since hardfloat relies on the inexact flag being
* already set.
*/
-#if defined(TARGET_PPC) || defined(__FAST_MATH__)
# if defined(__FAST_MATH__)
# warning disabling hardfloat due to -ffast-math: hardfloat requires an exact \
IEEE implementation
-# endif
# define QEMU_NO_HARDFLOAT 1
# define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN
#else
@@ -404,12 +399,16 @@ float64_gen2(float64 xa, float64 xb, float_status *s,
/*
* Classify a floating point number. Everything above float_class_qnan
* is a NaN so cls >= float_class_qnan is any NaN.
+ *
+ * Note that we canonicalize denormals, so most code should treat
+ * class_normal and class_denormal identically.
*/
typedef enum __attribute__ ((__packed__)) {
float_class_unclassified,
float_class_zero,
float_class_normal,
+ float_class_denormal, /* input was a non-squashed denormal */
float_class_inf,
float_class_qnan, /* all NaNs from here */
float_class_snan,
@@ -420,12 +419,14 @@ typedef enum __attribute__ ((__packed__)) {
enum {
float_cmask_zero = float_cmask(float_class_zero),
float_cmask_normal = float_cmask(float_class_normal),
+ float_cmask_denormal = float_cmask(float_class_denormal),
float_cmask_inf = float_cmask(float_class_inf),
float_cmask_qnan = float_cmask(float_class_qnan),
float_cmask_snan = float_cmask(float_class_snan),
float_cmask_infzero = float_cmask_zero | float_cmask_inf,
float_cmask_anynan = float_cmask_qnan | float_cmask_snan,
+ float_cmask_anynorm = float_cmask_normal | float_cmask_denormal,
};
/* Flags for parts_minmax. */
@@ -460,6 +461,20 @@ static inline __attribute__((unused)) bool is_qnan(FloatClass c)
}
/*
+ * Return true if the float_cmask has only normals in it
+ * (including input denormals that were canonicalized)
+ */
+static inline bool cmask_is_only_normals(int cmask)
+{
+ return !(cmask & ~float_cmask_anynorm);
+}
+
+static inline bool is_anynorm(FloatClass c)
+{
+ return float_cmask(c) & float_cmask_anynorm;
+}
+
+/*
* Structure holding all of the decomposed parts of a float.
* The exponent is unbiased and the fraction is normalized.
*
@@ -517,7 +532,8 @@ typedef struct {
* round_mask: bits below lsb which must be rounded
* The following optional modifiers are available:
* arm_althp: handle ARM Alternative Half Precision
- * m68k_denormal: explicit integer bit for extended precision may be 1
+ * has_explicit_bit: has an explicit integer bit; this affects whether
+ * the float_status floatx80_behaviour handling applies
*/
typedef struct {
int exp_size;
@@ -527,7 +543,7 @@ typedef struct {
int frac_size;
int frac_shift;
bool arm_althp;
- bool m68k_denormal;
+ bool has_explicit_bit;
uint64_t round_mask;
} FloatFmt;
@@ -580,9 +596,7 @@ static const FloatFmt floatx80_params[3] = {
[floatx80_precision_d] = { FLOATX80_PARAMS(52) },
[floatx80_precision_x] = {
FLOATX80_PARAMS(64),
-#ifdef TARGET_M68K
- .m68k_denormal = true,
-#endif
+ .has_explicit_bit = true,
},
};
@@ -789,15 +803,15 @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
#define parts_mul(A, B, S) \
PARTS_GENERIC_64_128(mul, A)(A, B, S)
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
- FloatParts64 *c, int flags,
- float_status *s);
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
- FloatParts128 *c, int flags,
- float_status *s);
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
+ FloatParts64 *c, int scale,
+ int flags, float_status *s);
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
+ FloatParts128 *c, int scale,
+ int flags, float_status *s);
-#define parts_muladd(A, B, C, Z, S) \
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
float_status *s);
@@ -1729,6 +1743,7 @@ static float64 float64r32_round_pack_canonical(FloatParts64 *p,
*/
switch (p->cls) {
case float_class_normal:
+ case float_class_denormal:
if (unlikely(p->exp == 0)) {
/*
* The result is denormal for float32, but can be represented
@@ -1789,7 +1804,7 @@ static bool floatx80_unpack_canonical(FloatParts128 *p, floatx80 f,
g_assert_not_reached();
}
- if (unlikely(floatx80_invalid_encoding(f))) {
+ if (unlikely(floatx80_invalid_encoding(f, s))) {
float_raise(float_flag_invalid, s);
return false;
}
@@ -1817,6 +1832,7 @@ static floatx80 floatx80_round_pack_canonical(FloatParts128 *p,
switch (p->cls) {
case float_class_normal:
+ case float_class_denormal:
if (s->floatx80_rounding_precision == floatx80_precision_x) {
parts_uncanon_normal(p, s, fmt);
frac = p->frac_hi;
@@ -1838,7 +1854,8 @@ static floatx80 floatx80_round_pack_canonical(FloatParts128 *p,
case float_class_inf:
/* x86 and m68k differ in the setting of the integer bit. */
- frac = floatx80_infinity_low;
+ frac = s->floatx80_behaviour & floatx80_default_inf_int_bit_is_zero ?
+ 0 : (1ULL << 63);
exp = fmt->exp_max;
break;
@@ -2212,43 +2229,50 @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
* Fused multiply-add
*/
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
- int flags, float_status *status)
+float16 QEMU_FLATTEN
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
+ int scale, int flags, float_status *status)
{
FloatParts64 pa, pb, pc, *pr;
float16_unpack_canonical(&pa, a, status);
float16_unpack_canonical(&pb, b, status);
float16_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
return float16_round_pack_canonical(pr, status);
}
-static float32 QEMU_SOFTFLOAT_ATTR
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
- float_status *status)
+float16 float16_muladd(float16 a, float16 b, float16 c,
+ int flags, float_status *status)
+{
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
+}
+
+float32 QEMU_SOFTFLOAT_ATTR
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
+ int scale, int flags, float_status *status)
{
FloatParts64 pa, pb, pc, *pr;
float32_unpack_canonical(&pa, a, status);
float32_unpack_canonical(&pb, b, status);
float32_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
return float32_round_pack_canonical(pr, status);
}
-static float64 QEMU_SOFTFLOAT_ATTR
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
- float_status *status)
+float64 QEMU_SOFTFLOAT_ATTR
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
+ int scale, int flags, float_status *status)
{
FloatParts64 pa, pb, pc, *pr;
float64_unpack_canonical(&pa, a, status);
float64_unpack_canonical(&pb, b, status);
float64_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
return float64_round_pack_canonical(pr, status);
}
@@ -2267,7 +2291,7 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
- if (unlikely(flags & float_muladd_halve_result)) {
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
goto soft;
}
@@ -2323,7 +2347,7 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
return ur.s;
soft:
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
}
float64 QEMU_FLATTEN
@@ -2338,9 +2362,6 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
- if (unlikely(flags & float_muladd_halve_result)) {
- goto soft;
- }
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
@@ -2394,7 +2415,7 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
return ur.s;
soft:
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
}
float64 float64r32_muladd(float64 a, float64 b, float64 c,
@@ -2405,7 +2426,7 @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
float64_unpack_canonical(&pa, a, status);
float64_unpack_canonical(&pb, b, status);
float64_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
return float64r32_round_pack_canonical(pr, status);
}
@@ -2418,7 +2439,7 @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
bfloat16_unpack_canonical(&pa, a, status);
bfloat16_unpack_canonical(&pb, b, status);
bfloat16_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
return bfloat16_round_pack_canonical(pr, status);
}
@@ -2431,7 +2452,7 @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
float128_unpack_canonical(&pa, a, status);
float128_unpack_canonical(&pb, b, status);
float128_unpack_canonical(&pc, c, status);
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
return float128_round_pack_canonical(pr, status);
}
@@ -2692,6 +2713,9 @@ static void parts_float_to_ahp(FloatParts64 *a, float_status *s)
float16_params_ahp.frac_size + 1);
break;
+ case float_class_denormal:
+ float_raise(float_flag_input_denormal_used, s);
+ break;
case float_class_normal:
case float_class_zero:
break;
@@ -2706,6 +2730,9 @@ static void parts64_float_to_float(FloatParts64 *a, float_status *s)
if (is_nan(a->cls)) {
parts_return_nan(a, s);
}
+ if (a->cls == float_class_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
}
static void parts128_float_to_float(FloatParts128 *a, float_status *s)
@@ -2713,6 +2740,9 @@ static void parts128_float_to_float(FloatParts128 *a, float_status *s)
if (is_nan(a->cls)) {
parts_return_nan(a, s);
}
+ if (a->cls == float_class_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
}
#define parts_float_to_float(P, S) \
@@ -2725,12 +2755,21 @@ static void parts_float_to_float_narrow(FloatParts64 *a, FloatParts128 *b,
a->sign = b->sign;
a->exp = b->exp;
- if (a->cls == float_class_normal) {
+ switch (a->cls) {
+ case float_class_denormal:
+ float_raise(float_flag_input_denormal_used, s);
+ /* fall through */
+ case float_class_normal:
frac_truncjam(a, b);
- } else if (is_nan(a->cls)) {
+ break;
+ case float_class_snan:
+ case float_class_qnan:
/* Discard the low bits of the NaN. */
a->frac = b->frac_hi;
parts_return_nan(a, s);
+ break;
+ default:
+ break;
}
}
@@ -2745,6 +2784,9 @@ static void parts_float_to_float_widen(FloatParts128 *a, FloatParts64 *b,
if (is_nan(a->cls)) {
parts_return_nan(a, s);
}
+ if (a->cls == float_class_denormal) {
+ float_raise(float_flag_input_denormal_used, s);
+ }
}
float32 float16_to_float32(float16 a, bool ieee, float_status *s)
@@ -3214,6 +3256,7 @@ static Int128 float128_to_int128_scalbn(float128 a, FloatRoundMode rmode,
return int128_zero();
case float_class_normal:
+ case float_class_denormal:
if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) {
flags = float_flag_inexact;
}
@@ -3641,6 +3684,7 @@ static Int128 float128_to_uint128_scalbn(float128 a, FloatRoundMode rmode,
return int128_zero();
case float_class_normal:
+ case float_class_denormal:
if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) {
flags = float_flag_inexact;
if (p.cls == float_class_zero) {
@@ -4382,7 +4426,11 @@ float32_hs_compare(float32 xa, float32 xb, float_status *s, bool is_quiet)
goto soft;
}
- float32_input_flush2(&ua.s, &ub.s, s);
+ if (unlikely(float32_is_denormal(ua.s) || float32_is_denormal(ub.s))) {
+ /* We may need to set the input_denormal_used flag */
+ goto soft;
+ }
+
if (isgreaterequal(ua.h, ub.h)) {
if (isgreater(ua.h, ub.h)) {
return float_relation_greater;
@@ -4432,7 +4480,11 @@ float64_hs_compare(float64 xa, float64 xb, float_status *s, bool is_quiet)
goto soft;
}
- float64_input_flush2(&ua.s, &ub.s, s);
+ if (unlikely(float64_is_denormal(ua.s) || float64_is_denormal(ub.s))) {
+ /* We may need to set the input_denormal_used flag */
+ goto soft;
+ }
+
if (isgreaterequal(ua.h, ub.h)) {
if (isgreater(ua.h, ub.h)) {
return float_relation_greater;
@@ -4844,7 +4896,7 @@ float128 float128_silence_nan(float128 a, float_status *status)
static bool parts_squash_denormal(FloatParts64 p, float_status *status)
{
if (p.exp == 0 && p.frac != 0) {
- float_raise(float_flag_input_denormal, status);
+ float_raise(float_flag_input_denormal_flushed, status);
return true;
}
@@ -4921,6 +4973,25 @@ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr,
}
/*----------------------------------------------------------------------------
+| Takes two extended double-precision floating-point values `a' and `b', one
+| of which is a NaN, and returns the appropriate NaN result. If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status)
+{
+ FloatParts128 pa, pb, *pr;
+
+ if (!floatx80_unpack_canonical(&pa, a, status) ||
+ !floatx80_unpack_canonical(&pb, b, status)) {
+ return floatx80_default_nan(status);
+ }
+
+ pr = parts_pick_nan(&pa, &pb, status);
+ return floatx80_round_pack_canonical(pr, status);
+}
+
+/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and extended significand formed by the concatenation of `zSig0' and `zSig1',
| and returns the proper extended double-precision floating-point value
@@ -4994,7 +5065,7 @@ floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign,
}
if ( zExp <= 0 ) {
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ float_raise(float_flag_output_denormal_flushed, status);
return packFloatx80(zSign, 0, 0);
}
isTiny = status->tininess_before_rounding
@@ -5068,9 +5139,7 @@ floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign,
) {
return packFloatx80( zSign, 0x7FFE, ~ roundMask );
}
- return packFloatx80(zSign,
- floatx80_infinity_high,
- floatx80_infinity_low);
+ return floatx80_default_inf(zSign, status);
}
if ( zExp <= 0 ) {
isTiny = status->tininess_before_rounding
@@ -5208,6 +5277,8 @@ float32 float32_exp2(float32 a, float_status *status)
float32_unpack_canonical(&xp, a, status);
if (unlikely(xp.cls != float_class_normal)) {
switch (xp.cls) {
+ case float_class_denormal:
+ break;
case float_class_snan:
case float_class_qnan:
parts_return_nan(&xp, status);
@@ -5217,9 +5288,8 @@ float32 float32_exp2(float32 a, float_status *status)
case float_class_zero:
return float32_one;
default:
- break;
+ g_assert_not_reached();
}
- g_assert_not_reached();
}
float_raise(float_flag_inexact, status);
@@ -5230,8 +5300,9 @@ float32 float32_exp2(float32 a, float_status *status)
float64_unpack_canonical(&rp, float64_one, status);
for (i = 0 ; i < 15 ; i++) {
+
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
xnp = *parts_mul(&xnp, &xp, status);
}
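
Note on the softfloat hunks above: the hardfloat fast paths for the float32/float64 comparisons used to flush denormal inputs inline; with the input_denormal flags split into the *_flushed/*_used variants, that bookkeeping only happens on the soft path, so any denormal operand now takes "goto soft". The denormal test itself is just the IEEE-754 subnormal check that parts_squash_denormal() performs on the unpacked value (exponent field all zeroes, non-zero fraction). A minimal, self-contained sketch of that check on raw binary32 bits, for reference only:

    #include <stdbool.h>
    #include <stdint.h>

    /* A binary32 value is denormal (subnormal) when its 8-bit exponent field
     * is zero and its 23-bit fraction field is non-zero -- the same test the
     * patched parts_squash_denormal() applies to the unpacked FloatParts64. */
    static bool f32_bits_are_denormal(uint32_t bits)
    {
        uint32_t exp  = (bits >> 23) & 0xffu;   /* bits 30..23 */
        uint32_t frac = bits & 0x7fffffu;       /* bits 22..0  */

        return exp == 0 && frac != 0;
    }
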
diff --git a/fsdev/9p-iov-marshal.c b/fsdev/9p-iov-marshal.c
index a1c9bed..0c5a1a0 100644
--- a/fsdev/9p-iov-marshal.c
+++ b/fsdev/9p-iov-marshal.c
@@ -84,9 +84,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break;
}
case 'w': {
- uint16_t val, *valp;
+ uint16_t val = 0, *valp;
valp = va_arg(ap, uint16_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+ if (copied <= 0) {
+ break;
+ }
if (bswap) {
*valp = le16_to_cpu(val);
} else {
@@ -95,9 +98,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break;
}
case 'd': {
- uint32_t val, *valp;
+ uint32_t val = 0, *valp;
valp = va_arg(ap, uint32_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+ if (copied <= 0) {
+ break;
+ }
if (bswap) {
*valp = le32_to_cpu(val);
} else {
@@ -106,9 +112,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break;
}
case 'q': {
- uint64_t val, *valp;
+ uint64_t val = 0, *valp;
valp = va_arg(ap, uint64_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+ if (copied <= 0) {
+ break;
+ }
if (bswap) {
*valp = le64_to_cpu(val);
} else {
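
The 'w', 'd' and 'q' cases above now start from a zero-initialised val and bail out before the byte-swap when v9fs_unpack() reports a short or failed read, so *valp can no longer be written from uninitialised stack data. A self-contained sketch of the hardened pattern (the helper name unpack_u32() is illustrative and not part of the 9p code):

    #include <stdint.h>
    #include <string.h>
    #include <sys/types.h>

    /* Copy the decoded value out only after the unpack step fully succeeds;
     * the local starts from a defined value so no error path leaks garbage. */
    static ssize_t unpack_u32(const uint8_t *buf, size_t len, uint32_t *valp)
    {
        uint32_t val = 0;

        if (len < sizeof(val)) {
            return -1;              /* short read: *valp left untouched */
        }
        memcpy(&val, buf, sizeof(val));
        *valp = val;
        return sizeof(val);
    }
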
diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h
index 4997677..b9dae8c 100644
--- a/fsdev/file-op-9p.h
+++ b/fsdev/file-op-9p.h
@@ -129,6 +129,8 @@ struct FileOperations {
int (*chown)(FsContext *, V9fsPath *, FsCred *);
int (*mknod)(FsContext *, V9fsPath *, const char *, FsCred *);
int (*utimensat)(FsContext *, V9fsPath *, const struct timespec *);
+ int (*futimens)(FsContext *ctx, int fid_type, V9fsFidOpenState *fs,
+ const struct timespec *times);
int (*remove)(FsContext *, const char *);
int (*symlink)(FsContext *, const char *, V9fsPath *,
const char *, FsCred *);
@@ -152,6 +154,8 @@ struct FileOperations {
int (*fstat)(FsContext *, int, V9fsFidOpenState *, struct stat *);
int (*rename)(FsContext *, const char *, const char *);
int (*truncate)(FsContext *, V9fsPath *, off_t);
+ int (*ftruncate)(FsContext *ctx, int fid_type, V9fsFidOpenState *fs,
+ off_t size);
int (*fsync)(FsContext *, int, V9fsFidOpenState *, int);
int (*statfs)(FsContext *s, V9fsPath *path, struct statfs *stbuf);
ssize_t (*lgetxattr)(FsContext *, V9fsPath *,
@@ -164,6 +168,7 @@ struct FileOperations {
int (*renameat)(FsContext *ctx, V9fsPath *olddir, const char *old_name,
V9fsPath *newdir, const char *new_name);
int (*unlinkat)(FsContext *ctx, V9fsPath *dir, const char *name, int flags);
+ bool (*has_valid_file_handle)(int fid_type, V9fsFidOpenState *fs);
};
#endif
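
The new FileOperations hooks above let 9p request handlers operate on the open file handle when one exists (futimens/ftruncate plus the has_valid_file_handle probe) and fall back to the existing path-based utimensat/truncate callbacks otherwise. A hedged sketch of that dispatch, assuming the FileOperations table is reachable as ctx->ops as elsewhere in fsdev; the helper name and surrounding plumbing are illustrative, only the callback signatures come from the header:

    /* Illustrative only: prefer the fd-based callback when the fid has a
     * usable open handle, otherwise use the path-based fallback. */
    static int do_truncate(FsContext *ctx, int fid_type, V9fsFidOpenState *fs,
                           V9fsPath *path, off_t size)
    {
        if (ctx->ops->has_valid_file_handle &&
            ctx->ops->has_valid_file_handle(fid_type, fs) &&
            ctx->ops->ftruncate) {
            return ctx->ops->ftruncate(ctx, fid_type, fs, size);
        }
        return ctx->ops->truncate(ctx, path, size);
    }
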
diff --git a/fsdev/meson.build b/fsdev/meson.build
index e20d725..c751d8c 100644
--- a/fsdev/meson.build
+++ b/fsdev/meson.build
@@ -8,11 +8,3 @@ fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files(
if host_os in ['linux', 'darwin']
system_ss.add_all(fsdev_ss)
endif
-
-if have_virtfs_proxy_helper
- executable('virtfs-proxy-helper',
- files('virtfs-proxy-helper.c', '9p-marshal.c', '9p-iov-marshal.c'),
- dependencies: [qemuutil, libattr, libcap_ng],
- install: true,
- install_dir: get_option('libexecdir'))
-endif
diff --git a/fsdev/qemu-fsdev.c b/fsdev/qemu-fsdev.c
index f5c953a..57877da 100644
--- a/fsdev/qemu-fsdev.c
+++ b/fsdev/qemu-fsdev.c
@@ -89,17 +89,6 @@ static FsDriverTable FsDrivers[] = {
NULL
},
},
- {
- .name = "proxy",
- .ops = &proxy_ops,
- .opts = (const char * []) {
- COMMON_FS_DRIVER_OPTIONS,
- "socket",
- "sock_fd",
- "writeout",
- NULL
- },
- },
};
static int validate_opt(void *opaque, const char *name, const char *value,
@@ -133,14 +122,6 @@ int qemu_fsdev_add(QemuOpts *opts, Error **errp)
}
if (fsdriver) {
- if (strncmp(fsdriver, "proxy", 5) == 0) {
- warn_report(
- "'-fsdev proxy' and '-virtfs proxy' are deprecated, use "
- "'local' instead of 'proxy, or consider deploying virtiofsd "
- "as alternative to 9p"
- );
- }
-
for (i = 0; i < ARRAY_SIZE(FsDrivers); i++) {
if (strcmp(FsDrivers[i].name, fsdriver) == 0) {
break;
diff --git a/fsdev/qemu-fsdev.h b/fsdev/qemu-fsdev.h
index 52a5397..731f140 100644
--- a/fsdev/qemu-fsdev.h
+++ b/fsdev/qemu-fsdev.h
@@ -18,5 +18,4 @@ int qemu_fsdev_add(QemuOpts *opts, Error **errp);
FsDriverEntry *get_fsdev_fsentry(char *id);
extern FileOperations local_ops;
extern FileOperations synth_ops;
-extern FileOperations proxy_ops;
#endif
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
deleted file mode 100644
index 144aaf5..0000000
--- a/fsdev/virtfs-proxy-helper.c
+++ /dev/null
@@ -1,1193 +0,0 @@
-/*
- * Helper for QEMU Proxy FS Driver
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- * M. Mohan Kumar <mohan@in.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-/*
- * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be
- * removed in a future version of QEMU!
- */
-
-#include "qemu/osdep.h"
-#include <glib/gstdio.h>
-#include <sys/resource.h>
-#include <getopt.h>
-#include <syslog.h>
-#include <sys/fsuid.h>
-#include <sys/vfs.h>
-#include <sys/ioctl.h>
-#include <linux/fs.h>
-#ifdef CONFIG_LINUX_MAGIC_H
-#include <linux/magic.h>
-#endif
-#include <cap-ng.h>
-#include "qemu/sockets.h"
-#include "qemu/xattr.h"
-#include "9p-iov-marshal.h"
-#include "hw/9pfs/9p-proxy.h"
-#include "hw/9pfs/9p-util.h"
-#include "fsdev/9p-iov-marshal.h"
-
-#define PROGNAME "virtfs-proxy-helper"
-
-#ifndef XFS_SUPER_MAGIC
-#define XFS_SUPER_MAGIC 0x58465342
-#endif
-#ifndef EXT2_SUPER_MAGIC
-#define EXT2_SUPER_MAGIC 0xEF53
-#endif
-#ifndef REISERFS_SUPER_MAGIC
-#define REISERFS_SUPER_MAGIC 0x52654973
-#endif
-#ifndef BTRFS_SUPER_MAGIC
-#define BTRFS_SUPER_MAGIC 0x9123683E
-#endif
-
-static const struct option helper_opts[] = {
- {"fd", required_argument, NULL, 'f'},
- {"path", required_argument, NULL, 'p'},
- {"nodaemon", no_argument, NULL, 'n'},
- {"socket", required_argument, NULL, 's'},
- {"uid", required_argument, NULL, 'u'},
- {"gid", required_argument, NULL, 'g'},
- {},
-};
-
-static bool is_daemon;
-static bool get_version; /* IOC getversion IOCTL supported */
-static char *prog_name;
-
-static void G_GNUC_PRINTF(2, 3) do_log(int loglevel, const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- if (is_daemon) {
- vsyslog(LOG_CRIT, format, ap);
- } else {
- vfprintf(stderr, format, ap);
- }
- va_end(ap);
-}
-
-static void do_perror(const char *string)
-{
- if (is_daemon) {
- syslog(LOG_CRIT, "%s:%s", string, strerror(errno));
- } else {
- fprintf(stderr, "%s:%s\n", string, strerror(errno));
- }
-}
-
-static int init_capabilities(void)
-{
- /* helper needs following capabilities only */
- int cap_list[] = {
- CAP_CHOWN,
- CAP_DAC_OVERRIDE,
- CAP_FOWNER,
- CAP_FSETID,
- CAP_SETGID,
- CAP_MKNOD,
- CAP_SETUID,
- };
- int i;
-
- capng_clear(CAPNG_SELECT_BOTH);
- for (i = 0; i < ARRAY_SIZE(cap_list); i++) {
- if (capng_update(CAPNG_ADD, CAPNG_EFFECTIVE | CAPNG_PERMITTED,
- cap_list[i]) < 0) {
- do_perror("capng_update");
- return -1;
- }
- }
- if (capng_apply(CAPNG_SELECT_BOTH) < 0) {
- do_perror("capng_apply");
- return -1;
- }
-
- /* Prepare effective set for setugid. */
- for (i = 0; i < ARRAY_SIZE(cap_list); i++) {
- if (cap_list[i] == CAP_DAC_OVERRIDE) {
- continue;
- }
-
- if (capng_update(CAPNG_DROP, CAPNG_EFFECTIVE,
- cap_list[i]) < 0) {
- do_perror("capng_update");
- return -1;
- }
- }
- return 0;
-}
-
-static int socket_read(int sockfd, void *buff, ssize_t size)
-{
- ssize_t retval, total = 0;
-
- while (size) {
- retval = read(sockfd, buff, size);
- if (retval == 0) {
- return -EIO;
- }
- if (retval < 0) {
- if (errno == EINTR) {
- continue;
- }
- return -errno;
- }
- size -= retval;
- buff += retval;
- total += retval;
- }
- return total;
-}
-
-static int socket_write(int sockfd, void *buff, ssize_t size)
-{
- ssize_t retval, total = 0;
-
- while (size) {
- retval = write(sockfd, buff, size);
- if (retval < 0) {
- if (errno == EINTR) {
- continue;
- }
- return -errno;
- }
- size -= retval;
- buff += retval;
- total += retval;
- }
- return total;
-}
-
-static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
-{
- int retval;
-
- /*
- * read the request header.
- */
- iovec->iov_len = 0;
- retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
- if (retval < 0) {
- return retval;
- }
- iovec->iov_len = PROXY_HDR_SZ;
- retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
- if (retval < 0) {
- return retval;
- }
- /*
- * We can't process message.size > PROXY_MAX_IO_SZ.
- * Treat it as fatal error
- */
- if (header->size > PROXY_MAX_IO_SZ) {
- return -ENOBUFS;
- }
- retval = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, header->size);
- if (retval < 0) {
- return retval;
- }
- iovec->iov_len += header->size;
- return 0;
-}
-
-static int send_fd(int sockfd, int fd)
-{
- struct msghdr msg;
- struct iovec iov;
- int retval, data;
- struct cmsghdr *cmsg;
- union MsgControl msg_control;
-
- iov.iov_base = &data;
- iov.iov_len = sizeof(data);
-
- memset(&msg, 0, sizeof(msg));
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- /* No ancillary data on error */
- if (fd < 0) {
- /* fd is really negative errno if the request failed */
- data = fd;
- } else {
- data = V9FS_FD_VALID;
- msg.msg_control = &msg_control;
- msg.msg_controllen = sizeof(msg_control);
-
- cmsg = &msg_control.cmsg;
- cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
- }
-
- do {
- retval = sendmsg(sockfd, &msg, 0);
- } while (retval < 0 && errno == EINTR);
- if (fd >= 0) {
- close(fd);
- }
- if (retval < 0) {
- return retval;
- }
- return 0;
-}
-
-static int send_status(int sockfd, struct iovec *iovec, int status)
-{
- ProxyHeader header;
- int retval, msg_size;
-
- if (status < 0) {
- header.type = T_ERROR;
- } else {
- header.type = T_SUCCESS;
- }
- header.size = sizeof(status);
- /*
- * marshal the return status. We don't check error.
- * because we are sure we have enough space for the status
- */
- msg_size = proxy_marshal(iovec, 0, "ddd", header.type,
- header.size, status);
- if (msg_size < 0) {
- return msg_size;
- }
- retval = socket_write(sockfd, iovec->iov_base, msg_size);
- if (retval < 0) {
- return retval;
- }
- return 0;
-}
-
-/*
- * from man 7 capabilities, section
- * Effect of User ID Changes on Capabilities:
- * If the effective user ID is changed from nonzero to 0, then the permitted
- * set is copied to the effective set. If the effective user ID is changed
- * from 0 to nonzero, then all capabilities are are cleared from the effective
- * set.
- *
- * The setfsuid/setfsgid man pages warn that changing the effective user ID may
- * expose the program to unwanted signals, but this is not true anymore: for an
- * unprivileged (without CAP_KILL) program to send a signal, the real or
- * effective user ID of the sending process must equal the real or saved user
- * ID of the target process. Even when dropping privileges, it is enough to
- * keep the saved UID to a "privileged" value and virtfs-proxy-helper won't
- * be exposed to signals. So just use setresuid/setresgid.
- */
-static int setugid(int uid, int gid, int *suid, int *sgid)
-{
- int retval;
-
- *suid = geteuid();
- *sgid = getegid();
-
- if (setresgid(-1, gid, *sgid) == -1) {
- return -errno;
- }
-
- if (setresuid(-1, uid, *suid) == -1) {
- retval = -errno;
- goto err_sgid;
- }
-
- if (uid == 0 && gid == 0) {
- /* Linux has already copied the permitted set to the effective set. */
- return 0;
- }
-
- /*
- * All capabilities have been cleared from the effective set. However
- * we still need DAC_OVERRIDE because we don't change supplementary
- * group ids, and hence may be subject to DAC rules. init_capabilities
- * left the set of capabilities that we want in libcap-ng's state.
- */
- if (capng_apply(CAPNG_SELECT_CAPS) < 0) {
- retval = -errno;
- do_perror("capng_apply");
- goto err_suid;
- }
- return 0;
-
-err_suid:
- if (setresuid(-1, *suid, *suid) == -1) {
- abort();
- }
-err_sgid:
- if (setresgid(-1, *sgid, *sgid) == -1) {
- abort();
- }
- return retval;
-}
-
-/*
- * This is used to reset the ugid back with the saved values
- * There is nothing much we can do checking error values here.
- */
-static void resetugid(int suid, int sgid)
-{
- if (setresgid(-1, sgid, sgid) == -1) {
- abort();
- }
- if (setresuid(-1, suid, suid) == -1) {
- abort();
- }
-}
-
-/*
- * Open regular file or directory. Attempts to open any special file are
- * rejected.
- *
- * returns file descriptor or -1 on error
- */
-static int open_regular(const char *pathname, int flags, mode_t mode)
-{
- int fd;
-
- fd = open(pathname, flags, mode);
- if (fd < 0) {
- return fd;
- }
-
- if (close_if_special_file(fd) < 0) {
- return -1;
- }
-
- return fd;
-}
-
-/*
- * send response in two parts
- * 1) ProxyHeader
- * 2) Response or error status
- * This function should be called with marshaled response
- * send_response constructs header part and error part only.
- * send response sends {ProxyHeader,Response} if the request was success
- * otherwise sends {ProxyHeader,error status}
- */
-static int send_response(int sock, struct iovec *iovec, int size)
-{
- int retval;
- ProxyHeader header;
-
- /*
- * If response size exceeds available iovec->iov_len,
- * we return ENOBUFS
- */
- if (size > PROXY_MAX_IO_SZ) {
- size = -ENOBUFS;
- }
-
- if (size < 0) {
- /*
- * In case of error we would not have got the error encoded
- * already so encode the error here.
- */
- header.type = T_ERROR;
- header.size = sizeof(size);
- proxy_marshal(iovec, PROXY_HDR_SZ, "d", size);
- } else {
- header.type = T_SUCCESS;
- header.size = size;
- }
- proxy_marshal(iovec, 0, "dd", header.type, header.size);
- retval = socket_write(sock, iovec->iov_base, header.size + PROXY_HDR_SZ);
- if (retval < 0) {
- return retval;
- }
- return 0;
-}
-
-/*
- * gets generation number
- * returns -errno on failure and sizeof(generation number) on success
- */
-static int do_getversion(struct iovec *iovec, struct iovec *out_iovec)
-{
- uint64_t version;
- int retval = -ENOTTY;
-#ifdef FS_IOC_GETVERSION
- int fd;
- V9fsString path;
-#endif
-
-
- /* no need to issue ioctl */
- if (!get_version) {
- version = 0;
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "q", version);
- return retval;
- }
-#ifdef FS_IOC_GETVERSION
- retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "s", &path);
- if (retval < 0) {
- return retval;
- }
-
- fd = open(path.data, O_RDONLY);
- if (fd < 0) {
- retval = -errno;
- goto err_out;
- }
- if (ioctl(fd, FS_IOC_GETVERSION, &version) < 0) {
- retval = -errno;
- } else {
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "q", version);
- }
- close(fd);
-err_out:
- v9fs_string_free(&path);
-#endif
- return retval;
-}
-
-static int do_getxattr(int type, struct iovec *iovec, struct iovec *out_iovec)
-{
- int size = 0, offset, retval;
- V9fsString path, name, xattr;
-
- v9fs_string_init(&xattr);
- v9fs_string_init(&path);
- retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "ds", &size, &path);
- if (retval < 0) {
- return retval;
- }
- offset = PROXY_HDR_SZ + retval;
-
- if (size) {
- xattr.data = g_malloc(size);
- xattr.size = size;
- }
- switch (type) {
- case T_LGETXATTR:
- v9fs_string_init(&name);
- retval = proxy_unmarshal(iovec, offset, "s", &name);
- if (retval > 0) {
- retval = lgetxattr(path.data, name.data, xattr.data, size);
- if (retval < 0) {
- retval = -errno;
- } else {
- xattr.size = retval;
- }
- }
- v9fs_string_free(&name);
- break;
- case T_LLISTXATTR:
- retval = llistxattr(path.data, xattr.data, size);
- if (retval < 0) {
- retval = -errno;
- } else {
- xattr.size = retval;
- }
- break;
- }
- if (retval < 0) {
- goto err_out;
- }
-
- if (!size) {
- proxy_marshal(out_iovec, PROXY_HDR_SZ, "d", retval);
- retval = sizeof(retval);
- } else {
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "s", &xattr);
- }
-err_out:
- v9fs_string_free(&xattr);
- v9fs_string_free(&path);
- return retval;
-}
-
-static void stat_to_prstat(ProxyStat *pr_stat, struct stat *stat)
-{
- memset(pr_stat, 0, sizeof(*pr_stat));
- pr_stat->st_dev = stat->st_dev;
- pr_stat->st_ino = stat->st_ino;
- pr_stat->st_nlink = stat->st_nlink;
- pr_stat->st_mode = stat->st_mode;
- pr_stat->st_uid = stat->st_uid;
- pr_stat->st_gid = stat->st_gid;
- pr_stat->st_rdev = stat->st_rdev;
- pr_stat->st_size = stat->st_size;
- pr_stat->st_blksize = stat->st_blksize;
- pr_stat->st_blocks = stat->st_blocks;
- pr_stat->st_atim_sec = stat->st_atim.tv_sec;
- pr_stat->st_atim_nsec = stat->st_atim.tv_nsec;
- pr_stat->st_mtim_sec = stat->st_mtim.tv_sec;
- pr_stat->st_mtim_nsec = stat->st_mtim.tv_nsec;
- pr_stat->st_ctim_sec = stat->st_ctim.tv_sec;
- pr_stat->st_ctim_nsec = stat->st_ctim.tv_nsec;
-}
-
-static void statfs_to_prstatfs(ProxyStatFS *pr_stfs, struct statfs *stfs)
-{
- memset(pr_stfs, 0, sizeof(*pr_stfs));
- pr_stfs->f_type = stfs->f_type;
- pr_stfs->f_bsize = stfs->f_bsize;
- pr_stfs->f_blocks = stfs->f_blocks;
- pr_stfs->f_bfree = stfs->f_bfree;
- pr_stfs->f_bavail = stfs->f_bavail;
- pr_stfs->f_files = stfs->f_files;
- pr_stfs->f_ffree = stfs->f_ffree;
- pr_stfs->f_fsid[0] = stfs->f_fsid.__val[0];
- pr_stfs->f_fsid[1] = stfs->f_fsid.__val[1];
- pr_stfs->f_namelen = stfs->f_namelen;
- pr_stfs->f_frsize = stfs->f_frsize;
-}
-
-/*
- * Gets stat/statfs information and packs in out_iovec structure
- * on success returns number of bytes packed in out_iovec structure
- * otherwise returns -errno
- */
-static int do_stat(int type, struct iovec *iovec, struct iovec *out_iovec)
-{
- int retval;
- V9fsString path;
- ProxyStat pr_stat;
- ProxyStatFS pr_stfs;
- struct stat st_buf;
- struct statfs stfs_buf;
-
- v9fs_string_init(&path);
- retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "s", &path);
- if (retval < 0) {
- return retval;
- }
-
- switch (type) {
- case T_LSTAT:
- retval = lstat(path.data, &st_buf);
- if (retval < 0) {
- retval = -errno;
- } else {
- stat_to_prstat(&pr_stat, &st_buf);
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ,
- "qqqdddqqqqqqqqqq", pr_stat.st_dev,
- pr_stat.st_ino, pr_stat.st_nlink,
- pr_stat.st_mode, pr_stat.st_uid,
- pr_stat.st_gid, pr_stat.st_rdev,
- pr_stat.st_size, pr_stat.st_blksize,
- pr_stat.st_blocks,
- pr_stat.st_atim_sec, pr_stat.st_atim_nsec,
- pr_stat.st_mtim_sec, pr_stat.st_mtim_nsec,
- pr_stat.st_ctim_sec, pr_stat.st_ctim_nsec);
- }
- break;
- case T_STATFS:
- retval = statfs(path.data, &stfs_buf);
- if (retval < 0) {
- retval = -errno;
- } else {
- statfs_to_prstatfs(&pr_stfs, &stfs_buf);
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ,
- "qqqqqqqqqqq", pr_stfs.f_type,
- pr_stfs.f_bsize, pr_stfs.f_blocks,
- pr_stfs.f_bfree, pr_stfs.f_bavail,
- pr_stfs.f_files, pr_stfs.f_ffree,
- pr_stfs.f_fsid[0], pr_stfs.f_fsid[1],
- pr_stfs.f_namelen, pr_stfs.f_frsize);
- }
- break;
- }
- v9fs_string_free(&path);
- return retval;
-}
-
-static int do_readlink(struct iovec *iovec, struct iovec *out_iovec)
-{
- char *buffer;
- int size, retval;
- V9fsString target, path;
-
- v9fs_string_init(&path);
- retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sd", &path, &size);
- if (retval < 0) {
- v9fs_string_free(&path);
- return retval;
- }
- buffer = g_malloc(size);
- v9fs_string_init(&target);
- retval = readlink(path.data, buffer, size - 1);
- if (retval > 0) {
- buffer[retval] = '\0';
- v9fs_string_sprintf(&target, "%s", buffer);
- retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "s", &target);
- } else {
- retval = -errno;
- }
- g_free(buffer);
- v9fs_string_free(&target);
- v9fs_string_free(&path);
- return retval;
-}
-
-/*
- * create other filesystem objects and send 0 on success
- * return -errno on error
- */
-static int do_create_others(int type, struct iovec *iovec)
-{
- dev_t rdev;
- int retval = 0;
- int offset = PROXY_HDR_SZ;
- V9fsString oldpath, path;
- int mode, uid, gid, cur_uid, cur_gid;
-
- v9fs_string_init(&path);
- v9fs_string_init(&oldpath);
-
- retval = proxy_unmarshal(iovec, offset, "dd", &uid, &gid);
- if (retval < 0) {
- return retval;
- }
- offset += retval;
- retval = setugid(uid, gid, &cur_uid, &cur_gid);
- if (retval < 0) {
- goto unmarshal_err_out;
- }
- switch (type) {
- case T_MKNOD:
- retval = proxy_unmarshal(iovec, offset, "sdq", &path, &mode, &rdev);
- if (retval < 0) {
- goto err_out;
- }
- retval = mknod(path.data, mode, rdev);
- break;
- case T_MKDIR:
- retval = proxy_unmarshal(iovec, offset, "sd", &path, &mode);
- if (retval < 0) {
- goto err_out;
- }
- retval = g_mkdir(path.data, mode);
- break;
- case T_SYMLINK:
- retval = proxy_unmarshal(iovec, offset, "ss", &oldpath, &path);
- if (retval < 0) {
- goto err_out;
- }
- retval = symlink(oldpath.data, path.data);
- break;
- }
- if (retval < 0) {
- retval = -errno;
- }
-
-err_out:
- resetugid(cur_uid, cur_gid);
-unmarshal_err_out:
- v9fs_string_free(&path);
- v9fs_string_free(&oldpath);
- return retval;
-}
-
-/*
- * create a file and send fd on success
- * return -errno on error
- */
-static int do_create(struct iovec *iovec)
-{
- int ret;
- V9fsString path;
- int flags, mode, uid, gid, cur_uid, cur_gid;
-
- v9fs_string_init(&path);
- ret = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sdddd",
- &path, &flags, &mode, &uid, &gid);
- if (ret < 0) {
- goto unmarshal_err_out;
- }
- ret = setugid(uid, gid, &cur_uid, &cur_gid);
- if (ret < 0) {
- goto unmarshal_err_out;
- }
- ret = open_regular(path.data, flags, mode);
- if (ret < 0) {
- ret = -errno;
- }
-
- resetugid(cur_uid, cur_gid);
-unmarshal_err_out:
- v9fs_string_free(&path);
- return ret;
-}
-
-/*
- * open a file and send fd on success
- * return -errno on error
- */
-static int do_open(struct iovec *iovec)
-{
- int flags, ret;
- V9fsString path;
-
- v9fs_string_init(&path);
- ret = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sd", &path, &flags);
- if (ret < 0) {
- goto err_out;
- }
- ret = open_regular(path.data, flags, 0);
- if (ret < 0) {
- ret = -errno;
- }
-err_out:
- v9fs_string_free(&path);
- return ret;
-}
-
-/* create unix domain socket and return the descriptor */
-static int proxy_socket(const char *path, uid_t uid, gid_t gid)
-{
- int sock, client;
- struct sockaddr_un proxy, qemu;
- socklen_t size;
-
- /* requested socket already exists, refuse to start */
- if (!access(path, F_OK)) {
- do_log(LOG_CRIT, "socket already exists\n");
- return -1;
- }
-
- if (strlen(path) >= sizeof(proxy.sun_path)) {
- do_log(LOG_CRIT, "UNIX domain socket path exceeds %zu characters\n",
- sizeof(proxy.sun_path));
- return -1;
- }
-
- sock = socket(AF_UNIX, SOCK_STREAM, 0);
- if (sock < 0) {
- do_perror("socket");
- return -1;
- }
-
- /* mask other part of mode bits */
- umask(7);
-
- proxy.sun_family = AF_UNIX;
- strcpy(proxy.sun_path, path);
- if (bind(sock, (struct sockaddr *)&proxy,
- sizeof(struct sockaddr_un)) < 0) {
- do_perror("bind");
- goto error;
- }
- if (chown(proxy.sun_path, uid, gid) < 0) {
- do_perror("chown");
- goto error;
- }
- if (listen(sock, 1) < 0) {
- do_perror("listen");
- goto error;
- }
-
- size = sizeof(qemu);
- client = accept(sock, (struct sockaddr *)&qemu, &size);
- if (client < 0) {
- do_perror("accept");
- goto error;
- }
- close(sock);
- return client;
-
-error:
- close(sock);
- return -1;
-}
-
-static void usage(void)
-{
- fprintf(stderr, "usage: %s\n"
- " -p|--path <path> 9p path to export\n"
- " {-f|--fd <socket-descriptor>} socket file descriptor to be used\n"
- " {-s|--socket <socketname> socket file used for communication\n"
- " \t-u|--uid <uid> -g|--gid <gid>} - uid:gid combination to give "
- " access to this socket\n"
- " \tNote: -s & -f can not be used together\n"
- " [-n|--nodaemon] Run as a normal program\n",
- prog_name);
-}
-
-static int process_reply(int sock, int type,
- struct iovec *out_iovec, int retval)
-{
- switch (type) {
- case T_OPEN:
- case T_CREATE:
- if (send_fd(sock, retval) < 0) {
- return -1;
- }
- break;
- case T_MKNOD:
- case T_MKDIR:
- case T_SYMLINK:
- case T_LINK:
- case T_CHMOD:
- case T_CHOWN:
- case T_TRUNCATE:
- case T_UTIME:
- case T_RENAME:
- case T_REMOVE:
- case T_LSETXATTR:
- case T_LREMOVEXATTR:
- if (send_status(sock, out_iovec, retval) < 0) {
- return -1;
- }
- break;
- case T_LSTAT:
- case T_STATFS:
- case T_READLINK:
- case T_LGETXATTR:
- case T_LLISTXATTR:
- case T_GETVERSION:
- if (send_response(sock, out_iovec, retval) < 0) {
- return -1;
- }
- break;
- default:
- return -1;
- break;
- }
- return 0;
-}
-
-static int process_requests(int sock)
-{
- int flags;
- int size = 0;
- int retval = 0;
- uint64_t offset;
- ProxyHeader header;
- int mode, uid, gid;
- V9fsString name, value;
- struct timespec spec[2];
- V9fsString oldpath, path;
- struct iovec in_iovec, out_iovec;
-
- in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ);
- in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ;
- out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ);
- out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ;
-
- while (1) {
- /*
- * initialize the header type, so that we send
- * response to proper request type.
- */
- header.type = 0;
- retval = read_request(sock, &in_iovec, &header);
- if (retval < 0) {
- goto err_out;
- }
-
- switch (header.type) {
- case T_OPEN:
- retval = do_open(&in_iovec);
- break;
- case T_CREATE:
- retval = do_create(&in_iovec);
- break;
- case T_MKNOD:
- case T_MKDIR:
- case T_SYMLINK:
- retval = do_create_others(header.type, &in_iovec);
- break;
- case T_LINK:
- v9fs_string_init(&path);
- v9fs_string_init(&oldpath);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ,
- "ss", &oldpath, &path);
- if (retval > 0) {
- retval = link(oldpath.data, path.data);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&oldpath);
- v9fs_string_free(&path);
- break;
- case T_LSTAT:
- case T_STATFS:
- retval = do_stat(header.type, &in_iovec, &out_iovec);
- break;
- case T_READLINK:
- retval = do_readlink(&in_iovec, &out_iovec);
- break;
- case T_CHMOD:
- v9fs_string_init(&path);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ,
- "sd", &path, &mode);
- if (retval > 0) {
- retval = chmod(path.data, mode);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- break;
- case T_CHOWN:
- v9fs_string_init(&path);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sdd", &path,
- &uid, &gid);
- if (retval > 0) {
- retval = lchown(path.data, uid, gid);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- break;
- case T_TRUNCATE:
- v9fs_string_init(&path);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sq",
- &path, &offset);
- if (retval > 0) {
- retval = truncate(path.data, offset);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- break;
- case T_UTIME:
- v9fs_string_init(&path);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sqqqq", &path,
- &spec[0].tv_sec, &spec[0].tv_nsec,
- &spec[1].tv_sec, &spec[1].tv_nsec);
- if (retval > 0) {
- retval = utimensat(AT_FDCWD, path.data, spec,
- AT_SYMLINK_NOFOLLOW);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- break;
- case T_RENAME:
- v9fs_string_init(&path);
- v9fs_string_init(&oldpath);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ,
- "ss", &oldpath, &path);
- if (retval > 0) {
- retval = rename(oldpath.data, path.data);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&oldpath);
- v9fs_string_free(&path);
- break;
- case T_REMOVE:
- v9fs_string_init(&path);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "s", &path);
- if (retval > 0) {
- retval = remove(path.data);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- break;
- case T_LGETXATTR:
- case T_LLISTXATTR:
- retval = do_getxattr(header.type, &in_iovec, &out_iovec);
- break;
- case T_LSETXATTR:
- v9fs_string_init(&path);
- v9fs_string_init(&name);
- v9fs_string_init(&value);
- retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sssdd", &path,
- &name, &value, &size, &flags);
- if (retval > 0) {
- retval = lsetxattr(path.data,
- name.data, value.data, size, flags);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- v9fs_string_free(&name);
- v9fs_string_free(&value);
- break;
- case T_LREMOVEXATTR:
- v9fs_string_init(&path);
- v9fs_string_init(&name);
- retval = proxy_unmarshal(&in_iovec,
- PROXY_HDR_SZ, "ss", &path, &name);
- if (retval > 0) {
- retval = lremovexattr(path.data, name.data);
- if (retval < 0) {
- retval = -errno;
- }
- }
- v9fs_string_free(&path);
- v9fs_string_free(&name);
- break;
- case T_GETVERSION:
- retval = do_getversion(&in_iovec, &out_iovec);
- break;
- default:
- goto err_out;
- break;
- }
-
- if (process_reply(sock, header.type, &out_iovec, retval) < 0) {
- goto err_out;
- }
- }
-err_out:
- g_free(in_iovec.iov_base);
- g_free(out_iovec.iov_base);
- return -1;
-}
-
-int main(int argc, char **argv)
-{
- int sock;
- uid_t own_u;
- gid_t own_g;
- char *rpath = NULL;
- char *sock_name = NULL;
- struct stat stbuf;
- int c, option_index;
-#ifdef FS_IOC_GETVERSION
- int retval;
- struct statfs st_fs;
-#endif
-
- fprintf(stderr, "NOTE: The 9p 'proxy' backend is deprecated (since "
- "QEMU 8.1) and will be removed in a future version of "
- "QEMU!\n");
-
- prog_name = g_path_get_basename(argv[0]);
-
- is_daemon = true;
- sock = -1;
- own_u = own_g = -1;
- while (1) {
- option_index = 0;
- c = getopt_long(argc, argv, "p:nh?f:s:u:g:", helper_opts,
- &option_index);
- if (c == -1) {
- break;
- }
- switch (c) {
- case 'p':
- rpath = g_strdup(optarg);
- break;
- case 'n':
- is_daemon = false;
- break;
- case 'f':
- sock = atoi(optarg);
- break;
- case 's':
- sock_name = g_strdup(optarg);
- break;
- case 'u':
- own_u = atoi(optarg);
- break;
- case 'g':
- own_g = atoi(optarg);
- break;
- case '?':
- case 'h':
- default:
- usage();
- exit(EXIT_FAILURE);
- }
- }
-
- /* Parameter validation */
- if ((sock_name == NULL && sock == -1) || rpath == NULL) {
- fprintf(stderr, "socket, socket descriptor or path not specified\n");
- usage();
- return -1;
- }
-
- if (sock_name && sock != -1) {
- fprintf(stderr, "both named socket and socket descriptor specified\n");
- usage();
- exit(EXIT_FAILURE);
- }
-
- if (sock_name && (own_u == -1 || own_g == -1)) {
- fprintf(stderr, "owner uid:gid not specified, ");
- fprintf(stderr,
- "owner uid:gid specifies who can access the socket file\n");
- usage();
- exit(EXIT_FAILURE);
- }
-
- if (lstat(rpath, &stbuf) < 0) {
- fprintf(stderr, "invalid path \"%s\" specified, %s\n",
- rpath, strerror(errno));
- exit(EXIT_FAILURE);
- }
-
- if (!S_ISDIR(stbuf.st_mode)) {
- fprintf(stderr, "specified path \"%s\" is not directory\n", rpath);
- exit(EXIT_FAILURE);
- }
-
- if (is_daemon) {
- if (daemon(0, 0) < 0) {
- fprintf(stderr, "daemon call failed\n");
- exit(EXIT_FAILURE);
- }
- openlog(PROGNAME, LOG_PID, LOG_DAEMON);
- }
-
- do_log(LOG_INFO, "Started\n");
- if (sock_name) {
- sock = proxy_socket(sock_name, own_u, own_g);
- if (sock < 0) {
- goto error;
- }
- }
-
- if (chroot(rpath) < 0) {
- do_perror("chroot");
- goto error;
- }
- if (chdir("/") < 0) {
- do_perror("chdir");
- goto error;
- }
-
- get_version = false;
-#ifdef FS_IOC_GETVERSION
- /* check whether underlying FS support IOC_GETVERSION */
- retval = statfs("/", &st_fs);
- if (!retval) {
- switch (st_fs.f_type) {
- case EXT2_SUPER_MAGIC:
- case BTRFS_SUPER_MAGIC:
- case REISERFS_SUPER_MAGIC:
- case XFS_SUPER_MAGIC:
- get_version = true;
- break;
- }
- }
-#endif
-
- umask(0);
- if (init_capabilities() < 0) {
- goto error;
- }
-
- process_requests(sock);
-error:
- g_free(rpath);
- g_free(sock_name);
- do_log(LOG_INFO, "Done\n");
- closelog();
- return 0;
-}
diff --git a/gdb-xml/aarch64-core.xml b/gdb-xml/aarch64-core.xml
index e1e9dc3..b804651 100644
--- a/gdb-xml/aarch64-core.xml
+++ b/gdb-xml/aarch64-core.xml
@@ -1,5 +1,5 @@
<?xml version="1.0"?>
-<!-- Copyright (C) 2009-2012 Free Software Foundation, Inc.
+<!-- Copyright (C) 2009-2025 Free Software Foundation, Inc.
Contributed by ARM Ltd.
Copying and distribution of this file, with or without modification,
@@ -42,5 +42,53 @@
<reg name="sp" bitsize="64" type="data_ptr"/>
<reg name="pc" bitsize="64" type="code_ptr"/>
- <reg name="cpsr" bitsize="32"/>
+
+ <flags id="cpsr_flags" size="4">
+ <!-- Stack Pointer. -->
+ <field name="SP" start="0" end="0"/>
+
+ <!-- Exception Level. -->
+ <field name="EL" start="2" end="3"/>
+ <!-- Execution state. -->
+ <field name="nRW" start="4" end="4"/>
+
+ <!-- FIQ interrupt mask. -->
+ <field name="F" start="6" end="6"/>
+ <!-- IRQ interrupt mask. -->
+ <field name="I" start="7" end="7"/>
+ <!-- SError interrupt mask. -->
+ <field name="A" start="8" end="8"/>
+ <!-- Debug exception mask. -->
+ <field name="D" start="9" end="9"/>
+
+ <!-- ARMv8.5-A: Branch Target Identification BTYPE. -->
+ <field name="BTYPE" start="10" end="11"/>
+
+ <!-- ARMv8.0-A: Speculative Store Bypass. -->
+ <field name="SSBS" start="12" end="12"/>
+
+ <!-- Illegal Execution state. -->
+ <field name="IL" start="20" end="20"/>
+ <!-- Software Step. -->
+ <field name="SS" start="21" end="21"/>
+ <!-- ARMv8.1-A: Privileged Access Never. -->
+ <field name="PAN" start="22" end="22"/>
+ <!-- ARMv8.2-A: User Access Override. -->
+ <field name="UAO" start="23" end="23"/>
+ <!-- ARMv8.4-A: Data Independent Timing. -->
+ <field name="DIT" start="24" end="24"/>
+ <!-- ARMv8.5-A: Tag Check Override. -->
+ <field name="TCO" start="25" end="25"/>
+
+ <!-- Overflow Condition flag. -->
+ <field name="V" start="28" end="28"/>
+ <!-- Carry Condition flag. -->
+ <field name="C" start="29" end="29"/>
+ <!-- Zero Condition flag. -->
+ <field name="Z" start="30" end="30"/>
+ <!-- Negative Condition flag. -->
+ <field name="N" start="31" end="31"/>
+ </flags>
+ <reg name="cpsr" bitsize="32" type="cpsr_flags"/>
+
</feature>
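
With cpsr now typed as the cpsr_flags set above, gdb decodes the individual PSTATE/SPSR fields (EL, nRW, NZCV, PAN, ...) instead of printing a bare 32-bit value. For reference, the same bit positions decoded in C; this sketch simply mirrors the field definitions in the XML:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a few of the fields declared in cpsr_flags; the bit positions
     * are taken directly from the XML above. */
    static void print_cpsr(uint32_t cpsr)
    {
        printf("EL=%u nRW=%u N=%u Z=%u C=%u V=%u\n",
               (unsigned)((cpsr >> 2) & 0x3),   /* Exception Level, bits 2-3  */
               (unsigned)((cpsr >> 4) & 0x1),   /* nRW execution state, bit 4 */
               (unsigned)((cpsr >> 31) & 0x1),  /* N condition flag, bit 31   */
               (unsigned)((cpsr >> 30) & 0x1),  /* Z condition flag, bit 30   */
               (unsigned)((cpsr >> 29) & 0x1),  /* C condition flag, bit 29   */
               (unsigned)((cpsr >> 28) & 0x1)); /* V condition flag, bit 28   */
    }
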
diff --git a/gdb-xml/hexagon-core.xml b/gdb-xml/hexagon-core.xml
index e181163..b943781 100644
--- a/gdb-xml/hexagon-core.xml
+++ b/gdb-xml/hexagon-core.xml
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
<!--
- Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ Copyright(c) 2023-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
This work is licensed under the terms of the GNU GPL, version 2 or
(at your option) any later version. See the COPYING file in the
@@ -80,5 +80,9 @@
<reg name="c29" bitsize="32" offset="244" encoding="uint" format="hex" group="Thread Registers" dwarf_regnum="61"/>
<reg name="utimerlo" bitsize="32" offset="248" encoding="uint" format="hex" group="Thread Registers" dwarf_regnum="62"/>
<reg name="utimerhi" bitsize="32" offset="252" encoding="uint" format="hex" group="Thread Registers" dwarf_regnum="63"/>
+ <reg name="p0" bitsize="8" offset="256" encoding="uint" format="hex" group="Predicate Registers" dwarf_regnum="64"/>
+ <reg name="p1" bitsize="8" offset="257" encoding="uint" format="hex" group="Predicate Registers" dwarf_regnum="65"/>
+ <reg name="p2" bitsize="8" offset="258" encoding="uint" format="hex" group="Predicate Registers" dwarf_regnum="66"/>
+ <reg name="p3" bitsize="8" offset="259" encoding="uint" format="hex" group="Predicate Registers" dwarf_regnum="67"/>
</feature>
diff --git a/gdb-xml/i386-32bit-linux.xml b/gdb-xml/i386-32bit-linux.xml
new file mode 100644
index 0000000..5ffe561
--- /dev/null
+++ b/gdb-xml/i386-32bit-linux.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+ <reg name="orig_eax" bitsize="32" type="int"/>
+</feature>
diff --git a/gdb-xml/i386-64bit-linux.xml b/gdb-xml/i386-64bit-linux.xml
new file mode 100644
index 0000000..0f26990
--- /dev/null
+++ b/gdb-xml/i386-64bit-linux.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+ <reg name="orig_rax" bitsize="64" type="int"/>
+</feature>
diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c
index b9ad0a0..def0b7e 100644
--- a/gdbstub/gdbstub.c
+++ b/gdbstub/gdbstub.c
@@ -20,7 +20,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -28,6 +28,7 @@
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
+#include "qemu/target-info.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "gdbstub/commands.h"
@@ -41,8 +42,8 @@
#endif
#include "hw/core/cpu.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
#include "exec/replay-core.h"
#include "exec/hwaddr.h"
@@ -354,7 +355,6 @@ static const char *get_feature_xml(const char *p, const char **newp,
GDBProcess *process)
{
CPUState *cpu = gdb_get_first_cpu_in_process(process);
- CPUClass *cc = CPU_GET_CLASS(cpu);
GDBRegisterState *r;
size_t len;
@@ -377,11 +377,11 @@ static const char *get_feature_xml(const char *p, const char **newp,
"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
"<target>"));
- if (cc->gdb_arch_name) {
+ if (cpu->cc->gdb_arch_name) {
g_ptr_array_add(
xml,
g_markup_printf_escaped("<architecture>%s</architecture>",
- cc->gdb_arch_name(cpu)));
+ cpu->cc->gdb_arch_name(cpu)));
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
@@ -520,11 +520,10 @@ GArray *gdb_get_register_list(CPUState *cpu)
int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
GDBRegisterState *r;
- if (reg < cc->gdb_num_core_regs) {
- return cc->gdb_read_register(cpu, buf, reg);
+ if (reg < cpu->cc->gdb_num_core_regs) {
+ return cpu->cc->gdb_read_register(cpu, buf, reg);
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
@@ -538,11 +537,10 @@ int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
GDBRegisterState *r;
- if (reg < cc->gdb_num_core_regs) {
- return cc->gdb_write_register(cpu, mem_buf, reg);
+ if (reg < cpu->cc->gdb_num_core_regs) {
+ return cpu->cc->gdb_write_register(cpu, mem_buf, reg);
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
@@ -568,15 +566,30 @@ static void gdb_register_feature(CPUState *cpu, int base_reg,
g_array_append_val(cpu->gdb_regs, s);
}
+static const char *gdb_get_core_xml_file(CPUState *cpu)
+{
+ CPUClass *cc = cpu->cc;
+
+ /*
+ * The CPU class can provide the XML filename via a method,
+ * or as a simple fixed string field.
+ */
+ if (cc->gdb_get_core_xml_file) {
+ return cc->gdb_get_core_xml_file(cpu);
+ }
+ return cc->gdb_core_xml_file;
+}
+
void gdb_init_cpu(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ CPUClass *cc = cpu->cc;
const GDBFeature *feature;
+ const char *xmlfile = gdb_get_core_xml_file(cpu);
cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState));
- if (cc->gdb_core_xml_file) {
- feature = gdb_find_static_feature(cc->gdb_core_xml_file);
+ if (xmlfile) {
+ feature = gdb_find_static_feature(xmlfile);
gdb_register_feature(cpu, 0,
cc->gdb_read_register, cc->gdb_write_register,
feature);
@@ -618,6 +631,19 @@ void gdb_register_coprocessor(CPUState *cpu,
}
}
+void gdb_unregister_coprocessor_all(CPUState *cpu)
+{
+ /*
+ * Safe to nuke everything. GDBRegisterState::xml is static const char so
+ * it won't be freed
+ */
+ g_array_free(cpu->gdb_regs, true);
+
+ cpu->gdb_regs = NULL;
+ cpu->gdb_num_regs = 0;
+ cpu->gdb_num_g_regs = 0;
+}
+
static void gdb_process_breakpoint_remove_all(GDBProcess *p)
{
CPUState *cpu = gdb_get_first_cpu_in_process(p);
@@ -1318,8 +1344,8 @@ static void handle_read_all_regs(GArray *params, void *user_ctx)
len += gdb_read_register(gdbserver_state.g_cpu,
gdbserver_state.mem_buf,
reg_id);
+ g_assert(len == gdbserver_state.mem_buf->len);
}
- g_assert(len == gdbserver_state.mem_buf->len);
gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len);
gdb_put_strbuf();
@@ -1572,6 +1598,18 @@ static void handle_query_threads(GArray *params, void *user_ctx)
gdbserver_state.query_cpu = gdb_next_attached_cpu(gdbserver_state.query_cpu);
}
+static void handle_query_gdb_server_version(GArray *params, void *user_ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ g_string_printf(gdbserver_state.str_buf, "name:qemu-%s;version:%s;",
+ target_name(), QEMU_VERSION);
+#else
+ g_string_printf(gdbserver_state.str_buf, "name:qemu-system-%s;version:%s;",
+ target_name(), QEMU_VERSION);
+#endif
+ gdb_put_strbuf();
+}
+
static void handle_query_first_threads(GArray *params, void *user_ctx)
{
gdbserver_state.query_cpu = gdb_first_attached_cpu();
@@ -1614,27 +1652,27 @@ static void handle_query_thread_extra(GArray *params, void *user_ctx)
gdb_put_strbuf();
}
-static char *extended_qsupported_features;
-void gdb_extend_qsupported_features(char *qsupported_features)
-{
- /*
- * We don't support different sets of CPU gdb features on different CPUs yet
- * so assert the feature strings are the same on all CPUs, or is set only
- * once (1 CPU).
- */
- g_assert(extended_qsupported_features == NULL ||
- g_strcmp0(extended_qsupported_features, qsupported_features) == 0);
- extended_qsupported_features = qsupported_features;
+static char **extra_query_flags;
+
+void gdb_extend_qsupported_features(char *qflags)
+{
+ if (!extra_query_flags) {
+ extra_query_flags = g_new0(char *, 2);
+ extra_query_flags[0] = g_strdup(qflags);
+ } else if (!g_strv_contains((const gchar * const *) extra_query_flags,
+ qflags)) {
+ int len = g_strv_length(extra_query_flags);
+ extra_query_flags = g_realloc_n(extra_query_flags, len + 2,
+ sizeof(char *));
+ extra_query_flags[len] = g_strdup(qflags);
+ }
}
static void handle_query_supported(GArray *params, void *user_ctx)
{
- CPUClass *cc;
-
g_string_printf(gdbserver_state.str_buf, "PacketSize=%x", MAX_PACKET_LENGTH);
- cc = CPU_GET_CLASS(first_cpu);
- if (cc->gdb_core_xml_file) {
+ if (gdb_get_core_xml_file(first_cpu)) {
g_string_append(gdbserver_state.str_buf, ";qXfer:features:read+");
}
@@ -1668,8 +1706,11 @@ static void handle_query_supported(GArray *params, void *user_ctx)
g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+");
- if (extended_qsupported_features) {
- g_string_append(gdbserver_state.str_buf, extended_qsupported_features);
+ if (extra_query_flags) {
+ int extras = g_strv_length(extra_query_flags);
+ for (int i = 0; i < extras; i++) {
+ g_string_append(gdbserver_state.str_buf, extra_query_flags[i]);
+ }
}
gdb_put_strbuf();
@@ -1678,7 +1719,6 @@ static void handle_query_supported(GArray *params, void *user_ctx)
static void handle_query_xfer_features(GArray *params, void *user_ctx)
{
GDBProcess *process;
- CPUClass *cc;
unsigned long len, total_len, addr;
const char *xml;
const char *p;
@@ -1689,8 +1729,7 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx)
}
process = gdb_get_cpu_process(gdbserver_state.g_cpu);
- cc = CPU_GET_CLASS(gdbserver_state.g_cpu);
- if (!cc->gdb_core_xml_file) {
+ if (!gdb_get_core_xml_file(gdbserver_state.g_cpu)) {
gdb_put_packet("");
return;
}
@@ -1753,39 +1792,58 @@ static const GdbCmdParseEntry gdb_gen_query_set_common_table[] = {
},
};
-/* Compares if a set of command parsers is equal to another set of parsers. */
-static bool cmp_cmds(GdbCmdParseEntry *c, GdbCmdParseEntry *d, int size)
+/**
+ * extend_table() - extend one of the command tables
+ * @table: the command table to extend (or NULL)
+ * @extensions: a list of GdbCmdParseEntry pointers
+ *
+ * The entries themselves should be pointers to static const
+ * GdbCmdParseEntry entries. If the entry is already in the table we
+ * skip adding it again.
+ *
+ * Returns (a potentially freshly allocated) GPtrArray of GdbCmdParseEntry
+ */
+static GPtrArray *extend_table(GPtrArray *table, GPtrArray *extensions)
{
- for (int i = 0; i < size; i++) {
- if (!(c[i].handler == d[i].handler &&
- g_strcmp0(c[i].cmd, d[i].cmd) == 0 &&
- c[i].cmd_startswith == d[i].cmd_startswith &&
- g_strcmp0(c[i].schema, d[i].schema) == 0)) {
+ if (!table) {
+ table = g_ptr_array_new();
+ }
- /* Sets are different. */
- return false;
+ for (int i = 0; i < extensions->len; i++) {
+ gpointer entry = g_ptr_array_index(extensions, i);
+ if (!g_ptr_array_find(table, entry, NULL)) {
+ g_ptr_array_add(table, entry);
}
}
- /* Sets are equal, i.e. contain the same command parsers. */
- return true;
+ return table;
}
-static GdbCmdParseEntry *extended_query_table;
-static int extended_query_table_size;
-void gdb_extend_query_table(GdbCmdParseEntry *table, int size)
+/**
+ * process_extended_table() - run through an extended command table
+ * @table: the command table to check
+ * @data: parameters
+ *
+ * returns true if the command was found and executed
+ */
+static bool process_extended_table(GPtrArray *table, const char *data)
{
- /*
- * We don't support different sets of CPU gdb features on different CPUs yet
- * so assert query table is the same on all CPUs, or is set only once
- * (1 CPU).
- */
- g_assert(extended_query_table == NULL ||
- (extended_query_table_size == size &&
- cmp_cmds(extended_query_table, table, size)));
+ for (int i = 0; i < table->len; i++) {
+ const GdbCmdParseEntry *entry = g_ptr_array_index(table, i);
+ if (process_string_cmd(data, entry, 1)) {
+ return true;
+ }
+ }
+ return false;
+}
- extended_query_table = table;
- extended_query_table_size = size;
+
+/* Ptr to GdbCmdParseEntry */
+static GPtrArray *extended_query_table;
+
+void gdb_extend_query_table(GPtrArray *new_queries)
+{
+ extended_query_table = extend_table(extended_query_table, new_queries);
}
static const GdbCmdParseEntry gdb_gen_query_table[] = {
@@ -1798,6 +1856,10 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = {
.cmd = "sThreadInfo",
},
{
+ .handler = handle_query_gdb_server_version,
+ .cmd = "GDBServerVersion",
+ },
+ {
.handler = handle_query_first_threads,
.cmd = "fThreadInfo",
},
@@ -1880,20 +1942,12 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = {
#endif
};
-static GdbCmdParseEntry *extended_set_table;
-static int extended_set_table_size;
-void gdb_extend_set_table(GdbCmdParseEntry *table, int size)
-{
- /*
- * We don't support different sets of CPU gdb features on different CPUs yet
- * so assert set table is the same on all CPUs, or is set only once (1 CPU).
- */
- g_assert(extended_set_table == NULL ||
- (extended_set_table_size == size &&
- cmp_cmds(extended_set_table, table, size)));
+/* Ptr to GdbCmdParseEntry */
+static GPtrArray *extended_set_table;
- extended_set_table = table;
- extended_set_table_size = size;
+void gdb_extend_set_table(GPtrArray *new_set)
+{
+ extended_set_table = extend_table(extended_set_table, new_set);
}
static const GdbCmdParseEntry gdb_gen_set_table[] = {
@@ -1924,26 +1978,28 @@ static const GdbCmdParseEntry gdb_gen_set_table[] = {
static void handle_gen_query(GArray *params, void *user_ctx)
{
+ const char *data;
+
if (!params->len) {
return;
}
- if (process_string_cmd(gdb_get_cmd_param(params, 0)->data,
+ data = gdb_get_cmd_param(params, 0)->data;
+
+ if (process_string_cmd(data,
gdb_gen_query_set_common_table,
ARRAY_SIZE(gdb_gen_query_set_common_table))) {
return;
}
- if (process_string_cmd(gdb_get_cmd_param(params, 0)->data,
+ if (process_string_cmd(data,
gdb_gen_query_table,
ARRAY_SIZE(gdb_gen_query_table))) {
return;
}
if (extended_query_table &&
- process_string_cmd(gdb_get_cmd_param(params, 0)->data,
- extended_query_table,
- extended_query_table_size)) {
+ process_extended_table(extended_query_table, data)) {
return;
}
@@ -1953,26 +2009,28 @@ static void handle_gen_query(GArray *params, void *user_ctx)
static void handle_gen_set(GArray *params, void *user_ctx)
{
+ const char *data;
+
if (!params->len) {
return;
}
- if (process_string_cmd(gdb_get_cmd_param(params, 0)->data,
+ data = gdb_get_cmd_param(params, 0)->data;
+
+ if (process_string_cmd(data,
gdb_gen_query_set_common_table,
ARRAY_SIZE(gdb_gen_query_set_common_table))) {
return;
}
- if (process_string_cmd(gdb_get_cmd_param(params, 0)->data,
+ if (process_string_cmd(data,
gdb_gen_set_table,
ARRAY_SIZE(gdb_gen_set_table))) {
return;
}
if (extended_set_table &&
- process_string_cmd(gdb_get_cmd_param(params, 0)->data,
- extended_set_table,
- extended_set_table_size)) {
+ process_extended_table(extended_set_table, data)) {
return;
}
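
The gdbstub.c changes above replace the single-registration extension hooks with GPtrArrays of pointers to static const GdbCmdParseEntry structures, deduplicated in extend_table(), and add a QEMU-specific GDBServerVersion query. A hedged sketch of how target code might register an extra query under the new API: handle_query_foo and the "foo" command are invented for illustration, only gdb_extend_query_table() and the entry layout come from the patch.

    static void handle_query_foo(GArray *params, void *user_ctx)
    {
        gdb_put_packet("OK");
    }

    static const GdbCmdParseEntry foo_query = {
        .handler = handle_query_foo,
        .cmd = "foo",
    };

    static void register_foo_query(void)
    {
        g_autoptr(GPtrArray) extra = g_ptr_array_new();

        /* extend_table() copies the entry pointers into its own table, so
         * the temporary array may be freed; the entries must stay static. */
        g_ptr_array_add(extra, (gpointer)&foo_query);
        gdb_extend_query_table(extra);
    }
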
diff --git a/gdbstub/meson.build b/gdbstub/meson.build
index dff741d..15c666f 100644
--- a/gdbstub/meson.build
+++ b/gdbstub/meson.build
@@ -4,34 +4,18 @@
# types such as hwaddr.
#
-# We need to build the core gdb code via a library to be able to tweak
-# cflags so:
-
-gdb_user_ss = ss.source_set()
-gdb_system_ss = ss.source_set()
-
# We build two versions of gdbstub, one for each mode
-gdb_user_ss.add(files('gdbstub.c', 'user.c'))
-gdb_system_ss.add(files('gdbstub.c', 'system.c'))
-
-gdb_user_ss = gdb_user_ss.apply({})
-gdb_system_ss = gdb_system_ss.apply({})
-
-libgdb_user = static_library('gdb_user',
- gdb_user_ss.sources() + genh,
- c_args: '-DCONFIG_USER_ONLY',
- build_by_default: false)
-
-libgdb_system = static_library('gdb_system',
- gdb_system_ss.sources() + genh,
- build_by_default: false)
-
-gdb_user = declare_dependency(objects: libgdb_user.extract_all_objects(recursive: false))
-user_ss.add(gdb_user)
-gdb_system = declare_dependency(objects: libgdb_system.extract_all_objects(recursive: false))
-system_ss.add(gdb_system)
-
-common_ss.add(files('syscalls.c'))
+user_ss.add(files(
+ 'gdbstub.c',
+ 'syscalls.c',
+ 'user.c'
+))
+
+system_ss.add(files(
+ 'gdbstub.c',
+ 'syscalls.c',
+ 'system.c'
+))
# The user-target is specialised by the guest
specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-target.c'))
diff --git a/gdbstub/syscalls.c b/gdbstub/syscalls.c
index 4e1295b..e855df2 100644
--- a/gdbstub/syscalls.c
+++ b/gdbstub/syscalls.c
@@ -7,13 +7,13 @@
* Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2023 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "semihosting/semihost.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "gdbstub/user.h"
#include "gdbstub/syscalls.h"
#include "gdbstub/commands.h"
diff --git a/gdbstub/system.c b/gdbstub/system.c
index 1ad87fe..8a32d8e 100644
--- a/gdbstub/system.c
+++ b/gdbstub/system.c
@@ -7,7 +7,7 @@
* Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -19,9 +19,11 @@
#include "gdbstub/commands.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
-#include "sysemu/cpus.h"
-#include "sysemu/runstate.h"
-#include "sysemu/replay.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
+#include "system/runstate.h"
+#include "system/replay.h"
+#include "system/tcg.h"
#include "hw/core/cpu.h"
#include "hw/cpu/cluster.h"
#include "hw/boards.h"
@@ -171,7 +173,9 @@ static void gdb_vm_state_change(void *opaque, bool running, RunState state)
} else {
trace_gdbstub_hit_break();
}
- tb_flush(cpu);
+ if (tcg_enabled()) {
+ tb_flush(cpu);
+ }
ret = GDB_SIGNAL_TRAP;
break;
case RUN_STATE_PAUSED:
@@ -239,7 +243,7 @@ static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend,
*be_opened = false;
}
-static void char_gdb_class_init(ObjectClass *oc, void *data)
+static void char_gdb_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -330,26 +334,27 @@ static void create_processes(GDBState *s)
gdb_create_default_process(s);
}
-int gdbserver_start(const char *device)
+bool gdbserver_start(const char *device, Error **errp)
{
Chardev *chr = NULL;
Chardev *mon_chr;
g_autoptr(GString) cs = g_string_new(device);
if (!first_cpu) {
- error_report("gdbstub: meaningless to attach gdb to a "
- "machine without any CPU.");
- return -1;
+ error_setg(errp, "gdbstub: meaningless to attach gdb to a "
+ "machine without any CPU.");
+ return false;
}
if (!gdb_supports_guest_debug()) {
- error_report("gdbstub: current accelerator doesn't "
- "support guest debugging");
- return -1;
+ error_setg(errp, "gdbstub: current accelerator doesn't "
+ "support guest debugging");
+ return false;
}
if (cs->len == 0) {
- return -1;
+ error_setg(errp, "gdbstub: missing connection string");
+ return false;
}
trace_gdbstub_op_start(cs->str);
@@ -374,7 +379,8 @@ int gdbserver_start(const char *device)
*/
chr = qemu_chr_new_noreplay("gdb", cs->str, true, NULL);
if (!chr) {
- return -1;
+ error_setg(errp, "gdbstub: couldn't create chardev");
+ return false;
}
}
@@ -406,7 +412,7 @@ int gdbserver_start(const char *device)
gdbserver_system_state.mon_chr = mon_chr;
gdb_syscall_reset();
- return 0;
+ return true;
}
static void register_types(void)
@@ -450,8 +456,6 @@ static int phy_memory_mode;
int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
uint8_t *buf, int len, bool is_write)
{
- CPUClass *cc;
-
if (phy_memory_mode) {
if (is_write) {
cpu_physical_memory_write(addr, buf, len);
@@ -461,9 +465,8 @@ int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
return 0;
}
- cc = CPU_GET_CLASS(cpu);
- if (cc->memory_rw_debug) {
- return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ if (cpu->cc->memory_rw_debug) {
+ return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
}
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
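
gdbserver_start() above now returns a bool and reports failures through an Error **, instead of printing with error_report() and returning -1. A minimal caller sketch under the new convention; the wrapper itself is illustrative, not an in-tree function:

    static void start_gdbserver_or_exit(const char *device)
    {
        Error *err = NULL;

        if (!gdbserver_start(device, &err)) {
            error_report_err(err);   /* prints and frees the error */
            exit(1);
        }
    }
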
diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c
index b5e01fd..43231e6 100644
--- a/gdbstub/user-target.c
+++ b/gdbstub/user-target.c
@@ -4,7 +4,7 @@
* Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -233,10 +233,8 @@ void gdb_handle_query_offsets(GArray *params, void *user_ctx)
static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
uint8_t *buf, int len, bool is_write)
{
- CPUClass *cc;
- cc = CPU_GET_CLASS(cpu);
- if (cc->memory_rw_debug) {
- return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ if (cpu->cc->memory_rw_debug) {
+ return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
}
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
}
@@ -317,9 +315,9 @@ void gdb_handle_v_file_open(GArray *params, void *user_ctx)
int fd = open(filename, flags, mode);
#endif
if (fd < 0) {
- g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno);
+ g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno);
} else {
- g_string_printf(gdbserver_state.str_buf, "F%d", fd);
+ g_string_printf(gdbserver_state.str_buf, "F%x", fd);
}
gdb_put_strbuf();
}
@@ -329,7 +327,7 @@ void gdb_handle_v_file_close(GArray *params, void *user_ctx)
int fd = gdb_get_cmd_param(params, 0)->val_ul;
if (close(fd) == -1) {
- g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno);
+ g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno);
gdb_put_strbuf();
return;
}
@@ -352,7 +350,7 @@ void gdb_handle_v_file_pread(GArray *params, void *user_ctx)
ssize_t n = pread(fd, buf, bufsiz, offset);
if (n < 0) {
- g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno);
+ g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno);
gdb_put_strbuf();
return;
}
@@ -375,7 +373,7 @@ void gdb_handle_v_file_readlink(GArray *params, void *user_ctx)
ssize_t n = readlink(filename, buf, BUFSIZ);
#endif
if (n < 0) {
- g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno);
+ g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno);
gdb_put_strbuf();
return;
}
diff --git a/gdbstub/user.c b/gdbstub/user.c
index b36033b..67403e5 100644
--- a/gdbstub/user.c
+++ b/gdbstub/user.c
@@ -6,13 +6,14 @@
* Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/cutils.h"
#include "qemu/sockets.h"
+#include "qapi/error.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/gdbstub.h"
@@ -21,6 +22,7 @@
#include "gdbstub/user.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"
+#include "user/signal.h"
#include "trace.h"
#include "internals.h"
@@ -314,33 +316,20 @@ static bool gdb_accept_socket(int gdb_fd)
return true;
}
-static int gdbserver_open_socket(const char *path)
+static int gdbserver_open_socket(const char *path, Error **errp)
{
- struct sockaddr_un sockaddr = {};
- int fd, ret;
+ g_autoptr(GString) buf = g_string_new("");
+ char *pid_placeholder;
- fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (fd < 0) {
- perror("create socket");
- return -1;
+ pid_placeholder = strstr(path, "%d");
+ if (pid_placeholder != NULL) {
+ g_string_append_len(buf, path, pid_placeholder - path);
+ g_string_append_printf(buf, "%d", qemu_get_thread_id());
+ g_string_append(buf, pid_placeholder + 2);
+ path = buf->str;
}
- sockaddr.sun_family = AF_UNIX;
- pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path);
- ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
- if (ret < 0) {
- perror("bind socket");
- close(fd);
- return -1;
- }
- ret = listen(fd, 1);
- if (ret < 0) {
- perror("listen socket");
- close(fd);
- return -1;
- }
-
- return fd;
+ return unix_listen(path, errp);
}
static bool gdb_accept_tcp(int gdb_fd)
@@ -372,14 +361,14 @@ static bool gdb_accept_tcp(int gdb_fd)
return true;
}
-static int gdbserver_open_port(int port)
+static int gdbserver_open_port(int port, Error **errp)
{
struct sockaddr_in sockaddr;
int fd, ret;
fd = socket(PF_INET, SOCK_STREAM, 0);
if (fd < 0) {
- perror("socket");
+ error_setg_errno(errp, errno, "Failed to create socket");
return -1;
}
qemu_set_cloexec(fd);
@@ -391,13 +380,13 @@ static int gdbserver_open_port(int port)
sockaddr.sin_addr.s_addr = 0;
ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
if (ret < 0) {
- perror("bind");
+ error_setg_errno(errp, errno, "Failed to bind socket");
close(fd);
return -1;
}
ret = listen(fd, 1);
if (ret < 0) {
- perror("listen");
+ error_setg_errno(errp, errno, "Failed to listen to socket");
close(fd);
return -1;
}
@@ -405,31 +394,122 @@ static int gdbserver_open_port(int port)
return fd;
}
-int gdbserver_start(const char *port_or_path)
+static bool gdbserver_accept(int port, int gdb_fd, const char *path)
{
- int port = g_ascii_strtoull(port_or_path, NULL, 10);
- int gdb_fd;
+ bool ret;
if (port > 0) {
- gdb_fd = gdbserver_open_port(port);
+ ret = gdb_accept_tcp(gdb_fd);
} else {
- gdb_fd = gdbserver_open_socket(port_or_path);
+ ret = gdb_accept_socket(gdb_fd);
+ if (ret) {
+ gdbserver_user_state.socket_path = g_strdup(path);
+ }
}
- if (gdb_fd < 0) {
- return -1;
+ if (!ret) {
+ close(gdb_fd);
+ }
+
+ return ret;
+}
+
+struct {
+ int port;
+ int gdb_fd;
+ char *path;
+} gdbserver_args;
+
+static void do_gdb_handlesig(CPUState *cs, run_on_cpu_data arg)
+{
+ int sig;
+
+ sig = target_to_host_signal(gdb_handlesig(cs, 0, NULL, NULL, 0));
+ if (sig >= 1 && sig < NSIG) {
+ qemu_kill_thread(gdb_get_cpu_index(cs), sig);
+ }
+}
+
+static void *gdbserver_accept_thread(void *arg)
+{
+ if (gdbserver_accept(gdbserver_args.port, gdbserver_args.gdb_fd,
+ gdbserver_args.path)) {
+ CPUState *cs = first_cpu;
+
+ async_safe_run_on_cpu(cs, do_gdb_handlesig, RUN_ON_CPU_NULL);
+ qemu_kill_thread(gdb_get_cpu_index(cs), host_interrupt_signal);
+ }
+
+ g_free(gdbserver_args.path);
+ gdbserver_args.path = NULL;
+
+ return NULL;
+}
+
+#define USAGE "\nUsage: -g {port|path}[,suspend={y|n}]"
+
+bool gdbserver_start(const char *args, Error **errp)
+{
+ g_auto(GStrv) argv = g_strsplit(args, ",", 0);
+ const char *port_or_path = NULL;
+ bool suspend = true;
+ int gdb_fd, port;
+ GStrv arg;
+
+ for (arg = argv; *arg; arg++) {
+ g_auto(GStrv) tokens = g_strsplit(*arg, "=", 2);
+
+ if (g_strcmp0(tokens[0], "suspend") == 0) {
+ if (tokens[1] == NULL) {
+ error_setg(errp,
+ "gdbstub: missing \"suspend\" option value" USAGE);
+ return false;
+ } else if (!qapi_bool_parse(tokens[0], tokens[1],
+ &suspend, errp)) {
+ return false;
+ }
+ } else {
+ if (port_or_path) {
+ error_setg(errp, "gdbstub: unknown option \"%s\"" USAGE, *arg);
+ return false;
+ }
+ port_or_path = *arg;
+ }
+ }
+ if (!port_or_path) {
+ error_setg(errp, "gdbstub: port or path not specified" USAGE);
+ return false;
}
- if (port > 0 && gdb_accept_tcp(gdb_fd)) {
- return 0;
- } else if (gdb_accept_socket(gdb_fd)) {
- gdbserver_user_state.socket_path = g_strdup(port_or_path);
- return 0;
+ port = g_ascii_strtoull(port_or_path, NULL, 10);
+ if (port > 0) {
+ gdb_fd = gdbserver_open_port(port, errp);
+ } else {
+ gdb_fd = gdbserver_open_socket(port_or_path, errp);
+ }
+ if (gdb_fd < 0) {
+ return false;
}
- /* gone wrong */
- close(gdb_fd);
- return -1;
+ if (suspend) {
+ if (gdbserver_accept(port, gdb_fd, port_or_path)) {
+ gdb_handlesig(first_cpu, 0, NULL, NULL, 0);
+ return true;
+ } else {
+ error_setg(errp, "gdbstub: failed to accept connection");
+ return false;
+ }
+ } else {
+ QemuThread thread;
+
+ gdbserver_args.port = port;
+ gdbserver_args.gdb_fd = gdb_fd;
+ gdbserver_args.path = g_strdup(port_or_path);
+ qemu_thread_create(&thread, "gdb-accept",
+ &gdbserver_accept_thread, NULL,
+ QEMU_THREAD_DETACHED);
+ return true;
+ }
}
void gdbserver_fork_start(void)
@@ -663,11 +743,8 @@ int gdb_continue_partial(char *newstates)
int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
uint8_t *buf, int len, bool is_write)
{
- CPUClass *cc;
-
- cc = CPU_GET_CLASS(cpu);
- if (cc->memory_rw_debug) {
- return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ if (cpu->cc->memory_rw_debug) {
+ return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
}
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
}
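gdbserver_start() above now accepts a comma-separated option string, "{port|path}[,suspend={y|n}]", instead of a bare port or path. A standalone sketch of how such a string splits with the same GLib calls (the input value is made up for illustration):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        const char *args = "1234,suspend=n";          /* hypothetical input */
        g_auto(GStrv) argv = g_strsplit(args, ",", 0);

        for (GStrv arg = argv; *arg; arg++) {
            g_auto(GStrv) tokens = g_strsplit(*arg, "=", 2);

            if (g_strcmp0(tokens[0], "suspend") == 0) {
                printf("suspend -> %s\n", tokens[1] ? tokens[1] : "(missing)");
            } else {
                printf("port or path -> %s\n", *arg);
            }
        }
        return 0;
    }

With suspend=n the stub no longer blocks in gdb_handlesig(); the accept is handed off to the detached "gdb-accept" thread created at the end of gdbserver_start().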
diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx
index c59cd66..639a450 100644
--- a/hmp-commands-info.hx
+++ b/hmp-commands-info.hx
@@ -475,9 +475,9 @@ ERST
{
.name = "migrate",
- .args_type = "",
- .params = "",
- .help = "show migration status",
+ .args_type = "all:-a",
+ .params = "[-a]",
+ .help = "show migration status (-a: all, dump all status)",
.cmd = hmp_info_migrate,
},
diff --git a/host/include/aarch64/host/atomic128-cas.h b/host/include/aarch64/host/atomic128-cas.h
index 5863010..991da4e 100644
--- a/host/include/aarch64/host/atomic128-cas.h
+++ b/host/include/aarch64/host/atomic128-cas.h
@@ -13,7 +13,7 @@
/* Through gcc 10, aarch64 has no support for 128-bit atomics. */
#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
-#include "host/include/generic/host/atomic128-cas.h"
+#include "host/include/generic/host/atomic128-cas.h.inc"
#else
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
diff --git a/host/include/aarch64/host/atomic128-ldst.h b/host/include/aarch64/host/atomic128-ldst.h.inc
index a08f62c..a08f62c 100644
--- a/host/include/aarch64/host/atomic128-ldst.h
+++ b/host/include/aarch64/host/atomic128-ldst.h.inc
diff --git a/host/include/generic/host/atomic128-cas.h b/host/include/generic/host/atomic128-cas.h.inc
index 6b40cc2..6b40cc2 100644
--- a/host/include/generic/host/atomic128-cas.h
+++ b/host/include/generic/host/atomic128-cas.h.inc
diff --git a/host/include/generic/host/atomic128-ldst.h b/host/include/generic/host/atomic128-ldst.h.inc
index 691e6a8..691e6a8 100644
--- a/host/include/generic/host/atomic128-ldst.h
+++ b/host/include/generic/host/atomic128-ldst.h.inc
diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h
index 8177173..9541a64 100644
--- a/host/include/i386/host/cpuinfo.h
+++ b/host/include/i386/host/cpuinfo.h
@@ -9,6 +9,7 @@
/* Digested version of <cpuid.h> */
#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
+#define CPUINFO_OSXSAVE (1u << 1)
#define CPUINFO_MOVBE (1u << 2)
#define CPUINFO_LZCNT (1u << 3)
#define CPUINFO_POPCNT (1u << 4)
diff --git a/host/include/loongarch64/host/atomic128-ldst.h b/host/include/loongarch64/host/atomic128-ldst.h
deleted file mode 100644
index 9a4a8f8..0000000
--- a/host/include/loongarch64/host/atomic128-ldst.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0-or-later
- * Load/store for 128-bit atomic operations, LoongArch version.
- *
- * See docs/devel/atomics.rst for discussion about the guarantees each
- * atomic primitive is meant to provide.
- */
-
-#ifndef LOONGARCH_ATOMIC128_LDST_H
-#define LOONGARCH_ATOMIC128_LDST_H
-
-#include "host/cpuinfo.h"
-#include "tcg/debug-assert.h"
-
-#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_LSX)
-#define HAVE_ATOMIC128_RW HAVE_ATOMIC128_RO
-
-/*
- * As of gcc 13 and clang 16, there is no compiler support for LSX at all.
- * Use inline assembly throughout.
- */
-
-static inline Int128 atomic16_read_ro(const Int128 *ptr)
-{
- uint64_t l, h;
-
- tcg_debug_assert(HAVE_ATOMIC128_RO);
- asm("vld $vr0, %2, 0\n\t"
- "vpickve2gr.d %0, $vr0, 0\n\t"
- "vpickve2gr.d %1, $vr0, 1"
- : "=r"(l), "=r"(h) : "r"(ptr), "m"(*ptr) : "f0");
-
- return int128_make128(l, h);
-}
-
-static inline Int128 atomic16_read_rw(Int128 *ptr)
-{
- return atomic16_read_ro(ptr);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- uint64_t l = int128_getlo(val), h = int128_gethi(val);
-
- tcg_debug_assert(HAVE_ATOMIC128_RW);
- asm("vinsgr2vr.d $vr0, %1, 0\n\t"
- "vinsgr2vr.d $vr0, %2, 1\n\t"
- "vst $vr0, %3, 0"
- : "=m"(*ptr) : "r"(l), "r"(h), "r"(ptr) : "f0");
-}
-
-#endif /* LOONGARCH_ATOMIC128_LDST_H */
diff --git a/host/include/loongarch64/host/atomic128-ldst.h.inc b/host/include/loongarch64/host/atomic128-ldst.h.inc
new file mode 100644
index 0000000..754d214
--- /dev/null
+++ b/host/include/loongarch64/host/atomic128-ldst.h.inc
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Load/store for 128-bit atomic operations, LoongArch version.
+ *
+ * See docs/devel/atomics.rst for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef LOONGARCH_ATOMIC128_LDST_H
+#define LOONGARCH_ATOMIC128_LDST_H
+
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+
+#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_LSX)
+#define HAVE_ATOMIC128_RW HAVE_ATOMIC128_RO
+
+/*
+ * As of gcc 13 and clang 16, there is no compiler support for LSX at all.
+ * Use inline assembly throughout.
+ */
+
+static inline Int128 atomic16_read_ro(const Int128 *ptr)
+{
+ uint64_t l, h;
+
+ tcg_debug_assert(HAVE_ATOMIC128_RO);
+ asm("vld $vr0, %2, 0\n\t"
+ "vpickve2gr.d %0, $vr0, 0\n\t"
+ "vpickve2gr.d %1, $vr0, 1"
+ : "=r"(l), "=r"(h) : "r"(ptr), "m"(*ptr) : "$f0");
+
+ return int128_make128(l, h);
+}
+
+static inline Int128 atomic16_read_rw(Int128 *ptr)
+{
+ return atomic16_read_ro(ptr);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+ uint64_t l = int128_getlo(val), h = int128_gethi(val);
+
+ tcg_debug_assert(HAVE_ATOMIC128_RW);
+ asm("vinsgr2vr.d $vr0, %1, 0\n\t"
+ "vinsgr2vr.d $vr0, %2, 1\n\t"
+ "vst $vr0, %3, 0"
+ : "=m"(*ptr) : "r"(l), "r"(h), "r"(ptr) : "$f0");
+}
+
+#endif /* LOONGARCH_ATOMIC128_LDST_H */
diff --git a/host/include/loongarch64/host/bufferiszero.c.inc b/host/include/loongarch64/host/bufferiszero.c.inc
index 69891ea..bb2598f 100644
--- a/host/include/loongarch64/host/bufferiszero.c.inc
+++ b/host/include/loongarch64/host/bufferiszero.c.inc
@@ -61,7 +61,8 @@ static bool buffer_is_zero_lsx(const void *buf, size_t len)
"2:"
: "=&r"(ret), "+r"(p)
: "r"(buf), "r"(e), "r"(l)
- : "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "fcc0");
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8",
+ "$fcc0");
return ret;
}
@@ -119,7 +120,8 @@ static bool buffer_is_zero_lasx(const void *buf, size_t len)
"3:"
: "=&r"(ret), "+r"(p)
: "r"(buf), "r"(e), "r"(l)
- : "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "fcc0");
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8",
+ "$fcc0");
return ret;
}
diff --git a/host/include/loongarch64/host/load-extract-al16-al8.h.inc b/host/include/loongarch64/host/load-extract-al16-al8.h.inc
index d1fb59d..9528521 100644
--- a/host/include/loongarch64/host/load-extract-al16-al8.h.inc
+++ b/host/include/loongarch64/host/load-extract-al16-al8.h.inc
@@ -31,7 +31,7 @@ static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
asm("vld $vr0, %2, 0\n\t"
"vpickve2gr.d %0, $vr0, 0\n\t"
"vpickve2gr.d %1, $vr0, 1"
- : "=r"(l), "=r"(h) : "r"(ptr_align), "m"(*ptr_align) : "f0");
+ : "=r"(l), "=r"(h) : "r"(ptr_align), "m"(*ptr_align) : "$f0");
return (l >> shr) | (h << (-shr & 63));
}
diff --git a/host/include/riscv/host/cpuinfo.h b/host/include/riscv/host/cpuinfo.h
index 2b00660..b2b53db 100644
--- a/host/include/riscv/host/cpuinfo.h
+++ b/host/include/riscv/host/cpuinfo.h
@@ -9,10 +9,13 @@
#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
#define CPUINFO_ZBA (1u << 1)
#define CPUINFO_ZBB (1u << 2)
-#define CPUINFO_ZICOND (1u << 3)
+#define CPUINFO_ZBS (1u << 3)
+#define CPUINFO_ZICOND (1u << 4)
+#define CPUINFO_ZVE64X (1u << 5)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
+extern unsigned riscv_lg2_vlenb;
/*
* We cannot rely on constructor ordering, so other constructors must
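The new riscv_lg2_vlenb export above, as its name suggests, holds the base-2 logarithm of the vector register length in bytes (VLENB), so consumers recover the byte width with a shift. A tiny hedged sketch (the variable is assumed to be filled in by the cpuinfo constructor):

    extern unsigned riscv_lg2_vlenb;   /* declared in the header above */

    /* VLEN in bytes; e.g. a value of 4 would mean 16 bytes, i.e. VLEN = 128 bits. */
    static inline unsigned riscv_vlenb_bytes(void)
    {
        return 1u << riscv_lg2_vlenb;
    }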
diff --git a/host/include/x86_64/host/atomic128-ldst.h b/host/include/x86_64/host/atomic128-ldst.h
deleted file mode 100644
index 8d6f909..0000000
--- a/host/include/x86_64/host/atomic128-ldst.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0-or-later
- * Load/store for 128-bit atomic operations, x86_64 version.
- *
- * Copyright (C) 2023 Linaro, Ltd.
- *
- * See docs/devel/atomics.rst for discussion about the guarantees each
- * atomic primitive is meant to provide.
- */
-
-#ifndef X86_64_ATOMIC128_LDST_H
-#define X86_64_ATOMIC128_LDST_H
-
-#ifdef CONFIG_INT128_TYPE
-#include "host/cpuinfo.h"
-#include "tcg/debug-assert.h"
-#include <immintrin.h>
-
-typedef union {
- __m128i v;
- __int128_t i;
- Int128 s;
-} X86Int128Union;
-
-/*
- * Through clang 16, with -mcx16, __atomic_load_n is incorrectly
- * expanded to a read-write operation: lock cmpxchg16b.
- */
-
-#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
-#define HAVE_ATOMIC128_RW 1
-
-static inline Int128 atomic16_read_ro(const Int128 *ptr)
-{
- X86Int128Union r;
-
- tcg_debug_assert(HAVE_ATOMIC128_RO);
- asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr));
-
- return r.s;
-}
-
-static inline Int128 atomic16_read_rw(Int128 *ptr)
-{
- __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
- X86Int128Union r;
-
- if (HAVE_ATOMIC128_RO) {
- asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr_align));
- } else {
- r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0);
- }
- return r.s;
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
- X86Int128Union new = { .s = val };
-
- if (HAVE_ATOMIC128_RO) {
- asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.v));
- } else {
- __int128_t old;
- do {
- old = *ptr_align;
- } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
- }
-}
-#else
-/* Provide QEMU_ERROR stubs. */
-#include "host/include/generic/host/atomic128-ldst.h"
-#endif
-
-#endif /* X86_64_ATOMIC128_LDST_H */
diff --git a/host/include/x86_64/host/atomic128-ldst.h.inc b/host/include/x86_64/host/atomic128-ldst.h.inc
new file mode 100644
index 0000000..4c698e3
--- /dev/null
+++ b/host/include/x86_64/host/atomic128-ldst.h.inc
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Load/store for 128-bit atomic operations, x86_64 version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ *
+ * See docs/devel/atomics.rst for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef X86_64_ATOMIC128_LDST_H
+#define X86_64_ATOMIC128_LDST_H
+
+#ifdef CONFIG_INT128_TYPE
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+#include <immintrin.h>
+
+typedef union {
+ __m128i v;
+ __int128_t i;
+ Int128 s;
+} X86Int128Union;
+
+/*
+ * Through clang 16, with -mcx16, __atomic_load_n is incorrectly
+ * expanded to a read-write operation: lock cmpxchg16b.
+ */
+
+#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
+#define HAVE_ATOMIC128_RW 1
+
+static inline Int128 atomic16_read_ro(const Int128 *ptr)
+{
+ X86Int128Union r;
+
+ tcg_debug_assert(HAVE_ATOMIC128_RO);
+ asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr));
+
+ return r.s;
+}
+
+static inline Int128 atomic16_read_rw(Int128 *ptr)
+{
+ __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+ X86Int128Union r;
+
+ if (HAVE_ATOMIC128_RO) {
+ asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr_align));
+ } else {
+ r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0);
+ }
+ return r.s;
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+ __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+ X86Int128Union new = { .s = val };
+
+ if (HAVE_ATOMIC128_RO) {
+ asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.v));
+ } else {
+ __int128_t old;
+ do {
+ old = *ptr_align;
+ } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
+ }
+}
+#else
+/* Provide QEMU_ERROR stubs. */
+#include "host/include/generic/host/atomic128-ldst.h.inc"
+#endif
+
+#endif /* X86_64_ATOMIC128_LDST_H */
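The renamed x86_64 header keeps two 16-byte load paths: an atomic vmovdqa when the CPU advertises it (HAVE_ATOMIC128_RO), and a lock cmpxchg16b read-modify-write fallback otherwise. A minimal usage sketch, assuming the atomic16_* helpers and Int128 type defined above:

    /* Assumes the definitions from the header above (hedged sketch only). */
    static Int128 read_pair(Int128 *p, bool readonly_mapping)
    {
        if (readonly_mapping) {
            /* Only legal when HAVE_ATOMIC128_RO holds: a pure load, never a store. */
            tcg_debug_assert(HAVE_ATOMIC128_RO);
            return atomic16_read_ro(p);
        }
        /* May fall back to lock cmpxchg16b, which also dirties the cache line. */
        return atomic16_read_rw(p);
    }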
diff --git a/host/include/x86_64/host/load-extract-al16-al8.h.inc b/host/include/x86_64/host/load-extract-al16-al8.h.inc
index baa506b..b837c37 100644
--- a/host/include/x86_64/host/load-extract-al16-al8.h.inc
+++ b/host/include/x86_64/host/load-extract-al16-al8.h.inc
@@ -9,7 +9,7 @@
#define X86_64_LOAD_EXTRACT_AL16_AL8_H
#ifdef CONFIG_INT128_TYPE
-#include "host/atomic128-ldst.h"
+#include "host/atomic128-ldst.h.inc"
/**
* load_atom_extract_al16_or_al8:
diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c
index 1b1f3b9..31e2162 100644
--- a/hw/9pfs/9p-local.c
+++ b/hw/9pfs/9p-local.c
@@ -766,16 +766,19 @@ out:
return err;
}
-static int local_fstat(FsContext *fs_ctx, int fid_type,
- V9fsFidOpenState *fs, struct stat *stbuf)
+static int local_fid_fd(int fid_type, V9fsFidOpenState *fs)
{
- int err, fd;
-
if (fid_type == P9_FID_DIR) {
- fd = dirfd(fs->dir.stream);
+ return dirfd(fs->dir.stream);
} else {
- fd = fs->fd;
+ return fs->fd;
}
+}
+
+static int local_fstat(FsContext *fs_ctx, int fid_type,
+ V9fsFidOpenState *fs, struct stat *stbuf)
+{
+ int err, fd = local_fid_fd(fid_type, fs);
err = fstat(fd, stbuf);
if (err) {
@@ -1039,6 +1042,14 @@ static int local_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
return ret;
}
+static int local_ftruncate(FsContext *ctx, int fid_type, V9fsFidOpenState *fs,
+ off_t size)
+{
+ int fd = local_fid_fd(fid_type, fs);
+
+ return ftruncate(fd, size);
+}
+
static int local_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
char *dirpath = g_path_get_dirname(fs_path->data);
@@ -1089,6 +1100,14 @@ out:
return ret;
}
+static int local_futimens(FsContext *s, int fid_type, V9fsFidOpenState *fs,
+ const struct timespec *times)
+{
+ int fd = local_fid_fd(fid_type, fs);
+
+ return qemu_futimens(fd, times);
+}
+
static int local_unlinkat_common(FsContext *ctx, int dirfd, const char *name,
int flags)
{
@@ -1167,13 +1186,7 @@ out:
static int local_fsync(FsContext *ctx, int fid_type,
V9fsFidOpenState *fs, int datasync)
{
- int fd;
-
- if (fid_type == P9_FID_DIR) {
- fd = dirfd(fs->dir.stream);
- } else {
- fd = fs->fd;
- }
+ int fd = local_fid_fd(fid_type, fs);
if (datasync) {
return qemu_fdatasync(fd);
@@ -1538,6 +1551,9 @@ static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp)
"[remap|forbid|warn]\n");
return -1;
}
+ } else {
+ fse->export_flags &= ~V9FS_FORBID_MULTIDEVS;
+ fse->export_flags |= V9FS_REMAP_INODES;
}
if (!path) {
@@ -1572,6 +1588,13 @@ static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp)
return 0;
}
+static bool local_has_valid_file_handle(int fid_type, V9fsFidOpenState *fs)
+{
+ return
+ (fid_type == P9_FID_FILE && fs->fd != -1) ||
+ (fid_type == P9_FID_DIR && fs->dir.stream != NULL);
+}
+
FileOperations local_ops = {
.parse_opts = local_parse_opts,
.init = local_init,
@@ -1609,4 +1632,7 @@ FileOperations local_ops = {
.name_to_path = local_name_to_path,
.renameat = local_renameat,
.unlinkat = local_unlinkat,
+ .has_valid_file_handle = local_has_valid_file_handle,
+ .ftruncate = local_ftruncate,
+ .futimens = local_futimens,
};
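local_fid_fd() above factors out the "directory stream vs. regular fd" choice that local_fstat() and local_fsync() previously duplicated, so the new local_ftruncate() and local_futimens() callbacks become one-liners. A further fd-based hook could follow the same shape; the sketch below is hypothetical (local_fstatfs_example is not part of the patch) and assumes the types used in this file:

    /* Hypothetical additional wrapper in the style of local_ftruncate(). */
    static int local_fstatfs_example(FsContext *ctx, int fid_type,
                                     V9fsFidOpenState *fs, struct statfs *stbuf)
    {
        int fd = local_fid_fd(fid_type, fs);

        return fstatfs(fd, stbuf);
    }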
diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c
deleted file mode 100644
index 7aac49a..0000000
--- a/hw/9pfs/9p-proxy.c
+++ /dev/null
@@ -1,1279 +0,0 @@
-/*
- * 9p Proxy callback
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- * M. Mohan Kumar <mohan@in.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-/*
- * Not so fast! You might want to read the 9p developer docs first:
- * https://wiki.qemu.org/Documentation/9p
- */
-
-/*
- * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be
- * removed in a future version of QEMU!
- */
-
-#include "qemu/osdep.h"
-#include <sys/socket.h>
-#include <sys/un.h>
-#include "9p.h"
-#include "qapi/error.h"
-#include "qemu/cutils.h"
-#include "qemu/error-report.h"
-#include "qemu/option.h"
-#include "fsdev/qemu-fsdev.h"
-#include "9p-proxy.h"
-
-typedef struct V9fsProxy {
- int sockfd;
- QemuMutex mutex;
- struct iovec in_iovec;
- struct iovec out_iovec;
-} V9fsProxy;
-
-/*
- * Return received file descriptor on success in *status.
- * errno is also returned on *status (which will be < 0)
- * return < 0 on transport error.
- */
-static int v9fs_receivefd(int sockfd, int *status)
-{
- struct iovec iov;
- struct msghdr msg;
- struct cmsghdr *cmsg;
- int retval, data, fd;
- union MsgControl msg_control;
-
- iov.iov_base = &data;
- iov.iov_len = sizeof(data);
-
- memset(&msg, 0, sizeof(msg));
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = &msg_control;
- msg.msg_controllen = sizeof(msg_control);
-
- do {
- retval = recvmsg(sockfd, &msg, 0);
- } while (retval < 0 && errno == EINTR);
- if (retval <= 0) {
- return retval;
- }
- /*
- * data is set to V9FS_FD_VALID, if ancillary data is sent. If this
- * request doesn't need ancillary data (fd) or an error occurred,
- * data is set to negative errno value.
- */
- if (data != V9FS_FD_VALID) {
- *status = data;
- return 0;
- }
- /*
- * File descriptor (fd) is sent in the ancillary data. Check if we
- * indeed received it. One of the reasons to fail to receive it is if
- * we exceeded the maximum number of file descriptors!
- */
- for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
- cmsg->cmsg_level != SOL_SOCKET ||
- cmsg->cmsg_type != SCM_RIGHTS) {
- continue;
- }
- fd = *((int *)CMSG_DATA(cmsg));
- *status = fd;
- return 0;
- }
- *status = -ENFILE; /* Ancillary data sent but not received */
- return 0;
-}
-
-static ssize_t socket_read(int sockfd, void *buff, size_t size)
-{
- ssize_t retval, total = 0;
-
- while (size) {
- retval = read(sockfd, buff, size);
- if (retval == 0) {
- return -EIO;
- }
- if (retval < 0) {
- if (errno == EINTR) {
- continue;
- }
- return -errno;
- }
- size -= retval;
- buff += retval;
- total += retval;
- }
- return total;
-}
-
-/* Converts proxy_statfs to VFS statfs structure */
-static void prstatfs_to_statfs(struct statfs *stfs, ProxyStatFS *prstfs)
-{
- memset(stfs, 0, sizeof(*stfs));
- stfs->f_type = prstfs->f_type;
- stfs->f_bsize = prstfs->f_bsize;
- stfs->f_blocks = prstfs->f_blocks;
- stfs->f_bfree = prstfs->f_bfree;
- stfs->f_bavail = prstfs->f_bavail;
- stfs->f_files = prstfs->f_files;
- stfs->f_ffree = prstfs->f_ffree;
-#ifdef CONFIG_DARWIN
- /* f_namelen and f_frsize do not exist on Darwin */
- stfs->f_fsid.val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU;
- stfs->f_fsid.val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU;
-#else
- stfs->f_fsid.__val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU;
- stfs->f_fsid.__val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU;
- stfs->f_namelen = prstfs->f_namelen;
- stfs->f_frsize = prstfs->f_frsize;
-#endif
-}
-
-/* Converts proxy_stat structure to VFS stat structure */
-static void prstat_to_stat(struct stat *stbuf, ProxyStat *prstat)
-{
- memset(stbuf, 0, sizeof(*stbuf));
- stbuf->st_dev = prstat->st_dev;
- stbuf->st_ino = prstat->st_ino;
- stbuf->st_nlink = prstat->st_nlink;
- stbuf->st_mode = prstat->st_mode;
- stbuf->st_uid = prstat->st_uid;
- stbuf->st_gid = prstat->st_gid;
- stbuf->st_rdev = prstat->st_rdev;
- stbuf->st_size = prstat->st_size;
- stbuf->st_blksize = prstat->st_blksize;
- stbuf->st_blocks = prstat->st_blocks;
- stbuf->st_atime = prstat->st_atim_sec;
- stbuf->st_mtime = prstat->st_mtim_sec;
- stbuf->st_ctime = prstat->st_ctim_sec;
-#ifdef CONFIG_DARWIN
- stbuf->st_atimespec.tv_sec = prstat->st_atim_sec;
- stbuf->st_mtimespec.tv_sec = prstat->st_mtim_sec;
- stbuf->st_ctimespec.tv_sec = prstat->st_ctim_sec;
- stbuf->st_atimespec.tv_nsec = prstat->st_atim_nsec;
- stbuf->st_mtimespec.tv_nsec = prstat->st_mtim_nsec;
- stbuf->st_ctimespec.tv_nsec = prstat->st_ctim_nsec;
-#else
- stbuf->st_atim.tv_sec = prstat->st_atim_sec;
- stbuf->st_mtim.tv_sec = prstat->st_mtim_sec;
- stbuf->st_ctim.tv_sec = prstat->st_ctim_sec;
- stbuf->st_atim.tv_nsec = prstat->st_atim_nsec;
- stbuf->st_mtim.tv_nsec = prstat->st_mtim_nsec;
- stbuf->st_ctim.tv_nsec = prstat->st_ctim_nsec;
-#endif
-}
-
-/*
- * Response contains two parts
- * {header, data}
- * header.type == T_ERROR, data -> -errno
- * header.type == T_SUCCESS, data -> response
- * size of errno/response is given by header.size
- * returns < 0, on transport error. response is
- * valid only if status >= 0.
- */
-static int v9fs_receive_response(V9fsProxy *proxy, int type,
- int *status, void *response)
-{
- int retval;
- ProxyHeader header;
- struct iovec *reply = &proxy->in_iovec;
-
- *status = 0;
- reply->iov_len = 0;
- retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ);
- if (retval < 0) {
- return retval;
- }
- reply->iov_len = PROXY_HDR_SZ;
- retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size);
- assert(retval == 4 * 2);
- /*
- * if response size > PROXY_MAX_IO_SZ, read the response but ignore it and
- * return -ENOBUFS
- */
- if (header.size > PROXY_MAX_IO_SZ) {
- int count;
- while (header.size > 0) {
- count = MIN(PROXY_MAX_IO_SZ, header.size);
- count = socket_read(proxy->sockfd, reply->iov_base, count);
- if (count < 0) {
- return count;
- }
- header.size -= count;
- }
- *status = -ENOBUFS;
- return 0;
- }
-
- retval = socket_read(proxy->sockfd,
- reply->iov_base + PROXY_HDR_SZ, header.size);
- if (retval < 0) {
- return retval;
- }
- reply->iov_len += header.size;
- /* there was an error during processing request */
- if (header.type == T_ERROR) {
- int ret;
- ret = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status);
- assert(ret == 4);
- return 0;
- }
-
- switch (type) {
- case T_LSTAT: {
- ProxyStat prstat;
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
- "qqqdddqqqqqqqqqq", &prstat.st_dev,
- &prstat.st_ino, &prstat.st_nlink,
- &prstat.st_mode, &prstat.st_uid,
- &prstat.st_gid, &prstat.st_rdev,
- &prstat.st_size, &prstat.st_blksize,
- &prstat.st_blocks,
- &prstat.st_atim_sec, &prstat.st_atim_nsec,
- &prstat.st_mtim_sec, &prstat.st_mtim_nsec,
- &prstat.st_ctim_sec, &prstat.st_ctim_nsec);
- assert(retval == 8 * 3 + 4 * 3 + 8 * 10);
- prstat_to_stat(response, &prstat);
- break;
- }
- case T_STATFS: {
- ProxyStatFS prstfs;
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
- "qqqqqqqqqqq", &prstfs.f_type,
- &prstfs.f_bsize, &prstfs.f_blocks,
- &prstfs.f_bfree, &prstfs.f_bavail,
- &prstfs.f_files, &prstfs.f_ffree,
- &prstfs.f_fsid[0], &prstfs.f_fsid[1],
- &prstfs.f_namelen, &prstfs.f_frsize);
- assert(retval == 8 * 11);
- prstatfs_to_statfs(response, &prstfs);
- break;
- }
- case T_READLINK: {
- V9fsString target;
- v9fs_string_init(&target);
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &target);
- strcpy(response, target.data);
- v9fs_string_free(&target);
- break;
- }
- case T_LGETXATTR:
- case T_LLISTXATTR: {
- V9fsString xattr;
- v9fs_string_init(&xattr);
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &xattr);
- memcpy(response, xattr.data, xattr.size);
- v9fs_string_free(&xattr);
- break;
- }
- case T_GETVERSION:
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "q", response);
- assert(retval == 8);
- break;
- default:
- return -1;
- }
- if (retval < 0) {
- *status = retval;
- }
- return 0;
-}
-
-/*
- * return < 0 on transport error.
- * *status is valid only if return >= 0
- */
-static int v9fs_receive_status(V9fsProxy *proxy,
- struct iovec *reply, int *status)
-{
- int retval;
- ProxyHeader header;
-
- *status = 0;
- reply->iov_len = 0;
- retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ);
- if (retval < 0) {
- return retval;
- }
- reply->iov_len = PROXY_HDR_SZ;
- retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size);
- assert(retval == 4 * 2);
- retval = socket_read(proxy->sockfd,
- reply->iov_base + PROXY_HDR_SZ, header.size);
- if (retval < 0) {
- return retval;
- }
- reply->iov_len += header.size;
- retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status);
- assert(retval == 4);
- return 0;
-}
-
-/*
- * Proxy->header and proxy->request written to socket by QEMU process.
- * This request read by proxy helper process
- * returns 0 on success and -errno on error
- */
-static int v9fs_request(V9fsProxy *proxy, int type, void *response, ...)
-{
- dev_t rdev;
- va_list ap;
- int size = 0;
- int retval = 0;
- uint64_t offset;
- ProxyHeader header = { 0, 0};
- struct timespec spec[2];
- int flags, mode, uid, gid;
- V9fsString *name, *value;
- V9fsString *path, *oldpath;
- struct iovec *iovec = NULL, *reply = NULL;
-
- qemu_mutex_lock(&proxy->mutex);
-
- if (proxy->sockfd == -1) {
- retval = -EIO;
- goto err_out;
- }
- iovec = &proxy->out_iovec;
- reply = &proxy->in_iovec;
- va_start(ap, response);
- switch (type) {
- case T_OPEN:
- path = va_arg(ap, V9fsString *);
- flags = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, flags);
- if (retval > 0) {
- header.size = retval;
- header.type = T_OPEN;
- }
- break;
- case T_CREATE:
- path = va_arg(ap, V9fsString *);
- flags = va_arg(ap, int);
- mode = va_arg(ap, int);
- uid = va_arg(ap, int);
- gid = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdddd", path,
- flags, mode, uid, gid);
- if (retval > 0) {
- header.size = retval;
- header.type = T_CREATE;
- }
- break;
- case T_MKNOD:
- path = va_arg(ap, V9fsString *);
- mode = va_arg(ap, int);
- rdev = va_arg(ap, long int);
- uid = va_arg(ap, int);
- gid = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsdq",
- uid, gid, path, mode, rdev);
- if (retval > 0) {
- header.size = retval;
- header.type = T_MKNOD;
- }
- break;
- case T_MKDIR:
- path = va_arg(ap, V9fsString *);
- mode = va_arg(ap, int);
- uid = va_arg(ap, int);
- gid = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsd",
- uid, gid, path, mode);
- if (retval > 0) {
- header.size = retval;
- header.type = T_MKDIR;
- }
- break;
- case T_SYMLINK:
- oldpath = va_arg(ap, V9fsString *);
- path = va_arg(ap, V9fsString *);
- uid = va_arg(ap, int);
- gid = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddss",
- uid, gid, oldpath, path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_SYMLINK;
- }
- break;
- case T_LINK:
- oldpath = va_arg(ap, V9fsString *);
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss",
- oldpath, path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LINK;
- }
- break;
- case T_LSTAT:
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LSTAT;
- }
- break;
- case T_READLINK:
- path = va_arg(ap, V9fsString *);
- size = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, size);
- if (retval > 0) {
- header.size = retval;
- header.type = T_READLINK;
- }
- break;
- case T_STATFS:
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_STATFS;
- }
- break;
- case T_CHMOD:
- path = va_arg(ap, V9fsString *);
- mode = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, mode);
- if (retval > 0) {
- header.size = retval;
- header.type = T_CHMOD;
- }
- break;
- case T_CHOWN:
- path = va_arg(ap, V9fsString *);
- uid = va_arg(ap, int);
- gid = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdd", path, uid, gid);
- if (retval > 0) {
- header.size = retval;
- header.type = T_CHOWN;
- }
- break;
- case T_TRUNCATE:
- path = va_arg(ap, V9fsString *);
- offset = va_arg(ap, uint64_t);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sq", path, offset);
- if (retval > 0) {
- header.size = retval;
- header.type = T_TRUNCATE;
- }
- break;
- case T_UTIME:
- path = va_arg(ap, V9fsString *);
- spec[0].tv_sec = va_arg(ap, long);
- spec[0].tv_nsec = va_arg(ap, long);
- spec[1].tv_sec = va_arg(ap, long);
- spec[1].tv_nsec = va_arg(ap, long);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sqqqq", path,
- spec[0].tv_sec, spec[1].tv_nsec,
- spec[1].tv_sec, spec[1].tv_nsec);
- if (retval > 0) {
- header.size = retval;
- header.type = T_UTIME;
- }
- break;
- case T_RENAME:
- oldpath = va_arg(ap, V9fsString *);
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", oldpath, path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_RENAME;
- }
- break;
- case T_REMOVE:
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_REMOVE;
- }
- break;
- case T_LGETXATTR:
- size = va_arg(ap, int);
- path = va_arg(ap, V9fsString *);
- name = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ,
- "dss", size, path, name);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LGETXATTR;
- }
- break;
- case T_LLISTXATTR:
- size = va_arg(ap, int);
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ds", size, path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LLISTXATTR;
- }
- break;
- case T_LSETXATTR:
- path = va_arg(ap, V9fsString *);
- name = va_arg(ap, V9fsString *);
- value = va_arg(ap, V9fsString *);
- size = va_arg(ap, int);
- flags = va_arg(ap, int);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sssdd",
- path, name, value, size, flags);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LSETXATTR;
- }
- break;
- case T_LREMOVEXATTR:
- path = va_arg(ap, V9fsString *);
- name = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", path, name);
- if (retval > 0) {
- header.size = retval;
- header.type = T_LREMOVEXATTR;
- }
- break;
- case T_GETVERSION:
- path = va_arg(ap, V9fsString *);
- retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path);
- if (retval > 0) {
- header.size = retval;
- header.type = T_GETVERSION;
- }
- break;
- default:
- error_report("Invalid type %d", type);
- retval = -EINVAL;
- break;
- }
- va_end(ap);
-
- if (retval < 0) {
- goto err_out;
- }
-
- /* marshal the header details */
- retval = proxy_marshal(iovec, 0, "dd", header.type, header.size);
- assert(retval == 4 * 2);
- header.size += PROXY_HDR_SZ;
-
- retval = qemu_write_full(proxy->sockfd, iovec->iov_base, header.size);
- if (retval != header.size) {
- goto close_error;
- }
-
- switch (type) {
- case T_OPEN:
- case T_CREATE:
- /*
- * A file descriptor is returned as response for
- * T_OPEN,T_CREATE on success
- */
- if (v9fs_receivefd(proxy->sockfd, &retval) < 0) {
- goto close_error;
- }
- break;
- case T_MKNOD:
- case T_MKDIR:
- case T_SYMLINK:
- case T_LINK:
- case T_CHMOD:
- case T_CHOWN:
- case T_RENAME:
- case T_TRUNCATE:
- case T_UTIME:
- case T_REMOVE:
- case T_LSETXATTR:
- case T_LREMOVEXATTR:
- if (v9fs_receive_status(proxy, reply, &retval) < 0) {
- goto close_error;
- }
- break;
- case T_LSTAT:
- case T_READLINK:
- case T_STATFS:
- case T_GETVERSION:
- if (v9fs_receive_response(proxy, type, &retval, response) < 0) {
- goto close_error;
- }
- break;
- case T_LGETXATTR:
- case T_LLISTXATTR:
- if (!size) {
- if (v9fs_receive_status(proxy, reply, &retval) < 0) {
- goto close_error;
- }
- } else {
- if (v9fs_receive_response(proxy, type, &retval, response) < 0) {
- goto close_error;
- }
- }
- break;
- }
-
-err_out:
- qemu_mutex_unlock(&proxy->mutex);
- return retval;
-
-close_error:
- close(proxy->sockfd);
- proxy->sockfd = -1;
- qemu_mutex_unlock(&proxy->mutex);
- return -EIO;
-}
-
-static int proxy_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf)
-{
- int retval;
- retval = v9fs_request(fs_ctx->private, T_LSTAT, stbuf, fs_path);
- if (retval < 0) {
- errno = -retval;
- return -1;
- }
- return retval;
-}
-
-static ssize_t proxy_readlink(FsContext *fs_ctx, V9fsPath *fs_path,
- char *buf, size_t bufsz)
-{
- int retval;
- retval = v9fs_request(fs_ctx->private, T_READLINK, buf, fs_path, bufsz);
- if (retval < 0) {
- errno = -retval;
- return -1;
- }
- return strlen(buf);
-}
-
-static int proxy_close(FsContext *ctx, V9fsFidOpenState *fs)
-{
- return close(fs->fd);
-}
-
-static int proxy_closedir(FsContext *ctx, V9fsFidOpenState *fs)
-{
- return closedir(fs->dir.stream);
-}
-
-static int proxy_open(FsContext *ctx, V9fsPath *fs_path,
- int flags, V9fsFidOpenState *fs)
-{
- fs->fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, flags);
- if (fs->fd < 0) {
- errno = -fs->fd;
- fs->fd = -1;
- }
- return fs->fd;
-}
-
-static int proxy_opendir(FsContext *ctx,
- V9fsPath *fs_path, V9fsFidOpenState *fs)
-{
- int serrno, fd;
-
- fs->dir.stream = NULL;
- fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, O_DIRECTORY);
- if (fd < 0) {
- errno = -fd;
- return -1;
- }
- fs->dir.stream = fdopendir(fd);
- if (!fs->dir.stream) {
- serrno = errno;
- close(fd);
- errno = serrno;
- return -1;
- }
- return 0;
-}
-
-static void proxy_rewinddir(FsContext *ctx, V9fsFidOpenState *fs)
-{
- rewinddir(fs->dir.stream);
-}
-
-static off_t proxy_telldir(FsContext *ctx, V9fsFidOpenState *fs)
-{
- return telldir(fs->dir.stream);
-}
-
-static struct dirent *proxy_readdir(FsContext *ctx, V9fsFidOpenState *fs)
-{
- struct dirent *entry;
- entry = readdir(fs->dir.stream);
-#ifdef CONFIG_DARWIN
- if (!entry) {
- return NULL;
- }
- int td;
- td = telldir(fs->dir.stream);
- /* If telldir fails, fail the entire readdir call */
- if (td < 0) {
- return NULL;
- }
- entry->d_seekoff = td;
-#endif
- return entry;
-}
-
-static void proxy_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off)
-{
- seekdir(fs->dir.stream, off);
-}
-
-static ssize_t proxy_preadv(FsContext *ctx, V9fsFidOpenState *fs,
- const struct iovec *iov,
- int iovcnt, off_t offset)
-{
- ssize_t ret;
-#ifdef CONFIG_PREADV
- ret = preadv(fs->fd, iov, iovcnt, offset);
-#else
- ret = lseek(fs->fd, offset, SEEK_SET);
- if (ret >= 0) {
- ret = readv(fs->fd, iov, iovcnt);
- }
-#endif
- return ret;
-}
-
-static ssize_t proxy_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
- const struct iovec *iov,
- int iovcnt, off_t offset)
-{
- ssize_t ret;
-
-#ifdef CONFIG_PREADV
- ret = pwritev(fs->fd, iov, iovcnt, offset);
-#else
- ret = lseek(fs->fd, offset, SEEK_SET);
- if (ret >= 0) {
- ret = writev(fs->fd, iov, iovcnt);
- }
-#endif
-#ifdef CONFIG_SYNC_FILE_RANGE
- if (ret > 0 && ctx->export_flags & V9FS_IMMEDIATE_WRITEOUT) {
- /*
- * Initiate a writeback. This is not a data integrity sync.
- * We want to ensure that we don't leave dirty pages in the cache
- * after write when writeout=immediate is specified.
- */
- sync_file_range(fs->fd, offset, ret,
- SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE);
- }
-#endif
- return ret;
-}
-
-static int proxy_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
-{
- int retval;
- retval = v9fs_request(fs_ctx->private, T_CHMOD, NULL, fs_path,
- credp->fc_mode);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_mknod(FsContext *fs_ctx, V9fsPath *dir_path,
- const char *name, FsCred *credp)
-{
- int retval;
- V9fsString fullname;
-
- v9fs_string_init(&fullname);
- v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
-
- retval = v9fs_request(fs_ctx->private, T_MKNOD, NULL, &fullname,
- credp->fc_mode, credp->fc_rdev,
- credp->fc_uid, credp->fc_gid);
- v9fs_string_free(&fullname);
- if (retval < 0) {
- errno = -retval;
- retval = -1;
- }
- return retval;
-}
-
-static int proxy_mkdir(FsContext *fs_ctx, V9fsPath *dir_path,
- const char *name, FsCred *credp)
-{
- int retval;
- V9fsString fullname;
-
- v9fs_string_init(&fullname);
- v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
-
- retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, &fullname,
- credp->fc_mode, credp->fc_uid, credp->fc_gid);
- v9fs_string_free(&fullname);
- if (retval < 0) {
- errno = -retval;
- retval = -1;
- }
- return retval;
-}
-
-static int proxy_fstat(FsContext *fs_ctx, int fid_type,
- V9fsFidOpenState *fs, struct stat *stbuf)
-{
- int fd;
-
- if (fid_type == P9_FID_DIR) {
- fd = dirfd(fs->dir.stream);
- } else {
- fd = fs->fd;
- }
- return fstat(fd, stbuf);
-}
-
-static int proxy_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name,
- int flags, FsCred *credp, V9fsFidOpenState *fs)
-{
- V9fsString fullname;
-
- v9fs_string_init(&fullname);
- v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
-
- fs->fd = v9fs_request(fs_ctx->private, T_CREATE, NULL, &fullname, flags,
- credp->fc_mode, credp->fc_uid, credp->fc_gid);
- v9fs_string_free(&fullname);
- if (fs->fd < 0) {
- errno = -fs->fd;
- fs->fd = -1;
- }
- return fs->fd;
-}
-
-static int proxy_symlink(FsContext *fs_ctx, const char *oldpath,
- V9fsPath *dir_path, const char *name, FsCred *credp)
-{
- int retval;
- V9fsString fullname, target;
-
- v9fs_string_init(&fullname);
- v9fs_string_init(&target);
-
- v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
- v9fs_string_sprintf(&target, "%s", oldpath);
-
- retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, &target, &fullname,
- credp->fc_uid, credp->fc_gid);
- v9fs_string_free(&fullname);
- v9fs_string_free(&target);
- if (retval < 0) {
- errno = -retval;
- retval = -1;
- }
- return retval;
-}
-
-static int proxy_link(FsContext *ctx, V9fsPath *oldpath,
- V9fsPath *dirpath, const char *name)
-{
- int retval;
- V9fsString newpath;
-
- v9fs_string_init(&newpath);
- v9fs_string_sprintf(&newpath, "%s/%s", dirpath->data, name);
-
- retval = v9fs_request(ctx->private, T_LINK, NULL, oldpath, &newpath);
- v9fs_string_free(&newpath);
- if (retval < 0) {
- errno = -retval;
- retval = -1;
- }
- return retval;
-}
-
-static int proxy_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
-{
- int retval;
-
- retval = v9fs_request(ctx->private, T_TRUNCATE, NULL, fs_path, size);
- if (retval < 0) {
- errno = -retval;
- return -1;
- }
- return 0;
-}
-
-static int proxy_rename(FsContext *ctx, const char *oldpath,
- const char *newpath)
-{
- int retval;
- V9fsString oldname, newname;
-
- v9fs_string_init(&oldname);
- v9fs_string_init(&newname);
-
- v9fs_string_sprintf(&oldname, "%s", oldpath);
- v9fs_string_sprintf(&newname, "%s", newpath);
- retval = v9fs_request(ctx->private, T_RENAME, NULL, &oldname, &newname);
- v9fs_string_free(&oldname);
- v9fs_string_free(&newname);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
-{
- int retval;
- retval = v9fs_request(fs_ctx->private, T_CHOWN, NULL, fs_path,
- credp->fc_uid, credp->fc_gid);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_utimensat(FsContext *s, V9fsPath *fs_path,
- const struct timespec *buf)
-{
- int retval;
- retval = v9fs_request(s->private, T_UTIME, NULL, fs_path,
- buf[0].tv_sec, buf[0].tv_nsec,
- buf[1].tv_sec, buf[1].tv_nsec);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_remove(FsContext *ctx, const char *path)
-{
- int retval;
- V9fsString name;
- v9fs_string_init(&name);
- v9fs_string_sprintf(&name, "%s", path);
- retval = v9fs_request(ctx->private, T_REMOVE, NULL, &name);
- v9fs_string_free(&name);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_fsync(FsContext *ctx, int fid_type,
- V9fsFidOpenState *fs, int datasync)
-{
- int fd;
-
- if (fid_type == P9_FID_DIR) {
- fd = dirfd(fs->dir.stream);
- } else {
- fd = fs->fd;
- }
-
- if (datasync) {
- return qemu_fdatasync(fd);
- } else {
- return fsync(fd);
- }
-}
-
-static int proxy_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf)
-{
- int retval;
- retval = v9fs_request(s->private, T_STATFS, stbuf, fs_path);
- if (retval < 0) {
- errno = -retval;
- return -1;
- }
- return retval;
-}
-
-static ssize_t proxy_lgetxattr(FsContext *ctx, V9fsPath *fs_path,
- const char *name, void *value, size_t size)
-{
- int retval;
- V9fsString xname;
-
- v9fs_string_init(&xname);
- v9fs_string_sprintf(&xname, "%s", name);
- retval = v9fs_request(ctx->private, T_LGETXATTR, value, size, fs_path,
- &xname);
- v9fs_string_free(&xname);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static ssize_t proxy_llistxattr(FsContext *ctx, V9fsPath *fs_path,
- void *value, size_t size)
-{
- int retval;
- retval = v9fs_request(ctx->private, T_LLISTXATTR, value, size, fs_path);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name,
- void *value, size_t size, int flags)
-{
- int retval;
- V9fsString xname, xvalue;
-
- v9fs_string_init(&xname);
- v9fs_string_sprintf(&xname, "%s", name);
-
- v9fs_string_init(&xvalue);
- xvalue.size = size;
- xvalue.data = g_malloc(size);
- memcpy(xvalue.data, value, size);
-
- retval = v9fs_request(ctx->private, T_LSETXATTR, value, fs_path, &xname,
- &xvalue, size, flags);
- v9fs_string_free(&xname);
- v9fs_string_free(&xvalue);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_lremovexattr(FsContext *ctx, V9fsPath *fs_path,
- const char *name)
-{
- int retval;
- V9fsString xname;
-
- v9fs_string_init(&xname);
- v9fs_string_sprintf(&xname, "%s", name);
- retval = v9fs_request(ctx->private, T_LREMOVEXATTR, NULL, fs_path, &xname);
- v9fs_string_free(&xname);
- if (retval < 0) {
- errno = -retval;
- }
- return retval;
-}
-
-static int proxy_name_to_path(FsContext *ctx, V9fsPath *dir_path,
- const char *name, V9fsPath *target)
-{
- if (dir_path) {
- v9fs_path_sprintf(target, "%s/%s", dir_path->data, name);
- } else {
- v9fs_path_sprintf(target, "%s", name);
- }
- return 0;
-}
-
-static int proxy_renameat(FsContext *ctx, V9fsPath *olddir,
- const char *old_name, V9fsPath *newdir,
- const char *new_name)
-{
- int ret;
- V9fsString old_full_name, new_full_name;
-
- v9fs_string_init(&old_full_name);
- v9fs_string_init(&new_full_name);
-
- v9fs_string_sprintf(&old_full_name, "%s/%s", olddir->data, old_name);
- v9fs_string_sprintf(&new_full_name, "%s/%s", newdir->data, new_name);
-
- ret = proxy_rename(ctx, old_full_name.data, new_full_name.data);
- v9fs_string_free(&old_full_name);
- v9fs_string_free(&new_full_name);
- return ret;
-}
-
-static int proxy_unlinkat(FsContext *ctx, V9fsPath *dir,
- const char *name, int flags)
-{
- int ret;
- V9fsString fullname;
- v9fs_string_init(&fullname);
-
- v9fs_string_sprintf(&fullname, "%s/%s", dir->data, name);
- ret = proxy_remove(ctx, fullname.data);
- v9fs_string_free(&fullname);
-
- return ret;
-}
-
-static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path,
- mode_t st_mode, uint64_t *st_gen)
-{
- int err;
-
- /* Do not try to open special files like device nodes, fifos etc
- * we can get fd for regular files and directories only
- */
- if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) {
- errno = ENOTTY;
- return -1;
- }
- err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, path);
- if (err < 0) {
- errno = -err;
- err = -1;
- }
- return err;
-}
-
-static int connect_namedsocket(const char *path, Error **errp)
-{
- int sockfd;
- struct sockaddr_un helper;
-
- if (strlen(path) >= sizeof(helper.sun_path)) {
- error_setg(errp, "socket name too long");
- return -1;
- }
- sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (sockfd < 0) {
- error_setg_errno(errp, errno, "failed to create client socket");
- return -1;
- }
- strcpy(helper.sun_path, path);
- helper.sun_family = AF_UNIX;
- if (connect(sockfd, (struct sockaddr *)&helper, sizeof(helper)) < 0) {
- error_setg_errno(errp, errno, "failed to connect to '%s'", path);
- close(sockfd);
- return -1;
- }
-
- /* remove the socket for security reasons */
- unlink(path);
- return sockfd;
-}
-
-static void error_append_socket_sockfd_hint(Error *const *errp)
-{
- error_append_hint(errp, "Either specify socket=/some/path where /some/path"
- " points to a listening AF_UNIX socket or sock_fd=fd"
- " where fd is a file descriptor to a connected AF_UNIX"
- " socket\n");
-}
-
-static int proxy_parse_opts(QemuOpts *opts, FsDriverEntry *fs, Error **errp)
-{
- const char *socket = qemu_opt_get(opts, "socket");
- const char *sock_fd = qemu_opt_get(opts, "sock_fd");
-
- if (!socket && !sock_fd) {
- error_setg(errp, "both socket and sock_fd properties are missing");
- error_append_socket_sockfd_hint(errp);
- return -1;
- }
- if (socket && sock_fd) {
- error_setg(errp, "both socket and sock_fd properties are set");
- error_append_socket_sockfd_hint(errp);
- return -1;
- }
- if (socket) {
- fs->path = g_strdup(socket);
- fs->export_flags |= V9FS_PROXY_SOCK_NAME;
- } else {
- fs->path = g_strdup(sock_fd);
- fs->export_flags |= V9FS_PROXY_SOCK_FD;
- }
- return 0;
-}
-
-static int proxy_init(FsContext *ctx, Error **errp)
-{
- V9fsProxy *proxy = g_new(V9fsProxy, 1);
- int sock_id;
-
- if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) {
- sock_id = connect_namedsocket(ctx->fs_root, errp);
- } else {
- sock_id = atoi(ctx->fs_root);
- if (sock_id < 0) {
- error_setg(errp, "socket descriptor not initialized");
- }
- }
- if (sock_id < 0) {
- g_free(proxy);
- return -1;
- }
- g_free(ctx->fs_root);
- ctx->fs_root = NULL;
-
- proxy->in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ);
- proxy->in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ;
- proxy->out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ);
- proxy->out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ;
-
- ctx->private = proxy;
- proxy->sockfd = sock_id;
- qemu_mutex_init(&proxy->mutex);
-
- ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT;
- ctx->exops.get_st_gen = proxy_ioc_getversion;
- return 0;
-}
-
-static void proxy_cleanup(FsContext *ctx)
-{
- V9fsProxy *proxy = ctx->private;
-
- if (!proxy) {
- return;
- }
-
- g_free(proxy->out_iovec.iov_base);
- g_free(proxy->in_iovec.iov_base);
- if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) {
- close(proxy->sockfd);
- }
- g_free(proxy);
-}
-
-FileOperations proxy_ops = {
- .parse_opts = proxy_parse_opts,
- .init = proxy_init,
- .cleanup = proxy_cleanup,
- .lstat = proxy_lstat,
- .readlink = proxy_readlink,
- .close = proxy_close,
- .closedir = proxy_closedir,
- .open = proxy_open,
- .opendir = proxy_opendir,
- .rewinddir = proxy_rewinddir,
- .telldir = proxy_telldir,
- .readdir = proxy_readdir,
- .seekdir = proxy_seekdir,
- .preadv = proxy_preadv,
- .pwritev = proxy_pwritev,
- .chmod = proxy_chmod,
- .mknod = proxy_mknod,
- .mkdir = proxy_mkdir,
- .fstat = proxy_fstat,
- .open2 = proxy_open2,
- .symlink = proxy_symlink,
- .link = proxy_link,
- .truncate = proxy_truncate,
- .rename = proxy_rename,
- .chown = proxy_chown,
- .utimensat = proxy_utimensat,
- .remove = proxy_remove,
- .fsync = proxy_fsync,
- .statfs = proxy_statfs,
- .lgetxattr = proxy_lgetxattr,
- .llistxattr = proxy_llistxattr,
- .lsetxattr = proxy_lsetxattr,
- .lremovexattr = proxy_lremovexattr,
- .name_to_path = proxy_name_to_path,
- .renameat = proxy_renameat,
- .unlinkat = proxy_unlinkat,
-};
diff --git a/hw/9pfs/9p-proxy.h b/hw/9pfs/9p-proxy.h
deleted file mode 100644
index 9be4718..0000000
--- a/hw/9pfs/9p-proxy.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * 9p Proxy callback
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- * M. Mohan Kumar <mohan@in.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-/*
- * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be
- * removed in a future version of QEMU!
- */
-
-#ifndef QEMU_9P_PROXY_H
-#define QEMU_9P_PROXY_H
-
-#define PROXY_MAX_IO_SZ (64 * 1024)
-#define V9FS_FD_VALID INT_MAX
-
-/*
- * proxy iovec only support one element and
- * marsha/unmarshal doesn't do little endian conversion.
- */
-#define proxy_unmarshal(in_sg, offset, fmt, args...) \
- v9fs_iov_unmarshal(in_sg, 1, offset, 0, fmt, ##args)
-#define proxy_marshal(out_sg, offset, fmt, args...) \
- v9fs_iov_marshal(out_sg, 1, offset, 0, fmt, ##args)
-
-union MsgControl {
- struct cmsghdr cmsg;
- char control[CMSG_SPACE(sizeof(int))];
-};
-
-typedef struct {
- uint32_t type;
- uint32_t size;
-} ProxyHeader;
-
-#define PROXY_HDR_SZ (sizeof(ProxyHeader))
-
-enum {
- T_SUCCESS = 0,
- T_ERROR,
- T_OPEN,
- T_CREATE,
- T_MKNOD,
- T_MKDIR,
- T_SYMLINK,
- T_LINK,
- T_LSTAT,
- T_READLINK,
- T_STATFS,
- T_CHMOD,
- T_CHOWN,
- T_TRUNCATE,
- T_UTIME,
- T_RENAME,
- T_REMOVE,
- T_LGETXATTR,
- T_LLISTXATTR,
- T_LSETXATTR,
- T_LREMOVEXATTR,
- T_GETVERSION,
-};
-
-typedef struct {
- uint64_t st_dev;
- uint64_t st_ino;
- uint64_t st_nlink;
- uint32_t st_mode;
- uint32_t st_uid;
- uint32_t st_gid;
- uint64_t st_rdev;
- uint64_t st_size;
- uint64_t st_blksize;
- uint64_t st_blocks;
- uint64_t st_atim_sec;
- uint64_t st_atim_nsec;
- uint64_t st_mtim_sec;
- uint64_t st_mtim_nsec;
- uint64_t st_ctim_sec;
- uint64_t st_ctim_nsec;
-} ProxyStat;
-
-typedef struct {
- uint64_t f_type;
- uint64_t f_bsize;
- uint64_t f_blocks;
- uint64_t f_bfree;
- uint64_t f_bavail;
- uint64_t f_files;
- uint64_t f_ffree;
- uint64_t f_fsid[2];
- uint64_t f_namelen;
- uint64_t f_frsize;
-} ProxyStatFS;
-#endif
diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c
index 0ac79a5..9cd1884 100644
--- a/hw/9pfs/9p-synth.c
+++ b/hw/9pfs/9p-synth.c
@@ -24,7 +24,7 @@
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/cutils.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
/* Root node for synth file system */
static V9fsSynthNode synth_root = {
@@ -356,6 +356,13 @@ static int synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset)
return -1;
}
+static int synth_ftruncate(FsContext *ctx, int fid_type, V9fsFidOpenState *fs,
+ off_t size)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
static int synth_chmod(FsContext *fs_ctx, V9fsPath *path, FsCred *credp)
{
errno = EPERM;
@@ -417,6 +424,13 @@ static int synth_utimensat(FsContext *fs_ctx, V9fsPath *path,
return 0;
}
+static int synth_futimens(FsContext *fs_ctx, int fid_type, V9fsFidOpenState *fs,
+ const struct timespec *buf)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
static int synth_remove(FsContext *ctx, const char *path)
{
errno = EPERM;
@@ -615,6 +629,11 @@ static int synth_init(FsContext *ctx, Error **errp)
return 0;
}
+static bool synth_has_valid_file_handle(int fid_type, V9fsFidOpenState *fs)
+{
+ return false;
+}
+
FileOperations synth_ops = {
.init = synth_init,
.lstat = synth_lstat,
@@ -650,4 +669,7 @@ FileOperations synth_ops = {
.name_to_path = synth_name_to_path,
.renameat = synth_renameat,
.unlinkat = synth_unlinkat,
+ .has_valid_file_handle = synth_has_valid_file_handle,
+ .ftruncate = synth_ftruncate,
+ .futimens = synth_futimens,
};
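The synth backend keeps no real host file handles, so its has_valid_file_handle() always returns false and the new ftruncate()/futimens() stubs report ENOSYS; presumably the 9p core uses the former to fall back to the path-based truncate()/utimensat() callbacks. A hedged sketch of what such a dispatch could look like (dispatch_truncate and its exact arguments are illustrative, not the actual 9p.c code):

    /* Illustrative only: not the real v9fs dispatch logic. */
    static int dispatch_truncate(FileOperations *ops, FsContext *ctx,
                                 int fid_type, V9fsFidOpenState *fs,
                                 V9fsPath *path, off_t size)
    {
        if (ops->has_valid_file_handle &&
            ops->has_valid_file_handle(fid_type, fs)) {
            return ops->ftruncate(ctx, fid_type, fs, size);
        }
        return ops->truncate(ctx, path, size);
    }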
diff --git a/hw/9pfs/9p-util-generic.c b/hw/9pfs/9p-util-generic.c
new file mode 100644
index 0000000..4c1e9c8
--- /dev/null
+++ b/hw/9pfs/9p-util-generic.c
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "9p-util.h"
+#include <glib/gstrfuncs.h>
+
+char *qemu_open_flags_tostr(int flags)
+{
+ int acc = flags & O_ACCMODE;
+ return g_strconcat(
+ (acc == O_WRONLY) ? "WRONLY" : (acc == O_RDONLY) ? "RDONLY" : "RDWR",
+ (flags & O_CREAT) ? "|CREAT" : "",
+ (flags & O_EXCL) ? "|EXCL" : "",
+ (flags & O_NOCTTY) ? "|NOCTTY" : "",
+ (flags & O_TRUNC) ? "|TRUNC" : "",
+ (flags & O_APPEND) ? "|APPEND" : "",
+ (flags & O_NONBLOCK) ? "|NONBLOCK" : "",
+ (flags & O_DSYNC) ? "|DSYNC" : "",
+ #ifdef O_DIRECT
+ (flags & O_DIRECT) ? "|DIRECT" : "",
+ #endif
+ (flags & O_LARGEFILE) ? "|LARGEFILE" : "",
+ (flags & O_DIRECTORY) ? "|DIRECTORY" : "",
+ (flags & O_NOFOLLOW) ? "|NOFOLLOW" : "",
+ #ifdef O_NOATIME
+ (flags & O_NOATIME) ? "|NOATIME" : "",
+ #endif
+ #ifdef O_CLOEXEC
+ (flags & O_CLOEXEC) ? "|CLOEXEC" : "",
+ #endif
+ #ifdef __O_SYNC
+ (flags & __O_SYNC) ? "|SYNC" : "",
+ #else
+ ((flags & O_SYNC) == O_SYNC) ? "|SYNC" : "",
+ #endif
+ #ifdef O_PATH
+ (flags & O_PATH) ? "|PATH" : "",
+ #endif
+ #ifdef __O_TMPFILE
+ (flags & __O_TMPFILE) ? "|TMPFILE" : "",
+ #elif defined(O_TMPFILE)
+ ((flags & O_TMPFILE) == O_TMPFILE) ? "|TMPFILE" : "",
+ #endif
+ /* O_NDELAY is usually just an alias of O_NONBLOCK */
+ #if defined(O_NDELAY) && O_NDELAY != O_NONBLOCK
+ (flags & O_NDELAY) ? "|NDELAY" : "",
+ #endif
+ NULL /* always last (required NULL termination) */
+ );
+}
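qemu_open_flags_tostr() above builds one "|"-separated token per set flag via g_strconcat(), which is why the argument list ends with the mandatory NULL terminator. A hedged usage sketch (the exact output depends on which optional O_* macros the host defines):

    #include <fcntl.h>
    #include <glib.h>
    #include <stdio.h>

    char *qemu_open_flags_tostr(int flags);   /* from 9p-util-generic.c above */

    int main(void)
    {
        g_autofree char *s = qemu_open_flags_tostr(O_WRONLY | O_CREAT | O_TRUNC);

        /* Typically prints: WRONLY|CREAT|TRUNC */
        printf("%s\n", s);
        return 0;
    }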
diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
index 51c94b0..a1924fe 100644
--- a/hw/9pfs/9p-util.h
+++ b/hw/9pfs/9p-util.h
@@ -103,6 +103,7 @@ static inline int errno_to_dotl(int err) {
#define qemu_renameat renameat
#define qemu_utimensat utimensat
#define qemu_unlinkat unlinkat
+#define qemu_futimens futimens
static inline void close_preserve_errno(int fd)
{
@@ -177,20 +178,27 @@ again:
return -1;
}
- if (close_if_special_file(fd) < 0) {
- return -1;
- }
-
- serrno = errno;
- /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't
- * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat()
- * ignored it anyway.
- */
+ /* Only if O_PATH is not set ... */
if (!(flags & O_PATH_9P_UTIL)) {
+ /*
+ * Prevent I/O on special files (device files, etc.) on the host side;
+ * however, it is safe and required to allow opening them with O_PATH,
+ * as this is limited to (required) path-based operations only.
+ */
+ if (close_if_special_file(fd) < 0) {
+ return -1;
+ }
+
+ serrno = errno;
+ /*
+ * O_NONBLOCK was only needed to open the file. Let's drop it. We don't
+ * do that with O_PATH since fcntl(F_SETFL) isn't supported, and
+ * openat() ignored it anyway.
+ */
ret = fcntl(fd, F_SETFL, flags);
assert(!ret);
+ errno = serrno;
}
- errno = serrno;
return fd;
}
@@ -260,4 +268,10 @@ int pthread_fchdir_np(int fd) __attribute__((weak_import));
#endif
int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev);
+/*
+ * Returns a newly allocated string representation of open() flags, intended
+ * for debugging (tracing) purposes only.
+ */
+char *qemu_open_flags_tostr(int flags);
+
#endif
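
[Editor's note, not part of the patch: the string returned by qemu_open_flags_tostr() comes from g_strconcat() and must be released by the caller, e.g. with g_free() or g_autofree. A minimal, hypothetical caller sketch; the wrapper name and the printf are assumptions for illustration only:

    #include <glib.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "9p-util.h"   /* declares qemu_open_flags_tostr() */

    /* Hypothetical debug helper consuming qemu_open_flags_tostr(). */
    static void debug_print_open_flags(int flags)
    {
        g_autofree char *str = qemu_open_flags_tostr(flags);

        /* e.g. O_RDWR|O_CREAT|O_NONBLOCK yields "RDWR|CREAT|NONBLOCK" */
        printf("open flags: %s\n", str);
        /* g_autofree releases the buffer when str goes out of scope */
    }
]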
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index af636cf..8b001b9 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -434,16 +434,24 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
V9fsFidState *f;
GHashTableIter iter;
gpointer fid;
+ int err;
+ int nclosed = 0;
+
+ /* prevent multiple coroutines from running this function simultaneously */
+ if (s->reclaiming) {
+ return;
+ }
+ s->reclaiming = true;
g_hash_table_iter_init(&iter, s->fids);
QSLIST_HEAD(, V9fsFidState) reclaim_list =
QSLIST_HEAD_INITIALIZER(reclaim_list);
+ /* Pick FIDs to be closed, collect them on reclaim_list. */
while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) {
/*
- * Unlink fids cannot be reclaimed. Check
- * for them and skip them. Also skip fids
+ * Unlinked fids cannot be reclaimed, skip those, and also skip fids
* currently being operated on.
*/
if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
@@ -493,23 +501,42 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
}
}
/*
- * Now close the fid in reclaim list. Free them if they
- * are already clunked.
+ * Close the picked FIDs altogether on a background I/O driver thread. Do
+ * this all at once to keep latency (i.e. the number of thread hops between
+ * the main thread <-> fs driver background thread) as low as possible.
*/
+ v9fs_co_run_in_worker({
+ QSLIST_FOREACH(f, &reclaim_list, reclaim_next) {
+ err = (f->fid_type == P9_FID_DIR) ?
+ s->ops->closedir(&s->ctx, &f->fs_reclaim) :
+ s->ops->close(&s->ctx, &f->fs_reclaim);
+
+ /* 'man 2 close' suggests ignoring close() errors except for EBADF */
+ if (unlikely(err && errno == EBADF)) {
+ /*
+ * unexpected case, as FIDs were only picked above if they had a valid
+ * file descriptor
+ */
+ error_report("9pfs: v9fs_reclaim_fd() WARNING: close() failed with EBADF");
+ } else {
+ /* total_open_fd must only be mutated on main thread */
+ nclosed++;
+ }
+ }
+ });
+ total_open_fd -= nclosed;
+ /* Free the closed FIDs. */
while (!QSLIST_EMPTY(&reclaim_list)) {
f = QSLIST_FIRST(&reclaim_list);
QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next);
- if (f->fid_type == P9_FID_FILE) {
- v9fs_co_close(pdu, &f->fs_reclaim);
- } else if (f->fid_type == P9_FID_DIR) {
- v9fs_co_closedir(pdu, &f->fs_reclaim);
- }
/*
* Now drop the fid reference, free it
* if clunked.
*/
put_fid(pdu, f);
}
+
+ s->reclaiming = false;
}
/*
@@ -1574,6 +1601,11 @@ out_nofid:
pdu_complete(pdu, err);
}
+static bool fid_has_valid_file_handle(V9fsState *s, V9fsFidState *fidp)
+{
+ return s->ops->has_valid_file_handle(fidp->fid_type, &fidp->fs);
+}
+
static void coroutine_fn v9fs_getattr(void *opaque)
{
int32_t fid;
@@ -1596,11 +1628,11 @@ static void coroutine_fn v9fs_getattr(void *opaque)
retval = -ENOENT;
goto out_nofid;
}
- /*
- * Currently we only support BASIC fields in stat, so there is no
- * need to look at request_mask.
- */
- retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
+ if (fid_has_valid_file_handle(pdu->s, fidp)) {
+ retval = v9fs_co_fstat(pdu, fidp, &stbuf);
+ } else {
+ retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
+ }
if (retval < 0) {
goto out;
}
@@ -1703,7 +1735,11 @@ static void coroutine_fn v9fs_setattr(void *opaque)
} else {
times[1].tv_nsec = UTIME_OMIT;
}
- err = v9fs_co_utimensat(pdu, &fidp->path, times);
+ if (fid_has_valid_file_handle(pdu->s, fidp)) {
+ err = v9fs_co_futimens(pdu, fidp, times);
+ } else {
+ err = v9fs_co_utimensat(pdu, &fidp->path, times);
+ }
if (err < 0) {
goto out;
}
@@ -1728,7 +1764,11 @@ static void coroutine_fn v9fs_setattr(void *opaque)
}
}
if (v9iattr.valid & (P9_ATTR_SIZE)) {
- err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
+ if (fid_has_valid_file_handle(pdu->s, fidp)) {
+ err = v9fs_co_ftruncate(pdu, fidp, v9iattr.size);
+ } else {
+ err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
+ }
if (err < 0) {
goto out;
}
@@ -1772,6 +1812,21 @@ static bool same_stat_id(const struct stat *a, const struct stat *b)
return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
}
+/*
+ * Returns a (newly allocated) comma-separated string representation of the
+ * passed array, for logging (tracing) purposes of trace event "v9fs_walk".
+ *
+ * It is the caller's responsibility to free the returned string.
+ */
+static char *trace_v9fs_walk_wnames(V9fsString *wnames, size_t nwnames)
+{
+ g_autofree char **arr = g_malloc0_n(nwnames + 1, sizeof(char *));
+ for (size_t i = 0; i < nwnames; ++i) {
+ arr[i] = wnames[i].data;
+ }
+ return g_strjoinv(", ", arr);
+}
+
static void coroutine_fn v9fs_walk(void *opaque)
{
int name_idx, nwalked;
@@ -1785,6 +1840,7 @@ static void coroutine_fn v9fs_walk(void *opaque)
size_t offset = 7;
int32_t fid, newfid;
P9ARRAY_REF(V9fsString) wnames = NULL;
+ g_autofree char *trace_wnames = NULL;
V9fsFidState *fidp;
V9fsFidState *newfidp = NULL;
V9fsPDU *pdu = opaque;
@@ -1798,11 +1854,9 @@ static void coroutine_fn v9fs_walk(void *opaque)
}
offset += err;
- trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
-
if (nwnames > P9_MAXWELEM) {
err = -EINVAL;
- goto out_nofid;
+ goto out_nofid_nownames;
}
if (nwnames) {
P9ARRAY_NEW(V9fsString, wnames, nwnames);
@@ -1812,15 +1866,23 @@ static void coroutine_fn v9fs_walk(void *opaque)
for (i = 0; i < nwnames; i++) {
err = pdu_unmarshal(pdu, offset, "s", &wnames[i]);
if (err < 0) {
- goto out_nofid;
+ goto out_nofid_nownames;
}
if (name_is_illegal(wnames[i].data)) {
err = -ENOENT;
- goto out_nofid;
+ goto out_nofid_nownames;
}
offset += err;
}
+ if (trace_event_get_state_backends(TRACE_V9FS_WALK)) {
+ trace_wnames = trace_v9fs_walk_wnames(wnames, nwnames);
+ trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames,
+ trace_wnames);
+ }
+ } else {
+ trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "");
}
+
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
@@ -1955,7 +2017,11 @@ out:
}
v9fs_path_free(&dpath);
v9fs_path_free(&path);
+ goto out_pdu_complete;
+out_nofid_nownames:
+ trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "<?>");
out_nofid:
+out_pdu_complete:
pdu_complete(pdu, err);
}
@@ -1980,6 +2046,7 @@ static void coroutine_fn v9fs_open(void *opaque)
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
+ g_autofree char *trace_oflags = NULL;
if (s->proto_version == V9FS_PROTO_2000L) {
err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
@@ -1991,7 +2058,13 @@ static void coroutine_fn v9fs_open(void *opaque)
if (err < 0) {
goto out_nofid;
}
- trace_v9fs_open(pdu->tag, pdu->id, fid, mode);
+ if (trace_event_get_state_backends(TRACE_V9FS_OPEN)) {
+ trace_oflags = qemu_open_flags_tostr(
+ (s->proto_version == V9FS_PROTO_2000L) ?
+ dotl_to_open_flags(mode) : omode_to_uflags(mode)
+ );
+ trace_v9fs_open(pdu->tag, pdu->id, fid, mode, trace_oflags);
+ }
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
@@ -2587,6 +2660,11 @@ static void coroutine_fn v9fs_readdir(void *opaque)
retval = -EINVAL;
goto out_nofid;
}
+ if (fidp->fid_type != P9_FID_DIR) {
+ warn_report_once("9p: bad client: T_readdir on non-directory stream");
+ retval = -ENOTDIR;
+ goto out;
+ }
if (!fidp->fs.dir.stream) {
retval = -EINVAL;
goto out;
@@ -4284,6 +4362,8 @@ int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
s->ctx.fst = &fse->fst;
fsdev_throttle_init(s->ctx.fst);
+ s->reclaiming = false;
+
rc = 0;
out:
if (rc) {
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index a6f59ab..259ad32 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -280,7 +280,6 @@ struct V9fsFidState {
uid_t uid;
int ref;
bool clunked;
- QSIMPLEQ_ENTRY(V9fsFidState) next;
QSLIST_ENTRY(V9fsFidState) reclaim_next;
};
@@ -363,6 +362,7 @@ struct V9fsState {
uint64_t qp_ndevices; /* Amount of entries in qpd_table. */
uint16_t qp_affix_next;
uint64_t qp_fullpath_next;
+ bool reclaiming;
};
/* 9p2000.L open flags */
diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c
index 2068a47..bce7dd9 100644
--- a/hw/9pfs/codir.c
+++ b/hw/9pfs/codir.c
@@ -20,6 +20,7 @@
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"
@@ -353,7 +354,11 @@ int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
err = -errno;
}
});
- if (!err) {
+ /* 'man 2 close' suggests ignoring close() errors except for EBADF */
+ if (unlikely(err && errno == EBADF)) {
+ /* unexpected case as we should have checked for a valid file handle */
+ error_report("9pfs: WARNING: v9fs_co_closedir() failed with EBADF");
+ } else {
total_open_fd--;
}
return err;
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
index 71174c3..6e775c8 100644
--- a/hw/9pfs/cofile.c
+++ b/hw/9pfs/cofile.c
@@ -20,6 +20,7 @@
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
#include "coth.h"
int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
@@ -197,7 +198,11 @@ int coroutine_fn v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
err = -errno;
}
});
- if (!err) {
+ /* 'man 2 close' suggests ignoring close() errors except for EBADF */
+ if (unlikely(err && errno == EBADF)) {
+ /* unexpected case as we should have checked for a valid file handle */
+ error_report("9pfs: WARNING: v9fs_co_close() failed with EBADF");
+ } else {
total_open_fd--;
}
return err;
diff --git a/hw/9pfs/cofs.c b/hw/9pfs/cofs.c
index 67e3ae5..12fa8c9 100644
--- a/hw/9pfs/cofs.c
+++ b/hw/9pfs/cofs.c
@@ -139,6 +139,25 @@ int coroutine_fn v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path,
return err;
}
+int coroutine_fn v9fs_co_futimens(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct timespec times[2])
+{
+ int err;
+ V9fsState *s = pdu->s;
+
+ if (v9fs_request_cancelled(pdu)) {
+ return -EINTR;
+ }
+ v9fs_co_run_in_worker(
+ {
+ err = s->ops->futimens(&s->ctx, fidp->fid_type, &fidp->fs, times);
+ if (err < 0) {
+ err = -errno;
+ }
+ });
+ return err;
+}
+
int coroutine_fn v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid,
gid_t gid)
{
@@ -184,6 +203,24 @@ int coroutine_fn v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size)
return err;
}
+int coroutine_fn v9fs_co_ftruncate(V9fsPDU *pdu, V9fsFidState *fidp, off_t size)
+{
+ int err;
+ V9fsState *s = pdu->s;
+
+ if (v9fs_request_cancelled(pdu)) {
+ return -EINTR;
+ }
+ v9fs_co_run_in_worker(
+ {
+ err = s->ops->ftruncate(&s->ctx, fidp->fid_type, &fidp->fs, size);
+ if (err < 0) {
+ err = -errno;
+ }
+ });
+ return err;
+}
+
int coroutine_fn v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp,
V9fsString *name, uid_t uid, gid_t gid,
dev_t dev, mode_t mode, struct stat *stbuf)
diff --git a/hw/9pfs/coth.h b/hw/9pfs/coth.h
index 2c54249..7906fa7 100644
--- a/hw/9pfs/coth.h
+++ b/hw/9pfs/coth.h
@@ -71,8 +71,12 @@ int coroutine_fn v9fs_co_statfs(V9fsPDU *, V9fsPath *, struct statfs *);
int coroutine_fn v9fs_co_lstat(V9fsPDU *, V9fsPath *, struct stat *);
int coroutine_fn v9fs_co_chmod(V9fsPDU *, V9fsPath *, mode_t);
int coroutine_fn v9fs_co_utimensat(V9fsPDU *, V9fsPath *, struct timespec [2]);
+int coroutine_fn v9fs_co_futimens(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct timespec times[2]);
int coroutine_fn v9fs_co_chown(V9fsPDU *, V9fsPath *, uid_t, gid_t);
int coroutine_fn v9fs_co_truncate(V9fsPDU *, V9fsPath *, off_t);
+int coroutine_fn v9fs_co_ftruncate(V9fsPDU *pdu, V9fsFidState *fidp,
+ off_t size);
int coroutine_fn v9fs_co_llistxattr(V9fsPDU *, V9fsPath *, void *, size_t);
int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *, V9fsPath *,
V9fsString *, void *, size_t);
diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build
index f1b62fa..d35d4f4 100644
--- a/hw/9pfs/meson.build
+++ b/hw/9pfs/meson.build
@@ -2,8 +2,8 @@ fs_ss = ss.source_set()
fs_ss.add(files(
'9p-local.c',
'9p-posix-acl.c',
- '9p-proxy.c',
'9p-synth.c',
+ '9p-util-generic.c',
'9p-xattr-user.c',
'9p-xattr.c',
'9p.c',
diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events
index a12e55c..0e0fc37 100644
--- a/hw/9pfs/trace-events
+++ b/hw/9pfs/trace-events
@@ -11,9 +11,9 @@ v9fs_stat(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
v9fs_stat_return(uint16_t tag, uint8_t id, int32_t mode, int32_t atime, int32_t mtime, int64_t length) "tag %d id %d stat={mode %d atime %d mtime %d length %"PRId64"}"
v9fs_getattr(uint16_t tag, uint8_t id, int32_t fid, uint64_t request_mask) "tag %d id %d fid %d request_mask %"PRIu64
v9fs_getattr_return(uint16_t tag, uint8_t id, uint64_t result_mask, uint32_t mode, uint32_t uid, uint32_t gid) "tag %d id %d getattr={result_mask %"PRId64" mode %u uid %u gid %u}"
-v9fs_walk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, uint16_t nwnames) "tag %d id %d fid %d newfid %d nwnames %d"
+v9fs_walk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, uint16_t nwnames, const char* wnames) "tag=%d id=%d fid=%d newfid=%d nwnames=%d wnames={%s}"
v9fs_walk_return(uint16_t tag, uint8_t id, uint16_t nwnames, void* qids) "tag %d id %d nwnames %d qids %p"
-v9fs_open(uint16_t tag, uint8_t id, int32_t fid, int32_t mode) "tag %d id %d fid %d mode %d"
+v9fs_open(uint16_t tag, uint8_t id, int32_t fid, int32_t mode, const char* oflags) "tag=%d id=%d fid=%d mode=%d(%s)"
v9fs_open_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d"
v9fs_lcreate(uint16_t tag, uint8_t id, int32_t dfid, int32_t flags, int32_t mode, uint32_t gid) "tag %d id %d dfid %d flags %d mode %d gid %u"
v9fs_lcreate_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int32_t iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d"
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index efa41cf..81b91e4 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -26,7 +26,7 @@
#include "hw/virtio/virtio-access.h"
#include "qemu/iov.h"
#include "qemu/module.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
static void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
@@ -243,13 +243,12 @@ static const VMStateDescription vmstate_virtio_9p = {
},
};
-static Property virtio_9p_properties[] = {
+static const Property virtio_9p_properties[] = {
DEFINE_PROP_STRING("mount_tag", V9fsVirtioState, state.fsconf.tag),
DEFINE_PROP_STRING("fsdev", V9fsVirtioState, state.fsconf.fsdev_id),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_9p_class_init(ObjectClass *klass, void *data)
+static void virtio_9p_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/Kconfig b/hw/Kconfig
index f7866e7..9e6c789 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -27,7 +27,6 @@ source nvme/Kconfig
source nvram/Kconfig
source pci-bridge/Kconfig
source pci-host/Kconfig
-source pcmcia/Kconfig
source pci/Kconfig
source remote/Kconfig
source rtc/Kconfig
@@ -38,10 +37,13 @@ source smbios/Kconfig
source ssi/Kconfig
source timer/Kconfig
source tpm/Kconfig
+source uefi/Kconfig
source ufs/Kconfig
source usb/Kconfig
source virtio/Kconfig
source vfio/Kconfig
+source vfio-user/Kconfig
+source vmapple/Kconfig
source xen/Kconfig
source watchdog/Kconfig
@@ -50,7 +52,6 @@ source arm/Kconfig
source cpu/Kconfig
source alpha/Kconfig
source avr/Kconfig
-source cris/Kconfig
source hppa/Kconfig
source i386/Kconfig
source loongarch/Kconfig
diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig
index e07d320..1d4e9f0 100644
--- a/hw/acpi/Kconfig
+++ b/hw/acpi/Kconfig
@@ -60,6 +60,11 @@ config ACPI_VMGENID
default y
depends on PC
+config ACPI_VMCLOCK
+ bool
+ default y
+ depends on PC
+
config ACPI_VIOT
bool
depends on ACPI
diff --git a/hw/acpi/acpi-cpu-hotplug-stub.c b/hw/acpi/acpi-cpu-hotplug-stub.c
index 3fc4b14..9872dd5 100644
--- a/hw/acpi/acpi-cpu-hotplug-stub.c
+++ b/hw/acpi/acpi-cpu-hotplug-stub.c
@@ -10,41 +10,39 @@ void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
CPUHotplugState *cpuhp_state,
uint16_t io_port)
{
- return;
}
void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
AcpiCpuHotplug *gpe_cpu, uint16_t base)
{
- return;
+}
+
+void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
+ CPUHotplugState *state, hwaddr base_addr)
+{
}
void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
- return;
}
void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
- return;
}
void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
AcpiCpuHotplug *g, DeviceState *dev, Error **errp)
{
- return;
}
void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
CPUHotplugState *cpu_st,
DeviceState *dev, Error **errp)
{
- return;
}
diff --git a/hw/acpi/acpi-mem-hotplug-stub.c b/hw/acpi/acpi-mem-hotplug-stub.c
index 73a076a..7ad0fdc 100644
--- a/hw/acpi/acpi-mem-hotplug-stub.c
+++ b/hw/acpi/acpi-mem-hotplug-stub.c
@@ -7,29 +7,24 @@ const VMStateDescription vmstate_memory_hotplug;
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
MemHotplugState *state, hwaddr io_base)
{
- return;
}
void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list)
{
- return;
}
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_memory_unplug_cb(MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev,
MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
- return;
}
diff --git a/hw/acpi/acpi-nvdimm-stub.c b/hw/acpi/acpi-nvdimm-stub.c
index 8baff9b..65f491d 100644
--- a/hw/acpi/acpi-nvdimm-stub.c
+++ b/hw/acpi/acpi-nvdimm-stub.c
@@ -4,5 +4,4 @@
void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
{
- return;
}
diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c
index dcee3ad..b7bc6e4 100644
--- a/hw/acpi/acpi-pci-hotplug-stub.c
+++ b/hw/acpi/acpi-pci-hotplug-stub.c
@@ -7,40 +7,34 @@ const VMStateDescription vmstate_acpi_pcihp_pci_status;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
MemoryRegion *address_space_io, uint16_t io_base)
{
- return;
}
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
DeviceState *dev, Error **errp)
{
- return;
}
void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
AcpiPciHpState *s, DeviceState *dev,
Error **errp)
{
- return;
}
void acpi_pcihp_reset(AcpiPciHpState *s)
{
- return;
}
-bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus)
+bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus)
{
return true;
}
diff --git a/hw/acpi/acpi-stub.c b/hw/acpi/acpi-stub.c
index e268ce9..fd0b62f 100644
--- a/hw/acpi/acpi-stub.c
+++ b/hw/acpi/acpi-stub.c
@@ -21,7 +21,15 @@
#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
+char unsigned *acpi_tables;
+size_t acpi_tables_len;
+
void acpi_table_add(const QemuOpts *opts, Error **errp)
{
g_assert_not_reached();
}
+
+bool acpi_builtin(void)
+{
+ return false;
+}
diff --git a/hw/acpi/acpi_generic_initiator.c b/hw/acpi/acpi_generic_initiator.c
deleted file mode 100644
index 17b9a05..0000000
--- a/hw/acpi/acpi_generic_initiator.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
- */
-
-#include "qemu/osdep.h"
-#include "hw/acpi/acpi_generic_initiator.h"
-#include "hw/acpi/aml-build.h"
-#include "hw/boards.h"
-#include "hw/pci/pci_device.h"
-#include "qemu/error-report.h"
-
-typedef struct AcpiGenericInitiatorClass {
- ObjectClass parent_class;
-} AcpiGenericInitiatorClass;
-
-OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator,
- ACPI_GENERIC_INITIATOR, OBJECT,
- { TYPE_USER_CREATABLE },
- { NULL })
-
-OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR)
-
-static void acpi_generic_initiator_init(Object *obj)
-{
- AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
-
- gi->node = MAX_NODES;
- gi->pci_dev = NULL;
-}
-
-static void acpi_generic_initiator_finalize(Object *obj)
-{
- AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
-
- g_free(gi->pci_dev);
-}
-
-static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val,
- Error **errp)
-{
- AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
-
- gi->pci_dev = g_strdup(val);
-}
-
-static void acpi_generic_initiator_set_node(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
- MachineState *ms = MACHINE(qdev_get_machine());
- uint32_t value;
-
- if (!visit_type_uint32(v, name, &value, errp)) {
- return;
- }
-
- if (value >= MAX_NODES) {
- error_printf("%s: Invalid NUMA node specified\n",
- TYPE_ACPI_GENERIC_INITIATOR);
- exit(1);
- }
-
- gi->node = value;
- ms->numa_state->nodes[gi->node].has_gi = true;
-}
-
-static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data)
-{
- object_class_property_add_str(oc, "pci-dev", NULL,
- acpi_generic_initiator_set_pci_device);
- object_class_property_add(oc, "node", "int", NULL,
- acpi_generic_initiator_set_node, NULL, NULL);
-}
-
-/*
- * ACPI 6.3:
- * Table 5-78 Generic Initiator Affinity Structure
- */
-static void
-build_srat_generic_pci_initiator_affinity(GArray *table_data, int node,
- PCIDeviceHandle *handle)
-{
- uint8_t index;
-
- build_append_int_noprefix(table_data, 5, 1); /* Type */
- build_append_int_noprefix(table_data, 32, 1); /* Length */
- build_append_int_noprefix(table_data, 0, 1); /* Reserved */
- build_append_int_noprefix(table_data, 1, 1); /* Device Handle Type: PCI */
- build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */
-
- /* Device Handle - PCI */
- build_append_int_noprefix(table_data, handle->segment, 2);
- build_append_int_noprefix(table_data, handle->bdf, 2);
- for (index = 0; index < 12; index++) {
- build_append_int_noprefix(table_data, 0, 1);
- }
-
- build_append_int_noprefix(table_data, GEN_AFFINITY_ENABLED, 4); /* Flags */
- build_append_int_noprefix(table_data, 0, 4); /* Reserved */
-}
-
-static int build_all_acpi_generic_initiators(Object *obj, void *opaque)
-{
- MachineState *ms = MACHINE(qdev_get_machine());
- AcpiGenericInitiator *gi;
- GArray *table_data = opaque;
- PCIDeviceHandle dev_handle;
- PCIDevice *pci_dev;
- Object *o;
-
- if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) {
- return 0;
- }
-
- gi = ACPI_GENERIC_INITIATOR(obj);
- if (gi->node >= ms->numa_state->num_nodes) {
- error_printf("%s: Specified node %d is invalid.\n",
- TYPE_ACPI_GENERIC_INITIATOR, gi->node);
- exit(1);
- }
-
- o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL);
- if (!o) {
- error_printf("%s: Specified device must be a PCI device.\n",
- TYPE_ACPI_GENERIC_INITIATOR);
- exit(1);
- }
-
- pci_dev = PCI_DEVICE(o);
-
- dev_handle.segment = 0;
- dev_handle.bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)),
- pci_dev->devfn);
-
- build_srat_generic_pci_initiator_affinity(table_data,
- gi->node, &dev_handle);
-
- return 0;
-}
-
-void build_srat_generic_pci_initiator(GArray *table_data)
-{
- object_child_foreach_recursive(object_get_root(),
- build_all_acpi_generic_initiators,
- table_data);
-}
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 6d4517c..f8f93a9 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -24,7 +24,7 @@
#include "hw/acpi/aml-build.h"
#include "qemu/bswap.h"
#include "qemu/bitops.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/boards.h"
#include "hw/acpi/tpm.h"
#include "hw/pci/pci_host.h"
@@ -534,8 +534,7 @@ void aml_append(Aml *parent_ctx, Aml *child)
case AML_NO_OPCODE:
break;
default:
- assert(0);
- break;
+ g_assert_not_reached();
}
build_append_array(parent_ctx->buf, buf);
build_free_array(buf);
@@ -1939,6 +1938,89 @@ void build_srat_memory(GArray *table_data, uint64_t base,
}
/*
+ * ACPI Spec Revision 6.3
+ * Table 5-80 Device Handle - PCI
+ */
+static void build_append_srat_pci_device_handle(GArray *table_data,
+ uint16_t segment,
+ uint8_t bus, uint8_t devfn)
+{
+ /* PCI segment number */
+ build_append_int_noprefix(table_data, segment, 2);
+ /* PCI Bus Device Function */
+ build_append_int_noprefix(table_data, bus, 1);
+ build_append_int_noprefix(table_data, devfn, 1);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 12);
+}
+
+static void build_append_srat_acpi_device_handle(GArray *table_data,
+ const char *hid,
+ uint32_t uid)
+{
+ assert(strlen(hid) == 8);
+ /* Device Handle - ACPI */
+ for (int i = 0; i < 8; i++) {
+ build_append_int_noprefix(table_data, hid[i], 1);
+ }
+ build_append_int_noprefix(table_data, uid, 4);
+ build_append_int_noprefix(table_data, 0, 4);
+}
+
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.16.6 Generic Initiator Affinity Structure
+ * With PCI Device Handle.
+ */
+void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node,
+ uint16_t segment, uint8_t bus,
+ uint8_t devfn)
+{
+ /* Type */
+ build_append_int_noprefix(table_data, 5, 1);
+ /* Length */
+ build_append_int_noprefix(table_data, 32, 1);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 1);
+ /* Device Handle Type: PCI */
+ build_append_int_noprefix(table_data, 1, 1);
+ /* Proximity Domain */
+ build_append_int_noprefix(table_data, node, 4);
+ /* Device Handle */
+ build_append_srat_pci_device_handle(table_data, segment, bus, devfn);
+ /* Flags - GI Enabled */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
+}
+
+/*
+ * ACPI spec, Revision 6.5
+ * 5.2.16.7 Generic Port Affinity Structure
+ * With ACPI Device Handle.
+ */
+void build_srat_acpi_generic_port(GArray *table_data, uint32_t node,
+ const char *hid, uint32_t uid)
+{
+ /* Type */
+ build_append_int_noprefix(table_data, 6, 1);
+ /* Length */
+ build_append_int_noprefix(table_data, 32, 1);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 1);
+ /* Device Handle Type: ACPI */
+ build_append_int_noprefix(table_data, 0, 1);
+ /* Proximity Domain */
+ build_append_int_noprefix(table_data, node, 4);
+ /* Device Handle */
+ build_append_srat_acpi_device_handle(table_data, hid, uid);
+ /* Flags - GP Enabled */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
+}
+
+/*
* ACPI spec 5.2.17 System Locality Distance Information Table
* (Revision 2.0 or later)
*/
@@ -1996,7 +2078,7 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
void build_spcr(GArray *table_data, BIOSLinker *linker,
const AcpiSpcrData *f, const uint8_t rev,
- const char *oem_id, const char *oem_table_id)
+ const char *oem_id, const char *oem_table_id, const char *name)
{
AcpiTable table = { .sig = "SPCR", .rev = rev, .oem_id = oem_id,
.oem_table_id = oem_table_id };
@@ -2042,9 +2124,21 @@ void build_spcr(GArray *table_data, BIOSLinker *linker,
build_append_int_noprefix(table_data, f->pci_flags, 4);
/* PCI Segment */
build_append_int_noprefix(table_data, f->pci_segment, 1);
- /* Reserved */
- build_append_int_noprefix(table_data, 0, 4);
-
+ if (rev < 4) {
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
+ } else {
+ /* UartClkFreq */
+ build_append_int_noprefix(table_data, f->uart_clk_freq, 4);
+ /* PreciseBaudrate */
+ build_append_int_noprefix(table_data, f->precise_baudrate, 4);
+ /* NameSpaceStringLength */
+ build_append_int_noprefix(table_data, f->namespace_string_length, 2);
+ /* NameSpaceStringOffset */
+ build_append_int_noprefix(table_data, f->namespace_string_offset, 2);
+ /* NamespaceString[] */
+ g_array_append_vals(table_data, name, f->namespace_string_length);
+ }
acpi_table_end(linker, &table);
}
/*
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index ec5e127..58f8964 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -31,7 +31,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
struct acpi_table_header {
@@ -78,6 +78,11 @@ static void acpi_register_config(void)
opts_init(acpi_register_config);
+bool acpi_builtin(void)
+{
+ return true;
+}
+
static int acpi_checksum(const uint8_t *data, int len)
{
int sum, i;
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 2d81c1e..6f1ae79 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -5,9 +5,8 @@
#include "qapi/error.h"
#include "qapi/qapi-events-acpi.h"
#include "trace.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
-#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
#define ACPI_CPU_FLAGS_OFFSET_RW 4
#define ACPI_CPU_CMD_OFFSET_WR 5
@@ -236,8 +235,8 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
- CPUClass *k = CPU_GET_CLASS(dev);
- uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
+ CPUState *cpu = CPU(dev);
+ uint64_t cpu_arch_id = cpu->cc->get_arch_id(cpu);
int i;
for (i = 0; i < cpu_st->dev_count; i++) {
@@ -328,6 +327,7 @@ const VMStateDescription vmstate_cpu_hotplug = {
#define CPU_EJECT_METHOD "CEJ0"
#define CPU_OST_METHOD "COST"
#define CPU_ADDED_LIST "CNEW"
+#define CPU_EJ_LIST "CEJL"
#define CPU_ENABLED "CPEN"
#define CPU_SELECTOR "CSEL"
@@ -339,9 +339,10 @@ const VMStateDescription vmstate_cpu_hotplug = {
#define CPU_FW_EJECT_EVENT "CEJF"
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
- build_madt_cpu_fn build_madt_cpu, hwaddr io_base,
+ build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
const char *res_root,
- const char *event_handler_method)
+ const char *event_handler_method,
+ AmlRegionSpace rs)
{
Aml *ifctx;
Aml *field;
@@ -365,14 +366,22 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));
+ assert((rs == AML_SYSTEM_IO) || (rs == AML_SYSTEM_MEMORY));
+
crs = aml_resource_template();
- aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
+ if (rs == AML_SYSTEM_IO) {
+ aml_append(crs, aml_io(AML_DECODE16, base_addr, base_addr, 1,
ACPI_CPU_HOTPLUG_REG_LEN));
+ } else if (rs == AML_SYSTEM_MEMORY) {
+ aml_append(crs, aml_memory32_fixed(base_addr,
+ ACPI_CPU_HOTPLUG_REG_LEN, AML_READ_WRITE));
+ }
+
aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));
/* declare CPU hotplug MMIO region with related access fields */
aml_append(cpu_ctrl_dev,
- aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
+ aml_operation_region("PRST", rs, aml_int(base_addr),
ACPI_CPU_HOTPLUG_REG_LEN));
field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
@@ -480,7 +489,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
{
const uint8_t max_cpus_per_pass = 255;
- Aml *else_ctx;
Aml *while_ctx, *while_ctx2;
Aml *has_event = aml_local(0);
Aml *dev_chk = aml_int(1);
@@ -491,6 +499,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
Aml *uid = aml_local(3);
Aml *has_job = aml_local(4);
Aml *new_cpus = aml_name(CPU_ADDED_LIST);
+ Aml *ej_cpus = aml_name(CPU_EJ_LIST);
+ Aml *num_ej_cpus = aml_local(5);
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
@@ -505,6 +515,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
*/
aml_append(method, aml_name_decl(CPU_ADDED_LIST,
aml_package(max_cpus_per_pass)));
+ aml_append(method, aml_name_decl(CPU_EJ_LIST,
+ aml_package(max_cpus_per_pass)));
aml_append(method, aml_store(zero, uid));
aml_append(method, aml_store(one, has_job));
@@ -519,6 +531,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(while_ctx2, aml_store(one, has_event));
aml_append(while_ctx2, aml_store(zero, num_added_cpus));
+ aml_append(while_ctx2, aml_store(zero, num_ej_cpus));
/*
* Scan CPUs, till there are CPUs with events or
@@ -551,8 +564,10 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
* if CPU_ADDED_LIST is full, exit inner loop and process
* collected CPUs
*/
- ifctx = aml_if(
- aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)));
+ ifctx = aml_if(aml_lor(
+ aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)),
+ aml_equal(num_ej_cpus, aml_int(max_cpus_per_pass))
+ ));
{
aml_append(ifctx, aml_store(one, has_job));
aml_append(ifctx, aml_break());
@@ -569,16 +584,16 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(ifctx, aml_store(one, has_event));
}
aml_append(while_ctx, ifctx);
- else_ctx = aml_else();
+
ifctx = aml_if(aml_equal(rm_evt, one));
{
- aml_append(ifctx,
- aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
- aml_append(ifctx, aml_store(one, rm_evt));
+ /* cache the to-be-removed CPUs, to Notify them later */
+ aml_append(ifctx, aml_store(uid,
+ aml_index(ej_cpus, num_ej_cpus)));
+ aml_append(ifctx, aml_increment(num_ej_cpus));
aml_append(ifctx, aml_store(one, has_event));
}
- aml_append(else_ctx, ifctx);
- aml_append(while_ctx, else_ctx);
+ aml_append(while_ctx, ifctx);
aml_append(while_ctx, aml_increment(uid));
}
aml_append(while_ctx2, while_ctx);
@@ -612,6 +627,24 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(while_ctx, aml_increment(cpu_idx));
}
aml_append(while_ctx2, while_ctx);
+
+ /*
+ * Notify OSPM about the to-be-removed CPUs and clear the remove flag
+ */
+ aml_append(while_ctx2, aml_store(zero, cpu_idx));
+ while_ctx = aml_while(aml_lless(cpu_idx, num_ej_cpus));
+ {
+ aml_append(while_ctx,
+ aml_store(aml_derefof(aml_index(ej_cpus, cpu_idx)),
+ uid));
+ aml_append(while_ctx,
+ aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
+ aml_append(while_ctx, aml_store(uid, cpu_selector));
+ aml_append(while_ctx, aml_store(one, rm_evt));
+ aml_append(while_ctx, aml_increment(cpu_idx));
+ }
+ aml_append(while_ctx2, while_ctx);
+
/*
* If another batch is needed, then it will resume scanning
* exactly at -- and not after -- the last CPU that's currently
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index 83b8bc5..aa0e1e3 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -62,10 +62,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = {
static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu,
bool *swtchd_to_modern)
{
- CPUClass *k = CPU_GET_CLASS(cpu);
int64_t cpu_id;
- cpu_id = k->get_arch_id(cpu);
+ cpu_id = cpu->cc->get_arch_id(cpu);
if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) {
object_property_set_bool(g->device, "cpu-hotplug-legacy", false,
&error_abort);
diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c
index b2f1b13..099cabb 100644
--- a/hw/acpi/erst.c
+++ b/hw/acpi/erst.c
@@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-core.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#include "hw/pci/pci_device.h"
#include "qom/object_interfaces.h"
@@ -23,8 +23,8 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
-#include "exec/address-spaces.h"
-#include "sysemu/hostmem.h"
+#include "system/address-spaces.h"
+#include "system/hostmem.h"
#include "hw/acpi/erst.h"
#include "trace.h"
@@ -1011,15 +1011,14 @@ static void erst_reset(DeviceState *dev)
trace_acpi_erst_reset_out(le32_to_cpu(s->header->record_count));
}
-static Property erst_properties[] = {
+static const Property erst_properties[] = {
DEFINE_PROP_LINK(ACPI_ERST_MEMDEV_PROP, ERSTDeviceState, hostmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
DEFINE_PROP_UINT32(ACPI_ERST_RECORD_SIZE_PROP, ERSTDeviceState,
default_record_size, ERST_RECORD_SIZE),
- DEFINE_PROP_END_OF_LIST(),
};
-static void erst_class_init(ObjectClass *klass, void *data)
+static void erst_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1030,7 +1029,7 @@ static void erst_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_REDHAT_ACPI_ERST;
k->revision = 0x00;
k->class_id = PCI_CLASS_OTHERS;
- dc->reset = erst_reset;
+ device_class_set_legacy_reset(dc, erst_reset);
dc->vmsd = &erst_vmstate;
dc->user_creatable = true;
dc->hotpluggable = false;
@@ -1045,7 +1044,7 @@ static const TypeInfo erst_type_info = {
.parent = TYPE_PCI_DEVICE,
.class_init = erst_class_init,
.instance_size = sizeof(ERSTDeviceState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
}
diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c
index 2d6e91b..7a62f8d 100644
--- a/hw/acpi/generic_event_device.c
+++ b/hw/acpi/generic_event_device.c
@@ -19,12 +19,13 @@
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
static const uint32_t ged_supported_events[] = {
ACPI_GED_MEM_HOTPLUG_EVT,
ACPI_GED_PWR_DOWN_EVT,
ACPI_GED_NVDIMM_HOTPLUG_EVT,
+ ACPI_GED_CPU_HOTPLUG_EVT,
};
/*
@@ -107,6 +108,9 @@ void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev,
aml_append(if_ctx, aml_call0(MEMORY_DEVICES_CONTAINER "."
MEMORY_SLOT_SCAN_METHOD));
break;
+ case ACPI_GED_CPU_HOTPLUG_EVT:
+ aml_append(if_ctx, aml_call0(AML_GED_EVT_CPU_SCAN_METHOD));
+ break;
case ACPI_GED_PWR_DOWN_EVT:
aml_append(if_ctx,
aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
@@ -197,9 +201,9 @@ static void ged_regs_write(void *opaque, hwaddr addr, uint64_t data,
switch (addr) {
case ACPI_GED_REG_SLEEP_CTL:
- slp_typ = (data >> 2) & 0x07;
- slp_en = (data >> 5) & 0x01;
- if (slp_en && slp_typ == 5) {
+ slp_typ = (data >> ACPI_GED_SLP_TYP_POS) & ACPI_GED_SLP_TYP_MASK;
+ slp_en = !!(data & ACPI_GED_SLP_EN);
+ if (slp_en && slp_typ == ACPI_GED_SLP_TYP_S5) {
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
}
return;
@@ -234,6 +238,8 @@ static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev,
} else {
acpi_memory_plug_cb(hotplug_dev, &s->memhp_state, dev, errp);
}
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
} else {
error_setg(errp, "virt: device plug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -248,6 +254,8 @@ static void acpi_ged_unplug_request_cb(HotplugHandler *hotplug_dev,
if ((object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
!(object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)))) {
acpi_memory_unplug_request_cb(hotplug_dev, &s->memhp_state, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -261,6 +269,8 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev,
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
acpi_memory_unplug_cb(&s->memhp_state, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -272,6 +282,7 @@ static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
AcpiGedState *s = ACPI_GED(adev);
acpi_memory_ospm_status(&s->memhp_state, list);
+ acpi_cpu_ospm_status(&s->cpuhp_state, list);
}
static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
@@ -286,6 +297,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
sel = ACPI_GED_PWR_DOWN_EVT;
} else if (ev & ACPI_NVDIMM_HOTPLUG_STATUS) {
sel = ACPI_GED_NVDIMM_HOTPLUG_EVT;
+ } else if (ev & ACPI_CPU_HOTPLUG_STATUS) {
+ sel = ACPI_GED_CPU_HOTPLUG_EVT;
} else {
/* Unknown event. Return without generating interrupt. */
warn_report("GED: Unsupported event %d. No irq injected", ev);
@@ -303,9 +316,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
qemu_irq_pulse(s->irq);
}
-static Property acpi_ged_properties[] = {
+static const Property acpi_ged_properties[] = {
DEFINE_PROP_UINT32("ged-event", AcpiGedState, ged_event_bitmap, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_memhp_state = {
@@ -318,6 +330,24 @@ static const VMStateDescription vmstate_memhp_state = {
}
};
+static bool cpuhp_needed(void *opaque)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+
+ return mc->has_hotpluggable_cpus;
+}
+
+static const VMStateDescription vmstate_cpuhp_state = {
+ .name = "acpi-ged/cpuhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpuhp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU_HOTPLUG(cpuhp_state, AcpiGedState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_ged_state = {
.name = "acpi-ged-state",
.version_id = 1,
@@ -333,7 +363,7 @@ static const VMStateDescription vmstate_ghes = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
- VMSTATE_UINT64(ghes_addr_le, AcpiGhesState),
+ VMSTATE_UINT64(hw_error_le, AcpiGhesState),
VMSTATE_END_OF_LIST()
},
};
@@ -341,7 +371,7 @@ static const VMStateDescription vmstate_ghes = {
static bool ghes_needed(void *opaque)
{
AcpiGedState *s = opaque;
- return s->ghes_state.ghes_addr_le;
+ return s->ghes_state.hw_error_le;
}
static const VMStateDescription vmstate_ghes_state = {
@@ -366,11 +396,48 @@ static const VMStateDescription vmstate_acpi_ged = {
},
.subsections = (const VMStateDescription * const []) {
&vmstate_memhp_state,
+ &vmstate_cpuhp_state,
&vmstate_ghes_state,
NULL
}
};
+static void acpi_ged_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AcpiGedState *s = ACPI_GED(dev);
+ uint32_t ged_events;
+ int i;
+
+ ged_events = ctpop32(s->ged_event_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(ged_supported_events) && ged_events; i++) {
+ uint32_t event = s->ged_event_bitmap & ged_supported_events[i];
+
+ if (!event) {
+ continue;
+ }
+
+ switch (event) {
+ case ACPI_GED_CPU_HOTPLUG_EVT:
+ /* initialize CPU Hotplug related regions */
+ memory_region_init(&s->container_cpuhp, OBJECT(dev),
+ "cpuhp container",
+ ACPI_CPU_HOTPLUG_REG_LEN);
+ sysbus_init_mmio(sbd, &s->container_cpuhp);
+ cpu_hotplug_hw_init(&s->container_cpuhp, OBJECT(dev),
+ &s->cpuhp_state, 0);
+ break;
+ }
+ ged_events--;
+ }
+
+ if (ged_events) {
+ error_report("Unsupported events specified");
+ abort();
+ }
+}
+
static void acpi_ged_initfn(Object *obj)
{
DeviceState *dev = DEVICE(obj);
@@ -391,18 +458,18 @@ static void acpi_ged_initfn(Object *obj)
* container for memory hotplug IO and expose it as GED sysbus
* MMIO so that boards can map it separately.
*/
- memory_region_init(&s->container_memhp, OBJECT(dev), "memhp container",
- MEMORY_HOTPLUG_IO_LEN);
- sysbus_init_mmio(sbd, &s->container_memhp);
- acpi_memory_hotplug_init(&s->container_memhp, OBJECT(dev),
- &s->memhp_state, 0);
+ memory_region_init(&s->container_memhp, OBJECT(dev), "memhp container",
+ MEMORY_HOTPLUG_IO_LEN);
+ sysbus_init_mmio(sbd, &s->container_memhp);
+ acpi_memory_hotplug_init(&s->container_memhp, OBJECT(dev),
+ &s->memhp_state, 0);
memory_region_init_io(&ged_st->regs, obj, &ged_regs_ops, ged_st,
TYPE_ACPI_GED "-regs", ACPI_GED_REG_COUNT);
sysbus_init_mmio(sbd, &ged_st->regs);
}
-static void acpi_ged_class_init(ObjectClass *class, void *data)
+static void acpi_ged_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(class);
@@ -411,6 +478,7 @@ static void acpi_ged_class_init(ObjectClass *class, void *data)
dc->desc = "ACPI Generic Event Device";
device_class_set_props(dc, acpi_ged_properties);
dc->vmsd = &vmstate_acpi_ged;
+ dc->realize = acpi_ged_realize;
hc->plug = acpi_ged_device_plug_cb;
hc->unplug_request = acpi_ged_unplug_request_cb;
@@ -426,7 +494,7 @@ static const TypeInfo acpi_ged_info = {
.instance_size = sizeof(AcpiGedState),
.instance_init = acpi_ged_initfn,
.class_init = acpi_ged_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ }
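
[Editor's note, not part of the patch: a hypothetical board-side sketch of opting into the new GED CPU hotplug handling. Only TYPE_ACPI_GED, the "ged-event" property and the event bits come from the code above; the wrapper function, event mix and the remaining board wiring (MMIO mapping, IRQ) are assumptions:

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"
    #include "hw/sysbus.h"
    #include "hw/acpi/generic_event_device.h"
    #include "qapi/error.h"

    /* Hypothetical helper: advertise CPU hotplug so that acpi_ged_realize()
     * creates the cpuhp MMIO container and initializes CPUHotplugState. */
    static DeviceState *create_acpi_ged_with_cpuhp(void)
    {
        DeviceState *dev = qdev_new(TYPE_ACPI_GED);
        uint32_t events = ACPI_GED_PWR_DOWN_EVT | ACPI_GED_CPU_HOTPLUG_EVT;

        qdev_prop_set_uint32(dev, "ged-event", events);
        sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
        /* the board must still map the GED sysbus MMIO regions and its IRQ */
        return dev;
    }
]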
diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c
index c315de1..7cec181 100644
--- a/hw/acpi/ghes-stub.c
+++ b/hw/acpi/ghes-stub.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "hw/acpi/ghes.h"
-int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
+int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address)
{
return -1;
}
diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c
index e9511d9..b85bb48 100644
--- a/hw/acpi/ghes.c
+++ b/hw/acpi/ghes.c
@@ -28,15 +28,12 @@
#include "hw/nvram/fw_cfg.h"
#include "qemu/uuid.h"
-#define ACPI_GHES_ERRORS_FW_CFG_FILE "etc/hardware_errors"
-#define ACPI_GHES_DATA_ADDR_FW_CFG_FILE "etc/hardware_errors_addr"
+#define ACPI_HW_ERROR_FW_CFG_FILE "etc/hardware_errors"
+#define ACPI_HW_ERROR_ADDR_FW_CFG_FILE "etc/hardware_errors_addr"
/* The max size in bytes for one error block */
#define ACPI_GHES_MAX_RAW_DATA_LENGTH (1 * KiB)
-/* Now only support ARMv8 SEA notification type error source */
-#define ACPI_GHES_ERROR_SOURCE_COUNT 1
-
/* Generic Hardware Error Source version 2 */
#define ACPI_GHES_SOURCE_GENERIC_ERROR_V2 10
@@ -184,51 +181,24 @@ static void acpi_ghes_build_append_mem_cper(GArray *table,
build_append_int_noprefix(table, 0, 7);
}
-static int acpi_ghes_record_mem_error(uint64_t error_block_address,
- uint64_t error_physical_addr)
+static void
+ghes_gen_err_data_uncorrectable_recoverable(GArray *block,
+ const uint8_t *section_type,
+ int data_length)
{
- GArray *block;
-
- /* Memory Error Section Type */
- const uint8_t uefi_cper_mem_sec[] =
- UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
- 0xED, 0x7C, 0x83, 0xB1);
-
/* invalid fru id: ACPI 4.0: 17.3.2.6.1 Generic Error Data,
* Table 17-13 Generic Error Data Entry
*/
QemuUUID fru_id = {};
- uint32_t data_length;
-
- block = g_array_new(false, true /* clear */, 1);
-
- /* This is the length if adding a new generic error data entry*/
- data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH;
- /*
- * It should not run out of the preallocated memory if adding a new generic
- * error data entry
- */
- assert((data_length + ACPI_GHES_GESB_SIZE) <=
- ACPI_GHES_MAX_RAW_DATA_LENGTH);
/* Build the new generic error status block header */
acpi_ghes_generic_error_status(block, ACPI_GEBS_UNCORRECTABLE,
0, 0, data_length, ACPI_CPER_SEV_RECOVERABLE);
/* Build this new generic error data entry header */
- acpi_ghes_generic_error_data(block, uefi_cper_mem_sec,
+ acpi_ghes_generic_error_data(block, section_type,
ACPI_CPER_SEV_RECOVERABLE, 0, 0,
ACPI_GHES_MEM_CPER_LENGTH, fru_id, 0);
-
- /* Build the memory section CPER for above new generic error data entry */
- acpi_ghes_build_append_mem_cper(block, error_physical_addr);
-
- /* Write the generic error data entry into guest memory */
- cpu_physical_memory_write(error_block_address, block->data, block->len);
-
- g_array_free(block, true);
-
- return 0;
}
/*
@@ -236,7 +206,7 @@ static int acpi_ghes_record_mem_error(uint64_t error_block_address,
* Initialize "etc/hardware_errors" and "etc/hardware_errors_addr" fw_cfg blobs.
* See docs/specs/acpi_hest_ghes.rst for blobs format.
*/
-void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker)
+static void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker)
{
int i, error_status_block_offset;
@@ -264,7 +234,7 @@ void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker)
ACPI_GHES_MAX_RAW_DATA_LENGTH * ACPI_GHES_ERROR_SOURCE_COUNT);
/* Tell guest firmware to place hardware_errors blob into RAM */
- bios_linker_loader_alloc(linker, ACPI_GHES_ERRORS_FW_CFG_FILE,
+ bios_linker_loader_alloc(linker, ACPI_HW_ERROR_FW_CFG_FILE,
hardware_errors, sizeof(uint64_t), false);
for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) {
@@ -273,23 +243,31 @@ void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker)
* corresponding "Generic Error Status Block"
*/
bios_linker_loader_add_pointer(linker,
- ACPI_GHES_ERRORS_FW_CFG_FILE, sizeof(uint64_t) * i,
- sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE,
- error_status_block_offset + i * ACPI_GHES_MAX_RAW_DATA_LENGTH);
+ ACPI_HW_ERROR_FW_CFG_FILE,
+ sizeof(uint64_t) * i,
+ sizeof(uint64_t),
+ ACPI_HW_ERROR_FW_CFG_FILE,
+ error_status_block_offset +
+ i * ACPI_GHES_MAX_RAW_DATA_LENGTH);
}
/*
* tell firmware to write hardware_errors GPA into
* hardware_errors_addr fw_cfg, once the former has been initialized.
*/
- bios_linker_loader_write_pointer(linker, ACPI_GHES_DATA_ADDR_FW_CFG_FILE,
- 0, sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE, 0);
+ bios_linker_loader_write_pointer(linker, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, 0,
+ sizeof(uint64_t),
+ ACPI_HW_ERROR_FW_CFG_FILE, 0);
}
/* Build Generic Hardware Error Source version 2 (GHESv2) */
-static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker)
+static void build_ghes_v2(GArray *table_data,
+ BIOSLinker *linker,
+ enum AcpiGhesNotifyType notify,
+ uint16_t source_id)
{
uint64_t address_offset;
+
/*
* Type:
* Generic Hardware Error Source version 2(GHESv2 - Type 10)
@@ -316,21 +294,13 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker)
build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0,
4 /* QWord access */, 0);
bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
- address_offset + GAS_ADDR_OFFSET, sizeof(uint64_t),
- ACPI_GHES_ERRORS_FW_CFG_FILE, source_id * sizeof(uint64_t));
+ address_offset + GAS_ADDR_OFFSET,
+ sizeof(uint64_t),
+ ACPI_HW_ERROR_FW_CFG_FILE,
+ source_id * sizeof(uint64_t));
- switch (source_id) {
- case ACPI_HEST_SRC_ID_SEA:
- /*
- * Notification Structure
- * Now only enable ARMv8 SEA notification type
- */
- build_ghes_hw_error_notification(table_data, ACPI_GHES_NOTIFY_SEA);
- break;
- default:
- error_report("Not support this error source");
- abort();
- }
+ /* Notification Structure */
+ build_ghes_hw_error_notification(table_data, notify);
/* Error Status Block Length */
build_append_int_noprefix(table_data, ACPI_GHES_MAX_RAW_DATA_LENGTH, 4);
@@ -344,9 +314,11 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker)
build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0,
4 /* QWord access */, 0);
bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
- address_offset + GAS_ADDR_OFFSET,
- sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE,
- (ACPI_GHES_ERROR_SOURCE_COUNT + source_id) * sizeof(uint64_t));
+ address_offset + GAS_ADDR_OFFSET,
+ sizeof(uint64_t),
+ ACPI_HW_ERROR_FW_CFG_FILE,
+ (ACPI_GHES_ERROR_SOURCE_COUNT + source_id)
+ * sizeof(uint64_t));
/*
* Read Ack Preserve field
@@ -359,17 +331,21 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker)
}
/* Build Hardware Error Source Table */
-void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
+void acpi_build_hest(GArray *table_data, GArray *hardware_errors,
+ BIOSLinker *linker,
const char *oem_id, const char *oem_table_id)
{
AcpiTable table = { .sig = "HEST", .rev = 1,
.oem_id = oem_id, .oem_table_id = oem_table_id };
+ build_ghes_error_table(hardware_errors, linker);
+
acpi_table_begin(&table, table_data);
/* Error Source Count */
build_append_int_noprefix(table_data, ACPI_GHES_ERROR_SOURCE_COUNT, 4);
- build_ghes_v2(table_data, ACPI_HEST_SRC_ID_SEA, linker);
+ build_ghes_v2(table_data, linker,
+ ACPI_GHES_NOTIFY_SEA, ACPI_HEST_SRC_ID_SEA);
acpi_table_end(linker, &table);
}
@@ -378,70 +354,130 @@ void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s,
GArray *hardware_error)
{
/* Create a read-only fw_cfg file for GHES */
- fw_cfg_add_file(s, ACPI_GHES_ERRORS_FW_CFG_FILE, hardware_error->data,
+ fw_cfg_add_file(s, ACPI_HW_ERROR_FW_CFG_FILE, hardware_error->data,
hardware_error->len);
/* Create a read-write fw_cfg file for Address */
- fw_cfg_add_file_callback(s, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, NULL, NULL,
- NULL, &(ags->ghes_addr_le), sizeof(ags->ghes_addr_le), false);
+ fw_cfg_add_file_callback(s, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, NULL, NULL,
+ NULL, &(ags->hw_error_le), sizeof(ags->hw_error_le), false);
ags->present = true;
}
-int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
+static void get_hw_error_offsets(uint64_t ghes_addr,
+ uint64_t *cper_addr,
+ uint64_t *read_ack_register_addr)
+{
+ if (!ghes_addr) {
+ return;
+ }
+
+ /*
+ * non-HEST version supports only one source, so no need to change
+ * the start offset based on the source ID. Also, we can't validate
+ * the source ID, as it is stored inside the HEST table.
+ */
+
+ cpu_physical_memory_read(ghes_addr, cper_addr,
+ sizeof(*cper_addr));
+
+ *cper_addr = le64_to_cpu(*cper_addr);
+
+ /*
+ * As the current version supports only one source, the ack offset is
+ * just sizeof(uint64_t).
+ */
+ *read_ack_register_addr = ghes_addr + sizeof(uint64_t);
+}
+
+static void ghes_record_cper_errors(const void *cper, size_t len,
+ uint16_t source_id, Error **errp)
{
- uint64_t error_block_addr, read_ack_register_addr, read_ack_register = 0;
- uint64_t start_addr;
- bool ret = -1;
+ uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register;
AcpiGedState *acpi_ged_state;
AcpiGhesState *ags;
- assert(source_id < ACPI_HEST_SRC_ID_RESERVED);
+ if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) {
+ error_setg(errp, "GHES CPER record is too big: %zd", len);
+ return;
+ }
acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED,
NULL));
- g_assert(acpi_ged_state);
+ if (!acpi_ged_state) {
+ error_setg(errp, "Can't find ACPI_GED object");
+ return;
+ }
ags = &acpi_ged_state->ghes_state;
- start_addr = le64_to_cpu(ags->ghes_addr_le);
+ assert(ACPI_GHES_ERROR_SOURCE_COUNT == 1);
+ get_hw_error_offsets(le64_to_cpu(ags->hw_error_le),
+ &cper_addr, &read_ack_register_addr);
+
+ if (!cper_addr) {
+ error_setg(errp, "can not find Generic Error Status Block");
+ return;
+ }
+
+ cpu_physical_memory_read(read_ack_register_addr,
+ &read_ack_register, sizeof(read_ack_register));
+
+ /* zero means OSPM does not acknowledge the error */
+ if (!read_ack_register) {
+ error_setg(errp,
+ "OSPM does not acknowledge previous error,"
+ " so can not record CPER for current error anymore");
+ return;
+ }
+
+ read_ack_register = cpu_to_le64(0);
+ /*
+ * Clear the Read Ack Register; OSPM will write 1 to this register when
+ * it acknowledges the error.
+ */
+ cpu_physical_memory_write(read_ack_register_addr,
+ &read_ack_register, sizeof(uint64_t));
+
+ /* Write the generic error data entry into guest memory */
+ cpu_physical_memory_write(cper_addr, cper, len);
+}
- if (physical_address) {
+int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address)
+{
+ /* Memory Error Section Type */
+ const uint8_t guid[] =
+ UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1);
+ Error *errp = NULL;
+ int data_length;
+ GArray *block;
- if (source_id < ACPI_HEST_SRC_ID_RESERVED) {
- start_addr += source_id * sizeof(uint64_t);
- }
+ block = g_array_new(false, true /* clear */, 1);
- cpu_physical_memory_read(start_addr, &error_block_addr,
- sizeof(error_block_addr));
+ data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH;
+ /*
+ * Adding a new generic error data entry must not run out of the
+ * preallocated memory.
+ */
+ assert((data_length + ACPI_GHES_GESB_SIZE) <=
+ ACPI_GHES_MAX_RAW_DATA_LENGTH);
- error_block_addr = le64_to_cpu(error_block_addr);
+ ghes_gen_err_data_uncorrectable_recoverable(block, guid, data_length);
- read_ack_register_addr = start_addr +
- ACPI_GHES_ERROR_SOURCE_COUNT * sizeof(uint64_t);
+ /* Build the memory section CPER for above new generic error data entry */
+ acpi_ghes_build_append_mem_cper(block, physical_address);
- cpu_physical_memory_read(read_ack_register_addr,
- &read_ack_register, sizeof(read_ack_register));
+ /* Report the error */
+ ghes_record_cper_errors(block->data, block->len, source_id, &errp);
- /* zero means OSPM does not acknowledge the error */
- if (!read_ack_register) {
- error_report("OSPM does not acknowledge previous error,"
- " so can not record CPER for current error anymore");
- } else if (error_block_addr) {
- read_ack_register = cpu_to_le64(0);
- /*
- * Clear the Read Ack Register, OSPM will write it to 1 when
- * it acknowledges this error.
- */
- cpu_physical_memory_write(read_ack_register_addr,
- &read_ack_register, sizeof(uint64_t));
+ g_array_free(block, true);
- ret = acpi_ghes_record_mem_error(error_block_addr,
- physical_address);
- } else
- error_report("can not find Generic Error Status Block");
+ if (errp) {
+ error_report_err(errp);
+ return -1;
}
- return ret;
+ return 0;
}
bool acpi_ghes_present(void)
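
The refactor above splits the old acpi_ghes_record_errors() into an offset lookup (get_hw_error_offsets), a generic CPER writer (ghes_record_cper_errors) and the public acpi_ghes_memory_errors() entry point. As a rough sketch of how a machine's error path might drive the new API — the handler name and the way the faulting address is obtained are assumptions, not part of this patch:

    /* Illustrative caller only; not part of this series. */
    static void report_guest_memory_error(uint64_t paddr)
    {
        if (!acpi_ghes_present()) {
            return;   /* no GHES state registered for this machine */
        }
        if (acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
            /* acpi_ghes_memory_errors() returns -1 when the CPER was not recorded */
            error_report("failed to record CPER for guest memory error");
        }
    }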
diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
index 9b1662b..ca7b183 100644
--- a/hw/acpi/hmat.c
+++ b/hw/acpi/hmat.c
@@ -26,7 +26,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/hmat.h"
diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h
index fd989cb..362b05e 100644
--- a/hw/acpi/hmat.h
+++ b/hw/acpi/hmat.h
@@ -28,7 +28,7 @@
#define HMAT_H
#include "hw/acpi/bios-linker-loader.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
/*
* ACPI 6.3: 5.2.27.3 Memory Proximity Domain Attributes Structure,
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 02d8546..967b674 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -31,24 +31,16 @@
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "hw/core/cpu.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9_tco.h"
+#include "hw/acpi/ich9_timer.h"
#include "hw/southbridge/ich9.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
-//#define DEBUG
-
-#ifdef DEBUG
-#define ICH9_DEBUG(fmt, ...) \
-do { printf("%s "fmt, __func__, ## __VA_ARGS__); } while (0)
-#else
-#define ICH9_DEBUG(fmt, ...) do { } while (0)
-#endif
-
static void ich9_pm_update_sci_fn(ACPIREGS *regs)
{
ICH9LPCPMRegs *pm = container_of(regs, ICH9LPCPMRegs, acpi_regs);
@@ -108,6 +100,18 @@ static void ich9_smi_writel(void *opaque, hwaddr addr, uint64_t val,
}
pm->smi_en &= ~pm->smi_en_wmask;
pm->smi_en |= (val & pm->smi_en_wmask);
+ if (pm->swsmi_timer_enabled) {
+ ich9_pm_update_swsmi_timer(pm, pm->smi_en &
+ ICH9_PMIO_SMI_EN_SWSMI_EN);
+ }
+ if (pm->periodic_timer_enabled) {
+ ich9_pm_update_periodic_timer(pm, pm->smi_en &
+ ICH9_PMIO_SMI_EN_PERIODIC_EN);
+ }
+ break;
+ case 4:
+ pm->smi_sts &= ~pm->smi_sts_wmask;
+ pm->smi_sts |= (val & pm->smi_sts_wmask);
break;
}
}
@@ -122,8 +126,6 @@ static const MemoryRegionOps ich9_smi_ops = {
void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base)
{
- ICH9_DEBUG("to 0x%x\n", pm_io_base);
-
assert((pm_io_base & ICH9_PMIO_MASK) == 0);
pm->pm_io_base = pm_io_base;
@@ -286,6 +288,8 @@ static void pm_powerdown_req(Notifier *n, void *opaque)
void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq)
{
+ pm->smi_sts_wmask = 0;
+
memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE);
memory_region_set_enabled(&pm->io, false);
memory_region_add_subregion(pci_address_space_io(lpc_pci),
@@ -305,6 +309,14 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq)
"acpi-smi", 8);
memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi);
+ if (pm->swsmi_timer_enabled) {
+ ich9_pm_swsmi_timer_init(pm);
+ }
+
+ if (pm->periodic_timer_enabled) {
+ ich9_pm_periodic_timer_init(pm);
+ }
+
if (pm->enable_tco) {
acpi_pm_tco_init(&pm->tco_regs, &pm->io);
}
@@ -547,7 +559,7 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev);
- return acpi_pcihp_is_hotpluggbale_bus(&lpc->pm.acpi_pci_hotplug, bus);
+ return acpi_pcihp_is_hotpluggable_bus(&lpc->pm.acpi_pci_hotplug, bus);
}
void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c
index 8160621..6300db6 100644
--- a/hw/acpi/ich9_tco.c
+++ b/hw/acpi/ich9_tco.c
@@ -8,7 +8,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/southbridge/ich9.h"
#include "migration/vmstate.h"
diff --git a/hw/acpi/ich9_timer.c b/hw/acpi/ich9_timer.c
new file mode 100644
index 0000000..5b1c910
--- /dev/null
+++ b/hw/acpi/ich9_timer.c
@@ -0,0 +1,93 @@
+/*
+ * QEMU ICH9 Timer emulation
+ *
+ * Copyright (c) 2024 Dominic Prinz <git@dprinz.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/core/cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/southbridge/ich9.h"
+#include "qemu/timer.h"
+
+#include "hw/acpi/ich9_timer.h"
+
+void ich9_pm_update_swsmi_timer(ICH9LPCPMRegs *pm, bool enable)
+{
+ uint16_t swsmi_rate_sel;
+ int64_t expire_time;
+ ICH9LPCState *lpc;
+
+ if (enable) {
+ lpc = container_of(pm, ICH9LPCState, pm);
+ swsmi_rate_sel =
+ (pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_3) & 0xc0) >> 6;
+
+ if (swsmi_rate_sel == 0) {
+ expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1500000LL;
+ } else {
+ expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ 8 * (1 << swsmi_rate_sel) * 1000000LL;
+ }
+
+ timer_mod(pm->swsmi_timer, expire_time);
+ } else {
+ timer_del(pm->swsmi_timer);
+ }
+}
+
+static void ich9_pm_swsmi_timer_expired(void *opaque)
+{
+ ICH9LPCPMRegs *pm = opaque;
+
+ pm->smi_sts |= ICH9_PMIO_SMI_STS_SWSMI_STS;
+ ich9_generate_smi();
+
+ ich9_pm_update_swsmi_timer(pm, pm->smi_en & ICH9_PMIO_SMI_EN_SWSMI_EN);
+}
+
+void ich9_pm_swsmi_timer_init(ICH9LPCPMRegs *pm)
+{
+ pm->smi_sts_wmask |= ICH9_PMIO_SMI_STS_SWSMI_STS;
+ pm->swsmi_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, ich9_pm_swsmi_timer_expired, pm);
+}
+
+void ich9_pm_update_periodic_timer(ICH9LPCPMRegs *pm, bool enable)
+{
+ uint16_t per_smi_sel;
+ int64_t expire_time;
+ ICH9LPCState *lpc;
+
+ if (enable) {
+ lpc = container_of(pm, ICH9LPCState, pm);
+ per_smi_sel = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1) & 3;
+ expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ 8 * (1 << (3 - per_smi_sel)) * NANOSECONDS_PER_SECOND;
+
+ timer_mod(pm->periodic_timer, expire_time);
+ } else {
+ timer_del(pm->periodic_timer);
+ }
+}
+
+static void ich9_pm_periodic_timer_expired(void *opaque)
+{
+ ICH9LPCPMRegs *pm = opaque;
+
+ pm->smi_sts = ICH9_PMIO_SMI_STS_PERIODIC_STS;
+ ich9_generate_smi();
+
+ ich9_pm_update_periodic_timer(pm,
+ pm->smi_en & ICH9_PMIO_SMI_EN_PERIODIC_EN);
+}
+
+void ich9_pm_periodic_timer_init(ICH9LPCPMRegs *pm)
+{
+ pm->smi_sts_wmask |= ICH9_PMIO_SMI_STS_PERIODIC_STS;
+ pm->periodic_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, ich9_pm_periodic_timer_expired, pm);
+}
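
For reference, the two rate-select fields above decode to the following periods; the numbers are derived directly from the expressions in this file, not quoted from a datasheet:

    /* Illustrative helpers mirroring the formulas above. */
    static int64_t swsmi_period_ns(uint16_t swsmi_rate_sel)
    {
        /* 0 -> 1.5 ms; 1 -> 16 ms; 2 -> 32 ms; 3 -> 64 ms */
        return swsmi_rate_sel ? 8 * (1 << swsmi_rate_sel) * 1000000LL
                              : 1500000LL;
    }

    static int64_t periodic_period_ns(uint16_t per_smi_sel)
    {
        /* 0 -> 64 s; 1 -> 32 s; 2 -> 16 s; 3 -> 8 s */
        return 8 * (1 << (3 - per_smi_sel)) * NANOSECONDS_PER_SECOND;
    }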
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
index a20e57d..39f8f2f 100644
--- a/hw/acpi/ipmi.c
+++ b/hw/acpi/ipmi.c
@@ -55,7 +55,8 @@ static Aml *aml_ipmi_crs(IPMIFwInfo *info)
abort();
}
- if (info->interrupt_number) {
+ /* Should PCI interrupts also be appended? */
+ if (info->irq_source == IPMI_ISA_IRQ && info->interrupt_number) {
aml_append(crs, aml_irq_no_flags(info->interrupt_number));
}
diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
index fa5c07d..73f02b9 100644
--- a/hw/acpi/meson.build
+++ b/hw/acpi/meson.build
@@ -1,6 +1,5 @@
acpi_ss = ss.source_set()
acpi_ss.add(files(
- 'acpi_generic_initiator.c',
'acpi_interface.c',
'aml-build.c',
'bios-linker-loader.c',
@@ -16,6 +15,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CXL', if_true: files('cxl.c'), if_false: files('cxl-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_VMCLOCK', if_true: files('vmclock.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
@@ -24,7 +24,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_true: files('pci-bridge.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
-acpi_ss.add(when: 'CONFIG_ACPI_ICH9', if_true: files('ich9.c', 'ich9_tco.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_ICH9', if_true: files('ich9.c', 'ich9_tco.c', 'ich9_timer.c'))
acpi_ss.add(when: 'CONFIG_ACPI_ERST', if_true: files('erst.c'))
acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c
index 20b70dc..d511a85 100644
--- a/hw/acpi/pci.c
+++ b/hw/acpi/pci.c
@@ -24,8 +24,14 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qom/object_interfaces.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/pci.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pcie_host.h"
/*
@@ -59,3 +65,239 @@ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info,
acpi_table_end(linker, &table);
}
+
+typedef struct AcpiGenericInitiator {
+ /* private */
+ Object parent;
+
+ /* public */
+ char *pci_dev;
+ uint32_t node;
+} AcpiGenericInitiator;
+
+typedef struct AcpiGenericInitiatorClass {
+ ObjectClass parent_class;
+} AcpiGenericInitiatorClass;
+
+#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator"
+
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator,
+ ACPI_GENERIC_INITIATOR, OBJECT,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR)
+
+static void acpi_generic_initiator_init(Object *obj)
+{
+ AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
+
+ gi->node = MAX_NODES;
+ gi->pci_dev = NULL;
+}
+
+static void acpi_generic_initiator_finalize(Object *obj)
+{
+ AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
+
+ g_free(gi->pci_dev);
+}
+
+static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val,
+ Error **errp)
+{
+ AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
+
+ gi->pci_dev = g_strdup(val);
+}
+
+static void acpi_generic_initiator_set_node(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj);
+ MachineState *ms = MACHINE(qdev_get_machine());
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value >= MAX_NODES) {
+ error_printf("%s: Invalid NUMA node specified\n",
+ TYPE_ACPI_GENERIC_INITIATOR);
+ exit(1);
+ }
+
+ gi->node = value;
+ ms->numa_state->nodes[gi->node].has_gi = true;
+}
+
+static void acpi_generic_initiator_class_init(ObjectClass *oc, const void *data)
+{
+ object_class_property_add_str(oc, "pci-dev", NULL,
+ acpi_generic_initiator_set_pci_device);
+ object_class_property_set_description(oc, "pci-dev",
+ "PCI device to associate with the node");
+ object_class_property_add(oc, "node", "int", NULL,
+ acpi_generic_initiator_set_node, NULL, NULL);
+ object_class_property_set_description(oc, "node",
+ "NUMA node associated with the PCI device");
+}
+
+static int build_acpi_generic_initiator(Object *obj, void *opaque)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ AcpiGenericInitiator *gi;
+ GArray *table_data = opaque;
+ int32_t devfn;
+ uint8_t bus;
+ Object *o;
+
+ if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) {
+ return 0;
+ }
+
+ gi = ACPI_GENERIC_INITIATOR(obj);
+ if (gi->node >= ms->numa_state->num_nodes) {
+ error_printf("%s: Specified node %d is invalid.\n",
+ TYPE_ACPI_GENERIC_INITIATOR, gi->node);
+ exit(1);
+ }
+
+ o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL);
+ if (!o) {
+ error_printf("%s: Specified device must be a PCI device.\n",
+ TYPE_ACPI_GENERIC_INITIATOR);
+ exit(1);
+ }
+
+ bus = object_property_get_uint(o, "busnr", &error_fatal);
+ devfn = object_property_get_uint(o, "addr", &error_fatal);
+ /* devfn is constrained in PCI to be 8 bit but storage is an int32_t */
+ assert(devfn >= 0 && devfn < PCI_DEVFN_MAX);
+
+ build_srat_pci_generic_initiator(table_data, gi->node, 0, bus, devfn);
+
+ return 0;
+}
+
+typedef struct AcpiGenericPort {
+ /* private */
+ Object parent;
+
+ /* public */
+ char *pci_bus;
+ uint32_t node;
+} AcpiGenericPort;
+
+typedef struct AcpiGenericPortClass {
+ ObjectClass parent_class;
+} AcpiGenericPortClass;
+
+#define TYPE_ACPI_GENERIC_PORT "acpi-generic-port"
+
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericPort, acpi_generic_port,
+ ACPI_GENERIC_PORT, OBJECT,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericPort, ACPI_GENERIC_PORT)
+
+static void acpi_generic_port_init(Object *obj)
+{
+ AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj);
+
+ gp->node = MAX_NODES;
+ gp->pci_bus = NULL;
+}
+
+static void acpi_generic_port_finalize(Object *obj)
+{
+ AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj);
+
+ g_free(gp->pci_bus);
+}
+
+static void acpi_generic_port_set_pci_bus(Object *obj, const char *val,
+ Error **errp)
+{
+ AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj);
+
+ gp->pci_bus = g_strdup(val);
+}
+
+static void acpi_generic_port_set_node(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj);
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value >= MAX_NODES) {
+ error_printf("%s: Invalid NUMA node specified\n",
+ TYPE_ACPI_GENERIC_PORT);
+ exit(1);
+ }
+
+ gp->node = value;
+}
+
+static void acpi_generic_port_class_init(ObjectClass *oc, const void *data)
+{
+ object_class_property_add_str(oc, "pci-bus", NULL,
+ acpi_generic_port_set_pci_bus);
+ object_class_property_set_description(oc, "pci-bus",
+ "PCI Bus of the host bridge associated with this GP affinity structure");
+ object_class_property_add(oc, "node", "int", NULL,
+ acpi_generic_port_set_node, NULL, NULL);
+ object_class_property_set_description(oc, "node",
+ "The NUMA node like ID to index HMAT/SLIT NUMA properties involving GP");
+}
+
+static int build_acpi_generic_port(Object *obj, void *opaque)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ const char *hid = "ACPI0016";
+ GArray *table_data = opaque;
+ AcpiGenericPort *gp;
+ uint32_t uid;
+ Object *o;
+
+ if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_PORT)) {
+ return 0;
+ }
+
+ gp = ACPI_GENERIC_PORT(obj);
+
+ if (gp->node >= ms->numa_state->num_nodes) {
+ error_printf("%s: node %d is invalid.\n",
+ TYPE_ACPI_GENERIC_PORT, gp->node);
+ exit(1);
+ }
+
+ o = object_resolve_path_type(gp->pci_bus, TYPE_PXB_CXL_BUS, NULL);
+ if (!o) {
+ error_printf("%s: device must be a CXL host bridge.\n",
+ TYPE_ACPI_GENERIC_PORT);
+ exit(1);
+ }
+
+ uid = object_property_get_uint(o, "acpi_uid", &error_fatal);
+ build_srat_acpi_generic_port(table_data, gp->node, hid, uid);
+
+ return 0;
+}
+
+void build_srat_generic_affinity_structures(GArray *table_data)
+{
+ object_child_foreach_recursive(object_get_root(),
+ build_acpi_generic_initiator,
+ table_data);
+ object_child_foreach_recursive(object_get_root(), build_acpi_generic_port,
+ table_data);
+}
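
Both objects moved into this file are user-creatable, so they are normally instantiated from the command line. A hedged example of the intended invocation — the object ids, the PCI device id "dev0", the bus name "cxl.1" and the NUMA node numbers are placeholders, and the surrounding NUMA/CXL topology is assumed to exist:

    -object acpi-generic-initiator,id=gi0,pci-dev=dev0,node=1
    -object acpi-generic-port,id=gp0,pci-bus=cxl.1,node=2

Per the builders above, pci-dev must resolve to a PCI device and pci-bus to a pxb-cxl host bridge bus (TYPE_PXB_CXL_BUS); otherwise the SRAT build path prints an error and exits.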
diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index 5f79c90..aac9001 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -371,7 +371,7 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS);
}
-bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus)
+bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus)
{
Object *o = OBJECT(bus->parent);
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index debe1ad..d98b80d 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -28,9 +28,9 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/pcihp.h"
#include "hw/acpi/piix4.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/xen.h"
#include "qapi/error.h"
#include "qemu/range.h"
#include "hw/acpi/cpu_hotplug.h"
@@ -406,7 +406,7 @@ static bool piix4_is_hotpluggable_bus(HotplugHandler *hotplug_dev,
BusState *bus)
{
PIIX4PMState *s = PIIX4_PM(hotplug_dev);
- return acpi_pcihp_is_hotpluggbale_bus(&s->acpi_pci_hotplug, bus);
+ return acpi_pcihp_is_hotpluggable_bus(&s->acpi_pci_hotplug, bus);
}
static void piix4_pm_machine_ready(Notifier *n, void *opaque)
@@ -602,7 +602,7 @@ static void piix4_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
acpi_send_gpe_event(&s->ar, s->irq, ev);
}
-static Property piix4_pm_properties[] = {
+static const Property piix4_pm_properties[] = {
DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0),
DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0),
DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0),
@@ -617,10 +617,9 @@ static Property piix4_pm_properties[] = {
DEFINE_PROP_BOOL("smm-enabled", PIIX4PMState, smm_enabled, false),
DEFINE_PROP_BOOL("x-not-migrate-acpi-index", PIIX4PMState,
not_migrate_acpi_index, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void piix4_pm_class_init(ObjectClass *klass, void *data)
+static void piix4_pm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -633,7 +632,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_INTEL_82371AB_3;
k->revision = 0x03;
k->class_id = PCI_CLASS_BRIDGE_OTHER;
- dc->reset = piix4_pm_reset;
+ device_class_set_legacy_reset(dc, piix4_pm_reset);
dc->desc = "PM";
dc->vmsd = &vmstate_acpi;
device_class_set_props(dc, piix4_pm_properties);
@@ -658,7 +657,7 @@ static const TypeInfo piix4_pm_info = {
.instance_init = piix4_pm_init,
.instance_size = sizeof(PIIX4PMState),
.class_init = piix4_pm_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
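
The piix4 hunks above show, in one place, the QOM modernization that recurs throughout this series: property arrays become const and drop DEFINE_PROP_END_OF_LIST(), class_init takes a const data pointer, interface lists become const, and direct dc->reset assignments go through device_class_set_legacy_reset(). A condensed before/after sketch — FooState and the foo_* names are illustrative only:

    /* before */
    static Property foo_properties[] = {
        DEFINE_PROP_UINT32("bar", FooState, bar, 0),
        DEFINE_PROP_END_OF_LIST(),
    };
    static void foo_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);
        dc->reset = foo_reset;
        device_class_set_props(dc, foo_properties);
    }

    /* after */
    static const Property foo_properties[] = {
        DEFINE_PROP_UINT32("bar", FooState, bar, 0),
    };
    static void foo_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);
        device_class_set_legacy_reset(dc, foo_reset);
        device_class_set_props(dc, foo_properties);
    }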
diff --git a/hw/acpi/vmclock.c b/hw/acpi/vmclock.c
new file mode 100644
index 0000000..c582c0c
--- /dev/null
+++ b/hw/acpi/vmclock.c
@@ -0,0 +1,179 @@
+/*
+ * Virtual Machine Clock Device
+ *
+ * Copyright © 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/i386/e820_memory_layout.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/vmclock.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "system/reset.h"
+
+#include "standard-headers/linux/vmclock-abi.h"
+
+void vmclock_build_acpi(VmclockState *vms, GArray *table_data,
+ BIOSLinker *linker, const char *oem_id)
+{
+ Aml *ssdt, *dev, *scope, *crs;
+ AcpiTable table = { .sig = "SSDT", .rev = 1,
+ .oem_id = oem_id, .oem_table_id = "VMCLOCK" };
+
+ /* Put VMCLOCK into a separate SSDT table */
+ acpi_table_begin(&table, table_data);
+ ssdt = init_aml_allocator();
+
+ scope = aml_scope("\\_SB");
+ dev = aml_device("VCLK");
+ aml_append(dev, aml_name_decl("_HID", aml_string("AMZNC10C")));
+ aml_append(dev, aml_name_decl("_CID", aml_string("VMCLOCK")));
+ aml_append(dev, aml_name_decl("_DDN", aml_string("VMCLOCK")));
+
+ /* Simple status method */
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
+
+ crs = aml_resource_template();
+ aml_append(crs, aml_qword_memory(AML_POS_DECODE,
+ AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_CACHEABLE, AML_READ_ONLY,
+ 0xffffffffffffffffULL,
+ vms->physaddr,
+ vms->physaddr + VMCLOCK_SIZE - 1,
+ 0, VMCLOCK_SIZE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+ aml_append(ssdt, scope);
+
+ g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
+ acpi_table_end(linker, &table);
+ free_aml_allocator();
+}
+
+static void vmclock_update_guest(VmclockState *vms)
+{
+ uint64_t disruption_marker;
+ uint32_t seq_count;
+
+ if (!vms->clk) {
+ return;
+ }
+
+ seq_count = le32_to_cpu(vms->clk->seq_count) | 1;
+ vms->clk->seq_count = cpu_to_le32(seq_count);
+ /* These barriers pair with read barriers in the guest */
+ smp_wmb();
+
+ disruption_marker = le64_to_cpu(vms->clk->disruption_marker);
+ disruption_marker++;
+ vms->clk->disruption_marker = cpu_to_le64(disruption_marker);
+
+ /* These barriers pair with read barriers in the guest */
+ smp_wmb();
+ vms->clk->seq_count = cpu_to_le32(seq_count + 1);
+}
+
+/*
+ * After restoring an image, we need to update the guest memory to notify
+ * it of clock disruption.
+ */
+static int vmclock_post_load(void *opaque, int version_id)
+{
+ VmclockState *vms = opaque;
+
+ vmclock_update_guest(vms);
+ return 0;
+}
+
+static const VMStateDescription vmstate_vmclock = {
+ .name = "vmclock",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = vmclock_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(physaddr, VmclockState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void vmclock_handle_reset(void *opaque)
+{
+ VmclockState *vms = VMCLOCK(opaque);
+
+ if (!memory_region_is_mapped(&vms->clk_page)) {
+ memory_region_add_subregion_overlap(get_system_memory(),
+ vms->physaddr,
+ &vms->clk_page, 0);
+ }
+}
+
+static void vmclock_realize(DeviceState *dev, Error **errp)
+{
+ VmclockState *vms = VMCLOCK(dev);
+
+ /*
+ * Given that this function is executing, there is at least one VMCLOCK
+ * device. Check if there are several.
+ */
+ if (!find_vmclock_dev()) {
+ error_setg(errp, "at most one %s device is permitted", TYPE_VMCLOCK);
+ return;
+ }
+
+ vms->physaddr = VMCLOCK_ADDR;
+
+ e820_add_entry(vms->physaddr, VMCLOCK_SIZE, E820_RESERVED);
+
+ memory_region_init_ram(&vms->clk_page, OBJECT(dev), "vmclock_page",
+ VMCLOCK_SIZE, &error_abort);
+ memory_region_set_enabled(&vms->clk_page, true);
+ vms->clk = memory_region_get_ram_ptr(&vms->clk_page);
+ memset(vms->clk, 0, VMCLOCK_SIZE);
+
+ vms->clk->magic = cpu_to_le32(VMCLOCK_MAGIC);
+ vms->clk->size = cpu_to_le16(VMCLOCK_SIZE);
+ vms->clk->version = cpu_to_le16(1);
+
+ /* These are all zero and thus default, but be explicit */
+ vms->clk->clock_status = VMCLOCK_STATUS_UNKNOWN;
+ vms->clk->counter_id = VMCLOCK_COUNTER_INVALID;
+
+ qemu_register_reset(vmclock_handle_reset, vms);
+
+ vmclock_update_guest(vms);
+}
+
+static void vmclock_device_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_vmclock;
+ dc->realize = vmclock_realize;
+ dc->hotpluggable = false;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo vmclock_device_info = {
+ .name = TYPE_VMCLOCK,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VmclockState),
+ .class_init = vmclock_device_class_init,
+};
+
+static void vmclock_register_types(void)
+{
+ type_register_static(&vmclock_device_info);
+}
+
+type_init(vmclock_register_types)
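
vmclock_update_guest() follows a seqcount discipline: seq_count is bumped to an odd value, the disruption marker is updated, and seq_count is then made even again, with write barriers between the steps. A sketch of the guest-side reader those barriers pair with — not QEMU code; the field names follow the usage above and the struct name is assumed from the vmclock-abi header:

    static uint64_t read_disruption_marker(const struct vmclock_abi *clk)
    {
        uint32_t seq;
        uint64_t marker;

        do {
            seq = le32_to_cpu(clk->seq_count);
            smp_rmb();                       /* pairs with smp_wmb() in the device */
            marker = le64_to_cpu(clk->disruption_marker);
            smp_rmb();
        } while ((seq & 1) || seq != le32_to_cpu(clk->seq_count));

        return marker;
    }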
diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c
index e63c8af..fac3d6d 100644
--- a/hw/acpi/vmgenid.c
+++ b/hw/acpi/vmgenid.c
@@ -20,7 +20,7 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid,
BIOSLinker *linker, const char *oem_id)
@@ -214,12 +214,11 @@ static void vmgenid_realize(DeviceState *dev, Error **errp)
vmgenid_update_guest(vms);
}
-static Property vmgenid_device_properties[] = {
+static const Property vmgenid_device_properties[] = {
DEFINE_PROP_UUID(VMGENID_GUID, VmGenIdState, guid),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vmgenid_device_class_init(ObjectClass *klass, void *data)
+static void vmgenid_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/adc/Kconfig b/hw/adc/Kconfig
index a825bd3..25d2229 100644
--- a/hw/adc/Kconfig
+++ b/hw/adc/Kconfig
@@ -1,5 +1,2 @@
config STM32F2XX_ADC
bool
-
-config MAX111X
- bool
diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c
index 68bdbc7..3e820ca 100644
--- a/hw/adc/aspeed_adc.c
+++ b/hw/adc/aspeed_adc.c
@@ -286,18 +286,17 @@ static const VMStateDescription vmstate_aspeed_adc_engine = {
}
};
-static Property aspeed_adc_engine_properties[] = {
+static const Property aspeed_adc_engine_properties[] = {
DEFINE_PROP_UINT32("engine-id", AspeedADCEngineState, engine_id, 0),
DEFINE_PROP_UINT32("nr-channels", AspeedADCEngineState, nr_channels, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_adc_engine_class_init(ObjectClass *klass, void *data)
+static void aspeed_adc_engine_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_adc_engine_realize;
- dc->reset = aspeed_adc_engine_reset;
+ device_class_set_legacy_reset(dc, aspeed_adc_engine_reset);
device_class_set_props(dc, aspeed_adc_engine_properties);
dc->desc = "Aspeed Analog-to-Digital Engine";
dc->vmsd = &vmstate_aspeed_adc_engine;
@@ -370,7 +369,7 @@ static void aspeed_adc_realize(DeviceState *dev, Error **errp)
}
}
-static void aspeed_adc_class_init(ObjectClass *klass, void *data)
+static void aspeed_adc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedADCClass *aac = ASPEED_ADC_CLASS(klass);
@@ -380,7 +379,7 @@ static void aspeed_adc_class_init(ObjectClass *klass, void *data)
aac->nr_engines = 1;
}
-static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_adc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedADCClass *aac = ASPEED_ADC_CLASS(klass);
@@ -389,7 +388,7 @@ static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data)
aac->nr_engines = 2;
}
-static void aspeed_1030_adc_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_adc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedADCClass *aac = ASPEED_ADC_CLASS(klass);
@@ -398,6 +397,15 @@ static void aspeed_1030_adc_class_init(ObjectClass *klass, void *data)
aac->nr_engines = 2;
}
+static void aspeed_2700_adc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedADCClass *aac = ASPEED_ADC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 ADC Controller";
+ aac->nr_engines = 2;
+}
+
static const TypeInfo aspeed_adc_info = {
.name = TYPE_ASPEED_ADC,
.parent = TYPE_SYS_BUS_DEVICE,
@@ -430,6 +438,12 @@ static const TypeInfo aspeed_1030_adc_info = {
.class_init = aspeed_1030_adc_class_init, /* No change since AST2600 */
};
+static const TypeInfo aspeed_2700_adc_info = {
+ .name = TYPE_ASPEED_2700_ADC,
+ .parent = TYPE_ASPEED_ADC,
+ .class_init = aspeed_2700_adc_class_init,
+};
+
static void aspeed_adc_register_types(void)
{
type_register_static(&aspeed_adc_engine_info);
@@ -438,6 +452,7 @@ static void aspeed_adc_register_types(void)
type_register_static(&aspeed_2500_adc_info);
type_register_static(&aspeed_2600_adc_info);
type_register_static(&aspeed_1030_adc_info);
+ type_register_static(&aspeed_2700_adc_info);
}
type_init(aspeed_adc_register_types);
diff --git a/hw/adc/max111x.c b/hw/adc/max111x.c
deleted file mode 100644
index 957d177..0000000
--- a/hw/adc/max111x.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Maxim MAX1110/1111 ADC chip emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "hw/adc/max111x.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "qemu/module.h"
-#include "hw/qdev-properties.h"
-
-/* Control-byte bitfields */
-#define CB_PD0 (1 << 0)
-#define CB_PD1 (1 << 1)
-#define CB_SGL (1 << 2)
-#define CB_UNI (1 << 3)
-#define CB_SEL0 (1 << 4)
-#define CB_SEL1 (1 << 5)
-#define CB_SEL2 (1 << 6)
-#define CB_START (1 << 7)
-
-#define CHANNEL_NUM(v, b0, b1, b2) \
- ((((v) >> (2 + (b0))) & 4) | \
- (((v) >> (3 + (b1))) & 2) | \
- (((v) >> (4 + (b2))) & 1))
-
-static uint32_t max111x_read(MAX111xState *s)
-{
- if (!s->tb1)
- return 0;
-
- switch (s->cycle ++) {
- case 1:
- return s->rb2;
- case 2:
- return s->rb3;
- }
-
- return 0;
-}
-
-/* Interpret a control-byte */
-static void max111x_write(MAX111xState *s, uint32_t value)
-{
- int measure, chan;
-
- /* Ignore the value if START bit is zero */
- if (!(value & CB_START))
- return;
-
- s->cycle = 0;
-
- if (!(value & CB_PD1)) {
- s->tb1 = 0;
- return;
- }
-
- s->tb1 = value;
-
- if (s->inputs == 8)
- chan = CHANNEL_NUM(value, 1, 0, 2);
- else
- chan = CHANNEL_NUM(value & ~CB_SEL0, 0, 1, 2);
-
- if (value & CB_SGL)
- measure = s->input[chan] - s->com;
- else
- measure = s->input[chan] - s->input[chan ^ 1];
-
- if (!(value & CB_UNI))
- measure ^= 0x80;
-
- s->rb2 = (measure >> 2) & 0x3f;
- s->rb3 = (measure << 6) & 0xc0;
-
- /* FIXME: When should the IRQ be lowered? */
- qemu_irq_raise(s->interrupt);
-}
-
-static uint32_t max111x_transfer(SSIPeripheral *dev, uint32_t value)
-{
- MAX111xState *s = MAX_111X(dev);
- max111x_write(s, value);
- return max111x_read(s);
-}
-
-static const VMStateDescription vmstate_max111x = {
- .name = "max111x",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_SSI_PERIPHERAL(parent_obj, MAX111xState),
- VMSTATE_UINT8(tb1, MAX111xState),
- VMSTATE_UINT8(rb2, MAX111xState),
- VMSTATE_UINT8(rb3, MAX111xState),
- VMSTATE_INT32_EQUAL(inputs, MAX111xState, NULL),
- VMSTATE_INT32(com, MAX111xState),
- VMSTATE_ARRAY_INT32_UNSAFE(input, MAX111xState, inputs,
- vmstate_info_uint8, uint8_t),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void max111x_input_set(void *opaque, int line, int value)
-{
- MAX111xState *s = MAX_111X(opaque);
-
- assert(line >= 0 && line < s->inputs);
- s->input[line] = value;
-}
-
-static int max111x_init(SSIPeripheral *d, int inputs)
-{
- DeviceState *dev = DEVICE(d);
- MAX111xState *s = MAX_111X(dev);
-
- qdev_init_gpio_out(dev, &s->interrupt, 1);
- qdev_init_gpio_in(dev, max111x_input_set, inputs);
-
- s->inputs = inputs;
-
- return 0;
-}
-
-static void max1110_realize(SSIPeripheral *dev, Error **errp)
-{
- max111x_init(dev, 8);
-}
-
-static void max1111_realize(SSIPeripheral *dev, Error **errp)
-{
- max111x_init(dev, 4);
-}
-
-static void max111x_reset(DeviceState *dev)
-{
- MAX111xState *s = MAX_111X(dev);
- int i;
-
- for (i = 0; i < s->inputs; i++) {
- s->input[i] = s->reset_input[i];
- }
- s->com = 0;
- s->tb1 = 0;
- s->rb2 = 0;
- s->rb3 = 0;
- s->cycle = 0;
-}
-
-static Property max1110_properties[] = {
- /* Reset values for ADC inputs */
- DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 0xf0),
- DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0),
- DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0),
- DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static Property max1111_properties[] = {
- /* Reset values for ADC inputs */
- DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 0xf0),
- DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0),
- DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0),
- DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0),
- DEFINE_PROP_UINT8("input4", MAX111xState, reset_input[4], 0xb0),
- DEFINE_PROP_UINT8("input5", MAX111xState, reset_input[5], 0xa0),
- DEFINE_PROP_UINT8("input6", MAX111xState, reset_input[6], 0x90),
- DEFINE_PROP_UINT8("input7", MAX111xState, reset_input[7], 0x80),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void max111x_class_init(ObjectClass *klass, void *data)
-{
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- k->transfer = max111x_transfer;
- dc->reset = max111x_reset;
- dc->vmsd = &vmstate_max111x;
- set_bit(DEVICE_CATEGORY_MISC, dc->categories);
-}
-
-static const TypeInfo max111x_info = {
- .name = TYPE_MAX_111X,
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(MAX111xState),
- .class_init = max111x_class_init,
- .abstract = true,
-};
-
-static void max1110_class_init(ObjectClass *klass, void *data)
-{
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- k->realize = max1110_realize;
- device_class_set_props(dc, max1110_properties);
-}
-
-static const TypeInfo max1110_info = {
- .name = TYPE_MAX_1110,
- .parent = TYPE_MAX_111X,
- .class_init = max1110_class_init,
-};
-
-static void max1111_class_init(ObjectClass *klass, void *data)
-{
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- k->realize = max1111_realize;
- device_class_set_props(dc, max1111_properties);
-}
-
-static const TypeInfo max1111_info = {
- .name = TYPE_MAX_1111,
- .parent = TYPE_MAX_111X,
- .class_init = max1111_class_init,
-};
-
-static void max111x_register_types(void)
-{
- type_register_static(&max111x_info);
- type_register_static(&max1110_info);
- type_register_static(&max1111_info);
-}
-
-type_init(max111x_register_types)
diff --git a/hw/adc/meson.build b/hw/adc/meson.build
index a4f85b7..7f7acc1 100644
--- a/hw/adc/meson.build
+++ b/hw/adc/meson.build
@@ -2,4 +2,3 @@ system_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c'))
system_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c'))
-system_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c'))
diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c
index de8469d..ddb219d 100644
--- a/hw/adc/npcm7xx_adc.c
+++ b/hw/adc/npcm7xx_adc.c
@@ -267,12 +267,11 @@ static const VMStateDescription vmstate_npcm7xx_adc = {
},
};
-static Property npcm7xx_timer_properties[] = {
+static const Property npcm7xx_timer_properties[] = {
DEFINE_PROP_UINT32("iref", NPCM7xxADCState, iref, NPCM7XX_ADC_DEFAULT_IREF),
- DEFINE_PROP_END_OF_LIST(),
};
-static void npcm7xx_adc_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_adc_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c
index e9df6ea..a490ae6 100644
--- a/hw/adc/stm32f2xx_adc.c
+++ b/hw/adc/stm32f2xx_adc.c
@@ -284,11 +284,11 @@ static void stm32f2xx_adc_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}
-static void stm32f2xx_adc_class_init(ObjectClass *klass, void *data)
+static void stm32f2xx_adc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f2xx_adc_reset;
+ device_class_set_legacy_reset(dc, stm32f2xx_adc_reset);
dc->vmsd = &vmstate_stm32f2xx_adc;
}
diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c
index 3426831..748a51b 100644
--- a/hw/adc/zynq-xadc.c
+++ b/hw/adc/zynq-xadc.c
@@ -281,12 +281,12 @@ static const VMStateDescription vmstate_zynq_xadc = {
}
};
-static void zynq_xadc_class_init(ObjectClass *klass, void *data)
+static void zynq_xadc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_zynq_xadc;
- dc->reset = zynq_xadc_reset;
+ device_class_set_legacy_reset(dc, zynq_xadc_reset);
}
static const TypeInfo zynq_xadc_info = {
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index 52a1fa3..19562b5 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -8,6 +8,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "elf.h"
#include "hw/loader.h"
#include "alpha_sys.h"
@@ -144,7 +145,7 @@ static void clipper_init(MachineState *machine)
}
size = load_elf(palcode_filename, NULL, cpu_alpha_superpage_to_phys,
NULL, &palcode_entry, NULL, NULL, NULL,
- 0, EM_ALPHA, 0, 0);
+ ELFDATA2LSB, EM_ALPHA, 0, 0);
if (size < 0) {
error_report("could not load palcode '%s'", palcode_filename);
exit(1);
@@ -163,7 +164,7 @@ static void clipper_init(MachineState *machine)
size = load_elf(kernel_filename, NULL, cpu_alpha_superpage_to_phys,
NULL, &kernel_entry, &kernel_low, NULL, NULL,
- 0, EM_ALPHA, 0, 0);
+ ELFDATA2LSB, EM_ALPHA, 0, 0);
if (size < 0) {
error_report("could not load kernel '%s'", kernel_filename);
exit(1);
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index e8711ae..4c56f98 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -9,6 +9,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/units.h"
+#include "exec/cpu-interrupt.h"
#include "qapi/error.h"
#include "hw/pci/pci_host.h"
#include "cpu.h"
@@ -934,7 +935,7 @@ static const TypeInfo typhoon_pcihost_info = {
};
static void typhoon_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 1ad60da..f543d94 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -2,6 +2,7 @@ config ARM_VIRT
bool
default y
depends on ARM
+ depends on TCG || KVM || HVF
imply PCI_DEVICES
imply TEST_DEVICES
imply VFIO_AMD_XGBE
@@ -37,13 +38,6 @@ config ARM_VIRT
select ACPI_CXL
select ACPI_HMAT
-config CHEETAH
- bool
- default y
- depends on TCG && ARM
- select OMAP
- select TSC210X
-
config CUBIEBOARD
bool
default y
@@ -77,7 +71,7 @@ config HIGHBANK
depends on TCG && ARM
select A9MPCORE
select A15MPCORE
- select AHCI
+ select AHCI_SYSBUS
select ARM_TIMER # sp804
select ARM_V7M
select PL011 # UART
@@ -101,14 +95,6 @@ config INTEGRATOR
select PL181 # display
select SMC91C111
-config MAINSTONE
- bool
- default y
- depends on TCG && ARM
- select PXA2XX
- select PFLASH_CFI01
- select SMC91C111
-
config MPS3R
bool
default y
@@ -119,7 +105,7 @@ config MUSCA
default y
depends on TCG && ARM
select ARMSSE
- select PL011
+ select PL011 # UART
select PL031
select SPLIT_IRQ
select UNIMP
@@ -136,7 +122,7 @@ config MUSICPAL
select MARVELL_88W8618
select PTIMER
select PFLASH_CFI02
- select SERIAL
+ select SERIAL_MM
select WM8750
config NETDUINO2
@@ -157,79 +143,13 @@ config OLIMEX_STM32_H405
depends on TCG && ARM
select STM32F405_SOC
-config NSERIES
- bool
- default y
- depends on TCG && ARM
- select OMAP
- select TMP105 # temperature sensor
- select BLIZZARD # LCD/TV controller
- select ONENAND
- select TSC210X # touchscreen/sensors/audio
- select TSC2005 # touchscreen/sensors/keypad
- select LM832X # GPIO keyboard chip
- select TWL92230 # energy-management
- select TUSB6010
-
config OMAP
bool
select FRAMEBUFFER
select I2C
- select ECC
- select NAND
select PFLASH_CFI01
select SD
- select SERIAL
-
-config PXA2XX
- bool
- select FRAMEBUFFER
- select I2C
- select SERIAL
- select SD
- select SSI
- select USB_OHCI_SYSBUS
- select PCMCIA
-
-config GUMSTIX
- bool
- default y
- depends on TCG && ARM
- select PFLASH_CFI01
- select SMC91C111
- select PXA2XX
-
-config TOSA
- bool
- default y
- depends on TCG && ARM
- select ZAURUS # scoop
- select MICRODRIVE
- select PXA2XX
- select LED
-
-config SPITZ
- bool
- default y
- depends on TCG && ARM
- select ADS7846 # touch-screen controller
- select MAX111X # A/D converter
- select WM8750 # audio codec
- select MAX7310 # GPIO expander
- select ZAURUS # scoop
- select NAND # memory
- select ECC # Error-correcting for NAND
- select MICRODRIVE
- select PXA2XX
-
-config Z2
- bool
- default y
- depends on TCG && ARM
- select PFLASH_CFI01
- select WM8750
- select PL011 # UART
- select PXA2XX
+ select SERIAL_MM
config REALVIEW
bool
@@ -248,7 +168,7 @@ config REALVIEW
select WM8750 # audio codec
select LSI_SCSI_PCI
select PCI
- select PL011 # UART
+ select PL011 # UART
select PL031 # RTC
select PL041 # audio codec
select PL050 # keyboard/mouse
@@ -267,7 +187,7 @@ config SBSA_REF
depends on TCG && AARCH64
imply PCI_DEVICES
select DEVICE_TREE
- select AHCI
+ select AHCI_SYSBUS
select ARM_SMMUV3
select GPIO_KEY
select PCI_EXPRESS
@@ -316,14 +236,15 @@ config STM32VLDISCOVERY
config STRONGARM
bool
- select PXA2XX
+ select PXA2XX_TIMER
+ select SSI
config COLLIE
bool
default y
depends on TCG && ARM
select PFLASH_CFI01
- select ZAURUS # scoop
+ select ZAURUS_SCOOP
select STRONGARM
config SX1
@@ -374,7 +295,7 @@ config ZYNQ
select PL330
select SDHCI
select SSI_M25P80
- select USB_EHCI_SYSBUS
+ select USB_CHIPIDEA
select XILINX # UART
select XILINX_AXI
select XILINX_SPI
@@ -390,7 +311,7 @@ config ARM_V7M
config ALLWINNER_A10
bool
- select AHCI
+ select AHCI_SYSBUS
select ALLWINNER_A10_PIT
select ALLWINNER_A10_PIC
select ALLWINNER_A10_CCM
@@ -398,8 +319,9 @@ config ALLWINNER_A10
select ALLWINNER_WDT
select ALLWINNER_EMAC
select ALLWINNER_I2C
+ select ALLWINNER_A10_SPI
select AXP2XX_PMU
- select SERIAL
+ select SERIAL_MM
select UNIMP
select USB_OHCI_SYSBUS
@@ -411,7 +333,7 @@ config ALLWINNER_H3
select ALLWINNER_SUN8I_EMAC
select ALLWINNER_I2C
select ALLWINNER_WDT
- select SERIAL
+ select SERIAL_MM
select ARM_TIMER
select ARM_GIC
select UNIMP
@@ -422,12 +344,12 @@ config ALLWINNER_H3
config ALLWINNER_R40
bool
default y if TCG && ARM
- select AHCI
+ select AHCI_SYSBUS
select ALLWINNER_SRAMC
select ALLWINNER_A10_PIT
select ALLWINNER_WDT
select AXP2XX_PMU
- select SERIAL
+ select SERIAL_MM
select ARM_TIMER
select ARM_GIC
select UNIMP
@@ -466,6 +388,7 @@ config STM32F405_SOC
bool
select ARM_V7M
select OR_IRQ
+ select STM32_RCC
select STM32F4XX_SYSCFG
select STM32F4XX_EXTI
@@ -490,7 +413,7 @@ config XLNX_ZYNQMP_ARM
bool
default y if PIXMAN
depends on TCG && AARCH64
- select AHCI
+ select AHCI_SYSBUS
select ARM_GIC
select CADENCE
select CPU_CLUSTER
@@ -515,7 +438,7 @@ config XLNX_VERSAL
select ARM_GIC
select CPU_CLUSTER
select DEVICE_TREE
- select PL011
+ select PL011 # UART
select CADENCE
select VIRTIO_MMIO
select UNIMP
@@ -542,12 +465,25 @@ config NPCM7XX
select ISL_PMBUS_VR
select PL310 # cache controller
select PMBUS
- select SERIAL
+ select SERIAL_MM
select SSI
select UNIMP
select PCA954X
select USB_OHCI_SYSBUS
+config NPCM8XX
+ bool
+ default y
+ depends on TCG && AARCH64
+ select ARM_GIC
+ select SMBUS
+ select PL310 # cache controller
+ select NPCM7XX
+ select SERIAL
+ select SSI
+ select UNIMP
+
+
config FSL_IMX25
bool
default y
@@ -556,6 +492,7 @@ config FSL_IMX25
select IMX
select IMX_FEC
select IMX_I2C
+ select USB_CHIPIDEA
select WDT_IMX2
select SDHCI
@@ -564,7 +501,7 @@ config FSL_IMX31
default y
depends on TCG && ARM
imply I2C_DEVICES
- select SERIAL
+ select SERIAL_MM
select IMX
select IMX_I2C
select WDT_IMX2
@@ -583,6 +520,8 @@ config FSL_IMX6
select PL310 # cache controller
select PCI_EXPRESS_DESIGNWARE
select SDHCI
+ select USB_CHIPIDEA
+ select OR_IRQ
config ASPEED_SOC
bool
@@ -593,7 +532,7 @@ config ASPEED_SOC
select I2C
select DPS310
select PCA9552
- select SERIAL
+ select SERIAL_MM
select SMBUS_EEPROM
select PCA954X
select SSI
@@ -606,6 +545,7 @@ config ASPEED_SOC
select PMBUS
select MAX31785
select FSI_APB2OPB_ASPEED
+ select AT24C
config MPS2
bool
@@ -639,7 +579,33 @@ config FSL_IMX7
select WDT_IMX2
select PCI_EXPRESS_DESIGNWARE
select SDHCI
+ select OR_IRQ
select UNIMP
+ select USB_CHIPIDEA
+
+config FSL_IMX8MP
+ bool
+ imply I2C_DEVICES
+ imply PCI_DEVICES
+ select ARM_GIC
+ select FSL_IMX8MP_ANALOG
+ select FSL_IMX8MP_CCM
+ select IMX
+ select IMX_FEC
+ select IMX_I2C
+ select OR_IRQ
+ select PCI_EXPRESS_DESIGNWARE
+ select PCI_EXPRESS_FSL_IMX8M_PHY
+ select SDHCI
+ select UNIMP
+ select USB_DWC3
+ select WDT_IMX2
+
+config FSL_IMX8MP_EVK
+ bool
+ default y
+ depends on TCG && AARCH64
+ select FSL_IMX8MP
config ARM_SMMUV3
bool
@@ -655,6 +621,7 @@ config FSL_IMX6UL
select IMX_I2C
select WDT_IMX2
select SDHCI
+ select USB_CHIPIDEA
select UNIMP
config MICROBIT
@@ -681,15 +648,10 @@ config MSF2
bool
select ARM_V7M
select PTIMER
- select SERIAL
+ select SERIAL_MM
select SSI
select UNIMP
-config ZAURUS
- bool
- select NAND
- select ECC
-
config ARMSSE
bool
select ARM_V7M
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
index 57d5d80..dc910d4 100644
--- a/hw/arm/allwinner-a10.c
+++ b/hw/arm/allwinner-a10.c
@@ -17,12 +17,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/sysbus.h"
#include "hw/arm/allwinner-a10.h"
#include "hw/misc/unimp.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/usb/hcd-ohci.h"
#include "hw/loader.h"
@@ -35,6 +36,7 @@
#define AW_A10_PIC_REG_BASE 0x01c20400
#define AW_A10_PIT_REG_BASE 0x01c20c00
#define AW_A10_UART0_REG_BASE 0x01c28000
+#define AW_A10_SPI0_BASE 0x01c05000
#define AW_A10_EMAC_BASE 0x01c0b000
#define AW_A10_EHCI_BASE 0x01c14000
#define AW_A10_OHCI_BASE 0x01c14400
@@ -49,9 +51,8 @@ void allwinner_a10_bootrom_setup(AwA10State *s, BlockBackend *blk)
g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size);
if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) {
- error_setg(&error_fatal, "%s: failed to read BlockBackend data",
- __func__);
- return;
+ error_report("%s: failed to read BlockBackend data", __func__);
+ exit(1);
}
rom_add_blob("allwinner-a10.bootrom", buffer, rom_size,
@@ -80,6 +81,8 @@ static void aw_a10_init(Object *obj)
object_initialize_child(obj, "i2c0", &s->i2c0, TYPE_AW_I2C);
+ object_initialize_child(obj, "spi0", &s->spi0, TYPE_AW_A10_SPI);
+
for (size_t i = 0; i < AW_A10_NUM_USB; i++) {
object_initialize_child(obj, "ehci[*]", &s->ehci[i],
TYPE_PLATFORM_EHCI);
@@ -155,7 +158,7 @@ static void aw_a10_realize(DeviceState *dev, Error **errp)
/* FIXME use a qdev chardev prop instead of serial_hd() */
serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2,
qdev_get_gpio_in(dev, 1),
- 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN);
for (size_t i = 0; i < AW_A10_NUM_USB; i++) {
g_autofree char *bus = g_strdup_printf("usb-bus.%zu", i);
@@ -195,12 +198,17 @@ static void aw_a10_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c0), 0, AW_A10_I2C0_BASE);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, qdev_get_gpio_in(dev, 7));
+ /* SPI */
+ sysbus_realize(SYS_BUS_DEVICE(&s->spi0), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi0), 0, AW_A10_SPI0_BASE);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi0), 0, qdev_get_gpio_in(dev, 10));
+
/* WDT */
sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_fatal);
sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->wdt), 0, AW_A10_WDT_BASE, 1);
}
-static void aw_a10_class_init(ObjectClass *oc, void *data)
+static void aw_a10_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c
index 6870c3f..edffc21 100644
--- a/hw/arm/allwinner-h3.c
+++ b/hw/arm/allwinner-h3.c
@@ -24,11 +24,11 @@
#include "qemu/units.h"
#include "hw/qdev-core.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/misc/unimp.h"
#include "hw/usb/hcd-ehci.h"
#include "hw/loader.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/arm/allwinner-h3.h"
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"
@@ -182,9 +182,8 @@ void allwinner_h3_bootrom_setup(AwH3State *s, BlockBackend *blk)
g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size);
if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) {
- error_setg(&error_fatal, "%s: failed to read BlockBackend data",
- __func__);
- return;
+ error_report("%s: failed to read BlockBackend data", __func__);
+ exit(1);
}
rom_add_blob("allwinner-h3.bootrom", buffer, rom_size,
@@ -409,19 +408,19 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp)
/* UART0. For future clocktree API: All UARTS are connected to APB2_CLK. */
serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART0], 2,
qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART0),
- 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN);
/* UART1 */
serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART1], 2,
qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART1),
- 115200, serial_hd(1), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(1), DEVICE_LITTLE_ENDIAN);
/* UART2 */
serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART2], 2,
qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART2),
- 115200, serial_hd(2), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(2), DEVICE_LITTLE_ENDIAN);
/* UART3 */
serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART3], 2,
qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART3),
- 115200, serial_hd(3), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(3), DEVICE_LITTLE_ENDIAN);
/* DRAMC */
sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal);
@@ -467,7 +466,7 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp)
}
}
-static void allwinner_h3_class_init(ObjectClass *oc, void *data)
+static void allwinner_h3_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c
index b8c7202..0bf7008 100644
--- a/hw/arm/allwinner-r40.c
+++ b/hw/arm/allwinner-r40.c
@@ -26,11 +26,11 @@
#include "hw/boards.h"
#include "hw/qdev-core.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/misc/unimp.h"
#include "hw/usb/hcd-ehci.h"
#include "hw/loader.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/arm/allwinner-r40.h"
#include "hw/misc/allwinner-r40-dramc.h"
#include "target/arm/cpu-qom.h"
@@ -231,9 +231,8 @@ bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit)
struct boot_file_head *head = (struct boot_file_head *)buffer;
if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) {
- error_setg(&error_fatal, "%s: failed to read BlockBackend data",
- __func__);
- return false;
+ error_report("%s: failed to read BlockBackend data", __func__);
+ exit(1);
}
/* we only check the magic string here. */
@@ -493,7 +492,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp)
serial_mm_init(get_system_memory(), addr, 2,
qdev_get_gpio_in(DEVICE(&s->gic), uart_irqs[i]),
- 115200, serial_hd(i), DEVICE_NATIVE_ENDIAN);
+ 115200, serial_hd(i), DEVICE_LITTLE_ENDIAN);
}
/* I2C */
@@ -540,7 +539,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp)
}
}
-static void allwinner_r40_class_init(ObjectClass *oc, void *data)
+static void allwinner_r40_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index 91502d1..50ab7f4 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -72,12 +72,13 @@ struct ARMSSEInfo {
bool has_cpu_pwrctrl;
bool has_sse_counter;
bool has_tcms;
- Property *props;
+ uint8_t props_count;
+ const Property *props;
const ARMSSEDeviceInfo *devinfo;
const bool *irq_is_common;
};
-static Property iotkit_properties[] = {
+static const Property iotkit_properties[] = {
DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
@@ -87,10 +88,9 @@ static Property iotkit_properties[] = {
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8),
DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
- DEFINE_PROP_END_OF_LIST()
};
-static Property sse200_properties[] = {
+static const Property sse200_properties[] = {
DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
@@ -104,10 +104,9 @@ static Property sse200_properties[] = {
DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
DEFINE_PROP_UINT32("CPU1_MPU_NS", ARMSSE, cpu_mpu_ns[1], 8),
DEFINE_PROP_UINT32("CPU1_MPU_S", ARMSSE, cpu_mpu_s[1], 8),
- DEFINE_PROP_END_OF_LIST()
};
-static Property sse300_properties[] = {
+static const Property sse300_properties[] = {
DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
@@ -117,7 +116,6 @@ static Property sse300_properties[] = {
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8),
DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8),
- DEFINE_PROP_END_OF_LIST()
};
static const ARMSSEDeviceInfo iotkit_devices[] = {
@@ -528,6 +526,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_sse_counter = false,
.has_tcms = false,
.props = iotkit_properties,
+ .props_count = ARRAY_SIZE(iotkit_properties),
.devinfo = iotkit_devices,
.irq_is_common = sse200_irq_is_common,
},
@@ -549,6 +548,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_sse_counter = false,
.has_tcms = false,
.props = sse200_properties,
+ .props_count = ARRAY_SIZE(sse200_properties),
.devinfo = sse200_devices,
.irq_is_common = sse200_irq_is_common,
},
@@ -570,6 +570,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_sse_counter = true,
.has_tcms = true,
.props = sse300_properties,
+ .props_count = ARRAY_SIZE(sse300_properties),
.devinfo = sse300_devices,
.irq_is_common = sse300_irq_is_common,
},
@@ -1690,7 +1691,7 @@ static void armsse_reset(DeviceState *dev)
s->nsccfg = 0;
}
-static void armsse_class_init(ObjectClass *klass, void *data)
+static void armsse_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass);
@@ -1699,8 +1700,8 @@ static void armsse_class_init(ObjectClass *klass, void *data)
dc->realize = armsse_realize;
dc->vmsd = &armsse_vmstate;
- device_class_set_props(dc, info->props);
- dc->reset = armsse_reset;
+ device_class_set_props_n(dc, info->props, info->props_count);
+ device_class_set_legacy_reset(dc, armsse_reset);
iic->check = armsse_idau_check;
asc->info = info;
}
@@ -1712,7 +1713,7 @@ static const TypeInfo armsse_info = {
.class_size = sizeof(ARMSSEClass),
.instance_init = armsse_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IDAU_INTERFACE },
{ }
}
@@ -1729,9 +1730,9 @@ static void armsse_register_types(void)
.name = armsse_variants[i].name,
.parent = TYPE_ARM_SSE,
.class_init = armsse_class_init,
- .class_data = (void *)&armsse_variants[i],
+ .class_data = &armsse_variants[i],
};
- type_register(&ti);
+ type_register_static(&ti);
}
}
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index 7c68525..cea3eb4 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -16,7 +16,7 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-clock.h"
#include "elf.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/log.h"
@@ -140,7 +140,7 @@ static MemTxResult v7m_sysreg_ns_write(void *opaque, hwaddr addr,
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
return memory_region_dispatch_write(mr, addr, value,
- size_memop(size) | MO_TE, attrs);
+ size_memop(size) | MO_LE, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@@ -160,7 +160,7 @@ static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr,
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
return memory_region_dispatch_read(mr, addr, data,
- size_memop(size) | MO_TE, attrs);
+ size_memop(size) | MO_LE, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@@ -174,7 +174,7 @@ static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr,
static const MemoryRegionOps v7m_sysreg_ns_ops = {
.read_with_attrs = v7m_sysreg_ns_read,
.write_with_attrs = v7m_sysreg_ns_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static MemTxResult v7m_systick_write(void *opaque, hwaddr addr,
@@ -187,7 +187,7 @@ static MemTxResult v7m_systick_write(void *opaque, hwaddr addr,
/* Direct the access to the correct systick */
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
return memory_region_dispatch_write(mr, addr, value,
- size_memop(size) | MO_TE, attrs);
+ size_memop(size) | MO_LE, attrs);
}
static MemTxResult v7m_systick_read(void *opaque, hwaddr addr,
@@ -199,14 +199,14 @@ static MemTxResult v7m_systick_read(void *opaque, hwaddr addr,
/* Direct the access to the correct systick */
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
- return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE,
- attrs);
+ return memory_region_dispatch_read(mr, addr, data,
+ size_memop(size) | MO_LE, attrs);
}
static const MemoryRegionOps v7m_systick_ops = {
.read_with_attrs = v7m_systick_read,
.write_with_attrs = v7m_systick_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
@@ -538,7 +538,7 @@ static void armv7m_realize(DeviceState *dev, Error **errp)
}
}
-static Property armv7m_properties[] = {
+static const Property armv7m_properties[] = {
DEFINE_PROP_STRING("cpu-type", ARMv7MState, cpu_type),
DEFINE_PROP_LINK("memory", ARMv7MState, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
@@ -552,7 +552,6 @@ static Property armv7m_properties[] = {
DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true),
DEFINE_PROP_UINT32("mpu-ns-regions", ARMv7MState, mpu_ns_regions, UINT_MAX),
DEFINE_PROP_UINT32("mpu-s-regions", ARMv7MState, mpu_s_regions, UINT_MAX),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_armv7m = {
@@ -566,7 +565,7 @@ static const VMStateDescription vmstate_armv7m = {
}
};
-static void armv7m_class_init(ObjectClass *klass, void *data)
+static void armv7m_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -609,7 +608,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
if (kernel_filename) {
image_size = load_elf_as(kernel_filename, NULL, NULL, NULL,
&entry, NULL, NULL,
- NULL, 0, EM_ARM, 1, 0, as);
+ NULL, ELFDATA2LSB, EM_ARM, 1, 0, as);
if (image_size < 0) {
image_size = load_image_targphys_as(kernel_filename, mem_base,
mem_size, as);
@@ -631,14 +630,13 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
qemu_register_reset(armv7m_reset, cpu);
}
-static Property bitband_properties[] = {
+static const Property bitband_properties[] = {
DEFINE_PROP_UINT32("base", BitBandState, base, 0),
DEFINE_PROP_LINK("source-memory", BitBandState, source_memory,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void bitband_class_init(ObjectClass *klass, void *data)
+static void bitband_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index 53a4f66..d0b3336 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -23,13 +23,14 @@
#include "hw/sensor/tmp105.h"
#include "hw/misc/led.h"
#include "hw/qdev-properties.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/reset.h"
+#include "system/block-backend.h"
+#include "system/reset.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
+#include "qemu/datadir.h"
#include "qemu/units.h"
#include "hw/qdev-clock.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static struct arm_boot_info aspeed_board_binfo = {
.board_id = -1, /* device-tree-only board */
@@ -46,6 +47,7 @@ struct AspeedMachineState {
uint32_t uart_chosen;
char *fmc_model;
char *spi_model;
+ uint32_t hw_strap1;
};
/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */
@@ -180,16 +182,14 @@ struct AspeedMachineState {
#ifdef TARGET_AARCH64
/* AST2700 evb hardware value */
-#define AST2700_EVB_HW_STRAP1 0x000000C0
-#define AST2700_EVB_HW_STRAP2 0x00000003
+/* SCU HW Strap1 */
+#define AST2700_EVB_HW_STRAP1 0x00000800
+/* SCUIO HW Strap1 */
+#define AST2700_EVB_HW_STRAP2 0x00000700
#endif
-/* Tacoma hardware value */
-#define TACOMA_BMC_HW_STRAP1 0x00000000
-#define TACOMA_BMC_HW_STRAP2 0x00000040
-
/* Rainier hardware value: (QEMU prototype) */
-#define RAINIER_BMC_HW_STRAP1 0x00422016
+#define RAINIER_BMC_HW_STRAP1 (0x00422016 | SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC)
#define RAINIER_BMC_HW_STRAP2 0x80000848
/* Fuji hardware value */
@@ -265,7 +265,8 @@ static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size,
g_autofree void *storage = NULL;
int64_t size;
- /* The block backend size should have already been 'validated' by
+ /*
+ * The block backend size should have already been 'validated' by
* the creation of the m25p80 object.
*/
size = blk_getlength(blk);
@@ -305,6 +306,33 @@ static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk,
rom_size, &error_abort);
}
+#define VBOOTROM_FILE_NAME "ast27x0_bootrom.bin"
+
+/*
+ * This function locates the vbootrom image file specified via the command line
+ * using the -bios option. It loads the specified image into the vbootrom
+ * memory region and handles errors if the file cannot be found or loaded.
+ */
+static void aspeed_load_vbootrom(AspeedMachineState *bmc, const char *bios_name,
+ Error **errp)
+{
+ g_autofree char *filename = NULL;
+ AspeedSoCState *soc = bmc->soc;
+ int ret;
+
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!filename) {
+ error_setg(errp, "Could not find vbootrom image '%s'", bios_name);
+ return;
+ }
+
+ ret = load_image_mr(filename, &soc->vbootrom);
+ if (ret < 0) {
+ error_setg(errp, "Failed to load vbootrom image '%s'", bios_name);
+ return;
+ }
+}
+
void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
unsigned int count, int unit0)
{
@@ -327,14 +355,30 @@ void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
}
}
-static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo)
+static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo, bool emmc,
+ bool boot_emmc)
{
DeviceState *card;
if (!dinfo) {
return;
}
- card = qdev_new(TYPE_SD_CARD);
+ card = qdev_new(emmc ? TYPE_EMMC : TYPE_SD_CARD);
+
+ /*
+ * Force the boot properties of the eMMC device only when the
+ * machine is strapped to boot from eMMC. Without these
+ * settings, the machine would not boot.
+ *
+ * This also allows the machine to use an eMMC device without
+ * boot areas when booting from the flash device (or -kernel).
+ * Ideally, the device and its properties should be defined on
+ * the command line.
+ */
+ if (emmc && boot_emmc) {
+ qdev_prop_set_uint64(card, "boot-partition-size", 1 * MiB);
+ qdev_prop_set_uint8(card, "boot-config", 0x1 << 3);
+ }
qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo),
&error_fatal);
qdev_realize_and_unref(card,
@@ -350,11 +394,11 @@ static void connect_serial_hds_to_uarts(AspeedMachineState *bmc)
int uart_chosen = bmc->uart_chosen ? bmc->uart_chosen : amc->uart_default;
aspeed_soc_uart_set_chr(s, uart_chosen, serial_hd(0));
- for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) {
+ for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; uart++) {
if (uart == uart_chosen) {
continue;
}
- aspeed_soc_uart_set_chr(s, uart, serial_hd(i));
+ aspeed_soc_uart_set_chr(s, uart, serial_hd(i++));
}
}
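The loop rework above is subtle: i now counts attached serial backends rather than UARTs and only advances when a chardev is actually assigned, so the chosen UART (which already received serial_hd(0)) no longer leaves a gap in the -serial ordering. A standalone sketch of the resulting assignment order, with illustrative stand-in values (uarts_base 1, uarts_num 13, uart_chosen 5; the real code uses ASPEED_DEV_UART* enum values):

#include <stdio.h>

int main(void)
{
    int uarts_base = 1, uarts_num = 13, uart_chosen = 5;

    /* serial_hd(0) always goes to the chosen UART first */
    printf("UART%d <- serial_hd(0)\n", uart_chosen);

    /* same loop shape as connect_serial_hds_to_uarts() after the patch */
    for (int i = 1, uart = uarts_base; i < uarts_num; uart++) {
        if (uart == uart_chosen) {
            continue;
        }
        printf("UART%d <- serial_hd(%d)\n", uart, i++);
    }
    return 0;
}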
@@ -364,6 +408,9 @@ static void aspeed_machine_init(MachineState *machine)
AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine);
AspeedSoCClass *sc;
int i;
+ const char *bios_name = NULL;
+ DriveInfo *emmc0 = NULL;
+ bool boot_emmc;
bmc->soc = ASPEED_SOC(object_new(amc->soc_name));
object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc));
@@ -385,7 +432,7 @@ static void aspeed_machine_init(MachineState *machine)
}
}
- object_property_set_int(OBJECT(bmc->soc), "hw-strap1", amc->hw_strap1,
+ object_property_set_int(OBJECT(bmc->soc), "hw-strap1", bmc->hw_strap1,
&error_abort);
object_property_set_int(OBJECT(bmc->soc), "hw-strap2", amc->hw_strap2,
&error_abort);
@@ -393,6 +440,12 @@ static void aspeed_machine_init(MachineState *machine)
OBJECT(get_system_memory()), &error_abort);
object_property_set_link(OBJECT(bmc->soc), "dram",
OBJECT(machine->ram), &error_abort);
+ if (amc->sdhci_wp_inverted) {
+ for (i = 0; i < bmc->soc->sdhci.num_slots; i++) {
+ object_property_set_bool(OBJECT(&bmc->soc->sdhci.slots[i]),
+ "wp-inverted", true, &error_abort);
+ }
+ }
if (machine->kernel_filename) {
/*
* When booting with a -kernel command line there is no u-boot
@@ -434,26 +487,35 @@ static void aspeed_machine_init(MachineState *machine)
amc->i2c_init(bmc);
}
- for (i = 0; i < bmc->soc->sdhci.num_slots; i++) {
+ for (i = 0; i < bmc->soc->sdhci.num_slots && defaults_enabled(); i++) {
sdhci_attach_drive(&bmc->soc->sdhci.slots[i],
- drive_get(IF_SD, 0, i));
+ drive_get(IF_SD, 0, i), false, false);
}
- if (bmc->soc->emmc.num_slots) {
- sdhci_attach_drive(&bmc->soc->emmc.slots[0],
- drive_get(IF_SD, 0, bmc->soc->sdhci.num_slots));
+ boot_emmc = sc->boot_from_emmc(bmc->soc);
+
+ if (bmc->soc->emmc.num_slots && defaults_enabled()) {
+ emmc0 = drive_get(IF_SD, 0, bmc->soc->sdhci.num_slots);
+ sdhci_attach_drive(&bmc->soc->emmc.slots[0], emmc0, true, boot_emmc);
}
if (!bmc->mmio_exec) {
DeviceState *dev = ssi_get_cs(bmc->soc->fmc.spi, 0);
BlockBackend *fmc0 = dev ? m25p80_get_blk(dev) : NULL;
- if (fmc0) {
+ if (fmc0 && !boot_emmc) {
uint64_t rom_size = memory_region_size(&bmc->soc->spi_boot);
aspeed_install_boot_rom(bmc, fmc0, rom_size);
+ } else if (emmc0) {
+ aspeed_install_boot_rom(bmc, blk_by_legacy_dinfo(emmc0), 64 * KiB);
}
}
+ if (amc->vbootrom) {
+ bios_name = machine->firmware ?: VBOOTROM_FILE_NAME;
+ aspeed_load_vbootrom(bmc, bios_name, &error_abort);
+ }
+
arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo);
}
@@ -463,8 +525,10 @@ static void palmetto_bmc_i2c_init(AspeedMachineState *bmc)
DeviceState *dev;
uint8_t *eeprom_buf = g_malloc0(32 * 1024);
- /* The palmetto platform expects a ds3231 RTC but a ds1338 is
- * enough to provide basic RTC features. Alarms will be missing */
+ /*
+ * The palmetto platform expects a ds3231 RTC but a ds1338 is
+ * enough to provide basic RTC features. Alarms will be missing
+ */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0), "ds1338", 0x68);
smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 0), 0x50,
@@ -555,8 +619,10 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc)
{
AspeedSoCState *soc = bmc->soc;
- /* The romulus board expects Epson RX8900 I2C RTC but a ds1338 is
- * good enough */
+ /*
+ * The romulus board expects Epson RX8900 I2C RTC but a ds1338 is
+ * good enough
+ */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32);
}
@@ -664,8 +730,10 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc)
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), TYPE_TMP105,
0x4a);
- /* The witherspoon board expects Epson RX8900 I2C RTC but a ds1338 is
- * good enough */
+ /*
+ * The witherspoon board expects Epson RX8900 I2C RTC but a ds1338 is
+ * good enough
+ */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32);
smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 11), 0x51,
@@ -1065,7 +1133,10 @@ static void aspeed_set_mmio_exec(Object *obj, bool value, Error **errp)
static void aspeed_machine_instance_init(Object *obj)
{
+ AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(obj);
+
ASPEED_MACHINE(obj)->mmio_exec = false;
+ ASPEED_MACHINE(obj)->hw_strap1 = amc->hw_strap1;
}
static char *aspeed_get_fmc_model(Object *obj, Error **errp)
@@ -1162,7 +1233,35 @@ static void aspeed_machine_class_init_cpus_defaults(MachineClass *mc)
mc->valid_cpu_types = sc->valid_cpu_types;
}
-static void aspeed_machine_class_init(ObjectClass *oc, void *data)
+static bool aspeed_machine_ast2600_get_boot_from_emmc(Object *obj, Error **errp)
+{
+ AspeedMachineState *bmc = ASPEED_MACHINE(obj);
+
+ return !!(bmc->hw_strap1 & SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC);
+}
+
+static void aspeed_machine_ast2600_set_boot_from_emmc(Object *obj, bool value,
+ Error **errp)
+{
+ AspeedMachineState *bmc = ASPEED_MACHINE(obj);
+
+ if (value) {
+ bmc->hw_strap1 |= SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC;
+ } else {
+ bmc->hw_strap1 &= ~SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC;
+ }
+}
+
+static void aspeed_machine_ast2600_class_emmc_init(ObjectClass *oc)
+{
+ object_class_property_add_bool(oc, "boot-emmc",
+ aspeed_machine_ast2600_get_boot_from_emmc,
+ aspeed_machine_ast2600_set_boot_from_emmc);
+ object_class_property_set_description(oc, "boot-emmc",
+ "Set or unset boot from EMMC");
+}
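For readers tracking the eMMC boot support across this file, the pieces added above fit together as follows (a restatement of the patch's own flow, plus a hypothetical command-line spelling):

/*
 * boot-emmc data flow:
 *
 *   1. aspeed_machine_instance_init() seeds bmc->hw_strap1 from the class
 *      default amc->hw_strap1.
 *   2. The "boot-emmc" setter above flips SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC
 *      in that per-instance copy.
 *   3. aspeed_machine_init() passes bmc->hw_strap1 to the SoC, and the
 *      AST2600 SoC class reports the bit back via sc->boot_from_emmc().
 *   4. When the strap is set, sdhci_attach_drive() enables the eMMC boot
 *      partition properties and aspeed_install_boot_rom() installs the
 *      first 64 KiB of the eMMC backing file instead of the FMC flash.
 */

On the command line this surfaces as a machine option, e.g. -machine rainier-bmc,boot-emmc=off (assuming the usual machine-property syntax).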
+
+static void aspeed_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1178,7 +1277,8 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data)
aspeed_machine_class_props_init(oc);
}
-static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_palmetto_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1190,11 +1290,13 @@ static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx25l25635f";
amc->num_cs = 1;
amc->i2c_init = palmetto_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 256 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1206,12 +1308,13 @@ static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx25l25635e";
amc->num_cs = 1;
amc->i2c_init = quanta_q71l_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 128 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
}
static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1224,12 +1327,13 @@ static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc,
amc->num_cs = 1;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
amc->i2c_init = palmetto_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 256 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
}
static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1242,11 +1346,13 @@ static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc,
amc->num_cs = 1;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
amc->i2c_init = palmetto_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
}
-static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1258,11 +1364,13 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx25l25635f";
amc->num_cs = 1;
amc->i2c_init = ast2500_evb_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1275,11 +1383,13 @@ static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx25l25635e";
amc->num_cs = 2;
amc->i2c_init = yosemitev2_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_romulus_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1291,11 +1401,13 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx66l1g45g";
amc->num_cs = 2;
amc->i2c_init = romulus_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_tiogapass_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1308,11 +1420,13 @@ static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx25l25635e";
amc->num_cs = 2;
amc->i2c_init = tiogapass_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_sonorapass_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1324,11 +1438,13 @@ static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx66l1g45g";
amc->num_cs = 2;
amc->i2c_init = sonorapass_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_witherspoon_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1340,11 +1456,13 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data)
amc->spi_model = "mx66l1g45g";
amc->num_cs = 2;
amc->i2c_init = witherspoon_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1358,32 +1476,15 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data)
amc->num_cs = 1;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON |
ASPEED_MAC3_ON;
+ amc->sdhci_wp_inverted = true;
amc->i2c_init = ast2600_evb_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
+ aspeed_machine_ast2600_class_emmc_init(oc);
};
-static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
- AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
-
- mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)";
- amc->soc_name = "ast2600-a3";
- amc->hw_strap1 = TACOMA_BMC_HW_STRAP1;
- amc->hw_strap2 = TACOMA_BMC_HW_STRAP2;
- amc->fmc_model = "mx66l1g45g";
- amc->spi_model = "mx66l1g45g";
- amc->num_cs = 2;
- amc->macs_mask = ASPEED_MAC2_ON;
- amc->i2c_init = witherspoon_bmc_i2c_init; /* Same board layout */
- mc->default_ram_size = 1 * GiB;
- aspeed_machine_class_init_cpus_defaults(mc);
-
- mc->deprecation_reason = "Please use the similar 'rainier-bmc' machine";
-};
-
-static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_g220a_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1396,11 +1497,13 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
amc->i2c_init = g220a_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1024 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1413,11 +1516,12 @@ static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
amc->i2c_init = fp5280g2_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
-static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_rainier_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1431,13 +1535,15 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON;
amc->i2c_init = rainier_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
+ aspeed_machine_ast2600_class_emmc_init(oc);
};
#define FUJI_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB)
-static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_fuji_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1452,13 +1558,15 @@ static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data)
amc->macs_mask = ASPEED_MAC3_ON;
amc->i2c_init = fuji_bmc_i2c_init;
amc->uart_default = ASPEED_DEV_UART1;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = FUJI_BMC_RAM_SIZE;
aspeed_machine_class_init_cpus_defaults(mc);
};
#define BLETCHLEY_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB)
-static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_bletchley_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1472,16 +1580,17 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC2_ON;
amc->i2c_init = bletchley_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE;
aspeed_machine_class_init_cpus_defaults(mc);
}
-static void fby35_reset(MachineState *state, ShutdownCause reason)
+static void fby35_reset(MachineState *state, ResetType type)
{
AspeedMachineState *bmc = ASPEED_MACHINE(state);
AspeedGPIOState *gpio = &bmc->soc->gpio;
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
/* Board ID: 7 (Class-1, 4 slots) */
object_property_set_bool(OBJECT(gpio), "gpioV4", true, &error_fatal);
@@ -1502,7 +1611,7 @@ static void fby35_reset(MachineState *state, ShutdownCause reason)
object_property_set_bool(OBJECT(gpio), "gpioB5", false, &error_fatal);
}
-static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data)
+static void aspeed_machine_fby35_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1513,6 +1622,7 @@ static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC3_ON;
amc->i2c_init = fby35_i2c_init;
+ mc->auto_create_sdcard = true;
/* FIXME: Replace this macro with something more general */
mc->default_ram_size = FUJI_BMC_RAM_SIZE;
aspeed_machine_class_init_cpus_defaults(mc);
@@ -1541,18 +1651,20 @@ static void aspeed_minibmc_machine_init(MachineState *machine)
connect_serial_hds_to_uarts(bmc);
qdev_realize(DEVICE(bmc->soc), NULL, &error_abort);
- aspeed_board_init_flashes(&bmc->soc->fmc,
- bmc->fmc_model ? bmc->fmc_model : amc->fmc_model,
- amc->num_cs,
- 0);
+ if (defaults_enabled()) {
+ aspeed_board_init_flashes(&bmc->soc->fmc,
+ bmc->fmc_model ? bmc->fmc_model : amc->fmc_model,
+ amc->num_cs,
+ 0);
- aspeed_board_init_flashes(&bmc->soc->spi[0],
- bmc->spi_model ? bmc->spi_model : amc->spi_model,
- amc->num_cs, amc->num_cs);
+ aspeed_board_init_flashes(&bmc->soc->spi[0],
+ bmc->spi_model ? bmc->spi_model : amc->spi_model,
+ amc->num_cs, amc->num_cs);
- aspeed_board_init_flashes(&bmc->soc->spi[1],
- bmc->spi_model ? bmc->spi_model : amc->spi_model,
- amc->num_cs, (amc->num_cs * 2));
+ aspeed_board_init_flashes(&bmc->soc->spi[1],
+ bmc->spi_model ? bmc->spi_model : amc->spi_model,
+ amc->num_cs, (amc->num_cs * 2));
+ }
if (amc->i2c_init) {
amc->i2c_init(bmc);
@@ -1577,7 +1689,7 @@ static void ast1030_evb_i2c_init(AspeedMachineState *bmc)
}
static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1589,20 +1701,31 @@ static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc,
mc->init = aspeed_minibmc_machine_init;
amc->i2c_init = ast1030_evb_i2c_init;
mc->default_ram_size = 0;
- amc->fmc_model = "sst25vf032b";
- amc->spi_model = "sst25vf032b";
+ amc->fmc_model = "w25q80bl";
+ amc->spi_model = "w25q256";
amc->num_cs = 2;
amc->macs_mask = 0;
aspeed_machine_class_init_cpus_defaults(mc);
}
#ifdef TARGET_AARCH64
-static void aspeed_machine_ast2700_evb_class_init(ObjectClass *oc, void *data)
+static void ast2700_evb_i2c_init(AspeedMachineState *bmc)
+{
+ AspeedSoCState *soc = bmc->soc;
+
+ /* LM75 is compatible with TMP105 driver */
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0),
+ TYPE_TMP105, 0x4d);
+}
+
+static void aspeed_machine_ast2700a0_evb_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
- mc->desc = "Aspeed AST2700 EVB (Cortex-A35)";
+ mc->alias = "ast2700-evb";
+ mc->desc = "Aspeed AST2700 A0 EVB (Cortex-A35)";
amc->soc_name = "ast2700-a0";
amc->hw_strap1 = AST2700_EVB_HW_STRAP1;
amc->hw_strap2 = AST2700_EVB_HW_STRAP2;
@@ -1611,13 +1734,38 @@ static void aspeed_machine_ast2700_evb_class_init(ObjectClass *oc, void *data)
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON;
amc->uart_default = ASPEED_DEV_UART12;
+ amc->i2c_init = ast2700_evb_i2c_init;
+ amc->vbootrom = true;
+ mc->auto_create_sdcard = true;
+ mc->default_ram_size = 1 * GiB;
+ aspeed_machine_class_init_cpus_defaults(mc);
+}
+
+static void aspeed_machine_ast2700a1_evb_class_init(ObjectClass *oc,
+ const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
+
+ mc->desc = "Aspeed AST2700 A1 EVB (Cortex-A35)";
+ amc->soc_name = "ast2700-a1";
+ amc->hw_strap1 = AST2700_EVB_HW_STRAP1;
+ amc->hw_strap2 = AST2700_EVB_HW_STRAP2;
+ amc->fmc_model = "w25q01jvq";
+ amc->spi_model = "w25q512jv";
+ amc->num_cs = 2;
+ amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON;
+ amc->uart_default = ASPEED_DEV_UART12;
+ amc->i2c_init = ast2700_evb_i2c_init;
+ amc->vbootrom = true;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
}
#endif
static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1631,12 +1779,13 @@ static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc,
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON;
amc->i2c_init = qcom_dc_scm_bmc_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
@@ -1650,6 +1799,7 @@ static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc,
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON;
amc->i2c_init = qcom_dc_scm_firework_i2c_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
aspeed_machine_class_init_cpus_defaults(mc);
};
@@ -1692,10 +1842,6 @@ static const TypeInfo aspeed_machine_types[] = {
.parent = TYPE_ASPEED_MACHINE,
.class_init = aspeed_machine_yosemitev2_class_init,
}, {
- .name = MACHINE_TYPE_NAME("tacoma-bmc"),
- .parent = TYPE_ASPEED_MACHINE,
- .class_init = aspeed_machine_tacoma_class_init,
- }, {
.name = MACHINE_TYPE_NAME("tiogapass-bmc"),
.parent = TYPE_ASPEED_MACHINE,
.class_init = aspeed_machine_tiogapass_class_init,
@@ -1741,9 +1887,13 @@ static const TypeInfo aspeed_machine_types[] = {
.class_init = aspeed_minibmc_machine_ast1030_evb_class_init,
#ifdef TARGET_AARCH64
}, {
- .name = MACHINE_TYPE_NAME("ast2700-evb"),
+ .name = MACHINE_TYPE_NAME("ast2700a0-evb"),
+ .parent = TYPE_ASPEED_MACHINE,
+ .class_init = aspeed_machine_ast2700a0_evb_class_init,
+ }, {
+ .name = MACHINE_TYPE_NAME("ast2700a1-evb"),
.parent = TYPE_ASPEED_MACHINE,
- .class_init = aspeed_machine_ast2700_evb_class_init,
+ .class_init = aspeed_machine_ast2700a1_evb_class_init,
#endif
}, {
.name = TYPE_ASPEED_MACHINE,
diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c
index 9f98ad8..e6e1ee6 100644
--- a/hw/arm/aspeed_ast10x0.c
+++ b/hw/arm/aspeed_ast10x0.c
@@ -11,8 +11,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/qdev-clock.h"
#include "hw/misc/unimp.h"
#include "hw/arm/aspeed_soc.h"
@@ -116,7 +116,7 @@ static void aspeed_soc_ast1030_init(Object *obj)
char typename[64];
int i;
- if (sscanf(sc->name, "%7s", socname) != 1) {
+ if (sscanf(object_get_typename(obj), "%7s", socname) != 1) {
g_assert_not_reached();
}
@@ -415,7 +415,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
sc->memmap[ASPEED_DEV_JTAG1], 0x20);
}
-static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data)
+static void aspeed_soc_ast1030_class_init(ObjectClass *klass, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */
@@ -428,7 +428,6 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data)
dc->user_creatable = false;
dc->realize = aspeed_soc_ast1030_realize;
- sc->name = "ast1030-a1";
sc->valid_cpu_types = valid_cpu_types;
sc->silicon_rev = AST1030_A1_SILICON_REV;
sc->sram_size = 0xc0000;
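With sc->name gone from the SoC classes, the "socname" prefix used to build per-SoC child type names (such as "aspeed.sdhci-%s") is now derived from the QOM type name itself. A standalone illustration of the %7s derivation (plain C, not QEMU code):

#include <stdio.h>

int main(void)
{
    char socname[8];

    /* object_get_typename() returns names like "ast2600-a3"; %7s keeps
     * the first seven characters, i.e. the "ast2600" part. */
    if (sscanf("ast2600-a3", "%7s", socname) == 1) {
        printf("aspeed.sdhci-%s\n", socname);   /* aspeed.sdhci-ast2600 */
    }
    return 0;
}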
diff --git a/hw/arm/aspeed_ast2400.c b/hw/arm/aspeed_ast2400.c
index d125886..c7b0f21 100644
--- a/hw/arm/aspeed_ast2400.c
+++ b/hw/arm/aspeed_ast2400.c
@@ -15,12 +15,12 @@
#include "qapi/error.h"
#include "hw/misc/unimp.h"
#include "hw/arm/aspeed_soc.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "hw/i2c/aspeed_i2c.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "target/arm/cpu-qom.h"
#define ASPEED_SOC_IOMEM_SIZE 0x00200000
@@ -151,7 +151,7 @@ static void aspeed_ast2400_soc_init(Object *obj)
char socname[8];
char typename[64];
- if (sscanf(sc->name, "%7s", socname) != 1) {
+ if (sscanf(object_get_typename(obj), "%7s", socname) != 1) {
g_assert_not_reached();
}
@@ -224,7 +224,8 @@ static void aspeed_ast2400_soc_init(Object *obj)
snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname);
object_initialize_child(obj, "gpio", &s->gpio, typename);
- object_initialize_child(obj, "sdc", &s->sdhci, TYPE_ASPEED_SDHCI);
+ snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname);
+ object_initialize_child(obj, "sdc", &s->sdhci, typename);
object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort);
@@ -501,7 +502,7 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp)
aspeed_soc_get_irq(s, ASPEED_DEV_HACE));
}
-static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
+static void aspeed_soc_ast2400_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("arm926"),
@@ -514,7 +515,6 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
/* Reason: Uses serial_hds and nd_table in realize() directly */
dc->user_creatable = false;
- sc->name = "ast2400-a1";
sc->valid_cpu_types = valid_cpu_types;
sc->silicon_rev = AST2400_A1_SILICON_REV;
sc->sram_size = 0x8000;
@@ -530,7 +530,7 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
sc->get_irq = aspeed_soc_ast2400_get_irq;
}
-static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data)
+static void aspeed_soc_ast2500_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("arm1176"),
@@ -543,7 +543,6 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data)
/* Reason: Uses serial_hds and nd_table in realize() directly */
dc->user_creatable = false;
- sc->name = "ast2500-a1";
sc->valid_cpu_types = valid_cpu_types;
sc->silicon_rev = AST2500_A1_SILICON_REV;
sc->sram_size = 0x9000;
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index 31713de..d12707f 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -15,7 +15,7 @@
#include "qemu/error-report.h"
#include "hw/i2c/aspeed_i2c.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "target/arm/cpu-qom.h"
#define ASPEED_SOC_IOMEM_SIZE 0x00200000
@@ -157,7 +157,7 @@ static void aspeed_soc_ast2600_init(Object *obj)
char socname[8];
char typename[64];
- if (sscanf(sc->name, "%7s", socname) != 1) {
+ if (sscanf(object_get_typename(obj), "%7s", socname) != 1) {
g_assert_not_reached();
}
@@ -236,8 +236,8 @@ static void aspeed_soc_ast2600_init(Object *obj)
snprintf(typename, sizeof(typename), "aspeed.gpio-%s-1_8v", socname);
object_initialize_child(obj, "gpio_1_8v", &s->gpio_1_8v, typename);
- object_initialize_child(obj, "sd-controller", &s->sdhci,
- TYPE_ASPEED_SDHCI);
+ snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname);
+ object_initialize_child(obj, "sd-controller", &s->sdhci, typename);
object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort);
@@ -247,8 +247,7 @@ static void aspeed_soc_ast2600_init(Object *obj)
&s->sdhci.slots[i], TYPE_SYSBUS_SDHCI);
}
- object_initialize_child(obj, "emmc-controller", &s->emmc,
- TYPE_ASPEED_SDHCI);
+ object_initialize_child(obj, "emmc-controller", &s->emmc, typename);
object_property_set_int(OBJECT(&s->emmc), "num-slots", 1, &error_abort);
@@ -541,7 +540,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) {
return;
}
- aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]);
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0,
+ sc->memmap[ASPEED_DEV_GPIO]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0,
aspeed_soc_get_irq(s, ASPEED_DEV_GPIO));
@@ -646,7 +646,14 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
}
}
-static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
+static bool aspeed_soc_ast2600_boot_from_emmc(AspeedSoCState *s)
+{
+ uint32_t hw_strap1 = object_property_get_uint(OBJECT(&s->scu),
+ "hw-strap1", &error_abort);
+ return !!(hw_strap1 & SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC);
+}
+
+static void aspeed_soc_ast2600_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a7"),
@@ -659,7 +666,6 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
/* Reason: The Aspeed SoC can only be instantiated from a board */
dc->user_creatable = false;
- sc->name = "ast2600-a3";
sc->valid_cpu_types = valid_cpu_types;
sc->silicon_rev = AST2600_A3_SILICON_REV;
sc->sram_size = 0x16400;
@@ -673,6 +679,7 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
sc->memmap = aspeed_soc_ast2600_memmap;
sc->num_cpus = 2;
sc->get_irq = aspeed_soc_ast2600_get_irq;
+ sc->boot_from_emmc = aspeed_soc_ast2600_boot_from_emmc;
}
static const TypeInfo aspeed_soc_ast2600_types[] = {
diff --git a/hw/arm/aspeed_ast27x0-fc.c b/hw/arm/aspeed_ast27x0-fc.c
new file mode 100644
index 0000000..7087be4
--- /dev/null
+++ b/hw/arm/aspeed_ast27x0-fc.c
@@ -0,0 +1,200 @@
+/*
+ * ASPEED SoC 2700 family
+ *
+ * Copyright (C) 2025 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "system/block-backend.h"
+#include "system/system.h"
+#include "hw/arm/aspeed.h"
+#include "hw/boards.h"
+#include "hw/qdev-clock.h"
+#include "hw/arm/aspeed_soc.h"
+#include "hw/loader.h"
+#include "hw/arm/boot.h"
+#include "hw/block/flash.h"
+
+
+#define TYPE_AST2700A1FC MACHINE_TYPE_NAME("ast2700fc")
+OBJECT_DECLARE_SIMPLE_TYPE(Ast2700FCState, AST2700A1FC);
+
+static struct arm_boot_info ast2700fc_board_info = {
+ .board_id = -1, /* device-tree-only board */
+};
+
+struct Ast2700FCState {
+ MachineState parent_obj;
+
+ MemoryRegion ca35_memory;
+ MemoryRegion ca35_dram;
+ MemoryRegion ssp_memory;
+ MemoryRegion tsp_memory;
+
+ Clock *ssp_sysclk;
+ Clock *tsp_sysclk;
+
+ Aspeed27x0SoCState ca35;
+ Aspeed27x0SSPSoCState ssp;
+ Aspeed27x0TSPSoCState tsp;
+
+ bool mmio_exec;
+};
+
+#define AST2700FC_BMC_RAM_SIZE (1 * GiB)
+#define AST2700FC_CM4_DRAM_SIZE (32 * MiB)
+
+#define AST2700FC_HW_STRAP1 0x000000C0
+#define AST2700FC_HW_STRAP2 0x00000003
+#define AST2700FC_FMC_MODEL "w25q01jvq"
+#define AST2700FC_SPI_MODEL "w25q512jv"
+
+static void ast2700fc_ca35_init(MachineState *machine)
+{
+ Ast2700FCState *s = AST2700A1FC(machine);
+ AspeedSoCState *soc;
+ AspeedSoCClass *sc;
+
+ object_initialize_child(OBJECT(s), "ca35", &s->ca35, "ast2700-a1");
+ soc = ASPEED_SOC(&s->ca35);
+ sc = ASPEED_SOC_GET_CLASS(soc);
+
+ memory_region_init(&s->ca35_memory, OBJECT(&s->ca35), "ca35-memory",
+ UINT64_MAX);
+ memory_region_add_subregion(get_system_memory(), 0, &s->ca35_memory);
+
+ if (!memory_region_init_ram(&s->ca35_dram, OBJECT(&s->ca35), "ca35-dram",
+ AST2700FC_BMC_RAM_SIZE, &error_abort)) {
+ return;
+ }
+ if (!object_property_set_link(OBJECT(&s->ca35), "memory",
+ OBJECT(&s->ca35_memory),
+ &error_abort)) {
+ return;
+ }
+ if (!object_property_set_link(OBJECT(&s->ca35), "dram",
+ OBJECT(&s->ca35_dram), &error_abort)) {
+ return;
+ }
+ if (!object_property_set_int(OBJECT(&s->ca35), "ram-size",
+ AST2700FC_BMC_RAM_SIZE, &error_abort)) {
+ return;
+ }
+
+ for (int i = 0; i < sc->macs_num; i++) {
+ if (!qemu_configure_nic_device(DEVICE(&soc->ftgmac100[i]),
+ true, NULL)) {
+ break;
+ }
+ }
+ if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap1",
+ AST2700FC_HW_STRAP1, &error_abort)) {
+ return;
+ }
+ if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap2",
+ AST2700FC_HW_STRAP2, &error_abort)) {
+ return;
+ }
+ aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART12, serial_hd(0));
+ if (!qdev_realize(DEVICE(&s->ca35), NULL, &error_abort)) {
+ return;
+ }
+
+ /*
+ * AST2700 EVB has a LM75 temperature sensor on I2C bus 0 at address 0x4d.
+ */
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0), "tmp105", 0x4d);
+
+ aspeed_board_init_flashes(&soc->fmc, AST2700FC_FMC_MODEL, 2, 0);
+ aspeed_board_init_flashes(&soc->spi[0], AST2700FC_SPI_MODEL, 1, 2);
+
+ ast2700fc_board_info.ram_size = machine->ram_size;
+ ast2700fc_board_info.loader_start = sc->memmap[ASPEED_DEV_SDRAM];
+
+ arm_load_kernel(ARM_CPU(first_cpu), machine, &ast2700fc_board_info);
+}
+
+static void ast2700fc_ssp_init(MachineState *machine)
+{
+ AspeedSoCState *soc;
+ Ast2700FCState *s = AST2700A1FC(machine);
+ s->ssp_sysclk = clock_new(OBJECT(s), "SSP_SYSCLK");
+ clock_set_hz(s->ssp_sysclk, 200000000ULL);
+
+ object_initialize_child(OBJECT(s), "ssp", &s->ssp, TYPE_ASPEED27X0SSP_SOC);
+ memory_region_init(&s->ssp_memory, OBJECT(&s->ssp), "ssp-memory",
+ UINT64_MAX);
+
+ qdev_connect_clock_in(DEVICE(&s->ssp), "sysclk", s->ssp_sysclk);
+ if (!object_property_set_link(OBJECT(&s->ssp), "memory",
+ OBJECT(&s->ssp_memory), &error_abort)) {
+ return;
+ }
+
+ soc = ASPEED_SOC(&s->ssp);
+ aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART4, serial_hd(1));
+ if (!qdev_realize(DEVICE(&s->ssp), NULL, &error_abort)) {
+ return;
+ }
+}
+
+static void ast2700fc_tsp_init(MachineState *machine)
+{
+ AspeedSoCState *soc;
+ Ast2700FCState *s = AST2700A1FC(machine);
+ s->tsp_sysclk = clock_new(OBJECT(s), "TSP_SYSCLK");
+ clock_set_hz(s->tsp_sysclk, 200000000ULL);
+
+ object_initialize_child(OBJECT(s), "tsp", &s->tsp, TYPE_ASPEED27X0TSP_SOC);
+ memory_region_init(&s->tsp_memory, OBJECT(&s->tsp), "tsp-memory",
+ UINT64_MAX);
+
+ qdev_connect_clock_in(DEVICE(&s->tsp), "sysclk", s->tsp_sysclk);
+ if (!object_property_set_link(OBJECT(&s->tsp), "memory",
+ OBJECT(&s->tsp_memory), &error_abort)) {
+ return;
+ }
+
+ soc = ASPEED_SOC(&s->tsp);
+ aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART7, serial_hd(2));
+ if (!qdev_realize(DEVICE(&s->tsp), NULL, &error_abort)) {
+ return;
+ }
+}
+
+static void ast2700fc_init(MachineState *machine)
+{
+ ast2700fc_ca35_init(machine);
+ ast2700fc_ssp_init(machine);
+ ast2700fc_tsp_init(machine);
+}
+
+static void ast2700fc_class_init(ObjectClass *oc, const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->alias = "ast2700fc";
+ mc->desc = "ast2700 full core support";
+ mc->init = ast2700fc_init;
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->min_cpus = mc->max_cpus = mc->default_cpus = 6;
+}
+
+static const TypeInfo ast2700fc_types[] = {
+ {
+ .name = MACHINE_TYPE_NAME("ast2700fc"),
+ .parent = TYPE_MACHINE,
+ .class_init = ast2700fc_class_init,
+ .instance_size = sizeof(Ast2700FCState),
+ },
+};
+
+DEFINE_TYPES(ast2700fc_types)
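A short orientation note on the new machine, derived from the init code above (the CPU accounting is an assumption based on the AST2700's quad Cortex-A35 cluster):

/*
 * ast2700fc console wiring:
 *
 *   serial_hd(0) -> CA35 SoC UART12   (BMC firmware console)
 *   serial_hd(1) -> SSP Cortex-M4 UART4
 *   serial_hd(2) -> TSP Cortex-M4 UART7
 *
 * mc->min_cpus = mc->max_cpus = mc->default_cpus = 6 then covers the four
 * Cortex-A35 cores plus the SSP and TSP Cortex-M4 cores.
 */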
diff --git a/hw/arm/aspeed_ast27x0-ssp.c b/hw/arm/aspeed_ast27x0-ssp.c
new file mode 100644
index 0000000..80ec599
--- /dev/null
+++ b/hw/arm/aspeed_ast27x0-ssp.c
@@ -0,0 +1,294 @@
+/*
+ * ASPEED Ast27x0 SSP SoC
+ *
+ * Copyright (C) 2025 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-clock.h"
+#include "hw/misc/unimp.h"
+#include "hw/arm/aspeed_soc.h"
+
+#define AST2700_SSP_RAM_SIZE (32 * MiB)
+
+static const hwaddr aspeed_soc_ast27x0ssp_memmap[] = {
+ [ASPEED_DEV_SRAM] = 0x00000000,
+ [ASPEED_DEV_INTC] = 0x72100000,
+ [ASPEED_DEV_SCU] = 0x72C02000,
+ [ASPEED_DEV_SCUIO] = 0x74C02000,
+ [ASPEED_DEV_UART0] = 0x74C33000,
+ [ASPEED_DEV_UART1] = 0x74C33100,
+ [ASPEED_DEV_UART2] = 0x74C33200,
+ [ASPEED_DEV_UART3] = 0x74C33300,
+ [ASPEED_DEV_UART4] = 0x72C1A000,
+ [ASPEED_DEV_INTCIO] = 0x74C18000,
+ [ASPEED_DEV_IPC0] = 0x72C1C000,
+ [ASPEED_DEV_IPC1] = 0x74C39000,
+ [ASPEED_DEV_UART5] = 0x74C33400,
+ [ASPEED_DEV_UART6] = 0x74C33500,
+ [ASPEED_DEV_UART7] = 0x74C33600,
+ [ASPEED_DEV_UART8] = 0x74C33700,
+ [ASPEED_DEV_UART9] = 0x74C33800,
+ [ASPEED_DEV_UART10] = 0x74C33900,
+ [ASPEED_DEV_UART11] = 0x74C33A00,
+ [ASPEED_DEV_UART12] = 0x74C33B00,
+ [ASPEED_DEV_TIMER1] = 0x72C10000,
+};
+
+static const int aspeed_soc_ast27x0ssp_irqmap[] = {
+ [ASPEED_DEV_SCU] = 12,
+ [ASPEED_DEV_UART0] = 164,
+ [ASPEED_DEV_UART1] = 164,
+ [ASPEED_DEV_UART2] = 164,
+ [ASPEED_DEV_UART3] = 164,
+ [ASPEED_DEV_UART4] = 8,
+ [ASPEED_DEV_UART5] = 164,
+ [ASPEED_DEV_UART6] = 164,
+ [ASPEED_DEV_UART7] = 164,
+ [ASPEED_DEV_UART8] = 164,
+ [ASPEED_DEV_UART9] = 164,
+ [ASPEED_DEV_UART10] = 164,
+ [ASPEED_DEV_UART11] = 164,
+ [ASPEED_DEV_UART12] = 164,
+ [ASPEED_DEV_TIMER1] = 16,
+};
+
+/* SSPINT 164 */
+static const int ast2700_ssp132_ssp164_intcmap[] = {
+ [ASPEED_DEV_UART0] = 7,
+ [ASPEED_DEV_UART1] = 8,
+ [ASPEED_DEV_UART2] = 9,
+ [ASPEED_DEV_UART3] = 10,
+ [ASPEED_DEV_UART5] = 11,
+ [ASPEED_DEV_UART6] = 12,
+ [ASPEED_DEV_UART7] = 13,
+ [ASPEED_DEV_UART8] = 14,
+ [ASPEED_DEV_UART9] = 15,
+ [ASPEED_DEV_UART10] = 16,
+ [ASPEED_DEV_UART11] = 17,
+ [ASPEED_DEV_UART12] = 18,
+};
+
+struct nvic_intc_irq_info {
+ int irq;
+ int intc_idx;
+ int orgate_idx;
+ const int *ptr;
+};
+
+static struct nvic_intc_irq_info ast2700_ssp_intcmap[] = {
+ {160, 1, 0, NULL},
+ {161, 1, 1, NULL},
+ {162, 1, 2, NULL},
+ {163, 1, 3, NULL},
+ {164, 1, 4, ast2700_ssp132_ssp164_intcmap},
+ {165, 1, 5, NULL},
+ {166, 1, 6, NULL},
+ {167, 1, 7, NULL},
+ {168, 1, 8, NULL},
+ {169, 1, 9, NULL},
+ {128, 0, 1, NULL},
+ {129, 0, 2, NULL},
+ {130, 0, 3, NULL},
+ {131, 0, 4, NULL},
+ {132, 0, 5, ast2700_ssp132_ssp164_intcmap},
+ {133, 0, 6, NULL},
+ {134, 0, 7, NULL},
+ {135, 0, 8, NULL},
+ {136, 0, 9, NULL},
+};
+
+static qemu_irq aspeed_soc_ast27x0ssp_get_irq(AspeedSoCState *s, int dev)
+{
+ Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(s);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+
+ int or_idx;
+ int idx;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ast2700_ssp_intcmap); i++) {
+ if (sc->irqmap[dev] == ast2700_ssp_intcmap[i].irq) {
+ assert(ast2700_ssp_intcmap[i].ptr);
+ or_idx = ast2700_ssp_intcmap[i].orgate_idx;
+ idx = ast2700_ssp_intcmap[i].intc_idx;
+ return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]),
+ ast2700_ssp_intcmap[i].ptr[dev]);
+ }
+ }
+
+ return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]);
+}
+
+static void aspeed_soc_ast27x0ssp_init(Object *obj)
+{
+ Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(obj);
+ AspeedSoCState *s = ASPEED_SOC(obj);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int i;
+
+ object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M);
+ object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU);
+ s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
+ qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev);
+
+ for (i = 0; i < sc->uarts_num; i++) {
+ object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM);
+ }
+
+ object_initialize_child(obj, "intc0", &a->intc[0],
+ TYPE_ASPEED_2700SSP_INTC);
+ object_initialize_child(obj, "intc1", &a->intc[1],
+ TYPE_ASPEED_2700SSP_INTCIO);
+
+ object_initialize_child(obj, "timerctrl", &s->timerctrl,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "ipc0", &a->ipc[0],
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "ipc1", &a->ipc[1],
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "scuio", &a->scuio,
+ TYPE_UNIMPLEMENTED_DEVICE);
+}
+
+static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp)
+{
+ Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(dev_soc);
+ AspeedSoCState *s = ASPEED_SOC(dev_soc);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ DeviceState *armv7m;
+ g_autofree char *sram_name = NULL;
+ int i;
+
+ if (!clock_has_source(s->sysclk)) {
+ error_setg(errp, "sysclk clock must be wired up by the board code");
+ return;
+ }
+
+ /* AST27X0 SSP Core */
+ armv7m = DEVICE(&a->armv7m);
+ qdev_prop_set_uint32(armv7m, "num-irq", 256);
+ qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc));
+ qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
+ object_property_set_link(OBJECT(&a->armv7m), "memory",
+ OBJECT(s->memory), &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort);
+
+ sram_name = g_strdup_printf("aspeed.dram.%d",
+ CPU(a->armv7m.cpu)->cpu_index);
+
+ if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size,
+ errp)) {
+ return;
+ }
+ memory_region_add_subregion(s->memory,
+ sc->memmap[ASPEED_DEV_SRAM],
+ &s->sram);
+
+ /* SCU */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]);
+
+ /* INTC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) {
+ return;
+ }
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0,
+ sc->memmap[ASPEED_DEV_INTC]);
+
+ /* INTCIO */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) {
+ return;
+ }
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0,
+ sc->memmap[ASPEED_DEV_INTCIO]);
+
+ /* irq source orgates -> INTC0 */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[0]), i));
+ }
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_outpins; i++) {
+ assert(i < ARRAY_SIZE(ast2700_ssp_intcmap));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i,
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
+ ast2700_ssp_intcmap[i].irq));
+ }
+ /* irq source orgates -> INTCIO */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[1]), i));
+ }
+ /* INTCIO -> INTC */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_outpins; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i,
+ qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i));
+ }
+ /* UART */
+ if (!aspeed_soc_uart_realize(s, errp)) {
+ return;
+ }
+
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl),
+ "aspeed.timerctrl",
+ sc->memmap[ASPEED_DEV_TIMER1], 0x200);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]),
+ "aspeed.ipc0",
+ sc->memmap[ASPEED_DEV_IPC0], 0x1000);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]),
+ "aspeed.ipc1",
+ sc->memmap[ASPEED_DEV_IPC1], 0x1000);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio),
+ "aspeed.scuio",
+ sc->memmap[ASPEED_DEV_SCUIO], 0x1000);
+}
+
+static void aspeed_soc_ast27x0ssp_class_init(ObjectClass *klass,
+ const void *data)
+{
+ static const char * const valid_cpu_types[] = {
+ ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO: cortex-m4f */
+ NULL
+ };
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc);
+
+ /* Reason: The Aspeed SoC can only be instantiated from a board */
+ dc->user_creatable = false;
+ dc->realize = aspeed_soc_ast27x0ssp_realize;
+
+ sc->valid_cpu_types = valid_cpu_types;
+ sc->silicon_rev = AST2700_A1_SILICON_REV;
+ sc->sram_size = AST2700_SSP_RAM_SIZE;
+ sc->spis_num = 0;
+ sc->ehcis_num = 0;
+ sc->wdts_num = 0;
+ sc->macs_num = 0;
+ sc->uarts_num = 13;
+ sc->uarts_base = ASPEED_DEV_UART0;
+ sc->irqmap = aspeed_soc_ast27x0ssp_irqmap;
+ sc->memmap = aspeed_soc_ast27x0ssp_memmap;
+ sc->num_cpus = 1;
+ sc->get_irq = aspeed_soc_ast27x0ssp_get_irq;
+}
+
+static const TypeInfo aspeed_soc_ast27x0ssp_types[] = {
+ {
+ .name = TYPE_ASPEED27X0SSP_SOC,
+ .parent = TYPE_ASPEED_SOC,
+ .instance_size = sizeof(Aspeed27x0SSPSoCState),
+ .instance_init = aspeed_soc_ast27x0ssp_init,
+ .class_init = aspeed_soc_ast27x0ssp_class_init,
+ },
+};
+
+DEFINE_TYPES(aspeed_soc_ast27x0ssp_types)
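A worked example of the interrupt routing implemented by aspeed_soc_ast27x0ssp_get_irq() may help when reading the tables above (it restates the patch's own data, no new behaviour):

/*
 * Example: ASPEED_DEV_UART5.  The SSP irqmap gives NVIC line 164, whose
 * table entry is {164, 1, 4, ast2700_ssp132_ssp164_intcmap}, and
 * ast2700_ssp132_ssp164_intcmap[ASPEED_DEV_UART5] is 11.  The lookup
 * therefore returns input 11 of or-gate 4 on intc[1] (the INTCIO
 * instance); the INTCIO -> INTC -> NVIC cascade is wired in
 * aspeed_soc_ast27x0ssp_realize() above.
 *
 * Devices whose NVIC line has no table entry, e.g. ASPEED_DEV_UART4
 * (line 8) or ASPEED_DEV_TIMER1 (line 16), fall through to the direct
 * qdev_get_gpio_in(armv7m, ...) at the end of the lookup.
 */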
diff --git a/hw/arm/aspeed_ast27x0-tsp.c b/hw/arm/aspeed_ast27x0-tsp.c
new file mode 100644
index 0000000..4e0efae
--- /dev/null
+++ b/hw/arm/aspeed_ast27x0-tsp.c
@@ -0,0 +1,294 @@
+/*
+ * ASPEED Ast27x0 TSP SoC
+ *
+ * Copyright (C) 2025 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-clock.h"
+#include "hw/misc/unimp.h"
+#include "hw/arm/aspeed_soc.h"
+
+#define AST2700_TSP_RAM_SIZE (32 * MiB)
+
+static const hwaddr aspeed_soc_ast27x0tsp_memmap[] = {
+ [ASPEED_DEV_SRAM] = 0x00000000,
+ [ASPEED_DEV_INTC] = 0x72100000,
+ [ASPEED_DEV_SCU] = 0x72C02000,
+ [ASPEED_DEV_SCUIO] = 0x74C02000,
+ [ASPEED_DEV_UART0] = 0x74C33000,
+ [ASPEED_DEV_UART1] = 0x74C33100,
+ [ASPEED_DEV_UART2] = 0x74C33200,
+ [ASPEED_DEV_UART3] = 0x74C33300,
+ [ASPEED_DEV_UART4] = 0x72C1A000,
+ [ASPEED_DEV_INTCIO] = 0x74C18000,
+ [ASPEED_DEV_IPC0] = 0x72C1C000,
+ [ASPEED_DEV_IPC1] = 0x74C39000,
+ [ASPEED_DEV_UART5] = 0x74C33400,
+ [ASPEED_DEV_UART6] = 0x74C33500,
+ [ASPEED_DEV_UART7] = 0x74C33600,
+ [ASPEED_DEV_UART8] = 0x74C33700,
+ [ASPEED_DEV_UART9] = 0x74C33800,
+ [ASPEED_DEV_UART10] = 0x74C33900,
+ [ASPEED_DEV_UART11] = 0x74C33A00,
+ [ASPEED_DEV_UART12] = 0x74C33B00,
+ [ASPEED_DEV_TIMER1] = 0x72C10000,
+};
+
+static const int aspeed_soc_ast27x0tsp_irqmap[] = {
+ [ASPEED_DEV_SCU] = 12,
+ [ASPEED_DEV_UART0] = 164,
+ [ASPEED_DEV_UART1] = 164,
+ [ASPEED_DEV_UART2] = 164,
+ [ASPEED_DEV_UART3] = 164,
+ [ASPEED_DEV_UART4] = 8,
+ [ASPEED_DEV_UART5] = 164,
+ [ASPEED_DEV_UART6] = 164,
+ [ASPEED_DEV_UART7] = 164,
+ [ASPEED_DEV_UART8] = 164,
+ [ASPEED_DEV_UART9] = 164,
+ [ASPEED_DEV_UART10] = 164,
+ [ASPEED_DEV_UART11] = 164,
+ [ASPEED_DEV_UART12] = 164,
+ [ASPEED_DEV_TIMER1] = 16,
+};
+
+/* TSPINT 164 */
+static const int ast2700_tsp132_tsp164_intcmap[] = {
+ [ASPEED_DEV_UART0] = 7,
+ [ASPEED_DEV_UART1] = 8,
+ [ASPEED_DEV_UART2] = 9,
+ [ASPEED_DEV_UART3] = 10,
+ [ASPEED_DEV_UART5] = 11,
+ [ASPEED_DEV_UART6] = 12,
+ [ASPEED_DEV_UART7] = 13,
+ [ASPEED_DEV_UART8] = 14,
+ [ASPEED_DEV_UART9] = 15,
+ [ASPEED_DEV_UART10] = 16,
+ [ASPEED_DEV_UART11] = 17,
+ [ASPEED_DEV_UART12] = 18,
+};
+
+struct nvic_intc_irq_info {
+ int irq;
+ int intc_idx;
+ int orgate_idx;
+ const int *ptr;
+};
+
+static struct nvic_intc_irq_info ast2700_tsp_intcmap[] = {
+ {160, 1, 0, NULL},
+ {161, 1, 1, NULL},
+ {162, 1, 2, NULL},
+ {163, 1, 3, NULL},
+ {164, 1, 4, ast2700_tsp132_tsp164_intcmap},
+ {165, 1, 5, NULL},
+ {166, 1, 6, NULL},
+ {167, 1, 7, NULL},
+ {168, 1, 8, NULL},
+ {169, 1, 9, NULL},
+ {128, 0, 1, NULL},
+ {129, 0, 2, NULL},
+ {130, 0, 3, NULL},
+ {131, 0, 4, NULL},
+ {132, 0, 5, ast2700_tsp132_tsp164_intcmap},
+ {133, 0, 6, NULL},
+ {134, 0, 7, NULL},
+ {135, 0, 8, NULL},
+ {136, 0, 9, NULL},
+};
+
+static qemu_irq aspeed_soc_ast27x0tsp_get_irq(AspeedSoCState *s, int dev)
+{
+ Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(s);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+
+ int or_idx;
+ int idx;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ast2700_tsp_intcmap); i++) {
+ if (sc->irqmap[dev] == ast2700_tsp_intcmap[i].irq) {
+ assert(ast2700_tsp_intcmap[i].ptr);
+ or_idx = ast2700_tsp_intcmap[i].orgate_idx;
+ idx = ast2700_tsp_intcmap[i].intc_idx;
+ return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]),
+ ast2700_tsp_intcmap[i].ptr[dev]);
+ }
+ }
+
+ return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]);
+}
+
+static void aspeed_soc_ast27x0tsp_init(Object *obj)
+{
+ Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(obj);
+ AspeedSoCState *s = ASPEED_SOC(obj);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int i;
+
+ object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M);
+ object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU);
+ s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
+ qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev);
+
+ for (i = 0; i < sc->uarts_num; i++) {
+ object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM);
+ }
+
+ object_initialize_child(obj, "intc0", &a->intc[0],
+ TYPE_ASPEED_2700TSP_INTC);
+ object_initialize_child(obj, "intc1", &a->intc[1],
+ TYPE_ASPEED_2700TSP_INTCIO);
+
+ object_initialize_child(obj, "timerctrl", &s->timerctrl,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "ipc0", &a->ipc[0],
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "ipc1", &a->ipc[1],
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "scuio", &a->scuio,
+ TYPE_UNIMPLEMENTED_DEVICE);
+}
+
+static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp)
+{
+ Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(dev_soc);
+ AspeedSoCState *s = ASPEED_SOC(dev_soc);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ DeviceState *armv7m;
+ g_autofree char *sram_name = NULL;
+ int i;
+
+ if (!clock_has_source(s->sysclk)) {
+ error_setg(errp, "sysclk clock must be wired up by the board code");
+ return;
+ }
+
+ /* AST27X0 TSP Core */
+ armv7m = DEVICE(&a->armv7m);
+ qdev_prop_set_uint32(armv7m, "num-irq", 256);
+ qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc));
+ qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
+ object_property_set_link(OBJECT(&a->armv7m), "memory",
+ OBJECT(s->memory), &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort);
+
+ sram_name = g_strdup_printf("aspeed.dram.%d",
+ CPU(a->armv7m.cpu)->cpu_index);
+
+ if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size,
+ errp)) {
+ return;
+ }
+ memory_region_add_subregion(s->memory,
+ sc->memmap[ASPEED_DEV_SRAM],
+ &s->sram);
+
+ /* SCU */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]);
+
+ /* INTC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) {
+ return;
+ }
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0,
+ sc->memmap[ASPEED_DEV_INTC]);
+
+ /* INTCIO */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) {
+ return;
+ }
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0,
+ sc->memmap[ASPEED_DEV_INTCIO]);
+
+ /* irq sources -> orgates -> INTC */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[0]), i));
+ }
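+ /* INTC output pins -> armv7m NVIC */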
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_outpins; i++) {
+ assert(i < ARRAY_SIZE(ast2700_tsp_intcmap));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i,
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
+ ast2700_tsp_intcmap[i].irq));
+ }
+ /* irq sources -> orgates -> INTCIO */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[1]), i));
+ }
+ /* INTCIO -> INTC */
+ for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_outpins; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i,
+ qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i));
+ }
+ /* UART */
+ if (!aspeed_soc_uart_realize(s, errp)) {
+ return;
+ }
+
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl),
+ "aspeed.timerctrl",
+ sc->memmap[ASPEED_DEV_TIMER1], 0x200);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]),
+ "aspeed.ipc0",
+ sc->memmap[ASPEED_DEV_IPC0], 0x1000);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]),
+ "aspeed.ipc1",
+ sc->memmap[ASPEED_DEV_IPC1], 0x1000);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio),
+ "aspeed.scuio",
+ sc->memmap[ASPEED_DEV_SCUIO], 0x1000);
+}
+
+static void aspeed_soc_ast27x0tsp_class_init(ObjectClass *klass, const void *data)
+{
+ static const char * const valid_cpu_types[] = {
+ ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */
+ NULL
+ };
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc);
+
+ /* Reason: The Aspeed SoC can only be instantiated from a board */
+ dc->user_creatable = false;
+ dc->realize = aspeed_soc_ast27x0tsp_realize;
+
+ sc->valid_cpu_types = valid_cpu_types;
+ sc->silicon_rev = AST2700_A1_SILICON_REV;
+ sc->sram_size = AST2700_TSP_RAM_SIZE;
+ sc->spis_num = 0;
+ sc->ehcis_num = 0;
+ sc->wdts_num = 0;
+ sc->macs_num = 0;
+ sc->uarts_num = 13;
+ sc->uarts_base = ASPEED_DEV_UART0;
+ sc->irqmap = aspeed_soc_ast27x0tsp_irqmap;
+ sc->memmap = aspeed_soc_ast27x0tsp_memmap;
+ sc->num_cpus = 1;
+ sc->get_irq = aspeed_soc_ast27x0tsp_get_irq;
+}
+
+static const TypeInfo aspeed_soc_ast27x0tsp_types[] = {
+ {
+ .name = TYPE_ASPEED27X0TSP_SOC,
+ .parent = TYPE_ASPEED_SOC,
+ .instance_size = sizeof(Aspeed27x0TSPSoCState),
+ .instance_init = aspeed_soc_ast27x0tsp_init,
+ .class_init = aspeed_soc_ast27x0tsp_class_init,
+ },
+};
+
+DEFINE_TYPES(aspeed_soc_ast27x0tsp_types)
diff --git a/hw/arm/aspeed_ast27x0.c b/hw/arm/aspeed_ast27x0.c
index a9fb0d4..6aa3841 100644
--- a/hw/arm/aspeed_ast27x0.c
+++ b/hw/arm/aspeed_ast27x0.c
@@ -13,64 +13,115 @@
#include "qapi/error.h"
#include "hw/misc/unimp.h"
#include "hw/arm/aspeed_soc.h"
+#include "hw/arm/bsa.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "hw/i2c/aspeed_i2c.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/intc/arm_gicv3.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/log.h"
+#define AST2700_SOC_IO_SIZE 0x00FE0000
+#define AST2700_SOC_IOMEM_SIZE 0x01000000
+#define AST2700_SOC_DPMCU_SIZE 0x00040000
+#define AST2700_SOC_LTPI_SIZE 0x01000000
+
static const hwaddr aspeed_soc_ast2700_memmap[] = {
- [ASPEED_DEV_SPI_BOOT] = 0x400000000,
+ [ASPEED_DEV_VBOOTROM] = 0x00000000,
+ [ASPEED_DEV_IOMEM] = 0x00020000,
[ASPEED_DEV_SRAM] = 0x10000000,
+ [ASPEED_DEV_DPMCU] = 0x11000000,
+ [ASPEED_DEV_IOMEM0] = 0x12000000,
+ [ASPEED_DEV_EHCI1] = 0x12061000,
+ [ASPEED_DEV_EHCI2] = 0x12063000,
+ [ASPEED_DEV_HACE] = 0x12070000,
+ [ASPEED_DEV_EMMC] = 0x12090000,
+ [ASPEED_DEV_INTC] = 0x12100000,
+ [ASPEED_GIC_DIST] = 0x12200000,
+ [ASPEED_GIC_REDIST] = 0x12280000,
[ASPEED_DEV_SDMC] = 0x12C00000,
[ASPEED_DEV_SCU] = 0x12C02000,
- [ASPEED_DEV_SCUIO] = 0x14C02000,
- [ASPEED_DEV_UART0] = 0X14C33000,
- [ASPEED_DEV_UART1] = 0X14C33100,
- [ASPEED_DEV_UART2] = 0X14C33200,
- [ASPEED_DEV_UART3] = 0X14C33300,
- [ASPEED_DEV_UART4] = 0X12C1A000,
- [ASPEED_DEV_UART5] = 0X14C33400,
- [ASPEED_DEV_UART6] = 0X14C33500,
- [ASPEED_DEV_UART7] = 0X14C33600,
- [ASPEED_DEV_UART8] = 0X14C33700,
- [ASPEED_DEV_UART9] = 0X14C33800,
- [ASPEED_DEV_UART10] = 0X14C33900,
- [ASPEED_DEV_UART11] = 0X14C33A00,
- [ASPEED_DEV_UART12] = 0X14C33B00,
- [ASPEED_DEV_WDT] = 0x14C37000,
- [ASPEED_DEV_VUART] = 0X14C30000,
+ [ASPEED_DEV_RTC] = 0x12C0F000,
+ [ASPEED_DEV_TIMER1] = 0x12C10000,
+ [ASPEED_DEV_SLI] = 0x12C17000,
+ [ASPEED_DEV_UART4] = 0x12C1A000,
+ [ASPEED_DEV_IOMEM1] = 0x14000000,
[ASPEED_DEV_FMC] = 0x14000000,
[ASPEED_DEV_SPI0] = 0x14010000,
[ASPEED_DEV_SPI1] = 0x14020000,
[ASPEED_DEV_SPI2] = 0x14030000,
- [ASPEED_DEV_SDRAM] = 0x400000000,
[ASPEED_DEV_MII1] = 0x14040000,
[ASPEED_DEV_MII2] = 0x14040008,
[ASPEED_DEV_MII3] = 0x14040010,
[ASPEED_DEV_ETH1] = 0x14050000,
[ASPEED_DEV_ETH2] = 0x14060000,
[ASPEED_DEV_ETH3] = 0x14070000,
- [ASPEED_DEV_EMMC] = 0x12090000,
- [ASPEED_DEV_INTC] = 0x12100000,
- [ASPEED_DEV_SLI] = 0x12C17000,
+ [ASPEED_DEV_SDHCI] = 0x14080000,
+ [ASPEED_DEV_EHCI3] = 0x14121000,
+ [ASPEED_DEV_EHCI4] = 0x14123000,
+ [ASPEED_DEV_ADC] = 0x14C00000,
+ [ASPEED_DEV_SCUIO] = 0x14C02000,
+ [ASPEED_DEV_GPIO] = 0x14C0B000,
+ [ASPEED_DEV_I2C] = 0x14C0F000,
+ [ASPEED_DEV_INTCIO] = 0x14C18000,
[ASPEED_DEV_SLIIO] = 0x14C1E000,
- [ASPEED_GIC_DIST] = 0x12200000,
- [ASPEED_GIC_REDIST] = 0x12280000,
+ [ASPEED_DEV_VUART] = 0x14C30000,
+ [ASPEED_DEV_UART0] = 0x14C33000,
+ [ASPEED_DEV_UART1] = 0x14C33100,
+ [ASPEED_DEV_UART2] = 0x14C33200,
+ [ASPEED_DEV_UART3] = 0x14C33300,
+ [ASPEED_DEV_UART5] = 0x14C33400,
+ [ASPEED_DEV_UART6] = 0x14C33500,
+ [ASPEED_DEV_UART7] = 0x14C33600,
+ [ASPEED_DEV_UART8] = 0x14C33700,
+ [ASPEED_DEV_UART9] = 0x14C33800,
+ [ASPEED_DEV_UART10] = 0x14C33900,
+ [ASPEED_DEV_UART11] = 0x14C33A00,
+ [ASPEED_DEV_UART12] = 0x14C33B00,
+ [ASPEED_DEV_WDT] = 0x14C37000,
+ [ASPEED_DEV_SPI_BOOT] = 0x100000000,
+ [ASPEED_DEV_LTPI] = 0x300000000,
+ [ASPEED_DEV_SDRAM] = 0x400000000,
};
-#define AST2700_MAX_IRQ 288
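+/* Number of external interrupt lines (SPIs) wired to the GIC */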
+#define AST2700_MAX_IRQ 256
/* Shared Peripheral Interrupt values below are offset by -32 from datasheet */
-static const int aspeed_soc_ast2700_irqmap[] = {
+static const int aspeed_soc_ast2700a0_irqmap[] = {
+ [ASPEED_DEV_SDMC] = 0,
+ [ASPEED_DEV_HACE] = 4,
+ [ASPEED_DEV_XDMA] = 5,
+ [ASPEED_DEV_UART4] = 8,
+ [ASPEED_DEV_SCU] = 12,
+ [ASPEED_DEV_RTC] = 13,
+ [ASPEED_DEV_EMMC] = 15,
+ [ASPEED_DEV_TIMER1] = 16,
+ [ASPEED_DEV_TIMER2] = 17,
+ [ASPEED_DEV_TIMER3] = 18,
+ [ASPEED_DEV_TIMER4] = 19,
+ [ASPEED_DEV_TIMER5] = 20,
+ [ASPEED_DEV_TIMER6] = 21,
+ [ASPEED_DEV_TIMER7] = 22,
+ [ASPEED_DEV_TIMER8] = 23,
+ [ASPEED_DEV_DP] = 28,
+ [ASPEED_DEV_EHCI1] = 33,
+ [ASPEED_DEV_EHCI2] = 37,
+ [ASPEED_DEV_LPC] = 128,
+ [ASPEED_DEV_IBT] = 128,
+ [ASPEED_DEV_KCS] = 128,
+ [ASPEED_DEV_ADC] = 130,
+ [ASPEED_DEV_GPIO] = 130,
+ [ASPEED_DEV_I2C] = 130,
+ [ASPEED_DEV_FMC] = 131,
+ [ASPEED_DEV_WDT] = 131,
+ [ASPEED_DEV_PWM] = 131,
+ [ASPEED_DEV_I3C] = 131,
[ASPEED_DEV_UART0] = 132,
[ASPEED_DEV_UART1] = 132,
[ASPEED_DEV_UART2] = 132,
[ASPEED_DEV_UART3] = 132,
- [ASPEED_DEV_UART4] = 8,
[ASPEED_DEV_UART5] = 132,
[ASPEED_DEV_UART6] = 132,
[ASPEED_DEV_UART7] = 132,
@@ -79,15 +130,21 @@ static const int aspeed_soc_ast2700_irqmap[] = {
[ASPEED_DEV_UART10] = 132,
[ASPEED_DEV_UART11] = 132,
[ASPEED_DEV_UART12] = 132,
- [ASPEED_DEV_FMC] = 131,
+ [ASPEED_DEV_ETH1] = 132,
+ [ASPEED_DEV_ETH2] = 132,
+ [ASPEED_DEV_ETH3] = 132,
+ [ASPEED_DEV_PECI] = 133,
+ [ASPEED_DEV_SDHCI] = 133,
+};
+
+static const int aspeed_soc_ast2700a1_irqmap[] = {
[ASPEED_DEV_SDMC] = 0,
- [ASPEED_DEV_SCU] = 12,
- [ASPEED_DEV_ADC] = 130,
+ [ASPEED_DEV_HACE] = 4,
[ASPEED_DEV_XDMA] = 5,
- [ASPEED_DEV_EMMC] = 15,
- [ASPEED_DEV_GPIO] = 11,
- [ASPEED_DEV_GPIO_1_8V] = 130,
+ [ASPEED_DEV_UART4] = 8,
+ [ASPEED_DEV_SCU] = 12,
[ASPEED_DEV_RTC] = 13,
+ [ASPEED_DEV_EMMC] = 15,
[ASPEED_DEV_TIMER1] = 16,
[ASPEED_DEV_TIMER2] = 17,
[ASPEED_DEV_TIMER3] = 18,
@@ -96,37 +153,60 @@ static const int aspeed_soc_ast2700_irqmap[] = {
[ASPEED_DEV_TIMER6] = 21,
[ASPEED_DEV_TIMER7] = 22,
[ASPEED_DEV_TIMER8] = 23,
- [ASPEED_DEV_WDT] = 131,
- [ASPEED_DEV_PWM] = 131,
- [ASPEED_DEV_LPC] = 128,
- [ASPEED_DEV_IBT] = 128,
- [ASPEED_DEV_I2C] = 130,
- [ASPEED_DEV_PECI] = 133,
- [ASPEED_DEV_ETH1] = 132,
- [ASPEED_DEV_ETH2] = 132,
- [ASPEED_DEV_ETH3] = 132,
- [ASPEED_DEV_HACE] = 4,
- [ASPEED_DEV_KCS] = 128,
[ASPEED_DEV_DP] = 28,
- [ASPEED_DEV_I3C] = 131,
+ [ASPEED_DEV_EHCI1] = 33,
+ [ASPEED_DEV_EHCI2] = 37,
+ [ASPEED_DEV_LPC] = 192,
+ [ASPEED_DEV_IBT] = 192,
+ [ASPEED_DEV_KCS] = 192,
+ [ASPEED_DEV_I2C] = 194,
+ [ASPEED_DEV_ADC] = 194,
+ [ASPEED_DEV_GPIO] = 194,
+ [ASPEED_DEV_FMC] = 195,
+ [ASPEED_DEV_WDT] = 195,
+ [ASPEED_DEV_PWM] = 195,
+ [ASPEED_DEV_I3C] = 195,
+ [ASPEED_DEV_UART0] = 196,
+ [ASPEED_DEV_UART1] = 196,
+ [ASPEED_DEV_UART2] = 196,
+ [ASPEED_DEV_UART3] = 196,
+ [ASPEED_DEV_UART5] = 196,
+ [ASPEED_DEV_UART6] = 196,
+ [ASPEED_DEV_UART7] = 196,
+ [ASPEED_DEV_UART8] = 196,
+ [ASPEED_DEV_UART9] = 196,
+ [ASPEED_DEV_UART10] = 196,
+ [ASPEED_DEV_UART11] = 196,
+ [ASPEED_DEV_UART12] = 196,
+ [ASPEED_DEV_ETH1] = 196,
+ [ASPEED_DEV_ETH2] = 196,
+ [ASPEED_DEV_ETH3] = 196,
+ [ASPEED_DEV_PECI] = 197,
+ [ASPEED_DEV_SDHCI] = 197,
};
/* GICINT 128 */
-static const int aspeed_soc_ast2700_gic128_intcmap[] = {
+/* GICINT 192 */
+static const int ast2700_gic128_gic192_intcmap[] = {
[ASPEED_DEV_LPC] = 0,
[ASPEED_DEV_IBT] = 2,
[ASPEED_DEV_KCS] = 4,
};
+/* GICINT 129 */
+/* GICINT 193 */
+
/* GICINT 130 */
-static const int aspeed_soc_ast2700_gic130_intcmap[] = {
+/* GICINT 194 */
+static const int ast2700_gic130_gic194_intcmap[] = {
[ASPEED_DEV_I2C] = 0,
[ASPEED_DEV_ADC] = 16,
- [ASPEED_DEV_GPIO_1_8V] = 18,
+ [ASPEED_DEV_GPIO] = 18,
};
/* GICINT 131 */
-static const int aspeed_soc_ast2700_gic131_intcmap[] = {
+/* GICINT 195 */
+static const int ast2700_gic131_gic195_intcmap[] = {
[ASPEED_DEV_I3C] = 0,
[ASPEED_DEV_WDT] = 16,
[ASPEED_DEV_FMC] = 25,
@@ -134,7 +214,8 @@ static const int aspeed_soc_ast2700_gic131_intcmap[] = {
};
/* GICINT 132 */
-static const int aspeed_soc_ast2700_gic132_intcmap[] = {
+/* GICINT 196 */
+static const int ast2700_gic132_gic196_intcmap[] = {
[ASPEED_DEV_ETH1] = 0,
[ASPEED_DEV_ETH2] = 1,
[ASPEED_DEV_ETH3] = 2,
@@ -150,48 +231,95 @@ static const int aspeed_soc_ast2700_gic132_intcmap[] = {
[ASPEED_DEV_UART10] = 16,
[ASPEED_DEV_UART11] = 17,
[ASPEED_DEV_UART12] = 18,
+ [ASPEED_DEV_EHCI3] = 28,
+ [ASPEED_DEV_EHCI4] = 29,
};
/* GICINT 133 */
-static const int aspeed_soc_ast2700_gic133_intcmap[] = {
+/* GICINT 197 */
+static const int ast2700_gic133_gic197_intcmap[] = {
+ [ASPEED_DEV_SDHCI] = 1,
[ASPEED_DEV_PECI] = 4,
};
/* GICINT 128 ~ 136 */
+/* GICINT 192 ~ 201 */
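+/*
+ * irq: interrupt line matched against the SoC irqmap entry
+ * intc_idx: index into Aspeed27x0SoCState::intc (0: INTC, 1: INTCIO)
+ * orgate_idx: OR gate index within that INTC
+ * ptr: per-device map of OR gate input bits, NULL when unused
+ */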
struct gic_intc_irq_info {
int irq;
+ int intc_idx;
+ int orgate_idx;
const int *ptr;
};
-static const struct gic_intc_irq_info aspeed_soc_ast2700_gic_intcmap[] = {
- {128, aspeed_soc_ast2700_gic128_intcmap},
- {129, NULL},
- {130, aspeed_soc_ast2700_gic130_intcmap},
- {131, aspeed_soc_ast2700_gic131_intcmap},
- {132, aspeed_soc_ast2700_gic132_intcmap},
- {133, aspeed_soc_ast2700_gic133_intcmap},
- {134, NULL},
- {135, NULL},
- {136, NULL},
+static const struct gic_intc_irq_info ast2700_gic_intcmap[] = {
+ {192, 1, 0, ast2700_gic128_gic192_intcmap},
+ {193, 1, 1, NULL},
+ {194, 1, 2, ast2700_gic130_gic194_intcmap},
+ {195, 1, 3, ast2700_gic131_gic195_intcmap},
+ {196, 1, 4, ast2700_gic132_gic196_intcmap},
+ {197, 1, 5, ast2700_gic133_gic197_intcmap},
+ {198, 1, 6, NULL},
+ {199, 1, 7, NULL},
+ {200, 1, 8, NULL},
+ {201, 1, 9, NULL},
+ {128, 0, 1, ast2700_gic128_gic192_intcmap},
+ {129, 0, 2, NULL},
+ {130, 0, 3, ast2700_gic130_gic194_intcmap},
+ {131, 0, 4, ast2700_gic131_gic195_intcmap},
+ {132, 0, 5, ast2700_gic132_gic196_intcmap},
+ {133, 0, 6, ast2700_gic133_gic197_intcmap},
+ {134, 0, 7, NULL},
+ {135, 0, 8, NULL},
+ {136, 0, 9, NULL},
};
static qemu_irq aspeed_soc_ast2700_get_irq(AspeedSoCState *s, int dev)
{
Aspeed27x0SoCState *a = ASPEED27X0_SOC(s);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int or_idx;
+ int idx;
int i;
- for (i = 0; i < ARRAY_SIZE(aspeed_soc_ast2700_gic_intcmap); i++) {
- if (sc->irqmap[dev] == aspeed_soc_ast2700_gic_intcmap[i].irq) {
- assert(aspeed_soc_ast2700_gic_intcmap[i].ptr);
- return qdev_get_gpio_in(DEVICE(&a->intc.orgates[i]),
- aspeed_soc_ast2700_gic_intcmap[i].ptr[dev]);
+ for (i = 0; i < ARRAY_SIZE(ast2700_gic_intcmap); i++) {
+ if (sc->irqmap[dev] == ast2700_gic_intcmap[i].irq) {
+ assert(ast2700_gic_intcmap[i].ptr);
+ or_idx = ast2700_gic_intcmap[i].orgate_idx;
+ idx = ast2700_gic_intcmap[i].intc_idx;
+ return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]),
+ ast2700_gic_intcmap[i].ptr[dev]);
}
}
return qdev_get_gpio_in(DEVICE(&a->gic), sc->irqmap[dev]);
}
+static qemu_irq aspeed_soc_ast2700_get_irq_index(AspeedSoCState *s, int dev,
+ int index)
+{
+ Aspeed27x0SoCState *a = ASPEED27X0_SOC(s);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int or_idx;
+ int idx;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ast2700_gic_intcmap); i++) {
+ if (sc->irqmap[dev] == ast2700_gic_intcmap[i].irq) {
+ assert(ast2700_gic_intcmap[i].ptr);
+ or_idx = ast2700_gic_intcmap[i].orgate_idx;
+ idx = ast2700_gic_intcmap[i].intc_idx;
+ return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]),
+ ast2700_gic_intcmap[i].ptr[dev] + index);
+ }
+ }
+
+ /*
+ * Invalid OR gate index; device IRQs should be in the range
+ * 128 to 136 or 192 to 201.
+ */
+ g_assert_not_reached();
+}
+
static uint64_t aspeed_ram_capacity_read(void *opaque, hwaddr addr,
unsigned int size)
{
@@ -218,8 +346,9 @@ static void aspeed_ram_capacity_write(void *opaque, hwaddr addr, uint64_t data,
* If writes the data to the address which is beyond the ram size,
* it would write the data to the "address % ram_size".
*/
- result = address_space_write(&s->dram_as, addr % ram_size,
- MEMTXATTRS_UNSPECIFIED, &data, 4);
+ address_space_stl_le(&s->dram_as, addr % ram_size, data,
+ MEMTXATTRS_UNSPECIFIED, &result);
+
if (result != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: DRAM write failed, addr:0x%" HWADDR_PRIx
@@ -232,9 +361,10 @@ static const MemoryRegionOps aspeed_ram_capacity_ops = {
.read = aspeed_ram_capacity_read,
.write = aspeed_ram_capacity_write,
.endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
.valid = {
- .min_access_size = 1,
- .max_access_size = 8,
+ .min_access_size = 4,
+ .max_access_size = 4,
},
};
@@ -287,7 +417,7 @@ static void aspeed_soc_ast2700_init(Object *obj)
char socname[8];
char typename[64];
- if (sscanf(sc->name, "%7s", socname) != 1) {
+ if (sscanf(object_get_typename(obj), "%7s", socname) != 1) {
g_assert_not_reached();
}
@@ -303,14 +433,21 @@ static void aspeed_soc_ast2700_init(Object *obj)
sc->silicon_rev);
object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu),
"hw-strap1");
- object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
- "hw-strap2");
object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu),
"hw-prot-key");
object_initialize_child(obj, "scuio", &s->scuio, TYPE_ASPEED_2700_SCUIO);
qdev_prop_set_uint32(DEVICE(&s->scuio), "silicon-rev",
sc->silicon_rev);
+ /*
+ * There is one hw-strap1 register in the SCU (CPU die) and another
+ * hw-strap1 register in the SCUIO (IO die). To reuse the existing
+ * hw-strap design, the "hw-strap1" property is aliased to the SCU and
+ * sets the SCU hw-strap1 register, while the "hw-strap2" property is
+ * aliased to the SCUIO and sets the SCUIO hw-strap1 register.
+ */
+ object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scuio),
+ "hw-strap1");
snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname);
object_initialize_child(obj, "fmc", &s->fmc, typename);
@@ -320,6 +457,11 @@ static void aspeed_soc_ast2700_init(Object *obj)
object_initialize_child(obj, "spi[*]", &s->spi[i], typename);
}
+ for (i = 0; i < sc->ehcis_num; i++) {
+ object_initialize_child(obj, "ehci[*]", &s->ehci[i],
+ TYPE_PLATFORM_EHCI);
+ }
+
snprintf(typename, sizeof(typename), "aspeed.sdmc-%s", socname);
object_initialize_child(obj, "sdmc", &s->sdmc, typename);
object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc),
@@ -343,7 +485,50 @@ static void aspeed_soc_ast2700_init(Object *obj)
object_initialize_child(obj, "sli", &s->sli, TYPE_ASPEED_2700_SLI);
object_initialize_child(obj, "sliio", &s->sliio, TYPE_ASPEED_2700_SLIIO);
- object_initialize_child(obj, "intc", &a->intc, TYPE_ASPEED_2700_INTC);
+ object_initialize_child(obj, "intc", &a->intc[0], TYPE_ASPEED_2700_INTC);
+ object_initialize_child(obj, "intcio", &a->intc[1],
+ TYPE_ASPEED_2700_INTCIO);
+
+ snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname);
+ object_initialize_child(obj, "adc", &s->adc, typename);
+
+ snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname);
+ object_initialize_child(obj, "i2c", &s->i2c, typename);
+
+ snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname);
+ object_initialize_child(obj, "gpio", &s->gpio, typename);
+
+ object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC);
+
+ snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname);
+ object_initialize_child(obj, "sd-controller", &s->sdhci, typename);
+ object_property_set_int(OBJECT(&s->sdhci), "num-slots", 1, &error_abort);
+
+ /* Initialize the SD card slot here so that it is under the correct QOM parent */
+ object_initialize_child(obj, "sd-controller.sdhci",
+ &s->sdhci.slots[0], TYPE_SYSBUS_SDHCI);
+
+ object_initialize_child(obj, "emmc-controller", &s->emmc, typename);
+ object_property_set_int(OBJECT(&s->emmc), "num-slots", 1, &error_abort);
+
+ object_initialize_child(obj, "emmc-controller.sdhci", &s->emmc.slots[0],
+ TYPE_SYSBUS_SDHCI);
+
+ snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname);
+ object_initialize_child(obj, "timerctrl", &s->timerctrl, typename);
+
+ snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname);
+ object_initialize_child(obj, "hace", &s->hace, typename);
+ object_initialize_child(obj, "dpmcu", &s->dpmcu,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "ltpi", &s->ltpi,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "iomem", &s->iomem,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "iomem0", &s->iomem0,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ object_initialize_child(obj, "iomem1", &s->iomem1,
+ TYPE_UNIMPLEMENTED_DEVICE);
}
/*
@@ -370,7 +555,7 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp)
gicdev = DEVICE(&a->gic);
qdev_prop_set_uint32(gicdev, "revision", 3);
qdev_prop_set_uint32(gicdev, "num-cpu", sc->num_cpus);
- qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ);
+ qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ + GIC_INTERNAL);
redist_region_count = qlist_new();
qlist_append_int(redist_region_count, sc->num_cpus);
@@ -379,33 +564,35 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp)
if (!sysbus_realize(gicbusdev, errp)) {
return false;
}
- sysbus_mmio_map(gicbusdev, 0, sc->memmap[ASPEED_GIC_DIST]);
- sysbus_mmio_map(gicbusdev, 1, sc->memmap[ASPEED_GIC_REDIST]);
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 0,
+ sc->memmap[ASPEED_GIC_DIST]);
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 1,
+ sc->memmap[ASPEED_GIC_REDIST]);
for (i = 0; i < sc->num_cpus; i++) {
DeviceState *cpudev = DEVICE(&a->cpu[i]);
- int NUM_IRQS = 256, ARCH_GIC_MAINT_IRQ = 9, VIRTUAL_PMU_IRQ = 7;
- int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
+ int intidbase = AST2700_MAX_IRQ + i * GIC_INTERNAL;
const int timer_irq[] = {
- [GTIMER_PHYS] = 14,
- [GTIMER_VIRT] = 11,
- [GTIMER_HYP] = 10,
- [GTIMER_SEC] = 13,
+ [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
+ [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
+ [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
+ [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
};
int j;
for (j = 0; j < ARRAY_SIZE(timer_irq); j++) {
qdev_connect_gpio_out(cpudev, j,
- qdev_get_gpio_in(gicdev, ppibase + timer_irq[j]));
+ qdev_get_gpio_in(gicdev, intidbase + timer_irq[j]));
}
qemu_irq irq = qdev_get_gpio_in(gicdev,
- ppibase + ARCH_GIC_MAINT_IRQ);
+ intidbase + ARCH_GIC_MAINT_IRQ);
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
0, irq);
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
- qdev_get_gpio_in(gicdev, ppibase + VIRTUAL_PMU_IRQ));
+ qdev_get_gpio_in(gicdev, intidbase + VIRTUAL_PMU_IRQ));
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
sysbus_connect_irq(gicbusdev, i + sc->num_cpus,
@@ -414,6 +601,10 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
sysbus_connect_irq(gicbusdev, i + 3 * sc->num_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
+ sysbus_connect_irq(gicbusdev, i + 4 * sc->num_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_NMI));
+ sysbus_connect_irq(gicbusdev, i + 5 * sc->num_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VINMI));
}
return true;
@@ -425,8 +616,10 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp)
Aspeed27x0SoCState *a = ASPEED27X0_SOC(dev);
AspeedSoCState *s = ASPEED_SOC(dev);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- AspeedINTCClass *ic = ASPEED_INTC_GET_CLASS(&a->intc);
- g_autofree char *sram_name = NULL;
+ AspeedINTCClass *ic = ASPEED_INTC_GET_CLASS(&a->intc[0]);
+ AspeedINTCClass *icio = ASPEED_INTC_GET_CLASS(&a->intc[1]);
+ g_autofree char *name = NULL;
+ qemu_irq irq;
/* Default boot region (SPI memory or ROMs) */
memory_region_init(&s->spi_boot_container, OBJECT(s),
@@ -455,31 +648,64 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp)
}
/* INTC */
- if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) {
return;
}
- aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc), 0,
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0,
sc->memmap[ASPEED_DEV_INTC]);
- /* GICINT orgates -> INTC -> GIC */
- for (i = 0; i < ic->num_ints; i++) {
- qdev_connect_gpio_out(DEVICE(&a->intc.orgates[i]), 0,
- qdev_get_gpio_in(DEVICE(&a->intc), i));
- sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc), i,
+ /* INTCIO */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) {
+ return;
+ }
+
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0,
+ sc->memmap[ASPEED_DEV_INTCIO]);
+
+ /* irq sources -> orgates -> INTC */
+ for (i = 0; i < ic->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[0]), i));
+ }
+
+ /* INTC -> GIC192 - GIC201 */
+ /* INTC -> GIC128 - GIC136 */
+ for (i = 0; i < ic->num_outpins; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i,
qdev_get_gpio_in(DEVICE(&a->gic),
- aspeed_soc_ast2700_gic_intcmap[i].irq));
+ ast2700_gic_intcmap[i].irq));
+ }
+
+ /* irq source -> orgates -> INTCIO */
+ for (i = 0; i < icio->num_inpins; i++) {
+ qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0,
+ qdev_get_gpio_in(DEVICE(&a->intc[1]), i));
+ }
+
+ /* INTCIO -> INTC */
+ for (i = 0; i < icio->num_outpins; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i,
+ qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i));
}
/* SRAM */
- sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index);
- if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size,
- errp)) {
+ name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index);
+ if (!memory_region_init_ram(&s->sram, OBJECT(s), name, sc->sram_size,
+ errp)) {
return;
}
memory_region_add_subregion(s->memory,
sc->memmap[ASPEED_DEV_SRAM], &s->sram);
+ /* VBOOTROM */
+ if (!memory_region_init_ram(&s->vbootrom, OBJECT(s), "aspeed.vbootrom",
+ 0x20000, errp)) {
+ return;
+ }
+ memory_region_add_subregion(s->memory,
+ sc->memmap[ASPEED_DEV_VBOOTROM], &s->vbootrom);
+
/* SCU */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
return;
@@ -532,6 +758,17 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp)
ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base);
}
+ /* EHCI */
+ for (i = 0; i < sc->ehcis_num; i++) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0,
+ sc->memmap[ASPEED_DEV_EHCI1 + i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i));
+ }
+
/*
* SDMC - SDRAM Memory Controller
* The SDMC controller is unlocked at SPL stage.
@@ -601,14 +838,127 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp)
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sliio), 0,
sc->memmap[ASPEED_DEV_SLIIO]);
- create_unimplemented_device("ast2700.dpmcu", 0x11000000, 0x40000);
- create_unimplemented_device("ast2700.iomem0", 0x12000000, 0x01000000);
- create_unimplemented_device("ast2700.iomem1", 0x14000000, 0x01000000);
- create_unimplemented_device("ast2700.ltpi", 0x30000000, 0x1000000);
- create_unimplemented_device("ast2700.io", 0x0, 0x4000000);
+ /* ADC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_ADC));
+
+ /* I2C */
+ object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr),
+ &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]);
+ for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) {
+ /*
+ * The AST2700 I2C controller has one INTC interrupt source per bus.
+ *
+ * For AST2700 A0:
+ * I2C bus interrupts are connected to bits 0 to 15 of the OR gate,
+ * and the OR gate output pin is connected to the GICINT130 input pin
+ * of the INTC (CPU die). The INTC output pin is then connected to
+ * the GIC.
+ *
+ * For AST2700 A1:
+ * I2C bus interrupts are connected to bits 0 to 15 of the OR gate,
+ * and the OR gate output pin is connected to the GICINT194 input pin
+ * of the INTCIO (IO die). The INTCIO output pin is then connected to
+ * an INTC (CPU die) input pin, and the INTC output pin is connected
+ * to the GIC.
+ *
+ * I2C bus 0 is connected to the OR gate at bit 0.
+ * I2C bus 15 is connected to the OR gate at bit 15.
+ */
+ irq = aspeed_soc_ast2700_get_irq_index(s, ASPEED_DEV_I2C, i);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq);
+ }
+
+ /* GPIO */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0,
+ sc->memmap[ASPEED_DEV_GPIO]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_GPIO));
+
+ /* RTC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_RTC));
+
+ /* SDHCI */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0,
+ sc->memmap[ASPEED_DEV_SDHCI]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI));
+
+ /* eMMC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->emmc), 0,
+ sc->memmap[ASPEED_DEV_EMMC]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_EMMC));
+
+ /* Timer */
+ object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu),
+ &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0,
+ sc->memmap[ASPEED_DEV_TIMER1]);
+ for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
+ irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
+ }
+
+ /* HACE */
+ object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(s->dram_mr),
+ &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) {
+ return;
+ }
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0,
+ sc->memmap[ASPEED_DEV_HACE]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_HACE));
+
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->dpmcu),
+ "aspeed.dpmcu",
+ sc->memmap[ASPEED_DEV_DPMCU],
+ AST2700_SOC_DPMCU_SIZE);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->ltpi),
+ "aspeed.ltpi",
+ sc->memmap[ASPEED_DEV_LTPI],
+ AST2700_SOC_LTPI_SIZE);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem),
+ "aspeed.io",
+ sc->memmap[ASPEED_DEV_IOMEM],
+ AST2700_SOC_IO_SIZE);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem0),
+ "aspeed.iomem0",
+ sc->memmap[ASPEED_DEV_IOMEM0],
+ AST2700_SOC_IOMEM_SIZE);
+ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem1),
+ "aspeed.iomem1",
+ sc->memmap[ASPEED_DEV_IOMEM1],
+ AST2700_SOC_IOMEM_SIZE);
}
-static void aspeed_soc_ast2700_class_init(ObjectClass *oc, void *data)
+static void aspeed_soc_ast2700a0_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a35"),
@@ -621,17 +971,45 @@ static void aspeed_soc_ast2700_class_init(ObjectClass *oc, void *data)
dc->user_creatable = false;
dc->realize = aspeed_soc_ast2700_realize;
- sc->name = "ast2700-a0";
sc->valid_cpu_types = valid_cpu_types;
sc->silicon_rev = AST2700_A0_SILICON_REV;
sc->sram_size = 0x20000;
sc->spis_num = 3;
+ sc->ehcis_num = 2;
sc->wdts_num = 8;
sc->macs_num = 1;
sc->uarts_num = 13;
sc->num_cpus = 4;
sc->uarts_base = ASPEED_DEV_UART0;
- sc->irqmap = aspeed_soc_ast2700_irqmap;
+ sc->irqmap = aspeed_soc_ast2700a0_irqmap;
+ sc->memmap = aspeed_soc_ast2700_memmap;
+ sc->get_irq = aspeed_soc_ast2700_get_irq;
+}
+
+static void aspeed_soc_ast2700a1_class_init(ObjectClass *oc, const void *data)
+{
+ static const char * const valid_cpu_types[] = {
+ ARM_CPU_TYPE_NAME("cortex-a35"),
+ NULL
+ };
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
+
+ /* Reason: The Aspeed SoC can only be instantiated from a board */
+ dc->user_creatable = false;
+ dc->realize = aspeed_soc_ast2700_realize;
+
+ sc->valid_cpu_types = valid_cpu_types;
+ sc->silicon_rev = AST2700_A1_SILICON_REV;
+ sc->sram_size = 0x20000;
+ sc->spis_num = 3;
+ sc->ehcis_num = 4;
+ sc->wdts_num = 8;
+ sc->macs_num = 3;
+ sc->uarts_num = 13;
+ sc->num_cpus = 4;
+ sc->uarts_base = ASPEED_DEV_UART0;
+ sc->irqmap = aspeed_soc_ast2700a1_irqmap;
sc->memmap = aspeed_soc_ast2700_memmap;
sc->get_irq = aspeed_soc_ast2700_get_irq;
}
@@ -646,7 +1024,13 @@ static const TypeInfo aspeed_soc_ast27x0_types[] = {
.name = "ast2700-a0",
.parent = TYPE_ASPEED27X0_SOC,
.instance_init = aspeed_soc_ast2700_init,
- .class_init = aspeed_soc_ast2700_class_init,
+ .class_init = aspeed_soc_ast2700a0_class_init,
+ },
+ {
+ .name = "ast2700-a1",
+ .parent = TYPE_ASPEED27X0_SOC,
+ .instance_init = aspeed_soc_ast2700_init,
+ .class_init = aspeed_soc_ast2700a1_class_init,
},
};
diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c
index 1e8f255..1c4ac93 100644
--- a/hw/arm/aspeed_soc_common.c
+++ b/hw/arm/aspeed_soc_common.c
@@ -15,7 +15,7 @@
#include "hw/qdev-properties.h"
#include "hw/misc/unimp.h"
#include "hw/arm/aspeed_soc.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
const char *aspeed_soc_cpu_type(AspeedSoCClass *sc)
@@ -134,20 +134,26 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
}
}
-static Property aspeed_soc_properties[] = {
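+/* Default for the base class: the SoC does not boot from eMMC */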
+static bool aspeed_soc_boot_from_emmc(AspeedSoCState *s)
+{
+ return false;
+}
+
+static const Property aspeed_soc_properties[] = {
DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION,
MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_soc_class_init(ObjectClass *oc, void *data)
+static void aspeed_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
dc->realize = aspeed_soc_realize;
device_class_set_props(dc, aspeed_soc_properties);
+ sc->boot_from_emmc = aspeed_soc_boot_from_emmc;
}
static const TypeInfo aspeed_soc_types[] = {
diff --git a/hw/arm/b-l475e-iot01a.c b/hw/arm/b-l475e-iot01a.c
index 5002a40..34ed2e0 100644
--- a/hw/arm/b-l475e-iot01a.c
+++ b/hw/arm/b-l475e-iot01a.c
@@ -82,7 +82,7 @@ static void bl475e_init(MachineState *machine)
sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal);
sc = STM32L4X5_SOC_GET_CLASS(&s->soc);
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0,
+ armv7m_load_kernel(s->soc.armv7m.cpu, machine->kernel_filename, 0,
sc->flash_size);
if (object_class_by_name(TYPE_DM163)) {
@@ -110,7 +110,7 @@ static void bl475e_init(MachineState *machine)
}
}
-static void bl475e_machine_init(ObjectClass *oc, void *data)
+static void bl475e_machine_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
static const char *machine_valid_cpu_types[] = {
diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c
index 0a4b6f2..b750a57 100644
--- a/hw/arm/bananapi_m2u.c
+++ b/hw/arm/bananapi_m2u.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
@@ -141,6 +141,7 @@ static void bpim2u_machine_init(MachineClass *mc)
mc->valid_cpu_types = valid_cpu_types;
mc->default_ram_size = 1 * GiB;
mc->default_ram_id = "bpim2u.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("bpim2u", bpim2u_machine_init)
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index ac153a9..8a1e72d 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -15,7 +15,7 @@
#include "hw/arm/bcm2835_peripherals.h"
#include "hw/misc/bcm2835_mbox_defs.h"
#include "hw/arm/raspi_platform.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Peripheral base address on the VC (GPU) system bus */
#define BCM2835_VC_PERI_BASE 0x7e000000
@@ -520,7 +520,7 @@ void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp)
create_unimp(s, &s->sdramc, "bcm2835-sdramc", SDRAMC_OFFSET, 0x100);
}
-static void bcm2835_peripherals_class_init(ObjectClass *oc, void *data)
+static void bcm2835_peripherals_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCMSocPeripheralBaseClass *bc = BCM_SOC_PERIPHERALS_BASE_CLASS(oc);
diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c
index 40a379b..cd61ba1 100644
--- a/hw/arm/bcm2836.c
+++ b/hw/arm/bcm2836.c
@@ -18,7 +18,7 @@
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"
-static Property bcm2836_enabled_cores_property =
+static const Property bcm2836_enabled_cores_property =
DEFINE_PROP_UINT32("enabled-cpus", BCM283XBaseState, enabled_cpus, 0);
static void bcm283x_base_init(Object *obj)
@@ -163,7 +163,7 @@ static void bcm2836_realize(DeviceState *dev, Error **errp)
}
}
-static void bcm283x_base_class_init(ObjectClass *oc, void *data)
+static void bcm283x_base_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -171,7 +171,7 @@ static void bcm283x_base_class_init(ObjectClass *oc, void *data)
dc->user_creatable = false;
}
-static void bcm2835_class_init(ObjectClass *oc, void *data)
+static void bcm2835_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc);
@@ -182,7 +182,7 @@ static void bcm2835_class_init(ObjectClass *oc, void *data)
dc->realize = bcm2835_realize;
};
-static void bcm2836_class_init(ObjectClass *oc, void *data)
+static void bcm2836_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc);
@@ -196,7 +196,7 @@ static void bcm2836_class_init(ObjectClass *oc, void *data)
};
#ifdef TARGET_AARCH64
-static void bcm2837_class_init(ObjectClass *oc, void *data)
+static void bcm2837_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc);
diff --git a/hw/arm/bcm2838.c b/hw/arm/bcm2838.c
index ddb7c5f..22aa754 100644
--- a/hw/arm/bcm2838.c
+++ b/hw/arm/bcm2838.c
@@ -233,7 +233,7 @@ static void bcm2838_realize(DeviceState *dev, Error **errp)
qdev_pass_gpios(DEVICE(&s->gic), DEVICE(&s->peripherals), NULL);
}
-static void bcm2838_class_init(ObjectClass *oc, void *data)
+static void bcm2838_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCM283XBaseClass *bc_base = BCM283X_BASE_CLASS(oc);
diff --git a/hw/arm/bcm2838_peripherals.c b/hw/arm/bcm2838_peripherals.c
index e28bef4..812b5b8 100644
--- a/hw/arm/bcm2838_peripherals.c
+++ b/hw/arm/bcm2838_peripherals.c
@@ -196,7 +196,7 @@ static void bcm2838_peripherals_realize(DeviceState *dev, Error **errp)
create_unimp(s_base, &s->asb, "bcm2838-asb", BRDG_OFFSET, 0x24);
}
-static void bcm2838_peripherals_class_init(ObjectClass *oc, void *data)
+static void bcm2838_peripherals_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
BCM2838PeripheralClass *bc = BCM2838_PERIPHERALS_CLASS(oc);
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index d480a7d..becd827 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -14,15 +14,18 @@
#include <libfdt.h>
#include "hw/arm/boot.h"
#include "hw/arm/linux-boot-if.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/numa.h"
+#include "cpu.h"
+#include "exec/target_page.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
+#include "system/system.h"
+#include "system/memory.h"
+#include "system/numa.h"
#include "hw/boards.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/loader.h"
#include "elf.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/units.h"
@@ -432,13 +435,12 @@ out:
return ret;
}
-static void fdt_add_psci_node(void *fdt)
+static void fdt_add_psci_node(void *fdt, ARMCPU *armcpu)
{
uint32_t cpu_suspend_fn;
uint32_t cpu_off_fn;
uint32_t cpu_on_fn;
uint32_t migrate_fn;
- ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0));
const char *psci_method;
int64_t psci_conduit;
int rc;
@@ -512,7 +514,8 @@ static void fdt_add_psci_node(void *fdt)
}
int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
- hwaddr addr_limit, AddressSpace *as, MachineState *ms)
+ hwaddr addr_limit, AddressSpace *as, MachineState *ms,
+ ARMCPU *cpu)
{
void *fdt = NULL;
int size, rc, n = 0;
@@ -524,7 +527,7 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
if (binfo->dtb_filename) {
char *filename;
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename);
+ filename = qemu_find_file(QEMU_FILE_TYPE_DTB, binfo->dtb_filename);
if (!filename) {
fprintf(stderr, "Couldn't open dtb file %s\n", binfo->dtb_filename);
goto fail;
@@ -655,14 +658,12 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
}
}
- fdt_add_psci_node(fdt);
+ fdt_add_psci_node(fdt, cpu);
if (binfo->modify_dtb) {
binfo->modify_dtb(binfo, fdt);
}
- qemu_fdt_dumpdtb(fdt, size);
-
/* Put the DTB into the memory map as a ROM image: this will ensure
* the DTB is copied again upon reset, even if addr points into RAM.
*/
@@ -743,7 +744,7 @@ static void do_cpu_reset(void *opaque)
} else {
if (arm_feature(env, ARM_FEATURE_EL3) &&
(info->secure_boot ||
- (info->secure_board_setup && cs == first_cpu))) {
+ (info->secure_board_setup && cpu == info->primary_cpu))) {
/* Start this CPU in Secure SVC */
target_el = 3;
}
@@ -751,7 +752,7 @@ static void do_cpu_reset(void *opaque)
arm_emulate_firmware_reset(cs, target_el);
- if (cs == first_cpu) {
+ if (cpu == info->primary_cpu) {
AddressSpace *as = arm_boot_address_space(cpu, info);
cpu_set_pc(cs, info->loader_start);
@@ -798,24 +799,28 @@ static ssize_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
Elf64_Ehdr h64;
} elf_header;
int data_swab = 0;
- bool big_endian;
- ssize_t ret = -1;
+ int elf_data_order;
+ ssize_t ret;
Error *err = NULL;
load_elf_hdr(info->kernel_filename, &elf_header, &elf_is64, &err);
if (err) {
+ /*
+ * If the file is not an ELF file we silently return.
+ * The caller will fall back to try other formats.
+ */
error_free(err);
- return ret;
+ return -1;
}
if (elf_is64) {
- big_endian = elf_header.h64.e_ident[EI_DATA] == ELFDATA2MSB;
- info->endianness = big_endian ? ARM_ENDIANNESS_BE8
- : ARM_ENDIANNESS_LE;
+ elf_data_order = elf_header.h64.e_ident[EI_DATA];
+ info->endianness = elf_data_order == ELFDATA2MSB ? ARM_ENDIANNESS_BE8
+ : ARM_ENDIANNESS_LE;
} else {
- big_endian = elf_header.h32.e_ident[EI_DATA] == ELFDATA2MSB;
- if (big_endian) {
+ elf_data_order = elf_header.h32.e_ident[EI_DATA];
+ if (elf_data_order == ELFDATA2MSB) {
if (bswap32(elf_header.h32.e_flags) & EF_ARM_BE8) {
info->endianness = ARM_ENDIANNESS_BE8;
} else {
@@ -835,10 +840,12 @@ static ssize_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
}
ret = load_elf_as(info->kernel_filename, NULL, NULL, NULL,
- pentry, lowaddr, highaddr, NULL, big_endian, elf_machine,
- 1, data_swab, as);
+ pentry, lowaddr, highaddr, NULL, elf_data_order,
+ elf_machine, 1, data_swab, as);
if (ret <= 0) {
/* The header loaded but the image didn't */
+ error_report("Couldn't load elf '%s': %s",
+ info->kernel_filename, load_elf_strerror(ret));
exit(1);
}
@@ -851,7 +858,7 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR;
uint64_t kernel_size = 0;
uint8_t *buffer;
- int size;
+ ssize_t size;
/* On aarch64, it's the bootloader's job to uncompress the kernel. */
size = load_image_gzipped_buffer(filename, LOAD_IMAGE_MAX_GUNZIP_BYTES,
@@ -1232,6 +1239,9 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
info->dtb_filename = ms->dtb;
info->dtb_limit = 0;
+ /* We assume the CPU passed as argument is the primary CPU. */
+ info->primary_cpu = cpu;
+
/* Load the kernel. */
if (!info->kernel_filename || info->firmware_loaded) {
arm_setup_firmware_boot(cpu, info);
@@ -1281,12 +1291,8 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
object_property_set_int(cpuobj, "psci-conduit", info->psci_conduit,
&error_abort);
- /*
- * Secondary CPUs start in PSCI powered-down state. Like the
- * code in do_cpu_reset(), we assume first_cpu is the primary
- * CPU.
- */
- if (cs != first_cpu) {
+ /* Secondary CPUs start in PSCI powered-down state. */
+ if (ARM_CPU(cs) != info->primary_cpu) {
object_property_set_bool(cpuobj, "start-powered-off", true,
&error_abort);
}
@@ -1321,7 +1327,8 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
* decided whether to enable PSCI and set the psci-conduit CPU properties.
*/
if (!info->skip_dtb_autoload && have_dtb(info)) {
- if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) {
+ if (arm_load_dtb(info->dtb_start, info, info->dtb_limit,
+ as, ms, cpu) < 0) {
exit(1);
}
}
diff --git a/hw/arm/collie.c b/hw/arm/collie.c
index eaa5c52..93bb190 100644
--- a/hw/arm/collie.c
+++ b/hw/arm/collie.c
@@ -16,7 +16,7 @@
#include "strongarm.h"
#include "hw/arm/boot.h"
#include "hw/block/flash.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#include "qemu/error-report.h"
@@ -69,7 +69,7 @@ static void collie_init(MachineState *machine)
arm_load_kernel(cms->sa1110->cpu, machine, &collie_binfo);
}
-static void collie_machine_class_init(ObjectClass *oc, void *data)
+static void collie_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index b976727..d665d4e 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -122,6 +122,7 @@ static void cubieboard_machine_init(MachineClass *mc)
mc->units_per_default_bus = 1;
mc->ignore_memory_transaction_failures = true;
mc->default_ram_id = "cubieboard.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("cubieboard", cubieboard_machine_init)
diff --git a/hw/arm/digic.c b/hw/arm/digic.c
index 6df5547..d831bc9 100644
--- a/hw/arm/digic.c
+++ b/hw/arm/digic.c
@@ -25,7 +25,7 @@
#include "qemu/module.h"
#include "hw/arm/digic.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#define DIGIC4_TIMER_BASE(n) (0xc0210000 + (n) * 0x100)
@@ -79,7 +79,7 @@ static void digic_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(sbd, 0, DIGIC_UART_BASE);
}
-static void digic_class_init(ObjectClass *oc, void *data)
+static void digic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c
index 4093af0..466b8b8 100644
--- a/hw/arm/digic_boards.c
+++ b/hw/arm/digic_boards.c
@@ -31,7 +31,7 @@
#include "hw/arm/digic.h"
#include "hw/block/flash.h"
#include "hw/loader.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
@@ -80,7 +80,7 @@ static void digic4_board_init(MachineState *machine, DigicBoard *board)
static void digic_load_rom(DigicState *s, hwaddr addr,
hwaddr max_size, const char *filename)
{
- target_long rom_size;
+ ssize_t rom_size;
if (qtest_enabled()) {
/* qtest runs no code so don't attempt a ROM load which
diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c
index e3f1de2..76001ff 100644
--- a/hw/arm/exynos4210.c
+++ b/hw/arm/exynos4210.c
@@ -27,8 +27,8 @@
#include "cpu.h"
#include "hw/cpu/a9mpcore.h"
#include "hw/irq.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
+#include "system/blockdev.h"
+#include "system/system.h"
#include "hw/sysbus.h"
#include "hw/arm/boot.h"
#include "hw/loader.h"
@@ -103,6 +103,8 @@
#define EXYNOS4210_PL330_BASE1_ADDR 0x12690000
#define EXYNOS4210_PL330_BASE2_ADDR 0x12850000
+#define GIC_EXT_IRQS 64 /* FIXME: verify for this SoC */
+
enum ExtGicId {
EXT_GIC_ID_MDMA_LCD0 = 66,
EXT_GIC_ID_PDMA0,
@@ -394,7 +396,8 @@ static void exynos4210_init_board_irqs(Exynos4210State *s)
}
if (irq_id) {
qdev_connect_gpio_out(splitter, splitin,
- qdev_get_gpio_in(extgicdev, irq_id - 32));
+ qdev_get_gpio_in(extgicdev,
+ irq_id - GIC_INTERNAL));
}
}
for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) {
@@ -421,7 +424,8 @@ static void exynos4210_init_board_irqs(Exynos4210State *s)
s->irq_table[n] = qdev_get_gpio_in(splitter, 0);
qdev_connect_gpio_out(splitter, 0, qdev_get_gpio_in(intcdev, n));
qdev_connect_gpio_out(splitter, 1,
- qdev_get_gpio_in(extgicdev, irq_id - 32));
+ qdev_get_gpio_in(extgicdev,
+ irq_id - GIC_INTERNAL));
} else {
s->irq_table[n] = qdev_get_gpio_in(intcdev, n);
}
@@ -458,7 +462,6 @@ static uint64_t exynos4210_chipid_and_omr_read(void *opaque, hwaddr offset,
static void exynos4210_chipid_and_omr_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
- return;
}
static const MemoryRegionOps exynos4210_chipid_and_omr_ops = {
@@ -586,6 +589,8 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp)
/* Private memory region and Internal GIC */
qdev_prop_set_uint32(DEVICE(&s->a9mpcore), "num-cpu", EXYNOS4210_NCPUS);
+ qdev_prop_set_uint32(DEVICE(&s->a9mpcore), "num-irq",
+ GIC_EXT_IRQS + GIC_INTERNAL);
busdev = SYS_BUS_DEVICE(&s->a9mpcore);
sysbus_realize(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR);
@@ -837,7 +842,7 @@ static void exynos4210_init(Object *obj)
TYPE_EXYNOS4210_COMBINER);
}
-static void exynos4210_class_init(ObjectClass *klass, void *data)
+static void exynos4210_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c
index 2410e2a..7304974 100644
--- a/hw/arm/exynos4_boards.c
+++ b/hw/arm/exynos4_boards.c
@@ -28,7 +28,7 @@
#include "hw/sysbus.h"
#include "net/net.h"
#include "hw/arm/boot.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/arm/exynos4210.h"
#include "hw/net/lan9118.h"
#include "hw/qdev-properties.h"
@@ -154,7 +154,7 @@ static const char * const valid_cpu_types[] = {
NULL
};
-static void nuri_class_init(ObjectClass *oc, void *data)
+static void nuri_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -165,6 +165,7 @@ static void nuri_class_init(ObjectClass *oc, void *data)
mc->min_cpus = EXYNOS4210_NCPUS;
mc->default_cpus = EXYNOS4210_NCPUS;
mc->ignore_memory_transaction_failures = true;
+ mc->auto_create_sdcard = true;
}
static const TypeInfo nuri_type = {
@@ -173,7 +174,7 @@ static const TypeInfo nuri_type = {
.class_init = nuri_class_init,
};
-static void smdkc210_class_init(ObjectClass *oc, void *data)
+static void smdkc210_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -184,6 +185,7 @@ static void smdkc210_class_init(ObjectClass *oc, void *data)
mc->min_cpus = EXYNOS4210_NCPUS;
mc->default_cpus = EXYNOS4210_NCPUS;
mc->ignore_memory_transaction_failures = true;
+ mc->auto_create_sdcard = true;
}
static const TypeInfo smdkc210_type = {
diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c
index c9964bd..c14fc2e 100644
--- a/hw/arm/fby35.c
+++ b/hw/arm/fby35.c
@@ -8,8 +8,8 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
+#include "system/system.h"
+#include "system/block-backend.h"
#include "hw/boards.h"
#include "hw/qdev-clock.h"
#include "hw/arm/aspeed_soc.h"
@@ -77,6 +77,7 @@ static void fby35_bmc_init(Fby35State *s)
memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory",
UINT64_MAX);
+ memory_region_add_subregion(get_system_memory(), 0, &s->bmc_memory);
memory_region_init_ram(&s->bmc_dram, OBJECT(&s->bmc), "bmc-dram",
FBY35_BMC_RAM_SIZE, &error_abort);
@@ -162,7 +163,7 @@ static void fby35_instance_init(Object *obj)
FBY35(obj)->mmio_exec = false;
}
-static void fby35_class_init(ObjectClass *oc, void *data)
+static void fby35_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -170,6 +171,7 @@ static void fby35_class_init(ObjectClass *oc, void *data)
mc->init = fby35_init;
mc->no_floppy = 1;
mc->no_cdrom = 1;
+ mc->auto_create_sdcard = true;
mc->min_cpus = mc->max_cpus = mc->default_cpus = 3;
object_class_property_add_bool(oc, "execute-in-place",
diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c
index 5ed87ed..7aad635 100644
--- a/hw/arm/fsl-imx25.c
+++ b/hw/arm/fsl-imx25.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/arm/fsl-imx25.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/qdev-properties.h"
#include "chardev/char.h"
#include "target/arm/cpu-qom.h"
@@ -243,8 +243,6 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp)
&error_abort);
object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg",
IMX25_ESDHC_CAPABILITIES, &error_abort);
- object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor",
- SDHCI_VENDOR_IMX, &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) {
return;
}
@@ -309,12 +307,11 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp)
&s->iram_alias);
}
-static Property fsl_imx25_properties[] = {
+static const Property fsl_imx25_properties[] = {
DEFINE_PROP_UINT32("fec-phy-num", FslIMX25State, phy_num, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void fsl_imx25_class_init(ObjectClass *oc, void *data)
+static void fsl_imx25_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c
index 4b8d9b8..e9f70ad 100644
--- a/hw/arm/fsl-imx31.c
+++ b/hw/arm/fsl-imx31.c
@@ -22,8 +22,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/arm/fsl-imx31.h"
-#include "sysemu/sysemu.h"
-#include "exec/address-spaces.h"
+#include "system/system.h"
+#include "system/address-spaces.h"
#include "hw/qdev-properties.h"
#include "chardev/char.h"
#include "target/arm/cpu-qom.h"
@@ -218,7 +218,7 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
&s->iram_alias);
}
-static void fsl_imx31_class_init(ObjectClass *oc, void *data)
+static void fsl_imx31_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
index 85748cb..f3a6002 100644
--- a/hw/arm/fsl-imx6.c
+++ b/hw/arm/fsl-imx6.c
@@ -26,7 +26,7 @@
#include "hw/usb/imx-usb-phy.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "chardev/char.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
@@ -106,6 +106,8 @@ static void fsl_imx6_init(Object *obj)
object_initialize_child(obj, "eth", &s->eth, TYPE_IMX_ENET);
object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST);
+ object_initialize_child(obj, "pcie4-msi-irq", &s->pcie4_msi_irq,
+ TYPE_OR_IRQ);
}
static void fsl_imx6_realize(DeviceState *dev, Error **errp)
@@ -115,6 +117,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
uint16_t i;
qemu_irq irq;
unsigned int smp_cpus = ms->smp.cpus;
+ DeviceState *mpcore = DEVICE(&s->a9mpcore);
+ DeviceState *gic;
if (smp_cpus > FSL_IMX6_NUM_CPUS) {
error_setg(errp, "%s: Only %d CPUs are supported (%d requested)",
@@ -141,21 +145,21 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
}
}
- object_property_set_int(OBJECT(&s->a9mpcore), "num-cpu", smp_cpus,
- &error_abort);
+ object_property_set_int(OBJECT(mpcore), "num-cpu", smp_cpus, &error_abort);
- object_property_set_int(OBJECT(&s->a9mpcore), "num-irq",
+ object_property_set_int(OBJECT(mpcore), "num-irq",
FSL_IMX6_MAX_IRQ + GIC_INTERNAL, &error_abort);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->a9mpcore), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(mpcore), errp)) {
return;
}
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, FSL_IMX6_A9MPCORE_ADDR);
+ sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX6_A9MPCORE_ADDR);
+ gic = mpcore;
for (i = 0; i < smp_cpus; i++) {
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i,
+ sysbus_connect_irq(SYS_BUS_DEVICE(gic), i,
qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ));
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + smp_cpus,
+ sysbus_connect_irq(SYS_BUS_DEVICE(gic), i + smp_cpus,
qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ));
}
@@ -193,8 +197,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- serial_table[i].irq));
+ qdev_get_gpio_in(gic, serial_table[i].irq));
}
s->gpt.ccm = IMX_CCM(&s->ccm);
@@ -205,8 +208,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX6_GPT_ADDR);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- FSL_IMX6_GPT_IRQ));
+ qdev_get_gpio_in(gic, FSL_IMX6_GPT_IRQ));
/* Initialize all EPIT timers */
for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) {
@@ -226,8 +228,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- epit_table[i].irq));
+ qdev_get_gpio_in(gic, epit_table[i].irq));
}
/* Initialize all I2C */
@@ -247,8 +248,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- i2c_table[i].irq));
+ qdev_get_gpio_in(gic, i2c_table[i].irq));
}
/* Initialize all GPIOs */
@@ -305,11 +305,9 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- gpio_table[i].irq_low));
+ qdev_get_gpio_in(gic, gpio_table[i].irq_low));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- gpio_table[i].irq_high));
+ qdev_get_gpio_in(gic, gpio_table[i].irq_high));
}
/* Initialize all SDHC */
@@ -329,15 +327,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
&error_abort);
object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg",
IMX6_ESDHC_CAPABILITIES, &error_abort);
- object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor",
- SDHCI_VENDOR_IMX, &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) {
return;
}
sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- esdhc_table[i].irq));
+ qdev_get_gpio_in(gic, esdhc_table[i].irq));
}
/* USB */
@@ -358,8 +353,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
FSL_IMX6_USBOH3_USB_ADDR + i * 0x200);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- FSL_IMX6_USBn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6_USBn_IRQ[i]));
}
/* Initialize all ECSPI */
@@ -382,8 +376,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- spi_table[i].irq));
+ qdev_get_gpio_in(gic, spi_table[i].irq));
}
object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num,
@@ -394,11 +387,9 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
}
sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth), 0, FSL_IMX6_ENET_ADDR);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- FSL_IMX6_ENET_MAC_IRQ));
+ qdev_get_gpio_in(gic, FSL_IMX6_ENET_MAC_IRQ));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 1,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- FSL_IMX6_ENET_MAC_1588_IRQ));
+ qdev_get_gpio_in(gic, FSL_IMX6_ENET_MAC_1588_IRQ));
/*
* SNVS
@@ -425,8 +416,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX6_WDOGn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a9mpcore),
- FSL_IMX6_WDOGn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6_WDOGn_IRQ[i]));
}
/*
@@ -435,14 +425,23 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX6_PCIe_REG_ADDR);
+ object_property_set_int(OBJECT(&s->pcie4_msi_irq), "num-lines", 2,
+ &error_abort);
+ qdev_realize(DEVICE(&s->pcie4_msi_irq), NULL, &error_abort);
+
+ irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE4_MSI_IRQ);
+ qdev_connect_gpio_out(DEVICE(&s->pcie4_msi_irq), 0, irq);
+
irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE1_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq);
irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE2_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq);
irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE3_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq);
- irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE4_IRQ);
+ irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 1);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4, irq);
/*
* PCIe PHY
@@ -481,12 +480,11 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
&s->ocram_alias);
}
-static Property fsl_imx6_properties[] = {
+static const Property fsl_imx6_properties[] = {
DEFINE_PROP_UINT32("fec-phy-num", FslIMX6State, phy_num, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void fsl_imx6_class_init(ObjectClass *oc, void *data)
+static void fsl_imx6_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
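The i.MX6 PCIe change above routes the legacy INTD interrupt and the MSI interrupt through a two-input TYPE_OR_IRQ gate before they reach GIC input FSL_IMX6_PCIE4_MSI_IRQ, and wires a fifth PCIe sysbus IRQ (index 4) as the MSI source. A condensed sketch of that wiring, using only the calls visible in the hunks; the s->pcie4_msi_irq field comes from the matching header change, which is outside this excerpt:

    /* Sketch: fold two interrupt sources onto one GIC input via TYPE_OR_IRQ. */

    /* in instance init */
    object_initialize_child(obj, "pcie4-msi-irq", &s->pcie4_msi_irq, TYPE_OR_IRQ);

    /* in realize: a 2-input OR gate feeding a single GIC line */
    object_property_set_int(OBJECT(&s->pcie4_msi_irq), "num-lines", 2, &error_abort);
    qdev_realize(DEVICE(&s->pcie4_msi_irq), NULL, &error_abort);
    qdev_connect_gpio_out(DEVICE(&s->pcie4_msi_irq), 0,
                          qdev_get_gpio_in(DEVICE(&s->a9mpcore),
                                           FSL_IMX6_PCIE4_MSI_IRQ));

    /* PCIe INTD (sysbus IRQ 3) and MSI (sysbus IRQ 4) each drive one OR input */
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3,
                       qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4,
                       qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 1));

The same gate-and-fifth-IRQ arrangement reappears in the fsl-imx7.c and fsl-imx8mp.c changes below.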
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
index 19f4435..883c7fc 100644
--- a/hw/arm/fsl-imx6ul.c
+++ b/hw/arm/fsl-imx6ul.c
@@ -22,7 +22,7 @@
#include "hw/misc/unimp.h"
#include "hw/usb/imx-usb-phy.h"
#include "hw/boards.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "target/arm/cpu-qom.h"
@@ -157,10 +157,12 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
FslIMX6ULState *s = FSL_IMX6UL(dev);
+ DeviceState *mpcore = DEVICE(&s->a7mpcore);
int i;
char name[NAME_SIZE];
- SysBusDevice *sbd;
- DeviceState *d;
+ DeviceState *gic;
+ SysBusDevice *gicsbd;
+ DeviceState *cpu;
if (ms->smp.cpus > 1) {
error_setg(errp, "%s: Only a single CPU is supported (%d requested)",
@@ -173,19 +175,19 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
/*
* A7MPCORE
*/
- object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", 1, &error_abort);
- object_property_set_int(OBJECT(&s->a7mpcore), "num-irq",
+ object_property_set_int(OBJECT(mpcore), "num-cpu", 1, &error_abort);
+ object_property_set_int(OBJECT(mpcore), "num-irq",
FSL_IMX6UL_MAX_IRQ + GIC_INTERNAL, &error_abort);
- sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort);
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR);
+ sysbus_realize(SYS_BUS_DEVICE(mpcore), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR);
- sbd = SYS_BUS_DEVICE(&s->a7mpcore);
- d = DEVICE(&s->cpu);
-
- sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(d, ARM_CPU_IRQ));
- sysbus_connect_irq(sbd, 1, qdev_get_gpio_in(d, ARM_CPU_FIQ));
- sysbus_connect_irq(sbd, 2, qdev_get_gpio_in(d, ARM_CPU_VIRQ));
- sysbus_connect_irq(sbd, 3, qdev_get_gpio_in(d, ARM_CPU_VFIQ));
+ gic = mpcore;
+ gicsbd = SYS_BUS_DEVICE(gic);
+ cpu = DEVICE(&s->cpu);
+ sysbus_connect_irq(gicsbd, 0, qdev_get_gpio_in(cpu, ARM_CPU_IRQ));
+ sysbus_connect_irq(gicsbd, 1, qdev_get_gpio_in(cpu, ARM_CPU_FIQ));
+ sysbus_connect_irq(gicsbd, 2, qdev_get_gpio_in(cpu, ARM_CPU_VIRQ));
+ sysbus_connect_irq(gicsbd, 3, qdev_get_gpio_in(cpu, ARM_CPU_VFIQ));
/*
* A7MPCORE DAP
@@ -244,8 +246,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_GPTn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_GPTn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_GPTn_IRQ[i]));
}
/*
@@ -269,8 +270,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_EPITn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_EPITn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_EPITn_IRQ[i]));
}
/*
@@ -307,12 +307,10 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_GPIOn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_GPIOn_LOW_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_GPIOn_LOW_IRQ[i]));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_GPIOn_HIGH_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_GPIOn_HIGH_IRQ[i]));
}
/*
@@ -366,8 +364,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_SPIn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_SPIn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_SPIn_IRQ[i]));
}
/*
@@ -392,8 +389,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX6UL_I2Cn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_I2Cn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_I2Cn_IRQ[i]));
}
/*
@@ -430,8 +426,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_UARTn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_UARTn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_UARTn_IRQ[i]));
}
/*
@@ -480,12 +475,10 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_ENETn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_ENETn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_ENETn_IRQ[i]));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_ENETn_TIMER_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_ENETn_TIMER_IRQ[i]));
}
/*
@@ -521,8 +514,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
FSL_IMX6UL_USB02_USBn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_USBn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_USBn_IRQ[i]));
}
/*
@@ -539,16 +531,13 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_USDHC2_IRQ,
};
- object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor",
- SDHCI_VENDOR_IMX, &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0,
FSL_IMX6UL_USDHCn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_USDHCn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_USDHCn_IRQ[i]));
}
/*
@@ -580,8 +569,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0,
FSL_IMX6UL_WDOGn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX6UL_WDOGn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX6UL_WDOGn_IRQ[i]));
}
/*
@@ -718,17 +706,16 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
FSL_IMX6UL_OCRAM_ALIAS_ADDR, &s->ocram_alias);
}
-static Property fsl_imx6ul_properties[] = {
+static const Property fsl_imx6ul_properties[] = {
DEFINE_PROP_UINT32("fec1-phy-num", FslIMX6ULState, phy_num[0], 0),
DEFINE_PROP_UINT32("fec2-phy-num", FslIMX6ULState, phy_num[1], 1),
DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX6ULState, phy_connected[0],
true),
DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX6ULState, phy_connected[1],
true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void fsl_imx6ul_class_init(ObjectClass *oc, void *data)
+static void fsl_imx6ul_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
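As in fsl-imx6.c above, the property table here becomes a const Property array and drops the DEFINE_PROP_END_OF_LIST() terminator; device_class_set_props() now takes the table's length from the array itself rather than scanning for a sentinel entry (the mechanism for that is outside this diff). A small sketch of the sentinel-free convention, with a hypothetical MySocState device:

    /* Sketch: the const, sentinel-free property-table style used in this series. */
    static const Property my_soc_properties[] = {
        DEFINE_PROP_UINT32("fec1-phy-num", MySocState, phy_num, 0),
        DEFINE_PROP_BOOL("fec1-phy-connected", MySocState, phy_connected, true),
        /* no DEFINE_PROP_END_OF_LIST() terminator */
    };

    static void my_soc_class_init(ObjectClass *oc, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(oc);

        device_class_set_props(dc, my_soc_properties);
    }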
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
index 9f2ef34..02f7602 100644
--- a/hw/arm/fsl-imx7.c
+++ b/hw/arm/fsl-imx7.c
@@ -23,7 +23,7 @@
#include "hw/arm/fsl-imx7.h"
#include "hw/misc/unimp.h"
#include "hw/boards.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "target/arm/cpu-qom.h"
@@ -150,6 +150,8 @@ static void fsl_imx7_init(Object *obj)
* PCIE
*/
object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST);
+ object_initialize_child(obj, "pcie4-msi-irq", &s->pcie4_msi_irq,
+ TYPE_OR_IRQ);
/*
* USBs
@@ -164,7 +166,8 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
FslIMX7State *s = FSL_IMX7(dev);
- Object *o;
+ DeviceState *mpcore = DEVICE(&s->a7mpcore);
+ DeviceState *gic;
int i;
qemu_irq irq;
char name[NAME_SIZE];
@@ -180,7 +183,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
* CPUs
*/
for (i = 0; i < smp_cpus; i++) {
- o = OBJECT(&s->cpu[i]);
+ Object *o = OBJECT(&s->cpu[i]);
/* On uniprocessor, the CBAR is set to 0 */
if (smp_cpus > 1) {
@@ -203,16 +206,15 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
/*
* A7MPCORE
*/
- object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", smp_cpus,
- &error_abort);
- object_property_set_int(OBJECT(&s->a7mpcore), "num-irq",
+ object_property_set_int(OBJECT(mpcore), "num-cpu", smp_cpus, &error_abort);
+ object_property_set_int(OBJECT(mpcore), "num-irq",
FSL_IMX7_MAX_IRQ + GIC_INTERNAL, &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(mpcore), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX7_A7MPCORE_ADDR);
- sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort);
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX7_A7MPCORE_ADDR);
-
+ gic = mpcore;
for (i = 0; i < smp_cpus; i++) {
- SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(gic);
DeviceState *d = DEVICE(qemu_get_cpu(i));
irq = qdev_get_gpio_in(d, ARM_CPU_IRQ);
@@ -253,8 +255,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, FSL_IMX7_GPTn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_GPTn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_GPTn_IRQ[i]));
}
/*
@@ -296,12 +297,10 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
FSL_IMX7_GPIOn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_GPIOn_LOW_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_GPIOn_LOW_IRQ[i]));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_GPIOn_HIGH_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_GPIOn_HIGH_IRQ[i]));
}
/*
@@ -353,8 +352,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0,
FSL_IMX7_SPIn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_SPIn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_SPIn_IRQ[i]));
}
/*
@@ -379,8 +377,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX7_I2Cn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_I2Cn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_I2Cn_IRQ[i]));
}
/*
@@ -414,7 +411,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, FSL_IMX7_UARTn_ADDR[i]);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_UARTn_IRQ[i]);
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_UARTn_IRQ[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, irq);
}
@@ -452,9 +449,9 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, FSL_IMX7_ENETn_ADDR[i]);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 0));
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_ENET_IRQ(i, 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, irq);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 3));
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_ENET_IRQ(i, 3));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, irq);
}
@@ -474,14 +471,12 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
FSL_IMX7_USDHC3_IRQ,
};
- object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor",
- SDHCI_VENDOR_IMX, &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0,
FSL_IMX7_USDHCn_ADDR[i]);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USDHCn_IRQ[i]);
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_USDHCn_IRQ[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, irq);
}
@@ -520,8 +515,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX7_WDOGn_ADDR[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- FSL_IMX7_WDOGn_IRQ[i]));
+ qdev_get_gpio_in(gic, FSL_IMX7_WDOGn_IRQ[i]));
}
/*
@@ -597,14 +591,23 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTA_IRQ);
+ object_property_set_int(OBJECT(&s->pcie4_msi_irq), "num-lines", 2,
+ &error_abort);
+ qdev_realize(DEVICE(&s->pcie4_msi_irq), NULL, &error_abort);
+
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_MSI_IRQ);
+ qdev_connect_gpio_out(DEVICE(&s->pcie4_msi_irq), 0, irq);
+
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTA_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTB_IRQ);
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTB_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTC_IRQ);
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTC_IRQ);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ);
+ irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 1);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4, irq);
/*
* USBs
@@ -632,7 +635,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
FSL_IMX7_USBn_ADDR[i]);
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USBn_IRQ[i]);
+ irq = qdev_get_gpio_in(gic, FSL_IMX7_USBn_IRQ[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, irq);
snprintf(name, NAME_SIZE, "usbmisc%d", i);
@@ -736,17 +739,16 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
&s->caam);
}
-static Property fsl_imx7_properties[] = {
+static const Property fsl_imx7_properties[] = {
DEFINE_PROP_UINT32("fec1-phy-num", FslIMX7State, phy_num[0], 0),
DEFINE_PROP_UINT32("fec2-phy-num", FslIMX7State, phy_num[1], 1),
DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX7State, phy_connected[0],
true),
DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX7State, phy_connected[1],
true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void fsl_imx7_class_init(ObjectClass *oc, void *data)
+static void fsl_imx7_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
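The i.MX7 realize function now goes through a local gic alias when wiring peripherals, but the per-CPU wiring convention is unchanged: GIC sysbus output n drives CPU n's IRQ line and output n + smp_cpus drives its FIQ line (the i.MX6UL hunk above additionally routes VIRQ and VFIQ for its single CPU). A compact sketch of that loop, built only from calls present in the hunks:

    /* Sketch: GIC outputs 0..n-1 feed per-CPU IRQ, n..2n-1 feed per-CPU FIQ. */
    DeviceState *gic = DEVICE(&s->a7mpcore);     /* local alias, as in the diff */
    SysBusDevice *gicsbd = SYS_BUS_DEVICE(gic);

    for (i = 0; i < smp_cpus; i++) {
        DeviceState *cpu = DEVICE(qemu_get_cpu(i));

        sysbus_connect_irq(gicsbd, i,
                           qdev_get_gpio_in(cpu, ARM_CPU_IRQ));
        sysbus_connect_irq(gicsbd, i + smp_cpus,
                           qdev_get_gpio_in(cpu, ARM_CPU_FIQ));
    }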
diff --git a/hw/arm/fsl-imx8mp.c b/hw/arm/fsl-imx8mp.c
new file mode 100644
index 0000000..23e662c
--- /dev/null
+++ b/hw/arm/fsl-imx8mp.c
@@ -0,0 +1,712 @@
+/*
+ * i.MX 8M Plus SoC Implementation
+ *
+ * Based on hw/arm/fsl-imx6.c
+ *
+ * Copyright (c) 2024, Bernhard Beschow <shentey@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/address-spaces.h"
+#include "hw/arm/bsa.h"
+#include "hw/arm/fsl-imx8mp.h"
+#include "hw/intc/arm_gicv3.h"
+#include "hw/misc/unimp.h"
+#include "hw/boards.h"
+#include "system/system.h"
+#include "target/arm/cpu-qom.h"
+#include "qapi/error.h"
+#include "qobject/qlist.h"
+
+static const struct {
+ hwaddr addr;
+ size_t size;
+ const char *name;
+} fsl_imx8mp_memmap[] = {
+ [FSL_IMX8MP_RAM] = { FSL_IMX8MP_RAM_START, FSL_IMX8MP_RAM_SIZE_MAX, "ram" },
+ [FSL_IMX8MP_DDR_PHY_BROADCAST] = { 0x3dc00000, 4 * MiB, "ddr_phy_broadcast" },
+ [FSL_IMX8MP_DDR_PERF_MON] = { 0x3d800000, 4 * MiB, "ddr_perf_mon" },
+ [FSL_IMX8MP_DDR_CTL] = { 0x3d400000, 4 * MiB, "ddr_ctl" },
+ [FSL_IMX8MP_DDR_BLK_CTRL] = { 0x3d000000, 1 * MiB, "ddr_blk_ctrl" },
+ [FSL_IMX8MP_DDR_PHY] = { 0x3c000000, 16 * MiB, "ddr_phy" },
+ [FSL_IMX8MP_AUDIO_DSP] = { 0x3b000000, 16 * MiB, "audio_dsp" },
+ [FSL_IMX8MP_GIC_DIST] = { 0x38800000, 512 * KiB, "gic_dist" },
+ [FSL_IMX8MP_GIC_REDIST] = { 0x38880000, 512 * KiB, "gic_redist" },
+ [FSL_IMX8MP_NPU] = { 0x38500000, 2 * MiB, "npu" },
+ [FSL_IMX8MP_VPU] = { 0x38340000, 2 * MiB, "vpu" },
+ [FSL_IMX8MP_VPU_BLK_CTRL] = { 0x38330000, 2 * MiB, "vpu_blk_ctrl" },
+ [FSL_IMX8MP_VPU_VC8000E_ENCODER] = { 0x38320000, 2 * MiB, "vpu_vc8000e_encoder" },
+ [FSL_IMX8MP_VPU_G2_DECODER] = { 0x38310000, 2 * MiB, "vpu_g2_decoder" },
+ [FSL_IMX8MP_VPU_G1_DECODER] = { 0x38300000, 2 * MiB, "vpu_g1_decoder" },
+ [FSL_IMX8MP_USB2_GLUE] = { 0x382f0000, 0x100, "usb2_glue" },
+ [FSL_IMX8MP_USB2_OTG] = { 0x3820cc00, 0x100, "usb2_otg" },
+ [FSL_IMX8MP_USB2_DEV] = { 0x3820c700, 0x500, "usb2_dev" },
+ [FSL_IMX8MP_USB2] = { 0x38200000, 0xc700, "usb2" },
+ [FSL_IMX8MP_USB1_GLUE] = { 0x381f0000, 0x100, "usb1_glue" },
+ [FSL_IMX8MP_USB1_OTG] = { 0x3810cc00, 0x100, "usb1_otg" },
+ [FSL_IMX8MP_USB1_DEV] = { 0x3810c700, 0x500, "usb1_dev" },
+ [FSL_IMX8MP_USB1] = { 0x38100000, 0xc700, "usb1" },
+ [FSL_IMX8MP_GPU2D] = { 0x38008000, 32 * KiB, "gpu2d" },
+ [FSL_IMX8MP_GPU3D] = { 0x38000000, 32 * KiB, "gpu3d" },
+ [FSL_IMX8MP_QSPI1_RX_BUFFER] = { 0x34000000, 32 * MiB, "qspi1_rx_buffer" },
+ [FSL_IMX8MP_PCIE1] = { 0x33800000, 4 * MiB, "pcie1" },
+ [FSL_IMX8MP_QSPI1_TX_BUFFER] = { 0x33008000, 32 * KiB, "qspi1_tx_buffer" },
+ [FSL_IMX8MP_APBH_DMA] = { 0x33000000, 32 * KiB, "apbh_dma" },
+
+ /* AIPS-5 Begin */
+ [FSL_IMX8MP_MU_3_B] = { 0x30e90000, 64 * KiB, "mu_3_b" },
+ [FSL_IMX8MP_MU_3_A] = { 0x30e80000, 64 * KiB, "mu_3_a" },
+ [FSL_IMX8MP_MU_2_B] = { 0x30e70000, 64 * KiB, "mu_2_b" },
+ [FSL_IMX8MP_MU_2_A] = { 0x30e60000, 64 * KiB, "mu_2_a" },
+ [FSL_IMX8MP_EDMA_CHANNELS] = { 0x30e40000, 128 * KiB, "edma_channels" },
+ [FSL_IMX8MP_EDMA_MANAGEMENT_PAGE] = { 0x30e30000, 64 * KiB, "edma_management_page" },
+ [FSL_IMX8MP_AUDIO_BLK_CTRL] = { 0x30e20000, 64 * KiB, "audio_blk_ctrl" },
+ [FSL_IMX8MP_SDMA2] = { 0x30e10000, 64 * KiB, "sdma2" },
+ [FSL_IMX8MP_SDMA3] = { 0x30e00000, 64 * KiB, "sdma3" },
+ [FSL_IMX8MP_AIPS5_CONFIGURATION] = { 0x30df0000, 64 * KiB, "aips5_configuration" },
+ [FSL_IMX8MP_SPBA2] = { 0x30cf0000, 64 * KiB, "spba2" },
+ [FSL_IMX8MP_AUDIO_XCVR_RX] = { 0x30cc0000, 64 * KiB, "audio_xcvr_rx" },
+ [FSL_IMX8MP_HDMI_TX_AUDLNK_MSTR] = { 0x30cb0000, 64 * KiB, "hdmi_tx_audlnk_mstr" },
+ [FSL_IMX8MP_PDM] = { 0x30ca0000, 64 * KiB, "pdm" },
+ [FSL_IMX8MP_ASRC] = { 0x30c90000, 64 * KiB, "asrc" },
+ [FSL_IMX8MP_SAI7] = { 0x30c80000, 64 * KiB, "sai7" },
+ [FSL_IMX8MP_SAI6] = { 0x30c60000, 64 * KiB, "sai6" },
+ [FSL_IMX8MP_SAI5] = { 0x30c50000, 64 * KiB, "sai5" },
+ [FSL_IMX8MP_SAI3] = { 0x30c30000, 64 * KiB, "sai3" },
+ [FSL_IMX8MP_SAI2] = { 0x30c20000, 64 * KiB, "sai2" },
+ [FSL_IMX8MP_SAI1] = { 0x30c10000, 64 * KiB, "sai1" },
+ /* AIPS-5 End */
+
+ /* AIPS-4 Begin */
+ [FSL_IMX8MP_HDMI_TX] = { 0x32fc0000, 128 * KiB, "hdmi_tx" },
+ [FSL_IMX8MP_TZASC] = { 0x32f80000, 64 * KiB, "tzasc" },
+ [FSL_IMX8MP_HSIO_BLK_CTL] = { 0x32f10000, 64 * KiB, "hsio_blk_ctl" },
+ [FSL_IMX8MP_PCIE_PHY1] = { 0x32f00000, 64 * KiB, "pcie_phy1" },
+ [FSL_IMX8MP_MEDIA_BLK_CTL] = { 0x32ec0000, 64 * KiB, "media_blk_ctl" },
+ [FSL_IMX8MP_LCDIF2] = { 0x32e90000, 64 * KiB, "lcdif2" },
+ [FSL_IMX8MP_LCDIF1] = { 0x32e80000, 64 * KiB, "lcdif1" },
+ [FSL_IMX8MP_MIPI_DSI1] = { 0x32e60000, 64 * KiB, "mipi_dsi1" },
+ [FSL_IMX8MP_MIPI_CSI2] = { 0x32e50000, 64 * KiB, "mipi_csi2" },
+ [FSL_IMX8MP_MIPI_CSI1] = { 0x32e40000, 64 * KiB, "mipi_csi1" },
+ [FSL_IMX8MP_IPS_DEWARP] = { 0x32e30000, 64 * KiB, "ips_dewarp" },
+ [FSL_IMX8MP_ISP2] = { 0x32e20000, 64 * KiB, "isp2" },
+ [FSL_IMX8MP_ISP1] = { 0x32e10000, 64 * KiB, "isp1" },
+ [FSL_IMX8MP_ISI] = { 0x32e00000, 64 * KiB, "isi" },
+ [FSL_IMX8MP_AIPS4_CONFIGURATION] = { 0x32df0000, 64 * KiB, "aips4_configuration" },
+ /* AIPS-4 End */
+
+ [FSL_IMX8MP_INTERCONNECT] = { 0x32700000, 1 * MiB, "interconnect" },
+
+ /* AIPS-3 Begin */
+ [FSL_IMX8MP_ENET2_TSN] = { 0x30bf0000, 64 * KiB, "enet2_tsn" },
+ [FSL_IMX8MP_ENET1] = { 0x30be0000, 64 * KiB, "enet1" },
+ [FSL_IMX8MP_SDMA1] = { 0x30bd0000, 64 * KiB, "sdma1" },
+ [FSL_IMX8MP_QSPI] = { 0x30bb0000, 64 * KiB, "qspi" },
+ [FSL_IMX8MP_USDHC3] = { 0x30b60000, 64 * KiB, "usdhc3" },
+ [FSL_IMX8MP_USDHC2] = { 0x30b50000, 64 * KiB, "usdhc2" },
+ [FSL_IMX8MP_USDHC1] = { 0x30b40000, 64 * KiB, "usdhc1" },
+ [FSL_IMX8MP_I2C6] = { 0x30ae0000, 64 * KiB, "i2c6" },
+ [FSL_IMX8MP_I2C5] = { 0x30ad0000, 64 * KiB, "i2c5" },
+ [FSL_IMX8MP_SEMAPHORE_HS] = { 0x30ac0000, 64 * KiB, "semaphore_hs" },
+ [FSL_IMX8MP_MU_1_B] = { 0x30ab0000, 64 * KiB, "mu_1_b" },
+ [FSL_IMX8MP_MU_1_A] = { 0x30aa0000, 64 * KiB, "mu_1_a" },
+ [FSL_IMX8MP_AUD_IRQ_STEER] = { 0x30a80000, 64 * KiB, "aud_irq_steer" },
+ [FSL_IMX8MP_UART4] = { 0x30a60000, 64 * KiB, "uart4" },
+ [FSL_IMX8MP_I2C4] = { 0x30a50000, 64 * KiB, "i2c4" },
+ [FSL_IMX8MP_I2C3] = { 0x30a40000, 64 * KiB, "i2c3" },
+ [FSL_IMX8MP_I2C2] = { 0x30a30000, 64 * KiB, "i2c2" },
+ [FSL_IMX8MP_I2C1] = { 0x30a20000, 64 * KiB, "i2c1" },
+ [FSL_IMX8MP_AIPS3_CONFIGURATION] = { 0x309f0000, 64 * KiB, "aips3_configuration" },
+ [FSL_IMX8MP_CAAM] = { 0x30900000, 256 * KiB, "caam" },
+ [FSL_IMX8MP_SPBA1] = { 0x308f0000, 64 * KiB, "spba1" },
+ [FSL_IMX8MP_FLEXCAN2] = { 0x308d0000, 64 * KiB, "flexcan2" },
+ [FSL_IMX8MP_FLEXCAN1] = { 0x308c0000, 64 * KiB, "flexcan1" },
+ [FSL_IMX8MP_UART2] = { 0x30890000, 64 * KiB, "uart2" },
+ [FSL_IMX8MP_UART3] = { 0x30880000, 64 * KiB, "uart3" },
+ [FSL_IMX8MP_UART1] = { 0x30860000, 64 * KiB, "uart1" },
+ [FSL_IMX8MP_ECSPI3] = { 0x30840000, 64 * KiB, "ecspi3" },
+ [FSL_IMX8MP_ECSPI2] = { 0x30830000, 64 * KiB, "ecspi2" },
+ [FSL_IMX8MP_ECSPI1] = { 0x30820000, 64 * KiB, "ecspi1" },
+ /* AIPS-3 End */
+
+ /* AIPS-2 Begin */
+ [FSL_IMX8MP_QOSC] = { 0x307f0000, 64 * KiB, "qosc" },
+ [FSL_IMX8MP_PERFMON2] = { 0x307d0000, 64 * KiB, "perfmon2" },
+ [FSL_IMX8MP_PERFMON1] = { 0x307c0000, 64 * KiB, "perfmon1" },
+ [FSL_IMX8MP_GPT4] = { 0x30700000, 64 * KiB, "gpt4" },
+ [FSL_IMX8MP_GPT5] = { 0x306f0000, 64 * KiB, "gpt5" },
+ [FSL_IMX8MP_GPT6] = { 0x306e0000, 64 * KiB, "gpt6" },
+ [FSL_IMX8MP_SYSCNT_CTRL] = { 0x306c0000, 64 * KiB, "syscnt_ctrl" },
+ [FSL_IMX8MP_SYSCNT_CMP] = { 0x306b0000, 64 * KiB, "syscnt_cmp" },
+ [FSL_IMX8MP_SYSCNT_RD] = { 0x306a0000, 64 * KiB, "syscnt_rd" },
+ [FSL_IMX8MP_PWM4] = { 0x30690000, 64 * KiB, "pwm4" },
+ [FSL_IMX8MP_PWM3] = { 0x30680000, 64 * KiB, "pwm3" },
+ [FSL_IMX8MP_PWM2] = { 0x30670000, 64 * KiB, "pwm2" },
+ [FSL_IMX8MP_PWM1] = { 0x30660000, 64 * KiB, "pwm1" },
+ [FSL_IMX8MP_AIPS2_CONFIGURATION] = { 0x305f0000, 64 * KiB, "aips2_configuration" },
+ /* AIPS-2 End */
+
+ /* AIPS-1 Begin */
+ [FSL_IMX8MP_CSU] = { 0x303e0000, 64 * KiB, "csu" },
+ [FSL_IMX8MP_RDC] = { 0x303d0000, 64 * KiB, "rdc" },
+ [FSL_IMX8MP_SEMAPHORE2] = { 0x303c0000, 64 * KiB, "semaphore2" },
+ [FSL_IMX8MP_SEMAPHORE1] = { 0x303b0000, 64 * KiB, "semaphore1" },
+ [FSL_IMX8MP_GPC] = { 0x303a0000, 64 * KiB, "gpc" },
+ [FSL_IMX8MP_SRC] = { 0x30390000, 64 * KiB, "src" },
+ [FSL_IMX8MP_CCM] = { 0x30380000, 64 * KiB, "ccm" },
+ [FSL_IMX8MP_SNVS_HP] = { 0x30370000, 64 * KiB, "snvs_hp" },
+ [FSL_IMX8MP_ANA_PLL] = { 0x30360000, 64 * KiB, "ana_pll" },
+ [FSL_IMX8MP_OCOTP_CTRL] = { 0x30350000, 64 * KiB, "ocotp_ctrl" },
+ [FSL_IMX8MP_IOMUXC_GPR] = { 0x30340000, 64 * KiB, "iomuxc_gpr" },
+ [FSL_IMX8MP_IOMUXC] = { 0x30330000, 64 * KiB, "iomuxc" },
+ [FSL_IMX8MP_GPT3] = { 0x302f0000, 64 * KiB, "gpt3" },
+ [FSL_IMX8MP_GPT2] = { 0x302e0000, 64 * KiB, "gpt2" },
+ [FSL_IMX8MP_GPT1] = { 0x302d0000, 64 * KiB, "gpt1" },
+ [FSL_IMX8MP_WDOG3] = { 0x302a0000, 64 * KiB, "wdog3" },
+ [FSL_IMX8MP_WDOG2] = { 0x30290000, 64 * KiB, "wdog2" },
+ [FSL_IMX8MP_WDOG1] = { 0x30280000, 64 * KiB, "wdog1" },
+ [FSL_IMX8MP_ANA_OSC] = { 0x30270000, 64 * KiB, "ana_osc" },
+ [FSL_IMX8MP_ANA_TSENSOR] = { 0x30260000, 64 * KiB, "ana_tsensor" },
+ [FSL_IMX8MP_GPIO5] = { 0x30240000, 64 * KiB, "gpio5" },
+ [FSL_IMX8MP_GPIO4] = { 0x30230000, 64 * KiB, "gpio4" },
+ [FSL_IMX8MP_GPIO3] = { 0x30220000, 64 * KiB, "gpio3" },
+ [FSL_IMX8MP_GPIO2] = { 0x30210000, 64 * KiB, "gpio2" },
+ [FSL_IMX8MP_GPIO1] = { 0x30200000, 64 * KiB, "gpio1" },
+ [FSL_IMX8MP_AIPS1_CONFIGURATION] = { 0x301f0000, 64 * KiB, "aips1_configuration" },
+ /* AIPS-1 End */
+
+ [FSL_IMX8MP_A53_DAP] = { 0x28000000, 16 * MiB, "a53_dap" },
+ [FSL_IMX8MP_PCIE1_MEM] = { 0x18000000, 128 * MiB, "pcie1_mem" },
+ [FSL_IMX8MP_QSPI_MEM] = { 0x08000000, 256 * MiB, "qspi_mem" },
+ [FSL_IMX8MP_OCRAM] = { 0x00900000, 576 * KiB, "ocram" },
+ [FSL_IMX8MP_TCM_DTCM] = { 0x00800000, 128 * KiB, "tcm_dtcm" },
+ [FSL_IMX8MP_TCM_ITCM] = { 0x007e0000, 128 * KiB, "tcm_itcm" },
+ [FSL_IMX8MP_OCRAM_S] = { 0x00180000, 36 * KiB, "ocram_s" },
+ [FSL_IMX8MP_CAAM_MEM] = { 0x00100000, 32 * KiB, "caam_mem" },
+ [FSL_IMX8MP_BOOT_ROM_PROTECTED] = { 0x0003f000, 4 * KiB, "boot_rom_protected" },
+ [FSL_IMX8MP_BOOT_ROM] = { 0x00000000, 252 * KiB, "boot_rom" },
+};
+
+static void fsl_imx8mp_init(Object *obj)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ FslImx8mpState *s = FSL_IMX8MP(obj);
+ int i;
+
+ for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX8MP_NUM_CPUS); i++) {
+ g_autofree char *name = g_strdup_printf("cpu%d", i);
+ object_initialize_child(obj, name, &s->cpu[i],
+ ARM_CPU_TYPE_NAME("cortex-a53"));
+ }
+
+ object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GICV3);
+
+ object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX8MP_CCM);
+
+ object_initialize_child(obj, "analog", &s->analog, TYPE_IMX8MP_ANALOG);
+
+ object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS);
+
+ for (i = 0; i < FSL_IMX8MP_NUM_UARTS; i++) {
+ g_autofree char *name = g_strdup_printf("uart%d", i + 1);
+ object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_GPTS; i++) {
+ g_autofree char *name = g_strdup_printf("gpt%d", i + 1);
+ object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX8MP_GPT);
+ }
+ object_initialize_child(obj, "gpt5-gpt6-irq", &s->gpt5_gpt6_irq,
+ TYPE_OR_IRQ);
+
+ for (i = 0; i < FSL_IMX8MP_NUM_I2CS; i++) {
+ g_autofree char *name = g_strdup_printf("i2c%d", i + 1);
+ object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_GPIOS; i++) {
+ g_autofree char *name = g_strdup_printf("gpio%d", i + 1);
+ object_initialize_child(obj, name, &s->gpio[i], TYPE_IMX_GPIO);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) {
+ g_autofree char *name = g_strdup_printf("usdhc%d", i + 1);
+ object_initialize_child(obj, name, &s->usdhc[i], TYPE_IMX_USDHC);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_USBS; i++) {
+ g_autofree char *name = g_strdup_printf("usb%d", i);
+ object_initialize_child(obj, name, &s->usb[i], TYPE_USB_DWC3);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_ECSPIS; i++) {
+ g_autofree char *name = g_strdup_printf("spi%d", i + 1);
+ object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI);
+ }
+
+ for (i = 0; i < FSL_IMX8MP_NUM_WDTS; i++) {
+ g_autofree char *name = g_strdup_printf("wdt%d", i);
+ object_initialize_child(obj, name, &s->wdt[i], TYPE_IMX2_WDT);
+ }
+
+ object_initialize_child(obj, "eth0", &s->enet, TYPE_IMX_ENET);
+
+ object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST);
+ object_initialize_child(obj, "pcie_phy", &s->pcie_phy,
+ TYPE_FSL_IMX8M_PCIE_PHY);
+}
+
+static void fsl_imx8mp_realize(DeviceState *dev, Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ FslImx8mpState *s = FSL_IMX8MP(dev);
+ DeviceState *gicdev = DEVICE(&s->gic);
+ int i;
+
+ if (ms->smp.cpus > FSL_IMX8MP_NUM_CPUS) {
+ error_setg(errp, "%s: Only %d CPUs are supported (%d requested)",
+ TYPE_FSL_IMX8MP, FSL_IMX8MP_NUM_CPUS, ms->smp.cpus);
+ return;
+ }
+
+ /* CPUs */
+ for (i = 0; i < ms->smp.cpus; i++) {
+ /* On uniprocessor, the CBAR is set to 0 */
+ if (ms->smp.cpus > 1) {
+ object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar",
+ fsl_imx8mp_memmap[FSL_IMX8MP_GIC_DIST].addr,
+ &error_abort);
+ }
+
+ /*
+ * CNTFID0 base frequency in Hz of system counter
+ */
+ object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 8000000,
+ &error_abort);
+
+ if (i) {
+ /*
+ * Secondary CPUs start in powered-down state (and can be
+ * powered up via the SRC system reset controller)
+ */
+ object_property_set_bool(OBJECT(&s->cpu[i]), "start-powered-off",
+ true, &error_abort);
+ }
+
+ if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) {
+ return;
+ }
+ }
+
+ /* GIC */
+ {
+ SysBusDevice *gicsbd = SYS_BUS_DEVICE(&s->gic);
+ QList *redist_region_count;
+
+ qdev_prop_set_uint32(gicdev, "num-cpu", ms->smp.cpus);
+ qdev_prop_set_uint32(gicdev, "num-irq",
+ FSL_IMX8MP_NUM_IRQS + GIC_INTERNAL);
+ redist_region_count = qlist_new();
+ qlist_append_int(redist_region_count, ms->smp.cpus);
+ qdev_prop_set_array(gicdev, "redist-region-count", redist_region_count);
+ object_property_set_link(OBJECT(&s->gic), "sysmem",
+ OBJECT(get_system_memory()), &error_fatal);
+ if (!sysbus_realize(gicsbd, errp)) {
+ return;
+ }
+ sysbus_mmio_map(gicsbd, 0, fsl_imx8mp_memmap[FSL_IMX8MP_GIC_DIST].addr);
+ sysbus_mmio_map(gicsbd, 1, fsl_imx8mp_memmap[FSL_IMX8MP_GIC_REDIST].addr);
+
+ /*
+ * Wire the outputs from each CPU's generic timer and the GICv3
+ * maintenance interrupt signal to the appropriate GIC PPI inputs, and
+ * the GIC's IRQ/FIQ interrupt outputs to the CPU's inputs.
+ */
+ for (i = 0; i < ms->smp.cpus; i++) {
+ DeviceState *cpudev = DEVICE(&s->cpu[i]);
+ int intidbase = FSL_IMX8MP_NUM_IRQS + i * GIC_INTERNAL;
+ qemu_irq irq;
+
+ /*
+ * Mapping from the output timer irq lines from the CPU to the
+ * GIC PPI inputs.
+ */
+ static const int timer_irqs[] = {
+ [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
+ [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
+ [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
+ [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
+ };
+
+ for (int j = 0; j < ARRAY_SIZE(timer_irqs); j++) {
+ irq = qdev_get_gpio_in(gicdev, intidbase + timer_irqs[j]);
+ qdev_connect_gpio_out(cpudev, j, irq);
+ }
+
+ irq = qdev_get_gpio_in(gicdev, intidbase + ARCH_GIC_MAINT_IRQ);
+ qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
+ 0, irq);
+
+ irq = qdev_get_gpio_in(gicdev, intidbase + VIRTUAL_PMU_IRQ);
+ qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, irq);
+
+ sysbus_connect_irq(gicsbd, i,
+ qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+ sysbus_connect_irq(gicsbd, i + ms->smp.cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ }
+ }
+
+ /* CCM */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_CCM].addr);
+
+ /* Analog */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->analog), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->analog), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_ANA_PLL].addr);
+
+ /* UARTs */
+ for (i = 0; i < FSL_IMX8MP_NUM_UARTS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } serial_table[FSL_IMX8MP_NUM_UARTS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_UART1].addr, FSL_IMX8MP_UART1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_UART2].addr, FSL_IMX8MP_UART2_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_UART3].addr, FSL_IMX8MP_UART3_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_UART4].addr, FSL_IMX8MP_UART4_IRQ },
+ };
+
+ qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i));
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0,
+ qdev_get_gpio_in(gicdev, serial_table[i].irq));
+ }
+
+ /* GPTs */
+ object_property_set_int(OBJECT(&s->gpt5_gpt6_irq), "num-lines", 2,
+ &error_abort);
+ if (!qdev_realize(DEVICE(&s->gpt5_gpt6_irq), NULL, errp)) {
+ return;
+ }
+
+ qdev_connect_gpio_out(DEVICE(&s->gpt5_gpt6_irq), 0,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_GPT5_GPT6_IRQ));
+
+ for (i = 0; i < FSL_IMX8MP_NUM_GPTS; i++) {
+ hwaddr gpt_addrs[FSL_IMX8MP_NUM_GPTS] = {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT1].addr,
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT2].addr,
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT3].addr,
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT4].addr,
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT5].addr,
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPT6].addr,
+ };
+
+ s->gpt[i].ccm = IMX_CCM(&s->ccm);
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, gpt_addrs[i]);
+
+ if (i < FSL_IMX8MP_NUM_GPTS - 2) {
+ static const unsigned int gpt_irqs[FSL_IMX8MP_NUM_GPTS - 2] = {
+ FSL_IMX8MP_GPT1_IRQ,
+ FSL_IMX8MP_GPT2_IRQ,
+ FSL_IMX8MP_GPT3_IRQ,
+ FSL_IMX8MP_GPT4_IRQ,
+ };
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0,
+ qdev_get_gpio_in(gicdev, gpt_irqs[i]));
+ } else {
+ int irq = i - FSL_IMX8MP_NUM_GPTS + 2;
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->gpt5_gpt6_irq), irq));
+ }
+ }
+
+ /* I2Cs */
+ for (i = 0; i < FSL_IMX8MP_NUM_I2CS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } i2c_table[FSL_IMX8MP_NUM_I2CS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C1].addr, FSL_IMX8MP_I2C1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C2].addr, FSL_IMX8MP_I2C2_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C3].addr, FSL_IMX8MP_I2C3_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C4].addr, FSL_IMX8MP_I2C4_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C5].addr, FSL_IMX8MP_I2C5_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_I2C6].addr, FSL_IMX8MP_I2C6_IRQ },
+ };
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
+ qdev_get_gpio_in(gicdev, i2c_table[i].irq));
+ }
+
+ /* GPIOs */
+ for (i = 0; i < FSL_IMX8MP_NUM_GPIOS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq_low;
+ unsigned int irq_high;
+ } gpio_table[FSL_IMX8MP_NUM_GPIOS] = {
+ {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPIO1].addr,
+ FSL_IMX8MP_GPIO1_LOW_IRQ,
+ FSL_IMX8MP_GPIO1_HIGH_IRQ
+ },
+ {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPIO2].addr,
+ FSL_IMX8MP_GPIO2_LOW_IRQ,
+ FSL_IMX8MP_GPIO2_HIGH_IRQ
+ },
+ {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPIO3].addr,
+ FSL_IMX8MP_GPIO3_LOW_IRQ,
+ FSL_IMX8MP_GPIO3_HIGH_IRQ
+ },
+ {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPIO4].addr,
+ FSL_IMX8MP_GPIO4_LOW_IRQ,
+ FSL_IMX8MP_GPIO4_HIGH_IRQ
+ },
+ {
+ fsl_imx8mp_memmap[FSL_IMX8MP_GPIO5].addr,
+ FSL_IMX8MP_GPIO5_LOW_IRQ,
+ FSL_IMX8MP_GPIO5_HIGH_IRQ
+ },
+ };
+
+ object_property_set_bool(OBJECT(&s->gpio[i]), "has-edge-sel", true,
+ &error_abort);
+ object_property_set_bool(OBJECT(&s->gpio[i]), "has-upper-pin-irq",
+ true, &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
+ qdev_get_gpio_in(gicdev, gpio_table[i].irq_low));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
+ qdev_get_gpio_in(gicdev, gpio_table[i].irq_high));
+ }
+
+ /* USDHCs */
+ for (i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } usdhc_table[FSL_IMX8MP_NUM_USDHCS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC1].addr, FSL_IMX8MP_USDHC1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC2].addr, FSL_IMX8MP_USDHC2_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC3].addr, FSL_IMX8MP_USDHC3_IRQ },
+ };
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, usdhc_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0,
+ qdev_get_gpio_in(gicdev, usdhc_table[i].irq));
+ }
+
+ /* USBs */
+ for (i = 0; i < FSL_IMX8MP_NUM_USBS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } usb_table[FSL_IMX8MP_NUM_USBS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_USB1].addr, FSL_IMX8MP_USB1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_USB2].addr, FSL_IMX8MP_USB2_IRQ },
+ };
+
+ qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "p2", 1);
+ qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "p3", 1);
+ qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "slots", 2);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, usb_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 0,
+ qdev_get_gpio_in(gicdev, usb_table[i].irq));
+ }
+
+ /* ECSPIs */
+ for (i = 0; i < FSL_IMX8MP_NUM_ECSPIS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } spi_table[FSL_IMX8MP_NUM_ECSPIS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI1].addr, FSL_IMX8MP_ECSPI1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI2].addr, FSL_IMX8MP_ECSPI2_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI3].addr, FSL_IMX8MP_ECSPI3_IRQ },
+ };
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
+ qdev_get_gpio_in(gicdev, spi_table[i].irq));
+ }
+
+ /* ENET1 */
+ object_property_set_uint(OBJECT(&s->enet), "phy-num", s->phy_num,
+ &error_abort);
+ object_property_set_uint(OBJECT(&s->enet), "tx-ring-num", 3, &error_abort);
+ qemu_configure_nic_device(DEVICE(&s->enet), true, NULL);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->enet), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->enet), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_ENET1].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->enet), 0,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_ENET1_MAC_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->enet), 1,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_ENET1_MAC_1588_IRQ));
+
+ /* SNVS */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->snvs), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_SNVS_HP].addr);
+
+ /* Watchdogs */
+ for (i = 0; i < FSL_IMX8MP_NUM_WDTS; i++) {
+ struct {
+ hwaddr addr;
+ unsigned int irq;
+ } wdog_table[FSL_IMX8MP_NUM_WDTS] = {
+ { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG1].addr, FSL_IMX8MP_WDOG1_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG2].addr, FSL_IMX8MP_WDOG2_IRQ },
+ { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG3].addr, FSL_IMX8MP_WDOG3_IRQ },
+ };
+
+ object_property_set_bool(OBJECT(&s->wdt[i]), "pretimeout-support",
+ true, &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, wdog_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0,
+ qdev_get_gpio_in(gicdev, wdog_table[i].irq));
+ }
+
+ /* PCIe */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_PCIE1].addr);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTA_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTB_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTC_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTD_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4,
+ qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_MSI_IRQ));
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie_phy), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie_phy), 0,
+ fsl_imx8mp_memmap[FSL_IMX8MP_PCIE_PHY1].addr);
+
+ /* On-Chip RAM */
+ if (!memory_region_init_ram(&s->ocram, NULL, "imx8mp.ocram",
+ fsl_imx8mp_memmap[FSL_IMX8MP_OCRAM].size,
+ errp)) {
+ return;
+ }
+ memory_region_add_subregion(get_system_memory(),
+ fsl_imx8mp_memmap[FSL_IMX8MP_OCRAM].addr,
+ &s->ocram);
+
+ /* Unimplemented devices */
+ for (i = 0; i < ARRAY_SIZE(fsl_imx8mp_memmap); i++) {
+ switch (i) {
+ case FSL_IMX8MP_ANA_PLL:
+ case FSL_IMX8MP_CCM:
+ case FSL_IMX8MP_GIC_DIST:
+ case FSL_IMX8MP_GIC_REDIST:
+ case FSL_IMX8MP_GPIO1 ... FSL_IMX8MP_GPIO5:
+ case FSL_IMX8MP_ECSPI1 ... FSL_IMX8MP_ECSPI3:
+ case FSL_IMX8MP_ENET1:
+ case FSL_IMX8MP_I2C1 ... FSL_IMX8MP_I2C6:
+ case FSL_IMX8MP_OCRAM:
+ case FSL_IMX8MP_PCIE1:
+ case FSL_IMX8MP_PCIE_PHY1:
+ case FSL_IMX8MP_RAM:
+ case FSL_IMX8MP_SNVS_HP:
+ case FSL_IMX8MP_UART1 ... FSL_IMX8MP_UART4:
+ case FSL_IMX8MP_USB1 ... FSL_IMX8MP_USB2:
+ case FSL_IMX8MP_USDHC1 ... FSL_IMX8MP_USDHC3:
+ case FSL_IMX8MP_WDOG1 ... FSL_IMX8MP_WDOG3:
+ /* device implemented and treated above */
+ break;
+
+ default:
+ create_unimplemented_device(fsl_imx8mp_memmap[i].name,
+ fsl_imx8mp_memmap[i].addr,
+ fsl_imx8mp_memmap[i].size);
+ break;
+ }
+ }
+}
+
+static const Property fsl_imx8mp_properties[] = {
+ DEFINE_PROP_UINT32("fec1-phy-num", FslImx8mpState, phy_num, 0),
+ DEFINE_PROP_BOOL("fec1-phy-connected", FslImx8mpState, phy_connected, true),
+};
+
+static void fsl_imx8mp_class_init(ObjectClass *oc, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_props(dc, fsl_imx8mp_properties);
+ dc->realize = fsl_imx8mp_realize;
+
+ dc->desc = "i.MX 8M Plus SoC";
+}
+
+static const TypeInfo fsl_imx8mp_types[] = {
+ {
+ .name = TYPE_FSL_IMX8MP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(FslImx8mpState),
+ .instance_init = fsl_imx8mp_init,
+ .class_init = fsl_imx8mp_class_init,
+ },
+};
+
+DEFINE_TYPES(fsl_imx8mp_types)
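The new SoC model above keeps every block's base address, size and name in a single fsl_imx8mp_memmap[] table, then walks the whole table at the end of realize and registers a create_unimplemented_device() stub for each entry that is not modelled yet, so guest accesses to unmodelled blocks are logged instead of faulting. A stripped-down sketch of the pattern, assuming it sits in an SoC source file with includes equivalent to the new file's; the two-entry table and the MY_SOC_* indices are hypothetical:

    /* Sketch: one memory-map table drives both real devices and "unimp" stubs. */
    #include "hw/misc/unimp.h"

    enum { MY_SOC_FOO, MY_SOC_BAR };             /* hypothetical block indices */

    static const struct {
        hwaddr addr;
        size_t size;
        const char *name;
    } my_soc_memmap[] = {
        [MY_SOC_FOO] = { 0x30a20000, 64 * KiB, "foo" },
        [MY_SOC_BAR] = { 0x30a30000, 64 * KiB, "bar" },
    };

    /* at the end of realize: stub out everything not modelled explicitly */
    for (i = 0; i < ARRAY_SIZE(my_soc_memmap); i++) {
        switch (i) {
        case MY_SOC_FOO:                         /* modelled and mapped above */
            break;
        default:
            create_unimplemented_device(my_soc_memmap[i].name,
                                        my_soc_memmap[i].addr,
                                        my_soc_memmap[i].size);
            break;
        }
    }

Keeping the addresses in one table is what lets the realize code map real devices and stubs from the same source of truth.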
diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c
deleted file mode 100644
index 9146269..0000000
--- a/hw/arm/gumstix.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Gumstix Platforms
- *
- * Copyright (c) 2007 by Thorsten Zitterell <info@bitmux.org>
- *
- * Code based on spitz platform by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-/*
- * Example usage:
- *
- * connex:
- * =======
- * create image:
- * # dd of=flash bs=1k count=16k if=/dev/zero
- * # dd of=flash bs=1k conv=notrunc if=u-boot.bin
- * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2
- * start it:
- * # qemu-system-arm -M connex -pflash flash -monitor null -nographic
- *
- * verdex:
- * =======
- * create image:
- * # dd of=flash bs=1k count=32k if=/dev/zero
- * # dd of=flash bs=1k conv=notrunc if=u-boot.bin
- * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2
- * # dd of=flash bs=1k conv=notrunc seek=31744 if=uImage
- * start it:
- * # qemu-system-arm -M verdex -pflash flash -monitor null -nographic -m 289
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qemu/error-report.h"
-#include "hw/arm/pxa.h"
-#include "net/net.h"
-#include "hw/block/flash.h"
-#include "hw/net/smc91c111.h"
-#include "hw/boards.h"
-#include "exec/address-spaces.h"
-#include "sysemu/qtest.h"
-
-#define CONNEX_FLASH_SIZE (16 * MiB)
-#define CONNEX_RAM_SIZE (64 * MiB)
-
-#define VERDEX_FLASH_SIZE (32 * MiB)
-#define VERDEX_RAM_SIZE (256 * MiB)
-
-#define FLASH_SECTOR_SIZE (128 * KiB)
-
-static void connex_init(MachineState *machine)
-{
- PXA2xxState *cpu;
- DriveInfo *dinfo;
-
- cpu = pxa255_init(CONNEX_RAM_SIZE);
-
- dinfo = drive_get(IF_PFLASH, 0, 0);
- if (!dinfo && !qtest_enabled()) {
- error_report("A flash image must be given with the "
- "'pflash' parameter");
- exit(1);
- }
-
- /* Numonyx RC28F128J3F75 */
- pflash_cfi01_register(0x00000000, "connext.rom", CONNEX_FLASH_SIZE,
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0);
-
- /* Interrupt line of NIC is connected to GPIO line 36 */
- smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 36));
-}
-
-static void verdex_init(MachineState *machine)
-{
- PXA2xxState *cpu;
- DriveInfo *dinfo;
-
- cpu = pxa270_init(VERDEX_RAM_SIZE, machine->cpu_type);
-
- dinfo = drive_get(IF_PFLASH, 0, 0);
- if (!dinfo && !qtest_enabled()) {
- error_report("A flash image must be given with the "
- "'pflash' parameter");
- exit(1);
- }
-
- /* Micron RC28F256P30TFA */
- pflash_cfi01_register(0x00000000, "verdex.rom", VERDEX_FLASH_SIZE,
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0);
-
- /* Interrupt line of NIC is connected to GPIO line 99 */
- smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 99));
-}
-
-static void connex_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "Gumstix Connex (PXA255)";
- mc->init = connex_init;
- mc->ignore_memory_transaction_failures = true;
- mc->deprecation_reason = "machine is old and unmaintained";
-}
-
-static const TypeInfo connex_type = {
- .name = MACHINE_TYPE_NAME("connex"),
- .parent = TYPE_MACHINE,
- .class_init = connex_class_init,
-};
-
-static void verdex_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "Gumstix Verdex Pro XL6P COMs (PXA270)";
- mc->init = verdex_init;
- mc->ignore_memory_transaction_failures = true;
- mc->deprecation_reason = "machine is old and unmaintained";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0");
-}
-
-static const TypeInfo verdex_type = {
- .name = MACHINE_TYPE_NAME("verdex"),
- .parent = TYPE_MACHINE,
- .class_init = verdex_class_init,
-};
-
-static void gumstix_machine_init(void)
-{
- type_register_static(&connex_type);
- type_register_static(&verdex_type);
-}
-
-type_init(gumstix_machine_init)
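The Gumstix connex/verdex boards deleted above had already been flagged through MachineClass::deprecation_reason, QEMU's usual staging step before a board is removed: the machine keeps working but QEMU warns when it is selected. For reference, a minimal sketch of how a board class is marked this way; the old_board_* names are hypothetical, the fields are the ones visible in the removed code:

    /* Sketch: marking a machine deprecated ahead of its eventual removal. */
    static void old_board_class_init(ObjectClass *oc, const void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);

        mc->desc = "Some old development board";
        mc->init = old_board_init;               /* hypothetical init function */
        mc->deprecation_reason = "machine is old and unmaintained";
    }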
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index c71b1a8..3ae26eb 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -25,8 +25,8 @@
#include "hw/arm/boot.h"
#include "hw/loader.h"
#include "net/net.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "hw/char/pl011.h"
@@ -45,7 +45,7 @@
#define MVBAR_ADDR 0x200
#define BOARD_SETUP_ADDR (MVBAR_ADDR + 8 * sizeof(uint32_t))
-#define NIRQ_GIC 160
+#define GIC_EXT_IRQS 128 /* EnergyCore ECX-1000 & ECX-2000 */
/* Board init. */
@@ -139,13 +139,13 @@ static void highbank_regs_init(Object *obj)
sysbus_init_mmio(dev, &s->iomem);
}
-static void highbank_regs_class_init(ObjectClass *klass, void *data)
+static void highbank_regs_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "Calxeda Highbank registers";
dc->vmsd = &vmstate_highbank_regs;
- dc->reset = highbank_regs_reset;
+ device_class_set_legacy_reset(dc, highbank_regs_reset);
}
static const TypeInfo highbank_regs_info = {
@@ -180,7 +180,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
{
DeviceState *dev = NULL;
SysBusDevice *busdev;
- qemu_irq pic[128];
+ qemu_irq pic[GIC_EXT_IRQS];
int n;
unsigned int smp_cpus = machine->smp.cpus;
qemu_irq cpu_irq[4];
@@ -199,7 +199,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
machine->cpu_type = ARM_CPU_TYPE_NAME("cortex-a15");
break;
default:
- assert(0);
+ g_assert_not_reached();
}
for (n = 0; n < smp_cpus; n++) {
@@ -260,7 +260,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
break;
}
qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
- qdev_prop_set_uint32(dev, "num-irq", NIRQ_GIC);
+ qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
@@ -271,7 +271,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
sysbus_connect_irq(busdev, n + 3 * smp_cpus, cpu_vfiq[n]);
}
- for (n = 0; n < 128; n++) {
+ for (n = 0; n < GIC_EXT_IRQS; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
}
@@ -341,7 +341,7 @@ static void midway_init(MachineState *machine)
calxeda_init(machine, CALXEDA_MIDWAY);
}
-static void highbank_class_init(ObjectClass *oc, void *data)
+static void highbank_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a9"),
@@ -365,7 +365,7 @@ static const TypeInfo highbank_type = {
.class_init = highbank_class_init,
};
-static void midway_class_init(ObjectClass *oc, void *data)
+static void midway_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a15"),
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
index 7dfddd4..e95ea5e 100644
--- a/hw/arm/imx25_pdk.c
+++ b/hw/arm/imx25_pdk.c
@@ -30,7 +30,7 @@
#include "hw/arm/boot.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/i2c/i2c.h"
#include "qemu/cutils.h"
@@ -147,6 +147,7 @@ static void imx25_pdk_machine_init(MachineClass *mc)
mc->init = imx25_pdk_init;
mc->ignore_memory_transaction_failures = true;
mc->default_ram_id = "imx25.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("imx25-pdk", imx25_pdk_machine_init)
diff --git a/hw/arm/imx8mp-evk.c b/hw/arm/imx8mp-evk.c
new file mode 100644
index 0000000..b3082fa
--- /dev/null
+++ b/hw/arm/imx8mp-evk.c
@@ -0,0 +1,103 @@
+/*
+ * NXP i.MX 8M Plus Evaluation Kit System Emulation
+ *
+ * Copyright (c) 2024, Bernhard Beschow <shentey@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/address-spaces.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/fsl-imx8mp.h"
+#include "hw/boards.h"
+#include "hw/qdev-properties.h"
+#include "system/qtest.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include <libfdt.h>
+
+static void imx8mp_evk_modify_dtb(const struct arm_boot_info *info, void *fdt)
+{
+ int i, offset;
+
+ /* Temporarily disable following nodes until they are implemented */
+ const char *nodes_to_remove[] = {
+ "nxp,imx8mp-fspi",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(nodes_to_remove); i++) {
+ const char *dev_str = nodes_to_remove[i];
+
+ offset = fdt_node_offset_by_compatible(fdt, -1, dev_str);
+ while (offset >= 0) {
+ fdt_nop_node(fdt, offset);
+ offset = fdt_node_offset_by_compatible(fdt, offset, dev_str);
+ }
+ }
+
+ /* Remove cpu-idle-states property from CPU nodes */
+ offset = fdt_node_offset_by_compatible(fdt, -1, "arm,cortex-a53");
+ while (offset >= 0) {
+ fdt_nop_property(fdt, offset, "cpu-idle-states");
+ offset = fdt_node_offset_by_compatible(fdt, offset, "arm,cortex-a53");
+ }
+}
+
+static void imx8mp_evk_init(MachineState *machine)
+{
+ static struct arm_boot_info boot_info;
+ FslImx8mpState *s;
+
+ if (machine->ram_size > FSL_IMX8MP_RAM_SIZE_MAX) {
+ error_report("RAM size " RAM_ADDR_FMT " above max supported (%08" PRIx64 ")",
+ machine->ram_size, FSL_IMX8MP_RAM_SIZE_MAX);
+ exit(1);
+ }
+
+ boot_info = (struct arm_boot_info) {
+ .loader_start = FSL_IMX8MP_RAM_START,
+ .board_id = -1,
+ .ram_size = machine->ram_size,
+ .psci_conduit = QEMU_PSCI_CONDUIT_SMC,
+ .modify_dtb = imx8mp_evk_modify_dtb,
+ };
+
+ s = FSL_IMX8MP(object_new(TYPE_FSL_IMX8MP));
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(s));
+ object_property_set_uint(OBJECT(s), "fec1-phy-num", 1, &error_fatal);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(s), &error_fatal);
+
+ memory_region_add_subregion(get_system_memory(), FSL_IMX8MP_RAM_START,
+ machine->ram);
+
+ for (int i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) {
+ BusState *bus;
+ DeviceState *carddev;
+ BlockBackend *blk;
+ DriveInfo *di = drive_get(IF_SD, i, 0);
+
+ if (!di) {
+ continue;
+ }
+
+ blk = blk_by_legacy_dinfo(di);
+ bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus");
+ carddev = qdev_new(TYPE_SD_CARD);
+ qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal);
+ qdev_realize_and_unref(carddev, bus, &error_fatal);
+ }
+
+ if (!qtest_enabled()) {
+ arm_load_kernel(&s->cpu[0], machine, &boot_info);
+ }
+}
+
+static void imx8mp_evk_machine_init(MachineClass *mc)
+{
+ mc->desc = "NXP i.MX 8M Plus EVK Board";
+ mc->init = imx8mp_evk_init;
+ mc->max_cpus = FSL_IMX8MP_NUM_CPUS;
+ mc->default_ram_id = "imx8mp-evk.ram";
+}
+DEFINE_MACHINE("imx8mp-evk", imx8mp_evk_machine_init)
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index feb0dd6..b1d8fbd 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -16,9 +16,9 @@
#include "hw/misc/arm_integrator_debug.h"
#include "hw/net/smc91c111.h"
#include "net/net.h"
-#include "exec/address-spaces.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/char/pl011.h"
@@ -688,18 +688,18 @@ static void integratorcp_machine_init(MachineClass *mc)
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "integrator.ram";
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("integratorcp", integratorcp_machine_init)
-static Property core_properties[] = {
+static const Property core_properties[] = {
DEFINE_PROP_UINT32("memsz", IntegratorCMState, memsz, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void core_class_init(ObjectClass *klass, void *data)
+static void core_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -708,14 +708,14 @@ static void core_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_integratorcm;
}
-static void icp_pic_class_init(ObjectClass *klass, void *data)
+static void icp_pic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_icp_pic;
}
-static void icp_control_class_init(ObjectClass *klass, void *data)
+static void icp_control_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
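The integratorcp hunks above show a pattern that repeats throughout this series: qdev property arrays become const and drop the DEFINE_PROP_END_OF_LIST() terminator, and class_init callbacks take a const void *data argument. A minimal sketch of the new shape for a hypothetical device (MyDeviceState and the "memsz" property are illustrative, not part of this patch):

    /* Sketch: const property array without an end-of-list sentinel. */
    static const Property my_device_properties[] = {
        DEFINE_PROP_UINT32("memsz", MyDeviceState, memsz, 0),
    };

    static void my_device_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_props(dc, my_device_properties);
    }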
diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c
index 2ccd6f8..362c145 100644
--- a/hw/arm/kzm.c
+++ b/hw/arm/kzm.c
@@ -19,12 +19,12 @@
#include "hw/arm/boot.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "net/net.h"
#include "hw/net/lan9118.h"
-#include "hw/char/serial.h"
-#include "sysemu/qtest.h"
-#include "sysemu/sysemu.h"
+#include "hw/char/serial-mm.h"
+#include "system/qtest.h"
+#include "system/system.h"
#include "qemu/cutils.h"
/* Memory map for Kzm Emulation Baseboard:
diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c
deleted file mode 100644
index 3a6c22f..0000000
--- a/hw/arm/mainstone.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * PXA270-based Intel Mainstone platforms.
- *
- * Copyright (c) 2007 by Armin Kuster <akuster@kama-aina.net> or
- * <akuster@mvista.com>
- *
- * Code based on spitz platform by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qemu/error-report.h"
-#include "qapi/error.h"
-#include "hw/arm/pxa.h"
-#include "hw/arm/boot.h"
-#include "net/net.h"
-#include "hw/net/smc91c111.h"
-#include "hw/boards.h"
-#include "hw/block/flash.h"
-#include "hw/sysbus.h"
-#include "exec/address-spaces.h"
-
-/* Device addresses */
-#define MST_FPGA_PHYS 0x08000000
-#define MST_ETH_PHYS 0x10000300
-#define MST_FLASH_0 0x00000000
-#define MST_FLASH_1 0x04000000
-
-/* IRQ definitions */
-#define MMC_IRQ 0
-#define USIM_IRQ 1
-#define USBC_IRQ 2
-#define ETHERNET_IRQ 3
-#define AC97_IRQ 4
-#define PEN_IRQ 5
-#define MSINS_IRQ 6
-#define EXBRD_IRQ 7
-#define S0_CD_IRQ 9
-#define S0_STSCHG_IRQ 10
-#define S0_IRQ 11
-#define S1_CD_IRQ 13
-#define S1_STSCHG_IRQ 14
-#define S1_IRQ 15
-
-static const struct keymap map[0xE0] = {
- [0 ... 0xDF] = { -1, -1 },
- [0x1e] = {0,0}, /* a */
- [0x30] = {0,1}, /* b */
- [0x2e] = {0,2}, /* c */
- [0x20] = {0,3}, /* d */
- [0x12] = {0,4}, /* e */
- [0x21] = {0,5}, /* f */
- [0x22] = {1,0}, /* g */
- [0x23] = {1,1}, /* h */
- [0x17] = {1,2}, /* i */
- [0x24] = {1,3}, /* j */
- [0x25] = {1,4}, /* k */
- [0x26] = {1,5}, /* l */
- [0x32] = {2,0}, /* m */
- [0x31] = {2,1}, /* n */
- [0x18] = {2,2}, /* o */
- [0x19] = {2,3}, /* p */
- [0x10] = {2,4}, /* q */
- [0x13] = {2,5}, /* r */
- [0x1f] = {3,0}, /* s */
- [0x14] = {3,1}, /* t */
- [0x16] = {3,2}, /* u */
- [0x2f] = {3,3}, /* v */
- [0x11] = {3,4}, /* w */
- [0x2d] = {3,5}, /* x */
- [0x34] = {4,0}, /* . */
- [0x15] = {4,2}, /* y */
- [0x2c] = {4,3}, /* z */
- [0x35] = {4,4}, /* / */
- [0xc7] = {5,0}, /* Home */
- [0x2a] = {5,1}, /* shift */
- /*
- * There are two matrix positions which map to space,
- * but QEMU can only use one of them for the reverse
- * mapping, so simply use the second one.
- */
- /* [0x39] = {5,2}, space */
- [0x39] = {5,3}, /* space */
- /*
- * Matrix position {5,4} and other keys are missing here.
- * TODO: Compare with Linux code and test real hardware.
- */
- [0x1c] = {5,4}, /* enter */
- [0x0e] = {5,5}, /* backspace */
- [0xc8] = {6,0}, /* up */
- [0xd0] = {6,1}, /* down */
- [0xcb] = {6,2}, /* left */
- [0xcd] = {6,3}, /* right */
-};
-
-enum mainstone_model_e { mainstone };
-
-#define MAINSTONE_RAM_SIZE (64 * MiB)
-#define MAINSTONE_ROM_SIZE (8 * MiB)
-#define MAINSTONE_FLASH_SIZE (32 * MiB)
-
-static struct arm_boot_info mainstone_binfo = {
- .loader_start = PXA2XX_SDRAM_BASE,
- .ram_size = MAINSTONE_RAM_SIZE,
-};
-
-#define FLASH_SECTOR_SIZE (256 * KiB)
-
-static void mainstone_common_init(MachineState *machine,
- enum mainstone_model_e model, int arm_id)
-{
- hwaddr mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 };
- PXA2xxState *mpu;
- DeviceState *mst_irq;
- DriveInfo *dinfo;
- int i;
- MemoryRegion *rom = g_new(MemoryRegion, 1);
-
- /* Setup CPU & memory */
- mpu = pxa270_init(mainstone_binfo.ram_size, machine->cpu_type);
- memory_region_init_rom(rom, NULL, "mainstone.rom", MAINSTONE_ROM_SIZE,
- &error_fatal);
- memory_region_add_subregion(get_system_memory(), 0x00000000, rom);
-
- /* There are two 32MiB flash devices on the board */
- for (i = 0; i < 2; i ++) {
- dinfo = drive_get(IF_PFLASH, 0, i);
- pflash_cfi01_register(mainstone_flash_base[i],
- i ? "mainstone.flash1" : "mainstone.flash0",
- MAINSTONE_FLASH_SIZE,
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- FLASH_SECTOR_SIZE, 4, 0, 0, 0, 0, 0);
- }
-
- mst_irq = sysbus_create_simple("mainstone-fpga", MST_FPGA_PHYS,
- qdev_get_gpio_in(mpu->gpio, 0));
-
- /* setup keypad */
- pxa27x_register_keypad(mpu->kp, map, 0xe0);
-
- /* MMC/SD host */
- pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ));
-
- pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0],
- qdev_get_gpio_in(mst_irq, S0_IRQ),
- qdev_get_gpio_in(mst_irq, S0_CD_IRQ));
- pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1],
- qdev_get_gpio_in(mst_irq, S1_IRQ),
- qdev_get_gpio_in(mst_irq, S1_CD_IRQ));
-
- smc91c111_init(MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ));
-
- mainstone_binfo.board_id = arm_id;
- arm_load_kernel(mpu->cpu, machine, &mainstone_binfo);
-}
-
-static void mainstone_init(MachineState *machine)
-{
- mainstone_common_init(machine, mainstone, 0x196);
-}
-
-static void mainstone2_machine_init(MachineClass *mc)
-{
- mc->desc = "Mainstone II (PXA27x)";
- mc->init = mainstone_init;
- mc->ignore_memory_transaction_failures = true;
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5");
- mc->deprecation_reason = "machine is old and unmaintained";
-}
-
-DEFINE_MACHINE("mainstone", mainstone2_machine_init)
diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c
index 500427e..86982cb 100644
--- a/hw/arm/mcimx6ul-evk.c
+++ b/hw/arm/mcimx6ul-evk.c
@@ -17,7 +17,7 @@
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
static void mcimx6ul_evk_init(MachineState *machine)
{
@@ -74,5 +74,6 @@ static void mcimx6ul_evk_machine_init(MachineClass *mc)
mc->init = mcimx6ul_evk_init;
mc->max_cpus = FSL_IMX6UL_NUM_CPUS;
mc->default_ram_id = "mcimx6ul-evk.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("mcimx6ul-evk", mcimx6ul_evk_machine_init)
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
index 693a102..3311961 100644
--- a/hw/arm/mcimx7d-sabre.c
+++ b/hw/arm/mcimx7d-sabre.c
@@ -19,7 +19,7 @@
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
static void mcimx7d_sabre_init(MachineState *machine)
{
@@ -74,5 +74,6 @@ static void mcimx7d_sabre_machine_init(MachineClass *mc)
mc->init = mcimx7d_sabre_init;
mc->max_cpus = FSL_IMX7_NUM_CPUS;
mc->default_ram_id = "mcimx7d-sabre.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("mcimx7d-sabre", mcimx7d_sabre_machine_init)
diff --git a/hw/arm/meson.build b/hw/arm/meson.build
index 0c07ab5..d90be8f 100644
--- a/hw/arm/meson.build
+++ b/hw/arm/meson.build
@@ -1,81 +1,85 @@
arm_ss = ss.source_set()
-arm_ss.add(files('boot.c'))
+arm_common_ss = ss.source_set()
arm_ss.add(when: 'CONFIG_ARM_VIRT', if_true: files('virt.c'))
arm_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
-arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c'))
-arm_ss.add(when: 'CONFIG_EMCRAFT_SF2', if_true: files('msf2-som.c'))
-arm_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c'))
-arm_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c'))
-arm_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mainstone.c'))
-arm_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c'))
-arm_ss.add(when: 'CONFIG_MPS3R', if_true: files('mps3r.c'))
-arm_ss.add(when: 'CONFIG_MUSICPAL', if_true: files('musicpal.c'))
-arm_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c'))
-arm_ss.add(when: 'CONFIG_OLIMEX_STM32_H405', if_true: files('olimex-stm32-h405.c'))
-arm_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c'))
-arm_ss.add(when: 'CONFIG_NSERIES', if_true: files('nseries.c'))
-arm_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c'))
+arm_common_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c'))
+arm_common_ss.add(when: 'CONFIG_EMCRAFT_SF2', if_true: files('msf2-som.c'))
+arm_common_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c'))
+arm_common_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c'))
+arm_common_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c'))
+arm_common_ss.add(when: 'CONFIG_MPS3R', if_true: files('mps3r.c'))
+arm_common_ss.add(when: 'CONFIG_MUSICPAL', if_true: [files('musicpal.c')])
+arm_common_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c'))
+arm_common_ss.add(when: 'CONFIG_OLIMEX_STM32_H405', if_true: files('olimex-stm32-h405.c'))
+arm_common_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c'))
+arm_common_ss.add(when: 'CONFIG_NPCM8XX', if_true: files('npcm8xx.c', 'npcm8xx_boards.c'))
+arm_common_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c'))
arm_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa-ref.c'))
-arm_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c'))
-arm_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c'))
-arm_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c'))
-arm_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c'))
+arm_common_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c'))
+arm_common_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c'))
+arm_common_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c'))
+arm_common_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c'))
-arm_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c'))
-arm_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c'))
-arm_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c', 'pxa2xx_gpio.c', 'pxa2xx_pic.c'))
-arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c'))
-arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c'))
-arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c'))
-arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c'))
-arm_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c'))
+arm_common_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c'))
+arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c'))
+arm_common_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c'))
+arm_common_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c'))
+arm_common_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c'))
+arm_common_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c'))
+arm_common_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c'))
arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c'))
-arm_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c'))
-arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c'))
-arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c'))
-arm_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c'))
-arm_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c'))
-arm_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c'))
-arm_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c'))
-arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c'))
-arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c'))
-arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c'))
-arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c'))
+arm_common_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c'))
+arm_common_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c'))
+arm_common_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c'))
+arm_common_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c'))
+arm_common_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c'))
+arm_common_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c'))
+arm_common_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c'))
+arm_common_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c'))
arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
'aspeed.c',
'aspeed_soc_common.c',
'aspeed_ast2400.c',
'aspeed_ast2600.c',
+ 'aspeed_ast27x0-ssp.c',
+ 'aspeed_ast27x0-tsp.c',
'aspeed_ast10x0.c',
'aspeed_eeprom.c',
'fby35.c'))
-arm_ss.add(when: ['CONFIG_ASPEED_SOC', 'TARGET_AARCH64'], if_true: files('aspeed_ast27x0.c'))
-arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c'))
-arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c'))
-arm_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c'))
-arm_ss.add(when: 'CONFIG_MUSCA', if_true: files('musca.c'))
-arm_ss.add(when: 'CONFIG_ARMSSE', if_true: files('armsse.c'))
-arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.c'))
-arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmuv3.c'))
-arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c'))
-arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c'))
-arm_ss.add(when: 'CONFIG_XEN', if_true: files('xen_arm.c'))
+arm_common_ss.add(when: ['CONFIG_ASPEED_SOC', 'TARGET_AARCH64'], if_true: files(
+ 'aspeed_ast27x0.c',
+ 'aspeed_ast27x0-fc.c',))
+arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c'))
+arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c'))
+arm_common_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c'))
+arm_common_ss.add(when: 'CONFIG_MUSCA', if_true: files('musca.c'))
+arm_common_ss.add(when: 'CONFIG_ARMSSE', if_true: files('armsse.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX8MP', if_true: files('fsl-imx8mp.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX8MP_EVK', if_true: files('imx8mp-evk.c'))
+arm_common_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmuv3.c'))
+arm_common_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c'))
+arm_common_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c'))
+arm_ss.add(when: 'CONFIG_XEN', if_true: files(
+ 'xen-stubs.c',
+ 'xen-pvh.c',
+))
-system_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c'))
-system_ss.add(when: 'CONFIG_CHEETAH', if_true: files('palm.c'))
-system_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c'))
-system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c'))
-system_ss.add(when: 'CONFIG_GUMSTIX', if_true: files('gumstix.c'))
-system_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c'))
-system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap2.c'))
-system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c'))
-system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c'))
-system_ss.add(when: 'CONFIG_SPITZ', if_true: files('spitz.c'))
-system_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c'))
-system_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c'))
-system_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c'))
-system_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c'))
-system_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c'))
-system_ss.add(when: 'CONFIG_Z2', if_true: files('z2.c'))
+arm_common_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c'))
+arm_common_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c'))
+arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c'))
+arm_common_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c'))
+arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c'))
+arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c'))
+arm_common_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c'))
+arm_common_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c'))
+arm_common_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c'))
+arm_common_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c'))
+
+arm_common_ss.add(files('boot.c'))
hw_arch += {'arm': arm_ss}
+hw_common_arch += {'arm': arm_common_ss}
diff --git a/hw/arm/microbit.c b/hw/arm/microbit.c
index 50df362..525443f 100644
--- a/hw/arm/microbit.c
+++ b/hw/arm/microbit.c
@@ -12,8 +12,8 @@
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/arm/boot.h"
-#include "sysemu/sysemu.h"
-#include "exec/address-spaces.h"
+#include "system/system.h"
+#include "system/address-spaces.h"
#include "hw/arm/nrf51_soc.h"
#include "hw/i2c/microbit_i2c.h"
@@ -56,11 +56,11 @@ static void microbit_init(MachineState *machine)
memory_region_add_subregion_overlap(&s->nrf51.container, NRF51_TWI_BASE,
mr, -1);
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(s->nrf51.armv7m.cpu, machine->kernel_filename,
0, s->nrf51.flash_size);
}
-static void microbit_machine_class_init(ObjectClass *oc, void *data)
+static void microbit_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index a2d18af..5dd87cc 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -48,15 +48,15 @@
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/error-report.h"
#include "hw/arm/boot.h"
#include "hw/arm/armv7m.h"
#include "hw/or-irq.h"
#include "hw/boards.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
+#include "system/reset.h"
#include "hw/misc/unimp.h"
#include "hw/char/cmsdk-apb-uart.h"
#include "hw/timer/cmsdk-apb-timer.h"
@@ -435,7 +435,7 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs, const PPCExtraData *extradata)
{
- /* The irq[] array is tx, rx, combined, in that order */
+ /* The irq[] array is rx, tx, combined, in that order */
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
CMSDKAPBUART *uart = opaque;
int i = uart - &mms->uart[0];
@@ -447,8 +447,8 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
qdev_prop_set_uint32(DEVICE(uart), "pclk-frq", mmc->apb_periph_frq);
sysbus_realize(SYS_BUS_DEVICE(uart), &error_fatal);
s = SYS_BUS_DEVICE(uart);
- sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0]));
- sysbus_connect_irq(s, 1, get_sse_irq_in(mms, irqs[1]));
+ sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[1]));
+ sysbus_connect_irq(s, 1, get_sse_irq_in(mms, irqs[0]));
sysbus_connect_irq(s, 2, qdev_get_gpio_in(orgate_dev, i * 2));
sysbus_connect_irq(s, 3, qdev_get_gpio_in(orgate_dev, i * 2 + 1));
sysbus_connect_irq(s, 4, get_sse_irq_in(mms, irqs[2]));
@@ -1211,7 +1211,7 @@ static void mps2tz_common_init(MachineState *machine)
mms->remap_irq);
}
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(mms->iotkit.armv7m[0].cpu, machine->kernel_filename,
0, boot_ram_size(mms));
}
@@ -1254,7 +1254,7 @@ static void mps2_set_remap(Object *obj, const char *value, Error **errp)
}
}
-static void mps2_machine_reset(MachineState *machine, ShutdownCause reason)
+static void mps2_machine_reset(MachineState *machine, ResetType type)
{
MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine);
@@ -1264,10 +1264,10 @@ static void mps2_machine_reset(MachineState *machine, ShutdownCause reason)
* reset see the correct mapping.
*/
remap_memory(mms, mms->remap);
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
}
-static void mps2tz_class_init(ObjectClass *oc, void *data)
+static void mps2tz_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(oc);
@@ -1304,7 +1304,7 @@ static void mps2tz_set_default_ram_info(MPS2TZMachineClass *mmc)
g_assert_not_reached();
}
-static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
+static void mps2tz_an505_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
@@ -1338,7 +1338,7 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
mps2tz_set_default_ram_info(mmc);
}
-static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
+static void mps2tz_an521_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
@@ -1372,7 +1372,7 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
mps2tz_set_default_ram_info(mmc);
}
-static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
+static void mps3tz_an524_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
@@ -1411,7 +1411,7 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
"are BRAM (default) and QSPI.");
}
-static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
+static void mps3tz_an547_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
@@ -1453,7 +1453,7 @@ static const TypeInfo mps2tz_info = {
.instance_size = sizeof(MPS2TZMachineState),
.class_size = sizeof(MPS2TZMachineClass),
.class_init = mps2tz_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IDAU_INTERFACE },
{ }
},
diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c
index 50919ee..bd378e3 100644
--- a/hw/arm/mps2.c
+++ b/hw/arm/mps2.c
@@ -33,8 +33,8 @@
#include "hw/arm/armv7m.h"
#include "hw/or-irq.h"
#include "hw/boards.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/qdev-properties.h"
#include "hw/misc/unimp.h"
#include "hw/char/cmsdk-apb-uart.h"
@@ -48,7 +48,7 @@
#include "net/net.h"
#include "hw/watchdog/cmsdk-apb-watchdog.h"
#include "hw/qdev-clock.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qom/object.h"
typedef enum MPS2FPGAType {
@@ -224,7 +224,11 @@ static void mps2_common_init(MachineState *machine)
switch (mmc->fpga_type) {
case FPGA_AN385:
case FPGA_AN386:
+ qdev_prop_set_uint32(armv7m, "num-irq", 32);
+ break;
case FPGA_AN500:
+ /* The AN500 configures its Cortex-M7 with 16 MPU regions */
+ qdev_prop_set_uint32(armv7m, "mpu-ns-regions", 16);
qdev_prop_set_uint32(armv7m, "num-irq", 32);
break;
case FPGA_AN511:
@@ -460,11 +464,11 @@ static void mps2_common_init(MachineState *machine)
qdev_get_gpio_in(armv7m,
mmc->fpga_type == FPGA_AN511 ? 47 : 13));
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(mms->armv7m.cpu, machine->kernel_filename,
0, 0x400000);
}
-static void mps2_class_init(ObjectClass *oc, void *data)
+static void mps2_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -474,7 +478,7 @@ static void mps2_class_init(ObjectClass *oc, void *data)
mc->default_ram_id = "mps.ram";
}
-static void mps2_an385_class_init(ObjectClass *oc, void *data)
+static void mps2_an385_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc);
@@ -493,7 +497,7 @@ static void mps2_an385_class_init(ObjectClass *oc, void *data)
mmc->has_block_ram = true;
}
-static void mps2_an386_class_init(ObjectClass *oc, void *data)
+static void mps2_an386_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc);
@@ -512,7 +516,7 @@ static void mps2_an386_class_init(ObjectClass *oc, void *data)
mmc->has_block_ram = true;
}
-static void mps2_an500_class_init(ObjectClass *oc, void *data)
+static void mps2_an500_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc);
@@ -531,7 +535,7 @@ static void mps2_an500_class_init(ObjectClass *oc, void *data)
mmc->has_block_ram = false;
}
-static void mps2_an511_class_init(ObjectClass *oc, void *data)
+static void mps2_an511_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc);
diff --git a/hw/arm/mps3r.c b/hw/arm/mps3r.c
index 4d55a65..48c73ac 100644
--- a/hw/arm/mps3r.c
+++ b/hw/arm/mps3r.c
@@ -27,10 +27,10 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "qapi/qmp/qlist.h"
-#include "exec/address-spaces.h"
+#include "qobject/qlist.h"
+#include "system/address-spaces.h"
#include "cpu.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/or-irq.h"
#include "hw/qdev-clock.h"
@@ -583,14 +583,14 @@ static void mps3r_set_default_ram_info(MPS3RMachineClass *mmc)
g_assert_not_reached();
}
-static void mps3r_class_init(ObjectClass *oc, void *data)
+static void mps3r_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->init = mps3r_common_init;
}
-static void mps3r_an536_class_init(ObjectClass *oc, void *data)
+static void mps3r_an536_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MPS3RMachineClass *mmc = MPS3R_MACHINE_CLASS(oc);
diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c
index a94a10a..c5e9c717 100644
--- a/hw/arm/msf2-soc.c
+++ b/hw/arm/msf2-soc.c
@@ -25,12 +25,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "hw/char/serial.h"
+#include "system/address-spaces.h"
+#include "hw/char/serial-mm.h"
#include "hw/arm/msf2-soc.h"
#include "hw/misc/unimp.h"
#include "hw/qdev-clock.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#define MSF2_TIMER_BASE 0x40004000
#define MSF2_SYSREG_BASE 0x40038000
@@ -222,7 +222,7 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("usb", 0x40043000, 0x1000);
}
-static Property m2sxxx_soc_properties[] = {
+static const Property m2sxxx_soc_properties[] = {
/*
* part name specifies the type of SmartFusion2 device variant (this
* property is for information purposes only.
@@ -234,10 +234,9 @@ static Property m2sxxx_soc_properties[] = {
/* default divisors in Libero GUI */
DEFINE_PROP_UINT8("apb0div", MSF2State, apb0div, 2),
DEFINE_PROP_UINT8("apb1div", MSF2State, apb1div, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void m2sxxx_soc_class_init(ObjectClass *klass, void *data)
+static void m2sxxx_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c
index 5c415ab..29c76c6 100644
--- a/hw/arm/msf2-som.c
+++ b/hw/arm/msf2-som.c
@@ -33,7 +33,7 @@
#include "hw/qdev-properties.h"
#include "hw/arm/boot.h"
#include "hw/qdev-clock.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/arm/msf2-soc.h"
#define DDR_BASE_ADDRESS 0xA0000000
@@ -92,7 +92,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine)
cs_line = qdev_get_gpio_in_named(spi_flash, SSI_GPIO_CS, 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line);
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(soc->armv7m.cpu, machine->kernel_filename,
0, soc->envm_size);
}
diff --git a/hw/arm/musca.c b/hw/arm/musca.c
index e2c9d49..250b3b5 100644
--- a/hw/arm/musca.c
+++ b/hw/arm/musca.c
@@ -22,8 +22,8 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/arm/boot.h"
#include "hw/arm/armsse.h"
#include "hw/boards.h"
@@ -590,11 +590,11 @@ static void musca_init(MachineState *machine)
"cfg_sec_resp", 0));
}
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(mms->sse.armv7m[0].cpu, machine->kernel_filename,
0, 0x2000000);
}
-static void musca_class_init(ObjectClass *oc, void *data)
+static void musca_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
static const char * const valid_cpu_types[] = {
@@ -609,7 +609,7 @@ static void musca_class_init(ObjectClass *oc, void *data)
mc->init = musca_init;
}
-static void musca_a_class_init(ObjectClass *oc, void *data)
+static void musca_a_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc);
@@ -623,7 +623,7 @@ static void musca_a_class_init(ObjectClass *oc, void *data)
mmc->num_mpcs = ARRAY_SIZE(a_mpc_info);
}
-static void musca_b1_class_init(ObjectClass *oc, void *data)
+static void musca_b1_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc);
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index 2020f73..329b162 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -16,9 +16,9 @@
#include "migration/vmstate.h"
#include "hw/arm/boot.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
@@ -29,9 +29,9 @@
#include "hw/irq.h"
#include "hw/or-irq.h"
#include "hw/audio/wm8750.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/runstate.h"
-#include "sysemu/dma.h"
+#include "system/block-backend.h"
+#include "system/runstate.h"
+#include "system/dma.h"
#include "ui/pixel_ops.h"
#include "qemu/cutils.h"
#include "qom/object.h"
@@ -286,7 +286,7 @@ static const VMStateDescription musicpal_lcd_vmsd = {
}
};
-static void musicpal_lcd_class_init(ObjectClass *klass, void *data)
+static void musicpal_lcd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -407,11 +407,11 @@ static const VMStateDescription mv88w8618_pic_vmsd = {
}
};
-static void mv88w8618_pic_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_pic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = mv88w8618_pic_reset;
+ device_class_set_legacy_reset(dc, mv88w8618_pic_reset);
dc->vmsd = &mv88w8618_pic_vmsd;
}
@@ -601,11 +601,11 @@ static const VMStateDescription mv88w8618_pit_vmsd = {
}
};
-static void mv88w8618_pit_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_pit_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = mv88w8618_pit_reset;
+ device_class_set_legacy_reset(dc, mv88w8618_pit_reset);
dc->vmsd = &mv88w8618_pit_vmsd;
}
@@ -687,7 +687,7 @@ static const VMStateDescription mv88w8618_flashcfg_vmsd = {
}
};
-static void mv88w8618_flashcfg_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_flashcfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1026,11 +1026,11 @@ static const VMStateDescription musicpal_gpio_vmsd = {
}
};
-static void musicpal_gpio_class_init(ObjectClass *klass, void *data)
+static void musicpal_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = musicpal_gpio_reset;
+ device_class_set_legacy_reset(dc, musicpal_gpio_reset);
dc->vmsd = &musicpal_gpio_vmsd;
}
@@ -1171,7 +1171,7 @@ static const VMStateDescription musicpal_key_vmsd = {
}
};
-static void musicpal_key_class_init(ObjectClass *klass, void *data)
+static void musicpal_key_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1238,7 +1238,7 @@ static void musicpal_init(MachineState *machine)
qdev_get_gpio_in(pic, MP_TIMER4_IRQ), NULL);
/* Logically OR both UART IRQs together */
- uart_orgate = DEVICE(object_new(TYPE_OR_IRQ));
+ uart_orgate = qdev_new(TYPE_OR_IRQ);
object_property_set_int(OBJECT(uart_orgate), "num-lines", 2, &error_fatal);
qdev_realize_and_unref(uart_orgate, NULL, &error_fatal);
qdev_connect_gpio_out(uart_orgate, 0,
@@ -1348,7 +1348,7 @@ static void musicpal_machine_init(MachineClass *mc)
DEFINE_MACHINE("musicpal", musicpal_machine_init)
-static void mv88w8618_wlan_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_wlan_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
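The musicpal hunks above replace direct assignments to dc->reset with device_class_set_legacy_reset(), which registers an old-style whole-device reset handler through the DeviceClass helper instead of poking the field directly. A minimal sketch of the pattern (mydev_reset and mydev_class_init are illustrative names):

    /* Sketch: registering a legacy reset handler in class_init. */
    static void mydev_reset(DeviceState *dev)
    {
        /* Return the device to its power-on state here. */
    }

    static void mydev_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_legacy_reset(dc, mydev_reset);
    }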
diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c
index 8b1a9a2..df793c7 100644
--- a/hw/arm/netduino2.c
+++ b/hw/arm/netduino2.c
@@ -48,7 +48,7 @@ static void netduino2_init(MachineState *machine)
qdev_connect_clock_in(dev, "sysclk", sysclk);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+ armv7m_load_kernel(STM32F205_SOC(dev)->armv7m.cpu, machine->kernel_filename,
0, FLASH_SIZE);
}
diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c
index bccd100..81b6334 100644
--- a/hw/arm/netduinoplus2.c
+++ b/hw/arm/netduinoplus2.c
@@ -48,7 +48,7 @@ static void netduinoplus2_init(MachineState *machine)
qdev_connect_clock_in(dev, "sysclk", sysclk);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- armv7m_load_kernel(ARM_CPU(first_cpu),
+ armv7m_load_kernel(STM32F405_SOC(dev)->armv7m.cpu,
machine->kernel_filename,
0, FLASH_SIZE);
}
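A recurring change in the Cortex-M board hunks above (microbit, mps2, mps2-tz, msf2-som, musca, netduino2, netduinoplus2) is that armv7m_load_kernel() is now handed the ARMCPU embedded in the board's own SoC object rather than the global first_cpu. A minimal sketch of the new calling shape, with a hypothetical SoC type and field names:

    /* Sketch: load the guest image into the CPU owned by this board's SoC. */
    static void my_board_init(MachineState *machine)
    {
        MySoCState *soc = MY_SOC(qdev_new(TYPE_MY_SOC));   /* hypothetical SoC */

        object_property_add_child(OBJECT(machine), "soc", OBJECT(soc));
        sysbus_realize_and_unref(SYS_BUS_DEVICE(soc), &error_fatal);

        /* Pass the SoC's own CPU instead of first_cpu. */
        armv7m_load_kernel(soc->armv7m.cpu, machine->kernel_filename,
                           0, soc->flash_size);
    }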
diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c
index cb77913..2f30c49 100644
--- a/hw/arm/npcm7xx.c
+++ b/hw/arm/npcm7xx.c
@@ -18,7 +18,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/npcm7xx.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/loader.h"
#include "hw/misc/unimp.h"
#include "hw/qdev-clock.h"
@@ -26,7 +26,7 @@
#include "qapi/error.h"
#include "qemu/bswap.h"
#include "qemu/units.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "target/arm/cpu-qom.h"
/*
@@ -292,17 +292,21 @@ static const struct {
hwaddr regs_addr;
int cs_count;
const hwaddr *flash_addr;
+ size_t flash_size;
} npcm7xx_fiu[] = {
{
.name = "fiu0",
.regs_addr = 0xfb000000,
.cs_count = ARRAY_SIZE(npcm7xx_fiu0_flash_addr),
.flash_addr = npcm7xx_fiu0_flash_addr,
+ .flash_size = 128 * MiB,
+
}, {
.name = "fiu3",
.regs_addr = 0xc0000000,
.cs_count = ARRAY_SIZE(npcm7xx_fiu3_flash_addr),
.flash_addr = npcm7xx_fiu3_flash_addr,
+ .flash_size = 128 * MiB,
},
};
@@ -735,6 +739,8 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
object_property_set_int(OBJECT(sbd), "cs-count",
npcm7xx_fiu[i].cs_count, &error_abort);
+ object_property_set_int(OBJECT(sbd), "flash-size",
+ npcm7xx_fiu[i].flash_size, &error_abort);
sysbus_realize(sbd, &error_abort);
sysbus_mmio_map(sbd, 0, npcm7xx_fiu[i].regs_addr);
@@ -810,13 +816,12 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
create_unimplemented_device("npcm7xx.spix", 0xfb001000, 4 * KiB);
}
-static Property npcm7xx_properties[] = {
+static const Property npcm7xx_properties[] = {
DEFINE_PROP_LINK("dram-mr", NPCM7xxState, dram, TYPE_MEMORY_REGION,
MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void npcm7xx_class_init(ObjectClass *oc, void *data)
+static void npcm7xx_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -825,7 +830,7 @@ static void npcm7xx_class_init(ObjectClass *oc, void *data)
device_class_set_props(dc, npcm7xx_properties);
}
-static void npcm730_class_init(ObjectClass *oc, void *data)
+static void npcm730_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxClass *nc = NPCM7XX_CLASS(oc);
@@ -834,7 +839,7 @@ static void npcm730_class_init(ObjectClass *oc, void *data)
nc->num_cpus = 2;
}
-static void npcm750_class_init(ObjectClass *oc, void *data)
+static void npcm750_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxClass *nc = NPCM7XX_CLASS(oc);
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
index e229efb..465a0e5 100644
--- a/hw/arm/npcm7xx_boards.c
+++ b/hw/arm/npcm7xx_boards.c
@@ -27,9 +27,9 @@
#include "qapi/error.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
+#include "system/block-backend.h"
#include "qemu/error-report.h"
@@ -453,7 +453,7 @@ static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type)
mc->default_cpus = mc->min_cpus = mc->max_cpus = sc->num_cpus;
}
-static void npcm7xx_machine_class_init(ObjectClass *oc, void *data)
+static void npcm7xx_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
static const char * const valid_cpu_types[] = {
@@ -472,7 +472,7 @@ static void npcm7xx_machine_class_init(ObjectClass *oc, void *data)
* Schematics:
* https://github.com/Nuvoton-Israel/nuvoton-info/blob/master/npcm7xx-poleg/evaluation-board/board_deliverables/NPCM750x_EB_ver.A1.1_COMPLETE.pdf
*/
-static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data)
+static void npcm750_evb_machine_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -481,10 +481,11 @@ static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex-A9)";
mc->init = npcm750_evb_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
};
-static void gsj_machine_class_init(ObjectClass *oc, void *data)
+static void gsj_machine_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -493,10 +494,11 @@ static void gsj_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Quanta GSJ (Cortex-A9)";
mc->init = quanta_gsj_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 512 * MiB;
};
-static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data)
+static void gbs_bmc_machine_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -505,10 +507,11 @@ static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Quanta GBS (Cortex-A9)";
mc->init = quanta_gbs_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
}
-static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data)
+static void kudo_bmc_machine_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -517,10 +520,11 @@ static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Kudo BMC (Cortex-A9)";
mc->init = kudo_bmc_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
};
-static void mori_bmc_machine_class_init(ObjectClass *oc, void *data)
+static void mori_bmc_machine_class_init(ObjectClass *oc, const void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -529,6 +533,7 @@ static void mori_bmc_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Mori BMC (Cortex-A9)";
mc->init = mori_bmc_init;
+ mc->auto_create_sdcard = true;
mc->default_ram_size = 1 * GiB;
}
diff --git a/hw/arm/npcm8xx.c b/hw/arm/npcm8xx.c
new file mode 100644
index 0000000..a276fea
--- /dev/null
+++ b/hw/arm/npcm8xx.c
@@ -0,0 +1,859 @@
+/*
+ * Nuvoton NPCM8xx SoC family.
+ *
+ * Copyright 2022 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/boards.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/bsa.h"
+#include "hw/arm/npcm8xx.h"
+#include "hw/char/serial-mm.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/loader.h"
+#include "hw/misc/unimp.h"
+#include "hw/qdev-clock.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qemu/units.h"
+#include "system/system.h"
+
+/*
+ * This covers the whole MMIO space. We'll use this to catch any MMIO accesses
+ * that aren't handled by a device.
+ */
+#define NPCM8XX_MMIO_BA 0x80000000
+#define NPCM8XX_MMIO_SZ 0x7ffd0000
+
+/* OTP fuse array */
+#define NPCM8XX_OTP_BA 0xf0189000
+
+/* GIC Distributor */
+#define NPCM8XX_GICD_BA 0xdfff9000
+#define NPCM8XX_GICC_BA 0xdfffa000
+
+/* Core system modules. */
+#define NPCM8XX_CPUP_BA 0xf03fe000
+#define NPCM8XX_GCR_BA 0xf0800000
+#define NPCM8XX_CLK_BA 0xf0801000
+#define NPCM8XX_MC_BA 0xf0824000
+#define NPCM8XX_RNG_BA 0xf000b000
+
+/* ADC Module */
+#define NPCM8XX_ADC_BA 0xf000c000
+
+/* Internal AHB SRAM */
+#define NPCM8XX_RAM3_BA 0xc0008000
+#define NPCM8XX_RAM3_SZ (4 * KiB)
+
+/* Memory blocks at the end of the address space */
+#define NPCM8XX_RAM2_BA 0xfffb0000
+#define NPCM8XX_RAM2_SZ (256 * KiB)
+#define NPCM8XX_ROM_BA 0xffff0100
+#define NPCM8XX_ROM_SZ (64 * KiB)
+
+/* SDHCI Modules */
+#define NPCM8XX_MMC_BA 0xf0842000
+
+/* PCS Module */
+#define NPCM8XX_PCS_BA 0xf0780000
+
+/* PSPI Modules */
+#define NPCM8XX_PSPI_BA 0xf0201000
+
+/* Clock configuration values to be fixed up when bypassing bootloader */
+
+/* Run PLL1 at 1600 MHz */
+#define NPCM8XX_PLLCON1_FIXUP_VAL 0x00402101
+/* Run the CPU from PLL1 and UART from PLL2 */
+#define NPCM8XX_CLKSEL_FIXUP_VAL 0x004aaba9
+
+/*
+ * Interrupt lines going into the GIC. This does not include internal Cortex-A35
+ * interrupts.
+ */
+enum NPCM8xxInterrupt {
+ NPCM8XX_ADC_IRQ = 0,
+ NPCM8XX_PECI_IRQ = 6,
+ NPCM8XX_KCS_HIB_IRQ = 9,
+ NPCM8XX_GMAC1_IRQ = 14,
+ NPCM8XX_GMAC2_IRQ,
+ NPCM8XX_GMAC3_IRQ,
+ NPCM8XX_GMAC4_IRQ,
+ NPCM8XX_MMC_IRQ = 26,
+ NPCM8XX_PSPI_IRQ = 28,
+ NPCM8XX_TIMER0_IRQ = 32, /* Timer Module 0 */
+ NPCM8XX_TIMER1_IRQ,
+ NPCM8XX_TIMER2_IRQ,
+ NPCM8XX_TIMER3_IRQ,
+ NPCM8XX_TIMER4_IRQ,
+ NPCM8XX_TIMER5_IRQ, /* Timer Module 1 */
+ NPCM8XX_TIMER6_IRQ,
+ NPCM8XX_TIMER7_IRQ,
+ NPCM8XX_TIMER8_IRQ,
+ NPCM8XX_TIMER9_IRQ,
+ NPCM8XX_TIMER10_IRQ, /* Timer Module 2 */
+ NPCM8XX_TIMER11_IRQ,
+ NPCM8XX_TIMER12_IRQ,
+ NPCM8XX_TIMER13_IRQ,
+ NPCM8XX_TIMER14_IRQ,
+ NPCM8XX_WDG0_IRQ = 47, /* Timer Module 0 Watchdog */
+ NPCM8XX_WDG1_IRQ, /* Timer Module 1 Watchdog */
+ NPCM8XX_WDG2_IRQ, /* Timer Module 2 Watchdog */
+ NPCM8XX_EHCI1_IRQ = 61,
+ NPCM8XX_OHCI1_IRQ,
+ NPCM8XX_EHCI2_IRQ,
+ NPCM8XX_OHCI2_IRQ,
+ NPCM8XX_PWM0_IRQ = 93, /* PWM module 0 */
+ NPCM8XX_PWM1_IRQ, /* PWM module 1 */
+ NPCM8XX_MFT0_IRQ = 96, /* MFT module 0 */
+ NPCM8XX_MFT1_IRQ, /* MFT module 1 */
+ NPCM8XX_MFT2_IRQ, /* MFT module 2 */
+ NPCM8XX_MFT3_IRQ, /* MFT module 3 */
+ NPCM8XX_MFT4_IRQ, /* MFT module 4 */
+ NPCM8XX_MFT5_IRQ, /* MFT module 5 */
+ NPCM8XX_MFT6_IRQ, /* MFT module 6 */
+ NPCM8XX_MFT7_IRQ, /* MFT module 7 */
+ NPCM8XX_PCI_MBOX1_IRQ = 105,
+ NPCM8XX_PCI_MBOX2_IRQ,
+ NPCM8XX_GPIO0_IRQ = 116,
+ NPCM8XX_GPIO1_IRQ,
+ NPCM8XX_GPIO2_IRQ,
+ NPCM8XX_GPIO3_IRQ,
+ NPCM8XX_GPIO4_IRQ,
+ NPCM8XX_GPIO5_IRQ,
+ NPCM8XX_GPIO6_IRQ,
+ NPCM8XX_GPIO7_IRQ,
+ NPCM8XX_SMBUS0_IRQ = 128,
+ NPCM8XX_SMBUS1_IRQ,
+ NPCM8XX_SMBUS2_IRQ,
+ NPCM8XX_SMBUS3_IRQ,
+ NPCM8XX_SMBUS4_IRQ,
+ NPCM8XX_SMBUS5_IRQ,
+ NPCM8XX_SMBUS6_IRQ,
+ NPCM8XX_SMBUS7_IRQ,
+ NPCM8XX_SMBUS8_IRQ,
+ NPCM8XX_SMBUS9_IRQ,
+ NPCM8XX_SMBUS10_IRQ,
+ NPCM8XX_SMBUS11_IRQ,
+ NPCM8XX_SMBUS12_IRQ,
+ NPCM8XX_SMBUS13_IRQ,
+ NPCM8XX_SMBUS14_IRQ,
+ NPCM8XX_SMBUS15_IRQ,
+ NPCM8XX_SMBUS16_IRQ,
+ NPCM8XX_SMBUS17_IRQ,
+ NPCM8XX_SMBUS18_IRQ,
+ NPCM8XX_SMBUS19_IRQ,
+ NPCM8XX_SMBUS20_IRQ,
+ NPCM8XX_SMBUS21_IRQ,
+ NPCM8XX_SMBUS22_IRQ,
+ NPCM8XX_SMBUS23_IRQ,
+ NPCM8XX_SMBUS24_IRQ,
+ NPCM8XX_SMBUS25_IRQ,
+ NPCM8XX_SMBUS26_IRQ,
+ NPCM8XX_UART0_IRQ = 192,
+ NPCM8XX_UART1_IRQ,
+ NPCM8XX_UART2_IRQ,
+ NPCM8XX_UART3_IRQ,
+ NPCM8XX_UART4_IRQ,
+ NPCM8XX_UART5_IRQ,
+ NPCM8XX_UART6_IRQ,
+};
+
+/* Total number of GIC interrupts, including internal Cortex-A35 interrupts. */
+#define NPCM8XX_NUM_IRQ (288)
+#define NPCM8XX_PPI_BASE(cpu) \
+ ((NPCM8XX_NUM_IRQ - GIC_INTERNAL) + (cpu) * GIC_INTERNAL)
+
+/* Register base address for each Timer Module */
+static const hwaddr npcm8xx_tim_addr[] = {
+ 0xf0008000,
+ 0xf0009000,
+ 0xf000a000,
+};
+
+/* Register base address for each 16550 UART */
+static const hwaddr npcm8xx_uart_addr[] = {
+ 0xf0000000,
+ 0xf0001000,
+ 0xf0002000,
+ 0xf0003000,
+ 0xf0004000,
+ 0xf0005000,
+ 0xf0006000,
+};
+
+/* Direct memory-mapped access to SPI0 CS0-1. */
+static const hwaddr npcm8xx_fiu0_flash_addr[] = {
+ 0x80000000, /* CS0 */
+ 0x88000000, /* CS1 */
+};
+
+/* Direct memory-mapped access to SPI1 CS0-3. */
+static const hwaddr npcm8xx_fiu1_flash_addr[] = {
+ 0x90000000, /* CS0 */
+ 0x91000000, /* CS1 */
+ 0x92000000, /* CS2 */
+ 0x93000000, /* CS3 */
+};
+
+/* Direct memory-mapped access to SPI3 CS0-3. */
+static const hwaddr npcm8xx_fiu3_flash_addr[] = {
+ 0xa0000000, /* CS0 */
+ 0xa8000000, /* CS1 */
+ 0xb0000000, /* CS2 */
+ 0xb8000000, /* CS3 */
+};
+
+/* Register base address for each PWM Module */
+static const hwaddr npcm8xx_pwm_addr[] = {
+ 0xf0103000,
+ 0xf0104000,
+ 0xf0105000,
+};
+
+/* Register base address for each MFT Module */
+static const hwaddr npcm8xx_mft_addr[] = {
+ 0xf0180000,
+ 0xf0181000,
+ 0xf0182000,
+ 0xf0183000,
+ 0xf0184000,
+ 0xf0185000,
+ 0xf0186000,
+ 0xf0187000,
+};
+
+/* Direct memory-mapped access to each SMBus Module. */
+static const hwaddr npcm8xx_smbus_addr[] = {
+ 0xf0080000,
+ 0xf0081000,
+ 0xf0082000,
+ 0xf0083000,
+ 0xf0084000,
+ 0xf0085000,
+ 0xf0086000,
+ 0xf0087000,
+ 0xf0088000,
+ 0xf0089000,
+ 0xf008a000,
+ 0xf008b000,
+ 0xf008c000,
+ 0xf008d000,
+ 0xf008e000,
+ 0xf008f000,
+ 0xfff00000,
+ 0xfff01000,
+ 0xfff02000,
+ 0xfff03000,
+ 0xfff04000,
+ 0xfff05000,
+ 0xfff06000,
+ 0xfff07000,
+ 0xfff08000,
+ 0xfff09000,
+ 0xfff0a000,
+};
+
+/* Register base address for each GMAC Module */
+static const hwaddr npcm8xx_gmac_addr[] = {
+ 0xf0802000,
+ 0xf0804000,
+ 0xf0806000,
+ 0xf0808000,
+};
+
+/* Register base address for each USB host EHCI registers */
+static const hwaddr npcm8xx_ehci_addr[] = {
+ 0xf0828100,
+ 0xf082a100,
+};
+
+/* Register base address for each USB host OHCI registers */
+static const hwaddr npcm8xx_ohci_addr[] = {
+ 0xf0829000,
+ 0xf082b000,
+};
+
+static const struct {
+ hwaddr regs_addr;
+ uint32_t reset_pu;
+ uint32_t reset_pd;
+ uint32_t reset_osrc;
+ uint32_t reset_odsc;
+} npcm8xx_gpio[] = {
+ {
+ .regs_addr = 0xf0010000,
+ .reset_pu = 0x00000300,
+ .reset_pd = 0x000f0000,
+ }, {
+ .regs_addr = 0xf0011000,
+ .reset_pu = 0xe0fefe01,
+ .reset_pd = 0x07000000,
+ }, {
+ .regs_addr = 0xf0012000,
+ .reset_pu = 0xc00fffff,
+ .reset_pd = 0x3ff00000,
+ }, {
+ .regs_addr = 0xf0013000,
+ .reset_pd = 0x00003000,
+ }, {
+ .regs_addr = 0xf0014000,
+ .reset_pu = 0xffff0000,
+ }, {
+ .regs_addr = 0xf0015000,
+ .reset_pu = 0xff8387fe,
+ .reset_pd = 0x007c0001,
+ .reset_osrc = 0x08000000,
+ }, {
+ .regs_addr = 0xf0016000,
+ .reset_pu = 0x00000801,
+ .reset_pd = 0x00000302,
+ }, {
+ .regs_addr = 0xf0017000,
+ .reset_pu = 0x000002ff,
+ .reset_pd = 0x00000c00,
+ },
+};
+
+static const struct {
+ const char *name;
+ hwaddr regs_addr;
+ int cs_count;
+ const hwaddr *flash_addr;
+ size_t flash_size;
+} npcm8xx_fiu[] = {
+ {
+ .name = "fiu0",
+ .regs_addr = 0xfb000000,
+ .cs_count = ARRAY_SIZE(npcm8xx_fiu0_flash_addr),
+ .flash_addr = npcm8xx_fiu0_flash_addr,
+ .flash_size = 128 * MiB,
+ },
+ {
+ .name = "fiu1",
+ .regs_addr = 0xfb002000,
+ .cs_count = ARRAY_SIZE(npcm8xx_fiu1_flash_addr),
+ .flash_addr = npcm8xx_fiu1_flash_addr,
+ .flash_size = 16 * MiB,
+ }, {
+ .name = "fiu3",
+ .regs_addr = 0xc0000000,
+ .cs_count = ARRAY_SIZE(npcm8xx_fiu3_flash_addr),
+ .flash_addr = npcm8xx_fiu3_flash_addr,
+ .flash_size = 128 * MiB,
+ },
+};
+
+static struct arm_boot_info npcm8xx_binfo = {
+ .loader_start = NPCM8XX_LOADER_START,
+ .smp_loader_start = NPCM8XX_SMP_LOADER_START,
+ .smp_bootreg_addr = NPCM8XX_SMP_BOOTREG_ADDR,
+ .gic_cpu_if_addr = NPCM8XX_GICC_BA,
+ .secure_boot = false,
+ .board_id = -1,
+ .board_setup_addr = NPCM8XX_BOARD_SETUP_ADDR,
+ .psci_conduit = QEMU_PSCI_CONDUIT_SMC,
+};
+
+void npcm8xx_load_kernel(MachineState *machine, NPCM8xxState *soc)
+{
+ npcm8xx_binfo.ram_size = machine->ram_size;
+
+ arm_load_kernel(&soc->cpu[0], machine, &npcm8xx_binfo);
+}
+
+static void npcm8xx_init_fuses(NPCM8xxState *s)
+{
+ NPCM8xxClass *nc = NPCM8XX_GET_CLASS(s);
+ uint32_t value;
+
+ /*
+ * The initial mask of disabled modules indicates the chip derivative (e.g.
+ * NPCM750 or NPCM730).
+ */
+ value = cpu_to_le32(nc->disabled_modules);
+ npcm7xx_otp_array_write(&s->fuse_array, &value, NPCM7XX_FUSE_DERIVATIVE,
+ sizeof(value));
+}
+
+static void npcm8xx_write_adc_calibration(NPCM8xxState *s)
+{
+ /* Both the ADC and the fuse array must have been realized. */
+ QEMU_BUILD_BUG_ON(sizeof(s->adc.calibration_r_values) != 4);
+ npcm7xx_otp_array_write(&s->fuse_array, s->adc.calibration_r_values,
+ NPCM7XX_FUSE_ADC_CALIB, sizeof(s->adc.calibration_r_values));
+}
+
+static qemu_irq npcm8xx_irq(NPCM8xxState *s, int n)
+{
+ return qdev_get_gpio_in(DEVICE(&s->gic), n);
+}
+
+static void npcm8xx_init(Object *obj)
+{
+ NPCM8xxState *s = NPCM8XX(obj);
+ int i;
+
+ object_initialize_child(obj, "cpu-cluster", &s->cpu_cluster,
+ TYPE_CPU_CLUSTER);
+ for (i = 0; i < NPCM8XX_MAX_NUM_CPUS; i++) {
+ object_initialize_child(OBJECT(&s->cpu_cluster), "cpu[*]", &s->cpu[i],
+ ARM_CPU_TYPE_NAME("cortex-a35"));
+ }
+ object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC);
+ object_initialize_child(obj, "gcr", &s->gcr, TYPE_NPCM8XX_GCR);
+ object_property_add_alias(obj, "power-on-straps", OBJECT(&s->gcr),
+ "power-on-straps");
+ object_initialize_child(obj, "clk", &s->clk, TYPE_NPCM8XX_CLK);
+ object_initialize_child(obj, "otp", &s->fuse_array,
+ TYPE_NPCM7XX_FUSE_ARRAY);
+ object_initialize_child(obj, "mc", &s->mc, TYPE_NPCM7XX_MC);
+ object_initialize_child(obj, "rng", &s->rng, TYPE_NPCM7XX_RNG);
+ object_initialize_child(obj, "adc", &s->adc, TYPE_NPCM7XX_ADC);
+
+ for (i = 0; i < ARRAY_SIZE(s->tim); i++) {
+ object_initialize_child(obj, "tim[*]", &s->tim[i], TYPE_NPCM7XX_TIMER);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->gpio); i++) {
+ object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_NPCM7XX_GPIO);
+ }
+
+
+ for (i = 0; i < ARRAY_SIZE(s->smbus); i++) {
+ object_initialize_child(obj, "smbus[*]", &s->smbus[i],
+ TYPE_NPCM7XX_SMBUS);
+ DEVICE(&s->smbus[i])->id = g_strdup_printf("smbus[%d]", i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ehci); i++) {
+ object_initialize_child(obj, "ehci[*]", &s->ehci[i], TYPE_NPCM7XX_EHCI);
+ }
+ for (i = 0; i < ARRAY_SIZE(s->ohci); i++) {
+ object_initialize_child(obj, "ohci[*]", &s->ohci[i], TYPE_SYSBUS_OHCI);
+ }
+
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_fiu) != ARRAY_SIZE(s->fiu));
+ for (i = 0; i < ARRAY_SIZE(s->fiu); i++) {
+ object_initialize_child(obj, npcm8xx_fiu[i].name, &s->fiu[i],
+ TYPE_NPCM7XX_FIU);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->pwm); i++) {
+ object_initialize_child(obj, "pwm[*]", &s->pwm[i], TYPE_NPCM7XX_PWM);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->mft); i++) {
+ object_initialize_child(obj, "mft[*]", &s->mft[i], TYPE_NPCM7XX_MFT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->gmac); i++) {
+ object_initialize_child(obj, "gmac[*]", &s->gmac[i], TYPE_NPCM_GMAC);
+ }
+ object_initialize_child(obj, "pcs", &s->pcs, TYPE_NPCM_PCS);
+
+ object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI);
+ object_initialize_child(obj, "pspi", &s->pspi, TYPE_NPCM_PSPI);
+}
+
+static void npcm8xx_realize(DeviceState *dev, Error **errp)
+{
+ NPCM8xxState *s = NPCM8XX(dev);
+ NPCM8xxClass *nc = NPCM8XX_GET_CLASS(s);
+ int i;
+
+ if (memory_region_size(s->dram) > NPCM8XX_DRAM_SZ) {
+ error_setg(errp, "%s: NPCM8xx cannot address more than %" PRIu64
+ " MiB of DRAM", __func__, NPCM8XX_DRAM_SZ / MiB);
+ return;
+ }
+
+ /* CPUs */
+ for (i = 0; i < nc->num_cpus; i++) {
+ object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity",
+ arm_build_mp_affinity(i, NPCM8XX_MAX_NUM_CPUS),
+ &error_abort);
+ object_property_set_bool(OBJECT(&s->cpu[i]), "reset-hivecs", true,
+ &error_abort);
+ object_property_set_int(OBJECT(&s->cpu[i]), "core-count",
+ nc->num_cpus, &error_abort);
+
+ /* Disable security extensions. */
+ object_property_set_bool(OBJECT(&s->cpu[i]), "has_el3", false,
+ &error_abort);
+
+ if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) {
+ return;
+ }
+ }
+
+ /* ARM GIC for Cortex A35. Can only fail if we pass bad parameters here. */
+ object_property_set_uint(OBJECT(&s->gic), "num-cpu", nc->num_cpus, errp);
+ object_property_set_uint(OBJECT(&s->gic), "num-irq", NPCM8XX_NUM_IRQ, errp);
+ object_property_set_uint(OBJECT(&s->gic), "revision", 2, errp);
+ object_property_set_bool(OBJECT(&s->gic), "has-security-extensions", true,
+ errp);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) {
+ return;
+ }
+ for (i = 0; i < nc->num_cpus; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus * 2,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_VIRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus * 3,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_VFIQ));
+
+ qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_PHYS,
+ qdev_get_gpio_in(DEVICE(&s->gic),
+ NPCM8XX_PPI_BASE(i) + ARCH_TIMER_NS_EL1_IRQ));
+ qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_VIRT,
+ qdev_get_gpio_in(DEVICE(&s->gic),
+ NPCM8XX_PPI_BASE(i) + ARCH_TIMER_VIRT_IRQ));
+ qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_HYP,
+ qdev_get_gpio_in(DEVICE(&s->gic),
+ NPCM8XX_PPI_BASE(i) + ARCH_TIMER_NS_EL2_IRQ));
+ qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_SEC,
+ qdev_get_gpio_in(DEVICE(&s->gic),
+ NPCM8XX_PPI_BASE(i) + ARCH_TIMER_S_EL1_IRQ));
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, NPCM8XX_GICD_BA);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, NPCM8XX_GICC_BA);
+
+ /* CPU cluster */
+ qdev_prop_set_uint32(DEVICE(&s->cpu_cluster), "cluster-id", 0);
+ qdev_realize(DEVICE(&s->cpu_cluster), NULL, &error_fatal);
+
+ /* System Global Control Registers (GCR). Can fail due to user input. */
+ object_property_set_int(OBJECT(&s->gcr), "disabled-modules",
+ nc->disabled_modules, &error_abort);
+ object_property_add_const_link(OBJECT(&s->gcr), "dram-mr", OBJECT(s->dram));
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gcr), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gcr), 0, NPCM8XX_GCR_BA);
+
+ /* Clock Control Registers (CLK). Cannot fail. */
+ sysbus_realize(SYS_BUS_DEVICE(&s->clk), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->clk), 0, NPCM8XX_CLK_BA);
+
+ /* OTP fuse strap array. Cannot fail. */
+ sysbus_realize(SYS_BUS_DEVICE(&s->fuse_array), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->fuse_array), 0, NPCM8XX_OTP_BA);
+ npcm8xx_init_fuses(s);
+
+ /* Fake Memory Controller (MC). Cannot fail. */
+ sysbus_realize(SYS_BUS_DEVICE(&s->mc), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->mc), 0, NPCM8XX_MC_BA);
+
+ /* ADC Modules. Cannot fail. */
+ qdev_connect_clock_in(DEVICE(&s->adc), "clock", qdev_get_clock_out(
+ DEVICE(&s->clk), "adc-clock"));
+ sysbus_realize(SYS_BUS_DEVICE(&s->adc), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, NPCM8XX_ADC_BA);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0,
+ npcm8xx_irq(s, NPCM8XX_ADC_IRQ));
+ npcm8xx_write_adc_calibration(s);
+
+ /* Timer Modules (TIM). Cannot fail. */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_tim_addr) != ARRAY_SIZE(s->tim));
+ for (i = 0; i < ARRAY_SIZE(s->tim); i++) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->tim[i]);
+ int first_irq;
+ int j;
+
+ /* Connect the timer clock. */
+ qdev_connect_clock_in(DEVICE(&s->tim[i]), "clock", qdev_get_clock_out(
+ DEVICE(&s->clk), "timer-clock"));
+
+ sysbus_realize(sbd, &error_abort);
+ sysbus_mmio_map(sbd, 0, npcm8xx_tim_addr[i]);
+
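+ /*
+ * Each timer module provides NPCM7XX_TIMERS_PER_CTRL timers; their
+ * interrupts are numbered consecutively starting at NPCM8XX_TIMER0_IRQ.
+ */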
+ first_irq = NPCM8XX_TIMER0_IRQ + i * NPCM7XX_TIMERS_PER_CTRL;
+ for (j = 0; j < NPCM7XX_TIMERS_PER_CTRL; j++) {
+ qemu_irq irq = npcm8xx_irq(s, first_irq + j);
+ sysbus_connect_irq(sbd, j, irq);
+ }
+
+ /* IRQ for watchdogs */
+ sysbus_connect_irq(sbd, NPCM7XX_TIMERS_PER_CTRL,
+ npcm8xx_irq(s, NPCM8XX_WDG0_IRQ + i));
+ /* GPIO that connects clk module with watchdog */
+ qdev_connect_gpio_out_named(DEVICE(&s->tim[i]),
+ NPCM7XX_WATCHDOG_RESET_GPIO_OUT, 0,
+ qdev_get_gpio_in_named(DEVICE(&s->clk),
+ NPCM7XX_WATCHDOG_RESET_GPIO_IN, i));
+ }
+
+ /* UART0..6 (16550 compatible) */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_uart_addr); i++) {
+ serial_mm_init(get_system_memory(), npcm8xx_uart_addr[i], 2,
+ npcm8xx_irq(s, NPCM8XX_UART0_IRQ + i), 115200,
+ serial_hd(i), DEVICE_LITTLE_ENDIAN);
+ }
+
+ /* Random Number Generator. Cannot fail. */
+ sysbus_realize(SYS_BUS_DEVICE(&s->rng), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->rng), 0, NPCM8XX_RNG_BA);
+
+ /* GPIO modules. Cannot fail. */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_gpio) != ARRAY_SIZE(s->gpio));
+ for (i = 0; i < ARRAY_SIZE(s->gpio); i++) {
+ Object *obj = OBJECT(&s->gpio[i]);
+
+ object_property_set_uint(obj, "reset-pullup",
+ npcm8xx_gpio[i].reset_pu, &error_abort);
+ object_property_set_uint(obj, "reset-pulldown",
+ npcm8xx_gpio[i].reset_pd, &error_abort);
+ object_property_set_uint(obj, "reset-osrc",
+ npcm8xx_gpio[i].reset_osrc, &error_abort);
+ object_property_set_uint(obj, "reset-odsc",
+ npcm8xx_gpio[i].reset_odsc, &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm8xx_gpio[i].regs_addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0,
+ npcm8xx_irq(s, NPCM8XX_GPIO0_IRQ + i));
+ }
+
+ /* SMBus modules. Cannot fail. */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_smbus_addr) != ARRAY_SIZE(s->smbus));
+ for (i = 0; i < ARRAY_SIZE(s->smbus); i++) {
+ Object *obj = OBJECT(&s->smbus[i]);
+
+ sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm8xx_smbus_addr[i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0,
+ npcm8xx_irq(s, NPCM8XX_SMBUS0_IRQ + i));
+ }
+
+ /* USB Host */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->ohci) != ARRAY_SIZE(s->ehci));
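+ /*
+ * Each OHCI acts as a companion controller on the EHCI bus ("usb-bus.0"),
+ * handling full- and low-speed devices on its port.
+ */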
+ for (i = 0; i < ARRAY_SIZE(s->ehci); i++) {
+ object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", true,
+ &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, npcm8xx_ehci_addr[i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0,
+ npcm8xx_irq(s, NPCM8XX_EHCI1_IRQ + 2 * i));
+ }
+ for (i = 0; i < ARRAY_SIZE(s->ohci); i++) {
+ object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", "usb-bus.0",
+ &error_abort);
+ object_property_set_uint(OBJECT(&s->ohci[i]), "num-ports", 1,
+ &error_abort);
+ object_property_set_uint(OBJECT(&s->ohci[i]), "firstport", i,
+ &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, npcm8xx_ohci_addr[i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0,
+ npcm8xx_irq(s, NPCM8XX_OHCI1_IRQ + 2 * i));
+ }
+
+ /* PWM Modules. Cannot fail. */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_pwm_addr) != ARRAY_SIZE(s->pwm));
+ for (i = 0; i < ARRAY_SIZE(s->pwm); i++) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->pwm[i]);
+
+ qdev_connect_clock_in(DEVICE(&s->pwm[i]), "clock", qdev_get_clock_out(
+ DEVICE(&s->clk), "apb3-clock"));
+ sysbus_realize(sbd, &error_abort);
+ sysbus_mmio_map(sbd, 0, npcm8xx_pwm_addr[i]);
+ sysbus_connect_irq(sbd, i, npcm8xx_irq(s, NPCM8XX_PWM0_IRQ + i));
+ }
+
+ /* MFT Modules. Cannot fail. */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_mft_addr) != ARRAY_SIZE(s->mft));
+ for (i = 0; i < ARRAY_SIZE(s->mft); i++) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->mft[i]);
+
+ qdev_connect_clock_in(DEVICE(&s->mft[i]), "clock-in",
+ qdev_get_clock_out(DEVICE(&s->clk),
+ "apb4-clock"));
+ sysbus_realize(sbd, &error_abort);
+ sysbus_mmio_map(sbd, 0, npcm8xx_mft_addr[i]);
+ sysbus_connect_irq(sbd, 0, npcm8xx_irq(s, NPCM8XX_MFT0_IRQ + i));
+ }
+
+ /*
+ * GMAC Modules. Cannot fail.
+ */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_gmac_addr) != ARRAY_SIZE(s->gmac));
+ for (i = 0; i < ARRAY_SIZE(s->gmac); i++) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->gmac[i]);
+
+ /* Pick up any -nic configuration (netdev backend, MAC address) for this NIC. */
+ qemu_configure_nic_device(DEVICE(sbd), false, NULL);
+
+ /*
+ * The device exists regardless of whether it's connected to a QEMU
+ * netdev backend. So always instantiate it even if there is no
+ * backend.
+ */
+ sysbus_realize(sbd, &error_abort);
+ sysbus_mmio_map(sbd, 0, npcm8xx_gmac_addr[i]);
+ /*
+ * N.B. The values for the second argument of sysbus_connect_irq are
+ * chosen to match the registration order in npcm7xx_emc_realize.
+ */
+ sysbus_connect_irq(sbd, 0, npcm8xx_irq(s, NPCM8XX_GMAC1_IRQ + i));
+ }
+ /*
+ * GMAC Physical Coding Sublayer (PCS) Module. Cannot fail.
+ */
+ sysbus_realize(SYS_BUS_DEVICE(&s->pcs), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcs), 0, NPCM8XX_PCS_BA);
+
+ /*
+ * Flash Interface Unit (FIU). Can fail if an incorrect number of chip
+ * selects is specified, but this is a programming error.
+ */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_fiu) != ARRAY_SIZE(s->fiu));
+ for (i = 0; i < ARRAY_SIZE(s->fiu); i++) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->fiu[i]);
+ int j;
+
+ object_property_set_int(OBJECT(sbd), "cs-count",
+ npcm8xx_fiu[i].cs_count, &error_abort);
+ object_property_set_int(OBJECT(sbd), "flash-size",
+ npcm8xx_fiu[i].flash_size, &error_abort);
+ sysbus_realize(sbd, &error_abort);
+
+ sysbus_mmio_map(sbd, 0, npcm8xx_fiu[i].regs_addr);
+ for (j = 0; j < npcm8xx_fiu[i].cs_count; j++) {
+ sysbus_mmio_map(sbd, j + 1, npcm8xx_fiu[i].flash_addr[j]);
+ }
+ }
+
+ /* RAM2 (SRAM) */
+ memory_region_init_ram(&s->sram, OBJECT(dev), "ram2",
+ NPCM8XX_RAM2_SZ, &error_abort);
+ memory_region_add_subregion(get_system_memory(), NPCM8XX_RAM2_BA, &s->sram);
+
+ /* RAM3 (SRAM) */
+ memory_region_init_ram(&s->ram3, OBJECT(dev), "ram3",
+ NPCM8XX_RAM3_SZ, &error_abort);
+ memory_region_add_subregion(get_system_memory(), NPCM8XX_RAM3_BA, &s->ram3);
+
+ /* Internal ROM */
+ memory_region_init_rom(&s->irom, OBJECT(dev), "irom", NPCM8XX_ROM_SZ,
+ &error_abort);
+ memory_region_add_subregion(get_system_memory(), NPCM8XX_ROM_BA, &s->irom);
+
+ /* SDHCI */
+ sysbus_realize(SYS_BUS_DEVICE(&s->mmc), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc), 0, NPCM8XX_MMC_BA);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc), 0,
+ npcm8xx_irq(s, NPCM8XX_MMC_IRQ));
+
+ /* PSPI */
+ sysbus_realize(SYS_BUS_DEVICE(&s->pspi), &error_abort);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->pspi), 0, NPCM8XX_PSPI_BA);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pspi), 0,
+ npcm8xx_irq(s, NPCM8XX_PSPI_IRQ));
+
+ create_unimplemented_device("npcm8xx.shm", 0xc0001000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.gicextra", 0xdfffa000, 24 * KiB);
+ create_unimplemented_device("npcm8xx.vdmx", 0xe0800000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.pcierc", 0xe1000000, 64 * KiB);
+ create_unimplemented_device("npcm8xx.rootc", 0xe8000000, 128 * MiB);
+ create_unimplemented_device("npcm8xx.kcs", 0xf0007000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.gfxi", 0xf000e000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.fsw", 0xf000f000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.bt", 0xf0030000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.espi", 0xf009f000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.peci", 0xf0100000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.siox[1]", 0xf0101000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.siox[2]", 0xf0102000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.tmps", 0xf0188000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.viru1", 0xf0204000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.viru2", 0xf0205000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.jtm1", 0xf0208000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.jtm2", 0xf0209000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.flm0", 0xf0210000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.flm1", 0xf0211000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.flm2", 0xf0212000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.flm3", 0xf0213000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.ahbpci", 0xf0400000, 1 * MiB);
+ create_unimplemented_device("npcm8xx.dap", 0xf0500000, 960 * KiB);
+ create_unimplemented_device("npcm8xx.mcphy", 0xf05f0000, 64 * KiB);
+ create_unimplemented_device("npcm8xx.tsgen", 0xf07fc000, 8 * KiB);
+ create_unimplemented_device("npcm8xx.copctl", 0xf080c000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.tipctl", 0xf080d000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.rst", 0xf080e000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.vcd", 0xf0810000, 64 * KiB);
+ create_unimplemented_device("npcm8xx.ece", 0xf0820000, 8 * KiB);
+ create_unimplemented_device("npcm8xx.vdma", 0xf0822000, 8 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[0]", 0xf0830000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[1]", 0xf0831000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[2]", 0xf0832000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[3]", 0xf0833000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[4]", 0xf0834000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[5]", 0xf0835000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[6]", 0xf0836000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[7]", 0xf0837000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[8]", 0xf0838000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.usbd[9]", 0xf0839000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.pci_mbox1", 0xf0848000, 64 * KiB);
+ create_unimplemented_device("npcm8xx.gdma0", 0xf0850000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.gdma1", 0xf0851000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.gdma2", 0xf0852000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.aes", 0xf0858000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.des", 0xf0859000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.sha", 0xf085a000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.pci_mbox2", 0xf0868000, 64 * KiB);
+ create_unimplemented_device("npcm8xx.i3c0", 0xfff10000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.i3c1", 0xfff11000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.i3c2", 0xfff12000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.i3c3", 0xfff13000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.i3c4", 0xfff14000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.i3c5", 0xfff15000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.spixcs0", 0xf8000000, 16 * MiB);
+ create_unimplemented_device("npcm8xx.spixcs1", 0xf9000000, 16 * MiB);
+ create_unimplemented_device("npcm8xx.spix", 0xfb001000, 4 * KiB);
+ create_unimplemented_device("npcm8xx.vect", 0xffff0000, 256);
+}
+
+static const Property npcm8xx_properties[] = {
+ DEFINE_PROP_LINK("dram-mr", NPCM8xxState, dram, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+};
+
+static void npcm8xx_class_init(ObjectClass *oc, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ NPCM8xxClass *nc = NPCM8XX_CLASS(oc);
+
+ dc->realize = npcm8xx_realize;
+ dc->user_creatable = false;
+ nc->disabled_modules = 0x00000000;
+ nc->num_cpus = NPCM8XX_MAX_NUM_CPUS;
+ device_class_set_props(dc, npcm8xx_properties);
+}
+
+static const TypeInfo npcm8xx_soc_types[] = {
+ {
+ .name = TYPE_NPCM8XX,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(NPCM8xxState),
+ .instance_init = npcm8xx_init,
+ .class_size = sizeof(NPCM8xxClass),
+ .class_init = npcm8xx_class_init,
+ },
+};
+
+DEFINE_TYPES(npcm8xx_soc_types);
diff --git a/hw/arm/npcm8xx_boards.c b/hw/arm/npcm8xx_boards.c
new file mode 100644
index 0000000..3bf3e1f
--- /dev/null
+++ b/hw/arm/npcm8xx_boards.c
@@ -0,0 +1,254 @@
+/*
+ * Machine definitions for boards featuring an NPCM8xx SoC.
+ *
+ * Copyright 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+
+#include "chardev/char.h"
+#include "hw/boards.h"
+#include "hw/arm/npcm8xx.h"
+#include "hw/core/cpu.h"
+#include "hw/loader.h"
+#include "hw/qdev-core.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/datadir.h"
+#include "qemu/units.h"
+
+#define NPCM845_EVB_POWER_ON_STRAPS 0x000017ff
+
+static const char npcm8xx_default_bootrom[] = "npcm8xx_bootrom.bin";
+
+static void npcm8xx_load_bootrom(MachineState *machine, NPCM8xxState *soc)
+{
+ const char *bios_name = machine->firmware ?: npcm8xx_default_bootrom;
+ g_autofree char *filename = NULL;
+ int ret;
+
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!filename) {
+ error_report("Could not find ROM image '%s'", bios_name);
+ if (!machine->kernel_filename) {
+ /* We can't boot without a bootrom or a kernel image. */
+ exit(1);
+ }
+ return;
+ }
+ ret = load_image_mr(filename, machine->ram);
+ if (ret < 0) {
+ error_report("Failed to load ROM image '%s'", filename);
+ exit(1);
+ }
+}
+
+static void npcm8xx_connect_flash(NPCM7xxFIUState *fiu, int cs_no,
+ const char *flash_type, DriveInfo *dinfo)
+{
+ DeviceState *flash;
+ qemu_irq flash_cs;
+
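+ /*
+ * Create the flash chip on the FIU's SSI bus, attach the backing drive
+ * if one was given, and wire the FIU's chip-select output to the flash.
+ */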
+ flash = qdev_new(flash_type);
+ if (dinfo) {
+ qdev_prop_set_drive(flash, "drive", blk_by_legacy_dinfo(dinfo));
+ }
+ qdev_realize_and_unref(flash, BUS(fiu->spi), &error_fatal);
+
+ flash_cs = qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0);
+ qdev_connect_gpio_out_named(DEVICE(fiu), "cs", cs_no, flash_cs);
+}
+
+static void npcm8xx_connect_dram(NPCM8xxState *soc, MemoryRegion *dram)
+{
+ memory_region_add_subregion(get_system_memory(), NPCM8XX_DRAM_BA, dram);
+
+ object_property_set_link(OBJECT(soc), "dram-mr", OBJECT(dram),
+ &error_abort);
+}
+
+static NPCM8xxState *npcm8xx_create_soc(MachineState *machine,
+ uint32_t hw_straps)
+{
+ NPCM8xxMachineClass *nmc = NPCM8XX_MACHINE_GET_CLASS(machine);
+ Object *obj;
+
+ obj = object_new_with_props(nmc->soc_type, OBJECT(machine), "soc",
+ &error_abort, NULL);
+ object_property_set_uint(obj, "power-on-straps", hw_straps, &error_abort);
+
+ return NPCM8XX(obj);
+}
+
+static I2CBus *npcm8xx_i2c_get_bus(NPCM8xxState *soc, uint32_t num)
+{
+ g_assert(num < ARRAY_SIZE(soc->smbus));
+ return I2C_BUS(qdev_get_child_bus(DEVICE(&soc->smbus[num]), "i2c-bus"));
+}
+
+static void npcm8xx_init_pwm_splitter(NPCM8xxMachine *machine,
+ NPCM8xxState *soc, const int *fan_counts)
+{
+ SplitIRQ *splitters = machine->fan_splitter;
+
+ /*
+ * PWM 0~3 belong to module 0 output 0~3.
+ * PWM 4~7 belong to module 1 output 0~3.
+ */
+ for (int i = 0; i < NPCM8XX_NR_PWM_MODULES; ++i) {
+ for (int j = 0; j < NPCM7XX_PWM_PER_MODULE; ++j) {
+ int splitter_no = i * NPCM7XX_PWM_PER_MODULE + j;
+ DeviceState *splitter;
+
+ if (fan_counts[splitter_no] < 1) {
+ continue;
+ }
+ object_initialize_child(OBJECT(machine), "fan-splitter[*]",
+ &splitters[splitter_no], TYPE_SPLIT_IRQ);
+ splitter = DEVICE(&splitters[splitter_no]);
+ qdev_prop_set_uint16(splitter, "num-lines",
+ fan_counts[splitter_no]);
+ qdev_realize(splitter, NULL, &error_abort);
+ qdev_connect_gpio_out_named(DEVICE(&soc->pwm[i]), "duty-gpio-out",
+ j, qdev_get_gpio_in(splitter, 0));
+ }
+ }
+}
+
+static void npcm8xx_connect_pwm_fan(NPCM8xxState *soc, SplitIRQ *splitter,
+ int fan_no, int output_no)
+{
+ DeviceState *fan;
+ int fan_input;
+ qemu_irq fan_duty_gpio;
+
+ g_assert(fan_no >= 0 && fan_no <= NPCM7XX_MFT_MAX_FAN_INPUT);
+ /*
+ * Fan 0~1 belong to module 0 input 0~1.
+ * Fan 2~3 belong to module 1 input 0~1.
+ * ...
+ * Fan 14~15 belong to module 7 input 0~1.
+ * Fan 16~17 belong to module 0 input 2~3.
+ * Fan 18~19 belong to module 1 input 2~3.
+ */
+ if (fan_no < 16) {
+ fan = DEVICE(&soc->mft[fan_no / 2]);
+ fan_input = fan_no % 2;
+ } else {
+ fan = DEVICE(&soc->mft[(fan_no - 16) / 2]);
+ fan_input = fan_no % 2 + 2;
+ }
+
+ /* Connect the Fan to PWM module */
+ fan_duty_gpio = qdev_get_gpio_in_named(fan, "duty", fan_input);
+ qdev_connect_gpio_out(DEVICE(splitter), output_no, fan_duty_gpio);
+}
+
+static void npcm845_evb_i2c_init(NPCM8xxState *soc)
+{
+ /* tmp100 temperature sensor on SVB, tmp105 is compatible */
+ i2c_slave_create_simple(npcm8xx_i2c_get_bus(soc, 6), "tmp105", 0x48);
+}
+
+static void npcm845_evb_fan_init(NPCM8xxMachine *machine, NPCM8xxState *soc)
+{
+ SplitIRQ *splitter = machine->fan_splitter;
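+ /* Two fans on each of the first eight PWM outputs; outputs 8-11 are unused. */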
+ static const int fan_counts[] = {2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0};
+
+ npcm8xx_init_pwm_splitter(machine, soc, fan_counts);
+ npcm8xx_connect_pwm_fan(soc, &splitter[0], 0x00, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[0], 0x01, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[1], 0x02, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[1], 0x03, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[2], 0x04, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[2], 0x05, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[3], 0x06, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[3], 0x07, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[4], 0x08, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[4], 0x09, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[5], 0x0a, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[5], 0x0b, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[6], 0x0c, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[6], 0x0d, 1);
+ npcm8xx_connect_pwm_fan(soc, &splitter[7], 0x0e, 0);
+ npcm8xx_connect_pwm_fan(soc, &splitter[7], 0x0f, 1);
+}
+
+static void npcm845_evb_init(MachineState *machine)
+{
+ NPCM8xxState *soc;
+
+ soc = npcm8xx_create_soc(machine, NPCM845_EVB_POWER_ON_STRAPS);
+ npcm8xx_connect_dram(soc, machine->ram);
+ qdev_realize(DEVICE(soc), NULL, &error_fatal);
+
+ npcm8xx_load_bootrom(machine, soc);
+ npcm8xx_connect_flash(&soc->fiu[0], 0, "w25q256", drive_get(IF_MTD, 0, 0));
+ npcm845_evb_i2c_init(soc);
+ npcm845_evb_fan_init(NPCM8XX_MACHINE(machine), soc);
+ npcm8xx_load_kernel(machine, soc);
+}
+
+static void npcm8xx_set_soc_type(NPCM8xxMachineClass *nmc, const char *type)
+{
+ NPCM8xxClass *sc = NPCM8XX_CLASS(object_class_by_name(type));
+ MachineClass *mc = MACHINE_CLASS(nmc);
+
+ nmc->soc_type = type;
+ mc->default_cpus = mc->min_cpus = mc->max_cpus = sc->num_cpus;
+}
+
+static void npcm8xx_machine_class_init(ObjectClass *oc, const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ static const char * const valid_cpu_types[] = {
+ ARM_CPU_TYPE_NAME("cortex-a35"),
+ NULL
+ };
+
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->no_parallel = 1;
+ mc->default_ram_id = "ram";
+ mc->valid_cpu_types = valid_cpu_types;
+}
+
+static void npcm845_evb_machine_class_init(ObjectClass *oc, const void *data)
+{
+ NPCM8xxMachineClass *nmc = NPCM8XX_MACHINE_CLASS(oc);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ npcm8xx_set_soc_type(nmc, TYPE_NPCM8XX);
+
+ mc->desc = "Nuvoton NPCM845 Evaluation Board (Cortex-A35)";
+ mc->init = npcm845_evb_init;
+ mc->default_ram_size = 1 * GiB;
+}
+
+static const TypeInfo npcm8xx_machine_types[] = {
+ {
+ .name = TYPE_NPCM8XX_MACHINE,
+ .parent = TYPE_MACHINE,
+ .instance_size = sizeof(NPCM8xxMachine),
+ .class_size = sizeof(NPCM8xxMachineClass),
+ .class_init = npcm8xx_machine_class_init,
+ .abstract = true,
+ }, {
+ .name = MACHINE_TYPE_NAME("npcm845-evb"),
+ .parent = TYPE_NPCM8XX_MACHINE,
+ .class_init = npcm845_evb_machine_class_init,
+ },
+};
+
+DEFINE_TYPES(npcm8xx_machine_types)
diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c
index ac53441..d8cc321 100644
--- a/hw/arm/nrf51_soc.c
+++ b/hw/arm/nrf51_soc.c
@@ -76,16 +76,16 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp)
}
/* This clock doesn't need migration because it is fixed-frequency */
clock_set_hz(s->sysclk, HCLK_FRQ);
- qdev_connect_clock_in(DEVICE(&s->cpu), "cpuclk", s->sysclk);
+ qdev_connect_clock_in(DEVICE(&s->armv7m), "cpuclk", s->sysclk);
/*
* This SoC has no systick device, so don't connect refclk.
* TODO: model the lack of systick (currently the armv7m object
* will always provide one).
*/
- object_property_set_link(OBJECT(&s->cpu), "memory", OBJECT(&s->container),
+ object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(&s->container),
&error_abort);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) {
return;
}
@@ -104,7 +104,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp)
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->uart), 0);
memory_region_add_subregion_overlap(&s->container, NRF51_UART_BASE, mr, 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), 0,
- qdev_get_gpio_in(DEVICE(&s->cpu),
+ qdev_get_gpio_in(DEVICE(&s->armv7m),
BASE_TO_IRQ(NRF51_UART_BASE)));
/* RNG */
@@ -115,7 +115,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp)
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0);
memory_region_add_subregion_overlap(&s->container, NRF51_RNG_BASE, mr, 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->rng), 0,
- qdev_get_gpio_in(DEVICE(&s->cpu),
+ qdev_get_gpio_in(DEVICE(&s->armv7m),
BASE_TO_IRQ(NRF51_RNG_BASE)));
/* UICR, FICR, NVMC, FLASH */
@@ -161,7 +161,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer[i]), 0, base_addr);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer[i]), 0,
- qdev_get_gpio_in(DEVICE(&s->cpu),
+ qdev_get_gpio_in(DEVICE(&s->armv7m),
BASE_TO_IRQ(base_addr)));
}
@@ -185,10 +185,10 @@ static void nrf51_soc_init(Object *obj)
memory_region_init(&s->container, obj, "nrf51-container", UINT64_MAX);
- object_initialize_child(OBJECT(s), "armv6m", &s->cpu, TYPE_ARMV7M);
- qdev_prop_set_string(DEVICE(&s->cpu), "cpu-type",
+ object_initialize_child(OBJECT(s), "armv6m", &s->armv7m, TYPE_ARMV7M);
+ qdev_prop_set_string(DEVICE(&s->armv7m), "cpu-type",
ARM_CPU_TYPE_NAME("cortex-m0"));
- qdev_prop_set_uint32(DEVICE(&s->cpu), "num-irq", 32);
+ qdev_prop_set_uint32(DEVICE(&s->armv7m), "num-irq", 32);
object_initialize_child(obj, "uart", &s->uart, TYPE_NRF51_UART);
object_property_add_alias(obj, "serial0", OBJECT(&s->uart), "chardev");
@@ -208,16 +208,15 @@ static void nrf51_soc_init(Object *obj)
s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
}
-static Property nrf51_soc_properties[] = {
+static const Property nrf51_soc_properties[] = {
DEFINE_PROP_LINK("memory", NRF51State, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("sram-size", NRF51State, sram_size, NRF51822_SRAM_SIZE),
DEFINE_PROP_UINT32("flash-size", NRF51State, flash_size,
NRF51822_FLASH_SIZE),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nrf51_soc_class_init(ObjectClass *klass, void *data)
+static void nrf51_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
deleted file mode 100644
index 3536431..0000000
--- a/hw/arm/nseries.c
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * Nokia N-series internet tablets.
- *
- * Copyright (C) 2007 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "cpu.h"
-#include "chardev/char.h"
-#include "qemu/cutils.h"
-#include "qemu/bswap.h"
-#include "qemu/hw-version.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "hw/arm/omap.h"
-#include "hw/arm/boot.h"
-#include "hw/irq.h"
-#include "ui/console.h"
-#include "hw/boards.h"
-#include "hw/i2c/i2c.h"
-#include "hw/display/blizzard.h"
-#include "hw/input/lm832x.h"
-#include "hw/input/tsc2xxx.h"
-#include "hw/misc/cbus.h"
-#include "hw/sensor/tmp105.h"
-#include "hw/qdev-properties.h"
-#include "hw/block/flash.h"
-#include "hw/hw.h"
-#include "hw/loader.h"
-#include "hw/sysbus.h"
-#include "qemu/log.h"
-#include "qemu/error-report.h"
-
-
-/* Nokia N8x0 support */
-struct n800_s {
- struct omap_mpu_state_s *mpu;
-
- struct rfbi_chip_s blizzard;
- struct {
- void *opaque;
- uint32_t (*txrx)(void *opaque, uint32_t value, int len);
- uWireSlave *chip;
- } ts;
-
- int keymap[0x80];
- DeviceState *kbd;
-
- DeviceState *usb;
- void *retu;
- void *tahvo;
- DeviceState *nand;
-};
-
-/* GPIO pins */
-#define N8X0_TUSB_ENABLE_GPIO 0
-#define N800_MMC2_WP_GPIO 8
-#define N800_UNKNOWN_GPIO0 9 /* out */
-#define N810_MMC2_VIOSD_GPIO 9
-#define N810_HEADSET_AMP_GPIO 10
-#define N800_CAM_TURN_GPIO 12
-#define N810_GPS_RESET_GPIO 12
-#define N800_BLIZZARD_POWERDOWN_GPIO 15
-#define N800_MMC1_WP_GPIO 23
-#define N810_MMC2_VSD_GPIO 23
-#define N8X0_ONENAND_GPIO 26
-#define N810_BLIZZARD_RESET_GPIO 30
-#define N800_UNKNOWN_GPIO2 53 /* out */
-#define N8X0_TUSB_INT_GPIO 58
-#define N8X0_BT_WKUP_GPIO 61
-#define N8X0_STI_GPIO 62
-#define N8X0_CBUS_SEL_GPIO 64
-#define N8X0_CBUS_DAT_GPIO 65
-#define N8X0_CBUS_CLK_GPIO 66
-#define N8X0_WLAN_IRQ_GPIO 87
-#define N8X0_BT_RESET_GPIO 92
-#define N8X0_TEA5761_CS_GPIO 93
-#define N800_UNKNOWN_GPIO 94
-#define N810_TSC_RESET_GPIO 94
-#define N800_CAM_ACT_GPIO 95
-#define N810_GPS_WAKEUP_GPIO 95
-#define N8X0_MMC_CS_GPIO 96
-#define N8X0_WLAN_PWR_GPIO 97
-#define N8X0_BT_HOST_WKUP_GPIO 98
-#define N810_SPEAKER_AMP_GPIO 101
-#define N810_KB_LOCK_GPIO 102
-#define N800_TSC_TS_GPIO 103
-#define N810_TSC_TS_GPIO 106
-#define N8X0_HEADPHONE_GPIO 107
-#define N8X0_RETU_GPIO 108
-#define N800_TSC_KP_IRQ_GPIO 109
-#define N810_KEYBOARD_GPIO 109
-#define N800_BAT_COVER_GPIO 110
-#define N810_SLIDE_GPIO 110
-#define N8X0_TAHVO_GPIO 111
-#define N800_UNKNOWN_GPIO4 112 /* out */
-#define N810_SLEEPX_LED_GPIO 112
-#define N800_TSC_RESET_GPIO 118 /* ? */
-#define N810_AIC33_RESET_GPIO 118
-#define N800_TSC_UNKNOWN_GPIO 119 /* out */
-#define N8X0_TMP105_GPIO 125
-
-/* Config */
-#define BT_UART 0
-#define XLDR_LL_UART 1
-
-/* Addresses on the I2C bus 0 */
-#define N810_TLV320AIC33_ADDR 0x18 /* Audio CODEC */
-#define N8X0_TCM825x_ADDR 0x29 /* Camera */
-#define N810_LP5521_ADDR 0x32 /* LEDs */
-#define N810_TSL2563_ADDR 0x3d /* Light sensor */
-#define N810_LM8323_ADDR 0x45 /* Keyboard */
-/* Addresses on the I2C bus 1 */
-#define N8X0_TMP105_ADDR 0x48 /* Temperature sensor */
-#define N8X0_MENELAUS_ADDR 0x72 /* Power management */
-
-/* Chipselects on GPMC NOR interface */
-#define N8X0_ONENAND_CS 0
-#define N8X0_USB_ASYNC_CS 1
-#define N8X0_USB_SYNC_CS 4
-
-#define N8X0_BD_ADDR 0x00, 0x1a, 0x89, 0x9e, 0x3e, 0x81
-
-static void n800_mmc_cs_cb(void *opaque, int line, int level)
-{
- /* TODO: this seems to actually be connected to the menelaus, to
- * which also both MMC slots connect. */
- omap_mmc_enable((struct omap_mmc_s *) opaque, !level);
-}
-
-static void n8x0_gpio_setup(struct n800_s *s)
-{
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO,
- qemu_allocate_irq(n800_mmc_cs_cb, s->mpu->mmc, 0));
- qemu_irq_lower(qdev_get_gpio_in(s->mpu->gpio, N800_BAT_COVER_GPIO));
-}
-
-#define MAEMO_CAL_HEADER(...) \
- 'C', 'o', 'n', 'F', 0x02, 0x00, 0x04, 0x00, \
- __VA_ARGS__, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-static const uint8_t n8x0_cal_wlan_mac[] = {
- MAEMO_CAL_HEADER('w', 'l', 'a', 'n', '-', 'm', 'a', 'c')
- 0x1c, 0x00, 0x00, 0x00, 0x47, 0xd6, 0x69, 0xb3,
- 0x30, 0x08, 0xa0, 0x83, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00,
- 0x89, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00,
- 0x5d, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00,
-};
-
-static const uint8_t n8x0_cal_bt_id[] = {
- MAEMO_CAL_HEADER('b', 't', '-', 'i', 'd', 0, 0, 0)
- 0x0a, 0x00, 0x00, 0x00, 0xa3, 0x4b, 0xf6, 0x96,
- 0xa8, 0xeb, 0xb2, 0x41, 0x00, 0x00, 0x00, 0x00,
- N8X0_BD_ADDR,
-};
-
-static void n8x0_nand_setup(struct n800_s *s)
-{
- char *otp_region;
- DriveInfo *dinfo;
-
- s->nand = qdev_new("onenand");
- qdev_prop_set_uint16(s->nand, "manufacturer_id", NAND_MFR_SAMSUNG);
- /* Either 0x40 or 0x48 are OK for the device ID */
- qdev_prop_set_uint16(s->nand, "device_id", 0x48);
- qdev_prop_set_uint16(s->nand, "version_id", 0);
- qdev_prop_set_int32(s->nand, "shift", 1);
- dinfo = drive_get(IF_MTD, 0, 0);
- if (dinfo) {
- qdev_prop_set_drive_err(s->nand, "drive", blk_by_legacy_dinfo(dinfo),
- &error_fatal);
- }
- sysbus_realize_and_unref(SYS_BUS_DEVICE(s->nand), &error_fatal);
- sysbus_connect_irq(SYS_BUS_DEVICE(s->nand), 0,
- qdev_get_gpio_in(s->mpu->gpio, N8X0_ONENAND_GPIO));
- omap_gpmc_attach(s->mpu->gpmc, N8X0_ONENAND_CS,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(s->nand), 0));
- otp_region = onenand_raw_otp(s->nand);
-
- memcpy(otp_region + 0x000, n8x0_cal_wlan_mac, sizeof(n8x0_cal_wlan_mac));
- memcpy(otp_region + 0x800, n8x0_cal_bt_id, sizeof(n8x0_cal_bt_id));
- /* XXX: in theory should also update the OOB for both pages */
-}
-
-static qemu_irq n8x0_system_powerdown;
-
-static void n8x0_powerdown_req(Notifier *n, void *opaque)
-{
- qemu_irq_raise(n8x0_system_powerdown);
-}
-
-static Notifier n8x0_system_powerdown_notifier = {
- .notify = n8x0_powerdown_req
-};
-
-static void n8x0_i2c_setup(struct n800_s *s)
-{
- DeviceState *dev;
- qemu_irq tmp_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TMP105_GPIO);
- I2CBus *i2c = omap_i2c_bus(s->mpu->i2c[0]);
-
- /* Attach a menelaus PM chip */
- dev = DEVICE(i2c_slave_create_simple(i2c, "twl92230", N8X0_MENELAUS_ADDR));
- qdev_connect_gpio_out(dev, 3,
- qdev_get_gpio_in(s->mpu->ih[0],
- OMAP_INT_24XX_SYS_NIRQ));
-
- n8x0_system_powerdown = qdev_get_gpio_in(dev, 3);
- qemu_register_powerdown_notifier(&n8x0_system_powerdown_notifier);
-
- /* Attach a TMP105 PM chip (A0 wired to ground) */
- dev = DEVICE(i2c_slave_create_simple(i2c, TYPE_TMP105, N8X0_TMP105_ADDR));
- qdev_connect_gpio_out(dev, 0, tmp_irq);
-}
-
-/* Touchscreen and keypad controller */
-static const MouseTransformInfo n800_pointercal = {
- .x = 800,
- .y = 480,
- .a = { 14560, -68, -3455208, -39, -9621, 35152972, 65536 },
-};
-
-static const MouseTransformInfo n810_pointercal = {
- .x = 800,
- .y = 480,
- .a = { 15041, 148, -4731056, 171, -10238, 35933380, 65536 },
-};
-
-#define RETU_KEYCODE 61 /* F3 */
-
-static void n800_key_event(void *opaque, int keycode)
-{
- struct n800_s *s = (struct n800_s *) opaque;
- int code = s->keymap[keycode & 0x7f];
-
- if (code == -1) {
- if ((keycode & 0x7f) == RETU_KEYCODE) {
- retu_key_event(s->retu, !(keycode & 0x80));
- }
- return;
- }
-
- tsc210x_key_event(s->ts.chip, code, !(keycode & 0x80));
-}
-
-static const int n800_keys[16] = {
- -1,
- 72, /* Up */
- 63, /* Home (F5) */
- -1,
- 75, /* Left */
- 28, /* Enter */
- 77, /* Right */
- -1,
- 1, /* Cycle (ESC) */
- 80, /* Down */
- 62, /* Menu (F4) */
- -1,
- 66, /* Zoom- (F8) */
- 64, /* FullScreen (F6) */
- 65, /* Zoom+ (F7) */
- -1,
-};
-
-static void n800_tsc_kbd_setup(struct n800_s *s)
-{
- int i;
-
- /* XXX: are the three pins inverted inside the chip between the
- * tsc and the cpu (N4111)? */
- qemu_irq penirq = NULL; /* NC */
- qemu_irq kbirq = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_KP_IRQ_GPIO);
- qemu_irq dav = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_TS_GPIO);
-
- s->ts.chip = tsc2301_init(penirq, kbirq, dav);
- s->ts.opaque = s->ts.chip->opaque;
- s->ts.txrx = tsc210x_txrx;
-
- for (i = 0; i < 0x80; i++) {
- s->keymap[i] = -1;
- }
- for (i = 0; i < 0x10; i++) {
- if (n800_keys[i] >= 0) {
- s->keymap[n800_keys[i]] = i;
- }
- }
-
- qemu_add_kbd_event_handler(n800_key_event, s);
-
- tsc210x_set_transform(s->ts.chip, &n800_pointercal);
-}
-
-static void n810_tsc_setup(struct n800_s *s)
-{
- qemu_irq pintdav = qdev_get_gpio_in(s->mpu->gpio, N810_TSC_TS_GPIO);
-
- s->ts.opaque = tsc2005_init(pintdav);
- s->ts.txrx = tsc2005_txrx;
-
- tsc2005_set_transform(s->ts.opaque, &n810_pointercal);
-}
-
-/* N810 Keyboard controller */
-static void n810_key_event(void *opaque, int keycode)
-{
- struct n800_s *s = (struct n800_s *) opaque;
- int code = s->keymap[keycode & 0x7f];
-
- if (code == -1) {
- if ((keycode & 0x7f) == RETU_KEYCODE) {
- retu_key_event(s->retu, !(keycode & 0x80));
- }
- return;
- }
-
- lm832x_key_event(s->kbd, code, !(keycode & 0x80));
-}
-
-#define M 0
-
-static const int n810_keys[0x80] = {
- [0x01] = 16, /* Q */
- [0x02] = 37, /* K */
- [0x03] = 24, /* O */
- [0x04] = 25, /* P */
- [0x05] = 14, /* Backspace */
- [0x06] = 30, /* A */
- [0x07] = 31, /* S */
- [0x08] = 32, /* D */
- [0x09] = 33, /* F */
- [0x0a] = 34, /* G */
- [0x0b] = 35, /* H */
- [0x0c] = 36, /* J */
-
- [0x11] = 17, /* W */
- [0x12] = 62, /* Menu (F4) */
- [0x13] = 38, /* L */
- [0x14] = 40, /* ' (Apostrophe) */
- [0x16] = 44, /* Z */
- [0x17] = 45, /* X */
- [0x18] = 46, /* C */
- [0x19] = 47, /* V */
- [0x1a] = 48, /* B */
- [0x1b] = 49, /* N */
- [0x1c] = 42, /* Shift (Left shift) */
- [0x1f] = 65, /* Zoom+ (F7) */
-
- [0x21] = 18, /* E */
- [0x22] = 39, /* ; (Semicolon) */
- [0x23] = 12, /* - (Minus) */
- [0x24] = 13, /* = (Equal) */
- [0x2b] = 56, /* Fn (Left Alt) */
- [0x2c] = 50, /* M */
- [0x2f] = 66, /* Zoom- (F8) */
-
- [0x31] = 19, /* R */
- [0x32] = 29 | M, /* Right Ctrl */
- [0x34] = 57, /* Space */
- [0x35] = 51, /* , (Comma) */
- [0x37] = 72 | M, /* Up */
- [0x3c] = 82 | M, /* Compose (Insert) */
- [0x3f] = 64, /* FullScreen (F6) */
-
- [0x41] = 20, /* T */
- [0x44] = 52, /* . (Dot) */
- [0x46] = 77 | M, /* Right */
- [0x4f] = 63, /* Home (F5) */
- [0x51] = 21, /* Y */
- [0x53] = 80 | M, /* Down */
- [0x55] = 28, /* Enter */
- [0x5f] = 1, /* Cycle (ESC) */
-
- [0x61] = 22, /* U */
- [0x64] = 75 | M, /* Left */
-
- [0x71] = 23, /* I */
-#if 0
- [0x75] = 28 | M, /* KP Enter (KP Enter) */
-#else
- [0x75] = 15, /* KP Enter (Tab) */
-#endif
-};
-
-#undef M
-
-static void n810_kbd_setup(struct n800_s *s)
-{
- qemu_irq kbd_irq = qdev_get_gpio_in(s->mpu->gpio, N810_KEYBOARD_GPIO);
- int i;
-
- for (i = 0; i < 0x80; i++) {
- s->keymap[i] = -1;
- }
- for (i = 0; i < 0x80; i++) {
- if (n810_keys[i] > 0) {
- s->keymap[n810_keys[i]] = i;
- }
- }
-
- qemu_add_kbd_event_handler(n810_key_event, s);
-
- /* Attach the LM8322 keyboard to the I2C bus,
- * should happen in n8x0_i2c_setup and s->kbd be initialised here. */
- s->kbd = DEVICE(i2c_slave_create_simple(omap_i2c_bus(s->mpu->i2c[0]),
- TYPE_LM8323, N810_LM8323_ADDR));
- qdev_connect_gpio_out(s->kbd, 0, kbd_irq);
-}
-
-/* LCD MIPI DBI-C controller (URAL) */
-struct mipid_s {
- int resp[4];
- int param[4];
- int p;
- int pm;
- int cmd;
-
- int sleep;
- int booster;
- int te;
- int selfcheck;
- int partial;
- int normal;
- int vscr;
- int invert;
- int onoff;
- int gamma;
- uint32_t id;
-};
-
-static void mipid_reset(struct mipid_s *s)
-{
- s->pm = 0;
- s->cmd = 0;
-
- s->sleep = 1;
- s->booster = 0;
- s->selfcheck =
- (1 << 7) | /* Register loading OK. */
- (1 << 5) | /* The chip is attached. */
- (1 << 4); /* Display glass still in one piece. */
- s->te = 0;
- s->partial = 0;
- s->normal = 1;
- s->vscr = 0;
- s->invert = 0;
- s->onoff = 1;
- s->gamma = 0;
-}
-
-static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len)
-{
- struct mipid_s *s = (struct mipid_s *) opaque;
- uint8_t ret;
-
- if (len > 9) {
- hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len);
- }
-
- if (s->p >= ARRAY_SIZE(s->resp)) {
- ret = 0;
- } else {
- ret = s->resp[s->p++];
- }
- if (s->pm-- > 0) {
- s->param[s->pm] = cmd;
- } else {
- s->cmd = cmd;
- }
-
- switch (s->cmd) {
- case 0x00: /* NOP */
- break;
-
- case 0x01: /* SWRESET */
- mipid_reset(s);
- break;
-
- case 0x02: /* BSTROFF */
- s->booster = 0;
- break;
- case 0x03: /* BSTRON */
- s->booster = 1;
- break;
-
- case 0x04: /* RDDID */
- s->p = 0;
- s->resp[0] = (s->id >> 16) & 0xff;
- s->resp[1] = (s->id >> 8) & 0xff;
- s->resp[2] = (s->id >> 0) & 0xff;
- break;
-
- case 0x06: /* RD_RED */
- case 0x07: /* RD_GREEN */
- /* XXX the bootloader sometimes issues RD_BLUE meaning RDDID so
- * for the bootloader one needs to change this. */
- case 0x08: /* RD_BLUE */
- s->p = 0;
- /* TODO: return first pixel components */
- s->resp[0] = 0x01;
- break;
-
- case 0x09: /* RDDST */
- s->p = 0;
- s->resp[0] = s->booster << 7;
- s->resp[1] = (5 << 4) | (s->partial << 2) |
- (s->sleep << 1) | s->normal;
- s->resp[2] = (s->vscr << 7) | (s->invert << 5) |
- (s->onoff << 2) | (s->te << 1) | (s->gamma >> 2);
- s->resp[3] = s->gamma << 6;
- break;
-
- case 0x0a: /* RDDPM */
- s->p = 0;
- s->resp[0] = (s->onoff << 2) | (s->normal << 3) | (s->sleep << 4) |
- (s->partial << 5) | (s->sleep << 6) | (s->booster << 7);
- break;
- case 0x0b: /* RDDMADCTR */
- s->p = 0;
- s->resp[0] = 0;
- break;
- case 0x0c: /* RDDCOLMOD */
- s->p = 0;
- s->resp[0] = 5; /* 65K colours */
- break;
- case 0x0d: /* RDDIM */
- s->p = 0;
- s->resp[0] = (s->invert << 5) | (s->vscr << 7) | s->gamma;
- break;
- case 0x0e: /* RDDSM */
- s->p = 0;
- s->resp[0] = s->te << 7;
- break;
- case 0x0f: /* RDDSDR */
- s->p = 0;
- s->resp[0] = s->selfcheck;
- break;
-
- case 0x10: /* SLPIN */
- s->sleep = 1;
- break;
- case 0x11: /* SLPOUT */
- s->sleep = 0;
- s->selfcheck ^= 1 << 6; /* POFF self-diagnosis Ok */
- break;
-
- case 0x12: /* PTLON */
- s->partial = 1;
- s->normal = 0;
- s->vscr = 0;
- break;
- case 0x13: /* NORON */
- s->partial = 0;
- s->normal = 1;
- s->vscr = 0;
- break;
-
- case 0x20: /* INVOFF */
- s->invert = 0;
- break;
- case 0x21: /* INVON */
- s->invert = 1;
- break;
-
- case 0x22: /* APOFF */
- case 0x23: /* APON */
- goto bad_cmd;
-
- case 0x25: /* WRCNTR */
- if (s->pm < 0) {
- s->pm = 1;
- }
- goto bad_cmd;
-
- case 0x26: /* GAMSET */
- if (!s->pm) {
- s->gamma = ctz32(s->param[0] & 0xf);
- if (s->gamma == 32) {
- s->gamma = -1; /* XXX: should this be 0? */
- }
- } else if (s->pm < 0) {
- s->pm = 1;
- }
- break;
-
- case 0x28: /* DISPOFF */
- s->onoff = 0;
- break;
- case 0x29: /* DISPON */
- s->onoff = 1;
- break;
-
- case 0x2a: /* CASET */
- case 0x2b: /* RASET */
- case 0x2c: /* RAMWR */
- case 0x2d: /* RGBSET */
- case 0x2e: /* RAMRD */
- case 0x30: /* PTLAR */
- case 0x33: /* SCRLAR */
- goto bad_cmd;
-
- case 0x34: /* TEOFF */
- s->te = 0;
- break;
- case 0x35: /* TEON */
- if (!s->pm) {
- s->te = 1;
- } else if (s->pm < 0) {
- s->pm = 1;
- }
- break;
-
- case 0x36: /* MADCTR */
- goto bad_cmd;
-
- case 0x37: /* VSCSAD */
- s->partial = 0;
- s->normal = 0;
- s->vscr = 1;
- break;
-
- case 0x38: /* IDMOFF */
- case 0x39: /* IDMON */
- case 0x3a: /* COLMOD */
- goto bad_cmd;
-
- case 0xb0: /* CLKINT / DISCTL */
- case 0xb1: /* CLKEXT */
- if (s->pm < 0) {
- s->pm = 2;
- }
- break;
-
- case 0xb4: /* FRMSEL */
- break;
-
- case 0xb5: /* FRM8SEL */
- case 0xb6: /* TMPRNG / INIESC */
- case 0xb7: /* TMPHIS / NOP2 */
- case 0xb8: /* TMPREAD / MADCTL */
- case 0xba: /* DISTCTR */
- case 0xbb: /* EPVOL */
- goto bad_cmd;
-
- case 0xbd: /* Unknown */
- s->p = 0;
- s->resp[0] = 0;
- s->resp[1] = 1;
- break;
-
- case 0xc2: /* IFMOD */
- if (s->pm < 0) {
- s->pm = 2;
- }
- break;
-
- case 0xc6: /* PWRCTL */
- case 0xc7: /* PPWRCTL */
- case 0xd0: /* EPWROUT */
- case 0xd1: /* EPWRIN */
- case 0xd4: /* RDEV */
- case 0xd5: /* RDRR */
- goto bad_cmd;
-
- case 0xda: /* RDID1 */
- s->p = 0;
- s->resp[0] = (s->id >> 16) & 0xff;
- break;
- case 0xdb: /* RDID2 */
- s->p = 0;
- s->resp[0] = (s->id >> 8) & 0xff;
- break;
- case 0xdc: /* RDID3 */
- s->p = 0;
- s->resp[0] = (s->id >> 0) & 0xff;
- break;
-
- default:
- bad_cmd:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: unknown command 0x%02x\n", __func__, s->cmd);
- break;
- }
-
- return ret;
-}
-
-static void *mipid_init(void)
-{
- struct mipid_s *s = g_malloc0(sizeof(*s));
-
- s->id = 0x838f03;
- mipid_reset(s);
-
- return s;
-}
-
-static void n8x0_spi_setup(struct n800_s *s)
-{
- void *tsc = s->ts.opaque;
- void *mipid = mipid_init();
-
- omap_mcspi_attach(s->mpu->mcspi[0], s->ts.txrx, tsc, 0);
- omap_mcspi_attach(s->mpu->mcspi[0], mipid_txrx, mipid, 1);
-}
-
-/* This task is normally performed by the bootloader. If we're loading
- * a kernel directly, we need to enable the Blizzard ourselves. */
-static void n800_dss_init(struct rfbi_chip_s *chip)
-{
- uint8_t *fb_blank;
-
- chip->write(chip->opaque, 0, 0x2a); /* LCD Width register */
- chip->write(chip->opaque, 1, 0x64);
- chip->write(chip->opaque, 0, 0x2c); /* LCD HNDP register */
- chip->write(chip->opaque, 1, 0x1e);
- chip->write(chip->opaque, 0, 0x2e); /* LCD Height 0 register */
- chip->write(chip->opaque, 1, 0xe0);
- chip->write(chip->opaque, 0, 0x30); /* LCD Height 1 register */
- chip->write(chip->opaque, 1, 0x01);
- chip->write(chip->opaque, 0, 0x32); /* LCD VNDP register */
- chip->write(chip->opaque, 1, 0x06);
- chip->write(chip->opaque, 0, 0x68); /* Display Mode register */
- chip->write(chip->opaque, 1, 1); /* Enable bit */
-
- chip->write(chip->opaque, 0, 0x6c);
- chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */
- chip->write(chip->opaque, 1, 0x1f); /* Input X End Position */
- chip->write(chip->opaque, 1, 0x03); /* Input X End Position */
- chip->write(chip->opaque, 1, 0xdf); /* Input Y End Position */
- chip->write(chip->opaque, 1, 0x01); /* Input Y End Position */
- chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */
- chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */
- chip->write(chip->opaque, 1, 0x1f); /* Output X End Position */
- chip->write(chip->opaque, 1, 0x03); /* Output X End Position */
- chip->write(chip->opaque, 1, 0xdf); /* Output Y End Position */
- chip->write(chip->opaque, 1, 0x01); /* Output Y End Position */
- chip->write(chip->opaque, 1, 0x01); /* Input Data Format */
- chip->write(chip->opaque, 1, 0x01); /* Data Source Select */
-
- fb_blank = memset(g_malloc(800 * 480 * 2), 0xff, 800 * 480 * 2);
- /* Display Memory Data Port */
- chip->block(chip->opaque, 1, fb_blank, 800 * 480 * 2, 800);
- g_free(fb_blank);
-}
-
-static void n8x0_dss_setup(struct n800_s *s)
-{
- s->blizzard.opaque = s1d13745_init(NULL);
- s->blizzard.block = s1d13745_write_block;
- s->blizzard.write = s1d13745_write;
- s->blizzard.read = s1d13745_read;
-
- omap_rfbi_attach(s->mpu->dss, 0, &s->blizzard);
-}
-
-static void n8x0_cbus_setup(struct n800_s *s)
-{
- qemu_irq dat_out = qdev_get_gpio_in(s->mpu->gpio, N8X0_CBUS_DAT_GPIO);
- qemu_irq retu_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_RETU_GPIO);
- qemu_irq tahvo_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TAHVO_GPIO);
-
- CBus *cbus = cbus_init(dat_out);
-
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_CLK_GPIO, cbus->clk);
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_DAT_GPIO, cbus->dat);
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_SEL_GPIO, cbus->sel);
-
- cbus_attach(cbus, s->retu = retu_init(retu_irq, 1));
- cbus_attach(cbus, s->tahvo = tahvo_init(tahvo_irq, 1));
-}
-
-static void n8x0_usb_setup(struct n800_s *s)
-{
- SysBusDevice *dev;
- s->usb = qdev_new("tusb6010");
- dev = SYS_BUS_DEVICE(s->usb);
- sysbus_realize_and_unref(dev, &error_fatal);
- sysbus_connect_irq(dev, 0,
- qdev_get_gpio_in(s->mpu->gpio, N8X0_TUSB_INT_GPIO));
- /* Using the NOR interface */
- omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_ASYNC_CS,
- sysbus_mmio_get_region(dev, 0));
- omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_SYNC_CS,
- sysbus_mmio_get_region(dev, 1));
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_TUSB_ENABLE_GPIO,
- qdev_get_gpio_in(s->usb, 0)); /* tusb_pwr */
-}
-
-/* Setup done before the main bootloader starts by some early setup code
- * - used when we want to run the main bootloader in emulation. This
- * isn't documented. */
-static const uint32_t n800_pinout[104] = {
- 0x080f00d8, 0x00d40808, 0x03080808, 0x080800d0,
- 0x00dc0808, 0x0b0f0f00, 0x080800b4, 0x00c00808,
- 0x08080808, 0x180800c4, 0x00b80000, 0x08080808,
- 0x080800bc, 0x00cc0808, 0x08081818, 0x18180128,
- 0x01241800, 0x18181818, 0x000000f0, 0x01300000,
- 0x00001b0b, 0x1b0f0138, 0x00e0181b, 0x1b031b0b,
- 0x180f0078, 0x00740018, 0x0f0f0f1a, 0x00000080,
- 0x007c0000, 0x00000000, 0x00000088, 0x00840000,
- 0x00000000, 0x00000094, 0x00980300, 0x0f180003,
- 0x0000008c, 0x00900f0f, 0x0f0f1b00, 0x0f00009c,
- 0x01140000, 0x1b1b0f18, 0x0818013c, 0x01400008,
- 0x00001818, 0x000b0110, 0x010c1800, 0x0b030b0f,
- 0x181800f4, 0x00f81818, 0x00000018, 0x000000fc,
- 0x00401808, 0x00000000, 0x0f1b0030, 0x003c0008,
- 0x00000000, 0x00000038, 0x00340000, 0x00000000,
- 0x1a080070, 0x00641a1a, 0x08080808, 0x08080060,
- 0x005c0808, 0x08080808, 0x08080058, 0x00540808,
- 0x08080808, 0x0808006c, 0x00680808, 0x08080808,
- 0x000000a8, 0x00b00000, 0x08080808, 0x000000a0,
- 0x00a40000, 0x00000000, 0x08ff0050, 0x004c0808,
- 0xffffffff, 0xffff0048, 0x0044ffff, 0xffffffff,
- 0x000000ac, 0x01040800, 0x08080b0f, 0x18180100,
- 0x01081818, 0x0b0b1808, 0x1a0300e4, 0x012c0b1a,
- 0x02020018, 0x0b000134, 0x011c0800, 0x0b1b1b00,
- 0x0f0000c8, 0x00ec181b, 0x000f0f02, 0x00180118,
- 0x01200000, 0x0f0b1b1b, 0x0f0200e8, 0x0000020b,
-};
-
-static void n800_setup_nolo_tags(void *sram_base)
-{
- int i;
- uint32_t *p = sram_base + 0x8000;
- uint32_t *v = sram_base + 0xa000;
-
- memset(p, 0, 0x3000);
-
- strcpy((void *) (p + 0), "QEMU N800");
-
- strcpy((void *) (p + 8), "F5");
-
- stl_p(p + 10, 0x04f70000);
- strcpy((void *) (p + 9), "RX-34");
-
- /* RAM size in MB? */
- stl_p(p + 12, 0x80);
-
- /* Pointer to the list of tags */
- stl_p(p + 13, OMAP2_SRAM_BASE + 0x9000);
-
- /* The NOLO tags start here */
- p = sram_base + 0x9000;
-#define ADD_TAG(tag, len) \
- stw_p((uint16_t *) p + 0, tag); \
- stw_p((uint16_t *) p + 1, len); p++; \
- stl_p(p++, OMAP2_SRAM_BASE | (((void *) v - sram_base) & 0xffff));
-
- /* OMAP STI console? Pin out settings? */
- ADD_TAG(0x6e01, 414);
- for (i = 0; i < ARRAY_SIZE(n800_pinout); i++) {
- stl_p(v++, n800_pinout[i]);
- }
-
- /* Kernel memsize? */
- ADD_TAG(0x6e05, 1);
- stl_p(v++, 2);
-
- /* NOLO serial console */
- ADD_TAG(0x6e02, 4);
- stl_p(v++, XLDR_LL_UART); /* UART number (1 - 3) */
-
-#if 0
- /* CBUS settings (Retu/AVilma) */
- ADD_TAG(0x6e03, 6);
- stw_p((uint16_t *) v + 0, 65); /* CBUS GPIO0 */
- stw_p((uint16_t *) v + 1, 66); /* CBUS GPIO1 */
- stw_p((uint16_t *) v + 2, 64); /* CBUS GPIO2 */
- v += 2;
-#endif
-
- /* Nokia ASIC BB5 (Retu/Tahvo) */
- ADD_TAG(0x6e0a, 4);
- stw_p((uint16_t *) v + 0, 111); /* "Retu" interrupt GPIO */
- stw_p((uint16_t *) v + 1, 108); /* "Tahvo" interrupt GPIO */
- v++;
-
- /* LCD console? */
- ADD_TAG(0x6e04, 4);
- stw_p((uint16_t *) v + 0, 30); /* ??? */
- stw_p((uint16_t *) v + 1, 24); /* ??? */
- v++;
-
-#if 0
- /* LCD settings */
- ADD_TAG(0x6e06, 2);
- stw_p((uint16_t *) (v++), 15); /* ??? */
-#endif
-
- /* I^2C (Menelaus) */
- ADD_TAG(0x6e07, 4);
- stl_p(v++, 0x00720000); /* ??? */
-
- /* Unknown */
- ADD_TAG(0x6e0b, 6);
- stw_p((uint16_t *) v + 0, 94); /* ??? */
- stw_p((uint16_t *) v + 1, 23); /* ??? */
- stw_p((uint16_t *) v + 2, 0); /* ??? */
- v += 2;
-
- /* OMAP gpio switch info */
- ADD_TAG(0x6e0c, 80);
- strcpy((void *) v, "bat_cover"); v += 3;
- stw_p((uint16_t *) v + 0, 110); /* GPIO num ??? */
- stw_p((uint16_t *) v + 1, 1); /* GPIO num ??? */
- v += 2;
- strcpy((void *) v, "cam_act"); v += 3;
- stw_p((uint16_t *) v + 0, 95); /* GPIO num ??? */
- stw_p((uint16_t *) v + 1, 32); /* GPIO num ??? */
- v += 2;
- strcpy((void *) v, "cam_turn"); v += 3;
- stw_p((uint16_t *) v + 0, 12); /* GPIO num ??? */
- stw_p((uint16_t *) v + 1, 33); /* GPIO num ??? */
- v += 2;
- strcpy((void *) v, "headphone"); v += 3;
- stw_p((uint16_t *) v + 0, 107); /* GPIO num ??? */
- stw_p((uint16_t *) v + 1, 17); /* GPIO num ??? */
- v += 2;
-
- /* Bluetooth */
- ADD_TAG(0x6e0e, 12);
- stl_p(v++, 0x5c623d01); /* ??? */
- stl_p(v++, 0x00000201); /* ??? */
- stl_p(v++, 0x00000000); /* ??? */
-
- /* CX3110x WLAN settings */
- ADD_TAG(0x6e0f, 8);
- stl_p(v++, 0x00610025); /* ??? */
- stl_p(v++, 0xffff0057); /* ??? */
-
- /* MMC host settings */
- ADD_TAG(0x6e10, 12);
- stl_p(v++, 0xffff000f); /* ??? */
- stl_p(v++, 0xffffffff); /* ??? */
- stl_p(v++, 0x00000060); /* ??? */
-
- /* OneNAND chip select */
- ADD_TAG(0x6e11, 10);
- stl_p(v++, 0x00000401); /* ??? */
- stl_p(v++, 0x0002003a); /* ??? */
- stl_p(v++, 0x00000002); /* ??? */
-
- /* TEA5761 sensor settings */
- ADD_TAG(0x6e12, 2);
- stl_p(v++, 93); /* GPIO num ??? */
-
-#if 0
- /* Unknown tag */
- ADD_TAG(6e09, 0);
-
- /* Kernel UART / console */
- ADD_TAG(6e12, 0);
-#endif
-
- /* End of the list */
- stl_p(p++, 0x00000000);
- stl_p(p++, 0x00000000);
-}
-
-/* This task is normally performed by the bootloader. If we're loading
- * a kernel directly, we need to set up GPMC mappings ourselves. */
-static void n800_gpmc_init(struct n800_s *s)
-{
- uint32_t config7 =
- (0xf << 8) | /* MASKADDRESS */
- (1 << 6) | /* CSVALID */
- (4 << 0); /* BASEADDRESS */
-
- cpu_physical_memory_write(0x6800a078, /* GPMC_CONFIG7_0 */
- &config7, sizeof(config7));
-}
-
-/* Setup sequence done by the bootloader */
-static void n8x0_boot_init(void *opaque)
-{
- struct n800_s *s = (struct n800_s *) opaque;
- uint32_t buf;
-
- /* PRCM setup */
-#define omap_writel(addr, val) \
- buf = (val); \
- cpu_physical_memory_write(addr, &buf, sizeof(buf))
-
- omap_writel(0x48008060, 0x41); /* PRCM_CLKSRC_CTRL */
- omap_writel(0x48008070, 1); /* PRCM_CLKOUT_CTRL */
- omap_writel(0x48008078, 0); /* PRCM_CLKEMUL_CTRL */
- omap_writel(0x48008090, 0); /* PRCM_VOLTSETUP */
- omap_writel(0x48008094, 0); /* PRCM_CLKSSETUP */
- omap_writel(0x48008098, 0); /* PRCM_POLCTRL */
- omap_writel(0x48008140, 2); /* CM_CLKSEL_MPU */
- omap_writel(0x48008148, 0); /* CM_CLKSTCTRL_MPU */
- omap_writel(0x48008158, 1); /* RM_RSTST_MPU */
- omap_writel(0x480081c8, 0x15); /* PM_WKDEP_MPU */
- omap_writel(0x480081d4, 0x1d4); /* PM_EVGENCTRL_MPU */
- omap_writel(0x480081d8, 0); /* PM_EVEGENONTIM_MPU */
- omap_writel(0x480081dc, 0); /* PM_EVEGENOFFTIM_MPU */
- omap_writel(0x480081e0, 0xc); /* PM_PWSTCTRL_MPU */
- omap_writel(0x48008200, 0x047e7ff7); /* CM_FCLKEN1_CORE */
- omap_writel(0x48008204, 0x00000004); /* CM_FCLKEN2_CORE */
- omap_writel(0x48008210, 0x047e7ff1); /* CM_ICLKEN1_CORE */
- omap_writel(0x48008214, 0x00000004); /* CM_ICLKEN2_CORE */
- omap_writel(0x4800821c, 0x00000000); /* CM_ICLKEN4_CORE */
- omap_writel(0x48008230, 0); /* CM_AUTOIDLE1_CORE */
- omap_writel(0x48008234, 0); /* CM_AUTOIDLE2_CORE */
- omap_writel(0x48008238, 7); /* CM_AUTOIDLE3_CORE */
- omap_writel(0x4800823c, 0); /* CM_AUTOIDLE4_CORE */
- omap_writel(0x48008240, 0x04360626); /* CM_CLKSEL1_CORE */
- omap_writel(0x48008244, 0x00000014); /* CM_CLKSEL2_CORE */
- omap_writel(0x48008248, 0); /* CM_CLKSTCTRL_CORE */
- omap_writel(0x48008300, 0x00000000); /* CM_FCLKEN_GFX */
- omap_writel(0x48008310, 0x00000000); /* CM_ICLKEN_GFX */
- omap_writel(0x48008340, 0x00000001); /* CM_CLKSEL_GFX */
- omap_writel(0x48008400, 0x00000004); /* CM_FCLKEN_WKUP */
- omap_writel(0x48008410, 0x00000004); /* CM_ICLKEN_WKUP */
- omap_writel(0x48008440, 0x00000000); /* CM_CLKSEL_WKUP */
- omap_writel(0x48008500, 0x000000cf); /* CM_CLKEN_PLL */
- omap_writel(0x48008530, 0x0000000c); /* CM_AUTOIDLE_PLL */
- omap_writel(0x48008540, /* CM_CLKSEL1_PLL */
- (0x78 << 12) | (6 << 8));
- omap_writel(0x48008544, 2); /* CM_CLKSEL2_PLL */
-
- /* GPMC setup */
- n800_gpmc_init(s);
-
- /* Video setup */
- n800_dss_init(&s->blizzard);
-
- /* CPU setup */
- s->mpu->cpu->env.GE = 0x5;
-
- /* If the machine has a slided keyboard, open it */
- if (s->kbd) {
- qemu_irq_raise(qdev_get_gpio_in(s->mpu->gpio, N810_SLIDE_GPIO));
- }
-}
-
-#define OMAP_TAG_NOKIA_BT 0x4e01
-#define OMAP_TAG_WLAN_CX3110X 0x4e02
-#define OMAP_TAG_CBUS 0x4e03
-#define OMAP_TAG_EM_ASIC_BB5 0x4e04
-
-static const struct omap_gpiosw_info_s {
- const char *name;
- int line;
- int type;
-} n800_gpiosw_info[] = {
- {
- "bat_cover", N800_BAT_COVER_GPIO,
- OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED,
- }, {
- "cam_act", N800_CAM_ACT_GPIO,
- OMAP_GPIOSW_TYPE_ACTIVITY,
- }, {
- "cam_turn", N800_CAM_TURN_GPIO,
- OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED,
- }, {
- "headphone", N8X0_HEADPHONE_GPIO,
- OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED,
- },
- { /* end of list */ }
-}, n810_gpiosw_info[] = {
- {
- "gps_reset", N810_GPS_RESET_GPIO,
- OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT,
- }, {
- "gps_wakeup", N810_GPS_WAKEUP_GPIO,
- OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT,
- }, {
- "headphone", N8X0_HEADPHONE_GPIO,
- OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED,
- }, {
- "kb_lock", N810_KB_LOCK_GPIO,
- OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED,
- }, {
- "sleepx_led", N810_SLEEPX_LED_GPIO,
- OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED | OMAP_GPIOSW_OUTPUT,
- }, {
- "slide", N810_SLIDE_GPIO,
- OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED,
- },
- { /* end of list */ }
-};
-
-static const struct omap_partition_info_s {
- uint32_t offset;
- uint32_t size;
- int mask;
- const char *name;
-} n800_part_info[] = {
- { 0x00000000, 0x00020000, 0x3, "bootloader" },
- { 0x00020000, 0x00060000, 0x0, "config" },
- { 0x00080000, 0x00200000, 0x0, "kernel" },
- { 0x00280000, 0x00200000, 0x3, "initfs" },
- { 0x00480000, 0x0fb80000, 0x3, "rootfs" },
- { /* end of list */ }
-}, n810_part_info[] = {
- { 0x00000000, 0x00020000, 0x3, "bootloader" },
- { 0x00020000, 0x00060000, 0x0, "config" },
- { 0x00080000, 0x00220000, 0x0, "kernel" },
- { 0x002a0000, 0x00400000, 0x0, "initfs" },
- { 0x006a0000, 0x0f960000, 0x0, "rootfs" },
- { /* end of list */ }
-};
-
-static const uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR };
-
-static int n8x0_atag_setup(void *p, int model)
-{
- uint8_t *b;
- uint16_t *w;
- uint32_t *l;
- const struct omap_gpiosw_info_s *gpiosw;
- const struct omap_partition_info_s *partition;
- const char *tag;
-
- w = p;
-
- stw_p(w++, OMAP_TAG_UART); /* u16 tag */
- stw_p(w++, 4); /* u16 len */
- stw_p(w++, (1 << 2) | (1 << 1) | (1 << 0)); /* uint enabled_uarts */
- w++;
-
-#if 0
- stw_p(w++, OMAP_TAG_SERIAL_CONSOLE); /* u16 tag */
- stw_p(w++, 4); /* u16 len */
- stw_p(w++, XLDR_LL_UART + 1); /* u8 console_uart */
- stw_p(w++, 115200); /* u32 console_speed */
-#endif
-
- stw_p(w++, OMAP_TAG_LCD); /* u16 tag */
- stw_p(w++, 36); /* u16 len */
- strcpy((void *) w, "QEMU LCD panel"); /* char panel_name[16] */
- w += 8;
- strcpy((void *) w, "blizzard"); /* char ctrl_name[16] */
- w += 8;
- stw_p(w++, N810_BLIZZARD_RESET_GPIO); /* TODO: n800 s16 nreset_gpio */
- stw_p(w++, 24); /* u8 data_lines */
-
- stw_p(w++, OMAP_TAG_CBUS); /* u16 tag */
- stw_p(w++, 8); /* u16 len */
- stw_p(w++, N8X0_CBUS_CLK_GPIO); /* s16 clk_gpio */
- stw_p(w++, N8X0_CBUS_DAT_GPIO); /* s16 dat_gpio */
- stw_p(w++, N8X0_CBUS_SEL_GPIO); /* s16 sel_gpio */
- w++;
-
- stw_p(w++, OMAP_TAG_EM_ASIC_BB5); /* u16 tag */
- stw_p(w++, 4); /* u16 len */
- stw_p(w++, N8X0_RETU_GPIO); /* s16 retu_irq_gpio */
- stw_p(w++, N8X0_TAHVO_GPIO); /* s16 tahvo_irq_gpio */
-
- gpiosw = (model == 810) ? n810_gpiosw_info : n800_gpiosw_info;
- for (; gpiosw->name; gpiosw++) {
- stw_p(w++, OMAP_TAG_GPIO_SWITCH); /* u16 tag */
- stw_p(w++, 20); /* u16 len */
- strcpy((void *) w, gpiosw->name); /* char name[12] */
- w += 6;
- stw_p(w++, gpiosw->line); /* u16 gpio */
- stw_p(w++, gpiosw->type);
- stw_p(w++, 0);
- stw_p(w++, 0);
- }
-
- stw_p(w++, OMAP_TAG_NOKIA_BT); /* u16 tag */
- stw_p(w++, 12); /* u16 len */
- b = (void *) w;
- stb_p(b++, 0x01); /* u8 chip_type (CSR) */
- stb_p(b++, N8X0_BT_WKUP_GPIO); /* u8 bt_wakeup_gpio */
- stb_p(b++, N8X0_BT_HOST_WKUP_GPIO); /* u8 host_wakeup_gpio */
- stb_p(b++, N8X0_BT_RESET_GPIO); /* u8 reset_gpio */
- stb_p(b++, BT_UART + 1); /* u8 bt_uart */
- memcpy(b, &n8x0_bd_addr, 6); /* u8 bd_addr[6] */
- b += 6;
- stb_p(b++, 0x02); /* u8 bt_sysclk (38.4) */
- w = (void *) b;
-
- stw_p(w++, OMAP_TAG_WLAN_CX3110X); /* u16 tag */
- stw_p(w++, 8); /* u16 len */
- stw_p(w++, 0x25); /* u8 chip_type */
- stw_p(w++, N8X0_WLAN_PWR_GPIO); /* s16 power_gpio */
- stw_p(w++, N8X0_WLAN_IRQ_GPIO); /* s16 irq_gpio */
- stw_p(w++, -1); /* s16 spi_cs_gpio */
-
- stw_p(w++, OMAP_TAG_MMC); /* u16 tag */
- stw_p(w++, 16); /* u16 len */
- if (model == 810) {
- stw_p(w++, 0x23f); /* unsigned flags */
- stw_p(w++, -1); /* s16 power_pin */
- stw_p(w++, -1); /* s16 switch_pin */
- stw_p(w++, -1); /* s16 wp_pin */
- stw_p(w++, 0x240); /* unsigned flags */
- stw_p(w++, 0xc000); /* s16 power_pin */
- stw_p(w++, 0x0248); /* s16 switch_pin */
- stw_p(w++, 0xc000); /* s16 wp_pin */
- } else {
- stw_p(w++, 0xf); /* unsigned flags */
- stw_p(w++, -1); /* s16 power_pin */
- stw_p(w++, -1); /* s16 switch_pin */
- stw_p(w++, -1); /* s16 wp_pin */
- stw_p(w++, 0); /* unsigned flags */
- stw_p(w++, 0); /* s16 power_pin */
- stw_p(w++, 0); /* s16 switch_pin */
- stw_p(w++, 0); /* s16 wp_pin */
- }
-
- stw_p(w++, OMAP_TAG_TEA5761); /* u16 tag */
- stw_p(w++, 4); /* u16 len */
- stw_p(w++, N8X0_TEA5761_CS_GPIO); /* u16 enable_gpio */
- w++;
-
- partition = (model == 810) ? n810_part_info : n800_part_info;
- for (; partition->name; partition++) {
- stw_p(w++, OMAP_TAG_PARTITION); /* u16 tag */
- stw_p(w++, 28); /* u16 len */
- strcpy((void *) w, partition->name); /* char name[16] */
- l = (void *) (w + 8);
- stl_p(l++, partition->size); /* unsigned int size */
- stl_p(l++, partition->offset); /* unsigned int offset */
- stl_p(l++, partition->mask); /* unsigned int mask_flags */
- w = (void *) l;
- }
-
- stw_p(w++, OMAP_TAG_BOOT_REASON); /* u16 tag */
- stw_p(w++, 12); /* u16 len */
-#if 0
- strcpy((void *) w, "por"); /* char reason_str[12] */
- strcpy((void *) w, "charger"); /* char reason_str[12] */
- strcpy((void *) w, "32wd_to"); /* char reason_str[12] */
- strcpy((void *) w, "sw_rst"); /* char reason_str[12] */
- strcpy((void *) w, "mbus"); /* char reason_str[12] */
- strcpy((void *) w, "unknown"); /* char reason_str[12] */
- strcpy((void *) w, "swdg_to"); /* char reason_str[12] */
- strcpy((void *) w, "sec_vio"); /* char reason_str[12] */
- strcpy((void *) w, "pwr_key"); /* char reason_str[12] */
- strcpy((void *) w, "rtc_alarm"); /* char reason_str[12] */
-#else
- strcpy((void *) w, "pwr_key"); /* char reason_str[12] */
-#endif
- w += 6;
-
- tag = (model == 810) ? "RX-44" : "RX-34";
- stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
- stw_p(w++, 24); /* u16 len */
- strcpy((void *) w, "product"); /* char component[12] */
- w += 6;
- strcpy((void *) w, tag); /* char version[12] */
- w += 6;
-
- stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
- stw_p(w++, 24); /* u16 len */
- strcpy((void *) w, "hw-build"); /* char component[12] */
- w += 6;
- strcpy((void *) w, "QEMU ");
- pstrcat((void *) w, 12, qemu_hw_version()); /* char version[12] */
- w += 6;
-
- tag = (model == 810) ? "1.1.10-qemu" : "1.1.6-qemu";
- stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */
- stw_p(w++, 24); /* u16 len */
- strcpy((void *) w, "nolo"); /* char component[12] */
- w += 6;
- strcpy((void *) w, tag); /* char version[12] */
- w += 6;
-
- return (void *) w - p;
-}
-
-static int n800_atag_setup(const struct arm_boot_info *info, void *p)
-{
- return n8x0_atag_setup(p, 800);
-}
-
-static int n810_atag_setup(const struct arm_boot_info *info, void *p)
-{
- return n8x0_atag_setup(p, 810);
-}
-
-static void n8x0_init(MachineState *machine,
- struct arm_boot_info *binfo, int model)
-{
- struct n800_s *s = g_malloc0(sizeof(*s));
- MachineClass *mc = MACHINE_GET_CLASS(machine);
-
- if (machine->ram_size != mc->default_ram_size) {
- char *sz = size_to_str(mc->default_ram_size);
- error_report("Invalid RAM size, should be %s", sz);
- g_free(sz);
- exit(EXIT_FAILURE);
- }
- binfo->ram_size = machine->ram_size;
-
- memory_region_add_subregion(get_system_memory(), OMAP2_Q2_BASE,
- machine->ram);
-
- s->mpu = omap2420_mpu_init(machine->ram, machine->cpu_type);
-
- /* Setup peripherals
- *
- * Believed external peripherals layout in the N810:
- * (spi bus 1)
- * tsc2005
- * lcd_mipid
- * (spi bus 2)
- * Conexant cx3110x (WLAN)
- * optional: pc2400m (WiMAX)
- * (i2c bus 0)
- * TLV320AIC33 (audio codec)
- * TCM825x (camera by Toshiba)
- * lp5521 (clever LEDs)
- * tsl2563 (light sensor, hwmon, model 7, rev. 0)
- * lm8323 (keypad, manf 00, rev 04)
- * (i2c bus 1)
- * tmp105 (temperature sensor, hwmon)
- * menelaus (pm)
- * (somewhere on i2c - maybe N800-only)
- * tea5761 (FM tuner)
- * (serial 0)
- * GPS
- * (some serial port)
- * csr41814 (Bluetooth)
- */
- n8x0_gpio_setup(s);
- n8x0_nand_setup(s);
- n8x0_i2c_setup(s);
- if (model == 800) {
- n800_tsc_kbd_setup(s);
- } else if (model == 810) {
- n810_tsc_setup(s);
- n810_kbd_setup(s);
- }
- n8x0_spi_setup(s);
- n8x0_dss_setup(s);
- n8x0_cbus_setup(s);
- n8x0_usb_setup(s);
-
- if (machine->kernel_filename) {
- /* Or at the linux loader. */
- arm_load_kernel(s->mpu->cpu, machine, binfo);
-
- qemu_register_reset(n8x0_boot_init, s);
- }
-
- if (option_rom[0].name &&
- (machine->boot_config.order[0] == 'n' || !machine->kernel_filename)) {
- uint8_t *nolo_tags = g_new(uint8_t, 0x10000);
- /* No, wait, better start at the ROM. */
- s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000;
-
- /*
- * This is intended for loading the `secondary.bin' program from
- * Nokia images (the NOLO bootloader). The entry point seems
- * to be at OMAP2_Q2_BASE + 0x400000.
- *
- * The `2nd.bin' files contain some kind of earlier boot code and
- * for them the entry point needs to be set to OMAP2_SRAM_BASE.
- *
- * The code above is for loading the `zImage' file from Nokia
- * images.
- */
- if (load_image_targphys(option_rom[0].name,
- OMAP2_Q2_BASE + 0x400000,
- machine->ram_size - 0x400000) < 0) {
- error_report("Failed to load secondary bootloader %s",
- option_rom[0].name);
- exit(EXIT_FAILURE);
- }
-
- n800_setup_nolo_tags(nolo_tags);
- cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000);
- g_free(nolo_tags);
- }
-}
-
-static struct arm_boot_info n800_binfo = {
- .loader_start = OMAP2_Q2_BASE,
- .board_id = 0x4f7,
- .atag_board = n800_atag_setup,
-};
-
-static struct arm_boot_info n810_binfo = {
- .loader_start = OMAP2_Q2_BASE,
- /* 0x60c and 0x6bf (WiMAX Edition) have been assigned but are not
- * used by some older versions of the bootloader and 5555 is used
- * instead (including versions that shipped with many devices). */
- .board_id = 0x60c,
- .atag_board = n810_atag_setup,
-};
-
-static void n800_init(MachineState *machine)
-{
- n8x0_init(machine, &n800_binfo, 800);
-}
-
-static void n810_init(MachineState *machine)
-{
- n8x0_init(machine, &n810_binfo, 810);
-}
-
-static void n800_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "Nokia N800 tablet aka. RX-34 (OMAP2420)";
- mc->init = n800_init;
- mc->default_boot_order = "";
- mc->ignore_memory_transaction_failures = true;
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2");
- /* Actually two chips of 0x4000000 bytes each */
- mc->default_ram_size = 0x08000000;
- mc->default_ram_id = "omap2.dram";
- mc->deprecation_reason = "machine is old and unmaintained";
-
- machine_add_audiodev_property(mc);
-}
-
-static const TypeInfo n800_type = {
- .name = MACHINE_TYPE_NAME("n800"),
- .parent = TYPE_MACHINE,
- .class_init = n800_class_init,
-};
-
-static void n810_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "Nokia N810 tablet aka. RX-44 (OMAP2420)";
- mc->init = n810_init;
- mc->default_boot_order = "";
- mc->ignore_memory_transaction_failures = true;
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2");
- /* Actually two chips of 0x4000000 bytes each */
- mc->default_ram_size = 0x08000000;
- mc->default_ram_id = "omap2.dram";
- mc->deprecation_reason = "machine is old and unmaintained";
-
- machine_add_audiodev_property(mc);
-}
-
-static const TypeInfo n810_type = {
- .name = MACHINE_TYPE_NAME("n810"),
- .parent = TYPE_MACHINE,
- .class_init = n810_class_init,
-};
-
-static void nseries_machine_init(void)
-{
- type_register_static(&n800_type);
- type_register_static(&n810_type);
-}
-
-type_init(nseries_machine_init)
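/*
 * Illustration only, not part of the patch above: the removed
 * n8x0_atag_setup() emitted Nokia's board-specific tag list as a flat
 * sequence of { u16 tag; u16 len; u8 payload[len]; } records, written
 * with stw_p()/stl_p() so the values land in target byte order, and it
 * returned the total number of bytes produced.  A minimal host-side
 * sketch of that record layout (hypothetical names, native byte order):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t emit_board_tag(uint8_t *buf, uint16_t tag,
                             const void *payload, uint16_t len)
{
    memcpy(buf + 0, &tag, sizeof(tag));   /* u16 tag, e.g. OMAP_TAG_UART */
    memcpy(buf + 2, &len, sizeof(len));   /* u16 payload length in bytes */
    memcpy(buf + 4, payload, len);        /* 'len' bytes of payload      */
    return 4 + len;                       /* bytes consumed in the list  */
}

int main(void)
{
    uint8_t list[64];
    uint16_t enabled_uarts = (1 << 2) | (1 << 1) | (1 << 0);
    size_t used = emit_board_tag(list, 0x0001 /* hypothetical tag value */,
                                 &enabled_uarts, sizeof(enabled_uarts));

    printf("emitted %zu bytes\n", used);
    return 0;
}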
diff --git a/hw/arm/olimex-stm32-h405.c b/hw/arm/olimex-stm32-h405.c
index 4ad7b04..1f15620 100644
--- a/hw/arm/olimex-stm32-h405.c
+++ b/hw/arm/olimex-stm32-h405.c
@@ -51,7 +51,7 @@ static void olimex_stm32_h405_init(MachineState *machine)
qdev_connect_clock_in(dev, "sysclk", sysclk);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- armv7m_load_kernel(ARM_CPU(first_cpu),
+ armv7m_load_kernel(STM32F405_SOC(dev)->armv7m.cpu,
machine->kernel_filename,
0, FLASH_SIZE);
}
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
index 86ee336..74458fb 100644
--- a/hw/arm/omap1.c
+++ b/hw/arm/omap1.c
@@ -23,24 +23,26 @@
#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "cpu.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/boot.h"
#include "hw/arm/omap.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
+#include "hw/sd/sd.h"
+#include "system/blockdev.h"
+#include "system/system.h"
#include "hw/arm/soc_dma.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/rtc.h"
+#include "system/qtest.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/rtc.h"
#include "qemu/range.h"
#include "hw/sysbus.h"
#include "qemu/cutils.h"
#include "qemu/bcd.h"
#include "target/arm/cpu-qom.h"
+#include "trace.h"
static inline void omap_log_badwidth(const char *funcname, hwaddr addr, int sz)
{
@@ -142,7 +144,7 @@ static inline void omap_timer_update(struct omap_mpu_timer_s *timer)
int64_t expires;
if (timer->enable && timer->st && timer->rate) {
- timer->val = timer->reset_val; /* Should skip this on clk enable */
+ timer->val = timer->reset_val; /* Should skip this on clk enable */
expires = muldiv64((uint64_t) timer->val << (timer->ptv + 1),
NANOSECONDS_PER_SECOND, timer->rate);
@@ -210,13 +212,13 @@ static uint64_t omap_mpu_timer_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CNTL_TIMER */
+ case 0x00: /* CNTL_TIMER */
return (s->enable << 5) | (s->ptv << 2) | (s->ar << 1) | s->st;
- case 0x04: /* LOAD_TIM */
+ case 0x04: /* LOAD_TIM */
break;
- case 0x08: /* READ_TIM */
+ case 0x08: /* READ_TIM */
return omap_timer_read(s);
}
@@ -235,7 +237,7 @@ static void omap_mpu_timer_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CNTL_TIMER */
+ case 0x00: /* CNTL_TIMER */
omap_timer_sync(s);
s->enable = (value >> 5) & 1;
s->ptv = (value >> 2) & 7;
@@ -244,11 +246,11 @@ static void omap_mpu_timer_write(void *opaque, hwaddr addr,
omap_timer_update(s);
return;
- case 0x04: /* LOAD_TIM */
+ case 0x04: /* LOAD_TIM */
s->reset_val = value;
return;
- case 0x08: /* READ_TIM */
+ case 0x08: /* READ_TIM */
OMAP_RO_REG(addr);
break;
@@ -316,14 +318,14 @@ static uint64_t omap_wd_timer_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CNTL_TIMER */
+ case 0x00: /* CNTL_TIMER */
return (s->timer.ptv << 9) | (s->timer.ar << 8) |
(s->timer.st << 7) | (s->free << 1);
- case 0x04: /* READ_TIMER */
+ case 0x04: /* READ_TIMER */
return omap_timer_read(&s->timer);
- case 0x08: /* TIMER_MODE */
+ case 0x08: /* TIMER_MODE */
return s->mode << 15;
}
@@ -342,7 +344,7 @@ static void omap_wd_timer_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CNTL_TIMER */
+ case 0x00: /* CNTL_TIMER */
omap_timer_sync(&s->timer);
s->timer.ptv = (value >> 9) & 7;
s->timer.ar = (value >> 8) & 1;
@@ -351,11 +353,11 @@ static void omap_wd_timer_write(void *opaque, hwaddr addr,
omap_timer_update(&s->timer);
break;
- case 0x04: /* LOAD_TIMER */
+ case 0x04: /* LOAD_TIMER */
s->timer.reset_val = value & 0xffff;
break;
- case 0x08: /* TIMER_MODE */
+ case 0x08: /* TIMER_MODE */
if (!s->mode && ((value >> 15) & 1))
omap_clk_get(s->timer.clk);
s->mode |= (value >> 15) & 1;
@@ -440,13 +442,13 @@ static uint64_t omap_os_timer_read(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* TVR */
+ case 0x00: /* TVR */
return s->timer.reset_val;
- case 0x04: /* TCR */
+ case 0x04: /* TCR */
return omap_timer_read(&s->timer);
- case 0x08: /* CR */
+ case 0x08: /* CR */
return (s->timer.ar << 3) | (s->timer.it_ena << 2) | s->timer.st;
default:
@@ -468,15 +470,15 @@ static void omap_os_timer_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* TVR */
+ case 0x00: /* TVR */
s->timer.reset_val = value & 0x00ffffff;
break;
- case 0x04: /* TCR */
+ case 0x04: /* TCR */
OMAP_RO_REG(addr);
break;
- case 0x08: /* CR */
+ case 0x08: /* CR */
s->timer.ar = (value >> 3) & 1;
s->timer.it_ena = (value >> 2) & 1;
if (s->timer.st != (value & 1) || (value & 2)) {
@@ -541,34 +543,34 @@ static uint64_t omap_ulpd_pm_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x14: /* IT_STATUS */
+ case 0x14: /* IT_STATUS */
ret = s->ulpd_pm_regs[addr >> 2];
s->ulpd_pm_regs[addr >> 2] = 0;
qemu_irq_lower(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K));
return ret;
- case 0x18: /* Reserved */
- case 0x1c: /* Reserved */
- case 0x20: /* Reserved */
- case 0x28: /* Reserved */
- case 0x2c: /* Reserved */
+ case 0x18: /* Reserved */
+ case 0x1c: /* Reserved */
+ case 0x20: /* Reserved */
+ case 0x28: /* Reserved */
+ case 0x2c: /* Reserved */
OMAP_BAD_REG(addr);
/* fall through */
- case 0x00: /* COUNTER_32_LSB */
- case 0x04: /* COUNTER_32_MSB */
- case 0x08: /* COUNTER_HIGH_FREQ_LSB */
- case 0x0c: /* COUNTER_HIGH_FREQ_MSB */
- case 0x10: /* GAUGING_CTRL */
- case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */
- case 0x30: /* CLOCK_CTRL */
- case 0x34: /* SOFT_REQ */
- case 0x38: /* COUNTER_32_FIQ */
- case 0x3c: /* DPLL_CTRL */
- case 0x40: /* STATUS_REQ */
+ case 0x00: /* COUNTER_32_LSB */
+ case 0x04: /* COUNTER_32_MSB */
+ case 0x08: /* COUNTER_HIGH_FREQ_LSB */
+ case 0x0c: /* COUNTER_HIGH_FREQ_MSB */
+ case 0x10: /* GAUGING_CTRL */
+ case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */
+ case 0x30: /* CLOCK_CTRL */
+ case 0x34: /* SOFT_REQ */
+ case 0x38: /* COUNTER_32_FIQ */
+ case 0x3c: /* DPLL_CTRL */
+ case 0x40: /* STATUS_REQ */
/* XXX: check clk::usecount state for every clock */
- case 0x48: /* LOCL_TIME */
- case 0x4c: /* APLL_CTRL */
- case 0x50: /* POWER_CTRL */
+ case 0x48: /* LOCL_TIME */
+ case 0x4c: /* APLL_CTRL */
+ case 0x50: /* POWER_CTRL */
return s->ulpd_pm_regs[addr >> 2];
}
@@ -579,22 +581,22 @@ static uint64_t omap_ulpd_pm_read(void *opaque, hwaddr addr,
static inline void omap_ulpd_clk_update(struct omap_mpu_state_s *s,
uint16_t diff, uint16_t value)
{
- if (diff & (1 << 4)) /* USB_MCLK_EN */
+ if (diff & (1 << 4)) /* USB_MCLK_EN */
omap_clk_onoff(omap_findclk(s, "usb_clk0"), (value >> 4) & 1);
- if (diff & (1 << 5)) /* DIS_USB_PVCI_CLK */
+ if (diff & (1 << 5)) /* DIS_USB_PVCI_CLK */
omap_clk_onoff(omap_findclk(s, "usb_w2fc_ck"), (~value >> 5) & 1);
}
static inline void omap_ulpd_req_update(struct omap_mpu_state_s *s,
uint16_t diff, uint16_t value)
{
- if (diff & (1 << 0)) /* SOFT_DPLL_REQ */
+ if (diff & (1 << 0)) /* SOFT_DPLL_REQ */
omap_clk_canidle(omap_findclk(s, "dpll4"), (~value >> 0) & 1);
- if (diff & (1 << 1)) /* SOFT_COM_REQ */
+ if (diff & (1 << 1)) /* SOFT_COM_REQ */
omap_clk_canidle(omap_findclk(s, "com_mclk_out"), (~value >> 1) & 1);
- if (diff & (1 << 2)) /* SOFT_SDW_REQ */
+ if (diff & (1 << 2)) /* SOFT_SDW_REQ */
omap_clk_canidle(omap_findclk(s, "bt_mclk_out"), (~value >> 2) & 1);
- if (diff & (1 << 3)) /* SOFT_USB_REQ */
+ if (diff & (1 << 3)) /* SOFT_USB_REQ */
omap_clk_canidle(omap_findclk(s, "usb_clk0"), (~value >> 3) & 1);
}
@@ -613,16 +615,16 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* COUNTER_32_LSB */
- case 0x04: /* COUNTER_32_MSB */
- case 0x08: /* COUNTER_HIGH_FREQ_LSB */
- case 0x0c: /* COUNTER_HIGH_FREQ_MSB */
- case 0x14: /* IT_STATUS */
- case 0x40: /* STATUS_REQ */
+ case 0x00: /* COUNTER_32_LSB */
+ case 0x04: /* COUNTER_32_MSB */
+ case 0x08: /* COUNTER_HIGH_FREQ_LSB */
+ case 0x0c: /* COUNTER_HIGH_FREQ_MSB */
+ case 0x14: /* IT_STATUS */
+ case 0x40: /* STATUS_REQ */
OMAP_RO_REG(addr);
break;
- case 0x10: /* GAUGING_CTRL */
+ case 0x10: /* GAUGING_CTRL */
/* Bits 0 and 1 seem to be confused in the OMAP 310 TRM */
if ((s->ulpd_pm_regs[addr >> 2] ^ value) & 1) {
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -636,50 +638,50 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
ticks = muldiv64(now, 32768, NANOSECONDS_PER_SECOND);
s->ulpd_pm_regs[0x00 >> 2] = (ticks >> 0) & 0xffff;
s->ulpd_pm_regs[0x04 >> 2] = (ticks >> 16) & 0xffff;
- if (ticks >> 32) /* OVERFLOW_32K */
+ if (ticks >> 32) /* OVERFLOW_32K */
s->ulpd_pm_regs[0x14 >> 2] |= 1 << 2;
/* High frequency ticks */
ticks = muldiv64(now, 12000000, NANOSECONDS_PER_SECOND);
s->ulpd_pm_regs[0x08 >> 2] = (ticks >> 0) & 0xffff;
s->ulpd_pm_regs[0x0c >> 2] = (ticks >> 16) & 0xffff;
- if (ticks >> 32) /* OVERFLOW_HI_FREQ */
+ if (ticks >> 32) /* OVERFLOW_HI_FREQ */
s->ulpd_pm_regs[0x14 >> 2] |= 1 << 1;
- s->ulpd_pm_regs[0x14 >> 2] |= 1 << 0; /* IT_GAUGING */
+ s->ulpd_pm_regs[0x14 >> 2] |= 1 << 0; /* IT_GAUGING */
qemu_irq_raise(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K));
}
}
s->ulpd_pm_regs[addr >> 2] = value;
break;
- case 0x18: /* Reserved */
- case 0x1c: /* Reserved */
- case 0x20: /* Reserved */
- case 0x28: /* Reserved */
- case 0x2c: /* Reserved */
+ case 0x18: /* Reserved */
+ case 0x1c: /* Reserved */
+ case 0x20: /* Reserved */
+ case 0x28: /* Reserved */
+ case 0x2c: /* Reserved */
OMAP_BAD_REG(addr);
/* fall through */
- case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */
- case 0x38: /* COUNTER_32_FIQ */
- case 0x48: /* LOCL_TIME */
- case 0x50: /* POWER_CTRL */
+ case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */
+ case 0x38: /* COUNTER_32_FIQ */
+ case 0x48: /* LOCL_TIME */
+ case 0x50: /* POWER_CTRL */
s->ulpd_pm_regs[addr >> 2] = value;
break;
- case 0x30: /* CLOCK_CTRL */
+ case 0x30: /* CLOCK_CTRL */
diff = s->ulpd_pm_regs[addr >> 2] ^ value;
s->ulpd_pm_regs[addr >> 2] = value & 0x3f;
omap_ulpd_clk_update(s, diff, value);
break;
- case 0x34: /* SOFT_REQ */
+ case 0x34: /* SOFT_REQ */
diff = s->ulpd_pm_regs[addr >> 2] ^ value;
s->ulpd_pm_regs[addr >> 2] = value & 0x1f;
omap_ulpd_req_update(s, diff, value);
break;
- case 0x3c: /* DPLL_CTRL */
+ case 0x3c: /* DPLL_CTRL */
/* XXX: OMAP310 TRM claims bit 3 is PLL_ENABLE, and bit 4 is
* omitted altogether, probably a typo. */
/* This register has identical semantics with DPLL(1:3) control
@@ -687,11 +689,11 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
diff = s->ulpd_pm_regs[addr >> 2] & value;
s->ulpd_pm_regs[addr >> 2] = value & 0x2fff;
if (diff & (0x3ff << 2)) {
- if (value & (1 << 4)) { /* PLL_ENABLE */
- div = ((value >> 5) & 3) + 1; /* PLL_DIV */
- mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */
+ if (value & (1 << 4)) { /* PLL_ENABLE */
+ div = ((value >> 5) & 3) + 1; /* PLL_DIV */
+ mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */
} else {
- div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */
+ div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */
mult = 1;
}
omap_clk_setrate(omap_findclk(s, "dpll4"), div, mult);
@@ -706,10 +708,10 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
s->ulpd_pm_regs[addr >> 2] |= 2;
break;
- case 0x4c: /* APLL_CTRL */
+ case 0x4c: /* APLL_CTRL */
diff = s->ulpd_pm_regs[addr >> 2] & value;
s->ulpd_pm_regs[addr >> 2] = value & 0xf;
- if (diff & (1 << 0)) /* APLL_NDPLL_SWITCH */
+ if (diff & (1 << 0)) /* APLL_NDPLL_SWITCH */
omap_clk_reparent(omap_findclk(s, "ck_48m"), omap_findclk(s,
(value & (1 << 0)) ? "apll" : "dpll4"));
break;
@@ -773,43 +775,43 @@ static uint64_t omap_pin_cfg_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* FUNC_MUX_CTRL_0 */
- case 0x04: /* FUNC_MUX_CTRL_1 */
- case 0x08: /* FUNC_MUX_CTRL_2 */
+ case 0x00: /* FUNC_MUX_CTRL_0 */
+ case 0x04: /* FUNC_MUX_CTRL_1 */
+ case 0x08: /* FUNC_MUX_CTRL_2 */
return s->func_mux_ctrl[addr >> 2];
- case 0x0c: /* COMP_MODE_CTRL_0 */
+ case 0x0c: /* COMP_MODE_CTRL_0 */
return s->comp_mode_ctrl[0];
- case 0x10: /* FUNC_MUX_CTRL_3 */
- case 0x14: /* FUNC_MUX_CTRL_4 */
- case 0x18: /* FUNC_MUX_CTRL_5 */
- case 0x1c: /* FUNC_MUX_CTRL_6 */
- case 0x20: /* FUNC_MUX_CTRL_7 */
- case 0x24: /* FUNC_MUX_CTRL_8 */
- case 0x28: /* FUNC_MUX_CTRL_9 */
- case 0x2c: /* FUNC_MUX_CTRL_A */
- case 0x30: /* FUNC_MUX_CTRL_B */
- case 0x34: /* FUNC_MUX_CTRL_C */
- case 0x38: /* FUNC_MUX_CTRL_D */
+ case 0x10: /* FUNC_MUX_CTRL_3 */
+ case 0x14: /* FUNC_MUX_CTRL_4 */
+ case 0x18: /* FUNC_MUX_CTRL_5 */
+ case 0x1c: /* FUNC_MUX_CTRL_6 */
+ case 0x20: /* FUNC_MUX_CTRL_7 */
+ case 0x24: /* FUNC_MUX_CTRL_8 */
+ case 0x28: /* FUNC_MUX_CTRL_9 */
+ case 0x2c: /* FUNC_MUX_CTRL_A */
+ case 0x30: /* FUNC_MUX_CTRL_B */
+ case 0x34: /* FUNC_MUX_CTRL_C */
+ case 0x38: /* FUNC_MUX_CTRL_D */
return s->func_mux_ctrl[(addr >> 2) - 1];
- case 0x40: /* PULL_DWN_CTRL_0 */
- case 0x44: /* PULL_DWN_CTRL_1 */
- case 0x48: /* PULL_DWN_CTRL_2 */
- case 0x4c: /* PULL_DWN_CTRL_3 */
+ case 0x40: /* PULL_DWN_CTRL_0 */
+ case 0x44: /* PULL_DWN_CTRL_1 */
+ case 0x48: /* PULL_DWN_CTRL_2 */
+ case 0x4c: /* PULL_DWN_CTRL_3 */
return s->pull_dwn_ctrl[(addr & 0xf) >> 2];
- case 0x50: /* GATE_INH_CTRL_0 */
+ case 0x50: /* GATE_INH_CTRL_0 */
return s->gate_inh_ctrl[0];
- case 0x60: /* VOLTAGE_CTRL_0 */
+ case 0x60: /* VOLTAGE_CTRL_0 */
return s->voltage_ctrl[0];
- case 0x70: /* TEST_DBG_CTRL_0 */
+ case 0x70: /* TEST_DBG_CTRL_0 */
return s->test_dbg_ctrl[0];
- case 0x80: /* MOD_CONF_CTRL_0 */
+ case 0x80: /* MOD_CONF_CTRL_0 */
return s->mod_conf_ctrl[0];
}
@@ -821,10 +823,10 @@ static inline void omap_pin_funcmux0_update(struct omap_mpu_state_s *s,
uint32_t diff, uint32_t value)
{
if (s->compat1509) {
- if (diff & (1 << 9)) /* BLUETOOTH */
+ if (diff & (1 << 9)) /* BLUETOOTH */
omap_clk_onoff(omap_findclk(s, "bt_mclk_out"),
(~value >> 9) & 1);
- if (diff & (1 << 7)) /* USB.CLKO */
+ if (diff & (1 << 7)) /* USB.CLKO */
omap_clk_onoff(omap_findclk(s, "usb.clko"),
(value >> 7) & 1);
}
@@ -854,23 +856,23 @@ static inline void omap_pin_modconf1_update(struct omap_mpu_state_s *s,
omap_findclk(s, ((value >> 31) & 1) ?
"ck_48m" : "armper_ck"));
}
- if (diff & (1 << 30)) /* CONF_MOD_UART2_CLK_MODE_R */
+ if (diff & (1 << 30)) /* CONF_MOD_UART2_CLK_MODE_R */
omap_clk_reparent(omap_findclk(s, "uart2_ck"),
omap_findclk(s, ((value >> 30) & 1) ?
"ck_48m" : "armper_ck"));
- if (diff & (1 << 29)) /* CONF_MOD_UART1_CLK_MODE_R */
+ if (diff & (1 << 29)) /* CONF_MOD_UART1_CLK_MODE_R */
omap_clk_reparent(omap_findclk(s, "uart1_ck"),
omap_findclk(s, ((value >> 29) & 1) ?
"ck_48m" : "armper_ck"));
- if (diff & (1 << 23)) /* CONF_MOD_MMC_SD_CLK_REQ_R */
+ if (diff & (1 << 23)) /* CONF_MOD_MMC_SD_CLK_REQ_R */
omap_clk_reparent(omap_findclk(s, "mmc_ck"),
omap_findclk(s, ((value >> 23) & 1) ?
"ck_48m" : "armper_ck"));
- if (diff & (1 << 12)) /* CONF_MOD_COM_MCLK_12_48_S */
+ if (diff & (1 << 12)) /* CONF_MOD_COM_MCLK_12_48_S */
omap_clk_reparent(omap_findclk(s, "com_mclk_out"),
omap_findclk(s, ((value >> 12) & 1) ?
"ck_48m" : "armper_ck"));
- if (diff & (1 << 9)) /* CONF_MOD_USB_HOST_HHC_UHO */
+ if (diff & (1 << 9)) /* CONF_MOD_USB_HOST_HHC_UHO */
omap_clk_onoff(omap_findclk(s, "usb_hhc_ck"), (value >> 9) & 1);
}
@@ -886,63 +888,63 @@ static void omap_pin_cfg_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* FUNC_MUX_CTRL_0 */
+ case 0x00: /* FUNC_MUX_CTRL_0 */
diff = s->func_mux_ctrl[addr >> 2] ^ value;
s->func_mux_ctrl[addr >> 2] = value;
omap_pin_funcmux0_update(s, diff, value);
return;
- case 0x04: /* FUNC_MUX_CTRL_1 */
+ case 0x04: /* FUNC_MUX_CTRL_1 */
diff = s->func_mux_ctrl[addr >> 2] ^ value;
s->func_mux_ctrl[addr >> 2] = value;
omap_pin_funcmux1_update(s, diff, value);
return;
- case 0x08: /* FUNC_MUX_CTRL_2 */
+ case 0x08: /* FUNC_MUX_CTRL_2 */
s->func_mux_ctrl[addr >> 2] = value;
return;
- case 0x0c: /* COMP_MODE_CTRL_0 */
+ case 0x0c: /* COMP_MODE_CTRL_0 */
s->comp_mode_ctrl[0] = value;
s->compat1509 = (value != 0x0000eaef);
omap_pin_funcmux0_update(s, ~0, s->func_mux_ctrl[0]);
omap_pin_funcmux1_update(s, ~0, s->func_mux_ctrl[1]);
return;
- case 0x10: /* FUNC_MUX_CTRL_3 */
- case 0x14: /* FUNC_MUX_CTRL_4 */
- case 0x18: /* FUNC_MUX_CTRL_5 */
- case 0x1c: /* FUNC_MUX_CTRL_6 */
- case 0x20: /* FUNC_MUX_CTRL_7 */
- case 0x24: /* FUNC_MUX_CTRL_8 */
- case 0x28: /* FUNC_MUX_CTRL_9 */
- case 0x2c: /* FUNC_MUX_CTRL_A */
- case 0x30: /* FUNC_MUX_CTRL_B */
- case 0x34: /* FUNC_MUX_CTRL_C */
- case 0x38: /* FUNC_MUX_CTRL_D */
+ case 0x10: /* FUNC_MUX_CTRL_3 */
+ case 0x14: /* FUNC_MUX_CTRL_4 */
+ case 0x18: /* FUNC_MUX_CTRL_5 */
+ case 0x1c: /* FUNC_MUX_CTRL_6 */
+ case 0x20: /* FUNC_MUX_CTRL_7 */
+ case 0x24: /* FUNC_MUX_CTRL_8 */
+ case 0x28: /* FUNC_MUX_CTRL_9 */
+ case 0x2c: /* FUNC_MUX_CTRL_A */
+ case 0x30: /* FUNC_MUX_CTRL_B */
+ case 0x34: /* FUNC_MUX_CTRL_C */
+ case 0x38: /* FUNC_MUX_CTRL_D */
s->func_mux_ctrl[(addr >> 2) - 1] = value;
return;
- case 0x40: /* PULL_DWN_CTRL_0 */
- case 0x44: /* PULL_DWN_CTRL_1 */
- case 0x48: /* PULL_DWN_CTRL_2 */
- case 0x4c: /* PULL_DWN_CTRL_3 */
+ case 0x40: /* PULL_DWN_CTRL_0 */
+ case 0x44: /* PULL_DWN_CTRL_1 */
+ case 0x48: /* PULL_DWN_CTRL_2 */
+ case 0x4c: /* PULL_DWN_CTRL_3 */
s->pull_dwn_ctrl[(addr & 0xf) >> 2] = value;
return;
- case 0x50: /* GATE_INH_CTRL_0 */
+ case 0x50: /* GATE_INH_CTRL_0 */
s->gate_inh_ctrl[0] = value;
return;
- case 0x60: /* VOLTAGE_CTRL_0 */
+ case 0x60: /* VOLTAGE_CTRL_0 */
s->voltage_ctrl[0] = value;
return;
- case 0x70: /* TEST_DBG_CTRL_0 */
+ case 0x70: /* TEST_DBG_CTRL_0 */
s->test_dbg_ctrl[0] = value;
return;
- case 0x80: /* MOD_CONF_CTRL_0 */
+ case 0x80: /* MOD_CONF_CTRL_0 */
diff = s->mod_conf_ctrl[0] ^ value;
s->mod_conf_ctrl[0] = value;
omap_pin_modconf1_update(s, diff, value);
@@ -996,17 +998,17 @@ static uint64_t omap_id_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0xfffe1800: /* DIE_ID_LSB */
+ case 0xfffe1800: /* DIE_ID_LSB */
return 0xc9581f0e;
- case 0xfffe1804: /* DIE_ID_MSB */
+ case 0xfffe1804: /* DIE_ID_MSB */
return 0xa8858bfa;
- case 0xfffe2000: /* PRODUCT_ID_LSB */
+ case 0xfffe2000: /* PRODUCT_ID_LSB */
return 0x00aaaafc;
- case 0xfffe2004: /* PRODUCT_ID_MSB */
+ case 0xfffe2004: /* PRODUCT_ID_MSB */
return 0xcafeb574;
- case 0xfffed400: /* JTAG_ID_LSB */
+ case 0xfffed400: /* JTAG_ID_LSB */
switch (s->mpu_model) {
case omap310:
return 0x03310315;
@@ -1017,7 +1019,7 @@ static uint64_t omap_id_read(void *opaque, hwaddr addr,
}
break;
- case 0xfffed404: /* JTAG_ID_MSB */
+ case 0xfffed404: /* JTAG_ID_MSB */
switch (s->mpu_model) {
case omap310:
return 0xfb57402f;
@@ -1078,22 +1080,22 @@ static uint64_t omap_mpui_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CTRL */
+ case 0x00: /* CTRL */
return s->mpui_ctrl;
- case 0x04: /* DEBUG_ADDR */
+ case 0x04: /* DEBUG_ADDR */
return 0x01ffffff;
- case 0x08: /* DEBUG_DATA */
+ case 0x08: /* DEBUG_DATA */
return 0xffffffff;
- case 0x0c: /* DEBUG_FLAG */
+ case 0x0c: /* DEBUG_FLAG */
return 0x00000800;
- case 0x10: /* STATUS */
+ case 0x10: /* STATUS */
return 0x00000000;
/* Not in OMAP310 */
- case 0x14: /* DSP_STATUS */
- case 0x18: /* DSP_BOOT_CONFIG */
+ case 0x14: /* DSP_STATUS */
+ case 0x18: /* DSP_BOOT_CONFIG */
return 0x00000000;
- case 0x1c: /* DSP_MPUI_CONFIG */
+ case 0x1c: /* DSP_MPUI_CONFIG */
return 0x0000ffff;
}
@@ -1112,20 +1114,20 @@ static void omap_mpui_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* CTRL */
+ case 0x00: /* CTRL */
s->mpui_ctrl = value & 0x007fffff;
break;
- case 0x04: /* DEBUG_ADDR */
- case 0x08: /* DEBUG_DATA */
- case 0x0c: /* DEBUG_FLAG */
- case 0x10: /* STATUS */
+ case 0x04: /* DEBUG_ADDR */
+ case 0x08: /* DEBUG_DATA */
+ case 0x0c: /* DEBUG_FLAG */
+ case 0x10: /* STATUS */
/* Not in OMAP310 */
- case 0x14: /* DSP_STATUS */
+ case 0x14: /* DSP_STATUS */
OMAP_RO_REG(addr);
break;
- case 0x18: /* DSP_BOOT_CONFIG */
- case 0x1c: /* DSP_MPUI_CONFIG */
+ case 0x18: /* DSP_BOOT_CONFIG */
+ case 0x1c: /* DSP_MPUI_CONFIG */
break;
default:
@@ -1176,19 +1178,19 @@ static uint64_t omap_tipb_bridge_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* TIPB_CNTL */
+ case 0x00: /* TIPB_CNTL */
return s->control;
- case 0x04: /* TIPB_BUS_ALLOC */
+ case 0x04: /* TIPB_BUS_ALLOC */
return s->alloc;
- case 0x08: /* MPU_TIPB_CNTL */
+ case 0x08: /* MPU_TIPB_CNTL */
return s->buffer;
- case 0x0c: /* ENHANCED_TIPB_CNTL */
+ case 0x0c: /* ENHANCED_TIPB_CNTL */
return s->enh_control;
- case 0x10: /* ADDRESS_DBG */
- case 0x14: /* DATA_DEBUG_LOW */
- case 0x18: /* DATA_DEBUG_HIGH */
+ case 0x10: /* ADDRESS_DBG */
+ case 0x14: /* DATA_DEBUG_LOW */
+ case 0x18: /* DATA_DEBUG_HIGH */
return 0xffff;
- case 0x1c: /* DEBUG_CNTR_SIG */
+ case 0x1c: /* DEBUG_CNTR_SIG */
return 0x00f8;
}
@@ -1207,27 +1209,27 @@ static void omap_tipb_bridge_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* TIPB_CNTL */
+ case 0x00: /* TIPB_CNTL */
s->control = value & 0xffff;
break;
- case 0x04: /* TIPB_BUS_ALLOC */
+ case 0x04: /* TIPB_BUS_ALLOC */
s->alloc = value & 0x003f;
break;
- case 0x08: /* MPU_TIPB_CNTL */
+ case 0x08: /* MPU_TIPB_CNTL */
s->buffer = value & 0x0003;
break;
- case 0x0c: /* ENHANCED_TIPB_CNTL */
+ case 0x0c: /* ENHANCED_TIPB_CNTL */
s->width_intr = !(value & 2);
s->enh_control = value & 0x000f;
break;
- case 0x10: /* ADDRESS_DBG */
- case 0x14: /* DATA_DEBUG_LOW */
- case 0x18: /* DATA_DEBUG_HIGH */
- case 0x1c: /* DEBUG_CNTR_SIG */
+ case 0x10: /* ADDRESS_DBG */
+ case 0x14: /* DATA_DEBUG_LOW */
+ case 0x18: /* DATA_DEBUG_HIGH */
+ case 0x1c: /* DEBUG_CNTR_SIG */
OMAP_RO_REG(addr);
break;
@@ -1278,23 +1280,23 @@ static uint64_t omap_tcmi_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* IMIF_PRIO */
- case 0x04: /* EMIFS_PRIO */
- case 0x08: /* EMIFF_PRIO */
- case 0x0c: /* EMIFS_CONFIG */
- case 0x10: /* EMIFS_CS0_CONFIG */
- case 0x14: /* EMIFS_CS1_CONFIG */
- case 0x18: /* EMIFS_CS2_CONFIG */
- case 0x1c: /* EMIFS_CS3_CONFIG */
- case 0x24: /* EMIFF_MRS */
- case 0x28: /* TIMEOUT1 */
- case 0x2c: /* TIMEOUT2 */
- case 0x30: /* TIMEOUT3 */
- case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */
- case 0x40: /* EMIFS_CFG_DYN_WAIT */
+ case 0x00: /* IMIF_PRIO */
+ case 0x04: /* EMIFS_PRIO */
+ case 0x08: /* EMIFF_PRIO */
+ case 0x0c: /* EMIFS_CONFIG */
+ case 0x10: /* EMIFS_CS0_CONFIG */
+ case 0x14: /* EMIFS_CS1_CONFIG */
+ case 0x18: /* EMIFS_CS2_CONFIG */
+ case 0x1c: /* EMIFS_CS3_CONFIG */
+ case 0x24: /* EMIFF_MRS */
+ case 0x28: /* TIMEOUT1 */
+ case 0x2c: /* TIMEOUT2 */
+ case 0x30: /* TIMEOUT3 */
+ case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */
+ case 0x40: /* EMIFS_CFG_DYN_WAIT */
return s->tcmi_regs[addr >> 2];
- case 0x20: /* EMIFF_SDRAM_CONFIG */
+ case 0x20: /* EMIFF_SDRAM_CONFIG */
ret = s->tcmi_regs[addr >> 2];
s->tcmi_regs[addr >> 2] &= ~1; /* XXX: Clear SLRF on SDRAM access */
/* XXX: We can try using the VGA_DIRTY flag for this */
@@ -1316,23 +1318,23 @@ static void omap_tcmi_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* IMIF_PRIO */
- case 0x04: /* EMIFS_PRIO */
- case 0x08: /* EMIFF_PRIO */
- case 0x10: /* EMIFS_CS0_CONFIG */
- case 0x14: /* EMIFS_CS1_CONFIG */
- case 0x18: /* EMIFS_CS2_CONFIG */
- case 0x1c: /* EMIFS_CS3_CONFIG */
- case 0x20: /* EMIFF_SDRAM_CONFIG */
- case 0x24: /* EMIFF_MRS */
- case 0x28: /* TIMEOUT1 */
- case 0x2c: /* TIMEOUT2 */
- case 0x30: /* TIMEOUT3 */
- case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */
- case 0x40: /* EMIFS_CFG_DYN_WAIT */
+ case 0x00: /* IMIF_PRIO */
+ case 0x04: /* EMIFS_PRIO */
+ case 0x08: /* EMIFF_PRIO */
+ case 0x10: /* EMIFS_CS0_CONFIG */
+ case 0x14: /* EMIFS_CS1_CONFIG */
+ case 0x18: /* EMIFS_CS2_CONFIG */
+ case 0x1c: /* EMIFS_CS3_CONFIG */
+ case 0x20: /* EMIFF_SDRAM_CONFIG */
+ case 0x24: /* EMIFF_MRS */
+ case 0x28: /* TIMEOUT1 */
+ case 0x2c: /* TIMEOUT2 */
+ case 0x30: /* TIMEOUT3 */
+ case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */
+ case 0x40: /* EMIFS_CFG_DYN_WAIT */
s->tcmi_regs[addr >> 2] = value;
break;
- case 0x0c: /* EMIFS_CONFIG */
+ case 0x0c: /* EMIFS_CONFIG */
s->tcmi_regs[addr >> 2] = (value & 0xf) | (1 << 4);
break;
@@ -1391,7 +1393,7 @@ static uint64_t omap_dpll_read(void *opaque, hwaddr addr,
return omap_badwidth_read16(opaque, addr);
}
- if (addr == 0x00) /* CTL_REG */
+ if (addr == 0x00) /* CTL_REG */
return s->mode;
OMAP_BAD_REG(addr);
@@ -1411,16 +1413,16 @@ static void omap_dpll_write(void *opaque, hwaddr addr,
return;
}
- if (addr == 0x00) { /* CTL_REG */
+ if (addr == 0x00) { /* CTL_REG */
/* See omap_ulpd_pm_write() too */
diff = s->mode & value;
s->mode = value & 0x2fff;
if (diff & (0x3ff << 2)) {
- if (value & (1 << 4)) { /* PLL_ENABLE */
- div = ((value >> 5) & 3) + 1; /* PLL_DIV */
- mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */
+ if (value & (1 << 4)) { /* PLL_ENABLE */
+ div = ((value >> 5) & 3) + 1; /* PLL_DIV */
+ mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */
} else {
- div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */
+ div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */
mult = 1;
}
omap_clk_setrate(s->dpll, div, mult);
@@ -1472,31 +1474,31 @@ static uint64_t omap_clkm_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* ARM_CKCTL */
+ case 0x00: /* ARM_CKCTL */
return s->clkm.arm_ckctl;
- case 0x04: /* ARM_IDLECT1 */
+ case 0x04: /* ARM_IDLECT1 */
return s->clkm.arm_idlect1;
- case 0x08: /* ARM_IDLECT2 */
+ case 0x08: /* ARM_IDLECT2 */
return s->clkm.arm_idlect2;
- case 0x0c: /* ARM_EWUPCT */
+ case 0x0c: /* ARM_EWUPCT */
return s->clkm.arm_ewupct;
- case 0x10: /* ARM_RSTCT1 */
+ case 0x10: /* ARM_RSTCT1 */
return s->clkm.arm_rstct1;
- case 0x14: /* ARM_RSTCT2 */
+ case 0x14: /* ARM_RSTCT2 */
return s->clkm.arm_rstct2;
- case 0x18: /* ARM_SYSST */
+ case 0x18: /* ARM_SYSST */
return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start;
- case 0x1c: /* ARM_CKOUT1 */
+ case 0x1c: /* ARM_CKOUT1 */
return s->clkm.arm_ckout1;
- case 0x20: /* ARM_CKOUT2 */
+ case 0x20: /* ARM_CKOUT2 */
break;
}
@@ -1509,7 +1511,7 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s,
{
omap_clk clk;
- if (diff & (1 << 14)) { /* ARM_INTHCK_SEL */
+ if (diff & (1 << 14)) { /* ARM_INTHCK_SEL */
if (value & (1 << 14))
/* Reserved */;
else {
@@ -1517,7 +1519,7 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s,
omap_clk_reparent(clk, omap_findclk(s, "tc_ck"));
}
}
- if (diff & (1 << 12)) { /* ARM_TIMXO */
+ if (diff & (1 << 12)) { /* ARM_TIMXO */
clk = omap_findclk(s, "armtim_ck");
if (value & (1 << 12))
omap_clk_reparent(clk, omap_findclk(s, "clkin"));
@@ -1525,27 +1527,27 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s,
omap_clk_reparent(clk, omap_findclk(s, "ck_gen1"));
}
/* XXX: en_dspck */
- if (diff & (3 << 10)) { /* DSPMMUDIV */
+ if (diff & (3 << 10)) { /* DSPMMUDIV */
clk = omap_findclk(s, "dspmmu_ck");
omap_clk_setrate(clk, 1 << ((value >> 10) & 3), 1);
}
- if (diff & (3 << 8)) { /* TCDIV */
+ if (diff & (3 << 8)) { /* TCDIV */
clk = omap_findclk(s, "tc_ck");
omap_clk_setrate(clk, 1 << ((value >> 8) & 3), 1);
}
- if (diff & (3 << 6)) { /* DSPDIV */
+ if (diff & (3 << 6)) { /* DSPDIV */
clk = omap_findclk(s, "dsp_ck");
omap_clk_setrate(clk, 1 << ((value >> 6) & 3), 1);
}
- if (diff & (3 << 4)) { /* ARMDIV */
+ if (diff & (3 << 4)) { /* ARMDIV */
clk = omap_findclk(s, "arm_ck");
omap_clk_setrate(clk, 1 << ((value >> 4) & 3), 1);
}
- if (diff & (3 << 2)) { /* LCDDIV */
+ if (diff & (3 << 2)) { /* LCDDIV */
clk = omap_findclk(s, "lcd_ck");
omap_clk_setrate(clk, 1 << ((value >> 2) & 3), 1);
}
- if (diff & (3 << 0)) { /* PERDIV */
+ if (diff & (3 << 0)) { /* PERDIV */
clk = omap_findclk(s, "armper_ck");
omap_clk_setrate(clk, 1 << ((value >> 0) & 3), 1);
}
@@ -1564,25 +1566,25 @@ static inline void omap_clkm_idlect1_update(struct omap_mpu_state_s *s,
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
}
-#define SET_CANIDLE(clock, bit) \
- if (diff & (1 << bit)) { \
- clk = omap_findclk(s, clock); \
- omap_clk_canidle(clk, (value >> bit) & 1); \
+#define SET_CANIDLE(clock, bit) \
+ if (diff & (1 << bit)) { \
+ clk = omap_findclk(s, clock); \
+ omap_clk_canidle(clk, (value >> bit) & 1); \
}
- SET_CANIDLE("mpuwd_ck", 0) /* IDLWDT_ARM */
- SET_CANIDLE("armxor_ck", 1) /* IDLXORP_ARM */
- SET_CANIDLE("mpuper_ck", 2) /* IDLPER_ARM */
- SET_CANIDLE("lcd_ck", 3) /* IDLLCD_ARM */
- SET_CANIDLE("lb_ck", 4) /* IDLLB_ARM */
- SET_CANIDLE("hsab_ck", 5) /* IDLHSAB_ARM */
- SET_CANIDLE("tipb_ck", 6) /* IDLIF_ARM */
- SET_CANIDLE("dma_ck", 6) /* IDLIF_ARM */
- SET_CANIDLE("tc_ck", 6) /* IDLIF_ARM */
- SET_CANIDLE("dpll1", 7) /* IDLDPLL_ARM */
- SET_CANIDLE("dpll2", 7) /* IDLDPLL_ARM */
- SET_CANIDLE("dpll3", 7) /* IDLDPLL_ARM */
- SET_CANIDLE("mpui_ck", 8) /* IDLAPI_ARM */
- SET_CANIDLE("armtim_ck", 9) /* IDLTIM_ARM */
+ SET_CANIDLE("mpuwd_ck", 0) /* IDLWDT_ARM */
+ SET_CANIDLE("armxor_ck", 1) /* IDLXORP_ARM */
+ SET_CANIDLE("mpuper_ck", 2) /* IDLPER_ARM */
+ SET_CANIDLE("lcd_ck", 3) /* IDLLCD_ARM */
+ SET_CANIDLE("lb_ck", 4) /* IDLLB_ARM */
+ SET_CANIDLE("hsab_ck", 5) /* IDLHSAB_ARM */
+ SET_CANIDLE("tipb_ck", 6) /* IDLIF_ARM */
+ SET_CANIDLE("dma_ck", 6) /* IDLIF_ARM */
+ SET_CANIDLE("tc_ck", 6) /* IDLIF_ARM */
+ SET_CANIDLE("dpll1", 7) /* IDLDPLL_ARM */
+ SET_CANIDLE("dpll2", 7) /* IDLDPLL_ARM */
+ SET_CANIDLE("dpll3", 7) /* IDLDPLL_ARM */
+ SET_CANIDLE("mpui_ck", 8) /* IDLAPI_ARM */
+ SET_CANIDLE("armtim_ck", 9) /* IDLTIM_ARM */
}
static inline void omap_clkm_idlect2_update(struct omap_mpu_state_s *s,
@@ -1590,22 +1592,22 @@ static inline void omap_clkm_idlect2_update(struct omap_mpu_state_s *s,
{
omap_clk clk;
-#define SET_ONOFF(clock, bit) \
- if (diff & (1 << bit)) { \
- clk = omap_findclk(s, clock); \
- omap_clk_onoff(clk, (value >> bit) & 1); \
+#define SET_ONOFF(clock, bit) \
+ if (diff & (1 << bit)) { \
+ clk = omap_findclk(s, clock); \
+ omap_clk_onoff(clk, (value >> bit) & 1); \
}
- SET_ONOFF("mpuwd_ck", 0) /* EN_WDTCK */
- SET_ONOFF("armxor_ck", 1) /* EN_XORPCK */
- SET_ONOFF("mpuper_ck", 2) /* EN_PERCK */
- SET_ONOFF("lcd_ck", 3) /* EN_LCDCK */
- SET_ONOFF("lb_ck", 4) /* EN_LBCK */
- SET_ONOFF("hsab_ck", 5) /* EN_HSABCK */
- SET_ONOFF("mpui_ck", 6) /* EN_APICK */
- SET_ONOFF("armtim_ck", 7) /* EN_TIMCK */
- SET_CANIDLE("dma_ck", 8) /* DMACK_REQ */
- SET_ONOFF("arm_gpio_ck", 9) /* EN_GPIOCK */
- SET_ONOFF("lbfree_ck", 10) /* EN_LBFREECK */
+ SET_ONOFF("mpuwd_ck", 0) /* EN_WDTCK */
+ SET_ONOFF("armxor_ck", 1) /* EN_XORPCK */
+ SET_ONOFF("mpuper_ck", 2) /* EN_PERCK */
+ SET_ONOFF("lcd_ck", 3) /* EN_LCDCK */
+ SET_ONOFF("lb_ck", 4) /* EN_LBCK */
+ SET_ONOFF("hsab_ck", 5) /* EN_HSABCK */
+ SET_ONOFF("mpui_ck", 6) /* EN_APICK */
+ SET_ONOFF("armtim_ck", 7) /* EN_TIMCK */
+ SET_CANIDLE("dma_ck", 8) /* DMACK_REQ */
+ SET_ONOFF("arm_gpio_ck", 9) /* EN_GPIOCK */
+ SET_ONOFF("lbfree_ck", 10) /* EN_LBFREECK */
}
static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s,
@@ -1613,7 +1615,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s,
{
omap_clk clk;
- if (diff & (3 << 4)) { /* TCLKOUT */
+ if (diff & (3 << 4)) { /* TCLKOUT */
clk = omap_findclk(s, "tclk_out");
switch ((value >> 4) & 3) {
case 1:
@@ -1628,7 +1630,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s,
omap_clk_onoff(clk, 0);
}
}
- if (diff & (3 << 2)) { /* DCLKOUT */
+ if (diff & (3 << 2)) { /* DCLKOUT */
clk = omap_findclk(s, "dclk_out");
switch ((value >> 2) & 3) {
case 0:
@@ -1645,7 +1647,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s,
break;
}
}
- if (diff & (3 << 0)) { /* ACLKOUT */
+ if (diff & (3 << 0)) { /* ACLKOUT */
clk = omap_findclk(s, "aclk_out");
switch ((value >> 0) & 3) {
case 1:
@@ -1683,66 +1685,66 @@ static void omap_clkm_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x00: /* ARM_CKCTL */
+ case 0x00: /* ARM_CKCTL */
diff = s->clkm.arm_ckctl ^ value;
s->clkm.arm_ckctl = value & 0x7fff;
omap_clkm_ckctl_update(s, diff, value);
return;
- case 0x04: /* ARM_IDLECT1 */
+ case 0x04: /* ARM_IDLECT1 */
diff = s->clkm.arm_idlect1 ^ value;
s->clkm.arm_idlect1 = value & 0x0fff;
omap_clkm_idlect1_update(s, diff, value);
return;
- case 0x08: /* ARM_IDLECT2 */
+ case 0x08: /* ARM_IDLECT2 */
diff = s->clkm.arm_idlect2 ^ value;
s->clkm.arm_idlect2 = value & 0x07ff;
omap_clkm_idlect2_update(s, diff, value);
return;
- case 0x0c: /* ARM_EWUPCT */
+ case 0x0c: /* ARM_EWUPCT */
s->clkm.arm_ewupct = value & 0x003f;
return;
- case 0x10: /* ARM_RSTCT1 */
+ case 0x10: /* ARM_RSTCT1 */
diff = s->clkm.arm_rstct1 ^ value;
s->clkm.arm_rstct1 = value & 0x0007;
if (value & 9) {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
s->clkm.cold_start = 0xa;
}
- if (diff & ~value & 4) { /* DSP_RST */
+ if (diff & ~value & 4) { /* DSP_RST */
omap_mpui_reset(s);
omap_tipb_bridge_reset(s->private_tipb);
omap_tipb_bridge_reset(s->public_tipb);
}
- if (diff & 2) { /* DSP_EN */
+ if (diff & 2) { /* DSP_EN */
clk = omap_findclk(s, "dsp_ck");
omap_clk_canidle(clk, (~value >> 1) & 1);
}
return;
- case 0x14: /* ARM_RSTCT2 */
+ case 0x14: /* ARM_RSTCT2 */
s->clkm.arm_rstct2 = value & 0x0001;
return;
- case 0x18: /* ARM_SYSST */
+ case 0x18: /* ARM_SYSST */
if ((s->clkm.clocking_scheme ^ (value >> 11)) & 7) {
s->clkm.clocking_scheme = (value >> 11) & 7;
- printf("%s: clocking scheme set to %s\n", __func__,
+ trace_omap1_pwl_clocking_scheme(
clkschemename[s->clkm.clocking_scheme]);
}
s->clkm.cold_start &= value & 0x3f;
return;
- case 0x1c: /* ARM_CKOUT1 */
+ case 0x1c: /* ARM_CKOUT1 */
diff = s->clkm.arm_ckout1 ^ value;
s->clkm.arm_ckout1 = value & 0x003f;
omap_clkm_ckout1_update(s, diff, value);
return;
- case 0x20: /* ARM_CKOUT2 */
+ case 0x20: /* ARM_CKOUT2 */
default:
OMAP_BAD_REG(addr);
}
@@ -1765,16 +1767,16 @@ static uint64_t omap_clkdsp_read(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x04: /* DSP_IDLECT1 */
+ case 0x04: /* DSP_IDLECT1 */
return s->clkm.dsp_idlect1;
- case 0x08: /* DSP_IDLECT2 */
+ case 0x08: /* DSP_IDLECT2 */
return s->clkm.dsp_idlect2;
- case 0x14: /* DSP_RSTCT2 */
+ case 0x14: /* DSP_RSTCT2 */
return s->clkm.dsp_rstct2;
- case 0x18: /* DSP_SYSST */
+ case 0x18: /* DSP_SYSST */
return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start |
(cpu->halted << 6); /* Quite useless... */
}
@@ -1788,7 +1790,7 @@ static inline void omap_clkdsp_idlect1_update(struct omap_mpu_state_s *s,
{
omap_clk clk;
- SET_CANIDLE("dspxor_ck", 1); /* IDLXORP_DSP */
+ SET_CANIDLE("dspxor_ck", 1); /* IDLXORP_DSP */
}
static inline void omap_clkdsp_idlect2_update(struct omap_mpu_state_s *s,
@@ -1796,7 +1798,7 @@ static inline void omap_clkdsp_idlect2_update(struct omap_mpu_state_s *s,
{
omap_clk clk;
- SET_ONOFF("dspxor_ck", 1); /* EN_XORPCK */
+ SET_ONOFF("dspxor_ck", 1); /* EN_XORPCK */
}
static void omap_clkdsp_write(void *opaque, hwaddr addr,
@@ -1811,23 +1813,23 @@ static void omap_clkdsp_write(void *opaque, hwaddr addr,
}
switch (addr) {
- case 0x04: /* DSP_IDLECT1 */
+ case 0x04: /* DSP_IDLECT1 */
diff = s->clkm.dsp_idlect1 ^ value;
s->clkm.dsp_idlect1 = value & 0x01f7;
omap_clkdsp_idlect1_update(s, diff, value);
break;
- case 0x08: /* DSP_IDLECT2 */
+ case 0x08: /* DSP_IDLECT2 */
s->clkm.dsp_idlect2 = value & 0x0037;
diff = s->clkm.dsp_idlect1 ^ value;
omap_clkdsp_idlect2_update(s, diff, value);
break;
- case 0x14: /* DSP_RSTCT2 */
+ case 0x14: /* DSP_RSTCT2 */
s->clkm.dsp_rstct2 = value & 0x0001;
break;
- case 0x18: /* DSP_SYSST */
+ case 0x18: /* DSP_SYSST */
s->clkm.cold_start &= value & 0x3f;
break;
@@ -1926,8 +1928,8 @@ static void omap_mpuio_set(void *opaque, int line, int level)
qemu_irq_raise(s->irq);
/* TODO: wakeup */
}
- if ((s->event & (1 << 0)) && /* SET_GPIO_EVENT_MODE */
- (s->event >> 1) == line) /* PIN_SELECT */
+ if ((s->event & (1 << 0)) && /* SET_GPIO_EVENT_MODE */
+ (s->event >> 1) == line) /* PIN_SELECT */
s->latch = s->inputs;
}
}
@@ -1957,47 +1959,47 @@ static uint64_t omap_mpuio_read(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* INPUT_LATCH */
+ case 0x00: /* INPUT_LATCH */
return s->inputs;
- case 0x04: /* OUTPUT_REG */
+ case 0x04: /* OUTPUT_REG */
return s->outputs;
- case 0x08: /* IO_CNTL */
+ case 0x08: /* IO_CNTL */
return s->dir;
- case 0x10: /* KBR_LATCH */
+ case 0x10: /* KBR_LATCH */
return s->row_latch;
- case 0x14: /* KBC_REG */
+ case 0x14: /* KBC_REG */
return s->cols;
- case 0x18: /* GPIO_EVENT_MODE_REG */
+ case 0x18: /* GPIO_EVENT_MODE_REG */
return s->event;
- case 0x1c: /* GPIO_INT_EDGE_REG */
+ case 0x1c: /* GPIO_INT_EDGE_REG */
return s->edge;
- case 0x20: /* KBD_INT */
+ case 0x20: /* KBD_INT */
return (~s->row_latch & 0x1f) && !s->kbd_mask;
- case 0x24: /* GPIO_INT */
+ case 0x24: /* GPIO_INT */
ret = s->ints;
s->ints &= s->mask;
if (ret)
qemu_irq_lower(s->irq);
return ret;
- case 0x28: /* KBD_MASKIT */
+ case 0x28: /* KBD_MASKIT */
return s->kbd_mask;
- case 0x2c: /* GPIO_MASKIT */
+ case 0x2c: /* GPIO_MASKIT */
return s->mask;
- case 0x30: /* GPIO_DEBOUNCING_REG */
+ case 0x30: /* GPIO_DEBOUNCING_REG */
return s->debounce;
- case 0x34: /* GPIO_LATCH_REG */
+ case 0x34: /* GPIO_LATCH_REG */
return s->latch;
}
@@ -2019,7 +2021,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x04: /* OUTPUT_REG */
+ case 0x04: /* OUTPUT_REG */
diff = (s->outputs ^ value) & ~s->dir;
s->outputs = value;
while ((ln = ctz32(diff)) != 32) {
@@ -2029,7 +2031,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr,
}
break;
- case 0x08: /* IO_CNTL */
+ case 0x08: /* IO_CNTL */
diff = s->outputs & (s->dir ^ value);
s->dir = value;
@@ -2041,37 +2043,37 @@ static void omap_mpuio_write(void *opaque, hwaddr addr,
}
break;
- case 0x14: /* KBC_REG */
+ case 0x14: /* KBC_REG */
s->cols = value;
omap_mpuio_kbd_update(s);
break;
- case 0x18: /* GPIO_EVENT_MODE_REG */
+ case 0x18: /* GPIO_EVENT_MODE_REG */
s->event = value & 0x1f;
break;
- case 0x1c: /* GPIO_INT_EDGE_REG */
+ case 0x1c: /* GPIO_INT_EDGE_REG */
s->edge = value;
break;
- case 0x28: /* KBD_MASKIT */
+ case 0x28: /* KBD_MASKIT */
s->kbd_mask = value & 1;
omap_mpuio_kbd_update(s);
break;
- case 0x2c: /* GPIO_MASKIT */
+ case 0x2c: /* GPIO_MASKIT */
s->mask = value;
break;
- case 0x30: /* GPIO_DEBOUNCING_REG */
+ case 0x30: /* GPIO_DEBOUNCING_REG */
s->debounce = value & 0x1ff;
break;
- case 0x00: /* INPUT_LATCH */
- case 0x10: /* KBR_LATCH */
- case 0x20: /* KBD_INT */
- case 0x24: /* GPIO_INT */
- case 0x34: /* GPIO_LATCH_REG */
+ case 0x00: /* INPUT_LATCH */
+ case 0x10: /* KBR_LATCH */
+ case 0x20: /* KBD_INT */
+ case 0x24: /* GPIO_INT */
+ case 0x34: /* GPIO_LATCH_REG */
OMAP_RO_REG(addr);
return;
@@ -2170,30 +2172,28 @@ struct omap_uwire_s {
uint16_t rxbuf;
uint16_t control;
uint16_t setup[5];
-
- uWireSlave *chip[4];
};
static void omap_uwire_transfer_start(struct omap_uwire_s *s)
{
- int chipselect = (s->control >> 10) & 3; /* INDEX */
- uWireSlave *slave = s->chip[chipselect];
+ int chipselect = (s->control >> 10) & 3; /* INDEX */
- if ((s->control >> 5) & 0x1f) { /* NB_BITS_WR */
- if (s->control & (1 << 12)) /* CS_CMD */
- if (slave && slave->send)
- slave->send(slave->opaque,
- s->txbuf >> (16 - ((s->control >> 5) & 0x1f)));
- s->control &= ~(1 << 14); /* CSRB */
+ if ((s->control >> 5) & 0x1f) { /* NB_BITS_WR */
+ if (s->control & (1 << 12)) { /* CS_CMD */
+ qemu_log_mask(LOG_UNIMP, "uWireSlave TX CS:%d data:0x%04x\n",
+ chipselect,
+ s->txbuf >> (16 - ((s->control >> 5) & 0x1f)));
+ }
+ s->control &= ~(1 << 14); /* CSRB */
/* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or
* a DRQ. When is the level IRQ supposed to be reset? */
}
- if ((s->control >> 0) & 0x1f) { /* NB_BITS_RD */
- if (s->control & (1 << 12)) /* CS_CMD */
- if (slave && slave->receive)
- s->rxbuf = slave->receive(slave->opaque);
- s->control |= 1 << 15; /* RDRB */
+ if ((s->control >> 0) & 0x1f) { /* NB_BITS_RD */
+ if (s->control & (1 << 12)) { /* CS_CMD */
+ qemu_log_mask(LOG_UNIMP, "uWireSlave RX CS:%d\n", chipselect);
+ }
+ s->control |= 1 << 15; /* RDRB */
/* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or
* a DRQ. When is the level IRQ supposed to be reset? */
}
@@ -2209,22 +2209,22 @@ static uint64_t omap_uwire_read(void *opaque, hwaddr addr, unsigned size)
}
switch (offset) {
- case 0x00: /* RDR */
- s->control &= ~(1 << 15); /* RDRB */
+ case 0x00: /* RDR */
+ s->control &= ~(1 << 15); /* RDRB */
return s->rxbuf;
- case 0x04: /* CSR */
+ case 0x04: /* CSR */
return s->control;
- case 0x08: /* SR1 */
+ case 0x08: /* SR1 */
return s->setup[0];
- case 0x0c: /* SR2 */
+ case 0x0c: /* SR2 */
return s->setup[1];
- case 0x10: /* SR3 */
+ case 0x10: /* SR3 */
return s->setup[2];
- case 0x14: /* SR4 */
+ case 0x14: /* SR4 */
return s->setup[3];
- case 0x18: /* SR5 */
+ case 0x18: /* SR5 */
return s->setup[4];
}
@@ -2244,39 +2244,39 @@ static void omap_uwire_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* TDR */
- s->txbuf = value; /* TD */
- if ((s->setup[4] & (1 << 2)) && /* AUTO_TX_EN */
- ((s->setup[4] & (1 << 3)) || /* CS_TOGGLE_TX_EN */
- (s->control & (1 << 12)))) { /* CS_CMD */
- s->control |= 1 << 14; /* CSRB */
+ case 0x00: /* TDR */
+ s->txbuf = value; /* TD */
+ if ((s->setup[4] & (1 << 2)) && /* AUTO_TX_EN */
+ ((s->setup[4] & (1 << 3)) || /* CS_TOGGLE_TX_EN */
+ (s->control & (1 << 12)))) { /* CS_CMD */
+ s->control |= 1 << 14; /* CSRB */
omap_uwire_transfer_start(s);
}
break;
- case 0x04: /* CSR */
+ case 0x04: /* CSR */
s->control = value & 0x1fff;
- if (value & (1 << 13)) /* START */
+ if (value & (1 << 13)) /* START */
omap_uwire_transfer_start(s);
break;
- case 0x08: /* SR1 */
+ case 0x08: /* SR1 */
s->setup[0] = value & 0x003f;
break;
- case 0x0c: /* SR2 */
+ case 0x0c: /* SR2 */
s->setup[1] = value & 0x0fc0;
break;
- case 0x10: /* SR3 */
+ case 0x10: /* SR3 */
s->setup[2] = value & 0x0003;
break;
- case 0x14: /* SR4 */
+ case 0x14: /* SR4 */
s->setup[3] = value & 0x0001;
break;
- case 0x18: /* SR5 */
+ case 0x18: /* SR5 */
s->setup[4] = value & 0x000f;
break;
@@ -2321,17 +2321,6 @@ static struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory,
return s;
}
-void omap_uwire_attach(struct omap_uwire_s *s,
- uWireSlave *slave, int chipselect)
-{
- if (chipselect < 0 || chipselect > 3) {
- error_report("%s: Bad chipselect %i", __func__, chipselect);
- exit(-1);
- }
-
- s->chip[chipselect] = slave;
-}
-
/* Pseudonoise Pulse-Width Light Modulator */
struct omap_pwl_s {
MemoryRegion iomem;
@@ -2347,7 +2336,7 @@ static void omap_pwl_update(struct omap_pwl_s *s)
if (output != s->output) {
s->output = output;
- printf("%s: Backlight now at %i/256\n", __func__, output);
+ trace_omap1_pwl_backlight(output);
}
}
@@ -2361,9 +2350,9 @@ static uint64_t omap_pwl_read(void *opaque, hwaddr addr, unsigned size)
}
switch (offset) {
- case 0x00: /* PWL_LEVEL */
+ case 0x00: /* PWL_LEVEL */
return s->level;
- case 0x04: /* PWL_CTRL */
+ case 0x04: /* PWL_CTRL */
return s->enable;
}
OMAP_BAD_REG(addr);
@@ -2382,11 +2371,11 @@ static void omap_pwl_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* PWL_LEVEL */
+ case 0x00: /* PWL_LEVEL */
s->level = value;
omap_pwl_update(s);
break;
- case 0x04: /* PWL_CTRL */
+ case 0x04: /* PWL_CTRL */
s->enable = value & 1;
omap_pwl_update(s);
break;
@@ -2454,11 +2443,11 @@ static uint64_t omap_pwt_read(void *opaque, hwaddr addr, unsigned size)
}
switch (offset) {
- case 0x00: /* FRC */
+ case 0x00: /* FRC */
return s->frc;
- case 0x04: /* VCR */
+ case 0x04: /* VCR */
return s->vrc;
- case 0x08: /* GCR */
+ case 0x08: /* GCR */
return s->gcr;
}
OMAP_BAD_REG(addr);
@@ -2477,13 +2466,13 @@ static void omap_pwt_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* FRC */
+ case 0x00: /* FRC */
s->frc = value & 0x3f;
break;
- case 0x04: /* VRC */
+ case 0x04: /* VRC */
if ((value ^ s->vrc) & 1) {
- if (value & 1)
- printf("%s: %iHz buzz on\n", __func__, (int)
+ if (value & 1) {
+ trace_omap1_pwt_buzz(
/* 1.5 MHz from a 12-MHz or 13-MHz PWT_CLK */
((omap_clk_getrate(s->clk) >> 3) /
/* Pre-multiplexer divider */
@@ -2499,12 +2488,13 @@ static void omap_pwt_write(void *opaque, hwaddr addr,
/* 80/127 divider */
((value & (1 << 5)) ? 80 : 127) /
(107 * 55 * 63 * 127)));
- else
- printf("%s: silence!\n", __func__);
+ } else {
+ trace_omap1_pwt_silence();
+ }
}
s->vrc = value & 0x7f;
break;
- case 0x08: /* GCR */
+ case 0x08: /* GCR */
s->gcr = value & 3;
break;
default:
@@ -2571,8 +2561,9 @@ static void omap_rtc_interrupts_update(struct omap_rtc_s *s)
static void omap_rtc_alarm_update(struct omap_rtc_s *s)
{
s->alarm_ti = mktimegm(&s->alarm_tm);
- if (s->alarm_ti == -1)
- printf("%s: conversion failed\n", __func__);
+ if (s->alarm_ti == -1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: conversion failed\n", __func__);
+ }
}
static uint64_t omap_rtc_read(void *opaque, hwaddr addr, unsigned size)
@@ -2586,69 +2577,69 @@ static uint64_t omap_rtc_read(void *opaque, hwaddr addr, unsigned size)
}
switch (offset) {
- case 0x00: /* SECONDS_REG */
+ case 0x00: /* SECONDS_REG */
return to_bcd(s->current_tm.tm_sec);
- case 0x04: /* MINUTES_REG */
+ case 0x04: /* MINUTES_REG */
return to_bcd(s->current_tm.tm_min);
- case 0x08: /* HOURS_REG */
+ case 0x08: /* HOURS_REG */
if (s->pm_am)
return ((s->current_tm.tm_hour > 11) << 7) |
to_bcd(((s->current_tm.tm_hour - 1) % 12) + 1);
else
return to_bcd(s->current_tm.tm_hour);
- case 0x0c: /* DAYS_REG */
+ case 0x0c: /* DAYS_REG */
return to_bcd(s->current_tm.tm_mday);
- case 0x10: /* MONTHS_REG */
+ case 0x10: /* MONTHS_REG */
return to_bcd(s->current_tm.tm_mon + 1);
- case 0x14: /* YEARS_REG */
+ case 0x14: /* YEARS_REG */
return to_bcd(s->current_tm.tm_year % 100);
- case 0x18: /* WEEK_REG */
+ case 0x18: /* WEEK_REG */
return s->current_tm.tm_wday;
- case 0x20: /* ALARM_SECONDS_REG */
+ case 0x20: /* ALARM_SECONDS_REG */
return to_bcd(s->alarm_tm.tm_sec);
- case 0x24: /* ALARM_MINUTES_REG */
+ case 0x24: /* ALARM_MINUTES_REG */
return to_bcd(s->alarm_tm.tm_min);
- case 0x28: /* ALARM_HOURS_REG */
+ case 0x28: /* ALARM_HOURS_REG */
if (s->pm_am)
return ((s->alarm_tm.tm_hour > 11) << 7) |
to_bcd(((s->alarm_tm.tm_hour - 1) % 12) + 1);
else
return to_bcd(s->alarm_tm.tm_hour);
- case 0x2c: /* ALARM_DAYS_REG */
+ case 0x2c: /* ALARM_DAYS_REG */
return to_bcd(s->alarm_tm.tm_mday);
- case 0x30: /* ALARM_MONTHS_REG */
+ case 0x30: /* ALARM_MONTHS_REG */
return to_bcd(s->alarm_tm.tm_mon + 1);
- case 0x34: /* ALARM_YEARS_REG */
+ case 0x34: /* ALARM_YEARS_REG */
return to_bcd(s->alarm_tm.tm_year % 100);
- case 0x40: /* RTC_CTRL_REG */
+ case 0x40: /* RTC_CTRL_REG */
return (s->pm_am << 3) | (s->auto_comp << 2) |
(s->round << 1) | s->running;
- case 0x44: /* RTC_STATUS_REG */
+ case 0x44: /* RTC_STATUS_REG */
i = s->status;
s->status &= ~0x3d;
return i;
- case 0x48: /* RTC_INTERRUPTS_REG */
+ case 0x48: /* RTC_INTERRUPTS_REG */
return s->interrupts;
- case 0x4c: /* RTC_COMP_LSB_REG */
+ case 0x4c: /* RTC_COMP_LSB_REG */
return ((uint16_t) s->comp_reg) & 0xff;
- case 0x50: /* RTC_COMP_MSB_REG */
+ case 0x50: /* RTC_COMP_MSB_REG */
return ((uint16_t) s->comp_reg) >> 8;
}
@@ -2670,26 +2661,17 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* SECONDS_REG */
-#ifdef ALMDEBUG
- printf("RTC SEC_REG <-- %02x\n", value);
-#endif
+ case 0x00: /* SECONDS_REG */
s->ti -= s->current_tm.tm_sec;
s->ti += from_bcd(value);
return;
- case 0x04: /* MINUTES_REG */
-#ifdef ALMDEBUG
- printf("RTC MIN_REG <-- %02x\n", value);
-#endif
+ case 0x04: /* MINUTES_REG */
s->ti -= s->current_tm.tm_min * 60;
s->ti += from_bcd(value) * 60;
return;
- case 0x08: /* HOURS_REG */
-#ifdef ALMDEBUG
- printf("RTC HRS_REG <-- %02x\n", value);
-#endif
+ case 0x08: /* HOURS_REG */
s->ti -= s->current_tm.tm_hour * 3600;
if (s->pm_am) {
s->ti += (from_bcd(value & 0x3f) & 12) * 3600;
@@ -2698,18 +2680,12 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
s->ti += from_bcd(value & 0x3f) * 3600;
return;
- case 0x0c: /* DAYS_REG */
-#ifdef ALMDEBUG
- printf("RTC DAY_REG <-- %02x\n", value);
-#endif
+ case 0x0c: /* DAYS_REG */
s->ti -= s->current_tm.tm_mday * 86400;
s->ti += from_bcd(value) * 86400;
return;
- case 0x10: /* MONTHS_REG */
-#ifdef ALMDEBUG
- printf("RTC MTH_REG <-- %02x\n", value);
-#endif
+ case 0x10: /* MONTHS_REG */
memcpy(&new_tm, &s->current_tm, sizeof(new_tm));
new_tm.tm_mon = from_bcd(value);
ti[0] = mktimegm(&s->current_tm);
@@ -2725,10 +2701,7 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
}
return;
- case 0x14: /* YEARS_REG */
-#ifdef ALMDEBUG
- printf("RTC YRS_REG <-- %02x\n", value);
-#endif
+ case 0x14: /* YEARS_REG */
memcpy(&new_tm, &s->current_tm, sizeof(new_tm));
new_tm.tm_year += from_bcd(value) - (new_tm.tm_year % 100);
ti[0] = mktimegm(&s->current_tm);
@@ -2744,29 +2717,20 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
}
return;
- case 0x18: /* WEEK_REG */
- return; /* Ignored */
+ case 0x18: /* WEEK_REG */
+ return; /* Ignored */
- case 0x20: /* ALARM_SECONDS_REG */
-#ifdef ALMDEBUG
- printf("ALM SEC_REG <-- %02x\n", value);
-#endif
+ case 0x20: /* ALARM_SECONDS_REG */
s->alarm_tm.tm_sec = from_bcd(value);
omap_rtc_alarm_update(s);
return;
- case 0x24: /* ALARM_MINUTES_REG */
-#ifdef ALMDEBUG
- printf("ALM MIN_REG <-- %02x\n", value);
-#endif
+ case 0x24: /* ALARM_MINUTES_REG */
s->alarm_tm.tm_min = from_bcd(value);
omap_rtc_alarm_update(s);
return;
- case 0x28: /* ALARM_HOURS_REG */
-#ifdef ALMDEBUG
- printf("ALM HRS_REG <-- %02x\n", value);
-#endif
+ case 0x28: /* ALARM_HOURS_REG */
if (s->pm_am)
s->alarm_tm.tm_hour =
((from_bcd(value & 0x3f)) % 12) +
@@ -2776,34 +2740,22 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
omap_rtc_alarm_update(s);
return;
- case 0x2c: /* ALARM_DAYS_REG */
-#ifdef ALMDEBUG
- printf("ALM DAY_REG <-- %02x\n", value);
-#endif
+ case 0x2c: /* ALARM_DAYS_REG */
s->alarm_tm.tm_mday = from_bcd(value);
omap_rtc_alarm_update(s);
return;
- case 0x30: /* ALARM_MONTHS_REG */
-#ifdef ALMDEBUG
- printf("ALM MON_REG <-- %02x\n", value);
-#endif
+ case 0x30: /* ALARM_MONTHS_REG */
s->alarm_tm.tm_mon = from_bcd(value);
omap_rtc_alarm_update(s);
return;
- case 0x34: /* ALARM_YEARS_REG */
-#ifdef ALMDEBUG
- printf("ALM YRS_REG <-- %02x\n", value);
-#endif
+ case 0x34: /* ALARM_YEARS_REG */
s->alarm_tm.tm_year = from_bcd(value);
omap_rtc_alarm_update(s);
return;
- case 0x40: /* RTC_CTRL_REG */
-#ifdef ALMDEBUG
- printf("RTC CONTROL <-- %02x\n", value);
-#endif
+ case 0x40: /* RTC_CTRL_REG */
s->pm_am = (value >> 3) & 1;
s->auto_comp = (value >> 2) & 1;
s->round = (value >> 1) & 1;
@@ -2812,33 +2764,21 @@ static void omap_rtc_write(void *opaque, hwaddr addr,
s->status |= s->running << 1;
return;
- case 0x44: /* RTC_STATUS_REG */
-#ifdef ALMDEBUG
- printf("RTC STATUSL <-- %02x\n", value);
-#endif
+ case 0x44: /* RTC_STATUS_REG */
s->status &= ~((value & 0xc0) ^ 0x80);
omap_rtc_interrupts_update(s);
return;
- case 0x48: /* RTC_INTERRUPTS_REG */
-#ifdef ALMDEBUG
- printf("RTC INTRS <-- %02x\n", value);
-#endif
+ case 0x48: /* RTC_INTERRUPTS_REG */
s->interrupts = value;
return;
- case 0x4c: /* RTC_COMP_LSB_REG */
-#ifdef ALMDEBUG
- printf("RTC COMPLSB <-- %02x\n", value);
-#endif
+ case 0x4c: /* RTC_COMP_LSB_REG */
s->comp_reg &= 0xff00;
s->comp_reg |= 0x00ff & value;
return;
- case 0x50: /* RTC_COMP_MSB_REG */
-#ifdef ALMDEBUG
- printf("RTC COMPMSB <-- %02x\n", value);
-#endif
+ case 0x50: /* RTC_COMP_MSB_REG */
s->comp_reg &= 0x00ff;
s->comp_reg |= 0xff00 & (value << 8);
return;
@@ -2989,12 +2929,12 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s)
{
int irq;
- switch ((s->spcr[0] >> 4) & 3) { /* RINTM */
+ switch ((s->spcr[0] >> 4) & 3) { /* RINTM */
case 0:
- irq = (s->spcr[0] >> 1) & 1; /* RRDY */
+ irq = (s->spcr[0] >> 1) & 1; /* RRDY */
break;
case 3:
- irq = (s->spcr[0] >> 3) & 1; /* RSYNCERR */
+ irq = (s->spcr[0] >> 3) & 1; /* RSYNCERR */
break;
default:
irq = 0;
@@ -3004,12 +2944,12 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s)
if (irq)
qemu_irq_pulse(s->rxirq);
- switch ((s->spcr[1] >> 4) & 3) { /* XINTM */
+ switch ((s->spcr[1] >> 4) & 3) { /* XINTM */
case 0:
- irq = (s->spcr[1] >> 1) & 1; /* XRDY */
+ irq = (s->spcr[1] >> 1) & 1; /* XRDY */
break;
case 3:
- irq = (s->spcr[1] >> 3) & 1; /* XSYNCERR */
+ irq = (s->spcr[1] >> 3) & 1; /* XSYNCERR */
break;
default:
irq = 0;
@@ -3022,9 +2962,9 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s)
static void omap_mcbsp_rx_newdata(struct omap_mcbsp_s *s)
{
- if ((s->spcr[0] >> 1) & 1) /* RRDY */
- s->spcr[0] |= 1 << 2; /* RFULL */
- s->spcr[0] |= 1 << 1; /* RRDY */
+ if ((s->spcr[0] >> 1) & 1) /* RRDY */
+ s->spcr[0] |= 1 << 2; /* RFULL */
+ s->spcr[0] |= 1 << 1; /* RRDY */
qemu_irq_raise(s->rxdrq);
omap_mcbsp_intr_update(s);
}
@@ -3036,8 +2976,9 @@ static void omap_mcbsp_source_tick(void *opaque)
if (!s->rx_rate)
return;
- if (s->rx_req)
- printf("%s: Rx FIFO overrun\n", __func__);
+ if (s->rx_req) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Rx FIFO overrun\n", __func__);
+ }
s->rx_req = s->rx_rate << bps[(s->rcr[0] >> 5) & 7];
@@ -3063,14 +3004,14 @@ static void omap_mcbsp_rx_stop(struct omap_mcbsp_s *s)
static void omap_mcbsp_rx_done(struct omap_mcbsp_s *s)
{
- s->spcr[0] &= ~(1 << 1); /* RRDY */
+ s->spcr[0] &= ~(1 << 1); /* RRDY */
qemu_irq_lower(s->rxdrq);
omap_mcbsp_intr_update(s);
}
static void omap_mcbsp_tx_newdata(struct omap_mcbsp_s *s)
{
- s->spcr[1] |= 1 << 1; /* XRDY */
+ s->spcr[1] |= 1 << 1; /* XRDY */
qemu_irq_raise(s->txdrq);
omap_mcbsp_intr_update(s);
}
@@ -3082,8 +3023,9 @@ static void omap_mcbsp_sink_tick(void *opaque)
if (!s->tx_rate)
return;
- if (s->tx_req)
- printf("%s: Tx FIFO underrun\n", __func__);
+ if (s->tx_req) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO underrun\n", __func__);
+ }
s->tx_req = s->tx_rate << bps[(s->xcr[0] >> 5) & 7];
@@ -3104,7 +3046,7 @@ static void omap_mcbsp_tx_start(struct omap_mcbsp_s *s)
static void omap_mcbsp_tx_done(struct omap_mcbsp_s *s)
{
- s->spcr[1] &= ~(1 << 1); /* XRDY */
+ s->spcr[1] &= ~(1 << 1); /* XRDY */
qemu_irq_lower(s->txdrq);
omap_mcbsp_intr_update(s);
if (s->codec && s->codec->cts)
@@ -3122,27 +3064,27 @@ static void omap_mcbsp_req_update(struct omap_mcbsp_s *s)
{
int prev_rx_rate, prev_tx_rate;
int rx_rate = 0, tx_rate = 0;
- int cpu_rate = 1500000; /* XXX */
+ int cpu_rate = 1500000; /* XXX */
/* TODO: check CLKSTP bit */
- if (s->spcr[1] & (1 << 6)) { /* GRST */
- if (s->spcr[0] & (1 << 0)) { /* RRST */
- if ((s->srgr[1] & (1 << 13)) && /* CLKSM */
- (s->pcr & (1 << 8))) { /* CLKRM */
- if (~s->pcr & (1 << 7)) /* SCLKME */
+ if (s->spcr[1] & (1 << 6)) { /* GRST */
+ if (s->spcr[0] & (1 << 0)) { /* RRST */
+ if ((s->srgr[1] & (1 << 13)) && /* CLKSM */
+ (s->pcr & (1 << 8))) { /* CLKRM */
+ if (~s->pcr & (1 << 7)) /* SCLKME */
rx_rate = cpu_rate /
- ((s->srgr[0] & 0xff) + 1); /* CLKGDV */
+ ((s->srgr[0] & 0xff) + 1); /* CLKGDV */
} else
if (s->codec)
rx_rate = s->codec->rx_rate;
}
- if (s->spcr[1] & (1 << 0)) { /* XRST */
- if ((s->srgr[1] & (1 << 13)) && /* CLKSM */
- (s->pcr & (1 << 9))) { /* CLKXM */
- if (~s->pcr & (1 << 7)) /* SCLKME */
+ if (s->spcr[1] & (1 << 0)) { /* XRST */
+ if ((s->srgr[1] & (1 << 13)) && /* CLKSM */
+ (s->pcr & (1 << 9))) { /* CLKXM */
+ if (~s->pcr & (1 << 7)) /* SCLKME */
tx_rate = cpu_rate /
- ((s->srgr[0] & 0xff) + 1); /* CLKGDV */
+ ((s->srgr[0] & 0xff) + 1); /* CLKGDV */
} else
if (s->codec)
tx_rate = s->codec->tx_rate;
@@ -3179,13 +3121,13 @@ static uint64_t omap_mcbsp_read(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* DRR2 */
- if (((s->rcr[0] >> 5) & 7) < 3) /* RWDLEN1 */
+ case 0x00: /* DRR2 */
+ if (((s->rcr[0] >> 5) & 7) < 3) /* RWDLEN1 */
return 0x0000;
/* Fall through. */
- case 0x02: /* DRR1 */
+ case 0x02: /* DRR1 */
if (s->rx_req < 2) {
- printf("%s: Rx FIFO underrun\n", __func__);
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Rx FIFO underrun\n", __func__);
omap_mcbsp_rx_done(s);
} else {
s->tx_req -= 2;
@@ -3201,63 +3143,63 @@ static uint64_t omap_mcbsp_read(void *opaque, hwaddr addr,
}
return 0x0000;
- case 0x04: /* DXR2 */
- case 0x06: /* DXR1 */
+ case 0x04: /* DXR2 */
+ case 0x06: /* DXR1 */
return 0x0000;
- case 0x08: /* SPCR2 */
+ case 0x08: /* SPCR2 */
return s->spcr[1];
- case 0x0a: /* SPCR1 */
+ case 0x0a: /* SPCR1 */
return s->spcr[0];
- case 0x0c: /* RCR2 */
+ case 0x0c: /* RCR2 */
return s->rcr[1];
- case 0x0e: /* RCR1 */
+ case 0x0e: /* RCR1 */
return s->rcr[0];
- case 0x10: /* XCR2 */
+ case 0x10: /* XCR2 */
return s->xcr[1];
- case 0x12: /* XCR1 */
+ case 0x12: /* XCR1 */
return s->xcr[0];
- case 0x14: /* SRGR2 */
+ case 0x14: /* SRGR2 */
return s->srgr[1];
- case 0x16: /* SRGR1 */
+ case 0x16: /* SRGR1 */
return s->srgr[0];
- case 0x18: /* MCR2 */
+ case 0x18: /* MCR2 */
return s->mcr[1];
- case 0x1a: /* MCR1 */
+ case 0x1a: /* MCR1 */
return s->mcr[0];
- case 0x1c: /* RCERA */
+ case 0x1c: /* RCERA */
return s->rcer[0];
- case 0x1e: /* RCERB */
+ case 0x1e: /* RCERB */
return s->rcer[1];
- case 0x20: /* XCERA */
+ case 0x20: /* XCERA */
return s->xcer[0];
- case 0x22: /* XCERB */
+ case 0x22: /* XCERB */
return s->xcer[1];
- case 0x24: /* PCR0 */
+ case 0x24: /* PCR0 */
return s->pcr;
- case 0x26: /* RCERC */
+ case 0x26: /* RCERC */
return s->rcer[2];
- case 0x28: /* RCERD */
+ case 0x28: /* RCERD */
return s->rcer[3];
- case 0x2a: /* XCERC */
+ case 0x2a: /* XCERC */
return s->xcer[2];
- case 0x2c: /* XCERD */
+ case 0x2c: /* XCERD */
return s->xcer[3];
- case 0x2e: /* RCERE */
+ case 0x2e: /* RCERE */
return s->rcer[4];
- case 0x30: /* RCERF */
+ case 0x30: /* RCERF */
return s->rcer[5];
- case 0x32: /* XCERE */
+ case 0x32: /* XCERE */
return s->xcer[4];
- case 0x34: /* XCERF */
+ case 0x34: /* XCERF */
return s->xcer[5];
- case 0x36: /* RCERG */
+ case 0x36: /* RCERG */
return s->rcer[6];
- case 0x38: /* RCERH */
+ case 0x38: /* RCERH */
return s->rcer[7];
- case 0x3a: /* XCERG */
+ case 0x3a: /* XCERG */
return s->xcer[6];
- case 0x3c: /* XCERH */
+ case 0x3c: /* XCERH */
return s->xcer[7];
}
@@ -3272,16 +3214,16 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr,
int offset = addr & OMAP_MPUI_REG_MASK;
switch (offset) {
- case 0x00: /* DRR2 */
- case 0x02: /* DRR1 */
+ case 0x00: /* DRR2 */
+ case 0x02: /* DRR1 */
OMAP_RO_REG(addr);
return;
- case 0x04: /* DXR2 */
- if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */
+ case 0x04: /* DXR2 */
+ if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */
return;
/* Fall through. */
- case 0x06: /* DXR1 */
+ case 0x06: /* DXR1 */
if (s->tx_req > 1) {
s->tx_req -= 2;
if (s->codec && s->codec->cts) {
@@ -3290,24 +3232,28 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr,
}
if (s->tx_req < 2)
omap_mcbsp_tx_done(s);
- } else
- printf("%s: Tx FIFO overrun\n", __func__);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO overrun\n", __func__);
+ }
return;
- case 0x08: /* SPCR2 */
+ case 0x08: /* SPCR2 */
s->spcr[1] &= 0x0002;
s->spcr[1] |= 0x03f9 & value;
- s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */
- if (~value & 1) /* XRST */
+ s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */
+ if (~value & 1) /* XRST */
s->spcr[1] &= ~6;
omap_mcbsp_req_update(s);
return;
- case 0x0a: /* SPCR1 */
+ case 0x0a: /* SPCR1 */
s->spcr[0] &= 0x0006;
s->spcr[0] |= 0xf8f9 & value;
- if (value & (1 << 15)) /* DLB */
- printf("%s: Digital Loopback mode enable attempt\n", __func__);
- if (~value & 1) { /* RRST */
+ if (value & (1 << 15)) { /* DLB */
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Digital Loopback mode enable attempt\n",
+ __func__);
+ }
+ if (~value & 1) { /* RRST */
s->spcr[0] &= ~6;
s->rx_req = 0;
omap_mcbsp_rx_done(s);
@@ -3315,85 +3261,91 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr,
omap_mcbsp_req_update(s);
return;
- case 0x0c: /* RCR2 */
+ case 0x0c: /* RCR2 */
s->rcr[1] = value & 0xffff;
return;
- case 0x0e: /* RCR1 */
+ case 0x0e: /* RCR1 */
s->rcr[0] = value & 0x7fe0;
return;
- case 0x10: /* XCR2 */
+ case 0x10: /* XCR2 */
s->xcr[1] = value & 0xffff;
return;
- case 0x12: /* XCR1 */
+ case 0x12: /* XCR1 */
s->xcr[0] = value & 0x7fe0;
return;
- case 0x14: /* SRGR2 */
+ case 0x14: /* SRGR2 */
s->srgr[1] = value & 0xffff;
omap_mcbsp_req_update(s);
return;
- case 0x16: /* SRGR1 */
+ case 0x16: /* SRGR1 */
s->srgr[0] = value & 0xffff;
omap_mcbsp_req_update(s);
return;
- case 0x18: /* MCR2 */
+ case 0x18: /* MCR2 */
s->mcr[1] = value & 0x03e3;
- if (value & 3) /* XMCM */
- printf("%s: Tx channel selection mode enable attempt\n", __func__);
+ if (value & 3) { /* XMCM */
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Tx channel selection mode enable attempt\n",
+ __func__);
+ }
return;
- case 0x1a: /* MCR1 */
+ case 0x1a: /* MCR1 */
s->mcr[0] = value & 0x03e1;
- if (value & 1) /* RMCM */
- printf("%s: Rx channel selection mode enable attempt\n", __func__);
+ if (value & 1) { /* RMCM */
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Rx channel selection mode enable attempt\n",
+ __func__);
+ }
return;
- case 0x1c: /* RCERA */
+ case 0x1c: /* RCERA */
s->rcer[0] = value & 0xffff;
return;
- case 0x1e: /* RCERB */
+ case 0x1e: /* RCERB */
s->rcer[1] = value & 0xffff;
return;
- case 0x20: /* XCERA */
+ case 0x20: /* XCERA */
s->xcer[0] = value & 0xffff;
return;
- case 0x22: /* XCERB */
+ case 0x22: /* XCERB */
s->xcer[1] = value & 0xffff;
return;
- case 0x24: /* PCR0 */
+ case 0x24: /* PCR0 */
s->pcr = value & 0x7faf;
return;
- case 0x26: /* RCERC */
+ case 0x26: /* RCERC */
s->rcer[2] = value & 0xffff;
return;
- case 0x28: /* RCERD */
+ case 0x28: /* RCERD */
s->rcer[3] = value & 0xffff;
return;
- case 0x2a: /* XCERC */
+ case 0x2a: /* XCERC */
s->xcer[2] = value & 0xffff;
return;
- case 0x2c: /* XCERD */
+ case 0x2c: /* XCERD */
s->xcer[3] = value & 0xffff;
return;
- case 0x2e: /* RCERE */
+ case 0x2e: /* RCERE */
s->rcer[4] = value & 0xffff;
return;
- case 0x30: /* RCERF */
+ case 0x30: /* RCERF */
s->rcer[5] = value & 0xffff;
return;
- case 0x32: /* XCERE */
+ case 0x32: /* XCERE */
s->xcer[4] = value & 0xffff;
return;
- case 0x34: /* XCERF */
+ case 0x34: /* XCERF */
s->xcer[5] = value & 0xffff;
return;
- case 0x36: /* RCERG */
+ case 0x36: /* RCERG */
s->rcer[6] = value & 0xffff;
return;
- case 0x38: /* RCERH */
+ case 0x38: /* RCERH */
s->rcer[7] = value & 0xffff;
return;
- case 0x3a: /* XCERG */
+ case 0x3a: /* XCERG */
s->xcer[6] = value & 0xffff;
return;
- case 0x3c: /* XCERH */
+ case 0x3c: /* XCERH */
s->xcer[7] = value & 0xffff;
return;
}
@@ -3407,8 +3359,8 @@ static void omap_mcbsp_writew(void *opaque, hwaddr addr,
struct omap_mcbsp_s *s = opaque;
int offset = addr & OMAP_MPUI_REG_MASK;
- if (offset == 0x04) { /* DXR */
- if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */
+ if (offset == 0x04) { /* DXR */
+ if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */
return;
if (s->tx_req > 3) {
s->tx_req -= 4;
@@ -3424,8 +3376,9 @@ static void omap_mcbsp_writew(void *opaque, hwaddr addr,
}
if (s->tx_req < 4)
omap_mcbsp_tx_done(s);
- } else
- printf("%s: Tx FIFO overrun\n", __func__);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO overrun\n", __func__);
+ }
return;
}
@@ -3543,7 +3496,7 @@ static void omap_lpg_tick(void *opaque)
timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->on);
s->cycle = !s->cycle;
- printf("%s: LED is %s\n", __func__, s->cycle ? "on" : "off");
+ trace_omap1_lpg_led(s->cycle ? "on" : "off");
}
static void omap_lpg_update(struct omap_lpg_s *s)
@@ -3551,23 +3504,23 @@ static void omap_lpg_update(struct omap_lpg_s *s)
int64_t on, period = 1, ticks = 1000;
static const int per[8] = { 1, 2, 4, 8, 12, 16, 20, 24 };
- if (~s->control & (1 << 6)) /* LPGRES */
+ if (~s->control & (1 << 6)) /* LPGRES */
on = 0;
- else if (s->control & (1 << 7)) /* PERM_ON */
+ else if (s->control & (1 << 7)) /* PERM_ON */
on = period;
else {
- period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */
+ period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */
256 / 32);
on = (s->clk && s->power) ? muldiv64(ticks,
- per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */
+ per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */
}
timer_del(s->tm);
- if (on == period && s->on < s->period)
- printf("%s: LED is on\n", __func__);
- else if (on == 0 && s->on)
- printf("%s: LED is off\n", __func__);
- else if (on && (on != s->on || period != s->period)) {
+ if (on == period && s->on < s->period) {
+ trace_omap1_lpg_led("on");
+ } else if (on == 0 && s->on) {
+ trace_omap1_lpg_led("off");
+ } else if (on && (on != s->on || period != s->period)) {
s->cycle = 0;
s->on = on;
s->period = period;
@@ -3597,10 +3550,10 @@ static uint64_t omap_lpg_read(void *opaque, hwaddr addr, unsigned size)
}
switch (offset) {
- case 0x00: /* LCR */
+ case 0x00: /* LCR */
return s->control;
- case 0x04: /* PMR */
+ case 0x04: /* PMR */
return s->power;
}
@@ -3620,14 +3573,14 @@ static void omap_lpg_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* LCR */
- if (~value & (1 << 6)) /* LPGRES */
+ case 0x00: /* LCR */
+ if (~value & (1 << 6)) /* LPGRES */
omap_lpg_reset(s);
s->control = value & 0xff;
omap_lpg_update(s);
return;
- case 0x04: /* PMR */
+ case 0x04: /* PMR */
s->power = value & 0x01;
omap_lpg_update(s);
return;
@@ -3677,7 +3630,7 @@ static uint64_t omap_mpui_io_read(void *opaque, hwaddr addr,
return omap_badwidth_read16(opaque, addr);
}
- if (addr == OMAP_MPUI_BASE) /* CMR */
+ if (addr == OMAP_MPUI_BASE) /* CMR */
return 0xfe4d;
OMAP_BAD_REG(addr);
@@ -3729,7 +3682,6 @@ static void omap1_mpu_reset(void *opaque)
omap_uart_reset(mpu->uart[0]);
omap_uart_reset(mpu->uart[1]);
omap_uart_reset(mpu->uart[2]);
- omap_mmc_reset(mpu->mmc);
omap_mpuio_reset(mpu->mpuio);
omap_uwire_reset(mpu->microwire);
omap_pwl_reset(mpu->pwl);
@@ -3751,25 +3703,25 @@ static const struct omap_map_s {
const char *name;
} omap15xx_dsp_mm[] = {
/* Strobe 0 */
- { 0xe1010000, 0xfffb0000, 0x800, "UART1 BT" }, /* CS0 */
- { 0xe1010800, 0xfffb0800, 0x800, "UART2 COM" }, /* CS1 */
- { 0xe1011800, 0xfffb1800, 0x800, "McBSP1 audio" }, /* CS3 */
- { 0xe1012000, 0xfffb2000, 0x800, "MCSI2 communication" }, /* CS4 */
- { 0xe1012800, 0xfffb2800, 0x800, "MCSI1 BT u-Law" }, /* CS5 */
- { 0xe1013000, 0xfffb3000, 0x800, "uWire" }, /* CS6 */
- { 0xe1013800, 0xfffb3800, 0x800, "I^2C" }, /* CS7 */
- { 0xe1014000, 0xfffb4000, 0x800, "USB W2FC" }, /* CS8 */
- { 0xe1014800, 0xfffb4800, 0x800, "RTC" }, /* CS9 */
- { 0xe1015000, 0xfffb5000, 0x800, "MPUIO" }, /* CS10 */
- { 0xe1015800, 0xfffb5800, 0x800, "PWL" }, /* CS11 */
- { 0xe1016000, 0xfffb6000, 0x800, "PWT" }, /* CS12 */
- { 0xe1017000, 0xfffb7000, 0x800, "McBSP3" }, /* CS14 */
- { 0xe1017800, 0xfffb7800, 0x800, "MMC" }, /* CS15 */
- { 0xe1019000, 0xfffb9000, 0x800, "32-kHz timer" }, /* CS18 */
- { 0xe1019800, 0xfffb9800, 0x800, "UART3" }, /* CS19 */
- { 0xe101c800, 0xfffbc800, 0x800, "TIPB switches" }, /* CS25 */
+ { 0xe1010000, 0xfffb0000, 0x800, "UART1 BT" }, /* CS0 */
+ { 0xe1010800, 0xfffb0800, 0x800, "UART2 COM" }, /* CS1 */
+ { 0xe1011800, 0xfffb1800, 0x800, "McBSP1 audio" }, /* CS3 */
+ { 0xe1012000, 0xfffb2000, 0x800, "MCSI2 communication" }, /* CS4 */
+ { 0xe1012800, 0xfffb2800, 0x800, "MCSI1 BT u-Law" }, /* CS5 */
+ { 0xe1013000, 0xfffb3000, 0x800, "uWire" }, /* CS6 */
+ { 0xe1013800, 0xfffb3800, 0x800, "I^2C" }, /* CS7 */
+ { 0xe1014000, 0xfffb4000, 0x800, "USB W2FC" }, /* CS8 */
+ { 0xe1014800, 0xfffb4800, 0x800, "RTC" }, /* CS9 */
+ { 0xe1015000, 0xfffb5000, 0x800, "MPUIO" }, /* CS10 */
+ { 0xe1015800, 0xfffb5800, 0x800, "PWL" }, /* CS11 */
+ { 0xe1016000, 0xfffb6000, 0x800, "PWT" }, /* CS12 */
+ { 0xe1017000, 0xfffb7000, 0x800, "McBSP3" }, /* CS14 */
+ { 0xe1017800, 0xfffb7800, 0x800, "MMC" }, /* CS15 */
+ { 0xe1019000, 0xfffb9000, 0x800, "32-kHz timer" }, /* CS18 */
+ { 0xe1019800, 0xfffb9800, 0x800, "UART3" }, /* CS19 */
+ { 0xe101c800, 0xfffbc800, 0x800, "TIPB switches" }, /* CS25 */
/* Strobe 1 */
- { 0xe101e000, 0xfffce000, 0x800, "GPIOs" }, /* CS28 */
+ { 0xe101e000, 0xfffce000, 0x800, "GPIOs" }, /* CS28 */
{ 0 }
};
@@ -3994,11 +3946,25 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram,
if (!dinfo && !qtest_enabled()) {
warn_report("missing SecureDigital device");
}
- s->mmc = omap_mmc_init(0xfffb7800, system_memory,
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN),
- &s->drq[OMAP_DMA_MMC_TX],
- omap_findclk(s, "mmc_ck"));
+
+ s->mmc = qdev_new(TYPE_OMAP_MMC);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(s->mmc), &error_fatal);
+ omap_mmc_set_clk(s->mmc, omap_findclk(s, "mmc_ck"));
+
+ memory_region_add_subregion(system_memory, 0xfffb7800,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(s->mmc), 0));
+ qdev_connect_gpio_out_named(s->mmc, "dma-tx", 0, s->drq[OMAP_DMA_MMC_TX]);
+ qdev_connect_gpio_out_named(s->mmc, "dma-rx", 0, s->drq[OMAP_DMA_MMC_RX]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(s->mmc), 0,
+ qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN));
+
+ if (dinfo) {
+ DeviceState *card = qdev_new(TYPE_SD_CARD);
+ qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo),
+ &error_fatal);
+ qdev_realize_and_unref(card, qdev_get_child_bus(s->mmc, "sd-bus"),
+ &error_fatal);
+ }
s->mpuio = omap_mpuio_init(system_memory, 0xfffb5000,
qdev_get_gpio_in(s->ih[1], OMAP_INT_KEYBOARD),
@@ -4059,18 +4025,18 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram,
0xfffbd800, omap_findclk(s, "clk32-kHz"));
/* Register mappings not currently implemented:
- * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310)
- * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310)
- * USB W2FC fffb4000 - fffb47ff
- * Camera Interface fffb6800 - fffb6fff
- * USB Host fffba000 - fffba7ff
- * FAC fffba800 - fffbafff
- * HDQ/1-Wire fffbc000 - fffbc7ff
- * TIPB switches fffbc800 - fffbcfff
- * Mailbox fffcf000 - fffcf7ff
- * Local bus IF fffec100 - fffec1ff
- * Local bus MMU fffec200 - fffec2ff
- * DSP MMU fffed200 - fffed2ff
+ * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310)
+ * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310)
+ * USB W2FC fffb4000 - fffb47ff
+ * Camera Interface fffb6800 - fffb6fff
+ * USB Host fffba000 - fffba7ff
+ * FAC fffba800 - fffbafff
+ * HDQ/1-Wire fffbc000 - fffbc7ff
+ * TIPB switches fffbc800 - fffbcfff
+ * Mailbox fffcf000 - fffcf7ff
+ * Local bus IF fffec100 - fffec1ff
+ * Local bus MMU fffec200 - fffec2ff
+ * DSP MMU fffed200 - fffed2ff
*/
omap_setup_dsp_mapping(system_memory, omap15xx_dsp_mm);
diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c
deleted file mode 100644
index d968327..0000000
--- a/hw/arm/omap2.c
+++ /dev/null
@@ -1,2715 +0,0 @@
-/*
- * TI OMAP processors emulation.
- *
- * Copyright (C) 2007-2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/arm/boot.h"
-#include "hw/arm/omap.h"
-#include "sysemu/sysemu.h"
-#include "qemu/timer.h"
-#include "chardev/char-fe.h"
-#include "hw/block/flash.h"
-#include "hw/arm/soc_dma.h"
-#include "hw/sysbus.h"
-#include "hw/boards.h"
-#include "audio/audio.h"
-#include "target/arm/cpu-qom.h"
-
-/* Enhanced Audio Controller (CODEC only) */
-struct omap_eac_s {
- qemu_irq irq;
- MemoryRegion iomem;
-
- uint16_t sysconfig;
- uint8_t config[4];
- uint8_t control;
- uint8_t address;
- uint16_t data;
- uint8_t vtol;
- uint8_t vtsl;
- uint16_t mixer;
- uint16_t gain[4];
- uint8_t att;
- uint16_t max[7];
-
- struct {
- qemu_irq txdrq;
- qemu_irq rxdrq;
- uint32_t (*txrx)(void *opaque, uint32_t, int);
- void *opaque;
-
-#define EAC_BUF_LEN 1024
- uint32_t rxbuf[EAC_BUF_LEN];
- int rxoff;
- int rxlen;
- int rxavail;
- uint32_t txbuf[EAC_BUF_LEN];
- int txlen;
- int txavail;
-
- int enable;
- int rate;
-
- uint16_t config[4];
-
- /* These need to be moved to the actual codec */
- QEMUSoundCard card;
- SWVoiceIn *in_voice;
- SWVoiceOut *out_voice;
- int hw_enable;
- } codec;
-
- struct {
- uint8_t control;
- uint16_t config;
- } modem, bt;
-};
-
-static inline void omap_eac_interrupt_update(struct omap_eac_s *s)
-{
- qemu_set_irq(s->irq, (s->codec.config[1] >> 14) & 1); /* AURDI */
-}
-
-static inline void omap_eac_in_dmarequest_update(struct omap_eac_s *s)
-{
- qemu_set_irq(s->codec.rxdrq, (s->codec.rxavail || s->codec.rxlen) &&
- ((s->codec.config[1] >> 12) & 1)); /* DMAREN */
-}
-
-static inline void omap_eac_out_dmarequest_update(struct omap_eac_s *s)
-{
- qemu_set_irq(s->codec.txdrq, s->codec.txlen < s->codec.txavail &&
- ((s->codec.config[1] >> 11) & 1)); /* DMAWEN */
-}
-
-static inline void omap_eac_in_refill(struct omap_eac_s *s)
-{
- int left = MIN(EAC_BUF_LEN - s->codec.rxlen, s->codec.rxavail) << 2;
- int start = ((s->codec.rxoff + s->codec.rxlen) & (EAC_BUF_LEN - 1)) << 2;
- int leftwrap = MIN(left, (EAC_BUF_LEN << 2) - start);
- int recv = 1;
- uint8_t *buf = (uint8_t *) s->codec.rxbuf + start;
-
- left -= leftwrap;
- start = 0;
- while (leftwrap && (recv = AUD_read(s->codec.in_voice, buf + start,
- leftwrap)) > 0) { /* Be defensive */
- start += recv;
- leftwrap -= recv;
- }
- if (recv <= 0)
- s->codec.rxavail = 0;
- else
- s->codec.rxavail -= start >> 2;
- s->codec.rxlen += start >> 2;
-
- if (recv > 0 && left > 0) {
- start = 0;
- while (left && (recv = AUD_read(s->codec.in_voice,
- (uint8_t *) s->codec.rxbuf + start,
- left)) > 0) { /* Be defensive */
- start += recv;
- left -= recv;
- }
- if (recv <= 0)
- s->codec.rxavail = 0;
- else
- s->codec.rxavail -= start >> 2;
- s->codec.rxlen += start >> 2;
- }
-}
-
-static inline void omap_eac_out_empty(struct omap_eac_s *s)
-{
- int left = s->codec.txlen << 2;
- int start = 0;
- int sent = 1;
-
- while (left && (sent = AUD_write(s->codec.out_voice,
- (uint8_t *) s->codec.txbuf + start,
- left)) > 0) { /* Be defensive */
- start += sent;
- left -= sent;
- }
-
- if (!sent) {
- s->codec.txavail = 0;
- omap_eac_out_dmarequest_update(s);
- }
-
- if (start)
- s->codec.txlen = 0;
-}
-
-static void omap_eac_in_cb(void *opaque, int avail_b)
-{
- struct omap_eac_s *s = opaque;
-
- s->codec.rxavail = avail_b >> 2;
- omap_eac_in_refill(s);
- /* TODO: possibly discard current buffer if overrun */
- omap_eac_in_dmarequest_update(s);
-}
-
-static void omap_eac_out_cb(void *opaque, int free_b)
-{
- struct omap_eac_s *s = opaque;
-
- s->codec.txavail = free_b >> 2;
- if (s->codec.txlen)
- omap_eac_out_empty(s);
- else
- omap_eac_out_dmarequest_update(s);
-}
-
-static void omap_eac_enable_update(struct omap_eac_s *s)
-{
- s->codec.enable = !(s->codec.config[1] & 1) && /* EACPWD */
- (s->codec.config[1] & 2) && /* AUDEN */
- s->codec.hw_enable;
-}
-
-static const int omap_eac_fsint[4] = {
- 8000,
- 11025,
- 22050,
- 44100,
-};
-
-static const int omap_eac_fsint2[8] = {
- 8000,
- 11025,
- 22050,
- 44100,
- 48000,
- 0, 0, 0,
-};
-
-static const int omap_eac_fsint3[16] = {
- 8000,
- 11025,
- 16000,
- 22050,
- 24000,
- 32000,
- 44100,
- 48000,
- 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
-static void omap_eac_rate_update(struct omap_eac_s *s)
-{
- int fsint[3];
-
- fsint[2] = (s->codec.config[3] >> 9) & 0xf;
- fsint[1] = (s->codec.config[2] >> 0) & 0x7;
- fsint[0] = (s->codec.config[0] >> 6) & 0x3;
- if (fsint[2] < 0xf)
- s->codec.rate = omap_eac_fsint3[fsint[2]];
- else if (fsint[1] < 0x7)
- s->codec.rate = omap_eac_fsint2[fsint[1]];
- else
- s->codec.rate = omap_eac_fsint[fsint[0]];
-}
-
-static void omap_eac_volume_update(struct omap_eac_s *s)
-{
- /* TODO */
-}
-
-static void omap_eac_format_update(struct omap_eac_s *s)
-{
- struct audsettings fmt;
-
- /* The hardware buffers at most one sample */
- if (s->codec.rxlen)
- s->codec.rxlen = 1;
-
- if (s->codec.in_voice) {
- AUD_set_active_in(s->codec.in_voice, 0);
- AUD_close_in(&s->codec.card, s->codec.in_voice);
- s->codec.in_voice = NULL;
- }
- if (s->codec.out_voice) {
- omap_eac_out_empty(s);
- AUD_set_active_out(s->codec.out_voice, 0);
- AUD_close_out(&s->codec.card, s->codec.out_voice);
- s->codec.out_voice = NULL;
- s->codec.txavail = 0;
- }
- /* Discard what couldn't be written */
- s->codec.txlen = 0;
-
- omap_eac_enable_update(s);
- if (!s->codec.enable)
- return;
-
- omap_eac_rate_update(s);
- fmt.endianness = ((s->codec.config[0] >> 8) & 1); /* LI_BI */
- fmt.nchannels = ((s->codec.config[0] >> 10) & 1) ? 2 : 1; /* MN_ST */
- fmt.freq = s->codec.rate;
- /* TODO: signedness possibly depends on the CODEC hardware - or
- * does I2S specify it? */
- /* All register writes are 16 bits so we store 16-bit samples
- * in the buffers regardless of AGCFR[B8_16] value. */
- fmt.fmt = AUDIO_FORMAT_U16;
-
- s->codec.in_voice = AUD_open_in(&s->codec.card, s->codec.in_voice,
- "eac.codec.in", s, omap_eac_in_cb, &fmt);
- s->codec.out_voice = AUD_open_out(&s->codec.card, s->codec.out_voice,
- "eac.codec.out", s, omap_eac_out_cb, &fmt);
-
- omap_eac_volume_update(s);
-
- AUD_set_active_in(s->codec.in_voice, 1);
- AUD_set_active_out(s->codec.out_voice, 1);
-}
-
-static void omap_eac_reset(struct omap_eac_s *s)
-{
- s->sysconfig = 0;
- s->config[0] = 0x0c;
- s->config[1] = 0x09;
- s->config[2] = 0xab;
- s->config[3] = 0x03;
- s->control = 0x00;
- s->address = 0x00;
- s->data = 0x0000;
- s->vtol = 0x00;
- s->vtsl = 0x00;
- s->mixer = 0x0000;
- s->gain[0] = 0xe7e7;
- s->gain[1] = 0x6767;
- s->gain[2] = 0x6767;
- s->gain[3] = 0x6767;
- s->att = 0xce;
- s->max[0] = 0;
- s->max[1] = 0;
- s->max[2] = 0;
- s->max[3] = 0;
- s->max[4] = 0;
- s->max[5] = 0;
- s->max[6] = 0;
-
- s->modem.control = 0x00;
- s->modem.config = 0x0000;
- s->bt.control = 0x00;
- s->bt.config = 0x0000;
- s->codec.config[0] = 0x0649;
- s->codec.config[1] = 0x0000;
- s->codec.config[2] = 0x0007;
- s->codec.config[3] = 0x1ffc;
- s->codec.rxoff = 0;
- s->codec.rxlen = 0;
- s->codec.txlen = 0;
- s->codec.rxavail = 0;
- s->codec.txavail = 0;
-
- omap_eac_format_update(s);
- omap_eac_interrupt_update(s);
-}
-
-static uint64_t omap_eac_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_eac_s *s = opaque;
- uint32_t ret;
-
- if (size != 2) {
- return omap_badwidth_read16(opaque, addr);
- }
-
- switch (addr) {
- case 0x000: /* CPCFR1 */
- return s->config[0];
- case 0x004: /* CPCFR2 */
- return s->config[1];
- case 0x008: /* CPCFR3 */
- return s->config[2];
- case 0x00c: /* CPCFR4 */
- return s->config[3];
-
- case 0x010: /* CPTCTL */
- return s->control | ((s->codec.rxavail + s->codec.rxlen > 0) << 7) |
- ((s->codec.txlen < s->codec.txavail) << 5);
-
- case 0x014: /* CPTTADR */
- return s->address;
- case 0x018: /* CPTDATL */
- return s->data & 0xff;
- case 0x01c: /* CPTDATH */
- return s->data >> 8;
- case 0x020: /* CPTVSLL */
- return s->vtol;
- case 0x024: /* CPTVSLH */
- return s->vtsl | (3 << 5); /* CRDY1 | CRDY2 */
- case 0x040: /* MPCTR */
- return s->modem.control;
- case 0x044: /* MPMCCFR */
- return s->modem.config;
- case 0x060: /* BPCTR */
- return s->bt.control;
- case 0x064: /* BPMCCFR */
- return s->bt.config;
- case 0x080: /* AMSCFR */
- return s->mixer;
- case 0x084: /* AMVCTR */
- return s->gain[0];
- case 0x088: /* AM1VCTR */
- return s->gain[1];
- case 0x08c: /* AM2VCTR */
- return s->gain[2];
- case 0x090: /* AM3VCTR */
- return s->gain[3];
- case 0x094: /* ASTCTR */
- return s->att;
- case 0x098: /* APD1LCR */
- return s->max[0];
- case 0x09c: /* APD1RCR */
- return s->max[1];
- case 0x0a0: /* APD2LCR */
- return s->max[2];
- case 0x0a4: /* APD2RCR */
- return s->max[3];
- case 0x0a8: /* APD3LCR */
- return s->max[4];
- case 0x0ac: /* APD3RCR */
- return s->max[5];
- case 0x0b0: /* APD4R */
- return s->max[6];
- case 0x0b4: /* ADWR */
- /* This should be write-only? Docs list it as read-only. */
- return 0x0000;
- case 0x0b8: /* ADRDR */
- if (likely(s->codec.rxlen > 1)) {
- ret = s->codec.rxbuf[s->codec.rxoff ++];
- s->codec.rxlen --;
- s->codec.rxoff &= EAC_BUF_LEN - 1;
- return ret;
- } else if (s->codec.rxlen) {
- ret = s->codec.rxbuf[s->codec.rxoff ++];
- s->codec.rxlen --;
- s->codec.rxoff &= EAC_BUF_LEN - 1;
- if (s->codec.rxavail)
- omap_eac_in_refill(s);
- omap_eac_in_dmarequest_update(s);
- return ret;
- }
- return 0x0000;
- case 0x0bc: /* AGCFR */
- return s->codec.config[0];
- case 0x0c0: /* AGCTR */
- return s->codec.config[1] | ((s->codec.config[1] & 2) << 14);
- case 0x0c4: /* AGCFR2 */
- return s->codec.config[2];
- case 0x0c8: /* AGCFR3 */
- return s->codec.config[3];
- case 0x0cc: /* MBPDMACTR */
- case 0x0d0: /* MPDDMARR */
- case 0x0d8: /* MPUDMARR */
- case 0x0e4: /* BPDDMARR */
- case 0x0ec: /* BPUDMARR */
- return 0x0000;
-
- case 0x100: /* VERSION_NUMBER */
- return 0x0010;
-
- case 0x104: /* SYSCONFIG */
- return s->sysconfig;
-
- case 0x108: /* SYSSTATUS */
- return 1 | 0xe; /* RESETDONE | stuff */
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_eac_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_eac_s *s = opaque;
-
- if (size != 2) {
- omap_badwidth_write16(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x098: /* APD1LCR */
- case 0x09c: /* APD1RCR */
- case 0x0a0: /* APD2LCR */
- case 0x0a4: /* APD2RCR */
- case 0x0a8: /* APD3LCR */
- case 0x0ac: /* APD3RCR */
- case 0x0b0: /* APD4R */
- case 0x0b8: /* ADRDR */
- case 0x0d0: /* MPDDMARR */
- case 0x0d8: /* MPUDMARR */
- case 0x0e4: /* BPDDMARR */
- case 0x0ec: /* BPUDMARR */
- case 0x100: /* VERSION_NUMBER */
- case 0x108: /* SYSSTATUS */
- OMAP_RO_REG(addr);
- return;
-
- case 0x000: /* CPCFR1 */
- s->config[0] = value & 0xff;
- omap_eac_format_update(s);
- break;
- case 0x004: /* CPCFR2 */
- s->config[1] = value & 0xff;
- omap_eac_format_update(s);
- break;
- case 0x008: /* CPCFR3 */
- s->config[2] = value & 0xff;
- omap_eac_format_update(s);
- break;
- case 0x00c: /* CPCFR4 */
- s->config[3] = value & 0xff;
- omap_eac_format_update(s);
- break;
-
- case 0x010: /* CPTCTL */
- /* Assuming TXF and TXE bits are read-only... */
- s->control = value & 0x5f;
- omap_eac_interrupt_update(s);
- break;
-
- case 0x014: /* CPTTADR */
- s->address = value & 0xff;
- break;
- case 0x018: /* CPTDATL */
- s->data &= 0xff00;
- s->data |= value & 0xff;
- break;
- case 0x01c: /* CPTDATH */
- s->data &= 0x00ff;
- s->data |= value << 8;
- break;
- case 0x020: /* CPTVSLL */
- s->vtol = value & 0xf8;
- break;
- case 0x024: /* CPTVSLH */
- s->vtsl = value & 0x9f;
- break;
- case 0x040: /* MPCTR */
- s->modem.control = value & 0x8f;
- break;
- case 0x044: /* MPMCCFR */
- s->modem.config = value & 0x7fff;
- break;
- case 0x060: /* BPCTR */
- s->bt.control = value & 0x8f;
- break;
- case 0x064: /* BPMCCFR */
- s->bt.config = value & 0x7fff;
- break;
- case 0x080: /* AMSCFR */
- s->mixer = value & 0x0fff;
- break;
- case 0x084: /* AMVCTR */
- s->gain[0] = value & 0xffff;
- break;
- case 0x088: /* AM1VCTR */
- s->gain[1] = value & 0xff7f;
- break;
- case 0x08c: /* AM2VCTR */
- s->gain[2] = value & 0xff7f;
- break;
- case 0x090: /* AM3VCTR */
- s->gain[3] = value & 0xff7f;
- break;
- case 0x094: /* ASTCTR */
- s->att = value & 0xff;
- break;
-
- case 0x0b4: /* ADWR */
- s->codec.txbuf[s->codec.txlen ++] = value;
- if (unlikely(s->codec.txlen == EAC_BUF_LEN ||
- s->codec.txlen == s->codec.txavail)) {
- if (s->codec.txavail)
- omap_eac_out_empty(s);
- /* Discard what couldn't be written */
- s->codec.txlen = 0;
- }
- break;
-
- case 0x0bc: /* AGCFR */
- s->codec.config[0] = value & 0x07ff;
- omap_eac_format_update(s);
- break;
- case 0x0c0: /* AGCTR */
- s->codec.config[1] = value & 0x780f;
- omap_eac_format_update(s);
- break;
- case 0x0c4: /* AGCFR2 */
- s->codec.config[2] = value & 0x003f;
- omap_eac_format_update(s);
- break;
- case 0x0c8: /* AGCFR3 */
- s->codec.config[3] = value & 0xffff;
- omap_eac_format_update(s);
- break;
- case 0x0cc: /* MBPDMACTR */
- case 0x0d4: /* MPDDMAWR */
- case 0x0e0: /* MPUDMAWR */
- case 0x0e8: /* BPDDMAWR */
- case 0x0f0: /* BPUDMAWR */
- break;
-
- case 0x104: /* SYSCONFIG */
- if (value & (1 << 1)) /* SOFTRESET */
- omap_eac_reset(s);
- s->sysconfig = value & 0x31d;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_eac_ops = {
- .read = omap_eac_read,
- .write = omap_eac_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static struct omap_eac_s *omap_eac_init(struct omap_target_agent_s *ta,
- qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk)
-{
- struct omap_eac_s *s = g_new0(struct omap_eac_s, 1);
-
- s->irq = irq;
- s->codec.rxdrq = *drq ++;
- s->codec.txdrq = *drq;
- omap_eac_reset(s);
-
- if (current_machine->audiodev) {
- s->codec.card.name = g_strdup(current_machine->audiodev);
- s->codec.card.state = audio_state_by_name(s->codec.card.name, &error_fatal);
- }
- AUD_register_card("OMAP EAC", &s->codec.card, &error_fatal);
-
- memory_region_init_io(&s->iomem, NULL, &omap_eac_ops, s, "omap.eac",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- return s;
-}
-
-/* STI/XTI (emulation interface) console - reverse engineered only */
-struct omap_sti_s {
- qemu_irq irq;
- MemoryRegion iomem;
- MemoryRegion iomem_fifo;
- CharBackend chr;
-
- uint32_t sysconfig;
- uint32_t systest;
- uint32_t irqst;
- uint32_t irqen;
- uint32_t clkcontrol;
- uint32_t serial_config;
-};
-
-#define STI_TRACE_CONSOLE_CHANNEL 239
-#define STI_TRACE_CONTROL_CHANNEL 253
-
-static inline void omap_sti_interrupt_update(struct omap_sti_s *s)
-{
- qemu_set_irq(s->irq, s->irqst & s->irqen);
-}
-
-static void omap_sti_reset(struct omap_sti_s *s)
-{
- s->sysconfig = 0;
- s->irqst = 0;
- s->irqen = 0;
- s->clkcontrol = 0;
- s->serial_config = 0;
-
- omap_sti_interrupt_update(s);
-}
-
-static uint64_t omap_sti_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_sti_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* STI_REVISION */
- return 0x10;
-
- case 0x10: /* STI_SYSCONFIG */
- return s->sysconfig;
-
- case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */
- return 0x00;
-
- case 0x18: /* STI_IRQSTATUS */
- return s->irqst;
-
- case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */
- return s->irqen;
-
- case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */
- case 0x28: /* STI_RX_DR / XTI_RXDATA */
- /* TODO */
- return 0;
-
- case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */
- return s->clkcontrol;
-
- case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */
- return s->serial_config;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_sti_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_sti_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x00: /* STI_REVISION */
- case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */
- OMAP_RO_REG(addr);
- return;
-
- case 0x10: /* STI_SYSCONFIG */
- if (value & (1 << 1)) /* SOFTRESET */
- omap_sti_reset(s);
- s->sysconfig = value & 0xfe;
- break;
-
- case 0x18: /* STI_IRQSTATUS */
- s->irqst &= ~value;
- omap_sti_interrupt_update(s);
- break;
-
- case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */
- s->irqen = value & 0xffff;
- omap_sti_interrupt_update(s);
- break;
-
- case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */
- s->clkcontrol = value & 0xff;
- break;
-
- case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */
- s->serial_config = value & 0xff;
- break;
-
- case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */
- case 0x28: /* STI_RX_DR / XTI_RXDATA */
- /* TODO */
- return;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_sti_ops = {
- .read = omap_sti_read,
- .write = omap_sti_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static uint64_t omap_sti_fifo_read(void *opaque, hwaddr addr, unsigned size)
-{
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_sti_fifo_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_sti_s *s = opaque;
- int ch = addr >> 6;
- uint8_t byte = value;
-
- if (size != 1) {
- omap_badwidth_write8(opaque, addr, size);
- return;
- }
-
- if (ch == STI_TRACE_CONTROL_CHANNEL) {
- /* Flush channel <i>value</i>. */
- /* XXX this blocks entire thread. Rewrite to use
- * qemu_chr_fe_write and background I/O callbacks */
- qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\r", 1);
- } else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) {
- if (value == 0xc0 || value == 0xc3) {
- /* Open channel <i>ch</i>. */
- } else if (value == 0x00) {
- qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\n", 1);
- } else {
- qemu_chr_fe_write_all(&s->chr, &byte, 1);
- }
- }
-}
-
-static const MemoryRegionOps omap_sti_fifo_ops = {
- .read = omap_sti_fifo_read,
- .write = omap_sti_fifo_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static struct omap_sti_s *omap_sti_init(struct omap_target_agent_s *ta,
- MemoryRegion *sysmem,
- hwaddr channel_base, qemu_irq irq, omap_clk clk,
- Chardev *chr)
-{
- struct omap_sti_s *s = g_new0(struct omap_sti_s, 1);
-
- s->irq = irq;
- omap_sti_reset(s);
-
- qemu_chr_fe_init(&s->chr, chr ?: qemu_chr_new("null", "null", NULL),
- &error_abort);
-
- memory_region_init_io(&s->iomem, NULL, &omap_sti_ops, s, "omap.sti",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- memory_region_init_io(&s->iomem_fifo, NULL, &omap_sti_fifo_ops, s,
- "omap.sti.fifo", 0x10000);
- memory_region_add_subregion(sysmem, channel_base, &s->iomem_fifo);
-
- return s;
-}
-
-/* L4 Interconnect */
-#define L4TA(n) (n)
-#define L4TAO(n) ((n) + 39)
-
-static const struct omap_l4_region_s omap_l4_region[125] = {
- [ 1] = { 0x40800, 0x800, 32 }, /* Initiator agent */
- [ 2] = { 0x41000, 0x1000, 32 }, /* Link agent */
- [ 0] = { 0x40000, 0x800, 32 }, /* Address and protection */
- [ 3] = { 0x00000, 0x1000, 32 | 16 | 8 }, /* System Control and Pinout */
- [ 4] = { 0x01000, 0x1000, 32 | 16 | 8 }, /* L4TAO1 */
- [ 5] = { 0x04000, 0x1000, 32 | 16 }, /* 32K Timer */
- [ 6] = { 0x05000, 0x1000, 32 | 16 | 8 }, /* L4TAO2 */
- [ 7] = { 0x08000, 0x800, 32 }, /* PRCM Region A */
- [ 8] = { 0x08800, 0x800, 32 }, /* PRCM Region B */
- [ 9] = { 0x09000, 0x1000, 32 | 16 | 8 }, /* L4TAO */
- [ 10] = { 0x12000, 0x1000, 32 | 16 | 8 }, /* Test (BCM) */
- [ 11] = { 0x13000, 0x1000, 32 | 16 | 8 }, /* L4TA1 */
- [ 12] = { 0x14000, 0x1000, 32 }, /* Test/emulation (TAP) */
- [ 13] = { 0x15000, 0x1000, 32 | 16 | 8 }, /* L4TA2 */
- [ 14] = { 0x18000, 0x1000, 32 | 16 | 8 }, /* GPIO1 */
- [ 16] = { 0x1a000, 0x1000, 32 | 16 | 8 }, /* GPIO2 */
- [ 18] = { 0x1c000, 0x1000, 32 | 16 | 8 }, /* GPIO3 */
- [ 19] = { 0x1e000, 0x1000, 32 | 16 | 8 }, /* GPIO4 */
- [ 15] = { 0x19000, 0x1000, 32 | 16 | 8 }, /* Quad GPIO TOP */
- [ 17] = { 0x1b000, 0x1000, 32 | 16 | 8 }, /* L4TA3 */
- [ 20] = { 0x20000, 0x1000, 32 | 16 | 8 }, /* WD Timer 1 (Secure) */
- [ 22] = { 0x22000, 0x1000, 32 | 16 | 8 }, /* WD Timer 2 (OMAP) */
- [ 21] = { 0x21000, 0x1000, 32 | 16 | 8 }, /* Dual WD timer TOP */
- [ 23] = { 0x23000, 0x1000, 32 | 16 | 8 }, /* L4TA4 */
- [ 24] = { 0x28000, 0x1000, 32 | 16 | 8 }, /* GP Timer 1 */
- [ 25] = { 0x29000, 0x1000, 32 | 16 | 8 }, /* L4TA7 */
- [ 26] = { 0x48000, 0x2000, 32 | 16 | 8 }, /* Emulation (ARM11ETB) */
- [ 27] = { 0x4a000, 0x1000, 32 | 16 | 8 }, /* L4TA9 */
- [ 28] = { 0x50000, 0x400, 32 | 16 | 8 }, /* Display top */
- [ 29] = { 0x50400, 0x400, 32 | 16 | 8 }, /* Display control */
- [ 30] = { 0x50800, 0x400, 32 | 16 | 8 }, /* Display RFBI */
- [ 31] = { 0x50c00, 0x400, 32 | 16 | 8 }, /* Display encoder */
- [ 32] = { 0x51000, 0x1000, 32 | 16 | 8 }, /* L4TA10 */
- [ 33] = { 0x52000, 0x400, 32 | 16 | 8 }, /* Camera top */
- [ 34] = { 0x52400, 0x400, 32 | 16 | 8 }, /* Camera core */
- [ 35] = { 0x52800, 0x400, 32 | 16 | 8 }, /* Camera DMA */
- [ 36] = { 0x52c00, 0x400, 32 | 16 | 8 }, /* Camera MMU */
- [ 37] = { 0x53000, 0x1000, 32 | 16 | 8 }, /* L4TA11 */
- [ 38] = { 0x56000, 0x1000, 32 | 16 | 8 }, /* sDMA */
- [ 39] = { 0x57000, 0x1000, 32 | 16 | 8 }, /* L4TA12 */
- [ 40] = { 0x58000, 0x1000, 32 | 16 | 8 }, /* SSI top */
- [ 41] = { 0x59000, 0x1000, 32 | 16 | 8 }, /* SSI GDD */
- [ 42] = { 0x5a000, 0x1000, 32 | 16 | 8 }, /* SSI Port1 */
- [ 43] = { 0x5b000, 0x1000, 32 | 16 | 8 }, /* SSI Port2 */
- [ 44] = { 0x5c000, 0x1000, 32 | 16 | 8 }, /* L4TA13 */
- [ 45] = { 0x5e000, 0x1000, 32 | 16 | 8 }, /* USB OTG */
- [ 46] = { 0x5f000, 0x1000, 32 | 16 | 8 }, /* L4TAO4 */
- [ 47] = { 0x60000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER1SDRC) */
- [ 48] = { 0x61000, 0x1000, 32 | 16 | 8 }, /* L4TA14 */
- [ 49] = { 0x62000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER2GPMC) */
- [ 50] = { 0x63000, 0x1000, 32 | 16 | 8 }, /* L4TA15 */
- [ 51] = { 0x64000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER3OCM) */
- [ 52] = { 0x65000, 0x1000, 32 | 16 | 8 }, /* L4TA16 */
- [ 53] = { 0x66000, 0x300, 32 | 16 | 8 }, /* Emulation (WIN_TRACER4L4) */
- [ 54] = { 0x67000, 0x1000, 32 | 16 | 8 }, /* L4TA17 */
- [ 55] = { 0x68000, 0x1000, 32 | 16 | 8 }, /* Emulation (XTI) */
- [ 56] = { 0x69000, 0x1000, 32 | 16 | 8 }, /* L4TA18 */
- [ 57] = { 0x6a000, 0x1000, 16 | 8 }, /* UART1 */
- [ 58] = { 0x6b000, 0x1000, 32 | 16 | 8 }, /* L4TA19 */
- [ 59] = { 0x6c000, 0x1000, 16 | 8 }, /* UART2 */
- [ 60] = { 0x6d000, 0x1000, 32 | 16 | 8 }, /* L4TA20 */
- [ 61] = { 0x6e000, 0x1000, 16 | 8 }, /* UART3 */
- [ 62] = { 0x6f000, 0x1000, 32 | 16 | 8 }, /* L4TA21 */
- [ 63] = { 0x70000, 0x1000, 16 }, /* I2C1 */
- [ 64] = { 0x71000, 0x1000, 32 | 16 | 8 }, /* L4TAO5 */
- [ 65] = { 0x72000, 0x1000, 16 }, /* I2C2 */
- [ 66] = { 0x73000, 0x1000, 32 | 16 | 8 }, /* L4TAO6 */
- [ 67] = { 0x74000, 0x1000, 16 }, /* McBSP1 */
- [ 68] = { 0x75000, 0x1000, 32 | 16 | 8 }, /* L4TAO7 */
- [ 69] = { 0x76000, 0x1000, 16 }, /* McBSP2 */
- [ 70] = { 0x77000, 0x1000, 32 | 16 | 8 }, /* L4TAO8 */
- [ 71] = { 0x24000, 0x1000, 32 | 16 | 8 }, /* WD Timer 3 (DSP) */
- [ 72] = { 0x25000, 0x1000, 32 | 16 | 8 }, /* L4TA5 */
- [ 73] = { 0x26000, 0x1000, 32 | 16 | 8 }, /* WD Timer 4 (IVA) */
- [ 74] = { 0x27000, 0x1000, 32 | 16 | 8 }, /* L4TA6 */
- [ 75] = { 0x2a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 2 */
- [ 76] = { 0x2b000, 0x1000, 32 | 16 | 8 }, /* L4TA8 */
- [ 77] = { 0x78000, 0x1000, 32 | 16 | 8 }, /* GP Timer 3 */
- [ 78] = { 0x79000, 0x1000, 32 | 16 | 8 }, /* L4TA22 */
- [ 79] = { 0x7a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 4 */
- [ 80] = { 0x7b000, 0x1000, 32 | 16 | 8 }, /* L4TA23 */
- [ 81] = { 0x7c000, 0x1000, 32 | 16 | 8 }, /* GP Timer 5 */
- [ 82] = { 0x7d000, 0x1000, 32 | 16 | 8 }, /* L4TA24 */
- [ 83] = { 0x7e000, 0x1000, 32 | 16 | 8 }, /* GP Timer 6 */
- [ 84] = { 0x7f000, 0x1000, 32 | 16 | 8 }, /* L4TA25 */
- [ 85] = { 0x80000, 0x1000, 32 | 16 | 8 }, /* GP Timer 7 */
- [ 86] = { 0x81000, 0x1000, 32 | 16 | 8 }, /* L4TA26 */
- [ 87] = { 0x82000, 0x1000, 32 | 16 | 8 }, /* GP Timer 8 */
- [ 88] = { 0x83000, 0x1000, 32 | 16 | 8 }, /* L4TA27 */
- [ 89] = { 0x84000, 0x1000, 32 | 16 | 8 }, /* GP Timer 9 */
- [ 90] = { 0x85000, 0x1000, 32 | 16 | 8 }, /* L4TA28 */
- [ 91] = { 0x86000, 0x1000, 32 | 16 | 8 }, /* GP Timer 10 */
- [ 92] = { 0x87000, 0x1000, 32 | 16 | 8 }, /* L4TA29 */
- [ 93] = { 0x88000, 0x1000, 32 | 16 | 8 }, /* GP Timer 11 */
- [ 94] = { 0x89000, 0x1000, 32 | 16 | 8 }, /* L4TA30 */
- [ 95] = { 0x8a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 12 */
- [ 96] = { 0x8b000, 0x1000, 32 | 16 | 8 }, /* L4TA31 */
- [ 97] = { 0x90000, 0x1000, 16 }, /* EAC */
- [ 98] = { 0x91000, 0x1000, 32 | 16 | 8 }, /* L4TA32 */
- [ 99] = { 0x92000, 0x1000, 16 }, /* FAC */
- [100] = { 0x93000, 0x1000, 32 | 16 | 8 }, /* L4TA33 */
- [101] = { 0x94000, 0x1000, 32 | 16 | 8 }, /* IPC (MAILBOX) */
- [102] = { 0x95000, 0x1000, 32 | 16 | 8 }, /* L4TA34 */
- [103] = { 0x98000, 0x1000, 32 | 16 | 8 }, /* SPI1 */
- [104] = { 0x99000, 0x1000, 32 | 16 | 8 }, /* L4TA35 */
- [105] = { 0x9a000, 0x1000, 32 | 16 | 8 }, /* SPI2 */
- [106] = { 0x9b000, 0x1000, 32 | 16 | 8 }, /* L4TA36 */
- [107] = { 0x9c000, 0x1000, 16 | 8 }, /* MMC SDIO */
- [108] = { 0x9d000, 0x1000, 32 | 16 | 8 }, /* L4TAO9 */
- [109] = { 0x9e000, 0x1000, 32 | 16 | 8 }, /* MS_PRO */
- [110] = { 0x9f000, 0x1000, 32 | 16 | 8 }, /* L4TAO10 */
- [111] = { 0xa0000, 0x1000, 32 }, /* RNG */
- [112] = { 0xa1000, 0x1000, 32 | 16 | 8 }, /* L4TAO11 */
- [113] = { 0xa2000, 0x1000, 32 }, /* DES3DES */
- [114] = { 0xa3000, 0x1000, 32 | 16 | 8 }, /* L4TAO12 */
- [115] = { 0xa4000, 0x1000, 32 }, /* SHA1MD5 */
- [116] = { 0xa5000, 0x1000, 32 | 16 | 8 }, /* L4TAO13 */
- [117] = { 0xa6000, 0x1000, 32 }, /* AES */
- [118] = { 0xa7000, 0x1000, 32 | 16 | 8 }, /* L4TA37 */
- [119] = { 0xa8000, 0x2000, 32 }, /* PKA */
- [120] = { 0xaa000, 0x1000, 32 | 16 | 8 }, /* L4TA38 */
- [121] = { 0xb0000, 0x1000, 32 }, /* MG */
- [122] = { 0xb1000, 0x1000, 32 | 16 | 8 },
- [123] = { 0xb2000, 0x1000, 32 }, /* HDQ/1-Wire */
- [124] = { 0xb3000, 0x1000, 32 | 16 | 8 }, /* L4TA39 */
-};
-
-static const struct omap_l4_agent_info_s omap_l4_agent_info[54] = {
- { 0, 0, 3, 2 }, /* L4IA initiator agent */
- { L4TAO(1), 3, 2, 1 }, /* Control and pinout module */
- { L4TAO(2), 5, 2, 1 }, /* 32K timer */
- { L4TAO(3), 7, 3, 2 }, /* PRCM */
- { L4TA(1), 10, 2, 1 }, /* BCM */
- { L4TA(2), 12, 2, 1 }, /* Test JTAG */
- { L4TA(3), 14, 6, 3 }, /* Quad GPIO */
- { L4TA(4), 20, 4, 3 }, /* WD timer 1/2 */
- { L4TA(7), 24, 2, 1 }, /* GP timer 1 */
- { L4TA(9), 26, 2, 1 }, /* ARM11 ETB */
- { L4TA(10), 28, 5, 4 }, /* Display subsystem */
- { L4TA(11), 33, 5, 4 }, /* Camera subsystem */
- { L4TA(12), 38, 2, 1 }, /* sDMA */
- { L4TA(13), 40, 5, 4 }, /* SSI */
- { L4TAO(4), 45, 2, 1 }, /* USB */
- { L4TA(14), 47, 2, 1 }, /* Win Tracer1 */
- { L4TA(15), 49, 2, 1 }, /* Win Tracer2 */
- { L4TA(16), 51, 2, 1 }, /* Win Tracer3 */
- { L4TA(17), 53, 2, 1 }, /* Win Tracer4 */
- { L4TA(18), 55, 2, 1 }, /* XTI */
- { L4TA(19), 57, 2, 1 }, /* UART1 */
- { L4TA(20), 59, 2, 1 }, /* UART2 */
- { L4TA(21), 61, 2, 1 }, /* UART3 */
- { L4TAO(5), 63, 2, 1 }, /* I2C1 */
- { L4TAO(6), 65, 2, 1 }, /* I2C2 */
- { L4TAO(7), 67, 2, 1 }, /* McBSP1 */
- { L4TAO(8), 69, 2, 1 }, /* McBSP2 */
- { L4TA(5), 71, 2, 1 }, /* WD Timer 3 (DSP) */
- { L4TA(6), 73, 2, 1 }, /* WD Timer 4 (IVA) */
- { L4TA(8), 75, 2, 1 }, /* GP Timer 2 */
- { L4TA(22), 77, 2, 1 }, /* GP Timer 3 */
- { L4TA(23), 79, 2, 1 }, /* GP Timer 4 */
- { L4TA(24), 81, 2, 1 }, /* GP Timer 5 */
- { L4TA(25), 83, 2, 1 }, /* GP Timer 6 */
- { L4TA(26), 85, 2, 1 }, /* GP Timer 7 */
- { L4TA(27), 87, 2, 1 }, /* GP Timer 8 */
- { L4TA(28), 89, 2, 1 }, /* GP Timer 9 */
- { L4TA(29), 91, 2, 1 }, /* GP Timer 10 */
- { L4TA(30), 93, 2, 1 }, /* GP Timer 11 */
- { L4TA(31), 95, 2, 1 }, /* GP Timer 12 */
- { L4TA(32), 97, 2, 1 }, /* EAC */
- { L4TA(33), 99, 2, 1 }, /* FAC */
- { L4TA(34), 101, 2, 1 }, /* IPC */
- { L4TA(35), 103, 2, 1 }, /* SPI1 */
- { L4TA(36), 105, 2, 1 }, /* SPI2 */
- { L4TAO(9), 107, 2, 1 }, /* MMC SDIO */
- { L4TAO(10), 109, 2, 1 },
- { L4TAO(11), 111, 2, 1 }, /* RNG */
- { L4TAO(12), 113, 2, 1 }, /* DES3DES */
- { L4TAO(13), 115, 2, 1 }, /* SHA1MD5 */
- { L4TA(37), 117, 2, 1 }, /* AES */
- { L4TA(38), 119, 2, 1 }, /* PKA */
- { -1, 121, 2, 1 },
- { L4TA(39), 123, 2, 1 }, /* HDQ/1-Wire */
-};
-
-#define omap_l4ta(bus, cs) \
- omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TA(cs))
-#define omap_l4tao(bus, cs) \
- omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TAO(cs))
-
-/* Power, Reset, and Clock Management */
-struct omap_prcm_s {
- qemu_irq irq[3];
- struct omap_mpu_state_s *mpu;
- MemoryRegion iomem0;
- MemoryRegion iomem1;
-
- uint32_t irqst[3];
- uint32_t irqen[3];
-
- uint32_t sysconfig;
- uint32_t voltctrl;
- uint32_t scratch[20];
-
- uint32_t clksrc[1];
- uint32_t clkout[1];
- uint32_t clkemul[1];
- uint32_t clkpol[1];
- uint32_t clksel[8];
- uint32_t clken[12];
- uint32_t clkctrl[4];
- uint32_t clkidle[7];
- uint32_t setuptime[2];
-
- uint32_t wkup[3];
- uint32_t wken[3];
- uint32_t wkst[3];
- uint32_t rst[4];
- uint32_t rstctrl[1];
- uint32_t power[4];
- uint32_t rsttime_wkup;
-
- uint32_t ev;
- uint32_t evtime[2];
-
- int dpll_lock, apll_lock[2];
-};
-
-static void omap_prcm_int_update(struct omap_prcm_s *s, int dom)
-{
- qemu_set_irq(s->irq[dom], s->irqst[dom] & s->irqen[dom]);
- /* XXX or is the mask applied before PRCM_IRQSTATUS_* ? */
-}
-
-static uint64_t omap_prcm_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_prcm_s *s = opaque;
- uint32_t ret;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x000: /* PRCM_REVISION */
- return 0x10;
-
- case 0x010: /* PRCM_SYSCONFIG */
- return s->sysconfig;
-
- case 0x018: /* PRCM_IRQSTATUS_MPU */
- return s->irqst[0];
-
- case 0x01c: /* PRCM_IRQENABLE_MPU */
- return s->irqen[0];
-
- case 0x050: /* PRCM_VOLTCTRL */
- return s->voltctrl;
- case 0x054: /* PRCM_VOLTST */
- return s->voltctrl & 3;
-
- case 0x060: /* PRCM_CLKSRC_CTRL */
- return s->clksrc[0];
- case 0x070: /* PRCM_CLKOUT_CTRL */
- return s->clkout[0];
- case 0x078: /* PRCM_CLKEMUL_CTRL */
- return s->clkemul[0];
- case 0x080: /* PRCM_CLKCFG_CTRL */
- case 0x084: /* PRCM_CLKCFG_STATUS */
- return 0;
-
- case 0x090: /* PRCM_VOLTSETUP */
- return s->setuptime[0];
-
- case 0x094: /* PRCM_CLKSSETUP */
- return s->setuptime[1];
-
- case 0x098: /* PRCM_POLCTRL */
- return s->clkpol[0];
-
- case 0x0b0: /* GENERAL_PURPOSE1 */
- case 0x0b4: /* GENERAL_PURPOSE2 */
- case 0x0b8: /* GENERAL_PURPOSE3 */
- case 0x0bc: /* GENERAL_PURPOSE4 */
- case 0x0c0: /* GENERAL_PURPOSE5 */
- case 0x0c4: /* GENERAL_PURPOSE6 */
- case 0x0c8: /* GENERAL_PURPOSE7 */
- case 0x0cc: /* GENERAL_PURPOSE8 */
- case 0x0d0: /* GENERAL_PURPOSE9 */
- case 0x0d4: /* GENERAL_PURPOSE10 */
- case 0x0d8: /* GENERAL_PURPOSE11 */
- case 0x0dc: /* GENERAL_PURPOSE12 */
- case 0x0e0: /* GENERAL_PURPOSE13 */
- case 0x0e4: /* GENERAL_PURPOSE14 */
- case 0x0e8: /* GENERAL_PURPOSE15 */
- case 0x0ec: /* GENERAL_PURPOSE16 */
- case 0x0f0: /* GENERAL_PURPOSE17 */
- case 0x0f4: /* GENERAL_PURPOSE18 */
- case 0x0f8: /* GENERAL_PURPOSE19 */
- case 0x0fc: /* GENERAL_PURPOSE20 */
- return s->scratch[(addr - 0xb0) >> 2];
-
- case 0x140: /* CM_CLKSEL_MPU */
- return s->clksel[0];
- case 0x148: /* CM_CLKSTCTRL_MPU */
- return s->clkctrl[0];
-
- case 0x158: /* RM_RSTST_MPU */
- return s->rst[0];
- case 0x1c8: /* PM_WKDEP_MPU */
- return s->wkup[0];
- case 0x1d4: /* PM_EVGENCTRL_MPU */
- return s->ev;
- case 0x1d8: /* PM_EVEGENONTIM_MPU */
- return s->evtime[0];
- case 0x1dc: /* PM_EVEGENOFFTIM_MPU */
- return s->evtime[1];
- case 0x1e0: /* PM_PWSTCTRL_MPU */
- return s->power[0];
- case 0x1e4: /* PM_PWSTST_MPU */
- return 0;
-
- case 0x200: /* CM_FCLKEN1_CORE */
- return s->clken[0];
- case 0x204: /* CM_FCLKEN2_CORE */
- return s->clken[1];
- case 0x210: /* CM_ICLKEN1_CORE */
- return s->clken[2];
- case 0x214: /* CM_ICLKEN2_CORE */
- return s->clken[3];
- case 0x21c: /* CM_ICLKEN4_CORE */
- return s->clken[4];
-
- case 0x220: /* CM_IDLEST1_CORE */
- /* TODO: check the actual iclk status */
- return 0x7ffffff9;
- case 0x224: /* CM_IDLEST2_CORE */
- /* TODO: check the actual iclk status */
- return 0x00000007;
- case 0x22c: /* CM_IDLEST4_CORE */
- /* TODO: check the actual iclk status */
- return 0x0000001f;
-
- case 0x230: /* CM_AUTOIDLE1_CORE */
- return s->clkidle[0];
- case 0x234: /* CM_AUTOIDLE2_CORE */
- return s->clkidle[1];
- case 0x238: /* CM_AUTOIDLE3_CORE */
- return s->clkidle[2];
- case 0x23c: /* CM_AUTOIDLE4_CORE */
- return s->clkidle[3];
-
- case 0x240: /* CM_CLKSEL1_CORE */
- return s->clksel[1];
- case 0x244: /* CM_CLKSEL2_CORE */
- return s->clksel[2];
-
- case 0x248: /* CM_CLKSTCTRL_CORE */
- return s->clkctrl[1];
-
- case 0x2a0: /* PM_WKEN1_CORE */
- return s->wken[0];
- case 0x2a4: /* PM_WKEN2_CORE */
- return s->wken[1];
-
- case 0x2b0: /* PM_WKST1_CORE */
- return s->wkst[0];
- case 0x2b4: /* PM_WKST2_CORE */
- return s->wkst[1];
- case 0x2c8: /* PM_WKDEP_CORE */
- return 0x1e;
-
- case 0x2e0: /* PM_PWSTCTRL_CORE */
- return s->power[1];
- case 0x2e4: /* PM_PWSTST_CORE */
- return 0x000030 | (s->power[1] & 0xfc00);
-
- case 0x300: /* CM_FCLKEN_GFX */
- return s->clken[5];
- case 0x310: /* CM_ICLKEN_GFX */
- return s->clken[6];
- case 0x320: /* CM_IDLEST_GFX */
- /* TODO: check the actual iclk status */
- return 0x00000001;
- case 0x340: /* CM_CLKSEL_GFX */
- return s->clksel[3];
- case 0x348: /* CM_CLKSTCTRL_GFX */
- return s->clkctrl[2];
- case 0x350: /* RM_RSTCTRL_GFX */
- return s->rstctrl[0];
- case 0x358: /* RM_RSTST_GFX */
- return s->rst[1];
- case 0x3c8: /* PM_WKDEP_GFX */
- return s->wkup[1];
-
- case 0x3e0: /* PM_PWSTCTRL_GFX */
- return s->power[2];
- case 0x3e4: /* PM_PWSTST_GFX */
- return s->power[2] & 3;
-
- case 0x400: /* CM_FCLKEN_WKUP */
- return s->clken[7];
- case 0x410: /* CM_ICLKEN_WKUP */
- return s->clken[8];
- case 0x420: /* CM_IDLEST_WKUP */
- /* TODO: check the actual iclk status */
- return 0x0000003f;
- case 0x430: /* CM_AUTOIDLE_WKUP */
- return s->clkidle[4];
- case 0x440: /* CM_CLKSEL_WKUP */
- return s->clksel[4];
- case 0x450: /* RM_RSTCTRL_WKUP */
- return 0;
- case 0x454: /* RM_RSTTIME_WKUP */
- return s->rsttime_wkup;
- case 0x458: /* RM_RSTST_WKUP */
- return s->rst[2];
- case 0x4a0: /* PM_WKEN_WKUP */
- return s->wken[2];
- case 0x4b0: /* PM_WKST_WKUP */
- return s->wkst[2];
-
- case 0x500: /* CM_CLKEN_PLL */
- return s->clken[9];
- case 0x520: /* CM_IDLEST_CKGEN */
- ret = 0x0000070 | (s->apll_lock[0] << 9) | (s->apll_lock[1] << 8);
- if (!(s->clksel[6] & 3))
- /* Core uses 32-kHz clock */
- ret |= 3 << 0;
- else if (!s->dpll_lock)
- /* DPLL not locked, core uses ref_clk */
- ret |= 1 << 0;
- else
- /* Core uses DPLL */
- ret |= 2 << 0;
- return ret;
- case 0x530: /* CM_AUTOIDLE_PLL */
- return s->clkidle[5];
- case 0x540: /* CM_CLKSEL1_PLL */
- return s->clksel[5];
- case 0x544: /* CM_CLKSEL2_PLL */
- return s->clksel[6];
-
- case 0x800: /* CM_FCLKEN_DSP */
- return s->clken[10];
- case 0x810: /* CM_ICLKEN_DSP */
- return s->clken[11];
- case 0x820: /* CM_IDLEST_DSP */
- /* TODO: check the actual iclk status */
- return 0x00000103;
- case 0x830: /* CM_AUTOIDLE_DSP */
- return s->clkidle[6];
- case 0x840: /* CM_CLKSEL_DSP */
- return s->clksel[7];
- case 0x848: /* CM_CLKSTCTRL_DSP */
- return s->clkctrl[3];
- case 0x850: /* RM_RSTCTRL_DSP */
- return 0;
- case 0x858: /* RM_RSTST_DSP */
- return s->rst[3];
- case 0x8c8: /* PM_WKDEP_DSP */
- return s->wkup[2];
- case 0x8e0: /* PM_PWSTCTRL_DSP */
- return s->power[3];
- case 0x8e4: /* PM_PWSTST_DSP */
- return 0x008030 | (s->power[3] & 0x3003);
-
- case 0x8f0: /* PRCM_IRQSTATUS_DSP */
- return s->irqst[1];
- case 0x8f4: /* PRCM_IRQENABLE_DSP */
- return s->irqen[1];
-
- case 0x8f8: /* PRCM_IRQSTATUS_IVA */
- return s->irqst[2];
- case 0x8fc: /* PRCM_IRQENABLE_IVA */
- return s->irqen[2];
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_prcm_apll_update(struct omap_prcm_s *s)
-{
- int mode[2];
-
- mode[0] = (s->clken[9] >> 6) & 3;
- s->apll_lock[0] = (mode[0] == 3);
- mode[1] = (s->clken[9] >> 2) & 3;
- s->apll_lock[1] = (mode[1] == 3);
- /* TODO: update clocks */
-
- if (mode[0] == 1 || mode[0] == 2 || mode[1] == 1 || mode[1] == 2)
- fprintf(stderr, "%s: bad EN_54M_PLL or bad EN_96M_PLL\n",
- __func__);
-}
-
-static void omap_prcm_dpll_update(struct omap_prcm_s *s)
-{
- omap_clk dpll = omap_findclk(s->mpu, "dpll");
- omap_clk dpll_x2 = omap_findclk(s->mpu, "dpll");
- omap_clk core = omap_findclk(s->mpu, "core_clk");
- int mode = (s->clken[9] >> 0) & 3;
- int mult, div;
-
- mult = (s->clksel[5] >> 12) & 0x3ff;
- div = (s->clksel[5] >> 8) & 0xf;
- if (mult == 0 || mult == 1)
- mode = 1; /* Bypass */
-
- s->dpll_lock = 0;
- switch (mode) {
- case 0:
- fprintf(stderr, "%s: bad EN_DPLL\n", __func__);
- break;
- case 1: /* Low-power bypass mode (Default) */
- case 2: /* Fast-relock bypass mode */
- omap_clk_setrate(dpll, 1, 1);
- omap_clk_setrate(dpll_x2, 1, 1);
- break;
- case 3: /* Lock mode */
- s->dpll_lock = 1; /* After 20 FINT cycles (ref_clk / (div + 1)). */
-
- omap_clk_setrate(dpll, div + 1, mult);
- omap_clk_setrate(dpll_x2, div + 1, mult * 2);
- break;
- }
-
- switch ((s->clksel[6] >> 0) & 3) {
- case 0:
- omap_clk_reparent(core, omap_findclk(s->mpu, "clk32-kHz"));
- break;
- case 1:
- omap_clk_reparent(core, dpll);
- break;
- case 2:
- /* Default */
- omap_clk_reparent(core, dpll_x2);
- break;
- case 3:
- fprintf(stderr, "%s: bad CORE_CLK_SRC\n", __func__);
- break;
- }
-}
-
-static void omap_prcm_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_prcm_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x000: /* PRCM_REVISION */
- case 0x054: /* PRCM_VOLTST */
- case 0x084: /* PRCM_CLKCFG_STATUS */
- case 0x1e4: /* PM_PWSTST_MPU */
- case 0x220: /* CM_IDLEST1_CORE */
- case 0x224: /* CM_IDLEST2_CORE */
- case 0x22c: /* CM_IDLEST4_CORE */
- case 0x2c8: /* PM_WKDEP_CORE */
- case 0x2e4: /* PM_PWSTST_CORE */
- case 0x320: /* CM_IDLEST_GFX */
- case 0x3e4: /* PM_PWSTST_GFX */
- case 0x420: /* CM_IDLEST_WKUP */
- case 0x520: /* CM_IDLEST_CKGEN */
- case 0x820: /* CM_IDLEST_DSP */
- case 0x8e4: /* PM_PWSTST_DSP */
- OMAP_RO_REG(addr);
- return;
-
- case 0x010: /* PRCM_SYSCONFIG */
- s->sysconfig = value & 1;
- break;
-
- case 0x018: /* PRCM_IRQSTATUS_MPU */
- s->irqst[0] &= ~value;
- omap_prcm_int_update(s, 0);
- break;
- case 0x01c: /* PRCM_IRQENABLE_MPU */
- s->irqen[0] = value & 0x3f;
- omap_prcm_int_update(s, 0);
- break;
-
- case 0x050: /* PRCM_VOLTCTRL */
- s->voltctrl = value & 0xf1c3;
- break;
-
- case 0x060: /* PRCM_CLKSRC_CTRL */
- s->clksrc[0] = value & 0xdb;
- /* TODO update clocks */
- break;
-
- case 0x070: /* PRCM_CLKOUT_CTRL */
- s->clkout[0] = value & 0xbbbb;
- /* TODO update clocks */
- break;
-
- case 0x078: /* PRCM_CLKEMUL_CTRL */
- s->clkemul[0] = value & 1;
- /* TODO update clocks */
- break;
-
- case 0x080: /* PRCM_CLKCFG_CTRL */
- break;
-
- case 0x090: /* PRCM_VOLTSETUP */
- s->setuptime[0] = value & 0xffff;
- break;
- case 0x094: /* PRCM_CLKSSETUP */
- s->setuptime[1] = value & 0xffff;
- break;
-
- case 0x098: /* PRCM_POLCTRL */
- s->clkpol[0] = value & 0x701;
- break;
-
- case 0x0b0: /* GENERAL_PURPOSE1 */
- case 0x0b4: /* GENERAL_PURPOSE2 */
- case 0x0b8: /* GENERAL_PURPOSE3 */
- case 0x0bc: /* GENERAL_PURPOSE4 */
- case 0x0c0: /* GENERAL_PURPOSE5 */
- case 0x0c4: /* GENERAL_PURPOSE6 */
- case 0x0c8: /* GENERAL_PURPOSE7 */
- case 0x0cc: /* GENERAL_PURPOSE8 */
- case 0x0d0: /* GENERAL_PURPOSE9 */
- case 0x0d4: /* GENERAL_PURPOSE10 */
- case 0x0d8: /* GENERAL_PURPOSE11 */
- case 0x0dc: /* GENERAL_PURPOSE12 */
- case 0x0e0: /* GENERAL_PURPOSE13 */
- case 0x0e4: /* GENERAL_PURPOSE14 */
- case 0x0e8: /* GENERAL_PURPOSE15 */
- case 0x0ec: /* GENERAL_PURPOSE16 */
- case 0x0f0: /* GENERAL_PURPOSE17 */
- case 0x0f4: /* GENERAL_PURPOSE18 */
- case 0x0f8: /* GENERAL_PURPOSE19 */
- case 0x0fc: /* GENERAL_PURPOSE20 */
- s->scratch[(addr - 0xb0) >> 2] = value;
- break;
-
- case 0x140: /* CM_CLKSEL_MPU */
- s->clksel[0] = value & 0x1f;
- /* TODO update clocks */
- break;
- case 0x148: /* CM_CLKSTCTRL_MPU */
- s->clkctrl[0] = value & 0x1f;
- break;
-
- case 0x158: /* RM_RSTST_MPU */
- s->rst[0] &= ~value;
- break;
- case 0x1c8: /* PM_WKDEP_MPU */
- s->wkup[0] = value & 0x15;
- break;
-
- case 0x1d4: /* PM_EVGENCTRL_MPU */
- s->ev = value & 0x1f;
- break;
- case 0x1d8: /* PM_EVEGENONTIM_MPU */
- s->evtime[0] = value;
- break;
- case 0x1dc: /* PM_EVEGENOFFTIM_MPU */
- s->evtime[1] = value;
- break;
-
- case 0x1e0: /* PM_PWSTCTRL_MPU */
- s->power[0] = value & 0xc0f;
- break;
-
- case 0x200: /* CM_FCLKEN1_CORE */
- s->clken[0] = value & 0xbfffffff;
- /* TODO update clocks */
- /* The EN_EAC bit only gets/puts func_96m_clk. */
- break;
- case 0x204: /* CM_FCLKEN2_CORE */
- s->clken[1] = value & 0x00000007;
- /* TODO update clocks */
- break;
- case 0x210: /* CM_ICLKEN1_CORE */
- s->clken[2] = value & 0xfffffff9;
- /* TODO update clocks */
- /* The EN_EAC bit only gets/puts core_l4_iclk. */
- break;
- case 0x214: /* CM_ICLKEN2_CORE */
- s->clken[3] = value & 0x00000007;
- /* TODO update clocks */
- break;
- case 0x21c: /* CM_ICLKEN4_CORE */
- s->clken[4] = value & 0x0000001f;
- /* TODO update clocks */
- break;
-
- case 0x230: /* CM_AUTOIDLE1_CORE */
- s->clkidle[0] = value & 0xfffffff9;
- /* TODO update clocks */
- break;
- case 0x234: /* CM_AUTOIDLE2_CORE */
- s->clkidle[1] = value & 0x00000007;
- /* TODO update clocks */
- break;
- case 0x238: /* CM_AUTOIDLE3_CORE */
- s->clkidle[2] = value & 0x00000007;
- /* TODO update clocks */
- break;
- case 0x23c: /* CM_AUTOIDLE4_CORE */
- s->clkidle[3] = value & 0x0000001f;
- /* TODO update clocks */
- break;
-
- case 0x240: /* CM_CLKSEL1_CORE */
- s->clksel[1] = value & 0x0fffbf7f;
- /* TODO update clocks */
- break;
-
- case 0x244: /* CM_CLKSEL2_CORE */
- s->clksel[2] = value & 0x00fffffc;
- /* TODO update clocks */
- break;
-
- case 0x248: /* CM_CLKSTCTRL_CORE */
- s->clkctrl[1] = value & 0x7;
- break;
-
- case 0x2a0: /* PM_WKEN1_CORE */
- s->wken[0] = value & 0x04667ff8;
- break;
- case 0x2a4: /* PM_WKEN2_CORE */
- s->wken[1] = value & 0x00000005;
- break;
-
- case 0x2b0: /* PM_WKST1_CORE */
- s->wkst[0] &= ~value;
- break;
- case 0x2b4: /* PM_WKST2_CORE */
- s->wkst[1] &= ~value;
- break;
-
- case 0x2e0: /* PM_PWSTCTRL_CORE */
- s->power[1] = (value & 0x00fc3f) | (1 << 2);
- break;
-
- case 0x300: /* CM_FCLKEN_GFX */
- s->clken[5] = value & 6;
- /* TODO update clocks */
- break;
- case 0x310: /* CM_ICLKEN_GFX */
- s->clken[6] = value & 1;
- /* TODO update clocks */
- break;
- case 0x340: /* CM_CLKSEL_GFX */
- s->clksel[3] = value & 7;
- /* TODO update clocks */
- break;
- case 0x348: /* CM_CLKSTCTRL_GFX */
- s->clkctrl[2] = value & 1;
- break;
- case 0x350: /* RM_RSTCTRL_GFX */
- s->rstctrl[0] = value & 1;
- /* TODO: reset */
- break;
- case 0x358: /* RM_RSTST_GFX */
- s->rst[1] &= ~value;
- break;
- case 0x3c8: /* PM_WKDEP_GFX */
- s->wkup[1] = value & 0x13;
- break;
- case 0x3e0: /* PM_PWSTCTRL_GFX */
- s->power[2] = (value & 0x00c0f) | (3 << 2);
- break;
-
- case 0x400: /* CM_FCLKEN_WKUP */
- s->clken[7] = value & 0xd;
- /* TODO update clocks */
- break;
- case 0x410: /* CM_ICLKEN_WKUP */
- s->clken[8] = value & 0x3f;
- /* TODO update clocks */
- break;
- case 0x430: /* CM_AUTOIDLE_WKUP */
- s->clkidle[4] = value & 0x0000003f;
- /* TODO update clocks */
- break;
- case 0x440: /* CM_CLKSEL_WKUP */
- s->clksel[4] = value & 3;
- /* TODO update clocks */
- break;
- case 0x450: /* RM_RSTCTRL_WKUP */
- /* TODO: reset */
- if (value & 2)
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
- break;
- case 0x454: /* RM_RSTTIME_WKUP */
- s->rsttime_wkup = value & 0x1fff;
- break;
- case 0x458: /* RM_RSTST_WKUP */
- s->rst[2] &= ~value;
- break;
- case 0x4a0: /* PM_WKEN_WKUP */
- s->wken[2] = value & 0x00000005;
- break;
- case 0x4b0: /* PM_WKST_WKUP */
- s->wkst[2] &= ~value;
- break;
-
- case 0x500: /* CM_CLKEN_PLL */
- if (value & 0xffffff30)
- fprintf(stderr, "%s: write 0s in CM_CLKEN_PLL for "
- "future compatibility\n", __func__);
- if ((s->clken[9] ^ value) & 0xcc) {
- s->clken[9] &= ~0xcc;
- s->clken[9] |= value & 0xcc;
- omap_prcm_apll_update(s);
- }
- if ((s->clken[9] ^ value) & 3) {
- s->clken[9] &= ~3;
- s->clken[9] |= value & 3;
- omap_prcm_dpll_update(s);
- }
- break;
- case 0x530: /* CM_AUTOIDLE_PLL */
- s->clkidle[5] = value & 0x000000cf;
- /* TODO update clocks */
- break;
- case 0x540: /* CM_CLKSEL1_PLL */
- if (value & 0xfc4000d7)
- fprintf(stderr, "%s: write 0s in CM_CLKSEL1_PLL for "
- "future compatibility\n", __func__);
- if ((s->clksel[5] ^ value) & 0x003fff00) {
- s->clksel[5] = value & 0x03bfff28;
- omap_prcm_dpll_update(s);
- }
- /* TODO update the other clocks */
-
- s->clksel[5] = value & 0x03bfff28;
- break;
- case 0x544: /* CM_CLKSEL2_PLL */
- if (value & ~3)
- fprintf(stderr, "%s: write 0s in CM_CLKSEL2_PLL[31:2] for "
- "future compatibility\n", __func__);
- if (s->clksel[6] != (value & 3)) {
- s->clksel[6] = value & 3;
- omap_prcm_dpll_update(s);
- }
- break;
-
- case 0x800: /* CM_FCLKEN_DSP */
- s->clken[10] = value & 0x501;
- /* TODO update clocks */
- break;
- case 0x810: /* CM_ICLKEN_DSP */
- s->clken[11] = value & 0x2;
- /* TODO update clocks */
- break;
- case 0x830: /* CM_AUTOIDLE_DSP */
- s->clkidle[6] = value & 0x2;
- /* TODO update clocks */
- break;
- case 0x840: /* CM_CLKSEL_DSP */
- s->clksel[7] = value & 0x3fff;
- /* TODO update clocks */
- break;
- case 0x848: /* CM_CLKSTCTRL_DSP */
- s->clkctrl[3] = value & 0x101;
- break;
- case 0x850: /* RM_RSTCTRL_DSP */
- /* TODO: reset */
- break;
- case 0x858: /* RM_RSTST_DSP */
- s->rst[3] &= ~value;
- break;
- case 0x8c8: /* PM_WKDEP_DSP */
- s->wkup[2] = value & 0x13;
- break;
- case 0x8e0: /* PM_PWSTCTRL_DSP */
- s->power[3] = (value & 0x03017) | (3 << 2);
- break;
-
- case 0x8f0: /* PRCM_IRQSTATUS_DSP */
- s->irqst[1] &= ~value;
- omap_prcm_int_update(s, 1);
- break;
- case 0x8f4: /* PRCM_IRQENABLE_DSP */
- s->irqen[1] = value & 0x7;
- omap_prcm_int_update(s, 1);
- break;
-
- case 0x8f8: /* PRCM_IRQSTATUS_IVA */
- s->irqst[2] &= ~value;
- omap_prcm_int_update(s, 2);
- break;
- case 0x8fc: /* PRCM_IRQENABLE_IVA */
- s->irqen[2] = value & 0x7;
- omap_prcm_int_update(s, 2);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_prcm_ops = {
- .read = omap_prcm_read,
- .write = omap_prcm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void omap_prcm_reset(struct omap_prcm_s *s)
-{
- s->sysconfig = 0;
- s->irqst[0] = 0;
- s->irqst[1] = 0;
- s->irqst[2] = 0;
- s->irqen[0] = 0;
- s->irqen[1] = 0;
- s->irqen[2] = 0;
- s->voltctrl = 0x1040;
- s->ev = 0x14;
- s->evtime[0] = 0;
- s->evtime[1] = 0;
- s->clkctrl[0] = 0;
- s->clkctrl[1] = 0;
- s->clkctrl[2] = 0;
- s->clkctrl[3] = 0;
- s->clken[1] = 7;
- s->clken[3] = 7;
- s->clken[4] = 0;
- s->clken[5] = 0;
- s->clken[6] = 0;
- s->clken[7] = 0xc;
- s->clken[8] = 0x3e;
- s->clken[9] = 0x0d;
- s->clken[10] = 0;
- s->clken[11] = 0;
- s->clkidle[0] = 0;
- s->clkidle[2] = 7;
- s->clkidle[3] = 0;
- s->clkidle[4] = 0;
- s->clkidle[5] = 0x0c;
- s->clkidle[6] = 0;
- s->clksel[0] = 0x01;
- s->clksel[1] = 0x02100121;
- s->clksel[2] = 0x00000000;
- s->clksel[3] = 0x01;
- s->clksel[4] = 0;
- s->clksel[7] = 0x0121;
- s->wkup[0] = 0x15;
- s->wkup[1] = 0x13;
- s->wkup[2] = 0x13;
- s->wken[0] = 0x04667ff8;
- s->wken[1] = 0x00000005;
- s->wken[2] = 5;
- s->wkst[0] = 0;
- s->wkst[1] = 0;
- s->wkst[2] = 0;
- s->power[0] = 0x00c;
- s->power[1] = 4;
- s->power[2] = 0x0000c;
- s->power[3] = 0x14;
- s->rstctrl[0] = 1;
- s->rst[3] = 1;
- omap_prcm_apll_update(s);
- omap_prcm_dpll_update(s);
-}
-
-static void omap_prcm_coldreset(struct omap_prcm_s *s)
-{
- s->setuptime[0] = 0;
- s->setuptime[1] = 0;
- memset(&s->scratch, 0, sizeof(s->scratch));
- s->rst[0] = 0x01;
- s->rst[1] = 0x00;
- s->rst[2] = 0x01;
- s->clken[0] = 0;
- s->clken[2] = 0;
- s->clkidle[1] = 0;
- s->clksel[5] = 0;
- s->clksel[6] = 2;
- s->clksrc[0] = 0x43;
- s->clkout[0] = 0x0303;
- s->clkemul[0] = 0;
- s->clkpol[0] = 0x100;
- s->rsttime_wkup = 0x1002;
-
- omap_prcm_reset(s);
-}
-
-static struct omap_prcm_s *omap_prcm_init(struct omap_target_agent_s *ta,
- qemu_irq mpu_int, qemu_irq dsp_int, qemu_irq iva_int,
- struct omap_mpu_state_s *mpu)
-{
- struct omap_prcm_s *s = g_new0(struct omap_prcm_s, 1);
-
- s->irq[0] = mpu_int;
- s->irq[1] = dsp_int;
- s->irq[2] = iva_int;
- s->mpu = mpu;
- omap_prcm_coldreset(s);
-
- memory_region_init_io(&s->iomem0, NULL, &omap_prcm_ops, s, "omap.pcrm0",
- omap_l4_region_size(ta, 0));
- memory_region_init_io(&s->iomem1, NULL, &omap_prcm_ops, s, "omap.pcrm1",
- omap_l4_region_size(ta, 1));
- omap_l4_attach(ta, 0, &s->iomem0);
- omap_l4_attach(ta, 1, &s->iomem1);
-
- return s;
-}
-
-/* System and Pinout control */
-struct omap_sysctl_s {
- struct omap_mpu_state_s *mpu;
- MemoryRegion iomem;
-
- uint32_t sysconfig;
- uint32_t devconfig;
- uint32_t psaconfig;
- uint32_t padconf[0x45];
- uint8_t obs;
- uint32_t msuspendmux[5];
-};
-
-static uint32_t omap_sysctl_read8(void *opaque, hwaddr addr)
-{
-
- struct omap_sysctl_s *s = opaque;
- int pad_offset, byte_offset;
- int value;
-
- switch (addr) {
- case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
- pad_offset = (addr - 0x30) >> 2;
- byte_offset = (addr - 0x30) & (4 - 1);
-
- value = s->padconf[pad_offset];
- value = (value >> (byte_offset * 8)) & 0xff;
-
- return value;
-
- default:
- break;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static uint32_t omap_sysctl_read(void *opaque, hwaddr addr)
-{
- struct omap_sysctl_s *s = opaque;
-
- switch (addr) {
- case 0x000: /* CONTROL_REVISION */
- return 0x20;
-
- case 0x010: /* CONTROL_SYSCONFIG */
- return s->sysconfig;
-
- case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
- return s->padconf[(addr - 0x30) >> 2];
-
- case 0x270: /* CONTROL_DEBOBS */
- return s->obs;
-
- case 0x274: /* CONTROL_DEVCONF */
- return s->devconfig;
-
- case 0x28c: /* CONTROL_EMU_SUPPORT */
- return 0;
-
- case 0x290: /* CONTROL_MSUSPENDMUX_0 */
- return s->msuspendmux[0];
- case 0x294: /* CONTROL_MSUSPENDMUX_1 */
- return s->msuspendmux[1];
- case 0x298: /* CONTROL_MSUSPENDMUX_2 */
- return s->msuspendmux[2];
- case 0x29c: /* CONTROL_MSUSPENDMUX_3 */
- return s->msuspendmux[3];
- case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */
- return s->msuspendmux[4];
- case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */
- return 0;
-
- case 0x2b8: /* CONTROL_PSA_CTRL */
- return s->psaconfig;
- case 0x2bc: /* CONTROL_PSA_CMD */
- case 0x2c0: /* CONTROL_PSA_VALUE */
- return 0;
-
- case 0x2b0: /* CONTROL_SEC_CTRL */
- return 0x800000f1;
- case 0x2d0: /* CONTROL_SEC_EMU */
- return 0x80000015;
- case 0x2d4: /* CONTROL_SEC_TAP */
- return 0x8000007f;
- case 0x2b4: /* CONTROL_SEC_TEST */
- case 0x2f0: /* CONTROL_SEC_STATUS */
- case 0x2f4: /* CONTROL_SEC_ERR_STATUS */
- /* Secure mode is not present on general-purpose device. Outside
- * secure mode these values cannot be read or written. */
- return 0;
-
- case 0x2d8: /* CONTROL_OCM_RAM_PERM */
- return 0xff;
- case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */
- case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */
- case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */
- /* No secure mode so no Extended Secure RAM present. */
- return 0;
-
- case 0x2f8: /* CONTROL_STATUS */
- /* Device Type => General-purpose */
- return 0x0300;
- case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */
-
- case 0x300: /* CONTROL_RPUB_KEY_H_0 */
- case 0x304: /* CONTROL_RPUB_KEY_H_1 */
- case 0x308: /* CONTROL_RPUB_KEY_H_2 */
- case 0x30c: /* CONTROL_RPUB_KEY_H_3 */
- return 0xdecafbad;
-
- case 0x310: /* CONTROL_RAND_KEY_0 */
- case 0x314: /* CONTROL_RAND_KEY_1 */
- case 0x318: /* CONTROL_RAND_KEY_2 */
- case 0x31c: /* CONTROL_RAND_KEY_3 */
- case 0x320: /* CONTROL_CUST_KEY_0 */
- case 0x324: /* CONTROL_CUST_KEY_1 */
- case 0x330: /* CONTROL_TEST_KEY_0 */
- case 0x334: /* CONTROL_TEST_KEY_1 */
- case 0x338: /* CONTROL_TEST_KEY_2 */
- case 0x33c: /* CONTROL_TEST_KEY_3 */
- case 0x340: /* CONTROL_TEST_KEY_4 */
- case 0x344: /* CONTROL_TEST_KEY_5 */
- case 0x348: /* CONTROL_TEST_KEY_6 */
- case 0x34c: /* CONTROL_TEST_KEY_7 */
- case 0x350: /* CONTROL_TEST_KEY_8 */
- case 0x354: /* CONTROL_TEST_KEY_9 */
- /* Can only be accessed in secure mode and when C_FieldAccEnable
- * bit is set in CONTROL_SEC_CTRL.
- * TODO: otherwise an interconnect access error is generated. */
- return 0;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_sysctl_write8(void *opaque, hwaddr addr, uint32_t value)
-{
- struct omap_sysctl_s *s = opaque;
- int pad_offset, byte_offset;
- int prev_value;
-
- switch (addr) {
- case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
- pad_offset = (addr - 0x30) >> 2;
- byte_offset = (addr - 0x30) & (4 - 1);
-
- prev_value = s->padconf[pad_offset];
- prev_value &= ~(0xff << (byte_offset * 8));
- prev_value |= ((value & 0x1f1f1f1f) << (byte_offset * 8)) & 0x1f1f1f1f;
- s->padconf[pad_offset] = prev_value;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- break;
- }
-}
-
-static void omap_sysctl_write(void *opaque, hwaddr addr, uint32_t value)
-{
- struct omap_sysctl_s *s = opaque;
-
- switch (addr) {
- case 0x000: /* CONTROL_REVISION */
- case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */
- case 0x2c0: /* CONTROL_PSA_VALUE */
- case 0x2f8: /* CONTROL_STATUS */
- case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */
- case 0x300: /* CONTROL_RPUB_KEY_H_0 */
- case 0x304: /* CONTROL_RPUB_KEY_H_1 */
- case 0x308: /* CONTROL_RPUB_KEY_H_2 */
- case 0x30c: /* CONTROL_RPUB_KEY_H_3 */
- case 0x310: /* CONTROL_RAND_KEY_0 */
- case 0x314: /* CONTROL_RAND_KEY_1 */
- case 0x318: /* CONTROL_RAND_KEY_2 */
- case 0x31c: /* CONTROL_RAND_KEY_3 */
- case 0x320: /* CONTROL_CUST_KEY_0 */
- case 0x324: /* CONTROL_CUST_KEY_1 */
- case 0x330: /* CONTROL_TEST_KEY_0 */
- case 0x334: /* CONTROL_TEST_KEY_1 */
- case 0x338: /* CONTROL_TEST_KEY_2 */
- case 0x33c: /* CONTROL_TEST_KEY_3 */
- case 0x340: /* CONTROL_TEST_KEY_4 */
- case 0x344: /* CONTROL_TEST_KEY_5 */
- case 0x348: /* CONTROL_TEST_KEY_6 */
- case 0x34c: /* CONTROL_TEST_KEY_7 */
- case 0x350: /* CONTROL_TEST_KEY_8 */
- case 0x354: /* CONTROL_TEST_KEY_9 */
- OMAP_RO_REG(addr);
- return;
-
- case 0x010: /* CONTROL_SYSCONFIG */
- s->sysconfig = value & 0x1e;
- break;
-
- case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
- /* XXX: should check constant bits */
- s->padconf[(addr - 0x30) >> 2] = value & 0x1f1f1f1f;
- break;
-
- case 0x270: /* CONTROL_DEBOBS */
- s->obs = value & 0xff;
- break;
-
- case 0x274: /* CONTROL_DEVCONF */
- s->devconfig = value & 0xffffc7ff;
- break;
-
- case 0x28c: /* CONTROL_EMU_SUPPORT */
- break;
-
- case 0x290: /* CONTROL_MSUSPENDMUX_0 */
- s->msuspendmux[0] = value & 0x3fffffff;
- break;
- case 0x294: /* CONTROL_MSUSPENDMUX_1 */
- s->msuspendmux[1] = value & 0x3fffffff;
- break;
- case 0x298: /* CONTROL_MSUSPENDMUX_2 */
- s->msuspendmux[2] = value & 0x3fffffff;
- break;
- case 0x29c: /* CONTROL_MSUSPENDMUX_3 */
- s->msuspendmux[3] = value & 0x3fffffff;
- break;
- case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */
- s->msuspendmux[4] = value & 0x3fffffff;
- break;
-
- case 0x2b8: /* CONTROL_PSA_CTRL */
- s->psaconfig = value & 0x1c;
- s->psaconfig |= (value & 0x20) ? 2 : 1;
- break;
- case 0x2bc: /* CONTROL_PSA_CMD */
- break;
-
- case 0x2b0: /* CONTROL_SEC_CTRL */
- case 0x2b4: /* CONTROL_SEC_TEST */
- case 0x2d0: /* CONTROL_SEC_EMU */
- case 0x2d4: /* CONTROL_SEC_TAP */
- case 0x2d8: /* CONTROL_OCM_RAM_PERM */
- case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */
- case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */
- case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */
- case 0x2f0: /* CONTROL_SEC_STATUS */
- case 0x2f4: /* CONTROL_SEC_ERR_STATUS */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static uint64_t omap_sysctl_readfn(void *opaque, hwaddr addr,
- unsigned size)
-{
- switch (size) {
- case 1:
- return omap_sysctl_read8(opaque, addr);
- case 2:
- return omap_badwidth_read32(opaque, addr); /* TODO */
- case 4:
- return omap_sysctl_read(opaque, addr);
- default:
- g_assert_not_reached();
- }
-}
-
-static void omap_sysctl_writefn(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- switch (size) {
- case 1:
- omap_sysctl_write8(opaque, addr, value);
- break;
- case 2:
- omap_badwidth_write32(opaque, addr, value); /* TODO */
- break;
- case 4:
- omap_sysctl_write(opaque, addr, value);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static const MemoryRegionOps omap_sysctl_ops = {
- .read = omap_sysctl_readfn,
- .write = omap_sysctl_writefn,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void omap_sysctl_reset(struct omap_sysctl_s *s)
-{
- /* (power-on reset) */
- s->sysconfig = 0;
- s->obs = 0;
- s->devconfig = 0x0c000000;
- s->msuspendmux[0] = 0x00000000;
- s->msuspendmux[1] = 0x00000000;
- s->msuspendmux[2] = 0x00000000;
- s->msuspendmux[3] = 0x00000000;
- s->msuspendmux[4] = 0x00000000;
- s->psaconfig = 1;
-
- s->padconf[0x00] = 0x000f0f0f;
- s->padconf[0x01] = 0x00000000;
- s->padconf[0x02] = 0x00000000;
- s->padconf[0x03] = 0x00000000;
- s->padconf[0x04] = 0x00000000;
- s->padconf[0x05] = 0x00000000;
- s->padconf[0x06] = 0x00000000;
- s->padconf[0x07] = 0x00000000;
- s->padconf[0x08] = 0x08080800;
- s->padconf[0x09] = 0x08080808;
- s->padconf[0x0a] = 0x08080808;
- s->padconf[0x0b] = 0x08080808;
- s->padconf[0x0c] = 0x08080808;
- s->padconf[0x0d] = 0x08080800;
- s->padconf[0x0e] = 0x08080808;
- s->padconf[0x0f] = 0x08080808;
- s->padconf[0x10] = 0x18181808; /* | 0x07070700 if SBoot3 */
- s->padconf[0x11] = 0x18181818; /* | 0x07070707 if SBoot3 */
- s->padconf[0x12] = 0x18181818; /* | 0x07070707 if SBoot3 */
- s->padconf[0x13] = 0x18181818; /* | 0x07070707 if SBoot3 */
- s->padconf[0x14] = 0x18181818; /* | 0x00070707 if SBoot3 */
- s->padconf[0x15] = 0x18181818;
- s->padconf[0x16] = 0x18181818; /* | 0x07000000 if SBoot3 */
- s->padconf[0x17] = 0x1f001f00;
- s->padconf[0x18] = 0x1f1f1f1f;
- s->padconf[0x19] = 0x00000000;
- s->padconf[0x1a] = 0x1f180000;
- s->padconf[0x1b] = 0x00001f1f;
- s->padconf[0x1c] = 0x1f001f00;
- s->padconf[0x1d] = 0x00000000;
- s->padconf[0x1e] = 0x00000000;
- s->padconf[0x1f] = 0x08000000;
- s->padconf[0x20] = 0x08080808;
- s->padconf[0x21] = 0x08080808;
- s->padconf[0x22] = 0x0f080808;
- s->padconf[0x23] = 0x0f0f0f0f;
- s->padconf[0x24] = 0x000f0f0f;
- s->padconf[0x25] = 0x1f1f1f0f;
- s->padconf[0x26] = 0x080f0f1f;
- s->padconf[0x27] = 0x070f1808;
- s->padconf[0x28] = 0x0f070707;
- s->padconf[0x29] = 0x000f0f1f;
- s->padconf[0x2a] = 0x0f0f0f1f;
- s->padconf[0x2b] = 0x08000000;
- s->padconf[0x2c] = 0x0000001f;
- s->padconf[0x2d] = 0x0f0f1f00;
- s->padconf[0x2e] = 0x1f1f0f0f;
- s->padconf[0x2f] = 0x0f1f1f1f;
- s->padconf[0x30] = 0x0f0f0f0f;
- s->padconf[0x31] = 0x0f1f0f1f;
- s->padconf[0x32] = 0x0f0f0f0f;
- s->padconf[0x33] = 0x0f1f0f1f;
- s->padconf[0x34] = 0x1f1f0f0f;
- s->padconf[0x35] = 0x0f0f1f1f;
- s->padconf[0x36] = 0x0f0f1f0f;
- s->padconf[0x37] = 0x0f0f0f0f;
- s->padconf[0x38] = 0x1f18180f;
- s->padconf[0x39] = 0x1f1f1f1f;
- s->padconf[0x3a] = 0x00001f1f;
- s->padconf[0x3b] = 0x00000000;
- s->padconf[0x3c] = 0x00000000;
- s->padconf[0x3d] = 0x0f0f0f0f;
- s->padconf[0x3e] = 0x18000f0f;
- s->padconf[0x3f] = 0x00070000;
- s->padconf[0x40] = 0x00000707;
- s->padconf[0x41] = 0x0f1f0700;
- s->padconf[0x42] = 0x1f1f070f;
- s->padconf[0x43] = 0x0008081f;
- s->padconf[0x44] = 0x00000800;
-}
-
-static struct omap_sysctl_s *omap_sysctl_init(struct omap_target_agent_s *ta,
- omap_clk iclk, struct omap_mpu_state_s *mpu)
-{
- struct omap_sysctl_s *s = g_new0(struct omap_sysctl_s, 1);
-
- s->mpu = mpu;
- omap_sysctl_reset(s);
-
- memory_region_init_io(&s->iomem, NULL, &omap_sysctl_ops, s, "omap.sysctl",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- return s;
-}
-
-/* General chip reset */
-static void omap2_mpu_reset(void *opaque)
-{
- struct omap_mpu_state_s *mpu = opaque;
-
- omap_dma_reset(mpu->dma);
- omap_prcm_reset(mpu->prcm);
- omap_sysctl_reset(mpu->sysc);
- omap_gp_timer_reset(mpu->gptimer[0]);
- omap_gp_timer_reset(mpu->gptimer[1]);
- omap_gp_timer_reset(mpu->gptimer[2]);
- omap_gp_timer_reset(mpu->gptimer[3]);
- omap_gp_timer_reset(mpu->gptimer[4]);
- omap_gp_timer_reset(mpu->gptimer[5]);
- omap_gp_timer_reset(mpu->gptimer[6]);
- omap_gp_timer_reset(mpu->gptimer[7]);
- omap_gp_timer_reset(mpu->gptimer[8]);
- omap_gp_timer_reset(mpu->gptimer[9]);
- omap_gp_timer_reset(mpu->gptimer[10]);
- omap_gp_timer_reset(mpu->gptimer[11]);
- omap_synctimer_reset(mpu->synctimer);
- omap_sdrc_reset(mpu->sdrc);
- omap_gpmc_reset(mpu->gpmc);
- omap_dss_reset(mpu->dss);
- omap_uart_reset(mpu->uart[0]);
- omap_uart_reset(mpu->uart[1]);
- omap_uart_reset(mpu->uart[2]);
- omap_mmc_reset(mpu->mmc);
- omap_mcspi_reset(mpu->mcspi[0]);
- omap_mcspi_reset(mpu->mcspi[1]);
- cpu_reset(CPU(mpu->cpu));
-}
-
-static int omap2_validate_addr(struct omap_mpu_state_s *s,
- hwaddr addr)
-{
- return 1;
-}
-
-static const struct dma_irq_map omap2_dma_irq_map[] = {
- { 0, OMAP_INT_24XX_SDMA_IRQ0 },
- { 0, OMAP_INT_24XX_SDMA_IRQ1 },
- { 0, OMAP_INT_24XX_SDMA_IRQ2 },
- { 0, OMAP_INT_24XX_SDMA_IRQ3 },
-};
-
-struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram,
- const char *cpu_type)
-{
- struct omap_mpu_state_s *s = g_new0(struct omap_mpu_state_s, 1);
- qemu_irq dma_irqs[4];
- DriveInfo *dinfo;
- int i;
- SysBusDevice *busdev;
- struct omap_target_agent_s *ta;
- MemoryRegion *sysmem = get_system_memory();
-
- /* Core */
- s->mpu_model = omap2420;
- s->cpu = ARM_CPU(cpu_create(cpu_type));
- s->sram_size = OMAP242X_SRAM_SIZE;
-
- s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0);
-
- /* Clocks */
- omap_clk_init(s);
-
- /* Memory-mapped stuff */
- memory_region_init_ram(&s->sram, NULL, "omap2.sram", s->sram_size,
- &error_fatal);
- memory_region_add_subregion(sysmem, OMAP2_SRAM_BASE, &s->sram);
-
- s->l4 = omap_l4_init(sysmem, OMAP2_L4_BASE, 54);
-
- /* Actually mapped at any 2K boundary in the ARM11 private-peripheral if */
- s->ih[0] = qdev_new("omap2-intc");
- qdev_prop_set_uint8(s->ih[0], "revision", 0x21);
- omap_intc_set_fclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_fclk"));
- omap_intc_set_iclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_iclk"));
- busdev = SYS_BUS_DEVICE(s->ih[0]);
- sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
- sysbus_connect_irq(busdev, 1,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ));
- sysbus_mmio_map(busdev, 0, 0x480fe000);
- s->prcm = omap_prcm_init(omap_l4tao(s->l4, 3),
- qdev_get_gpio_in(s->ih[0],
- OMAP_INT_24XX_PRCM_MPU_IRQ),
- NULL, NULL, s);
-
- s->sysc = omap_sysctl_init(omap_l4tao(s->l4, 1),
- omap_findclk(s, "omapctrl_iclk"), s);
-
- for (i = 0; i < 4; i++) {
- dma_irqs[i] = qdev_get_gpio_in(s->ih[omap2_dma_irq_map[i].ih],
- omap2_dma_irq_map[i].intr);
- }
- s->dma = omap_dma4_init(0x48056000, dma_irqs, sysmem, s, 256, 32,
- omap_findclk(s, "sdma_iclk"),
- omap_findclk(s, "sdma_fclk"));
- s->port->addr_valid = omap2_validate_addr;
-
- /* Register SDRAM and SRAM ports for fast DMA transfers. */
- soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(sdram),
- OMAP2_Q2_BASE, memory_region_size(sdram));
- soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->sram),
- OMAP2_SRAM_BASE, s->sram_size);
-
- s->uart[0] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 19),
- qdev_get_gpio_in(s->ih[0],
- OMAP_INT_24XX_UART1_IRQ),
- omap_findclk(s, "uart1_fclk"),
- omap_findclk(s, "uart1_iclk"),
- s->drq[OMAP24XX_DMA_UART1_TX],
- s->drq[OMAP24XX_DMA_UART1_RX],
- "uart1",
- serial_hd(0));
- s->uart[1] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 20),
- qdev_get_gpio_in(s->ih[0],
- OMAP_INT_24XX_UART2_IRQ),
- omap_findclk(s, "uart2_fclk"),
- omap_findclk(s, "uart2_iclk"),
- s->drq[OMAP24XX_DMA_UART2_TX],
- s->drq[OMAP24XX_DMA_UART2_RX],
- "uart2",
- serial_hd(0) ? serial_hd(1) : NULL);
- s->uart[2] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 21),
- qdev_get_gpio_in(s->ih[0],
- OMAP_INT_24XX_UART3_IRQ),
- omap_findclk(s, "uart3_fclk"),
- omap_findclk(s, "uart3_iclk"),
- s->drq[OMAP24XX_DMA_UART3_TX],
- s->drq[OMAP24XX_DMA_UART3_RX],
- "uart3",
- serial_hd(0) && serial_hd(1) ? serial_hd(2) : NULL);
-
- s->gptimer[0] = omap_gp_timer_init(omap_l4ta(s->l4, 7),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER1),
- omap_findclk(s, "wu_gpt1_clk"),
- omap_findclk(s, "wu_l4_iclk"));
- s->gptimer[1] = omap_gp_timer_init(omap_l4ta(s->l4, 8),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER2),
- omap_findclk(s, "core_gpt2_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[2] = omap_gp_timer_init(omap_l4ta(s->l4, 22),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER3),
- omap_findclk(s, "core_gpt3_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[3] = omap_gp_timer_init(omap_l4ta(s->l4, 23),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER4),
- omap_findclk(s, "core_gpt4_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[4] = omap_gp_timer_init(omap_l4ta(s->l4, 24),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER5),
- omap_findclk(s, "core_gpt5_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[5] = omap_gp_timer_init(omap_l4ta(s->l4, 25),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER6),
- omap_findclk(s, "core_gpt6_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[6] = omap_gp_timer_init(omap_l4ta(s->l4, 26),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER7),
- omap_findclk(s, "core_gpt7_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[7] = omap_gp_timer_init(omap_l4ta(s->l4, 27),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER8),
- omap_findclk(s, "core_gpt8_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[8] = omap_gp_timer_init(omap_l4ta(s->l4, 28),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER9),
- omap_findclk(s, "core_gpt9_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[9] = omap_gp_timer_init(omap_l4ta(s->l4, 29),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER10),
- omap_findclk(s, "core_gpt10_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[10] = omap_gp_timer_init(omap_l4ta(s->l4, 30),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER11),
- omap_findclk(s, "core_gpt11_clk"),
- omap_findclk(s, "core_l4_iclk"));
- s->gptimer[11] = omap_gp_timer_init(omap_l4ta(s->l4, 31),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER12),
- omap_findclk(s, "core_gpt12_clk"),
- omap_findclk(s, "core_l4_iclk"));
-
- omap_tap_init(omap_l4ta(s->l4, 2), s);
-
- s->synctimer = omap_synctimer_init(omap_l4tao(s->l4, 2), s,
- omap_findclk(s, "clk32-kHz"),
- omap_findclk(s, "core_l4_iclk"));
-
- s->i2c[0] = qdev_new("omap_i2c");
- qdev_prop_set_uint8(s->i2c[0], "revision", 0x34);
- omap_i2c_set_iclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.iclk"));
- omap_i2c_set_fclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.fclk"));
- busdev = SYS_BUS_DEVICE(s->i2c[0]);
- sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C1_IRQ));
- sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C1_TX]);
- sysbus_connect_irq(busdev, 2, s->drq[OMAP24XX_DMA_I2C1_RX]);
- sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 5), 0));
-
- s->i2c[1] = qdev_new("omap_i2c");
- qdev_prop_set_uint8(s->i2c[1], "revision", 0x34);
- omap_i2c_set_iclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.iclk"));
- omap_i2c_set_fclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.fclk"));
- busdev = SYS_BUS_DEVICE(s->i2c[1]);
- sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C2_IRQ));
- sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C2_TX]);
- sysbus_connect_irq(busdev, 2, s->drq[OMAP24XX_DMA_I2C2_RX]);
- sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 6), 0));
-
- s->gpio = qdev_new("omap2-gpio");
- qdev_prop_set_int32(s->gpio, "mpu_model", s->mpu_model);
- omap2_gpio_set_iclk(OMAP2_GPIO(s->gpio), omap_findclk(s, "gpio_iclk"));
- omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 0, omap_findclk(s, "gpio1_dbclk"));
- omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 1, omap_findclk(s, "gpio2_dbclk"));
- omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 2, omap_findclk(s, "gpio3_dbclk"));
- omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 3, omap_findclk(s, "gpio4_dbclk"));
- if (s->mpu_model == omap2430) {
- omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 4,
- omap_findclk(s, "gpio5_dbclk"));
- }
- busdev = SYS_BUS_DEVICE(s->gpio);
- sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK1));
- sysbus_connect_irq(busdev, 3,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK2));
- sysbus_connect_irq(busdev, 6,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK3));
- sysbus_connect_irq(busdev, 9,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK4));
- if (s->mpu_model == omap2430) {
- sysbus_connect_irq(busdev, 12,
- qdev_get_gpio_in(s->ih[0],
- OMAP_INT_243X_GPIO_BANK5));
- }
- ta = omap_l4ta(s->l4, 3);
- sysbus_mmio_map(busdev, 0, omap_l4_region_base(ta, 1));
- sysbus_mmio_map(busdev, 1, omap_l4_region_base(ta, 0));
- sysbus_mmio_map(busdev, 2, omap_l4_region_base(ta, 2));
- sysbus_mmio_map(busdev, 3, omap_l4_region_base(ta, 4));
- sysbus_mmio_map(busdev, 4, omap_l4_region_base(ta, 5));
-
- s->sdrc = omap_sdrc_init(sysmem, 0x68009000);
- s->gpmc = omap_gpmc_init(s, 0x6800a000,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPMC_IRQ),
- s->drq[OMAP24XX_DMA_GPMC]);
-
- dinfo = drive_get(IF_SD, 0, 0);
- if (!dinfo && !qtest_enabled()) {
- warn_report("missing SecureDigital device");
- }
- s->mmc = omap2_mmc_init(omap_l4tao(s->l4, 9),
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MMC_IRQ),
- &s->drq[OMAP24XX_DMA_MMC1_TX],
- omap_findclk(s, "mmc_fclk"), omap_findclk(s, "mmc_iclk"));
-
- s->mcspi[0] = omap_mcspi_init(omap_l4ta(s->l4, 35), 4,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI1_IRQ),
- &s->drq[OMAP24XX_DMA_SPI1_TX0],
- omap_findclk(s, "spi1_fclk"),
- omap_findclk(s, "spi1_iclk"));
- s->mcspi[1] = omap_mcspi_init(omap_l4ta(s->l4, 36), 2,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI2_IRQ),
- &s->drq[OMAP24XX_DMA_SPI2_TX0],
- omap_findclk(s, "spi2_fclk"),
- omap_findclk(s, "spi2_iclk"));
-
- s->dss = omap_dss_init(omap_l4ta(s->l4, 10), sysmem, 0x68000800,
- /* XXX wire M_IRQ_25, D_L2_IRQ_30 and I_IRQ_13 together */
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_DSS_IRQ),
- s->drq[OMAP24XX_DMA_DSS],
- omap_findclk(s, "dss_clk1"), omap_findclk(s, "dss_clk2"),
- omap_findclk(s, "dss_54m_clk"),
- omap_findclk(s, "dss_l3_iclk"),
- omap_findclk(s, "dss_l4_iclk"));
-
- omap_sti_init(omap_l4ta(s->l4, 18), sysmem, 0x54000000,
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_STI),
- omap_findclk(s, "emul_ck"),
- serial_hd(0) && serial_hd(1) && serial_hd(2) ?
- serial_hd(3) : NULL);
-
- s->eac = omap_eac_init(omap_l4ta(s->l4, 32),
- qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_EAC_IRQ),
- /* Ten consecutive lines */
- &s->drq[OMAP24XX_DMA_EAC_AC_RD],
- omap_findclk(s, "func_96m_clk"),
- omap_findclk(s, "core_l4_iclk"));
-
- /* All register mappings (including those not currently implemented):
- * SystemControlMod 48000000 - 48000fff
- * SystemControlL4 48001000 - 48001fff
- * 32kHz Timer Mod 48004000 - 48004fff
- * 32kHz Timer L4 48005000 - 48005fff
- * PRCM ModA 48008000 - 480087ff
- * PRCM ModB 48008800 - 48008fff
- * PRCM L4 48009000 - 48009fff
- * TEST-BCM Mod 48012000 - 48012fff
- * TEST-BCM L4 48013000 - 48013fff
- * TEST-TAP Mod 48014000 - 48014fff
- * TEST-TAP L4 48015000 - 48015fff
- * GPIO1 Mod 48018000 - 48018fff
- * GPIO Top 48019000 - 48019fff
- * GPIO2 Mod 4801a000 - 4801afff
- * GPIO L4 4801b000 - 4801bfff
- * GPIO3 Mod 4801c000 - 4801cfff
- * GPIO4 Mod 4801e000 - 4801efff
- * WDTIMER1 Mod 48020000 - 48010fff
- * WDTIMER Top 48021000 - 48011fff
- * WDTIMER2 Mod 48022000 - 48012fff
- * WDTIMER L4 48023000 - 48013fff
- * WDTIMER3 Mod 48024000 - 48014fff
- * WDTIMER3 L4 48025000 - 48015fff
- * WDTIMER4 Mod 48026000 - 48016fff
- * WDTIMER4 L4 48027000 - 48017fff
- * GPTIMER1 Mod 48028000 - 48018fff
- * GPTIMER1 L4 48029000 - 48019fff
- * GPTIMER2 Mod 4802a000 - 4801afff
- * GPTIMER2 L4 4802b000 - 4801bfff
- * L4-Config AP 48040000 - 480407ff
- * L4-Config IP 48040800 - 48040fff
- * L4-Config LA 48041000 - 48041fff
- * ARM11ETB Mod 48048000 - 48049fff
- * ARM11ETB L4 4804a000 - 4804afff
- * DISPLAY Top 48050000 - 480503ff
- * DISPLAY DISPC 48050400 - 480507ff
- * DISPLAY RFBI 48050800 - 48050bff
- * DISPLAY VENC 48050c00 - 48050fff
- * DISPLAY L4 48051000 - 48051fff
- * CAMERA Top 48052000 - 480523ff
- * CAMERA core 48052400 - 480527ff
- * CAMERA DMA 48052800 - 48052bff
- * CAMERA MMU 48052c00 - 48052fff
- * CAMERA L4 48053000 - 48053fff
- * SDMA Mod 48056000 - 48056fff
- * SDMA L4 48057000 - 48057fff
- * SSI Top 48058000 - 48058fff
- * SSI GDD 48059000 - 48059fff
- * SSI Port1 4805a000 - 4805afff
- * SSI Port2 4805b000 - 4805bfff
- * SSI L4 4805c000 - 4805cfff
- * USB Mod 4805e000 - 480fefff
- * USB L4 4805f000 - 480fffff
- * WIN_TRACER1 Mod 48060000 - 48060fff
- * WIN_TRACER1 L4 48061000 - 48061fff
- * WIN_TRACER2 Mod 48062000 - 48062fff
- * WIN_TRACER2 L4 48063000 - 48063fff
- * WIN_TRACER3 Mod 48064000 - 48064fff
- * WIN_TRACER3 L4 48065000 - 48065fff
- * WIN_TRACER4 Top 48066000 - 480660ff
- * WIN_TRACER4 ETT 48066100 - 480661ff
- * WIN_TRACER4 WT 48066200 - 480662ff
- * WIN_TRACER4 L4 48067000 - 48067fff
- * XTI Mod 48068000 - 48068fff
- * XTI L4 48069000 - 48069fff
- * UART1 Mod 4806a000 - 4806afff
- * UART1 L4 4806b000 - 4806bfff
- * UART2 Mod 4806c000 - 4806cfff
- * UART2 L4 4806d000 - 4806dfff
- * UART3 Mod 4806e000 - 4806efff
- * UART3 L4 4806f000 - 4806ffff
- * I2C1 Mod 48070000 - 48070fff
- * I2C1 L4 48071000 - 48071fff
- * I2C2 Mod 48072000 - 48072fff
- * I2C2 L4 48073000 - 48073fff
- * McBSP1 Mod 48074000 - 48074fff
- * McBSP1 L4 48075000 - 48075fff
- * McBSP2 Mod 48076000 - 48076fff
- * McBSP2 L4 48077000 - 48077fff
- * GPTIMER3 Mod 48078000 - 48078fff
- * GPTIMER3 L4 48079000 - 48079fff
- * GPTIMER4 Mod 4807a000 - 4807afff
- * GPTIMER4 L4 4807b000 - 4807bfff
- * GPTIMER5 Mod 4807c000 - 4807cfff
- * GPTIMER5 L4 4807d000 - 4807dfff
- * GPTIMER6 Mod 4807e000 - 4807efff
- * GPTIMER6 L4 4807f000 - 4807ffff
- * GPTIMER7 Mod 48080000 - 48080fff
- * GPTIMER7 L4 48081000 - 48081fff
- * GPTIMER8 Mod 48082000 - 48082fff
- * GPTIMER8 L4 48083000 - 48083fff
- * GPTIMER9 Mod 48084000 - 48084fff
- * GPTIMER9 L4 48085000 - 48085fff
- * GPTIMER10 Mod 48086000 - 48086fff
- * GPTIMER10 L4 48087000 - 48087fff
- * GPTIMER11 Mod 48088000 - 48088fff
- * GPTIMER11 L4 48089000 - 48089fff
- * GPTIMER12 Mod 4808a000 - 4808afff
- * GPTIMER12 L4 4808b000 - 4808bfff
- * EAC Mod 48090000 - 48090fff
- * EAC L4 48091000 - 48091fff
- * FAC Mod 48092000 - 48092fff
- * FAC L4 48093000 - 48093fff
- * MAILBOX Mod 48094000 - 48094fff
- * MAILBOX L4 48095000 - 48095fff
- * SPI1 Mod 48098000 - 48098fff
- * SPI1 L4 48099000 - 48099fff
- * SPI2 Mod 4809a000 - 4809afff
- * SPI2 L4 4809b000 - 4809bfff
- * MMC/SDIO Mod 4809c000 - 4809cfff
- * MMC/SDIO L4 4809d000 - 4809dfff
- * MS_PRO Mod 4809e000 - 4809efff
- * MS_PRO L4 4809f000 - 4809ffff
- * RNG Mod 480a0000 - 480a0fff
- * RNG L4 480a1000 - 480a1fff
- * DES3DES Mod 480a2000 - 480a2fff
- * DES3DES L4 480a3000 - 480a3fff
- * SHA1MD5 Mod 480a4000 - 480a4fff
- * SHA1MD5 L4 480a5000 - 480a5fff
- * AES Mod 480a6000 - 480a6fff
- * AES L4 480a7000 - 480a7fff
- * PKA Mod 480a8000 - 480a9fff
- * PKA L4 480aa000 - 480aafff
- * MG Mod 480b0000 - 480b0fff
- * MG L4 480b1000 - 480b1fff
- * HDQ/1-wire Mod 480b2000 - 480b2fff
- * HDQ/1-wire L4 480b3000 - 480b3fff
- * MPU interrupt 480fe000 - 480fefff
- * STI channel base 54000000 - 5400ffff
- * IVA RAM 5c000000 - 5c01ffff
- * IVA ROM 5c020000 - 5c027fff
- * IMG_BUF_A 5c040000 - 5c040fff
- * IMG_BUF_B 5c042000 - 5c042fff
- * VLCDS 5c048000 - 5c0487ff
- * IMX_COEF 5c049000 - 5c04afff
- * IMX_CMD 5c051000 - 5c051fff
- * VLCDQ 5c053000 - 5c0533ff
- * VLCDH 5c054000 - 5c054fff
- * SEQ_CMD 5c055000 - 5c055fff
- * IMX_REG 5c056000 - 5c0560ff
- * VLCD_REG 5c056100 - 5c0561ff
- * SEQ_REG 5c056200 - 5c0562ff
- * IMG_BUF_REG 5c056300 - 5c0563ff
- * SEQIRQ_REG 5c056400 - 5c0564ff
- * OCP_REG 5c060000 - 5c060fff
- * SYSC_REG 5c070000 - 5c070fff
- * MMU_REG 5d000000 - 5d000fff
- * sDMA R 68000400 - 680005ff
- * sDMA W 68000600 - 680007ff
- * Display Control 68000800 - 680009ff
- * DSP subsystem 68000a00 - 68000bff
- * MPU subsystem 68000c00 - 68000dff
- * IVA subsystem 68001000 - 680011ff
- * USB 68001200 - 680013ff
- * Camera 68001400 - 680015ff
- * VLYNQ (firewall) 68001800 - 68001bff
- * VLYNQ 68001e00 - 68001fff
- * SSI 68002000 - 680021ff
- * L4 68002400 - 680025ff
- * DSP (firewall) 68002800 - 68002bff
- * DSP subsystem 68002e00 - 68002fff
- * IVA (firewall) 68003000 - 680033ff
- * IVA 68003600 - 680037ff
- * GFX 68003a00 - 68003bff
- * CMDWR emulation 68003c00 - 68003dff
- * SMS 68004000 - 680041ff
- * OCM 68004200 - 680043ff
- * GPMC 68004400 - 680045ff
- * RAM (firewall) 68005000 - 680053ff
- * RAM (err login) 68005400 - 680057ff
- * ROM (firewall) 68005800 - 68005bff
- * ROM (err login) 68005c00 - 68005fff
- * GPMC (firewall) 68006000 - 680063ff
- * GPMC (err login) 68006400 - 680067ff
- * SMS (err login) 68006c00 - 68006fff
- * SMS registers 68008000 - 68008fff
- * SDRC registers 68009000 - 68009fff
- * GPMC registers 6800a000 6800afff
- */
-
- qemu_register_reset(omap2_mpu_reset, s);
-
- return s;
-}
diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c
index 62d7915..5d4a31b 100644
--- a/hw/arm/omap_sx1.c
+++ b/hw/arm/omap_sx1.c
@@ -1,7 +1,7 @@
/* omap_sx1.c Support for the Siemens SX1 smartphone emulation.
*
* Copyright (C) 2008
- * Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ * Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
* Copyright (C) 2007 Vladimir Ananiev <vovan888@gmail.com>
*
* based on PalmOne's (TM) PDAs support (palm.c)
@@ -33,8 +33,8 @@
#include "hw/boards.h"
#include "hw/arm/boot.h"
#include "hw/block/flash.h"
-#include "sysemu/qtest.h"
-#include "exec/address-spaces.h"
+#include "system/qtest.h"
+#include "system/address-spaces.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
@@ -76,10 +76,6 @@ static uint64_t static_read(void *opaque, hwaddr offset,
static void static_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
-#ifdef SPY
- printf("%s: value %" PRIx64 " %u bytes written at 0x%x\n",
- __func__, value, size, (int)offset);
-#endif
}
static const MemoryRegionOps static_ops = {
@@ -206,7 +202,7 @@ static void sx1_init_v2(MachineState *machine)
sx1_init(machine, 2);
}
-static void sx1_machine_v2_class_init(ObjectClass *oc, void *data)
+static void sx1_machine_v2_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -216,6 +212,7 @@ static void sx1_machine_v2_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t");
mc->default_ram_size = SDRAM_SIZE;
mc->default_ram_id = "omap1.dram";
+ mc->auto_create_sdcard = true;
}
static const TypeInfo sx1_machine_v2_type = {
@@ -224,7 +221,7 @@ static const TypeInfo sx1_machine_v2_type = {
.class_init = sx1_machine_v2_class_init,
};
-static void sx1_machine_v1_class_init(ObjectClass *oc, void *data)
+static void sx1_machine_v1_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -234,6 +231,7 @@ static void sx1_machine_v1_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t");
mc->default_ram_size = SDRAM_SIZE;
mc->default_ram_id = "omap1.dram";
+ mc->auto_create_sdcard = true;
}
static const TypeInfo sx1_machine_v1_type = {
diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c
index 77e3281..e095688 100644
--- a/hw/arm/orangepi.c
+++ b/hw/arm/orangepi.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
@@ -121,6 +121,7 @@ static void orangepi_machine_init(MachineClass *mc)
mc->valid_cpu_types = valid_cpu_types;
mc->default_ram_size = 1 * GiB;
mc->default_ram_id = "orangepi.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("orangepi-pc", orangepi_machine_init)
diff --git a/hw/arm/palm.c b/hw/arm/palm.c
deleted file mode 100644
index e04ac92..0000000
--- a/hw/arm/palm.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * PalmOne's (TM) PDAs.
- *
- * Copyright (C) 2006-2007 Andrzej Zaborowski <balrog@zabor.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "audio/audio.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "ui/console.h"
-#include "hw/arm/omap.h"
-#include "hw/boards.h"
-#include "hw/arm/boot.h"
-#include "hw/input/tsc2xxx.h"
-#include "hw/irq.h"
-#include "hw/loader.h"
-#include "qemu/cutils.h"
-#include "qom/object.h"
-#include "qemu/error-report.h"
-
-
-static uint64_t static_read(void *opaque, hwaddr offset, unsigned size)
-{
- uint32_t *val = (uint32_t *)opaque;
- uint32_t sizemask = 7 >> size;
-
- return *val >> ((offset & sizemask) << 3);
-}
-
-static void static_write(void *opaque, hwaddr offset, uint64_t value,
- unsigned size)
-{
-#ifdef SPY
- printf("%s: value %08lx written at " PA_FMT "\n",
- __func__, value, offset);
-#endif
-}
-
-static const MemoryRegionOps static_ops = {
- .read = static_read,
- .write = static_write,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-/* Palm Tungsten|E support */
-
-/* Shared GPIOs */
-#define PALMTE_USBDETECT_GPIO 0
-#define PALMTE_USB_OR_DC_GPIO 1
-#define PALMTE_TSC_GPIO 4
-#define PALMTE_PINTDAV_GPIO 6
-#define PALMTE_MMC_WP_GPIO 8
-#define PALMTE_MMC_POWER_GPIO 9
-#define PALMTE_HDQ_GPIO 11
-#define PALMTE_HEADPHONES_GPIO 14
-#define PALMTE_SPEAKER_GPIO 15
-/* MPU private GPIOs */
-#define PALMTE_DC_GPIO 2
-#define PALMTE_MMC_SWITCH_GPIO 4
-#define PALMTE_MMC1_GPIO 6
-#define PALMTE_MMC2_GPIO 7
-#define PALMTE_MMC3_GPIO 11
-
-static MouseTransformInfo palmte_pointercal = {
- .x = 320,
- .y = 320,
- .a = { -5909, 8, 22465308, 104, 7644, -1219972, 65536 },
-};
-
-static void palmte_microwire_setup(struct omap_mpu_state_s *cpu)
-{
- uWireSlave *tsc;
-
- tsc = tsc2102_init(qdev_get_gpio_in(cpu->gpio, PALMTE_PINTDAV_GPIO));
-
- omap_uwire_attach(cpu->microwire, tsc, 0);
- omap_mcbsp_i2s_attach(cpu->mcbsp1, tsc210x_codec(tsc));
-
- tsc210x_set_transform(tsc, &palmte_pointercal);
-}
-
-static struct {
- int row;
- int column;
-} palmte_keymap[0x80] = {
- [0 ... 0x7f] = { -1, -1 },
- [0x3b] = { 0, 0 }, /* F1 -> Calendar */
- [0x3c] = { 1, 0 }, /* F2 -> Contacts */
- [0x3d] = { 2, 0 }, /* F3 -> Tasks List */
- [0x3e] = { 3, 0 }, /* F4 -> Note Pad */
- [0x01] = { 4, 0 }, /* Esc -> Power */
- [0x4b] = { 0, 1 }, /* Left */
- [0x50] = { 1, 1 }, /* Down */
- [0x48] = { 2, 1 }, /* Up */
- [0x4d] = { 3, 1 }, /* Right */
- [0x4c] = { 4, 1 }, /* Centre */
- [0x39] = { 4, 1 }, /* Spc -> Centre */
-};
-
-static void palmte_button_event(void *opaque, int keycode)
-{
- struct omap_mpu_state_s *cpu = opaque;
-
- if (palmte_keymap[keycode & 0x7f].row != -1)
- omap_mpuio_key(cpu->mpuio,
- palmte_keymap[keycode & 0x7f].row,
- palmte_keymap[keycode & 0x7f].column,
- !(keycode & 0x80));
-}
-
-/*
- * Encapsulation of some GPIO line behaviour for the Palm board
- *
- * QEMU interface:
- * + unnamed GPIO inputs 0..6: for the various miscellaneous input lines
- */
-
-#define TYPE_PALM_MISC_GPIO "palm-misc-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(PalmMiscGPIOState, PALM_MISC_GPIO)
-
-struct PalmMiscGPIOState {
- SysBusDevice parent_obj;
-};
-
-static void palmte_onoff_gpios(void *opaque, int line, int level)
-{
- switch (line) {
- case 0:
- printf("%s: current to MMC/SD card %sabled.\n",
- __func__, level ? "dis" : "en");
- break;
- case 1:
- printf("%s: internal speaker amplifier %s.\n",
- __func__, level ? "down" : "on");
- break;
-
- /* These LCD & Audio output signals have not been identified yet. */
- case 2:
- case 3:
- case 4:
- printf("%s: LCD GPIO%i %s.\n",
- __func__, line - 1, level ? "high" : "low");
- break;
- case 5:
- case 6:
- printf("%s: Audio GPIO%i %s.\n",
- __func__, line - 4, level ? "high" : "low");
- break;
- }
-}
-
-static void palm_misc_gpio_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
-
- qdev_init_gpio_in(dev, palmte_onoff_gpios, 7);
-}
-
-static const TypeInfo palm_misc_gpio_info = {
- .name = TYPE_PALM_MISC_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PalmMiscGPIOState),
- .instance_init = palm_misc_gpio_init,
- /*
- * No class init required: device has no internal state so does not
- * need to set up reset or vmstate, and has no realize method.
- */
-};
-
-static void palmte_gpio_setup(struct omap_mpu_state_s *cpu)
-{
- DeviceState *misc_gpio;
-
- misc_gpio = sysbus_create_simple(TYPE_PALM_MISC_GPIO, -1, NULL);
-
- omap_mmc_handlers(cpu->mmc,
- qdev_get_gpio_in(cpu->gpio, PALMTE_MMC_WP_GPIO),
- qemu_irq_invert(omap_mpuio_in_get(cpu->mpuio)
- [PALMTE_MMC_SWITCH_GPIO]));
-
- qdev_connect_gpio_out(cpu->gpio, PALMTE_MMC_POWER_GPIO,
- qdev_get_gpio_in(misc_gpio, 0));
- qdev_connect_gpio_out(cpu->gpio, PALMTE_SPEAKER_GPIO,
- qdev_get_gpio_in(misc_gpio, 1));
- qdev_connect_gpio_out(cpu->gpio, 11, qdev_get_gpio_in(misc_gpio, 2));
- qdev_connect_gpio_out(cpu->gpio, 12, qdev_get_gpio_in(misc_gpio, 3));
- qdev_connect_gpio_out(cpu->gpio, 13, qdev_get_gpio_in(misc_gpio, 4));
- omap_mpuio_out_set(cpu->mpuio, 1, qdev_get_gpio_in(misc_gpio, 5));
- omap_mpuio_out_set(cpu->mpuio, 3, qdev_get_gpio_in(misc_gpio, 6));
-
- /* Reset some inputs to initial state. */
- qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USBDETECT_GPIO));
- qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USB_OR_DC_GPIO));
- qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, 4));
- qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_HEADPHONES_GPIO));
- qemu_irq_lower(omap_mpuio_in_get(cpu->mpuio)[PALMTE_DC_GPIO]);
- qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[6]);
- qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[7]);
- qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[11]);
-}
-
-static struct arm_boot_info palmte_binfo = {
- .loader_start = OMAP_EMIFF_BASE,
- .ram_size = 0x02000000,
- .board_id = 0x331,
-};
-
-static void palmte_init(MachineState *machine)
-{
- MemoryRegion *address_space_mem = get_system_memory();
- struct omap_mpu_state_s *mpu;
- int flash_size = 0x00800000;
- static uint32_t cs0val = 0xffffffff;
- static uint32_t cs1val = 0x0000e1a0;
- static uint32_t cs2val = 0x0000e1a0;
- static uint32_t cs3val = 0xe1a0e1a0;
- int rom_size, rom_loaded = 0;
- MachineClass *mc = MACHINE_GET_CLASS(machine);
- MemoryRegion *flash = g_new(MemoryRegion, 1);
- MemoryRegion *cs = g_new(MemoryRegion, 4);
-
- if (machine->ram_size != mc->default_ram_size) {
- char *sz = size_to_str(mc->default_ram_size);
- error_report("Invalid RAM size, should be %s", sz);
- g_free(sz);
- exit(EXIT_FAILURE);
- }
-
- memory_region_add_subregion(address_space_mem, OMAP_EMIFF_BASE,
- machine->ram);
-
- mpu = omap310_mpu_init(machine->ram, machine->cpu_type);
-
- /* External Flash (EMIFS) */
- memory_region_init_rom(flash, NULL, "palmte.flash", flash_size,
- &error_fatal);
- memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE, flash);
-
- memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, "palmte-cs0",
- OMAP_CS0_SIZE - flash_size);
- memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE + flash_size,
- &cs[0]);
- memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, "palmte-cs1",
- OMAP_CS1_SIZE);
- memory_region_add_subregion(address_space_mem, OMAP_CS1_BASE, &cs[1]);
- memory_region_init_io(&cs[2], NULL, &static_ops, &cs2val, "palmte-cs2",
- OMAP_CS2_SIZE);
- memory_region_add_subregion(address_space_mem, OMAP_CS2_BASE, &cs[2]);
- memory_region_init_io(&cs[3], NULL, &static_ops, &cs3val, "palmte-cs3",
- OMAP_CS3_SIZE);
- memory_region_add_subregion(address_space_mem, OMAP_CS3_BASE, &cs[3]);
-
- palmte_microwire_setup(mpu);
-
- qemu_add_kbd_event_handler(palmte_button_event, mpu);
-
- palmte_gpio_setup(mpu);
-
- /* Setup initial (reset) machine state */
- if (nb_option_roms) {
- rom_size = get_image_size(option_rom[0].name);
- if (rom_size > flash_size) {
- fprintf(stderr, "%s: ROM image too big (%x > %x)\n",
- __func__, rom_size, flash_size);
- rom_size = 0;
- }
- if (rom_size > 0) {
- rom_size = load_image_targphys(option_rom[0].name, OMAP_CS0_BASE,
- flash_size);
- rom_loaded = 1;
- }
- if (rom_size < 0) {
- fprintf(stderr, "%s: error loading '%s'\n",
- __func__, option_rom[0].name);
- }
- }
-
- if (!rom_loaded && !machine->kernel_filename && !qtest_enabled()) {
- fprintf(stderr, "Kernel or ROM image must be specified\n");
- exit(1);
- }
-
- /* Load the kernel. */
- arm_load_kernel(mpu->cpu, machine, &palmte_binfo);
-}
-
-static void palmte_machine_init(MachineClass *mc)
-{
- mc->desc = "Palm Tungsten|E aka. Cheetah PDA (OMAP310)";
- mc->init = palmte_init;
- mc->ignore_memory_transaction_failures = true;
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t");
- mc->default_ram_size = 0x02000000;
- mc->default_ram_id = "omap1.dram";
- mc->deprecation_reason = "machine is old and unmaintained";
-
- machine_add_audiodev_property(mc);
-}
-
-DEFINE_MACHINE("cheetah", palmte_machine_init)
-
-static void palm_register_types(void)
-{
- type_register_static(&palm_misc_gpio_info);
-}
-
-type_init(palm_register_types)
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
deleted file mode 100644
index 6b2e544..0000000
--- a/hw/arm/pxa2xx.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/*
- * Intel XScale PXA255/270 processor support.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPL.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qemu/module.h"
-#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "cpu.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "hw/arm/pxa.h"
-#include "sysemu/sysemu.h"
-#include "hw/char/serial.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/qdev-properties-system.h"
-#include "hw/ssi/ssi.h"
-#include "hw/sd/sd.h"
-#include "chardev/char-fe.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/qtest.h"
-#include "sysemu/rtc.h"
-#include "qemu/cutils.h"
-#include "qemu/log.h"
-#include "qom/object.h"
-#include "target/arm/cpregs.h"
-
-static struct {
- hwaddr io_base;
- int irqn;
-} pxa255_serial[] = {
- { 0x40100000, PXA2XX_PIC_FFUART },
- { 0x40200000, PXA2XX_PIC_BTUART },
- { 0x40700000, PXA2XX_PIC_STUART },
- { 0x41600000, PXA25X_PIC_HWUART },
- { 0, 0 }
-}, pxa270_serial[] = {
- { 0x40100000, PXA2XX_PIC_FFUART },
- { 0x40200000, PXA2XX_PIC_BTUART },
- { 0x40700000, PXA2XX_PIC_STUART },
- { 0, 0 }
-};
-
-typedef struct PXASSPDef {
- hwaddr io_base;
- int irqn;
-} PXASSPDef;
-
-#if 0
-static PXASSPDef pxa250_ssp[] = {
- { 0x41000000, PXA2XX_PIC_SSP },
- { 0, 0 }
-};
-#endif
-
-static PXASSPDef pxa255_ssp[] = {
- { 0x41000000, PXA2XX_PIC_SSP },
- { 0x41400000, PXA25X_PIC_NSSP },
- { 0, 0 }
-};
-
-#if 0
-static PXASSPDef pxa26x_ssp[] = {
- { 0x41000000, PXA2XX_PIC_SSP },
- { 0x41400000, PXA25X_PIC_NSSP },
- { 0x41500000, PXA26X_PIC_ASSP },
- { 0, 0 }
-};
-#endif
-
-static PXASSPDef pxa27x_ssp[] = {
- { 0x41000000, PXA2XX_PIC_SSP },
- { 0x41700000, PXA27X_PIC_SSP2 },
- { 0x41900000, PXA2XX_PIC_SSP3 },
- { 0, 0 }
-};
-
-#define PMCR 0x00 /* Power Manager Control register */
-#define PSSR 0x04 /* Power Manager Sleep Status register */
-#define PSPR 0x08 /* Power Manager Scratch-Pad register */
-#define PWER 0x0c /* Power Manager Wake-Up Enable register */
-#define PRER 0x10 /* Power Manager Rising-Edge Detect Enable register */
-#define PFER 0x14 /* Power Manager Falling-Edge Detect Enable register */
-#define PEDR 0x18 /* Power Manager Edge-Detect Status register */
-#define PCFR 0x1c /* Power Manager General Configuration register */
-#define PGSR0 0x20 /* Power Manager GPIO Sleep-State register 0 */
-#define PGSR1 0x24 /* Power Manager GPIO Sleep-State register 1 */
-#define PGSR2 0x28 /* Power Manager GPIO Sleep-State register 2 */
-#define PGSR3 0x2c /* Power Manager GPIO Sleep-State register 3 */
-#define RCSR 0x30 /* Reset Controller Status register */
-#define PSLR 0x34 /* Power Manager Sleep Configuration register */
-#define PTSR 0x38 /* Power Manager Standby Configuration register */
-#define PVCR 0x40 /* Power Manager Voltage Change Control register */
-#define PUCR 0x4c /* Power Manager USIM Card Control/Status register */
-#define PKWR 0x50 /* Power Manager Keyboard Wake-Up Enable register */
-#define PKSR 0x54 /* Power Manager Keyboard Level-Detect Status */
-#define PCMD0 0x80 /* Power Manager I2C Command register File 0 */
-#define PCMD31 0xfc /* Power Manager I2C Command register File 31 */
-
-static uint64_t pxa2xx_pm_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case PMCR ... PCMD31:
- if (addr & 3)
- goto fail;
-
- return s->pm_regs[addr >> 2];
- default:
- fail:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_pm_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case PMCR:
- /* Clear the write-one-to-clear bits... */
- s->pm_regs[addr >> 2] &= ~(value & 0x2a);
- /* ...and set the plain r/w bits */
- s->pm_regs[addr >> 2] &= ~0x15;
- s->pm_regs[addr >> 2] |= value & 0x15;
- break;
-
- case PSSR: /* Read-clean registers */
- case RCSR:
- case PKSR:
- s->pm_regs[addr >> 2] &= ~value;
- break;
-
- default: /* Read-write registers */
- if (!(addr & 3)) {
- s->pm_regs[addr >> 2] = value;
- break;
- }
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
-}
-
-static const MemoryRegionOps pxa2xx_pm_ops = {
- .read = pxa2xx_pm_read,
- .write = pxa2xx_pm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_pm = {
- .name = "pxa2xx_pm",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(pm_regs, PXA2xxState, 0x40),
- VMSTATE_END_OF_LIST()
- }
-};
-
-#define CCCR 0x00 /* Core Clock Configuration register */
-#define CKEN 0x04 /* Clock Enable register */
-#define OSCC 0x08 /* Oscillator Configuration register */
-#define CCSR 0x0c /* Core Clock Status register */
-
-static uint64_t pxa2xx_cm_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case CCCR:
- case CKEN:
- case OSCC:
- return s->cm_regs[addr >> 2];
-
- case CCSR:
- return s->cm_regs[CCCR >> 2] | (3 << 28);
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_cm_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case CCCR:
- case CKEN:
- s->cm_regs[addr >> 2] = value;
- break;
-
- case OSCC:
- s->cm_regs[addr >> 2] &= ~0x6c;
- s->cm_regs[addr >> 2] |= value & 0x6e;
- if ((value >> 1) & 1) /* OON */
- s->cm_regs[addr >> 2] |= 1 << 0; /* Oscillator is now stable */
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
-}
-
-static const MemoryRegionOps pxa2xx_cm_ops = {
- .read = pxa2xx_cm_read,
- .write = pxa2xx_cm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_cm = {
- .name = "pxa2xx_cm",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(cm_regs, PXA2xxState, 4),
- VMSTATE_UINT32(clkcfg, PXA2xxState),
- VMSTATE_UINT32(pmnc, PXA2xxState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static uint64_t pxa2xx_clkcfg_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- return s->clkcfg;
-}
-
-static void pxa2xx_clkcfg_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- s->clkcfg = value & 0xf;
- if (value & 2) {
- printf("%s: CPU frequency change attempt\n", __func__);
- }
-}
-
-static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- static const char *pwrmode[8] = {
- "Normal", "Idle", "Deep-idle", "Standby",
- "Sleep", "reserved (!)", "reserved (!)", "Deep-sleep",
- };
-
- if (value & 8) {
- printf("%s: CPU voltage change attempt\n", __func__);
- }
- switch (value & 7) {
- case 0:
- /* Do nothing */
- break;
-
- case 1:
- /* Idle */
- if (!(s->cm_regs[CCCR >> 2] & (1U << 31))) { /* CPDIS */
- cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT);
- break;
- }
- /* Fall through. */
-
- case 2:
- /* Deep-Idle */
- cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT);
- s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */
- goto message;
-
- case 3:
- s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC;
- s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I;
- s->cpu->env.cp15.sctlr_ns = 0;
- s->cpu->env.cp15.cpacr_el1 = 0;
- s->cpu->env.cp15.ttbr0_el[1] = 0;
- s->cpu->env.cp15.dacr_ns = 0;
- s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */
- s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */
-
- /*
- * The scratch-pad register is almost universally used
- * for storing the return address on suspend. In the
- * absence of a resuming bootloader, perform a jump
- * directly to that address.
- */
- memset(s->cpu->env.regs, 0, 4 * 15);
- s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2];
-
-#if 0
- buffer = 0xe59ff000; /* ldr pc, [pc, #0] */
- cpu_physical_memory_write(0, &buffer, 4);
- buffer = s->pm_regs[PSPR >> 2];
- cpu_physical_memory_write(8, &buffer, 4);
-#endif
-
- /* Suspend */
- cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT);
-
- goto message;
-
- default:
- message:
- printf("%s: machine entered %s mode\n", __func__,
- pwrmode[value & 7]);
- }
-}
-
-static uint64_t pxa2xx_cppmnc_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- return s->pmnc;
-}
-
-static void pxa2xx_cppmnc_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- s->pmnc = value;
-}
-
-static uint64_t pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- PXA2xxState *s = (PXA2xxState *)ri->opaque;
- if (s->pmnc & 1) {
- return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- } else {
- return 0;
- }
-}
-
-static const ARMCPRegInfo pxa_cp_reginfo[] = {
- /* cp14 crm==1: perf registers */
- { .name = "CPPMNC", .cp = 14, .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_IO,
- .readfn = pxa2xx_cppmnc_read, .writefn = pxa2xx_cppmnc_write },
- { .name = "CPCCNT", .cp = 14, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_IO,
- .readfn = pxa2xx_cpccnt_read, .writefn = arm_cp_write_ignore },
- { .name = "CPINTEN", .cp = 14, .crn = 4, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPFLAG", .cp = 14, .crn = 5, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPEVTSEL", .cp = 14, .crn = 8, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- /* cp14 crm==2: performance count registers */
- { .name = "CPPMN0", .cp = 14, .crn = 0, .crm = 2, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPPMN1", .cp = 14, .crn = 1, .crm = 2, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPPMN2", .cp = 14, .crn = 2, .crm = 2, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPPMN3", .cp = 14, .crn = 2, .crm = 3, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- /* cp14 crn==6: CLKCFG */
- { .name = "CLKCFG", .cp = 14, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_IO,
- .readfn = pxa2xx_clkcfg_read, .writefn = pxa2xx_clkcfg_write },
- /* cp14 crn==7: PWRMODE */
- { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_IO,
- .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write },
-};
-
-static void pxa2xx_setup_cp14(PXA2xxState *s)
-{
- define_arm_cp_regs_with_opaque(s->cpu, pxa_cp_reginfo, s);
-}
-
-#define MDCNFG 0x00 /* SDRAM Configuration register */
-#define MDREFR 0x04 /* SDRAM Refresh Control register */
-#define MSC0 0x08 /* Static Memory Control register 0 */
-#define MSC1 0x0c /* Static Memory Control register 1 */
-#define MSC2 0x10 /* Static Memory Control register 2 */
-#define MECR 0x14 /* Expansion Memory Bus Config register */
-#define SXCNFG 0x1c /* Synchronous Static Memory Config register */
-#define MCMEM0 0x28 /* PC Card Memory Socket 0 Timing register */
-#define MCMEM1 0x2c /* PC Card Memory Socket 1 Timing register */
-#define MCATT0 0x30 /* PC Card Attribute Socket 0 register */
-#define MCATT1 0x34 /* PC Card Attribute Socket 1 register */
-#define MCIO0 0x38 /* PC Card I/O Socket 0 Timing register */
-#define MCIO1 0x3c /* PC Card I/O Socket 1 Timing register */
-#define MDMRS 0x40 /* SDRAM Mode Register Set Config register */
-#define BOOT_DEF 0x44 /* Boot-time Default Configuration register */
-#define ARB_CNTL 0x48 /* Arbiter Control register */
-#define BSCNTR0 0x4c /* Memory Buffer Strength Control register 0 */
-#define BSCNTR1 0x50 /* Memory Buffer Strength Control register 1 */
-#define LCDBSCNTR 0x54 /* LCD Buffer Strength Control register */
-#define MDMRSLP 0x58 /* Low Power SDRAM Mode Set Config register */
-#define BSCNTR2 0x5c /* Memory Buffer Strength Control register 2 */
-#define BSCNTR3 0x60 /* Memory Buffer Strength Control register 3 */
-#define SA1110 0x64 /* SA-1110 Memory Compatibility register */
-
-static uint64_t pxa2xx_mm_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case MDCNFG ... SA1110:
- if ((addr & 3) == 0)
- return s->mm_regs[addr >> 2];
- /* fall through */
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_mm_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- switch (addr) {
- case MDCNFG ... SA1110:
- if ((addr & 3) == 0) {
- s->mm_regs[addr >> 2] = value;
- break;
- }
- /* fallthrough */
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
-}
-
-static const MemoryRegionOps pxa2xx_mm_ops = {
- .read = pxa2xx_mm_read,
- .write = pxa2xx_mm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_mm = {
- .name = "pxa2xx_mm",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(mm_regs, PXA2xxState, 0x1a),
- VMSTATE_END_OF_LIST()
- }
-};
-
-#define TYPE_PXA2XX_SSP "pxa2xx-ssp"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxSSPState, PXA2XX_SSP)
-
-/* Synchronous Serial Ports */
-struct PXA2xxSSPState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- qemu_irq irq;
- uint32_t enable;
- SSIBus *bus;
-
- uint32_t sscr[2];
- uint32_t sspsp;
- uint32_t ssto;
- uint32_t ssitr;
- uint32_t sssr;
- uint8_t sstsa;
- uint8_t ssrsa;
- uint8_t ssacd;
-
- uint32_t rx_fifo[16];
- uint32_t rx_level;
- uint32_t rx_start;
-};
-
-static bool pxa2xx_ssp_vmstate_validate(void *opaque, int version_id)
-{
- PXA2xxSSPState *s = opaque;
-
- return s->rx_start < sizeof(s->rx_fifo);
-}
-
-static const VMStateDescription vmstate_pxa2xx_ssp = {
- .name = "pxa2xx-ssp",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(enable, PXA2xxSSPState),
- VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2),
- VMSTATE_UINT32(sspsp, PXA2xxSSPState),
- VMSTATE_UINT32(ssto, PXA2xxSSPState),
- VMSTATE_UINT32(ssitr, PXA2xxSSPState),
- VMSTATE_UINT32(sssr, PXA2xxSSPState),
- VMSTATE_UINT8(sstsa, PXA2xxSSPState),
- VMSTATE_UINT8(ssrsa, PXA2xxSSPState),
- VMSTATE_UINT8(ssacd, PXA2xxSSPState),
- VMSTATE_UINT32(rx_level, PXA2xxSSPState),
- VMSTATE_UINT32(rx_start, PXA2xxSSPState),
- VMSTATE_VALIDATE("fifo is 16 bytes", pxa2xx_ssp_vmstate_validate),
- VMSTATE_UINT32_ARRAY(rx_fifo, PXA2xxSSPState, 16),
- VMSTATE_END_OF_LIST()
- }
-};
-
-#define SSCR0 0x00 /* SSP Control register 0 */
-#define SSCR1 0x04 /* SSP Control register 1 */
-#define SSSR 0x08 /* SSP Status register */
-#define SSITR 0x0c /* SSP Interrupt Test register */
-#define SSDR 0x10 /* SSP Data register */
-#define SSTO 0x28 /* SSP Time-Out register */
-#define SSPSP 0x2c /* SSP Programmable Serial Protocol register */
-#define SSTSA 0x30 /* SSP TX Time Slot Active register */
-#define SSRSA 0x34 /* SSP RX Time Slot Active register */
-#define SSTSS 0x38 /* SSP Time Slot Status register */
-#define SSACD 0x3c /* SSP Audio Clock Divider register */
-
-/* Bitfields for above registers */
-#define SSCR0_SPI(x) (((x) & 0x30) == 0x00)
-#define SSCR0_SSP(x) (((x) & 0x30) == 0x10)
-#define SSCR0_UWIRE(x) (((x) & 0x30) == 0x20)
-#define SSCR0_PSP(x) (((x) & 0x30) == 0x30)
-#define SSCR0_SSE (1 << 7)
-#define SSCR0_RIM (1 << 22)
-#define SSCR0_TIM (1 << 23)
-#define SSCR0_MOD (1U << 31)
-#define SSCR0_DSS(x) (((((x) >> 16) & 0x10) | ((x) & 0xf)) + 1)
-#define SSCR1_RIE (1 << 0)
-#define SSCR1_TIE (1 << 1)
-#define SSCR1_LBM (1 << 2)
-#define SSCR1_MWDS (1 << 5)
-#define SSCR1_TFT(x) ((((x) >> 6) & 0xf) + 1)
-#define SSCR1_RFT(x) ((((x) >> 10) & 0xf) + 1)
-#define SSCR1_EFWR (1 << 14)
-#define SSCR1_PINTE (1 << 18)
-#define SSCR1_TINTE (1 << 19)
-#define SSCR1_RSRE (1 << 20)
-#define SSCR1_TSRE (1 << 21)
-#define SSCR1_EBCEI (1 << 29)
-#define SSITR_INT (7 << 5)
-#define SSSR_TNF (1 << 2)
-#define SSSR_RNE (1 << 3)
-#define SSSR_TFS (1 << 5)
-#define SSSR_RFS (1 << 6)
-#define SSSR_ROR (1 << 7)
-#define SSSR_PINT (1 << 18)
-#define SSSR_TINT (1 << 19)
-#define SSSR_EOC (1 << 20)
-#define SSSR_TUR (1 << 21)
-#define SSSR_BCE (1 << 23)
-#define SSSR_RW 0x00bc0080
-
-static void pxa2xx_ssp_int_update(PXA2xxSSPState *s)
-{
- int level = 0;
-
- level |= s->ssitr & SSITR_INT;
- level |= (s->sssr & SSSR_BCE) && (s->sscr[1] & SSCR1_EBCEI);
- level |= (s->sssr & SSSR_TUR) && !(s->sscr[0] & SSCR0_TIM);
- level |= (s->sssr & SSSR_EOC) && (s->sssr & (SSSR_TINT | SSSR_PINT));
- level |= (s->sssr & SSSR_TINT) && (s->sscr[1] & SSCR1_TINTE);
- level |= (s->sssr & SSSR_PINT) && (s->sscr[1] & SSCR1_PINTE);
- level |= (s->sssr & SSSR_ROR) && !(s->sscr[0] & SSCR0_RIM);
- level |= (s->sssr & SSSR_RFS) && (s->sscr[1] & SSCR1_RIE);
- level |= (s->sssr & SSSR_TFS) && (s->sscr[1] & SSCR1_TIE);
- qemu_set_irq(s->irq, !!level);
-}
-
-static void pxa2xx_ssp_fifo_update(PXA2xxSSPState *s)
-{
- s->sssr &= ~(0xf << 12); /* Clear RFL */
- s->sssr &= ~(0xf << 8); /* Clear TFL */
- s->sssr &= ~SSSR_TFS;
- s->sssr &= ~SSSR_TNF;
- if (s->enable) {
- s->sssr |= ((s->rx_level - 1) & 0xf) << 12;
- if (s->rx_level >= SSCR1_RFT(s->sscr[1]))
- s->sssr |= SSSR_RFS;
- else
- s->sssr &= ~SSSR_RFS;
- if (s->rx_level)
- s->sssr |= SSSR_RNE;
- else
- s->sssr &= ~SSSR_RNE;
- /* TX FIFO is never filled, so it is always in underrun
- condition if SSP is enabled */
- s->sssr |= SSSR_TFS;
- s->sssr |= SSSR_TNF;
- }
-
- pxa2xx_ssp_int_update(s);
-}
-
-static uint64_t pxa2xx_ssp_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
- uint32_t retval;
-
- switch (addr) {
- case SSCR0:
- return s->sscr[0];
- case SSCR1:
- return s->sscr[1];
- case SSPSP:
- return s->sspsp;
- case SSTO:
- return s->ssto;
- case SSITR:
- return s->ssitr;
- case SSSR:
- return s->sssr | s->ssitr;
- case SSDR:
- if (!s->enable)
- return 0xffffffff;
- if (s->rx_level < 1) {
- printf("%s: SSP Rx Underrun\n", __func__);
- return 0xffffffff;
- }
- s->rx_level --;
- retval = s->rx_fifo[s->rx_start ++];
- s->rx_start &= 0xf;
- pxa2xx_ssp_fifo_update(s);
- return retval;
- case SSTSA:
- return s->sstsa;
- case SSRSA:
- return s->ssrsa;
- case SSTSS:
- return 0;
- case SSACD:
- return s->ssacd;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_ssp_write(void *opaque, hwaddr addr,
- uint64_t value64, unsigned size)
-{
- PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
- uint32_t value = value64;
-
- switch (addr) {
- case SSCR0:
- s->sscr[0] = value & 0xc7ffffff;
- s->enable = value & SSCR0_SSE;
- if (value & SSCR0_MOD)
- printf("%s: Attempt to use network mode\n", __func__);
- if (s->enable && SSCR0_DSS(value) < 4)
- printf("%s: Wrong data size: %u bits\n", __func__,
- SSCR0_DSS(value));
- if (!(value & SSCR0_SSE)) {
- s->sssr = 0;
- s->ssitr = 0;
- s->rx_level = 0;
- }
- pxa2xx_ssp_fifo_update(s);
- break;
-
- case SSCR1:
- s->sscr[1] = value;
- if (value & (SSCR1_LBM | SSCR1_EFWR))
- printf("%s: Attempt to use SSP test mode\n", __func__);
- pxa2xx_ssp_fifo_update(s);
- break;
-
- case SSPSP:
- s->sspsp = value;
- break;
-
- case SSTO:
- s->ssto = value;
- break;
-
- case SSITR:
- s->ssitr = value & SSITR_INT;
- pxa2xx_ssp_int_update(s);
- break;
-
- case SSSR:
- s->sssr &= ~(value & SSSR_RW);
- pxa2xx_ssp_int_update(s);
- break;
-
- case SSDR:
- if (SSCR0_UWIRE(s->sscr[0])) {
- if (s->sscr[1] & SSCR1_MWDS)
- value &= 0xffff;
- else
- value &= 0xff;
- } else
- /* Note how 32-bit overflow does no harm here */
- value &= (1 << SSCR0_DSS(s->sscr[0])) - 1;
-
- /* Data goes from here to the Tx FIFO and is shifted out from
- * there directly to the slave, so there is no need to buffer it.
- */
- if (s->enable) {
- uint32_t readval;
- readval = ssi_transfer(s->bus, value);
- if (s->rx_level < 0x10) {
- s->rx_fifo[(s->rx_start + s->rx_level ++) & 0xf] = readval;
- } else {
- s->sssr |= SSSR_ROR;
- }
- }
- pxa2xx_ssp_fifo_update(s);
- break;
-
- case SSTSA:
- s->sstsa = value;
- break;
-
- case SSRSA:
- s->ssrsa = value;
- break;
-
- case SSACD:
- s->ssacd = value;
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
-}
-
-static const MemoryRegionOps pxa2xx_ssp_ops = {
- .read = pxa2xx_ssp_read,
- .write = pxa2xx_ssp_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void pxa2xx_ssp_reset(DeviceState *d)
-{
- PXA2xxSSPState *s = PXA2XX_SSP(d);
-
- s->enable = 0;
- s->sscr[0] = s->sscr[1] = 0;
- s->sspsp = 0;
- s->ssto = 0;
- s->ssitr = 0;
- s->sssr = 0;
- s->sstsa = 0;
- s->ssrsa = 0;
- s->ssacd = 0;
- s->rx_start = s->rx_level = 0;
-}
-
-static void pxa2xx_ssp_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- PXA2xxSSPState *s = PXA2XX_SSP(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- sysbus_init_irq(sbd, &s->irq);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_ssp_ops, s,
- "pxa2xx-ssp", 0x1000);
- sysbus_init_mmio(sbd, &s->iomem);
-
- s->bus = ssi_create_bus(dev, "ssi");
-}
-
-/* Real-Time Clock */
-#define RCNR 0x00 /* RTC Counter register */
-#define RTAR 0x04 /* RTC Alarm register */
-#define RTSR 0x08 /* RTC Status register */
-#define RTTR 0x0c /* RTC Timer Trim register */
-#define RDCR 0x10 /* RTC Day Counter register */
-#define RYCR 0x14 /* RTC Year Counter register */
-#define RDAR1 0x18 /* RTC Wristwatch Day Alarm register 1 */
-#define RYAR1 0x1c /* RTC Wristwatch Year Alarm register 1 */
-#define RDAR2 0x20 /* RTC Wristwatch Day Alarm register 2 */
-#define RYAR2 0x24 /* RTC Wristwatch Year Alarm register 2 */
-#define SWCR 0x28 /* RTC Stopwatch Counter register */
-#define SWAR1 0x2c /* RTC Stopwatch Alarm register 1 */
-#define SWAR2 0x30 /* RTC Stopwatch Alarm register 2 */
-#define RTCPICR 0x34 /* RTC Periodic Interrupt Counter register */
-#define PIAR 0x38 /* RTC Periodic Interrupt Alarm register */
-
-#define TYPE_PXA2XX_RTC "pxa2xx_rtc"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxRTCState, PXA2XX_RTC)
-
-struct PXA2xxRTCState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- uint32_t rttr;
- uint32_t rtsr;
- uint32_t rtar;
- uint32_t rdar1;
- uint32_t rdar2;
- uint32_t ryar1;
- uint32_t ryar2;
- uint32_t swar1;
- uint32_t swar2;
- uint32_t piar;
- uint32_t last_rcnr;
- uint32_t last_rdcr;
- uint32_t last_rycr;
- uint32_t last_swcr;
- uint32_t last_rtcpicr;
- int64_t last_hz;
- int64_t last_sw;
- int64_t last_pi;
- QEMUTimer *rtc_hz;
- QEMUTimer *rtc_rdal1;
- QEMUTimer *rtc_rdal2;
- QEMUTimer *rtc_swal1;
- QEMUTimer *rtc_swal2;
- QEMUTimer *rtc_pi;
- qemu_irq rtc_irq;
-};
-
-static inline void pxa2xx_rtc_int_update(PXA2xxRTCState *s)
-{
- qemu_set_irq(s->rtc_irq, !!(s->rtsr & 0x2553));
-}
-
-static void pxa2xx_rtc_hzupdate(PXA2xxRTCState *s)
-{
- int64_t rt = qemu_clock_get_ms(rtc_clock);
- s->last_rcnr += ((rt - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
- s->last_rdcr += ((rt - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
- s->last_hz = rt;
-}
-
-static void pxa2xx_rtc_swupdate(PXA2xxRTCState *s)
-{
- int64_t rt = qemu_clock_get_ms(rtc_clock);
- if (s->rtsr & (1 << 12))
- s->last_swcr += (rt - s->last_sw) / 10;
- s->last_sw = rt;
-}
-
-static void pxa2xx_rtc_piupdate(PXA2xxRTCState *s)
-{
- int64_t rt = qemu_clock_get_ms(rtc_clock);
- if (s->rtsr & (1 << 15))
- s->last_swcr += rt - s->last_pi;
- s->last_pi = rt;
-}
-
-static inline void pxa2xx_rtc_alarm_update(PXA2xxRTCState *s,
- uint32_t rtsr)
-{
- if ((rtsr & (1 << 2)) && !(rtsr & (1 << 0)))
- timer_mod(s->rtc_hz, s->last_hz +
- (((s->rtar - s->last_rcnr) * 1000 *
- ((s->rttr & 0xffff) + 1)) >> 15));
- else
- timer_del(s->rtc_hz);
-
- if ((rtsr & (1 << 5)) && !(rtsr & (1 << 4)))
- timer_mod(s->rtc_rdal1, s->last_hz +
- (((s->rdar1 - s->last_rdcr) * 1000 *
- ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */
- else
- timer_del(s->rtc_rdal1);
-
- if ((rtsr & (1 << 7)) && !(rtsr & (1 << 6)))
- timer_mod(s->rtc_rdal2, s->last_hz +
- (((s->rdar2 - s->last_rdcr) * 1000 *
- ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */
- else
- timer_del(s->rtc_rdal2);
-
- if ((rtsr & 0x1200) == 0x1200 && !(rtsr & (1 << 8)))
- timer_mod(s->rtc_swal1, s->last_sw +
- (s->swar1 - s->last_swcr) * 10); /* TODO: fixup */
- else
- timer_del(s->rtc_swal1);
-
- if ((rtsr & 0x1800) == 0x1800 && !(rtsr & (1 << 10)))
- timer_mod(s->rtc_swal2, s->last_sw +
- (s->swar2 - s->last_swcr) * 10); /* TODO: fixup */
- else
- timer_del(s->rtc_swal2);
-
- if ((rtsr & 0xc000) == 0xc000 && !(rtsr & (1 << 13)))
- timer_mod(s->rtc_pi, s->last_pi +
- (s->piar & 0xffff) - s->last_rtcpicr);
- else
- timer_del(s->rtc_pi);
-}
-
-static inline void pxa2xx_rtc_hz_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 0);
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static inline void pxa2xx_rtc_rdal1_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 4);
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static inline void pxa2xx_rtc_rdal2_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 6);
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static inline void pxa2xx_rtc_swal1_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 8);
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static inline void pxa2xx_rtc_swal2_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 10);
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static inline void pxa2xx_rtc_pi_tick(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- s->rtsr |= (1 << 13);
- pxa2xx_rtc_piupdate(s);
- s->last_rtcpicr = 0;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- pxa2xx_rtc_int_update(s);
-}
-
-static uint64_t pxa2xx_rtc_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
-
- switch (addr) {
- case RTTR:
- return s->rttr;
- case RTSR:
- return s->rtsr;
- case RTAR:
- return s->rtar;
- case RDAR1:
- return s->rdar1;
- case RDAR2:
- return s->rdar2;
- case RYAR1:
- return s->ryar1;
- case RYAR2:
- return s->ryar2;
- case SWAR1:
- return s->swar1;
- case SWAR2:
- return s->swar2;
- case PIAR:
- return s->piar;
- case RCNR:
- return s->last_rcnr +
- ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
- case RDCR:
- return s->last_rdcr +
- ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
- case RYCR:
- return s->last_rycr;
- case SWCR:
- if (s->rtsr & (1 << 12))
- return s->last_swcr +
- (qemu_clock_get_ms(rtc_clock) - s->last_sw) / 10;
- else
- return s->last_swcr;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_rtc_write(void *opaque, hwaddr addr,
- uint64_t value64, unsigned size)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
- uint32_t value = value64;
-
- switch (addr) {
- case RTTR:
- if (!(s->rttr & (1U << 31))) {
- pxa2xx_rtc_hzupdate(s);
- s->rttr = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- }
- break;
-
- case RTSR:
- if ((s->rtsr ^ value) & (1 << 15))
- pxa2xx_rtc_piupdate(s);
-
- if ((s->rtsr ^ value) & (1 << 12))
- pxa2xx_rtc_swupdate(s);
-
- if (((s->rtsr ^ value) & 0x4aac) | (value & ~0xdaac))
- pxa2xx_rtc_alarm_update(s, value);
-
- s->rtsr = (value & 0xdaac) | (s->rtsr & ~(value & ~0xdaac));
- pxa2xx_rtc_int_update(s);
- break;
-
- case RTAR:
- s->rtar = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RDAR1:
- s->rdar1 = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RDAR2:
- s->rdar2 = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RYAR1:
- s->ryar1 = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RYAR2:
- s->ryar2 = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case SWAR1:
- pxa2xx_rtc_swupdate(s);
- s->swar1 = value;
- s->last_swcr = 0;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case SWAR2:
- s->swar2 = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case PIAR:
- s->piar = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RCNR:
- pxa2xx_rtc_hzupdate(s);
- s->last_rcnr = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RDCR:
- pxa2xx_rtc_hzupdate(s);
- s->last_rdcr = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RYCR:
- s->last_rycr = value;
- break;
-
- case SWCR:
- pxa2xx_rtc_swupdate(s);
- s->last_swcr = value;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- case RTCPICR:
- pxa2xx_rtc_piupdate(s);
- s->last_rtcpicr = value & 0xffff;
- pxa2xx_rtc_alarm_update(s, s->rtsr);
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- }
-}
-
-static const MemoryRegionOps pxa2xx_rtc_ops = {
- .read = pxa2xx_rtc_read,
- .write = pxa2xx_rtc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void pxa2xx_rtc_init(Object *obj)
-{
- PXA2xxRTCState *s = PXA2XX_RTC(obj);
- SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- struct tm tm;
- int wom;
-
- s->rttr = 0x7fff;
- s->rtsr = 0;
-
- qemu_get_timedate(&tm, 0);
- wom = ((tm.tm_mday - 1) / 7) + 1;
-
- s->last_rcnr = (uint32_t) mktimegm(&tm);
- s->last_rdcr = (wom << 20) | ((tm.tm_wday + 1) << 17) |
- (tm.tm_hour << 12) | (tm.tm_min << 6) | tm.tm_sec;
- s->last_rycr = ((tm.tm_year + 1900) << 9) |
- ((tm.tm_mon + 1) << 5) | tm.tm_mday;
- s->last_swcr = (tm.tm_hour << 19) |
- (tm.tm_min << 13) | (tm.tm_sec << 7);
- s->last_rtcpicr = 0;
- s->last_hz = s->last_sw = s->last_pi = qemu_clock_get_ms(rtc_clock);
-
- sysbus_init_irq(dev, &s->rtc_irq);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_rtc_ops, s,
- "pxa2xx-rtc", 0x10000);
- sysbus_init_mmio(dev, &s->iomem);
-}
-
-static void pxa2xx_rtc_realize(DeviceState *dev, Error **errp)
-{
- PXA2xxRTCState *s = PXA2XX_RTC(dev);
- s->rtc_hz = timer_new_ms(rtc_clock, pxa2xx_rtc_hz_tick, s);
- s->rtc_rdal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal1_tick, s);
- s->rtc_rdal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal2_tick, s);
- s->rtc_swal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal1_tick, s);
- s->rtc_swal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal2_tick, s);
- s->rtc_pi = timer_new_ms(rtc_clock, pxa2xx_rtc_pi_tick, s);
-}
-
-static int pxa2xx_rtc_pre_save(void *opaque)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
-
- pxa2xx_rtc_hzupdate(s);
- pxa2xx_rtc_piupdate(s);
- pxa2xx_rtc_swupdate(s);
-
- return 0;
-}
-
-static int pxa2xx_rtc_post_load(void *opaque, int version_id)
-{
- PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
-
- pxa2xx_rtc_alarm_update(s, s->rtsr);
-
- return 0;
-}
-
-static const VMStateDescription vmstate_pxa2xx_rtc_regs = {
- .name = "pxa2xx_rtc",
- .version_id = 0,
- .minimum_version_id = 0,
- .pre_save = pxa2xx_rtc_pre_save,
- .post_load = pxa2xx_rtc_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(rttr, PXA2xxRTCState),
- VMSTATE_UINT32(rtsr, PXA2xxRTCState),
- VMSTATE_UINT32(rtar, PXA2xxRTCState),
- VMSTATE_UINT32(rdar1, PXA2xxRTCState),
- VMSTATE_UINT32(rdar2, PXA2xxRTCState),
- VMSTATE_UINT32(ryar1, PXA2xxRTCState),
- VMSTATE_UINT32(ryar2, PXA2xxRTCState),
- VMSTATE_UINT32(swar1, PXA2xxRTCState),
- VMSTATE_UINT32(swar2, PXA2xxRTCState),
- VMSTATE_UINT32(piar, PXA2xxRTCState),
- VMSTATE_UINT32(last_rcnr, PXA2xxRTCState),
- VMSTATE_UINT32(last_rdcr, PXA2xxRTCState),
- VMSTATE_UINT32(last_rycr, PXA2xxRTCState),
- VMSTATE_UINT32(last_swcr, PXA2xxRTCState),
- VMSTATE_UINT32(last_rtcpicr, PXA2xxRTCState),
- VMSTATE_INT64(last_hz, PXA2xxRTCState),
- VMSTATE_INT64(last_sw, PXA2xxRTCState),
- VMSTATE_INT64(last_pi, PXA2xxRTCState),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static void pxa2xx_rtc_sysbus_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "PXA2xx RTC Controller";
- dc->vmsd = &vmstate_pxa2xx_rtc_regs;
- dc->realize = pxa2xx_rtc_realize;
-}
-
-static const TypeInfo pxa2xx_rtc_sysbus_info = {
- .name = TYPE_PXA2XX_RTC,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxRTCState),
- .instance_init = pxa2xx_rtc_init,
- .class_init = pxa2xx_rtc_sysbus_class_init,
-};
-
-/* I2C Interface */
-
-#define TYPE_PXA2XX_I2C_SLAVE "pxa2xx-i2c-slave"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CSlaveState, PXA2XX_I2C_SLAVE)
-
-struct PXA2xxI2CSlaveState {
- I2CSlave parent_obj;
-
- PXA2xxI2CState *host;
-};
-
-struct PXA2xxI2CState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- PXA2xxI2CSlaveState *slave;
- I2CBus *bus;
- qemu_irq irq;
- uint32_t offset;
- uint32_t region_size;
-
- uint16_t control;
- uint16_t status;
- uint8_t ibmr;
- uint8_t data;
-};
-
-#define IBMR 0x80 /* I2C Bus Monitor register */
-#define IDBR 0x88 /* I2C Data Buffer register */
-#define ICR 0x90 /* I2C Control register */
-#define ISR 0x98 /* I2C Status register */
-#define ISAR 0xa0 /* I2C Slave Address register */
-
-static void pxa2xx_i2c_update(PXA2xxI2CState *s)
-{
- uint16_t level = 0;
- level |= s->status & s->control & (1 << 10); /* BED */
- level |= (s->status & (1 << 7)) && (s->control & (1 << 9)); /* IRF */
- level |= (s->status & (1 << 6)) && (s->control & (1 << 8)); /* ITE */
- level |= s->status & (1 << 9); /* SAD */
- qemu_set_irq(s->irq, !!level);
-}
-
-/* These are only stubs now. */
-static int pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event)
-{
- PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c);
- PXA2xxI2CState *s = slave->host;
-
- switch (event) {
- case I2C_START_SEND:
- s->status |= (1 << 9); /* set SAD */
- s->status &= ~(1 << 0); /* clear RWM */
- break;
- case I2C_START_RECV:
- s->status |= (1 << 9); /* set SAD */
- s->status |= 1 << 0; /* set RWM */
- break;
- case I2C_FINISH:
- s->status |= (1 << 4); /* set SSD */
- break;
- case I2C_NACK:
- s->status |= 1 << 1; /* set ACKNAK */
- break;
- default:
- return -1;
- }
- pxa2xx_i2c_update(s);
-
- return 0;
-}
-
-static uint8_t pxa2xx_i2c_rx(I2CSlave *i2c)
-{
- PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c);
- PXA2xxI2CState *s = slave->host;
-
- if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) {
- return 0;
- }
-
- if (s->status & (1 << 0)) { /* RWM */
- s->status |= 1 << 6; /* set ITE */
- }
- pxa2xx_i2c_update(s);
-
- return s->data;
-}
-
-static int pxa2xx_i2c_tx(I2CSlave *i2c, uint8_t data)
-{
- PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c);
- PXA2xxI2CState *s = slave->host;
-
- if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) {
- return 1;
- }
-
- if (!(s->status & (1 << 0))) { /* RWM */
- s->status |= 1 << 7; /* set IRF */
- s->data = data;
- }
- pxa2xx_i2c_update(s);
-
- return 1;
-}
-
-static uint64_t pxa2xx_i2c_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxI2CState *s = (PXA2xxI2CState *) opaque;
- I2CSlave *slave;
-
- addr -= s->offset;
- switch (addr) {
- case ICR:
- return s->control;
- case ISR:
- return s->status | (i2c_bus_busy(s->bus) << 2);
- case ISAR:
- slave = I2C_SLAVE(s->slave);
- return slave->address;
- case IDBR:
- return s->data;
- case IBMR:
- if (s->status & (1 << 2))
- s->ibmr ^= 3; /* Fake SCL and SDA pin changes */
- else
- s->ibmr = 0;
- return s->ibmr;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_i2c_write(void *opaque, hwaddr addr,
- uint64_t value64, unsigned size)
-{
- PXA2xxI2CState *s = (PXA2xxI2CState *) opaque;
- uint32_t value = value64;
- int ack;
-
- addr -= s->offset;
- switch (addr) {
- case ICR:
- s->control = value & 0xfff7;
- if ((value & (1 << 3)) && (value & (1 << 6))) { /* TB and IUE */
- /* TODO: slave mode */
- if (value & (1 << 0)) { /* START condition */
- if (s->data & 1)
- s->status |= 1 << 0; /* set RWM */
- else
- s->status &= ~(1 << 0); /* clear RWM */
- ack = !i2c_start_transfer(s->bus, s->data >> 1, s->data & 1);
- } else {
- if (s->status & (1 << 0)) { /* RWM */
- s->data = i2c_recv(s->bus);
- if (value & (1 << 2)) /* ACKNAK */
- i2c_nack(s->bus);
- ack = 1;
- } else
- ack = !i2c_send(s->bus, s->data);
- }
-
- if (value & (1 << 1)) /* STOP condition */
- i2c_end_transfer(s->bus);
-
- if (ack) {
- if (value & (1 << 0)) /* START condition */
- s->status |= 1 << 6; /* set ITE */
- else
- if (s->status & (1 << 0)) /* RWM */
- s->status |= 1 << 7; /* set IRF */
- else
- s->status |= 1 << 6; /* set ITE */
- s->status &= ~(1 << 1); /* clear ACKNAK */
- } else {
- s->status |= 1 << 6; /* set ITE */
- s->status |= 1 << 10; /* set BED */
- s->status |= 1 << 1; /* set ACKNAK */
- }
- }
- if (!(value & (1 << 3)) && (value & (1 << 6))) /* !TB and IUE */
- if (value & (1 << 4)) /* MA */
- i2c_end_transfer(s->bus);
- pxa2xx_i2c_update(s);
- break;
-
- case ISR:
- s->status &= ~(value & 0x07f0);
- pxa2xx_i2c_update(s);
- break;
-
- case ISAR:
- i2c_slave_set_address(I2C_SLAVE(s->slave), value & 0x7f);
- break;
-
- case IDBR:
- s->data = value & 0xff;
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- }
-}
-
-static const MemoryRegionOps pxa2xx_i2c_ops = {
- .read = pxa2xx_i2c_read,
- .write = pxa2xx_i2c_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_i2c_slave = {
- .name = "pxa2xx_i2c_slave",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_I2C_SLAVE(parent_obj, PXA2xxI2CSlaveState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static const VMStateDescription vmstate_pxa2xx_i2c = {
- .name = "pxa2xx_i2c",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT16(control, PXA2xxI2CState),
- VMSTATE_UINT16(status, PXA2xxI2CState),
- VMSTATE_UINT8(ibmr, PXA2xxI2CState),
- VMSTATE_UINT8(data, PXA2xxI2CState),
- VMSTATE_STRUCT_POINTER(slave, PXA2xxI2CState,
- vmstate_pxa2xx_i2c_slave, PXA2xxI2CSlaveState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data)
-{
- I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
-
- k->event = pxa2xx_i2c_event;
- k->recv = pxa2xx_i2c_rx;
- k->send = pxa2xx_i2c_tx;
-}
-
-static const TypeInfo pxa2xx_i2c_slave_info = {
- .name = TYPE_PXA2XX_I2C_SLAVE,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(PXA2xxI2CSlaveState),
- .class_init = pxa2xx_i2c_slave_class_init,
-};
-
-PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
- qemu_irq irq, uint32_t region_size)
-{
- DeviceState *dev;
- SysBusDevice *i2c_dev;
- PXA2xxI2CState *s;
- I2CBus *i2cbus;
-
- dev = qdev_new(TYPE_PXA2XX_I2C);
- qdev_prop_set_uint32(dev, "size", region_size + 1);
- qdev_prop_set_uint32(dev, "offset", base & region_size);
-
- /* FIXME: Should the slave device really be on a separate bus? */
- i2cbus = i2c_init_bus(dev, "dummy");
-
- i2c_dev = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(i2c_dev, &error_fatal);
- sysbus_mmio_map(i2c_dev, 0, base & ~region_size);
- sysbus_connect_irq(i2c_dev, 0, irq);
-
- s = PXA2XX_I2C(i2c_dev);
- s->slave = PXA2XX_I2C_SLAVE(i2c_slave_create_simple(i2cbus,
- TYPE_PXA2XX_I2C_SLAVE,
- 0));
- s->slave->host = s;
-
- return s;
-}
-
-static void pxa2xx_i2c_initfn(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- PXA2xxI2CState *s = PXA2XX_I2C(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- s->bus = i2c_init_bus(dev, NULL);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_i2c_ops, s,
- "pxa2xx-i2c", s->region_size);
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
-}
-
-I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s)
-{
- return s->bus;
-}
-
-static Property pxa2xx_i2c_properties[] = {
- DEFINE_PROP_UINT32("size", PXA2xxI2CState, region_size, 0x10000),
- DEFINE_PROP_UINT32("offset", PXA2xxI2CState, offset, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa2xx_i2c_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "PXA2xx I2C Bus Controller";
- dc->vmsd = &vmstate_pxa2xx_i2c;
- device_class_set_props(dc, pxa2xx_i2c_properties);
-}
-
-static const TypeInfo pxa2xx_i2c_info = {
- .name = TYPE_PXA2XX_I2C,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxI2CState),
- .instance_init = pxa2xx_i2c_initfn,
- .class_init = pxa2xx_i2c_class_init,
-};
-
-/* PXA Inter-IC Sound Controller */
-static void pxa2xx_i2s_reset(PXA2xxI2SState *i2s)
-{
- i2s->rx_len = 0;
- i2s->tx_len = 0;
- i2s->fifo_len = 0;
- i2s->clk = 0x1a;
- i2s->control[0] = 0x00;
- i2s->control[1] = 0x00;
- i2s->status = 0x00;
- i2s->mask = 0x00;
-}
-
-#define SACR_TFTH(val) ((val >> 8) & 0xf)
-#define SACR_RFTH(val) ((val >> 12) & 0xf)
-#define SACR_DREC(val) (val & (1 << 3))
-#define SACR_DPRL(val) (val & (1 << 4))
-
-static inline void pxa2xx_i2s_update(PXA2xxI2SState *i2s)
-{
- int rfs, tfs;
- rfs = SACR_RFTH(i2s->control[0]) < i2s->rx_len &&
- !SACR_DREC(i2s->control[1]);
- tfs = (i2s->tx_len || i2s->fifo_len < SACR_TFTH(i2s->control[0])) &&
- i2s->enable && !SACR_DPRL(i2s->control[1]);
-
- qemu_set_irq(i2s->rx_dma, rfs);
- qemu_set_irq(i2s->tx_dma, tfs);
-
- i2s->status &= 0xe0;
- if (i2s->fifo_len < 16 || !i2s->enable)
- i2s->status |= 1 << 0; /* TNF */
- if (i2s->rx_len)
- i2s->status |= 1 << 1; /* RNE */
- if (i2s->enable)
- i2s->status |= 1 << 2; /* BSY */
- if (tfs)
- i2s->status |= 1 << 3; /* TFS */
- if (rfs)
- i2s->status |= 1 << 4; /* RFS */
- if (!(i2s->tx_len && i2s->enable))
- i2s->status |= i2s->fifo_len << 8; /* TFL */
- i2s->status |= MAX(i2s->rx_len, 0xf) << 12; /* RFL */
-
- qemu_set_irq(i2s->irq, i2s->status & i2s->mask);
-}
-
-#define SACR0 0x00 /* Serial Audio Global Control register */
-#define SACR1 0x04 /* Serial Audio I2S/MSB-Justified Control register */
-#define SASR0 0x0c /* Serial Audio Interface and FIFO Status register */
-#define SAIMR 0x14 /* Serial Audio Interrupt Mask register */
-#define SAICR 0x18 /* Serial Audio Interrupt Clear register */
-#define SADIV 0x60 /* Serial Audio Clock Divider register */
-#define SADR 0x80 /* Serial Audio Data register */
-
-static uint64_t pxa2xx_i2s_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxI2SState *s = (PXA2xxI2SState *) opaque;
-
- switch (addr) {
- case SACR0:
- return s->control[0];
- case SACR1:
- return s->control[1];
- case SASR0:
- return s->status;
- case SAIMR:
- return s->mask;
- case SAICR:
- return 0;
- case SADIV:
- return s->clk;
- case SADR:
- if (s->rx_len > 0) {
- s->rx_len --;
- pxa2xx_i2s_update(s);
- return s->codec_in(s->opaque);
- }
- return 0;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_i2s_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- PXA2xxI2SState *s = (PXA2xxI2SState *) opaque;
- uint32_t *sample;
-
- switch (addr) {
- case SACR0:
- if (value & (1 << 3)) /* RST */
- pxa2xx_i2s_reset(s);
- s->control[0] = value & 0xff3d;
- if (!s->enable && (value & 1) && s->tx_len) { /* ENB */
- for (sample = s->fifo; s->fifo_len > 0; s->fifo_len --, sample ++)
- s->codec_out(s->opaque, *sample);
- s->status &= ~(1 << 7); /* I2SOFF */
- }
- if (value & (1 << 4)) /* EFWR */
- printf("%s: Attempt to use special function\n", __func__);
- s->enable = (value & 9) == 1; /* ENB && !RST */
- pxa2xx_i2s_update(s);
- break;
- case SACR1:
- s->control[1] = value & 0x0039;
- if (value & (1 << 5)) /* ENLBF */
- printf("%s: Attempt to use loopback function\n", __func__);
- if (value & (1 << 4)) /* DPRL */
- s->fifo_len = 0;
- pxa2xx_i2s_update(s);
- break;
- case SAIMR:
- s->mask = value & 0x0078;
- pxa2xx_i2s_update(s);
- break;
- case SAICR:
- s->status &= ~(value & (3 << 5));
- pxa2xx_i2s_update(s);
- break;
- case SADIV:
- s->clk = value & 0x007f;
- break;
- case SADR:
- if (s->tx_len && s->enable) {
- s->tx_len --;
- pxa2xx_i2s_update(s);
- s->codec_out(s->opaque, value);
- } else if (s->fifo_len < 16) {
- s->fifo[s->fifo_len ++] = value;
- pxa2xx_i2s_update(s);
- }
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- }
-}
-
-static const MemoryRegionOps pxa2xx_i2s_ops = {
- .read = pxa2xx_i2s_read,
- .write = pxa2xx_i2s_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_i2s = {
- .name = "pxa2xx_i2s",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(control, PXA2xxI2SState, 2),
- VMSTATE_UINT32(status, PXA2xxI2SState),
- VMSTATE_UINT32(mask, PXA2xxI2SState),
- VMSTATE_UINT32(clk, PXA2xxI2SState),
- VMSTATE_INT32(enable, PXA2xxI2SState),
- VMSTATE_INT32(rx_len, PXA2xxI2SState),
- VMSTATE_INT32(tx_len, PXA2xxI2SState),
- VMSTATE_INT32(fifo_len, PXA2xxI2SState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void pxa2xx_i2s_data_req(void *opaque, int tx, int rx)
-{
- PXA2xxI2SState *s = (PXA2xxI2SState *) opaque;
- uint32_t *sample;
-
- /* Signal FIFO errors */
- if (s->enable && s->tx_len)
- s->status |= 1 << 5; /* TUR */
- if (s->enable && s->rx_len)
- s->status |= 1 << 6; /* ROR */
-
- /* Should be tx - MIN(tx, s->fifo_len) but we don't really need to
- * handle the cases where it makes a difference. */
- s->tx_len = tx - s->fifo_len;
- s->rx_len = rx;
- /* Note that if s->codec_out wasn't set, we wouldn't get called. */
- if (s->enable)
- for (sample = s->fifo; s->fifo_len; s->fifo_len --, sample ++)
- s->codec_out(s->opaque, *sample);
- pxa2xx_i2s_update(s);
-}
-
-static PXA2xxI2SState *pxa2xx_i2s_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma)
-{
- PXA2xxI2SState *s = g_new0(PXA2xxI2SState, 1);
-
- s->irq = irq;
- s->rx_dma = rx_dma;
- s->tx_dma = tx_dma;
- s->data_req = pxa2xx_i2s_data_req;
-
- pxa2xx_i2s_reset(s);
-
- memory_region_init_io(&s->iomem, NULL, &pxa2xx_i2s_ops, s,
- "pxa2xx-i2s", 0x100000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- vmstate_register(NULL, base, &vmstate_pxa2xx_i2s, s);
-
- return s;
-}
-
-/* PXA Fast Infra-red Communications Port */
-struct PXA2xxFIrState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- qemu_irq irq;
- qemu_irq rx_dma;
- qemu_irq tx_dma;
- uint32_t enable;
- CharBackend chr;
-
- uint8_t control[3];
- uint8_t status[2];
-
- uint32_t rx_len;
- uint32_t rx_start;
- uint8_t rx_fifo[64];
-};
-
-static void pxa2xx_fir_reset(DeviceState *d)
-{
- PXA2xxFIrState *s = PXA2XX_FIR(d);
-
- s->control[0] = 0x00;
- s->control[1] = 0x00;
- s->control[2] = 0x00;
- s->status[0] = 0x00;
- s->status[1] = 0x00;
- s->enable = 0;
-}
-
-static inline void pxa2xx_fir_update(PXA2xxFIrState *s)
-{
- static const int tresh[4] = { 8, 16, 32, 0 };
- int intr = 0;
- if ((s->control[0] & (1 << 4)) && /* RXE */
- s->rx_len >= tresh[s->control[2] & 3]) /* TRIG */
- s->status[0] |= 1 << 4; /* RFS */
- else
- s->status[0] &= ~(1 << 4); /* RFS */
- if (s->control[0] & (1 << 3)) /* TXE */
- s->status[0] |= 1 << 3; /* TFS */
- else
- s->status[0] &= ~(1 << 3); /* TFS */
- if (s->rx_len)
- s->status[1] |= 1 << 2; /* RNE */
- else
- s->status[1] &= ~(1 << 2); /* RNE */
- if (s->control[0] & (1 << 4)) /* RXE */
- s->status[1] |= 1 << 0; /* RSY */
- else
- s->status[1] &= ~(1 << 0); /* RSY */
-
- intr |= (s->control[0] & (1 << 5)) && /* RIE */
- (s->status[0] & (1 << 4)); /* RFS */
- intr |= (s->control[0] & (1 << 6)) && /* TIE */
- (s->status[0] & (1 << 3)); /* TFS */
- intr |= (s->control[2] & (1 << 4)) && /* TRAIL */
- (s->status[0] & (1 << 6)); /* EOC */
- intr |= (s->control[0] & (1 << 2)) && /* TUS */
- (s->status[0] & (1 << 1)); /* TUR */
- intr |= s->status[0] & 0x25; /* FRE, RAB, EIF */
-
- qemu_set_irq(s->rx_dma, (s->status[0] >> 4) & 1);
- qemu_set_irq(s->tx_dma, (s->status[0] >> 3) & 1);
-
- qemu_set_irq(s->irq, intr && s->enable);
-}
-
-#define ICCR0 0x00 /* FICP Control register 0 */
-#define ICCR1 0x04 /* FICP Control register 1 */
-#define ICCR2 0x08 /* FICP Control register 2 */
-#define ICDR 0x0c /* FICP Data register */
-#define ICSR0 0x14 /* FICP Status register 0 */
-#define ICSR1 0x18 /* FICP Status register 1 */
-#define ICFOR 0x1c /* FICP FIFO Occupancy Status register */
-
-static uint64_t pxa2xx_fir_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- uint8_t ret;
-
- switch (addr) {
- case ICCR0:
- return s->control[0];
- case ICCR1:
- return s->control[1];
- case ICCR2:
- return s->control[2];
- case ICDR:
- s->status[0] &= ~0x01;
- s->status[1] &= ~0x72;
- if (s->rx_len) {
- s->rx_len --;
- ret = s->rx_fifo[s->rx_start ++];
- s->rx_start &= 63;
- pxa2xx_fir_update(s);
- return ret;
- }
- printf("%s: Rx FIFO underrun.\n", __func__);
- break;
- case ICSR0:
- return s->status[0];
- case ICSR1:
- return s->status[1] | (1 << 3); /* TNF */
- case ICFOR:
- return s->rx_len;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- break;
- }
- return 0;
-}
-
-static void pxa2xx_fir_write(void *opaque, hwaddr addr,
- uint64_t value64, unsigned size)
-{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- uint32_t value = value64;
- uint8_t ch;
-
- switch (addr) {
- case ICCR0:
- s->control[0] = value;
- if (!(value & (1 << 4))) /* RXE */
- s->rx_len = s->rx_start = 0;
- if (!(value & (1 << 3))) { /* TXE */
- /* Nop */
- }
- s->enable = value & 1; /* ITR */
- if (!s->enable)
- s->status[0] = 0;
- pxa2xx_fir_update(s);
- break;
- case ICCR1:
- s->control[1] = value;
- break;
- case ICCR2:
- s->control[2] = value & 0x3f;
- pxa2xx_fir_update(s);
- break;
- case ICDR:
- if (s->control[2] & (1 << 2)) { /* TXP */
- ch = value;
- } else {
- ch = ~value;
- }
- if (s->enable && (s->control[0] & (1 << 3))) { /* TXE */
- /* XXX this blocks the entire thread. Rewrite to use
- * qemu_chr_fe_write and background I/O callbacks. */
- qemu_chr_fe_write_all(&s->chr, &ch, 1);
- }
- break;
- case ICSR0:
- s->status[0] &= ~(value & 0x66);
- pxa2xx_fir_update(s);
- break;
- case ICFOR:
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, addr);
- }
-}
-
-static const MemoryRegionOps pxa2xx_fir_ops = {
- .read = pxa2xx_fir_read,
- .write = pxa2xx_fir_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static int pxa2xx_fir_is_empty(void *opaque)
-{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- return (s->rx_len < 64);
-}
-
-static void pxa2xx_fir_rx(void *opaque, const uint8_t *buf, int size)
-{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- if (!(s->control[0] & (1 << 4))) /* RXE */
- return;
-
- while (size --) {
- s->status[1] |= 1 << 4; /* EOF */
- if (s->rx_len >= 64) {
- s->status[1] |= 1 << 6; /* ROR */
- break;
- }
-
- if (s->control[2] & (1 << 3)) /* RXP */
- s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = *(buf ++);
- else
- s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = ~*(buf ++);
- }
-
- pxa2xx_fir_update(s);
-}
-
-static void pxa2xx_fir_event(void *opaque, QEMUChrEvent event)
-{
-}
-
-static void pxa2xx_fir_instance_init(Object *obj)
-{
- PXA2xxFIrState *s = PXA2XX_FIR(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_fir_ops, s,
- "pxa2xx-fir", 0x1000);
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
- sysbus_init_irq(sbd, &s->rx_dma);
- sysbus_init_irq(sbd, &s->tx_dma);
-}
-
-static void pxa2xx_fir_realize(DeviceState *dev, Error **errp)
-{
- PXA2xxFIrState *s = PXA2XX_FIR(dev);
-
- qemu_chr_fe_set_handlers(&s->chr, pxa2xx_fir_is_empty,
- pxa2xx_fir_rx, pxa2xx_fir_event, NULL, s, NULL,
- true);
-}
-
-static bool pxa2xx_fir_vmstate_validate(void *opaque, int version_id)
-{
- PXA2xxFIrState *s = opaque;
-
- return s->rx_start < ARRAY_SIZE(s->rx_fifo);
-}
-
-static const VMStateDescription pxa2xx_fir_vmsd = {
- .name = "pxa2xx-fir",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(enable, PXA2xxFIrState),
- VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3),
- VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2),
- VMSTATE_UINT32(rx_len, PXA2xxFIrState),
- VMSTATE_UINT32(rx_start, PXA2xxFIrState),
- VMSTATE_VALIDATE("fifo is 64 bytes", pxa2xx_fir_vmstate_validate),
- VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxFIrState, 64),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static Property pxa2xx_fir_properties[] = {
- DEFINE_PROP_CHR("chardev", PXA2xxFIrState, chr),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa2xx_fir_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = pxa2xx_fir_realize;
- dc->vmsd = &pxa2xx_fir_vmsd;
- device_class_set_props(dc, pxa2xx_fir_properties);
- dc->reset = pxa2xx_fir_reset;
-}
-
-static const TypeInfo pxa2xx_fir_info = {
- .name = TYPE_PXA2XX_FIR,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxFIrState),
- .class_init = pxa2xx_fir_class_init,
- .instance_init = pxa2xx_fir_instance_init,
-};
-
-static PXA2xxFIrState *pxa2xx_fir_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq, qemu_irq rx_dma,
- qemu_irq tx_dma,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *sbd;
-
- dev = qdev_new(TYPE_PXA2XX_FIR);
- qdev_prop_set_chr(dev, "chardev", chr);
- sbd = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(sbd, &error_fatal);
- sysbus_mmio_map(sbd, 0, base);
- sysbus_connect_irq(sbd, 0, irq);
- sysbus_connect_irq(sbd, 1, rx_dma);
- sysbus_connect_irq(sbd, 2, tx_dma);
- return PXA2XX_FIR(dev);
-}
-
-static void pxa2xx_reset(void *opaque, int line, int level)
-{
- PXA2xxState *s = (PXA2xxState *) opaque;
-
- if (level && (s->pm_regs[PCFR >> 2] & 0x10)) { /* GPR_EN */
- cpu_reset(CPU(s->cpu));
- /* TODO: reset peripherals */
- }
-}
-
-/* Initialise a PXA270 integrated chip (ARM based core). */
-PXA2xxState *pxa270_init(unsigned int sdram_size, const char *cpu_type)
-{
- MemoryRegion *address_space = get_system_memory();
- PXA2xxState *s;
- int i;
- DriveInfo *dinfo;
- s = g_new0(PXA2xxState, 1);
-
- if (strncmp(cpu_type, "pxa27", 5)) {
- error_report("Machine requires a PXA27x processor");
- exit(1);
- }
-
- s->cpu = ARM_CPU(cpu_create(cpu_type));
- s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0);
-
- /* SDRAM & Internal Memory Storage */
- memory_region_init_ram(&s->sdram, NULL, "pxa270.sdram", sdram_size,
- &error_fatal);
- memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram);
- memory_region_init_ram(&s->internal, NULL, "pxa270.internal", 0x40000,
- &error_fatal);
- memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE,
- &s->internal);
-
- s->pic = pxa2xx_pic_init(0x40d00000, s->cpu);
-
- s->dma = pxa27x_dma_init(0x40000000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA));
-
- sysbus_create_varargs("pxa27x-timer", 0x40a00000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3),
- qdev_get_gpio_in(s->pic, PXA27X_PIC_OST_4_11),
- NULL);
-
- s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121);
-
- s->mmc = pxa2xx_mmci_init(address_space, 0x41100000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI));
- dinfo = drive_get(IF_SD, 0, 0);
- if (dinfo) {
- DeviceState *carddev;
-
- /* Create and plug in the sd card */
- carddev = qdev_new(TYPE_SD_CARD);
- qdev_prop_set_drive_err(carddev, "drive",
- blk_by_legacy_dinfo(dinfo), &error_fatal);
- qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc),
- "sd-bus"),
- &error_fatal);
- } else if (!qtest_enabled()) {
- warn_report("missing SecureDigital device");
- }
-
- for (i = 0; pxa270_serial[i].io_base; i++) {
- if (serial_hd(i)) {
- serial_mm_init(address_space, pxa270_serial[i].io_base, 2,
- qdev_get_gpio_in(s->pic, pxa270_serial[i].irqn),
- 14857000 / 16, serial_hd(i),
- DEVICE_NATIVE_ENDIAN);
- } else {
- break;
- }
- }
- if (serial_hd(i))
- s->fir = pxa2xx_fir_init(address_space, 0x40800000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP),
- serial_hd(i));
-
- s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD));
-
- s->cm_base = 0x41300000;
- s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */
- s->clkcfg = 0x00000009; /* Turbo mode active */
- memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000);
- memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s);
-
- pxa2xx_setup_cp14(s);
-
- s->mm_base = 0x48000000;
- s->mm_regs[MDMRS >> 2] = 0x00020002;
- s->mm_regs[MDREFR >> 2] = 0x03ca4000;
- s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */
- memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000);
- memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s);
-
- s->pm_base = 0x40f00000;
- memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100);
- memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s);
-
- for (i = 0; pxa27x_ssp[i].io_base; i ++);
- s->ssp = g_new0(SSIBus *, i);
- for (i = 0; pxa27x_ssp[i].io_base; i ++) {
- DeviceState *dev;
- dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa27x_ssp[i].io_base,
- qdev_get_gpio_in(s->pic, pxa27x_ssp[i].irqn));
- s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
- }
-
- sysbus_create_simple("sysbus-ohci", 0x4c000000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
-
- s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
- 0x20000000, NULL));
- s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
- 0x30000000, NULL));
-
- sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM));
-
- s->i2c[0] = pxa2xx_i2c_init(0x40301600,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff);
- s->i2c[1] = pxa2xx_i2c_init(0x40f00100,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff);
-
- s->i2s = pxa2xx_i2s_init(address_space, 0x40400000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S));
-
- s->kp = pxa27x_keypad_init(address_space, 0x41500000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_KEYPAD));
-
- /* GPIO1 resets the processor */
- /* The handler can be overridden by board-specific code */
- qdev_connect_gpio_out(s->gpio, 1, s->reset);
- return s;
-}
-
-/* Initialise a PXA255 integrated chip (ARM based core). */
-PXA2xxState *pxa255_init(unsigned int sdram_size)
-{
- MemoryRegion *address_space = get_system_memory();
- PXA2xxState *s;
- int i;
- DriveInfo *dinfo;
-
- s = g_new0(PXA2xxState, 1);
-
- s->cpu = ARM_CPU(cpu_create(ARM_CPU_TYPE_NAME("pxa255")));
- s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0);
-
- /* SDRAM & Internal Memory Storage */
- memory_region_init_ram(&s->sdram, NULL, "pxa255.sdram", sdram_size,
- &error_fatal);
- memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram);
- memory_region_init_ram(&s->internal, NULL, "pxa255.internal",
- PXA2XX_INTERNAL_SIZE, &error_fatal);
- memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE,
- &s->internal);
-
- s->pic = pxa2xx_pic_init(0x40d00000, s->cpu);
-
- s->dma = pxa255_dma_init(0x40000000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA));
-
- sysbus_create_varargs("pxa25x-timer", 0x40a00000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2),
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3),
- NULL);
-
- s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85);
-
- s->mmc = pxa2xx_mmci_init(address_space, 0x41100000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI));
- dinfo = drive_get(IF_SD, 0, 0);
- if (dinfo) {
- DeviceState *carddev;
-
- /* Create and plug in the sd card */
- carddev = qdev_new(TYPE_SD_CARD);
- qdev_prop_set_drive_err(carddev, "drive",
- blk_by_legacy_dinfo(dinfo), &error_fatal);
- qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc),
- "sd-bus"),
- &error_fatal);
- } else if (!qtest_enabled()) {
- warn_report("missing SecureDigital device");
- }
-
- for (i = 0; pxa255_serial[i].io_base; i++) {
- if (serial_hd(i)) {
- serial_mm_init(address_space, pxa255_serial[i].io_base, 2,
- qdev_get_gpio_in(s->pic, pxa255_serial[i].irqn),
- 14745600 / 16, serial_hd(i),
- DEVICE_NATIVE_ENDIAN);
- } else {
- break;
- }
- }
- if (serial_hd(i))
- s->fir = pxa2xx_fir_init(address_space, 0x40800000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP),
- serial_hd(i));
-
- s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD));
-
- s->cm_base = 0x41300000;
- s->cm_regs[CCCR >> 2] = 0x00000121; /* from datasheet */
- s->cm_regs[CKEN >> 2] = 0x00017def; /* from datasheet */
-
- s->clkcfg = 0x00000009; /* Turbo mode active */
- memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000);
- memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s);
-
- pxa2xx_setup_cp14(s);
-
- s->mm_base = 0x48000000;
- s->mm_regs[MDMRS >> 2] = 0x00020002;
- s->mm_regs[MDREFR >> 2] = 0x03ca4000;
- s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */
- memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000);
- memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s);
-
- s->pm_base = 0x40f00000;
- memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100);
- memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem);
- vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s);
-
- for (i = 0; pxa255_ssp[i].io_base; i ++);
- s->ssp = g_new0(SSIBus *, i);
- for (i = 0; pxa255_ssp[i].io_base; i ++) {
- DeviceState *dev;
- dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa255_ssp[i].io_base,
- qdev_get_gpio_in(s->pic, pxa255_ssp[i].irqn));
- s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
- }
-
- s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
- 0x20000000, NULL));
- s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA,
- 0x30000000, NULL));
-
- sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM));
-
- s->i2c[0] = pxa2xx_i2c_init(0x40301600,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff);
- s->i2c[1] = pxa2xx_i2c_init(0x40f00100,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff);
-
- s->i2s = pxa2xx_i2s_init(address_space, 0x40400000,
- qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S),
- qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S),
- qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S));
-
- /* GPIO1 resets the processor */
- /* The handler can be overridden by board-specific code */
- qdev_connect_gpio_out(s->gpio, 1, s->reset);
- return s;
-}
-
-static void pxa2xx_ssp_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->reset = pxa2xx_ssp_reset;
- dc->vmsd = &vmstate_pxa2xx_ssp;
-}
-
-static const TypeInfo pxa2xx_ssp_info = {
- .name = TYPE_PXA2XX_SSP,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxSSPState),
- .instance_init = pxa2xx_ssp_init,
- .class_init = pxa2xx_ssp_class_init,
-};
-
-static void pxa2xx_register_types(void)
-{
- type_register_static(&pxa2xx_i2c_slave_info);
- type_register_static(&pxa2xx_ssp_info);
- type_register_static(&pxa2xx_i2c_info);
- type_register_static(&pxa2xx_rtc_sysbus_info);
- type_register_static(&pxa2xx_fir_info);
-}
-
-type_init(pxa2xx_register_types)
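
The SSP setup in the two deleted init functions above relies on a sentinel-terminated descriptor table: an empty first loop counts entries until io_base reads zero, exactly that many SSIBus pointers are allocated, and a second loop instantiates the controllers. A standalone sketch of the same counting idiom follows; the table contents are hypothetical and only stand in for the real pxa27x_ssp[]/pxa255_ssp[] arrays from the deleted file.

#include <glib.h>
#include <stdio.h>

typedef struct {
    unsigned long io_base;          /* zero terminates the table */
    int irqn;
} PeriphDesc;

/* Hypothetical table; the real descriptor arrays lived in the deleted file. */
static const PeriphDesc ssp_table[] = {
    { 0x1000, 1 },
    { 0x2000, 2 },
    { 0 }                           /* sentinel */
};

int main(void)
{
    int i;

    /* First pass: count entries up to the sentinel. */
    for (i = 0; ssp_table[i].io_base; i++) {
        ;
    }

    /* Allocate one bus pointer per discovered controller, as the
     * original code does with g_new0(SSIBus *, i). */
    void **buses = g_new0(void *, i);

    /* Second pass: create each controller; a placeholder stands in for
     * sysbus_create_simple() here. */
    for (i = 0; ssp_table[i].io_base; i++) {
        buses[i] = GINT_TO_POINTER(ssp_table[i].irqn);
    }

    printf("%d SSP controllers\n", i);
    g_free(buses);
    return 0;
}
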
diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c
deleted file mode 100644
index 41dca03..0000000
--- a/hw/arm/pxa2xx_gpio.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Intel XScale PXA255/270 GPIO controller emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPL.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "hw/arm/pxa.h"
-#include "qapi/error.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-#define PXA2XX_GPIO_BANKS 4
-
-#define TYPE_PXA2XX_GPIO "pxa2xx-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxGPIOInfo, PXA2XX_GPIO)
-
-struct PXA2xxGPIOInfo {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- qemu_irq irq0, irq1, irqX;
- int lines;
- ARMCPU *cpu;
-
- /* XXX: GNU C vectors are more suitable */
- uint32_t ilevel[PXA2XX_GPIO_BANKS];
- uint32_t olevel[PXA2XX_GPIO_BANKS];
- uint32_t dir[PXA2XX_GPIO_BANKS];
- uint32_t rising[PXA2XX_GPIO_BANKS];
- uint32_t falling[PXA2XX_GPIO_BANKS];
- uint32_t status[PXA2XX_GPIO_BANKS];
- uint32_t gafr[PXA2XX_GPIO_BANKS * 2];
-
- uint32_t prev_level[PXA2XX_GPIO_BANKS];
- qemu_irq handler[PXA2XX_GPIO_BANKS * 32];
- qemu_irq read_notify;
-};
-
-static struct {
- enum {
- GPIO_NONE,
- GPLR,
- GPSR,
- GPCR,
- GPDR,
- GRER,
- GFER,
- GEDR,
- GAFR_L,
- GAFR_U,
- } reg;
- int bank;
-} pxa2xx_gpio_regs[0x200] = {
- [0 ... 0x1ff] = { GPIO_NONE, 0 },
-#define PXA2XX_REG(reg, a0, a1, a2, a3) \
- [a0] = { reg, 0 }, [a1] = { reg, 1 }, [a2] = { reg, 2 }, [a3] = { reg, 3 },
-
- PXA2XX_REG(GPLR, 0x000, 0x004, 0x008, 0x100)
- PXA2XX_REG(GPSR, 0x018, 0x01c, 0x020, 0x118)
- PXA2XX_REG(GPCR, 0x024, 0x028, 0x02c, 0x124)
- PXA2XX_REG(GPDR, 0x00c, 0x010, 0x014, 0x10c)
- PXA2XX_REG(GRER, 0x030, 0x034, 0x038, 0x130)
- PXA2XX_REG(GFER, 0x03c, 0x040, 0x044, 0x13c)
- PXA2XX_REG(GEDR, 0x048, 0x04c, 0x050, 0x148)
- PXA2XX_REG(GAFR_L, 0x054, 0x05c, 0x064, 0x06c)
- PXA2XX_REG(GAFR_U, 0x058, 0x060, 0x068, 0x070)
-};
-
-static void pxa2xx_gpio_irq_update(PXA2xxGPIOInfo *s)
-{
- if (s->status[0] & (1 << 0))
- qemu_irq_raise(s->irq0);
- else
- qemu_irq_lower(s->irq0);
-
- if (s->status[0] & (1 << 1))
- qemu_irq_raise(s->irq1);
- else
- qemu_irq_lower(s->irq1);
-
- if ((s->status[0] & ~3) | s->status[1] | s->status[2] | s->status[3])
- qemu_irq_raise(s->irqX);
- else
- qemu_irq_lower(s->irqX);
-}
-
-/* Bitmap of pins used as standby and sleep wake-up sources. */
-static const int pxa2xx_gpio_wake[PXA2XX_GPIO_BANKS] = {
- 0x8003fe1b, 0x002001fc, 0xec080000, 0x0012007f,
-};
-
-static void pxa2xx_gpio_set(void *opaque, int line, int level)
-{
- PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque;
- CPUState *cpu = CPU(s->cpu);
- int bank;
- uint32_t mask;
-
- if (line >= s->lines) {
- printf("%s: No GPIO pin %i\n", __func__, line);
- return;
- }
-
- bank = line >> 5;
- mask = 1U << (line & 31);
-
- if (level) {
- s->status[bank] |= s->rising[bank] & mask &
- ~s->ilevel[bank] & ~s->dir[bank];
- s->ilevel[bank] |= mask;
- } else {
- s->status[bank] |= s->falling[bank] & mask &
- s->ilevel[bank] & ~s->dir[bank];
- s->ilevel[bank] &= ~mask;
- }
-
- if (s->status[bank] & mask)
- pxa2xx_gpio_irq_update(s);
-
- /* Wake-up GPIOs */
- if (cpu->halted && (mask & ~s->dir[bank] & pxa2xx_gpio_wake[bank])) {
- cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB);
- }
-}
-
-static void pxa2xx_gpio_handler_update(PXA2xxGPIOInfo *s) {
- uint32_t level, diff;
- int i, bit, line;
- for (i = 0; i < PXA2XX_GPIO_BANKS; i ++) {
- level = s->olevel[i] & s->dir[i];
-
- for (diff = s->prev_level[i] ^ level; diff; diff ^= 1 << bit) {
- bit = ctz32(diff);
- line = bit + 32 * i;
- qemu_set_irq(s->handler[line], (level >> bit) & 1);
- }
-
- s->prev_level[i] = level;
- }
-}
-
-static uint64_t pxa2xx_gpio_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque;
- uint32_t ret;
- int bank;
- if (offset >= 0x200)
- return 0;
-
- bank = pxa2xx_gpio_regs[offset].bank;
- switch (pxa2xx_gpio_regs[offset].reg) {
- case GPDR: /* GPIO Pin-Direction registers */
- return s->dir[bank];
-
- case GPSR: /* GPIO Pin-Output Set registers */
- qemu_log_mask(LOG_GUEST_ERROR,
- "pxa2xx GPIO: read from write only register GPSR\n");
- return 0;
-
- case GPCR: /* GPIO Pin-Output Clear registers */
- qemu_log_mask(LOG_GUEST_ERROR,
- "pxa2xx GPIO: read from write only register GPCR\n");
- return 0;
-
- case GRER: /* GPIO Rising-Edge Detect Enable registers */
- return s->rising[bank];
-
- case GFER: /* GPIO Falling-Edge Detect Enable registers */
- return s->falling[bank];
-
- case GAFR_L: /* GPIO Alternate Function registers */
- return s->gafr[bank * 2];
-
- case GAFR_U: /* GPIO Alternate Function registers */
- return s->gafr[bank * 2 + 1];
-
- case GPLR: /* GPIO Pin-Level registers */
- ret = (s->olevel[bank] & s->dir[bank]) |
- (s->ilevel[bank] & ~s->dir[bank]);
- qemu_irq_raise(s->read_notify);
- return ret;
-
- case GEDR: /* GPIO Edge Detect Status registers */
- return s->status[bank];
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_gpio_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque;
- int bank;
- if (offset >= 0x200)
- return;
-
- bank = pxa2xx_gpio_regs[offset].bank;
- switch (pxa2xx_gpio_regs[offset].reg) {
- case GPDR: /* GPIO Pin-Direction registers */
- s->dir[bank] = value;
- pxa2xx_gpio_handler_update(s);
- break;
-
- case GPSR: /* GPIO Pin-Output Set registers */
- s->olevel[bank] |= value;
- pxa2xx_gpio_handler_update(s);
- break;
-
- case GPCR: /* GPIO Pin-Output Clear registers */
- s->olevel[bank] &= ~value;
- pxa2xx_gpio_handler_update(s);
- break;
-
- case GRER: /* GPIO Rising-Edge Detect Enable registers */
- s->rising[bank] = value;
- break;
-
- case GFER: /* GPIO Falling-Edge Detect Enable registers */
- s->falling[bank] = value;
- break;
-
- case GAFR_L: /* GPIO Alternate Function registers */
- s->gafr[bank * 2] = value;
- break;
-
- case GAFR_U: /* GPIO Alternate Function registers */
- s->gafr[bank * 2 + 1] = value;
- break;
-
- case GEDR: /* GPIO Edge Detect Status registers */
- s->status[bank] &= ~value;
- pxa2xx_gpio_irq_update(s);
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- }
-}
-
-static const MemoryRegionOps pxa_gpio_ops = {
- .read = pxa2xx_gpio_read,
- .write = pxa2xx_gpio_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-DeviceState *pxa2xx_gpio_init(hwaddr base,
- ARMCPU *cpu, DeviceState *pic, int lines)
-{
- DeviceState *dev;
-
- dev = qdev_new(TYPE_PXA2XX_GPIO);
- qdev_prop_set_int32(dev, "lines", lines);
- object_property_set_link(OBJECT(dev), "cpu", OBJECT(cpu), &error_abort);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
- qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_0));
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1,
- qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_1));
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2,
- qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_X));
-
- return dev;
-}
-
-static void pxa2xx_gpio_initfn(Object *obj)
-{
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- DeviceState *dev = DEVICE(sbd);
- PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev);
-
- memory_region_init_io(&s->iomem, obj, &pxa_gpio_ops,
- s, "pxa2xx-gpio", 0x1000);
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq0);
- sysbus_init_irq(sbd, &s->irq1);
- sysbus_init_irq(sbd, &s->irqX);
-}
-
-static void pxa2xx_gpio_realize(DeviceState *dev, Error **errp)
-{
- PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev);
-
- qdev_init_gpio_in(dev, pxa2xx_gpio_set, s->lines);
- qdev_init_gpio_out(dev, s->handler, s->lines);
-}
-
-/*
- * Registers a callback to notify on GPLR reads. This normally
- * shouldn't be needed but it is used for the hack on Spitz machines.
- */
-void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler)
-{
- PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev);
-
- s->read_notify = handler;
-}
-
-static const VMStateDescription vmstate_pxa2xx_gpio_regs = {
- .name = "pxa2xx-gpio",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(ilevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(olevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(dir, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(rising, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(falling, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(status, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_UINT32_ARRAY(gafr, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS * 2),
- VMSTATE_UINT32_ARRAY(prev_level, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static Property pxa2xx_gpio_properties[] = {
- DEFINE_PROP_INT32("lines", PXA2xxGPIOInfo, lines, 0),
- DEFINE_PROP_LINK("cpu", PXA2xxGPIOInfo, cpu, TYPE_ARM_CPU, ARMCPU *),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa2xx_gpio_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "PXA2xx GPIO controller";
- device_class_set_props(dc, pxa2xx_gpio_properties);
- dc->vmsd = &vmstate_pxa2xx_gpio_regs;
- dc->realize = pxa2xx_gpio_realize;
-}
-
-static const TypeInfo pxa2xx_gpio_info = {
- .name = TYPE_PXA2XX_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxGPIOInfo),
- .instance_init = pxa2xx_gpio_initfn,
- .class_init = pxa2xx_gpio_class_init,
-};
-
-static void pxa2xx_gpio_register_types(void)
-{
- type_register_static(&pxa2xx_gpio_info);
-}
-
-type_init(pxa2xx_gpio_register_types)
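
The edge-detect rule in the deleted pxa2xx_gpio_set() combines four per-bank bitmaps: a GEDR status bit is latched only when the pin is an input (GPDR bit clear), the matching enable bit in GRER or GFER is set, and the latched input level actually changes. A minimal standalone model of that update, assuming 32-bit banks as in the original:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef struct {
    uint32_t ilevel;   /* latched input level (feeds GPLR for inputs) */
    uint32_t dir;      /* GPDR: 1 = output */
    uint32_t rising;   /* GRER */
    uint32_t falling;  /* GFER */
    uint32_t status;   /* GEDR */
} GpioBank;

/* Apply a new level on 'line' and latch edge-detect status. */
static void gpio_set(GpioBank *b, int line, int level)
{
    uint32_t mask = 1U << (line & 31);

    if (level) {
        /* Rising edge: enabled, previously low, and an input pin. */
        b->status |= b->rising & mask & ~b->ilevel & ~b->dir;
        b->ilevel |= mask;
    } else {
        /* Falling edge: enabled, previously high, and an input pin. */
        b->status |= b->falling & mask & b->ilevel & ~b->dir;
        b->ilevel &= ~mask;
    }
}

int main(void)
{
    GpioBank b = { .rising = 1U << 3 };   /* enable rising edge on GPIO3 */

    gpio_set(&b, 3, 1);
    printf("GEDR = 0x%08" PRIx32 "\n", b.status);  /* bit 3 latched */
    return 0;
}
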
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
deleted file mode 100644
index 34c5555..0000000
--- a/hw/arm/pxa2xx_pic.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Intel XScale PXA Programmable Interrupt Controller.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Copyright (c) 2006 Thorsten Zitterell
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPL.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/module.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "hw/arm/pxa.h"
-#include "hw/sysbus.h"
-#include "hw/qdev-properties.h"
-#include "migration/vmstate.h"
-#include "qom/object.h"
-#include "target/arm/cpregs.h"
-
-#define ICIP 0x00 /* Interrupt Controller IRQ Pending register */
-#define ICMR 0x04 /* Interrupt Controller Mask register */
-#define ICLR 0x08 /* Interrupt Controller Level register */
-#define ICFP 0x0c /* Interrupt Controller FIQ Pending register */
-#define ICPR 0x10 /* Interrupt Controller Pending register */
-#define ICCR 0x14 /* Interrupt Controller Control register */
-#define ICHP 0x18 /* Interrupt Controller Highest Priority register */
-#define IPR0 0x1c /* Interrupt Controller Priority register 0 */
-#define IPR31 0x98 /* Interrupt Controller Priority register 31 */
-#define ICIP2 0x9c /* Interrupt Controller IRQ Pending register 2 */
-#define ICMR2 0xa0 /* Interrupt Controller Mask register 2 */
-#define ICLR2 0xa4 /* Interrupt Controller Level register 2 */
-#define ICFP2 0xa8 /* Interrupt Controller FIQ Pending register 2 */
-#define ICPR2 0xac /* Interrupt Controller Pending register 2 */
-#define IPR32 0xb0 /* Interrupt Controller Priority register 32 */
-#define IPR39 0xcc /* Interrupt Controller Priority register 39 */
-
-#define PXA2XX_PIC_SRCS 40
-
-#define TYPE_PXA2XX_PIC "pxa2xx_pic"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPICState, PXA2XX_PIC)
-
-struct PXA2xxPICState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- ARMCPU *cpu;
- uint32_t int_enabled[2];
- uint32_t int_pending[2];
- uint32_t is_fiq[2];
- uint32_t int_idle;
- uint32_t priority[PXA2XX_PIC_SRCS];
-};
-
-static void pxa2xx_pic_update(void *opaque)
-{
- uint32_t mask[2];
- PXA2xxPICState *s = (PXA2xxPICState *) opaque;
- CPUState *cpu = CPU(s->cpu);
-
- if (cpu->halted) {
- mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle);
- mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle);
- if (mask[0] || mask[1]) {
- cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB);
- }
- }
-
- mask[0] = s->int_pending[0] & s->int_enabled[0];
- mask[1] = s->int_pending[1] & s->int_enabled[1];
-
- if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1])) {
- cpu_interrupt(cpu, CPU_INTERRUPT_FIQ);
- } else {
- cpu_reset_interrupt(cpu, CPU_INTERRUPT_FIQ);
- }
-
- if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1])) {
- cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
- }
-}
-
-/* Note: Here level means state of the signal on a pin, not
- * IRQ/FIQ distinction as in PXA Developer Manual. */
-static void pxa2xx_pic_set_irq(void *opaque, int irq, int level)
-{
- PXA2xxPICState *s = (PXA2xxPICState *) opaque;
- int int_set = (irq >= 32);
- irq &= 31;
-
- if (level)
- s->int_pending[int_set] |= 1 << irq;
- else
- s->int_pending[int_set] &= ~(1 << irq);
-
- pxa2xx_pic_update(opaque);
-}
-
-static inline uint32_t pxa2xx_pic_highest(PXA2xxPICState *s) {
- int i, int_set, irq;
- uint32_t bit, mask[2];
- uint32_t ichp = 0x003f003f; /* Both IDs invalid */
-
- mask[0] = s->int_pending[0] & s->int_enabled[0];
- mask[1] = s->int_pending[1] & s->int_enabled[1];
-
- for (i = PXA2XX_PIC_SRCS - 1; i >= 0; i --) {
- irq = s->priority[i] & 0x3f;
- if ((s->priority[i] & (1U << 31)) && irq < PXA2XX_PIC_SRCS) {
- /* Source peripheral ID is valid. */
- bit = 1 << (irq & 31);
- int_set = (irq >= 32);
-
- if (mask[int_set] & bit & s->is_fiq[int_set]) {
- /* FIQ asserted */
- ichp &= 0xffff0000;
- ichp |= (1 << 15) | irq;
- }
-
- if (mask[int_set] & bit & ~s->is_fiq[int_set]) {
- /* IRQ asserted */
- ichp &= 0x0000ffff;
- ichp |= (1U << 31) | (irq << 16);
- }
- }
- }
-
- return ichp;
-}
-
-static uint64_t pxa2xx_pic_mem_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- PXA2xxPICState *s = (PXA2xxPICState *) opaque;
-
- switch (offset) {
- case ICIP: /* IRQ Pending register */
- return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0];
- case ICIP2: /* IRQ Pending register 2 */
- return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1];
- case ICMR: /* Mask register */
- return s->int_enabled[0];
- case ICMR2: /* Mask register 2 */
- return s->int_enabled[1];
- case ICLR: /* Level register */
- return s->is_fiq[0];
- case ICLR2: /* Level register 2 */
- return s->is_fiq[1];
- case ICCR: /* Idle mask */
- return (s->int_idle == 0);
- case ICFP: /* FIQ Pending register */
- return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0];
- case ICFP2: /* FIQ Pending register 2 */
- return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1];
- case ICPR: /* Pending register */
- return s->int_pending[0];
- case ICPR2: /* Pending register 2 */
- return s->int_pending[1];
- case IPR0 ... IPR31:
- return s->priority[0 + ((offset - IPR0 ) >> 2)];
- case IPR32 ... IPR39:
- return s->priority[32 + ((offset - IPR32) >> 2)];
- case ICHP: /* Highest Priority register */
- return pxa2xx_pic_highest(s);
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "pxa2xx_pic_mem_read: bad register offset 0x%" HWADDR_PRIx
- "\n", offset);
- return 0;
- }
-}
-
-static void pxa2xx_pic_mem_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxPICState *s = (PXA2xxPICState *) opaque;
-
- switch (offset) {
- case ICMR: /* Mask register */
- s->int_enabled[0] = value;
- break;
- case ICMR2: /* Mask register 2 */
- s->int_enabled[1] = value;
- break;
- case ICLR: /* Level register */
- s->is_fiq[0] = value;
- break;
- case ICLR2: /* Level register 2 */
- s->is_fiq[1] = value;
- break;
- case ICCR: /* Idle mask */
- s->int_idle = (value & 1) ? 0 : ~0;
- break;
- case IPR0 ... IPR31:
- s->priority[0 + ((offset - IPR0 ) >> 2)] = value & 0x8000003f;
- break;
- case IPR32 ... IPR39:
- s->priority[32 + ((offset - IPR32) >> 2)] = value & 0x8000003f;
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "pxa2xx_pic_mem_write: bad register offset 0x%"
- HWADDR_PRIx "\n", offset);
- return;
- }
- pxa2xx_pic_update(opaque);
-}
-
-/* Interrupt Controller Coprocessor Space Register Mapping */
-static const int pxa2xx_cp_reg_map[0x10] = {
- [0x0 ... 0xf] = -1,
- [0x0] = ICIP,
- [0x1] = ICMR,
- [0x2] = ICLR,
- [0x3] = ICFP,
- [0x4] = ICPR,
- [0x5] = ICHP,
- [0x6] = ICIP2,
- [0x7] = ICMR2,
- [0x8] = ICLR2,
- [0x9] = ICFP2,
- [0xa] = ICPR2,
-};
-
-static uint64_t pxa2xx_pic_cp_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- int offset = pxa2xx_cp_reg_map[ri->crn];
- return pxa2xx_pic_mem_read(ri->opaque, offset, 4);
-}
-
-static void pxa2xx_pic_cp_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- int offset = pxa2xx_cp_reg_map[ri->crn];
- pxa2xx_pic_mem_write(ri->opaque, offset, value, 4);
-}
-
-#define REGINFO_FOR_PIC_CP(NAME, CRN) \
- { .name = NAME, .cp = 6, .crn = CRN, .crm = 0, .opc1 = 0, .opc2 = 0, \
- .access = PL1_RW, .type = ARM_CP_IO, \
- .readfn = pxa2xx_pic_cp_read, .writefn = pxa2xx_pic_cp_write }
-
-static const ARMCPRegInfo pxa_pic_cp_reginfo[] = {
- REGINFO_FOR_PIC_CP("ICIP", 0),
- REGINFO_FOR_PIC_CP("ICMR", 1),
- REGINFO_FOR_PIC_CP("ICLR", 2),
- REGINFO_FOR_PIC_CP("ICFP", 3),
- REGINFO_FOR_PIC_CP("ICPR", 4),
- REGINFO_FOR_PIC_CP("ICHP", 5),
- REGINFO_FOR_PIC_CP("ICIP2", 6),
- REGINFO_FOR_PIC_CP("ICMR2", 7),
- REGINFO_FOR_PIC_CP("ICLR2", 8),
- REGINFO_FOR_PIC_CP("ICFP2", 9),
- REGINFO_FOR_PIC_CP("ICPR2", 0xa),
-};
-
-static const MemoryRegionOps pxa2xx_pic_ops = {
- .read = pxa2xx_pic_mem_read,
- .write = pxa2xx_pic_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static int pxa2xx_pic_post_load(void *opaque, int version_id)
-{
- pxa2xx_pic_update(opaque);
- return 0;
-}
-
-static void pxa2xx_pic_reset_hold(Object *obj, ResetType type)
-{
- PXA2xxPICState *s = PXA2XX_PIC(obj);
-
- s->int_pending[0] = 0;
- s->int_pending[1] = 0;
- s->int_enabled[0] = 0;
- s->int_enabled[1] = 0;
- s->is_fiq[0] = 0;
- s->is_fiq[1] = 0;
-}
-
-DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu)
-{
- DeviceState *dev = qdev_new(TYPE_PXA2XX_PIC);
-
- object_property_set_link(OBJECT(dev), "arm-cpu",
- OBJECT(cpu), &error_abort);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- return dev;
-}
-
-static void pxa2xx_pic_realize(DeviceState *dev, Error **errp)
-{
- PXA2xxPICState *s = PXA2XX_PIC(dev);
-
- qdev_init_gpio_in(dev, pxa2xx_pic_set_irq, PXA2XX_PIC_SRCS);
-
- /* Enable IC memory-mapped registers access. */
- memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_pic_ops, s,
- "pxa2xx-pic", 0x00100000);
- sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
-
- /* Enable IC coprocessor access. */
- define_arm_cp_regs_with_opaque(s->cpu, pxa_pic_cp_reginfo, s);
-}
-
-static const VMStateDescription vmstate_pxa2xx_pic_regs = {
- .name = "pxa2xx_pic",
- .version_id = 0,
- .minimum_version_id = 0,
- .post_load = pxa2xx_pic_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(int_enabled, PXA2xxPICState, 2),
- VMSTATE_UINT32_ARRAY(int_pending, PXA2xxPICState, 2),
- VMSTATE_UINT32_ARRAY(is_fiq, PXA2xxPICState, 2),
- VMSTATE_UINT32(int_idle, PXA2xxPICState),
- VMSTATE_UINT32_ARRAY(priority, PXA2xxPICState, PXA2XX_PIC_SRCS),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static Property pxa2xx_pic_properties[] = {
- DEFINE_PROP_LINK("arm-cpu", PXA2xxPICState, cpu,
- TYPE_ARM_CPU, ARMCPU *),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa2xx_pic_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- ResettableClass *rc = RESETTABLE_CLASS(klass);
-
- device_class_set_props(dc, pxa2xx_pic_properties);
- dc->realize = pxa2xx_pic_realize;
- dc->desc = "PXA2xx PIC";
- dc->vmsd = &vmstate_pxa2xx_pic_regs;
- rc->phases.hold = pxa2xx_pic_reset_hold;
-}
-
-static const TypeInfo pxa2xx_pic_info = {
- .name = TYPE_PXA2XX_PIC,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxPICState),
- .class_init = pxa2xx_pic_class_init,
-};
-
-static void pxa2xx_pic_register_types(void)
-{
- type_register_static(&pxa2xx_pic_info);
-}
-
-type_init(pxa2xx_pic_register_types)
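
pxa2xx_pic_highest() in the deleted PIC model builds ICHP by walking the 40 priority registers from the lowest-priority slot (highest index) to the highest, so the last asserted match wins; a valid, pending FIQ source lands in bits [15:0] with bit 15 set, and a valid, pending IRQ source lands in bits [31:16] with bit 31 set. A reduced standalone sketch of that encoding, keeping the original's 40-source layout:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PIC_SRCS 40

/* Reduced PIC state: pending/enabled/FIQ-routing words for sources 0-31
 * and 32-39, plus one priority register per slot (bit 31 = valid,
 * bits [5:0] = peripheral ID). */
typedef struct {
    uint32_t pending[2], enabled[2], is_fiq[2];
    uint32_t priority[PIC_SRCS];
} Pic;

static uint32_t pic_highest(const Pic *s)
{
    uint32_t ichp = 0x003f003f;     /* both peripheral IDs invalid */

    /* Walk lowest priority first so the last match wins each half. */
    for (int i = PIC_SRCS - 1; i >= 0; i--) {
        uint32_t irq = s->priority[i] & 0x3f;

        if (!(s->priority[i] & (1U << 31)) || irq >= PIC_SRCS) {
            continue;               /* peripheral ID not valid */
        }

        int set = irq >= 32;
        uint32_t bit = 1U << (irq & 31);
        uint32_t asserted = s->pending[set] & s->enabled[set] & bit;

        if (asserted & s->is_fiq[set]) {      /* FIQ: bits [15:0] */
            ichp = (ichp & 0xffff0000) | (1U << 15) | irq;
        }
        if (asserted & ~s->is_fiq[set]) {     /* IRQ: bits [31:16] */
            ichp = (ichp & 0x0000ffff) | (1U << 31) | (irq << 16);
        }
    }
    return ichp;
}

int main(void)
{
    Pic s = { .pending = { 1U << 7 }, .enabled = { 1U << 7 } };

    s.priority[0] = (1U << 31) | 7;   /* highest-priority slot -> source 7 */
    printf("ICHP = 0x%08" PRIx32 "\n", pic_highest(&s));  /* valid IRQ 7 */
    return 0;
}
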
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
index a7a662f..9d9af63 100644
--- a/hw/arm/raspi.c
+++ b/hw/arm/raspi.c
@@ -337,48 +337,53 @@ static void raspi_machine_class_init(MachineClass *mc,
mc->init = raspi_machine_init;
};
-static void raspi0_machine_class_init(ObjectClass *oc, void *data)
+static void raspi0_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
+ mc->auto_create_sdcard = true;
rmc->board_rev = 0x920092; /* Revision 1.2 */
raspi_machine_class_init(mc, rmc->board_rev);
};
-static void raspi1ap_machine_class_init(ObjectClass *oc, void *data)
+static void raspi1ap_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
+ mc->auto_create_sdcard = true;
rmc->board_rev = 0x900021; /* Revision 1.1 */
raspi_machine_class_init(mc, rmc->board_rev);
};
-static void raspi2b_machine_class_init(ObjectClass *oc, void *data)
+static void raspi2b_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
+ mc->auto_create_sdcard = true;
rmc->board_rev = 0xa21041;
raspi_machine_class_init(mc, rmc->board_rev);
};
#ifdef TARGET_AARCH64
-static void raspi3ap_machine_class_init(ObjectClass *oc, void *data)
+static void raspi3ap_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
+ mc->auto_create_sdcard = true;
rmc->board_rev = 0x9020e0; /* Revision 1.0 */
raspi_machine_class_init(mc, rmc->board_rev);
};
-static void raspi3b_machine_class_init(ObjectClass *oc, void *data)
+static void raspi3b_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
+ mc->auto_create_sdcard = true;
rmc->board_rev = 0xa02082;
raspi_machine_class_init(mc, rmc->board_rev);
};
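
The raspi hunks above repeat the same two mechanical changes for every board class: the class_init hook gains a const-qualified data pointer, and each MachineClass opts into machine-level SD card creation. Reduced to one hypothetical board, and assuming the QEMU-internal headers already included by this file, the resulting pattern looks like:

static void raspiNN_machine_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);

    /* New in this series: let the generic machine code create the
     * SD card device for the board. */
    mc->auto_create_sdcard = true;

    /* Hypothetical revision code; each real board sets its own. */
    rmc->board_rev = 0x900000;
    raspi_machine_class_init(mc, rmc->board_rev);
}
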
diff --git a/hw/arm/raspi4b.c b/hw/arm/raspi4b.c
index 8587788..20082d5 100644
--- a/hw/arm/raspi4b.c
+++ b/hw/arm/raspi4b.c
@@ -15,7 +15,7 @@
#include "hw/display/bcm2835_fb.h"
#include "hw/registerfields.h"
#include "qemu/error-report.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/arm/boot.h"
@@ -107,7 +107,7 @@ static void raspi4b_machine_init(MachineState *machine)
raspi_base_machine_init(machine, &soc->parent_obj);
}
-static void raspi4b_machine_class_init(ObjectClass *oc, void *data)
+static void raspi4b_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc);
@@ -118,6 +118,7 @@ static void raspi4b_machine_class_init(ObjectClass *oc, void *data)
rmc->board_rev = 0xb03115; /* Revision 1.5, 2 Gb RAM */
#endif
raspi_machine_class_common_init(mc, rmc->board_rev);
+ mc->auto_create_sdcard = true;
mc->init = raspi4b_machine_init;
}
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
index b186f96..5c90504 100644
--- a/hw/arm/realview.c
+++ b/hw/arm/realview.c
@@ -19,7 +19,7 @@
#include "hw/pci/pci.h"
#include "hw/qdev-core.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/i2c/i2c.h"
#include "qemu/error-report.h"
@@ -35,6 +35,8 @@
#define SMP_BOOT_ADDR 0xe0000000
#define SMP_BOOTREG_ADDR 0x10000030
+#define GIC_EXT_IRQS 64 /* Realview PBX-A9 development board */
+
/* Board init. */
static struct arm_boot_info realview_binfo = {
@@ -185,7 +187,12 @@ static void realview_init(MachineState *machine,
sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000);
if (is_mpcore) {
- dev = qdev_new(is_pb ? TYPE_A9MPCORE_PRIV : "realview_mpcore");
+ if (is_pb) {
+ dev = qdev_new(TYPE_A9MPCORE_PRIV);
+ qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL);
+ } else {
+ dev = qdev_new("realview_mpcore");
+ }
qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
@@ -201,7 +208,7 @@ static void realview_init(MachineState *machine,
/* For now just create the nIRQ GIC, and ignore the others. */
dev = sysbus_create_simple(TYPE_REALVIEW_GIC, gic_addr, cpu_irq[0]);
}
- for (n = 0; n < 64; n++) {
+ for (n = 0; n < GIC_EXT_IRQS; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
}
@@ -406,7 +413,7 @@ static void realview_pbx_a9_init(MachineState *machine)
realview_init(machine, BOARD_PBX_A9);
}
-static void realview_eb_class_init(ObjectClass *oc, void *data)
+static void realview_eb_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -415,6 +422,7 @@ static void realview_eb_class_init(ObjectClass *oc, void *data)
mc->block_default_type = IF_SCSI;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
@@ -425,7 +433,7 @@ static const TypeInfo realview_eb_type = {
.class_init = realview_eb_class_init,
};
-static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data)
+static void realview_eb_mpcore_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -435,6 +443,7 @@ static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data)
mc->max_cpus = 4;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm11mpcore");
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
@@ -445,7 +454,7 @@ static const TypeInfo realview_eb_mpcore_type = {
.class_init = realview_eb_mpcore_class_init,
};
-static void realview_pb_a8_class_init(ObjectClass *oc, void *data)
+static void realview_pb_a8_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -453,6 +462,7 @@ static void realview_pb_a8_class_init(ObjectClass *oc, void *data)
mc->init = realview_pb_a8_init;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8");
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
@@ -463,7 +473,7 @@ static const TypeInfo realview_pb_a8_type = {
.class_init = realview_pb_a8_class_init,
};
-static void realview_pbx_a9_class_init(ObjectClass *oc, void *data)
+static void realview_pbx_a9_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -472,6 +482,7 @@ static void realview_pbx_a9_class_init(ObjectClass *oc, void *data)
mc->max_cpus = 4;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9");
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
index 56f184b..df60d47 100644
--- a/hw/arm/sabrelite.c
+++ b/hw/arm/sabrelite.c
@@ -17,7 +17,7 @@
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
static struct arm_boot_info sabrelite_binfo = {
/* DDR memory start */
@@ -110,6 +110,7 @@ static void sabrelite_machine_init(MachineClass *mc)
mc->max_cpus = FSL_IMX6_NUM_CPUS;
mc->ignore_memory_transaction_failures = true;
mc->default_ram_id = "sabrelite.ram";
+ mc->auto_create_sdcard = true;
}
DEFINE_MACHINE("sabrelite", sabrelite_machine_init)
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index ae37a92..deae5cf 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -23,11 +23,11 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/kvm.h"
-#include "sysemu/numa.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/device_tree.h"
+#include "system/kvm.h"
+#include "system/numa.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "exec/hwaddr.h"
#include "kvm_arm.h"
#include "hw/arm/boot.h"
@@ -48,7 +48,7 @@
#include "hw/char/pl011.h"
#include "hw/watchdog/sbsa_gwdt.h"
#include "net/net.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qom/object.h"
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"
@@ -164,23 +164,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
static void sbsa_fdt_add_gic_node(SBSAMachineState *sms)
{
- char *nodename;
+ const char *intc_nodename = "/intc";
+ const char *its_nodename = "/intc/its";
- nodename = g_strdup_printf("/intc");
- qemu_fdt_add_subnode(sms->fdt, nodename);
- qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(sms->fdt, intc_nodename);
+ qemu_fdt_setprop_sized_cells(sms->fdt, intc_nodename, "reg",
2, sbsa_ref_memmap[SBSA_GIC_DIST].base,
2, sbsa_ref_memmap[SBSA_GIC_DIST].size,
2, sbsa_ref_memmap[SBSA_GIC_REDIST].base,
2, sbsa_ref_memmap[SBSA_GIC_REDIST].size);
- nodename = g_strdup_printf("/intc/its");
- qemu_fdt_add_subnode(sms->fdt, nodename);
- qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(sms->fdt, its_nodename);
+ qemu_fdt_setprop_sized_cells(sms->fdt, its_nodename, "reg",
2, sbsa_ref_memmap[SBSA_GIC_ITS].base,
2, sbsa_ref_memmap[SBSA_GIC_ITS].size);
-
- g_free(nodename);
}
/*
@@ -487,6 +484,8 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
[GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
[GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
[GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ,
+ [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ,
+ [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ,
};
for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
@@ -621,6 +620,7 @@ static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
dev = qdev_new(TYPE_ARM_SMMUV3);
+ object_property_set_str(OBJECT(dev), "stage", "nested", &error_abort);
object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
&error_abort);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -675,7 +675,7 @@ static void create_pcie(SBSAMachineState *sms)
/* Map IO port space */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
qdev_get_gpio_in(sms->gic, irq + i));
gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
@@ -880,7 +880,7 @@ static void sbsa_ref_instance_init(Object *obj)
sbsa_flash_create(sms);
}
-static void sbsa_ref_class_init(ObjectClass *oc, void *data)
+static void sbsa_ref_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
static const char * const valid_cpu_types[] = {
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index d73ad62..f39b99e 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -225,6 +225,27 @@ static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value,
((entry->iova & ~info->mask) == info->iova);
}
+static gboolean
+smmu_hash_remove_by_sid_range(gpointer key, gpointer value, gpointer user_data)
+{
+ SMMUDevice *sdev = (SMMUDevice *)key;
+ uint32_t sid = smmu_get_sid(sdev);
+ SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
+
+ if (sid < sid_range->start || sid > sid_range->end) {
+ return false;
+ }
+ trace_smmu_config_cache_inv(sid);
+ return true;
+}
+
+void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range)
+{
+ trace_smmu_configs_inv_sid_range(sid_range.start, sid_range.end);
+ g_hash_table_foreach_remove(s->configs, smmu_hash_remove_by_sid_range,
+ &sid_range);
+}
+
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
@@ -674,7 +695,7 @@ error:
/*
* combine S1 and S2 TLB entries into a single entry.
- * As a result the S1 entry is overriden with combined data.
+ * As a result the S1 entry is overridden with combined data.
*/
static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2,
dma_addr_t iova, SMMUTransCfg *cfg)
@@ -691,7 +712,6 @@ static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2,
tlbe->entry.iova = iova & ~tlbe->entry.addr_mask;
/* parent_perm has s2 perm while perm keeps s1 perm. */
tlbe->parent_perm = tlbe_s2->entry.perm;
- return;
}
/**
@@ -924,7 +944,12 @@ static void smmu_base_realize(DeviceState *dev, Error **errp)
}
}
-static void smmu_base_reset_hold(Object *obj, ResetType type)
+/*
+ * Make sure the IOMMU is reset in 'exit' phase after
+ * all outstanding DMA requests have been quiesced during
+ * the 'enter' or 'hold' reset phases
+ */
+static void smmu_base_reset_exit(Object *obj, ResetType type)
{
SMMUState *s = ARM_SMMU(obj);
@@ -934,14 +959,13 @@ static void smmu_base_reset_hold(Object *obj, ResetType type)
g_hash_table_remove_all(s->iotlb);
}
-static Property smmu_dev_properties[] = {
+static const Property smmu_dev_properties[] = {
DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
TYPE_PCI_BUS, PCIBus *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void smmu_base_class_init(ObjectClass *klass, void *data)
+static void smmu_base_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -950,7 +974,7 @@ static void smmu_base_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, smmu_dev_properties);
device_class_set_parent_realize(dc, smmu_base_realize,
&sbc->parent_realize);
- rc->phases.hold = smmu_base_reset_hold;
+ rc->phases.exit = smmu_base_reset_exit;
}
static const TypeInfo smmu_base_info = {
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index 843bebb..d143d29 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -141,9 +141,4 @@ typedef struct SMMUIOTLBPageInvInfo {
uint64_t mask;
} SMMUIOTLBPageInvInfo;
-typedef struct SMMUSIDRange {
- uint32_t start;
- uint32_t end;
-} SMMUSIDRange;
-
#endif
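
The new smmu_configs_inv_sid_range() added to smmu-common.c above centralises what smmuv3.c previously open-coded: walk the config cache with g_hash_table_foreach_remove() and drop every entry whose stream ID falls inside the invalidated range (SMMUSIDRange accordingly moves out of the internal header). A standalone GLib sketch of that filtering pattern, with plain integers standing in for the SMMUDevice keys:

#include <glib.h>
#include <stdio.h>

typedef struct {
    guint start, end;
} SidRange;

/* Return TRUE to remove the entry when its key (a stream ID here) lies
 * inside the range, mirroring smmu_hash_remove_by_sid_range(). */
static gboolean remove_by_sid_range(gpointer key, gpointer value,
                                    gpointer user_data)
{
    guint sid = GPOINTER_TO_UINT(key);
    const SidRange *range = user_data;

    return sid >= range->start && sid <= range->end;
}

int main(void)
{
    GHashTable *configs = g_hash_table_new(g_direct_hash, g_direct_equal);
    SidRange range = { .start = 0x10, .end = 0x1f };

    for (guint sid = 1; sid <= 0x40; sid++) {
        g_hash_table_insert(configs, GUINT_TO_POINTER(sid),
                            GUINT_TO_POINTER(1));
    }

    guint removed = g_hash_table_foreach_remove(configs, remove_by_sid_range,
                                                &range);
    printf("removed %u cached configs, %u left\n",
           removed, g_hash_table_size(configs));

    g_hash_table_destroy(configs);
    return 0;
}
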
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 0ebf2ee..b6b7399 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -599,7 +599,8 @@ static inline int oas2bits(int oas_field)
case 5:
return 48;
}
- return -1;
+
+ g_assert_not_reached();
}
/* CD fields */
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 3971976..ab67972 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -25,6 +25,7 @@
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
@@ -377,7 +378,7 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, SMMUTransCfg *cfg,
qemu_log_mask(LOG_GUEST_ERROR,
"Cannot fetch pte at address=0x%"PRIx64"\n", addr);
event->type = SMMU_EVT_F_CD_FETCH;
- event->u.f_ste_fetch.addr = addr;
+ event->u.f_cd_fetch.addr = addr;
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
@@ -903,7 +904,7 @@ static void smmuv3_flush_config(SMMUDevice *sdev)
SMMUv3State *s = sdev->smmu;
SMMUState *bc = &s->smmu_state;
- trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
+ trace_smmu_config_cache_inv(smmu_get_sid(sdev));
g_hash_table_remove(bc->configs, sdev);
}
@@ -1277,20 +1278,6 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage)
}
}
-static gboolean
-smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
-{
- SMMUDevice *sdev = (SMMUDevice *)key;
- uint32_t sid = smmu_get_sid(sdev);
- SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
-
- if (sid < sid_range->start || sid > sid_range->end) {
- return false;
- }
- trace_smmuv3_config_cache_inv(sid);
- return true;
-}
-
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
SMMUState *bs = ARM_SMMU(s);
@@ -1373,8 +1360,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
sid_range.end = sid_range.start + mask;
trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
- g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
- &sid_range);
+ smmu_configs_inv_sid_range(bs, sid_range);
break;
}
case SMMU_CMD_CFGI_CD:
@@ -1870,13 +1856,19 @@ static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
}
}
-static void smmu_reset_hold(Object *obj, ResetType type)
+/*
+ * Make sure the IOMMU is reset in 'exit' phase after
+ * all outstanding DMA requests have been quiesced during
+ * the 'enter' or 'hold' reset phases
+ */
+static void smmu_reset_exit(Object *obj, ResetType type)
{
SMMUv3State *s = ARM_SMMUV3(obj);
SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
- if (c->parent_phases.hold) {
- c->parent_phases.hold(obj, type);
+ trace_smmu_reset_exit();
+ if (c->parent_phases.exit) {
+ c->parent_phases.exit(obj, type);
}
smmuv3_init_regs(s);
@@ -1976,15 +1968,15 @@ static const VMStateDescription vmstate_smmuv3 = {
}
};
-static Property smmuv3_properties[] = {
+static const Property smmuv3_properties[] = {
/*
* Stages of translation advertised.
* "1": Stage 1
* "2": Stage 2
+ * "nested": Both stage 1 and stage 2
* Defaults to stage 1
*/
DEFINE_PROP_STRING("stage", SMMUv3State, stage),
- DEFINE_PROP_END_OF_LIST()
};
static void smmuv3_instance_init(Object *obj)
@@ -1992,14 +1984,14 @@ static void smmuv3_instance_init(Object *obj)
/* Nothing much to do here as of now */
}
-static void smmuv3_class_init(ObjectClass *klass, void *data)
+static void smmuv3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
dc->vmsd = &vmstate_smmuv3;
- resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
+ resettable_class_set_parent_phases(rc, NULL, NULL, smmu_reset_exit,
&c->parent_phases);
device_class_set_parent_realize(dc, smmu_realize,
&c->parent_realize);
@@ -2039,7 +2031,7 @@ static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
}
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
@@ -2064,8 +2056,8 @@ static const TypeInfo smmuv3_iommu_memory_region_info = {
static void smmuv3_register_types(void)
{
- type_register(&smmuv3_type_info);
- type_register(&smmuv3_iommu_memory_region_info);
+ type_register_static(&smmuv3_type_info);
+ type_register_static(&smmuv3_iommu_memory_region_info);
}
type_init(smmuv3_register_types)
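
Both SMMU hunks above move the model's reset work from the 'hold' phase to the 'exit' phase of QEMU's three-phase reset, so caches and registers are cleared only after outstanding DMA has been quiesced; the class_init change registers only an exit handler and saves the parent phases. A condensed, QEMU-tree-internal view of the resulting shape, using only names that appear in the hunks:

static void smmu_reset_exit(Object *obj, ResetType type)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    /* Chain to the base SMMU class, which now also flushes its
     * IOTLB/config caches in the exit phase. */
    if (c->parent_phases.exit) {
        c->parent_phases.exit(obj, type);
    }
    smmuv3_init_regs(s);
}

static void smmuv3_class_init_reset_part(ObjectClass *klass)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    /* enter = NULL, hold = NULL, exit = smmu_reset_exit */
    resettable_class_set_parent_phases(rc, NULL, NULL, smmu_reset_exit,
                                       &c->parent_phases);
}
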
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
deleted file mode 100644
index 62cd55b..0000000
--- a/hw/arm/spitz.c
+++ /dev/null
@@ -1,1284 +0,0 @@
-/*
- * PXA270-based Clamshell PDA platforms.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/arm/pxa.h"
-#include "hw/arm/boot.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "hw/pcmcia.h"
-#include "hw/qdev-properties.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "hw/ssi/ssi.h"
-#include "hw/block/flash.h"
-#include "qemu/timer.h"
-#include "qemu/log.h"
-#include "hw/arm/sharpsl.h"
-#include "ui/console.h"
-#include "hw/audio/wm8750.h"
-#include "audio/audio.h"
-#include "hw/boards.h"
-#include "hw/sysbus.h"
-#include "hw/adc/max111x.h"
-#include "migration/vmstate.h"
-#include "exec/address-spaces.h"
-#include "qom/object.h"
-#include "audio/audio.h"
-
-enum spitz_model_e { spitz, akita, borzoi, terrier };
-
-struct SpitzMachineClass {
- MachineClass parent;
- enum spitz_model_e model;
- int arm_id;
-};
-
-struct SpitzMachineState {
- MachineState parent;
- PXA2xxState *mpu;
- DeviceState *mux;
- DeviceState *lcdtg;
- DeviceState *ads7846;
- DeviceState *max1111;
- DeviceState *scp0;
- DeviceState *scp1;
- DeviceState *misc_gpio;
-};
-
-#define TYPE_SPITZ_MACHINE "spitz-common"
-OBJECT_DECLARE_TYPE(SpitzMachineState, SpitzMachineClass, SPITZ_MACHINE)
-
-#define zaurus_printf(format, ...) \
- fprintf(stderr, "%s: " format, __func__, ##__VA_ARGS__)
-
-/* Spitz Flash */
-#define FLASH_BASE 0x0c000000
-#define FLASH_ECCLPLB 0x00 /* Line parity 7 - 0 bit */
-#define FLASH_ECCLPUB 0x04 /* Line parity 15 - 8 bit */
-#define FLASH_ECCCP 0x08 /* Column parity 5 - 0 bit */
-#define FLASH_ECCCNTR 0x0c /* ECC byte counter */
-#define FLASH_ECCCLRR 0x10 /* Clear ECC */
-#define FLASH_FLASHIO 0x14 /* Flash I/O */
-#define FLASH_FLASHCTL 0x18 /* Flash Control */
-
-#define FLASHCTL_CE0 (1 << 0)
-#define FLASHCTL_CLE (1 << 1)
-#define FLASHCTL_ALE (1 << 2)
-#define FLASHCTL_WP (1 << 3)
-#define FLASHCTL_CE1 (1 << 4)
-#define FLASHCTL_RYBY (1 << 5)
-#define FLASHCTL_NCE (FLASHCTL_CE0 | FLASHCTL_CE1)
-
-#define TYPE_SL_NAND "sl-nand"
-OBJECT_DECLARE_SIMPLE_TYPE(SLNANDState, SL_NAND)
-
-struct SLNANDState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
- DeviceState *nand;
- uint8_t ctl;
- uint8_t manf_id;
- uint8_t chip_id;
- ECCState ecc;
-};
-
-static uint64_t sl_read(void *opaque, hwaddr addr, unsigned size)
-{
- SLNANDState *s = (SLNANDState *) opaque;
- int ryby;
-
- switch (addr) {
-#define BSHR(byte, from, to) ((s->ecc.lp[byte] >> (from - to)) & (1 << to))
- case FLASH_ECCLPLB:
- return BSHR(0, 4, 0) | BSHR(0, 5, 2) | BSHR(0, 6, 4) | BSHR(0, 7, 6) |
- BSHR(1, 4, 1) | BSHR(1, 5, 3) | BSHR(1, 6, 5) | BSHR(1, 7, 7);
-
-#define BSHL(byte, from, to) ((s->ecc.lp[byte] << (to - from)) & (1 << to))
- case FLASH_ECCLPUB:
- return BSHL(0, 0, 0) | BSHL(0, 1, 2) | BSHL(0, 2, 4) | BSHL(0, 3, 6) |
- BSHL(1, 0, 1) | BSHL(1, 1, 3) | BSHL(1, 2, 5) | BSHL(1, 3, 7);
-
- case FLASH_ECCCP:
- return s->ecc.cp;
-
- case FLASH_ECCCNTR:
- return s->ecc.count & 0xff;
-
- case FLASH_FLASHCTL:
- nand_getpins(s->nand, &ryby);
- if (ryby)
- return s->ctl | FLASHCTL_RYBY;
- else
- return s->ctl;
-
- case FLASH_FLASHIO:
- if (size == 4) {
- return ecc_digest(&s->ecc, nand_getio(s->nand)) |
- (ecc_digest(&s->ecc, nand_getio(s->nand)) << 16);
- }
- return ecc_digest(&s->ecc, nand_getio(s->nand));
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "sl_read: bad register offset 0x%02" HWADDR_PRIx "\n",
- addr);
- }
- return 0;
-}
-
-static void sl_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- SLNANDState *s = (SLNANDState *) opaque;
-
- switch (addr) {
- case FLASH_ECCCLRR:
- /* Value is ignored. */
- ecc_reset(&s->ecc);
- break;
-
- case FLASH_FLASHCTL:
- s->ctl = value & 0xff & ~FLASHCTL_RYBY;
- nand_setpins(s->nand,
- s->ctl & FLASHCTL_CLE,
- s->ctl & FLASHCTL_ALE,
- s->ctl & FLASHCTL_NCE,
- s->ctl & FLASHCTL_WP,
- 0);
- break;
-
- case FLASH_FLASHIO:
- nand_setio(s->nand, ecc_digest(&s->ecc, value & 0xff));
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "sl_write: bad register offset 0x%02" HWADDR_PRIx "\n",
- addr);
- }
-}
-
-enum {
- FLASH_128M,
- FLASH_1024M,
-};
-
-static const MemoryRegionOps sl_ops = {
- .read = sl_read,
- .write = sl_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void sl_flash_register(PXA2xxState *cpu, int size)
-{
- DeviceState *dev;
-
- dev = qdev_new(TYPE_SL_NAND);
-
- qdev_prop_set_uint8(dev, "manf_id", NAND_MFR_SAMSUNG);
- if (size == FLASH_128M)
- qdev_prop_set_uint8(dev, "chip_id", 0x73);
- else if (size == FLASH_1024M)
- qdev_prop_set_uint8(dev, "chip_id", 0xf1);
-
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, FLASH_BASE);
-}
-
-static void sl_nand_init(Object *obj)
-{
- SLNANDState *s = SL_NAND(obj);
- SysBusDevice *dev = SYS_BUS_DEVICE(obj);
-
- s->ctl = 0;
-
- memory_region_init_io(&s->iomem, obj, &sl_ops, s, "sl", 0x40);
- sysbus_init_mmio(dev, &s->iomem);
-}
-
-static void sl_nand_realize(DeviceState *dev, Error **errp)
-{
- SLNANDState *s = SL_NAND(dev);
- DriveInfo *nand;
-
- /* FIXME use a qdev drive property instead of drive_get() */
- nand = drive_get(IF_MTD, 0, 0);
- s->nand = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL,
- s->manf_id, s->chip_id);
-}
-
-/* Spitz Keyboard */
-
-#define SPITZ_KEY_STROBE_NUM 11
-#define SPITZ_KEY_SENSE_NUM 7
-
-static const int spitz_gpio_key_sense[SPITZ_KEY_SENSE_NUM] = {
- 12, 17, 91, 34, 36, 38, 39
-};
-
-static const int spitz_gpio_key_strobe[SPITZ_KEY_STROBE_NUM] = {
- 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114
-};
-
-/* Eighth additional row maps the special keys */
-static int spitz_keymap[SPITZ_KEY_SENSE_NUM + 1][SPITZ_KEY_STROBE_NUM] = {
- { 0x1d, 0x02, 0x04, 0x06, 0x07, 0x08, 0x0a, 0x0b, 0x0e, 0x3f, 0x40 },
- { -1 , 0x03, 0x05, 0x13, 0x15, 0x09, 0x17, 0x18, 0x19, 0x41, 0x42 },
- { 0x0f, 0x10, 0x12, 0x14, 0x22, 0x16, 0x24, 0x25, -1 , -1 , -1 },
- { 0x3c, 0x11, 0x1f, 0x21, 0x2f, 0x23, 0x32, 0x26, -1 , 0x36, -1 },
- { 0x3b, 0x1e, 0x20, 0x2e, 0x30, 0x31, 0x34, -1 , 0x1c, 0x2a, -1 },
- { 0x44, 0x2c, 0x2d, 0x0c, 0x39, 0x33, -1 , 0x48, -1 , -1 , 0x38 },
- { 0x37, 0x3d, -1 , 0x45, 0x57, 0x58, 0x4b, 0x50, 0x4d, -1 , -1 },
- { 0x52, 0x43, 0x01, 0x47, 0x49, -1 , -1 , -1 , -1 , -1 , -1 },
-};
-
-#define SPITZ_GPIO_AK_INT 13 /* Remote control */
-#define SPITZ_GPIO_SYNC 16 /* Sync button */
-#define SPITZ_GPIO_ON_KEY 95 /* Power button */
-#define SPITZ_GPIO_SWA 97 /* Lid */
-#define SPITZ_GPIO_SWB 96 /* Tablet mode */
-
-/* The special buttons are mapped to unused keys */
-static const int spitz_gpiomap[5] = {
- SPITZ_GPIO_AK_INT, SPITZ_GPIO_SYNC, SPITZ_GPIO_ON_KEY,
- SPITZ_GPIO_SWA, SPITZ_GPIO_SWB,
-};
-
-#define TYPE_SPITZ_KEYBOARD "spitz-keyboard"
-OBJECT_DECLARE_SIMPLE_TYPE(SpitzKeyboardState, SPITZ_KEYBOARD)
-
-struct SpitzKeyboardState {
- SysBusDevice parent_obj;
-
- qemu_irq sense[SPITZ_KEY_SENSE_NUM];
- qemu_irq gpiomap[5];
- int keymap[0x80];
- uint16_t keyrow[SPITZ_KEY_SENSE_NUM];
- uint16_t strobe_state;
- uint16_t sense_state;
-
- uint16_t pre_map[0x100];
- uint16_t modifiers;
- uint16_t imodifiers;
- uint8_t fifo[16];
- int fifopos, fifolen;
- QEMUTimer *kbdtimer;
-};
-
-static void spitz_keyboard_sense_update(SpitzKeyboardState *s)
-{
- int i;
- uint16_t strobe, sense = 0;
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++) {
- strobe = s->keyrow[i] & s->strobe_state;
- if (strobe) {
- sense |= 1 << i;
- if (!(s->sense_state & (1 << i)))
- qemu_irq_raise(s->sense[i]);
- } else if (s->sense_state & (1 << i))
- qemu_irq_lower(s->sense[i]);
- }
-
- s->sense_state = sense;
-}
-
-static void spitz_keyboard_strobe(void *opaque, int line, int level)
-{
- SpitzKeyboardState *s = (SpitzKeyboardState *) opaque;
-
- if (level)
- s->strobe_state |= 1 << line;
- else
- s->strobe_state &= ~(1 << line);
- spitz_keyboard_sense_update(s);
-}
-
-static void spitz_keyboard_keydown(SpitzKeyboardState *s, int keycode)
-{
- int spitz_keycode = s->keymap[keycode & 0x7f];
- if (spitz_keycode == -1)
- return;
-
- /* Handle the additional keys */
- if ((spitz_keycode >> 4) == SPITZ_KEY_SENSE_NUM) {
- qemu_set_irq(s->gpiomap[spitz_keycode & 0xf], (keycode < 0x80));
- return;
- }
-
- if (keycode & 0x80)
- s->keyrow[spitz_keycode >> 4] &= ~(1 << (spitz_keycode & 0xf));
- else
- s->keyrow[spitz_keycode >> 4] |= 1 << (spitz_keycode & 0xf);
-
- spitz_keyboard_sense_update(s);
-}
-
-#define SPITZ_MOD_SHIFT (1 << 7)
-#define SPITZ_MOD_CTRL (1 << 8)
-#define SPITZ_MOD_FN (1 << 9)
-
-#define QUEUE_KEY(c) s->fifo[(s->fifopos + s->fifolen ++) & 0xf] = c
-
-static void spitz_keyboard_handler(void *opaque, int keycode)
-{
- SpitzKeyboardState *s = opaque;
- uint16_t code;
- int mapcode;
- switch (keycode) {
- case 0x2a: /* Left Shift */
- s->modifiers |= 1;
- break;
- case 0xaa:
- s->modifiers &= ~1;
- break;
- case 0x36: /* Right Shift */
- s->modifiers |= 2;
- break;
- case 0xb6:
- s->modifiers &= ~2;
- break;
- case 0x1d: /* Control */
- s->modifiers |= 4;
- break;
- case 0x9d:
- s->modifiers &= ~4;
- break;
- case 0x38: /* Alt */
- s->modifiers |= 8;
- break;
- case 0xb8:
- s->modifiers &= ~8;
- break;
- }
-
- code = s->pre_map[mapcode = ((s->modifiers & 3) ?
- (keycode | SPITZ_MOD_SHIFT) :
- (keycode & ~SPITZ_MOD_SHIFT))];
-
- if (code != mapcode) {
-#if 0
- if ((code & SPITZ_MOD_SHIFT) && !(s->modifiers & 1)) {
- QUEUE_KEY(0x2a | (keycode & 0x80));
- }
- if ((code & SPITZ_MOD_CTRL) && !(s->modifiers & 4)) {
- QUEUE_KEY(0x1d | (keycode & 0x80));
- }
- if ((code & SPITZ_MOD_FN) && !(s->modifiers & 8)) {
- QUEUE_KEY(0x38 | (keycode & 0x80));
- }
- if ((code & SPITZ_MOD_FN) && (s->modifiers & 1)) {
- QUEUE_KEY(0x2a | (~keycode & 0x80));
- }
- if ((code & SPITZ_MOD_FN) && (s->modifiers & 2)) {
- QUEUE_KEY(0x36 | (~keycode & 0x80));
- }
-#else
- if (keycode & 0x80) {
- if ((s->imodifiers & 1 ) && !(s->modifiers & 1))
- QUEUE_KEY(0x2a | 0x80);
- if ((s->imodifiers & 4 ) && !(s->modifiers & 4))
- QUEUE_KEY(0x1d | 0x80);
- if ((s->imodifiers & 8 ) && !(s->modifiers & 8))
- QUEUE_KEY(0x38 | 0x80);
- if ((s->imodifiers & 0x10) && (s->modifiers & 1))
- QUEUE_KEY(0x2a);
- if ((s->imodifiers & 0x20) && (s->modifiers & 2))
- QUEUE_KEY(0x36);
- s->imodifiers = 0;
- } else {
- if ((code & SPITZ_MOD_SHIFT) &&
- !((s->modifiers | s->imodifiers) & 1)) {
- QUEUE_KEY(0x2a);
- s->imodifiers |= 1;
- }
- if ((code & SPITZ_MOD_CTRL) &&
- !((s->modifiers | s->imodifiers) & 4)) {
- QUEUE_KEY(0x1d);
- s->imodifiers |= 4;
- }
- if ((code & SPITZ_MOD_FN) &&
- !((s->modifiers | s->imodifiers) & 8)) {
- QUEUE_KEY(0x38);
- s->imodifiers |= 8;
- }
- if ((code & SPITZ_MOD_FN) && (s->modifiers & 1) &&
- !(s->imodifiers & 0x10)) {
- QUEUE_KEY(0x2a | 0x80);
- s->imodifiers |= 0x10;
- }
- if ((code & SPITZ_MOD_FN) && (s->modifiers & 2) &&
- !(s->imodifiers & 0x20)) {
- QUEUE_KEY(0x36 | 0x80);
- s->imodifiers |= 0x20;
- }
- }
-#endif
- }
-
- QUEUE_KEY((code & 0x7f) | (keycode & 0x80));
-}
-
-static void spitz_keyboard_tick(void *opaque)
-{
- SpitzKeyboardState *s = (SpitzKeyboardState *) opaque;
-
- if (s->fifolen) {
- spitz_keyboard_keydown(s, s->fifo[s->fifopos ++]);
- s->fifolen --;
- if (s->fifopos >= 16)
- s->fifopos = 0;
- }
-
- timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- NANOSECONDS_PER_SECOND / 32);
-}
-
-static void spitz_keyboard_pre_map(SpitzKeyboardState *s)
-{
- int i;
- for (i = 0; i < 0x100; i ++)
- s->pre_map[i] = i;
- s->pre_map[0x02 | SPITZ_MOD_SHIFT] = 0x02 | SPITZ_MOD_SHIFT; /* exclam */
- s->pre_map[0x28 | SPITZ_MOD_SHIFT] = 0x03 | SPITZ_MOD_SHIFT; /* quotedbl */
- s->pre_map[0x04 | SPITZ_MOD_SHIFT] = 0x04 | SPITZ_MOD_SHIFT; /* # */
- s->pre_map[0x05 | SPITZ_MOD_SHIFT] = 0x05 | SPITZ_MOD_SHIFT; /* dollar */
- s->pre_map[0x06 | SPITZ_MOD_SHIFT] = 0x06 | SPITZ_MOD_SHIFT; /* percent */
- s->pre_map[0x08 | SPITZ_MOD_SHIFT] = 0x07 | SPITZ_MOD_SHIFT; /* ampersand */
- s->pre_map[0x28] = 0x08 | SPITZ_MOD_SHIFT; /* ' */
- s->pre_map[0x0a | SPITZ_MOD_SHIFT] = 0x09 | SPITZ_MOD_SHIFT; /* ( */
- s->pre_map[0x0b | SPITZ_MOD_SHIFT] = 0x0a | SPITZ_MOD_SHIFT; /* ) */
- s->pre_map[0x29 | SPITZ_MOD_SHIFT] = 0x0b | SPITZ_MOD_SHIFT; /* tilde */
- s->pre_map[0x03 | SPITZ_MOD_SHIFT] = 0x0c | SPITZ_MOD_SHIFT; /* at */
- s->pre_map[0xd3] = 0x0e | SPITZ_MOD_FN; /* Delete */
- s->pre_map[0x3a] = 0x0f | SPITZ_MOD_FN; /* Caps_Lock */
- s->pre_map[0x07 | SPITZ_MOD_SHIFT] = 0x11 | SPITZ_MOD_FN; /* ^ */
- s->pre_map[0x0d] = 0x12 | SPITZ_MOD_FN; /* equal */
- s->pre_map[0x0d | SPITZ_MOD_SHIFT] = 0x13 | SPITZ_MOD_FN; /* plus */
- s->pre_map[0x1a] = 0x14 | SPITZ_MOD_FN; /* [ */
- s->pre_map[0x1b] = 0x15 | SPITZ_MOD_FN; /* ] */
- s->pre_map[0x1a | SPITZ_MOD_SHIFT] = 0x16 | SPITZ_MOD_FN; /* { */
- s->pre_map[0x1b | SPITZ_MOD_SHIFT] = 0x17 | SPITZ_MOD_FN; /* } */
- s->pre_map[0x27] = 0x22 | SPITZ_MOD_FN; /* semicolon */
- s->pre_map[0x27 | SPITZ_MOD_SHIFT] = 0x23 | SPITZ_MOD_FN; /* colon */
- s->pre_map[0x09 | SPITZ_MOD_SHIFT] = 0x24 | SPITZ_MOD_FN; /* asterisk */
- s->pre_map[0x2b] = 0x25 | SPITZ_MOD_FN; /* backslash */
- s->pre_map[0x2b | SPITZ_MOD_SHIFT] = 0x26 | SPITZ_MOD_FN; /* bar */
- s->pre_map[0x0c | SPITZ_MOD_SHIFT] = 0x30 | SPITZ_MOD_FN; /* _ */
- s->pre_map[0x33 | SPITZ_MOD_SHIFT] = 0x33 | SPITZ_MOD_FN; /* less */
- s->pre_map[0x35] = 0x33 | SPITZ_MOD_SHIFT; /* slash */
- s->pre_map[0x34 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_FN; /* greater */
- s->pre_map[0x35 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_SHIFT; /* question */
- s->pre_map[0x49] = 0x48 | SPITZ_MOD_FN; /* Page_Up */
- s->pre_map[0x51] = 0x50 | SPITZ_MOD_FN; /* Page_Down */
-
- s->modifiers = 0;
- s->imodifiers = 0;
- s->fifopos = 0;
- s->fifolen = 0;
-}
-
-#undef SPITZ_MOD_SHIFT
-#undef SPITZ_MOD_CTRL
-#undef SPITZ_MOD_FN
-
-static int spitz_keyboard_post_load(void *opaque, int version_id)
-{
- SpitzKeyboardState *s = (SpitzKeyboardState *) opaque;
-
- /* Release all pressed keys */
- memset(s->keyrow, 0, sizeof(s->keyrow));
- spitz_keyboard_sense_update(s);
- s->modifiers = 0;
- s->imodifiers = 0;
- s->fifopos = 0;
- s->fifolen = 0;
-
- return 0;
-}
-
-static void spitz_keyboard_register(PXA2xxState *cpu)
-{
- int i;
- DeviceState *dev;
- SpitzKeyboardState *s;
-
- dev = sysbus_create_simple(TYPE_SPITZ_KEYBOARD, -1, NULL);
- s = SPITZ_KEYBOARD(dev);
-
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++)
- qdev_connect_gpio_out(dev, i, qdev_get_gpio_in(cpu->gpio, spitz_gpio_key_sense[i]));
-
- for (i = 0; i < 5; i ++)
- s->gpiomap[i] = qdev_get_gpio_in(cpu->gpio, spitz_gpiomap[i]);
-
- if (!graphic_rotate)
- s->gpiomap[4] = qemu_irq_invert(s->gpiomap[4]);
-
- for (i = 0; i < 5; i++)
- qemu_set_irq(s->gpiomap[i], 0);
-
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i ++)
- qdev_connect_gpio_out(cpu->gpio, spitz_gpio_key_strobe[i],
- qdev_get_gpio_in(dev, i));
-
- timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
-
- qemu_add_kbd_event_handler(spitz_keyboard_handler, s);
-}
-
-static void spitz_keyboard_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- SpitzKeyboardState *s = SPITZ_KEYBOARD(obj);
- int i, j;
-
- for (i = 0; i < 0x80; i ++)
- s->keymap[i] = -1;
- for (i = 0; i < SPITZ_KEY_SENSE_NUM + 1; i ++)
- for (j = 0; j < SPITZ_KEY_STROBE_NUM; j ++)
- if (spitz_keymap[i][j] != -1)
- s->keymap[spitz_keymap[i][j]] = (i << 4) | j;
-
- spitz_keyboard_pre_map(s);
-
- qdev_init_gpio_in(dev, spitz_keyboard_strobe, SPITZ_KEY_STROBE_NUM);
- qdev_init_gpio_out(dev, s->sense, SPITZ_KEY_SENSE_NUM);
-}
-
-static void spitz_keyboard_realize(DeviceState *dev, Error **errp)
-{
- SpitzKeyboardState *s = SPITZ_KEYBOARD(dev);
- s->kbdtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, spitz_keyboard_tick, s);
-}
-
-/* LCD backlight controller */
-
-#define LCDTG_RESCTL 0x00
-#define LCDTG_PHACTRL 0x01
-#define LCDTG_DUTYCTRL 0x02
-#define LCDTG_POWERREG0 0x03
-#define LCDTG_POWERREG1 0x04
-#define LCDTG_GPOR3 0x05
-#define LCDTG_PICTRL 0x06
-#define LCDTG_POLCTRL 0x07
-
-#define TYPE_SPITZ_LCDTG "spitz-lcdtg"
-OBJECT_DECLARE_SIMPLE_TYPE(SpitzLCDTG, SPITZ_LCDTG)
-
-struct SpitzLCDTG {
- SSIPeripheral ssidev;
- uint32_t bl_intensity;
- uint32_t bl_power;
-};
-
-static void spitz_bl_update(SpitzLCDTG *s)
-{
- if (s->bl_power && s->bl_intensity)
- zaurus_printf("LCD Backlight now at %u/63\n", s->bl_intensity);
- else
- zaurus_printf("LCD Backlight now off\n");
-}
-
-static inline void spitz_bl_bit5(void *opaque, int line, int level)
-{
- SpitzLCDTG *s = opaque;
- int prev = s->bl_intensity;
-
- if (level)
- s->bl_intensity &= ~0x20;
- else
- s->bl_intensity |= 0x20;
-
- if (s->bl_power && prev != s->bl_intensity)
- spitz_bl_update(s);
-}
-
-static inline void spitz_bl_power(void *opaque, int line, int level)
-{
- SpitzLCDTG *s = opaque;
- s->bl_power = !!level;
- spitz_bl_update(s);
-}
-
-static uint32_t spitz_lcdtg_transfer(SSIPeripheral *dev, uint32_t value)
-{
- SpitzLCDTG *s = SPITZ_LCDTG(dev);
- int addr;
- addr = value >> 5;
- value &= 0x1f;
-
- switch (addr) {
- case LCDTG_RESCTL:
- if (value)
- zaurus_printf("LCD in QVGA mode\n");
- else
- zaurus_printf("LCD in VGA mode\n");
- break;
-
- case LCDTG_DUTYCTRL:
- s->bl_intensity &= ~0x1f;
- s->bl_intensity |= value;
- if (s->bl_power)
- spitz_bl_update(s);
- break;
-
- case LCDTG_POWERREG0:
- /* Set common voltage to M62332FP */
- break;
- }
- return 0;
-}
-
-static void spitz_lcdtg_realize(SSIPeripheral *ssi, Error **errp)
-{
- SpitzLCDTG *s = SPITZ_LCDTG(ssi);
- DeviceState *dev = DEVICE(s);
-
- s->bl_power = 0;
- s->bl_intensity = 0x20;
-
- qdev_init_gpio_in_named(dev, spitz_bl_bit5, "bl_bit5", 1);
- qdev_init_gpio_in_named(dev, spitz_bl_power, "bl_power", 1);
-}
-
-/* SSP devices */
-
-#define CORGI_SSP_PORT 2
-
-#define SPITZ_GPIO_LCDCON_CS 53
-#define SPITZ_GPIO_ADS7846_CS 14
-#define SPITZ_GPIO_MAX1111_CS 20
-#define SPITZ_GPIO_TP_INT 11
-
-#define TYPE_CORGI_SSP "corgi-ssp"
-OBJECT_DECLARE_SIMPLE_TYPE(CorgiSSPState, CORGI_SSP)
-
-/* "Demux" the signal based on current chipselect */
-struct CorgiSSPState {
- SSIPeripheral ssidev;
- SSIBus *bus[3];
- uint32_t enable[3];
-};
-
-static uint32_t corgi_ssp_transfer(SSIPeripheral *dev, uint32_t value)
-{
- CorgiSSPState *s = CORGI_SSP(dev);
- int i;
-
- for (i = 0; i < 3; i++) {
- if (s->enable[i]) {
- return ssi_transfer(s->bus[i], value);
- }
- }
- return 0;
-}
-
-static void corgi_ssp_gpio_cs(void *opaque, int line, int level)
-{
- CorgiSSPState *s = (CorgiSSPState *)opaque;
- assert(line >= 0 && line < 3);
- s->enable[line] = !level;
-}
-
-#define MAX1111_BATT_VOLT 1
-#define MAX1111_BATT_TEMP 2
-#define MAX1111_ACIN_VOLT 3
-
-#define SPITZ_BATTERY_TEMP 0xe0 /* About 2.9V */
-#define SPITZ_BATTERY_VOLT 0xd0 /* About 4.0V */
-#define SPITZ_CHARGEON_ACIN 0x80 /* About 5.0V */
-
-static void corgi_ssp_realize(SSIPeripheral *d, Error **errp)
-{
- DeviceState *dev = DEVICE(d);
- CorgiSSPState *s = CORGI_SSP(d);
-
- qdev_init_gpio_in(dev, corgi_ssp_gpio_cs, 3);
- s->bus[0] = ssi_create_bus(dev, "ssi0");
- s->bus[1] = ssi_create_bus(dev, "ssi1");
- s->bus[2] = ssi_create_bus(dev, "ssi2");
-}
-
-static void spitz_ssp_attach(SpitzMachineState *sms)
-{
- void *bus;
-
- sms->mux = ssi_create_peripheral(sms->mpu->ssp[CORGI_SSP_PORT - 1],
- TYPE_CORGI_SSP);
-
- bus = qdev_get_child_bus(sms->mux, "ssi0");
- sms->lcdtg = ssi_create_peripheral(bus, TYPE_SPITZ_LCDTG);
-
- bus = qdev_get_child_bus(sms->mux, "ssi1");
- sms->ads7846 = ssi_create_peripheral(bus, "ads7846");
- qdev_connect_gpio_out(sms->ads7846, 0,
- qdev_get_gpio_in(sms->mpu->gpio, SPITZ_GPIO_TP_INT));
-
- bus = qdev_get_child_bus(sms->mux, "ssi2");
- sms->max1111 = qdev_new(TYPE_MAX_1111);
- qdev_prop_set_uint8(sms->max1111, "input1" /* BATT_VOLT */,
- SPITZ_BATTERY_VOLT);
- qdev_prop_set_uint8(sms->max1111, "input2" /* BATT_TEMP */, 0);
- qdev_prop_set_uint8(sms->max1111, "input3" /* ACIN_VOLT */,
- SPITZ_CHARGEON_ACIN);
- ssi_realize_and_unref(sms->max1111, bus, &error_fatal);
-
- qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_LCDCON_CS,
- qdev_get_gpio_in(sms->mux, 0));
- qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_ADS7846_CS,
- qdev_get_gpio_in(sms->mux, 1));
- qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_MAX1111_CS,
- qdev_get_gpio_in(sms->mux, 2));
-}
-
-/* CF Microdrive */
-
-static void spitz_microdrive_attach(PXA2xxState *cpu, int slot)
-{
- PCMCIACardState *md;
- DriveInfo *dinfo;
-
- dinfo = drive_get(IF_IDE, 0, 0);
- if (!dinfo || dinfo->media_cd)
- return;
- md = dscm1xxxx_init(dinfo);
- pxa2xx_pcmcia_attach(cpu->pcmcia[slot], md);
-}
-
-/* Wm8750 and Max7310 on I2C */
-
-#define AKITA_MAX_ADDR 0x18
-#define SPITZ_WM_ADDRL 0x1b
-#define SPITZ_WM_ADDRH 0x1a
-
-#define SPITZ_GPIO_WM 5
-
-static void spitz_wm8750_addr(void *opaque, int line, int level)
-{
- I2CSlave *wm = (I2CSlave *) opaque;
- if (level)
- i2c_slave_set_address(wm, SPITZ_WM_ADDRH);
- else
- i2c_slave_set_address(wm, SPITZ_WM_ADDRL);
-}
-
-static void spitz_i2c_setup(MachineState *machine, PXA2xxState *cpu)
-{
- /* Attach the CPU on one end of our I2C bus. */
- I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]);
-
- /* Attach a WM8750 to the bus */
- I2CSlave *i2c_dev = i2c_slave_new(TYPE_WM8750, 0);
- DeviceState *wm = DEVICE(i2c_dev);
-
- if (machine->audiodev) {
- qdev_prop_set_string(wm, "audiodev", machine->audiodev);
- }
- i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort);
-
- spitz_wm8750_addr(wm, 0, 0);
- qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_WM,
- qemu_allocate_irq(spitz_wm8750_addr, wm, 0));
- /* .. and to the sound interface. */
- cpu->i2s->opaque = wm;
- cpu->i2s->codec_out = wm8750_dac_dat;
- cpu->i2s->codec_in = wm8750_adc_dat;
- wm8750_data_req_set(wm, cpu->i2s->data_req, cpu->i2s);
-}
-
-static void spitz_akita_i2c_setup(PXA2xxState *cpu)
-{
- /* Attach a Max7310 to Akita I2C bus. */
- i2c_slave_create_simple(pxa2xx_i2c_bus(cpu->i2c[0]), "max7310",
- AKITA_MAX_ADDR);
-}
-
-/* Other peripherals */
-
-/*
- * Encapsulation of some miscellaneous GPIO line behaviour for the Spitz boards.
- *
- * QEMU interface:
- * + named GPIO inputs "green-led", "orange-led", "charging", "discharging":
- * these currently just print messages that the line has been signalled
- * + named GPIO input "adc-temp-on": set to cause the battery-temperature
- * value to be passed to the max111x ADC
- * + named GPIO output "adc-temp": the ADC value, to be wired up to the max111x
- */
-#define TYPE_SPITZ_MISC_GPIO "spitz-misc-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(SpitzMiscGPIOState, SPITZ_MISC_GPIO)
-
-struct SpitzMiscGPIOState {
- SysBusDevice parent_obj;
-
- qemu_irq adc_value;
-};
-
-static void spitz_misc_charging(void *opaque, int n, int level)
-{
- zaurus_printf("Charging %s.\n", level ? "off" : "on");
-}
-
-static void spitz_misc_discharging(void *opaque, int n, int level)
-{
- zaurus_printf("Discharging %s.\n", level ? "off" : "on");
-}
-
-static void spitz_misc_green_led(void *opaque, int n, int level)
-{
- zaurus_printf("Green LED %s.\n", level ? "off" : "on");
-}
-
-static void spitz_misc_orange_led(void *opaque, int n, int level)
-{
- zaurus_printf("Orange LED %s.\n", level ? "off" : "on");
-}
-
-static void spitz_misc_adc_temp(void *opaque, int n, int level)
-{
- SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(opaque);
- int batt_temp = level ? SPITZ_BATTERY_TEMP : 0;
-
- qemu_set_irq(s->adc_value, batt_temp);
-}
-
-static void spitz_misc_gpio_init(Object *obj)
-{
- SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(obj);
- DeviceState *dev = DEVICE(obj);
-
- qdev_init_gpio_in_named(dev, spitz_misc_charging, "charging", 1);
- qdev_init_gpio_in_named(dev, spitz_misc_discharging, "discharging", 1);
- qdev_init_gpio_in_named(dev, spitz_misc_green_led, "green-led", 1);
- qdev_init_gpio_in_named(dev, spitz_misc_orange_led, "orange-led", 1);
- qdev_init_gpio_in_named(dev, spitz_misc_adc_temp, "adc-temp-on", 1);
-
- qdev_init_gpio_out_named(dev, &s->adc_value, "adc-temp", 1);
-}
-
-#define SPITZ_SCP_LED_GREEN 1
-#define SPITZ_SCP_JK_B 2
-#define SPITZ_SCP_CHRG_ON 3
-#define SPITZ_SCP_MUTE_L 4
-#define SPITZ_SCP_MUTE_R 5
-#define SPITZ_SCP_CF_POWER 6
-#define SPITZ_SCP_LED_ORANGE 7
-#define SPITZ_SCP_JK_A 8
-#define SPITZ_SCP_ADC_TEMP_ON 9
-#define SPITZ_SCP2_IR_ON 1
-#define SPITZ_SCP2_AKIN_PULLUP 2
-#define SPITZ_SCP2_BACKLIGHT_CONT 7
-#define SPITZ_SCP2_BACKLIGHT_ON 8
-#define SPITZ_SCP2_MIC_BIAS 9
-
-static void spitz_scoop_gpio_setup(SpitzMachineState *sms)
-{
- DeviceState *miscdev = sysbus_create_simple(TYPE_SPITZ_MISC_GPIO, -1, NULL);
-
- sms->misc_gpio = miscdev;
-
- qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_CHRG_ON,
- qdev_get_gpio_in_named(miscdev, "charging", 0));
- qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_JK_B,
- qdev_get_gpio_in_named(miscdev, "discharging", 0));
- qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_GREEN,
- qdev_get_gpio_in_named(miscdev, "green-led", 0));
- qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_ORANGE,
- qdev_get_gpio_in_named(miscdev, "orange-led", 0));
- qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_ADC_TEMP_ON,
- qdev_get_gpio_in_named(miscdev, "adc-temp-on", 0));
- qdev_connect_gpio_out_named(miscdev, "adc-temp", 0,
- qdev_get_gpio_in(sms->max1111, MAX1111_BATT_TEMP));
-
- if (sms->scp1) {
- qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_CONT,
- qdev_get_gpio_in_named(sms->lcdtg, "bl_bit5", 0));
- qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_ON,
- qdev_get_gpio_in_named(sms->lcdtg, "bl_power", 0));
- }
-}
-
-#define SPITZ_GPIO_HSYNC 22
-#define SPITZ_GPIO_SD_DETECT 9
-#define SPITZ_GPIO_SD_WP 81
-#define SPITZ_GPIO_ON_RESET 89
-#define SPITZ_GPIO_BAT_COVER 90
-#define SPITZ_GPIO_CF1_IRQ 105
-#define SPITZ_GPIO_CF1_CD 94
-#define SPITZ_GPIO_CF2_IRQ 106
-#define SPITZ_GPIO_CF2_CD 93
-
-static int spitz_hsync;
-
-static void spitz_lcd_hsync_handler(void *opaque, int line, int level)
-{
- PXA2xxState *cpu = (PXA2xxState *) opaque;
- qemu_set_irq(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_HSYNC), spitz_hsync);
- spitz_hsync ^= 1;
-}
-
-static void spitz_reset(void *opaque, int line, int level)
-{
- if (level) {
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
- }
-}
-
-static void spitz_gpio_setup(PXA2xxState *cpu, int slots)
-{
- qemu_irq lcd_hsync;
- qemu_irq reset;
-
- /*
- * Bad hack: We toggle the LCD hsync GPIO on every GPIO status
- * read to satisfy broken guests that poll-wait for hsync.
- * Simulating a real hsync event would be less practical and
- * wouldn't guarantee that a guest ever exits the loop.
- */
- spitz_hsync = 0;
- lcd_hsync = qemu_allocate_irq(spitz_lcd_hsync_handler, cpu, 0);
- pxa2xx_gpio_read_notifier(cpu->gpio, lcd_hsync);
- pxa2xx_lcd_vsync_notifier(cpu->lcd, lcd_hsync);
-
- /* MMC/SD host */
- pxa2xx_mmci_handlers(cpu->mmc,
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_WP),
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_DETECT));
-
- /* Battery lock always closed */
- qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_BAT_COVER));
-
- /* Handle reset */
- reset = qemu_allocate_irq(spitz_reset, cpu, 0);
- qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_ON_RESET, reset);
-
- /* PCMCIA signals: card's IRQ and Card-Detect */
- if (slots >= 1)
- pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0],
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_IRQ),
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_CD));
- if (slots >= 2)
- pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1],
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_IRQ),
- qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_CD));
-}
-
-/* Board init. */
-#define SPITZ_RAM 0x04000000
-#define SPITZ_ROM 0x00800000
-
-static struct arm_boot_info spitz_binfo = {
- .loader_start = PXA2XX_SDRAM_BASE,
- .ram_size = 0x04000000,
-};
-
-static void spitz_common_init(MachineState *machine)
-{
- SpitzMachineClass *smc = SPITZ_MACHINE_GET_CLASS(machine);
- SpitzMachineState *sms = SPITZ_MACHINE(machine);
- enum spitz_model_e model = smc->model;
- PXA2xxState *mpu;
- MemoryRegion *rom = g_new(MemoryRegion, 1);
-
- /* Setup CPU & memory */
- mpu = pxa270_init(spitz_binfo.ram_size, machine->cpu_type);
- sms->mpu = mpu;
-
- sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M);
-
- memory_region_init_rom(rom, NULL, "spitz.rom", SPITZ_ROM, &error_fatal);
- memory_region_add_subregion(get_system_memory(), 0, rom);
-
- /* Setup peripherals */
- spitz_keyboard_register(mpu);
-
- spitz_ssp_attach(sms);
-
- sms->scp0 = sysbus_create_simple("scoop", 0x10800000, NULL);
- if (model != akita) {
- sms->scp1 = sysbus_create_simple("scoop", 0x08800040, NULL);
- } else {
- sms->scp1 = NULL;
- }
-
- spitz_scoop_gpio_setup(sms);
-
- spitz_gpio_setup(mpu, (model == akita) ? 1 : 2);
-
- spitz_i2c_setup(machine, mpu);
-
- if (model == akita)
- spitz_akita_i2c_setup(mpu);
-
- if (model == terrier)
- /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */
- spitz_microdrive_attach(mpu, 1);
- else if (model != akita)
- /* A 4.0 GB microdrive is permanently sitting in CF slot 0. */
- spitz_microdrive_attach(mpu, 0);
-
- spitz_binfo.board_id = smc->arm_id;
- arm_load_kernel(mpu->cpu, machine, &spitz_binfo);
- sl_bootparam_write(SL_PXA_PARAM_BASE);
-}
-
-static void spitz_common_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->block_default_type = IF_IDE;
- mc->ignore_memory_transaction_failures = true;
- mc->init = spitz_common_init;
- mc->deprecation_reason = "machine is old and unmaintained";
-
- machine_add_audiodev_property(mc);
-}
-
-static const TypeInfo spitz_common_info = {
- .name = TYPE_SPITZ_MACHINE,
- .parent = TYPE_MACHINE,
- .abstract = true,
- .instance_size = sizeof(SpitzMachineState),
- .class_size = sizeof(SpitzMachineClass),
- .class_init = spitz_common_class_init,
-};
-
-static void akitapda_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
- SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc);
-
- mc->desc = "Sharp SL-C1000 (Akita) PDA (PXA270)";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0");
- smc->model = akita;
- smc->arm_id = 0x2e8;
-}
-
-static const TypeInfo akitapda_type = {
- .name = MACHINE_TYPE_NAME("akita"),
- .parent = TYPE_SPITZ_MACHINE,
- .class_init = akitapda_class_init,
-};
-
-static void spitzpda_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
- SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc);
-
- mc->desc = "Sharp SL-C3000 (Spitz) PDA (PXA270)";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0");
- smc->model = spitz;
- smc->arm_id = 0x2c9;
-}
-
-static const TypeInfo spitzpda_type = {
- .name = MACHINE_TYPE_NAME("spitz"),
- .parent = TYPE_SPITZ_MACHINE,
- .class_init = spitzpda_class_init,
-};
-
-static void borzoipda_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
- SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc);
-
- mc->desc = "Sharp SL-C3100 (Borzoi) PDA (PXA270)";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0");
- smc->model = borzoi;
- smc->arm_id = 0x33f;
-}
-
-static const TypeInfo borzoipda_type = {
- .name = MACHINE_TYPE_NAME("borzoi"),
- .parent = TYPE_SPITZ_MACHINE,
- .class_init = borzoipda_class_init,
-};
-
-static void terrierpda_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
- SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc);
-
- mc->desc = "Sharp SL-C3200 (Terrier) PDA (PXA270)";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5");
- smc->model = terrier;
- smc->arm_id = 0x33f;
-}
-
-static const TypeInfo terrierpda_type = {
- .name = MACHINE_TYPE_NAME("terrier"),
- .parent = TYPE_SPITZ_MACHINE,
- .class_init = terrierpda_class_init,
-};
-
-static void spitz_machine_init(void)
-{
- type_register_static(&spitz_common_info);
- type_register_static(&akitapda_type);
- type_register_static(&spitzpda_type);
- type_register_static(&borzoipda_type);
- type_register_static(&terrierpda_type);
-}
-
-type_init(spitz_machine_init)
-
-static bool is_version_0(void *opaque, int version_id)
-{
- return version_id == 0;
-}
-
-static const VMStateDescription vmstate_sl_nand_info = {
- .name = "sl-nand",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(ctl, SLNANDState),
- VMSTATE_STRUCT(ecc, SLNANDState, 0, vmstate_ecc_state, ECCState),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static Property sl_nand_properties[] = {
- DEFINE_PROP_UINT8("manf_id", SLNANDState, manf_id, NAND_MFR_SAMSUNG),
- DEFINE_PROP_UINT8("chip_id", SLNANDState, chip_id, 0xf1),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void sl_nand_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->vmsd = &vmstate_sl_nand_info;
- device_class_set_props(dc, sl_nand_properties);
- dc->realize = sl_nand_realize;
- /* Reason: init() method uses drive_get() */
- dc->user_creatable = false;
-}
-
-static const TypeInfo sl_nand_info = {
- .name = TYPE_SL_NAND,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SLNANDState),
- .instance_init = sl_nand_init,
- .class_init = sl_nand_class_init,
-};
-
-static const VMStateDescription vmstate_spitz_kbd = {
- .name = "spitz-keyboard",
- .version_id = 1,
- .minimum_version_id = 0,
- .post_load = spitz_keyboard_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT16(sense_state, SpitzKeyboardState),
- VMSTATE_UINT16(strobe_state, SpitzKeyboardState),
- VMSTATE_UNUSED_TEST(is_version_0, 5),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static void spitz_keyboard_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->vmsd = &vmstate_spitz_kbd;
- dc->realize = spitz_keyboard_realize;
-}
-
-static const TypeInfo spitz_keyboard_info = {
- .name = TYPE_SPITZ_KEYBOARD,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SpitzKeyboardState),
- .instance_init = spitz_keyboard_init,
- .class_init = spitz_keyboard_class_init,
-};
-
-static const VMStateDescription vmstate_corgi_ssp_regs = {
- .name = "corgi-ssp",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_SSI_PERIPHERAL(ssidev, CorgiSSPState),
- VMSTATE_UINT32_ARRAY(enable, CorgiSSPState, 3),
- VMSTATE_END_OF_LIST(),
- }
-};
-
-static void corgi_ssp_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
-
- k->realize = corgi_ssp_realize;
- k->transfer = corgi_ssp_transfer;
- dc->vmsd = &vmstate_corgi_ssp_regs;
-}
-
-static const TypeInfo corgi_ssp_info = {
- .name = TYPE_CORGI_SSP,
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(CorgiSSPState),
- .class_init = corgi_ssp_class_init,
-};
-
-static const VMStateDescription vmstate_spitz_lcdtg_regs = {
- .name = "spitz-lcdtg",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_SSI_PERIPHERAL(ssidev, SpitzLCDTG),
- VMSTATE_UINT32(bl_intensity, SpitzLCDTG),
- VMSTATE_UINT32(bl_power, SpitzLCDTG),
- VMSTATE_END_OF_LIST(),
- }
-};
-
-static void spitz_lcdtg_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
-
- k->realize = spitz_lcdtg_realize;
- k->transfer = spitz_lcdtg_transfer;
- dc->vmsd = &vmstate_spitz_lcdtg_regs;
-}
-
-static const TypeInfo spitz_lcdtg_info = {
- .name = TYPE_SPITZ_LCDTG,
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(SpitzLCDTG),
- .class_init = spitz_lcdtg_class_init,
-};
-
-static const TypeInfo spitz_misc_gpio_info = {
- .name = TYPE_SPITZ_MISC_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SpitzMiscGPIOState),
- .instance_init = spitz_misc_gpio_init,
- /*
- * No class_init required: device has no internal state so does not
- * need to set up reset or vmstate, and does not have a realize method.
- */
-};
-
-static void spitz_register_types(void)
-{
- type_register_static(&corgi_ssp_info);
- type_register_static(&spitz_lcdtg_info);
- type_register_static(&spitz_keyboard_info);
- type_register_static(&sl_nand_info);
- type_register_static(&spitz_misc_gpio_info);
-}
-
-type_init(spitz_register_types)
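/*
 * Illustration only, not part of the patch: the spitz keyboard model deleted
 * above scans a strobe/sense key matrix.  The minimal sketch below restates
 * that scan in isolation; matrix_sense(), NUM_SENSE and the 7-row size are
 * names and assumptions made up for the example — only the AND/OR logic is
 * taken from the removed spitz_keyboard_sense_update().
 */
#include <stdint.h>

#define NUM_SENSE 7

/*
 * A sense row reads back high when any pressed key in that row lies on a
 * strobe column that is currently driven; the device model raised or lowered
 * the per-row sense IRQs whenever this mask changed.
 */
static uint16_t matrix_sense(const uint16_t keyrow[NUM_SENSE],
                             uint16_t strobe_state)
{
    uint16_t sense = 0;

    for (int i = 0; i < NUM_SENSE; i++) {
        if (keyrow[i] & strobe_state) {
            sense |= 1 << i;
        }
    }
    return sense;
}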
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index 3767462..031ea3a 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
#include "qapi/error.h"
#include "hw/core/split-irq.h"
#include "hw/sysbus.h"
@@ -19,8 +20,8 @@
#include "net/net.h"
#include "hw/boards.h"
#include "qemu/log.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/arm/armv7m.h"
#include "hw/char/pl011.h"
#include "hw/input/stellaris_gamepad.h"
@@ -31,7 +32,7 @@
#include "hw/timer/stellaris-gptm.h"
#include "hw/qdev-clock.h"
#include "qom/object.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "ui/input.h"
#define GPIO_A 0
@@ -49,6 +50,31 @@
#define NUM_IRQ_LINES 64
#define NUM_PRIO_BITS 3
+#define NUM_GPIO 7
+#define NUM_UART 4
+#define NUM_GPTM 4
+#define NUM_I2C 2
+
+/*
+ * See Stellaris Data Sheet chapter 5.2.5 "System Control",
+ * Register 13 .. 17: Device Capabilities 0 .. 4 (DC0 .. DC4).
+ */
+#define DC1_WDT 3
+#define DC1_HIB 6
+#define DC1_MPU 7
+#define DC1_ADC 16
+#define DC1_PWM 20
+#define DC2_UART(n) (n)
+#define DC2_SSI 4
+#define DC2_QEI(n) (8 + n)
+#define DC2_I2C(n) (12 + 2 * n)
+#define DC2_GPTM(n) (16 + n)
+#define DC2_COMP(n) (24 + n)
+#define DC4_GPIO(n) (n)
+#define DC4_EMAC 28
+
+#define DEV_CAP(_dc, _cap) extract32(board->dc##_dc, DC##_dc##_##_cap, 1)
+
typedef const struct {
const char *name;
uint32_t did0;
@@ -101,7 +127,7 @@ static void ssys_update(ssys_state *s)
qemu_set_irq(s->irq, (s->int_status & s->int_mask) != 0);
}
-static uint32_t pllcfg_sandstorm[16] = {
+static const uint32_t pllcfg_sandstorm[16] = {
0x31c0, /* 1 Mhz */
0x1ae0, /* 1.8432 Mhz */
0x18c0, /* 2 Mhz */
@@ -120,7 +146,7 @@ static uint32_t pllcfg_sandstorm[16] = {
0x585b /* 8.192 Mhz */
};
-static uint32_t pllcfg_fury[16] = {
+static const uint32_t pllcfg_fury[16] = {
0x3200, /* 1 Mhz */
0x1b20, /* 1.8432 Mhz */
0x1900, /* 2 Mhz */
@@ -438,7 +464,7 @@ static const VMStateDescription vmstate_stellaris_sys = {
}
};
-static Property stellaris_sys_properties[] = {
+static const Property stellaris_sys_properties[] = {
DEFINE_PROP_UINT32("user0", ssys_state, user0, 0),
DEFINE_PROP_UINT32("user1", ssys_state, user1, 0),
DEFINE_PROP_UINT32("did0", ssys_state, did0, 0),
@@ -448,7 +474,6 @@ static Property stellaris_sys_properties[] = {
DEFINE_PROP_UINT32("dc2", ssys_state, dc2, 0),
DEFINE_PROP_UINT32("dc3", ssys_state, dc3, 0),
DEFINE_PROP_UINT32("dc4", ssys_state, dc4, 0),
- DEFINE_PROP_END_OF_LIST()
};
static void stellaris_sys_instance_init(Object *obj)
@@ -965,7 +990,7 @@ static void stellaris_adc_init(Object *obj)
}
/* Board init. */
-static stellaris_board_info stellaris_boards[] = {
+static const stellaris_board_info stellaris_boards[] = {
{ "LM3S811EVB",
0,
0x0032000e,
@@ -990,19 +1015,20 @@ static stellaris_board_info stellaris_boards[] = {
static void stellaris_init(MachineState *ms, stellaris_board_info *board)
{
- static const int uart_irq[] = {5, 6, 33, 34};
- static const int timer_irq[] = {19, 21, 23, 35};
- static const uint32_t gpio_addr[7] =
+ static const int uart_irq[NUM_UART] = {5, 6, 33, 34};
+ static const int timer_irq[NUM_GPTM] = {19, 21, 23, 35};
+ static const uint32_t gpio_addr[NUM_GPIO] =
{ 0x40004000, 0x40005000, 0x40006000, 0x40007000,
0x40024000, 0x40025000, 0x40026000};
- static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31};
+ static const int gpio_irq[NUM_GPIO] = {0, 1, 2, 3, 4, 30, 31};
+ static const uint32_t i2c_addr[NUM_I2C] = {0x40020000, 0x40021000};
+ static const int i2c_irq[NUM_I2C] = {8, 37};
/* Memory map of SoC devices, from
* Stellaris LM3S6965 Microcontroller Data Sheet (rev I)
* http://www.ti.com/lit/ds/symlink/lm3s6965.pdf
*
* 40000000 wdtimer
- * 40002000 i2c (unimplemented)
* 40004000 GPIO
* 40005000 GPIO
* 40006000 GPIO
@@ -1032,13 +1058,13 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
*/
Object *soc_container;
- DeviceState *gpio_dev[7], *nvic;
- qemu_irq gpio_in[7][8];
- qemu_irq gpio_out[7][8];
+ DeviceState *gpio_dev[NUM_GPIO], *armv7m, *nvic;
+ qemu_irq gpio_in[NUM_GPIO][8];
+ qemu_irq gpio_out[NUM_GPIO][8];
qemu_irq adc;
int sram_size;
int flash_size;
- I2CBus *i2c;
+ DeviceState *i2c_dev[NUM_I2C] = { };
DeviceState *dev;
DeviceState *ssys_dev;
int i;
@@ -1053,7 +1079,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024;
sram_size = ((board->dc0 >> 18) + 1) * 1024;
- soc_container = object_new("container");
+ soc_container = object_new(TYPE_CONTAINER);
object_property_add_child(OBJECT(ms), "soc", soc_container);
/* Flash programming is done via the SCU, so pretend it is ROM. */
@@ -1096,25 +1122,26 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
qdev_prop_set_uint32(ssys_dev, "dc4", board->dc4);
sysbus_realize_and_unref(SYS_BUS_DEVICE(ssys_dev), &error_fatal);
- nvic = qdev_new(TYPE_ARMV7M);
- object_property_add_child(soc_container, "v7m", OBJECT(nvic));
- qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES);
- qdev_prop_set_uint8(nvic, "num-prio-bits", NUM_PRIO_BITS);
- qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type);
- qdev_prop_set_bit(nvic, "enable-bitband", true);
- qdev_connect_clock_in(nvic, "cpuclk",
+ armv7m = qdev_new(TYPE_ARMV7M);
+ object_property_add_child(soc_container, "v7m", OBJECT(armv7m));
+ qdev_prop_set_uint32(armv7m, "num-irq", NUM_IRQ_LINES);
+ qdev_prop_set_uint8(armv7m, "num-prio-bits", NUM_PRIO_BITS);
+ qdev_prop_set_string(armv7m, "cpu-type", ms->cpu_type);
+ qdev_prop_set_bit(armv7m, "enable-bitband", true);
+ qdev_connect_clock_in(armv7m, "cpuclk",
qdev_get_clock_out(ssys_dev, "SYSCLK"));
/* This SoC does not connect the systick reference clock */
- object_property_set_link(OBJECT(nvic), "memory",
+ object_property_set_link(OBJECT(armv7m), "memory",
OBJECT(get_system_memory()), &error_abort);
/* This will exit with an error if the user passed us a bad cpu_type */
- sysbus_realize_and_unref(SYS_BUS_DEVICE(nvic), &error_fatal);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(armv7m), &error_fatal);
+ nvic = armv7m;
/* Now we can wire up the IRQ and MMIO of the system registers */
sysbus_mmio_map(SYS_BUS_DEVICE(ssys_dev), 0, 0x400fe000);
sysbus_connect_irq(SYS_BUS_DEVICE(ssys_dev), 0, qdev_get_gpio_in(nvic, 28));
- if (board->dc1 & (1 << 16)) {
+ if (DEV_CAP(1, ADC)) {
dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000,
qdev_get_gpio_in(nvic, 14),
qdev_get_gpio_in(nvic, 15),
@@ -1125,8 +1152,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
} else {
adc = NULL;
}
- for (i = 0; i < 4; i++) {
- if (board->dc2 & (0x10000 << i)) {
+ for (i = 0; i < NUM_GPTM; i++) {
+ if (DEV_CAP(2, GPTM(i))) {
SysBusDevice *sbd;
dev = qdev_new(TYPE_STELLARIS_GPTM);
@@ -1143,7 +1170,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
}
}
- if (board->dc1 & (1 << 3)) { /* watchdog present */
+ if (DEV_CAP(1, WDT)) {
dev = qdev_new(TYPE_LUMINARY_WATCHDOG);
object_property_add_child(soc_container, "wdg", OBJECT(dev));
qdev_connect_clock_in(dev, "WDOGCLK",
@@ -1159,8 +1186,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
}
- for (i = 0; i < 7; i++) {
- if (board->dc4 & (1 << i)) {
+ for (i = 0; i < NUM_GPIO; i++) {
+ if (DEV_CAP(4, GPIO(i))) {
gpio_dev[i] = sysbus_create_simple("pl061_luminary", gpio_addr[i],
qdev_get_gpio_in(nvic,
gpio_irq[i]));
@@ -1171,17 +1198,21 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
}
}
- if (board->dc2 & (1 << 12)) {
- dev = sysbus_create_simple(TYPE_STELLARIS_I2C, 0x40020000,
- qdev_get_gpio_in(nvic, 8));
- i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
- if (board->peripherals & BP_OLED_I2C) {
- i2c_slave_create_simple(i2c, "ssd0303", 0x3d);
+ for (i = 0; i < NUM_I2C; i++) {
+ if (DEV_CAP(2, I2C(i))) {
+ i2c_dev[i] = sysbus_create_simple(TYPE_STELLARIS_I2C, i2c_addr[i],
+ qdev_get_gpio_in(nvic,
+ i2c_irq[i]));
}
}
+ if (board->peripherals & BP_OLED_I2C) {
+ I2CBus *bus = (I2CBus *)qdev_get_child_bus(i2c_dev[0], "i2c");
- for (i = 0; i < 4; i++) {
- if (board->dc2 & (1 << i)) {
+ i2c_slave_create_simple(bus, "ssd0303", 0x3d);
+ }
+
+ for (i = 0; i < NUM_UART; i++) {
+ if (DEV_CAP(2, UART(i))) {
SysBusDevice *sbd;
dev = qdev_new("pl011_luminary");
@@ -1193,7 +1224,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, uart_irq[i]));
}
}
- if (board->dc2 & (1 << 4)) {
+ if (DEV_CAP(2, SSI)) {
dev = sysbus_create_simple("pl022", 0x40008000,
qdev_get_gpio_in(nvic, 7));
if (board->peripherals & BP_OLED_SSI) {
@@ -1302,7 +1333,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
qemu_irq_raise(gpio_out[GPIO_D][0]);
}
}
- if (board->dc4 & (1 << 28)) {
+ if (DEV_CAP(4, EMAC)) {
DeviceState *enet;
enet = qdev_new("stellaris_enet");
@@ -1357,8 +1388,6 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
/* Add dummy regions for the devices we don't implement yet,
* so guest accesses don't cause unlogged crashes.
*/
- create_unimplemented_device("i2c-0", 0x40002000, 0x1000);
- create_unimplemented_device("i2c-2", 0x40021000, 0x1000);
create_unimplemented_device("PWM", 0x40028000, 0x1000);
create_unimplemented_device("QEI-0", 0x4002c000, 0x1000);
create_unimplemented_device("QEI-1", 0x4002d000, 0x1000);
@@ -1366,7 +1395,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
create_unimplemented_device("hibernation", 0x400fc000, 0x1000);
create_unimplemented_device("flash-control", 0x400fd000, 0x1000);
- armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, 0, flash_size);
+ armv7m_load_kernel(ARMV7M(armv7m)->cpu, ms->kernel_filename, 0, flash_size);
}
/* FIXME: Figure out how to generate these from stellaris_boards. */
@@ -1380,7 +1409,11 @@ static void lm3s6965evb_init(MachineState *machine)
stellaris_init(machine, &stellaris_boards[1]);
}
-static void lm3s811evb_class_init(ObjectClass *oc, void *data)
+/*
+ * Stellaris LM3S811 Evaluation Board Schematics:
+ * https://www.ti.com/lit/ug/symlink/spmu030.pdf
+ */
+static void lm3s811evb_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1396,7 +1429,11 @@ static const TypeInfo lm3s811evb_type = {
.class_init = lm3s811evb_class_init,
};
-static void lm3s6965evb_class_init(ObjectClass *oc, void *data)
+/*
+ * Stellaris: LM3S6965 Evaluation Board Schematics:
+ * https://www.ti.com/lit/ug/symlink/spmu029.pdf
+ */
+static void lm3s6965evb_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1404,6 +1441,7 @@ static void lm3s6965evb_class_init(ObjectClass *oc, void *data)
mc->init = lm3s6965evb_init;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3");
+ mc->auto_create_sdcard = true;
}
static const TypeInfo lm3s6965evb_type = {
@@ -1420,7 +1458,7 @@ static void stellaris_machine_init(void)
type_init(stellaris_machine_init)
-static void stellaris_i2c_class_init(ObjectClass *klass, void *data)
+static void stellaris_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1439,7 +1477,7 @@ static const TypeInfo stellaris_i2c_info = {
.class_init = stellaris_i2c_class_init,
};
-static void stellaris_adc_class_init(ObjectClass *klass, void *data)
+static void stellaris_adc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1456,7 +1494,7 @@ static const TypeInfo stellaris_adc_info = {
.class_init = stellaris_adc_class_init,
};
-static void stellaris_sys_class_init(ObjectClass *klass, void *data)
+static void stellaris_sys_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
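/*
 * Illustration only, not part of the patch: what the DEV_CAP() helper added to
 * stellaris.c expands to.  Token pasting turns DEV_CAP(1, ADC) into
 * extract32(board->dc1, DC1_ADC, 1) and DEV_CAP(2, GPTM(i)) into
 * extract32(board->dc2, DC2_GPTM(i), 1); with DC1_ADC == 16 that is the same
 * single-bit test as the old open-coded (board->dc1 & (1 << 16)).  The
 * standalone sketch below checks that equivalence; extract32() is restated
 * from "qemu/bitops.h" so the example compiles on its own, and BoardInfo is a
 * stand-in for stellaris_board_info.
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

#define DC1_ADC     16
#define DC2_GPTM(n) (16 + n)
#define DEV_CAP(_dc, _cap) extract32(board->dc##_dc, DC##_dc##_##_cap, 1)

typedef struct {
    uint32_t dc1, dc2;
} BoardInfo;

int main(void)
{
    BoardInfo info = { .dc1 = 1u << 16, .dc2 = 1u << 17 };
    const BoardInfo *board = &info;

    assert(DEV_CAP(1, ADC) == 1);       /* same test as dc1 & (1 << 16) */
    assert(DEV_CAP(2, GPTM(1)) == 1);   /* same test as dc2 & (0x10000 << 1) */
    return 0;
}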
diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c
index 808b783..0702d51 100644
--- a/hw/arm/stm32f100_soc.c
+++ b/hw/arm/stm32f100_soc.c
@@ -27,12 +27,12 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/arm/boot.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/arm/stm32f100_soc.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-clock.h"
#include "hw/misc/unimp.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* stm32f100_soc implementation is derived from stm32f205_soc */
@@ -181,7 +181,7 @@ static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("CRC", 0x40023000, 0x400);
}
-static void stm32f100_soc_class_init(ObjectClass *klass, void *data)
+static void stm32f100_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c
index a451e21..229af7f 100644
--- a/hw/arm/stm32f205_soc.c
+++ b/hw/arm/stm32f205_soc.c
@@ -26,11 +26,11 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/arm/boot.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/arm/stm32f205_soc.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-clock.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* At the moment only Timer 2 to 5 are modelled */
static const uint32_t timer_addr[STM_NUM_TIMERS] = { 0x40000000, 0x40000400,
@@ -202,7 +202,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp)
}
}
-static void stm32f205_soc_class_init(ObjectClass *klass, void *data)
+static void stm32f205_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c
index 2ad5b79..c8684e2 100644
--- a/hw/arm/stm32f405_soc.c
+++ b/hw/arm/stm32f405_soc.c
@@ -24,12 +24,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/arm/stm32f405_soc.h"
#include "hw/qdev-clock.h"
#include "hw/misc/unimp.h"
+#define RCC_ADDR 0x40023800
#define SYSCFG_ADD 0x40013800
static const uint32_t usart_addr[] = { 0x40011000, 0x40004400, 0x40004800,
0x40004C00, 0x40005000, 0x40011400,
@@ -59,6 +60,8 @@ static void stm32f405_soc_initfn(Object *obj)
object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M);
+ object_initialize_child(obj, "rcc", &s->rcc, TYPE_STM32_RCC);
+
object_initialize_child(obj, "syscfg", &s->syscfg, TYPE_STM32F4XX_SYSCFG);
for (i = 0; i < STM_NUM_USARTS; i++) {
@@ -160,6 +163,14 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp)
return;
}
+ /* Reset and clock controller */
+ dev = DEVICE(&s->rcc);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rcc), errp)) {
+ return;
+ }
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, RCC_ADDR);
+
/* System configuration controller */
dev = DEVICE(&s->syscfg);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->syscfg), errp)) {
@@ -276,7 +287,6 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("GPIOH", 0x40021C00, 0x400);
create_unimplemented_device("GPIOI", 0x40022000, 0x400);
create_unimplemented_device("CRC", 0x40023000, 0x400);
- create_unimplemented_device("RCC", 0x40023800, 0x400);
create_unimplemented_device("Flash Int", 0x40023C00, 0x400);
create_unimplemented_device("BKPSRAM", 0x40024000, 0x400);
create_unimplemented_device("DMA1", 0x40026000, 0x400);
@@ -288,7 +298,7 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("RNG", 0x50060800, 0x400);
}
-static void stm32f405_soc_class_init(ObjectClass *klass, void *data)
+static void stm32f405_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/stm32l4x5_soc.c b/hw/arm/stm32l4x5_soc.c
index fac83d3..64da555 100644
--- a/hw/arm/stm32l4x5_soc.c
+++ b/hw/arm/stm32l4x5_soc.c
@@ -24,8 +24,8 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/or-irq.h"
#include "hw/arm/stm32l4x5_soc.h"
#include "hw/char/stm32l4x5_usart.h"
@@ -236,6 +236,8 @@ static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp)
/* System configuration controller */
busdev = SYS_BUS_DEVICE(&s->syscfg);
+ qdev_connect_clock_in(DEVICE(&s->syscfg), "clk",
+ qdev_get_clock_out(DEVICE(&(s->rcc)), "syscfg-out"));
if (!sysbus_realize(busdev, errp)) {
return;
}
@@ -433,7 +435,7 @@ static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp)
create_unimplemented_device("QUADSPI", 0xA0001000, 0x400);
}
-static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -444,21 +446,21 @@ static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data)
/* No vmstate or reset required: device has no internal state */
}
-static void stm32l4x5xc_soc_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5xc_soc_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc);
ssc->flash_size = 256 * KiB;
}
-static void stm32l4x5xe_soc_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5xe_soc_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc);
ssc->flash_size = 512 * KiB;
}
-static void stm32l4x5xg_soc_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5xg_soc_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc);
diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c
index cc41935..e6c1f5b 100644
--- a/hw/arm/stm32vldiscovery.c
+++ b/hw/arm/stm32vldiscovery.c
@@ -51,7 +51,7 @@ static void stm32vldiscovery_init(MachineState *machine)
qdev_connect_clock_in(dev, "sysclk", sysclk);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- armv7m_load_kernel(ARM_CPU(first_cpu),
+ armv7m_load_kernel(STM32F100_SOC(dev)->armv7m.cpu,
machine->kernel_filename,
0, FLASH_SIZE);
}
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index 823b493..229c98d 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -38,8 +38,8 @@
#include "hw/arm/boot.h"
#include "chardev/char-fe.h"
#include "chardev/char-serial.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "hw/ssi/ssi.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
@@ -215,7 +215,7 @@ static const VMStateDescription vmstate_strongarm_pic_regs = {
},
};
-static void strongarm_pic_class_init(ObjectClass *klass, void *data)
+static void strongarm_pic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -448,7 +448,8 @@ static const VMStateDescription vmstate_strongarm_rtc_regs = {
},
};
-static void strongarm_rtc_sysbus_class_init(ObjectClass *klass, void *data)
+static void strongarm_rtc_sysbus_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -693,7 +694,7 @@ static const VMStateDescription vmstate_strongarm_gpio_regs = {
},
};
-static void strongarm_gpio_class_init(ObjectClass *klass, void *data)
+static void strongarm_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -865,7 +866,7 @@ static const VMStateDescription vmstate_strongarm_ppc_regs = {
},
};
-static void strongarm_ppc_class_init(ObjectClass *klass, void *data)
+static void strongarm_ppc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1332,17 +1333,16 @@ static const VMStateDescription vmstate_strongarm_uart_regs = {
},
};
-static Property strongarm_uart_properties[] = {
+static const Property strongarm_uart_properties[] = {
DEFINE_PROP_CHR("chardev", StrongARMUARTState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void strongarm_uart_class_init(ObjectClass *klass, void *data)
+static void strongarm_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "StrongARM UART controller";
- dc->reset = strongarm_uart_reset;
+ device_class_set_legacy_reset(dc, strongarm_uart_reset);
dc->vmsd = &vmstate_strongarm_uart_regs;
device_class_set_props(dc, strongarm_uart_properties);
dc->realize = strongarm_uart_realize;
@@ -1590,12 +1590,12 @@ static const VMStateDescription vmstate_strongarm_ssp_regs = {
},
};
-static void strongarm_ssp_class_init(ObjectClass *klass, void *data)
+static void strongarm_ssp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "StrongARM SSP controller";
- dc->reset = strongarm_ssp_reset;
+ device_class_set_legacy_reset(dc, strongarm_ssp_reset);
dc->vmsd = &vmstate_strongarm_ssp_regs;
}
diff --git a/hw/arm/strongarm.h b/hw/arm/strongarm.h
index 192821f..b11b3a3 100644
--- a/hw/arm/strongarm.h
+++ b/hw/arm/strongarm.h
@@ -1,7 +1,7 @@
#ifndef STRONGARM_H
#define STRONGARM_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "target/arm/cpu-qom.h"
#define SA_CS0 0x00000000
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
deleted file mode 100644
index 5891f60..0000000
--- a/hw/arm/tosa.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/* vim:set shiftwidth=4 ts=4 et: */
-/*
- * PXA255 Sharp Zaurus SL-6000 PDA platform
- *
- * Copyright (c) 2008 Dmitry Baryshkov
- *
- * Code based on spitz platform by Andrzej Zaborowski <balrog@zabor.org>
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "sysemu/runstate.h"
-#include "hw/arm/pxa.h"
-#include "hw/arm/boot.h"
-#include "hw/arm/sharpsl.h"
-#include "hw/pcmcia.h"
-#include "hw/boards.h"
-#include "hw/display/tc6393xb.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "hw/ssi/ssi.h"
-#include "hw/sysbus.h"
-#include "hw/misc/led.h"
-#include "exec/address-spaces.h"
-#include "qom/object.h"
-
-#define TOSA_RAM 0x04000000
-#define TOSA_ROM 0x00800000
-
-#define TOSA_GPIO_USB_IN (5)
-#define TOSA_GPIO_nSD_DETECT (9)
-#define TOSA_GPIO_ON_RESET (19)
-#define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */
-#define TOSA_GPIO_CF_CD (13)
-#define TOSA_GPIO_TC6393XB_INT (15)
-#define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */
-
-#define TOSA_SCOOP_GPIO_BASE 1
-#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2)
-#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3)
-#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4)
-
-#define TOSA_SCOOP_JC_GPIO_BASE 1
-#define TOSA_GPIO_BT_LED (TOSA_SCOOP_JC_GPIO_BASE + 0)
-#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1)
-#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2)
-#define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5)
-#define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7)
-
-#define DAC_BASE 0x4e
-#define DAC_CH1 0
-#define DAC_CH2 1
-
-static void tosa_microdrive_attach(PXA2xxState *cpu)
-{
- PCMCIACardState *md;
- DriveInfo *dinfo;
-
- dinfo = drive_get(IF_IDE, 0, 0);
- if (!dinfo || dinfo->media_cd)
- return;
- md = dscm1xxxx_init(dinfo);
- pxa2xx_pcmcia_attach(cpu->pcmcia[0], md);
-}
-
-/*
- * Encapsulation of some GPIO line behaviour for the Tosa board
- *
- * QEMU interface:
- * + named GPIO inputs "leds[0..3]": assert to light LEDs
- * + named GPIO input "reset": when asserted, resets the system
- */
-
-#define TYPE_TOSA_MISC_GPIO "tosa-misc-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(TosaMiscGPIOState, TOSA_MISC_GPIO)
-
-struct TosaMiscGPIOState {
- SysBusDevice parent_obj;
-};
-
-static void tosa_reset(void *opaque, int line, int level)
-{
- if (level) {
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
- }
-}
-
-static void tosa_misc_gpio_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
-
- qdev_init_gpio_in_named(dev, tosa_reset, "reset", 1);
-}
-
-static void tosa_gpio_setup(PXA2xxState *cpu,
- DeviceState *scp0,
- DeviceState *scp1,
- TC6393xbState *tmio)
-{
- DeviceState *misc_gpio;
- LEDState *led[4];
-
- misc_gpio = sysbus_create_simple(TYPE_TOSA_MISC_GPIO, -1, NULL);
-
- /* MMC/SD host */
- pxa2xx_mmci_handlers(cpu->mmc,
- qdev_get_gpio_in(scp0, TOSA_GPIO_SD_WP),
- qemu_irq_invert(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_nSD_DETECT)));
-
- /* Handle reset */
- qdev_connect_gpio_out(cpu->gpio, TOSA_GPIO_ON_RESET,
- qdev_get_gpio_in_named(misc_gpio, "reset", 0));
-
- /* PCMCIA signals: card's IRQ and Card-Detect */
- pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0],
- qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_IRQ),
- qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_CD));
-
- pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1],
- qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_JC_CF_IRQ),
- NULL);
-
- led[0] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH,
- LED_COLOR_BLUE, "bluetooth");
- led[1] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH,
- LED_COLOR_GREEN, "note");
- led[2] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH,
- LED_COLOR_AMBER, "charger-error");
- led[3] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH,
- LED_COLOR_GREEN, "wlan");
-
- qdev_connect_gpio_out(scp1, TOSA_GPIO_BT_LED,
- qdev_get_gpio_in(DEVICE(led[0]), 0));
- qdev_connect_gpio_out(scp1, TOSA_GPIO_NOTE_LED,
- qdev_get_gpio_in(DEVICE(led[1]), 0));
- qdev_connect_gpio_out(scp1, TOSA_GPIO_CHRG_ERR_LED,
- qdev_get_gpio_in(DEVICE(led[2]), 0));
- qdev_connect_gpio_out(scp1, TOSA_GPIO_WLAN_LED,
- qdev_get_gpio_in(DEVICE(led[3]), 0));
-
- qdev_connect_gpio_out(scp1, TOSA_GPIO_TC6393XB_L3V_ON, tc6393xb_l3v_get(tmio));
-
- /* UDC Vbus */
- qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_USB_IN));
-}
-
-static uint32_t tosa_ssp_tansfer(SSIPeripheral *dev, uint32_t value)
-{
- fprintf(stderr, "TG: %u %02x\n", value >> 5, value & 0x1f);
- return 0;
-}
-
-static void tosa_ssp_realize(SSIPeripheral *dev, Error **errp)
-{
- /* Nothing to do. */
-}
-
-#define TYPE_TOSA_DAC "tosa_dac"
-OBJECT_DECLARE_SIMPLE_TYPE(TosaDACState, TOSA_DAC)
-
-struct TosaDACState {
- I2CSlave parent_obj;
-
- int len;
- char buf[3];
-};
-
-static int tosa_dac_send(I2CSlave *i2c, uint8_t data)
-{
- TosaDACState *s = TOSA_DAC(i2c);
-
- s->buf[s->len] = data;
- if (s->len ++ > 2) {
-#ifdef VERBOSE
- fprintf(stderr, "%s: message too long (%i bytes)\n", __func__, s->len);
-#endif
- return 1;
- }
-
- if (s->len == 2) {
- fprintf(stderr, "dac: channel %d value 0x%02x\n",
- s->buf[0], s->buf[1]);
- }
-
- return 0;
-}
-
-static int tosa_dac_event(I2CSlave *i2c, enum i2c_event event)
-{
- TosaDACState *s = TOSA_DAC(i2c);
-
- s->len = 0;
- switch (event) {
- case I2C_START_SEND:
- break;
- case I2C_START_RECV:
- printf("%s: recv not supported!!!\n", __func__);
- break;
- case I2C_FINISH:
-#ifdef VERBOSE
- if (s->len < 2)
- printf("%s: message too short (%i bytes)\n", __func__, s->len);
- if (s->len > 2)
- printf("%s: message too long\n", __func__);
-#endif
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static uint8_t tosa_dac_recv(I2CSlave *s)
-{
- printf("%s: recv not supported!!!\n", __func__);
- return 0xff;
-}
-
-static void tosa_tg_init(PXA2xxState *cpu)
-{
- I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]);
- i2c_slave_create_simple(bus, TYPE_TOSA_DAC, DAC_BASE);
- ssi_create_peripheral(cpu->ssp[1], "tosa-ssp");
-}
-
-
-static struct arm_boot_info tosa_binfo = {
- .loader_start = PXA2XX_SDRAM_BASE,
- .ram_size = 0x04000000,
-};
-
-static void tosa_init(MachineState *machine)
-{
- MemoryRegion *address_space_mem = get_system_memory();
- MemoryRegion *rom = g_new(MemoryRegion, 1);
- PXA2xxState *mpu;
- TC6393xbState *tmio;
- DeviceState *scp0, *scp1;
-
- mpu = pxa255_init(tosa_binfo.ram_size);
-
- memory_region_init_rom(rom, NULL, "tosa.rom", TOSA_ROM, &error_fatal);
- memory_region_add_subregion(address_space_mem, 0, rom);
-
- tmio = tc6393xb_init(address_space_mem, 0x10000000,
- qdev_get_gpio_in(mpu->gpio, TOSA_GPIO_TC6393XB_INT));
-
- scp0 = sysbus_create_simple("scoop", 0x08800000, NULL);
- scp1 = sysbus_create_simple("scoop", 0x14800040, NULL);
-
- tosa_gpio_setup(mpu, scp0, scp1, tmio);
-
- tosa_microdrive_attach(mpu);
-
- tosa_tg_init(mpu);
-
- tosa_binfo.board_id = 0x208;
- arm_load_kernel(mpu->cpu, machine, &tosa_binfo);
- sl_bootparam_write(SL_PXA_PARAM_BASE);
-}
-
-static void tosapda_machine_init(MachineClass *mc)
-{
- mc->desc = "Sharp SL-6000 (Tosa) PDA (PXA255)";
- mc->init = tosa_init;
- mc->block_default_type = IF_IDE;
- mc->ignore_memory_transaction_failures = true;
- mc->deprecation_reason = "machine is old and unmaintained";
-}
-
-DEFINE_MACHINE("tosa", tosapda_machine_init)
-
-static void tosa_dac_class_init(ObjectClass *klass, void *data)
-{
- I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
-
- k->event = tosa_dac_event;
- k->recv = tosa_dac_recv;
- k->send = tosa_dac_send;
-}
-
-static const TypeInfo tosa_dac_info = {
- .name = TYPE_TOSA_DAC,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(TosaDACState),
- .class_init = tosa_dac_class_init,
-};
-
-static void tosa_ssp_class_init(ObjectClass *klass, void *data)
-{
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
-
- k->realize = tosa_ssp_realize;
- k->transfer = tosa_ssp_tansfer;
-}
-
-static const TypeInfo tosa_ssp_info = {
- .name = "tosa-ssp",
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(SSIPeripheral),
- .class_init = tosa_ssp_class_init,
-};
-
-static const TypeInfo tosa_misc_gpio_info = {
- .name = TYPE_TOSA_MISC_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(TosaMiscGPIOState),
- .instance_init = tosa_misc_gpio_init,
- /*
- * No class init required: device has no internal state so does not
- * need to set up reset or vmstate, and has no realize method.
- */
-};
-
-static void tosa_register_types(void)
-{
- type_register_static(&tosa_dac_info);
- type_register_static(&tosa_ssp_info);
- type_register_static(&tosa_misc_gpio_info);
-}
-
-type_init(tosa_register_types)
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index be6c8f7..f3386bd 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -1,5 +1,12 @@
# See docs/devel/tracing.rst for syntax documentation.
+# omap1.c
+omap1_pwl_clocking_scheme(const char *scheme) "omap1 CLKM: clocking scheme set to %s"
+omap1_pwl_backlight(int output) "omap1 PWL: backlight now at %d/256"
+omap1_pwt_buzz(int freq) "omap1 PWT: %dHz buzz on"
+omap1_pwt_silence(void) "omap1 PWT: buzzer silenced"
+omap1_lpg_led(const char *onoff) "omap1 LPG: LED is %s"
+
# virt-acpi-build.c
virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out."
@@ -15,6 +22,8 @@ smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d"
smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_vmid_s1(int vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_iova(int asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
+smmu_configs_inv_sid_range(uint32_t start, uint32_t end) "Config cache INV SID range from 0x%x to 0x%x"
+smmu_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
smmu_iotlb_lookup_hit(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_lookup_miss(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
@@ -52,10 +61,10 @@ smmuv3_cmdq_tlbi_nh(int vmid) "vmid=%d"
smmuv3_cmdq_tlbi_nsnh(void) ""
smmuv3_cmdq_tlbi_nh_asid(int asid) "asid=%d"
smmuv3_cmdq_tlbi_s12_vmid(int vmid) "vmid=%d"
-smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s"
smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s"
smmuv3_inv_notifiers_iova(const char *name, int asid, int vmid, uint64_t iova, uint8_t tg, uint64_t num_pages, int stage) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" stage=%d"
+smmu_reset_exit(void) ""
# strongarm.c
strongarm_uart_update_parameters(const char *label, int speed, char parity, int data_bits, int stop_bits) "%s speed=%d parity=%c data=%d stop=%d"
@@ -68,10 +77,5 @@ z2_aer915_send_too_long(int8_t msg) "message too long (%i bytes)"
z2_aer915_send(uint8_t reg, uint8_t value) "reg %d value 0x%02x"
z2_aer915_event(int8_t event, int8_t len) "i2c event =0x%x len=%d bytes"
-# xen_arm.c
-xen_create_virtio_mmio_devices(int i, int irq, uint64_t base) "Created virtio-mmio device %d: irq %d base 0x%"PRIx64
-xen_init_ram(uint64_t machine_ram_size) "Initialized xen ram with size 0x%"PRIx64
-xen_enable_tpm(uint64_t addr) "Connected tpmdev at address 0x%"PRIx64
-
# bcm2838.c
bcm2838_gic_set_irq(int irq, int level) "gic irq:%d lvl:%d"
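The trace points added above are enabled at runtime through QEMU's usual trace-pattern matching; as a usage sketch (the machine choices are illustrative, any board built on omap1.c or using the SMMUv3 behaves the same):

    $ qemu-system-arm -M sx1 --trace "omap1_pw*" ...
    $ qemu-system-aarch64 -M virt,iommu=smmuv3 --trace "smmu_config*" ...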
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
index d482354..5cf1a70 100644
--- a/hw/arm/versatilepb.c
+++ b/hw/arm/versatilepb.c
@@ -14,7 +14,7 @@
#include "hw/arm/boot.h"
#include "hw/net/smc91c111.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/pci/pci.h"
#include "hw/i2c/i2c.h"
#include "hw/i2c/arm_sbcon_i2c.h"
@@ -27,6 +27,7 @@
#include "qom/object.h"
#include "audio/audio.h"
#include "target/arm/cpu-qom.h"
+#include "qemu/log.h"
#define VERSATILE_FLASH_ADDR 0x34000000
#define VERSATILE_FLASH_SIZE (64 * 1024 * 1024)
@@ -110,7 +111,8 @@ static uint64_t vpb_sic_read(void *opaque, hwaddr offset,
case 8: /* PICENABLE */
return s->pic_enable;
default:
- printf ("vpb_sic_read: Bad register offset 0x%x\n", (int)offset);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "vpb_sic_read: Bad register offset 0x%x\n", (int)offset);
return 0;
}
}
@@ -144,7 +146,8 @@ static void vpb_sic_write(void *opaque, hwaddr offset,
vpb_sic_update_pic(s);
break;
default:
- printf ("vpb_sic_write: Bad register offset 0x%x\n", (int)offset);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "vpb_sic_write: Bad register offset 0x%x\n", (int)offset);
return;
}
vpb_sic_update(s);
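Switching these printf() calls to qemu_log_mask(LOG_GUEST_ERROR, ...) means bad-offset accesses are only reported when guest-error logging is turned on, for example (options other than -d are elided):

    $ qemu-system-arm -M versatilepb -d guest_errors ...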
@@ -409,7 +412,7 @@ static void vab_init(MachineState *machine)
versatile_init(machine, 0x25e);
}
-static void versatilepb_class_init(ObjectClass *oc, void *data)
+static void versatilepb_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -419,6 +422,7 @@ static void versatilepb_class_init(ObjectClass *oc, void *data)
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "versatile.ram";
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
@@ -429,7 +433,7 @@ static const TypeInfo versatilepb_type = {
.class_init = versatilepb_class_init,
};
-static void versatileab_class_init(ObjectClass *oc, void *data)
+static void versatileab_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -439,6 +443,7 @@ static void versatileab_class_init(ObjectClass *oc, void *data)
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "versatile.ram";
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
}
@@ -457,7 +462,7 @@ static void versatile_machine_init(void)
type_init(versatile_machine_init)
-static void vpb_sic_class_init(ObjectClass *klass, void *data)
+static void vpb_sic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index de815d8..35f8d05 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -30,11 +30,11 @@
#include "hw/net/lan9118.h"
#include "hw/i2c/i2c.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/block/flash.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "qemu/error-report.h"
#include <libfdt.h>
#include "hw/char/pl011.h"
@@ -42,7 +42,7 @@
#include "hw/cpu/a15mpcore.h"
#include "hw/i2c/arm_sbcon_i2c.h"
#include "hw/sd/sd.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qom/object.h"
#include "audio/audio.h"
#include "target/arm/cpu-qom.h"
@@ -51,6 +51,8 @@
#define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024)
#define VEXPRESS_FLASH_SECT_SIZE (256 * 1024)
+#define GIC_EXT_IRQS 64 /* Versatile Express A9 development board */
+
/* Number of virtio transports to create (0..8; limited by
* number of available IRQ lines).
*/
@@ -241,6 +243,7 @@ static void init_cpus(MachineState *ms, const char *cpu_type,
*/
dev = qdev_new(privdev);
qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
+ qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, periphbase);
@@ -251,7 +254,7 @@ static void init_cpus(MachineState *ms, const char *cpu_type,
* external interrupts starting from 32 (because there
* are internal interrupts 0..31).
*/
- for (n = 0; n < 64; n++) {
+ for (n = 0; n < GIC_EXT_IRQS; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
}
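The comment above is the invariant behind GIC_EXT_IRQS: the first GIC_INTERNAL interrupt IDs are per-CPU (SGIs and PPIs), so pic[] indexes only the shared external lines. A minimal standalone sketch of that mapping, with the constants restated here for illustration rather than taken from the QEMU headers:

    #include <assert.h>
    #include <stdio.h>

    #define GIC_INTERNAL 32   /* SGIs and PPIs occupy INTIDs 0..31 */
    #define GIC_EXT_IRQS 64   /* shared (SPI) lines on this board */

    /* Translate a GIC INTID into the index used for the board's pic[]. */
    static int intid_to_pic_index(int intid)
    {
        assert(intid >= GIC_INTERNAL && intid < GIC_INTERNAL + GIC_EXT_IRQS);
        return intid - GIC_INTERNAL;
    }

    int main(void)
    {
        /* INTID 34 is the third shared line, i.e. pic[2]. */
        printf("INTID 34 -> pic[%d]\n", intid_to_pic_index(34));
        return 0;
    }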
@@ -543,7 +546,7 @@ static void vexpress_common_init(MachineState *machine)
VexpressMachineClass *vmc = VEXPRESS_MACHINE_GET_CLASS(machine);
VEDBoardInfo *daughterboard = vmc->daughterboard;
DeviceState *dev, *sysctl, *pl041;
- qemu_irq pic[64];
+ qemu_irq pic[GIC_EXT_IRQS];
uint32_t sys_id;
DriveInfo *dinfo;
PFlashCFI01 *pflash0;
@@ -774,7 +777,7 @@ static void vexpress_a9_instance_init(Object *obj)
vms->virt = false;
}
-static void vexpress_class_init(ObjectClass *oc, void *data)
+static void vexpress_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -792,7 +795,7 @@ static void vexpress_class_init(ObjectClass *oc, void *data)
"Security Extensions (TrustZone)");
}
-static void vexpress_a9_class_init(ObjectClass *oc, void *data)
+static void vexpress_a9_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a9"),
@@ -803,11 +806,12 @@ static void vexpress_a9_class_init(ObjectClass *oc, void *data)
mc->desc = "ARM Versatile Express for Cortex-A9";
mc->valid_cpu_types = valid_cpu_types;
+ mc->auto_create_sdcard = true;
vmc->daughterboard = &a9_daughterboard;
}
-static void vexpress_a15_class_init(ObjectClass *oc, void *data)
+static void vexpress_a15_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a15"),
@@ -818,6 +822,7 @@ static void vexpress_a15_class_init(ObjectClass *oc, void *data)
mc->desc = "ARM Versatile Express for Cortex-A15";
mc->valid_cpu_types = valid_cpu_types;
+ mc->auto_create_sdcard = true;
vmc->daughterboard = &a15_daughterboard;
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index e10cad8..7e8e0f0 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -51,13 +51,12 @@
#include "hw/intc/arm_gicv3_its_common.h"
#include "hw/mem/nvdimm.h"
#include "hw/platform-bus.h"
-#include "sysemu/numa.h"
-#include "sysemu/reset.h"
-#include "sysemu/tpm.h"
+#include "system/numa.h"
+#include "system/reset.h"
+#include "system/tpm.h"
#include "migration/vmstate.h"
#include "hw/acpi/ghes.h"
#include "hw/acpi/viot.h"
-#include "hw/acpi/acpi_generic_initiator.h"
#include "hw/virtio/virtio-acpi.h"
#include "target/arm/multiprocessing.h"
@@ -154,10 +153,10 @@ static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
aml_append(dev, aml_name_decl("_CRS", crs));
Aml *aei = aml_resource_template();
- /* Pin 3 for power button */
- const uint32_t pin_list[1] = {3};
+
+ const uint32_t pin = GPIO_PIN_POWER_BUTTON;
aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
- AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
+ AML_EXCLUSIVE, AML_PULL_UP, 0, &pin, 1,
"GPO0", NULL, 0));
aml_append(dev, aml_name_decl("_AEI", aei));
@@ -464,8 +463,12 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
.pci_flags = 0,
.pci_segment = 0,
};
-
- build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id);
+ /*
+ * Pass NULL for the NameSpaceString: SPCR Revision 2 does not
+ * support it.
+ */
+ build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id,
+ NULL);
}
/*
@@ -511,7 +514,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
}
- build_srat_generic_pci_initiator(table_data);
+ build_srat_generic_affinity_structures(table_data);
if (ms->nvdimms_state->is_enabled) {
nvdimm_build_srat(table_data);
@@ -534,15 +537,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
/*
* Table 5-117 Flag Definitions
* set only "Timer interrupt Mode" and assume "Timer Interrupt
* polarity" bit as '0: Interrupt is Active high'
*/
- uint32_t irqflags = vmc->claim_edge_triggered_timers ?
- 1 : /* Interrupt is Edge triggered */
- 0; /* Interrupt is Level triggered */
+ const uint32_t irqflags = 0; /* Interrupt is Level triggered */
AcpiTable table = { .sig = "GTDT", .rev = 3, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
@@ -667,7 +667,6 @@ static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
int i;
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
const MemMapEntry *memmap = vms->memmap;
AcpiTable table = { .sig = "APIC", .rev = 4, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
@@ -738,7 +737,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
memmap[VIRT_HIGH_GIC_REDIST2].size);
}
- if (its_class_name() && !vmc->no_its) {
+ if (its_class_name()) {
/*
* ACPI spec, Revision 6.0 Errata A
* (original 6.0 definition has invalid Length)
@@ -943,10 +942,9 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
build_dbg2(tables_blob, tables->linker, vms);
if (vms->ras) {
- build_ghes_error_table(tables->hardware_errors, tables->linker);
acpi_add_table(table_offsets, tables_blob);
- acpi_build_hest(tables_blob, tables->linker, vms->oem_id,
- vms->oem_table_id);
+ acpi_build_hest(tables_blob, tables->hardware_errors, tables->linker,
+ vms->oem_id, vms->oem_table_id);
}
if (ms->numa_state->num_nodes > 0) {
@@ -971,7 +969,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
vms->oem_table_id);
}
- if (its_class_name() && !vmc->no_its) {
+ if (its_class_name()) {
acpi_add_table(table_offsets, tables_blob);
build_iort(tables_blob, tables->linker, vms);
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index b0c68d6..99fde58 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -42,17 +42,18 @@
#include "hw/vfio/vfio-amd-xgbe.h"
#include "hw/display/ramfb.h"
#include "net/net.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/numa.h"
-#include "sysemu/runstate.h"
-#include "sysemu/tpm.h"
-#include "sysemu/tcg.h"
-#include "sysemu/kvm.h"
-#include "sysemu/hvf.h"
-#include "sysemu/qtest.h"
+#include "system/device_tree.h"
+#include "system/numa.h"
+#include "system/runstate.h"
+#include "system/tpm.h"
+#include "system/tcg.h"
+#include "system/kvm.h"
+#include "system/hvf.h"
+#include "system/qtest.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
+#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
@@ -66,10 +67,11 @@
#include "hw/intc/arm_gicv3_its_common.h"
#include "hw/irq.h"
#include "kvm_arm.h"
+#include "hvf_arm.h"
#include "hw/firmware/smbios.h"
#include "qapi/visitor.h"
#include "qapi/qapi-visit-common.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "standard-headers/linux/input.h"
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
@@ -80,6 +82,7 @@
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
+#include "hw/uefi/var-service-api.h"
#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
@@ -104,7 +107,7 @@ static void arm_virt_compat_set(MachineClass *mc)
#define DEFINE_VIRT_MACHINE_IMPL(latest, ...) \
static void MACHINE_VER_SYM(class_init, virt, __VA_ARGS__)( \
ObjectClass *oc, \
- void *data) \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
arm_virt_compat_set(mc); \
@@ -191,6 +194,10 @@ static const MemMapEntry base_memmap[] = {
[VIRT_MEM] = { GiB, LEGACY_RAMLIMIT_BYTES },
};
+/* Update the docs for highmem-mmio-size when changing this default */
+#define DEFAULT_HIGH_PCIE_MMIO_SIZE_GB 512
+#define DEFAULT_HIGH_PCIE_MMIO_SIZE (DEFAULT_HIGH_PCIE_MMIO_SIZE_GB * GiB)
+
/*
* Highmem IO Regions: This memory map is floating, located after the RAM.
* Each MemMapEntry base (GPA) will be dynamically computed, depending on the
@@ -206,13 +213,16 @@ static const MemMapEntry base_memmap[] = {
* PA space for one specific region is always reserved, even if the region
* has been disabled or doesn't fit into the PA space. However, the PA space
* for the region won't be reserved in these circumstances with compact layout.
+ *
+ * Note that the highmem-mmio-size property will update the high PCIE MMIO size
+ * field in this array.
*/
static MemMapEntry extended_memmap[] = {
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
[VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB },
[VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB },
/* Second PCIe window */
- [VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB },
+ [VIRT_HIGH_PCIE_MMIO] = { 0x0, DEFAULT_HIGH_PCIE_MMIO_SIZE },
};
static const int a15irqmap[] = {
@@ -360,14 +370,9 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
* the correct information.
*/
ARMCPU *armcpu;
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
MachineState *ms = MACHINE(vms);
- if (vmc->claim_edge_triggered_timers) {
- irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI;
- }
-
if (vms->gic_version == VIRT_GIC_VERSION_2) {
irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
GIC_FDT_IRQ_PPI_CPU_WIDTH,
@@ -872,6 +877,8 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
[GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
[GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
[GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ,
+ [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ,
+ [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ,
};
for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
@@ -1004,7 +1011,7 @@ static void virt_powerdown_req(Notifier *n, void *opaque)
if (s->acpi_dev) {
acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS);
} else {
- /* use gpio Pin 3 for power button event */
+ /* use a GPIO pin for the power button event */
qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1);
}
}
@@ -1013,7 +1020,8 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
uint32_t phandle)
{
gpio_key_dev = sysbus_create_simple("gpio-key", -1,
- qdev_get_gpio_in(pl061_dev, 3));
+ qdev_get_gpio_in(pl061_dev,
+ GPIO_PIN_POWER_BUTTON));
qemu_fdt_add_subnode(fdt, "/gpio-keys");
qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys");
@@ -1024,7 +1032,7 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
qemu_fdt_setprop_cell(fdt, "/gpio-keys/poweroff", "linux,code",
KEY_POWER);
qemu_fdt_setprop_cells(fdt, "/gpio-keys/poweroff",
- "gpios", phandle, 3, 0);
+ "gpios", phandle, GPIO_PIN_POWER_BUTTON, 0);
}
#define SECURE_GPIO_POWEROFF 0
@@ -1407,6 +1415,7 @@ static void create_pcie_irq_map(const MachineState *ms,
static void create_smmu(const VirtMachineState *vms,
PCIBus *bus)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
char *node;
const char compat[] = "arm,smmu-v3";
int irq = vms->irqmap[VIRT_SMMU];
@@ -1423,6 +1432,9 @@ static void create_smmu(const VirtMachineState *vms,
dev = qdev_new(TYPE_ARM_SMMUV3);
+ if (!vmc->no_nested_smmu) {
+ object_property_set_str(OBJECT(dev), "stage", "nested", &error_fatal);
+ }
object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
&error_abort);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -1475,9 +1487,12 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle);
g_free(node);
- qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map",
- 0x0, vms->iommu_phandle, 0x0, bdf,
- bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf);
+ if (!vms->default_bus_bypass_iommu) {
+ qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map",
+ 0x0, vms->iommu_phandle, 0x0, bdf,
+ bdf + 1, vms->iommu_phandle, bdf + 1,
+ 0xffff - bdf);
+ }
}
static void create_pcie(VirtMachineState *vms)
@@ -1541,7 +1556,7 @@ static void create_pcie(VirtMachineState *vms)
/* Map IO port space */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
qdev_get_gpio_in(vms->gic, irq + i));
gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
@@ -1600,8 +1615,10 @@ static void create_pcie(VirtMachineState *vms)
switch (vms->iommu) {
case VIRT_IOMMU_SMMUV3:
create_smmu(vms, vms->bus);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map",
- 0x0, vms->iommu_phandle, 0x0, 0x10000);
+ if (!vms->default_bus_bypass_iommu) {
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map",
+ 0x0, vms->iommu_phandle, 0x0, 0x10000);
+ }
break;
default:
g_assert_not_reached();
@@ -1687,7 +1704,6 @@ static void virt_build_smbios(VirtMachineState *vms)
{
MachineClass *mc = MACHINE_GET_CLASS(vms);
MachineState *ms = MACHINE(vms);
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
uint8_t *smbios_tables, *smbios_anchor;
size_t smbios_tables_len, smbios_anchor_len;
struct smbios_phys_mem_area mem_array;
@@ -1697,8 +1713,7 @@ static void virt_build_smbios(VirtMachineState *vms)
product = "KVM Virtual Machine";
}
- smbios_set_defaults("QEMU", product,
- vmc->smbios_old_sys_ver ? "1.0" : mc->name);
+ smbios_set_defaults("QEMU", product, mc->name);
/* build the array of physical mem area from base_memmap */
mem_array.address = vms->memmap[VIRT_MEM].base;
@@ -1740,11 +1755,12 @@ void virt_machine_done(Notifier *notifier, void *data)
vms->memmap[VIRT_PLATFORM_BUS].size,
vms->irqmap[VIRT_PLATFORM_BUS]);
}
- if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) {
+ if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms, cpu) < 0) {
exit(1);
}
- fw_cfg_add_extra_pci_roots(vms->bus, vms->fw_cfg);
+ pci_bus_add_fw_cfg_extra_pci_roots(vms->fw_cfg, vms->bus,
+ &error_abort);
virt_acpi_setup(vms);
virt_build_smbios(vms);
@@ -1752,24 +1768,18 @@ void virt_machine_done(Notifier *notifier, void *data)
static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
{
- uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
+ uint8_t clustersz;
- if (!vmc->disallow_affinity_adjustment) {
- /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the
- * GIC's target-list limitations. 32-bit KVM hosts currently
- * always create clusters of 4 CPUs, but that is expected to
- * change when they gain support for gicv3. When KVM is enabled
- * it will override the changes we make here, therefore our
- * purposes are to make TCG consistent (with 64-bit KVM hosts)
- * and to improve SGI efficiency.
- */
- if (vms->gic_version == VIRT_GIC_VERSION_2) {
- clustersz = GIC_TARGETLIST_BITS;
- } else {
- clustersz = GICV3_TARGETLIST_BITS;
- }
+ /*
+ * Adjust MPIDR to make TCG consistent (with 64-bit KVM hosts)
+ * and to improve SGI efficiency.
+ */
+ if (vms->gic_version == VIRT_GIC_VERSION_2) {
+ clustersz = GIC_TARGETLIST_BITS;
+ } else {
+ clustersz = GICV3_TARGETLIST_BITS;
}
+
return arm_build_mp_affinity(idx, clustersz);
}
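With the 2.6 compat knob gone, the function always derives the cluster size from the GIC's SGI target-list width and hands the rest to arm_build_mp_affinity(). As a sketch of the arithmetic that helper is expected to perform (field layout assumed from the MPIDR Aff0/Aff1 definition, not copied from target/arm):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: Aff0 = position inside the cluster, Aff1 = cluster id. */
    static uint64_t build_mp_affinity(int idx, uint8_t clustersz)
    {
        uint64_t aff0 = idx % clustersz;
        uint64_t aff1 = idx / clustersz;
        return (aff1 << 8) | aff0;
    }

    int main(void)
    {
        /* GICv3 target lists cover 16 CPUs per cluster, so CPU 18 -> 1.2 */
        printf("0x%llx\n", (unsigned long long)build_mp_affinity(18, 16));
        return 0;
    }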
@@ -2106,7 +2116,8 @@ static void machvirt_init(MachineState *machine)
/*
* In accelerated mode, the memory map is computed earlier in kvm_type()
- * to create a VM with the right number of IPA bits.
+ * for Linux, or hvf_get_physical_address_range() for macOS to create a
+ * VM with the right number of IPA bits.
*/
if (!vms->memmap) {
Object *cpuobj;
@@ -2206,7 +2217,7 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- if (vms->mte && (kvm_enabled() || hvf_enabled())) {
+ if (vms->mte && hvf_enabled()) {
error_report("mach-virt: %s does not support providing "
"MTE to the guest CPU",
current_accel_name());
@@ -2254,10 +2265,6 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL);
}
- if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) {
- object_property_set_bool(cpuobj, "pmu", false, NULL);
- }
-
if (vmc->no_tcg_lpa2 && object_property_find(cpuobj, "lpa2")) {
object_property_set_bool(cpuobj, "lpa2", false, NULL);
}
@@ -2276,39 +2283,51 @@ static void machvirt_init(MachineState *machine)
}
if (vms->mte) {
- /* Create the memory region only once, but link to all cpus. */
- if (!tag_sysmem) {
- /*
- * The property exists only if MemTag is supported.
- * If it is, we must allocate the ram to back that up.
- */
- if (!object_property_find(cpuobj, "tag-memory")) {
- error_report("MTE requested, but not supported "
- "by the guest CPU");
- exit(1);
+ if (tcg_enabled()) {
+ /* Create the memory region only once, but link to all cpus. */
+ if (!tag_sysmem) {
+ /*
+ * The property exists only if MemTag is supported.
+ * If it is, we must allocate the ram to back that up.
+ */
+ if (!object_property_find(cpuobj, "tag-memory")) {
+ error_report("MTE requested, but not supported "
+ "by the guest CPU");
+ exit(1);
+ }
+
+ tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(tag_sysmem, OBJECT(machine),
+ "tag-memory", UINT64_MAX / 32);
+
+ if (vms->secure) {
+ secure_tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(secure_tag_sysmem, OBJECT(machine),
+ "secure-tag-memory",
+ UINT64_MAX / 32);
+
+ /* As with ram, secure-tag takes precedence over tag. */
+ memory_region_add_subregion_overlap(secure_tag_sysmem,
+ 0, tag_sysmem, -1);
+ }
}
- tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(tag_sysmem, OBJECT(machine),
- "tag-memory", UINT64_MAX / 32);
-
+ object_property_set_link(cpuobj, "tag-memory",
+ OBJECT(tag_sysmem), &error_abort);
if (vms->secure) {
- secure_tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(secure_tag_sysmem, OBJECT(machine),
- "secure-tag-memory", UINT64_MAX / 32);
-
- /* As with ram, secure-tag takes precedence over tag. */
- memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
- tag_sysmem, -1);
+ object_property_set_link(cpuobj, "secure-tag-memory",
+ OBJECT(secure_tag_sysmem),
+ &error_abort);
}
- }
-
- object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
- &error_abort);
- if (vms->secure) {
- object_property_set_link(cpuobj, "secure-tag-memory",
- OBJECT(secure_tag_sysmem),
- &error_abort);
+ } else if (kvm_enabled()) {
+ if (!kvm_arm_mte_supported()) {
+ error_report("MTE requested, but not supported by KVM");
+ exit(1);
+ }
+ kvm_arm_enable_mte(cpuobj, &error_abort);
+ } else {
+ error_report("MTE requested, but not supported");
+ exit(1);
}
}
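With this restructuring, tag memory is only allocated under TCG, KVM guests get MTE through kvm_arm_enable_mte(), and other accelerators reject the option. A usage sketch for the TCG case (-cpu max is one CPU model that advertises MTE):

    $ qemu-system-aarch64 -M virt,mte=on -cpu max -accel tcg ...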
@@ -2530,6 +2549,40 @@ static void virt_set_highmem_mmio(Object *obj, bool value, Error **errp)
vms->highmem_mmio = value;
}
+static void virt_get_highmem_mmio_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ uint64_t size = extended_memmap[VIRT_HIGH_PCIE_MMIO].size;
+
+ visit_type_size(v, name, &size, errp);
+}
+
+static void virt_set_highmem_mmio_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ uint64_t size;
+
+ if (!visit_type_size(v, name, &size, errp)) {
+ return;
+ }
+
+ if (!is_power_of_2(size)) {
+ error_setg(errp, "highmem-mmio-size is not a power of 2");
+ return;
+ }
+
+ if (size < DEFAULT_HIGH_PCIE_MMIO_SIZE) {
+ char *sz = size_to_str(DEFAULT_HIGH_PCIE_MMIO_SIZE);
+ error_setg(errp, "highmem-mmio-size cannot be set to a lower value "
+ "than the default (%s)", sz);
+ g_free(sz);
+ return;
+ }
+
+ extended_memmap[VIRT_HIGH_PCIE_MMIO].size = size;
+}
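Because the setter only accepts powers of two at or above the 512 GiB default, the smallest value that actually enlarges the second PCIe window is 1 TiB; for example (other options elided):

    $ qemu-system-aarch64 -M virt,highmem-mmio-size=1T ...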
static bool virt_get_its(Object *obj, Error **errp)
{
@@ -3026,7 +3079,40 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
return fixed_ipa ? 0 : requested_pa_size;
}
-static void virt_machine_class_init(ObjectClass *oc, void *data)
+static int virt_hvf_get_physical_address_range(MachineState *ms)
+{
+ VirtMachineState *vms = VIRT_MACHINE(ms);
+
+ int default_ipa_size = hvf_arm_get_default_ipa_bit_size();
+ int max_ipa_size = hvf_arm_get_max_ipa_bit_size();
+
+ /* We freeze the memory map to compute the highest gpa */
+ virt_set_memmap(vms, max_ipa_size);
+
+ int requested_ipa_size = 64 - clz64(vms->highest_gpa);
+
+ /*
+ * If we're <= the default IPA size just use the default.
+ * If we're above the default but below the maximum, round up to
+ * the maximum. hvf_arm_get_max_ipa_bit_size() conveniently only
+ * returns values that are valid ARM PARange values.
+ */
+ if (requested_ipa_size <= default_ipa_size) {
+ requested_ipa_size = default_ipa_size;
+ } else if (requested_ipa_size <= max_ipa_size) {
+ requested_ipa_size = max_ipa_size;
+ } else {
+ error_report("-m and ,maxmem option values "
+ "require an IPA range (%d bits) larger than "
+ "the one supported by the host (%d bits)",
+ requested_ipa_size, max_ipa_size);
+ return -1;
+ }
+
+ return requested_ipa_size;
+}
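The 64 - clz64(vms->highest_gpa) expression above computes the number of address bits needed to reach the top of the frozen memory map, and the policy is then "use the default if it fits, otherwise round up to the host maximum". A standalone sketch of the same computation (clz64 approximated with the GCC/Clang builtin; the IPA limits are made-up example values):

    #include <stdint.h>
    #include <stdio.h>

    static int bits_needed(uint64_t highest_gpa)
    {
        return 64 - __builtin_clzll(highest_gpa);   /* highest_gpa != 0 */
    }

    int main(void)
    {
        const int default_ipa = 36, max_ipa = 40;   /* example limits only */
        uint64_t highest_gpa = (1ULL << 38) - 1;    /* needs 38 bits */
        int want = bits_needed(highest_gpa);

        if (want <= default_ipa) {
            want = default_ipa;
        } else if (want <= max_ipa) {
            want = max_ipa;                         /* round up to host max */
        } else {
            want = -1;                              /* does not fit */
        }
        printf("requested IPA bits: %d\n", want);   /* prints 40 */
        return 0;
    }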
+
+static void virt_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -3067,6 +3153,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM);
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS);
#ifdef CONFIG_TPM
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
@@ -3085,6 +3172,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
mc->valid_cpu_types = valid_cpu_types;
mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
mc->kvm_type = virt_kvm_type;
+ mc->hvf_get_physical_address_range = virt_hvf_get_physical_address_range;
assert(!mc->get_hotplug_handler);
mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
hc->pre_plug = virt_machine_device_pre_plug_cb;
@@ -3153,6 +3241,14 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
"Set on/off to enable/disable high "
"memory region for PCI MMIO");
+ object_class_property_add(oc, "highmem-mmio-size", "size",
+ virt_get_highmem_mmio_size,
+ virt_set_highmem_mmio_size,
+ NULL, NULL);
+ object_class_property_set_description(oc, "highmem-mmio-size",
+ "Set the high memory region size "
+ "for PCI MMIO");
+
object_class_property_add_str(oc, "gic-version", virt_get_gic_version,
virt_set_gic_version);
object_class_property_set_description(oc, "gic-version",
@@ -3240,21 +3336,17 @@ static void virt_instance_init(Object *obj)
vms->highmem_compact = !vmc->no_highmem_compact;
vms->gic_version = VIRT_GIC_VERSION_NOSEL;
- vms->highmem_ecam = !vmc->no_highmem_ecam;
+ vms->highmem_ecam = true;
vms->highmem_mmio = true;
vms->highmem_redists = true;
- if (vmc->no_its) {
- vms->its = false;
- } else {
- /* Default allows ITS instantiation */
- vms->its = true;
+ /* Default allows ITS instantiation */
+ vms->its = true;
- if (vmc->no_tcg_its) {
- vms->tcg_its = false;
- } else {
- vms->tcg_its = true;
- }
+ if (vmc->no_tcg_its) {
+ vms->tcg_its = false;
+ } else {
+ vms->tcg_its = true;
}
/* Default disallows iommu instantiation */
@@ -3288,7 +3380,7 @@ static const TypeInfo virt_machine_info = {
.class_size = sizeof(VirtMachineClass),
.class_init = virt_machine_class_init,
.instance_init = virt_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
@@ -3300,14 +3392,40 @@ static void machvirt_machine_init(void)
}
type_init(machvirt_machine_init);
+static void virt_machine_10_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(10, 1)
+
+static void virt_machine_10_0_options(MachineClass *mc)
+{
+ virt_machine_10_1_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+}
+DEFINE_VIRT_MACHINE(10, 0)
+
+static void virt_machine_9_2_options(MachineClass *mc)
+{
+ virt_machine_10_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+}
+DEFINE_VIRT_MACHINE(9, 2)
+
static void virt_machine_9_1_options(MachineClass *mc)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+ virt_machine_9_2_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
+ /* 9.1 and earlier have only a stage-1 SMMU, not a nested s1+2 one */
+ vmc->no_nested_smmu = true;
}
-DEFINE_VIRT_MACHINE_AS_LATEST(9, 1)
+DEFINE_VIRT_MACHINE(9, 1)
static void virt_machine_9_0_options(MachineClass *mc)
{
virt_machine_9_1_options(mc);
+ mc->smbios_memory_device_size = 16 * GiB;
compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
}
DEFINE_VIRT_MACHINE(9, 0)
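The pattern for the newly added versions is unchanged: each older virt_machine_X_Y_options() first calls the next newer one and then stacks its own compat properties, so selecting a versioned machine type pins the whole chain of behaviours, e.g. (a sketch, any released version name works the same way):

    $ qemu-system-aarch64 -M virt-9.1 ...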
@@ -3449,99 +3567,3 @@ static void virt_machine_4_1_options(MachineClass *mc)
mc->auto_enable_numa_with_memhp = false;
}
DEFINE_VIRT_MACHINE(4, 1)
-
-static void virt_machine_4_0_options(MachineClass *mc)
-{
- virt_machine_4_1_options(mc);
- compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
-}
-DEFINE_VIRT_MACHINE(4, 0)
-
-static void virt_machine_3_1_options(MachineClass *mc)
-{
- virt_machine_4_0_options(mc);
- compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
-}
-DEFINE_VIRT_MACHINE(3, 1)
-
-static void virt_machine_3_0_options(MachineClass *mc)
-{
- virt_machine_3_1_options(mc);
- compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
-}
-DEFINE_VIRT_MACHINE(3, 0)
-
-static void virt_machine_2_12_options(MachineClass *mc)
-{
- VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
-
- virt_machine_3_0_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
- vmc->no_highmem_ecam = true;
- mc->max_cpus = 255;
-}
-DEFINE_VIRT_MACHINE(2, 12)
-
-static void virt_machine_2_11_options(MachineClass *mc)
-{
- VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
-
- virt_machine_2_12_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
- vmc->smbios_old_sys_ver = true;
-}
-DEFINE_VIRT_MACHINE(2, 11)
-
-static void virt_machine_2_10_options(MachineClass *mc)
-{
- virt_machine_2_11_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
- /* before 2.11 we never faulted accesses to bad addresses */
- mc->ignore_memory_transaction_failures = true;
-}
-DEFINE_VIRT_MACHINE(2, 10)
-
-static void virt_machine_2_9_options(MachineClass *mc)
-{
- virt_machine_2_10_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
-}
-DEFINE_VIRT_MACHINE(2, 9)
-
-static void virt_machine_2_8_options(MachineClass *mc)
-{
- VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
-
- virt_machine_2_9_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
- /* For 2.8 and earlier we falsely claimed in the DT that
- * our timers were edge-triggered, not level-triggered.
- */
- vmc->claim_edge_triggered_timers = true;
-}
-DEFINE_VIRT_MACHINE(2, 8)
-
-static void virt_machine_2_7_options(MachineClass *mc)
-{
- VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
-
- virt_machine_2_8_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
- /* ITS was introduced with 2.8 */
- vmc->no_its = true;
- /* Stick with 1K pages for migration compatibility */
- mc->minimum_page_bits = 0;
-}
-DEFINE_VIRT_MACHINE(2, 7)
-
-static void virt_machine_2_6_options(MachineClass *mc)
-{
- VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
-
- virt_machine_2_7_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
- vmc->disallow_affinity_adjustment = true;
- /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */
- vmc->no_pmu = true;
-}
-DEFINE_VIRT_MACHINE(2, 6)
diff --git a/hw/arm/xen-pvh.c b/hw/arm/xen-pvh.c
new file mode 100644
index 0000000..4b26bcf
--- /dev/null
+++ b/hw/arm/xen-pvh.c
@@ -0,0 +1,106 @@
+/*
+ * QEMU ARM Xen PVH Machine
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/qapi-commands-migration.h"
+#include "hw/boards.h"
+#include "system/system.h"
+#include "hw/xen/xen-pvh-common.h"
+#include "hw/xen/arch_hvm.h"
+
+#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh")
+
+/*
+ * VIRTIO_MMIO_DEV_SIZE is imported from tools/libs/light/libxl_arm.c under Xen
+ * repository.
+ *
+ * Origin: git://xenbits.xen.org/xen.git 2128143c114c
+ */
+#define VIRTIO_MMIO_DEV_SIZE 0x200
+
+#define NR_VIRTIO_MMIO_DEVICES \
+ (GUEST_VIRTIO_MMIO_SPI_LAST - GUEST_VIRTIO_MMIO_SPI_FIRST)
+
+static void xen_arm_instance_init(Object *obj)
+{
+ XenPVHMachineState *s = XEN_PVH_MACHINE(obj);
+
+ /* Default values. */
+ s->cfg.ram_low = (MemMapEntry) { GUEST_RAM0_BASE, GUEST_RAM0_SIZE };
+ s->cfg.ram_high = (MemMapEntry) { GUEST_RAM1_BASE, GUEST_RAM1_SIZE };
+
+ s->cfg.virtio_mmio_num = NR_VIRTIO_MMIO_DEVICES;
+ s->cfg.virtio_mmio_irq_base = GUEST_VIRTIO_MMIO_SPI_FIRST;
+ s->cfg.virtio_mmio = (MemMapEntry) { GUEST_VIRTIO_MMIO_BASE,
+ VIRTIO_MMIO_DEV_SIZE };
+}
+
+static void xen_pvh_set_pci_intx_irq(void *opaque, int intx_irq, int level)
+{
+ XenPVHMachineState *s = XEN_PVH_MACHINE(opaque);
+ int irq = s->cfg.pci_intx_irq_base + intx_irq;
+
+ if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) {
+ error_report("xendevicemodel_set_pci_intx_level failed");
+ }
+}
+
+static void xen_arm_machine_class_init(ObjectClass *oc, const void *data)
+{
+ XenPVHMachineClass *xpc = XEN_PVH_MACHINE_CLASS(oc);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "Xen PVH ARM machine";
+
+ /*
+ * mc->max_cpus holds the MAX value allowed in the -smp command-line opts.
+ *
+ * 1. If users don't pass any -smp option:
+ * ms->smp.cpus will default to 1.
+ * ms->smp.max_cpus will default to 1.
+ *
+ * 2. If users pass -smp X:
+ * ms->smp.cpus will be set to X.
+ * ms->smp.max_cpus will also be set to X.
+ *
+ * 3. If users pass -smp X,maxcpus=Y:
+ * ms->smp.cpus will be set to X.
+ * ms->smp.max_cpus will be set to Y.
+ *
+ * In scenarios 2 and 3, if X or Y are set to something larger than
+ * mc->max_cpus, QEMU will bail out with an error message.
+ */
+ mc->max_cpus = GUEST_MAX_VCPUS;
+
+ /* Xen/ARM does not use buffered IOREQs. */
+ xpc->handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_OFF;
+
+ /* PCI INTX delivery. */
+ xpc->set_pci_intx_irq = xen_pvh_set_pci_intx_irq;
+
+ /* List of supported features known to work on PVH ARM. */
+ xpc->has_pci = true;
+ xpc->has_tpm = true;
+ xpc->has_virtio_mmio = true;
+
+ xen_pvh_class_setup_common_props(xpc);
+}
+
+static const TypeInfo xen_arm_machine_type = {
+ .name = TYPE_XEN_ARM,
+ .parent = TYPE_XEN_PVH_MACHINE,
+ .class_init = xen_arm_machine_class_init,
+ .instance_size = sizeof(XenPVHMachineState),
+ .instance_init = xen_arm_instance_init,
+};
+
+static void xen_arm_machine_register_types(void)
+{
+ type_register_static(&xen_arm_machine_type);
+}
+
+type_init(xen_arm_machine_register_types)
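The mc->max_cpus comment in xen_arm_machine_class_init() above maps directly onto the usual -smp spellings; as a usage sketch (the values are arbitrary, and Xen itself ultimately bounds the vCPU count via GUEST_MAX_VCPUS):

    $ qemu-system-aarch64 -M xenpvh -smp 2,maxcpus=4 ...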
diff --git a/hw/arm/xen-stubs.c b/hw/arm/xen-stubs.c
new file mode 100644
index 0000000..6a83043
--- /dev/null
+++ b/hw/arm/xen-stubs.c
@@ -0,0 +1,30 @@
+/*
+ * Stubs for unimplemented Xen functions for ARM.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-commands-migration.h"
+#include "system/xen.h"
+#include "hw/hw.h"
+#include "hw/xen/xen-hvm-common.h"
+#include "hw/xen/arch_hvm.h"
+
+void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
+{
+ hw_error("Invalid ioreq type 0x%x\n", req->type);
+}
+
+void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
+ bool add)
+{
+}
+
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
+{
+}
+
+void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
+{
+}
diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
deleted file mode 100644
index 6fad829e..0000000
--- a/hw/arm/xen_arm.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * QEMU ARM Xen PVH Machine
- *
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qapi/qapi-commands-migration.h"
-#include "qapi/visitor.h"
-#include "hw/boards.h"
-#include "hw/irq.h"
-#include "hw/sysbus.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/sysemu.h"
-#include "hw/xen/xen-hvm-common.h"
-#include "sysemu/tpm.h"
-#include "hw/xen/arch_hvm.h"
-#include "trace.h"
-
-#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh")
-OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM)
-
-static const MemoryListener xen_memory_listener = {
- .region_add = xen_region_add,
- .region_del = xen_region_del,
- .log_start = NULL,
- .log_stop = NULL,
- .log_sync = NULL,
- .log_global_start = NULL,
- .log_global_stop = NULL,
- .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
-};
-
-struct XenArmState {
- /*< private >*/
- MachineState parent;
-
- XenIOState *state;
-
- struct {
- uint64_t tpm_base_addr;
- } cfg;
-};
-
-static MemoryRegion ram_lo, ram_hi;
-
-/*
- * VIRTIO_MMIO_DEV_SIZE is imported from tools/libs/light/libxl_arm.c under Xen
- * repository.
- *
- * Origin: git://xenbits.xen.org/xen.git 2128143c114c
- */
-#define VIRTIO_MMIO_DEV_SIZE 0x200
-
-#define NR_VIRTIO_MMIO_DEVICES \
- (GUEST_VIRTIO_MMIO_SPI_LAST - GUEST_VIRTIO_MMIO_SPI_FIRST)
-
-static void xen_set_irq(void *opaque, int irq, int level)
-{
- if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) {
- error_report("xendevicemodel_set_irq_level failed");
- }
-}
-
-static void xen_create_virtio_mmio_devices(XenArmState *xam)
-{
- int i;
-
- for (i = 0; i < NR_VIRTIO_MMIO_DEVICES; i++) {
- hwaddr base = GUEST_VIRTIO_MMIO_BASE + i * VIRTIO_MMIO_DEV_SIZE;
- qemu_irq irq = qemu_allocate_irq(xen_set_irq, NULL,
- GUEST_VIRTIO_MMIO_SPI_FIRST + i);
-
- sysbus_create_simple("virtio-mmio", base, irq);
-
- trace_xen_create_virtio_mmio_devices(i,
- GUEST_VIRTIO_MMIO_SPI_FIRST + i,
- base);
- }
-}
-
-static void xen_init_ram(MachineState *machine)
-{
- MemoryRegion *sysmem = get_system_memory();
- ram_addr_t block_len, ram_size[GUEST_RAM_BANKS];
-
- trace_xen_init_ram(machine->ram_size);
- if (machine->ram_size <= GUEST_RAM0_SIZE) {
- ram_size[0] = machine->ram_size;
- ram_size[1] = 0;
- block_len = GUEST_RAM0_BASE + ram_size[0];
- } else {
- ram_size[0] = GUEST_RAM0_SIZE;
- ram_size[1] = machine->ram_size - GUEST_RAM0_SIZE;
- block_len = GUEST_RAM1_BASE + ram_size[1];
- }
-
- memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len,
- &error_fatal);
-
- memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &xen_memory,
- GUEST_RAM0_BASE, ram_size[0]);
- memory_region_add_subregion(sysmem, GUEST_RAM0_BASE, &ram_lo);
- if (ram_size[1] > 0) {
- memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &xen_memory,
- GUEST_RAM1_BASE, ram_size[1]);
- memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi);
- }
-
- /* Setup support for grants. */
- memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len,
- &error_fatal);
- memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants);
-}
-
-void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
-{
- hw_error("Invalid ioreq type 0x%x\n", req->type);
-
- return;
-}
-
-void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
- bool add)
-{
-}
-
-void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
-{
-}
-
-void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
-{
-}
-
-#ifdef CONFIG_TPM
-static void xen_enable_tpm(XenArmState *xam)
-{
- Error *errp = NULL;
- DeviceState *dev;
- SysBusDevice *busdev;
-
- TPMBackend *be = qemu_find_tpm_be("tpm0");
- if (be == NULL) {
- error_report("Couldn't find tmp0 backend");
- return;
- }
- dev = qdev_new(TYPE_TPM_TIS_SYSBUS);
- object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp);
- object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp);
- busdev = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_mmio_map(busdev, 0, xam->cfg.tpm_base_addr);
-
- trace_xen_enable_tpm(xam->cfg.tpm_base_addr);
-}
-#endif
-
-static void xen_arm_init(MachineState *machine)
-{
- XenArmState *xam = XEN_ARM(machine);
-
- xam->state = g_new0(XenIOState, 1);
-
- if (machine->ram_size == 0) {
- warn_report("%s non-zero ram size not specified. QEMU machine started"
- " without IOREQ (no emulated devices including virtio)",
- MACHINE_CLASS(object_get_class(OBJECT(machine)))->desc);
- return;
- }
-
- xen_init_ram(machine);
-
- xen_register_ioreq(xam->state, machine->smp.cpus, &xen_memory_listener);
-
- xen_create_virtio_mmio_devices(xam);
-
-#ifdef CONFIG_TPM
- if (xam->cfg.tpm_base_addr) {
- xen_enable_tpm(xam);
- } else {
- warn_report("tpm-base-addr is not provided. TPM will not be enabled");
- }
-#endif
-}
-
-#ifdef CONFIG_TPM
-static void xen_arm_get_tpm_base_addr(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- XenArmState *xam = XEN_ARM(obj);
- uint64_t value = xam->cfg.tpm_base_addr;
-
- visit_type_uint64(v, name, &value, errp);
-}
-
-static void xen_arm_set_tpm_base_addr(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- XenArmState *xam = XEN_ARM(obj);
- uint64_t value;
-
- if (!visit_type_uint64(v, name, &value, errp)) {
- return;
- }
-
- xam->cfg.tpm_base_addr = value;
-}
-#endif
-
-static void xen_arm_machine_class_init(ObjectClass *oc, void *data)
-{
-
- MachineClass *mc = MACHINE_CLASS(oc);
- mc->desc = "Xen Para-virtualized PC";
- mc->init = xen_arm_init;
- mc->max_cpus = 1;
- mc->default_machine_opts = "accel=xen";
- /* Set explicitly here to make sure that real ram_size is passed */
- mc->default_ram_size = 0;
-
-#ifdef CONFIG_TPM
- object_class_property_add(oc, "tpm-base-addr", "uint64_t",
- xen_arm_get_tpm_base_addr,
- xen_arm_set_tpm_base_addr,
- NULL, NULL);
- object_class_property_set_description(oc, "tpm-base-addr",
- "Set Base address for TPM device.");
-
- machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
-#endif
-}
-
-static const TypeInfo xen_arm_machine_type = {
- .name = TYPE_XEN_ARM,
- .parent = TYPE_MACHINE,
- .class_init = xen_arm_machine_class_init,
- .instance_size = sizeof(XenArmState),
-};
-
-static void xen_arm_machine_register_types(void)
-{
- type_register_static(&xen_arm_machine_type);
-}
-
-type_init(xen_arm_machine_register_types)
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index 3c56b9a..0372cd0 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -21,7 +21,7 @@
#include "hw/sysbus.h"
#include "hw/arm/boot.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/block/flash.h"
#include "hw/loader.h"
@@ -34,7 +34,8 @@
#include "hw/net/cadence_gem.h"
#include "hw/cpu/a9mpcore.h"
#include "hw/qdev-clock.h"
-#include "sysemu/reset.h"
+#include "hw/misc/unimp.h"
+#include "system/reset.h"
#include "qom/object.h"
#include "exec/tswap.h"
#include "target/arm/cpu-qom.h"
@@ -53,11 +54,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(ZynqMachineState, ZYNQ_MACHINE)
#define FLASH_SIZE (64 * 1024 * 1024)
#define FLASH_SECTOR_SIZE (128 * 1024)
-#define IRQ_OFFSET 32 /* pic interrupts start from index 32 */
-
#define MPCORE_PERIPHBASE 0xF8F00000
#define ZYNQ_BOARD_MIDR 0x413FC090
+#define GIC_EXT_IRQS 64 /* Zynq 7000 SoC */
+
static const int dma_irqs[8] = {
46, 47, 48, 49, 72, 73, 74, 75
};
@@ -206,7 +207,7 @@ static void zynq_init(MachineState *machine)
MemoryRegion *ocm_ram = g_new(MemoryRegion, 1);
DeviceState *dev, *slcr;
SysBusDevice *busdev;
- qemu_irq pic[64];
+ qemu_irq pic[GIC_EXT_IRQS];
int n;
unsigned int smp_cpus = machine->smp.cpus;
@@ -219,14 +220,6 @@ static void zynq_init(MachineState *machine)
for (n = 0; n < smp_cpus; n++) {
Object *cpuobj = object_new(machine->cpu_type);
- /*
- * By default A9 CPUs have EL3 enabled. This board does not currently
- * support EL3 so the CPU EL3 property is disabled before realization.
- */
- if (object_property_find(cpuobj, "has_el3")) {
- object_property_set_bool(cpuobj, "has_el3", false, &error_fatal);
- }
-
object_property_set_int(cpuobj, "midr", ZYNQ_BOARD_MIDR,
&error_fatal);
object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE,
@@ -270,6 +263,7 @@ static void zynq_init(MachineState *machine)
dev = qdev_new(TYPE_A9MPCORE_PRIV);
qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
+ qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
@@ -284,16 +278,16 @@ static void zynq_init(MachineState *machine)
qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
}
- for (n = 0; n < 64; n++) {
+ for (n = 0; n < GIC_EXT_IRQS; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
}
- n = zynq_init_spi_flashes(0xE0006000, pic[58 - IRQ_OFFSET], false, 0);
- n = zynq_init_spi_flashes(0xE0007000, pic[81 - IRQ_OFFSET], false, n);
- n = zynq_init_spi_flashes(0xE000D000, pic[51 - IRQ_OFFSET], true, n);
+ n = zynq_init_spi_flashes(0xE0006000, pic[58 - GIC_INTERNAL], false, 0);
+ n = zynq_init_spi_flashes(0xE0007000, pic[81 - GIC_INTERNAL], false, n);
+ n = zynq_init_spi_flashes(0xE000D000, pic[51 - GIC_INTERNAL], true, n);
- sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - IRQ_OFFSET]);
- sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - IRQ_OFFSET]);
+ sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - GIC_INTERNAL]);
+ sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - GIC_INTERNAL]);
dev = qdev_new(TYPE_CADENCE_UART);
busdev = SYS_BUS_DEVICE(dev);
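The switch from IRQ_OFFSET to GIC_INTERNAL is purely cosmetic, since both constants are 32; the worked arithmetic for, say, UART1 shows the wiring is unchanged:

    INTID 82 - GIC_INTERNAL (32) = pic[50]  ==  82 - IRQ_OFFSET (32) = pic[50]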
@@ -302,7 +296,7 @@ static void zynq_init(MachineState *machine)
qdev_get_clock_out(slcr, "uart0_ref_clk"));
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, 0xE0000000);
- sysbus_connect_irq(busdev, 0, pic[59 - IRQ_OFFSET]);
+ sysbus_connect_irq(busdev, 0, pic[59 - GIC_INTERNAL]);
dev = qdev_new(TYPE_CADENCE_UART);
busdev = SYS_BUS_DEVICE(dev);
qdev_prop_set_chr(dev, "chardev", serial_hd(1));
@@ -310,15 +304,15 @@ static void zynq_init(MachineState *machine)
qdev_get_clock_out(slcr, "uart1_ref_clk"));
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, 0xE0001000);
- sysbus_connect_irq(busdev, 0, pic[82 - IRQ_OFFSET]);
+ sysbus_connect_irq(busdev, 0, pic[82 - GIC_INTERNAL]);
sysbus_create_varargs("cadence_ttc", 0xF8001000,
- pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL);
+ pic[42-GIC_INTERNAL], pic[43-GIC_INTERNAL], pic[44-GIC_INTERNAL], NULL);
sysbus_create_varargs("cadence_ttc", 0xF8002000,
- pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL);
+ pic[69-GIC_INTERNAL], pic[70-GIC_INTERNAL], pic[71-GIC_INTERNAL], NULL);
- gem_init(0xE000B000, pic[54 - IRQ_OFFSET]);
- gem_init(0xE000C000, pic[77 - IRQ_OFFSET]);
+ gem_init(0xE000B000, pic[54 - GIC_INTERNAL]);
+ gem_init(0xE000C000, pic[77 - GIC_INTERNAL]);
for (n = 0; n < 2; n++) {
int hci_irq = n ? 79 : 56;
@@ -337,7 +331,7 @@ static void zynq_init(MachineState *machine)
qdev_prop_set_uint64(dev, "capareg", ZYNQ_SDHCI_CAPABILITIES);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, hci_addr);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - IRQ_OFFSET]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - GIC_INTERNAL]);
di = drive_get(IF_SD, 0, n);
blk = di ? blk_by_legacy_dinfo(di) : NULL;
@@ -350,7 +344,7 @@ static void zynq_init(MachineState *machine)
dev = qdev_new(TYPE_ZYNQ_XADC);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8007100);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-GIC_INTERNAL]);
dev = qdev_new("pl330");
object_property_set_link(OBJECT(dev), "memory",
@@ -370,17 +364,86 @@ static void zynq_init(MachineState *machine)
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, 0xF8003000);
- sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */
+ sysbus_connect_irq(busdev, 0, pic[45-GIC_INTERNAL]); /* abort irq line */
for (n = 0; n < ARRAY_SIZE(dma_irqs); ++n) { /* event irqs */
- sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]);
+ sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - GIC_INTERNAL]);
}
dev = qdev_new("xlnx.ps7-dev-cfg");
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0, pic[40 - IRQ_OFFSET]);
+ sysbus_connect_irq(busdev, 0, pic[40 - GIC_INTERNAL]);
sysbus_mmio_map(busdev, 0, 0xF8007000);
+ /*
+ * Refer to the ug585-Zynq-7000-TRM manual B.3 (Module Summary) and
+ * the zynq-7000.dtsi. Add placeholders for unimplemented devices.
+ */
+ create_unimplemented_device("zynq.i2c0", 0xE0004000, 4 * KiB);
+ create_unimplemented_device("zynq.i2c1", 0xE0005000, 4 * KiB);
+ create_unimplemented_device("zynq.can0", 0xE0008000, 4 * KiB);
+ create_unimplemented_device("zynq.can1", 0xE0009000, 4 * KiB);
+ create_unimplemented_device("zynq.gpio", 0xE000A000, 4 * KiB);
+ create_unimplemented_device("zynq.smcc", 0xE000E000, 4 * KiB);
+
+ /* Direct Memory Access Controller, PL330, Non-Secure Mode */
+ create_unimplemented_device("zynq.dma_ns", 0xF8004000, 4 * KiB);
+
+ /* System Watchdog Timer Registers */
+ create_unimplemented_device("zynq.swdt", 0xF8005000, 4 * KiB);
+
+ /* DDR memory controller */
+ create_unimplemented_device("zynq.ddrc", 0xF8006000, 4 * KiB);
+
+ /* AXI_HP Interface (AFI) */
+ create_unimplemented_device("zynq.axi_hp0", 0xF8008000, 0x28);
+ create_unimplemented_device("zynq.axi_hp1", 0xF8009000, 0x28);
+ create_unimplemented_device("zynq.axi_hp2", 0xF800A000, 0x28);
+ create_unimplemented_device("zynq.axi_hp3", 0xF800B000, 0x28);
+
+ create_unimplemented_device("zynq.efuse", 0xF800d000, 0x20);
+
+ /* Embedded Trace Buffer */
+ create_unimplemented_device("zynq.etb", 0xF8801000, 4 * KiB);
+
+ /* Cross Trigger Interface, ETB and TPIU */
+ create_unimplemented_device("zynq.cti_etb_tpiu", 0xF8802000, 4 * KiB);
+
+ /* Trace Port Interface Unit */
+ create_unimplemented_device("zynq.tpiu", 0xF8803000, 4 * KiB);
+
+ /* CoreSight Trace Funnel */
+ create_unimplemented_device("zynq.funnel", 0xF8804000, 4 * KiB);
+
+ /* Instrumentation Trace Macrocell */
+ create_unimplemented_device("zynq.itm", 0xF8805000, 4 * KiB);
+
+ /* Cross Trigger Interface, FTM */
+ create_unimplemented_device("zynq.cti_ftm", 0xF8809000, 4 * KiB);
+
+ /* Fabric Trace Macrocell */
+ create_unimplemented_device("zynq.ftm", 0xF880B000, 4 * KiB);
+
+ /* Cortex A9 Performance Monitoring Unit, CPU */
+ create_unimplemented_device("cortex-a9.pmu0", 0xF8891000, 4 * KiB);
+ create_unimplemented_device("cortex-a9.pmu1", 0xF8893000, 4 * KiB);
+
+ /* Cross Trigger Interface, CPU */
+ create_unimplemented_device("zynq.cpu_cti0", 0xF8898000, 4 * KiB);
+ create_unimplemented_device("zynq.cpu_cti1", 0xF8899000, 4 * KiB);
+
+ /* CoreSight PTM-A9, CPU */
+ create_unimplemented_device("cortex-a9.ptm0", 0xF889c000, 4 * KiB);
+ create_unimplemented_device("cortex-a9.ptm1", 0xF889d000, 4 * KiB);
+
+ /* AMBA NIC301 TrustZone */
+ create_unimplemented_device("zynq.trustZone", 0xF8900000, 0x20);
+
+ /* AMBA Network Interconnect Advanced Quality of Service (QoS-301) */
+ create_unimplemented_device("zynq.qos301_cpu", 0xF8946000, 0x130);
+ create_unimplemented_device("zynq.qos301_dmac", 0xF8947000, 0x130);
+ create_unimplemented_device("zynq.qos301_iou", 0xF8948000, 0x130);
+
zynq_binfo.ram_size = machine->ram_size;
zynq_binfo.board_id = 0xd32;
zynq_binfo.loader_start = 0;
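The block of create_unimplemented_device() calls added above does not model any behaviour: each call maps a background region whose accesses are reported through the LOG_UNIMP channel, so guest pokes at these blocks become visible with (other options elided):

    $ qemu-system-arm -M xilinx-zynq-a9 -d unimp ...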
@@ -390,7 +453,7 @@ static void zynq_init(MachineState *machine)
arm_load_kernel(zynq_machine->cpu[0], machine, &zynq_binfo);
}
-static void zynq_machine_class_init(ObjectClass *oc, void *data)
+static void zynq_machine_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
ARM_CPU_TYPE_NAME("cortex-a9"),
@@ -398,10 +461,9 @@ static void zynq_machine_class_init(ObjectClass *oc, void *data)
};
MachineClass *mc = MACHINE_CLASS(oc);
ObjectProperty *prop;
- mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9";
+ mc->desc = "Xilinx Zynq 7000 Platform Baseboard for Cortex-A9";
mc->init = zynq_init;
mc->max_cpus = ZYNQ_MAX_CPUS;
- mc->no_sdcard = 1;
mc->ignore_memory_transaction_failures = true;
mc->valid_cpu_types = valid_cpu_types;
mc->default_ram_id = "zynq.ext_ram";
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index 962f98f..adadbb7 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/block/flash.h"
#include "hw/boards.h"
#include "hw/sysbus.h"
@@ -761,9 +761,9 @@ static void versal_virt_init(MachineState *machine)
if (!flash_klass ||
object_class_is_abstract(flash_klass) ||
!object_class_dynamic_cast(flash_klass, TYPE_M25P80)) {
- error_setg(&error_fatal, "'%s' is either abstract or"
+ error_report("'%s' is either abstract or"
" not a subtype of m25p80", s->ospi_model);
- return;
+ exit(1);
}
}
@@ -808,7 +808,7 @@ static void versal_virt_machine_finalize(Object *obj)
g_free(s->ospi_model);
}
-static void versal_virt_machine_class_init(ObjectClass *oc, void *data)
+static void versal_virt_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -818,6 +818,7 @@ static void versal_virt_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS;
mc->default_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS;
mc->no_cdrom = true;
+ mc->auto_create_sdcard = true;
mc->default_ram_id = "ddr";
object_class_property_add_str(oc, "ospi-flash", versal_get_ospi_model,
versal_set_ospi_model);
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index 50cb060..a42b9e7 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -12,14 +12,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "system/system.h"
#include "hw/arm/boot.h"
-#include "kvm_arm.h"
#include "hw/misc/unimp.h"
#include "hw/arm/xlnx-versal.h"
#include "qemu/log.h"
@@ -258,14 +256,23 @@ static void versal_create_gems(Versal *s, qemu_irq *pic)
char *name = g_strdup_printf("gem%d", i);
DeviceState *dev;
MemoryRegion *mr;
+ OrIRQState *or_irq;
object_initialize_child(OBJECT(s), name, &s->lpd.iou.gem[i],
TYPE_CADENCE_GEM);
+ or_irq = &s->lpd.iou.gem_irq_orgate[i];
+ object_initialize_child(OBJECT(s), "gem-irq-orgate[*]",
+ or_irq, TYPE_OR_IRQ);
dev = DEVICE(&s->lpd.iou.gem[i]);
qemu_configure_nic_device(dev, true, NULL);
object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort);
object_property_set_int(OBJECT(dev), "num-priority-queues", 2,
&error_abort);
+ object_property_set_int(OBJECT(or_irq),
+ "num-lines", 2, &error_fatal);
+ qdev_realize(DEVICE(or_irq), NULL, &error_fatal);
+ qdev_connect_gpio_out(DEVICE(or_irq), 0, pic[irqs[i]]);
+
object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps),
&error_abort);
sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -273,7 +280,8 @@ static void versal_create_gems(Versal *s, qemu_irq *pic)
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
memory_region_add_subregion(&s->mr_ps, addrs[i], mr);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(or_irq), 0));
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, qdev_get_gpio_in(DEVICE(or_irq), 1));
g_free(name);
}
}
@@ -958,17 +966,16 @@ static void versal_init(Object *obj)
"mr-rpu-ps-alias", &s->mr_ps, 0, UINT64_MAX);
}
-static Property versal_properties[] = {
+static const Property versal_properties[] = {
DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_LINK("canbus0", Versal, lpd.iou.canbus[0],
TYPE_CAN_BUS, CanBusState *),
DEFINE_PROP_LINK("canbus1", Versal, lpd.iou.canbus[1],
TYPE_CAN_BUS, CanBusState *),
- DEFINE_PROP_END_OF_LIST()
};
-static void versal_class_init(ObjectClass *klass, void *data)
+static void versal_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
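
A minimal sketch (not part of the patch) of the IRQ OR-gate pattern that versal_create_gems() adopts above, where a device's two interrupt outputs are merged into a single interrupt-controller input; the helper name and the orgate, dev and pic_in parameters are placeholders, not symbols from the tree.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/or-irq.h"
#include "hw/sysbus.h"

/* Illustrative only: wire two sysbus IRQ lines through one OR gate. */
static void wire_two_irqs_through_orgate(Object *parent, OrIRQState *orgate,
                                         SysBusDevice *dev, qemu_irq pic_in)
{
    object_initialize_child(parent, "irq-orgate", orgate, TYPE_OR_IRQ);

    /* the gate has as many inputs as device IRQ lines being merged */
    object_property_set_int(OBJECT(orgate), "num-lines", 2, &error_fatal);
    qdev_realize(DEVICE(orgate), NULL, &error_fatal);

    /* the gate's single output feeds the interrupt controller input */
    qdev_connect_gpio_out(DEVICE(orgate), 0, pic_in);

    /* both device IRQ lines become inputs of the gate */
    sysbus_connect_irq(dev, 0, qdev_get_gpio_in(DEVICE(orgate), 0));
    sysbus_connect_irq(dev, 1, qdev_get_gpio_in(DEVICE(orgate), 1));
}
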
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index 4667cb3..14b6641 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -22,7 +22,7 @@
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "qom/object.h"
#include "net/can_emu.h"
#include "audio/audio.h"
@@ -267,7 +267,7 @@ static void xlnx_zcu102_machine_instance_init(Object *obj)
0);
}
-static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
+static void xlnx_zcu102_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -280,6 +280,7 @@ static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = XLNX_ZYNQMP_NUM_APU_CPUS + XLNX_ZYNQMP_NUM_RPU_CPUS;
mc->default_cpus = XLNX_ZYNQMP_NUM_APU_CPUS;
mc->default_ram_id = "ddr-ram";
+ mc->auto_create_sdcard = true;
machine_add_audiodev_property(mc);
object_class_property_add_bool(oc, "secure", zcu102_get_secure,
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index afeb3f8..ec96a46 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -22,9 +22,7 @@
#include "hw/intc/arm_gic_common.h"
#include "hw/misc/unimp.h"
#include "hw/boards.h"
-#include "sysemu/kvm.h"
-#include "sysemu/sysemu.h"
-#include "kvm_arm.h"
+#include "system/system.h"
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"
@@ -394,6 +392,8 @@ static void xlnx_zynqmp_init(Object *obj)
for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) {
object_initialize_child(obj, "gem[*]", &s->gem[i], TYPE_CADENCE_GEM);
+ object_initialize_child(obj, "gem-irq-orgate[*]",
+ &s->gem_irq_orgate[i], TYPE_OR_IRQ);
}
for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) {
@@ -625,12 +625,19 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
&error_abort);
object_property_set_int(OBJECT(&s->gem[i]), "num-priority-queues", 2,
&error_abort);
+ object_property_set_int(OBJECT(&s->gem_irq_orgate[i]),
+ "num-lines", 2, &error_fatal);
+ qdev_realize(DEVICE(&s->gem_irq_orgate[i]), NULL, &error_fatal);
+ qdev_connect_gpio_out(DEVICE(&s->gem_irq_orgate[i]), 0, gic_spi[gem_intr[i]]);
+
if (!sysbus_realize(SYS_BUS_DEVICE(&s->gem[i]), errp)) {
return;
}
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem[i]), 0, gem_addr[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 0,
- gic_spi[gem_intr[i]]);
+ qdev_get_gpio_in(DEVICE(&s->gem_irq_orgate[i]), 0));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 1,
+ qdev_get_gpio_in(DEVICE(&s->gem_irq_orgate[i]), 1));
}
for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) {
@@ -680,16 +687,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
* - SDIO Specification Version 3.0
* - eMMC Specification Version 4.51
*/
- if (!object_property_set_uint(sdhci, "sd-spec-version", 3, errp)) {
- return;
- }
- if (!object_property_set_uint(sdhci, "capareg", SDHCI_CAPABILITIES,
- errp)) {
- return;
- }
- if (!object_property_set_uint(sdhci, "uhs", UHS_I, errp)) {
- return;
- }
+ object_property_set_uint(sdhci, "sd-spec-version", 3, &error_abort);
+ object_property_set_uint(sdhci, "capareg", SDHCI_CAPABILITIES,
+ &error_abort);
+ object_property_set_uint(sdhci, "uhs", UHS_I, &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(sdhci), errp)) {
return;
}
@@ -754,14 +755,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
xlnx_zynqmp_create_unimp_mmio(s);
for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) {
- if (!object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128,
- errp)) {
- return;
- }
- if (!object_property_set_link(OBJECT(&s->gdma[i]), "dma",
- OBJECT(system_memory), errp)) {
- return;
- }
+ object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128,
+ &error_abort);
+ object_property_set_link(OBJECT(&s->gdma[i]), "dma",
+ OBJECT(system_memory), &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->gdma[i]), errp)) {
return;
}
@@ -802,10 +799,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0,
qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 0));
- if (!object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma",
- OBJECT(&s->qspi_dma), errp)) {
- return;
- }
+ object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma",
+ OBJECT(&s->qspi_dma), &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi), errp)) {
return;
}
@@ -824,10 +819,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
}
for (i = 0; i < XLNX_ZYNQMP_NUM_USB; i++) {
- if (!object_property_set_link(OBJECT(&s->usb[i].sysbus_xhci), "dma",
- OBJECT(system_memory), errp)) {
- return;
- }
+ object_property_set_link(OBJECT(&s->usb[i].sysbus_xhci), "dma",
+ OBJECT(system_memory), &error_abort);
qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "intrs", 4);
qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "slots", 2);
@@ -848,7 +841,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
}
}
-static Property xlnx_zynqmp_props[] = {
+static const Property xlnx_zynqmp_props[] = {
DEFINE_PROP_STRING("boot-cpu", XlnxZynqMPState, boot_cpu),
DEFINE_PROP_BOOL("secure", XlnxZynqMPState, secure, false),
DEFINE_PROP_BOOL("virtualization", XlnxZynqMPState, virt, false),
@@ -858,10 +851,9 @@ static Property xlnx_zynqmp_props[] = {
CanBusState *),
DEFINE_PROP_LINK("canbus1", XlnxZynqMPState, canbus[1], TYPE_CAN_BUS,
CanBusState *),
- DEFINE_PROP_END_OF_LIST()
};
-static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data)
+static void xlnx_zynqmp_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
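
A short sketch of the error-handling split applied throughout the xlnx_zynqmp_realize() hunks above: property writes with constant, known-valid values switch to &error_abort, while realize calls keep propagating errp. The helper and its argument are hypothetical; only the two QEMU calls are taken from the hunks.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"

/* hypothetical helper mirroring the sdhci part of xlnx_zynqmp_realize() */
static bool configure_and_realize_sdhci(Object *sdhci, Error **errp)
{
    /*
     * A constant, in-range value can only fail on a programming error,
     * so &error_abort replaces the errp check-and-return.
     */
    object_property_set_uint(sdhci, "sd-spec-version", 3, &error_abort);

    /*
     * Realize may fail for reasons the caller cannot rule out (backend or
     * user configuration), so the error is still propagated via errp.
     */
    return sysbus_realize(SYS_BUS_DEVICE(sdhci), errp);
}
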
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
deleted file mode 100644
index fc5672e..0000000
--- a/hw/arm/z2.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * PXA270-based Zipit Z2 device
- *
- * Copyright (c) 2011 by Vasily Khoruzhick <anarsoul@gmail.com>
- *
- * Code is based on mainstone platform.
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "hw/arm/pxa.h"
-#include "hw/arm/boot.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "hw/ssi/ssi.h"
-#include "migration/vmstate.h"
-#include "hw/boards.h"
-#include "hw/block/flash.h"
-#include "ui/console.h"
-#include "hw/audio/wm8750.h"
-#include "audio/audio.h"
-#include "exec/address-spaces.h"
-#include "qom/object.h"
-#include "qapi/error.h"
-#include "trace.h"
-
-static const struct keymap map[0x100] = {
- [0 ... 0xff] = { -1, -1 },
- [0x3b] = {0, 0}, /* Option = F1 */
- [0xc8] = {0, 1}, /* Up */
- [0xd0] = {0, 2}, /* Down */
- [0xcb] = {0, 3}, /* Left */
- [0xcd] = {0, 4}, /* Right */
- [0xcf] = {0, 5}, /* End */
- [0x0d] = {0, 6}, /* KPPLUS */
- [0xc7] = {1, 0}, /* Home */
- [0x10] = {1, 1}, /* Q */
- [0x17] = {1, 2}, /* I */
- [0x22] = {1, 3}, /* G */
- [0x2d] = {1, 4}, /* X */
- [0x1c] = {1, 5}, /* Enter */
- [0x0c] = {1, 6}, /* KPMINUS */
- [0xc9] = {2, 0}, /* PageUp */
- [0x11] = {2, 1}, /* W */
- [0x18] = {2, 2}, /* O */
- [0x23] = {2, 3}, /* H */
- [0x2e] = {2, 4}, /* C */
- [0x38] = {2, 5}, /* LeftAlt */
- [0xd1] = {3, 0}, /* PageDown */
- [0x12] = {3, 1}, /* E */
- [0x19] = {3, 2}, /* P */
- [0x24] = {3, 3}, /* J */
- [0x2f] = {3, 4}, /* V */
- [0x2a] = {3, 5}, /* LeftShift */
- [0x01] = {4, 0}, /* Esc */
- [0x13] = {4, 1}, /* R */
- [0x1e] = {4, 2}, /* A */
- [0x25] = {4, 3}, /* K */
- [0x30] = {4, 4}, /* B */
- [0x1d] = {4, 5}, /* LeftCtrl */
- [0x0f] = {5, 0}, /* Tab */
- [0x14] = {5, 1}, /* T */
- [0x1f] = {5, 2}, /* S */
- [0x26] = {5, 3}, /* L */
- [0x31] = {5, 4}, /* N */
- [0x39] = {5, 5}, /* Space */
- [0x3c] = {6, 0}, /* Stop = F2 */
- [0x15] = {6, 1}, /* Y */
- [0x20] = {6, 2}, /* D */
- [0x0e] = {6, 3}, /* Backspace */
- [0x32] = {6, 4}, /* M */
- [0x33] = {6, 5}, /* Comma */
- [0x3d] = {7, 0}, /* Play = F3 */
- [0x16] = {7, 1}, /* U */
- [0x21] = {7, 2}, /* F */
- [0x2c] = {7, 3}, /* Z */
- [0x27] = {7, 4}, /* Semicolon */
- [0x34] = {7, 5}, /* Dot */
-};
-
-#define Z2_RAM_SIZE 0x02000000
-#define Z2_FLASH_BASE 0x00000000
-#define Z2_FLASH_SIZE 0x00800000
-
-static struct arm_boot_info z2_binfo = {
- .loader_start = PXA2XX_SDRAM_BASE,
- .ram_size = Z2_RAM_SIZE,
-};
-
-#define Z2_GPIO_SD_DETECT 96
-#define Z2_GPIO_AC_IN 0
-#define Z2_GPIO_KEY_ON 1
-#define Z2_GPIO_LCD_CS 88
-
-struct ZipitLCD {
- SSIPeripheral ssidev;
- int32_t selected;
- int32_t enabled;
- uint8_t buf[3];
- uint32_t cur_reg;
- int pos;
-};
-
-#define TYPE_ZIPIT_LCD "zipit-lcd"
-OBJECT_DECLARE_SIMPLE_TYPE(ZipitLCD, ZIPIT_LCD)
-
-static uint32_t zipit_lcd_transfer(SSIPeripheral *dev, uint32_t value)
-{
- ZipitLCD *z = ZIPIT_LCD(dev);
- uint16_t val;
-
- trace_z2_lcd_reg_update(z->cur_reg, z->buf[0], z->buf[1], z->buf[2], value);
- if (z->selected) {
- z->buf[z->pos] = value & 0xff;
- z->pos++;
- }
- if (z->pos == 3) {
- switch (z->buf[0]) {
- case 0x74:
- z->cur_reg = z->buf[2];
- break;
- case 0x76:
- val = z->buf[1] << 8 | z->buf[2];
- if (z->cur_reg == 0x22 && val == 0x0000) {
- z->enabled = 1;
- trace_z2_lcd_enable_disable_result("enabled");
- } else if (z->cur_reg == 0x10 && val == 0x0000) {
- z->enabled = 0;
- trace_z2_lcd_enable_disable_result("disabled");
- }
- break;
- default:
- break;
- }
- z->pos = 0;
- }
- return 0;
-}
-
-static void z2_lcd_cs(void *opaque, int line, int level)
-{
- ZipitLCD *z2_lcd = opaque;
- z2_lcd->selected = !level;
-}
-
-static void zipit_lcd_realize(SSIPeripheral *dev, Error **errp)
-{
- ZipitLCD *z = ZIPIT_LCD(dev);
- z->selected = 0;
- z->enabled = 0;
- z->pos = 0;
-}
-
-static const VMStateDescription vmstate_zipit_lcd_state = {
- .name = "zipit-lcd",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_SSI_PERIPHERAL(ssidev, ZipitLCD),
- VMSTATE_INT32(selected, ZipitLCD),
- VMSTATE_INT32(enabled, ZipitLCD),
- VMSTATE_BUFFER(buf, ZipitLCD),
- VMSTATE_UINT32(cur_reg, ZipitLCD),
- VMSTATE_INT32(pos, ZipitLCD),
- VMSTATE_END_OF_LIST(),
- }
-};
-
-static void zipit_lcd_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
-
- k->realize = zipit_lcd_realize;
- k->transfer = zipit_lcd_transfer;
- dc->vmsd = &vmstate_zipit_lcd_state;
-}
-
-static const TypeInfo zipit_lcd_info = {
- .name = TYPE_ZIPIT_LCD,
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(ZipitLCD),
- .class_init = zipit_lcd_class_init,
-};
-
-#define TYPE_AER915 "aer915"
-OBJECT_DECLARE_SIMPLE_TYPE(AER915State, AER915)
-
-struct AER915State {
- I2CSlave parent_obj;
-
- int len;
- uint8_t buf[3];
-};
-
-static int aer915_send(I2CSlave *i2c, uint8_t data)
-{
- AER915State *s = AER915(i2c);
-
- s->buf[s->len] = data;
- if (s->len++ > 2) {
- trace_z2_aer915_send_too_long(s->len);
- return 1;
- }
-
- if (s->len == 2) {
- trace_z2_aer915_send(s->buf[0], s->buf[1]);
- }
-
- return 0;
-}
-
-static int aer915_event(I2CSlave *i2c, enum i2c_event event)
-{
- AER915State *s = AER915(i2c);
-
- trace_z2_aer915_event(s->len, event);
- switch (event) {
- case I2C_START_SEND:
- s->len = 0;
- break;
- case I2C_START_RECV:
- break;
- case I2C_FINISH:
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static uint8_t aer915_recv(I2CSlave *slave)
-{
- AER915State *s = AER915(slave);
- int retval = 0x00;
-
- switch (s->buf[0]) {
- /* Return hardcoded battery voltage,
- * 0xf0 means ~4.1V
- */
- case 0x02:
- retval = 0xf0;
- break;
- /* Return 0x00 for other regs,
- * we don't know what they are for,
- * anyway they return 0x00 on real hardware.
- */
- default:
- break;
- }
-
- return retval;
-}
-
-static const VMStateDescription vmstate_aer915_state = {
- .name = "aer915",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(len, AER915State),
- VMSTATE_BUFFER(buf, AER915State),
- VMSTATE_END_OF_LIST(),
- }
-};
-
-static void aer915_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
-
- k->event = aer915_event;
- k->recv = aer915_recv;
- k->send = aer915_send;
- dc->vmsd = &vmstate_aer915_state;
-}
-
-static const TypeInfo aer915_info = {
- .name = TYPE_AER915,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(AER915State),
- .class_init = aer915_class_init,
-};
-
-#define FLASH_SECTOR_SIZE (64 * KiB)
-
-static void z2_init(MachineState *machine)
-{
- PXA2xxState *mpu;
- DriveInfo *dinfo;
- void *z2_lcd;
- I2CBus *bus;
- DeviceState *wm;
- I2CSlave *i2c_dev;
-
- /* Setup CPU & memory */
- mpu = pxa270_init(z2_binfo.ram_size, machine->cpu_type);
-
- dinfo = drive_get(IF_PFLASH, 0, 0);
- pflash_cfi01_register(Z2_FLASH_BASE, "z2.flash0", Z2_FLASH_SIZE,
- dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
- FLASH_SECTOR_SIZE, 4, 0, 0, 0, 0, 0);
-
- /* setup keypad */
- pxa27x_register_keypad(mpu->kp, map, 0x100);
-
- /* MMC/SD host */
- pxa2xx_mmci_handlers(mpu->mmc,
- NULL,
- qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT));
-
- type_register_static(&zipit_lcd_info);
- type_register_static(&aer915_info);
- z2_lcd = ssi_create_peripheral(mpu->ssp[1], TYPE_ZIPIT_LCD);
- bus = pxa2xx_i2c_bus(mpu->i2c[0]);
-
- i2c_slave_create_simple(bus, TYPE_AER915, 0x55);
-
- i2c_dev = i2c_slave_new(TYPE_WM8750, 0x1b);
- wm = DEVICE(i2c_dev);
-
- if (machine->audiodev) {
- qdev_prop_set_string(wm, "audiodev", machine->audiodev);
- }
- i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort);
-
- mpu->i2s->opaque = wm;
- mpu->i2s->codec_out = wm8750_dac_dat;
- mpu->i2s->codec_in = wm8750_adc_dat;
- wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s);
-
- qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS,
- qemu_allocate_irq(z2_lcd_cs, z2_lcd, 0));
-
- z2_binfo.board_id = 0x6dd;
- arm_load_kernel(mpu->cpu, machine, &z2_binfo);
-}
-
-static void z2_machine_init(MachineClass *mc)
-{
- mc->desc = "Zipit Z2 (PXA27x)";
- mc->init = z2_init;
- mc->ignore_memory_transaction_failures = true;
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5");
- mc->deprecation_reason = "machine is old and unmaintained";
-
- machine_add_audiodev_property(mc);
-}
-
-DEFINE_MACHINE("z2", z2_machine_init)
diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c
index 3f0053f..eb7a847 100644
--- a/hw/audio/ac97.c
+++ b/hw/audio/ac97.c
@@ -24,7 +24,7 @@
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#include "ac97.h"
@@ -886,7 +886,7 @@ static void nabm_writel(void *opaque, uint32_t addr, uint32_t val)
static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r,
int max, int *stop)
{
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
uint32_t addr = r->bd.addr;
uint32_t temp = r->picb << 1;
uint32_t written = 0;
@@ -959,7 +959,7 @@ static void write_bup(AC97LinkState *s, int elapsed)
static int read_audio(AC97LinkState *s, AC97BusMasterRegs *r,
int max, int *stop)
{
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
uint32_t addr = r->bd.addr;
uint32_t temp = r->picb << 1;
uint32_t nread = 0;
@@ -1324,12 +1324,11 @@ static void ac97_exit(PCIDevice *dev)
AUD_remove_card(&s->card);
}
-static Property ac97_properties[] = {
+static const Property ac97_properties[] = {
DEFINE_AUDIO_PROPERTIES(AC97LinkState, card),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ac97_class_init(ObjectClass *klass, void *data)
+static void ac97_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1344,7 +1343,7 @@ static void ac97_class_init(ObjectClass *klass, void *data)
dc->desc = "Intel 82801AA AC97 Audio";
dc->vmsd = &vmstate_ac97;
device_class_set_props(dc, ac97_properties);
- dc->reset = ac97_on_reset;
+ device_class_set_legacy_reset(dc, ac97_on_reset);
}
static const TypeInfo ac97_info = {
@@ -1352,7 +1351,7 @@ static const TypeInfo ac97_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(AC97LinkState),
.class_init = ac97_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
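
The ac97.c hunks show three mechanical conversions that repeat through the rest of this series: property arrays become static const Property without DEFINE_PROP_END_OF_LIST(), class_init callbacks take const void *data, and dc->reset assignments go through device_class_set_legacy_reset(). A minimal sketch of the resulting shape for a hypothetical device (MyDevState, mydev_reset and the "debug" property are invented for illustration):

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"

typedef struct {                 /* hypothetical device state */
    DeviceState parent_obj;
    uint32_t debug;
} MyDevState;

static void mydev_reset(DeviceState *dev)
{
    MyDevState *s = (MyDevState *)dev;  /* real code would use a QOM cast macro */
    s->debug = 0;
}

static const Property mydev_properties[] = {
    DEFINE_PROP_UINT32("debug", MyDevState, debug, 0),
    /* no DEFINE_PROP_END_OF_LIST(): the array length is taken at the
       device_class_set_props() call site */
};

static void mydev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, mydev_properties);
    /* legacy DeviceState reset handlers now go through this helper
       instead of assigning dc->reset directly */
    device_class_set_legacy_reset(dc, mydev_reset);
}
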
diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c
index bd73806..1f29a7e 100644
--- a/hw/audio/adlib.c
+++ b/hw/audio/adlib.c
@@ -297,14 +297,13 @@ static void adlib_realizefn (DeviceState *dev, Error **errp)
portio_list_add (&s->port_list, isa_address_space_io(&s->parent_obj), 0);
}
-static Property adlib_properties[] = {
+static const Property adlib_properties[] = {
DEFINE_AUDIO_PROPERTIES(AdlibState, card),
DEFINE_PROP_UINT32 ("iobase", AdlibState, port, 0x220),
DEFINE_PROP_UINT32 ("freq", AdlibState, freq, 44100),
- DEFINE_PROP_END_OF_LIST (),
};
-static void adlib_class_initfn (ObjectClass *klass, void *data)
+static void adlib_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS (klass);
diff --git a/hw/audio/asc.c b/hw/audio/asc.c
index 8054163..edd42d6 100644
--- a/hw/audio/asc.c
+++ b/hw/audio/asc.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "audio/audio.h"
@@ -406,7 +407,6 @@ static void asc_fifo_write(void *opaque, hwaddr addr, uint64_t value,
} else {
fs->fifo[addr] = value;
}
- return;
}
static const MemoryRegionOps asc_fifo_ops = {
@@ -654,11 +654,17 @@ static void asc_realize(DeviceState *dev, Error **errp)
s->voice = AUD_open_out(&s->card, s->voice, "asc.out", s, asc_out_cb,
&as);
+ if (!s->voice) {
+ AUD_remove_card(&s->card);
+ error_setg(errp, "Initializing audio stream failed");
+ return;
+ }
+
s->shift = 1;
s->samples = AUD_get_buffer_size_out(s->voice) >> s->shift;
s->mixbuf = g_malloc0(s->samples << s->shift);
- s->silentbuf = g_malloc0(s->samples << s->shift);
+ s->silentbuf = g_malloc(s->samples << s->shift);
memset(s->silentbuf, 0x80, s->samples << s->shift);
/* Add easc registers if required */
@@ -695,13 +701,12 @@ static void asc_init(Object *obj)
sysbus_init_mmio(sbd, &s->asc);
}
-static Property asc_properties[] = {
+static const Property asc_properties[] = {
DEFINE_AUDIO_PROPERTIES(ASCState, card),
DEFINE_PROP_UINT8("asctype", ASCState, type, ASC_TYPE_ASC),
- DEFINE_PROP_END_OF_LIST(),
};
-static void asc_class_init(ObjectClass *oc, void *data)
+static void asc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
diff --git a/hw/audio/cs4231.c b/hw/audio/cs4231.c
index 967caa7..97cceb4 100644
--- a/hw/audio/cs4231.c
+++ b/hw/audio/cs4231.c
@@ -160,11 +160,11 @@ static void cs4231_init(Object *obj)
sysbus_init_irq(dev, &s->irq);
}
-static void cs4231_class_init(ObjectClass *klass, void *data)
+static void cs4231_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = cs_reset;
+ device_class_set_legacy_reset(dc, cs_reset);
dc->vmsd = &vmstate_cs4231;
}
diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c
index 9ef57f0..6dfff20 100644
--- a/hw/audio/cs4231a.c
+++ b/hw/audio/cs4231a.c
@@ -528,7 +528,7 @@ static int cs_write_audio (CSState *s, int nchan, int dma_pos,
int dma_len, int len)
{
int temp, net;
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma);
temp = len;
@@ -547,7 +547,7 @@ static int cs_write_audio (CSState *s, int nchan, int dma_pos,
copied = k->read_memory(s->isa_dma, nchan, tmpbuf, dma_pos, to_copy);
if (s->tab) {
int i;
- int16_t linbuf[4096];
+ QEMU_UNINITIALIZED int16_t linbuf[4096];
for (i = 0; i < copied; ++i)
linbuf[i] = s->tab[tmpbuf[i]];
@@ -682,6 +682,10 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp)
return;
}
+ if (s->irq >= ISA_NUM_IRQS) {
+ error_setg(errp, "Invalid IRQ %d (max %d)", s->irq, ISA_NUM_IRQS - 1);
+ return;
+ }
s->pic = isa_bus_get_irq(bus, s->irq);
k = ISADMA_GET_CLASS(s->isa_dma);
k->register_channel(s->isa_dma, s->dma, cs_dma_read, s);
@@ -689,20 +693,19 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp)
isa_register_ioport (d, &s->ioports, s->port);
}
-static Property cs4231a_properties[] = {
+static const Property cs4231a_properties[] = {
DEFINE_AUDIO_PROPERTIES(CSState, card),
DEFINE_PROP_UINT32 ("iobase", CSState, port, 0x534),
DEFINE_PROP_UINT32 ("irq", CSState, irq, 9),
DEFINE_PROP_UINT32 ("dma", CSState, dma, 3),
- DEFINE_PROP_END_OF_LIST (),
};
-static void cs4231a_class_initfn (ObjectClass *klass, void *data)
+static void cs4231a_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS (klass);
dc->realize = cs4231a_realizefn;
- dc->reset = cs4231a_reset;
+ device_class_set_legacy_reset(dc, cs4231a_reset);
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->desc = "Crystal Semiconductor CS4231A";
dc->vmsd = &vmstate_cs4231a;
diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c
index 4ab61d3..a6a32a6 100644
--- a/hw/audio/es1370.c
+++ b/hw/audio/es1370.c
@@ -32,7 +32,7 @@
#include "migration/vmstate.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#include "trace.h"
@@ -604,7 +604,7 @@ static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size)
static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel,
int max, bool *irq)
{
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
size_t to_transfer;
uint32_t addr = d->frame_addr;
int sc = d->scount & 0xffff;
@@ -868,12 +868,11 @@ static void es1370_exit(PCIDevice *dev)
AUD_remove_card(&s->card);
}
-static Property es1370_properties[] = {
+static const Property es1370_properties[] = {
DEFINE_AUDIO_PROPERTIES(ES1370State, card),
- DEFINE_PROP_END_OF_LIST(),
};
-static void es1370_class_init (ObjectClass *klass, void *data)
+static void es1370_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS (klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS (klass);
@@ -888,7 +887,7 @@ static void es1370_class_init (ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->desc = "ENSONIQ AudioPCI ES1370";
dc->vmsd = &vmstate_es1370;
- dc->reset = es1370_on_reset;
+ device_class_set_legacy_reset(dc, es1370_on_reset);
device_class_set_props(dc, es1370_properties);
}
@@ -897,7 +896,7 @@ static const TypeInfo es1370_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof (ES1370State),
.class_init = es1370_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/audio/gus.c b/hw/audio/gus.c
index 4beb3fd..c36df02 100644
--- a/hw/audio/gus.c
+++ b/hw/audio/gus.c
@@ -183,7 +183,7 @@ static int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len)
{
GUSState *s = opaque;
IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma);
- char tmpbuf[4096];
+ QEMU_UNINITIALIZED char tmpbuf[4096];
int pos = dma_pos, mode, left = dma_len - dma_pos;
ldebug ("read DMA %#x %d\n", dma_pos, dma_len);
@@ -290,16 +290,15 @@ static void gus_realizefn (DeviceState *dev, Error **errp)
AUD_set_active_out (s->voice, 1);
}
-static Property gus_properties[] = {
+static const Property gus_properties[] = {
DEFINE_AUDIO_PROPERTIES(GUSState, card),
DEFINE_PROP_UINT32 ("freq", GUSState, freq, 44100),
DEFINE_PROP_UINT32 ("iobase", GUSState, port, 0x240),
DEFINE_PROP_UINT32 ("irq", GUSState, emu.gusirq, 7),
DEFINE_PROP_UINT32 ("dma", GUSState, emu.gusdma, 3),
- DEFINE_PROP_END_OF_LIST (),
};
-static void gus_class_initfn (ObjectClass *klass, void *data)
+static void gus_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS (klass);
diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c
index b22e486..66edad2 100644
--- a/hw/audio/hda-codec.c
+++ b/hw/audio/hda-codec.c
@@ -487,8 +487,7 @@ static void hda_audio_setup(HDAAudioStream *st)
if (st->output) {
if (use_timer) {
cb = hda_audio_output_cb;
- st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- hda_audio_output_timer, st);
+ timer_del(st->buft);
} else {
cb = hda_audio_compat_output_cb;
}
@@ -497,8 +496,7 @@ static void hda_audio_setup(HDAAudioStream *st)
} else {
if (use_timer) {
cb = hda_audio_input_cb;
- st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- hda_audio_input_timer, st);
+ timer_del(st->buft);
} else {
cb = hda_audio_compat_input_cb;
}
@@ -726,8 +724,12 @@ static void hda_audio_init(HDACodecDevice *hda,
st->gain_right = QEMU_HDA_AMP_STEPS;
st->compat_bpos = sizeof(st->compat_buf);
st->output = true;
+ st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ hda_audio_output_timer, st);
} else {
st->output = false;
+ st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ hda_audio_input_timer, st);
}
st->format = AC_FMT_TYPE_PCM | AC_FMT_BITS_16 |
(1 << AC_FMT_CHAN_SHIFT);
@@ -750,9 +752,7 @@ static void hda_audio_exit(HDACodecDevice *hda)
if (st->node == NULL) {
continue;
}
- if (a->use_timer) {
- timer_del(st->buft);
- }
+ timer_free(st->buft);
if (st->output) {
AUD_close_out(&a->card, st->voice.out);
} else {
@@ -857,12 +857,11 @@ static const VMStateDescription vmstate_hda_audio = {
}
};
-static Property hda_audio_properties[] = {
+static const Property hda_audio_properties[] = {
DEFINE_AUDIO_PROPERTIES(HDAAudioState, card),
DEFINE_PROP_UINT32("debug", HDAAudioState, debug, 0),
DEFINE_PROP_BOOL("mixer", HDAAudioState, mixer, true),
DEFINE_PROP_BOOL("use-timer", HDAAudioState, use_timer, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void hda_audio_init_output(HDACodecDevice *hda, Error **errp)
@@ -901,7 +900,7 @@ static void hda_audio_init_micro(HDACodecDevice *hda, Error **errp)
hda_audio_init(hda, desc, errp);
}
-static void hda_audio_base_class_init(ObjectClass *klass, void *data)
+static void hda_audio_base_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass);
@@ -910,7 +909,7 @@ static void hda_audio_base_class_init(ObjectClass *klass, void *data)
k->command = hda_audio_command;
k->stream = hda_audio_stream;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
- dc->reset = hda_audio_reset;
+ device_class_set_legacy_reset(dc, hda_audio_reset);
dc->vmsd = &vmstate_hda_audio;
device_class_set_props(dc, hda_audio_properties);
}
@@ -923,7 +922,7 @@ static const TypeInfo hda_audio_info = {
.abstract = true,
};
-static void hda_audio_output_class_init(ObjectClass *klass, void *data)
+static void hda_audio_output_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass);
@@ -938,7 +937,7 @@ static const TypeInfo hda_audio_output_info = {
.class_init = hda_audio_output_class_init,
};
-static void hda_audio_duplex_class_init(ObjectClass *klass, void *data)
+static void hda_audio_duplex_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass);
@@ -953,7 +952,7 @@ static const TypeInfo hda_audio_duplex_info = {
.class_init = hda_audio_duplex_class_init,
};
-static void hda_audio_micro_class_init(ObjectClass *klass, void *data)
+static void hda_audio_micro_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass);
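
A compact sketch of the QEMUTimer lifecycle that the hda-codec.c hunks move to: the timer is created once when the stream is set up, timer_del() only stops a pending expiry when the non-timer callback path is selected, and timer_free() releases it at teardown. The Stream type and function names are placeholders.

#include "qemu/osdep.h"
#include "qemu/timer.h"

typedef struct Stream {          /* hypothetical stream state */
    QEMUTimer *buft;
} Stream;

static void stream_cb(void *opaque)
{
    /* periodic buffer handling would go here */
}

static void stream_init(Stream *st)
{
    /* own the timer for the whole lifetime of the stream */
    st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, stream_cb, st);
}

static void stream_reconfigure(Stream *st)
{
    timer_del(st->buft);   /* stop a pending expiry; the timer stays allocated */
}

static void stream_fini(Stream *st)
{
    timer_free(st->buft);  /* releases the timer (current QEMU also cancels it) */
}
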
diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
index 9c54e60..b256c8c 100644
--- a/hw/audio/intel-hda.c
+++ b/hw/audio/intel-hda.c
@@ -30,16 +30,15 @@
#include "intel-hda.h"
#include "migration/vmstate.h"
#include "intel-hda-defs.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qapi/error.h"
#include "qom/object.h"
/* --------------------------------------------------------------------- */
/* hda bus */
-static Property hda_props[] = {
+static const Property hda_props[] = {
DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1),
- DEFINE_PROP_END_OF_LIST()
};
static const TypeInfo hda_codec_bus_info = {
@@ -1215,14 +1214,13 @@ static const VMStateDescription vmstate_intel_hda = {
}
};
-static Property intel_hda_properties[] = {
+static const Property intel_hda_properties[] = {
DEFINE_PROP_UINT32("debug", IntelHDAState, debug, 0),
DEFINE_PROP_ON_OFF_AUTO("msi", IntelHDAState, msi, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("old_msi_addr", IntelHDAState, old_msi_addr, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void intel_hda_class_init(ObjectClass *klass, void *data)
+static void intel_hda_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1231,12 +1229,12 @@ static void intel_hda_class_init(ObjectClass *klass, void *data)
k->exit = intel_hda_exit;
k->vendor_id = PCI_VENDOR_ID_INTEL;
k->class_id = PCI_CLASS_MULTIMEDIA_HD_AUDIO;
- dc->reset = intel_hda_reset;
+ device_class_set_legacy_reset(dc, intel_hda_reset);
dc->vmsd = &vmstate_intel_hda;
device_class_set_props(dc, intel_hda_properties);
}
-static void intel_hda_class_init_ich6(ObjectClass *klass, void *data)
+static void intel_hda_class_init_ich6(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1247,7 +1245,7 @@ static void intel_hda_class_init_ich6(ObjectClass *klass, void *data)
dc->desc = "Intel HD Audio Controller (ich6)";
}
-static void intel_hda_class_init_ich9(ObjectClass *klass, void *data)
+static void intel_hda_class_init_ich9(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1264,7 +1262,7 @@ static const TypeInfo intel_hda_info = {
.instance_size = sizeof(IntelHDAState),
.class_init = intel_hda_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -1282,7 +1280,7 @@ static const TypeInfo intel_hda_info_ich9 = {
.class_init = intel_hda_class_init_ich9,
};
-static void hda_codec_device_class_init(ObjectClass *klass, void *data)
+static void hda_codec_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = hda_codec_dev_realize;
diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c
index cc28544..c5c79d0 100644
--- a/hw/audio/marvell_88w8618.c
+++ b/hw/audio/marvell_88w8618.c
@@ -66,7 +66,7 @@ static void mv88w8618_audio_callback(void *opaque, int free_out, int free_in)
{
mv88w8618_audio_state *s = opaque;
int16_t *codec_buffer;
- int8_t buf[4096];
+ QEMU_UNINITIALIZED int8_t buf[4096];
int8_t *mem_buffer;
int pos, block_size;
@@ -287,12 +287,12 @@ static const VMStateDescription mv88w8618_audio_vmsd = {
}
};
-static void mv88w8618_audio_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_audio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mv88w8618_audio_realize;
- dc->reset = mv88w8618_audio_reset;
+ device_class_set_legacy_reset(dc, mv88w8618_audio_reset);
dc->vmsd = &mv88w8618_audio_vmsd;
dc->user_creatable = false;
}
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index a4b89f1..a419161 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -215,14 +215,13 @@ static const VMStateDescription vmstate_spk = {
}
};
-static Property pcspk_properties[] = {
+static const Property pcspk_properties[] = {
DEFINE_AUDIO_PROPERTIES(PCSpkState, card),
DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, 0x61),
DEFINE_PROP_BOOL("migrate", PCSpkState, migrate, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pcspk_class_initfn(ObjectClass *klass, void *data)
+static void pcspk_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c
index b435208..5d9d6c1 100644
--- a/hw/audio/pl041.c
+++ b/hw/audio/pl041.c
@@ -625,21 +625,20 @@ static const VMStateDescription vmstate_pl041 = {
}
};
-static Property pl041_device_properties[] = {
+static const Property pl041_device_properties[] = {
DEFINE_AUDIO_PROPERTIES(PL041State, codec.card),
/* Non-compact FIFO depth property */
DEFINE_PROP_UINT32("nc_fifo_depth", PL041State, fifo_depth,
DEFAULT_FIFO_DEPTH),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pl041_device_class_init(ObjectClass *klass, void *data)
+static void pl041_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pl041_realize;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
- dc->reset = pl041_device_reset;
+ device_class_set_legacy_reset(dc, pl041_device_reset);
dc->vmsd = &vmstate_pl041;
device_class_set_props(dc, pl041_device_properties);
}
diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c
index fd76e78..bac6411 100644
--- a/hw/audio/sb16.c
+++ b/hw/audio/sb16.c
@@ -1181,7 +1181,7 @@ static int write_audio (SB16State *s, int nchan, int dma_pos,
IsaDma *isa_dma = nchan == s->dma ? s->isa_dma : s->isa_hdma;
IsaDmaClass *k = ISADMA_GET_CLASS(isa_dma);
int temp, net;
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
temp = len;
net = 0;
@@ -1440,17 +1440,16 @@ static void sb16_realizefn (DeviceState *dev, Error **errp)
s->can_write = 1;
}
-static Property sb16_properties[] = {
+static const Property sb16_properties[] = {
DEFINE_AUDIO_PROPERTIES(SB16State, card),
DEFINE_PROP_UINT32 ("version", SB16State, ver, 0x0405), /* 4.5 */
DEFINE_PROP_UINT32 ("iobase", SB16State, port, 0x220),
DEFINE_PROP_UINT32 ("irq", SB16State, irq, 5),
DEFINE_PROP_UINT32 ("dma", SB16State, dma, 1),
DEFINE_PROP_UINT32 ("dma16", SB16State, hdma, 5),
- DEFINE_PROP_END_OF_LIST (),
};
-static void sb16_class_initfn (ObjectClass *klass, void *data)
+static void sb16_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS (klass);
diff --git a/hw/audio/soundhw.c b/hw/audio/soundhw.c
index b387b0e..d18fd9f 100644
--- a/hw/audio/soundhw.c
+++ b/hw/audio/soundhw.c
@@ -88,7 +88,8 @@ void select_soundhw(const char *name, const char *audiodev)
struct soundhw *c;
if (selected) {
- error_setg(&error_fatal, "only one -soundhw option is allowed");
+ error_report("only one -soundhw option is allowed");
+ exit(1);
}
for (c = soundhw; c->name; ++c) {
diff --git a/hw/audio/trace-events b/hw/audio/trace-events
index b1870ff..b8ef572 100644
--- a/hw/audio/trace-events
+++ b/hw/audio/trace-events
@@ -41,7 +41,6 @@ asc_update_irq(int irq, int a, int b) "set IRQ to %d (A: 0x%x B: 0x%x)"
#virtio-snd.c
virtio_snd_get_config(void *vdev, uint32_t jacks, uint32_t streams, uint32_t chmaps) "snd %p: get_config jacks=%"PRIu32" streams=%"PRIu32" chmaps=%"PRIu32""
-virtio_snd_set_config(void *vdev, uint32_t jacks, uint32_t new_jacks, uint32_t streams, uint32_t new_streams, uint32_t chmaps, uint32_t new_chmaps) "snd %p: set_config jacks from %"PRIu32"->%"PRIu32", streams from %"PRIu32"->%"PRIu32", chmaps from %"PRIu32"->%"PRIu32
virtio_snd_get_features(void *vdev, uint64_t features) "snd %p: get_features 0x%"PRIx64
virtio_snd_vm_state_running(void) "vm state running"
virtio_snd_vm_state_stopped(void) "vm state stopped"
diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c
index 4c127a1..d5231e1 100644
--- a/hw/audio/via-ac97.c
+++ b/hw/audio/via-ac97.c
@@ -175,7 +175,7 @@ static void out_cb(void *opaque, int avail)
ViaAC97SGDChannel *c = &s->aur;
int temp, to_copy, copied;
bool stop = false;
- uint8_t tmpbuf[4096];
+ QEMU_UNINITIALIZED uint8_t tmpbuf[4096];
if (c->stat & STAT_PAUSED) {
return;
@@ -459,12 +459,11 @@ static void via_ac97_exit(PCIDevice *dev)
AUD_remove_card(&s->card);
}
-static Property via_ac97_properties[] = {
+static const Property via_ac97_properties[] = {
DEFINE_AUDIO_PROPERTIES(ViaAC97State, card),
- DEFINE_PROP_END_OF_LIST(),
};
-static void via_ac97_class_init(ObjectClass *klass, void *data)
+static void via_ac97_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -478,7 +477,7 @@ static void via_ac97_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, via_ac97_properties);
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->desc = "VIA AC97";
- dc->reset = via_ac97_reset;
+ device_class_set_legacy_reset(dc, via_ac97_reset);
/* Reason: Part of a south bridge chip */
dc->user_creatable = false;
}
@@ -488,7 +487,7 @@ static const TypeInfo via_ac97_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(ViaAC97State),
.class_init = via_ac97_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -502,7 +501,7 @@ static void via_mc97_realize(PCIDevice *pci_dev, Error **errp)
pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03);
}
-static void via_mc97_class_init(ObjectClass *klass, void *data)
+static void via_mc97_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -523,7 +522,7 @@ static const TypeInfo via_mc97_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = via_mc97_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/audio/virtio-snd-pci.c b/hw/audio/virtio-snd-pci.c
index ab58c64..9eb0007 100644
--- a/hw/audio/virtio-snd-pci.c
+++ b/hw/audio/virtio-snd-pci.c
@@ -27,11 +27,10 @@ struct VirtIOSoundPCI {
VirtIOSound vdev;
};
-static Property virtio_snd_pci_properties[] = {
+static const Property virtio_snd_pci_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -43,7 +42,7 @@ static void virtio_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_snd_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_snd_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c
index 5993f4f..eca3319 100644
--- a/hw/audio/virtio-snd.c
+++ b/hw/audio/virtio-snd.c
@@ -20,8 +20,7 @@
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
-#include "exec/tswap.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "hw/audio/virtio-snd.h"
@@ -78,7 +77,7 @@ static const VMStateDescription vmstate_virtio_snd = {
},
};
-static Property virtio_snd_properties[] = {
+static const Property virtio_snd_properties[] = {
DEFINE_AUDIO_PROPERTIES(VirtIOSound, card),
DEFINE_PROP_UINT32("jacks", VirtIOSound, snd_conf.jacks,
VIRTIO_SOUND_JACK_DEFAULT),
@@ -86,7 +85,6 @@ static Property virtio_snd_properties[] = {
VIRTIO_SOUND_STREAM_DEFAULT),
DEFINE_PROP_UINT32("chmaps", VirtIOSound, snd_conf.chmaps,
VIRTIO_SOUND_CHMAP_DEFAULT),
- DEFINE_PROP_END_OF_LIST(),
};
static void
@@ -108,29 +106,6 @@ virtio_snd_get_config(VirtIODevice *vdev, uint8_t *config)
}
static void
-virtio_snd_set_config(VirtIODevice *vdev, const uint8_t *config)
-{
- VirtIOSound *s = VIRTIO_SND(vdev);
- const virtio_snd_config *sndconfig =
- (const virtio_snd_config *)config;
-
-
- trace_virtio_snd_set_config(vdev,
- s->snd_conf.jacks,
- sndconfig->jacks,
- s->snd_conf.streams,
- sndconfig->streams,
- s->snd_conf.chmaps,
- sndconfig->chmaps);
-
- memcpy(&s->snd_conf, sndconfig, sizeof(virtio_snd_config));
- le32_to_cpus(&s->snd_conf.jacks);
- le32_to_cpus(&s->snd_conf.streams);
- le32_to_cpus(&s->snd_conf.chmaps);
-
-}
-
-static void
virtio_snd_pcm_buffer_free(VirtIOSoundPCMBuffer *buffer)
{
g_free(buffer->elem);
@@ -282,11 +257,13 @@ uint32_t virtio_snd_set_pcm_params(VirtIOSound *s,
error_report("Number of channels is not supported.");
return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
}
- if (!(supported_formats & BIT(params->format))) {
+ if (params->format >= sizeof(supported_formats) * BITS_PER_BYTE ||
+ !(supported_formats & BIT(params->format))) {
error_report("Stream format is not supported.");
return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
}
- if (!(supported_rates & BIT(params->rate))) {
+ if (params->rate >= sizeof(supported_rates) * BITS_PER_BYTE ||
+ !(supported_rates & BIT(params->rate))) {
error_report("Stream rate is not supported.");
return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
}
@@ -1261,7 +1238,7 @@ static void virtio_snd_pcm_in_cb(void *data, int available)
{
VirtIOSoundPCMStream *stream = data;
VirtIOSoundPCMBuffer *buffer;
- size_t size;
+ size_t size, max_size;
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
while (!QSIMPLEQ_EMPTY(&stream->queue)) {
@@ -1275,7 +1252,12 @@ static void virtio_snd_pcm_in_cb(void *data, int available)
continue;
}
+ max_size = iov_size(buffer->elem->in_sg, buffer->elem->in_num);
for (;;) {
+ if (buffer->size >= max_size) {
+ return_rx_buffer(stream, buffer);
+ break;
+ }
size = AUD_read(stream->voice.in,
buffer->data + buffer->size,
MIN(available, (stream->params.period_bytes -
@@ -1379,7 +1361,7 @@ static void virtio_snd_reset(VirtIODevice *vdev)
}
}
-static void virtio_snd_class_init(ObjectClass *klass, void *data)
+static void virtio_snd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -1393,7 +1375,6 @@ static void virtio_snd_class_init(ObjectClass *klass, void *data)
vdc->realize = virtio_snd_realize;
vdc->unrealize = virtio_snd_unrealize;
vdc->get_config = virtio_snd_get_config;
- vdc->set_config = virtio_snd_set_config;
vdc->get_features = get_features;
vdc->reset = virtio_snd_reset;
vdc->legacy_features = 0;
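
The virtio_snd_set_pcm_params() hunk above guards a guest-controlled bit index before it is used as a shift count, since shifting by a count at or beyond the operand width is undefined behaviour. A self-contained sketch of the same guard (the function name is illustrative):

#include "qemu/osdep.h"
#include "qemu/bitops.h"

/* Return true only if 'format' is a valid index into the support mask. */
static bool fmt_supported(uint64_t supported_formats, unsigned format)
{
    if (format >= sizeof(supported_formats) * BITS_PER_BYTE) {
        return false;                     /* out-of-range index from the guest */
    }
    return supported_formats & BIT_ULL(format);
}
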
diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c
index ec2c4e1..2846b55 100644
--- a/hw/audio/wm8750.c
+++ b/hw/audio/wm8750.c
@@ -706,12 +706,11 @@ void wm8750_set_bclk_in(void *opaque, int new_hz)
wm8750_clk_update(s, 1);
}
-static Property wm8750_properties[] = {
+static const Property wm8750_properties[] = {
DEFINE_AUDIO_PROPERTIES(WM8750State, card),
- DEFINE_PROP_END_OF_LIST(),
};
-static void wm8750_class_init(ObjectClass *klass, void *data)
+static void wm8750_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
diff --git a/hw/avr/arduino.c b/hw/avr/arduino.c
index 48ef478..e166ca1 100644
--- a/hw/avr/arduino.c
+++ b/hw/avr/arduino.c
@@ -56,7 +56,7 @@ static void arduino_machine_init(MachineState *machine)
}
}
-static void arduino_machine_class_init(ObjectClass *oc, void *data)
+static void arduino_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -69,7 +69,7 @@ static void arduino_machine_class_init(ObjectClass *oc, void *data)
mc->no_parallel = 1;
}
-static void arduino_duemilanove_class_init(ObjectClass *oc, void *data)
+static void arduino_duemilanove_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc);
@@ -84,7 +84,7 @@ static void arduino_duemilanove_class_init(ObjectClass *oc, void *data)
amc->xtal_hz = 16 * 1000 * 1000;
};
-static void arduino_uno_class_init(ObjectClass *oc, void *data)
+static void arduino_uno_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc);
@@ -99,7 +99,7 @@ static void arduino_uno_class_init(ObjectClass *oc, void *data)
amc->xtal_hz = 16 * 1000 * 1000;
};
-static void arduino_mega_class_init(ObjectClass *oc, void *data)
+static void arduino_mega_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc);
@@ -114,7 +114,7 @@ static void arduino_mega_class_init(ObjectClass *oc, void *data)
amc->xtal_hz = 16 * 1000 * 1000;
};
-static void arduino_mega2560_class_init(ObjectClass *oc, void *data)
+static void arduino_mega2560_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc);
diff --git a/hw/avr/atmega.c b/hw/avr/atmega.c
index 31c8992..95b6da5 100644
--- a/hw/avr/atmega.c
+++ b/hw/avr/atmega.c
@@ -12,13 +12,15 @@
#include "qemu/module.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qom/object.h"
#include "hw/misc/unimp.h"
+#include "migration/vmstate.h"
#include "atmega.h"
enum AtmegaPeripheral {
@@ -224,8 +226,6 @@ static void atmega_realize(DeviceState *dev, Error **errp)
char *devname;
size_t i;
- assert(mc->io_size <= 0x200);
-
if (!s->xtal_freq_hz) {
error_setg(errp, "\"xtal-frequency-hz\" property must be provided.");
return;
@@ -240,11 +240,37 @@ static void atmega_realize(DeviceState *dev, Error **errp)
qdev_realize(DEVICE(&s->cpu), NULL, &error_abort);
cpudev = DEVICE(&s->cpu);
- /* SRAM */
- memory_region_init_ram(&s->sram, OBJECT(dev), "sram", mc->sram_size,
- &error_abort);
- memory_region_add_subregion(get_system_memory(),
- OFFSET_DATA + mc->io_size, &s->sram);
+ /*
+ * SRAM
+ *
+ * Softmmu is not able to mix i/o and ram on the same page.
+ * Therefore in all cases, the first page exclusively contains i/o.
+ *
+ * If the MCU's i/o region matches the page size, then we can simply
+ * allocate all ram starting at the second page. Otherwise, we must
+ * allocate some ram as i/o to complete the first page.
+ */
+ assert(mc->io_size == 0x100 || mc->io_size == 0x200);
+ if (mc->io_size >= TARGET_PAGE_SIZE) {
+ memory_region_init_ram(&s->sram, OBJECT(dev), "sram", mc->sram_size,
+ &error_abort);
+ memory_region_add_subregion(get_system_memory(),
+ OFFSET_DATA + mc->io_size, &s->sram);
+ } else {
+ int sram_io_size = TARGET_PAGE_SIZE - mc->io_size;
+ void *sram_io_mem = g_malloc0(sram_io_size);
+
+ memory_region_init_ram_device_ptr(&s->sram_io, OBJECT(dev), "sram-as-io",
+ sram_io_size, sram_io_mem);
+ memory_region_add_subregion(get_system_memory(),
+ OFFSET_DATA + mc->io_size, &s->sram_io);
+ vmstate_register_ram(&s->sram_io, dev);
+
+ memory_region_init_ram(&s->sram, OBJECT(dev), "sram",
+ mc->sram_size - sram_io_size, &error_abort);
+ memory_region_add_subregion(get_system_memory(),
+ OFFSET_DATA + TARGET_PAGE_SIZE, &s->sram);
+ }
/* Flash */
memory_region_init_rom(&s->flash, OBJECT(dev),
@@ -355,13 +381,12 @@ static void atmega_realize(DeviceState *dev, Error **errp)
create_unimplemented_device("avr-eeprom", OFFSET_DATA + 0x03f, 3);
}
-static Property atmega_props[] = {
+static const Property atmega_props[] = {
DEFINE_PROP_UINT64("xtal-frequency-hz", AtmegaMcuState,
xtal_freq_hz, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void atmega_class_init(ObjectClass *oc, void *data)
+static void atmega_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -371,7 +396,7 @@ static void atmega_class_init(ObjectClass *oc, void *data)
dc->user_creatable = false;
}
-static void atmega168_class_init(ObjectClass *oc, void *data)
+static void atmega168_class_init(ObjectClass *oc, const void *data)
{
AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc);
@@ -386,7 +411,7 @@ static void atmega168_class_init(ObjectClass *oc, void *data)
amc->dev = dev168_328;
};
-static void atmega328_class_init(ObjectClass *oc, void *data)
+static void atmega328_class_init(ObjectClass *oc, const void *data)
{
AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc);
@@ -401,7 +426,7 @@ static void atmega328_class_init(ObjectClass *oc, void *data)
amc->dev = dev168_328;
};
-static void atmega1280_class_init(ObjectClass *oc, void *data)
+static void atmega1280_class_init(ObjectClass *oc, const void *data)
{
AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc);
@@ -416,7 +441,7 @@ static void atmega1280_class_init(ObjectClass *oc, void *data)
amc->dev = dev1280_2560;
};
-static void atmega2560_class_init(ObjectClass *oc, void *data)
+static void atmega2560_class_init(ObjectClass *oc, const void *data)
{
AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc);
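
A worked example of the SRAM split introduced in atmega_realize() above, under the assumption of a 0x400-byte TARGET_PAGE_SIZE and an MCU io_size of 0x100 (the real AVR page size may differ; only the arithmetic is the point):

/*
 *   sram_io_size = TARGET_PAGE_SIZE - io_size = 0x400 - 0x100 = 0x300
 *   "sram-as-io" region: OFFSET_DATA + 0x100, length 0x300  (completes page 0)
 *   "sram"       region: OFFSET_DATA + 0x400, length sram_size - 0x300
 *
 * The first data page then contains only I/O-style regions and every later
 * page is plain RAM, which is exactly the constraint the comment in the
 * hunk describes.
 */
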
diff --git a/hw/avr/atmega.h b/hw/avr/atmega.h
index a99ee15..9ac4678 100644
--- a/hw/avr/atmega.h
+++ b/hw/avr/atmega.h
@@ -41,6 +41,7 @@ struct AtmegaMcuState {
MemoryRegion flash;
MemoryRegion eeprom;
MemoryRegion sram;
+ MemoryRegion sram_io;
DeviceState *io;
AVRMaskState pwr[POWER_MAX];
AVRUsartState usart[USART_MAX];
diff --git a/hw/avr/boot.c b/hw/avr/boot.c
index 617f3a1..e5a29c7 100644
--- a/hw/avr/boot.c
+++ b/hw/avr/boot.c
@@ -71,11 +71,9 @@ bool avr_load_firmware(AVRCPU *cpu, MachineState *ms,
return false;
}
- bytes_loaded = load_elf_ram_sym(filename,
- NULL, NULL, NULL,
- &entry, NULL, NULL,
- &e_flags, 0, EM_AVR, 0, 0,
- NULL, true, NULL);
+ bytes_loaded = load_elf_as(filename, NULL, NULL, NULL,
+ &entry, NULL, NULL,
+ &e_flags, ELFDATA2LSB, EM_AVR, 0, 0, NULL);
if (bytes_loaded >= 0) {
/* If ELF file is provided, determine CPU type reading ELF e_flags. */
const char *elf_cpu = avr_elf_e_flags_to_cpu_type(e_flags);
diff --git a/hw/block/Kconfig b/hw/block/Kconfig
index 9e8f28f..737dbcd 100644
--- a/hw/block/Kconfig
+++ b/hw/block/Kconfig
@@ -13,24 +13,12 @@ config FDC_SYSBUS
config SSI_M25P80
bool
-config NAND
- bool
-
config PFLASH_CFI01
bool
config PFLASH_CFI02
bool
-config ECC
- bool
-
-config ONENAND
- bool
-
-config TC58128
- bool
-
config VIRTIO_BLK
bool
default y
diff --git a/hw/block/block.c b/hw/block/block.c
index 3ceca7d..2e10611 100644
--- a/hw/block/block.c
+++ b/hw/block/block.c
@@ -9,9 +9,10 @@
#include "qemu/osdep.h"
#include "block/block_int-common.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/block-backend.h"
+#include "system/blockdev.h"
+#include "system/block-backend.h"
#include "hw/block/block.h"
+#include "migration/cpr.h"
#include "qapi/error.h"
#include "qapi/qapi-types-block.h"
@@ -66,6 +67,10 @@ bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev,
int ret;
g_autofree char *dev_id = NULL;
+ if (cpr_is_incoming()) {
+ return true;
+ }
+
blk_len = blk_getlength(blk);
if (blk_len < 0) {
error_setg_errno(errp, -blk_len,
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index 98501e6..48c2e31 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -27,8 +27,8 @@
#include "hw/xen/xen.h"
#include "hw/block/xen_blkif.h"
#include "hw/xen/interface/io/ring.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/iothread.h"
+#include "system/block-backend.h"
+#include "system/iothread.h"
#include "xen-block.h"
typedef struct XenBlockRequest {
diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h
index 7b8e9df..eb70327 100644
--- a/hw/block/dataplane/xen-block.h
+++ b/hw/block/dataplane/xen-block.h
@@ -10,7 +10,7 @@
#include "hw/block/block.h"
#include "hw/xen/xen-bus.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
typedef struct XenBlockDataPlane XenBlockDataPlane;
diff --git a/hw/block/ecc.c b/hw/block/ecc.c
deleted file mode 100644
index ed889a4..0000000
--- a/hw/block/ecc.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Calculate Error-correcting Codes. Used by NAND Flash controllers
- * (not by NAND chips).
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "migration/vmstate.h"
-#include "hw/block/flash.h"
-
-/*
- * Pre-calculated 256-way 1 byte column parity. Table borrowed from Linux.
- */
-static const uint8_t nand_ecc_precalc_table[] = {
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a,
- 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f,
- 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c,
- 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59,
- 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33,
- 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56,
- 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55,
- 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30,
- 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30,
- 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55,
- 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56,
- 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33,
- 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59,
- 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c,
- 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f,
- 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a,
- 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
-};
-
-/* Update ECC parity count. */
-uint8_t ecc_digest(ECCState *s, uint8_t sample)
-{
- uint8_t idx = nand_ecc_precalc_table[sample];
-
- s->cp ^= idx & 0x3f;
- if (idx & 0x40) {
- s->lp[0] ^= ~s->count;
- s->lp[1] ^= s->count;
- }
- s->count ++;
-
- return sample;
-}
-
-/* Reinitialise the counters. */
-void ecc_reset(ECCState *s)
-{
- s->lp[0] = 0x0000;
- s->lp[1] = 0x0000;
- s->cp = 0x00;
- s->count = 0;
-}
-
-/* Save/restore */
-const VMStateDescription vmstate_ecc_state = {
- .name = "ecc-state",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(cp, ECCState),
- VMSTATE_UINT16_ARRAY(lp, ECCState, 2),
- VMSTATE_UINT16(count, ECCState),
- VMSTATE_END_OF_LIST(),
- },
-};
diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c
index e43dc53..6d1790e 100644
--- a/hw/block/fdc-isa.c
+++ b/hw/block/fdc-isa.c
@@ -39,10 +39,10 @@
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "hw/block/block.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
-#include "exec/ioport.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
+#include "system/ioport.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -147,6 +147,8 @@ static void isa_fdc_get_drive_max_chs(FloppyDriveType type, uint8_t *maxc,
*maxs = fdf->last_sect;
}
}
+ /* fd_formats must contain at least one entry per FloppyDriveType */
+ assert(*maxc);
(*maxc)--;
}
@@ -281,7 +283,7 @@ static const VMStateDescription vmstate_isa_fdc = {
}
};
-static Property isa_fdc_properties[] = {
+static const Property isa_fdc_properties[] = {
DEFINE_PROP_UINT32("iobase", FDCtrlISABus, iobase, 0x3f0),
DEFINE_PROP_UINT32("irq", FDCtrlISABus, irq, 6),
DEFINE_PROP_UINT32("dma", FDCtrlISABus, dma, 2),
@@ -294,10 +296,9 @@ static Property isa_fdc_properties[] = {
DEFINE_PROP_SIGNED("fallback", FDCtrlISABus, state.fallback,
FLOPPY_DRIVE_TYPE_288, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isabus_fdc_class_init(ObjectClass *klass, void *data)
+static void isabus_fdc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
@@ -305,7 +306,7 @@ static void isabus_fdc_class_init(ObjectClass *klass, void *data)
dc->desc = "virtual floppy controller";
dc->realize = isabus_fdc_realize;
dc->fw_name = "fdc";
- dc->reset = fdctrl_external_reset_isa;
+ device_class_set_legacy_reset(dc, fdctrl_external_reset_isa);
dc->vmsd = &vmstate_isa_fdc;
adevc->build_dev_aml = build_fdc_aml;
device_class_set_props(dc, isa_fdc_properties);
@@ -330,7 +331,7 @@ static const TypeInfo isa_fdc_info = {
.instance_size = sizeof(FDCtrlISABus),
.class_init = isabus_fdc_class_init,
.instance_init = isabus_fdc_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
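Both floppy controller front ends (and m25p80.c further down) switch from assigning DeviceClass::reset directly to calling device_class_set_legacy_reset(), which registers the old-style handler with the Resettable machinery. A hedged sketch of the converted pattern for a generic device (MyDeviceState and friends are placeholder names, not part of this patch):

    /* Sketch only: MY_DEVICE/MyDeviceState/my_device_reset are illustrative. */
    static void my_device_reset(DeviceState *dev)
    {
        MyDeviceState *s = MY_DEVICE(dev);

        s->ctrl_reg = 0;            /* return to the device's power-on state */
    }

    static void my_device_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        /* was: dc->reset = my_device_reset; */
        device_class_set_legacy_reset(dc, my_device_reset);
    }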
diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c
index 035bc08..956860a 100644
--- a/hw/block/fdc-sysbus.c
+++ b/hw/block/fdc-sysbus.c
@@ -26,7 +26,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
#include "hw/block/fdc.h"
#include "migration/vmstate.h"
@@ -176,12 +176,12 @@ static const VMStateDescription vmstate_sysbus_fdc = {
}
};
-static void sysbus_fdc_common_class_init(ObjectClass *klass, void *data)
+static void sysbus_fdc_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sysbus_fdc_realize;
- dc->reset = fdctrl_external_reset_sysbus;
+ device_class_set_legacy_reset(dc, fdctrl_external_reset_sysbus);
dc->vmsd = &vmstate_sysbus_fdc;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -196,7 +196,7 @@ static const TypeInfo sysbus_fdc_common_typeinfo = {
.class_size = sizeof(FDCtrlSysBusClass),
};
-static Property sysbus_fdc_properties[] = {
+static const Property sysbus_fdc_properties[] = {
DEFINE_PROP_SIGNED("fdtypeA", FDCtrlSysBus, state.qdev_for_drives[0].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
@@ -206,10 +206,9 @@ static Property sysbus_fdc_properties[] = {
DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback,
FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sysbus_fdc_class_init(ObjectClass *klass, void *data)
+static void sysbus_fdc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -223,17 +222,16 @@ static const TypeInfo sysbus_fdc_typeinfo = {
.class_init = sysbus_fdc_class_init,
};
-static Property sun4m_fdc_properties[] = {
+static const Property sun4m_fdc_properties[] = {
DEFINE_PROP_SIGNED("fdtype", FDCtrlSysBus, state.qdev_for_drives[0].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback,
FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sun4m_fdc_class_init(ObjectClass *klass, void *data)
+static void sun4m_fdc_class_init(ObjectClass *klass, const void *data)
{
FDCtrlSysBusClass *sbdc = SYSBUS_FDC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index 6dd94e9..d0f08c7 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -39,9 +39,9 @@
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "hw/block/block.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -454,13 +454,12 @@ struct FloppyDrive {
FloppyDriveType type;
};
-static Property floppy_drive_properties[] = {
+static const Property floppy_drive_properties[] = {
DEFINE_PROP_UINT32("unit", FloppyDrive, unit, -1),
DEFINE_BLOCK_PROPERTIES(FloppyDrive, conf),
DEFINE_PROP_SIGNED("drive-type", FloppyDrive, type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_END_OF_LIST(),
};
static void floppy_drive_realize(DeviceState *qdev, Error **errp)
@@ -554,7 +553,7 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp)
fd_revalidate(drive);
}
-static void floppy_drive_class_init(ObjectClass *klass, void *data)
+static void floppy_drive_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = floppy_drive_realize;
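The property hunks follow the same recipe throughout this patch: the arrays become const Property[] and lose their DEFINE_PROP_END_OF_LIST() terminator, since device_class_set_props() can now take the element count from the array itself. A minimal sketch of the resulting shape (MyDeviceState and its fields are illustrative):

    static const Property my_device_properties[] = {
        DEFINE_PROP_UINT32("iobase", MyDeviceState, iobase, 0x3f0),
        DEFINE_PROP_BOOL("enabled", MyDeviceState, enabled, true),
        /* no DEFINE_PROP_END_OF_LIST(): the length comes from the array */
    };

    static void my_device_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_props(dc, my_device_properties);
    }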
diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c
index 2b0af44..f3939e7 100644
--- a/hw/block/hd-geometry.c
+++ b/hw/block/hd-geometry.c
@@ -31,7 +31,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/qapi-types-block.h"
#include "qemu/bswap.h"
#include "hw/block/block.h"
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 9e99107..a5336d9 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -23,7 +23,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/block/block.h"
#include "hw/block/flash.h"
#include "hw/qdev-properties.h"
@@ -61,7 +61,8 @@ typedef struct FlashPartInfo {
*/
uint8_t id[SPI_NOR_MAX_ID_LEN];
uint8_t id_len;
- /* there is confusion between manufacturers as to what a sector is. In this
+ /*
+ * there is confusion between manufacturers as to what a sector is. In this
* device model, a "sector" is the size that is erased by the ERASE_SECTOR
* command (opcode 0xd8).
*/
@@ -168,7 +169,7 @@ typedef struct FlashPartInfo {
/*
* Spansion read mode command length in bytes,
* the mode is currently not supported.
-*/
+ */
#define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1
#define WINBOND_CONTINUOUS_READ_MODE_CMD_LEN 1
@@ -189,7 +190,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("at45db081d", 0x1f2500, 0, 64 << 10, 16, ER_4K) },
- /* Atmel EEPROMS - it is assumed, that don't care bit in command
+ /*
+ * Atmel EEPROMS - it is assumed, that don't care bit in command
* is set to 0. Block protection is not supported.
*/
{ INFO("at25128a-nonjedec", 0x0, 0, 1, 131072, EEPROM) },
@@ -266,7 +268,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) },
{ INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) },
{ INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024,
- ER_4K | ER_32K, 2) },
+ ER_4K | ER_32K, 2),
+ .sfdp_read = m25p80_sfdp_mt35xu01g },
{ INFO_STACKED("mt35xu02gbba", 0x2c5b1c, 0x104100, 128 << 10, 2048,
ER_4K | ER_32K, 4),
.sfdp_read = m25p80_sfdp_mt35xu02g },
@@ -274,10 +277,13 @@ static const FlashPartInfo known_devices[] = {
{ INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
{ INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
{ INFO_STACKED("mt25qu01g", 0x20bb21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
- { INFO_STACKED("mt25ql02g", 0x20ba22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) },
- { INFO_STACKED("mt25qu02g", 0x20bb22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) },
+ { INFO_STACKED("mt25ql02g", 0x20ba22, 0x1040, 64 << 10, 4096,
+ ER_4K | ER_32K, 2) },
+ { INFO_STACKED("mt25qu02g", 0x20bb22, 0x1040, 64 << 10, 4096,
+ ER_4K | ER_32K, 2) },
- /* Spansion -- single (large) sector size only, at least
+ /*
+ * Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ INFO("s25sl032p", 0x010215, 0x4d00, 64 << 10, 64, ER_4K) },
@@ -350,13 +356,17 @@ static const FlashPartInfo known_devices[] = {
{ INFO("w25x64", 0xef3017, 0, 64 << 10, 128, ER_4K) },
{ INFO("w25q64", 0xef4017, 0, 64 << 10, 128, ER_4K) },
{ INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) },
- { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) },
+ { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K),
+ .sfdp_read = m25p80_sfdp_w25q80bl },
{ INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K),
.sfdp_read = m25p80_sfdp_w25q256 },
{ INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K),
.sfdp_read = m25p80_sfdp_w25q512jv },
{ INFO("w25q01jvq", 0xef4021, 0, 64 << 10, 2048, ER_4K),
.sfdp_read = m25p80_sfdp_w25q01jvq },
+
+ /* Microchip */
+ { INFO("25csm04", 0x29cc00, 0x100, 64 << 10, 8, 0) },
};
typedef enum {
@@ -421,6 +431,11 @@ typedef enum {
RDCR_EQIO = 0x35,
RSTQIO = 0xf5,
+ /*
+ * Winbond: 0x31 - write status register 2
+ */
+ WRSR2 = 0x31,
+
RNVCR = 0xB5,
WNVCR = 0xB1,
@@ -513,7 +528,7 @@ struct Flash {
struct M25P80Class {
SSIPeripheralClass parent_class;
- FlashPartInfo *pi;
+ const FlashPartInfo *pi;
};
OBJECT_DECLARE_TYPE(Flash, M25P80Class, M25P80)
@@ -545,7 +560,8 @@ static void blk_sync_complete(void *opaque, int ret)
qemu_iovec_destroy(iov);
g_free(iov);
- /* do nothing. Masters do not directly interact with the backing store,
+ /*
+ * do nothing. Masters do not directly interact with the backing store,
* only the working copy so no mutexing required.
*/
}
@@ -811,6 +827,15 @@ static void complete_collecting_data(Flash *s)
s->write_enable = false;
}
break;
+ case WRSR2:
+ switch (get_man(s)) {
+ case MAN_WINBOND:
+ s->quad_enable = !!(s->data[0] & 0x02);
+ break;
+ default:
+ break;
+ }
+ break;
case BRWR:
case EXTEND_ADDR_WRITE:
s->ear = s->data[0];
@@ -1270,7 +1295,31 @@ static void decode_new_cmd(Flash *s, uint32_t value)
}
s->pos = 0;
break;
+ case WRSR2:
+ /*
+ * If WP# is low and status_register_write_disabled is high,
+ * status register writes are disabled.
+ * This is also called "hardware protected mode" (HPM). All other
+ * combinations of the two states are called "software protected mode"
+ * (SPM), and status register writes are permitted.
+ */
+ if ((s->wp_level == 0 && s->status_register_write_disabled)
+ || !s->write_enable) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M25P80: Status register 2 write is disabled!\n");
+ break;
+ }
+ switch (get_man(s)) {
+ case MAN_WINBOND:
+ s->needed_bytes = 1;
+ s->state = STATE_COLLECTING_DATA;
+ s->pos = 0;
+ break;
+ default:
+ break;
+ }
+ break;
case WRDI:
s->write_enable = false;
if (get_man(s) == MAN_SST) {
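The WRSR2 decode above only accepts the write when the usual preconditions hold (WREN first, not hardware-protected), after which complete_collecting_data() latches bit 1 of the data byte into quad_enable. A hedged sketch of the guest-side byte sequence this enables for Winbond parts (spi_cs()/spi_xfer() are placeholders for whatever transfer primitives the guest driver uses):

    /*
     * Sketch of a guest driver enabling quad mode on a Winbond part via
     * the new WRSR2 path.  spi_cs()/spi_xfer() are illustrative only.
     */
    static void winbond_enable_quad(void)
    {
        spi_cs(true);
        spi_xfer(0x06);          /* WREN: sets write_enable in the model */
        spi_cs(false);

        spi_cs(true);
        spi_xfer(0x31);          /* WRSR2: model collects one data byte */
        spi_xfer(0x02);          /* bit 1 set -> s->quad_enable = true  */
        spi_cs(false);
    }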
@@ -1671,7 +1720,7 @@ static int m25p80_pre_save(void *opaque)
return 0;
}
-static Property m25p80_properties[] = {
+static const Property m25p80_properties[] = {
/* This is default value for Micron flash */
DEFINE_PROP_BOOL("write-enable", Flash, write_enable, false),
DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF),
@@ -1680,7 +1729,6 @@ static Property m25p80_properties[] = {
DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2),
DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10),
DEFINE_PROP_DRIVE("drive", Flash, blk),
- DEFINE_PROP_END_OF_LIST(),
};
static int m25p80_pre_load(void *opaque)
@@ -1809,7 +1857,7 @@ static const VMStateDescription vmstate_m25p80 = {
}
};
-static void m25p80_class_init(ObjectClass *klass, void *data)
+static void m25p80_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
@@ -1821,8 +1869,10 @@ static void m25p80_class_init(ObjectClass *klass, void *data)
k->cs_polarity = SSI_CS_LOW;
dc->vmsd = &vmstate_m25p80;
device_class_set_props(dc, m25p80_properties);
- dc->reset = m25p80_reset;
+ device_class_set_legacy_reset(dc, m25p80_reset);
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
mc->pi = data;
+ dc->desc = "Serial Flash";
}
static const TypeInfo m25p80_info = {
@@ -1839,13 +1889,13 @@ static void m25p80_register_types(void)
type_register_static(&m25p80_info);
for (i = 0; i < ARRAY_SIZE(known_devices); ++i) {
- TypeInfo ti = {
+ const TypeInfo ti = {
.name = known_devices[i].part_name,
.parent = TYPE_M25P80,
.class_init = m25p80_class_init,
- .class_data = (void *)&known_devices[i],
+ .class_data = &known_devices[i],
};
- type_register(&ti);
+ type_register_static(&ti);
}
}
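The registration loop above now hands each (const) known_devices[] entry to type_register_static() as class_data; m25p80_class_init() stores it in M25P80Class::pi, so every flash instance can reach its part description through its class. A hedged sketch of the lookup side (flash_describe() is illustrative, and the FlashPartInfo field names used here are not all visible in this hunk):

    /* Sketch: reading the per-part data that class_init saved from class_data. */
    static void flash_describe(Flash *s)
    {
        M25P80Class *mc = M25P80_GET_CLASS(s);
        const FlashPartInfo *pi = mc->pi;

        printf("%s: %u sectors of %u bytes\n", pi->part_name,
               (unsigned)pi->n_sectors, (unsigned)pi->sector_size);
    }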
diff --git a/hw/block/m25p80_sfdp.c b/hw/block/m25p80_sfdp.c
index 6ee2cfa..a03a291 100644
--- a/hw/block/m25p80_sfdp.c
+++ b/hw/block/m25p80_sfdp.c
@@ -57,6 +57,43 @@ static const uint8_t sfdp_n25q256a[] = {
};
define_sfdp_read(n25q256a);
+static const uint8_t sfdp_mt35xu01g[] = {
+ 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff,
+ 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff,
+ 0x84, 0x00, 0x01, 0x02, 0x80, 0x00, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x20, 0x8a, 0xff, 0xff, 0xff, 0xff, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0x00, 0x00, 0x0c, 0x20, 0x11, 0xd8,
+ 0x0f, 0x52, 0x00, 0x00, 0x24, 0x5a, 0x99, 0x00,
+ 0x8b, 0x8e, 0x03, 0xe1, 0xac, 0x01, 0x27, 0x38,
+ 0x7a, 0x75, 0x7a, 0x75, 0xfb, 0xbd, 0xd5, 0x5c,
+ 0x00, 0x00, 0x70, 0xff, 0x81, 0xb0, 0x38, 0x36,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x43, 0x0e, 0xff, 0xff, 0x21, 0xdc, 0x5c, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+define_sfdp_read(mt35xu01g);
+
static const uint8_t sfdp_mt35xu02g[] = {
0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff,
0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff,
@@ -367,6 +404,42 @@ static const uint8_t sfdp_w25q01jvq[] = {
};
define_sfdp_read(w25q01jvq);
+static const uint8_t sfdp_w25q80bl[] = {
+ 0x53, 0x46, 0x44, 0x50, 0x05, 0x01, 0x00, 0xff,
+ 0x00, 0x05, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x20, 0xf1, 0xff, 0xff, 0xff, 0x7f, 0x00,
+ 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb,
+ 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0x00, 0x00, 0x0c, 0x20, 0x0f, 0x52,
+ 0x10, 0xd8, 0x00, 0x00, 0x23, 0x02, 0xa6, 0x00,
+ 0x81, 0x6c, 0x14, 0xa7, 0xed, 0x61, 0x76, 0x33,
+ 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c,
+ 0x00, 0xf7, 0x1d, 0xff, 0xe9, 0x30, 0xc0, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+define_sfdp_read(w25q80bl);
+
/*
* Integrated Silicon Solution (ISSI)
*/
diff --git a/hw/block/m25p80_sfdp.h b/hw/block/m25p80_sfdp.h
index 1733b56..3578568 100644
--- a/hw/block/m25p80_sfdp.h
+++ b/hw/block/m25p80_sfdp.h
@@ -16,6 +16,7 @@
#define M25P80_SFDP_MAX_SIZE (1 << 24)
uint8_t m25p80_sfdp_n25q256a(uint32_t addr);
+uint8_t m25p80_sfdp_mt35xu01g(uint32_t addr);
uint8_t m25p80_sfdp_mt35xu02g(uint32_t addr);
uint8_t m25p80_sfdp_mx25l25635e(uint32_t addr);
@@ -24,7 +25,7 @@ uint8_t m25p80_sfdp_mx66l1g45g(uint32_t addr);
uint8_t m25p80_sfdp_w25q256(uint32_t addr);
uint8_t m25p80_sfdp_w25q512jv(uint32_t addr);
-
+uint8_t m25p80_sfdp_w25q80bl(uint32_t addr);
uint8_t m25p80_sfdp_w25q01jvq(uint32_t addr);
uint8_t m25p80_sfdp_is25wp256(uint32_t addr);
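Each sfdp_<part>[] table above is wrapped by define_sfdp_read() into one of the accessors declared in this header. Conceptually the generated function just indexes its table modulo the table size, so SFDP reads past the end wrap around; a hedged reconstruction (the real macro lives outside this patch and may differ in detail):

    /* Hedged sketch of what define_sfdp_read(w25q80bl) expands to. */
    uint8_t m25p80_sfdp_w25q80bl(uint32_t addr)
    {
        return sfdp_w25q80bl[addr & (sizeof(sfdp_w25q80bl) - 1)];
    }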
diff --git a/hw/block/meson.build b/hw/block/meson.build
index 8aa4dc3..6557044 100644
--- a/hw/block/meson.build
+++ b/hw/block/meson.build
@@ -3,19 +3,15 @@ system_ss.add(files(
'cdrom.c',
'hd-geometry.c'
))
-system_ss.add(when: 'CONFIG_ECC', if_true: files('ecc.c'))
system_ss.add(when: 'CONFIG_FDC', if_true: files('fdc.c'))
system_ss.add(when: 'CONFIG_FDC_ISA', if_true: files('fdc-isa.c'))
system_ss.add(when: 'CONFIG_FDC_SYSBUS', if_true: files('fdc-sysbus.c'))
-system_ss.add(when: 'CONFIG_NAND', if_true: files('nand.c'))
-system_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c'))
system_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c'))
system_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c'))
system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c'))
system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c'))
system_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c'))
-system_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c'))
specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c'))
diff --git a/hw/block/nand.c b/hw/block/nand.c
deleted file mode 100644
index e2433c2..0000000
--- a/hw/block/nand.c
+++ /dev/null
@@ -1,836 +0,0 @@
-/*
- * Flash NAND memory emulation. Based on "16M x 8 Bit NAND Flash
- * Memory" datasheet for the KM29U128AT / K9F2808U0A chips from
- * Samsung Electronic.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * Support for additional features based on "MT29F2G16ABCWP 2Gx16"
- * datasheet from Micron Technology and "NAND02G-B2C" datasheet
- * from ST Microelectronics.
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#ifndef NAND_IO
-
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "hw/qdev-properties.h"
-#include "hw/qdev-properties-system.h"
-#include "hw/block/flash.h"
-#include "sysemu/block-backend.h"
-#include "migration/vmstate.h"
-#include "qapi/error.h"
-#include "qemu/error-report.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-# define NAND_CMD_READ0 0x00
-# define NAND_CMD_READ1 0x01
-# define NAND_CMD_READ2 0x50
-# define NAND_CMD_LPREAD2 0x30
-# define NAND_CMD_NOSERIALREAD2 0x35
-# define NAND_CMD_RANDOMREAD1 0x05
-# define NAND_CMD_RANDOMREAD2 0xe0
-# define NAND_CMD_READID 0x90
-# define NAND_CMD_RESET 0xff
-# define NAND_CMD_PAGEPROGRAM1 0x80
-# define NAND_CMD_PAGEPROGRAM2 0x10
-# define NAND_CMD_CACHEPROGRAM2 0x15
-# define NAND_CMD_BLOCKERASE1 0x60
-# define NAND_CMD_BLOCKERASE2 0xd0
-# define NAND_CMD_READSTATUS 0x70
-# define NAND_CMD_COPYBACKPRG1 0x85
-
-# define NAND_IOSTATUS_ERROR (1 << 0)
-# define NAND_IOSTATUS_PLANE0 (1 << 1)
-# define NAND_IOSTATUS_PLANE1 (1 << 2)
-# define NAND_IOSTATUS_PLANE2 (1 << 3)
-# define NAND_IOSTATUS_PLANE3 (1 << 4)
-# define NAND_IOSTATUS_READY (1 << 6)
-# define NAND_IOSTATUS_UNPROTCT (1 << 7)
-
-# define MAX_PAGE 0x800
-# define MAX_OOB 0x40
-
-typedef struct NANDFlashState NANDFlashState;
-struct NANDFlashState {
- DeviceState parent_obj;
-
- uint8_t manf_id, chip_id;
- uint8_t buswidth; /* in BYTES */
- int size, pages;
- int page_shift, oob_shift, erase_shift, addr_shift;
- uint8_t *storage;
- BlockBackend *blk;
- int mem_oob;
-
- uint8_t cle, ale, ce, wp, gnd;
-
- uint8_t io[MAX_PAGE + MAX_OOB + 0x400];
- uint8_t *ioaddr;
- int iolen;
-
- uint32_t cmd;
- uint64_t addr;
- int addrlen;
- int status;
- int offset;
-
- void (*blk_write)(NANDFlashState *s);
- void (*blk_erase)(NANDFlashState *s);
- /*
- * Returns %true when block containing (@addr + @offset) is
- * successfully loaded, otherwise %false.
- */
- bool (*blk_load)(NANDFlashState *s, uint64_t addr, unsigned offset);
-
- uint32_t ioaddr_vmstate;
-};
-
-#define TYPE_NAND "nand"
-
-OBJECT_DECLARE_SIMPLE_TYPE(NANDFlashState, NAND)
-
-static void mem_and(uint8_t *dest, const uint8_t *src, size_t n)
-{
- /* Like memcpy() but we logical-AND the data into the destination */
- int i;
- for (i = 0; i < n; i++) {
- dest[i] &= src[i];
- }
-}
-
-# define NAND_NO_AUTOINCR 0x00000001
-# define NAND_BUSWIDTH_16 0x00000002
-# define NAND_NO_PADDING 0x00000004
-# define NAND_CACHEPRG 0x00000008
-# define NAND_COPYBACK 0x00000010
-# define NAND_IS_AND 0x00000020
-# define NAND_4PAGE_ARRAY 0x00000040
-# define NAND_NO_READRDY 0x00000100
-# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK)
-
-# define NAND_IO
-
-# define PAGE(addr) ((addr) >> ADDR_SHIFT)
-# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE))
-# define PAGE_MASK ((1 << ADDR_SHIFT) - 1)
-# define OOB_SHIFT (PAGE_SHIFT - 5)
-# define OOB_SIZE (1 << OOB_SHIFT)
-# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT))
-# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8))
-
-# define NAND_PAGE_SIZE 256
-# define PAGE_SHIFT 8
-# define PAGE_SECTORS 1
-# define ADDR_SHIFT 8
-# include "nand.c"
-# define NAND_PAGE_SIZE 512
-# define PAGE_SHIFT 9
-# define PAGE_SECTORS 1
-# define ADDR_SHIFT 8
-# include "nand.c"
-# define NAND_PAGE_SIZE 2048
-# define PAGE_SHIFT 11
-# define PAGE_SECTORS 4
-# define ADDR_SHIFT 16
-# include "nand.c"
-
-/* Information based on Linux drivers/mtd/nand/raw/nand_ids.c */
-static const struct {
- int size;
- int width;
- int page_shift;
- int erase_shift;
- uint32_t options;
-} nand_flash_ids[0x100] = {
- [0 ... 0xff] = { 0 },
-
- [0x6b] = { 4, 8, 9, 4, 0 },
- [0xe3] = { 4, 8, 9, 4, 0 },
- [0xe5] = { 4, 8, 9, 4, 0 },
- [0xd6] = { 8, 8, 9, 4, 0 },
- [0xe6] = { 8, 8, 9, 4, 0 },
-
- [0x33] = { 16, 8, 9, 5, 0 },
- [0x73] = { 16, 8, 9, 5, 0 },
- [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 },
-
- [0x35] = { 32, 8, 9, 5, 0 },
- [0x75] = { 32, 8, 9, 5, 0 },
- [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 },
-
- [0x36] = { 64, 8, 9, 5, 0 },
- [0x76] = { 64, 8, 9, 5, 0 },
- [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 },
-
- [0x78] = { 128, 8, 9, 5, 0 },
- [0x39] = { 128, 8, 9, 5, 0 },
- [0x79] = { 128, 8, 9, 5, 0 },
- [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 },
- [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 },
-
- [0x71] = { 256, 8, 9, 5, 0 },
-
- /*
- * These are the new chips with large page size. The pagesize and the
- * erasesize is determined from the extended id bytes
- */
-# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR)
-# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
-
- /* 512 Megabit */
- [0xa2] = { 64, 8, 0, 0, LP_OPTIONS },
- [0xf2] = { 64, 8, 0, 0, LP_OPTIONS },
- [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 },
- [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 },
-
- /* 1 Gigabit */
- [0xa1] = { 128, 8, 0, 0, LP_OPTIONS },
- [0xf1] = { 128, 8, 0, 0, LP_OPTIONS },
- [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 },
- [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 },
-
- /* 2 Gigabit */
- [0xaa] = { 256, 8, 0, 0, LP_OPTIONS },
- [0xda] = { 256, 8, 0, 0, LP_OPTIONS },
- [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 },
- [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 },
-
- /* 4 Gigabit */
- [0xac] = { 512, 8, 0, 0, LP_OPTIONS },
- [0xdc] = { 512, 8, 0, 0, LP_OPTIONS },
- [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 },
- [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 },
-
- /* 8 Gigabit */
- [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS },
- [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS },
- [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 },
- [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 },
-
- /* 16 Gigabit */
- [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS },
- [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS },
- [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 },
- [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 },
-};
-
-static void nand_reset(DeviceState *dev)
-{
- NANDFlashState *s = NAND(dev);
- s->cmd = NAND_CMD_READ0;
- s->addr = 0;
- s->addrlen = 0;
- s->iolen = 0;
- s->offset = 0;
- s->status &= NAND_IOSTATUS_UNPROTCT;
- s->status |= NAND_IOSTATUS_READY;
-}
-
-static inline void nand_pushio_byte(NANDFlashState *s, uint8_t value)
-{
- s->ioaddr[s->iolen++] = value;
- for (value = s->buswidth; --value;) {
- s->ioaddr[s->iolen++] = 0;
- }
-}
-
-/*
- * nand_load_block: Load block containing (s->addr + @offset).
- * Returns length of data available at @offset in this block.
- */
-static unsigned nand_load_block(NANDFlashState *s, unsigned offset)
-{
- unsigned iolen;
-
- if (!s->blk_load(s, s->addr, offset)) {
- return 0;
- }
-
- iolen = (1 << s->page_shift);
- if (s->gnd) {
- iolen += 1 << s->oob_shift;
- }
- assert(offset <= iolen);
- iolen -= offset;
-
- return iolen;
-}
-
-static void nand_command(NANDFlashState *s)
-{
- switch (s->cmd) {
- case NAND_CMD_READ0:
- s->iolen = 0;
- break;
-
- case NAND_CMD_READID:
- s->ioaddr = s->io;
- s->iolen = 0;
- nand_pushio_byte(s, s->manf_id);
- nand_pushio_byte(s, s->chip_id);
- nand_pushio_byte(s, 'Q'); /* Don't-care byte (often 0xa5) */
- if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) {
- /* Page Size, Block Size, Spare Size; bit 6 indicates
- * 8 vs 16 bit width NAND.
- */
- nand_pushio_byte(s, (s->buswidth == 2) ? 0x55 : 0x15);
- } else {
- nand_pushio_byte(s, 0xc0); /* Multi-plane */
- }
- break;
-
- case NAND_CMD_RANDOMREAD2:
- case NAND_CMD_NOSERIALREAD2:
- if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP))
- break;
- s->iolen = nand_load_block(s, s->addr & ((1 << s->addr_shift) - 1));
- break;
-
- case NAND_CMD_RESET:
- nand_reset(DEVICE(s));
- break;
-
- case NAND_CMD_PAGEPROGRAM1:
- s->ioaddr = s->io;
- s->iolen = 0;
- break;
-
- case NAND_CMD_PAGEPROGRAM2:
- if (s->wp) {
- s->blk_write(s);
- }
- break;
-
- case NAND_CMD_BLOCKERASE1:
- break;
-
- case NAND_CMD_BLOCKERASE2:
- s->addr &= (1ull << s->addrlen * 8) - 1;
- s->addr <<= nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP ?
- 16 : 8;
-
- if (s->wp) {
- s->blk_erase(s);
- }
- break;
-
- case NAND_CMD_READSTATUS:
- s->ioaddr = s->io;
- s->iolen = 0;
- nand_pushio_byte(s, s->status);
- break;
-
- default:
- printf("%s: Unknown NAND command 0x%02x\n", __func__, s->cmd);
- }
-}
-
-static int nand_pre_save(void *opaque)
-{
- NANDFlashState *s = NAND(opaque);
-
- s->ioaddr_vmstate = s->ioaddr - s->io;
-
- return 0;
-}
-
-static int nand_post_load(void *opaque, int version_id)
-{
- NANDFlashState *s = NAND(opaque);
-
- if (s->ioaddr_vmstate > sizeof(s->io)) {
- return -EINVAL;
- }
- s->ioaddr = s->io + s->ioaddr_vmstate;
-
- return 0;
-}
-
-static const VMStateDescription vmstate_nand = {
- .name = "nand",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_save = nand_pre_save,
- .post_load = nand_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(cle, NANDFlashState),
- VMSTATE_UINT8(ale, NANDFlashState),
- VMSTATE_UINT8(ce, NANDFlashState),
- VMSTATE_UINT8(wp, NANDFlashState),
- VMSTATE_UINT8(gnd, NANDFlashState),
- VMSTATE_BUFFER(io, NANDFlashState),
- VMSTATE_UINT32(ioaddr_vmstate, NANDFlashState),
- VMSTATE_INT32(iolen, NANDFlashState),
- VMSTATE_UINT32(cmd, NANDFlashState),
- VMSTATE_UINT64(addr, NANDFlashState),
- VMSTATE_INT32(addrlen, NANDFlashState),
- VMSTATE_INT32(status, NANDFlashState),
- VMSTATE_INT32(offset, NANDFlashState),
- /* XXX: do we want to save s->storage too? */
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void nand_realize(DeviceState *dev, Error **errp)
-{
- int pagesize;
- NANDFlashState *s = NAND(dev);
- int ret;
-
-
- s->buswidth = nand_flash_ids[s->chip_id].width >> 3;
- s->size = nand_flash_ids[s->chip_id].size << 20;
- if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) {
- s->page_shift = 11;
- s->erase_shift = 6;
- } else {
- s->page_shift = nand_flash_ids[s->chip_id].page_shift;
- s->erase_shift = nand_flash_ids[s->chip_id].erase_shift;
- }
-
- switch (1 << s->page_shift) {
- case 256:
- nand_init_256(s);
- break;
- case 512:
- nand_init_512(s);
- break;
- case 2048:
- nand_init_2048(s);
- break;
- default:
- error_setg(errp, "Unsupported NAND block size %#x",
- 1 << s->page_shift);
- return;
- }
-
- pagesize = 1 << s->oob_shift;
- s->mem_oob = 1;
- if (s->blk) {
- if (!blk_supports_write_perm(s->blk)) {
- error_setg(errp, "Can't use a read-only drive");
- return;
- }
- ret = blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
- BLK_PERM_ALL, errp);
- if (ret < 0) {
- return;
- }
- if (blk_getlength(s->blk) >=
- (s->pages << s->page_shift) + (s->pages << s->oob_shift)) {
- pagesize = 0;
- s->mem_oob = 0;
- }
- } else {
- pagesize += 1 << s->page_shift;
- }
- if (pagesize) {
- s->storage = (uint8_t *) memset(g_malloc(s->pages * pagesize),
- 0xff, s->pages * pagesize);
- }
- /* Give s->ioaddr a sane value in case we save state before it is used. */
- s->ioaddr = s->io;
-}
-
-static Property nand_properties[] = {
- DEFINE_PROP_UINT8("manufacturer_id", NANDFlashState, manf_id, 0),
- DEFINE_PROP_UINT8("chip_id", NANDFlashState, chip_id, 0),
- DEFINE_PROP_DRIVE("drive", NANDFlashState, blk),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void nand_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = nand_realize;
- dc->reset = nand_reset;
- dc->vmsd = &vmstate_nand;
- device_class_set_props(dc, nand_properties);
- set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
-}
-
-static const TypeInfo nand_info = {
- .name = TYPE_NAND,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(NANDFlashState),
- .class_init = nand_class_init,
-};
-
-static void nand_register_types(void)
-{
- type_register_static(&nand_info);
-}
-
-/*
- * Chip inputs are CLE, ALE, CE, WP, GND and eight I/O pins. Chip
- * outputs are R/B and eight I/O pins.
- *
- * CE, WP and R/B are active low.
- */
-void nand_setpins(DeviceState *dev, uint8_t cle, uint8_t ale,
- uint8_t ce, uint8_t wp, uint8_t gnd)
-{
- NANDFlashState *s = NAND(dev);
-
- s->cle = cle;
- s->ale = ale;
- s->ce = ce;
- s->wp = wp;
- s->gnd = gnd;
- if (wp) {
- s->status |= NAND_IOSTATUS_UNPROTCT;
- } else {
- s->status &= ~NAND_IOSTATUS_UNPROTCT;
- }
-}
-
-void nand_getpins(DeviceState *dev, int *rb)
-{
- *rb = 1;
-}
-
-void nand_setio(DeviceState *dev, uint32_t value)
-{
- int i;
- NANDFlashState *s = NAND(dev);
-
- if (!s->ce && s->cle) {
- if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) {
- if (s->cmd == NAND_CMD_READ0 && value == NAND_CMD_LPREAD2)
- return;
- if (value == NAND_CMD_RANDOMREAD1) {
- s->addr &= ~((1 << s->addr_shift) - 1);
- s->addrlen = 0;
- return;
- }
- }
- if (value == NAND_CMD_READ0) {
- s->offset = 0;
- } else if (value == NAND_CMD_READ1) {
- s->offset = 0x100;
- value = NAND_CMD_READ0;
- } else if (value == NAND_CMD_READ2) {
- s->offset = 1 << s->page_shift;
- value = NAND_CMD_READ0;
- }
-
- s->cmd = value;
-
- if (s->cmd == NAND_CMD_READSTATUS ||
- s->cmd == NAND_CMD_PAGEPROGRAM2 ||
- s->cmd == NAND_CMD_BLOCKERASE1 ||
- s->cmd == NAND_CMD_BLOCKERASE2 ||
- s->cmd == NAND_CMD_NOSERIALREAD2 ||
- s->cmd == NAND_CMD_RANDOMREAD2 ||
- s->cmd == NAND_CMD_RESET) {
- nand_command(s);
- }
-
- if (s->cmd != NAND_CMD_RANDOMREAD2) {
- s->addrlen = 0;
- }
- }
-
- if (s->ale) {
- unsigned int shift = s->addrlen * 8;
- uint64_t mask = ~(0xffull << shift);
- uint64_t v = (uint64_t)value << shift;
-
- s->addr = (s->addr & mask) | v;
- s->addrlen ++;
-
- switch (s->addrlen) {
- case 1:
- if (s->cmd == NAND_CMD_READID) {
- nand_command(s);
- }
- break;
- case 2: /* fix cache address as a byte address */
- s->addr <<= (s->buswidth - 1);
- break;
- case 3:
- if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) &&
- (s->cmd == NAND_CMD_READ0 ||
- s->cmd == NAND_CMD_PAGEPROGRAM1)) {
- nand_command(s);
- }
- break;
- case 4:
- if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) &&
- nand_flash_ids[s->chip_id].size < 256 && /* 1Gb or less */
- (s->cmd == NAND_CMD_READ0 ||
- s->cmd == NAND_CMD_PAGEPROGRAM1)) {
- nand_command(s);
- }
- break;
- case 5:
- if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) &&
- nand_flash_ids[s->chip_id].size >= 256 && /* 2Gb or more */
- (s->cmd == NAND_CMD_READ0 ||
- s->cmd == NAND_CMD_PAGEPROGRAM1)) {
- nand_command(s);
- }
- break;
- default:
- break;
- }
- }
-
- if (!s->cle && !s->ale && s->cmd == NAND_CMD_PAGEPROGRAM1) {
- if (s->iolen < (1 << s->page_shift) + (1 << s->oob_shift)) {
- for (i = s->buswidth; i--; value >>= 8) {
- s->io[s->iolen ++] = (uint8_t) (value & 0xff);
- }
- }
- } else if (!s->cle && !s->ale && s->cmd == NAND_CMD_COPYBACKPRG1) {
- if ((s->addr & ((1 << s->addr_shift) - 1)) <
- (1 << s->page_shift) + (1 << s->oob_shift)) {
- for (i = s->buswidth; i--; s->addr++, value >>= 8) {
- s->io[s->iolen + (s->addr & ((1 << s->addr_shift) - 1))] =
- (uint8_t) (value & 0xff);
- }
- }
- }
-}
-
-uint32_t nand_getio(DeviceState *dev)
-{
- int offset;
- uint32_t x = 0;
- NANDFlashState *s = NAND(dev);
-
- /* Allow sequential reading */
- if (!s->iolen && s->cmd == NAND_CMD_READ0) {
- offset = (int) (s->addr & ((1 << s->addr_shift) - 1)) + s->offset;
- s->offset = 0;
- s->iolen = nand_load_block(s, offset);
- }
-
- if (s->ce || s->iolen <= 0) {
- return 0;
- }
-
- for (offset = s->buswidth; offset--;) {
- x |= s->ioaddr[offset] << (offset << 3);
- }
- /* after receiving READ STATUS command all subsequent reads will
- * return the status register value until another command is issued
- */
- if (s->cmd != NAND_CMD_READSTATUS) {
- s->addr += s->buswidth;
- s->ioaddr += s->buswidth;
- s->iolen -= s->buswidth;
- }
- return x;
-}
-
-uint32_t nand_getbuswidth(DeviceState *dev)
-{
- NANDFlashState *s = (NANDFlashState *) dev;
- return s->buswidth << 3;
-}
-
-DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id)
-{
- DeviceState *dev;
-
- if (nand_flash_ids[chip_id].size == 0) {
- hw_error("%s: Unsupported NAND chip ID.\n", __func__);
- }
- dev = qdev_new(TYPE_NAND);
- qdev_prop_set_uint8(dev, "manufacturer_id", manf_id);
- qdev_prop_set_uint8(dev, "chip_id", chip_id);
- if (blk) {
- qdev_prop_set_drive_err(dev, "drive", blk, &error_fatal);
- }
-
- qdev_realize(dev, NULL, &error_fatal);
- return dev;
-}
-
-type_init(nand_register_types)
-
-#else
-
-/* Program a single page */
-static void glue(nand_blk_write_, NAND_PAGE_SIZE)(NANDFlashState *s)
-{
- uint64_t off, page, sector, soff;
- uint8_t iobuf[(PAGE_SECTORS + 2) * 0x200];
- if (PAGE(s->addr) >= s->pages)
- return;
-
- if (!s->blk) {
- mem_and(s->storage + PAGE_START(s->addr) + (s->addr & PAGE_MASK) +
- s->offset, s->io, s->iolen);
- } else if (s->mem_oob) {
- sector = SECTOR(s->addr);
- off = (s->addr & PAGE_MASK) + s->offset;
- soff = SECTOR_OFFSET(s->addr);
- if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS,
- PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) {
- printf("%s: read error in sector %" PRIu64 "\n", __func__, sector);
- return;
- }
-
- mem_and(iobuf + (soff | off), s->io, MIN(s->iolen, NAND_PAGE_SIZE - off));
- if (off + s->iolen > NAND_PAGE_SIZE) {
- page = PAGE(s->addr);
- mem_and(s->storage + (page << OOB_SHIFT), s->io + NAND_PAGE_SIZE - off,
- MIN(OOB_SIZE, off + s->iolen - NAND_PAGE_SIZE));
- }
-
- if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS,
- PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n", __func__, sector);
- }
- } else {
- off = PAGE_START(s->addr) + (s->addr & PAGE_MASK) + s->offset;
- sector = off >> 9;
- soff = off & 0x1ff;
- if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS,
- (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) {
- printf("%s: read error in sector %" PRIu64 "\n", __func__, sector);
- return;
- }
-
- mem_and(iobuf + soff, s->io, s->iolen);
-
- if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS,
- (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n", __func__, sector);
- }
- }
- s->offset = 0;
-}
-
-/* Erase a single block */
-static void glue(nand_blk_erase_, NAND_PAGE_SIZE)(NANDFlashState *s)
-{
- uint64_t i, page, addr;
- uint8_t iobuf[0x200] = { [0 ... 0x1ff] = 0xff, };
- addr = s->addr & ~((1 << (ADDR_SHIFT + s->erase_shift)) - 1);
-
- if (PAGE(addr) >= s->pages) {
- return;
- }
-
- if (!s->blk) {
- memset(s->storage + PAGE_START(addr),
- 0xff, (NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift);
- } else if (s->mem_oob) {
- memset(s->storage + (PAGE(addr) << OOB_SHIFT),
- 0xff, OOB_SIZE << s->erase_shift);
- i = SECTOR(addr);
- page = SECTOR(addr + (1 << (ADDR_SHIFT + s->erase_shift)));
- for (; i < page; i ++)
- if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n", __func__, i);
- }
- } else {
- addr = PAGE_START(addr);
- page = addr >> 9;
- if (blk_pread(s->blk, page << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: read error in sector %" PRIu64 "\n", __func__, page);
- }
- memset(iobuf + (addr & 0x1ff), 0xff, (~addr & 0x1ff) + 1);
- if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n", __func__, page);
- }
-
- memset(iobuf, 0xff, 0x200);
- i = (addr & ~0x1ff) + 0x200;
- for (addr += ((NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift) - 0x200;
- i < addr; i += 0x200) {
- if (blk_pwrite(s->blk, i, BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n",
- __func__, i >> 9);
- }
- }
-
- page = i >> 9;
- if (blk_pread(s->blk, page << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: read error in sector %" PRIu64 "\n", __func__, page);
- }
- memset(iobuf, 0xff, ((addr - 1) & 0x1ff) + 1);
- if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, iobuf, 0) < 0) {
- printf("%s: write error in sector %" PRIu64 "\n", __func__, page);
- }
- }
-}
-
-static bool glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s,
- uint64_t addr, unsigned offset)
-{
- if (PAGE(addr) >= s->pages) {
- return false;
- }
-
- if (offset > NAND_PAGE_SIZE + OOB_SIZE) {
- return false;
- }
-
- if (s->blk) {
- if (s->mem_oob) {
- if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS,
- PAGE_SECTORS << BDRV_SECTOR_BITS, s->io, 0) < 0) {
- printf("%s: read error in sector %" PRIu64 "\n",
- __func__, SECTOR(addr));
- }
- memcpy(s->io + SECTOR_OFFSET(s->addr) + NAND_PAGE_SIZE,
- s->storage + (PAGE(s->addr) << OOB_SHIFT),
- OOB_SIZE);
- s->ioaddr = s->io + SECTOR_OFFSET(s->addr) + offset;
- } else {
- if (blk_pread(s->blk, PAGE_START(addr),
- (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, s->io, 0)
- < 0) {
- printf("%s: read error in sector %" PRIu64 "\n",
- __func__, PAGE_START(addr) >> 9);
- }
- s->ioaddr = s->io + (PAGE_START(addr) & 0x1ff) + offset;
- }
- } else {
- memcpy(s->io, s->storage + PAGE_START(s->addr) +
- offset, NAND_PAGE_SIZE + OOB_SIZE - offset);
- s->ioaddr = s->io;
- }
-
- return true;
-}
-
-static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s)
-{
- s->oob_shift = PAGE_SHIFT - 5;
- s->pages = s->size >> PAGE_SHIFT;
- s->addr_shift = ADDR_SHIFT;
-
- s->blk_erase = glue(nand_blk_erase_, NAND_PAGE_SIZE);
- s->blk_write = glue(nand_blk_write_, NAND_PAGE_SIZE);
- s->blk_load = glue(nand_blk_load_, NAND_PAGE_SIZE);
-}
-
-# undef NAND_PAGE_SIZE
-# undef PAGE_SHIFT
-# undef PAGE_SECTORS
-# undef ADDR_SHIFT
-#endif /* NAND_IO */
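The deleted nand.c relied on a self-include trick: the head of the file (guarded by NAND_IO) is the device model proper, and the tail is compiled three times, once per page geometry, with glue() pasting the page size into names such as nand_init_256(). A standalone hedged sketch of the same technique (file and function names are illustrative):

    /*
     * geom_example.c -- sketch of the multi-include pattern from the
     * deleted nand.c: the file includes itself once per geometry and
     * uses token pasting to emit pages_for_256(), pages_for_512(), ...
     */
    #ifndef GEOM_IO

    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)

    #define GEOM_IO
    #define GEOM_PAGE_SIZE 256
    #include "geom_example.c"
    #undef GEOM_PAGE_SIZE
    #define GEOM_PAGE_SIZE 512
    #include "geom_example.c"
    #undef GEOM_PAGE_SIZE

    #else

    static unsigned glue(pages_for_, GEOM_PAGE_SIZE)(unsigned bytes)
    {
        return bytes / GEOM_PAGE_SIZE;   /* specialised per page size */
    }

    #endif /* GEOM_IO */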
diff --git a/hw/block/onenand.c b/hw/block/onenand.c
deleted file mode 100644
index d8a6944..0000000
--- a/hw/block/onenand.c
+++ /dev/null
@@ -1,872 +0,0 @@
-/*
- * OneNAND flash memories emulation.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/hw.h"
-#include "hw/block/flash.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/qdev-properties-system.h"
-#include "sysemu/block-backend.h"
-#include "exec/memory.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "qemu/error-report.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-/* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */
-#define PAGE_SHIFT 11
-
-/* Fixed */
-#define BLOCK_SHIFT (PAGE_SHIFT + 6)
-
-#define TYPE_ONE_NAND "onenand"
-OBJECT_DECLARE_SIMPLE_TYPE(OneNANDState, ONE_NAND)
-
-struct OneNANDState {
- SysBusDevice parent_obj;
-
- struct {
- uint16_t man;
- uint16_t dev;
- uint16_t ver;
- } id;
- int shift;
- hwaddr base;
- qemu_irq intr;
- qemu_irq rdy;
- BlockBackend *blk;
- BlockBackend *blk_cur;
- uint8_t *image;
- uint8_t *otp;
- uint8_t *current;
- MemoryRegion ram;
- MemoryRegion mapped_ram;
- uint8_t current_direction;
- uint8_t *boot[2];
- uint8_t *data[2][2];
- MemoryRegion iomem;
- MemoryRegion container;
- int cycle;
- int otpmode;
-
- uint16_t addr[8];
- uint16_t unladdr[8];
- int bufaddr;
- int count;
- uint16_t command;
- uint16_t config[2];
- uint16_t status;
- uint16_t intstatus;
- uint16_t wpstatus;
-
- ECCState ecc;
-
- int density_mask;
- int secs;
- int secs_cur;
- int blocks;
- uint8_t *blockwp;
-};
-
-enum {
- ONEN_BUF_BLOCK = 0,
- ONEN_BUF_BLOCK2 = 1,
- ONEN_BUF_DEST_BLOCK = 2,
- ONEN_BUF_DEST_PAGE = 3,
- ONEN_BUF_PAGE = 7,
-};
-
-enum {
- ONEN_ERR_CMD = 1 << 10,
- ONEN_ERR_ERASE = 1 << 11,
- ONEN_ERR_PROG = 1 << 12,
- ONEN_ERR_LOAD = 1 << 13,
-};
-
-enum {
- ONEN_INT_RESET = 1 << 4,
- ONEN_INT_ERASE = 1 << 5,
- ONEN_INT_PROG = 1 << 6,
- ONEN_INT_LOAD = 1 << 7,
- ONEN_INT = 1 << 15,
-};
-
-enum {
- ONEN_LOCK_LOCKTIGHTEN = 1 << 0,
- ONEN_LOCK_LOCKED = 1 << 1,
- ONEN_LOCK_UNLOCKED = 1 << 2,
-};
-
-static void onenand_mem_setup(OneNANDState *s)
-{
- /* XXX: We should use IO_MEM_ROMD but we broke it earlier...
- * Both 0x0000 ... 0x01ff and 0x8000 ... 0x800f can be used to
- * write boot commands. Also take note of the BWPS bit. */
- memory_region_init(&s->container, OBJECT(s), "onenand",
- 0x10000 << s->shift);
- memory_region_add_subregion(&s->container, 0, &s->iomem);
- memory_region_init_alias(&s->mapped_ram, OBJECT(s), "onenand-mapped-ram",
- &s->ram, 0x0200 << s->shift,
- 0xbe00 << s->shift);
- memory_region_add_subregion_overlap(&s->container,
- 0x0200 << s->shift,
- &s->mapped_ram,
- 1);
-}
-
-static void onenand_intr_update(OneNANDState *s)
-{
- qemu_set_irq(s->intr, ((s->intstatus >> 15) ^ (~s->config[0] >> 6)) & 1);
-}
-
-static int onenand_pre_save(void *opaque)
-{
- OneNANDState *s = opaque;
- if (s->current == s->otp) {
- s->current_direction = 1;
- } else if (s->current == s->image) {
- s->current_direction = 2;
- } else {
- s->current_direction = 0;
- }
-
- return 0;
-}
-
-static int onenand_post_load(void *opaque, int version_id)
-{
- OneNANDState *s = opaque;
- switch (s->current_direction) {
- case 0:
- break;
- case 1:
- s->current = s->otp;
- break;
- case 2:
- s->current = s->image;
- break;
- default:
- return -1;
- }
- onenand_intr_update(s);
- return 0;
-}
-
-static const VMStateDescription vmstate_onenand = {
- .name = "onenand",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_save = onenand_pre_save,
- .post_load = onenand_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(current_direction, OneNANDState),
- VMSTATE_INT32(cycle, OneNANDState),
- VMSTATE_INT32(otpmode, OneNANDState),
- VMSTATE_UINT16_ARRAY(addr, OneNANDState, 8),
- VMSTATE_UINT16_ARRAY(unladdr, OneNANDState, 8),
- VMSTATE_INT32(bufaddr, OneNANDState),
- VMSTATE_INT32(count, OneNANDState),
- VMSTATE_UINT16(command, OneNANDState),
- VMSTATE_UINT16_ARRAY(config, OneNANDState, 2),
- VMSTATE_UINT16(status, OneNANDState),
- VMSTATE_UINT16(intstatus, OneNANDState),
- VMSTATE_UINT16(wpstatus, OneNANDState),
- VMSTATE_INT32(secs_cur, OneNANDState),
- VMSTATE_PARTIAL_VBUFFER(blockwp, OneNANDState, blocks),
- VMSTATE_UINT8(ecc.cp, OneNANDState),
- VMSTATE_UINT16_ARRAY(ecc.lp, OneNANDState, 2),
- VMSTATE_UINT16(ecc.count, OneNANDState),
- VMSTATE_BUFFER_POINTER_UNSAFE(otp, OneNANDState, 0,
- ((64 + 2) << PAGE_SHIFT)),
- VMSTATE_END_OF_LIST()
- }
-};
-
-/* Hot reset (Reset OneNAND command) or warm reset (RP pin low) */
-static void onenand_reset(OneNANDState *s, int cold)
-{
- memset(&s->addr, 0, sizeof(s->addr));
- s->command = 0;
- s->count = 1;
- s->bufaddr = 0;
- s->config[0] = 0x40c0;
- s->config[1] = 0x0000;
- onenand_intr_update(s);
- qemu_irq_raise(s->rdy);
- s->status = 0x0000;
- s->intstatus = cold ? 0x8080 : 0x8010;
- s->unladdr[0] = 0;
- s->unladdr[1] = 0;
- s->wpstatus = 0x0002;
- s->cycle = 0;
- s->otpmode = 0;
- s->blk_cur = s->blk;
- s->current = s->image;
- s->secs_cur = s->secs;
-
- if (cold) {
- /* Lock the whole flash */
- memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks);
-
- if (s->blk_cur && blk_pread(s->blk_cur, 0, 8 << BDRV_SECTOR_BITS,
- s->boot[0], 0) < 0) {
- hw_error("%s: Loading the BootRAM failed.\n", __func__);
- }
- }
-}
-
-static void onenand_system_reset(DeviceState *dev)
-{
- OneNANDState *s = ONE_NAND(dev);
-
- onenand_reset(s, 1);
-}
-
-static inline int onenand_load_main(OneNANDState *s, int sec, int secn,
- void *dest)
-{
- assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec);
- assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn);
- if (s->blk_cur) {
- return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS,
- secn << BDRV_SECTOR_BITS, dest, 0) < 0;
- } else if (sec + secn > s->secs_cur) {
- return 1;
- }
-
- memcpy(dest, s->current + (sec << 9), secn << 9);
-
- return 0;
-}
-
-static inline int onenand_prog_main(OneNANDState *s, int sec, int secn,
- void *src)
-{
- int result = 0;
-
- if (secn > 0) {
- uint32_t size = secn << BDRV_SECTOR_BITS;
- uint32_t offset = sec << BDRV_SECTOR_BITS;
- assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec);
- assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn);
- const uint8_t *sp = (const uint8_t *)src;
- uint8_t *dp = 0;
- if (s->blk_cur) {
- dp = g_malloc(size);
- if (!dp || blk_pread(s->blk_cur, offset, size, dp, 0) < 0) {
- result = 1;
- }
- } else {
- if (sec + secn > s->secs_cur) {
- result = 1;
- } else {
- dp = (uint8_t *)s->current + offset;
- }
- }
- if (!result) {
- uint32_t i;
- for (i = 0; i < size; i++) {
- dp[i] &= sp[i];
- }
- if (s->blk_cur) {
- result = blk_pwrite(s->blk_cur, offset, size, dp, 0) < 0;
- }
- }
- if (dp && s->blk_cur) {
- g_free(dp);
- }
- }
-
- return result;
-}
-
-static inline int onenand_load_spare(OneNANDState *s, int sec, int secn,
- void *dest)
-{
- uint8_t buf[512];
-
- if (s->blk_cur) {
- uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS;
- if (blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, buf, 0) < 0) {
- return 1;
- }
- memcpy(dest, buf + ((sec & 31) << 4), secn << 4);
- } else if (sec + secn > s->secs_cur) {
- return 1;
- } else {
- memcpy(dest, s->current + (s->secs_cur << 9) + (sec << 4), secn << 4);
- }
-
- return 0;
-}
-
-static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
- void *src)
-{
- int result = 0;
- if (secn > 0) {
- const uint8_t *sp = (const uint8_t *)src;
- uint8_t *dp = 0, *dpp = 0;
- uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS;
- assert(UINT32_MAX >> BDRV_SECTOR_BITS > s->secs_cur + (sec >> 5));
- if (s->blk_cur) {
- dp = g_malloc(512);
- if (!dp
- || blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp, 0) < 0) {
- result = 1;
- } else {
- dpp = dp + ((sec & 31) << 4);
- }
- } else {
- if (sec + secn > s->secs_cur) {
- result = 1;
- } else {
- dpp = s->current + (s->secs_cur << 9) + (sec << 4);
- }
- }
- if (!result) {
- uint32_t i;
- for (i = 0; i < (secn << 4); i++) {
- dpp[i] &= sp[i];
- }
- if (s->blk_cur) {
- result = blk_pwrite(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp,
- 0) < 0;
- }
- }
- g_free(dp);
- }
- return result;
-}
-
-static inline int onenand_erase(OneNANDState *s, int sec, int num)
-{
- uint8_t *blankbuf, *tmpbuf;
-
- blankbuf = g_malloc(512);
- tmpbuf = g_malloc(512);
- memset(blankbuf, 0xff, 512);
- for (; num > 0; num--, sec++) {
- if (s->blk_cur) {
- int erasesec = s->secs_cur + (sec >> 5);
- if (blk_pwrite(s->blk_cur, sec << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, blankbuf, 0) < 0) {
- goto fail;
- }
- if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) {
- goto fail;
- }
- memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4);
- if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS,
- BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) {
- goto fail;
- }
- } else {
- if (sec + 1 > s->secs_cur) {
- goto fail;
- }
- memcpy(s->current + (sec << 9), blankbuf, 512);
- memcpy(s->current + (s->secs_cur << 9) + (sec << 4),
- blankbuf, 1 << 4);
- }
- }
-
- g_free(tmpbuf);
- g_free(blankbuf);
- return 0;
-
-fail:
- g_free(tmpbuf);
- g_free(blankbuf);
- return 1;
-}
-
-static void onenand_command(OneNANDState *s)
-{
- int b;
- int sec;
- void *buf;
-#define SETADDR(block, page) \
- sec = (s->addr[page] & 3) + \
- ((((s->addr[page] >> 2) & 0x3f) + \
- (((s->addr[block] & 0xfff) | \
- (s->addr[block] >> 15 ? s->density_mask : 0)) \
- << 6)) \
- << (PAGE_SHIFT - 9));
-#define SETBUF_M() \
- buf = (s->bufaddr & 8) ? s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \
- buf += (s->bufaddr & 3) << 9;
-#define SETBUF_S() \
- buf = (s->bufaddr & 8) ? \
- s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \
- buf += (s->bufaddr & 3) << 4;
-
- switch (s->command) {
- case 0x00: /* Load single/multiple sector data unit into buffer */
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
-
- SETBUF_M()
- if (onenand_load_main(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
-
-#if 0
- SETBUF_S()
- if (onenand_load_spare(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
-#endif
-
- /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
- * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
- * then we need two split the read/write into two chunks.
- */
- s->intstatus |= ONEN_INT | ONEN_INT_LOAD;
- break;
- case 0x13: /* Load single/multiple spare sector into buffer */
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
-
- SETBUF_S()
- if (onenand_load_spare(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
-
- /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
- * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
- * then we need two split the read/write into two chunks.
- */
- s->intstatus |= ONEN_INT | ONEN_INT_LOAD;
- break;
- case 0x80: /* Program single/multiple sector data unit from buffer */
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
-
- SETBUF_M()
- if (onenand_prog_main(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
-
-#if 0
- SETBUF_S()
- if (onenand_prog_spare(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
-#endif
-
- /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
- * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
- * then we need two split the read/write into two chunks.
- */
- s->intstatus |= ONEN_INT | ONEN_INT_PROG;
- break;
- case 0x1a: /* Program single/multiple spare area sector from buffer */
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
-
- SETBUF_S()
- if (onenand_prog_spare(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
-
- /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
- * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
- * then we need two split the read/write into two chunks.
- */
- s->intstatus |= ONEN_INT | ONEN_INT_PROG;
- break;
- case 0x1b: /* Copy-back program */
- SETBUF_S()
-
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
- if (onenand_load_main(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
-
- SETADDR(ONEN_BUF_DEST_BLOCK, ONEN_BUF_DEST_PAGE)
- if (onenand_prog_main(s, sec, s->count, buf))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
-
- /* TODO: spare areas */
-
- s->intstatus |= ONEN_INT | ONEN_INT_PROG;
- break;
-
- case 0x23: /* Unlock NAND array block(s) */
- s->intstatus |= ONEN_INT;
-
- /* XXX the previous (?) area should be locked automatically */
- for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) {
- if (b >= s->blocks) {
- s->status |= ONEN_ERR_CMD;
- break;
- }
- if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN)
- break;
-
- s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED;
- }
- break;
- case 0x27: /* Unlock All NAND array blocks */
- s->intstatus |= ONEN_INT;
-
- for (b = 0; b < s->blocks; b ++) {
- if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN)
- break;
-
- s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED;
- }
- break;
-
- case 0x2a: /* Lock NAND array block(s) */
- s->intstatus |= ONEN_INT;
-
- for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) {
- if (b >= s->blocks) {
- s->status |= ONEN_ERR_CMD;
- break;
- }
- if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN)
- break;
-
- s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKED;
- }
- break;
- case 0x2c: /* Lock-tight NAND array block(s) */
- s->intstatus |= ONEN_INT;
-
- for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) {
- if (b >= s->blocks) {
- s->status |= ONEN_ERR_CMD;
- break;
- }
- if (s->blockwp[b] == ONEN_LOCK_UNLOCKED)
- continue;
-
- s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKTIGHTEN;
- }
- break;
-
- case 0x71: /* Erase-Verify-Read */
- s->intstatus |= ONEN_INT;
- break;
- case 0x95: /* Multi-block erase */
- qemu_irq_pulse(s->intr);
- /* Fall through. */
- case 0x94: /* Block erase */
- sec = ((s->addr[ONEN_BUF_BLOCK] & 0xfff) |
- (s->addr[ONEN_BUF_BLOCK] >> 15 ? s->density_mask : 0))
- << (BLOCK_SHIFT - 9);
- if (onenand_erase(s, sec, 1 << (BLOCK_SHIFT - 9)))
- s->status |= ONEN_ERR_CMD | ONEN_ERR_ERASE;
-
- s->intstatus |= ONEN_INT | ONEN_INT_ERASE;
- break;
- case 0xb0: /* Erase suspend */
- break;
- case 0x30: /* Erase resume */
- s->intstatus |= ONEN_INT | ONEN_INT_ERASE;
- break;
-
- case 0xf0: /* Reset NAND Flash core */
- onenand_reset(s, 0);
- break;
- case 0xf3: /* Reset OneNAND */
- onenand_reset(s, 0);
- break;
-
- case 0x65: /* OTP Access */
- s->intstatus |= ONEN_INT;
- s->blk_cur = NULL;
- s->current = s->otp;
- s->secs_cur = 1 << (BLOCK_SHIFT - 9);
- s->addr[ONEN_BUF_BLOCK] = 0;
- s->otpmode = 1;
- break;
-
- default:
- s->status |= ONEN_ERR_CMD;
- s->intstatus |= ONEN_INT;
- qemu_log_mask(LOG_GUEST_ERROR, "unknown OneNAND command %x\n",
- s->command);
- }
-
- onenand_intr_update(s);
-}
-
-static uint64_t onenand_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- OneNANDState *s = (OneNANDState *) opaque;
- int offset = addr >> s->shift;
-
- switch (offset) {
- case 0x0000 ... 0xbffe:
- return lduw_le_p(s->boot[0] + addr);
-
- case 0xf000: /* Manufacturer ID */
- return s->id.man;
- case 0xf001: /* Device ID */
- return s->id.dev;
- case 0xf002: /* Version ID */
- return s->id.ver;
- /* TODO: get the following values from a real chip! */
- case 0xf003: /* Data Buffer size */
- return 1 << PAGE_SHIFT;
- case 0xf004: /* Boot Buffer size */
- return 0x200;
- case 0xf005: /* Amount of buffers */
- return 1 | (2 << 8);
- case 0xf006: /* Technology */
- return 0;
-
- case 0xf100 ... 0xf107: /* Start addresses */
- return s->addr[offset - 0xf100];
-
- case 0xf200: /* Start buffer */
- return (s->bufaddr << 8) | ((s->count - 1) & (1 << (PAGE_SHIFT - 10)));
-
- case 0xf220: /* Command */
- return s->command;
- case 0xf221: /* System Configuration 1 */
- return s->config[0] & 0xffe0;
- case 0xf222: /* System Configuration 2 */
- return s->config[1];
-
- case 0xf240: /* Controller Status */
- return s->status;
- case 0xf241: /* Interrupt */
- return s->intstatus;
- case 0xf24c: /* Unlock Start Block Address */
- return s->unladdr[0];
- case 0xf24d: /* Unlock End Block Address */
- return s->unladdr[1];
- case 0xf24e: /* Write Protection Status */
- return s->wpstatus;
-
- case 0xff00: /* ECC Status */
- return 0x00;
- case 0xff01: /* ECC Result of main area data */
- case 0xff02: /* ECC Result of spare area data */
- case 0xff03: /* ECC Result of main area data */
- case 0xff04: /* ECC Result of spare area data */
- qemu_log_mask(LOG_UNIMP,
- "onenand: ECC result registers unimplemented\n");
- return 0x0000;
- }
-
- qemu_log_mask(LOG_GUEST_ERROR, "read of unknown OneNAND register 0x%x\n",
- offset);
- return 0;
-}
-
-static void onenand_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- OneNANDState *s = (OneNANDState *) opaque;
- int offset = addr >> s->shift;
- int sec;
-
- switch (offset) {
- case 0x0000 ... 0x01ff:
- case 0x8000 ... 0x800f:
- if (s->cycle) {
- s->cycle = 0;
-
- if (value == 0x0000) {
- SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
- onenand_load_main(s, sec,
- 1 << (PAGE_SHIFT - 9), s->data[0][0]);
- s->addr[ONEN_BUF_PAGE] += 4;
- s->addr[ONEN_BUF_PAGE] &= 0xff;
- }
- break;
- }
-
- switch (value) {
- case 0x00f0: /* Reset OneNAND */
- onenand_reset(s, 0);
- break;
-
- case 0x00e0: /* Load Data into Buffer */
- s->cycle = 1;
- break;
-
- case 0x0090: /* Read Identification Data */
- memset(s->boot[0], 0, 3 << s->shift);
- s->boot[0][0 << s->shift] = s->id.man & 0xff;
- s->boot[0][1 << s->shift] = s->id.dev & 0xff;
- s->boot[0][2 << s->shift] = s->wpstatus & 0xff;
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "unknown OneNAND boot command %" PRIx64 "\n",
- value);
- }
- break;
-
- case 0xf100 ... 0xf107: /* Start addresses */
- s->addr[offset - 0xf100] = value;
- break;
-
- case 0xf200: /* Start buffer */
- s->bufaddr = (value >> 8) & 0xf;
- if (PAGE_SHIFT == 11)
- s->count = (value & 3) ?: 4;
- else if (PAGE_SHIFT == 10)
- s->count = (value & 1) ?: 2;
- break;
-
- case 0xf220: /* Command */
- if (s->intstatus & (1 << 15))
- break;
- s->command = value;
- onenand_command(s);
- break;
- case 0xf221: /* System Configuration 1 */
- s->config[0] = value;
- onenand_intr_update(s);
- qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1);
- break;
- case 0xf222: /* System Configuration 2 */
- s->config[1] = value;
- break;
-
- case 0xf241: /* Interrupt */
- s->intstatus &= value;
- if ((1 << 15) & ~s->intstatus)
- s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE |
- ONEN_ERR_PROG | ONEN_ERR_LOAD);
- onenand_intr_update(s);
- break;
- case 0xf24c: /* Unlock Start Block Address */
- s->unladdr[0] = value & (s->blocks - 1);
- /* For some reason we have to set the end address to by default
- * be same as start because the software forgets to write anything
- * in there. */
- s->unladdr[1] = value & (s->blocks - 1);
- break;
- case 0xf24d: /* Unlock End Block Address */
- s->unladdr[1] = value & (s->blocks - 1);
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "write to unknown OneNAND register 0x%x\n",
- offset);
- }
-}
-
-static const MemoryRegionOps onenand_ops = {
- .read = onenand_read,
- .write = onenand_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void onenand_realize(DeviceState *dev, Error **errp)
-{
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- OneNANDState *s = ONE_NAND(dev);
- uint32_t size = 1 << (24 + ((s->id.dev >> 4) & 7));
- void *ram;
- Error *local_err = NULL;
-
- s->base = (hwaddr)-1;
- s->rdy = NULL;
- s->blocks = size >> BLOCK_SHIFT;
- s->secs = size >> 9;
- s->blockwp = g_malloc(s->blocks);
- s->density_mask = (s->id.dev & 0x08)
- ? (1 << (6 + ((s->id.dev >> 4) & 7))) : 0;
- memory_region_init_io(&s->iomem, OBJECT(s), &onenand_ops, s, "onenand",
- 0x10000 << s->shift);
- if (!s->blk) {
- s->image = memset(g_malloc(size + (size >> 5)),
- 0xff, size + (size >> 5));
- } else {
- if (!blk_supports_write_perm(s->blk)) {
- error_setg(errp, "Can't use a read-only drive");
- return;
- }
- blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
- BLK_PERM_ALL, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- s->blk_cur = s->blk;
- }
- s->otp = memset(g_malloc((64 + 2) << PAGE_SHIFT),
- 0xff, (64 + 2) << PAGE_SHIFT);
- memory_region_init_ram_nomigrate(&s->ram, OBJECT(s), "onenand.ram",
- 0xc000 << s->shift, &error_fatal);
- vmstate_register_ram_global(&s->ram);
- ram = memory_region_get_ram_ptr(&s->ram);
- s->boot[0] = ram + (0x0000 << s->shift);
- s->boot[1] = ram + (0x8000 << s->shift);
- s->data[0][0] = ram + ((0x0200 + (0 << (PAGE_SHIFT - 1))) << s->shift);
- s->data[0][1] = ram + ((0x8010 + (0 << (PAGE_SHIFT - 6))) << s->shift);
- s->data[1][0] = ram + ((0x0200 + (1 << (PAGE_SHIFT - 1))) << s->shift);
- s->data[1][1] = ram + ((0x8010 + (1 << (PAGE_SHIFT - 6))) << s->shift);
- onenand_mem_setup(s);
- sysbus_init_irq(sbd, &s->intr);
- sysbus_init_mmio(sbd, &s->container);
- vmstate_register(VMSTATE_IF(dev),
- ((s->shift & 0x7f) << 24)
- | ((s->id.man & 0xff) << 16)
- | ((s->id.dev & 0xff) << 8)
- | (s->id.ver & 0xff),
- &vmstate_onenand, s);
-}
-
-static Property onenand_properties[] = {
- DEFINE_PROP_UINT16("manufacturer_id", OneNANDState, id.man, 0),
- DEFINE_PROP_UINT16("device_id", OneNANDState, id.dev, 0),
- DEFINE_PROP_UINT16("version_id", OneNANDState, id.ver, 0),
- DEFINE_PROP_INT32("shift", OneNANDState, shift, 0),
- DEFINE_PROP_DRIVE("drive", OneNANDState, blk),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void onenand_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = onenand_realize;
- dc->reset = onenand_system_reset;
- device_class_set_props(dc, onenand_properties);
-}
-
-static const TypeInfo onenand_info = {
- .name = TYPE_ONE_NAND,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(OneNANDState),
- .class_init = onenand_class_init,
-};
-
-static void onenand_register_types(void)
-{
- type_register_static(&onenand_info);
-}
-
-void *onenand_raw_otp(DeviceState *onenand_device)
-{
- OneNANDState *s = ONE_NAND(onenand_device);
-
- return s->otp;
-}
-
-type_init(onenand_register_types)
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index c8f1cf5..168101d 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -41,18 +41,17 @@
#include "hw/block/flash.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
-#include "qemu/module.h"
#include "qemu/option.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/runstate.h"
+#include "system/blockdev.h"
+#include "system/runstate.h"
#include "trace.h"
#define PFLASH_BE 0
@@ -614,6 +613,7 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
if (!pfl->counter) {
trace_pflash_write(pfl->name, "block write finished");
pfl->wcycle++;
+ break;
}
pfl->counter--;
@@ -895,7 +895,7 @@ static void pflash_cfi01_system_reset(DeviceState *dev)
pfl->blk_offset = -1;
}
-static Property pflash_cfi01_properties[] = {
+static const Property pflash_cfi01_properties[] = {
DEFINE_PROP_DRIVE("drive", PFlashCFI01, blk),
/* num-blocks is the number of blocks actually visible to the guest,
* ie the total size of the device divided by the sector length.
@@ -932,34 +932,29 @@ static Property pflash_cfi01_properties[] = {
DEFINE_PROP_STRING("name", PFlashCFI01, name),
DEFINE_PROP_BOOL("old-multiple-chip-handling", PFlashCFI01,
old_multiple_chip_handling, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pflash_cfi01_class_init(ObjectClass *klass, void *data)
+static void pflash_cfi01_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pflash_cfi01_system_reset;
+ device_class_set_legacy_reset(dc, pflash_cfi01_system_reset);
dc->realize = pflash_cfi01_realize;
device_class_set_props(dc, pflash_cfi01_properties);
dc->vmsd = &vmstate_pflash;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
-
-static const TypeInfo pflash_cfi01_info = {
- .name = TYPE_PFLASH_CFI01,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PFlashCFI01),
- .class_init = pflash_cfi01_class_init,
+static const TypeInfo pflash_cfi01_types[] = {
+ {
+ .name = TYPE_PFLASH_CFI01,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PFlashCFI01),
+ .class_init = pflash_cfi01_class_init,
+ },
};
-static void pflash_cfi01_register_types(void)
-{
- type_register_static(&pflash_cfi01_info);
-}
-
-type_init(pflash_cfi01_register_types)
+DEFINE_TYPES(pflash_cfi01_types)
PFlashCFI01 *pflash_cfi01_register(hwaddr base,
const char *name,
diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c
index 2314142..3244b69 100644
--- a/hw/block/pflash_cfi02.c
+++ b/hw/block/pflash_cfi02.c
@@ -41,7 +41,7 @@
#include "qemu/error-report.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
@@ -937,7 +937,7 @@ static void pflash_cfi02_reset(DeviceState *dev)
pflash_reset_state_machine(pfl);
}
-static Property pflash_cfi02_properties[] = {
+static const Property pflash_cfi02_properties[] = {
DEFINE_PROP_DRIVE("drive", PFlashCFI02, blk),
DEFINE_PROP_UINT32("num-blocks", PFlashCFI02, uniform_nb_blocs, 0),
DEFINE_PROP_UINT32("sector-length", PFlashCFI02, uniform_sector_len, 0),
@@ -959,7 +959,6 @@ static Property pflash_cfi02_properties[] = {
DEFINE_PROP_UINT16("unlock-addr0", PFlashCFI02, unlock_addr0, 0),
DEFINE_PROP_UINT16("unlock-addr1", PFlashCFI02, unlock_addr1, 0),
DEFINE_PROP_STRING("name", PFlashCFI02, name),
- DEFINE_PROP_END_OF_LIST(),
};
static void pflash_cfi02_unrealize(DeviceState *dev)
@@ -969,12 +968,12 @@ static void pflash_cfi02_unrealize(DeviceState *dev)
g_free(pfl->sector_erase_map);
}
-static void pflash_cfi02_class_init(ObjectClass *klass, void *data)
+static void pflash_cfi02_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pflash_cfi02_realize;
- dc->reset = pflash_cfi02_reset;
+ device_class_set_legacy_reset(dc, pflash_cfi02_reset);
dc->unrealize = pflash_cfi02_unrealize;
device_class_set_props(dc, pflash_cfi02_properties);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
diff --git a/hw/block/swim.c b/hw/block/swim.c
index 44761c1..ad04736 100644
--- a/hw/block/swim.c
+++ b/hw/block/swim.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/block/block.h"
@@ -166,10 +166,9 @@ static const BlockDevOps swim_block_ops = {
.change_media_cb = swim_change_cb,
};
-static Property swim_drive_properties[] = {
+static const Property swim_drive_properties[] = {
DEFINE_PROP_INT32("unit", SWIMDrive, unit, -1),
DEFINE_BLOCK_PROPERTIES(SWIMDrive, conf),
- DEFINE_PROP_END_OF_LIST(),
};
static void swim_drive_realize(DeviceState *qdev, Error **errp)
@@ -254,7 +253,7 @@ static void swim_drive_realize(DeviceState *qdev, Error **errp)
blk_set_dev_ops(drive->blk, &swim_block_ops, drive);
}
-static void swim_drive_class_init(ObjectClass *klass, void *data)
+static void swim_drive_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = swim_drive_realize;
@@ -551,12 +550,12 @@ static const VMStateDescription vmstate_sysbus_swim = {
}
};
-static void sysbus_swim_class_init(ObjectClass *oc, void *data)
+static void sysbus_swim_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = sysbus_swim_realize;
- dc->reset = sysbus_swim_reset;
+ device_class_set_legacy_reset(dc, sysbus_swim_reset);
dc->vmsd = &vmstate_sysbus_swim;
}
diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c
deleted file mode 100644
index 0984e37..0000000
--- a/hw/block/tc58128.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * TC58128 NAND EEPROM emulation
- *
- * Copyright (c) 2005 Samuel Tardieu
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- * SPDX-License-Identifier: MIT
- */
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "hw/sh4/sh.h"
-#include "hw/loader.h"
-#include "sysemu/qtest.h"
-#include "qemu/error-report.h"
-
-#define CE1 0x0100
-#define CE2 0x0200
-#define RE 0x0400
-#define WE 0x0800
-#define ALE 0x1000
-#define CLE 0x2000
-#define RDY1 0x4000
-#define RDY2 0x8000
-#define RDY(n) ((n) == 0 ? RDY1 : RDY2)
-
-typedef enum { WAIT, READ1, READ2, READ3 } state_t;
-
-typedef struct {
- uint8_t *flash_contents;
- state_t state;
- uint32_t address;
- uint8_t address_cycle;
-} tc58128_dev;
-
-static tc58128_dev tc58128_devs[2];
-
-#define FLASH_SIZE (16 * MiB)
-
-static void init_dev(tc58128_dev * dev, const char *filename)
-{
- int ret, blocks;
-
- dev->state = WAIT;
- dev->flash_contents = g_malloc(FLASH_SIZE);
- memset(dev->flash_contents, 0xff, FLASH_SIZE);
- if (filename) {
- /* Load flash image skipping the first block */
- ret = load_image_size(filename, dev->flash_contents + 528 * 32,
- FLASH_SIZE - 528 * 32);
- if (ret < 0) {
- if (!qtest_enabled()) {
- error_report("Could not load flash image %s", filename);
- exit(1);
- }
- } else {
- /* Build first block with number of blocks */
- blocks = DIV_ROUND_UP(ret, 528 * 32);
- dev->flash_contents[0] = blocks & 0xff;
- dev->flash_contents[1] = (blocks >> 8) & 0xff;
- dev->flash_contents[2] = (blocks >> 16) & 0xff;
- dev->flash_contents[3] = (blocks >> 24) & 0xff;
- fprintf(stderr, "loaded %d bytes for %s into flash\n", ret,
- filename);
- }
- }
-}
-
-static void handle_command(tc58128_dev * dev, uint8_t command)
-{
- switch (command) {
- case 0xff:
- fprintf(stderr, "reset flash device\n");
- dev->state = WAIT;
- break;
- case 0x00:
- fprintf(stderr, "read mode 1\n");
- dev->state = READ1;
- dev->address_cycle = 0;
- break;
- case 0x01:
- fprintf(stderr, "read mode 2\n");
- dev->state = READ2;
- dev->address_cycle = 0;
- break;
- case 0x50:
- fprintf(stderr, "read mode 3\n");
- dev->state = READ3;
- dev->address_cycle = 0;
- break;
- default:
- fprintf(stderr, "unknown flash command 0x%02x\n", command);
- abort();
- }
-}
-
-static void handle_address(tc58128_dev * dev, uint8_t data)
-{
- switch (dev->state) {
- case READ1:
- case READ2:
- case READ3:
- switch (dev->address_cycle) {
- case 0:
- dev->address = data;
- if (dev->state == READ2)
- dev->address |= 0x100;
- else if (dev->state == READ3)
- dev->address |= 0x200;
- break;
- case 1:
- dev->address += data * 528 * 0x100;
- break;
- case 2:
- dev->address += data * 528;
- fprintf(stderr, "address pointer in flash: 0x%08x\n",
- dev->address);
- break;
- default:
- /* Invalid data */
- abort();
- }
- dev->address_cycle++;
- break;
- default:
- abort();
- }
-}
-
-static uint8_t handle_read(tc58128_dev * dev)
-{
-#if 0
- if (dev->address % 0x100000 == 0)
- fprintf(stderr, "reading flash at address 0x%08x\n", dev->address);
-#endif
- return dev->flash_contents[dev->address++];
-}
-
-/* We never mark the device as busy, so interrupts cannot be triggered
- XXXXX */
-
-static int tc58128_cb(uint16_t porta, uint16_t portb,
- uint16_t * periph_pdtra, uint16_t * periph_portadir,
- uint16_t * periph_pdtrb, uint16_t * periph_portbdir)
-{
- int dev;
-
- if ((porta & CE1) == 0)
- dev = 0;
- else if ((porta & CE2) == 0)
- dev = 1;
- else
- return 0; /* No device selected */
-
- if ((porta & RE) && (porta & WE)) {
- /* Nothing to do, assert ready and return to input state */
- *periph_portadir &= 0xff00;
- *periph_portadir |= RDY(dev);
- *periph_pdtra |= RDY(dev);
- return 1;
- }
-
- if (porta & CLE) {
- /* Command */
- assert((porta & WE) == 0);
- handle_command(&tc58128_devs[dev], porta & 0x00ff);
- } else if (porta & ALE) {
- assert((porta & WE) == 0);
- handle_address(&tc58128_devs[dev], porta & 0x00ff);
- } else if ((porta & RE) == 0) {
- *periph_portadir |= 0x00ff;
- *periph_pdtra &= 0xff00;
- *periph_pdtra |= handle_read(&tc58128_devs[dev]);
- } else {
- abort();
- }
- return 1;
-}
-
-static sh7750_io_device tc58128 = {
- RE | WE, /* Port A triggers */
- 0, /* Port B triggers */
- tc58128_cb /* Callback */
-};
-
-int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2)
-{
- if (!qtest_enabled()) {
- warn_report_once("The TC58128 flash device is deprecated");
- }
- init_dev(&tc58128_devs[0], zone1);
- init_dev(&tc58128_devs[1], zone2);
- return sh7750_register_io_device(s, &tc58128);
-}
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index fdbc30b..0eebbcd 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -29,8 +29,8 @@
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
static const int user_feature_bits[] = {
VIRTIO_BLK_F_SIZE_MAX,
@@ -51,6 +51,7 @@ static const int user_feature_bits[] = {
VIRTIO_F_RING_PACKED,
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -89,27 +90,39 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
s->blkcfg.wce = blkcfg->wce;
}
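+/*
+ * Re-read the block configuration from the vhost-user backend into
+ * vdev->config and notify the guest.  Used both as the device's
+ * sync_config callback and when the backend reports a config change
+ * (see vhost_user_blk_handle_config_change below).
+ */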
+static int vhost_user_blk_sync_config(DeviceState *dev, Error **errp)
+{
+ int ret;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBlk *s = VHOST_USER_BLK(vdev);
+
+ ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
+ vdev->config_len, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ memcpy(vdev->config, &s->blkcfg, vdev->config_len);
+ virtio_notify_config(vdev);
+
+ return 0;
+}
+
static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
{
int ret;
- VirtIODevice *vdev = dev->vdev;
- VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);
Error *local_err = NULL;
if (!dev->started) {
return 0;
}
- ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg,
- vdev->config_len, &local_err);
+ ret = vhost_user_blk_sync_config(DEVICE(dev->vdev), &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
}
- memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len);
- virtio_notify_config(dev->vdev);
-
return 0;
}
@@ -191,7 +204,7 @@ err_host_notifiers:
return ret;
}
-static void vhost_user_blk_stop(VirtIODevice *vdev)
+static int vhost_user_blk_stop(VirtIODevice *vdev)
{
VHostUserBlk *s = VHOST_USER_BLK(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
@@ -199,26 +212,26 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
int ret;
if (!s->started_vu) {
- return;
+ return 0;
}
s->started_vu = false;
if (!k->set_guest_notifiers) {
- return;
+ return 0;
}
- vhost_dev_stop(&s->dev, vdev, true);
+ ret = vhost_dev_stop(&s->dev, vdev, true);
- ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
- if (ret < 0) {
+ if (k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
+ return -1;
}
vhost_dev_disable_notifiers(&s->dev, vdev);
+ return ret;
}
-static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
+static int vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserBlk *s = VHOST_USER_BLK(vdev);
bool should_start = virtio_device_should_start(vdev, status);
@@ -226,11 +239,11 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
int ret;
if (!s->connected) {
- return;
+ return -1;
}
if (vhost_dev_is_started(&s->dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
@@ -240,9 +253,12 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
qemu_chr_fe_disconnect(&s->chardev);
}
} else {
- vhost_user_blk_stop(vdev);
+ ret = vhost_user_blk_stop(vdev);
+ if (ret < 0) {
+ return ret;
+ }
}
-
+ return 0;
}
static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
@@ -557,7 +573,7 @@ static const VMStateDescription vmstate_vhost_user_blk = {
},
};
-static Property vhost_user_blk_properties[] = {
+static const Property vhost_user_blk_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev),
DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues,
VHOST_USER_BLK_AUTO_NUM_QUEUES),
@@ -568,16 +584,16 @@ static Property vhost_user_blk_properties[] = {
VIRTIO_BLK_F_DISCARD, true),
DEFINE_PROP_BIT64("write-zeroes", VHostUserBlk, parent_obj.host_features,
VIRTIO_BLK_F_WRITE_ZEROES, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_user_blk_class_init(ObjectClass *klass, void *data)
+static void vhost_user_blk_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
device_class_set_props(dc, vhost_user_blk_properties);
dc->vmsd = &vmstate_vhost_user_blk;
+ dc->sync_config = vhost_user_blk_sync_config;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
vdc->realize = vhost_user_blk_device_realize;
vdc->unrealize = vhost_user_blk_device_unrealize;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 73bdfd6..9bab271 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -22,10 +22,10 @@
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/block-ram-registrar.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/blockdev.h"
+#include "system/block-ram-registrar.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
@@ -33,6 +33,7 @@
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
+#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"
@@ -50,12 +51,7 @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
req->mr_next = NULL;
}
-static void virtio_blk_free_request(VirtIOBlockReq *req)
-{
- g_free(req);
-}
-
-static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
+void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
VirtIOBlock *s = req->dev;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
@@ -93,7 +89,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
if (acct_failed) {
block_acct_failed(blk_get_stats(s->blk), &req->acct);
}
- virtio_blk_free_request(req);
+ g_free(req);
}
blk_error_action(s->blk, action, is_read, error);
@@ -136,7 +132,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(s->blk), &req->acct);
- virtio_blk_free_request(req);
+ g_free(req);
}
}
@@ -151,7 +147,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(s->blk), &req->acct);
- virtio_blk_free_request(req);
+ g_free(req);
}
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
@@ -169,7 +165,7 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
if (is_write_zeroes) {
block_acct_done(blk_get_stats(s->blk), &req->acct);
}
- virtio_blk_free_request(req);
+ g_free(req);
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
@@ -214,7 +210,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
fail:
virtio_blk_req_complete(req, status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
@@ -612,7 +608,7 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
g_free(data->zone_report_data.zones);
g_free(data);
}
@@ -661,7 +657,7 @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
return;
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
@@ -677,7 +673,7 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
}
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
@@ -700,7 +696,7 @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
} else {
if (bs->bl.zone_size > capacity - offset) {
/* The zoned device allows the last smaller zone. */
- len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
+ len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1ull);
} else {
len = bs->bl.zone_size;
}
@@ -719,7 +715,7 @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
return 0;
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
return err_status;
}
@@ -750,7 +746,7 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret)
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
g_free(data);
}
@@ -788,7 +784,7 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
out:
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
return err_status;
}
@@ -855,7 +851,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
block_acct_invalid(blk_get_stats(s->blk),
is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
- virtio_blk_free_request(req);
+ g_free(req);
return 0;
}
@@ -911,7 +907,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
VIRTIO_BLK_ID_BYTES));
iov_from_buf(in_iov, in_num, 0, serial, size);
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
- virtio_blk_free_request(req);
+ g_free(req);
break;
}
case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
@@ -943,7 +939,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
out_len > sizeof(dwz_hdr))) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
- virtio_blk_free_request(req);
+ g_free(req);
return 0;
}
@@ -960,14 +956,24 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
is_write_zeroes);
if (err_status != VIRTIO_BLK_S_OK) {
virtio_blk_req_complete(req, err_status);
- virtio_blk_free_request(req);
+ g_free(req);
}
break;
}
default:
- virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
- virtio_blk_free_request(req);
+ {
+ /*
+ * Give subclasses a chance to handle unknown requests. This way the
+ * class lookup is not in the hot path.
+ */
+ VirtIOBlkClass *vbk = VIRTIO_BLK_GET_CLASS(s);
+ if (!vbk->handle_unknown_request ||
+ !vbk->handle_unknown_request(req, mrb, type)) {
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+ g_free(req);
+ }
+ }
}
return 0;
}
@@ -988,7 +994,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
while ((req = virtio_blk_get_request(s, vq))) {
if (virtio_blk_handle_request(req, &mrb)) {
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
break;
}
}
@@ -1038,7 +1044,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
while (req) {
next = req->next;
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
req = next;
}
break;
@@ -1060,7 +1066,7 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
VirtIOBlock *s = opaque;
uint16_t num_queues = s->conf.num_queues;
g_autofree VirtIOBlockReq **vq_rq = NULL;
- VirtIOBlockReq *rq;
+ VirtIOBlockReq *rq = NULL;
if (!running) {
return;
@@ -1121,7 +1127,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
/* No other threads can access req->vq here */
virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
+ g_free(req);
}
}
@@ -1264,7 +1270,7 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
return features;
}
-static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
+static int virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
@@ -1273,7 +1279,7 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
- return;
+ return 0;
}
/* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
@@ -1296,6 +1302,7 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
virtio_vdev_has_feature(vdev,
VIRTIO_BLK_F_WCE));
}
+ return 0;
}
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
@@ -1418,128 +1425,6 @@ static const BlockDevOps virtio_block_ops = {
.drained_end = virtio_blk_drained_end,
};
-static bool
-validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
- uint16_t num_queues, Error **errp)
-{
- g_autofree unsigned long *vqs = bitmap_new(num_queues);
- g_autoptr(GHashTable) iothreads =
- g_hash_table_new(g_str_hash, g_str_equal);
-
- for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
- const char *name = node->value->iothread;
- uint16List *vq;
-
- if (!iothread_by_id(name)) {
- error_setg(errp, "IOThread \"%s\" object does not exist", name);
- return false;
- }
-
- if (!g_hash_table_add(iothreads, (gpointer)name)) {
- error_setg(errp,
- "duplicate IOThread name \"%s\" in iothread-vq-mapping",
- name);
- return false;
- }
-
- if (node != list) {
- if (!!node->value->vqs != !!list->value->vqs) {
- error_setg(errp, "either all items in iothread-vq-mapping "
- "must have vqs or none of them must have it");
- return false;
- }
- }
-
- for (vq = node->value->vqs; vq; vq = vq->next) {
- if (vq->value >= num_queues) {
- error_setg(errp, "vq index %u for IOThread \"%s\" must be "
- "less than num_queues %u in iothread-vq-mapping",
- vq->value, name, num_queues);
- return false;
- }
-
- if (test_and_set_bit(vq->value, vqs)) {
- error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
- "because it is already assigned", vq->value, name);
- return false;
- }
- }
- }
-
- if (list->value->vqs) {
- for (uint16_t i = 0; i < num_queues; i++) {
- if (!test_bit(i, vqs)) {
- error_setg(errp,
- "missing vq %u IOThread assignment in iothread-vq-mapping",
- i);
- return false;
- }
- }
- }
-
- return true;
-}
-
-/**
- * apply_iothread_vq_mapping:
- * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
- * @vq_aio_context: The array of AioContext pointers to fill in.
- * @num_queues: The length of @vq_aio_context.
- * @errp: If an error occurs, a pointer to the area to store the error.
- *
- * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
- * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
- *
- * Returns: %true on success, %false on failure.
- **/
-static bool apply_iothread_vq_mapping(
- IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
- AioContext **vq_aio_context,
- uint16_t num_queues,
- Error **errp)
-{
- IOThreadVirtQueueMappingList *node;
- size_t num_iothreads = 0;
- size_t cur_iothread = 0;
-
- if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
- num_queues, errp)) {
- return false;
- }
-
- for (node = iothread_vq_mapping_list; node; node = node->next) {
- num_iothreads++;
- }
-
- for (node = iothread_vq_mapping_list; node; node = node->next) {
- IOThread *iothread = iothread_by_id(node->value->iothread);
- AioContext *ctx = iothread_get_aio_context(iothread);
-
- /* Released in virtio_blk_vq_aio_context_cleanup() */
- object_ref(OBJECT(iothread));
-
- if (node->value->vqs) {
- uint16List *vq;
-
- /* Explicit vq:IOThread assignment */
- for (vq = node->value->vqs; vq; vq = vq->next) {
- assert(vq->value < num_queues);
- vq_aio_context[vq->value] = ctx;
- }
- } else {
- /* Round-robin vq:IOThread assignment */
- for (unsigned i = cur_iothread; i < num_queues;
- i += num_iothreads) {
- vq_aio_context[i] = ctx;
- }
- }
-
- cur_iothread++;
- }
-
- return true;
-}
-
/* Context: BQL held */
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
{
@@ -1567,21 +1452,12 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
error_setg(errp, "ioeventfd is required for iothread");
return false;
}
-
- /*
- * If ioeventfd is (re-)enabled while the guest is running there could
- * be block jobs that can conflict.
- */
- if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
- return false;
- }
}
s->vq_aio_context = g_new(AioContext *, conf->num_queues);
if (conf->iothread_vq_mapping_list) {
- if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
+ if (!iothread_vq_mapping_apply(conf->iothread_vq_mapping_list,
s->vq_aio_context,
conf->num_queues,
errp)) {
@@ -1615,12 +1491,7 @@ static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
assert(!s->ioeventfd_started);
if (conf->iothread_vq_mapping_list) {
- IOThreadVirtQueueMappingList *node;
-
- for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
- IOThread *iothread = iothread_by_id(node->value->iothread);
- object_unref(OBJECT(iothread));
- }
+ iothread_vq_mapping_cleanup(conf->iothread_vq_mapping_list);
}
if (conf->iothread) {
@@ -1932,7 +1803,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
* called after ->start_ioeventfd() has already set blk's AioContext.
*/
s->change =
- qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
+ qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, NULL, s);
blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
blk_set_dev_ops(s->blk, &virtio_block_ops, s);
@@ -1985,7 +1856,7 @@ static const VMStateDescription vmstate_virtio_blk = {
},
};
-static Property virtio_blk_properties[] = {
+static const Property virtio_blk_properties[] = {
DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
@@ -2014,10 +1885,9 @@ static Property virtio_blk_properties[] = {
conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
conf.x_enable_wce_if_config_wce, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_blk_class_init(ObjectClass *klass, void *data)
+static void virtio_blk_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -2044,6 +1914,7 @@ static const TypeInfo virtio_blk_info = {
.instance_size = sizeof(VirtIOBlock),
.instance_init = virtio_blk_instance_init,
.class_init = virtio_blk_class_init,
+ .class_size = sizeof(VirtIOBlkClass),
};
static void virtio_register_types(void)
diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index aed1d5c..74de897 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -16,16 +16,16 @@
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/visitor.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qom/object_interfaces.h"
#include "hw/block/xen_blkif.h"
#include "hw/qdev-properties.h"
#include "hw/xen/xen-block.h"
#include "hw/xen/xen-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/iothread.h"
+#include "system/blockdev.h"
+#include "system/block-backend.h"
+#include "system/iothread.h"
#include "dataplane/xen-block.h"
#include "hw/xen/interface/io/xs_wire.h"
#include "trace.h"
@@ -239,7 +239,8 @@ static void xen_block_connect(XenDevice *xendev, Error **errp)
return;
}
- if (xen_device_frontend_scanf(xendev, "protocol", "%ms", &str) != 1) {
+ str = xen_device_frontend_read(xendev, "protocol");
+ if (!str) {
/* x86 defaults to the 32-bit protocol even for 64-bit guests. */
if (object_dynamic_cast(OBJECT(qdev_get_machine()), "x86-machine")) {
protocol = BLKIF_PROTOCOL_X86_32;
@@ -407,6 +408,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
}
xen_device_backend_printf(xendev, "info", "%u", blockdev->info);
+ xen_device_backend_printf(xendev, "mode",
+ (blockdev->info & VDISK_READONLY) ? "r" : "w");
xen_device_frontend_printf(xendev, "virtual-device", "%lu",
vdev->number);
@@ -485,7 +488,7 @@ static char *disk_to_vbd_name(unsigned int disk)
static void xen_block_get_vdev(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
XenBlockVdev *vdev = object_field_prop_ptr(obj, prop);
char *str;
@@ -545,7 +548,7 @@ static int vbd_name_to_disk(const char *name, const char **endp,
static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
XenBlockVdev *vdev = object_field_prop_ptr(obj, prop);
char *str, *p;
const char *end;
@@ -659,14 +662,14 @@ invalid:
*
* https://xenbits.xen.org/docs/unstable/man/xen-vbd-interface.7.html
*/
-const PropertyInfo xen_block_prop_vdev = {
- .name = "str",
- .description = "Virtual Disk specifier: d*p*/xvd*/hd*/sd*",
+static const PropertyInfo xen_block_prop_vdev = {
+ .type = "str",
+ .description = "Virtual Disk specifier (d*p*/xvd*/hd*/sd*)",
.get = xen_block_get_vdev,
.set = xen_block_set_vdev,
};
-static Property xen_block_props[] = {
+static const Property xen_block_props[] = {
DEFINE_PROP("vdev", XenBlockDevice, props.vdev,
xen_block_prop_vdev, XenBlockVdev),
DEFINE_BLOCK_PROPERTIES(XenBlockDevice, props.conf),
@@ -674,10 +677,9 @@ static Property xen_block_props[] = {
props.max_ring_page_order, 4),
DEFINE_PROP_LINK("iothread", XenBlockDevice, props.iothread,
TYPE_IOTHREAD, IOThread *),
- DEFINE_PROP_END_OF_LIST()
};
-static void xen_block_class_init(ObjectClass *class, void *data)
+static void xen_block_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class);
@@ -722,7 +724,7 @@ static void xen_disk_realize(XenBlockDevice *blockdev, Error **errp)
blockdev->info = blk_supports_write_perm(conf->blk) ? 0 : VDISK_READONLY;
}
-static void xen_disk_class_init(ObjectClass *class, void *data)
+static void xen_disk_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class);
@@ -769,7 +771,7 @@ static void xen_cdrom_realize(XenBlockDevice *blockdev, Error **errp)
blockdev->info = VDISK_READONLY | VDISK_CDROM;
}
-static void xen_cdrom_class_init(ObjectClass *class, void *data)
+static void xen_cdrom_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class);
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 4fd74ea..9d517f3 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -11,6 +11,12 @@ config PARALLEL
config PL011
bool
+ # The PL011 has both a Rust and a C implementation
+ select PL011_C if !HAVE_RUST
+ select X_PL011_RUST if HAVE_RUST
+
+config PL011_C
+ bool
config SERIAL
bool
@@ -21,6 +27,10 @@ config SERIAL_ISA
depends on ISA_BUS
select SERIAL
+config SERIAL_MM
+ bool
+ select SERIAL
+
config SERIAL_PCI
bool
default y if PCI_DEVICES
@@ -62,6 +72,9 @@ config RENESAS_SCI
config AVR_USART
bool
+config DIVA_GSP
+ bool
+
config MCHP_PFSOC_MMUART
bool
select SERIAL
@@ -74,3 +87,8 @@ config GOLDFISH_TTY
config SHAKTI_UART
bool
+
+config IP_OCTAL_232
+ bool
+ default y
+ depends on IPACK
diff --git a/hw/char/avr_usart.c b/hw/char/avr_usart.c
index 5bcf9db..fae1521 100644
--- a/hw/char/avr_usart.c
+++ b/hw/char/avr_usart.c
@@ -86,7 +86,7 @@ static void update_char_mask(AVRUsartState *usart)
usart->char_mask = 0b11111111;
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -259,9 +259,8 @@ static const MemoryRegionOps avr_usart_ops = {
.impl = {.min_access_size = 1, .max_access_size = 1}
};
-static Property avr_usart_properties[] = {
+static const Property avr_usart_properties[] = {
DEFINE_PROP_CHR("chardev", AVRUsartState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void avr_usart_pr(void *opaque, int irq, int level)
@@ -296,11 +295,11 @@ static void avr_usart_realize(DeviceState *dev, Error **errp)
avr_usart_reset(dev);
}
-static void avr_usart_class_init(ObjectClass *klass, void *data)
+static void avr_usart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = avr_usart_reset;
+ device_class_set_legacy_reset(dc, avr_usart_reset);
device_class_set_props(dc, avr_usart_properties);
dc->realize = avr_usart_realize;
}
diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c
index 83990e2..2b397f2 100644
--- a/hw/char/bcm2835_aux.c
+++ b/hw/char/bcm2835_aux.c
@@ -98,7 +98,7 @@ static uint64_t bcm2835_aux_read(void *opaque, hwaddr offset, unsigned size)
* interrupts are active, besides that this cannot occur. At
* present, we choose to prioritise the rx interrupt, since
* the tx fifo is always empty. */
- if (s->read_count != 0) {
+ if ((s->iir & RX_INT) && s->read_count != 0) {
res |= 0x4;
} else {
res |= 0x2;
@@ -138,7 +138,7 @@ static uint64_t bcm2835_aux_read(void *opaque, hwaddr offset, unsigned size)
res = 0x30e; /* space in the output buffer, empty tx fifo, idle tx/rx */
if (s->read_count > 0) {
res |= 0x1; /* data in input buffer */
- assert(s->read_count < BCM2835_AUX_RX_FIFO_LEN);
+ assert(s->read_count <= BCM2835_AUX_RX_FIFO_LEN);
res |= ((uint32_t)s->read_count) << 16; /* rx fifo fill level */
}
return res;
@@ -221,7 +221,7 @@ static int bcm2835_aux_can_receive(void *opaque)
{
BCM2835AuxState *s = opaque;
- return s->read_count < BCM2835_AUX_RX_FIFO_LEN;
+ return BCM2835_AUX_RX_FIFO_LEN - s->read_count;
}
static void bcm2835_aux_put_fifo(void *opaque, uint8_t value)
@@ -243,7 +243,9 @@ static void bcm2835_aux_put_fifo(void *opaque, uint8_t value)
static void bcm2835_aux_receive(void *opaque, const uint8_t *buf, int size)
{
- bcm2835_aux_put_fifo(opaque, *buf);
+ for (int i = 0; i < size; i++) {
+ bcm2835_aux_put_fifo(opaque, buf[i]);
+ }
}
static const MemoryRegionOps bcm2835_aux_ops = {
@@ -290,12 +292,11 @@ static void bcm2835_aux_realize(DeviceState *dev, Error **errp)
bcm2835_aux_receive, NULL, NULL, s, NULL, true);
}
-static Property bcm2835_aux_props[] = {
+static const Property bcm2835_aux_props[] = {
DEFINE_PROP_CHR("chardev", BCM2835AuxState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void bcm2835_aux_class_init(ObjectClass *oc, void *data)
+static void bcm2835_aux_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index 77d9a2a..0dfa356 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -617,12 +617,11 @@ static const VMStateDescription vmstate_cadence_uart = {
},
};
-static Property cadence_uart_properties[] = {
+static const Property cadence_uart_properties[] = {
DEFINE_PROP_CHR("chardev", CadenceUARTState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cadence_uart_class_init(ObjectClass *klass, void *data)
+static void cadence_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c
index d07cca1..32090f3 100644
--- a/hw/char/cmsdk-apb-uart.c
+++ b/hw/char/cmsdk-apb-uart.c
@@ -377,19 +377,18 @@ static const VMStateDescription cmsdk_apb_uart_vmstate = {
}
};
-static Property cmsdk_apb_uart_properties[] = {
+static const Property cmsdk_apb_uart_properties[] = {
DEFINE_PROP_CHR("chardev", CMSDKAPBUART, chr),
DEFINE_PROP_UINT32("pclk-frq", CMSDKAPBUART, pclk_frq, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cmsdk_apb_uart_class_init(ObjectClass *klass, void *data)
+static void cmsdk_apb_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cmsdk_apb_uart_realize;
dc->vmsd = &cmsdk_apb_uart_vmstate;
- dc->reset = cmsdk_apb_uart_reset;
+ device_class_set_legacy_reset(dc, cmsdk_apb_uart_reset);
device_class_set_props(dc, cmsdk_apb_uart_properties);
}
diff --git a/hw/char/debugcon.c b/hw/char/debugcon.c
index fdb04fe..bf44aaf 100644
--- a/hw/char/debugcon.c
+++ b/hw/char/debugcon.c
@@ -114,14 +114,13 @@ static void debugcon_isa_realizefn(DeviceState *dev, Error **errp)
isa->iobase, &s->io);
}
-static Property debugcon_isa_properties[] = {
+static const Property debugcon_isa_properties[] = {
DEFINE_PROP_UINT32("iobase", ISADebugconState, iobase, 0xe9),
DEFINE_PROP_CHR("chardev", ISADebugconState, state.chr),
DEFINE_PROP_UINT32("readback", ISADebugconState, state.readback, 0xe9),
- DEFINE_PROP_END_OF_LIST(),
};
-static void debugcon_isa_class_initfn(ObjectClass *klass, void *data)
+static void debugcon_isa_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c
index ef2d762..0f6af51 100644
--- a/hw/char/digic-uart.c
+++ b/hw/char/digic-uart.c
@@ -172,17 +172,16 @@ static const VMStateDescription vmstate_digic_uart = {
}
};
-static Property digic_uart_properties[] = {
+static const Property digic_uart_properties[] = {
DEFINE_PROP_CHR("chardev", DigicUartState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void digic_uart_class_init(ObjectClass *klass, void *data)
+static void digic_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = digic_uart_realize;
- dc->reset = digic_uart_reset;
+ device_class_set_legacy_reset(dc, digic_uart_reset);
dc->vmsd = &vmstate_digic_uart;
device_class_set_props(dc, digic_uart_properties);
}
diff --git a/hw/char/diva-gsp.c b/hw/char/diva-gsp.c
new file mode 100644
index 0000000..e1f0713
--- /dev/null
+++ b/hw/char/diva-gsp.c
@@ -0,0 +1,295 @@
+/*
+ * HP Diva GSP controller
+ *
+ * The Diva PCI boards are Remote Management cards for PA-RISC machines.
+ * They come with built-in 16550A multi UARTs for serial consoles
+ * and a mailbox-like memory area for hardware auto-reboot functionality.
+ * GSP stands for "Guardian Service Processor". Later products were marketed
+ * as "Management Processor" (MP).
+ *
+ * Diva cards are multifunction cards. The first part, the aux port,
+ * is not usable on physical machines, but we still try to mimic it here.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (c) 2025 Helge Deller <deller@gmx.de>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "hw/char/serial.h"
+#include "hw/irq.h"
+#include "hw/pci/pci_device.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+
+#define PCI_DEVICE_ID_HP_DIVA 0x1048
+/* various DIVA GSP cards: */
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A
+#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B
+#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1
+#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
+#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
+#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227
+#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282
+#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290
+#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301
+#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a
+
+
+#define PCI_SERIAL_MAX_PORTS 4
+
+typedef struct PCIDivaSerialState {
+ PCIDevice dev;
+ MemoryRegion membar; /* for serial ports */
+ MemoryRegion mailboxbar; /* for hardware mailbox */
+ uint32_t subvendor;
+ uint32_t ports;
+ char *name[PCI_SERIAL_MAX_PORTS];
+ SerialState state[PCI_SERIAL_MAX_PORTS];
+ uint32_t level[PCI_SERIAL_MAX_PORTS];
+ qemu_irq *irqs;
+ bool disable;
+} PCIDivaSerialState;
+
+static void diva_pci_exit(PCIDevice *dev)
+{
+ PCIDivaSerialState *pci = DO_UPCAST(PCIDivaSerialState, dev, dev);
+ SerialState *s;
+ int i;
+
+ for (i = 0; i < pci->ports; i++) {
+ s = pci->state + i;
+ qdev_unrealize(DEVICE(s));
+ memory_region_del_subregion(&pci->membar, &s->io);
+ g_free(pci->name[i]);
+ }
+ qemu_free_irqs(pci->irqs, pci->ports);
+}
+
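+/*
+ * The per-port UART interrupt lines are ORed together onto the card's
+ * single PCI interrupt pin; pci->level[] tracks the state of each line.
+ */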
+static void multi_serial_irq_mux(void *opaque, int n, int level)
+{
+ PCIDivaSerialState *pci = opaque;
+ int i, pending = 0;
+
+ pci->level[n] = level;
+ for (i = 0; i < pci->ports; i++) {
+ if (pci->level[i]) {
+ pending = 1;
+ }
+ }
+ pci_set_irq(&pci->dev, pending);
+}
+
+struct diva_info {
+ unsigned int nports:4; /* number of serial ports */
+ unsigned int omask:12; /* offset mask: BIT(1) -> offset 8 */
+};
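+/*
+ * Each bit n set in omask places a UART at byte offset n * 8 inside BAR 0
+ * (see the offset loop in diva_pci_realize).  For example, the Maestro mask
+ * BIT(0) | BIT(1) | BIT(2) | BIT(7) puts its four ports at offsets
+ * 0x00, 0x08, 0x10 and 0x38.
+ */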
+
+static struct diva_info diva_get_diva_info(PCIDeviceClass *pc)
+{
+ switch (pc->subsystem_id) {
+ case PCI_DEVICE_ID_HP_DIVA_POWERBAR:
+ case PCI_DEVICE_ID_HP_DIVA_HURRICANE:
+ return (struct diva_info) { .nports = 1,
+ .omask = BIT(0) };
+ case PCI_DEVICE_ID_HP_DIVA_TOSCA2:
+ return (struct diva_info) { .nports = 2,
+ .omask = BIT(0) | BIT(1) };
+ case PCI_DEVICE_ID_HP_DIVA_TOSCA1:
+ case PCI_DEVICE_ID_HP_DIVA_HALFDOME:
+ case PCI_DEVICE_ID_HP_DIVA_KEYSTONE:
+ return (struct diva_info) { .nports = 3,
+ .omask = BIT(0) | BIT(1) | BIT(2) };
+ case PCI_DEVICE_ID_HP_DIVA_EVEREST: /* e.g. in rp3410 */
+ return (struct diva_info) { .nports = 3,
+ .omask = BIT(0) | BIT(2) | BIT(7) };
+ case PCI_DEVICE_ID_HP_DIVA_MAESTRO:
+ return (struct diva_info) { .nports = 4,
+ .omask = BIT(0) | BIT(1) | BIT(2) | BIT(7) };
+ }
+ g_assert_not_reached();
+}
+
+
+static void diva_pci_realize(PCIDevice *dev, Error **errp)
+{
+ PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
+ PCIDivaSerialState *pci = DO_UPCAST(PCIDivaSerialState, dev, dev);
+ SerialState *s;
+ struct diva_info di = diva_get_diva_info(pc);
+ size_t i, offset = 0;
+ size_t portmask = di.omask;
+
+ pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */
+ pci->dev.config[PCI_INTERRUPT_PIN] = 1;
+ memory_region_init(&pci->membar, OBJECT(pci), "serial_ports", 4096);
+ pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &pci->membar);
+ pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, di.nports);
+
+ for (i = 0; i < di.nports; i++) {
+ s = pci->state + i;
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ diva_pci_exit(dev);
+ return;
+ }
+ s->irq = pci->irqs[i];
+ pci->name[i] = g_strdup_printf("uart #%zu", i + 1);
+ memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s,
+ pci->name[i], 8);
+
+ /* calculate offset of given port based on bitmask */
+ while ((portmask & BIT(0)) == 0) {
+ offset += 8;
+ portmask >>= 1;
+ }
+ memory_region_add_subregion(&pci->membar, offset, &s->io);
+ offset += 8;
+ portmask >>= 1;
+ pci->ports++;
+ }
+
+ /* mailbox bar */
+ memory_region_init(&pci->mailboxbar, OBJECT(pci), "mailbox", 128 * KiB);
+ pci_register_bar(&pci->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH, &pci->mailboxbar);
+}
+
+static const VMStateDescription vmstate_pci_diva = {
+ .name = "pci-diva-serial",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, PCIDivaSerialState),
+ VMSTATE_STRUCT_ARRAY(state, PCIDivaSerialState, PCI_SERIAL_MAX_PORTS,
+ 0, vmstate_serial, SerialState),
+ VMSTATE_UINT32_ARRAY(level, PCIDivaSerialState, PCI_SERIAL_MAX_PORTS),
+ VMSTATE_BOOL(disable, PCIDivaSerialState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const Property diva_serial_properties[] = {
+ DEFINE_PROP_BOOL("disable", PCIDivaSerialState, disable, false),
+ DEFINE_PROP_CHR("chardev1", PCIDivaSerialState, state[0].chr),
+ DEFINE_PROP_CHR("chardev2", PCIDivaSerialState, state[1].chr),
+ DEFINE_PROP_CHR("chardev3", PCIDivaSerialState, state[2].chr),
+ DEFINE_PROP_CHR("chardev4", PCIDivaSerialState, state[3].chr),
+ DEFINE_PROP_UINT32("subvendor", PCIDivaSerialState, subvendor,
+ PCI_DEVICE_ID_HP_DIVA_TOSCA1),
+};
+
+static void diva_serial_class_initfn(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
+ pc->realize = diva_pci_realize;
+ pc->exit = diva_pci_exit;
+ pc->vendor_id = PCI_VENDOR_ID_HP;
+ pc->device_id = PCI_DEVICE_ID_HP_DIVA;
+ pc->subsystem_vendor_id = PCI_VENDOR_ID_HP;
+ pc->subsystem_id = PCI_DEVICE_ID_HP_DIVA_TOSCA1;
+ pc->revision = 3;
+ pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
+ dc->vmsd = &vmstate_pci_diva;
+ device_class_set_props(dc, diva_serial_properties);
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static void diva_serial_init(Object *o)
+{
+ PCIDevice *dev = PCI_DEVICE(o);
+ PCIDivaSerialState *pms = DO_UPCAST(PCIDivaSerialState, dev, dev);
+ struct diva_info di = diva_get_diva_info(PCI_DEVICE_GET_CLASS(dev));
+ size_t i;
+
+ for (i = 0; i < di.nports; i++) {
+ object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL);
+ }
+}
+
+
+/* Diva-aux is the driver for portion 0 of the multifunction PCI device */
+
+struct DivaAuxState {
+ PCIDevice dev;
+ MemoryRegion mem;
+ qemu_irq irq;
+};
+
+#define TYPE_DIVA_AUX "diva-aux"
+OBJECT_DECLARE_SIMPLE_TYPE(DivaAuxState, DIVA_AUX)
+
+static void diva_aux_realize(PCIDevice *dev, Error **errp)
+{
+ DivaAuxState *pci = DO_UPCAST(DivaAuxState, dev, dev);
+
+ pci->dev.config[PCI_CLASS_PROG] = 0x02;
+ pci->dev.config[PCI_INTERRUPT_PIN] = 0x01;
+ pci->irq = pci_allocate_irq(&pci->dev);
+
+ memory_region_init(&pci->mem, OBJECT(pci), "mem", 16);
+ pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &pci->mem);
+}
+
+static void diva_aux_exit(PCIDevice *dev)
+{
+ DivaAuxState *pci = DO_UPCAST(DivaAuxState, dev, dev);
+ qemu_free_irq(pci->irq);
+}
+
+static void diva_aux_class_initfn(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
+ pc->realize = diva_aux_realize;
+ pc->exit = diva_aux_exit;
+ pc->vendor_id = PCI_VENDOR_ID_HP;
+ pc->device_id = PCI_DEVICE_ID_HP_DIVA_AUX;
+ pc->subsystem_vendor_id = PCI_VENDOR_ID_HP;
+ pc->subsystem_id = 0x1291;
+ pc->revision = 1;
+ pc->class_id = PCI_CLASS_COMMUNICATION_MULTISERIAL;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->user_creatable = false;
+}
+
+static void diva_aux_init(Object *o)
+{
+}
+
+static const TypeInfo diva_aux_info = {
+ .name = TYPE_DIVA_AUX,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(DivaAuxState),
+ .instance_init = diva_aux_init,
+ .class_init = diva_aux_class_initfn,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+
+
+static const TypeInfo diva_serial_pci_info = {
+ .name = "diva-gsp",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIDivaSerialState),
+ .instance_init = diva_serial_init,
+ .class_init = diva_serial_class_initfn,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void diva_pci_register_type(void)
+{
+ type_register_static(&diva_serial_pci_info);
+ type_register_static(&diva_aux_info);
+}
+
+type_init(diva_pci_register_type)
diff --git a/hw/char/escc.c b/hw/char/escc.c
index d450d70..afe4ca4 100644
--- a/hw/char/escc.c
+++ b/hw/char/escc.c
@@ -287,6 +287,7 @@ static void escc_reset_chn(ESCCChannelState *s)
s->rxint = s->txint = 0;
s->rxint_under_svc = s->txint_under_svc = 0;
s->e0_mode = s->led_mode = s->caps_lock_mode = s->num_lock_mode = 0;
+ s->sunmouse_dx = s->sunmouse_dy = s->sunmouse_buttons = 0;
clear_queue(s);
}
@@ -952,53 +953,96 @@ static void handle_kbd_command(ESCCChannelState *s, int val)
}
}
-static void sunmouse_event(void *opaque,
- int dx, int dy, int dz, int buttons_state)
+static void sunmouse_handle_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
{
- ESCCChannelState *s = opaque;
- int ch;
+ ESCCChannelState *s = (ESCCChannelState *)dev;
+ InputMoveEvent *move;
+ InputBtnEvent *btn;
+ static const int bmap[INPUT_BUTTON__MAX] = {
+ [INPUT_BUTTON_LEFT] = 0x4,
+ [INPUT_BUTTON_MIDDLE] = 0x2,
+ [INPUT_BUTTON_RIGHT] = 0x1,
+ };
+
+ switch (evt->type) {
+ case INPUT_EVENT_KIND_REL:
+ move = evt->u.rel.data;
+ if (move->axis == INPUT_AXIS_X) {
+ s->sunmouse_dx += move->value;
+ } else if (move->axis == INPUT_AXIS_Y) {
+ s->sunmouse_dy -= move->value;
+ }
+ break;
- trace_escc_sunmouse_event(dx, dy, buttons_state);
- ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */
+ case INPUT_EVENT_KIND_BTN:
+ btn = evt->u.btn.data;
+ if (bmap[btn->button]) {
+ if (btn->down) {
+ s->sunmouse_buttons |= bmap[btn->button];
+ } else {
+ s->sunmouse_buttons &= ~bmap[btn->button];
+ }
+ /* Indicate we have a supported button event */
+ s->sunmouse_buttons |= 0x80;
+ }
+ break;
- if (buttons_state & MOUSE_EVENT_LBUTTON) {
- ch ^= 0x4;
- }
- if (buttons_state & MOUSE_EVENT_MBUTTON) {
- ch ^= 0x2;
+ default:
+ /* keep gcc happy */
+ break;
}
- if (buttons_state & MOUSE_EVENT_RBUTTON) {
- ch ^= 0x1;
+}
+
+static void sunmouse_sync(DeviceState *dev)
+{
+ ESCCChannelState *s = (ESCCChannelState *)dev;
+ int ch;
+
+ if (s->sunmouse_dx == 0 && s->sunmouse_dy == 0 &&
+ (s->sunmouse_buttons & 0x80) == 0) {
+ /* Nothing to do after button event filter */
+ return;
}
+ /* Clear our button event flag */
+ s->sunmouse_buttons &= ~0x80;
+ trace_escc_sunmouse_event(s->sunmouse_dx, s->sunmouse_dy,
+ s->sunmouse_buttons);
+ ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */
+ ch ^= s->sunmouse_buttons;
put_queue(s, ch);
- ch = dx;
-
+ ch = s->sunmouse_dx;
if (ch > 127) {
ch = 127;
} else if (ch < -127) {
ch = -127;
}
-
put_queue(s, ch & 0xff);
+ s->sunmouse_dx -= ch;
- ch = -dy;
-
+ ch = s->sunmouse_dy;
if (ch > 127) {
ch = 127;
} else if (ch < -127) {
ch = -127;
}
-
put_queue(s, ch & 0xff);
+ s->sunmouse_dy -= ch;
/* MSC protocol specifies two extra motion bytes */
-
put_queue(s, 0);
put_queue(s, 0);
}
+static const QemuInputHandler sunmouse_handler = {
+ .name = "QEMU Sun Mouse",
+ .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
+ .event = sunmouse_handle_event,
+ .sync = sunmouse_sync,
+};
+
static void escc_init1(Object *obj)
{
ESCCState *s = ESCC(obj);
@@ -1036,8 +1080,8 @@ static void escc_realize(DeviceState *dev, Error **errp)
}
if (s->chn[0].type == escc_mouse) {
- qemu_add_mouse_event_handler(sunmouse_event, &s->chn[0], 0,
- "QEMU Sun Mouse");
+ s->chn[0].hs = qemu_input_handler_register((DeviceState *)(&s->chn[0]),
+ &sunmouse_handler);
}
if (s->chn[1].type == escc_kbd) {
s->chn[1].hs = qemu_input_handler_register((DeviceState *)(&s->chn[1]),
@@ -1045,7 +1089,7 @@ static void escc_realize(DeviceState *dev, Error **errp)
}
}
-static Property escc_properties[] = {
+static const Property escc_properties[] = {
DEFINE_PROP_UINT32("frequency", ESCCState, frequency, 0),
DEFINE_PROP_UINT32("it_shift", ESCCState, it_shift, 0),
DEFINE_PROP_BOOL("bit_swap", ESCCState, bit_swap, false),
@@ -1055,14 +1099,13 @@ static Property escc_properties[] = {
DEFINE_PROP_CHR("chrB", ESCCState, chn[0].chr),
DEFINE_PROP_CHR("chrA", ESCCState, chn[1].chr),
DEFINE_PROP_STRING("chnA-sunkbd-layout", ESCCState, chn[1].sunkbd_layout),
- DEFINE_PROP_END_OF_LIST(),
};
-static void escc_class_init(ObjectClass *klass, void *data)
+static void escc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = escc_reset;
+ device_class_set_legacy_reset(dc, escc_reset);
dc->realize = escc_realize;
dc->vmsd = &vmstate_escc;
device_class_set_props(dc, escc_properties);
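For reference, sunmouse_sync() above emits the classic Sun/Mouse Systems MSC 5-byte packet: a start byte carrying the three button states active-low, one clamped X and one clamped Y motion byte, and two further motion bytes (always zero here); any delta beyond +/-127 is carried over into the next packet. The sketch below is illustration only, not part of the patch, and all names in it are made up.

#include <stdint.h>

static int msc_clamp(int v)
{
    return v > 127 ? 127 : (v < -127 ? -127 : v);
}

/* Hypothetical helper: build one MSC packet and carry leftover deltas. */
static void msc_encode(uint8_t out[5], int *dx, int *dy, int buttons)
{
    int cx = msc_clamp(*dx);
    int cy = msc_clamp(*dy);

    out[0] = (0x80 | 0x7) ^ (buttons & 0x7);  /* start byte, buttons active-low */
    out[1] = cx & 0xff;                       /* first X motion byte */
    out[2] = cy & 0xff;                       /* first Y motion byte */
    out[3] = 0;                               /* second X motion byte */
    out[4] = 0;                               /* second Y motion byte */

    *dx -= cx;                                /* remainder goes into the next packet */
    *dy -= cy;
}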
diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c
deleted file mode 100644
index 8d6422d..0000000
--- a/hw/char/etraxfs_ser.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * QEMU ETRAX System Emulator
- *
- * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/qdev-properties-system.h"
-#include "hw/sysbus.h"
-#include "chardev/char-fe.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-#define D(x)
-
-#define RW_TR_CTRL (0x00 / 4)
-#define RW_TR_DMA_EN (0x04 / 4)
-#define RW_REC_CTRL (0x08 / 4)
-#define RW_DOUT (0x1c / 4)
-#define RS_STAT_DIN (0x20 / 4)
-#define R_STAT_DIN (0x24 / 4)
-#define RW_INTR_MASK (0x2c / 4)
-#define RW_ACK_INTR (0x30 / 4)
-#define R_INTR (0x34 / 4)
-#define R_MASKED_INTR (0x38 / 4)
-#define R_MAX (0x3c / 4)
-
-#define STAT_DAV 16
-#define STAT_TR_IDLE 22
-#define STAT_TR_RDY 24
-
-#define TYPE_ETRAX_FS_SERIAL "etraxfs-serial"
-typedef struct ETRAXSerial ETRAXSerial;
-DECLARE_INSTANCE_CHECKER(ETRAXSerial, ETRAX_SERIAL,
- TYPE_ETRAX_FS_SERIAL)
-
-struct ETRAXSerial {
- SysBusDevice parent_obj;
-
- MemoryRegion mmio;
- CharBackend chr;
- qemu_irq irq;
-
- int pending_tx;
-
- uint8_t rx_fifo[16];
- unsigned int rx_fifo_pos;
- unsigned int rx_fifo_len;
-
- /* Control registers. */
- uint32_t regs[R_MAX];
-};
-
-static void ser_update_irq(ETRAXSerial *s)
-{
-
- if (s->rx_fifo_len) {
- s->regs[R_INTR] |= 8;
- } else {
- s->regs[R_INTR] &= ~8;
- }
-
- s->regs[R_MASKED_INTR] = s->regs[R_INTR] & s->regs[RW_INTR_MASK];
- qemu_set_irq(s->irq, !!s->regs[R_MASKED_INTR]);
-}
-
-static uint64_t
-ser_read(void *opaque, hwaddr addr, unsigned int size)
-{
- ETRAXSerial *s = opaque;
- uint32_t r = 0;
-
- addr >>= 2;
- switch (addr)
- {
- case R_STAT_DIN:
- r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15];
- if (s->rx_fifo_len) {
- r |= 1 << STAT_DAV;
- }
- r |= 1 << STAT_TR_RDY;
- r |= 1 << STAT_TR_IDLE;
- break;
- case RS_STAT_DIN:
- r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15];
- if (s->rx_fifo_len) {
- r |= 1 << STAT_DAV;
- s->rx_fifo_len--;
- }
- r |= 1 << STAT_TR_RDY;
- r |= 1 << STAT_TR_IDLE;
- break;
- default:
- r = s->regs[addr];
- D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, r));
- break;
- }
- return r;
-}
-
-static void
-ser_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
-{
- ETRAXSerial *s = opaque;
- uint32_t value = val64;
- unsigned char ch = val64;
-
- D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, value));
- addr >>= 2;
- switch (addr)
- {
- case RW_DOUT:
- /* XXX this blocks entire thread. Rewrite to use
- * qemu_chr_fe_write and background I/O callbacks */
- qemu_chr_fe_write_all(&s->chr, &ch, 1);
- s->regs[R_INTR] |= 3;
- s->pending_tx = 1;
- s->regs[addr] = value;
- break;
- case RW_ACK_INTR:
- if (s->pending_tx) {
- value &= ~1;
- s->pending_tx = 0;
- D(qemu_log("fixedup value=%x r_intr=%x\n",
- value, s->regs[R_INTR]));
- }
- s->regs[addr] = value;
- s->regs[R_INTR] &= ~value;
- D(printf("r_intr=%x\n", s->regs[R_INTR]));
- break;
- default:
- s->regs[addr] = value;
- break;
- }
- ser_update_irq(s);
-}
-
-static const MemoryRegionOps ser_ops = {
- .read = ser_read,
- .write = ser_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
-};
-
-static Property etraxfs_ser_properties[] = {
- DEFINE_PROP_CHR("chardev", ETRAXSerial, chr),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void serial_receive(void *opaque, const uint8_t *buf, int size)
-{
- ETRAXSerial *s = opaque;
- int i;
-
- /* Got a byte. */
- if (s->rx_fifo_len >= 16) {
- D(qemu_log("WARNING: UART dropped char.\n"));
- return;
- }
-
- for (i = 0; i < size; i++) {
- s->rx_fifo[s->rx_fifo_pos] = buf[i];
- s->rx_fifo_pos++;
- s->rx_fifo_pos &= 15;
- s->rx_fifo_len++;
- }
-
- ser_update_irq(s);
-}
-
-static int serial_can_receive(void *opaque)
-{
- ETRAXSerial *s = opaque;
-
- /* Is the receiver enabled? */
- if (!(s->regs[RW_REC_CTRL] & (1 << 3))) {
- return 0;
- }
-
- return sizeof(s->rx_fifo) - s->rx_fifo_len;
-}
-
-static void serial_event(void *opaque, QEMUChrEvent event)
-{
-
-}
-
-static void etraxfs_ser_reset(DeviceState *d)
-{
- ETRAXSerial *s = ETRAX_SERIAL(d);
-
- /* transmitter begins ready and idle. */
- s->regs[RS_STAT_DIN] |= (1 << STAT_TR_RDY);
- s->regs[RS_STAT_DIN] |= (1 << STAT_TR_IDLE);
-
- s->regs[RW_REC_CTRL] = 0x10000;
-
-}
-
-static void etraxfs_ser_init(Object *obj)
-{
- ETRAXSerial *s = ETRAX_SERIAL(obj);
- SysBusDevice *dev = SYS_BUS_DEVICE(obj);
-
- sysbus_init_irq(dev, &s->irq);
- memory_region_init_io(&s->mmio, obj, &ser_ops, s,
- "etraxfs-serial", R_MAX * 4);
- sysbus_init_mmio(dev, &s->mmio);
-}
-
-static void etraxfs_ser_realize(DeviceState *dev, Error **errp)
-{
- ETRAXSerial *s = ETRAX_SERIAL(dev);
-
- qemu_chr_fe_set_handlers(&s->chr,
- serial_can_receive, serial_receive,
- serial_event, NULL, s, NULL, true);
-}
-
-static void etraxfs_ser_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->reset = etraxfs_ser_reset;
- device_class_set_props(dc, etraxfs_ser_properties);
- dc->realize = etraxfs_ser_realize;
-}
-
-static const TypeInfo etraxfs_ser_info = {
- .name = TYPE_ETRAX_FS_SERIAL,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(ETRAXSerial),
- .instance_init = etraxfs_ser_init,
- .class_init = etraxfs_ser_class_init,
-};
-
-static void etraxfs_serial_register_types(void)
-{
- type_register_static(&etraxfs_ser_info);
-}
-
-type_init(etraxfs_serial_register_types)
diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c
index 8cdd42e..6521b4c 100644
--- a/hw/char/exynos4210_uart.c
+++ b/hw/char/exynos4210_uart.c
@@ -704,20 +704,19 @@ static void exynos4210_uart_realize(DeviceState *dev, Error **errp)
NULL, s, NULL, true);
}
-static Property exynos4210_uart_properties[] = {
+static const Property exynos4210_uart_properties[] = {
DEFINE_PROP_CHR("chardev", Exynos4210UartState, chr),
DEFINE_PROP_UINT32("channel", Exynos4210UartState, channel, 0),
DEFINE_PROP_UINT32("rx-size", Exynos4210UartState, rx.size, 16),
DEFINE_PROP_UINT32("tx-size", Exynos4210UartState, tx.size, 16),
- DEFINE_PROP_END_OF_LIST(),
};
-static void exynos4210_uart_class_init(ObjectClass *klass, void *data)
+static void exynos4210_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = exynos4210_uart_realize;
- dc->reset = exynos4210_uart_reset;
+ device_class_set_legacy_reset(dc, exynos4210_uart_reset);
device_class_set_props(dc, exynos4210_uart_properties);
dc->vmsd = &vmstate_exynos4210_uart;
}
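The escc and exynos4210 hunks above show the mechanical conversion that recurs through most files in this patch: Property arrays become const and drop the DEFINE_PROP_END_OF_LIST() terminator, class_init callbacks take a const void *data argument, and the deprecated dc->reset assignment is replaced by device_class_set_legacy_reset(). A minimal sketch of the resulting shape, using a made-up DemoUartState device rather than any real one:

#include "qemu/osdep.h"
#include "chardev/char-fe.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"

typedef struct DemoUartState {
    DeviceState parent_obj;
    CharBackend chr;
} DemoUartState;                       /* hypothetical device state */

static void demo_uart_reset(DeviceState *dev)
{
    /* device-specific register reset would go here */
}

static const Property demo_uart_properties[] = {
    DEFINE_PROP_CHR("chardev", DemoUartState, chr),
    /* no DEFINE_PROP_END_OF_LIST() terminator any more */
};

static void demo_uart_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, demo_uart_reset);
    device_class_set_props(dc, demo_uart_properties);
}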
diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c
index f8ff043..a37408a 100644
--- a/hw/char/goldfish_tty.c
+++ b/hw/char/goldfish_tty.c
@@ -15,7 +15,8 @@
#include "chardev/char-fe.h"
#include "qemu/log.h"
#include "trace.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/dma.h"
#include "hw/char/goldfish_tty.h"
#define GOLDFISH_TTY_VERSION 1
@@ -69,7 +70,6 @@ static uint64_t goldfish_tty_read(void *opaque, hwaddr addr,
static void goldfish_tty_cmd(GoldfishTTYState *s, uint32_t cmd)
{
uint32_t to_copy;
- uint8_t *buf;
uint8_t data_out[GOLFISH_TTY_BUFFER_SIZE];
int len;
uint64_t ptr;
@@ -97,8 +97,8 @@ static void goldfish_tty_cmd(GoldfishTTYState *s, uint32_t cmd)
while (len) {
to_copy = MIN(GOLFISH_TTY_BUFFER_SIZE, len);
- address_space_rw(&address_space_memory, ptr,
- MEMTXATTRS_UNSPECIFIED, data_out, to_copy, 0);
+ dma_memory_read_relaxed(&address_space_memory, ptr,
+ data_out, to_copy);
qemu_chr_fe_write_all(&s->chr, data_out, to_copy);
len -= to_copy;
@@ -109,9 +109,9 @@ static void goldfish_tty_cmd(GoldfishTTYState *s, uint32_t cmd)
len = s->data_len;
ptr = s->data_ptr;
while (len && !fifo8_is_empty(&s->rx_fifo)) {
- buf = (uint8_t *)fifo8_pop_buf(&s->rx_fifo, len, &to_copy);
- address_space_rw(&address_space_memory, ptr,
- MEMTXATTRS_UNSPECIFIED, buf, to_copy, 1);
+ const uint8_t *buf = fifo8_pop_bufptr(&s->rx_fifo, len, &to_copy);
+
+ dma_memory_write_relaxed(&address_space_memory, ptr, buf, to_copy);
len -= to_copy;
ptr += to_copy;
@@ -241,9 +241,8 @@ static const VMStateDescription vmstate_goldfish_tty = {
}
};
-static Property goldfish_tty_properties[] = {
+static const Property goldfish_tty_properties[] = {
DEFINE_PROP_CHR("chardev", GoldfishTTYState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void goldfish_tty_instance_init(Object *obj)
@@ -257,12 +256,12 @@ static void goldfish_tty_instance_init(Object *obj)
sysbus_init_irq(dev, &s->irq);
}
-static void goldfish_tty_class_init(ObjectClass *oc, void *data)
+static void goldfish_tty_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
device_class_set_props(dc, goldfish_tty_properties);
- dc->reset = goldfish_tty_reset;
+ device_class_set_legacy_reset(dc, goldfish_tty_reset);
dc->realize = goldfish_tty_realize;
dc->unrealize = goldfish_tty_unrealize;
dc->vmsd = &vmstate_goldfish_tty;
diff --git a/hw/char/grlib_apbuart.c b/hw/char/grlib_apbuart.c
index 515b65b..81c26e3 100644
--- a/hw/char/grlib_apbuart.c
+++ b/hw/char/grlib_apbuart.c
@@ -277,17 +277,16 @@ static void grlib_apbuart_reset(DeviceState *d)
uart->current = 0;
}
-static Property grlib_apbuart_properties[] = {
+static const Property grlib_apbuart_properties[] = {
DEFINE_PROP_CHR("chrdev", UART, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void grlib_apbuart_class_init(ObjectClass *klass, void *data)
+static void grlib_apbuart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = grlib_apbuart_realize;
- dc->reset = grlib_apbuart_reset;
+ device_class_set_legacy_reset(dc, grlib_apbuart_reset);
device_class_set_props(dc, grlib_apbuart_properties);
}
diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c
index 63aae6d..d6f0d18 100644
--- a/hw/char/ibex_uart.c
+++ b/hw/char/ibex_uart.c
@@ -508,9 +508,8 @@ static const VMStateDescription vmstate_ibex_uart = {
}
};
-static Property ibex_uart_properties[] = {
+static const Property ibex_uart_properties[] = {
DEFINE_PROP_CHR("chardev", IbexUartState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void ibex_uart_init(Object *obj)
@@ -543,11 +542,11 @@ static void ibex_uart_realize(DeviceState *dev, Error **errp)
s, NULL, true);
}
-static void ibex_uart_class_init(ObjectClass *klass, void *data)
+static void ibex_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = ibex_uart_reset;
+ device_class_set_legacy_reset(dc, ibex_uart_reset);
dc->realize = ibex_uart_realize;
dc->vmsd = &vmstate_ibex_uart;
device_class_set_props(dc, ibex_uart_properties);
diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c
index ba37be6..509b014 100644
--- a/hw/char/imx_serial.c
+++ b/hw/char/imx_serial.c
@@ -27,6 +27,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/fifo32.h"
+#include "trace.h"
#ifndef DEBUG_IMX_UART
#define DEBUG_IMX_UART 0
@@ -159,6 +160,7 @@ static void imx_serial_reset(IMXSerialState *s)
s->ucr3 = 0x700;
s->ubmr = 0;
s->ubrc = 4;
+ s->ufcr = BIT(11) | BIT(0);
fifo32_reset(&s->rx_fifo);
timer_del(&s->ageing_timer);
@@ -184,10 +186,10 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset,
unsigned size)
{
IMXSerialState *s = (IMXSerialState *)opaque;
+ Chardev *chr = qemu_chr_fe_get_driver(&s->chr);
uint32_t c, rx_used;
uint8_t rxtl = s->ufcr & TL_MASK;
-
- DPRINTF("read(offset=0x%" HWADDR_PRIx ")\n", offset);
+ uint64_t value;
switch (offset >> 2) {
case 0x0: /* URXD */
@@ -208,49 +210,67 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset,
imx_serial_rx_fifo_ageing_timer_restart(s);
qemu_chr_fe_accept_input(&s->chr);
}
- return c;
+ value = c;
+ break;
case 0x20: /* UCR1 */
- return s->ucr1;
+ value = s->ucr1;
+ break;
case 0x21: /* UCR2 */
- return s->ucr2;
+ value = s->ucr2;
+ break;
case 0x25: /* USR1 */
- return s->usr1;
+ value = s->usr1;
+ break;
case 0x26: /* USR2 */
- return s->usr2;
+ value = s->usr2;
+ break;
case 0x2A: /* BRM Modulator */
- return s->ubmr;
+ value = s->ubmr;
+ break;
case 0x2B: /* Baud Rate Count */
- return s->ubrc;
+ value = s->ubrc;
+ break;
case 0x2d: /* Test register */
- return s->uts1;
+ value = s->uts1;
+ break;
case 0x24: /* UFCR */
- return s->ufcr;
+ value = s->ufcr;
+ break;
case 0x2c:
- return s->onems;
+ value = s->onems;
+ break;
case 0x22: /* UCR3 */
- return s->ucr3;
+ value = s->ucr3;
+ break;
case 0x23: /* UCR4 */
- return s->ucr4;
+ value = s->ucr4;
+ break;
case 0x29: /* BRM Incremental */
- return 0x0; /* TODO */
+ value = 0x0; /* TODO */
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
HWADDR_PRIx "\n", TYPE_IMX_SERIAL, __func__, offset);
- return 0;
+ value = 0;
+ break;
}
+
+ trace_imx_serial_read(chr ? chr->label : "NODEV", offset, value);
+
+ return value;
}
static void imx_serial_write(void *opaque, hwaddr offset,
@@ -260,8 +280,7 @@ static void imx_serial_write(void *opaque, hwaddr offset,
Chardev *chr = qemu_chr_fe_get_driver(&s->chr);
unsigned char ch;
- DPRINTF("write(offset=0x%" HWADDR_PRIx ", value = 0x%x) to %s\n",
- offset, (unsigned int)value, chr ? chr->label : "NODEV");
+ trace_imx_serial_write(chr ? chr->label : "NODEV", offset, value);
switch (offset >> 2) {
case 0x10: /* UTXD */
@@ -367,27 +386,30 @@ static void imx_serial_write(void *opaque, hwaddr offset,
static int imx_can_receive(void *opaque)
{
IMXSerialState *s = (IMXSerialState *)opaque;
- return s->ucr2 & UCR2_RXEN && fifo32_num_used(&s->rx_fifo) < FIFO_SIZE;
+
+ return s->ucr2 & UCR2_RXEN ? fifo32_num_free(&s->rx_fifo) : 0;
}
static void imx_put_data(void *opaque, uint32_t value)
{
IMXSerialState *s = (IMXSerialState *)opaque;
+ Chardev *chr = qemu_chr_fe_get_driver(&s->chr);
uint8_t rxtl = s->ufcr & TL_MASK;
- DPRINTF("received char\n");
+ trace_imx_serial_put_data(chr ? chr->label : "NODEV", value);
+
imx_serial_rx_fifo_push(s, value);
if (fifo32_num_used(&s->rx_fifo) >= rxtl) {
s->usr1 |= USR1_RRDY;
}
-
- imx_serial_rx_fifo_ageing_timer_restart(s);
-
s->usr2 |= USR2_RDR;
s->uts1 &= ~UTS1_RXEMPTY;
if (value & URXD_BRK) {
s->usr2 |= USR2_BRCD;
}
+
+ imx_serial_rx_fifo_ageing_timer_restart(s);
+
imx_update(s);
}
@@ -396,7 +418,10 @@ static void imx_receive(void *opaque, const uint8_t *buf, int size)
IMXSerialState *s = (IMXSerialState *)opaque;
s->usr2 |= USR2_WAKE;
- imx_put_data(opaque, *buf);
+
+ for (int i = 0; i < size; i++) {
+ imx_put_data(opaque, buf[i]);
+ }
}
static void imx_event(void *opaque, QEMUChrEvent event)
@@ -438,18 +463,17 @@ static void imx_serial_init(Object *obj)
sysbus_init_irq(sbd, &s->irq);
}
-static Property imx_serial_properties[] = {
+static const Property imx_serial_properties[] = {
DEFINE_PROP_CHR("chardev", IMXSerialState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void imx_serial_class_init(ObjectClass *klass, void *data)
+static void imx_serial_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_serial_realize;
dc->vmsd = &vmstate_imx_serial;
- dc->reset = imx_serial_reset_at_boot;
+ device_class_set_legacy_reset(dc, imx_serial_reset_at_boot);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
dc->desc = "i.MX series UART";
device_class_set_props(dc, imx_serial_properties);
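The imx_serial change above relies on the chardev frontend contract: can_receive() returns how many bytes the device is currently willing to accept, and the following receive() call hands over at most that many bytes, all of which must be consumed. Reporting the free FIFO space instead of a 0/1 flag lets the backend deliver several bytes per call, which is why imx_receive() now loops over the buffer. A bare-bones sketch of that pattern, with made-up names, assuming QEMU's Fifo32 helpers:

#include "qemu/osdep.h"
#include "qemu/fifo32.h"

typedef struct {
    Fifo32 rx_fifo;
    bool rx_enabled;
} DemoRxState;                         /* hypothetical, stands in for IMXSerialState */

static int demo_can_receive(void *opaque)
{
    DemoRxState *s = opaque;

    /* Advertise the free FIFO space so the backend may batch bytes. */
    return s->rx_enabled ? fifo32_num_free(&s->rx_fifo) : 0;
}

static void demo_receive(void *opaque, const uint8_t *buf, int size)
{
    DemoRxState *s = opaque;

    /* 'size' never exceeds the last value returned by demo_can_receive(). */
    for (int i = 0; i < size; i++) {
        fifo32_push(&s->rx_fifo, buf[i]);
    }
}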
diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c
index 64be522..752c6c8 100644
--- a/hw/char/ipoctal232.c
+++ b/hw/char/ipoctal232.c
@@ -184,9 +184,9 @@ static void update_irq(IPOctalState *dev, unsigned block)
unsigned intno = block / 2;
if ((blk0->isr & blk0->imr) || (blk1->isr & blk1->imr)) {
- qemu_irq_raise(idev->irq[intno]);
+ qemu_irq_raise(&idev->irq[intno]);
} else {
- qemu_irq_lower(idev->irq[intno]);
+ qemu_irq_lower(&idev->irq[intno]);
}
}
@@ -558,7 +558,7 @@ static void ipoctal_realize(DeviceState *dev, Error **errp)
}
}
-static Property ipoctal_properties[] = {
+static const Property ipoctal_properties[] = {
DEFINE_PROP_CHR("chardev0", IPOctalState, ch[0].dev),
DEFINE_PROP_CHR("chardev1", IPOctalState, ch[1].dev),
DEFINE_PROP_CHR("chardev2", IPOctalState, ch[2].dev),
@@ -567,10 +567,9 @@ static Property ipoctal_properties[] = {
DEFINE_PROP_CHR("chardev5", IPOctalState, ch[5].dev),
DEFINE_PROP_CHR("chardev6", IPOctalState, ch[6].dev),
DEFINE_PROP_CHR("chardev7", IPOctalState, ch[7].dev),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ipoctal_class_init(ObjectClass *klass, void *data)
+static void ipoctal_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IPackDeviceClass *ic = IPACK_DEVICE_CLASS(klass);
diff --git a/hw/char/mcf_uart.c b/hw/char/mcf_uart.c
index f9cbc9b..87bfcbe 100644
--- a/hw/char/mcf_uart.c
+++ b/hw/char/mcf_uart.c
@@ -17,6 +17,8 @@
#include "chardev/char-fe.h"
#include "qom/object.h"
+#define FIFO_DEPTH 4
+
struct mcf_uart_state {
SysBusDevice parent_obj;
@@ -27,7 +29,7 @@ struct mcf_uart_state {
uint8_t imr;
uint8_t bg1;
uint8_t bg2;
- uint8_t fifo[4];
+ uint8_t fifo[FIFO_DEPTH];
uint8_t tb;
int current_mr;
int fifo_len;
@@ -247,14 +249,16 @@ static void mcf_uart_reset(DeviceState *dev)
static void mcf_uart_push_byte(mcf_uart_state *s, uint8_t data)
{
/* Break events overwrite the last byte if the fifo is full. */
- if (s->fifo_len == 4)
+ if (s->fifo_len == FIFO_DEPTH) {
s->fifo_len--;
+ }
s->fifo[s->fifo_len] = data;
s->fifo_len++;
s->sr |= MCF_UART_RxRDY;
- if (s->fifo_len == 4)
+ if (s->fifo_len == FIFO_DEPTH) {
s->sr |= MCF_UART_FFULL;
+ }
mcf_uart_update(s);
}
@@ -277,14 +281,16 @@ static int mcf_uart_can_receive(void *opaque)
{
mcf_uart_state *s = (mcf_uart_state *)opaque;
- return s->rx_enabled && (s->sr & MCF_UART_FFULL) == 0;
+ return s->rx_enabled ? FIFO_DEPTH - s->fifo_len : 0;
}
static void mcf_uart_receive(void *opaque, const uint8_t *buf, int size)
{
mcf_uart_state *s = (mcf_uart_state *)opaque;
- mcf_uart_push_byte(s, buf[0]);
+ for (int i = 0; i < size; i++) {
+ mcf_uart_push_byte(s, buf[i]);
+ }
}
static const MemoryRegionOps mcf_uart_ops = {
@@ -312,17 +318,16 @@ static void mcf_uart_realize(DeviceState *dev, Error **errp)
mcf_uart_event, NULL, s, NULL, true);
}
-static Property mcf_uart_properties[] = {
+static const Property mcf_uart_properties[] = {
DEFINE_PROP_CHR("chardev", mcf_uart_state, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mcf_uart_class_init(ObjectClass *oc, void *data)
+static void mcf_uart_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = mcf_uart_realize;
- dc->reset = mcf_uart_reset;
+ device_class_set_legacy_reset(dc, mcf_uart_reset);
device_class_set_props(dc, mcf_uart_properties);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c
index e7908bb..6149f9d 100644
--- a/hw/char/mchp_pfsoc_mmuart.c
+++ b/hw/char/mchp_pfsoc_mmuart.c
@@ -121,12 +121,12 @@ static const VMStateDescription mchp_pfsoc_mmuart_vmstate = {
}
};
-static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, void *data)
+static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = mchp_pfsoc_mmuart_realize;
- dc->reset = mchp_pfsoc_mmuart_reset;
+ device_class_set_legacy_reset(dc, mchp_pfsoc_mmuart_reset);
dc->vmsd = &mchp_pfsoc_mmuart_vmstate;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
diff --git a/hw/char/meson.build b/hw/char/meson.build
index e5b13b6..4e439da 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -1,25 +1,26 @@
system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_uart.c'))
system_ss.add(when: 'CONFIG_CMSDK_APB_UART', if_true: files('cmsdk-apb-uart.c'))
system_ss.add(when: 'CONFIG_ESCC', if_true: files('escc.c'))
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_ser.c'))
system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_apbuart.c'))
system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_uart.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_serial.c'))
-system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipoctal232.c'))
+system_ss.add(when: 'CONFIG_IP_OCTAL_232', if_true: files('ipoctal232.c'))
system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('parallel-isa.c'))
system_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugcon.c'))
system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_uart.c'))
system_ss.add(when: 'CONFIG_PARALLEL', if_true: files('parallel.c'))
-system_ss.add(when: 'CONFIG_PL011', if_true: files('pl011.c'))
+system_ss.add(when: 'CONFIG_PL011_C', if_true: files('pl011.c'))
system_ss.add(when: 'CONFIG_SCLPCONSOLE', if_true: files('sclpconsole.c', 'sclpconsole-lm.c'))
system_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c'))
system_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c'))
+system_ss.add(when: 'CONFIG_SERIAL_MM', if_true: files('serial-mm.c'))
system_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c'))
system_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c'))
system_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c'))
system_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c'))
system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c'))
system_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c'))
+system_ss.add(when: 'CONFIG_DIVA_GSP', if_true: files('diva-gsp.c'))
system_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c'))
system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c'))
diff --git a/hw/char/nrf51_uart.c b/hw/char/nrf51_uart.c
index c2cd6bb..41d4234 100644
--- a/hw/char/nrf51_uart.c
+++ b/hw/char/nrf51_uart.c
@@ -304,16 +304,15 @@ static const VMStateDescription nrf51_uart_vmstate = {
}
};
-static Property nrf51_uart_properties[] = {
+static const Property nrf51_uart_properties[] = {
DEFINE_PROP_CHR("chardev", NRF51UARTState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nrf51_uart_class_init(ObjectClass *klass, void *data)
+static void nrf51_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = nrf51_uart_reset;
+ device_class_set_legacy_reset(dc, nrf51_uart_reset);
dc->realize = nrf51_uart_realize;
device_class_set_props(dc, nrf51_uart_properties);
dc->vmsd = &nrf51_uart_vmstate;
diff --git a/hw/char/omap_uart.c b/hw/char/omap_uart.c
index c2ef4c1..8cbf6ce 100644
--- a/hw/char/omap_uart.c
+++ b/hw/char/omap_uart.c
@@ -20,15 +20,14 @@
#include "qemu/osdep.h"
#include "chardev/char.h"
#include "hw/arm/omap.h"
-#include "hw/char/serial.h"
-#include "exec/address-spaces.h"
+#include "hw/char/serial-mm.h"
+#include "system/address-spaces.h"
/* UARTs */
struct omap_uart_s {
MemoryRegion iomem;
hwaddr base;
SerialMM *serial; /* TODO */
- struct omap_target_agent_s *ta;
omap_clk fclk;
qemu_irq irq;
@@ -36,8 +35,6 @@ struct omap_uart_s {
uint8_t syscontrol;
uint8_t wkup;
uint8_t cfps;
- uint8_t mdr[2];
- uint8_t scr;
uint8_t clksel;
};
@@ -66,113 +63,3 @@ struct omap_uart_s *omap_uart_init(hwaddr base,
DEVICE_NATIVE_ENDIAN);
return s;
}
-
-static uint64_t omap_uart_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_uart_s *s = opaque;
-
- if (size == 4) {
- return omap_badwidth_read8(opaque, addr);
- }
-
- switch (addr) {
- case 0x20: /* MDR1 */
- return s->mdr[0];
- case 0x24: /* MDR2 */
- return s->mdr[1];
- case 0x40: /* SCR */
- return s->scr;
- case 0x44: /* SSR */
- return 0x0;
- case 0x48: /* EBLR (OMAP2) */
- return s->eblr;
- case 0x4C: /* OSC_12M_SEL (OMAP1) */
- return s->clksel;
- case 0x50: /* MVR */
- return 0x30;
- case 0x54: /* SYSC (OMAP2) */
- return s->syscontrol;
- case 0x58: /* SYSS (OMAP2) */
- return 1;
- case 0x5c: /* WER (OMAP2) */
- return s->wkup;
- case 0x60: /* CFPS (OMAP2) */
- return s->cfps;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_uart_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_uart_s *s = opaque;
-
- if (size == 4) {
- omap_badwidth_write8(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x20: /* MDR1 */
- s->mdr[0] = value & 0x7f;
- break;
- case 0x24: /* MDR2 */
- s->mdr[1] = value & 0xff;
- break;
- case 0x40: /* SCR */
- s->scr = value & 0xff;
- break;
- case 0x48: /* EBLR (OMAP2) */
- s->eblr = value & 0xff;
- break;
- case 0x4C: /* OSC_12M_SEL (OMAP1) */
- s->clksel = value & 1;
- break;
- case 0x44: /* SSR */
- case 0x50: /* MVR */
- case 0x58: /* SYSS (OMAP2) */
- OMAP_RO_REG(addr);
- break;
- case 0x54: /* SYSC (OMAP2) */
- s->syscontrol = value & 0x1d;
- if (value & 2) {
- omap_uart_reset(s);
- }
- break;
- case 0x5c: /* WER (OMAP2) */
- s->wkup = value & 0x7f;
- break;
- case 0x60: /* CFPS (OMAP2) */
- s->cfps = value & 0xff;
- break;
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_uart_ops = {
- .read = omap_uart_read,
- .write = omap_uart_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem,
- struct omap_target_agent_s *ta,
- qemu_irq irq, omap_clk fclk, omap_clk iclk,
- qemu_irq txdma, qemu_irq rxdma,
- const char *label, Chardev *chr)
-{
- hwaddr base = omap_l4_attach(ta, 0, NULL);
- struct omap_uart_s *s = omap_uart_init(base, irq,
- fclk, iclk, txdma, rxdma, label, chr);
-
- memory_region_init_io(&s->iomem, NULL, &omap_uart_ops, s, "omap.uart", 0x100);
-
- s->ta = ta;
-
- memory_region_add_subregion(sysmem, base + 0x20, &s->iomem);
-
- return s;
-}
diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c
index a5ce6ee..b6dfb6c 100644
--- a/hw/char/parallel-isa.c
+++ b/hw/char/parallel-isa.c
@@ -10,7 +10,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/isa/isa.h"
#include "hw/qdev-properties.h"
#include "hw/char/parallel-isa.h"
diff --git a/hw/char/parallel.c b/hw/char/parallel.c
index c394635..8732e4e 100644
--- a/hw/char/parallel.c
+++ b/hw/char/parallel.c
@@ -33,8 +33,8 @@
#include "migration/vmstate.h"
#include "hw/char/parallel-isa.h"
#include "hw/char/parallel.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
+#include "system/reset.h"
+#include "system/system.h"
#include "trace.h"
#include "qom/object.h"
@@ -603,15 +603,14 @@ bool parallel_mm_init(MemoryRegion *address_space,
return true;
}
-static Property parallel_isa_properties[] = {
+static const Property parallel_isa_properties[] = {
DEFINE_PROP_UINT32("index", ISAParallelState, index, -1),
DEFINE_PROP_UINT32("iobase", ISAParallelState, iobase, -1),
DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7),
DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void parallel_isa_class_initfn(ObjectClass *klass, void *data)
+static void parallel_isa_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
@@ -628,7 +627,7 @@ static const TypeInfo parallel_isa_info = {
.parent = TYPE_ISA_DEVICE,
.instance_size = sizeof(ISAParallelState),
.class_init = parallel_isa_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index f8078aa..01335d9 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -85,13 +85,16 @@ DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr)
#define CR_OUT1 (1 << 12)
#define CR_RTS (1 << 11)
#define CR_DTR (1 << 10)
+#define CR_RXE (1 << 9)
+#define CR_TXE (1 << 8)
#define CR_LBE (1 << 7)
+#define CR_UARTEN (1 << 0)
/* Integer Baud Rate Divider, UARTIBRD */
-#define IBRD_MASK 0x3f
+#define IBRD_MASK 0xffff
/* Fractional Baud Rate Divider, UARTFBRD */
-#define FBRD_MASK 0xffff
+#define FBRD_MASK 0x3f
static const unsigned char pl011_id_arm[8] =
{ 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
@@ -138,6 +141,11 @@ static void pl011_update(PL011State *s)
}
}
+static bool pl011_loopback_enabled(PL011State *s)
+{
+ return !!(s->cr & CR_LBE);
+}
+
static bool pl011_is_fifo_enabled(PL011State *s)
{
return (s->lcr & LCR_FEN) != 0;
@@ -149,41 +157,127 @@ static inline unsigned pl011_get_fifo_depth(PL011State *s)
return pl011_is_fifo_enabled(s) ? PL011_FIFO_DEPTH : 1;
}
-static inline void pl011_reset_fifo(PL011State *s)
+static inline void pl011_reset_rx_fifo(PL011State *s)
{
s->read_count = 0;
s->read_pos = 0;
/* Reset FIFO flags */
- s->flags &= ~(PL011_FLAG_RXFF | PL011_FLAG_TXFF);
- s->flags |= PL011_FLAG_RXFE | PL011_FLAG_TXFE;
+ s->flags &= ~PL011_FLAG_RXFF;
+ s->flags |= PL011_FLAG_RXFE;
+}
+
+static inline void pl011_reset_tx_fifo(PL011State *s)
+{
+ /* Reset FIFO flags */
+ s->flags &= ~PL011_FLAG_TXFF;
+ s->flags |= PL011_FLAG_TXFE;
+}
+
+static void pl011_fifo_rx_put(void *opaque, uint32_t value)
+{
+ PL011State *s = (PL011State *)opaque;
+ int slot;
+ unsigned pipe_depth;
+
+ pipe_depth = pl011_get_fifo_depth(s);
+ slot = (s->read_pos + s->read_count) & (pipe_depth - 1);
+ s->read_fifo[slot] = value;
+ s->read_count++;
+ s->flags &= ~PL011_FLAG_RXFE;
+ trace_pl011_fifo_rx_put(value, s->read_count, pipe_depth);
+ if (s->read_count == pipe_depth) {
+ trace_pl011_fifo_rx_full();
+ s->flags |= PL011_FLAG_RXFF;
+ }
+ if (s->read_count == s->read_trigger) {
+ s->int_level |= INT_RX;
+ pl011_update(s);
+ }
+}
+
+static void pl011_loopback_tx(PL011State *s, uint32_t value)
+{
+ if (!pl011_loopback_enabled(s)) {
+ return;
+ }
+
+ /*
+ * Caveat:
+ *
+ * In real hardware, TX loopback happens at the serial-bit level and
+ * is then reassembled by the RX logic back into bytes and placed
+ * into the RX fifo. That is, loopback happens after the TX fifo.
+ *
+ * Because the real hardware TX fifo is time-drained at the frame
+ * rate governed by the configured serial format, some loopback
+ * bytes in the TX fifo may still be able to get into the RX fifo
+ * that could be full at times while being drained at software
+ * pace.
+ *
+ * In such a scenario, the RX draining pace is the major factor
+ * deciding which loopback bytes get into the RX fifo, unless
+ * hardware flow-control is enabled.
+ *
+ * For simplicity, the behaviour described above is not emulated.
+ */
+ pl011_fifo_rx_put(s, value);
+}
+
+static void pl011_write_txdata(PL011State *s, uint8_t data)
+{
+ if (!(s->cr & CR_UARTEN)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PL011 data written to disabled UART\n");
+ }
+ if (!(s->cr & CR_TXE)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PL011 data written to disabled TX UART\n");
+ }
+
+ /*
+ * XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks
+ */
+ qemu_chr_fe_write_all(&s->chr, &data, 1);
+ pl011_loopback_tx(s, data);
+ s->int_level |= INT_TX;
+ pl011_update(s);
+}
+
+static uint32_t pl011_read_rxdata(PL011State *s)
+{
+ uint32_t c;
+ unsigned fifo_depth = pl011_get_fifo_depth(s);
+
+ s->flags &= ~PL011_FLAG_RXFF;
+ c = s->read_fifo[s->read_pos];
+ if (s->read_count > 0) {
+ s->read_count--;
+ s->read_pos = (s->read_pos + 1) & (fifo_depth - 1);
+ }
+ if (s->read_count == 0) {
+ s->flags |= PL011_FLAG_RXFE;
+ }
+ if (s->read_count == s->read_trigger - 1) {
+ s->int_level &= ~INT_RX;
+ }
+ trace_pl011_read_fifo(s->read_count, fifo_depth);
+ s->rsr = c >> 8;
+ pl011_update(s);
+ qemu_chr_fe_accept_input(&s->chr);
+ return c;
}
static uint64_t pl011_read(void *opaque, hwaddr offset,
unsigned size)
{
PL011State *s = (PL011State *)opaque;
- uint32_t c;
uint64_t r;
switch (offset >> 2) {
case 0: /* UARTDR */
- s->flags &= ~PL011_FLAG_RXFF;
- c = s->read_fifo[s->read_pos];
- if (s->read_count > 0) {
- s->read_count--;
- s->read_pos = (s->read_pos + 1) & (pl011_get_fifo_depth(s) - 1);
- }
- if (s->read_count == 0) {
- s->flags |= PL011_FLAG_RXFE;
- }
- if (s->read_count == s->read_trigger - 1)
- s->int_level &= ~ INT_RX;
- trace_pl011_read_fifo(s->read_count);
- s->rsr = c >> 8;
- pl011_update(s);
- qemu_chr_fe_accept_input(&s->chr);
- r = c;
+ r = pl011_read_rxdata(s);
break;
case 1: /* UARTRSR */
r = s->rsr;
@@ -268,11 +362,6 @@ static void pl011_trace_baudrate_change(const PL011State *s)
s->ibrd, s->fbrd);
}
-static bool pl011_loopback_enabled(PL011State *s)
-{
- return !!(s->cr & CR_LBE);
-}
-
static void pl011_loopback_mdmctrl(PL011State *s)
{
uint32_t cr, fr, il;
@@ -314,36 +403,6 @@ static void pl011_loopback_mdmctrl(PL011State *s)
pl011_update(s);
}
-static void pl011_put_fifo(void *opaque, uint32_t value);
-
-static void pl011_loopback_tx(PL011State *s, uint32_t value)
-{
- if (!pl011_loopback_enabled(s)) {
- return;
- }
-
- /*
- * Caveat:
- *
- * In real hardware, TX loopback happens at the serial-bit level
- * and then reassembled by the RX logics back into bytes and placed
- * into the RX fifo. That is, loopback happens after TX fifo.
- *
- * Because the real hardware TX fifo is time-drained at the frame
- * rate governed by the configured serial format, some loopback
- * bytes in TX fifo may still be able to get into the RX fifo
- * that could be full at times while being drained at software
- * pace.
- *
- * In such scenario, the RX draining pace is the major factor
- * deciding which loopback bytes get into the RX fifo, unless
- * hardware flow-control is enabled.
- *
- * For simplicity, the above described is not emulated.
- */
- pl011_put_fifo(s, value);
-}
-
static void pl011_loopback_break(PL011State *s, int brk_enable)
{
if (brk_enable) {
@@ -361,14 +420,8 @@ static void pl011_write(void *opaque, hwaddr offset,
switch (offset >> 2) {
case 0: /* UARTDR */
- /* ??? Check if transmitter is enabled. */
ch = value;
- /* XXX this blocks entire thread. Rewrite to use
- * qemu_chr_fe_write and background I/O callbacks */
- qemu_chr_fe_write_all(&s->chr, &ch, 1);
- pl011_loopback_tx(s, ch);
- s->int_level |= INT_TX;
- pl011_update(s);
+ pl011_write_txdata(s, ch);
break;
case 1: /* UARTRSR/UARTECR */
s->rsr = 0;
@@ -390,7 +443,8 @@ static void pl011_write(void *opaque, hwaddr offset,
case 11: /* UARTLCR_H */
/* Reset the FIFO state on FIFO enable or disable */
if ((s->lcr ^ value) & LCR_FEN) {
- pl011_reset_fifo(s);
+ pl011_reset_rx_fifo(s);
+ pl011_reset_tx_fifo(s);
}
if ((s->lcr ^ value) & LCR_BRK) {
int break_enable = value & LCR_BRK;
@@ -433,37 +487,26 @@ static void pl011_write(void *opaque, hwaddr offset,
static int pl011_can_receive(void *opaque)
{
PL011State *s = (PL011State *)opaque;
- int r;
-
- r = s->read_count < pl011_get_fifo_depth(s);
- trace_pl011_can_receive(s->lcr, s->read_count, r);
- return r;
-}
+ unsigned fifo_depth = pl011_get_fifo_depth(s);
+ unsigned fifo_available = fifo_depth - s->read_count;
-static void pl011_put_fifo(void *opaque, uint32_t value)
-{
- PL011State *s = (PL011State *)opaque;
- int slot;
- unsigned pipe_depth;
+ /*
+ * In theory we should check the UART and RX enable bits here and
+ * return 0 if they are not set (so the guest can't receive data
+ * until you have enabled the UART). In practice we suspect there
+ * is at least some guest code out there which has been tested only
+ * on QEMU and which never bothers to enable the UART because we
+ * historically never enforced that. So we effectively keep the
+ * UART continuously enabled regardless of the enable bits.
+ */
- pipe_depth = pl011_get_fifo_depth(s);
- slot = (s->read_pos + s->read_count) & (pipe_depth - 1);
- s->read_fifo[slot] = value;
- s->read_count++;
- s->flags &= ~PL011_FLAG_RXFE;
- trace_pl011_put_fifo(value, s->read_count);
- if (s->read_count == pipe_depth) {
- trace_pl011_put_fifo_full();
- s->flags |= PL011_FLAG_RXFF;
- }
- if (s->read_count == s->read_trigger) {
- s->int_level |= INT_RX;
- pl011_update(s);
- }
+ trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available);
+ return fifo_available;
}
static void pl011_receive(void *opaque, const uint8_t *buf, int size)
{
+ trace_pl011_receive(size);
/*
* In loopback mode, the RX input signal is internally disconnected
* from the entire receiving logics; thus, all inputs are ignored,
@@ -473,13 +516,15 @@ static void pl011_receive(void *opaque, const uint8_t *buf, int size)
return;
}
- pl011_put_fifo(opaque, *buf);
+ for (int i = 0; i < size; i++) {
+ pl011_fifo_rx_put(opaque, buf[i]);
+ }
}
static void pl011_event(void *opaque, QEMUChrEvent event)
{
if (event == CHR_EVENT_BREAK && !pl011_loopback_enabled(opaque)) {
- pl011_put_fifo(opaque, DR_BE);
+ pl011_fifo_rx_put(opaque, DR_BE);
}
}
@@ -549,7 +594,7 @@ static const VMStateDescription vmstate_pl011 = {
.minimum_version_id = 2,
.post_load = pl011_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_UINT32(readbuff, PL011State),
+ VMSTATE_UNUSED(sizeof(uint32_t)),
VMSTATE_UINT32(flags, PL011State),
VMSTATE_UINT32(lcr, PL011State),
VMSTATE_UINT32(rsr, PL011State),
@@ -573,10 +618,9 @@ static const VMStateDescription vmstate_pl011 = {
}
};
-static Property pl011_properties[] = {
+static const Property pl011_properties[] = {
DEFINE_PROP_CHR("chardev", PL011State, chr),
DEFINE_PROP_BOOL("migrate-clk", PL011State, migrate_clk, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void pl011_init(Object *obj)
@@ -621,15 +665,16 @@ static void pl011_reset(DeviceState *dev)
s->ifl = 0x12;
s->cr = 0x300;
s->flags = 0;
- pl011_reset_fifo(s);
+ pl011_reset_rx_fifo(s);
+ pl011_reset_tx_fifo(s);
}
-static void pl011_class_init(ObjectClass *oc, void *data)
+static void pl011_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = pl011_realize;
- dc->reset = pl011_reset;
+ device_class_set_legacy_reset(dc, pl011_reset);
dc->vmsd = &vmstate_pl011;
device_class_set_props(dc, pl011_properties);
}
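One subtle pl011.c fix above is the swap of IBRD_MASK and FBRD_MASK: on the PL011 the integer baud-rate divider (UARTIBRD) is 16 bits wide while the fractional divider (UARTFBRD) is only 6 bits, and the effective baud rate is UARTCLK / (16 * (IBRD + FBRD / 64)). The hypothetical helper below, not part of the patch, shows the arithmetic with the corrected masks.

static unsigned pl011_effective_baud(unsigned clk_hz, unsigned ibrd, unsigned fbrd)
{
    ibrd &= 0xffff;                    /* IBRD_MASK: 16-bit integer divider */
    fbrd &= 0x3f;                      /* FBRD_MASK: 6-bit fractional divider */

    if (ibrd == 0) {
        return 0;                      /* divider not programmed yet */
    }
    /* baud = clk / (16 * (ibrd + fbrd/64)) = 64 * clk / (16 * (64 * ibrd + fbrd)) */
    return (64u * clk_hz) / (16u * (64u * ibrd + fbrd));
}

For example, a 24 MHz UARTCLK programmed with IBRD = 13 and FBRD = 1 yields about 115246 baud, i.e. the usual 115200 setting.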
diff --git a/hw/char/renesas_sci.c b/hw/char/renesas_sci.c
index 5cb7335..b9d0ed1 100644
--- a/hw/char/renesas_sci.c
+++ b/hw/char/renesas_sci.c
@@ -319,19 +319,18 @@ static const VMStateDescription vmstate_rsci = {
}
};
-static Property rsci_properties[] = {
+static const Property rsci_properties[] = {
DEFINE_PROP_UINT64("input-freq", RSCIState, input_freq, 0),
DEFINE_PROP_CHR("chardev", RSCIState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rsci_class_init(ObjectClass *klass, void *data)
+static void rsci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = rsci_realize;
dc->vmsd = &vmstate_rsci;
- dc->reset = rsci_reset;
+ device_class_set_legacy_reset(dc, rsci_reset);
device_class_set_props(dc, rsci_properties);
}
diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
index 9bef60d..c884be5 100644
--- a/hw/char/riscv_htif.c
+++ b/hw/char/riscv_htif.c
@@ -24,23 +24,15 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "hw/char/riscv_htif.h"
-#include "hw/char/serial.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "exec/tswap.h"
-#include "sysemu/dma.h"
-#include "sysemu/runstate.h"
-
-#define RISCV_DEBUG_HTIF 0
-#define HTIF_DEBUG(fmt, ...) \
- do { \
- if (RISCV_DEBUG_HTIF) { \
- qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
- } \
- } while (0)
+#include "system/dma.h"
+#include "system/runstate.h"
+#include "trace.h"
#define HTIF_DEV_SHIFT 56
#define HTIF_CMD_SHIFT 48
@@ -160,8 +152,7 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
uint64_t payload = val_written & 0xFFFFFFFFFFFFULL;
int resp = 0;
- HTIF_DEBUG("mtohost write: device: %d cmd: %d what: %02" PRIx64
- " -payload: %016" PRIx64 "\n", device, cmd, payload & 0xFF, payload);
+ trace_htif_uart_write_to_host(device, cmd, payload);
/*
* Currently, there is a fixed mapping of devices:
@@ -213,12 +204,16 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
} else {
uint64_t syscall[8];
cpu_physical_memory_read(payload, syscall, sizeof(syscall));
- if (tswap64(syscall[0]) == PK_SYS_WRITE &&
- tswap64(syscall[1]) == HTIF_DEV_CONSOLE &&
- tswap64(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) {
+ if (le64_to_cpu(syscall[0]) == PK_SYS_WRITE &&
+ le64_to_cpu(syscall[1]) == HTIF_DEV_CONSOLE &&
+ le64_to_cpu(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) {
uint8_t ch;
- cpu_physical_memory_read(tswap64(syscall[2]), &ch, 1);
- qemu_chr_fe_write(&s->chr, &ch, 1);
+ cpu_physical_memory_read(le64_to_cpu(syscall[2]), &ch, 1);
+ /*
+ * XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks
+ */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
resp = 0x100 | (uint8_t)payload;
} else {
qemu_log_mask(LOG_UNIMP,
@@ -237,15 +232,18 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
return;
} else if (cmd == HTIF_CONSOLE_CMD_PUTC) {
uint8_t ch = (uint8_t)payload;
- qemu_chr_fe_write(&s->chr, &ch, 1);
+ /*
+ * XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks
+ */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
resp = 0x100 | (uint8_t)payload;
} else {
qemu_log("HTIF device %d: unknown command\n", device);
}
} else {
qemu_log("HTIF unknown device or command\n");
- HTIF_DEBUG("device: %d cmd: %d what: %02" PRIx64
- " payload: %016" PRIx64, device, cmd, payload & 0xFF, payload);
+ trace_htif_uart_unknown_device_command(device, cmd, payload);
}
/*
* Latest bbl does not set fromhost to 0 if there is a value in tohost.
@@ -317,6 +315,11 @@ static void htif_mm_write(void *opaque, hwaddr addr,
static const MemoryRegionOps htif_mm_ops = {
.read = htif_mm_read,
.write = htif_mm_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
};
HTIFState *htif_mm_init(MemoryRegion *address_space, Chardev *chr,
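The riscv_htif.c hunks keep the existing tohost layout that the new trace points report: bits [63:56] select the device, bits [55:48] the command, and the low 48 bits carry the payload, per HTIF_DEV_SHIFT, HTIF_CMD_SHIFT and the 0xFFFFFFFFFFFF mask used above. A hypothetical decode helper, for illustration only:

#include <stdint.h>

static void htif_decode_tohost(uint64_t tohost, uint8_t *device,
                               uint8_t *cmd, uint64_t *payload)
{
    *device  = tohost >> 56;                   /* HTIF_DEV_SHIFT */
    *cmd     = (tohost >> 48) & 0xff;          /* HTIF_CMD_SHIFT */
    *payload = tohost & 0xFFFFFFFFFFFFULL;     /* low 48 bits */
}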
diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c
index 7719f43..3e40d5e 100644
--- a/hw/char/sclpconsole-lm.c
+++ b/hw/char/sclpconsole-lm.c
@@ -214,7 +214,7 @@ static int process_mdb(SCLPEvent *event, MDBO *mdbo)
{
int rc;
int len;
- uint8_t buffer[SIZE_BUFFER];
+ QEMU_UNINITIALIZED uint8_t buffer[SIZE_BUFFER];
len = be16_to_cpu(mdbo->length);
len -= sizeof(mdbo->length) + sizeof(mdbo->type)
@@ -333,20 +333,19 @@ static void console_reset(DeviceState *dev)
scon->write_errors = 0;
}
-static Property console_properties[] = {
+static const Property console_properties[] = {
DEFINE_PROP_CHR("chardev", SCLPConsoleLM, chr),
DEFINE_PROP_UINT32("write_errors", SCLPConsoleLM, write_errors, 0),
DEFINE_PROP_BOOL("echo", SCLPConsoleLM, echo, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void console_class_init(ObjectClass *klass, void *data)
+static void console_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCLPEventClass *ec = SCLP_EVENT_CLASS(klass);
device_class_set_props(dc, console_properties);
- dc->reset = console_reset;
+ device_class_set_legacy_reset(dc, console_reset);
dc->vmsd = &vmstate_sclplmconsole;
ec->init = console_init;
ec->get_send_mask = send_mask;
diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c
index 5d630b0..95e3045 100644
--- a/hw/char/sclpconsole.c
+++ b/hw/char/sclpconsole.c
@@ -251,18 +251,17 @@ static void console_reset(DeviceState *dev)
scon->notify = false;
}
-static Property console_properties[] = {
+static const Property console_properties[] = {
DEFINE_PROP_CHR("chardev", SCLPConsole, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void console_class_init(ObjectClass *klass, void *data)
+static void console_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCLPEventClass *ec = SCLP_EVENT_CLASS(klass);
device_class_set_props(dc, console_properties);
- dc->reset = console_reset;
+ device_class_set_legacy_reset(dc, console_reset);
dc->vmsd = &vmstate_sclpconsole;
ec->init = console_init;
ec->get_send_mask = send_mask;
diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c
index 329b352..0ea59a3 100644
--- a/hw/char/serial-isa.c
+++ b/hw/char/serial-isa.c
@@ -26,9 +26,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "hw/isa/isa.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -112,14 +113,13 @@ static const VMStateDescription vmstate_isa_serial = {
}
};
-static Property serial_isa_properties[] = {
+static const Property serial_isa_properties[] = {
DEFINE_PROP_UINT32("index", ISASerialState, index, -1),
DEFINE_PROP_UINT32("iobase", ISASerialState, iobase, -1),
DEFINE_PROP_UINT32("irq", ISASerialState, isairq, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void serial_isa_class_initfn(ObjectClass *klass, void *data)
+static void serial_isa_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
@@ -146,7 +146,7 @@ static const TypeInfo serial_isa_info = {
.instance_size = sizeof(ISASerialState),
.instance_init = serial_isa_initfn,
.class_init = serial_isa_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/char/serial-mm.c b/hw/char/serial-mm.c
new file mode 100644
index 0000000..13aba78
--- /dev/null
+++ b/hw/char/serial-mm.c
@@ -0,0 +1,156 @@
+/*
+ * QEMU 16550A UART emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2008 Citrix Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/char/serial-mm.h"
+#include "exec/cpu-common.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+
+static uint64_t serial_mm_read(void *opaque, hwaddr addr, unsigned size)
+{
+ SerialMM *s = SERIAL_MM(opaque);
+ return serial_io_ops.read(&s->serial, addr >> s->regshift, 1);
+}
+
+static void serial_mm_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ SerialMM *s = SERIAL_MM(opaque);
+ value &= 255;
+ serial_io_ops.write(&s->serial, addr >> s->regshift, value, 1);
+}
+
+static const MemoryRegionOps serial_mm_ops[3] = {
+ [DEVICE_NATIVE_ENDIAN] = {
+ .read = serial_mm_read,
+ .write = serial_mm_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.max_access_size = 8,
+ .impl.max_access_size = 8,
+ },
+ [DEVICE_LITTLE_ENDIAN] = {
+ .read = serial_mm_read,
+ .write = serial_mm_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.max_access_size = 8,
+ .impl.max_access_size = 8,
+ },
+ [DEVICE_BIG_ENDIAN] = {
+ .read = serial_mm_read,
+ .write = serial_mm_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid.max_access_size = 8,
+ .impl.max_access_size = 8,
+ },
+};
+
+static void serial_mm_realize(DeviceState *dev, Error **errp)
+{
+ SerialMM *smm = SERIAL_MM(dev);
+ SerialState *s = &smm->serial;
+
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ return;
+ }
+
+ memory_region_init_io(&s->io, OBJECT(dev),
+ &serial_mm_ops[smm->endianness], smm, "serial",
+ 8 << smm->regshift);
+ sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io);
+ sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq);
+}
+
+static const VMStateDescription vmstate_serial_mm = {
+ .name = "serial",
+ .version_id = 3,
+ .minimum_version_id = 2,
+ .fields = (const VMStateField[]) {
+ VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+SerialMM *serial_mm_init(MemoryRegion *address_space,
+ hwaddr base, int regshift,
+ qemu_irq irq, int baudbase,
+ Chardev *chr, enum device_endian end)
+{
+ SerialMM *smm = SERIAL_MM(qdev_new(TYPE_SERIAL_MM));
+ MemoryRegion *mr;
+
+ qdev_prop_set_uint8(DEVICE(smm), "regshift", regshift);
+ qdev_prop_set_uint32(DEVICE(smm), "baudbase", baudbase);
+ qdev_prop_set_chr(DEVICE(smm), "chardev", chr);
+ qdev_set_legacy_instance_id(DEVICE(smm), base, 2);
+ qdev_prop_set_uint8(DEVICE(smm), "endianness", end);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(smm), &error_fatal);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, irq);
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(smm), 0);
+ memory_region_add_subregion(address_space, base, mr);
+
+ return smm;
+}
+
+static void serial_mm_instance_init(Object *o)
+{
+ SerialMM *smm = SERIAL_MM(o);
+
+ object_initialize_child(o, "serial", &smm->serial, TYPE_SERIAL);
+
+ qdev_alias_all_properties(DEVICE(&smm->serial), o);
+}
+
+static const Property serial_mm_properties[] = {
+ /*
+ * Set the spacing between adjacent memory-mapped UART registers.
+ * Each register will be at (1 << regshift) bytes after the previous one.
+ */
+ DEFINE_PROP_UINT8("regshift", SerialMM, regshift, 0),
+ DEFINE_PROP_UINT8("endianness", SerialMM, endianness, DEVICE_NATIVE_ENDIAN),
+};
+
+static void serial_mm_class_init(ObjectClass *oc, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_props(dc, serial_mm_properties);
+ dc->realize = serial_mm_realize;
+ dc->vmsd = &vmstate_serial_mm;
+}
+
+static const TypeInfo types[] = {
+ {
+ .name = TYPE_SERIAL_MM,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .class_init = serial_mm_class_init,
+ .instance_init = serial_mm_instance_init,
+ .instance_size = sizeof(SerialMM),
+ },
+};
+
+DEFINE_TYPES(types)
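Board code adopts the new serial-mm device through serial_mm_init(), where regshift sets the register spacing documented in the property comment above: each of the eight 16550 registers sits (1 << regshift) bytes after the previous one, so the MMIO region spans 8 << regshift bytes. A sketch of a typical caller follows; the base address, IRQ wiring and baud base are invented for the example.

#include "qemu/osdep.h"
#include "hw/char/serial-mm.h"

static void demo_board_wire_uart(MemoryRegion *sysmem, qemu_irq uart_irq,
                                 Chardev *chr)
{
    serial_mm_init(sysmem,
                   0x10000000,              /* hypothetical MMIO base */
                   2,                       /* regshift: registers 4 bytes apart */
                   uart_irq,
                   115200,                  /* baudbase */
                   chr,
                   DEVICE_LITTLE_ENDIAN);
}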
diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c
index 28b2757..13df272 100644
--- a/hw/char/serial-pci-multi.c
+++ b/hw/char/serial-pci-multi.c
@@ -45,8 +45,7 @@ typedef struct PCIMultiSerialState {
char *name[PCI_SERIAL_MAX_PORTS];
SerialState state[PCI_SERIAL_MAX_PORTS];
uint32_t level[PCI_SERIAL_MAX_PORTS];
- qemu_irq *irqs;
- uint8_t prog_if;
+ IRQState irqs[PCI_SERIAL_MAX_PORTS];
} PCIMultiSerialState;
static void multi_serial_pci_exit(PCIDevice *dev)
@@ -61,7 +60,6 @@ static void multi_serial_pci_exit(PCIDevice *dev)
memory_region_del_subregion(&pci->iobar, &s->io);
g_free(pci->name[i]);
}
- qemu_free_irqs(pci->irqs, pci->ports);
}
static void multi_serial_irq_mux(void *opaque, int n, int level)
@@ -98,11 +96,10 @@ static void multi_serial_pci_realize(PCIDevice *dev, Error **errp)
SerialState *s;
size_t i, nports = multi_serial_get_port_count(pc);
- pci->dev.config[PCI_CLASS_PROG] = pci->prog_if;
- pci->dev.config[PCI_INTERRUPT_PIN] = 0x01;
+ pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */
+ pci->dev.config[PCI_INTERRUPT_PIN] = 1;
memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * nports);
pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar);
- pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, nports);
for (i = 0; i < nports; i++) {
s = pci->state + i;
@@ -110,7 +107,7 @@ static void multi_serial_pci_realize(PCIDevice *dev, Error **errp)
multi_serial_pci_exit(dev);
return;
}
- s->irq = pci->irqs[i];
+ s->irq = &pci->irqs[i];
pci->name[i] = g_strdup_printf("uart #%zu", i + 1);
memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s,
pci->name[i], 8);
@@ -132,23 +129,20 @@ static const VMStateDescription vmstate_pci_multi_serial = {
}
};
-static Property multi_2x_serial_pci_properties[] = {
+static const Property multi_2x_serial_pci_properties[] = {
DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
- DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property multi_4x_serial_pci_properties[] = {
+static const Property multi_4x_serial_pci_properties[] = {
DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr),
DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr),
DEFINE_PROP_CHR("chardev3", PCIMultiSerialState, state[2].chr),
DEFINE_PROP_CHR("chardev4", PCIMultiSerialState, state[3].chr),
- DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02),
- DEFINE_PROP_END_OF_LIST(),
};
-static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data)
+static void multi_2x_serial_pci_class_initfn(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
@@ -163,7 +157,8 @@ static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
-static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, void *data)
+static void multi_4x_serial_pci_class_initfn(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
@@ -185,6 +180,7 @@ static void multi_serial_init(Object *o)
size_t i, nports = multi_serial_get_port_count(PCI_DEVICE_GET_CLASS(dev));
for (i = 0; i < nports; i++) {
+ qemu_init_irq(&pms->irqs[i], multi_serial_irq_mux, pms, i);
object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL);
}
}
@@ -195,7 +191,7 @@ static const TypeInfo multi_2x_serial_pci_info = {
.instance_size = sizeof(PCIMultiSerialState),
.instance_init = multi_serial_init,
.class_init = multi_2x_serial_pci_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -207,7 +203,7 @@ static const TypeInfo multi_4x_serial_pci_info = {
.instance_size = sizeof(PCIMultiSerialState),
.instance_init = multi_serial_init,
.class_init = multi_4x_serial_pci_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
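
The core of this change is replacing the heap-allocated qemu_irq array with IRQState members initialized in place; a condensed sketch of that pattern with hypothetical names (MyDevState, MY_DEV, my_irq_mux):

    typedef struct MyDevState {
        PCIDevice dev;
        IRQState irqs[4];   /* embedded: no qemu_allocate_irqs()/qemu_free_irqs() */
    } MyDevState;

    static void my_dev_init(Object *o)
    {
        MyDevState *s = MY_DEV(o);

        for (int i = 0; i < 4; i++) {
            /* each IRQ invokes my_irq_mux(s, i, level) when raised or lowered */
            qemu_init_irq(&s->irqs[i], my_irq_mux, s, i);
        }
    }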
diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c
index f8a1a94..46efabc 100644
--- a/hw/char/serial-pci.c
+++ b/hw/char/serial-pci.c
@@ -38,7 +38,6 @@
struct PCISerialState {
PCIDevice dev;
SerialState state;
- uint8_t prog_if;
};
#define TYPE_PCI_SERIAL "pci-serial"
@@ -53,8 +52,8 @@ static void serial_pci_realize(PCIDevice *dev, Error **errp)
return;
}
- pci->dev.config[PCI_CLASS_PROG] = pci->prog_if;
- pci->dev.config[PCI_INTERRUPT_PIN] = 0x01;
+ pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */
+ pci->dev.config[PCI_INTERRUPT_PIN] = 1;
s->irq = pci_allocate_irq(&pci->dev);
memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, "serial", 8);
@@ -81,12 +80,7 @@ static const VMStateDescription vmstate_pci_serial = {
}
};
-static Property serial_pci_properties[] = {
- DEFINE_PROP_UINT8("prog_if", PCISerialState, prog_if, 0x02),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void serial_pci_class_initfn(ObjectClass *klass, void *data)
+static void serial_pci_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
@@ -97,7 +91,6 @@ static void serial_pci_class_initfn(ObjectClass *klass, void *data)
pc->revision = 1;
pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL;
dc->vmsd = &vmstate_pci_serial;
- device_class_set_props(dc, serial_pci_properties);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
@@ -116,7 +109,7 @@ static const TypeInfo serial_pci_info = {
.instance_size = sizeof(PCISerialState),
.instance_init = serial_pci_init,
.class_init = serial_pci_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/char/serial.c b/hw/char/serial.c
index d8b2db5..03fec3f 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -31,8 +31,8 @@
#include "chardev/char-serial.h"
#include "qapi/error.h"
#include "qemu/timer.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/qdev-properties.h"
@@ -951,13 +951,6 @@ static void serial_unrealize(DeviceState *dev)
qemu_unregister_reset(serial_reset, s);
}
-/* Change the main reference oscillator frequency. */
-void serial_set_frequency(SerialState *s, uint32_t frequency)
-{
- s->baudbase = frequency;
- serial_update_parameters(s);
-}
-
const MemoryRegionOps serial_io_ops = {
.read = serial_ioport_read,
.write = serial_ioport_write,
@@ -971,14 +964,13 @@ const MemoryRegionOps serial_io_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static Property serial_properties[] = {
+static const Property serial_properties[] = {
DEFINE_PROP_CHR("chardev", SerialState, chr),
DEFINE_PROP_UINT32("baudbase", SerialState, baudbase, 115200),
DEFINE_PROP_BOOL("wakeup", SerialState, wakeup, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void serial_class_init(ObjectClass *klass, void* data)
+static void serial_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -996,135 +988,9 @@ static const TypeInfo serial_info = {
.class_init = serial_class_init,
};
-/* Memory mapped interface */
-static uint64_t serial_mm_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- SerialMM *s = SERIAL_MM(opaque);
- return serial_ioport_read(&s->serial, addr >> s->regshift, 1);
-}
-
-static void serial_mm_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- SerialMM *s = SERIAL_MM(opaque);
- value &= 255;
- serial_ioport_write(&s->serial, addr >> s->regshift, value, 1);
-}
-
-static const MemoryRegionOps serial_mm_ops[3] = {
- [DEVICE_NATIVE_ENDIAN] = {
- .read = serial_mm_read,
- .write = serial_mm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid.max_access_size = 8,
- .impl.max_access_size = 8,
- },
- [DEVICE_LITTLE_ENDIAN] = {
- .read = serial_mm_read,
- .write = serial_mm_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid.max_access_size = 8,
- .impl.max_access_size = 8,
- },
- [DEVICE_BIG_ENDIAN] = {
- .read = serial_mm_read,
- .write = serial_mm_write,
- .endianness = DEVICE_BIG_ENDIAN,
- .valid.max_access_size = 8,
- .impl.max_access_size = 8,
- },
-};
-
-static void serial_mm_realize(DeviceState *dev, Error **errp)
-{
- SerialMM *smm = SERIAL_MM(dev);
- SerialState *s = &smm->serial;
-
- if (!qdev_realize(DEVICE(s), NULL, errp)) {
- return;
- }
-
- memory_region_init_io(&s->io, OBJECT(dev),
- &serial_mm_ops[smm->endianness], smm, "serial",
- 8 << smm->regshift);
- sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io);
- sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq);
-}
-
-static const VMStateDescription vmstate_serial_mm = {
- .name = "serial",
- .version_id = 3,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-SerialMM *serial_mm_init(MemoryRegion *address_space,
- hwaddr base, int regshift,
- qemu_irq irq, int baudbase,
- Chardev *chr, enum device_endian end)
-{
- SerialMM *smm = SERIAL_MM(qdev_new(TYPE_SERIAL_MM));
- MemoryRegion *mr;
-
- qdev_prop_set_uint8(DEVICE(smm), "regshift", regshift);
- qdev_prop_set_uint32(DEVICE(smm), "baudbase", baudbase);
- qdev_prop_set_chr(DEVICE(smm), "chardev", chr);
- qdev_set_legacy_instance_id(DEVICE(smm), base, 2);
- qdev_prop_set_uint8(DEVICE(smm), "endianness", end);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(smm), &error_fatal);
-
- sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, irq);
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(smm), 0);
- memory_region_add_subregion(address_space, base, mr);
-
- return smm;
-}
-
-static void serial_mm_instance_init(Object *o)
-{
- SerialMM *smm = SERIAL_MM(o);
-
- object_initialize_child(o, "serial", &smm->serial, TYPE_SERIAL);
-
- qdev_alias_all_properties(DEVICE(&smm->serial), o);
-}
-
-static Property serial_mm_properties[] = {
- /*
- * Set the spacing between adjacent memory-mapped UART registers.
- * Each register will be at (1 << regshift) bytes after the
- * previous one.
- */
- DEFINE_PROP_UINT8("regshift", SerialMM, regshift, 0),
- DEFINE_PROP_UINT8("endianness", SerialMM, endianness, DEVICE_NATIVE_ENDIAN),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void serial_mm_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- device_class_set_props(dc, serial_mm_properties);
- dc->realize = serial_mm_realize;
- dc->vmsd = &vmstate_serial_mm;
-}
-
-static const TypeInfo serial_mm_info = {
- .name = TYPE_SERIAL_MM,
- .parent = TYPE_SYS_BUS_DEVICE,
- .class_init = serial_mm_class_init,
- .instance_init = serial_mm_instance_init,
- .instance_size = sizeof(SerialMM),
-};
-
static void serial_register_types(void)
{
type_register_static(&serial_info);
- type_register_static(&serial_mm_info);
}
type_init(serial_register_types)
diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c
index 355886e..30447fa 100644
--- a/hw/char/sh_serial.c
+++ b/hw/char/sh_serial.c
@@ -78,10 +78,6 @@ struct SHSerialState {
qemu_irq bri;
};
-typedef struct {} SHSerialStateClass;
-
-OBJECT_DEFINE_TYPE(SHSerialState, sh_serial, SH_SERIAL, SYS_BUS_DEVICE)
-
static void sh_serial_clear_fifo(SHSerialState *s)
{
memset(s->rx_fifo, 0, SH_RX_FIFO_LENGTH);
@@ -320,7 +316,7 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs,
static int sh_serial_can_receive(SHSerialState *s)
{
- return s->scr & (1 << 4);
+ return s->scr & (1 << 4) ? SH_RX_FIFO_LENGTH - s->rx_head : 0;
}
static void sh_serial_receive_break(SHSerialState *s)
@@ -353,22 +349,20 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size)
if (s->feat & SH_SERIAL_FEAT_SCIF) {
int i;
for (i = 0; i < size; i++) {
- if (s->rx_cnt < SH_RX_FIFO_LENGTH) {
- s->rx_fifo[s->rx_head++] = buf[i];
- if (s->rx_head == SH_RX_FIFO_LENGTH) {
- s->rx_head = 0;
- }
- s->rx_cnt++;
- if (s->rx_cnt >= s->rtrg) {
- s->flags |= SH_SERIAL_FLAG_RDF;
- if (s->scr & (1 << 6) && s->rxi) {
- timer_del(&s->fifo_timeout_timer);
- qemu_set_irq(s->rxi, 1);
- }
- } else {
- timer_mod(&s->fifo_timeout_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu);
+ s->rx_fifo[s->rx_head++] = buf[i];
+ if (s->rx_head == SH_RX_FIFO_LENGTH) {
+ s->rx_head = 0;
+ }
+ s->rx_cnt++;
+ if (s->rx_cnt >= s->rtrg) {
+ s->flags |= SH_SERIAL_FLAG_RDF;
+ if (s->scr & (1 << 6) && s->rxi) {
+ timer_del(&s->fifo_timeout_timer);
+ qemu_set_irq(s->rxi, 1);
}
+ } else {
+ timer_mod(&s->fifo_timeout_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu);
}
}
} else {
@@ -436,30 +430,37 @@ static void sh_serial_realize(DeviceState *d, Error **errp)
s->etu = NANOSECONDS_PER_SECOND / 9600;
}
-static void sh_serial_finalize(Object *obj)
+static void sh_serial_unrealize(DeviceState *dev)
{
- SHSerialState *s = SH_SERIAL(obj);
+ SHSerialState *s = SH_SERIAL(dev);
timer_del(&s->fifo_timeout_timer);
}
-static void sh_serial_init(Object *obj)
-{
-}
-
-static Property sh_serial_properties[] = {
+static const Property sh_serial_properties[] = {
DEFINE_PROP_CHR("chardev", SHSerialState, chr),
DEFINE_PROP_UINT8("features", SHSerialState, feat, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void sh_serial_class_init(ObjectClass *oc, void *data)
+static void sh_serial_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
device_class_set_props(dc, sh_serial_properties);
dc->realize = sh_serial_realize;
- dc->reset = sh_serial_reset;
+ dc->unrealize = sh_serial_unrealize;
+ device_class_set_legacy_reset(dc, sh_serial_reset);
/* Reason: part of SuperH CPU/SoC, needs to be wired up */
dc->user_creatable = false;
}
+
+static const TypeInfo sh_serial_types[] = {
+ {
+ .name = TYPE_SH_SERIAL,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SHSerialState),
+ .class_init = sh_serial_class_init,
+ },
+};
+
+DEFINE_TYPES(sh_serial_types)
diff --git a/hw/char/shakti_uart.c b/hw/char/shakti_uart.c
index 98b142c..6e216ed 100644
--- a/hw/char/shakti_uart.c
+++ b/hw/char/shakti_uart.c
@@ -157,15 +157,14 @@ static void shakti_uart_instance_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &sus->mmio);
}
-static Property shakti_uart_properties[] = {
+static const Property shakti_uart_properties[] = {
DEFINE_PROP_CHR("chardev", ShaktiUartState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void shakti_uart_class_init(ObjectClass *klass, void *data)
+static void shakti_uart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = shakti_uart_reset;
+ device_class_set_legacy_reset(dc, shakti_uart_reset);
dc->realize = shakti_uart_realize;
device_class_set_props(dc, shakti_uart_properties);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c
index 7fc6787..0fc89e7 100644
--- a/hw/char/sifive_uart.c
+++ b/hw/char/sifive_uart.c
@@ -26,6 +26,8 @@
#include "hw/char/sifive_uart.h"
#include "hw/qdev-properties-system.h"
+#define TX_INTERRUPT_TRIGGER_DELAY_NS 100
+
/*
* Not yet implemented:
*
@@ -64,6 +66,72 @@ static void sifive_uart_update_irq(SiFiveUARTState *s)
}
}
+static gboolean sifive_uart_xmit(void *do_not_use, GIOCondition cond,
+ void *opaque)
+{
+ SiFiveUARTState *s = opaque;
+ int ret;
+ const uint8_t *characters;
+ uint32_t numptr = 0;
+
+ /* instantly drain the FIFO when there's no back-end */
+ if (!qemu_chr_fe_backend_connected(&s->chr)) {
+ fifo8_reset(&s->tx_fifo);
+ return G_SOURCE_REMOVE;
+ }
+
+ if (fifo8_is_empty(&s->tx_fifo)) {
+ return G_SOURCE_REMOVE;
+ }
+
+ /* Don't pop the FIFO in case the write fails */
+ characters = fifo8_peek_bufptr(&s->tx_fifo,
+ fifo8_num_used(&s->tx_fifo), &numptr);
+ ret = qemu_chr_fe_write(&s->chr, characters, numptr);
+
+ if (ret >= 0) {
+ /* We wrote the data, actually pop the fifo */
+ fifo8_pop_bufptr(&s->tx_fifo, ret, NULL);
+ }
+
+ if (!fifo8_is_empty(&s->tx_fifo)) {
+ guint r = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP,
+ sifive_uart_xmit, s);
+ if (!r) {
+ fifo8_reset(&s->tx_fifo);
+ return G_SOURCE_REMOVE;
+ }
+ }
+
+ /* Clear the TX Full bit */
+ if (!fifo8_is_full(&s->tx_fifo)) {
+ s->txfifo &= ~SIFIVE_UART_TXFIFO_FULL;
+ }
+
+ sifive_uart_update_irq(s);
+ return G_SOURCE_REMOVE;
+}
+
+static void sifive_uart_write_tx_fifo(SiFiveUARTState *s, const uint8_t *buf,
+ int size)
+{
+ uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ if (size > fifo8_num_free(&s->tx_fifo)) {
+ size = fifo8_num_free(&s->tx_fifo);
+ qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow");
+ }
+
+ fifo8_push_all(&s->tx_fifo, buf, size);
+
+ if (fifo8_is_full(&s->tx_fifo)) {
+ s->txfifo |= SIFIVE_UART_TXFIFO_FULL;
+ }
+
+ timer_mod(s->fifo_trigger_handle, current_time +
+ TX_INTERRUPT_TRIGGER_DELAY_NS);
+}
+
static uint64_t
sifive_uart_read(void *opaque, hwaddr addr, unsigned int size)
{
@@ -82,7 +150,7 @@ sifive_uart_read(void *opaque, hwaddr addr, unsigned int size)
return 0x80000000;
case SIFIVE_UART_TXFIFO:
- return 0; /* Should check tx fifo */
+ return s->txfifo;
case SIFIVE_UART_IE:
return s->ie;
case SIFIVE_UART_IP:
@@ -106,12 +174,11 @@ sifive_uart_write(void *opaque, hwaddr addr,
{
SiFiveUARTState *s = opaque;
uint32_t value = val64;
- unsigned char ch = value;
+ uint8_t ch = value;
switch (addr) {
case SIFIVE_UART_TXFIFO:
- qemu_chr_fe_write(&s->chr, &ch, 1);
- sifive_uart_update_irq(s);
+ sifive_uart_write_tx_fifo(s, &ch, 1);
return;
case SIFIVE_UART_IE:
s->ie = val64;
@@ -131,6 +198,13 @@ sifive_uart_write(void *opaque, hwaddr addr,
__func__, (int)addr, (int)value);
}
+static void fifo_trigger_update(void *opaque)
+{
+ SiFiveUARTState *s = opaque;
+
+ sifive_uart_xmit(NULL, G_IO_OUT, s);
+}
+
static const MemoryRegionOps sifive_uart_ops = {
.read = sifive_uart_read,
.write = sifive_uart_write,
@@ -177,9 +251,25 @@ static int sifive_uart_be_change(void *opaque)
return 0;
}
-static Property sifive_uart_properties[] = {
+static void sifive_uart_reset_enter(Object *obj, ResetType type)
+{
+ SiFiveUARTState *s = SIFIVE_UART(obj);
+
+ s->txfifo = 0;
+ s->ie = 0;
+ s->ip = 0;
+ s->txctrl = 0;
+ s->rxctrl = 0;
+ s->div = 0;
+
+ s->rx_fifo_len = 0;
+
+ memset(s->rx_fifo, 0, SIFIVE_UART_RX_FIFO_SIZE);
+ fifo8_reset(&s->tx_fifo);
+}
+
+static const Property sifive_uart_properties[] = {
DEFINE_PROP_CHR("chardev", SiFiveUARTState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void sifive_uart_init(Object *obj)
@@ -197,21 +287,24 @@ static void sifive_uart_realize(DeviceState *dev, Error **errp)
{
SiFiveUARTState *s = SIFIVE_UART(dev);
- qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx,
- sifive_uart_event, sifive_uart_be_change, s,
- NULL, true);
+ fifo8_create(&s->tx_fifo, SIFIVE_UART_TX_FIFO_SIZE);
+
+ s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ fifo_trigger_update, s);
+
+ if (qemu_chr_fe_backend_connected(&s->chr)) {
+ qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx,
+ sifive_uart_event, sifive_uart_be_change, s,
+ NULL, true);
+ }
}
-static void sifive_uart_reset_enter(Object *obj, ResetType type)
+static void sifive_uart_unrealize(DeviceState *dev)
{
- SiFiveUARTState *s = SIFIVE_UART(obj);
- s->ie = 0;
- s->ip = 0;
- s->txctrl = 0;
- s->rxctrl = 0;
- s->div = 0;
- s->rx_fifo_len = 0;
+ SiFiveUARTState *s = SIFIVE_UART(dev);
+
+ fifo8_destroy(&s->tx_fifo);
}
static void sifive_uart_reset_hold(Object *obj, ResetType type)
@@ -222,8 +315,8 @@ static void sifive_uart_reset_hold(Object *obj, ResetType type)
static const VMStateDescription vmstate_sifive_uart = {
.name = TYPE_SIFIVE_UART,
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (const VMStateField[]) {
VMSTATE_UINT8_ARRAY(rx_fifo, SiFiveUARTState,
SIFIVE_UART_RX_FIFO_SIZE),
@@ -233,17 +326,21 @@ static const VMStateDescription vmstate_sifive_uart = {
VMSTATE_UINT32(txctrl, SiFiveUARTState),
VMSTATE_UINT32(rxctrl, SiFiveUARTState),
VMSTATE_UINT32(div, SiFiveUARTState),
+ VMSTATE_UINT32(txfifo, SiFiveUARTState),
+ VMSTATE_FIFO8(tx_fifo, SiFiveUARTState),
+ VMSTATE_TIMER_PTR(fifo_trigger_handle, SiFiveUARTState),
VMSTATE_END_OF_LIST()
},
};
-static void sifive_uart_class_init(ObjectClass *oc, void *data)
+static void sifive_uart_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
dc->realize = sifive_uart_realize;
+ dc->unrealize = sifive_uart_unrealize;
dc->vmsd = &vmstate_sifive_uart;
rc->phases.enter = sifive_uart_reset_enter;
rc->phases.hold = sifive_uart_reset_hold;
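
Condensed, this is the peek-then-pop idiom sifive_uart_xmit() uses so that bytes stay queued until the chardev backend has actually accepted them (names as in the hunk above):

    uint32_t avail = 0;
    const uint8_t *buf = fifo8_peek_bufptr(&s->tx_fifo,
                                           fifo8_num_used(&s->tx_fifo), &avail);
    int written = qemu_chr_fe_write(&s->chr, buf, avail);
    if (written >= 0) {
        /* drop only the bytes the backend took; the rest stays queued */
        fifo8_pop_bufptr(&s->tx_fifo, written, NULL);
    }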
diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c
index 3e23d9c..fc8ea60 100644
--- a/hw/char/spapr_vty.c
+++ b/hw/char/spapr_vty.c
@@ -163,10 +163,9 @@ void spapr_vty_create(SpaprVioBus *bus, Chardev *chardev)
qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
}
-static Property spapr_vty_properties[] = {
+static const Property spapr_vty_properties[] = {
DEFINE_SPAPR_PROPERTIES(SpaprVioVty, sdev),
DEFINE_PROP_CHR("chardev", SpaprVioVty, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_spapr_vty = {
@@ -183,7 +182,7 @@ static const VMStateDescription vmstate_spapr_vty = {
},
};
-static void spapr_vty_class_init(ObjectClass *klass, void *data)
+static void spapr_vty_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c
index 8753afe..45c3064 100644
--- a/hw/char/stm32f2xx_usart.c
+++ b/hw/char/stm32f2xx_usart.c
@@ -30,17 +30,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
-#ifndef STM_USART_ERR_DEBUG
-#define STM_USART_ERR_DEBUG 0
-#endif
-
-#define DB_PRINT_L(lvl, fmt, args...) do { \
- if (STM_USART_ERR_DEBUG >= lvl) { \
- qemu_log("%s: " fmt, __func__, ## args); \
- } \
-} while (0)
-
-#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+#include "trace.h"
static int stm32f2xx_usart_can_receive(void *opaque)
{
@@ -67,10 +57,11 @@ static void stm32f2xx_update_irq(STM32F2XXUsartState *s)
static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size)
{
STM32F2XXUsartState *s = opaque;
+ DeviceState *d = DEVICE(s);
if (!(s->usart_cr1 & USART_CR1_UE && s->usart_cr1 & USART_CR1_RE)) {
/* USART not enabled - drop the chars */
- DB_PRINT("Dropping the chars\n");
+ trace_stm32f2xx_usart_drop(d->id);
return;
}
@@ -79,7 +70,7 @@ static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size)
stm32f2xx_update_irq(s);
- DB_PRINT("Receiving: %c\n", s->usart_dr);
+ trace_stm32f2xx_usart_receive(d->id, *buf);
}
static void stm32f2xx_usart_reset(DeviceState *dev)
@@ -101,49 +92,55 @@ static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr,
unsigned int size)
{
STM32F2XXUsartState *s = opaque;
- uint64_t retvalue;
-
- DB_PRINT("Read 0x%"HWADDR_PRIx"\n", addr);
+ DeviceState *d = DEVICE(s);
+ uint64_t retvalue = 0;
switch (addr) {
case USART_SR:
retvalue = s->usart_sr;
qemu_chr_fe_accept_input(&s->chr);
- return retvalue;
+ break;
case USART_DR:
- DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr);
retvalue = s->usart_dr & 0x3FF;
s->usart_sr &= ~USART_SR_RXNE;
qemu_chr_fe_accept_input(&s->chr);
stm32f2xx_update_irq(s);
- return retvalue;
+ break;
case USART_BRR:
- return s->usart_brr;
+ retvalue = s->usart_brr;
+ break;
case USART_CR1:
- return s->usart_cr1;
+ retvalue = s->usart_cr1;
+ break;
case USART_CR2:
- return s->usart_cr2;
+ retvalue = s->usart_cr2;
+ break;
case USART_CR3:
- return s->usart_cr3;
+ retvalue = s->usart_cr3;
+ break;
case USART_GTPR:
- return s->usart_gtpr;
+ retvalue = s->usart_gtpr;
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr);
return 0;
}
- return 0;
+ trace_stm32f2xx_usart_read(d->id, size, addr, retvalue);
+
+ return retvalue;
}
static void stm32f2xx_usart_write(void *opaque, hwaddr addr,
uint64_t val64, unsigned int size)
{
STM32F2XXUsartState *s = opaque;
+ DeviceState *d = DEVICE(s);
uint32_t value = val64;
unsigned char ch;
- DB_PRINT("Write 0x%" PRIx32 ", 0x%"HWADDR_PRIx"\n", value, addr);
+ trace_stm32f2xx_usart_write(d->id, size, addr, val64);
switch (addr) {
case USART_SR:
@@ -199,9 +196,8 @@ static const MemoryRegionOps stm32f2xx_usart_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static Property stm32f2xx_usart_properties[] = {
+static const Property stm32f2xx_usart_properties[] = {
DEFINE_PROP_CHR("chardev", STM32F2XXUsartState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void stm32f2xx_usart_init(Object *obj)
@@ -224,11 +220,11 @@ static void stm32f2xx_usart_realize(DeviceState *dev, Error **errp)
s, NULL, true);
}
-static void stm32f2xx_usart_class_init(ObjectClass *klass, void *data)
+static void stm32f2xx_usart_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f2xx_usart_reset;
+ device_class_set_legacy_reset(dc, stm32f2xx_usart_reset);
device_class_set_props(dc, stm32f2xx_usart_properties);
dc->realize = stm32f2xx_usart_realize;
}
diff --git a/hw/char/stm32l4x5_usart.c b/hw/char/stm32l4x5_usart.c
index fc5dcac..afbe4ba 100644
--- a/hw/char/stm32l4x5_usart.c
+++ b/hw/char/stm32l4x5_usart.c
@@ -154,6 +154,21 @@ REG32(RDR, 0x24)
REG32(TDR, 0x28)
FIELD(TDR, TDR, 0, 9)
+static void stm32l4x5_update_isr(Stm32l4x5UsartBaseState *s)
+{
+ if (s->cr1 & R_CR1_TE_MASK) {
+ s->isr |= R_ISR_TEACK_MASK;
+ } else {
+ s->isr &= ~R_ISR_TEACK_MASK;
+ }
+
+ if (s->cr1 & R_CR1_RE_MASK) {
+ s->isr |= R_ISR_REACK_MASK;
+ } else {
+ s->isr &= ~R_ISR_REACK_MASK;
+ }
+}
+
static void stm32l4x5_update_irq(Stm32l4x5UsartBaseState *s)
{
if (((s->isr & R_ISR_WUF_MASK) && (s->cr3 & R_CR3_WUFIE_MASK)) ||
@@ -456,6 +471,7 @@ static void stm32l4x5_usart_base_write(void *opaque, hwaddr addr,
case A_CR1:
s->cr1 = value;
stm32l4x5_update_params(s);
+ stm32l4x5_update_isr(s);
stm32l4x5_update_irq(s);
return;
case A_CR2:
@@ -518,9 +534,8 @@ static const MemoryRegionOps stm32l4x5_usart_base_ops = {
},
};
-static Property stm32l4x5_usart_base_properties[] = {
+static const Property stm32l4x5_usart_base_properties[] = {
DEFINE_PROP_CHR("chardev", Stm32l4x5UsartBaseState, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void stm32l4x5_usart_base_init(Object *obj)
@@ -579,7 +594,8 @@ static void stm32l4x5_usart_base_realize(DeviceState *dev, Error **errp)
s, NULL, true);
}
-static void stm32l4x5_usart_base_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_usart_base_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -590,21 +606,21 @@ static void stm32l4x5_usart_base_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_stm32l4x5_usart_base;
}
-static void stm32l4x5_usart_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5_usart_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc);
subc->type = STM32L4x5_USART;
}
-static void stm32l4x5_uart_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5_uart_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc);
subc->type = STM32L4x5_UART;
}
-static void stm32l4x5_lpuart_class_init(ObjectClass *oc, void *data)
+static void stm32l4x5_lpuart_class_init(ObjectClass *oc, const void *data)
{
Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc);
diff --git a/hw/char/terminal3270.c b/hw/char/terminal3270.c
index 82e85fa..d950c17 100644
--- a/hw/char/terminal3270.c
+++ b/hw/char/terminal3270.c
@@ -283,9 +283,8 @@ static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd)
return (retval <= 0) ? 0 : get_cds(t)->count;
}
-static Property terminal_properties[] = {
+static const Property terminal_properties[] = {
DEFINE_PROP_CHR("chardev", Terminal3270, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription terminal3270_vmstate = {
@@ -293,7 +292,7 @@ static const VMStateDescription terminal3270_vmstate = {
.unmigratable = 1,
};
-static void terminal_class_init(ObjectClass *klass, void *data)
+static void terminal_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
EmulatedCcw3270Class *ck = EMULATED_CCW_3270_CLASS(klass);
diff --git a/hw/char/trace-events b/hw/char/trace-events
index 8875758..05a3303 100644
--- a/hw/char/trace-events
+++ b/hw/char/trace-events
@@ -52,15 +52,21 @@ escc_sunkbd_event_out(int ch) "Translated keycode 0x%2.2x"
escc_kbd_command(int val) "Command %d"
escc_sunmouse_event(int dx, int dy, int buttons_state) "dx=%d dy=%d buttons=0x%01x"
+# imx_serial.c
+imx_serial_read(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03" PRIu64 "] -> 0x%08" PRIx64
+imx_serial_write(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03" PRIu64 "] <- 0x%08" PRIx64
+imx_serial_put_data(const char *chrname, uint32_t value) "%s: 0x%" PRIx32
+
# pl011.c
pl011_irq_state(int level) "irq state %d"
pl011_read(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s"
-pl011_read_fifo(int read_count) "FIFO read, read_count now %d"
+pl011_read_fifo(unsigned rx_fifo_used, size_t rx_fifo_depth) "RX FIFO read, used %u/%zu"
pl011_write(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s"
-pl011_can_receive(uint32_t lcr, int read_count, int r) "LCR 0x%08x read_count %d returning %d"
-pl011_put_fifo(uint32_t c, int read_count) "new char 0x%x read_count now %d"
-pl011_put_fifo_full(void) "FIFO now full, RXFF set"
+pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, size_t rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%zu, can_receive %u chars"
+pl011_fifo_rx_put(uint32_t c, unsigned read_count, size_t rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%zu depth used"
+pl011_fifo_rx_full(void) "RX FIFO now full, RXFF set"
pl011_baudrate_change(unsigned int baudrate, uint64_t clock, uint32_t ibrd, uint32_t fbrd) "new baudrate %u (clk: %" PRIu64 "hz, ibrd: %" PRIu32 ", fbrd: %" PRIu32 ")"
+pl011_receive(int size) "recv %d chars"
# cmsdk-apb-uart.c
cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
@@ -125,3 +131,13 @@ xen_console_unrealize(unsigned int idx) "idx %u"
xen_console_realize(unsigned int idx, const char *chrdev) "idx %u chrdev %s"
xen_console_device_create(unsigned int idx) "idx %u"
xen_console_device_destroy(unsigned int idx) "idx %u"
+
+# stm32f2xx_usart.c
+stm32f2xx_usart_read(char *id, unsigned size, uint64_t ofs, uint64_t val) " %s size %d ofs 0x%02" PRIx64 " -> 0x%02" PRIx64
+stm32f2xx_usart_write(char *id, unsigned size, uint64_t ofs, uint64_t val) "%s size %d ofs 0x%02" PRIx64 " <- 0x%02" PRIx64
+stm32f2xx_usart_drop(char *id) " %s dropping the chars"
+stm32f2xx_usart_receive(char *id, uint8_t chr) " %s receiving '%c'"
+
+# riscv_htif.c
+htif_uart_write_to_host(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64
+htif_uart_unknown_device_command(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64
diff --git a/hw/char/virtio-console.c b/hw/char/virtio-console.c
index dbe0b28..0932a35 100644
--- a/hw/char/virtio-console.c
+++ b/hw/char/virtio-console.c
@@ -261,7 +261,7 @@ static void virtconsole_unrealize(DeviceState *dev)
}
}
-static void virtconsole_class_init(ObjectClass *klass, void *data)
+static void virtconsole_class_init(ObjectClass *klass, const void *data)
{
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass);
@@ -274,12 +274,11 @@ static const TypeInfo virtconsole_info = {
.class_init = virtconsole_class_init,
};
-static Property virtserialport_properties[] = {
+static const Property virtserialport_properties[] = {
DEFINE_PROP_CHR("chardev", VirtConsole, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtserialport_class_init(ObjectClass *klass, void *data)
+static void virtserialport_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass);
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 2094d21..673c50f 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -622,7 +622,7 @@ static void guest_reset(VirtIOSerial *vser)
}
}
-static void set_status(VirtIODevice *vdev, uint8_t status)
+static int set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
@@ -650,6 +650,7 @@ static void set_status(VirtIODevice *vdev, uint8_t status)
vsc->enable_backend(port, vdev->vm_running);
}
}
+ return 0;
}
static void vser_reset(VirtIODevice *vdev)
@@ -835,13 +836,12 @@ static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f,
static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent);
-static Property virtser_props[] = {
+static const Property virtser_props[] = {
DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID),
DEFINE_PROP_STRING("name", VirtIOSerialPort, name),
- DEFINE_PROP_END_OF_LIST()
};
-static void virtser_bus_class_init(ObjectClass *klass, void *data)
+static void virtser_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
k->print_dev = virtser_bus_dev_print;
@@ -1093,7 +1093,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
QLIST_INSERT_HEAD(&vserdevices.devices, vser, next);
}
-static void virtio_serial_port_class_init(ObjectClass *klass, void *data)
+static void virtio_serial_port_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
@@ -1153,15 +1153,14 @@ static const VMStateDescription vmstate_virtio_console = {
},
};
-static Property virtio_serial_properties[] = {
+static const Property virtio_serial_properties[] = {
DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
31),
DEFINE_PROP_BIT64("emergency-write", VirtIOSerial, host_features,
VIRTIO_CONSOLE_F_EMERG_WRITE, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_serial_class_init(ObjectClass *klass, void *data)
+static void virtio_serial_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -1190,7 +1189,7 @@ static const TypeInfo virtio_device_info = {
.parent = TYPE_VIRTIO_DEVICE,
.instance_size = sizeof(VirtIOSerial),
.class_init = virtio_serial_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index 683c92a..9c34a55 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -25,7 +25,7 @@
#include <termios.h>
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "chardev/char-fe.h"
#include "hw/xen/xen-backend.h"
#include "hw/xen/xen-bus-helper.h"
@@ -367,28 +367,28 @@ static char *xen_console_get_name(XenDevice *xendev, Error **errp)
if (con->dev == -1) {
XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
- char fe_path[XENSTORE_ABS_PATH_MAX + 1];
int idx = (xen_mode == XEN_EMULATE) ? 0 : 1;
+ Error *local_err = NULL;
char *value;
/* Theoretically we could go up to INT_MAX here but that's overkill */
while (idx < 100) {
if (!idx) {
- snprintf(fe_path, sizeof(fe_path),
- "/local/domain/%u/console", xendev->frontend_id);
+ value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err,
+ "/local/domain/%u/console",
+ xendev->frontend_id);
} else {
- snprintf(fe_path, sizeof(fe_path),
- "/local/domain/%u/device/console/%u",
- xendev->frontend_id, idx);
+ value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err,
+ "/local/domain/%u/device/console/%u",
+ xendev->frontend_id, idx);
}
- value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL);
if (!value) {
if (errno == ENOENT) {
con->dev = idx;
+ error_free(local_err);
goto found;
}
- error_setg(errp, "cannot read %s: %s", fe_path,
- strerror(errno));
+ error_propagate(errp, local_err);
return NULL;
}
free(value);
@@ -487,13 +487,12 @@ static char *xen_console_get_frontend_path(XenDevice *xendev, Error **errp)
}
-static Property xen_console_properties[] = {
+static const Property xen_console_properties[] = {
DEFINE_PROP_CHR("chardev", XenConsole, chr),
DEFINE_PROP_INT32("idx", XenConsole, dev, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xen_console_class_init(ObjectClass *class, void *data)
+static void xen_console_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class);
@@ -551,7 +550,8 @@ static void xen_console_device_create(XenBackendInstance *backend,
goto fail;
}
- if (xs_node_scanf(xsh, XBT_NULL, fe, "type", errp, "%ms", &type) != 1) {
+ type = xs_node_read(xsh, XBT_NULL, NULL, errp, "%s/%s", fe, "type");
+ if (!type) {
error_prepend(errp, "failed to read console device type: ");
goto fail;
}
@@ -569,7 +569,8 @@ static void xen_console_device_create(XenBackendInstance *backend,
snprintf(label, sizeof(label), "xencons%ld", number);
- if (xs_node_scanf(xsh, XBT_NULL, fe, "output", NULL, "%ms", &output) == 1) {
+ output = xs_node_read(xsh, XBT_NULL, NULL, errp, "%s/%s", fe, "output");
+ if (output) {
/*
* FIXME: sure we want to support implicit
* muxed monitors here?
@@ -580,19 +581,27 @@ static void xen_console_device_create(XenBackendInstance *backend,
output);
goto fail;
}
- } else if (number) {
- cd = serial_hd(number);
- if (!cd) {
- error_prepend(errp, "console: No serial device #%ld found: ",
- number);
- goto fail;
- }
+ } else if (errno != ENOENT) {
+ error_prepend(errp, "console: No valid chardev found: ");
+ goto fail;
} else {
- /* No 'output' node on primary console: use null. */
- cd = qemu_chr_new(label, "null", NULL);
- if (!cd) {
- error_setg(errp, "console: failed to create null device");
- goto fail;
+ error_free(*errp);
+ *errp = NULL;
+
+ if (number) {
+ cd = serial_hd(number);
+ if (!cd) {
+ error_setg(errp, "console: No serial device #%ld found",
+ number);
+ goto fail;
+ }
+ } else {
+ /* No 'output' node on primary console: use null. */
+ cd = qemu_chr_new(label, "null", NULL);
+ if (!cd) {
+ error_setg(errp, "console: failed to create null device");
+ goto fail;
+ }
}
}
diff --git a/hw/char/xilinx_uartlite.c b/hw/char/xilinx_uartlite.c
index 180bb97..8008171 100644
--- a/hw/char/xilinx_uartlite.c
+++ b/hw/char/xilinx_uartlite.c
@@ -24,6 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "qapi/error.h"
#include "hw/char/xilinx_uartlite.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
@@ -57,6 +58,7 @@
struct XilinxUARTLite {
SysBusDevice parent_obj;
+ EndianMode model_endianness;
MemoryRegion mmio;
CharBackend chr;
qemu_irq irq;
@@ -166,19 +168,22 @@ uart_write(void *opaque, hwaddr addr,
uart_update_irq(s);
}
-static const MemoryRegionOps uart_ops = {
- .read = uart_read,
- .write = uart_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 4
- }
+static const MemoryRegionOps uart_ops[2] = {
+ [0 ... 1] = {
+ .read = uart_read,
+ .write = uart_write,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
};
-static Property xilinx_uartlite_properties[] = {
+static const Property xilinx_uartlite_properties[] = {
+ DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XilinxUARTLite, model_endianness),
DEFINE_PROP_CHR("chardev", XilinxUARTLite, chr),
- DEFINE_PROP_END_OF_LIST(),
};
static void uart_rx(void *opaque, const uint8_t *buf, int size)
@@ -215,6 +220,15 @@ static void xilinx_uartlite_realize(DeviceState *dev, Error **errp)
{
XilinxUARTLite *s = XILINX_UARTLITE(dev);
+ if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) {
+ error_setg(errp, TYPE_XILINX_UARTLITE " property 'endianness'"
+ " must be set to 'big' or 'little'");
+ return;
+ }
+
+ memory_region_init_io(&s->mmio, OBJECT(dev),
+ &uart_ops[s->model_endianness == ENDIAN_MODE_BIG],
+ s, "xlnx.xps-uartlite", R_MAX * 4);
qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
uart_event, NULL, s, NULL, true);
}
@@ -224,17 +238,14 @@ static void xilinx_uartlite_init(Object *obj)
XilinxUARTLite *s = XILINX_UARTLITE(obj);
sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
-
- memory_region_init_io(&s->mmio, obj, &uart_ops, s,
- "xlnx.xps-uartlite", R_MAX * 4);
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}
-static void xilinx_uartlite_class_init(ObjectClass *klass, void *data)
+static void xilinx_uartlite_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xilinx_uartlite_reset;
+ device_class_set_legacy_reset(dc, xilinx_uartlite_reset);
dc->realize = xilinx_uartlite_realize;
device_class_set_props(dc, xilinx_uartlite_properties);
}
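
Since "endianness" now has no default, boards have to pick it before realize; a hypothetical wiring sketch (the accepted values "big"/"little" come from the error message above, and setting the enum through the string property setter is an assumption):

    DeviceState *dev = qdev_new(TYPE_XILINX_UARTLITE);
    qdev_prop_set_chr(dev, "chardev", serial_hd(0));
    object_property_set_str(OBJECT(dev), "endianness", "little", &error_fatal);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);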
diff --git a/hw/core/Kconfig b/hw/core/Kconfig
index 24411f5..d1bdf76 100644
--- a/hw/core/Kconfig
+++ b/hw/core/Kconfig
@@ -34,3 +34,7 @@ config REGISTER
config SPLIT_IRQ
bool
+
+config EIF
+ bool
+ depends on LIBCBOR && GNUTLS
diff --git a/hw/core/bus.c b/hw/core/bus.c
index b9d8949..bddfc22 100644
--- a/hw/core/bus.c
+++ b/hw/core/bus.c
@@ -232,7 +232,7 @@ static char *default_bus_get_fw_dev_path(DeviceState *dev)
return g_strdup(object_get_typename(OBJECT(dev)));
}
-static void bus_class_init(ObjectClass *class, void *data)
+static void bus_class_init(ObjectClass *class, const void *data)
{
BusClass *bc = BUS_CLASS(class);
ResettableClass *rc = RESETTABLE_CLASS(class);
@@ -260,7 +260,7 @@ static const TypeInfo bus_info = {
.instance_init = qbus_initfn,
.instance_finalize = qbus_finalize,
.class_init = bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_RESETTABLE_INTERFACE },
{ }
},
diff --git a/hw/core/clock.c b/hw/core/clock.c
index e212865..9c90676 100644
--- a/hw/core/clock.c
+++ b/hw/core/clock.c
@@ -13,6 +13,8 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
+#include "qapi/visitor.h"
+#include "system/qtest.h"
#include "hw/clock.h"
#include "trace.h"
@@ -42,16 +44,12 @@ Clock *clock_new(Object *parent, const char *name)
void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque,
unsigned int events)
{
+ assert(OBJECT(clk)->parent);
clk->callback = cb;
clk->callback_opaque = opaque;
clk->callback_events = events;
}
-void clock_clear_callback(Clock *clk)
-{
- clock_set_callback(clk, NULL, NULL, 0);
-}
-
bool clock_set(Clock *clk, uint64_t period)
{
if (clk->period == period) {
@@ -158,6 +156,25 @@ bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider)
return true;
}
+static void clock_period_prop_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ Clock *clk = CLOCK(obj);
+ uint64_t period = clock_get(clk);
+ visit_type_uint64(v, name, &period, errp);
+}
+
+static void clock_unparent(Object *obj)
+{
+ /*
+ * Callbacks are registered by the parent, which might die any time
+ * after it has unparented its children. Avoid keeping a callback to
+ * a deleted object in case the clock is still referenced somewhere
+ * else (e.g. by a clock output).
+ */
+ clock_set_callback(CLOCK(obj), NULL, NULL, 0);
+}
+
static void clock_initfn(Object *obj)
{
Clock *clk = CLOCK(obj);
@@ -166,6 +183,11 @@ static void clock_initfn(Object *obj)
clk->divider = 1;
QLIST_INIT(&clk->children);
+
+ if (qtest_enabled()) {
+ object_property_add(obj, "qtest-clock-period", "uint64",
+ clock_period_prop_get, NULL, NULL, NULL);
+ }
}
static void clock_finalizefn(Object *obj)
@@ -184,11 +206,17 @@ static void clock_finalizefn(Object *obj)
g_free(clk->canonical_path);
}
+static void clock_class_init(ObjectClass *klass, const void *data)
+{
+ klass->unparent = clock_unparent;
+}
+
static const TypeInfo clock_info = {
.name = TYPE_CLOCK,
.parent = TYPE_OBJECT,
.instance_size = sizeof(Clock),
.instance_init = clock_initfn,
+ .class_init = clock_class_init,
.instance_finalize = clock_finalizefn,
};
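
The qtest-clock-period property added above is only exposed under qtest; a hypothetical test-side read via QMP qom-get (the QOM path and the qts test state are made up for illustration):

    QDict *resp = qtest_qmp(qts,
        "{ 'execute': 'qom-get', 'arguments': "
        "{ 'path': '/machine/peripheral-anon/device[0]/clk-in', "
        "'property': 'qtest-clock-period' } }");
    uint64_t period = qdict_get_int(resp, "return");   /* raw value from clock_get() */
    qobject_unref(resp);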
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index d2e3e45..39e674a 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -21,12 +21,16 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
+#include "qemu/lockcnt.h"
+#include "qemu/error-report.h"
+#include "qemu/qemu-print.h"
+#include "qemu/target-info.h"
#include "exec/log.h"
#include "exec/gdbstub.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace.h"
@@ -39,9 +43,7 @@ CPUState *cpu_by_arch_id(int64_t id)
CPUState *cpu;
CPU_FOREACH(cpu) {
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->get_arch_id(cpu) == id) {
+ if (cpu->cc->get_arch_id(cpu) == id) {
return cpu;
}
}
@@ -100,11 +102,9 @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->dump_state) {
+ if (cpu->cc->dump_state) {
cpu_synchronize_state(cpu);
- cc->dump_state(cpu, f, flags);
+ cpu->cc->dump_state(cpu, f, flags);
}
}
@@ -118,11 +118,10 @@ void cpu_reset(CPUState *cpu)
static void cpu_common_reset_hold(Object *obj, ResetType type)
{
CPUState *cpu = CPU(obj);
- CPUClass *cc = CPU_GET_CLASS(cpu);
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
- log_cpu_state(cpu, cc->reset_dump_flags);
+ log_cpu_state(cpu, cpu->cc->reset_dump_flags);
}
cpu->interrupt_request = 0;
@@ -138,11 +137,6 @@ static void cpu_common_reset_hold(Object *obj, ResetType type)
cpu_exec_reset_hold(cpu);
}
-static bool cpu_common_has_work(CPUState *cs)
-{
- return false;
-}
-
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
ObjectClass *oc;
@@ -161,6 +155,21 @@ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
return NULL;
}
+char *cpu_model_from_type(const char *typename)
+{
+ g_autofree char *suffix = g_strdup_printf("-%s", target_cpu_type());
+
+ if (!object_class_by_name(typename)) {
+ return NULL;
+ }
+
+ if (g_str_has_suffix(typename, suffix)) {
+ return g_strndup(typename, strlen(typename) - strlen(suffix));
+ }
+
+ return g_strdup(typename);
+}
+
static void cpu_common_parse_features(const char *typename, char *features,
Error **errp)
{
@@ -192,6 +201,49 @@ static void cpu_common_parse_features(const char *typename, char *features,
}
}
+const char *parse_cpu_option(const char *cpu_option)
+{
+ ObjectClass *oc;
+ CPUClass *cc;
+ gchar **model_pieces;
+ const char *cpu_type;
+
+ model_pieces = g_strsplit(cpu_option, ",", 2);
+ if (!model_pieces[0]) {
+ error_report("-cpu option cannot be empty");
+ exit(1);
+ }
+
+ oc = cpu_class_by_name(target_cpu_type(), model_pieces[0]);
+ if (oc == NULL) {
+ error_report("unable to find CPU model '%s'", model_pieces[0]);
+ g_strfreev(model_pieces);
+ exit(EXIT_FAILURE);
+ }
+
+ cpu_type = object_class_get_name(oc);
+ cc = CPU_CLASS(oc);
+ cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
+ g_strfreev(model_pieces);
+ return cpu_type;
+}
+
+bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
+{
+ if (!accel_cpu_common_realize(cpu, errp)) {
+ return false;
+ }
+
+ gdb_init_cpu(cpu);
+
+ /* Wait until CPU initialization is complete before exposing the CPU. */
+ cpu_list_add(cpu);
+
+ cpu_vmstate_register(cpu);
+
+ return true;
+}
+
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cpu = CPU(dev);
@@ -233,18 +285,34 @@ static void cpu_common_unrealizefn(DeviceState *dev)
cpu_exec_unrealizefn(cpu);
}
+void cpu_exec_unrealizefn(CPUState *cpu)
+{
+ cpu_vmstate_unregister(cpu);
+
+ cpu_list_remove(cpu);
+ /*
+ * Now that the vCPU has been removed from the RCU list, we can call
+ * accel_cpu_common_unrealize, which may free fields using call_rcu.
+ */
+ accel_cpu_common_unrealize(cpu);
+}
+
static void cpu_common_initfn(Object *obj)
{
CPUState *cpu = CPU(obj);
- gdb_init_cpu(cpu);
+ cpu_exec_class_post_init(CPU_GET_CLASS(obj));
+
+ /* cache the cpu class for the hotpath */
+ cpu->cc = CPU_GET_CLASS(cpu);
+
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
+ cpu->as = NULL;
+ cpu->num_ases = 0;
/* user-mode doesn't have configurable SMP topology */
/* the default value is changed by qemu_init_vcpu() for system-mode */
- cpu->nr_cores = 1;
cpu->nr_threads = 1;
- cpu->cflags_next_tb = -1;
/* allocate storage for thread info, initialise condition variables */
cpu->thread = g_new0(QemuThread, 1);
@@ -282,7 +350,10 @@ static void cpu_common_finalize(Object *obj)
}
#endif
free_queued_cpu_work(cpu);
- g_array_free(cpu->gdb_regs, TRUE);
+ /* If cleanup didn't already happen via gdb_unregister_coprocessor_all() */
+ if (cpu->gdb_regs) {
+ g_array_free(cpu->gdb_regs, TRUE);
+ }
qemu_lockcnt_destroy(&cpu->in_ioctl_lock);
qemu_mutex_destroy(&cpu->work_mutex);
qemu_cond_destroy(cpu->halt_cond);
@@ -295,7 +366,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
return cpu->cpu_index;
}
-static void cpu_common_class_init(ObjectClass *klass, void *data)
+static void cpu_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -303,7 +374,6 @@ static void cpu_common_class_init(ObjectClass *klass, void *data)
k->parse_features = cpu_common_parse_features;
k->get_arch_id = cpu_common_get_arch_id;
- k->has_work = cpu_common_has_work;
k->gdb_read_register = cpu_common_gdb_read_register;
k->gdb_write_register = cpu_common_gdb_write_register;
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
@@ -335,3 +405,32 @@ static void cpu_register_types(void)
}
type_init(cpu_register_types)
+
+static void cpu_list_entry(gpointer data, gpointer user_data)
+{
+ CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data));
+ const char *typename = object_class_get_name(OBJECT_CLASS(data));
+ g_autofree char *model = cpu_model_from_type(typename);
+
+ if (cc->deprecation_note) {
+ qemu_printf(" %s (deprecated)\n", model);
+ } else {
+ qemu_printf(" %s\n", model);
+ }
+}
+
+void list_cpus(void)
+{
+ CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type()));
+
+ if (cc->list_cpus) {
+ cc->list_cpus();
+ } else {
+ GSList *list;
+
+ list = object_class_get_list_sorted(TYPE_CPU, false);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, cpu_list_entry, NULL);
+ g_slist_free(list);
+ }
+}
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
deleted file mode 100644
index 2a9a2a4..0000000
--- a/hw/core/cpu-sysemu.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * QEMU CPU model (system emulation specific)
- *
- * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "exec/tswap.h"
-#include "hw/core/sysemu-cpu-ops.h"
-
-bool cpu_paging_enabled(const CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->sysemu_ops->get_paging_enabled) {
- return cc->sysemu_ops->get_paging_enabled(cpu);
- }
-
- return false;
-}
-
-bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
- Error **errp)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->sysemu_ops->get_memory_mapping) {
- return cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
- }
-
- error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
- return false;
-}
-
-hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->sysemu_ops->get_phys_page_attrs_debug) {
- return cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, attrs);
- }
- /* Fallback for CPUs which don't implement the _attrs_ hook */
- *attrs = MEMTXATTRS_UNSPECIFIED;
- return cc->sysemu_ops->get_phys_page_debug(cpu, addr);
-}
-
-hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
-{
- MemTxAttrs attrs = {};
-
- return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
-}
-
-int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
-{
- int ret = 0;
-
- if (cpu->cc->sysemu_ops->asidx_from_attrs) {
- ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
- assert(ret < cpu->num_ases && ret >= 0);
- }
- return ret;
-}
-
-int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cc->sysemu_ops->write_elf32_qemunote) {
- return 0;
- }
- return (*cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque);
-}
-
-int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cc->sysemu_ops->write_elf32_note) {
- return -1;
- }
- return (*cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque);
-}
-
-int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cc->sysemu_ops->write_elf64_qemunote) {
- return 0;
- }
- return (*cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque);
-}
-
-int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cc->sysemu_ops->write_elf64_note) {
- return -1;
- }
- return (*cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque);
-}
-
-bool cpu_virtio_is_big_endian(CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->sysemu_ops->virtio_is_big_endian) {
- return cc->sysemu_ops->virtio_is_big_endian(cpu);
- }
- return target_words_bigendian();
-}
-
-GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- GuestPanicInformation *res = NULL;
-
- if (cc->sysemu_ops->get_crash_info) {
- res = cc->sysemu_ops->get_crash_info(cpu);
- }
- return res;
-}
diff --git a/hw/core/cpu-system.c b/hw/core/cpu-system.c
new file mode 100644
index 0000000..3c84176
--- /dev/null
+++ b/hw/core/cpu-system.c
@@ -0,0 +1,305 @@
+/*
+ * QEMU CPU model (system specific)
+ *
+ * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "system/address-spaces.h"
+#include "exec/cputlb.h"
+#include "system/memory.h"
+#include "exec/tb-flush.h"
+#include "exec/tswap.h"
+#include "hw/qdev-core.h"
+#include "hw/qdev-properties.h"
+#include "hw/core/sysemu-cpu-ops.h"
+#include "migration/vmstate.h"
+#include "system/tcg.h"
+
+bool cpu_has_work(CPUState *cpu)
+{
+ return cpu->cc->sysemu_ops->has_work(cpu);
+}
+
+bool cpu_paging_enabled(const CPUState *cpu)
+{
+ if (cpu->cc->sysemu_ops->get_paging_enabled) {
+ return cpu->cc->sysemu_ops->get_paging_enabled(cpu);
+ }
+
+ return false;
+}
+
+bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+ Error **errp)
+{
+ if (cpu->cc->sysemu_ops->get_memory_mapping) {
+ return cpu->cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
+ }
+
+ error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
+ return false;
+}
+
+hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ hwaddr paddr;
+
+ if (cpu->cc->sysemu_ops->get_phys_page_attrs_debug) {
+ paddr = cpu->cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr,
+ attrs);
+ } else {
+ /* Fallback for CPUs which don't implement the _attrs_ hook */
+ *attrs = MEMTXATTRS_UNSPECIFIED;
+ paddr = cpu->cc->sysemu_ops->get_phys_page_debug(cpu, addr);
+ }
+ /* Indicate that this is a debug access. */
+ attrs->debug = 1;
+ return paddr;
+}
+
+hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
+{
+ MemTxAttrs attrs = {};
+
+ return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
+}
+
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
+{
+ int ret = 0;
+
+ if (cpu->cc->sysemu_ops->asidx_from_attrs) {
+ ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
+ assert(ret < cpu->num_ases && ret >= 0);
+ }
+ return ret;
+}
+
+int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque)
+{
+ if (!cpu->cc->sysemu_ops->write_elf32_qemunote) {
+ return 0;
+ }
+ return (*cpu->cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque);
+}
+
+int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque)
+{
+ if (!cpu->cc->sysemu_ops->write_elf32_note) {
+ return -1;
+ }
+ return (*cpu->cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque);
+}
+
+int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque)
+{
+ if (!cpu->cc->sysemu_ops->write_elf64_qemunote) {
+ return 0;
+ }
+ return (*cpu->cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque);
+}
+
+int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque)
+{
+ if (!cpu->cc->sysemu_ops->write_elf64_note) {
+ return -1;
+ }
+ return (*cpu->cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque);
+}
+
+bool cpu_virtio_is_big_endian(CPUState *cpu)
+{
+ if (cpu->cc->sysemu_ops->virtio_is_big_endian) {
+ return cpu->cc->sysemu_ops->virtio_is_big_endian(cpu);
+ }
+ return target_big_endian();
+}
+
+GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
+{
+ GuestPanicInformation *res = NULL;
+
+ if (cpu->cc->sysemu_ops->get_crash_info) {
+ res = cpu->cc->sysemu_ops->get_crash_info(cpu);
+ }
+ return res;
+}
+
+static const Property cpu_system_props[] = {
+ /*
+ * Create a memory property for the system CPU object, so users can
+ * wire up its memory. The default, if no link is set up, is to use
+ * the system address space.
+ */
+ DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+};
+
+static bool cpu_get_start_powered_off(Object *obj, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ return cpu->start_powered_off;
+}
+
+static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ cpu->start_powered_off = value;
+}
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+ ObjectClass *oc = OBJECT_CLASS(dc);
+
+ /*
+ * We can't use DEFINE_PROP_BOOL in the Property array for this
+ * property, because we want this to be settable after realize.
+ */
+ object_class_property_add_bool(oc, "start-powered-off",
+ cpu_get_start_powered_off,
+ cpu_set_start_powered_off);
+
+ device_class_set_props(dc, cpu_system_props);
+}
+
+void cpu_exec_class_post_init(CPUClass *cc)
+{
+ /* Check mandatory SysemuCPUOps handlers */
+ g_assert(cc->sysemu_ops->has_work);
+}
+
+void cpu_exec_initfn(CPUState *cpu)
+{
+ cpu->memory = get_system_memory();
+ object_ref(OBJECT(cpu->memory));
+}
+
+static int cpu_common_post_load(void *opaque, int version_id)
+{
+ if (tcg_enabled()) {
+ CPUState *cpu = opaque;
+
+ /*
+ * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+ * version_id is increased.
+ */
+ cpu->interrupt_request &= ~0x01;
+
+ tlb_flush(cpu);
+
+ /*
+ * loadvm has just updated the content of RAM, bypassing the
+ * usual mechanisms that ensure we flush TBs for writes to
+ * memory we've translated code from. So we must flush all TBs,
+ * which will now be stale.
+ */
+ tb_flush(cpu);
+ }
+
+ return 0;
+}
+
+static int cpu_common_pre_load(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ cpu->exception_index = -1;
+
+ return 0;
+}
+
+static bool cpu_common_exception_index_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return tcg_enabled() && cpu->exception_index != -1;
+}
+
+static const VMStateDescription vmstate_cpu_common_exception_index = {
+ .name = "cpu_common/exception_index",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_exception_index_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_INT32(exception_index, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool cpu_common_crash_occurred_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return cpu->crash_occurred;
+}
+
+static const VMStateDescription vmstate_cpu_common_crash_occurred = {
+ .name = "cpu_common/crash_occurred",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_crash_occurred_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(crash_occurred, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cpu_common = {
+ .name = "cpu_common",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = cpu_common_pre_load,
+ .post_load = cpu_common_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(halted, CPUState),
+ VMSTATE_UINT32(interrupt_request, CPUState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_cpu_common_exception_index,
+ &vmstate_cpu_common_crash_occurred,
+ NULL
+ }
+};
+
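+/*
+ * Register the common CPU state for migration unless the CPU device model
+ * already provides its own vmsd, then register any target legacy vmsd.
+ */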
+void cpu_vmstate_register(CPUState *cpu)
+{
+ if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
+ vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
+ }
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index,
+ cpu->cc->sysemu_ops->legacy_vmsd, cpu);
+ }
+}
+
+void cpu_vmstate_unregister(CPUState *cpu)
+{
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_unregister(NULL, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
+ }
+ if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
+ vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
+ }
+}
diff --git a/hw/core/cpu-user.c b/hw/core/cpu-user.c
new file mode 100644
index 0000000..7176791
--- /dev/null
+++ b/hw/core/cpu-user.c
@@ -0,0 +1,49 @@
+/*
+ * QEMU CPU model (user specific)
+ *
+ * Copyright (c) Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-core.h"
+#include "hw/qdev-properties.h"
+#include "hw/core/cpu.h"
+#include "migration/vmstate.h"
+
+static const Property cpu_user_props[] = {
+ /*
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+};
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+ device_class_set_props(dc, cpu_user_props);
+}
+
+void cpu_exec_class_post_init(CPUClass *cc)
+{
+ /* nothing to do */
+}
+
+void cpu_exec_initfn(CPUState *cpu)
+{
+ /* nothing to do */
+}
+
+void cpu_vmstate_register(CPUState *cpu)
+{
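+ /* User-mode emulation never migrates; any vmsd must be unmigratable. */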
+ assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
+ qdev_get_vmsd(DEVICE(cpu))->unmigratable);
+}
+
+void cpu_vmstate_unregister(CPUState *cpu)
+{
+ /* nothing to do */
+}
diff --git a/hw/core/eif.c b/hw/core/eif.c
new file mode 100644
index 0000000..513caec
--- /dev/null
+++ b/hw/core/eif.c
@@ -0,0 +1,709 @@
+/*
+ * EIF (Enclave Image Format) related helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "qapi/error.h"
+#include "crypto/hash.h"
+#include "crypto/x509-utils.h"
+#include <zlib.h> /* for crc32 */
+#include <cbor.h>
+
+#include "hw/core/eif.h"
+
+#define MAX_SECTIONS 32
+
+/* members are ordered according to field order in .eif file */
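+/* multi-byte fields are stored big-endian in the file and swapped on load */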
+typedef struct EifHeader {
+ uint8_t magic[4]; /* must be ".eif" in ASCII, i.e. [46, 101, 105, 102] */
+ uint16_t version;
+ uint16_t flags;
+ uint64_t default_memory;
+ uint64_t default_cpus;
+ uint16_t reserved;
+ uint16_t section_cnt;
+ uint64_t section_offsets[MAX_SECTIONS];
+ uint64_t section_sizes[MAX_SECTIONS];
+ uint32_t unused;
+ uint32_t eif_crc32;
+} QEMU_PACKED EifHeader;
+
+/* members are ordered according to field order in .eif file */
+typedef struct EifSectionHeader {
+ /*
+ * 0 = invalid, 1 = kernel, 2 = cmdline, 3 = ramdisk, 4 = signature,
+ * 5 = metadata
+ */
+ uint16_t section_type;
+ uint16_t flags;
+ uint64_t section_size;
+} QEMU_PACKED EifSectionHeader;
+
+enum EifSectionTypes {
+ EIF_SECTION_INVALID = 0,
+ EIF_SECTION_KERNEL = 1,
+ EIF_SECTION_CMDLINE = 2,
+ EIF_SECTION_RAMDISK = 3,
+ EIF_SECTION_SIGNATURE = 4,
+ EIF_SECTION_METADATA = 5,
+ EIF_SECTION_MAX = 6,
+};
+
+static const char *section_type_to_string(uint16_t type)
+{
+ const char *str;
+ switch (type) {
+ case EIF_SECTION_INVALID:
+ str = "invalid";
+ break;
+ case EIF_SECTION_KERNEL:
+ str = "kernel";
+ break;
+ case EIF_SECTION_CMDLINE:
+ str = "cmdline";
+ break;
+ case EIF_SECTION_RAMDISK:
+ str = "ramdisk";
+ break;
+ case EIF_SECTION_SIGNATURE:
+ str = "signature";
+ break;
+ case EIF_SECTION_METADATA:
+ str = "metadata";
+ break;
+ default:
+ str = "unknown";
+ break;
+ }
+
+ return str;
+}
+
+static bool read_eif_header(FILE *f, EifHeader *header, uint32_t *crc,
+ Error **errp)
+{
+ size_t got;
+ size_t header_size = sizeof(*header);
+
+ got = fread(header, 1, header_size, f);
+ if (got != header_size) {
+ error_setg(errp, "Failed to read EIF header");
+ return false;
+ }
+
+ if (memcmp(header->magic, ".eif", 4) != 0) {
+ error_setg(errp, "Invalid EIF image. Magic mismatch.");
+ return false;
+ }
+
+ /* Exclude header->eif_crc32 field from CRC calculation */
+ *crc = crc32(*crc, (uint8_t *)header, header_size - 4);
+
+ header->version = be16_to_cpu(header->version);
+ header->flags = be16_to_cpu(header->flags);
+ header->default_memory = be64_to_cpu(header->default_memory);
+ header->default_cpus = be64_to_cpu(header->default_cpus);
+ header->reserved = be16_to_cpu(header->reserved);
+ header->section_cnt = be16_to_cpu(header->section_cnt);
+
+ for (int i = 0; i < MAX_SECTIONS; ++i) {
+ header->section_offsets[i] = be64_to_cpu(header->section_offsets[i]);
+ }
+
+ for (int i = 0; i < MAX_SECTIONS; ++i) {
+ header->section_sizes[i] = be64_to_cpu(header->section_sizes[i]);
+ if (header->section_sizes[i] > SSIZE_MAX) {
+ error_setg(errp, "Invalid EIF image. Section size out of bounds");
+ return false;
+ }
+ }
+
+ header->unused = be32_to_cpu(header->unused);
+ header->eif_crc32 = be32_to_cpu(header->eif_crc32);
+ return true;
+}
+
+static bool read_eif_section_header(FILE *f, EifSectionHeader *section_header,
+ uint32_t *crc, Error **errp)
+{
+ size_t got;
+ size_t section_header_size = sizeof(*section_header);
+
+ got = fread(section_header, 1, section_header_size, f);
+ if (got != section_header_size) {
+ error_setg(errp, "Failed to read EIF section header");
+ return false;
+ }
+
+ *crc = crc32(*crc, (uint8_t *)section_header, section_header_size);
+
+ section_header->section_type = be16_to_cpu(section_header->section_type);
+ section_header->flags = be16_to_cpu(section_header->flags);
+ section_header->section_size = be64_to_cpu(section_header->section_size);
+ return true;
+}
+
+/*
+ * Upon success, the caller is responsible for unlinking and freeing *tmp_path.
+ */
+static bool get_tmp_file(const char *template, char **tmp_path, Error **errp)
+{
+ int tmp_fd;
+
+ *tmp_path = NULL;
+ tmp_fd = g_file_open_tmp(template, tmp_path, NULL);
+ if (tmp_fd < 0 || *tmp_path == NULL) {
+ error_setg(errp, "Failed to create temporary file for template %s",
+ template);
+ return false;
+ }
+
+ close(tmp_fd);
+ return true;
+}
+
+static void safe_fclose(FILE *f)
+{
+ if (f) {
+ fclose(f);
+ }
+}
+
+static void safe_unlink(char *f)
+{
+ if (f) {
+ unlink(f);
+ }
+}
+
+/*
+ * Upon success, the caller is responsible for unlinking and freeing *kernel_path
+ */
+static bool read_eif_kernel(FILE *f, uint64_t size, char **kernel_path,
+ QCryptoHash *hash0, QCryptoHash *hash1,
+ uint32_t *crc, Error **errp)
+{
+ size_t got;
+ FILE *tmp_file = NULL;
+ uint8_t *kernel = g_try_malloc(size);
+ if (!kernel) {
+ error_setg(errp, "Out of memory reading kernel section");
+ goto cleanup;
+ }
+
+ *kernel_path = NULL;
+ if (!get_tmp_file("eif-kernel-XXXXXX", kernel_path, errp)) {
+ goto cleanup;
+ }
+
+ tmp_file = fopen(*kernel_path, "wb");
+ if (tmp_file == NULL) {
+ error_setg_errno(errp, errno, "Failed to open temporary file %s",
+ *kernel_path);
+ goto cleanup;
+ }
+
+ got = fread(kernel, 1, size, f);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to read EIF kernel section data");
+ goto cleanup;
+ }
+
+ got = fwrite(kernel, 1, size, tmp_file);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to write EIF kernel section data to temporary"
+ " file");
+ goto cleanup;
+ }
+
+ *crc = crc32(*crc, kernel, size);
+ if (qcrypto_hash_update(hash0, (char *)kernel, size, errp) != 0 ||
+ qcrypto_hash_update(hash1, (char *)kernel, size, errp) != 0) {
+ goto cleanup;
+ }
+ g_free(kernel);
+ fclose(tmp_file);
+
+ return true;
+
+ cleanup:
+ safe_fclose(tmp_file);
+
+ safe_unlink(*kernel_path);
+ g_free(*kernel_path);
+ *kernel_path = NULL;
+
+ g_free(kernel);
+ return false;
+}
+
+static bool read_eif_cmdline(FILE *f, uint64_t size, char *cmdline,
+ QCryptoHash *hash0, QCryptoHash *hash1,
+ uint32_t *crc, Error **errp)
+{
+ size_t got = fread(cmdline, 1, size, f);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to read EIF cmdline section data");
+ return false;
+ }
+
+ *crc = crc32(*crc, (uint8_t *)cmdline, size);
+ if (qcrypto_hash_update(hash0, cmdline, size, errp) != 0 ||
+ qcrypto_hash_update(hash1, cmdline, size, errp) != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool read_eif_ramdisk(FILE *eif, FILE *initrd, uint64_t size,
+ QCryptoHash *hash0, QCryptoHash *h, uint32_t *crc,
+ Error **errp)
+{
+ size_t got;
+ bool ret = false;
+ uint8_t *ramdisk = g_try_malloc(size);
+ if (!ramdisk) {
+ error_setg(errp, "Out of memory reading initrd section");
+ goto cleanup;
+ }
+
+ got = fread(ramdisk, 1, size, eif);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to read EIF ramdisk section data");
+ goto cleanup;
+ }
+
+ got = fwrite(ramdisk, 1, size, initrd);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to write EIF ramdisk data to temporary file");
+ goto cleanup;
+ }
+
+ *crc = crc32(*crc, ramdisk, size);
+ if (qcrypto_hash_update(hash0, (char *)ramdisk, size, errp) != 0 ||
+ qcrypto_hash_update(h, (char *)ramdisk, size, errp) != 0) {
+ goto cleanup;
+ }
+ ret = true;
+
+ cleanup:
+ g_free(ramdisk);
+ return ret;
+}
+
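+/*
+ * The signature section is a CBOR array of PCR signature entries. Entry 0
+ * describes PCR0: a two-entry map whose "signing_certificate" value is an
+ * array of uint8 items forming the certificate, whose SHA384 fingerprint
+ * is returned in @sha384.
+ */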
+static bool get_signature_fingerprint_sha384(FILE *eif, uint64_t size,
+ uint8_t *sha384,
+ uint32_t *crc,
+ Error **errp)
+{
+ size_t got;
+ g_autofree uint8_t *sig = NULL;
+ g_autofree uint8_t *cert = NULL;
+ cbor_item_t *item = NULL;
+ cbor_item_t *pcr0 = NULL;
+ size_t len;
+ size_t hash_len = QCRYPTO_HASH_DIGEST_LEN_SHA384;
+ struct cbor_pair *pair;
+ struct cbor_load_result result;
+ bool ret = false;
+
+ sig = g_try_malloc(size);
+ if (!sig) {
+ error_setg(errp, "Out of memory reading signature section");
+ goto cleanup;
+ }
+
+ got = fread(sig, 1, size, eif);
+ if ((uint64_t) got != size) {
+ error_setg(errp, "Failed to read EIF signature section data");
+ goto cleanup;
+ }
+
+ *crc = crc32(*crc, sig, size);
+
+ item = cbor_load(sig, size, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ error_setg(errp, "Failed to load signature section data as CBOR");
+ goto cleanup;
+ }
+ if (!cbor_isa_array(item) || cbor_array_size(item) < 1) {
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ pcr0 = cbor_array_get(item, 0);
+ if (!pcr0) {
+ error_setg(errp, "Failed to get PCR0 signature");
+ goto cleanup;
+ }
+ if (!cbor_isa_map(pcr0) || cbor_map_size(pcr0) != 2) {
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ pair = cbor_map_handle(pcr0);
+ if (!cbor_isa_string(pair->key) || cbor_string_length(pair->key) != 19 ||
+ memcmp(cbor_string_handle(pair->key), "signing_certificate", 19) != 0) {
+ error_setg(errp, "Invalid signautre CBOR");
+ goto cleanup;
+ }
+ if (!cbor_isa_array(pair->value)) {
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ len = cbor_array_size(pair->value);
+ if (len == 0) {
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ cert = g_try_malloc(len);
+ if (!cert) {
+ error_setg(errp, "Out of memory reading signature section");
+ goto cleanup;
+ }
+
+ for (int i = 0; i < len; ++i) {
+ cbor_item_t *tmp = cbor_array_get(pair->value, i);
+ if (!tmp) {
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ if (!cbor_isa_uint(tmp) || cbor_int_get_width(tmp) != CBOR_INT_8) {
+ cbor_decref(&tmp);
+ error_setg(errp, "Invalid signature CBOR");
+ goto cleanup;
+ }
+ cert[i] = cbor_get_uint8(tmp);
+ cbor_decref(&tmp);
+ }
+
+ if (qcrypto_get_x509_cert_fingerprint(cert, len, QCRYPTO_HASH_ALGO_SHA384,
+ sha384, &hash_len, errp)) {
+ goto cleanup;
+ }
+
+ ret = true;
+
+ cleanup:
+ if (pcr0) {
+ cbor_decref(&pcr0);
+ }
+ if (item) {
+ cbor_decref(&item);
+ }
+ return ret;
+}
+
+/* Expects file to have offset 0 before this function is called */
+static long get_file_size(FILE *f, Error **errp)
+{
+ long size;
+
+ if (fseek(f, 0, SEEK_END) != 0) {
+ error_setg_errno(errp, errno, "Failed to seek to the end of file");
+ return -1;
+ }
+
+ size = ftell(f);
+ if (size == -1) {
+ error_setg_errno(errp, errno, "Failed to get offset");
+ return -1;
+ }
+
+ if (fseek(f, 0, SEEK_SET) != 0) {
+ error_setg_errno(errp, errno, "Failed to seek back to the start");
+ return -1;
+ }
+
+ return size;
+}
+
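+/* Finalize @h and write the 48-byte SHA384 digest to @hash. */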
+static bool get_SHA384_hash(QCryptoHash *h, uint8_t *hash, Error **errp)
+{
+ size_t hash_len = QCRYPTO_HASH_DIGEST_LEN_SHA384;
+ return qcrypto_hash_finalize_bytes(h, &hash, &hash_len, errp) == 0;
+}
+
+/*
+ * Upon success, the caller is responsible for unlinking and freeing
+ * *kernel_path, *initrd_path and freeing *cmdline.
+ */
+bool read_eif_file(const char *eif_path, const char *machine_initrd,
+ char **kernel_path, char **initrd_path, char **cmdline,
+ uint8_t *image_hash, uint8_t *bootstrap_hash,
+ uint8_t *app_hash, uint8_t *fingerprint_hash,
+ bool *signature_found, Error **errp)
+{
+ FILE *f = NULL;
+ FILE *machine_initrd_f = NULL;
+ FILE *initrd_path_f = NULL;
+ long machine_initrd_size;
+ uint32_t crc = 0;
+ EifHeader eif_header;
+ bool seen_sections[EIF_SECTION_MAX] = {false};
+ /* kernel + ramdisks + cmdline SHA384 hash */
+ g_autoptr(QCryptoHash) hash0 = NULL;
+ /* kernel + boot ramdisk + cmdline SHA384 hash */
+ g_autoptr(QCryptoHash) hash1 = NULL;
+ /* application ramdisk(s) SHA384 hash */
+ g_autoptr(QCryptoHash) hash2 = NULL;
+
+ *signature_found = false;
+ *kernel_path = *initrd_path = *cmdline = NULL;
+
+ hash0 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp);
+ if (!hash0) {
+ goto cleanup;
+ }
+ hash1 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp);
+ if (!hash1) {
+ goto cleanup;
+ }
+ hash2 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp);
+ if (!hash2) {
+ goto cleanup;
+ }
+
+ f = fopen(eif_path, "rb");
+ if (f == NULL) {
+ error_setg_errno(errp, errno, "Failed to open %s", eif_path);
+ goto cleanup;
+ }
+
+ if (!read_eif_header(f, &eif_header, &crc, errp)) {
+ goto cleanup;
+ }
+
+ if (eif_header.version < 4) {
+ error_setg(errp, "Expected EIF version 4 or greater");
+ goto cleanup;
+ }
+
+ if (eif_header.flags != 0) {
+ error_setg(errp, "Expected EIF flags to be 0");
+ goto cleanup;
+ }
+
+ if (eif_header.section_cnt > MAX_SECTIONS) {
+ error_setg(errp, "EIF header section count must not be greater than "
+ "%d but found %d", MAX_SECTIONS, eif_header.section_cnt);
+ goto cleanup;
+ }
+
+ for (int i = 0; i < eif_header.section_cnt; ++i) {
+ EifSectionHeader hdr;
+ uint16_t section_type;
+
+ if (eif_header.section_offsets[i] > OFF_MAX) {
+ error_setg(errp, "Invalid EIF image. Section offset out of bounds");
+ goto cleanup;
+ }
+ if (fseek(f, eif_header.section_offsets[i], SEEK_SET) != 0) {
+ error_setg_errno(errp, errno, "Failed to offset to %" PRIu64 " in EIF file",
+ eif_header.section_offsets[i]);
+ goto cleanup;
+ }
+
+ if (!read_eif_section_header(f, &hdr, &crc, errp)) {
+ goto cleanup;
+ }
+
+ if (hdr.flags != 0) {
+ error_setg(errp, "Expected EIF section header flags to be 0");
+ goto cleanup;
+ }
+
+ if (eif_header.section_sizes[i] != hdr.section_size) {
+ error_setg(errp, "EIF section size mismatch between header and "
+ "section header: header %" PRIu64 ", section header %" PRIu64,
+ eif_header.section_sizes[i],
+ hdr.section_size);
+ goto cleanup;
+ }
+
+ section_type = hdr.section_type;
+
+ switch (section_type) {
+ case EIF_SECTION_KERNEL:
+ if (seen_sections[EIF_SECTION_KERNEL]) {
+ error_setg(errp, "Invalid EIF image. More than 1 kernel "
+ "section");
+ goto cleanup;
+ }
+
+ if (!read_eif_kernel(f, hdr.section_size, kernel_path, hash0,
+ hash1, &crc, errp)) {
+ goto cleanup;
+ }
+
+ break;
+ case EIF_SECTION_CMDLINE:
+ {
+ uint64_t size;
+ if (seen_sections[EIF_SECTION_CMDLINE]) {
+ error_setg(errp, "Invalid EIF image. More than 1 cmdline "
+ "section");
+ goto cleanup;
+ }
+ size = hdr.section_size;
+ *cmdline = g_try_malloc(size + 1);
+ if (!*cmdline) {
+ error_setg(errp, "Out of memory reading command line section");
+ goto cleanup;
+ }
+ if (!read_eif_cmdline(f, size, *cmdline, hash0, hash1, &crc,
+ errp)) {
+ goto cleanup;
+ }
+ (*cmdline)[size] = '\0';
+
+ break;
+ }
+ case EIF_SECTION_RAMDISK:
+ {
+ QCryptoHash *h = hash2;
+ if (!seen_sections[EIF_SECTION_RAMDISK]) {
+ /*
+ * If this is the first time we are seeing a ramdisk section,
+ * we need to:
+ * 1) hash it into bootstrap (hash1) instead of app (hash2)
+ * along with image (hash0)
+ * 2) create the initrd temporary file.
+ */
+ h = hash1;
+ if (!get_tmp_file("eif-initrd-XXXXXX", initrd_path, errp)) {
+ goto cleanup;
+ }
+ initrd_path_f = fopen(*initrd_path, "wb");
+ if (initrd_path_f == NULL) {
+ error_setg_errno(errp, errno, "Failed to open file %s",
+ *initrd_path);
+ goto cleanup;
+ }
+ }
+
+ if (!read_eif_ramdisk(f, initrd_path_f, hdr.section_size, hash0, h,
+ &crc, errp)) {
+ goto cleanup;
+ }
+
+ break;
+ }
+ case EIF_SECTION_SIGNATURE:
+ *signature_found = true;
+ if (!get_signature_fingerprint_sha384(f, hdr.section_size,
+ fingerprint_hash, &crc,
+ errp)) {
+ goto cleanup;
+ }
+ break;
+ default:
+ /* other sections including invalid or unknown sections */
+ {
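+ /*
+ * Still read the payload so that it contributes to the
+ * whole-file CRC check below.
+ */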
+ uint8_t *buf;
+ size_t got;
+ uint64_t size = hdr.section_size;
+ buf = g_try_malloc(size);
+ if (!buf) {
+ error_setg(errp, "Out of memory reading unknown section");
+ goto cleanup;
+ }
+ got = fread(buf, 1, size, f);
+ if ((uint64_t) got != size) {
+ g_free(buf);
+ error_setg(errp, "Failed to read EIF %s section data",
+ section_type_to_string(section_type));
+ goto cleanup;
+ }
+ crc = crc32(crc, buf, size);
+ g_free(buf);
+ break;
+ }
+ }
+
+ if (section_type < EIF_SECTION_MAX) {
+ seen_sections[section_type] = true;
+ }
+ }
+
+ if (!seen_sections[EIF_SECTION_KERNEL]) {
+ error_setg(errp, "Invalid EIF image. No kernel section.");
+ goto cleanup;
+ }
+ if (!seen_sections[EIF_SECTION_CMDLINE]) {
+ error_setg(errp, "Invalid EIF image. No cmdline section.");
+ goto cleanup;
+ }
+ if (!seen_sections[EIF_SECTION_RAMDISK]) {
+ error_setg(errp, "Invalid EIF image. No ramdisk section.");
+ goto cleanup;
+ }
+
+ if (eif_header.eif_crc32 != crc) {
+ error_setg(errp, "CRC mismatch. Expected %u but header has %u.",
+ crc, eif_header.eif_crc32);
+ goto cleanup;
+ }
+
+ /*
+ * Append the initrd file from the "-initrd" option, if any. Although we
+ * still pass the crc pointer to read_eif_ramdisk, its value is no longer
+ * used: the CRC mismatch check has already been performed above.
+ */
+ if (machine_initrd) {
+ machine_initrd_f = fopen(machine_initrd, "rb");
+ if (machine_initrd_f == NULL) {
+ error_setg_errno(errp, errno, "Failed to open initrd file %s",
+ machine_initrd);
+ goto cleanup;
+ }
+
+ machine_initrd_size = get_file_size(machine_initrd_f, errp);
+ if (machine_initrd_size == -1) {
+ goto cleanup;
+ }
+
+ if (!read_eif_ramdisk(machine_initrd_f, initrd_path_f,
+ machine_initrd_size, hash0, hash2, &crc, errp)) {
+ goto cleanup;
+ }
+ }
+
+ if (!get_SHA384_hash(hash0, image_hash, errp)) {
+ goto cleanup;
+ }
+ if (!get_SHA384_hash(hash1, bootstrap_hash, errp)) {
+ goto cleanup;
+ }
+ if (!get_SHA384_hash(hash2, app_hash, errp)) {
+ goto cleanup;
+ }
+
+ fclose(f);
+ fclose(initrd_path_f);
+ safe_fclose(machine_initrd_f);
+ return true;
+
+ cleanup:
+ safe_fclose(f);
+ safe_fclose(initrd_path_f);
+ safe_fclose(machine_initrd_f);
+
+ safe_unlink(*kernel_path);
+ g_free(*kernel_path);
+ *kernel_path = NULL;
+
+ safe_unlink(*initrd_path);
+ g_free(*initrd_path);
+ *initrd_path = NULL;
+
+ g_free(*cmdline);
+ *cmdline = NULL;
+
+ return false;
+}
diff --git a/hw/core/eif.h b/hw/core/eif.h
new file mode 100644
index 0000000..fed3cb5
--- /dev/null
+++ b/hw/core/eif.h
@@ -0,0 +1,22 @@
+/*
+ * EIF (Enclave Image Format) related helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef HW_CORE_EIF_H
+#define HW_CORE_EIF_H
+
+bool read_eif_file(const char *eif_path, const char *machine_initrd,
+ char **kernel_path, char **initrd_path,
+ char **kernel_cmdline, uint8_t *image_sha384,
+ uint8_t *bootstrap_sha384, uint8_t *app_sha384,
+ uint8_t *fingerprint_sha384, bool *signature_found,
+ Error **errp);
+
+#endif
+
diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c
index ea8628b..e72bbde 100644
--- a/hw/core/generic-loader.c
+++ b/hw/core/generic-loader.c
@@ -31,9 +31,8 @@
*/
#include "qemu/osdep.h"
-#include "exec/tswap.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
+#include "system/dma.h"
+#include "system/reset.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/qdev-properties.h"
@@ -48,11 +47,8 @@ static void generic_loader_reset(void *opaque)
GenericLoaderState *s = GENERIC_LOADER(opaque);
if (s->set_pc) {
- CPUClass *cc = CPU_GET_CLASS(s->cpu);
cpu_reset(s->cpu);
- if (cc) {
- cc->set_pc(s->cpu, s->addr);
- }
+ cpu_set_pc(s->cpu, s->addr);
}
if (s->data_len) {
@@ -66,7 +62,6 @@ static void generic_loader_realize(DeviceState *dev, Error **errp)
{
GenericLoaderState *s = GENERIC_LOADER(dev);
hwaddr entry;
- int big_endian;
ssize_t size = 0;
s->set_pc = false;
@@ -134,14 +129,12 @@ static void generic_loader_realize(DeviceState *dev, Error **errp)
s->cpu = first_cpu;
}
- big_endian = target_words_bigendian();
-
if (s->file) {
AddressSpace *as = s->cpu ? s->cpu->as : NULL;
if (!s->force_raw) {
size = load_elf_as(s->file, NULL, NULL, NULL, &entry, NULL, NULL,
- NULL, big_endian, 0, 0, 0, as);
+ NULL, ELFDATANONE, 0, 0, 0, as);
if (size < 0) {
size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL,
@@ -179,7 +172,7 @@ static void generic_loader_unrealize(DeviceState *dev)
qemu_unregister_reset(generic_loader_reset, dev);
}
-static Property generic_loader_props[] = {
+static const Property generic_loader_props[] = {
DEFINE_PROP_UINT64("addr", GenericLoaderState, addr, 0),
DEFINE_PROP_UINT64("data", GenericLoaderState, data, 0),
DEFINE_PROP_UINT8("data-len", GenericLoaderState, data_len, 0),
@@ -187,10 +180,9 @@ static Property generic_loader_props[] = {
DEFINE_PROP_UINT32("cpu-num", GenericLoaderState, cpu_num, CPU_NONE),
DEFINE_PROP_BOOL("force-raw", GenericLoaderState, force_raw, false),
DEFINE_PROP_STRING("file", GenericLoaderState, file),
- DEFINE_PROP_END_OF_LIST(),
};
-static void generic_loader_class_init(ObjectClass *klass, void *data)
+static void generic_loader_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/core/gpio.c b/hw/core/gpio.c
index 80d07a6..6e32a8e 100644
--- a/hw/core/gpio.c
+++ b/hw/core/gpio.c
@@ -121,8 +121,7 @@ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n,
name ? name : "unnamed-gpio-out", n);
if (input_pin && !OBJECT(input_pin)->parent) {
/* We need a name for object_property_set_link to work */
- object_property_add_child(container_get(qdev_get_machine(),
- "/unattached"),
+ object_property_add_child(machine_get_container("unattached"),
"non-qdev-gpio[*]", OBJECT(input_pin));
}
object_property_set_link(OBJECT(dev), propname,
diff --git a/hw/core/guest-loader.c b/hw/core/guest-loader.c
index 391c875..3db89d7 100644
--- a/hw/core/guest-loader.c
+++ b/hw/core/guest-loader.c
@@ -26,13 +26,13 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/loader.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "guest-loader.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/boards.h"
/*
@@ -111,15 +111,14 @@ static void guest_loader_realize(DeviceState *dev, Error **errp)
loader_insert_platform_data(s, size, errp);
}
-static Property guest_loader_props[] = {
+static const Property guest_loader_props[] = {
DEFINE_PROP_UINT64("addr", GuestLoaderState, addr, 0),
DEFINE_PROP_STRING("kernel", GuestLoaderState, kernel),
DEFINE_PROP_STRING("bootargs", GuestLoaderState, args),
DEFINE_PROP_STRING("initrd", GuestLoaderState, initrd),
- DEFINE_PROP_END_OF_LIST(),
};
-static void guest_loader_class_init(ObjectClass *klass, void *data)
+static void guest_loader_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/core/irq.c b/hw/core/irq.c
index 3f14e2d..6dd8d47 100644
--- a/hw/core/irq.c
+++ b/hw/core/irq.c
@@ -26,16 +26,6 @@
#include "hw/irq.h"
#include "qom/object.h"
-OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ)
-
-struct IRQState {
- Object parent_obj;
-
- qemu_irq_handler handler;
- void *opaque;
- int n;
-};
-
void qemu_set_irq(qemu_irq irq, int level)
{
if (!irq)
@@ -44,6 +34,29 @@ void qemu_set_irq(qemu_irq irq, int level)
irq->handler(irq->opaque, irq->n, level);
}
+static void init_irq_fields(IRQState *irq, qemu_irq_handler handler,
+ void *opaque, int n)
+{
+ irq->handler = handler;
+ irq->opaque = opaque;
+ irq->n = n;
+}
+
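+/* Initialize a caller-allocated IRQState in place (no heap allocation). */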
+void qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque,
+ int n)
+{
+ object_initialize(irq, sizeof(*irq), TYPE_IRQ);
+ init_irq_fields(irq, handler, opaque, n);
+}
+
+void qemu_init_irqs(IRQState irq[], size_t count,
+ qemu_irq_handler handler, void *opaque)
+{
+ for (size_t i = 0; i < count; i++) {
+ qemu_init_irq(&irq[i], handler, opaque, i);
+ }
+}
+
qemu_irq *qemu_extend_irqs(qemu_irq *old, int n_old, qemu_irq_handler handler,
void *opaque, int n)
{
@@ -67,13 +80,8 @@ qemu_irq *qemu_allocate_irqs(qemu_irq_handler handler, void *opaque, int n)
qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n)
{
- IRQState *irq;
-
- irq = IRQ(object_new(TYPE_IRQ));
- irq->handler = handler;
- irq->opaque = opaque;
- irq->n = n;
-
+ IRQState *irq = IRQ(object_new(TYPE_IRQ));
+ init_irq_fields(irq, handler, opaque, n);
return irq;
}
diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c
index 7ccc9d5..2dea485 100644
--- a/hw/core/loader-fit.c
+++ b/hw/core/loader-fit.c
@@ -20,20 +20,20 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/units.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/loader.h"
#include "hw/loader-fit.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include <libfdt.h>
#include <zlib.h>
#define FIT_LOADER_MAX_PATH (128)
-static const void *fit_load_image_alloc(const void *itb, const char *name,
- int *poff, size_t *psz, Error **errp)
+static void *fit_load_image_alloc(const void *itb, const char *name,
+ int *poff, size_t *psz, Error **errp)
{
const void *data;
const char *comp;
@@ -80,11 +80,11 @@ static const void *fit_load_image_alloc(const void *itb, const char *name,
return NULL;
}
- data = g_realloc(uncomp_data, uncomp_len);
+ uncomp_data = g_realloc(uncomp_data, uncomp_len);
if (psz) {
*psz = uncomp_len;
}
- return data;
+ return uncomp_data;
}
error_setg(errp, "unknown compression '%s'", comp);
@@ -177,13 +177,12 @@ out:
static int fit_load_fdt(const struct fit_loader *ldr, const void *itb,
int cfg, void *opaque, const void *match_data,
- hwaddr kernel_end, Error **errp)
+ hwaddr kernel_end, void **pfdt, Error **errp)
{
ERRP_GUARD();
Error *err = NULL;
const char *name;
- const void *data;
- const void *load_data;
+ void *data;
hwaddr load_addr;
int img_off;
size_t sz;
@@ -194,7 +193,7 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb,
return 0;
}
- load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp);
+ data = fit_load_image_alloc(itb, name, &img_off, &sz, errp);
if (!data) {
error_prepend(errp, "unable to load FDT image from FIT: ");
return -EINVAL;
@@ -211,19 +210,23 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb,
}
if (ldr->fdt_filter) {
- load_data = ldr->fdt_filter(opaque, data, match_data, &load_addr);
+ void *filtered_data;
+
+ filtered_data = ldr->fdt_filter(opaque, data, match_data, &load_addr);
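+ /* The filter may return a different buffer; free the original if so. */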
+ if (filtered_data != data) {
+ g_free(data);
+ data = filtered_data;
+ }
}
load_addr = ldr->addr_to_phys(opaque, load_addr);
- sz = fdt_totalsize(load_data);
- rom_add_blob_fixed(name, load_data, sz, load_addr);
+ sz = fdt_totalsize(data);
+ rom_add_blob_fixed(name, data, sz, load_addr);
- ret = 0;
+ *pfdt = data;
+ return 0;
out:
g_free((void *) data);
- if (data != load_data) {
- g_free((void *) load_data);
- }
return ret;
}
@@ -259,7 +262,8 @@ out:
return ret;
}
-int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque)
+int load_fit(const struct fit_loader *ldr, const char *filename,
+ void **pfdt, void *opaque)
{
Error *err = NULL;
const struct fit_loader_match *match;
@@ -323,7 +327,7 @@ int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque)
goto out;
}
- ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end,
+ ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end, pfdt,
&err);
if (ret) {
error_report_err(err);
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 31593a1..e7056ba 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -51,17 +51,18 @@
#include "trace.h"
#include "hw/hw.h"
#include "disas/disas.h"
+#include "migration/cpr.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
+#include "system/reset.h"
+#include "system/system.h"
#include "uboot_image.h"
#include "hw/loader.h"
#include "hw/nvram/fw_cfg.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/boards.h"
#include "qemu/cutils.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "tcg/debuginfo.h"
#include <zlib.h>
@@ -144,7 +145,7 @@ ssize_t load_image_mr(const char *filename, MemoryRegion *mr)
{
ssize_t size;
- if (!memory_access_is_direct(mr, false)) {
+ if (!memory_access_is_direct(mr, false, MEMTXATTRS_UNSPECIFIED)) {
/* Can only load an image into RAM or ROM */
return -1;
}
@@ -225,7 +226,7 @@ static void bswap_ahdr(struct exec *e)
ssize_t load_aout(const char *filename, hwaddr addr, int max_sz,
- int bswap_needed, hwaddr target_page_size)
+ bool big_endian, hwaddr target_page_size)
{
int fd;
ssize_t size, ret;
@@ -240,7 +241,7 @@ ssize_t load_aout(const char *filename, hwaddr addr, int max_sz,
if (size < 0)
goto fail;
- if (bswap_needed) {
+ if (big_endian != HOST_BIG_ENDIAN) {
bswap_ahdr(&e);
}
@@ -409,11 +410,11 @@ ssize_t load_elf(const char *filename,
uint64_t (*elf_note_fn)(void *, void *, bool),
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ uint64_t *highaddr, uint32_t *pflags, int elf_data_order,
int elf_machine, int clear_lsb, int data_swab)
{
return load_elf_as(filename, elf_note_fn, translate_fn, translate_opaque,
- pentry, lowaddr, highaddr, pflags, big_endian,
+ pentry, lowaddr, highaddr, pflags, elf_data_order,
elf_machine, clear_lsb, data_swab, NULL);
}
@@ -422,29 +423,15 @@ ssize_t load_elf_as(const char *filename,
uint64_t (*elf_note_fn)(void *, void *, bool),
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ uint64_t *highaddr, uint32_t *pflags, int elf_data_order,
int elf_machine, int clear_lsb, int data_swab,
AddressSpace *as)
{
- return load_elf_ram(filename, elf_note_fn, translate_fn, translate_opaque,
- pentry, lowaddr, highaddr, pflags, big_endian,
- elf_machine, clear_lsb, data_swab, as, true);
-}
-
-/* return < 0 if error, otherwise the number of bytes loaded in memory */
-ssize_t load_elf_ram(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags,
- int big_endian, int elf_machine, int clear_lsb,
- int data_swab, AddressSpace *as, bool load_rom)
-{
return load_elf_ram_sym(filename, elf_note_fn,
translate_fn, translate_opaque,
- pentry, lowaddr, highaddr, pflags, big_endian,
+ pentry, lowaddr, highaddr, pflags, elf_data_order,
elf_machine, clear_lsb, data_swab, as,
- load_rom, NULL);
+ true, NULL);
}
/* return < 0 if error, otherwise the number of bytes loaded in memory */
@@ -453,11 +440,12 @@ ssize_t load_elf_ram_sym(const char *filename,
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry,
uint64_t *lowaddr, uint64_t *highaddr,
- uint32_t *pflags, int big_endian, int elf_machine,
+ uint32_t *pflags, int elf_data_order, int elf_machine,
int clear_lsb, int data_swab,
AddressSpace *as, bool load_rom, symbol_fn_t sym_cb)
{
- int fd, data_order, target_data_order, must_swab;
+ const int host_data_order = HOST_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB;
+ int fd, must_swab;
ssize_t ret = ELF_LOAD_FAILED;
uint8_t e_ident[EI_NIDENT];
@@ -475,23 +463,14 @@ ssize_t load_elf_ram_sym(const char *filename,
ret = ELF_LOAD_NOT_ELF;
goto fail;
}
-#if HOST_BIG_ENDIAN
- data_order = ELFDATA2MSB;
-#else
- data_order = ELFDATA2LSB;
-#endif
- must_swab = data_order != e_ident[EI_DATA];
- if (big_endian) {
- target_data_order = ELFDATA2MSB;
- } else {
- target_data_order = ELFDATA2LSB;
- }
- if (target_data_order != e_ident[EI_DATA]) {
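+ /* ELFDATANONE means the caller accepts either byte order. */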
+ if (elf_data_order != ELFDATANONE && elf_data_order != e_ident[EI_DATA]) {
ret = ELF_LOAD_WRONG_ENDIAN;
goto fail;
}
+ must_swab = host_data_order != e_ident[EI_DATA];
+
lseek(fd, 0, SEEK_SET);
if (e_ident[EI_CLASS] == ELFCLASS64) {
ret = load_elf64(filename, fd, elf_note_fn,
@@ -886,11 +865,11 @@ struct linux_efi_zboot_header {
*
* If the image is not a Linux EFI zboot image, do nothing and return success.
*/
-ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size)
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, ssize_t *size)
{
const struct linux_efi_zboot_header *header;
uint8_t *data = NULL;
- int ploff, plsize;
+ ssize_t ploff, plsize;
ssize_t bytes;
/* ignore if this is too small to be a EFI zboot image */
@@ -1051,7 +1030,9 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro)
vmstate_register_ram_global(rom->mr);
data = memory_region_get_ram_ptr(rom->mr);
- memcpy(data, rom->data, rom->datasize);
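+ /*
+ * Skip the copy for CPR incoming: the ROM RAM block contents are
+ * preserved across CPR.
+ */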
+ if (!cpr_is_incoming()) {
+ memcpy(data, rom->data, rom->datasize);
+ }
return data;
}
@@ -1352,20 +1333,6 @@ void rom_set_fw(FWCfgState *f)
fw_cfg = f;
}
-void rom_set_order_override(int order)
-{
- if (!fw_cfg)
- return;
- fw_cfg_set_order_override(fw_cfg, order);
-}
-
-void rom_reset_order_override(void)
-{
- if (!fw_cfg)
- return;
- fw_cfg_reset_order_override(fw_cfg);
-}
-
void rom_transaction_begin(void)
{
Rom *rom;
@@ -1429,7 +1396,7 @@ typedef struct RomSec {
* work, but this way saves a little work later by avoiding
* dealing with "gaps" of 0 length.
*/
-static gint sort_secs(gconstpointer a, gconstpointer b)
+static gint sort_secs(gconstpointer a, gconstpointer b, gpointer d)
{
RomSec *ra = (RomSec *) a;
RomSec *rb = (RomSec *) b;
@@ -1482,7 +1449,7 @@ RomGap rom_find_largest_gap_between(hwaddr base, size_t size)
/* sentinel */
secs = add_romsec_to_list(secs, base + size, 1);
- secs = g_list_sort(secs, sort_secs);
+ secs = g_list_sort_with_data(secs, sort_secs, NULL);
for (it = g_list_first(secs); it; it = g_list_next(it)) {
cand = (RomSec *) it->data;
diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c
index 8701f00..c6325cd 100644
--- a/hw/core/machine-hmp-cmds.c
+++ b/hw/core/machine-hmp-cmds.c
@@ -19,10 +19,10 @@
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/qapi-commands-machine.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/string-output-visitor.h"
#include "qemu/error-report.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/boards.h"
void hmp_info_cpus(Monitor *mon, const QDict *qdict)
diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c
index 130217d..d82043e 100644
--- a/hw/core/machine-qmp-cmds.c
+++ b/hw/core/machine-qmp-cmds.c
@@ -15,16 +15,18 @@
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/qapi-commands-machine.h"
-#include "qapi/qmp/qobject.h"
+#include "qobject/qobject.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/type-helpers.h"
#include "qemu/uuid.h"
+#include "qemu/target-info.h"
#include "qom/qom-qobject.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/numa.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/hostmem.h"
+#include "system/hw_accel.h"
+#include "system/numa.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "hw/s390x/storage-keys.h"
/*
* fast means: we NEVER interrupt vCPU threads to retrieve
@@ -72,6 +74,7 @@ MachineInfoList *qmp_query_machines(bool has_compat_props, bool compat_props,
for (el = machines; el; el = el->next) {
MachineClass *mc = el->data;
+ const char *default_cpu_type = machine_class_default_cpu_type(mc);
MachineInfo *info;
info = g_malloc0(sizeof(*info));
@@ -90,8 +93,8 @@ MachineInfoList *qmp_query_machines(bool has_compat_props, bool compat_props,
info->numa_mem_supported = mc->numa_mem_supported;
info->deprecated = !!mc->deprecation_reason;
info->acpi = !!object_class_property_find(OBJECT_CLASS(mc), "acpi");
- if (mc->default_cpu_type) {
- info->default_cpu_type = g_strdup(mc->default_cpu_type);
+ if (default_cpu_type) {
+ info->default_cpu_type = g_strdup(default_cpu_type);
}
if (mc->default_ram_id) {
info->default_ram_id = g_strdup(mc->default_ram_id);
@@ -132,9 +135,9 @@ CurrentMachineParams *qmp_query_current_machine(Error **errp)
return params;
}
-TargetInfo *qmp_query_target(Error **errp)
+QemuTargetInfo *qmp_query_target(Error **errp)
{
- TargetInfo *info = g_malloc0(sizeof(*info));
+ QemuTargetInfo *info = g_malloc0(sizeof(*info));
info->arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1,
&error_abort);
@@ -406,3 +409,16 @@ GuidInfo *qmp_query_vm_generation_id(Error **errp)
info->guid = qemu_uuid_unparse_strdup(&vms->guid);
return info;
}
+
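+/*
+ * Generic dispatcher: forward the request to the machine's
+ * DumpSKeysInterface implementation, if it provides one.
+ */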
+void qmp_dump_skeys(const char *filename, Error **errp)
+{
+ ObjectClass *mc = object_get_class(qdev_get_machine());
+ ObjectClass *oc = object_class_dynamic_cast(mc, TYPE_DUMP_SKEYS_INTERFACE);
+
+ if (!oc) {
+ error_setg(errp, "Storage keys information not available"
+ " for this architecture");
+ return;
+ }
+ DUMP_SKEYS_INTERFACE_CLASS(oc)->qmp_dump_skeys(filename, errp);
+}
diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index 5d8d7ed..0be0ac0 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -261,6 +261,82 @@ void machine_parse_smp_config(MachineState *ms,
}
}
+static bool machine_check_topo_support(MachineState *ms,
+ CpuTopologyLevel topo,
+ Error **errp)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+ if ((topo == CPU_TOPOLOGY_LEVEL_MODULE && !mc->smp_props.modules_supported) ||
+ (topo == CPU_TOPOLOGY_LEVEL_CLUSTER && !mc->smp_props.clusters_supported) ||
+ (topo == CPU_TOPOLOGY_LEVEL_DIE && !mc->smp_props.dies_supported) ||
+ (topo == CPU_TOPOLOGY_LEVEL_BOOK && !mc->smp_props.books_supported) ||
+ (topo == CPU_TOPOLOGY_LEVEL_DRAWER && !mc->smp_props.drawers_supported)) {
+ error_setg(errp,
+ "Invalid topology level: %s. "
+ "The topology level is not supported by this machine",
+ CpuTopologyLevel_str(topo));
+ return false;
+ }
+
+ return true;
+}
+
+bool machine_parse_smp_cache(MachineState *ms,
+ const SmpCachePropertiesList *caches,
+ Error **errp)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const SmpCachePropertiesList *node;
+ DECLARE_BITMAP(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX);
+
+ bitmap_zero(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX);
+ for (node = caches; node; node = node->next) {
+ /* Prohibit users from repeating settings. */
+ if (test_bit(node->value->cache, caches_bitmap)) {
+ error_setg(errp,
+ "Invalid cache properties: %s. "
+ "The cache properties are duplicated",
+ CacheLevelAndType_str(node->value->cache));
+ return false;
+ }
+
+ machine_set_cache_topo_level(ms, node->value->cache,
+ node->value->topology);
+ set_bit(node->value->cache, caches_bitmap);
+ }
+
+ for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) {
+ const SmpCacheProperties *props = &ms->smp_cache.props[i];
+
+ /*
+ * Reject non "default" topology level if the cache isn't
+ * supported by the machine.
+ */
+ if (props->topology != CPU_TOPOLOGY_LEVEL_DEFAULT &&
+ !mc->smp_props.cache_supported[props->cache]) {
+ error_setg(errp,
+ "%s cache topology not supported by this machine",
+ CacheLevelAndType_str(props->cache));
+ return false;
+ }
+
+ if (props->topology == CPU_TOPOLOGY_LEVEL_THREAD) {
+ error_setg(errp,
+ "%s level cache not supported by this machine",
+ CpuTopologyLevel_str(props->topology));
+ return false;
+ }
+
+ if (!machine_check_topo_support(ms, props->topology, errp)) {
+ return false;
+ }
+ }
+
+ mc->smp_props.has_caches = true;
+ return true;
+}
+
unsigned int machine_topo_get_cores_per_socket(const MachineState *ms)
{
return ms->smp.cores * ms->smp.modules * ms->smp.clusters * ms->smp.dies;
@@ -270,3 +346,63 @@ unsigned int machine_topo_get_threads_per_socket(const MachineState *ms)
{
return ms->smp.threads * machine_topo_get_cores_per_socket(ms);
}
+
+CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms,
+ CacheLevelAndType cache)
+{
+ return ms->smp_cache.props[cache].topology;
+}
+
+void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache,
+ CpuTopologyLevel level)
+{
+ ms->smp_cache.props[cache].topology = level;
+}
+
+/*
+ * When both cache1 and cache2 are configured with specific topology levels
+ * (not default level), is cache1's topology level higher than cache2?
+ */
+static bool smp_cache_topo_cmp(const SmpCache *smp_cache,
+ CacheLevelAndType cache1,
+ CacheLevelAndType cache2)
+{
+ /*
+ * Before comparing, the "default" topology level should be replaced
+ * with the specific level.
+ */
+ assert(smp_cache->props[cache1].topology != CPU_TOPOLOGY_LEVEL_DEFAULT);
+
+ return smp_cache->props[cache1].topology > smp_cache->props[cache2].topology;
+}
+
+/*
+ * Currently, we have no way to expose the arch-specific default cache model
+ * because the cache model is sometimes related to the CPU model (e.g., i386).
+ *
+ * We can only check the correctness of the cache topology after the arch loads
+ * the user-configured cache model from MachineState and consumes the special
+ * "default" level by replacing it with the specific level.
+ */
+bool machine_check_smp_cache(const MachineState *ms, Error **errp)
+{
+ if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1D,
+ CACHE_LEVEL_AND_TYPE_L2) ||
+ smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1I,
+ CACHE_LEVEL_AND_TYPE_L2)) {
+ error_setg(errp,
+ "Invalid smp cache topology. "
+ "L2 cache topology level shouldn't be lower than L1 cache");
+ return false;
+ }
+
+ if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L2,
+ CACHE_LEVEL_AND_TYPE_L3)) {
+ error_setg(errp,
+ "Invalid smp cache topology. "
+ "L3 cache topology level shouldn't be lower than L2 cache");
+ return false;
+ }
+
+ return true;
+}
diff --git a/hw/core/machine.c b/hw/core/machine.c
index bc38cad..e869821 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -11,35 +11,61 @@
*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include "qemu/accel.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "hw/boards.h"
#include "hw/loader.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
+#include "qapi/qapi-commands-machine.h"
#include "qemu/madvise.h"
#include "qom/object_interfaces.h"
-#include "sysemu/cpus.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/xen.h"
-#include "sysemu/qtest.h"
+#include "system/cpus.h"
+#include "system/system.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/xen.h"
+#include "system/qtest.h"
#include "hw/pci/pci_bridge.h"
#include "hw/mem/nvdimm.h"
#include "migration/global_state.h"
-#include "exec/confidential-guest-support.h"
+#include "system/confidential-guest-support.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-iommu.h"
#include "audio/audio.h"
+GlobalProperty hw_compat_10_0[] = {
+ { "scsi-hd", "dpofua", "off" },
+};
+const size_t hw_compat_10_0_len = G_N_ELEMENTS(hw_compat_10_0);
+
+GlobalProperty hw_compat_9_2[] = {
+ { "arm-cpu", "backcompat-pauth-default-use-qarma5", "true"},
+ { "virtio-balloon-pci", "vectors", "0" },
+ { "virtio-balloon-pci-transitional", "vectors", "0" },
+ { "virtio-balloon-pci-non-transitional", "vectors", "0" },
+ { "virtio-mem-pci", "vectors", "0" },
+ { "migration", "multifd-clean-tls-termination", "false" },
+ { "migration", "send-switchover-start", "off"},
+ { "vfio-pci", "x-migration-multifd-transfer", "off" },
+};
+const size_t hw_compat_9_2_len = G_N_ELEMENTS(hw_compat_9_2);
+
+GlobalProperty hw_compat_9_1[] = {
+ { TYPE_PCI_DEVICE, "x-pcie-ext-tag", "false" },
+};
+const size_t hw_compat_9_1_len = G_N_ELEMENTS(hw_compat_9_1);
+
GlobalProperty hw_compat_9_0[] = {
- {"arm-cpu", "backcompat-cntfrq", "true" },
- {"scsi-disk-base", "migrate-emulated-scsi-request", "false" },
- {"vfio-pci", "skip-vsc-check", "false" },
+ { "arm-cpu", "backcompat-cntfrq", "true" },
+ { "scsi-hd", "migrate-emulated-scsi-request", "false" },
+ { "scsi-cd", "migrate-emulated-scsi-request", "false" },
+ { "vfio-pci", "skip-vsc-check", "false" },
{ "virtio-pci", "x-pcie-pm-no-soft-reset", "off" },
- {"sd-card", "spec_version", "2" },
+ { "sd-card", "spec_version", "2" },
};
const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0);
@@ -259,51 +285,6 @@ GlobalProperty hw_compat_2_6[] = {
};
const size_t hw_compat_2_6_len = G_N_ELEMENTS(hw_compat_2_6);
-GlobalProperty hw_compat_2_5[] = {
- { "isa-fdc", "fallback", "144" },
- { "pvscsi", "x-old-pci-configuration", "on" },
- { "pvscsi", "x-disable-pcie", "on" },
- { "vmxnet3", "x-old-msi-offsets", "on" },
- { "vmxnet3", "x-disable-pcie", "on" },
-};
-const size_t hw_compat_2_5_len = G_N_ELEMENTS(hw_compat_2_5);
-
-GlobalProperty hw_compat_2_4[] = {
- { "e1000", "extra_mac_registers", "off" },
- { "virtio-pci", "x-disable-pcie", "on" },
- { "virtio-pci", "migrate-extra", "off" },
- { "fw_cfg_mem", "dma_enabled", "off" },
- { "fw_cfg_io", "dma_enabled", "off" }
-};
-const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4);
-
-GlobalProperty hw_compat_2_3[] = {
- { "virtio-blk-pci", "any_layout", "off" },
- { "virtio-balloon-pci", "any_layout", "off" },
- { "virtio-serial-pci", "any_layout", "off" },
- { "virtio-9p-pci", "any_layout", "off" },
- { "virtio-rng-pci", "any_layout", "off" },
- { TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" },
- { "migration", "send-configuration", "off" },
- { "migration", "send-section-footer", "off" },
- { "migration", "store-global-state", "off" },
-};
-const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3);
-
-GlobalProperty hw_compat_2_2[] = {};
-const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2);
-
-GlobalProperty hw_compat_2_1[] = {
- { "intel-hda", "old_msi_addr", "on" },
- { "VGA", "qemu-extended-regs", "off" },
- { "secondary-vga", "qemu-extended-regs", "off" },
- { "virtio-scsi-pci", "any_layout", "off" },
- { "usb-mouse", "usb_version", "1" },
- { "usb-kbd", "usb_version", "1" },
- { "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" },
-};
-const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1);
-
MachineState *current_machine;
static char *machine_get_kernel(Object *obj, Error **errp)
@@ -321,6 +302,21 @@ static void machine_set_kernel(Object *obj, const char *value, Error **errp)
ms->kernel_filename = g_strdup(value);
}
+static char *machine_get_shim(Object *obj, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ return g_strdup(ms->shim_filename);
+}
+
+static void machine_set_shim(Object *obj, const char *value, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ g_free(ms->shim_filename);
+ ms->shim_filename = g_strdup(value);
+}
+
static char *machine_get_initrd(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
@@ -456,6 +452,22 @@ static void machine_set_mem_merge(Object *obj, bool value, Error **errp)
ms->mem_merge = value;
}
+#ifdef CONFIG_POSIX
+static bool machine_get_aux_ram_share(Object *obj, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ return ms->aux_ram_share;
+}
+
+static void machine_set_aux_ram_share(Object *obj, bool value, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ ms->aux_ram_share = value;
+}
+#endif
+
static bool machine_get_usb(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
@@ -617,11 +629,19 @@ static void machine_set_mem(Object *obj, Visitor *v, const char *name,
mem->size = mc->fixup_ram_size(mem->size);
}
if ((ram_addr_t)mem->size != mem->size) {
- error_setg(errp, "ram size too large");
+ error_setg(errp, "ram size %llu exceeds permitted maximum %llu",
+ (unsigned long long)mem->size,
+ (unsigned long long)RAM_ADDR_MAX);
goto out_free;
}
if (mem->has_max_size) {
+ if ((ram_addr_t)mem->max_size != mem->max_size) {
+ error_setg(errp, "ram size %llu exceeds permitted maximum %llu",
+ (unsigned long long)mem->max_size,
+ (unsigned long long)RAM_ADDR_MAX);
+ goto out_free;
+ }
if (mem->max_size < mem->size) {
error_setg(errp, "invalid value of maxmem: "
"maximum memory size (0x%" PRIx64 ") must be at least "
@@ -928,6 +948,40 @@ static void machine_set_smp(Object *obj, Visitor *v, const char *name,
machine_parse_smp_config(ms, config, errp);
}
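+/*
+ * "smp-cache" property getter: report the topology level configured for
+ * each cache.
+ */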
+static void machine_get_smp_cache(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+ SmpCache *cache = &ms->smp_cache;
+ SmpCachePropertiesList *head = NULL;
+ SmpCachePropertiesList **tail = &head;
+
+ for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) {
+ SmpCacheProperties *node = g_new(SmpCacheProperties, 1);
+
+ node->cache = cache->props[i].cache;
+ node->topology = cache->props[i].topology;
+ QAPI_LIST_APPEND(tail, node);
+ }
+
+ visit_type_SmpCachePropertiesList(v, name, &head, errp);
+ qapi_free_SmpCachePropertiesList(head);
+}
+
+static void machine_set_smp_cache(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+ SmpCachePropertiesList *caches;
+
+ if (!visit_type_SmpCachePropertiesList(v, name, &caches, errp)) {
+ return;
+ }
+
+ machine_parse_smp_cache(ms, caches, errp);
+ qapi_free_SmpCachePropertiesList(caches);
+}
+
static void machine_get_boot(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -997,24 +1051,70 @@ void machine_add_audiodev_property(MachineClass *mc)
"Audiodev to use for default machine devices");
}
-static void machine_class_init(ObjectClass *oc, void *data)
+static bool create_default_memdev(MachineState *ms, const char *path,
+ Error **errp)
+{
+ Object *obj;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ bool r = false;
+
+ obj = object_new(path ? TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM);
+ if (path) {
+ if (!object_property_set_str(obj, "mem-path", path, errp)) {
+ goto out;
+ }
+ }
+ if (!object_property_set_int(obj, "size", ms->ram_size, errp)) {
+ goto out;
+ }
+ object_property_add_child(object_get_objects_root(), mc->default_ram_id,
+ obj);
+ /* Ensure backend's memory region name is equal to mc->default_ram_id */
+ if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id",
+ false, errp)) {
+ goto out;
+ }
+ if (!user_creatable_complete(USER_CREATABLE(obj), errp)) {
+ goto out;
+ }
+ r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp);
+
+out:
+ object_unref(obj);
+ return r;
+}
+
+static void machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
/* Default 128 MB as guest ram size */
mc->default_ram_size = 128 * MiB;
mc->rom_file_has_mr = true;
+ /*
+ * SMBIOS 3.1.0 7.18.5 Memory Device — Extended Size
+ * use max possible value that could be encoded into
+ * 'Extended Size' field (2047 TiB).
+ */
+ mc->smbios_memory_device_size = 2047 * TiB;
/* numa node memory size aligned on 8MB by default.
* On Linux, each node's border has to be 8MB aligned
*/
mc->numa_mem_align_shift = 23;
+ mc->create_default_memdev = create_default_memdev;
+
object_class_property_add_str(oc, "kernel",
machine_get_kernel, machine_set_kernel);
object_class_property_set_description(oc, "kernel",
"Linux kernel image file");
+ object_class_property_add_str(oc, "shim",
+ machine_get_shim, machine_set_shim);
+ object_class_property_set_description(oc, "shim",
+ "shim.efi file");
+
object_class_property_add_str(oc, "initrd",
machine_get_initrd, machine_set_initrd);
object_class_property_set_description(oc, "initrd",
@@ -1047,6 +1147,11 @@ static void machine_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "smp",
"CPU topology");
+ object_class_property_add(oc, "smp-cache", "SmpCachePropertiesWrapper",
+ machine_get_smp_cache, machine_set_smp_cache, NULL, NULL);
+ object_class_property_set_description(oc, "smp-cache",
+ "Cache properties list for SMP machine");
+
object_class_property_add(oc, "phandle-start", "int",
machine_get_phandle_start, machine_set_phandle_start,
NULL, NULL);
@@ -1068,6 +1173,12 @@ static void machine_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "mem-merge",
"Enable/disable memory merge support");
+#ifdef CONFIG_POSIX
+ object_class_property_add_bool(oc, "aux-ram-share",
+ machine_get_aux_ram_share,
+ machine_set_aux_ram_share);
+#endif
+
object_class_property_add_bool(oc, "usb",
machine_get_usb, machine_set_usb);
object_class_property_set_description(oc, "usb",
@@ -1116,7 +1227,7 @@ static void machine_class_init(ObjectClass *oc, void *data)
"Memory size configuration");
}
-static void machine_class_base_init(ObjectClass *oc, void *data)
+static void machine_class_base_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->max_cpus = mc->max_cpus ?: 1;
@@ -1137,9 +1248,6 @@ static void machine_initfn(Object *obj)
MachineState *ms = MACHINE(obj);
MachineClass *mc = MACHINE_GET_CLASS(obj);
- container_get(obj, "/peripheral");
- container_get(obj, "/peripheral-anon");
-
ms->dump_guest_core = true;
ms->mem_merge = (QEMU_MADV_MERGEABLE != QEMU_MADV_INVALID);
ms->enable_graphics = true;
@@ -1185,6 +1293,11 @@ static void machine_initfn(Object *obj)
ms->smp.cores = 1;
ms->smp.threads = 1;
+ for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) {
+ ms->smp_cache.props[i].cache = (CacheLevelAndType)i;
+ ms->smp_cache.props[i].topology = CPU_TOPOLOGY_LEVEL_DEFAULT;
+ }
+
machine_copy_boot_config(ms, &(BootConfiguration){ 0 });
}
@@ -1403,38 +1516,6 @@ MemoryRegion *machine_consume_memdev(MachineState *machine,
return ret;
}
-static bool create_default_memdev(MachineState *ms, const char *path, Error **errp)
-{
- Object *obj;
- MachineClass *mc = MACHINE_GET_CLASS(ms);
- bool r = false;
-
- obj = object_new(path ? TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM);
- if (path) {
- if (!object_property_set_str(obj, "mem-path", path, errp)) {
- goto out;
- }
- }
- if (!object_property_set_int(obj, "size", ms->ram_size, errp)) {
- goto out;
- }
- object_property_add_child(object_get_objects_root(), mc->default_ram_id,
- obj);
- /* Ensure backend's memory region name is equal to mc->default_ram_id */
- if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id",
- false, errp)) {
- goto out;
- }
- if (!user_creatable_complete(USER_CREATABLE(obj), errp)) {
- goto out;
- }
- r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp);
-
-out:
- object_unref(obj);
- return r;
-}
-
const char *machine_class_default_cpu_type(MachineClass *mc)
{
if (mc->valid_cpu_types && !mc->valid_cpu_types[1]) {
@@ -1538,7 +1619,9 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error *
machine_class->default_ram_id);
return;
}
- if (!create_default_memdev(current_machine, mem_path, errp)) {
+
+ if (!machine_class->create_default_memdev(current_machine, mem_path,
+ errp)) {
return;
}
}
@@ -1603,6 +1686,22 @@ void qemu_remove_machine_init_done_notifier(Notifier *notify)
notifier_remove(notify);
}
+static void handle_machine_dumpdtb(MachineState *ms)
+{
+ if (!ms->dumpdtb) {
+ return;
+ }
+#ifdef CONFIG_FDT
+ qmp_dumpdtb(ms->dumpdtb, &error_fatal);
+ exit(0);
+#else
+ error_report("This machine doesn't have an FDT");
+ error_printf("(this machine type definitely doesn't use FDT, and "
+ "this QEMU doesn't have FDT support compiled in)\n");
+ exit(1);
+#endif
+}
+
void qdev_machine_creation_done(void)
{
cpu_synchronize_all_post_init();
@@ -1632,6 +1731,12 @@ void qdev_machine_creation_done(void)
notifier_list_notify(&machine_init_done_notifiers, NULL);
+ /*
+ * If the user used -machine dumpdtb=file.dtb to request that we
+ * dump the DTB to a file, do it now, and exit.
+ */
+ handle_machine_dumpdtb(current_machine);
+
if (rom_check_and_register_reset() != 0) {
exit(1);
}
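
The machine.c hunks above also turn the default-RAM helper into a MachineClass hook (mc->create_default_memdev), so a board can substitute its own policy for the implicit memory backend. A minimal sketch of how a board might override it follows; the board name, the -mem-path restriction and the saved-pointer pattern are illustrative assumptions, only the hook signature comes from this patch (QEMU headers omitted for brevity).

static bool (*generic_create_default_memdev)(MachineState *ms,
                                             const char *path, Error **errp);

/* Hypothetical board-specific default memdev policy. */
static bool demo_create_default_memdev(MachineState *ms, const char *path,
                                       Error **errp)
{
    if (path) {
        error_setg(errp, "this machine does not support -mem-path");
        return false;
    }
    /* Otherwise behave exactly like the generic helper. */
    return generic_create_default_memdev(ms, NULL, errp);
}

static void demo_machine_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    /* The abstract TYPE_MACHINE class_init installed the generic hook;
     * remember it before overriding. */
    generic_create_default_memdev = mc->create_default_memdev;
    mc->create_default_memdev = demo_create_default_memdev;
}
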
diff --git a/hw/core/meson.build b/hw/core/meson.build
index a3d9bab..b5a545a 100644
--- a/hw/core/meson.build
+++ b/hw/core/meson.build
@@ -24,9 +24,10 @@ system_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c'))
system_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c'))
system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c'))
system_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c'))
+system_ss.add(when: 'CONFIG_EIF', if_true: [files('eif.c'), zlib, libcbor, gnutls])
system_ss.add(files(
- 'cpu-sysemu.c',
+ 'cpu-system.c',
'fw-path-provider.c',
'gpio.c',
'hotplug.c',
@@ -45,3 +46,7 @@ system_ss.add(files(
'vm-change-state-handler.c',
'clock-vmstate.c',
))
+user_ss.add(files(
+ 'cpu-user.c',
+ 'qdev-user.c',
+))
diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c
index f586a4b..a6e477a 100644
--- a/hw/core/null-machine.c
+++ b/hw/core/null-machine.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/core/cpu.h"
static void machine_none_init(MachineState *mch)
@@ -53,7 +53,6 @@ static void machine_none_machine_init(MachineClass *mc)
mc->no_parallel = 1;
mc->no_floppy = 1;
mc->no_cdrom = 1;
- mc->no_sdcard = 1;
}
DEFINE_MACHINE("none", machine_none_machine_init)
diff --git a/hw/core/numa.c b/hw/core/numa.c
index f8ce332..218576f 100644
--- a/hw/core/numa.c
+++ b/hw/core/numa.c
@@ -24,15 +24,15 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "exec/cpu-common.h"
#include "exec/ramlist.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-visit-machine.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/core/cpu.h"
#include "hw/mem/pc-dimm.h"
#include "hw/boards.h"
@@ -249,7 +249,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
lb_data.initiator = node->initiator;
lb_data.target = node->target;
- if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) {
+ if (node->data_type <= HMAT_LB_DATA_TYPE_WRITE_LATENCY) {
/* Input latency data */
if (!node->has_latency) {
@@ -313,7 +313,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
numa_info[node->target].lb_info_provided |= BIT(0);
}
lb_data.data = node->latency;
- } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) {
+ } else if (node->data_type >= HMAT_LB_DATA_TYPE_ACCESS_BANDWIDTH) {
/* Input bandwidth data */
if (!node->has_bandwidth) {
error_setg(errp, "Missing 'bandwidth' option");
@@ -380,7 +380,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
}
lb_data.data = node->bandwidth;
} else {
- assert(0);
+ g_assert_not_reached();
}
g_array_append_val(hmat_lb->list, lb_data);
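
The assert(0) to g_assert_not_reached() change above is a small idiom switch: the latter states the intent ("this branch is impossible") and QEMU configures its builds so assertions are never compiled out. A tiny stand-alone illustration of the idiom, not taken from the patch:

#include <glib.h>

/* The HMAT code above dispatches on a data type that is guaranteed to be
 * either a latency or a bandwidth kind; any other value is a programming
 * error, which the default branch now states explicitly. */
static int demo_data_kind(int type)
{
    switch (type) {
    case 0:
    case 1:
        return 0;   /* latency-style entry */
    case 2:
    case 3:
        return 1;   /* bandwidth-style entry */
    default:
        g_assert_not_reached();
    }
}
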
diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c
index 13907df..3942c70 100644
--- a/hw/core/or-irq.c
+++ b/hw/core/or-irq.c
@@ -115,16 +115,15 @@ static const VMStateDescription vmstate_or_irq = {
},
};
-static Property or_irq_properties[] = {
+static const Property or_irq_properties[] = {
DEFINE_PROP_UINT16("num-lines", OrIRQState, num_lines, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void or_irq_class_init(ObjectClass *klass, void *data)
+static void or_irq_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = or_irq_reset;
+ device_class_set_legacy_reset(dc, or_irq_reset);
device_class_set_props(dc, or_irq_properties);
dc->realize = or_irq_realize;
dc->vmsd = &vmstate_or_irq;
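
Two conventions change in or-irq.c that recur throughout this series: property arrays become static const and lose the DEFINE_PROP_END_OF_LIST() terminator (the element count is registered instead, see the device_class_set_props_n change in hw/core/qdev-properties.c further down), and the direct dc->reset assignment is replaced by device_class_set_legacy_reset(). A hedged sketch of a device class following both new conventions; the device type and its state struct are made up for illustration (QEMU headers omitted).

typedef struct DemoIrqState {
    DeviceState parent_obj;
    uint16_t num_lines;
} DemoIrqState;

static const Property demo_irq_properties[] = {
    DEFINE_PROP_UINT16("num-lines", DemoIrqState, num_lines, 1),
    /* no DEFINE_PROP_END_OF_LIST() terminator any more */
};

static void demo_irq_reset(DeviceState *dev)
{
    /* single-function, legacy-style reset handler */
}

static void demo_irq_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, demo_irq_reset);
    device_class_set_props(dc, demo_irq_properties);
}
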
diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c
index b8487b2..6950063 100644
--- a/hw/core/platform-bus.c
+++ b/hw/core/platform-bus.c
@@ -145,9 +145,12 @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev,
* the target device's memory region
*/
for (off = 0; off < pbus->mmio_size; off += alignment) {
- if (!memory_region_find(&pbus->mmio, off, size).mr) {
+ MemoryRegion *mr = memory_region_find(&pbus->mmio, off, size).mr;
+ if (!mr) {
found_region = true;
break;
+ } else {
+ memory_region_unref(mr);
}
}
@@ -201,13 +204,12 @@ static void platform_bus_realize(DeviceState *dev, Error **errp)
plaform_bus_refresh_irqs(pbus);
}
-static Property platform_bus_properties[] = {
+static const Property platform_bus_properties[] = {
DEFINE_PROP_UINT32("num_irqs", PlatformBusDevice, num_irqs, 0),
DEFINE_PROP_UINT32("mmio_size", PlatformBusDevice, mmio_size, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void platform_bus_class_init(ObjectClass *klass, void *data)
+static void platform_bus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
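
The platform-bus fix above is about reference counting: memory_region_find() takes a reference on the region it returns in the section, so a caller that only probes whether a range is free must drop that reference again or it pins the region (and its owner) indefinitely. A minimal sketch of the discipline, with QEMU headers omitted:

/* Returns true if nothing is mapped at [offset, offset + size) inside
 * the container region. */
static bool demo_range_is_free(MemoryRegion *container, hwaddr offset,
                               uint64_t size)
{
    MemoryRegionSection section = memory_region_find(container, offset, size);

    if (!section.mr) {
        return true;                     /* hole found */
    }
    memory_region_unref(section.mr);     /* drop the reference we were handed */
    return false;
}
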
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c
index b151759..0aeb10f 100644
--- a/hw/core/ptimer.c
+++ b/hw/core/ptimer.c
@@ -11,8 +11,8 @@
#include "migration/vmstate.h"
#include "qemu/host-utils.h"
#include "exec/replay-core.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/qtest.h"
+#include "exec/icount.h"
+#include "system/qtest.h"
#include "block/aio.h"
#include "hw/clock.h"
@@ -83,7 +83,7 @@ static void ptimer_reload(ptimer_state *s, int delta_adjust)
delta = s->delta = s->limit;
}
- if (s->period == 0) {
+ if (s->period == 0 && s->period_frac == 0) {
if (!qtest_enabled()) {
fprintf(stderr, "Timer with period zero, disabling\n");
}
@@ -309,7 +309,7 @@ void ptimer_run(ptimer_state *s, int oneshot)
assert(s->in_transaction);
- if (was_disabled && s->period == 0) {
+ if (was_disabled && s->period == 0 && s->period_frac == 0) {
if (!qtest_enabled()) {
fprintf(stderr, "Timer with period zero, disabling\n");
}
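
The two ptimer hunks treat a timer whose integer period is 0 but whose fractional period (period_frac) is non-zero as a running timer instead of disabling it with the "period zero" warning. Such periods arise naturally from very fast input clocks. A sketch of where that happens, assuming the usual transaction API; the surrounding device code is hypothetical and ptimer_set_period_from_clock() is used as the typical source of a sub-nanosecond period.

static void demo_start_counter(ptimer_state *timer, Clock *fast_clk)
{
    ptimer_transaction_begin(timer);
    /* A clock faster than 1 GHz with divisor 1 gives a period whose
     * integer nanosecond part is 0 and whose fractional part is not. */
    ptimer_set_period_from_clock(timer, fast_clk, 1);
    ptimer_set_limit(timer, 1000, 1);    /* count down from 1000, reload */
    ptimer_run(timer, 0);                /* periodic mode */
    ptimer_transaction_commit(timer);
}
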
diff --git a/hw/core/qdev-clock.c b/hw/core/qdev-clock.c
index 8279957..dacafa4 100644
--- a/hw/core/qdev-clock.c
+++ b/hw/core/qdev-clock.c
@@ -22,7 +22,7 @@
* Add a new clock in a device
*/
static NamedClockList *qdev_init_clocklist(DeviceState *dev, const char *name,
- bool output, Clock *clk)
+ bool alias, bool output, Clock *clk)
{
NamedClockList *ncl;
@@ -38,39 +38,8 @@ static NamedClockList *qdev_init_clocklist(DeviceState *dev, const char *name,
*/
ncl = g_new0(NamedClockList, 1);
ncl->name = g_strdup(name);
+ ncl->alias = alias;
ncl->output = output;
- ncl->alias = (clk != NULL);
-
- /*
- * Trying to create a clock whose name clashes with some other
- * clock or property is a bug in the caller and we will abort().
- */
- if (clk == NULL) {
- clk = CLOCK(object_new(TYPE_CLOCK));
- object_property_add_child(OBJECT(dev), name, OBJECT(clk));
- if (output) {
- /*
- * Remove object_new()'s initial reference.
- * Note that for inputs, the reference created by object_new()
- * will be deleted in qdev_finalize_clocklist().
- */
- object_unref(OBJECT(clk));
- }
- } else {
- object_property_add_link(OBJECT(dev), name,
- object_get_typename(OBJECT(clk)),
- (Object **) &ncl->clock,
- NULL, OBJ_PROP_LINK_STRONG);
- /*
- * Since the link property has the OBJ_PROP_LINK_STRONG flag, the clk
- * object reference count gets decremented on property deletion.
- * However object_property_add_link does not increment it since it
- * doesn't know the linked object. Increment it here to ensure the
- * aliased clock stays alive during this device life-time.
- */
- object_ref(OBJECT(clk));
- }
-
ncl->clock = clk;
QLIST_INSERT_HEAD(&dev->clocks, ncl, node);
@@ -84,14 +53,11 @@ void qdev_finalize_clocklist(DeviceState *dev)
QLIST_FOREACH_SAFE(ncl, &dev->clocks, node, ncl_next) {
QLIST_REMOVE(ncl, node);
- if (!ncl->output && !ncl->alias) {
+ if (!ncl->alias) {
/*
* We kept a reference on the input clock to ensure it lives up to
- * this point so we can safely remove the callback.
- * It avoids having a callback to a deleted object if ncl->clock
- * is still referenced somewhere else (eg: by a clock output).
+ * this point; it is used by the monitor to show the frequency.
*/
- clock_clear_callback(ncl->clock);
object_unref(OBJECT(ncl->clock));
}
g_free(ncl->name);
@@ -101,29 +67,25 @@ void qdev_finalize_clocklist(DeviceState *dev)
Clock *qdev_init_clock_out(DeviceState *dev, const char *name)
{
- NamedClockList *ncl;
-
- assert(name);
-
- ncl = qdev_init_clocklist(dev, name, true, NULL);
+ Clock *clk = CLOCK(object_new(TYPE_CLOCK));
+ object_property_add_child(OBJECT(dev), name, OBJECT(clk));
- return ncl->clock;
+ qdev_init_clocklist(dev, name, false, true, clk);
+ return clk;
}
Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
ClockCallback *callback, void *opaque,
unsigned int events)
{
- NamedClockList *ncl;
-
- assert(name);
-
- ncl = qdev_init_clocklist(dev, name, false, NULL);
+ Clock *clk = CLOCK(object_new(TYPE_CLOCK));
+ object_property_add_child(OBJECT(dev), name, OBJECT(clk));
+ qdev_init_clocklist(dev, name, false, false, clk);
if (callback) {
- clock_set_callback(ncl->clock, callback, opaque, events);
+ clock_set_callback(clk, callback, opaque, events);
}
- return ncl->clock;
+ return clk;
}
void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks)
@@ -194,15 +156,25 @@ Clock *qdev_get_clock_out(DeviceState *dev, const char *name)
Clock *qdev_alias_clock(DeviceState *dev, const char *name,
DeviceState *alias_dev, const char *alias_name)
{
- NamedClockList *ncl;
-
- assert(name && alias_name);
+ NamedClockList *ncl = qdev_get_clocklist(dev, name);
+ Clock *clk = ncl->clock;
- ncl = qdev_get_clocklist(dev, name);
+ ncl = qdev_init_clocklist(alias_dev, alias_name, true, ncl->output, clk);
- qdev_init_clocklist(alias_dev, alias_name, ncl->output, ncl->clock);
+ object_property_add_link(OBJECT(alias_dev), alias_name,
+ TYPE_CLOCK,
+ (Object **) &ncl->clock,
+ NULL, OBJ_PROP_LINK_STRONG);
+ /*
+ * Since the link property has the OBJ_PROP_LINK_STRONG flag, the clk
+ * object reference count gets decremented on property deletion.
+ * However object_property_add_link does not increment it since it
+ * doesn't know the linked object. Increment it here to ensure the
+ * aliased clock stays alive during this device life-time.
+ */
+ object_ref(OBJECT(clk));
- return ncl->clock;
+ return clk;
}
void qdev_connect_clock_in(DeviceState *dev, const char *name, Clock *source)
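
The qdev-clock.c rework moves Clock object creation out of qdev_init_clocklist() into the qdev_init_clock_in/out() wrappers and makes qdev_alias_clock() set up its own strong link property, but the API seen by devices is unchanged. A short reminder of typical use, with a made-up device state struct and callback:

typedef struct DemoClkState {
    DeviceState parent_obj;
    Clock *clk_in;
    Clock *clk_out;
} DemoClkState;

static void demo_clk_update(void *opaque, ClockEvent event)
{
    /* react to ClockUpdate events on the input clock */
}

static void demo_clk_init(Object *obj)
{
    DemoClkState *s = (DemoClkState *)obj;   /* QOM cast macro omitted */

    s->clk_in  = qdev_init_clock_in(DEVICE(obj), "clk-in",
                                    demo_clk_update, s, ClockUpdate);
    s->clk_out = qdev_init_clock_out(DEVICE(obj), "clk-out");
}

A container device can then re-export a child's clock under another name with qdev_alias_clock(child, "clk-in", DEVICE(container), "ref-clk"); after this patch that call also creates the OBJ_PROP_LINK_STRONG property shown in the hunk.
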
diff --git a/hw/core/qdev-hotplug.c b/hw/core/qdev-hotplug.c
index d495d0e..ff176dc 100644
--- a/hw/core/qdev-hotplug.c
+++ b/hw/core/qdev-hotplug.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/boards.h"
+#include "qapi/error.h"
HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev)
{
@@ -30,12 +31,48 @@ HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev)
return NULL;
}
-bool qdev_hotplug_allowed(DeviceState *dev, Error **errp)
+static bool qdev_hotplug_unplug_allowed_common(DeviceState *dev, BusState *bus,
+ Error **errp)
+{
+ DeviceClass *dc = DEVICE_GET_CLASS(dev);
+
+ if (!dc->hotpluggable) {
+ error_setg(errp, "Device '%s' does not support hotplugging",
+ object_get_typename(OBJECT(dev)));
+ return false;
+ }
+
+ if (bus) {
+ if (!qbus_is_hotpluggable(bus)) {
+ error_setg(errp, "Bus '%s' does not support hotplugging",
+ bus->name);
+ return false;
+ }
+ } else {
+ if (!qdev_get_machine_hotplug_handler(dev)) {
+ /*
+ * No bus, no machine hotplug handler --> device is not hotpluggable
+ */
+ error_setg(errp,
+ "Device '%s' can not be hotplugged on this machine",
+ object_get_typename(OBJECT(dev)));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool qdev_hotplug_allowed(DeviceState *dev, BusState *bus, Error **errp)
{
MachineState *machine;
MachineClass *mc;
Object *m_obj = qdev_get_machine();
+ if (!qdev_hotplug_unplug_allowed_common(dev, bus, errp)) {
+ return false;
+ }
+
if (object_dynamic_cast(m_obj, TYPE_MACHINE)) {
machine = MACHINE(m_obj);
mc = MACHINE_GET_CLASS(machine);
@@ -47,6 +84,12 @@ bool qdev_hotplug_allowed(DeviceState *dev, Error **errp)
return true;
}
+bool qdev_hotunplug_allowed(DeviceState *dev, Error **errp)
+{
+ return !qdev_unplug_blocked(dev, errp) &&
+ qdev_hotplug_unplug_allowed_common(dev, dev->parent_bus, errp);
+}
+
HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev)
{
if (dev->parent_bus) {
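
qdev_hotplug_allowed() now takes the target bus and performs the generic checks (hotpluggable device class, hotpluggable bus, or a machine hotplug handler) before consulting the machine, and the new qdev_hotunplug_allowed() covers the unplug direction. A hedged sketch of how a device_add-style caller could use them; the wrapper functions are hypothetical and error handling is condensed.

static bool demo_try_hotplug(DeviceState *dev, BusState *bus, Error **errp)
{
    if (!qdev_hotplug_allowed(dev, bus, errp)) {
        return false;             /* device, bus or machine said no */
    }
    return qdev_realize(dev, bus, errp);
}

static bool demo_check_hot_unplug(DeviceState *dev, Error **errp)
{
    if (!qdev_hotunplug_allowed(dev, errp)) {
        return false;             /* unplug blocked or not supported */
    }
    /* the actual request still goes through the hotplug handler,
     * e.g. via qdev_unplug() */
    return true;
}
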
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index f13350b..24e145d 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -29,8 +29,8 @@
#include "audio/audio.h"
#include "chardev/char-fe.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "net/net.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
@@ -58,13 +58,39 @@ static bool check_prop_still_unset(Object *obj, const char *name,
return false;
}
+bool qdev_prop_sanitize_s390x_loadparm(uint8_t *loadparm, const char *str,
+ Error **errp)
+{
+ int i, len;
+
+ len = strlen(str);
+ if (len > 8) {
+ error_setg(errp, "'loadparm' can only contain up to 8 characters");
+ return false;
+ }
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = qemu_toupper(str[i]); /* mimic HMC */
+
+ if (qemu_isalnum(c) || c == '.' || c == ' ') {
+ loadparm[i] = c;
+ } else {
+ error_setg(errp,
+ "invalid character in 'loadparm': '%c' (ASCII 0x%02x)",
+ c, c);
+ return false;
+ }
+ }
+
+ return true;
+}
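
qdev_prop_sanitize_s390x_loadparm() gives the s390x boards one shared validator for the LOADPARM string: at most 8 characters, upper-cased as the HMC would, restricted to alphanumerics, '.' and ' '. A small usage sketch; the wrapper and the blank-padding convention are illustrative, not part of the patch.

/* Fill an 8-byte loadparm field from a user-supplied string. */
static bool demo_fill_loadparm(uint8_t loadparm[8], const char *str,
                               Error **errp)
{
    memset(loadparm, ' ', 8);    /* pad with blanks, then overwrite */
    return qdev_prop_sanitize_s390x_loadparm(loadparm, str, errp);
}
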
/* --- drive --- */
static void get_drive(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
void **ptr = object_field_prop_ptr(obj, prop);
const char *value;
char *p;
@@ -90,7 +116,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
void *opaque, bool iothread, Error **errp)
{
DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
+ const Property *prop = opaque;
void **ptr = object_field_prop_ptr(obj, prop);
char *str;
BlockBackend *blk;
@@ -119,6 +145,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
if (ctx != bdrv_get_aio_context(bs)) {
error_setg(errp, "Different aio context is not supported for new "
"node");
+ return;
}
blk_replace_bs(blk, bs, errp);
@@ -199,7 +226,7 @@ static void set_drive_iothread(Object *obj, Visitor *v, const char *name,
static void release_drive(Object *obj, const char *name, void *opaque)
{
DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
+ const Property *prop = opaque;
BlockBackend **ptr = object_field_prop_ptr(obj, prop);
if (*ptr) {
@@ -209,7 +236,7 @@ static void release_drive(Object *obj, const char *name, void *opaque)
}
const PropertyInfo qdev_prop_drive = {
- .name = "str",
+ .type = "str",
.description = "Node name or ID of a block device to use as a backend",
.realized_set_allowed = true,
.get = get_drive,
@@ -218,7 +245,7 @@ const PropertyInfo qdev_prop_drive = {
};
const PropertyInfo qdev_prop_drive_iothread = {
- .name = "str",
+ .type = "str",
.description = "Node name or ID of a block device to use as a backend",
.realized_set_allowed = true,
.get = get_drive,
@@ -243,7 +270,7 @@ static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
ERRP_GUARD();
- Property *prop = opaque;
+ const Property *prop = opaque;
CharBackend *be = object_field_prop_ptr(obj, prop);
Chardev *s;
char *str;
@@ -279,14 +306,14 @@ static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque,
static void release_chr(Object *obj, const char *name, void *opaque)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
CharBackend *be = object_field_prop_ptr(obj, prop);
qemu_chr_fe_deinit(be, false);
}
const PropertyInfo qdev_prop_chr = {
- .name = "str",
+ .type = "str",
.description = "ID of a chardev to use as a backend",
.get = get_chr,
.set = set_chr,
@@ -303,7 +330,7 @@ const PropertyInfo qdev_prop_chr = {
static void get_mac(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
MACAddr *mac = object_field_prop_ptr(obj, prop);
char buffer[2 * 6 + 5 + 1];
char *p = buffer;
@@ -318,7 +345,7 @@ static void get_mac(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_mac(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
MACAddr *mac = object_field_prop_ptr(obj, prop);
int i, pos;
char *str;
@@ -360,7 +387,7 @@ inval:
}
const PropertyInfo qdev_prop_macaddr = {
- .name = "str",
+ .type = "str",
.description = "Ethernet 6-byte MAC Address, example: 52:54:00:12:34:56",
.get = get_mac,
.set = set_mac,
@@ -380,7 +407,7 @@ void qdev_prop_set_macaddr(DeviceState *dev, const char *name,
static void get_netdev(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
NICPeers *peers_ptr = object_field_prop_ptr(obj, prop);
char *p = g_strdup(peers_ptr->ncs[0] ? peers_ptr->ncs[0]->name : "");
@@ -391,7 +418,7 @@ static void get_netdev(Object *obj, Visitor *v, const char *name,
static void set_netdev(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
NICPeers *peers_ptr = object_field_prop_ptr(obj, prop);
NetClientState **ncs = peers_ptr->ncs;
NetClientState *peers[MAX_QUEUE_NUM];
@@ -448,7 +475,7 @@ out:
}
const PropertyInfo qdev_prop_netdev = {
- .name = "str",
+ .type = "str",
.description = "ID of a netdev to use as a backend",
.get = get_netdev,
.set = set_netdev,
@@ -459,7 +486,7 @@ const PropertyInfo qdev_prop_netdev = {
static void get_audiodev(Object *obj, Visitor *v, const char* name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
QEMUSoundCard *card = object_field_prop_ptr(obj, prop);
char *p = g_strdup(audio_get_id(card));
@@ -470,7 +497,7 @@ static void get_audiodev(Object *obj, Visitor *v, const char* name,
static void set_audiodev(Object *obj, Visitor *v, const char* name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
QEMUSoundCard *card = object_field_prop_ptr(obj, prop);
AudioState *state;
g_autofree char *str = NULL;
@@ -486,7 +513,7 @@ static void set_audiodev(Object *obj, Visitor *v, const char* name,
}
const PropertyInfo qdev_prop_audiodev = {
- .name = "str",
+ .type = "str",
.description = "ID of an audiodev to use as a backend",
/* release done on shutdown */
.get = get_audiodev,
@@ -552,7 +579,7 @@ static void qdev_propinfo_set_losttickpolicy(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int *ptr = object_field_prop_ptr(obj, prop);
int value;
@@ -576,7 +603,8 @@ static void qdev_propinfo_set_losttickpolicy(Object *obj, Visitor *v,
QEMU_BUILD_BUG_ON(sizeof(LostTickPolicy) != sizeof(int));
const PropertyInfo qdev_prop_losttickpolicy = {
- .name = "LostTickPolicy",
+ .type = "LostTickPolicy",
+ .description = "Policy for handling lost ticks (discard/delay/slew)",
.enum_table = &LostTickPolicy_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_losttickpolicy,
@@ -588,25 +616,21 @@ const PropertyInfo qdev_prop_losttickpolicy = {
static void set_blocksize(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
uint64_t value;
- Error *local_err = NULL;
if (!visit_type_size(v, name, &value, errp)) {
return;
}
- check_block_size(dev->id ? : "", name, value, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!check_block_size(name, value, errp)) {
return;
}
*ptr = value;
}
const PropertyInfo qdev_prop_blocksize = {
- .name = "size",
+ .type = "size",
.description = "A power of two between " MIN_BLOCK_SIZE_STR
" and " MAX_BLOCK_SIZE_STR,
.get = qdev_propinfo_get_size32,
@@ -619,9 +643,8 @@ const PropertyInfo qdev_prop_blocksize = {
QEMU_BUILD_BUG_ON(sizeof(BlockdevOnError) != sizeof(int));
const PropertyInfo qdev_prop_blockdev_on_error = {
- .name = "BlockdevOnError",
- .description = "Error handling policy, "
- "report/ignore/enospc/stop/auto",
+ .type = "BlockdevOnError",
+ .description = "Error handling policy (report/ignore/enospc/stop/auto)",
.enum_table = &BlockdevOnError_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -633,9 +656,9 @@ const PropertyInfo qdev_prop_blockdev_on_error = {
QEMU_BUILD_BUG_ON(sizeof(BiosAtaTranslation) != sizeof(int));
const PropertyInfo qdev_prop_bios_chs_trans = {
- .name = "BiosAtaTranslation",
- .description = "Logical CHS translation algorithm, "
- "auto/none/lba/large/rechs",
+ .type = "BiosAtaTranslation",
+ .description = "Logical CHS translation algorithm "
+ " (auto/none/lba/large/rechs)",
.enum_table = &BiosAtaTranslation_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -645,9 +668,8 @@ const PropertyInfo qdev_prop_bios_chs_trans = {
/* --- FDC default drive types */
const PropertyInfo qdev_prop_fdc_drive_type = {
- .name = "FdcDriveType",
- .description = "FDC drive type, "
- "144/288/120/none/auto",
+ .type = "FloppyDriveType",
+ .description = "Floppy drive type (144/288/120/none/auto)",
.enum_table = &FloppyDriveType_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -657,9 +679,9 @@ const PropertyInfo qdev_prop_fdc_drive_type = {
/* --- MultiFDCompression --- */
const PropertyInfo qdev_prop_multifd_compression = {
- .name = "MultiFDCompression",
- .description = "multifd_compression values, "
- "none/zlib/zstd/qpl/uadk",
+ .type = "MultiFDCompression",
+ .description = "multifd_compression values"
+ " (none/zlib/zstd/qpl/uadk/qatzip)",
.enum_table = &MultiFDCompression_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -671,9 +693,8 @@ const PropertyInfo qdev_prop_multifd_compression = {
QEMU_BUILD_BUG_ON(sizeof(MigMode) != sizeof(int));
const PropertyInfo qdev_prop_mig_mode = {
- .name = "MigMode",
- .description = "mig_mode values, "
- "normal,cpr-reboot",
+ .type = "MigMode",
+ .description = "Migration mode (normal/cpr-reboot)",
.enum_table = &MigMode_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -685,9 +706,8 @@ const PropertyInfo qdev_prop_mig_mode = {
QEMU_BUILD_BUG_ON(sizeof(GranuleMode) != sizeof(int));
const PropertyInfo qdev_prop_granule_mode = {
- .name = "GranuleMode",
- .description = "granule_mode values, "
- "4k, 8k, 16k, 64k, host",
+ .type = "GranuleMode",
+ .description = "Granule page size (4k/8k/16k/64k/host)",
.enum_table = &GranuleMode_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -695,9 +715,8 @@ const PropertyInfo qdev_prop_granule_mode = {
};
const PropertyInfo qdev_prop_zero_page_detection = {
- .name = "ZeroPageDetection",
- .description = "zero_page_detection values, "
- "none,legacy,multifd",
+ .type = "ZeroPageDetection",
+ .description = "Zero page detection (none/legacy/multifd)",
.enum_table = &ZeroPageDetection_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
@@ -715,7 +734,7 @@ const PropertyInfo qdev_prop_zero_page_detection = {
static void get_reserved_region(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
ReservedRegion *rr = object_field_prop_ptr(obj, prop);
char buffer[64];
char *p = buffer;
@@ -731,7 +750,7 @@ static void get_reserved_region(Object *obj, Visitor *v, const char *name,
static void set_reserved_region(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
ReservedRegion *rr = object_field_prop_ptr(obj, prop);
const char *endptr;
uint64_t lob, upb;
@@ -775,11 +794,10 @@ separator_error:
error_setg(errp, "reserved region fields must be separated with ':'");
out:
g_free(str);
- return;
}
const PropertyInfo qdev_prop_reserved_region = {
- .name = "reserved_region",
+ .type = "str",
.description = "Reserved Region, example: 0xFEE00000:0xFEEFFFFF:0",
.get = get_reserved_region,
.set = set_reserved_region,
@@ -793,43 +811,61 @@ const PropertyInfo qdev_prop_reserved_region = {
static void set_pci_devfn(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
+ g_autofree GenericAlternate *alt;
int32_t value, *ptr = object_field_prop_ptr(obj, prop);
unsigned int slot, fn, n;
- char *str;
+ g_autofree char *str = NULL;
+
+ if (!visit_start_alternate(v, name, &alt, sizeof(*alt), errp)) {
+ return;
+ }
+
+ switch (alt->type) {
+ case QTYPE_QSTRING:
+ if (!visit_type_str(v, name, &str, errp)) {
+ goto out;
+ }
- if (!visit_type_str(v, name, &str, NULL)) {
+ if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) {
+ fn = 0;
+ if (sscanf(str, "%x%n", &slot, &n) != 1) {
+ goto invalid;
+ }
+ }
+ if (str[n] != '\0' || fn > 7 || slot > 31) {
+ goto invalid;
+ }
+ *ptr = slot << 3 | fn;
+ break;
+
+ case QTYPE_QNUM:
if (!visit_type_int32(v, name, &value, errp)) {
- return;
+ goto out;
}
if (value < -1 || value > 255) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
name ? name : "null", "a value between -1 and 255");
- return;
+ goto out;
}
*ptr = value;
- return;
- }
+ break;
- if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) {
- fn = 0;
- if (sscanf(str, "%x%n", &slot, &n) != 1) {
- goto invalid;
- }
- }
- if (str[n] != '\0' || fn > 7 || slot > 31) {
- goto invalid;
+ default:
+ error_setg(errp, "Invalid parameter type for '%s', expected int or str",
+ name ? name : "null");
+ goto out;
}
- *ptr = slot << 3 | fn;
- g_free(str);
- return;
+
+ goto out;
invalid:
error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str);
- g_free(str);
+out:
+ visit_end_alternate(v, (void **) &alt);
}
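
With set_pci_devfn() rewritten around the alternate visitor, an "addr"-style property accepts either the traditional hex "slot.function" string or a plain integer devfn (handy for JSON-style -device/device_add). A sketch of both spellings from C; the device variable is assumed to be an unrealized PCI device that has such a property.

static void demo_set_pci_addr(DeviceState *dev)
{
    /* string form: hexadecimal "slot.function" */
    object_property_set_str(OBJECT(dev), "addr", "05.0", &error_fatal);

    /* integer form: the encoded devfn; slot 5, function 0 is 0x28 */
    qdev_prop_set_int32(dev, "addr", 0x28);
}
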
-static int print_pci_devfn(Object *obj, Property *prop, char *dest,
+static int print_pci_devfn(Object *obj, const Property *prop, char *dest,
size_t len)
{
int32_t *ptr = object_field_prop_ptr(obj, prop);
@@ -842,7 +878,7 @@ static int print_pci_devfn(Object *obj, Property *prop, char *dest,
}
const PropertyInfo qdev_prop_pci_devfn = {
- .name = "int32",
+ .type = "str",
.description = "Slot and optional function number, example: 06.0 or 06",
.print = print_pci_devfn,
.get = qdev_propinfo_get_int32,
@@ -855,7 +891,7 @@ const PropertyInfo qdev_prop_pci_devfn = {
static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop);
char buffer[] = "ffff:ff:ff.f";
char *p = buffer;
@@ -881,7 +917,7 @@ static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name,
static void set_pci_host_devaddr(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop);
char *str, *p;
char *e;
@@ -948,8 +984,8 @@ inval:
}
const PropertyInfo qdev_prop_pci_host_devaddr = {
- .name = "str",
- .description = "Address (bus/device/function) of "
+ .type = "str",
+ .description = "Address (bus:device.function) of "
"the host device, example: 04:10.0",
.get = get_pci_host_devaddr,
.set = set_pci_host_devaddr,
@@ -958,7 +994,7 @@ const PropertyInfo qdev_prop_pci_host_devaddr = {
/* --- OffAutoPCIBAR off/auto/bar0/bar1/bar2/bar3/bar4/bar5 --- */
const PropertyInfo qdev_prop_off_auto_pcibar = {
- .name = "OffAutoPCIBAR",
+ .type = "OffAutoPCIBAR",
.description = "off/auto/bar0/bar1/bar2/bar3/bar4/bar5",
.enum_table = &OffAutoPCIBAR_lookup,
.get = qdev_propinfo_get_enum,
@@ -971,7 +1007,7 @@ const PropertyInfo qdev_prop_off_auto_pcibar = {
static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop);
int speed;
@@ -1005,7 +1041,7 @@ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop);
int speed;
@@ -1040,7 +1076,7 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
}
const PropertyInfo qdev_prop_pcie_link_speed = {
- .name = "PCIELinkSpeed",
+ .type = "PCIELinkSpeed",
.description = "2_5/5/8/16/32/64",
.enum_table = &PCIELinkSpeed_lookup,
.get = get_prop_pcielinkspeed,
@@ -1053,7 +1089,7 @@ const PropertyInfo qdev_prop_pcie_link_speed = {
static void get_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop);
int width;
@@ -1090,7 +1126,7 @@ static void get_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name,
static void set_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop);
int width;
@@ -1128,7 +1164,7 @@ static void set_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name,
}
const PropertyInfo qdev_prop_pcie_link_width = {
- .name = "PCIELinkWidth",
+ .type = "PCIELinkWidth",
.description = "1/2/4/8/12/16/32",
.enum_table = &PCIELinkWidth_lookup,
.get = get_prop_pcielinkwidth,
@@ -1141,7 +1177,7 @@ const PropertyInfo qdev_prop_pcie_link_width = {
static void get_uuid(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
QemuUUID *uuid = object_field_prop_ptr(obj, prop);
char buffer[UUID_STR_LEN];
char *p = buffer;
@@ -1156,7 +1192,7 @@ static void get_uuid(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_uuid(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
QemuUUID *uuid = object_field_prop_ptr(obj, prop);
char *str;
@@ -1178,7 +1214,7 @@ static void set_default_uuid_auto(ObjectProperty *op, const Property *prop)
}
const PropertyInfo qdev_prop_uuid = {
- .name = "str",
+ .type = "str",
.description = "UUID (aka GUID) or \"" UUID_VALUE_AUTO
"\" for random value (default)",
.get = get_uuid,
@@ -1188,12 +1224,12 @@ const PropertyInfo qdev_prop_uuid = {
/* --- s390 cpu entitlement policy --- */
-QEMU_BUILD_BUG_ON(sizeof(CpuS390Entitlement) != sizeof(int));
+QEMU_BUILD_BUG_ON(sizeof(S390CpuEntitlement) != sizeof(int));
const PropertyInfo qdev_prop_cpus390entitlement = {
- .name = "CpuS390Entitlement",
- .description = "low/medium (default)/high",
- .enum_table = &CpuS390Entitlement_lookup,
+ .type = "S390CpuEntitlement",
+ .description = "auto/low/medium/high (default medium)",
+ .enum_table = &S390CpuEntitlement_lookup,
.get = qdev_propinfo_get_enum,
.set = qdev_propinfo_set_enum,
.set_default_value = qdev_propinfo_set_default_value_enum,
@@ -1236,10 +1272,30 @@ static void release_iothread_vq_mapping_list(Object *obj,
}
const PropertyInfo qdev_prop_iothread_vq_mapping_list = {
- .name = "IOThreadVirtQueueMappingList",
+ .type = "IOThreadVirtQueueMappingList",
.description = "IOThread virtqueue mapping list [{\"iothread\":\"<id>\", "
"\"vqs\":[1,2,3,...]},...]",
.get = get_iothread_vq_mapping_list,
.set = set_iothread_vq_mapping_list,
.release = release_iothread_vq_mapping_list,
};
+
+/* --- Endian modes */
+
+const PropertyInfo qdev_prop_endian_mode = {
+ .type = "EndianMode",
+ .description = "Endian mode, big/little/unspecified",
+ .enum_table = &EndianMode_lookup,
+ .get = qdev_propinfo_get_enum,
+ .set = qdev_propinfo_set_enum,
+ .set_default_value = qdev_propinfo_set_default_value_enum,
+};
+
+const PropertyInfo qdev_prop_vmapple_virtio_blk_variant = {
+ .type = "VMAppleVirtioBlkVariant",
+ .description = "unspecified/root/aux",
+ .enum_table = &VMAppleVirtioBlkVariant_lookup,
+ .get = qdev_propinfo_get_enum,
+ .set = qdev_propinfo_set_enum,
+ .set_default_value = qdev_propinfo_set_default_value_enum,
+};
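
The recurring change in this file is the PropertyInfo rename from .name to .type, plus friendlier descriptions for the enum-backed infos. For reference, a new enum-backed property definition under the renamed field would look like the sketch below; "DemoMode" and its QAPI-generated lookup table are hypothetical.

const PropertyInfo qdev_prop_demo_mode = {
    .type = "DemoMode",
    .description = "Demo operating mode (off/slow/fast)",
    .enum_table = &DemoMode_lookup,
    .get = qdev_propinfo_get_enum,
    .set = qdev_propinfo_set_enum,
    .set_default_value = qdev_propinfo_set_default_value_enum,
};
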
diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c
index 86a5835..147b3ff 100644
--- a/hw/core/qdev-properties.c
+++ b/hw/core/qdev-properties.c
@@ -2,7 +2,7 @@
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qapi/qapi-types-misc.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
@@ -51,7 +51,7 @@ void qdev_prop_allow_set_link_before_realize(const Object *obj,
}
}
-void *object_field_prop_ptr(Object *obj, Property *prop)
+void *object_field_prop_ptr(Object *obj, const Property *prop)
{
void *ptr = obj;
ptr += prop->offset;
@@ -61,7 +61,7 @@ void *object_field_prop_ptr(Object *obj, Property *prop)
static void field_prop_get(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
return prop->info->get(obj, v, name, opaque, errp);
}
@@ -78,7 +78,7 @@ static ObjectPropertyAccessor *field_prop_getter(const PropertyInfo *info)
static void field_prop_set(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
if (!qdev_prop_allow_set(obj, name, prop->info, errp)) {
return;
@@ -100,7 +100,7 @@ static ObjectPropertyAccessor *field_prop_setter(const PropertyInfo *info)
void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int *ptr = object_field_prop_ptr(obj, prop);
visit_type_enum(v, name, ptr, prop->info->enum_table, errp);
@@ -109,7 +109,7 @@ void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name,
void qdev_propinfo_set_enum(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int *ptr = object_field_prop_ptr(obj, prop);
visit_type_enum(v, name, ptr, prop->info->enum_table, errp);
@@ -122,22 +122,15 @@ void qdev_propinfo_set_default_value_enum(ObjectProperty *op,
qapi_enum_lookup(prop->info->enum_table, prop->defval.i));
}
-const PropertyInfo qdev_prop_enum = {
- .name = "enum",
- .get = qdev_propinfo_get_enum,
- .set = qdev_propinfo_set_enum,
- .set_default_value = qdev_propinfo_set_default_value_enum,
-};
-
/* Bit */
-static uint32_t qdev_get_prop_mask(Property *prop)
+static uint32_t qdev_get_prop_mask(const Property *prop)
{
assert(prop->info == &qdev_prop_bit);
return 0x1 << prop->bitnr;
}
-static void bit_prop_set(Object *obj, Property *props, bool val)
+static void bit_prop_set(Object *obj, const Property *props, bool val)
{
uint32_t *p = object_field_prop_ptr(obj, props);
uint32_t mask = qdev_get_prop_mask(props);
@@ -151,7 +144,7 @@ static void bit_prop_set(Object *obj, Property *props, bool val)
static void prop_get_bit(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *p = object_field_prop_ptr(obj, prop);
bool value = (*p & qdev_get_prop_mask(prop)) != 0;
@@ -161,7 +154,7 @@ static void prop_get_bit(Object *obj, Visitor *v, const char *name,
static void prop_set_bit(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
bool value;
if (!visit_type_bool(v, name, &value, errp)) {
@@ -176,7 +169,7 @@ static void set_default_value_bool(ObjectProperty *op, const Property *prop)
}
const PropertyInfo qdev_prop_bit = {
- .name = "bool",
+ .type = "bool",
.description = "on/off",
.get = prop_get_bit,
.set = prop_set_bit,
@@ -185,13 +178,13 @@ const PropertyInfo qdev_prop_bit = {
/* Bit64 */
-static uint64_t qdev_get_prop_mask64(Property *prop)
+static uint64_t qdev_get_prop_mask64(const Property *prop)
{
assert(prop->info == &qdev_prop_bit64);
return 0x1ull << prop->bitnr;
}
-static void bit64_prop_set(Object *obj, Property *props, bool val)
+static void bit64_prop_set(Object *obj, const Property *props, bool val)
{
uint64_t *p = object_field_prop_ptr(obj, props);
uint64_t mask = qdev_get_prop_mask64(props);
@@ -205,7 +198,7 @@ static void bit64_prop_set(Object *obj, Property *props, bool val)
static void prop_get_bit64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *p = object_field_prop_ptr(obj, prop);
bool value = (*p & qdev_get_prop_mask64(prop)) != 0;
@@ -215,7 +208,7 @@ static void prop_get_bit64(Object *obj, Visitor *v, const char *name,
static void prop_set_bit64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
bool value;
if (!visit_type_bool(v, name, &value, errp)) {
@@ -225,7 +218,7 @@ static void prop_set_bit64(Object *obj, Visitor *v, const char *name,
}
const PropertyInfo qdev_prop_bit64 = {
- .name = "bool",
+ .type = "bool",
.description = "on/off",
.get = prop_get_bit64,
.set = prop_set_bit64,
@@ -237,7 +230,7 @@ const PropertyInfo qdev_prop_bit64 = {
static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
bool *ptr = object_field_prop_ptr(obj, prop);
visit_type_bool(v, name, ptr, errp);
@@ -246,14 +239,15 @@ static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_bool(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
bool *ptr = object_field_prop_ptr(obj, prop);
visit_type_bool(v, name, ptr, errp);
}
const PropertyInfo qdev_prop_bool = {
- .name = "bool",
+ .type = "bool",
+ .description = "on/off",
.get = get_bool,
.set = set_bool,
.set_default_value = set_default_value_bool,
@@ -264,7 +258,7 @@ const PropertyInfo qdev_prop_bool = {
static void get_uint8(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint8_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint8(v, name, ptr, errp);
@@ -273,7 +267,7 @@ static void get_uint8(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_uint8(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint8_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint8(v, name, ptr, errp);
@@ -292,7 +286,7 @@ void qdev_propinfo_set_default_value_uint(ObjectProperty *op,
}
const PropertyInfo qdev_prop_uint8 = {
- .name = "uint8",
+ .type = "uint8",
.get = get_uint8,
.set = set_uint8,
.set_default_value = qdev_propinfo_set_default_value_uint,
@@ -303,7 +297,7 @@ const PropertyInfo qdev_prop_uint8 = {
static void get_uint16(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint16_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint16(v, name, ptr, errp);
@@ -312,14 +306,14 @@ static void get_uint16(Object *obj, Visitor *v, const char *name,
static void set_uint16(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint16_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint16(v, name, ptr, errp);
}
const PropertyInfo qdev_prop_uint16 = {
- .name = "uint16",
+ .type = "uint16",
.get = get_uint16,
.set = set_uint16,
.set_default_value = qdev_propinfo_set_default_value_uint,
@@ -330,7 +324,7 @@ const PropertyInfo qdev_prop_uint16 = {
static void get_uint32(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint32(v, name, ptr, errp);
@@ -339,7 +333,7 @@ static void get_uint32(Object *obj, Visitor *v, const char *name,
static void set_uint32(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint32(v, name, ptr, errp);
@@ -348,7 +342,7 @@ static void set_uint32(Object *obj, Visitor *v, const char *name,
void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int32_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_int32(v, name, ptr, errp);
@@ -357,21 +351,21 @@ void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name,
static void set_int32(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int32_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_int32(v, name, ptr, errp);
}
const PropertyInfo qdev_prop_uint32 = {
- .name = "uint32",
+ .type = "uint32",
.get = get_uint32,
.set = set_uint32,
.set_default_value = qdev_propinfo_set_default_value_uint,
};
const PropertyInfo qdev_prop_int32 = {
- .name = "int32",
+ .type = "int32",
.get = qdev_propinfo_get_int32,
.set = set_int32,
.set_default_value = qdev_propinfo_set_default_value_int,
@@ -382,7 +376,7 @@ const PropertyInfo qdev_prop_int32 = {
static void get_uint64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint64(v, name, ptr, errp);
@@ -391,7 +385,7 @@ static void get_uint64(Object *obj, Visitor *v, const char *name,
static void set_uint64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint64(v, name, ptr, errp);
@@ -400,7 +394,7 @@ static void set_uint64(Object *obj, Visitor *v, const char *name,
static void get_int64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_int64(v, name, ptr, errp);
@@ -409,21 +403,21 @@ static void get_int64(Object *obj, Visitor *v, const char *name,
static void set_int64(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
int64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_int64(v, name, ptr, errp);
}
const PropertyInfo qdev_prop_uint64 = {
- .name = "uint64",
+ .type = "uint64",
.get = get_uint64,
.set = set_uint64,
.set_default_value = qdev_propinfo_set_default_value_uint,
};
const PropertyInfo qdev_prop_int64 = {
- .name = "int64",
+ .type = "int64",
.get = get_int64,
.set = set_int64,
.set_default_value = qdev_propinfo_set_default_value_int,
@@ -432,7 +426,7 @@ const PropertyInfo qdev_prop_int64 = {
static void set_uint64_checkmask(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint64(v, name, ptr, errp);
@@ -443,23 +437,60 @@ static void set_uint64_checkmask(Object *obj, Visitor *v, const char *name,
}
const PropertyInfo qdev_prop_uint64_checkmask = {
- .name = "uint64",
+ .type = "uint64",
.get = get_uint64,
.set = set_uint64_checkmask,
};
+/* --- pointer-size integer --- */
+
+static void get_usize(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
+{
+ const Property *prop = opaque;
+
+#if HOST_LONG_BITS == 32
+ uint32_t *ptr = object_field_prop_ptr(obj, prop);
+ visit_type_uint32(v, name, ptr, errp);
+#else
+ uint64_t *ptr = object_field_prop_ptr(obj, prop);
+ visit_type_uint64(v, name, ptr, errp);
+#endif
+}
+
+static void set_usize(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
+{
+ const Property *prop = opaque;
+
+#if HOST_LONG_BITS == 32
+ uint32_t *ptr = object_field_prop_ptr(obj, prop);
+ visit_type_uint32(v, name, ptr, errp);
+#else
+ uint64_t *ptr = object_field_prop_ptr(obj, prop);
+ visit_type_uint64(v, name, ptr, errp);
+#endif
+}
+
+const PropertyInfo qdev_prop_usize = {
+ .type = "usize",
+ .get = get_usize,
+ .set = set_usize,
+ .set_default_value = qdev_propinfo_set_default_value_uint,
+};
+
/* --- string --- */
static void release_string(Object *obj, const char *name, void *opaque)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
g_free(*(char **)object_field_prop_ptr(obj, prop));
}
static void get_string(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
char **ptr = object_field_prop_ptr(obj, prop);
if (!*ptr) {
@@ -473,7 +504,7 @@ static void get_string(Object *obj, Visitor *v, const char *name,
static void set_string(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
char **ptr = object_field_prop_ptr(obj, prop);
char *str;
@@ -485,7 +516,7 @@ static void set_string(Object *obj, Visitor *v, const char *name,
}
const PropertyInfo qdev_prop_string = {
- .name = "str",
+ .type = "str",
.release = release_string,
.get = get_string,
.set = set_string,
@@ -494,7 +525,7 @@ const PropertyInfo qdev_prop_string = {
/* --- on/off/auto --- */
const PropertyInfo qdev_prop_on_off_auto = {
- .name = "OnOffAuto",
+ .type = "OnOffAuto",
.description = "on/off/auto",
.enum_table = &OnOffAuto_lookup,
.get = qdev_propinfo_get_enum,
@@ -507,7 +538,7 @@ const PropertyInfo qdev_prop_on_off_auto = {
void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
uint64_t value = *ptr;
@@ -517,7 +548,7 @@ void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name,
static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
uint64_t value;
@@ -537,7 +568,7 @@ static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque,
}
const PropertyInfo qdev_prop_size32 = {
- .name = "size",
+ .type = "size",
.get = qdev_propinfo_get_size32,
.set = set_size32,
.set_default_value = qdev_propinfo_set_default_value_uint,
@@ -557,7 +588,7 @@ struct ArrayElementList {
* specific element of the array. Arrays are backed by an uint32_t length field
* and an element array. @elem points at an element in this element array.
*/
-static Property array_elem_prop(Object *obj, Property *parent_prop,
+static Property array_elem_prop(Object *obj, const Property *parent_prop,
const char *name, char *elem)
{
return (Property) {
@@ -582,7 +613,7 @@ static Property array_elem_prop(Object *obj, Property *parent_prop,
*/
static void release_prop_array(Object *obj, const char *name, void *opaque)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *alenptr = object_field_prop_ptr(obj, prop);
void **arrayptr = (void *)obj + prop->arrayoffset;
char *elem = *arrayptr;
@@ -609,7 +640,7 @@ static void set_prop_array(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ERRP_GUARD();
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *alenptr = object_field_prop_ptr(obj, prop);
void **arrayptr = (void *)obj + prop->arrayoffset;
ArrayElementList *list, *elem, *next;
@@ -685,7 +716,7 @@ static void get_prop_array(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ERRP_GUARD();
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *alenptr = object_field_prop_ptr(obj, prop);
void **arrayptr = (void *)obj + prop->arrayoffset;
char *elemptr = *arrayptr;
@@ -740,7 +771,7 @@ static void default_prop_array(ObjectProperty *op, const Property *prop)
}
const PropertyInfo qdev_prop_array = {
- .name = "list",
+ .type = "list",
.get = get_prop_array,
.set = set_prop_array,
.release = release_prop_array,
@@ -749,29 +780,26 @@ const PropertyInfo qdev_prop_array = {
/* --- public helpers --- */
-static Property *qdev_prop_walk(Property *props, const char *name)
+static const Property *qdev_prop_walk(DeviceClass *cls, const char *name)
{
- if (!props) {
- return NULL;
- }
- while (props->name) {
- if (strcmp(props->name, name) == 0) {
- return props;
+ for (int i = 0, n = cls->props_count_; i < n; ++i) {
+ const Property *prop = &cls->props_[i];
+ if (strcmp(prop->name, name) == 0) {
+ return prop;
}
- props++;
}
return NULL;
}
-static Property *qdev_prop_find(DeviceState *dev, const char *name)
+static const Property *qdev_prop_find(DeviceState *dev, const char *name)
{
ObjectClass *class;
- Property *prop;
+ const Property *prop;
/* device properties */
class = object_get_class(OBJECT(dev));
do {
- prop = qdev_prop_walk(DEVICE_CLASS(class)->props_, name);
+ prop = qdev_prop_walk(DEVICE_CLASS(class), name);
if (prop) {
return prop;
}
@@ -840,7 +868,7 @@ void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value)
void qdev_prop_set_enum(DeviceState *dev, const char *name, int value)
{
- Property *prop;
+ const Property *prop;
prop = qdev_prop_find(dev, name);
object_property_set_str(OBJECT(dev), name,
@@ -931,7 +959,7 @@ void qdev_prop_set_globals(DeviceState *dev)
static void get_size(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_size(v, name, ptr, errp);
@@ -940,14 +968,14 @@ static void get_size(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_size(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint64_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_size(v, name, ptr, errp);
}
const PropertyInfo qdev_prop_size = {
- .name = "size",
+ .type = "size",
.get = get_size,
.set = set_size,
.set_default_value = qdev_propinfo_set_default_value_uint,
@@ -956,7 +984,7 @@ const PropertyInfo qdev_prop_size = {
/* --- object link property --- */
static ObjectProperty *create_link_property(ObjectClass *oc, const char *name,
- Property *prop)
+ const Property *prop)
{
return object_class_property_add_link(oc, name, prop->link_type,
prop->offset,
@@ -965,22 +993,22 @@ static ObjectProperty *create_link_property(ObjectClass *oc, const char *name,
}
const PropertyInfo qdev_prop_link = {
- .name = "link",
+ .type = "link",
.create = create_link_property,
};
-void qdev_property_add_static(DeviceState *dev, Property *prop)
+void qdev_property_add_static(DeviceState *dev, const Property *prop)
{
Object *obj = OBJECT(dev);
ObjectProperty *op;
assert(!prop->info->create);
- op = object_property_add(obj, prop->name, prop->info->name,
+ op = object_property_add(obj, prop->name, prop->info->type,
field_prop_getter(prop->info),
field_prop_setter(prop->info),
prop->info->release,
- prop);
+ (Property *)prop);
object_property_set_description(obj, prop->name,
prop->info->description);
@@ -994,7 +1022,7 @@ void qdev_property_add_static(DeviceState *dev, Property *prop)
}
static void qdev_class_add_property(DeviceClass *klass, const char *name,
- Property *prop)
+ const Property *prop)
{
ObjectClass *oc = OBJECT_CLASS(klass);
ObjectProperty *op;
@@ -1003,11 +1031,11 @@ static void qdev_class_add_property(DeviceClass *klass, const char *name,
op = prop->info->create(oc, name, prop);
} else {
op = object_class_property_add(oc,
- name, prop->info->name,
+ name, prop->info->type,
field_prop_getter(prop->info),
field_prop_setter(prop->info),
prop->info->release,
- prop);
+ (Property *)prop);
}
if (prop->set_default) {
prop->info->set_default_value(op, prop);
@@ -1023,7 +1051,7 @@ static void qdev_get_legacy_property(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
char buffer[1024];
char *ptr = buffer;
@@ -1046,7 +1074,7 @@ static void qdev_get_legacy_property(Object *obj, Visitor *v,
* Do not use this in new code! QOM Properties added through this interface
* will be given names in the "legacy" namespace.
*/
-static void qdev_class_add_legacy_property(DeviceClass *dc, Property *prop)
+static void qdev_class_add_legacy_property(DeviceClass *dc, const Property *prop)
{
g_autofree char *name = NULL;
@@ -1058,15 +1086,21 @@ static void qdev_class_add_legacy_property(DeviceClass *dc, Property *prop)
name = g_strdup_printf("legacy-%s", prop->name);
object_class_property_add(OBJECT_CLASS(dc), name, "str",
prop->info->print ? qdev_get_legacy_property : prop->info->get,
- NULL, NULL, prop);
+ NULL, NULL, (Property *)prop);
}
-void device_class_set_props(DeviceClass *dc, Property *props)
+void device_class_set_props_n(DeviceClass *dc, const Property *props, size_t n)
{
- Property *prop;
+ /* props_count_ fits in a 16-bit hole in DeviceClass; 65535 properties is still plenty. */
+ assert(n <= UINT16_MAX);
+ assert(n != 0);
dc->props_ = props;
- for (prop = props; prop && prop->name; prop++) {
+ dc->props_count_ = n;
+
+ for (size_t i = 0; i < n; ++i) {
+ const Property *prop = &props[i];
+ assert(prop->name);
qdev_class_add_legacy_property(dc, prop);
qdev_class_add_property(dc, prop->name, prop);
}
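
device_class_set_props_n() records the property array together with an explicit element count, which is what allows the DEFINE_PROP_END_OF_LIST() terminators to disappear elsewhere in this series. A sketch of the calling convention; the device type is made up, and in-tree callers would normally go through a device_class_set_props() convenience macro that passes ARRAY_SIZE() for them (treat that macro as an assumption here).

typedef struct DemoDevState {
    DeviceState parent_obj;
    uint32_t num_buffers;
    bool strict;
} DemoDevState;

static const Property demo_dev_properties[] = {
    DEFINE_PROP_UINT32("buffers", DemoDevState, num_buffers, 4),
    DEFINE_PROP_BOOL("strict", DemoDevState, strict, false),
    /* no terminator entry */
};

static void demo_dev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props_n(dc, demo_dev_properties,
                             ARRAY_SIZE(demo_dev_properties));
}
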
diff --git a/hw/core/qdev-user.c b/hw/core/qdev-user.c
new file mode 100644
index 0000000..3d421d8
--- /dev/null
+++ b/hw/core/qdev-user.c
@@ -0,0 +1,19 @@
+/*
+ * QDev helpers specific to user emulation.
+ *
+ * Copyright 2025 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "qom/object.h"
+#include "hw/qdev-core.h"
+
+void qdev_create_fake_machine(void)
+{
+ Object *fake_machine_obj;
+
+ fake_machine_obj = object_property_add_new_container(object_get_root(),
+ "machine");
+ object_property_add_new_container(fake_machine_obj, "unattached");
+}
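A minimal usage sketch, assuming qdev_create_fake_machine() is called early during user-mode start-up: the containers it creates let the usual path lookups succeed even though no real machine object exists.

qdev_create_fake_machine();

/* the same paths a system emulator provides now resolve */
Object *unattached = object_resolve_path("/machine/unattached", NULL);
g_assert(unattached != NULL);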
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index f3a996f..f600226 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -28,7 +28,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-events-qdev.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
@@ -146,31 +146,16 @@ bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp)
DeviceState *qdev_new(const char *name)
{
- ObjectClass *oc = object_class_by_name(name);
-#ifdef CONFIG_MODULES
- if (!oc) {
- int rv = module_load_qom(name, &error_fatal);
- if (rv > 0) {
- oc = object_class_by_name(name);
- } else {
- error_report("could not find a module for type '%s'", name);
- exit(1);
- }
- }
-#endif
- if (!oc) {
- error_report("unknown type '%s'", name);
- abort();
- }
return DEVICE(object_new(name));
}
DeviceState *qdev_try_new(const char *name)
{
- if (!module_object_class_by_name(name)) {
+ ObjectClass *oc = module_object_class_by_name(name);
+ if (!oc) {
return NULL;
}
- return DEVICE(object_new(name));
+ return DEVICE(object_new_with_class(oc));
}
static QTAILQ_HEAD(, DeviceListener) device_listeners
@@ -491,8 +476,7 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
if (!obj->parent) {
gchar *name = g_strdup_printf("device[%d]", unattached_count++);
- object_property_add_child(container_get(qdev_get_machine(),
- "/unattached"),
+ object_property_add_child(machine_get_container("unattached"),
name, obj);
unattached_parent = true;
g_free(name);
@@ -706,11 +690,10 @@ static void device_finalize(Object *obj)
dev->canonical_path = NULL;
}
- qobject_unref(dev->opts);
g_free(dev->id);
}
-static void device_class_base_init(ObjectClass *class, void *data)
+static void device_class_base_init(ObjectClass *class, const void *data)
{
DeviceClass *klass = DEVICE_CLASS(class);
@@ -718,6 +701,7 @@ static void device_class_base_init(ObjectClass *class, void *data)
* so do not propagate them to the subclasses.
*/
klass->props_ = NULL;
+ klass->props_count_ = 0;
}
static void device_unparent(Object *obj)
@@ -747,58 +731,7 @@ device_vmstate_if_get_id(VMStateIf *obj)
return qdev_get_dev_path(dev);
}
-/**
- * device_phases_reset:
- * Transition reset method for devices to allow moving
- * smoothly from legacy reset method to multi-phases
- */
-static void device_phases_reset(DeviceState *dev)
-{
- ResettableClass *rc = RESETTABLE_GET_CLASS(dev);
-
- if (rc->phases.enter) {
- rc->phases.enter(OBJECT(dev), RESET_TYPE_COLD);
- }
- if (rc->phases.hold) {
- rc->phases.hold(OBJECT(dev), RESET_TYPE_COLD);
- }
- if (rc->phases.exit) {
- rc->phases.exit(OBJECT(dev), RESET_TYPE_COLD);
- }
-}
-
-static void device_transitional_reset(Object *obj)
-{
- DeviceClass *dc = DEVICE_GET_CLASS(obj);
-
- /*
- * This will call either @device_phases_reset (for multi-phases transitioned
- * devices) or a device's specific method for not-yet transitioned devices.
- * In both case, it does not reset children.
- */
- if (dc->reset) {
- dc->reset(DEVICE(obj));
- }
-}
-
-/**
- * device_get_transitional_reset:
- * check if the device's class is ready for multi-phase
- */
-static ResettableTrFunction device_get_transitional_reset(Object *obj)
-{
- DeviceClass *dc = DEVICE_GET_CLASS(obj);
- if (dc->reset != device_phases_reset) {
- /*
- * dc->reset has been overridden by a subclass,
- * the device is not ready for multi phase yet.
- */
- return device_transitional_reset;
- }
- return NULL;
-}
-
-static void device_class_init(ObjectClass *class, void *data)
+static void device_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
VMStateIfClass *vc = VMSTATE_IF_CLASS(class);
@@ -819,20 +752,12 @@ static void device_class_init(ObjectClass *class, void *data)
rc->child_foreach = device_reset_child_foreach;
/*
- * @device_phases_reset is put as the default reset method below, allowing
- * to do the multi-phase transition from base classes to leaf classes. It
- * allows a legacy-reset Device class to extend a multi-phases-reset
- * Device class for the following reason:
- * + If a base class B has been moved to multi-phase, then it does not
- * override this default reset method and may have defined phase methods.
- * + A child class C (extending class B) which uses
- * device_class_set_parent_reset() (or similar means) to override the
- * reset method will still work as expected. @device_phases_reset function
- * will be registered as the parent reset method and effectively call
- * parent reset phases.
+     * A NULL legacy_reset implies a three-phase reset device. Devices can
+     * only be reset using three-phase aware mechanisms, but for transitional
+     * purposes we still support leaf classes which set the old legacy_reset
+     * method via device_class_set_legacy_reset().
*/
- dc->reset = device_phases_reset;
- rc->get_transitional_function = device_get_transitional_reset;
+ dc->legacy_reset = NULL;
object_class_property_add_bool(class, "realized",
device_get_realized, device_set_realized);
@@ -844,12 +769,30 @@ static void device_class_init(ObjectClass *class, void *data)
offsetof(DeviceState, parent_bus), NULL, 0);
}
-void device_class_set_parent_reset(DeviceClass *dc,
- DeviceReset dev_reset,
- DeviceReset *parent_reset)
+static void do_legacy_reset(Object *obj, ResetType type)
{
- *parent_reset = dc->reset;
- dc->reset = dev_reset;
+ DeviceClass *dc = DEVICE_GET_CLASS(obj);
+
+ dc->legacy_reset(DEVICE(obj));
+}
+
+void device_class_set_legacy_reset(DeviceClass *dc, DeviceReset dev_reset)
+{
+ /*
+ * A legacy DeviceClass::reset has identical semantics to the
+ * three-phase "hold" method, with no "enter" or "exit"
+ * behaviour. Classes that use this legacy function must be leaf
+ * classes that do not chain up to their parent class reset.
+ * There is no mechanism for resetting a device that does not
+ * use the three-phase APIs, so the only place which calls
+ * the legacy_reset hook is do_legacy_reset().
+ */
+ ResettableClass *rc = RESETTABLE_CLASS(dc);
+
+ rc->phases.enter = NULL;
+ rc->phases.hold = do_legacy_reset;
+ rc->phases.exit = NULL;
+ dc->legacy_reset = dev_reset;
}
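A minimal sketch (demo_* names hypothetical) of a leaf class that still uses the legacy hook; under the new scheme this is equivalent to registering only a three-phase "hold" handler:

static void demo_legacy_reset(DeviceState *dev)
{
    /* single-function reset body, with no enter/exit behaviour */
}

static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, demo_legacy_reset);
}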
void device_class_set_parent_realize(DeviceClass *dc,
@@ -873,12 +816,28 @@ Object *qdev_get_machine(void)
static Object *dev;
if (dev == NULL) {
- dev = container_get(object_get_root(), "/machine");
+ dev = object_resolve_path_component(object_get_root(), "machine");
+ /*
+         * Any call to this function before the machine is created is
+         * treated as a programming error.
+ */
+ assert(dev);
}
return dev;
}
+Object *machine_get_container(const char *name)
+{
+ Object *container, *machine;
+
+ machine = qdev_get_machine();
+ container = object_resolve_path_component(machine, name);
+ assert(object_dynamic_cast(container, TYPE_CONTAINER));
+
+ return container;
+}
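A minimal sketch of the new helper replacing the old container_get() idiom when parenting an orphan device, mirroring the device_set_realized() change above (dev is assumed to be the device being attached):

object_property_add_child(machine_get_container("unattached"),
                          "demo-device[0]", OBJECT(dev));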
+
char *qdev_get_human_name(DeviceState *dev)
{
g_assert(dev != NULL);
@@ -911,7 +870,7 @@ static const TypeInfo device_type_info = {
.class_init = device_class_init,
.abstract = true,
.class_size = sizeof(DeviceClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_VMSTATE_IF },
{ TYPE_RESETTABLE_INTERFACE },
{ }
diff --git a/hw/core/register.c b/hw/core/register.c
index 95b0150..8f63d9f 100644
--- a/hw/core/register.c
+++ b/hw/core/register.c
@@ -319,7 +319,7 @@ void register_finalize_block(RegisterInfoArray *r_array)
g_free(r_array);
}
-static void register_class_init(ObjectClass *oc, void *data)
+static void register_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/core/reset.c b/hw/core/reset.c
index 58dfc8d..65f82fa 100644
--- a/hw/core/reset.c
+++ b/hw/core/reset.c
@@ -24,7 +24,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/resettable.h"
#include "hw/core/resetcontainer.h"
@@ -84,7 +84,7 @@ static void legacy_reset_finalize(Object *obj)
{
}
-static void legacy_reset_class_init(ObjectClass *klass, void *data)
+static void legacy_reset_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -170,11 +170,8 @@ void qemu_unregister_resettable(Object *obj)
resettable_container_remove(get_root_reset_container(), obj);
}
-void qemu_devices_reset(ShutdownCause reason)
+void qemu_devices_reset(ResetType type)
{
- ResetType type = (reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD) ?
- RESET_TYPE_SNAPSHOT_LOAD : RESET_TYPE_COLD;
-
/* Reset the simulation */
resettable_reset(OBJECT(get_root_reset_container()), type);
}
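With the ShutdownCause-to-ResetType mapping removed here, a caller that previously passed the cause now does the conversion itself; a minimal sketch mirroring the deleted lines:

ResetType type = (reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD)
                 ? RESET_TYPE_SNAPSHOT_LOAD : RESET_TYPE_COLD;
qemu_devices_reset(type);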
diff --git a/hw/core/resetcontainer.c b/hw/core/resetcontainer.c
index e4ece68..5ff1700 100644
--- a/hw/core/resetcontainer.c
+++ b/hw/core/resetcontainer.c
@@ -68,7 +68,8 @@ static void resettable_container_finalize(Object *obj)
{
}
-static void resettable_container_class_init(ObjectClass *klass, void *data)
+static void resettable_container_class_init(ObjectClass *klass,
+ const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/core/resettable.c b/hw/core/resettable.c
index 6dd3e3d..5cdb4a4 100644
--- a/hw/core/resettable.c
+++ b/hw/core/resettable.c
@@ -93,20 +93,6 @@ static void resettable_child_foreach(ResettableClass *rc, Object *obj,
}
}
-/**
- * resettable_get_tr_func:
- * helper to fetch transitional reset callback if any.
- */
-static ResettableTrFunction resettable_get_tr_func(ResettableClass *rc,
- Object *obj)
-{
- ResettableTrFunction tr_func = NULL;
- if (rc->get_transitional_function) {
- tr_func = rc->get_transitional_function(obj);
- }
- return tr_func;
-}
-
static void resettable_phase_enter(Object *obj, void *opaque, ResetType type)
{
ResettableClass *rc = RESETTABLE_GET_CLASS(obj);
@@ -146,7 +132,7 @@ static void resettable_phase_enter(Object *obj, void *opaque, ResetType type)
if (action_needed) {
trace_resettable_phase_enter_exec(obj, obj_typename, type,
!!rc->phases.enter);
- if (rc->phases.enter && !resettable_get_tr_func(rc, obj)) {
+ if (rc->phases.enter) {
rc->phases.enter(obj, type);
}
s->hold_phase_pending = true;
@@ -171,12 +157,8 @@ static void resettable_phase_hold(Object *obj, void *opaque, ResetType type)
/* exec hold phase */
if (s->hold_phase_pending) {
s->hold_phase_pending = false;
- ResettableTrFunction tr_func = resettable_get_tr_func(rc, obj);
trace_resettable_phase_hold_exec(obj, obj_typename, !!rc->phases.hold);
- if (tr_func) {
- trace_resettable_transitional_function(obj, obj_typename);
- tr_func(obj);
- } else if (rc->phases.hold) {
+ if (rc->phases.hold) {
rc->phases.hold(obj, type);
}
}
@@ -199,7 +181,7 @@ static void resettable_phase_exit(Object *obj, void *opaque, ResetType type)
assert(s->count > 0);
if (--s->count == 0) {
trace_resettable_phase_exit_exec(obj, obj_typename, !!rc->phases.exit);
- if (rc->phases.exit && !resettable_get_tr_func(rc, obj)) {
+ if (rc->phases.exit) {
rc->phases.exit(obj, type);
}
}
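Since the transitional-reset indirection is gone, every Resettable implementation is expected to install phase hooks directly; a minimal sketch (demo_* names hypothetical):

static void demo_reset_hold(Object *obj, ResetType type)
{
    /* put the device into its reset state; enter/exit hooks are optional */
}

static void demo_class_init(ObjectClass *klass, const void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    rc->phases.hold = demo_reset_hold;
}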
diff --git a/hw/core/split-irq.c b/hw/core/split-irq.c
index 3b90af2..f8b4875 100644
--- a/hw/core/split-irq.c
+++ b/hw/core/split-irq.c
@@ -59,12 +59,11 @@ static void split_irq_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, s->out_irq, s->num_lines);
}
-static Property split_irq_properties[] = {
+static const Property split_irq_properties[] = {
DEFINE_PROP_UINT16("num-lines", SplitIRQ, num_lines, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void split_irq_class_init(ObjectClass *klass, void *data)
+static void split_irq_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/core/sysbus-fdt.c b/hw/core/sysbus-fdt.c
index eebcd28..c339a27 100644
--- a/hw/core/sysbus-fdt.c
+++ b/hw/core/sysbus-fdt.c
@@ -29,13 +29,15 @@
#endif
#include "hw/core/sysbus-fdt.h"
#include "qemu/error-report.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/tpm.h"
+#include "system/device_tree.h"
+#include "system/tpm.h"
#include "hw/platform-bus.h"
#include "hw/vfio/vfio-platform.h"
#include "hw/vfio/vfio-calxeda-xgmac.h"
#include "hw/vfio/vfio-amd-xgbe.h"
+#include "hw/vfio/vfio-region.h"
#include "hw/display/ramfb.h"
+#include "hw/uefi/var-service-api.h"
#include "hw/arm/fdt.h"
/*
@@ -471,6 +473,28 @@ static int add_tpm_tis_fdt_node(SysBusDevice *sbdev, void *opaque)
}
#endif
+static int add_uefi_vars_node(SysBusDevice *sbdev, void *opaque)
+{
+ PlatformBusFDTData *data = opaque;
+ PlatformBusDevice *pbus = data->pbus;
+ const char *parent_node = data->pbus_node_name;
+ void *fdt = data->fdt;
+ uint64_t mmio_base;
+ char *nodename;
+
+ mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
+ nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node,
+ UEFI_VARS_FDT_NODE, mmio_base);
+ qemu_fdt_add_subnode(fdt, nodename);
+ qemu_fdt_setprop_string(fdt, nodename,
+ "compatible", UEFI_VARS_FDT_COMPAT);
+ qemu_fdt_setprop_sized_cells(fdt, nodename, "reg",
+ 1, mmio_base,
+ 1, UEFI_VARS_REGS_SIZE);
+ g_free(nodename);
+ return 0;
+}
+
static int no_fdt_node(SysBusDevice *sbdev, void *opaque)
{
return 0;
@@ -495,6 +519,7 @@ static const BindingEntry bindings[] = {
TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node),
#endif
TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node),
+ TYPE_BINDING(TYPE_UEFI_VARS_SYSBUS, add_uefi_vars_node),
TYPE_BINDING("", NULL), /* last element */
};
diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c
index ad34fb7..e71367a 100644
--- a/hw/core/sysbus.c
+++ b/hw/core/sysbus.c
@@ -19,10 +19,9 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qemu/module.h"
#include "hw/sysbus.h"
#include "monitor/monitor.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent);
static char *sysbus_get_fw_dev_path(DeviceState *dev);
@@ -65,14 +64,14 @@ void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque)
};
/* Loop through all sysbus devices that were spawned outside the machine */
- container = container_get(qdev_get_machine(), "/peripheral");
+ container = machine_get_container("peripheral");
find_sysbus_device(container, &find);
- container = container_get(qdev_get_machine(), "/peripheral-anon");
+ container = machine_get_container("peripheral-anon");
find_sysbus_device(container, &find);
}
-static void system_bus_class_init(ObjectClass *klass, void *data)
+static void system_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -80,13 +79,6 @@ static void system_bus_class_init(ObjectClass *klass, void *data)
k->get_fw_dev_path = sysbus_get_fw_dev_path;
}
-static const TypeInfo system_bus_info = {
- .name = TYPE_SYSTEM_BUS,
- .parent = TYPE_BUS,
- .instance_size = sizeof(BusState),
- .class_init = system_bus_class_init,
-};
-
/* Check whether an IRQ source exists */
bool sysbus_has_irq(SysBusDevice *dev, int n)
{
@@ -154,16 +146,6 @@ static void sysbus_mmio_map_common(SysBusDevice *dev, int n, hwaddr addr,
}
}
-void sysbus_mmio_unmap(SysBusDevice *dev, int n)
-{
- assert(n >= 0 && n < dev->num_mmio);
-
- if (dev->mmio[n].addr != (hwaddr)-1) {
- memory_region_del_subregion(get_system_memory(), dev->mmio[n].memory);
- dev->mmio[n].addr = (hwaddr)-1;
- }
-}
-
void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr)
{
sysbus_mmio_map_common(dev, n, addr, false, 0);
@@ -298,7 +280,7 @@ static char *sysbus_get_fw_dev_path(DeviceState *dev)
return g_strdup(qdev_fw_name(dev));
}
-static void sysbus_device_class_init(ObjectClass *klass, void *data)
+static void sysbus_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = sysbus_device_realize;
@@ -316,15 +298,6 @@ static void sysbus_device_class_init(ObjectClass *klass, void *data)
k->user_creatable = false;
}
-static const TypeInfo sysbus_device_type_info = {
- .name = TYPE_SYS_BUS_DEVICE,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(SysBusDevice),
- .abstract = true,
- .class_size = sizeof(SysBusDeviceClass),
- .class_init = sysbus_device_class_init,
-};
-
static BusState *main_system_bus;
static void main_system_bus_create(void)
@@ -333,8 +306,8 @@ static void main_system_bus_create(void)
* assign main_system_bus before qbus_init()
* in order to make "if (bus != sysbus_get_default())" work
*/
- main_system_bus = g_malloc0(system_bus_info.instance_size);
- qbus_init(main_system_bus, system_bus_info.instance_size,
+ main_system_bus = g_new0(BusState, 1);
+ qbus_init(main_system_bus, sizeof(BusState),
TYPE_SYSTEM_BUS, NULL, "main-system-bus");
OBJECT(main_system_bus)->free = g_free;
}
@@ -347,10 +320,36 @@ BusState *sysbus_get_default(void)
return main_system_bus;
}
-static void sysbus_register_types(void)
+static void dynamic_sysbus_device_class_init(ObjectClass *klass,
+ const void *data)
{
- type_register_static(&system_bus_info);
- type_register_static(&sysbus_device_type_info);
+ DeviceClass *k = DEVICE_CLASS(klass);
+
+ k->user_creatable = true;
+ k->hotpluggable = false;
}
-type_init(sysbus_register_types)
+static const TypeInfo sysbus_types[] = {
+ {
+ .name = TYPE_SYSTEM_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(BusState),
+ .class_init = system_bus_class_init,
+ },
+ {
+ .name = TYPE_SYS_BUS_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(SysBusDevice),
+ .abstract = true,
+ .class_size = sizeof(SysBusDeviceClass),
+ .class_init = sysbus_device_class_init,
+ },
+ {
+ .name = TYPE_DYNAMIC_SYS_BUS_DEVICE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .class_init = dynamic_sysbus_device_class_init,
+ .abstract = true,
+ }
+};
+
+DEFINE_TYPES(sysbus_types)
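A minimal sketch of how a user-creatable sysbus device could now derive from the new abstract type instead of setting the class flags itself ("demo-dyn-sysbus" is a hypothetical type name):

static const TypeInfo demo_dyn_sysbus_info = {
    .name          = "demo-dyn-sysbus",
    .parent        = TYPE_DYNAMIC_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusDevice),
};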
diff --git a/hw/core/uboot_image.h b/hw/core/uboot_image.h
index 18ac293..e4dcfb0 100644
--- a/hw/core/uboot_image.h
+++ b/hw/core/uboot_image.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* (C) Copyright 2008 Semihalf
*
diff --git a/hw/core/vm-change-state-handler.c b/hw/core/vm-change-state-handler.c
index 8e26392..99c642b 100644
--- a/hw/core/vm-change-state-handler.c
+++ b/hw/core/vm-change-state-handler.c
@@ -17,7 +17,7 @@
#include "qemu/osdep.h"
#include "hw/qdev-core.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
static int qdev_get_dev_tree_depth(DeviceState *dev)
{
@@ -40,6 +40,7 @@ static int qdev_get_dev_tree_depth(DeviceState *dev)
* qdev_add_vm_change_state_handler:
* @dev: the device that owns this handler
* @cb: the callback function to be invoked
+ * @cb_ret: the callback function to be invoked, which returns a status value
* @opaque: user data passed to the callback function
*
* This function works like qemu_add_vm_change_state_handler() except callbacks
@@ -50,25 +51,30 @@ static int qdev_get_dev_tree_depth(DeviceState *dev)
* controller's callback is invoked before the children on its bus when the VM
* starts running. The order is reversed when the VM stops running.
*
+ * Note that the parameters @cb and @cb_ret are mutually exclusive.
+ *
* Returns: an entry to be freed with qemu_del_vm_change_state_handler()
*/
VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
VMChangeStateHandler *cb,
+ VMChangeStateHandlerWithRet *cb_ret,
void *opaque)
{
- return qdev_add_vm_change_state_handler_full(dev, cb, NULL, opaque);
+ assert(!cb || !cb_ret);
+ return qdev_add_vm_change_state_handler_full(dev, cb, NULL, cb_ret, opaque);
}
/*
* Exactly like qdev_add_vm_change_state_handler() but passes a prepare_cb
- * argument too.
+ * and a cb_ret argument too.
*/
VMChangeStateEntry *qdev_add_vm_change_state_handler_full(
- DeviceState *dev, VMChangeStateHandler *cb,
- VMChangeStateHandler *prepare_cb, void *opaque)
+ DeviceState *dev, VMChangeStateHandler *cb, VMChangeStateHandler *prepare_cb,
+ VMChangeStateHandlerWithRet *cb_ret, void *opaque)
{
int depth = qdev_get_dev_tree_depth(dev);
- return qemu_add_vm_change_state_handler_prio_full(cb, prepare_cb, opaque,
- depth);
+ assert(!cb || !cb_ret);
+ return qemu_add_vm_change_state_handler_prio_full(cb, prepare_cb, cb_ret,
+ opaque, depth);
}
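A minimal sketch of registering a return-value-aware callback; the handler signature is assumed here to be (opaque, running, state) returning an int, and the plain cb slot stays NULL because the two are mutually exclusive:

static int demo_vm_state_cb(void *opaque, bool running, RunState state)
{
    return 0; /* 0 for success; non-zero would report an error */
}

VMChangeStateEntry *e = qdev_add_vm_change_state_handler(dev, NULL,
                                                         demo_vm_state_cb,
                                                         opaque);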
diff --git a/hw/cpu/a15mpcore.c b/hw/cpu/a15mpcore.c
index 967d8d3..bd36dd9 100644
--- a/hw/cpu/a15mpcore.c
+++ b/hw/cpu/a15mpcore.c
@@ -24,7 +24,7 @@
#include "hw/cpu/a15mpcore.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_arm.h"
#include "target/arm/gtimer.h"
@@ -58,6 +58,11 @@ static void a15mp_priv_realize(DeviceState *dev, Error **errp)
bool has_el2 = false;
Object *cpuobj;
+ if (s->num_irq < 32 || s->num_irq > 256) {
+ error_setg(errp, "Property 'num-irq' must be between 32 and 256");
+ return;
+ }
+
gicdev = DEVICE(&s->gic);
qdev_prop_set_uint32(gicdev, "num-cpu", s->num_cpu);
qdev_prop_set_uint32(gicdev, "num-irq", s->num_irq);
@@ -144,19 +149,19 @@ static void a15mp_priv_realize(DeviceState *dev, Error **errp)
}
}
-static Property a15mp_priv_properties[] = {
+static const Property a15mp_priv_properties[] = {
DEFINE_PROP_UINT32("num-cpu", A15MPPrivState, num_cpu, 1),
- /* The Cortex-A15MP may have anything from 0 to 224 external interrupt
- * IRQ lines (with another 32 internal). We default to 128+32, which
- * is the number provided by the Cortex-A15MP test chip in the
- * Versatile Express A15 development board.
- * Other boards may differ and should set this property appropriately.
+ /*
+ * The Cortex-A15MP may have anything from 0 to 224 external interrupt
+ * lines, plus always 32 internal IRQs. This property sets the total
+ * of internal + external, so the valid range is from 32 to 256.
+     * The board model must set this property to match the configuration
+     * of the CPU on that board or SoC.
*/
- DEFINE_PROP_UINT32("num-irq", A15MPPrivState, num_irq, 160),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_UINT32("num-irq", A15MPPrivState, num_irq, 0),
};
-static void a15mp_priv_class_init(ObjectClass *klass, void *data)
+static void a15mp_priv_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -165,17 +170,14 @@ static void a15mp_priv_class_init(ObjectClass *klass, void *data)
/* We currently have no saveable state */
}
-static const TypeInfo a15mp_priv_info = {
- .name = TYPE_A15MPCORE_PRIV,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(A15MPPrivState),
- .instance_init = a15mp_priv_initfn,
- .class_init = a15mp_priv_class_init,
+static const TypeInfo a15mp_types[] = {
+ {
+ .name = TYPE_A15MPCORE_PRIV,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(A15MPPrivState),
+ .instance_init = a15mp_priv_initfn,
+ .class_init = a15mp_priv_class_init,
+ },
};
-static void a15mp_register_types(void)
-{
- type_register_static(&a15mp_priv_info);
-}
-
-type_init(a15mp_register_types)
+DEFINE_TYPES(a15mp_types)
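Because "num-irq" now defaults to 0 and is rejected at realize, board and SoC models must set it explicitly; a minimal sketch using the value that used to be the default (the a15mpcore field name is hypothetical):

qdev_prop_set_uint32(DEVICE(&s->a15mpcore), "num-irq", 128 + 32);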
diff --git a/hw/cpu/a9mpcore.c b/hw/cpu/a9mpcore.c
index c30ef72..64bebbd 100644
--- a/hw/cpu/a9mpcore.c
+++ b/hw/cpu/a9mpcore.c
@@ -56,6 +56,11 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp)
CPUState *cpu0;
Object *cpuobj;
+ if (s->num_irq < 32 || s->num_irq > 256) {
+ error_setg(errp, "Property 'num-irq' must be between 32 and 256");
+ return;
+ }
+
cpu0 = qemu_get_cpu(0);
cpuobj = OBJECT(cpu0);
if (strcmp(object_get_typename(cpuobj), ARM_CPU_TYPE_NAME("cortex-a9"))) {
@@ -158,19 +163,19 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp)
}
}
-static Property a9mp_priv_properties[] = {
+static const Property a9mp_priv_properties[] = {
DEFINE_PROP_UINT32("num-cpu", A9MPPrivState, num_cpu, 1),
- /* The Cortex-A9MP may have anything from 0 to 224 external interrupt
- * IRQ lines (with another 32 internal). We default to 64+32, which
- * is the number provided by the Cortex-A9MP test chip in the
- * Realview PBX-A9 and Versatile Express A9 development boards.
- * Other boards may differ and should set this property appropriately.
+ /*
+ * The Cortex-A9MP may have anything from 0 to 224 external interrupt
+ * lines, plus always 32 internal IRQs. This property sets the total
+ * of internal + external, so the valid range is from 32 to 256.
+     * The board model must set this property to match the configuration
+     * of the CPU on that board or SoC.
*/
- DEFINE_PROP_UINT32("num-irq", A9MPPrivState, num_irq, 96),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_UINT32("num-irq", A9MPPrivState, num_irq, 0),
};
-static void a9mp_priv_class_init(ObjectClass *klass, void *data)
+static void a9mp_priv_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -178,17 +183,14 @@ static void a9mp_priv_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, a9mp_priv_properties);
}
-static const TypeInfo a9mp_priv_info = {
- .name = TYPE_A9MPCORE_PRIV,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(A9MPPrivState),
- .instance_init = a9mp_priv_initfn,
- .class_init = a9mp_priv_class_init,
+static const TypeInfo a9mp_types[] = {
+ {
+ .name = TYPE_A9MPCORE_PRIV,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(A9MPPrivState),
+ .instance_init = a9mp_priv_initfn,
+ .class_init = a9mp_priv_class_init,
+ },
};
-static void a9mp_register_types(void)
-{
- type_register_static(&a9mp_priv_info);
-}
-
-type_init(a9mp_register_types)
+DEFINE_TYPES(a9mp_types)
diff --git a/hw/cpu/arm11mpcore.c b/hw/cpu/arm11mpcore.c
index 89c4e35..01772e7 100644
--- a/hw/cpu/arm11mpcore.c
+++ b/hw/cpu/arm11mpcore.c
@@ -131,7 +131,7 @@ static void mpcore_priv_initfn(Object *obj)
object_initialize_child(obj, "wdtimer", &s->wdtimer, TYPE_ARM_MPTIMER);
}
-static Property mpcore_priv_properties[] = {
+static const Property mpcore_priv_properties[] = {
DEFINE_PROP_UINT32("num-cpu", ARM11MPCorePriveState, num_cpu, 1),
/* The ARM11 MPCORE TRM says the on-chip controller may have
* anything from 0 to 224 external interrupt IRQ lines (with another
@@ -142,10 +142,9 @@ static Property mpcore_priv_properties[] = {
* has more IRQ lines than the kernel expects.
*/
DEFINE_PROP_UINT32("num-irq", ARM11MPCorePriveState, num_irq, 64),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mpcore_priv_class_init(ObjectClass *klass, void *data)
+static void mpcore_priv_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -153,17 +152,14 @@ static void mpcore_priv_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, mpcore_priv_properties);
}
-static const TypeInfo mpcore_priv_info = {
- .name = TYPE_ARM11MPCORE_PRIV,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(ARM11MPCorePriveState),
- .instance_init = mpcore_priv_initfn,
- .class_init = mpcore_priv_class_init,
+static const TypeInfo arm11mp_types[] = {
+ {
+ .name = TYPE_ARM11MPCORE_PRIV,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ARM11MPCorePriveState),
+ .instance_init = mpcore_priv_initfn,
+ .class_init = mpcore_priv_class_init,
+ },
};
-static void arm11mpcore_register_types(void)
-{
- type_register_static(&mpcore_priv_info);
-}
-
-type_init(arm11mpcore_register_types)
+DEFINE_TYPES(arm11mp_types)
diff --git a/hw/cpu/cluster.c b/hw/cpu/cluster.c
index 61289a8..ef3b3d1 100644
--- a/hw/cpu/cluster.c
+++ b/hw/cpu/cluster.c
@@ -25,9 +25,8 @@
#include "hw/qdev-properties.h"
#include "qapi/error.h"
-static Property cpu_cluster_properties[] = {
+static const Property cpu_cluster_properties[] = {
DEFINE_PROP_UINT32("cluster-id", CPUClusterState, cluster_id, 0),
- DEFINE_PROP_END_OF_LIST()
};
typedef struct CallbackData {
@@ -73,7 +72,7 @@ static void cpu_cluster_realize(DeviceState *dev, Error **errp)
assert(cbdata.cpu_count > 0);
}
-static void cpu_cluster_class_init(ObjectClass *klass, void *data)
+static void cpu_cluster_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/cpu/core.c b/hw/cpu/core.c
index 495a5c3..5cb2e9a 100644
--- a/hw/cpu/core.c
+++ b/hw/cpu/core.c
@@ -77,7 +77,7 @@ static void cpu_core_instance_init(Object *obj)
}
}
-static void cpu_core_class_init(ObjectClass *oc, void *data)
+static void cpu_core_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/cpu/realview_mpcore.c b/hw/cpu/realview_mpcore.c
index 72c792e..099b71a 100644
--- a/hw/cpu/realview_mpcore.c
+++ b/hw/cpu/realview_mpcore.c
@@ -14,7 +14,6 @@
#include "hw/cpu/arm11mpcore.h"
#include "hw/intc/realview_gic.h"
#include "hw/irq.h"
-#include "hw/qdev-properties.h"
#include "qom/object.h"
#define TYPE_REALVIEW_MPCORE_RIRQ "realview_mpcore"
@@ -68,7 +67,6 @@ static void realview_mpcore_realize(DeviceState *dev, Error **errp)
int n;
int i;
- qdev_prop_set_uint32(priv, "num-cpu", s->num_cpu);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->priv), errp)) {
return;
}
@@ -100,6 +98,7 @@ static void mpcore_rirq_init(Object *obj)
int i;
object_initialize_child(obj, "a11priv", &s->priv, TYPE_ARM11MPCORE_PRIV);
+ object_property_add_alias(obj, "num-cpu", OBJECT(&s->priv), "num-cpu");
privbusdev = SYS_BUS_DEVICE(&s->priv);
sysbus_init_mmio(sbd, sysbus_mmio_get_region(privbusdev, 0));
@@ -108,30 +107,21 @@ static void mpcore_rirq_init(Object *obj)
}
}
-static Property mpcore_rirq_properties[] = {
- DEFINE_PROP_UINT32("num-cpu", mpcore_rirq_state, num_cpu, 1),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void mpcore_rirq_class_init(ObjectClass *klass, void *data)
+static void mpcore_rirq_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = realview_mpcore_realize;
- device_class_set_props(dc, mpcore_rirq_properties);
}
-static const TypeInfo mpcore_rirq_info = {
- .name = TYPE_REALVIEW_MPCORE_RIRQ,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(mpcore_rirq_state),
- .instance_init = mpcore_rirq_init,
- .class_init = mpcore_rirq_class_init,
+static const TypeInfo realview_mpcore_types[] = {
+ {
+ .name = TYPE_REALVIEW_MPCORE_RIRQ,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mpcore_rirq_state),
+ .instance_init = mpcore_rirq_init,
+ .class_init = mpcore_rirq_class_init,
+ },
};
-static void realview_mpcore_register_types(void)
-{
- type_register_static(&mpcore_rirq_info);
-}
-
-type_init(realview_mpcore_register_types)
+DEFINE_TYPES(realview_mpcore_types)
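With the alias in place, a board keeps programming the wrapper object and the value lands on the embedded ARM11 MPCore private block; a minimal sketch (the mpcore field name is hypothetical):

qdev_prop_set_uint32(DEVICE(&s->mpcore), "num-cpu", 4);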
diff --git a/hw/cris/Kconfig b/hw/cris/Kconfig
deleted file mode 100644
index 26c7eef..0000000
--- a/hw/cris/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config AXIS
- bool
- default y
- depends on CRIS
- select ETRAXFS
- select PFLASH_CFI02
- select NAND
-
-config ETRAXFS
- bool
- select PTIMER
diff --git a/hw/cris/axis_dev88.c b/hw/cris/axis_dev88.c
deleted file mode 100644
index 5556634..0000000
--- a/hw/cris/axis_dev88.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * QEMU model for the AXIS devboard 88.
- *
- * Copyright (c) 2009 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "cpu.h"
-#include "hw/sysbus.h"
-#include "net/net.h"
-#include "hw/block/flash.h"
-#include "hw/boards.h"
-#include "hw/cris/etraxfs.h"
-#include "hw/loader.h"
-#include "elf.h"
-#include "boot.h"
-#include "sysemu/qtest.h"
-#include "sysemu/sysemu.h"
-
-#define D(x)
-#define DNAND(x)
-
-struct nand_state_t
-{
- DeviceState *nand;
- MemoryRegion iomem;
- unsigned int rdy:1;
- unsigned int ale:1;
- unsigned int cle:1;
- unsigned int ce:1;
-};
-
-static struct nand_state_t nand_state;
-static uint64_t nand_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct nand_state_t *s = opaque;
- uint32_t r;
- int rdy;
-
- r = nand_getio(s->nand);
- nand_getpins(s->nand, &rdy);
- s->rdy = rdy;
-
- DNAND(printf("%s addr=%x r=%x\n", __func__, addr, r));
- return r;
-}
-
-static void
-nand_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- struct nand_state_t *s = opaque;
- int rdy;
-
- DNAND(printf("%s addr=%x v=%x\n", __func__, addr, (unsigned)value));
- nand_setpins(s->nand, s->cle, s->ale, s->ce, 1, 0);
- nand_setio(s->nand, value);
- nand_getpins(s->nand, &rdy);
- s->rdy = rdy;
-}
-
-static const MemoryRegionOps nand_ops = {
- .read = nand_read,
- .write = nand_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct tempsensor_t
-{
- unsigned int shiftreg;
- unsigned int count;
- enum {
- ST_OUT, ST_IN, ST_Z
- } state;
-
- uint16_t regs[3];
-};
-
-static void tempsensor_clkedge(struct tempsensor_t *s,
- unsigned int clk, unsigned int data_in)
-{
- D(printf("%s clk=%d state=%d sr=%x\n", __func__,
- clk, s->state, s->shiftreg));
- if (s->count == 0) {
- s->count = 16;
- s->state = ST_OUT;
- }
- switch (s->state) {
- case ST_OUT:
- /* Output reg is clocked at negedge. */
- if (!clk) {
- s->count--;
- s->shiftreg <<= 1;
- if (s->count == 0) {
- s->shiftreg = 0;
- s->state = ST_IN;
- s->count = 16;
- }
- }
- break;
- case ST_Z:
- if (clk) {
- s->count--;
- if (s->count == 0) {
- s->shiftreg = 0;
- s->state = ST_OUT;
- s->count = 16;
- }
- }
- break;
- case ST_IN:
- /* Indata is sampled at posedge. */
- if (clk) {
- s->count--;
- s->shiftreg <<= 1;
- s->shiftreg |= data_in & 1;
- if (s->count == 0) {
- D(printf("%s cfgreg=%x\n", __func__, s->shiftreg));
- s->regs[0] = s->shiftreg;
- s->state = ST_OUT;
- s->count = 16;
-
- if ((s->regs[0] & 0xff) == 0) {
- /* 25 degrees celsius. */
- s->shiftreg = 0x0b9f;
- } else if ((s->regs[0] & 0xff) == 0xff) {
- /* Sensor ID, 0x8100 LM70. */
- s->shiftreg = 0x8100;
- } else
- printf("Invalid tempsens state %x\n", s->regs[0]);
- }
- }
- break;
- }
-}
-
-
-#define RW_PA_DOUT 0x00
-#define R_PA_DIN 0x01
-#define RW_PA_OE 0x02
-#define RW_PD_DOUT 0x10
-#define R_PD_DIN 0x11
-#define RW_PD_OE 0x12
-
-static struct gpio_state_t
-{
- MemoryRegion iomem;
- struct nand_state_t *nand;
- struct tempsensor_t tempsensor;
- uint32_t regs[0x5c / 4];
-} gpio_state;
-
-static uint64_t gpio_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct gpio_state_t *s = opaque;
- uint32_t r = 0;
-
- addr >>= 2;
- switch (addr)
- {
- case R_PA_DIN:
- r = s->regs[RW_PA_DOUT] & s->regs[RW_PA_OE];
-
- /* Encode pins from the nand. */
- r |= s->nand->rdy << 7;
- break;
- case R_PD_DIN:
- r = s->regs[RW_PD_DOUT] & s->regs[RW_PD_OE];
-
- /* Encode temp sensor pins. */
- r |= (!!(s->tempsensor.shiftreg & 0x10000)) << 4;
- break;
-
- default:
- r = s->regs[addr];
- break;
- }
- return r;
- D(printf("%s %x=%x\n", __func__, addr, r));
-}
-
-static void gpio_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- struct gpio_state_t *s = opaque;
- D(printf("%s %x=%x\n", __func__, addr, (unsigned)value));
-
- addr >>= 2;
- switch (addr)
- {
- case RW_PA_DOUT:
- /* Decode nand pins. */
- s->nand->ale = !!(value & (1 << 6));
- s->nand->cle = !!(value & (1 << 5));
- s->nand->ce = !!(value & (1 << 4));
-
- s->regs[addr] = value;
- break;
-
- case RW_PD_DOUT:
- /* Temp sensor clk. */
- if ((s->regs[addr] ^ value) & 2)
- tempsensor_clkedge(&s->tempsensor, !!(value & 2),
- !!(value & 16));
- s->regs[addr] = value;
- break;
-
- default:
- s->regs[addr] = value;
- break;
- }
-}
-
-static const MemoryRegionOps gpio_ops = {
- .read = gpio_read,
- .write = gpio_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4,
- },
-};
-
-#define INTMEM_SIZE (128 * KiB)
-
-static struct cris_load_info li;
-
-static
-void axisdev88_init(MachineState *machine)
-{
- const char *kernel_filename = machine->kernel_filename;
- const char *kernel_cmdline = machine->kernel_cmdline;
- CRISCPU *cpu;
- DeviceState *dev;
- SysBusDevice *s;
- DriveInfo *nand;
- qemu_irq irq[30], nmi[2];
- void *etraxfs_dmac;
- struct etraxfs_dma_client *dma_eth;
- int i;
- MemoryRegion *address_space_mem = get_system_memory();
- MemoryRegion *phys_intmem = g_new(MemoryRegion, 1);
-
- /* init CPUs */
- cpu = CRIS_CPU(cpu_create(machine->cpu_type));
-
- memory_region_add_subregion(address_space_mem, 0x40000000, machine->ram);
-
- /* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the
- internal memory. */
- memory_region_init_ram(phys_intmem, NULL, "axisdev88.chipram",
- INTMEM_SIZE, &error_fatal);
- memory_region_add_subregion(address_space_mem, 0x38000000, phys_intmem);
-
- /* Attach a NAND flash to CS1. */
- nand = drive_get(IF_MTD, 0, 0);
- nand_state.nand = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL,
- NAND_MFR_STMICRO, 0x39);
- memory_region_init_io(&nand_state.iomem, NULL, &nand_ops, &nand_state,
- "nand", 0x05000000);
- memory_region_add_subregion(address_space_mem, 0x10000000,
- &nand_state.iomem);
-
- gpio_state.nand = &nand_state;
- memory_region_init_io(&gpio_state.iomem, NULL, &gpio_ops, &gpio_state,
- "gpio", 0x5c);
- memory_region_add_subregion(address_space_mem, 0x3001a000,
- &gpio_state.iomem);
-
-
- dev = qdev_new("etraxfs-pic");
- s = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, 0x3001c000);
- sysbus_connect_irq(s, 0, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_IRQ));
- sysbus_connect_irq(s, 1, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_NMI));
- for (i = 0; i < 30; i++) {
- irq[i] = qdev_get_gpio_in(dev, i);
- }
- nmi[0] = qdev_get_gpio_in(dev, 30);
- nmi[1] = qdev_get_gpio_in(dev, 31);
-
- etraxfs_dmac = etraxfs_dmac_init(0x30000000, 10);
- for (i = 0; i < 10; i++) {
- /* On ETRAX, odd numbered channels are inputs. */
- etraxfs_dmac_connect(etraxfs_dmac, i, irq + 7 + i, i & 1);
- }
-
- /* Add the two ethernet blocks. */
- dma_eth = g_malloc0(sizeof dma_eth[0] * 4); /* Allocate 4 channels. */
-
- etraxfs_eth_init(0x30034000, 1, &dma_eth[0], &dma_eth[1]);
- /* The DMA Connector block is missing, hardwire things for now. */
- etraxfs_dmac_connect_client(etraxfs_dmac, 0, &dma_eth[0]);
- etraxfs_dmac_connect_client(etraxfs_dmac, 1, &dma_eth[1]);
-
- if (qemu_find_nic_info("etraxfs-eth", true, "fseth")) {
- etraxfs_eth_init(0x30036000, 2, &dma_eth[2], &dma_eth[3]);
- etraxfs_dmac_connect_client(etraxfs_dmac, 6, &dma_eth[2]);
- etraxfs_dmac_connect_client(etraxfs_dmac, 7, &dma_eth[3]);
- }
-
- /* 2 timers. */
- sysbus_create_varargs("etraxfs-timer", 0x3001e000, irq[0x1b], nmi[1], NULL);
- sysbus_create_varargs("etraxfs-timer", 0x3005e000, irq[0x1b], nmi[1], NULL);
-
- for (i = 0; i < 4; i++) {
- etraxfs_ser_create(0x30026000 + i * 0x2000, irq[0x14 + i], serial_hd(i));
- }
-
- if (kernel_filename) {
- li.image_filename = kernel_filename;
- li.cmdline = kernel_cmdline;
- li.ram_size = machine->ram_size;
- cris_load_image(cpu, &li);
- } else if (!qtest_enabled()) {
- fprintf(stderr, "Kernel image must be specified\n");
- exit(1);
- }
-}
-
-static void axisdev88_machine_init(MachineClass *mc)
-{
- mc->desc = "AXIS devboard 88";
- mc->init = axisdev88_init;
- mc->is_default = true;
- mc->default_cpu_type = CRIS_CPU_TYPE_NAME("crisv32");
- mc->default_ram_id = "axisdev88.ram";
-}
-
-DEFINE_MACHINE("axis-dev88", axisdev88_machine_init)
diff --git a/hw/cris/boot.c b/hw/cris/boot.c
deleted file mode 100644
index 9fa09cf..0000000
--- a/hw/cris/boot.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * CRIS image loading.
- *
- * Copyright (c) 2010 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "hw/loader.h"
-#include "elf.h"
-#include "boot.h"
-#include "qemu/cutils.h"
-#include "sysemu/reset.h"
-
-static void main_cpu_reset(void *opaque)
-{
- CRISCPU *cpu = opaque;
- CPUCRISState *env = &cpu->env;
- struct cris_load_info *li;
-
- li = env->load_info;
-
- cpu_reset(CPU(cpu));
-
- if (!li) {
- /* nothing more to do. */
- return;
- }
-
- env->pc = li->entry;
-
- if (li->image_filename) {
- env->regs[8] = 0x56902387; /* RAM boot magic. */
- env->regs[9] = 0x40004000 + li->image_size;
- }
-
- if (li->cmdline) {
- /* Let the kernel know we are modifying the cmdline. */
- env->regs[10] = 0x87109563;
- env->regs[11] = 0x40000000;
- }
-}
-
-static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
-{
- return addr - 0x80000000LL;
-}
-
-void cris_load_image(CRISCPU *cpu, struct cris_load_info *li)
-{
- CPUCRISState *env = &cpu->env;
- uint64_t entry;
- int kcmdline_len;
- int image_size;
-
- env->load_info = li;
- /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis
- devboard SDK. */
- image_size = load_elf(li->image_filename, NULL,
- translate_kernel_address, NULL,
- &entry, NULL, NULL, NULL, 0, EM_CRIS, 0, 0);
- li->entry = entry;
- if (image_size < 0) {
- /* Takes a kimage from the axis devboard SDK. */
- image_size = load_image_targphys(li->image_filename, 0x40004000,
- li->ram_size);
- li->entry = 0x40004000;
- }
-
- if (image_size < 0) {
- fprintf(stderr, "qemu: could not load kernel '%s'\n",
- li->image_filename);
- exit(1);
- }
-
- if (li->cmdline && (kcmdline_len = strlen(li->cmdline))) {
- if (kcmdline_len > 256) {
- fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n");
- exit(1);
- }
- pstrcpy_targphys("cmdline", 0x40000000, 256, li->cmdline);
- }
- qemu_register_reset(main_cpu_reset, cpu);
-}
diff --git a/hw/cris/boot.h b/hw/cris/boot.h
deleted file mode 100644
index 9f1e0e3..0000000
--- a/hw/cris/boot.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef HW_CRIS_BOOT_H
-#define HW_CRIS_BOOT_H
-
-struct cris_load_info
-{
- const char *image_filename;
- const char *cmdline;
- int image_size;
- ram_addr_t ram_size;
-
- hwaddr entry;
-};
-
-void cris_load_image(CRISCPU *cpu, struct cris_load_info *li);
-
-#endif
diff --git a/hw/cris/meson.build b/hw/cris/meson.build
deleted file mode 100644
index dc808a4..0000000
--- a/hw/cris/meson.build
+++ /dev/null
@@ -1,5 +0,0 @@
-cris_ss = ss.source_set()
-cris_ss.add(files('boot.c'))
-cris_ss.add(when: 'CONFIG_AXIS', if_true: files('axis_dev88.c'))
-
-hw_arch += {'cris': cris_ss}
diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c
index cd116c0..4738959 100644
--- a/hw/cxl/cxl-component-utils.c
+++ b/hw/cxl/cxl-component-utils.c
@@ -243,8 +243,13 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
POISON_ON_ERR_CAP, 0);
- ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0);
- ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0);
+ if (type == CXL2_TYPE3_DEVICE) {
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 1);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 1);
+ } else {
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0);
+ }
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0);
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
UIO_DECODER_COUNT, 0);
diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c
index 035d034..e150d74 100644
--- a/hw/cxl/cxl-device-utils.c
+++ b/hw/cxl/cxl-device-utils.c
@@ -95,11 +95,15 @@ static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
}
if (offset == A_CXL_DEV_MAILBOX_STS) {
uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
- if (cci->bg.complete_pct) {
- status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
- 0);
- cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
- }
+ int bgop;
+
+ qemu_mutex_lock(&cci->bg.lock);
+ bgop = !(cci->bg.complete_pct == 100 || cci->bg.aborted);
+
+ status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
+ bgop);
+ cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
+ qemu_mutex_unlock(&cci->bg.lock);
}
return cxl_dstate->mbox_reg_state64[offset / size];
default:
@@ -352,10 +356,8 @@ static void device_reg_init_common(CXLDeviceState *cxl_dstate)
}
}
-static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
+static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate, int msi_n)
{
- const uint8_t msi_n = 9;
-
/* 2048 payload size */
ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
@@ -382,7 +384,7 @@ static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
cxl_dstate->memdev_status = memdev_status_reg;
}
-void cxl_device_register_init_t3(CXLType3Dev *ct3d)
+void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n)
{
CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
uint64_t *cap_h = cxl_dstate->caps_reg_state64;
@@ -398,7 +400,7 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d)
device_reg_init_common(cxl_dstate);
cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
- mailbox_reg_init_common(cxl_dstate);
+ mailbox_reg_init_common(cxl_dstate, msi_n);
cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
CXL_MEM_DEV_STATUS_VERSION);
@@ -408,7 +410,7 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d)
CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}
-void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
+void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n)
{
CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
uint64_t *cap_h = cxl_dstate->caps_reg_state64;
@@ -423,7 +425,7 @@ void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
device_reg_init_common(cxl_dstate);
cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
- mailbox_reg_init_common(cxl_dstate);
+ mailbox_reg_init_common(cxl_dstate, msi_n);
cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
memdev_reg_init_common(cxl_dstate);
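Callers of the register-init helpers now choose the mailbox interrupt vector themselves and pass it down; a minimal sketch using the value that was previously hard-coded in mailbox_reg_init_common():

cxl_device_register_init_t3(ct3d, 9);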
diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c
index d397718..12dee2e 100644
--- a/hw/cxl/cxl-events.c
+++ b/hw/cxl/cxl-events.c
@@ -139,6 +139,19 @@ bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
return cxl_event_count(log) == 1;
}
+void cxl_discard_all_event_records(CXLDeviceState *cxlds)
+{
+ CXLEventLogType log_type;
+ CXLEventLog *log;
+
+ for (log_type = 0; log_type < CXL_EVENT_TYPE_MAX; log_type++) {
+ log = &cxlds->event_logs[log_type];
+ while (!cxl_event_empty(log)) {
+ cxl_event_delete_head(cxlds, log_type, log);
+ }
+ }
+}
+
CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
uint8_t log_type, int max_recs,
size_t *len)
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index c5f5fcf..e010163 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -10,7 +10,7 @@
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/boards.h"
#include "qapi/qapi-visit-machine.h"
@@ -67,8 +67,6 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state,
cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows,
g_steal_pointer(&fw));
-
- return;
}
void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp)
@@ -315,7 +313,8 @@ static void machine_set_cxl(Object *obj, Visitor *v, const char *name,
static void machine_get_cfmw(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- CXLFixedMemoryWindowOptionsList **list = opaque;
+ CXLState *state = opaque;
+ CXLFixedMemoryWindowOptionsList **list = &state->cfmw_list;
visit_type_CXLFixedMemoryWindowOptionsList(v, name, list, errp);
}
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 74eeb6f..299f232 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -7,24 +7,32 @@
* COPYING file in the top-level directory.
*/
+#include <math.h>
+
#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
+#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qemu/range.h"
#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0
+#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
+#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
+#define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
+#define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
+#define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)
/*
* How to add a new command, example. The command set FOO, with cmd BAR.
@@ -55,6 +63,9 @@ enum {
INFOSTAT = 0x00,
#define IS_IDENTIFY 0x1
#define BACKGROUND_OPERATION_STATUS 0x2
+ #define GET_RESPONSE_MSG_LIMIT 0x3
+ #define SET_RESPONSE_MSG_LIMIT 0x4
+ #define BACKGROUND_OPERATION_ABORT 0x5
EVENTS = 0x01,
#define GET_RECORDS 0x0
#define CLEAR_RECORDS 0x1
@@ -62,27 +73,40 @@ enum {
#define SET_INTERRUPT_POLICY 0x3
FIRMWARE_UPDATE = 0x02,
#define GET_INFO 0x0
+ #define TRANSFER 0x1
+ #define ACTIVATE 0x2
TIMESTAMP = 0x03,
#define GET 0x0
#define SET 0x1
LOGS = 0x04,
#define GET_SUPPORTED 0x0
#define GET_LOG 0x1
+ FEATURES = 0x05,
+ #define GET_SUPPORTED 0x0
+ #define GET_FEATURE 0x1
+ #define SET_FEATURE 0x2
IDENTIFY = 0x40,
#define MEMORY_DEVICE 0x0
CCLS = 0x41,
#define GET_PARTITION_INFO 0x0
#define GET_LSA 0x2
#define SET_LSA 0x3
+ HEALTH_INFO_ALERTS = 0x42,
+ #define GET_ALERT_CONFIG 0x1
+ #define SET_ALERT_CONFIG 0x2
SANITIZE = 0x44,
#define OVERWRITE 0x0
#define SECURE_ERASE 0x1
+ #define MEDIA_OPERATIONS 0x2
PERSISTENT_MEM = 0x45,
#define GET_SECURITY_STATE 0x0
MEDIA_AND_POISON = 0x43,
#define GET_POISON_LIST 0x0
#define INJECT_POISON 0x1
#define CLEAR_POISON 0x2
+ #define GET_SCAN_MEDIA_CAPABILITIES 0x3
+ #define SCAN_MEDIA 0x4
+ #define GET_SCAN_MEDIA_RESULTS 0x5
DCD_CONFIG = 0x48,
#define GET_DC_CONFIG 0x0
#define GET_DYN_CAP_EXT_LIST 0x1
@@ -141,6 +165,9 @@ static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
in = (void *)payload_in;
out = (void *)payload_out;
+ if (len_in < sizeof(*in)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
/* Enough room for minimum sized message - no payload */
if (in->size < sizeof(in->ccimessage)) {
return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
@@ -235,7 +262,6 @@ static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
log_type = payload_in[0];
pl = (CXLGetEventPayload *)payload_out;
- memset(pl, 0, sizeof(*pl));
max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
CXL_EVENT_RECORD_SIZE;
@@ -257,6 +283,12 @@ static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
CXLClearEventPayload *pl;
pl = (CXLClearEventPayload *)payload_in;
+
+ if (len_in < sizeof(*pl) ||
+ len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
*len_out = 0;
return cxl_event_clear_records(cxlds, pl);
}
@@ -273,7 +305,6 @@ static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
CXLEventLog *log;
policy = (CXLEventInterruptPolicy *)payload_out;
- memset(policy, 0, sizeof(*policy));
log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
if (log->irq_enabled) {
@@ -366,13 +397,12 @@ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
uint16_t pcie_subsys_vid;
uint16_t pcie_subsys_id;
uint64_t sn;
- uint8_t max_message_size;
+ uint8_t max_message_size;
uint8_t component_type;
} QEMU_PACKED *is_identify;
QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);
is_identify = (void *)payload_out;
- memset(is_identify, 0, sizeof(*is_identify));
is_identify->pcie_vid = class->vendor_id;
is_identify->pcie_did = class->device_id;
if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
@@ -396,12 +426,58 @@ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
is_identify->component_type = 0x3; /* Type 3 */
}
- /* TODO: Allow this to vary across different CCIs */
- is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
+ is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
*len_out = sizeof(*is_identify);
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
+static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint8_t rsp_limit;
+ } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
+ QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);
+
+ get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);
+
+ *len_out = sizeof(*get_rsp_msg_limit);
+ return CXL_MBOX_SUCCESS;
+}
+
+/* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
+static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint8_t rsp_limit;
+ } QEMU_PACKED *in = (void *)payload_in;
+ QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
+ struct {
+ uint8_t rsp_limit;
+ } QEMU_PACKED *out = (void *)payload_out;
+ QEMU_BUILD_BUG_ON(sizeof(*out) != 1);
+
+ if (in->rsp_limit < 8 || in->rsp_limit > 10) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ cci->payload_max = 1 << in->rsp_limit;
+ out->rsp_limit = in->rsp_limit;
+
+ *len_out = sizeof(*out);
+ return CXL_MBOX_SUCCESS;
+}
+
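For reference, the limit is encoded as log2 of the response payload size, matching cmd_infostat_identify() above: 1 << 8 = 256 bytes, 1 << 9 = 512 bytes, and 1 << 10 = 1024 bytes, which is why values outside 8..10 return CXL_MBOX_INVALID_INPUT.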
static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
void *private)
{
@@ -514,6 +590,9 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;
+ if (len_in < sizeof(*in)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
/* Check if what was requested can fit */
if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
return CXL_MBOX_INVALID_INPUT;
@@ -606,7 +685,6 @@ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);
bg_op_status = (void *)payload_out;
- memset(bg_op_status, 0, sizeof(*bg_op_status));
bg_op_status->status = cci->bg.complete_pct << 1;
if (cci->bg.runtime > 0) {
bg_op_status->status |= 1U << 0;
@@ -618,6 +696,44 @@ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/*
+ * CXL r3.1 Section 8.2.9.1.5:
+ * Request Abort Background Operation (Opcode 0005h)
+ */
+static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ int bg_set = cci->bg.opcode >> 8;
+ int bg_cmd = cci->bg.opcode & 0xff;
+ const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];
+
+ if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
+ return CXL_MBOX_REQUEST_ABORT_NOTSUP;
+ }
+
+ qemu_mutex_lock(&cci->bg.lock);
+ if (cci->bg.runtime) {
+        /* abort only if the operation is not yet nearly complete; otherwise let it finish */
+ if (cci->bg.complete_pct < 85) {
+ timer_del(cci->bg.timer);
+ cci->bg.ret_code = CXL_MBOX_ABORTED;
+ cci->bg.starttime = 0;
+ cci->bg.runtime = 0;
+ cci->bg.aborted = true;
+ }
+ }
+ qemu_mutex_unlock(&cci->bg.lock);
+
+ return CXL_MBOX_SUCCESS;
+}
+
+#define CXL_FW_SLOTS 2
+#define CXL_FW_SIZE 0x02000000 /* 32 MiB */
+
/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
uint8_t *payload_in,
@@ -640,24 +756,204 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
} QEMU_PACKED *fw_info;
QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
- if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
- (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) ||
- (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) {
+ if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
+ !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
+ !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
return CXL_MBOX_INTERNAL_ERROR;
}
fw_info = (void *)payload_out;
- memset(fw_info, 0, sizeof(*fw_info));
- fw_info->slots_supported = 2;
- fw_info->slot_info = BIT(0) | BIT(3);
- fw_info->caps = 0;
- pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
+ fw_info->slots_supported = CXL_FW_SLOTS;
+ fw_info->slot_info = (cci->fw.active_slot & 0x7) |
+ ((cci->fw.staged_slot & 0x7) << 3);
+ fw_info->caps = BIT(0); /* online update supported */
+
+ if (cci->fw.slot[0]) {
+ pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
+ }
+ if (cci->fw.slot[1]) {
+ pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
+ }
*len_out = sizeof(*fw_info);
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
+#define CXL_FW_XFER_ALIGNMENT 128
+
+#define CXL_FW_XFER_ACTION_FULL 0x0
+#define CXL_FW_XFER_ACTION_INIT 0x1
+#define CXL_FW_XFER_ACTION_CONTINUE 0x2
+#define CXL_FW_XFER_ACTION_END 0x3
+#define CXL_FW_XFER_ACTION_ABORT 0x4
+
+static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint8_t action;
+ uint8_t slot;
+ uint8_t rsvd1[2];
+ uint32_t offset;
+ uint8_t rsvd2[0x78];
+ uint8_t data[];
+ } QEMU_PACKED *fw_transfer = (void *)payload_in;
+ size_t offset, length;
+
+ if (len < sizeof(*fw_transfer)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
+ /*
+ * At this point there are no ongoing transfers running
+ * in the background - everything is serialized before this
+ * call. Just reset the transfer state machine and disregard
+ * any other input.
+ */
+ cci->fw.transferring = false;
+ return CXL_MBOX_SUCCESS;
+ }
+
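+ /* the offset field is carried in units of CXL_FW_XFER_ALIGNMENT (128 byte) blocks */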
+ offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
+ length = len - sizeof(*fw_transfer);
+ if (offset + length > CXL_FW_SIZE) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ if (cci->fw.transferring) {
+ if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
+ fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
+ return CXL_MBOX_FW_XFER_IN_PROGRESS;
+ }
+ /*
+ * Abort a partitioned package transfer if more than 30 seconds
+ * elapse between parts. Unlike the explicit ABORT action,
+ * treat this condition as an error - as if a part action
+ * were passed without a previous INIT.
+ */
+ if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
+ cci->fw.transferring = false;
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
+ fw_transfer->action == CXL_FW_XFER_ACTION_END) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ /* allow back-to-back retransmission */
+ if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
+ (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
+ fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
+ /* verify no overlaps */
+ if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
+ return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
+ }
+ }
+
+ switch (fw_transfer->action) {
+ case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
+ case CXL_FW_XFER_ACTION_END:
+ if (fw_transfer->slot == 0 ||
+ fw_transfer->slot == cci->fw.active_slot ||
+ fw_transfer->slot > CXL_FW_SLOTS) {
+ return CXL_MBOX_FW_INVALID_SLOT;
+ }
+
+ /* mark the slot used upon bg completion */
+ break;
+ case CXL_FW_XFER_ACTION_INIT:
+ if (offset != 0) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ cci->fw.transferring = true;
+ cci->fw.prev_offset = offset;
+ cci->fw.prev_len = length;
+ break;
+ case CXL_FW_XFER_ACTION_CONTINUE:
+ cci->fw.prev_offset = offset;
+ cci->fw.prev_len = length;
+ break;
+ default:
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
+ cci->bg.runtime = 10 * 1000UL;
+ } else {
+ cci->bg.runtime = 2 * 1000UL;
+ }
+ /* keep relevant context for bg completion */
+ cci->fw.curr_action = fw_transfer->action;
+ cci->fw.curr_slot = fw_transfer->slot;
+ *len_out = 0;
+
+ return CXL_MBOX_BG_STARTED;
+}
+
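+/* Runs from the background operation timer once a Transfer FW command completes */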
+static void __do_firmware_xfer(CXLCCI *cci)
+{
+ switch (cci->fw.curr_action) {
+ case CXL_FW_XFER_ACTION_FULL:
+ case CXL_FW_XFER_ACTION_END:
+ cci->fw.slot[cci->fw.curr_slot - 1] = true;
+ cci->fw.transferring = false;
+ break;
+ case CXL_FW_XFER_ACTION_INIT:
+ case CXL_FW_XFER_ACTION_CONTINUE:
+ time(&cci->fw.last_partxfer);
+ break;
+ default:
+ break;
+ }
+}
+
+/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
+static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint8_t action;
+ uint8_t slot;
+ } QEMU_PACKED *fw_activate = (void *)payload_in;
+ QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);
+
+ if (fw_activate->slot == 0 ||
+ fw_activate->slot == cci->fw.active_slot ||
+ fw_activate->slot > CXL_FW_SLOTS) {
+ return CXL_MBOX_FW_INVALID_SLOT;
+ }
+
+ /* ensure that an actual fw package is there */
+ if (!cci->fw.slot[fw_activate->slot - 1]) {
+ return CXL_MBOX_FW_INVALID_SLOT;
+ }
+
+ switch (fw_activate->action) {
+ case 0: /* online */
+ cci->fw.active_slot = fw_activate->slot;
+ break;
+ case 1: /* reset */
+ cci->fw.staged_slot = fw_activate->slot;
+ break;
+ default:
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ return CXL_MBOX_SUCCESS;
+}
+
/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
uint8_t *payload_in,
@@ -742,24 +1038,28 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
get_log = (void *)payload_in;
+ if (get_log->length > cci->payload_max) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
+ return CXL_MBOX_INVALID_LOG;
+ }
+
/*
* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
* The device shall return Invalid Input if the Offset or Length
* fields attempt to access beyond the size of the log as reported by Get
- * Supported Logs.
+ * Supported Logs.
*
- * The CEL buffer is large enough to fit all commands in the emulation, so
- * the only possible failure would be if the mailbox itself isn't big
- * enough.
+ * Only one entry per opcode is valid, but offset + length may still
+ * exceed the log size if the inputs are invalid, and would then access
+ * beyond the end of cci->cel_log.
*/
- if (get_log->offset + get_log->length > cci->payload_max) {
+ if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
return CXL_MBOX_INVALID_INPUT;
}
- if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
- return CXL_MBOX_INVALID_LOG;
- }
-
/* Store off everything to local variables so we can wipe out the payload */
*len_out = get_log->length;
@@ -768,6 +1068,399 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 section 8.2.9.6: Features */
+/*
+ * Get Supported Features output payload
+ * CXL r3.1 section 8.2.9.6.1 Table 8-96
+ */
+typedef struct CXLSupportedFeatureHeader {
+ uint16_t entries;
+ uint16_t nsuppfeats_dev;
+ uint32_t reserved;
+} QEMU_PACKED CXLSupportedFeatureHeader;
+
+/*
+ * Get Supported Features Supported Feature Entry
+ * CXL r3.1 section 8.2.9.6.1 Table 8-97
+ */
+typedef struct CXLSupportedFeatureEntry {
+ QemuUUID uuid;
+ uint16_t feat_index;
+ uint16_t get_feat_size;
+ uint16_t set_feat_size;
+ uint32_t attr_flags;
+ uint8_t get_feat_version;
+ uint8_t set_feat_version;
+ uint16_t set_feat_effects;
+ uint8_t rsvd[18];
+} QEMU_PACKED CXLSupportedFeatureEntry;
+
+/* Supported Feature Entry : attribute flags */
+#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
+#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
+#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
+#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
+#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)
+
+/* Supported Feature Entry : set feature effects */
+#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
+#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
+#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
+#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
+#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
+#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
+#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
+#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
+#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
+#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
+#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
+#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)
+
+enum CXL_SUPPORTED_FEATURES_LIST {
+ CXL_FEATURE_PATROL_SCRUB = 0,
+ CXL_FEATURE_ECS,
+ CXL_FEATURE_MAX
+};
+
+/* Get Feature CXL 3.1 Spec 8.2.9.6.2 */
+/*
+ * Get Feature input payload
+ * CXL r3.1 section 8.2.9.6.2 Table 8-99
+ */
+/* Get Feature : Payload in selection */
+enum CXL_GET_FEATURE_SELECTION {
+ CXL_GET_FEATURE_SEL_CURRENT_VALUE,
+ CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
+ CXL_GET_FEATURE_SEL_SAVED_VALUE,
+ CXL_GET_FEATURE_SEL_MAX
+};
+
+/* Set Feature CXL 3.1 Spec 8.2.9.6.3 */
+/*
+ * Set Feature input payload
+ * CXL r3.1 section 8.2.9.6.3 Table 8-101
+ */
+typedef struct CXLSetFeatureInHeader {
+ QemuUUID uuid;
+ uint32_t flags;
+ uint16_t offset;
+ uint8_t version;
+ uint8_t rsvd[9];
+} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;
+
+/* Set Feature : Payload in flags */
+#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
+enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
+ CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
+ CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
+ CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
+ CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
+ CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
+ CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
+};
+#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
+
+/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
+static const QemuUUID patrol_scrub_uuid = {
+ .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
+ 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
+};
+
+typedef struct CXLMemPatrolScrubSetFeature {
+ CXLSetFeatureInHeader hdr;
+ CXLMemPatrolScrubWriteAttrs feat_data;
+} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;
+
+/*
+ * CXL r3.1 section 8.2.9.9.11.2:
+ * DDR5 Error Check Scrub (ECS) Control Feature
+ */
+static const QemuUUID ecs_uuid = {
+ .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
+ 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
+};
+
+typedef struct CXLMemECSSetFeature {
+ CXLSetFeatureInHeader hdr;
+ CXLMemECSWriteAttrs feat_data[];
+} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
+
+/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
+static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint32_t count;
+ uint16_t start_index;
+ uint16_t reserved;
+ } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in;
+
+ struct {
+ CXLSupportedFeatureHeader hdr;
+ CXLSupportedFeatureEntry feat_entries[];
+ } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out;
+ uint16_t index, req_entries;
+ uint16_t entry;
+
+ if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+ if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
+ get_feats_in->start_index >= CXL_FEATURE_MAX) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ req_entries = (get_feats_in->count -
+ sizeof(CXLSupportedFeatureHeader)) /
+ sizeof(CXLSupportedFeatureEntry);
+ req_entries = MIN(req_entries,
+ (CXL_FEATURE_MAX - get_feats_in->start_index));
+
+ for (entry = 0, index = get_feats_in->start_index;
+ entry < req_entries; index++) {
+ switch (index) {
+ case CXL_FEATURE_PATROL_SCRUB:
+ /* Fill supported feature entry for device patrol scrub control */
+ get_feats_out->feat_entries[entry++] =
+ (struct CXLSupportedFeatureEntry) {
+ .uuid = patrol_scrub_uuid,
+ .feat_index = index,
+ .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
+ .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
+ .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
+ .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
+ .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
+ .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
+ CXL_FEAT_ENTRY_SFE_CEL_VALID,
+ };
+ break;
+ case CXL_FEATURE_ECS:
+ /* Fill supported feature entry for device DDR5 ECS control */
+ get_feats_out->feat_entries[entry++] =
+ (struct CXLSupportedFeatureEntry) {
+ .uuid = ecs_uuid,
+ .feat_index = index,
+ .get_feat_size = sizeof(CXLMemECSReadAttrs),
+ .set_feat_size = sizeof(CXLMemECSWriteAttrs),
+ .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
+ .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
+ .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
+ .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
+ CXL_FEAT_ENTRY_SFE_CEL_VALID,
+ };
+ break;
+ default:
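+ /* start_index and req_entries are bounded by CXL_FEATURE_MAX above */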
+ __builtin_unreachable();
+ }
+ }
+ get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
+ get_feats_out->hdr.entries = req_entries;
+ *len_out = sizeof(CXLSupportedFeatureHeader) +
+ req_entries * sizeof(CXLSupportedFeatureEntry);
+
+ return CXL_MBOX_SUCCESS;
+}
+
+/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
+static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ QemuUUID uuid;
+ uint16_t offset;
+ uint16_t count;
+ uint8_t selection;
+ } QEMU_PACKED QEMU_ALIGNED(16) * get_feature;
+ uint16_t bytes_to_copy = 0;
+ CXLType3Dev *ct3d;
+ CXLSetFeatureInfo *set_feat_info;
+
+ if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ ct3d = CXL_TYPE3(cci->d);
+ get_feature = (void *)payload_in;
+
+ set_feat_info = &ct3d->set_feat_info;
+ if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
+ return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
+ }
+
+ if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+ if (get_feature->offset + get_feature->count > cci->payload_max) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
+ if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
+ get_feature->offset;
+ bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
+ memcpy(payload_out,
+ (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
+ bytes_to_copy);
+ } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
+ if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
+ bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
+ memcpy(payload_out,
+ (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
+ bytes_to_copy);
+ } else {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ *len_out = bytes_to_copy;
+
+ return CXL_MBOX_SUCCESS;
+}
+
+/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
+static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ CXLSetFeatureInHeader *hdr = (void *)payload_in;
+ CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
+ CXLMemPatrolScrubSetFeature *ps_set_feature;
+ CXLMemECSWriteAttrs *ecs_write_attrs;
+ CXLMemECSSetFeature *ecs_set_feature;
+ CXLSetFeatureInfo *set_feat_info;
+ uint16_t bytes_to_copy = 0;
+ uint8_t data_transfer_flag;
+ CXLType3Dev *ct3d;
+ uint16_t count;
+
+ if (len_in < sizeof(*hdr)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+ ct3d = CXL_TYPE3(cci->d);
+ set_feat_info = &ct3d->set_feat_info;
+
+ if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
+ !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
+ return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
+ }
+ if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
+ set_feat_info->data_saved_across_reset = true;
+ } else {
+ set_feat_info->data_saved_across_reset = false;
+ }
+
+ data_transfer_flag =
+ hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
+ if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
+ set_feat_info->uuid = hdr->uuid;
+ set_feat_info->data_size = 0;
+ }
+ set_feat_info->data_transfer_flag = data_transfer_flag;
+ set_feat_info->data_offset = hdr->offset;
+ bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);
+
+ if (bytes_to_copy == 0) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
+ if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ ps_set_feature = (void *)payload_in;
+ ps_write_attrs = &ps_set_feature->feat_data;
+
+ if ((uint32_t)hdr->offset + bytes_to_copy >
+ sizeof(ct3d->patrol_scrub_wr_attrs)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+ memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
+ ps_write_attrs,
+ bytes_to_copy);
+ set_feat_info->data_size += bytes_to_copy;
+
+ if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
+ data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
+ ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
+ ct3d->patrol_scrub_attrs.scrub_cycle |=
+ ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
+ ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
+ ct3d->patrol_scrub_attrs.scrub_flags |=
+ ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
+ }
+ } else if (qemu_uuid_is_equal(&hdr->uuid,
+ &ecs_uuid)) {
+ if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ ecs_set_feature = (void *)payload_in;
+ ecs_write_attrs = ecs_set_feature->feat_data;
+
+ if ((uint32_t)hdr->offset + bytes_to_copy >
+ sizeof(ct3d->ecs_wr_attrs)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+ memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
+ ecs_write_attrs,
+ bytes_to_copy);
+ set_feat_info->data_size += bytes_to_copy;
+
+ if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
+ data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
+ ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
+ for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
+ ct3d->ecs_attrs.fru_attrs[count].ecs_config =
+ ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
+ }
+ }
+ } else {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
+ data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
+ data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
+ memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
+ if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
+ memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
+ } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
+ memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
+ }
+ set_feat_info->data_transfer_flag = 0;
+ set_feat_info->data_saved_across_reset = false;
+ set_feat_info->data_offset = 0;
+ set_feat_info->data_size = 0;
+ }
+
+ return CXL_MBOX_SUCCESS;
+}
+
/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
uint8_t *payload_in,
@@ -805,7 +1498,6 @@ static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
}
id = (void *)payload_out;
- memset(id, 0, sizeof(*id));
snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);
@@ -879,7 +1571,7 @@ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
} QEMU_PACKED *get_lsa;
CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
- uint32_t offset, length;
+ uint64_t offset, length;
get_lsa = (void *)payload_in;
offset = get_lsa->offset;
@@ -913,8 +1605,8 @@ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
const size_t hdr_len = offsetof(struct set_lsa_pl, data);
*len_out = 0;
- if (!len_in) {
- return CXL_MBOX_SUCCESS;
+ if (len_in < hdr_len) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
}
if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
@@ -926,6 +1618,97 @@ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */
+static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLAlertConfig *out = (CXLAlertConfig *)payload_out;
+
+ memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
+ *len_out = sizeof(ct3d->alert_config);
+
+ return CXL_MBOX_SUCCESS;
+}
+
+/* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */
+static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLAlertConfig *alert_config = &ct3d->alert_config;
+ struct {
+ uint8_t valid_alert_actions;
+ uint8_t enable_alert_actions;
+ uint8_t life_used_warn_thresh;
+ uint8_t rsvd;
+ uint16_t over_temp_warn_thresh;
+ uint16_t under_temp_warn_thresh;
+ uint16_t cor_vmem_err_warn_thresh;
+ uint16_t cor_pmem_err_warn_thresh;
+ } QEMU_PACKED *in = (void *)payload_in;
+
+ if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
+ /*
+ * CXL r3.2 Table 8-149 The life used warning threshold shall be
+ * less than the life used critical alert value.
+ */
+ if (in->life_used_warn_thresh >=
+ alert_config->life_used_crit_alert_thresh) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
+ alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
+ }
+
+ if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
+ /*
+ * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold
+ * shall be less than the Device Over-Temperature Critical
+ * Alert Threshold.
+ */
+ if (in->over_temp_warn_thresh >=
+ alert_config->over_temp_crit_alert_thresh) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
+ alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
+ }
+
+ if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
+ /*
+ * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold
+ * shall be higher than the Device Under-Temperature Critical
+ * Alert Threshold.
+ */
+ if (in->under_temp_warn_thresh <=
+ alert_config->under_temp_crit_alert_thresh) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
+ alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
+ }
+
+ if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
+ alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
+ alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
+ }
+
+ if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
+ alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
+ alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
+ }
+ return CXL_MBOX_SUCCESS;
+}
+
/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
@@ -953,36 +1736,13 @@ static void __do_sanitization(CXLType3Dev *ct3d)
memset(lsa, 0, memory_region_size(mr));
}
}
+ cxl_discard_all_event_records(&ct3d->cxl_dstate);
}
-/*
- * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
- *
- * Once the Sanitize command has started successfully, the device shall be
- * placed in the media disabled state. If the command fails or is interrupted
- * by a reset or power failure, it shall remain in the media disabled state
- * until a successful Sanitize command has been completed. During this state:
- *
- * 1. Memory writes to the device will have no effect, and all memory reads
- * will return random values (no user data returned, even for locations that
- * the failed Sanitize operation didn’t sanitize yet).
- *
- * 2. Mailbox commands shall still be processed in the disabled state, except
- * that commands that access Sanitized areas shall fail with the Media Disabled
- * error code.
- */
-static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
- uint8_t *payload_in,
- size_t len_in,
- uint8_t *payload_out,
- size_t *len_out,
- CXLCCI *cci)
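+/* Pick an emulated sanitize duration in seconds from the capacity in MiB */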
+static int get_sanitize_duration(uint64_t total_mem)
{
- CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
- uint64_t total_mem; /* in Mb */
- int secs;
+ int secs = 0;
- total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
if (total_mem <= 512) {
secs = 4;
} else if (total_mem <= 1024) {
@@ -1011,6 +1771,39 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
secs = 240 * 60; /* max 4 hrs */
}
+ return secs;
+}
+
+/*
+ * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
+ *
+ * Once the Sanitize command has started successfully, the device shall be
+ * placed in the media disabled state. If the command fails or is interrupted
+ * by a reset or power failure, it shall remain in the media disabled state
+ * until a successful Sanitize command has been completed. During this state:
+ *
+ * 1. Memory writes to the device will have no effect, and all memory reads
+ * will return random values (no user data returned, even for locations that
+ * the failed Sanitize operation didn’t sanitize yet).
+ *
+ * 2. Mailbox commands shall still be processed in the disabled state, except
+ * that commands that access Sanitized areas shall fail with the Media Disabled
+ * error code.
+ */
+static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ uint64_t total_mem; /* in MiB */
+ int secs;
+
+ total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
+ secs = get_sanitize_duration(total_mem);
+
/* EBUSY other bg cmds as of now */
cci->bg.runtime = secs * 1000UL;
*len_out = 0;
@@ -1021,6 +1814,324 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
return CXL_MBOX_BG_STARTED;
}
+struct dpa_range_list_entry {
+ uint64_t starting_dpa;
+ uint64_t length;
+} QEMU_PACKED;
+
+struct CXLSanitizeInfo {
+ uint32_t dpa_range_count;
+ uint8_t fill_value;
+ struct dpa_range_list_entry dpa_range_list[];
+} QEMU_PACKED;
+
+static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
+{
+ MemoryRegion *mr;
+ if (ct3d->hostvmem) {
+ mr = host_memory_backend_get_memory(ct3d->hostvmem);
+ if (vmr) {
+ *vmr = mr;
+ }
+ return memory_region_size(mr);
+ }
+ return 0;
+}
+
+static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
+{
+ MemoryRegion *mr;
+ if (ct3d->hostpmem) {
+ mr = host_memory_backend_get_memory(ct3d->hostpmem);
+ if (pmr) {
+ *pmr = mr;
+ }
+ return memory_region_size(mr);
+ }
+ return 0;
+}
+
+static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
+{
+ MemoryRegion *mr;
+ if (ct3d->dc.host_dc) {
+ mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ if (dc_mr) {
+ *dc_mr = mr;
+ }
+ return memory_region_size(mr);
+ }
+ return 0;
+}
+
+static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
+ size_t length)
+{
+ uint64_t vmr_size, pmr_size, dc_size;
+
+ if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
+ (length % CXL_CACHE_LINE_SIZE) ||
+ (length <= 0)) {
+ return -EINVAL;
+ }
+
+ vmr_size = get_vmr_size(ct3d, NULL);
+ pmr_size = get_pmr_size(ct3d, NULL);
+ dc_size = get_dc_size(ct3d, NULL);
+
+ if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
+ return -EINVAL;
+ }
+
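+ /* ranges starting above the static (volatile + persistent) capacity must be backed by DC extents */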
+ if (dpa_addr > vmr_size + pmr_size) {
+ if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
+ uint8_t fill_value)
+{
+
+ uint64_t vmr_size, pmr_size;
+ AddressSpace *as = NULL;
+ MemTxAttrs mem_attrs = {};
+
+ vmr_size = get_vmr_size(ct3d, NULL);
+ pmr_size = get_pmr_size(ct3d, NULL);
+
+ if (dpa_addr < vmr_size) {
+ as = &ct3d->hostvmem_as;
+ } else if (dpa_addr < vmr_size + pmr_size) {
+ as = &ct3d->hostpmem_as;
+ } else {
+ if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
+ return -ENODEV;
+ }
+ as = &ct3d->dc.host_dc_as;
+ }
+
+ return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
+}
+
+/* Perform the actual device zeroing */
+static void __do_sanitize(CXLType3Dev *ct3d)
+{
+ struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize;
+ int dpa_range_count = san_info->dpa_range_count;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < dpa_range_count; i++) {
+ rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa,
+ san_info->dpa_range_list[i].length,
+ san_info->fill_value);
+ if (rc) {
+ goto exit;
+ }
+ }
+exit:
+ g_free(ct3d->media_op_sanitize);
+ ct3d->media_op_sanitize = NULL;
+ return;
+}
+
+enum {
+ MEDIA_OP_CLASS_GENERAL = 0x0,
+ #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0
+ MEDIA_OP_CLASS_SANITIZE = 0x1,
+ #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0
+ #define MEDIA_OP_SAN_SUBC_ZERO 0x1
+};
+
+struct media_op_supported_list_entry {
+ uint8_t media_op_class;
+ uint8_t media_op_subclass;
+};
+
+struct media_op_discovery_out_pl {
+ uint64_t dpa_range_granularity;
+ uint16_t total_supported_operations;
+ uint16_t num_of_supported_operations;
+ struct media_op_supported_list_entry entry[];
+} QEMU_PACKED;
+
+static const struct media_op_supported_list_entry media_op_matrix[] = {
+ { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY },
+ { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE },
+ { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO },
+};
+
+static CXLRetCode media_operations_discovery(uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out)
+{
+ struct {
+ uint8_t media_operation_class;
+ uint8_t media_operation_subclass;
+ uint8_t rsvd[2];
+ uint32_t dpa_range_count;
+ struct {
+ uint16_t start_index;
+ uint16_t num_ops;
+ } discovery_osa;
+ } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in;
+ struct media_op_discovery_out_pl *media_out_pl =
+ (struct media_op_discovery_out_pl *)payload_out;
+ int num_ops, start_index, i;
+ int count = 0;
+
+ if (len_in < sizeof(*media_op_in_disc_pl)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ num_ops = media_op_in_disc_pl->discovery_osa.num_ops;
+ start_index = media_op_in_disc_pl->discovery_osa.start_index;
+
+ /*
+ * As per spec CXL r3.2 8.2.10.9.5.3 dpa_range_count should be zero and
+ * start index should not exceed the total number of entries for discovery
+ * sub class command.
+ */
+ if (media_op_in_disc_pl->dpa_range_count ||
+ start_index > ARRAY_SIZE(media_op_matrix)) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+
+ media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE;
+ media_out_pl->total_supported_operations =
+ ARRAY_SIZE(media_op_matrix);
+ if (num_ops > 0) {
+ for (i = start_index; i < start_index + num_ops; i++) {
+ media_out_pl->entry[count].media_op_class =
+ media_op_matrix[i].media_op_class;
+ media_out_pl->entry[count].media_op_subclass =
+ media_op_matrix[i].media_op_subclass;
+ count++;
+ if (count == num_ops) {
+ break;
+ }
+ }
+ }
+
+ media_out_pl->num_of_supported_operations = count;
+ *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry);
+ return CXL_MBOX_SUCCESS;
+}
+
+static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ uint8_t fill_value,
+ CXLCCI *cci)
+{
+ struct media_operations_sanitize {
+ uint8_t media_operation_class;
+ uint8_t media_operation_subclass;
+ uint8_t rsvd[2];
+ uint32_t dpa_range_count;
+ struct dpa_range_list_entry dpa_range_list[];
+ } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in;
+ uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count;
+ uint64_t total_mem = 0;
+ size_t dpa_range_list_size;
+ int secs = 0, i;
+
+ if (dpa_range_count == 0) {
+ return CXL_MBOX_SUCCESS;
+ }
+
+ dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry);
+ if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ for (i = 0; i < dpa_range_count; i++) {
+ uint64_t start_dpa =
+ media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa;
+ uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length;
+
+ if (validate_dpa_addr(ct3d, start_dpa, length)) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ total_mem += length;
+ }
+ ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) +
+ dpa_range_list_size);
+
+ ct3d->media_op_sanitize->dpa_range_count = dpa_range_count;
+ ct3d->media_op_sanitize->fill_value = fill_value;
+ memcpy(ct3d->media_op_sanitize->dpa_range_list,
+ media_op_in_sanitize_pl->dpa_range_list,
+ dpa_range_list_size);
+ secs = get_sanitize_duration(total_mem >> 20);
+
+ /* EBUSY other bg cmds as of now */
+ cci->bg.runtime = secs * 1000UL;
+ *len_out = 0;
+ /*
+ * Media Operations sanitize is targeted, so there is no need to
+ * disable the media or clear the event logs.
+ */
+ return CXL_MBOX_BG_STARTED;
+}
+
+static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct {
+ uint8_t media_operation_class;
+ uint8_t media_operation_subclass;
+ uint8_t rsvd[2];
+ uint32_t dpa_range_count;
+ } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in;
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ uint8_t media_op_cl = 0;
+ uint8_t media_op_subclass = 0;
+
+ if (len_in < sizeof(*media_op_in_common_pl)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
+ media_op_cl = media_op_in_common_pl->media_operation_class;
+ media_op_subclass = media_op_in_common_pl->media_operation_subclass;
+
+ switch (media_op_cl) {
+ case MEDIA_OP_CLASS_GENERAL:
+ if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ return media_operations_discovery(payload_in, len_in, payload_out,
+ len_out);
+ case MEDIA_OP_CLASS_SANITIZE:
+ switch (media_op_subclass) {
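+ /* sanitize writes a non-zero fill pattern (0xF); zero writes zeros */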
+ case MEDIA_OP_SAN_SUBC_SANITIZE:
+ return media_operations_sanitize(ct3d, payload_in, len_in,
+ payload_out, len_out, 0xF,
+ cci);
+ case MEDIA_OP_SAN_SUBC_ZERO:
+ return media_operations_sanitize(ct3d, payload_in, len_in,
+ payload_out, len_out, 0,
+ cci);
+ default:
+ return CXL_MBOX_UNSUPPORTED;
+ }
+ default:
+ return CXL_MBOX_UNSUPPORTED;
+ }
+}
+
static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -1086,8 +2197,8 @@ static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
QLIST_FOREACH(ent, poison_list, node) {
/* Check for no overlap */
- if (ent->start >= query_start + query_length ||
- ent->start + ent->length <= query_start) {
+ if (!ranges_overlap(ent->start, ent->length,
+ query_start, query_length)) {
continue;
}
record_count++;
@@ -1095,13 +2206,12 @@ static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
- memset(out, 0, out_pl_len);
QLIST_FOREACH(ent, poison_list, node) {
uint64_t start, stop;
/* Check for no overlap */
- if (ent->start >= query_start + query_length ||
- ent->start + ent->length <= query_start) {
+ if (!ranges_overlap(ent->start, ent->length,
+ query_start, query_length)) {
continue;
}
@@ -1117,6 +2227,10 @@ static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
out->flags = (1 << 1);
stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
}
+ if (scan_media_running(cci)) {
+ out->flags |= (1 << 2);
+ }
+
stw_le_p(&out->count, record_count);
*len_out = out_pl_len;
return CXL_MBOX_SUCCESS;
@@ -1146,6 +2260,16 @@ static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
}
+ /*
+ * Freeze the list if there is an on-going scan media operation.
+ */
+ if (scan_media_running(cci)) {
+ /*
+ * XXX: Spec is ambiguous - is this case considered
+ * a successful return despite not adding to the list?
+ */
+ goto success;
+ }
if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
return CXL_MBOX_INJECT_POISON_LIMIT;
@@ -1161,6 +2285,7 @@ static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
*/
QLIST_INSERT_HEAD(poison_list, p, node);
ct3d->poison_list_cnt++;
+success:
*len_out = 0;
return CXL_MBOX_SUCCESS;
@@ -1200,6 +2325,17 @@ static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
}
}
+ /*
+ * Freeze the list if there is an on-going scan media operation.
+ */
+ if (scan_media_running(cci)) {
+ /*
+ * XXX: Spec is ambiguous - is this case considered
+ * a successful return despite not removing from the list?
+ */
+ goto success;
+ }
+
QLIST_FOREACH(ent, poison_list, node) {
/*
* Test for contained in entry. Simpler than general case
@@ -1210,7 +2346,7 @@ static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
}
}
if (!ent) {
- return CXL_MBOX_SUCCESS;
+ goto success;
}
QLIST_REMOVE(ent, node);
@@ -1247,12 +2383,262 @@ static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
}
/* Any fragments have been added, free original entry */
g_free(ent);
+success:
*len_out = 0;
return CXL_MBOX_SUCCESS;
}
/*
+ * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
+ */
+static CXLRetCode
+cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct get_scan_media_capabilities_pl {
+ uint64_t pa;
+ uint64_t length;
+ } QEMU_PACKED;
+
+ struct get_scan_media_capabilities_out_pl {
+ uint32_t estimated_runtime_ms;
+ };
+
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
+ struct get_scan_media_capabilities_pl *in = (void *)payload_in;
+ struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
+ uint64_t query_start;
+ uint64_t query_length;
+
+ query_start = ldq_le_p(&in->pa);
+ /* 64 byte alignment required */
+ if (query_start & 0x3f) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
+
+ if (query_start + query_length > cxl_dstate->static_mem_size) {
+ return CXL_MBOX_INVALID_PA;
+ }
+
+ /*
+ * Just use 400 nanosecond access/read latency + 100 ns for
+ * the cost of updating the poison list. For small enough
+ * chunks return at least 1 ms.
+ */
+ stl_le_p(&out->estimated_runtime_ms,
+ MAX(1, query_length * (0.0005L / 64)));
+
+ *len_out = sizeof(*out);
+ return CXL_MBOX_SUCCESS;
+}
+
+static void __do_scan_media(CXLType3Dev *ct3d)
+{
+ CXLPoison *ent;
+ unsigned int results_cnt = 0;
+
+ QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
+ results_cnt++;
+ }
+
+ /* only scan media may clear the overflow */
+ if (ct3d->poison_list_overflowed &&
+ ct3d->poison_list_cnt == results_cnt) {
+ cxl_clear_poison_list_overflowed(ct3d);
+ }
+ /* scan media has run since last conventional reset */
+ ct3d->scan_media_hasrun = true;
+}
+
+/*
+ * CXL r3.1 section 8.2.9.9.4.5: Scan Media
+ */
+static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct scan_media_pl {
+ uint64_t pa;
+ uint64_t length;
+ uint8_t flags;
+ } QEMU_PACKED;
+
+ struct scan_media_pl *in = (void *)payload_in;
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
+ uint64_t query_start;
+ uint64_t query_length;
+ CXLPoison *ent, *next;
+
+ query_start = ldq_le_p(&in->pa);
+ /* 64 byte alignment required */
+ if (query_start & 0x3f) {
+ return CXL_MBOX_INVALID_INPUT;
+ }
+ query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
+
+ if (query_start + query_length > cxl_dstate->static_mem_size) {
+ return CXL_MBOX_INVALID_PA;
+ }
+ if (ct3d->dc.num_regions && query_start + query_length >=
+ cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
+ return CXL_MBOX_INVALID_PA;
+ }
+
+ if (in->flags == 0) { /* TODO */
+ qemu_log_mask(LOG_UNIMP,
+ "Scan Media Event Log is unsupported\n");
+ }
+
+ /* any previous results are discarded upon a new Scan Media */
+ QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
+ QLIST_REMOVE(ent, node);
+ g_free(ent);
+ }
+
+ /* kill the poison list - it will be recreated */
+ if (ct3d->poison_list_overflowed) {
+ QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
+ QLIST_REMOVE(ent, node);
+ g_free(ent);
+ ct3d->poison_list_cnt--;
+ }
+ }
+
+ /*
+ * Scan the backup list and move corresponding entries
+ * into the results list, updating the poison list
+ * when possible.
+ */
+ QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
+ CXLPoison *res;
+
+ if (ent->start >= query_start + query_length ||
+ ent->start + ent->length <= query_start) {
+ continue;
+ }
+
+ /*
+ * If a Get Poison List cmd comes in while this
+ * scan is being done, it will see the new complete
+ * list, while setting the respective flag.
+ */
+ if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
+ CXLPoison *p = g_new0(CXLPoison, 1);
+
+ p->start = ent->start;
+ p->length = ent->length;
+ p->type = ent->type;
+ QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
+ ct3d->poison_list_cnt++;
+ }
+
+ res = g_new0(CXLPoison, 1);
+ res->start = ent->start;
+ res->length = ent->length;
+ res->type = ent->type;
+ QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);
+
+ QLIST_REMOVE(ent, node);
+ g_free(ent);
+ }
+
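+ /* reuse the ~500 ns per 64-byte cacheline estimate from Get Scan Media Capabilities */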
+ cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
+ *len_out = 0;
+
+ return CXL_MBOX_BG_STARTED;
+}
+
+/*
+ * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
+ */
+static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
+ uint8_t *payload_in,
+ size_t len_in,
+ uint8_t *payload_out,
+ size_t *len_out,
+ CXLCCI *cci)
+{
+ struct get_scan_media_results_out_pl {
+ uint64_t dpa_restart;
+ uint64_t length;
+ uint8_t flags;
+ uint8_t rsvd1;
+ uint16_t count;
+ uint8_t rsvd2[0xc];
+ struct {
+ uint64_t addr;
+ uint32_t length;
+ uint32_t resv;
+ } QEMU_PACKED records[];
+ } QEMU_PACKED;
+
+ struct get_scan_media_results_out_pl *out = (void *)payload_out;
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
+ CXLPoison *ent, *next;
+ uint16_t total_count = 0, record_count = 0, i = 0;
+ uint16_t out_pl_len;
+
+ if (!ct3d->scan_media_hasrun) {
+ return CXL_MBOX_UNSUPPORTED;
+ }
+
+ /*
+ * Calculate the limits; all entries fall within the address range of
+ * the last Scan Media call.
+ */
+ QLIST_FOREACH(ent, scan_media_results, node) {
+ size_t rec_size = record_count * sizeof(out->records[0]);
+
+ if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
+ record_count++;
+ }
+ total_count++;
+ }
+
+ out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
+ assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
+
+ memset(out, 0, out_pl_len);
+ QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
+ uint64_t start, stop;
+
+ if (i == record_count) {
+ break;
+ }
+
+ start = ROUND_DOWN(ent->start, 64ull);
+ stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
+ stq_le_p(&out->records[i].addr, start);
+ stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
+ i++;
+
+ /* consume the returning entry */
+ QLIST_REMOVE(ent, node);
+ g_free(ent);
+ }
+
+ stw_le_p(&out->count, record_count);
+ if (total_count > record_count) {
+ out->flags = (1 << 0); /* More Media Error Records */
+ }
+
+ *len_out = out_pl_len;
+ return CXL_MBOX_SUCCESS;
+}
+
+/*
* CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
* (Opcode: 4800h)
*/
@@ -1391,6 +2777,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
stw_le_p(&out_rec->shared_seq, ent->shared_seq);
record_done++;
+ out_rec++;
if (record_done == record_count) {
break;
}
@@ -1628,11 +3015,20 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
uint64_t dpa, len;
CXLRetCode ret;
+ if (len_in < sizeof(*in)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
if (in->num_entries_updated == 0) {
cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
return CXL_MBOX_SUCCESS;
}
+ if (len_in <
+ sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
/* Adding extents causes exceeding device's extent tracking ability. */
if (in->num_entries_updated + ct3d->dc.total_extent_count >
CXL_NUM_EXTENTS_SUPPORTED) {
@@ -1787,10 +3183,19 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
uint32_t updated_list_size;
CXLRetCode ret;
+ if (len_in < sizeof(*in)) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
if (in->num_entries_updated == 0) {
return CXL_MBOX_INVALID_INPUT;
}
+ if (len_in <
+ sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
ret = cxl_detect_malformed_extent_list(ct3d, in);
if (ret != CXL_MBOX_SUCCESS) {
return ret;
@@ -1822,40 +3227,66 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
-#define IMMEDIATE_DATA_CHANGE (1 << 2)
-#define IMMEDIATE_POLICY_CHANGE (1 << 3)
-#define IMMEDIATE_LOG_CHANGE (1 << 4)
-#define SECURITY_STATE_CHANGE (1 << 5)
-#define BACKGROUND_OPERATION (1 << 6)
-
static const struct cxl_cmd cxl_cmd_set[256][256] = {
+ [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
+ cmd_infostat_bg_op_abort, 0, 0 },
[EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
cmd_events_get_records, 1, 0 },
[EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
- cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
+ cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
[EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
cmd_events_get_interrupt_policy, 0, 0 },
[EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
cmd_events_set_interrupt_policy,
- ~0, IMMEDIATE_CONFIG_CHANGE },
+ ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
[FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
cmd_firmware_update_get_info, 0, 0 },
+ [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
+ cmd_firmware_update_transfer, ~0,
+ CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
+ [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
+ cmd_firmware_update_activate, 2,
+ CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
[TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
[TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
- 8, IMMEDIATE_POLICY_CHANGE },
+ 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
[LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
0, 0 },
[LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
+ [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
+ cmd_features_get_supported, 0x8, 0 },
+ [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
+ cmd_features_get_feature, 0x15, 0 },
+ [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
+ cmd_features_set_feature,
+ ~0,
+ (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
+ CXL_MBOX_IMMEDIATE_DATA_CHANGE |
+ CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
+ CXL_MBOX_IMMEDIATE_LOG_CHANGE |
+ CXL_MBOX_SECURITY_STATE_CHANGE)},
[IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
cmd_identify_memory_device, 0, 0 },
[CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
cmd_ccls_get_partition_info, 0, 0 },
[CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
[CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
- ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
+ ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
+ [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = {
+ "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG",
+ cmd_get_alert_config, 0, 0 },
+ [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = {
+ "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG",
+ cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
[SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
- IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
+ (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
+ CXL_MBOX_SECURITY_STATE_CHANGE |
+ CXL_MBOX_BACKGROUND_OPERATION |
+ CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
+ [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations,
+ ~0,
+ (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
+ CXL_MBOX_BACKGROUND_OPERATION)},
[PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
cmd_get_security_state, 0, 0 },
[MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
@@ -1864,6 +3295,15 @@ static const struct cxl_cmd cxl_cmd_set[256][256] = {
cmd_media_inject_poison, 8, 0 },
[MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
cmd_media_clear_poison, 72, 0 },
+ [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
+ "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
+ cmd_media_get_scan_media_capabilities, 16, 0 },
+ [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
+ cmd_media_scan_media, 17,
+ (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
+ [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
+ "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
+ cmd_media_get_scan_media_results, 0, 0 },
};
static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
@@ -1874,19 +3314,21 @@ static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
8, 0 },
[DCD_CONFIG][ADD_DYN_CAP_RSP] = {
"DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
- ~0, IMMEDIATE_DATA_CHANGE },
+ ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
[DCD_CONFIG][RELEASE_DYN_CAP] = {
"DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
- ~0, IMMEDIATE_DATA_CHANGE },
+ ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};
static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
[INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
[INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
cmd_infostat_bg_op_sts, 0, 0 },
+ [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
+ cmd_infostat_bg_op_abort, 0, 0 },
[TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
- [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
- IMMEDIATE_POLICY_CHANGE },
+ [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
+ CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
[LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
0 },
[LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
@@ -1913,6 +3355,7 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
int ret;
const struct cxl_cmd *cxl_cmd;
opcode_handler h;
+ CXLDeviceState *cxl_dstate;
*len_out = 0;
cxl_cmd = &cci->cxl_cmd_set[set][cmd];
@@ -1928,28 +3371,34 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
}
/* Only one bg command at a time */
- if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
+ if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
cci->bg.runtime > 0) {
return CXL_MBOX_BUSY;
}
- /* forbid any selected commands while overwriting */
- if (sanitize_running(cci)) {
- if (h == cmd_events_get_records ||
- h == cmd_ccls_get_partition_info ||
- h == cmd_ccls_set_lsa ||
- h == cmd_ccls_get_lsa ||
- h == cmd_logs_get_log ||
- h == cmd_media_get_poison_list ||
- h == cmd_media_inject_poison ||
- h == cmd_media_clear_poison ||
- h == cmd_sanitize_overwrite) {
- return CXL_MBOX_MEDIA_DISABLED;
+ /* forbid any selected commands while the media is disabled */
+ if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
+ cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
+
+ if (cxl_dev_media_disabled(cxl_dstate)) {
+ if (h == cmd_events_get_records ||
+ h == cmd_ccls_get_partition_info ||
+ h == cmd_ccls_set_lsa ||
+ h == cmd_ccls_get_lsa ||
+ h == cmd_logs_get_log ||
+ h == cmd_media_get_poison_list ||
+ h == cmd_media_inject_poison ||
+ h == cmd_media_clear_poison ||
+ h == cmd_sanitize_overwrite ||
+ h == cmd_firmware_update_transfer ||
+ h == cmd_firmware_update_activate) {
+ return CXL_MBOX_MEDIA_DISABLED;
+ }
}
}
ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
- if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
+ if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
ret == CXL_MBOX_BG_STARTED) {
*bg_started = true;
} else {
@@ -1963,6 +3412,7 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
cci->bg.opcode = (set << 8) | cmd;
cci->bg.complete_pct = 0;
+ cci->bg.aborted = false;
cci->bg.ret_code = 0;
now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
@@ -1976,10 +3426,12 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
static void bg_timercb(void *opaque)
{
CXLCCI *cci = opaque;
- uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
- uint64_t total_time = cci->bg.starttime + cci->bg.runtime;
+ uint64_t now, total_time;
- assert(cci->bg.runtime > 0);
+ qemu_mutex_lock(&cci->bg.lock);
+
+ now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+ total_time = cci->bg.starttime + cci->bg.runtime;
if (now >= total_time) { /* we are done */
uint16_t ret = CXL_MBOX_SUCCESS;
@@ -1987,6 +3439,9 @@ static void bg_timercb(void *opaque)
cci->bg.complete_pct = 100;
cci->bg.ret_code = ret;
switch (cci->bg.opcode) {
+ case 0x0201: /* fw transfer */
+ __do_firmware_xfer(cci);
+ break;
case 0x4400: /* sanitize */
{
CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
@@ -1995,15 +3450,27 @@ static void bg_timercb(void *opaque)
cxl_dev_enable_media(&ct3d->cxl_dstate);
}
break;
- case 0x4304: /* TODO: scan media */
+ case 0x4402: /* Media Operations sanitize */
+ {
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ __do_sanitize(ct3d);
+ }
+ break;
+ case 0x4304: /* scan media */
+ {
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+
+ __do_scan_media(ct3d);
break;
+ }
default:
__builtin_unreachable();
break;
}
} else {
/* estimate only */
- cci->bg.complete_pct = 100 * now / total_time;
+ cci->bg.complete_pct =
+ 100 * (now - cci->bg.starttime) / cci->bg.runtime;
timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
}
@@ -2023,6 +3490,8 @@ static void bg_timercb(void *opaque)
msi_notify(pdev, cxl_dstate->mbox_msi_n);
}
}
+
+ qemu_mutex_unlock(&cci->bg.lock);
}
static void cxl_rebuild_cel(CXLCCI *cci)
@@ -2051,8 +3520,21 @@ void cxl_init_cci(CXLCCI *cci, size_t payload_max)
cci->bg.complete_pct = 0;
cci->bg.starttime = 0;
cci->bg.runtime = 0;
+ cci->bg.aborted = false;
cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
bg_timercb, cci);
+ qemu_mutex_init(&cci->bg.lock);
+
+ memset(&cci->fw, 0, sizeof(cci->fw));
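+ /* the device comes up with slot 1 active and populated; the other slot is empty */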
+ cci->fw.active_slot = 1;
+ cci->fw.slot[cci->fw.active_slot - 1] = true;
+ cci->initialized = true;
+}
+
+void cxl_destroy_cci(CXLCCI *cci)
+{
+ qemu_mutex_destroy(&cci->bg.lock);
+ cci->initialized = false;
}
static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
@@ -2116,6 +3598,10 @@ void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
[INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0},
+ [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT",
+ cmd_get_response_msg_limit, 0, 0 },
+ [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT",
+ cmd_set_response_msg_limit, 1, 0 },
[LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
0 },
[LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
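[Editor's note] The two new entries follow the command-table layout used throughout this file: { name, handler, expected input-payload length, effect flags }, with handlers invoked as (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci) as seen at the top of this section (SET_RESPONSE_MSG_LIMIT declares a 1-byte input payload, GET a 0-byte one). Below is a minimal, self-contained sketch of that table/dispatch shape, using simplified stand-in types rather than the real QEMU CXL definitions:

/*
 * Simplified stand-in types and a toy dispatch, illustrating the
 * { name, handler, in-length, effect } table pattern used above.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>   /* ssize_t */

typedef uint16_t RetCode;

struct cci;              /* opaque device-interface context (stand-in) */
struct cmd;
typedef RetCode (*handler_fn)(const struct cmd *cmd,
                              uint8_t *pl_in, size_t len_in,
                              uint8_t *pl_out, size_t *len_out,
                              struct cci *cci);
struct cmd {
    const char *name;
    handler_fn handler;
    ssize_t in;          /* expected input payload length */
    uint16_t effect;     /* effect flags, e.g. background operation */
};

static RetCode cmd_get_limit(const struct cmd *cmd, uint8_t *pl_in,
                             size_t len_in, uint8_t *pl_out, size_t *len_out,
                             struct cci *cci)
{
    (void)cmd; (void)pl_in; (void)len_in; (void)cci;
    pl_out[0] = 8;       /* report a fixed response message limit */
    *len_out = 1;
    return 0;
}

static const struct cmd table[1][1] = {
    [0][0] = { "GET_RESPONSE_MSG_LIMIT", cmd_get_limit, 0, 0 },
};

int main(void)
{
    uint8_t out[16];
    size_t out_len = 0;
    const struct cmd *c = &table[0][0];
    RetCode ret = c->handler(c, NULL, 0, out, &out_len, NULL);

    printf("%s -> ret=%u, %zu byte(s), limit=%u\n",
           c->name, (unsigned)ret, out_len, (unsigned)out[0]);
    return 0;
}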
diff --git a/hw/cxl/switch-mailbox-cci.c b/hw/cxl/switch-mailbox-cci.c
index ba399c6..223f220 100644
--- a/hw/cxl/switch-mailbox-cci.c
+++ b/hw/cxl/switch-mailbox-cci.c
@@ -17,10 +17,12 @@
#include "hw/qdev-properties.h"
#include "hw/cxl/cxl.h"
+#define CXL_SWCCI_MSIX_MBOX 3
+
static void cswmbcci_reset(DeviceState *dev)
{
CSWMBCCIDev *cswmb = CXL_SWITCH_MAILBOX_CCI(dev);
- cxl_device_register_init_swcci(cswmb);
+ cxl_device_register_init_swcci(cswmb, CXL_SWCCI_MSIX_MBOX);
}
static void cswbcci_realize(PCIDevice *pci_dev, Error **errp)
@@ -65,13 +67,12 @@ static void cswmbcci_exit(PCIDevice *pci_dev)
/* Nothing to do here yet */
}
-static Property cxl_switch_cci_props[] = {
+static const Property cxl_switch_cci_props[] = {
DEFINE_PROP_LINK("target", CSWMBCCIDev,
target, TYPE_CXL_USP, PCIDevice *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cswmbcci_class_init(ObjectClass *oc, void *data)
+static void cswmbcci_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -89,7 +90,7 @@ static void cswmbcci_class_init(ObjectClass *oc, void *data)
pc->device_id = 0xa123;
pc->revision = 0;
dc->desc = "CXL Switch Mailbox CCI";
- dc->reset = cswmbcci_reset;
+ device_class_set_legacy_reset(dc, cswmbcci_reset);
device_class_set_props(dc, cxl_switch_cci_props);
}
@@ -98,7 +99,7 @@ static const TypeInfo cswmbcci_info = {
.parent = TYPE_PCI_DEVICE,
.class_init = cswmbcci_class_init,
.instance_size = sizeof(CSWMBCCIDev),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/display/Kconfig b/hw/display/Kconfig
index a4552c8..1e95ab2 100644
--- a/hw/display/Kconfig
+++ b/hw/display/Kconfig
@@ -66,9 +66,6 @@ config BOCHS_DISPLAY
select VGA
select EDID
-config BLIZZARD
- bool
-
config FRAMEBUFFER
bool
@@ -76,7 +73,7 @@ config SM501
bool
select I2C
select DDC
- select SERIAL
+ select SERIAL_MM
select USB_OHCI_SYSBUS
config TCX
@@ -143,3 +140,12 @@ config XLNX_DISPLAYPORT
config DM163
bool
+
+config MAC_PVG_MMIO
+ bool
+ depends on MAC_PVG && AARCH64
+
+config MAC_PVG_PCI
+ bool
+ depends on MAC_PVG && PCI
+ default y if PCI_DEVICES
diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m
new file mode 100644
index 0000000..b0b6e29
--- /dev/null
+++ b/hw/display/apple-gfx-mmio.m
@@ -0,0 +1,285 @@
+/*
+ * QEMU Apple ParavirtualizedGraphics.framework device, MMIO (arm64) variant
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
+ * which implements 3d graphics passthrough to the host as well as a
+ * proprietary guest communication channel to drive it. This device model
+ * implements support to drive that library from within QEMU as an MMIO-based
+ * system device for macOS on arm64 VMs.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "block/aio-wait.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "apple-gfx.h"
+#include "trace.h"
+
+#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
+
+OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXMMIOState, APPLE_GFX_MMIO)
+
+/*
+ * ParavirtualizedGraphics.Framework only ships header files for the PCI
+ * variant which does not include IOSFC descriptors and host devices. We add
+ * their definitions here so that we can also work with the ARM version.
+ */
+typedef bool(^IOSFCRaiseInterrupt)(uint32_t vector);
+typedef bool(^IOSFCUnmapMemory)(void *, void *, void *, void *, void *, void *);
+typedef bool(^IOSFCMapMemory)(uint64_t phys, uint64_t len, bool ro, void **va,
+ void *, void *);
+
+@interface PGDeviceDescriptor (IOSurfaceMapper)
+@property (readwrite, nonatomic) bool usingIOSurfaceMapper;
+@end
+
+@interface PGIOSurfaceHostDeviceDescriptor : NSObject
+-(PGIOSurfaceHostDeviceDescriptor *)init;
+@property (readwrite, nonatomic, copy, nullable) IOSFCMapMemory mapMemory;
+@property (readwrite, nonatomic, copy, nullable) IOSFCUnmapMemory unmapMemory;
+@property (readwrite, nonatomic, copy, nullable) IOSFCRaiseInterrupt raiseInterrupt;
+@end
+
+@interface PGIOSurfaceHostDevice : NSObject
+-(instancetype)initWithDescriptor:(PGIOSurfaceHostDeviceDescriptor *)desc;
+-(uint32_t)mmioReadAtOffset:(size_t)offset;
+-(void)mmioWriteAtOffset:(size_t)offset value:(uint32_t)value;
+@end
+
+struct AppleGFXMapSurfaceMemoryJob;
+struct AppleGFXMMIOState {
+ SysBusDevice parent_obj;
+
+ AppleGFXState common;
+
+ qemu_irq irq_gfx;
+ qemu_irq irq_iosfc;
+ MemoryRegion iomem_iosfc;
+ PGIOSurfaceHostDevice *pgiosfc;
+};
+
+typedef struct AppleGFXMMIOJob {
+ AppleGFXMMIOState *state;
+ uint64_t offset;
+ uint64_t value;
+ bool completed;
+} AppleGFXMMIOJob;
+
+static void iosfc_do_read(void *opaque)
+{
+ AppleGFXMMIOJob *job = opaque;
+ job->value = [job->state->pgiosfc mmioReadAtOffset:job->offset];
+ qatomic_set(&job->completed, true);
+ aio_wait_kick();
+}
+
+static uint64_t iosfc_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AppleGFXMMIOJob job = {
+ .state = opaque,
+ .offset = offset,
+ .completed = false,
+ };
+ dispatch_queue_t queue =
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+
+ dispatch_async_f(queue, &job, iosfc_do_read);
+ AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));
+
+ trace_apple_gfx_mmio_iosfc_read(offset, job.value);
+ return job.value;
+}
+
+static void iosfc_do_write(void *opaque)
+{
+ AppleGFXMMIOJob *job = opaque;
+ [job->state->pgiosfc mmioWriteAtOffset:job->offset value:job->value];
+ qatomic_set(&job->completed, true);
+ aio_wait_kick();
+}
+
+static void iosfc_write(void *opaque, hwaddr offset, uint64_t val,
+ unsigned size)
+{
+ AppleGFXMMIOJob job = {
+ .state = opaque,
+ .offset = offset,
+ .value = val,
+ .completed = false,
+ };
+ dispatch_queue_t queue =
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+
+ dispatch_async_f(queue, &job, iosfc_do_write);
+ AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));
+
+ trace_apple_gfx_mmio_iosfc_write(offset, val);
+}
+
+static const MemoryRegionOps apple_iosfc_ops = {
+ .read = iosfc_read,
+ .write = iosfc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+};
+
+static void raise_irq_bh(void *opaque)
+{
+ qemu_irq *irq = opaque;
+
+ qemu_irq_pulse(*irq);
+}
+
+static void *apple_gfx_mmio_map_surface_memory(uint64_t guest_physical_address,
+ uint64_t length, bool read_only)
+{
+ void *mem;
+ MemoryRegion *region = NULL;
+
+ RCU_READ_LOCK_GUARD();
+ mem = apple_gfx_host_ptr_for_gpa_range(guest_physical_address,
+ length, read_only, &region);
+ if (mem) {
+ memory_region_ref(region);
+ }
+ return mem;
+}
+
+static bool apple_gfx_mmio_unmap_surface_memory(void *ptr)
+{
+ MemoryRegion *region;
+ ram_addr_t offset = 0;
+
+ RCU_READ_LOCK_GUARD();
+ region = memory_region_from_host(ptr, &offset);
+ if (!region) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: memory at %p to be unmapped not found.\n",
+ __func__, ptr);
+ return false;
+ }
+
+ trace_apple_gfx_iosfc_unmap_memory_region(ptr, region);
+ memory_region_unref(region);
+ return true;
+}
+
+static PGIOSurfaceHostDevice *apple_gfx_prepare_iosurface_host_device(
+ AppleGFXMMIOState *s)
+{
+ PGIOSurfaceHostDeviceDescriptor *iosfc_desc =
+ [PGIOSurfaceHostDeviceDescriptor new];
+ PGIOSurfaceHostDevice *iosfc_host_dev;
+
+ iosfc_desc.mapMemory =
+ ^bool(uint64_t phys, uint64_t len, bool ro, void **va, void *e, void *f) {
+ *va = apple_gfx_mmio_map_surface_memory(phys, len, ro);
+
+ trace_apple_gfx_iosfc_map_memory(phys, len, ro, va, e, f, *va);
+
+ return *va != NULL;
+ };
+
+ iosfc_desc.unmapMemory =
+ ^bool(void *va, void *b, void *c, void *d, void *e, void *f) {
+ return apple_gfx_mmio_unmap_surface_memory(va);
+ };
+
+ iosfc_desc.raiseInterrupt = ^bool(uint32_t vector) {
+ trace_apple_gfx_iosfc_raise_irq(vector);
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ raise_irq_bh, &s->irq_iosfc);
+ return true;
+ };
+
+ iosfc_host_dev =
+ [[PGIOSurfaceHostDevice alloc] initWithDescriptor:iosfc_desc];
+ [iosfc_desc release];
+ return iosfc_host_dev;
+}
+
+static void apple_gfx_mmio_realize(DeviceState *dev, Error **errp)
+{
+ @autoreleasepool {
+ AppleGFXMMIOState *s = APPLE_GFX_MMIO(dev);
+ PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
+
+ desc.raiseInterrupt = ^(uint32_t vector) {
+ trace_apple_gfx_raise_irq(vector);
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ raise_irq_bh, &s->irq_gfx);
+ };
+
+ desc.usingIOSurfaceMapper = true;
+ s->pgiosfc = apple_gfx_prepare_iosurface_host_device(s);
+
+ if (!apple_gfx_common_realize(&s->common, dev, desc, errp)) {
+ [s->pgiosfc release];
+ s->pgiosfc = nil;
+ }
+
+ [desc release];
+ desc = nil;
+ }
+}
+
+static void apple_gfx_mmio_init(Object *obj)
+{
+ AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj);
+
+ apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_MMIO);
+
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->common.iomem_gfx);
+ memory_region_init_io(&s->iomem_iosfc, obj, &apple_iosfc_ops, s,
+ TYPE_APPLE_GFX_MMIO, 0x10000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem_iosfc);
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_gfx);
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_iosfc);
+}
+
+static void apple_gfx_mmio_reset(Object *obj, ResetType type)
+{
+ AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj);
+ [s->common.pgdev reset];
+}
+
+static const Property apple_gfx_mmio_properties[] = {
+ DEFINE_PROP_ARRAY("display-modes", AppleGFXMMIOState,
+ common.num_display_modes, common.display_modes,
+ qdev_prop_apple_gfx_display_mode, AppleGFXDisplayMode),
+};
+
+static void apple_gfx_mmio_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = apple_gfx_mmio_reset;
+ dc->hotpluggable = false;
+ dc->realize = apple_gfx_mmio_realize;
+
+ device_class_set_props(dc, apple_gfx_mmio_properties);
+}
+
+static const TypeInfo apple_gfx_mmio_types[] = {
+ {
+ .name = TYPE_APPLE_GFX_MMIO,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AppleGFXMMIOState),
+ .class_init = apple_gfx_mmio_class_init,
+ .instance_init = apple_gfx_mmio_init,
+ }
+};
+DEFINE_TYPES(apple_gfx_mmio_types)
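[Editor's note] Both register windows in this file (iosfc_read()/iosfc_write() above, like the common apple_gfx_read()/apple_gfx_write() in apple-gfx.m below) push the framework call onto a libdispatch global queue and then wait for completion, using AIO_WAIT_WHILE() so QEMU keeps servicing events meanwhile. A stripped-down, standalone sketch of that hand-off (macOS only, not QEMU code: a busy-wait on an atomic flag stands in for AIO_WAIT_WHILE(), and the MMIO read is simulated):

/*
 * Standalone sketch of dispatching a blocking "register read" onto a
 * global dispatch queue and waiting for its completion flag.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>
#include <dispatch/dispatch.h>

typedef struct {
    uint64_t offset;
    uint64_t value;
    atomic_bool completed;
} Job;

static void do_read(void *opaque)
{
    Job *job = opaque;

    job->value = 0x1234 + job->offset;  /* stand-in for the real MMIO read */
    atomic_store(&job->completed, true);
}

int main(void)
{
    Job job = { .offset = 8, .completed = false };
    dispatch_queue_t queue =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

    dispatch_async_f(queue, &job, do_read);
    while (!atomic_load(&job.completed)) {
        /* QEMU instead uses AIO_WAIT_WHILE() so the main loop keeps running */
    }
    printf("read value 0x%llx\n", (unsigned long long)job.value);
    return 0;
}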
diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m
new file mode 100644
index 0000000..b0694f4
--- /dev/null
+++ b/hw/display/apple-gfx-pci.m
@@ -0,0 +1,157 @@
+/*
+ * QEMU Apple ParavirtualizedGraphics.framework device, PCI variant
+ *
+ * Copyright © 2023-2024 Phil Dennis-Jordan
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
+ * which implements 3d graphics passthrough to the host as well as a
+ * proprietary guest communication channel to drive it. This device model
+ * implements support to drive that library from within QEMU as a PCI device
+ * aimed primarily at x86-64 macOS VMs.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci_device.h"
+#include "hw/pci/msi.h"
+#include "apple-gfx.h"
+#include "trace.h"
+
+#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
+
+OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXPCIState, APPLE_GFX_PCI)
+
+struct AppleGFXPCIState {
+ PCIDevice parent_obj;
+
+ AppleGFXState common;
+};
+
+static const char *apple_gfx_pci_option_rom_path = NULL;
+
+static void apple_gfx_init_option_rom_path(void)
+{
+ NSURL *option_rom_url = PGCopyOptionROMURL();
+ const char *option_rom_path = option_rom_url.fileSystemRepresentation;
+ apple_gfx_pci_option_rom_path = g_strdup(option_rom_path);
+ [option_rom_url release];
+}
+
+static void apple_gfx_pci_init(Object *obj)
+{
+ AppleGFXPCIState *s = APPLE_GFX_PCI(obj);
+
+ if (!apple_gfx_pci_option_rom_path) {
+ /*
+ * The following is done on device not class init to avoid running
+ * ObjC code before fork() in -daemonize mode.
+ */
+ PCIDeviceClass *pci = PCI_DEVICE_CLASS(object_get_class(obj));
+ apple_gfx_init_option_rom_path();
+ pci->romfile = apple_gfx_pci_option_rom_path;
+ }
+
+ apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_PCI);
+}
+
+typedef struct AppleGFXPCIInterruptJob {
+ PCIDevice *device;
+ uint32_t vector;
+} AppleGFXPCIInterruptJob;
+
+static void apple_gfx_pci_raise_interrupt(void *opaque)
+{
+ AppleGFXPCIInterruptJob *job = opaque;
+
+ if (msi_enabled(job->device)) {
+ msi_notify(job->device, job->vector);
+ }
+ g_free(job);
+}
+
+static void apple_gfx_pci_interrupt(PCIDevice *dev, uint32_t vector)
+{
+ AppleGFXPCIInterruptJob *job;
+
+ trace_apple_gfx_raise_irq(vector);
+ job = g_malloc0(sizeof(*job));
+ job->device = dev;
+ job->vector = vector;
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ apple_gfx_pci_raise_interrupt, job);
+}
+
+static void apple_gfx_pci_realize(PCIDevice *dev, Error **errp)
+{
+ AppleGFXPCIState *s = APPLE_GFX_PCI(dev);
+ int ret;
+
+ pci_register_bar(dev, PG_PCI_BAR_MMIO,
+ PCI_BASE_ADDRESS_SPACE_MEMORY, &s->common.iomem_gfx);
+
+ ret = msi_init(dev, 0x0 /* config offset; 0 = find space */,
+ PG_PCI_MAX_MSI_VECTORS, true /* msi64bit */,
+ false /* msi_per_vector_mask */, errp);
+ if (ret != 0) {
+ return;
+ }
+
+ @autoreleasepool {
+ PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
+ desc.raiseInterrupt = ^(uint32_t vector) {
+ apple_gfx_pci_interrupt(dev, vector);
+ };
+
+ apple_gfx_common_realize(&s->common, DEVICE(dev), desc, errp);
+ [desc release];
+ desc = nil;
+ }
+}
+
+static void apple_gfx_pci_reset(Object *obj, ResetType type)
+{
+ AppleGFXPCIState *s = APPLE_GFX_PCI(obj);
+ [s->common.pgdev reset];
+}
+
+static const Property apple_gfx_pci_properties[] = {
+ DEFINE_PROP_ARRAY("display-modes", AppleGFXPCIState,
+ common.num_display_modes, common.display_modes,
+ qdev_prop_apple_gfx_display_mode, AppleGFXDisplayMode),
+};
+
+static void apple_gfx_pci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = apple_gfx_pci_reset;
+ dc->desc = "macOS Paravirtualized Graphics PCI Display Controller";
+ dc->hotpluggable = false;
+ set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+
+ pci->vendor_id = PG_PCI_VENDOR_ID;
+ pci->device_id = PG_PCI_DEVICE_ID;
+ pci->class_id = PCI_CLASS_DISPLAY_OTHER;
+ pci->realize = apple_gfx_pci_realize;
+
+ device_class_set_props(dc, apple_gfx_pci_properties);
+}
+
+static const TypeInfo apple_gfx_pci_types[] = {
+ {
+ .name = TYPE_APPLE_GFX_PCI,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(AppleGFXPCIState),
+ .class_init = apple_gfx_pci_class_init,
+ .instance_init = apple_gfx_pci_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { },
+ },
+ }
+};
+DEFINE_TYPES(apple_gfx_pci_types)
+
diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h
new file mode 100644
index 0000000..a8b1d1e
--- /dev/null
+++ b/hw/display/apple-gfx.h
@@ -0,0 +1,74 @@
+/*
+ * Data structures and functions shared between variants of the macOS
+ * ParavirtualizedGraphics.framework based apple-gfx display adapter.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_APPLE_GFX_H
+#define QEMU_APPLE_GFX_H
+
+#include "qemu/queue.h"
+#include "system/memory.h"
+#include "hw/qdev-properties.h"
+#include "ui/surface.h"
+
+#define TYPE_APPLE_GFX_MMIO "apple-gfx-mmio"
+#define TYPE_APPLE_GFX_PCI "apple-gfx-pci"
+
+@class PGDeviceDescriptor;
+@protocol PGDevice;
+@protocol PGDisplay;
+@protocol MTLDevice;
+@protocol MTLTexture;
+@protocol MTLCommandQueue;
+
+typedef QTAILQ_HEAD(, PGTask_s) PGTaskList;
+
+typedef struct AppleGFXDisplayMode {
+ uint16_t width_px;
+ uint16_t height_px;
+ uint16_t refresh_rate_hz;
+} AppleGFXDisplayMode;
+
+typedef struct AppleGFXState {
+ /* Initialised on init/realize() */
+ MemoryRegion iomem_gfx;
+ id<PGDevice> pgdev;
+ id<PGDisplay> pgdisp;
+ QemuConsole *con;
+ id<MTLDevice> mtl;
+ id<MTLCommandQueue> mtl_queue;
+ AppleGFXDisplayMode *display_modes;
+ uint32_t num_display_modes;
+
+ /* List `tasks` is protected by task_mutex */
+ QemuMutex task_mutex;
+ PGTaskList tasks;
+
+ /* Mutable state (BQL protected) */
+ QEMUCursor *cursor;
+ DisplaySurface *surface;
+ id<MTLTexture> texture;
+ int8_t pending_frames; /* # guest frames in the rendering pipeline */
+ bool gfx_update_requested; /* QEMU display system wants a new frame */
+ bool new_frame_ready; /* Guest has rendered a frame, ready to be used */
+ bool using_managed_texture_storage;
+ uint32_t rendering_frame_width;
+ uint32_t rendering_frame_height;
+
+ /* Mutable state (atomic) */
+ bool cursor_show;
+} AppleGFXState;
+
+void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name);
+bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
+ PGDeviceDescriptor *desc, Error **errp);
+void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
+ uint64_t length, bool read_only,
+ MemoryRegion **mapping_in_region);
+
+extern const PropertyInfo qdev_prop_apple_gfx_display_mode;
+
+#endif
+
diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m
new file mode 100644
index 0000000..174d56a
--- /dev/null
+++ b/hw/display/apple-gfx.m
@@ -0,0 +1,880 @@
+/*
+ * QEMU Apple ParavirtualizedGraphics.framework device
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
+ * which implements 3d graphics passthrough to the host as well as a
+ * proprietary guest communication channel to drive it. This device model
+ * implements support to drive that library from within QEMU.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/lockable.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qapi/visitor.h"
+#include "qapi/error.h"
+#include "block/aio-wait.h"
+#include "system/address-spaces.h"
+#include "system/dma.h"
+#include "migration/blocker.h"
+#include "ui/console.h"
+#include "apple-gfx.h"
+#include "trace.h"
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <dispatch/dispatch.h>
+
+#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
+
+static const AppleGFXDisplayMode apple_gfx_default_modes[] = {
+ { 1920, 1080, 60 },
+ { 1440, 1080, 60 },
+ { 1280, 1024, 60 },
+};
+
+static Error *apple_gfx_mig_blocker;
+static uint32_t next_pgdisplay_serial_num = 1;
+
+static dispatch_queue_t get_background_queue(void)
+{
+ return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+}
+
+/* ------ PGTask and task operations: new/destroy/map/unmap ------ */
+
+/*
+ * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h>
+ * which is opaque from the framework's point of view. It is used in callbacks
+ * in the form of its typedef PGTask_t, which also already exists in the
+ * framework headers.
+ *
+ * A "task" in PVG terminology represents a host-virtual contiguous address
+ * range which is reserved in a large chunk on task creation. The mapMemory
+ * callback then requests ranges of guest system memory (identified by their
+ * GPA) to be mapped into subranges of this reserved address space.
+ * This type of operation isn't well-supported by QEMU's memory subsystem,
+ * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call,
+ * which allows us to refer to the same backing memory via multiple virtual
+ * address ranges. The Mach VM APIs are therefore used throughout for managing
+ * task memory.
+ */
+struct PGTask_s {
+ QTAILQ_ENTRY(PGTask_s) node;
+ AppleGFXState *s;
+ mach_vm_address_t address;
+ uint64_t len;
+ /*
+ * All unique MemoryRegions for which a mapping has been created in this
+ * task, and on which we have thus called memory_region_ref(). There are
+ * usually very few regions of system RAM in total, so we expect this array
+ * to be very short. Therefore, no need for sorting or fancy search
+ * algorithms, linear search will do.
+ * Protected by AppleGFXState's task_mutex.
+ */
+ GPtrArray *mapped_regions;
+};
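[Editor's note] The comment above notes that mach_vm_remap() lets the same backing memory be reached through multiple virtual address ranges; apple_gfx_task_map_memory() further down remaps guest RAM into the reserved task range this way. A standalone, macOS-only sketch (not QEMU code; it uses VM_FLAGS_ANYWHERE instead of the FIXED|OVERWRITE remap into a pre-reserved range used below) demonstrates the aliasing effect:

/*
 * macOS-only demonstration: after mach_vm_remap(), two distinct virtual
 * addresses refer to the same backing pages.
 */
#include <stdio.h>
#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int main(void)
{
    mach_vm_address_t src = 0, alias = 0;
    mach_vm_size_t len = vm_page_size;
    vm_prot_t cur = 0, max = 0;
    kern_return_t kr;

    kr = mach_vm_allocate(mach_task_self(), &src, len, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }
    *(volatile uint32_t *)(uintptr_t)src = 0x12345678;

    /* Create a second mapping of the same pages at a new address. */
    kr = mach_vm_remap(mach_task_self(), &alias, len, 0, VM_FLAGS_ANYWHERE,
                       mach_task_self(), src, FALSE /* share, don't copy */,
                       &cur, &max, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }

    /* Writes through one mapping are visible through the other. */
    *(volatile uint32_t *)(uintptr_t)alias = 0xcafebabe;
    printf("src=0x%llx alias=0x%llx value=0x%x\n",
           (unsigned long long)src, (unsigned long long)alias,
           *(volatile uint32_t *)(uintptr_t)src);

    mach_vm_deallocate(mach_task_self(), alias, len);
    mach_vm_deallocate(mach_task_self(), src, len);
    return 0;
}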
+
+static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len)
+{
+ mach_vm_address_t task_mem;
+ PGTask_t *task;
+ kern_return_t r;
+
+ r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE);
+ if (r != KERN_SUCCESS) {
+ return NULL;
+ }
+
+ task = g_new0(PGTask_t, 1);
+ task->s = s;
+ task->address = task_mem;
+ task->len = len;
+ task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */);
+
+ QEMU_LOCK_GUARD(&s->task_mutex);
+ QTAILQ_INSERT_TAIL(&s->tasks, task, node);
+
+ return task;
+}
+
+static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task)
+{
+ GPtrArray *regions = task->mapped_regions;
+ MemoryRegion *region;
+ size_t i;
+
+ for (i = 0; i < regions->len; ++i) {
+ region = g_ptr_array_index(regions, i);
+ memory_region_unref(region);
+ }
+ g_ptr_array_unref(regions);
+
+ mach_vm_deallocate(mach_task_self(), task->address, task->len);
+
+ QEMU_LOCK_GUARD(&s->task_mutex);
+ QTAILQ_REMOVE(&s->tasks, task, node);
+ g_free(task);
+}
+
+void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
+ uint64_t length, bool read_only,
+ MemoryRegion **mapping_in_region)
+{
+ MemoryRegion *ram_region;
+ char *host_ptr;
+ hwaddr ram_region_offset = 0;
+ hwaddr ram_region_length = length;
+
+ ram_region = address_space_translate(&address_space_memory,
+ guest_physical,
+ &ram_region_offset,
+ &ram_region_length, !read_only,
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (!ram_region || ram_region_length < length ||
+ !memory_access_is_direct(ram_region, !read_only,
+ MEMTXATTRS_UNSPECIFIED)) {
+ return NULL;
+ }
+
+ host_ptr = memory_region_get_ram_ptr(ram_region);
+ if (!host_ptr) {
+ return NULL;
+ }
+ host_ptr += ram_region_offset;
+ *mapping_in_region = ram_region;
+ return host_ptr;
+}
+
+static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task,
+ uint64_t virtual_offset,
+ PGPhysicalMemoryRange_t *ranges,
+ uint32_t range_count, bool read_only)
+{
+ kern_return_t r;
+ void *source_ptr;
+ mach_vm_address_t target;
+ vm_prot_t cur_protection, max_protection;
+ bool success = true;
+ MemoryRegion *region;
+
+ RCU_READ_LOCK_GUARD();
+ QEMU_LOCK_GUARD(&s->task_mutex);
+
+ trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only);
+ for (int i = 0; i < range_count; i++) {
+ PGPhysicalMemoryRange_t *range = &ranges[i];
+
+ target = task->address + virtual_offset;
+ virtual_offset += range->physicalLength;
+
+ trace_apple_gfx_map_memory_range(i, range->physicalAddress,
+ range->physicalLength);
+
+ region = NULL;
+ source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress,
+ range->physicalLength,
+ read_only, &region);
+ if (!source_ptr) {
+ success = false;
+ continue;
+ }
+
+ if (!g_ptr_array_find(task->mapped_regions, region, NULL)) {
+ g_ptr_array_add(task->mapped_regions, region);
+ memory_region_ref(region);
+ }
+
+ cur_protection = 0;
+ max_protection = 0;
+ /* Map guest RAM at range->physicalAddress into PG task memory range */
+ r = mach_vm_remap(mach_task_self(),
+ &target, range->physicalLength, vm_page_size - 1,
+ VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ mach_task_self(), (mach_vm_address_t)source_ptr,
+ false /* shared mapping, no copy */,
+ &cur_protection, &max_protection,
+ VM_INHERIT_COPY);
+ trace_apple_gfx_remap(r, source_ptr, target);
+ g_assert(r == KERN_SUCCESS);
+ }
+
+ return success;
+}
+
+static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task,
+ uint64_t virtual_offset, uint64_t length)
+{
+ kern_return_t r;
+ mach_vm_address_t range_address;
+
+ trace_apple_gfx_unmap_memory(task, virtual_offset, length);
+
+ /*
+ * Replace task memory range with fresh 0 pages, undoing the mapping
+ * from guest RAM.
+ */
+ range_address = task->address + virtual_offset;
+ r = mach_vm_allocate(mach_task_self(), &range_address, length,
+ VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
+ g_assert(r == KERN_SUCCESS);
+}
+
+/* ------ Rendering and frame management ------ */
+
+static void apple_gfx_render_frame_completed_bh(void *opaque);
+
+static void apple_gfx_render_new_frame(AppleGFXState *s)
+{
+ bool managed_texture = s->using_managed_texture_storage;
+ uint32_t width = surface_width(s->surface);
+ uint32_t height = surface_height(s->surface);
+ MTLRegion region = MTLRegionMake2D(0, 0, width, height);
+ id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer];
+ id<MTLTexture> texture = s->texture;
+
+ assert(bql_locked());
+ [texture retain];
+ [command_buffer retain];
+
+ s->rendering_frame_width = width;
+ s->rendering_frame_height = height;
+
+ dispatch_async(get_background_queue(), ^{
+ /*
+ * This is not safe to call from the BQL/BH due to PVG-internal locks
+ * causing deadlocks.
+ */
+ bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer
+ texture:texture
+ region:region];
+ if (!r) {
+ [texture release];
+ [command_buffer release];
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: encodeCurrentFrameToCommandBuffer:texture:region: "
+ "failed\n", __func__);
+ bql_lock();
+ --s->pending_frames;
+ if (s->pending_frames > 0) {
+ apple_gfx_render_new_frame(s);
+ }
+ bql_unlock();
+ return;
+ }
+
+ if (managed_texture) {
+ /* "Managed" textures exist in both VRAM and RAM and must be synced. */
+ id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder];
+ [blit synchronizeResource:texture];
+ [blit endEncoding];
+ }
+ [texture release];
+ [command_buffer addCompletedHandler:
+ ^(id<MTLCommandBuffer> cb)
+ {
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ apple_gfx_render_frame_completed_bh, s);
+ }];
+ [command_buffer commit];
+ [command_buffer release];
+ });
+}
+
+static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram)
+{
+ /*
+ * TODO: Skip this entirely on a pure Metal or headless/guest-only
+ * rendering path, else use a blit command encoder? Needs careful
+ * (double?) buffering design.
+ */
+ size_t width = texture.width, height = texture.height;
+ MTLRegion region = MTLRegionMake2D(0, 0, width, height);
+ [texture getBytes:vram
+ bytesPerRow:(width * 4)
+ bytesPerImage:(width * height * 4)
+ fromRegion:region
+ mipmapLevel:0
+ slice:0];
+}
+
+static void apple_gfx_render_frame_completed_bh(void *opaque)
+{
+ AppleGFXState *s = opaque;
+
+ @autoreleasepool {
+ --s->pending_frames;
+ assert(s->pending_frames >= 0);
+
+ /* Only update display if mode hasn't changed since we started rendering. */
+ if (s->rendering_frame_width == surface_width(s->surface) &&
+ s->rendering_frame_height == surface_height(s->surface)) {
+ copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface));
+ if (s->gfx_update_requested) {
+ s->gfx_update_requested = false;
+ dpy_gfx_update_full(s->con);
+ graphic_hw_update_done(s->con);
+ s->new_frame_ready = false;
+ } else {
+ s->new_frame_ready = true;
+ }
+ }
+ if (s->pending_frames > 0) {
+ apple_gfx_render_new_frame(s);
+ }
+ }
+}
+
+static void apple_gfx_fb_update_display(void *opaque)
+{
+ AppleGFXState *s = opaque;
+
+ assert(bql_locked());
+ if (s->new_frame_ready) {
+ dpy_gfx_update_full(s->con);
+ s->new_frame_ready = false;
+ graphic_hw_update_done(s->con);
+ } else if (s->pending_frames > 0) {
+ s->gfx_update_requested = true;
+ } else {
+ graphic_hw_update_done(s->con);
+ }
+}
+
+static const GraphicHwOps apple_gfx_fb_ops = {
+ .gfx_update = apple_gfx_fb_update_display,
+ .gfx_update_async = true,
+};
+
+/* ------ Mouse cursor and display mode setting ------ */
+
+static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height)
+{
+ MTLTextureDescriptor *textureDescriptor;
+
+ if (s->surface &&
+ width == surface_width(s->surface) &&
+ height == surface_height(s->surface)) {
+ return;
+ }
+
+ [s->texture release];
+
+ s->surface = qemu_create_displaysurface(width, height);
+
+ @autoreleasepool {
+ textureDescriptor =
+ [MTLTextureDescriptor
+ texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
+ width:width
+ height:height
+ mipmapped:NO];
+ textureDescriptor.usage = s->pgdisp.minimumTextureUsage;
+ s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor];
+ s->using_managed_texture_storage =
+ (s->texture.storageMode == MTLStorageModeManaged);
+ }
+
+ dpy_gfx_replace_surface(s->con, s->surface);
+}
+
+static void update_cursor(AppleGFXState *s)
+{
+ assert(bql_locked());
+ dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x,
+ s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show));
+}
+
+static void update_cursor_bh(void *opaque)
+{
+ AppleGFXState *s = opaque;
+ update_cursor(s);
+}
+
+typedef struct AppleGFXSetCursorGlyphJob {
+ AppleGFXState *s;
+ NSBitmapImageRep *glyph;
+ PGDisplayCoord_t hotspot;
+} AppleGFXSetCursorGlyphJob;
+
+static void set_cursor_glyph(void *opaque)
+{
+ AppleGFXSetCursorGlyphJob *job = opaque;
+ AppleGFXState *s = job->s;
+ NSBitmapImageRep *glyph = job->glyph;
+ uint32_t bpp = glyph.bitsPerPixel;
+ size_t width = glyph.pixelsWide;
+ size_t height = glyph.pixelsHigh;
+ size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4;
+ const uint8_t* px_data = glyph.bitmapData;
+
+ trace_apple_gfx_cursor_set(bpp, width, height);
+
+ if (s->cursor) {
+ cursor_unref(s->cursor);
+ s->cursor = NULL;
+ }
+
+ if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... */
+ s->cursor = cursor_alloc(width, height);
+ s->cursor->hot_x = job->hotspot.x;
+ s->cursor->hot_y = job->hotspot.y;
+
+ uint32_t *dest_px = s->cursor->data;
+
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ /*
+ * NSBitmapImageRep's red & blue channels are swapped
+ * compared to QEMUCursor's.
+ */
+ *dest_px =
+ (px_data[0] << 16u) |
+ (px_data[1] << 8u) |
+ (px_data[2] << 0u) |
+ (px_data[3] << 24u);
+ ++dest_px;
+ px_data += 4;
+ }
+ px_data += padding_bytes_per_row;
+ }
+ dpy_cursor_define(s->con, s->cursor);
+ update_cursor(s);
+ }
+ [glyph release];
+
+ g_free(job);
+}
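[Editor's note] The inner loop above repacks NSBitmapImageRep's RGBA byte order into the 32-bit words stored in QEMUCursor, with alpha moved to the top byte. A standalone worked example with one made-up pixel:

/* Standalone check of the channel repacking done in set_cursor_glyph(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t px[4] = { 0x11, 0x22, 0x33, 0x44 };   /* R, G, B, A */
    uint32_t packed = (px[0] << 16u) | (px[1] << 8u) | (px[2] << 0u) |
                      ((uint32_t)px[3] << 24u);

    printf("0x%08x\n", packed);   /* prints 0x44112233: A=44 R=11 G=22 B=33 */
    return 0;
}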
+
+/* ------ DMA (device reading system memory) ------ */
+
+typedef struct AppleGFXReadMemoryJob {
+ QemuEvent event;
+ hwaddr physical_address;
+ uint64_t length;
+ void *dst;
+ bool success;
+} AppleGFXReadMemoryJob;
+
+static void apple_gfx_do_read_memory(void *opaque)
+{
+ AppleGFXReadMemoryJob *job = opaque;
+ MemTxResult r;
+
+ r = dma_memory_read(&address_space_memory, job->physical_address,
+ job->dst, job->length, MEMTXATTRS_UNSPECIFIED);
+ job->success = (r == MEMTX_OK);
+
+ qemu_event_set(&job->event);
+}
+
+static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
+ uint64_t length, void *dst)
+{
+ AppleGFXReadMemoryJob job = {
+ .physical_address = physical_address, .length = length, .dst = dst
+ };
+
+ trace_apple_gfx_read_memory(physical_address, length, dst);
+
+ /* Performing DMA requires BQL, so do it in a BH. */
+ qemu_event_init(&job.event, 0);
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ apple_gfx_do_read_memory, &job);
+ qemu_event_wait(&job.event);
+ qemu_event_destroy(&job.event);
+ return job.success;
+}
+
+/* ------ Memory-mapped device I/O operations ------ */
+
+typedef struct AppleGFXIOJob {
+ AppleGFXState *state;
+ uint64_t offset;
+ uint64_t value;
+ bool completed;
+} AppleGFXIOJob;
+
+static void apple_gfx_do_read(void *opaque)
+{
+ AppleGFXIOJob *job = opaque;
+ job->value = [job->state->pgdev mmioReadAtOffset:job->offset];
+ qatomic_set(&job->completed, true);
+ aio_wait_kick();
+}
+
+static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AppleGFXIOJob job = {
+ .state = opaque,
+ .offset = offset,
+ .completed = false,
+ };
+ dispatch_queue_t queue = get_background_queue();
+
+ dispatch_async_f(queue, &job, apple_gfx_do_read);
+ AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));
+
+ trace_apple_gfx_read(offset, job.value);
+ return job.value;
+}
+
+static void apple_gfx_do_write(void *opaque)
+{
+ AppleGFXIOJob *job = opaque;
+ [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value];
+ qatomic_set(&job->completed, true);
+ aio_wait_kick();
+}
+
+static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val,
+ unsigned size)
+{
+ /*
+ * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can
+ * trigger synchronous operations on other dispatch queues, which in turn
+ * may call back out on one or more of the callback blocks. For this reason,
+ * and as we are holding the BQL, we invoke the I/O methods on a pool
+ * thread and handle AIO tasks while we wait. Any work in the callbacks
+ * requiring the BQL will in turn schedule BHs which this thread will
+ * process while waiting.
+ */
+ AppleGFXIOJob job = {
+ .state = opaque,
+ .offset = offset,
+ .value = val,
+ .completed = false,
+ };
+ dispatch_queue_t queue = get_background_queue();
+
+ dispatch_async_f(queue, &job, apple_gfx_do_write);
+ AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));
+
+ trace_apple_gfx_write(offset, val);
+}
+
+static const MemoryRegionOps apple_gfx_ops = {
+ .read = apple_gfx_read,
+ .write = apple_gfx_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static size_t apple_gfx_get_default_mmio_range_size(void)
+{
+ size_t mmio_range_size;
+ @autoreleasepool {
+ PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
+ mmio_range_size = desc.mmioLength;
+ [desc release];
+ }
+ return mmio_range_size;
+}
+
+/* ------ Initialisation and startup ------ */
+
+void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name)
+{
+ size_t mmio_range_size = apple_gfx_get_default_mmio_range_size();
+
+ trace_apple_gfx_common_init(obj_name, mmio_range_size);
+ memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name,
+ mmio_range_size);
+
+ /* TODO: PVG framework supports serialising device state: integrate it! */
+}
+
+static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s,
+ PGDeviceDescriptor *desc)
+{
+ desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) {
+ PGTask_t *task = apple_gfx_new_task(s, vmSize);
+ *baseAddress = (void *)task->address;
+ trace_apple_gfx_create_task(vmSize, *baseAddress);
+ return task;
+ };
+
+ desc.destroyTask = ^(PGTask_t * _Nonnull task) {
+ trace_apple_gfx_destroy_task(task, task->mapped_regions->len);
+
+ apple_gfx_destroy_task(s, task);
+ };
+
+ desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count,
+ uint64_t virtual_offset, bool read_only,
+ PGPhysicalMemoryRange_t * _Nonnull ranges) {
+ return apple_gfx_task_map_memory(s, task, virtual_offset,
+ ranges, range_count, read_only);
+ };
+
+ desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset,
+ uint64_t length) {
+ apple_gfx_task_unmap_memory(s, task, virtual_offset, length);
+ return true;
+ };
+
+ desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
+ void * _Nonnull dst) {
+ return apple_gfx_read_memory(s, physical_address, length, dst);
+ };
+}
+
+static void new_frame_handler_bh(void *opaque)
+{
+ AppleGFXState *s = opaque;
+
+ /* Drop frames if guest gets too far ahead. */
+ if (s->pending_frames >= 2) {
+ return;
+ }
+ ++s->pending_frames;
+ if (s->pending_frames > 1) {
+ return;
+ }
+
+ @autoreleasepool {
+ apple_gfx_render_new_frame(s);
+ }
+}
+
+static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s)
+{
+ PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new];
+
+ disp_desc.name = @"QEMU display";
+ disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */
+ disp_desc.queue = dispatch_get_main_queue();
+ disp_desc.newFrameEventHandler = ^(void) {
+ trace_apple_gfx_new_frame();
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s);
+ };
+ disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels,
+ OSType pixelFormat) {
+ trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y);
+
+ BQL_LOCK_GUARD();
+ set_mode(s, sizeInPixels.x, sizeInPixels.y);
+ };
+ disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph,
+ PGDisplayCoord_t hotspot) {
+ AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job));
+ job->s = s;
+ job->glyph = glyph;
+ job->hotspot = hotspot;
+ [glyph retain];
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ set_cursor_glyph, job);
+ };
+ disp_desc.cursorShowHandler = ^(BOOL show) {
+ trace_apple_gfx_cursor_show(show);
+ qatomic_set(&s->cursor_show, show);
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ update_cursor_bh, s);
+ };
+ disp_desc.cursorMoveHandler = ^(void) {
+ trace_apple_gfx_cursor_move();
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ update_cursor_bh, s);
+ };
+
+ return disp_desc;
+}
+
+static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array(
+ const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count)
+{
+ PGDisplayMode *mode_obj;
+ NSMutableArray<PGDisplayMode *> *mode_array =
+ [[NSMutableArray alloc] initWithCapacity:display_mode_count];
+
+ for (unsigned i = 0; i < display_mode_count; i++) {
+ const AppleGFXDisplayMode *mode = &display_modes[i];
+ trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px);
+ PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px };
+
+ mode_obj =
+ [[PGDisplayMode alloc] initWithSizeInPixels:mode_size
+ refreshRateInHz:mode->refresh_rate_hz];
+ [mode_array addObject:mode_obj];
+ [mode_obj release];
+ }
+
+ return mode_array;
+}
+
+static id<MTLDevice> copy_suitable_metal_device(void)
+{
+ id<MTLDevice> dev = nil;
+ NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices();
+
+ /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */
+ for (size_t i = 0; i < devs.count; ++i) {
+ if (devs[i].hasUnifiedMemory) {
+ dev = devs[i];
+ break;
+ }
+ if (!devs[i].removable) {
+ dev = devs[i];
+ }
+ }
+
+ if (dev != nil) {
+ [dev retain];
+ } else {
+ dev = MTLCreateSystemDefaultDevice();
+ }
+ [devs release];
+
+ return dev;
+}
+
+bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
+ PGDeviceDescriptor *desc, Error **errp)
+{
+ PGDisplayDescriptor *disp_desc;
+ const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes;
+ uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes);
+ NSArray<PGDisplayMode *> *mode_array;
+
+ if (apple_gfx_mig_blocker == NULL) {
+ error_setg(&apple_gfx_mig_blocker,
+ "Migration state blocked by apple-gfx display device");
+ if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) {
+ return false;
+ }
+ }
+
+ qemu_mutex_init(&s->task_mutex);
+ QTAILQ_INIT(&s->tasks);
+ s->mtl = copy_suitable_metal_device();
+ s->mtl_queue = [s->mtl newCommandQueue];
+
+ desc.device = s->mtl;
+
+ apple_gfx_register_task_mapping_handlers(s, desc);
+
+ s->cursor_show = true;
+
+ s->pgdev = PGNewDeviceWithDescriptor(desc);
+
+ disp_desc = apple_gfx_prepare_display_descriptor(s);
+ /*
+ * Although the framework supports it, this integration currently does not
+ * support multiple virtual displays connected to a single PV graphics
+ * device. It is, however, possible to create more than one instance of
+ * the device, each with one display. The macOS guest will ignore these
+ * displays if they share the same serial number, so ensure each instance
+ * gets a unique one.
+ */
+ s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc
+ port:0
+ serialNum:next_pgdisplay_serial_num++];
+ [disp_desc release];
+
+ if (s->display_modes != NULL && s->num_display_modes > 0) {
+ trace_apple_gfx_common_realize_modes_property(s->num_display_modes);
+ display_modes = s->display_modes;
+ num_display_modes = s->num_display_modes;
+ }
+ s->pgdisp.modeList = mode_array =
+ apple_gfx_create_display_mode_array(display_modes, num_display_modes);
+ [mode_array release];
+
+ s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s);
+ return true;
+}
+
+/* ------ Display mode list device property ------ */
+
+static void apple_gfx_get_display_mode(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ Property *prop = opaque;
+ AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
+ /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */
+ char buffer[5 * 3 + 2 + 1];
+ char *pos = buffer;
+
+ int rc = snprintf(buffer, sizeof(buffer),
+ "%"PRIu16"x%"PRIu16"@%"PRIu16,
+ mode->width_px, mode->height_px,
+ mode->refresh_rate_hz);
+ assert(rc < sizeof(buffer));
+
+ visit_type_str(v, name, &pos, errp);
+}
+
+static void apple_gfx_set_display_mode(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ Property *prop = opaque;
+ AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
+ const char *endptr;
+ g_autofree char *str = NULL;
+ int ret;
+ int val;
+
+ if (!visit_type_str(v, name, &str, errp)) {
+ return;
+ }
+
+ endptr = str;
+
+ ret = qemu_strtoi(endptr, &endptr, 10, &val);
+ if (ret || val > UINT16_MAX || val <= 0) {
+ error_setg(errp, "width in '%s' must be a decimal integer number"
+ " of pixels in the range 1..65535", name);
+ return;
+ }
+ mode->width_px = val;
+ if (*endptr != 'x') {
+ goto separator_error;
+ }
+
+ ret = qemu_strtoi(endptr + 1, &endptr, 10, &val);
+ if (ret || val > UINT16_MAX || val <= 0) {
+ error_setg(errp, "height in '%s' must be a decimal integer number"
+ " of pixels in the range 1..65535", name);
+ return;
+ }
+ mode->height_px = val;
+ if (*endptr != '@') {
+ goto separator_error;
+ }
+
+ ret = qemu_strtoi(endptr + 1, &endptr, 10, &val);
+ if (ret || val > UINT16_MAX || val <= 0) {
+ error_setg(errp, "refresh rate in '%s'"
+ " must be a positive decimal integer (Hertz)", name);
+ return;
+ }
+ mode->refresh_rate_hz = val;
+ return;
+
+separator_error:
+ error_setg(errp,
+ "Each display mode takes the format '<width>x<height>@<rate>'");
+}
+
+const PropertyInfo qdev_prop_apple_gfx_display_mode = {
+ .type = "display_mode",
+ .description =
+ "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> "
+ "Example: 3840x2160@60",
+ .get = apple_gfx_get_display_mode,
+ .set = apple_gfx_set_display_mode,
+};
diff --git a/hw/display/artist.c b/hw/display/artist.c
index d913453..3fafc8a 100644
--- a/hw/display/artist.c
+++ b/hw/display/artist.c
@@ -48,6 +48,7 @@ struct ARTISTState {
struct vram_buffer vram_buffer[16];
+ bool disable;
uint16_t width;
uint16_t height;
uint16_t depth;
@@ -1211,8 +1212,8 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size)
break;
case 0x380004:
- /* 0x02000000 Buserror */
- val = 0x6dc20006;
+ /* magic number detected by SeaBIOS-hppa */
+ val = s->disable ? 0 : 0x6dc20006;
break;
default:
@@ -1432,7 +1433,7 @@ static int vmstate_artist_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_artist = {
.name = "artist",
- .version_id = 2,
+ .version_id = 3,
.minimum_version_id = 2,
.post_load = vmstate_artist_post_load,
.fields = (const VMStateField[]) {
@@ -1470,28 +1471,29 @@ static const VMStateDescription vmstate_artist = {
VMSTATE_UINT32(font_write1, ARTISTState),
VMSTATE_UINT32(font_write2, ARTISTState),
VMSTATE_UINT32(font_write_pos_y, ARTISTState),
+ VMSTATE_BOOL(disable, ARTISTState),
VMSTATE_END_OF_LIST()
}
};
-static Property artist_properties[] = {
+static const Property artist_properties[] = {
DEFINE_PROP_UINT16("width", ARTISTState, width, 1280),
DEFINE_PROP_UINT16("height", ARTISTState, height, 1024),
DEFINE_PROP_UINT16("depth", ARTISTState, depth, 8),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("disable", ARTISTState, disable, false),
};
static void artist_reset(DeviceState *qdev)
{
}
-static void artist_class_init(ObjectClass *klass, void *data)
+static void artist_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = artist_realizefn;
dc->vmsd = &vmstate_artist;
- dc->reset = artist_reset;
+ device_class_set_legacy_reset(dc, artist_reset);
device_class_set_props(dc, artist_properties);
}
diff --git a/hw/display/ati.c b/hw/display/ati.c
index b1f94f5..7de2773 100644
--- a/hw/display/ati.c
+++ b/hw/display/ati.c
@@ -1039,7 +1039,7 @@ static void ati_vga_exit(PCIDevice *dev)
graphic_console_close(s->vga.con);
}
-static Property ati_vga_properties[] = {
+static const Property ati_vga_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", ATIVGAState, vga.vram_size_mb, 16),
DEFINE_PROP_STRING("model", ATIVGAState, model),
DEFINE_PROP_UINT16("x-device-id", ATIVGAState, dev_id,
@@ -1047,15 +1047,14 @@ static Property ati_vga_properties[] = {
DEFINE_PROP_BOOL("guest_hwcursor", ATIVGAState, cursor_guest_mode, false),
/* this is a debug option, prefer PROP_UINT over PROP_BIT for simplicity */
DEFINE_PROP_UINT8("x-pixman", ATIVGAState, use_pixman, DEFAULT_X_PIXMAN),
- DEFINE_PROP_END_OF_LIST()
};
-static void ati_vga_class_init(ObjectClass *klass, void *data)
+static void ati_vga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- dc->reset = ati_vga_reset;
+ device_class_set_legacy_reset(dc, ati_vga_reset);
device_class_set_props(dc, ati_vga_properties);
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
@@ -1080,7 +1079,7 @@ static const TypeInfo ati_vga_info = {
.instance_size = sizeof(ATIVGAState),
.class_init = ati_vga_class_init,
.instance_init = ati_vga_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c
index 650db3d..820e67a 100644
--- a/hw/display/bcm2835_fb.c
+++ b/hw/display/bcm2835_fb.c
@@ -429,7 +429,7 @@ static void bcm2835_fb_realize(DeviceState *dev, Error **errp)
qemu_console_resize(s->con, s->config.xres, s->config.yres);
}
-static Property bcm2835_fb_props[] = {
+static const Property bcm2835_fb_props[] = {
DEFINE_PROP_UINT32("vcram-base", BCM2835FBState, vcram_base, 0),/*required*/
DEFINE_PROP_UINT32("vcram-size", BCM2835FBState, vcram_size,
DEFAULT_VCRAM_SIZE),
@@ -440,16 +440,15 @@ static Property bcm2835_fb_props[] = {
initial_config.pixo, 1), /* 1=RGB, 0=BGR */
DEFINE_PROP_UINT32("alpha", BCM2835FBState,
initial_config.alpha, 2), /* alpha ignored */
- DEFINE_PROP_END_OF_LIST()
};
-static void bcm2835_fb_class_init(ObjectClass *klass, void *data)
+static void bcm2835_fb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, bcm2835_fb_props);
dc->realize = bcm2835_fb_realize;
- dc->reset = bcm2835_fb_reset;
+ device_class_set_legacy_reset(dc, bcm2835_fb_reset);
dc->vmsd = &vmstate_bcm2835_fb;
}
diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c
deleted file mode 100644
index 030abbe..0000000
--- a/hw/display/blizzard.c
+++ /dev/null
@@ -1,1026 +0,0 @@
-/*
- * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/bitops.h"
-#include "ui/console.h"
-#include "hw/display/blizzard.h"
-#include "ui/pixel_ops.h"
-
-typedef void (*blizzard_fn_t)(uint8_t *, const uint8_t *, unsigned int);
-
-typedef struct {
- uint8_t reg;
- uint32_t addr;
- int swallow;
-
- int pll;
- int pll_range;
- int pll_ctrl;
- uint8_t pll_mode;
- uint8_t clksel;
- int memenable;
- int memrefresh;
- uint8_t timing[3];
- int priority;
-
- uint8_t lcd_config;
- int x;
- int y;
- int skipx;
- int skipy;
- uint8_t hndp;
- uint8_t vndp;
- uint8_t hsync;
- uint8_t vsync;
- uint8_t pclk;
- uint8_t u;
- uint8_t v;
- uint8_t yrc[2];
- int ix[2];
- int iy[2];
- int ox[2];
- int oy[2];
-
- int enable;
- int blank;
- int bpp;
- int invalidate;
- int mx[2];
- int my[2];
- uint8_t mode;
- uint8_t effect;
- uint8_t iformat;
- uint8_t source;
- QemuConsole *con;
- blizzard_fn_t *line_fn_tab[2];
- void *fb;
-
- uint8_t hssi_config[3];
- uint8_t tv_config;
- uint8_t tv_timing[4];
- uint8_t vbi;
- uint8_t tv_x;
- uint8_t tv_y;
- uint8_t tv_test;
- uint8_t tv_filter_config;
- uint8_t tv_filter_idx;
- uint8_t tv_filter_coeff[0x20];
- uint8_t border_r;
- uint8_t border_g;
- uint8_t border_b;
- uint8_t gamma_config;
- uint8_t gamma_idx;
- uint8_t gamma_lut[0x100];
- uint8_t matrix_ena;
- uint8_t matrix_coeff[0x12];
- uint8_t matrix_r;
- uint8_t matrix_g;
- uint8_t matrix_b;
- uint8_t pm;
- uint8_t status;
- uint8_t rgbgpio_dir;
- uint8_t rgbgpio;
- uint8_t gpio_dir;
- uint8_t gpio;
- uint8_t gpio_edge[2];
- uint8_t gpio_irq;
- uint8_t gpio_pdown;
-
- struct {
- int x;
- int y;
- int dx;
- int dy;
- int len;
- int buflen;
- void *buf;
- void *data;
- uint16_t *ptr;
- int angle;
- int pitch;
- blizzard_fn_t line_fn;
- } data;
-} BlizzardState;
-
-/* Bytes(!) per pixel */
-static const int blizzard_iformat_bpp[0x10] = {
- 0,
- 2, /* RGB 5:6:5*/
- 3, /* RGB 6:6:6 mode 1 */
- 3, /* RGB 8:8:8 mode 1 */
- 0, 0,
- 4, /* RGB 6:6:6 mode 2 */
- 4, /* RGB 8:8:8 mode 2 */
- 0, /* YUV 4:2:2 */
- 0, /* YUV 4:2:0 */
- 0, 0, 0, 0, 0, 0,
-};
-
-static void blizzard_window(BlizzardState *s)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- uint8_t *src, *dst;
- int bypp[2];
- int bypl[3];
- int y;
- blizzard_fn_t fn = s->data.line_fn;
-
- if (!fn)
- return;
- if (s->mx[0] > s->data.x)
- s->mx[0] = s->data.x;
- if (s->my[0] > s->data.y)
- s->my[0] = s->data.y;
- if (s->mx[1] < s->data.x + s->data.dx)
- s->mx[1] = s->data.x + s->data.dx;
- if (s->my[1] < s->data.y + s->data.dy)
- s->my[1] = s->data.y + s->data.dy;
-
- bypp[0] = s->bpp;
- bypp[1] = surface_bytes_per_pixel(surface);
- bypl[0] = bypp[0] * s->data.pitch;
- bypl[1] = bypp[1] * s->x;
- bypl[2] = bypp[0] * s->data.dx;
-
- src = s->data.data;
- dst = s->fb + bypl[1] * s->data.y + bypp[1] * s->data.x;
- for (y = s->data.dy; y > 0; y --, src += bypl[0], dst += bypl[1])
- fn(dst, src, bypl[2]);
-}
-
-static int blizzard_transfer_setup(BlizzardState *s)
-{
- if (s->source > 3 || !s->bpp ||
- s->ix[1] < s->ix[0] || s->iy[1] < s->iy[0])
- return 0;
-
- s->data.angle = s->effect & 3;
- s->data.line_fn = s->line_fn_tab[!!s->data.angle][s->iformat];
- s->data.x = s->ix[0];
- s->data.y = s->iy[0];
- s->data.dx = s->ix[1] - s->ix[0] + 1;
- s->data.dy = s->iy[1] - s->iy[0] + 1;
- s->data.len = s->bpp * s->data.dx * s->data.dy;
- s->data.pitch = s->data.dx;
- if (s->data.len > s->data.buflen) {
- s->data.buf = g_realloc(s->data.buf, s->data.len);
- s->data.buflen = s->data.len;
- }
- s->data.ptr = s->data.buf;
- s->data.data = s->data.buf;
- s->data.len /= 2;
- return 1;
-}
-
-static void blizzard_reset(BlizzardState *s)
-{
- s->reg = 0;
- s->swallow = 0;
-
- s->pll = 9;
- s->pll_range = 1;
- s->pll_ctrl = 0x14;
- s->pll_mode = 0x32;
- s->clksel = 0x00;
- s->memenable = 0;
- s->memrefresh = 0x25c;
- s->timing[0] = 0x3f;
- s->timing[1] = 0x13;
- s->timing[2] = 0x21;
- s->priority = 0;
-
- s->lcd_config = 0x74;
- s->x = 8;
- s->y = 1;
- s->skipx = 0;
- s->skipy = 0;
- s->hndp = 3;
- s->vndp = 2;
- s->hsync = 1;
- s->vsync = 1;
- s->pclk = 0x80;
-
- s->ix[0] = 0;
- s->ix[1] = 0;
- s->iy[0] = 0;
- s->iy[1] = 0;
- s->ox[0] = 0;
- s->ox[1] = 0;
- s->oy[0] = 0;
- s->oy[1] = 0;
-
- s->yrc[0] = 0x00;
- s->yrc[1] = 0x30;
- s->u = 0;
- s->v = 0;
-
- s->iformat = 3;
- s->source = 0;
- s->bpp = blizzard_iformat_bpp[s->iformat];
-
- s->hssi_config[0] = 0x00;
- s->hssi_config[1] = 0x00;
- s->hssi_config[2] = 0x01;
- s->tv_config = 0x00;
- s->tv_timing[0] = 0x00;
- s->tv_timing[1] = 0x00;
- s->tv_timing[2] = 0x00;
- s->tv_timing[3] = 0x00;
- s->vbi = 0x10;
- s->tv_x = 0x14;
- s->tv_y = 0x03;
- s->tv_test = 0x00;
- s->tv_filter_config = 0x80;
- s->tv_filter_idx = 0x00;
- s->border_r = 0x10;
- s->border_g = 0x80;
- s->border_b = 0x80;
- s->gamma_config = 0x00;
- s->gamma_idx = 0x00;
- s->matrix_ena = 0x00;
- memset(&s->matrix_coeff, 0, sizeof(s->matrix_coeff));
- s->matrix_r = 0x00;
- s->matrix_g = 0x00;
- s->matrix_b = 0x00;
- s->pm = 0x02;
- s->status = 0x00;
- s->rgbgpio_dir = 0x00;
- s->gpio_dir = 0x00;
- s->gpio_edge[0] = 0x00;
- s->gpio_edge[1] = 0x00;
- s->gpio_irq = 0x00;
- s->gpio_pdown = 0xff;
-}
-
-static inline void blizzard_invalidate_display(void *opaque) {
- BlizzardState *s = (BlizzardState *) opaque;
-
- s->invalidate = 1;
-}
-
-static uint16_t blizzard_reg_read(void *opaque, uint8_t reg)
-{
- BlizzardState *s = (BlizzardState *) opaque;
-
- switch (reg) {
- case 0x00: /* Revision Code */
- return 0xa5;
-
- case 0x02: /* Configuration Readback */
- return 0x83; /* Macrovision OK, CNF[2:0] = 3 */
-
- case 0x04: /* PLL M-Divider */
- return (s->pll - 1) | (1 << 7);
- case 0x06: /* PLL Lock Range Control */
- return s->pll_range;
- case 0x08: /* PLL Lock Synthesis Control 0 */
- return s->pll_ctrl & 0xff;
- case 0x0a: /* PLL Lock Synthesis Control 1 */
- return s->pll_ctrl >> 8;
- case 0x0c: /* PLL Mode Control 0 */
- return s->pll_mode;
-
- case 0x0e: /* Clock-Source Select */
- return s->clksel;
-
- case 0x10: /* Memory Controller Activate */
- case 0x14: /* Memory Controller Bank 0 Status Flag */
- return s->memenable;
-
- case 0x18: /* Auto-Refresh Interval Setting 0 */
- return s->memrefresh & 0xff;
- case 0x1a: /* Auto-Refresh Interval Setting 1 */
- return s->memrefresh >> 8;
-
- case 0x1c: /* Power-On Sequence Timing Control */
- return s->timing[0];
- case 0x1e: /* Timing Control 0 */
- return s->timing[1];
- case 0x20: /* Timing Control 1 */
- return s->timing[2];
-
- case 0x24: /* Arbitration Priority Control */
- return s->priority;
-
- case 0x28: /* LCD Panel Configuration */
- return s->lcd_config;
-
- case 0x2a: /* LCD Horizontal Display Width */
- return s->x >> 3;
- case 0x2c: /* LCD Horizontal Non-display Period */
- return s->hndp;
- case 0x2e: /* LCD Vertical Display Height 0 */
- return s->y & 0xff;
- case 0x30: /* LCD Vertical Display Height 1 */
- return s->y >> 8;
- case 0x32: /* LCD Vertical Non-display Period */
- return s->vndp;
- case 0x34: /* LCD HS Pulse-width */
- return s->hsync;
- case 0x36: /* LCd HS Pulse Start Position */
- return s->skipx >> 3;
- case 0x38: /* LCD VS Pulse-width */
- return s->vsync;
- case 0x3a: /* LCD VS Pulse Start Position */
- return s->skipy;
-
- case 0x3c: /* PCLK Polarity */
- return s->pclk;
-
- case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */
- return s->hssi_config[0];
- case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */
- return s->hssi_config[1];
- case 0x42: /* High-speed Serial Interface Tx Mode */
- return s->hssi_config[2];
- case 0x44: /* TV Display Configuration */
- return s->tv_config;
- case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits */
- return s->tv_timing[(reg - 0x46) >> 1];
- case 0x4e: /* VBI: Closed Caption / XDS Control / Status */
- return s->vbi;
- case 0x50: /* TV Horizontal Start Position */
- return s->tv_x;
- case 0x52: /* TV Vertical Start Position */
- return s->tv_y;
- case 0x54: /* TV Test Pattern Setting */
- return s->tv_test;
- case 0x56: /* TV Filter Setting */
- return s->tv_filter_config;
- case 0x58: /* TV Filter Coefficient Index */
- return s->tv_filter_idx;
- case 0x5a: /* TV Filter Coefficient Data */
- if (s->tv_filter_idx < 0x20)
- return s->tv_filter_coeff[s->tv_filter_idx ++];
- return 0;
-
- case 0x60: /* Input YUV/RGB Translate Mode 0 */
- return s->yrc[0];
- case 0x62: /* Input YUV/RGB Translate Mode 1 */
- return s->yrc[1];
- case 0x64: /* U Data Fix */
- return s->u;
- case 0x66: /* V Data Fix */
- return s->v;
-
- case 0x68: /* Display Mode */
- return s->mode;
-
- case 0x6a: /* Special Effects */
- return s->effect;
-
-    case 0x6c: /* Input Window X Start Position 0 */
-        return s->ix[0] & 0xff;
-    case 0x6e: /* Input Window X Start Position 1 */
-        return s->ix[0] >> 8;
-    case 0x70: /* Input Window Y Start Position 0 */
-        return s->iy[0] & 0xff;
-    case 0x72: /* Input Window Y Start Position 1 */
-        return s->iy[0] >> 8;
-    case 0x74: /* Input Window X End Position 0 */
-        return s->ix[1] & 0xff;
-    case 0x76: /* Input Window X End Position 1 */
-        return s->ix[1] >> 8;
-    case 0x78: /* Input Window Y End Position 0 */
-        return s->iy[1] & 0xff;
-    case 0x7a: /* Input Window Y End Position 1 */
-        return s->iy[1] >> 8;
-    case 0x7c: /* Output Window X Start Position 0 */
-        return s->ox[0] & 0xff;
-    case 0x7e: /* Output Window X Start Position 1 */
-        return s->ox[0] >> 8;
-    case 0x80: /* Output Window Y Start Position 0 */
-        return s->oy[0] & 0xff;
-    case 0x82: /* Output Window Y Start Position 1 */
-        return s->oy[0] >> 8;
-    case 0x84: /* Output Window X End Position 0 */
-        return s->ox[1] & 0xff;
-    case 0x86: /* Output Window X End Position 1 */
-        return s->ox[1] >> 8;
-    case 0x88: /* Output Window Y End Position 0 */
-        return s->oy[1] & 0xff;
-    case 0x8a: /* Output Window Y End Position 1 */
-        return s->oy[1] >> 8;
-
- case 0x8c: /* Input Data Format */
- return s->iformat;
- case 0x8e: /* Data Source Select */
- return s->source;
- case 0x90: /* Display Memory Data Port */
- return 0;
-
- case 0xa8: /* Border Color 0 */
- return s->border_r;
- case 0xaa: /* Border Color 1 */
- return s->border_g;
- case 0xac: /* Border Color 2 */
- return s->border_b;
-
- case 0xb4: /* Gamma Correction Enable */
- return s->gamma_config;
- case 0xb6: /* Gamma Correction Table Index */
- return s->gamma_idx;
- case 0xb8: /* Gamma Correction Table Data */
- return s->gamma_lut[s->gamma_idx ++];
-
- case 0xba: /* 3x3 Matrix Enable */
- return s->matrix_ena;
- case 0xbc ... 0xde: /* Coefficient Registers */
- return s->matrix_coeff[(reg - 0xbc) >> 1];
- case 0xe0: /* 3x3 Matrix Red Offset */
- return s->matrix_r;
- case 0xe2: /* 3x3 Matrix Green Offset */
- return s->matrix_g;
- case 0xe4: /* 3x3 Matrix Blue Offset */
- return s->matrix_b;
-
- case 0xe6: /* Power-save */
- return s->pm;
- case 0xe8: /* Non-display Period Control / Status */
- return s->status | (1 << 5);
- case 0xea: /* RGB Interface Control */
- return s->rgbgpio_dir;
- case 0xec: /* RGB Interface Status */
- return s->rgbgpio;
- case 0xee: /* General-purpose IO Pins Configuration */
- return s->gpio_dir;
- case 0xf0: /* General-purpose IO Pins Status / Control */
- return s->gpio;
- case 0xf2: /* GPIO Positive Edge Interrupt Trigger */
- return s->gpio_edge[0];
- case 0xf4: /* GPIO Negative Edge Interrupt Trigger */
- return s->gpio_edge[1];
- case 0xf6: /* GPIO Interrupt Status */
- return s->gpio_irq;
- case 0xf8: /* GPIO Pull-down Control */
- return s->gpio_pdown;
-
- default:
- fprintf(stderr, "%s: unknown register %02x\n", __func__, reg);
- return 0;
- }
-}
-
-static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value)
-{
- BlizzardState *s = (BlizzardState *) opaque;
-
- switch (reg) {
- case 0x04: /* PLL M-Divider */
- s->pll = (value & 0x3f) + 1;
- break;
- case 0x06: /* PLL Lock Range Control */
- s->pll_range = value & 3;
- break;
- case 0x08: /* PLL Lock Synthesis Control 0 */
- s->pll_ctrl &= 0xf00;
- s->pll_ctrl |= (value << 0) & 0x0ff;
- break;
- case 0x0a: /* PLL Lock Synthesis Control 1 */
- s->pll_ctrl &= 0x0ff;
- s->pll_ctrl |= (value << 8) & 0xf00;
- break;
- case 0x0c: /* PLL Mode Control 0 */
- s->pll_mode = value & 0x77;
- if ((value & 3) == 0 || (value & 3) == 3)
- fprintf(stderr, "%s: wrong PLL Control bits (%i)\n",
- __func__, value & 3);
- break;
-
- case 0x0e: /* Clock-Source Select */
- s->clksel = value & 0xff;
- break;
-
- case 0x10: /* Memory Controller Activate */
- s->memenable = value & 1;
- break;
- case 0x14: /* Memory Controller Bank 0 Status Flag */
- break;
-
- case 0x18: /* Auto-Refresh Interval Setting 0 */
- s->memrefresh &= 0xf00;
- s->memrefresh |= (value << 0) & 0x0ff;
- break;
- case 0x1a: /* Auto-Refresh Interval Setting 1 */
- s->memrefresh &= 0x0ff;
- s->memrefresh |= (value << 8) & 0xf00;
- break;
-
- case 0x1c: /* Power-On Sequence Timing Control */
- s->timing[0] = value & 0x7f;
- break;
- case 0x1e: /* Timing Control 0 */
- s->timing[1] = value & 0x17;
- break;
- case 0x20: /* Timing Control 1 */
- s->timing[2] = value & 0x35;
- break;
-
- case 0x24: /* Arbitration Priority Control */
- s->priority = value & 1;
- break;
-
- case 0x28: /* LCD Panel Configuration */
- s->lcd_config = value & 0xff;
- if (value & (1 << 7))
- fprintf(stderr, "%s: data swap not supported!\n", __func__);
- break;
-
- case 0x2a: /* LCD Horizontal Display Width */
- s->x = value << 3;
- break;
- case 0x2c: /* LCD Horizontal Non-display Period */
- s->hndp = value & 0xff;
- break;
- case 0x2e: /* LCD Vertical Display Height 0 */
- s->y &= 0x300;
- s->y |= (value << 0) & 0x0ff;
- break;
- case 0x30: /* LCD Vertical Display Height 1 */
- s->y &= 0x0ff;
- s->y |= (value << 8) & 0x300;
- break;
- case 0x32: /* LCD Vertical Non-display Period */
- s->vndp = value & 0xff;
- break;
- case 0x34: /* LCD HS Pulse-width */
- s->hsync = value & 0xff;
- break;
- case 0x36: /* LCD HS Pulse Start Position */
- s->skipx = value & 0xff;
- break;
- case 0x38: /* LCD VS Pulse-width */
- s->vsync = value & 0xbf;
- break;
- case 0x3a: /* LCD VS Pulse Start Position */
- s->skipy = value & 0xff;
- break;
-
- case 0x3c: /* PCLK Polarity */
- s->pclk = value & 0x82;
- /* Affects calculation of s->hndp, s->hsync and s->skipx. */
- break;
-
- case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */
- s->hssi_config[0] = value;
- break;
- case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */
- s->hssi_config[1] = value;
- if (((value >> 4) & 3) == 3)
- fprintf(stderr, "%s: Illegal active-data-links value\n",
- __func__);
- break;
- case 0x42: /* High-speed Serial Interface Tx Mode */
- s->hssi_config[2] = value & 0xbd;
- break;
-
- case 0x44: /* TV Display Configuration */
- s->tv_config = value & 0xfe;
- break;
- case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits 0 */
- s->tv_timing[(reg - 0x46) >> 1] = value;
- break;
- case 0x4e: /* VBI: Closed Caption / XDS Control / Status */
- s->vbi = value;
- break;
- case 0x50: /* TV Horizontal Start Position */
- s->tv_x = value;
- break;
- case 0x52: /* TV Vertical Start Position */
- s->tv_y = value & 0x7f;
- break;
- case 0x54: /* TV Test Pattern Setting */
- s->tv_test = value;
- break;
- case 0x56: /* TV Filter Setting */
- s->tv_filter_config = value & 0xbf;
- break;
- case 0x58: /* TV Filter Coefficient Index */
- s->tv_filter_idx = value & 0x1f;
- break;
- case 0x5a: /* TV Filter Coefficient Data */
- if (s->tv_filter_idx < 0x20)
- s->tv_filter_coeff[s->tv_filter_idx ++] = value;
- break;
-
- case 0x60: /* Input YUV/RGB Translate Mode 0 */
- s->yrc[0] = value & 0xb0;
- break;
- case 0x62: /* Input YUV/RGB Translate Mode 1 */
- s->yrc[1] = value & 0x30;
- break;
- case 0x64: /* U Data Fix */
- s->u = value & 0xff;
- break;
- case 0x66: /* V Data Fix */
- s->v = value & 0xff;
- break;
-
- case 0x68: /* Display Mode */
- if ((s->mode ^ value) & 3)
- s->invalidate = 1;
- s->mode = value & 0xb7;
- s->enable = value & 1;
- s->blank = (value >> 1) & 1;
- if (value & (1 << 4))
- fprintf(stderr, "%s: Macrovision enable attempt!\n", __func__);
- break;
-
- case 0x6a: /* Special Effects */
- s->effect = value & 0xfb;
- break;
-
- case 0x6c: /* Input Window X Start Position 0 */
- s->ix[0] &= 0x300;
- s->ix[0] |= (value << 0) & 0x0ff;
- break;
- case 0x6e: /* Input Window X Start Position 1 */
- s->ix[0] &= 0x0ff;
- s->ix[0] |= (value << 8) & 0x300;
- break;
- case 0x70: /* Input Window Y Start Position 0 */
- s->iy[0] &= 0x300;
- s->iy[0] |= (value << 0) & 0x0ff;
- break;
- case 0x72: /* Input Window Y Start Position 1 */
- s->iy[0] &= 0x0ff;
- s->iy[0] |= (value << 8) & 0x300;
- break;
- case 0x74: /* Input Window X End Position 0 */
- s->ix[1] &= 0x300;
- s->ix[1] |= (value << 0) & 0x0ff;
- break;
- case 0x76: /* Input Window X End Position 1 */
- s->ix[1] &= 0x0ff;
- s->ix[1] |= (value << 8) & 0x300;
- break;
- case 0x78: /* Input Window Y End Position 0 */
- s->iy[1] &= 0x300;
- s->iy[1] |= (value << 0) & 0x0ff;
- break;
- case 0x7a: /* Input Window Y End Position 1 */
- s->iy[1] &= 0x0ff;
- s->iy[1] |= (value << 8) & 0x300;
- break;
- case 0x7c: /* Output Window X Start Position 0 */
- s->ox[0] &= 0x300;
- s->ox[0] |= (value << 0) & 0x0ff;
- break;
- case 0x7e: /* Output Window X Start Position 1 */
- s->ox[0] &= 0x0ff;
- s->ox[0] |= (value << 8) & 0x300;
- break;
- case 0x80: /* Output Window Y Start Position 0 */
- s->oy[0] &= 0x300;
- s->oy[0] |= (value << 0) & 0x0ff;
- break;
- case 0x82: /* Output Window Y Start Position 1 */
- s->oy[0] &= 0x0ff;
- s->oy[0] |= (value << 8) & 0x300;
- break;
- case 0x84: /* Output Window X End Position 0 */
- s->ox[1] &= 0x300;
- s->ox[1] |= (value << 0) & 0x0ff;
- break;
- case 0x86: /* Output Window X End Position 1 */
- s->ox[1] &= 0x0ff;
- s->ox[1] |= (value << 8) & 0x300;
- break;
- case 0x88: /* Output Window Y End Position 0 */
- s->oy[1] &= 0x300;
- s->oy[1] |= (value << 0) & 0x0ff;
- break;
- case 0x8a: /* Output Window Y End Position 1 */
- s->oy[1] &= 0x0ff;
- s->oy[1] |= (value << 8) & 0x300;
- break;
-
- case 0x8c: /* Input Data Format */
- s->iformat = value & 0xf;
- s->bpp = blizzard_iformat_bpp[s->iformat];
- if (!s->bpp)
- fprintf(stderr, "%s: Illegal or unsupported input format %x\n",
- __func__, s->iformat);
- break;
- case 0x8e: /* Data Source Select */
- s->source = value & 7;
- /* Currently all windows will be "destructive overlays". */
- if ((!(s->effect & (1 << 3)) && (s->ix[0] != s->ox[0] ||
- s->iy[0] != s->oy[0] ||
- s->ix[1] != s->ox[1] ||
- s->iy[1] != s->oy[1])) ||
- !((s->ix[1] - s->ix[0]) & (s->iy[1] - s->iy[0]) &
- (s->ox[1] - s->ox[0]) & (s->oy[1] - s->oy[0]) & 1))
- fprintf(stderr, "%s: Illegal input/output window positions\n",
- __func__);
-
- blizzard_transfer_setup(s);
- break;
-
- case 0x90: /* Display Memory Data Port */
- if (!s->data.len && !blizzard_transfer_setup(s))
- break;
-
- *s->data.ptr ++ = value;
- if (-- s->data.len == 0)
- blizzard_window(s);
- break;
-
- case 0xa8: /* Border Color 0 */
- s->border_r = value;
- break;
- case 0xaa: /* Border Color 1 */
- s->border_g = value;
- break;
- case 0xac: /* Border Color 2 */
- s->border_b = value;
- break;
-
- case 0xb4: /* Gamma Correction Enable */
- s->gamma_config = value & 0x87;
- break;
- case 0xb6: /* Gamma Correction Table Index */
- s->gamma_idx = value;
- break;
- case 0xb8: /* Gamma Correction Table Data */
- s->gamma_lut[s->gamma_idx ++] = value;
- break;
-
- case 0xba: /* 3x3 Matrix Enable */
- s->matrix_ena = value & 1;
- break;
- case 0xbc ... 0xde: /* Coefficient Registers */
- s->matrix_coeff[(reg - 0xbc) >> 1] = value & ((reg & 2) ? 0x80 : 0xff);
- break;
- case 0xe0: /* 3x3 Matrix Red Offset */
- s->matrix_r = value;
- break;
- case 0xe2: /* 3x3 Matrix Green Offset */
- s->matrix_g = value;
- break;
- case 0xe4: /* 3x3 Matrix Blue Offset */
- s->matrix_b = value;
- break;
-
- case 0xe6: /* Power-save */
- s->pm = value & 0x83;
- if (value & s->mode & 1)
- fprintf(stderr, "%s: The display must be disabled before entering "
- "Standby Mode\n", __func__);
- break;
- case 0xe8: /* Non-display Period Control / Status */
- s->status = value & 0x1b;
- break;
- case 0xea: /* RGB Interface Control */
- s->rgbgpio_dir = value & 0x8f;
- break;
- case 0xec: /* RGB Interface Status */
- s->rgbgpio = value & 0xcf;
- break;
- case 0xee: /* General-purpose IO Pins Configuration */
- s->gpio_dir = value;
- break;
- case 0xf0: /* General-purpose IO Pins Status / Control */
- s->gpio = value;
- break;
- case 0xf2: /* GPIO Positive Edge Interrupt Trigger */
- s->gpio_edge[0] = value;
- break;
- case 0xf4: /* GPIO Negative Edge Interrupt Trigger */
- s->gpio_edge[1] = value;
- break;
- case 0xf6: /* GPIO Interrupt Status */
- s->gpio_irq &= value;
- break;
- case 0xf8: /* GPIO Pull-down Control */
- s->gpio_pdown = value;
- break;
-
- default:
- fprintf(stderr, "%s: unknown register %02x\n", __func__, reg);
- break;
- }
-}
-
-uint16_t s1d13745_read(void *opaque, int dc)
-{
- BlizzardState *s = (BlizzardState *) opaque;
- uint16_t value = blizzard_reg_read(s, s->reg);
-
- if (s->swallow -- > 0)
- return 0;
- if (dc)
- s->reg ++;
-
- return value;
-}
-
-void s1d13745_write(void *opaque, int dc, uint16_t value)
-{
- BlizzardState *s = (BlizzardState *) opaque;
-
- if (s->swallow -- > 0)
- return;
- if (dc) {
- blizzard_reg_write(s, s->reg, value);
-
- if (s->reg != 0x90 && s->reg != 0x5a && s->reg != 0xb8)
- s->reg += 2;
- } else
- s->reg = value & 0xff;
-}
-
-void s1d13745_write_block(void *opaque, int dc,
- void *buf, size_t len, int pitch)
-{
- BlizzardState *s = (BlizzardState *) opaque;
-
- while (len > 0) {
- if (s->reg == 0x90 && dc &&
- (s->data.len || blizzard_transfer_setup(s)) &&
- len >= (s->data.len << 1)) {
- len -= s->data.len << 1;
- s->data.len = 0;
- s->data.data = buf;
- if (pitch)
- s->data.pitch = pitch;
- blizzard_window(s);
- s->data.data = s->data.buf;
- continue;
- }
-
- s1d13745_write(opaque, dc, *(uint16_t *) buf);
- len -= 2;
- buf += 2;
- }
-}
-
-static void blizzard_update_display(void *opaque)
-{
- BlizzardState *s = (BlizzardState *) opaque;
- DisplaySurface *surface = qemu_console_surface(s->con);
- int y, bypp, bypl, bwidth;
- uint8_t *src, *dst;
-
- if (!s->enable)
- return;
-
- if (s->x != surface_width(surface) || s->y != surface_height(surface)) {
- s->invalidate = 1;
- qemu_console_resize(s->con, s->x, s->y);
- surface = qemu_console_surface(s->con);
- }
-
- if (s->invalidate) {
- s->invalidate = 0;
-
- if (s->blank) {
- bypp = surface_bytes_per_pixel(surface);
- memset(surface_data(surface), 0, bypp * s->x * s->y);
- return;
- }
-
- s->mx[0] = 0;
- s->mx[1] = s->x;
- s->my[0] = 0;
- s->my[1] = s->y;
- }
-
- if (s->mx[1] <= s->mx[0])
- return;
-
- bypp = surface_bytes_per_pixel(surface);
- bypl = bypp * s->x;
- bwidth = bypp * (s->mx[1] - s->mx[0]);
- y = s->my[0];
- src = s->fb + bypl * y + bypp * s->mx[0];
- dst = surface_data(surface) + bypl * y + bypp * s->mx[0];
- for (; y < s->my[1]; y ++, src += bypl, dst += bypl)
- memcpy(dst, src, bwidth);
-
- dpy_gfx_update(s->con, s->mx[0], s->my[0],
- s->mx[1] - s->mx[0], y - s->my[0]);
-
- s->mx[0] = s->x;
- s->mx[1] = 0;
- s->my[0] = s->y;
- s->my[1] = 0;
-}
-
-static void blizzard_draw_line16_32(uint32_t *dest,
- const uint16_t *src, unsigned int width)
-{
- uint16_t data;
- unsigned int r, g, b;
- const uint16_t *end = (const void *) src + width;
- while (src < end) {
- data = *src ++;
- b = extract16(data, 0, 5) << 3;
- g = extract16(data, 5, 6) << 2;
- r = extract16(data, 11, 5) << 3;
- *dest++ = rgb_to_pixel32(r, g, b);
- }
-}
-
-static void blizzard_draw_line24mode1_32(uint32_t *dest,
- const uint8_t *src, unsigned int width)
-{
-    /* TODO: if the surface's 24-bit layout already matches this format,
-     * just use memcpy */
- unsigned int r[2], g[2], b[2];
- const uint8_t *end = src + width;
- while (src < end) {
- g[0] = *src ++;
- r[0] = *src ++;
- r[1] = *src ++;
- b[0] = *src ++;
- *dest++ = rgb_to_pixel32(r[0], g[0], b[0]);
- b[1] = *src ++;
- g[1] = *src ++;
- *dest++ = rgb_to_pixel32(r[1], g[1], b[1]);
- }
-}
-
-static void blizzard_draw_line24mode2_32(uint32_t *dest,
- const uint8_t *src, unsigned int width)
-{
- unsigned int r, g, b;
- const uint8_t *end = src + width;
- while (src < end) {
- r = *src ++;
- src ++;
- b = *src ++;
- g = *src ++;
- *dest++ = rgb_to_pixel32(r, g, b);
- }
-}
-
-/* No rotation */
-static blizzard_fn_t blizzard_draw_fn_32[0x10] = {
- NULL,
-    /* RGB 5:6:5 */
- (blizzard_fn_t) blizzard_draw_line16_32,
- /* RGB 6:6:6 mode 1 */
- (blizzard_fn_t) blizzard_draw_line24mode1_32,
- /* RGB 8:8:8 mode 1 */
- (blizzard_fn_t) blizzard_draw_line24mode1_32,
- NULL, NULL,
- /* RGB 6:6:6 mode 2 */
- (blizzard_fn_t) blizzard_draw_line24mode2_32,
- /* RGB 8:8:8 mode 2 */
- (blizzard_fn_t) blizzard_draw_line24mode2_32,
- /* YUV 4:2:2 */
- NULL,
- /* YUV 4:2:0 */
- NULL,
- NULL, NULL, NULL, NULL, NULL, NULL,
-};
-
-/* 90deg, 180deg and 270deg rotation */
-static blizzard_fn_t blizzard_draw_fn_r_32[0x10] = {
- /* TODO */
- [0 ... 0xf] = NULL,
-};
-
-static const GraphicHwOps blizzard_ops = {
- .invalidate = blizzard_invalidate_display,
- .gfx_update = blizzard_update_display,
-};
-
-void *s1d13745_init(qemu_irq gpio_int)
-{
- BlizzardState *s = g_malloc0(sizeof(*s));
- DisplaySurface *surface;
-
- s->fb = g_malloc(0x180000);
-
- s->con = graphic_console_init(NULL, 0, &blizzard_ops, s);
- surface = qemu_console_surface(s->con);
-
- assert(surface_bits_per_pixel(surface) == 32);
-
- s->line_fn_tab[0] = blizzard_draw_fn_32;
- s->line_fn_tab[1] = blizzard_draw_fn_r_32;
-
- blizzard_reset(s);
-
- return s;
-}
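
The s1d13745_read()/s1d13745_write() entry points above implement an indexed access protocol: with DC low the written word selects a register index, with DC high it accesses that register, and the index auto-advances by two except for the streaming registers 0x90, 0x5a and 0xb8. A minimal host-side sketch, using hypothetical blizzard_write_reg()/blizzard_set_panel() helpers that are not part of the device model:

/*
 * Illustrative only: program the panel geometry through the indexed
 * interface exposed by s1d13745_write() above.
 */
static void blizzard_write_reg(void *dev, uint8_t reg, uint16_t val)
{
    s1d13745_write(dev, 0, reg);   /* DC low: latch the register index */
    s1d13745_write(dev, 1, val);   /* DC high: write data, index += 2 */
}

static void blizzard_set_panel(void *dev, unsigned width, unsigned height)
{
    blizzard_write_reg(dev, 0x2a, width >> 3);     /* horizontal width / 8 */
    blizzard_write_reg(dev, 0x2e, height & 0xff);  /* vertical height, low byte */
    blizzard_write_reg(dev, 0x30, height >> 8);    /* vertical height, bits 9:8 */
}
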
diff --git a/hw/display/bochs-display.c b/hw/display/bochs-display.c
index 3b1d922..ad2821c 100644
--- a/hw/display/bochs-display.c
+++ b/hw/display/bochs-display.c
@@ -345,14 +345,13 @@ static void bochs_display_exit(PCIDevice *dev)
graphic_console_close(s->con);
}
-static Property bochs_display_properties[] = {
+static const Property bochs_display_properties[] = {
DEFINE_PROP_SIZE("vgamem", BochsDisplayState, vgamem, 16 * MiB),
DEFINE_PROP_BOOL("edid", BochsDisplayState, enable_edid, true),
DEFINE_EDID_PROPERTIES(BochsDisplayState, edid_info),
- DEFINE_PROP_END_OF_LIST(),
};
-static void bochs_display_class_init(ObjectClass *klass, void *data)
+static void bochs_display_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -375,7 +374,7 @@ static const TypeInfo bochs_display_type_info = {
.instance_size = sizeof(BochsDisplayState),
.instance_init = bochs_display_init,
.class_init = bochs_display_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
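
The bochs-display hunks above show the qdev shape used throughout this series: property arrays are const and carry no DEFINE_PROP_END_OF_LIST() terminator, class_init callbacks take a const data pointer, and interface lists are const compound literals. A condensed sketch of that shape, with hypothetical MyDisplayState/my_display_* names:

/* Sketch only; MyDisplayState and the my_display_* names are made up. */
static const Property my_display_properties[] = {
    DEFINE_PROP_SIZE("vgamem", MyDisplayState, vgamem, 16 * MiB),
    DEFINE_PROP_BOOL("edid", MyDisplayState, enable_edid, true),
};

static void my_display_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, my_display_properties);
    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
}

static const TypeInfo my_display_info = {
    .name          = "my-display",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(MyDisplayState),
    .class_init    = my_display_class_init,
    .interfaces    = (const InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
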
diff --git a/hw/display/cg3.c b/hw/display/cg3.c
index b271faa..daeef15 100644
--- a/hw/display/cg3.c
+++ b/hw/display/cg3.c
@@ -361,20 +361,19 @@ static void cg3_reset(DeviceState *d)
qemu_irq_lower(s->irq);
}
-static Property cg3_properties[] = {
+static const Property cg3_properties[] = {
DEFINE_PROP_UINT32("vram-size", CG3State, vram_size, -1),
DEFINE_PROP_UINT16("width", CG3State, width, -1),
DEFINE_PROP_UINT16("height", CG3State, height, -1),
DEFINE_PROP_UINT16("depth", CG3State, depth, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cg3_class_init(ObjectClass *klass, void *data)
+static void cg3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cg3_realizefn;
- dc->reset = cg3_reset;
+ device_class_set_legacy_reset(dc, cg3_reset);
dc->vmsd = &vmstate_cg3;
device_class_set_props(dc, cg3_properties);
}
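
device_class_set_legacy_reset(), as used in cg3_class_init() above, registers a DeviceState-style reset handler where dc->reset used to be assigned directly. A minimal sketch with hypothetical my_dev_* names:

static void my_dev_reset(DeviceState *dev)
{
    MyDevState *s = MY_DEV(dev);    /* hypothetical QOM cast macro */

    s->ctrl = 0;                    /* back to power-on register values */
    qemu_irq_lower(s->irq);
}

static void my_dev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, my_dev_reset);
}
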
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 150883a..ef08694 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -36,7 +36,7 @@
#include "qemu/module.h"
#include "qemu/units.h"
#include "qemu/log.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/pci/pci_device.h"
@@ -2982,17 +2982,16 @@ static void pci_cirrus_vga_realize(PCIDevice *dev, Error **errp)
}
}
-static Property pci_vga_cirrus_properties[] = {
+static const Property pci_vga_cirrus_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", struct PCICirrusVGAState,
cirrus_vga.vga.vram_size_mb, 4),
DEFINE_PROP_BOOL("blitter", struct PCICirrusVGAState,
cirrus_vga.enable_blitter, true),
DEFINE_PROP_BOOL("global-vmstate", struct PCICirrusVGAState,
cirrus_vga.vga.global_vmstate, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cirrus_vga_class_init(ObjectClass *klass, void *data)
+static void cirrus_vga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -3014,7 +3013,7 @@ static const TypeInfo cirrus_vga_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCICirrusVGAState),
.class_init = cirrus_vga_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c
index 84be516..4b55c48 100644
--- a/hw/display/cirrus_vga_isa.c
+++ b/hw/display/cirrus_vga_isa.c
@@ -69,15 +69,14 @@ static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp)
/* FIXME not qdev yet */
}
-static Property isa_cirrus_vga_properties[] = {
+static const Property isa_cirrus_vga_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", struct ISACirrusVGAState,
cirrus_vga.vga.vram_size_mb, 4),
DEFINE_PROP_BOOL("blitter", struct ISACirrusVGAState,
cirrus_vga.enable_blitter, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isa_cirrus_vga_class_init(ObjectClass *klass, void *data)
+static void isa_cirrus_vga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/display/dm163.c b/hw/display/dm163.c
index f92aee3..f8340d8 100644
--- a/hw/display/dm163.c
+++ b/hw/display/dm163.c
@@ -271,7 +271,7 @@ static uint32_t *update_display_of_row(DM163State *s, uint32_t *dest,
unsigned row)
{
for (unsigned _ = 0; _ < LED_SQUARE_SIZE; _++) {
- for (int x = 0; x < RGB_MATRIX_NUM_COLS * LED_SQUARE_SIZE; x++) {
+ for (int x = RGB_MATRIX_NUM_COLS * LED_SQUARE_SIZE - 1; x >= 0; x--) {
/* UI layer guarantees that there's 32 bits per pixel (Mar 2024) */
*dest++ = s->buffer[s->buffer_idx_of_row[row]][x / LED_SQUARE_SIZE];
}
@@ -325,12 +325,12 @@ static void dm163_realize(DeviceState *dev, Error **errp)
RGB_MATRIX_NUM_ROWS * LED_SQUARE_SIZE);
}
-static void dm163_class_init(ObjectClass *klass, void *data)
+static void dm163_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
- dc->desc = "DM163";
+ dc->desc = "DM163 8x3-channel constant current LED driver";
dc->vmsd = &vmstate_dm163;
dc->realize = dm163_realize;
rc->phases.hold = dm163_reset_hold;
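
dm163, by contrast, already hooks its reset into the ResettableClass phase table (rc->phases.hold above) rather than a legacy handler, which is why it needs no device_class_set_legacy_reset() conversion. A sketch of that style, with hypothetical my_dev_* names and assuming the current hold-phase signature taking (Object *, ResetType):

static void my_dev_reset_hold(Object *obj, ResetType type)
{
    MyDevState *s = MY_DEV(obj);    /* hypothetical QOM cast macro */

    s->counter = 0;
}

static void my_dev_class_init(ObjectClass *klass, const void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    rc->phases.hold = my_dev_reset_hold;
}
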
diff --git a/hw/display/dpcd.c b/hw/display/dpcd.c
index aab1b1a..a157dc6 100644
--- a/hw/display/dpcd.c
+++ b/hw/display/dpcd.c
@@ -141,11 +141,11 @@ static const VMStateDescription vmstate_dpcd = {
}
};
-static void dpcd_class_init(ObjectClass *oc, void *data)
+static void dpcd_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- dc->reset = dpcd_reset;
+ device_class_set_legacy_reset(dc, dpcd_reset);
dc->vmsd = &vmstate_dpcd;
}
diff --git a/hw/display/edid-region.c b/hw/display/edid-region.c
index 675429d..f1596fb 100644
--- a/hw/display/edid-region.c
+++ b/hw/display/edid-region.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/display/edid.h"
static uint64_t edid_region_read(void *ptr, hwaddr addr, unsigned size)
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
index 5712558..c61e028 100644
--- a/hw/display/exynos4210_fimd.c
+++ b/hw/display/exynos4210_fimd.c
@@ -1925,10 +1925,9 @@ static const GraphicHwOps exynos4210_fimd_ops = {
.gfx_update = exynos4210_fimd_update,
};
-static Property exynos4210_fimd_properties[] = {
+static const Property exynos4210_fimd_properties[] = {
DEFINE_PROP_LINK("framebuffer-memory", Exynos4210fimdState, fbmem,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static void exynos4210_fimd_init(Object *obj)
@@ -1959,12 +1958,12 @@ static void exynos4210_fimd_realize(DeviceState *dev, Error **errp)
s->console = graphic_console_init(dev, 0, &exynos4210_fimd_ops, s);
}
-static void exynos4210_fimd_class_init(ObjectClass *klass, void *data)
+static void exynos4210_fimd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &exynos4210_fimd_vmstate;
- dc->reset = exynos4210_fimd_reset;
+ device_class_set_legacy_reset(dc, exynos4210_fimd_reset);
dc->realize = exynos4210_fimd_realize;
device_class_set_props(dc, exynos4210_fimd_properties);
}
diff --git a/hw/display/framebuffer.h b/hw/display/framebuffer.h
index 38fa0dc..29a828c 100644
--- a/hw/display/framebuffer.h
+++ b/hw/display/framebuffer.h
@@ -1,7 +1,7 @@
#ifndef QEMU_FRAMEBUFFER_H
#define QEMU_FRAMEBUFFER_H
-#include "exec/memory.h"
+#include "system/memory.h"
/* Framebuffer device helper routines. */
diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c
index e08ec3f..a6ddc21 100644
--- a/hw/display/g364fb.c
+++ b/hw/display/g364fb.c
@@ -512,9 +512,8 @@ static void g364fb_sysbus_reset(DeviceState *d)
g364fb_reset(&s->g364);
}
-static Property g364fb_sysbus_properties[] = {
+static const Property g364fb_sysbus_properties[] = {
DEFINE_PROP_UINT32("vram_size", G364SysBusState, g364.vram_size, 8 * MiB),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_g364fb_sysbus = {
@@ -527,14 +526,14 @@ static const VMStateDescription vmstate_g364fb_sysbus = {
}
};
-static void g364fb_sysbus_class_init(ObjectClass *klass, void *data)
+static void g364fb_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = g364fb_sysbus_realize;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
dc->desc = "G364 framebuffer";
- dc->reset = g364fb_sysbus_reset;
+ device_class_set_legacy_reset(dc, g364fb_sysbus_reset);
dc->vmsd = &vmstate_g364fb_sysbus;
device_class_set_props(dc, g364fb_sysbus_properties);
}
diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c
index 3f9d1e1..2adfc1a 100644
--- a/hw/display/i2c-ddc.c
+++ b/hw/display/i2c-ddc.c
@@ -95,17 +95,16 @@ static const VMStateDescription vmstate_i2c_ddc = {
}
};
-static Property i2c_ddc_properties[] = {
+static const Property i2c_ddc_properties[] = {
DEFINE_EDID_PROPERTIES(I2CDDCState, edid_info),
- DEFINE_PROP_END_OF_LIST(),
};
-static void i2c_ddc_class_init(ObjectClass *oc, void *data)
+static void i2c_ddc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc);
- dc->reset = i2c_ddc_reset;
+ device_class_set_legacy_reset(dc, i2c_ddc_reset);
dc->vmsd = &vmstate_i2c_ddc;
device_class_set_props(dc, i2c_ddc_properties);
isc->event = i2c_ddc_event;
diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c
index 534f15d..90e82b5 100644
--- a/hw/display/jazz_led.c
+++ b/hw/display/jazz_led.c
@@ -294,13 +294,13 @@ static void jazz_led_reset(DeviceState *d)
qemu_console_resize(s->con, 60, 80);
}
-static void jazz_led_class_init(ObjectClass *klass, void *data)
+static void jazz_led_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "Jazz LED display",
dc->vmsd = &vmstate_jazz_led;
- dc->reset = jazz_led_reset;
+ device_class_set_legacy_reset(dc, jazz_led_reset);
dc->realize = jazz_led_realize;
}
diff --git a/hw/display/macfb.c b/hw/display/macfb.c
index 1ace341..574d667 100644
--- a/hw/display/macfb.c
+++ b/hw/display/macfb.c
@@ -383,7 +383,6 @@ static void macfb_sense_write(MacfbState *s, uint32_t val)
s->regs[DAFB_MODE_SENSE >> 2] = val;
trace_macfb_sense_write(val);
- return;
}
static void macfb_update_mode(MacfbState *s)
@@ -758,13 +757,12 @@ static void macfb_nubus_reset(DeviceState *d)
macfb_reset(&s->macfb);
}
-static Property macfb_sysbus_properties[] = {
+static const Property macfb_sysbus_properties[] = {
DEFINE_PROP_UINT32("width", MacfbSysBusState, macfb.width, 640),
DEFINE_PROP_UINT32("height", MacfbSysBusState, macfb.height, 480),
DEFINE_PROP_UINT8("depth", MacfbSysBusState, macfb.depth, 8),
DEFINE_PROP_UINT8("display", MacfbSysBusState, macfb.type,
MACFB_DISPLAY_VGA),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_macfb_sysbus = {
@@ -777,13 +775,12 @@ static const VMStateDescription vmstate_macfb_sysbus = {
}
};
-static Property macfb_nubus_properties[] = {
+static const Property macfb_nubus_properties[] = {
DEFINE_PROP_UINT32("width", MacfbNubusState, macfb.width, 640),
DEFINE_PROP_UINT32("height", MacfbNubusState, macfb.height, 480),
DEFINE_PROP_UINT8("depth", MacfbNubusState, macfb.depth, 8),
DEFINE_PROP_UINT8("display", MacfbNubusState, macfb.type,
MACFB_DISPLAY_VGA),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_macfb_nubus = {
@@ -796,18 +793,18 @@ static const VMStateDescription vmstate_macfb_nubus = {
}
};
-static void macfb_sysbus_class_init(ObjectClass *klass, void *data)
+static void macfb_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = macfb_sysbus_realize;
dc->desc = "SysBus Macintosh framebuffer";
- dc->reset = macfb_sysbus_reset;
+ device_class_set_legacy_reset(dc, macfb_sysbus_reset);
dc->vmsd = &vmstate_macfb_sysbus;
device_class_set_props(dc, macfb_sysbus_properties);
}
-static void macfb_nubus_class_init(ObjectClass *klass, void *data)
+static void macfb_nubus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
MacfbNubusDeviceClass *ndc = NUBUS_MACFB_CLASS(klass);
@@ -817,7 +814,7 @@ static void macfb_nubus_class_init(ObjectClass *klass, void *data)
device_class_set_parent_unrealize(dc, macfb_nubus_unrealize,
&ndc->parent_unrealize);
dc->desc = "Nubus Macintosh framebuffer";
- dc->reset = macfb_nubus_reset;
+ device_class_set_legacy_reset(dc, macfb_nubus_reset);
dc->vmsd = &vmstate_macfb_nubus;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
device_class_set_props(dc, macfb_nubus_properties);
diff --git a/hw/display/meson.build b/hw/display/meson.build
index 7db05ea..90e6c04 100644
--- a/hw/display/meson.build
+++ b/hw/display/meson.build
@@ -22,13 +22,9 @@ system_ss.add(when: 'CONFIG_VGA_MMIO', if_true: files('vga-mmio.c'))
system_ss.add(when: 'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c'))
system_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c'))
-system_ss.add(when: 'CONFIG_BLIZZARD', if_true: files('blizzard.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_fimd.c'))
system_ss.add(when: 'CONFIG_FRAMEBUFFER', if_true: files('framebuffer.c'))
-system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('tc6393xb.c'))
-system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dss.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_lcd.c'))
system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_fb.c'))
system_ss.add(when: 'CONFIG_SM501', if_true: files('sm501.c'))
system_ss.add(when: 'CONFIG_TCX', if_true: files('tcx.c'))
@@ -65,6 +61,8 @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c'))
system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman])
+system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_PCI'], if_true: [files('apple-gfx.m', 'apple-gfx-pci.m')])
+system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_MMIO'], if_true: [files('apple-gfx.m', 'apple-gfx-mmio.m')])
if config_all_devices.has_key('CONFIG_VIRTIO_GPU')
virtio_gpu_ss = ss.source_set()
diff --git a/hw/display/next-fb.c b/hw/display/next-fb.c
index 8446ff3..ec81b76 100644
--- a/hw/display/next-fb.c
+++ b/hw/display/next-fb.c
@@ -119,7 +119,7 @@ static void nextfb_realize(DeviceState *dev, Error **errp)
qemu_console_resize(s->con, s->cols, s->rows);
}
-static void nextfb_class_init(ObjectClass *oc, void *data)
+static void nextfb_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/display/omap_dss.c b/hw/display/omap_dss.c
deleted file mode 100644
index f33fc76..0000000
--- a/hw/display/omap_dss.c
+++ /dev/null
@@ -1,1093 +0,0 @@
-/*
- * OMAP2 Display Subsystem.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "ui/console.h"
-#include "hw/arm/omap.h"
-
-struct omap_dss_s {
- qemu_irq irq;
- qemu_irq drq;
- DisplayState *state;
- MemoryRegion iomem_diss1, iomem_disc1, iomem_rfbi1, iomem_venc1, iomem_im3;
-
- int autoidle;
- int control;
- int enable;
-
- struct omap_dss_panel_s {
- int enable;
- int nx;
- int ny;
-
- int x;
- int y;
- } dig, lcd;
-
- struct {
- uint32_t idlemode;
- uint32_t irqst;
- uint32_t irqen;
- uint32_t control;
- uint32_t config;
- uint32_t capable;
- uint32_t timing[4];
- int line;
- uint32_t bg[2];
- uint32_t trans[2];
-
- struct omap_dss_plane_s {
- int enable;
- int bpp;
- int posx;
- int posy;
- int nx;
- int ny;
-
- hwaddr addr[3];
-
- uint32_t attr;
- uint32_t tresh;
- int rowinc;
- int colinc;
- int wininc;
- } l[3];
-
- int invalidate;
- uint16_t palette[256];
- } dispc;
-
- struct {
- int idlemode;
- uint32_t control;
- int enable;
- int pixels;
- int busy;
- int skiplines;
- uint16_t rxbuf;
- uint32_t config[2];
- uint32_t time[4];
- uint32_t data[6];
- uint16_t vsync;
- uint16_t hsync;
- struct rfbi_chip_s *chip[2];
- } rfbi;
-};
-
-static void omap_dispc_interrupt_update(struct omap_dss_s *s)
-{
- qemu_set_irq(s->irq, s->dispc.irqst & s->dispc.irqen);
-}
-
-static void omap_rfbi_reset(struct omap_dss_s *s)
-{
- s->rfbi.idlemode = 0;
- s->rfbi.control = 2;
- s->rfbi.enable = 0;
- s->rfbi.pixels = 0;
- s->rfbi.skiplines = 0;
- s->rfbi.busy = 0;
- s->rfbi.config[0] = 0x00310000;
- s->rfbi.config[1] = 0x00310000;
- s->rfbi.time[0] = 0;
- s->rfbi.time[1] = 0;
- s->rfbi.time[2] = 0;
- s->rfbi.time[3] = 0;
- s->rfbi.data[0] = 0;
- s->rfbi.data[1] = 0;
- s->rfbi.data[2] = 0;
- s->rfbi.data[3] = 0;
- s->rfbi.data[4] = 0;
- s->rfbi.data[5] = 0;
- s->rfbi.vsync = 0;
- s->rfbi.hsync = 0;
-}
-
-void omap_dss_reset(struct omap_dss_s *s)
-{
- s->autoidle = 0;
- s->control = 0;
- s->enable = 0;
-
- s->dig.enable = 0;
- s->dig.nx = 1;
- s->dig.ny = 1;
-
- s->lcd.enable = 0;
- s->lcd.nx = 1;
- s->lcd.ny = 1;
-
- s->dispc.idlemode = 0;
- s->dispc.irqst = 0;
- s->dispc.irqen = 0;
- s->dispc.control = 0;
- s->dispc.config = 0;
- s->dispc.capable = 0x161;
- s->dispc.timing[0] = 0;
- s->dispc.timing[1] = 0;
- s->dispc.timing[2] = 0;
- s->dispc.timing[3] = 0;
- s->dispc.line = 0;
- s->dispc.bg[0] = 0;
- s->dispc.bg[1] = 0;
- s->dispc.trans[0] = 0;
- s->dispc.trans[1] = 0;
-
- s->dispc.l[0].enable = 0;
- s->dispc.l[0].bpp = 0;
- s->dispc.l[0].addr[0] = 0;
- s->dispc.l[0].addr[1] = 0;
- s->dispc.l[0].addr[2] = 0;
- s->dispc.l[0].posx = 0;
- s->dispc.l[0].posy = 0;
- s->dispc.l[0].nx = 1;
- s->dispc.l[0].ny = 1;
- s->dispc.l[0].attr = 0;
- s->dispc.l[0].tresh = 0;
- s->dispc.l[0].rowinc = 1;
- s->dispc.l[0].colinc = 1;
- s->dispc.l[0].wininc = 0;
-
- omap_rfbi_reset(s);
- omap_dispc_interrupt_update(s);
-}
-
-static uint64_t omap_diss_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* DSS_REVISIONNUMBER */
- return 0x20;
-
- case 0x10: /* DSS_SYSCONFIG */
- return s->autoidle;
-
- case 0x14: /* DSS_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x40: /* DSS_CONTROL */
- return s->control;
-
- case 0x50: /* DSS_PSA_LCD_REG_1 */
- case 0x54: /* DSS_PSA_LCD_REG_2 */
- case 0x58: /* DSS_PSA_VIDEO_REG */
- /* TODO: fake some values when appropriate s->control bits are set */
- return 0;
-
- case 0x5c: /* DSS_STATUS */
- return 1 + (s->control & 1);
-
- default:
- break;
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_diss_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x00: /* DSS_REVISIONNUMBER */
- case 0x14: /* DSS_SYSSTATUS */
- case 0x50: /* DSS_PSA_LCD_REG_1 */
- case 0x54: /* DSS_PSA_LCD_REG_2 */
- case 0x58: /* DSS_PSA_VIDEO_REG */
- case 0x5c: /* DSS_STATUS */
- OMAP_RO_REG(addr);
- break;
-
- case 0x10: /* DSS_SYSCONFIG */
- if (value & 2) /* SOFTRESET */
- omap_dss_reset(s);
- s->autoidle = value & 1;
- break;
-
- case 0x40: /* DSS_CONTROL */
- s->control = value & 0x3dd;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_diss_ops = {
- .read = omap_diss_read,
- .write = omap_diss_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static uint64_t omap_disc_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x000: /* DISPC_REVISION */
- return 0x20;
-
- case 0x010: /* DISPC_SYSCONFIG */
- return s->dispc.idlemode;
-
- case 0x014: /* DISPC_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x018: /* DISPC_IRQSTATUS */
- return s->dispc.irqst;
-
- case 0x01c: /* DISPC_IRQENABLE */
- return s->dispc.irqen;
-
- case 0x040: /* DISPC_CONTROL */
- return s->dispc.control;
-
- case 0x044: /* DISPC_CONFIG */
- return s->dispc.config;
-
- case 0x048: /* DISPC_CAPABLE */
- return s->dispc.capable;
-
- case 0x04c: /* DISPC_DEFAULT_COLOR0 */
- return s->dispc.bg[0];
- case 0x050: /* DISPC_DEFAULT_COLOR1 */
- return s->dispc.bg[1];
- case 0x054: /* DISPC_TRANS_COLOR0 */
- return s->dispc.trans[0];
- case 0x058: /* DISPC_TRANS_COLOR1 */
- return s->dispc.trans[1];
-
- case 0x05c: /* DISPC_LINE_STATUS */
- return 0x7ff;
- case 0x060: /* DISPC_LINE_NUMBER */
- return s->dispc.line;
-
- case 0x064: /* DISPC_TIMING_H */
- return s->dispc.timing[0];
- case 0x068: /* DISPC_TIMING_V */
- return s->dispc.timing[1];
- case 0x06c: /* DISPC_POL_FREQ */
- return s->dispc.timing[2];
- case 0x070: /* DISPC_DIVISOR */
- return s->dispc.timing[3];
-
- case 0x078: /* DISPC_SIZE_DIG */
- return ((s->dig.ny - 1) << 16) | (s->dig.nx - 1);
- case 0x07c: /* DISPC_SIZE_LCD */
- return ((s->lcd.ny - 1) << 16) | (s->lcd.nx - 1);
-
- case 0x080: /* DISPC_GFX_BA0 */
- return s->dispc.l[0].addr[0];
- case 0x084: /* DISPC_GFX_BA1 */
- return s->dispc.l[0].addr[1];
- case 0x088: /* DISPC_GFX_POSITION */
- return (s->dispc.l[0].posy << 16) | s->dispc.l[0].posx;
- case 0x08c: /* DISPC_GFX_SIZE */
- return ((s->dispc.l[0].ny - 1) << 16) | (s->dispc.l[0].nx - 1);
- case 0x0a0: /* DISPC_GFX_ATTRIBUTES */
- return s->dispc.l[0].attr;
- case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */
- return s->dispc.l[0].tresh;
- case 0x0a8: /* DISPC_GFX_FIFO_SIZE_STATUS */
- return 256;
- case 0x0ac: /* DISPC_GFX_ROW_INC */
- return s->dispc.l[0].rowinc;
- case 0x0b0: /* DISPC_GFX_PIXEL_INC */
- return s->dispc.l[0].colinc;
- case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */
- return s->dispc.l[0].wininc;
- case 0x0b8: /* DISPC_GFX_TABLE_BA */
- return s->dispc.l[0].addr[2];
-
- case 0x0bc: /* DISPC_VID1_BA0 */
- case 0x0c0: /* DISPC_VID1_BA1 */
- case 0x0c4: /* DISPC_VID1_POSITION */
- case 0x0c8: /* DISPC_VID1_SIZE */
- case 0x0cc: /* DISPC_VID1_ATTRIBUTES */
- case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */
- case 0x0d4: /* DISPC_VID1_FIFO_SIZE_STATUS */
- case 0x0d8: /* DISPC_VID1_ROW_INC */
- case 0x0dc: /* DISPC_VID1_PIXEL_INC */
- case 0x0e0: /* DISPC_VID1_FIR */
- case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */
- case 0x0e8: /* DISPC_VID1_ACCU0 */
- case 0x0ec: /* DISPC_VID1_ACCU1 */
- case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */
- case 0x14c: /* DISPC_VID2_BA0 */
- case 0x150: /* DISPC_VID2_BA1 */
- case 0x154: /* DISPC_VID2_POSITION */
- case 0x158: /* DISPC_VID2_SIZE */
- case 0x15c: /* DISPC_VID2_ATTRIBUTES */
- case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */
- case 0x164: /* DISPC_VID2_FIFO_SIZE_STATUS */
- case 0x168: /* DISPC_VID2_ROW_INC */
- case 0x16c: /* DISPC_VID2_PIXEL_INC */
- case 0x170: /* DISPC_VID2_FIR */
- case 0x174: /* DISPC_VID2_PICTURE_SIZE */
- case 0x178: /* DISPC_VID2_ACCU0 */
- case 0x17c: /* DISPC_VID2_ACCU1 */
- case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */
- case 0x1d4: /* DISPC_DATA_CYCLE1 */
- case 0x1d8: /* DISPC_DATA_CYCLE2 */
- case 0x1dc: /* DISPC_DATA_CYCLE3 */
- return 0;
-
- default:
- break;
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_disc_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x010: /* DISPC_SYSCONFIG */
- if (value & 2) /* SOFTRESET */
- omap_dss_reset(s);
- s->dispc.idlemode = value & 0x301b;
- break;
-
- case 0x018: /* DISPC_IRQSTATUS */
- s->dispc.irqst &= ~value;
- omap_dispc_interrupt_update(s);
- break;
-
- case 0x01c: /* DISPC_IRQENABLE */
- s->dispc.irqen = value & 0xffff;
- omap_dispc_interrupt_update(s);
- break;
-
- case 0x040: /* DISPC_CONTROL */
- s->dispc.control = value & 0x07ff9fff;
- s->dig.enable = (value >> 1) & 1;
- s->lcd.enable = (value >> 0) & 1;
- if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */
- if (!((s->dispc.l[1].attr | s->dispc.l[2].attr) & 1)) {
- fprintf(stderr, "%s: Overlay Optimization when no overlay "
- "region effectively exists leads to "
- "unpredictable behaviour!\n", __func__);
- }
- if (value & (1 << 6)) { /* GODIGITAL */
- /* XXX: Shadowed fields are:
- * s->dispc.config
- * s->dispc.capable
- * s->dispc.bg[0]
- * s->dispc.bg[1]
- * s->dispc.trans[0]
- * s->dispc.trans[1]
- * s->dispc.line
- * s->dispc.timing[0]
- * s->dispc.timing[1]
- * s->dispc.timing[2]
- * s->dispc.timing[3]
- * s->lcd.nx
- * s->lcd.ny
- * s->dig.nx
- * s->dig.ny
- * s->dispc.l[0].addr[0]
- * s->dispc.l[0].addr[1]
- * s->dispc.l[0].addr[2]
- * s->dispc.l[0].posx
- * s->dispc.l[0].posy
- * s->dispc.l[0].nx
- * s->dispc.l[0].ny
- * s->dispc.l[0].tresh
- * s->dispc.l[0].rowinc
- * s->dispc.l[0].colinc
- * s->dispc.l[0].wininc
- * All they need to be loaded here from their shadow registers.
- */
- }
- if (value & (1 << 5)) { /* GOLCD */
- /* XXX: Likewise for LCD here. */
- }
- s->dispc.invalidate = 1;
- break;
-
- case 0x044: /* DISPC_CONFIG */
- s->dispc.config = value & 0x3fff;
- /* XXX:
- * bits 2:1 (LOADMODE) reset to 0 after set to 1 and palette loaded
- * bits 2:1 (LOADMODE) reset to 2 after set to 3 and palette loaded
- */
- s->dispc.invalidate = 1;
- break;
-
- case 0x048: /* DISPC_CAPABLE */
- s->dispc.capable = value & 0x3ff;
- break;
-
- case 0x04c: /* DISPC_DEFAULT_COLOR0 */
- s->dispc.bg[0] = value & 0xffffff;
- s->dispc.invalidate = 1;
- break;
- case 0x050: /* DISPC_DEFAULT_COLOR1 */
- s->dispc.bg[1] = value & 0xffffff;
- s->dispc.invalidate = 1;
- break;
- case 0x054: /* DISPC_TRANS_COLOR0 */
- s->dispc.trans[0] = value & 0xffffff;
- s->dispc.invalidate = 1;
- break;
- case 0x058: /* DISPC_TRANS_COLOR1 */
- s->dispc.trans[1] = value & 0xffffff;
- s->dispc.invalidate = 1;
- break;
-
- case 0x060: /* DISPC_LINE_NUMBER */
- s->dispc.line = value & 0x7ff;
- break;
-
- case 0x064: /* DISPC_TIMING_H */
- s->dispc.timing[0] = value & 0x0ff0ff3f;
- break;
- case 0x068: /* DISPC_TIMING_V */
- s->dispc.timing[1] = value & 0x0ff0ff3f;
- break;
- case 0x06c: /* DISPC_POL_FREQ */
- s->dispc.timing[2] = value & 0x0003ffff;
- break;
- case 0x070: /* DISPC_DIVISOR */
- s->dispc.timing[3] = value & 0x00ff00ff;
- break;
-
- case 0x078: /* DISPC_SIZE_DIG */
- s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */
- s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */
- s->dispc.invalidate = 1;
- break;
- case 0x07c: /* DISPC_SIZE_LCD */
- s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */
- s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */
- s->dispc.invalidate = 1;
- break;
- case 0x080: /* DISPC_GFX_BA0 */
- s->dispc.l[0].addr[0] = (hwaddr) value;
- s->dispc.invalidate = 1;
- break;
- case 0x084: /* DISPC_GFX_BA1 */
- s->dispc.l[0].addr[1] = (hwaddr) value;
- s->dispc.invalidate = 1;
- break;
- case 0x088: /* DISPC_GFX_POSITION */
- s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */
- s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */
- s->dispc.invalidate = 1;
- break;
- case 0x08c: /* DISPC_GFX_SIZE */
- s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */
- s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */
- s->dispc.invalidate = 1;
- break;
- case 0x0a0: /* DISPC_GFX_ATTRIBUTES */
- s->dispc.l[0].attr = value & 0x7ff;
- if (value & (3 << 9))
- fprintf(stderr, "%s: Big-endian pixel format not supported\n",
- __func__);
- s->dispc.l[0].enable = value & 1;
- s->dispc.l[0].bpp = (value >> 1) & 0xf;
- s->dispc.invalidate = 1;
- break;
- case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */
- s->dispc.l[0].tresh = value & 0x01ff01ff;
- break;
- case 0x0ac: /* DISPC_GFX_ROW_INC */
- s->dispc.l[0].rowinc = value;
- s->dispc.invalidate = 1;
- break;
- case 0x0b0: /* DISPC_GFX_PIXEL_INC */
- s->dispc.l[0].colinc = value;
- s->dispc.invalidate = 1;
- break;
- case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */
- s->dispc.l[0].wininc = value;
- break;
- case 0x0b8: /* DISPC_GFX_TABLE_BA */
- s->dispc.l[0].addr[2] = (hwaddr) value;
- s->dispc.invalidate = 1;
- break;
-
- case 0x0bc: /* DISPC_VID1_BA0 */
- case 0x0c0: /* DISPC_VID1_BA1 */
- case 0x0c4: /* DISPC_VID1_POSITION */
- case 0x0c8: /* DISPC_VID1_SIZE */
- case 0x0cc: /* DISPC_VID1_ATTRIBUTES */
- case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */
- case 0x0d8: /* DISPC_VID1_ROW_INC */
- case 0x0dc: /* DISPC_VID1_PIXEL_INC */
- case 0x0e0: /* DISPC_VID1_FIR */
- case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */
- case 0x0e8: /* DISPC_VID1_ACCU0 */
- case 0x0ec: /* DISPC_VID1_ACCU1 */
- case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */
- case 0x14c: /* DISPC_VID2_BA0 */
- case 0x150: /* DISPC_VID2_BA1 */
- case 0x154: /* DISPC_VID2_POSITION */
- case 0x158: /* DISPC_VID2_SIZE */
- case 0x15c: /* DISPC_VID2_ATTRIBUTES */
- case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */
- case 0x168: /* DISPC_VID2_ROW_INC */
- case 0x16c: /* DISPC_VID2_PIXEL_INC */
- case 0x170: /* DISPC_VID2_FIR */
- case 0x174: /* DISPC_VID2_PICTURE_SIZE */
- case 0x178: /* DISPC_VID2_ACCU0 */
- case 0x17c: /* DISPC_VID2_ACCU1 */
- case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */
- case 0x1d4: /* DISPC_DATA_CYCLE1 */
- case 0x1d8: /* DISPC_DATA_CYCLE2 */
- case 0x1dc: /* DISPC_DATA_CYCLE3 */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_disc_ops = {
- .read = omap_disc_read,
- .write = omap_disc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void omap_rfbi_transfer_stop(struct omap_dss_s *s)
-{
- if (!s->rfbi.busy)
- return;
-
- /* TODO: in non-Bypass mode we probably need to just deassert the DRQ. */
-
- s->rfbi.busy = 0;
-}
-
-static void omap_rfbi_transfer_start(struct omap_dss_s *s)
-{
- void *data;
- hwaddr len;
- hwaddr data_addr;
- int pitch;
- static void *bounce_buffer;
- static hwaddr bounce_len;
-
- if (!s->rfbi.enable || s->rfbi.busy)
- return;
-
- if (s->rfbi.control & (1 << 1)) { /* BYPASS */
- /* TODO: in non-Bypass mode we probably need to just assert the
- * DRQ and wait for DMA to write the pixels. */
- qemu_log_mask(LOG_UNIMP, "%s: Bypass mode unimplemented\n", __func__);
- return;
- }
-
- if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */
- return;
- /* TODO: check that LCD output is enabled in DISPC. */
-
- s->rfbi.busy = 1;
-
- len = s->rfbi.pixels * 2;
-
- data_addr = s->dispc.l[0].addr[0];
- data = cpu_physical_memory_map(data_addr, &len, false);
- if (data && len != s->rfbi.pixels * 2) {
- cpu_physical_memory_unmap(data, len, 0, 0);
- data = NULL;
- len = s->rfbi.pixels * 2;
- }
- if (!data) {
- if (len > bounce_len) {
- bounce_buffer = g_realloc(bounce_buffer, len);
- }
- data = bounce_buffer;
- cpu_physical_memory_read(data_addr, data, len);
- }
-
- /* TODO bpp */
- s->rfbi.pixels = 0;
-
- /* TODO: negative values */
- pitch = s->dispc.l[0].nx + (s->dispc.l[0].rowinc - 1) / 2;
-
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0])
- s->rfbi.chip[0]->block(s->rfbi.chip[0]->opaque, 1, data, len, pitch);
- if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1])
- s->rfbi.chip[1]->block(s->rfbi.chip[1]->opaque, 1, data, len, pitch);
-
- if (data != bounce_buffer) {
- cpu_physical_memory_unmap(data, len, 0, len);
- }
-
- omap_rfbi_transfer_stop(s);
-
- /* TODO */
- s->dispc.irqst |= 1; /* FRAMEDONE */
- omap_dispc_interrupt_update(s);
-}
-
-static uint64_t omap_rfbi_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* RFBI_REVISION */
- return 0x10;
-
- case 0x10: /* RFBI_SYSCONFIG */
- return s->rfbi.idlemode;
-
- case 0x14: /* RFBI_SYSSTATUS */
- return 1 | (s->rfbi.busy << 8); /* RESETDONE */
-
- case 0x40: /* RFBI_CONTROL */
- return s->rfbi.control;
-
- case 0x44: /* RFBI_PIXELCNT */
- return s->rfbi.pixels;
-
- case 0x48: /* RFBI_LINE_NUMBER */
- return s->rfbi.skiplines;
-
- case 0x58: /* RFBI_READ */
- case 0x5c: /* RFBI_STATUS */
- return s->rfbi.rxbuf;
-
- case 0x60: /* RFBI_CONFIG0 */
- return s->rfbi.config[0];
- case 0x64: /* RFBI_ONOFF_TIME0 */
- return s->rfbi.time[0];
- case 0x68: /* RFBI_CYCLE_TIME0 */
- return s->rfbi.time[1];
- case 0x6c: /* RFBI_DATA_CYCLE1_0 */
- return s->rfbi.data[0];
- case 0x70: /* RFBI_DATA_CYCLE2_0 */
- return s->rfbi.data[1];
- case 0x74: /* RFBI_DATA_CYCLE3_0 */
- return s->rfbi.data[2];
-
- case 0x78: /* RFBI_CONFIG1 */
- return s->rfbi.config[1];
- case 0x7c: /* RFBI_ONOFF_TIME1 */
- return s->rfbi.time[2];
- case 0x80: /* RFBI_CYCLE_TIME1 */
- return s->rfbi.time[3];
- case 0x84: /* RFBI_DATA_CYCLE1_1 */
- return s->rfbi.data[3];
- case 0x88: /* RFBI_DATA_CYCLE2_1 */
- return s->rfbi.data[4];
- case 0x8c: /* RFBI_DATA_CYCLE3_1 */
- return s->rfbi.data[5];
-
- case 0x90: /* RFBI_VSYNC_WIDTH */
- return s->rfbi.vsync;
- case 0x94: /* RFBI_HSYNC_WIDTH */
- return s->rfbi.hsync;
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_rfbi_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_dss_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x10: /* RFBI_SYSCONFIG */
- if (value & 2) /* SOFTRESET */
- omap_rfbi_reset(s);
- s->rfbi.idlemode = value & 0x19;
- break;
-
- case 0x40: /* RFBI_CONTROL */
- s->rfbi.control = value & 0xf;
- s->rfbi.enable = value & 1;
- if (value & (1 << 4) && /* ITE */
- !(s->rfbi.config[0] & s->rfbi.config[1] & 0xc))
- omap_rfbi_transfer_start(s);
- break;
-
- case 0x44: /* RFBI_PIXELCNT */
- s->rfbi.pixels = value;
- break;
-
- case 0x48: /* RFBI_LINE_NUMBER */
- s->rfbi.skiplines = value & 0x7ff;
- break;
-
- case 0x4c: /* RFBI_CMD */
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0])
- s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 0, value & 0xffff);
- if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1])
- s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 0, value & 0xffff);
- break;
- case 0x50: /* RFBI_PARAM */
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0])
- s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff);
- if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1])
- s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff);
- break;
- case 0x54: /* RFBI_DATA */
- /* TODO: take into account the format set up in s->rfbi.config[?] and
- * s->rfbi.data[?], but special-case the most usual scenario so that
- * speed doesn't suffer. */
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) {
- s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff);
- s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value >> 16);
- }
- if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) {
- s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff);
- s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value >> 16);
- }
- if (!-- s->rfbi.pixels)
- omap_rfbi_transfer_stop(s);
- break;
- case 0x58: /* RFBI_READ */
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0])
- s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 1);
- else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1])
- s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 1);
- if (!-- s->rfbi.pixels)
- omap_rfbi_transfer_stop(s);
- break;
-
- case 0x5c: /* RFBI_STATUS */
- if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0])
- s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 0);
- else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1])
- s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 0);
- if (!-- s->rfbi.pixels)
- omap_rfbi_transfer_stop(s);
- break;
-
- case 0x60: /* RFBI_CONFIG0 */
- s->rfbi.config[0] = value & 0x003f1fff;
- break;
-
- case 0x64: /* RFBI_ONOFF_TIME0 */
- s->rfbi.time[0] = value & 0x3fffffff;
- break;
- case 0x68: /* RFBI_CYCLE_TIME0 */
- s->rfbi.time[1] = value & 0x0fffffff;
- break;
- case 0x6c: /* RFBI_DATA_CYCLE1_0 */
- s->rfbi.data[0] = value & 0x0f1f0f1f;
- break;
- case 0x70: /* RFBI_DATA_CYCLE2_0 */
- s->rfbi.data[1] = value & 0x0f1f0f1f;
- break;
- case 0x74: /* RFBI_DATA_CYCLE3_0 */
- s->rfbi.data[2] = value & 0x0f1f0f1f;
- break;
- case 0x78: /* RFBI_CONFIG1 */
- s->rfbi.config[1] = value & 0x003f1fff;
- break;
-
- case 0x7c: /* RFBI_ONOFF_TIME1 */
- s->rfbi.time[2] = value & 0x3fffffff;
- break;
- case 0x80: /* RFBI_CYCLE_TIME1 */
- s->rfbi.time[3] = value & 0x0fffffff;
- break;
- case 0x84: /* RFBI_DATA_CYCLE1_1 */
- s->rfbi.data[3] = value & 0x0f1f0f1f;
- break;
- case 0x88: /* RFBI_DATA_CYCLE2_1 */
- s->rfbi.data[4] = value & 0x0f1f0f1f;
- break;
- case 0x8c: /* RFBI_DATA_CYCLE3_1 */
- s->rfbi.data[5] = value & 0x0f1f0f1f;
- break;
-
- case 0x90: /* RFBI_VSYNC_WIDTH */
- s->rfbi.vsync = value & 0xffff;
- break;
- case 0x94: /* RFBI_HSYNC_WIDTH */
- s->rfbi.hsync = value & 0xffff;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_rfbi_ops = {
- .read = omap_rfbi_read,
- .write = omap_rfbi_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static uint64_t omap_venc_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* REV_ID */
- case 0x04: /* STATUS */
- case 0x08: /* F_CONTROL */
- case 0x10: /* VIDOUT_CTRL */
- case 0x14: /* SYNC_CTRL */
- case 0x1c: /* LLEN */
- case 0x20: /* FLENS */
- case 0x24: /* HFLTR_CTRL */
- case 0x28: /* CC_CARR_WSS_CARR */
- case 0x2c: /* C_PHASE */
- case 0x30: /* GAIN_U */
- case 0x34: /* GAIN_V */
- case 0x38: /* GAIN_Y */
- case 0x3c: /* BLACK_LEVEL */
- case 0x40: /* BLANK_LEVEL */
- case 0x44: /* X_COLOR */
- case 0x48: /* M_CONTROL */
- case 0x4c: /* BSTAMP_WSS_DATA */
- case 0x50: /* S_CARR */
- case 0x54: /* LINE21 */
- case 0x58: /* LN_SEL */
- case 0x5c: /* L21__WC_CTL */
- case 0x60: /* HTRIGGER_VTRIGGER */
- case 0x64: /* SAVID__EAVID */
- case 0x68: /* FLEN__FAL */
- case 0x6c: /* LAL__PHASE_RESET */
- case 0x70: /* HS_INT_START_STOP_X */
- case 0x74: /* HS_EXT_START_STOP_X */
- case 0x78: /* VS_INT_START_X */
- case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */
- case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */
- case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */
- case 0x88: /* VS_EXT_STOP_Y */
- case 0x90: /* AVID_START_STOP_X */
- case 0x94: /* AVID_START_STOP_Y */
- case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */
- case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */
- case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */
- case 0xb0: /* TVDETGP_INT_START_STOP_X */
- case 0xb4: /* TVDETGP_INT_START_STOP_Y */
- case 0xb8: /* GEN_CTRL */
- case 0xc4: /* DAC_TST__DAC_A */
- case 0xc8: /* DAC_B__DAC_C */
- return 0;
-
- default:
- break;
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_venc_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- if (size != 4) {
-        omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x08: /* F_CONTROL */
- case 0x10: /* VIDOUT_CTRL */
- case 0x14: /* SYNC_CTRL */
- case 0x1c: /* LLEN */
- case 0x20: /* FLENS */
- case 0x24: /* HFLTR_CTRL */
- case 0x28: /* CC_CARR_WSS_CARR */
- case 0x2c: /* C_PHASE */
- case 0x30: /* GAIN_U */
- case 0x34: /* GAIN_V */
- case 0x38: /* GAIN_Y */
- case 0x3c: /* BLACK_LEVEL */
- case 0x40: /* BLANK_LEVEL */
- case 0x44: /* X_COLOR */
- case 0x48: /* M_CONTROL */
- case 0x4c: /* BSTAMP_WSS_DATA */
- case 0x50: /* S_CARR */
- case 0x54: /* LINE21 */
- case 0x58: /* LN_SEL */
- case 0x5c: /* L21__WC_CTL */
- case 0x60: /* HTRIGGER_VTRIGGER */
- case 0x64: /* SAVID__EAVID */
- case 0x68: /* FLEN__FAL */
- case 0x6c: /* LAL__PHASE_RESET */
- case 0x70: /* HS_INT_START_STOP_X */
- case 0x74: /* HS_EXT_START_STOP_X */
- case 0x78: /* VS_INT_START_X */
- case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */
- case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */
- case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */
- case 0x88: /* VS_EXT_STOP_Y */
- case 0x90: /* AVID_START_STOP_X */
- case 0x94: /* AVID_START_STOP_Y */
- case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */
- case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */
- case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */
- case 0xb0: /* TVDETGP_INT_START_STOP_X */
- case 0xb4: /* TVDETGP_INT_START_STOP_Y */
- case 0xb8: /* GEN_CTRL */
- case 0xc4: /* DAC_TST__DAC_A */
- case 0xc8: /* DAC_B__DAC_C */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_venc_ops = {
- .read = omap_venc_read,
- .write = omap_venc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static uint64_t omap_im3_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x0a8: /* SBIMERRLOGA */
- case 0x0b0: /* SBIMERRLOG */
- case 0x190: /* SBIMSTATE */
- case 0x198: /* SBTMSTATE_L */
- case 0x19c: /* SBTMSTATE_H */
- case 0x1a8: /* SBIMCONFIG_L */
- case 0x1ac: /* SBIMCONFIG_H */
- case 0x1f8: /* SBID_L */
- case 0x1fc: /* SBID_H */
- return 0;
-
- default:
- break;
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_im3_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x0b0: /* SBIMERRLOG */
- case 0x190: /* SBIMSTATE */
- case 0x198: /* SBTMSTATE_L */
- case 0x19c: /* SBTMSTATE_H */
- case 0x1a8: /* SBIMCONFIG_L */
- case 0x1ac: /* SBIMCONFIG_H */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_im3_ops = {
- .read = omap_im3_read,
- .write = omap_im3_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_dss_s *omap_dss_init(struct omap_target_agent_s *ta,
- MemoryRegion *sysmem,
- hwaddr l3_base,
- qemu_irq irq, qemu_irq drq,
- omap_clk fck1, omap_clk fck2, omap_clk ck54m,
- omap_clk ick1, omap_clk ick2)
-{
- struct omap_dss_s *s = g_new0(struct omap_dss_s, 1);
-
- s->irq = irq;
- s->drq = drq;
- omap_dss_reset(s);
-
- memory_region_init_io(&s->iomem_diss1, NULL, &omap_diss_ops, s, "omap.diss1",
- omap_l4_region_size(ta, 0));
- memory_region_init_io(&s->iomem_disc1, NULL, &omap_disc_ops, s, "omap.disc1",
- omap_l4_region_size(ta, 1));
- memory_region_init_io(&s->iomem_rfbi1, NULL, &omap_rfbi_ops, s, "omap.rfbi1",
- omap_l4_region_size(ta, 2));
- memory_region_init_io(&s->iomem_venc1, NULL, &omap_venc_ops, s, "omap.venc1",
- omap_l4_region_size(ta, 3));
- memory_region_init_io(&s->iomem_im3, NULL, &omap_im3_ops, s,
- "omap.im3", 0x1000);
-
- omap_l4_attach(ta, 0, &s->iomem_diss1);
- omap_l4_attach(ta, 1, &s->iomem_disc1);
- omap_l4_attach(ta, 2, &s->iomem_rfbi1);
- omap_l4_attach(ta, 3, &s->iomem_venc1);
- memory_region_add_subregion(sysmem, l3_base, &s->iomem_im3);
-
-#if 0
- s->state = graphic_console_init(omap_update_display,
- omap_invalidate_display, omap_screen_dump, s);
-#endif
-
- return s;
-}
-
-void omap_rfbi_attach(struct omap_dss_s *s, int cs, struct rfbi_chip_s *chip)
-{
- if (cs < 0 || cs > 1)
- hw_error("%s: wrong CS %i\n", __func__, cs);
- s->rfbi.chip[cs] = chip;
-}
diff --git a/hw/display/pl110.c b/hw/display/pl110.c
index 7f145bb..09c3c59 100644
--- a/hw/display/pl110.c
+++ b/hw/display/pl110.c
@@ -535,10 +535,9 @@ static const GraphicHwOps pl110_gfx_ops = {
.gfx_update = pl110_update_display,
};
-static Property pl110_properties[] = {
+static const Property pl110_properties[] = {
DEFINE_PROP_LINK("framebuffer-memory", PL110State, fbmem,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static void pl110_realize(DeviceState *dev, Error **errp)
@@ -581,7 +580,7 @@ static void pl111_init(Object *obj)
s->version = VERSION_PL111;
}
-static void pl110_class_init(ObjectClass *klass, void *data)
+static void pl110_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
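
The two pl110.c hunks above are instances of conversions repeated throughout this patch: qdev property tables become static const Property arrays with no DEFINE_PROP_END_OF_LIST() terminator, and class_init callbacks take a const void *data parameter. A minimal sketch of the converted shape follows; DemoDeviceState and its fbmem field are placeholders, only the macros and signatures come from the hunks themselves.

/*
 * Sketch only: "DemoDeviceState" and "fbmem" are hypothetical.  The shape
 * mirrors the converted pl110.c code above: a const property array without
 * an end-of-list terminator, and a class_init taking const void *data.
 */
static const Property demo_properties[] = {
    DEFINE_PROP_LINK("framebuffer-memory", DemoDeviceState, fbmem,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, demo_properties);
}
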
diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c
deleted file mode 100644
index a9d0d98..0000000
--- a/hw/display/pxa2xx_lcd.c
+++ /dev/null
@@ -1,1451 +0,0 @@
-/*
- * Intel XScale PXA255/270 LCDC emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "ui/console.h"
-#include "hw/arm/pxa.h"
-#include "ui/pixel_ops.h"
-#include "hw/boards.h"
-/* FIXME: For graphic_rotate. Should probably be done in common code. */
-#include "sysemu/sysemu.h"
-#include "framebuffer.h"
-
-struct DMAChannel {
- uint32_t branch;
- uint8_t up;
- uint8_t palette[1024];
- uint8_t pbuffer[1024];
- void (*redraw)(PXA2xxLCDState *s, hwaddr addr,
- int *miny, int *maxy);
-
- uint32_t descriptor;
- uint32_t source;
- uint32_t id;
- uint32_t command;
-};
-
-struct PXA2xxLCDState {
- MemoryRegion *sysmem;
- MemoryRegion iomem;
- MemoryRegionSection fbsection;
- qemu_irq irq;
- int irqlevel;
-
- int invalidated;
- QemuConsole *con;
- int dest_width;
- int xres, yres;
- int pal_for;
- int transp;
- enum {
- pxa_lcdc_2bpp = 1,
- pxa_lcdc_4bpp = 2,
- pxa_lcdc_8bpp = 3,
- pxa_lcdc_16bpp = 4,
- pxa_lcdc_18bpp = 5,
- pxa_lcdc_18pbpp = 6,
- pxa_lcdc_19bpp = 7,
- pxa_lcdc_19pbpp = 8,
- pxa_lcdc_24bpp = 9,
- pxa_lcdc_25bpp = 10,
- } bpp;
-
- uint32_t control[6];
- uint32_t status[2];
- uint32_t ovl1c[2];
- uint32_t ovl2c[2];
- uint32_t ccr;
- uint32_t cmdcr;
- uint32_t trgbr;
- uint32_t tcr;
- uint32_t liidr;
- uint8_t bscntr;
-
- struct DMAChannel dma_ch[7];
-
- qemu_irq vsync_cb;
- int orientation;
-};
-
-typedef struct QEMU_PACKED {
- uint32_t fdaddr;
- uint32_t fsaddr;
- uint32_t fidr;
- uint32_t ldcmd;
-} PXAFrameDescriptor;
-
-#define LCCR0 0x000 /* LCD Controller Control register 0 */
-#define LCCR1 0x004 /* LCD Controller Control register 1 */
-#define LCCR2 0x008 /* LCD Controller Control register 2 */
-#define LCCR3 0x00c /* LCD Controller Control register 3 */
-#define LCCR4 0x010 /* LCD Controller Control register 4 */
-#define LCCR5 0x014 /* LCD Controller Control register 5 */
-
-#define FBR0 0x020 /* DMA Channel 0 Frame Branch register */
-#define FBR1 0x024 /* DMA Channel 1 Frame Branch register */
-#define FBR2 0x028 /* DMA Channel 2 Frame Branch register */
-#define FBR3 0x02c /* DMA Channel 3 Frame Branch register */
-#define FBR4 0x030 /* DMA Channel 4 Frame Branch register */
-#define FBR5 0x110 /* DMA Channel 5 Frame Branch register */
-#define FBR6 0x114 /* DMA Channel 6 Frame Branch register */
-
-#define LCSR1 0x034 /* LCD Controller Status register 1 */
-#define LCSR0 0x038 /* LCD Controller Status register 0 */
-#define LIIDR 0x03c /* LCD Controller Interrupt ID register */
-
-#define TRGBR 0x040 /* TMED RGB Seed register */
-#define TCR 0x044 /* TMED Control register */
-
-#define OVL1C1 0x050 /* Overlay 1 Control register 1 */
-#define OVL1C2 0x060 /* Overlay 1 Control register 2 */
-#define OVL2C1 0x070 /* Overlay 2 Control register 1 */
-#define OVL2C2 0x080 /* Overlay 2 Control register 2 */
-#define CCR 0x090 /* Cursor Control register */
-
-#define CMDCR 0x100 /* Command Control register */
-#define PRSR 0x104 /* Panel Read Status register */
-
-#define PXA_LCDDMA_CHANS 7
-#define DMA_FDADR 0x00 /* Frame Descriptor Address register */
-#define DMA_FSADR 0x04 /* Frame Source Address register */
-#define DMA_FIDR 0x08 /* Frame ID register */
-#define DMA_LDCMD 0x0c /* Command register */
-
-/* LCD Buffer Strength Control register */
-#define BSCNTR 0x04000054
-
-/* Bitfield masks */
-#define LCCR0_ENB (1 << 0)
-#define LCCR0_CMS (1 << 1)
-#define LCCR0_SDS (1 << 2)
-#define LCCR0_LDM (1 << 3)
-#define LCCR0_SOFM0 (1 << 4)
-#define LCCR0_IUM (1 << 5)
-#define LCCR0_EOFM0 (1 << 6)
-#define LCCR0_PAS (1 << 7)
-#define LCCR0_DPD (1 << 9)
-#define LCCR0_DIS (1 << 10)
-#define LCCR0_QDM (1 << 11)
-#define LCCR0_PDD (0xff << 12)
-#define LCCR0_BSM0 (1 << 20)
-#define LCCR0_OUM (1 << 21)
-#define LCCR0_LCDT (1 << 22)
-#define LCCR0_RDSTM (1 << 23)
-#define LCCR0_CMDIM (1 << 24)
-#define LCCR0_OUC (1 << 25)
-#define LCCR0_LDDALT (1 << 26)
-#define LCCR1_PPL(x) ((x) & 0x3ff)
-#define LCCR2_LPP(x) ((x) & 0x3ff)
-#define LCCR3_API (15 << 16)
-#define LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8))
-#define LCCR3_PDFOR(x) (((x) >> 30) & 3)
-#define LCCR4_K1(x) (((x) >> 0) & 7)
-#define LCCR4_K2(x) (((x) >> 3) & 7)
-#define LCCR4_K3(x) (((x) >> 6) & 7)
-#define LCCR4_PALFOR(x) (((x) >> 15) & 3)
-#define LCCR5_SOFM(ch) (1 << (ch - 1))
-#define LCCR5_EOFM(ch) (1 << (ch + 7))
-#define LCCR5_BSM(ch) (1 << (ch + 15))
-#define LCCR5_IUM(ch) (1 << (ch + 23))
-#define OVLC1_EN (1 << 31)
-#define CCR_CEN (1 << 31)
-#define FBR_BRA (1 << 0)
-#define FBR_BINT (1 << 1)
-#define FBR_SRCADDR (0xfffffff << 4)
-#define LCSR0_LDD (1 << 0)
-#define LCSR0_SOF0 (1 << 1)
-#define LCSR0_BER (1 << 2)
-#define LCSR0_ABC (1 << 3)
-#define LCSR0_IU0 (1 << 4)
-#define LCSR0_IU1 (1 << 5)
-#define LCSR0_OU (1 << 6)
-#define LCSR0_QD (1 << 7)
-#define LCSR0_EOF0 (1 << 8)
-#define LCSR0_BS0 (1 << 9)
-#define LCSR0_SINT (1 << 10)
-#define LCSR0_RDST (1 << 11)
-#define LCSR0_CMDINT (1 << 12)
-#define LCSR0_BERCH(x) (((x) & 7) << 28)
-#define LCSR1_SOF(ch) (1 << (ch - 1))
-#define LCSR1_EOF(ch) (1 << (ch + 7))
-#define LCSR1_BS(ch) (1 << (ch + 15))
-#define LCSR1_IU(ch) (1 << (ch + 23))
-#define LDCMD_LENGTH(x) ((x) & 0x001ffffc)
-#define LDCMD_EOFINT (1 << 21)
-#define LDCMD_SOFINT (1 << 22)
-#define LDCMD_PAL (1 << 26)
-
-/* Size of a pixel in the QEMU UI output surface, in bytes */
-#define DEST_PIXEL_WIDTH 4
-
-/* Line drawing code to handle the various possible guest pixel formats */
-
-# define SKIP_PIXEL(to) do { to += deststep; } while (0)
-# define COPY_PIXEL(to, from) \
- do { \
- *(uint32_t *) to = from; \
- SKIP_PIXEL(to); \
- } while (0)
-
-#if HOST_BIG_ENDIAN
-# define SWAP_WORDS 1
-#endif
-
-#define FN_2(x) FN(x + 1) FN(x)
-#define FN_4(x) FN_2(x + 2) FN_2(x)
-
-static void pxa2xx_draw_line2(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t *palette = opaque;
- uint32_t data;
- while (width > 0) {
- data = *(uint32_t *) src;
-#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 2)) & 3]);
-#ifdef SWAP_WORDS
- FN_4(12)
- FN_4(8)
- FN_4(4)
- FN_4(0)
-#else
- FN_4(0)
- FN_4(4)
- FN_4(8)
- FN_4(12)
-#endif
-#undef FN
- width -= 16;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line4(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t *palette = opaque;
- uint32_t data;
- while (width > 0) {
- data = *(uint32_t *) src;
-#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 4)) & 0xf]);
-#ifdef SWAP_WORDS
- FN_2(6)
- FN_2(4)
- FN_2(2)
- FN_2(0)
-#else
- FN_2(0)
- FN_2(2)
- FN_2(4)
- FN_2(6)
-#endif
-#undef FN
- width -= 8;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line8(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t *palette = opaque;
- uint32_t data;
- while (width > 0) {
- data = *(uint32_t *) src;
-#define FN(x) COPY_PIXEL(dest, palette[(data >> (x)) & 0xff]);
-#ifdef SWAP_WORDS
- FN(24)
- FN(16)
- FN(8)
- FN(0)
-#else
- FN(0)
- FN(8)
- FN(16)
- FN(24)
-#endif
-#undef FN
- width -= 4;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line16(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = (data & 0x1f) << 3;
- data >>= 5;
- g = (data & 0x3f) << 2;
- data >>= 6;
- r = (data & 0x1f) << 3;
- data >>= 5;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- b = (data & 0x1f) << 3;
- data >>= 5;
- g = (data & 0x3f) << 2;
- data >>= 6;
- r = (data & 0x1f) << 3;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- width -= 2;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line16t(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = (data & 0x1f) << 3;
- data >>= 5;
- g = (data & 0x1f) << 3;
- data >>= 5;
- r = (data & 0x1f) << 3;
- data >>= 5;
- if (data & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- data >>= 1;
- b = (data & 0x1f) << 3;
- data >>= 5;
- g = (data & 0x1f) << 3;
- data >>= 5;
- r = (data & 0x1f) << 3;
- data >>= 5;
- if (data & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- width -= 2;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line18(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = (data & 0x3f) << 2;
- data >>= 6;
- g = (data & 0x3f) << 2;
- data >>= 6;
- r = (data & 0x3f) << 2;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- width -= 1;
- src += 4;
- }
-}
-
-/* The wicked packed format */
-static void pxa2xx_draw_line18p(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data[3];
- unsigned int r, g, b;
- while (width > 0) {
- data[0] = *(uint32_t *) src;
- src += 4;
- data[1] = *(uint32_t *) src;
- src += 4;
- data[2] = *(uint32_t *) src;
- src += 4;
-#ifdef SWAP_WORDS
- data[0] = bswap32(data[0]);
- data[1] = bswap32(data[1]);
- data[2] = bswap32(data[2]);
-#endif
- b = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- g = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- r = (data[0] & 0x3f) << 2;
- data[0] >>= 12;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- b = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- g = ((data[1] & 0xf) << 4) | (data[0] << 2);
- data[1] >>= 4;
- r = (data[1] & 0x3f) << 2;
- data[1] >>= 12;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- b = (data[1] & 0x3f) << 2;
- data[1] >>= 6;
- g = (data[1] & 0x3f) << 2;
- data[1] >>= 6;
- r = ((data[2] & 0x3) << 6) | (data[1] << 2);
- data[2] >>= 8;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- b = (data[2] & 0x3f) << 2;
- data[2] >>= 6;
- g = (data[2] & 0x3f) << 2;
- data[2] >>= 6;
- r = data[2] << 2;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- width -= 4;
- }
-}
-
-static void pxa2xx_draw_line19(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = (data & 0x3f) << 2;
- data >>= 6;
- g = (data & 0x3f) << 2;
- data >>= 6;
- r = (data & 0x3f) << 2;
- data >>= 6;
- if (data & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- width -= 1;
- src += 4;
- }
-}
-
-/* The wicked packed format */
-static void pxa2xx_draw_line19p(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data[3];
- unsigned int r, g, b;
- while (width > 0) {
- data[0] = *(uint32_t *) src;
- src += 4;
- data[1] = *(uint32_t *) src;
- src += 4;
- data[2] = *(uint32_t *) src;
- src += 4;
-# ifdef SWAP_WORDS
- data[0] = bswap32(data[0]);
- data[1] = bswap32(data[1]);
- data[2] = bswap32(data[2]);
-# endif
- b = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- g = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- r = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- if (data[0] & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- data[0] >>= 6;
- b = (data[0] & 0x3f) << 2;
- data[0] >>= 6;
- g = ((data[1] & 0xf) << 4) | (data[0] << 2);
- data[1] >>= 4;
- r = (data[1] & 0x3f) << 2;
- data[1] >>= 6;
- if (data[1] & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- data[1] >>= 6;
- b = (data[1] & 0x3f) << 2;
- data[1] >>= 6;
- g = (data[1] & 0x3f) << 2;
- data[1] >>= 6;
- r = ((data[2] & 0x3) << 6) | (data[1] << 2);
- data[2] >>= 2;
- if (data[2] & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- data[2] >>= 6;
- b = (data[2] & 0x3f) << 2;
- data[2] >>= 6;
- g = (data[2] & 0x3f) << 2;
- data[2] >>= 6;
- r = data[2] << 2;
- data[2] >>= 6;
- if (data[2] & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- width -= 4;
- }
-}
-
-static void pxa2xx_draw_line24(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = data & 0xff;
- data >>= 8;
- g = data & 0xff;
- data >>= 8;
- r = data & 0xff;
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- width -= 1;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line24t(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = (data & 0x7f) << 1;
- data >>= 7;
- g = data & 0xff;
- data >>= 8;
- r = data & 0xff;
- data >>= 8;
- if (data & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- width -= 1;
- src += 4;
- }
-}
-
-static void pxa2xx_draw_line25(void *opaque, uint8_t *dest, const uint8_t *src,
- int width, int deststep)
-{
- uint32_t data;
- unsigned int r, g, b;
- while (width > 0) {
- data = *(uint32_t *) src;
-#ifdef SWAP_WORDS
- data = bswap32(data);
-#endif
- b = data & 0xff;
- data >>= 8;
- g = data & 0xff;
- data >>= 8;
- r = data & 0xff;
- data >>= 8;
- if (data & 1) {
- SKIP_PIXEL(dest);
- } else {
- COPY_PIXEL(dest, rgb_to_pixel32(r, g, b));
- }
- width -= 1;
- src += 4;
- }
-}
-
-/* Overlay planes disabled, no transparency */
-static drawfn pxa2xx_draw_fn_32[16] = {
- [0 ... 0xf] = NULL,
- [pxa_lcdc_2bpp] = pxa2xx_draw_line2,
- [pxa_lcdc_4bpp] = pxa2xx_draw_line4,
- [pxa_lcdc_8bpp] = pxa2xx_draw_line8,
- [pxa_lcdc_16bpp] = pxa2xx_draw_line16,
- [pxa_lcdc_18bpp] = pxa2xx_draw_line18,
- [pxa_lcdc_18pbpp] = pxa2xx_draw_line18p,
- [pxa_lcdc_24bpp] = pxa2xx_draw_line24,
-};
-
-/* Overlay planes enabled, transparency used */
-static drawfn pxa2xx_draw_fn_32t[16] = {
- [0 ... 0xf] = NULL,
- [pxa_lcdc_4bpp] = pxa2xx_draw_line4,
- [pxa_lcdc_8bpp] = pxa2xx_draw_line8,
- [pxa_lcdc_16bpp] = pxa2xx_draw_line16t,
- [pxa_lcdc_19bpp] = pxa2xx_draw_line19,
- [pxa_lcdc_19pbpp] = pxa2xx_draw_line19p,
- [pxa_lcdc_24bpp] = pxa2xx_draw_line24t,
- [pxa_lcdc_25bpp] = pxa2xx_draw_line25,
-};
-
-#undef COPY_PIXEL
-#undef SKIP_PIXEL
-
-#ifdef SWAP_WORDS
-# undef SWAP_WORDS
-#endif
-
-/* Route internal interrupt lines to the global IC */
-static void pxa2xx_lcdc_int_update(PXA2xxLCDState *s)
-{
- int level = 0;
- level |= (s->status[0] & LCSR0_LDD) && !(s->control[0] & LCCR0_LDM);
- level |= (s->status[0] & LCSR0_SOF0) && !(s->control[0] & LCCR0_SOFM0);
- level |= (s->status[0] & LCSR0_IU0) && !(s->control[0] & LCCR0_IUM);
- level |= (s->status[0] & LCSR0_IU1) && !(s->control[5] & LCCR5_IUM(1));
- level |= (s->status[0] & LCSR0_OU) && !(s->control[0] & LCCR0_OUM);
- level |= (s->status[0] & LCSR0_QD) && !(s->control[0] & LCCR0_QDM);
- level |= (s->status[0] & LCSR0_EOF0) && !(s->control[0] & LCCR0_EOFM0);
- level |= (s->status[0] & LCSR0_BS0) && !(s->control[0] & LCCR0_BSM0);
- level |= (s->status[0] & LCSR0_RDST) && !(s->control[0] & LCCR0_RDSTM);
- level |= (s->status[0] & LCSR0_CMDINT) && !(s->control[0] & LCCR0_CMDIM);
- level |= (s->status[1] & ~s->control[5]);
-
- qemu_set_irq(s->irq, !!level);
- s->irqlevel = level;
-}
-
-/* Set Branch Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_bs_set(PXA2xxLCDState *s, int ch)
-{
- int unmasked;
- if (ch == 0) {
- s->status[0] |= LCSR0_BS0;
- unmasked = !(s->control[0] & LCCR0_BSM0);
- } else {
- s->status[1] |= LCSR1_BS(ch);
- unmasked = !(s->control[5] & LCCR5_BSM(ch));
- }
-
- if (unmasked) {
- if (s->irqlevel)
- s->status[0] |= LCSR0_SINT;
- else
- s->liidr = s->dma_ch[ch].id;
- }
-}
-
-/* Set Start Of Frame Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_sof_set(PXA2xxLCDState *s, int ch)
-{
- int unmasked;
- if (!(s->dma_ch[ch].command & LDCMD_SOFINT))
- return;
-
- if (ch == 0) {
- s->status[0] |= LCSR0_SOF0;
- unmasked = !(s->control[0] & LCCR0_SOFM0);
- } else {
- s->status[1] |= LCSR1_SOF(ch);
- unmasked = !(s->control[5] & LCCR5_SOFM(ch));
- }
-
- if (unmasked) {
- if (s->irqlevel)
- s->status[0] |= LCSR0_SINT;
- else
- s->liidr = s->dma_ch[ch].id;
- }
-}
-
-/* Set End Of Frame Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_eof_set(PXA2xxLCDState *s, int ch)
-{
- int unmasked;
- if (!(s->dma_ch[ch].command & LDCMD_EOFINT))
- return;
-
- if (ch == 0) {
- s->status[0] |= LCSR0_EOF0;
- unmasked = !(s->control[0] & LCCR0_EOFM0);
- } else {
- s->status[1] |= LCSR1_EOF(ch);
- unmasked = !(s->control[5] & LCCR5_EOFM(ch));
- }
-
- if (unmasked) {
- if (s->irqlevel)
- s->status[0] |= LCSR0_SINT;
- else
- s->liidr = s->dma_ch[ch].id;
- }
-}
-
-/* Set Bus Error Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_ber_set(PXA2xxLCDState *s, int ch)
-{
- s->status[0] |= LCSR0_BERCH(ch) | LCSR0_BER;
- if (s->irqlevel)
- s->status[0] |= LCSR0_SINT;
- else
- s->liidr = s->dma_ch[ch].id;
-}
-
-/* Load new Frame Descriptors from DMA */
-static void pxa2xx_descriptor_load(PXA2xxLCDState *s)
-{
- PXAFrameDescriptor desc;
- hwaddr descptr;
- int i;
-
- for (i = 0; i < PXA_LCDDMA_CHANS; i ++) {
- s->dma_ch[i].source = 0;
-
- if (!s->dma_ch[i].up)
- continue;
-
- if (s->dma_ch[i].branch & FBR_BRA) {
- descptr = s->dma_ch[i].branch & FBR_SRCADDR;
- if (s->dma_ch[i].branch & FBR_BINT)
- pxa2xx_dma_bs_set(s, i);
- s->dma_ch[i].branch &= ~FBR_BRA;
- } else
- descptr = s->dma_ch[i].descriptor;
-
- if (!((descptr >= PXA2XX_SDRAM_BASE && descptr +
- sizeof(desc) <= PXA2XX_SDRAM_BASE + current_machine->ram_size) ||
- (descptr >= PXA2XX_INTERNAL_BASE && descptr + sizeof(desc) <=
- PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) {
- continue;
- }
-
- cpu_physical_memory_read(descptr, &desc, sizeof(desc));
- s->dma_ch[i].descriptor = le32_to_cpu(desc.fdaddr);
- s->dma_ch[i].source = le32_to_cpu(desc.fsaddr);
- s->dma_ch[i].id = le32_to_cpu(desc.fidr);
- s->dma_ch[i].command = le32_to_cpu(desc.ldcmd);
- }
-}
-
-static uint64_t pxa2xx_lcdc_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- PXA2xxLCDState *s = (PXA2xxLCDState *) opaque;
- int ch;
-
- switch (offset) {
- case LCCR0:
- return s->control[0];
- case LCCR1:
- return s->control[1];
- case LCCR2:
- return s->control[2];
- case LCCR3:
- return s->control[3];
- case LCCR4:
- return s->control[4];
- case LCCR5:
- return s->control[5];
-
- case OVL1C1:
- return s->ovl1c[0];
- case OVL1C2:
- return s->ovl1c[1];
- case OVL2C1:
- return s->ovl2c[0];
- case OVL2C2:
- return s->ovl2c[1];
-
- case CCR:
- return s->ccr;
-
- case CMDCR:
- return s->cmdcr;
-
- case TRGBR:
- return s->trgbr;
- case TCR:
- return s->tcr;
-
- case 0x200 ... 0x1000: /* DMA per-channel registers */
- ch = (offset - 0x200) >> 4;
- if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS))
- goto fail;
-
- switch (offset & 0xf) {
- case DMA_FDADR:
- return s->dma_ch[ch].descriptor;
- case DMA_FSADR:
- return s->dma_ch[ch].source;
- case DMA_FIDR:
- return s->dma_ch[ch].id;
- case DMA_LDCMD:
- return s->dma_ch[ch].command;
- default:
- goto fail;
- }
-
- case FBR0:
- return s->dma_ch[0].branch;
- case FBR1:
- return s->dma_ch[1].branch;
- case FBR2:
- return s->dma_ch[2].branch;
- case FBR3:
- return s->dma_ch[3].branch;
- case FBR4:
- return s->dma_ch[4].branch;
- case FBR5:
- return s->dma_ch[5].branch;
- case FBR6:
- return s->dma_ch[6].branch;
-
- case BSCNTR:
- return s->bscntr;
-
- case PRSR:
- return 0;
-
- case LCSR0:
- return s->status[0];
- case LCSR1:
- return s->status[1];
- case LIIDR:
- return s->liidr;
-
- default:
- fail:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_lcdc_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxLCDState *s = (PXA2xxLCDState *) opaque;
- int ch;
-
- switch (offset) {
- case LCCR0:
- /* ACK Quick Disable done */
- if ((s->control[0] & LCCR0_ENB) && !(value & LCCR0_ENB))
- s->status[0] |= LCSR0_QD;
-
- if (!(s->control[0] & LCCR0_LCDT) && (value & LCCR0_LCDT)) {
- qemu_log_mask(LOG_UNIMP,
- "%s: internal frame buffer unsupported\n", __func__);
- }
- if ((s->control[3] & LCCR3_API) &&
- (value & LCCR0_ENB) && !(value & LCCR0_LCDT))
- s->status[0] |= LCSR0_ABC;
-
- s->control[0] = value & 0x07ffffff;
- pxa2xx_lcdc_int_update(s);
-
- s->dma_ch[0].up = !!(value & LCCR0_ENB);
- s->dma_ch[1].up = (s->ovl1c[0] & OVLC1_EN) || (value & LCCR0_SDS);
- break;
-
- case LCCR1:
- s->control[1] = value;
- break;
-
- case LCCR2:
- s->control[2] = value;
- break;
-
- case LCCR3:
- s->control[3] = value & 0xefffffff;
- s->bpp = LCCR3_BPP(value);
- break;
-
- case LCCR4:
- s->control[4] = value & 0x83ff81ff;
- break;
-
- case LCCR5:
- s->control[5] = value & 0x3f3f3f3f;
- break;
-
- case OVL1C1:
- if (!(s->ovl1c[0] & OVLC1_EN) && (value & OVLC1_EN)) {
- qemu_log_mask(LOG_UNIMP, "%s: Overlay 1 not supported\n", __func__);
- }
- s->ovl1c[0] = value & 0x80ffffff;
- s->dma_ch[1].up = (value & OVLC1_EN) || (s->control[0] & LCCR0_SDS);
- break;
-
- case OVL1C2:
- s->ovl1c[1] = value & 0x000fffff;
- break;
-
- case OVL2C1:
- if (!(s->ovl2c[0] & OVLC1_EN) && (value & OVLC1_EN)) {
- qemu_log_mask(LOG_UNIMP, "%s: Overlay 2 not supported\n", __func__);
- }
- s->ovl2c[0] = value & 0x80ffffff;
- s->dma_ch[2].up = !!(value & OVLC1_EN);
- s->dma_ch[3].up = !!(value & OVLC1_EN);
- s->dma_ch[4].up = !!(value & OVLC1_EN);
- break;
-
- case OVL2C2:
- s->ovl2c[1] = value & 0x007fffff;
- break;
-
- case CCR:
- if (!(s->ccr & CCR_CEN) && (value & CCR_CEN)) {
- qemu_log_mask(LOG_UNIMP,
- "%s: Hardware cursor unimplemented\n", __func__);
- }
- s->ccr = value & 0x81ffffe7;
- s->dma_ch[5].up = !!(value & CCR_CEN);
- break;
-
- case CMDCR:
- s->cmdcr = value & 0xff;
- break;
-
- case TRGBR:
- s->trgbr = value & 0x00ffffff;
- break;
-
- case TCR:
- s->tcr = value & 0x7fff;
- break;
-
- case 0x200 ... 0x1000: /* DMA per-channel registers */
- ch = (offset - 0x200) >> 4;
- if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS))
- goto fail;
-
- switch (offset & 0xf) {
- case DMA_FDADR:
- s->dma_ch[ch].descriptor = value & 0xfffffff0;
- break;
-
- default:
- goto fail;
- }
- break;
-
- case FBR0:
- s->dma_ch[0].branch = value & 0xfffffff3;
- break;
- case FBR1:
- s->dma_ch[1].branch = value & 0xfffffff3;
- break;
- case FBR2:
- s->dma_ch[2].branch = value & 0xfffffff3;
- break;
- case FBR3:
- s->dma_ch[3].branch = value & 0xfffffff3;
- break;
- case FBR4:
- s->dma_ch[4].branch = value & 0xfffffff3;
- break;
- case FBR5:
- s->dma_ch[5].branch = value & 0xfffffff3;
- break;
- case FBR6:
- s->dma_ch[6].branch = value & 0xfffffff3;
- break;
-
- case BSCNTR:
- s->bscntr = value & 0xf;
- break;
-
- case PRSR:
- break;
-
- case LCSR0:
- s->status[0] &= ~(value & 0xfff);
- if (value & LCSR0_BER)
- s->status[0] &= ~LCSR0_BERCH(7);
- break;
-
- case LCSR1:
- s->status[1] &= ~(value & 0x3e3f3f);
- break;
-
- default:
- fail:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- }
-}
-
-static const MemoryRegionOps pxa2xx_lcdc_ops = {
- .read = pxa2xx_lcdc_read,
- .write = pxa2xx_lcdc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-/* Load new palette for a given DMA channel, convert to internal format */
-static void pxa2xx_palette_parse(PXA2xxLCDState *s, int ch, int bpp)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int i, n, format, r, g, b, alpha;
- uint32_t *dest;
- uint8_t *src;
- s->pal_for = LCCR4_PALFOR(s->control[4]);
- format = s->pal_for;
-
- switch (bpp) {
- case pxa_lcdc_2bpp:
- n = 4;
- break;
- case pxa_lcdc_4bpp:
- n = 16;
- break;
- case pxa_lcdc_8bpp:
- n = 256;
- break;
- default:
- return;
- }
-
- src = (uint8_t *) s->dma_ch[ch].pbuffer;
- dest = (uint32_t *) s->dma_ch[ch].palette;
- alpha = r = g = b = 0;
-
- for (i = 0; i < n; i ++) {
- switch (format) {
- case 0: /* 16 bpp, no transparency */
- alpha = 0;
- if (s->control[0] & LCCR0_CMS) {
- r = g = b = *(uint16_t *) src & 0xff;
- }
- else {
- r = (*(uint16_t *) src & 0xf800) >> 8;
- g = (*(uint16_t *) src & 0x07e0) >> 3;
- b = (*(uint16_t *) src & 0x001f) << 3;
- }
- src += 2;
- break;
- case 1: /* 16 bpp plus transparency */
- alpha = *(uint32_t *) src & (1 << 24);
- if (s->control[0] & LCCR0_CMS)
- r = g = b = *(uint32_t *) src & 0xff;
- else {
- r = (*(uint32_t *) src & 0xf80000) >> 16;
- g = (*(uint32_t *) src & 0x00fc00) >> 8;
- b = (*(uint32_t *) src & 0x0000f8);
- }
- src += 4;
- break;
- case 2: /* 18 bpp plus transparency */
- alpha = *(uint32_t *) src & (1 << 24);
- if (s->control[0] & LCCR0_CMS)
- r = g = b = *(uint32_t *) src & 0xff;
- else {
- r = (*(uint32_t *) src & 0xfc0000) >> 16;
- g = (*(uint32_t *) src & 0x00fc00) >> 8;
- b = (*(uint32_t *) src & 0x0000fc);
- }
- src += 4;
- break;
- case 3: /* 24 bpp plus transparency */
- alpha = *(uint32_t *) src & (1 << 24);
- if (s->control[0] & LCCR0_CMS)
- r = g = b = *(uint32_t *) src & 0xff;
- else {
- r = (*(uint32_t *) src & 0xff0000) >> 16;
- g = (*(uint32_t *) src & 0x00ff00) >> 8;
- b = (*(uint32_t *) src & 0x0000ff);
- }
- src += 4;
- break;
- }
- switch (surface_bits_per_pixel(surface)) {
- case 8:
- *dest = rgb_to_pixel8(r, g, b) | alpha;
- break;
- case 15:
- *dest = rgb_to_pixel15(r, g, b) | alpha;
- break;
- case 16:
- *dest = rgb_to_pixel16(r, g, b) | alpha;
- break;
- case 24:
- *dest = rgb_to_pixel24(r, g, b) | alpha;
- break;
- case 32:
- *dest = rgb_to_pixel32(r, g, b) | alpha;
- break;
- }
- dest ++;
- }
-}
-
-static inline drawfn pxa2xx_drawfn(PXA2xxLCDState *s)
-{
- if (s->transp) {
- return pxa2xx_draw_fn_32t[s->bpp];
- } else {
- return pxa2xx_draw_fn_32[s->bpp];
- }
-}
-
-static void pxa2xx_lcdc_dma0_redraw_rot0(PXA2xxLCDState *s,
- hwaddr addr, int *miny, int *maxy)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int src_width, dest_width;
- drawfn fn = pxa2xx_drawfn(s);
- if (!fn)
- return;
-
- src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */
- if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp)
- src_width *= 3;
- else if (s->bpp > pxa_lcdc_16bpp)
- src_width *= 4;
- else if (s->bpp > pxa_lcdc_8bpp)
- src_width *= 2;
-
- dest_width = s->xres * DEST_PIXEL_WIDTH;
- *miny = 0;
- if (s->invalidated) {
- framebuffer_update_memory_section(&s->fbsection, s->sysmem,
- addr, s->yres, src_width);
- }
- framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres,
- src_width, dest_width, DEST_PIXEL_WIDTH,
- s->invalidated,
- fn, s->dma_ch[0].palette, miny, maxy);
-}
-
-static void pxa2xx_lcdc_dma0_redraw_rot90(PXA2xxLCDState *s,
- hwaddr addr, int *miny, int *maxy)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int src_width, dest_width;
- drawfn fn = pxa2xx_drawfn(s);
- if (!fn)
- return;
-
- src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */
- if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp)
- src_width *= 3;
- else if (s->bpp > pxa_lcdc_16bpp)
- src_width *= 4;
- else if (s->bpp > pxa_lcdc_8bpp)
- src_width *= 2;
-
- dest_width = s->yres * DEST_PIXEL_WIDTH;
- *miny = 0;
- if (s->invalidated) {
- framebuffer_update_memory_section(&s->fbsection, s->sysmem,
- addr, s->yres, src_width);
- }
- framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres,
- src_width, DEST_PIXEL_WIDTH, -dest_width,
- s->invalidated,
- fn, s->dma_ch[0].palette,
- miny, maxy);
-}
-
-static void pxa2xx_lcdc_dma0_redraw_rot180(PXA2xxLCDState *s,
- hwaddr addr, int *miny, int *maxy)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int src_width, dest_width;
- drawfn fn = pxa2xx_drawfn(s);
- if (!fn) {
- return;
- }
-
- src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */
- if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) {
- src_width *= 3;
- } else if (s->bpp > pxa_lcdc_16bpp) {
- src_width *= 4;
- } else if (s->bpp > pxa_lcdc_8bpp) {
- src_width *= 2;
- }
-
- dest_width = s->xres * DEST_PIXEL_WIDTH;
- *miny = 0;
- if (s->invalidated) {
- framebuffer_update_memory_section(&s->fbsection, s->sysmem,
- addr, s->yres, src_width);
- }
- framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres,
- src_width, -dest_width, -DEST_PIXEL_WIDTH,
- s->invalidated,
- fn, s->dma_ch[0].palette, miny, maxy);
-}
-
-static void pxa2xx_lcdc_dma0_redraw_rot270(PXA2xxLCDState *s,
- hwaddr addr, int *miny, int *maxy)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int src_width, dest_width;
- drawfn fn = pxa2xx_drawfn(s);
- if (!fn) {
- return;
- }
-
- src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */
- if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) {
- src_width *= 3;
- } else if (s->bpp > pxa_lcdc_16bpp) {
- src_width *= 4;
- } else if (s->bpp > pxa_lcdc_8bpp) {
- src_width *= 2;
- }
-
- dest_width = s->yres * DEST_PIXEL_WIDTH;
- *miny = 0;
- if (s->invalidated) {
- framebuffer_update_memory_section(&s->fbsection, s->sysmem,
- addr, s->yres, src_width);
- }
- framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres,
- src_width, -DEST_PIXEL_WIDTH, dest_width,
- s->invalidated,
- fn, s->dma_ch[0].palette,
- miny, maxy);
-}
-
-static void pxa2xx_lcdc_resize(PXA2xxLCDState *s)
-{
- int width, height;
- if (!(s->control[0] & LCCR0_ENB))
- return;
-
- width = LCCR1_PPL(s->control[1]) + 1;
- height = LCCR2_LPP(s->control[2]) + 1;
-
- if (width != s->xres || height != s->yres) {
- if (s->orientation == 90 || s->orientation == 270) {
- qemu_console_resize(s->con, height, width);
- } else {
- qemu_console_resize(s->con, width, height);
- }
- s->invalidated = 1;
- s->xres = width;
- s->yres = height;
- }
-}
-
-static void pxa2xx_update_display(void *opaque)
-{
- PXA2xxLCDState *s = (PXA2xxLCDState *) opaque;
- hwaddr fbptr;
- int miny, maxy;
- int ch;
- if (!(s->control[0] & LCCR0_ENB))
- return;
-
- pxa2xx_descriptor_load(s);
-
- pxa2xx_lcdc_resize(s);
- miny = s->yres;
- maxy = 0;
- s->transp = s->dma_ch[2].up || s->dma_ch[3].up;
- /* Note: With overlay planes the order depends on LCCR0 bit 25. */
- for (ch = 0; ch < PXA_LCDDMA_CHANS; ch ++)
- if (s->dma_ch[ch].up) {
- if (!s->dma_ch[ch].source) {
- pxa2xx_dma_ber_set(s, ch);
- continue;
- }
- fbptr = s->dma_ch[ch].source;
- if (!((fbptr >= PXA2XX_SDRAM_BASE &&
- fbptr <= PXA2XX_SDRAM_BASE + current_machine->ram_size) ||
- (fbptr >= PXA2XX_INTERNAL_BASE &&
- fbptr <= PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) {
- pxa2xx_dma_ber_set(s, ch);
- continue;
- }
-
- if (s->dma_ch[ch].command & LDCMD_PAL) {
- cpu_physical_memory_read(fbptr, s->dma_ch[ch].pbuffer,
- MAX(LDCMD_LENGTH(s->dma_ch[ch].command),
- sizeof(s->dma_ch[ch].pbuffer)));
- pxa2xx_palette_parse(s, ch, s->bpp);
- } else {
- /* Do we need to reparse palette */
- if (LCCR4_PALFOR(s->control[4]) != s->pal_for)
- pxa2xx_palette_parse(s, ch, s->bpp);
-
- /* ACK frame start */
- pxa2xx_dma_sof_set(s, ch);
-
- s->dma_ch[ch].redraw(s, fbptr, &miny, &maxy);
- s->invalidated = 0;
-
- /* ACK frame completed */
- pxa2xx_dma_eof_set(s, ch);
- }
- }
-
- if (s->control[0] & LCCR0_DIS) {
- /* ACK last frame completed */
- s->control[0] &= ~LCCR0_ENB;
- s->status[0] |= LCSR0_LDD;
- }
-
- if (miny >= 0) {
- switch (s->orientation) {
- case 0:
- dpy_gfx_update(s->con, 0, miny, s->xres, maxy - miny + 1);
- break;
- case 90:
- dpy_gfx_update(s->con, miny, 0, maxy - miny + 1, s->xres);
- break;
- case 180:
- maxy = s->yres - maxy - 1;
- miny = s->yres - miny - 1;
- dpy_gfx_update(s->con, 0, maxy, s->xres, miny - maxy + 1);
- break;
- case 270:
- maxy = s->yres - maxy - 1;
- miny = s->yres - miny - 1;
- dpy_gfx_update(s->con, maxy, 0, miny - maxy + 1, s->xres);
- break;
- }
- }
- pxa2xx_lcdc_int_update(s);
-
- qemu_irq_raise(s->vsync_cb);
-}
-
-static void pxa2xx_invalidate_display(void *opaque)
-{
- PXA2xxLCDState *s = (PXA2xxLCDState *) opaque;
- s->invalidated = 1;
-}
-
-static void pxa2xx_lcdc_orientation(void *opaque, int angle)
-{
- PXA2xxLCDState *s = (PXA2xxLCDState *) opaque;
-
- switch (angle) {
- case 0:
- s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot0;
- break;
- case 90:
- s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot90;
- break;
- case 180:
- s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot180;
- break;
- case 270:
- s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot270;
- break;
- }
-
- s->orientation = angle;
- s->xres = s->yres = -1;
- pxa2xx_lcdc_resize(s);
-}
-
-static const VMStateDescription vmstate_dma_channel = {
- .name = "dma_channel",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(branch, struct DMAChannel),
- VMSTATE_UINT8(up, struct DMAChannel),
- VMSTATE_BUFFER(pbuffer, struct DMAChannel),
- VMSTATE_UINT32(descriptor, struct DMAChannel),
- VMSTATE_UINT32(source, struct DMAChannel),
- VMSTATE_UINT32(id, struct DMAChannel),
- VMSTATE_UINT32(command, struct DMAChannel),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static int pxa2xx_lcdc_post_load(void *opaque, int version_id)
-{
- PXA2xxLCDState *s = opaque;
-
- s->bpp = LCCR3_BPP(s->control[3]);
- s->xres = s->yres = s->pal_for = -1;
-
- return 0;
-}
-
-static const VMStateDescription vmstate_pxa2xx_lcdc = {
- .name = "pxa2xx_lcdc",
- .version_id = 0,
- .minimum_version_id = 0,
- .post_load = pxa2xx_lcdc_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(irqlevel, PXA2xxLCDState),
- VMSTATE_INT32(transp, PXA2xxLCDState),
- VMSTATE_UINT32_ARRAY(control, PXA2xxLCDState, 6),
- VMSTATE_UINT32_ARRAY(status, PXA2xxLCDState, 2),
- VMSTATE_UINT32_ARRAY(ovl1c, PXA2xxLCDState, 2),
- VMSTATE_UINT32_ARRAY(ovl2c, PXA2xxLCDState, 2),
- VMSTATE_UINT32(ccr, PXA2xxLCDState),
- VMSTATE_UINT32(cmdcr, PXA2xxLCDState),
- VMSTATE_UINT32(trgbr, PXA2xxLCDState),
- VMSTATE_UINT32(tcr, PXA2xxLCDState),
- VMSTATE_UINT32(liidr, PXA2xxLCDState),
- VMSTATE_UINT8(bscntr, PXA2xxLCDState),
- VMSTATE_STRUCT_ARRAY(dma_ch, PXA2xxLCDState, 7, 0,
- vmstate_dma_channel, struct DMAChannel),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static const GraphicHwOps pxa2xx_ops = {
- .invalidate = pxa2xx_invalidate_display,
- .gfx_update = pxa2xx_update_display,
-};
-
-PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem,
- hwaddr base, qemu_irq irq)
-{
- PXA2xxLCDState *s;
-
- s = g_new0(PXA2xxLCDState, 1);
- s->invalidated = 1;
- s->irq = irq;
- s->sysmem = sysmem;
-
- pxa2xx_lcdc_orientation(s, graphic_rotate);
-
- memory_region_init_io(&s->iomem, NULL, &pxa2xx_lcdc_ops, s,
- "pxa2xx-lcd-controller", 0x00100000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- s->con = graphic_console_init(NULL, 0, &pxa2xx_ops, s);
-
- vmstate_register(NULL, 0, &vmstate_pxa2xx_lcdc, s);
-
- return s;
-}
-
-void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler)
-{
- s->vsync_cb = handler;
-}
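
The line-drawing helpers deleted above unpack guest pixels one 32-bit word at a time and widen each colour channel with a left shift before handing the result to rgb_to_pixel32(). As a standalone illustration of the 16bpp case handled by pxa2xx_draw_line16() (assuming, as in QEMU's ui/pixel_ops.h, that rgb_to_pixel32() packs channels as XRGB8888), a single-pixel version could be written as:

#include <stdint.h>

/*
 * Illustration of the RGB565 unpacking done by pxa2xx_draw_line16() above:
 * bits 0-4 are blue, 5-10 green, 11-15 red, each widened to 8 bits by a
 * left shift and repacked as XRGB8888.  One 32-bit source word carries two
 * such pixels, which is why the original loop consumes 4 bytes per 2 pixels.
 */
static inline uint32_t rgb565_to_xrgb8888(uint16_t px)
{
    uint32_t b = (px & 0x1f) << 3;
    uint32_t g = ((px >> 5) & 0x3f) << 2;
    uint32_t r = ((px >> 11) & 0x1f) << 3;

    return (r << 16) | (g << 8) | b;
}
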
diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
index 335d01e..eda6d3d 100644
--- a/hw/display/qxl-render.c
+++ b/hw/display/qxl-render.c
@@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "qxl.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
static void qxl_blit(PCIQXLDevice *qxl, QXLRect *rect)
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 7178dec..18f482c 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -29,7 +29,8 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/qdev-properties.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
+#include "migration/cpr.h"
#include "migration/vmstate.h"
#include "trace.h"
@@ -50,7 +51,7 @@
#undef ALIGN
#define ALIGN(a, b) (((a) + ((b) - 1)) & ~((b) - 1))
-#define PIXEL_SIZE 0.2936875 //1280x1024 is 14.8" x 11.9"
+#define PIXEL_SIZE 0.2936875 /* 1280x1024 is 14.8" x 11.9" */
#define QXL_MODE(_x, _y, _b, _o) \
{ .x_res = _x, \
@@ -333,6 +334,10 @@ static void init_qxl_rom(PCIQXLDevice *d)
uint32_t fb;
int i, n;
+ if (cpr_is_incoming()) {
+ goto skip_init;
+ }
+
memset(rom, 0, d->rom_size);
rom->magic = cpu_to_le32(QXL_ROM_MAGIC);
@@ -390,6 +395,7 @@ static void init_qxl_rom(PCIQXLDevice *d)
sizeof(rom->client_monitors_config));
}
+skip_init:
d->shadow_rom = *rom;
d->rom = rom;
d->modes = modes;
@@ -403,6 +409,9 @@ static void init_qxl_ram(PCIQXLDevice *d)
buf = d->vga.vram_ptr;
d->ram = (QXLRam *)(buf + le32_to_cpu(d->shadow_rom.ram_header_offset));
+ if (cpr_is_incoming()) {
+ return;
+ }
d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC);
d->ram->int_pending = cpu_to_le32(0);
d->ram->int_mask = cpu_to_le32(0);
@@ -539,6 +548,10 @@ static void interface_set_compression_level(QXLInstance *sin, int level)
trace_qxl_interface_set_compression_level(qxl->id, level);
qxl->shadow_rom.compression_level = cpu_to_le32(level);
+ if (cpr_is_incoming()) {
+ assert(qxl->rom->compression_level == cpu_to_le32(level));
+ return;
+ }
qxl->rom->compression_level = cpu_to_le32(level);
qxl_rom_set_dirty(qxl);
}
@@ -997,7 +1010,8 @@ static void interface_set_client_capabilities(QXLInstance *sin,
}
if (runstate_check(RUN_STATE_INMIGRATE) ||
- runstate_check(RUN_STATE_POSTMIGRATE)) {
+ runstate_check(RUN_STATE_POSTMIGRATE) ||
+ cpr_is_incoming()) {
return;
}
@@ -1200,6 +1214,10 @@ static void qxl_reset_state(PCIQXLDevice *d)
{
QXLRom *rom = d->rom;
+ if (cpr_is_incoming()) {
+ return;
+ }
+
qxl_check_state(d);
d->shadow_rom.update_id = cpu_to_le32(0);
*rom = d->shadow_rom;
@@ -1301,8 +1319,8 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
};
uint64_t guest_start;
uint64_t guest_end;
- int pci_region;
- pcibus_t pci_start;
+ int pci_region = -1;
+ pcibus_t pci_start = PCI_BAR_UNMAPPED;
pcibus_t pci_end;
MemoryRegion *mr;
intptr_t virt_start;
@@ -1370,8 +1388,11 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
memslot.virt_start = virt_start + (guest_start - pci_start);
memslot.virt_end = virt_start + (guest_end - pci_start);
memslot.addr_delta = memslot.virt_start - delta;
- memslot.generation = d->rom->slot_generation = 0;
- qxl_rom_set_dirty(d);
+ if (!cpr_is_incoming()) {
+ d->rom->slot_generation = 0;
+ qxl_rom_set_dirty(d);
+ }
+ memslot.generation = d->rom->slot_generation;
qemu_spice_add_memslot(&d->ssd, &memslot, async);
d->guest_slots[slot_id].mr = mr;
@@ -2458,7 +2479,7 @@ static const VMStateDescription qxl_vmstate = {
}
};
-static Property qxl_properties[] = {
+static const Property qxl_properties[] = {
DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * MiB),
DEFINE_PROP_UINT64("vram_size", PCIQXLDevice, vram32_size, 64 * MiB),
DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision,
@@ -2475,10 +2496,9 @@ static Property qxl_properties[] = {
DEFINE_PROP_UINT32("xres", PCIQXLDevice, xres, 0),
DEFINE_PROP_UINT32("yres", PCIQXLDevice, yres, 0),
DEFINE_PROP_BOOL("global-vmstate", PCIQXLDevice, vga.global_vmstate, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void qxl_pci_class_init(ObjectClass *klass, void *data)
+static void qxl_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2486,7 +2506,7 @@ static void qxl_pci_class_init(ObjectClass *klass, void *data)
k->vendor_id = REDHAT_PCI_VENDOR_ID;
k->device_id = QXL_DEVICE_ID_STABLE;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->reset = qxl_reset_handler;
+ device_class_set_legacy_reset(dc, qxl_reset_handler);
dc->vmsd = &qxl_vmstate;
device_class_set_props(dc, qxl_properties);
}
@@ -2497,13 +2517,13 @@ static const TypeInfo qxl_pci_type_info = {
.instance_size = sizeof(PCIQXLDevice),
.abstract = true,
.class_init = qxl_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void qxl_primary_class_init(ObjectClass *klass, void *data)
+static void qxl_primary_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2523,7 +2543,7 @@ static const TypeInfo qxl_primary_info = {
module_obj("qxl-vga");
module_kconfig(QXL);
-static void qxl_secondary_class_init(ObjectClass *klass, void *data)
+static void qxl_secondary_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
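
The qxl.c hunks above add cpr_is_incoming() guards so that, when device state is handed over through CPR (QEMU's checkpoint/restart live update), guest-visible ROM and RAM contents inherited from the outgoing process are not overwritten during re-initialization. The recurring guard pattern, reduced to a sketch (the function below is illustrative, not the full init_qxl_ram()):

/*
 * Sketch of the guard pattern added throughout qxl.c above: skip
 * re-initialization of guest-visible state when it was inherited
 * across a CPR handover.  Body abbreviated for illustration.
 */
static void demo_init_qxl_ram(PCIQXLDevice *d)
{
    if (cpr_is_incoming()) {
        return; /* contents already set up by the outgoing QEMU */
    }
    d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC);
    d->ram->int_pending = cpu_to_le32(0);
    d->ram->int_mask = cpu_to_le32(0);
}
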
diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c
index 20eab34..08f2d5d 100644
--- a/hw/display/ramfb-standalone.c
+++ b/hw/display/ramfb-standalone.c
@@ -60,12 +60,11 @@ static const VMStateDescription ramfb_dev_vmstate = {
}
};
-static Property ramfb_properties[] = {
+static const Property ramfb_properties[] = {
DEFINE_PROP_BOOL("x-migrate", RAMFBStandaloneState, migrate, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ramfb_class_initfn(ObjectClass *klass, void *data)
+static void ramfb_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -73,13 +72,12 @@ static void ramfb_class_initfn(ObjectClass *klass, void *data)
dc->vmsd = &ramfb_dev_vmstate;
dc->realize = ramfb_realizefn;
dc->desc = "ram framebuffer standalone device";
- dc->user_creatable = true;
device_class_set_props(dc, ramfb_properties);
}
static const TypeInfo ramfb_info = {
.name = TYPE_RAMFB_DEVICE,
- .parent = TYPE_SYS_BUS_DEVICE,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
.instance_size = sizeof(RAMFBStandaloneState),
.class_init = ramfb_class_initfn,
};
diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c
index 6086baf..8c0f907 100644
--- a/hw/display/ramfb.c
+++ b/hw/display/ramfb.c
@@ -17,7 +17,7 @@
#include "hw/display/ramfb.h"
#include "hw/display/bochs-vbe.h" /* for limits */
#include "ui/console.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
struct QEMU_PACKED RAMFBCfg {
uint64_t addr;
diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c
index 60c3f78..d00d3e9 100644
--- a/hw/display/sii9022.c
+++ b/hw/display/sii9022.c
@@ -167,7 +167,7 @@ static void sii9022_realize(DeviceState *dev, Error **errp)
i2c_slave_create_simple(bus, TYPE_I2CDDC, 0x50);
}
-static void sii9022_class_init(ObjectClass *klass, void *data)
+static void sii9022_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -175,7 +175,7 @@ static void sii9022_class_init(ObjectClass *klass, void *data)
k->event = sii9022_event;
k->recv = sii9022_rx;
k->send = sii9022_tx;
- dc->reset = sii9022_reset;
+ device_class_set_legacy_reset(dc, sii9022_reset);
dc->realize = sii9022_realize;
dc->vmsd = &vmstate_sii9022;
}
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index 26dc817..6d2f186 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -29,7 +29,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/usb/hcd-ohci.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "ui/console.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
@@ -2054,11 +2054,10 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp)
/* TODO : chain irq to IRL */
}
-static Property sm501_sysbus_properties[] = {
+static const Property sm501_sysbus_properties[] = {
DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0),
/* this a debug option, prefer PROP_UINT over PROP_BIT for simplicity */
DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, DEFAULT_X_PIXMAN),
- DEFINE_PROP_END_OF_LIST(),
};
static void sm501_reset_sysbus(DeviceState *dev)
@@ -2078,7 +2077,7 @@ static const VMStateDescription vmstate_sm501_sysbus = {
}
};
-static void sm501_sysbus_class_init(ObjectClass *klass, void *data)
+static void sm501_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -2086,7 +2085,7 @@ static void sm501_sysbus_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
dc->desc = "SM501 Multimedia Companion";
device_class_set_props(dc, sm501_sysbus_properties);
- dc->reset = sm501_reset_sysbus;
+ device_class_set_legacy_reset(dc, sm501_reset_sysbus);
dc->vmsd = &vmstate_sm501_sysbus;
}
@@ -2143,10 +2142,9 @@ static void sm501_realize_pci(PCIDevice *dev, Error **errp)
&s->state.mmio_region);
}
-static Property sm501_pci_properties[] = {
+static const Property sm501_pci_properties[] = {
DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB),
DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, DEFAULT_X_PIXMAN),
- DEFINE_PROP_END_OF_LIST(),
};
static void sm501_reset_pci(DeviceState *dev)
@@ -2169,7 +2167,7 @@ static const VMStateDescription vmstate_sm501_pci = {
}
};
-static void sm501_pci_class_init(ObjectClass *klass, void *data)
+static void sm501_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2181,7 +2179,7 @@ static void sm501_pci_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
dc->desc = "SM501 Display Controller";
device_class_set_props(dc, sm501_pci_properties);
- dc->reset = sm501_reset_pci;
+ device_class_set_legacy_reset(dc, sm501_reset_pci);
dc->hotpluggable = false;
dc->vmsd = &vmstate_sm501_pci;
}
@@ -2198,7 +2196,7 @@ static const TypeInfo sm501_pci_info = {
.instance_size = sizeof(SM501PCIState),
.class_init = sm501_pci_class_init,
.instance_init = sm501_pci_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
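
The sm501, sii9022 and qxl hunks also share the reset-handler conversion: direct assignment to dc->reset is replaced by device_class_set_legacy_reset(), which adapts the old-style DeviceReset callback to the Resettable framework. In sketch form, with demo_reset standing in for a device's existing legacy handler:

/*
 * Sketch of the reset conversion seen in the hunks above.  "demo_reset"
 * is a placeholder for a device's existing legacy reset handler.
 */
static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, demo_reset);
}
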
diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c
index e292cff..8778143 100644
--- a/hw/display/ssd0303.c
+++ b/hw/display/ssd0303.c
@@ -311,7 +311,7 @@ static void ssd0303_realize(DeviceState *dev, Error **errp)
qemu_console_resize(s->con, 96 * MAGNIFY, 16 * MAGNIFY);
}
-static void ssd0303_class_init(ObjectClass *klass, void *data)
+static void ssd0303_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c
index 96cf0dc..af5ff4f 100644
--- a/hw/display/ssd0323.c
+++ b/hw/display/ssd0323.c
@@ -361,7 +361,7 @@ static void ssd0323_realize(SSIPeripheral *d, Error **errp)
qdev_init_gpio_in(dev, ssd0323_cd, 1);
}
-static void ssd0323_class_init(ObjectClass *klass, void *data)
+static void ssd0323_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c
deleted file mode 100644
index c7beba4..0000000
--- a/hw/display/tc6393xb.c
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Toshiba TC6393XB I/O Controller.
- * Found in Sharp Zaurus SL-6000 (tosa) or some
- * Toshiba e-Series PDAs.
- *
- * Most features are currently unsupported!!!
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/host-utils.h"
-#include "hw/irq.h"
-#include "hw/display/tc6393xb.h"
-#include "exec/memory.h"
-#include "hw/block/flash.h"
-#include "ui/console.h"
-#include "ui/pixel_ops.h"
-#include "sysemu/blockdev.h"
-
-#define IRQ_TC6393_NAND 0
-#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_SERIAL 3
-#define IRQ_TC6393_FB 4
-
-#define TC6393XB_NR_IRQS 8
-
-#define TC6393XB_GPIOS 16
-
-#define SCR_REVID 0x08 /* b Revision ID */
-#define SCR_ISR 0x50 /* b Interrupt Status */
-#define SCR_IMR 0x52 /* b Interrupt Mask */
-#define SCR_IRR 0x54 /* b Interrupt Routing */
-#define SCR_GPER 0x60 /* w GP Enable */
-#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
-#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
-#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
-#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
-#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
-#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
-#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
-#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
-#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
-#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
-#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
-#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
-#define SCR_CCR 0x98 /* w Clock Control */
-#define SCR_PLL2CR 0x9a /* w PLL2 Control */
-#define SCR_PLL1CR 0x9c /* l PLL1 Control */
-#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
-#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
-#define SCR_FER 0xe0 /* b Function Enable */
-#define SCR_MCR 0xe4 /* w Mode Control */
-#define SCR_CONFIG 0xfc /* b Configuration Control */
-#define SCR_DEBUG 0xff /* b Debug */
-
-#define NAND_CFG_COMMAND 0x04 /* w Command */
-#define NAND_CFG_BASE 0x10 /* l Control Base Address */
-#define NAND_CFG_INTP 0x3d /* b Interrupt Pin */
-#define NAND_CFG_INTE 0x48 /* b Int Enable */
-#define NAND_CFG_EC 0x4a /* b Event Control */
-#define NAND_CFG_ICC 0x4c /* b Internal Clock Control */
-#define NAND_CFG_ECCC 0x5b /* b ECC Control */
-#define NAND_CFG_NFTC 0x60 /* b NAND Flash Transaction Control */
-#define NAND_CFG_NFM 0x61 /* b NAND Flash Monitor */
-#define NAND_CFG_NFPSC 0x62 /* b NAND Flash Power Supply Control */
-#define NAND_CFG_NFDC 0x63 /* b NAND Flash Detect Control */
-
-#define NAND_DATA 0x00 /* l Data */
-#define NAND_MODE 0x04 /* b Mode */
-#define NAND_STATUS 0x05 /* b Status */
-#define NAND_ISR 0x06 /* b Interrupt Status */
-#define NAND_IMR 0x07 /* b Interrupt Mask */
-
-#define NAND_MODE_WP 0x80
-#define NAND_MODE_CE 0x10
-#define NAND_MODE_ALE 0x02
-#define NAND_MODE_CLE 0x01
-#define NAND_MODE_ECC_MASK 0x60
-#define NAND_MODE_ECC_EN 0x20
-#define NAND_MODE_ECC_READ 0x40
-#define NAND_MODE_ECC_RST 0x60
-
-struct TC6393xbState {
- MemoryRegion iomem;
- qemu_irq irq;
- qemu_irq *sub_irqs;
- struct {
- uint8_t ISR;
- uint8_t IMR;
- uint8_t IRR;
- uint16_t GPER;
- uint8_t GPI_SR[3];
- uint8_t GPI_IMR[3];
- uint8_t GPI_EDER[3];
- uint8_t GPI_LIR[3];
- uint8_t GP_IARCR[3];
- uint8_t GP_IARLCR[3];
- uint8_t GPI_BCR[3];
- uint16_t GPA_IARCR;
- uint16_t GPA_IARLCR;
- uint16_t CCR;
- uint16_t PLL2CR;
- uint32_t PLL1CR;
- uint8_t DIARCR;
- uint8_t DBOCR;
- uint8_t FER;
- uint16_t MCR;
- uint8_t CONFIG;
- uint8_t DEBUG;
- } scr;
- uint32_t gpio_dir;
- uint32_t gpio_level;
- uint32_t prev_level;
- qemu_irq handler[TC6393XB_GPIOS];
- qemu_irq *gpio_in;
-
- struct {
- uint8_t mode;
- uint8_t isr;
- uint8_t imr;
- } nand;
- int nand_enable;
- uint32_t nand_phys;
- DeviceState *flash;
- ECCState ecc;
-
- QemuConsole *con;
- MemoryRegion vram;
- uint16_t *vram_ptr;
- uint32_t scr_width, scr_height; /* in pixels */
- qemu_irq l3v;
- unsigned blank : 1,
- blanked : 1;
-};
-
-static void tc6393xb_gpio_set(void *opaque, int line, int level)
-{
-// TC6393xbState *s = opaque;
-
- if (line > TC6393XB_GPIOS) {
- printf("%s: No GPIO pin %i\n", __func__, line);
- return;
- }
-
- // FIXME: how does the chip reflect the GPIO input level change?
-}
-
-static void tc6393xb_gpio_handler_update(TC6393xbState *s)
-{
- uint32_t level, diff;
- int bit;
-
- level = s->gpio_level & s->gpio_dir;
- level &= MAKE_64BIT_MASK(0, TC6393XB_GPIOS);
-
- for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) {
- bit = ctz32(diff);
- qemu_set_irq(s->handler[bit], (level >> bit) & 1);
- }
-
- s->prev_level = level;
-}
-
-qemu_irq tc6393xb_l3v_get(TC6393xbState *s)
-{
- return s->l3v;
-}
-
-static void tc6393xb_l3v(void *opaque, int line, int level)
-{
- TC6393xbState *s = opaque;
- s->blank = !level;
- fprintf(stderr, "L3V: %d\n", level);
-}
-
-static void tc6393xb_sub_irq(void *opaque, int line, int level) {
- TC6393xbState *s = opaque;
- uint8_t isr = s->scr.ISR;
- if (level)
- isr |= 1 << line;
- else
- isr &= ~(1 << line);
- s->scr.ISR = isr;
- qemu_set_irq(s->irq, isr & s->scr.IMR);
-}
-
-#define SCR_REG_B(N) \
- case SCR_ ##N: return s->scr.N
-#define SCR_REG_W(N) \
- case SCR_ ##N: return s->scr.N; \
- case SCR_ ##N + 1: return s->scr.N >> 8;
-#define SCR_REG_L(N) \
- case SCR_ ##N: return s->scr.N; \
- case SCR_ ##N + 1: return s->scr.N >> 8; \
- case SCR_ ##N + 2: return s->scr.N >> 16; \
- case SCR_ ##N + 3: return s->scr.N >> 24;
-#define SCR_REG_A(N) \
- case SCR_ ##N(0): return s->scr.N[0]; \
- case SCR_ ##N(1): return s->scr.N[1]; \
- case SCR_ ##N(2): return s->scr.N[2]
-
-static uint32_t tc6393xb_scr_readb(TC6393xbState *s, hwaddr addr)
-{
- switch (addr) {
- case SCR_REVID:
- return 3;
- case SCR_REVID+1:
- return 0;
- SCR_REG_B(ISR);
- SCR_REG_B(IMR);
- SCR_REG_B(IRR);
- SCR_REG_W(GPER);
- SCR_REG_A(GPI_SR);
- SCR_REG_A(GPI_IMR);
- SCR_REG_A(GPI_EDER);
- SCR_REG_A(GPI_LIR);
- case SCR_GPO_DSR(0):
- case SCR_GPO_DSR(1):
- case SCR_GPO_DSR(2):
- return (s->gpio_level >> ((addr - SCR_GPO_DSR(0)) * 8)) & 0xff;
- case SCR_GPO_DOECR(0):
- case SCR_GPO_DOECR(1):
- case SCR_GPO_DOECR(2):
- return (s->gpio_dir >> ((addr - SCR_GPO_DOECR(0)) * 8)) & 0xff;
- SCR_REG_A(GP_IARCR);
- SCR_REG_A(GP_IARLCR);
- SCR_REG_A(GPI_BCR);
- SCR_REG_W(GPA_IARCR);
- SCR_REG_W(GPA_IARLCR);
- SCR_REG_W(CCR);
- SCR_REG_W(PLL2CR);
- SCR_REG_L(PLL1CR);
- SCR_REG_B(DIARCR);
- SCR_REG_B(DBOCR);
- SCR_REG_B(FER);
- SCR_REG_W(MCR);
- SCR_REG_B(CONFIG);
- SCR_REG_B(DEBUG);
- }
- fprintf(stderr, "tc6393xb_scr: unhandled read at %08x\n", (uint32_t) addr);
- return 0;
-}
-#undef SCR_REG_B
-#undef SCR_REG_W
-#undef SCR_REG_L
-#undef SCR_REG_A
-
-#define SCR_REG_B(N) \
- case SCR_ ##N: s->scr.N = value; return;
-#define SCR_REG_W(N) \
- case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \
- case SCR_ ##N + 1: s->scr.N = (s->scr.N & 0xff) | (value << 8); return
-#define SCR_REG_L(N) \
- case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \
- case SCR_ ##N + 1: s->scr.N = (s->scr.N & ~(0xff << 8)) | (value & (0xff << 8)); return; \
- case SCR_ ##N + 2: s->scr.N = (s->scr.N & ~(0xff << 16)) | (value & (0xff << 16)); return; \
- case SCR_ ##N + 3: s->scr.N = (s->scr.N & ~(0xff << 24)) | (value & (0xff << 24)); return;
-#define SCR_REG_A(N) \
- case SCR_ ##N(0): s->scr.N[0] = value; return; \
- case SCR_ ##N(1): s->scr.N[1] = value; return; \
- case SCR_ ##N(2): s->scr.N[2] = value; return
-
-static void tc6393xb_scr_writeb(TC6393xbState *s, hwaddr addr, uint32_t value)
-{
- switch (addr) {
- SCR_REG_B(ISR);
- SCR_REG_B(IMR);
- SCR_REG_B(IRR);
- SCR_REG_W(GPER);
- SCR_REG_A(GPI_SR);
- SCR_REG_A(GPI_IMR);
- SCR_REG_A(GPI_EDER);
- SCR_REG_A(GPI_LIR);
- case SCR_GPO_DSR(0):
- case SCR_GPO_DSR(1):
- case SCR_GPO_DSR(2):
- s->gpio_level = (s->gpio_level & ~(0xff << ((addr - SCR_GPO_DSR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DSR(0))*8));
- tc6393xb_gpio_handler_update(s);
- return;
- case SCR_GPO_DOECR(0):
- case SCR_GPO_DOECR(1):
- case SCR_GPO_DOECR(2):
- s->gpio_dir = (s->gpio_dir & ~(0xff << ((addr - SCR_GPO_DOECR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DOECR(0))*8));
- tc6393xb_gpio_handler_update(s);
- return;
- SCR_REG_A(GP_IARCR);
- SCR_REG_A(GP_IARLCR);
- SCR_REG_A(GPI_BCR);
- SCR_REG_W(GPA_IARCR);
- SCR_REG_W(GPA_IARLCR);
- SCR_REG_W(CCR);
- SCR_REG_W(PLL2CR);
- SCR_REG_L(PLL1CR);
- SCR_REG_B(DIARCR);
- SCR_REG_B(DBOCR);
- SCR_REG_B(FER);
- SCR_REG_W(MCR);
- SCR_REG_B(CONFIG);
- SCR_REG_B(DEBUG);
- }
- fprintf(stderr, "tc6393xb_scr: unhandled write at %08x: %02x\n",
- (uint32_t) addr, value & 0xff);
-}
-#undef SCR_REG_B
-#undef SCR_REG_W
-#undef SCR_REG_L
-#undef SCR_REG_A
-
-static void tc6393xb_nand_irq(TC6393xbState *s) {
- qemu_set_irq(s->sub_irqs[IRQ_TC6393_NAND],
- (s->nand.imr & 0x80) && (s->nand.imr & s->nand.isr));
-}
-
-static uint32_t tc6393xb_nand_cfg_readb(TC6393xbState *s, hwaddr addr) {
- switch (addr) {
- case NAND_CFG_COMMAND:
- return s->nand_enable ? 2 : 0;
- case NAND_CFG_BASE:
- case NAND_CFG_BASE + 1:
- case NAND_CFG_BASE + 2:
- case NAND_CFG_BASE + 3:
- return s->nand_phys >> (addr - NAND_CFG_BASE);
- }
- fprintf(stderr, "tc6393xb_nand_cfg: unhandled read at %08x\n", (uint32_t) addr);
- return 0;
-}
-static void tc6393xb_nand_cfg_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) {
- switch (addr) {
- case NAND_CFG_COMMAND:
- s->nand_enable = (value & 0x2);
- return;
- case NAND_CFG_BASE:
- case NAND_CFG_BASE + 1:
- case NAND_CFG_BASE + 2:
- case NAND_CFG_BASE + 3:
- s->nand_phys &= ~(0xff << ((addr - NAND_CFG_BASE) * 8));
- s->nand_phys |= (value & 0xff) << ((addr - NAND_CFG_BASE) * 8);
- return;
- }
- fprintf(stderr, "tc6393xb_nand_cfg: unhandled write at %08x: %02x\n",
- (uint32_t) addr, value & 0xff);
-}
-
-static uint32_t tc6393xb_nand_readb(TC6393xbState *s, hwaddr addr) {
- switch (addr) {
- case NAND_DATA + 0:
- case NAND_DATA + 1:
- case NAND_DATA + 2:
- case NAND_DATA + 3:
- return nand_getio(s->flash);
- case NAND_MODE:
- return s->nand.mode;
- case NAND_STATUS:
- return 0x14;
- case NAND_ISR:
- return s->nand.isr;
- case NAND_IMR:
- return s->nand.imr;
- }
- fprintf(stderr, "tc6393xb_nand: unhandled read at %08x\n", (uint32_t) addr);
- return 0;
-}
-static void tc6393xb_nand_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) {
-// fprintf(stderr, "tc6393xb_nand: write at %08x: %02x\n",
-// (uint32_t) addr, value & 0xff);
- switch (addr) {
- case NAND_DATA + 0:
- case NAND_DATA + 1:
- case NAND_DATA + 2:
- case NAND_DATA + 3:
- nand_setio(s->flash, value);
- s->nand.isr |= 1;
- tc6393xb_nand_irq(s);
- return;
- case NAND_MODE:
- s->nand.mode = value;
- nand_setpins(s->flash,
- value & NAND_MODE_CLE,
- value & NAND_MODE_ALE,
- !(value & NAND_MODE_CE),
- value & NAND_MODE_WP,
- 0); // FIXME: gnd
- switch (value & NAND_MODE_ECC_MASK) {
- case NAND_MODE_ECC_RST:
- ecc_reset(&s->ecc);
- break;
- case NAND_MODE_ECC_READ:
- // FIXME
- break;
- case NAND_MODE_ECC_EN:
- ecc_reset(&s->ecc);
- }
- return;
- case NAND_ISR:
- s->nand.isr = value;
- tc6393xb_nand_irq(s);
- return;
- case NAND_IMR:
- s->nand.imr = value;
- tc6393xb_nand_irq(s);
- return;
- }
- fprintf(stderr, "tc6393xb_nand: unhandled write at %08x: %02x\n",
- (uint32_t) addr, value & 0xff);
-}
-
-static void tc6393xb_draw_graphic(TC6393xbState *s, int full_update)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int i;
- uint16_t *data_buffer;
- uint8_t *data_display;
-
- data_buffer = s->vram_ptr;
- data_display = surface_data(surface);
- for (i = 0; i < s->scr_height; i++) {
- int j;
- for (j = 0; j < s->scr_width; j++, data_display += 4, data_buffer++) {
- uint16_t color = *data_buffer;
- uint32_t dest_color = rgb_to_pixel32(
- ((color & 0xf800) * 0x108) >> 11,
- ((color & 0x7e0) * 0x41) >> 9,
- ((color & 0x1f) * 0x21) >> 2
- );
- *(uint32_t *)data_display = dest_color;
- }
- }
- dpy_gfx_update_full(s->con);
-}
-
-static void tc6393xb_draw_blank(TC6393xbState *s, int full_update)
-{
- DisplaySurface *surface = qemu_console_surface(s->con);
- int i, w;
- uint8_t *d;
-
- if (!full_update)
- return;
-
- w = s->scr_width * surface_bytes_per_pixel(surface);
- d = surface_data(surface);
- for(i = 0; i < s->scr_height; i++) {
- memset(d, 0, w);
- d += surface_stride(surface);
- }
-
- dpy_gfx_update_full(s->con);
-}
-
-static void tc6393xb_update_display(void *opaque)
-{
- TC6393xbState *s = opaque;
- DisplaySurface *surface = qemu_console_surface(s->con);
- int full_update;
-
- if (s->scr_width == 0 || s->scr_height == 0)
- return;
-
- full_update = 0;
- if (s->blanked != s->blank) {
- s->blanked = s->blank;
- full_update = 1;
- }
- if (s->scr_width != surface_width(surface) ||
- s->scr_height != surface_height(surface)) {
- qemu_console_resize(s->con, s->scr_width, s->scr_height);
- full_update = 1;
- }
- if (s->blanked)
- tc6393xb_draw_blank(s, full_update);
- else
- tc6393xb_draw_graphic(s, full_update);
-}
-
-
-static uint64_t tc6393xb_readb(void *opaque, hwaddr addr,
- unsigned size)
-{
- TC6393xbState *s = opaque;
-
- switch (addr >> 8) {
- case 0:
- return tc6393xb_scr_readb(s, addr & 0xff);
- case 1:
- return tc6393xb_nand_cfg_readb(s, addr & 0xff);
- };
-
- if ((addr &~0xff) == s->nand_phys && s->nand_enable) {
-// return tc6393xb_nand_readb(s, addr & 0xff);
- uint8_t d = tc6393xb_nand_readb(s, addr & 0xff);
-// fprintf(stderr, "tc6393xb_nand: read at %08x: %02hhx\n", (uint32_t) addr, d);
- return d;
- }
-
-// fprintf(stderr, "tc6393xb: unhandled read at %08x\n", (uint32_t) addr);
- return 0;
-}
-
-static void tc6393xb_writeb(void *opaque, hwaddr addr,
- uint64_t value, unsigned size) {
- TC6393xbState *s = opaque;
-
- switch (addr >> 8) {
- case 0:
- tc6393xb_scr_writeb(s, addr & 0xff, value);
- return;
- case 1:
- tc6393xb_nand_cfg_writeb(s, addr & 0xff, value);
- return;
- };
-
- if ((addr &~0xff) == s->nand_phys && s->nand_enable)
- tc6393xb_nand_writeb(s, addr & 0xff, value);
- else
- fprintf(stderr, "tc6393xb: unhandled write at %08x: %02x\n",
- (uint32_t) addr, (int)value & 0xff);
-}
-
-static const GraphicHwOps tc6393xb_gfx_ops = {
- .gfx_update = tc6393xb_update_display,
-};
-
-TC6393xbState *tc6393xb_init(MemoryRegion *sysmem, uint32_t base, qemu_irq irq)
-{
- TC6393xbState *s;
- DriveInfo *nand;
- static const MemoryRegionOps tc6393xb_ops = {
- .read = tc6393xb_readb,
- .write = tc6393xb_writeb,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .impl = {
- .min_access_size = 1,
- .max_access_size = 1,
- },
- };
-
- s = g_new0(TC6393xbState, 1);
- s->irq = irq;
- s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS);
-
- s->l3v = qemu_allocate_irq(tc6393xb_l3v, s, 0);
- s->blanked = 1;
-
- s->sub_irqs = qemu_allocate_irqs(tc6393xb_sub_irq, s, TC6393XB_NR_IRQS);
-
- nand = drive_get(IF_MTD, 0, 0);
- s->flash = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL,
- NAND_MFR_TOSHIBA, 0x76);
-
- memory_region_init_io(&s->iomem, NULL, &tc6393xb_ops, s, "tc6393xb", 0x10000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- memory_region_init_ram(&s->vram, NULL, "tc6393xb.vram", 0x100000,
- &error_fatal);
- s->vram_ptr = memory_region_get_ram_ptr(&s->vram);
- memory_region_add_subregion(sysmem, base + 0x100000, &s->vram);
- s->scr_width = 480;
- s->scr_height = 640;
- s->con = graphic_console_init(NULL, 0, &tc6393xb_gfx_ops, s);
-
- return s;
-}
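
The GPIO handler update in the code removed above walks only the changed output lines, clearing one set bit of the XOR mask per iteration with ctz32(). A minimal standalone restatement of that idiom, using QEMU's existing ctz32() and qemu_set_irq() helpers; the wrapper function name is illustrative and not part of this patch:

#include "qemu/osdep.h"
#include "qemu/host-utils.h"   /* ctz32() */
#include "hw/irq.h"            /* qemu_irq, qemu_set_irq() */

/* Illustrative restatement of the deleted tc6393xb_gpio_handler_update():
 * visit only the GPIO lines whose level changed, lowest set bit first. */
static void update_changed_outputs(uint32_t prev, uint32_t level,
                                   qemu_irq *handler)
{
    uint32_t diff;
    int bit;

    for (diff = prev ^ level; diff; diff ^= 1u << bit) {
        bit = ctz32(diff);                        /* lowest changed line */
        qemu_set_irq(handler[bit], (level >> bit) & 1);
    }
}
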
diff --git a/hw/display/tcx.c b/hw/display/tcx.c
index 99507e7..4853c5e 100644
--- a/hw/display/tcx.c
+++ b/hw/display/tcx.c
@@ -729,7 +729,6 @@ static uint64_t tcx_dummy_readl(void *opaque, hwaddr addr,
static void tcx_dummy_writel(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- return;
}
static const MemoryRegionOps tcx_dummy_ops = {
@@ -879,20 +878,19 @@ static void tcx_realizefn(DeviceState *dev, Error **errp)
qemu_console_resize(s->con, s->width, s->height);
}
-static Property tcx_properties[] = {
+static const Property tcx_properties[] = {
DEFINE_PROP_UINT32("vram_size", TCXState, vram_size, -1),
DEFINE_PROP_UINT16("width", TCXState, width, -1),
DEFINE_PROP_UINT16("height", TCXState, height, -1),
DEFINE_PROP_UINT16("depth", TCXState, depth, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void tcx_class_init(ObjectClass *klass, void *data)
+static void tcx_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tcx_realizefn;
- dc->reset = tcx_reset;
+ device_class_set_legacy_reset(dc, tcx_reset);
dc->vmsd = &vmstate_tcx;
device_class_set_props(dc, tcx_properties);
}
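
The tcx.c hunks above are the first instance of a mechanical QOM API migration that repeats across the display devices below: property arrays become const and lose the DEFINE_PROP_END_OF_LIST() sentinel, class_init callbacks take a const void *data, and direct dc->reset assignments go through device_class_set_legacy_reset(). A condensed before/after sketch of the pattern; FooState, foo_reset and the example property are placeholders, not code from this patch:

/* Before: sentinel-terminated, mutable array, direct reset assignment. */
static Property foo_properties[] = {
    DEFINE_PROP_UINT32("vram_size", FooState, vram_size, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void foo_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = foo_reset;
    device_class_set_props(dc, foo_properties);
}

/* After: const array with no end-of-list sentinel, const class data,
 * legacy reset handler registered through the wrapper. */
static const Property foo_properties[] = {
    DEFINE_PROP_UINT32("vram_size", FooState, vram_size, -1),
};

static void foo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, foo_reset);
    device_class_set_props(dc, foo_properties);
}

The same pattern recurs in vga-isa.c, vga-mmio.c, vga-pci.c and the virtio-gpu files further down.
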
diff --git a/hw/display/trace-events b/hw/display/trace-events
index 781f8a3..52786e6 100644
--- a/hw/display/trace-events
+++ b/hw/display/trace-events
@@ -53,6 +53,9 @@ virtio_gpu_cmd_ctx_submit(uint32_t ctx, uint32_t size) "ctx 0x%x, size %d"
virtio_gpu_update_cursor(uint32_t scanout, uint32_t x, uint32_t y, const char *type, uint32_t res) "scanout %d, x %d, y %d, %s, res 0x%x"
virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x"
virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64
+virtio_gpu_inc_inflight_fences(uint32_t inflight) "in-flight+ %u"
+virtio_gpu_dec_inflight_fences(uint32_t inflight) "in-flight- %u"
+virtio_gpu_cmd_suspended(uint32_t cmd) "cmd 0x%x"
# qxl.c
disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u"
@@ -191,3 +194,33 @@ dm163_bits_ppi(unsigned dest_width) "dest_width : %u"
dm163_leds(int led, uint32_t value) "led %d: 0x%x"
dm163_channels(int channel, uint8_t value) "channel %d: 0x%x"
dm163_refresh_rate(uint32_t rr) "refresh rate %d"
+
+# apple-gfx.m
+apple_gfx_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64
+apple_gfx_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64
+apple_gfx_create_task(uint32_t vm_size, void *va) "vm_size=0x%x base_addr=%p"
+apple_gfx_destroy_task(void *task, unsigned int num_mapped_regions) "task=%p, task->mapped_regions->len=%u"
+apple_gfx_map_memory(void *task, uint32_t range_count, uint64_t virtual_offset, uint32_t read_only) "task=%p range_count=0x%x virtual_offset=0x%"PRIx64" read_only=%d"
+apple_gfx_map_memory_range(uint32_t i, uint64_t phys_addr, uint64_t phys_len) "[%d] phys_addr=0x%"PRIx64" phys_len=0x%"PRIx64
+apple_gfx_remap(uint64_t retval, void *source_ptr, uint64_t target) "retval=%"PRId64" source=%p target=0x%"PRIx64
+apple_gfx_unmap_memory(void *task, uint64_t virtual_offset, uint64_t length) "task=%p virtual_offset=0x%"PRIx64" length=0x%"PRIx64
+apple_gfx_read_memory(uint64_t phys_address, uint64_t length, void *dst) "phys_addr=0x%"PRIx64" length=0x%"PRIx64" dest=%p"
+apple_gfx_raise_irq(uint32_t vector) "vector=0x%x"
+apple_gfx_new_frame(void) ""
+apple_gfx_mode_change(uint64_t x, uint64_t y) "x=%"PRId64" y=%"PRId64
+apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d width=%"PRId64" height=0x%"PRId64
+apple_gfx_cursor_show(uint32_t show) "show=%d"
+apple_gfx_cursor_move(void) ""
+apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes"
+apple_gfx_common_realize_modes_property(uint32_t num_modes) "using %u modes supplied by 'display-modes' device property"
+apple_gfx_display_mode(uint32_t mode_idx, uint16_t width_px, uint16_t height_px) "mode %2"PRIu32": %4"PRIu16"x%4"PRIu16
+
+# apple-gfx-mmio.m
+apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64
+apple_gfx_mmio_iosfc_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64
+apple_gfx_iosfc_map_memory(uint64_t phys, uint64_t len, uint32_t ro, void *va, void *e, void *f, void* va_result) "phys=0x%"PRIx64" len=0x%"PRIx64" ro=%d va=%p e=%p f=%p -> *va=%p"
+apple_gfx_iosfc_map_memory_new_region(size_t i, void *region, uint64_t start, uint64_t end) "index=%zu, region=%p, 0x%"PRIx64"-0x%"PRIx64
+apple_gfx_iosfc_unmap_memory(void *a, void *b, void *c, void *d, void *e, void *f) "a=%p b=%p c=%p d=%p e=%p f=%p"
+apple_gfx_iosfc_unmap_memory_region(void* mem, void *region) "unmapping @ %p from memory region %p"
+apple_gfx_iosfc_raise_irq(uint32_t vector) "vector=0x%x"
+
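
Each line added above declares one trace point in QEMU's trace-events grammar: an event name, a C-typed argument list, then a printf-style format string used when the event is logged. The build turns every entry into a trace_<name>() helper; the two inflight-fence events, for example, are fired from virtio-gpu.c later in this patch in place of the old fprintf() counters. A minimal sketch of declaring and firing such an event (the example_frame_done event and the wrapper function are hypothetical, not part of this patch):

/* hw/display/trace-events, hypothetical entry in the same grammar:
 *   example_frame_done(uint32_t scanout, uint64_t nsecs) "scanout %u took %"PRIu64" ns"
 */
#include "qemu/osdep.h"
#include "trace.h"       /* generated from this directory's trace-events */

static void frame_done(uint32_t scanout, uint64_t nsecs)
{
    /* No-op unless the event is enabled; arguments match the declaration. */
    trace_example_frame_done(scanout, nsecs);
}
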
diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c
index c096ec9..3618913 100644
--- a/hw/display/vga-isa.c
+++ b/hw/display/vga-isa.c
@@ -88,17 +88,16 @@ static void vga_isa_realizefn(DeviceState *dev, Error **errp)
rom_add_vga(VGABIOS_FILENAME);
}
-static Property vga_isa_properties[] = {
+static const Property vga_isa_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", ISAVGAState, state.vram_size_mb, 8),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vga_isa_class_initfn(ObjectClass *klass, void *data)
+static void vga_isa_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = vga_isa_realizefn;
- dc->reset = vga_isa_reset;
+ device_class_set_legacy_reset(dc, vga_isa_reset);
dc->vmsd = &vmstate_vga_common;
device_class_set_props(dc, vga_isa_properties);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
diff --git a/hw/display/vga-mmio.c b/hw/display/vga-mmio.c
index cd2c467..3326385 100644
--- a/hw/display/vga-mmio.c
+++ b/hw/display/vga-mmio.c
@@ -111,18 +111,17 @@ static void vga_mmio_realizefn(DeviceState *dev, Error **errp)
s->vga.con = graphic_console_init(dev, 0, s->vga.hw_ops, &s->vga);
}
-static Property vga_mmio_properties[] = {
+static const Property vga_mmio_properties[] = {
DEFINE_PROP_UINT8("it_shift", VGAMmioState, it_shift, 0),
DEFINE_PROP_UINT32("vgamem_mb", VGAMmioState, vga.vram_size_mb, 8),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vga_mmio_class_initfn(ObjectClass *klass, void *data)
+static void vga_mmio_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = vga_mmio_realizefn;
- dc->reset = vga_mmio_reset;
+ device_class_set_legacy_reset(dc, vga_mmio_reset);
dc->vmsd = &vmstate_vga_common;
device_class_set_props(dc, vga_mmio_properties);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c
index 2d8adce..b81f7fd 100644
--- a/hw/display/vga-pci.c
+++ b/hw/display/vga-pci.c
@@ -330,7 +330,7 @@ static void pci_secondary_vga_reset(DeviceState *dev)
vga_common_reset(&d->vga);
}
-static Property vga_pci_properties[] = {
+static const Property vga_pci_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16),
DEFINE_PROP_BIT("mmio", PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_MMIO, true),
DEFINE_PROP_BIT("qemu-extended-regs",
@@ -339,20 +339,18 @@ static Property vga_pci_properties[] = {
PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true),
DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info),
DEFINE_PROP_BOOL("global-vmstate", PCIVGAState, vga.global_vmstate, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property secondary_pci_properties[] = {
+static const Property secondary_pci_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16),
DEFINE_PROP_BIT("qemu-extended-regs",
PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_QEXT, true),
DEFINE_PROP_BIT("edid",
PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true),
DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vga_pci_class_init(ObjectClass *klass, void *data)
+static void vga_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -371,14 +369,14 @@ static const TypeInfo vga_pci_type_info = {
.instance_size = sizeof(PCIVGAState),
.abstract = true,
.class_init = vga_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
};
-static void vga_class_init(ObjectClass *klass, void *data)
+static void vga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -394,7 +392,7 @@ static void vga_class_init(ObjectClass *klass, void *data)
vga_get_big_endian_fb, vga_set_big_endian_fb);
}
-static void secondary_class_init(ObjectClass *klass, void *data)
+static void secondary_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -403,7 +401,7 @@ static void secondary_class_init(ObjectClass *klass, void *data)
k->exit = pci_secondary_vga_exit;
k->class_id = PCI_CLASS_DISPLAY_OTHER;
device_class_set_props(dc, secondary_pci_properties);
- dc->reset = pci_secondary_vga_reset;
+ device_class_set_legacy_reset(dc, pci_secondary_vga_reset);
}
static const TypeInfo vga_info = {
diff --git a/hw/display/vga.c b/hw/display/vga.c
index 892fedc..20475eb 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -24,7 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qapi/error.h"
#include "exec/tswap.h"
#include "hw/display/vga.h"
@@ -1873,7 +1873,6 @@ void vga_common_reset(VGACommonState *s)
s->cursor_start = 0;
s->cursor_end = 0;
s->cursor_offset = 0;
- s->big_endian_fb = s->default_endian_fb;
memset(s->invalidated_y_table, '\0', sizeof(s->invalidated_y_table));
memset(s->last_palette, '\0', sizeof(s->last_palette));
memset(s->last_ch_attr, '\0', sizeof(s->last_ch_attr));
@@ -2265,7 +2264,8 @@ bool vga_common_init(VGACommonState *s, Object *obj, Error **errp)
* into a device attribute set by the machine/platform to remove
* all target endian dependencies from this file.
*/
- s->default_endian_fb = target_words_bigendian();
+ s->default_endian_fb = target_big_endian();
+ s->big_endian_fb = s->default_endian_fb;
vga_dirty_log_start(s);
diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h
index f77c1c1..747b5cc 100644
--- a/hw/display/vga_int.h
+++ b/hw/display/vga_int.h
@@ -26,8 +26,8 @@
#define HW_VGA_INT_H
#include "ui/console.h"
-#include "exec/ioport.h"
-#include "exec/memory.h"
+#include "system/ioport.h"
+#include "system/memory.h"
#include "hw/display/bochs-vbe.h"
#include "hw/acpi/acpi_aml_interface.h"
diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
index 63c64dd..9fc6bbc 100644
--- a/hw/display/vhost-user-gpu.c
+++ b/hw/display/vhost-user-gpu.c
@@ -18,6 +18,7 @@
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"
+#include "standard-headers/drm/drm_fourcc.h"
typedef enum VhostUserGpuRequest {
VHOST_USER_GPU_NONE = 0,
@@ -249,7 +250,9 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
case VHOST_USER_GPU_DMABUF_SCANOUT: {
VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
- uint64_t modifier = 0;
+ uint32_t offset = 0;
+ uint32_t stride = m->fd_stride;
+ uint64_t modifier = DRM_FORMAT_MOD_INVALID;
QemuDmaBuf *dmabuf;
if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
@@ -282,10 +285,10 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
}
dmabuf = qemu_dmabuf_new(m->width, m->height,
- m->fd_stride, 0, 0,
+ &offset, &stride, 0, 0,
m->fd_width, m->fd_height,
m->fd_drm_fourcc, modifier,
- fd, false, m->fd_flags &
+ &fd, 1, false, m->fd_flags &
VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP);
dpy_gl_scanout_dmabuf(con, dmabuf);
@@ -390,7 +393,7 @@ vhost_user_gpu_chr_read(void *opaque)
}
msg->request = request;
- msg->flags = size;
+ msg->flags = flags;
msg->size = size;
if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
@@ -513,7 +516,7 @@ vhost_user_gpu_set_config(VirtIODevice *vdev,
}
}
-static void
+static int
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
VhostUserGPU *g = VHOST_USER_GPU(vdev);
@@ -522,18 +525,24 @@ vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
if (!vhost_user_gpu_do_set_socket(g, &err)) {
error_report_err(err);
- return;
+ return 0;
}
vhost_user_backend_start(g->vhost);
} else {
+ int ret;
+
/* unblock any wait and stop processing */
if (g->vhost_gpu_fd != -1) {
vhost_user_gpu_update_blocked(g, true);
qemu_chr_fe_deinit(&g->vhost_chr, true);
g->vhost_gpu_fd = -1;
}
- vhost_user_backend_stop(g->vhost);
+ ret = vhost_user_backend_stop(g->vhost);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
static bool
@@ -631,6 +640,14 @@ vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
error_report("EDID requested but the backend doesn't support it.");
g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
}
+ if (virtio_has_feature(g->vhost->dev.features,
+ VIRTIO_GPU_F_RESOURCE_UUID)) {
+ g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED;
+ }
+ if (virtio_has_feature(g->vhost->dev.features,
+ VIRTIO_GPU_F_RESOURCE_UUID)) {
+ g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED;
+ }
if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
return;
@@ -642,16 +659,15 @@ vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
VhostUserGPU *g = VHOST_USER_GPU(vdev);
- return &g->vhost->dev;
+ return g->vhost ? &g->vhost->dev : NULL;
}
-static Property vhost_user_gpu_properties[] = {
+static const Property vhost_user_gpu_properties[] = {
VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
- DEFINE_PROP_END_OF_LIST(),
};
static void
-vhost_user_gpu_class_init(ObjectClass *klass, void *data)
+vhost_user_gpu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
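
The dmabuf-scanout hunk above reflects qemu_dmabuf_new() growing per-plane offset/stride/fd array parameters plus an explicit plane count. The sketch below mirrors the two single-plane call sites in this patch (here and in virtio-gpu-udmabuf.c); the prototype itself lives in ui/dmabuf.h and is not shown in this diff, so the argument names are inferred from the calls rather than authoritative, and the helper is hypothetical:

#include "qemu/osdep.h"
#include "ui/dmabuf.h"                           /* QemuDmaBuf, qemu_dmabuf_new() */
#include "standard-headers/drm/drm_fourcc.h"     /* DRM_FORMAT_MOD_INVALID */

/* Hypothetical helper, not part of this patch: wrap one dmabuf fd with the
 * array-based qemu_dmabuf_new() shown above. */
static QemuDmaBuf *wrap_single_plane(uint32_t width, uint32_t height,
                                     uint32_t stride, uint32_t fourcc,
                                     int fd, bool y0_top)
{
    uint32_t offset = 0;

    return qemu_dmabuf_new(width, height,
                           &offset, &stride,          /* per-plane arrays   */
                           0, 0,                      /* scanout x, y       */
                           width, height,             /* backing fb size    */
                           fourcc,
                           DRM_FORMAT_MOD_INVALID,    /* no modifier known  */
                           &fd, 1,                    /* fd array, planes   */
                           false,                     /* allow_fences (inferred) */
                           y0_top);
}
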
diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
index 4fc7ef8..9eb806b 100644
--- a/hw/display/virtio-gpu-base.c
+++ b/hw/display/virtio-gpu-base.c
@@ -110,7 +110,6 @@ static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
/* send event to guest */
virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
- return;
}
static void
@@ -235,6 +234,9 @@ virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
if (virtio_gpu_context_init_enabled(g->conf)) {
features |= (1 << VIRTIO_GPU_F_CONTEXT_INIT);
}
+ if (virtio_gpu_resource_uuid_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_RESOURCE_UUID);
+ }
return features;
}
@@ -260,7 +262,7 @@ virtio_gpu_base_device_unrealize(DeviceState *qdev)
}
static void
-virtio_gpu_base_class_init(ObjectClass *klass, void *data)
+virtio_gpu_base_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
index e06be60..c06a078 100644
--- a/hw/display/virtio-gpu-gl.c
+++ b/hw/display/virtio-gpu-gl.c
@@ -16,7 +16,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
@@ -29,9 +29,14 @@ static void virtio_gpu_gl_update_cursor_data(VirtIOGPU *g,
struct virtio_gpu_scanout *s,
uint32_t resource_id)
{
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
uint32_t width, height;
uint32_t pixels, *data;
+ if (gl->renderer_state != RS_INITED) {
+ return;
+ }
+
data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
if (!data) {
return;
@@ -65,13 +70,22 @@ static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
return;
}
- if (!gl->renderer_inited) {
- virtio_gpu_virgl_init(g);
- gl->renderer_inited = true;
- }
- if (gl->renderer_reset) {
- gl->renderer_reset = false;
+ switch (gl->renderer_state) {
+ case RS_RESET:
virtio_gpu_virgl_reset(g);
+ /* fallthrough */
+ case RS_START:
+ if (virtio_gpu_virgl_init(g)) {
+ gl->renderer_state = RS_INIT_FAILED;
+ return;
+ }
+
+ gl->renderer_state = RS_INITED;
+ break;
+ case RS_INIT_FAILED:
+ return;
+ case RS_INITED:
+ break;
}
cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
@@ -98,14 +112,15 @@ static void virtio_gpu_gl_reset(VirtIODevice *vdev)
* GL functions must be called with the associated GL context in main
* thread, and when the renderer is unblocked.
*/
- if (gl->renderer_inited && !gl->renderer_reset) {
+ if (gl->renderer_state == RS_INITED) {
virtio_gpu_virgl_reset_scanout(g);
- gl->renderer_reset = true;
+ gl->renderer_state = RS_RESET;
}
}
static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp)
{
+ ERRP_GUARD();
VirtIOGPU *g = VIRTIO_GPU(qdev);
#if HOST_BIG_ENDIAN
@@ -119,24 +134,55 @@ static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp)
}
if (!display_opengl) {
- error_setg(errp, "opengl is not available");
+ error_setg(errp,
+ "The display backend does not have OpenGL support enabled");
+ error_append_hint(errp,
+ "It can be enabled with '-display BACKEND,gl=on' "
+ "where BACKEND is the name of the display backend "
+ "to use.\n");
return;
}
g->parent_obj.conf.flags |= (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
- VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
- virtio_gpu_virgl_get_num_capsets(g);
+ g->capset_ids = virtio_gpu_virgl_get_capsets(g);
+ VIRTIO_GPU_BASE(g)->virtio_config.num_capsets = g->capset_ids->len;
+
+#if VIRGL_VERSION_MAJOR >= 1
+ g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED;
+#endif
virtio_gpu_device_realize(qdev, errp);
}
-static Property virtio_gpu_gl_properties[] = {
+static const Property virtio_gpu_gl_properties[] = {
DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_STATS_ENABLED, false),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BIT("venus", VirtIOGPU, parent_obj.conf.flags,
+ VIRTIO_GPU_FLAG_VENUS_ENABLED, false),
};
-static void virtio_gpu_gl_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_gl_device_unrealize(DeviceState *qdev)
+{
+ VirtIOGPU *g = VIRTIO_GPU(qdev);
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(qdev);
+
+ if (gl->renderer_state >= RS_INITED) {
+#if VIRGL_VERSION_MAJOR >= 1
+ qemu_bh_delete(gl->cmdq_resume_bh);
+#endif
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
+ timer_free(gl->print_stats);
+ }
+ timer_free(gl->fence_poll);
+ virgl_renderer_cleanup(NULL);
+ }
+
+ gl->renderer_state = RS_START;
+
+ g_array_unref(g->capset_ids);
+}
+
+static void virtio_gpu_gl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -149,6 +195,7 @@ static void virtio_gpu_gl_class_init(ObjectClass *klass, void *data)
vgc->update_cursor_data = virtio_gpu_gl_update_cursor_data;
vdc->realize = virtio_gpu_gl_device_realize;
+ vdc->unrealize = virtio_gpu_gl_device_unrealize;
vdc->reset = virtio_gpu_gl_reset;
device_class_set_props(dc, virtio_gpu_gl_properties);
}
@@ -170,3 +217,4 @@ static void virtio_register_types(void)
type_init(virtio_register_types)
module_dep("hw-display-virtio-gpu");
+module_dep("ui-opengl");
diff --git a/hw/display/virtio-gpu-pci-rutabaga.c b/hw/display/virtio-gpu-pci-rutabaga.c
index abbb898..5fdff37 100644
--- a/hw/display/virtio-gpu-pci-rutabaga.c
+++ b/hw/display/virtio-gpu-pci-rutabaga.c
@@ -34,7 +34,7 @@ static const TypeInfo virtio_gpu_rutabaga_pci_info[] = {
.parent = TYPE_VIRTIO_GPU_PCI_BASE,
.instance_size = sizeof(VirtIOGPURutabagaPCI),
.instance_init = virtio_gpu_rutabaga_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
}
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
index da6a99f..c0d71b6 100644
--- a/hw/display/virtio-gpu-pci.c
+++ b/hw/display/virtio-gpu-pci.c
@@ -21,9 +21,8 @@
#include "hw/virtio/virtio-gpu-pci.h"
#include "qom/object.h"
-static Property virtio_gpu_pci_base_properties[] = {
+static const Property virtio_gpu_pci_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -58,7 +57,7 @@ static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_pci_base_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/display/virtio-gpu-rutabaga.c b/hw/display/virtio-gpu-rutabaga.c
index 17bf701..ed5ae52 100644
--- a/hw/display/virtio-gpu-rutabaga.c
+++ b/hw/display/virtio-gpu-rutabaga.c
@@ -1096,7 +1096,7 @@ static void virtio_gpu_rutabaga_realize(DeviceState *qdev, Error **errp)
virtio_gpu_device_realize(qdev, errp);
}
-static Property virtio_gpu_rutabaga_properties[] = {
+static const Property virtio_gpu_rutabaga_properties[] = {
DEFINE_PROP_BIT64("gfxstream-vulkan", VirtIOGPURutabaga, capset_mask,
RUTABAGA_CAPSET_GFXSTREAM_VULKAN, false),
DEFINE_PROP_BIT64("cross-domain", VirtIOGPURutabaga, capset_mask,
@@ -1108,10 +1108,9 @@ static Property virtio_gpu_rutabaga_properties[] = {
DEFINE_PROP_STRING("wayland-socket-path", VirtIOGPURutabaga,
wayland_socket_path),
DEFINE_PROP_STRING("wsi", VirtIOGPURutabaga, wsi),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c
index c02ec6d..d804f32 100644
--- a/hw/display/virtio-gpu-udmabuf.c
+++ b/hw/display/virtio-gpu-udmabuf.c
@@ -19,12 +19,13 @@
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
-#include "exec/ramblock.h"
-#include "sysemu/hostmem.h"
+#include "system/ramblock.h"
+#include "system/hostmem.h"
#include <sys/ioctl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"
+#include "standard-headers/drm/drm_fourcc.h"
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
@@ -176,16 +177,19 @@ static VGPUDMABuf
struct virtio_gpu_rect *r)
{
VGPUDMABuf *dmabuf;
+ uint32_t offset = 0;
if (res->dmabuf_fd < 0) {
return NULL;
}
dmabuf = g_new0(VGPUDMABuf, 1);
- dmabuf->buf = qemu_dmabuf_new(r->width, r->height, fb->stride,
+ dmabuf->buf = qemu_dmabuf_new(r->width, r->height,
+ &offset, &fb->stride,
r->x, r->y, fb->width, fb->height,
qemu_pixman_to_drm_format(fb->format),
- 0, res->dmabuf_fd, true, false);
+ DRM_FORMAT_MOD_INVALID, &res->dmabuf_fd,
+ 1, true, false);
dmabuf->scanout_id = scanout_id;
QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 9f34d0e..145a0b3 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -17,11 +17,31 @@
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
#include "ui/egl-helpers.h"
#include <virglrenderer.h>
+struct virtio_gpu_virgl_resource {
+ struct virtio_gpu_simple_resource base;
+ MemoryRegion *mr;
+};
+
+static struct virtio_gpu_virgl_resource *
+virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
+{
+ struct virtio_gpu_simple_resource *res;
+
+ res = virtio_gpu_find_resource(g, resource_id);
+ if (!res) {
+ return NULL;
+ }
+
+ return container_of(res, struct virtio_gpu_virgl_resource, base);
+}
+
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
@@ -30,16 +50,179 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
}
#endif
+#if VIRGL_VERSION_MAJOR >= 1
+struct virtio_gpu_virgl_hostmem_region {
+ MemoryRegion mr;
+ struct VirtIOGPU *g;
+ bool finish_unmapping;
+};
+
+static struct virtio_gpu_virgl_hostmem_region *
+to_hostmem_region(MemoryRegion *mr)
+{
+ return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+}
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+ VirtIOGPU *g = opaque;
+
+ virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+ MemoryRegion *mr = MEMORY_REGION(obj);
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b;
+ VirtIOGPUGL *gl;
+
+ vmr = to_hostmem_region(mr);
+ vmr->finish_unmapping = true;
+
+ b = VIRTIO_GPU_BASE(vmr->g);
+ b->renderer_blocked--;
+
+ /*
+ * memory_region_unref() is executed from RCU thread context, while
+ * virglrenderer works only on the main-loop thread that's holding GL
+ * context.
+ */
+ gl = VIRTIO_GPU_GL(vmr->g);
+ qemu_bh_schedule(gl->cmdq_resume_bh);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_virgl_resource *res,
+ uint64_t offset)
+{
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+ MemoryRegion *mr;
+ uint64_t size;
+ void *data;
+ int ret;
+
+ if (!virtio_gpu_hostmem_enabled(b->conf)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+ vmr->g = g;
+
+ mr = &vmr->mr;
+ memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+ memory_region_add_subregion(&b->hostmem, offset, mr);
+ memory_region_set_enabled(mr, true);
+
+ /*
+ * MR could outlive the resource if MR's reference is held outside of
+ * virtio-gpu. In order to prevent unmapping resource while MR is alive,
+ * and thus, making the data pointer invalid, we will block virtio-gpu
+ * command processing until MR is fully unreferenced and freed.
+ */
+ OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+ res->mr = mr;
+
+ return 0;
+}
+
+static int
+virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_virgl_resource *res,
+ bool *cmd_suspended)
+{
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+ MemoryRegion *mr = res->mr;
+ int ret;
+
+ if (!mr) {
+ return 0;
+ }
+
+ vmr = to_hostmem_region(res->mr);
+
+ /*
+ * Perform async unmapping in 3 steps:
+ *
+ * 1. Begin async unmapping with memory_region_del_subregion()
+ * and suspend/block cmd processing.
+ * 2. Wait for res->mr to be freed and cmd processing resumed
+ * asynchronously by virtio_gpu_virgl_hostmem_region_free().
+ * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
+ */
+ if (vmr->finish_unmapping) {
+ res->mr = NULL;
+ g_free(vmr);
+
+ ret = virgl_renderer_resource_unmap(res->base.resource_id);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: failed to unmap virgl resource: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+ } else {
+ *cmd_suspended = true;
+
+ /* render will be unblocked once MR is freed */
+ b->renderer_blocked++;
+
+ /* memory region owns self res->mr object and frees it by itself */
+ memory_region_set_enabled(mr, false);
+ memory_region_del_subregion(&b->hostmem, mr);
+ object_unparent(OBJECT(mr));
+ }
+
+ return 0;
+}
+#endif
+
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_resource_create_2d c2d;
struct virgl_renderer_resource_create_args args;
+ struct virtio_gpu_virgl_resource *res;
VIRTIO_GPU_FILL_CMD(c2d);
trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
c2d.width, c2d.height);
+ if (c2d.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, c2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.width = c2d.width;
+ res->base.height = c2d.height;
+ res->base.format = c2d.format;
+ res->base.resource_id = c2d.resource_id;
+ res->base.dmabuf_fd = -1;
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+
args.handle = c2d.resource_id;
args.target = 2;
args.format = c2d.format;
@@ -59,11 +242,35 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
{
struct virtio_gpu_resource_create_3d c3d;
struct virgl_renderer_resource_create_args args;
+ struct virtio_gpu_virgl_resource *res;
VIRTIO_GPU_FILL_CMD(c3d);
trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
c3d.width, c3d.height, c3d.depth);
+ if (c3d.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, c3d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.width = c3d.width;
+ res->base.height = c3d.height;
+ res->base.format = c3d.format;
+ res->base.resource_id = c3d.resource_id;
+ res->base.dmabuf_fd = -1;
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+
args.handle = c3d.resource_id;
args.target = c3d.target;
args.format = c3d.format;
@@ -79,15 +286,35 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
- struct virtio_gpu_ctrl_command *cmd)
+ struct virtio_gpu_ctrl_command *cmd,
+ bool *cmd_suspended)
{
struct virtio_gpu_resource_unref unref;
+ struct virtio_gpu_virgl_resource *res;
struct iovec *res_iovs = NULL;
int num_iovs = 0;
VIRTIO_GPU_FILL_CMD(unref);
trace_virtio_gpu_cmd_res_unref(unref.resource_id);
+ res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, unref.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+#if VIRGL_VERSION_MAJOR >= 1
+ if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+ if (*cmd_suspended) {
+ return;
+ }
+#endif
+
virgl_renderer_resource_detach_iov(unref.resource_id,
&res_iovs,
&num_iovs);
@@ -95,6 +322,10 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
virgl_renderer_resource_unref(unref.resource_id);
+
+ QTAILQ_REMOVE(&g->reslist, &res->base, next);
+
+ g_free(res);
}
static void virgl_cmd_context_create(VirtIOGPU *g,
@@ -106,8 +337,24 @@ static void virgl_cmd_context_create(VirtIOGPU *g,
trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
cc.debug_name);
- virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
- cc.debug_name);
+ if (cc.context_init) {
+ if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+#if VIRGL_VERSION_MAJOR >= 1
+ virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
+ cc.context_init,
+ cc.nlen,
+ cc.debug_name);
+ return;
+#endif
+ }
+
+ virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}
static void virgl_cmd_context_destroy(VirtIOGPU *g,
@@ -171,7 +418,7 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
struct virgl_renderer_resource_info info;
void *d3d_tex2d = NULL;
-#ifdef HAVE_VIRGL_D3D_INFO_EXT
+#if VIRGL_VERSION_MAJOR >= 1
struct virgl_renderer_resource_info_ext ext;
memset(&ext, 0, sizeof(ext));
ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
@@ -375,19 +622,13 @@ static void virgl_cmd_get_capset_info(VirtIOGPU *g,
VIRTIO_GPU_FILL_CMD(info);
memset(&resp, 0, sizeof(resp));
- if (info.capset_index == 0) {
- resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
- virgl_renderer_get_cap_set(resp.capset_id,
- &resp.capset_max_version,
- &resp.capset_max_size);
- } else if (info.capset_index == 1) {
- resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
+
+ if (info.capset_index < g->capset_ids->len) {
+ resp.capset_id = g_array_index(g->capset_ids, uint32_t,
+ info.capset_index);
virgl_renderer_get_cap_set(resp.capset_id,
&resp.capset_max_version,
&resp.capset_max_size);
- } else {
- resp.capset_max_version = 0;
- resp.capset_max_size = 0;
}
resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
@@ -417,9 +658,221 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
g_free(resp);
}
+#if VIRGL_VERSION_MAJOR >= 1
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+ g_autofree struct virtio_gpu_virgl_resource *res = NULL;
+ struct virtio_gpu_resource_create_blob cblob;
+ struct virgl_renderer_resource_info info;
+ int ret;
+
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ VIRTIO_GPU_FILL_CMD(cblob);
+ virtio_gpu_create_blob_bswap(&cblob);
+ trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+ if (cblob.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, cblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.resource_id = cblob.resource_id;
+ res->base.blob_size = cblob.size;
+ res->base.dmabuf_fd = -1;
+
+ if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+ ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+ cmd, &res->base.addrs,
+ &res->base.iov, &res->base.iov_cnt);
+ if (!ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+ }
+
+ virgl_args.res_handle = cblob.resource_id;
+ virgl_args.ctx_id = cblob.hdr.ctx_id;
+ virgl_args.blob_mem = cblob.blob_mem;
+ virgl_args.blob_id = cblob.blob_id;
+ virgl_args.blob_flags = cblob.blob_flags;
+ virgl_args.size = cblob.size;
+ virgl_args.iovecs = res->base.iov;
+ virgl_args.num_iovs = res->base.iov_cnt;
+
+ ret = virgl_renderer_resource_create_blob(&virgl_args);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+ __func__, strerror(-ret));
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ virtio_gpu_cleanup_mapping(g, &res->base);
+ return;
+ }
+
+ ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: resource does not have info %d: %s\n",
+ __func__, cblob.resource_id, strerror(-ret));
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ virtio_gpu_cleanup_mapping(g, &res->base);
+ virgl_renderer_resource_unref(cblob.resource_id);
+ return;
+ }
+
+ res->base.dmabuf_fd = info.fd;
+
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+ res = NULL;
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_map_blob mblob;
+ struct virtio_gpu_virgl_resource *res;
+ struct virtio_gpu_resp_map_info resp;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(mblob);
+ virtio_gpu_map_blob_bswap(&mblob);
+
+ res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, mblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+ if (ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ memset(&resp, 0, sizeof(resp));
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+ virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+ virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ bool *cmd_suspended)
+{
+ struct virtio_gpu_resource_unmap_blob ublob;
+ struct virtio_gpu_virgl_resource *res;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(ublob);
+ virtio_gpu_unmap_blob_bswap(&ublob);
+
+ res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, ublob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
+ if (ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+}
+
+static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_framebuffer fb = { 0 };
+ struct virtio_gpu_virgl_resource *res;
+ struct virtio_gpu_set_scanout_blob ss;
+
+ VIRTIO_GPU_FILL_CMD(ss);
+ virtio_gpu_scanout_blob_bswap(&ss);
+ trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
+ ss.r.width, ss.r.height, ss.r.x,
+ ss.r.y);
+
+ if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
+ __func__, ss.scanout_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+ return;
+ }
+
+ if (ss.resource_id == 0) {
+ virtio_gpu_disable_scanout(g, ss.scanout_id);
+ return;
+ }
+
+ if (ss.width < 16 ||
+ ss.height < 16 ||
+ ss.r.x + ss.r.width > ss.width ||
+ ss.r.y + ss.r.height > ss.height) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
+ " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
+ __func__, ss.scanout_id, ss.resource_id,
+ ss.r.x, ss.r.y, ss.r.width, ss.r.height,
+ ss.width, ss.height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, ss.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+ if (res->base.dmabuf_fd < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
+ __func__, ss.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->base.blob_size)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ g->parent_obj.enable = 1;
+ if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
+}
+#endif
+
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
+ bool cmd_suspended = false;
+
VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
virgl_renderer_force_ctx_0();
@@ -461,7 +914,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
virgl_cmd_resource_flush(g, cmd);
break;
case VIRTIO_GPU_CMD_RESOURCE_UNREF:
- virgl_cmd_resource_unref(g, cmd);
+ virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
break;
case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
/* TODO add security */
@@ -483,12 +936,26 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
case VIRTIO_GPU_CMD_GET_EDID:
virtio_gpu_get_edid(g, cmd);
break;
+#if VIRGL_VERSION_MAJOR >= 1
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+ virgl_cmd_resource_create_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+ virgl_cmd_resource_map_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+ virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+ break;
+ case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
+ virgl_cmd_set_scanout_blob(g, cmd);
+ break;
+#endif
default:
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
break;
}
- if (cmd->finished) {
+ if (cmd_suspended || cmd->finished) {
return;
}
if (cmd->error) {
@@ -525,7 +992,7 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
g_free(cmd);
g->inflight--;
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
- fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
+ trace_virtio_gpu_dec_inflight_fences(g->inflight);
}
}
}
@@ -574,6 +1041,7 @@ static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
static void virtio_gpu_print_stats(void *opaque)
{
VirtIOGPU *g = opaque;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
if (g->stats.requests) {
fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
@@ -588,17 +1056,18 @@ static void virtio_gpu_print_stats(void *opaque)
} else {
fprintf(stderr, "stats: idle\r");
}
- timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
+ timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
static void virtio_gpu_fence_poll(void *opaque)
{
VirtIOGPU *g = opaque;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
virgl_renderer_poll();
virtio_gpu_process_cmdq(g);
if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
- timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
+ timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
}
}
@@ -626,6 +1095,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
{
int ret;
uint32_t flags = 0;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
if (qemu_egl_display) {
@@ -638,6 +1108,11 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
}
#endif
+#if VIRGL_VERSION_MAJOR >= 1
+ if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+ flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
+ }
+#endif
ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
if (ret != 0) {
@@ -645,23 +1120,55 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
return ret;
}
- g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
- virtio_gpu_fence_poll, g);
+ gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ virtio_gpu_fence_poll, g);
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
- g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
- virtio_gpu_print_stats, g);
- timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
+ gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ virtio_gpu_print_stats, g);
+ timer_mod(gl->print_stats,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
+
+#if VIRGL_VERSION_MAJOR >= 1
+ gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+ virtio_gpu_virgl_resume_cmdq_bh,
+ g);
+#endif
+
return 0;
}
-int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
+static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
+{
+ g_array_append_val(capset_ids, capset_id);
+}
+
+GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
- uint32_t capset2_max_ver, capset2_max_size;
+ uint32_t capset_max_ver, capset_max_size;
+ GArray *capset_ids;
+
+ capset_ids = g_array_new(false, false, sizeof(uint32_t));
+
+ /* VIRGL is always supported. */
+ virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);
+
virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
- &capset2_max_ver,
- &capset2_max_size);
+ &capset_max_ver,
+ &capset_max_size);
+ if (capset_max_ver) {
+ virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
+ }
+
+ if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+ virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
+ &capset_max_ver,
+ &capset_max_size);
+ if (capset_max_size) {
+ virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
+ }
+ }
- return capset2_max_ver ? 2 : 1;
+ return capset_ids;
}
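
The largest addition above is blob-resource support for virglrenderer 1.x: RESOURCE_MAP_BLOB publishes a virgl-mapped allocation as a RAM MemoryRegion inside the device's hostmem container, and RESOURCE_UNMAP_BLOB must wait for that region to actually disappear before the virgl mapping can be released. A condensed outline of that two-pass unmap, restated from virtio_gpu_virgl_unmap_resource_blob() and virtio_gpu_virgl_hostmem_region_free() above; comments only, nothing beyond what the patch itself does:

/*
 * Pass 1: guest sends UNMAP_BLOB, vmr->finish_unmapping is still false:
 *     *cmd_suspended = true;                    -- command stays queued
 *     b->renderer_blocked++;                    -- stall the GL renderer
 *     memory_region_set_enabled(mr, false);
 *     memory_region_del_subregion(&b->hostmem, mr);
 *     object_unparent(OBJECT(mr));              -- unref completes via RCU
 *
 * Later, when the region is finalised (mr->free callback):
 *     vmr->finish_unmapping = true;
 *     b->renderer_blocked--;
 *     qemu_bh_schedule(gl->cmdq_resume_bh);     -- re-runs process_cmdq()
 *
 * Pass 2: the same UNMAP_BLOB is replayed with finish_unmapping == true:
 *     res->mr = NULL; g_free(vmr);
 *     virgl_renderer_resource_unmap(res->base.resource_id);
 */

The suspended command is simply left at the head of the command queue; the matching "command suspended" check in virtio_gpu_process_cmdq() appears in the virtio-gpu.c hunks that follow.
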
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 3281842..0a1a625 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -14,12 +14,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
+#include "system/dma.h"
+#include "system/system.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
@@ -28,6 +28,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
+#include "qemu/memfd.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -238,16 +239,6 @@ static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
return height * stride;
}
-#ifdef WIN32
-static void
-win32_pixman_image_destroy(pixman_image_t *image, void *data)
-{
- HANDLE handle = data;
-
- qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
-}
-#endif
-
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -294,28 +285,20 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
- void *bits = NULL;
-#ifdef WIN32
- bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
- if (!bits) {
+ if (!qemu_pixman_image_new_shareable(
+ &res->image,
+ &res->share_handle,
+ "virtio-gpu res",
+ pformat,
+ c2d.width,
+ c2d.height,
+ c2d.height ? res->hostmem / c2d.height : 0,
+ &error_warn)) {
goto end;
}
-#endif
- res->image = pixman_image_create_bits(
- pformat,
- c2d.width,
- c2d.height,
- bits, c2d.height ? res->hostmem / c2d.height : 0);
-#ifdef WIN32
- if (res->image) {
- pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
- }
-#endif
}
-#ifdef WIN32
end:
-#endif
if (!res->image) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: resource creation failed %d %d %d\n",
@@ -379,7 +362,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
-static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
struct virtio_gpu_simple_resource *res;
@@ -596,11 +579,11 @@ static void virtio_unref_resource(pixman_image_t *image, void *data)
pixman_image_unref(data);
}
-static void virtio_gpu_update_scanout(VirtIOGPU *g,
- uint32_t scanout_id,
- struct virtio_gpu_simple_resource *res,
- struct virtio_gpu_framebuffer *fb,
- struct virtio_gpu_rect *r)
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+ uint32_t scanout_id,
+ struct virtio_gpu_simple_resource *res,
+ struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_rect *r)
{
struct virtio_gpu_simple_resource *ores;
struct virtio_gpu_scanout *scanout;
@@ -686,9 +669,7 @@ static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
/* realloc the surface ptr */
scanout->ds = qemu_create_displaysurface_pixman(rect);
-#ifdef WIN32
- qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
-#endif
+ qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, fb->offset);
pixman_image_unref(rect);
dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
@@ -740,13 +721,47 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
&fb, res, &ss.r, &cmd->error);
}
+bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_set_scanout_blob *ss,
+ uint64_t blob_size)
+{
+ uint64_t fbend;
+
+ fb->format = virtio_gpu_get_pixman_format(ss->format);
+ if (!fb->format) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: host couldn't handle guest format %d\n",
+ __func__, ss->format);
+ return false;
+ }
+
+ fb->bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb->format), 8);
+ fb->width = ss->width;
+ fb->height = ss->height;
+ fb->stride = ss->strides[0];
+ fb->offset = ss->offsets[0] + ss->r.x * fb->bytes_pp + ss->r.y * fb->stride;
+
+ fbend = fb->offset;
+ fbend += (uint64_t) fb->stride * ss->r.height;
+
+ if (fbend > blob_size) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: fb end out of range\n",
+ __func__);
+ return false;
+ }
+
+ return true;
+}
+
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_framebuffer fb = { 0 };
struct virtio_gpu_set_scanout_blob ss;
- uint64_t fbend;
VIRTIO_GPU_FILL_CMD(ss);
virtio_gpu_scanout_blob_bswap(&ss);
@@ -772,28 +787,7 @@ static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
return;
}
- fb.format = virtio_gpu_get_pixman_format(ss.format);
- if (!fb.format) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: host couldn't handle guest format %d\n",
- __func__, ss.format);
- cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
- return;
- }
-
- fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
- fb.width = ss.width;
- fb.height = ss.height;
- fb.stride = ss.strides[0];
- fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
-
- fbend = fb.offset;
- fbend += fb.stride * (ss.r.height - 1);
- fbend += fb.bytes_pp * ss.r.width;
- if (fbend > res->blob_size) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: fb end out of range\n",
- __func__);
+ if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->blob_size)) {
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
@@ -1053,6 +1047,12 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
/* process command */
vgc->process_cmd(g, cmd);
+ /* command suspended */
+ if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
+ trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type);
+ break;
+ }
+
QTAILQ_REMOVE(&g->cmdq, cmd, next);
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->stats.requests++;
@@ -1065,7 +1065,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
if (g->stats.max_inflight < g->inflight) {
g->stats.max_inflight = g->inflight;
}
- fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
+ trace_virtio_gpu_inc_inflight_fences(g->inflight);
}
} else {
g_free(cmd);
@@ -1085,7 +1085,7 @@ static void virtio_gpu_process_fenceq(VirtIOGPU *g)
g_free(cmd);
g->inflight--;
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
- fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
+ trace_virtio_gpu_dec_inflight_fences(g->inflight);
}
}
}
@@ -1284,7 +1284,6 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
VirtIOGPU *g = opaque;
struct virtio_gpu_simple_resource *res;
uint32_t resource_id, pformat;
- void *bits = NULL;
int i;
g->hostmem = 0;
@@ -1311,24 +1310,17 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
}
res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
-#ifdef WIN32
- bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
- if (!bits) {
- g_free(res);
- return -EINVAL;
- }
-#endif
- res->image = pixman_image_create_bits(
- pformat,
- res->width, res->height,
- bits, res->height ? res->hostmem / res->height : 0);
- if (!res->image) {
+ if (!qemu_pixman_image_new_shareable(&res->image,
+ &res->share_handle,
+ "virtio-gpu res",
+ pformat,
+ res->width,
+ res->height,
+ res->height ? res->hostmem / res->height : 0,
+ &error_warn)) {
g_free(res);
return -EINVAL;
}
-#ifdef WIN32
- pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
-#endif
res->addrs = g_new(uint64_t, res->iov_cnt);
res->iov = g_new(struct iovec, res->iov_cnt);
@@ -1461,9 +1453,7 @@ static int virtio_gpu_post_load(void *opaque, int version_id)
return -EINVAL;
}
scanout->ds = qemu_create_displaysurface_pixman(res->image);
-#ifdef WIN32
- qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
-#endif
+ qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, 0);
dpy_gfx_replace_surface(scanout->con, scanout->ds);
}
@@ -1484,15 +1474,35 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
+ !virtio_gpu_virgl_enabled(g->parent_obj.conf) &&
!virtio_gpu_have_udmabuf()) {
error_setg(errp, "need rutabaga or udmabuf for blob resources");
return;
}
+#ifdef VIRGL_VERSION_MAJOR
+ #if VIRGL_VERSION_MAJOR < 1
if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
- error_setg(errp, "blobs and virgl are not compatible (yet)");
+ error_setg(errp, "old virglrenderer, blob resources unsupported");
return;
}
+ #endif
+#endif
+ }
+
+ if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
+#ifdef VIRGL_VERSION_MAJOR
+ #if VIRGL_VERSION_MAJOR >= 1
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf) ||
+ !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) {
+ error_setg(errp, "venus requires enabled blob and hostmem options");
+ return;
+ }
+ #else
+ error_setg(errp, "old virglrenderer, venus unsupported");
+ return;
+ #endif
+#endif
}
if (!virtio_gpu_base_device_realize(qdev,
@@ -1664,7 +1674,7 @@ static const VMStateDescription vmstate_virtio_gpu = {
.post_load = virtio_gpu_post_load,
};
-static Property virtio_gpu_properties[] = {
+static const Property virtio_gpu_properties[] = {
VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
256 * MiB),
@@ -1672,10 +1682,9 @@ static Property virtio_gpu_properties[] = {
VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU, scanout_vmstate_version, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_gpu_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
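The new virtio_gpu_scanout_blob_to_fb() helper added above validates a blob-backed scanout by checking that offset + stride * height stays within the guest-supplied blob, casting to 64 bits so the stride * height product cannot wrap for 32-bit inputs. A minimal standalone sketch of that check, with illustrative names (scanout_fits_blob() is not a QEMU function):

#include <stdbool.h>
#include <stdint.h>

/* Standalone sketch of the bounds check above: the scanout rectangle,
 * expressed as an offset plus stride * height bytes, must fit inside the
 * blob.  The 64-bit cast keeps stride * height from wrapping. */
static bool scanout_fits_blob(uint32_t offset, uint32_t stride,
                              uint32_t rect_height, uint64_t blob_size)
{
    uint64_t end = (uint64_t)offset + (uint64_t)stride * rect_height;

    return end <= blob_size;
}

For example, a 1920x1080 scanout with a 7680-byte stride and zero offset needs 7680 * 1080 = 8294400 bytes, so any blob smaller than that is rejected with VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER.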
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index 276f315..40e60f7 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -209,12 +209,11 @@ static void virtio_vga_set_big_endian_fb(Object *obj, bool value, Error **errp)
d->vga.big_endian_fb = value;
}
-static Property virtio_vga_base_properties[] = {
+static const Property virtio_vga_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_vga_base_class_init(ObjectClass *klass, void *data)
+static void virtio_vga_base_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index 3db3ff9..bc1a8ed 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -618,7 +618,7 @@ static void vmsvga_fifo_run(struct vmsvga_state_s *s)
uint32_t cmd, colour;
int args, len, maxloop = 1024;
int x, y, dx, dy, width, height;
- struct vmsvga_cursor_definition_s cursor;
+ QEMU_UNINITIALIZED struct vmsvga_cursor_definition_s cursor;
uint32_t cmd_start;
len = vmsvga_fifo_length(s);
@@ -1332,15 +1332,14 @@ static void pci_vmsvga_realize(PCIDevice *dev, Error **errp)
&s->chip.fifo_ram);
}
-static Property vga_vmware_properties[] = {
+static const Property vga_vmware_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", struct pci_vmsvga_state_s,
chip.vga.vram_size_mb, 16),
DEFINE_PROP_BOOL("global-vmstate", struct pci_vmsvga_state_s,
chip.vga.global_vmstate, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vmsvga_class_init(ObjectClass *klass, void *data)
+static void vmsvga_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1352,7 +1351,7 @@ static void vmsvga_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_DISPLAY_VGA;
k->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
k->subsystem_id = SVGA_PCI_DEVICE_ID;
- dc->reset = vmsvga_reset;
+ device_class_set_legacy_reset(dc, vmsvga_reset);
dc->vmsd = &vmstate_vmware_vga;
device_class_set_props(dc, vga_vmware_properties);
dc->hotpluggable = false;
@@ -1364,7 +1363,7 @@ static const TypeInfo vmsvga_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(struct pci_vmsvga_state_s),
.class_init = vmsvga_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 314d378..22822fe 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -29,7 +29,7 @@
#include "ui/input.h"
#include "ui/console.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/interface/io/fbif.h"
diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c
index c42fc38..7c980ee 100644
--- a/hw/display/xlnx_dp.c
+++ b/hw/display/xlnx_dp.c
@@ -1387,18 +1387,17 @@ static void xlnx_dp_reset(DeviceState *dev)
xlnx_dp_update_irq(s);
}
-static Property xlnx_dp_device_properties[] = {
+static const Property xlnx_dp_device_properties[] = {
DEFINE_AUDIO_PROPERTIES(XlnxDPState, aud_card),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xlnx_dp_class_init(ObjectClass *oc, void *data)
+static void xlnx_dp_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = xlnx_dp_realize;
dc->vmsd = &vmstate_dp;
- dc->reset = xlnx_dp_reset;
+ device_class_set_legacy_reset(dc, xlnx_dp_reset);
device_class_set_props(dc, xlnx_dp_device_properties);
}
diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c
index 9bda450..a2771dd 100644
--- a/hw/dma/bcm2835_dma.c
+++ b/hw/dma/bcm2835_dma.c
@@ -385,12 +385,12 @@ static void bcm2835_dma_realize(DeviceState *dev, Error **errp)
bcm2835_dma_reset(dev);
}
-static void bcm2835_dma_class_init(ObjectClass *klass, void *data)
+static void bcm2835_dma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = bcm2835_dma_realize;
- dc->reset = bcm2835_dma_reset;
+ device_class_set_legacy_reset(dc, bcm2835_dma_reset);
dc->vmsd = &vmstate_bcm2835_dma;
}
diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c
deleted file mode 100644
index 9c0003d..0000000
--- a/hw/dma/etraxfs_dma.c
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * QEMU ETRAX DMA Controller.
- *
- * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "qemu/main-loop.h"
-#include "sysemu/runstate.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-
-#include "hw/cris/etraxfs_dma.h"
-
-#define D(x)
-
-#define RW_DATA (0x0 / 4)
-#define RW_SAVED_DATA (0x58 / 4)
-#define RW_SAVED_DATA_BUF (0x5c / 4)
-#define RW_GROUP (0x60 / 4)
-#define RW_GROUP_DOWN (0x7c / 4)
-#define RW_CMD (0x80 / 4)
-#define RW_CFG (0x84 / 4)
-#define RW_STAT (0x88 / 4)
-#define RW_INTR_MASK (0x8c / 4)
-#define RW_ACK_INTR (0x90 / 4)
-#define R_INTR (0x94 / 4)
-#define R_MASKED_INTR (0x98 / 4)
-#define RW_STREAM_CMD (0x9c / 4)
-
-#define DMA_REG_MAX (0x100 / 4)
-
-/* descriptors */
-
-// ------------------------------------------------------------ dma_descr_group
-typedef struct dma_descr_group {
- uint32_t next;
- unsigned eol : 1;
- unsigned tol : 1;
- unsigned bol : 1;
- unsigned : 1;
- unsigned intr : 1;
- unsigned : 2;
- unsigned en : 1;
- unsigned : 7;
- unsigned dis : 1;
- unsigned md : 16;
- struct dma_descr_group *up;
- union {
- struct dma_descr_context *context;
- struct dma_descr_group *group;
- } down;
-} dma_descr_group;
-
-// ---------------------------------------------------------- dma_descr_context
-typedef struct dma_descr_context {
- uint32_t next;
- unsigned eol : 1;
- unsigned : 3;
- unsigned intr : 1;
- unsigned : 1;
- unsigned store_mode : 1;
- unsigned en : 1;
- unsigned : 7;
- unsigned dis : 1;
- unsigned md0 : 16;
- unsigned md1;
- unsigned md2;
- unsigned md3;
- unsigned md4;
- uint32_t saved_data;
- uint32_t saved_data_buf;
-} dma_descr_context;
-
-// ------------------------------------------------------------- dma_descr_data
-typedef struct dma_descr_data {
- uint32_t next;
- uint32_t buf;
- unsigned eol : 1;
- unsigned : 2;
- unsigned out_eop : 1;
- unsigned intr : 1;
- unsigned wait : 1;
- unsigned : 2;
- unsigned : 3;
- unsigned in_eop : 1;
- unsigned : 4;
- unsigned md : 16;
- uint32_t after;
-} dma_descr_data;
-
-/* Constants */
-enum {
- regk_dma_ack_pkt = 0x00000100,
- regk_dma_anytime = 0x00000001,
- regk_dma_array = 0x00000008,
- regk_dma_burst = 0x00000020,
- regk_dma_client = 0x00000002,
- regk_dma_copy_next = 0x00000010,
- regk_dma_copy_up = 0x00000020,
- regk_dma_data_at_eol = 0x00000001,
- regk_dma_dis_c = 0x00000010,
- regk_dma_dis_g = 0x00000020,
- regk_dma_idle = 0x00000001,
- regk_dma_intern = 0x00000004,
- regk_dma_load_c = 0x00000200,
- regk_dma_load_c_n = 0x00000280,
- regk_dma_load_c_next = 0x00000240,
- regk_dma_load_d = 0x00000140,
- regk_dma_load_g = 0x00000300,
- regk_dma_load_g_down = 0x000003c0,
- regk_dma_load_g_next = 0x00000340,
- regk_dma_load_g_up = 0x00000380,
- regk_dma_next_en = 0x00000010,
- regk_dma_next_pkt = 0x00000010,
- regk_dma_no = 0x00000000,
- regk_dma_only_at_wait = 0x00000000,
- regk_dma_restore = 0x00000020,
- regk_dma_rst = 0x00000001,
- regk_dma_running = 0x00000004,
- regk_dma_rw_cfg_default = 0x00000000,
- regk_dma_rw_cmd_default = 0x00000000,
- regk_dma_rw_intr_mask_default = 0x00000000,
- regk_dma_rw_stat_default = 0x00000101,
- regk_dma_rw_stream_cmd_default = 0x00000000,
- regk_dma_save_down = 0x00000020,
- regk_dma_save_up = 0x00000020,
- regk_dma_set_reg = 0x00000050,
- regk_dma_set_w_size1 = 0x00000190,
- regk_dma_set_w_size2 = 0x000001a0,
- regk_dma_set_w_size4 = 0x000001c0,
- regk_dma_stopped = 0x00000002,
- regk_dma_store_c = 0x00000002,
- regk_dma_store_descr = 0x00000000,
- regk_dma_store_g = 0x00000004,
- regk_dma_store_md = 0x00000001,
- regk_dma_sw = 0x00000008,
- regk_dma_update_down = 0x00000020,
- regk_dma_yes = 0x00000001
-};
-
-enum dma_ch_state
-{
- RST = 1,
- STOPPED = 2,
- RUNNING = 4
-};
-
-struct fs_dma_channel
-{
- qemu_irq irq;
- struct etraxfs_dma_client *client;
-
- /* Internal status. */
- int stream_cmd_src;
- enum dma_ch_state state;
-
- unsigned int input : 1;
- unsigned int eol : 1;
-
- struct dma_descr_group current_g;
- struct dma_descr_context current_c;
- struct dma_descr_data current_d;
-
- /* Control registers. */
- uint32_t regs[DMA_REG_MAX];
-};
-
-struct fs_dma_ctrl
-{
- MemoryRegion mmio;
- int nr_channels;
- struct fs_dma_channel *channels;
-
- QEMUBH *bh;
-};
-
-static void DMA_run(void *opaque);
-static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);
-
-static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
-{
- return ctrl->channels[c].regs[reg];
-}
-
-static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
-{
- return channel_reg(ctrl, c, RW_CFG) & 2;
-}
-
-static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
-{
- return (channel_reg(ctrl, c, RW_CFG) & 1)
- && ctrl->channels[c].client;
-}
-
-static inline int fs_channel(hwaddr addr)
-{
- /* Every channel has a 0x2000 ctrl register map. */
- return addr >> 13;
-}
-
-#ifdef USE_THIS_DEAD_CODE
-static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
-{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP);
-
- /* Load and decode. FIXME: handle endianness. */
- cpu_physical_memory_read(addr, &ctrl->channels[c].current_g,
- sizeof(ctrl->channels[c].current_g));
-}
-
-static void dump_c(int ch, struct dma_descr_context *c)
-{
- printf("%s ch=%d\n", __func__, ch);
- printf("next=%x\n", c->next);
- printf("saved_data=%x\n", c->saved_data);
- printf("saved_data_buf=%x\n", c->saved_data_buf);
- printf("eol=%x\n", (uint32_t) c->eol);
-}
-
-static void dump_d(int ch, struct dma_descr_data *d)
-{
- printf("%s ch=%d\n", __func__, ch);
- printf("next=%x\n", d->next);
- printf("buf=%x\n", d->buf);
- printf("after=%x\n", d->after);
- printf("intr=%x\n", (uint32_t) d->intr);
- printf("out_eop=%x\n", (uint32_t) d->out_eop);
- printf("in_eop=%x\n", (uint32_t) d->in_eop);
- printf("eol=%x\n", (uint32_t) d->eol);
-}
-#endif
-
-static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
-{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
-
- /* Load and decode. FIXME: handle endianness. */
- cpu_physical_memory_read(addr, &ctrl->channels[c].current_c,
- sizeof(ctrl->channels[c].current_c));
-
- D(dump_c(c, &ctrl->channels[c].current_c));
- /* I guess this should update the current pos. */
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
-}
-
-static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
-{
- hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
-
- /* Load and decode. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
- cpu_physical_memory_read(addr, &ctrl->channels[c].current_d,
- sizeof(ctrl->channels[c].current_d));
-
- D(dump_d(c, &ctrl->channels[c].current_d));
- ctrl->channels[c].regs[RW_DATA] = addr;
-}
-
-static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
-{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
-
- /* Encode and store. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
- D(dump_d(c, &ctrl->channels[c].current_d));
- cpu_physical_memory_write(addr, &ctrl->channels[c].current_c,
- sizeof(ctrl->channels[c].current_c));
-}
-
-static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
-{
- hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
-
- /* Encode and store. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
- cpu_physical_memory_write(addr, &ctrl->channels[c].current_d,
- sizeof(ctrl->channels[c].current_d));
-}
-
-static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
-{
- /* FIXME: */
-}
-
-static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
-{
- if (ctrl->channels[c].client)
- {
- ctrl->channels[c].eol = 0;
- ctrl->channels[c].state = RUNNING;
- if (!ctrl->channels[c].input)
- channel_out_run(ctrl, c);
- } else
- printf("WARNING: starting DMA ch %d with no client\n", c);
-
- qemu_bh_schedule_idle(ctrl->bh);
-}
-
-static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
-{
- if (!channel_en(ctrl, c)
- || channel_stopped(ctrl, c)
- || ctrl->channels[c].state != RUNNING
- /* Only reload the current data descriptor if it has eol set. */
- || !ctrl->channels[c].current_d.eol) {
- D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
- c, ctrl->channels[c].state,
- channel_stopped(ctrl, c),
- channel_en(ctrl,c),
- ctrl->channels[c].eol));
- D(dump_d(c, &ctrl->channels[c].current_d));
- return;
- }
-
- /* Reload the current descriptor. */
- channel_load_d(ctrl, c);
-
- /* If the current descriptor cleared the eol flag and we had already
- reached eol state, do the continue. */
- if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
- D(printf("continue %d ok %x\n", c,
- ctrl->channels[c].current_d.next));
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
- channel_load_d(ctrl, c);
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
-
- channel_start(ctrl, c);
- }
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
-}
-
-static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
-{
- unsigned int cmd = v & ((1 << 10) - 1);
-
- D(printf("%s ch=%d cmd=%x\n",
- __func__, c, cmd));
- if (cmd & regk_dma_load_d) {
- channel_load_d(ctrl, c);
- if (cmd & regk_dma_burst)
- channel_start(ctrl, c);
- }
-
- if (cmd & regk_dma_load_c) {
- channel_load_c(ctrl, c);
- }
-}
-
-static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
-{
- D(printf("%s %d\n", __func__, c));
- ctrl->channels[c].regs[R_INTR] &=
- ~(ctrl->channels[c].regs[RW_ACK_INTR]);
-
- ctrl->channels[c].regs[R_MASKED_INTR] =
- ctrl->channels[c].regs[R_INTR]
- & ctrl->channels[c].regs[RW_INTR_MASK];
-
- D(printf("%s: chan=%d masked_intr=%x\n", __func__,
- c,
- ctrl->channels[c].regs[R_MASKED_INTR]));
-
- qemu_set_irq(ctrl->channels[c].irq,
- !!ctrl->channels[c].regs[R_MASKED_INTR]);
-}
-
-static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
-{
- uint32_t len;
- uint32_t saved_data_buf;
- unsigned char buf[2 * 1024];
-
- struct dma_context_metadata meta;
- bool send_context = true;
-
- if (ctrl->channels[c].eol)
- return 0;
-
- do {
- bool out_eop;
- D(printf("ch=%d buf=%x after=%x\n",
- c,
- (uint32_t)ctrl->channels[c].current_d.buf,
- (uint32_t)ctrl->channels[c].current_d.after));
-
- if (send_context) {
- if (ctrl->channels[c].client->client.metadata_push) {
- meta.metadata = ctrl->channels[c].current_d.md;
- ctrl->channels[c].client->client.metadata_push(
- ctrl->channels[c].client->client.opaque,
- &meta);
- }
- send_context = false;
- }
-
- channel_load_d(ctrl, c);
- saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
- len = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.after;
- len -= saved_data_buf;
-
- if (len > sizeof buf)
- len = sizeof buf;
- cpu_physical_memory_read (saved_data_buf, buf, len);
-
- out_eop = ((saved_data_buf + len) ==
- ctrl->channels[c].current_d.after) &&
- ctrl->channels[c].current_d.out_eop;
-
- D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
- saved_data_buf, len, out_eop));
-
- if (ctrl->channels[c].client->client.push) {
- if (len > 0) {
- ctrl->channels[c].client->client.push(
- ctrl->channels[c].client->client.opaque,
- buf, len, out_eop);
- }
- } else {
- printf("WARNING: DMA ch%d dataloss,"
- " no attached client.\n", c);
- }
-
- saved_data_buf += len;
-
- if (saved_data_buf == (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.after) {
- /* Done. Step to next. */
- if (ctrl->channels[c].current_d.out_eop) {
- send_context = true;
- }
- if (ctrl->channels[c].current_d.intr) {
- /* data intr. */
- D(printf("signal intr %d eol=%d\n",
- len, ctrl->channels[c].current_d.eol));
- ctrl->channels[c].regs[R_INTR] |= (1 << 2);
- channel_update_irq(ctrl, c);
- }
- channel_store_d(ctrl, c);
- if (ctrl->channels[c].current_d.eol) {
- D(printf("channel %d EOL\n", c));
- ctrl->channels[c].eol = 1;
-
- /* Mark the context as disabled. */
- ctrl->channels[c].current_c.dis = 1;
- channel_store_c(ctrl, c);
-
- channel_stop(ctrl, c);
- } else {
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->
- channels[c].current_d.next;
- /* Load new descriptor. */
- channel_load_d(ctrl, c);
- saved_data_buf = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.buf;
- }
-
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- saved_data_buf;
- D(dump_d(c, &ctrl->channels[c].current_d));
- }
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
- } while (!ctrl->channels[c].eol);
- return 1;
-}
-
-static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
- unsigned char *buf, int buflen, int eop)
-{
- uint32_t len;
- uint32_t saved_data_buf;
-
- if (ctrl->channels[c].eol == 1)
- return 0;
-
- channel_load_d(ctrl, c);
- saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
- len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
- len -= saved_data_buf;
-
- if (len > buflen)
- len = buflen;
-
- cpu_physical_memory_write (saved_data_buf, buf, len);
- saved_data_buf += len;
-
- if (saved_data_buf ==
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
- || eop) {
- uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
-
- D(printf("in dscr end len=%d\n",
- ctrl->channels[c].current_d.after
- - ctrl->channels[c].current_d.buf));
- ctrl->channels[c].current_d.after = saved_data_buf;
-
- /* Done. Step to next. */
- if (ctrl->channels[c].current_d.intr) {
- /* TODO: signal eop to the client. */
- /* data intr. */
- ctrl->channels[c].regs[R_INTR] |= 3;
- }
- if (eop) {
- ctrl->channels[c].current_d.in_eop = 1;
- ctrl->channels[c].regs[R_INTR] |= 8;
- }
- if (r_intr != ctrl->channels[c].regs[R_INTR])
- channel_update_irq(ctrl, c);
-
- channel_store_d(ctrl, c);
- D(dump_d(c, &ctrl->channels[c].current_d));
-
- if (ctrl->channels[c].current_d.eol) {
- D(printf("channel %d EOL\n", c));
- ctrl->channels[c].eol = 1;
-
- /* Mark the context as disabled. */
- ctrl->channels[c].current_c.dis = 1;
- channel_store_c(ctrl, c);
-
- channel_stop(ctrl, c);
- } else {
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->
- channels[c].current_d.next;
- /* Load new descriptor. */
- channel_load_d(ctrl, c);
- saved_data_buf = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.buf;
- }
- }
-
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
- return len;
-}
-
-static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
-{
- if (ctrl->channels[c].client->client.pull) {
- ctrl->channels[c].client->client.pull(
- ctrl->channels[c].client->client.opaque);
- return 1;
- } else
- return 0;
-}
-
-static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
-{
- hw_error("Unsupported short raccess. reg=" HWADDR_FMT_plx "\n", addr);
- return 0;
-}
-
-static uint64_t
-dma_read(void *opaque, hwaddr addr, unsigned int size)
-{
- struct fs_dma_ctrl *ctrl = opaque;
- int c;
- uint32_t r = 0;
-
- if (size != 4) {
- dma_rinvalid(opaque, addr);
- }
-
- /* Make addr relative to this channel and bounded to nr regs. */
- c = fs_channel(addr);
- addr &= 0xff;
- addr >>= 2;
- switch (addr)
- {
- case RW_STAT:
- r = ctrl->channels[c].state & 7;
- r |= ctrl->channels[c].eol << 5;
- r |= ctrl->channels[c].stream_cmd_src << 8;
- break;
-
- default:
- r = ctrl->channels[c].regs[addr];
- D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n",
- __func__, c, addr));
- break;
- }
- return r;
-}
-
-static void
-dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
-{
- hw_error("Unsupported short waccess. reg=" HWADDR_FMT_plx "\n", addr);
-}
-
-static void
-dma_update_state(struct fs_dma_ctrl *ctrl, int c)
-{
- if (ctrl->channels[c].regs[RW_CFG] & 2)
- ctrl->channels[c].state = STOPPED;
- if (!(ctrl->channels[c].regs[RW_CFG] & 1))
- ctrl->channels[c].state = RST;
-}
-
-static void
-dma_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
-{
- struct fs_dma_ctrl *ctrl = opaque;
- uint32_t value = val64;
- int c;
-
- if (size != 4) {
- dma_winvalid(opaque, addr, value);
- }
-
- /* Make addr relative to this channel and bounded to nr regs. */
- c = fs_channel(addr);
- addr &= 0xff;
- addr >>= 2;
- switch (addr)
- {
- case RW_DATA:
- ctrl->channels[c].regs[addr] = value;
- break;
-
- case RW_CFG:
- ctrl->channels[c].regs[addr] = value;
- dma_update_state(ctrl, c);
- break;
- case RW_CMD:
- /* continue. */
- if (value & ~1)
- printf("Invalid store to ch=%d RW_CMD %x\n",
- c, value);
- ctrl->channels[c].regs[addr] = value;
- channel_continue(ctrl, c);
- break;
-
- case RW_SAVED_DATA:
- case RW_SAVED_DATA_BUF:
- case RW_GROUP:
- case RW_GROUP_DOWN:
- ctrl->channels[c].regs[addr] = value;
- break;
-
- case RW_ACK_INTR:
- case RW_INTR_MASK:
- ctrl->channels[c].regs[addr] = value;
- channel_update_irq(ctrl, c);
- if (addr == RW_ACK_INTR)
- ctrl->channels[c].regs[RW_ACK_INTR] = 0;
- break;
-
- case RW_STREAM_CMD:
- if (value & ~1023)
- printf("Invalid store to ch=%d "
- "RW_STREAMCMD %x\n",
- c, value);
- ctrl->channels[c].regs[addr] = value;
- D(printf("stream_cmd ch=%d\n", c));
- channel_stream_cmd(ctrl, c, value);
- break;
-
- default:
- D(printf("%s c=%d " HWADDR_FMT_plx "\n",
- __func__, c, addr));
- break;
- }
-}
-
-static const MemoryRegionOps dma_ops = {
- .read = dma_read,
- .write = dma_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 4
- }
-};
-
-static int etraxfs_dmac_run(void *opaque)
-{
- struct fs_dma_ctrl *ctrl = opaque;
- int i;
- int p = 0;
-
- for (i = 0;
- i < ctrl->nr_channels;
- i++)
- {
- if (ctrl->channels[i].state == RUNNING)
- {
- if (ctrl->channels[i].input) {
- p += channel_in_run(ctrl, i);
- } else {
- p += channel_out_run(ctrl, i);
- }
- }
- }
- return p;
-}
-
-int etraxfs_dmac_input(struct etraxfs_dma_client *client,
- void *buf, int len, int eop)
-{
- return channel_in_process(client->ctrl, client->channel,
- buf, len, eop);
-}
-
-/* Connect an IRQ line with a channel. */
-void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
-{
- struct fs_dma_ctrl *ctrl = opaque;
- ctrl->channels[c].irq = *line;
- ctrl->channels[c].input = input;
-}
-
-void etraxfs_dmac_connect_client(void *opaque, int c,
- struct etraxfs_dma_client *cl)
-{
- struct fs_dma_ctrl *ctrl = opaque;
- cl->ctrl = ctrl;
- cl->channel = c;
- ctrl->channels[c].client = cl;
-}
-
-
-static void DMA_run(void *opaque)
-{
- struct fs_dma_ctrl *etraxfs_dmac = opaque;
- int p = 1;
-
- if (runstate_is_running())
- p = etraxfs_dmac_run(etraxfs_dmac);
-
- if (p)
- qemu_bh_schedule_idle(etraxfs_dmac->bh);
-}
-
-void *etraxfs_dmac_init(hwaddr base, int nr_channels)
-{
- struct fs_dma_ctrl *ctrl = NULL;
-
- ctrl = g_malloc0(sizeof *ctrl);
-
- ctrl->bh = qemu_bh_new(DMA_run, ctrl);
-
- ctrl->nr_channels = nr_channels;
- ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
-
- memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
- nr_channels * 0x2000);
- memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
-
- return ctrl;
-}
diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c
index e72aa2e..e226eda 100644
--- a/hw/dma/i82374.c
+++ b/hw/dma/i82374.c
@@ -139,18 +139,19 @@ static void i82374_realize(DeviceState *dev, Error **errp)
memset(s->commands, 0, sizeof(s->commands));
}
-static Property i82374_properties[] = {
+static const Property i82374_properties[] = {
DEFINE_PROP_UINT32("iobase", I82374State, iobase, 0x400),
- DEFINE_PROP_END_OF_LIST()
};
-static void i82374_class_init(ObjectClass *klass, void *data)
+static void i82374_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = i82374_realize;
dc->vmsd = &vmstate_i82374;
device_class_set_props(dc, i82374_properties);
+ dc->desc = "Intel 82374 DMA controller";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
static const TypeInfo i82374_info = {
diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c
index 24a54ca..2463952 100644
--- a/hw/dma/i8257.c
+++ b/hw/dma/i8257.c
@@ -585,21 +585,20 @@ static void i8257_realize(DeviceState *dev, Error **errp)
d->dma_bh = qemu_bh_new(i8257_dma_run, d);
}
-static Property i8257_properties[] = {
+static const Property i8257_properties[] = {
DEFINE_PROP_INT32("base", I8257State, base, 0x00),
DEFINE_PROP_INT32("page-base", I8257State, page_base, 0x80),
DEFINE_PROP_INT32("pageh-base", I8257State, pageh_base, 0x480),
DEFINE_PROP_INT32("dshift", I8257State, dshift, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void i8257_class_init(ObjectClass *klass, void *data)
+static void i8257_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IsaDmaClass *idc = ISADMA_CLASS(klass);
dc->realize = i8257_realize;
- dc->reset = i8257_reset;
+ device_class_set_legacy_reset(dc, i8257_reset);
dc->vmsd = &vmstate_i8257;
device_class_set_props(dc, i8257_properties);
@@ -619,7 +618,7 @@ static const TypeInfo i8257_info = {
.parent = TYPE_ISA_DEVICE,
.instance_size = sizeof(I8257State),
.class_init = i8257_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ISADMA },
{ }
}
diff --git a/hw/dma/meson.build b/hw/dma/meson.build
index a96c1be..cc7810b 100644
--- a/hw/dma/meson.build
+++ b/hw/dma/meson.build
@@ -5,12 +5,10 @@ system_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c'))
system_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c'))
system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c'))
system_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c'))
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c'))
system_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c'))
system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c'))
system_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c'))
system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c'))
system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c'))
system_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c'))
system_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c'))
diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c
index 77797a6..101f91f 100644
--- a/hw/dma/omap_dma.c
+++ b/hw/dma/omap_dma.c
@@ -131,9 +131,9 @@ struct omap_dma_s {
#define LAST_FRAME_INTR (1 << 4)
#define END_BLOCK_INTR (1 << 5)
#define SYNC (1 << 6)
-#define END_PKT_INTR (1 << 7)
-#define TRANS_ERR_INTR (1 << 8)
-#define MISALIGN_INTR (1 << 11)
+#define END_PKT_INTR (1 << 7)
+#define TRANS_ERR_INTR (1 << 8)
+#define MISALIGN_INTR (1 << 11)
static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
{
@@ -526,12 +526,12 @@ static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
/* Check all the conditions that terminate the transfer starting
* with those that can occur the soonest. */
-#define INTR_CHECK(cond, id, nelements) \
- if (cond) { \
- elements[id] = nelements; \
- if (elements[id] < min_elems) \
- min_elems = elements[id]; \
- } else \
+#define INTR_CHECK(cond, id, nelements) \
+ if (cond) { \
+ elements[id] = nelements; \
+ if (elements[id] < min_elems) \
+ min_elems = elements[id]; \
+ } else \
elements[id] = INT_MAX;
/* Elements */
@@ -686,10 +686,7 @@ void omap_dma_reset(struct soc_dma_s *dma)
struct omap_dma_s *s = dma->opaque;
soc_dma_reset(s->dma);
- if (s->model < omap_dma_4)
- s->gcr = 0x0004;
- else
- s->gcr = 0x00010010;
+ s->gcr = 0x0004;
s->ocp = 0x00000000;
memset(&s->irqstat, 0, sizeof(s->irqstat));
memset(&s->irqen, 0, sizeof(s->irqen));
@@ -697,8 +694,7 @@ void omap_dma_reset(struct soc_dma_s *dma)
s->lcd_ch.condition = 0;
s->lcd_ch.interrupts = 0;
s->lcd_ch.dual = 0;
- if (s->model < omap_dma_4)
- omap_dma_enable_3_1_mapping(s);
+ omap_dma_enable_3_1_mapping(s);
for (i = 0; i < s->chans; i ++) {
s->ch[i].suspend = 0;
s->ch[i].prefetch = 0;
@@ -721,10 +717,7 @@ void omap_dma_reset(struct soc_dma_s *dma)
s->ch[i].repeat = 0;
s->ch[i].auto_init = 0;
s->ch[i].link_enabled = 0;
- if (s->model < omap_dma_4)
- s->ch[i].interrupts = 0x0003;
- else
- s->ch[i].interrupts = 0x0000;
+ s->ch[i].interrupts = 0x0003;
s->ch[i].status = 0;
s->ch[i].cstatus = 0;
s->ch[i].active = 0;
@@ -747,7 +740,7 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s,
struct omap_dma_channel_s *ch, int reg, uint16_t *value)
{
switch (reg) {
- case 0x00: /* SYS_DMA_CSDP_CH0 */
+ case 0x00: /* SYS_DMA_CSDP_CH0 */
*value = (ch->burst[1] << 14) |
(ch->pack[1] << 13) |
(ch->port[1] << 9) |
@@ -757,9 +750,9 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s,
(ch->data_type >> 1);
break;
- case 0x02: /* SYS_DMA_CCR_CH0 */
+ case 0x02: /* SYS_DMA_CCR_CH0 */
if (s->model <= omap_dma_3_1)
- *value = 0 << 10; /* FIFO_FLUSH reads as 0 */
+ *value = 0 << 10; /* FIFO_FLUSH reads as 0 */
else
*value = ch->omap_3_1_compatible_disable << 10;
*value |= (ch->mode[1] << 14) |
@@ -772,11 +765,11 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s,
(ch->fs << 5) | ch->sync;
break;
- case 0x04: /* SYS_DMA_CICR_CH0 */
+ case 0x04: /* SYS_DMA_CICR_CH0 */
*value = ch->interrupts;
break;
- case 0x06: /* SYS_DMA_CSR_CH0 */
+ case 0x06: /* SYS_DMA_CSR_CH0 */
*value = ch->status;
ch->status &= SYNC;
if (!ch->omap_3_1_compatible_disable && ch->sibling) {
@@ -786,77 +779,77 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s,
qemu_irq_lower(ch->irq);
break;
- case 0x08: /* SYS_DMA_CSSA_L_CH0 */
+ case 0x08: /* SYS_DMA_CSSA_L_CH0 */
*value = ch->addr[0] & 0x0000ffff;
break;
- case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
+ case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
*value = ch->addr[0] >> 16;
break;
- case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
+ case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
*value = ch->addr[1] & 0x0000ffff;
break;
- case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
+ case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
*value = ch->addr[1] >> 16;
break;
- case 0x10: /* SYS_DMA_CEN_CH0 */
+ case 0x10: /* SYS_DMA_CEN_CH0 */
*value = ch->elements;
break;
- case 0x12: /* SYS_DMA_CFN_CH0 */
+ case 0x12: /* SYS_DMA_CFN_CH0 */
*value = ch->frames;
break;
- case 0x14: /* SYS_DMA_CFI_CH0 */
+ case 0x14: /* SYS_DMA_CFI_CH0 */
*value = ch->frame_index[0];
break;
- case 0x16: /* SYS_DMA_CEI_CH0 */
+ case 0x16: /* SYS_DMA_CEI_CH0 */
*value = ch->element_index[0];
break;
- case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
+ case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
if (ch->omap_3_1_compatible_disable)
- *value = ch->active_set.src & 0xffff; /* CSAC */
+ *value = ch->active_set.src & 0xffff; /* CSAC */
else
*value = ch->cpc;
break;
- case 0x1a: /* DMA_CDAC */
- *value = ch->active_set.dest & 0xffff; /* CDAC */
+ case 0x1a: /* DMA_CDAC */
+ *value = ch->active_set.dest & 0xffff; /* CDAC */
break;
- case 0x1c: /* DMA_CDEI */
+ case 0x1c: /* DMA_CDEI */
*value = ch->element_index[1];
break;
- case 0x1e: /* DMA_CDFI */
+ case 0x1e: /* DMA_CDFI */
*value = ch->frame_index[1];
break;
- case 0x20: /* DMA_COLOR_L */
+ case 0x20: /* DMA_COLOR_L */
*value = ch->color & 0xffff;
break;
- case 0x22: /* DMA_COLOR_U */
+ case 0x22: /* DMA_COLOR_U */
*value = ch->color >> 16;
break;
- case 0x24: /* DMA_CCR2 */
+ case 0x24: /* DMA_CCR2 */
*value = (ch->bs << 2) |
(ch->transparent_copy << 1) |
ch->constant_fill;
break;
- case 0x28: /* DMA_CLNK_CTRL */
+ case 0x28: /* DMA_CLNK_CTRL */
*value = (ch->link_enabled << 15) |
(ch->link_next_ch & 0xf);
break;
- case 0x2a: /* DMA_LCH_CTRL */
+ case 0x2a: /* DMA_LCH_CTRL */
*value = (ch->interleave_disabled << 15) |
ch->type;
break;
@@ -871,7 +864,7 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s,
struct omap_dma_channel_s *ch, int reg, uint16_t value)
{
switch (reg) {
- case 0x00: /* SYS_DMA_CSDP_CH0 */
+ case 0x00: /* SYS_DMA_CSDP_CH0 */
ch->burst[1] = (value & 0xc000) >> 14;
ch->pack[1] = (value & 0x2000) >> 13;
ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
@@ -894,7 +887,7 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s,
}
break;
- case 0x02: /* SYS_DMA_CCR_CH0 */
+ case 0x02: /* SYS_DMA_CCR_CH0 */
ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
ch->end_prog = (value & 0x0800) >> 11;
@@ -916,88 +909,88 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s,
break;
- case 0x04: /* SYS_DMA_CICR_CH0 */
+ case 0x04: /* SYS_DMA_CICR_CH0 */
ch->interrupts = value & 0x3f;
break;
- case 0x06: /* SYS_DMA_CSR_CH0 */
+ case 0x06: /* SYS_DMA_CSR_CH0 */
OMAP_RO_REG((hwaddr) reg);
break;
- case 0x08: /* SYS_DMA_CSSA_L_CH0 */
+ case 0x08: /* SYS_DMA_CSSA_L_CH0 */
ch->addr[0] &= 0xffff0000;
ch->addr[0] |= value;
break;
- case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
+ case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
ch->addr[0] &= 0x0000ffff;
ch->addr[0] |= (uint32_t) value << 16;
break;
- case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
+ case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
ch->addr[1] &= 0xffff0000;
ch->addr[1] |= value;
break;
- case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
+ case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
ch->addr[1] &= 0x0000ffff;
ch->addr[1] |= (uint32_t) value << 16;
break;
- case 0x10: /* SYS_DMA_CEN_CH0 */
+ case 0x10: /* SYS_DMA_CEN_CH0 */
ch->elements = value;
break;
- case 0x12: /* SYS_DMA_CFN_CH0 */
+ case 0x12: /* SYS_DMA_CFN_CH0 */
ch->frames = value;
break;
- case 0x14: /* SYS_DMA_CFI_CH0 */
+ case 0x14: /* SYS_DMA_CFI_CH0 */
ch->frame_index[0] = (int16_t) value;
break;
- case 0x16: /* SYS_DMA_CEI_CH0 */
+ case 0x16: /* SYS_DMA_CEI_CH0 */
ch->element_index[0] = (int16_t) value;
break;
- case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
+ case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
OMAP_RO_REG((hwaddr) reg);
break;
- case 0x1c: /* DMA_CDEI */
+ case 0x1c: /* DMA_CDEI */
ch->element_index[1] = (int16_t) value;
break;
- case 0x1e: /* DMA_CDFI */
+ case 0x1e: /* DMA_CDFI */
ch->frame_index[1] = (int16_t) value;
break;
- case 0x20: /* DMA_COLOR_L */
+ case 0x20: /* DMA_COLOR_L */
ch->color &= 0xffff0000;
ch->color |= value;
break;
- case 0x22: /* DMA_COLOR_U */
+ case 0x22: /* DMA_COLOR_U */
ch->color &= 0xffff;
ch->color |= (uint32_t)value << 16;
break;
- case 0x24: /* DMA_CCR2 */
+ case 0x24: /* DMA_CCR2 */
ch->bs = (value >> 2) & 0x1;
ch->transparent_copy = (value >> 1) & 0x1;
ch->constant_fill = value & 0x1;
break;
- case 0x28: /* DMA_CLNK_CTRL */
+ case 0x28: /* DMA_CLNK_CTRL */
ch->link_enabled = (value >> 15) & 0x1;
- if (value & (1 << 14)) { /* Stop_Lnk */
+ if (value & (1 << 14)) { /* Stop_Lnk */
ch->link_enabled = 0;
omap_dma_disable_channel(s, ch);
}
ch->link_next_ch = value & 0x1f;
break;
- case 0x2a: /* DMA_LCH_CTRL */
+ case 0x2a: /* DMA_LCH_CTRL */
ch->interleave_disabled = (value >> 15) & 0x1;
ch->type = value & 0xf;
break;
@@ -1012,7 +1005,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
uint16_t value)
{
switch (offset) {
- case 0xbc0: /* DMA_LCD_CSDP */
+ case 0xbc0: /* DMA_LCD_CSDP */
s->brust_f2 = (value >> 14) & 0x3;
s->pack_f2 = (value >> 13) & 0x1;
s->data_type_f2 = (1 << ((value >> 11) & 0x3));
@@ -1021,7 +1014,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
s->data_type_f1 = (1 << ((value >> 0) & 0x3));
break;
- case 0xbc2: /* DMA_LCD_CCR */
+ case 0xbc2: /* DMA_LCD_CCR */
s->mode_f2 = (value >> 14) & 0x3;
s->mode_f1 = (value >> 12) & 0x3;
s->end_prog = (value >> 11) & 0x1;
@@ -1033,7 +1026,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
s->bs = (value >> 4) & 0x1;
break;
- case 0xbc4: /* DMA_LCD_CTRL */
+ case 0xbc4: /* DMA_LCD_CTRL */
s->dst = (value >> 8) & 0x1;
s->src = ((value >> 6) & 0x3) << 1;
s->condition = 0;
@@ -1042,91 +1035,91 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
s->dual = value & 1;
break;
- case 0xbc8: /* TOP_B1_L */
+ case 0xbc8: /* TOP_B1_L */
s->src_f1_top &= 0xffff0000;
s->src_f1_top |= 0x0000ffff & value;
break;
- case 0xbca: /* TOP_B1_U */
+ case 0xbca: /* TOP_B1_U */
s->src_f1_top &= 0x0000ffff;
s->src_f1_top |= (uint32_t)value << 16;
break;
- case 0xbcc: /* BOT_B1_L */
+ case 0xbcc: /* BOT_B1_L */
s->src_f1_bottom &= 0xffff0000;
s->src_f1_bottom |= 0x0000ffff & value;
break;
- case 0xbce: /* BOT_B1_U */
+ case 0xbce: /* BOT_B1_U */
s->src_f1_bottom &= 0x0000ffff;
s->src_f1_bottom |= (uint32_t) value << 16;
break;
- case 0xbd0: /* TOP_B2_L */
+ case 0xbd0: /* TOP_B2_L */
s->src_f2_top &= 0xffff0000;
s->src_f2_top |= 0x0000ffff & value;
break;
- case 0xbd2: /* TOP_B2_U */
+ case 0xbd2: /* TOP_B2_U */
s->src_f2_top &= 0x0000ffff;
s->src_f2_top |= (uint32_t) value << 16;
break;
- case 0xbd4: /* BOT_B2_L */
+ case 0xbd4: /* BOT_B2_L */
s->src_f2_bottom &= 0xffff0000;
s->src_f2_bottom |= 0x0000ffff & value;
break;
- case 0xbd6: /* BOT_B2_U */
+ case 0xbd6: /* BOT_B2_U */
s->src_f2_bottom &= 0x0000ffff;
s->src_f2_bottom |= (uint32_t) value << 16;
break;
- case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
+ case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
s->element_index_f1 = value;
break;
- case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
+ case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
s->frame_index_f1 &= 0xffff0000;
s->frame_index_f1 |= 0x0000ffff & value;
break;
- case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
+ case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
s->frame_index_f1 &= 0x0000ffff;
s->frame_index_f1 |= (uint32_t) value << 16;
break;
- case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
+ case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
s->element_index_f2 = value;
break;
- case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
+ case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
s->frame_index_f2 &= 0xffff0000;
s->frame_index_f2 |= 0x0000ffff & value;
break;
- case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
+ case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
s->frame_index_f2 &= 0x0000ffff;
s->frame_index_f2 |= (uint32_t) value << 16;
break;
- case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
+ case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
s->elements_f1 = value;
break;
- case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
+ case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
s->frames_f1 = value;
break;
- case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
+ case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
s->elements_f2 = value;
break;
- case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
+ case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
s->frames_f2 = value;
break;
- case 0xbea: /* DMA_LCD_LCH_CTRL */
+ case 0xbea: /* DMA_LCD_LCH_CTRL */
s->lch_type = value & 0xf;
break;
@@ -1140,7 +1133,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
uint16_t *ret)
{
switch (offset) {
- case 0xbc0: /* DMA_LCD_CSDP */
+ case 0xbc0: /* DMA_LCD_CSDP */
*ret = (s->brust_f2 << 14) |
(s->pack_f2 << 13) |
((s->data_type_f2 >> 1) << 11) |
@@ -1149,7 +1142,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
((s->data_type_f1 >> 1) << 0);
break;
- case 0xbc2: /* DMA_LCD_CCR */
+ case 0xbc2: /* DMA_LCD_CCR */
*ret = (s->mode_f2 << 14) |
(s->mode_f1 << 12) |
(s->end_prog << 11) |
@@ -1161,7 +1154,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
(s->bs << 4);
break;
- case 0xbc4: /* DMA_LCD_CTRL */
+ case 0xbc4: /* DMA_LCD_CTRL */
qemu_irq_lower(s->irq);
*ret = (s->dst << 8) |
((s->src & 0x6) << 5) |
@@ -1170,79 +1163,79 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
s->dual;
break;
- case 0xbc8: /* TOP_B1_L */
+ case 0xbc8: /* TOP_B1_L */
*ret = s->src_f1_top & 0xffff;
break;
- case 0xbca: /* TOP_B1_U */
+ case 0xbca: /* TOP_B1_U */
*ret = s->src_f1_top >> 16;
break;
- case 0xbcc: /* BOT_B1_L */
+ case 0xbcc: /* BOT_B1_L */
*ret = s->src_f1_bottom & 0xffff;
break;
- case 0xbce: /* BOT_B1_U */
+ case 0xbce: /* BOT_B1_U */
*ret = s->src_f1_bottom >> 16;
break;
- case 0xbd0: /* TOP_B2_L */
+ case 0xbd0: /* TOP_B2_L */
*ret = s->src_f2_top & 0xffff;
break;
- case 0xbd2: /* TOP_B2_U */
+ case 0xbd2: /* TOP_B2_U */
*ret = s->src_f2_top >> 16;
break;
- case 0xbd4: /* BOT_B2_L */
+ case 0xbd4: /* BOT_B2_L */
*ret = s->src_f2_bottom & 0xffff;
break;
- case 0xbd6: /* BOT_B2_U */
+ case 0xbd6: /* BOT_B2_U */
*ret = s->src_f2_bottom >> 16;
break;
- case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
+ case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
*ret = s->element_index_f1;
break;
- case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
+ case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
*ret = s->frame_index_f1 & 0xffff;
break;
- case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
+ case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
*ret = s->frame_index_f1 >> 16;
break;
- case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
+ case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
*ret = s->element_index_f2;
break;
- case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
+ case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
*ret = s->frame_index_f2 & 0xffff;
break;
- case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
+ case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
*ret = s->frame_index_f2 >> 16;
break;
- case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
+ case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
*ret = s->elements_f1;
break;
- case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
+ case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
*ret = s->frames_f1;
break;
- case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
+ case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
*ret = s->elements_f2;
break;
- case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
+ case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
*ret = s->frames_f2;
break;
- case 0xbea: /* DMA_LCD_LCH_CTRL */
+ case 0xbea: /* DMA_LCD_LCH_CTRL */
*ret = s->lch_type;
break;
@@ -1256,7 +1249,7 @@ static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
uint16_t value)
{
switch (offset) {
- case 0x300: /* SYS_DMA_LCD_CTRL */
+ case 0x300: /* SYS_DMA_LCD_CTRL */
s->src = (value & 0x40) ? imif : emiff;
s->condition = 0;
/* Assume no bus errors and thus no BUS_ERROR irq bits. */
@@ -1264,42 +1257,42 @@ static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
s->dual = value & 1;
break;
- case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
+ case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
s->src_f1_top &= 0xffff0000;
s->src_f1_top |= 0x0000ffff & value;
break;
- case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
+ case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
s->src_f1_top &= 0x0000ffff;
s->src_f1_top |= (uint32_t)value << 16;
break;
- case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
+ case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
s->src_f1_bottom &= 0xffff0000;
s->src_f1_bottom |= 0x0000ffff & value;
break;
- case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
+ case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
s->src_f1_bottom &= 0x0000ffff;
s->src_f1_bottom |= (uint32_t)value << 16;
break;
- case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
+ case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
s->src_f2_top &= 0xffff0000;
s->src_f2_top |= 0x0000ffff & value;
break;
- case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
+ case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
s->src_f2_top &= 0x0000ffff;
s->src_f2_top |= (uint32_t)value << 16;
break;
- case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
+ case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
s->src_f2_bottom &= 0xffff0000;
s->src_f2_bottom |= 0x0000ffff & value;
break;
- case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
+ case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
s->src_f2_bottom &= 0x0000ffff;
s->src_f2_bottom |= (uint32_t)value << 16;
break;
@@ -1316,7 +1309,7 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
int i;
switch (offset) {
- case 0x300: /* SYS_DMA_LCD_CTRL */
+ case 0x300: /* SYS_DMA_LCD_CTRL */
i = s->condition;
s->condition = 0;
qemu_irq_lower(s->irq);
@@ -1324,35 +1317,35 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
(s->interrupts << 1) | s->dual;
break;
- case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
+ case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
*ret = s->src_f1_top & 0xffff;
break;
- case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
+ case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
*ret = s->src_f1_top >> 16;
break;
- case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
+ case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
*ret = s->src_f1_bottom & 0xffff;
break;
- case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
+ case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
*ret = s->src_f1_bottom >> 16;
break;
- case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
+ case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
*ret = s->src_f2_top & 0xffff;
break;
- case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
+ case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
*ret = s->src_f2_top >> 16;
break;
- case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
+ case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
*ret = s->src_f2_bottom & 0xffff;
break;
- case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
+ case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
*ret = s->src_f2_bottom >> 16;
break;
@@ -1365,18 +1358,18 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
{
switch (offset) {
- case 0x400: /* SYS_DMA_GCR */
+ case 0x400: /* SYS_DMA_GCR */
s->gcr = value;
break;
- case 0x404: /* DMA_GSCR */
+ case 0x404: /* DMA_GSCR */
if (value & 0x8)
omap_dma_disable_3_1_mapping(s);
else
omap_dma_enable_3_1_mapping(s);
break;
- case 0x408: /* DMA_GRST */
+ case 0x408: /* DMA_GRST */
if (value & 0x1)
omap_dma_reset(s->dma);
break;
@@ -1391,57 +1384,57 @@ static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
uint16_t *ret)
{
switch (offset) {
- case 0x400: /* SYS_DMA_GCR */
+ case 0x400: /* SYS_DMA_GCR */
*ret = s->gcr;
break;
- case 0x404: /* DMA_GSCR */
+ case 0x404: /* DMA_GSCR */
*ret = s->omap_3_1_mapping_disabled << 3;
break;
- case 0x408: /* DMA_GRST */
+ case 0x408: /* DMA_GRST */
*ret = 0;
break;
- case 0x442: /* DMA_HW_ID */
- case 0x444: /* DMA_PCh2_ID */
- case 0x446: /* DMA_PCh0_ID */
- case 0x448: /* DMA_PCh1_ID */
- case 0x44a: /* DMA_PChG_ID */
- case 0x44c: /* DMA_PChD_ID */
+ case 0x442: /* DMA_HW_ID */
+ case 0x444: /* DMA_PCh2_ID */
+ case 0x446: /* DMA_PCh0_ID */
+ case 0x448: /* DMA_PCh1_ID */
+ case 0x44a: /* DMA_PChG_ID */
+ case 0x44c: /* DMA_PChD_ID */
*ret = 1;
break;
- case 0x44e: /* DMA_CAPS_0_U */
+ case 0x44e: /* DMA_CAPS_0_U */
*ret = (s->caps[0] >> 16) & 0xffff;
break;
- case 0x450: /* DMA_CAPS_0_L */
+ case 0x450: /* DMA_CAPS_0_L */
*ret = (s->caps[0] >> 0) & 0xffff;
break;
- case 0x452: /* DMA_CAPS_1_U */
+ case 0x452: /* DMA_CAPS_1_U */
*ret = (s->caps[1] >> 16) & 0xffff;
break;
- case 0x454: /* DMA_CAPS_1_L */
+ case 0x454: /* DMA_CAPS_1_L */
*ret = (s->caps[1] >> 0) & 0xffff;
break;
- case 0x456: /* DMA_CAPS_2 */
+ case 0x456: /* DMA_CAPS_2 */
*ret = s->caps[2];
break;
- case 0x458: /* DMA_CAPS_3 */
+ case 0x458: /* DMA_CAPS_3 */
*ret = s->caps[3];
break;
- case 0x45a: /* DMA_CAPS_4 */
+ case 0x45a: /* DMA_CAPS_4 */
*ret = s->caps[4];
break;
- case 0x460: /* DMA_PCh2_SR */
- case 0x480: /* DMA_PCh0_SR */
- case 0x482: /* DMA_PCh1_SR */
- case 0x4c0: /* DMA_PChD_SR_0 */
+ case 0x460: /* DMA_PCh2_SR */
+ case 0x480: /* DMA_PCh0_SR */
+ case 0x482: /* DMA_PCh1_SR */
+ case 0x4c0: /* DMA_PChD_SR_0 */
qemu_log_mask(LOG_UNIMP,
"%s: Physical Channel Status Registers not implemented\n",
__func__);
@@ -1587,41 +1580,40 @@ static void omap_dma_setcaps(struct omap_dma_s *s)
case omap_dma_3_1:
break;
case omap_dma_3_2:
- case omap_dma_4:
/* XXX Only available for sDMA */
s->caps[0] =
- (1 << 19) | /* Constant Fill Capability */
- (1 << 18); /* Transparent BLT Capability */
+ (1 << 19) | /* Constant Fill Capability */
+ (1 << 18); /* Transparent BLT Capability */
s->caps[1] =
- (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */
+ (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */
s->caps[2] =
- (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
- (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */
- (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */
- (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */
- (1 << 4) | /* DST_CONST_ADRS_CPBLTY */
- (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
- (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */
- (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */
- (1 << 0); /* SRC_CONST_ADRS_CPBLTY */
+ (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
+ (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */
+ (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */
+ (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */
+ (1 << 4) | /* DST_CONST_ADRS_CPBLTY */
+ (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
+ (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */
+ (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */
+ (1 << 0); /* SRC_CONST_ADRS_CPBLTY */
s->caps[3] =
- (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
- (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */
- (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */
- (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */
- (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
- (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
- (1 << 1) | /* FRAME_SYNCHR_CPBLTY */
- (1 << 0); /* ELMNT_SYNCHR_CPBLTY */
+ (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
+ (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */
+ (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */
+ (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */
+ (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
+ (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
+ (1 << 1) | /* FRAME_SYNCHR_CPBLTY */
+ (1 << 0); /* ELMNT_SYNCHR_CPBLTY */
s->caps[4] =
- (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
- (1 << 6) | /* SYNC_STATUS_CPBLTY */
- (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */
- (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */
- (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */
- (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */
- (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */
- (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
+ (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
+ (1 << 6) | /* SYNC_STATUS_CPBLTY */
+ (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */
+ (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */
+ (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */
+ (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */
+ (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */
+ (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
break;
}
}
@@ -1678,443 +1670,6 @@ struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
return s->dma;
}
-static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
-{
- struct omap_dma_channel_s *ch = s->ch;
- uint32_t bmp, bit;
-
- for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
- if (ch->status) {
- bmp |= bit;
- ch->cstatus |= ch->status;
- ch->status = 0;
- }
- if ((s->irqstat[0] |= s->irqen[0] & bmp))
- qemu_irq_raise(s->irq[0]);
- if ((s->irqstat[1] |= s->irqen[1] & bmp))
- qemu_irq_raise(s->irq[1]);
- if ((s->irqstat[2] |= s->irqen[2] & bmp))
- qemu_irq_raise(s->irq[2]);
- if ((s->irqstat[3] |= s->irqen[3] & bmp))
- qemu_irq_raise(s->irq[3]);
-}
-
-static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_dma_s *s = opaque;
- int irqn = 0, chnum;
- struct omap_dma_channel_s *ch;
-
- if (size == 1) {
- return omap_badwidth_read16(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* DMA4_REVISION */
- return 0x40;
-
- case 0x14: /* DMA4_IRQSTATUS_L3 */
- irqn ++;
- /* fall through */
- case 0x10: /* DMA4_IRQSTATUS_L2 */
- irqn ++;
- /* fall through */
- case 0x0c: /* DMA4_IRQSTATUS_L1 */
- irqn ++;
- /* fall through */
- case 0x08: /* DMA4_IRQSTATUS_L0 */
- return s->irqstat[irqn];
-
- case 0x24: /* DMA4_IRQENABLE_L3 */
- irqn ++;
- /* fall through */
- case 0x20: /* DMA4_IRQENABLE_L2 */
- irqn ++;
- /* fall through */
- case 0x1c: /* DMA4_IRQENABLE_L1 */
- irqn ++;
- /* fall through */
- case 0x18: /* DMA4_IRQENABLE_L0 */
- return s->irqen[irqn];
-
- case 0x28: /* DMA4_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x2c: /* DMA4_OCP_SYSCONFIG */
- return s->ocp;
-
- case 0x64: /* DMA4_CAPS_0 */
- return s->caps[0];
- case 0x6c: /* DMA4_CAPS_2 */
- return s->caps[2];
- case 0x70: /* DMA4_CAPS_3 */
- return s->caps[3];
- case 0x74: /* DMA4_CAPS_4 */
- return s->caps[4];
-
- case 0x78: /* DMA4_GCR */
- return s->gcr;
-
- case 0x80 ... 0xfff:
- addr -= 0x80;
- chnum = addr / 0x60;
- ch = s->ch + chnum;
- addr -= chnum * 0x60;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return 0;
- }
-
- /* Per-channel registers */
- switch (addr) {
- case 0x00: /* DMA4_CCR */
- return (ch->buf_disable << 25) |
- (ch->src_sync << 24) |
- (ch->prefetch << 23) |
- ((ch->sync & 0x60) << 14) |
- (ch->bs << 18) |
- (ch->transparent_copy << 17) |
- (ch->constant_fill << 16) |
- (ch->mode[1] << 14) |
- (ch->mode[0] << 12) |
- (0 << 10) | (0 << 9) |
- (ch->suspend << 8) |
- (ch->enable << 7) |
- (ch->priority << 6) |
- (ch->fs << 5) | (ch->sync & 0x1f);
-
- case 0x04: /* DMA4_CLNK_CTRL */
- return (ch->link_enabled << 15) | ch->link_next_ch;
-
- case 0x08: /* DMA4_CICR */
- return ch->interrupts;
-
- case 0x0c: /* DMA4_CSR */
- return ch->cstatus;
-
- case 0x10: /* DMA4_CSDP */
- return (ch->endian[0] << 21) |
- (ch->endian_lock[0] << 20) |
- (ch->endian[1] << 19) |
- (ch->endian_lock[1] << 18) |
- (ch->write_mode << 16) |
- (ch->burst[1] << 14) |
- (ch->pack[1] << 13) |
- (ch->translate[1] << 9) |
- (ch->burst[0] << 7) |
- (ch->pack[0] << 6) |
- (ch->translate[0] << 2) |
- (ch->data_type >> 1);
-
- case 0x14: /* DMA4_CEN */
- return ch->elements;
-
- case 0x18: /* DMA4_CFN */
- return ch->frames;
-
- case 0x1c: /* DMA4_CSSA */
- return ch->addr[0];
-
- case 0x20: /* DMA4_CDSA */
- return ch->addr[1];
-
- case 0x24: /* DMA4_CSEI */
- return ch->element_index[0];
-
- case 0x28: /* DMA4_CSFI */
- return ch->frame_index[0];
-
- case 0x2c: /* DMA4_CDEI */
- return ch->element_index[1];
-
- case 0x30: /* DMA4_CDFI */
- return ch->frame_index[1];
-
- case 0x34: /* DMA4_CSAC */
- return ch->active_set.src & 0xffff;
-
- case 0x38: /* DMA4_CDAC */
- return ch->active_set.dest & 0xffff;
-
- case 0x3c: /* DMA4_CCEN */
- return ch->active_set.element;
-
- case 0x40: /* DMA4_CCFN */
- return ch->active_set.frame;
-
- case 0x44: /* DMA4_COLOR */
- /* XXX only in sDMA */
- return ch->color;
-
- default:
- OMAP_BAD_REG(addr);
- return 0;
- }
-}
-
-static void omap_dma4_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_dma_s *s = opaque;
- int chnum, irqn = 0;
- struct omap_dma_channel_s *ch;
-
- if (size == 1) {
- omap_badwidth_write16(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x14: /* DMA4_IRQSTATUS_L3 */
- irqn ++;
- /* fall through */
- case 0x10: /* DMA4_IRQSTATUS_L2 */
- irqn ++;
- /* fall through */
- case 0x0c: /* DMA4_IRQSTATUS_L1 */
- irqn ++;
- /* fall through */
- case 0x08: /* DMA4_IRQSTATUS_L0 */
- s->irqstat[irqn] &= ~value;
- if (!s->irqstat[irqn])
- qemu_irq_lower(s->irq[irqn]);
- return;
-
- case 0x24: /* DMA4_IRQENABLE_L3 */
- irqn ++;
- /* fall through */
- case 0x20: /* DMA4_IRQENABLE_L2 */
- irqn ++;
- /* fall through */
- case 0x1c: /* DMA4_IRQENABLE_L1 */
- irqn ++;
- /* fall through */
- case 0x18: /* DMA4_IRQENABLE_L0 */
- s->irqen[irqn] = value;
- return;
-
- case 0x2c: /* DMA4_OCP_SYSCONFIG */
- if (value & 2) /* SOFTRESET */
- omap_dma_reset(s->dma);
- s->ocp = value & 0x3321;
- if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */
- qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n",
- __func__);
- }
- return;
-
- case 0x78: /* DMA4_GCR */
- s->gcr = value & 0x00ff00ff;
- if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */
- qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n",
- __func__);
- }
- return;
-
- case 0x80 ... 0xfff:
- addr -= 0x80;
- chnum = addr / 0x60;
- ch = s->ch + chnum;
- addr -= chnum * 0x60;
- break;
-
- case 0x00: /* DMA4_REVISION */
- case 0x28: /* DMA4_SYSSTATUS */
- case 0x64: /* DMA4_CAPS_0 */
- case 0x6c: /* DMA4_CAPS_2 */
- case 0x70: /* DMA4_CAPS_3 */
- case 0x74: /* DMA4_CAPS_4 */
- OMAP_RO_REG(addr);
- return;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-
- /* Per-channel registers */
- switch (addr) {
- case 0x00: /* DMA4_CCR */
- ch->buf_disable = (value >> 25) & 1;
- ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */
- if (ch->buf_disable && !ch->src_sync) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Buffering disable is not allowed in "
- "destination synchronised mode\n", __func__);
- }
- ch->prefetch = (value >> 23) & 1;
- ch->bs = (value >> 18) & 1;
- ch->transparent_copy = (value >> 17) & 1;
- ch->constant_fill = (value >> 16) & 1;
- ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
- ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
- ch->suspend = (value & 0x0100) >> 8;
- ch->priority = (value & 0x0040) >> 6;
- ch->fs = (value & 0x0020) >> 5;
- if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: For a packet transfer at least one port "
- "must be constant-addressed\n", __func__);
- }
- ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
- /* XXX must be 0x01 for CamDMA */
-
- if (value & 0x0080)
- omap_dma_enable_channel(s, ch);
- else
- omap_dma_disable_channel(s, ch);
-
- break;
-
- case 0x04: /* DMA4_CLNK_CTRL */
- ch->link_enabled = (value >> 15) & 0x1;
- ch->link_next_ch = value & 0x1f;
- break;
-
- case 0x08: /* DMA4_CICR */
- ch->interrupts = value & 0x09be;
- break;
-
- case 0x0c: /* DMA4_CSR */
- ch->cstatus &= ~value;
- break;
-
- case 0x10: /* DMA4_CSDP */
- ch->endian[0] =(value >> 21) & 1;
- ch->endian_lock[0] =(value >> 20) & 1;
- ch->endian[1] =(value >> 19) & 1;
- ch->endian_lock[1] =(value >> 18) & 1;
- if (ch->endian[0] != ch->endian[1]) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: DMA endianness conversion enable attempt\n",
- __func__);
- }
- ch->write_mode = (value >> 16) & 3;
- ch->burst[1] = (value & 0xc000) >> 14;
- ch->pack[1] = (value & 0x2000) >> 13;
- ch->translate[1] = (value & 0x1e00) >> 9;
- ch->burst[0] = (value & 0x0180) >> 7;
- ch->pack[0] = (value & 0x0040) >> 6;
- ch->translate[0] = (value & 0x003c) >> 2;
- if (ch->translate[0] | ch->translate[1]) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: bad MReqAddressTranslate sideband signal\n",
- __func__);
- }
- ch->data_type = 1 << (value & 3);
- if ((value & 3) == 3) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: bad data_type for DMA channel\n", __func__);
- ch->data_type >>= 1;
- }
- break;
-
- case 0x14: /* DMA4_CEN */
- ch->set_update = 1;
- ch->elements = value & 0xffffff;
- break;
-
- case 0x18: /* DMA4_CFN */
- ch->frames = value & 0xffff;
- ch->set_update = 1;
- break;
-
- case 0x1c: /* DMA4_CSSA */
- ch->addr[0] = (hwaddr) (uint32_t) value;
- ch->set_update = 1;
- break;
-
- case 0x20: /* DMA4_CDSA */
- ch->addr[1] = (hwaddr) (uint32_t) value;
- ch->set_update = 1;
- break;
-
- case 0x24: /* DMA4_CSEI */
- ch->element_index[0] = (int16_t) value;
- ch->set_update = 1;
- break;
-
- case 0x28: /* DMA4_CSFI */
- ch->frame_index[0] = (int32_t) value;
- ch->set_update = 1;
- break;
-
- case 0x2c: /* DMA4_CDEI */
- ch->element_index[1] = (int16_t) value;
- ch->set_update = 1;
- break;
-
- case 0x30: /* DMA4_CDFI */
- ch->frame_index[1] = (int32_t) value;
- ch->set_update = 1;
- break;
-
- case 0x44: /* DMA4_COLOR */
- /* XXX only in sDMA */
- ch->color = value;
- break;
-
- case 0x34: /* DMA4_CSAC */
- case 0x38: /* DMA4_CDAC */
- case 0x3c: /* DMA4_CCEN */
- case 0x40: /* DMA4_CCFN */
- OMAP_RO_REG(addr);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_dma4_ops = {
- .read = omap_dma4_read,
- .write = omap_dma4_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
- MemoryRegion *sysmem,
- struct omap_mpu_state_s *mpu, int fifo,
- int chans, omap_clk iclk, omap_clk fclk)
-{
- int i;
- struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);
-
- s->model = omap_dma_4;
- s->chans = chans;
- s->mpu = mpu;
- s->clk = fclk;
-
- s->dma = soc_dma_init(s->chans);
- s->dma->freq = omap_clk_getrate(fclk);
- s->dma->transfer_fn = omap_dma_transfer_generic;
- s->dma->setup_fn = omap_dma_transfer_setup;
- s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
- s->dma->opaque = s;
- for (i = 0; i < s->chans; i ++) {
- s->ch[i].dma = &s->dma->ch[i];
- s->dma->ch[i].opaque = &s->ch[i];
- }
-
- memcpy(&s->irq, irqs, sizeof(s->irq));
- s->intr_update = omap_dma_interrupts_4_update;
-
- omap_dma_setcaps(s);
- omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
- omap_dma_reset(s->dma);
- omap_dma_clk_update(s, 0, !!s->dma->freq);
-
- memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- mpu->drq = s->dma->drq;
-
- return s->dma;
-}
-
struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
{
struct omap_dma_s *s = dma->opaque;
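
The removed omap_dma4_read()/omap_dma4_write() above decode the four banked DMA4_IRQSTATUS_Ln / DMA4_IRQENABLE_Ln registers with a fall-through counter rather than a lookup table. A minimal standalone sketch of that idiom, using the offsets from the deleted handlers (dma4_irq_line is an illustrative name, not part of the patch):

    /* Each IRQ line's register sits 4 bytes above the previous one, so
     * falling through counts how far above the L0 register we landed. */
    static int dma4_irq_line(unsigned addr)
    {
        int irqn = 0;

        switch (addr) {
        case 0x14:          /* DMA4_IRQSTATUS_L3 */
            irqn++;
            /* fall through */
        case 0x10:          /* DMA4_IRQSTATUS_L2 */
            irqn++;
            /* fall through */
        case 0x0c:          /* DMA4_IRQSTATUS_L1 */
            irqn++;
            /* fall through */
        case 0x08:          /* DMA4_IRQSTATUS_L0 */
            return irqn;    /* index into s->irqstat[] / s->irqen[] */
        default:
            return -1;      /* not an IRQ status register */
        }
    }
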
diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c
index 1e49c22..277d934 100644
--- a/hw/dma/pl080.c
+++ b/hw/dma/pl080.c
@@ -408,20 +408,19 @@ static void pl081_init(Object *obj)
s->nchannels = 2;
}
-static Property pl080_properties[] = {
+static const Property pl080_properties[] = {
DEFINE_PROP_LINK("downstream", PL080State, downstream,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pl080_class_init(ObjectClass *oc, void *data)
+static void pl080_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->vmsd = &vmstate_pl080;
dc->realize = pl080_realize;
device_class_set_props(dc, pl080_properties);
- dc->reset = pl080_reset;
+ device_class_set_legacy_reset(dc, pl080_reset);
}
static const TypeInfo pl080_info = {
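
The pl080 hunk above is the first of several identical mechanical conversions in this diff (pl330, xlnx-zdma, xlnx_csu_dma and the FSI/GPIO models below follow the same shape): Property arrays become const and drop the DEFINE_PROP_END_OF_LIST() terminator (the element count is taken from the array itself), class_init callbacks take a const void *data, and direct dc->reset assignments go through device_class_set_legacy_reset(). A condensed sketch of the resulting shape, assuming the usual qdev headers and using placeholder names (MyDeviceState, my_device_*):

    static const Property my_device_properties[] = {
        DEFINE_PROP_LINK("downstream", MyDeviceState, downstream,
                         TYPE_MEMORY_REGION, MemoryRegion *),
        /* no DEFINE_PROP_END_OF_LIST(): device_class_set_props() now
         * takes the element count from the array itself */
    };

    static void my_device_class_init(ObjectClass *oc, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(oc);

        dc->realize = my_device_realize;
        device_class_set_props(dc, my_device_properties);
        /* legacy reset handlers are registered instead of assigned */
        device_class_set_legacy_reset(dc, my_device_reset);
    }
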
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
index 5f89295..a570bb0 100644
--- a/hw/dma/pl330.c
+++ b/hw/dma/pl330.c
@@ -22,7 +22,7 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/timer.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@@ -1646,7 +1646,7 @@ static void pl330_realize(DeviceState *dev, Error **errp)
pl330_fifo_init(&s->fifo, s->data_width / 4 * s->data_buffer_dep);
}
-static Property pl330_properties[] = {
+static const Property pl330_properties[] = {
/* CR0 */
DEFINE_PROP_UINT32("num_chnls", PL330State, num_chnls, 8),
DEFINE_PROP_UINT8("num_periph_req", PL330State, num_periph_req, 4),
@@ -1669,16 +1669,14 @@ static Property pl330_properties[] = {
DEFINE_PROP_LINK("memory", PL330State, mem_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void pl330_class_init(ObjectClass *klass, void *data)
+static void pl330_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pl330_realize;
- dc->reset = pl330_reset;
+ device_class_set_legacy_reset(dc, pl330_reset);
device_class_set_props(dc, pl330_properties);
dc->vmsd = &vmstate_pl330;
}
diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c
deleted file mode 100644
index 9f62f0b..0000000
--- a/hw/dma/pxa2xx_dma.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * Intel XScale PXA255/270 DMA controller.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Copyright (c) 2006 Thorsten Zitterell
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPL.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "hw/arm/pxa.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "qapi/error.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-#define PXA255_DMA_NUM_CHANNELS 16
-#define PXA27X_DMA_NUM_CHANNELS 32
-
-#define PXA2XX_DMA_NUM_REQUESTS 75
-
-typedef struct {
- uint32_t descr;
- uint32_t src;
- uint32_t dest;
- uint32_t cmd;
- uint32_t state;
- int request;
-} PXA2xxDMAChannel;
-
-#define TYPE_PXA2XX_DMA "pxa2xx-dma"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxDMAState, PXA2XX_DMA)
-
-struct PXA2xxDMAState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
- qemu_irq irq;
-
- uint32_t stopintr;
- uint32_t eorintr;
- uint32_t rasintr;
- uint32_t startintr;
- uint32_t endintr;
-
- uint32_t align;
- uint32_t pio;
-
- int channels;
- PXA2xxDMAChannel *chan;
-
- uint8_t req[PXA2XX_DMA_NUM_REQUESTS];
-
- /* Flag to avoid recursive DMA invocations. */
- int running;
-};
-
-#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */
-#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */
-#define DALGN 0x00a0 /* DMA Alignment register */
-#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */
-#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */
-#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */
-#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */
-#define DINT 0x00f0 /* DMA Interrupt register */
-#define DRCMR0 0x0100 /* Request to Channel Map register 0 */
-#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
-#define D_CH0 0x0200 /* Channel 0 Descriptor start */
-#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
-#define DRCMR74 0x1128 /* Request to Channel Map register 74 */
-
-/* Per-channel register */
-#define DDADR 0x00
-#define DSADR 0x01
-#define DTADR 0x02
-#define DCMD 0x03
-
-/* Bit-field masks */
-#define DRCMR_CHLNUM 0x1f
-#define DRCMR_MAPVLD (1 << 7)
-#define DDADR_STOP (1 << 0)
-#define DDADR_BREN (1 << 1)
-#define DCMD_LEN 0x1fff
-#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
-#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
-#define DCMD_FLYBYT (1 << 19)
-#define DCMD_FLYBYS (1 << 20)
-#define DCMD_ENDIRQEN (1 << 21)
-#define DCMD_STARTIRQEN (1 << 22)
-#define DCMD_CMPEN (1 << 25)
-#define DCMD_FLOWTRG (1 << 28)
-#define DCMD_FLOWSRC (1 << 29)
-#define DCMD_INCTRGADDR (1 << 30)
-#define DCMD_INCSRCADDR (1 << 31)
-#define DCSR_BUSERRINTR (1 << 0)
-#define DCSR_STARTINTR (1 << 1)
-#define DCSR_ENDINTR (1 << 2)
-#define DCSR_STOPINTR (1 << 3)
-#define DCSR_RASINTR (1 << 4)
-#define DCSR_REQPEND (1 << 8)
-#define DCSR_EORINT (1 << 9)
-#define DCSR_CMPST (1 << 10)
-#define DCSR_MASKRUN (1 << 22)
-#define DCSR_RASIRQEN (1 << 23)
-#define DCSR_CLRCMPST (1 << 24)
-#define DCSR_SETCMPST (1 << 25)
-#define DCSR_EORSTOPEN (1 << 26)
-#define DCSR_EORJMPEN (1 << 27)
-#define DCSR_EORIRQEN (1 << 28)
-#define DCSR_STOPIRQEN (1 << 29)
-#define DCSR_NODESCFETCH (1 << 30)
-#define DCSR_RUN (1 << 31)
-
-static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
-{
- if (ch >= 0) {
- if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
- (s->chan[ch].state & DCSR_STOPINTR))
- s->stopintr |= 1 << ch;
- else
- s->stopintr &= ~(1 << ch);
-
- if ((s->chan[ch].state & DCSR_EORIRQEN) &&
- (s->chan[ch].state & DCSR_EORINT))
- s->eorintr |= 1 << ch;
- else
- s->eorintr &= ~(1 << ch);
-
- if ((s->chan[ch].state & DCSR_RASIRQEN) &&
- (s->chan[ch].state & DCSR_RASINTR))
- s->rasintr |= 1 << ch;
- else
- s->rasintr &= ~(1 << ch);
-
- if (s->chan[ch].state & DCSR_STARTINTR)
- s->startintr |= 1 << ch;
- else
- s->startintr &= ~(1 << ch);
-
- if (s->chan[ch].state & DCSR_ENDINTR)
- s->endintr |= 1 << ch;
- else
- s->endintr &= ~(1 << ch);
- }
-
- if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
- qemu_irq_raise(s->irq);
- else
- qemu_irq_lower(s->irq);
-}
-
-static inline void pxa2xx_dma_descriptor_fetch(
- PXA2xxDMAState *s, int ch)
-{
- uint32_t desc[4];
- hwaddr daddr = s->chan[ch].descr & ~0xf;
- if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
- daddr += 32;
-
- cpu_physical_memory_read(daddr, desc, 16);
- s->chan[ch].descr = desc[DDADR];
- s->chan[ch].src = desc[DSADR];
- s->chan[ch].dest = desc[DTADR];
- s->chan[ch].cmd = desc[DCMD];
-
- if (s->chan[ch].cmd & DCMD_FLOWSRC)
- s->chan[ch].src &= ~3;
- if (s->chan[ch].cmd & DCMD_FLOWTRG)
- s->chan[ch].dest &= ~3;
-
- if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
- printf("%s: unsupported mode in channel %i\n", __func__, ch);
-
- if (s->chan[ch].cmd & DCMD_STARTIRQEN)
- s->chan[ch].state |= DCSR_STARTINTR;
-}
-
-static void pxa2xx_dma_run(PXA2xxDMAState *s)
-{
- int c, srcinc, destinc;
- uint32_t n, size;
- uint32_t width;
- uint32_t length;
- uint8_t buffer[32];
- PXA2xxDMAChannel *ch;
-
- if (s->running ++)
- return;
-
- while (s->running) {
- s->running = 1;
- for (c = 0; c < s->channels; c ++) {
- ch = &s->chan[c];
-
- while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
- /* Test for pending requests */
- if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
- break;
-
- length = ch->cmd & DCMD_LEN;
- size = DCMD_SIZE(ch->cmd);
- width = DCMD_WIDTH(ch->cmd);
-
- srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
- destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;
-
- while (length) {
- size = MIN(length, size);
-
- for (n = 0; n < size; n += width) {
- cpu_physical_memory_read(ch->src, buffer + n, width);
- ch->src += srcinc;
- }
-
- for (n = 0; n < size; n += width) {
- cpu_physical_memory_write(ch->dest, buffer + n, width);
- ch->dest += destinc;
- }
-
- length -= size;
-
- if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
- !ch->request) {
- ch->state |= DCSR_EORINT;
- if (ch->state & DCSR_EORSTOPEN)
- ch->state |= DCSR_STOPINTR;
- if ((ch->state & DCSR_EORJMPEN) &&
- !(ch->state & DCSR_NODESCFETCH))
- pxa2xx_dma_descriptor_fetch(s, c);
- break;
- }
- }
-
- ch->cmd = (ch->cmd & ~DCMD_LEN) | length;
-
- /* Is the transfer complete now? */
- if (!length) {
- if (ch->cmd & DCMD_ENDIRQEN)
- ch->state |= DCSR_ENDINTR;
-
- if ((ch->state & DCSR_NODESCFETCH) ||
- (ch->descr & DDADR_STOP) ||
- (ch->state & DCSR_EORSTOPEN)) {
- ch->state |= DCSR_STOPINTR;
- ch->state &= ~DCSR_RUN;
-
- break;
- }
-
- ch->state |= DCSR_STOPINTR;
- break;
- }
- }
- }
-
- s->running --;
- }
-}
-
-static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
- unsigned int channel;
-
- if (size != 4) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
- __func__, size);
- return 5;
- }
-
- switch (offset) {
- case DRCMR64 ... DRCMR74:
- offset -= DRCMR64 - DRCMR0 - (64 << 2);
- /* Fall through */
- case DRCMR0 ... DRCMR63:
- channel = (offset - DRCMR0) >> 2;
- return s->req[channel];
-
- case DRQSR0:
- case DRQSR1:
- case DRQSR2:
- return 0;
-
- case DCSR0 ... DCSR31:
- channel = offset >> 2;
- if (s->chan[channel].request)
- return s->chan[channel].state | DCSR_REQPEND;
- return s->chan[channel].state;
-
- case DINT:
- return s->stopintr | s->eorintr | s->rasintr |
- s->startintr | s->endintr;
-
- case DALGN:
- return s->align;
-
- case DPCSR:
- return s->pio;
- }
-
- if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
- channel = (offset - D_CH0) >> 4;
- switch ((offset & 0x0f) >> 2) {
- case DDADR:
- return s->chan[channel].descr;
- case DSADR:
- return s->chan[channel].src;
- case DTADR:
- return s->chan[channel].dest;
- case DCMD:
- return s->chan[channel].cmd;
- }
- }
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- return 7;
-}
-
-static void pxa2xx_dma_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
- unsigned int channel;
-
- if (size != 4) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
- __func__, size);
- return;
- }
-
- switch (offset) {
- case DRCMR64 ... DRCMR74:
- offset -= DRCMR64 - DRCMR0 - (64 << 2);
- /* Fall through */
- case DRCMR0 ... DRCMR63:
- channel = (offset - DRCMR0) >> 2;
-
- if (value & DRCMR_MAPVLD)
- if ((value & DRCMR_CHLNUM) > s->channels)
- hw_error("%s: Bad DMA channel %i\n",
- __func__, (unsigned)value & DRCMR_CHLNUM);
-
- s->req[channel] = value;
- break;
-
- case DRQSR0:
- case DRQSR1:
- case DRQSR2:
- /* Nothing to do */
- break;
-
- case DCSR0 ... DCSR31:
- channel = offset >> 2;
- s->chan[channel].state &= 0x0000071f & ~(value &
- (DCSR_EORINT | DCSR_ENDINTR |
- DCSR_STARTINTR | DCSR_BUSERRINTR));
- s->chan[channel].state |= value & 0xfc800000;
-
- if (s->chan[channel].state & DCSR_STOPIRQEN)
- s->chan[channel].state &= ~DCSR_STOPINTR;
-
- if (value & DCSR_NODESCFETCH) {
- /* No-descriptor-fetch mode */
- if (value & DCSR_RUN) {
- s->chan[channel].state &= ~DCSR_STOPINTR;
- pxa2xx_dma_run(s);
- }
- } else {
- /* Descriptor-fetch mode */
- if (value & DCSR_RUN) {
- s->chan[channel].state &= ~DCSR_STOPINTR;
- pxa2xx_dma_descriptor_fetch(s, channel);
- pxa2xx_dma_run(s);
- }
- }
-
- /* Shouldn't matter as our DMA is synchronous. */
- if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
- s->chan[channel].state |= DCSR_STOPINTR;
-
- if (value & DCSR_CLRCMPST)
- s->chan[channel].state &= ~DCSR_CMPST;
- if (value & DCSR_SETCMPST)
- s->chan[channel].state |= DCSR_CMPST;
-
- pxa2xx_dma_update(s, channel);
- break;
-
- case DALGN:
- s->align = value;
- break;
-
- case DPCSR:
- s->pio = value & 0x80000001;
- break;
-
- default:
- if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
- channel = (offset - D_CH0) >> 4;
- switch ((offset & 0x0f) >> 2) {
- case DDADR:
- s->chan[channel].descr = value;
- break;
- case DSADR:
- s->chan[channel].src = value;
- break;
- case DTADR:
- s->chan[channel].dest = value;
- break;
- case DCMD:
- s->chan[channel].cmd = value;
- break;
- default:
- goto fail;
- }
-
- break;
- }
- fail:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
- __func__, offset);
- }
-}
-
-static const MemoryRegionOps pxa2xx_dma_ops = {
- .read = pxa2xx_dma_read,
- .write = pxa2xx_dma_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void pxa2xx_dma_request(void *opaque, int req_num, int on)
-{
- PXA2xxDMAState *s = opaque;
- int ch;
- if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
- hw_error("%s: Bad DMA request %i\n", __func__, req_num);
-
- if (!(s->req[req_num] & DRCMR_MAPVLD))
- return;
- ch = s->req[req_num] & DRCMR_CHLNUM;
-
- if (!s->chan[ch].request && on)
- s->chan[ch].state |= DCSR_RASINTR;
- else
- s->chan[ch].state &= ~DCSR_RASINTR;
- if (s->chan[ch].request && !on)
- s->chan[ch].state |= DCSR_EORINT;
-
- s->chan[ch].request = on;
- if (on) {
- pxa2xx_dma_run(s);
- pxa2xx_dma_update(s, ch);
- }
-}
-
-static void pxa2xx_dma_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- PXA2xxDMAState *s = PXA2XX_DMA(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
-
- qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
- "pxa2xx.dma", 0x00010000);
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
-}
-
-static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
-{
- PXA2xxDMAState *s = PXA2XX_DMA(dev);
- int i;
-
- if (s->channels <= 0) {
- error_setg(errp, "channels value invalid");
- return;
- }
-
- s->chan = g_new0(PXA2xxDMAChannel, s->channels);
-
- for (i = 0; i < s->channels; i ++)
- s->chan[i].state = DCSR_STOPINTR;
-}
-
-DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
-{
- DeviceState *dev;
-
- dev = qdev_new("pxa2xx-dma");
- qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
-
- return dev;
-}
-
-DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
-{
- DeviceState *dev;
-
- dev = qdev_new("pxa2xx-dma");
- qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
-
- return dev;
-}
-
-static bool is_version_0(void *opaque, int version_id)
-{
- return version_id == 0;
-}
-
-static const VMStateDescription vmstate_pxa2xx_dma_chan = {
- .name = "pxa2xx_dma_chan",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(descr, PXA2xxDMAChannel),
- VMSTATE_UINT32(src, PXA2xxDMAChannel),
- VMSTATE_UINT32(dest, PXA2xxDMAChannel),
- VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
- VMSTATE_UINT32(state, PXA2xxDMAChannel),
- VMSTATE_INT32(request, PXA2xxDMAChannel),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static const VMStateDescription vmstate_pxa2xx_dma = {
- .name = "pxa2xx_dma",
- .version_id = 1,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UNUSED_TEST(is_version_0, 4),
- VMSTATE_UINT32(stopintr, PXA2xxDMAState),
- VMSTATE_UINT32(eorintr, PXA2xxDMAState),
- VMSTATE_UINT32(rasintr, PXA2xxDMAState),
- VMSTATE_UINT32(startintr, PXA2xxDMAState),
- VMSTATE_UINT32(endintr, PXA2xxDMAState),
- VMSTATE_UINT32(align, PXA2xxDMAState),
- VMSTATE_UINT32(pio, PXA2xxDMAState),
- VMSTATE_BUFFER(req, PXA2xxDMAState),
- VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
- vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static Property pxa2xx_dma_properties[] = {
- DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "PXA2xx DMA controller";
- dc->vmsd = &vmstate_pxa2xx_dma;
- device_class_set_props(dc, pxa2xx_dma_properties);
- dc->realize = pxa2xx_dma_realize;
-}
-
-static const TypeInfo pxa2xx_dma_info = {
- .name = TYPE_PXA2XX_DMA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxDMAState),
- .instance_init = pxa2xx_dma_init,
- .class_init = pxa2xx_dma_class_init,
-};
-
-static void pxa2xx_dma_register_types(void)
-{
- type_register_static(&pxa2xx_dma_info);
-}
-
-type_init(pxa2xx_dma_register_types)
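
For reference, the descriptor format the deleted pxa2xx_dma_descriptor_fetch() consumed is four little-endian words read from guest memory at (DDADR & ~0xf), with a 32-byte branch offset when DDADR_BREN and DCSR_CMPST are both set. Reconstructed as a struct for illustration only (the original code used a plain uint32_t desc[4] indexed by DDADR/DSADR/DTADR/DCMD):

    #include <stdint.h>

    /* Illustrative layout of one PXA2xx DMA descriptor in guest memory. */
    typedef struct {
        uint32_t ddadr;  /* next descriptor address; DDADR_STOP/DDADR_BREN in low bits */
        uint32_t dsadr;  /* source address */
        uint32_t dtadr;  /* target address */
        uint32_t dcmd;   /* length, width/size fields and flow-control flags */
    } PXA2xxDMADescriptor;
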
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
index 9152841..b6ed1d4 100644
--- a/hw/dma/rc4030.c
+++ b/hw/dma/rc4030.c
@@ -32,7 +32,7 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "trace.h"
#include "qom/object.h"
@@ -701,13 +701,13 @@ static void rc4030_unrealize(DeviceState *dev)
object_unparent(OBJECT(&s->dma_mr));
}
-static void rc4030_class_init(ObjectClass *klass, void *class_data)
+static void rc4030_class_init(ObjectClass *klass, const void *class_data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = rc4030_realize;
dc->unrealize = rc4030_unrealize;
- dc->reset = rc4030_reset;
+ device_class_set_legacy_reset(dc, rc4030_reset);
dc->vmsd = &vmstate_rc4030;
}
@@ -720,7 +720,7 @@ static const TypeInfo rc4030_info = {
};
static void rc4030_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c
index 1dd88f3..48de3a2 100644
--- a/hw/dma/sifive_pdma.c
+++ b/hw/dma/sifive_pdma.c
@@ -28,7 +28,7 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/dma/sifive_pdma.h"
#define DMA_CONTROL 0x000
@@ -152,7 +152,6 @@ done:
error:
s->chan[ch].state = DMA_CHAN_STATE_ERROR;
s->chan[ch].control |= CONTROL_ERR;
- return;
}
static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
@@ -465,7 +464,7 @@ static void sifive_pdma_realize(DeviceState *dev, Error **errp)
}
}
-static void sifive_pdma_class_init(ObjectClass *klass, void *data)
+static void sifive_pdma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
index 8019641..60c23b6 100644
--- a/hw/dma/sparc32_dma.c
+++ b/hw/dma/sparc32_dma.c
@@ -32,7 +32,7 @@
#include "hw/sparc/sun4m_iommu.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "trace.h"
@@ -274,11 +274,11 @@ static void sparc32_dma_device_init(Object *obj)
qdev_init_gpio_out(dev, s->gpio, 2);
}
-static void sparc32_dma_device_class_init(ObjectClass *klass, void *data)
+static void sparc32_dma_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = sparc32_dma_device_reset;
+ device_class_set_legacy_reset(dc, sparc32_dma_device_reset);
dc->vmsd = &vmstate_sparc32_dma_device;
}
@@ -316,7 +316,8 @@ static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp)
sysbus_realize(SYS_BUS_DEVICE(sysbus), &error_fatal);
}
-static void sparc32_espdma_device_class_init(ObjectClass *klass, void *data)
+static void sparc32_espdma_device_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -351,7 +352,8 @@ static void sparc32_ledma_device_realize(DeviceState *dev, Error **errp)
sysbus_realize(SYS_BUS_DEVICE(lance), &error_fatal);
}
-static void sparc32_ledma_device_class_init(ObjectClass *klass, void *data)
+static void sparc32_ledma_device_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -426,7 +428,7 @@ static void sparc32_dma_init(Object *obj)
TYPE_SPARC32_LEDMA_DEVICE);
}
-static void sparc32_dma_class_init(ObjectClass *klass, void *data)
+static void sparc32_dma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index c9cfc31..2020399 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -33,7 +33,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/stream.h"
#include "qom/object.h"
#include "trace.h"
@@ -611,7 +611,7 @@ static void xilinx_axidma_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property axidma_properties[] = {
+static const Property axidma_properties[] = {
DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
@@ -619,15 +619,14 @@ static Property axidma_properties[] = {
tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
DEFINE_PROP_LINK("dma", XilinxAXIDMA, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void axidma_class_init(ObjectClass *klass, void *data)
+static void axidma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->realize = xilinx_axidma_realize,
- dc->reset = xilinx_axidma_reset;
+ dc->realize = xilinx_axidma_realize;
+ device_class_set_legacy_reset(dc, xilinx_axidma_reset);
device_class_set_props(dc, axidma_properties);
}
@@ -640,7 +639,8 @@ static StreamSinkClass xilinx_axidma_control_stream_class = {
.push = xilinx_axidma_control_stream_push,
};
-static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
+static void xilinx_axidma_stream_class_init(ObjectClass *klass,
+ const void *data)
{
StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
@@ -662,7 +662,7 @@ static const TypeInfo xilinx_axidma_data_stream_info = {
.instance_size = sizeof(XilinxAXIDMAStreamSink),
.class_init = xilinx_axidma_stream_class_init,
.class_data = &xilinx_axidma_data_stream_class,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_STREAM_SINK },
{ }
}
@@ -674,7 +674,7 @@ static const TypeInfo xilinx_axidma_control_stream_info = {
.instance_size = sizeof(XilinxAXIDMAStreamSink),
.class_init = xilinx_axidma_stream_class_init,
.class_data = &xilinx_axidma_control_stream_class,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_STREAM_SINK },
{ }
}
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
index 670c956..0c075e7 100644
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -810,18 +810,17 @@ static const VMStateDescription vmstate_zdma = {
}
};
-static Property zdma_props[] = {
+static const Property zdma_props[] = {
DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
DEFINE_PROP_LINK("dma", XlnxZDMA, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void zdma_class_init(ObjectClass *klass, void *data)
+static void zdma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = zdma_reset;
+ device_class_set_legacy_reset(dc, zdma_reset);
dc->realize = zdma_realize;
device_class_set_props(dc, zdma_props);
dc->vmsd = &vmstate_zdma;
diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c
index e901f68..2684571 100644
--- a/hw/dma/xlnx-zynq-devcfg.c
+++ b/hw/dma/xlnx-zynq-devcfg.c
@@ -29,7 +29,7 @@
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -380,11 +380,11 @@ static void xlnx_zynq_devcfg_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data)
+static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xlnx_zynq_devcfg_reset;
+ device_class_set_legacy_reset(dc, xlnx_zynq_devcfg_reset);
dc->vmsd = &vmstate_xlnx_zynq_devcfg;
}
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
index ae30748..d8c7da1 100644
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -25,7 +25,7 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/ptimer.h"
#include "hw/stream.h"
#include "hw/register.h"
@@ -287,7 +287,7 @@ static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
static void xlnx_csu_dma_src_notify(void *opaque)
{
XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
- unsigned char buf[4 * 1024];
+ QEMU_UNINITIALIZED unsigned char buf[4 * 1024];
size_t rlen = 0;
ptimer_transaction_begin(s->src_timer);
@@ -691,7 +691,7 @@ static const VMStateDescription vmstate_xlnx_csu_dma = {
}
};
-static Property xlnx_csu_dma_properties[] = {
+static const Property xlnx_csu_dma_properties[] = {
/*
* Ref PG021, Stream Data Width:
* Data width in bits of the AXI S2MM AXI4-Stream Data bus.
@@ -710,16 +710,15 @@ static Property xlnx_csu_dma_properties[] = {
TYPE_STREAM_SINK, StreamSink *),
DEFINE_PROP_LINK("dma", XlnxCSUDMA, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
+static void xlnx_csu_dma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass);
- dc->reset = xlnx_csu_dma_reset;
+ device_class_set_legacy_reset(dc, xlnx_csu_dma_reset);
dc->realize = xlnx_csu_dma_realize;
dc->vmsd = &vmstate_xlnx_csu_dma;
device_class_set_props(dc, xlnx_csu_dma_properties);
@@ -745,7 +744,7 @@ static const TypeInfo xlnx_csu_dma_info = {
.class_init = xlnx_csu_dma_class_init,
.class_size = sizeof(XlnxCSUDMAClass),
.instance_init = xlnx_csu_dma_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_STREAM_SINK },
{ }
}
diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c
index a685bd2..3d88ccc 100644
--- a/hw/dma/xlnx_dpdma.c
+++ b/hw/dma/xlnx_dpdma.c
@@ -593,12 +593,12 @@ static void xlnx_dpdma_reset(DeviceState *dev)
}
}
-static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
+static void xlnx_dpdma_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->vmsd = &vmstate_xlnx_dpdma;
- dc->reset = xlnx_dpdma_reset;
+ device_class_set_legacy_reset(dc, xlnx_dpdma_reset);
}
static const TypeInfo xlnx_dpdma_info = {
diff --git a/hw/fsi/aspeed_apb2opb.c b/hw/fsi/aspeed_apb2opb.c
index ea50718..172ba16 100644
--- a/hw/fsi/aspeed_apb2opb.c
+++ b/hw/fsi/aspeed_apb2opb.c
@@ -320,13 +320,13 @@ static void fsi_aspeed_apb2opb_reset(DeviceState *dev)
memcpy(s->regs, aspeed_apb2opb_reset, ASPEED_APB2OPB_NR_REGS);
}
-static void fsi_aspeed_apb2opb_class_init(ObjectClass *klass, void *data)
+static void fsi_aspeed_apb2opb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "ASPEED APB2OPB Bridge";
dc->realize = fsi_aspeed_apb2opb_realize;
- dc->reset = fsi_aspeed_apb2opb_reset;
+ device_class_set_legacy_reset(dc, fsi_aspeed_apb2opb_reset);
}
static const TypeInfo aspeed_apb2opb_info = {
diff --git a/hw/fsi/cfam.c b/hw/fsi/cfam.c
index c62f0f7..e2145c5 100644
--- a/hw/fsi/cfam.c
+++ b/hw/fsi/cfam.c
@@ -145,7 +145,7 @@ static void fsi_cfam_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(&cfam->lbus.mr, 0, &fsi_dev->iomem);
}
-static void fsi_cfam_class_init(ObjectClass *klass, void *data)
+static void fsi_cfam_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->bus_type = TYPE_FSI_BUS;
diff --git a/hw/fsi/fsi-master.c b/hw/fsi/fsi-master.c
index a5f0598..083a550 100644
--- a/hw/fsi/fsi-master.c
+++ b/hw/fsi/fsi-master.c
@@ -144,14 +144,14 @@ static void fsi_master_reset(DeviceState *dev)
s->regs[FSI_MVER] = 0xe0050101;
}
-static void fsi_master_class_init(ObjectClass *klass, void *data)
+static void fsi_master_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->bus_type = TYPE_OP_BUS;
dc->desc = "FSI Master";
dc->realize = fsi_master_realize;
- dc->reset = fsi_master_reset;
+ device_class_set_legacy_reset(dc, fsi_master_reset);
}
static const TypeInfo fsi_master_info = {
diff --git a/hw/fsi/fsi.c b/hw/fsi/fsi.c
index 9a5f4e6..6c52d5e 100644
--- a/hw/fsi/fsi.c
+++ b/hw/fsi/fsi.c
@@ -76,13 +76,13 @@ static void fsi_slave_init(Object *o)
s, TYPE_FSI_SLAVE, 0x400);
}
-static void fsi_slave_class_init(ObjectClass *klass, void *data)
+static void fsi_slave_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->bus_type = TYPE_FSI_BUS;
dc->desc = "FSI Slave";
- dc->reset = fsi_slave_reset;
+ device_class_set_legacy_reset(dc, fsi_slave_reset);
}
static const TypeInfo fsi_slave_info = {
diff --git a/hw/fsi/lbus.c b/hw/fsi/lbus.c
index 20495f4..8ec7f5f 100644
--- a/hw/fsi/lbus.c
+++ b/hw/fsi/lbus.c
@@ -91,13 +91,13 @@ static void fsi_scratchpad_reset(DeviceState *dev)
memset(s->regs, 0, sizeof(s->regs));
}
-static void fsi_scratchpad_class_init(ObjectClass *klass, void *data)
+static void fsi_scratchpad_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->bus_type = TYPE_FSI_LBUS;
dc->realize = fsi_scratchpad_realize;
- dc->reset = fsi_scratchpad_reset;
+ device_class_set_legacy_reset(dc, fsi_scratchpad_reset);
}
static const TypeInfo fsi_scratchpad_info = {
diff --git a/hw/gpio/Kconfig b/hw/gpio/Kconfig
index 19c97cc..a209294 100644
--- a/hw/gpio/Kconfig
+++ b/hw/gpio/Kconfig
@@ -1,7 +1,3 @@
-config MAX7310
- bool
- depends on I2C
-
config PL061
bool
@@ -20,6 +16,17 @@ config SIFIVE_GPIO
config STM32L4X5_GPIO
bool
+config PCA9552
+ bool
+ depends on I2C
+
+config PCA9554
+ bool
+ depends on I2C
+
config PCF8574
bool
depends on I2C
+
+config ZAURUS_SCOOP
+ bool
diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c
index 6474bb8..609a556 100644
--- a/hw/gpio/aspeed_gpio.c
+++ b/hw/gpio/aspeed_gpio.c
@@ -227,6 +227,38 @@ REG32(GPIO_INDEX_REG, 0x2AC)
FIELD(GPIO_INDEX_REG, COMMAND_SRC_1, 21, 1)
FIELD(GPIO_INDEX_REG, INPUT_MASK, 20, 1)
+/* AST2700 GPIO Register Address Offsets */
+REG32(GPIO_2700_DEBOUNCE_TIME_1, 0x000)
+REG32(GPIO_2700_DEBOUNCE_TIME_2, 0x004)
+REG32(GPIO_2700_DEBOUNCE_TIME_3, 0x008)
+REG32(GPIO_2700_INT_STATUS_1, 0x100)
+REG32(GPIO_2700_INT_STATUS_2, 0x104)
+REG32(GPIO_2700_INT_STATUS_3, 0x108)
+REG32(GPIO_2700_INT_STATUS_4, 0x10C)
+REG32(GPIO_2700_INT_STATUS_5, 0x110)
+REG32(GPIO_2700_INT_STATUS_6, 0x114)
+REG32(GPIO_2700_INT_STATUS_7, 0x118)
+/* GPIOA0 - GPIOAA7 Control Register */
+REG32(GPIO_A0_CONTROL, 0x180)
+ SHARED_FIELD(GPIO_CONTROL_OUT_DATA, 0, 1)
+ SHARED_FIELD(GPIO_CONTROL_DIRECTION, 1, 1)
+ SHARED_FIELD(GPIO_CONTROL_INT_ENABLE, 2, 1)
+ SHARED_FIELD(GPIO_CONTROL_INT_SENS_0, 3, 1)
+ SHARED_FIELD(GPIO_CONTROL_INT_SENS_1, 4, 1)
+ SHARED_FIELD(GPIO_CONTROL_INT_SENS_2, 5, 1)
+ SHARED_FIELD(GPIO_CONTROL_RESET_TOLERANCE, 6, 1)
+ SHARED_FIELD(GPIO_CONTROL_DEBOUNCE_1, 7, 1)
+ SHARED_FIELD(GPIO_CONTROL_DEBOUNCE_2, 8, 1)
+ SHARED_FIELD(GPIO_CONTROL_INPUT_MASK, 9, 1)
+ SHARED_FIELD(GPIO_CONTROL_BLINK_COUNTER_1, 10, 1)
+ SHARED_FIELD(GPIO_CONTROL_BLINK_COUNTER_2, 11, 1)
+ SHARED_FIELD(GPIO_CONTROL_INT_STATUS, 12, 1)
+ SHARED_FIELD(GPIO_CONTROL_IN_DATA, 13, 1)
+ SHARED_FIELD(GPIO_CONTROL_RESERVED, 14, 18)
+REG32(GPIO_AA7_CONTROL, 0x4DC)
+#define GPIO_2700_MEM_SIZE 0x4E0
+#define GPIO_2700_REG_ARRAY_SIZE (GPIO_2700_MEM_SIZE >> 2)
+
static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio)
{
uint32_t falling_edge = 0, rising_edge = 0;
@@ -281,7 +313,7 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs,
diff &= mode_mask;
if (diff) {
for (gpio = 0; gpio < ASPEED_GPIOS_PER_SET; gpio++) {
- uint32_t mask = 1 << gpio;
+ uint32_t mask = 1U << gpio;
/* If the gpio needs to be updated... */
if (!(diff & mask)) {
@@ -340,7 +372,8 @@ static void aspeed_gpio_set_pin_level(AspeedGPIOState *s, uint32_t set_idx,
value &= ~pin_mask;
}
- aspeed_gpio_update(s, &s->sets[set_idx], value, ~s->sets[set_idx].direction);
+ aspeed_gpio_update(s, &s->sets[set_idx], value,
+ ~s->sets[set_idx].direction);
}
/*
@@ -629,7 +662,6 @@ static uint64_t aspeed_gpio_read(void *opaque, hwaddr offset, uint32_t size)
static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset,
uint64_t data, uint32_t size)
{
-
AspeedGPIOState *s = ASPEED_GPIO(opaque);
AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);
const GPIOSetProperties *props;
@@ -641,7 +673,7 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset,
uint32_t pin_idx = reg_idx_number % ASPEED_GPIOS_PER_SET;
uint32_t group_idx = pin_idx / GPIOS_PER_GROUP;
uint32_t reg_value = 0;
- uint32_t cleared;
+ uint32_t pending = 0;
set = &s->sets[set_idx];
props = &agc->props[set_idx];
@@ -703,16 +735,23 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset,
FIELD_EX32(data, GPIO_INDEX_REG, INT_SENS_2));
set->int_sens_2 = update_value_control_source(set, set->int_sens_2,
reg_value);
- /* set interrupt status */
- reg_value = set->int_status;
- reg_value = deposit32(reg_value, pin_idx, 1,
- FIELD_EX32(data, GPIO_INDEX_REG, INT_STATUS));
- cleared = ctpop32(reg_value & set->int_status);
- if (s->pending && cleared) {
- assert(s->pending >= cleared);
- s->pending -= cleared;
+ /* interrupt status */
+ if (FIELD_EX32(data, GPIO_INDEX_REG, INT_STATUS)) {
+ /* pending is either 1 or 0 for a 1-bit field */
+ pending = extract32(set->int_status, pin_idx, 1);
+
+ assert(s->pending >= pending);
+
+ /* No change to s->pending if pending is 0 */
+ s->pending -= pending;
+
+ /*
+ * The write acknowledged the interrupt regardless of whether it
+ * was pending or not. The post-condition is that it mustn't be
+ * pending. Unconditionally clear the status bit.
+ */
+ set->int_status = deposit32(set->int_status, pin_idx, 1, 0);
}
- set->int_status &= ~reg_value;
break;
case gpio_reg_idx_debounce:
reg_value = set->debounce_1;
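
The rewritten interrupt-status handling acknowledges one pin at a time: a write only acts when the INT_STATUS bit in the written value is 1, the controller-wide s->pending counter is decremented by the pin's current pending bit (0 or 1), and the status bit is then cleared unconditionally. Condensed into a helper-shaped sketch (ack_int_status is an illustrative name; the patch inlines this logic here and again in the AST2700 control-register path further down):

    static void ack_int_status(AspeedGPIOState *s, GPIOSets *set, int pin_idx)
    {
        /* 1 if this pin's interrupt was actually pending, else 0 */
        uint32_t pending = extract32(set->int_status, pin_idx, 1);

        assert(s->pending >= pending);
        s->pending -= pending;      /* no-op when nothing was pending */

        /* the bit must end up clear whether or not it was pending */
        set->int_status = deposit32(set->int_status, pin_idx, 1, 0);
    }
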
@@ -761,7 +800,6 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset,
return;
}
aspeed_gpio_update(s, set, set->data_value, UINT32_MAX);
- return;
}
static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data,
@@ -889,7 +927,6 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data,
return;
}
aspeed_gpio_update(s, set, set->data_value, UINT32_MAX);
- return;
}
static int get_set_idx(AspeedGPIOState *s, const char *group, int *group_idx)
@@ -963,7 +1000,314 @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name,
aspeed_gpio_set_pin_level(s, set_idx, pin, level);
}
-/****************** Setup functions ******************/
+static uint64_t aspeed_gpio_2700_read_control_reg(AspeedGPIOState *s,
+ uint32_t pin)
+{
+ AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);
+ GPIOSets *set;
+ uint64_t value = 0;
+ uint32_t set_idx;
+ uint32_t pin_idx;
+
+ set_idx = pin / ASPEED_GPIOS_PER_SET;
+ pin_idx = pin % ASPEED_GPIOS_PER_SET;
+
+ if (set_idx >= agc->nr_gpio_sets) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: set index: %d, out of bounds\n",
+ __func__, set_idx);
+ return 0;
+ }
+
+ set = &s->sets[set_idx];
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_OUT_DATA,
+ extract32(set->data_read, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DIRECTION,
+ extract32(set->direction, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_ENABLE,
+ extract32(set->int_enable, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_0,
+ extract32(set->int_sens_0, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_1,
+ extract32(set->int_sens_1, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_2,
+ extract32(set->int_sens_2, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_RESET_TOLERANCE,
+ extract32(set->reset_tol, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DEBOUNCE_1,
+ extract32(set->debounce_1, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DEBOUNCE_2,
+ extract32(set->debounce_2, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INPUT_MASK,
+ extract32(set->input_mask, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_STATUS,
+ extract32(set->int_status, pin_idx, 1));
+ value = SHARED_FIELD_DP32(value, GPIO_CONTROL_IN_DATA,
+ extract32(set->data_value, pin_idx, 1));
+ return value;
+}
+
+static void aspeed_gpio_2700_write_control_reg(AspeedGPIOState *s,
+ uint32_t pin, uint64_t data)
+{
+ AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);
+ const GPIOSetProperties *props;
+ GPIOSets *set;
+ uint32_t set_idx;
+ uint32_t pin_idx;
+ uint32_t group_value = 0;
+ uint32_t pending = 0;
+
+ set_idx = pin / ASPEED_GPIOS_PER_SET;
+ pin_idx = pin % ASPEED_GPIOS_PER_SET;
+
+ if (set_idx >= agc->nr_gpio_sets) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: set index: %d, out of bounds\n",
+ __func__, set_idx);
+ return;
+ }
+
+ set = &s->sets[set_idx];
+ props = &agc->props[set_idx];
+
+ /* direction */
+ group_value = set->direction;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_DIRECTION));
+ /*
+ * where data is the value attempted to be written to the pin:
+ * pin type | input mask | output mask | expected value
+ * ------------------------------------------------------------
+ * bidirectional | 1 | 1 | data
+ * input only | 1 | 0 | 0
+ * output only | 0 | 1 | 1
+ * no pin | 0 | 0 | 0
+ *
+ * which is captured by:
+ * data = ( data | ~input) & output;
+ */
+ group_value = (group_value | ~props->input) & props->output;
+ set->direction = update_value_control_source(set, set->direction,
+ group_value);
+
+ /* out data */
+ group_value = set->data_read;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_OUT_DATA));
+ group_value &= props->output;
+ group_value = update_value_control_source(set, set->data_read,
+ group_value);
+ set->data_read = group_value;
+
+ /* interrupt enable */
+ group_value = set->int_enable;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_ENABLE));
+ set->int_enable = update_value_control_source(set, set->int_enable,
+ group_value);
+
+ /* interrupt sensitivity type 0 */
+ group_value = set->int_sens_0;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_0));
+ set->int_sens_0 = update_value_control_source(set, set->int_sens_0,
+ group_value);
+
+ /* interrupt sensitivity type 1 */
+ group_value = set->int_sens_1;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_1));
+ set->int_sens_1 = update_value_control_source(set, set->int_sens_1,
+ group_value);
+
+ /* interrupt sensitivity type 2 */
+ group_value = set->int_sens_2;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_2));
+ set->int_sens_2 = update_value_control_source(set, set->int_sens_2,
+ group_value);
+
+ /* reset tolerance enable */
+ group_value = set->reset_tol;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_RESET_TOLERANCE));
+ set->reset_tol = update_value_control_source(set, set->reset_tol,
+ group_value);
+
+ /* debounce 1 */
+ group_value = set->debounce_1;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_DEBOUNCE_1));
+ set->debounce_1 = update_value_control_source(set, set->debounce_1,
+ group_value);
+
+ /* debounce 2 */
+ group_value = set->debounce_2;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_DEBOUNCE_2));
+ set->debounce_2 = update_value_control_source(set, set->debounce_2,
+ group_value);
+
+ /* input mask */
+ group_value = set->input_mask;
+ group_value = deposit32(group_value, pin_idx, 1,
+ SHARED_FIELD_EX32(data, GPIO_CONTROL_INPUT_MASK));
+ /*
+ * feeds into interrupt generation
+ * 0: read from data value reg will be updated
+ * 1: read from data value reg will not be updated
+ */
+ set->input_mask = group_value & props->input;
+
+ /* blink counter 1 */
+ /* blink counter 2 */
+    /* unimplemented */
+
+ /* interrupt status */
+ if (SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_STATUS)) {
+ /* pending is either 1 or 0 for a 1-bit field */
+ pending = extract32(set->int_status, pin_idx, 1);
+
+ assert(s->pending >= pending);
+
+ /* No change to s->pending if pending is 0 */
+ s->pending -= pending;
+
+ /*
+ * The write acknowledged the interrupt regardless of whether it
+ * was pending or not. The post-condition is that it mustn't be
+ * pending. Unconditionally clear the status bit.
+ */
+ set->int_status = deposit32(set->int_status, pin_idx, 1, 0);
+ }
+
+ aspeed_gpio_update(s, set, set->data_value, UINT32_MAX);
+}
+
+static uint64_t aspeed_gpio_2700_read(void *opaque, hwaddr offset,
+ uint32_t size)
+{
+ AspeedGPIOState *s = ASPEED_GPIO(opaque);
+ AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);
+ GPIOSets *set;
+ uint64_t value;
+ uint64_t reg;
+ uint32_t pin;
+ uint32_t idx;
+
+ reg = offset >> 2;
+
+ if (reg >= agc->reg_table_count) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%" PRIx64 " out of bounds\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (reg) {
+ case R_GPIO_2700_DEBOUNCE_TIME_1 ... R_GPIO_2700_DEBOUNCE_TIME_3:
+ idx = reg - R_GPIO_2700_DEBOUNCE_TIME_1;
+
+ if (idx >= ASPEED_GPIO_NR_DEBOUNCE_REGS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: debounce index: %d, out of bounds\n",
+ __func__, idx);
+ return 0;
+ }
+
+ value = (uint64_t) s->debounce_regs[idx];
+ break;
+ case R_GPIO_2700_INT_STATUS_1 ... R_GPIO_2700_INT_STATUS_7:
+ idx = reg - R_GPIO_2700_INT_STATUS_1;
+
+ if (idx >= agc->nr_gpio_sets) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: interrupt status index: %d, out of bounds\n",
+ __func__, idx);
+ return 0;
+ }
+
+ set = &s->sets[idx];
+ value = (uint64_t) set->int_status;
+ break;
+ case R_GPIO_A0_CONTROL ... R_GPIO_AA7_CONTROL:
+ pin = reg - R_GPIO_A0_CONTROL;
+
+ if (pin >= agc->nr_gpio_pins) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid pin number: %d\n",
+ __func__, pin);
+ return 0;
+ }
+
+ value = aspeed_gpio_2700_read_control_reg(s, pin);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%"
+ PRIx64"\n", __func__, offset);
+ return 0;
+ }
+
+ trace_aspeed_gpio_read(offset, value);
+ return value;
+}
+
+static void aspeed_gpio_2700_write(void *opaque, hwaddr offset,
+ uint64_t data, uint32_t size)
+{
+ AspeedGPIOState *s = ASPEED_GPIO(opaque);
+ AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);
+ uint64_t reg;
+ uint32_t pin;
+ uint32_t idx;
+
+ trace_aspeed_gpio_write(offset, data);
+
+ reg = offset >> 2;
+
+ if (reg >= agc->reg_table_count) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%" PRIx64 " out of bounds\n",
+ __func__, offset);
+ return;
+ }
+
+ switch (reg) {
+ case R_GPIO_2700_DEBOUNCE_TIME_1 ... R_GPIO_2700_DEBOUNCE_TIME_3:
+ idx = reg - R_GPIO_2700_DEBOUNCE_TIME_1;
+
+ if (idx >= ASPEED_GPIO_NR_DEBOUNCE_REGS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: debounce index: %d out of bounds\n",
+ __func__, idx);
+ return;
+ }
+
+ s->debounce_regs[idx] = (uint32_t) data;
+ break;
+ case R_GPIO_A0_CONTROL ... R_GPIO_AA7_CONTROL:
+ pin = reg - R_GPIO_A0_CONTROL;
+
+ if (pin >= agc->nr_gpio_pins) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid pin number: %d\n",
+ __func__, pin);
+ return;
+ }
+
+ if (SHARED_FIELD_EX32(data, GPIO_CONTROL_RESERVED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid reserved data: 0x%"
+ PRIx64"\n", __func__, data);
+ return;
+ }
+
+ aspeed_gpio_2700_write_control_reg(s, pin, data);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%"
+ PRIx64"\n", __func__, offset);
+ break;
+ }
+}
+
+/* Setup functions */
static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = {
[0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} },
[1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} },
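
The truth table in the direction comment of aspeed_gpio_2700_write_control_reg() above reduces to a single per-bit clamp that every direction write goes through. Restated as a tiny helper for clarity (clamp_direction is an illustrative name; the patch inlines the expression), with the four pin types evaluated per bit:

    static uint32_t clamp_direction(uint32_t data, uint32_t input, uint32_t output)
    {
        return (data | ~input) & output;
    }

    /* per pin bit:
     *   bidirectional: input=1, output=1 -> result follows data
     *   input only:    input=1, output=0 -> result forced to 0
     *   output only:   input=0, output=1 -> result forced to 1
     *   no pin:        input=0, output=0 -> result forced to 0
     */
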
@@ -1009,6 +1353,16 @@ static GPIOSetProperties ast1030_set_props[ASPEED_GPIO_MAX_NR_SETS] = {
[5] = {0x000000ff, 0x00000000, {"U"} },
};
+static GPIOSetProperties ast2700_set_props[ASPEED_GPIO_MAX_NR_SETS] = {
+ [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} },
+ [1] = {0x0fffffff, 0x0fffffff, {"E", "F", "G", "H"} },
+ [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} },
+ [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} },
+ [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} },
+ [5] = {0xffffffff, 0xffffffff, {"U", "V", "W", "X"} },
+ [6] = {0x00ffffff, 0x00ffffff, {"Y", "Z", "AA"} },
+};
+
static const MemoryRegionOps aspeed_gpio_ops = {
.read = aspeed_gpio_read,
.write = aspeed_gpio_write,
@@ -1017,6 +1371,14 @@ static const MemoryRegionOps aspeed_gpio_ops = {
.valid.max_access_size = 4,
};
+static const MemoryRegionOps aspeed_gpio_2700_ops = {
+ .read = aspeed_gpio_2700_read,
+ .write = aspeed_gpio_2700_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
static void aspeed_gpio_reset(DeviceState *dev)
{
AspeedGPIOState *s = ASPEED_GPIO(dev);
@@ -1046,8 +1408,8 @@ static void aspeed_gpio_realize(DeviceState *dev, Error **errp)
}
}
- memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s,
- TYPE_ASPEED_GPIO, 0x800);
+ memory_region_init_io(&s->iomem, OBJECT(s), agc->reg_ops, s,
+ TYPE_ASPEED_GPIO, agc->mem_size);
sysbus_init_mmio(sbd, &s->iomem);
}
@@ -1111,17 +1473,17 @@ static const VMStateDescription vmstate_aspeed_gpio = {
}
};
-static void aspeed_gpio_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_gpio_realize;
- dc->reset = aspeed_gpio_reset;
+ device_class_set_legacy_reset(dc, aspeed_gpio_reset);
dc->desc = "Aspeed GPIO Controller";
dc->vmsd = &vmstate_aspeed_gpio;
}
-static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, const void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
@@ -1130,9 +1492,11 @@ static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_sets = 7;
agc->reg_table = aspeed_3_3v_gpios;
agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE;
+ agc->mem_size = 0x1000;
+ agc->reg_ops = &aspeed_gpio_ops;
}
-static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_2500_class_init(ObjectClass *klass, const void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
@@ -1141,9 +1505,12 @@ static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_sets = 8;
agc->reg_table = aspeed_3_3v_gpios;
agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE;
+ agc->mem_size = 0x1000;
+ agc->reg_ops = &aspeed_gpio_ops;
}
-static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass,
+ const void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
@@ -1152,9 +1519,12 @@ static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_sets = 7;
agc->reg_table = aspeed_3_3v_gpios;
agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE;
+ agc->mem_size = 0x800;
+ agc->reg_ops = &aspeed_gpio_ops;
}
-static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass,
+ const void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
@@ -1163,9 +1533,11 @@ static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_sets = 2;
agc->reg_table = aspeed_1_8v_gpios;
agc->reg_table_count = GPIO_1_8V_REG_ARRAY_SIZE;
+ agc->mem_size = 0x800;
+ agc->reg_ops = &aspeed_gpio_ops;
}
-static void aspeed_gpio_1030_class_init(ObjectClass *klass, void *data)
+static void aspeed_gpio_1030_class_init(ObjectClass *klass, const void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
@@ -1174,6 +1546,20 @@ static void aspeed_gpio_1030_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_sets = 6;
agc->reg_table = aspeed_3_3v_gpios;
agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE;
+ agc->mem_size = 0x1000;
+ agc->reg_ops = &aspeed_gpio_ops;
+}
+
+static void aspeed_gpio_2700_class_init(ObjectClass *klass, const void *data)
+{
+ AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
+
+ agc->props = ast2700_set_props;
+ agc->nr_gpio_pins = 216;
+ agc->nr_gpio_sets = 7;
+ agc->reg_table_count = GPIO_2700_REG_ARRAY_SIZE;
+ agc->mem_size = 0x1000;
+ agc->reg_ops = &aspeed_gpio_2700_ops;
}
static const TypeInfo aspeed_gpio_info = {
@@ -1220,6 +1606,13 @@ static const TypeInfo aspeed_gpio_ast1030_info = {
.instance_init = aspeed_gpio_init,
};
+static const TypeInfo aspeed_gpio_ast2700_info = {
+ .name = TYPE_ASPEED_GPIO "-ast2700",
+ .parent = TYPE_ASPEED_GPIO,
+ .class_init = aspeed_gpio_2700_class_init,
+ .instance_init = aspeed_gpio_init,
+};
+
static void aspeed_gpio_register_types(void)
{
type_register_static(&aspeed_gpio_info);
@@ -1228,6 +1621,7 @@ static void aspeed_gpio_register_types(void)
type_register_static(&aspeed_gpio_ast2600_3_3v_info);
type_register_static(&aspeed_gpio_ast2600_1_8v_info);
type_register_static(&aspeed_gpio_ast1030_info);
+ type_register_static(&aspeed_gpio_ast2700_info);
}
type_init(aspeed_gpio_register_types);
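
For reference, the AST2700 wiring above reduces to a small class-level indirection: each SoC-specific class_init publishes its register accessors and MMIO window size, and the shared realize consumes them. A minimal sketch under that assumption (the real AspeedGPIOClass carries more fields than shown here):

static void aspeed_gpio_2700_class_init_sketch(ObjectClass *klass,
                                               const void *data)
{
    AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);

    agc->mem_size = 0x1000;                 /* per-SoC MMIO window size */
    agc->reg_ops  = &aspeed_gpio_2700_ops;  /* AST2700 register accessors */
}

static void aspeed_gpio_realize_sketch(DeviceState *dev, Error **errp)
{
    AspeedGPIOState *s = ASPEED_GPIO(dev);
    AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s);

    /* The class, not a hard-coded constant, now selects the ops. */
    memory_region_init_io(&s->iomem, OBJECT(s), agc->reg_ops, s,
                          TYPE_ASPEED_GPIO, agc->mem_size);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
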
diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c
index 6bd50bb..dfb5d5c 100644
--- a/hw/gpio/bcm2835_gpio.c
+++ b/hw/gpio/bcm2835_gpio.c
@@ -319,13 +319,13 @@ static void bcm2835_gpio_realize(DeviceState *dev, Error **errp)
s->sdbus_sdhost = SD_BUS(obj);
}
-static void bcm2835_gpio_class_init(ObjectClass *klass, void *data)
+static void bcm2835_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_bcm2835_gpio;
dc->realize = &bcm2835_gpio_realize;
- dc->reset = &bcm2835_gpio_reset;
+ device_class_set_legacy_reset(dc, bcm2835_gpio_reset);
}
static const TypeInfo bcm2835_gpio_info = {
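
The dc->reset conversions repeated across this series all follow the same shape: the legacy DeviceState reset handler is registered through device_class_set_legacy_reset(), which wraps it for the Resettable framework instead of assigning the deprecated field directly. A hedged sketch with placeholder names:

static void mydev_reset(DeviceState *dev)
{
    /* return the device to its power-on state */
}

static void mydev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* was: dc->reset = mydev_reset; */
    device_class_set_legacy_reset(dc, mydev_reset);
}
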
diff --git a/hw/gpio/bcm2838_gpio.c b/hw/gpio/bcm2838_gpio.c
index 2ddf62f..1069e78 100644
--- a/hw/gpio/bcm2838_gpio.c
+++ b/hw/gpio/bcm2838_gpio.c
@@ -293,7 +293,6 @@ static void bcm2838_gpio_write(void *opaque, hwaddr offset, uint64_t value,
qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: bad offset %"HWADDR_PRIx"\n",
TYPE_BCM2838_GPIO, __func__, offset);
}
- return;
}
static void bcm2838_gpio_reset(DeviceState *dev)
@@ -365,13 +364,13 @@ static void bcm2838_gpio_realize(DeviceState *dev, Error **errp)
s->sdbus_sdhost = SD_BUS(obj);
}
-static void bcm2838_gpio_class_init(ObjectClass *klass, void *data)
+static void bcm2838_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_bcm2838_gpio;
dc->realize = &bcm2838_gpio_realize;
- dc->reset = &bcm2838_gpio_reset;
+ device_class_set_legacy_reset(dc, bcm2838_gpio_reset);
}
static const TypeInfo bcm2838_gpio_info = {
diff --git a/hw/gpio/gpio_key.c b/hw/gpio/gpio_key.c
index 61bb587..40c028b 100644
--- a/hw/gpio/gpio_key.c
+++ b/hw/gpio/gpio_key.c
@@ -85,13 +85,13 @@ static void gpio_key_realize(DeviceState *dev, Error **errp)
s->timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, gpio_key_timer_expired, s);
}
-static void gpio_key_class_init(ObjectClass *klass, void *data)
+static void gpio_key_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = gpio_key_realize;
dc->vmsd = &vmstate_gpio_key;
- dc->reset = &gpio_key_reset;
+ device_class_set_legacy_reset(dc, gpio_key_reset);
}
static const TypeInfo gpio_key_info = {
diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
index dbaf1c7..2d14f8b 100644
--- a/hw/gpio/gpio_pwr.c
+++ b/hw/gpio/gpio_pwr.c
@@ -24,7 +24,7 @@
#include "qemu/osdep.h"
#include "hw/sysbus.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#define TYPE_GPIOPWR "gpio-pwr"
OBJECT_DECLARE_SIMPLE_TYPE(GPIO_PWR_State, GPIOPWR)
diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c
index e53b00d..450ece4 100644
--- a/hw/gpio/imx_gpio.c
+++ b/hw/gpio/imx_gpio.c
@@ -24,6 +24,7 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
+#include "trace.h"
#ifndef DEBUG_IMX_GPIO
#define DEBUG_IMX_GPIO 0
@@ -34,14 +35,6 @@ typedef enum IMXGPIOLevel {
IMX_GPIO_LEVEL_HIGH = 1,
} IMXGPIOLevel;
-#define DPRINTF(fmt, args...) \
- do { \
- if (DEBUG_IMX_GPIO) { \
- fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPIO, \
- __func__, ##args); \
- } \
- } while (0)
-
static const char *imx_gpio_reg_name(uint32_t reg)
{
switch (reg) {
@@ -79,7 +72,7 @@ static void imx_gpio_update_int(IMXGPIOState *s)
static void imx_gpio_set_int_line(IMXGPIOState *s, int line, IMXGPIOLevel level)
{
/* if this signal isn't configured as an input signal, nothing to do */
- if (!extract32(s->gdir, line, 1)) {
+ if (extract32(s->gdir, line, 1)) {
return;
}
@@ -111,6 +104,8 @@ static void imx_gpio_set(void *opaque, int line, int level)
IMXGPIOState *s = IMX_GPIO(opaque);
IMXGPIOLevel imx_level = level ? IMX_GPIO_LEVEL_HIGH : IMX_GPIO_LEVEL_LOW;
+ trace_imx_gpio_set(DEVICE(s)->canonical_path, line, imx_level);
+
imx_gpio_set_int_line(s, line, imx_level);
/* this is an input signal, so set PSR */
@@ -200,7 +195,8 @@ static uint64_t imx_gpio_read(void *opaque, hwaddr offset, unsigned size)
break;
}
- DPRINTF("(%s) = 0x%" PRIx32 "\n", imx_gpio_reg_name(offset), reg_value);
+ trace_imx_gpio_read(DEVICE(s)->canonical_path, imx_gpio_reg_name(offset),
+ reg_value);
return reg_value;
}
@@ -210,8 +206,8 @@ static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value,
{
IMXGPIOState *s = IMX_GPIO(opaque);
- DPRINTF("(%s, value = 0x%" PRIx32 ")\n", imx_gpio_reg_name(offset),
- (uint32_t)value);
+ trace_imx_gpio_write(DEVICE(s)->canonical_path, imx_gpio_reg_name(offset),
+ value);
switch (offset) {
case DR_ADDR:
@@ -261,8 +257,6 @@ static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value,
HWADDR_PRIx "\n", TYPE_IMX_GPIO, __func__, offset);
break;
}
-
- return;
}
static const MemoryRegionOps imx_gpio_ops = {
@@ -290,11 +284,10 @@ static const VMStateDescription vmstate_imx_gpio = {
}
};
-static Property imx_gpio_properties[] = {
+static const Property imx_gpio_properties[] = {
DEFINE_PROP_BOOL("has-edge-sel", IMXGPIOState, has_edge_sel, true),
DEFINE_PROP_BOOL("has-upper-pin-irq", IMXGPIOState, has_upper_pin_irq,
false),
- DEFINE_PROP_END_OF_LIST(),
};
static void imx_gpio_reset(DeviceState *dev)
@@ -328,12 +321,12 @@ static void imx_gpio_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
-static void imx_gpio_class_init(ObjectClass *klass, void *data)
+static void imx_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_gpio_realize;
- dc->reset = imx_gpio_reset;
+ device_class_set_legacy_reset(dc, imx_gpio_reset);
device_class_set_props(dc, imx_gpio_properties);
dc->vmsd = &vmstate_imx_gpio;
dc->desc = "i.MX GPIO controller";
diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c
deleted file mode 100644
index 8631571..0000000
--- a/hw/gpio/max7310.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * MAX7310 8-port GPIO expansion chip.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This file is licensed under GNU GPL.
- */
-
-#include "qemu/osdep.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-#define TYPE_MAX7310 "max7310"
-OBJECT_DECLARE_SIMPLE_TYPE(MAX7310State, MAX7310)
-
-struct MAX7310State {
- I2CSlave parent_obj;
-
- int i2c_command_byte;
- int len;
-
- uint8_t level;
- uint8_t direction;
- uint8_t polarity;
- uint8_t status;
- uint8_t command;
- qemu_irq handler[8];
- qemu_irq *gpio_in;
-};
-
-static void max7310_reset(DeviceState *dev)
-{
- MAX7310State *s = MAX7310(dev);
-
- s->level &= s->direction;
- s->direction = 0xff;
- s->polarity = 0xf0;
- s->status = 0x01;
- s->command = 0x00;
-}
-
-static uint8_t max7310_rx(I2CSlave *i2c)
-{
- MAX7310State *s = MAX7310(i2c);
-
- switch (s->command) {
- case 0x00: /* Input port */
- return s->level ^ s->polarity;
-
- case 0x01: /* Output port */
- return s->level & ~s->direction;
-
- case 0x02: /* Polarity inversion */
- return s->polarity;
-
- case 0x03: /* Configuration */
- return s->direction;
-
- case 0x04: /* Timeout */
- return s->status;
-
- case 0xff: /* Reserved */
- return 0xff;
-
- default:
- qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n",
- __func__, s->command);
- break;
- }
- return 0xff;
-}
-
-static int max7310_tx(I2CSlave *i2c, uint8_t data)
-{
- MAX7310State *s = MAX7310(i2c);
- uint8_t diff;
- int line;
-
- if (s->len ++ > 1) {
-#ifdef VERBOSE
- printf("%s: message too long (%i bytes)\n", __func__, s->len);
-#endif
- return 1;
- }
-
- if (s->i2c_command_byte) {
- s->command = data;
- s->i2c_command_byte = 0;
- return 0;
- }
-
- switch (s->command) {
- case 0x01: /* Output port */
- for (diff = (data ^ s->level) & ~s->direction; diff;
- diff &= ~(1 << line)) {
- line = ctz32(diff);
- if (s->handler[line])
- qemu_set_irq(s->handler[line], (data >> line) & 1);
- }
- s->level = (s->level & s->direction) | (data & ~s->direction);
- break;
-
- case 0x02: /* Polarity inversion */
- s->polarity = data;
- break;
-
- case 0x03: /* Configuration */
- s->level &= ~(s->direction ^ data);
- s->direction = data;
- break;
-
- case 0x04: /* Timeout */
- s->status = data;
- break;
-
- case 0x00: /* Input port - ignore writes */
- break;
- default:
- qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n",
- __func__, s->command);
- return 1;
- }
-
- return 0;
-}
-
-static int max7310_event(I2CSlave *i2c, enum i2c_event event)
-{
- MAX7310State *s = MAX7310(i2c);
- s->len = 0;
-
- switch (event) {
- case I2C_START_SEND:
- s->i2c_command_byte = 1;
- break;
- case I2C_FINISH:
-#ifdef VERBOSE
- if (s->len == 1)
- printf("%s: message too short (%i bytes)\n", __func__, s->len);
-#endif
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static const VMStateDescription vmstate_max7310 = {
- .name = "max7310",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(i2c_command_byte, MAX7310State),
- VMSTATE_INT32(len, MAX7310State),
- VMSTATE_UINT8(level, MAX7310State),
- VMSTATE_UINT8(direction, MAX7310State),
- VMSTATE_UINT8(polarity, MAX7310State),
- VMSTATE_UINT8(status, MAX7310State),
- VMSTATE_UINT8(command, MAX7310State),
- VMSTATE_I2C_SLAVE(parent_obj, MAX7310State),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void max7310_gpio_set(void *opaque, int line, int level)
-{
- MAX7310State *s = (MAX7310State *) opaque;
- assert(line >= 0 && line < ARRAY_SIZE(s->handler));
-
- if (level)
- s->level |= s->direction & (1 << line);
- else
- s->level &= ~(s->direction & (1 << line));
-}
-
-/* MAX7310 is SMBus-compatible (can be used with only SMBus protocols),
- * but also accepts sequences that are not SMBus so return an I2C device. */
-static void max7310_realize(DeviceState *dev, Error **errp)
-{
- MAX7310State *s = MAX7310(dev);
-
- qdev_init_gpio_in(dev, max7310_gpio_set, ARRAY_SIZE(s->handler));
- qdev_init_gpio_out(dev, s->handler, ARRAY_SIZE(s->handler));
-}
-
-static void max7310_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
-
- dc->realize = max7310_realize;
- k->event = max7310_event;
- k->recv = max7310_rx;
- k->send = max7310_tx;
- dc->reset = max7310_reset;
- dc->vmsd = &vmstate_max7310;
-}
-
-static const TypeInfo max7310_info = {
- .name = TYPE_MAX7310,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(MAX7310State),
- .class_init = max7310_class_init,
-};
-
-static void max7310_register_types(void)
-{
- type_register_static(&max7310_info);
-}
-
-type_init(max7310_register_types)
diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build
index a7495d1..7484061 100644
--- a/hw/gpio/meson.build
+++ b/hw/gpio/meson.build
@@ -1,11 +1,10 @@
system_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c'))
system_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c'))
system_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c'))
-system_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c'))
system_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c'))
system_ss.add(when: 'CONFIG_PCA9554', if_true: files('pca9554.c'))
system_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c'))
-system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c'))
+system_ss.add(when: 'CONFIG_ZAURUS_SCOOP', if_true: files('zaurus.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c'))
diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c
index 0b3f9e5..257497a 100644
--- a/hw/gpio/mpc8xxx.c
+++ b/hw/gpio/mpc8xxx.c
@@ -23,7 +23,6 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "qemu/module.h"
#include "qom/object.h"
#define TYPE_MPC8XXX_GPIO "mpc8xxx_gpio"
@@ -200,25 +199,22 @@ static void mpc8xxx_gpio_initfn(Object *obj)
qdev_init_gpio_out(dev, s->out, 32);
}
-static void mpc8xxx_gpio_class_init(ObjectClass *klass, void *data)
+static void mpc8xxx_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_mpc8xxx_gpio;
- dc->reset = mpc8xxx_gpio_reset;
+ device_class_set_legacy_reset(dc, mpc8xxx_gpio_reset);
}
-static const TypeInfo mpc8xxx_gpio_info = {
- .name = TYPE_MPC8XXX_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(MPC8XXXGPIOState),
- .instance_init = mpc8xxx_gpio_initfn,
- .class_init = mpc8xxx_gpio_class_init,
+static const TypeInfo mpc8xxx_gpio_types[] = {
+ {
+ .name = TYPE_MPC8XXX_GPIO,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MPC8XXXGPIOState),
+ .instance_init = mpc8xxx_gpio_initfn,
+ .class_init = mpc8xxx_gpio_class_init,
+ },
};
-static void mpc8xxx_gpio_register_types(void)
-{
- type_register_static(&mpc8xxx_gpio_info);
-}
-
-type_init(mpc8xxx_gpio_register_types)
+DEFINE_TYPES(mpc8xxx_gpio_types)
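
DEFINE_TYPES() is the pattern the mpc8xxx change switches to: a const TypeInfo array plus one macro replaces the hand-written register_types()/type_init() pair. A minimal sketch with placeholder names (TYPE_MYDEV, MyDevState are illustrative, not from this patch):

#define TYPE_MYDEV "mydev"

typedef struct MyDevState {
    SysBusDevice parent_obj;
} MyDevState;

static void mydev_class_init(ObjectClass *klass, const void *data)
{
    /* set DeviceClass hooks here */
}

static const TypeInfo mydev_types[] = {
    {
        .name          = TYPE_MYDEV,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(MyDevState),
        .class_init    = mydev_class_init,
    },
};

/* Expands to a type_init() function registering every entry of the array. */
DEFINE_TYPES(mydev_types)
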
diff --git a/hw/gpio/npcm7xx_gpio.c b/hw/gpio/npcm7xx_gpio.c
index ba19b9e..66f8256 100644
--- a/hw/gpio/npcm7xx_gpio.c
+++ b/hw/gpio/npcm7xx_gpio.c
@@ -220,8 +220,6 @@ static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v,
return;
}
- diff = s->regs[reg] ^ value;
-
switch (reg) {
case NPCM7XX_GPIO_TLOCK1:
case NPCM7XX_GPIO_TLOCK2:
@@ -242,6 +240,7 @@ static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v,
case NPCM7XX_GPIO_PU:
case NPCM7XX_GPIO_PD:
case NPCM7XX_GPIO_IEM:
+ diff = s->regs[reg] ^ value;
s->regs[reg] = value;
npcm7xx_gpio_update_pins(s, diff);
break;
@@ -386,7 +385,7 @@ static const VMStateDescription vmstate_npcm7xx_gpio = {
},
};
-static Property npcm7xx_gpio_properties[] = {
+static const Property npcm7xx_gpio_properties[] = {
/* Bit n set => pin n has pullup enabled by default. */
DEFINE_PROP_UINT32("reset-pullup", NPCM7xxGPIOState, reset_pu, 0),
/* Bit n set => pin n has pulldown enabled by default. */
@@ -395,10 +394,9 @@ static Property npcm7xx_gpio_properties[] = {
DEFINE_PROP_UINT32("reset-osrc", NPCM7xxGPIOState, reset_osrc, 0),
/* Bit n set => pin n has high drive strength by default. */
DEFINE_PROP_UINT32("reset-odsc", NPCM7xxGPIOState, reset_odsc, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void npcm7xx_gpio_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_gpio_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *reset = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
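
The Property conversions in this and the other files share one idea: the array becomes const and loses its DEFINE_PROP_END_OF_LIST() sentinel, since device_class_set_props() in current QEMU trees picks up the array length via ARRAY_SIZE. A hedged sketch with placeholder names:

typedef struct MyDevState {
    SysBusDevice parent_obj;
    uint32_t reset_pu;
    bool has_edge_sel;
} MyDevState;

static const Property mydev_properties[] = {
    DEFINE_PROP_UINT32("reset-pullup", MyDevState, reset_pu, 0),
    DEFINE_PROP_BOOL("has-edge-sel", MyDevState, has_edge_sel, true),
};

static void mydev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* No end-of-list marker: the macro takes the array length itself. */
    device_class_set_props(dc, mydev_properties);
}
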
diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c
index ffc7dff..d94c0c4 100644
--- a/hw/gpio/nrf51_gpio.c
+++ b/hw/gpio/nrf51_gpio.c
@@ -40,7 +40,6 @@ static bool is_connected(uint32_t config, uint32_t level)
break;
default:
g_assert_not_reached();
- break;
}
return state;
@@ -305,12 +304,12 @@ static void nrf51_gpio_init(Object *obj)
qdev_init_gpio_out_named(DEVICE(s), &s->detect, "detect", 1);
}
-static void nrf51_gpio_class_init(ObjectClass *klass, void *data)
+static void nrf51_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_nrf51_gpio;
- dc->reset = nrf51_gpio_reset;
+ device_class_set_legacy_reset(dc, nrf51_gpio_reset);
dc->desc = "nRF51 GPIO";
}
diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c
index a3341d7..f27806b 100644
--- a/hw/gpio/omap_gpio.c
+++ b/hw/gpio/omap_gpio.c
@@ -80,25 +80,25 @@ static uint64_t omap_gpio_read(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* DATA_INPUT */
+ case 0x00: /* DATA_INPUT */
return s->inputs & s->pins;
- case 0x04: /* DATA_OUTPUT */
+ case 0x04: /* DATA_OUTPUT */
return s->outputs;
- case 0x08: /* DIRECTION_CONTROL */
+ case 0x08: /* DIRECTION_CONTROL */
return s->dir;
- case 0x0c: /* INTERRUPT_CONTROL */
+ case 0x0c: /* INTERRUPT_CONTROL */
return s->edge;
- case 0x10: /* INTERRUPT_MASK */
+ case 0x10: /* INTERRUPT_MASK */
return s->mask;
- case 0x14: /* INTERRUPT_STATUS */
+ case 0x14: /* INTERRUPT_STATUS */
return s->ints;
- case 0x18: /* PIN_CONTROL (not in OMAP310) */
+ case 0x18: /* PIN_CONTROL (not in OMAP310) */
OMAP_BAD_REG(addr);
return s->pins;
}
@@ -121,11 +121,11 @@ static void omap_gpio_write(void *opaque, hwaddr addr,
}
switch (offset) {
- case 0x00: /* DATA_INPUT */
+ case 0x00: /* DATA_INPUT */
OMAP_RO_REG(addr);
return;
- case 0x04: /* DATA_OUTPUT */
+ case 0x04: /* DATA_OUTPUT */
diff = (s->outputs ^ value) & ~s->dir;
s->outputs = value;
while ((ln = ctz32(diff)) != 32) {
@@ -135,7 +135,7 @@ static void omap_gpio_write(void *opaque, hwaddr addr,
}
break;
- case 0x08: /* DIRECTION_CONTROL */
+ case 0x08: /* DIRECTION_CONTROL */
diff = s->outputs & (s->dir ^ value);
s->dir = value;
@@ -147,21 +147,21 @@ static void omap_gpio_write(void *opaque, hwaddr addr,
}
break;
- case 0x0c: /* INTERRUPT_CONTROL */
+ case 0x0c: /* INTERRUPT_CONTROL */
s->edge = value;
break;
- case 0x10: /* INTERRUPT_MASK */
+ case 0x10: /* INTERRUPT_MASK */
s->mask = value;
break;
- case 0x14: /* INTERRUPT_STATUS */
+ case 0x14: /* INTERRUPT_STATUS */
s->ints &= ~value;
if (!s->ints)
qemu_irq_lower(s->irq);
break;
- case 0x18: /* PIN_CONTROL (not in OMAP310 TRM) */
+ case 0x18: /* PIN_CONTROL (not in OMAP310 TRM) */
OMAP_BAD_REG(addr);
s->pins = value;
break;
@@ -190,408 +190,6 @@ static void omap_gpio_reset(struct omap_gpio_s *s)
s->pins = ~0;
}
-struct omap2_gpio_s {
- qemu_irq irq[2];
- qemu_irq wkup;
- qemu_irq *handler;
- MemoryRegion iomem;
-
- uint8_t revision;
- uint8_t config[2];
- uint32_t inputs;
- uint32_t outputs;
- uint32_t dir;
- uint32_t level[2];
- uint32_t edge[2];
- uint32_t mask[2];
- uint32_t wumask;
- uint32_t ints[2];
- uint32_t debounce;
- uint8_t delay;
-};
-
-struct Omap2GpioState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
- int mpu_model;
- void *iclk;
- void *fclk[6];
- int modulecount;
- struct omap2_gpio_s *modules;
- qemu_irq *handler;
- int autoidle;
- int gpo;
-};
-
-/* General-Purpose Interface of OMAP2/3 */
-static inline void omap2_gpio_module_int_update(struct omap2_gpio_s *s,
- int line)
-{
- qemu_set_irq(s->irq[line], s->ints[line] & s->mask[line]);
-}
-
-static void omap2_gpio_module_wake(struct omap2_gpio_s *s, int line)
-{
- if (!(s->config[0] & (1 << 2))) /* ENAWAKEUP */
- return;
- if (!(s->config[0] & (3 << 3))) /* Force Idle */
- return;
- if (!(s->wumask & (1 << line)))
- return;
-
- qemu_irq_raise(s->wkup);
-}
-
-static inline void omap2_gpio_module_out_update(struct omap2_gpio_s *s,
- uint32_t diff)
-{
- int ln;
-
- s->outputs ^= diff;
- diff &= ~s->dir;
- while ((ln = ctz32(diff)) != 32) {
- qemu_set_irq(s->handler[ln], (s->outputs >> ln) & 1);
- diff &= ~(1 << ln);
- }
-}
-
-static void omap2_gpio_module_level_update(struct omap2_gpio_s *s, int line)
-{
- s->ints[line] |= s->dir &
- ((s->inputs & s->level[1]) | (~s->inputs & s->level[0]));
- omap2_gpio_module_int_update(s, line);
-}
-
-static inline void omap2_gpio_module_int(struct omap2_gpio_s *s, int line)
-{
- s->ints[0] |= 1 << line;
- omap2_gpio_module_int_update(s, 0);
- s->ints[1] |= 1 << line;
- omap2_gpio_module_int_update(s, 1);
- omap2_gpio_module_wake(s, line);
-}
-
-static void omap2_gpio_set(void *opaque, int line, int level)
-{
- Omap2GpioState *p = opaque;
- struct omap2_gpio_s *s = &p->modules[line >> 5];
-
- line &= 31;
- if (level) {
- if (s->dir & (1 << line) & ((~s->inputs & s->edge[0]) | s->level[1]))
- omap2_gpio_module_int(s, line);
- s->inputs |= 1 << line;
- } else {
- if (s->dir & (1 << line) & ((s->inputs & s->edge[1]) | s->level[0]))
- omap2_gpio_module_int(s, line);
- s->inputs &= ~(1 << line);
- }
-}
-
-static void omap2_gpio_module_reset(struct omap2_gpio_s *s)
-{
- s->config[0] = 0;
- s->config[1] = 2;
- s->ints[0] = 0;
- s->ints[1] = 0;
- s->mask[0] = 0;
- s->mask[1] = 0;
- s->wumask = 0;
- s->dir = ~0;
- s->level[0] = 0;
- s->level[1] = 0;
- s->edge[0] = 0;
- s->edge[1] = 0;
- s->debounce = 0;
- s->delay = 0;
-}
-
-static uint32_t omap2_gpio_module_read(void *opaque, hwaddr addr)
-{
- struct omap2_gpio_s *s = opaque;
-
- switch (addr) {
- case 0x00: /* GPIO_REVISION */
- return s->revision;
-
- case 0x10: /* GPIO_SYSCONFIG */
- return s->config[0];
-
- case 0x14: /* GPIO_SYSSTATUS */
- return 0x01;
-
- case 0x18: /* GPIO_IRQSTATUS1 */
- return s->ints[0];
-
- case 0x1c: /* GPIO_IRQENABLE1 */
- case 0x60: /* GPIO_CLEARIRQENABLE1 */
- case 0x64: /* GPIO_SETIRQENABLE1 */
- return s->mask[0];
-
- case 0x20: /* GPIO_WAKEUPENABLE */
- case 0x80: /* GPIO_CLEARWKUENA */
- case 0x84: /* GPIO_SETWKUENA */
- return s->wumask;
-
- case 0x28: /* GPIO_IRQSTATUS2 */
- return s->ints[1];
-
- case 0x2c: /* GPIO_IRQENABLE2 */
- case 0x70: /* GPIO_CLEARIRQENABLE2 */
- case 0x74: /* GPIO_SETIREQNEABLE2 */
- return s->mask[1];
-
- case 0x30: /* GPIO_CTRL */
- return s->config[1];
-
- case 0x34: /* GPIO_OE */
- return s->dir;
-
- case 0x38: /* GPIO_DATAIN */
- return s->inputs;
-
- case 0x3c: /* GPIO_DATAOUT */
- case 0x90: /* GPIO_CLEARDATAOUT */
- case 0x94: /* GPIO_SETDATAOUT */
- return s->outputs;
-
- case 0x40: /* GPIO_LEVELDETECT0 */
- return s->level[0];
-
- case 0x44: /* GPIO_LEVELDETECT1 */
- return s->level[1];
-
- case 0x48: /* GPIO_RISINGDETECT */
- return s->edge[0];
-
- case 0x4c: /* GPIO_FALLINGDETECT */
- return s->edge[1];
-
- case 0x50: /* GPIO_DEBOUNCENABLE */
- return s->debounce;
-
- case 0x54: /* GPIO_DEBOUNCINGTIME */
- return s->delay;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap2_gpio_module_write(void *opaque, hwaddr addr,
- uint32_t value)
-{
- struct omap2_gpio_s *s = opaque;
- uint32_t diff;
- int ln;
-
- switch (addr) {
- case 0x00: /* GPIO_REVISION */
- case 0x14: /* GPIO_SYSSTATUS */
- case 0x38: /* GPIO_DATAIN */
- OMAP_RO_REG(addr);
- break;
-
- case 0x10: /* GPIO_SYSCONFIG */
- if (((value >> 3) & 3) == 3) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Illegal IDLEMODE value: 3\n", __func__);
- }
- if (value & 2)
- omap2_gpio_module_reset(s);
- s->config[0] = value & 0x1d;
- break;
-
- case 0x18: /* GPIO_IRQSTATUS1 */
- if (s->ints[0] & value) {
- s->ints[0] &= ~value;
- omap2_gpio_module_level_update(s, 0);
- }
- break;
-
- case 0x1c: /* GPIO_IRQENABLE1 */
- s->mask[0] = value;
- omap2_gpio_module_int_update(s, 0);
- break;
-
- case 0x20: /* GPIO_WAKEUPENABLE */
- s->wumask = value;
- break;
-
- case 0x28: /* GPIO_IRQSTATUS2 */
- if (s->ints[1] & value) {
- s->ints[1] &= ~value;
- omap2_gpio_module_level_update(s, 1);
- }
- break;
-
- case 0x2c: /* GPIO_IRQENABLE2 */
- s->mask[1] = value;
- omap2_gpio_module_int_update(s, 1);
- break;
-
- case 0x30: /* GPIO_CTRL */
- s->config[1] = value & 7;
- break;
-
- case 0x34: /* GPIO_OE */
- diff = s->outputs & (s->dir ^ value);
- s->dir = value;
-
- value = s->outputs & ~s->dir;
- while ((ln = ctz32(diff)) != 32) {
- diff &= ~(1 << ln);
- qemu_set_irq(s->handler[ln], (value >> ln) & 1);
- }
-
- omap2_gpio_module_level_update(s, 0);
- omap2_gpio_module_level_update(s, 1);
- break;
-
- case 0x3c: /* GPIO_DATAOUT */
- omap2_gpio_module_out_update(s, s->outputs ^ value);
- break;
-
- case 0x40: /* GPIO_LEVELDETECT0 */
- s->level[0] = value;
- omap2_gpio_module_level_update(s, 0);
- omap2_gpio_module_level_update(s, 1);
- break;
-
- case 0x44: /* GPIO_LEVELDETECT1 */
- s->level[1] = value;
- omap2_gpio_module_level_update(s, 0);
- omap2_gpio_module_level_update(s, 1);
- break;
-
- case 0x48: /* GPIO_RISINGDETECT */
- s->edge[0] = value;
- break;
-
- case 0x4c: /* GPIO_FALLINGDETECT */
- s->edge[1] = value;
- break;
-
- case 0x50: /* GPIO_DEBOUNCENABLE */
- s->debounce = value;
- break;
-
- case 0x54: /* GPIO_DEBOUNCINGTIME */
- s->delay = value;
- break;
-
- case 0x60: /* GPIO_CLEARIRQENABLE1 */
- s->mask[0] &= ~value;
- omap2_gpio_module_int_update(s, 0);
- break;
-
- case 0x64: /* GPIO_SETIRQENABLE1 */
- s->mask[0] |= value;
- omap2_gpio_module_int_update(s, 0);
- break;
-
- case 0x70: /* GPIO_CLEARIRQENABLE2 */
- s->mask[1] &= ~value;
- omap2_gpio_module_int_update(s, 1);
- break;
-
- case 0x74: /* GPIO_SETIREQNEABLE2 */
- s->mask[1] |= value;
- omap2_gpio_module_int_update(s, 1);
- break;
-
- case 0x80: /* GPIO_CLEARWKUENA */
- s->wumask &= ~value;
- break;
-
- case 0x84: /* GPIO_SETWKUENA */
- s->wumask |= value;
- break;
-
- case 0x90: /* GPIO_CLEARDATAOUT */
- omap2_gpio_module_out_update(s, s->outputs & value);
- break;
-
- case 0x94: /* GPIO_SETDATAOUT */
- omap2_gpio_module_out_update(s, ~s->outputs & value);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static uint64_t omap2_gpio_module_readp(void *opaque, hwaddr addr,
- unsigned size)
-{
- return omap2_gpio_module_read(opaque, addr & ~3) >> ((addr & 3) << 3);
-}
-
-static void omap2_gpio_module_writep(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- uint32_t cur = 0;
- uint32_t mask = 0xffff;
-
- if (size == 4) {
- omap2_gpio_module_write(opaque, addr, value);
- return;
- }
-
- switch (addr & ~3) {
- case 0x00: /* GPIO_REVISION */
- case 0x14: /* GPIO_SYSSTATUS */
- case 0x38: /* GPIO_DATAIN */
- OMAP_RO_REG(addr);
- break;
-
- case 0x10: /* GPIO_SYSCONFIG */
- case 0x1c: /* GPIO_IRQENABLE1 */
- case 0x20: /* GPIO_WAKEUPENABLE */
- case 0x2c: /* GPIO_IRQENABLE2 */
- case 0x30: /* GPIO_CTRL */
- case 0x34: /* GPIO_OE */
- case 0x3c: /* GPIO_DATAOUT */
- case 0x40: /* GPIO_LEVELDETECT0 */
- case 0x44: /* GPIO_LEVELDETECT1 */
- case 0x48: /* GPIO_RISINGDETECT */
- case 0x4c: /* GPIO_FALLINGDETECT */
- case 0x50: /* GPIO_DEBOUNCENABLE */
- case 0x54: /* GPIO_DEBOUNCINGTIME */
- cur = omap2_gpio_module_read(opaque, addr & ~3) &
- ~(mask << ((addr & 3) << 3));
-
- /* Fall through. */
- case 0x18: /* GPIO_IRQSTATUS1 */
- case 0x28: /* GPIO_IRQSTATUS2 */
- case 0x60: /* GPIO_CLEARIRQENABLE1 */
- case 0x64: /* GPIO_SETIRQENABLE1 */
- case 0x70: /* GPIO_CLEARIRQENABLE2 */
- case 0x74: /* GPIO_SETIREQNEABLE2 */
- case 0x80: /* GPIO_CLEARWKUENA */
- case 0x84: /* GPIO_SETWKUENA */
- case 0x90: /* GPIO_CLEARDATAOUT */
- case 0x94: /* GPIO_SETDATAOUT */
- value <<= (addr & 3) << 3;
- omap2_gpio_module_write(opaque, addr, cur | value);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap2_gpio_module_ops = {
- .read = omap2_gpio_module_readp,
- .write = omap2_gpio_module_writep,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
static void omap_gpif_reset(DeviceState *dev)
{
Omap1GpioState *s = OMAP1_GPIO(dev);
@@ -599,81 +197,6 @@ static void omap_gpif_reset(DeviceState *dev)
omap_gpio_reset(&s->omap1);
}
-static void omap2_gpif_reset(DeviceState *dev)
-{
- Omap2GpioState *s = OMAP2_GPIO(dev);
- int i;
-
- for (i = 0; i < s->modulecount; i++) {
- omap2_gpio_module_reset(&s->modules[i]);
- }
- s->autoidle = 0;
- s->gpo = 0;
-}
-
-static uint64_t omap2_gpif_top_read(void *opaque, hwaddr addr, unsigned size)
-{
- Omap2GpioState *s = opaque;
-
- switch (addr) {
- case 0x00: /* IPGENERICOCPSPL_REVISION */
- return 0x18;
-
- case 0x10: /* IPGENERICOCPSPL_SYSCONFIG */
- return s->autoidle;
-
- case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */
- return 0x01;
-
- case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */
- return 0x00;
-
- case 0x40: /* IPGENERICOCPSPL_GPO */
- return s->gpo;
-
- case 0x50: /* IPGENERICOCPSPL_GPI */
- return 0x00;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap2_gpif_top_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- Omap2GpioState *s = opaque;
-
- switch (addr) {
- case 0x00: /* IPGENERICOCPSPL_REVISION */
- case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */
- case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */
- case 0x50: /* IPGENERICOCPSPL_GPI */
- OMAP_RO_REG(addr);
- break;
-
- case 0x10: /* IPGENERICOCPSPL_SYSCONFIG */
- if (value & (1 << 1)) /* SOFTRESET */
- omap2_gpif_reset(DEVICE(s));
- s->autoidle = value & 1;
- break;
-
- case 0x40: /* IPGENERICOCPSPL_GPO */
- s->gpo = value & 1;
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap2_gpif_top_ops = {
- .read = omap2_gpif_top_read,
- .write = omap2_gpif_top_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
static void omap_gpio_init(Object *obj)
{
DeviceState *dev = DEVICE(obj);
@@ -697,67 +220,21 @@ static void omap_gpio_realize(DeviceState *dev, Error **errp)
}
}
-static void omap2_gpio_realize(DeviceState *dev, Error **errp)
-{
- Omap2GpioState *s = OMAP2_GPIO(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- int i;
-
- if (!s->iclk) {
- error_setg(errp, "omap2-gpio: iclk not connected");
- return;
- }
-
- s->modulecount = s->mpu_model < omap2430 ? 4
- : s->mpu_model < omap3430 ? 5
- : 6;
-
- if (s->mpu_model < omap3430) {
- memory_region_init_io(&s->iomem, OBJECT(dev), &omap2_gpif_top_ops, s,
- "omap2.gpio", 0x1000);
- sysbus_init_mmio(sbd, &s->iomem);
- }
-
- s->modules = g_new0(struct omap2_gpio_s, s->modulecount);
- s->handler = g_new0(qemu_irq, s->modulecount * 32);
- qdev_init_gpio_in(dev, omap2_gpio_set, s->modulecount * 32);
- qdev_init_gpio_out(dev, s->handler, s->modulecount * 32);
-
- for (i = 0; i < s->modulecount; i++) {
- struct omap2_gpio_s *m = &s->modules[i];
-
- if (!s->fclk[i]) {
- error_setg(errp, "omap2-gpio: fclk%d not connected", i);
- return;
- }
-
- m->revision = (s->mpu_model < omap3430) ? 0x18 : 0x25;
- m->handler = &s->handler[i * 32];
- sysbus_init_irq(sbd, &m->irq[0]); /* mpu irq */
- sysbus_init_irq(sbd, &m->irq[1]); /* dsp irq */
- sysbus_init_irq(sbd, &m->wkup);
- memory_region_init_io(&m->iomem, OBJECT(dev), &omap2_gpio_module_ops, m,
- "omap.gpio-module", 0x1000);
- sysbus_init_mmio(sbd, &m->iomem);
- }
-}
-
void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk)
{
gpio->clk = clk;
}
-static Property omap_gpio_properties[] = {
+static const Property omap_gpio_properties[] = {
DEFINE_PROP_INT32("mpu_model", Omap1GpioState, mpu_model, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void omap_gpio_class_init(ObjectClass *klass, void *data)
+static void omap_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = omap_gpio_realize;
- dc->reset = omap_gpif_reset;
+ device_class_set_legacy_reset(dc, omap_gpif_reset);
device_class_set_props(dc, omap_gpio_properties);
/* Reason: pointer property "clk" */
dc->user_creatable = false;
@@ -771,44 +248,9 @@ static const TypeInfo omap_gpio_info = {
.class_init = omap_gpio_class_init,
};
-void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk)
-{
- gpio->iclk = clk;
-}
-
-void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk)
-{
- assert(i <= 5);
- gpio->fclk[i] = clk;
-}
-
-static Property omap2_gpio_properties[] = {
- DEFINE_PROP_INT32("mpu_model", Omap2GpioState, mpu_model, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void omap2_gpio_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = omap2_gpio_realize;
- dc->reset = omap2_gpif_reset;
- device_class_set_props(dc, omap2_gpio_properties);
- /* Reason: pointer properties "iclk", "fclk0", ..., "fclk5" */
- dc->user_creatable = false;
-}
-
-static const TypeInfo omap2_gpio_info = {
- .name = TYPE_OMAP2_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Omap2GpioState),
- .class_init = omap2_gpio_class_init,
-};
-
static void omap_gpio_register_types(void)
{
type_register_static(&omap_gpio_info);
- type_register_static(&omap2_gpio_info);
}
type_init(omap_gpio_register_types)
diff --git a/hw/gpio/pca9552.c b/hw/gpio/pca9552.c
index 27d4db0..1e10238 100644
--- a/hw/gpio/pca9552.c
+++ b/hw/gpio/pca9552.c
@@ -76,7 +76,7 @@ static void pca955x_display_pins_status(PCA955xState *s,
return;
}
if (trace_event_get_state_backends(TRACE_PCA955X_GPIO_STATUS)) {
- char *buf = g_newa(char, k->pin_count + 1);
+ char buf[PCA955X_PIN_COUNT_MAX + 1];
for (i = 0; i < k->pin_count; i++) {
if (extract32(pins_status, i, 1)) {
@@ -428,12 +428,11 @@ static void pca955x_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, pca955x_gpio_in_handler, k->pin_count);
}
-static Property pca955x_properties[] = {
+static const Property pca955x_properties[] = {
DEFINE_PROP_STRING("description", PCA955xState, description),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pca955x_class_init(ObjectClass *klass, void *data)
+static void pca955x_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -455,12 +454,12 @@ static const TypeInfo pca955x_info = {
.abstract = true,
};
-static void pca9552_class_init(ObjectClass *oc, void *data)
+static void pca9552_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCA955xClass *pc = PCA955X_CLASS(oc);
- dc->reset = pca9552_reset;
+ device_class_set_legacy_reset(dc, pca9552_reset);
dc->vmsd = &pca9552_vmstate;
pc->max_reg = PCA9552_LS3;
pc->pin_count = 16;
diff --git a/hw/gpio/pca9554.c b/hw/gpio/pca9554.c
index 7d10a64..de3f883 100644
--- a/hw/gpio/pca9554.c
+++ b/hw/gpio/pca9554.c
@@ -118,11 +118,8 @@ static void pca9554_write(PCA9554State *s, uint8_t reg, uint8_t data)
static uint8_t pca9554_recv(I2CSlave *i2c)
{
PCA9554State *s = PCA9554(i2c);
- uint8_t ret;
- ret = pca9554_read(s, s->pointer & 0x3);
-
- return ret;
+ return pca9554_read(s, s->pointer & 0x3);
}
static int pca9554_send(I2CSlave *i2c, uint8_t data)
@@ -291,12 +288,11 @@ static void pca9554_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, pca9554_gpio_in_handler, PCA9554_PIN_COUNT);
}
-static Property pca9554_properties[] = {
+static const Property pca9554_properties[] = {
DEFINE_PROP_STRING("description", PCA9554State, description),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pca9554_class_init(ObjectClass *klass, void *data)
+static void pca9554_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -305,7 +301,7 @@ static void pca9554_class_init(ObjectClass *klass, void *data)
k->recv = pca9554_recv;
k->send = pca9554_send;
dc->realize = pca9554_realize;
- dc->reset = pca9554_reset;
+ device_class_set_legacy_reset(dc, pca9554_reset);
dc->vmsd = &pca9554_vmstate;
device_class_set_props(dc, pca9554_properties);
}
diff --git a/hw/gpio/pcf8574.c b/hw/gpio/pcf8574.c
index d37909e..274b44b 100644
--- a/hw/gpio/pcf8574.c
+++ b/hw/gpio/pcf8574.c
@@ -138,7 +138,7 @@ static void pcf8574_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out_named(dev, &s->intrq, "nINT", 1);
}
-static void pcf8574_class_init(ObjectClass *klass, void *data)
+static void pcf8574_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -146,7 +146,7 @@ static void pcf8574_class_init(ObjectClass *klass, void *data)
k->recv = pcf8574_rx;
k->send = pcf8574_tx;
dc->realize = pcf8574_realize;
- dc->reset = pcf8574_reset;
+ device_class_set_legacy_reset(dc, pcf8574_reset);
dc->vmsd = &vmstate_pcf8574;
}
diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c
index d5838b8..1acca3f 100644
--- a/hw/gpio/pl061.c
+++ b/hw/gpio/pl061.c
@@ -443,7 +443,6 @@ static void pl061_write(void *opaque, hwaddr offset,
return;
}
pl061_update(s);
- return;
}
static void pl061_enter_reset(Object *obj, ResetType type)
@@ -562,13 +561,12 @@ static void pl061_realize(DeviceState *dev, Error **errp)
}
}
-static Property pl061_props[] = {
+static const Property pl061_props[] = {
DEFINE_PROP_UINT32("pullups", PL061State, pullups, 0xff),
DEFINE_PROP_UINT32("pulldowns", PL061State, pulldowns, 0x0),
- DEFINE_PROP_END_OF_LIST()
};
-static void pl061_class_init(ObjectClass *klass, void *data)
+static void pl061_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/gpio/sifive_gpio.c b/hw/gpio/sifive_gpio.c
index 995a43c..5831647 100644
--- a/hw/gpio/sifive_gpio.c
+++ b/hw/gpio/sifive_gpio.c
@@ -349,9 +349,8 @@ static const VMStateDescription vmstate_sifive_gpio = {
}
};
-static Property sifive_gpio_properties[] = {
+static const Property sifive_gpio_properties[] = {
DEFINE_PROP_UINT32("ngpio", SIFIVEGPIOState, ngpio, SIFIVE_GPIO_PINS),
- DEFINE_PROP_END_OF_LIST(),
};
static void sifive_gpio_realize(DeviceState *dev, Error **errp)
@@ -371,14 +370,14 @@ static void sifive_gpio_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(DEVICE(s), s->output, s->ngpio);
}
-static void sifive_gpio_class_init(ObjectClass *klass, void *data)
+static void sifive_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, sifive_gpio_properties);
dc->vmsd = &vmstate_sifive_gpio;
dc->realize = sifive_gpio_realize;
- dc->reset = sifive_gpio_reset;
+ device_class_set_legacy_reset(dc, sifive_gpio_reset);
dc->desc = "SiFive GPIO";
}
diff --git a/hw/gpio/stm32l4x5_gpio.c b/hw/gpio/stm32l4x5_gpio.c
index 30d8d6c..414ce83 100644
--- a/hw/gpio/stm32l4x5_gpio.c
+++ b/hw/gpio/stm32l4x5_gpio.c
@@ -447,15 +447,14 @@ static const VMStateDescription vmstate_stm32l4x5_gpio = {
}
};
-static Property stm32l4x5_gpio_properties[] = {
+static const Property stm32l4x5_gpio_properties[] = {
DEFINE_PROP_STRING("name", Stm32l4x5GpioState, name),
DEFINE_PROP_UINT32("mode-reset", Stm32l4x5GpioState, moder_reset, 0),
DEFINE_PROP_UINT32("ospeed-reset", Stm32l4x5GpioState, ospeedr_reset, 0),
DEFINE_PROP_UINT32("pupd-reset", Stm32l4x5GpioState, pupdr_reset, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void stm32l4x5_gpio_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
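
pl061 and stm32l4x5 in this series use the Resettable interface directly rather than the legacy hook wrapped elsewhere: reset work is split into phases registered on the ResettableClass. A hedged sketch of the enter-phase registration, with placeholder names:

static void mydev_enter_reset(Object *obj, ResetType type)
{
    /* Return registers to their power-on values.  The enter phase must
     * not have side effects on other devices (e.g. IRQ line changes);
     * those belong in the hold or exit phases. */
}

static void mydev_class_init(ObjectClass *klass, const void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    rc->phases.enter = mydev_enter_reset;
}
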
diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events
index b91cc7e..cea896b 100644
--- a/hw/gpio/trace-events
+++ b/hw/gpio/trace-events
@@ -1,5 +1,10 @@
# See docs/devel/tracing.rst for syntax documentation.
+# imx_gpio.c
+imx_gpio_read(const char *id, const char *reg, uint32_t value) "%s:[%s] -> 0x%" PRIx32
+imx_gpio_write(const char *id, const char *reg, uint32_t value) "%s:[%s] <- 0x%" PRIx32
+imx_gpio_set(const char *id, int line, int level) "%s:[%d] <- %d"
+
# npcm7xx_gpio.c
npcm7xx_gpio_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
npcm7xx_gpio_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c
index 7342440..b8d27f5 100644
--- a/hw/gpio/zaurus.c
+++ b/hw/gpio/zaurus.c
@@ -243,7 +243,7 @@ static const VMStateDescription vmstate_scoop_regs = {
},
};
-static void scoop_sysbus_class_init(ObjectClass *klass, void *data)
+static void scoop_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig
index d4d457f..cab2104 100644
--- a/hw/hppa/Kconfig
+++ b/hw/hppa/Kconfig
@@ -9,8 +9,9 @@ config HPPA_B160L
select ASTRO
select DINO
select LASI
- select SERIAL
+ select SERIAL_MM
select SERIAL_PCI
+ select DIVA_GSP
select ISA_BUS
select I8259
select IDE_CMD646
diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h
index a9be7bb..21c777c 100644
--- a/hw/hppa/hppa_hardware.h
+++ b/hw/hppa/hppa_hardware.h
@@ -6,6 +6,11 @@
#define FIRMWARE_START 0xf0000000
#define FIRMWARE_END 0xf0800000
+#define FIRMWARE_HIGH 0xfffffff0 /* upper 32 bits of 64-bit firmware address */
+
+#define RAM_MAP_HIGH 0x0100000000 /* memory above 3.75 GB is mapped here */
+
+#define MEM_PDC_ENTRY 0x4800 /* PDC entry address */
#define DEVICE_HPA_LEN 0x00100000
@@ -18,6 +23,7 @@
#define LASI_UART_HPA 0xffd05000
#define LASI_SCSI_HPA 0xffd06000
#define LASI_LAN_HPA 0xffd07000
+#define LASI_RTC_HPA 0xffd09000
#define LASI_LPT_HPA 0xffd02000
#define LASI_AUDIO_HPA 0xffd04000
#define LASI_PS2KBD_HPA 0xffd08000
@@ -27,16 +33,23 @@
#define CPU_HPA 0xfffb0000
#define MEMORY_HPA 0xfffff000
-#define PCI_HPA DINO_HPA /* PCI bus */
#define IDE_HPA 0xf9000000 /* Boot disc controller */
+#define ASTRO_HPA 0xfed00000
+#define ELROY0_HPA 0xfed30000
+#define ELROY2_HPA 0xfed32000
+#define ELROY8_HPA 0xfed38000
+#define ELROYc_HPA 0xfed3c000
+#define ASTRO_MEMORY_HPA 0xfed10200
+
+#define SCSI_HPA 0xf1040000 /* emulated SCSI, needs to be in f region */
/* offsets to DINO HPA: */
#define DINO_PCI_ADDR 0x064
#define DINO_CONFIG_DATA 0x068
#define DINO_IO_DATA 0x06c
-#define PORT_PCI_CMD (PCI_HPA + DINO_PCI_ADDR)
-#define PORT_PCI_DATA (PCI_HPA + DINO_CONFIG_DATA)
+#define PORT_PCI_CMD hppa_port_pci_cmd
+#define PORT_PCI_DATA hppa_port_pci_data
#define FW_CFG_IO_BASE 0xfffa0000
@@ -46,7 +59,24 @@
#define HPPA_MAX_CPUS 16 /* max. number of SMP CPUs */
#define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */
+#define CR_PSW_DEFAULT 6 /* used by SeaBIOS & QEMU for default PSW */
#define CPU_HPA_CR_REG 7 /* store CPU HPA in cr7 (SeaBIOS internal) */
#define PIM_STORAGE_SIZE 600 /* storage size of pdc_pim_toc_struct (64bit) */
+#define ASTRO_BUS_MODULE 0x0a /* C3700: 0x0a, others maybe 0 ? */
+
+/* ASTRO Memory and I/O regions */
+#define ASTRO_BASE_HPA 0xfffed00000
+#define ELROY0_BASE_HPA 0xfffed30000 /* ELROY0_HPA */
+
+#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
+
+#define LMMIO_DIRECT0_BASE 0x300
+#define LMMIO_DIRECT0_MASK 0x308
+#define LMMIO_DIRECT0_ROUTE 0x310
+
+/* space register hashing */
+#define HPPA64_DIAG_SPHASH_ENABLE 0x200 /* DIAG_SPHASH_ENAB (bit 54) */
+#define HPPA64_PDC_CACHE_RET_SPID_VAL 0xfe0 /* PDC return value on 64-bit CPU */
+
#endif
diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
index 5d0a873..dacedc5 100644
--- a/hw/hppa/machine.c
+++ b/hw/hppa/machine.c
@@ -11,13 +11,14 @@
#include "elf.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
+#include "exec/target_page.h"
+#include "system/reset.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/timer/i8254.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/char/parallel.h"
#include "hw/intc/i8259.h"
#include "hw/input/lasips2.h"
@@ -240,7 +241,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus,
g_memdup2(qemu_version, sizeof(qemu_version)),
sizeof(qemu_version));
- fw_cfg_add_extra_pci_roots(pci_bus, fw_cfg);
+ pci_bus_add_fw_cfg_extra_pci_roots(fw_cfg, pci_bus, &error_abort);
return fw_cfg;
}
@@ -283,16 +284,13 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine)
cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type));
}
- /*
- * For now, treat address layout as if PSW_W is clear.
- * TODO: create a proper hppa64 board model and load elf64 firmware.
- */
+ /* Initialize memory */
if (hppa_is_pa20(&cpu[0]->env)) {
translate = translate_pa20;
- ram_max = 0xf0000000; /* 3.75 GB (limited by 32-bit firmware) */
+ ram_max = 256 * GiB; /* like HP rp8440 */
} else {
translate = translate_pa10;
- ram_max = 0xf0000000; /* 3.75 GB (32-bit CPU) */
+ ram_max = FIRMWARE_START; /* 3.75 GB (32-bit CPU) */
}
soft_power_reg = translate(NULL, HPA_POWER_BUTTON);
@@ -320,7 +318,22 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine)
info_report("Max RAM size limited to %" PRIu64 " MB", ram_max / MiB);
machine->ram_size = ram_max;
}
- memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1);
+ if (machine->ram_size <= FIRMWARE_START) {
+ /* contiguous memory up to 3.75 GB RAM */
+ memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1);
+ } else {
+ /* non-contiguous: Memory above 3.75 GB is mapped at RAM_MAP_HIGH */
+ MemoryRegion *mem_region;
+ mem_region = g_new(MemoryRegion, 2);
+ memory_region_init_alias(&mem_region[0], &addr_space->parent_obj,
+ "LowMem", machine->ram, 0, FIRMWARE_START);
+ memory_region_init_alias(&mem_region[1], &addr_space->parent_obj,
+ "HighMem", machine->ram, FIRMWARE_START,
+ machine->ram_size - FIRMWARE_START);
+ memory_region_add_subregion_overlap(addr_space, 0, &mem_region[0], -1);
+ memory_region_add_subregion_overlap(addr_space, RAM_MAP_HIGH,
+ &mem_region[1], -1);
+ }
return translate;
}
@@ -344,7 +357,6 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
uint64_t kernel_entry = 0, kernel_low, kernel_high;
MemoryRegion *addr_space = get_system_memory();
MemoryRegion *rom_region;
- unsigned int smp_cpus = machine->smp.cpus;
SysBusDevice *s;
/* SCSI disk setup. */
@@ -355,12 +367,15 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
/* Graphics setup. */
if (machine->enable_graphics && vga_interface_type != VGA_NONE) {
- vga_interface_created = true;
dev = qdev_new("artist");
s = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, translate(NULL, LASI_GFX_HPA));
- sysbus_mmio_map(s, 1, translate(NULL, ARTIST_FB_ADDR));
+ bool disabled = object_property_get_bool(OBJECT(dev), "disable", NULL);
+ if (!disabled) {
+ sysbus_realize_and_unref(s, &error_fatal);
+ vga_interface_created = true;
+ sysbus_mmio_map(s, 0, translate(NULL, LASI_GFX_HPA));
+ sysbus_mmio_map(s, 1, translate(NULL, ARTIST_FB_ADDR));
+ }
}
/* Network setup. */
@@ -372,26 +387,17 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
pci_init_nic_devices(pci_bus, mc->default_nic);
- /* BMC board: HP Powerbar SP2 Diva (with console only) */
- pci_dev = pci_new(-1, "pci-serial");
- if (!lasi_dev) {
- /* bind default keyboard/serial to Diva card */
- qdev_prop_set_chr(DEVICE(pci_dev), "chardev", serial_hd(0));
- }
- qdev_prop_set_uint8(DEVICE(pci_dev), "prog_if", 0);
- pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
- pci_config_set_vendor_id(pci_dev->config, PCI_VENDOR_ID_HP);
- pci_config_set_device_id(pci_dev->config, 0x1048);
- pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_VENDOR_ID], PCI_VENDOR_ID_HP);
- pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_ID], 0x1227); /* Powerbar */
-
- /* create a second serial PCI card when running Astro */
- if (serial_hd(1) && !lasi_dev) {
- pci_dev = pci_new(-1, "pci-serial-4x");
- qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(1));
- qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(2));
- qdev_prop_set_chr(DEVICE(pci_dev), "chardev3", serial_hd(3));
- qdev_prop_set_chr(DEVICE(pci_dev), "chardev4", serial_hd(4));
+ /* BMC board: HP Diva GSP */
+ dev = qdev_new("diva-gsp");
+ if (!object_property_get_bool(OBJECT(dev), "disable", NULL)) {
+ pci_dev = pci_new_multifunction(PCI_DEVFN(2, 0), "diva-gsp");
+ if (!lasi_dev) {
+ /* bind default keyboard/serial to Diva card */
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(0));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(1));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev3", serial_hd(2));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev4", serial_hd(3));
+ }
pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
}
@@ -429,7 +435,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
size = load_elf(firmware_filename, NULL, translate, NULL,
&firmware_entry, &firmware_low, &firmware_high, NULL,
- true, EM_PARISC, 0, 0);
+ ELFDATA2MSB, EM_PARISC, 0, 0);
if (size < 0) {
error_report("could not load firmware '%s'", firmware_filename);
@@ -456,7 +462,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
if (kernel_filename) {
size = load_elf(kernel_filename, NULL, linux_kernel_virt_to_phys,
NULL, &kernel_entry, &kernel_low, &kernel_high, NULL,
- true, EM_PARISC, 0, 0);
+ ELFDATA2MSB, EM_PARISC, 0, 0);
kernel_entry = linux_kernel_virt_to_phys(NULL, kernel_entry);
@@ -470,8 +476,8 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
kernel_low, kernel_high, kernel_entry, size / KiB);
if (kernel_cmdline) {
- cpu[0]->env.gr[24] = 0x4000;
- pstrcpy_targphys("cmdline", cpu[0]->env.gr[24],
+ cpu[0]->env.cmdline_or_bootorder = 0x4000;
+ pstrcpy_targphys("cmdline", cpu[0]->env.cmdline_or_bootorder,
TARGET_PAGE_SIZE, kernel_cmdline);
}
@@ -501,32 +507,22 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus,
}
load_image_targphys(initrd_filename, initrd_base, initrd_size);
- cpu[0]->env.gr[23] = initrd_base;
- cpu[0]->env.gr[22] = initrd_base + initrd_size;
+ cpu[0]->env.initrd_base = initrd_base;
+ cpu[0]->env.initrd_end = initrd_base + initrd_size;
}
}
if (!kernel_entry) {
/* When booting via firmware, tell firmware if we want interactive
- * mode (kernel_entry=1), and to boot from CD (gr[24]='d')
- * or hard disc * (gr[24]='c').
+ * mode (kernel_entry=1), and to boot from CD (cmdline_or_bootorder='d')
+ * or hard disc (cmdline_or_bootorder='c').
*/
kernel_entry = machine->boot_config.has_menu ? machine->boot_config.menu : 0;
- cpu[0]->env.gr[24] = machine->boot_config.order[0];
+ cpu[0]->env.cmdline_or_bootorder = machine->boot_config.order[0];
}
- /* We jump to the firmware entry routine and pass the
- * various parameters in registers. After firmware initialization,
- * firmware will start the Linux kernel with ramdisk and cmdline.
- */
- cpu[0]->env.gr[26] = machine->ram_size;
- cpu[0]->env.gr[25] = kernel_entry;
-
- /* tell firmware how many SMP CPUs to present in inventory table */
- cpu[0]->env.gr[21] = smp_cpus;
-
- /* tell firmware fw_cfg port */
- cpu[0]->env.gr[19] = FW_CFG_IO_BASE;
+ /* Keep initial kernel_entry for first boot */
+ cpu[0]->env.kernel_entry = kernel_entry;
}
/*
@@ -642,12 +638,12 @@ static void machine_HP_C3700_init(MachineState *machine)
machine_HP_common_init_tail(machine, pci_bus, translate);
}
-static void hppa_machine_reset(MachineState *ms, ShutdownCause reason)
+static void hppa_machine_reset(MachineState *ms, ResetType type)
{
unsigned int smp_cpus = ms->smp.cpus;
int i;
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
/* Start all CPUs at the firmware entry point.
* Monarch CPU will initialize firmware, secondary CPUs
@@ -655,26 +651,27 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason)
for (i = 0; i < smp_cpus; i++) {
CPUState *cs = CPU(cpu[i]);
+ /* reset CPU */
+ resettable_reset(OBJECT(cs), RESET_TYPE_COLD);
+
cpu_set_pc(cs, firmware_entry);
cpu[i]->env.psw = PSW_Q;
cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000;
-
- cs->exception_index = -1;
- cs->halted = 0;
- }
-
- /* already initialized by machine_hppa_init()? */
- if (cpu[0]->env.gr[26] == ms->ram_size) {
- return;
}
cpu[0]->env.gr[26] = ms->ram_size;
- cpu[0]->env.gr[25] = 0; /* no firmware boot menu */
- cpu[0]->env.gr[24] = 'c';
- /* gr22/gr23 unused, no initrd while reboot. */
+ cpu[0]->env.gr[25] = cpu[0]->env.kernel_entry;
+ cpu[0]->env.gr[24] = cpu[0]->env.cmdline_or_bootorder;
+ cpu[0]->env.gr[23] = cpu[0]->env.initrd_base;
+ cpu[0]->env.gr[22] = cpu[0]->env.initrd_end;
cpu[0]->env.gr[21] = smp_cpus;
- /* tell firmware fw_cfg port */
cpu[0]->env.gr[19] = FW_CFG_IO_BASE;
+
+ /* reset static fields to avoid starting Linux kernel & initrd on reboot */
+ cpu[0]->env.kernel_entry = 0;
+ cpu[0]->env.initrd_base = 0;
+ cpu[0]->env.initrd_end = 0;
+ cpu[0]->env.cmdline_or_bootorder = 'c';
}
static void hppa_nmi(NMIState *n, int cpu_index, Error **errp)
@@ -686,7 +683,7 @@ static void hppa_nmi(NMIState *n, int cpu_index, Error **errp)
}
}
-static void HP_B160L_machine_init_class_init(ObjectClass *oc, void *data)
+static void HP_B160L_machine_init_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
TYPE_HPPA_CPU,
@@ -716,13 +713,13 @@ static const TypeInfo HP_B160L_machine_init_typeinfo = {
.name = MACHINE_TYPE_NAME("B160L"),
.parent = TYPE_MACHINE,
.class_init = HP_B160L_machine_init_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ }
},
};
-static void HP_C3700_machine_init_class_init(ObjectClass *oc, void *data)
+static void HP_C3700_machine_init_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
TYPE_HPPA64_CPU,
@@ -752,7 +749,7 @@ static const TypeInfo HP_C3700_machine_init_typeinfo = {
.name = MACHINE_TYPE_NAME("C3700"),
.parent = TYPE_MACHINE,
.class_init = HP_C3700_machine_init_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ }
},
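
The >3.75 GB RAM support in hw/hppa/machine.c works by aliasing a single backing RAM block into two guest-physical windows. A reduced sketch of that mapping using the constants from this patch (owner passed as NULL for brevity; the patch uses the address-space object, and error handling is omitted):

MemoryRegion *sysmem = get_system_memory();
MemoryRegion *alias = g_new(MemoryRegion, 2);

/* RAM below the firmware window keeps its identity mapping. */
memory_region_init_alias(&alias[0], NULL, "LowMem", machine->ram,
                         0, FIRMWARE_START);
/* The rest of the same RAM block reappears at RAM_MAP_HIGH. */
memory_region_init_alias(&alias[1], NULL, "HighMem", machine->ram,
                         FIRMWARE_START, machine->ram_size - FIRMWARE_START);
memory_region_add_subregion_overlap(sysmem, 0, &alias[0], -1);
memory_region_add_subregion_overlap(sysmem, RAM_MAP_HIGH, &alias[1], -1);
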
diff --git a/hw/hyperv/hv-balloon-our_range_memslots.h b/hw/hyperv/hv-balloon-our_range_memslots.h
index df3b686..b1f19d7 100644
--- a/hw/hyperv/hv-balloon-our_range_memslots.h
+++ b/hw/hyperv/hv-balloon-our_range_memslots.h
@@ -11,7 +11,7 @@
#define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#include "hv-balloon-page_range_tree.h"
diff --git a/hw/hyperv/hv-balloon.c b/hw/hyperv/hv-balloon.c
index 3a9ef07..6dbcb2d 100644
--- a/hw/hyperv/hv-balloon.c
+++ b/hw/hyperv/hv-balloon.c
@@ -10,9 +10,9 @@
#include "qemu/osdep.h"
#include "hv-balloon-internal.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "exec/cpu-common.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "hw/boards.h"
#include "hw/hyperv/dynmem-proto.h"
#include "hw/hyperv/hv-balloon.h"
@@ -26,15 +26,15 @@
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-events-machine.h"
#include "qapi/qapi-types-machine.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "qemu/timer.h"
-#include "sysemu/balloon.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/reset.h"
+#include "system/balloon.h"
+#include "system/hostmem.h"
+#include "system/reset.h"
#include "hv-balloon-our_range_memslots.h"
#include "hv-balloon-page_range_tree.h"
#include "trace.h"
@@ -67,10 +67,6 @@
* these requests
*/
-struct HvBalloonClass {
- VMBusDeviceClass parent_class;
-} HvBalloonClass;
-
typedef enum State {
/* not a real state */
S_NO_CHANGE = 0,
@@ -162,8 +158,9 @@ typedef struct HvBalloon {
MemoryRegion *mr;
} HvBalloon;
-OBJECT_DEFINE_TYPE_WITH_INTERFACES(HvBalloon, hv_balloon, HV_BALLOON, VMBUS_DEVICE, \
- { TYPE_MEMORY_DEVICE }, { })
+OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(HvBalloon, hv_balloon, \
+ HV_BALLOON, VMBUS_DEVICE, \
+ { TYPE_MEMORY_DEVICE }, { })
#define HV_BALLOON_SET_STATE(hvb, news) \
do { \
@@ -1733,7 +1730,7 @@ static void hv_balloon_finalize(Object *obj)
hv_balloon_unrealize_finalize_common(balloon);
}
-static Property hv_balloon_properties[] = {
+static const Property hv_balloon_properties[] = {
DEFINE_PROP_BOOL("status-report", HvBalloon,
status_report.enabled, false),
@@ -1741,11 +1738,9 @@ static Property hv_balloon_properties[] = {
DEFINE_PROP_LINK(HV_BALLOON_MEMDEV_PROP, HvBalloon, hostmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
DEFINE_PROP_UINT64(HV_BALLOON_ADDR_PROP, HvBalloon, addr, 0),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void hv_balloon_class_init(ObjectClass *klass, void *data)
+static void hv_balloon_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VMBusDeviceClass *vdc = VMBUS_DEVICE_CLASS(klass);
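
hv-balloon.c shows two mechanical conversions that recur throughout this diff: property arrays become const and drop the DEFINE_PROP_END_OF_LIST() terminator (device_class_set_props() now takes the length from the array itself), and class_init callbacks gain a const void *data parameter. A sketch of the resulting shape, assuming a hypothetical HypDevState device:

static const Property hyp_dev_properties[] = {
    DEFINE_PROP_BOOL("status-report", HypDevState, status_report, false),
    DEFINE_PROP_UINT64("addr", HypDevState, addr, 0),
    /* no DEFINE_PROP_END_OF_LIST(): the macro below sizes the array */
};

static void hyp_dev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, hyp_dev_properties);
}
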
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 483dcca..e4d0688 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -11,9 +11,11 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-#include "sysemu/kvm.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+#include "exec/target_page.h"
+#include "linux/kvm.h"
+#include "system/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
@@ -23,8 +25,7 @@
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"
-#include "target/i386/cpu.h"
-#include "exec/cpu-all.h"
+#include "exec/target_page.h"
struct SynICState {
DeviceState parent_obj;
@@ -133,12 +134,12 @@ static void synic_reset(DeviceState *dev)
assert(QLIST_EMPTY(&synic->sint_routes));
}
-static void synic_class_init(ObjectClass *klass, void *data)
+static void synic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = synic_realize;
- dc->reset = synic_reset;
+ device_class_set_legacy_reset(dc, synic_reset);
dc->user_creatable = false;
}
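
synic_class_init() above, like many class_init functions later in this diff, stops writing dc->reset directly and registers the handler through device_class_set_legacy_reset(), which plugs the legacy DeviceState reset callback into the Resettable machinery. A minimal sketch with a hypothetical device:

static void hyp_synic_reset(DeviceState *dev)
{
    /* legacy whole-device reset handler */
}

static void hyp_synic_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* Same effect as the old "dc->reset = hyp_synic_reset;" assignment,
     * without touching the deprecated field. */
    device_class_set_legacy_reset(dc, hyp_synic_reset);
}
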
diff --git a/hw/hyperv/hyperv_testdev.c b/hw/hyperv/hyperv_testdev.c
index 9a56ddf..2d4a636 100644
--- a/hw/hyperv/hyperv_testdev.c
+++ b/hw/hyperv/hyperv_testdev.c
@@ -88,8 +88,7 @@ static TestSintRoute *sint_route_find(HypervTestDev *dev,
return sint_route;
}
}
- assert(false);
- return NULL;
+ g_assert_not_reached();
}
static void sint_route_destroy(HypervTestDev *dev,
@@ -187,7 +186,7 @@ static void msg_conn_destroy(HypervTestDev *dev, uint8_t conn_id)
return;
}
}
- assert(false);
+ g_assert_not_reached();
}
static void evt_conn_handler(EventNotifier *notifier)
@@ -237,7 +236,7 @@ static void evt_conn_destroy(HypervTestDev *dev, uint8_t conn_id)
return;
}
}
- assert(false);
+ g_assert_not_reached();
}
static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size)
@@ -304,7 +303,7 @@ static void hv_test_dev_realizefn(DeviceState *d, Error **errp)
memory_region_add_subregion(io, 0x3000, &dev->sint_control);
}
-static void hv_test_dev_class_init(ObjectClass *klass, void *data)
+static void hv_test_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/hyperv/meson.build b/hw/hyperv/meson.build
index d3d2668..d1cf781 100644
--- a/hw/hyperv/meson.build
+++ b/hw/hyperv/meson.build
@@ -1,5 +1,6 @@
-specific_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'))
-specific_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c'))
-specific_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c'))
-specific_ss.add(when: 'CONFIG_SYNDBG', if_true: files('syndbg.c'))
-specific_ss.add(when: 'CONFIG_HV_BALLOON', if_true: files('hv-balloon.c', 'hv-balloon-page_range_tree.c', 'hv-balloon-our_range_memslots.c'), if_false: files('hv-balloon-stub.c'))
+system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'))
+system_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c'))
+system_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c'))
+system_ss.add(when: 'CONFIG_SYNDBG', if_true: files('syndbg.c'))
+system_ss.add(when: 'CONFIG_HV_BALLOON', if_true: files('hv-balloon.c', 'hv-balloon-page_range_tree.c', 'hv-balloon-our_range_memslots.c'))
+system_ss.add(when: 'CONFIG_HV_BALLOON', if_false: files('hv-balloon-stub.c'))
diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c
index 065e12f..ac7e15f 100644
--- a/hw/hyperv/syndbg.c
+++ b/hw/hyperv/syndbg.c
@@ -10,11 +10,12 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
+#include "qemu/units.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/loader.h"
-#include "cpu.h"
+#include "exec/target_page.h"
#include "hw/hyperv/hyperv.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/hyperv/hyperv-proto.h"
@@ -183,12 +184,15 @@ static bool create_udp_pkt(HvSynDbg *syndbg, void *pkt, uint32_t pkt_len,
return true;
}
+#define MSG_BUFSZ (4 * KiB)
+
static uint16_t handle_recv_msg(HvSynDbg *syndbg, uint64_t outgpa,
uint32_t count, bool is_raw, uint32_t options,
uint64_t timeout, uint32_t *retrieved_count)
{
uint16_t ret;
- uint8_t data_buf[TARGET_PAGE_SIZE - UDP_PKT_HEADER_SIZE];
+ g_assert(MSG_BUFSZ >= qemu_target_page_size());
+ QEMU_UNINITIALIZED uint8_t data_buf[MSG_BUFSZ];
hwaddr out_len;
void *out_data;
ssize_t recv_byte_count;
@@ -201,7 +205,7 @@ static uint16_t handle_recv_msg(HvSynDbg *syndbg, uint64_t outgpa,
recv_byte_count = 0;
} else {
recv_byte_count = recv(syndbg->socket, data_buf,
- MIN(sizeof(data_buf), count), MSG_WAITALL);
+ MIN(MSG_BUFSZ, count), MSG_WAITALL);
if (recv_byte_count == -1) {
return HV_STATUS_INVALID_PARAMETER;
}
@@ -366,14 +370,13 @@ static const VMStateDescription vmstate_hv_syndbg = {
.unmigratable = 1,
};
-static Property hv_syndbg_properties[] = {
+static const Property hv_syndbg_properties[] = {
DEFINE_PROP_STRING("host_ip", HvSynDbg, host_ip),
DEFINE_PROP_UINT16("host_port", HvSynDbg, host_port, 50000),
DEFINE_PROP_BOOL("use_hcalls", HvSynDbg, use_hcalls, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void hv_syndbg_class_init(ObjectClass *klass, void *data)
+static void hv_syndbg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
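
handle_recv_msg() above swaps a TARGET_PAGE_SIZE-derived stack buffer for a fixed 4 KiB one, so the function no longer depends on per-target constants; the guest-supplied count is clamped with MIN() and the g_assert() documents that the buffer still covers a target page. A stripped-down sketch of the same bounded-receive pattern over a plain socket (hypothetical names, no QEMU helpers):

#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

#define HYP_BUFSZ 4096
#define HYP_MIN(a, b) ((a) < (b) ? (a) : (b))

static ssize_t hyp_bounded_recv(int fd, uint32_t count)
{
    uint8_t buf[HYP_BUFSZ];

    /* Clamp the untrusted count so recv() can never overrun buf. */
    return recv(fd, buf, HYP_MIN((size_t)HYP_BUFSZ, (size_t)count),
                MSG_WAITALL);
}
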
diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
index 490d805..961406c 100644
--- a/hw/hyperv/vmbus.c
+++ b/hw/hyperv/vmbus.c
@@ -10,6 +10,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "exec/target_page.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
@@ -18,7 +19,7 @@
#include "hw/hyperv/vmbus.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/sysbus.h"
-#include "cpu.h"
+#include "exec/target_page.h"
#include "trace.h"
enum {
@@ -1874,7 +1875,7 @@ static void send_create_gpadl(VMBus *vmbus)
}
}
- assert(false);
+ g_assert_not_reached();
}
static bool complete_create_gpadl(VMBus *vmbus)
@@ -1889,8 +1890,7 @@ static bool complete_create_gpadl(VMBus *vmbus)
}
}
- assert(false);
- return false;
+ g_assert_not_reached();
}
static void handle_gpadl_teardown(VMBus *vmbus,
@@ -1931,7 +1931,7 @@ static void send_teardown_gpadl(VMBus *vmbus)
}
}
- assert(false);
+ g_assert_not_reached();
}
static bool complete_teardown_gpadl(VMBus *vmbus)
@@ -1946,8 +1946,7 @@ static bool complete_teardown_gpadl(VMBus *vmbus)
}
}
- assert(false);
- return false;
+ g_assert_not_reached();
}
static void handle_open_channel(VMBus *vmbus, vmbus_message_open_channel *msg,
@@ -1996,7 +1995,7 @@ static void send_open_channel(VMBus *vmbus)
}
}
- assert(false);
+ g_assert_not_reached();
}
static bool complete_open_channel(VMBus *vmbus)
@@ -2020,8 +2019,7 @@ static bool complete_open_channel(VMBus *vmbus)
}
}
- assert(false);
- return false;
+ g_assert_not_reached();
}
static void vdev_reset_on_close(VMBusDevice *vdev)
@@ -2076,7 +2074,6 @@ static void send_unload(VMBus *vmbus)
qemu_mutex_unlock(&vmbus->rx_queue_lock);
post_msg(vmbus, &msg, sizeof(msg));
- return;
}
static bool complete_unload(VMBus *vmbus)
@@ -2349,20 +2346,19 @@ static void vmbus_dev_unrealize(DeviceState *dev)
free_channels(vdev);
}
-static Property vmbus_dev_props[] = {
+static const Property vmbus_dev_props[] = {
DEFINE_PROP_UUID("instanceid", VMBusDevice, instanceid),
- DEFINE_PROP_END_OF_LIST()
};
-static void vmbus_dev_class_init(ObjectClass *klass, void *data)
+static void vmbus_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *kdev = DEVICE_CLASS(klass);
device_class_set_props(kdev, vmbus_dev_props);
kdev->bus_type = TYPE_VMBUS;
kdev->realize = vmbus_dev_realize;
kdev->unrealize = vmbus_dev_unrealize;
- kdev->reset = vmbus_dev_reset;
+ device_class_set_legacy_reset(kdev, vmbus_dev_reset);
}
static void vmbus_dev_instance_init(Object *obj)
@@ -2473,7 +2469,7 @@ static char *vmbus_get_fw_dev_path(DeviceState *dev)
return g_strdup_printf("%s@%s", qdev_fw_name(dev), uuid);
}
-static void vmbus_class_init(ObjectClass *klass, void *data)
+static void vmbus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -2656,12 +2652,11 @@ static const VMStateDescription vmstate_vmbus_bridge = {
},
};
-static Property vmbus_bridge_props[] = {
+static const Property vmbus_bridge_props[] = {
DEFINE_PROP_UINT8("irq", VMBusBridge, irq, 7),
- DEFINE_PROP_END_OF_LIST()
};
-static void vmbus_bridge_class_init(ObjectClass *klass, void *data)
+static void vmbus_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
SysBusDeviceClass *sk = SYS_BUS_DEVICE_CLASS(klass);
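
hyperv_testdev.c and vmbus.c above convert the assert(false); return ...; tails into g_assert_not_reached(): the loops only fall through when a caller asks for an element it never created, and since g_assert_not_reached() never returns, the dummy return value can go. The shape, with a hypothetical lookup:

#include <glib.h>
#include <stdint.h>

typedef struct HypChannel {
    uint32_t id;
    struct HypChannel *next;
} HypChannel;

static HypChannel *hyp_find_channel(HypChannel *head, uint32_t id)
{
    for (HypChannel *c = head; c; c = c->next) {
        if (c->id == id) {
            return c;
        }
    }
    /* Callers only look up ids they created themselves; getting here is a
     * programming error, and g_assert_not_reached() does not return. */
    g_assert_not_reached();
}
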
diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c
index 16f1d6d..fe887e1 100644
--- a/hw/i2c/allwinner-i2c.c
+++ b/hw/i2c/allwinner-i2c.c
@@ -407,7 +407,7 @@ static const MemoryRegionOps allwinner_i2c_ops = {
.write = allwinner_i2c_write,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static const VMStateDescription allwinner_i2c_vmstate = {
@@ -438,7 +438,7 @@ static void allwinner_i2c_realize(DeviceState *dev, Error **errp)
s->bus = i2c_init_bus(dev, "i2c");
}
-static void allwinner_i2c_class_init(ObjectClass *klass, void *data)
+static void allwinner_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c
index b43afd2..83fb906 100644
--- a/hw/i2c/aspeed_i2c.c
+++ b/hw/i2c/aspeed_i2c.c
@@ -114,7 +114,10 @@ static uint64_t aspeed_i2c_bus_old_read(AspeedI2CBus *bus, hwaddr offset,
if (!aic->has_dma) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__);
value = -1;
+ break;
}
+
+ value = extract64(bus->dma_dram_offset, 0, 32);
break;
case A_I2CD_DMA_LEN:
if (!aic->has_dma) {
@@ -137,6 +140,7 @@ static uint64_t aspeed_i2c_bus_old_read(AspeedI2CBus *bus, hwaddr offset,
static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset,
unsigned size)
{
+ AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller);
uint64_t value = bus->regs[offset / sizeof(*bus->regs)];
switch (offset) {
@@ -150,9 +154,7 @@ static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset,
case A_I2CM_DMA_TX_ADDR:
case A_I2CM_DMA_RX_ADDR:
case A_I2CM_DMA_LEN_STS:
- case A_I2CC_DMA_ADDR:
case A_I2CC_DMA_LEN:
-
case A_I2CS_DEV_ADDR:
case A_I2CS_DMA_RX_ADDR:
case A_I2CS_DMA_LEN:
@@ -161,11 +163,24 @@ static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset,
case A_I2CS_DMA_LEN_STS:
/* Value is already set, don't do anything. */
break;
+ case A_I2CC_DMA_ADDR:
+ value = extract64(bus->dma_dram_offset, 0, 32);
+ break;
case A_I2CS_INTR_STS:
break;
case A_I2CM_CMD:
value = SHARED_FIELD_DP32(value, BUS_BUSY_STS, i2c_bus_busy(bus->bus));
break;
+ case A_I2CM_DMA_TX_ADDR_HI:
+ case A_I2CM_DMA_RX_ADDR_HI:
+ case A_I2CS_DMA_TX_ADDR_HI:
+ case A_I2CS_DMA_RX_ADDR_HI:
+ if (!aic->has_dma64) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No 64-bit DMA support\n",
+ __func__);
+ value = -1;
+ }
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset);
@@ -210,18 +225,18 @@ static int aspeed_i2c_dma_read(AspeedI2CBus *bus, uint8_t *data)
{
MemTxResult result;
AspeedI2CState *s = bus->controller;
- uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus);
uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus);
- result = address_space_read(&s->dram_as, bus->regs[reg_dma_addr],
+ result = address_space_read(&s->dram_as, bus->dma_dram_offset,
MEMTXATTRS_UNSPECIFIED, data, 1);
if (result != MEMTX_OK) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n",
- __func__, bus->regs[reg_dma_addr]);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: DRAM read failed @%" PRIx64 "\n",
+ __func__, bus->dma_dram_offset);
return -1;
}
- bus->regs[reg_dma_addr]++;
+ bus->dma_dram_offset++;
bus->regs[reg_dma_len]--;
return 0;
}
@@ -291,7 +306,6 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus)
uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus);
uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus);
uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus);
- uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus);
int pool_rx_count = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl,
RX_SIZE) + 1;
@@ -323,14 +337,17 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus)
data = i2c_recv(bus->bus);
trace_aspeed_i2c_bus_recv("DMA", bus->regs[reg_dma_len],
bus->regs[reg_dma_len], data);
- result = address_space_write(&s->dram_as, bus->regs[reg_dma_addr],
+
+ result = address_space_write(&s->dram_as, bus->dma_dram_offset,
MEMTXATTRS_UNSPECIFIED, &data, 1);
if (result != MEMTX_OK) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n",
- __func__, bus->regs[reg_dma_addr]);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: DRAM write failed @%" PRIx64 "\n",
+ __func__, bus->dma_dram_offset);
return;
}
- bus->regs[reg_dma_addr]++;
+
+ bus->dma_dram_offset++;
bus->regs[reg_dma_len]--;
/* In new mode, keep track of how many bytes we RXed */
if (aspeed_i2c_is_new_mode(bus->controller)) {
@@ -636,14 +653,18 @@ static void aspeed_i2c_bus_new_write(AspeedI2CBus *bus, hwaddr offset,
case A_I2CM_DMA_TX_ADDR:
bus->regs[R_I2CM_DMA_TX_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR,
ADDR);
- bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR);
+ bus->dma_dram_offset =
+ deposit64(bus->dma_dram_offset, 0, 32,
+ FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR));
bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN,
TX_BUF_LEN) + 1;
break;
case A_I2CM_DMA_RX_ADDR:
bus->regs[R_I2CM_DMA_RX_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR,
ADDR);
- bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR);
+ bus->dma_dram_offset =
+ deposit64(bus->dma_dram_offset, 0, 32,
+ FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR));
bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN,
RX_BUF_LEN) + 1;
break;
@@ -721,6 +742,56 @@ static void aspeed_i2c_bus_new_write(AspeedI2CBus *bus, hwaddr offset,
qemu_log_mask(LOG_UNIMP, "%s: Slave mode DMA TX is not implemented\n",
__func__);
break;
+
+ /*
+ * The AST2700 supports at most 8 GB of DRAM, so DRAM offsets
+ * range from 0x0_0000_0000 to 0x1_FFFF_FFFF and fit into
+ * bits [33:0] of dma_dram_offset.
+ * Therefore, store bits [1:0] of the Tx/Rx buffer high-address
+ * register as bits [33:32] of dma_dram_offset.
+ */
+ case A_I2CM_DMA_TX_ADDR_HI:
+ if (!aic->has_dma64) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No 64-bit DMA support\n",
+ __func__);
+ break;
+ }
+ bus->regs[R_I2CM_DMA_TX_ADDR_HI] = FIELD_EX32(value,
+ I2CM_DMA_TX_ADDR_HI,
+ ADDR_HI);
+ bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32,
+ extract32(value, 0, 2));
+ break;
+ case A_I2CM_DMA_RX_ADDR_HI:
+ if (!aic->has_dma64) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No 64-bit DMA support\n",
+ __func__);
+ break;
+ }
+ bus->regs[R_I2CM_DMA_RX_ADDR_HI] = FIELD_EX32(value,
+ I2CM_DMA_RX_ADDR_HI,
+ ADDR_HI);
+ bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32,
+ extract32(value, 0, 2));
+ break;
+ case A_I2CS_DMA_TX_ADDR_HI:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Slave mode DMA TX Addr high is not implemented\n",
+ __func__);
+ break;
+ case A_I2CS_DMA_RX_ADDR_HI:
+ if (!aic->has_dma64) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No 64-bit DMA support\n",
+ __func__);
+ break;
+ }
+ bus->regs[R_I2CS_DMA_RX_ADDR_HI] = FIELD_EX32(value,
+ I2CS_DMA_RX_ADDR_HI,
+ ADDR_HI);
+ bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32,
+ extract32(value, 0, 2));
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
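
The *_ADDR_HI cases above only latch bits [1:0] of the written value into bits [33:32] of bus->dma_dram_offset, matching the 8 GB limit spelled out in the comment; the low 32 bits keep coming from the regular DMA address registers. A standalone illustration of that composition, using plain shifts in place of the deposit64()/extract32() helpers:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of deposit64(off, 32, 32, extract32(hi_reg, 0, 2)) combined
 * with the 32-bit low address, spelled out with shifts and masks. */
static uint64_t hyp_compose_dma_offset(uint32_t lo, uint32_t hi_reg)
{
    return ((uint64_t)(hi_reg & 0x3) << 32) | lo;   /* bits [33:0] */
}

int main(void)
{
    /* HI register = 0x2, low address = 0x8000_0000 -> 0x2_8000_0000 */
    printf("0x%llx\n",
           (unsigned long long)hyp_compose_dma_offset(0x80000000u, 0x2u));
    return 0;
}
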
@@ -811,7 +882,8 @@ static void aspeed_i2c_bus_old_write(AspeedI2CBus *bus, hwaddr offset,
break;
}
- bus->regs[R_I2CD_DMA_ADDR] = value & 0x3ffffffc;
+ bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 0, 32,
+ value & 0x3ffffffc);
break;
case A_I2CD_DMA_LEN:
@@ -906,7 +978,7 @@ static const MemoryRegionOps aspeed_i2c_ctrl_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static uint64_t aspeed_i2c_pool_read(void *opaque, hwaddr offset,
+static uint64_t aspeed_i2c_share_pool_read(void *opaque, hwaddr offset,
unsigned size)
{
AspeedI2CState *s = opaque;
@@ -914,26 +986,61 @@ static uint64_t aspeed_i2c_pool_read(void *opaque, hwaddr offset,
int i;
for (i = 0; i < size; i++) {
- ret |= (uint64_t) s->pool[offset + i] << (8 * i);
+ ret |= (uint64_t) s->share_pool[offset + i] << (8 * i);
}
return ret;
}
-static void aspeed_i2c_pool_write(void *opaque, hwaddr offset,
+static void aspeed_i2c_share_pool_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
AspeedI2CState *s = opaque;
int i;
for (i = 0; i < size; i++) {
+ s->share_pool[offset + i] = (value >> (8 * i)) & 0xFF;
+ }
+}
+
+static const MemoryRegionOps aspeed_i2c_share_pool_ops = {
+ .read = aspeed_i2c_share_pool_read,
+ .write = aspeed_i2c_share_pool_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t aspeed_i2c_bus_pool_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ AspeedI2CBus *s = opaque;
+ uint64_t ret = 0;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ ret |= (uint64_t) s->pool[offset + i] << (8 * i);
+ }
+
+ return ret;
+}
+
+static void aspeed_i2c_bus_pool_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ AspeedI2CBus *s = opaque;
+ int i;
+
+ for (i = 0; i < size; i++) {
s->pool[offset + i] = (value >> (8 * i)) & 0xFF;
}
}
-static const MemoryRegionOps aspeed_i2c_pool_ops = {
- .read = aspeed_i2c_pool_read,
- .write = aspeed_i2c_pool_write,
+static const MemoryRegionOps aspeed_i2c_bus_pool_ops = {
+ .read = aspeed_i2c_bus_pool_read,
+ .write = aspeed_i2c_bus_pool_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
@@ -943,24 +1050,27 @@ static const MemoryRegionOps aspeed_i2c_pool_ops = {
static const VMStateDescription aspeed_i2c_bus_vmstate = {
.name = TYPE_ASPEED_I2C,
- .version_id = 5,
- .minimum_version_id = 5,
+ .version_id = 6,
+ .minimum_version_id = 6,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, AspeedI2CBus, ASPEED_I2C_NEW_NUM_REG),
+ VMSTATE_UINT8_ARRAY(pool, AspeedI2CBus, ASPEED_I2C_BUS_POOL_SIZE),
+ VMSTATE_UINT64(dma_dram_offset, AspeedI2CBus),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription aspeed_i2c_vmstate = {
.name = TYPE_ASPEED_I2C,
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(intr_status, AspeedI2CState),
VMSTATE_STRUCT_ARRAY(busses, AspeedI2CState,
ASPEED_I2C_NR_BUSSES, 1, aspeed_i2c_bus_vmstate,
AspeedI2CBus),
- VMSTATE_UINT8_ARRAY(pool, AspeedI2CState, ASPEED_I2C_MAX_POOL_SIZE),
+ VMSTATE_UINT8_ARRAY(share_pool, AspeedI2CState,
+ ASPEED_I2C_SHARE_POOL_SIZE),
VMSTATE_END_OF_LIST()
}
};
@@ -995,7 +1105,21 @@ static void aspeed_i2c_instance_init(Object *obj)
* 0x140 ... 0x17F: Device 5
* 0x180 ... 0x1BF: Device 6
* 0x1C0 ... 0x1FF: Device 7
- * 0x200 ... 0x2FF: Buffer Pool (unused in linux driver)
+ * 0x200 ... 0x20F: Device 1 buffer (unused by the AST2500 Linux driver)
+ * 0x210 ... 0x21F: Device 2 buffer
+ * 0x220 ... 0x22F: Device 3 buffer
+ * 0x230 ... 0x23F: Device 4 buffer
+ * 0x240 ... 0x24F: Device 5 buffer
+ * 0x250 ... 0x25F: Device 6 buffer
+ * 0x260 ... 0x26F: Device 7 buffer
+ * 0x270 ... 0x27F: Device 8 buffer
+ * 0x280 ... 0x28F: Device 9 buffer
+ * 0x290 ... 0x29F: Device 10 buffer
+ * 0x2A0 ... 0x2AF: Device 11 buffer
+ * 0x2B0 ... 0x2BF: Device 12 buffer
+ * 0x2C0 ... 0x2CF: Device 13 buffer
+ * 0x2D0 ... 0x2DF: Device 14 buffer
+ * 0x2E0 ... 0x2FF: Reserved
* 0x300 ... 0x33F: Device 8
* 0x340 ... 0x37F: Device 9
* 0x380 ... 0x3BF: Device 10
@@ -1003,7 +1127,77 @@ static void aspeed_i2c_instance_init(Object *obj)
* 0x400 ... 0x43F: Device 12
* 0x440 ... 0x47F: Device 13
* 0x480 ... 0x4BF: Device 14
- * 0x800 ... 0xFFF: Buffer Pool (unused in linux driver)
+ * 0x800 ... 0xFFF: Buffer Pool (unused by the AST2400 Linux driver)
+ *
+ * Address Definitions (AST2600 and AST1030)
+ * 0x000 ... 0x07F: Global Register
+ * 0x080 ... 0x0FF: Device 1
+ * 0x100 ... 0x17F: Device 2
+ * 0x180 ... 0x1FF: Device 3
+ * 0x200 ... 0x27F: Device 4
+ * 0x280 ... 0x2FF: Device 5
+ * 0x300 ... 0x37F: Device 6
+ * 0x380 ... 0x3FF: Device 7
+ * 0x400 ... 0x47F: Device 8
+ * 0x480 ... 0x4FF: Device 9
+ * 0x500 ... 0x57F: Device 10
+ * 0x580 ... 0x5FF: Device 11
+ * 0x600 ... 0x67F: Device 12
+ * 0x680 ... 0x6FF: Device 13
+ * 0x700 ... 0x77F: Device 14
+ * 0x780 ... 0x7FF: Device 15 (15 and 16 unused in AST1030)
+ * 0x800 ... 0x87F: Device 16
+ * 0xC00 ... 0xC1F: Device 1 buffer
+ * 0xC20 ... 0xC3F: Device 2 buffer
+ * 0xC40 ... 0xC5F: Device 3 buffer
+ * 0xC60 ... 0xC7F: Device 4 buffer
+ * 0xC80 ... 0xC9F: Device 5 buffer
+ * 0xCA0 ... 0xCBF: Device 6 buffer
+ * 0xCC0 ... 0xCDF: Device 7 buffer
+ * 0xCE0 ... 0xCFF: Device 8 buffer
+ * 0xD00 ... 0xD1F: Device 9 buffer
+ * 0xD20 ... 0xD3F: Device 10 buffer
+ * 0xD40 ... 0xD5F: Device 11 buffer
+ * 0xD60 ... 0xD7F: Device 12 buffer
+ * 0xD80 ... 0xD9F: Device 13 buffer
+ * 0xDA0 ... 0xDBF: Device 14 buffer
+ * 0xDC0 ... 0xDDF: Device 15 buffer (15 and 16 unused in AST1030)
+ * 0xDE0 ... 0xDFF: Device 16 buffer
+ *
+ * Address Definitions (AST2700)
+ * 0x000 ... 0x0FF: Global Register
+ * 0x100 ... 0x17F: Device 0
+ * 0x1A0 ... 0x1BF: Device 0 buffer
+ * 0x200 ... 0x27F: Device 1
+ * 0x2A0 ... 0x2BF: Device 1 buffer
+ * 0x300 ... 0x37F: Device 2
+ * 0x3A0 ... 0x3BF: Device 2 buffer
+ * 0x400 ... 0x47F: Device 3
+ * 0x4A0 ... 0x4BF: Device 3 buffer
+ * 0x500 ... 0x57F: Device 4
+ * 0x5A0 ... 0x5BF: Device 4 buffer
+ * 0x600 ... 0x67F: Device 5
+ * 0x6A0 ... 0x6BF: Device 5 buffer
+ * 0x700 ... 0x77F: Device 6
+ * 0x7A0 ... 0x7BF: Device 6 buffer
+ * 0x800 ... 0x87F: Device 7
+ * 0x8A0 ... 0x8BF: Device 7 buffer
+ * 0x900 ... 0x97F: Device 8
+ * 0x9A0 ... 0x9BF: Device 8 buffer
+ * 0xA00 ... 0xA7F: Device 9
+ * 0xAA0 ... 0xABF: Device 9 buffer
+ * 0xB00 ... 0xB7F: Device 10
+ * 0xBA0 ... 0xBBF: Device 10 buffer
+ * 0xC00 ... 0xC7F: Device 11
+ * 0xCA0 ... 0xCBF: Device 11 buffer
+ * 0xD00 ... 0xD7F: Device 12
+ * 0xDA0 ... 0xDBF: Device 12 buffer
+ * 0xE00 ... 0xE7F: Device 13
+ * 0xEA0 ... 0xEBF: Device 13 buffer
+ * 0xF00 ... 0xF7F: Device 14
+ * 0xFA0 ... 0xFBF: Device 14 buffer
+ * 0x1000 ... 0x107F: Device 15
+ * 0x10A0 ... 0x10BF: Device 15 buffer
*/
static void aspeed_i2c_realize(DeviceState *dev, Error **errp)
{
@@ -1011,10 +1205,12 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp)
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
AspeedI2CState *s = ASPEED_I2C(dev);
AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s);
+ uint32_t reg_offset = aic->reg_size + aic->reg_gap_size;
+ uint32_t pool_offset = aic->pool_size + aic->pool_gap_size;
sysbus_init_irq(sbd, &s->irq);
memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_i2c_ctrl_ops, s,
- "aspeed.i2c", 0x1000);
+ "aspeed.i2c", aic->mem_size);
sysbus_init_mmio(sbd, &s->iomem);
for (i = 0; i < aic->num_busses; i++) {
@@ -1033,13 +1229,23 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp)
return;
}
- memory_region_add_subregion(&s->iomem, aic->reg_size * (i + offset),
+ memory_region_add_subregion(&s->iomem, reg_offset * (i + offset),
&s->busses[i].mr);
}
- memory_region_init_io(&s->pool_iomem, OBJECT(s), &aspeed_i2c_pool_ops, s,
- "aspeed.i2c-pool", aic->pool_size);
- memory_region_add_subregion(&s->iomem, aic->pool_base, &s->pool_iomem);
+ if (aic->has_share_pool) {
+ memory_region_init_io(&s->pool_iomem, OBJECT(s),
+ &aspeed_i2c_share_pool_ops, s,
+ "aspeed.i2c-share-pool", aic->pool_size);
+ memory_region_add_subregion(&s->iomem, aic->pool_base,
+ &s->pool_iomem);
+ } else {
+ for (i = 0; i < aic->num_busses; i++) {
+ memory_region_add_subregion(&s->iomem,
+ aic->pool_base + (pool_offset * i),
+ &s->busses[i].mr_pool);
+ }
+ }
if (aic->has_dma) {
if (!s->dram_mr) {
@@ -1052,18 +1258,17 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp)
}
}
-static Property aspeed_i2c_properties[] = {
+static const Property aspeed_i2c_properties[] = {
DEFINE_PROP_LINK("dram", AspeedI2CState, dram_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_i2c_class_init(ObjectClass *klass, void *data)
+static void aspeed_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &aspeed_i2c_vmstate;
- dc->reset = aspeed_i2c_reset;
+ device_class_set_legacy_reset(dc, aspeed_i2c_reset);
device_class_set_props(dc, aspeed_i2c_properties);
dc->realize = aspeed_i2c_realize;
dc->desc = "Aspeed I2C Controller";
@@ -1090,8 +1295,9 @@ static int aspeed_i2c_bus_new_slave_event(AspeedI2CBus *bus,
return -1;
}
ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN, 0);
- bus->regs[R_I2CC_DMA_ADDR] =
- ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_RX_ADDR, ADDR);
+ bus->dma_dram_offset =
+ deposit64(bus->dma_dram_offset, 0, 32,
+ ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_RX_ADDR, ADDR));
bus->regs[R_I2CC_DMA_LEN] =
ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN, RX_BUF_LEN) + 1;
i2c_ack(bus->bus);
@@ -1157,10 +1363,10 @@ static int aspeed_i2c_bus_slave_event(I2CSlave *slave, enum i2c_event event)
static void aspeed_i2c_bus_new_slave_send_async(AspeedI2CBus *bus, uint8_t data)
{
assert(address_space_write(&bus->controller->dram_as,
- bus->regs[R_I2CC_DMA_ADDR],
+ bus->dma_dram_offset,
MEMTXATTRS_UNSPECIFIED, &data, 1) == MEMTX_OK);
- bus->regs[R_I2CC_DMA_ADDR]++;
+ bus->dma_dram_offset++;
bus->regs[R_I2CC_DMA_LEN]--;
ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN,
ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN) + 1);
@@ -1184,7 +1390,8 @@ static void aspeed_i2c_bus_slave_send_async(I2CSlave *slave, uint8_t data)
aspeed_i2c_bus_raise_interrupt(bus);
}
-static void aspeed_i2c_bus_slave_class_init(ObjectClass *klass, void *data)
+static void aspeed_i2c_bus_slave_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
@@ -1215,6 +1422,7 @@ static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp)
AspeedI2CBus *s = ASPEED_I2C_BUS(dev);
AspeedI2CClass *aic;
g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I2C_BUS ".%d", s->id);
+ g_autofree char *pool_name = g_strdup_printf("%s.pool", name);
if (!s->controller) {
error_setg(errp, TYPE_ASPEED_I2C_BUS ": 'controller' link not set");
@@ -1232,22 +1440,25 @@ static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i2c_bus_ops,
s, name, aic->reg_size);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr);
+
+ memory_region_init_io(&s->mr_pool, OBJECT(s), &aspeed_i2c_bus_pool_ops,
+ s, pool_name, aic->pool_size);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr_pool);
}
-static Property aspeed_i2c_bus_properties[] = {
+static const Property aspeed_i2c_bus_properties[] = {
DEFINE_PROP_UINT8("bus-id", AspeedI2CBus, id, 0),
DEFINE_PROP_LINK("controller", AspeedI2CBus, controller, TYPE_ASPEED_I2C,
AspeedI2CState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_i2c_bus_class_init(ObjectClass *klass, void *data)
+static void aspeed_i2c_bus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "Aspeed I2C Bus";
dc->realize = aspeed_i2c_bus_realize;
- dc->reset = aspeed_i2c_bus_reset;
+ device_class_set_legacy_reset(dc, aspeed_i2c_bus_reset);
device_class_set_props(dc, aspeed_i2c_bus_properties);
}
@@ -1266,13 +1477,14 @@ static qemu_irq aspeed_2400_i2c_bus_get_irq(AspeedI2CBus *bus)
static uint8_t *aspeed_2400_i2c_bus_pool_base(AspeedI2CBus *bus)
{
uint8_t *pool_page =
- &bus->controller->pool[ARRAY_FIELD_EX32(bus->regs, I2CD_FUN_CTRL,
- POOL_PAGE_SEL) * 0x100];
+ &bus->controller->share_pool[ARRAY_FIELD_EX32(bus->regs,
+ I2CD_FUN_CTRL,
+ POOL_PAGE_SEL) * 0x100];
return &pool_page[ARRAY_FIELD_EX32(bus->regs, I2CD_POOL_CTRL, OFFSET)];
}
-static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass);
@@ -1283,9 +1495,11 @@ static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data)
aic->reg_size = 0x40;
aic->gap = 7;
aic->bus_get_irq = aspeed_2400_i2c_bus_get_irq;
+ aic->has_share_pool = true;
aic->pool_size = 0x800;
aic->pool_base = 0x800;
aic->bus_pool_base = aspeed_2400_i2c_bus_pool_base;
+ aic->mem_size = 0x1000;
}
static const TypeInfo aspeed_2400_i2c_info = {
@@ -1301,10 +1515,10 @@ static qemu_irq aspeed_2500_i2c_bus_get_irq(AspeedI2CBus *bus)
static uint8_t *aspeed_2500_i2c_bus_pool_base(AspeedI2CBus *bus)
{
- return &bus->controller->pool[bus->id * 0x10];
+ return bus->pool;
}
-static void aspeed_2500_i2c_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass);
@@ -1315,11 +1529,12 @@ static void aspeed_2500_i2c_class_init(ObjectClass *klass, void *data)
aic->reg_size = 0x40;
aic->gap = 7;
aic->bus_get_irq = aspeed_2500_i2c_bus_get_irq;
- aic->pool_size = 0x100;
+ aic->pool_size = 0x10;
aic->pool_base = 0x200;
aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base;
aic->check_sram = true;
aic->has_dma = true;
+ aic->mem_size = 0x1000;
}
static const TypeInfo aspeed_2500_i2c_info = {
@@ -1333,12 +1548,7 @@ static qemu_irq aspeed_2600_i2c_bus_get_irq(AspeedI2CBus *bus)
return bus->irq;
}
-static uint8_t *aspeed_2600_i2c_bus_pool_base(AspeedI2CBus *bus)
-{
- return &bus->controller->pool[bus->id * 0x20];
-}
-
-static void aspeed_2600_i2c_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass);
@@ -1349,10 +1559,11 @@ static void aspeed_2600_i2c_class_init(ObjectClass *klass, void *data)
aic->reg_size = 0x80;
aic->gap = -1; /* no gap */
aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq;
- aic->pool_size = 0x200;
+ aic->pool_size = 0x20;
aic->pool_base = 0xC00;
- aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base;
+ aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base;
aic->has_dma = true;
+ aic->mem_size = 0x1000;
}
static const TypeInfo aspeed_2600_i2c_info = {
@@ -1361,7 +1572,7 @@ static const TypeInfo aspeed_2600_i2c_info = {
.class_init = aspeed_2600_i2c_class_init,
};
-static void aspeed_1030_i2c_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass);
@@ -1372,10 +1583,11 @@ static void aspeed_1030_i2c_class_init(ObjectClass *klass, void *data)
aic->reg_size = 0x80;
aic->gap = -1; /* no gap */
aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq;
- aic->pool_size = 0x200;
+ aic->pool_size = 0x20;
aic->pool_base = 0xC00;
- aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base;
+ aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base;
aic->has_dma = true;
+ aic->mem_size = 0x10000;
}
static const TypeInfo aspeed_1030_i2c_info = {
@@ -1384,6 +1596,33 @@ static const TypeInfo aspeed_1030_i2c_info = {
.class_init = aspeed_1030_i2c_class_init,
};
+static void aspeed_2700_i2c_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 I2C Controller";
+
+ aic->num_busses = 16;
+ aic->reg_size = 0x80;
+ aic->reg_gap_size = 0x80;
+ aic->gap = -1; /* no gap */
+ aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq;
+ aic->pool_size = 0x20;
+ aic->pool_gap_size = 0xe0;
+ aic->pool_base = 0x1a0;
+ aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base;
+ aic->has_dma = true;
+ aic->mem_size = 0x2000;
+ aic->has_dma64 = true;
+}
+
+static const TypeInfo aspeed_2700_i2c_info = {
+ .name = TYPE_ASPEED_2700_I2C,
+ .parent = TYPE_ASPEED_I2C,
+ .class_init = aspeed_2700_i2c_class_init,
+};
+
static void aspeed_i2c_register_types(void)
{
type_register_static(&aspeed_i2c_bus_info);
@@ -1393,6 +1632,7 @@ static void aspeed_i2c_register_types(void)
type_register_static(&aspeed_2500_i2c_info);
type_register_static(&aspeed_2600_i2c_info);
type_register_static(&aspeed_1030_i2c_info);
+ type_register_static(&aspeed_2700_i2c_info);
}
type_init(aspeed_i2c_register_types)
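
The realize hunk derives each bus's register base from reg_size + reg_gap_size and each buffer base from pool_base plus pool_size + pool_gap_size. With the AST2700 values set in aspeed_2700_i2c_class_init (0x80 + 0x80 registers, 0x20 + 0xe0 buffers, pool_base 0x1a0), that reproduces the AST2700 address map in the comment above: device i registers at 0x100 + i * 0x100 and its buffer at 0x1a0 + i * 0x100. A small standalone check (the i + 1 register slot, i.e. the global register block occupying the first slot, is assumed here and not visible in the shown hunks):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t reg_offset  = 0x80 + 0x80;   /* reg_size + reg_gap_size   */
    const uint32_t pool_offset = 0x20 + 0xe0;   /* pool_size + pool_gap_size */
    const uint32_t pool_base   = 0x1a0;

    for (int i = 0; i < 16; i++) {
        printf("Device %2d: regs 0x%04x..0x%04x, buffer 0x%04x..0x%04x\n", i,
               reg_offset * (i + 1), reg_offset * (i + 1) + 0x7f,
               pool_base + pool_offset * i,
               pool_base + pool_offset * i + 0x1f);
    }
    return 0;
}
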
diff --git a/hw/i2c/bcm2835_i2c.c b/hw/i2c/bcm2835_i2c.c
index 20ec46e..be11cca 100644
--- a/hw/i2c/bcm2835_i2c.c
+++ b/hw/i2c/bcm2835_i2c.c
@@ -258,11 +258,11 @@ static const VMStateDescription vmstate_bcm2835_i2c = {
}
};
-static void bcm2835_i2c_class_init(ObjectClass *klass, void *data)
+static void bcm2835_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_i2c_reset;
+ device_class_set_legacy_reset(dc, bcm2835_i2c_reset);
dc->realize = bcm2835_i2c_realize;
dc->vmsd = &vmstate_bcm2835_i2c;
}
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
index de5f5aa..e020f31 100644
--- a/hw/i2c/bitbang_i2c.c
+++ b/hw/i2c/bitbang_i2c.c
@@ -222,7 +222,7 @@ static void gpio_i2c_init(Object *obj)
qdev_init_gpio_out(dev, &s->out, 1);
}
-static void gpio_i2c_class_init(ObjectClass *klass, void *data)
+static void gpio_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/i2c/core.c b/hw/i2c/core.c
index 4cf30b2..4b6345b 100644
--- a/hw/i2c/core.c
+++ b/hw/i2c/core.c
@@ -18,9 +18,8 @@
#define I2C_BROADCAST 0x00
-static Property i2c_props[] = {
+static const Property i2c_props[] = {
DEFINE_PROP_UINT8("address", struct I2CSlave, address, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const TypeInfo i2c_bus_info = {
@@ -402,7 +401,7 @@ static bool i2c_slave_match(I2CSlave *candidate, uint8_t address,
return false;
}
-static void i2c_slave_class_init(ObjectClass *klass, void *data)
+static void i2c_slave_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
diff --git a/hw/i2c/exynos4210_i2c.c b/hw/i2c/exynos4210_i2c.c
index 9445424..9d0c1cd 100644
--- a/hw/i2c/exynos4210_i2c.c
+++ b/hw/i2c/exynos4210_i2c.c
@@ -309,12 +309,12 @@ static void exynos4210_i2c_init(Object *obj)
s->bus = i2c_init_bus(dev, "i2c");
}
-static void exynos4210_i2c_class_init(ObjectClass *klass, void *data)
+static void exynos4210_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &exynos4210_i2c_vmstate;
- dc->reset = exynos4210_i2c_reset;
+ device_class_set_legacy_reset(dc, exynos4210_i2c_reset);
}
static const TypeInfo exynos4210_i2c_type_info = {
diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c
index db5db95..a8ef640 100644
--- a/hw/i2c/i2c_mux_pca954x.c
+++ b/hw/i2c/i2c_mux_pca954x.c
@@ -172,13 +172,13 @@ I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel)
return pca954x->bus[channel];
}
-static void pca9546_class_init(ObjectClass *klass, void *data)
+static void pca9546_class_init(ObjectClass *klass, const void *data)
{
Pca954xClass *s = PCA954X_CLASS(klass);
s->nchans = PCA9546_CHANNEL_COUNT;
}
-static void pca9548_class_init(ObjectClass *klass, void *data)
+static void pca9548_class_init(ObjectClass *klass, const void *data)
{
Pca954xClass *s = PCA954X_CLASS(klass);
s->nchans = PCA9548_CHANNEL_COUNT;
@@ -211,12 +211,11 @@ static void pca954x_init(Object *obj)
}
}
-static Property pca954x_props[] = {
+static const Property pca954x_props[] = {
DEFINE_PROP_STRING("name", Pca954xState, name),
- DEFINE_PROP_END_OF_LIST()
};
-static void pca954x_class_init(ObjectClass *klass, void *data)
+static void pca954x_class_init(ObjectClass *klass, const void *data)
{
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c
index a25676f..d26177c 100644
--- a/hw/i2c/imx_i2c.c
+++ b/hw/i2c/imx_i2c.c
@@ -25,18 +25,7 @@
#include "hw/i2c/i2c.h"
#include "qemu/log.h"
#include "qemu/module.h"
-
-#ifndef DEBUG_IMX_I2C
-#define DEBUG_IMX_I2C 0
-#endif
-
-#define DPRINTF(fmt, args...) \
- do { \
- if (DEBUG_IMX_I2C) { \
- fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_I2C, \
- __func__, ##args); \
- } \
- } while (0)
+#include "trace.h"
static const char *imx_i2c_get_regname(unsigned offset)
{
@@ -90,13 +79,12 @@ static void imx_i2c_reset(DeviceState *dev)
static inline void imx_i2c_raise_interrupt(IMXI2CState *s)
{
- /*
- * raise an interrupt if the device is enabled and it is configured
- * to generate some interrupts.
- */
- if (imx_i2c_is_enabled(s) && imx_i2c_interrupt_is_enabled(s)) {
+ if (imx_i2c_is_enabled(s)) {
s->i2sr |= I2SR_IIF;
- qemu_irq_raise(s->irq);
+
+ if (imx_i2c_interrupt_is_enabled(s)) {
+ qemu_irq_raise(s->irq);
+ }
}
}
@@ -152,8 +140,8 @@ static uint64_t imx_i2c_read(void *opaque, hwaddr offset,
break;
}
- DPRINTF("read %s [0x%" HWADDR_PRIx "] -> 0x%02x\n",
- imx_i2c_get_regname(offset), offset, value);
+ trace_imx_i2c_read(DEVICE(s)->canonical_path, imx_i2c_get_regname(offset),
+ offset, value);
return (uint64_t)value;
}
@@ -163,8 +151,8 @@ static void imx_i2c_write(void *opaque, hwaddr offset,
{
IMXI2CState *s = IMX_I2C(opaque);
- DPRINTF("write %s [0x%" HWADDR_PRIx "] <- 0x%02x\n",
- imx_i2c_get_regname(offset), offset, (int)value);
+ trace_imx_i2c_write(DEVICE(s)->canonical_path, imx_i2c_get_regname(offset),
+ offset, value);
value &= 0xff;
@@ -308,12 +296,12 @@ static void imx_i2c_realize(DeviceState *dev, Error **errp)
s->bus = i2c_init_bus(dev, NULL);
}
-static void imx_i2c_class_init(ObjectClass *klass, void *data)
+static void imx_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &imx_i2c_vmstate;
- dc->reset = imx_i2c_reset;
+ device_class_set_legacy_reset(dc, imx_i2c_reset);
dc->realize = imx_i2c_realize;
dc->desc = "i.MX I2C Controller";
}
diff --git a/hw/i2c/microbit_i2c.c b/hw/i2c/microbit_i2c.c
index 24d36d1..2291d63 100644
--- a/hw/i2c/microbit_i2c.c
+++ b/hw/i2c/microbit_i2c.c
@@ -105,12 +105,12 @@ static void microbit_i2c_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void microbit_i2c_class_init(ObjectClass *klass, void *data)
+static void microbit_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &microbit_i2c_vmstate;
- dc->reset = microbit_i2c_reset;
+ device_class_set_legacy_reset(dc, microbit_i2c_reset);
dc->realize = microbit_i2c_realize;
dc->desc = "Microbit I2C controller";
}
diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c
index cb051a5..25f91b7 100644
--- a/hw/i2c/mpc_i2c.c
+++ b/hw/i2c/mpc_i2c.c
@@ -20,10 +20,10 @@
#include "qemu/osdep.h"
#include "hw/i2c/i2c.h"
#include "hw/irq.h"
-#include "qemu/module.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qom/object.h"
+#include "trace.h"
/* #define DEBUG_I2C */
@@ -82,7 +82,7 @@ struct MPCI2CState {
uint8_t cr;
uint8_t sr;
uint8_t dr;
- uint8_t dfssr;
+ uint8_t dfsrr;
};
static bool mpc_i2c_is_enabled(MPCI2CState *s)
@@ -224,8 +224,8 @@ static uint64_t mpc_i2c_read(void *opaque, hwaddr addr, unsigned size)
break;
}
- DPRINTF("%s: addr " HWADDR_FMT_plx " %02" PRIx32 "\n", __func__,
- addr, value);
+ trace_mpc_i2c_read(addr, value);
+
return (uint64_t)value;
}
@@ -234,8 +234,8 @@ static void mpc_i2c_write(void *opaque, hwaddr addr,
{
MPCI2CState *s = opaque;
- DPRINTF("%s: addr " HWADDR_FMT_plx " val %08" PRIx64 "\n", __func__,
- addr, value);
+ trace_mpc_i2c_write(addr, value);
+
switch (addr) {
case MPC_I2C_ADR:
s->adr = value & CADR_MASK;
@@ -293,7 +293,7 @@ static void mpc_i2c_write(void *opaque, hwaddr addr,
}
break;
case MPC_I2C_DFSRR:
- s->dfssr = value;
+ s->dfsrr = value;
break;
default:
DPRINTF("ERROR: Bad write addr 0x%x\n", (unsigned int)addr);
@@ -319,7 +319,7 @@ static const VMStateDescription mpc_i2c_vmstate = {
VMSTATE_UINT8(cr, MPCI2CState),
VMSTATE_UINT8(sr, MPCI2CState),
VMSTATE_UINT8(dr, MPCI2CState),
- VMSTATE_UINT8(dfssr, MPCI2CState),
+ VMSTATE_UINT8(dfsrr, MPCI2CState),
VMSTATE_END_OF_LIST()
}
};
@@ -329,31 +329,28 @@ static void mpc_i2c_realize(DeviceState *dev, Error **errp)
MPCI2CState *i2c = MPC_I2C(dev);
sysbus_init_irq(SYS_BUS_DEVICE(dev), &i2c->irq);
memory_region_init_io(&i2c->iomem, OBJECT(i2c), &i2c_ops, i2c,
- "mpc-i2c", 0x14);
+ "mpc-i2c", 0x15);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &i2c->iomem);
i2c->bus = i2c_init_bus(dev, "i2c");
}
-static void mpc_i2c_class_init(ObjectClass *klass, void *data)
+static void mpc_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &mpc_i2c_vmstate ;
- dc->reset = mpc_i2c_reset;
+ device_class_set_legacy_reset(dc, mpc_i2c_reset);
dc->realize = mpc_i2c_realize;
dc->desc = "MPC I2C Controller";
}
-static const TypeInfo mpc_i2c_type_info = {
- .name = TYPE_MPC_I2C,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(MPCI2CState),
- .class_init = mpc_i2c_class_init,
+static const TypeInfo mpc_i2c_types[] = {
+ {
+ .name = TYPE_MPC_I2C,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MPCI2CState),
+ .class_init = mpc_i2c_class_init,
+ },
};
-static void mpc_i2c_register_types(void)
-{
- type_register_static(&mpc_i2c_type_info);
-}
-
-type_init(mpc_i2c_register_types)
+DEFINE_TYPES(mpc_i2c_types)
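
mpc_i2c.c here (and smbus_eeprom.c further down) replace the hand-written register_types()/type_init() pair with a TypeInfo array handed to DEFINE_TYPES(), which generates the same registration code. The minimal shape, with a hypothetical sysbus device:

static const TypeInfo hyp_i2c_dev_types[] = {
    {
        .name          = "hyp-i2c-dev",
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(HypI2CDevState),
        .class_init    = hyp_i2c_dev_class_init,
    },
};

/* Expands to the register-types function and the type_init() call that
 * the removed boilerplate spelled out by hand. */
DEFINE_TYPES(hyp_i2c_dev_types)
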
diff --git a/hw/i2c/npcm7xx_smbus.c b/hw/i2c/npcm7xx_smbus.c
index 22d68fc..179852a 100644
--- a/hw/i2c/npcm7xx_smbus.c
+++ b/hw/i2c/npcm7xx_smbus.c
@@ -1075,7 +1075,7 @@ static const VMStateDescription vmstate_npcm7xx_smbus = {
},
};
-static void npcm7xx_smbus_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_smbus_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c
index e5d205d..751bf74 100644
--- a/hw/i2c/omap_i2c.c
+++ b/hw/i2c/omap_i2c.c
@@ -55,16 +55,16 @@ struct OMAPI2CState {
uint16_t test;
};
-#define OMAP2_INTR_REV 0x34
-#define OMAP2_GC_REV 0x34
+#define OMAP2_INTR_REV 0x34
+#define OMAP2_GC_REV 0x34
static void omap_i2c_interrupts_update(OMAPI2CState *s)
{
qemu_set_irq(s->irq, s->stat & s->mask);
- if ((s->dma >> 15) & 1) /* RDMA_EN */
- qemu_set_irq(s->drq[0], (s->stat >> 3) & 1); /* RRDY */
- if ((s->dma >> 7) & 1) /* XDMA_EN */
- qemu_set_irq(s->drq[1], (s->stat >> 4) & 1); /* XRDY */
+ if ((s->dma >> 15) & 1) /* RDMA_EN */
+ qemu_set_irq(s->drq[0], (s->stat >> 3) & 1); /* RRDY */
+ if ((s->dma >> 7) & 1) /* XDMA_EN */
+ qemu_set_irq(s->drq[1], (s->stat >> 4) & 1); /* XRDY */
}
static void omap_i2c_fifo_run(OMAPI2CState *s)
@@ -74,25 +74,25 @@ static void omap_i2c_fifo_run(OMAPI2CState *s)
if (!i2c_bus_busy(s->bus))
return;
- if ((s->control >> 2) & 1) { /* RM */
- if ((s->control >> 1) & 1) { /* STP */
+ if ((s->control >> 2) & 1) { /* RM */
+ if ((s->control >> 1) & 1) { /* STP */
i2c_end_transfer(s->bus);
- s->control &= ~(1 << 1); /* STP */
+ s->control &= ~(1 << 1); /* STP */
s->count_cur = s->count;
s->txlen = 0;
- } else if ((s->control >> 9) & 1) { /* TRX */
+ } else if ((s->control >> 9) & 1) { /* TRX */
while (ack && s->txlen)
ack = (i2c_send(s->bus,
(s->fifo >> ((-- s->txlen) << 3)) &
0xff) >= 0);
- s->stat |= 1 << 4; /* XRDY */
+ s->stat |= 1 << 4; /* XRDY */
} else {
while (s->rxlen < 4)
s->fifo |= i2c_recv(s->bus) << ((s->rxlen ++) << 3);
- s->stat |= 1 << 3; /* RRDY */
+ s->stat |= 1 << 3; /* RRDY */
}
} else {
- if ((s->control >> 9) & 1) { /* TRX */
+ if ((s->control >> 9) & 1) { /* TRX */
while (ack && s->count_cur && s->txlen) {
ack = (i2c_send(s->bus,
(s->fifo >> ((-- s->txlen) << 3)) &
@@ -100,12 +100,12 @@ static void omap_i2c_fifo_run(OMAPI2CState *s)
s->count_cur --;
}
if (ack && s->count_cur)
- s->stat |= 1 << 4; /* XRDY */
+ s->stat |= 1 << 4; /* XRDY */
else
- s->stat &= ~(1 << 4); /* XRDY */
+ s->stat &= ~(1 << 4); /* XRDY */
if (!s->count_cur) {
- s->stat |= 1 << 2; /* ARDY */
- s->control &= ~(1 << 10); /* MST */
+ s->stat |= 1 << 2; /* ARDY */
+ s->control &= ~(1 << 10); /* MST */
}
} else {
while (s->count_cur && s->rxlen < 4) {
@@ -113,26 +113,26 @@ static void omap_i2c_fifo_run(OMAPI2CState *s)
s->count_cur --;
}
if (s->rxlen)
- s->stat |= 1 << 3; /* RRDY */
+ s->stat |= 1 << 3; /* RRDY */
else
- s->stat &= ~(1 << 3); /* RRDY */
+ s->stat &= ~(1 << 3); /* RRDY */
}
if (!s->count_cur) {
- if ((s->control >> 1) & 1) { /* STP */
+ if ((s->control >> 1) & 1) { /* STP */
i2c_end_transfer(s->bus);
- s->control &= ~(1 << 1); /* STP */
+ s->control &= ~(1 << 1); /* STP */
s->count_cur = s->count;
s->txlen = 0;
} else {
- s->stat |= 1 << 2; /* ARDY */
- s->control &= ~(1 << 10); /* MST */
+ s->stat |= 1 << 2; /* ARDY */
+ s->control &= ~(1 << 10); /* MST */
}
}
}
- s->stat |= (!ack) << 1; /* NACK */
+ s->stat |= (!ack) << 1; /* NACK */
if (!ack)
- s->control &= ~(1 << 1); /* STP */
+ s->control &= ~(1 << 1); /* STP */
}
static void omap_i2c_reset(DeviceState *dev)
@@ -163,16 +163,16 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr)
uint16_t ret;
switch (offset) {
- case 0x00: /* I2C_REV */
- return s->revision; /* REV */
+ case 0x00: /* I2C_REV */
+ return s->revision; /* REV */
- case 0x04: /* I2C_IE */
+ case 0x04: /* I2C_IE */
return s->mask;
- case 0x08: /* I2C_STAT */
+ case 0x08: /* I2C_STAT */
return s->stat | (i2c_bus_busy(s->bus) << 12);
- case 0x0c: /* I2C_IV */
+ case 0x0c: /* I2C_IV */
if (s->revision >= OMAP2_INTR_REV)
break;
ret = ctz32(s->stat & s->mask);
@@ -185,18 +185,18 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr)
omap_i2c_interrupts_update(s);
return ret;
- case 0x10: /* I2C_SYSS */
- return (s->control >> 15) & 1; /* I2C_EN */
+ case 0x10: /* I2C_SYSS */
+ return (s->control >> 15) & 1; /* I2C_EN */
- case 0x14: /* I2C_BUF */
+ case 0x14: /* I2C_BUF */
return s->dma;
- case 0x18: /* I2C_CNT */
- return s->count_cur; /* DCOUNT */
+ case 0x18: /* I2C_CNT */
+ return s->count_cur; /* DCOUNT */
- case 0x1c: /* I2C_DATA */
+ case 0x1c: /* I2C_DATA */
ret = 0;
- if (s->control & (1 << 14)) { /* BE */
+ if (s->control & (1 << 14)) { /* BE */
ret |= ((s->fifo >> 0) & 0xff) << 8;
ret |= ((s->fifo >> 8) & 0xff) << 0;
} else {
@@ -204,7 +204,7 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr)
ret |= ((s->fifo >> 0) & 0xff) << 0;
}
if (s->rxlen == 1) {
- s->stat |= 1 << 15; /* SBD */
+ s->stat |= 1 << 15; /* SBD */
s->rxlen = 0;
} else if (s->rxlen > 1) {
if (s->rxlen > 2)
@@ -214,41 +214,41 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr)
/* XXX: remote access (qualifier) error - what's that? */
}
if (!s->rxlen) {
- s->stat &= ~(1 << 3); /* RRDY */
- if (((s->control >> 10) & 1) && /* MST */
- ((~s->control >> 9) & 1)) { /* TRX */
- s->stat |= 1 << 2; /* ARDY */
- s->control &= ~(1 << 10); /* MST */
+ s->stat &= ~(1 << 3); /* RRDY */
+ if (((s->control >> 10) & 1) && /* MST */
+ ((~s->control >> 9) & 1)) { /* TRX */
+ s->stat |= 1 << 2; /* ARDY */
+ s->control &= ~(1 << 10); /* MST */
}
}
- s->stat &= ~(1 << 11); /* ROVR */
+ s->stat &= ~(1 << 11); /* ROVR */
omap_i2c_fifo_run(s);
omap_i2c_interrupts_update(s);
return ret;
- case 0x20: /* I2C_SYSC */
+ case 0x20: /* I2C_SYSC */
return 0;
- case 0x24: /* I2C_CON */
+ case 0x24: /* I2C_CON */
return s->control;
- case 0x28: /* I2C_OA */
+ case 0x28: /* I2C_OA */
return s->addr[0];
- case 0x2c: /* I2C_SA */
+ case 0x2c: /* I2C_SA */
return s->addr[1];
- case 0x30: /* I2C_PSC */
+ case 0x30: /* I2C_PSC */
return s->divider;
- case 0x34: /* I2C_SCLL */
+ case 0x34: /* I2C_SCLL */
return s->times[0];
- case 0x38: /* I2C_SCLH */
+ case 0x38: /* I2C_SCLH */
return s->times[1];
- case 0x3c: /* I2C_SYSTEST */
- if (s->test & (1 << 15)) { /* ST_EN */
+ case 0x3c: /* I2C_SYSTEST */
+ if (s->test & (1 << 15)) { /* ST_EN */
s->test ^= 0xa;
return s->test;
} else
@@ -267,17 +267,17 @@ static void omap_i2c_write(void *opaque, hwaddr addr,
int nack;
switch (offset) {
- case 0x00: /* I2C_REV */
- case 0x0c: /* I2C_IV */
- case 0x10: /* I2C_SYSS */
+ case 0x00: /* I2C_REV */
+ case 0x0c: /* I2C_IV */
+ case 0x10: /* I2C_SYSS */
OMAP_RO_REG(addr);
return;
- case 0x04: /* I2C_IE */
+ case 0x04: /* I2C_IE */
s->mask = value & (s->revision < OMAP2_GC_REV ? 0x1f : 0x3f);
break;
- case 0x08: /* I2C_STAT */
+ case 0x08: /* I2C_STAT */
if (s->revision < OMAP2_INTR_REV) {
OMAP_RO_REG(addr);
return;
@@ -288,40 +288,40 @@ static void omap_i2c_write(void *opaque, hwaddr addr,
omap_i2c_interrupts_update(s);
break;
- case 0x14: /* I2C_BUF */
+ case 0x14: /* I2C_BUF */
s->dma = value & 0x8080;
- if (value & (1 << 15)) /* RDMA_EN */
- s->mask &= ~(1 << 3); /* RRDY_IE */
- if (value & (1 << 7)) /* XDMA_EN */
- s->mask &= ~(1 << 4); /* XRDY_IE */
+ if (value & (1 << 15)) /* RDMA_EN */
+ s->mask &= ~(1 << 3); /* RRDY_IE */
+ if (value & (1 << 7)) /* XDMA_EN */
+ s->mask &= ~(1 << 4); /* XRDY_IE */
break;
- case 0x18: /* I2C_CNT */
- s->count = value; /* DCOUNT */
+ case 0x18: /* I2C_CNT */
+ s->count = value; /* DCOUNT */
break;
- case 0x1c: /* I2C_DATA */
+ case 0x1c: /* I2C_DATA */
if (s->txlen > 2) {
/* XXX: remote access (qualifier) error - what's that? */
break;
}
s->fifo <<= 16;
s->txlen += 2;
- if (s->control & (1 << 14)) { /* BE */
+ if (s->control & (1 << 14)) { /* BE */
s->fifo |= ((value >> 8) & 0xff) << 8;
s->fifo |= ((value >> 0) & 0xff) << 0;
} else {
s->fifo |= ((value >> 0) & 0xff) << 8;
s->fifo |= ((value >> 8) & 0xff) << 0;
}
- s->stat &= ~(1 << 10); /* XUDF */
+ s->stat &= ~(1 << 10); /* XUDF */
if (s->txlen > 2)
- s->stat &= ~(1 << 4); /* XRDY */
+ s->stat &= ~(1 << 4); /* XRDY */
omap_i2c_fifo_run(s);
omap_i2c_interrupts_update(s);
break;
- case 0x20: /* I2C_SYSC */
+ case 0x20: /* I2C_SYSC */
if (s->revision < OMAP2_INTR_REV) {
OMAP_BAD_REG(addr);
return;
@@ -332,9 +332,9 @@ static void omap_i2c_write(void *opaque, hwaddr addr,
}
break;
- case 0x24: /* I2C_CON */
+ case 0x24: /* I2C_CON */
s->control = value & 0xcf87;
- if (~value & (1 << 15)) { /* I2C_EN */
+ if (~value & (1 << 15)) { /* I2C_EN */
if (s->revision < OMAP2_INTR_REV) {
omap_i2c_reset(DEVICE(s));
}
@@ -351,14 +351,14 @@ static void omap_i2c_write(void *opaque, hwaddr addr,
__func__);
break;
}
- if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */
- nack = !!i2c_start_transfer(s->bus, s->addr[1], /* SA */
- (~value >> 9) & 1); /* TRX */
- s->stat |= nack << 1; /* NACK */
- s->control &= ~(1 << 0); /* STT */
+ if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */
+ nack = !!i2c_start_transfer(s->bus, s->addr[1], /* SA */
+ (~value >> 9) & 1); /* TRX */
+ s->stat |= nack << 1; /* NACK */
+ s->control &= ~(1 << 0); /* STT */
s->fifo = 0;
if (nack)
- s->control &= ~(1 << 1); /* STP */
+ s->control &= ~(1 << 1); /* STP */
else {
s->count_cur = s->count;
omap_i2c_fifo_run(s);
@@ -367,34 +367,34 @@ static void omap_i2c_write(void *opaque, hwaddr addr,
}
break;
- case 0x28: /* I2C_OA */
+ case 0x28: /* I2C_OA */
s->addr[0] = value & 0x3ff;
break;
- case 0x2c: /* I2C_SA */
+ case 0x2c: /* I2C_SA */
s->addr[1] = value & 0x3ff;
break;
- case 0x30: /* I2C_PSC */
+ case 0x30: /* I2C_PSC */
s->divider = value;
break;
- case 0x34: /* I2C_SCLL */
+ case 0x34: /* I2C_SCLL */
s->times[0] = value;
break;
- case 0x38: /* I2C_SCLH */
+ case 0x38: /* I2C_SCLH */
s->times[1] = value;
break;
- case 0x3c: /* I2C_SYSTEST */
+ case 0x3c: /* I2C_SYSTEST */
s->test = value & 0xf80f;
- if (value & (1 << 11)) /* SBB */
+ if (value & (1 << 11)) /* SBB */
if (s->revision >= OMAP2_INTR_REV) {
s->stat |= 0x3f;
omap_i2c_interrupts_update(s);
}
- if (value & (1 << 15)) { /* ST_EN */
+ if (value & (1 << 15)) { /* ST_EN */
qemu_log_mask(LOG_UNIMP,
"%s: System Test not supported\n", __func__);
}
@@ -413,7 +413,7 @@ static void omap_i2c_writeb(void *opaque, hwaddr addr,
int offset = addr & OMAP_MPUI_REG_MASK;
switch (offset) {
- case 0x1c: /* I2C_DATA */
+ case 0x1c: /* I2C_DATA */
if (s->txlen > 2) {
/* XXX: remote access (qualifier) error - what's that? */
break;
@@ -421,9 +421,9 @@ static void omap_i2c_writeb(void *opaque, hwaddr addr,
s->fifo <<= 8;
s->txlen += 1;
s->fifo |= value & 0xff;
- s->stat &= ~(1 << 10); /* XUDF */
+ s->stat &= ~(1 << 10); /* XUDF */
if (s->txlen > 2)
- s->stat &= ~(1 << 4); /* XRDY */
+ s->stat &= ~(1 << 4); /* XRDY */
omap_i2c_fifo_run(s);
omap_i2c_interrupts_update(s);
break;
@@ -511,17 +511,16 @@ void omap_i2c_set_fclk(OMAPI2CState *i2c, omap_clk clk)
i2c->fclk = clk;
}
-static Property omap_i2c_properties[] = {
+static const Property omap_i2c_properties[] = {
DEFINE_PROP_UINT8("revision", OMAPI2CState, revision, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void omap_i2c_class_init(ObjectClass *klass, void *data)
+static void omap_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, omap_i2c_properties);
- dc->reset = omap_i2c_reset;
+ device_class_set_legacy_reset(dc, omap_i2c_reset);
/* Reason: pointer properties "iclk", "fclk" */
dc->user_creatable = false;
dc->realize = omap_i2c_realize;
diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c
index 3eed811..4e685fd 100644
--- a/hw/i2c/pm_smbus.c
+++ b/hw/i2c/pm_smbus.c
@@ -205,7 +205,6 @@ out:
error:
s->smb_stat |= STS_DEV_ERR;
- return;
}
static void smb_transaction_start(PMSMBus *s)
diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c
index ba1d2fd..853dc4b 100644
--- a/hw/i2c/pmbus_device.c
+++ b/hw/i2c/pmbus_device.c
@@ -1902,7 +1902,7 @@ static void pmbus_device_finalize(Object *obj)
g_free(pmdev->pages);
}
-static void pmbus_device_class_init(ObjectClass *klass, void *data)
+static void pmbus_device_class_init(ObjectClass *klass, const void *data)
{
SMBusDeviceClass *k = SMBUS_DEVICE_CLASS(klass);
diff --git a/hw/i2c/ppc4xx_i2c.c b/hw/i2c/ppc4xx_i2c.c
index 75d50f1..09d4c49 100644
--- a/hw/i2c/ppc4xx_i2c.c
+++ b/hw/i2c/ppc4xx_i2c.c
@@ -354,11 +354,11 @@ static void ppc4xx_i2c_init(Object *o)
bitbang_i2c_init(&s->bitbang, s->bus);
}
-static void ppc4xx_i2c_class_init(ObjectClass *klass, void *data)
+static void ppc4xx_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = ppc4xx_i2c_reset;
+ device_class_set_legacy_reset(dc, ppc4xx_i2c_reset);
}
static const TypeInfo ppc4xx_i2c_type_info = {
diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c
index c42236b..0a1088f 100644
--- a/hw/i2c/smbus_eeprom.c
+++ b/hw/i2c/smbus_eeprom.c
@@ -137,13 +137,13 @@ static void smbus_eeprom_realize(DeviceState *dev, Error **errp)
}
}
-static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data)
+static void smbus_eeprom_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(klass);
dc->realize = smbus_eeprom_realize;
- dc->reset = smbus_eeprom_reset;
+ device_class_set_legacy_reset(dc, smbus_eeprom_reset);
sc->receive_byte = eeprom_receive_byte;
sc->write_data = eeprom_write_data;
dc->vmsd = &vmstate_smbus_eeprom;
@@ -151,19 +151,16 @@ static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data)
dc->user_creatable = false;
}
-static const TypeInfo smbus_eeprom_info = {
- .name = TYPE_SMBUS_EEPROM,
- .parent = TYPE_SMBUS_DEVICE,
- .instance_size = sizeof(SMBusEEPROMDevice),
- .class_init = smbus_eeprom_class_initfn,
+static const TypeInfo smbus_eeprom_types[] = {
+ {
+ .name = TYPE_SMBUS_EEPROM,
+ .parent = TYPE_SMBUS_DEVICE,
+ .instance_size = sizeof(SMBusEEPROMDevice),
+ .class_init = smbus_eeprom_class_initfn,
+ },
};
-static void smbus_eeprom_register_types(void)
-{
- type_register_static(&smbus_eeprom_info);
-}
-
-type_init(smbus_eeprom_register_types)
+DEFINE_TYPES(smbus_eeprom_types)
void smbus_eeprom_init_one(I2CBus *smbus, uint8_t address, uint8_t *eeprom_buf)
{
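
The smbus_eeprom change replaces a single TypeInfo plus hand-written type_init()/type_register_static() boilerplate with an array registered through DEFINE_TYPES(). A sketch of the same pattern for a hypothetical device type (all names below are illustrative):

static const TypeInfo foo_device_types[] = {
    {
        .name          = "foo-device",
        .parent        = TYPE_DEVICE,
        .instance_size = sizeof(FooDeviceState),
        .class_init    = foo_device_class_init,
    },
    /* further entries can be appended without extra registration code */
};

/* expands to a type_init() function that registers every array entry */
DEFINE_TYPES(foo_device_types)
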
diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c
index 208f263..956c9b5 100644
--- a/hw/i2c/smbus_ich9.c
+++ b/hw/i2c/smbus_ich9.c
@@ -118,7 +118,7 @@ static void build_ich9_smb_aml(AcpiDevAmlIf *adev, Aml *scope)
qbus_build_aml(bus, scope);
}
-static void ich9_smb_class_init(ObjectClass *klass, void *data)
+static void ich9_smb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -145,7 +145,7 @@ static const TypeInfo ich9_smb_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(ICH9SMBState),
.class_init = ich9_smb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
{ },
diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c
index 9f9afc2..cfb61c8 100644
--- a/hw/i2c/smbus_slave.c
+++ b/hw/i2c/smbus_slave.c
@@ -201,7 +201,7 @@ static int smbus_i2c_send(I2CSlave *s, uint8_t data)
return 0;
}
-static void smbus_device_class_init(ObjectClass *klass, void *data)
+static void smbus_device_class_init(ObjectClass *klass, const void *data)
{
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events
index 6900e06..1ad0e95 100644
--- a/hw/i2c/trace-events
+++ b/hw/i2c/trace-events
@@ -35,6 +35,11 @@ aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t va
aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x"
aspeed_i2c_bus_recv(const char *mode, int i, int count, uint8_t byte) "%s recv %d/%d 0x%02x"
+# mpc_i2c.c
+
+mpc_i2c_read(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] -> 0x%02" PRIx32
+mpc_i2c_write(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] <- 0x%02" PRIx32
+
# npcm7xx_smbus.c
npcm7xx_smbus_read(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u"
@@ -51,3 +56,8 @@ npcm7xx_smbus_recv_fifo(const char *id, uint8_t received, uint8_t expected) "%s
pca954x_write_bytes(uint8_t value) "PCA954X write data: 0x%02x"
pca954x_read_data(uint8_t value) "PCA954X read data: 0x%02x"
+
+# imx_i2c.c
+
+imx_i2c_read(const char *id, const char *reg, uint64_t ofs, uint64_t value) "%s:[%s (0x%" PRIx64 ")] -> 0x%02" PRIx64
+imx_i2c_write(const char *id, const char *reg, uint64_t ofs, uint64_t value) "%s:[%s (0x%" PRIx64 ")] <- 0x%02" PRIx64
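
Each entry added to trace-events above generates a trace_<name>() helper with the declared argument types; the device model includes the generated "trace.h" and calls it at the probe point. A hedged sketch of a call site for the new imx_i2c events (the identifier string, register name, and the offset/value variables are placeholders, not taken from this patch):

#include "trace.h"

/* in the MMIO read handler, once the register value has been fetched */
trace_imx_i2c_read("imx-i2c", "I2CR", offset, value);
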
diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig
index f4a33b6..eb65bda 100644
--- a/hw/i386/Kconfig
+++ b/hw/i386/Kconfig
@@ -10,6 +10,11 @@ config SGX
bool
depends on KVM
+config TDX
+ bool
+ select X86_FW_OVMF
+ depends on KVM
+
config PC
bool
imply APPLESMC
@@ -26,6 +31,7 @@ config PC
imply QXL
imply SEV
imply SGX
+ imply TDX
imply TEST_DEVICES
imply TPM_CRB
imply TPM_TIS_ISA
@@ -43,6 +49,7 @@ config PC
select SERIAL_ISA
select ACPI_PCI
select ACPI_VMGENID
+ select ACPI_VMCLOCK
select VIRTIO_PMEM_SUPPORTED
select VIRTIO_MEM_SUPPORTED
select HV_BALLOON_SUPPORTED
@@ -129,6 +136,16 @@ config MICROVM
select USB_XHCI_SYSBUS
select I8254
+config NITRO_ENCLAVE
+ default y
+ depends on I386 && FDT # for MICROVM
+ depends on LIBCBOR && GNUTLS # for EIF and VIRTIO_NSM
+ depends on VHOST_USER # for VHOST_USER_VSOCK
+ select EIF
+ select MICROVM
+ select VHOST_USER_VSOCK
+ select VIRTIO_NSM
+
config X86_IOMMU
bool
depends on PC
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index f4e366f..61851cc 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -22,7 +22,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qnum.h"
#include "acpi-build.h"
#include "acpi-common.h"
#include "qemu/bitmap.h"
@@ -40,18 +40,19 @@
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/input/i8042.h"
#include "hw/acpi/memory_hotplug.h"
-#include "sysemu/tpm.h"
+#include "system/tpm.h"
#include "hw/acpi/tpm.h"
#include "hw/acpi/vmgenid.h"
+#include "hw/acpi/vmclock.h"
#include "hw/acpi/erst.h"
#include "hw/acpi/piix4.h"
-#include "sysemu/tpm_backend.h"
+#include "system/tpm_backend.h"
#include "hw/rtc/mc146818rtc_regs.h"
#include "migration/vmstate.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/nvdimm.h"
-#include "sysemu/numa.h"
-#include "sysemu/reset.h"
+#include "system/numa.h"
+#include "system/reset.h"
#include "hw/hyperv/vmbus-bridge.h"
/* Supported chipsets: */
@@ -68,7 +69,6 @@
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
#include "hw/acpi/cxl.h"
-#include "hw/acpi/acpi_generic_initiator.h"
#include "qom/qom-qobject.h"
#include "hw/i386/amd_iommu.h"
@@ -139,7 +139,7 @@ static void init_common_fadt_data(MachineState *ms, Object *o,
/*
* "ICH9-LPC" or "PIIX4_PM" has "smm-compat" property to keep the old
* behavior for compatibility irrelevant to smm_enabled, which doesn't
- * comforms to ACPI spec.
+ * conform to the ACPI spec.
*/
bool smm_enabled = object_property_get_bool(o, "smm-compat", NULL) ?
true : x86_machine_is_smm_enabled(x86ms);
@@ -589,8 +589,8 @@ void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus)
}
}
-static bool build_append_notfication_callback(Aml *parent_scope,
- const PCIBus *bus)
+static bool build_append_notification_callback(Aml *parent_scope,
+ const PCIBus *bus)
{
Aml *method;
PCIBus *sec;
@@ -604,7 +604,7 @@ static bool build_append_notfication_callback(Aml *parent_scope,
continue;
}
nr_notifiers = nr_notifiers +
- build_append_notfication_callback(br_scope, sec);
+ build_append_notification_callback(br_scope, sec);
/*
* add new child scope to parent
* and keep track of bus that have PCNT,
@@ -655,6 +655,7 @@ static Aml *aml_pci_pdsm(void)
Aml *acpi_index = aml_local(2);
Aml *zero = aml_int(0);
Aml *one = aml_int(1);
+ Aml *not_supp = aml_int(0xFFFFFFFF);
Aml *func = aml_arg(2);
Aml *params = aml_arg(4);
Aml *bnum = aml_derefof(aml_index(params, aml_int(0)));
@@ -679,7 +680,7 @@ static Aml *aml_pci_pdsm(void)
*/
ifctx1 = aml_if(aml_lnot(
aml_or(aml_equal(acpi_index, zero),
- aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL)
+ aml_equal(acpi_index, not_supp), NULL)
));
{
/* have supported functions */
@@ -705,18 +706,30 @@ static Aml *aml_pci_pdsm(void)
{
Aml *pkg = aml_package(2);
- aml_append(pkg, zero);
- /*
- * optional, if not impl. should return null string
- */
- aml_append(pkg, aml_string("%s", ""));
- aml_append(ifctx, aml_store(pkg, ret));
-
aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
+ aml_append(ifctx, aml_store(pkg, ret));
/*
- * update acpi-index to actual value
+ * Windows calls func=7 without checking whether it is available; as a
+ * workaround, Microsoft suggested returning an invalid Package for
+ * func 7. So return a two-element package, but only initialize its
+ * elements when acpi_index is supported; otherwise leave them
+ * uninitialized, which makes their ObjectType Uninitialized and should
+ * trip Windows into discarding the result as unexpected, preventing a
+ * bogus 'PCI Label' from being set on the device.
*/
- aml_append(ifctx, aml_store(acpi_index, aml_index(ret, zero)));
+ ifctx1 = aml_if(aml_lnot(aml_lor(
+ aml_equal(acpi_index, zero), aml_equal(acpi_index, not_supp)
+ )));
+ {
+ aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero)));
+ /*
+ * optional, if not impl. should return null string
+ */
+ aml_append(ifctx1, aml_store(aml_string("%s", ""),
+ aml_index(ret, one)));
+ }
+ aml_append(ifctx, ifctx1);
+
aml_append(ifctx, aml_return(ret));
}
@@ -724,120 +737,45 @@ static Aml *aml_pci_pdsm(void)
return method;
}
-/**
- * build_prt_entry:
- * @link_name: link name for PCI route entry
- *
- * build AML package containing a PCI route entry for @link_name
- */
-static Aml *build_prt_entry(const char *link_name)
-{
- Aml *a_zero = aml_int(0);
- Aml *pkg = aml_package(4);
- aml_append(pkg, a_zero);
- aml_append(pkg, a_zero);
- aml_append(pkg, aml_name("%s", link_name));
- aml_append(pkg, a_zero);
- return pkg;
-}
-
/*
- * initialize_route - Initialize the interrupt routing rule
- * through a specific LINK:
- * if (lnk_idx == idx)
- * route using link 'link_name'
- */
-static Aml *initialize_route(Aml *route, const char *link_name,
- Aml *lnk_idx, int idx)
-{
- Aml *if_ctx = aml_if(aml_equal(lnk_idx, aml_int(idx)));
- Aml *pkg = build_prt_entry(link_name);
-
- aml_append(if_ctx, aml_store(pkg, route));
-
- return if_ctx;
-}
-
-/*
- * build_prt - Define interrupt rounting rules
+ * build_prt - Define interrupt routing rules
*
* Returns an array of 128 routes, one for each device,
* based on device location.
* The main goal is to equally distribute the interrupts
* over the 4 existing ACPI links (works only for i440fx).
- * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
+ * The hash function is: (slot + pin) & 3 -> "LNK[D|A|B|C]".
*
*/
static Aml *build_prt(bool is_pci0_prt)
{
- Aml *method, *while_ctx, *pin, *res;
+ const int nroutes = 128;
+ Aml *rt_pkg, *method;
+ int pin;
method = aml_method("_PRT", 0, AML_NOTSERIALIZED);
- res = aml_local(0);
- pin = aml_local(1);
- aml_append(method, aml_store(aml_package(128), res));
- aml_append(method, aml_store(aml_int(0), pin));
-
- /* while (pin < 128) */
- while_ctx = aml_while(aml_lless(pin, aml_int(128)));
- {
- Aml *slot = aml_local(2);
- Aml *lnk_idx = aml_local(3);
- Aml *route = aml_local(4);
-
- /* slot = pin >> 2 */
- aml_append(while_ctx,
- aml_store(aml_shiftright(pin, aml_int(2), NULL), slot));
- /* lnk_idx = (slot + pin) & 3 */
- aml_append(while_ctx,
- aml_store(aml_and(aml_add(pin, slot, NULL), aml_int(3), NULL),
- lnk_idx));
-
- /* route[2] = "LNK[D|A|B|C]", selection based on pin % 3 */
- aml_append(while_ctx, initialize_route(route, "LNKD", lnk_idx, 0));
- if (is_pci0_prt) {
- Aml *if_device_1, *if_pin_4, *else_pin_4;
-
- /* device 1 is the power-management device, needs SCI */
- if_device_1 = aml_if(aml_equal(lnk_idx, aml_int(1)));
- {
- if_pin_4 = aml_if(aml_equal(pin, aml_int(4)));
- {
- aml_append(if_pin_4,
- aml_store(build_prt_entry("LNKS"), route));
- }
- aml_append(if_device_1, if_pin_4);
- else_pin_4 = aml_else();
- {
- aml_append(else_pin_4,
- aml_store(build_prt_entry("LNKA"), route));
- }
- aml_append(if_device_1, else_pin_4);
- }
- aml_append(while_ctx, if_device_1);
+ assert(nroutes < 256);
+ rt_pkg = aml_package(nroutes);
+
+ for (pin = 0; pin < nroutes; pin++) {
+ Aml *pkg = aml_package(4);
+ int slot = pin >> 2;
+
+ aml_append(pkg, aml_int((slot << 16) | 0xFFFF));
+ aml_append(pkg, aml_int(pin & 3));
+ /* device 1 is the power-management device, needs SCI */
+ if (is_pci0_prt && pin == 4) {
+ aml_append(pkg, aml_name("%s", "LNKS"));
} else {
- aml_append(while_ctx, initialize_route(route, "LNKA", lnk_idx, 1));
+ static const char link_name[][5] = {"LNKD", "LNKA", "LNKB", "LNKC"};
+ int hash = (slot + pin) & 3;
+ aml_append(pkg, aml_name("%s", link_name[hash]));
}
- aml_append(while_ctx, initialize_route(route, "LNKB", lnk_idx, 2));
- aml_append(while_ctx, initialize_route(route, "LNKC", lnk_idx, 3));
-
- /* route[0] = 0x[slot]FFFF */
- aml_append(while_ctx,
- aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF),
- NULL),
- aml_index(route, aml_int(0))));
- /* route[1] = pin & 3 */
- aml_append(while_ctx,
- aml_store(aml_and(pin, aml_int(3), NULL),
- aml_index(route, aml_int(1))));
- /* res[pin] = route */
- aml_append(while_ctx, aml_store(route, aml_index(res, pin)));
- /* pin++ */
- aml_append(while_ctx, aml_increment(pin));
+ aml_append(pkg, aml_int(0));
+ aml_append(rt_pkg, pkg);
}
- aml_append(method, while_ctx);
- /* return res*/
- aml_append(method, aml_return(res));
+
+ aml_append(method, aml_return(rt_pkg));
return method;
}
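
The rewritten build_prt() emits all 128 _PRT entries as a static AML package at table-build time instead of generating an AML while-loop that computes them at runtime. As a worked example of the mapping in the loop above: for pin = 5, slot = 5 >> 2 = 1 and hash = (1 + 5) & 3 = 2, so the entry is Package() { 0x1FFFF, 1, LNKB, 0 } — address (slot << 16) | 0xFFFF, PCI pin 5 & 3 = 1, link LNKB, source index 0. Only pin 4 on the PCI0 _PRT is special-cased to LNKS, since device 1 is the power-management device and needs the SCI.
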
@@ -1536,7 +1474,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
.fw_unplugs_cpu = pm->smi_on_cpu_unplug,
};
build_cpus_aml(dsdt, machine, opts, pc_madt_cpu_entry,
- pm->cpu_hp_io_base, "\\_SB.PCI0", "\\_GPE._E02");
+ pm->cpu_hp_io_base, "\\_SB.PCI0", "\\_GPE._E02",
+ AML_SYSTEM_IO);
}
if (pcms->memhp_io_base && nr_mem) {
@@ -1551,6 +1490,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
QLIST_FOREACH(bus, &bus->child, sibling) {
uint8_t bus_num = pci_bus_num(bus);
uint8_t numa_node = pci_bus_numa_node(bus);
+ uint32_t uid;
/* look only for expander root buses */
if (!pci_bus_is_root(bus)) {
@@ -1561,6 +1501,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
root_bus_limit = bus_num - 1;
}
+ uid = object_property_get_uint(OBJECT(bus), "acpi_uid",
+ &error_fatal);
scope = aml_scope("\\_SB");
if (pci_bus_is_cxl(bus)) {
@@ -1568,7 +1510,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
} else {
dev = aml_device("PC%.02X", bus_num);
}
- aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(uid)));
aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
if (pci_bus_is_cxl(bus)) {
struct Aml *aml_pkg = aml_package(2);
@@ -1831,7 +1773,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
PCIBus *b = PCI_HOST_BRIDGE(pci_host)->bus;
scope = aml_scope("\\_SB.PCI0");
- has_pcnt = build_append_notfication_callback(scope, b);
+ has_pcnt = build_append_notification_callback(scope, b);
if (has_pcnt) {
aml_append(dsdt, scope);
}
@@ -2046,7 +1988,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
}
- build_srat_generic_pci_initiator(table_data);
+ build_srat_generic_affinity_structures(table_data);
/*
* Entry is required for Windows to enable memory hotplug in OS
@@ -2391,12 +2333,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
build_append_int_noprefix(table_data, ivhd_blob->len + 24, 2);
/* DeviceID */
build_append_int_noprefix(table_data,
- object_property_get_int(OBJECT(&s->pci), "addr",
+ object_property_get_int(OBJECT(s->pci), "addr",
&error_abort), 2);
/* Capability offset */
- build_append_int_noprefix(table_data, s->pci.capab_offset, 2);
+ build_append_int_noprefix(table_data, s->pci->capab_offset, 2);
/* IOMMU base address */
- build_append_int_noprefix(table_data, s->mmio.addr, 8);
+ build_append_int_noprefix(table_data, s->mr_mmio.addr, 8);
/* PCI Segment Group */
build_append_int_noprefix(table_data, 0, 2);
/* IOMMU info */
@@ -2426,12 +2368,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
build_append_int_noprefix(table_data, ivhd_blob->len + 40, 2);
/* DeviceID */
build_append_int_noprefix(table_data,
- object_property_get_int(OBJECT(&s->pci), "addr",
+ object_property_get_int(OBJECT(s->pci), "addr",
&error_abort), 2);
/* Capability offset */
- build_append_int_noprefix(table_data, s->pci.capab_offset, 2);
+ build_append_int_noprefix(table_data, s->pci->capab_offset, 2);
/* IOMMU base address */
- build_append_int_noprefix(table_data, s->mmio.addr, 8);
+ build_append_int_noprefix(table_data, s->mr_mmio.addr, 8);
/* PCI Segment Group */
build_append_int_noprefix(table_data, 0, 2);
/* IOMMU info */
@@ -2504,7 +2446,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
uint8_t *u;
GArray *tables_blob = tables->table_data;
AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL };
- Object *vmgenid_dev;
+ Object *vmgenid_dev, *vmclock_dev;
char *oem_id;
char *oem_table_id;
@@ -2577,6 +2519,13 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
tables->vmgenid, tables->linker, x86ms->oem_id);
}
+ vmclock_dev = find_vmclock_dev();
+ if (vmclock_dev) {
+ acpi_add_table(table_offsets, tables_blob);
+ vmclock_build_acpi(VMCLOCK(vmclock_dev), tables_blob, tables->linker,
+ x86ms->oem_id);
+ }
+
if (misc.has_hpet) {
acpi_add_table(table_offsets, tables_blob);
build_hpet(tables_blob, tables->linker, x86ms->oem_id,
diff --git a/hw/i386/acpi-build.h b/hw/i386/acpi-build.h
index 0dce155..275ec05 100644
--- a/hw/i386/acpi-build.h
+++ b/hw/i386/acpi-build.h
@@ -5,7 +5,7 @@
extern const struct AcpiGenericAddress x86_nvdimm_acpi_dsmio;
-/* PCI Hot-plug registers bases. See docs/spec/acpi_pci_hotplug.txt */
+/* PCI Hot-plug registers' base. See docs/specs/acpi_pci_hotplug.rst */
#define ACPI_PCIHP_SEJ_BASE 0x8
#define ACPI_PCIHP_BNMR_BASE 0x10
diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c
index 0cc2919..7bd0806 100644
--- a/hw/i386/acpi-common.c
+++ b/hw/i386/acpi-common.c
@@ -23,7 +23,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c
index 279da6b..bc65717 100644
--- a/hw/i386/acpi-microvm.c
+++ b/hw/i386/acpi-microvm.c
@@ -24,7 +24,7 @@
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/acpi/aml-build.h"
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 6d4fde7..963aa24 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -32,6 +32,7 @@
#include "trace.h"
#include "hw/i386/apic-msidef.h"
#include "hw/qdev-properties.h"
+#include "kvm/kvm_i386.h"
/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
@@ -60,8 +61,9 @@ struct AMDVIAddressSpace {
uint8_t bus_num; /* bus number */
uint8_t devfn; /* device function */
AMDVIState *iommu_state; /* AMDVI - one per machine */
- MemoryRegion root; /* AMDVI Root memory map region */
+ MemoryRegion root; /* AMDVI Root memory map region */
IOMMUMemoryRegion iommu; /* Device's address translation region */
+ MemoryRegion iommu_nodma; /* Alias of shared nodma memory region */
MemoryRegion iommu_ir; /* Device's interrupt remapping region */
AddressSpace as; /* device's corresponding address space */
};
@@ -165,11 +167,11 @@ static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
MSIMessage msg = {};
MemTxAttrs attrs = {
- .requester_id = pci_requester_id(&s->pci.dev)
+ .requester_id = pci_requester_id(&s->pci->dev)
};
- if (msi_enabled(&s->pci.dev)) {
- msg = msi_get_message(&s->pci.dev, 0);
+ if (msi_enabled(&s->pci->dev)) {
+ msg = msi_get_message(&s->pci->dev, 0);
address_space_stl_le(&address_space_memory, msg.address, msg.data,
attrs, NULL);
}
@@ -237,7 +239,7 @@ static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
amdvi_encode_event(evt, devid, addr, info);
amdvi_log_event(s, evt);
- pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS,
PCI_STATUS_SIG_TARGET_ABORT);
}
/*
@@ -254,7 +256,7 @@ static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
amdvi_encode_event(evt, devid, devtab, info);
amdvi_log_event(s, evt);
- pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS,
PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
@@ -267,7 +269,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
amdvi_encode_event(evt, 0, addr, info);
amdvi_log_event(s, evt);
- pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS,
PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an illegal command event
@@ -308,7 +310,7 @@ static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
amdvi_encode_event(evt, devid, addr, info);
amdvi_log_event(s, evt);
- pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS,
PCI_STATUS_SIG_TARGET_ABORT);
}
@@ -357,12 +359,12 @@ static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
uint64_t gpa, IOMMUTLBEntry to_cache,
uint16_t domid)
{
- AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
- uint64_t *key = g_new(uint64_t, 1);
- uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;
-
/* don't cache erroneous translations */
if (to_cache.perm != IOMMU_NONE) {
+ AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
+ uint64_t *key = g_new(uint64_t, 1);
+ uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;
+
trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
PCI_FUNC(devid), gpa, to_cache.translated_addr);
@@ -430,6 +432,12 @@ static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
trace_amdvi_ppr_exec();
}
+static void amdvi_intremap_inval_notify_all(AMDVIState *s, bool global,
+ uint32_t index, uint32_t mask)
+{
+ x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
+}
+
static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
if (extract64(cmd[0], 0, 60) || cmd[1]) {
@@ -437,6 +445,9 @@ static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
s->cmdbuf + s->cmdbuf_head);
}
+ /* Notify global invalidation */
+ amdvi_intremap_inval_notify_all(s, true, 0, 0);
+
amdvi_iotlb_reset(s);
trace_amdvi_all_inval();
}
@@ -485,6 +496,9 @@ static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
return;
}
+ /* Notify global invalidation */
+ amdvi_intremap_inval_notify_all(s, true, 0, 0);
+
trace_amdvi_intr_inval();
}
@@ -1295,15 +1309,15 @@ static int amdvi_int_remap_msi(AMDVIState *iommu,
ret = -AMDVI_IR_ERR;
break;
case AMDVI_IOAPIC_INT_TYPE_NMI:
- pass = dte[3] & AMDVI_DEV_NMI_PASS_MASK;
+ pass = dte[2] & AMDVI_DEV_NMI_PASS_MASK;
trace_amdvi_ir_delivery_mode("nmi");
break;
case AMDVI_IOAPIC_INT_TYPE_INIT:
- pass = dte[3] & AMDVI_DEV_INT_PASS_MASK;
+ pass = dte[2] & AMDVI_DEV_INT_PASS_MASK;
trace_amdvi_ir_delivery_mode("init");
break;
case AMDVI_IOAPIC_INT_TYPE_EINT:
- pass = dte[3] & AMDVI_DEV_EINT_PASS_MASK;
+ pass = dte[2] & AMDVI_DEV_EINT_PASS_MASK;
trace_amdvi_ir_delivery_mode("eint");
break;
default:
@@ -1436,13 +1450,13 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
* Memory region relationships looks like (Address range shows
* only lower 32 bits to make it short in length...):
*
- * |-----------------+-------------------+----------|
- * | Name | Address range | Priority |
- * |-----------------+-------------------+----------+
- * | amdvi_root | 00000000-ffffffff | 0 |
- * | amdvi_iommu | 00000000-ffffffff | 1 |
- * | amdvi_iommu_ir | fee00000-feefffff | 64 |
- * |-----------------+-------------------+----------|
+ * |--------------------+-------------------+----------|
+ * | Name | Address range | Priority |
+ * |--------------------+-------------------+----------+
+ * | amdvi-root | 00000000-ffffffff | 0 |
+ * | amdvi-iommu_nodma | 00000000-ffffffff | 0 |
+ * | amdvi-iommu_ir | fee00000-feefffff | 1 |
+ * |--------------------+-------------------+----------|
*/
memory_region_init_iommu(&amdvi_dev_as->iommu,
sizeof(amdvi_dev_as->iommu),
@@ -1452,16 +1466,27 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
memory_region_init(&amdvi_dev_as->root, OBJECT(s),
"amdvi_root", UINT64_MAX);
address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name);
- memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s),
- &amdvi_ir_ops, s, "amd_iommu_ir",
- AMDVI_INT_ADDR_SIZE);
- memory_region_add_subregion_overlap(&amdvi_dev_as->root,
- AMDVI_INT_ADDR_FIRST,
- &amdvi_dev_as->iommu_ir,
- 64);
memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0,
MEMORY_REGION(&amdvi_dev_as->iommu),
- 1);
+ 0);
+
+ /* Build the DMA Disabled alias to shared memory */
+ memory_region_init_alias(&amdvi_dev_as->iommu_nodma, OBJECT(s),
+ "amdvi-sys", &s->mr_sys, 0,
+ memory_region_size(&s->mr_sys));
+ memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0,
+ &amdvi_dev_as->iommu_nodma,
+ 0);
+ /* Build the Interrupt Remapping alias to shared memory */
+ memory_region_init_alias(&amdvi_dev_as->iommu_ir, OBJECT(s),
+ "amdvi-ir", &s->mr_ir, 0,
+ memory_region_size(&s->mr_ir));
+ memory_region_add_subregion_overlap(MEMORY_REGION(&amdvi_dev_as->iommu),
+ AMDVI_INT_ADDR_FIRST,
+ &amdvi_dev_as->iommu_ir, 1);
+
+ memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, false);
+ memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), true);
}
return &iommu_as[devfn]->as;
}
@@ -1560,9 +1585,9 @@ static void amdvi_pci_realize(PCIDevice *pdev, Error **errp)
/* reset AMDVI specific capabilities, all r/o */
pci_set_long(pdev->config + s->capab_offset, AMDVI_CAPAB_FEATURES);
pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
- AMDVI_BASE_ADDR & ~(0xffff0000));
+ AMDVI_BASE_ADDR & MAKE_64BIT_MASK(14, 18));
pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
- (AMDVI_BASE_ADDR & ~(0xffff)) >> 16);
+ AMDVI_BASE_ADDR >> 32);
pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_RANGE,
0xff000000);
pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
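
Arithmetic check of the new capability BAR encoding, using AMDVI_BASE_ADDR = 0xfed80000ULL as defined in amd_iommu.h further down: MAKE_64BIT_MASK(14, 18) covers bits 14..31, i.e. 0xffffc000, so the low register receives 0xfed80000 & 0xffffc000 = 0xfed80000 and the high register receives 0xfed80000ULL >> 32 = 0. The old expressions split the address at bit 16 instead (low = 0xfed80000 & ~0xffff0000 = 0, high = 0xfed80000 >> 16 = 0xfed8), which does not match the AMD-Vi capability layout where the low dword carries address bits 31:14 and the high dword carries bits 63:32.
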
@@ -1574,41 +1599,137 @@ static void amdvi_sysbus_reset(DeviceState *dev)
{
AMDVIState *s = AMD_IOMMU_DEVICE(dev);
- msi_reset(&s->pci.dev);
+ msi_reset(&s->pci->dev);
amdvi_init(s);
}
+static const VMStateDescription vmstate_amdvi_sysbus_migratable = {
+ .name = "amd-iommu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .priority = MIG_PRI_IOMMU,
+ .fields = (VMStateField[]) {
+ /* Updated in amdvi_handle_control_write() */
+ VMSTATE_BOOL(enabled, AMDVIState),
+ VMSTATE_BOOL(ga_enabled, AMDVIState),
+ VMSTATE_BOOL(ats_enabled, AMDVIState),
+ VMSTATE_BOOL(cmdbuf_enabled, AMDVIState),
+ VMSTATE_BOOL(completion_wait_intr, AMDVIState),
+ VMSTATE_BOOL(evtlog_enabled, AMDVIState),
+ VMSTATE_BOOL(evtlog_intr, AMDVIState),
+ /* Updated in amdvi_handle_devtab_write() */
+ VMSTATE_UINT64(devtab, AMDVIState),
+ VMSTATE_UINT64(devtab_len, AMDVIState),
+ /* Updated in amdvi_handle_cmdbase_write() */
+ VMSTATE_UINT64(cmdbuf, AMDVIState),
+ VMSTATE_UINT64(cmdbuf_len, AMDVIState),
+ /* Updated in amdvi_handle_cmdhead_write() */
+ VMSTATE_UINT32(cmdbuf_head, AMDVIState),
+ /* Updated in amdvi_handle_cmdtail_write() */
+ VMSTATE_UINT32(cmdbuf_tail, AMDVIState),
+ /* Updated in amdvi_handle_evtbase_write() */
+ VMSTATE_UINT64(evtlog, AMDVIState),
+ VMSTATE_UINT32(evtlog_len, AMDVIState),
+ /* Updated in amdvi_handle_evthead_write() */
+ VMSTATE_UINT32(evtlog_head, AMDVIState),
+ /* Updated in amdvi_handle_evttail_write() */
+ VMSTATE_UINT32(evtlog_tail, AMDVIState),
+ /* Updated in amdvi_handle_pprbase_write() */
+ VMSTATE_UINT64(ppr_log, AMDVIState),
+ VMSTATE_UINT32(pprlog_len, AMDVIState),
+ /* Updated in amdvi_handle_pprhead_write() */
+ VMSTATE_UINT32(pprlog_head, AMDVIState),
+ /* Updated in amdvi_handle_tailhead_write() */
+ VMSTATE_UINT32(pprlog_tail, AMDVIState),
+ /* MMIO registers */
+ VMSTATE_UINT8_ARRAY(mmior, AMDVIState, AMDVI_MMIO_SIZE),
+ VMSTATE_UINT8_ARRAY(romask, AMDVIState, AMDVI_MMIO_SIZE),
+ VMSTATE_UINT8_ARRAY(w1cmask, AMDVIState, AMDVI_MMIO_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
{
+ DeviceClass *dc = (DeviceClass *) object_get_class(OBJECT(dev));
AMDVIState *s = AMD_IOMMU_DEVICE(dev);
MachineState *ms = MACHINE(qdev_get_machine());
PCMachineState *pcms = PC_MACHINE(ms);
X86MachineState *x86ms = X86_MACHINE(ms);
PCIBus *bus = pcms->pcibus;
- s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
- amdvi_uint64_equal, g_free, g_free);
+ if (s->pci_id) {
+ PCIDevice *pdev = NULL;
+ int ret = pci_qdev_find_device(s->pci_id, &pdev);
- /* This device should take care of IOMMU PCI properties */
- if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) {
- return;
+ if (ret) {
+ error_report("Cannot find PCI device '%s'", s->pci_id);
+ return;
+ }
+
+ if (!object_dynamic_cast(OBJECT(pdev), TYPE_AMD_IOMMU_PCI)) {
+ error_report("Device '%s' must be an AMDVI-PCI device type", s->pci_id);
+ return;
+ }
+
+ s->pci = AMD_IOMMU_PCI(pdev);
+ dc->vmsd = &vmstate_amdvi_sysbus_migratable;
+ } else {
+ s->pci = AMD_IOMMU_PCI(object_new(TYPE_AMD_IOMMU_PCI));
+ /* This device should take care of IOMMU PCI properties */
+ if (!qdev_realize(DEVICE(s->pci), &bus->qbus, errp)) {
+ return;
+ }
}
+ s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
+ amdvi_uint64_equal, g_free, g_free);
+
/* Pseudo address space under root PCI bus. */
x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);
/* set up MMIO */
- memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
- AMDVI_MMIO_SIZE);
+ memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s,
+ "amdvi-mmio", AMDVI_MMIO_SIZE);
memory_region_add_subregion(get_system_memory(), AMDVI_BASE_ADDR,
- &s->mmio);
+ &s->mr_mmio);
+
+ /* Create the memory regions shared by all devices */
+ memory_region_init(&s->mr_sys, OBJECT(s), "amdvi-sys", UINT64_MAX);
+
+ /* set up the DMA disabled memory region */
+ memory_region_init_alias(&s->mr_nodma, OBJECT(s),
+ "amdvi-nodma", get_system_memory(), 0,
+ memory_region_size(get_system_memory()));
+ memory_region_add_subregion_overlap(&s->mr_sys, 0,
+ &s->mr_nodma, 0);
+
+ /* set up the Interrupt Remapping memory region */
+ memory_region_init_io(&s->mr_ir, OBJECT(s), &amdvi_ir_ops,
+ s, "amdvi-ir", AMDVI_INT_ADDR_SIZE);
+ memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST,
+ &s->mr_ir, 1);
+
+ if (kvm_enabled() && x86ms->apic_id_limit > 255 && !s->xtsup) {
+ error_report("AMD IOMMU with x2APIC configuration requires xtsup=on");
+ exit(EXIT_FAILURE);
+ }
+
+ if (s->xtsup) {
+ if (kvm_irqchip_is_split() && !kvm_enable_x2apic()) {
+ error_report("AMD IOMMU xtsup=on requires x2APIC support on "
+ "the KVM side");
+ exit(EXIT_FAILURE);
+ }
+ }
+
pci_setup_iommu(bus, &amdvi_iommu_ops, s);
amdvi_init(s);
}
-static Property amdvi_properties[] = {
+static const Property amdvi_properties[] = {
DEFINE_PROP_BOOL("xtsup", AMDVIState, xtsup, false),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_STRING("pci-id", AMDVIState, pci_id),
};
static const VMStateDescription vmstate_amdvi_sysbus = {
@@ -1616,25 +1737,16 @@ static const VMStateDescription vmstate_amdvi_sysbus = {
.unmigratable = 1
};
-static void amdvi_sysbus_instance_init(Object *klass)
-{
- AMDVIState *s = AMD_IOMMU_DEVICE(klass);
-
- object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
-}
-
-static void amdvi_sysbus_class_init(ObjectClass *klass, void *data)
+static void amdvi_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass);
- dc->reset = amdvi_sysbus_reset;
+ device_class_set_legacy_reset(dc, amdvi_sysbus_reset);
dc->vmsd = &vmstate_amdvi_sysbus;
dc->hotpluggable = false;
dc_class->realize = amdvi_sysbus_realize;
dc_class->int_remap = amdvi_int_remap;
- /* Supported by the pc-q35-* machine types */
- dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device";
device_class_set_props(dc, amdvi_properties);
@@ -1644,16 +1756,16 @@ static const TypeInfo amdvi_sysbus = {
.name = TYPE_AMD_IOMMU_DEVICE,
.parent = TYPE_X86_IOMMU_DEVICE,
.instance_size = sizeof(AMDVIState),
- .instance_init = amdvi_sysbus_instance_init,
.class_init = amdvi_sysbus_class_init
};
-static void amdvi_pci_class_init(ObjectClass *klass, void *data)
+static void amdvi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->vendor_id = PCI_VENDOR_ID_AMD;
+ k->device_id = 0x1419;
k->class_id = 0x0806;
k->realize = amdvi_pci_realize;
@@ -1666,13 +1778,14 @@ static const TypeInfo amdvi_pci = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(AMDVIPCIState),
.class_init = amdvi_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+static void amdvi_iommu_memory_region_class_init(ObjectClass *klass,
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
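
With the new "pci-id" property, the sysbus IOMMU can bind to a user-created AMDVI-PCI function looked up via pci_qdev_find_device() instead of instantiating its own, and in that case it switches to the migratable VMState description added above. A hedged usage sketch — the QOM type names "AMDVI-PCI" and "amd-iommu" are assumptions based on TYPE_AMD_IOMMU_PCI / TYPE_AMD_IOMMU_DEVICE and are not spelled out in this hunk:

    -device AMDVI-PCI,id=iommupci0 -device amd-iommu,pci-id=iommupci0,xtsup=on
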
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index 73619fe..5672bde 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -187,7 +187,7 @@
AMDVI_CAPAB_FLAG_HTTUNNEL | AMDVI_CAPAB_EFR_SUP)
/* AMDVI default address */
-#define AMDVI_BASE_ADDR 0xfed80000
+#define AMDVI_BASE_ADDR 0xfed80000ULL
/* page management constants */
#define AMDVI_PAGE_SHIFT 12
@@ -315,7 +315,8 @@ struct AMDVIPCIState {
struct AMDVIState {
X86IOMMUState iommu; /* IOMMU bus device */
- AMDVIPCIState pci; /* IOMMU PCI device */
+ AMDVIPCIState *pci; /* IOMMU PCI device */
+ char *pci_id; /* ID of AMDVI-PCI device, if user created */
uint32_t version;
@@ -328,7 +329,7 @@ struct AMDVIState {
bool excl_enabled;
hwaddr devtab; /* base address device table */
- size_t devtab_len; /* device table length */
+ uint64_t devtab_len; /* device table length */
hwaddr cmdbuf; /* command buffer base address */
uint64_t cmdbuf_len; /* command buffer length */
@@ -353,7 +354,10 @@ struct AMDVIState {
uint32_t pprlog_head; /* ppr log head */
uint32_t pprlog_tail; /* ppr log tail */
- MemoryRegion mmio; /* MMIO region */
+ MemoryRegion mr_mmio; /* MMIO region */
+ MemoryRegion mr_sys;
+ MemoryRegion mr_nodma;
+ MemoryRegion mr_ir;
uint8_t mmior[AMDVI_MMIO_SIZE]; /* read/write MMIO */
uint8_t w1cmask[AMDVI_MMIO_SIZE]; /* read/write 1 clear mask */
uint8_t romask[AMDVI_MMIO_SIZE]; /* MMIO read/only mask */
diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c
index 0e44946..5c0bcd5 100644
--- a/hw/i386/fw_cfg.c
+++ b/hw/i386/fw_cfg.c
@@ -13,7 +13,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/firmware/smbios.h"
@@ -26,7 +26,9 @@
#include CONFIG_DEVICES
#include "target/i386/cpu.h"
-struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
+#if !defined(CONFIG_HPET)
+struct hpet_fw_config hpet_fw_cfg = {.count = UINT8_MAX};
+#endif
const char *fw_cfg_arch_key_name(uint16_t key)
{
@@ -143,13 +145,13 @@ FWCfgState *fw_cfg_arch_create(MachineState *ms,
*/
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, apic_id_limit);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, ms->ram_size);
-#ifdef CONFIG_ACPI
- fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES,
- acpi_tables, acpi_tables_len);
-#endif
+ if (acpi_builtin()) {
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES,
+ acpi_tables, acpi_tables_len);
+ }
fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1);
- fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg));
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_fw_cfg, sizeof(hpet_fw_cfg));
/* allocate memory for the NUMA channel: one (64bit) word for the number
* of nodes, one word for each VCPU->node and one word for each node to
* hold the amount of memory.
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 37c21a0a..69d72ad 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -32,9 +32,9 @@
#include "hw/i386/apic-msidef.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
-#include "sysemu/kvm.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
+#include "system/kvm.h"
+#include "system/dma.h"
+#include "system/system.h"
#include "hw/i386/apic_internal.h"
#include "kvm/kvm_i386.h"
#include "migration/vmstate.h"
@@ -48,7 +48,10 @@
/* pe operations */
#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
-#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
+#define VTD_PE_GET_FL_LEVEL(pe) \
+ (4 + (((pe)->val[2] >> 2) & VTD_SM_PASID_ENTRY_FLPM))
+#define VTD_PE_GET_SL_LEVEL(pe) \
+ (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
/*
 * PCI bus number (or SID) is not reliable since the device is usually
@@ -67,6 +70,11 @@ struct vtd_hiod_key {
uint8_t devfn;
};
+struct vtd_as_raw_key {
+ uint16_t sid;
+ uint32_t pasid;
+};
+
struct vtd_iotlb_key {
uint64_t gfn;
uint32_t pasid;
@@ -284,15 +292,15 @@ static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
}
/* The shift of an addr for a certain level of paging structure */
-static inline uint32_t vtd_slpt_level_shift(uint32_t level)
+static inline uint32_t vtd_pt_level_shift(uint32_t level)
{
assert(level != 0);
- return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
+ return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_LEVEL_BITS;
}
-static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
+static inline uint64_t vtd_pt_level_page_mask(uint32_t level)
{
- return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
+ return ~((1ULL << vtd_pt_level_shift(level)) - 1);
}
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
@@ -302,9 +310,43 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
- return (entry->domain_id == info->domain_id) &&
- (((entry->gfn & info->mask) == gfn) ||
- (entry->gfn == gfn_tlb));
+
+ if (entry->domain_id != info->domain_id) {
+ return false;
+ }
+
+ /*
+ * According to spec, IOTLB entries caching first-stage (PGTT=001b) or
+ * nested (PGTT=011b) mapping associated with specified domain-id are
+ * invalidated. Nested isn't supported yet, so only need to check 001b.
+ */
+ if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) {
+ return true;
+ }
+
+ return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb;
+}
+
+static gboolean vtd_hash_remove_by_page_piotlb(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
+ VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
+ uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
+ uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
+
+ /*
+ * According to spec, PASID-based-IOTLB Invalidation in page granularity
+ * doesn't invalidate IOTLB entries caching second-stage (PGTT=010b)
+ * or pass-through (PGTT=100b) mappings. Nested isn't supported yet,
+ * so only need to check first-stage (PGTT=001b) mappings.
+ */
+ if (entry->pgtt != VTD_SM_PASID_ENTRY_FLT) {
+ return false;
+ }
+
+ return entry->domain_id == info->domain_id && entry->pasid == info->pasid &&
+ ((entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb);
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
@@ -349,7 +391,7 @@ static void vtd_reset_caches(IntelIOMMUState *s)
static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
- return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
+ return (addr & vtd_pt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
/* Must be called with IOMMU lock held */
@@ -358,9 +400,9 @@ static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
{
struct vtd_iotlb_key key;
VTDIOTLBEntry *entry;
- int level;
+ unsigned level;
- for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
+ for (level = VTD_PT_LEVEL; level < VTD_PML4_LEVEL; level++) {
key.gfn = vtd_get_iotlb_gfn(addr, level);
key.level = level;
key.sid = source_id;
@@ -377,15 +419,15 @@ out:
/* Must be with IOMMU lock held */
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
- uint16_t domain_id, hwaddr addr, uint64_t slpte,
+ uint16_t domain_id, hwaddr addr, uint64_t pte,
uint8_t access_flags, uint32_t level,
- uint32_t pasid)
+ uint32_t pasid, uint8_t pgtt)
{
VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
struct vtd_iotlb_key *key = g_malloc(sizeof(*key));
uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
- trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
+ trace_vtd_iotlb_page_update(source_id, addr, pte, domain_id);
if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
trace_vtd_iotlb_reset("iotlb exceeds size limit");
vtd_reset_iotlb_locked(s);
@@ -393,10 +435,11 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
entry->gfn = gfn;
entry->domain_id = domain_id;
- entry->slpte = slpte;
+ entry->pte = pte;
entry->access_flags = access_flags;
- entry->mask = vtd_slpt_level_page_mask(level);
+ entry->mask = vtd_pt_level_page_mask(level);
entry->pasid = pasid;
+ entry->pgtt = pgtt;
key->gfn = gfn;
key->sid = source_id;
@@ -710,32 +753,32 @@ static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}
-static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw)
+static inline uint64_t vtd_get_pte_addr(uint64_t pte, uint8_t aw)
{
- return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw);
+ return pte & VTD_PT_BASE_ADDR_MASK(aw);
}
/* Whether the pte indicates the address of the page frame */
-static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
+static inline bool vtd_is_last_pte(uint64_t pte, uint32_t level)
{
- return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
+ return level == VTD_PT_LEVEL || (pte & VTD_PT_PAGE_SIZE_MASK);
}
-/* Get the content of a spte located in @base_addr[@index] */
-static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
+/* Get the content of a pte located in @base_addr[@index] */
+static uint64_t vtd_get_pte(dma_addr_t base_addr, uint32_t index)
{
- uint64_t slpte;
+ uint64_t pte;
- assert(index < VTD_SL_PT_ENTRY_NR);
+ assert(index < VTD_PT_ENTRY_NR);
if (dma_memory_read(&address_space_memory,
- base_addr + index * sizeof(slpte),
- &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) {
- slpte = (uint64_t)-1;
- return slpte;
+ base_addr + index * sizeof(pte),
+ &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) {
+ pte = (uint64_t)-1;
+ return pte;
}
- slpte = le64_to_cpu(slpte);
- return slpte;
+ pte = le64_to_cpu(pte);
+ return pte;
}
/* Given an iova and the level of paging structure, return the offset
@@ -743,36 +786,39 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
*/
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
- return (iova >> vtd_slpt_level_shift(level)) &
- ((1ULL << VTD_SL_LEVEL_BITS) - 1);
+ return (iova >> vtd_pt_level_shift(level)) &
+ ((1ULL << VTD_LEVEL_BITS) - 1);
}
/* Check Capability Register to see if the @level of page-table is supported */
-static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
+static inline bool vtd_is_sl_level_supported(IntelIOMMUState *s, uint32_t level)
{
return VTD_CAP_SAGAW_MASK & s->cap &
(1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}
+static inline bool vtd_is_fl_level_supported(IntelIOMMUState *s, uint32_t level)
+{
+ return level == VTD_PML4_LEVEL;
+}
+
/* Return true if check passed, otherwise false */
-static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
- VTDPASIDEntry *pe)
+static inline bool vtd_pe_type_check(IntelIOMMUState *s, VTDPASIDEntry *pe)
{
switch (VTD_PE_GET_TYPE(pe)) {
case VTD_SM_PASID_ENTRY_FLT:
+ return !!(s->ecap & VTD_ECAP_FLTS);
case VTD_SM_PASID_ENTRY_SLT:
+ return !!(s->ecap & VTD_ECAP_SLTS);
case VTD_SM_PASID_ENTRY_NESTED:
- break;
+ /* Not support NESTED page table type yet */
+ return false;
case VTD_SM_PASID_ENTRY_PT:
- if (!x86_iommu->pt_supported) {
- return false;
- }
- break;
+ return !!(s->ecap & VTD_ECAP_PT);
default:
/* Unknown type */
return false;
}
- return true;
}
static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
@@ -796,7 +842,7 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
addr = pasid_dir_base + index * entry_size;
if (dma_memory_read(&address_space_memory, addr,
pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) {
- return -VTD_FR_PASID_TABLE_INV;
+ return -VTD_FR_PASID_DIR_ACCESS_ERR;
}
pdire->val = le64_to_cpu(pdire->val);
@@ -814,28 +860,35 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
dma_addr_t addr,
VTDPASIDEntry *pe)
{
+ uint8_t pgtt;
uint32_t index;
dma_addr_t entry_size;
- X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
index = VTD_PASID_TABLE_INDEX(pasid);
entry_size = VTD_PASID_ENTRY_SIZE;
addr = addr + index * entry_size;
if (dma_memory_read(&address_space_memory, addr,
pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
- return -VTD_FR_PASID_TABLE_INV;
+ return -VTD_FR_PASID_TABLE_ACCESS_ERR;
}
for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) {
pe->val[i] = le64_to_cpu(pe->val[i]);
}
/* Do translation type check */
- if (!vtd_pe_type_check(x86_iommu, pe)) {
- return -VTD_FR_PASID_TABLE_INV;
+ if (!vtd_pe_type_check(s, pe)) {
+ return -VTD_FR_PASID_TABLE_ENTRY_INV;
}
- if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) {
- return -VTD_FR_PASID_TABLE_INV;
+ pgtt = VTD_PE_GET_TYPE(pe);
+ if (pgtt == VTD_SM_PASID_ENTRY_SLT &&
+ !vtd_is_sl_level_supported(s, VTD_PE_GET_SL_LEVEL(pe))) {
+ return -VTD_FR_PASID_TABLE_ENTRY_INV;
+ }
+
+ if (pgtt == VTD_SM_PASID_ENTRY_FLT &&
+ !vtd_is_fl_level_supported(s, VTD_PE_GET_FL_LEVEL(pe))) {
+ return -VTD_FR_PASID_TABLE_ENTRY_INV;
}
return 0;
@@ -876,7 +929,7 @@ static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s,
}
if (!vtd_pdire_present(&pdire)) {
- return -VTD_FR_PASID_TABLE_INV;
+ return -VTD_FR_PASID_DIR_ENTRY_P;
}
ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe);
@@ -885,7 +938,7 @@ static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s,
}
if (!vtd_pe_present(pe)) {
- return -VTD_FR_PASID_TABLE_INV;
+ return -VTD_FR_PASID_ENTRY_P;
}
return 0;
@@ -938,7 +991,7 @@ static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s,
}
if (!vtd_pdire_present(&pdire)) {
- return -VTD_FR_PASID_TABLE_INV;
+ return -VTD_FR_PASID_DIR_ENTRY_P;
}
/*
@@ -973,7 +1026,11 @@ static uint32_t vtd_get_iova_level(IntelIOMMUState *s,
if (s->root_scalable) {
vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
- return VTD_PE_GET_LEVEL(&pe);
+ if (s->flts) {
+ return VTD_PE_GET_FL_LEVEL(&pe);
+ } else {
+ return VTD_PE_GET_SL_LEVEL(&pe);
+ }
}
return vtd_ce_get_level(ce);
@@ -1041,9 +1098,9 @@ static inline uint64_t vtd_iova_limit(IntelIOMMUState *s,
}
/* Return true if IOVA passes range check, otherwise false. */
-static inline bool vtd_iova_range_check(IntelIOMMUState *s,
- uint64_t iova, VTDContextEntry *ce,
- uint8_t aw, uint32_t pasid)
+static inline bool vtd_iova_sl_range_check(IntelIOMMUState *s,
+ uint64_t iova, VTDContextEntry *ce,
+ uint8_t aw, uint32_t pasid)
{
/*
* Check if @iova is above 2^X-1, where X is the minimum of MGAW
@@ -1060,7 +1117,11 @@ static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
if (s->root_scalable) {
vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
- return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
+ if (s->flts) {
+ return pe.val[2] & VTD_SM_PASID_ENTRY_FLPTPTR;
+ } else {
+ return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
+ }
}
return vtd_ce_get_slpt_base(ce);
@@ -1084,17 +1145,17 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
/*
* We should have caught a guest-mis-programmed level earlier,
- * via vtd_is_level_supported.
+ * via vtd_is_sl_level_supported.
*/
assert(level < VTD_SPTE_RSVD_LEN);
/*
- * Zero level doesn't exist. The smallest level is VTD_SL_PT_LEVEL=1 and
- * checked by vtd_is_last_slpte().
+ * Zero level doesn't exist. The smallest level is VTD_PT_LEVEL=1 and
+ * checked by vtd_is_last_pte().
*/
assert(level);
- if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) &&
- (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) {
+ if ((level == VTD_PD_LEVEL || level == VTD_PDP_LEVEL) &&
+ (slpte & VTD_PT_PAGE_SIZE_MASK)) {
/* large page */
rsvd_mask = vtd_spte_rsvd_large[level];
} else {
@@ -1118,9 +1179,8 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
uint32_t offset;
uint64_t slpte;
uint64_t access_right_check;
- uint64_t xlat, size;
- if (!vtd_iova_range_check(s, iova, ce, aw_bits, pasid)) {
+ if (!vtd_iova_sl_range_check(s, iova, ce, aw_bits, pasid)) {
error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ","
"pasid=0x%" PRIx32 ")", __func__, iova, pasid);
return -VTD_FR_ADDR_BEYOND_MGAW;
@@ -1131,7 +1191,7 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
while (true) {
offset = vtd_iova_level_offset(iova, level);
- slpte = vtd_get_slpte(addr, offset);
+ slpte = vtd_get_pte(addr, offset);
if (slpte == (uint64_t)-1) {
error_report_once("%s: detected read error on DMAR slpte "
@@ -1162,37 +1222,16 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
return -VTD_FR_PAGING_ENTRY_RSVD;
}
- if (vtd_is_last_slpte(slpte, level)) {
+ if (vtd_is_last_pte(slpte, level)) {
*slptep = slpte;
*slpte_level = level;
break;
}
- addr = vtd_get_slpte_addr(slpte, aw_bits);
+ addr = vtd_get_pte_addr(slpte, aw_bits);
level--;
}
- xlat = vtd_get_slpte_addr(*slptep, aw_bits);
- size = ~vtd_slpt_level_page_mask(level) + 1;
-
- /*
- * From VT-d spec 3.14: Untranslated requests and translation
- * requests that result in an address in the interrupt range will be
- * blocked with condition code LGN.4 or SGN.8.
- */
- if ((xlat > VTD_INTERRUPT_ADDR_LAST ||
- xlat + size - 1 < VTD_INTERRUPT_ADDR_FIRST)) {
- return 0;
- } else {
- error_report_once("%s: xlat address is in interrupt range "
- "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
- "slpte=0x%" PRIx64 ", write=%d, "
- "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", "
- "pasid=0x%" PRIx32 ")",
- __func__, iova, level, slpte, is_write,
- xlat, size, pasid);
- return s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR :
- -VTD_FR_INTERRUPT_ADDR;
- }
+ return 0;
}
typedef int (*vtd_page_walk_hook)(const IOMMUTLBEvent *event, void *private);
@@ -1323,14 +1362,14 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
trace_vtd_page_walk_level(addr, level, start, end);
- subpage_size = 1ULL << vtd_slpt_level_shift(level);
- subpage_mask = vtd_slpt_level_page_mask(level);
+ subpage_size = 1ULL << vtd_pt_level_shift(level);
+ subpage_mask = vtd_pt_level_page_mask(level);
while (iova < end) {
iova_next = (iova & subpage_mask) + subpage_size;
offset = vtd_iova_level_offset(iova, level);
- slpte = vtd_get_slpte(addr, offset);
+ slpte = vtd_get_pte(addr, offset);
if (slpte == (uint64_t)-1) {
trace_vtd_page_walk_skip_read(iova, iova_next);
@@ -1353,12 +1392,12 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
*/
entry_valid = read_cur | write_cur;
- if (!vtd_is_last_slpte(slpte, level) && entry_valid) {
+ if (!vtd_is_last_pte(slpte, level) && entry_valid) {
/*
* This is a valid PDE (or even bigger than PDE). We need
* to walk one further level.
*/
- ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
+ ret = vtd_page_walk_level(vtd_get_pte_addr(slpte, info->aw),
iova, MIN(iova_next, end), level - 1,
read_cur, write_cur, info);
} else {
@@ -1375,7 +1414,7 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
event.entry.addr_mask = ~subpage_mask;
/* NOTE: this is only meaningful if entry_valid == true */
- event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
+ event.entry.translated_addr = vtd_get_pte_addr(slpte, info->aw);
event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
IOMMU_NOTIFIER_UNMAP;
ret = vtd_page_walk_one(&event, info);
@@ -1409,11 +1448,11 @@ static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce,
dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
uint32_t level = vtd_get_iova_level(s, ce, pasid);
- if (!vtd_iova_range_check(s, start, ce, info->aw, pasid)) {
+ if (!vtd_iova_sl_range_check(s, start, ce, info->aw, pasid)) {
return -VTD_FR_ADDR_BEYOND_MGAW;
}
- if (!vtd_iova_range_check(s, end, ce, info->aw, pasid)) {
+ if (!vtd_iova_sl_range_check(s, end, ce, info->aw, pasid)) {
/* Fix end so that it reaches the maximum */
end = vtd_iova_limit(s, ce, info->aw, pasid);
}
@@ -1528,7 +1567,7 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
/* Check if the programming of context-entry is valid */
if (!s->root_scalable &&
- !vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
+ !vtd_is_sl_level_supported(s, vtd_ce_get_level(ce))) {
error_report_once("%s: invalid context entry: hi=%"PRIx64
", lo=%"PRIx64" (level %d not supported)",
__func__, ce->hi, ce->lo,
@@ -1689,8 +1728,6 @@ static bool vtd_as_pt_enabled(VTDAddressSpace *as)
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
bool use_iommu, pt;
- /* Whether we need to take the BQL on our own */
- bool take_bql = !bql_locked();
assert(as);
@@ -1707,9 +1744,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
* from vtd_pt_enable_fast_path(). However the memory APIs need
* it. We'd better make sure we have had it already, or, take it.
*/
- if (take_bql) {
- bql_lock();
- }
+ BQL_LOCK_GUARD();
/* Turn off first then on the other */
if (use_iommu) {
@@ -1762,10 +1797,6 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
memory_region_set_enabled(&as->iommu_ir_fault, false);
}
- if (take_bql) {
- bql_unlock();
- }
-
return use_iommu;
}
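
The hunk above drops the manual take_bql/bql_lock()/bql_unlock() bookkeeping in favour of BQL_LOCK_GUARD(), which acquires the Big QEMU Lock only if the caller does not already hold it and releases it automatically at the end of the scope. A minimal sketch of the pattern (function and state names are illustrative):

static void foo_switch_regions(FooState *s)
{
    /* no-op if the BQL is already held; otherwise takes it and drops it
     * again when the enclosing scope is left */
    BQL_LOCK_GUARD();

    memory_region_set_enabled(&s->dma_region, false);
    memory_region_set_enabled(&s->iommu_region, true);
}
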
@@ -1795,8 +1826,20 @@ static const bool vtd_qualified_faults[] = {
[VTD_FR_ROOT_ENTRY_RSVD] = false,
[VTD_FR_PAGING_ENTRY_RSVD] = true,
[VTD_FR_CONTEXT_ENTRY_TT] = true,
- [VTD_FR_PASID_TABLE_INV] = false,
+ [VTD_FR_PASID_DIR_ACCESS_ERR] = false,
+ [VTD_FR_PASID_DIR_ENTRY_P] = true,
+ [VTD_FR_PASID_TABLE_ACCESS_ERR] = false,
+ [VTD_FR_PASID_ENTRY_P] = true,
+ [VTD_FR_PASID_TABLE_ENTRY_INV] = true,
+ [VTD_FR_FS_PAGING_ENTRY_INV] = true,
+ [VTD_FR_FS_PAGING_ENTRY_P] = true,
+ [VTD_FR_FS_PAGING_ENTRY_RSVD] = true,
+ [VTD_FR_PASID_ENTRY_FSPTPTR_INV] = true,
+ [VTD_FR_FS_NON_CANONICAL] = true,
+ [VTD_FR_FS_PAGING_ENTRY_US] = true,
+ [VTD_FR_SM_WRITE] = true,
[VTD_FR_SM_INTERRUPT_ADDR] = true,
+ [VTD_FR_FS_BIT_UPDATE_FAILED] = true,
[VTD_FR_MAX] = false,
};
@@ -1814,29 +1857,32 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr)
return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
-static gboolean vtd_find_as_by_sid(gpointer key, gpointer value,
- gpointer user_data)
+static gboolean vtd_find_as_by_sid_and_pasid(gpointer key, gpointer value,
+ gpointer user_data)
{
struct vtd_as_key *as_key = (struct vtd_as_key *)key;
- uint16_t target_sid = *(uint16_t *)user_data;
+ struct vtd_as_raw_key *target = (struct vtd_as_raw_key *)user_data;
uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn);
- return sid == target_sid;
+
+ return (as_key->pasid == target->pasid) && (sid == target->sid);
}
-static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid)
+static VTDAddressSpace *vtd_get_as_by_sid_and_pasid(IntelIOMMUState *s,
+ uint16_t sid,
+ uint32_t pasid)
{
- uint8_t bus_num = PCI_BUS_NUM(sid);
- VTDAddressSpace *vtd_as = s->vtd_as_cache[bus_num];
-
- if (vtd_as &&
- (sid == PCI_BUILD_BDF(pci_bus_num(vtd_as->bus), vtd_as->devfn))) {
- return vtd_as;
- }
+ struct vtd_as_raw_key key = {
+ .sid = sid,
+ .pasid = pasid
+ };
- vtd_as = g_hash_table_find(s->vtd_address_spaces, vtd_find_as_by_sid, &sid);
- s->vtd_as_cache[bus_num] = vtd_as;
+ return g_hash_table_find(s->vtd_address_spaces,
+ vtd_find_as_by_sid_and_pasid, &key);
+}
- return vtd_as;
+static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid)
+{
+ return vtd_get_as_by_sid_and_pasid(s, sid, PCI_NO_PASID);
}
static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
@@ -1858,6 +1904,157 @@ out:
trace_vtd_pt_enable_fast_path(source_id, success);
}
+/*
+ * Rsvd field masks for fpte:
+ * vtd_fpte_rsvd 4k pages
+ * vtd_fpte_rsvd_large large pages
+ *
+ * We support only 4-level page tables.
+ */
+#define VTD_FPTE_RSVD_LEN 5
+static uint64_t vtd_fpte_rsvd[VTD_FPTE_RSVD_LEN];
+static uint64_t vtd_fpte_rsvd_large[VTD_FPTE_RSVD_LEN];
+
+static bool vtd_flpte_nonzero_rsvd(uint64_t flpte, uint32_t level)
+{
+ uint64_t rsvd_mask;
+
+ /*
+ * We should have caught a guest-mis-programmed level earlier,
+ * via vtd_is_fl_level_supported.
+ */
+ assert(level < VTD_FPTE_RSVD_LEN);
+ /*
+ * Zero level doesn't exist. The smallest level is VTD_PT_LEVEL=1 and
+ * checked by vtd_is_last_pte().
+ */
+ assert(level);
+
+ if ((level == VTD_PD_LEVEL || level == VTD_PDP_LEVEL) &&
+ (flpte & VTD_PT_PAGE_SIZE_MASK)) {
+ /* large page */
+ rsvd_mask = vtd_fpte_rsvd_large[level];
+ } else {
+ rsvd_mask = vtd_fpte_rsvd[level];
+ }
+
+ return flpte & rsvd_mask;
+}
+
+static inline bool vtd_flpte_present(uint64_t flpte)
+{
+ return !!(flpte & VTD_FL_P);
+}
+
+/* Return true if IOVA is canonical, otherwise false. */
+static bool vtd_iova_fl_check_canonical(IntelIOMMUState *s, uint64_t iova,
+ VTDContextEntry *ce, uint32_t pasid)
+{
+ uint64_t iova_limit = vtd_iova_limit(s, ce, s->aw_bits, pasid);
+ uint64_t upper_bits_mask = ~(iova_limit - 1);
+ uint64_t upper_bits = iova & upper_bits_mask;
+ bool msb = ((iova & (iova_limit >> 1)) != 0);
+
+ if (msb) {
+ return upper_bits == upper_bits_mask;
+ } else {
+ return !upper_bits;
+ }
+}
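/*
 * A minimal standalone sketch of the canonicality rule that
 * vtd_iova_fl_check_canonical() enforces, specialised to the 4-level
 * (48-bit) first-stage tables supported above. is_canonical_48() is an
 * illustrative helper, not part of this file.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_canonical_48(uint64_t va)
{
    /* Bits 63:47 must be all zeros or all ones (sign extension of bit 47). */
    uint64_t upper = va >> 47;

    return upper == 0 || upper == 0x1ffff;
}

int main(void)
{
    printf("%d\n", is_canonical_48(0x00007fffffffffffULL)); /* 1 */
    printf("%d\n", is_canonical_48(0xffff800000000000ULL)); /* 1 */
    printf("%d\n", is_canonical_48(0x0000800000000000ULL)); /* 0 */
    return 0;
}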
+
+static MemTxResult vtd_set_flag_in_pte(dma_addr_t base_addr, uint32_t index,
+ uint64_t pte, uint64_t flag)
+{
+ if (pte & flag) {
+ return MEMTX_OK;
+ }
+ pte |= flag;
+ pte = cpu_to_le64(pte);
+ return dma_memory_write(&address_space_memory,
+ base_addr + index * sizeof(pte),
+ &pte, sizeof(pte),
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+/*
+ * Given the @iova, get the relevant @flptep. @flpte_level will be the last
+ * level of the translation, which can be used to decide the size of a large
+ * page.
+ */
+static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
+ uint64_t iova, bool is_write,
+ uint64_t *flptep, uint32_t *flpte_level,
+ bool *reads, bool *writes, uint8_t aw_bits,
+ uint32_t pasid)
+{
+ dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
+ uint32_t level = vtd_get_iova_level(s, ce, pasid);
+ uint32_t offset;
+ uint64_t flpte, flag_ad = VTD_FL_A;
+
+ if (!vtd_iova_fl_check_canonical(s, iova, ce, pasid)) {
+ error_report_once("%s: detected non canonical IOVA (iova=0x%" PRIx64 ","
+ "pasid=0x%" PRIx32 ")", __func__, iova, pasid);
+ return -VTD_FR_FS_NON_CANONICAL;
+ }
+
+ while (true) {
+ offset = vtd_iova_level_offset(iova, level);
+ flpte = vtd_get_pte(addr, offset);
+
+ if (flpte == (uint64_t)-1) {
+ if (level == vtd_get_iova_level(s, ce, pasid)) {
+ /* Invalid programming of pasid-entry */
+ return -VTD_FR_PASID_ENTRY_FSPTPTR_INV;
+ } else {
+ return -VTD_FR_FS_PAGING_ENTRY_INV;
+ }
+ }
+
+ if (!vtd_flpte_present(flpte)) {
+ *reads = false;
+ *writes = false;
+ return -VTD_FR_FS_PAGING_ENTRY_P;
+ }
+
+ /* No emulated device supports supervisor privilege request yet */
+ if (!(flpte & VTD_FL_US)) {
+ *reads = false;
+ *writes = false;
+ return -VTD_FR_FS_PAGING_ENTRY_US;
+ }
+
+ *reads = true;
+ *writes = (*writes) && (flpte & VTD_FL_RW);
+ if (is_write && !(flpte & VTD_FL_RW)) {
+ return -VTD_FR_SM_WRITE;
+ }
+ if (vtd_flpte_nonzero_rsvd(flpte, level)) {
+            error_report_once("%s: detected flpte reserved non-zero "
+                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32
+                              ", flpte=0x%" PRIx64 ", pasid=0x%" PRIx32 ")",
+                              __func__, iova, level, flpte, pasid);
+ return -VTD_FR_FS_PAGING_ENTRY_RSVD;
+ }
+
+ if (vtd_is_last_pte(flpte, level) && is_write) {
+ flag_ad |= VTD_FL_D;
+ }
+
+ if (vtd_set_flag_in_pte(addr, offset, flpte, flag_ad) != MEMTX_OK) {
+ return -VTD_FR_FS_BIT_UPDATE_FAILED;
+ }
+
+ if (vtd_is_last_pte(flpte, level)) {
+ *flptep = flpte;
+ *flpte_level = level;
+ return 0;
+ }
+
+ addr = vtd_get_pte_addr(flpte, aw_bits);
+ level--;
+ }
+}
+
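/*
 * A minimal sketch of how the walk above indexes each paging level,
 * assuming 4 KiB pages and 9 bits per level (VTD_LEVEL_BITS) as used by
 * vtd_iova_level_offset(); fl_level_offset() is an illustrative helper.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fl_level_offset(uint64_t iova, uint32_t level)
{
    /* level 1 -> bits 20:12, level 2 -> bits 29:21, ..., level 4 -> bits 47:39 */
    return (iova >> (12 + 9 * (level - 1))) & 0x1ff;
}

int main(void)
{
    uint64_t iova = 0x0000123456789000ULL;
    uint32_t level;

    for (level = 4; level >= 1; level--) {
        printf("level %u -> index 0x%03x\n", level, fl_level_offset(iova, level));
    }
    return 0;
}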
static void vtd_report_fault(IntelIOMMUState *s,
int err, bool is_fpd_set,
uint16_t source_id,
@@ -1894,16 +2091,17 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
VTDContextEntry ce;
uint8_t bus_num = pci_bus_num(bus);
VTDContextCacheEntry *cc_entry;
- uint64_t slpte, page_mask;
+ uint64_t pte, page_mask;
uint32_t level, pasid = vtd_as->pasid;
uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn);
int ret_fr;
bool is_fpd_set = false;
bool reads = true;
bool writes = true;
- uint8_t access_flags;
+ uint8_t access_flags, pgtt;
bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable;
VTDIOTLBEntry *iotlb_entry;
+ uint64_t xlat, size;
/*
* We have standalone memory region for interrupt addresses, we
@@ -1915,13 +2113,13 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
cc_entry = &vtd_as->context_cache_entry;
- /* Try to fetch slpte form IOTLB, we don't need RID2PASID logic */
+ /* Try to fetch pte from IOTLB, we don't need RID2PASID logic */
if (!rid2pasid) {
iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
if (iotlb_entry) {
- trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
+ trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte,
iotlb_entry->domain_id);
- slpte = iotlb_entry->slpte;
+ pte = iotlb_entry->pte;
access_flags = iotlb_entry->access_flags;
page_mask = iotlb_entry->mask;
goto out;
@@ -1993,35 +2191,65 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
return true;
}
- /* Try to fetch slpte form IOTLB for RID2PASID slow path */
+ /* Try to fetch pte from IOTLB for RID2PASID slow path */
if (rid2pasid) {
iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
if (iotlb_entry) {
- trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
+ trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte,
iotlb_entry->domain_id);
- slpte = iotlb_entry->slpte;
+ pte = iotlb_entry->pte;
access_flags = iotlb_entry->access_flags;
page_mask = iotlb_entry->mask;
goto out;
}
}
- ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level,
- &reads, &writes, s->aw_bits, pasid);
+ if (s->flts && s->root_scalable) {
+ ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level,
+ &reads, &writes, s->aw_bits, pasid);
+ pgtt = VTD_SM_PASID_ENTRY_FLT;
+ } else {
+ ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level,
+ &reads, &writes, s->aw_bits, pasid);
+ pgtt = VTD_SM_PASID_ENTRY_SLT;
+ }
+ if (!ret_fr) {
+ xlat = vtd_get_pte_addr(pte, s->aw_bits);
+ size = ~vtd_pt_level_page_mask(level) + 1;
+
+ /*
+ * Per VT-d spec 4.1 section 3.15: Untranslated requests and translation
+ * requests that result in an address in the interrupt range will be
+ * blocked with condition code LGN.4 or SGN.8.
+ */
+ if ((xlat <= VTD_INTERRUPT_ADDR_LAST &&
+ xlat + size - 1 >= VTD_INTERRUPT_ADDR_FIRST)) {
+ error_report_once("%s: xlat address is in interrupt range "
+ "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
+ "pte=0x%" PRIx64 ", write=%d, "
+ "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", "
+ "pasid=0x%" PRIx32 ")",
+ __func__, addr, level, pte, is_write,
+ xlat, size, pasid);
+ ret_fr = s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR :
+ -VTD_FR_INTERRUPT_ADDR;
+ }
+ }
+
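/*
 * The check just above is a plain interval-overlap test against the x86
 * interrupt address window. A self-contained sketch, assuming the usual
 * 0xfee00000..0xfeefffff values of VTD_INTERRUPT_ADDR_FIRST/LAST:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_INTR_FIRST 0xfee00000ULL
#define EX_INTR_LAST  0xfeefffffULL

static bool overlaps_interrupt_range(uint64_t xlat, uint64_t size)
{
    return xlat <= EX_INTR_LAST && xlat + size - 1 >= EX_INTR_FIRST;
}

int main(void)
{
    printf("%d\n", overlaps_interrupt_range(0xfee00000ULL, 0x1000)); /* 1 */
    printf("%d\n", overlaps_interrupt_range(0xfec00000ULL, 0x1000)); /* 0 */
    printf("%d\n", overlaps_interrupt_range(0xfedff000ULL, 0x2000)); /* 1 */
    return 0;
}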
if (ret_fr) {
vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
addr, is_write, pasid != PCI_NO_PASID, pasid);
goto error;
}
- page_mask = vtd_slpt_level_page_mask(level);
+ page_mask = vtd_pt_level_page_mask(level);
access_flags = IOMMU_ACCESS_FLAG(reads, writes);
vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid),
- addr, slpte, access_flags, level, pasid);
+ addr, pte, access_flags, level, pasid, pgtt);
out:
vtd_iommu_unlock(s);
entry->iova = addr & page_mask;
- entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
+ entry->translated_addr = vtd_get_pte_addr(pte, s->aw_bits) & page_mask;
entry->addr_mask = ~page_mask;
entry->perm = access_flags;
return true;
@@ -2215,8 +2443,13 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
}
}
+/*
+ * There is no pasid field in the iotlb invalidation descriptor, so
+ * PCI_NO_PASID is passed as the parameter. Piotlb invalidation does carry a
+ * pasid in its descriptor; that pasid is passed instead and should not be
+ * PCI_NO_PASID.
+ */
static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
- uint16_t domain_id, hwaddr addr,
+ uint16_t domain_id, hwaddr addr,
uint8_t am, uint32_t pasid)
{
VTDAddressSpace *vtd_as;
@@ -2225,19 +2458,37 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
hwaddr size = (1 << am) * VTD_PAGE_SIZE;
QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) {
- if (pasid != PCI_NO_PASID && pasid != vtd_as->pasid) {
- continue;
- }
ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
vtd_as->devfn, &ce);
if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
+ uint32_t rid2pasid = PCI_NO_PASID;
+
+ if (s->root_scalable) {
+ rid2pasid = VTD_CE_GET_RID2PASID(&ce);
+ }
+
+ /*
+ * In legacy mode, vtd_as->pasid == pasid is always true.
+             * In scalable mode, for a vtd address space backing a PCI
+             * device without a pasid, we need to compare pasid with
+             * the rid2pasid of this device.
+ */
+ if (!(vtd_as->pasid == pasid ||
+ (vtd_as->pasid == PCI_NO_PASID && pasid == rid2pasid))) {
+ continue;
+ }
+
if (vtd_as_has_map_notifier(vtd_as)) {
/*
- * As long as we have MAP notifications registered in
- * any of our IOMMU notifiers, we need to sync the
- * shadow page table.
+ * When stage-1 translation is off, as long as we have MAP
+ * notifications registered in any of our IOMMU notifiers,
+                 * we need to sync the shadow page table. Otherwise the VFIO
+                 * device attaches to the nested page table instead of the
+                 * shadow page table, so there is no need to sync.
*/
- vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size);
+ if (!s->flts || !s->root_scalable) {
+ vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size);
+ }
} else {
/*
* For UNMAP-only notifiers, we don't need to walk the
@@ -2532,15 +2783,51 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s,
return true;
}
+static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc,
+ uint64_t mask[4], bool dw,
+ const char *func_name,
+ const char *desc_type)
+{
+ if (s->iq_dw) {
+ if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] ||
+ inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) {
+ error_report("%s: invalid %s desc val[3]: 0x%"PRIx64
+ " val[2]: 0x%"PRIx64" val[1]=0x%"PRIx64
+ " val[0]=0x%"PRIx64" (reserved nonzero)",
+ func_name, desc_type, inv_desc->val[3],
+ inv_desc->val[2], inv_desc->val[1],
+ inv_desc->val[0]);
+ return false;
+ }
+ } else {
+ if (dw) {
+ error_report("%s: 256-bit %s desc in 128-bit invalidation queue",
+ func_name, desc_type);
+ return false;
+ }
+
+ if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) {
+ error_report("%s: invalid %s desc: hi=%"PRIx64", lo=%"PRIx64
+ " (reserved nonzero)", func_name, desc_type,
+ inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+ }
+
+ return true;
+}
+
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
- if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
- (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
- error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
- " (reserved nonzero)", __func__, inv_desc->hi,
- inv_desc->lo);
+ uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
+ __func__, "wait")) {
return false;
}
+
if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
/* Status Write */
uint32_t status_data = (uint32_t)(inv_desc->lo >>
@@ -2574,13 +2861,14 @@ static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
uint16_t sid, fmask;
+ uint64_t mask[4] = {VTD_INV_DESC_CC_RSVD, VTD_INV_DESC_ALL_ONE,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
- if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
- error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
- " (reserved nonzero)", __func__, inv_desc->hi,
- inv_desc->lo);
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
+ __func__, "cc inv")) {
return false;
}
+
switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
case VTD_INV_DESC_CC_DOMAIN:
trace_vtd_inv_desc_cc_domain(
@@ -2610,12 +2898,11 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
uint16_t domain_id;
uint8_t am;
hwaddr addr;
+ uint64_t mask[4] = {VTD_INV_DESC_IOTLB_RSVD_LO, VTD_INV_DESC_IOTLB_RSVD_HI,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
- if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
- (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
- error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
- ", lo=0x%"PRIx64" (reserved bits unzero)",
- __func__, inv_desc->hi, inv_desc->lo);
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
+ __func__, "iotlb inv")) {
return false;
}
@@ -2653,9 +2940,117 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
return true;
}
+static gboolean vtd_hash_remove_by_pasid(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
+ VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
+
+ return ((entry->domain_id == info->domain_id) &&
+ (entry->pasid == info->pasid));
+}
+
+static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
+ uint16_t domain_id, uint32_t pasid)
+{
+ VTDIOTLBPageInvInfo info;
+ VTDAddressSpace *vtd_as;
+ VTDContextEntry ce;
+
+ info.domain_id = domain_id;
+ info.pasid = pasid;
+
+ vtd_iommu_lock(s);
+ g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
+ &info);
+ vtd_iommu_unlock(s);
+
+ QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
+ if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
+ vtd_as->devfn, &ce) &&
+ domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
+ uint32_t rid2pasid = VTD_CE_GET_RID2PASID(&ce);
+
+ if ((vtd_as->pasid != PCI_NO_PASID || pasid != rid2pasid) &&
+ vtd_as->pasid != pasid) {
+ continue;
+ }
+
+ if (!s->flts || !vtd_as_has_map_notifier(vtd_as)) {
+ vtd_address_space_sync(vtd_as);
+ }
+ }
+ }
+}
+
+static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
+ uint32_t pasid, hwaddr addr, uint8_t am)
+{
+ VTDIOTLBPageInvInfo info;
+
+ info.domain_id = domain_id;
+ info.pasid = pasid;
+ info.addr = addr;
+ info.mask = ~((1 << am) - 1);
+
+ vtd_iommu_lock(s);
+ g_hash_table_foreach_remove(s->iotlb,
+ vtd_hash_remove_by_page_piotlb, &info);
+ vtd_iommu_unlock(s);
+
+ vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, pasid);
+}
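/*
 * A small sketch of the address-mask (AM) encoding used by the
 * page-selective invalidations above: AM is the number of low page-frame
 * bits to ignore, so a descriptor covers a naturally aligned block of
 * 2^AM 4 KiB pages (matching the "(1 << am) * VTD_PAGE_SIZE" size in
 * vtd_iotlb_page_invalidate_notify()). piotlb_range() is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096ULL

static void piotlb_range(uint64_t addr, uint8_t am,
                         uint64_t *start, uint64_t *size)
{
    *size = (1ULL << am) * EX_PAGE_SIZE;
    *start = addr & ~(*size - 1);   /* align down to the block size */
}

int main(void)
{
    uint64_t start, size;

    piotlb_range(0x12345000ULL, 2, &start, &size);   /* 4 pages = 16 KiB */
    printf("start=0x%llx size=0x%llx\n",
           (unsigned long long)start, (unsigned long long)size);
    return 0;
}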
+
+static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ uint16_t domain_id;
+ uint32_t pasid;
+ hwaddr addr;
+ uint8_t am;
+ uint64_t mask[4] = {VTD_INV_DESC_PIOTLB_RSVD_VAL0,
+ VTD_INV_DESC_PIOTLB_RSVD_VAL1,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, true,
+ __func__, "piotlb inv")) {
+ return false;
+ }
+
+ domain_id = VTD_INV_DESC_PIOTLB_DID(inv_desc->val[0]);
+ pasid = VTD_INV_DESC_PIOTLB_PASID(inv_desc->val[0]);
+ switch (inv_desc->val[0] & VTD_INV_DESC_PIOTLB_G) {
+ case VTD_INV_DESC_PIOTLB_ALL_IN_PASID:
+ vtd_piotlb_pasid_invalidate(s, domain_id, pasid);
+ break;
+
+ case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
+ am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]);
+ addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]);
+ vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am);
+ break;
+
+ default:
+ error_report_once("%s: invalid piotlb inv desc: hi=0x%"PRIx64
+ ", lo=0x%"PRIx64" (type mismatch: 0x%llx)",
+ __func__, inv_desc->val[1], inv_desc->val[0],
+ inv_desc->val[0] & VTD_INV_DESC_IOTLB_G);
+ return false;
+ }
+ return true;
+}
+
static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
+ uint64_t mask[4] = {VTD_INV_DESC_IEC_RSVD, VTD_INV_DESC_ALL_ONE,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
+ __func__, "iec inv")) {
+ return false;
+ }
+
trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
inv_desc->iec.index,
inv_desc->iec.index_mask);
@@ -2666,38 +3061,11 @@ static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
return true;
}
-static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
- VTDInvDesc *inv_desc)
+static void do_invalidate_device_tlb(VTDAddressSpace *vtd_dev_as,
+ bool size, hwaddr addr)
{
- VTDAddressSpace *vtd_dev_as;
- IOMMUTLBEvent event;
- hwaddr addr;
- uint64_t sz;
- uint16_t sid;
- bool size;
-
- addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
- sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
- size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
-
- if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
- (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
- error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
- ", lo=%"PRIx64" (reserved nonzero)", __func__,
- inv_desc->hi, inv_desc->lo);
- return false;
- }
-
/*
- * Using sid is OK since the guest should have finished the
- * initialization of both the bus and device.
- */
- vtd_dev_as = vtd_get_as_by_sid(s, sid);
- if (!vtd_dev_as) {
- goto done;
- }
-
- /* According to ATS spec table 2.4:
+ * According to ATS spec table 2.4:
* S = 0, bits 15:12 = xxxx range size: 4K
* S = 1, bits 15:12 = xxx0 range size: 8K
* S = 1, bits 15:12 = xx01 range size: 16K
@@ -2705,6 +3073,10 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
* S = 1, bits 15:12 = 0111 range size: 64K
* ...
*/
+
+ IOMMUTLBEvent event;
+ uint64_t sz;
+
if (size) {
sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
addr &= ~(sz - 1);
@@ -2719,6 +3091,81 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
event.entry.perm = IOMMU_NONE;
event.entry.translated_addr = 0;
memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
+}
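/*
 * Standalone sketch of the ATS size encoding decoded above: with S set,
 * the number of trailing 1s in the page-frame bits selects the
 * power-of-two range (see the table quoted in the comment). ex_cto64()
 * is a stand-in for the real cto64() helper.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE  4096ULL
#define EX_PAGE_SHIFT 12

static unsigned ex_cto64(uint64_t v)
{
    unsigned n = 0;

    while (v & 1) {
        n++;
        v >>= 1;
    }
    return n;
}

int main(void)
{
    /* S = 1 and bits 15:12 = 0111 encode a 64 KiB range. */
    uint64_t addr = 0x12340000ULL | (0x7ULL << 12);
    uint64_t sz = (EX_PAGE_SIZE * 2) << ex_cto64(addr >> EX_PAGE_SHIFT);

    addr &= ~(sz - 1);
    printf("range 0x%llx + 0x%llx\n",
           (unsigned long long)addr, (unsigned long long)sz);
    return 0;
}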
+
+static bool vtd_process_device_piotlb_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ uint16_t sid;
+ VTDAddressSpace *vtd_dev_as;
+ bool size;
+ bool global;
+ hwaddr addr;
+ uint32_t pasid;
+ uint64_t mask[4] = {VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL0,
+ VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL1,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, true,
+ __func__, "device piotlb inv")) {
+ return false;
+ }
+
+ global = VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(inv_desc->hi);
+ size = VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(inv_desc->hi);
+ addr = VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(inv_desc->hi);
+ sid = VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(inv_desc->lo);
+ if (global) {
+ QLIST_FOREACH(vtd_dev_as, &s->vtd_as_with_notifiers, next) {
+ if ((vtd_dev_as->pasid != PCI_NO_PASID) &&
+ (PCI_BUILD_BDF(pci_bus_num(vtd_dev_as->bus),
+ vtd_dev_as->devfn) == sid)) {
+ do_invalidate_device_tlb(vtd_dev_as, size, addr);
+ }
+ }
+ } else {
+ pasid = VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(inv_desc->lo);
+ vtd_dev_as = vtd_get_as_by_sid_and_pasid(s, sid, pasid);
+ if (!vtd_dev_as) {
+ return true;
+ }
+
+ do_invalidate_device_tlb(vtd_dev_as, size, addr);
+ }
+
+ return true;
+}
+
+static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ VTDAddressSpace *vtd_dev_as;
+ hwaddr addr;
+ uint16_t sid;
+ bool size;
+ uint64_t mask[4] = {VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO,
+ VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+
+ if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
+ __func__, "dev-iotlb inv")) {
+ return false;
+ }
+
+ addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
+ sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
+ size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
+
+ /*
+ * Using sid is OK since the guest should have finished the
+ * initialization of both the bus and device.
+ */
+ vtd_dev_as = vtd_get_as_by_sid(s, sid);
+ if (!vtd_dev_as) {
+ goto done;
+ }
+
+ do_invalidate_device_tlb(vtd_dev_as, size, addr);
done:
return true;
@@ -2735,7 +3182,7 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
return false;
}
- desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
+ desc_type = VTD_INV_DESC_TYPE(inv_desc.lo);
/* FIXME: should update at first or at last? */
s->iq_last_desc_type = desc_type;
@@ -2754,15 +3201,11 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
}
break;
- /*
- * TODO: the entity of below two cases will be implemented in future series.
- * To make guest (which integrates scalable mode support patch set in
- * iommu driver) work, just return true is enough so far.
- */
- case VTD_INV_DESC_PC:
- break;
-
case VTD_INV_DESC_PIOTLB:
+ trace_vtd_inv_desc("p-iotlb", inv_desc.val[1], inv_desc.val[0]);
+ if (!vtd_process_piotlb_desc(s, &inv_desc)) {
+ return false;
+ }
break;
case VTD_INV_DESC_WAIT:
@@ -2779,6 +3222,13 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
}
break;
+ case VTD_INV_DESC_DEV_PIOTLB:
+ trace_vtd_inv_desc("device-piotlb", inv_desc.hi, inv_desc.lo);
+ if (!vtd_process_device_piotlb_desc(s, &inv_desc)) {
+ return false;
+ }
+ break;
+
case VTD_INV_DESC_DEVICE:
trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
@@ -2786,6 +3236,16 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
}
break;
+ /*
+     * TODO: the case below will be implemented in a future series. To keep
+     * a guest whose iommu driver integrates the scalable mode support patch
+     * set working, just returning true is enough so far.
+ */
+ case VTD_INV_DESC_PC:
+ if (s->scalable_mode) {
+ break;
+ }
+ /* fallthrough */
default:
error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
" (unknown type)", __func__, inv_desc.hi,
@@ -2838,6 +3298,7 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s)
if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) {
error_report_once("%s: RSV bit is set: val=0x%"PRIx64,
__func__, val);
+ vtd_handle_inv_queue_error(s);
return;
}
s->iq_tail = VTD_IQT_QT(s->iq_dw, val);
@@ -2938,7 +3399,9 @@ static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
/* Invalidation Queue Address Register, 64-bit */
case DMAR_IQA_REG:
- val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
+ val = s->iq |
+ (vtd_get_quad(s, DMAR_IQA_REG) &
+ (VTD_IQA_QS | VTD_IQA_DW_MASK));
if (size == 4) {
val = val & ((1ULL << 32) - 1);
}
@@ -3348,7 +3811,7 @@ static const MemoryRegionOps vtd_mem_ops = {
},
};
-static Property vtd_properties[] = {
+static const Property vtd_properties[] = {
DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
ON_OFF_AUTO_AUTO),
@@ -3357,11 +3820,13 @@ static Property vtd_properties[] = {
VTD_HOST_ADDRESS_WIDTH),
DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
+ DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, flts, FALSE),
DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false),
+ DEFINE_PROP_BOOL("fs1gp", IntelIOMMUState, fs1gp, true),
};
/* Read IRTE entry with specific index */
@@ -3740,9 +4205,30 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
VTDAddressSpace *vtd_dev_as;
char name[128];
+ vtd_iommu_lock(s);
vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key);
+ vtd_iommu_unlock(s);
+
if (!vtd_dev_as) {
- struct vtd_as_key *new_key = g_malloc(sizeof(*new_key));
+ struct vtd_as_key *new_key;
+ /* Slow path */
+
+ /*
+         * memory_region_add_subregion_overlap() requires the BQL,
+         * so make sure we own it.
+ */
+ BQL_LOCK_GUARD();
+ vtd_iommu_lock(s);
+
+ /* Check again as we released the lock for a moment */
+ vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key);
+ if (vtd_dev_as) {
+ vtd_iommu_unlock(s);
+ return vtd_dev_as;
+ }
+
+ /* Still nothing, allocate a new address space */
+ new_key = g_malloc(sizeof(*new_key));
new_key->bus = bus;
new_key->devfn = devfn;
@@ -3833,6 +4319,8 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
vtd_switch_address_space(vtd_dev_as);
g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as);
+
+ vtd_iommu_unlock(s);
}
return vtd_dev_as;
}
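/*
 * The lookup above follows the classic double-checked pattern: probe the
 * hash table under the IOMMU lock, and only on a miss take the BQL and
 * the IOMMU lock again and re-check before allocating. A minimal generic
 * sketch with GLib; the table, key type and create_value() below are
 * hypothetical stand-ins, not QEMU code.
 */
#include <glib.h>

static GMutex table_lock;
static GHashTable *table;   /* guint key -> string value */

static gpointer create_value(guint key)
{
    return g_strdup_printf("value-%u", key);
}

static gpointer lookup_or_create(guint key)
{
    gpointer val;

    g_mutex_lock(&table_lock);
    val = g_hash_table_lookup(table, GUINT_TO_POINTER(key));
    g_mutex_unlock(&table_lock);
    if (val) {
        return val;                     /* fast path */
    }

    /* Slow path: another thread may have raced us, so check again. */
    g_mutex_lock(&table_lock);
    val = g_hash_table_lookup(table, GUINT_TO_POINTER(key));
    if (!val) {
        val = create_value(key);
        g_hash_table_insert(table, GUINT_TO_POINTER(key), val);
    }
    g_mutex_unlock(&table_lock);
    return val;
}

int main(void)
{
    table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, g_free);
    g_print("%s\n", (char *)lookup_or_create(42));
    g_hash_table_destroy(table);
    return 0;
}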
@@ -3858,7 +4346,13 @@ static bool vtd_check_hiod(IntelIOMMUState *s, HostIOMMUDevice *hiod,
return false;
}
- return true;
+ if (!s->flts) {
+ /* All checks requested by VTD stage-2 translation pass */
+ return true;
+ }
+
+    error_setg(errp, "host device is incompatible with stage-1 translation");
+ return false;
}
static bool vtd_dev_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
@@ -4036,8 +4530,6 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
}
-
- return;
}
static void vtd_cap_init(IntelIOMMUState *s)
@@ -4081,7 +4573,12 @@ static void vtd_cap_init(IntelIOMMUState *s)
}
/* TODO: read cap/ecap from host to decide which cap to be exposed. */
- if (s->scalable_mode) {
+ if (s->flts) {
+ s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_FLTS;
+ if (s->fs1gp) {
+ s->cap |= VTD_CAP_FS1GP;
+ }
+ } else if (s->scalable_mode) {
s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS;
}
@@ -4127,15 +4624,27 @@ static void vtd_init(IntelIOMMUState *s)
*/
vtd_spte_rsvd[0] = ~0ULL;
vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits,
- x86_iommu->dt_supported);
+ x86_iommu->dt_supported && s->stale_tm);
vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits,
- x86_iommu->dt_supported);
+ x86_iommu->dt_supported && s->stale_tm);
vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits,
- x86_iommu->dt_supported);
+ x86_iommu->dt_supported && s->stale_tm);
+
+ /*
+ * Rsvd field masks for fpte
+ */
+ vtd_fpte_rsvd[0] = ~0ULL;
+ vtd_fpte_rsvd[1] = VTD_FPTE_PAGE_L1_RSVD_MASK(s->aw_bits);
+ vtd_fpte_rsvd[2] = VTD_FPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
+ vtd_fpte_rsvd[3] = VTD_FPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
+ vtd_fpte_rsvd[4] = VTD_FPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
+
+ vtd_fpte_rsvd_large[2] = VTD_FPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
+ vtd_fpte_rsvd_large[3] = VTD_FPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
if (s->scalable_mode || s->snoop_control) {
vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP;
@@ -4201,10 +4710,11 @@ static void vtd_init(IntelIOMMUState *s)
/* Should not reset address_spaces when reset because devices will still use
* the address space they got at first (won't ask the bus again).
*/
-static void vtd_reset(DeviceState *dev)
+static void vtd_reset_exit(Object *obj, ResetType type)
{
- IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
+ IntelIOMMUState *s = INTEL_IOMMU_DEVICE(obj);
+ trace_vtd_reset_exit();
vtd_init(s);
vtd_address_space_refresh_all(s);
}
@@ -4248,14 +4758,26 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
}
}
- /* Currently only address widths supported are 39 and 48 bits */
- if ((s->aw_bits != VTD_HOST_AW_39BIT) &&
- (s->aw_bits != VTD_HOST_AW_48BIT)) {
- error_setg(errp, "Supported values for aw-bits are: %d, %d",
+ if (!s->scalable_mode && s->flts) {
+ error_setg(errp, "x-flts is only available in scalable mode");
+ return false;
+ }
+
+ if (!s->flts && s->aw_bits != VTD_HOST_AW_39BIT &&
+ s->aw_bits != VTD_HOST_AW_48BIT) {
+ error_setg(errp, "%s: supported values for aw-bits are: %d, %d",
+ s->scalable_mode ? "Scalable mode(flts=off)" : "Legacy mode",
VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT);
return false;
}
+ if (s->flts && s->aw_bits != VTD_HOST_AW_48BIT) {
+ error_setg(errp,
+ "Scalable mode(flts=on): supported value for aw-bits is: %d",
+ VTD_HOST_AW_48BIT);
+ return false;
+ }
+
if (s->scalable_mode && !s->dma_drain) {
error_setg(errp, "Need to set dma_drain for scalable mode");
return false;
@@ -4352,19 +4874,22 @@ static void vtd_realize(DeviceState *dev, Error **errp)
qemu_add_machine_init_done_notifier(&vtd_machine_done_notify);
}
-static void vtd_class_init(ObjectClass *klass, void *data)
+static void vtd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
- dc->reset = vtd_reset;
+ /*
+ * Use 'exit' reset phase to make sure all DMA requests
+ * have been quiesced during 'enter' or 'hold' phase
+ */
+ rc->phases.exit = vtd_reset_exit;
dc->vmsd = &vtd_vmstate;
device_class_set_props(dc, vtd_properties);
dc->hotpluggable = false;
x86_class->realize = vtd_realize;
x86_class->int_remap = vtd_int_remap;
- /* Supported by the pc-q35-* machine types */
- dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "Intel IOMMU (VT-d) DMA Remapping device";
}
@@ -4377,7 +4902,7 @@ static const TypeInfo vtd_info = {
};
static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index f8cf99b..e8b211e 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -195,6 +195,7 @@
#define VTD_ECAP_PASID (1ULL << 40)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SLTS (1ULL << 46)
+#define VTD_ECAP_FLTS (1ULL << 47)
/* CAP_REG */
/* (offset >> 4) << 24 */
@@ -211,6 +212,7 @@
#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
#define VTD_CAP_DRAIN_WRITE (1ULL << 54)
#define VTD_CAP_DRAIN_READ (1ULL << 55)
+#define VTD_CAP_FS1GP (1ULL << 56)
#define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE)
#define VTD_CAP_CM (1ULL << 7)
#define VTD_PASID_ID_SHIFT 20
@@ -264,10 +266,10 @@
#define VTD_FRCD_FR(val) (((val) & 0xffULL) << 32)
#define VTD_FRCD_SID_MASK 0xffffULL
#define VTD_FRCD_SID(val) ((val) & VTD_FRCD_SID_MASK)
+#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40)
+#define VTD_FRCD_PP(val) (((val) & 0x1ULL) << 31)
/* For the low 64-bit of 128-bit */
#define VTD_FRCD_FI(val) ((val) & ~0xfffULL)
-#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40)
-#define VTD_FRCD_PP(val) (((val) & 0x1) << 31)
#define VTD_FRCD_IR_IDX(val) (((val) & 0xffffULL) << 48)
/* DMA Remapping Fault Conditions */
@@ -311,10 +313,28 @@ typedef enum VTDFaultReason {
* request while disabled */
VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */
- VTD_FR_PASID_TABLE_INV = 0x58, /*Invalid PASID table entry */
+ /* PASID directory entry access failure */
+ VTD_FR_PASID_DIR_ACCESS_ERR = 0x50,
+ /* The Present(P) field of pasid directory entry is 0 */
+ VTD_FR_PASID_DIR_ENTRY_P = 0x51,
+ VTD_FR_PASID_TABLE_ACCESS_ERR = 0x58, /* PASID table entry access failure */
+ /* The Present(P) field of pasid table entry is 0 */
+ VTD_FR_PASID_ENTRY_P = 0x59,
+    VTD_FR_PASID_TABLE_ENTRY_INV = 0x5b, /* Invalid PASID table entry */
+
+ /* Fail to access a first-level paging entry (not FS_PML4E) */
+ VTD_FR_FS_PAGING_ENTRY_INV = 0x70,
+ VTD_FR_FS_PAGING_ENTRY_P = 0x71,
+ /* Non-zero reserved field in present first-stage paging entry */
+ VTD_FR_FS_PAGING_ENTRY_RSVD = 0x72,
+ VTD_FR_PASID_ENTRY_FSPTPTR_INV = 0x73, /* Invalid FSPTPTR in PASID entry */
+    VTD_FR_FS_NON_CANONICAL = 0x80, /* SNG.1: Address for FS not canonical. */
+ VTD_FR_FS_PAGING_ENTRY_US = 0x81, /* Privilege violation */
+ VTD_FR_SM_WRITE = 0x85, /* No write permission */
/* Output address in the interrupt address range for scalable mode */
VTD_FR_SM_INTERRUPT_ADDR = 0x87,
+ VTD_FR_FS_BIT_UPDATE_FAILED = 0x91, /* SFS.10 */
VTD_FR_MAX, /* Guard */
} VTDFaultReason;
@@ -356,7 +376,9 @@ union VTDInvDesc {
typedef union VTDInvDesc VTDInvDesc;
/* Masks for struct VTDInvDesc */
-#define VTD_INV_DESC_TYPE 0xf
+#define VTD_INV_DESC_ALL_ONE -1ULL
+#define VTD_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | \
+ ((val) & 0xfULL))
#define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */
#define VTD_INV_DESC_IOTLB 0x2
#define VTD_INV_DESC_DEVICE 0x3
@@ -365,6 +387,7 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */
#define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */
#define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */
+#define VTD_INV_DESC_DEV_PIOTLB 0x8 /* PASID-based-DIOTLB inv_desc */
#define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */
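/*
 * Sketch of the widened type decode above: VTD_INV_DESC_TYPE() assembles
 * the descriptor type from bits 3:0 plus bits 11:9 of the first qword, so
 * types above 0xf stay representable. The values below are only meant to
 * illustrate the bit assembly.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | ((val) & 0xfULL))

int main(void)
{
    uint64_t lo_piotlb = 0x6;                  /* bits 3:0 = 0x6 (P-IOTLB) */
    uint64_t lo_ext = (0x1ULL << 9) | 0x2;     /* bits 11:9 = 1, bits 3:0 = 2 */

    printf("type 0x%llx\n", (unsigned long long)EX_INV_DESC_TYPE(lo_piotlb));
    printf("type 0x%llx\n", (unsigned long long)EX_INV_DESC_TYPE(lo_ext));
    return 0;
}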
/* Masks for Invalidation Wait Descriptor*/
@@ -372,7 +395,7 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_WAIT_IF (1ULL << 4)
#define VTD_INV_DESC_WAIT_FN (1ULL << 6)
#define VTD_INV_DESC_WAIT_DATA_SHIFT 32
-#define VTD_INV_DESC_WAIT_RSVD_LO 0Xffffff80ULL
+#define VTD_INV_DESC_WAIT_RSVD_LO 0Xfffff180ULL
#define VTD_INV_DESC_WAIT_RSVD_HI 3ULL
/* Masks for Context-cache Invalidation Descriptor */
@@ -383,7 +406,7 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_CC_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK)
#define VTD_INV_DESC_CC_SID(val) (((val) >> 32) & 0xffffUL)
#define VTD_INV_DESC_CC_FM(val) (((val) >> 48) & 3UL)
-#define VTD_INV_DESC_CC_RSVD 0xfffc00000000ffc0ULL
+#define VTD_INV_DESC_CC_RSVD 0xfffc00000000f1c0ULL
/* Masks for IOTLB Invalidate Descriptor */
#define VTD_INV_DESC_IOTLB_G (3ULL << 4)
@@ -393,26 +416,34 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_IOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK)
#define VTD_INV_DESC_IOTLB_ADDR(val) ((val) & ~0xfffULL)
#define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL)
-#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL
+#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000f100ULL
#define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL
-#define VTD_INV_DESC_IOTLB_PASID_PASID (2ULL << 4)
-#define VTD_INV_DESC_IOTLB_PASID_PAGE (3ULL << 4)
-#define VTD_INV_DESC_IOTLB_PASID(val) (((val) >> 32) & VTD_PASID_ID_MASK)
-#define VTD_INV_DESC_IOTLB_PASID_RSVD_LO 0xfff00000000001c0ULL
-#define VTD_INV_DESC_IOTLB_PASID_RSVD_HI 0xf80ULL
/* Mask for Device IOTLB Invalidate Descriptor */
#define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL)
#define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1)
#define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL)
#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL
-#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8
+#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0f1f0
+
+/* Masks for Interrupt Entry Invalidate Descriptor */
+#define VTD_INV_DESC_IEC_RSVD 0xffff000007fff1e0ULL
+
+/* Masks for PASID based Device IOTLB Invalidate Descriptor */
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(val) ((val) & \
+ 0xfffffffffffff000ULL)
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(val) ((val >> 11) & 0x1)
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(val) ((val) & 0x1)
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(val) (((val) >> 16) & 0xffffULL)
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(val) ((val >> 32) & 0xfffffULL)
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL0 0xfff000000000f000ULL
+#define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL1 0x7feULL
/* Rsvd field masks for spte */
#define VTD_SPTE_SNP 0x800ULL
-#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \
- dt_supported ? \
+#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, stale_tm) \
+ stale_tm ? \
(0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \
(0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
#define VTD_SPTE_PAGE_L2_RSVD_MASK(aw) \
@@ -422,21 +453,49 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_SPTE_PAGE_L4_RSVD_MASK(aw) \
(0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
-#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, dt_supported) \
- dt_supported ? \
+#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, stale_tm) \
+ stale_tm ? \
(0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \
(0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
-#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, dt_supported) \
- dt_supported ? \
+#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, stale_tm) \
+ stale_tm ? \
(0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \
(0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
+/* Rsvd field masks for fpte */
+#define VTD_FS_UPPER_IGNORED 0xfff0000000000000ULL
+#define VTD_FPTE_PAGE_L1_RSVD_MASK(aw) \
+ (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+#define VTD_FPTE_PAGE_L2_RSVD_MASK(aw) \
+ (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+#define VTD_FPTE_PAGE_L3_RSVD_MASK(aw) \
+ (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+#define VTD_FPTE_PAGE_L4_RSVD_MASK(aw) \
+ (0x80ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+
+#define VTD_FPTE_LPAGE_L2_RSVD_MASK(aw) \
+ (0x1fe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+#define VTD_FPTE_LPAGE_L3_RSVD_MASK(aw) \
+ (0x3fffe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
+
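/*
 * Rough evaluation of one of the first-stage reserved masks above for
 * aw_bits = 48, assuming VTD_HAW_MASK(aw) expands to ((1ULL << aw) - 1);
 * check the real definition in this header before relying on the number.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_HAW_MASK(aw)     ((1ULL << (aw)) - 1)
#define EX_FS_UPPER_IGNORED 0xfff0000000000000ULL   /* VTD_FS_UPPER_IGNORED */

int main(void)
{
    /* VTD_FPTE_PAGE_L4_RSVD_MASK(48): bit 7 plus bits outside HAW/ignored */
    uint64_t l4_rsvd = 0x80ULL | ~(EX_HAW_MASK(48) | EX_FS_UPPER_IGNORED);
    uint64_t fpte = 1ULL << 7;      /* a PML4E with the reserved bit 7 set */

    printf("mask 0x%016llx\n", (unsigned long long)l4_rsvd);
    printf("reserved-bit violation: %d\n", (fpte & l4_rsvd) != 0);
    return 0;
}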
+/* Masks for PIOTLB Invalidate Descriptor */
+#define VTD_INV_DESC_PIOTLB_G (3ULL << 4)
+#define VTD_INV_DESC_PIOTLB_ALL_IN_PASID (2ULL << 4)
+#define VTD_INV_DESC_PIOTLB_PSI_IN_PASID (3ULL << 4)
+#define VTD_INV_DESC_PIOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK)
+#define VTD_INV_DESC_PIOTLB_PASID(val) (((val) >> 32) & 0xfffffULL)
+#define VTD_INV_DESC_PIOTLB_AM(val) ((val) & 0x3fULL)
+#define VTD_INV_DESC_PIOTLB_IH(val) (((val) >> 6) & 0x1)
+#define VTD_INV_DESC_PIOTLB_ADDR(val) ((val) & ~0xfffULL)
+#define VTD_INV_DESC_PIOTLB_RSVD_VAL0 0xfff000000000f1c0ULL
+#define VTD_INV_DESC_PIOTLB_RSVD_VAL1 0xf80ULL
+
/* Information about page-selective IOTLB invalidate */
struct VTDIOTLBPageInvInfo {
uint16_t domain_id;
uint32_t pasid;
uint64_t addr;
- uint8_t mask;
+ uint64_t mask;
};
typedef struct VTDIOTLBPageInvInfo VTDIOTLBPageInvInfo;
@@ -514,27 +573,38 @@ typedef struct VTDRootEntry VTDRootEntry;
#define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */
#define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK)
+#define VTD_SM_PASID_ENTRY_FLPM 3ULL
+#define VTD_SM_PASID_ENTRY_FLPTPTR (~0xfffULL)
+
+/* First Level Paging Structure */
+/* Masks for First Level Paging Entry */
+#define VTD_FL_P 1ULL
+#define VTD_FL_RW (1ULL << 1)
+#define VTD_FL_US (1ULL << 2)
+#define VTD_FL_A (1ULL << 5)
+#define VTD_FL_D (1ULL << 6)
+
/* Second Level Page Translation Pointer*/
#define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL)
-/* Paging Structure common */
-#define VTD_SL_PT_PAGE_SIZE_MASK (1ULL << 7)
-/* Bits to decide the offset for each level */
-#define VTD_SL_LEVEL_BITS 9
-
/* Second Level Paging Structure */
-#define VTD_SL_PML4_LEVEL 4
-#define VTD_SL_PDP_LEVEL 3
-#define VTD_SL_PD_LEVEL 2
-#define VTD_SL_PT_LEVEL 1
-#define VTD_SL_PT_ENTRY_NR 512
-
/* Masks for Second Level Paging Entry */
#define VTD_SL_RW_MASK 3ULL
#define VTD_SL_R 1ULL
#define VTD_SL_W (1ULL << 1)
-#define VTD_SL_PT_BASE_ADDR_MASK(aw) (~(VTD_PAGE_SIZE - 1) & VTD_HAW_MASK(aw))
#define VTD_SL_IGN_COM 0xbff0000000000000ULL
#define VTD_SL_TM (1ULL << 62)
+/* Common for both First Level and Second Level */
+#define VTD_PML4_LEVEL 4
+#define VTD_PDP_LEVEL 3
+#define VTD_PD_LEVEL 2
+#define VTD_PT_LEVEL 1
+#define VTD_PT_ENTRY_NR 512
+#define VTD_PT_PAGE_SIZE_MASK (1ULL << 7)
+#define VTD_PT_BASE_ADDR_MASK(aw) (~(VTD_PAGE_SIZE - 1) & VTD_HAW_MASK(aw))
+
+/* Bits to decide the offset for each level */
+#define VTD_LEVEL_BITS 9
+
#endif
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index a72c28e..1be9bfe 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -14,9 +14,10 @@
#include "qemu/module.h"
#include "hw/i386/apic_internal.h"
#include "hw/pci/msi.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm.h"
+#include "system/hw_accel.h"
+#include "system/kvm.h"
#include "kvm/kvm_i386.h"
+#include "kvm/tdx.h"
static inline void kvm_apic_set_reg(struct kvm_lapic_state *kapic,
int reg_id, uint32_t val)
@@ -141,6 +142,10 @@ static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
struct kvm_lapic_state kapic;
int ret;
+ if (is_tdx_vm()) {
+ return;
+ }
+
kvm_put_apicbase(s->cpu, s->apicbase);
kvm_put_apic_state(s, &kapic);
@@ -214,7 +219,7 @@ static void kvm_apic_mem_write(void *opaque, hwaddr addr,
static const MemoryRegionOps kvm_apic_io_ops = {
.read = kvm_apic_mem_read,
.write = kvm_apic_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void kvm_apic_reset(APICCommonState *s)
@@ -240,7 +245,7 @@ static void kvm_apic_unrealize(DeviceState *dev)
{
}
-static void kvm_apic_class_init(ObjectClass *klass, void *data)
+static void kvm_apic_class_init(ObjectClass *klass, const void *data)
{
APICCommonClass *k = APIC_COMMON_CLASS(klass);
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index 40aa9a3..f563827 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -16,9 +16,9 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
-#include "sysemu/hw_accel.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
+#include "system/hw_accel.h"
#include "kvm/kvm_i386.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
@@ -27,7 +27,6 @@
#include "qapi/error.h"
#include <linux/kvm.h>
-#include "standard-headers/asm-x86/kvm_para.h"
#include "qom/object.h"
#define TYPE_KVM_CLOCK "kvmclock"
@@ -305,13 +304,12 @@ static const VMStateDescription kvmclock_vmsd = {
}
};
-static Property kvmclock_properties[] = {
+static const Property kvmclock_properties[] = {
DEFINE_PROP_BOOL("x-mach-use-reliable-get-clock", KVMClockState,
mach_use_reliable_get_clock, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void kvmclock_class_init(ObjectClass *klass, void *data)
+static void kvmclock_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -334,8 +332,8 @@ void kvmclock_create(bool create_always)
assert(kvm_enabled());
if (create_always ||
- cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) |
- (1ULL << KVM_FEATURE_CLOCKSOURCE2))) {
+ cpu->env.features[FEAT_KVM] & (CPUID_KVM_CLOCK |
+ CPUID_KVM_CLOCK2)) {
sysbus_create_simple(TYPE_KVM_CLOCK, -1, NULL);
}
}
diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c
index e49b9c4..14b78f3 100644
--- a/hw/i386/kvm/i8254.c
+++ b/hw/i386/kvm/i8254.c
@@ -29,11 +29,11 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/timer/i8254.h"
#include "hw/timer/i8254_internal.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "target/i386/kvm/kvm_i386.h"
#include "qom/object.h"
@@ -287,13 +287,12 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp)
kpc->parent_realize(dev, errp);
}
-static Property kvm_pit_properties[] = {
+static const Property kvm_pit_properties[] = {
DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", KVMPITState,
lost_tick_policy, LOST_TICK_POLICY_DELAY),
- DEFINE_PROP_END_OF_LIST(),
};
-static void kvm_pit_class_init(ObjectClass *klass, void *data)
+static void kvm_pit_class_init(ObjectClass *klass, const void *data)
{
KVMPITClass *kpc = KVM_PIT_CLASS(klass);
PITCommonClass *k = PIT_COMMON_CLASS(klass);
@@ -303,7 +302,7 @@ static void kvm_pit_class_init(ObjectClass *klass, void *data)
&kpc->parent_realize);
k->set_channel_gate = kvm_pit_set_gate;
k->get_channel_info = kvm_pit_get_channel_info;
- dc->reset = kvm_pit_reset;
+ device_class_set_legacy_reset(dc, kvm_pit_reset);
device_class_set_props(dc, kvm_pit_properties);
}
diff --git a/hw/i386/kvm/i8259.c b/hw/i386/kvm/i8259.c
index 3ca0e1f..8a72d6e 100644
--- a/hw/i386/kvm/i8259.c
+++ b/hw/i386/kvm/i8259.c
@@ -16,7 +16,7 @@
#include "qemu/module.h"
#include "hw/intc/kvm_irqcount.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qom/object.h"
#define TYPE_KVM_I8259 "kvm-i8259"
@@ -139,13 +139,13 @@ qemu_irq *kvm_i8259_init(ISABus *bus)
return qemu_allocate_irqs(kvm_pic_set_irq, NULL, ISA_NUM_IRQS);
}
-static void kvm_i8259_class_init(ObjectClass *klass, void *data)
+static void kvm_i8259_class_init(ObjectClass *klass, const void *data)
{
KVMPICClass *kpc = KVM_PIC_CLASS(klass);
PICCommonClass *k = PIC_COMMON_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = kvm_pic_reset;
+ device_class_set_legacy_reset(dc, kvm_pic_reset);
device_class_set_parent_realize(dc, kvm_pic_realize, &kpc->parent_realize);
k->pre_save = kvm_pic_get;
k->post_load = kvm_pic_put;
diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c
index b96fe84..693ee97 100644
--- a/hw/i386/kvm/ioapic.c
+++ b/hw/i386/kvm/ioapic.c
@@ -15,7 +15,7 @@
#include "hw/qdev-properties.h"
#include "hw/intc/ioapic_internal.h"
#include "hw/intc/kvm_irqcount.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm/kvm_i386.h"
/* PC Utility function */
@@ -133,12 +133,11 @@ static void kvm_ioapic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, kvm_ioapic_set_irq, IOAPIC_NUM_PINS);
}
-static Property kvm_ioapic_properties[] = {
+static const Property kvm_ioapic_properties[] = {
DEFINE_PROP_UINT32("gsi_base", KVMIOAPICState, kvm_gsi_base, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void kvm_ioapic_class_init(ObjectClass *klass, void *data)
+static void kvm_ioapic_class_init(ObjectClass *klass, const void *data)
{
IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -146,7 +145,7 @@ static void kvm_ioapic_class_init(ObjectClass *klass, void *data)
k->realize = kvm_ioapic_realize;
k->pre_save = kvm_ioapic_get;
k->post_load = kvm_ioapic_put;
- dc->reset = kvm_ioapic_reset;
+ device_class_set_legacy_reset(dc, kvm_ioapic_reset);
device_class_set_props(dc, kvm_ioapic_properties);
}
diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c
index d03131e..ce73119 100644
--- a/hw/i386/kvm/xen-stubs.c
+++ b/hw/i386/kvm/xen-stubs.c
@@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "xen_evtchn.h"
#include "xen_primary_console.h"
@@ -38,15 +37,3 @@ void xen_primary_console_create(void)
void xen_primary_console_set_be_port(uint16_t port)
{
}
-#ifdef TARGET_I386
-EvtchnInfoList *qmp_xen_event_list(Error **errp)
-{
- error_setg(errp, "Xen event channel emulation not enabled");
- return NULL;
-}
-
-void qmp_xen_event_inject(uint32_t port, Error **errp)
-{
- error_setg(errp, "Xen event channel emulation not enabled");
-}
-#endif
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 07bd0c9..dd566c4 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -19,11 +19,11 @@
#include "monitor/monitor.h"
#include "monitor/hmp.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-misc-i386.h"
+#include "qobject/qdict.h"
#include "qom/object.h"
#include "exec/target_page.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/vmstate.h"
#include "trace.h"
@@ -41,8 +41,8 @@
#include "xen_overlay.h"
#include "xen_xenstore.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
#include <linux/kvm.h>
#include <sys/eventfd.h>
@@ -140,6 +140,8 @@ struct XenEvtchnState {
uint64_t callback_param;
bool evtchn_in_kernel;
+ bool setting_callback_gsi;
+ int extern_gsi_level;
uint32_t callback_gsi;
QEMUBH *gsi_bh;
@@ -269,7 +271,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
}
};
-static void xen_evtchn_class_init(ObjectClass *klass, void *data)
+static void xen_evtchn_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -431,9 +433,22 @@ void xen_evtchn_set_callback_level(int level)
}
if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) {
- qemu_set_irq(s->callback_gsis[s->callback_gsi], level);
- if (level) {
- /* Ensure the vCPU polls for deassertion */
+ /*
+ * Ugly, but since we hold the BQL we can set this flag so that
+ * xen_evtchn_set_gsi() can tell the difference between this code
+ * setting the GSI, and an external device (PCI INTx) doing so.
+ */
+ s->setting_callback_gsi = true;
+ /* Do not deassert the line if an external device is asserting it. */
+ qemu_set_irq(s->callback_gsis[s->callback_gsi],
+ level || s->extern_gsi_level);
+ s->setting_callback_gsi = false;
+
+ /*
+ * If the callback GSI is the only one asserted, ensure the status
+ * is polled for deassertion in kvm_arch_post_run().
+ */
+ if (level && !s->extern_gsi_level) {
kvm_xen_set_callback_asserted();
}
}
@@ -1596,7 +1611,7 @@ static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
return pirq;
}
-bool xen_evtchn_set_gsi(int gsi, int level)
+bool xen_evtchn_set_gsi(int gsi, int *level)
{
XenEvtchnState *s = xen_evtchn_singleton;
int pirq;
@@ -1608,16 +1623,35 @@ bool xen_evtchn_set_gsi(int gsi, int level)
}
/*
- * Check that that it *isn't* the event channel GSI, and thus
- * that we are not recursing and it's safe to take s->port_lock.
- *
- * Locking aside, it's perfectly sane to bail out early for that
- * special case, as it would make no sense for the event channel
- * GSI to be routed back to event channels, when the delivery
- * method is to raise the GSI... that recursion wouldn't *just*
- * be a locking issue.
+ * For the callback_gsi we need to implement a logical OR of the event
+ * channel GSI and the external input (e.g. from PCI INTx), because
+ * QEMU itself doesn't support shared level interrupts via demux or
+ * resamplers.
*/
if (gsi && gsi == s->callback_gsi) {
+ /* Remember the external state of the GSI pin (e.g. from PCI INTx) */
+ if (!s->setting_callback_gsi) {
+ s->extern_gsi_level = *level;
+
+ /*
+ * Don't allow the external device to deassert the line if the
+             * event channel GSI should still be asserted.
+ */
+ if (!s->extern_gsi_level) {
+ struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
+ if (vi && vi->evtchn_upcall_pending) {
+ /* Need to poll for deassertion */
+ kvm_xen_set_callback_asserted();
+ *level = 1;
+ }
+ }
+ }
+
+ /*
+ * The event channel GSI cannot be routed to PIRQ, as that would make
+ * no sense. It could also deadlock on s->port_lock, if we proceed.
+ * So bail out now.
+ */
return false;
}
@@ -1628,7 +1662,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
return false;
}
- if (level) {
+ if (*level) {
int port = s->pirq[pirq].port;
s->pirq_gsi_set |= (1U << gsi);
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index b740acf..0521ebc 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -23,7 +23,7 @@ void xen_evtchn_set_callback_level(int level);
int xen_evtchn_set_port(uint16_t port);
-bool xen_evtchn_set_gsi(int gsi, int level);
+bool xen_evtchn_set_gsi(int gsi, int *level);
void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
uint64_t addr, uint32_t data, bool is_masked);
void xen_evtchn_remove_pci_device(PCIDevice *dev);
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
index 245e4b1..4b9e272 100644
--- a/hw/i386/kvm/xen_gnttab.c
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -17,7 +17,7 @@
#include "qapi/error.h"
#include "qom/object.h"
#include "exec/target_page.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
@@ -27,8 +27,8 @@
#include "xen_gnttab.h"
#include "xen_primary_console.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
#include "hw/xen/interface/memory.h"
#include "hw/xen/interface/grant_table.h"
@@ -135,7 +135,7 @@ static const VMStateDescription xen_gnttab_vmstate = {
}
};
-static void xen_gnttab_class_init(ObjectClass *klass, void *data)
+static void xen_gnttab_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
index c68e78a..3cb7361 100644
--- a/hw/i386/kvm/xen_overlay.c
+++ b/hw/i386/kvm/xen_overlay.c
@@ -16,15 +16,15 @@
#include "qapi/error.h"
#include "qom/object.h"
#include "exec/target_page.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "xen_overlay.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
#include <linux/kvm.h>
#include "hw/xen/interface/memory.h"
@@ -151,11 +151,11 @@ static void xen_overlay_reset(DeviceState *dev)
kvm_xen_soft_reset();
}
-static void xen_overlay_class_init(ObjectClass *klass, void *data)
+static void xen_overlay_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xen_overlay_reset;
+ device_class_set_legacy_reset(dc, xen_overlay_reset);
dc->realize = xen_overlay_realize;
dc->vmsd = &xen_overlay_vmstate;
}
diff --git a/hw/i386/kvm/xen_primary_console.c b/hw/i386/kvm/xen_primary_console.c
index abe79f5..6e9d641 100644
--- a/hw/i386/kvm/xen_primary_console.c
+++ b/hw/i386/kvm/xen_primary_console.c
@@ -20,8 +20,8 @@
#include "xen_overlay.h"
#include "xen_primary_console.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
#include "trace.h"
@@ -67,7 +67,7 @@ static void xen_primary_console_realize(DeviceState *dev, Error **errp)
xen_primary_console_singleton = s;
}
-static void xen_primary_console_class_init(ObjectClass *klass, void *data)
+static void xen_primary_console_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 1a9bc34..42955cc 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -28,8 +28,8 @@
#include "xen_primary_console.h"
#include "xen_xenstore.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
#include "trace.h"
@@ -209,7 +209,6 @@ static int xen_xenstore_post_load(void *opaque, int ver)
{
XenXenstoreState *s = opaque;
GByteArray *save;
- int ret;
/*
* As qemu/dom0, rebind to the guest's port. The Windows drivers may
@@ -231,8 +230,7 @@ static int xen_xenstore_post_load(void *opaque, int ver)
s->impl_state = NULL;
s->impl_state_size = 0;
- ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
- return ret;
+ return xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
}
static const VMStateDescription xen_xenstore_vmstate = {
@@ -261,7 +259,7 @@ static const VMStateDescription xen_xenstore_vmstate = {
}
};
-static void xen_xenstore_class_init(ObjectClass *klass, void *data)
+static void xen_xenstore_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -532,6 +530,10 @@ static void xs_read(XenXenstoreState *s, unsigned int req_id,
return;
}
+ if (!len) {
+ return;
+ }
+
memcpy(&rsp_data[rsp->len], data->data, len);
rsp->len += len;
}
diff --git a/hw/i386/meson.build b/hw/i386/meson.build
index 03aad10..7896f34 100644
--- a/hw/i386/meson.build
+++ b/hw/i386/meson.build
@@ -15,6 +15,7 @@ i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'),
if_false: files('amd_iommu-stub.c'))
i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c'))
i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('x86-common.c', 'microvm.c', 'acpi-microvm.c', 'microvm-dt.c'))
+i386_ss.add(when: 'CONFIG_NITRO_ENCLAVE', if_true: files('nitro_enclave.c'))
i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c'))
i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c'))
i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c'))
@@ -31,6 +32,7 @@ i386_ss.add(when: 'CONFIG_PC', if_true: files(
'port92.c'))
i386_ss.add(when: 'CONFIG_X86_FW_OVMF', if_true: files('pc_sysfw_ovmf.c'),
if_false: files('pc_sysfw_ovmf-stubs.c'))
+i386_ss.add(when: 'CONFIG_TDX', if_true: files('tdvf.c', 'tdvf-hob.c'))
subdir('kvm')
subdir('xen')
diff --git a/hw/i386/microvm-dt.c b/hw/i386/microvm-dt.c
index b3049e4..cb27dfd 100644
--- a/hw/i386/microvm-dt.c
+++ b/hw/i386/microvm-dt.c
@@ -33,8 +33,8 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "sysemu/device_tree.h"
-#include "hw/char/serial.h"
+#include "system/device_tree.h"
+#include "hw/char/serial-isa.h"
#include "hw/i386/fw_cfg.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/sysbus.h"
diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c
index 40edcee..e0daf0d 100644
--- a/hw/i386/microvm.c
+++ b/hw/i386/microvm.c
@@ -22,11 +22,11 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi/qapi-visit-common.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/numa.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/cpus.h"
+#include "system/numa.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "acpi-microvm.h"
#include "microvm-dt.h"
@@ -39,7 +39,7 @@
#include "hw/intc/i8259.h"
#include "hw/timer/i8254.h"
#include "hw/rtc/mc146818rtc.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "hw/display/ramfb.h"
#include "hw/i386/topology.h"
#include "hw/i386/e820_memory_layout.h"
@@ -139,7 +139,7 @@ static void create_gpex(MicrovmMachineState *mms)
mms->gpex.mmio64.base, mmio64_alias);
}
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
x86ms->gsi[mms->gpex.irq + i]);
}
@@ -283,6 +283,7 @@ static void microvm_devices_init(MicrovmMachineState *mms)
static void microvm_memory_init(MicrovmMachineState *mms)
{
+ MicrovmMachineClass *mmc = MICROVM_MACHINE_GET_CLASS(mms);
MachineState *machine = MACHINE(mms);
X86MachineState *x86ms = X86_MACHINE(mms);
MemoryRegion *ram_below_4g, *ram_above_4g;
@@ -328,7 +329,7 @@ static void microvm_memory_init(MicrovmMachineState *mms)
rom_set_fw(fw_cfg);
if (machine->kernel_filename != NULL) {
- x86_load_linux(x86ms, fw_cfg, 0, true);
+ mmc->x86_load_linux(x86ms, fw_cfg, 0, true);
}
if (mms->option_roms) {
@@ -450,11 +451,44 @@ static HotplugHandler *microvm_get_hotplug_handler(MachineState *machine,
return NULL;
}
+static void microvm_machine_done(Notifier *notifier, void *data)
+{
+ MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState,
+ machine_done);
+ X86MachineState *x86ms = X86_MACHINE(mms);
+
+ acpi_setup_microvm(mms);
+ dt_setup_microvm(mms);
+ fw_cfg_add_e820(x86ms->fw_cfg);
+}
+
+static void microvm_powerdown_req(Notifier *notifier, void *data)
+{
+ MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState,
+ powerdown_req);
+ X86MachineState *x86ms = X86_MACHINE(mms);
+
+ if (x86ms->acpi_dev) {
+ Object *obj = OBJECT(x86ms->acpi_dev);
+ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
+ adevc->send_event(ACPI_DEVICE_IF(x86ms->acpi_dev),
+ ACPI_POWER_DOWN_STATUS);
+ }
+}
+
static void microvm_machine_state_init(MachineState *machine)
{
MicrovmMachineState *mms = MICROVM_MACHINE(machine);
X86MachineState *x86ms = X86_MACHINE(machine);
+ /* State */
+ mms->kernel_cmdline_fixed = false;
+
+ mms->machine_done.notify = microvm_machine_done;
+ qemu_add_machine_init_done_notifier(&mms->machine_done);
+ mms->powerdown_req.notify = microvm_powerdown_req;
+ qemu_register_powerdown_notifier(&mms->powerdown_req);
+
microvm_memory_init(mms);
x86_cpus_init(x86ms, CPU_VERSION_LATEST);
@@ -462,7 +496,7 @@ static void microvm_machine_state_init(MachineState *machine)
microvm_devices_init(mms);
}
-static void microvm_machine_reset(MachineState *machine, ShutdownCause reason)
+static void microvm_machine_reset(MachineState *machine, ResetType type)
{
MicrovmMachineState *mms = MICROVM_MACHINE(machine);
CPUState *cs;
@@ -475,7 +509,7 @@ static void microvm_machine_reset(MachineState *machine, ShutdownCause reason)
mms->kernel_cmdline_fixed = true;
}
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
@@ -580,31 +614,6 @@ static void microvm_machine_set_auto_kernel_cmdline(Object *obj, bool value,
mms->auto_kernel_cmdline = value;
}
-static void microvm_machine_done(Notifier *notifier, void *data)
-{
- MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState,
- machine_done);
- X86MachineState *x86ms = X86_MACHINE(mms);
-
- acpi_setup_microvm(mms);
- dt_setup_microvm(mms);
- fw_cfg_add_e820(x86ms->fw_cfg);
-}
-
-static void microvm_powerdown_req(Notifier *notifier, void *data)
-{
- MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState,
- powerdown_req);
- X86MachineState *x86ms = X86_MACHINE(mms);
-
- if (x86ms->acpi_dev) {
- Object *obj = OBJECT(x86ms->acpi_dev);
- AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
- adevc->send_event(ACPI_DEVICE_IF(x86ms->acpi_dev),
- ACPI_POWER_DOWN_STATUS);
- }
-}
-
static void microvm_machine_initfn(Object *obj)
{
MicrovmMachineState *mms = MICROVM_MACHINE(obj);
@@ -616,14 +625,6 @@ static void microvm_machine_initfn(Object *obj)
mms->isa_serial = true;
mms->option_roms = true;
mms->auto_kernel_cmdline = true;
-
- /* State */
- mms->kernel_cmdline_fixed = false;
-
- mms->machine_done.notify = microvm_machine_done;
- qemu_add_machine_init_done_notifier(&mms->machine_done);
- mms->powerdown_req.notify = microvm_powerdown_req;
- qemu_register_powerdown_notifier(&mms->powerdown_req);
}
GlobalProperty microvm_properties[] = {
@@ -634,12 +635,15 @@ GlobalProperty microvm_properties[] = {
{ "pcie-root-port", "io-reserve", "0" },
};
-static void microvm_class_init(ObjectClass *oc, void *data)
+static void microvm_class_init(ObjectClass *oc, const void *data)
{
X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
+ MicrovmMachineClass *mmc = MICROVM_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
+ mmc->x86_load_linux = x86_load_linux;
+
mc->init = microvm_machine_state_init;
mc->family = "microvm_i386";
@@ -722,7 +726,7 @@ static const TypeInfo microvm_machine_info = {
.instance_init = microvm_machine_initfn,
.class_size = sizeof(MicrovmMachineClass),
.class_init = microvm_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
diff --git a/hw/i386/monitor.c b/hw/i386/monitor.c
index 1ebd356..79df965 100644
--- a/hw/i386/monitor.c
+++ b/hw/i386/monitor.c
@@ -24,9 +24,9 @@
#include "qemu/osdep.h"
#include "monitor/monitor.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-misc-i386.h"
#include "hw/i386/x86.h"
#include "hw/rtc/mc146818rtc.h"
diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c
index 3332712..6e6b96b 100644
--- a/hw/i386/multiboot.c
+++ b/hw/i386/multiboot.c
@@ -29,7 +29,8 @@
#include "multiboot.h"
#include "hw/loader.h"
#include "elf.h"
-#include "sysemu/sysemu.h"
+#include "exec/target_page.h"
+#include "system/system.h"
#include "qemu/error-report.h"
/* Show multiboot debug output */
@@ -133,9 +134,9 @@ static void mb_add_mod(MultibootState *s,
p = (char *)s->mb_buf + s->offset_mbinfo + MB_MOD_SIZE * s->mb_mods_count;
- stl_p(p + MB_MOD_START, start);
- stl_p(p + MB_MOD_END, end);
- stl_p(p + MB_MOD_CMDLINE, cmdline_phys);
+ stl_le_p(p + MB_MOD_START, start);
+ stl_le_p(p + MB_MOD_END, end);
+ stl_le_p(p + MB_MOD_CMDLINE, cmdline_phys);
mb_debug("mod%02d: "HWADDR_FMT_plx" - "HWADDR_FMT_plx,
s->mb_mods_count, start, end);
@@ -168,9 +169,9 @@ int load_multiboot(X86MachineState *x86ms,
/* Ok, let's see if it is a multiboot image.
The header is 12x32bit long, so the latest entry may be 8192 - 48. */
for (i = 0; i < (8192 - 48); i += 4) {
- if (ldl_p(header+i) == 0x1BADB002) {
- uint32_t checksum = ldl_p(header+i+8);
- flags = ldl_p(header+i+4);
+ if (ldl_le_p(header + i) == 0x1BADB002) {
+ uint32_t checksum = ldl_le_p(header + i + 8);
+ flags = ldl_le_p(header + i + 4);
checksum += flags;
checksum += (uint32_t)0x1BADB002;
if (!checksum) {
@@ -202,8 +203,8 @@ int load_multiboot(X86MachineState *x86ms,
}
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry,
- &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE,
- 0, 0);
+ &elf_low, &elf_high, NULL,
+ ELFDATA2LSB, I386_ELF_MACHINE, 0, 0);
if (kernel_size < 0) {
error_report("Error while loading elf kernel");
exit(1);
@@ -223,11 +224,11 @@ int load_multiboot(X86MachineState *x86ms,
mb_kernel_size, (size_t)mh_entry_addr);
} else {
/* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */
- uint32_t mh_header_addr = ldl_p(header+i+12);
- uint32_t mh_load_end_addr = ldl_p(header+i+20);
- uint32_t mh_bss_end_addr = ldl_p(header+i+24);
+ uint32_t mh_header_addr = ldl_le_p(header + i + 12);
+ uint32_t mh_load_end_addr = ldl_le_p(header + i + 20);
+ uint32_t mh_bss_end_addr = ldl_le_p(header + i + 24);
- mh_load_addr = ldl_p(header+i+16);
+ mh_load_addr = ldl_le_p(header + i + 16);
if (mh_header_addr < mh_load_addr) {
error_report("invalid load_addr address");
exit(1);
@@ -239,7 +240,7 @@ int load_multiboot(X86MachineState *x86ms,
uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr);
uint32_t mb_load_size = 0;
- mh_entry_addr = ldl_p(header+i+28);
+ mh_entry_addr = ldl_le_p(header + i + 28);
if (mh_load_end_addr) {
if (mh_load_end_addr < mh_load_addr) {
@@ -364,22 +365,21 @@ int load_multiboot(X86MachineState *x86ms,
/* Commandline support */
kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline);
- stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));
-
- stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name));
-
- stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
- stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */
+ stl_le_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));
+ stl_le_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs,
+ bootloader_name));
+ stl_le_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
+ stl_le_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */
/* the kernel is where we want it to be now */
- stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY
+ stl_le_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY
| MULTIBOOT_FLAGS_BOOT_DEVICE
| MULTIBOOT_FLAGS_CMDLINE
| MULTIBOOT_FLAGS_MODULES
| MULTIBOOT_FLAGS_MMAP
| MULTIBOOT_FLAGS_BOOTLOADER);
- stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */
- stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
+ stl_le_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */
+ stl_le_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
mb_debug("multiboot: entry_addr = %#x", mh_entry_addr);
mb_debug(" mb_buf_phys = "HWADDR_FMT_plx, mbs.mb_buf_phys);
diff --git a/hw/i386/nitro_enclave.c b/hw/i386/nitro_enclave.c
new file mode 100644
index 0000000..5ee50f3
--- /dev/null
+++ b/hw/i386/nitro_enclave.c
@@ -0,0 +1,353 @@
+/*
+ * AWS nitro-enclave machine
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qom/object_interfaces.h"
+
+#include "chardev/char.h"
+#include "hw/sysbus.h"
+#include "hw/core/eif.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/microvm.h"
+#include "hw/i386/nitro_enclave.h"
+#include "hw/virtio/virtio-mmio.h"
+#include "hw/virtio/virtio-nsm.h"
+#include "hw/virtio/vhost-user-vsock.h"
+#include "system/hostmem.h"
+
+static BusState *find_free_virtio_mmio_bus(void)
+{
+ BusChild *kid;
+ BusState *bus = sysbus_get_default();
+
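+    /* Find a virtio-mmio transport on the sysbus whose bus has no device attached yet. */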
+ QTAILQ_FOREACH(kid, &bus->children, sibling) {
+ DeviceState *dev = kid->child;
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO)) {
+ VirtIOMMIOProxy *mmio = VIRTIO_MMIO(OBJECT(dev));
+ VirtioBusState *mmio_virtio_bus = &mmio->bus;
+ BusState *mmio_bus = &mmio_virtio_bus->parent_obj;
+ if (QTAILQ_EMPTY(&mmio_bus->children)) {
+ return mmio_bus;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void vhost_user_vsock_init(NitroEnclaveMachineState *nems)
+{
+ DeviceState *dev = qdev_new(TYPE_VHOST_USER_VSOCK);
+ VHostUserVSock *vsock = VHOST_USER_VSOCK(dev);
+ BusState *bus;
+
+ if (!nems->vsock) {
+ error_report("A valid chardev id for vhost-user-vsock device must be "
+ "provided using the 'vsock' machine option");
+ exit(1);
+ }
+
+ bus = find_free_virtio_mmio_bus();
+ if (!bus) {
+ error_report("Failed to find bus for vhost-user-vsock device");
+ exit(1);
+ }
+
+ Chardev *chardev = qemu_chr_find(nems->vsock);
+ if (!chardev) {
+ error_report("Failed to find chardev with id %s", nems->vsock);
+ exit(1);
+ }
+
+ vsock->conf.chardev.chr = chardev;
+
+ qdev_realize_and_unref(dev, bus, &error_fatal);
+}
+
+static void virtio_nsm_init(NitroEnclaveMachineState *nems)
+{
+ DeviceState *dev = qdev_new(TYPE_VIRTIO_NSM);
+ VirtIONSM *vnsm = VIRTIO_NSM(dev);
+ BusState *bus = find_free_virtio_mmio_bus();
+
+ if (!bus) {
+ error_report("Failed to find bus for virtio-nsm device.");
+ exit(1);
+ }
+
+ qdev_prop_set_string(dev, "module-id", nems->id);
+
+ qdev_realize_and_unref(dev, bus, &error_fatal);
+ nems->vnsm = vnsm;
+}
+
+static void nitro_enclave_devices_init(NitroEnclaveMachineState *nems)
+{
+ vhost_user_vsock_init(nems);
+ virtio_nsm_init(nems);
+}
+
+static void nitro_enclave_machine_state_init(MachineState *machine)
+{
+ NitroEnclaveMachineClass *ne_class =
+ NITRO_ENCLAVE_MACHINE_GET_CLASS(machine);
+ NitroEnclaveMachineState *ne_state = NITRO_ENCLAVE_MACHINE(machine);
+
+ ne_class->parent_init(machine);
+ nitro_enclave_devices_init(ne_state);
+}
+
+static void nitro_enclave_machine_reset(MachineState *machine, ResetType type)
+{
+ NitroEnclaveMachineClass *ne_class =
+ NITRO_ENCLAVE_MACHINE_GET_CLASS(machine);
+ NitroEnclaveMachineState *ne_state = NITRO_ENCLAVE_MACHINE(machine);
+
+ ne_class->parent_reset(machine, type);
+
+ memset(ne_state->vnsm->pcrs, 0, sizeof(ne_state->vnsm->pcrs));
+
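+    /* Rebuild the boot-time measurements from the EIF hashes and parent instance metadata. */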
+ /* PCR0 */
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 0, ne_state->image_hash,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ /* PCR1 */
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 1, ne_state->bootstrap_hash,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ /* PCR2 */
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 2, ne_state->app_hash,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ /* PCR3 */
+ if (ne_state->parent_role) {
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 3,
+ (uint8_t *) ne_state->parent_role,
+ strlen(ne_state->parent_role));
+ }
+ /* PCR4 */
+ if (ne_state->parent_id) {
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 4,
+ (uint8_t *) ne_state->parent_id,
+ strlen(ne_state->parent_id));
+ }
+ /* PCR8 */
+ if (ne_state->signature_found) {
+ ne_state->vnsm->extend_pcr(ne_state->vnsm, 8,
+ ne_state->fingerprint_hash,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ }
+
+ /* First 16 PCRs are locked from boot and reserved for nitro enclave */
+ for (int i = 0; i < 16; ++i) {
+ ne_state->vnsm->lock_pcr(ne_state->vnsm, i);
+ }
+}
+
+static void nitro_enclave_machine_initfn(Object *obj)
+{
+ MicrovmMachineState *mms = MICROVM_MACHINE(obj);
+ X86MachineState *x86ms = X86_MACHINE(obj);
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ nems->id = g_strdup("i-234-enc5678");
+
+ /* AWS nitro enclaves have PCIE and ACPI disabled */
+ mms->pcie = ON_OFF_AUTO_OFF;
+ x86ms->acpi = ON_OFF_AUTO_OFF;
+}
+
+static void x86_load_eif(X86MachineState *x86ms, FWCfgState *fw_cfg,
+ int acpi_data_size, bool pvh_enabled)
+{
+ Error *err = NULL;
+ char *eif_kernel, *eif_initrd, *eif_cmdline;
+ MachineState *machine = MACHINE(x86ms);
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(x86ms);
+
+ if (!read_eif_file(machine->kernel_filename, machine->initrd_filename,
+ &eif_kernel, &eif_initrd, &eif_cmdline,
+ nems->image_hash, nems->bootstrap_hash,
+ nems->app_hash, nems->fingerprint_hash,
+ &(nems->signature_found), &err)) {
+ error_report_err(err);
+ exit(1);
+ }
+
+ g_free(machine->kernel_filename);
+ machine->kernel_filename = eif_kernel;
+ g_free(machine->initrd_filename);
+ machine->initrd_filename = eif_initrd;
+
+ /*
+     * If a kernel cmdline argument was provided, concatenate it to the
+ * extracted EIF kernel cmdline.
+ */
+ if (machine->kernel_cmdline != NULL) {
+ char *cmd = g_strdup_printf("%s %s", eif_cmdline,
+ machine->kernel_cmdline);
+ g_free(eif_cmdline);
+ g_free(machine->kernel_cmdline);
+ machine->kernel_cmdline = cmd;
+ } else {
+ machine->kernel_cmdline = eif_cmdline;
+ }
+
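+    /* Boot the kernel/initrd extracted from the EIF via the regular Linux loader. */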
+ x86_load_linux(x86ms, fw_cfg, 0, true);
+
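+    /* The kernel and initrd extracted by read_eif_file() are no longer needed on disk; remove them. */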
+ unlink(machine->kernel_filename);
+ unlink(machine->initrd_filename);
+}
+
+static bool create_memfd_backend(MachineState *ms, const char *path,
+ Error **errp)
+{
+ Object *obj;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ bool r = false;
+
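+    /* Create a memfd-backed RAM object sized to the machine RAM and link it as the memory backend. */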
+ obj = object_new(TYPE_MEMORY_BACKEND_MEMFD);
+ if (!object_property_set_int(obj, "size", ms->ram_size, errp)) {
+ goto out;
+ }
+ object_property_add_child(object_get_objects_root(), mc->default_ram_id,
+ obj);
+
+ if (!user_creatable_complete(USER_CREATABLE(obj), errp)) {
+ goto out;
+ }
+ r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp);
+
+out:
+ object_unref(obj);
+ return r;
+}
+
+static char *nitro_enclave_get_vsock_chardev_id(Object *obj, Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ return g_strdup(nems->vsock);
+}
+
+static void nitro_enclave_set_vsock_chardev_id(Object *obj, const char *value,
+ Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ g_free(nems->vsock);
+ nems->vsock = g_strdup(value);
+}
+
+static char *nitro_enclave_get_id(Object *obj, Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ return g_strdup(nems->id);
+}
+
+static void nitro_enclave_set_id(Object *obj, const char *value,
+ Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ g_free(nems->id);
+ nems->id = g_strdup(value);
+}
+
+static char *nitro_enclave_get_parent_role(Object *obj, Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ return g_strdup(nems->parent_role);
+}
+
+static void nitro_enclave_set_parent_role(Object *obj, const char *value,
+ Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ g_free(nems->parent_role);
+ nems->parent_role = g_strdup(value);
+}
+
+static char *nitro_enclave_get_parent_id(Object *obj, Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ return g_strdup(nems->parent_id);
+}
+
+static void nitro_enclave_set_parent_id(Object *obj, const char *value,
+ Error **errp)
+{
+ NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj);
+
+ g_free(nems->parent_id);
+ nems->parent_id = g_strdup(value);
+}
+
+static void nitro_enclave_class_init(ObjectClass *oc, const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ MicrovmMachineClass *mmc = MICROVM_MACHINE_CLASS(oc);
+ NitroEnclaveMachineClass *nemc = NITRO_ENCLAVE_MACHINE_CLASS(oc);
+
+ mmc->x86_load_linux = x86_load_eif;
+
+ mc->family = "nitro_enclave_i386";
+ mc->desc = "AWS Nitro Enclave";
+
+ nemc->parent_init = mc->init;
+ mc->init = nitro_enclave_machine_state_init;
+
+ nemc->parent_reset = mc->reset;
+ mc->reset = nitro_enclave_machine_reset;
+
+ mc->create_default_memdev = create_memfd_backend;
+
+ object_class_property_add_str(oc, NITRO_ENCLAVE_VSOCK_CHARDEV_ID,
+ nitro_enclave_get_vsock_chardev_id,
+ nitro_enclave_set_vsock_chardev_id);
+ object_class_property_set_description(oc, NITRO_ENCLAVE_VSOCK_CHARDEV_ID,
+ "Set chardev id for vhost-user-vsock "
+ "device");
+
+ object_class_property_add_str(oc, NITRO_ENCLAVE_ID, nitro_enclave_get_id,
+ nitro_enclave_set_id);
+ object_class_property_set_description(oc, NITRO_ENCLAVE_ID,
+ "Set enclave identifier");
+
+ object_class_property_add_str(oc, NITRO_ENCLAVE_PARENT_ROLE,
+ nitro_enclave_get_parent_role,
+ nitro_enclave_set_parent_role);
+ object_class_property_set_description(oc, NITRO_ENCLAVE_PARENT_ROLE,
+ "Set parent instance IAM role ARN");
+
+ object_class_property_add_str(oc, NITRO_ENCLAVE_PARENT_ID,
+ nitro_enclave_get_parent_id,
+ nitro_enclave_set_parent_id);
+ object_class_property_set_description(oc, NITRO_ENCLAVE_PARENT_ID,
+ "Set parent instance identifier");
+}
+
+static const TypeInfo nitro_enclave_machine_info = {
+ .name = TYPE_NITRO_ENCLAVE_MACHINE,
+ .parent = TYPE_MICROVM_MACHINE,
+ .instance_size = sizeof(NitroEnclaveMachineState),
+ .instance_init = nitro_enclave_machine_initfn,
+ .class_size = sizeof(NitroEnclaveMachineClass),
+ .class_init = nitro_enclave_class_init,
+};
+
+static void nitro_enclave_machine_init(void)
+{
+ type_register_static(&nitro_enclave_machine_info);
+}
+type_init(nitro_enclave_machine_init);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index c74931d..b211633 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -24,13 +24,14 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "exec/target_page.h"
#include "hw/i386/pc.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "hw/char/parallel.h"
#include "hw/hyperv/hv-balloon.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/vmport.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "hw/ide/ide-bus.h"
#include "hw/timer/hpet.h"
#include "hw/loader.h"
@@ -39,12 +40,13 @@
#include "hw/timer/i8254.h"
#include "hw/input/i8042.h"
#include "hw/audio/pcspk.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/xen.h"
+#include "system/reset.h"
#include "kvm/kvm_i386.h"
+#include "kvm/tdx.h"
#include "hw/xen/xen.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/error-report.h"
#include "hw/acpi/cpu_hotplug.h"
#include "acpi-build.h"
@@ -79,6 +81,20 @@
{ "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
{ "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
+GlobalProperty pc_compat_10_0[] = {};
+const size_t pc_compat_10_0_len = G_N_ELEMENTS(pc_compat_10_0);
+
+GlobalProperty pc_compat_9_2[] = {};
+const size_t pc_compat_9_2_len = G_N_ELEMENTS(pc_compat_9_2);
+
+GlobalProperty pc_compat_9_1[] = {
+ { "ICH9-LPC", "x-smi-swsmi-timer", "off" },
+ { "ICH9-LPC", "x-smi-periodic-timer", "off" },
+ { TYPE_INTEL_IOMMU_DEVICE, "stale-tm", "on" },
+ { TYPE_INTEL_IOMMU_DEVICE, "aw-bits", "39" },
+};
+const size_t pc_compat_9_1_len = G_N_ELEMENTS(pc_compat_9_1);
+
GlobalProperty pc_compat_9_0[] = {
{ TYPE_X86_CPU, "x-amd-topoext-features-only", "false" },
{ TYPE_X86_CPU, "x-l1-cache-per-thread", "false" },
@@ -244,28 +260,6 @@ GlobalProperty pc_compat_2_6[] = {
};
const size_t pc_compat_2_6_len = G_N_ELEMENTS(pc_compat_2_6);
-GlobalProperty pc_compat_2_5[] = {};
-const size_t pc_compat_2_5_len = G_N_ELEMENTS(pc_compat_2_5);
-
-GlobalProperty pc_compat_2_4[] = {
- PC_CPU_MODEL_IDS("2.4.0")
- { "Haswell-" TYPE_X86_CPU, "abm", "off" },
- { "Haswell-noTSX-" TYPE_X86_CPU, "abm", "off" },
- { "Broadwell-" TYPE_X86_CPU, "abm", "off" },
- { "Broadwell-noTSX-" TYPE_X86_CPU, "abm", "off" },
- { "host" "-" TYPE_X86_CPU, "host-cache-info", "on" },
- { TYPE_X86_CPU, "check", "off" },
- { "qemu64" "-" TYPE_X86_CPU, "sse4a", "on" },
- { "qemu64" "-" TYPE_X86_CPU, "abm", "on" },
- { "qemu64" "-" TYPE_X86_CPU, "popcnt", "on" },
- { "qemu32" "-" TYPE_X86_CPU, "popcnt", "on" },
- { "Opteron_G2" "-" TYPE_X86_CPU, "rdtscp", "on" },
- { "Opteron_G3" "-" TYPE_X86_CPU, "rdtscp", "on" },
- { "Opteron_G4" "-" TYPE_X86_CPU, "rdtscp", "on" },
- { "Opteron_G5" "-" TYPE_X86_CPU, "rdtscp", "on", }
-};
-const size_t pc_compat_2_4_len = G_N_ELEMENTS(pc_compat_2_4);
-
/*
* @PC_FW_DATA:
* Size of the chunk of memory at the top of RAM for the BIOS ACPI tables
@@ -453,7 +447,7 @@ static int check_fdc(Object *obj, void *opaque)
}
static const char * const fdc_container_path[] = {
- "/unattached", "/peripheral", "/peripheral-anon"
+ "unattached", "peripheral", "peripheral-anon"
};
/*
@@ -467,7 +461,7 @@ static ISADevice *pc_find_fdc0(void)
CheckFdcState state = { 0 };
for (i = 0; i < ARRAY_SIZE(fdc_container_path); i++) {
- container = container_get(qdev_get_machine(), fdc_container_path[i]);
+ container = machine_get_container(fdc_container_path[i]);
object_child_foreach(container, check_fdc, &state);
}
@@ -621,7 +615,8 @@ void pc_machine_done(Notifier *notifier, void *data)
/* set the number of CPUs */
x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
- fw_cfg_add_extra_pci_roots(pcms->pcibus, x86ms->fw_cfg);
+ pci_bus_add_fw_cfg_extra_pci_roots(x86ms->fw_cfg, pcms->pcibus,
+ &error_abort);
acpi_setup();
if (x86ms->fw_cfg) {
@@ -960,21 +955,23 @@ void pc_memory_init(PCMachineState *pcms,
/* Initialize PC system firmware */
pc_system_firmware_init(pcms, rom_memory);
- option_rom_mr = g_malloc(sizeof(*option_rom_mr));
- if (machine_require_guest_memfd(machine)) {
- memory_region_init_ram_guest_memfd(option_rom_mr, NULL, "pc.rom",
- PC_ROM_SIZE, &error_fatal);
- } else {
- memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
- &error_fatal);
- if (pcmc->pci_enabled) {
- memory_region_set_readonly(option_rom_mr, true);
+ if (!is_tdx_vm()) {
+ option_rom_mr = g_malloc(sizeof(*option_rom_mr));
+ if (machine_require_guest_memfd(machine)) {
+ memory_region_init_ram_guest_memfd(option_rom_mr, NULL, "pc.rom",
+ PC_ROM_SIZE, &error_fatal);
+ } else {
+ memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
+ &error_fatal);
+ if (pcmc->pci_enabled) {
+ memory_region_set_readonly(option_rom_mr, true);
+ }
}
+ memory_region_add_subregion_overlap(rom_memory,
+ PC_ROM_MIN_VGA,
+ option_rom_mr,
+ 1);
}
- memory_region_add_subregion_overlap(rom_memory,
- PC_ROM_MIN_VGA,
- option_rom_mr,
- 1);
fw_cfg = fw_cfg_arch_create(machine,
x86ms->boot_cpus, x86ms->apic_id_limit);
@@ -983,14 +980,13 @@ void pc_memory_init(PCMachineState *pcms,
if (machine->device_memory) {
uint64_t *val = g_malloc(sizeof(*val));
- uint64_t res_mem_end = machine->device_memory->base;
-
- if (!pcmc->broken_reserved_end) {
- res_mem_end += memory_region_size(&machine->device_memory->mr);
- }
+ uint64_t res_mem_end;
if (pcms->cxl_devices_state.is_enabled) {
res_mem_end = cxl_resv_end;
+ } else {
+ res_mem_end = machine->device_memory->base
+ + memory_region_size(&machine->device_memory->mr);
}
*val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB));
fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val));
@@ -1028,9 +1024,7 @@ uint64_t pc_pci_hole64_start(void)
hole64_start = pc_get_cxl_range_end(pcms);
} else if (pcmc->has_reserved_memory && (ms->ram_size < ms->maxram_size)) {
pc_get_device_memory_range(pcms, &hole64_start, &size);
- if (!pcmc->broken_reserved_end) {
- hole64_start += size;
- }
+ hole64_start += size;
} else {
hole64_start = pc_above_4g_end(pcms);
}
@@ -1042,7 +1036,6 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus)
{
DeviceState *dev = NULL;
- rom_set_order_override(FW_CFG_ORDER_OVERRIDE_VGA);
if (pci_bus) {
PCIDevice *pcidev = pci_vga_init(pci_bus);
dev = pcidev ? &pcidev->qdev : NULL;
@@ -1050,14 +1043,14 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus)
ISADevice *isadev = isa_vga_init(isa_bus);
dev = isadev ? DEVICE(isadev) : NULL;
}
- rom_reset_order_override();
+
return dev;
}
static const MemoryRegionOps ioport80_io_ops = {
.write = ioport80_write,
.read = ioport80_read,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.impl = {
.min_access_size = 1,
.max_access_size = 1,
@@ -1067,7 +1060,7 @@ static const MemoryRegionOps ioport80_io_ops = {
static const MemoryRegionOps ioportF0_io_ops = {
.write = ioportF0_write,
.read = ioportF0_read,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.impl = {
.min_access_size = 1,
.max_access_size = 1,
@@ -1075,7 +1068,7 @@ static const MemoryRegionOps ioportF0_io_ops = {
};
static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl,
- bool create_i8042, bool no_vmport)
+ bool create_i8042, bool no_vmport, Error **errp)
{
int i;
DriveInfo *fd[MAX_FD];
@@ -1100,6 +1093,10 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl,
}
if (!create_i8042) {
+ if (!no_vmport) {
+ error_setg(errp,
+ "vmport requires the i8042 controller to be enabled");
+ }
return;
}
@@ -1217,9 +1214,17 @@ void pc_basic_device_init(struct PCMachineState *pcms,
isa_realize_and_unref(pcms->pcspk, isa_bus, &error_fatal);
}
+ if (pcms->vmport == ON_OFF_AUTO_AUTO) {
+ pcms->vmport = (xen_enabled() || !pcms->i8042_enabled)
+ ? ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON;
+ }
+
/* Super I/O */
pc_superio_init(isa_bus, create_fdctrl, pcms->i8042_enabled,
- pcms->vmport != ON_OFF_AUTO_ON);
+ pcms->vmport != ON_OFF_AUTO_ON, &error_fatal);
+
+ pcms->machine_done.notify = pc_machine_done;
+ qemu_add_machine_init_done_notifier(&pcms->machine_done);
}
void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus)
@@ -1228,16 +1233,14 @@ void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus)
bool default_is_ne2k = g_str_equal(mc->default_nic, TYPE_ISA_NE2000);
NICInfo *nd;
- rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC);
-
while ((nd = qemu_find_nic_info(TYPE_ISA_NE2000, default_is_ne2k, NULL))) {
pc_init_ne2k_isa(isa_bus, nd, &error_fatal);
}
/* Anything remaining should be a PCI NIC */
- pci_init_nic_devices(pci_bus, mc->default_nic);
-
- rom_reset_order_override();
+ if (pci_bus) {
+ pci_init_nic_devices(pci_bus, mc->default_nic);
+ }
}
void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs)
@@ -1678,7 +1681,7 @@ static void pc_machine_initfn(Object *obj)
pcms->sata_enabled = true;
pcms->i8042_enabled = true;
pcms->max_fw_size = 8 * MiB;
-#ifdef CONFIG_HPET
+#if defined(CONFIG_HPET)
pcms->hpet_enabled = true;
#endif
pcms->fd_bootchk = true;
@@ -1691,17 +1694,14 @@ static void pc_machine_initfn(Object *obj)
if (pcmc->pci_enabled) {
cxl_machine_init(obj, &pcms->cxl_devices_state);
}
-
- pcms->machine_done.notify = pc_machine_done;
- qemu_add_machine_init_done_notifier(&pcms->machine_done);
}
-static void pc_machine_reset(MachineState *machine, ShutdownCause reason)
+static void pc_machine_reset(MachineState *machine, ResetType type)
{
CPUState *cs;
X86CPU *cpu;
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
/* Reset APIC after devices have been reset to cancel
* any changes that qemu_devices_reset() might have done.
@@ -1716,7 +1716,7 @@ static void pc_machine_reset(MachineState *machine, ShutdownCause reason)
static void pc_machine_wakeup(MachineState *machine)
{
cpu_synchronize_all_states();
- pc_machine_reset(machine, SHUTDOWN_CAUSE_NONE);
+ pc_machine_reset(machine, RESET_TYPE_WAKEUP);
cpu_synchronize_all_post_reset();
}
@@ -1739,7 +1739,7 @@ static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp)
return true;
}
-static void pc_machine_class_init(ObjectClass *oc, void *data)
+static void pc_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
@@ -1775,6 +1775,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->nvdimm_supported = true;
mc->smp_props.dies_supported = true;
mc->smp_props.modules_supported = true;
+ mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1D] = true;
+ mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1I] = true;
+ mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L2] = true;
+ mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L3] = true;
mc->default_ram_id = "pc.ram";
pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_AUTO;
@@ -1807,6 +1811,8 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
object_class_property_add_bool(oc, PC_MACHINE_I8042,
pc_machine_get_i8042, pc_machine_set_i8042);
+ object_class_property_set_description(oc, PC_MACHINE_I8042,
+ "Enable/disable Intel 8042 PS/2 controller emulation");
object_class_property_add_bool(oc, "default-bus-bypass-iommu",
pc_machine_get_default_bus_bypass_iommu,
@@ -1837,7 +1843,7 @@ static const TypeInfo pc_machine_info = {
.instance_init = pc_machine_initfn,
.class_size = sizeof(PCMachineClass),
.class_init = pc_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 9445b07..ea7572e 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -43,15 +43,15 @@
#include "hw/ide/isa.h"
#include "hw/ide/pci.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/i386/kvm/clock.h"
#include "hw/sysbus.h"
#include "hw/i2c/smbus_eeprom.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/acpi/acpi.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/xen.h"
+#include "system/xen.h"
#ifdef CONFIG_XEN
#include <xen/hvm/hvm_info_table.h>
#include "hw/xen/xen_pt.h"
@@ -61,10 +61,11 @@
#include "hw/xen/xen.h"
#include "migration/global_state.h"
#include "migration/misc.h"
-#include "sysemu/runstate.h"
-#include "sysemu/numa.h"
+#include "system/runstate.h"
+#include "system/numa.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
+#include "hw/uefi/var-service-api.h"
#include "hw/i386/acpi-build.h"
#include "target/i386/cpu.h"
@@ -284,6 +285,8 @@ static void pc_init1(MachineState *machine, const char *pci_type)
pcms->idebus[0] = qdev_get_child_bus(dev, "ide.0");
pcms->idebus[1] = qdev_get_child_bus(dev, "ide.1");
} else {
+ uint32_t irq;
+
isa_bus = isa_bus_new(NULL, system_memory, system_io,
&error_abort);
isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
@@ -291,6 +294,9 @@ static void pc_init1(MachineState *machine, const char *pci_type)
x86ms->rtc = isa_new(TYPE_MC146818_RTC);
qdev_prop_set_int32(DEVICE(x86ms->rtc), "base_year", 2000);
isa_realize_and_unref(x86ms->rtc, isa_bus, &error_fatal);
+ irq = object_property_get_uint(OBJECT(x86ms->rtc), "irq",
+ &error_fatal);
+ isa_connect_gpio_out(ISA_DEVICE(x86ms->rtc), 0, irq);
i8257_dma_init(OBJECT(machine), isa_bus, 0);
pcms->hpet_enabled = false;
@@ -310,11 +316,6 @@ static void pc_init1(MachineState *machine, const char *pci_type)
pc_vga_init(isa_bus, pcmc->pci_enabled ? pcms->pcibus : NULL);
- assert(pcms->vmport != ON_OFF_AUTO__MAX);
- if (pcms->vmport == ON_OFF_AUTO_AUTO) {
- pcms->vmport = xen_enabled() ? ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON;
- }
-
/* init basic PC hardware */
pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc,
!MACHINE_CLASS(pcmc)->no_floppy, 0x4);
@@ -451,7 +452,10 @@ static void pc_i440fx_init(MachineState *machine)
}
#define DEFINE_I440FX_MACHINE(major, minor) \
- DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, major, minor);
+ DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, false, NULL, major, minor);
+
+#define DEFINE_I440FX_MACHINE_AS_LATEST(major, minor) \
+ DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, true, "pc", major, minor);
static void pc_i440fx_machine_options(MachineClass *m)
{
@@ -470,6 +474,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
+ machine_class_allow_dynamic_sysbus_dev(m, TYPE_UEFI_VARS_X64);
object_class_property_add_enum(oc, "x-south-bridge", "PCSouthBridgeOption",
&PCSouthBridgeOption_lookup,
@@ -479,11 +484,36 @@ static void pc_i440fx_machine_options(MachineClass *m)
"Use a different south bridge than PIIX3");
}
-static void pc_i440fx_machine_9_1_options(MachineClass *m)
+static void pc_i440fx_machine_10_1_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
- m->alias = "pc";
- m->is_default = true;
+}
+
+DEFINE_I440FX_MACHINE_AS_LATEST(10, 1);
+
+static void pc_i440fx_machine_10_0_options(MachineClass *m)
+{
+ pc_i440fx_machine_10_1_options(m);
+ compat_props_add(m->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+ compat_props_add(m->compat_props, pc_compat_10_0, pc_compat_10_0_len);
+}
+
+DEFINE_I440FX_MACHINE(10, 0);
+
+static void pc_i440fx_machine_9_2_options(MachineClass *m)
+{
+ pc_i440fx_machine_10_0_options(m);
+ compat_props_add(m->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+ compat_props_add(m->compat_props, pc_compat_9_2, pc_compat_9_2_len);
+}
+
+DEFINE_I440FX_MACHINE(9, 2);
+
+static void pc_i440fx_machine_9_1_options(MachineClass *m)
+{
+ pc_i440fx_machine_9_2_options(m);
+ compat_props_add(m->compat_props, hw_compat_9_1, hw_compat_9_1_len);
+ compat_props_add(m->compat_props, pc_compat_9_1, pc_compat_9_1_len);
}
DEFINE_I440FX_MACHINE(9, 1);
@@ -493,8 +523,7 @@ static void pc_i440fx_machine_9_0_options(MachineClass *m)
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_machine_9_1_options(m);
- m->alias = NULL;
- m->is_default = false;
+ m->smbios_memory_device_size = 16 * GiB;
compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len);
compat_props_add(m->compat_props, pc_compat_9_0, pc_compat_9_0_len);
@@ -754,32 +783,6 @@ static void pc_i440fx_machine_2_6_options(MachineClass *m)
DEFINE_I440FX_MACHINE(2, 6);
-static void pc_i440fx_machine_2_5_options(MachineClass *m)
-{
- X86MachineClass *x86mc = X86_MACHINE_CLASS(m);
-
- pc_i440fx_machine_2_6_options(m);
- x86mc->save_tsc_khz = false;
- m->legacy_fw_cfg_order = 1;
- compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len);
- compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len);
-}
-
-DEFINE_I440FX_MACHINE(2, 5);
-
-static void pc_i440fx_machine_2_4_options(MachineClass *m)
-{
- PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-
- pc_i440fx_machine_2_5_options(m);
- m->hw_version = "2.4.0";
- pcmc->broken_reserved_end = true;
- compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len);
- compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len);
-}
-
-DEFINE_I440FX_MACHINE(2, 4);
-
#ifdef CONFIG_ISAPC
static void isapc_machine_options(MachineClass *m)
{
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 71d3c6d..33211b1 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -35,8 +35,8 @@
#include "hw/loader.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/rtc/mc146818rtc.h"
-#include "sysemu/tcg.h"
-#include "sysemu/kvm.h"
+#include "system/tcg.h"
+#include "system/kvm.h"
#include "hw/i386/kvm/clock.h"
#include "hw/pci-host/q35.h"
#include "hw/pci/pcie_port.h"
@@ -55,9 +55,10 @@
#include "hw/usb/hcd-uhci.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
+#include "hw/uefi/var-service-api.h"
#include "hw/i386/acpi-build.h"
#include "target/i386/cpu.h"
@@ -276,11 +277,6 @@ static void pc_q35_init(MachineState *machine)
x86_register_ferr_irq(x86ms->gsi[13]);
}
- assert(pcms->vmport != ON_OFF_AUTO__MAX);
- if (pcms->vmport == ON_OFF_AUTO_AUTO) {
- pcms->vmport = ON_OFF_AUTO_ON;
- }
-
/* init basic PC hardware */
pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, !mc->no_floppy,
0xff0104);
@@ -332,10 +328,13 @@ static void pc_q35_init(MachineState *machine)
}
#define DEFINE_Q35_MACHINE(major, minor) \
- DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, major, minor);
+ DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, NULL, major, minor);
+
+#define DEFINE_Q35_MACHINE_AS_LATEST(major, minor) \
+ DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, "q35", major, minor);
#define DEFINE_Q35_MACHINE_BUGFIX(major, minor, micro) \
- DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, major, minor, micro);
+ DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, NULL, major, minor, micro);
static void pc_q35_machine_options(MachineClass *m)
{
@@ -357,14 +356,41 @@ static void pc_q35_machine_options(MachineClass *m)
machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
+ machine_class_allow_dynamic_sysbus_dev(m, TYPE_UEFI_VARS_X64);
compat_props_add(m->compat_props,
pc_q35_compat_defaults, pc_q35_compat_defaults_len);
}
-static void pc_q35_machine_9_1_options(MachineClass *m)
+static void pc_q35_machine_10_1_options(MachineClass *m)
{
pc_q35_machine_options(m);
- m->alias = "q35";
+}
+
+DEFINE_Q35_MACHINE_AS_LATEST(10, 1);
+
+static void pc_q35_machine_10_0_options(MachineClass *m)
+{
+ pc_q35_machine_10_1_options(m);
+ compat_props_add(m->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+ compat_props_add(m->compat_props, pc_compat_10_0, pc_compat_10_0_len);
+}
+
+DEFINE_Q35_MACHINE(10, 0);
+
+static void pc_q35_machine_9_2_options(MachineClass *m)
+{
+ pc_q35_machine_10_0_options(m);
+ compat_props_add(m->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+ compat_props_add(m->compat_props, pc_compat_9_2, pc_compat_9_2_len);
+}
+
+DEFINE_Q35_MACHINE(9, 2);
+
+static void pc_q35_machine_9_1_options(MachineClass *m)
+{
+ pc_q35_machine_9_2_options(m);
+ compat_props_add(m->compat_props, hw_compat_9_1, hw_compat_9_1_len);
+ compat_props_add(m->compat_props, pc_compat_9_1, pc_compat_9_1_len);
}
DEFINE_Q35_MACHINE(9, 1);
@@ -373,7 +399,7 @@ static void pc_q35_machine_9_0_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_9_1_options(m);
- m->alias = NULL;
+ m->smbios_memory_device_size = 16 * GiB;
compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len);
compat_props_add(m->compat_props, pc_compat_9_0, pc_compat_9_0_len);
pcmc->isa_bios_alias = false;
@@ -646,29 +672,3 @@ static void pc_q35_machine_2_6_options(MachineClass *m)
}
DEFINE_Q35_MACHINE(2, 6);
-
-static void pc_q35_machine_2_5_options(MachineClass *m)
-{
- X86MachineClass *x86mc = X86_MACHINE_CLASS(m);
-
- pc_q35_machine_2_6_options(m);
- x86mc->save_tsc_khz = false;
- m->legacy_fw_cfg_order = 1;
- compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len);
- compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len);
-}
-
-DEFINE_Q35_MACHINE(2, 5);
-
-static void pc_q35_machine_2_4_options(MachineClass *m)
-{
- PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-
- pc_q35_machine_2_5_options(m);
- m->hw_version = "2.4.0";
- pcmc->broken_reserved_end = true;
- compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len);
- compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len);
-}
-
-DEFINE_Q35_MACHINE(2, 4);
diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c
index ef80281..821396c 100644
--- a/hw/i386/pc_sysfw.c
+++ b/hw/i386/pc_sysfw.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/units.h"
@@ -35,8 +35,9 @@
#include "hw/loader.h"
#include "hw/qdev-properties.h"
#include "hw/block/flash.h"
-#include "sysemu/kvm.h"
-#include "sev.h"
+#include "system/kvm.h"
+#include "target/i386/sev.h"
+#include "kvm/tdx.h"
#define FLASH_SECTOR_SIZE 4096
@@ -280,5 +281,11 @@ void x86_firmware_configure(hwaddr gpa, void *ptr, int size)
}
sev_encrypt_flash(gpa, ptr, size, &error_fatal);
+ } else if (is_tdx_vm()) {
+ ret = tdx_parse_tdvf(ptr, size);
+ if (ret) {
+ error_report("failed to parse TDVF for TDX VM");
+ exit(1);
+ }
}
}
diff --git a/hw/i386/pc_sysfw_ovmf.c b/hw/i386/pc_sysfw_ovmf.c
index 07a4c26..da947c3 100644
--- a/hw/i386/pc_sysfw_ovmf.c
+++ b/hw/i386/pc_sysfw_ovmf.c
@@ -26,6 +26,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
+#include "exec/target_page.h"
#include "cpu.h"
#define OVMF_TABLE_FOOTER_GUID "96b582de-1fb2-45f7-baea-a366c55a082d"
diff --git a/hw/i386/port92.c b/hw/i386/port92.c
index b25157f..39b6f31 100644
--- a/hw/i386/port92.c
+++ b/hw/i386/port92.c
@@ -7,7 +7,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/isa/isa.h"
@@ -97,12 +97,12 @@ static void port92_realizefn(DeviceState *dev, Error **errp)
isa_register_ioport(isadev, &s->io, 0x92);
}
-static void port92_class_initfn(ObjectClass *klass, void *data)
+static void port92_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = port92_realizefn;
- dc->reset = port92_reset;
+ device_class_set_legacy_reset(dc, port92_reset);
dc->vmsd = &vmstate_port92_isa;
/*
* Reason: unlike ordinary ISA devices, this one needs additional
diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c
index d664829..2b3b282 100644
--- a/hw/i386/sgx-epc.c
+++ b/hw/i386/sgx-epc.c
@@ -17,14 +17,13 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "target/i386/cpu.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
-static Property sgx_epc_properties[] = {
+static const Property sgx_epc_properties[] = {
DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0),
DEFINE_PROP_UINT32(SGX_EPC_NUMA_NODE_PROP, SGXEPCDevice, node, 0),
DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem,
TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *),
- DEFINE_PROP_END_OF_LIST(),
};
static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name,
@@ -148,7 +147,7 @@ static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md,
info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC;
}
-static void sgx_epc_class_init(ObjectClass *oc, void *data)
+static void sgx_epc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
@@ -174,7 +173,7 @@ static const TypeInfo sgx_epc_info = {
.instance_init = sgx_epc_init,
.class_init = sgx_epc_class_init,
.class_size = sizeof(DeviceClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_MEMORY_DEVICE },
{ }
},
diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c
index 16b1dfd..d295e54 100644
--- a/hw/i386/sgx-stub.c
+++ b/hw/i386/sgx-stub.c
@@ -3,20 +3,20 @@
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "hw/i386/sgx-epc.h"
+#include "qapi/qapi-commands-misc-i386.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
void sgx_epc_build_srat(GArray *table_data)
{
}
-SGXInfo *qmp_query_sgx(Error **errp)
+SgxInfo *qmp_query_sgx(Error **errp)
{
error_setg(errp, "SGX support is not compiled in");
return NULL;
}
-SGXInfo *qmp_query_sgx_capabilities(Error **errp)
+SgxInfo *qmp_query_sgx_capabilities(Error **errp)
{
error_setg(errp, "SGX support is not compiled in");
return NULL;
@@ -32,6 +32,11 @@ void pc_machine_init_sgx_epc(PCMachineState *pcms)
memset(&pcms->sgx_epc, 0, sizeof(SGXEPCState));
}
+bool check_sgx_support(void)
+{
+ return false;
+}
+
bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size)
{
return true;
diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c
index a14a84b..e280154 100644
--- a/hw/i386/sgx.c
+++ b/hw/i386/sgx.c
@@ -19,10 +19,10 @@
#include "monitor/hmp-target.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "qapi/qapi-commands-misc-target.h"
-#include "exec/address-spaces.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/reset.h"
+#include "qapi/qapi-commands-misc-i386.h"
+#include "system/address-spaces.h"
+#include "system/hw_accel.h"
+#include "system/reset.h"
#include <sys/ioctl.h>
#include "hw/acpi/aml-build.h"
@@ -84,10 +84,10 @@ static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high)
((high & MAKE_64BIT_MASK(0, 20)) << 32);
}
-static SGXEPCSectionList *sgx_calc_host_epc_sections(void)
+static SgxEpcSectionList *sgx_calc_host_epc_sections(void)
{
- SGXEPCSectionList *head = NULL, **tail = &head;
- SGXEPCSection *section;
+ SgxEpcSectionList *head = NULL, **tail = &head;
+ SgxEpcSection *section;
uint32_t i, type;
uint32_t eax, ebx, ecx, edx;
uint32_t j = 0;
@@ -104,7 +104,7 @@ static SGXEPCSectionList *sgx_calc_host_epc_sections(void)
break;
}
- section = g_new0(SGXEPCSection, 1);
+ section = g_new0(SgxEpcSection, 1);
section->node = j++;
section->size = sgx_calc_section_metric(ecx, edx);
QAPI_LIST_APPEND(tail, section);
@@ -153,9 +153,9 @@ static void sgx_epc_reset(void *opaque)
}
}
-SGXInfo *qmp_query_sgx_capabilities(Error **errp)
+SgxInfo *qmp_query_sgx_capabilities(Error **errp)
{
- SGXInfo *info = NULL;
+ SgxInfo *info = NULL;
uint32_t eax, ebx, ecx, edx;
Error *local_err = NULL;
@@ -166,7 +166,7 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp)
return NULL;
}
- info = g_new0(SGXInfo, 1);
+ info = g_new0(SgxInfo, 1);
host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx);
info->sgx = ebx & (1U << 2) ? true : false;
@@ -183,17 +183,17 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp)
return info;
}
-static SGXEPCSectionList *sgx_get_epc_sections_list(void)
+static SgxEpcSectionList *sgx_get_epc_sections_list(void)
{
GSList *device_list = sgx_epc_get_device_list();
- SGXEPCSectionList *head = NULL, **tail = &head;
- SGXEPCSection *section;
+ SgxEpcSectionList *head = NULL, **tail = &head;
+ SgxEpcSection *section;
for (; device_list; device_list = device_list->next) {
DeviceState *dev = device_list->data;
Object *obj = OBJECT(dev);
- section = g_new0(SGXEPCSection, 1);
+ section = g_new0(SgxEpcSection, 1);
section->node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP,
&error_abort);
section->size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP,
@@ -205,9 +205,9 @@ static SGXEPCSectionList *sgx_get_epc_sections_list(void)
return head;
}
-SGXInfo *qmp_query_sgx(Error **errp)
+SgxInfo *qmp_query_sgx(Error **errp)
{
- SGXInfo *info = NULL;
+ SgxInfo *info = NULL;
X86MachineState *x86ms;
PCMachineState *pcms =
(PCMachineState *)object_dynamic_cast(qdev_get_machine(),
@@ -223,7 +223,7 @@ SGXInfo *qmp_query_sgx(Error **errp)
return NULL;
}
- info = g_new0(SGXInfo, 1);
+ info = g_new0(SgxInfo, 1);
info->sgx = true;
info->sgx1 = true;
@@ -237,8 +237,8 @@ SGXInfo *qmp_query_sgx(Error **errp)
void hmp_info_sgx(Monitor *mon, const QDict *qdict)
{
Error *err = NULL;
- SGXEPCSectionList *section_list, *section;
- g_autoptr(SGXInfo) info = qmp_query_sgx(&err);
+ SgxEpcSectionList *section_list, *section;
+ g_autoptr(SgxInfo) info = qmp_query_sgx(&err);
uint64_t size = 0;
if (err) {
@@ -266,12 +266,22 @@ void hmp_info_sgx(Monitor *mon, const QDict *qdict)
size);
}
+bool check_sgx_support(void)
+{
+ if (!object_dynamic_cast(qdev_get_machine(), TYPE_PC_MACHINE)) {
+ return false;
+ }
+ return true;
+}
+
bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size)
{
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ PCMachineState *pcms =
+ (PCMachineState *)object_dynamic_cast(qdev_get_machine(),
+ TYPE_PC_MACHINE);
SGXEPCDevice *epc;
- if (pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) {
+ if (!pcms || pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) {
return true;
}
diff --git a/hw/i386/tdvf-hob.c b/hw/i386/tdvf-hob.c
new file mode 100644
index 0000000..782b3d1
--- /dev/null
+++ b/hw/i386/tdvf-hob.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2025 Intel Corporation
+ * Author: Isaku Yamahata <isaku.yamahata at gmail.com>
+ * <isaku.yamahata at intel.com>
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "standard-headers/uefi/uefi.h"
+#include "hw/pci/pcie_host.h"
+#include "tdvf-hob.h"
+
+typedef struct TdvfHob {
+ hwaddr hob_addr;
+ void *ptr;
+ int size;
+
+ /* working area */
+ void *current;
+ void *end;
+} TdvfHob;
+
+static uint64_t tdvf_current_guest_addr(const TdvfHob *hob)
+{
+ return hob->hob_addr + (hob->current - hob->ptr);
+}
+
+static void tdvf_align(TdvfHob *hob, size_t align)
+{
+ hob->current = QEMU_ALIGN_PTR_UP(hob->current, align);
+}
+
+static void *tdvf_get_area(TdvfHob *hob, uint64_t size)
+{
+ void *ret;
+
+ if (hob->current + size > hob->end) {
+ error_report("TD_HOB overrun, size = 0x%" PRIx64, size);
+ exit(1);
+ }
+
+ ret = hob->current;
+ hob->current += size;
+ tdvf_align(hob, 8);
+ return ret;
+}
+
+static void tdvf_hob_add_memory_resources(TdxGuest *tdx, TdvfHob *hob)
+{
+ EFI_HOB_RESOURCE_DESCRIPTOR *region;
+ EFI_RESOURCE_ATTRIBUTE_TYPE attr;
+ EFI_RESOURCE_TYPE resource_type;
+
+ TdxRamEntry *e;
+ int i;
+
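+    /* Emit one EFI resource descriptor HOB per TDX RAM entry, marked as private or unaccepted memory. */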
+ for (i = 0; i < tdx->nr_ram_entries; i++) {
+ e = &tdx->ram_entries[i];
+
+ if (e->type == TDX_RAM_UNACCEPTED) {
+ resource_type = EFI_RESOURCE_MEMORY_UNACCEPTED;
+ attr = EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED;
+ } else if (e->type == TDX_RAM_ADDED) {
+ resource_type = EFI_RESOURCE_SYSTEM_MEMORY;
+ attr = EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE;
+ } else {
+ error_report("unknown TDX_RAM_ENTRY type %d", e->type);
+ exit(1);
+ }
+
+ region = tdvf_get_area(hob, sizeof(*region));
+ *region = (EFI_HOB_RESOURCE_DESCRIPTOR) {
+ .Header = {
+ .HobType = EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
+ .HobLength = cpu_to_le16(sizeof(*region)),
+ .Reserved = cpu_to_le32(0),
+ },
+ .Owner = EFI_HOB_OWNER_ZERO,
+ .ResourceType = cpu_to_le32(resource_type),
+ .ResourceAttribute = cpu_to_le32(attr),
+ .PhysicalStart = cpu_to_le64(e->address),
+ .ResourceLength = cpu_to_le64(e->length),
+ };
+ }
+}
+
+void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob)
+{
+ TdvfHob hob = {
+ .hob_addr = td_hob->address,
+ .size = td_hob->size,
+ .ptr = td_hob->mem_ptr,
+
+ .current = td_hob->mem_ptr,
+ .end = td_hob->mem_ptr + td_hob->size,
+ };
+
+ EFI_HOB_GENERIC_HEADER *last_hob;
+ EFI_HOB_HANDOFF_INFO_TABLE *hit;
+
+ /* Note, Efi{Free}Memory{Bottom,Top} are ignored, leave 'em zeroed. */
+ hit = tdvf_get_area(&hob, sizeof(*hit));
+ *hit = (EFI_HOB_HANDOFF_INFO_TABLE) {
+ .Header = {
+ .HobType = EFI_HOB_TYPE_HANDOFF,
+ .HobLength = cpu_to_le16(sizeof(*hit)),
+ .Reserved = cpu_to_le32(0),
+ },
+ .Version = cpu_to_le32(EFI_HOB_HANDOFF_TABLE_VERSION),
+ .BootMode = cpu_to_le32(0),
+ .EfiMemoryTop = cpu_to_le64(0),
+ .EfiMemoryBottom = cpu_to_le64(0),
+ .EfiFreeMemoryTop = cpu_to_le64(0),
+ .EfiFreeMemoryBottom = cpu_to_le64(0),
+ .EfiEndOfHobList = cpu_to_le64(0), /* initialized later */
+ };
+
+ tdvf_hob_add_memory_resources(tdx, &hob);
+
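+    /* Terminate the HOB list and record its guest address in the handoff table. */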
+ last_hob = tdvf_get_area(&hob, sizeof(*last_hob));
+ *last_hob = (EFI_HOB_GENERIC_HEADER) {
+ .HobType = EFI_HOB_TYPE_END_OF_HOB_LIST,
+ .HobLength = cpu_to_le16(sizeof(*last_hob)),
+ .Reserved = cpu_to_le32(0),
+ };
+ hit->EfiEndOfHobList = tdvf_current_guest_addr(&hob);
+}
diff --git a/hw/i386/tdvf-hob.h b/hw/i386/tdvf-hob.h
new file mode 100644
index 0000000..4fc6a37
--- /dev/null
+++ b/hw/i386/tdvf-hob.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef HW_I386_TD_HOB_H
+#define HW_I386_TD_HOB_H
+
+#include "hw/i386/tdvf.h"
+#include "target/i386/kvm/tdx.h"
+
+void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob);
+
+#define EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE \
+ (EFI_RESOURCE_ATTRIBUTE_PRESENT | \
+ EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
+ EFI_RESOURCE_ATTRIBUTE_TESTED)
+
+#define EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED \
+ (EFI_RESOURCE_ATTRIBUTE_PRESENT | \
+ EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
+ EFI_RESOURCE_ATTRIBUTE_TESTED)
+
+#define EFI_RESOURCE_ATTRIBUTE_TDVF_MMIO \
+ (EFI_RESOURCE_ATTRIBUTE_PRESENT | \
+ EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
+ EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE)
+
+#endif
diff --git a/hw/i386/tdvf.c b/hw/i386/tdvf.c
new file mode 100644
index 0000000..645d9d1
--- /dev/null
+++ b/hw/i386/tdvf.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2025 Intel Corporation
+ * Author: Isaku Yamahata <isaku.yamahata at gmail.com>
+ * <isaku.yamahata at intel.com>
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+
+#include "hw/i386/pc.h"
+#include "hw/i386/tdvf.h"
+#include "system/kvm.h"
+
+#define TDX_METADATA_OFFSET_GUID "e47a6535-984a-4798-865e-4685a7bf8ec2"
+#define TDX_METADATA_VERSION 1
+#define TDVF_SIGNATURE 0x46564454 /* TDVF as little endian */
+#define TDVF_ALIGNMENT 4096
+
+/*
+ * The raw structs read from TDVF keep the naming convention used in
+ * the TDVF Design Guide spec.
+ */
+typedef struct {
+ uint32_t DataOffset;
+ uint32_t RawDataSize;
+ uint64_t MemoryAddress;
+ uint64_t MemoryDataSize;
+ uint32_t Type;
+ uint32_t Attributes;
+} TdvfSectionEntry;
+
+typedef struct {
+ uint32_t Signature;
+ uint32_t Length;
+ uint32_t Version;
+ uint32_t NumberOfSectionEntries;
+ TdvfSectionEntry SectionEntries[];
+} TdvfMetadata;
+
+struct tdx_metadata_offset {
+ uint32_t offset;
+};
+
+static TdvfMetadata *tdvf_get_metadata(void *flash_ptr, int size)
+{
+ TdvfMetadata *metadata;
+ uint32_t offset = 0;
+ uint8_t *data;
+
+ if ((uint32_t) size != size) {
+ return NULL;
+ }
+
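+    /* The metadata offset, counted from the end of the image, is stored in a GUIDed OVMF table entry. */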
+ if (pc_system_ovmf_table_find(TDX_METADATA_OFFSET_GUID, &data, NULL)) {
+ offset = size - le32_to_cpu(((struct tdx_metadata_offset *)data)->offset);
+
+ if (offset + sizeof(*metadata) > size) {
+ return NULL;
+ }
+ } else {
+ error_report("Cannot find TDX_METADATA_OFFSET_GUID");
+ return NULL;
+ }
+
+ metadata = flash_ptr + offset;
+
+ /* Finally, verify the signature to determine if this is a TDVF image. */
+ metadata->Signature = le32_to_cpu(metadata->Signature);
+ if (metadata->Signature != TDVF_SIGNATURE) {
+ error_report("Invalid TDVF signature in metadata!");
+ return NULL;
+ }
+
+ /* Sanity check that the TDVF doesn't overlap its own metadata. */
+ metadata->Length = le32_to_cpu(metadata->Length);
+ if (offset + metadata->Length > size) {
+ return NULL;
+ }
+
+ /* Only version 1 is supported/defined. */
+ metadata->Version = le32_to_cpu(metadata->Version);
+ if (metadata->Version != TDX_METADATA_VERSION) {
+ return NULL;
+ }
+
+ return metadata;
+}
+
+static int tdvf_parse_and_check_section_entry(const TdvfSectionEntry *src,
+ TdxFirmwareEntry *entry)
+{
+ entry->data_offset = le32_to_cpu(src->DataOffset);
+ entry->data_len = le32_to_cpu(src->RawDataSize);
+ entry->address = le64_to_cpu(src->MemoryAddress);
+ entry->size = le64_to_cpu(src->MemoryDataSize);
+ entry->type = le32_to_cpu(src->Type);
+ entry->attributes = le32_to_cpu(src->Attributes);
+
+ /* sanity check */
+ if (entry->size < entry->data_len) {
+ error_report("Broken metadata RawDataSize 0x%x MemoryDataSize 0x%"PRIx64,
+ entry->data_len, entry->size);
+ return -1;
+ }
+ if (!QEMU_IS_ALIGNED(entry->address, TDVF_ALIGNMENT)) {
+ error_report("MemoryAddress 0x%"PRIx64" not page aligned", entry->address);
+ return -1;
+ }
+ if (!QEMU_IS_ALIGNED(entry->size, TDVF_ALIGNMENT)) {
+ error_report("MemoryDataSize 0x%"PRIx64" not page aligned", entry->size);
+ return -1;
+ }
+
+ switch (entry->type) {
+ case TDVF_SECTION_TYPE_BFV:
+ case TDVF_SECTION_TYPE_CFV:
+ /* The sections that must be copied from the firmware image to TD memory */
+ if (entry->data_len == 0) {
+ error_report("%d section with RawDataSize == 0", entry->type);
+ return -1;
+ }
+ break;
+ case TDVF_SECTION_TYPE_TD_HOB:
+ case TDVF_SECTION_TYPE_TEMP_MEM:
+ /* The sections that need not be copied from the firmware image */
+ if (entry->data_len != 0) {
+ error_report("%d section with RawDataSize 0x%x != 0",
+ entry->type, entry->data_len);
+ return -1;
+ }
+ break;
+ default:
+ error_report("TDVF contains unsupported section type %d", entry->type);
+ return -1;
+ }
+
+ return 0;
+}
+
+int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size)
+{
+ g_autofree TdvfSectionEntry *sections = NULL;
+ TdvfMetadata *metadata;
+ ssize_t entries_size;
+ int i;
+
+ metadata = tdvf_get_metadata(flash_ptr, size);
+ if (!metadata) {
+ return -EINVAL;
+ }
+
+ /* load and parse metadata entries */
+ fw->nr_entries = le32_to_cpu(metadata->NumberOfSectionEntries);
+ if (fw->nr_entries < 2) {
+ error_report("Invalid number of fw entries (%u) in TDVF Metadata",
+ fw->nr_entries);
+ return -EINVAL;
+ }
+
+ entries_size = fw->nr_entries * sizeof(TdvfSectionEntry);
+ if (metadata->Length != sizeof(*metadata) + entries_size) {
+ error_report("TDVF metadata len (0x%x) mismatch, expected (0x%x)",
+ metadata->Length,
+ (uint32_t)(sizeof(*metadata) + entries_size));
+ return -EINVAL;
+ }
+
+ fw->entries = g_new(TdxFirmwareEntry, fw->nr_entries);
+ sections = g_new(TdvfSectionEntry, fw->nr_entries);
+
+ memcpy(sections, (void *)metadata + sizeof(*metadata), entries_size);
+
+ for (i = 0; i < fw->nr_entries; i++) {
+ if (tdvf_parse_and_check_section_entry(&sections[i], &fw->entries[i])) {
+ goto err;
+ }
+ }
+
+ fw->mem_ptr = flash_ptr;
+ return 0;
+
+err:
+ g_free(fw->entries);
+ fw->entries = NULL;
+ return -EINVAL;
+}
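
A note on the lookup in tdvf_get_metadata() above: the value stored behind TDX_METADATA_OFFSET_GUID is the distance of the TdvfMetadata block from the end of the firmware image, so the parser rebases it against the image size before checking the signature, length and version. A minimal sketch of that locating step, with a hypothetical find_guid_entry() standing in for pc_system_ovmf_table_find() and error reporting trimmed:

    /* Sketch only: locate the metadata block from an offset that is
     * stored relative to the end of the image. */
    static TdvfMetadata *locate_metadata_sketch(uint8_t *image, uint32_t image_size)
    {
        uint8_t *entry;
        uint32_t off;

        if (!find_guid_entry(TDX_METADATA_OFFSET_GUID, &entry)) {
            return NULL;                        /* no GUID entry: not a TDVF */
        }
        off = image_size -
              le32_to_cpu(((struct tdx_metadata_offset *)entry)->offset);
        if (off + sizeof(TdvfMetadata) > image_size) {
            return NULL;                        /* points outside the image */
        }
        return (TdvfMetadata *)(image + off);
    }
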
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 53c02d7..ac9e1a1 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -68,6 +68,7 @@ vtd_frr_new(int index, uint64_t hi, uint64_t lo) "index %d high 0x%"PRIx64" low
vtd_warn_invalid_qi_tail(uint16_t tail) "tail 0x%"PRIx16
vtd_warn_ir_vector(uint16_t sid, int index, int vec, int target) "sid 0x%"PRIx16" index %d vec %d (should be: %d)"
vtd_warn_ir_trigger(uint16_t sid, int index, int trig, int target) "sid 0x%"PRIx16" index %d trigger %d (should be: %d)"
+vtd_reset_exit(void) ""
# amd_iommu.c
amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32
diff --git a/hw/i386/vapic.c b/hw/i386/vapic.c
index f5b1db7..0c1c92c 100644
--- a/hw/i386/vapic.c
+++ b/hw/i386/vapic.c
@@ -11,12 +11,13 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
-#include "exec/address-spaces.h"
+#include "exec/target_page.h"
+#include "system/system.h"
+#include "system/cpus.h"
+#include "system/hw_accel.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
+#include "system/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/sysbus.h"
#include "hw/boards.h"
@@ -718,7 +719,7 @@ static uint64_t vapic_read(void *opaque, hwaddr addr, unsigned size)
static const MemoryRegionOps vapic_ops = {
.write = vapic_write,
.read = vapic_read,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vapic_realize(DeviceState *dev, Error **errp)
@@ -846,11 +847,11 @@ static const VMStateDescription vmstate_vapic = {
}
};
-static void vapic_class_init(ObjectClass *klass, void *data)
+static void vapic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = vapic_reset;
+ device_class_set_legacy_reset(dc, vapic_reset);
dc->vmsd = &vmstate_vapic;
dc->realize = vapic_realize;
}
diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c
index a8d014d..3896159 100644
--- a/hw/i386/vmmouse.c
+++ b/hw/i386/vmmouse.c
@@ -317,17 +317,16 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp)
vmport_register(VMPORT_CMD_VMMOUSE_DATA, vmmouse_ioport_read, s);
}
-static Property vmmouse_properties[] = {
+static const Property vmmouse_properties[] = {
DEFINE_PROP_LINK("i8042", VMMouseState, i8042, TYPE_I8042, ISAKBDState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vmmouse_class_initfn(ObjectClass *klass, void *data)
+static void vmmouse_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = vmmouse_realizefn;
- dc->reset = vmmouse_reset;
+ device_class_set_legacy_reset(dc, vmmouse_reset);
dc->vmsd = &vmstate_vmmouse;
device_class_set_props(dc, vmmouse_properties);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
diff --git a/hw/i386/vmport.c b/hw/i386/vmport.c
index 7cc75db..6d93457 100644
--- a/hw/i386/vmport.c
+++ b/hw/i386/vmport.c
@@ -33,9 +33,9 @@
#include "hw/i386/vmport.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/qtest.h"
+#include "system/system.h"
+#include "system/hw_accel.h"
+#include "system/qtest.h"
#include "qemu/log.h"
#include "trace.h"
#include "qom/object.h"
@@ -252,7 +252,7 @@ static void vmport_realizefn(DeviceState *dev, Error **errp)
}
}
-static Property vmport_properties[] = {
+static const Property vmport_properties[] = {
/* Used to enforce compatibility for migration */
DEFINE_PROP_BIT("x-read-set-eax", VMPortState, compat_flags,
VMPORT_COMPAT_READ_SET_EAX_BIT, true),
@@ -284,11 +284,9 @@ static Property vmport_properties[] = {
* 5 - ACE 1.x (Deprecated)
*/
DEFINE_PROP_UINT8("vmware-vmx-type", VMPortState, vmware_vmx_type, 2),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void vmport_class_initfn(ObjectClass *klass, void *data)
+static void vmport_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/i386/x86-common.c b/hw/i386/x86-common.c
index c0c66a0..b1b5f11 100644
--- a/hw/i386/x86-common.c
+++ b/hw/i386/x86-common.c
@@ -26,9 +26,9 @@
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
-#include "sysemu/numa.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
+#include "system/numa.h"
+#include "system/system.h"
+#include "system/xen.h"
#include "trace.h"
#include "hw/i386/x86.h"
@@ -44,6 +44,7 @@
#include "standard-headers/asm-x86/bootparam.h"
#include CONFIG_DEVICES
#include "kvm/kvm_i386.h"
+#include "kvm/tdx.h"
#ifdef CONFIG_XEN_EMU
#include "hw/xen/xen.h"
@@ -248,9 +249,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
CPUX86State *env = &cpu->env;
MachineState *ms = MACHINE(hotplug_dev);
X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
- unsigned int smp_cores = ms->smp.cores;
- unsigned int smp_threads = ms->smp.threads;
- X86CPUTopoInfo topo_info;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
@@ -269,16 +268,14 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
}
}
- init_topo_info(&topo_info, x86ms);
+ init_topo_info(topo_info, x86ms);
if (ms->smp.modules > 1) {
- env->nr_modules = ms->smp.modules;
- set_bit(CPU_TOPO_LEVEL_MODULE, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo);
}
if (ms->smp.dies > 1) {
- env->nr_dies = ms->smp.dies;
- set_bit(CPU_TOPO_LEVEL_DIE, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo);
}
/*
@@ -329,17 +326,17 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
if (cpu->core_id < 0) {
error_setg(errp, "CPU core-id is not set");
return;
- } else if (cpu->core_id > (smp_cores - 1)) {
+ } else if (cpu->core_id > (ms->smp.cores - 1)) {
error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
- cpu->core_id, smp_cores - 1);
+ cpu->core_id, ms->smp.cores - 1);
return;
}
if (cpu->thread_id < 0) {
error_setg(errp, "CPU thread-id is not set");
return;
- } else if (cpu->thread_id > (smp_threads - 1)) {
+ } else if (cpu->thread_id > (ms->smp.threads - 1)) {
error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u",
- cpu->thread_id, smp_threads - 1);
+ cpu->thread_id, ms->smp.threads - 1);
return;
}
@@ -348,12 +345,12 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
topo_ids.module_id = cpu->module_id;
topo_ids.core_id = cpu->core_id;
topo_ids.smt_id = cpu->thread_id;
- cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
+ cpu->apic_id = x86_apicid_from_topo_ids(topo_info, &topo_ids);
}
cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
if (!cpu_slot) {
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
error_setg(errp,
"Invalid CPU [socket: %u, die: %u, module: %u, core: %u, thread: %u]"
@@ -376,7 +373,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
* CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
" 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
@@ -450,8 +447,27 @@ static long get_file_size(FILE *f)
void gsi_handler(void *opaque, int n, int level)
{
GSIState *s = opaque;
+ bool bypass_ioapic = false;
trace_x86_gsi_interrupt(n, level);
+
+#ifdef CONFIG_XEN_EMU
+ /*
+ * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC
+ * routing actually works properly under Xen). And then to
+ * *either* the PIRQ handling or the I/OAPIC depending on whether
+ * the former wants it.
+ *
+ * Additionally, this hook allows the Xen event channel GSI to
+ * work around QEMU's lack of support for shared level interrupts,
+ * by keeping track of the externally driven state of the pin and
+ * implementing a logical OR with the state of the evtchn GSI.
+ */
+ if (xen_mode == XEN_EMULATE) {
+ bypass_ioapic = xen_evtchn_set_gsi(n, &level);
+ }
+#endif
+
switch (n) {
case 0 ... ISA_NUM_IRQS - 1:
if (s->i8259_irq[n]) {
@@ -460,18 +476,9 @@ void gsi_handler(void *opaque, int n, int level)
}
/* fall through */
case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
-#ifdef CONFIG_XEN_EMU
- /*
- * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC
- * routing actually works properly under Xen). And then to
- * *either* the PIRQ handling or the I/OAPIC depending on
- * whether the former wants it.
- */
- if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) {
- break;
+ if (!bypass_ioapic) {
+ qemu_set_irq(s->ioapic_irq[n], level);
}
-#endif
- qemu_set_irq(s->ioapic_irq[n], level);
break;
case IO_APIC_SECONDARY_IRQBASE
... IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1:
@@ -586,7 +593,7 @@ static bool load_elfboot(const char *kernel_filename,
uint64_t elf_low, elf_high;
int kernel_size;
- if (ldl_p(header) != 0x464c457f) {
+ if (ldl_le_p(header) != 0x464c457f) {
return false; /* no elfboot */
}
@@ -602,8 +609,8 @@ static bool load_elfboot(const char *kernel_filename,
uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY;
kernel_size = load_elf(kernel_filename, read_pvh_start_addr,
NULL, &elf_note_type, &elf_entry,
- &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE,
- 0, 0);
+ &elf_low, &elf_high, NULL,
+ ELFDATA2LSB, I386_ELF_MACHINE, 0, 0);
if (kernel_size < 0) {
error_report("Error while loading elf kernel");
@@ -665,9 +672,12 @@ void x86_load_linux(X86MachineState *x86ms,
exit(1);
}
- /* kernel protocol version */
- if (ldl_p(header + 0x202) == 0x53726448) {
- protocol = lduw_p(header + 0x206);
+ /*
+ * kernel protocol version.
+ * Please see https://www.kernel.org/doc/Documentation/x86/boot.txt
+ */
+ if (ldl_le_p(header + 0x202) == 0x53726448) /* Magic signature "HdrS" */ {
+ protocol = lduw_le_p(header + 0x206);
} else {
/*
* This could be a multiboot kernel. If it is, let's stop treating it
@@ -694,9 +704,11 @@ void x86_load_linux(X86MachineState *x86ms,
strlen(kernel_cmdline) + 1);
fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);
+ setup = g_memdup2(header, sizeof(header));
+
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header));
fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA,
- header, sizeof(header));
+ setup, sizeof(header));
/* load initrd */
if (initrd_filename) {
@@ -759,7 +771,7 @@ void x86_load_linux(X86MachineState *x86ms,
/* highest address for loading the initrd */
if (protocol >= 0x20c &&
- lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
+ lduw_le_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
/*
* Linux has supported initrd up to 4 GB for a very long time (2007,
* long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013),
@@ -778,7 +790,7 @@ void x86_load_linux(X86MachineState *x86ms,
*/
initrd_max = UINT32_MAX;
} else if (protocol >= 0x203) {
- initrd_max = ldl_p(header + 0x22c);
+ initrd_max = ldl_le_p(header + 0x22c);
} else {
initrd_max = 0x37ffffff;
}
@@ -794,10 +806,10 @@ void x86_load_linux(X86MachineState *x86ms,
sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;
if (protocol >= 0x202) {
- stl_p(header + 0x228, cmdline_addr);
+ stl_le_p(header + 0x228, cmdline_addr);
} else {
- stw_p(header + 0x20, 0xA33F);
- stw_p(header + 0x22, cmdline_addr - real_addr);
+ stw_le_p(header + 0x20, 0xA33F);
+ stw_le_p(header + 0x22, cmdline_addr - real_addr);
}
/* handle vga= parameter */
@@ -821,7 +833,7 @@ void x86_load_linux(X86MachineState *x86ms,
exit(1);
}
}
- stw_p(header + 0x1fa, video_mode);
+ stw_le_p(header + 0x1fa, video_mode);
}
/* loader type */
@@ -836,7 +848,7 @@ void x86_load_linux(X86MachineState *x86ms,
/* heap */
if (protocol >= 0x201) {
header[0x211] |= 0x80; /* CAN_USE_HEAP */
- stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
+ stw_le_p(header + 0x224, cmdline_addr - real_addr - 0x200);
}
/* load initrd */
@@ -876,8 +888,8 @@ void x86_load_linux(X86MachineState *x86ms,
sev_load_ctx.initrd_data = initrd_data;
sev_load_ctx.initrd_size = initrd_size;
- stl_p(header + 0x218, initrd_addr);
- stl_p(header + 0x21c, initrd_size);
+ stl_le_p(header + 0x218, initrd_addr);
+ stl_le_p(header + 0x21c, initrd_size);
}
/* load kernel and setup */
@@ -890,7 +902,6 @@ void x86_load_linux(X86MachineState *x86ms,
fprintf(stderr, "qemu: invalid kernel header\n");
exit(1);
}
- kernel_size -= setup_size;
setup = g_malloc(setup_size);
kernel = g_malloc(kernel_size);
@@ -899,6 +910,7 @@ void x86_load_linux(X86MachineState *x86ms,
fprintf(stderr, "fread() failed\n");
exit(1);
}
+ fseek(f, 0, SEEK_SET);
if (fread(kernel, 1, kernel_size, f) != kernel_size) {
fprintf(stderr, "fread() failed\n");
exit(1);
@@ -923,7 +935,7 @@ void x86_load_linux(X86MachineState *x86ms,
kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
kernel = g_realloc(kernel, kernel_size);
- stq_p(header + 0x250, prot_addr + setup_data_offset);
+ stq_le_p(header + 0x250, prot_addr + setup_data_offset);
setup_data = (struct setup_data *)(kernel + setup_data_offset);
setup_data->next = 0;
@@ -940,15 +952,16 @@ void x86_load_linux(X86MachineState *x86ms,
* kernel on the other side of the fw_cfg interface matches the hash of the
* file the user passed in.
*/
- if (!sev_enabled()) {
+ if (!sev_enabled() && protocol > 0) {
memcpy(setup, header, MIN(sizeof(header), setup_size));
}
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
- fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
- fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
- sev_load_ctx.kernel_data = (char *)kernel;
- sev_load_ctx.kernel_size = kernel_size;
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size - setup_size);
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA,
+ kernel + setup_size, kernel_size - setup_size);
+ sev_load_ctx.kernel_data = (char *)kernel + setup_size;
+ sev_load_ctx.kernel_size = kernel_size - setup_size;
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
@@ -956,6 +969,25 @@ void x86_load_linux(X86MachineState *x86ms,
sev_load_ctx.setup_data = (char *)setup;
sev_load_ctx.setup_size = setup_size;
+ /* The raw kernel image as read from the file, without the setup header patches. */
+ fw_cfg_add_file(fw_cfg, "etc/boot/kernel", kernel, kernel_size);
+
+ if (machine->shim_filename) {
+ GMappedFile *mapped_file;
+ GError *gerr = NULL;
+
+ mapped_file = g_mapped_file_new(machine->shim_filename, false, &gerr);
+ if (!mapped_file) {
+ fprintf(stderr, "qemu: error reading shim %s: %s\n",
+ machine->shim_filename, gerr->message);
+ exit(1);
+ }
+
+ fw_cfg_add_file(fw_cfg, "etc/boot/shim",
+ g_mapped_file_get_contents(mapped_file),
+ g_mapped_file_get_length(mapped_file));
+ }
+
if (sev_enabled()) {
sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal);
}
@@ -1004,11 +1036,14 @@ void x86_bios_rom_init(X86MachineState *x86ms, const char *default_firmware,
if (machine_require_guest_memfd(MACHINE(x86ms))) {
memory_region_init_ram_guest_memfd(&x86ms->bios, NULL, "pc.bios",
bios_size, &error_fatal);
+ if (is_tdx_vm()) {
+ tdx_set_tdvf_region(&x86ms->bios);
+ }
} else {
memory_region_init_ram(&x86ms->bios, NULL, "pc.bios",
bios_size, &error_fatal);
}
- if (sev_enabled()) {
+ if (sev_enabled() || is_tdx_vm()) {
/*
* The concept of a "reset" simply doesn't exist for
* confidential computing guests, we have to destroy and
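
The x86-common.c hunks above switch the boot-protocol accesses from the target-endian ld*_p()/st*_p() helpers to explicitly little-endian ones, since the Linux/x86 setup header is defined as little-endian regardless of host or target byte order. A portable sketch of what the two reads at offsets 0x202 and 0x206 amount to (illustration only, not the QEMU helpers themselves):

    #include <stdint.h>

    static uint32_t rd32_le(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static uint16_t rd16_le(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    /* header[] holds the start of the bzImage, as in x86_load_linux(). */
    static int boot_protocol_version(const uint8_t *header)
    {
        if (rd32_le(header + 0x202) != 0x53726448) {  /* "HdrS" magic */
            return 0;                                 /* old or non-Linux image */
        }
        return rd16_le(header + 0x206);               /* protocol revision */
    }
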
diff --git a/hw/i386/x86-cpu.c b/hw/i386/x86-cpu.c
index ab29205..c876e67 100644
--- a/hw/i386/x86-cpu.c
+++ b/hw/i386/x86-cpu.c
@@ -21,15 +21,15 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "sysemu/whpx.h"
-#include "sysemu/cpu-timers.h"
+#include "system/whpx.h"
+#include "system/cpu-timers.h"
#include "trace.h"
#include "hw/i386/x86.h"
#include "target/i386/cpu.h"
#include "hw/intc/i8259.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
/* TSC handling */
uint64_t cpu_get_tsc(CPUX86State *env)
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index 60af896..d34a684 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -25,7 +25,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
void x86_iommu_iec_register_notifier(X86IOMMUState *iommu,
iec_notify_fn fn, void *data)
@@ -125,15 +125,14 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp)
}
}
-static Property x86_iommu_properties[] = {
+static const Property x86_iommu_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("intremap", X86IOMMUState,
intr_supported, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false),
DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void x86_iommu_class_init(ObjectClass *klass, void *data)
+static void x86_iommu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = x86_iommu_realize;
@@ -147,7 +146,7 @@ bool x86_iommu_ir_supported(X86IOMMUState *s)
static const TypeInfo x86_iommu_info = {
.name = TYPE_X86_IOMMU_DEVICE,
- .parent = TYPE_SYS_BUS_DEVICE,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
.instance_size = sizeof(X86IOMMUState),
.class_init = x86_iommu_class_init,
.class_size = sizeof(X86IOMMUClass),
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 01fc5e6..f80533d 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -27,8 +27,8 @@
#include "qapi/qapi-visit-common.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
-#include "sysemu/qtest.h"
-#include "sysemu/numa.h"
+#include "system/qtest.h"
+#include "system/numa.h"
#include "trace.h"
#include "hw/acpi/aml-build.h"
@@ -372,7 +372,7 @@ static void x86_machine_initfn(Object *obj)
x86ms->above_4g_mem_start = 4 * GiB;
}
-static void x86_machine_class_init(ObjectClass *oc, void *data)
+static void x86_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
@@ -382,7 +382,6 @@ static void x86_machine_class_init(ObjectClass *oc, void *data)
mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
mc->kvm_type = x86_kvm_type;
- x86mc->save_tsc_khz = true;
x86mc->fwcfg_dma_enabled = true;
nc->nmi_monitor_handler = x86_nmi;
@@ -450,7 +449,7 @@ static const TypeInfo x86_machine_info = {
.instance_init = x86_machine_initfn,
.class_size = sizeof(X86MachineClass),
.class_init = x86_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ }
},
diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build
index 3f0df8b..c73c62b 100644
--- a/hw/i386/xen/meson.build
+++ b/hw/i386/xen/meson.build
@@ -4,6 +4,7 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files(
))
i386_ss.add(when: ['CONFIG_XEN', xen], if_true: files(
'xen-hvm.c',
+ 'xen-pvh.c',
))
i386_ss.add(when: 'CONFIG_XEN_BUS', if_true: files(
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index 4f64466..ceb2242 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -10,10 +10,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"
+#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/irq.h"
#include "hw/i386/apic-msidef.h"
@@ -24,6 +26,10 @@
#include "hw/xen/arch_hvm.h"
#include <xen/hvm/e820.h>
#include "exec/target_page.h"
+#include "target/i386/cpu.h"
+#include "system/runstate.h"
+#include "system/xen-mapcache.h"
+#include "system/xen.h"
static MemoryRegion ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
@@ -614,7 +620,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
state = g_new0(XenIOState, 1);
- xen_register_ioreq(state, max_cpus, &xen_memory_listener);
+ xen_register_ioreq(state, max_cpus,
+ HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ &xen_memory_listener);
xen_is_stubdomain = xen_check_stubdomain(state->xenstore);
@@ -750,6 +758,4 @@ void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
default:
hw_error("Invalid ioreq type 0x%x\n", req->type);
}
-
- return;
}
diff --git a/hw/i386/xen/xen-pvh.c b/hw/i386/xen/xen-pvh.c
new file mode 100644
index 0000000..067f73e
--- /dev/null
+++ b/hw/i386/xen/xen-pvh.c
@@ -0,0 +1,125 @@
+/*
+ * QEMU Xen PVH x86 Machine
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ * Written by Edgar E. Iglesias <edgar.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "hw/boards.h"
+#include "system/system.h"
+#include "hw/xen/arch_hvm.h"
+#include <xen/hvm/hvm_info_table.h>
+#include "hw/xen/xen-pvh-common.h"
+#include "target/i386/cpu.h"
+
+#define TYPE_XEN_PVH_X86 MACHINE_TYPE_NAME("xenpvh")
+OBJECT_DECLARE_SIMPLE_TYPE(XenPVHx86State, XEN_PVH_X86)
+
+struct XenPVHx86State {
+ /*< private >*/
+ XenPVHMachineState parent;
+
+ DeviceState **cpu;
+};
+
+static DeviceState *xen_pvh_cpu_new(MachineState *ms,
+ int64_t apic_id)
+{
+ Object *cpu = object_new(ms->cpu_type);
+
+ object_property_add_child(OBJECT(ms), "cpu[*]", cpu);
+ object_property_set_uint(cpu, "apic-id", apic_id, &error_fatal);
+ qdev_realize(DEVICE(cpu), NULL, &error_fatal);
+ object_unref(cpu);
+
+ return DEVICE(cpu);
+}
+
+static void xen_pvh_init(MachineState *ms)
+{
+ XenPVHx86State *xp = XEN_PVH_X86(ms);
+ int i;
+
+ /* Create dummy cores. This will indirectly create the APIC MSI window. */
+ xp->cpu = g_malloc(sizeof xp->cpu[0] * ms->smp.max_cpus);
+ for (i = 0; i < ms->smp.max_cpus; i++) {
+ xp->cpu[i] = xen_pvh_cpu_new(ms, i);
+ }
+}
+
+static void xen_pvh_instance_init(Object *obj)
+{
+ XenPVHMachineState *s = XEN_PVH_MACHINE(obj);
+
+ /* Default values. */
+ s->cfg.ram_low = (MemMapEntry) { 0x0, 0x80000000U };
+ s->cfg.ram_high = (MemMapEntry) { 0xC000000000ULL, 0x4000000000ULL };
+ s->cfg.pci_intx_irq_base = 16;
+}
+
+/*
+ * Deliver INTX interrupts to Xen guest.
+ */
+static void xen_pvh_set_pci_intx_irq(void *opaque, int irq, int level)
+{
+ /*
+ * Since QEMU emulates all of the swizzling, we don't want Xen
+ * to do any additional swizzling in xen_set_pci_intx_level(),
+ * so we always set device to 0.
+ */
+ if (xen_set_pci_intx_level(xen_domid, 0, 0, 0, irq, level)) {
+ error_report("xendevicemodel_set_pci_intx_level failed");
+ }
+}
+
+static void xen_pvh_machine_class_init(ObjectClass *oc, const void *data)
+{
+ XenPVHMachineClass *xpc = XEN_PVH_MACHINE_CLASS(oc);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "Xen PVH x86 machine";
+ mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE;
+
+ /* mc->max_cpus holds the MAX value allowed in the -smp cmd-line opts. */
+ mc->max_cpus = HVM_MAX_VCPUS;
+
+ /* We have an implementation specific init to create CPU objects. */
+ xpc->init = xen_pvh_init;
+
+ /* Enable buffered IOREQs. */
+ xpc->handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_ATOMIC;
+
+ /*
+ * PCI INTX routing.
+ *
+ * We describe the mapping between the 4 INTX interrupts and GSIs
+ * using xen_set_pci_link_route(). xen_pvh_set_pci_intx_irq is
+ * used to deliver the interrupt.
+ */
+ xpc->set_pci_intx_irq = xen_pvh_set_pci_intx_irq;
+ xpc->set_pci_link_route = xen_set_pci_link_route;
+
+ /* List of supported features known to work on PVH x86. */
+ xpc->has_pci = true;
+
+ xen_pvh_class_setup_common_props(xpc);
+}
+
+static const TypeInfo xen_pvh_x86_machine_type = {
+ .name = TYPE_XEN_PVH_X86,
+ .parent = TYPE_XEN_PVH_MACHINE,
+ .class_init = xen_pvh_machine_class_init,
+ .instance_init = xen_pvh_instance_init,
+ .instance_size = sizeof(XenPVHx86State),
+};
+
+static void xen_pvh_machine_register_types(void)
+{
+ type_register_static(&xen_pvh_x86_machine_type);
+}
+
+type_init(xen_pvh_machine_register_types)
diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c
index 101e16a..f30398f 100644
--- a/hw/i386/xen/xen_apic.c
+++ b/hw/i386/xen/xen_apic.c
@@ -36,7 +36,7 @@ static void xen_apic_mem_write(void *opaque, hwaddr addr,
static const MemoryRegionOps xen_apic_io_ops = {
.read = xen_apic_mem_read,
.write = xen_apic_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void xen_apic_realize(DeviceState *dev, Error **errp)
@@ -76,7 +76,7 @@ static void xen_send_msi(MSIMessage *msi)
xen_hvm_inject_msi(msi->address, msi->data);
}
-static void xen_apic_class_init(ObjectClass *klass, void *data)
+static void xen_apic_class_init(ObjectClass *klass, const void *data)
{
APICCommonClass *k = APIC_COMMON_CLASS(klass);
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 708488a..c8b852b 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -30,8 +30,8 @@
#include "migration/vmstate.h"
#include "net/net.h"
#include "trace.h"
-#include "sysemu/xen.h"
-#include "sysemu/block-backend.h"
+#include "system/xen.h"
+#include "system/block-backend.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qom/object.h"
@@ -514,7 +514,7 @@ static void platform_mmio_write(void *opaque, hwaddr addr,
static const MemoryRegionOps platform_mmio_handler = {
.read = &platform_mmio_read,
.write = &platform_mmio_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void platform_mmio_setup(PCIXenPlatformState *d)
@@ -581,7 +581,7 @@ static void platform_reset(DeviceState *dev)
platform_fixed_ioport_reset(s);
}
-static void xen_platform_class_init(ObjectClass *klass, void *data)
+static void xen_platform_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -595,7 +595,7 @@ static void xen_platform_class_init(ObjectClass *klass, void *data)
k->revision = 1;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "XEN platform pci device";
- dc->reset = platform_reset;
+ device_class_set_legacy_reset(dc, platform_reset);
dc->vmsd = &vmstate_xen_platform;
}
@@ -604,7 +604,7 @@ static const TypeInfo xen_platform_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIXenPlatformState),
.class_init = xen_platform_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c
index ed62153..87a974a 100644
--- a/hw/i386/xen/xen_pvdevice.c
+++ b/hw/i386/xen/xen_pvdevice.c
@@ -115,15 +115,14 @@ static void xen_pv_realize(PCIDevice *pci_dev, Error **errp)
&d->mmio);
}
-static Property xen_pv_props[] = {
+static const Property xen_pv_props[] = {
DEFINE_PROP_UINT16("vendor-id", XenPVDevice, vendor_id, PCI_VENDOR_ID_XEN),
DEFINE_PROP_UINT16("device-id", XenPVDevice, device_id, 0xffff),
DEFINE_PROP_UINT8("revision", XenPVDevice, revision, 0x01),
DEFINE_PROP_UINT32("size", XenPVDevice, size, 0x400000),
- DEFINE_PROP_END_OF_LIST()
};
-static void xen_pv_class_init(ObjectClass *klass, void *data)
+static void xen_pv_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -140,7 +139,7 @@ static const TypeInfo xen_pv_type_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(XenPVDevice),
.class_init = xen_pv_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig
index 6dfc5a2..b55507b 100644
--- a/hw/ide/Kconfig
+++ b/hw/ide/Kconfig
@@ -43,12 +43,6 @@ config IDE_VIA
bool
select IDE_PCI
-config MICRODRIVE
- bool
- select IDE_BUS
- select IDE_DEV
- depends on PCMCIA
-
config AHCI
bool
select IDE_BUS
@@ -60,6 +54,10 @@ config AHCI_ICH9
depends on PCI
select AHCI
+config AHCI_SYSBUS
+ bool
+ select AHCI
+
config IDE_SII3112
bool
select IDE_PCI
diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c
index 9620de8..bc7a116 100644
--- a/hw/ide/ahci-allwinner.c
+++ b/hw/ide/ahci-allwinner.c
@@ -18,7 +18,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "migration/vmstate.h"
#include "hw/ide/ahci-sysbus.h"
@@ -103,7 +103,7 @@ static const VMStateDescription vmstate_allwinner_ahci = {
}
};
-static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
+static void allwinner_ahci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ide/ahci-internal.h b/hw/ide/ahci-internal.h
index 7e63ea2..a318f36 100644
--- a/hw/ide/ahci-internal.h
+++ b/hw/ide/ahci-internal.h
@@ -25,7 +25,6 @@
#define HW_IDE_AHCI_INTERNAL_H
#include "hw/ide/ahci.h"
-#include "hw/pci/pci_device.h"
#include "ide-internal.h"
#define AHCI_MEM_BAR_SIZE 0x1000
diff --git a/hw/ide/ahci-sysbus.c b/hw/ide/ahci-sysbus.c
new file mode 100644
index 0000000..210818d
--- /dev/null
+++ b/hw/ide/ahci-sysbus.c
@@ -0,0 +1,90 @@
+/*
+ * QEMU AHCI Emulation (MMIO-mapped devices)
+ *
+ * Copyright (c) 2010 qiaochong@loongson.cn
+ * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
+ * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
+ * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "system/address-spaces.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+#include "hw/ide/ahci-sysbus.h"
+#include "ahci-internal.h"
+
+static const VMStateDescription vmstate_sysbus_ahci = {
+ .name = "sysbus-ahci",
+ .fields = (const VMStateField[]) {
+ VMSTATE_AHCI(ahci, SysbusAHCIState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void sysbus_ahci_reset(DeviceState *dev)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(dev);
+
+ ahci_reset(&s->ahci);
+}
+
+static void sysbus_ahci_init(Object *obj)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ ahci_init(&s->ahci, DEVICE(obj));
+
+ sysbus_init_mmio(sbd, &s->ahci.mem);
+ sysbus_init_irq(sbd, &s->ahci.irq);
+}
+
+static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(dev);
+
+ ahci_realize(&s->ahci, dev, &address_space_memory);
+}
+
+static const Property sysbus_ahci_properties[] = {
+ DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, ahci.ports, 1),
+};
+
+static void sysbus_ahci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sysbus_ahci_realize;
+ dc->vmsd = &vmstate_sysbus_ahci;
+ device_class_set_props(dc, sysbus_ahci_properties);
+ device_class_set_legacy_reset(dc, sysbus_ahci_reset);
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo sysbus_ahci_types[] = {
+ {
+ .name = TYPE_SYSBUS_AHCI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysbusAHCIState),
+ .instance_init = sysbus_ahci_init,
+ .class_init = sysbus_ahci_class_init,
+ },
+};
+
+DEFINE_TYPES(sysbus_ahci_types)
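
With the sysbus front end split out, a board that selects AHCI_SYSBUS wires the device up through the ordinary sysbus helpers. A hedged usage sketch; the MMIO base, IRQ line and port count below are placeholders, not values taken from this series:

    /* Board-side wiring sketch for TYPE_SYSBUS_AHCI (placeholder values). */
    DeviceState *dev = qdev_new(TYPE_SYSBUS_AHCI);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    qdev_prop_set_uint32(dev, "num-ports", 2);
    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, 0x10000000);      /* map the AHCI MMIO region */
    sysbus_connect_irq(sbd, 0, board_irq);    /* route the AHCI interrupt */
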
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index bfefad2..1303c21 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -23,20 +23,13 @@
#include "qemu/osdep.h"
#include "hw/irq.h"
-#include "hw/pci/msi.h"
-#include "hw/pci/pci.h"
-#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
-#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/dma.h"
-#include "hw/ide/pci.h"
-#include "hw/ide/ahci-pci.h"
-#include "hw/ide/ahci-sysbus.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
#include "ahci-internal.h"
#include "ide-internal.h"
@@ -179,34 +172,6 @@ static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
return val;
}
-static void ahci_irq_raise(AHCIState *s)
-{
- DeviceState *dev_state = s->container;
- PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
- TYPE_PCI_DEVICE);
-
- trace_ahci_irq_raise(s);
-
- if (pci_dev && msi_enabled(pci_dev)) {
- msi_notify(pci_dev, 0);
- } else {
- qemu_irq_raise(s->irq);
- }
-}
-
-static void ahci_irq_lower(AHCIState *s)
-{
- DeviceState *dev_state = s->container;
- PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
- TYPE_PCI_DEVICE);
-
- trace_ahci_irq_lower(s);
-
- if (!pci_dev || !msi_enabled(pci_dev)) {
- qemu_irq_lower(s->irq);
- }
-}
-
static void ahci_check_irq(AHCIState *s)
{
int i;
@@ -222,9 +187,11 @@ static void ahci_check_irq(AHCIState *s)
trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus);
if (s->control_regs.irqstatus &&
(s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
- ahci_irq_raise(s);
+ trace_ahci_irq_raise(s);
+ qemu_irq_raise(s->irq);
} else {
- ahci_irq_lower(s);
+ trace_ahci_irq_lower(s);
+ qemu_irq_lower(s->irq);
}
}
@@ -948,7 +915,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
uint64_t sum = 0;
int off_idx = -1;
int64_t off_pos = -1;
- int tbl_entry_size;
IDEBus *bus = &ad->port;
BusState *qbus = BUS(bus);
@@ -976,6 +942,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
/* Get entries in the PRDT, init a qemu sglist accordingly */
if (prdtl > 0) {
AHCI_SG *tbl = (AHCI_SG *)prdt;
+ int tbl_entry_size = 0;
+
sum = 0;
for (i = 0; i < prdtl; i++) {
tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
@@ -1607,7 +1575,6 @@ static const IDEDMAOps ahci_dma_ops = {
void ahci_init(AHCIState *s, DeviceState *qdev)
{
- s->container = qdev;
/* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
"ahci", AHCI_MEM_BAR_SIZE);
@@ -1833,70 +1800,6 @@ const VMStateDescription vmstate_ahci = {
},
};
-static const VMStateDescription vmstate_sysbus_ahci = {
- .name = "sysbus-ahci",
- .fields = (const VMStateField[]) {
- VMSTATE_AHCI(ahci, SysbusAHCIState),
- VMSTATE_END_OF_LIST()
- },
-};
-
-static void sysbus_ahci_reset(DeviceState *dev)
-{
- SysbusAHCIState *s = SYSBUS_AHCI(dev);
-
- ahci_reset(&s->ahci);
-}
-
-static void sysbus_ahci_init(Object *obj)
-{
- SysbusAHCIState *s = SYSBUS_AHCI(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- ahci_init(&s->ahci, DEVICE(obj));
-
- sysbus_init_mmio(sbd, &s->ahci.mem);
- sysbus_init_irq(sbd, &s->ahci.irq);
-}
-
-static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
-{
- SysbusAHCIState *s = SYSBUS_AHCI(dev);
-
- ahci_realize(&s->ahci, dev, &address_space_memory);
-}
-
-static Property sysbus_ahci_properties[] = {
- DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, ahci.ports, 1),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = sysbus_ahci_realize;
- dc->vmsd = &vmstate_sysbus_ahci;
- device_class_set_props(dc, sysbus_ahci_properties);
- dc->reset = sysbus_ahci_reset;
- set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
-}
-
-static const TypeInfo sysbus_ahci_info = {
- .name = TYPE_SYSBUS_AHCI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SysbusAHCIState),
- .instance_init = sysbus_ahci_init,
- .class_init = sysbus_ahci_class_init,
-};
-
-static void sysbus_ahci_register_types(void)
-{
- type_register_static(&sysbus_ahci_info);
-}
-
-type_init(sysbus_ahci_register_types)
-
void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd)
{
int i;
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index fcb6cca..a42b748 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -26,7 +26,7 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "hw/scsi/scsi.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "scsi/constants.h"
#include "ide-internal.h"
#include "trace.h"
@@ -265,7 +265,7 @@ void ide_atapi_cmd_reply_end(IDEState *s)
byte_count_limit--;
size = byte_count_limit;
}
- s->lcyl = size;
+ s->lcyl = size & 0xff;
s->hcyl = size >> 8;
s->elementary_transfer_size = size;
/* we cannot transmit more than one sector at a time */
diff --git a/hw/ide/cf.c b/hw/ide/cf.c
index 2a425cb..f87cd41 100644
--- a/hw/ide/cf.c
+++ b/hw/ide/cf.c
@@ -24,15 +24,14 @@ static void ide_cf_realize(IDEDevice *dev, Error **errp)
ide_dev_initfn(dev, IDE_CFATA, errp);
}
-static Property ide_cf_properties[] = {
+static const Property ide_cf_properties[] = {
DEFINE_IDE_DEV_PROPERTIES(),
DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf),
DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans",
IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ide_cf_class_init(ObjectClass *klass, void *data)
+static void ide_cf_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IDEDeviceClass *k = IDE_DEVICE_CLASS(klass);
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
index 8cebd1b..2a59516 100644
--- a/hw/ide/cmd646.c
+++ b/hw/ide/cmd646.c
@@ -29,8 +29,8 @@
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "hw/isa/isa.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
+#include "system/dma.h"
+#include "system/reset.h"
#include "hw/ide/pci.h"
#include "ide-internal.h"
@@ -313,17 +313,16 @@ static void pci_cmd646_ide_exitfn(PCIDevice *dev)
}
}
-static Property cmd646_ide_properties[] = {
+static const Property cmd646_ide_properties[] = {
DEFINE_PROP_UINT32("secondary", PCIIDEState, secondary, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cmd646_ide_class_init(ObjectClass *klass, void *data)
+static void cmd646_ide_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- dc->reset = cmd646_reset;
+ device_class_set_legacy_reset(dc, cmd646_reset);
dc->vmsd = &vmstate_ide_pci;
k->realize = pci_cmd646_ide_realize;
k->exit = pci_cmd646_ide_exitfn;
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 08d9218..b14983e 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -32,15 +32,15 @@
#include "qemu/timer.h"
#include "qemu/hw-version.h"
#include "qemu/memalign.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/dma.h"
+#include "system/system.h"
+#include "system/blockdev.h"
+#include "system/dma.h"
#include "hw/block/block.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "ide-internal.h"
#include "trace.h"
@@ -968,8 +968,7 @@ static void ide_dma_cb(void *opaque, int ret)
BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
- s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
- &s->sg, offset, BDRV_SECTOR_SIZE,
+ s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, BDRV_SECTOR_SIZE,
ide_issue_trim, s, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
index 9b909c8..4cade0d 100644
--- a/hw/ide/ich.c
+++ b/hw/ide/ich.c
@@ -61,13 +61,12 @@
*/
#include "qemu/osdep.h"
-#include "hw/irq.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "hw/isa/isa.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/ide/pci.h"
#include "hw/ide/ahci-pci.h"
#include "ahci-internal.h"
@@ -91,6 +90,19 @@ static const VMStateDescription vmstate_ich9_ahci = {
},
};
+static void pci_ich9_ahci_update_irq(void *opaque, int irq_num, int level)
+{
+ PCIDevice *pci_dev = opaque;
+
+ if (msi_enabled(pci_dev)) {
+ if (level) {
+ msi_notify(pci_dev, 0);
+ }
+ } else {
+ pci_set_irq(pci_dev, level);
+ }
+}
+
static void pci_ich9_reset(DeviceState *dev)
{
AHCIPCIState *d = ICH9_AHCI(dev);
@@ -102,7 +114,9 @@ static void pci_ich9_ahci_init(Object *obj)
{
AHCIPCIState *d = ICH9_AHCI(obj);
+ qemu_init_irq(&d->irq, pci_ich9_ahci_update_irq, d, 0);
ahci_init(&d->ahci, DEVICE(obj));
+ d->ahci.irq = &d->irq;
}
static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
@@ -125,8 +139,6 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
/* XXX Software should program this register */
dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */
- d->ahci.irq = pci_allocate_irq(dev);
-
pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
&d->ahci.idp);
pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
@@ -161,10 +173,9 @@ static void pci_ich9_uninit(PCIDevice *dev)
msi_uninit(dev);
ahci_uninit(&d->ahci);
- qemu_free_irq(d->ahci.irq);
}
-static void ich_ahci_class_init(ObjectClass *klass, void *data)
+static void ich_ahci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -176,7 +187,7 @@ static void ich_ahci_class_init(ObjectClass *klass, void *data)
k->revision = 0x02;
k->class_id = PCI_CLASS_STORAGE_SATA;
dc->vmsd = &vmstate_ich9_ahci;
- dc->reset = pci_ich9_reset;
+ device_class_set_legacy_reset(dc, pci_ich9_reset);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -186,7 +197,7 @@ static const TypeInfo ich_ahci_info = {
.instance_size = sizeof(AHCIPCIState),
.instance_init = pci_ich9_ahci_init,
.class_init = ich_ahci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/ide/ide-bus.c b/hw/ide/ide-bus.c
index 37d003d..b24e4d1 100644
--- a/hw/ide/ide-bus.c
+++ b/hw/ide/ide-bus.c
@@ -21,15 +21,15 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/runstate.h"
#include "ide-internal.h"
static char *idebus_get_fw_dev_path(DeviceState *dev);
static void idebus_unrealize(BusState *qdev);
-static void ide_bus_class_init(ObjectClass *klass, void *data)
+static void ide_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
diff --git a/hw/ide/ide-dev.c b/hw/ide/ide-dev.c
index 03f7967..5d47858 100644
--- a/hw/ide/ide-dev.c
+++ b/hw/ide/ide-dev.c
@@ -23,16 +23,15 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/ide-dev.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
#include "qapi/visitor.h"
#include "ide-internal.h"
-static Property ide_props[] = {
+static const Property ide_props[] = {
DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1),
DEFINE_PROP_BOOL("win2k-install-hack", IDEDevice, win2k_install_hack, false),
- DEFINE_PROP_END_OF_LIST(),
};
static void ide_qdev_realize(DeviceState *qdev, Error **errp)
@@ -191,16 +190,15 @@ static void ide_cd_realize(IDEDevice *dev, Error **errp)
ide_dev_initfn(dev, IDE_CD, errp);
}
-static Property ide_hd_properties[] = {
+static const Property ide_hd_properties[] = {
DEFINE_IDE_DEV_PROPERTIES(),
DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf),
DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans",
IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO),
DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ide_hd_class_init(ObjectClass *klass, void *data)
+static void ide_hd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IDEDeviceClass *k = IDE_DEVICE_CLASS(klass);
@@ -218,12 +216,11 @@ static const TypeInfo ide_hd_info = {
.class_init = ide_hd_class_init,
};
-static Property ide_cd_properties[] = {
+static const Property ide_cd_properties[] = {
DEFINE_IDE_DEV_PROPERTIES(),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ide_cd_class_init(ObjectClass *klass, void *data)
+static void ide_cd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IDEDeviceClass *k = IDE_DEVICE_CLASS(klass);
@@ -241,7 +238,7 @@ static const TypeInfo ide_cd_info = {
.class_init = ide_cd_class_init,
};
-static void ide_device_class_init(ObjectClass *klass, void *data)
+static void ide_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = ide_qdev_realize;
diff --git a/hw/ide/isa.c b/hw/ide/isa.c
index 934c458..5f41841 100644
--- a/hw/ide/isa.c
+++ b/hw/ide/isa.c
@@ -29,7 +29,7 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/ide/isa.h"
#include "qom/object.h"
@@ -101,20 +101,19 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum,
return isadev;
}
-static Property isa_ide_properties[] = {
+static const Property isa_ide_properties[] = {
DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0),
DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6),
DEFINE_PROP_UINT32("irq", ISAIDEState, irqnum, 14),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isa_ide_class_initfn(ObjectClass *klass, void *data)
+static void isa_ide_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = isa_ide_realizefn;
dc->fw_name = "ide";
- dc->reset = isa_ide_reset;
+ device_class_set_legacy_reset(dc, isa_ide_reset);
device_class_set_props(dc, isa_ide_properties);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index e84bf2c..c23bf32 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -30,8 +30,8 @@
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "hw/misc/macio/macio.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/dma.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
#include "ide-internal.h"
@@ -119,9 +119,6 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
return;
done:
- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
- io->dir, io->dma_len);
-
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
} else {
@@ -190,8 +187,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
pmac_ide_transfer_cb, io);
break;
case IDE_DMA_TRIM:
- s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
- offset, 0x1, ide_issue_trim, s,
+ s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, 0x1, ide_issue_trim, s,
pmac_ide_transfer_cb, io,
DMA_DIRECTION_TO_DEVICE);
break;
@@ -202,9 +198,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
return;
done:
- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
- io->dir, io->dma_len);
-
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
@@ -465,18 +458,17 @@ static void macio_ide_initfn(Object *obj)
qdev_prop_allow_set_link_before_realize, 0);
}
-static Property macio_ide_properties[] = {
+static const Property macio_ide_properties[] = {
DEFINE_PROP_UINT32("channel", MACIOIDEState, channel, 0),
DEFINE_PROP_UINT32("addr", MACIOIDEState, addr, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void macio_ide_class_init(ObjectClass *oc, void *data)
+static void macio_ide_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = macio_ide_realizefn;
- dc->reset = macio_ide_reset;
+ device_class_set_legacy_reset(dc, macio_ide_reset);
device_class_set_props(dc, macio_ide_properties);
dc->vmsd = &vmstate_pmac;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
diff --git a/hw/ide/meson.build b/hw/ide/meson.build
index d09705c..ddd7066 100644
--- a/hw/ide/meson.build
+++ b/hw/ide/meson.build
@@ -1,5 +1,6 @@
system_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c'))
system_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c'))
+system_ss.add(when: 'CONFIG_AHCI_SYSBUS', if_true: files('ahci-sysbus.c'))
system_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c'))
system_ss.add(when: 'CONFIG_IDE_BUS', if_true: files('ide-bus.c'))
system_ss.add(when: 'CONFIG_IDE_CF', if_true: files('cf.c'))
@@ -13,4 +14,3 @@ system_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c'))
system_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c'))
system_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c'))
system_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c'))
-system_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c'))
diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
deleted file mode 100644
index 3bb152b..0000000
--- a/hw/ide/microdrive.c
+++ /dev/null
@@ -1,644 +0,0 @@
-/*
- * QEMU IDE Emulation: microdrive (CF / PCMCIA)
- *
- * Copyright (c) 2003 Fabrice Bellard
- * Copyright (c) 2006 Openedhand Ltd.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/pcmcia.h"
-#include "migration/vmstate.h"
-#include "qapi/error.h"
-#include "qemu/module.h"
-#include "sysemu/dma.h"
-#include "hw/irq.h"
-
-#include "qom/object.h"
-#include "ide-internal.h"
-
-#define TYPE_MICRODRIVE "microdrive"
-OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE)
-
-/***********************************************************/
-/* CF-ATA Microdrive */
-
-#define METADATA_SIZE 0x20
-
-/* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. */
-
-struct MicroDriveState {
- /*< private >*/
- PCMCIACardState parent_obj;
- /*< public >*/
-
- IDEBus bus;
- uint32_t attr_base;
- uint32_t io_base;
-
- /* Card state */
- uint8_t opt;
- uint8_t stat;
- uint8_t pins;
-
- uint8_t ctrl;
- uint16_t io;
- uint8_t cycle;
-};
-
-/* Register bitfields */
-enum md_opt {
- OPT_MODE_MMAP = 0,
- OPT_MODE_IOMAP16 = 1,
- OPT_MODE_IOMAP1 = 2,
- OPT_MODE_IOMAP2 = 3,
- OPT_MODE = 0x3f,
- OPT_LEVIREQ = 0x40,
- OPT_SRESET = 0x80,
-};
-enum md_cstat {
- STAT_INT = 0x02,
- STAT_PWRDWN = 0x04,
- STAT_XE = 0x10,
- STAT_IOIS8 = 0x20,
- STAT_SIGCHG = 0x40,
- STAT_CHANGED = 0x80,
-};
-enum md_pins {
- PINS_MRDY = 0x02,
- PINS_CRDY = 0x20,
-};
-enum md_ctrl {
- CTRL_IEN = 0x02,
- CTRL_SRST = 0x04,
-};
-
-static inline void md_interrupt_update(MicroDriveState *s)
-{
- PCMCIACardState *card = PCMCIA_CARD(s);
-
- if (card->slot == NULL) {
- return;
- }
-
- qemu_set_irq(card->slot->irq,
- !(s->stat & STAT_INT) && /* Inverted */
- !(s->ctrl & (CTRL_IEN | CTRL_SRST)) &&
- !(s->opt & OPT_SRESET));
-}
-
-static void md_set_irq(void *opaque, int irq, int level)
-{
- MicroDriveState *s = opaque;
-
- if (level) {
- s->stat |= STAT_INT;
- } else {
- s->stat &= ~STAT_INT;
- }
-
- md_interrupt_update(s);
-}
-
-static void md_reset(DeviceState *dev)
-{
- MicroDriveState *s = MICRODRIVE(dev);
-
- s->opt = OPT_MODE_MMAP;
- s->stat = 0;
- s->pins = 0;
- s->cycle = 0;
- s->ctrl = 0;
- ide_bus_reset(&s->bus);
-}
-
-static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at)
-{
- MicroDriveState *s = MICRODRIVE(card);
- PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card);
-
- if (at < s->attr_base) {
- if (at < pcc->cis_len) {
- return pcc->cis[at];
- } else {
- return 0x00;
- }
- }
-
- at -= s->attr_base;
-
- switch (at) {
- case 0x00: /* Configuration Option Register */
- return s->opt;
- case 0x02: /* Card Configuration Status Register */
- if (s->ctrl & CTRL_IEN) {
- return s->stat & ~STAT_INT;
- } else {
- return s->stat;
- }
- case 0x04: /* Pin Replacement Register */
- return (s->pins & PINS_CRDY) | 0x0c;
- case 0x06: /* Socket and Copy Register */
- return 0x00;
-#ifdef VERBOSE
- default:
- printf("%s: Bad attribute space register %02x\n", __func__, at);
-#endif
- }
-
- return 0;
-}
-
-static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value)
-{
- MicroDriveState *s = MICRODRIVE(card);
-
- at -= s->attr_base;
-
- switch (at) {
- case 0x00: /* Configuration Option Register */
- s->opt = value & 0xcf;
- if (value & OPT_SRESET) {
- device_cold_reset(DEVICE(s));
- }
- md_interrupt_update(s);
- break;
- case 0x02: /* Card Configuration Status Register */
- if ((s->stat ^ value) & STAT_PWRDWN) {
- s->pins |= PINS_CRDY;
- }
- s->stat &= 0x82;
- s->stat |= value & 0x74;
- md_interrupt_update(s);
- /* Word 170 in Identify Device must be equal to STAT_XE */
- break;
- case 0x04: /* Pin Replacement Register */
- s->pins &= PINS_CRDY;
- s->pins |= value & PINS_MRDY;
- break;
- case 0x06: /* Socket and Copy Register */
- break;
- default:
- printf("%s: Bad attribute space register %02x\n", __func__, at);
- }
-}
-
-static uint16_t md_common_read(PCMCIACardState *card, uint32_t at)
-{
- MicroDriveState *s = MICRODRIVE(card);
- IDEState *ifs;
- uint16_t ret;
- at -= s->io_base;
-
- switch (s->opt & OPT_MODE) {
- case OPT_MODE_MMAP:
- if ((at & ~0x3ff) == 0x400) {
- at = 0;
- }
- break;
- case OPT_MODE_IOMAP16:
- at &= 0xf;
- break;
- case OPT_MODE_IOMAP1:
- if ((at & ~0xf) == 0x3f0) {
- at -= 0x3e8;
- } else if ((at & ~0xf) == 0x1f0) {
- at -= 0x1f0;
- }
- break;
- case OPT_MODE_IOMAP2:
- if ((at & ~0xf) == 0x370) {
- at -= 0x368;
- } else if ((at & ~0xf) == 0x170) {
- at -= 0x170;
- }
- }
-
- switch (at) {
- case 0x0: /* Even RD Data */
- case 0x8:
- return ide_data_readw(&s->bus, 0);
-
- /* TODO: 8-bit accesses */
- if (s->cycle) {
- ret = s->io >> 8;
- } else {
- s->io = ide_data_readw(&s->bus, 0);
- ret = s->io & 0xff;
- }
- s->cycle = !s->cycle;
- return ret;
- case 0x9: /* Odd RD Data */
- return s->io >> 8;
- case 0xd: /* Error */
- return ide_ioport_read(&s->bus, 0x1);
- case 0xe: /* Alternate Status */
- ifs = ide_bus_active_if(&s->bus);
- if (ifs->blk) {
- return ifs->status;
- } else {
- return 0;
- }
- case 0xf: /* Device Address */
- ifs = ide_bus_active_if(&s->bus);
- return 0xc2 | ((~ifs->select << 2) & 0x3c);
- default:
- return ide_ioport_read(&s->bus, at);
- }
-
- return 0;
-}
-
-static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value)
-{
- MicroDriveState *s = MICRODRIVE(card);
- at -= s->io_base;
-
- switch (s->opt & OPT_MODE) {
- case OPT_MODE_MMAP:
- if ((at & ~0x3ff) == 0x400) {
- at = 0;
- }
- break;
- case OPT_MODE_IOMAP16:
- at &= 0xf;
- break;
- case OPT_MODE_IOMAP1:
- if ((at & ~0xf) == 0x3f0) {
- at -= 0x3e8;
- } else if ((at & ~0xf) == 0x1f0) {
- at -= 0x1f0;
- }
- break;
- case OPT_MODE_IOMAP2:
- if ((at & ~0xf) == 0x370) {
- at -= 0x368;
- } else if ((at & ~0xf) == 0x170) {
- at -= 0x170;
- }
- }
-
- switch (at) {
- case 0x0: /* Even WR Data */
- case 0x8:
- ide_data_writew(&s->bus, 0, value);
- break;
-
- /* TODO: 8-bit accesses */
- if (s->cycle) {
- ide_data_writew(&s->bus, 0, s->io | (value << 8));
- } else {
- s->io = value & 0xff;
- }
- s->cycle = !s->cycle;
- break;
- case 0x9:
- s->io = value & 0xff;
- s->cycle = !s->cycle;
- break;
- case 0xd: /* Features */
- ide_ioport_write(&s->bus, 0x1, value);
- break;
- case 0xe: /* Device Control */
- s->ctrl = value;
- if (value & CTRL_SRST) {
- device_cold_reset(DEVICE(s));
- }
- md_interrupt_update(s);
- break;
- default:
- if (s->stat & STAT_PWRDWN) {
- s->pins |= PINS_CRDY;
- s->stat &= ~STAT_PWRDWN;
- }
- ide_ioport_write(&s->bus, at, value);
- }
-}
-
-static const VMStateDescription vmstate_microdrive = {
- .name = "microdrive",
- .version_id = 3,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(opt, MicroDriveState),
- VMSTATE_UINT8(stat, MicroDriveState),
- VMSTATE_UINT8(pins, MicroDriveState),
- VMSTATE_UINT8(ctrl, MicroDriveState),
- VMSTATE_UINT16(io, MicroDriveState),
- VMSTATE_UINT8(cycle, MicroDriveState),
- VMSTATE_IDE_BUS(bus, MicroDriveState),
- VMSTATE_IDE_DRIVES(bus.ifs, MicroDriveState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static const uint8_t dscm1xxxx_cis[0x14a] = {
- [0x000] = CISTPL_DEVICE, /* 5V Device Information */
- [0x002] = 0x03, /* Tuple length = 4 bytes */
- [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */
- [0x006] = 0x01, /* Size = 2K bytes */
- [0x008] = CISTPL_ENDMARK,
-
- [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */
- [0x00c] = 0x04, /* Tuple length = 4 bytes */
- [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */
- [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */
- [0x012] = 0x01, /* Size = 2K bytes */
- [0x014] = CISTPL_ENDMARK,
-
- [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */
- [0x018] = 0x02, /* Tuple length = 2 bytes */
- [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */
- [0x01c] = 0x01,
-
- [0x01e] = CISTPL_MANFID, /* Manufacturer ID */
- [0x020] = 0x04, /* Tuple length = 4 bytes */
- [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */
- [0x024] = 0x00,
- [0x026] = 0x00, /* TPLMID_CARD = 0000 */
- [0x028] = 0x00,
-
- [0x02a] = CISTPL_VERS_1, /* Level 1 Version */
- [0x02c] = 0x12, /* Tuple length = 23 bytes */
- [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */
- [0x030] = 0x01, /* Minor Version = 1 */
- [0x032] = 'I',
- [0x034] = 'B',
- [0x036] = 'M',
- [0x038] = 0x00,
- [0x03a] = 'm',
- [0x03c] = 'i',
- [0x03e] = 'c',
- [0x040] = 'r',
- [0x042] = 'o',
- [0x044] = 'd',
- [0x046] = 'r',
- [0x048] = 'i',
- [0x04a] = 'v',
- [0x04c] = 'e',
- [0x04e] = 0x00,
- [0x050] = CISTPL_ENDMARK,
-
- [0x052] = CISTPL_FUNCID, /* Function ID */
- [0x054] = 0x02, /* Tuple length = 2 bytes */
- [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */
- [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */
-
- [0x05a] = CISTPL_FUNCE, /* Function Extension */
- [0x05c] = 0x02, /* Tuple length = 2 bytes */
- [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */
- [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */
-
- [0x062] = CISTPL_FUNCE, /* Function Extension */
- [0x064] = 0x03, /* Tuple length = 3 bytes */
- [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */
- [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */
- [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */
-
- [0x06c] = CISTPL_CONFIG, /* Configuration */
- [0x06e] = 0x05, /* Tuple length = 5 bytes */
- [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */
- [0x072] = 0x07, /* TPCC_LAST = 7 */
- [0x074] = 0x00, /* TPCC_RADR = 0200 */
- [0x076] = 0x02,
- [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */
-
- [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x07c] = 0x0b, /* Tuple length = 11 bytes */
- [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */
- [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */
- [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */
- [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
- [0x086] = 0x55, /* NomV: 5.0 V */
- [0x088] = 0x4d, /* MinV: 4.5 V */
- [0x08a] = 0x5d, /* MaxV: 5.5 V */
- [0x08c] = 0x4e, /* Peakl: 450 mA */
- [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */
- [0x090] = 0x00, /* Window descriptor: Window length = 0 */
- [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */
-
- [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x096] = 0x06, /* Tuple length = 6 bytes */
- [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */
- [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
- [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
- [0x09e] = 0xb5, /* NomV: 3.3 V */
- [0x0a0] = 0x1e,
- [0x0a2] = 0x3e, /* Peakl: 350 mA */
-
- [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x0a6] = 0x0d, /* Tuple length = 13 bytes */
- [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */
- [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
- [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
- [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
- [0x0b0] = 0x55, /* NomV: 5.0 V */
- [0x0b2] = 0x4d, /* MinV: 4.5 V */
- [0x0b4] = 0x5d, /* MaxV: 5.5 V */
- [0x0b6] = 0x4e, /* Peakl: 450 mA */
- [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */
- [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */
- [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */
- [0x0be] = 0xff, /* IRQ8..IRQ15 supported */
- [0x0c0] = 0x20, /* TPCE_MI = support power down mode */
-
- [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x0c4] = 0x06, /* Tuple length = 6 bytes */
- [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */
- [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
- [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
- [0x0cc] = 0xb5, /* NomV: 3.3 V */
- [0x0ce] = 0x1e,
- [0x0d0] = 0x3e, /* Peakl: 350 mA */
-
- [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x0d4] = 0x12, /* Tuple length = 18 bytes */
- [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */
- [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
- [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
- [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
- [0x0de] = 0x55, /* NomV: 5.0 V */
- [0x0e0] = 0x4d, /* MinV: 4.5 V */
- [0x0e2] = 0x5d, /* MaxV: 5.5 V */
- [0x0e4] = 0x4e, /* Peakl: 450 mA */
- [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */
- [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */
- [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */
- [0x0ec] = 0x01,
- [0x0ee] = 0x07, /* Address block length = 8 */
- [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */
- [0x0f2] = 0x03,
- [0x0f4] = 0x01, /* Address block length = 2 */
- [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */
- [0x0f8] = 0x20, /* TPCE_MI = support power down mode */
-
- [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x0fc] = 0x06, /* Tuple length = 6 bytes */
- [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */
- [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
- [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
- [0x104] = 0xb5, /* NomV: 3.3 V */
- [0x106] = 0x1e,
- [0x108] = 0x3e, /* Peakl: 350 mA */
-
- [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x10c] = 0x12, /* Tuple length = 18 bytes */
- [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */
- [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
- [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
- [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
- [0x116] = 0x55, /* NomV: 5.0 V */
- [0x118] = 0x4d, /* MinV: 4.5 V */
- [0x11a] = 0x5d, /* MaxV: 5.5 V */
- [0x11c] = 0x4e, /* Peakl: 450 mA */
- [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */
- [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */
- [0x122] = 0x70, /* Field 1 address = 0x0170 */
- [0x124] = 0x01,
- [0x126] = 0x07, /* Address block length = 8 */
- [0x128] = 0x76, /* Field 2 address = 0x0376 */
- [0x12a] = 0x03,
- [0x12c] = 0x01, /* Address block length = 2 */
- [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */
- [0x130] = 0x20, /* TPCE_MI = support power down mode */
-
- [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
- [0x134] = 0x06, /* Tuple length = 6 bytes */
- [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */
- [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
- [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
- [0x13c] = 0xb5, /* NomV: 3.3 V */
- [0x13e] = 0x1e,
- [0x140] = 0x3e, /* Peakl: 350 mA */
-
- [0x142] = CISTPL_NO_LINK, /* No Link */
- [0x144] = 0x00, /* Tuple length = 0 bytes */
-
- [0x146] = CISTPL_END, /* Tuple End */
-};
-
-#define TYPE_DSCM1XXXX "dscm1xxxx"
-
-static int dscm1xxxx_attach(PCMCIACardState *card)
-{
- MicroDriveState *md = MICRODRIVE(card);
- PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card);
-
- md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8);
- md->io_base = 0x0;
-
- device_cold_reset(DEVICE(md));
- md_interrupt_update(md);
-
- return 0;
-}
-
-static int dscm1xxxx_detach(PCMCIACardState *card)
-{
- MicroDriveState *md = MICRODRIVE(card);
-
- device_cold_reset(DEVICE(md));
- return 0;
-}
-
-PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo)
-{
- MicroDriveState *md;
-
- md = MICRODRIVE(object_new(TYPE_DSCM1XXXX));
- qdev_realize(DEVICE(md), NULL, &error_fatal);
-
- if (dinfo != NULL) {
- ide_bus_create_drive(&md->bus, 0, dinfo);
- }
- md->bus.ifs[0].drive_kind = IDE_CFATA;
- md->bus.ifs[0].mdata_size = METADATA_SIZE;
- md->bus.ifs[0].mdata_storage = g_malloc0(METADATA_SIZE);
-
- return PCMCIA_CARD(md);
-}
-
-static void dscm1xxxx_class_init(ObjectClass *oc, void *data)
-{
- PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc);
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- pcc->cis = dscm1xxxx_cis;
- pcc->cis_len = sizeof(dscm1xxxx_cis);
-
- pcc->attach = dscm1xxxx_attach;
- pcc->detach = dscm1xxxx_detach;
- /* Reason: Needs to be wired-up in code, see dscm1xxxx_init() */
- dc->user_creatable = false;
-}
-
-static const TypeInfo dscm1xxxx_type_info = {
- .name = TYPE_DSCM1XXXX,
- .parent = TYPE_MICRODRIVE,
- .class_init = dscm1xxxx_class_init,
-};
-
-static void microdrive_realize(DeviceState *dev, Error **errp)
-{
- MicroDriveState *md = MICRODRIVE(dev);
-
- ide_bus_init_output_irq(&md->bus, qemu_allocate_irq(md_set_irq, md, 0));
-}
-
-static void microdrive_init(Object *obj)
-{
- MicroDriveState *md = MICRODRIVE(obj);
-
- ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1);
-}
-
-static void microdrive_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc);
-
- pcc->attr_read = md_attr_read;
- pcc->attr_write = md_attr_write;
- pcc->common_read = md_common_read;
- pcc->common_write = md_common_write;
- pcc->io_read = md_common_read;
- pcc->io_write = md_common_write;
-
- dc->realize = microdrive_realize;
- dc->reset = md_reset;
- dc->vmsd = &vmstate_microdrive;
-}
-
-static const TypeInfo microdrive_type_info = {
- .name = TYPE_MICRODRIVE,
- .parent = TYPE_PCMCIA_CARD,
- .instance_size = sizeof(MicroDriveState),
- .instance_init = microdrive_init,
- .abstract = true,
- .class_init = microdrive_class_init,
-};
-
-static void microdrive_register_types(void)
-{
- type_register_static(&microdrive_type_info);
- type_register_static(&dscm1xxxx_type_info);
-}
-
-type_init(microdrive_register_types)
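
The dscm1xxxx_cis table deleted above is a plain PC Card CIS image: each tuple is a type code, a link byte giving the number of data bytes, and the data itself, and every byte sits at an even attribute-space offset, which is why only even indices of the array are populated and why md_attr_read() can index it directly with the attribute address. Below is a minimal sketch of a walker over such an image, assuming only the standard tuple layout; dump_cis() and its parameters are hypothetical helpers, not part of this patch:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CISTPL_END 0xff    /* end-of-chain tuple code, per the PC Card standard */

    /* Walk a CIS image laid out like dscm1xxxx_cis (data only on even offsets). */
    static void dump_cis(const uint8_t *cis, size_t cis_len)
    {
        size_t off = 0;

        while (off + 2 < cis_len && cis[off] != CISTPL_END) {
            uint8_t code = cis[off];       /* tuple type, e.g. CISTPL_CONFIG */
            uint8_t len = cis[off + 2];    /* number of data bytes that follow */

            printf("tuple 0x%02x, %d data byte(s) at offset 0x%zx\n",
                   code, len, off + 4);
            off += (size_t)(2 + len) * 2;  /* 2 header bytes + data, even offsets only */
        }
    }

Applied to the table above this reports, for example, the CISTPL_CONFIG tuple at offset 0x6c with 5 data bytes, matching the comments in the deleted array.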
diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c
index 8736281..699874d 100644
--- a/hw/ide/mmio.c
+++ b/hw/ide/mmio.c
@@ -27,7 +27,7 @@
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/ide/mmio.h"
#include "hw/qdev-properties.h"
@@ -141,17 +141,16 @@ static void mmio_ide_initfn(Object *obj)
sysbus_init_irq(d, &s->irq);
}
-static Property mmio_ide_properties[] = {
+static const Property mmio_ide_properties[] = {
DEFINE_PROP_UINT32("shift", MMIOIDEState, shift, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void mmio_ide_class_init(ObjectClass *oc, void *data)
+static void mmio_ide_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = mmio_ide_realizefn;
- dc->reset = mmio_ide_reset;
+ device_class_set_legacy_reset(dc, mmio_ide_reset);
device_class_set_props(dc, mmio_ide_properties);
dc->vmsd = &vmstate_ide_mmio;
}
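
The mmio.c hunk above shows the two mechanical conversions that repeat through the rest of this diff: Property arrays become const and lose their DEFINE_PROP_END_OF_LIST() terminator, and legacy DeviceState reset handlers are installed with device_class_set_legacy_reset() instead of being assigned to dc->reset, while class_init callbacks take a const void *data. A minimal sketch of the resulting shape, assuming current QEMU qdev headers; DemoState, demo_reset and the "shift" property are hypothetical stand-ins, not code from this patch:

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"

    typedef struct DemoState {
        DeviceState parent_obj;
        uint32_t shift;
    } DemoState;

    static void demo_reset(DeviceState *dev)
    {
        /* legacy reset handler, still written against DeviceState */
    }

    static const Property demo_properties[] = {
        DEFINE_PROP_UINT32("shift", DemoState, shift, 0),
        /* no DEFINE_PROP_END_OF_LIST(): the length is taken from the array itself */
    };

    static void demo_class_init(ObjectClass *oc, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(oc);

        device_class_set_legacy_reset(dc, demo_reset);  /* routed through Resettable */
        device_class_set_props(dc, demo_properties);
    }

The same shape reappears in the i8042, mmio_ide and stellaris_gamepad hunks later in this diff.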
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 4675d07..1e50bb9 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -27,7 +27,7 @@
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/pci.h"
@@ -237,7 +237,7 @@ static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
/* end of table (with a fail safe of one page) */
if (bm->cur_prd_last ||
(bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
- return s->sg.size;
+ break;
}
pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
bm->cur_addr += 8;
@@ -266,10 +266,7 @@ static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
s->io_buffer_size += l;
}
}
-
- qemu_sglist_destroy(&s->sg);
- s->io_buffer_size = 0;
- return -1;
+ return s->sg.size;
}
/* return 0 if buffer completed */
@@ -628,7 +625,7 @@ static const TypeInfo pci_ide_type_info = {
.instance_size = sizeof(PCIIDEState),
.instance_init = pci_ide_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
index 80efc63..a0f2709 100644
--- a/hw/ide/piix.c
+++ b/hw/ide/piix.c
@@ -178,12 +178,12 @@ static void pci_piix_ide_exitfn(PCIDevice *dev)
}
/* NOTE: for the PIIX3, the IRQs and IOports are hardcoded */
-static void piix3_ide_class_init(ObjectClass *klass, void *data)
+static void piix3_ide_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- dc->reset = piix_ide_reset;
+ device_class_set_legacy_reset(dc, piix_ide_reset);
dc->vmsd = &vmstate_ide_pci;
k->realize = pci_piix_ide_realize;
k->exit = pci_piix_ide_exitfn;
@@ -201,12 +201,12 @@ static const TypeInfo piix3_ide_info = {
};
/* NOTE: for the PIIX4, the IRQs and IOports are hardcoded */
-static void piix4_ide_class_init(ObjectClass *klass, void *data)
+static void piix4_ide_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- dc->reset = piix_ide_reset;
+ device_class_set_legacy_reset(dc, piix_ide_reset);
dc->vmsd = &vmstate_ide_pci;
k->realize = pci_piix_ide_realize;
k->exit = pci_piix_ide_exitfn;
diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c
index af17384..9b28c69 100644
--- a/hw/ide/sii3112.c
+++ b/hw/ide/sii3112.c
@@ -290,7 +290,7 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp)
}
}
-static void sii3112_pci_class_init(ObjectClass *klass, void *data)
+static void sii3112_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pd = PCI_DEVICE_CLASS(klass);
@@ -300,7 +300,7 @@ static void sii3112_pci_class_init(ObjectClass *klass, void *data)
pd->class_id = PCI_CLASS_STORAGE_RAID;
pd->revision = 1;
pd->realize = sii3112_pci_realize;
- dc->reset = sii3112_reset;
+ device_class_set_legacy_reset(dc, sii3112_reset);
dc->desc = "SiI3112A SATA controller";
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
diff --git a/hw/ide/via.c b/hw/ide/via.c
index a32f56b..dedc267 100644
--- a/hw/ide/via.c
+++ b/hw/ide/via.c
@@ -29,7 +29,7 @@
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "qemu/range.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/isa/vt82c686.h"
#include "hw/ide/pci.h"
#include "hw/irq.h"
@@ -245,12 +245,12 @@ static void via_ide_exitfn(PCIDevice *dev)
}
}
-static void via_ide_class_init(ObjectClass *klass, void *data)
+static void via_ide_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- dc->reset = via_ide_reset;
+ device_class_set_legacy_reset(dc, via_ide_reset);
dc->vmsd = &vmstate_ide_pci;
/* Reason: only works as function of VIA southbridge */
dc->user_creatable = false;
diff --git a/hw/input/Kconfig b/hw/input/Kconfig
index f86e98c..a116cb8 100644
--- a/hw/input/Kconfig
+++ b/hw/input/Kconfig
@@ -1,13 +1,6 @@
config ADB
bool
-config ADS7846
- bool
-
-config LM832X
- bool
- depends on I2C
-
config PCKBD
bool
select PS2
@@ -23,9 +16,6 @@ config PS2
config STELLARIS_GAMEPAD
bool
-config TSC2005
- bool
-
config VIRTIO_INPUT
bool
default y
@@ -41,8 +31,5 @@ config VHOST_USER_INPUT
default y
depends on VIRTIO_INPUT && VHOST_USER
-config TSC210X
- bool
-
config LASIPS2
select PS2
diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c
index 758fa6d..507557d 100644
--- a/hw/input/adb-kbd.c
+++ b/hw/input/adb-kbd.c
@@ -375,7 +375,7 @@ static void adb_kbd_initfn(Object *obj)
d->devaddr = ADB_DEVID_KEYBOARD;
}
-static void adb_kbd_class_init(ObjectClass *oc, void *data)
+static void adb_kbd_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
@@ -387,7 +387,7 @@ static void adb_kbd_class_init(ObjectClass *oc, void *data)
adc->devreq = adb_kbd_request;
adc->devhasdata = adb_kbd_has_data;
- dc->reset = adb_kbd_reset;
+ device_class_set_legacy_reset(dc, adb_kbd_reset);
dc->vmsd = &vmstate_adb_kbd;
}
diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c
index 144a0cc..373ef3f 100644
--- a/hw/input/adb-mouse.c
+++ b/hw/input/adb-mouse.c
@@ -38,6 +38,7 @@ struct MouseState {
ADBDevice parent_obj;
/*< private >*/
+ QemuInputHandlerState *hs;
int buttons_state, last_buttons_state;
int dx, dy, dz;
};
@@ -51,17 +52,57 @@ struct ADBMouseClass {
DeviceRealize parent_realize;
};
-static void adb_mouse_event(void *opaque,
- int dx1, int dy1, int dz1, int buttons_state)
+#define ADB_MOUSE_BUTTON_LEFT 0x01
+#define ADB_MOUSE_BUTTON_RIGHT 0x02
+
+static void adb_mouse_handle_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
{
- MouseState *s = opaque;
+ MouseState *s = (MouseState *)dev;
+ InputMoveEvent *move;
+ InputBtnEvent *btn;
+ static const int bmap[INPUT_BUTTON__MAX] = {
+ [INPUT_BUTTON_LEFT] = ADB_MOUSE_BUTTON_LEFT,
+ [INPUT_BUTTON_RIGHT] = ADB_MOUSE_BUTTON_RIGHT,
+ };
+
+ switch (evt->type) {
+ case INPUT_EVENT_KIND_REL:
+ move = evt->u.rel.data;
+ if (move->axis == INPUT_AXIS_X) {
+ s->dx += move->value;
+ } else if (move->axis == INPUT_AXIS_Y) {
+ s->dy += move->value;
+ }
+ break;
+
+ case INPUT_EVENT_KIND_BTN:
+ btn = evt->u.btn.data;
+ if (bmap[btn->button]) {
+ if (btn->down) {
+ s->buttons_state |= bmap[btn->button];
+ } else {
+ s->buttons_state &= ~bmap[btn->button];
+ }
+ }
+ break;
- s->dx += dx1;
- s->dy += dy1;
- s->dz += dz1;
- s->buttons_state = buttons_state;
+ default:
+ /* keep gcc happy */
+ break;
+ }
}
+static const QemuInputHandler adb_mouse_handler = {
+ .name = "QEMU ADB Mouse",
+ .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
+ .event = adb_mouse_handle_event,
+ /*
+ * We do not need the .sync handler because unlike e.g. PS/2 where async
+ * mouse events are sent over the serial port, an ADB mouse is constantly
+ * polled by the host via the adb_mouse_poll() callback.
+ */
+};
static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf)
{
@@ -94,10 +135,10 @@ static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf)
dx &= 0x7f;
dy &= 0x7f;
- if (!(s->buttons_state & MOUSE_EVENT_LBUTTON)) {
+ if (!(s->buttons_state & ADB_MOUSE_BUTTON_LEFT)) {
dy |= 0x80;
}
- if (!(s->buttons_state & MOUSE_EVENT_RBUTTON)) {
+ if (!(s->buttons_state & ADB_MOUSE_BUTTON_RIGHT)) {
dx |= 0x80;
}
@@ -236,7 +277,7 @@ static void adb_mouse_realizefn(DeviceState *dev, Error **errp)
amc->parent_realize(dev, errp);
- qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse");
+ s->hs = qemu_input_handler_register(dev, &adb_mouse_handler);
}
static void adb_mouse_initfn(Object *obj)
@@ -246,7 +287,7 @@ static void adb_mouse_initfn(Object *obj)
d->devaddr = ADB_DEVID_MOUSE;
}
-static void adb_mouse_class_init(ObjectClass *oc, void *data)
+static void adb_mouse_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
@@ -258,7 +299,7 @@ static void adb_mouse_class_init(ObjectClass *oc, void *data)
adc->devreq = adb_mouse_request;
adc->devhasdata = adb_mouse_has_data;
- dc->reset = adb_mouse_reset;
+ device_class_set_legacy_reset(dc, adb_mouse_reset);
dc->vmsd = &vmstate_adb_mouse;
}
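
The adb-mouse.c change above drops the legacy qemu_add_mouse_event_handler() callback in favour of the QemuInputHandler API: the device states which event kinds it wants through a mask, accumulates relative motion and button state in its event callback, and keeps the handle returned at registration time. A minimal sketch of that registration pattern, assuming ui/input.h; the demo_* names are hypothetical and only the left button is wired up:

    #include "qemu/osdep.h"
    #include "hw/qdev-core.h"
    #include "ui/input.h"

    typedef struct DemoPointerState {
        DeviceState parent_obj;
        QemuInputHandlerState *hs;
        int dx, dy;
        bool left_down;
    } DemoPointerState;

    static void demo_pointer_event(DeviceState *dev, QemuConsole *src,
                                   InputEvent *evt)
    {
        DemoPointerState *s = (DemoPointerState *)dev;

        switch (evt->type) {
        case INPUT_EVENT_KIND_REL:
            if (evt->u.rel.data->axis == INPUT_AXIS_X) {
                s->dx += evt->u.rel.data->value;
            } else if (evt->u.rel.data->axis == INPUT_AXIS_Y) {
                s->dy += evt->u.rel.data->value;
            }
            break;
        case INPUT_EVENT_KIND_BTN:
            if (evt->u.btn.data->button == INPUT_BUTTON_LEFT) {
                s->left_down = evt->u.btn.data->down;
            }
            break;
        default:
            break;
        }
    }

    static const QemuInputHandler demo_pointer_handler = {
        .name = "Demo Pointer",
        .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
        .event = demo_pointer_event,
        /* no .sync handler: the device model polls the accumulated state itself */
    };

    /* Typically called from the device's realize method. */
    static void demo_pointer_register(DemoPointerState *s)
    {
        s->hs = qemu_input_handler_register(DEVICE(s), &demo_pointer_handler);
    }

The real adb-mouse code additionally maps buttons through a small bmap[] table and deliberately omits .sync, since the ADB host polls the device via adb_mouse_poll().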
diff --git a/hw/input/adb.c b/hw/input/adb.c
index aff7130..bcb11ed 100644
--- a/hw/input/adb.c
+++ b/hw/input/adb.c
@@ -259,7 +259,7 @@ static void adb_bus_unrealize(BusState *qbus)
vmstate_unregister(NULL, &vmstate_adb_bus, adb_bus);
}
-static void adb_bus_class_init(ObjectClass *klass, void *data)
+static void adb_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -299,7 +299,7 @@ static void adb_device_realizefn(DeviceState *dev, Error **errp)
bus->devices[bus->nb_devices++] = d;
}
-static void adb_device_class_init(ObjectClass *oc, void *data)
+static void adb_device_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c
deleted file mode 100644
index cde3892..0000000
--- a/hw/input/ads7846.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * TI ADS7846 / TSC2046 chip emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "hw/ssi/ssi.h"
-#include "migration/vmstate.h"
-#include "qemu/module.h"
-#include "ui/console.h"
-#include "qom/object.h"
-
-struct ADS7846State {
- SSIPeripheral ssidev;
- qemu_irq interrupt;
-
- int input[8];
- int pressure;
- int noise;
-
- int cycle;
- int output;
-};
-
-#define TYPE_ADS7846 "ads7846"
-OBJECT_DECLARE_SIMPLE_TYPE(ADS7846State, ADS7846)
-
-/* Control-byte bitfields */
-#define CB_PD0 (1 << 0)
-#define CB_PD1 (1 << 1)
-#define CB_SER (1 << 2)
-#define CB_MODE (1 << 3)
-#define CB_A0 (1 << 4)
-#define CB_A1 (1 << 5)
-#define CB_A2 (1 << 6)
-#define CB_START (1 << 7)
-
-#define X_AXIS_DMAX 3470
-#define X_AXIS_MIN 290
-#define Y_AXIS_DMAX 3450
-#define Y_AXIS_MIN 200
-
-#define ADS_VBAT 2000
-#define ADS_VAUX 2000
-#define ADS_TEMP0 2000
-#define ADS_TEMP1 3000
-#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15))
-#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15))
-#define ADS_Z1POS(x, y) 600
-#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y))
-
-static void ads7846_int_update(ADS7846State *s)
-{
- if (s->interrupt)
- qemu_set_irq(s->interrupt, s->pressure == 0);
-}
-
-static uint32_t ads7846_transfer(SSIPeripheral *dev, uint32_t value)
-{
- ADS7846State *s = ADS7846(dev);
-
- switch (s->cycle ++) {
- case 0:
- if (!(value & CB_START)) {
- s->cycle = 0;
- break;
- }
-
- s->output = s->input[(value >> 4) & 7];
-
- /* Imitate the ADC noise, some drivers expect this. */
- s->noise = (s->noise + 3) & 7;
- switch ((value >> 4) & 7) {
- case 1: s->output += s->noise ^ 2; break;
- case 3: s->output += s->noise ^ 0; break;
- case 4: s->output += s->noise ^ 7; break;
- case 5: s->output += s->noise ^ 5; break;
- }
-
- if (value & CB_MODE)
- s->output >>= 4; /* 8 bits instead of 12 */
-
- break;
- case 1:
- s->cycle = 0;
- break;
- }
- return s->output;
-}
-
-static void ads7846_ts_event(void *opaque,
- int x, int y, int z, int buttons_state)
-{
- ADS7846State *s = opaque;
-
- if (buttons_state) {
- x = 0x7fff - x;
- s->input[1] = ADS_XPOS(x, y);
- s->input[3] = ADS_Z1POS(x, y);
- s->input[4] = ADS_Z2POS(x, y);
- s->input[5] = ADS_YPOS(x, y);
- }
-
- if (s->pressure == !buttons_state) {
- s->pressure = !!buttons_state;
-
- ads7846_int_update(s);
- }
-}
-
-static int ads7856_post_load(void *opaque, int version_id)
-{
- ADS7846State *s = opaque;
-
- s->pressure = 0;
- ads7846_int_update(s);
- return 0;
-}
-
-static const VMStateDescription vmstate_ads7846 = {
- .name = "ads7846",
- .version_id = 1,
- .minimum_version_id = 1,
- .post_load = ads7856_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_SSI_PERIPHERAL(ssidev, ADS7846State),
- VMSTATE_INT32_ARRAY(input, ADS7846State, 8),
- VMSTATE_INT32(noise, ADS7846State),
- VMSTATE_INT32(cycle, ADS7846State),
- VMSTATE_INT32(output, ADS7846State),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void ads7846_realize(SSIPeripheral *d, Error **errp)
-{
- DeviceState *dev = DEVICE(d);
- ADS7846State *s = ADS7846(d);
-
- qdev_init_gpio_out(dev, &s->interrupt, 1);
-
- s->input[0] = ADS_TEMP0; /* TEMP0 */
- s->input[2] = ADS_VBAT; /* VBAT */
- s->input[6] = ADS_VAUX; /* VAUX */
- s->input[7] = ADS_TEMP1; /* TEMP1 */
-
- /* We want absolute coordinates */
- qemu_add_mouse_event_handler(ads7846_ts_event, s, 1,
- "QEMU ADS7846-driven Touchscreen");
-
- ads7846_int_update(s);
-
- vmstate_register_any(NULL, &vmstate_ads7846, s);
-}
-
-static void ads7846_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
-
- k->realize = ads7846_realize;
- k->transfer = ads7846_transfer;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
-}
-
-static const TypeInfo ads7846_info = {
- .name = TYPE_ADS7846,
- .parent = TYPE_SSI_PERIPHERAL,
- .instance_size = sizeof(ADS7846State),
- .class_init = ads7846_class_init,
-};
-
-static void ads7846_register_types(void)
-{
- type_register_static(&ads7846_info);
-}
-
-type_init(ads7846_register_types)
diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c
index d9f8c36..de62572 100644
--- a/hw/input/lasips2.c
+++ b/hw/input/lasips2.c
@@ -29,7 +29,7 @@
#include "hw/input/lasips2.h"
#include "exec/hwaddr.h"
#include "trace.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "qapi/error.h"
@@ -306,7 +306,7 @@ static void lasips2_init(Object *obj)
"lasips2-port-input-irq", 2);
}
-static void lasips2_class_init(ObjectClass *klass, void *data)
+static void lasips2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -347,7 +347,7 @@ static void lasips2_port_init(Object *obj)
"ps2-input-irq", 1);
}
-static void lasips2_port_class_init(ObjectClass *klass, void *data)
+static void lasips2_port_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -397,7 +397,7 @@ static void lasips2_kbd_port_init(Object *obj)
lp->lasips2 = container_of(s, LASIPS2State, kbd_port);
}
-static void lasips2_kbd_port_class_init(ObjectClass *klass, void *data)
+static void lasips2_kbd_port_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass);
@@ -447,7 +447,7 @@ static void lasips2_mouse_port_init(Object *obj)
lp->lasips2 = container_of(s, LASIPS2State, mouse_port);
}
-static void lasips2_mouse_port_class_init(ObjectClass *klass, void *data)
+static void lasips2_mouse_port_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass);
diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c
deleted file mode 100644
index 59e5567..0000000
--- a/hw/input/lm832x.c
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/input/lm832x.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "qemu/module.h"
-#include "qemu/timer.h"
-#include "ui/console.h"
-#include "qom/object.h"
-
-OBJECT_DECLARE_SIMPLE_TYPE(LM823KbdState, LM8323)
-
-struct LM823KbdState {
- I2CSlave parent_obj;
-
- uint8_t i2c_dir;
- uint8_t i2c_cycle;
- uint8_t reg;
-
- qemu_irq nirq;
- uint16_t model;
-
- struct {
- qemu_irq out[2];
- int in[2][2];
- } mux;
-
- uint8_t config;
- uint8_t status;
- uint8_t acttime;
- uint8_t error;
- uint8_t clock;
-
- struct {
- uint16_t pull;
- uint16_t mask;
- uint16_t dir;
- uint16_t level;
- qemu_irq out[16];
- } gpio;
-
- struct {
- uint8_t dbnctime;
- uint8_t size;
- uint8_t start;
- uint8_t len;
- uint8_t fifo[16];
- } kbd;
-
- struct {
- uint16_t file[256];
- uint8_t faddr;
- uint8_t addr[3];
- QEMUTimer *tm[3];
- } pwm;
-};
-
-#define INT_KEYPAD (1 << 0)
-#define INT_ERROR (1 << 3)
-#define INT_NOINIT (1 << 4)
-#define INT_PWMEND(n) (1 << (5 + n))
-
-#define ERR_BADPAR (1 << 0)
-#define ERR_CMDUNK (1 << 1)
-#define ERR_KEYOVR (1 << 2)
-#define ERR_FIFOOVR (1 << 6)
-
-static void lm_kbd_irq_update(LM823KbdState *s)
-{
- qemu_set_irq(s->nirq, !s->status);
-}
-
-static void lm_kbd_gpio_update(LM823KbdState *s)
-{
-}
-
-static void lm_kbd_reset(DeviceState *dev)
-{
- LM823KbdState *s = LM8323(dev);
-
- s->config = 0x80;
- s->status = INT_NOINIT;
- s->acttime = 125;
- s->kbd.dbnctime = 3;
- s->kbd.size = 0x33;
- s->clock = 0x08;
-
- lm_kbd_irq_update(s);
- lm_kbd_gpio_update(s);
-}
-
-static void lm_kbd_error(LM823KbdState *s, int err)
-{
- s->error |= err;
- s->status |= INT_ERROR;
- lm_kbd_irq_update(s);
-}
-
-static void lm_kbd_pwm_tick(LM823KbdState *s, int line)
-{
-}
-
-static void lm_kbd_pwm_start(LM823KbdState *s, int line)
-{
- lm_kbd_pwm_tick(s, line);
-}
-
-static void lm_kbd_pwm0_tick(void *opaque)
-{
- lm_kbd_pwm_tick(opaque, 0);
-}
-static void lm_kbd_pwm1_tick(void *opaque)
-{
- lm_kbd_pwm_tick(opaque, 1);
-}
-static void lm_kbd_pwm2_tick(void *opaque)
-{
- lm_kbd_pwm_tick(opaque, 2);
-}
-
-enum {
- LM832x_CMD_READ_ID = 0x80, /* Read chip ID. */
- LM832x_CMD_WRITE_CFG = 0x81, /* Set configuration item. */
- LM832x_CMD_READ_INT = 0x82, /* Get interrupt status. */
- LM832x_CMD_RESET = 0x83, /* Reset, same as external one */
- LM823x_CMD_WRITE_PULL_DOWN = 0x84, /* Select GPIO pull-up/down. */
- LM832x_CMD_WRITE_PORT_SEL = 0x85, /* Select GPIO in/out. */
- LM832x_CMD_WRITE_PORT_STATE = 0x86, /* Set GPIO pull-up/down. */
- LM832x_CMD_READ_PORT_SEL = 0x87, /* Get GPIO in/out. */
- LM832x_CMD_READ_PORT_STATE = 0x88, /* Get GPIO pull-up/down. */
- LM832x_CMD_READ_FIFO = 0x89, /* Read byte from FIFO. */
- LM832x_CMD_RPT_READ_FIFO = 0x8a, /* Read FIFO (no increment). */
- LM832x_CMD_SET_ACTIVE = 0x8b, /* Set active time. */
- LM832x_CMD_READ_ERROR = 0x8c, /* Get error status. */
- LM832x_CMD_READ_ROTATOR = 0x8e, /* Read rotator status. */
- LM832x_CMD_SET_DEBOUNCE = 0x8f, /* Set debouncing time. */
- LM832x_CMD_SET_KEY_SIZE = 0x90, /* Set keypad size. */
- LM832x_CMD_READ_KEY_SIZE = 0x91, /* Get keypad size. */
- LM832x_CMD_READ_CFG = 0x92, /* Get configuration item. */
- LM832x_CMD_WRITE_CLOCK = 0x93, /* Set clock config. */
- LM832x_CMD_READ_CLOCK = 0x94, /* Get clock config. */
- LM832x_CMD_PWM_WRITE = 0x95, /* Write PWM script. */
- LM832x_CMD_PWM_START = 0x96, /* Start PWM engine. */
- LM832x_CMD_PWM_STOP = 0x97, /* Stop PWM engine. */
- LM832x_GENERAL_ERROR = 0xff, /* There was an error.
- Previously represented by -1.
- This is not a command */
-};
-
-#define LM832x_MAX_KPX 8
-#define LM832x_MAX_KPY 12
-
-static uint8_t lm_kbd_read(LM823KbdState *s, int reg, int byte)
-{
- int ret;
-
- switch (reg) {
- case LM832x_CMD_READ_ID:
- ret = 0x0400;
- break;
-
- case LM832x_CMD_READ_INT:
- ret = s->status;
- if (!(s->status & INT_NOINIT)) {
- s->status = 0;
- lm_kbd_irq_update(s);
- }
- break;
-
- case LM832x_CMD_READ_PORT_SEL:
- ret = s->gpio.dir;
- break;
- case LM832x_CMD_READ_PORT_STATE:
- ret = s->gpio.mask;
- break;
-
- case LM832x_CMD_READ_FIFO:
- if (s->kbd.len <= 1)
- return 0x00;
-
- /* Example response from the two commands after an INT_KEYPAD
- * interrupt caused by the key 0x3c being pressed:
- * RPT_READ_FIFO: 55 bc 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
- * READ_FIFO: bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
- * RPT_READ_FIFO: bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
- *
- * 55 is the code of the key release event serviced in the previous
- * interrupt handling.
- *
- * TODO: find out whether the FIFO is advanced a single character
- * before reading every byte or the whole size of the FIFO at the
- * last LM832x_CMD_READ_FIFO. This affects LM832x_CMD_RPT_READ_FIFO
- * output in cases where there are more than one event in the FIFO.
- * Assume 0xbc and 0x3c events are in the FIFO:
- * RPT_READ_FIFO: 55 bc 3c 00 4e ff 0a 50 08 00 29 d9 08 01 c9
- * READ_FIFO: bc 3c 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9
- * Does RPT_READ_FIFO now return 0xbc and 0x3c or only 0x3c?
- */
- s->kbd.start ++;
- s->kbd.start &= sizeof(s->kbd.fifo) - 1;
- s->kbd.len --;
-
- return s->kbd.fifo[s->kbd.start];
- case LM832x_CMD_RPT_READ_FIFO:
- if (byte >= s->kbd.len)
- return 0x00;
-
- return s->kbd.fifo[(s->kbd.start + byte) & (sizeof(s->kbd.fifo) - 1)];
-
- case LM832x_CMD_READ_ERROR:
- return s->error;
-
- case LM832x_CMD_READ_ROTATOR:
- return 0;
-
- case LM832x_CMD_READ_KEY_SIZE:
- return s->kbd.size;
-
- case LM832x_CMD_READ_CFG:
- return s->config & 0xf;
-
- case LM832x_CMD_READ_CLOCK:
- return (s->clock & 0xfc) | 2;
-
- default:
- lm_kbd_error(s, ERR_CMDUNK);
- fprintf(stderr, "%s: unknown command %02x\n", __func__, reg);
- return 0x00;
- }
-
- return ret >> (byte << 3);
-}
-
-static void lm_kbd_write(LM823KbdState *s, int reg, int byte, uint8_t value)
-{
- switch (reg) {
- case LM832x_CMD_WRITE_CFG:
- s->config = value;
- /* This must be done whenever s->mux.in is updated (never). */
- if ((s->config >> 1) & 1) /* MUX1EN */
- qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 0) & 1]);
- if ((s->config >> 3) & 1) /* MUX2EN */
- qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 2) & 1]);
- /* TODO: check that this is issued only following the chip reset
- * and not in the middle of operation and that it is followed by
- * the GPIO ports re-establishing through WRITE_PORT_SEL and
- * WRITE_PORT_STATE (using a timer perhaps) and otherwise output
- * warnings. */
- s->status = 0;
- lm_kbd_irq_update(s);
- s->kbd.len = 0;
- s->kbd.start = 0;
- s->reg = LM832x_GENERAL_ERROR;
- break;
-
- case LM832x_CMD_RESET:
- if (value == 0xaa)
- lm_kbd_reset(DEVICE(s));
- else
- lm_kbd_error(s, ERR_BADPAR);
- s->reg = LM832x_GENERAL_ERROR;
- break;
-
- case LM823x_CMD_WRITE_PULL_DOWN:
- if (!byte)
- s->gpio.pull = value;
- else {
- s->gpio.pull |= value << 8;
- lm_kbd_gpio_update(s);
- s->reg = LM832x_GENERAL_ERROR;
- }
- break;
- case LM832x_CMD_WRITE_PORT_SEL:
- if (!byte)
- s->gpio.dir = value;
- else {
- s->gpio.dir |= value << 8;
- lm_kbd_gpio_update(s);
- s->reg = LM832x_GENERAL_ERROR;
- }
- break;
- case LM832x_CMD_WRITE_PORT_STATE:
- if (!byte)
- s->gpio.mask = value;
- else {
- s->gpio.mask |= value << 8;
- lm_kbd_gpio_update(s);
- s->reg = LM832x_GENERAL_ERROR;
- }
- break;
-
- case LM832x_CMD_SET_ACTIVE:
- s->acttime = value;
- s->reg = LM832x_GENERAL_ERROR;
- break;
-
- case LM832x_CMD_SET_DEBOUNCE:
- s->kbd.dbnctime = value;
- s->reg = LM832x_GENERAL_ERROR;
- if (!value)
- lm_kbd_error(s, ERR_BADPAR);
- break;
-
- case LM832x_CMD_SET_KEY_SIZE:
- s->kbd.size = value;
- s->reg = LM832x_GENERAL_ERROR;
- if (
- (value & 0xf) < 3 || (value & 0xf) > LM832x_MAX_KPY ||
- (value >> 4) < 3 || (value >> 4) > LM832x_MAX_KPX)
- lm_kbd_error(s, ERR_BADPAR);
- break;
-
- case LM832x_CMD_WRITE_CLOCK:
- s->clock = value;
- s->reg = LM832x_GENERAL_ERROR;
- if ((value & 3) && (value & 3) != 3) {
- lm_kbd_error(s, ERR_BADPAR);
- fprintf(stderr, "%s: invalid clock setting in RCPWM\n",
- __func__);
- }
- /* TODO: Validate that the command is only issued once */
- break;
-
- case LM832x_CMD_PWM_WRITE:
- if (byte == 0) {
- if (!(value & 3) || (value >> 2) > 59) {
- lm_kbd_error(s, ERR_BADPAR);
- s->reg = LM832x_GENERAL_ERROR;
- break;
- }
-
- s->pwm.faddr = value;
- s->pwm.file[s->pwm.faddr] = 0;
- } else if (byte == 1) {
- s->pwm.file[s->pwm.faddr] |= value << 8;
- } else if (byte == 2) {
- s->pwm.file[s->pwm.faddr] |= value << 0;
- s->reg = LM832x_GENERAL_ERROR;
- }
- break;
- case LM832x_CMD_PWM_START:
- s->reg = LM832x_GENERAL_ERROR;
- if (!(value & 3) || (value >> 2) > 59) {
- lm_kbd_error(s, ERR_BADPAR);
- break;
- }
-
- s->pwm.addr[(value & 3) - 1] = value >> 2;
- lm_kbd_pwm_start(s, (value & 3) - 1);
- break;
- case LM832x_CMD_PWM_STOP:
- s->reg = LM832x_GENERAL_ERROR;
- if (!(value & 3)) {
- lm_kbd_error(s, ERR_BADPAR);
- break;
- }
-
- timer_del(s->pwm.tm[(value & 3) - 1]);
- break;
-
- case LM832x_GENERAL_ERROR:
- lm_kbd_error(s, ERR_BADPAR);
- break;
- default:
- lm_kbd_error(s, ERR_CMDUNK);
- fprintf(stderr, "%s: unknown command %02x\n", __func__, reg);
- break;
- }
-}
-
-static int lm_i2c_event(I2CSlave *i2c, enum i2c_event event)
-{
- LM823KbdState *s = LM8323(i2c);
-
- switch (event) {
- case I2C_START_RECV:
- case I2C_START_SEND:
- s->i2c_cycle = 0;
- s->i2c_dir = (event == I2C_START_SEND);
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
-static uint8_t lm_i2c_rx(I2CSlave *i2c)
-{
- LM823KbdState *s = LM8323(i2c);
-
- return lm_kbd_read(s, s->reg, s->i2c_cycle ++);
-}
-
-static int lm_i2c_tx(I2CSlave *i2c, uint8_t data)
-{
- LM823KbdState *s = LM8323(i2c);
-
- if (!s->i2c_cycle)
- s->reg = data;
- else
- lm_kbd_write(s, s->reg, s->i2c_cycle - 1, data);
- s->i2c_cycle ++;
-
- return 0;
-}
-
-static int lm_kbd_post_load(void *opaque, int version_id)
-{
- LM823KbdState *s = opaque;
-
- lm_kbd_irq_update(s);
- lm_kbd_gpio_update(s);
-
- return 0;
-}
-
-static const VMStateDescription vmstate_lm_kbd = {
- .name = "LM8323",
- .version_id = 0,
- .minimum_version_id = 0,
- .post_load = lm_kbd_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_I2C_SLAVE(parent_obj, LM823KbdState),
- VMSTATE_UINT8(i2c_dir, LM823KbdState),
- VMSTATE_UINT8(i2c_cycle, LM823KbdState),
- VMSTATE_UINT8(reg, LM823KbdState),
- VMSTATE_UINT8(config, LM823KbdState),
- VMSTATE_UINT8(status, LM823KbdState),
- VMSTATE_UINT8(acttime, LM823KbdState),
- VMSTATE_UINT8(error, LM823KbdState),
- VMSTATE_UINT8(clock, LM823KbdState),
- VMSTATE_UINT16(gpio.pull, LM823KbdState),
- VMSTATE_UINT16(gpio.mask, LM823KbdState),
- VMSTATE_UINT16(gpio.dir, LM823KbdState),
- VMSTATE_UINT16(gpio.level, LM823KbdState),
- VMSTATE_UINT8(kbd.dbnctime, LM823KbdState),
- VMSTATE_UINT8(kbd.size, LM823KbdState),
- VMSTATE_UINT8(kbd.start, LM823KbdState),
- VMSTATE_UINT8(kbd.len, LM823KbdState),
- VMSTATE_BUFFER(kbd.fifo, LM823KbdState),
- VMSTATE_UINT16_ARRAY(pwm.file, LM823KbdState, 256),
- VMSTATE_UINT8(pwm.faddr, LM823KbdState),
- VMSTATE_BUFFER(pwm.addr, LM823KbdState),
- VMSTATE_TIMER_PTR_ARRAY(pwm.tm, LM823KbdState, 3),
- VMSTATE_END_OF_LIST()
- }
-};
-
-
-static void lm8323_realize(DeviceState *dev, Error **errp)
-{
- LM823KbdState *s = LM8323(dev);
-
- s->model = 0x8323;
- s->pwm.tm[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm0_tick, s);
- s->pwm.tm[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm1_tick, s);
- s->pwm.tm[2] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm2_tick, s);
- qdev_init_gpio_out(dev, &s->nirq, 1);
-}
-
-void lm832x_key_event(DeviceState *dev, int key, int state)
-{
- LM823KbdState *s = LM8323(dev);
-
- if ((s->status & INT_ERROR) && (s->error & ERR_FIFOOVR))
- return;
-
- if (s->kbd.len >= sizeof(s->kbd.fifo)) {
- lm_kbd_error(s, ERR_FIFOOVR);
- return;
- }
-
- s->kbd.fifo[(s->kbd.start + s->kbd.len ++) & (sizeof(s->kbd.fifo) - 1)] =
- key | (state << 7);
-
- /* We never set ERR_KEYOVR because we handle multiple pressed keys fine. */
- s->status |= INT_KEYPAD;
- lm_kbd_irq_update(s);
-}
-
-static void lm8323_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
-
- dc->reset = lm_kbd_reset;
- dc->realize = lm8323_realize;
- k->event = lm_i2c_event;
- k->recv = lm_i2c_rx;
- k->send = lm_i2c_tx;
- dc->vmsd = &vmstate_lm_kbd;
-}
-
-static const TypeInfo lm8323_info = {
- .name = TYPE_LM8323,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(LM823KbdState),
- .class_init = lm8323_class_init,
-};
-
-static void lm832x_register_types(void)
-{
- type_register_static(&lm8323_info);
-}
-
-type_init(lm832x_register_types)
diff --git a/hw/input/meson.build b/hw/input/meson.build
index 3cc8ab8..90a2149 100644
--- a/hw/input/meson.build
+++ b/hw/input/meson.build
@@ -1,17 +1,12 @@
system_ss.add(files('hid.c'))
system_ss.add(when: 'CONFIG_ADB', if_true: files('adb.c', 'adb-mouse.c', 'adb-kbd.c'))
-system_ss.add(when: 'CONFIG_ADS7846', if_true: files('ads7846.c'))
-system_ss.add(when: 'CONFIG_LM832X', if_true: files('lm832x.c'))
system_ss.add(when: 'CONFIG_PCKBD', if_true: files('pckbd.c'))
system_ss.add(when: 'CONFIG_PL050', if_true: files('pl050.c'))
system_ss.add(when: 'CONFIG_PS2', if_true: files('ps2.c'))
system_ss.add(when: 'CONFIG_STELLARIS_GAMEPAD', if_true: files('stellaris_gamepad.c'))
-system_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c'))
-system_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c'))
system_ss.add(when: 'CONFIG_LASIPS2', if_true: files('lasips2.c'))
diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c
index 74f10b6..71f5f97 100644
--- a/hw/input/pckbd.c
+++ b/hw/input/pckbd.c
@@ -34,8 +34,8 @@
#include "hw/irq.h"
#include "hw/input/i8042.h"
#include "hw/qdev-properties.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "trace.h"
@@ -735,10 +735,9 @@ static void i8042_mmio_init(Object *obj)
"ps2-mouse-input-irq", 1);
}
-static Property i8042_mmio_properties[] = {
+static const Property i8042_mmio_properties[] = {
DEFINE_PROP_UINT64("mask", MMIOKBDState, kbd.mask, UINT64_MAX),
DEFINE_PROP_UINT32("size", MMIOKBDState, size, -1),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_kbd_mmio = {
@@ -751,12 +750,12 @@ static const VMStateDescription vmstate_kbd_mmio = {
}
};
-static void i8042_mmio_class_init(ObjectClass *klass, void *data)
+static void i8042_mmio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = i8042_mmio_realize;
- dc->reset = i8042_mmio_reset;
+ device_class_set_legacy_reset(dc, i8042_mmio_reset);
dc->vmsd = &vmstate_kbd_mmio;
device_class_set_props(dc, i8042_mmio_properties);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
@@ -933,21 +932,20 @@ static void i8042_build_aml(AcpiDevAmlIf *adev, Aml *scope)
aml_append(scope, mou);
}
-static Property i8042_properties[] = {
+static const Property i8042_properties[] = {
DEFINE_PROP_BOOL("extended-state", ISAKBDState, kbd.extended_state, true),
DEFINE_PROP_BOOL("kbd-throttle", ISAKBDState, kbd_throttle, false),
DEFINE_PROP_UINT8("kbd-irq", ISAKBDState, kbd_irq, 1),
DEFINE_PROP_UINT8("mouse-irq", ISAKBDState, mouse_irq, 12),
- DEFINE_PROP_END_OF_LIST(),
};
-static void i8042_class_initfn(ObjectClass *klass, void *data)
+static void i8042_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
device_class_set_props(dc, i8042_properties);
- dc->reset = i8042_reset;
+ device_class_set_legacy_reset(dc, i8042_reset);
dc->realize = i8042_realizefn;
dc->vmsd = &vmstate_kbd_isa;
adevc->build_dev_aml = i8042_build_aml;
@@ -960,7 +958,7 @@ static const TypeInfo i8042_info = {
.instance_size = sizeof(ISAKBDState),
.instance_init = i8042_initfn,
.class_init = i8042_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/input/pl050.c b/hw/input/pl050.c
index 6519e26..c5f4a3f 100644
--- a/hw/input/pl050.c
+++ b/hw/input/pl050.c
@@ -203,7 +203,7 @@ static void pl050_mouse_init(Object *obj)
object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE);
}
-static void pl050_kbd_class_init(ObjectClass *oc, void *data)
+static void pl050_kbd_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PL050DeviceClass *pdc = PL050_CLASS(oc);
@@ -220,7 +220,7 @@ static const TypeInfo pl050_kbd_info = {
.class_init = pl050_kbd_class_init,
};
-static void pl050_mouse_class_init(ObjectClass *oc, void *data)
+static void pl050_mouse_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PL050DeviceClass *pdc = PL050_CLASS(oc);
@@ -249,7 +249,7 @@ static void pl050_init(Object *obj)
qdev_init_gpio_in_named(DEVICE(obj), pl050_set_irq, "ps2-input-irq", 1);
}
-static void pl050_class_init(ObjectClass *oc, void *data)
+static void pl050_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index d6f8344..7f7b1fc 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -30,8 +30,8 @@
#include "migration/vmstate.h"
#include "ui/console.h"
#include "ui/input.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "trace.h"
@@ -1254,7 +1254,7 @@ static void ps2_mouse_realize(DeviceState *dev, Error **errp)
qemu_input_handler_register(dev, &ps2_mouse_handler);
}
-static void ps2_kbd_class_init(ObjectClass *klass, void *data)
+static void ps2_kbd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1273,7 +1273,7 @@ static const TypeInfo ps2_kbd_info = {
.class_init = ps2_kbd_class_init
};
-static void ps2_mouse_class_init(ObjectClass *klass, void *data)
+static void ps2_mouse_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1299,7 +1299,7 @@ static void ps2_init(Object *obj)
qdev_init_gpio_out(DEVICE(obj), &s->irq, 1);
}
-static void ps2_class_init(ObjectClass *klass, void *data)
+static void ps2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c
deleted file mode 100644
index 3858648..0000000
--- a/hw/input/pxa2xx_keypad.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Intel PXA27X Keypad Controller emulation.
- *
- * Copyright (c) 2007 MontaVista Software, Inc
- * Written by Armin Kuster <akuster@kama-aina.net>
- * or <Akuster@mvista.com>
- *
- * This code is licensed under the GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "hw/arm/pxa.h"
-#include "ui/console.h"
-
-/*
- * Keypad
- */
-#define KPC 0x00 /* Keypad Interface Control register */
-#define KPDK 0x08 /* Keypad Interface Direct Key register */
-#define KPREC 0x10 /* Keypad Interface Rotary Encoder register */
-#define KPMK 0x18 /* Keypad Interface Matrix Key register */
-#define KPAS 0x20 /* Keypad Interface Automatic Scan register */
-#define KPASMKP0 0x28 /* Keypad Interface Automatic Scan Multiple
- Key Presser register 0 */
-#define KPASMKP1 0x30 /* Keypad Interface Automatic Scan Multiple
- Key Presser register 1 */
-#define KPASMKP2 0x38 /* Keypad Interface Automatic Scan Multiple
- Key Presser register 2 */
-#define KPASMKP3 0x40 /* Keypad Interface Automatic Scan Multiple
- Key Presser register 3 */
-#define KPKDI 0x48 /* Keypad Interface Key Debounce Interval
- register */
-
-/* Keypad defines */
-#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
-#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
-#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
-#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
-#define KPC_MS7 (0x1 << 20) /* Matrix scan line 7 */
-#define KPC_MS6 (0x1 << 19) /* Matrix scan line 6 */
-#define KPC_MS5 (0x1 << 18) /* Matrix scan line 5 */
-#define KPC_MS4 (0x1 << 17) /* Matrix scan line 4 */
-#define KPC_MS3 (0x1 << 16) /* Matrix scan line 3 */
-#define KPC_MS2 (0x1 << 15) /* Matrix scan line 2 */
-#define KPC_MS1 (0x1 << 14) /* Matrix scan line 1 */
-#define KPC_MS0 (0x1 << 13) /* Matrix scan line 0 */
-#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
-#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
-#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
-#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
-#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
-#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
-#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
-#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
-#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
-
-#define KPDK_DKP (0x1 << 31)
-#define KPDK_DK7 (0x1 << 7)
-#define KPDK_DK6 (0x1 << 6)
-#define KPDK_DK5 (0x1 << 5)
-#define KPDK_DK4 (0x1 << 4)
-#define KPDK_DK3 (0x1 << 3)
-#define KPDK_DK2 (0x1 << 2)
-#define KPDK_DK1 (0x1 << 1)
-#define KPDK_DK0 (0x1 << 0)
-
-#define KPREC_OF1 (0x1 << 31)
-#define KPREC_UF1 (0x1 << 30)
-#define KPREC_OF0 (0x1 << 15)
-#define KPREC_UF0 (0x1 << 14)
-
-#define KPMK_MKP (0x1 << 31)
-#define KPAS_SO (0x1 << 31)
-#define KPASMKPx_SO (0x1 << 31)
-
-
-#define KPASMKPx_MKC(row, col) (1 << (row + 16 * (col % 2)))
-
-#define PXAKBD_MAXROW 8
-#define PXAKBD_MAXCOL 8
-
-struct PXA2xxKeyPadState {
- MemoryRegion iomem;
- qemu_irq irq;
- const struct keymap *map;
- int pressed_cnt;
- int alt_code;
-
- uint32_t kpc;
- uint32_t kpdk;
- uint32_t kprec;
- uint32_t kpmk;
- uint32_t kpas;
- uint32_t kpasmkp[4];
- uint32_t kpkdi;
-};
-
-static void pxa27x_keypad_find_pressed_key(PXA2xxKeyPadState *kp, int *row, int *col)
-{
- int i;
- for (i = 0; i < 4; i++)
- {
- *col = i * 2;
- for (*row = 0; *row < 8; (*row)++) {
- if (kp->kpasmkp[i] & (1 << *row))
- return;
- }
- *col = i * 2 + 1;
- for (*row = 0; *row < 8; (*row)++) {
- if (kp->kpasmkp[i] & (1 << (*row + 16)))
- return;
- }
- }
-}
-
-static void pxa27x_keyboard_event (PXA2xxKeyPadState *kp, int keycode)
-{
- int row, col, rel, assert_irq = 0;
- uint32_t val;
-
- if (keycode == 0xe0) {
- kp->alt_code = 1;
- return;
- }
-
- if(!(kp->kpc & KPC_ME)) /* skip if not enabled */
- return;
-
- rel = (keycode & 0x80) ? 1 : 0; /* key release from qemu */
- keycode &= ~0x80; /* strip qemu key release bit */
- if (kp->alt_code) {
- keycode |= 0x80;
- kp->alt_code = 0;
- }
-
- row = kp->map[keycode].row;
- col = kp->map[keycode].column;
- if (row == -1 || col == -1) {
- return;
- }
-
- val = KPASMKPx_MKC(row, col);
- if (rel) {
- if (kp->kpasmkp[col / 2] & val) {
- kp->kpasmkp[col / 2] &= ~val;
- kp->pressed_cnt--;
- assert_irq = 1;
- }
- } else {
- if (!(kp->kpasmkp[col / 2] & val)) {
- kp->kpasmkp[col / 2] |= val;
- kp->pressed_cnt++;
- assert_irq = 1;
- }
- }
- kp->kpas = ((kp->pressed_cnt & 0x1f) << 26) | (0xf << 4) | 0xf;
- if (kp->pressed_cnt == 1) {
- kp->kpas &= ~((0xf << 4) | 0xf);
- if (rel) {
- pxa27x_keypad_find_pressed_key(kp, &row, &col);
- }
- kp->kpas |= ((row & 0xf) << 4) | (col & 0xf);
- }
-
- if (!(kp->kpc & (KPC_AS | KPC_ASACT)))
- assert_irq = 0;
-
- if (assert_irq && (kp->kpc & KPC_MIE)) {
- kp->kpc |= KPC_MI;
- qemu_irq_raise(kp->irq);
- }
-}
-
-static uint64_t pxa2xx_keypad_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque;
- uint32_t tmp;
-
- switch (offset) {
- case KPC:
- tmp = s->kpc;
- if(tmp & KPC_MI)
- s->kpc &= ~(KPC_MI);
- if(tmp & KPC_DI)
- s->kpc &= ~(KPC_DI);
- qemu_irq_lower(s->irq);
- return tmp;
- case KPDK:
- return s->kpdk;
- case KPREC:
- tmp = s->kprec;
- if(tmp & KPREC_OF1)
- s->kprec &= ~(KPREC_OF1);
- if(tmp & KPREC_UF1)
- s->kprec &= ~(KPREC_UF1);
- if(tmp & KPREC_OF0)
- s->kprec &= ~(KPREC_OF0);
- if(tmp & KPREC_UF0)
- s->kprec &= ~(KPREC_UF0);
- return tmp;
- case KPMK:
- tmp = s->kpmk;
- if(tmp & KPMK_MKP)
- s->kpmk &= ~(KPMK_MKP);
- return tmp;
- case KPAS:
- return s->kpas;
- case KPASMKP0:
- return s->kpasmkp[0];
- case KPASMKP1:
- return s->kpasmkp[1];
- case KPASMKP2:
- return s->kpasmkp[2];
- case KPASMKP3:
- return s->kpasmkp[3];
- case KPKDI:
- return s->kpkdi;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
- __func__, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_keypad_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque;
-
- switch (offset) {
- case KPC:
- s->kpc = value;
- if (s->kpc & KPC_AS) {
- s->kpc &= ~(KPC_AS);
- }
- break;
- case KPDK:
- s->kpdk = value;
- break;
- case KPREC:
- s->kprec = value;
- break;
- case KPMK:
- s->kpmk = value;
- break;
- case KPAS:
- s->kpas = value;
- break;
- case KPASMKP0:
- s->kpasmkp[0] = value;
- break;
- case KPASMKP1:
- s->kpasmkp[1] = value;
- break;
- case KPASMKP2:
- s->kpasmkp[2] = value;
- break;
- case KPASMKP3:
- s->kpasmkp[3] = value;
- break;
- case KPKDI:
- s->kpkdi = value;
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
- __func__, offset);
- }
-}
-
-static const MemoryRegionOps pxa2xx_keypad_ops = {
- .read = pxa2xx_keypad_read,
- .write = pxa2xx_keypad_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const VMStateDescription vmstate_pxa2xx_keypad = {
- .name = "pxa2xx_keypad",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(kpc, PXA2xxKeyPadState),
- VMSTATE_UINT32(kpdk, PXA2xxKeyPadState),
- VMSTATE_UINT32(kprec, PXA2xxKeyPadState),
- VMSTATE_UINT32(kpmk, PXA2xxKeyPadState),
- VMSTATE_UINT32(kpas, PXA2xxKeyPadState),
- VMSTATE_UINT32_ARRAY(kpasmkp, PXA2xxKeyPadState, 4),
- VMSTATE_UINT32(kpkdi, PXA2xxKeyPadState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq)
-{
- PXA2xxKeyPadState *s;
-
- s = g_new0(PXA2xxKeyPadState, 1);
- s->irq = irq;
-
- memory_region_init_io(&s->iomem, NULL, &pxa2xx_keypad_ops, s,
- "pxa2xx-keypad", 0x00100000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- vmstate_register(NULL, 0, &vmstate_pxa2xx_keypad, s);
-
- return s;
-}
-
-void pxa27x_register_keypad(PXA2xxKeyPadState *kp,
- const struct keymap *map, int size)
-{
- if(!map || size < 0x80) {
- fprintf(stderr, "%s - No PXA keypad map defined\n", __func__);
- exit(-1);
- }
-
- kp->map = map;
- qemu_add_kbd_event_handler((QEMUPutKBDEvent *) pxa27x_keyboard_event, kp);
-}
diff --git a/hw/input/stellaris_gamepad.c b/hw/input/stellaris_gamepad.c
index 17ee42b..fec1161 100644
--- a/hw/input/stellaris_gamepad.c
+++ b/hw/input/stellaris_gamepad.c
@@ -77,13 +77,12 @@ static void stellaris_gamepad_reset_enter(Object *obj, ResetType type)
memset(s->pressed, 0, s->num_buttons * sizeof(uint8_t));
}
-static Property stellaris_gamepad_properties[] = {
+static const Property stellaris_gamepad_properties[] = {
DEFINE_PROP_ARRAY("keycodes", StellarisGamepad, num_buttons,
keycodes, qdev_prop_uint32, uint32_t),
- DEFINE_PROP_END_OF_LIST(),
};
-static void stellaris_gamepad_class_init(ObjectClass *klass, void *data)
+static void stellaris_gamepad_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/input/trace-events b/hw/input/trace-events
index 29001a8..1484625 100644
--- a/hw/input/trace-events
+++ b/hw/input/trace-events
@@ -46,9 +46,6 @@ ps2_mouse_reset(void *opaque) "%p"
hid_kbd_queue_full(void) "queue full"
hid_kbd_queue_empty(void) "queue empty"
-# tsc2005.c
-tsc2005_sense(const char *state) "touchscreen sense %s"
-
# virtio-input.c
virtio_input_queue_full(void) "queue full"
diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c
deleted file mode 100644
index 54a15d2..0000000
--- a/hw/input/tsc2005.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * TI TSC2005 emulator.
- *
- * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
- * Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "qemu/timer.h"
-#include "sysemu/reset.h"
-#include "ui/console.h"
-#include "hw/input/tsc2xxx.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "trace.h"
-
-#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - (p ? 12 : 10)))
-
-typedef struct {
- qemu_irq pint; /* Combination of the nPENIRQ and DAV signals */
- QEMUTimer *timer;
- uint16_t model;
-
- int32_t x, y;
- bool pressure;
-
- uint8_t reg, state;
- bool irq, command;
- uint16_t data, dav;
-
- bool busy;
- bool enabled;
- bool host_mode;
- int8_t function;
- int8_t nextfunction;
- bool precision;
- bool nextprecision;
- uint16_t filter;
- uint8_t pin_func;
- uint16_t timing[2];
- uint8_t noise;
- bool reset;
- bool pdst;
- bool pnd0;
- uint16_t temp_thr[2];
- uint16_t aux_thr[2];
-
- int32_t tr[8];
-} TSC2005State;
-
-enum {
- TSC_MODE_XYZ_SCAN = 0x0,
- TSC_MODE_XY_SCAN,
- TSC_MODE_X,
- TSC_MODE_Y,
- TSC_MODE_Z,
- TSC_MODE_AUX,
- TSC_MODE_TEMP1,
- TSC_MODE_TEMP2,
- TSC_MODE_AUX_SCAN,
- TSC_MODE_X_TEST,
- TSC_MODE_Y_TEST,
- TSC_MODE_TS_TEST,
- TSC_MODE_RESERVED,
- TSC_MODE_XX_DRV,
- TSC_MODE_YY_DRV,
- TSC_MODE_YX_DRV,
-};
-
-static const uint16_t mode_regs[16] = {
- 0xf000, /* X, Y, Z scan */
- 0xc000, /* X, Y scan */
- 0x8000, /* X */
- 0x4000, /* Y */
- 0x3000, /* Z */
- 0x0800, /* AUX */
- 0x0400, /* TEMP1 */
- 0x0200, /* TEMP2 */
- 0x0800, /* AUX scan */
- 0x0040, /* X test */
- 0x0020, /* Y test */
- 0x0080, /* Short-circuit test */
- 0x0000, /* Reserved */
- 0x0000, /* X+, X- drivers */
- 0x0000, /* Y+, Y- drivers */
- 0x0000, /* Y+, X- drivers */
-};
-
-#define X_TRANSFORM(s) \
- ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3])
-#define Y_TRANSFORM(s) \
- ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7])
-#define Z1_TRANSFORM(s) \
- ((400 - ((s)->x >> 7) + ((s)->pressure << 10)) << 4)
-#define Z2_TRANSFORM(s) \
- ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4)
-
-#define AUX_VAL (700 << 4) /* +/- 3 at 12-bit */
-#define TEMP1_VAL (1264 << 4) /* +/- 5 at 12-bit */
-#define TEMP2_VAL (1531 << 4) /* +/- 5 at 12-bit */
-
-static uint16_t tsc2005_read(TSC2005State *s, int reg)
-{
- uint16_t ret;
-
- switch (reg) {
- case 0x0: /* X */
- s->dav &= ~mode_regs[TSC_MODE_X];
- return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) +
- (s->noise & 3);
- case 0x1: /* Y */
- s->dav &= ~mode_regs[TSC_MODE_Y];
- s->noise++;
- return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^
- (s->noise & 3);
- case 0x2: /* Z1 */
- s->dav &= 0xdfff;
- return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) -
- (s->noise & 3);
- case 0x3: /* Z2 */
- s->dav &= 0xefff;
- return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) |
- (s->noise & 3);
-
- case 0x4: /* AUX */
- s->dav &= ~mode_regs[TSC_MODE_AUX];
- return TSC_CUT_RESOLUTION(AUX_VAL, s->precision);
-
- case 0x5: /* TEMP1 */
- s->dav &= ~mode_regs[TSC_MODE_TEMP1];
- return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) -
- (s->noise & 5);
- case 0x6: /* TEMP2 */
- s->dav &= 0xdfff;
- s->dav &= ~mode_regs[TSC_MODE_TEMP2];
- return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^
- (s->noise & 3);
-
- case 0x7: /* Status */
- ret = s->dav | (s->reset << 7) | (s->pdst << 2) | 0x0;
- s->dav &= ~(mode_regs[TSC_MODE_X_TEST] | mode_regs[TSC_MODE_Y_TEST] |
- mode_regs[TSC_MODE_TS_TEST]);
- s->reset = true;
- return ret;
-
- case 0x8: /* AUX high threshold */
- return s->aux_thr[1];
- case 0x9: /* AUX low threshold */
- return s->aux_thr[0];
-
- case 0xa: /* TEMP high threshold */
- return s->temp_thr[1];
- case 0xb: /* TEMP low threshold */
- return s->temp_thr[0];
-
- case 0xc: /* CFR0 */
- return (s->pressure << 15) | ((!s->busy) << 14) |
- (s->nextprecision << 13) | s->timing[0];
- case 0xd: /* CFR1 */
- return s->timing[1];
- case 0xe: /* CFR2 */
- return (s->pin_func << 14) | s->filter;
-
- case 0xf: /* Function select status */
- return s->function >= 0 ? 1 << s->function : 0;
- }
-
- /* Never gets here */
- return 0xffff;
-}
-
-static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
-{
- switch (reg) {
- case 0x8: /* AUX high threshold */
- s->aux_thr[1] = data;
- break;
- case 0x9: /* AUX low threshold */
- s->aux_thr[0] = data;
- break;
-
- case 0xa: /* TEMP high threshold */
- s->temp_thr[1] = data;
- break;
- case 0xb: /* TEMP low threshold */
- s->temp_thr[0] = data;
- break;
-
- case 0xc: /* CFR0 */
- s->host_mode = (data >> 15) != 0;
- if (s->enabled != !(data & 0x4000)) {
- s->enabled = !(data & 0x4000);
- trace_tsc2005_sense(s->enabled ? "enabled" : "disabled");
- if (s->busy && !s->enabled) {
- timer_del(s->timer);
- }
- s->busy = s->busy && s->enabled;
- }
- s->nextprecision = (data >> 13) & 1;
- s->timing[0] = data & 0x1fff;
- if ((s->timing[0] >> 11) == 3) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "tsc2005_write: illegal conversion clock setting\n");
- }
- break;
- case 0xd: /* CFR1 */
- s->timing[1] = data & 0xf07;
- break;
- case 0xe: /* CFR2 */
- s->pin_func = (data >> 14) & 3;
- s->filter = data & 0x3fff;
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: write into read-only register 0x%x\n",
- __func__, reg);
- }
-}
-
-/* This handles most of the chip's logic. */
-static void tsc2005_pin_update(TSC2005State *s)
-{
- int64_t expires;
- bool pin_state;
-
- switch (s->pin_func) {
- case 0:
- pin_state = !s->pressure && !!s->dav;
- break;
- case 1:
- case 3:
- default:
- pin_state = !s->dav;
- break;
- case 2:
- pin_state = !s->pressure;
- }
-
- if (pin_state != s->irq) {
- s->irq = pin_state;
- qemu_set_irq(s->pint, s->irq);
- }
-
- switch (s->nextfunction) {
- case TSC_MODE_XYZ_SCAN:
- case TSC_MODE_XY_SCAN:
- if (!s->host_mode && s->dav) {
- s->enabled = false;
- }
- if (!s->pressure) {
- return;
- }
- /* Fall through */
- case TSC_MODE_AUX_SCAN:
- break;
-
- case TSC_MODE_X:
- case TSC_MODE_Y:
- case TSC_MODE_Z:
- if (!s->pressure) {
- return;
- }
- /* Fall through */
- case TSC_MODE_AUX:
- case TSC_MODE_TEMP1:
- case TSC_MODE_TEMP2:
- case TSC_MODE_X_TEST:
- case TSC_MODE_Y_TEST:
- case TSC_MODE_TS_TEST:
- if (s->dav) {
- s->enabled = false;
- }
- break;
-
- case TSC_MODE_RESERVED:
- case TSC_MODE_XX_DRV:
- case TSC_MODE_YY_DRV:
- case TSC_MODE_YX_DRV:
- default:
- return;
- }
-
- if (!s->enabled || s->busy) {
- return;
- }
-
- s->busy = true;
- s->precision = s->nextprecision;
- s->function = s->nextfunction;
- s->pdst = !s->pnd0; /* Synchronised on internal clock */
- expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- (NANOSECONDS_PER_SECOND >> 7);
- timer_mod(s->timer, expires);
-}
-
-static void tsc2005_reset(TSC2005State *s)
-{
- s->state = 0;
- s->pin_func = 0;
- s->enabled = false;
- s->busy = false;
- s->nextprecision = false;
- s->nextfunction = 0;
- s->timing[0] = 0;
- s->timing[1] = 0;
- s->irq = false;
- s->dav = 0;
- s->reset = false;
- s->pdst = true;
- s->pnd0 = false;
- s->function = -1;
- s->temp_thr[0] = 0x000;
- s->temp_thr[1] = 0xfff;
- s->aux_thr[0] = 0x000;
- s->aux_thr[1] = 0xfff;
-
- tsc2005_pin_update(s);
-}
-
-static uint8_t tsc2005_txrx_word(void *opaque, uint8_t value)
-{
- TSC2005State *s = opaque;
- uint32_t ret = 0;
-
- switch (s->state++) {
- case 0:
- if (value & 0x80) {
- /* Command */
- if (value & (1 << 1))
- tsc2005_reset(s);
- else {
- s->nextfunction = (value >> 3) & 0xf;
- s->nextprecision = (value >> 2) & 1;
- if (s->enabled != !(value & 1)) {
- s->enabled = !(value & 1);
- trace_tsc2005_sense(s->enabled ? "enabled" : "disabled");
- if (s->busy && !s->enabled) {
- timer_del(s->timer);
- }
- s->busy = s->busy && s->enabled;
- }
- tsc2005_pin_update(s);
- }
-
- s->state = 0;
- } else if (value) {
- /* Data transfer */
- s->reg = (value >> 3) & 0xf;
- s->pnd0 = (value >> 1) & 1;
- s->command = value & 1;
-
- if (s->command) {
- /* Read */
- s->data = tsc2005_read(s, s->reg);
- tsc2005_pin_update(s);
- } else
- s->data = 0;
- } else
- s->state = 0;
- break;
-
- case 1:
- if (s->command) {
- ret = (s->data >> 8) & 0xff;
- } else {
- s->data |= value << 8;
- }
- break;
-
- case 2:
- if (s->command)
- ret = s->data & 0xff;
- else {
- s->data |= value;
- tsc2005_write(s, s->reg, s->data);
- tsc2005_pin_update(s);
- }
-
- s->state = 0;
- break;
- }
-
- return ret;
-}
-
-uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len)
-{
- uint32_t ret = 0;
-
- len &= ~7;
- while (len > 0) {
- len -= 8;
- ret |= tsc2005_txrx_word(opaque, (value >> len) & 0xff) << len;
- }
-
- return ret;
-}
-
-static void tsc2005_timer_tick(void *opaque)
-{
- TSC2005State *s = opaque;
- unsigned int function = s->function;
-
- assert(function < ARRAY_SIZE(mode_regs));
-
- /* Timer ticked -- a set of conversions has been finished. */
-
- if (!s->busy) {
- return;
- }
-
- s->busy = false;
- s->dav |= mode_regs[function];
- s->function = -1;
- tsc2005_pin_update(s);
-}
-
-static void tsc2005_touchscreen_event(void *opaque,
- int x, int y, int z, int buttons_state)
-{
- TSC2005State *s = opaque;
- int p = s->pressure;
-
- if (buttons_state) {
- s->x = x;
- s->y = y;
- }
- s->pressure = !!buttons_state;
-
- /*
- * Note: We would get better responsiveness in the guest by
- * signaling TS events immediately, but for now we simulate
- * the first conversion delay for sake of correctness.
- */
- if (p != s->pressure) {
- tsc2005_pin_update(s);
- }
-}
-
-static int tsc2005_post_load(void *opaque, int version_id)
-{
- TSC2005State *s = (TSC2005State *) opaque;
-
- s->busy = timer_pending(s->timer);
- tsc2005_pin_update(s);
-
- return 0;
-}
-
-static const VMStateDescription vmstate_tsc2005 = {
- .name = "tsc2005",
- .version_id = 2,
- .minimum_version_id = 2,
- .post_load = tsc2005_post_load,
- .fields = (const VMStateField []) {
- VMSTATE_BOOL(pressure, TSC2005State),
- VMSTATE_BOOL(irq, TSC2005State),
- VMSTATE_BOOL(command, TSC2005State),
- VMSTATE_BOOL(enabled, TSC2005State),
- VMSTATE_BOOL(host_mode, TSC2005State),
- VMSTATE_BOOL(reset, TSC2005State),
- VMSTATE_BOOL(pdst, TSC2005State),
- VMSTATE_BOOL(pnd0, TSC2005State),
- VMSTATE_BOOL(precision, TSC2005State),
- VMSTATE_BOOL(nextprecision, TSC2005State),
- VMSTATE_UINT8(reg, TSC2005State),
- VMSTATE_UINT8(state, TSC2005State),
- VMSTATE_UINT16(data, TSC2005State),
- VMSTATE_UINT16(dav, TSC2005State),
- VMSTATE_UINT16(filter, TSC2005State),
- VMSTATE_INT8(nextfunction, TSC2005State),
- VMSTATE_INT8(function, TSC2005State),
- VMSTATE_INT32(x, TSC2005State),
- VMSTATE_INT32(y, TSC2005State),
- VMSTATE_TIMER_PTR(timer, TSC2005State),
- VMSTATE_UINT8(pin_func, TSC2005State),
- VMSTATE_UINT16_ARRAY(timing, TSC2005State, 2),
- VMSTATE_UINT8(noise, TSC2005State),
- VMSTATE_UINT16_ARRAY(temp_thr, TSC2005State, 2),
- VMSTATE_UINT16_ARRAY(aux_thr, TSC2005State, 2),
- VMSTATE_INT32_ARRAY(tr, TSC2005State, 8),
- VMSTATE_END_OF_LIST()
- }
-};
-
-void *tsc2005_init(qemu_irq pintdav)
-{
- TSC2005State *s;
-
- s = g_new0(TSC2005State, 1);
- s->x = 400;
- s->y = 240;
- s->pressure = false;
- s->precision = s->nextprecision = false;
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc2005_timer_tick, s);
- s->pint = pintdav;
- s->model = 0x2005;
-
- s->tr[0] = 0;
- s->tr[1] = 1;
- s->tr[2] = 1;
- s->tr[3] = 0;
- s->tr[4] = 1;
- s->tr[5] = 0;
- s->tr[6] = 1;
- s->tr[7] = 0;
-
- tsc2005_reset(s);
-
- qemu_add_mouse_event_handler(tsc2005_touchscreen_event, s, 1,
- "QEMU TSC2005-driven Touchscreen");
-
- qemu_register_reset((void *) tsc2005_reset, s);
- vmstate_register(NULL, 0, &vmstate_tsc2005, s);
-
- return s;
-}
-
-/*
- * Use tslib generated calibration data to generate ADC input values
- * from the touchscreen. Assuming 12-bit precision was used during
- * tslib calibration.
- */
-void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info)
-{
- TSC2005State *s = (TSC2005State *) opaque;
-
- /* This version assumes touchscreen X & Y axis are parallel or
- * perpendicular to LCD's X & Y axis in some way. */
- if (abs(info->a[0]) > abs(info->a[1])) {
- s->tr[0] = 0;
- s->tr[1] = -info->a[6] * info->x;
- s->tr[2] = info->a[0];
- s->tr[3] = -info->a[2] / info->a[0];
- s->tr[4] = info->a[6] * info->y;
- s->tr[5] = 0;
- s->tr[6] = info->a[4];
- s->tr[7] = -info->a[5] / info->a[4];
- } else {
- s->tr[0] = info->a[6] * info->y;
- s->tr[1] = 0;
- s->tr[2] = info->a[1];
- s->tr[3] = -info->a[2] / info->a[1];
- s->tr[4] = 0;
- s->tr[5] = -info->a[6] * info->x;
- s->tr[6] = info->a[3];
- s->tr[7] = -info->a[5] / info->a[3];
- }
-
- s->tr[0] >>= 11;
- s->tr[1] >>= 11;
- s->tr[3] <<= 4;
- s->tr[4] >>= 11;
- s->tr[5] >>= 11;
- s->tr[7] <<= 4;
-}
diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
deleted file mode 100644
index c4e32c7..0000000
--- a/hw/input/tsc210x.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * TI TSC2102 (touchscreen/sensors/audio controller) emulator.
- * TI TSC2301 (touchscreen/sensors/keypad).
- *
- * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
- * Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "audio/audio.h"
-#include "qemu/timer.h"
-#include "qemu/log.h"
-#include "sysemu/reset.h"
-#include "ui/console.h"
-#include "hw/arm/omap.h" /* For I2SCodec */
-#include "hw/boards.h" /* for current_machine */
-#include "hw/input/tsc2xxx.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "qapi/error.h"
-
-#define TSC_DATA_REGISTERS_PAGE 0x0
-#define TSC_CONTROL_REGISTERS_PAGE 0x1
-#define TSC_AUDIO_REGISTERS_PAGE 0x2
-
-#define TSC_VERBOSE
-
-#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - resolution[p]))
-
-typedef struct {
- qemu_irq pint;
- qemu_irq kbint;
- qemu_irq davint;
- QEMUTimer *timer;
- QEMUSoundCard card;
- uWireSlave chip;
- I2SCodec codec;
- uint8_t in_fifo[16384];
- uint8_t out_fifo[16384];
- uint16_t model;
-
- int32_t x, y;
- bool pressure;
-
- uint8_t page, offset;
- uint16_t dav;
-
- bool state;
- bool irq;
- bool command;
- bool busy;
- bool enabled;
- bool host_mode;
- uint8_t function, nextfunction;
- uint8_t precision, nextprecision;
- uint8_t filter;
- uint8_t pin_func;
- uint8_t ref;
- uint8_t timing;
- uint8_t noise;
-
- uint16_t audio_ctrl1;
- uint16_t audio_ctrl2;
- uint16_t audio_ctrl3;
- uint16_t pll[3];
- uint16_t volume;
- int64_t volume_change;
- bool softstep;
- uint16_t dac_power;
- int64_t powerdown;
- uint16_t filter_data[0x14];
-
- const char *name;
- SWVoiceIn *adc_voice[1];
- SWVoiceOut *dac_voice[1];
- int i2s_rx_rate;
- int i2s_tx_rate;
-
- int tr[8];
-
- struct {
- uint16_t down;
- uint16_t mask;
- int scan;
- int debounce;
- int mode;
- int intr;
- } kb;
- int64_t now; /* Time at migration */
-} TSC210xState;
-
-static const int resolution[4] = { 12, 8, 10, 12 };
-
-#define TSC_MODE_NO_SCAN 0x0
-#define TSC_MODE_XY_SCAN 0x1
-#define TSC_MODE_XYZ_SCAN 0x2
-#define TSC_MODE_X 0x3
-#define TSC_MODE_Y 0x4
-#define TSC_MODE_Z 0x5
-#define TSC_MODE_BAT1 0x6
-#define TSC_MODE_BAT2 0x7
-#define TSC_MODE_AUX 0x8
-#define TSC_MODE_AUX_SCAN 0x9
-#define TSC_MODE_TEMP1 0xa
-#define TSC_MODE_PORT_SCAN 0xb
-#define TSC_MODE_TEMP2 0xc
-#define TSC_MODE_XX_DRV 0xd
-#define TSC_MODE_YY_DRV 0xe
-#define TSC_MODE_YX_DRV 0xf
-
-static const uint16_t mode_regs[16] = {
- 0x0000, /* No scan */
- 0x0600, /* X, Y scan */
- 0x0780, /* X, Y, Z scan */
- 0x0400, /* X */
- 0x0200, /* Y */
- 0x0180, /* Z */
- 0x0040, /* BAT1 */
- 0x0030, /* BAT2 */
- 0x0010, /* AUX */
- 0x0010, /* AUX scan */
- 0x0004, /* TEMP1 */
- 0x0070, /* Port scan */
- 0x0002, /* TEMP2 */
- 0x0000, /* X+, X- drivers */
- 0x0000, /* Y+, Y- drivers */
- 0x0000, /* Y+, X- drivers */
-};
-
-#define X_TRANSFORM(s) \
- ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3])
-#define Y_TRANSFORM(s) \
- ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7])
-#define Z1_TRANSFORM(s) \
- ((400 - ((s)->x >> 7) + ((s)->pressure << 10)) << 4)
-#define Z2_TRANSFORM(s) \
- ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4)
-
-#define BAT1_VAL 0x8660
-#define BAT2_VAL 0x0000
-#define AUX1_VAL 0x35c0
-#define AUX2_VAL 0xffff
-#define TEMP1_VAL 0x8c70
-#define TEMP2_VAL 0xa5b0
-
-#define TSC_POWEROFF_DELAY 50
-#define TSC_SOFTSTEP_DELAY 50
-
-static void tsc210x_reset(TSC210xState *s)
-{
- s->state = false;
- s->pin_func = 2;
- s->enabled = false;
- s->busy = false;
- s->nextfunction = 0;
- s->ref = 0;
- s->timing = 0;
- s->irq = false;
- s->dav = 0;
-
- s->audio_ctrl1 = 0x0000;
- s->audio_ctrl2 = 0x4410;
- s->audio_ctrl3 = 0x0000;
- s->pll[0] = 0x1004;
- s->pll[1] = 0x0000;
- s->pll[2] = 0x1fff;
- s->volume = 0xffff;
- s->dac_power = 0x8540;
- s->softstep = true;
- s->volume_change = 0;
- s->powerdown = 0;
- s->filter_data[0x00] = 0x6be3;
- s->filter_data[0x01] = 0x9666;
- s->filter_data[0x02] = 0x675d;
- s->filter_data[0x03] = 0x6be3;
- s->filter_data[0x04] = 0x9666;
- s->filter_data[0x05] = 0x675d;
- s->filter_data[0x06] = 0x7d83;
- s->filter_data[0x07] = 0x84ee;
- s->filter_data[0x08] = 0x7d83;
- s->filter_data[0x09] = 0x84ee;
- s->filter_data[0x0a] = 0x6be3;
- s->filter_data[0x0b] = 0x9666;
- s->filter_data[0x0c] = 0x675d;
- s->filter_data[0x0d] = 0x6be3;
- s->filter_data[0x0e] = 0x9666;
- s->filter_data[0x0f] = 0x675d;
- s->filter_data[0x10] = 0x7d83;
- s->filter_data[0x11] = 0x84ee;
- s->filter_data[0x12] = 0x7d83;
- s->filter_data[0x13] = 0x84ee;
-
- s->i2s_tx_rate = 0;
- s->i2s_rx_rate = 0;
-
- s->kb.scan = 1;
- s->kb.debounce = 0;
- s->kb.mask = 0x0000;
- s->kb.mode = 3;
- s->kb.intr = 0;
-
- qemu_set_irq(s->pint, !s->irq);
- qemu_set_irq(s->davint, !s->dav);
- qemu_irq_raise(s->kbint);
-}
-
-typedef struct {
- int rate;
- int dsor;
- int fsref;
-} TSC210xRateInfo;
-
-/* { rate, dsor, fsref } */
-static const TSC210xRateInfo tsc2102_rates[] = {
- /* Fsref / 6.0 */
- { 7350, 63, 1 },
- { 8000, 63, 0 },
- /* Fsref / 6.0 */
- { 7350, 54, 1 },
- { 8000, 54, 0 },
- /* Fsref / 5.0 */
- { 8820, 45, 1 },
- { 9600, 45, 0 },
- /* Fsref / 4.0 */
- { 11025, 36, 1 },
- { 12000, 36, 0 },
- /* Fsref / 3.0 */
- { 14700, 27, 1 },
- { 16000, 27, 0 },
- /* Fsref / 2.0 */
- { 22050, 18, 1 },
- { 24000, 18, 0 },
- /* Fsref / 1.5 */
- { 29400, 9, 1 },
- { 32000, 9, 0 },
- /* Fsref */
- { 44100, 0, 1 },
- { 48000, 0, 0 },
-
- { 0, 0, 0 },
-};
-
-static inline void tsc210x_out_flush(TSC210xState *s, int len)
-{
- uint8_t *data = s->codec.out.fifo + s->codec.out.start;
- uint8_t *end = data + len;
-
- while (data < end)
- data += AUD_write(s->dac_voice[0], data, end - data) ?: (end - data);
-
- s->codec.out.len -= len;
- if (s->codec.out.len)
- memmove(s->codec.out.fifo, end, s->codec.out.len);
- s->codec.out.start = 0;
-}
-
-static void tsc210x_audio_out_cb(TSC210xState *s, int free_b)
-{
- if (s->codec.out.len >= free_b) {
- tsc210x_out_flush(s, free_b);
- return;
- }
-
- s->codec.out.size = MIN(free_b, 16384);
- qemu_irq_raise(s->codec.tx_start);
-}
-
-static void tsc2102_audio_rate_update(TSC210xState *s)
-{
- const TSC210xRateInfo *rate;
-
- s->codec.tx_rate = 0;
- s->codec.rx_rate = 0;
- if (s->dac_power & (1 << 15)) /* PWDNC */
- return;
-
- for (rate = tsc2102_rates; rate->rate; rate ++)
- if (rate->dsor == (s->audio_ctrl1 & 0x3f) && /* DACFS */
- rate->fsref == ((s->audio_ctrl3 >> 13) & 1))/* REFFS */
- break;
- if (!rate->rate) {
- printf("%s: unknown sampling rate configured\n", __func__);
- return;
- }
-
- s->codec.tx_rate = rate->rate;
-}
-
-static void tsc2102_audio_output_update(TSC210xState *s)
-{
- int enable;
- struct audsettings fmt;
-
- if (s->dac_voice[0]) {
- tsc210x_out_flush(s, s->codec.out.len);
- s->codec.out.size = 0;
- AUD_set_active_out(s->dac_voice[0], 0);
- AUD_close_out(&s->card, s->dac_voice[0]);
- s->dac_voice[0] = NULL;
- }
- s->codec.cts = 0;
-
- enable =
- (~s->dac_power & (1 << 15)) && /* PWDNC */
- (~s->dac_power & (1 << 10)); /* DAPWDN */
- if (!enable || !s->codec.tx_rate)
- return;
-
- /* Force our own sampling rate even in slave DAC mode */
- fmt.endianness = 0;
- fmt.nchannels = 2;
- fmt.freq = s->codec.tx_rate;
- fmt.fmt = AUDIO_FORMAT_S16;
-
- s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0],
- "tsc2102.sink", s, (void *) tsc210x_audio_out_cb, &fmt);
- if (s->dac_voice[0]) {
- s->codec.cts = 1;
- AUD_set_active_out(s->dac_voice[0], 1);
- }
-}
-
-static uint16_t tsc2102_data_register_read(TSC210xState *s, int reg)
-{
- switch (reg) {
- case 0x00: /* X */
- s->dav &= 0xfbff;
- return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) +
- (s->noise & 3);
-
- case 0x01: /* Y */
- s->noise ++;
- s->dav &= 0xfdff;
- return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^
- (s->noise & 3);
-
- case 0x02: /* Z1 */
- s->dav &= 0xfeff;
- return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) -
- (s->noise & 3);
-
- case 0x03: /* Z2 */
- s->dav &= 0xff7f;
- return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) |
- (s->noise & 3);
-
- case 0x04: /* KPData */
- if ((s->model & 0xff00) == 0x2300) {
- if (s->kb.intr && (s->kb.mode & 2)) {
- s->kb.intr = 0;
- qemu_irq_raise(s->kbint);
- }
- return s->kb.down;
- }
-
- return 0xffff;
-
- case 0x05: /* BAT1 */
- s->dav &= 0xffbf;
- return TSC_CUT_RESOLUTION(BAT1_VAL, s->precision) +
- (s->noise & 6);
-
- case 0x06: /* BAT2 */
- s->dav &= 0xffdf;
- return TSC_CUT_RESOLUTION(BAT2_VAL, s->precision);
-
- case 0x07: /* AUX1 */
- s->dav &= 0xffef;
- return TSC_CUT_RESOLUTION(AUX1_VAL, s->precision);
-
- case 0x08: /* AUX2 */
- s->dav &= 0xfff7;
- return 0xffff;
-
- case 0x09: /* TEMP1 */
- s->dav &= 0xfffb;
- return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) -
- (s->noise & 5);
-
- case 0x0a: /* TEMP2 */
- s->dav &= 0xfffd;
- return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^
- (s->noise & 3);
-
- case 0x0b: /* DAC */
- s->dav &= 0xfffe;
- return 0xffff;
-
- default:
-#ifdef TSC_VERBOSE
- fprintf(stderr, "tsc2102_data_register_read: "
- "no such register: 0x%02x\n", reg);
-#endif
- return 0xffff;
- }
-}
-
-static uint16_t tsc2102_control_register_read(
- TSC210xState *s, int reg)
-{
- switch (reg) {
- case 0x00: /* TSC ADC */
- return (s->pressure << 15) | ((!s->busy) << 14) |
- (s->nextfunction << 10) | (s->nextprecision << 8) | s->filter;
-
- case 0x01: /* Status / Keypad Control */
- if ((s->model & 0xff00) == 0x2100)
- return (s->pin_func << 14) | ((!s->enabled) << 13) |
- (s->host_mode << 12) | ((!!s->dav) << 11) | s->dav;
- else
- return (s->kb.intr << 15) | ((s->kb.scan || !s->kb.down) << 14) |
- (s->kb.debounce << 11);
-
- case 0x02: /* DAC Control */
- if ((s->model & 0xff00) == 0x2300)
- return s->dac_power & 0x8000;
- else
- goto bad_reg;
-
- case 0x03: /* Reference */
- return s->ref;
-
- case 0x04: /* Reset */
- return 0xffff;
-
- case 0x05: /* Configuration */
- return s->timing;
-
- case 0x06: /* Secondary configuration */
- if ((s->model & 0xff00) == 0x2100)
- goto bad_reg;
- return ((!s->dav) << 15) | ((s->kb.mode & 1) << 14) | s->pll[2];
-
- case 0x10: /* Keypad Mask */
- if ((s->model & 0xff00) == 0x2100)
- goto bad_reg;
- return s->kb.mask;
-
- default:
- bad_reg:
-#ifdef TSC_VERBOSE
- fprintf(stderr, "tsc2102_control_register_read: "
- "no such register: 0x%02x\n", reg);
-#endif
- return 0xffff;
- }
-}
-
-static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg)
-{
- int l_ch, r_ch;
- uint16_t val;
-
- switch (reg) {
- case 0x00: /* Audio Control 1 */
- return s->audio_ctrl1;
-
- case 0x01:
- return 0xff00;
-
- case 0x02: /* DAC Volume Control */
- return s->volume;
-
- case 0x03:
- return 0x8b00;
-
- case 0x04: /* Audio Control 2 */
- l_ch = 1;
- r_ch = 1;
- if (s->softstep && !(s->dac_power & (1 << 10))) {
- l_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
- s->volume_change + TSC_SOFTSTEP_DELAY);
- r_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
- s->volume_change + TSC_SOFTSTEP_DELAY);
- }
-
- return s->audio_ctrl2 | (l_ch << 3) | (r_ch << 2);
-
- case 0x05: /* Stereo DAC Power Control */
- return 0x2aa0 | s->dac_power |
- (((s->dac_power & (1 << 10)) &&
- (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
- s->powerdown + TSC_POWEROFF_DELAY)) << 6);
-
- case 0x06: /* Audio Control 3 */
- val = s->audio_ctrl3 | 0x0001;
- s->audio_ctrl3 &= 0xff3f;
- return val;
-
- case 0x07: /* LCH_BASS_BOOST_N0 */
- case 0x08: /* LCH_BASS_BOOST_N1 */
- case 0x09: /* LCH_BASS_BOOST_N2 */
- case 0x0a: /* LCH_BASS_BOOST_N3 */
- case 0x0b: /* LCH_BASS_BOOST_N4 */
- case 0x0c: /* LCH_BASS_BOOST_N5 */
- case 0x0d: /* LCH_BASS_BOOST_D1 */
- case 0x0e: /* LCH_BASS_BOOST_D2 */
- case 0x0f: /* LCH_BASS_BOOST_D4 */
- case 0x10: /* LCH_BASS_BOOST_D5 */
- case 0x11: /* RCH_BASS_BOOST_N0 */
- case 0x12: /* RCH_BASS_BOOST_N1 */
- case 0x13: /* RCH_BASS_BOOST_N2 */
- case 0x14: /* RCH_BASS_BOOST_N3 */
- case 0x15: /* RCH_BASS_BOOST_N4 */
- case 0x16: /* RCH_BASS_BOOST_N5 */
- case 0x17: /* RCH_BASS_BOOST_D1 */
- case 0x18: /* RCH_BASS_BOOST_D2 */
- case 0x19: /* RCH_BASS_BOOST_D4 */
- case 0x1a: /* RCH_BASS_BOOST_D5 */
- return s->filter_data[reg - 0x07];
-
- case 0x1b: /* PLL Programmability 1 */
- return s->pll[0];
-
- case 0x1c: /* PLL Programmability 2 */
- return s->pll[1];
-
- case 0x1d: /* Audio Control 4 */
- return (!s->softstep) << 14;
-
- default:
-#ifdef TSC_VERBOSE
- fprintf(stderr, "tsc2102_audio_register_read: "
- "no such register: 0x%02x\n", reg);
-#endif
- return 0xffff;
- }
-}
-
-static void tsc2102_data_register_write(
- TSC210xState *s, int reg, uint16_t value)
-{
- switch (reg) {
- case 0x00: /* X */
- case 0x01: /* Y */
- case 0x02: /* Z1 */
- case 0x03: /* Z2 */
- case 0x05: /* BAT1 */
- case 0x06: /* BAT2 */
- case 0x07: /* AUX1 */
- case 0x08: /* AUX2 */
- case 0x09: /* TEMP1 */
- case 0x0a: /* TEMP2 */
- return;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_data_register_write: "
- "no such register: 0x%02x\n", reg);
- }
-}
-
-static void tsc2102_control_register_write(
- TSC210xState *s, int reg, uint16_t value)
-{
- switch (reg) {
- case 0x00: /* TSC ADC */
- s->host_mode = value >> 15;
- s->enabled = !(value & 0x4000);
- if (s->busy && !s->enabled)
- timer_del(s->timer);
- s->busy = s->busy && s->enabled;
- s->nextfunction = (value >> 10) & 0xf;
- s->nextprecision = (value >> 8) & 3;
- s->filter = value & 0xff;
- return;
-
- case 0x01: /* Status / Keypad Control */
- if ((s->model & 0xff00) == 0x2100)
- s->pin_func = value >> 14;
- else {
- s->kb.scan = (value >> 14) & 1;
- s->kb.debounce = (value >> 11) & 7;
- if (s->kb.intr && s->kb.scan) {
- s->kb.intr = 0;
- qemu_irq_raise(s->kbint);
- }
- }
- return;
-
- case 0x02: /* DAC Control */
- if ((s->model & 0xff00) == 0x2300) {
- s->dac_power &= 0x7fff;
- s->dac_power |= 0x8000 & value;
- } else
- goto bad_reg;
- break;
-
- case 0x03: /* Reference */
- s->ref = value & 0x1f;
- return;
-
- case 0x04: /* Reset */
- if (value == 0xbb00) {
- if (s->busy)
- timer_del(s->timer);
- tsc210x_reset(s);
-#ifdef TSC_VERBOSE
- } else {
- fprintf(stderr, "tsc2102_control_register_write: "
- "wrong value written into RESET\n");
-#endif
- }
- return;
-
- case 0x05: /* Configuration */
- s->timing = value & 0x3f;
-#ifdef TSC_VERBOSE
- if (value & ~0x3f)
- fprintf(stderr, "tsc2102_control_register_write: "
- "wrong value written into CONFIG\n");
-#endif
- return;
-
- case 0x06: /* Secondary configuration */
- if ((s->model & 0xff00) == 0x2100)
- goto bad_reg;
- s->kb.mode = value >> 14;
- s->pll[2] = value & 0x3ffff;
- return;
-
- case 0x10: /* Keypad Mask */
- if ((s->model & 0xff00) == 0x2100)
- goto bad_reg;
- s->kb.mask = value;
- return;
-
- default:
- bad_reg:
- qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_control_register_write: "
- "no such register: 0x%02x\n", reg);
- }
-}
-
-static void tsc2102_audio_register_write(
- TSC210xState *s, int reg, uint16_t value)
-{
- switch (reg) {
- case 0x00: /* Audio Control 1 */
- s->audio_ctrl1 = value & 0x0f3f;
-#ifdef TSC_VERBOSE
- if ((value & ~0x0f3f) || ((value & 7) != ((value >> 3) & 7)))
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into Audio 1\n");
-#endif
- tsc2102_audio_rate_update(s);
- tsc2102_audio_output_update(s);
- return;
-
- case 0x01:
-#ifdef TSC_VERBOSE
- if (value != 0xff00)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into reg 0x01\n");
-#endif
- return;
-
- case 0x02: /* DAC Volume Control */
- s->volume = value;
- s->volume_change = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- return;
-
- case 0x03:
-#ifdef TSC_VERBOSE
- if (value != 0x8b00)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into reg 0x03\n");
-#endif
- return;
-
- case 0x04: /* Audio Control 2 */
- s->audio_ctrl2 = value & 0xf7f2;
-#ifdef TSC_VERBOSE
- if (value & ~0xf7fd)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into Audio 2\n");
-#endif
- return;
-
- case 0x05: /* Stereo DAC Power Control */
- if ((value & ~s->dac_power) & (1 << 10))
- s->powerdown = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- s->dac_power = value & 0x9543;
-#ifdef TSC_VERBOSE
- if ((value & ~0x9543) != 0x2aa0)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into Power\n");
-#endif
- tsc2102_audio_rate_update(s);
- tsc2102_audio_output_update(s);
- return;
-
- case 0x06: /* Audio Control 3 */
- s->audio_ctrl3 &= 0x00c0;
- s->audio_ctrl3 |= value & 0xf800;
-#ifdef TSC_VERBOSE
- if (value & ~0xf8c7)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into Audio 3\n");
-#endif
- tsc2102_audio_output_update(s);
- return;
-
- case 0x07: /* LCH_BASS_BOOST_N0 */
- case 0x08: /* LCH_BASS_BOOST_N1 */
- case 0x09: /* LCH_BASS_BOOST_N2 */
- case 0x0a: /* LCH_BASS_BOOST_N3 */
- case 0x0b: /* LCH_BASS_BOOST_N4 */
- case 0x0c: /* LCH_BASS_BOOST_N5 */
- case 0x0d: /* LCH_BASS_BOOST_D1 */
- case 0x0e: /* LCH_BASS_BOOST_D2 */
- case 0x0f: /* LCH_BASS_BOOST_D4 */
- case 0x10: /* LCH_BASS_BOOST_D5 */
- case 0x11: /* RCH_BASS_BOOST_N0 */
- case 0x12: /* RCH_BASS_BOOST_N1 */
- case 0x13: /* RCH_BASS_BOOST_N2 */
- case 0x14: /* RCH_BASS_BOOST_N3 */
- case 0x15: /* RCH_BASS_BOOST_N4 */
- case 0x16: /* RCH_BASS_BOOST_N5 */
- case 0x17: /* RCH_BASS_BOOST_D1 */
- case 0x18: /* RCH_BASS_BOOST_D2 */
- case 0x19: /* RCH_BASS_BOOST_D4 */
- case 0x1a: /* RCH_BASS_BOOST_D5 */
- s->filter_data[reg - 0x07] = value;
- return;
-
- case 0x1b: /* PLL Programmability 1 */
- s->pll[0] = value & 0xfffc;
-#ifdef TSC_VERBOSE
- if (value & ~0xfffc)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into PLL 1\n");
-#endif
- return;
-
- case 0x1c: /* PLL Programmability 2 */
- s->pll[1] = value & 0xfffc;
-#ifdef TSC_VERBOSE
- if (value & ~0xfffc)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into PLL 2\n");
-#endif
- return;
-
- case 0x1d: /* Audio Control 4 */
- s->softstep = !(value & 0x4000);
-#ifdef TSC_VERBOSE
- if (value & ~0x4000)
- fprintf(stderr, "tsc2102_audio_register_write: "
- "wrong value written into Audio 4\n");
-#endif
- return;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_audio_register_write: "
- "no such register: 0x%02x\n", reg);
- }
-}
-
-/* This handles most of the chip logic. */
-static void tsc210x_pin_update(TSC210xState *s)
-{
- int64_t expires;
- bool pin_state;
-
- switch (s->pin_func) {
- case 0:
- pin_state = s->pressure;
- break;
- case 1:
- pin_state = !!s->dav;
- break;
- case 2:
- default:
- pin_state = s->pressure && !s->dav;
- }
-
- if (!s->enabled)
- pin_state = false;
-
- if (pin_state != s->irq) {
- s->irq = pin_state;
- qemu_set_irq(s->pint, !s->irq);
- }
-
- switch (s->nextfunction) {
- case TSC_MODE_XY_SCAN:
- case TSC_MODE_XYZ_SCAN:
- if (!s->pressure)
- return;
- break;
-
- case TSC_MODE_X:
- case TSC_MODE_Y:
- case TSC_MODE_Z:
- if (!s->pressure)
- return;
- /* Fall through */
- case TSC_MODE_BAT1:
- case TSC_MODE_BAT2:
- case TSC_MODE_AUX:
- case TSC_MODE_TEMP1:
- case TSC_MODE_TEMP2:
- if (s->dav)
- s->enabled = false;
- break;
-
- case TSC_MODE_AUX_SCAN:
- case TSC_MODE_PORT_SCAN:
- break;
-
- case TSC_MODE_NO_SCAN:
- case TSC_MODE_XX_DRV:
- case TSC_MODE_YY_DRV:
- case TSC_MODE_YX_DRV:
- default:
- return;
- }
-
- if (!s->enabled || s->busy || s->dav)
- return;
-
- s->busy = true;
- s->precision = s->nextprecision;
- s->function = s->nextfunction;
- expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- (NANOSECONDS_PER_SECOND >> 10);
- timer_mod(s->timer, expires);
-}
-
-static uint16_t tsc210x_read(TSC210xState *s)
-{
- uint16_t ret = 0x0000;
-
- if (!s->command)
- fprintf(stderr, "tsc210x_read: SPI underrun!\n");
-
- switch (s->page) {
- case TSC_DATA_REGISTERS_PAGE:
- ret = tsc2102_data_register_read(s, s->offset);
- if (!s->dav)
- qemu_irq_raise(s->davint);
- break;
- case TSC_CONTROL_REGISTERS_PAGE:
- ret = tsc2102_control_register_read(s, s->offset);
- break;
- case TSC_AUDIO_REGISTERS_PAGE:
- ret = tsc2102_audio_register_read(s, s->offset);
- break;
- default:
- hw_error("tsc210x_read: wrong memory page\n");
- }
-
- tsc210x_pin_update(s);
-
- /* Allow sequential reads. */
- s->offset ++;
- s->state = false;
- return ret;
-}
-
-static void tsc210x_write(TSC210xState *s, uint16_t value)
-{
- /*
- * This is a two-state state machine for reading
- * command and data every second time.
- */
- if (!s->state) {
- s->command = (value >> 15) != 0;
- s->page = (value >> 11) & 0x0f;
- s->offset = (value >> 5) & 0x3f;
- s->state = true;
- } else {
- if (s->command)
- fprintf(stderr, "tsc210x_write: SPI overrun!\n");
- else
- switch (s->page) {
- case TSC_DATA_REGISTERS_PAGE:
- tsc2102_data_register_write(s, s->offset, value);
- break;
- case TSC_CONTROL_REGISTERS_PAGE:
- tsc2102_control_register_write(s, s->offset, value);
- break;
- case TSC_AUDIO_REGISTERS_PAGE:
- tsc2102_audio_register_write(s, s->offset, value);
- break;
- default:
- hw_error("tsc210x_write: wrong memory page\n");
- }
-
- tsc210x_pin_update(s);
- s->state = false;
- }
-}
-
-uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len)
-{
- TSC210xState *s = opaque;
- uint32_t ret = 0;
-
- if (len != 16) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: bad SPI word width %i\n", __func__, len);
- return 0;
- }
-
- /* TODO: sequential reads etc - how do we make sure the host doesn't
- * unintentionally read out a conversion result from a register while
- * transmitting the command word of the next command? */
- if (!value || (s->state && s->command))
- ret = tsc210x_read(s);
- if (value || (s->state && !s->command))
- tsc210x_write(s, value);
-
- return ret;
-}
-
-static void tsc210x_timer_tick(void *opaque)
-{
- TSC210xState *s = opaque;
-
- /* Timer ticked -- a set of conversions has been finished. */
-
- if (!s->busy)
- return;
-
- s->busy = false;
- s->dav |= mode_regs[s->function];
- tsc210x_pin_update(s);
- qemu_irq_lower(s->davint);
-}
-
-static void tsc210x_touchscreen_event(void *opaque,
- int x, int y, int z, int buttons_state)
-{
- TSC210xState *s = opaque;
- int p = s->pressure;
-
- if (buttons_state) {
- s->x = x;
- s->y = y;
- }
- s->pressure = !!buttons_state;
-
- /*
- * Note: We would get better responsiveness in the guest by
- * signaling TS events immediately, but for now we simulate
- * the first conversion delay for sake of correctness.
- */
- if (p != s->pressure)
- tsc210x_pin_update(s);
-}
-
-static void tsc210x_i2s_swallow(TSC210xState *s)
-{
- if (s->dac_voice[0])
- tsc210x_out_flush(s, s->codec.out.len);
- else
- s->codec.out.len = 0;
-}
-
-static void tsc210x_i2s_set_rate(TSC210xState *s, int in, int out)
-{
- s->i2s_tx_rate = out;
- s->i2s_rx_rate = in;
-}
-
-static int tsc210x_pre_save(void *opaque)
-{
- TSC210xState *s = (TSC210xState *) opaque;
- s->now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- return 0;
-}
-
-static int tsc210x_post_load(void *opaque, int version_id)
-{
- TSC210xState *s = (TSC210xState *) opaque;
- int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- if (s->function >= ARRAY_SIZE(mode_regs)) {
- return -EINVAL;
- }
- if (s->nextfunction >= ARRAY_SIZE(mode_regs)) {
- return -EINVAL;
- }
- if (s->precision >= ARRAY_SIZE(resolution)) {
- return -EINVAL;
- }
- if (s->nextprecision >= ARRAY_SIZE(resolution)) {
- return -EINVAL;
- }
-
- s->volume_change -= s->now;
- s->volume_change += now;
- s->powerdown -= s->now;
- s->powerdown += now;
-
- s->busy = timer_pending(s->timer);
- qemu_set_irq(s->pint, !s->irq);
- qemu_set_irq(s->davint, !s->dav);
-
- return 0;
-}
-
-static const VMStateField vmstatefields_tsc210x[] = {
- VMSTATE_BOOL(enabled, TSC210xState),
- VMSTATE_BOOL(host_mode, TSC210xState),
- VMSTATE_BOOL(irq, TSC210xState),
- VMSTATE_BOOL(command, TSC210xState),
- VMSTATE_BOOL(pressure, TSC210xState),
- VMSTATE_BOOL(softstep, TSC210xState),
- VMSTATE_BOOL(state, TSC210xState),
- VMSTATE_UINT16(dav, TSC210xState),
- VMSTATE_INT32(x, TSC210xState),
- VMSTATE_INT32(y, TSC210xState),
- VMSTATE_UINT8(offset, TSC210xState),
- VMSTATE_UINT8(page, TSC210xState),
- VMSTATE_UINT8(filter, TSC210xState),
- VMSTATE_UINT8(pin_func, TSC210xState),
- VMSTATE_UINT8(ref, TSC210xState),
- VMSTATE_UINT8(timing, TSC210xState),
- VMSTATE_UINT8(noise, TSC210xState),
- VMSTATE_UINT8(function, TSC210xState),
- VMSTATE_UINT8(nextfunction, TSC210xState),
- VMSTATE_UINT8(precision, TSC210xState),
- VMSTATE_UINT8(nextprecision, TSC210xState),
- VMSTATE_UINT16(audio_ctrl1, TSC210xState),
- VMSTATE_UINT16(audio_ctrl2, TSC210xState),
- VMSTATE_UINT16(audio_ctrl3, TSC210xState),
- VMSTATE_UINT16_ARRAY(pll, TSC210xState, 3),
- VMSTATE_UINT16(volume, TSC210xState),
- VMSTATE_UINT16(dac_power, TSC210xState),
- VMSTATE_INT64(volume_change, TSC210xState),
- VMSTATE_INT64(powerdown, TSC210xState),
- VMSTATE_INT64(now, TSC210xState),
- VMSTATE_UINT16_ARRAY(filter_data, TSC210xState, 0x14),
- VMSTATE_TIMER_PTR(timer, TSC210xState),
- VMSTATE_END_OF_LIST()
-};
-
-static const VMStateDescription vmstate_tsc2102 = {
- .name = "tsc2102",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_save = tsc210x_pre_save,
- .post_load = tsc210x_post_load,
- .fields = vmstatefields_tsc210x,
-};
-
-static const VMStateDescription vmstate_tsc2301 = {
- .name = "tsc2301",
- .version_id = 1,
- .minimum_version_id = 1,
- .pre_save = tsc210x_pre_save,
- .post_load = tsc210x_post_load,
- .fields = vmstatefields_tsc210x,
-};
-
-static void tsc210x_init(TSC210xState *s,
- const char *name,
- const VMStateDescription *vmsd)
-{
- s->tr[0] = 0;
- s->tr[1] = 1;
- s->tr[2] = 1;
- s->tr[3] = 0;
- s->tr[4] = 1;
- s->tr[5] = 0;
- s->tr[6] = 1;
- s->tr[7] = 0;
-
- s->chip.opaque = s;
- s->chip.send = (void *) tsc210x_write;
- s->chip.receive = (void *) tsc210x_read;
-
- s->codec.opaque = s;
- s->codec.tx_swallow = (void *) tsc210x_i2s_swallow;
- s->codec.set_rate = (void *) tsc210x_i2s_set_rate;
- s->codec.in.fifo = s->in_fifo;
- s->codec.out.fifo = s->out_fifo;
-
- tsc210x_reset(s);
-
- qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, name);
-
- if (current_machine->audiodev) {
- s->card.name = g_strdup(current_machine->audiodev);
- s->card.state = audio_state_by_name(s->card.name, &error_fatal);
- }
- AUD_register_card(s->name, &s->card, &error_fatal);
-
- qemu_register_reset((void *) tsc210x_reset, s);
- vmstate_register(NULL, 0, vmsd, s);
-}
-
-uWireSlave *tsc2102_init(qemu_irq pint)
-{
- TSC210xState *s;
-
- s = g_new0(TSC210xState, 1);
- s->x = 160;
- s->y = 160;
- s->pressure = 0;
- s->precision = s->nextprecision = 0;
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
- s->pint = pint;
- s->model = 0x2102;
- s->name = "tsc2102";
-
- tsc210x_init(s, "QEMU TSC2102-driven Touchscreen", &vmstate_tsc2102);
-
- return &s->chip;
-}
-
-uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav)
-{
- TSC210xState *s;
-
- s = g_new0(TSC210xState, 1);
- s->x = 400;
- s->y = 240;
- s->pressure = 0;
- s->precision = s->nextprecision = 0;
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
- s->pint = penirq;
- s->kbint = kbirq;
- s->davint = dav;
- s->model = 0x2301;
- s->name = "tsc2301";
-
- tsc210x_init(s, "QEMU TSC2301-driven Touchscreen", &vmstate_tsc2301);
-
- return &s->chip;
-}
-
-I2SCodec *tsc210x_codec(uWireSlave *chip)
-{
- TSC210xState *s = (TSC210xState *) chip->opaque;
-
- return &s->codec;
-}
-
-/*
- * Use tslib generated calibration data to generate ADC input values
- * from the touchscreen. Assuming 12-bit precision was used during
- * tslib calibration.
- */
-void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info)
-{
- TSC210xState *s = (TSC210xState *) chip->opaque;
-#if 0
- int64_t ltr[8];
-
- ltr[0] = (int64_t) info->a[1] * info->y;
- ltr[1] = (int64_t) info->a[4] * info->x;
- ltr[2] = (int64_t) info->a[1] * info->a[3] -
- (int64_t) info->a[4] * info->a[0];
- ltr[3] = (int64_t) info->a[2] * info->a[4] -
- (int64_t) info->a[5] * info->a[1];
- ltr[4] = (int64_t) info->a[0] * info->y;
- ltr[5] = (int64_t) info->a[3] * info->x;
- ltr[6] = (int64_t) info->a[4] * info->a[0] -
- (int64_t) info->a[1] * info->a[3];
- ltr[7] = (int64_t) info->a[2] * info->a[3] -
- (int64_t) info->a[5] * info->a[0];
-
- /* Avoid integer overflow */
- s->tr[0] = ltr[0] >> 11;
- s->tr[1] = ltr[1] >> 11;
- s->tr[2] = muldiv64(ltr[2], 1, info->a[6]);
- s->tr[3] = muldiv64(ltr[3], 1 << 4, ltr[2]);
- s->tr[4] = ltr[4] >> 11;
- s->tr[5] = ltr[5] >> 11;
- s->tr[6] = muldiv64(ltr[6], 1, info->a[6]);
- s->tr[7] = muldiv64(ltr[7], 1 << 4, ltr[6]);
-#else
-
- /* This version assumes touchscreen X & Y axis are parallel or
- * perpendicular to LCD's X & Y axis in some way. */
- if (abs(info->a[0]) > abs(info->a[1])) {
- s->tr[0] = 0;
- s->tr[1] = -info->a[6] * info->x;
- s->tr[2] = info->a[0];
- s->tr[3] = -info->a[2] / info->a[0];
- s->tr[4] = info->a[6] * info->y;
- s->tr[5] = 0;
- s->tr[6] = info->a[4];
- s->tr[7] = -info->a[5] / info->a[4];
- } else {
- s->tr[0] = info->a[6] * info->y;
- s->tr[1] = 0;
- s->tr[2] = info->a[1];
- s->tr[3] = -info->a[2] / info->a[1];
- s->tr[4] = 0;
- s->tr[5] = -info->a[6] * info->x;
- s->tr[6] = info->a[3];
- s->tr[7] = -info->a[5] / info->a[3];
- }
-
- s->tr[0] >>= 11;
- s->tr[1] >>= 11;
- s->tr[3] <<= 4;
- s->tr[4] >>= 11;
- s->tr[5] >>= 11;
- s->tr[7] <<= 4;
-#endif
-}
-
-void tsc210x_key_event(uWireSlave *chip, int key, int down)
-{
- TSC210xState *s = (TSC210xState *) chip->opaque;
-
- if (down)
- s->kb.down |= 1 << key;
- else
- s->kb.down &= ~(1 << key);
-
- if (down && (s->kb.down & ~s->kb.mask) && !s->kb.intr) {
- s->kb.intr = 1;
- qemu_irq_lower(s->kbint);
- } else if (s->kb.intr && !(s->kb.down & ~s->kb.mask) &&
- !(s->kb.mode & 1)) {
- s->kb.intr = 0;
- qemu_irq_raise(s->kbint);
- }
-}
diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c
index 45e4d4c..d986c3c 100644
--- a/hw/input/virtio-input-hid.c
+++ b/hw/input/virtio-input-hid.c
@@ -237,13 +237,12 @@ static void virtio_input_hid_handle_status(VirtIOInput *vinput,
}
}
-static Property virtio_input_hid_properties[] = {
+static const Property virtio_input_hid_properties[] = {
DEFINE_PROP_STRING("display", VirtIOInputHID, display),
DEFINE_PROP_UINT32("head", VirtIOInputHID, head, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_input_hid_class_init(ObjectClass *klass, void *data)
+static void virtio_input_hid_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass);
@@ -380,12 +379,11 @@ static struct virtio_input_config virtio_mouse_config_v2[] = {
{ /* end of list */ },
};
-static Property virtio_mouse_properties[] = {
+static const Property virtio_mouse_properties[] = {
DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_mouse_class_init(ObjectClass *klass, void *data)
+static void virtio_mouse_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -505,12 +503,11 @@ static struct virtio_input_config virtio_tablet_config_v2[] = {
{ /* end of list */ },
};
-static Property virtio_tablet_properties[] = {
+static const Property virtio_tablet_properties[] = {
DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_tablet_class_init(ObjectClass *klass, void *data)
+static void virtio_tablet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/input/virtio-input-host.c b/hw/input/virtio-input-host.c
index fea7139..bbfee9d 100644
--- a/hw/input/virtio-input-host.c
+++ b/hw/input/virtio-input-host.c
@@ -178,7 +178,6 @@ static void virtio_input_host_realize(DeviceState *dev, Error **errp)
err_close:
close(vih->fd);
vih->fd = -1;
- return;
}
static void virtio_input_host_unrealize(DeviceState *dev)
@@ -221,12 +220,11 @@ static const VMStateDescription vmstate_virtio_input_host = {
.unmigratable = 1,
};
-static Property virtio_input_host_properties[] = {
+static const Property virtio_input_host_properties[] = {
DEFINE_PROP_STRING("evdev", VirtIOInputHost, evdev),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_input_host_class_init(ObjectClass *klass, void *data)
+static void virtio_input_host_class_init(ObjectClass *klass, const void *data)
{
VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c
index 3bcdae4..a3f554f 100644
--- a/hw/input/virtio-input.c
+++ b/hw/input/virtio-input.c
@@ -189,7 +189,7 @@ static uint64_t virtio_input_get_features(VirtIODevice *vdev, uint64_t f,
return f;
}
-static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val)
+static int virtio_input_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev);
VirtIOInput *vinput = VIRTIO_INPUT(vdev);
@@ -202,6 +202,7 @@ static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val)
}
}
}
+ return 0;
}
static void virtio_input_reset(VirtIODevice *vdev)
@@ -300,12 +301,11 @@ static const VMStateDescription vmstate_virtio_input = {
.post_load = virtio_input_post_load,
};
-static Property virtio_input_properties[] = {
+static const Property virtio_input_properties[] = {
DEFINE_PROP_STRING("serial", VirtIOInput, serial),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_input_class_init(ObjectClass *klass, void *data)
+static void virtio_input_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
index 58b6d3a..7547528 100644
--- a/hw/intc/Kconfig
+++ b/hw/intc/Kconfig
@@ -23,13 +23,13 @@ config APIC
config ARM_GIC
bool
- select ARM_GICV3_TCG if TCG
+ select ARM_GICV3 if TCG
select ARM_GIC_KVM if KVM
select MSI_NONBROKEN
-config ARM_GICV3_TCG
+config ARM_GICV3
bool
- depends on ARM_GIC && TCG
+ depends on ARM_GIC
config ARM_GIC_KVM
bool
@@ -87,8 +87,16 @@ config GOLDFISH_PIC
config M68K_IRQC
bool
+config LOONGSON_IPI_COMMON
+ bool
+
config LOONGSON_IPI
bool
+ select LOONGSON_IPI_COMMON
+
+config LOONGARCH_IPI
+ bool
+ select LOONGSON_IPI_COMMON
config LOONGARCH_PCH_PIC
bool
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c
index cea559c..0409734 100644
--- a/hw/intc/allwinner-a10-pic.c
+++ b/hw/intc/allwinner-a10-pic.c
@@ -135,7 +135,7 @@ static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value,
static const MemoryRegionOps aw_a10_pic_ops = {
.read = aw_a10_pic_read,
.write = aw_a10_pic_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static const VMStateDescription vmstate_aw_a10_pic = {
@@ -187,11 +187,11 @@ static void aw_a10_pic_reset(DeviceState *d)
}
}
-static void aw_a10_pic_class_init(ObjectClass *klass, void *data)
+static void aw_a10_pic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = aw_a10_pic_reset;
+ device_class_set_legacy_reset(dc, aw_a10_pic_reset);
dc->desc = "allwinner a10 pic";
dc->vmsd = &vmstate_aw_a10_pic;
}
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 4186c57..bcb1035 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -26,7 +26,7 @@
#include "hw/intc/kvm_irqcount.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"
#include "qapi/error.h"
@@ -350,9 +350,8 @@ static int apic_set_base(APICCommonState *s, uint64_t val)
return -1;
}
- s->apicbase = (val & 0xfffff000) |
+ s->apicbase = (val & MSR_IA32_APICBASE_BASE) |
(s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
- /* if disabled, cannot be enabled again */
if (!(val & MSR_IA32_APICBASE_ENABLE)) {
s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
cpu_clear_apic_feature(&s->cpu->env);
@@ -1177,7 +1176,7 @@ static void apic_unrealize(DeviceState *dev)
local_apics[s->initial_apic_id] = NULL;
}
-static void apic_class_init(ObjectClass *klass, void *data)
+static void apic_class_init(ObjectClass *klass, const void *data)
{
APICCommonClass *k = APIC_COMMON_CLASS(klass);
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index c13cdd7..37a7a70 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -28,7 +28,7 @@
#include "hw/intc/kvm_irqcount.h"
#include "trace.h"
#include "hw/boards.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
@@ -408,13 +408,12 @@ static const VMStateDescription vmstate_apic_common = {
}
};
-static Property apic_properties_common[] = {
+static const Property apic_properties_common[] = {
DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14),
DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT,
true),
DEFINE_PROP_BOOL("legacy-instance-id", APICCommonState, legacy_instance_id,
false),
- DEFINE_PROP_END_OF_LIST(),
};
static void apic_common_get_id(Object *obj, Visitor *v, const char *name,
@@ -467,11 +466,11 @@ static void apic_common_initfn(Object *obj)
apic_common_set_id, NULL, NULL);
}
-static void apic_common_class_init(ObjectClass *klass, void *data)
+static void apic_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = apic_reset_common;
+ device_class_set_legacy_reset(dc, apic_reset_common);
device_class_set_props(dc, apic_properties_common);
dc->realize = apic_common_realize;
dc->unrealize = apic_common_unrealize;
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 8068324..899f133 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -27,8 +27,8 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
-#include "sysemu/kvm.h"
-#include "sysemu/qtest.h"
+#include "system/kvm.h"
+#include "system/qtest.h"
/* #define DEBUG_GIC */
@@ -59,7 +59,7 @@ static const uint8_t gic_id_gicv2[] = {
static inline int gic_get_current_cpu(GICState *s)
{
if (!qtest_enabled() && s->num_cpu > 1) {
- return current_cpu->cpu_index;
+ return current_cpu->cpu_index - s->first_cpu_index;
}
return 0;
}
@@ -1263,9 +1263,14 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
trace_gic_enable_irq(irq + i);
}
GIC_DIST_SET_ENABLED(irq + i, cm);
- /* If a raised level triggered IRQ enabled then mark
- is as pending. */
- if (GIC_DIST_TEST_LEVEL(irq + i, mask)
+ /*
+ * If a raised level triggered IRQ enabled then mark
+ * it as pending on 11MPCore. For other GIC revisions we
+ * handle the "level triggered and line asserted" check
+ * at the other end in gic_test_pending().
+ */
+ if (s->revision == REV_11MPCORE
+ && GIC_DIST_TEST_LEVEL(irq + i, mask)
&& !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
GIC_DIST_SET_PENDING(irq + i, mask);
@@ -2157,7 +2162,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
}
-static void arm_gic_class_init(ObjectClass *klass, void *data)
+static void arm_gic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ARMGICClass *agc = ARM_GIC_CLASS(klass);
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index 53fb2c4..ed5be05 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -26,7 +26,7 @@
#include "hw/arm/linux-boot-if.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
static int gic_pre_save(void *opaque)
{
@@ -348,8 +348,9 @@ static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
}
}
-static Property arm_gic_common_properties[] = {
+static const Property arm_gic_common_properties[] = {
DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1),
+ DEFINE_PROP_UINT32("first-cpu-index", GICState, first_cpu_index, 0),
DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32),
/* Revision can be 1 or 2 for GIC architecture specification
* versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC.
@@ -360,10 +361,9 @@ static Property arm_gic_common_properties[] = {
/* True if the GIC should implement the virtualization extensions */
DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0),
DEFINE_PROP_UINT32("num-priority-bits", GICState, n_prio_bits, 8),
- DEFINE_PROP_END_OF_LIST(),
};
-static void arm_gic_common_class_init(ObjectClass *klass, void *data)
+static void arm_gic_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -383,7 +383,7 @@ static const TypeInfo arm_gic_common_type = {
.class_size = sizeof(ARMGICCommonClass),
.class_init = arm_gic_common_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo []) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ARM_LINUX_BOOT_IF },
{ },
},
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 53defee..1e9232f 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -23,7 +23,7 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "migration/blocker.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_arm.h"
#include "gic_internal.h"
#include "vgic_common.h"
@@ -547,17 +547,10 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true,
&error_abort);
}
- } else if (kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
+ } else {
error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
error_append_hint(errp,
"Perhaps the host CPU does not support GICv2?\n");
- } else if (ret != -ENODEV && ret != -ENOTSUP) {
- /*
- * Very ancient kernel without KVM_CAP_DEVICE_CTRL: assume that
- * ENODEV or ENOTSUP mean "can't create GICv2 with KVM_CREATE_DEVICE",
- * and that we will get a GICv2 via KVM_CREATE_IRQCHIP.
- */
- error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
return;
}
@@ -591,7 +584,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
}
}
-static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
+static void kvm_arm_gic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c
index d564b85..cef0688 100644
--- a/hw/intc/arm_gicv2m.c
+++ b/hw/intc/arm_gicv2m.c
@@ -31,7 +31,7 @@
#include "hw/irq.h"
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"
@@ -170,13 +170,12 @@ static void gicv2m_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property gicv2m_properties[] = {
+static const Property gicv2m_properties[] = {
DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0),
DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64),
- DEFINE_PROP_END_OF_LIST(),
};
-static void gicv2m_class_init(ObjectClass *klass, void *data)
+static void gicv2m_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index 58e18ff..6059ce9 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -452,7 +452,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
gicv3_init_cpuif(s);
}
-static void arm_gicv3_class_init(ObjectClass *klass, void *data)
+static void arm_gicv3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index bd50a1b..1cee681 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -31,7 +31,7 @@
#include "migration/vmstate.h"
#include "gicv3_internal.h"
#include "hw/arm/linux-boot-if.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
@@ -605,7 +605,7 @@ static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
}
}
-static Property arm_gicv3_common_properties[] = {
+static const Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
@@ -621,10 +621,9 @@ static Property arm_gicv3_common_properties[] = {
redist_region_count, qdev_prop_uint32, uint32_t),
DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
+static void arm_gicv3_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -645,7 +644,7 @@ static const TypeInfo arm_gicv3_common_type = {
.class_init = arm_gicv3_common_class_init,
.instance_finalize = arm_gicv3_finalize,
.abstract = true,
- .interfaces = (InterfaceInfo []) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ARM_LINUX_BOOT_IF },
{ },
},
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index bdb13b0..4b4cf09 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -22,8 +22,9 @@
#include "cpu.h"
#include "target/arm/cpregs.h"
#include "target/arm/cpu-features.h"
-#include "sysemu/tcg.h"
-#include "sysemu/qtest.h"
+#include "target/arm/internals.h"
+#include "system/tcg.h"
+#include "system/qtest.h"
/*
* Special case return value from hppvi_index(); must be larger than
@@ -583,7 +584,6 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
gicv3_cpuif_virt_irq_fiq_update(cs);
- return;
}
static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -781,7 +781,7 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
if (nmi) {
cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
} else {
- cs->ich_apr[grp][regno] |= (1 << regbit);
+ cs->ich_apr[grp][regno] |= (1U << regbit);
}
}
@@ -793,7 +793,7 @@ static void icv_activate_vlpi(GICv3CPUState *cs)
int regno = aprbit / 32;
int regbit = aprbit % 32;
- cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
+ cs->ich_apr[cs->hppvlpi.grp][regno] |= (1U << regbit);
gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}
@@ -1170,7 +1170,7 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
if (nmi) {
cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
} else {
- cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
+ cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit);
}
if (irq < GIC_INTERNAL) {
@@ -2291,7 +2291,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
r = CP_ACCESS_TRAP_EL3;
break;
case 3:
- if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ if (!arm_is_el3_or_mon(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -2300,9 +2300,6 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
}
}
- if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
- r = CP_ACCESS_TRAP;
- }
return r;
}
@@ -2356,7 +2353,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
r = CP_ACCESS_TRAP_EL3;
break;
case 3:
- if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ if (!arm_is_el3_or_mon(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -2365,9 +2362,6 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
}
}
- if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
- r = CP_ACCESS_TRAP;
- }
return r;
}
@@ -2395,7 +2389,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
r = CP_ACCESS_TRAP_EL3;
break;
case 3:
- if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ if (!arm_is_el3_or_mon(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -2404,9 +2398,6 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
}
}
- if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
- r = CP_ACCESS_TRAP;
- }
return r;
}
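The (1 << regbit) to (1U << regbit) changes above matter when regbit is 31:
shifting a 1 into the sign bit of a signed int is undefined behaviour in C,
while the unsigned shift is well defined. A minimal illustration (assuming a
32-bit int, as on QEMU's supported hosts):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int regbit = 31;
        uint32_t apr = 0;

        /* (1 << 31) shifts into the sign bit of a signed int: undefined behaviour. */
        /* apr |= (1 << regbit); */

        /* (1U << 31) is a plain unsigned shift and is well defined. */
        apr |= (1U << regbit);

        printf("0x%08x\n", (unsigned)apr);   /* 0x80000000 */
        return 0;
    }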
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index bf31158..577b445 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -465,7 +465,7 @@ static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who,
static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
int irqlevel)
{
- CTEntry cte;
+ CTEntry cte = {};
ItsCmdResult cmdres;
cmdres = lookup_cte(s, __func__, ite->icid, &cte);
@@ -479,7 +479,7 @@ static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite,
int irqlevel)
{
- VTEntry vte;
+ VTEntry vte = {};
ItsCmdResult cmdres;
cmdres = lookup_vte(s, __func__, ite->vpeid, &vte);
@@ -514,8 +514,8 @@ static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite,
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
uint32_t eventid, ItsCmdType cmd)
{
- DTEntry dte;
- ITEntry ite;
+ DTEntry dte = {};
+ ITEntry ite = {};
ItsCmdResult cmdres;
int irqlevel;
@@ -583,8 +583,8 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
uint32_t pIntid = 0;
uint64_t num_eventids;
uint16_t icid = 0;
- DTEntry dte;
- ITEntry ite;
+ DTEntry dte = {};
+ ITEntry ite = {};
devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
eventid = cmdpkt[1] & EVENTID_MASK;
@@ -651,8 +651,8 @@ static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
{
uint32_t devid, eventid, vintid, doorbell, vpeid;
uint32_t num_eventids;
- DTEntry dte;
- ITEntry ite;
+ DTEntry dte = {};
+ ITEntry ite = {};
if (!its_feature_virtual(s)) {
return CMD_CONTINUE;
@@ -761,7 +761,7 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
uint16_t icid;
- CTEntry cte;
+ CTEntry cte = {};
icid = cmdpkt[2] & ICID_MASK;
cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
@@ -822,7 +822,7 @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
uint32_t devid;
- DTEntry dte;
+ DTEntry dte = {};
devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
dte.size = cmdpkt[1] & SIZE_MASK;
@@ -886,9 +886,9 @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
uint32_t devid, eventid;
uint16_t new_icid;
- DTEntry dte;
- CTEntry old_cte, new_cte;
- ITEntry old_ite;
+ DTEntry dte = {};
+ CTEntry old_cte = {}, new_cte = {};
+ ITEntry old_ite = {};
ItsCmdResult cmdres;
devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
@@ -965,7 +965,7 @@ static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
- VTEntry vte;
+ VTEntry vte = {};
uint32_t vpeid;
if (!its_feature_virtual(s)) {
@@ -1030,7 +1030,7 @@ static void vmovp_callback(gpointer data, gpointer opaque)
*/
GICv3ITSState *s = data;
VmovpCallbackData *cbdata = opaque;
- VTEntry vte;
+ VTEntry vte = {};
ItsCmdResult cmdres;
cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte);
@@ -1085,9 +1085,9 @@ static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
uint32_t devid, eventid, vpeid, doorbell;
bool doorbell_valid;
- DTEntry dte;
- ITEntry ite;
- VTEntry old_vte, new_vte;
+ DTEntry dte = {};
+ ITEntry ite = {};
+ VTEntry old_vte = {}, new_vte = {};
ItsCmdResult cmdres;
if (!its_feature_virtual(s)) {
@@ -1186,10 +1186,10 @@ static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt)
static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt)
{
uint32_t devid, eventid;
- ITEntry ite;
- DTEntry dte;
- CTEntry cte;
- VTEntry vte;
+ ITEntry ite = {};
+ DTEntry dte = {};
+ CTEntry cte = {};
+ VTEntry vte = {};
ItsCmdResult cmdres;
devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID);
@@ -2002,13 +2002,12 @@ static void gicv3_its_post_load(GICv3ITSState *s)
}
}
-static Property gicv3_its_props[] = {
+static const Property gicv3_its_props[] = {
DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
GICv3State *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void gicv3_its_class_init(ObjectClass *klass, void *data)
+static void gicv3_its_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
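The ITS command handlers above now zero-initialize their table-entry locals
(CTEntry cte = {}; and friends), so every field has a defined value even when a
lookup returns early without filling the entry in. A minimal illustration of the
idiom, using a hypothetical struct rather than the QEMU types:

    #include <stdio.h>

    struct cte {            /* hypothetical stand-in for the ITS CTEntry */
        unsigned valid;
        unsigned rdbase;
    };

    int main(void)
    {
        /* Empty braces zero every member (a GNU extension, standard in C23),
         * so the entry is fully defined even if a lookup bails out before
         * writing to it. */
        struct cte cte = {};

        printf("valid=%u rdbase=%u\n", cte.valid, cte.rdbase);
        return 0;
    }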
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
index 0b97362..e946e3f 100644
--- a/hw/intc/arm_gicv3_its_common.c
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -24,7 +24,7 @@
#include "hw/intc/arm_gicv3_its_common.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
static int gicv3_its_pre_save(void *opaque)
{
@@ -135,7 +135,7 @@ static void gicv3_its_common_reset_hold(Object *obj, ResetType type)
memset(&s->baser, 0, sizeof(s->baser));
}
-static void gicv3_its_common_class_init(ObjectClass *klass, void *data)
+static void gicv3_its_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index 35539c0..9812d50 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -24,8 +24,8 @@
#include "qemu/error-report.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "hw/qdev-properties.h"
-#include "sysemu/runstate.h"
-#include "sysemu/kvm.h"
+#include "system/runstate.h"
+#include "system/kvm.h"
#include "kvm_arm.h"
#include "migration/blocker.h"
#include "qom/object.h"
@@ -234,13 +234,12 @@ static void kvm_arm_its_reset_hold(Object *obj, ResetType type)
}
}
-static Property kvm_arm_its_props[] = {
+static const Property kvm_arm_its_props[] = {
DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3",
GICv3State *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void kvm_arm_its_class_init(ObjectClass *klass, void *data)
+static void kvm_arm_its_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 9ea6b8e..3be3bf6 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -24,8 +24,8 @@
#include "hw/intc/arm_gicv3_common.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
#include "kvm_arm.h"
#include "gicv3_internal.h"
#include "vgic_common.h"
@@ -893,7 +893,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
}
}
-static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
+static void kvm_arm_gicv3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 404a445..83ff74f 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -18,11 +18,11 @@
#include "hw/intc/armv7m_nvic.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
-#include "sysemu/tcg.h"
-#include "sysemu/runstate.h"
+#include "system/tcg.h"
+#include "system/runstate.h"
#include "target/arm/cpu.h"
#include "target/arm/cpu-features.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/memop.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -2569,7 +2569,7 @@ static const VMStateDescription vmstate_nvic = {
}
};
-static Property props_nvic[] = {
+static const Property props_nvic[] = {
/* Number of external IRQ lines (so excluding the 16 internal exceptions) */
DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
/*
@@ -2577,7 +2577,6 @@ static Property props_nvic[] = {
* to use a reasonable default.
*/
DEFINE_PROP_UINT8("num-prio-bits", NVICState, num_prio_bits, 0),
- DEFINE_PROP_END_OF_LIST()
};
static void armv7m_nvic_reset(DeviceState *dev)
@@ -2731,13 +2730,13 @@ static void armv7m_nvic_instance_init(Object *obj)
qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}
-static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
+static void armv7m_nvic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_nvic;
device_class_set_props(dc, props_nvic);
- dc->reset = armv7m_nvic_reset;
+ device_class_set_legacy_reset(dc, armv7m_nvic_reset);
dc->realize = armv7m_nvic_realize;
}
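The dc->reset assignments are replaced by device_class_set_legacy_reset()
throughout this series; the old single-shot reset handler is presumably wrapped
so that it runs as one phase of QEMU's Resettable (enter/hold/exit) sequence. A
rough standalone model of that wrapping, with hypothetical types rather than the
QEMU implementation:

    #include <stdio.h>

    typedef void (*legacy_reset_fn)(void *dev);

    /* Hypothetical per-class reset description: three phases plus an
     * optional stashed legacy handler. */
    struct reset_class {
        void (*enter)(void *dev);
        void (*hold)(void *dev);
        void (*exit)(void *dev);
        legacy_reset_fn legacy;
    };

    /* Install a legacy handler so that it runs during the "hold" phase. */
    static void set_legacy_reset(struct reset_class *rc, legacy_reset_fn fn)
    {
        rc->legacy = fn;
    }

    /* Drive a full reset: enter, hold (where the legacy handler runs), exit. */
    static void resettable_reset(const struct reset_class *rc, void *dev)
    {
        if (rc->enter)  rc->enter(dev);
        if (rc->hold)   rc->hold(dev);
        if (rc->legacy) rc->legacy(dev);   /* legacy handler folded into "hold" */
        if (rc->exit)   rc->exit(dev);
    }

    static void my_device_reset(void *dev)
    {
        (void)dev;
        printf("legacy reset handler ran\n");
    }

    int main(void)
    {
        struct reset_class rc = {0};

        set_legacy_reset(&rc, my_device_reset);
        resettable_reset(&rc, NULL);
        return 0;
    }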
diff --git a/hw/intc/aspeed_intc.c b/hw/intc/aspeed_intc.c
index 7515558..5cd786d 100644
--- a/hw/intc/aspeed_intc.c
+++ b/hw/intc/aspeed_intc.c
@@ -14,72 +14,291 @@
#include "hw/registerfields.h"
#include "qapi/error.h"
-/* INTC Registers */
-REG32(GICINT128_EN, 0x1000)
-REG32(GICINT128_STATUS, 0x1004)
-REG32(GICINT129_EN, 0x1100)
-REG32(GICINT129_STATUS, 0x1104)
-REG32(GICINT130_EN, 0x1200)
-REG32(GICINT130_STATUS, 0x1204)
-REG32(GICINT131_EN, 0x1300)
-REG32(GICINT131_STATUS, 0x1304)
-REG32(GICINT132_EN, 0x1400)
-REG32(GICINT132_STATUS, 0x1404)
-REG32(GICINT133_EN, 0x1500)
-REG32(GICINT133_STATUS, 0x1504)
-REG32(GICINT134_EN, 0x1600)
-REG32(GICINT134_STATUS, 0x1604)
-REG32(GICINT135_EN, 0x1700)
-REG32(GICINT135_STATUS, 0x1704)
-REG32(GICINT136_EN, 0x1800)
-REG32(GICINT136_STATUS, 0x1804)
-
-#define GICINT_STATUS_BASE R_GICINT128_STATUS
-
-static void aspeed_intc_update(AspeedINTCState *s, int irq, int level)
+/*
+ * INTC Registers
+ *
+ * The register offsets below are 0x1000 lower than in the datasheet
+ * because this model's memory region starts at 0x1000.
+ *
+ */
+REG32(GICINT128_EN, 0x000)
+REG32(GICINT128_STATUS, 0x004)
+REG32(GICINT129_EN, 0x100)
+REG32(GICINT129_STATUS, 0x104)
+REG32(GICINT130_EN, 0x200)
+REG32(GICINT130_STATUS, 0x204)
+REG32(GICINT131_EN, 0x300)
+REG32(GICINT131_STATUS, 0x304)
+REG32(GICINT132_EN, 0x400)
+REG32(GICINT132_STATUS, 0x404)
+REG32(GICINT133_EN, 0x500)
+REG32(GICINT133_STATUS, 0x504)
+REG32(GICINT134_EN, 0x600)
+REG32(GICINT134_STATUS, 0x604)
+REG32(GICINT135_EN, 0x700)
+REG32(GICINT135_STATUS, 0x704)
+REG32(GICINT136_EN, 0x800)
+REG32(GICINT136_STATUS, 0x804)
+REG32(GICINT192_201_EN, 0xB00)
+REG32(GICINT192_201_STATUS, 0xB04)
+
+/*
+ * INTCIO Registers
+ *
+ * The register offsets below are 0x100 lower than in the datasheet
+ * because this model's memory region starts at 0x100.
+ *
+ */
+REG32(GICINT192_EN, 0x00)
+REG32(GICINT192_STATUS, 0x04)
+REG32(GICINT193_EN, 0x10)
+REG32(GICINT193_STATUS, 0x14)
+REG32(GICINT194_EN, 0x20)
+REG32(GICINT194_STATUS, 0x24)
+REG32(GICINT195_EN, 0x30)
+REG32(GICINT195_STATUS, 0x34)
+REG32(GICINT196_EN, 0x40)
+REG32(GICINT196_STATUS, 0x44)
+REG32(GICINT197_EN, 0x50)
+REG32(GICINT197_STATUS, 0x54)
+
+/*
+ * SSP INTC Registers
+ */
+REG32(SSPINT128_EN, 0x2000)
+REG32(SSPINT128_STATUS, 0x2004)
+REG32(SSPINT129_EN, 0x2100)
+REG32(SSPINT129_STATUS, 0x2104)
+REG32(SSPINT130_EN, 0x2200)
+REG32(SSPINT130_STATUS, 0x2204)
+REG32(SSPINT131_EN, 0x2300)
+REG32(SSPINT131_STATUS, 0x2304)
+REG32(SSPINT132_EN, 0x2400)
+REG32(SSPINT132_STATUS, 0x2404)
+REG32(SSPINT133_EN, 0x2500)
+REG32(SSPINT133_STATUS, 0x2504)
+REG32(SSPINT134_EN, 0x2600)
+REG32(SSPINT134_STATUS, 0x2604)
+REG32(SSPINT135_EN, 0x2700)
+REG32(SSPINT135_STATUS, 0x2704)
+REG32(SSPINT136_EN, 0x2800)
+REG32(SSPINT136_STATUS, 0x2804)
+REG32(SSPINT137_EN, 0x2900)
+REG32(SSPINT137_STATUS, 0x2904)
+REG32(SSPINT138_EN, 0x2A00)
+REG32(SSPINT138_STATUS, 0x2A04)
+REG32(SSPINT160_169_EN, 0x2B00)
+REG32(SSPINT160_169_STATUS, 0x2B04)
+
+/*
+ * SSP INTCIO Registers
+ */
+REG32(SSPINT160_EN, 0x180)
+REG32(SSPINT160_STATUS, 0x184)
+REG32(SSPINT161_EN, 0x190)
+REG32(SSPINT161_STATUS, 0x194)
+REG32(SSPINT162_EN, 0x1A0)
+REG32(SSPINT162_STATUS, 0x1A4)
+REG32(SSPINT163_EN, 0x1B0)
+REG32(SSPINT163_STATUS, 0x1B4)
+REG32(SSPINT164_EN, 0x1C0)
+REG32(SSPINT164_STATUS, 0x1C4)
+REG32(SSPINT165_EN, 0x1D0)
+REG32(SSPINT165_STATUS, 0x1D4)
+
+/*
+ * TSP INTC Registers
+ */
+REG32(TSPINT128_EN, 0x3000)
+REG32(TSPINT128_STATUS, 0x3004)
+REG32(TSPINT129_EN, 0x3100)
+REG32(TSPINT129_STATUS, 0x3104)
+REG32(TSPINT130_EN, 0x3200)
+REG32(TSPINT130_STATUS, 0x3204)
+REG32(TSPINT131_EN, 0x3300)
+REG32(TSPINT131_STATUS, 0x3304)
+REG32(TSPINT132_EN, 0x3400)
+REG32(TSPINT132_STATUS, 0x3404)
+REG32(TSPINT133_EN, 0x3500)
+REG32(TSPINT133_STATUS, 0x3504)
+REG32(TSPINT134_EN, 0x3600)
+REG32(TSPINT134_STATUS, 0x3604)
+REG32(TSPINT135_EN, 0x3700)
+REG32(TSPINT135_STATUS, 0x3704)
+REG32(TSPINT136_EN, 0x3800)
+REG32(TSPINT136_STATUS, 0x3804)
+REG32(TSPINT137_EN, 0x3900)
+REG32(TSPINT137_STATUS, 0x3904)
+REG32(TSPINT138_EN, 0x3A00)
+REG32(TSPINT138_STATUS, 0x3A04)
+REG32(TSPINT160_169_EN, 0x3B00)
+REG32(TSPINT160_169_STATUS, 0x3B04)
+
+/*
+ * TSP INTCIO Registers
+ */
+
+REG32(TSPINT160_EN, 0x200)
+REG32(TSPINT160_STATUS, 0x204)
+REG32(TSPINT161_EN, 0x210)
+REG32(TSPINT161_STATUS, 0x214)
+REG32(TSPINT162_EN, 0x220)
+REG32(TSPINT162_STATUS, 0x224)
+REG32(TSPINT163_EN, 0x230)
+REG32(TSPINT163_STATUS, 0x234)
+REG32(TSPINT164_EN, 0x240)
+REG32(TSPINT164_STATUS, 0x244)
+REG32(TSPINT165_EN, 0x250)
+REG32(TSPINT165_STATUS, 0x254)
+
+static const AspeedINTCIRQ *aspeed_intc_get_irq(AspeedINTCClass *aic,
+ uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < aic->irq_table_count; i++) {
+ if (aic->irq_table[i].enable_reg == reg ||
+ aic->irq_table[i].status_reg == reg) {
+ return &aic->irq_table[i];
+ }
+ }
+
+    /* Invalid reg. */
+ g_assert_not_reached();
+}
+
+/*
+ * Update the state of an interrupt controller pin by setting
+ * the specified output pin to the given level.
+ * The input pin index should be between 0 and the number of input pins.
+ * The output pin index should be between 0 and the number of output pins.
+ */
+static void aspeed_intc_update(AspeedINTCState *s, int inpin_idx,
+ int outpin_idx, int level)
{
AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
+ const char *name = object_get_typename(OBJECT(s));
- if (irq >= aic->num_ints) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n",
- __func__, irq);
- return;
+ assert((outpin_idx < aic->num_outpins) && (inpin_idx < aic->num_inpins));
+
+ trace_aspeed_intc_update_irq(name, inpin_idx, outpin_idx, level);
+ qemu_set_irq(s->output_pins[outpin_idx], level);
+}
+
+static void aspeed_intc_set_irq_handler(AspeedINTCState *s,
+ const AspeedINTCIRQ *intc_irq,
+ uint32_t select)
+{
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t status_reg;
+ int outpin_idx;
+ int inpin_idx;
+
+ status_reg = intc_irq->status_reg;
+ outpin_idx = intc_irq->outpin_idx;
+ inpin_idx = intc_irq->inpin_idx;
+
+ if ((s->mask[inpin_idx] & select) || (s->regs[status_reg] & select)) {
+ /*
+         * a. A non-zero mask means an ISR is in progress: a source
+         *    interrupt routine is still executing.
+         * b. A non-zero status register means a previous source
+         *    interrupt has not been handled yet.
+         *
+         * In either case, save the source interrupt as pending.
+ */
+ s->pending[inpin_idx] |= select;
+ trace_aspeed_intc_pending_irq(name, inpin_idx, s->pending[inpin_idx]);
+ } else {
+ /*
+         * Notify firmware which source interrupts arrived
+         * by setting the status register.
+ */
+ s->regs[status_reg] = select;
+ trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx,
+ s->regs[status_reg]);
+ aspeed_intc_update(s, inpin_idx, outpin_idx, 1);
}
+}
+
+static void aspeed_intc_set_irq_handler_multi_outpins(AspeedINTCState *s,
+ const AspeedINTCIRQ *intc_irq, uint32_t select)
+{
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t status_reg;
+ int num_outpins;
+ int outpin_idx;
+ int inpin_idx;
+ int i;
+
+ num_outpins = intc_irq->num_outpins;
+ status_reg = intc_irq->status_reg;
+ outpin_idx = intc_irq->outpin_idx;
+ inpin_idx = intc_irq->inpin_idx;
- trace_aspeed_intc_update_irq(irq, level);
- qemu_set_irq(s->output_pins[irq], level);
+ for (i = 0; i < num_outpins; i++) {
+ if (select & BIT(i)) {
+ if (s->mask[inpin_idx] & BIT(i) ||
+ s->regs[status_reg] & BIT(i)) {
+ /*
+                 * a. A set mask bit means an ISR is in progress: a source
+                 *    interrupt routine is still executing.
+                 * b. A set status bit means a previous source interrupt
+                 *    has not been handled yet.
+                 *
+                 * In either case, save the source interrupt in its pending bit.
+ */
+ s->pending[inpin_idx] |= BIT(i);
+ trace_aspeed_intc_pending_irq(name, inpin_idx,
+ s->pending[inpin_idx]);
+ } else {
+ /*
+                 * Notify firmware that this source interrupt arrived
+                 * by setting its status bit.
+ */
+ s->regs[status_reg] |= BIT(i);
+ trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx + i,
+ s->regs[status_reg]);
+ aspeed_intc_update(s, inpin_idx, outpin_idx + i, 1);
+ }
+ }
+ }
}
/*
- * The address of GICINT128 to GICINT136 are from 0x1000 to 0x1804.
- * Utilize "address & 0x0f00" to get the irq and irq output pin index
- * The value of irq should be 0 to num_ints.
- * The irq 0 indicates GICINT128, irq 1 indicates GICINT129 and so on.
+ * GICINT192_201 maps 1:10 to input IRQ 0 and output IRQs 0 to 9.
+ * GICINT128 to GICINT136 map 1:1 to input IRQs 1 to 9 and output
+ * IRQs 10 to 18. The value of input IRQ should be between 0 and
+ * the number of input pins.
*/
static void aspeed_intc_set_irq(void *opaque, int irq, int level)
{
AspeedINTCState *s = (AspeedINTCState *)opaque;
AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
- uint32_t status_addr = GICINT_STATUS_BASE + ((0x100 * irq) >> 2);
+ const char *name = object_get_typename(OBJECT(s));
+ const AspeedINTCIRQ *intc_irq;
uint32_t select = 0;
uint32_t enable;
+ int num_outpins;
+ int inpin_idx;
int i;
- if (irq >= aic->num_ints) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n",
- __func__, irq);
- return;
- }
+ assert(irq < aic->num_inpins);
- trace_aspeed_intc_set_irq(irq, level);
- enable = s->enable[irq];
+ intc_irq = &aic->irq_table[irq];
+ num_outpins = intc_irq->num_outpins;
+ inpin_idx = intc_irq->inpin_idx;
+ trace_aspeed_intc_set_irq(name, inpin_idx, level);
+ enable = s->enable[inpin_idx];
if (!level) {
return;
}
for (i = 0; i < aic->num_lines; i++) {
- if (s->orgates[irq].levels[i]) {
+ if (s->orgates[inpin_idx].levels[i]) {
if (enable & BIT(i)) {
select |= BIT(i);
}
@@ -90,45 +309,190 @@ static void aspeed_intc_set_irq(void *opaque, int irq, int level)
return;
}
- trace_aspeed_intc_select(select);
+ trace_aspeed_intc_select(name, select);
+ if (num_outpins > 1) {
+ aspeed_intc_set_irq_handler_multi_outpins(s, intc_irq, select);
+ } else {
+ aspeed_intc_set_irq_handler(s, intc_irq, select);
+ }
+}
- if (s->mask[irq] || s->regs[status_addr]) {
- /*
- * a. mask is not 0 means in ISR mode
- * sources interrupt routine are executing.
- * b. status register value is not 0 means previous
- * source interrupt does not be executed, yet.
- *
- * save source interrupt to pending variable.
- */
- s->pending[irq] |= select;
- trace_aspeed_intc_pending_irq(irq, s->pending[irq]);
+static void aspeed_intc_enable_handler(AspeedINTCState *s, hwaddr offset,
+ uint64_t data)
+{
+ AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
+ const char *name = object_get_typename(OBJECT(s));
+ const AspeedINTCIRQ *intc_irq;
+ uint32_t reg = offset >> 2;
+ uint32_t old_enable;
+ uint32_t change;
+ int inpin_idx;
+
+ intc_irq = aspeed_intc_get_irq(aic, reg);
+ inpin_idx = intc_irq->inpin_idx;
+
+ assert(inpin_idx < aic->num_inpins);
+
+ /*
+ * The enable registers are used to enable source interrupts.
+ * They also handle masking and unmasking of source interrupts
+ * during the execution of the source ISR.
+ */
+
+ /* disable all source interrupt */
+ if (!data && !s->enable[inpin_idx]) {
+ s->regs[reg] = data;
+ return;
+ }
+
+ old_enable = s->enable[inpin_idx];
+ s->enable[inpin_idx] |= data;
+
+ /* enable new source interrupt */
+ if (old_enable != s->enable[inpin_idx]) {
+ trace_aspeed_intc_enable(name, s->enable[inpin_idx]);
+ s->regs[reg] = data;
+ return;
+ }
+
+ /* mask and unmask source interrupt */
+ change = s->regs[reg] ^ data;
+ if (change & data) {
+ s->mask[inpin_idx] &= ~change;
+ trace_aspeed_intc_unmask(name, change, s->mask[inpin_idx]);
} else {
- /*
- * notify firmware which source interrupt are coming
- * by setting status register
- */
- s->regs[status_addr] = select;
- trace_aspeed_intc_trigger_irq(irq, s->regs[status_addr]);
- aspeed_intc_update(s, irq, 1);
+ s->mask[inpin_idx] |= change;
+ trace_aspeed_intc_mask(name, change, s->mask[inpin_idx]);
+ }
+
+ s->regs[reg] = data;
+}
+
+static void aspeed_intc_status_handler(AspeedINTCState *s, hwaddr offset,
+ uint64_t data)
+{
+ AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
+ const char *name = object_get_typename(OBJECT(s));
+ const AspeedINTCIRQ *intc_irq;
+ uint32_t reg = offset >> 2;
+ int outpin_idx;
+ int inpin_idx;
+
+ if (!data) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid data 0\n", __func__);
+ return;
+ }
+
+ intc_irq = aspeed_intc_get_irq(aic, reg);
+ outpin_idx = intc_irq->outpin_idx;
+ inpin_idx = intc_irq->inpin_idx;
+
+ assert(inpin_idx < aic->num_inpins);
+
+ /* clear status */
+ s->regs[reg] &= ~data;
+
+ /*
+     * The status registers are used to track completion of the source ISRs.
+     * A source ISR clears its status bit once it has run. A write that
+     * clears every bit at once (re)initializes the register rather than
+     * reporting ISR completion.
+ */
+ if (data == 0xffffffff) {
+ return;
+ }
+
+    /* All source ISR executions are done */
+ if (!s->regs[reg]) {
+ trace_aspeed_intc_all_isr_done(name, inpin_idx);
+ if (s->pending[inpin_idx]) {
+ /*
+             * Handle a pending source interrupt: notify firmware
+             * which source interrupts are pending by setting the
+             * status register.
+ */
+ s->regs[reg] = s->pending[inpin_idx];
+ s->pending[inpin_idx] = 0;
+ trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx,
+ s->regs[reg]);
+ aspeed_intc_update(s, inpin_idx, outpin_idx, 1);
+ } else {
+ /* clear irq */
+ trace_aspeed_intc_clear_irq(name, inpin_idx, outpin_idx, 0);
+ aspeed_intc_update(s, inpin_idx, outpin_idx, 0);
+ }
+ }
+}
+
+static void aspeed_intc_status_handler_multi_outpins(AspeedINTCState *s,
+ hwaddr offset, uint64_t data)
+{
+ const char *name = object_get_typename(OBJECT(s));
+ AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
+ const AspeedINTCIRQ *intc_irq;
+ uint32_t reg = offset >> 2;
+ int num_outpins;
+ int outpin_idx;
+ int inpin_idx;
+ int i;
+
+ if (!data) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid data 0\n", __func__);
+ return;
+ }
+
+ intc_irq = aspeed_intc_get_irq(aic, reg);
+ num_outpins = intc_irq->num_outpins;
+ outpin_idx = intc_irq->outpin_idx;
+ inpin_idx = intc_irq->inpin_idx;
+ assert(inpin_idx < aic->num_inpins);
+
+ /* clear status */
+ s->regs[reg] &= ~data;
+
+ /*
+     * The status registers are used to track completion of the source ISRs.
+     * A source ISR clears its status bit once it has run. A write that
+     * clears every bit at once (re)initializes the register rather than
+     * reporting ISR completion.
+ */
+ if (data == 0xffffffff) {
+ return;
+ }
+
+ for (i = 0; i < num_outpins; i++) {
+        /* All source ISR executions for this specific bit are done */
+ if (data & BIT(i)) {
+ trace_aspeed_intc_all_isr_done_bit(name, inpin_idx, i);
+ if (s->pending[inpin_idx] & BIT(i)) {
+ /*
+ * Handle pending source interrupt.
+ * Notify firmware which source interrupt is pending
+ * by setting the status bit.
+ */
+ s->regs[reg] |= BIT(i);
+ s->pending[inpin_idx] &= ~BIT(i);
+ trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx + i,
+ s->regs[reg]);
+ aspeed_intc_update(s, inpin_idx, outpin_idx + i, 1);
+ } else {
+ /* clear irq for the specific bit */
+ trace_aspeed_intc_clear_irq(name, inpin_idx, outpin_idx + i, 0);
+ aspeed_intc_update(s, inpin_idx, outpin_idx + i, 0);
+ }
+ }
}
}
static uint64_t aspeed_intc_read(void *opaque, hwaddr offset, unsigned int size)
{
AspeedINTCState *s = ASPEED_INTC(opaque);
- uint32_t addr = offset >> 2;
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
uint32_t value = 0;
- if (addr >= ASPEED_INTC_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
- __func__, offset);
- return 0;
- }
-
- value = s->regs[addr];
- trace_aspeed_intc_read(offset, size, value);
+ value = s->regs[reg];
+ trace_aspeed_intc_read(name, offset, size, value);
return value;
}
@@ -137,22 +501,12 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size)
{
AspeedINTCState *s = ASPEED_INTC(opaque);
- AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
- uint32_t addr = offset >> 2;
- uint32_t old_enable;
- uint32_t change;
- uint32_t irq;
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
- if (addr >= ASPEED_INTC_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
- __func__, offset);
- return;
- }
-
- trace_aspeed_intc_write(offset, size, data);
+ trace_aspeed_intc_write(name, offset, size, data);
- switch (addr) {
+ switch (reg) {
case R_GICINT128_EN:
case R_GICINT129_EN:
case R_GICINT130_EN:
@@ -162,45 +516,8 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data,
case R_GICINT134_EN:
case R_GICINT135_EN:
case R_GICINT136_EN:
- irq = (offset & 0x0f00) >> 8;
-
- if (irq >= aic->num_ints) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n",
- __func__, irq);
- return;
- }
-
- /*
- * These registers are used for enable sources interrupt and
- * mask and unmask source interrupt while executing source ISR.
- */
-
- /* disable all source interrupt */
- if (!data && !s->enable[irq]) {
- s->regs[addr] = data;
- return;
- }
-
- old_enable = s->enable[irq];
- s->enable[irq] |= data;
-
- /* enable new source interrupt */
- if (old_enable != s->enable[irq]) {
- trace_aspeed_intc_enable(s->enable[irq]);
- s->regs[addr] = data;
- return;
- }
-
- /* mask and unmask source interrupt */
- change = s->regs[addr] ^ data;
- if (change & data) {
- s->mask[irq] &= ~change;
- trace_aspeed_intc_unmask(change, s->mask[irq]);
- } else {
- s->mask[irq] |= change;
- trace_aspeed_intc_mask(change, s->mask[irq]);
- }
- s->regs[addr] = data;
+ case R_GICINT192_201_EN:
+ aspeed_intc_enable_handler(s, offset, data);
break;
case R_GICINT128_STATUS:
case R_GICINT129_STATUS:
@@ -211,59 +528,271 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data,
case R_GICINT134_STATUS:
case R_GICINT135_STATUS:
case R_GICINT136_STATUS:
- irq = (offset & 0x0f00) >> 8;
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ case R_GICINT192_201_STATUS:
+ aspeed_intc_status_handler_multi_outpins(s, offset, data);
+ break;
+ default:
+ s->regs[reg] = data;
+ break;
+ }
+}
- if (irq >= aic->num_ints) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n",
- __func__, irq);
- return;
- }
+static void aspeed_ssp_intc_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
- /* clear status */
- s->regs[addr] &= ~data;
+ trace_aspeed_intc_write(name, offset, size, data);
- /*
- * These status registers are used for notify sources ISR are executed.
- * If one source ISR is executed, it will clear one bit.
- * If it clear all bits, it means to initialize this register status
- * rather than sources ISR are executed.
- */
- if (data == 0xffffffff) {
- return;
- }
+ switch (reg) {
+ case R_SSPINT128_EN:
+ case R_SSPINT129_EN:
+ case R_SSPINT130_EN:
+ case R_SSPINT131_EN:
+ case R_SSPINT132_EN:
+ case R_SSPINT133_EN:
+ case R_SSPINT134_EN:
+ case R_SSPINT135_EN:
+ case R_SSPINT136_EN:
+ case R_SSPINT160_169_EN:
+ aspeed_intc_enable_handler(s, offset, data);
+ break;
+ case R_SSPINT128_STATUS:
+ case R_SSPINT129_STATUS:
+ case R_SSPINT130_STATUS:
+ case R_SSPINT131_STATUS:
+ case R_SSPINT132_STATUS:
+ case R_SSPINT133_STATUS:
+ case R_SSPINT134_STATUS:
+ case R_SSPINT135_STATUS:
+ case R_SSPINT136_STATUS:
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ case R_SSPINT160_169_STATUS:
+ aspeed_intc_status_handler_multi_outpins(s, offset, data);
+ break;
+ default:
+ s->regs[reg] = data;
+ break;
+ }
+}
- /* All source ISR execution are done */
- if (!s->regs[addr]) {
- trace_aspeed_intc_all_isr_done(irq);
- if (s->pending[irq]) {
- /*
- * handle pending source interrupt
- * notify firmware which source interrupt are pending
- * by setting status register
- */
- s->regs[addr] = s->pending[irq];
- s->pending[irq] = 0;
- trace_aspeed_intc_trigger_irq(irq, s->regs[addr]);
- aspeed_intc_update(s, irq, 1);
- } else {
- /* clear irq */
- trace_aspeed_intc_clear_irq(irq, 0);
- aspeed_intc_update(s, irq, 0);
- }
- }
+static void aspeed_tsp_intc_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
+
+ trace_aspeed_intc_write(name, offset, size, data);
+
+ switch (reg) {
+ case R_TSPINT128_EN:
+ case R_TSPINT129_EN:
+ case R_TSPINT130_EN:
+ case R_TSPINT131_EN:
+ case R_TSPINT132_EN:
+ case R_TSPINT133_EN:
+ case R_TSPINT134_EN:
+ case R_TSPINT135_EN:
+ case R_TSPINT136_EN:
+ case R_TSPINT160_169_EN:
+ aspeed_intc_enable_handler(s, offset, data);
+ break;
+ case R_TSPINT128_STATUS:
+ case R_TSPINT129_STATUS:
+ case R_TSPINT130_STATUS:
+ case R_TSPINT131_STATUS:
+ case R_TSPINT132_STATUS:
+ case R_TSPINT133_STATUS:
+ case R_TSPINT134_STATUS:
+ case R_TSPINT135_STATUS:
+ case R_TSPINT136_STATUS:
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ case R_TSPINT160_169_STATUS:
+ aspeed_intc_status_handler_multi_outpins(s, offset, data);
break;
default:
- s->regs[addr] = data;
+ s->regs[reg] = data;
break;
}
+}
- return;
+static uint64_t aspeed_intcio_read(void *opaque, hwaddr offset,
+ unsigned int size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
+ uint32_t value = 0;
+
+ value = s->regs[reg];
+ trace_aspeed_intc_read(name, offset, size, value);
+
+ return value;
+}
+
+static void aspeed_intcio_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
+
+ trace_aspeed_intc_write(name, offset, size, data);
+
+ switch (reg) {
+ case R_GICINT192_EN:
+ case R_GICINT193_EN:
+ case R_GICINT194_EN:
+ case R_GICINT195_EN:
+ case R_GICINT196_EN:
+ case R_GICINT197_EN:
+ aspeed_intc_enable_handler(s, offset, data);
+ break;
+ case R_GICINT192_STATUS:
+ case R_GICINT193_STATUS:
+ case R_GICINT194_STATUS:
+ case R_GICINT195_STATUS:
+ case R_GICINT196_STATUS:
+ case R_GICINT197_STATUS:
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ default:
+ s->regs[reg] = data;
+ break;
+ }
+}
+
+static void aspeed_ssp_intcio_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
+
+ trace_aspeed_intc_write(name, offset, size, data);
+
+ switch (reg) {
+ case R_SSPINT160_EN:
+ case R_SSPINT161_EN:
+ case R_SSPINT162_EN:
+ case R_SSPINT163_EN:
+ case R_SSPINT164_EN:
+ case R_SSPINT165_EN:
+ aspeed_intc_enable_handler(s, offset, data);
+ break;
+ case R_SSPINT160_STATUS:
+ case R_SSPINT161_STATUS:
+ case R_SSPINT162_STATUS:
+ case R_SSPINT163_STATUS:
+ case R_SSPINT164_STATUS:
+ case R_SSPINT165_STATUS:
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ default:
+ s->regs[reg] = data;
+ break;
+ }
+}
+
+static void aspeed_tsp_intcio_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedINTCState *s = ASPEED_INTC(opaque);
+ const char *name = object_get_typename(OBJECT(s));
+ uint32_t reg = offset >> 2;
+
+ trace_aspeed_intc_write(name, offset, size, data);
+
+ switch (reg) {
+ case R_TSPINT160_EN:
+ case R_TSPINT161_EN:
+ case R_TSPINT162_EN:
+ case R_TSPINT163_EN:
+ case R_TSPINT164_EN:
+ case R_TSPINT165_EN:
+ aspeed_intc_enable_handler(s, offset, data);
+ break;
+ case R_TSPINT160_STATUS:
+ case R_TSPINT161_STATUS:
+ case R_TSPINT162_STATUS:
+ case R_TSPINT163_STATUS:
+ case R_TSPINT164_STATUS:
+ case R_TSPINT165_STATUS:
+ aspeed_intc_status_handler(s, offset, data);
+ break;
+ default:
+ s->regs[reg] = data;
+ break;
+ }
}
static const MemoryRegionOps aspeed_intc_ops = {
.read = aspeed_intc_read,
.write = aspeed_intc_write,
.endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps aspeed_intcio_ops = {
+ .read = aspeed_intcio_read,
+ .write = aspeed_intcio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps aspeed_ssp_intc_ops = {
+ .read = aspeed_intc_read,
+ .write = aspeed_ssp_intc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps aspeed_ssp_intcio_ops = {
+ .read = aspeed_intcio_read,
+ .write = aspeed_ssp_intcio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps aspeed_tsp_intc_ops = {
+ .read = aspeed_intc_read,
+ .write = aspeed_tsp_intc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps aspeed_tsp_intcio_ops = {
+ .read = aspeed_intcio_read,
+ .write = aspeed_tsp_intcio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -276,8 +805,8 @@ static void aspeed_intc_instance_init(Object *obj)
AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
int i;
- assert(aic->num_ints <= ASPEED_INTC_NR_INTS);
- for (i = 0; i < aic->num_ints; i++) {
+ assert(aic->num_inpins <= ASPEED_INTC_MAX_INPINS);
+ for (i = 0; i < aic->num_inpins; i++) {
object_initialize_child(obj, "intc-orgates[*]", &s->orgates[i],
TYPE_OR_IRQ);
object_property_set_int(OBJECT(&s->orgates[i]), "num-lines",
@@ -288,8 +817,9 @@ static void aspeed_intc_instance_init(Object *obj)
static void aspeed_intc_reset(DeviceState *dev)
{
AspeedINTCState *s = ASPEED_INTC(dev);
+ AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
- memset(s->regs, 0, sizeof(s->regs));
+ memset(s->regs, 0, aic->nr_regs << 2);
memset(s->enable, 0, sizeof(s->enable));
memset(s->mask, 0, sizeof(s->mask));
memset(s->pending, 0, sizeof(s->pending));
@@ -302,28 +832,51 @@ static void aspeed_intc_realize(DeviceState *dev, Error **errp)
AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s);
int i;
- memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_intc_ops, s,
- TYPE_ASPEED_INTC ".regs", ASPEED_INTC_NR_REGS << 2);
+ memory_region_init(&s->iomem_container, OBJECT(s),
+ TYPE_ASPEED_INTC ".container", aic->mem_size);
+
+ sysbus_init_mmio(sbd, &s->iomem_container);
+
+ s->regs = g_new(uint32_t, aic->nr_regs);
+ memory_region_init_io(&s->iomem, OBJECT(s), aic->reg_ops, s,
+ TYPE_ASPEED_INTC ".regs", aic->nr_regs << 2);
- sysbus_init_mmio(sbd, &s->iomem);
- qdev_init_gpio_in(dev, aspeed_intc_set_irq, aic->num_ints);
+ memory_region_add_subregion(&s->iomem_container, aic->reg_offset,
+ &s->iomem);
- for (i = 0; i < aic->num_ints; i++) {
+ qdev_init_gpio_in(dev, aspeed_intc_set_irq, aic->num_inpins);
+
+ for (i = 0; i < aic->num_inpins; i++) {
if (!qdev_realize(DEVICE(&s->orgates[i]), NULL, errp)) {
return;
}
+ }
+
+ for (i = 0; i < aic->num_outpins; i++) {
sysbus_init_irq(sbd, &s->output_pins[i]);
}
}
-static void aspeed_intc_class_init(ObjectClass *klass, void *data)
+static void aspeed_intc_unrealize(DeviceState *dev)
+{
+ AspeedINTCState *s = ASPEED_INTC(dev);
+
+ g_free(s->regs);
+ s->regs = NULL;
+}
+
+static void aspeed_intc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
dc->desc = "ASPEED INTC Controller";
dc->realize = aspeed_intc_realize;
- dc->reset = aspeed_intc_reset;
+ dc->unrealize = aspeed_intc_unrealize;
+ device_class_set_legacy_reset(dc, aspeed_intc_reset);
dc->vmsd = NULL;
+
+ aic->reg_ops = &aspeed_intc_ops;
}
static const TypeInfo aspeed_intc_info = {
@@ -336,14 +889,33 @@ static const TypeInfo aspeed_intc_info = {
.abstract = true,
};
-static void aspeed_2700_intc_class_init(ObjectClass *klass, void *data)
+static AspeedINTCIRQ aspeed_2700_intc_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 10, R_GICINT192_201_EN, R_GICINT192_201_STATUS},
+ {1, 10, 1, R_GICINT128_EN, R_GICINT128_STATUS},
+ {2, 11, 1, R_GICINT129_EN, R_GICINT129_STATUS},
+ {3, 12, 1, R_GICINT130_EN, R_GICINT130_STATUS},
+ {4, 13, 1, R_GICINT131_EN, R_GICINT131_STATUS},
+ {5, 14, 1, R_GICINT132_EN, R_GICINT132_STATUS},
+ {6, 15, 1, R_GICINT133_EN, R_GICINT133_STATUS},
+ {7, 16, 1, R_GICINT134_EN, R_GICINT134_STATUS},
+ {8, 17, 1, R_GICINT135_EN, R_GICINT135_STATUS},
+ {9, 18, 1, R_GICINT136_EN, R_GICINT136_STATUS},
+};
+
+static void aspeed_2700_intc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
dc->desc = "ASPEED 2700 INTC Controller";
aic->num_lines = 32;
- aic->num_ints = 9;
+ aic->num_inpins = 10;
+ aic->num_outpins = 19;
+ aic->mem_size = 0x4000;
+ aic->nr_regs = 0xB08 >> 2;
+ aic->reg_offset = 0x1000;
+ aic->irq_table = aspeed_2700_intc_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700_intc_irqs);
}
static const TypeInfo aspeed_2700_intc_info = {
@@ -352,10 +924,185 @@ static const TypeInfo aspeed_2700_intc_info = {
.class_init = aspeed_2700_intc_class_init,
};
+static AspeedINTCIRQ aspeed_2700_intcio_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 1, R_GICINT192_EN, R_GICINT192_STATUS},
+ {1, 1, 1, R_GICINT193_EN, R_GICINT193_STATUS},
+ {2, 2, 1, R_GICINT194_EN, R_GICINT194_STATUS},
+ {3, 3, 1, R_GICINT195_EN, R_GICINT195_STATUS},
+ {4, 4, 1, R_GICINT196_EN, R_GICINT196_STATUS},
+ {5, 5, 1, R_GICINT197_EN, R_GICINT197_STATUS},
+};
+
+static void aspeed_2700_intcio_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 INTC IO Controller";
+ aic->num_lines = 32;
+ aic->num_inpins = 6;
+ aic->num_outpins = 6;
+ aic->mem_size = 0x400;
+ aic->nr_regs = 0x58 >> 2;
+ aic->reg_offset = 0x100;
+ aic->reg_ops = &aspeed_intcio_ops;
+ aic->irq_table = aspeed_2700_intcio_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700_intcio_irqs);
+}
+
+static const TypeInfo aspeed_2700_intcio_info = {
+ .name = TYPE_ASPEED_2700_INTCIO,
+ .parent = TYPE_ASPEED_INTC,
+ .class_init = aspeed_2700_intcio_class_init,
+};
+
+static AspeedINTCIRQ aspeed_2700ssp_intc_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 10, R_SSPINT160_169_EN, R_SSPINT160_169_STATUS},
+ {1, 10, 1, R_SSPINT128_EN, R_SSPINT128_STATUS},
+ {2, 11, 1, R_SSPINT129_EN, R_SSPINT129_STATUS},
+ {3, 12, 1, R_SSPINT130_EN, R_SSPINT130_STATUS},
+ {4, 13, 1, R_SSPINT131_EN, R_SSPINT131_STATUS},
+ {5, 14, 1, R_SSPINT132_EN, R_SSPINT132_STATUS},
+ {6, 15, 1, R_SSPINT133_EN, R_SSPINT133_STATUS},
+ {7, 16, 1, R_SSPINT134_EN, R_SSPINT134_STATUS},
+ {8, 17, 1, R_SSPINT135_EN, R_SSPINT135_STATUS},
+ {9, 18, 1, R_SSPINT136_EN, R_SSPINT136_STATUS},
+};
+
+static void aspeed_2700ssp_intc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 SSP INTC Controller";
+ aic->num_lines = 32;
+ aic->num_inpins = 10;
+ aic->num_outpins = 19;
+ aic->mem_size = 0x4000;
+ aic->nr_regs = 0x2B08 >> 2;
+ aic->reg_offset = 0x0;
+ aic->reg_ops = &aspeed_ssp_intc_ops;
+ aic->irq_table = aspeed_2700ssp_intc_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intc_irqs);
+}
+
+static const TypeInfo aspeed_2700ssp_intc_info = {
+ .name = TYPE_ASPEED_2700SSP_INTC,
+ .parent = TYPE_ASPEED_INTC,
+ .class_init = aspeed_2700ssp_intc_class_init,
+};
+
+static AspeedINTCIRQ aspeed_2700ssp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 1, R_SSPINT160_EN, R_SSPINT160_STATUS},
+ {1, 1, 1, R_SSPINT161_EN, R_SSPINT161_STATUS},
+ {2, 2, 1, R_SSPINT162_EN, R_SSPINT162_STATUS},
+ {3, 3, 1, R_SSPINT163_EN, R_SSPINT163_STATUS},
+ {4, 4, 1, R_SSPINT164_EN, R_SSPINT164_STATUS},
+ {5, 5, 1, R_SSPINT165_EN, R_SSPINT165_STATUS},
+};
+
+static void aspeed_2700ssp_intcio_class_init(ObjectClass *klass,
+ const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 SSP INTC IO Controller";
+ aic->num_lines = 32;
+ aic->num_inpins = 6;
+ aic->num_outpins = 6;
+ aic->mem_size = 0x400;
+ aic->nr_regs = 0x1d8 >> 2;
+ aic->reg_offset = 0;
+ aic->reg_ops = &aspeed_ssp_intcio_ops;
+ aic->irq_table = aspeed_2700ssp_intcio_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intcio_irqs);
+}
+
+static const TypeInfo aspeed_2700ssp_intcio_info = {
+ .name = TYPE_ASPEED_2700SSP_INTCIO,
+ .parent = TYPE_ASPEED_INTC,
+ .class_init = aspeed_2700ssp_intcio_class_init,
+};
+
+static AspeedINTCIRQ aspeed_2700tsp_intc_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 10, R_TSPINT160_169_EN, R_TSPINT160_169_STATUS},
+ {1, 10, 1, R_TSPINT128_EN, R_TSPINT128_STATUS},
+ {2, 11, 1, R_TSPINT129_EN, R_TSPINT129_STATUS},
+ {3, 12, 1, R_TSPINT130_EN, R_TSPINT130_STATUS},
+ {4, 13, 1, R_TSPINT131_EN, R_TSPINT131_STATUS},
+ {5, 14, 1, R_TSPINT132_EN, R_TSPINT132_STATUS},
+ {6, 15, 1, R_TSPINT133_EN, R_TSPINT133_STATUS},
+ {7, 16, 1, R_TSPINT134_EN, R_TSPINT134_STATUS},
+ {8, 17, 1, R_TSPINT135_EN, R_TSPINT135_STATUS},
+ {9, 18, 1, R_TSPINT136_EN, R_TSPINT136_STATUS},
+};
+
+static void aspeed_2700tsp_intc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 TSP INTC Controller";
+ aic->num_lines = 32;
+ aic->num_inpins = 10;
+ aic->num_outpins = 19;
+ aic->mem_size = 0x4000;
+ aic->nr_regs = 0x3B08 >> 2;
+ aic->reg_offset = 0;
+ aic->reg_ops = &aspeed_tsp_intc_ops;
+ aic->irq_table = aspeed_2700tsp_intc_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intc_irqs);
+}
+
+static const TypeInfo aspeed_2700tsp_intc_info = {
+ .name = TYPE_ASPEED_2700TSP_INTC,
+ .parent = TYPE_ASPEED_INTC,
+ .class_init = aspeed_2700tsp_intc_class_init,
+};
+
+static AspeedINTCIRQ aspeed_2700tsp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = {
+ {0, 0, 1, R_TSPINT160_EN, R_TSPINT160_STATUS},
+ {1, 1, 1, R_TSPINT161_EN, R_TSPINT161_STATUS},
+ {2, 2, 1, R_TSPINT162_EN, R_TSPINT162_STATUS},
+ {3, 3, 1, R_TSPINT163_EN, R_TSPINT163_STATUS},
+ {4, 4, 1, R_TSPINT164_EN, R_TSPINT164_STATUS},
+ {5, 5, 1, R_TSPINT165_EN, R_TSPINT165_STATUS},
+};
+
+static void aspeed_2700tsp_intcio_class_init(ObjectClass *klass,
+ const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 TSP INTC IO Controller";
+ aic->num_lines = 32;
+ aic->num_inpins = 6;
+ aic->num_outpins = 6;
+ aic->mem_size = 0x400;
+ aic->nr_regs = 0x258 >> 2;
+ aic->reg_offset = 0x0;
+ aic->reg_ops = &aspeed_tsp_intcio_ops;
+ aic->irq_table = aspeed_2700tsp_intcio_irqs;
+ aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intcio_irqs);
+}
+
+static const TypeInfo aspeed_2700tsp_intcio_info = {
+ .name = TYPE_ASPEED_2700TSP_INTCIO,
+ .parent = TYPE_ASPEED_INTC,
+ .class_init = aspeed_2700tsp_intcio_class_init,
+};
+
static void aspeed_intc_register_types(void)
{
type_register_static(&aspeed_intc_info);
type_register_static(&aspeed_2700_intc_info);
+ type_register_static(&aspeed_2700_intcio_info);
+ type_register_static(&aspeed_2700ssp_intc_info);
+ type_register_static(&aspeed_2700ssp_intcio_info);
+ type_register_static(&aspeed_2700tsp_intc_info);
+ type_register_static(&aspeed_2700tsp_intcio_info);
}
type_init(aspeed_intc_register_types);
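The rewritten aspeed_intc.c above funnels all enable/status accesses through
shared handlers: a newly asserted source is reported in the status register and
raises the output pin, unless that source is masked (its ISR is still running)
or its previous status bit is still set, in which case it is parked in pending
and re-raised once the guest clears the status register. A compact standalone
model of that deferral flow, with hypothetical names and no QEMU dependencies:

    #include <stdint.h>
    #include <stdio.h>

    /* Single-input-pin model of the enable/mask/pending/status flow. */
    struct intc {
        uint32_t enable;    /* enabled source lines */
        uint32_t mask;      /* sources masked while their ISR runs */
        uint32_t status;    /* sources currently reported to firmware */
        uint32_t pending;   /* sources parked until the status is cleared */
        int irq_level;      /* level driven on the output pin */
    };

    /* A source line fires (roughly aspeed_intc_set_irq_handler()). */
    static void intc_raise(struct intc *s, uint32_t select)
    {
        select &= s->enable;
        if (!select) {
            return;
        }
        if ((s->mask & select) || (s->status & select)) {
            s->pending |= select;          /* busy: park the source */
        } else {
            s->status = select;            /* report it and raise the output */
            s->irq_level = 1;
        }
    }

    /* Guest acknowledges sources by writing to the status register
     * (roughly aspeed_intc_status_handler()). */
    static void intc_ack(struct intc *s, uint32_t data)
    {
        s->status &= ~data;
        if (s->status) {
            return;                        /* other ISRs still outstanding */
        }
        if (s->pending) {
            s->status = s->pending;        /* re-raise the parked sources */
            s->pending = 0;
            s->irq_level = 1;
        } else {
            s->irq_level = 0;              /* all done: drop the output */
        }
    }

    static void dump(const struct intc *s)
    {
        printf("status=%x pending=%x irq=%d\n",
               (unsigned)s->status, (unsigned)s->pending, s->irq_level);
    }

    int main(void)
    {
        struct intc s = { .enable = 0xf };

        intc_raise(&s, 0x1);   /* reported immediately */
        intc_raise(&s, 0x1);   /* same source again while busy: parked */
        dump(&s);              /* status=1 pending=1 irq=1 */
        intc_ack(&s, 0x1);     /* ack -> parked source is re-raised */
        dump(&s);              /* status=1 pending=0 irq=1 */
        intc_ack(&s, 0x1);
        dump(&s);              /* status=0 pending=0 irq=0 */
        return 0;
    }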
diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c
index ba1d953..7120088 100644
--- a/hw/intc/aspeed_vic.c
+++ b/hw/intc/aspeed_vic.c
@@ -339,11 +339,11 @@ static const VMStateDescription vmstate_aspeed_vic = {
}
};
-static void aspeed_vic_class_init(ObjectClass *klass, void *data)
+static void aspeed_vic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_vic_realize;
- dc->reset = aspeed_vic_reset;
+ device_class_set_legacy_reset(dc, aspeed_vic_reset);
dc->desc = "ASPEED Interrupt Controller (New)";
dc->vmsd = &vmstate_aspeed_vic;
}
diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c
index 2c2e2b1..55e0a5a 100644
--- a/hw/intc/bcm2835_ic.c
+++ b/hw/intc/bcm2835_ic.c
@@ -219,11 +219,11 @@ static const VMStateDescription vmstate_bcm2835_ic = {
}
};
-static void bcm2835_ic_class_init(ObjectClass *klass, void *data)
+static void bcm2835_ic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_ic_reset;
+ device_class_set_legacy_reset(dc, bcm2835_ic_reset);
dc->vmsd = &vmstate_bcm2835_ic;
}
diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c
index 81faf03..1c02853 100644
--- a/hw/intc/bcm2836_control.c
+++ b/hw/intc/bcm2836_control.c
@@ -384,11 +384,11 @@ static const VMStateDescription vmstate_bcm2836_control = {
}
};
-static void bcm2836_control_class_init(ObjectClass *klass, void *data)
+static void bcm2836_control_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2836_control_reset;
+ device_class_set_legacy_reset(dc, bcm2836_control_reset);
dc->vmsd = &vmstate_bcm2836_control;
}
diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c
deleted file mode 100644
index bd37d1c..0000000
--- a/hw/intc/etraxfs_pic.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * QEMU ETRAX Interrupt Controller.
- *
- * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/sysbus.h"
-#include "qemu/module.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "qom/object.h"
-
-#define D(x)
-
-#define R_RW_MASK 0
-#define R_R_VECT 1
-#define R_R_MASKED_VECT 2
-#define R_R_NMI 3
-#define R_R_GURU 4
-#define R_MAX 5
-
-#define TYPE_ETRAX_FS_PIC "etraxfs-pic"
-DECLARE_INSTANCE_CHECKER(struct etrax_pic, ETRAX_FS_PIC,
- TYPE_ETRAX_FS_PIC)
-
-struct etrax_pic
-{
- SysBusDevice parent_obj;
-
- MemoryRegion mmio;
- qemu_irq parent_irq;
- qemu_irq parent_nmi;
- uint32_t regs[R_MAX];
-};
-
-static void pic_update(struct etrax_pic *fs)
-{
- uint32_t vector = 0;
- int i;
-
- fs->regs[R_R_MASKED_VECT] = fs->regs[R_R_VECT] & fs->regs[R_RW_MASK];
-
- /* The ETRAX interrupt controller signals interrupts to the core
- through an interrupt request wire and an irq vector bus. If
- multiple interrupts are simultaneously active it chooses vector
- 0x30 and lets the sw choose the priorities. */
- if (fs->regs[R_R_MASKED_VECT]) {
- uint32_t mv = fs->regs[R_R_MASKED_VECT];
- for (i = 0; i < 31; i++) {
- if (mv & 1) {
- vector = 0x31 + i;
- /* Check for multiple interrupts. */
- if (mv > 1)
- vector = 0x30;
- break;
- }
- mv >>= 1;
- }
- }
-
- qemu_set_irq(fs->parent_irq, vector);
-}
-
-static uint64_t
-pic_read(void *opaque, hwaddr addr, unsigned int size)
-{
- struct etrax_pic *fs = opaque;
- uint32_t rval;
-
- rval = fs->regs[addr >> 2];
- D(printf("%s %x=%x\n", __func__, addr, rval));
- return rval;
-}
-
-static void pic_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned int size)
-{
- struct etrax_pic *fs = opaque;
- D(printf("%s addr=%x val=%x\n", __func__, addr, value));
-
- if (addr == R_RW_MASK) {
- fs->regs[R_RW_MASK] = value;
- pic_update(fs);
- }
-}
-
-static const MemoryRegionOps pic_ops = {
- .read = pic_read,
- .write = pic_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
-};
-
-static void nmi_handler(void *opaque, int irq, int level)
-{
- struct etrax_pic *fs = (void *)opaque;
- uint32_t mask;
-
- mask = 1 << irq;
- if (level)
- fs->regs[R_R_NMI] |= mask;
- else
- fs->regs[R_R_NMI] &= ~mask;
-
- qemu_set_irq(fs->parent_nmi, !!fs->regs[R_R_NMI]);
-}
-
-static void irq_handler(void *opaque, int irq, int level)
-{
- struct etrax_pic *fs = (void *)opaque;
-
- if (irq >= 30) {
- nmi_handler(opaque, irq, level);
- return;
- }
-
- irq -= 1;
- fs->regs[R_R_VECT] &= ~(1 << irq);
- fs->regs[R_R_VECT] |= (!!level << irq);
- pic_update(fs);
-}
-
-static void etraxfs_pic_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- struct etrax_pic *s = ETRAX_FS_PIC(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- qdev_init_gpio_in(dev, irq_handler, 32);
- sysbus_init_irq(sbd, &s->parent_irq);
- sysbus_init_irq(sbd, &s->parent_nmi);
-
- memory_region_init_io(&s->mmio, obj, &pic_ops, s,
- "etraxfs-pic", R_MAX * 4);
- sysbus_init_mmio(sbd, &s->mmio);
-}
-
-static const TypeInfo etraxfs_pic_info = {
- .name = TYPE_ETRAX_FS_PIC,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(struct etrax_pic),
- .instance_init = etraxfs_pic_init,
-};
-
-static void etraxfs_pic_register_types(void)
-{
- type_register_static(&etraxfs_pic_info);
-}
-
-type_init(etraxfs_pic_register_types)
diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c
index f0d310a..ebbe234 100644
--- a/hw/intc/exynos4210_combiner.c
+++ b/hw/intc/exynos4210_combiner.c
@@ -325,16 +325,15 @@ static void exynos4210_combiner_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property exynos4210_combiner_properties[] = {
+static const Property exynos4210_combiner_properties[] = {
DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void exynos4210_combiner_class_init(ObjectClass *klass, void *data)
+static void exynos4210_combiner_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_combiner_reset;
+ device_class_set_legacy_reset(dc, exynos4210_combiner_reset);
device_class_set_props(dc, exynos4210_combiner_properties);
dc->vmsd = &vmstate_exynos4210_combiner;
}
diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c
index fcca85c..7e2d79d 100644
--- a/hw/intc/exynos4210_gic.c
+++ b/hw/intc/exynos4210_gic.c
@@ -111,12 +111,11 @@ static void exynos4210_gic_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->dist_container);
}
-static Property exynos4210_gic_properties[] = {
+static const Property exynos4210_gic_properties[] = {
DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void exynos4210_gic_class_init(ObjectClass *klass, void *data)
+static void exynos4210_gic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c
index 6cc1c69..2359861 100644
--- a/hw/intc/goldfish_pic.c
+++ b/hw/intc/goldfish_pic.c
@@ -181,17 +181,16 @@ static void goldfish_pic_instance_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), goldfish_irq_request, GOLDFISH_PIC_IRQ_NB);
}
-static Property goldfish_pic_properties[] = {
+static const Property goldfish_pic_properties[] = {
DEFINE_PROP_UINT8("index", GoldfishPICState, idx, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void goldfish_pic_class_init(ObjectClass *oc, void *data)
+static void goldfish_pic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc);
- dc->reset = goldfish_pic_reset;
+ device_class_set_legacy_reset(dc, goldfish_pic_reset);
dc->realize = goldfish_pic_realize;
dc->vmsd = &vmstate_goldfish_pic;
ic->get_statistics = goldfish_pic_get_statistics;
@@ -205,7 +204,7 @@ static const TypeInfo goldfish_pic_info = {
.class_init = goldfish_pic_class_init,
.instance_init = goldfish_pic_instance_init,
.instance_size = sizeof(GoldfishPICState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
},
diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c
index c6c51a3..e0f2646 100644
--- a/hw/intc/grlib_irqmp.c
+++ b/hw/intc/grlib_irqmp.c
@@ -376,17 +376,16 @@ static void grlib_irqmp_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &irqmp->iomem);
}
-static Property grlib_irqmp_properties[] = {
+static const Property grlib_irqmp_properties[] = {
DEFINE_PROP_UINT32("ncpus", IRQMP, ncpus, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void grlib_irqmp_class_init(ObjectClass *klass, void *data)
+static void grlib_irqmp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = grlib_irqmp_realize;
- dc->reset = grlib_irqmp_reset;
+ device_class_set_legacy_reset(dc, grlib_irqmp_reset);
device_class_set_props(dc, grlib_irqmp_properties);
}
diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c
index c2946ba..447e8c2 100644
--- a/hw/intc/heathrow_pic.c
+++ b/hw/intc/heathrow_pic.c
@@ -184,11 +184,11 @@ static void heathrow_init(Object *obj)
sysbus_init_mmio(sbd, &s->mem);
}
-static void heathrow_class_init(ObjectClass *oc, void *data)
+static void heathrow_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- dc->reset = heathrow_reset;
+ device_class_set_legacy_reset(dc, heathrow_reset);
dc->vmsd = &vmstate_heathrow;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index bbae2d8..b6f96bf 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -32,10 +32,7 @@
#include "trace.h"
#include "qom/object.h"
-/* debug PIC */
-//#define DEBUG_PIC
-
-//#define DEBUG_IRQ_LATENCY
+/*#define DEBUG_IRQ_LATENCY*/
#define TYPE_I8259 "isa-i8259"
typedef struct PICClass PICClass;
@@ -436,13 +433,13 @@ qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in)
return irq_set;
}
-static void i8259_class_init(ObjectClass *klass, void *data)
+static void i8259_class_init(ObjectClass *klass, const void *data)
{
PICClass *k = PIC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_parent_realize(dc, pic_realize, &k->parent_realize);
- dc->reset = pic_reset;
+ device_class_set_legacy_reset(dc, pic_reset);
}
static const TypeInfo i8259_info = {
diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c
index d9558e3..602e44c 100644
--- a/hw/intc/i8259_common.c
+++ b/hw/intc/i8259_common.c
@@ -193,15 +193,14 @@ static const VMStateDescription vmstate_pic_common = {
}
};
-static Property pic_properties_common[] = {
+static const Property pic_properties_common[] = {
DEFINE_PROP_UINT32("iobase", PICCommonState, iobase, -1),
DEFINE_PROP_UINT32("elcr_addr", PICCommonState, elcr_addr, -1),
DEFINE_PROP_UINT8("elcr_mask", PICCommonState, elcr_mask, -1),
DEFINE_PROP_BIT("master", PICCommonState, master, 0, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pic_common_class_init(ObjectClass *klass, void *data)
+static void pic_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
@@ -227,7 +226,7 @@ static const TypeInfo pic_common_type = {
.class_size = sizeof(PICCommonClass),
.class_init = pic_common_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
},
diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c
index aedc708..09c3bfa 100644
--- a/hw/intc/imx_avic.c
+++ b/hw/intc/imx_avic.c
@@ -341,12 +341,12 @@ static void imx_avic_init(Object *obj)
}
-static void imx_avic_class_init(ObjectClass *klass, void *data)
+static void imx_avic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_imx_avic;
- dc->reset = imx_avic_reset;
+ device_class_set_legacy_reset(dc, imx_avic_reset);
dc->desc = "i.MX Advanced Vector Interrupt Controller";
}
diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c
index af45e51..58d286c 100644
--- a/hw/intc/imx_gpcv2.c
+++ b/hw/intc/imx_gpcv2.c
@@ -102,11 +102,11 @@ static const VMStateDescription vmstate_imx_gpcv2 = {
},
};
-static void imx_gpcv2_class_init(ObjectClass *klass, void *data)
+static void imx_gpcv2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = imx_gpcv2_reset;
+ device_class_set_legacy_reset(dc, imx_gpcv2_reset);
dc->vmsd = &vmstate_imx_gpcv2;
dc->desc = "i.MX GPCv2 Module";
}
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index 716ffc8..133bef8 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -30,8 +30,8 @@
#include "hw/intc/ioapic_internal.h"
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
-#include "sysemu/sysemu.h"
+#include "system/kvm.h"
+#include "system/system.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/x86-iommu.h"
#include "trace.h"
@@ -476,12 +476,11 @@ static void ioapic_unrealize(DeviceState *dev)
timer_free(s->delayed_ioapic_service_timer);
}
-static Property ioapic_properties[] = {
+static const Property ioapic_properties[] = {
DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ioapic_class_init(ObjectClass *klass, void *data)
+static void ioapic_class_init(ObjectClass *klass, const void *data)
{
IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -493,7 +492,7 @@ static void ioapic_class_init(ObjectClass *klass, void *data)
* migration, otherwise first 24 gsi routes will be invalid.
*/
k->post_load = ioapic_update_kvm_routes;
- dc->reset = ioapic_reset_common;
+ device_class_set_legacy_reset(dc, ioapic_reset_common);
device_class_set_props(dc, ioapic_properties);
}
diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c
index 7698963..fce3486 100644
--- a/hw/intc/ioapic_common.c
+++ b/hw/intc/ioapic_common.c
@@ -197,7 +197,7 @@ static const VMStateDescription vmstate_ioapic_common = {
}
};
-static void ioapic_common_class_init(ObjectClass *klass, void *data)
+static void ioapic_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
@@ -215,7 +215,7 @@ static const TypeInfo ioapic_common_type = {
.class_size = sizeof(IOAPICCommonClass),
.class_init = ioapic_common_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
},
diff --git a/hw/intc/ioapic_internal.h b/hw/intc/ioapic_internal.h
index 37b8565..5120576 100644
--- a/hw/intc/ioapic_internal.h
+++ b/hw/intc/ioapic_internal.h
@@ -22,7 +22,7 @@
#ifndef HW_INTC_IOAPIC_INTERNAL_H
#define HW_INTC_IOAPIC_INTERNAL_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/intc/ioapic.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"
diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c
index 1e8e011..8b8ac6b 100644
--- a/hw/intc/loongarch_extioi.c
+++ b/hw/intc/loongarch_extioi.c
@@ -10,16 +10,31 @@
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
-#include "hw/sysbus.h"
#include "hw/loongarch/virt.h"
-#include "hw/qdev-properties.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/kvm.h"
#include "hw/intc/loongarch_extioi.h"
-#include "migration/vmstate.h"
#include "trace.h"
+static int extioi_get_index_from_archid(LoongArchExtIOICommonState *s,
+ uint64_t arch_id)
+{
+ int i;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ if (s->cpu[i].arch_id == arch_id) {
+ break;
+ }
+ }
-static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level)
+ if ((i < s->num_cpu) && s->cpu[i].cpu) {
+ return i;
+ }
+
+ return -1;
+}
+
+static void extioi_update_irq(LoongArchExtIOICommonState *s, int irq, int level)
{
int ipnum, cpu, found, irq_index, irq_mask;
@@ -54,17 +69,12 @@ static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level)
static void extioi_setirq(void *opaque, int irq, int level)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
trace_loongarch_extioi_setirq(irq, level);
if (level) {
- /*
- * s->isr should be used in vmstate structure,
- * but it not support 'unsigned long',
- * so we have to switch it.
- */
- set_bit(irq, (unsigned long *)s->isr);
+ set_bit32(irq, s->isr);
} else {
- clear_bit(irq, (unsigned long *)s->isr);
+ clear_bit32(irq, s->isr);
}
extioi_update_irq(s, irq, level);
}
@@ -72,7 +82,7 @@ static void extioi_setirq(void *opaque, int irq, int level)
static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data,
unsigned size, MemTxAttrs attrs)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
unsigned long offset = addr & 0xffff;
uint32_t index, cpu;
@@ -111,7 +121,7 @@ static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data,
return MEMTX_OK;
}
-static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\
+static inline void extioi_enable_irq(LoongArchExtIOICommonState *s, int index,\
uint32_t mask, int level)
{
uint32_t val;
@@ -130,10 +140,10 @@ static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\
}
}
-static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq,
- uint64_t val, bool notify)
+static inline void extioi_update_sw_coremap(LoongArchExtIOICommonState *s,
+ int irq, uint64_t val, bool notify)
{
- int i, cpu;
+ int i, cpu, cpuid;
/*
* loongarch only support little endian,
@@ -142,19 +152,24 @@ static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq,
val = cpu_to_le64(val);
for (i = 0; i < 4; i++) {
- cpu = val & 0xff;
+ cpuid = val & 0xff;
val = val >> 8;
if (!(s->status & BIT(EXTIOI_ENABLE_CPU_ENCODE))) {
- cpu = ctz32(cpu);
- cpu = (cpu >= 4) ? 0 : cpu;
+ cpuid = ctz32(cpuid);
+ cpuid = (cpuid >= 4) ? 0 : cpuid;
+ }
+
+ cpu = extioi_get_index_from_archid(s, cpuid);
+ if (cpu < 0) {
+ continue;
}
if (s->sw_coremap[irq + i] == cpu) {
continue;
}
- if (notify && test_bit(irq + i, (unsigned long *)s->isr)) {
+ if (notify && test_bit32(irq + i, s->isr)) {
/*
* lower irq at old cpu and raise irq at new cpu
*/
@@ -167,8 +182,8 @@ static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq,
}
}
-static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index,
- uint64_t val)
+static inline void extioi_update_sw_ipmap(LoongArchExtIOICommonState *s,
+ int index, uint64_t val)
{
int i;
uint8_t ipnum;
@@ -191,7 +206,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr,
uint64_t val, unsigned size,
MemTxAttrs attrs)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
int cpu, index, old_data, irq;
uint32_t offset;
@@ -271,7 +286,7 @@ static const MemoryRegionOps extioi_ops = {
static MemTxResult extioi_virt_readw(void *opaque, hwaddr addr, uint64_t *data,
unsigned size, MemTxAttrs attrs)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
switch (addr) {
case EXTIOI_VIRT_FEATURES:
@@ -291,7 +306,7 @@ static MemTxResult extioi_virt_writew(void *opaque, hwaddr addr,
uint64_t val, unsigned size,
MemTxAttrs attrs)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
switch (addr) {
case EXTIOI_VIRT_FEATURES:
@@ -325,65 +340,81 @@ static const MemoryRegionOps extioi_virt_ops = {
static void loongarch_extioi_realize(DeviceState *dev, Error **errp)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
+ LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- int i, pin;
+ Error *local_err = NULL;
+ int i;
- if (s->num_cpu == 0) {
- error_setg(errp, "num-cpu must be at least 1");
+ lec->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
- for (i = 0; i < EXTIOI_IRQS; i++) {
- sysbus_init_irq(sbd, &s->irq[i]);
- }
-
- qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS);
- memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops,
- s, "extioi_system_mem", 0x900);
- sysbus_init_mmio(sbd, &s->extioi_system_mem);
-
if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
- memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops,
- s, "extioi_virt", EXTIOI_VIRT_SIZE);
- sysbus_init_mmio(sbd, &s->virt_extend);
s->features |= EXTIOI_VIRT_HAS_FEATURES;
} else {
s->status |= BIT(EXTIOI_ENABLE);
}
- s->cpu = g_new0(ExtIOICore, s->num_cpu);
- if (s->cpu == NULL) {
- error_setg(errp, "Memory allocation for ExtIOICore faile");
- return;
- }
+ if (kvm_irqchip_in_kernel()) {
+ kvm_extioi_realize(dev, errp);
+ } else {
+ for (i = 0; i < EXTIOI_IRQS; i++) {
+ sysbus_init_irq(sbd, &s->irq[i]);
+ }
- for (i = 0; i < s->num_cpu; i++) {
- for (pin = 0; pin < LS3A_INTC_IP; pin++) {
- qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1);
+ qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS);
+ memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops,
+ s, "extioi_system_mem", 0x900);
+ sysbus_init_mmio(sbd, &s->extioi_system_mem);
+ if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
+ memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops,
+ s, "extioi_virt", EXTIOI_VIRT_SIZE);
+ sysbus_init_mmio(sbd, &s->virt_extend);
}
}
}
-static void loongarch_extioi_finalize(Object *obj)
+static void loongarch_extioi_unrealize(DeviceState *dev)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
g_free(s->cpu);
}
-static void loongarch_extioi_reset(DeviceState *d)
+static void loongarch_extioi_reset_hold(Object *obj, ResetType type)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(d);
+ LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(obj);
- s->status = 0;
+ if (lec->parent_phases.hold) {
+ lec->parent_phases.hold(obj, type);
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_extioi_put(obj, 0);
+ }
+}
+
+static int vmstate_extioi_pre_save(void *opaque)
+{
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_extioi_get(opaque);
+ }
+
+ return 0;
}
static int vmstate_extioi_post_load(void *opaque, int version_id)
{
- LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
int i, start_irq;
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_extioi_put(opaque, version_id);
+ }
+
for (i = 0; i < (EXTIOI_IRQS / 4); i++) {
start_irq = i * 4;
extioi_update_sw_coremap(s, start_irq, s->coremap[i], false);
@@ -396,66 +427,31 @@ static int vmstate_extioi_post_load(void *opaque, int version_id)
return 0;
}
-static const VMStateDescription vmstate_extioi_core = {
- .name = "extioi-core",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static const VMStateDescription vmstate_loongarch_extioi = {
- .name = TYPE_LOONGARCH_EXTIOI,
- .version_id = 3,
- .minimum_version_id = 3,
- .post_load = vmstate_extioi_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT),
- VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI,
- EXTIOI_IRQS_NODETYPE_COUNT / 2),
- VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32),
- VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32),
- VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4),
- VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4),
-
- VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu,
- vmstate_extioi_core, ExtIOICore),
- VMSTATE_UINT32(features, LoongArchExtIOI),
- VMSTATE_UINT32(status, LoongArchExtIOI),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static Property extioi_properties[] = {
- DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1),
- DEFINE_PROP_BIT("has-virtualization-extension", LoongArchExtIOI, features,
- EXTIOI_HAS_VIRT_EXTENSION, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void loongarch_extioi_class_init(ObjectClass *klass, void *data)
+static void loongarch_extioi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = loongarch_extioi_realize;
- dc->reset = loongarch_extioi_reset;
- device_class_set_props(dc, extioi_properties);
- dc->vmsd = &vmstate_loongarch_extioi;
+ LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_CLASS(klass);
+ LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongarch_extioi_realize,
+ &lec->parent_realize);
+ device_class_set_parent_unrealize(dc, loongarch_extioi_unrealize,
+ &lec->parent_unrealize);
+ resettable_class_set_parent_phases(rc, NULL, loongarch_extioi_reset_hold,
+ NULL, &lec->parent_phases);
+ lecc->pre_save = vmstate_extioi_pre_save;
+ lecc->post_load = vmstate_extioi_post_load;
}
-static const TypeInfo loongarch_extioi_info = {
- .name = TYPE_LOONGARCH_EXTIOI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(struct LoongArchExtIOI),
- .class_init = loongarch_extioi_class_init,
- .instance_finalize = loongarch_extioi_finalize,
+static const TypeInfo loongarch_extioi_types[] = {
+ {
+ .name = TYPE_LOONGARCH_EXTIOI,
+ .parent = TYPE_LOONGARCH_EXTIOI_COMMON,
+ .instance_size = sizeof(LoongArchExtIOIState),
+ .class_size = sizeof(LoongArchExtIOIClass),
+ .class_init = loongarch_extioi_class_init,
+ }
};
-static void loongarch_extioi_register_types(void)
-{
- type_register_static(&loongarch_extioi_info);
-}
-
-type_init(loongarch_extioi_register_types)
+DEFINE_TYPES(loongarch_extioi_types)
diff --git a/hw/intc/loongarch_extioi_common.c b/hw/intc/loongarch_extioi_common.c
new file mode 100644
index 0000000..4a904b3
--- /dev/null
+++ b/hw/intc/loongarch_extioi_common.c
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Loongson extioi interrupt controller emulation
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/loongarch_extioi_common.h"
+#include "migration/vmstate.h"
+#include "target/loongarch/cpu.h"
+
+static ExtIOICore *loongarch_extioi_get_cpu(LoongArchExtIOICommonState *s,
+ DeviceState *dev)
+{
+ CPUClass *k = CPU_GET_CLASS(dev);
+ uint64_t arch_id = k->get_arch_id(CPU(dev));
+ int i;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ if (s->cpu[i].arch_id == arch_id) {
+ return &s->cpu[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void loongarch_extioi_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(hotplug_dev);
+ Object *obj = OBJECT(dev);
+ ExtIOICore *core;
+ int pin, index;
+
+ if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) {
+ warn_report("LoongArch extioi: Invalid %s device type",
+ object_get_typename(obj));
+ return;
+ }
+
+ core = loongarch_extioi_get_cpu(s, dev);
+ if (!core) {
+ return;
+ }
+
+ core->cpu = CPU(dev);
+ index = core - s->cpu;
+
+ /*
+ * connect extioi irq to the cpu irq
+ * cpu_pin[LS3A_INTC_IP + 2 : 2] <= intc_pin[LS3A_INTC_IP : 0]
+ */
+ for (pin = 0; pin < LS3A_INTC_IP; pin++) {
+ qdev_connect_gpio_out(DEVICE(s), index * LS3A_INTC_IP + pin,
+ qdev_get_gpio_in(dev, pin + 2));
+ }
+}
+
+static void loongarch_extioi_cpu_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(hotplug_dev);
+ Object *obj = OBJECT(dev);
+ ExtIOICore *core;
+
+ if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) {
+ warn_report("LoongArch extioi: Invalid %s device type",
+ object_get_typename(obj));
+ return;
+ }
+
+ core = loongarch_extioi_get_cpu(s, dev);
+ if (!core) {
+ return;
+ }
+
+ core->cpu = NULL;
+}
+
+static void loongarch_extioi_common_realize(DeviceState *dev, Error **errp)
+{
+ LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)dev;
+ MachineState *machine = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ const CPUArchIdList *id_list;
+ int i, pin;
+
+ assert(mc->possible_cpu_arch_ids);
+ id_list = mc->possible_cpu_arch_ids(machine);
+ s->num_cpu = id_list->len;
+ s->cpu = g_new0(ExtIOICore, s->num_cpu);
+ if (s->cpu == NULL) {
+ error_setg(errp, "Memory allocation for ExtIOICore faile");
+ return;
+ }
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->cpu[i].arch_id = id_list->cpus[i].arch_id;
+ s->cpu[i].cpu = CPU(id_list->cpus[i].cpu);
+
+ for (pin = 0; pin < LS3A_INTC_IP; pin++) {
+ qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1);
+ }
+ }
+}
+
+static void loongarch_extioi_common_reset_hold(Object *obj, ResetType type)
+{
+ LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(obj);
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(obj);
+ ExtIOICore *core;
+ int i;
+
+ if (lecc->parent_phases.hold) {
+ lecc->parent_phases.hold(obj, type);
+ }
+
+ /* Clear HW registers for the board */
+ memset(s->nodetype, 0, sizeof(s->nodetype));
+ memset(s->bounce, 0, sizeof(s->bounce));
+ memset(s->isr, 0, sizeof(s->isr));
+ memset(s->enable, 0, sizeof(s->enable));
+ memset(s->ipmap, 0, sizeof(s->ipmap));
+ memset(s->coremap, 0, sizeof(s->coremap));
+ memset(s->sw_pending, 0, sizeof(s->sw_pending));
+ memset(s->sw_ipmap, 0, sizeof(s->sw_ipmap));
+ memset(s->sw_coremap, 0, sizeof(s->sw_coremap));
+
+ for (i = 0; i < s->num_cpu; i++) {
+ core = s->cpu + i;
+        /* The EXTIOI has a slot for this CPU, but the CPU is not present */
+ if (!core->cpu) {
+ continue;
+ }
+
+ /* Clear HW registers for CPUs */
+ memset(core->coreisr, 0, sizeof(core->coreisr));
+ memset(core->sw_isr, 0, sizeof(core->sw_isr));
+ }
+
+ s->status = 0;
+}
+
+static int loongarch_extioi_common_pre_save(void *opaque)
+{
+ LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)opaque;
+ LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(s);
+
+ if (lecc->pre_save) {
+ return lecc->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int loongarch_extioi_common_post_load(void *opaque, int version_id)
+{
+ LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)opaque;
+ LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(s);
+
+ if (lecc->post_load) {
+ return lecc->post_load(s, version_id);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_extioi_core = {
+ .name = "extioi-core",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_loongarch_extioi = {
+ .name = "loongarch.extioi",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .pre_save = loongarch_extioi_common_pre_save,
+ .post_load = loongarch_extioi_common_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOICommonState,
+ EXTIOI_IRQS_GROUP_COUNT),
+ VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOICommonState,
+ EXTIOI_IRQS_NODETYPE_COUNT / 2),
+ VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOICommonState,
+ EXTIOI_IRQS / 32),
+ VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOICommonState,
+ EXTIOI_IRQS / 32),
+ VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOICommonState,
+ EXTIOI_IRQS_IPMAP_SIZE / 4),
+ VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOICommonState,
+ EXTIOI_IRQS / 4),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOICommonState,
+ num_cpu, vmstate_extioi_core, ExtIOICore),
+ VMSTATE_UINT32(features, LoongArchExtIOICommonState),
+ VMSTATE_UINT32(status, LoongArchExtIOICommonState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const Property extioi_properties[] = {
+ DEFINE_PROP_BIT("has-virtualization-extension", LoongArchExtIOICommonState,
+ features, EXTIOI_HAS_VIRT_EXTENSION, 0),
+};
+
+static void loongarch_extioi_common_class_init(ObjectClass *klass,
+ const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongarch_extioi_common_realize,
+ &lecc->parent_realize);
+ resettable_class_set_parent_phases(rc, NULL,
+ loongarch_extioi_common_reset_hold,
+ NULL, &lecc->parent_phases);
+ device_class_set_props(dc, extioi_properties);
+ dc->vmsd = &vmstate_loongarch_extioi;
+ hc->plug = loongarch_extioi_cpu_plug;
+ hc->unplug = loongarch_extioi_cpu_unplug;
+}
+
+static const TypeInfo loongarch_extioi_common_types[] = {
+ {
+ .name = TYPE_LOONGARCH_EXTIOI_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LoongArchExtIOICommonState),
+ .class_size = sizeof(LoongArchExtIOICommonClass),
+ .class_init = loongarch_extioi_common_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ },
+ .abstract = true,
+ }
+};
+
+DEFINE_TYPES(loongarch_extioi_common_types)
diff --git a/hw/intc/loongarch_extioi_kvm.c b/hw/intc/loongarch_extioi_kvm.c
new file mode 100644
index 0000000..0133540
--- /dev/null
+++ b/hw/intc/loongarch_extioi_kvm.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch EXTIOI interrupt kvm support
+ *
+ * Copyright (C) 2025 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/typedefs.h"
+#include "hw/intc/loongarch_extioi.h"
+#include "linux/kvm.h"
+#include "qapi/error.h"
+#include "system/kvm.h"
+
+static void kvm_extioi_access_reg(int fd, uint64_t addr, void *val, bool write)
+{
+ kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS,
+ addr, val, write, &error_abort);
+}
+
+static void kvm_extioi_access_sw_state(int fd, uint64_t addr,
+ void *val, bool write)
+{
+ kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS,
+ addr, val, write, &error_abort);
+}
+
+static void kvm_extioi_access_sw_status(void *opaque, bool write)
+{
+ LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque);
+ LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque);
+ int addr;
+
+ addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE;
+ kvm_extioi_access_sw_state(les->dev_fd, addr, &lecs->status, write);
+}
+
+static void kvm_extioi_access_regs(void *opaque, bool write)
+{
+ LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque);
+ LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque);
+ int fd = les->dev_fd;
+ int addr, offset, cpu;
+
+ for (addr = EXTIOI_NODETYPE_START; addr < EXTIOI_NODETYPE_END; addr += 4) {
+ offset = (addr - EXTIOI_NODETYPE_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->nodetype[offset], write);
+ }
+
+ for (addr = EXTIOI_IPMAP_START; addr < EXTIOI_IPMAP_END; addr += 4) {
+ offset = (addr - EXTIOI_IPMAP_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->ipmap[offset], write);
+ }
+
+ for (addr = EXTIOI_ENABLE_START; addr < EXTIOI_ENABLE_END; addr += 4) {
+ offset = (addr - EXTIOI_ENABLE_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->enable[offset], write);
+ }
+
+ for (addr = EXTIOI_BOUNCE_START; addr < EXTIOI_BOUNCE_END; addr += 4) {
+ offset = (addr - EXTIOI_BOUNCE_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->bounce[offset], write);
+ }
+
+ for (addr = EXTIOI_ISR_START; addr < EXTIOI_ISR_END; addr += 4) {
+ offset = (addr - EXTIOI_ISR_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->isr[offset], write);
+ }
+
+ for (addr = EXTIOI_COREMAP_START; addr < EXTIOI_COREMAP_END; addr += 4) {
+ offset = (addr - EXTIOI_COREMAP_START) / 4;
+ kvm_extioi_access_reg(fd, addr, &lecs->coremap[offset], write);
+ }
+
+ for (cpu = 0; cpu < lecs->num_cpu; cpu++) {
+ for (addr = EXTIOI_COREISR_START;
+ addr < EXTIOI_COREISR_END; addr += 4) {
+ offset = (addr - EXTIOI_COREISR_START) / 4;
+ kvm_extioi_access_reg(fd, (cpu << 16) | addr,
+ &lecs->cpu[cpu].coreisr[offset], write);
+ }
+ }
+}
+
+int kvm_extioi_get(void *opaque)
+{
+ kvm_extioi_access_regs(opaque, false);
+ kvm_extioi_access_sw_status(opaque, false);
+ return 0;
+}
+
+int kvm_extioi_put(void *opaque, int version_id)
+{
+ LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque);
+ int fd = les->dev_fd;
+
+ if (fd == 0) {
+ return 0;
+ }
+
+ kvm_extioi_access_regs(opaque, true);
+ kvm_extioi_access_sw_status(opaque, true);
+ kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
+ KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED,
+ NULL, true, &error_abort);
+ return 0;
+}
+
+void kvm_extioi_realize(DeviceState *dev, Error **errp)
+{
+ LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(dev);
+ LoongArchExtIOIState *les = LOONGARCH_EXTIOI(dev);
+ int ret;
+
+ ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_EIOINTC, false);
+ if (ret < 0) {
+ fprintf(stderr, "create KVM_LOONGARCH_EIOINTC failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+
+ les->dev_fd = ret;
+ ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
+ KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU,
+ &lecs->num_cpu, true, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_NUM_CPU failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+
+ ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
+ KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE,
+ &lecs->features, true, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_FEATURE failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+}
diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c
new file mode 100644
index 0000000..fc8005c
--- /dev/null
+++ b/hw/intc/loongarch_ipi.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch IPI interrupt support
+ *
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "hw/boards.h"
+#include "qapi/error.h"
+#include "hw/intc/loongarch_ipi.h"
+#include "hw/qdev-properties.h"
+#include "system/kvm.h"
+#include "target/loongarch/cpu.h"
+
+static AddressSpace *get_iocsr_as(CPUState *cpu)
+{
+ return LOONGARCH_CPU(cpu)->env.address_space_iocsr;
+}
+
+static int loongarch_ipi_cmp(const void *a, const void *b)
+{
+ IPICore *ipi_a = (IPICore *)a;
+ IPICore *ipi_b = (IPICore *)b;
+
+ return ipi_a->arch_id - ipi_b->arch_id;
+}
+
+static int loongarch_cpu_by_arch_id(LoongsonIPICommonState *lics,
+ int64_t arch_id, int *index, CPUState **pcs)
+{
+ IPICore ipi, *found;
+
+ ipi.arch_id = arch_id;
+ found = bsearch(&ipi, lics->cpu, lics->num_cpu, sizeof(IPICore),
+ loongarch_ipi_cmp);
+ if (found && found->cpu) {
+ if (index) {
+ *index = found - lics->cpu;
+ }
+
+ if (pcs) {
+ *pcs = found->cpu;
+ }
+
+ return MEMTX_OK;
+ }
+
+ return MEMTX_ERROR;
+}
+
+static IPICore *loongarch_ipi_get_cpu(LoongsonIPICommonState *lics,
+ DeviceState *dev)
+{
+ CPUClass *k = CPU_GET_CLASS(dev);
+ uint64_t arch_id = k->get_arch_id(CPU(dev));
+ int i;
+
+ for (i = 0; i < lics->num_cpu; i++) {
+ if (lics->cpu[i].arch_id == arch_id) {
+ return &lics->cpu[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void loongarch_ipi_realize(DeviceState *dev, Error **errp)
+{
+ LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(dev);
+ LoongarchIPIClass *lic = LOONGARCH_IPI_GET_CLASS(dev);
+ MachineState *machine = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ const CPUArchIdList *id_list;
+ Error *local_err = NULL;
+ int i;
+
+ lic->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ assert(mc->possible_cpu_arch_ids);
+ id_list = mc->possible_cpu_arch_ids(machine);
+ lics->num_cpu = id_list->len;
+ lics->cpu = g_new0(IPICore, lics->num_cpu);
+ for (i = 0; i < lics->num_cpu; i++) {
+ lics->cpu[i].arch_id = id_list->cpus[i].arch_id;
+ lics->cpu[i].cpu = CPU(id_list->cpus[i].cpu);
+ lics->cpu[i].ipi = lics;
+ qdev_init_gpio_out(dev, &lics->cpu[i].irq, 1);
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_ipi_realize(dev, errp);
+ }
+}
+
+static void loongarch_ipi_reset_hold(Object *obj, ResetType type)
+{
+ int i;
+ LoongarchIPIClass *lic = LOONGARCH_IPI_GET_CLASS(obj);
+ LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(obj);
+ IPICore *core;
+
+ if (lic->parent_phases.hold) {
+ lic->parent_phases.hold(obj, type);
+ }
+
+ for (i = 0; i < lics->num_cpu; i++) {
+ core = lics->cpu + i;
+        /* The IPI has a slot for this CPU, but the CPU is not present */
+ if (!core->cpu) {
+ continue;
+ }
+
+ core->status = 0;
+ core->en = 0;
+ core->set = 0;
+ core->clear = 0;
+ memset(core->buf, 0, sizeof(core->buf));
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_ipi_put(obj, 0);
+ }
+}
+
+static void loongarch_ipi_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(hotplug_dev);
+ Object *obj = OBJECT(dev);
+ IPICore *core;
+ int index;
+
+ if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) {
+ warn_report("LoongArch extioi: Invalid %s device type",
+ object_get_typename(obj));
+ return;
+ }
+
+ core = loongarch_ipi_get_cpu(lics, dev);
+ if (!core) {
+ return;
+ }
+
+ core->cpu = CPU(dev);
+ index = core - lics->cpu;
+
+ /* connect ipi irq to cpu irq */
+ qdev_connect_gpio_out(DEVICE(lics), index, qdev_get_gpio_in(dev, IRQ_IPI));
+}
+
+static void loongarch_ipi_cpu_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(hotplug_dev);
+ Object *obj = OBJECT(dev);
+ IPICore *core;
+
+ if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) {
+ warn_report("LoongArch extioi: Invalid %s device type",
+ object_get_typename(obj));
+ return;
+ }
+
+ core = loongarch_ipi_get_cpu(lics, dev);
+ if (!core) {
+ return;
+ }
+
+ core->cpu = NULL;
+}
+
+static int loongarch_ipi_pre_save(void *opaque)
+{
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_ipi_get(opaque);
+ }
+
+ return 0;
+}
+
+static int loongarch_ipi_post_load(void *opaque, int version_id)
+{
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_ipi_put(opaque, version_id);
+ }
+
+ return 0;
+}
+
+static void loongarch_ipi_class_init(ObjectClass *klass, const void *data)
+{
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+ LoongarchIPIClass *lic = LOONGARCH_IPI_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongarch_ipi_realize,
+ &lic->parent_realize);
+ resettable_class_set_parent_phases(rc, NULL, loongarch_ipi_reset_hold,
+ NULL, &lic->parent_phases);
+ licc->get_iocsr_as = get_iocsr_as;
+ licc->cpu_by_arch_id = loongarch_cpu_by_arch_id;
+ hc->plug = loongarch_ipi_cpu_plug;
+ hc->unplug = loongarch_ipi_cpu_unplug;
+ licc->pre_save = loongarch_ipi_pre_save;
+ licc->post_load = loongarch_ipi_post_load;
+}
+
+static const TypeInfo loongarch_ipi_types[] = {
+ {
+ .name = TYPE_LOONGARCH_IPI,
+ .parent = TYPE_LOONGSON_IPI_COMMON,
+ .instance_size = sizeof(LoongarchIPIState),
+ .class_size = sizeof(LoongarchIPIClass),
+ .class_init = loongarch_ipi_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ },
+ }
+};
+
+DEFINE_TYPES(loongarch_ipi_types)
diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c
new file mode 100644
index 0000000..4cb3acc
--- /dev/null
+++ b/hw/intc/loongarch_ipi_kvm.c
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch IPI interrupt KVM support
+ *
+ * Copyright (C) 2025 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/intc/loongarch_ipi.h"
+#include "system/kvm.h"
+#include "target/loongarch/cpu.h"
+
+static void kvm_ipi_access_reg(int fd, uint64_t addr, uint32_t *val, bool write)
+{
+ kvm_device_access(fd, KVM_DEV_LOONGARCH_IPI_GRP_REGS,
+ addr, val, write, &error_abort);
+}
+
+static void kvm_ipi_access_regs(void *opaque, bool write)
+{
+ LoongsonIPICommonState *ipi = (LoongsonIPICommonState *)opaque;
+ LoongarchIPIState *lis = LOONGARCH_IPI(opaque);
+ IPICore *core;
+ uint64_t attr;
+ int cpu, fd = lis->dev_fd;
+
+ if (fd == 0) {
+ return;
+ }
+
+ for (cpu = 0; cpu < ipi->num_cpu; cpu++) {
+ core = &ipi->cpu[cpu];
+ attr = (cpu << 16) | CORE_STATUS_OFF;
+ kvm_ipi_access_reg(fd, attr, &core->status, write);
+
+ attr = (cpu << 16) | CORE_EN_OFF;
+ kvm_ipi_access_reg(fd, attr, &core->en, write);
+
+ attr = (cpu << 16) | CORE_SET_OFF;
+ kvm_ipi_access_reg(fd, attr, &core->set, write);
+
+ attr = (cpu << 16) | CORE_CLEAR_OFF;
+ kvm_ipi_access_reg(fd, attr, &core->clear, write);
+
+ attr = (cpu << 16) | CORE_BUF_20;
+ kvm_ipi_access_reg(fd, attr, &core->buf[0], write);
+
+ attr = (cpu << 16) | CORE_BUF_28;
+ kvm_ipi_access_reg(fd, attr, &core->buf[2], write);
+
+ attr = (cpu << 16) | CORE_BUF_30;
+ kvm_ipi_access_reg(fd, attr, &core->buf[4], write);
+
+ attr = (cpu << 16) | CORE_BUF_38;
+ kvm_ipi_access_reg(fd, attr, &core->buf[6], write);
+ }
+}
+
+int kvm_ipi_get(void *opaque)
+{
+ kvm_ipi_access_regs(opaque, false);
+ return 0;
+}
+
+int kvm_ipi_put(void *opaque, int version_id)
+{
+ kvm_ipi_access_regs(opaque, true);
+ return 0;
+}
+
+void kvm_ipi_realize(DeviceState *dev, Error **errp)
+{
+ LoongarchIPIState *lis = LOONGARCH_IPI(dev);
+ int ret;
+
+ ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_IPI, false);
+ if (ret < 0) {
+ fprintf(stderr, "IPI KVM_CREATE_DEVICE failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+
+ lis->dev_fd = ret;
+}
diff --git a/hw/intc/loongarch_pch_msi.c b/hw/intc/loongarch_pch_msi.c
index ecf3ed0..f6d1631 100644
--- a/hw/intc/loongarch_pch_msi.c
+++ b/hw/intc/loongarch_pch_msi.c
@@ -13,6 +13,7 @@
#include "hw/pci/msi.h"
#include "hw/misc/unimp.h"
#include "migration/vmstate.h"
+#include "system/kvm.h"
#include "trace.h"
static uint64_t loongarch_msi_mem_read(void *opaque, hwaddr addr, unsigned size)
@@ -26,6 +27,15 @@ static void loongarch_msi_mem_write(void *opaque, hwaddr addr,
LoongArchPCHMSI *s = (LoongArchPCHMSI *)opaque;
int irq_num;
+ if (kvm_irqchip_in_kernel()) {
+ MSIMessage msg;
+
+ msg.address = addr;
+ msg.data = val;
+ kvm_irqchip_send_msi(kvm_state, msg);
+ return;
+ }
+
/*
* vector number is irq number from upper extioi intc
* need subtract irq base to get msi vector offset
@@ -42,13 +52,6 @@ static const MemoryRegionOps loongarch_pch_msi_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static void pch_msi_irq_handler(void *opaque, int irq, int level)
-{
- LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(opaque);
-
- qemu_set_irq(s->pch_msi_irq[irq], level);
-}
-
static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp)
{
LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(dev);
@@ -59,9 +62,7 @@ static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp)
}
s->pch_msi_irq = g_new(qemu_irq, s->irq_num);
-
qdev_init_gpio_out(dev, s->pch_msi_irq, s->irq_num);
- qdev_init_gpio_in(dev, pch_msi_irq_handler, s->irq_num);
}
static void loongarch_pch_msi_unrealize(DeviceState *dev)
@@ -83,13 +84,12 @@ static void loongarch_pch_msi_init(Object *obj)
}
-static Property loongarch_msi_properties[] = {
+static const Property loongarch_msi_properties[] = {
DEFINE_PROP_UINT32("msi_irq_base", LoongArchPCHMSI, irq_base, 0),
DEFINE_PROP_UINT32("msi_irq_num", LoongArchPCHMSI, irq_num, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void loongarch_pch_msi_class_init(ObjectClass *klass, void *data)
+static void loongarch_pch_msi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c
index 2d5e65a..c4b242d 100644
--- a/hw/intc/loongarch_pch_pic.c
+++ b/hw/intc/loongarch_pch_pic.c
@@ -7,17 +7,15 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
-#include "hw/sysbus.h"
-#include "hw/loongarch/virt.h"
-#include "hw/pci-host/ls7a.h"
+#include "qemu/log.h"
#include "hw/irq.h"
#include "hw/intc/loongarch_pch_pic.h"
-#include "hw/qdev-properties.h"
-#include "migration/vmstate.h"
+#include "system/kvm.h"
#include "trace.h"
#include "qapi/error.h"
-static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level)
+static void pch_pic_update_irq(LoongArchPICCommonState *s, uint64_t mask,
+ int level)
{
uint64_t val;
int irq;
@@ -45,12 +43,17 @@ static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level)
static void pch_pic_irq_handler(void *opaque, int irq, int level)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque);
uint64_t mask = 1ULL << irq;
assert(irq < s->irq_num);
trace_loongarch_pch_pic_irq_handler(irq, level);
+ if (kvm_irqchip_in_kernel()) {
+ kvm_set_irq(kvm_state, irq, !!level);
+ return;
+ }
+
if (s->intedge & mask) {
/* Edge triggered */
if (level) {
@@ -75,389 +78,266 @@ static void pch_pic_irq_handler(void *opaque, int irq, int level)
pch_pic_update_irq(s, mask, level);
}
-static uint64_t loongarch_pch_pic_low_readw(void *opaque, hwaddr addr,
- unsigned size)
+static uint64_t pch_pic_read(void *opaque, hwaddr addr, uint64_t field_mask)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque);
uint64_t val = 0;
- uint32_t offset = addr & 0xfff;
+ uint32_t offset;
- switch (offset) {
- case PCH_PIC_INT_ID_LO:
- val = PCH_PIC_INT_ID_VAL;
+ offset = addr & 7;
+ addr -= offset;
+ switch (addr) {
+ case PCH_PIC_INT_ID:
+ val = cpu_to_le64(s->id.data);
break;
- case PCH_PIC_INT_ID_HI:
- /*
- * With 7A1000 manual
- * bit 0-15 pch irqchip version
- * bit 16-31 irq number supported with pch irqchip
- */
- val = deposit32(PCH_PIC_INT_ID_VER, 16, 16, s->irq_num - 1);
+ case PCH_PIC_INT_MASK:
+ val = s->int_mask;
break;
- case PCH_PIC_INT_MASK_LO:
- val = (uint32_t)s->int_mask;
+ case PCH_PIC_INT_EDGE:
+ val = s->intedge;
break;
- case PCH_PIC_INT_MASK_HI:
- val = s->int_mask >> 32;
+ case PCH_PIC_HTMSI_EN:
+ val = s->htmsi_en;
break;
- case PCH_PIC_INT_EDGE_LO:
- val = (uint32_t)s->intedge;
+ case PCH_PIC_AUTO_CTRL0:
+ case PCH_PIC_AUTO_CTRL1:
+        /* PCH PIC is always connected to EXTIOI; discard auto_ctrl access */
break;
- case PCH_PIC_INT_EDGE_HI:
- val = s->intedge >> 32;
+ case PCH_PIC_INT_STATUS:
+ val = s->intisr & (~s->int_mask);
break;
- case PCH_PIC_HTMSI_EN_LO:
- val = (uint32_t)s->htmsi_en;
+ case PCH_PIC_INT_POL:
+ val = s->int_polarity;
break;
- case PCH_PIC_HTMSI_EN_HI:
- val = s->htmsi_en >> 32;
+ case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END:
+ val = *(uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC);
break;
- case PCH_PIC_AUTO_CTRL0_LO:
- case PCH_PIC_AUTO_CTRL0_HI:
- case PCH_PIC_AUTO_CTRL1_LO:
- case PCH_PIC_AUTO_CTRL1_HI:
+ case PCH_PIC_ROUTE_ENTRY ... PCH_PIC_ROUTE_ENTRY_END:
+ val = *(uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY);
break;
default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pch_pic_read: Bad address 0x%"PRIx64"\n", addr);
break;
}
- trace_loongarch_pch_pic_low_readw(size, addr, val);
- return val;
+ return (val >> (offset * 8)) & field_mask;
}
-static uint64_t get_writew_val(uint64_t value, uint32_t target, bool hi)
+static void pch_pic_write(void *opaque, hwaddr addr, uint64_t value,
+ uint64_t field_mask)
{
- uint64_t mask = 0xffffffff00000000;
- uint64_t data = target;
-
- return hi ? (value & ~mask) | (data << 32) : (value & mask) | data;
-}
-
-static void loongarch_pch_pic_low_writew(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
- uint32_t offset, old_valid, data = (uint32_t)value;
- uint64_t old, int_mask;
- offset = addr & 0xfff;
-
- trace_loongarch_pch_pic_low_writew(size, addr, data);
-
- switch (offset) {
- case PCH_PIC_INT_MASK_LO:
- old = s->int_mask;
- s->int_mask = get_writew_val(old, data, 0);
- old_valid = (uint32_t)old;
- if (old_valid & ~data) {
- pch_pic_update_irq(s, (old_valid & ~data), 1);
- }
- if (~old_valid & data) {
- pch_pic_update_irq(s, (~old_valid & data), 0);
- }
- break;
- case PCH_PIC_INT_MASK_HI:
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque);
+ uint32_t offset;
+ uint64_t old, mask, data, *ptemp;
+
+ offset = addr & 7;
+ addr -= offset;
+ mask = field_mask << (offset * 8);
+ data = (value & field_mask) << (offset * 8);
+ switch (addr) {
+ case PCH_PIC_INT_MASK:
old = s->int_mask;
- s->int_mask = get_writew_val(old, data, 1);
- old_valid = (uint32_t)(old >> 32);
- int_mask = old_valid & ~data;
- if (int_mask) {
- pch_pic_update_irq(s, int_mask << 32, 1);
+ s->int_mask = (old & ~mask) | data;
+ if (old & ~data) {
+ pch_pic_update_irq(s, old & ~data, 1);
}
- int_mask = ~old_valid & data;
- if (int_mask) {
- pch_pic_update_irq(s, int_mask << 32, 0);
+
+ if (~old & data) {
+ pch_pic_update_irq(s, ~old & data, 0);
}
break;
- case PCH_PIC_INT_EDGE_LO:
- s->intedge = get_writew_val(s->intedge, data, 0);
- break;
- case PCH_PIC_INT_EDGE_HI:
- s->intedge = get_writew_val(s->intedge, data, 1);
+ case PCH_PIC_INT_EDGE:
+ s->intedge = (s->intedge & ~mask) | data;
break;
- case PCH_PIC_INT_CLEAR_LO:
+ case PCH_PIC_INT_CLEAR:
if (s->intedge & data) {
- s->intirr &= (~data);
+ s->intirr &= ~data;
pch_pic_update_irq(s, data, 0);
- s->intisr &= (~data);
+ s->intisr &= ~data;
}
break;
- case PCH_PIC_INT_CLEAR_HI:
- value <<= 32;
- if (s->intedge & value) {
- s->intirr &= (~value);
- pch_pic_update_irq(s, value, 0);
- s->intisr &= (~value);
- }
+ case PCH_PIC_HTMSI_EN:
+ s->htmsi_en = (s->htmsi_en & ~mask) | data;
break;
- case PCH_PIC_HTMSI_EN_LO:
- s->htmsi_en = get_writew_val(s->htmsi_en, data, 0);
+ case PCH_PIC_AUTO_CTRL0:
+ case PCH_PIC_AUTO_CTRL1:
+ /* Discard auto_ctrl access */
break;
- case PCH_PIC_HTMSI_EN_HI:
- s->htmsi_en = get_writew_val(s->htmsi_en, data, 1);
+ case PCH_PIC_INT_POL:
+ s->int_polarity = (s->int_polarity & ~mask) | data;
break;
- case PCH_PIC_AUTO_CTRL0_LO:
- case PCH_PIC_AUTO_CTRL0_HI:
- case PCH_PIC_AUTO_CTRL1_LO:
- case PCH_PIC_AUTO_CTRL1_HI:
+ case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END:
+ ptemp = (uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC);
+ *ptemp = (*ptemp & ~mask) | data;
+ break;
+ case PCH_PIC_ROUTE_ENTRY ... PCH_PIC_ROUTE_ENTRY_END:
+ ptemp = (uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY);
+ *ptemp = (*ptemp & ~mask) | data;
break;
default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pch_pic_write: Bad address 0x%"PRIx64"\n", addr);
break;
}
}
-static uint64_t loongarch_pch_pic_high_readw(void *opaque, hwaddr addr,
- unsigned size)
+static uint64_t loongarch_pch_pic_read(void *opaque, hwaddr addr,
+ unsigned size)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
uint64_t val = 0;
- uint32_t offset = addr & 0xfff;
- switch (offset) {
- case STATUS_LO_START:
- val = (uint32_t)(s->intisr & (~s->int_mask));
+ switch (size) {
+ case 1:
+ val = pch_pic_read(opaque, addr, UCHAR_MAX);
break;
- case STATUS_HI_START:
- val = (s->intisr & (~s->int_mask)) >> 32;
+ case 2:
+ val = pch_pic_read(opaque, addr, USHRT_MAX);
break;
- case POL_LO_START:
- val = (uint32_t)s->int_polarity;
+ case 4:
+ val = pch_pic_read(opaque, addr, UINT_MAX);
break;
- case POL_HI_START:
- val = s->int_polarity >> 32;
+ case 8:
+ val = pch_pic_read(opaque, addr, UINT64_MAX);
break;
default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "loongarch_pch_pic_read: Bad size %d\n", size);
break;
}
- trace_loongarch_pch_pic_high_readw(size, addr, val);
+ trace_loongarch_pch_pic_read(size, addr, val);
return val;
}
-static void loongarch_pch_pic_high_writew(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
+static void loongarch_pch_pic_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
- uint32_t offset, data = (uint32_t)value;
- offset = addr & 0xfff;
-
- trace_loongarch_pch_pic_high_writew(size, addr, data);
+ trace_loongarch_pch_pic_write(size, addr, value);
- switch (offset) {
- case STATUS_LO_START:
- s->intisr = get_writew_val(s->intisr, data, 0);
+ switch (size) {
+ case 1:
+ pch_pic_write(opaque, addr, value, UCHAR_MAX);
break;
- case STATUS_HI_START:
- s->intisr = get_writew_val(s->intisr, data, 1);
+ case 2:
+ pch_pic_write(opaque, addr, value, USHRT_MAX);
break;
- case POL_LO_START:
- s->int_polarity = get_writew_val(s->int_polarity, data, 0);
break;
- case POL_HI_START:
- s->int_polarity = get_writew_val(s->int_polarity, data, 1);
- break;
- default:
- break;
- }
-}
-
-static uint64_t loongarch_pch_pic_readb(void *opaque, hwaddr addr,
- unsigned size)
-{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
- uint64_t val = 0;
- uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET;
- int64_t offset_tmp;
-
- switch (offset) {
- case PCH_PIC_HTMSI_VEC_OFFSET ... PCH_PIC_HTMSI_VEC_END:
- offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET;
- if (offset_tmp >= 0 && offset_tmp < 64) {
- val = s->htmsi_vector[offset_tmp];
- }
- break;
- case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END:
- offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET;
- if (offset_tmp >= 0 && offset_tmp < 64) {
- val = s->route_entry[offset_tmp];
- }
- break;
- default:
- break;
- }
-
- trace_loongarch_pch_pic_readb(size, addr, val);
- return val;
-}
-
-static void loongarch_pch_pic_writeb(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
-{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque);
- int32_t offset_tmp;
- uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET;
-
- trace_loongarch_pch_pic_writeb(size, addr, data);
-
- switch (offset) {
- case PCH_PIC_HTMSI_VEC_OFFSET ... PCH_PIC_HTMSI_VEC_END:
- offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET;
- if (offset_tmp >= 0 && offset_tmp < 64) {
- s->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff);
- }
+ case 4:
+ pch_pic_write(opaque, addr, value, UINT_MAX);
break;
- case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END:
- offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET;
- if (offset_tmp >= 0 && offset_tmp < 64) {
- s->route_entry[offset_tmp] = (uint8_t)(data & 0xff);
- }
+ case 8:
+ pch_pic_write(opaque, addr, value, UINT64_MAX);
break;
default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "loongarch_pch_pic_write: Bad size %d\n", size);
break;
}
}
-static const MemoryRegionOps loongarch_pch_pic_reg32_low_ops = {
- .read = loongarch_pch_pic_low_readw,
- .write = loongarch_pch_pic_low_writew,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 8,
- },
- .impl = {
- .min_access_size = 4,
- .max_access_size = 4,
- },
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-static const MemoryRegionOps loongarch_pch_pic_reg32_high_ops = {
- .read = loongarch_pch_pic_high_readw,
- .write = loongarch_pch_pic_high_writew,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 8,
- },
- .impl = {
- .min_access_size = 4,
- .max_access_size = 4,
- },
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-static const MemoryRegionOps loongarch_pch_pic_reg8_ops = {
- .read = loongarch_pch_pic_readb,
- .write = loongarch_pch_pic_writeb,
+static const MemoryRegionOps loongarch_pch_pic_ops = {
+ .read = loongarch_pch_pic_read,
+ .write = loongarch_pch_pic_write,
.valid = {
.min_access_size = 1,
- .max_access_size = 1,
+ .max_access_size = 8,
+ /*
+ * PCH PIC device would not work correctly if the guest was doing
+ * unaligned access. This might not be a limitation on the real
+ * device but in practice there is no reason for a guest to access
+ * this device unaligned.
+ */
+ .unaligned = false,
},
.impl = {
.min_access_size = 1,
- .max_access_size = 1,
+ .max_access_size = 8,
},
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static void loongarch_pch_pic_reset(DeviceState *d)
+static void loongarch_pic_reset_hold(Object *obj, ResetType type)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(d);
- int i;
-
- s->int_mask = -1;
- s->htmsi_en = 0x0;
- s->intedge = 0x0;
- s->intclr = 0x0;
- s->auto_crtl0 = 0x0;
- s->auto_crtl1 = 0x0;
- for (i = 0; i < 64; i++) {
- s->route_entry[i] = 0x1;
- s->htmsi_vector[i] = 0x0;
+ LoongarchPICClass *lpc = LOONGARCH_PIC_GET_CLASS(obj);
+
+ if (lpc->parent_phases.hold) {
+ lpc->parent_phases.hold(obj, type);
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_pic_put(obj, 0);
}
- s->intirr = 0x0;
- s->intisr = 0x0;
- s->last_intirr = 0x0;
- s->int_polarity = 0x0;
}
-static void loongarch_pch_pic_realize(DeviceState *dev, Error **errp)
+static void loongarch_pic_realize(DeviceState *dev, Error **errp)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(dev);
-
- if (!s->irq_num || s->irq_num > VIRT_PCH_PIC_IRQ_NUM) {
- error_setg(errp, "Invalid 'pic_irq_num'");
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(dev);
+ LoongarchPICClass *lpc = LOONGARCH_PIC_GET_CLASS(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ Error *local_err = NULL;
+
+ lpc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
qdev_init_gpio_out(dev, s->parent_irq, s->irq_num);
qdev_init_gpio_in(dev, pch_pic_irq_handler, s->irq_num);
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_pic_realize(dev, errp);
+ } else {
+ memory_region_init_io(&s->iomem, OBJECT(dev),
+ &loongarch_pch_pic_ops,
+ s, TYPE_LOONGARCH_PIC, VIRT_PCH_REG_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem);
+ }
}
-static void loongarch_pch_pic_init(Object *obj)
+static int loongarch_pic_pre_save(LoongArchPICCommonState *opaque)
{
- LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- memory_region_init_io(&s->iomem32_low, obj,
- &loongarch_pch_pic_reg32_low_ops,
- s, PCH_PIC_NAME(.reg32_part1), 0x100);
- memory_region_init_io(&s->iomem8, obj, &loongarch_pch_pic_reg8_ops,
- s, PCH_PIC_NAME(.reg8), 0x2a0);
- memory_region_init_io(&s->iomem32_high, obj,
- &loongarch_pch_pic_reg32_high_ops,
- s, PCH_PIC_NAME(.reg32_part2), 0xc60);
- sysbus_init_mmio(sbd, &s->iomem32_low);
- sysbus_init_mmio(sbd, &s->iomem8);
- sysbus_init_mmio(sbd, &s->iomem32_high);
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_pic_get(opaque);
+ }
+ return 0;
}
-static Property loongarch_pch_pic_properties[] = {
- DEFINE_PROP_UINT32("pch_pic_irq_num", LoongArchPCHPIC, irq_num, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static const VMStateDescription vmstate_loongarch_pch_pic = {
- .name = TYPE_LOONGARCH_PCH_PIC,
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT64(int_mask, LoongArchPCHPIC),
- VMSTATE_UINT64(htmsi_en, LoongArchPCHPIC),
- VMSTATE_UINT64(intedge, LoongArchPCHPIC),
- VMSTATE_UINT64(intclr, LoongArchPCHPIC),
- VMSTATE_UINT64(auto_crtl0, LoongArchPCHPIC),
- VMSTATE_UINT64(auto_crtl1, LoongArchPCHPIC),
- VMSTATE_UINT8_ARRAY(route_entry, LoongArchPCHPIC, 64),
- VMSTATE_UINT8_ARRAY(htmsi_vector, LoongArchPCHPIC, 64),
- VMSTATE_UINT64(last_intirr, LoongArchPCHPIC),
- VMSTATE_UINT64(intirr, LoongArchPCHPIC),
- VMSTATE_UINT64(intisr, LoongArchPCHPIC),
- VMSTATE_UINT64(int_polarity, LoongArchPCHPIC),
- VMSTATE_END_OF_LIST()
+static int loongarch_pic_post_load(LoongArchPICCommonState *opaque,
+ int version_id)
+{
+ if (kvm_irqchip_in_kernel()) {
+ return kvm_pic_put(opaque, version_id);
}
-};
-static void loongarch_pch_pic_class_init(ObjectClass *klass, void *data)
+ return 0;
+}
+
+static void loongarch_pic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = loongarch_pch_pic_realize;
- dc->reset = loongarch_pch_pic_reset;
- dc->vmsd = &vmstate_loongarch_pch_pic;
- device_class_set_props(dc, loongarch_pch_pic_properties);
+ LoongarchPICClass *lpc = LOONGARCH_PIC_CLASS(klass);
+ LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ resettable_class_set_parent_phases(rc, NULL, loongarch_pic_reset_hold,
+ NULL, &lpc->parent_phases);
+ device_class_set_parent_realize(dc, loongarch_pic_realize,
+ &lpc->parent_realize);
+ lpcc->pre_save = loongarch_pic_pre_save;
+ lpcc->post_load = loongarch_pic_post_load;
}
-static const TypeInfo loongarch_pch_pic_info = {
- .name = TYPE_LOONGARCH_PCH_PIC,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(LoongArchPCHPIC),
- .instance_init = loongarch_pch_pic_init,
- .class_init = loongarch_pch_pic_class_init,
+static const TypeInfo loongarch_pic_types[] = {
+ {
+ .name = TYPE_LOONGARCH_PIC,
+ .parent = TYPE_LOONGARCH_PIC_COMMON,
+ .instance_size = sizeof(LoongarchPICState),
+ .class_size = sizeof(LoongarchPICClass),
+ .class_init = loongarch_pic_class_init,
+ }
};
-static void loongarch_pch_pic_register_types(void)
-{
- type_register_static(&loongarch_pch_pic_info);
-}
-
-type_init(loongarch_pch_pic_register_types)
+DEFINE_TYPES(loongarch_pic_types)
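The new concrete model above layers on an abstract LoongArch PIC common parent: it chains into the parent's realize for shared validation before doing its own KVM or MMIO setup, and the reset-phase chaining via resettable_class_set_parent_phases() follows the same save-the-parent-hook shape. A minimal sketch of that QOM chaining pattern, assuming QEMU's qdev API; ChildClass, the "child-device" type name and the omitted QOM boilerplate are hypothetical, not taken from the patch:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-core.h"

/* Hypothetical concrete class deriving from an abstract "common" device;
 * the usual OBJECT_DECLARE_TYPE/TypeInfo boilerplate is omitted. */
typedef struct ChildClass {
    DeviceClass parent_class;
    DeviceRealize parent_realize;   /* saved pointer to the parent's realize */
} ChildClass;

#define CHILD_CLASS(klass) \
    OBJECT_CLASS_CHECK(ChildClass, (klass), "child-device")
#define CHILD_GET_CLASS(obj) \
    OBJECT_GET_CLASS(ChildClass, (obj), "child-device")

static void child_realize(DeviceState *dev, Error **errp)
{
    ChildClass *cc = CHILD_GET_CLASS(dev);
    Error *local_err = NULL;

    /* Let the abstract common parent validate properties first. */
    cc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /* ...concrete setup: MMIO regions for TCG, or a KVM in-kernel device... */
}

static void child_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ChildClass *cc = CHILD_CLASS(klass);

    /* Install child_realize() and remember the parent's realize so the
     * child can chain into it. */
    device_class_set_parent_realize(dc, child_realize, &cc->parent_realize);
}

The same split between a common base and a concrete model is reused further down for the Loongson IPI.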
diff --git a/hw/intc/loongarch_pic_common.c b/hw/intc/loongarch_pic_common.c
new file mode 100644
index 0000000..de17050
--- /dev/null
+++ b/hw/intc/loongarch_pic_common.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU Loongson 7A1000 I/O interrupt controller.
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/intc/loongarch_pic_common.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+static int loongarch_pic_pre_save(void *opaque)
+{
+ LoongArchPICCommonState *s = (LoongArchPICCommonState *)opaque;
+ LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_GET_CLASS(s);
+
+ if (lpcc->pre_save) {
+ return lpcc->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int loongarch_pic_post_load(void *opaque, int version_id)
+{
+ LoongArchPICCommonState *s = (LoongArchPICCommonState *)opaque;
+ LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_GET_CLASS(s);
+
+ if (lpcc->post_load) {
+ return lpcc->post_load(s, version_id);
+ }
+
+ return 0;
+}
+
+static void loongarch_pic_common_realize(DeviceState *dev, Error **errp)
+{
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(dev);
+
+ if (!s->irq_num || s->irq_num > VIRT_PCH_PIC_IRQ_NUM) {
+ error_setg(errp, "Invalid 'pic_irq_num'");
+ return;
+ }
+}
+
+static void loongarch_pic_common_reset_hold(Object *obj, ResetType type)
+{
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(obj);
+ int i;
+
+ /*
+ * Per the Loongson 7A1000 User Manual,
+ * Chapter 5.2 "Description of Interrupt-related Registers"
+ *
+ * Interrupt controller identification register 1
+ * Bit 24-31 Interrupt Controller ID
+ * Interrupt controller identification register 2
+ * Bit 0-7 Interrupt Controller version number
+ * Bit 16-23 The number of interrupt sources supported
+ */
+ s->id.desc.id = PCH_PIC_INT_ID_VAL;
+ s->id.desc.version = PCH_PIC_INT_ID_VER;
+ s->id.desc.irq_num = s->irq_num - 1;
+ s->int_mask = UINT64_MAX;
+ s->htmsi_en = 0x0;
+ s->intedge = 0x0;
+ s->intclr = 0x0;
+ s->auto_crtl0 = 0x0;
+ s->auto_crtl1 = 0x0;
+ for (i = 0; i < 64; i++) {
+ s->route_entry[i] = 0x1;
+ s->htmsi_vector[i] = 0x0;
+ }
+ s->intirr = 0x0;
+ s->intisr = 0x0;
+ s->last_intirr = 0x0;
+ s->int_polarity = 0x0;
+}
+
+static const Property loongarch_pic_common_properties[] = {
+ DEFINE_PROP_UINT32("pch_pic_irq_num", LoongArchPICCommonState, irq_num, 0),
+};
+
+static const VMStateDescription vmstate_loongarch_pic_common = {
+ .name = "loongarch_pch_pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = loongarch_pic_pre_save,
+ .post_load = loongarch_pic_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(int_mask, LoongArchPICCommonState),
+ VMSTATE_UINT64(htmsi_en, LoongArchPICCommonState),
+ VMSTATE_UINT64(intedge, LoongArchPICCommonState),
+ VMSTATE_UINT64(intclr, LoongArchPICCommonState),
+ VMSTATE_UINT64(auto_crtl0, LoongArchPICCommonState),
+ VMSTATE_UINT64(auto_crtl1, LoongArchPICCommonState),
+ VMSTATE_UINT8_ARRAY(route_entry, LoongArchPICCommonState, 64),
+ VMSTATE_UINT8_ARRAY(htmsi_vector, LoongArchPICCommonState, 64),
+ VMSTATE_UINT64(last_intirr, LoongArchPICCommonState),
+ VMSTATE_UINT64(intirr, LoongArchPICCommonState),
+ VMSTATE_UINT64(intisr, LoongArchPICCommonState),
+ VMSTATE_UINT64(int_polarity, LoongArchPICCommonState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void loongarch_pic_common_class_init(ObjectClass *klass,
+ const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongarch_pic_common_realize,
+ &lpcc->parent_realize);
+ resettable_class_set_parent_phases(rc, NULL,
+ loongarch_pic_common_reset_hold,
+ NULL, &lpcc->parent_phases);
+ device_class_set_props(dc, loongarch_pic_common_properties);
+ dc->vmsd = &vmstate_loongarch_pic_common;
+}
+
+static const TypeInfo loongarch_pic_common_types[] = {
+ {
+ .name = TYPE_LOONGARCH_PIC_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LoongArchPICCommonState),
+ .class_size = sizeof(LoongArchPICCommonClass),
+ .class_init = loongarch_pic_common_class_init,
+ .abstract = true,
+ }
+};
+
+DEFINE_TYPES(loongarch_pic_common_types)
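The reset comment above spells out how the two 32-bit identification registers are laid out. A standalone, runnable sketch of that field packing; the pch_pic_pack_id() helper and the choice to place register 2 in the upper half of a combined 64-bit value are illustrative assumptions, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Register 1: bits 24-31 hold the interrupt controller ID.
 * Register 2: bits 0-7 hold the version, bits 16-23 the number of
 * supported interrupt sources (stored as irq_num - 1 in the model above).
 */
static uint64_t pch_pic_pack_id(uint8_t id, uint8_t version, uint8_t sources)
{
    uint32_t reg1 = (uint32_t)id << 24;
    uint32_t reg2 = version | ((uint32_t)sources << 16);

    return ((uint64_t)reg2 << 32) | reg1;   /* reg2 assumed in the high word */
}

int main(void)
{
    /* Example values only; the real constants come from the headers. */
    printf("0x%016llx\n",
           (unsigned long long)pch_pic_pack_id(0x7, 0x1, 64 - 1));
    return 0;
}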
diff --git a/hw/intc/loongarch_pic_kvm.c b/hw/intc/loongarch_pic_kvm.c
new file mode 100644
index 0000000..dd504ec
--- /dev/null
+++ b/hw/intc/loongarch_pic_kvm.c
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch KVM PCH PIC interrupt support
+ *
+ * Copyright (C) 2025 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "hw/intc/loongarch_pch_pic.h"
+#include "hw/loongarch/virt.h"
+#include "hw/pci-host/ls7a.h"
+#include "system/kvm.h"
+
+static void kvm_pch_pic_access_reg(int fd, uint64_t addr, void *val, bool write)
+{
+ kvm_device_access(fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS,
+ addr, val, write, &error_abort);
+}
+
+static void kvm_pch_pic_access(void *opaque, bool write)
+{
+ LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque);
+ LoongarchPICState *lps = LOONGARCH_PIC(opaque);
+ int fd = lps->dev_fd;
+ int addr, offset;
+
+ if (fd == 0) {
+ return;
+ }
+
+ kvm_pch_pic_access_reg(fd, PCH_PIC_INT_MASK, &s->int_mask, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_HTMSI_EN, &s->htmsi_en, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_INT_EDGE, &s->intedge, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL0, &s->auto_crtl0, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL1, &s->auto_crtl1, write);
+
+ for (addr = PCH_PIC_ROUTE_ENTRY;
+ addr < PCH_PIC_ROUTE_ENTRY_END; addr++) {
+ offset = addr - PCH_PIC_ROUTE_ENTRY;
+ kvm_pch_pic_access_reg(fd, addr, &s->route_entry[offset], write);
+ }
+
+ for (addr = PCH_PIC_HTMSI_VEC; addr < PCH_PIC_HTMSI_VEC_END; addr++) {
+ offset = addr - PCH_PIC_HTMSI_VEC;
+ kvm_pch_pic_access_reg(fd, addr, &s->htmsi_vector[offset], write);
+ }
+
+ kvm_pch_pic_access_reg(fd, PCH_PIC_INT_REQUEST, &s->intirr, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_INT_STATUS, &s->intisr, write);
+ kvm_pch_pic_access_reg(fd, PCH_PIC_INT_POL, &s->int_polarity, write);
+}
+
+int kvm_pic_get(void *opaque)
+{
+ kvm_pch_pic_access(opaque, false);
+ return 0;
+}
+
+int kvm_pic_put(void *opaque, int version_id)
+{
+ kvm_pch_pic_access(opaque, true);
+ return 0;
+}
+
+void kvm_pic_realize(DeviceState *dev, Error **errp)
+{
+ LoongarchPICState *lps = LOONGARCH_PIC(dev);
+ uint64_t pch_pic_base = VIRT_PCH_REG_BASE;
+ int ret;
+
+ ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_PCHPIC, false);
+ if (ret < 0) {
+ fprintf(stderr, "Create KVM_LOONGARCH_PCHPIC failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+
+ lps->dev_fd = ret;
+ ret = kvm_device_access(lps->dev_fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL,
+ KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT,
+ &pch_pic_base, true, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_LOONGARCH_PCH_PIC_INIT failed: %s\n",
+ strerror(-ret));
+ abort();
+ }
+}
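When the irqchip is in-kernel, the realize path above creates a KVM device and programs its base address through a control-group attribute. A hedged, generic sketch of that flow; DEV_TYPE, GROUP_CTRL and ATTR_INIT stand in for the real KVM_DEV_* constants:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "system/kvm.h"

static int create_and_init_kvm_device(uint64_t base, Error **errp)
{
    uint64_t val = base;
    int fd;

    /* Ask the kernel for an in-kernel device instance and keep its fd. */
    fd = kvm_create_device(kvm_state, DEV_TYPE, false);
    if (fd < 0) {
        error_setg_errno(errp, -fd, "cannot create in-kernel device");
        return fd;
    }

    /* Push the MMIO base (or other init data) through a control attribute. */
    kvm_device_access(fd, GROUP_CTRL, ATTR_INIT, &val, true, errp);
    return fd;
}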
diff --git a/hw/intc/loongson_ipi.c b/hw/intc/loongson_ipi.c
index e6a7142..fbc73e8 100644
--- a/hw/intc/loongson_ipi.c
+++ b/hw/intc/loongson_ipi.c
@@ -6,224 +6,41 @@
*/
#include "qemu/osdep.h"
-#include "hw/boards.h"
-#include "hw/sysbus.h"
#include "hw/intc/loongson_ipi.h"
-#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
-#include "qemu/log.h"
-#include "exec/address-spaces.h"
-#include "migration/vmstate.h"
-#ifdef TARGET_LOONGARCH64
-#include "target/loongarch/cpu.h"
-#endif
-#ifdef TARGET_MIPS
#include "target/mips/cpu.h"
-#endif
-#include "trace.h"
-static MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr,
- uint64_t *data,
- unsigned size, MemTxAttrs attrs)
+static AddressSpace *get_iocsr_as(CPUState *cpu)
{
- IPICore *s = opaque;
- uint64_t ret = 0;
- int index = 0;
-
- addr &= 0xff;
- switch (addr) {
- case CORE_STATUS_OFF:
- ret = s->status;
- break;
- case CORE_EN_OFF:
- ret = s->en;
- break;
- case CORE_SET_OFF:
- ret = 0;
- break;
- case CORE_CLEAR_OFF:
- ret = 0;
- break;
- case CORE_BUF_20 ... CORE_BUF_38 + 4:
- index = (addr - CORE_BUF_20) >> 2;
- ret = s->buf[index];
- break;
- default:
- qemu_log_mask(LOG_UNIMP, "invalid read: %x", (uint32_t)addr);
- break;
- }
-
- trace_loongson_ipi_read(size, (uint64_t)addr, ret);
- *data = ret;
- return MEMTX_OK;
-}
-
-static MemTxResult loongson_ipi_iocsr_readl(void *opaque, hwaddr addr,
- uint64_t *data,
- unsigned size, MemTxAttrs attrs)
-{
- LoongsonIPI *ipi = opaque;
- IPICore *s;
-
- if (attrs.requester_id >= ipi->num_cpu) {
- return MEMTX_DECODE_ERROR;
- }
-
- s = &ipi->cpu[attrs.requester_id];
- return loongson_ipi_core_readl(s, addr, data, size, attrs);
-}
-
-static AddressSpace *get_cpu_iocsr_as(CPUState *cpu)
-{
-#ifdef TARGET_LOONGARCH64
- return LOONGARCH_CPU(cpu)->env.address_space_iocsr;
-#endif
-#ifdef TARGET_MIPS
if (ase_lcsr_available(&MIPS_CPU(cpu)->env)) {
return &MIPS_CPU(cpu)->env.iocsr.as;
}
-#endif
- return NULL;
-}
-
-static MemTxResult send_ipi_data(CPUState *cpu, uint64_t val, hwaddr addr,
- MemTxAttrs attrs)
-{
- int i, mask = 0, data = 0;
- AddressSpace *iocsr_as = get_cpu_iocsr_as(cpu);
-
- if (!iocsr_as) {
- return MEMTX_DECODE_ERROR;
- }
-
- /*
- * bit 27-30 is mask for byte writing,
- * if the mask is 0, we need not to do anything.
- */
- if ((val >> 27) & 0xf) {
- data = address_space_ldl(iocsr_as, addr, attrs, NULL);
- for (i = 0; i < 4; i++) {
- /* get mask for byte writing */
- if (val & (0x1 << (27 + i))) {
- mask |= 0xff << (i * 8);
- }
- }
- }
- data &= mask;
- data |= (val >> 32) & ~mask;
- address_space_stl(iocsr_as, addr, data, attrs, NULL);
-
- return MEMTX_OK;
+ return NULL;
}
-static MemTxResult mail_send(uint64_t val, MemTxAttrs attrs)
+static int loongson_cpu_by_arch_id(LoongsonIPICommonState *lics,
+ int64_t arch_id, int *index, CPUState **pcs)
{
- uint32_t cpuid;
- hwaddr addr;
CPUState *cs;
- cpuid = extract32(val, 16, 10);
- cs = cpu_by_arch_id(cpuid);
+ cs = cpu_by_arch_id(arch_id);
if (cs == NULL) {
- return MEMTX_DECODE_ERROR;
+ return MEMTX_ERROR;
}
- /* override requester_id */
- addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c);
- attrs.requester_id = cs->cpu_index;
- return send_ipi_data(cs, val, addr, attrs);
-}
-
-static MemTxResult any_send(uint64_t val, MemTxAttrs attrs)
-{
- uint32_t cpuid;
- hwaddr addr;
- CPUState *cs;
-
- cpuid = extract32(val, 16, 10);
- cs = cpu_by_arch_id(cpuid);
- if (cs == NULL) {
- return MEMTX_DECODE_ERROR;
+ if (index) {
+ *index = cs->cpu_index;
}
- /* override requester_id */
- addr = val & 0xffff;
- attrs.requester_id = cs->cpu_index;
- return send_ipi_data(cs, val, addr, attrs);
-}
-
-static MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr,
- uint64_t val, unsigned size,
- MemTxAttrs attrs)
-{
- IPICore *s = opaque;
- LoongsonIPI *ipi = s->ipi;
- int index = 0;
- uint32_t cpuid;
- uint8_t vector;
- CPUState *cs;
-
- addr &= 0xff;
- trace_loongson_ipi_write(size, (uint64_t)addr, val);
- switch (addr) {
- case CORE_STATUS_OFF:
- qemu_log_mask(LOG_GUEST_ERROR, "can not be written");
- break;
- case CORE_EN_OFF:
- s->en = val;
- break;
- case CORE_SET_OFF:
- s->status |= val;
- if (s->status != 0 && (s->status & s->en) != 0) {
- qemu_irq_raise(s->irq);
- }
- break;
- case CORE_CLEAR_OFF:
- s->status &= ~val;
- if (s->status == 0 && s->en != 0) {
- qemu_irq_lower(s->irq);
- }
- break;
- case CORE_BUF_20 ... CORE_BUF_38 + 4:
- index = (addr - CORE_BUF_20) >> 2;
- s->buf[index] = val;
- break;
- case IOCSR_IPI_SEND:
- cpuid = extract32(val, 16, 10);
- /* IPI status vector */
- vector = extract8(val, 0, 5);
- cs = cpu_by_arch_id(cpuid);
- if (cs == NULL || cs->cpu_index >= ipi->num_cpu) {
- return MEMTX_DECODE_ERROR;
- }
- loongson_ipi_core_writel(&ipi->cpu[cs->cpu_index], CORE_SET_OFF,
- BIT(vector), 4, attrs);
- break;
- default:
- qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr);
- break;
+ if (pcs) {
+ *pcs = cs;
}
return MEMTX_OK;
}
-static MemTxResult loongson_ipi_iocsr_writel(void *opaque, hwaddr addr,
- uint64_t val, unsigned size,
- MemTxAttrs attrs)
-{
- LoongsonIPI *ipi = opaque;
- IPICore *s;
-
- if (attrs.requester_id >= ipi->num_cpu) {
- return MEMTX_DECODE_ERROR;
- }
-
- s = &ipi->cpu[attrs.requester_id];
- return loongson_ipi_core_writel(s, addr, val, size, attrs);
-}
-
static const MemoryRegionOps loongson_ipi_core_ops = {
.read_with_attrs = loongson_ipi_core_readl,
.write_with_attrs = loongson_ipi_core_writel,
@@ -234,146 +51,79 @@ static const MemoryRegionOps loongson_ipi_core_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static const MemoryRegionOps loongson_ipi_iocsr_ops = {
- .read_with_attrs = loongson_ipi_iocsr_readl,
- .write_with_attrs = loongson_ipi_iocsr_writel,
- .impl.min_access_size = 4,
- .impl.max_access_size = 4,
- .valid.min_access_size = 4,
- .valid.max_access_size = 8,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-/* mail send and any send only support writeq */
-static MemTxResult loongson_ipi_writeq(void *opaque, hwaddr addr, uint64_t val,
- unsigned size, MemTxAttrs attrs)
-{
- MemTxResult ret = MEMTX_OK;
-
- addr &= 0xfff;
- switch (addr) {
- case MAIL_SEND_OFFSET:
- ret = mail_send(val, attrs);
- break;
- case ANY_SEND_OFFSET:
- ret = any_send(val, attrs);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static const MemoryRegionOps loongson_ipi64_ops = {
- .write_with_attrs = loongson_ipi_writeq,
- .impl.min_access_size = 8,
- .impl.max_access_size = 8,
- .valid.min_access_size = 8,
- .valid.max_access_size = 8,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
static void loongson_ipi_realize(DeviceState *dev, Error **errp)
{
- LoongsonIPI *s = LOONGSON_IPI(dev);
+ LoongsonIPICommonState *sc = LOONGSON_IPI_COMMON(dev);
+ LoongsonIPIState *s = LOONGSON_IPI(dev);
+ LoongsonIPIClass *lic = LOONGSON_IPI_GET_CLASS(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ Error *local_err = NULL;
int i;
- if (s->num_cpu == 0) {
- error_setg(errp, "num-cpu must be at least 1");
+ lic->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
- memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev),
- &loongson_ipi_iocsr_ops,
- s, "loongson_ipi_iocsr", 0x48);
-
- /* loongson_ipi_iocsr performs re-entrant IO through ipi_send */
- s->ipi_iocsr_mem.disable_reentrancy_guard = true;
-
- sysbus_init_mmio(sbd, &s->ipi_iocsr_mem);
-
- memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev),
- &loongson_ipi64_ops,
- s, "loongson_ipi64_iocsr", 0x118);
- sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem);
-
- s->cpu = g_new0(IPICore, s->num_cpu);
- if (s->cpu == NULL) {
- error_setg(errp, "Memory allocation for IPICore faile");
+ if (sc->num_cpu == 0) {
+ error_setg(errp, "num-cpu must be at least 1");
return;
}
- for (i = 0; i < s->num_cpu; i++) {
- s->cpu[i].ipi = s;
- s->cpu[i].ipi_mmio_mem = g_new0(MemoryRegion, 1);
- g_autofree char *name = g_strdup_printf("loongson_ipi_cpu%d_mmio", i);
- memory_region_init_io(s->cpu[i].ipi_mmio_mem, OBJECT(dev),
- &loongson_ipi_core_ops, &s->cpu[i], name, 0x48);
- sysbus_init_mmio(sbd, s->cpu[i].ipi_mmio_mem);
-
- qdev_init_gpio_out(dev, &s->cpu[i].irq, 1);
+ sc->cpu = g_new0(IPICore, sc->num_cpu);
+ for (i = 0; i < sc->num_cpu; i++) {
+ sc->cpu[i].ipi = sc;
+ qdev_init_gpio_out(dev, &sc->cpu[i].irq, 1);
}
-}
-static const VMStateDescription vmstate_ipi_core = {
- .name = "ipi-single",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(status, IPICore),
- VMSTATE_UINT32(en, IPICore),
- VMSTATE_UINT32(set, IPICore),
- VMSTATE_UINT32(clear, IPICore),
- VMSTATE_UINT32_ARRAY(buf, IPICore, IPI_MBX_NUM * 2),
- VMSTATE_END_OF_LIST()
- }
-};
+ s->ipi_mmio_mem = g_new0(MemoryRegion, sc->num_cpu);
+ for (i = 0; i < sc->num_cpu; i++) {
+ g_autofree char *name = g_strdup_printf("loongson_ipi_cpu%d_mmio", i);
-static const VMStateDescription vmstate_loongson_ipi = {
- .name = TYPE_LOONGSON_IPI,
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongsonIPI, num_cpu,
- vmstate_ipi_core, IPICore),
- VMSTATE_END_OF_LIST()
+ memory_region_init_io(&s->ipi_mmio_mem[i], OBJECT(dev),
+ &loongson_ipi_core_ops, &sc->cpu[i], name, 0x48);
+ sysbus_init_mmio(sbd, &s->ipi_mmio_mem[i]);
}
-};
-
-static Property ipi_properties[] = {
- DEFINE_PROP_UINT32("num-cpu", LoongsonIPI, num_cpu, 1),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void loongson_ipi_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = loongson_ipi_realize;
- device_class_set_props(dc, ipi_properties);
- dc->vmsd = &vmstate_loongson_ipi;
}
-static void loongson_ipi_finalize(Object *obj)
+static void loongson_ipi_unrealize(DeviceState *dev)
{
- LoongsonIPI *s = LOONGSON_IPI(obj);
+ LoongsonIPIState *s = LOONGSON_IPI(dev);
+ LoongsonIPIClass *k = LOONGSON_IPI_GET_CLASS(dev);
- g_free(s->cpu);
+ g_free(s->ipi_mmio_mem);
+
+ k->parent_unrealize(dev);
}
-static const TypeInfo loongson_ipi_info = {
- .name = TYPE_LOONGSON_IPI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(LoongsonIPI),
- .class_init = loongson_ipi_class_init,
- .instance_finalize = loongson_ipi_finalize,
+static const Property loongson_ipi_properties[] = {
+ DEFINE_PROP_UINT32("num-cpu", LoongsonIPICommonState, num_cpu, 1),
};
-static void loongson_ipi_register_types(void)
+static void loongson_ipi_class_init(ObjectClass *klass, const void *data)
{
- type_register_static(&loongson_ipi_info);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ LoongsonIPIClass *lic = LOONGSON_IPI_CLASS(klass);
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongson_ipi_realize,
+ &lic->parent_realize);
+ device_class_set_parent_unrealize(dc, loongson_ipi_unrealize,
+ &lic->parent_unrealize);
+ device_class_set_props(dc, loongson_ipi_properties);
+ licc->get_iocsr_as = get_iocsr_as;
+ licc->cpu_by_arch_id = loongson_cpu_by_arch_id;
}
-type_init(loongson_ipi_register_types)
+static const TypeInfo loongson_ipi_types[] = {
+ {
+ .name = TYPE_LOONGSON_IPI,
+ .parent = TYPE_LOONGSON_IPI_COMMON,
+ .instance_size = sizeof(LoongsonIPIState),
+ .class_size = sizeof(LoongsonIPIClass),
+ .class_init = loongson_ipi_class_init,
+ }
+};
+
+DEFINE_TYPES(loongson_ipi_types)
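Type registration is also modernised here: a const TypeInfo array registered with DEFINE_TYPES() replaces the per-type type_register_static() call inside a type_init() function. A minimal sketch of the idiom with a hypothetical type:

#include "qemu/osdep.h"
#include "qom/object.h"

#define TYPE_FOO_DEVICE "foo-device"   /* hypothetical example type */

static const TypeInfo foo_device_types[] = {
    {
        .name          = TYPE_FOO_DEVICE,
        .parent        = TYPE_OBJECT,
        .instance_size = sizeof(Object),   /* placeholder state size */
    },
};

/* Expands to a type_init() constructor that registers every array entry. */
DEFINE_TYPES(foo_device_types)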
diff --git a/hw/intc/loongson_ipi_common.c b/hw/intc/loongson_ipi_common.c
new file mode 100644
index 0000000..8cd78d4
--- /dev/null
+++ b/hw/intc/loongson_ipi_common.c
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Loongson IPI interrupt common support
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/intc/loongson_ipi_common.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+#include "migration/vmstate.h"
+#include "system/kvm.h"
+#include "trace.h"
+
+MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ IPICore *s = opaque;
+ uint64_t ret = 0;
+ int index = 0;
+
+ addr &= 0xff;
+ switch (addr) {
+ case CORE_STATUS_OFF:
+ ret = s->status;
+ break;
+ case CORE_EN_OFF:
+ ret = s->en;
+ break;
+ case CORE_SET_OFF:
+ ret = 0;
+ break;
+ case CORE_CLEAR_OFF:
+ ret = 0;
+ break;
+ case CORE_BUF_20 ... CORE_BUF_38 + 4:
+ index = (addr - CORE_BUF_20) >> 2;
+ ret = s->buf[index];
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "invalid read: %x", (uint32_t)addr);
+ break;
+ }
+
+ trace_loongson_ipi_read(size, (uint64_t)addr, ret);
+ *data = ret;
+
+ return MEMTX_OK;
+}
+
+static MemTxResult loongson_ipi_iocsr_readl(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ LoongsonIPICommonState *ipi = opaque;
+ IPICore *s;
+
+ if (attrs.requester_id >= ipi->num_cpu) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ s = &ipi->cpu[attrs.requester_id];
+ return loongson_ipi_core_readl(s, addr, data, size, attrs);
+}
+
+static MemTxResult send_ipi_data(LoongsonIPICommonState *ipi, CPUState *cpu,
+ uint64_t val, hwaddr addr, MemTxAttrs attrs)
+{
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi);
+ int i, mask = 0, data = 0;
+ AddressSpace *iocsr_as = licc->get_iocsr_as(cpu);
+
+ if (!iocsr_as) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ /*
+ * Bits 27-30 form the byte-write mask;
+ * if the mask is 0, there is nothing to do.
+ */
+ if ((val >> 27) & 0xf) {
+ data = address_space_ldl_le(iocsr_as, addr, attrs, NULL);
+ for (i = 0; i < 4; i++) {
+ /* get mask for byte writing */
+ if (val & (0x1 << (27 + i))) {
+ mask |= 0xff << (i * 8);
+ }
+ }
+ }
+
+ data &= mask;
+ data |= (val >> 32) & ~mask;
+ address_space_stl_le(iocsr_as, addr, data, attrs, NULL);
+
+ return MEMTX_OK;
+}
+
+static MemTxResult mail_send(LoongsonIPICommonState *ipi,
+ uint64_t val, MemTxAttrs attrs)
+{
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi);
+ uint32_t cpuid;
+ hwaddr addr;
+ CPUState *cs;
+ int cpu, ret;
+
+ cpuid = extract32(val, 16, 10);
+ ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs);
+ if (ret != MEMTX_OK) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ /* override requester_id */
+ addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c);
+ attrs.requester_id = cpu;
+ return send_ipi_data(ipi, cs, val, addr, attrs);
+}
+
+static MemTxResult any_send(LoongsonIPICommonState *ipi,
+ uint64_t val, MemTxAttrs attrs)
+{
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi);
+ uint32_t cpuid;
+ hwaddr addr;
+ CPUState *cs;
+ int cpu, ret;
+
+ cpuid = extract32(val, 16, 10);
+ ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs);
+ if (ret != MEMTX_OK) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ /* override requester_id */
+ addr = val & 0xffff;
+ attrs.requester_id = cpu;
+ return send_ipi_data(ipi, cs, val, addr, attrs);
+}
+
+MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size, MemTxAttrs attrs)
+{
+ IPICore *s = opaque;
+ LoongsonIPICommonState *ipi = s->ipi;
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi);
+ int index = 0;
+ uint32_t cpuid;
+ uint8_t vector;
+ CPUState *cs;
+ int cpu, ret;
+
+ addr &= 0xff;
+ trace_loongson_ipi_write(size, (uint64_t)addr, val);
+ switch (addr) {
+ case CORE_STATUS_OFF:
+ qemu_log_mask(LOG_GUEST_ERROR, "can not be written");
+ break;
+ case CORE_EN_OFF:
+ s->en = val;
+ break;
+ case CORE_SET_OFF:
+ s->status |= val;
+ if (s->status != 0 && (s->status & s->en) != 0) {
+ qemu_irq_raise(s->irq);
+ }
+ break;
+ case CORE_CLEAR_OFF:
+ s->status &= ~val;
+ if (s->status == 0 && s->en != 0) {
+ qemu_irq_lower(s->irq);
+ }
+ break;
+ case CORE_BUF_20 ... CORE_BUF_38 + 4:
+ index = (addr - CORE_BUF_20) >> 2;
+ s->buf[index] = val;
+ break;
+ case IOCSR_IPI_SEND:
+ cpuid = extract32(val, 16, 10);
+ /* IPI status vector */
+ vector = extract8(val, 0, 5);
+ ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs);
+ if (ret != MEMTX_OK || cpu >= ipi->num_cpu) {
+ return MEMTX_DECODE_ERROR;
+ }
+ loongson_ipi_core_writel(&ipi->cpu[cpu], CORE_SET_OFF,
+ BIT(vector), 4, attrs);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr);
+ break;
+ }
+
+ return MEMTX_OK;
+}
+
+static MemTxResult loongson_ipi_iocsr_writel(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size,
+ MemTxAttrs attrs)
+{
+ LoongsonIPICommonState *ipi = opaque;
+ IPICore *s;
+
+ if (attrs.requester_id >= ipi->num_cpu) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ s = &ipi->cpu[attrs.requester_id];
+ return loongson_ipi_core_writel(s, addr, val, size, attrs);
+}
+
+static const MemoryRegionOps loongson_ipi_iocsr_ops = {
+ .read_with_attrs = loongson_ipi_iocsr_readl,
+ .write_with_attrs = loongson_ipi_iocsr_writel,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 8,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/* mail send and any send only support writeq */
+static MemTxResult loongson_ipi_writeq(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size, MemTxAttrs attrs)
+{
+ LoongsonIPICommonState *ipi = opaque;
+ MemTxResult ret = MEMTX_OK;
+
+ addr &= 0xfff;
+ switch (addr) {
+ case MAIL_SEND_OFFSET:
+ ret = mail_send(ipi, val, attrs);
+ break;
+ case ANY_SEND_OFFSET:
+ ret = any_send(ipi, val, attrs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const MemoryRegionOps loongson_ipi64_ops = {
+ .write_with_attrs = loongson_ipi_writeq,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void loongson_ipi_common_realize(DeviceState *dev, Error **errp)
+{
+ LoongsonIPICommonState *s = LOONGSON_IPI_COMMON(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ if (kvm_irqchip_in_kernel()) {
+ return;
+ }
+
+ memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev),
+ &loongson_ipi_iocsr_ops,
+ s, "loongson_ipi_iocsr", 0x48);
+
+ /* loongson_ipi_iocsr performs re-entrant IO through ipi_send */
+ s->ipi_iocsr_mem.disable_reentrancy_guard = true;
+
+ sysbus_init_mmio(sbd, &s->ipi_iocsr_mem);
+
+ memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev),
+ &loongson_ipi64_ops,
+ s, "loongson_ipi64_iocsr", 0x118);
+ sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem);
+}
+
+static void loongson_ipi_common_unrealize(DeviceState *dev)
+{
+ LoongsonIPICommonState *s = LOONGSON_IPI_COMMON(dev);
+
+ g_free(s->cpu);
+}
+
+static int loongson_ipi_common_pre_save(void *opaque)
+{
+ IPICore *ipicore = (IPICore *)opaque;
+ LoongsonIPICommonState *s = ipicore->ipi;
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s);
+
+ if (licc->pre_save) {
+ return licc->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int loongson_ipi_common_post_load(void *opaque, int version_id)
+{
+ IPICore *ipicore = (IPICore *)opaque;
+ LoongsonIPICommonState *s = ipicore->ipi;
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s);
+
+ if (licc->post_load) {
+ return licc->post_load(s, version_id);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_ipi_core = {
+ .name = "ipi-single",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .pre_save = loongson_ipi_common_pre_save,
+ .post_load = loongson_ipi_common_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(status, IPICore),
+ VMSTATE_UINT32(en, IPICore),
+ VMSTATE_UINT32(set, IPICore),
+ VMSTATE_UINT32(clear, IPICore),
+ VMSTATE_UINT32_ARRAY(buf, IPICore, IPI_MBX_NUM * 2),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_loongson_ipi_common = {
+ .name = "loongson_ipi",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (const VMStateField[]) {
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongsonIPICommonState,
+ num_cpu, vmstate_ipi_core,
+ IPICore),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void loongson_ipi_common_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass);
+
+ device_class_set_parent_realize(dc, loongson_ipi_common_realize,
+ &licc->parent_realize);
+ device_class_set_parent_unrealize(dc, loongson_ipi_common_unrealize,
+ &licc->parent_unrealize);
+ dc->vmsd = &vmstate_loongson_ipi_common;
+}
+
+static const TypeInfo loongarch_ipi_common_types[] = {
+ {
+ .name = TYPE_LOONGSON_IPI_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LoongsonIPICommonState),
+ .class_size = sizeof(LoongsonIPICommonClass),
+ .class_init = loongson_ipi_common_class_init,
+ .abstract = true,
+ }
+};
+
+DEFINE_TYPES(loongarch_ipi_common_types)
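The byte-write mask handling in send_ipi_data() above is easiest to follow with concrete numbers. A standalone, runnable sketch that mirrors the merge logic; the interpretation follows the code and comment above, and the input values are invented:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the merge in send_ipi_data(): mask bits 27-30 of 'val' select
 * byte lanes that keep their old contents; the other lanes are taken from
 * bits 32-63 of 'val'. */
static uint32_t merge_ipi_word(uint64_t val, uint32_t old)
{
    uint32_t mask = 0, data = 0;
    int i;

    if ((val >> 27) & 0xf) {
        data = old;                 /* old word is only read when needed */
        for (i = 0; i < 4; i++) {
            if (val & (1u << (27 + i))) {
                mask |= 0xffu << (i * 8);
            }
        }
    }
    data &= mask;                            /* lanes preserved from 'old' */
    data |= (uint32_t)(val >> 32) & ~mask;   /* lanes written from val[63:32] */
    return data;
}

int main(void)
{
    uint64_t val = (0x11223344ULL << 32) | (1ULL << 27); /* keep byte lane 0 */

    printf("0x%08x\n", merge_ipi_word(val, 0xdeadbeef)); /* prints 0x112233ef */
    return 0;
}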
diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c
index cf3beef..2532322 100644
--- a/hw/intc/m68k_irqc.c
+++ b/hw/intc/m68k_irqc.c
@@ -85,13 +85,12 @@ static const VMStateDescription vmstate_m68k_irqc = {
}
};
-static Property m68k_irqc_properties[] = {
+static const Property m68k_irqc_properties[] = {
DEFINE_PROP_LINK("m68k-cpu", M68KIRQCState, cpu,
TYPE_M68K_CPU, ArchCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void m68k_irqc_class_init(ObjectClass *oc, void *data)
+static void m68k_irqc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
NMIClass *nc = NMI_CLASS(oc);
@@ -99,7 +98,7 @@ static void m68k_irqc_class_init(ObjectClass *oc, void *data)
device_class_set_props(dc, m68k_irqc_properties);
nc->nmi_monitor_handler = m68k_nmi;
- dc->reset = m68k_irqc_reset;
+ device_class_set_legacy_reset(dc, m68k_irqc_reset);
dc->vmsd = &vmstate_m68k_irqc;
ic->get_statistics = m68k_irqc_get_statistics;
ic->print_info = m68k_irqc_print_info;
@@ -111,7 +110,7 @@ static const TypeInfo m68k_irqc_type_info = {
.instance_size = sizeof(M68KIRQCState),
.instance_init = m68k_irqc_instance_init,
.class_init = m68k_irqc_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
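m68k_irqc above (and omap_intc, openpic, openpic_kvm and pl190 further down) switch from assigning dc->reset to wrapping the handler with device_class_set_legacy_reset(). A minimal sketch of the pattern; foo_reset()/foo_class_init() are hypothetical:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

static void foo_reset(DeviceState *dev)
{
    /* device-specific register reset, unchanged from the legacy handler */
}

static void foo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* Wraps the legacy DeviceReset handler in the Resettable machinery
     * instead of assigning dc->reset directly. */
    device_class_set_legacy_reset(dc, foo_reset);
}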
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
index afd1aa5..3137521 100644
--- a/hw/intc/meson.build
+++ b/hw/intc/meson.build
@@ -6,7 +6,7 @@ system_ss.add(when: 'CONFIG_ARM_GIC', if_true: files(
'arm_gicv3_common.c',
'arm_gicv3_its_common.c',
))
-system_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files(
+system_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files(
'arm_gicv3.c',
'arm_gicv3_dist.c',
'arm_gicv3_its.c',
@@ -15,7 +15,6 @@ system_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files(
system_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_intc.c'))
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c'))
system_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c'))
system_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c'))
@@ -40,7 +39,7 @@ endif
specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c'))
specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif_common.c'))
-specific_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files('arm_gicv3_cpuif.c'))
+specific_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files('arm_gicv3_cpuif.c'))
specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c'))
specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c'))
specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c'))
@@ -69,7 +68,15 @@ specific_ss.add(when: 'CONFIG_XIVE', if_true: files('xive.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'],
if_true: files('spapr_xive_kvm.c'))
specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c'))
+specific_ss.add(when: 'CONFIG_LOONGSON_IPI_COMMON', if_true: files('loongson_ipi_common.c'))
specific_ss.add(when: 'CONFIG_LOONGSON_IPI', if_true: files('loongson_ipi.c'))
-specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c'))
+specific_ss.add(when: 'CONFIG_LOONGARCH_IPI', if_true: files('loongarch_ipi.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_IPI'],
+ if_true: files('loongarch_ipi_kvm.c'))
+specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c', 'loongarch_pic_common.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_PCH_PIC'],
+ if_true: files('loongarch_pic_kvm.c'))
specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_MSI', if_true: files('loongarch_pch_msi.c'))
-specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c'))
+specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c', 'loongarch_extioi_common.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_EXTIOI'],
+ if_true: files('loongarch_extioi_kvm.c'))
diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c
index 77ba734..0c50ba4 100644
--- a/hw/intc/mips_gic.c
+++ b/hw/intc/mips_gic.c
@@ -14,9 +14,9 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
-#include "exec/memory.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/memory.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include "kvm_mips.h"
#include "hw/intc/mips_gic.h"
#include "hw/irq.h"
@@ -255,7 +255,6 @@ static void gic_write_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr,
return;
bad_offset:
qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr);
- return;
}
static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
@@ -438,13 +437,12 @@ static void mips_gic_realize(DeviceState *dev, Error **errp)
}
}
-static Property mips_gic_properties[] = {
+static const Property mips_gic_properties[] = {
DEFINE_PROP_UINT32("num-vp", MIPSGICState, num_vps, 1),
DEFINE_PROP_UINT32("num-irq", MIPSGICState, num_irq, 256),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mips_gic_class_init(ObjectClass *klass, void *data)
+static void mips_gic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
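The property arrays in mips_gic and the other converted files become static const and lose the DEFINE_PROP_END_OF_LIST() terminator; device_class_set_props() in this tree is assumed to take the length from the array itself. A sketch with a hypothetical device:

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"

/* FooState and "num-foo" are illustrative only. */
typedef struct FooState {
    DeviceState parent_obj;
    uint32_t num_foo;
} FooState;

static const Property foo_properties[] = {
    DEFINE_PROP_UINT32("num-foo", FooState, num_foo, 1),
};

static void foo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* No end-of-list marker: the helper is assumed to use the array
     * size directly. */
    device_class_set_props(dc, foo_properties);
}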
diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c
index 435c476..c61158b 100644
--- a/hw/intc/omap_intc.c
+++ b/hw/intc/omap_intc.c
@@ -50,8 +50,6 @@ struct OMAPIntcState {
int level_only;
uint32_t size;
- uint8_t revision;
-
/* state */
uint32_t new_agr[2];
int sir_intr[2];
@@ -104,8 +102,8 @@ static inline void omap_inth_update(OMAPIntcState *s, int is_fiq)
}
}
-#define INT_FALLING_EDGE 0
-#define INT_LOW_LEVEL 1
+#define INT_FALLING_EDGE 0
+#define INT_LOW_LEVEL 1
static void omap_set_intr(void *opaque, int irq, int req)
{
@@ -133,26 +131,6 @@ static void omap_set_intr(void *opaque, int irq, int req)
}
}
-/* Simplified version with no edge detection */
-static void omap_set_intr_noedge(void *opaque, int irq, int req)
-{
- OMAPIntcState *ih = opaque;
- uint32_t rise;
-
- struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5];
- int n = irq & 31;
-
- if (req) {
- rise = ~bank->inputs & (1 << n);
- if (rise) {
- bank->irqs |= bank->inputs |= rise;
- omap_inth_update(ih, 0);
- omap_inth_update(ih, 1);
- }
- } else
- bank->irqs = (bank->inputs &= ~(1 << n)) | bank->swi;
-}
-
static uint64_t omap_inth_read(void *opaque, hwaddr addr,
unsigned size)
{
@@ -164,13 +142,13 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr,
offset &= 0xff;
switch (offset) {
- case 0x00: /* ITR */
+ case 0x00: /* ITR */
return bank->irqs;
- case 0x04: /* MIR */
+ case 0x04: /* MIR */
return bank->mask;
- case 0x10: /* SIR_IRQ_CODE */
+ case 0x10: /* SIR_IRQ_CODE */
case 0x14: /* SIR_FIQ_CODE */
if (bank_no != 0)
break;
@@ -181,49 +159,49 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr,
bank->irqs &= ~(1 << i);
return line_no;
- case 0x18: /* CONTROL_REG */
+ case 0x18: /* CONTROL_REG */
if (bank_no != 0)
break;
return 0;
- case 0x1c: /* ILR0 */
- case 0x20: /* ILR1 */
- case 0x24: /* ILR2 */
- case 0x28: /* ILR3 */
- case 0x2c: /* ILR4 */
- case 0x30: /* ILR5 */
- case 0x34: /* ILR6 */
- case 0x38: /* ILR7 */
- case 0x3c: /* ILR8 */
- case 0x40: /* ILR9 */
- case 0x44: /* ILR10 */
- case 0x48: /* ILR11 */
- case 0x4c: /* ILR12 */
- case 0x50: /* ILR13 */
- case 0x54: /* ILR14 */
- case 0x58: /* ILR15 */
- case 0x5c: /* ILR16 */
- case 0x60: /* ILR17 */
- case 0x64: /* ILR18 */
- case 0x68: /* ILR19 */
- case 0x6c: /* ILR20 */
- case 0x70: /* ILR21 */
- case 0x74: /* ILR22 */
- case 0x78: /* ILR23 */
- case 0x7c: /* ILR24 */
- case 0x80: /* ILR25 */
- case 0x84: /* ILR26 */
- case 0x88: /* ILR27 */
- case 0x8c: /* ILR28 */
- case 0x90: /* ILR29 */
- case 0x94: /* ILR30 */
- case 0x98: /* ILR31 */
+ case 0x1c: /* ILR0 */
+ case 0x20: /* ILR1 */
+ case 0x24: /* ILR2 */
+ case 0x28: /* ILR3 */
+ case 0x2c: /* ILR4 */
+ case 0x30: /* ILR5 */
+ case 0x34: /* ILR6 */
+ case 0x38: /* ILR7 */
+ case 0x3c: /* ILR8 */
+ case 0x40: /* ILR9 */
+ case 0x44: /* ILR10 */
+ case 0x48: /* ILR11 */
+ case 0x4c: /* ILR12 */
+ case 0x50: /* ILR13 */
+ case 0x54: /* ILR14 */
+ case 0x58: /* ILR15 */
+ case 0x5c: /* ILR16 */
+ case 0x60: /* ILR17 */
+ case 0x64: /* ILR18 */
+ case 0x68: /* ILR19 */
+ case 0x6c: /* ILR20 */
+ case 0x70: /* ILR21 */
+ case 0x74: /* ILR22 */
+ case 0x78: /* ILR23 */
+ case 0x7c: /* ILR24 */
+ case 0x80: /* ILR25 */
+ case 0x84: /* ILR26 */
+ case 0x88: /* ILR27 */
+ case 0x8c: /* ILR28 */
+ case 0x90: /* ILR29 */
+ case 0x94: /* ILR30 */
+ case 0x98: /* ILR31 */
i = (offset - 0x1c) >> 2;
return (bank->priority[i] << 2) |
(((bank->sens_edge >> i) & 1) << 1) |
((bank->fiq >> i) & 1);
- case 0x9c: /* ISR */
+ case 0x9c: /* ISR */
return 0x00000000;
}
@@ -241,24 +219,24 @@ static void omap_inth_write(void *opaque, hwaddr addr,
offset &= 0xff;
switch (offset) {
- case 0x00: /* ITR */
+ case 0x00: /* ITR */
/* Important: ignore the clearing if the IRQ is level-triggered and
the input bit is 1 */
bank->irqs &= value | (bank->inputs & bank->sens_edge);
return;
- case 0x04: /* MIR */
+ case 0x04: /* MIR */
bank->mask = value;
omap_inth_update(s, 0);
omap_inth_update(s, 1);
return;
- case 0x10: /* SIR_IRQ_CODE */
- case 0x14: /* SIR_FIQ_CODE */
+ case 0x10: /* SIR_IRQ_CODE */
+ case 0x14: /* SIR_FIQ_CODE */
OMAP_RO_REG(addr);
break;
- case 0x18: /* CONTROL_REG */
+ case 0x18: /* CONTROL_REG */
if (bank_no != 0)
break;
if (value & 2) {
@@ -273,38 +251,38 @@ static void omap_inth_write(void *opaque, hwaddr addr,
}
return;
- case 0x1c: /* ILR0 */
- case 0x20: /* ILR1 */
- case 0x24: /* ILR2 */
- case 0x28: /* ILR3 */
- case 0x2c: /* ILR4 */
- case 0x30: /* ILR5 */
- case 0x34: /* ILR6 */
- case 0x38: /* ILR7 */
- case 0x3c: /* ILR8 */
- case 0x40: /* ILR9 */
- case 0x44: /* ILR10 */
- case 0x48: /* ILR11 */
- case 0x4c: /* ILR12 */
- case 0x50: /* ILR13 */
- case 0x54: /* ILR14 */
- case 0x58: /* ILR15 */
- case 0x5c: /* ILR16 */
- case 0x60: /* ILR17 */
- case 0x64: /* ILR18 */
- case 0x68: /* ILR19 */
- case 0x6c: /* ILR20 */
- case 0x70: /* ILR21 */
- case 0x74: /* ILR22 */
- case 0x78: /* ILR23 */
- case 0x7c: /* ILR24 */
- case 0x80: /* ILR25 */
- case 0x84: /* ILR26 */
- case 0x88: /* ILR27 */
- case 0x8c: /* ILR28 */
- case 0x90: /* ILR29 */
- case 0x94: /* ILR30 */
- case 0x98: /* ILR31 */
+ case 0x1c: /* ILR0 */
+ case 0x20: /* ILR1 */
+ case 0x24: /* ILR2 */
+ case 0x28: /* ILR3 */
+ case 0x2c: /* ILR4 */
+ case 0x30: /* ILR5 */
+ case 0x34: /* ILR6 */
+ case 0x38: /* ILR7 */
+ case 0x3c: /* ILR8 */
+ case 0x40: /* ILR9 */
+ case 0x44: /* ILR10 */
+ case 0x48: /* ILR11 */
+ case 0x4c: /* ILR12 */
+ case 0x50: /* ILR13 */
+ case 0x54: /* ILR14 */
+ case 0x58: /* ILR15 */
+ case 0x5c: /* ILR16 */
+ case 0x60: /* ILR17 */
+ case 0x64: /* ILR18 */
+ case 0x68: /* ILR19 */
+ case 0x6c: /* ILR20 */
+ case 0x70: /* ILR21 */
+ case 0x74: /* ILR22 */
+ case 0x78: /* ILR23 */
+ case 0x7c: /* ILR24 */
+ case 0x80: /* ILR25 */
+ case 0x84: /* ILR26 */
+ case 0x88: /* ILR27 */
+ case 0x8c: /* ILR28 */
+ case 0x90: /* ILR29 */
+ case 0x94: /* ILR30 */
+ case 0x98: /* ILR31 */
i = (offset - 0x1c) >> 2;
bank->priority[i] = (value >> 2) & 0x1f;
bank->sens_edge &= ~(1 << i);
@@ -313,7 +291,7 @@ static void omap_inth_write(void *opaque, hwaddr addr,
bank->fiq |= (value & 1) << i;
return;
- case 0x9c: /* ISR */
+ case 0x9c: /* ISR */
for (i = 0; i < 32; i ++)
if (value & (1 << i)) {
omap_set_intr(s, 32 * bank_no + i, 1);
@@ -397,16 +375,15 @@ void omap_intc_set_fclk(OMAPIntcState *intc, omap_clk clk)
intc->fclk = clk;
}
-static Property omap_intc_properties[] = {
+static const Property omap_intc_properties[] = {
DEFINE_PROP_UINT32("size", OMAPIntcState, size, 0x100),
- DEFINE_PROP_END_OF_LIST(),
};
-static void omap_intc_class_init(ObjectClass *klass, void *data)
+static void omap_intc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = omap_inth_reset;
+ device_class_set_legacy_reset(dc, omap_inth_reset);
device_class_set_props(dc, omap_intc_properties);
/* Reason: pointer property "clk" */
dc->user_creatable = false;
@@ -414,277 +391,16 @@ static void omap_intc_class_init(ObjectClass *klass, void *data)
}
static const TypeInfo omap_intc_info = {
- .name = "omap-intc",
- .parent = TYPE_OMAP_INTC,
- .instance_init = omap_intc_init,
- .class_init = omap_intc_class_init,
-};
-
-static uint64_t omap2_inth_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- OMAPIntcState *s = opaque;
- int offset = addr;
- int bank_no, line_no;
- struct omap_intr_handler_bank_s *bank = NULL;
-
- if ((offset & 0xf80) == 0x80) {
- bank_no = (offset & 0x60) >> 5;
- if (bank_no < s->nbanks) {
- offset &= ~0x60;
- bank = &s->bank[bank_no];
- } else {
- OMAP_BAD_REG(addr);
- return 0;
- }
- }
-
- switch (offset) {
- case 0x00: /* INTC_REVISION */
- return s->revision;
-
- case 0x10: /* INTC_SYSCONFIG */
- return (s->autoidle >> 2) & 1;
-
- case 0x14: /* INTC_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x40: /* INTC_SIR_IRQ */
- return s->sir_intr[0];
-
- case 0x44: /* INTC_SIR_FIQ */
- return s->sir_intr[1];
-
- case 0x48: /* INTC_CONTROL */
- return (!s->mask) << 2; /* GLOBALMASK */
-
- case 0x4c: /* INTC_PROTECTION */
- return 0;
-
- case 0x50: /* INTC_IDLE */
- return s->autoidle & 3;
-
- /* Per-bank registers */
- case 0x80: /* INTC_ITR */
- return bank->inputs;
-
- case 0x84: /* INTC_MIR */
- return bank->mask;
-
- case 0x88: /* INTC_MIR_CLEAR */
- case 0x8c: /* INTC_MIR_SET */
- return 0;
-
- case 0x90: /* INTC_ISR_SET */
- return bank->swi;
-
- case 0x94: /* INTC_ISR_CLEAR */
- return 0;
-
- case 0x98: /* INTC_PENDING_IRQ */
- return bank->irqs & ~bank->mask & ~bank->fiq;
-
- case 0x9c: /* INTC_PENDING_FIQ */
- return bank->irqs & ~bank->mask & bank->fiq;
-
- /* Per-line registers */
- case 0x100 ... 0x300: /* INTC_ILR */
- bank_no = (offset - 0x100) >> 7;
- if (bank_no > s->nbanks)
- break;
- bank = &s->bank[bank_no];
- line_no = (offset & 0x7f) >> 2;
- return (bank->priority[line_no] << 2) |
- ((bank->fiq >> line_no) & 1);
- }
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap2_inth_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- OMAPIntcState *s = opaque;
- int offset = addr;
- int bank_no, line_no;
- struct omap_intr_handler_bank_s *bank = NULL;
-
- if ((offset & 0xf80) == 0x80) {
- bank_no = (offset & 0x60) >> 5;
- if (bank_no < s->nbanks) {
- offset &= ~0x60;
- bank = &s->bank[bank_no];
- } else {
- OMAP_BAD_REG(addr);
- return;
- }
- }
-
- switch (offset) {
- case 0x10: /* INTC_SYSCONFIG */
- s->autoidle &= 4;
- s->autoidle |= (value & 1) << 2;
- if (value & 2) { /* SOFTRESET */
- omap_inth_reset(DEVICE(s));
- }
- return;
-
- case 0x48: /* INTC_CONTROL */
- s->mask = (value & 4) ? 0 : ~0; /* GLOBALMASK */
- if (value & 2) { /* NEWFIQAGR */
- qemu_set_irq(s->parent_intr[1], 0);
- s->new_agr[1] = ~0;
- omap_inth_update(s, 1);
- }
- if (value & 1) { /* NEWIRQAGR */
- qemu_set_irq(s->parent_intr[0], 0);
- s->new_agr[0] = ~0;
- omap_inth_update(s, 0);
- }
- return;
-
- case 0x4c: /* INTC_PROTECTION */
- /* TODO: Make a bitmap (or sizeof(char)map) of access privileges
- * for every register, see Chapter 3 and 4 for privileged mode. */
- if (value & 1)
- fprintf(stderr, "%s: protection mode enable attempt\n",
- __func__);
- return;
-
- case 0x50: /* INTC_IDLE */
- s->autoidle &= ~3;
- s->autoidle |= value & 3;
- return;
-
- /* Per-bank registers */
- case 0x84: /* INTC_MIR */
- bank->mask = value;
- omap_inth_update(s, 0);
- omap_inth_update(s, 1);
- return;
-
- case 0x88: /* INTC_MIR_CLEAR */
- bank->mask &= ~value;
- omap_inth_update(s, 0);
- omap_inth_update(s, 1);
- return;
-
- case 0x8c: /* INTC_MIR_SET */
- bank->mask |= value;
- return;
-
- case 0x90: /* INTC_ISR_SET */
- bank->irqs |= bank->swi |= value;
- omap_inth_update(s, 0);
- omap_inth_update(s, 1);
- return;
-
- case 0x94: /* INTC_ISR_CLEAR */
- bank->swi &= ~value;
- bank->irqs = bank->swi & bank->inputs;
- return;
-
- /* Per-line registers */
- case 0x100 ... 0x300: /* INTC_ILR */
- bank_no = (offset - 0x100) >> 7;
- if (bank_no > s->nbanks)
- break;
- bank = &s->bank[bank_no];
- line_no = (offset & 0x7f) >> 2;
- bank->priority[line_no] = (value >> 2) & 0x3f;
- bank->fiq &= ~(1 << line_no);
- bank->fiq |= (value & 1) << line_no;
- return;
-
- case 0x00: /* INTC_REVISION */
- case 0x14: /* INTC_SYSSTATUS */
- case 0x40: /* INTC_SIR_IRQ */
- case 0x44: /* INTC_SIR_FIQ */
- case 0x80: /* INTC_ITR */
- case 0x98: /* INTC_PENDING_IRQ */
- case 0x9c: /* INTC_PENDING_FIQ */
- OMAP_RO_REG(addr);
- return;
- }
- OMAP_BAD_REG(addr);
-}
-
-static const MemoryRegionOps omap2_inth_mem_ops = {
- .read = omap2_inth_read,
- .write = omap2_inth_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4,
- },
-};
-
-static void omap2_intc_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- OMAPIntcState *s = OMAP_INTC(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- s->level_only = 1;
- s->nbanks = 3;
- sysbus_init_irq(sbd, &s->parent_intr[0]);
- sysbus_init_irq(sbd, &s->parent_intr[1]);
- qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32);
- memory_region_init_io(&s->mmio, obj, &omap2_inth_mem_ops, s,
- "omap2-intc", 0x1000);
- sysbus_init_mmio(sbd, &s->mmio);
-}
-
-static void omap2_intc_realize(DeviceState *dev, Error **errp)
-{
- OMAPIntcState *s = OMAP_INTC(dev);
-
- if (!s->iclk) {
- error_setg(errp, "omap2-intc: iclk not connected");
- return;
- }
- if (!s->fclk) {
- error_setg(errp, "omap2-intc: fclk not connected");
- return;
- }
-}
-
-static Property omap2_intc_properties[] = {
- DEFINE_PROP_UINT8("revision", OMAPIntcState,
- revision, 0x21),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void omap2_intc_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->reset = omap_inth_reset;
- device_class_set_props(dc, omap2_intc_properties);
- /* Reason: pointer property "iclk", "fclk" */
- dc->user_creatable = false;
- dc->realize = omap2_intc_realize;
-}
-
-static const TypeInfo omap2_intc_info = {
- .name = "omap2-intc",
- .parent = TYPE_OMAP_INTC,
- .instance_init = omap2_intc_init,
- .class_init = omap2_intc_class_init,
-};
-
-static const TypeInfo omap_intc_type_info = {
.name = TYPE_OMAP_INTC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(OMAPIntcState),
- .abstract = true,
+ .instance_init = omap_intc_init,
+ .class_init = omap_intc_class_init,
};
static void omap_intc_register_types(void)
{
- type_register_static(&omap_intc_type_info);
type_register_static(&omap_intc_info);
- type_register_static(&omap2_intc_info);
}
type_init(omap_intc_register_types)
diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c
index 99032ea..047c367 100644
--- a/hw/intc/ompic.c
+++ b/hw/intc/ompic.c
@@ -13,7 +13,7 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#define TYPE_OR1K_OMPIC "or1k-ompic"
@@ -128,9 +128,8 @@ static void or1k_ompic_realize(DeviceState *dev, Error **errp)
}
}
-static Property or1k_ompic_properties[] = {
+static const Property or1k_ompic_properties[] = {
DEFINE_PROP_UINT32("num-cpus", OR1KOMPICState, num_cpus, 1),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_or1k_ompic_cpu = {
@@ -156,7 +155,7 @@ static const VMStateDescription vmstate_or1k_ompic = {
}
};
-static void or1k_ompic_class_init(ObjectClass *klass, void *data)
+static void or1k_ompic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index 9792a11..87733eb 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -41,7 +41,6 @@
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
-#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
@@ -1032,13 +1031,14 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
s_IRQ = IRQ_get_next(opp, &dst->servicing);
/* Check queued interrupts. */
n_IRQ = IRQ_get_next(opp, &dst->raised);
- src = &opp->src[n_IRQ];
- if (n_IRQ != -1 &&
- (s_IRQ == -1 ||
- IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
- DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
- idx, n_IRQ);
- qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
+ if (n_IRQ != -1) {
+ src = &opp->src[n_IRQ];
+ if (s_IRQ == -1 ||
+ IVPR_PRIORITY(src->ivpr) > dst->servicing.priority) {
+ DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
+ idx, n_IRQ);
+ qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
+ }
}
break;
default:
@@ -1535,9 +1535,7 @@ static void openpic_realize(DeviceState *dev, Error **errp)
};
if (opp->nb_cpus > MAX_CPU) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
- TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
- (uint64_t)0, (uint64_t)MAX_CPU);
+ error_setg(errp, "property 'nb_cpus' can be at most %d", MAX_CPU);
return;
}
@@ -1608,19 +1606,18 @@ static void openpic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}
-static Property openpic_properties[] = {
+static const Property openpic_properties[] = {
DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void openpic_class_init(ObjectClass *oc, void *data)
+static void openpic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = openpic_realize;
device_class_set_props(dc, openpic_properties);
- dc->reset = openpic_reset;
+ device_class_set_legacy_reset(dc, openpic_reset);
dc->vmsd = &vmstate_openpic;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c
index 557dd0c..673ea9c 100644
--- a/hw/intc/openpic_kvm.c
+++ b/hw/intc/openpic_kvm.c
@@ -30,7 +30,7 @@
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"
@@ -262,19 +262,18 @@ int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
kvm_arch_vcpu_id(cs));
}
-static Property kvm_openpic_properties[] = {
+static const Property kvm_openpic_properties[] = {
DEFINE_PROP_UINT32("model", KVMOpenPICState, model,
OPENPIC_MODEL_FSL_MPIC_20),
- DEFINE_PROP_END_OF_LIST(),
};
-static void kvm_openpic_class_init(ObjectClass *oc, void *data)
+static void kvm_openpic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = kvm_openpic_realize;
device_class_set_props(dc, kvm_openpic_properties);
- dc->reset = kvm_openpic_reset;
+ device_class_set_legacy_reset(dc, kvm_openpic_reset);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c
index d79e5d8..838c21c 100644
--- a/hw/intc/pl190.c
+++ b/hw/intc/pl190.c
@@ -273,11 +273,11 @@ static const VMStateDescription vmstate_pl190 = {
}
};
-static void pl190_class_init(ObjectClass *klass, void *data)
+static void pl190_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pl190_reset;
+ device_class_set_legacy_reset(dc, pl190_reset);
dc->vmsd = &vmstate_pl190;
}
diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
index 5bacbce..935c0e4 100644
--- a/hw/intc/pnv_xive.c
+++ b/hw/intc/pnv_xive.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE interrupt controller model
*
- * Copyright (c) 2017-2019, IBM Corporation.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -12,9 +11,9 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
+#include "system/cpus.h"
+#include "system/dma.h"
+#include "system/reset.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
@@ -473,7 +472,7 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive *xive = PNV_XIVE(xptr);
@@ -500,7 +499,8 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
* Check the thread context CAM lines and record matches.
*/
ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
- nvt_idx, cam_ignore, logic_serv);
+ nvt_idx, cam_ignore,
+ logic_serv);
/*
* Save the context and follow on to catch duplicates, that we
* don't support yet.
@@ -2059,17 +2059,16 @@ static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
return 0;
}
-static Property pnv_xive_properties[] = {
+static const Property pnv_xive_properties[] = {
DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
/* The PnvChip id identifies the XIVE interrupt controller. */
DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_xive_class_init(ObjectClass *klass, void *data)
+static void pnv_xive_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -2107,7 +2106,7 @@ static const TypeInfo pnv_xive_info = {
.instance_size = sizeof(PnvXive),
.class_init = pnv_xive_class_init,
.class_size = sizeof(PnvXiveClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 2fb4fa2..ec8b0c6 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -1,18 +1,17 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/dma.h"
+#include "system/cpus.h"
+#include "system/dma.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
@@ -24,7 +23,8 @@
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
+#include "system/qtest.h"
#include <libfdt.h>
@@ -32,6 +32,16 @@
#undef XIVE2_DEBUG
+/* XIVE Sync or Flush Notification Block */
+typedef struct XiveSfnBlock {
+ uint8_t bytes[32];
+} XiveSfnBlock;
+
+/* XIVE Thread Sync or Flush Notification Area */
+typedef struct XiveThreadNA {
+ XiveSfnBlock topo[16];
+} XiveThreadNA;
+
/*
* Virtual structures table (VST)
*/
@@ -45,16 +55,16 @@ typedef struct XiveVstInfo {
static const XiveVstInfo vst_infos[] = {
- [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
- [VST_ESB] = { "ESB", 1, 16 },
- [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
+ [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
+ [VST_ESB] = { "ESB", 1, 16 },
+ [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
- [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
- [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
- [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
+ [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
+ [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
+ [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
- [VST_IC] = { "IC", 1 /* ? */ , 16 }, /* Topology # */
- [VST_SYNC] = { "SYNC", 1 /* ? */ , 16 }, /* Topology # */
+ [VST_IC] = { "IC", 1, /* ? */ 16 }, /* Topology # */
+ [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */
/*
* This table contains the backing store pages for the interrupt
@@ -206,6 +216,20 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
+static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
+{
+ uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ return shift > 8 ? 0 : shift;
+}
+
+static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
+{
+ uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ return shift > 8 ? 0 : shift;
+}
+
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
uint32_t idx)
{
@@ -219,6 +243,11 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
}
vsd = xive->vsds[type][blk];
+ if (vsd == 0) {
+ xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
+ blk, info->name, idx);
+ return 0;
+ }
/* Remote VST access */
if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
@@ -227,6 +256,12 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
}
+ if (type == VST_NVG) {
+ idx >>= pnv_xive2_nvg_table_compress_shift(xive);
+ } else if (type == VST_NVC) {
+ idx >>= pnv_xive2_nvc_table_compress_shift(xive);
+ }
+
if (VSD_INDIRECT & vsd) {
return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
}
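/*
 * Illustrative sketch (not part of the patch): the NVG/NVC table
 * compression applied in pnv_xive2_vst_addr() above. The 4-bit
 * *_TABLE_COMPRESS field supplies a right-shift applied to the entry
 * index before the VST lookup; values above 8 are treated as no
 * compression. The helper name here is made up for the example.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t compress_idx(uint32_t idx, uint8_t shift_field)
{
    uint8_t shift = shift_field > 8 ? 0 : shift_field;

    return idx >> shift;
}

int main(void)
{
    assert(compress_idx(0x100, 4) == 0x10);
    assert(compress_idx(0x100, 9) == 0x100); /* out of range -> no shift */
    return 0;
}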
@@ -329,40 +364,115 @@ static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
word_number);
}
-static int pnv_xive2_end_update(PnvXive2 *xive)
+static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
- uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- int i;
+ if (!qtest_enabled()) {
+ PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+ return ppc_cpu_pir(cpu);
+ }
+ return 0;
+}
+
+/*
+ * After SW injects a Queue Sync or Cache Flush operation, HW will notify
+ * SW of the completion of the operation by writing a byte of all 1's (0xff)
+ * to a specific memory location. The memory location is calculated by first
+ * looking up a base address in the SYNC VSD using the Topology ID of the
+ * originating thread as the "block" number. This points to a 64k block
+ * of memory that is further divided into 128 512-byte chunks, which are
+ * indexed by the thread ID of the requesting thread. Finally, each
+ * 512-byte chunk is divided into 16 32-byte chunks, which are indexed by
+ * the topology ID of the targeted IC's chip. The values below are the
+ * offsets into that 32-byte chunk of memory for
+ * each type of cache flush or queue sync operation.
+ */
+#define PNV_XIVE2_QUEUE_IPI 0x00
+#define PNV_XIVE2_QUEUE_HW 0x01
+#define PNV_XIVE2_QUEUE_NXC 0x02
+#define PNV_XIVE2_QUEUE_INT 0x03
+#define PNV_XIVE2_QUEUE_OS 0x04
+#define PNV_XIVE2_QUEUE_POOL 0x05
+#define PNV_XIVE2_QUEUE_HARD 0x06
+#define PNV_XIVE2_CACHE_ENDC 0x08
+#define PNV_XIVE2_CACHE_ESBC 0x09
+#define PNV_XIVE2_CACHE_EASC 0x0a
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
+#define PNV_XIVE2_CACHE_NXC 0x18
+
+static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
+{
+ uint64_t addr;
+ int pir = pnv_xive2_get_current_pir(xive);
+ int thread_nr = PNV10_PIR2THREAD(pir);
+ int thread_topo_id = PNV10_PIR2CHIP(pir);
+ int ic_topo_id = xive->chip->chip_id;
+ uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
+ uint8_t byte = 0xff;
+ MemTxResult result;
+
+ /* Retrieve the address of requesting thread's notification area */
+ addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
+
+ if (!addr) {
+ xive2_error(xive, "VST: no SYNC entry %x/%x !?",
+ thread_topo_id, thread_nr);
+ return -1;
+ }
+
+ address_space_stb(&address_space_memory, addr + offset + type, byte,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ assert(result == MEMTX_OK);
+
+ return 0;
+}
+
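/*
 * Illustrative sketch (not part of the patch): the notification-address
 * arithmetic described in the comment above, written out as a standalone
 * helper. It assumes the layout modelled by XiveSfnBlock (32 bytes) and
 * XiveThreadNA (16 x 32 = 512 bytes): the SYNC VSD lookup yields the
 * 512-byte per-thread chunk, and the final byte is selected by the target
 * IC's topology ID plus one of the PNV_XIVE2_QUEUE_*/CACHE_* offsets.
 * The function name and the 0x1000 base used below are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t sfn_byte_addr(uint64_t thread_chunk_base, /* from VST_SYNC */
                              int ic_topo_id,             /* 0..15 */
                              int op)                      /* e.g. 0x08 = ENDC */
{
    return thread_chunk_base + ic_topo_id * 32u + op;
}

int main(void)
{
    /* ENDC flush completion for IC topology 2, chunk mapped at 0x1000 */
    assert(sfn_byte_addr(0x1000, 2, 0x08) == 0x1048);
    return 0;
}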
+static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
+{
+ uint8_t blk;
+ uint32_t idx;
+ int i, spec_reg, data_reg;
uint64_t endc_watch[4];
+ assert(watch_engine < ARRAY_SIZE(endc_watch));
+
+ spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
+ idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
+
for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
- endc_watch[i] =
- cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
+ endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
}
return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
XIVE_VST_WORD_ALL);
}
-static void pnv_xive2_end_cache_load(PnvXive2 *xive)
+static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
- uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ uint8_t blk;
+ uint32_t idx;
uint64_t endc_watch[4] = { 0 };
- int i;
+ int i, spec_reg, data_reg;
+
+ assert(watch_engine < ARRAY_SIZE(endc_watch));
+
+ spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
+ idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
}
for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
- xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
- be64_to_cpu(endc_watch[i]);
+ xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
}
}
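/*
 * Illustrative sketch (not part of the patch): the watch-engine register
 * stride used by pnv_xive2_end_update() and pnv_xive2_end_cache_load()
 * above. Each of the four cache-watch engines owns a 0x40-byte window of
 * registers, so the 64-bit register index is ((BASE + engine * 0x40) >> 3)
 * and the engine is recovered from an MMIO offset with a >> 6. The local
 * WATCH0_* macros mirror the VC_ENDC_WATCH0_* values defined in
 * pnv_xive2_regs.h further down.
 */
#include <assert.h>
#include <stdint.h>

#define WATCH0_SPEC  0x500
#define WATCH0_DATA0 0x520

static unsigned watch_reg_index(unsigned base, unsigned engine)
{
    return (base + engine * 0x40) >> 3;
}

static unsigned watch_engine_of(uint64_t offset, unsigned base)
{
    return (offset - base) >> 6;
}

int main(void)
{
    assert(watch_reg_index(WATCH0_DATA0, 1) == 0x560 >> 3); /* WATCH1_DATA0 */
    assert(watch_reg_index(WATCH0_SPEC, 3) == 0x5C0 >> 3);  /* WATCH3_SPEC  */
    assert(watch_engine_of(0x5A0, WATCH0_DATA0) == 2);      /* WATCH2_DATA0 */
    return 0;
}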
@@ -379,40 +489,92 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
word_number);
}
-static int pnv_xive2_nvp_update(PnvXive2 *xive)
+static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t blk, uint32_t idx,
+ Xive2Nvgc *nvgc)
{
- uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- int i;
+ return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
+ blk, idx, nvgc);
+}
+
+static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t blk, uint32_t idx,
+ Xive2Nvgc *nvgc)
+{
+ return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
+ blk, idx, nvgc,
+ XIVE_VST_WORD_ALL);
+}
+
+static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
+{
+ switch (nxc_type) {
+ case PC_NXC_WATCH_NXC_NVP:
+ *table_type = VST_NVP;
+ break;
+ case PC_NXC_WATCH_NXC_NVG:
+ *table_type = VST_NVG;
+ break;
+ case PC_NXC_WATCH_NXC_NVC:
+ *table_type = VST_NVC;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid table type for nxc operation\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
+{
+ uint8_t blk, nxc_type;
+ uint32_t idx, table_type = -1;
+ int i, spec_reg, data_reg;
uint64_t nxc_watch[4];
+ assert(watch_engine < ARRAY_SIZE(nxc_watch));
+
+ spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
+ blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
+ idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
+
+ assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
+
for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
- nxc_watch[i] =
- cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
+ nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
}
- return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
+ return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
XIVE_VST_WORD_ALL);
}
-static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
+static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
- uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ uint8_t blk, nxc_type;
+ uint32_t idx, table_type = -1;
uint64_t nxc_watch[4] = { 0 };
- int i;
+ int i, spec_reg, data_reg;
+
+ assert(watch_engine < ARRAY_SIZE(nxc_watch));
+
+ spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
+ blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
+ idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
+
+ assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
- if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
- xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
+ if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
+ xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
+ blk, idx, vst_infos[table_type].name);
}
for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
- xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
- be64_to_cpu(nxc_watch[i]);
+ xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
}
}
@@ -462,7 +624,7 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive2 *xive = PNV_XIVE2(xptr);
@@ -493,25 +655,38 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
logic_serv);
} else {
ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
- nvt_idx, cam_ignore,
- logic_serv);
+ nvt_idx, crowd, cam_ignore,
+ logic_serv);
}
- /*
- * Save the context and follow on to catch duplicates,
- * that we don't support yet.
- */
if (ring != -1) {
- if (match->tctx) {
+ /*
+ * For VP-specific match, finding more than one is a
+ * problem. For group notification, it's possible.
+ */
+ if (!cam_ignore && match->tctx) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
"thread context NVT %x/%x\n",
nvt_blk, nvt_idx);
- return false;
+ /* Should set a FIR if we ever model it */
+ return -1;
+ }
+ /*
+ * For a group notification, we need to know if the
+ * match is precluded first by checking the current
+ * thread priority. If the interrupt can be delivered,
+ * we always notify the first match (for now).
+ */
+ if (cam_ignore &&
+ xive2_tm_irq_precluded(tctx, ring, priority)) {
+ match->precluded = true;
+ } else {
+ if (!match->tctx) {
+ match->ring = ring;
+ match->tctx = tctx;
+ }
+ count++;
}
-
- match->ring = ring;
- match->tctx = tctx;
- count++;
}
}
}
@@ -530,6 +705,47 @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
return cfg;
}
+static int pnv_xive2_broadcast(XivePresenter *xptr,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool ignore, uint8_t priority)
+{
+ PnvXive2 *xive = PNV_XIVE2(xptr);
+ PnvChip *chip = xive->chip;
+ int i, j;
+ bool gen1_tima_os =
+ xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+
+ for (i = 0; i < chip->nr_cores; i++) {
+ PnvCore *pc = chip->cores[i];
+ CPUCore *cc = CPU_CORE(pc);
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ PowerPCCPU *cpu = pc->threads[j];
+ XiveTCTX *tctx;
+ int ring;
+
+ if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
+ continue;
+ }
+
+ tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ if (gen1_tima_os) {
+ ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+ nvt_idx, ignore, 0);
+ } else {
+ ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+ nvt_idx, crowd, ignore, 0);
+ }
+
+ if (ring != -1) {
+ xive2_tm_set_lsmfb(tctx, ring, priority);
+ }
+ }
+ }
+ return 0;
+}
+
static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
return pnv_xive2_block_id(PNV_XIVE2(xrtr));
@@ -581,6 +797,7 @@ static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
case CQ_TAR_NVPG:
case CQ_TAR_ESB:
case CQ_TAR_END:
+ case CQ_TAR_NVC:
xive->tables[tsel][entry] = val;
break;
default:
@@ -641,6 +858,9 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
* entries provisioned by FW (such as skiboot) and resize the
* ESB window accordingly.
*/
+ if (memory_region_is_mapped(&xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
+ }
if (!(VSD_INDIRECT & vsd)) {
memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
* (1ull << xsrc->esb_shift));
@@ -656,6 +876,9 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
/*
* Backing store pages for the END.
*/
+ if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
+ }
if (!(VSD_INDIRECT & vsd)) {
memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
* (1ull << end_xsrc->esb_shift));
@@ -680,13 +903,10 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
* Both PC and VC sub-engines are configured as each use the Virtual
* Structure Tables
*/
-static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
+ uint8_t type, uint8_t blk)
{
uint8_t mode = GETFIELD(VSD_MODE, vsd);
- uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
- xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
- uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
- xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
if (type > VST_ERQ) {
@@ -721,6 +941,16 @@ static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
}
}
+static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+{
+ uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+
+ pnv_xive2_vst_set_data(xive, vsd, type, blk);
+}
+
/*
* MMIO handlers
*/
@@ -964,12 +1194,70 @@ static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
},
};
+static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
+ uint64_t *state)
+{
+ uint8_t val = 0xFF;
+ int i;
+
+ for (i = 3; i >= 0; i--) {
+ if (BIT(i) & engine_mask) {
+ if (!(BIT(i) & *state)) {
+ *state |= BIT(i);
+ val = 3 - i;
+ break;
+ }
+ }
+ }
+ return val;
+}
+
+static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
+{
+ uint8_t engine_bit = 3 - watch_engine;
+
+ if (*state & BIT(engine_bit)) {
+ *state &= ~BIT(engine_bit);
+ }
+}
+
+static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
+{
+ uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
+ xive->vc_regs[VC_ENDC_CFG >> 3]);
+ uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
+ uint8_t val;
+
+ /*
+ * We keep track of which engines are currently busy in the
+ * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
+ * the register, we don't return its value but the ID of an engine
+ * it can use.
+ * There are 4 engines. 0xFF means no engine is available.
+ */
+ val = pnv_xive2_cache_watch_assign(engine_mask, &state);
+ if (val != 0xFF) {
+ xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
+ }
+ return val;
+}
+
+static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
+ uint8_t watch_engine)
+{
+ uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
+
+ pnv_xive2_cache_watch_release(&state, watch_engine);
+ xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
+}
+
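/*
 * Illustrative sketch (not part of the patch): how the generic
 * pnv_xive2_cache_watch_assign()/..._release() pair above hands out watch
 * engines. Bit 3 of the mask/state corresponds to engine 0 and bit 0 to
 * engine 3; assignment claims the highest free bit that is enabled in the
 * mask, and 0xFF reports that all enabled engines are busy. The assign()
 * helper below is a local rendering of that logic, not the QEMU function.
 */
#include <assert.h>
#include <stdint.h>

#define BIT(n) (1ull << (n))

static uint8_t assign(uint64_t mask, uint64_t *state)
{
    for (int i = 3; i >= 0; i--) {
        if ((BIT(i) & mask) && !(BIT(i) & *state)) {
            *state |= BIT(i);
            return 3 - i;           /* engine ID */
        }
    }
    return 0xFF;                    /* no engine available */
}

int main(void)
{
    uint64_t state = 0;

    assert(assign(0xF, &state) == 0);  /* bit 3 -> engine 0 */
    assert(assign(0xF, &state) == 1);  /* bit 2 -> engine 1 */
    state &= ~BIT(3);                  /* release engine 0 */
    assert(assign(0xF, &state) == 0);  /* engine 0 is free again */
    return 0;
}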
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint64_t val = 0;
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1000,24 +1288,44 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
val = xive->vc_regs[reg];
break;
+ case VC_ENDC_WATCH_ASSIGN:
+ val = pnv_xive2_endc_cache_watch_assign(xive);
+ break;
+
+ case VC_ENDC_CFG:
+ val = xive->vc_regs[reg];
+ break;
+
/*
* END cache updates
*/
case VC_ENDC_WATCH0_SPEC:
+ case VC_ENDC_WATCH1_SPEC:
+ case VC_ENDC_WATCH2_SPEC:
+ case VC_ENDC_WATCH3_SPEC:
+ watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
+ pnv_xive2_endc_cache_watch_release(xive, watch_engine);
val = xive->vc_regs[reg];
break;
case VC_ENDC_WATCH0_DATA0:
+ case VC_ENDC_WATCH1_DATA0:
+ case VC_ENDC_WATCH2_DATA0:
+ case VC_ENDC_WATCH3_DATA0:
/*
* Load DATA registers from cache with data requested by the
* SPEC register
*/
- pnv_xive2_end_cache_load(xive);
+ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
+ pnv_xive2_end_cache_load(xive, watch_engine);
val = xive->vc_regs[reg];
break;
case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
+ case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
+ case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
+ case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
val = xive->vc_regs[reg];
break;
@@ -1063,6 +1371,7 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1071,7 +1380,7 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
case VC_VSD_TABLE_ADDR:
break;
case VC_VSD_TABLE_DATA:
- pnv_xive2_vst_set_data(xive, val);
+ pnv_xive2_vc_vst_set_data(xive, val);
break;
/*
@@ -1083,6 +1392,10 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/* ESB update */
break;
+ case VC_ESBC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
+ break;
+
case VC_ESBC_CFG:
break;
@@ -1095,19 +1408,36 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/* EAS update */
break;
+ case VC_EASC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
+ break;
+
+ case VC_ENDC_CFG:
+ break;
+
/*
* END cache updates
*/
case VC_ENDC_WATCH0_SPEC:
+ case VC_ENDC_WATCH1_SPEC:
+ case VC_ENDC_WATCH2_SPEC:
+ case VC_ENDC_WATCH3_SPEC:
val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
break;
case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
+ case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
+ case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
+ case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
break;
case VC_ENDC_WATCH0_DATA0:
+ case VC_ENDC_WATCH1_DATA0:
+ case VC_ENDC_WATCH2_DATA0:
+ case VC_ENDC_WATCH3_DATA0:
/* writing to DATA0 triggers the cache write */
+ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
xive->vc_regs[reg] = val;
- pnv_xive2_end_update(xive);
+ pnv_xive2_end_update(xive, watch_engine);
break;
@@ -1116,6 +1446,10 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
break;
+ case VC_ENDC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
+ break;
+
/*
* Indirect invalidation
*/
@@ -1157,12 +1491,43 @@ static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
},
};
+static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
+{
+ uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
+ uint8_t val;
+
+ /*
+ * We keep track of which engines are currently busy in the
+ * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
+ * the register, we don't return its value but the ID of an engine
+ * it can use.
+ * There are 4 engines. 0xFF means no engine is available.
+ */
+ val = pnv_xive2_cache_watch_assign(engine_mask, &state);
+ if (val != 0xFF) {
+ xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
+ }
+ return val;
+}
+
+static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
+ uint8_t watch_engine)
+{
+ uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
+
+ pnv_xive2_cache_watch_release(&state, watch_engine);
+ xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
+}
+
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint64_t val = -1;
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1173,24 +1538,44 @@ static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
val = xive->pc_regs[reg];
break;
+ case PC_NXC_WATCH_ASSIGN:
+ val = pnv_xive2_nxc_cache_watch_assign(xive);
+ break;
+
+ case PC_NXC_PROC_CONFIG:
+ val = xive->pc_regs[reg];
+ break;
+
/*
* cache updates
*/
case PC_NXC_WATCH0_SPEC:
+ case PC_NXC_WATCH1_SPEC:
+ case PC_NXC_WATCH2_SPEC:
+ case PC_NXC_WATCH3_SPEC:
+ watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
+ pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
val = xive->pc_regs[reg];
break;
case PC_NXC_WATCH0_DATA0:
+ case PC_NXC_WATCH1_DATA0:
+ case PC_NXC_WATCH2_DATA0:
+ case PC_NXC_WATCH3_DATA0:
/*
* Load DATA registers from cache with data requested by the
* SPEC register
*/
- pnv_xive2_nvp_cache_load(xive);
+ watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
+ pnv_xive2_nxc_cache_load(xive, watch_engine);
val = xive->pc_regs[reg];
break;
case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
+ case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
+ case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
+ case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
val = xive->pc_regs[reg];
break;
@@ -1214,36 +1599,66 @@ static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
return val;
}
+static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+{
+ uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
+ xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
+ xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
+
+ pnv_xive2_vst_set_data(xive, vsd, type, blk);
+}
+
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
- * VSD table settings. Only taken into account in the VC
- * sub-engine because the Xive2Router model combines both VC and PC
- * sub-engines
+ * VSD table settings.
+ * The Xive2Router model combines both VC and PC sub-engines. We
+ * allow the tables to be configured through either one, for the
+ * rare cases where a table only really needs to be configured for
+ * one of them (e.g. the NVG table for the presenter). The model
+ * assumes that firmware passes the same address to the VC and PC
+ * when tables are defined for both, which seems acceptable.
*/
case PC_VSD_TABLE_ADDR:
+ break;
case PC_VSD_TABLE_DATA:
+ pnv_xive2_pc_vst_set_data(xive, val);
+ break;
+
+ case PC_NXC_PROC_CONFIG:
break;
/*
* cache updates
*/
case PC_NXC_WATCH0_SPEC:
+ case PC_NXC_WATCH1_SPEC:
+ case PC_NXC_WATCH2_SPEC:
+ case PC_NXC_WATCH3_SPEC:
val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
break;
case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
+ case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
+ case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
+ case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
break;
case PC_NXC_WATCH0_DATA0:
+ case PC_NXC_WATCH1_DATA0:
+ case PC_NXC_WATCH2_DATA0:
+ case PC_NXC_WATCH3_DATA0:
/* writing to DATA0 triggers the cache write */
+ watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
xive->pc_regs[reg] = val;
- pnv_xive2_nvp_update(xive);
+ pnv_xive2_nxc_update(xive, watch_engine);
break;
/* case PC_NXC_FLUSH_CTRL: */
@@ -1251,6 +1666,10 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
break;
+ case PC_NXC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
+ break;
+
/*
* Indirect invalidation
*/
@@ -1547,13 +1966,19 @@ static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
/*
* Sync MMIO page (write only)
*/
-#define PNV_XIVE2_SYNC_IPI 0x000
-#define PNV_XIVE2_SYNC_HW 0x080
-#define PNV_XIVE2_SYNC_NxC 0x100
-#define PNV_XIVE2_SYNC_INT 0x180
-#define PNV_XIVE2_SYNC_OS_ESC 0x200
-#define PNV_XIVE2_SYNC_POOL_ESC 0x280
-#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+#define PNV_XIVE2_SYNC_IPI 0x000
+#define PNV_XIVE2_SYNC_HW 0x080
+#define PNV_XIVE2_SYNC_NxC 0x100
+#define PNV_XIVE2_SYNC_INT 0x180
+#define PNV_XIVE2_SYNC_OS_ESC 0x200
+#define PNV_XIVE2_SYNC_POOL_ESC 0x280
+#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
unsigned size)
@@ -1565,22 +1990,72 @@ static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
return -1;
}
+/*
+ * The sync MMIO space spans two pages. The lower page is used for
+ * queue sync "poll" requests while the upper page is used for queue
+ * sync "inject" requests. Inject requests require the HW to write
+ * a byte of all 1's to a predetermined location in memory in order
+ * to signal completion of the request. Both pages have the same
+ * layout, so it is easiest to handle both with a single function.
+ */
static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ int inject_type;
+ hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
- switch (offset) {
+ /* adjust offset for inject page */
+ hwaddr adj_offset = offset & pg_offset_mask;
+
+ switch (adj_offset) {
case PNV_XIVE2_SYNC_IPI:
+ inject_type = PNV_XIVE2_QUEUE_IPI;
+ break;
case PNV_XIVE2_SYNC_HW:
+ inject_type = PNV_XIVE2_QUEUE_HW;
+ break;
case PNV_XIVE2_SYNC_NxC:
+ inject_type = PNV_XIVE2_QUEUE_NXC;
+ break;
case PNV_XIVE2_SYNC_INT:
+ inject_type = PNV_XIVE2_QUEUE_INT;
+ break;
case PNV_XIVE2_SYNC_OS_ESC:
+ inject_type = PNV_XIVE2_QUEUE_OS;
+ break;
case PNV_XIVE2_SYNC_POOL_ESC:
+ inject_type = PNV_XIVE2_QUEUE_POOL;
+ break;
case PNV_XIVE2_SYNC_HARD_ESC:
+ inject_type = PNV_XIVE2_QUEUE_HARD;
+ break;
+ case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
+ inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
+ break;
+ case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
+ inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
break;
default:
xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
+ return;
+ }
+
+ /* Write Queue Sync notification byte if writing to sync inject page */
+ if ((offset & ~pg_offset_mask) != 0) {
+ pnv_xive2_inject_notify(xive, inject_type);
}
}
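/*
 * Illustrative sketch (not part of the patch): the poll vs. inject page
 * split used by pnv_xive2_ic_sync_write() above. With the default 64k
 * page size (ic_shift == 16), offsets in the first page are "poll"
 * requests and only the second page triggers the completion-byte write.
 * The helper name is made up for the example.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool sync_is_inject(uint64_t offset, unsigned ic_shift)
{
    uint64_t pg_offset_mask = (1ull << ic_shift) - 1;

    return (offset & ~pg_offset_mask) != 0;
}

int main(void)
{
    assert(!sync_is_inject(0x080, 16));          /* PNV_XIVE2_SYNC_HW, poll */
    assert(sync_is_inject(0x10000 + 0x080, 16)); /* same op, inject page */
    return 0;
}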
@@ -1727,21 +2202,40 @@ static const MemoryRegionOps pnv_xive2_tm_ops = {
},
};
-static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
+static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvpg_shift;
+ uint16_t op = addr & 0xFFF;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
- return -1;
+ if (size != 2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n",
+ size);
+ return -1;
+ }
+
+ return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1);
}
-static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
+static void pnv_xive2_nvc_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvc_shift;
+ uint16_t op = addr & 0xFFF;
+ uint8_t blk = pnv_xive2_block_id(xive);
+
+ if (size != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n",
+ size);
+ return;
+ }
- xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
+ (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val);
}
static const MemoryRegionOps pnv_xive2_nvc_ops = {
@@ -1749,30 +2243,63 @@ static const MemoryRegionOps pnv_xive2_nvc_ops = {
.write = pnv_xive2_nvc_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
.impl = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
};
-static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
+static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvpg_shift;
+ uint16_t op = addr & 0xFFF;
+ uint32_t index = page >> 1;
+ uint8_t blk = pnv_xive2_block_id(xive);
- xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
- return -1;
+ if (size != 2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n",
+ size);
+ return -1;
+ }
+
+ if (page % 2) {
+ /* odd page - NVG */
+ return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1);
+ } else {
+ /* even page - NVP */
+ return xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
+ }
}
-static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
+static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ XivePresenter *xptr = XIVE_PRESENTER(xive);
+ uint32_t page = addr >> xive->nvpg_shift;
+ uint16_t op = addr & 0xFFF;
+ uint32_t index = page >> 1;
+ uint8_t blk = pnv_xive2_block_id(xive);
+
+ if (size != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n",
+ size);
+ return;
+ }
- xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
+ if (page % 2) {
+ /* odd page - NVG */
+ (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val);
+ } else {
+ /* even page - NVP */
+ (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
+ }
}
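/*
 * Illustrative sketch (not part of the patch): how pnv_xive2_nvpg_read()
 * and pnv_xive2_nvpg_write() above decode an NVPG MMIO address. With the
 * default 64k pages (nvpg_shift == 16), each NVP/NVG index owns a pair of
 * pages: the even page targets the NVP entry, the odd page the NVG entry,
 * and the low 12 bits select the backlog operation. The struct and helper
 * below are local to the example.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct nvpg_decode {
    uint32_t index;   /* NVP/NVG index */
    uint16_t op;      /* backlog operation */
    bool     is_nvg;  /* odd page -> NVG, even page -> NVP */
};

static struct nvpg_decode nvpg_decode(uint64_t addr, unsigned nvpg_shift)
{
    uint32_t page = addr >> nvpg_shift;

    return (struct nvpg_decode) {
        .index  = page >> 1,
        .op     = addr & 0xFFF,
        .is_nvg = page % 2,
    };
}

int main(void)
{
    struct nvpg_decode d = nvpg_decode((3ull << 16) | 0x2c0, 16);

    assert(d.index == 1 && d.is_nvg && d.op == 0x2c0);
    return 0;
}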
static const MemoryRegionOps pnv_xive2_nvpg_ops = {
@@ -1780,11 +2307,11 @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = {
.write = pnv_xive2_nvpg_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
.impl = {
- .min_access_size = 8,
+ .min_access_size = 1,
.max_access_size = 8,
},
};
@@ -1814,6 +2341,12 @@ static void pnv_xive2_reset(void *dev)
xive->cq_regs[CQ_XIVE_CFG >> 3] |=
SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
+ /* VC and PC cache watch assign mechanism */
+ xive->vc_regs[VC_ENDC_CFG >> 3] =
+ SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
+ SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
+
/* Set default page size to 64k */
xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
@@ -1926,7 +2459,7 @@ static void pnv_xive2_realize(DeviceState *dev, Error **errp)
qemu_register_reset(pnv_xive2_reset, dev);
}
-static Property pnv_xive2_properties[] = {
+static const Property pnv_xive2_properties[] = {
DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
@@ -1938,7 +2471,6 @@ static Property pnv_xive2_properties[] = {
DEFINE_PROP_UINT64("config", PnvXive2, config,
PNV_XIVE2_CONFIGURATION),
DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive2_instance_init(Object *obj)
@@ -1973,7 +2505,7 @@ static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
return 0;
}
-static void pnv_xive2_class_init(ObjectClass *klass, void *data)
+static void pnv_xive2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -1996,6 +2528,8 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
xrc->write_end = pnv_xive2_write_end;
xrc->get_nvp = pnv_xive2_get_nvp;
xrc->write_nvp = pnv_xive2_write_nvp;
+ xrc->get_nvgc = pnv_xive2_get_nvgc;
+ xrc->write_nvgc = pnv_xive2_write_nvgc;
xrc->get_config = pnv_xive2_get_config;
xrc->get_block_id = pnv_xive2_get_block_id;
@@ -2003,6 +2537,7 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
xpc->match_nvt = pnv_xive2_match_nvt;
xpc->get_config = pnv_xive2_presenter_get_config;
+ xpc->broadcast = pnv_xive2_broadcast;
};
static const TypeInfo pnv_xive2_info = {
@@ -2012,7 +2547,7 @@ static const TypeInfo pnv_xive2_info = {
.instance_size = sizeof(PnvXive2),
.class_init = pnv_xive2_class_init,
.class_size = sizeof(PnvXive2Class),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
@@ -2025,33 +2560,6 @@ static void pnv_xive2_register_types(void)
type_init(pnv_xive2_register_types)
-static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
- GString *buf)
-{
- uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
- uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
-
- if (!xive2_nvp_is_valid(nvp)) {
- return;
- }
-
- g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x",
- nvp_idx, eq_blk, eq_idx,
- xive_get_field32(NVP2_W2_IPB, nvp->w2));
- /*
- * When the NVP is HW controlled, more fields are updated
- */
- if (xive2_nvp_is_hw(nvp)) {
- g_string_append_printf(buf, " CPPR:%02x",
- xive_get_field32(NVP2_W2_CPPR, nvp->w2));
- if (xive2_nvp_is_co(nvp)) {
- g_string_append_printf(buf, " CO:%04x",
- xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
- }
- }
- g_string_append_c(buf, '\n');
-}
-
/*
* If the table is direct, we can compute the number of PQ entries
* provisioned by FW.
@@ -2113,8 +2621,9 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
Xive2Eas eas;
Xive2End end;
Xive2Nvp nvp;
+ Xive2Nvgc nvgc;
int i;
- uint64_t xive_nvp_per_subpage;
+ uint64_t entries_per_subpage;
g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
blk, srcno0, srcno0 + nr_esbs - 1);
@@ -2146,10 +2655,28 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
- xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
- for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
+ entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
+ for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
xive2_nvp_pic_print_info(&nvp, i++, buf);
}
}
+
+ g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
+ chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
+ entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
+ for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
+ while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
+ xive2_nvgc_pic_print_info(&nvgc, i++, buf);
+ }
+ }
+
+ g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
+ chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
+ entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
+ for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
+ while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
+ xive2_nvgc_pic_print_info(&nvgc, i++, buf);
+ }
+ }
}
diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h
index 7165dc8..e8b87b3 100644
--- a/hw/intc/pnv_xive2_regs.h
+++ b/hw/intc/pnv_xive2_regs.h
@@ -232,6 +232,10 @@
#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+/* ESBC cache flush inject register */
+#define X_VC_ESBC_FLUSH_INJECT 0x142
+#define VC_ESBC_FLUSH_INJECT 0x210
+
/* ESBC configuration */
#define X_VC_ESBC_CFG 0x148
#define VC_ESBC_CFG 0x240
@@ -250,6 +254,10 @@
#define VC_EASC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_EASC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+/* EASC flush inject register */
+#define X_VC_EASC_FLUSH_INJECT 0x162
+#define VC_EASC_FLUSH_INJECT 0x310
+
/*
* VC2
*/
@@ -270,6 +278,10 @@
#define VC_ENDC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define VC_ENDC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+/* ENDC flush inject register */
+#define X_VC_ENDC_FLUSH_INJECT 0x182
+#define VC_ENDC_FLUSH_INJECT 0x410
+
/* ENDC Sync done */
#define X_VC_ENDC_SYNC_DONE 0x184
#define VC_ENDC_SYNC_DONE 0x420
@@ -283,6 +295,15 @@
#define VC_ENDC_SYNC_QUEUE_HARD PPC_BIT(6)
#define VC_QUEUE_COUNT 7
+/* ENDC cache watch assign */
+#define X_VC_ENDC_WATCH_ASSIGN 0x186
+#define VC_ENDC_WATCH_ASSIGN 0x430
+
+/* ENDC configuration register */
+#define X_VC_ENDC_CFG 0x188
+#define VC_ENDC_CFG 0x440
+#define VC_ENDC_CFG_CACHE_WATCH_ASSIGN PPC_BITMASK(32, 35)
+
/* ENDC cache watch specification 0 */
#define X_VC_ENDC_WATCH0_SPEC 0x1A0
#define VC_ENDC_WATCH0_SPEC 0x500
@@ -302,6 +323,42 @@
#define VC_ENDC_WATCH0_DATA2 0x530
#define VC_ENDC_WATCH0_DATA3 0x538
+/* ENDC cache watch 1 */
+#define X_VC_ENDC_WATCH1_SPEC 0x1A8
+#define VC_ENDC_WATCH1_SPEC 0x540
+#define X_VC_ENDC_WATCH1_DATA0 0x1AC
+#define X_VC_ENDC_WATCH1_DATA1 0x1AD
+#define X_VC_ENDC_WATCH1_DATA2 0x1AE
+#define X_VC_ENDC_WATCH1_DATA3 0x1AF
+#define VC_ENDC_WATCH1_DATA0 0x560
+#define VC_ENDC_WATCH1_DATA1 0x568
+#define VC_ENDC_WATCH1_DATA2 0x570
+#define VC_ENDC_WATCH1_DATA3 0x578
+
+/* ENDC cache watch 2 */
+#define X_VC_ENDC_WATCH2_SPEC 0x1B0
+#define VC_ENDC_WATCH2_SPEC 0x580
+#define X_VC_ENDC_WATCH2_DATA0 0x1B4
+#define X_VC_ENDC_WATCH2_DATA1 0x1B5
+#define X_VC_ENDC_WATCH2_DATA2 0x1B6
+#define X_VC_ENDC_WATCH2_DATA3 0x1B7
+#define VC_ENDC_WATCH2_DATA0 0x5A0
+#define VC_ENDC_WATCH2_DATA1 0x5A8
+#define VC_ENDC_WATCH2_DATA2 0x5B0
+#define VC_ENDC_WATCH2_DATA3 0x5B8
+
+/* ENDC cache watch 3 */
+#define X_VC_ENDC_WATCH3_SPEC 0x1B8
+#define VC_ENDC_WATCH3_SPEC 0x5C0
+#define X_VC_ENDC_WATCH3_DATA0 0x1BC
+#define X_VC_ENDC_WATCH3_DATA1 0x1BD
+#define X_VC_ENDC_WATCH3_DATA2 0x1BE
+#define X_VC_ENDC_WATCH3_DATA3 0x1BF
+#define VC_ENDC_WATCH3_DATA0 0x5E0
+#define VC_ENDC_WATCH3_DATA1 0x5E8
+#define VC_ENDC_WATCH3_DATA2 0x5F0
+#define VC_ENDC_WATCH3_DATA3 0x5F8
+
/*
* PC LSB1
*/
@@ -358,6 +415,21 @@
#define PC_NXC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define PC_NXC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+/* NxC Cache flush inject */
+#define X_PC_NXC_FLUSH_INJECT 0x282
+#define PC_NXC_FLUSH_INJECT 0x410
+
+/* NxC Cache watch assign */
+#define X_PC_NXC_WATCH_ASSIGN 0x286
+#define PC_NXC_WATCH_ASSIGN 0x430
+
+/* NxC Proc config */
+#define X_PC_NXC_PROC_CONFIG 0x28A
+#define PC_NXC_PROC_CONFIG 0x450
+#define PC_NXC_PROC_CONFIG_WATCH_ASSIGN PPC_BITMASK(0, 3)
+#define PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS PPC_BITMASK(32, 35)
+#define PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS PPC_BITMASK(36, 39)
+
/* NxC Cache Watch 0 Specification */
#define X_PC_NXC_WATCH0_SPEC 0x2A0
#define PC_NXC_WATCH0_SPEC 0x500
@@ -381,6 +453,42 @@
#define PC_NXC_WATCH0_DATA2 0x530
#define PC_NXC_WATCH0_DATA3 0x538
+/* NxC Cache Watch 1 */
+#define X_PC_NXC_WATCH1_SPEC 0x2A8
+#define PC_NXC_WATCH1_SPEC 0x540
+#define X_PC_NXC_WATCH1_DATA0 0x2AC
+#define X_PC_NXC_WATCH1_DATA1 0x2AD
+#define X_PC_NXC_WATCH1_DATA2 0x2AE
+#define X_PC_NXC_WATCH1_DATA3 0x2AF
+#define PC_NXC_WATCH1_DATA0 0x560
+#define PC_NXC_WATCH1_DATA1 0x568
+#define PC_NXC_WATCH1_DATA2 0x570
+#define PC_NXC_WATCH1_DATA3 0x578
+
+/* NxC Cache Watch 2 */
+#define X_PC_NXC_WATCH2_SPEC 0x2B0
+#define PC_NXC_WATCH2_SPEC 0x580
+#define X_PC_NXC_WATCH2_DATA0 0x2B4
+#define X_PC_NXC_WATCH2_DATA1 0x2B5
+#define X_PC_NXC_WATCH2_DATA2 0x2B6
+#define X_PC_NXC_WATCH2_DATA3 0x2B7
+#define PC_NXC_WATCH2_DATA0 0x5A0
+#define PC_NXC_WATCH2_DATA1 0x5A8
+#define PC_NXC_WATCH2_DATA2 0x5B0
+#define PC_NXC_WATCH2_DATA3 0x5B8
+
+/* NxC Cache Watch 3 */
+#define X_PC_NXC_WATCH3_SPEC 0x2B8
+#define PC_NXC_WATCH3_SPEC 0x5C0
+#define X_PC_NXC_WATCH3_DATA0 0x2BC
+#define X_PC_NXC_WATCH3_DATA1 0x2BD
+#define X_PC_NXC_WATCH3_DATA2 0x2BE
+#define X_PC_NXC_WATCH3_DATA3 0x2BF
+#define PC_NXC_WATCH3_DATA0 0x5E0
+#define PC_NXC_WATCH3_DATA1 0x5E8
+#define PC_NXC_WATCH3_DATA2 0x5F0
+#define PC_NXC_WATCH3_DATA3 0x5F8
+
/*
* TCTXT Registers
*/
diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c
index 9a67f7f..bc4dc90 100644
--- a/hw/intc/ppc-uic.c
+++ b/hw/intc/ppc-uic.c
@@ -259,10 +259,9 @@ static void ppc_uic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, ppcuic_set_irq, UIC_MAX_IRQ);
}
-static Property ppc_uic_properties[] = {
+static const Property ppc_uic_properties[] = {
DEFINE_PROP_UINT32("dcr-base", PPCUIC, dcr_base, 0xc0),
DEFINE_PROP_BOOL("use-vectors", PPCUIC, use_vectors, true),
- DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription ppc_uic_vmstate = {
@@ -282,11 +281,11 @@ static const VMStateDescription ppc_uic_vmstate = {
},
};
-static void ppc_uic_class_init(ObjectClass *klass, void *data)
+static void ppc_uic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = ppc_uic_reset;
+ device_class_set_legacy_reset(dc, ppc_uic_reset);
dc->realize = ppc_uic_realize;
dc->vmsd = &ppc_uic_vmstate;
device_class_set_props(dc, ppc_uic_properties);
diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c
index 9b12116..63e25c2 100644
--- a/hw/intc/realview_gic.c
+++ b/hw/intc/realview_gic.c
@@ -63,7 +63,7 @@ static void realview_gic_init(Object *obj)
qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", 1);
}
-static void realview_gic_class_init(ObjectClass *oc, void *data)
+static void realview_gic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
index e9f0536..b0139f0 100644
--- a/hw/intc/riscv_aclint.c
+++ b/hw/intc/riscv_aclint.c
@@ -262,7 +262,7 @@ static const MemoryRegionOps riscv_aclint_mtimer_ops = {
}
};
-static Property riscv_aclint_mtimer_properties[] = {
+static const Property riscv_aclint_mtimer_properties[] = {
DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState,
hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1),
@@ -274,7 +274,6 @@ static Property riscv_aclint_mtimer_properties[] = {
aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE),
DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState,
timebase_freq, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
@@ -329,7 +328,7 @@ static const VMStateDescription vmstate_riscv_mtimer = {
}
};
-static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data)
+static void riscv_aclint_mtimer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = riscv_aclint_mtimer_realize;
@@ -462,11 +461,10 @@ static const MemoryRegionOps riscv_aclint_swi_ops = {
}
};
-static Property riscv_aclint_swi_properties[] = {
+static const Property riscv_aclint_swi_properties[] = {
DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1),
DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false),
- DEFINE_PROP_END_OF_LIST(),
};
static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp)
@@ -511,7 +509,7 @@ static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type)
}
}
-static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data)
+static void riscv_aclint_swi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = riscv_aclint_swi_realize;
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
index 32edd6d..8bcd9f4 100644
--- a/hw/intc/riscv_aplic.c
+++ b/hw/intc/riscv_aplic.c
@@ -22,7 +22,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "hw/boards.h"
@@ -30,8 +30,9 @@
#include "hw/intc/riscv_aplic.h"
#include "hw/irq.h"
#include "target/riscv/cpu.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "migration/vmstate.h"
@@ -154,36 +155,76 @@
* KVM AIA only supports APLIC MSI, fallback to QEMU emulation if we want to use
* APLIC Wired.
*/
-static bool is_kvm_aia(bool msimode)
+bool riscv_is_kvm_aia_aplic_imsic(bool msimode)
{
return kvm_irqchip_in_kernel() && msimode;
}
+bool riscv_use_emulated_aplic(bool msimode)
+{
+#ifdef CONFIG_KVM
+ if (tcg_enabled()) {
+ return true;
+ }
+
+ if (!riscv_is_kvm_aia_aplic_imsic(msimode)) {
+ return true;
+ }
+
+ return kvm_kernel_irqchip_split();
+#else
+ return true;
+#endif
+}
+
+void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr)
+{
+#ifdef CONFIG_KVM
+ if (riscv_use_emulated_aplic(aplic->msimode)) {
+ addr >>= APLIC_xMSICFGADDR_PPN_SHIFT;
+ aplic->kvm_msicfgaddr = extract64(addr, 0, 32);
+ aplic->kvm_msicfgaddrH = extract64(addr, 32, 32) &
+ APLIC_xMSICFGADDRH_VALID_MASK;
+ }
+#endif
+}
+
+static bool riscv_aplic_irq_rectified_val(RISCVAPLICState *aplic,
+ uint32_t irq)
+{
+ uint32_t sourcecfg, sm, raw_input, irq_inverted;
+
+ if (!irq || aplic->num_irqs <= irq) {
+ return false;
+ }
+
+ sourcecfg = aplic->sourcecfg[irq];
+ if (sourcecfg & APLIC_SOURCECFG_D) {
+ return false;
+ }
+
+ sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE) {
+ return false;
+ }
+
+ raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0;
+ irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
+ sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
+
+ return !!(raw_input ^ irq_inverted);
+}
+
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
uint32_t word)
{
- uint32_t i, irq, sourcecfg, sm, raw_input, irq_inverted, ret = 0;
+ uint32_t i, irq, rectified_val, ret = 0;
for (i = 0; i < 32; i++) {
irq = word * 32 + i;
- if (!irq || aplic->num_irqs <= irq) {
- continue;
- }
- sourcecfg = aplic->sourcecfg[irq];
- if (sourcecfg & APLIC_SOURCECFG_D) {
- continue;
- }
-
- sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
- if (sm == APLIC_SOURCECFG_SM_INACTIVE) {
- continue;
- }
-
- raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0;
- irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
- sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
- ret |= (raw_input ^ irq_inverted) << i;
+ rectified_val = riscv_aplic_irq_rectified_val(aplic, irq);
+ ret |= rectified_val << i;
}
return ret;
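/*
 * Illustrative sketch (not part of the patch): the "rectified" input value
 * computed by riscv_aplic_irq_rectified_val() above. The raw wire level is
 * XORed with an inversion flag so that active-low (LEVEL_LOW) and
 * falling-edge (EDGE_FALL) sources read back as 1 when asserted. The enum
 * values here are local placeholders, not the APLIC_SOURCECFG_SM_*
 * encodings.
 */
#include <assert.h>
#include <stdbool.h>

enum source_mode { SM_LEVEL_HIGH, SM_LEVEL_LOW, SM_EDGE_RISE, SM_EDGE_FALL };

static bool rectified(bool raw_input, enum source_mode sm)
{
    bool inverted = (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);

    return raw_input ^ inverted;
}

int main(void)
{
    assert(rectified(true,  SM_LEVEL_HIGH) == true);
    assert(rectified(false, SM_LEVEL_LOW)  == true);  /* low wire = asserted */
    assert(rectified(true,  SM_EDGE_FALL)  == false);
    return 0;
}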
@@ -237,9 +278,12 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic,
if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) {
- if (!aplic->msimode || (aplic->msimode && !pending)) {
+ if (!aplic->msimode) {
return;
}
+ if (aplic->msimode && !pending) {
+ goto noskip_write_pending;
+ }
if ((aplic->state[irq] & APLIC_ISTATE_INPUT) &&
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) {
return;
@@ -250,6 +294,7 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic,
}
}
+noskip_write_pending:
riscv_aplic_set_pending_raw(aplic, irq, pending);
}
@@ -348,21 +393,29 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
uint32_t lhxs, lhxw, hhxs, hhxw, group_idx, msicfgaddr, msicfgaddrH;
aplic_m = aplic;
- while (aplic_m && !aplic_m->mmode) {
- aplic_m = aplic_m->parent;
- }
- if (!aplic_m) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n",
- __func__);
- return;
+
+ if (!aplic->kvm_splitmode) {
+ while (aplic_m && !aplic_m->mmode) {
+ aplic_m = aplic_m->parent;
+ }
+ if (!aplic_m) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n",
+ __func__);
+ return;
+ }
}
- if (aplic->mmode) {
- msicfgaddr = aplic_m->mmsicfgaddr;
- msicfgaddrH = aplic_m->mmsicfgaddrH;
+ if (aplic->kvm_splitmode) {
+ msicfgaddr = aplic->kvm_msicfgaddr;
+ msicfgaddrH = ((uint64_t)aplic->kvm_msicfgaddrH << 32);
} else {
- msicfgaddr = aplic_m->smsicfgaddr;
- msicfgaddrH = aplic_m->smsicfgaddrH;
+ if (aplic->mmode) {
+ msicfgaddr = aplic_m->mmsicfgaddr;
+ msicfgaddrH = aplic_m->mmsicfgaddrH;
+ } else {
+ msicfgaddr = aplic_m->smsicfgaddr;
+ msicfgaddrH = aplic_m->smsicfgaddrH;
+ }
}
lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) &
@@ -375,7 +428,6 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
APLIC_xMSICFGADDRH_HHXW_MASK;
group_idx = hart_idx >> lhxw;
- hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);
addr = msicfgaddr;
addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32;
@@ -702,6 +754,10 @@ static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value,
(aplic->sourcecfg[irq] == 0)) {
riscv_aplic_set_pending_raw(aplic, irq, false);
riscv_aplic_set_enabled_raw(aplic, irq, false);
+ } else {
+ if (riscv_aplic_irq_rectified_val(aplic, irq)) {
+ riscv_aplic_set_pending_raw(aplic, irq, true);
+ }
}
} else if (aplic->mmode && aplic->msimode &&
(addr == APLIC_MMSICFGADDR)) {
@@ -838,7 +894,27 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
uint32_t i;
RISCVAPLICState *aplic = RISCV_APLIC(dev);
- if (!is_kvm_aia(aplic->msimode)) {
+ if (riscv_use_emulated_aplic(aplic->msimode)) {
+ /* Create output IRQ lines for non-MSI mode */
+ if (!aplic->msimode) {
+ /* Claim the CPU interrupt to be triggered by this APLIC */
+ for (i = 0; i < aplic->num_harts; i++) {
+ RISCVCPU *cpu;
+
+ cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
+ if (riscv_cpu_claim_interrupts(cpu,
+ (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
+ error_report("%s already claimed",
+ (aplic->mmode) ? "MEIP" : "SEIP");
+ exit(1);
+ }
+ }
+
+ aplic->external_irqs = g_malloc(sizeof(qemu_irq) *
+ aplic->num_harts);
+ qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
+ }
+
aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
aplic->state = g_new0(uint32_t, aplic->num_irqs);
@@ -855,6 +931,10 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops,
aplic, TYPE_RISCV_APLIC, aplic->aperture_size);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
+
+ if (kvm_enabled()) {
+ aplic->kvm_splitmode = true;
+ }
}
/*
@@ -862,34 +942,17 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
* have IRQ lines delegated by their parent APLIC.
*/
if (!aplic->parent) {
- if (kvm_enabled() && is_kvm_aia(aplic->msimode)) {
+ if (kvm_enabled() && !riscv_use_emulated_aplic(aplic->msimode)) {
qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
} else {
qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
}
}
- /* Create output IRQ lines for non-MSI mode */
- if (!aplic->msimode) {
- aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts);
- qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
-
- /* Claim the CPU interrupt to be triggered by this APLIC */
- for (i = 0; i < aplic->num_harts; i++) {
- RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
- if (riscv_cpu_claim_interrupts(cpu,
- (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
- error_report("%s already claimed",
- (aplic->mmode) ? "MEIP" : "SEIP");
- exit(1);
- }
- }
- }
-
msi_nonbroken = true;
}
-static Property riscv_aplic_properties[] = {
+static const Property riscv_aplic_properties[] = {
DEFINE_PROP_UINT32("aperture-size", RISCVAPLICState, aperture_size, 0),
DEFINE_PROP_UINT32("hartid-base", RISCVAPLICState, hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", RISCVAPLICState, num_harts, 0),
@@ -897,13 +960,12 @@ static Property riscv_aplic_properties[] = {
DEFINE_PROP_UINT32("num-irqs", RISCVAPLICState, num_irqs, 0),
DEFINE_PROP_BOOL("msimode", RISCVAPLICState, msimode, 0),
DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_riscv_aplic = {
.name = "riscv_aplic",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(domaincfg, RISCVAPLICState),
VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState),
@@ -911,6 +973,8 @@ static const VMStateDescription vmstate_riscv_aplic = {
VMSTATE_UINT32(smsicfgaddr, RISCVAPLICState),
VMSTATE_UINT32(smsicfgaddrH, RISCVAPLICState),
VMSTATE_UINT32(genmsi, RISCVAPLICState),
+ VMSTATE_UINT32(kvm_msicfgaddr, RISCVAPLICState),
+ VMSTATE_UINT32(kvm_msicfgaddrH, RISCVAPLICState),
VMSTATE_VARRAY_UINT32(sourcecfg, RISCVAPLICState,
num_irqs, 0,
vmstate_info_uint32, uint32_t),
@@ -933,7 +997,7 @@ static const VMStateDescription vmstate_riscv_aplic = {
}
};
-static void riscv_aplic_class_init(ObjectClass *klass, void *data)
+static void riscv_aplic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1006,17 +1070,17 @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- if (!is_kvm_aia(msimode)) {
+ if (riscv_use_emulated_aplic(msimode)) {
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
- }
- if (!msimode) {
- for (i = 0; i < num_harts; i++) {
- CPUState *cpu = cpu_by_arch_id(hartid_base + i);
+ if (!msimode) {
+ for (i = 0; i < num_harts; i++) {
+ CPUState *cpu = cpu_by_arch_id(hartid_base + i);
- qdev_connect_gpio_out_named(dev, NULL, i,
- qdev_get_gpio_in(DEVICE(cpu),
+ qdev_connect_gpio_out_named(dev, NULL, i,
+ qdev_get_gpio_in(DEVICE(cpu),
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
+ }
}
}
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
index b90f0d7..2169988 100644
--- a/hw/intc/riscv_imsic.c
+++ b/hw/intc/riscv_imsic.c
@@ -22,7 +22,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "hw/boards.h"
@@ -31,8 +31,8 @@
#include "hw/irq.h"
#include "target/riscv/cpu.h"
#include "target/riscv/cpu_bits.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "system/system.h"
+#include "system/kvm.h"
#include "migration/vmstate.h"
#define IMSIC_MMIO_PAGE_LE 0x00
@@ -55,7 +55,7 @@ static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
(imsic->eithreshold[page] <= imsic->num_irqs)) ?
imsic->eithreshold[page] : imsic->num_irqs;
for (i = 1; i < max_irq; i++) {
- if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) ==
+ if ((qatomic_read(&imsic->eistate[base + i]) & IMSIC_EISTATE_ENPEND) ==
IMSIC_EISTATE_ENPEND) {
return (i << IMSIC_TOPEI_IID_SHIFT) | i;
}
@@ -66,10 +66,24 @@ static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
{
+ uint32_t base = page * imsic->num_irqs;
+
+ /*
+ * Lower the interrupt line if necessary, then evaluate the current
+ * IMSIC state.
+ * This sequence ensures that any race between evaluating the eistate and
+ * updating the interrupt line will not result in an incorrectly
+ * deactivated connected CPU IRQ line.
+ * If multiple interrupts are pending, this sequence functions identically
+ * to qemu_irq_pulse.
+ */
+
+ if (qatomic_fetch_and(&imsic->eistate[base], ~IMSIC_EISTATE_ENPEND)) {
+ qemu_irq_lower(imsic->external_irqs[page]);
+ }
if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
qemu_irq_raise(imsic->external_irqs[page]);
- } else {
- qemu_irq_lower(imsic->external_irqs[page]);
+ qatomic_or(&imsic->eistate[base], IMSIC_EISTATE_ENPEND);
}
}
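
The ordering described in the comment above (clear and lower first, then re-evaluate and raise) can be sketched on its own with C11 atomics; the pending word, delivery flag and irq_lower()/irq_raise() hooks below are stand-ins, not the IMSIC fields:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define PENDING_BIT 0x1u

extern void irq_lower(void);
extern void irq_raise(void);

static void update_irq_line(_Atomic uint32_t *pending, bool deliverable)
{
    /* Clear the pending bit; lower the line only if it was set. */
    if (atomic_fetch_and(pending, ~PENDING_BIT) & PENDING_BIT) {
        irq_lower();
    }
    /*
     * Re-evaluate after lowering: a concurrent producer can only leave
     * the line raised, never wrongly lowered.  With back-to-back
     * interrupts this behaves like a pulse, as the comment above notes.
     */
    if (deliverable) {
        atomic_fetch_or(pending, PENDING_BIT);
        irq_raise();
    }
}
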
@@ -125,12 +139,11 @@ static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
topei >>= IMSIC_TOPEI_IID_SHIFT;
base = page * imsic->num_irqs;
if (topei) {
- imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING;
+ qatomic_and(&imsic->eistate[base + topei], ~IMSIC_EISTATE_PENDING);
}
-
- riscv_imsic_update(imsic, page);
}
+ riscv_imsic_update(imsic, page);
return 0;
}
@@ -139,7 +152,7 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
uint32_t num, bool pend, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
- uint32_t i, base;
+ uint32_t i, base, prev;
target_ulong mask;
uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;
@@ -157,10 +170,6 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
if (val) {
*val = 0;
- for (i = 0; i < xlen; i++) {
- mask = (target_ulong)1 << i;
- *val |= (imsic->eistate[base + i] & state) ? mask : 0;
- }
}
for (i = 0; i < xlen; i++) {
@@ -172,10 +181,15 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
mask = (target_ulong)1 << i;
if (wr_mask & mask) {
if (new_val & mask) {
- imsic->eistate[base + i] |= state;
+ prev = qatomic_fetch_or(&imsic->eistate[base + i], state);
} else {
- imsic->eistate[base + i] &= ~state;
+ prev = qatomic_fetch_and(&imsic->eistate[base + i], ~state);
}
+ } else {
+ prev = qatomic_read(&imsic->eistate[base + i]);
+ }
+ if (val && (prev & state)) {
+ *val |= mask;
}
}
@@ -302,14 +316,14 @@ static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
page = addr >> IMSIC_MMIO_PAGE_SHIFT;
if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
if (value && (value < imsic->num_irqs)) {
- imsic->eistate[(page * imsic->num_irqs) + value] |=
- IMSIC_EISTATE_PENDING;
+ qatomic_or(&imsic->eistate[(page * imsic->num_irqs) + value],
+ IMSIC_EISTATE_PENDING);
+
+ /* Update CPU external interrupt status */
+ riscv_imsic_update(imsic, page);
}
}
- /* Update CPU external interrupt status */
- riscv_imsic_update(imsic, page);
-
return;
err:
@@ -335,7 +349,19 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
+ /* Claim the CPU interrupt to be triggered by this IMSIC */
+ if (riscv_cpu_claim_interrupts(rcpu,
+ (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
+ error_setg(errp, "%s already claimed",
+ (imsic->mmode) ? "MEIP" : "SEIP");
+ return;
+ }
+
if (!kvm_irqchip_in_kernel()) {
+ /* Create output IRQ lines */
+ imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
+ qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
+
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
@@ -347,18 +373,6 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
IMSIC_MMIO_SIZE(imsic->num_pages));
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
- /* Claim the CPU interrupt to be triggered by this IMSIC */
- if (riscv_cpu_claim_interrupts(rcpu,
- (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
- error_setg(errp, "%s already claimed",
- (imsic->mmode) ? "MEIP" : "SEIP");
- return;
- }
-
- /* Create output IRQ lines */
- imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
- qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
-
/* Force select AIA feature and setup CSR read-modify-write callback */
if (env) {
if (!imsic->mmode) {
@@ -367,19 +381,21 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
} else {
rcpu->cfg.ext_smaia = true;
}
- riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
- riscv_imsic_rmw, imsic);
+
+ if (!kvm_irqchip_in_kernel()) {
+ riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
+ riscv_imsic_rmw, imsic);
+ }
}
msi_nonbroken = true;
}
-static Property riscv_imsic_properties[] = {
+static const Property riscv_imsic_properties[] = {
DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_riscv_imsic = {
@@ -400,7 +416,7 @@ static const VMStateDescription vmstate_riscv_imsic = {
}
};
-static void riscv_imsic_class_init(ObjectClass *klass, void *data)
+static void riscv_imsic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -451,15 +467,17 @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
- for (i = 0; i < num_pages; i++) {
- if (!i) {
- qdev_connect_gpio_out_named(dev, NULL, i,
- qdev_get_gpio_in(DEVICE(cpu),
+ if (!kvm_irqchip_in_kernel()) {
+ for (i = 0; i < num_pages; i++) {
+ if (!i) {
+ qdev_connect_gpio_out_named(dev, NULL, i,
+ qdev_get_gpio_in(DEVICE(cpu),
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
- } else {
- qdev_connect_gpio_out_named(dev, NULL, i,
- qdev_get_gpio_in(DEVICE(cpu),
+ } else {
+ qdev_connect_gpio_out_named(dev, NULL, i,
+ qdev_get_gpio_in(DEVICE(cpu),
IRQ_LOCAL_MAX + i - 1));
+ }
}
}
diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c
index b2d4338..f861552 100644
--- a/hw/intc/rx_icu.c
+++ b/hw/intc/rx_icu.c
@@ -361,15 +361,14 @@ static const VMStateDescription vmstate_rxicu = {
}
};
-static Property rxicu_properties[] = {
+static const Property rxicu_properties[] = {
DEFINE_PROP_ARRAY("ipr-map", RXICUState, nr_irqs, map,
qdev_prop_uint8, uint8_t),
DEFINE_PROP_ARRAY("trigger-level", RXICUState, nr_sense, init_sense,
qdev_prop_uint8, uint8_t),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rxicu_class_init(ObjectClass *klass, void *data)
+static void rxicu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
index a91a4a4..8f4c9fd 100644
--- a/hw/intc/s390_flic.c
+++ b/hw/intc/s390_flic.c
@@ -445,19 +445,18 @@ static void qemu_s390_flic_instance_init(Object *obj)
}
}
-static Property qemu_s390_flic_properties[] = {
+static const Property qemu_s390_flic_properties[] = {
DEFINE_PROP_BOOL("migrate-all-state", QEMUS390FLICState,
migrate_all_state, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
+static void qemu_s390_flic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);
device_class_set_props(dc, qemu_s390_flic_properties);
- dc->reset = qemu_s390_flic_reset;
+ device_class_set_legacy_reset(dc, qemu_s390_flic_reset);
dc->vmsd = &qemu_s390_flic_vmstate;
fsc->register_io_adapter = qemu_s390_register_io_adapter;
fsc->io_adapter_map = qemu_s390_io_adapter_map;
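
device_class_set_legacy_reset(), used here and in several later class_init hunks, wraps a DeviceState-style reset handler so the device goes through the Resettable machinery without being ported to the three-phase interface. A sketch of the pattern with a hypothetical device (header path assumed to be hw/qdev-core.h):

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

static void my_dev_reset(DeviceState *dev)
{
    /* return the model to its power-on state */
}

static void my_dev_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* formerly: dc->reset = my_dev_reset; */
    device_class_set_legacy_reset(dc, my_dev_reset);
}
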
@@ -471,33 +470,17 @@ static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk;
}
-static Property s390_flic_common_properties[] = {
- DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState,
- adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI),
- DEFINE_PROP_BOOL("migration-enabled", S390FLICState,
- migration_enabled, true),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void s390_flic_common_realize(DeviceState *dev, Error **errp)
{
S390FLICState *fs = S390_FLIC_COMMON(dev);
- uint32_t max_batch = fs->adapter_routes_max_batch;
-
- if (max_batch > ADAPTER_ROUTES_MAX_GSI) {
- error_setg(errp, "flic property adapter_routes_max_batch too big"
- " (%d > %d)", max_batch, ADAPTER_ROUTES_MAX_GSI);
- return;
- }
fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION);
}
-static void s390_flic_class_init(ObjectClass *oc, void *data)
+static void s390_flic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- device_class_set_props(dc, s390_flic_common_properties);
dc->realize = s390_flic_common_realize;
}
@@ -526,18 +509,10 @@ static void qemu_s390_flic_register_types(void)
type_init(qemu_s390_flic_register_types)
-static bool adapter_info_so_needed(void *opaque)
-{
- S390FLICState *fs = s390_get_flic();
-
- return fs->migration_enabled;
-}
-
const VMStateDescription vmstate_adapter_info_so = {
.name = "s390_adapter_info/summary_offset",
.version_id = 1,
.minimum_version_id = 1,
- .needed = adapter_info_so_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(summary_offset, AdapterInfo),
VMSTATE_END_OF_LIST()
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index 330f08d..f833a39 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -16,7 +16,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/css.h"
@@ -670,7 +670,7 @@ static void kvm_s390_flic_reset(DeviceState *dev)
flic_enable_pfault(flic);
}
-static void kvm_s390_flic_class_init(ObjectClass *oc, void *data)
+static void kvm_s390_flic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);
@@ -679,7 +679,7 @@ static void kvm_s390_flic_class_init(ObjectClass *oc, void *data)
device_class_set_parent_realize(dc, kvm_s390_flic_realize,
&kfsc->parent_realize);
dc->vmsd = &kvm_s390_flic_vmstate;
- dc->reset = kvm_s390_flic_reset;
+ device_class_set_legacy_reset(dc, kvm_s390_flic_reset);
fsc->register_io_adapter = kvm_s390_register_io_adapter;
fsc->io_adapter_map = kvm_s390_io_adapter_map;
fsc->add_adapter_routes = kvm_s390_add_adapter_routes;
diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
index e559f11..3160b21 100644
--- a/hw/intc/sifive_plic.c
+++ b/hw/intc/sifive_plic.c
@@ -30,7 +30,7 @@
#include "target/riscv/cpu.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
{
@@ -189,8 +189,13 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value,
if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) {
uint32_t irq = (addr - plic->priority_base) >> 2;
-
- if (((plic->num_priorities + 1) & plic->num_priorities) == 0) {
+ if (irq == 0) {
+ /* IRQ 0 source priority is reserved */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid source priority write 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return;
+ } else if (((plic->num_priorities + 1) & plic->num_priorities) == 0) {
/*
* if "num_priorities + 1" is power-of-2, make each register bit of
* interrupt priority WARL (Write-Any-Read-Legal). Just filter
@@ -349,8 +354,10 @@ static void sifive_plic_irq_request(void *opaque, int irq, int level)
{
SiFivePLICState *s = opaque;
- sifive_plic_set_pending(s, irq, level > 0);
- sifive_plic_update(s);
+ if (level > 0) {
+ sifive_plic_set_pending(s, irq, true);
+ sifive_plic_update(s);
+ }
}
static void sifive_plic_realize(DeviceState *dev, Error **errp)
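
In the source-priority hunk earlier in this file, the (num_priorities + 1) & num_priorities test checks that the number of priority levels is one less than a power of two, in which case any written value can be reduced to a legal one by masking. A standalone sketch of that WARL filter (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True when num_priorities is of the form 2^k - 1 (e.g. 7 on SiFive parts). */
static inline bool plic_priority_warl(uint32_t num_priorities)
{
    return ((num_priorities + 1) & num_priorities) == 0;
}

/* Write-Any-Read-Legal: keep only the bits that encode a valid priority. */
static inline uint32_t plic_legal_priority(uint32_t value, uint32_t num_priorities)
{
    return value & num_priorities;      /* num_priorities == 7 keeps bits 0..2 */
}
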
@@ -423,7 +430,7 @@ static const VMStateDescription vmstate_sifive_plic = {
}
};
-static Property sifive_plic_properties[] = {
+static const Property sifive_plic_properties[] = {
DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config),
DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0),
/* number of interrupt sources including interrupt source 0 */
@@ -437,14 +444,13 @@ static Property sifive_plic_properties[] = {
DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0),
DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0),
DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sifive_plic_class_init(ObjectClass *klass, void *data)
+static void sifive_plic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = sifive_plic_reset;
+ device_class_set_legacy_reset(dc, sifive_plic_reset);
device_class_set_props(dc, sifive_plic_properties);
dc->realize = sifive_plic_realize;
dc->vmsd = &vmstate_sifive_plic;
diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c
index d6e49d2..00b80bb 100644
--- a/hw/intc/slavio_intctl.c
+++ b/hw/intc/slavio_intctl.c
@@ -441,12 +441,12 @@ static void slavio_intctl_init(Object *obj)
}
}
-static void slavio_intctl_class_init(ObjectClass *klass, void *data)
+static void slavio_intctl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
- dc->reset = slavio_intctl_reset;
+ device_class_set_legacy_reset(dc, slavio_intctl_reset);
dc->vmsd = &vmstate_intctl;
#ifdef DEBUG_IRQ_COUNT
ic->get_statistics = slavio_intctl_get_statistics;
@@ -460,7 +460,7 @@ static const TypeInfo slavio_intctl_info = {
.instance_size = sizeof(SLAVIO_INTCTLState),
.instance_init = slavio_intctl_init,
.class_init = slavio_intctl_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
},
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 283a6b8..440edb9 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC sPAPR XIVE interrupt controller model
*
- * Copyright (c) 2017-2018, IBM Corporation.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -13,8 +12,8 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/reset.h"
+#include "system/cpus.h"
+#include "system/reset.h"
#include "migration/vmstate.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
@@ -431,7 +430,8 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore,
+ uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
CPUState *cs;
@@ -627,13 +627,12 @@ static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}
-static Property spapr_xive_properties[] = {
+static const Property spapr_xive_properties[] = {
DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
- DEFINE_PROP_END_OF_LIST(),
};
static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
@@ -810,7 +809,7 @@ static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
}
-static void spapr_xive_class_init(ObjectClass *klass, void *data)
+static void spapr_xive_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
@@ -857,7 +856,7 @@ static const TypeInfo spapr_xive_info = {
.instance_size = sizeof(SpaprXive),
.class_init = spapr_xive_class_init,
.class_size = sizeof(SpaprXiveClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_SPAPR_INTC },
{ }
},
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
index 5789062..26d30b4 100644
--- a/hw/intc/spapr_xive_kvm.c
+++ b/hw/intc/spapr_xive_kvm.c
@@ -12,9 +12,9 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
+#include "system/cpus.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
@@ -720,7 +720,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc = &xive->source;
- size_t esb_len = xive_source_esb_len(xsrc);
+ uint64_t esb_len = xive_source_esb_len(xsrc);
size_t tima_len = 4ull << TM_SHIFT;
CPUState *cs;
int fd;
@@ -824,7 +824,7 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc;
- size_t esb_len;
+ uint64_t esb_len;
assert(xive->fd != -1);
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 3dcf147..334aa6a 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -80,18 +80,19 @@ aspeed_vic_update_irq(int flags) "Raising IRQ: %d"
aspeed_vic_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32
aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
# aspeed_intc.c
-aspeed_intc_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32
-aspeed_intc_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
-aspeed_intc_set_irq(int irq, int level) "Set IRQ %d: %d"
-aspeed_intc_clear_irq(int irq, int level) "Clear IRQ %d: %d"
-aspeed_intc_update_irq(int irq, int level) "Update IRQ: %d: %d"
-aspeed_intc_pending_irq(int irq, uint32_t value) "Pending IRQ: %d: 0x%x"
-aspeed_intc_trigger_irq(int irq, uint32_t value) "Trigger IRQ: %d: 0x%x"
-aspeed_intc_all_isr_done(int irq) "All source ISR execution are done: %d"
-aspeed_intc_enable(uint32_t value) "Enable: 0x%x"
-aspeed_intc_select(uint32_t value) "Select: 0x%x"
-aspeed_intc_mask(uint32_t change, uint32_t value) "Mask: 0x%x: 0x%x"
-aspeed_intc_unmask(uint32_t change, uint32_t value) "UnMask: 0x%x: 0x%x"
+aspeed_intc_read(const char *s, uint64_t offset, unsigned size, uint32_t value) "%s: From 0x%" PRIx64 " of size %u: 0x%" PRIx32
+aspeed_intc_write(const char *s, uint64_t offset, unsigned size, uint32_t data) "%s: To 0x%" PRIx64 " of size %u: 0x%" PRIx32
+aspeed_intc_set_irq(const char *s, int inpin_idx, int level) "%s: Set IRQ %d: %d"
+aspeed_intc_clear_irq(const char *s, int inpin_idx, int outpin_idx, int level) "%s: Clear IRQ %d-%d: %d"
+aspeed_intc_update_irq(const char *s, int inpin_idx, int outpin_idx, int level) "%s: Update IRQ: %d-%d: %d"
+aspeed_intc_pending_irq(const char *s, int inpin_idx, uint32_t value) "%s: Pending IRQ: %d: 0x%x"
+aspeed_intc_trigger_irq(const char *s, int inpin_idx, int outpin_idx, uint32_t value) "%s: Trigger IRQ: %d-%d: 0x%x"
+aspeed_intc_all_isr_done(const char *s, int inpin_idx) "%s: All source ISR execution are done: %d"
+aspeed_intc_enable(const char *s, uint32_t value) "%s: Enable: 0x%x"
+aspeed_intc_select(const char *s, uint32_t value) "%s: Select: 0x%x"
+aspeed_intc_mask(const char *s, uint32_t change, uint32_t value) "%s: Mask: 0x%x: 0x%x"
+aspeed_intc_unmask(const char *s, uint32_t change, uint32_t value) "%s: UnMask: 0x%x: 0x%x"
+aspeed_intc_all_isr_done_bit(const char *s, int inpin_idx, int bit) "%s: All source ISR execution are done from specific bit: %d-%d"
# arm_gic.c
gic_enable_irq(int irq) "irq %d enabled"
@@ -282,9 +283,13 @@ xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "EN
xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
-xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x"
+xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d"
xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64
+# xive2.c
+xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d"
+xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d"
+
# pnv_xive.c
pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64
@@ -309,12 +314,8 @@ loongson_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x
loongson_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64
# loongarch_pch_pic.c
loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d"
-loongarch_pch_pic_low_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
-loongarch_pch_pic_low_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
-loongarch_pch_pic_high_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
-loongarch_pch_pic_high_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
-loongarch_pch_pic_readb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
-loongarch_pch_pic_writeb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
+loongarch_pch_pic_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
+loongarch_pch_pic_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64
# loongarch_pch_msi.c
loongarch_msi_set_irq(int irq_num) "set msi irq %d"
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index 6f4d527..d9a199e 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -37,8 +37,8 @@
#include "migration/vmstate.h"
#include "hw/intc/intc.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include "target/ppc/cpu.h"
void icp_pic_print_info(ICPState *icp, GString *buf)
@@ -335,22 +335,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
return;
}
}
- /*
- * The way that pre_2_10_icp is handling is really, really hacky.
- * We used to have here this call:
- *
- * vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
- *
- * But we were doing:
- * pre_2_10_vmstate_register_dummy_icp()
- * this vmstate_register()
- * pre_2_10_vmstate_unregister_dummy_icp()
- *
- * So for a short amount of time we had to vmstate entries with
- * the same name. This fixes it.
- */
- vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index,
- &vmstate_icp_server, icp);
}
static void icp_unrealize(DeviceState *dev)
@@ -360,14 +344,13 @@ static void icp_unrealize(DeviceState *dev)
vmstate_unregister(NULL, &vmstate_icp_server, icp);
}
-static Property icp_properties[] = {
+static const Property icp_properties[] = {
DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
XICSFabric *),
DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void icp_class_init(ObjectClass *klass, void *data)
+static void icp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -692,14 +675,13 @@ static const VMStateDescription vmstate_ics = {
},
};
-static Property ics_properties[] = {
+static const Property ics_properties[] = {
DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
XICSFabric *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ics_class_init(ObjectClass *klass, void *data)
+static void ics_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index 9719d98..ee72969 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -28,7 +28,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/xics.h"
diff --git a/hw/intc/xics_pnv.c b/hw/intc/xics_pnv.c
index 753c067..ff602d9 100644
--- a/hw/intc/xics_pnv.c
+++ b/hw/intc/xics_pnv.c
@@ -176,7 +176,7 @@ static void pnv_icp_realize(DeviceState *dev, Error **errp)
icp, "icp-thread", 0x1000);
}
-static void pnv_icp_class_init(ObjectClass *klass, void *data)
+static void pnv_icp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICPStateClass *icpc = ICP_CLASS(klass);
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
index a0d97bd..7663596 100644
--- a/hw/intc/xics_spapr.c
+++ b/hw/intc/xics_spapr.c
@@ -436,7 +436,7 @@ static void xics_spapr_deactivate(SpaprInterruptController *intc)
}
}
-static void ics_spapr_class_init(ObjectClass *klass, void *data)
+static void ics_spapr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICSStateClass *isc = ICS_CLASS(klass);
@@ -461,7 +461,7 @@ static const TypeInfo ics_spapr_info = {
.name = TYPE_ICS_SPAPR,
.parent = TYPE_ICS,
.class_init = ics_spapr_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_SPAPR_INTC },
{ }
},
diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c
index 6e5012e..5257ad5 100644
--- a/hw/intc/xilinx_intc.c
+++ b/hw/intc/xilinx_intc.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2009 Edgar E. Iglesias.
*
+ * https://docs.amd.com/v/u/en-US/xps_intc
+ * DS572: LogiCORE IP XPS Interrupt Controller (v2.01a)
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
@@ -23,10 +26,12 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "qemu/module.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "qom/object.h"
#define D(x)
@@ -49,6 +54,7 @@ struct XpsIntc
{
SysBusDevice parent_obj;
+ EndianMode model_endianness;
MemoryRegion mmio;
qemu_irq parent_irq;
@@ -140,14 +146,28 @@ static void pic_write(void *opaque, hwaddr addr,
update_irq(p);
}
-static const MemoryRegionOps pic_ops = {
- .read = pic_read,
- .write = pic_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
+static const MemoryRegionOps pic_ops[2] = {
+ [0 ... 1] = {
+ .read = pic_read,
+ .write = pic_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .valid = {
+ /*
+ * All XPS INTC registers are accessed through the PLB interface.
+ * The base address for these registers is provided by the
+ * configuration parameter, C_BASEADDR. Each register is 32 bits
+ * although some bits may be unused and is accessed on a 4-byte
+ * boundary offset from the base address.
+ */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
};
static void irq_handler(void *opaque, int irq, int level)
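
The pic_ops table above uses the GNU range designator [0 ... 1] to share the callbacks and access-size limits between the little- and big-endian variants, overriding only .endianness per index, so realize() can pick the variant with a boolean subscript. A reduced sketch; handler names and sizes are placeholders, and the memory API header is system/memory.h in this tree (exec/memory.h in older ones):

#include "qemu/osdep.h"
#include "system/memory.h"

static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;                           /* placeholder */
}

static void demo_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    /* placeholder */
}

static const MemoryRegionOps demo_ops[2] = {
    [0 ... 1] = {
        .read = demo_read,
        .write = demo_write,
        .valid = {
            .min_access_size = 4,
            .max_access_size = 4,
        },
    },
    [0].endianness = DEVICE_LITTLE_ENDIAN,
    [1].endianness = DEVICE_BIG_ENDIAN,
};

/*
 * At realize time:
 *     memory_region_init_io(&s->mmio, OBJECT(dev),
 *                           &demo_ops[big_endian ? 1 : 0], s, "demo", 0x20);
 */
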
@@ -170,21 +190,35 @@ static void xilinx_intc_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), irq_handler, 32);
sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio);
+}
- memory_region_init_io(&p->mmio, obj, &pic_ops, p, "xlnx.xps-intc",
+static void xilinx_intc_realize(DeviceState *dev, Error **errp)
+{
+ XpsIntc *p = XILINX_INTC(dev);
+
+ if (p->model_endianness == ENDIAN_MODE_UNSPECIFIED) {
+ error_setg(errp, TYPE_XILINX_INTC " property 'endianness'"
+ " must be set to 'big' or 'little'");
+ return;
+ }
+
+ memory_region_init_io(&p->mmio, OBJECT(dev),
+ &pic_ops[p->model_endianness == ENDIAN_MODE_BIG],
+ p, "xlnx.xps-intc",
R_MAX * 4);
- sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio);
}
-static Property xilinx_intc_properties[] = {
+static const Property xilinx_intc_properties[] = {
+ DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XpsIntc, model_endianness),
DEFINE_PROP_UINT32("kind-of-intr", XpsIntc, c_kind_of_intr, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_intc_class_init(ObjectClass *klass, void *data)
+static void xilinx_intc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = xilinx_intc_realize;
device_class_set_props(dc, xilinx_intc_properties);
}
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 70f11f9..27b473e 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -3,8 +3,7 @@
*
* Copyright (c) 2017-2018, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -12,9 +11,9 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
+#include "system/cpus.h"
+#include "system/dma.h"
+#include "system/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
@@ -27,28 +26,6 @@
* XIVE Thread Interrupt Management context
*/
-/*
- * Convert an Interrupt Pending Buffer (IPB) register to a Pending
- * Interrupt Priority Register (PIPR), which contains the priority of
- * the most favored pending notification.
- */
-static uint8_t ipb_to_pipr(uint8_t ibp)
-{
- return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
-}
-
-static uint8_t exception_mask(uint8_t ring)
-{
- switch (ring) {
- case TM_QW1_OS:
- return TM_QW1_NSR_EO;
- case TM_QW3_HV_PHYS:
- return TM_QW3_NSR_HE;
- default:
- g_assert_not_reached();
- }
-}
-
static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
switch (ring) {
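
The ipb_to_pipr() helper removed above survives as xive_ipb_to_pipr(): bit (7 - p) of the 8-bit Interrupt Pending Buffer marks priority p, so the most favoured pending priority is the count of leading zeroes, with 0xFF meaning nothing pending. A standalone sketch of the two conversions (demo_ names are illustrative, mirroring xive_priority_to_ipb()):

#include <assert.h>
#include <stdint.h>

static inline uint8_t demo_priority_to_ipb(uint8_t priority)
{
    return priority > 7 ? 0 : 1 << (7 - priority);
}

static inline uint8_t demo_ipb_to_pipr(uint8_t ipb)
{
    /* clz over the IPB placed in the top byte of a 32-bit word */
    return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xff;
}

int main(void)
{
    assert(demo_ipb_to_pipr(demo_priority_to_ipb(0)) == 0);
    assert(demo_ipb_to_pipr(demo_priority_to_ipb(3)) == 3);
    assert(demo_ipb_to_pipr(0) == 0xff);    /* empty IPB: no interrupt */
    return 0;
}
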
@@ -68,66 +45,88 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
uint8_t *regs = &tctx->regs[ring];
uint8_t nsr = regs[TM_NSR];
- uint8_t mask = exception_mask(ring);
qemu_irq_lower(xive_tctx_output(tctx, ring));
- if (regs[TM_NSR] & mask) {
+ if (regs[TM_NSR] != 0) {
uint8_t cppr = regs[TM_PIPR];
+ uint8_t alt_ring;
+ uint8_t *alt_regs;
+
+ /* POOL interrupt uses IPB in QW2, POOL ring */
+ if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) {
+ alt_ring = TM_QW2_HV_POOL;
+ } else {
+ alt_ring = ring;
+ }
+ alt_regs = &tctx->regs[alt_ring];
regs[TM_CPPR] = cppr;
- /* Reset the pending buffer bit */
- regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
- regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+ /*
+ * If the interrupt was for a specific VP, reset the pending
+ * buffer bit, otherwise clear the logical server indicator
+ */
+ if (regs[TM_NSR] & TM_NSR_GRP_LVL) {
+ regs[TM_NSR] &= ~TM_NSR_GRP_LVL;
+ } else {
+ alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+ }
- /* Drop Exception bit */
- regs[TM_NSR] &= ~mask;
+ /* Drop the exception bit and any group/crowd */
+ regs[TM_NSR] = 0;
- trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
- regs[TM_IPB], regs[TM_PIPR],
+ trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
+ alt_regs[TM_IPB], regs[TM_PIPR],
regs[TM_CPPR], regs[TM_NSR]);
}
- return (nsr << 8) | regs[TM_CPPR];
+ return ((uint64_t)nsr << 8) | regs[TM_CPPR];
}
-static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
{
+ /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+ uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+ uint8_t *alt_regs = &tctx->regs[alt_ring];
uint8_t *regs = &tctx->regs[ring];
- if (regs[TM_PIPR] < regs[TM_CPPR]) {
+ if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
switch (ring) {
case TM_QW1_OS:
- regs[TM_NSR] |= TM_QW1_NSR_EO;
+ regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
+ break;
+ case TM_QW2_HV_POOL:
+ alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
break;
case TM_QW3_HV_PHYS:
- regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
+ regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
break;
default:
g_assert_not_reached();
}
trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
- regs[TM_IPB], regs[TM_PIPR],
- regs[TM_CPPR], regs[TM_NSR]);
+ regs[TM_IPB], alt_regs[TM_PIPR],
+ alt_regs[TM_CPPR], alt_regs[TM_NSR]);
qemu_irq_raise(xive_tctx_output(tctx, ring));
}
}
-void xive_tctx_reset_os_signal(XiveTCTX *tctx)
+void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
{
/*
- * Lower the External interrupt. Used when pulling an OS
- * context. It is necessary to avoid catching it in the hypervisor
- * context. It should be raised again when re-pushing the OS
- * context.
+ * Lower the External interrupt. Used when pulling a context. It is
+ * necessary to avoid catching it in the higher privilege context. It
+ * should be raised again when re-pushing the lower privilege context.
*/
- qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
+ qemu_irq_lower(xive_tctx_output(tctx, ring));
}
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
uint8_t *regs = &tctx->regs[ring];
+ uint8_t pipr_min;
+ uint8_t ring_min;
trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
regs[TM_IPB], regs[TM_PIPR],
@@ -139,18 +138,57 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
tctx->regs[ring + TM_CPPR] = cppr;
+ /*
+ * Recompute the PIPR based on local pending interrupts. The PHYS
+ * ring must take the minimum of both the PHYS and POOL PIPR values.
+ */
+ pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
+ ring_min = ring;
+
+ /* PHYS updates also depend on POOL values */
+ if (ring == TM_QW3_HV_PHYS) {
+ uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL];
+
+ /* POOL values only matter if POOL ctx is valid */
+ if (pool_regs[TM_WORD2] & 0x80) {
+
+ uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]);
+
+ /*
+ * Determine highest priority interrupt and
+ * remember which ring has it.
+ */
+ if (pool_pipr < pipr_min) {
+ pipr_min = pool_pipr;
+ ring_min = TM_QW2_HV_POOL;
+ }
+ }
+ }
+
+ regs[TM_PIPR] = pipr_min;
+
/* CPPR has changed, check if we need to raise a pending exception */
- xive_tctx_notify(tctx, ring);
+ xive_tctx_notify(tctx, ring_min, 0);
}
-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
-{
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+ uint8_t group_level)
+ {
+ /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+ uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+ uint8_t *alt_regs = &tctx->regs[alt_ring];
uint8_t *regs = &tctx->regs[ring];
- regs[TM_IPB] |= ipb;
- regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
- xive_tctx_notify(tctx, ring);
-}
+ if (group_level == 0) {
+ /* VP-specific */
+ regs[TM_IPB] |= xive_priority_to_ipb(priority);
+ alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
+ } else {
+ /* VP-group */
+ alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
+ }
+ xive_tctx_notify(tctx, ring, group_level);
+ }
/*
* XIVE Thread Interrupt Management Area (TIMA)
@@ -179,6 +217,17 @@ static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
return qw2w2;
}
+static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
+ uint8_t qw3b8;
+
+ qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
+ tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
+ return qw3b8;
+}
+
static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
@@ -207,14 +256,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
static const uint8_t xive_tm_hw_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
- 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+ 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};
static const uint8_t xive_tm_hv_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
- 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+ 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};
@@ -341,14 +390,27 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}
+static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ regs[TM_LGS] = lgs;
+}
+
+static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff);
+}
+
/*
- * Adjust the IPB to allow a CPU to process event queues of other
+ * Adjust the PIPR to allow a CPU to process event queues of other
* priorities during one physical interrupt cycle.
*/
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
- xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0);
}
static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
@@ -400,7 +462,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
xive_tctx_set_os_cam(tctx, qw1w2_new);
- xive_tctx_reset_os_signal(tctx);
+ xive_tctx_reset_signal(tctx, TM_QW1_OS);
return qw1w2;
}
@@ -426,16 +488,20 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
/* Reset the NVT value */
nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+ regs[TM_IPB] |= ipb;
}
+
/*
- * Always call xive_tctx_ipb_update(). Even if there were no
+ * Always call xive_tctx_pipr_update(). Even if there were no
* escalation triggered, there could be a pending interrupt which
* was saved when the context was pulled and that we need to take
* into account by recalculating the PIPR (which is not
* saved/restored).
* It will also raise the External interrupt signal if needed.
*/
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */
}
/*
@@ -488,20 +554,34 @@ static const XiveTmOp xive_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
- { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
- { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+ { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
+ xive_tm_vt_poll },
/* MMIOs above 2K : special operations with side effects */
- { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
- { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+ { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
+ xive_tm_ack_os_reg },
+ { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
+ xive_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
+ xive_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
+ xive_tm_ack_hv_reg },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
+ xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
+ xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+ xive_tm_pull_phys_ctx },
};
static const XiveTmOp xive2_tm_operations[] = {
@@ -509,20 +589,50 @@ static const XiveTmOp xive2_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
- { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
- { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
- { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+ { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
+ NULL },
+ { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
+ xive_tm_vt_poll },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
+ NULL },
/* MMIOs above 2K : special operations with side effects */
- { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
- { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
- { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+ { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
+ xive_tm_ack_os_reg },
+ { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
+ xive2_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
+ xive2_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
+ xive2_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
+ xive_tm_ack_hv_reg },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
+ xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
+ xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
+ xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
+ NULL },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
+ xive_tm_pull_phys_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+ xive_tm_pull_phys_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
+ NULL },
};
static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
@@ -692,9 +802,15 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf)
}
}
- g_string_append_printf(buf, "CPU[%04x]: "
- "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR W2\n",
- cpu_index);
+ if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) {
+ g_string_append_printf(buf, "CPU[%04x]: "
+ "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
+ " W2\n", cpu_index);
+ } else {
+ g_string_append_printf(buf, "CPU[%04x]: "
+ "QW NSR CPPR IPB LSMFB - LGS T PIPR"
+ " W2\n", cpu_index);
+ }
for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
@@ -712,15 +828,19 @@ void xive_tctx_reset(XiveTCTX *tctx)
tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
+ if (!(xive_presenter_get_config(tctx->xptr) &
+ XIVE_PRESENTER_GEN1_TIMA_OS)) {
+ tctx->regs[TM_QW1_OS + TM_OGEN] = 2;
+ }
/*
* Initialize PIPR to 0xFF to avoid phantom interrupts when the
* CPPR is first set.
*/
tctx->regs[TM_QW1_OS + TM_PIPR] =
- ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
+ xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
- ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
+ xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}
static void xive_tctx_realize(DeviceState *dev, Error **errp)
@@ -804,14 +924,13 @@ static const VMStateDescription vmstate_xive_tctx = {
},
};
-static Property xive_tctx_properties[] = {
+static const Property xive_tctx_properties[] = {
DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
XivePresenter *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive_tctx_class_init(ObjectClass *klass, void *data)
+static void xive_tctx_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1236,7 +1355,7 @@ static void xive_source_reset(void *dev)
static void xive_source_realize(DeviceState *dev, Error **errp)
{
XiveSource *xsrc = XIVE_SOURCE(dev);
- size_t esb_len = xive_source_esb_len(xsrc);
+ uint64_t esb_len = xive_source_esb_len(xsrc);
assert(xsrc->xive);
@@ -1280,7 +1399,7 @@ static const VMStateDescription vmstate_xive_source = {
* The default XIVE interrupt source setting for the ESB MMIOs is two
* 64k pages without Store EOI, to be in sync with KVM.
*/
-static Property xive_source_properties[] = {
+static const Property xive_source_properties[] = {
DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
@@ -1291,10 +1410,9 @@ static Property xive_source_properties[] = {
DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF),
DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
XiveNotifier *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive_source_class_init(ObjectClass *klass, void *data)
+static void xive_source_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1537,6 +1655,75 @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
}
+uint32_t xive_get_vpgroup_size(uint32_t nvp_index)
+{
+ /*
+ * Group size is a power of 2. The position of the first 0
+ * (starting with the least significant bits) in the NVP index
+ * gives the size of the group.
+ */
+ int first_zero = cto32(nvp_index);
+ if (first_zero >= 31) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x",
+ nvp_index);
+ return 0;
+ }
+
+ return 1U << (first_zero + 1);
+}
+
+static uint8_t xive_get_group_level(bool crowd, bool ignore,
+ uint32_t nvp_blk, uint32_t nvp_index)
+{
+ int first_zero;
+ uint8_t level;
+
+ if (!ignore) {
+ g_assert(!crowd);
+ return 0;
+ }
+
+ first_zero = cto32(nvp_index);
+ if (first_zero >= 31) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x",
+ nvp_index);
+ return 0;
+ }
+
+ level = (first_zero + 1) & 0b1111;
+ if (crowd) {
+ uint32_t blk;
+
+ /* crowd level is bit position of first 0 from the right in nvp_blk */
+ first_zero = cto32(nvp_blk);
+ if (first_zero >= 31) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd block 0x%08x",
+ nvp_blk);
+ return 0;
+ }
+ blk = first_zero + 1;
+
+ /*
+ * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported.
+ * HW will encode level 4 as the value 3. See xive2_pgofnext().
+ */
+ switch (blk) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ blk = 3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Crowd level bits reside in upper 2 bits of the 6 bit group level */
+ level |= blk << 4;
+ }
+ return level;
+}
+
/*
* The thread context register words are in big-endian format.
*/
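
xive_get_vpgroup_size() above relies on the group encoding of NVP indexes: the low bits of a group index are all ones, and the position of the first zero gives the size, so an index ending in ...0111 covers 2^4 = 16 VPs. A standalone sketch using the complement-and-count-trailing-zeroes trick instead of cto32():

#include <assert.h>
#include <stdint.h>

static uint32_t demo_vpgroup_size(uint32_t nvp_index)
{
    unsigned first_zero;

    if (~nvp_index == 0) {
        return 0;                       /* no zero bit at all: invalid */
    }
    /* counting trailing ones == counting trailing zeroes of the complement */
    first_zero = __builtin_ctz(~nvp_index);
    return first_zero >= 31 ? 0 : 1u << (first_zero + 1);
}

int main(void)
{
    assert(demo_vpgroup_size(0x2f) == 32);  /* ...101111: 4 trailing ones */
    assert(demo_vpgroup_size(0x30) == 2);   /* ...110000: bit 0 already 0 */
    return 0;
}
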
@@ -1603,31 +1790,41 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
/*
* This is our simple Xive Presenter Engine model. It is merged in the
* Router as it does not require an extra object.
- *
- * It receives notification requests sent by the IVRE to find one
- * matching NVT (or more) dispatched on the processor threads. In case
- * of a single NVT notification, the process is abbreviated and the
- * thread is signaled if a match is found. In case of a logical server
- * notification (bits ignored at the end of the NVT identifier), the
- * IVPE and IVRE select a winning thread using different filters. This
- * involves 2 or 3 exchanges on the PowerBus that the model does not
- * support.
- *
- * The parameters represent what is sent on the PowerBus
*/
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
- uint32_t logic_serv)
+ bool crowd, bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, bool *precluded)
{
XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
- XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+ XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
+ uint8_t group_level;
int count;
/*
- * Ask the machine to scan the interrupt controllers for a match
+ * Ask the machine to scan the interrupt controllers for a match.
+ *
+ * For VP-specific notification, we expect at most one match and
+ * one call to the presenters is all we need (abbreviated notify
+ * sequence documented by the architecture).
+ *
+ * For VP-group notification, match_nvt() is the equivalent of the
+ * "histogram" and "poll" commands sent to the power bus to the
+ * presenters. 'count' could be more than one, but we always
+ * select the first match for now. 'precluded' tells if (at least)
+ * one thread matches but can't take the interrupt now because
+ * it's running at a more favored priority. We return the
+ * information to the router so that it can take appropriate
+ * actions (backlog, escalation, broadcast, etc...)
+ *
+ * If we were to implement a better way of dispatching the
+ * interrupt in case of multiple matches (instead of the first
+ * match), we would need a heuristic to elect a thread (for
+ * example, the hardware keeps track of an 'age' in the TIMA) and
+ * a new command to the presenters (the equivalent of the "assign"
+ * power bus command in the documented full notify sequence.
*/
- count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
+ count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, &match);
if (count < 0) {
return false;
@@ -1635,9 +1832,11 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
/* handle CPU exception delivery */
if (count) {
- trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
- xive_tctx_ipb_update(match.tctx, match.ring,
- xive_priority_to_ipb(priority));
+ group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx);
+ trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
+ xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
+ } else {
+ *precluded = match.precluded;
}
return !!count;
@@ -1677,7 +1876,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
uint8_t nvt_blk;
uint32_t nvt_idx;
XiveNVT nvt;
- bool found;
+ bool found, precluded;
uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
@@ -1758,10 +1957,12 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
}
found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
+ false /* crowd */,
xive_get_field32(END_W7_F0_IGNORE, end.w7),
priority,
- xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
-
+ xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
+ &precluded);
+ /* we don't support VP-group notification on P9, so precluded is not used */
/* TODO: Auto EOI. */
if (found) {
@@ -1879,13 +2080,12 @@ void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
xive_router_end_notify_handler(xrtr, &eas);
}
-static Property xive_router_properties[] = {
+static const Property xive_router_properties[] = {
DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
TYPE_XIVE_FABRIC, XiveFabric *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive_router_class_init(ObjectClass *klass, void *data)
+static void xive_router_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
@@ -1908,7 +2108,7 @@ static const TypeInfo xive_router_info = {
.instance_size = sizeof(XiveRouter),
.class_size = sizeof(XiveRouterClass),
.class_init = xive_router_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_NOTIFIER },
{ TYPE_XIVE_PRESENTER },
{ }
@@ -2047,15 +2247,14 @@ static void xive_end_source_realize(DeviceState *dev, Error **errp)
(1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}
-static Property xive_end_source_properties[] = {
+static const Property xive_end_source_properties[] = {
DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
XiveRouter *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive_end_source_class_init(ObjectClass *klass, void *data)
+static void xive_end_source_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 3e7238c..a08cf90 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation..
+ * Copyright (c) 2019-2024, IBM Corporation..
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
@@ -12,12 +11,13 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
-#include "sysemu/cpus.h"
-#include "sysemu/dma.h"
+#include "system/cpus.h"
+#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"
+#include "trace.h"
uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
@@ -26,6 +26,155 @@ uint32_t xive2_router_get_config(Xive2Router *xrtr)
return xrc->get_config(xrtr);
}
+static int xive2_router_get_block_id(Xive2Router *xrtr)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_block_id(xrtr);
+}
+
+static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
+{
+ uint64_t cache_addr;
+
+ cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
+ xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
+ cache_addr <<= 8; /* aligned on a cache line pair */
+ return cache_addr;
+}
+
+static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
+{
+ uint32_t val = 0;
+ uint8_t *ptr, i;
+
+ if (priority > 7) {
+ return 0;
+ }
+
+ /*
+ * The per-priority backlog counters are 24-bit and the structure
+ * is stored in big endian. NVGC is 32-bytes long, so 24-bytes from
+ * w2, which fits 8 priorities * 24-bits per priority.
+ */
+ ptr = (uint8_t *)&nvgc->w2 + priority * 3;
+ for (i = 0; i < 3; i++, ptr++) {
+ val = (val << 8) + *ptr;
+ }
+ return val;
+}
+
+static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
+ uint32_t val)
+{
+ uint8_t *ptr, i;
+ uint32_t shift;
+
+ if (priority > 7) {
+ return;
+ }
+
+ if (val > 0xFFFFFF) {
+ val = 0xFFFFFF;
+ }
+ /*
+ * The per-priority backlog counters are 24-bit and the structure
+ * is stored in big endian
+ */
+ ptr = (uint8_t *)&nvgc->w2 + priority * 3;
+ for (i = 0; i < 3; i++, ptr++) {
+ shift = 8 * (2 - i);
+ *ptr = (val >> shift) & 0xFF;
+ }
+}
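
A standalone sketch (not part of the patch) of the 24-bit big-endian per-priority packing used by the two helpers above. The Nvgc32 struct and the main() harness are illustrative stand-ins, not the real Xive2Nvgc layout or QEMU API; the real helpers index from &nvgc->w2.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t w0, w1;     /* header words, unused in this sketch */
    uint8_t bklog[24];   /* 8 priorities x 3 bytes, big endian */
} Nvgc32;

static uint32_t bklog_get(const Nvgc32 *g, uint8_t prio)
{
    const uint8_t *p = &g->bklog[prio * 3];

    return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

static void bklog_set(Nvgc32 *g, uint8_t prio, uint32_t val)
{
    uint8_t *p = &g->bklog[prio * 3];

    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;  /* counters saturate at 24 bits, as above */
    }
    p[0] = val >> 16;
    p[1] = val >> 8;
    p[2] = val;
}

int main(void)
{
    Nvgc32 g = { 0 };

    bklog_set(&g, 5, 0x123456);
    printf("prio 5 backlog: 0x%x\n", bklog_get(&g, 5)); /* 0x123456 */
    return 0;
}

The increment/decrement/read operations added below simply read the counter with the getter, adjust it, and write it back with the setter before updating the NVGC.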
+
+uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
+ bool crowd,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset, uint16_t val)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
+ uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
+ Xive2Nvgc nvgc;
+ uint32_t count, old_count;
+
+ if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n",
+ crowd ? "NVC" : "NVG", blk, idx);
+ return -1;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx);
+ return -1;
+ }
+
+ old_count = xive2_nvgc_get_backlog(&nvgc, priority);
+ count = old_count;
+ /*
+ * op:
+ * 0b00 => increment
+ * 0b01 => decrement
+ * 0b1- => read
+ */
+ if (op == 0b00 || op == 0b01) {
+ if (op == 0b00) {
+ count += val;
+ } else {
+ if (count > val) {
+ count -= val;
+ } else {
+ count = 0;
+ }
+ }
+ xive2_nvgc_set_backlog(&nvgc, priority, count);
+ xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc);
+ }
+ trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count);
+ return old_count;
+}
+
+uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
+ uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
+ Xive2Nvp nvp;
+ uint8_t ipb, old_ipb, rc;
+
+ if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx);
+ return -1;
+ }
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx);
+ return -1;
+ }
+
+ old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
+ ipb = old_ipb;
+ /*
+ * op:
+ * 0b00 => set priority bit
+ * 0b01 => reset priority bit
+ * 0b1- => read
+ */
+ if (op == 0b00 || op == 0b01) {
+ if (op == 0b00) {
+ ipb |= xive_priority_to_ipb(priority);
+ } else {
+ ipb &= ~xive_priority_to_ipb(priority);
+ }
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+ xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2);
+ }
+ rc = !!(old_ipb & xive_priority_to_ipb(priority));
+ trace_xive_nvp_backlog_op(blk, idx, op, priority, rc);
+ return rc;
+}
+
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
if (!xive2_eas_is_valid(eas)) {
@@ -77,8 +226,8 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
uint32_t qentries = 1 << (qsize + 10);
- uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
- uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
+ uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
+ uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
uint8_t pq;
@@ -89,7 +238,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
pq = xive_get_field32(END2_W1_ESn, end->w1);
g_string_append_printf(buf,
- " %08x %c%c %c%c%c%c%c%c%c%c%c%c "
+ " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
"prio:%d nvp:%02x/%04x",
end_idx,
pq & XIVE_ESB_VAL_P ? 'P' : '-',
@@ -98,13 +247,16 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
xive2_end_is_enqueue(end) ? 'q' : '-',
xive2_end_is_notify(end) ? 'n' : '-',
xive2_end_is_backlog(end) ? 'b' : '-',
+ xive2_end_is_precluded_escalation(end) ? 'p' : '-',
xive2_end_is_escalate(end) ? 'e' : '-',
xive2_end_is_escalate_end(end) ? 'N' : '-',
xive2_end_is_uncond_escalation(end) ? 'u' : '-',
xive2_end_is_silent_escalation(end) ? 's' : '-',
xive2_end_is_firmware1(end) ? 'f' : '-',
xive2_end_is_firmware2(end) ? 'F' : '-',
- priority, nvp_blk, nvp_idx);
+ xive2_end_is_ignore(end) ? 'i' : '-',
+ xive2_end_is_crowd(end) ? 'c' : '-',
+ priority, nvx_blk, nvx_idx);
if (qaddr_base) {
g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
@@ -137,6 +289,55 @@ void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
(uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
+void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
+{
+ uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
+ uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
+ uint64_t cache_line = xive2_nvp_reporting_addr(nvp);
+
+ if (!xive2_nvp_is_valid(nvp)) {
+ return;
+ }
+
+ g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
+ nvp_idx, eq_blk, eq_idx,
+ xive_get_field32(NVP2_W2_IPB, nvp->w2),
+ xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
+ if (cache_line) {
+ g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line);
+ }
+
+ /*
+ * When the NVP is HW controlled, more fields are updated
+ */
+ if (xive2_nvp_is_hw(nvp)) {
+ g_string_append_printf(buf, " CPPR:%02x",
+ xive_get_field32(NVP2_W2_CPPR, nvp->w2));
+ if (xive2_nvp_is_co(nvp)) {
+ g_string_append_printf(buf, " CO:%04x",
+ xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
+ }
+ }
+ g_string_append_c(buf, '\n');
+}
+
+void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
+{
+ uint8_t i;
+
+ if (!xive2_nvgc_is_valid(nvgc)) {
+ return;
+ }
+
+ g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx,
+ xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
+ for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
+ g_string_append_printf(buf, "[%d]=0x%x ",
+ i, xive2_nvgc_get_backlog(nvgc, i));
+ }
+ g_string_append_printf(buf, "\n");
+}
+
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
uint64_t qaddr_base = xive2_end_qaddr(end);
@@ -166,6 +367,115 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
+static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx,
+ uint8_t next_level)
+{
+ uint32_t mask, next_idx;
+ uint8_t next_blk;
+
+ /*
+ * Adjust the block and index of a VP for the next group/crowd
+ * size (PGofFirst/PGofNext field in the NVP and NVGC structures).
+ *
+ * The 6-bit group level is split into a 2-bit crowd level and a
+ * 4-bit group level, with similar encodings. However, we don't
+ * support a crowd size of 8, so a crowd level of 0b11 is bumped to
+ * a crowd size of 16.
+ */
+ next_blk = NVx_CROWD_LVL(next_level);
+ if (next_blk == 3) {
+ next_blk = 4;
+ }
+ mask = (1 << next_blk) - 1;
+ *nvgc_blk &= ~mask;
+ *nvgc_blk |= mask >> 1;
+
+ next_idx = NVx_GROUP_LVL(next_level);
+ mask = (1 << next_idx) - 1;
+ *nvgc_idx &= ~mask;
+ *nvgc_idx |= mask >> 1;
+}
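
A sketch (outside the patch) of the same block/index adjustment, assuming the crowd level occupies the top 2 bits and the group level the bottom 4 bits of the 6-bit field; the NVx_CROWD_LVL/NVx_GROUP_LVL macros are not reproduced here.

#include <stdint.h>
#include <stdio.h>

static void pgofnext(uint8_t *blk, uint32_t *idx, uint8_t level)
{
    uint8_t crowd_bits = (level >> 4) & 0x3;  /* assumed bit layout */
    uint8_t group_bits = level & 0xf;
    uint32_t mask;

    if (crowd_bits == 3) {
        crowd_bits = 4;                       /* no crowd size of 8 */
    }
    mask = (1 << crowd_bits) - 1;
    *blk = (*blk & ~mask) | (mask >> 1);      /* crowd bits of the block */

    mask = (1 << group_bits) - 1;
    *idx = (*idx & ~mask) | (mask >> 1);      /* group bits of the index */
}

int main(void)
{
    uint8_t blk = 0x2;
    uint32_t idx = 0x17;

    pgofnext(&blk, &idx, 0x03);   /* group of 8 VPs, no crowd */
    /* low 3 index bits are replaced by 0b011: 0x17 -> 0x13 */
    printf("NVG at %x/%x\n", blk, (unsigned)idx);
    return 0;
}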
+
+/*
+ * Scan the group chain and return the highest priority and group
+ * level of pending group interrupts.
+ */
+static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr,
+ uint8_t nvx_blk, uint32_t nvx_idx,
+ uint8_t first_group,
+ uint8_t *out_level)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t nvgc_idx;
+ uint32_t current_level, count;
+ uint8_t nvgc_blk, prio;
+ Xive2Nvgc nvgc;
+
+ for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
+ current_level = first_group & 0x3F;
+ nvgc_blk = nvx_blk;
+ nvgc_idx = nvx_idx;
+
+ while (current_level) {
+ xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level);
+
+ if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level),
+ nvgc_blk, nvgc_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return 0xFF;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return 0xFF;
+ }
+
+ count = xive2_nvgc_get_backlog(&nvgc, prio);
+ if (count) {
+ *out_level = current_level;
+ return prio;
+ }
+ current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F;
+ }
+ }
+ return 0xFF;
+}
+
+static void xive2_presenter_backlog_decr(XivePresenter *xptr,
+ uint8_t nvx_blk, uint32_t nvx_idx,
+ uint8_t group_prio,
+ uint8_t group_level)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t nvgc_idx, count;
+ uint8_t nvgc_blk;
+ Xive2Nvgc nvgc;
+
+ nvgc_blk = nvx_blk;
+ nvgc_idx = nvx_idx;
+ xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level);
+
+ if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level),
+ nvgc_blk, nvgc_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return;
+ }
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
+ nvgc_blk, nvgc_idx);
+ return;
+ }
+ count = xive2_nvgc_get_backlog(&nvgc, group_prio);
+ if (!count) {
+ return;
+ }
+ xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
+ xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level),
+ nvgc_blk, nvgc_idx, &nvgc);
+}
+
/*
* XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
*
@@ -181,13 +491,14 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
* the NVP by changing the H bit while the context is enabled
*/
-static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
- uint8_t nvp_blk, uint32_t nvp_idx)
+static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx,
+ uint8_t ring)
{
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
Xive2Nvp nvp;
- uint8_t *regs = &tctx->regs[TM_QW1_OS];
+ uint8_t *regs = &tctx->regs[ring];
if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
@@ -223,7 +534,19 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
- nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ if (nvp.w0 & NVP2_W0_L) {
+ /*
+ * Typically not used. If LSMFB is restored with 0, it will
+ * force a backlog rescan
+ */
+ nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ }
+ if (nvp.w0 & NVP2_W0_G) {
+ nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
+ }
+ if (nvp.w0 & NVP2_W0_T) {
+ nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
+ }
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
@@ -232,44 +555,190 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}
-static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
- uint32_t *nvp_idx, bool *vo, bool *ho)
+static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
+ uint32_t *nvp_idx, bool *valid, bool *hw)
{
*nvp_blk = xive2_nvp_blk(cam);
*nvp_idx = xive2_nvp_idx(cam);
- *vo = !!(cam & TM2_QW1W2_VO);
- *ho = !!(cam & TM2_QW1W2_HO);
+ *valid = !!(cam & TM2_W2_VALID);
+ *hw = !!(cam & TM2_W2_HW);
}
-uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
- hwaddr offset, unsigned size)
+/*
+ * Encode the HW CAM line with a 7-bit or 8-bit thread id. The thread id
+ * width and block id width are configurable at the IC level.
+ *
+ * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
+ * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
+ */
+static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
Xive2Router *xrtr = XIVE2_ROUTER(xptr);
- uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
- uint32_t qw1w2_new;
- uint32_t cam = be32_to_cpu(qw1w2);
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ uint8_t blk = xive2_router_get_block_id(xrtr);
+ uint8_t tid_shift =
+ xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
+ uint8_t tid_mask = (1 << tid_shift) - 1;
+
+ return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
+}
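
A small sketch (not from the patch) of the thread-id half of this encoding; the block packing done by xive2_nvp_cam_line() is left out.

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_cam_tid(uint32_t pir, int tid_8bits)
{
    uint8_t tid_shift = tid_8bits ? 8 : 7;
    uint32_t tid_mask = (1u << tid_shift) - 1;

    /* fixed '1' bit just above the thread id, as in the comment above */
    return (1u << tid_shift) | (pir & tid_mask);
}

int main(void)
{
    printf("7-bit tid: 0x%x\n", hw_cam_tid(0x25, 0));  /* 0x80  | 0x25 = 0xa5  */
    printf("8-bit tid: 0x%x\n", hw_cam_tid(0x125, 1)); /* 0x100 | 0x25 = 0x125 */
    return 0;
}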
+
+static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size, uint8_t ring)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
+ uint32_t cam = be32_to_cpu(target_ringw2);
uint8_t nvp_blk;
uint32_t nvp_idx;
- bool vo;
+ uint8_t cur_ring;
+ bool valid;
bool do_save;
- xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);
+ xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);
- if (!vo) {
+ if (!valid) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
nvp_blk, nvp_idx);
}
- /* Invalidate CAM line */
- qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
- memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);
+ /* Invalidate CAM line of requested ring and all lower rings */
+ for (cur_ring = TM_QW0_USER; cur_ring <= ring;
+ cur_ring += XIVE_TM_RING_SIZE) {
+ uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
+ uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
+ memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
+ }
if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
- xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
+ xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
+ }
+
+ /*
+ * Lower external interrupt line of requested ring and below except for
+ * USER, which doesn't exist.
+ */
+ for (cur_ring = TM_QW1_OS; cur_ring <= ring;
+ cur_ring += XIVE_TM_RING_SIZE) {
+ xive_tctx_reset_signal(tctx, cur_ring);
+ }
+ return target_ringw2;
+}
+
+uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
+}
+
+#define REPORT_LINE_GEN1_SIZE 16
+
+static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
+ uint8_t size)
+{
+ uint8_t *regs = tctx->regs;
+
+ g_assert(size == REPORT_LINE_GEN1_SIZE);
+ memset(data, 0, size);
+ /*
+ * See the XIVE architecture for a description of what is saved. It
+ * is hand-picked information to fit in 16 bytes.
+ */
+ data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
+ data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
+ data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
+ data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
+ data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
+ data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
+ data[0x6] = 0xFF;
+ data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
+ data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
+ data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
+ data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
+ data[0x8] = regs[TM_QW1_OS + TM_NSR];
+ data[0x9] = regs[TM_QW1_OS + TM_CPPR];
+ data[0xA] = regs[TM_QW1_OS + TM_IPB];
+ data[0xB] = regs[TM_QW1_OS + TM_LGS];
+ if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
+ /*
+ * Logical server extension, except VU bit replaced by EB bit
+ * from NSR
+ */
+ data[0xC] = regs[TM_QW0_USER + TM_WORD2];
+ data[0xC] &= ~0x80;
+ data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
+ data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
+ data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
+ data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
+ }
+}
+
+static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value,
+ unsigned size, uint8_t ring)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
+ uint8_t nvp_blk;
+ Xive2Nvp nvp;
+ uint64_t phys_addr;
+ MemTxResult result;
+
+ hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
+ nvp_blk = xive2_nvp_blk(hw_cam);
+ nvp_idx = xive2_nvp_idx(hw_cam);
+
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
}
- xive_tctx_reset_os_signal(tctx);
- return qw1w2;
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ xive2_cfg = xive2_router_get_config(xrtr);
+
+ phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
+ if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
+ uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];
+
+ xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
+ result = dma_memory_write(&address_space_memory, phys_addr,
+ pull_ctxt, REPORT_LINE_GEN1_SIZE,
+ MEMTXATTRS_UNSPECIFIED);
+ assert(result == MEMTX_OK);
+ } else {
+ result = dma_memory_write(&address_space_memory, phys_addr,
+ &tctx->regs, sizeof(tctx->regs),
+ MEMTXATTRS_UNSPECIFIED);
+ assert(result == MEMTX_OK);
+ reserved = 0xFFFFFFFF;
+ result = dma_memory_write(&address_space_memory, phys_addr + 12,
+ &reserved, sizeof(reserved),
+ MEMTXATTRS_UNSPECIFIED);
+ assert(result == MEMTX_OK);
+ }
+
+ /* the rest is similar to pull context to registers */
+ xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
+}
+
+void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
+}
+
+
+void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}
static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
@@ -291,7 +760,9 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
- /* we don't model LSMFB */
+ tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
+ tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
+ tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);
nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
@@ -314,8 +785,15 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
uint8_t nvp_blk, uint32_t nvp_idx,
bool do_restore)
{
- Xive2Nvp nvp;
+ XivePresenter *xptr = XIVE_PRESENTER(xrtr);
uint8_t ipb;
+ uint8_t backlog_level;
+ uint8_t group_level;
+ uint8_t first_group;
+ uint8_t backlog_prio;
+ uint8_t group_prio;
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+ Xive2Nvp nvp;
/*
* Grab the associated thread interrupt context registers in the
@@ -344,15 +822,29 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
}
+ regs[TM_IPB] |= ipb;
+ backlog_prio = xive_ipb_to_pipr(ipb);
+ backlog_level = 0;
+
+ first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
+ if (first_group && regs[TM_LSMFB] < backlog_prio) {
+ group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx,
+ first_group, &group_level);
+ regs[TM_LSMFB] = group_prio;
+ if (regs[TM_LGS] && group_prio < backlog_prio) {
+ /* VP can take a group interrupt */
+ xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
+ group_prio, group_level);
+ backlog_prio = group_prio;
+ backlog_level = group_level;
+ }
+ }
+
/*
- * Always call xive_tctx_ipb_update(). Even if there were no
- * escalation triggered, there could be a pending interrupt which
- * was saved when the context was pulled and that we need to take
- * into account by recalculating the PIPR (which is not
- * saved/restored).
- * It will also raise the External interrupt signal if needed.
+ * Compute the PIPR based on the restored state.
+ * It will raise the External interrupt signal if needed.
*/
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
}
/*
@@ -361,17 +853,31 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
- uint32_t cam = value;
- uint32_t qw1w2 = cpu_to_be32(cam);
+ uint32_t cam;
+ uint32_t qw1w2;
+ uint64_t qw1dw1;
uint8_t nvp_blk;
uint32_t nvp_idx;
bool vo;
bool do_restore;
- xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
-
/* First update the thread context */
- memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+ switch (size) {
+ case 4:
+ cam = value;
+ qw1w2 = cpu_to_be32(cam);
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+ break;
+ case 8:
+ cam = value >> 32;
+ qw1dw1 = cpu_to_be64(value);
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
/* Check the interrupt pending bits */
if (vo) {
@@ -380,6 +886,185 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
}
}
+static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring,
+ uint32_t *nvp_blk, uint32_t *nvp_idx)
+{
+ uint32_t w2, cam;
+
+ w2 = xive_tctx_word2(&tctx->regs[ring]);
+ switch (ring) {
+ case TM_QW1_OS:
+ if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) {
+ return -1;
+ }
+ cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2);
+ break;
+ case TM_QW2_HV_POOL:
+ if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) {
+ return -1;
+ }
+ cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2);
+ break;
+ case TM_QW3_HV_PHYS:
+ if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) {
+ return -1;
+ }
+ cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx);
+ break;
+ default:
+ return -1;
+ }
+ *nvp_blk = xive2_nvp_blk(cam);
+ *nvp_idx = xive2_nvp_idx(cam);
+ return 0;
+}
+
+static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
+{
+ uint8_t *regs = &tctx->regs[ring];
+ Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr);
+ uint8_t old_cppr, backlog_prio, first_group, group_level = 0;
+ uint8_t pipr_min, lsmfb_min, ring_min;
+ bool group_enabled;
+ uint32_t nvp_blk, nvp_idx;
+ Xive2Nvp nvp;
+ int rc;
+
+ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ cppr, regs[TM_NSR]);
+
+ if (cppr > XIVE_PRIORITY_MAX) {
+ cppr = 0xff;
+ }
+
+ old_cppr = regs[TM_CPPR];
+ regs[TM_CPPR] = cppr;
+
+ /*
+ * Recompute the PIPR based on local pending interrupts. It will
+ * be adjusted below if needed in case of pending group interrupts.
+ */
+ pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
+ group_enabled = !!regs[TM_LGS];
+ lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff;
+ ring_min = ring;
+
+ /* PHYS updates also depend on POOL values */
+ if (ring == TM_QW3_HV_PHYS) {
+ uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL];
+
+ /* POOL values only matter if POOL ctx is valid */
+ if (pregs[TM_WORD2] & 0x80) {
+
+ uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]);
+ uint8_t pool_lsmfb = pregs[TM_LSMFB];
+
+ /*
+ * Determine highest priority interrupt and
+ * remember which ring has it.
+ */
+ if (pool_pipr < pipr_min) {
+ pipr_min = pool_pipr;
+ if (pool_pipr < lsmfb_min) {
+ ring_min = TM_QW2_HV_POOL;
+ }
+ }
+
+ /* Values needed for group priority calculation */
+ if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) {
+ group_enabled = true;
+ lsmfb_min = pool_lsmfb;
+ if (lsmfb_min < pipr_min) {
+ ring_min = TM_QW2_HV_POOL;
+ }
+ }
+ }
+ }
+ regs[TM_PIPR] = pipr_min;
+
+ rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx);
+ if (rc) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n");
+ return;
+ }
+
+ if (cppr < old_cppr) {
+ /*
+ * FIXME: check if there's a group interrupt being presented
+ * and if the new cppr prevents it. If so, then the group
+ * interrupt needs to be re-added to the backlog and
+ * re-triggered (see re-trigger END info in the NVGC
+ * structure)
+ */
+ }
+
+ if (group_enabled &&
+ lsmfb_min < cppr &&
+ lsmfb_min < regs[TM_PIPR]) {
+ /*
+ * Thread has seen a group interrupt with a higher priority
+ * than the new cppr or pending local interrupt. Check the
+ * backlog
+ */
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
+ if (!first_group) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ backlog_prio = xive2_presenter_backlog_scan(tctx->xptr,
+ nvp_blk, nvp_idx,
+ first_group, &group_level);
+ tctx->regs[ring_min + TM_LSMFB] = backlog_prio;
+ if (backlog_prio != 0xFF) {
+ xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx,
+ backlog_prio, group_level);
+ regs[TM_PIPR] = backlog_prio;
+ }
+ }
+ /* CPPR has changed, check if we need to raise a pending exception */
+ xive_tctx_notify(tctx, ring_min, group_level);
+}
+
+void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
+}
+
+void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
+}
+
+static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ regs[TM_T] = target;
+}
+
+void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
+}
+
/*
* XIVE Router (aka. Virtualization Controller or IVRE)
*/
@@ -442,31 +1127,63 @@ int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}
-static int xive2_router_get_block_id(Xive2Router *xrtr)
+int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc)
{
Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
- return xrc->get_block_id(xrtr);
+ return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}
-/*
- * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
- * width and block id width is configurable at the IC level.
- *
- * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
- * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
- */
-static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
+int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc)
{
- Xive2Router *xrtr = XIVE2_ROUTER(xptr);
- CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
- uint32_t pir = env->spr_cb[SPR_PIR].default_value;
- uint8_t blk = xive2_router_get_block_id(xrtr);
- uint8_t tid_shift =
- xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
- uint8_t tid_mask = (1 << tid_shift) - 1;
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
- return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
+ return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
+}
+
+static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
+ uint32_t vp_mask)
+{
+ return (cam1 & vp_mask) == (cam2 & vp_mask);
+}
+
+static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd)
+{
+ uint8_t block_mask = 0b1111;
+
+ /* 3 supported crowd sizes: 2, 4, 16 */
+ if (crowd) {
+ uint32_t size = xive_get_vpgroup_size(nvt_blk);
+
+ if (size != 2 && size != 4 && size != 16) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of %d",
+ size);
+ return block_mask;
+ }
+ block_mask &= ~(size - 1);
+ }
+ return block_mask;
+}
+
+static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore)
+{
+ uint32_t index_mask = 0xFFFFFF; /* 24 bits */
+
+ if (cam_ignore) {
+ uint32_t size = xive_get_vpgroup_size(nvt_index);
+
+ if (size < 2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group size of %d",
+ size);
+ return index_mask;
+ }
+ index_mask &= ~(size - 1);
+ }
+ return index_mask;
}
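
A minimal sketch (not part of the patch) of the masked CAM compare these helpers enable for VP-group notification. xive_get_vpgroup_size() is not reproduced; the group size is passed in directly and assumed to be a power of two.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool group_match(uint32_t target_idx, uint32_t thread_idx,
                        uint32_t group_size)
{
    /* low log2(group_size) bits of the 24-bit NVP index are ignored */
    uint32_t index_mask = 0xFFFFFF & ~(group_size - 1);

    return (target_idx & index_mask) == (thread_idx & index_mask);
}

int main(void)
{
    /* group of 16 VPs: indexes 0x1230..0x123f all match 0x1237 */
    printf("%d\n", group_match(0x1237, 0x123a, 16)); /* 1 */
    printf("%d\n", group_match(0x1237, 0x1242, 16)); /* 0 */
    return 0;
}

A crowd works the same way on the 4-bit block number, using the block mask computed above.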
/*
@@ -475,7 +1192,8 @@ static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint32_t logic_serv)
+ bool crowd, bool cam_ignore,
+ uint32_t logic_serv)
{
uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
@@ -483,44 +1201,51 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
- /*
- * TODO (PowerNV): ignore mode. The low order bits of the NVT
- * identifier are ignored in the "CAM" match.
- */
+ uint32_t index_mask, vp_mask;
+ uint8_t block_mask;
if (format == 0) {
- if (cam_ignore == true) {
- /*
- * F=0 & i=1: Logical server notification (bits ignored at
- * the end of the NVT identifier)
- */
- qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
- nvt_blk, nvt_idx);
- return -1;
- }
+ /*
+ * i=0: Specific NVT notification
+ * i=1: VP-group notification (bits ignored at the end of the
+ * NVT identifier)
+ */
+ block_mask = xive2_get_vp_block_mask(nvt_blk, crowd);
+ index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore);
+ vp_mask = xive2_nvp_cam_line(block_mask, index_mask);
- /* F=0 & i=0: Specific NVT notification */
+ /* For VP-group notifications, threads with LGS=0 are excluded */
/* PHYS ring */
if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
- cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
+ !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive2_tctx_hw_cam_line(xptr, tctx),
+ vp_mask)) {
return TM_QW3_HV_PHYS;
}
/* HV POOL ring */
if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
- cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
+ !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
+ vp_mask)) {
return TM_QW2_HV_POOL;
}
/* OS ring */
if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
- cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
+ !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
+ xive2_vp_match_mask(cam,
+ xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
+ vp_mask)) {
return TM_QW1_OS;
}
} else {
/* F=1 : User level Event-Based Branch (EBB) notification */
+ /* FIXME: what if cam_ignore and LGS = 0 ? */
/* USER ring */
if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
(cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
@@ -532,6 +1257,37 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
return -1;
}
+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+ /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+ uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+ uint8_t *alt_regs = &tctx->regs[alt_ring];
+
+ /*
+ * The xive2_presenter_tctx_match() above tells if there's a match
+ * but for VP-group notification, we still need to look at the
+ * priority to know if the thread can take the interrupt now or if
+ * it is precluded.
+ */
+ if (priority < alt_regs[TM_CPPR]) {
+ return false;
+ }
+ return true;
+}
+
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ /*
+ * Called by the router during a VP-group notification when the
+ * thread matches but can't take the interrupt because it's
+ * already running at a more favored priority. It then stores the
+ * new interrupt priority in the LSMFB field.
+ */
+ regs[TM_LSMFB] = priority;
+}
+
static void xive2_router_realize(DeviceState *dev, Error **errp)
{
Xive2Router *xrtr = XIVE2_ROUTER(dev);
@@ -571,10 +1327,9 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
Xive2End end;
uint8_t priority;
uint8_t format;
- bool found;
- Xive2Nvp nvp;
- uint8_t nvp_blk;
- uint32_t nvp_idx;
+ bool found, precluded;
+ uint8_t nvx_blk;
+ uint32_t nvx_idx;
/* END cache lookup */
if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
@@ -589,6 +1344,12 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
return;
}
+ if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n");
+ return;
+ }
+
if (xive2_end_is_enqueue(&end)) {
xive2_end_enqueue(&end, end_data);
/* Enqueuing event data modifies the EQ toggle and index */
@@ -633,26 +1394,14 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
/*
* Follows IVPE notification
*/
- nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
- nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
-
- /* NVP cache lookup */
- if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
- qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
- nvp_blk, nvp_idx);
- return;
- }
+ nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
+ nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
- if (!xive2_nvp_is_valid(&nvp)) {
- qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
- nvp_blk, nvp_idx);
- return;
- }
-
- found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
- xive_get_field32(END2_W6_IGNORE, end.w7),
+ found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx,
+ xive2_end_is_crowd(&end), xive2_end_is_ignore(&end),
priority,
- xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
+ xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
+ &precluded);
/* TODO: Auto EOI. */
@@ -663,10 +1412,9 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
/*
* If no matching NVP is dispatched on a HW thread :
* - specific VP: update the NVP structure if backlog is activated
- * - logical server : forward request to IVPE (not supported)
+ * - VP-group: update the backlog counter for that priority in the NVG
*/
if (xive2_end_is_backlog(&end)) {
- uint8_t ipb;
if (format == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -675,19 +1423,82 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
return;
}
- /*
- * Record the IPB in the associated NVP structure for later
- * use. The presenter will resend the interrupt when the vCPU
- * is dispatched again on a HW thread.
- */
- ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
- xive_priority_to_ipb(priority);
- nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
- xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+ if (!xive2_end_is_ignore(&end)) {
+ uint8_t ipb;
+ Xive2Nvp nvp;
- /*
- * On HW, follows a "Broadcast Backlog" to IVPEs
- */
+ /* NVP cache lookup */
+ if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ /*
+ * Record the IPB in the associated NVP structure for later
+ * use. The presenter will resend the interrupt when the vCPU
+ * is dispatched again on a HW thread.
+ */
+ ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
+ xive_priority_to_ipb(priority);
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+ xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2);
+ } else {
+ Xive2Nvgc nvgc;
+ uint32_t backlog;
+ bool crowd;
+
+ crowd = xive2_end_is_crowd(&end);
+
+ /*
+ * For groups and crowds, the per-priority backlog
+ * counters are stored in the NVG/NVC structures
+ */
+ if (xive2_router_get_nvgc(xrtr, crowd,
+ nvx_blk, nvx_idx, &nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n",
+ crowd ? "NVC" : "NVG", nvx_blk, nvx_idx);
+ return;
+ }
+
+ if (!xive2_nvgc_is_valid(&nvgc)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
+ nvx_blk, nvx_idx);
+ return;
+ }
+
+ /*
+ * Increment the backlog counter for that priority.
+ * We only call broadcast the first time the counter is
+ * incremented. The broadcast will set the LSMFB field in the TIMA
+ * of the relevant threads so that they know an interrupt is pending.
+ */
+ backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1;
+ xive2_nvgc_set_backlog(&nvgc, priority, backlog);
+ xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc);
+
+ if (backlog == 1) {
+ XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
+ xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx,
+ xive2_end_is_crowd(&end),
+ xive2_end_is_ignore(&end),
+ priority);
+
+ if (!xive2_end_is_precluded_escalation(&end)) {
+ /*
+ * The interrupt will be picked up when the
+ * matching thread lowers its priority level
+ */
+ return;
+ }
+ }
+ }
}
do_escalation:
@@ -774,13 +1585,12 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
xive_get_field64(EAS2_END_DATA, eas.w));
}
-static Property xive2_router_properties[] = {
+static const Property xive2_router_properties[] = {
DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
TYPE_XIVE_FABRIC, XiveFabric *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive2_router_class_init(ObjectClass *klass, void *data)
+static void xive2_router_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
@@ -799,7 +1609,7 @@ static const TypeInfo xive2_router_info = {
.instance_size = sizeof(Xive2Router),
.class_size = sizeof(Xive2RouterClass),
.class_init = xive2_router_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_NOTIFIER },
{ TYPE_XIVE_PRESENTER },
{ }
@@ -988,15 +1798,14 @@ static void xive2_end_source_realize(DeviceState *dev, Error **errp)
(1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}
-static Property xive2_end_source_properties[] = {
+static const Property xive2_end_source_properties[] = {
DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
Xive2Router *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xive2_end_source_class_init(ObjectClass *klass, void *data)
+static void xive2_end_source_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c
index 12bd1a3..9200585 100644
--- a/hw/intc/xlnx-pmu-iomod-intc.c
+++ b/hw/intc/xlnx-pmu-iomod-intc.c
@@ -474,11 +474,10 @@ static const MemoryRegionOps xlnx_pmu_io_intc_ops = {
},
};
-static Property xlnx_pmu_io_intc_properties[] = {
+static const Property xlnx_pmu_io_intc_properties[] = {
DEFINE_PROP_UINT32("intc-intr-size", XlnxPMUIOIntc, cfg.intr_size, 0),
DEFINE_PROP_UINT32("intc-level-edge", XlnxPMUIOIntc, cfg.level_edge, 0),
DEFINE_PROP_UINT32("intc-positive", XlnxPMUIOIntc, cfg.positive, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static void xlnx_pmu_io_intc_realize(DeviceState *dev, Error **errp)
@@ -532,11 +531,11 @@ static const VMStateDescription vmstate_xlnx_pmu_io_intc = {
}
};
-static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, void *data)
+static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xlnx_pmu_io_intc_reset;
+ device_class_set_legacy_reset(dc, xlnx_pmu_io_intc_reset);
dc->realize = xlnx_pmu_io_intc_realize;
dc->vmsd = &vmstate_xlnx_pmu_io_intc;
device_class_set_props(dc, xlnx_pmu_io_intc_properties);
diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c
index 509ee79..610cd0e 100644
--- a/hw/intc/xlnx-zynqmp-ipi.c
+++ b/hw/intc/xlnx-zynqmp-ipi.c
@@ -355,11 +355,11 @@ static const VMStateDescription vmstate_zynqmp_pmu_ipi = {
}
};
-static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, void *data)
+static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xlnx_zynqmp_ipi_reset;
+ device_class_set_legacy_reset(dc, xlnx_zynqmp_ipi_reset);
dc->realize = xlnx_zynqmp_ipi_realize;
dc->vmsd = &vmstate_zynqmp_pmu_ipi;
}
diff --git a/hw/ipack/Kconfig b/hw/ipack/Kconfig
index f8da24a..28d6687 100644
--- a/hw/ipack/Kconfig
+++ b/hw/ipack/Kconfig
@@ -1,4 +1,8 @@
config IPACK
bool
+
+config TPCI200
+ bool
+ select IPACK
default y if PCI_DEVICES
depends on PCI
diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c
index c39dbb4..ab602bf 100644
--- a/hw/ipack/ipack.c
+++ b/hw/ipack/ipack.c
@@ -55,30 +55,26 @@ static void ipack_device_realize(DeviceState *dev, Error **errp)
}
bus->free_slot = idev->slot + 1;
- idev->irq = qemu_allocate_irqs(bus->set_irq, idev, 2);
+ qemu_init_irqs(idev->irq, ARRAY_SIZE(idev->irq), bus->set_irq, idev);
k->realize(dev, errp);
}
static void ipack_device_unrealize(DeviceState *dev)
{
- IPackDevice *idev = IPACK_DEVICE(dev);
IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(dev);
if (k->unrealize) {
k->unrealize(dev);
return;
}
-
- qemu_free_irqs(idev->irq, 2);
}
-static Property ipack_device_props[] = {
+static const Property ipack_device_props[] = {
DEFINE_PROP_INT32("slot", IPackDevice, slot, -1),
- DEFINE_PROP_END_OF_LIST()
};
-static void ipack_device_class_init(ObjectClass *klass, void *data)
+static void ipack_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
diff --git a/hw/ipack/meson.build b/hw/ipack/meson.build
index 26567f1..e480522 100644
--- a/hw/ipack/meson.build
+++ b/hw/ipack/meson.build
@@ -1 +1,2 @@
-system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c', 'tpci200.c'))
+system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c'))
+system_ss.add(when: 'CONFIG_TPCI200', if_true: files('tpci200.c'))
diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c
index 88eef4b..40b3051 100644
--- a/hw/ipack/tpci200.c
+++ b/hw/ipack/tpci200.c
@@ -275,11 +275,11 @@ static void tpci200_write_las0(void *opaque, hwaddr addr, uint64_t val,
if (ip != NULL) {
if (val & STATUS_INT(i, 0)) {
DPRINTF("Clear IP %c INT0# status\n", 'A' + i);
- qemu_irq_lower(ip->irq[0]);
+ qemu_irq_lower(&ip->irq[0]);
}
if (val & STATUS_INT(i, 1)) {
DPRINTF("Clear IP %c INT1# status\n", 'A' + i);
- qemu_irq_lower(ip->irq[1]);
+ qemu_irq_lower(&ip->irq[1]);
}
}
@@ -344,7 +344,7 @@ static uint64_t tpci200_read_las1(void *opaque, hwaddr addr, unsigned size)
bool int_set = s->status & STATUS_INT(ip_n, intno);
bool int_edge_sensitive = s->ctrl[ip_n] & CTRL_INT_EDGE(intno);
if (int_set && !int_edge_sensitive) {
- qemu_irq_lower(ip->irq[intno]);
+ qemu_irq_lower(&ip->irq[intno]);
}
}
@@ -629,7 +629,7 @@ static const VMStateDescription vmstate_tpci200 = {
}
};
-static void tpci200_class_init(ObjectClass *klass, void *data)
+static void tpci200_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -650,7 +650,7 @@ static const TypeInfo tpci200_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(TPCI200State),
.class_init = tpci200_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
index bbb07b1..b91e487 100644
--- a/hw/ipmi/ipmi.c
+++ b/hw/ipmi/ipmi.c
@@ -26,7 +26,7 @@
#include "hw/ipmi/ipmi.h"
#include "hw/qdev-properties.h"
#include "qom/object_interfaces.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/nmi.h"
@@ -78,7 +78,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
}
}
-static void ipmi_interface_class_init(ObjectClass *class, void *data)
+static void ipmi_interface_class_init(ObjectClass *class, const void *data)
{
IPMIInterfaceClass *ik = IPMI_INTERFACE_CLASS(class);
@@ -108,12 +108,11 @@ void ipmi_bmc_find_and_link(Object *obj, Object **bmc)
OBJ_PROP_LINK_STRONG);
}
-static Property ipmi_bmc_properties[] = {
+static const Property ipmi_bmc_properties[] = {
DEFINE_PROP_UINT8("slave_addr", IPMIBmc, slave_addr, 0x20),
- DEFINE_PROP_END_OF_LIST(),
};
-static void bmc_class_init(ObjectClass *oc, void *data)
+static void bmc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c
index 29c5af3..9f1ba7b 100644
--- a/hw/ipmi/ipmi_bmc_extern.c
+++ b/hw/ipmi/ipmi_bmc_extern.c
@@ -142,7 +142,6 @@ static void continue_send(IPMIBmcExtern *ibe)
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 4000000000ULL);
}
}
- return;
}
static void extern_timeout(void *opaque)
@@ -214,7 +213,7 @@ static void ipmi_bmc_extern_handle_command(IPMIBmc *b,
rsp[2] = err;
ibe->waiting_rsp = false;
k->handle_rsp(s, msg_id, rsp, 3);
- goto out;
+ return;
}
addchar(ibe, msg_id);
@@ -229,9 +228,6 @@ static void ipmi_bmc_extern_handle_command(IPMIBmc *b,
/* Start the transmit */
continue_send(ibe);
-
- out:
- return;
}
static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op)
@@ -497,8 +493,6 @@ static void ipmi_bmc_extern_realize(DeviceState *dev, Error **errp)
qemu_chr_fe_set_handlers(&ibe->chr, can_receive, receive,
chr_event, NULL, ibe, NULL, true);
-
- vmstate_register(NULL, 0, &vmstate_ipmi_bmc_extern, ibe);
}
static void ipmi_bmc_extern_init(Object *obj)
@@ -515,12 +509,11 @@ static void ipmi_bmc_extern_finalize(Object *obj)
timer_free(ibe->extern_timer);
}
-static Property ipmi_bmc_extern_properties[] = {
+static const Property ipmi_bmc_extern_properties[] = {
DEFINE_PROP_CHR("chardev", IPMIBmcExtern, chr),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
+static void ipmi_bmc_extern_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
@@ -529,6 +522,7 @@ static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
bk->handle_reset = ipmi_bmc_extern_handle_reset;
dc->hotpluggable = false;
dc->realize = ipmi_bmc_extern_realize;
+ dc->vmsd = &vmstate_ipmi_bmc_extern;
device_class_set_props(dc, ipmi_bmc_extern_properties);
}
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index 33c839c..04e1dcd 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -23,7 +23,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/timer.h"
#include "hw/ipmi/ipmi.h"
#include "qemu/error-report.h"
@@ -70,6 +70,7 @@
#define IPMI_CMD_GET_MSG 0x33
#define IPMI_CMD_SEND_MSG 0x34
#define IPMI_CMD_READ_EVT_MSG_BUF 0x35
+#define IPMI_CMD_GET_CHANNEL_INFO 0x42
#define IPMI_NETFN_STORAGE 0x0a
@@ -234,6 +235,7 @@ struct IPMIBmcSim {
#define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(s) \
(IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE & (s)->msg_flags)
+#define IPMI_BMC_GLOBAL_ENABLES_SUPPORTED 0x0f
#define IPMI_BMC_RCV_MSG_QUEUE_INT_BIT 0
#define IPMI_BMC_EVBUF_FULL_INT_BIT 1
#define IPMI_BMC_EVENT_MSG_BUF_BIT 2
@@ -463,14 +465,12 @@ void ipmi_bmc_gen_event(IPMIBmc *b, uint8_t *evt, bool log)
}
if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) {
- goto out;
+ return;
}
memcpy(ibs->evtbuf, evt, 16);
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
k->set_atn(s, 1, attn_irq_enabled(ibs));
- out:
- return;
}
static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert,
uint8_t evd1, uint8_t evd2, uint8_t evd3)
@@ -513,7 +513,8 @@ static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert,
static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor,
unsigned int bit, unsigned int val,
- uint8_t evd1, uint8_t evd2, uint8_t evd3)
+ uint8_t evd1, uint8_t evd2, uint8_t evd3,
+ bool do_log)
{
IPMISensor *sens;
uint16_t mask;
@@ -533,7 +534,7 @@ static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor,
return; /* Already asserted */
}
sens->assert_states |= mask & sens->assert_suppt;
- if (sens->assert_enable & mask & sens->assert_states) {
+ if (do_log && (sens->assert_enable & mask & sens->assert_states)) {
/* Send an event on assert */
gen_event(ibs, sensor, 0, evd1, evd2, evd3);
}
@@ -543,7 +544,7 @@ static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor,
return; /* Already deasserted */
}
sens->deassert_states |= mask & sens->deassert_suppt;
- if (sens->deassert_enable & mask & sens->deassert_states) {
+ if (do_log && (sens->deassert_enable & mask & sens->deassert_states)) {
/* Send an event on deassert */
gen_event(ibs, sensor, 1, evd1, evd2, evd3);
}
@@ -699,6 +700,7 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ bool do_log = !IPMI_BMC_WATCHDOG_GET_DONT_LOG(ibs);
if (!ibs->watchdog_running) {
goto out;
@@ -710,14 +712,16 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs)
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK;
k->do_hw_op(s, IPMI_SEND_NMI, 0);
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1,
- 0xc8, (2 << 4) | 0xf, 0xff);
+ 0xc8, (2 << 4) | 0xf, 0xff,
+ do_log);
break;
case IPMI_BMC_WATCHDOG_PRE_MSG_INT:
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK;
k->set_atn(s, 1, attn_irq_enabled(ibs));
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1,
- 0xc8, (3 << 4) | 0xf, 0xff);
+ 0xc8, (3 << 4) | 0xf, 0xff,
+ do_log);
break;
default:
@@ -737,24 +741,28 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs)
switch (IPMI_BMC_WATCHDOG_GET_ACTION(ibs)) {
case IPMI_BMC_WATCHDOG_ACTION_NONE:
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 0, 1,
- 0xc0, ibs->watchdog_use & 0xf, 0xff);
+ 0xc0, ibs->watchdog_use & 0xf, 0xff,
+ do_log);
break;
case IPMI_BMC_WATCHDOG_ACTION_RESET:
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 1, 1,
- 0xc1, ibs->watchdog_use & 0xf, 0xff);
+ 0xc1, ibs->watchdog_use & 0xf, 0xff,
+ do_log);
k->do_hw_op(s, IPMI_RESET_CHASSIS, 0);
break;
case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN:
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1,
- 0xc2, ibs->watchdog_use & 0xf, 0xff);
+ 0xc2, ibs->watchdog_use & 0xf, 0xff,
+ do_log);
k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0);
break;
case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE:
sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1,
- 0xc3, ibs->watchdog_use & 0xf, 0xff);
+ 0xc3, ibs->watchdog_use & 0xf, 0xff,
+ do_log);
k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0);
break;
}
@@ -925,7 +933,14 @@ static void set_bmc_global_enables(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
RspBuffer *rsp)
{
- set_global_enables(ibs, cmd[2]);
+ uint8_t val = cmd[2];
+
+ if (val & ~IPMI_BMC_GLOBAL_ENABLES_SUPPORTED) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
+ return;
+ }
+
+ set_global_enables(ibs, val);
}
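
A small sketch (outside the patch) of the new validation: only the low four enable bits are supported and anything else is rejected. The 0xcc completion code is the standard IPMI "invalid data field in request" value, used here in place of the QEMU IPMI_CC_INVALID_DATA_FIELD macro.

#include <stdint.h>
#include <stdio.h>

#define GLOBAL_ENABLES_SUPPORTED 0x0f
#define CC_INVALID_DATA_FIELD    0xcc

static uint8_t set_global_enables_cc(uint8_t val)
{
    if (val & ~GLOBAL_ENABLES_SUPPORTED) {
        return CC_INVALID_DATA_FIELD;  /* unsupported enable bit set */
    }
    return 0x00;                       /* success */
}

int main(void)
{
    printf("0x0f -> cc 0x%02x\n", set_global_enables_cc(0x0f)); /* 0x00 */
    printf("0x20 -> cc 0x%02x\n", set_global_enables_cc(0x20)); /* 0xcc */
    return 0;
}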
static void get_bmc_global_enables(IPMIBmcSim *ibs,
@@ -980,7 +995,7 @@ static void get_msg(IPMIBmcSim *ibs,
if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
rsp_buffer_set_error(rsp, 0x80); /* Queue empty */
- goto out;
+ return;
}
rsp_buffer_push(rsp, 0); /* Channel 0 */
msg = QTAILQ_FIRST(&ibs->rcvbufs);
@@ -995,9 +1010,6 @@ static void get_msg(IPMIBmcSim *ibs,
ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
}
-
-out:
- return;
}
static unsigned char
@@ -1020,8 +1032,8 @@ static void send_msg(IPMIBmcSim *ibs,
uint8_t *buf;
uint8_t netfn, rqLun, rsLun, rqSeq;
- if (cmd[2] != 0) {
- /* We only handle channel 0 with no options */
+ if (cmd[2] != IPMI_CHANNEL_IPMB) {
+ /* We only handle channel 0h (IPMB) with no options */
rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
@@ -1219,6 +1231,68 @@ static void get_watchdog_timer(IPMIBmcSim *ibs,
}
}
+static void get_channel_info(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ IPMIFwInfo info = {};
+ uint8_t ch = cmd[2] & 0x0f;
+
+ /* Only define channel 0h (IPMB) and Fh (system interface) */
+
+ if (ch == 0x0e) { /* "This channel" */
+ ch = IPMI_CHANNEL_SYSTEM;
+ }
+ rsp_buffer_push(rsp, ch);
+
+ if (ch != IPMI_CHANNEL_IPMB && ch != IPMI_CHANNEL_SYSTEM) {
+ /* Not a supported channel */
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
+ return;
+ }
+
+ if (k->get_fwinfo) {
+ k->get_fwinfo(s, &info);
+ }
+
+ if (ch == IPMI_CHANNEL_IPMB) {
+ rsp_buffer_push(rsp, IPMI_CHANNEL_MEDIUM_IPMB);
+ rsp_buffer_push(rsp, IPMI_CHANNEL_PROTOCOL_IPMB);
+ } else { /* IPMI_CHANNEL_SYSTEM */
+ rsp_buffer_push(rsp, IPMI_CHANNEL_MEDIUM_SYSTEM);
+ rsp_buffer_push(rsp, info.ipmi_channel_protocol);
+ }
+
+ rsp_buffer_push(rsp, 0x00); /* Session-less */
+
+ /* IPMI Enterprise Number for Vendor ID */
+ rsp_buffer_push(rsp, 0xf2);
+ rsp_buffer_push(rsp, 0x1b);
+ rsp_buffer_push(rsp, 0x00);
+
+ if (ch == IPMI_CHANNEL_SYSTEM) {
+ uint8_t irq;
+
+ if (info.irq_source == IPMI_ISA_IRQ) {
+ irq = info.interrupt_number;
+ } else if (info.irq_source == IPMI_PCI_IRQ) {
+ irq = 0x10 + info.interrupt_number;
+ } else {
+ irq = 0xff; /* no interrupt / unspecified */
+ }
+
+ /* Both interrupts use the same irq number */
+ rsp_buffer_push(rsp, irq);
+ rsp_buffer_push(rsp, irq);
+ } else {
+ /* Reserved */
+ rsp_buffer_push(rsp, 0x00);
+ rsp_buffer_push(rsp, 0x00);
+ }
+}
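
An illustrative sketch (not from the patch) of the response body the handler above builds for channel 0 (IPMB). The numeric values stand in for the QEMU IPMI_CHANNEL_* macros and are the IPMI spec codes for IPMB (medium 01h, protocol 01h); the completion code prepended by RspBuffer is omitted.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t rsp[] = {
        0x00,             /* channel number: 0h, IPMB */
        0x01,             /* channel medium type: IPMB */
        0x01,             /* channel protocol type: IPMB-1.0 */
        0x00,             /* session support: session-less */
        0xf2, 0x1b, 0x00, /* vendor IANA number, LS byte first, as pushed above */
        0x00, 0x00,       /* reserved: no interrupt info for IPMB */
    };

    for (unsigned i = 0; i < sizeof(rsp); i++) {
        printf("%02x ", rsp[i]);
    }
    printf("\n");
    return 0;
}

For the system-interface channel (Fh) the last two bytes carry the interrupt number instead, offset by 0x10 when the source is a PCI IRQ.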
+
static void get_sdr_rep_info(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
RspBuffer *rsp)
@@ -2015,6 +2089,7 @@ static const IPMICmdHandler app_cmds[] = {
[IPMI_CMD_RESET_WATCHDOG_TIMER] = { reset_watchdog_timer },
[IPMI_CMD_SET_WATCHDOG_TIMER] = { set_watchdog_timer, 8 },
[IPMI_CMD_GET_WATCHDOG_TIMER] = { get_watchdog_timer },
+ [IPMI_CMD_GET_CHANNEL_INFO] = { get_channel_info, 3 },
};
static const IPMINetfn app_netfn = {
.cmd_nums = ARRAY_SIZE(app_cmds),
@@ -2187,11 +2262,9 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp)
register_cmds(ibs);
ibs->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ipmi_timeout, ibs);
-
- vmstate_register(NULL, 0, &vmstate_ipmi_sim, ibs);
}
-static Property ipmi_sim_properties[] = {
+static const Property ipmi_sim_properties[] = {
DEFINE_PROP_UINT16("fruareasize", IPMIBmcSim, fru.areasize, 1024),
DEFINE_PROP_STRING("frudatafile", IPMIBmcSim, fru.filename),
DEFINE_PROP_STRING("sdrfile", IPMIBmcSim, sdr_filename),
@@ -2203,16 +2276,16 @@ static Property ipmi_sim_properties[] = {
DEFINE_PROP_UINT32("mfg_id", IPMIBmcSim, mfg_id, 0),
DEFINE_PROP_UINT16("product_id", IPMIBmcSim, product_id, 0),
DEFINE_PROP_UUID_NODEFAULT("guid", IPMIBmcSim, uuid),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ipmi_sim_class_init(ObjectClass *oc, void *data)
+static void ipmi_sim_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
dc->hotpluggable = false;
dc->realize = ipmi_sim_realize;
+ dc->vmsd = &vmstate_ipmi_sim;
device_class_set_props(dc, ipmi_sim_properties);
bk->handle_command = ipmi_sim_handle_command;
}
diff --git a/hw/ipmi/ipmi_bt.c b/hw/ipmi/ipmi_bt.c
index 583fc64..e01d02f 100644
--- a/hw/ipmi/ipmi_bt.c
+++ b/hw/ipmi/ipmi_bt.c
@@ -98,14 +98,14 @@ static void ipmi_bt_handle_event(IPMIInterface *ii)
IPMIBT *ib = iic->get_backend_data(ii);
if (ib->inlen < 4) {
- goto out;
+ return;
}
/* Note that overruns are handled by handle_command */
if (ib->inmsg[0] != (ib->inlen - 1)) {
/* Length mismatch, just ignore. */
IPMI_BT_SET_BBUSY(ib->control_reg, 1);
ib->inlen = 0;
- goto out;
+ return;
}
if ((ib->inmsg[1] == (IPMI_NETFN_APP << 2)) &&
(ib->inmsg[3] == IPMI_CMD_GET_BT_INTF_CAP)) {
@@ -136,7 +136,7 @@ static void ipmi_bt_handle_event(IPMIInterface *ii)
IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1);
ipmi_bt_raise_irq(ib);
}
- goto out;
+ return;
}
ib->waiting_seq = ib->inmsg[2];
ib->inmsg[2] = ib->inmsg[1];
@@ -145,8 +145,6 @@ static void ipmi_bt_handle_event(IPMIInterface *ii)
bk->handle_command(ib->bmc, ib->inmsg + 2, ib->inlen - 2,
sizeof(ib->inmsg), ib->waiting_rsp);
}
- out:
- return;
}
static void ipmi_bt_handle_rsp(IPMIInterface *ii, uint8_t msg_id,
@@ -419,6 +417,8 @@ void ipmi_bt_get_fwinfo(struct IPMIBT *ib, IPMIFwInfo *info)
info->interface_type = IPMI_SMBIOS_BT;
info->ipmi_spec_major_revision = 2;
info->ipmi_spec_minor_revision = 0;
+ /* BT System Interface Format, IPMI v1.5 */
+ info->ipmi_channel_protocol = IPMI_CHANNEL_PROTOCOL_BT_15;
info->base_address = ib->io_base;
info->register_length = ib->io_length;
info->register_spacing = 1;
diff --git a/hw/ipmi/ipmi_kcs.c b/hw/ipmi/ipmi_kcs.c
index c15977c..d5cfe6c 100644
--- a/hw/ipmi/ipmi_kcs.c
+++ b/hw/ipmi/ipmi_kcs.c
@@ -168,7 +168,7 @@ static void ipmi_kcs_handle_event(IPMIInterface *ii)
ik->outpos = 0;
bk->handle_command(ik->bmc, ik->inmsg, ik->inlen, sizeof(ik->inmsg),
ik->waiting_rsp);
- goto out_noibf;
+ return;
} else if (ik->cmd_reg == IPMI_KCS_WRITE_END_CMD) {
ik->cmd_reg = -1;
ik->write_end = 1;
@@ -197,8 +197,6 @@ static void ipmi_kcs_handle_event(IPMIInterface *ii)
ik->cmd_reg = -1;
ik->data_in_reg = -1;
IPMI_KCS_SET_IBF(ik->status_reg, 0);
- out_noibf:
- return;
}
static void ipmi_kcs_handle_rsp(IPMIInterface *ii, uint8_t msg_id,
@@ -405,6 +403,7 @@ void ipmi_kcs_get_fwinfo(IPMIKCS *ik, IPMIFwInfo *info)
info->interface_type = IPMI_SMBIOS_KCS;
info->ipmi_spec_major_revision = 2;
info->ipmi_spec_minor_revision = 0;
+ info->ipmi_channel_protocol = IPMI_CHANNEL_PROTOCOL_KCS;
info->base_address = ik->io_base;
info->i2c_slave_address = ik->bmc->slave_addr;
info->register_length = ik->io_length;
diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c
index 7b36d51..0ad91cc 100644
--- a/hw/ipmi/isa_ipmi_bt.c
+++ b/hw/ipmi/isa_ipmi_bt.c
@@ -49,6 +49,7 @@ static void isa_ipmi_bt_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info)
ISAIPMIBTDevice *iib = ISA_IPMI_BT(ii);
ipmi_bt_get_fwinfo(&iib->bt, info);
+ info->irq_source = IPMI_ISA_IRQ;
info->interrupt_number = iib->isairq;
info->i2c_slave_address = iib->bt.bmc->slave_addr;
info->uuid = iib->uuid;
@@ -117,8 +118,6 @@ static void isa_ipmi_bt_realize(DeviceState *dev, Error **errp)
qdev_set_legacy_instance_id(dev, iib->bt.io_base, iib->bt.io_length);
isa_register_ioport(isadev, &iib->bt.io, iib->bt.io_base);
-
- vmstate_register(NULL, 0, &vmstate_ISAIPMIBTDevice, dev);
}
static void isa_ipmi_bt_init(Object *obj)
@@ -135,19 +134,19 @@ static void *isa_ipmi_bt_get_backend_data(IPMIInterface *ii)
return &iib->bt;
}
-static Property ipmi_isa_properties[] = {
+static const Property ipmi_isa_properties[] = {
DEFINE_PROP_UINT32("ioport", ISAIPMIBTDevice, bt.io_base, 0xe4),
DEFINE_PROP_INT32("irq", ISAIPMIBTDevice, isairq, 5),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data)
+static void isa_ipmi_bt_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc);
dc->realize = isa_ipmi_bt_realize;
+ dc->vmsd = &vmstate_ISAIPMIBTDevice;
device_class_set_props(dc, ipmi_isa_properties);
iic->get_backend_data = isa_ipmi_bt_get_backend_data;
@@ -162,7 +161,7 @@ static const TypeInfo isa_ipmi_bt_info = {
.instance_size = sizeof(ISAIPMIBTDevice),
.instance_init = isa_ipmi_bt_init,
.class_init = isa_ipmi_bt_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IPMI_INTERFACE },
{ TYPE_ACPI_DEV_AML_IF },
{ }
diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c
index f52b32e..418d234 100644
--- a/hw/ipmi/isa_ipmi_kcs.c
+++ b/hw/ipmi/isa_ipmi_kcs.c
@@ -49,6 +49,7 @@ static void isa_ipmi_kcs_get_fwinfo(IPMIInterface *ii, IPMIFwInfo *info)
ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(ii);
ipmi_kcs_get_fwinfo(&iik->kcs, info);
+ info->irq_source = IPMI_ISA_IRQ;
info->interrupt_number = iik->isairq;
info->uuid = iik->uuid;
}
@@ -72,6 +73,10 @@ static bool vmstate_kcs_before_version2(void *opaque, int version)
return version <= 1;
}
+/*
+ * Version 1 had an incorrect name that clashed with the BT IPMI
+ * device, so receive it, but transmit a different version.
+ */
static const VMStateDescription vmstate_ISAIPMIKCSDevice = {
.name = TYPE_IPMI_INTERFACE,
.version_id = 2,
@@ -119,13 +124,6 @@ static void ipmi_isa_realize(DeviceState *dev, Error **errp)
qdev_set_legacy_instance_id(dev, iik->kcs.io_base, iik->kcs.io_length);
isa_register_ioport(isadev, &iik->kcs.io, iik->kcs.io_base);
-
- /*
- * Version 1 had an incorrect name, it clashed with the BT
- * IPMI device, so receive it, but transmit a different
- * version.
- */
- vmstate_register(NULL, 0, &vmstate_ISAIPMIKCSDevice, iik);
}
static void isa_ipmi_kcs_init(Object *obj)
@@ -142,19 +140,19 @@ static void *isa_ipmi_kcs_get_backend_data(IPMIInterface *ii)
return &iik->kcs;
}
-static Property ipmi_isa_properties[] = {
+static const Property ipmi_isa_properties[] = {
DEFINE_PROP_UINT32("ioport", ISAIPMIKCSDevice, kcs.io_base, 0xca2),
DEFINE_PROP_INT32("irq", ISAIPMIKCSDevice, isairq, 5),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data)
+static void isa_ipmi_kcs_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc);
dc->realize = ipmi_isa_realize;
+ dc->vmsd = &vmstate_ISAIPMIKCSDevice;
device_class_set_props(dc, ipmi_isa_properties);
iic->get_backend_data = isa_ipmi_kcs_get_backend_data;
@@ -169,7 +167,7 @@ static const TypeInfo isa_ipmi_kcs_info = {
.instance_size = sizeof(ISAIPMIKCSDevice),
.instance_init = isa_ipmi_kcs_init,
.class_init = isa_ipmi_kcs_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IPMI_INTERFACE },
{ TYPE_ACPI_DEV_AML_IF },
{ }
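
The isa_ipmi_* hunks also switch to the newer property-array convention: the array is const and carries no DEFINE_PROP_END_OF_LIST() terminator, since current qdev derives the element count from the array itself (background knowledge, not something this patch states). A hedged sketch with an invented BarDevice, field names chosen only for illustration:

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"

typedef struct BarDevice {
    DeviceState parent_obj;
    uint32_t io_base;
    int32_t isairq;
} BarDevice;

static const Property bar_properties[] = {
    DEFINE_PROP_UINT32("ioport", BarDevice, io_base, 0xca2),
    DEFINE_PROP_INT32("irq", BarDevice, isairq, 5),
    /* no DEFINE_PROP_END_OF_LIST() terminator in this style */
};

static void bar_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_props(dc, bar_properties);
}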
diff --git a/hw/ipmi/pci_ipmi_bt.c b/hw/ipmi/pci_ipmi_bt.c
index afeea6f..905101d 100644
--- a/hw/ipmi/pci_ipmi_bt.c
+++ b/hw/ipmi/pci_ipmi_bt.c
@@ -38,49 +38,60 @@ struct PCIIPMIBTDevice {
uint32_t uuid;
};
-static void pci_ipmi_raise_irq(IPMIBT *ik)
+static void pci_ipmi_bt_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info)
{
- PCIIPMIBTDevice *pik = ik->opaque;
+ PCIIPMIBTDevice *pib = PCI_IPMI_BT(ii);
- pci_set_irq(&pik->dev, true);
+ ipmi_bt_get_fwinfo(&pib->bt, info);
+ info->irq_source = IPMI_PCI_IRQ;
+ info->interrupt_number = pci_intx(&pib->dev);
+ info->i2c_slave_address = pib->bt.bmc->slave_addr;
+ info->uuid = pib->uuid;
}
-static void pci_ipmi_lower_irq(IPMIBT *ik)
+static void pci_ipmi_raise_irq(IPMIBT *ib)
{
- PCIIPMIBTDevice *pik = ik->opaque;
+ PCIIPMIBTDevice *pib = ib->opaque;
- pci_set_irq(&pik->dev, false);
+ pci_set_irq(&pib->dev, true);
+}
+
+static void pci_ipmi_lower_irq(IPMIBT *ib)
+{
+ PCIIPMIBTDevice *pib = ib->opaque;
+
+ pci_set_irq(&pib->dev, false);
}
static void pci_ipmi_bt_realize(PCIDevice *pd, Error **errp)
{
Error *err = NULL;
- PCIIPMIBTDevice *pik = PCI_IPMI_BT(pd);
+ PCIIPMIBTDevice *pib = PCI_IPMI_BT(pd);
IPMIInterface *ii = IPMI_INTERFACE(pd);
IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
- if (!pik->bt.bmc) {
+ if (!pib->bt.bmc) {
error_setg(errp, "IPMI device requires a bmc attribute to be set");
return;
}
- pik->uuid = ipmi_next_uuid();
+ pib->uuid = ipmi_next_uuid();
- pik->bt.bmc->intf = ii;
- pik->bt.opaque = pik;
+ pib->bt.bmc->intf = ii;
+ pib->bt.opaque = pib;
pci_config_set_prog_interface(pd->config, 0x02); /* BT */
pci_config_set_interrupt_pin(pd->config, 0x01);
- pik->bt.use_irq = 1;
- pik->bt.raise_irq = pci_ipmi_raise_irq;
- pik->bt.lower_irq = pci_ipmi_lower_irq;
+ pib->bt.use_irq = 1;
+ pib->bt.raise_irq = pci_ipmi_raise_irq;
+ pib->bt.lower_irq = pci_ipmi_lower_irq;
iic->init(ii, 8, &err);
if (err) {
error_propagate(errp, err);
return;
}
- pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pik->bt.io);
+ pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pib->bt.io);
}
const VMStateDescription vmstate_PCIIPMIBTDevice = {
@@ -96,19 +107,19 @@ const VMStateDescription vmstate_PCIIPMIBTDevice = {
static void pci_ipmi_bt_instance_init(Object *obj)
{
- PCIIPMIBTDevice *pik = PCI_IPMI_BT(obj);
+ PCIIPMIBTDevice *pib = PCI_IPMI_BT(obj);
- ipmi_bmc_find_and_link(obj, (Object **) &pik->bt.bmc);
+ ipmi_bmc_find_and_link(obj, (Object **) &pib->bt.bmc);
}
static void *pci_ipmi_bt_get_backend_data(IPMIInterface *ii)
{
- PCIIPMIBTDevice *pik = PCI_IPMI_BT(ii);
+ PCIIPMIBTDevice *pib = PCI_IPMI_BT(ii);
- return &pik->bt;
+ return &pib->bt;
}
-static void pci_ipmi_bt_class_init(ObjectClass *oc, void *data)
+static void pci_ipmi_bt_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
@@ -125,6 +136,7 @@ static void pci_ipmi_bt_class_init(ObjectClass *oc, void *data)
iic->get_backend_data = pci_ipmi_bt_get_backend_data;
ipmi_bt_class_init(iic);
+ iic->get_fwinfo = pci_ipmi_bt_get_fwinfo;
}
static const TypeInfo pci_ipmi_bt_info = {
@@ -133,7 +145,7 @@ static const TypeInfo pci_ipmi_bt_info = {
.instance_size = sizeof(PCIIPMIBTDevice),
.instance_init = pci_ipmi_bt_instance_init,
.class_init = pci_ipmi_bt_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IPMI_INTERFACE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
diff --git a/hw/ipmi/pci_ipmi_kcs.c b/hw/ipmi/pci_ipmi_kcs.c
index 05ba97e..4d6cde8 100644
--- a/hw/ipmi/pci_ipmi_kcs.c
+++ b/hw/ipmi/pci_ipmi_kcs.c
@@ -38,6 +38,16 @@ struct PCIIPMIKCSDevice {
uint32_t uuid;
};
+static void pci_ipmi_kcs_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info)
+{
+ PCIIPMIKCSDevice *pik = PCI_IPMI_KCS(ii);
+
+ ipmi_kcs_get_fwinfo(&pik->kcs, info);
+ info->irq_source = IPMI_PCI_IRQ;
+ info->interrupt_number = pci_intx(&pik->dev);
+ info->uuid = pik->uuid;
+}
+
static void pci_ipmi_raise_irq(IPMIKCS *ik)
{
PCIIPMIKCSDevice *pik = ik->opaque;
@@ -108,7 +118,7 @@ static void *pci_ipmi_kcs_get_backend_data(IPMIInterface *ii)
return &pik->kcs;
}
-static void pci_ipmi_kcs_class_init(ObjectClass *oc, void *data)
+static void pci_ipmi_kcs_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
@@ -125,6 +135,7 @@ static void pci_ipmi_kcs_class_init(ObjectClass *oc, void *data)
iic->get_backend_data = pci_ipmi_kcs_get_backend_data;
ipmi_kcs_class_init(iic);
+ iic->get_fwinfo = pci_ipmi_kcs_get_fwinfo;
}
static const TypeInfo pci_ipmi_kcs_info = {
@@ -133,7 +144,7 @@ static const TypeInfo pci_ipmi_kcs_info = {
.instance_size = sizeof(PCIIPMIKCSDevice),
.instance_init = pci_ipmi_kcs_instance_init,
.class_init = pci_ipmi_kcs_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IPMI_INTERFACE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c
index 56865df..78c332d 100644
--- a/hw/ipmi/smbus_ipmi.c
+++ b/hw/ipmi/smbus_ipmi.c
@@ -351,7 +351,7 @@ static void smbus_ipmi_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info)
info->uuid = sid->uuid;
}
-static void smbus_ipmi_class_init(ObjectClass *oc, void *data)
+static void smbus_ipmi_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
@@ -376,7 +376,7 @@ static const TypeInfo smbus_ipmi_info = {
.instance_size = sizeof(SMBusIPMIDevice),
.instance_init = smbus_ipmi_init,
.class_init = smbus_ipmi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_IPMI_INTERFACE },
{ TYPE_ACPI_DEV_AML_IF },
{ }
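
Every class_init hook touched here moves to the (ObjectClass *, const void *data) signature, and the TypeInfo interface lists become const InterfaceInfo arrays. A compact sketch of the resulting registration boilerplate, using an invented qux type whose name, parent and interface choice are purely illustrative:

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/qdev-core.h"
#include "hw/ipmi/ipmi.h"

#define TYPE_QUX_DEVICE "qux-device"

static void qux_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->hotpluggable = false;
}

static const TypeInfo qux_info = {
    .name       = TYPE_QUX_DEVICE,
    .parent     = TYPE_DEVICE,
    .class_init = qux_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_IPMI_INTERFACE },
        { }
    },
};

static void qux_register_types(void)
{
    type_register_static(&qux_info);
}

type_init(qux_register_types)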
diff --git a/hw/isa/fdc37m81x-superio.c b/hw/isa/fdc37m81x-superio.c
index 55e91fb..c2a38f0 100644
--- a/hw/isa/fdc37m81x-superio.c
+++ b/hw/isa/fdc37m81x-superio.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "hw/isa/superio.h"
-static void fdc37m81x_class_init(ObjectClass *klass, void *data)
+static void fdc37m81x_class_init(ObjectClass *klass, const void *data)
{
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index cbaa152..06e8f0c 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -122,7 +122,7 @@ static void i82378_init(Object *obj)
qdev_init_gpio_in(dev, i82378_request_pic_irq, 16);
}
-static void i82378_class_init(ObjectClass *klass, void *data)
+static void i82378_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -142,7 +142,7 @@ static const TypeInfo i82378_type_info = {
.instance_size = sizeof(I82378State),
.instance_init = i82378_init,
.class_init = i82378_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index f1e0f14..6c9802e 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -22,14 +22,14 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/isa/isa.h"
static ISABus *isabus;
static char *isabus_get_fw_dev_path(DeviceState *dev);
-static void isa_bus_class_init(ObjectClass *klass, void *data)
+static void isa_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -205,7 +205,7 @@ ISADevice *isa_vga_init(ISABus *bus)
}
}
-static void isabus_bridge_class_init(ObjectClass *klass, void *data)
+static void isabus_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -220,7 +220,7 @@ static const TypeInfo isabus_bridge_info = {
.class_init = isabus_bridge_class_init,
};
-static void isa_device_class_init(ObjectClass *klass, void *data)
+static void isa_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->bus_type = TYPE_ISA_BUS;
diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c
index a8c8c58..2853485 100644
--- a/hw/isa/isa-superio.c
+++ b/hw/isa/isa-superio.c
@@ -14,7 +14,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "chardev/char.h"
#include "hw/char/parallel.h"
#include "hw/block/fdc.h"
@@ -22,7 +22,7 @@
#include "hw/qdev-properties.h"
#include "hw/input/i8042.h"
#include "hw/char/parallel-isa.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "trace.h"
static void isa_superio_realize(DeviceState *dev, Error **errp)
@@ -172,7 +172,7 @@ static void isa_superio_realize(DeviceState *dev, Error **errp)
}
}
-static void isa_superio_class_init(ObjectClass *oc, void *data)
+static void isa_superio_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index bd727b2..304dffa 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -43,10 +43,11 @@
#include "hw/southbridge/ich9.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9.h"
+#include "hw/acpi/ich9_timer.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "hw/core/cpu.h"
#include "hw/nvram/fw_cfg.h"
#include "qemu/cutils.h"
@@ -181,7 +182,6 @@ static uint64_t ich9_cc_read(void *opaque, hwaddr addr,
}
/* IRQ routing */
-/* */
static void ich9_lpc_rout(uint8_t pirq_rout, int *pic_irq, int *pic_dis)
{
*pic_irq = pirq_rout & ICH9_LPC_PIRQ_ROUT_MASK;
@@ -531,6 +531,15 @@ ich9_lpc_pmcon_update(ICH9LPCState *lpc)
uint16_t gen_pmcon_1 = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1);
uint16_t wmask;
+ if (lpc->pm.swsmi_timer_enabled) {
+ ich9_pm_update_swsmi_timer(
+ &lpc->pm, lpc->pm.smi_en & ICH9_PMIO_SMI_EN_SWSMI_EN);
+ }
+ if (lpc->pm.periodic_timer_enabled) {
+ ich9_pm_update_periodic_timer(
+ &lpc->pm, lpc->pm.smi_en & ICH9_PMIO_SMI_EN_PERIODIC_EN);
+ }
+
if (gen_pmcon_1 & ICH9_LPC_GEN_PMCON_1_SMI_LOCK) {
wmask = pci_get_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1);
wmask &= ~ICH9_LPC_GEN_PMCON_1_SMI_LOCK;
@@ -816,7 +825,7 @@ static const VMStateDescription vmstate_ich9_lpc = {
}
};
-static Property ich9_lpc_properties[] = {
+static const Property ich9_lpc_properties[] = {
DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, false),
DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false),
DEFINE_PROP_BOOL("smm-enabled", ICH9LPCState, pm.smm_enabled, false),
@@ -826,7 +835,10 @@ static Property ich9_lpc_properties[] = {
ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT, true),
DEFINE_PROP_BIT64("x-smi-cpu-hotunplug", ICH9LPCState, smi_host_features,
ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT, true),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("x-smi-swsmi-timer", ICH9LPCState,
+ pm.swsmi_timer_enabled, true),
+ DEFINE_PROP_BOOL("x-smi-periodic-timer", ICH9LPCState,
+ pm.periodic_timer_enabled, true),
};
static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
@@ -862,7 +874,7 @@ static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
qbus_build_aml(bus, scope);
}
-static void ich9_lpc_class_init(ObjectClass *klass, void *data)
+static void ich9_lpc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -871,7 +883,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data)
AcpiDevAmlIfClass *amldevc = ACPI_DEV_AML_IF_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- dc->reset = ich9_lpc_reset;
+ device_class_set_legacy_reset(dc, ich9_lpc_reset);
k->realize = ich9_lpc_realize;
dc->vmsd = &vmstate_ich9_lpc;
device_class_set_props(dc, ich9_lpc_properties);
@@ -902,7 +914,7 @@ static const TypeInfo ich9_lpc_info = {
.instance_size = sizeof(ICH9LPCState),
.instance_init = ich9_lpc_initfn,
.class_init = ich9_lpc_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c
index 64dd17b..388da8f 100644
--- a/hw/isa/pc87312.c
+++ b/hw/isa/pc87312.c
@@ -327,18 +327,17 @@ static const VMStateDescription vmstate_pc87312 = {
}
};
-static Property pc87312_properties[] = {
+static const Property pc87312_properties[] = {
DEFINE_PROP_UINT16("iobase", PC87312State, iobase, 0x398),
DEFINE_PROP_UINT8("config", PC87312State, config, 1),
- DEFINE_PROP_END_OF_LIST()
};
-static void pc87312_class_init(ObjectClass *klass, void *data)
+static void pc87312_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
- dc->reset = pc87312_reset;
+ device_class_set_legacy_reset(dc, pc87312_reset);
dc->vmsd = &vmstate_pc87312;
device_class_set_parent_realize(dc, pc87312_realize,
&sc->parent_realize);
diff --git a/hw/isa/piix.c b/hw/isa/piix.c
index 2d30711..52c14d3 100644
--- a/hw/isa/piix.c
+++ b/hw/isa/piix.c
@@ -34,7 +34,7 @@
#include "hw/ide/piix.h"
#include "hw/intc/i8259.h"
#include "hw/isa/isa.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "migration/vmstate.h"
#include "hw/acpi/acpi_aml_interface.h"
@@ -408,24 +408,23 @@ static void pci_piix_init(Object *obj)
object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC);
}
-static Property pci_piix_props[] = {
+static const Property pci_piix_props[] = {
DEFINE_PROP_UINT32("smb_io_base", PIIXState, smb_io_base, 0),
DEFINE_PROP_BOOL("has-acpi", PIIXState, has_acpi, true),
DEFINE_PROP_BOOL("has-pic", PIIXState, has_pic, true),
DEFINE_PROP_BOOL("has-pit", PIIXState, has_pit, true),
DEFINE_PROP_BOOL("has-usb", PIIXState, has_usb, true),
DEFINE_PROP_BOOL("smm-enabled", PIIXState, smm_enabled, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pci_piix_class_init(ObjectClass *klass, void *data)
+static void pci_piix_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
k->config_write = piix_write_config;
- dc->reset = piix_reset;
+ device_class_set_legacy_reset(dc, piix_reset);
dc->desc = "ISA bridge";
dc->hotpluggable = false;
k->vendor_id = PCI_VENDOR_ID_INTEL;
@@ -446,7 +445,7 @@ static const TypeInfo piix_pci_type_info = {
.instance_init = pci_piix_init,
.abstract = true,
.class_init = pci_piix_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
{ },
@@ -465,7 +464,7 @@ static void piix3_init(Object *obj)
object_initialize_child(obj, "ide", &d->ide, TYPE_PIIX3_IDE);
}
-static void piix3_class_init(ObjectClass *klass, void *data)
+static void piix3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -495,7 +494,7 @@ static void piix4_init(Object *obj)
object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE);
}
-static void piix4_class_init(ObjectClass *klass, void *data)
+static void piix4_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/isa/smc37c669-superio.c b/hw/isa/smc37c669-superio.c
index d2e58c9..0ec63f5 100644
--- a/hw/isa/smc37c669-superio.c
+++ b/hw/isa/smc37c669-superio.c
@@ -58,7 +58,7 @@ static unsigned int get_fdc_dma(ISASuperIODevice *sio, uint8_t index)
return 2;
}
-static void smc37c669_class_init(ObjectClass *klass, void *data)
+static void smc37c669_class_init(ObjectClass *klass, const void *data)
{
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index 505b44c..3379586 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -17,7 +17,7 @@
#include "hw/isa/vt82c686.h"
#include "hw/block/fdc.h"
#include "hw/char/parallel-isa.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/ide/pci.h"
@@ -220,11 +220,11 @@ typedef struct via_pm_init_info {
uint16_t device_id;
} ViaPMInitInfo;
-static void via_pm_class_init(ObjectClass *klass, void *data)
+static void via_pm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- ViaPMInitInfo *info = data;
+ const ViaPMInitInfo *info = data;
k->realize = via_pm_realize;
k->config_write = pm_write_config;
@@ -232,7 +232,7 @@ static void via_pm_class_init(ObjectClass *klass, void *data)
k->device_id = info->device_id;
k->class_id = PCI_CLASS_BRIDGE_OTHER;
k->revision = 0x40;
- dc->reset = via_pm_reset;
+ device_class_set_legacy_reset(dc, via_pm_reset);
/* Reason: part of VIA south bridge, does not exist stand alone */
dc->user_creatable = false;
dc->vmsd = &vmstate_acpi;
@@ -243,7 +243,7 @@ static const TypeInfo via_pm_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(ViaPMState),
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -259,7 +259,7 @@ static const TypeInfo vt82c686b_pm_info = {
.name = TYPE_VT82C686B_PM,
.parent = TYPE_VIA_PM,
.class_init = via_pm_class_init,
- .class_data = (void *)&vt82c686b_pm_init_info,
+ .class_data = &vt82c686b_pm_init_info,
};
static const ViaPMInitInfo vt8231_pm_init_info = {
@@ -272,7 +272,7 @@ static const TypeInfo vt8231_pm_info = {
.name = TYPE_VT8231_PM,
.parent = TYPE_VIA_PM,
.class_init = via_pm_class_init,
- .class_data = (void *)&vt8231_pm_init_info,
+ .class_data = &vt8231_pm_init_info,
};
@@ -337,7 +337,7 @@ static void via_superio_devices_enable(ViaSuperIOState *s, uint8_t data)
isa_fdc_set_enabled(s->superio.floppy, data & BIT(4));
}
-static void via_superio_class_init(ObjectClass *klass, void *data)
+static void via_superio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
@@ -456,12 +456,12 @@ static void vt82c686b_superio_init(Object *obj)
VIA_SUPERIO(obj)->io_ops = &vt82c686b_superio_cfg_ops;
}
-static void vt82c686b_superio_class_init(ObjectClass *klass, void *data)
+static void vt82c686b_superio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
- dc->reset = vt82c686b_superio_reset;
+ device_class_set_legacy_reset(dc, vt82c686b_superio_reset);
sc->serial.count = 2;
sc->parallel.count = 1;
sc->ide.count = 0; /* emulated by via-ide */
@@ -565,12 +565,12 @@ static void vt8231_superio_init(Object *obj)
VIA_SUPERIO(obj)->io_ops = &vt8231_superio_cfg_ops;
}
-static void vt8231_superio_class_init(ObjectClass *klass, void *data)
+static void vt8231_superio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass);
- dc->reset = vt8231_superio_reset;
+ device_class_set_legacy_reset(dc, vt8231_superio_reset);
sc->serial.count = 1;
sc->parallel.count = 1;
sc->ide.count = 0; /* emulated by via-ide */
@@ -592,6 +592,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA)
struct ViaISAState {
PCIDevice dev;
+
+ IRQState i8259_irq;
qemu_irq cpu_intr;
qemu_irq *isa_irqs_in;
uint16_t irq_state[ISA_NUM_IRQS];
@@ -632,7 +634,7 @@ static const TypeInfo via_isa_info = {
.instance_size = sizeof(ViaISAState),
.instance_init = via_isa_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -715,13 +717,12 @@ static void via_isa_realize(PCIDevice *d, Error **errp)
ViaISAState *s = VIA_ISA(d);
DeviceState *dev = DEVICE(d);
PCIBus *pci_bus = pci_get_bus(d);
- qemu_irq *isa_irq;
ISABus *isa_bus;
int i;
qdev_init_gpio_out_named(dev, &s->cpu_intr, "intr", 1);
qdev_init_gpio_in_named(dev, via_isa_pirq, "pirq", PCI_NUM_PINS);
- isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1);
+ qemu_init_irq(&s->i8259_irq, via_isa_request_i8259_irq, s, 0);
isa_bus = isa_bus_new(dev, pci_address_space(d), pci_address_space_io(d),
errp);
@@ -729,7 +730,7 @@ static void via_isa_realize(PCIDevice *d, Error **errp)
return;
}
- s->isa_irqs_in = i8259_init(isa_bus, *isa_irq);
+ s->isa_irqs_in = i8259_init(isa_bus, &s->i8259_irq);
isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in);
i8254_pit_init(isa_bus, 0x40, 0, NULL);
i8257_dma_init(OBJECT(d), isa_bus, 0);
@@ -832,7 +833,7 @@ static void vt82c686b_init(Object *obj)
object_initialize_child(obj, "pm", &s->pm, TYPE_VT82C686B_PM);
}
-static void vt82c686b_class_init(ObjectClass *klass, void *data)
+static void vt82c686b_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -843,7 +844,7 @@ static void vt82c686b_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_VIA_82C686B_ISA;
k->class_id = PCI_CLASS_BRIDGE_ISA;
k->revision = 0x40;
- dc->reset = vt82c686b_isa_reset;
+ device_class_set_legacy_reset(dc, vt82c686b_isa_reset);
dc->desc = "ISA bridge";
dc->vmsd = &vmstate_via;
/* Reason: part of VIA VT82C686 southbridge, needs to be wired up */
@@ -897,7 +898,7 @@ static void vt8231_init(Object *obj)
object_initialize_child(obj, "pm", &s->pm, TYPE_VT8231_PM);
}
-static void vt8231_class_init(ObjectClass *klass, void *data)
+static void vt8231_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -908,7 +909,7 @@ static void vt8231_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_VIA_8231_ISA;
k->class_id = PCI_CLASS_BRIDGE_ISA;
k->revision = 0x10;
- dc->reset = vt8231_isa_reset;
+ device_class_set_legacy_reset(dc, vt8231_isa_reset);
dc->desc = "ISA bridge";
dc->vmsd = &vmstate_via;
/* Reason: part of VIA VT8231 southbridge, needs to be wired up */
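
Two recurring conversions in the vt82c686 hunks are worth isolating: the reset handler is installed with device_class_set_legacy_reset() instead of assigning dc->reset directly, and the single intermediate qemu_irq is embedded in the device state and set up with qemu_init_irq() rather than heap-allocated with qemu_allocate_irqs(). A sketch under those assumptions, with invented baz_* names (assumes hw/irq.h, which exposes IRQState for embedding; a real device would use its QOM cast macro instead of the plain cast):

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/irq.h"

typedef struct BazState {
    DeviceState parent_obj;
    IRQState i8259_irq;     /* embedded, freed together with the device */
} BazState;

static void baz_request_i8259_irq(void *opaque, int irq, int level)
{
    /* forward the 8259 output to the CPU interrupt line, etc. */
}

static void baz_realize(DeviceState *dev, Error **errp)
{
    BazState *s = (BazState *)dev;

    /* handler, opaque and input number; no qemu_allocate_irqs() */
    qemu_init_irq(&s->i8259_irq, baz_request_i8259_irq, s, 0);
}

static void baz_reset(DeviceState *dev)
{
    /* legacy per-device reset hook */
}

static void baz_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = baz_realize;
    device_class_set_legacy_reset(dc, baz_reset);
}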
diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig
index 89be737..bb2838b 100644
--- a/hw/loongarch/Kconfig
+++ b/hw/loongarch/Kconfig
@@ -5,19 +5,19 @@ config LOONGARCH_VIRT
select DEVICE_TREE
select PCI
select PCI_EXPRESS_GENERIC_BRIDGE
- imply VIRTIO_VGA
imply PCI_DEVICES
imply NVDIMM
imply TPM_TIS_SYSBUS
- select SERIAL
+ select SERIAL_MM
select VIRTIO_PCI
select PLATFORM_BUS
- select LOONGSON_IPI
+ select LOONGARCH_IPI
select LOONGARCH_PCH_PIC
select LOONGARCH_PCH_MSI
select LOONGARCH_EXTIOI
select LS7A_RTC
select SMBIOS
+ select ACPI_CPU_HOTPLUG
select ACPI_PCI
select ACPI_HW_REDUCED
select FW_CFG_DMA
diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c
deleted file mode 100644
index 72bfc35..0000000
--- a/hw/loongarch/acpi-build.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Support for generating ACPI tables and passing them to Guests
- *
- * Copyright (C) 2021 Loongson Technology Corporation Limited
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/error-report.h"
-#include "qemu/bitmap.h"
-#include "hw/pci/pci.h"
-#include "hw/core/cpu.h"
-#include "target/loongarch/cpu.h"
-#include "hw/acpi/acpi-defs.h"
-#include "hw/acpi/acpi.h"
-#include "hw/nvram/fw_cfg.h"
-#include "hw/acpi/bios-linker-loader.h"
-#include "migration/vmstate.h"
-#include "hw/mem/memory-device.h"
-#include "sysemu/reset.h"
-
-/* Supported chipsets: */
-#include "hw/pci-host/ls7a.h"
-#include "hw/loongarch/virt.h"
-
-#include "hw/acpi/utils.h"
-#include "hw/acpi/pci.h"
-
-#include "qom/qom-qobject.h"
-
-#include "hw/acpi/generic_event_device.h"
-#include "hw/pci-host/gpex.h"
-#include "sysemu/tpm.h"
-#include "hw/platform-bus.h"
-#include "hw/acpi/aml-build.h"
-#include "hw/acpi/hmat.h"
-
-#define ACPI_BUILD_ALIGN_SIZE 0x1000
-#define ACPI_BUILD_TABLE_SIZE 0x20000
-
-#ifdef DEBUG_ACPI_BUILD
-#define ACPI_BUILD_DPRINTF(fmt, ...) \
- do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define ACPI_BUILD_DPRINTF(fmt, ...)
-#endif
-
-/* build FADT */
-static void init_common_fadt_data(AcpiFadtData *data)
-{
- AcpiFadtData fadt = {
- /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */
- .rev = 5,
- .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) |
- (1 << ACPI_FADT_F_RESET_REG_SUP)),
-
- /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */
- .sleep_ctl = {
- .space_id = AML_AS_SYSTEM_MEMORY,
- .bit_width = 8,
- .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL,
- },
- .sleep_sts = {
- .space_id = AML_AS_SYSTEM_MEMORY,
- .bit_width = 8,
- .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS,
- },
-
- /* ACPI 5.0: 4.8.3.6 Reset Register */
- .reset_reg = {
- .space_id = AML_AS_SYSTEM_MEMORY,
- .bit_width = 8,
- .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET,
- },
- .reset_val = ACPI_GED_RESET_VALUE,
- };
- *data = fadt;
-}
-
-static void acpi_align_size(GArray *blob, unsigned align)
-{
- /*
- * Align size to multiple of given size. This reduces the chance
- * we need to change size in the future (breaking cross version migration).
- */
- g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
-}
-
-/* build FACS */
-static void
-build_facs(GArray *table_data)
-{
- const char *sig = "FACS";
- const uint8_t reserved[40] = {};
-
- g_array_append_vals(table_data, sig, 4); /* Signature */
- build_append_int_noprefix(table_data, 64, 4); /* Length */
- build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */
- build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */
- build_append_int_noprefix(table_data, 0, 4); /* Global Lock */
- build_append_int_noprefix(table_data, 0, 4); /* Flags */
- g_array_append_vals(table_data, reserved, 40); /* Reserved */
-}
-
-/* build MADT */
-static void
-build_madt(GArray *table_data, BIOSLinker *linker,
- LoongArchVirtMachineState *lvms)
-{
- MachineState *ms = MACHINE(lvms);
- MachineClass *mc = MACHINE_GET_CLASS(ms);
- const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
- int i, arch_id;
- AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lvms->oem_id,
- .oem_table_id = lvms->oem_table_id };
-
- acpi_table_begin(&table, table_data);
-
- /* Local APIC Address */
- build_append_int_noprefix(table_data, 0, 4);
- build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */
-
- for (i = 0; i < arch_ids->len; i++) {
- /* Processor Core Interrupt Controller Structure */
- arch_id = arch_ids->cpus[i].arch_id;
-
- build_append_int_noprefix(table_data, 17, 1); /* Type */
- build_append_int_noprefix(table_data, 15, 1); /* Length */
- build_append_int_noprefix(table_data, 1, 1); /* Version */
- build_append_int_noprefix(table_data, i, 4); /* ACPI Processor ID */
- build_append_int_noprefix(table_data, arch_id, 4); /* Core ID */
- build_append_int_noprefix(table_data, 1, 4); /* Flags */
- }
-
- /* Extend I/O Interrupt Controller Structure */
- build_append_int_noprefix(table_data, 20, 1); /* Type */
- build_append_int_noprefix(table_data, 13, 1); /* Length */
- build_append_int_noprefix(table_data, 1, 1); /* Version */
- build_append_int_noprefix(table_data, 3, 1); /* Cascade */
- build_append_int_noprefix(table_data, 0, 1); /* Node */
- build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */
-
- /* MSI Interrupt Controller Structure */
- build_append_int_noprefix(table_data, 21, 1); /* Type */
- build_append_int_noprefix(table_data, 19, 1); /* Length */
- build_append_int_noprefix(table_data, 1, 1); /* Version */
- build_append_int_noprefix(table_data, VIRT_PCH_MSI_ADDR_LOW, 8);/* Address */
- build_append_int_noprefix(table_data, 0x40, 4); /* Start */
- build_append_int_noprefix(table_data, 0xc0, 4); /* Count */
-
- /* Bridge I/O Interrupt Controller Structure */
- build_append_int_noprefix(table_data, 22, 1); /* Type */
- build_append_int_noprefix(table_data, 17, 1); /* Length */
- build_append_int_noprefix(table_data, 1, 1); /* Version */
- build_append_int_noprefix(table_data, VIRT_PCH_REG_BASE, 8);/* Address */
- build_append_int_noprefix(table_data, 0x1000, 2); /* Size */
- build_append_int_noprefix(table_data, 0, 2); /* Id */
- build_append_int_noprefix(table_data, 0x40, 2); /* Base */
-
- acpi_table_end(linker, &table);
-}
-
-/* build SRAT */
-static void
-build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
-{
- int i, arch_id, node_id;
- hwaddr len, base, gap;
- NodeInfo *numa_info;
- int nodes, nb_numa_nodes = machine->numa_state->num_nodes;
- LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
- MachineClass *mc = MACHINE_GET_CLASS(lvms);
- const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
- AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lvms->oem_id,
- .oem_table_id = lvms->oem_table_id };
-
- acpi_table_begin(&table, table_data);
- build_append_int_noprefix(table_data, 1, 4); /* Reserved */
- build_append_int_noprefix(table_data, 0, 8); /* Reserved */
-
- for (i = 0; i < arch_ids->len; ++i) {
- arch_id = arch_ids->cpus[i].arch_id;
- node_id = arch_ids->cpus[i].props.node_id;
-
- /* Processor Local APIC/SAPIC Affinity Structure */
- build_append_int_noprefix(table_data, 0, 1); /* Type */
- build_append_int_noprefix(table_data, 16, 1); /* Length */
- /* Proximity Domain [7:0] */
- build_append_int_noprefix(table_data, node_id, 1);
- build_append_int_noprefix(table_data, arch_id, 1); /* APIC ID */
- /* Flags, Table 5-36 */
- build_append_int_noprefix(table_data, 1, 4);
- build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */
- /* Proximity Domain [31:8] */
- build_append_int_noprefix(table_data, 0, 3);
- build_append_int_noprefix(table_data, 0, 4); /* Reserved */
- }
-
- base = VIRT_LOWMEM_BASE;
- gap = VIRT_LOWMEM_SIZE;
- numa_info = machine->numa_state->nodes;
- nodes = nb_numa_nodes;
- if (!nodes) {
- nodes = 1;
- }
-
- for (i = 0; i < nodes; i++) {
- if (nb_numa_nodes) {
- len = numa_info[i].node_mem;
- } else {
- len = machine->ram_size;
- }
-
- /*
- * memory for the node splited into two part
- * lowram: [base, +gap)
- * highram: [VIRT_HIGHMEM_BASE, +(len - gap))
- */
- if (len >= gap) {
- build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED);
- len -= gap;
- base = VIRT_HIGHMEM_BASE;
- gap = machine->ram_size - VIRT_LOWMEM_SIZE;
- }
-
- if (len) {
- build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED);
- base += len;
- gap -= len;
- }
- }
-
- if (machine->device_memory) {
- build_srat_memory(table_data, machine->device_memory->base,
- memory_region_size(&machine->device_memory->mr),
- nodes - 1,
- MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
- }
-
- acpi_table_end(linker, &table);
-}
-
-typedef
-struct AcpiBuildState {
- /* Copy of table in RAM (for patching). */
- MemoryRegion *table_mr;
- /* Is table patched? */
- uint8_t patched;
- void *rsdp;
- MemoryRegion *rsdp_mr;
- MemoryRegion *linker_mr;
-} AcpiBuildState;
-
-static void build_uart_device_aml(Aml *table)
-{
- Aml *dev;
- Aml *crs;
- Aml *pkg0, *pkg1, *pkg2;
- uint32_t uart_irq = VIRT_UART_IRQ;
-
- Aml *scope = aml_scope("_SB");
- dev = aml_device("COMA");
- aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
- aml_append(dev, aml_name_decl("_UID", aml_int(0)));
- aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
- crs = aml_resource_template();
- aml_append(crs,
- aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
- AML_NON_CACHEABLE, AML_READ_WRITE,
- 0, VIRT_UART_BASE, VIRT_UART_BASE + VIRT_UART_SIZE - 1,
- 0, VIRT_UART_SIZE));
- aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
- AML_SHARED, &uart_irq, 1));
- aml_append(dev, aml_name_decl("_CRS", crs));
- pkg0 = aml_package(0x2);
- aml_append(pkg0, aml_int(0x05F5E100));
- aml_append(pkg0, aml_string("clock-frenquency"));
- pkg1 = aml_package(0x1);
- aml_append(pkg1, pkg0);
- pkg2 = aml_package(0x2);
- aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
- aml_append(pkg2, pkg1);
- aml_append(dev, aml_name_decl("_DSD", pkg2));
- aml_append(scope, dev);
- aml_append(table, scope);
-}
-
-static void
-build_la_ged_aml(Aml *dsdt, MachineState *machine)
-{
- uint32_t event;
- LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
-
- build_ged_aml(dsdt, "\\_SB."GED_DEVICE,
- HOTPLUG_HANDLER(lvms->acpi_ged),
- VIRT_SCI_IRQ, AML_SYSTEM_MEMORY,
- VIRT_GED_EVT_ADDR);
- event = object_property_get_uint(OBJECT(lvms->acpi_ged),
- "ged-event", &error_abort);
- if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
- build_memory_hotplug_aml(dsdt, machine->ram_slots, "\\_SB", NULL,
- AML_SYSTEM_MEMORY,
- VIRT_GED_MEM_ADDR);
- }
- acpi_dsdt_add_power_button(dsdt);
-}
-
-static void build_pci_device_aml(Aml *scope, LoongArchVirtMachineState *lvms)
-{
- struct GPEXConfig cfg = {
- .mmio64.base = VIRT_PCI_MEM_BASE,
- .mmio64.size = VIRT_PCI_MEM_SIZE,
- .pio.base = VIRT_PCI_IO_BASE,
- .pio.size = VIRT_PCI_IO_SIZE,
- .ecam.base = VIRT_PCI_CFG_BASE,
- .ecam.size = VIRT_PCI_CFG_SIZE,
- .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS,
- .bus = lvms->pci_bus,
- };
-
- acpi_dsdt_add_gpex(scope, &cfg);
-}
-
-static void build_flash_aml(Aml *scope, LoongArchVirtMachineState *lvms)
-{
- Aml *dev, *crs;
- MemoryRegion *flash_mem;
-
- hwaddr flash0_base;
- hwaddr flash0_size;
-
- hwaddr flash1_base;
- hwaddr flash1_size;
-
- flash_mem = pflash_cfi01_get_memory(lvms->flash[0]);
- flash0_base = flash_mem->addr;
- flash0_size = memory_region_size(flash_mem);
-
- flash_mem = pflash_cfi01_get_memory(lvms->flash[1]);
- flash1_base = flash_mem->addr;
- flash1_size = memory_region_size(flash_mem);
-
- dev = aml_device("FLS0");
- aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
- aml_append(dev, aml_name_decl("_UID", aml_int(0)));
-
- crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(flash0_base, flash0_size,
- AML_READ_WRITE));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
-
- dev = aml_device("FLS1");
- aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
- aml_append(dev, aml_name_decl("_UID", aml_int(1)));
-
- crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(flash1_base, flash1_size,
- AML_READ_WRITE));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
-}
-
-#ifdef CONFIG_TPM
-static void acpi_dsdt_add_tpm(Aml *scope, LoongArchVirtMachineState *vms)
-{
- PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev);
- hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS;
- SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find());
- MemoryRegion *sbdev_mr;
- hwaddr tpm_base;
-
- if (!sbdev) {
- return;
- }
-
- tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
- assert(tpm_base != -1);
-
- tpm_base += pbus_base;
-
- sbdev_mr = sysbus_mmio_get_region(sbdev, 0);
-
- Aml *dev = aml_device("TPM0");
- aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
- aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device")));
- aml_append(dev, aml_name_decl("_UID", aml_int(0)));
-
- Aml *crs = aml_resource_template();
- aml_append(crs,
- aml_memory32_fixed(tpm_base,
- (uint32_t)memory_region_size(sbdev_mr),
- AML_READ_WRITE));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
-}
-#endif
-
-/* build DSDT */
-static void
-build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine)
-{
- Aml *dsdt, *scope, *pkg;
- LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
- AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lvms->oem_id,
- .oem_table_id = lvms->oem_table_id };
-
- acpi_table_begin(&table, table_data);
- dsdt = init_aml_allocator();
- build_uart_device_aml(dsdt);
- build_pci_device_aml(dsdt, lvms);
- build_la_ged_aml(dsdt, machine);
- build_flash_aml(dsdt, lvms);
-#ifdef CONFIG_TPM
- acpi_dsdt_add_tpm(dsdt, lvms);
-#endif
- /* System State Package */
- scope = aml_scope("\\");
- pkg = aml_package(4);
- aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5));
- aml_append(pkg, aml_int(0)); /* ignored */
- aml_append(pkg, aml_int(0)); /* reserved */
- aml_append(pkg, aml_int(0)); /* reserved */
- aml_append(scope, aml_name_decl("_S5", pkg));
- aml_append(dsdt, scope);
- /* Copy AML table into ACPI tables blob and patch header there */
- g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
- acpi_table_end(linker, &table);
- free_aml_allocator();
-}
-
-static void acpi_build(AcpiBuildTables *tables, MachineState *machine)
-{
- LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
- GArray *table_offsets;
- AcpiFadtData fadt_data;
- unsigned facs, rsdt, dsdt;
- uint8_t *u;
- GArray *tables_blob = tables->table_data;
-
- init_common_fadt_data(&fadt_data);
-
- table_offsets = g_array_new(false, true, sizeof(uint32_t));
- ACPI_BUILD_DPRINTF("init ACPI tables\n");
-
- bios_linker_loader_alloc(tables->linker,
- ACPI_BUILD_TABLE_FILE, tables_blob,
- 64, false);
-
- /*
- * FACS is pointed to by FADT.
- * We place it first since it's the only table that has alignment
- * requirements.
- */
- facs = tables_blob->len;
- build_facs(tables_blob);
-
- /* DSDT is pointed to by FADT */
- dsdt = tables_blob->len;
- build_dsdt(tables_blob, tables->linker, machine);
-
- /* ACPI tables pointed to by RSDT */
- acpi_add_table(table_offsets, tables_blob);
- fadt_data.facs_tbl_offset = &facs;
- fadt_data.dsdt_tbl_offset = &dsdt;
- fadt_data.xdsdt_tbl_offset = &dsdt;
- build_fadt(tables_blob, tables->linker, &fadt_data,
- lvms->oem_id, lvms->oem_table_id);
-
- acpi_add_table(table_offsets, tables_blob);
- build_madt(tables_blob, tables->linker, lvms);
-
- acpi_add_table(table_offsets, tables_blob);
- build_pptt(tables_blob, tables->linker, machine,
- lvms->oem_id, lvms->oem_table_id);
-
- acpi_add_table(table_offsets, tables_blob);
- build_srat(tables_blob, tables->linker, machine);
-
- if (machine->numa_state->num_nodes) {
- if (machine->numa_state->have_numa_distance) {
- acpi_add_table(table_offsets, tables_blob);
- build_slit(tables_blob, tables->linker, machine, lvms->oem_id,
- lvms->oem_table_id);
- }
- if (machine->numa_state->hmat_enabled) {
- acpi_add_table(table_offsets, tables_blob);
- build_hmat(tables_blob, tables->linker, machine->numa_state,
- lvms->oem_id, lvms->oem_table_id);
- }
- }
-
- acpi_add_table(table_offsets, tables_blob);
- {
- AcpiMcfgInfo mcfg = {
- .base = cpu_to_le64(VIRT_PCI_CFG_BASE),
- .size = cpu_to_le64(VIRT_PCI_CFG_SIZE),
- };
- build_mcfg(tables_blob, tables->linker, &mcfg, lvms->oem_id,
- lvms->oem_table_id);
- }
-
-#ifdef CONFIG_TPM
- /* TPM info */
- if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) {
- acpi_add_table(table_offsets, tables_blob);
- build_tpm2(tables_blob, tables->linker,
- tables->tcpalog, lvms->oem_id,
- lvms->oem_table_id);
- }
-#endif
- /* Add tables supplied by user (if any) */
- for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
- unsigned len = acpi_table_len(u);
-
- acpi_add_table(table_offsets, tables_blob);
- g_array_append_vals(tables_blob, u, len);
- }
-
- /* RSDT is pointed to by RSDP */
- rsdt = tables_blob->len;
- build_rsdt(tables_blob, tables->linker, table_offsets,
- lvms->oem_id, lvms->oem_table_id);
-
- /* RSDP is in FSEG memory, so allocate it separately */
- {
- AcpiRsdpData rsdp_data = {
- .revision = 0,
- .oem_id = lvms->oem_id,
- .xsdt_tbl_offset = NULL,
- .rsdt_tbl_offset = &rsdt,
- };
- build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
- }
-
- /*
- * The align size is 128, warn if 64k is not enough therefore
- * the align size could be resized.
- */
- if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
- warn_report("ACPI table size %u exceeds %d bytes,"
- " migration may not work",
- tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
- error_printf("Try removing CPUs, NUMA nodes, memory slots"
- " or PCI bridges.\n");
- }
-
- acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE);
-
- /* Cleanup memory that's no longer used. */
- g_array_free(table_offsets, true);
-}
-
-static void acpi_ram_update(MemoryRegion *mr, GArray *data)
-{
- uint32_t size = acpi_data_len(data);
-
- /*
- * Make sure RAM size is correct - in case it got changed
- * e.g. by migration
- */
- memory_region_ram_resize(mr, size, &error_abort);
-
- memcpy(memory_region_get_ram_ptr(mr), data->data, size);
- memory_region_set_dirty(mr, 0, size);
-}
-
-static void acpi_build_update(void *build_opaque)
-{
- AcpiBuildState *build_state = build_opaque;
- AcpiBuildTables tables;
-
- /* No state to update or already patched? Nothing to do. */
- if (!build_state || build_state->patched) {
- return;
- }
- build_state->patched = 1;
-
- acpi_build_tables_init(&tables);
-
- acpi_build(&tables, MACHINE(qdev_get_machine()));
-
- acpi_ram_update(build_state->table_mr, tables.table_data);
- acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
- acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
-
- acpi_build_tables_cleanup(&tables, true);
-}
-
-static void acpi_build_reset(void *build_opaque)
-{
- AcpiBuildState *build_state = build_opaque;
- build_state->patched = 0;
-}
-
-static const VMStateDescription vmstate_acpi_build = {
- .name = "acpi_build",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(patched, AcpiBuildState),
- VMSTATE_END_OF_LIST()
- },
-};
-
-static bool loongarch_is_acpi_enabled(LoongArchVirtMachineState *lvms)
-{
- if (lvms->acpi == ON_OFF_AUTO_OFF) {
- return false;
- }
- return true;
-}
-
-void loongarch_acpi_setup(LoongArchVirtMachineState *lvms)
-{
- AcpiBuildTables tables;
- AcpiBuildState *build_state;
-
- if (!lvms->fw_cfg) {
- ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
- return;
- }
-
- if (!loongarch_is_acpi_enabled(lvms)) {
- ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
- return;
- }
-
- build_state = g_malloc0(sizeof *build_state);
-
- acpi_build_tables_init(&tables);
- acpi_build(&tables, MACHINE(lvms));
-
- /* Now expose it all to Guest */
- build_state->table_mr = acpi_add_rom_blob(acpi_build_update,
- build_state, tables.table_data,
- ACPI_BUILD_TABLE_FILE);
- assert(build_state->table_mr != NULL);
-
- build_state->linker_mr =
- acpi_add_rom_blob(acpi_build_update, build_state,
- tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE);
-
- build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update,
- build_state, tables.rsdp,
- ACPI_BUILD_RSDP_FILE);
-
- fw_cfg_add_file(lvms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
- acpi_data_len(tables.tcpalog));
-
- qemu_register_reset(acpi_build_reset, build_state);
- acpi_build_reset(build_state);
- vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);
-
- /*
- * Cleanup tables but don't free the memory: we track it
- * in build_state.
- */
- acpi_build_tables_cleanup(&tables, false);
-}
diff --git a/hw/loongarch/boot.c b/hw/loongarch/boot.c
index cb66870..14d6c52 100644
--- a/hw/loongarch/boot.c
+++ b/hw/loongarch/boot.c
@@ -12,14 +12,28 @@
#include "hw/loader.h"
#include "elf.h"
#include "qemu/error-report.h"
-#include "sysemu/reset.h"
-#include "sysemu/qtest.h"
+#include "system/reset.h"
+#include "system/qtest.h"
-struct memmap_entry *memmap_table;
-unsigned memmap_entries;
-
-ram_addr_t initrd_offset;
-uint64_t initrd_size;
+/*
+ * Linux Image Format
+ * https://docs.kernel.org/arch/loongarch/booting.html
+ */
+#define LINUX_PE_MAGIC 0x818223cd
+#define MZ_MAGIC 0x5a4d /* "MZ" */
+
+struct loongarch_linux_hdr {
+ uint32_t mz_magic;
+ uint32_t res0;
+ uint64_t kernel_entry;
+ uint64_t kernel_size;
+ uint64_t load_offset;
+ uint64_t res1;
+ uint64_t res2;
+ uint64_t res3;
+ uint32_t linux_pe_magic;
+ uint32_t pe_header_offset;
+} QEMU_PACKED;
static const unsigned int slave_boot_code[] = {
/* Configure reset ebase. */
@@ -74,12 +88,16 @@ static inline void *guidcpy(void *dst, const void *src)
return memcpy(dst, src, sizeof(efi_guid_t));
}
-static void init_efi_boot_memmap(struct efi_system_table *systab,
+static void init_efi_boot_memmap(MachineState *ms,
+ struct efi_system_table *systab,
void *p, void *start)
{
unsigned i;
struct efi_boot_memmap *boot_memmap = p;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms);
+ struct memmap_entry *memmap_table;
+ unsigned int memmap_entries;
/* efi_configuration_table 1 */
guidcpy(&systab->tables[0].guid, &tbl_guid);
@@ -91,6 +109,8 @@ static void init_efi_boot_memmap(struct efi_system_table *systab,
boot_memmap->map_size = 0;
efi_memory_desc_t *map = p + sizeof(struct efi_boot_memmap);
+ memmap_table = lvms->memmap_table;
+ memmap_entries = lvms->memmap_entries;
for (i = 0; i < memmap_entries; i++) {
map = (void *)boot_memmap + sizeof(*map);
map[i].type = memmap_table[i].type;
@@ -101,7 +121,8 @@ static void init_efi_boot_memmap(struct efi_system_table *systab,
}
}
-static void init_efi_initrd_table(struct efi_system_table *systab,
+static void init_efi_initrd_table(struct loongarch_boot_info *info,
+ struct efi_system_table *systab,
void *p, void *start)
{
efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID;
@@ -112,8 +133,8 @@ static void init_efi_initrd_table(struct efi_system_table *systab,
systab->tables[1].table = (struct efi_configuration_table *)(p - start);
systab->nr_tables = 2;
- initrd_table->base = initrd_offset;
- initrd_table->size = initrd_size;
+ initrd_table->base = info->initrd_addr;
+ initrd_table->size = info->initrd_size;
}
static void init_efi_fdt_table(struct efi_system_table *systab)
@@ -126,10 +147,12 @@ static void init_efi_fdt_table(struct efi_system_table *systab)
systab->nr_tables = 3;
}
-static void init_systab(struct loongarch_boot_info *info, void *p, void *start)
+static void init_systab(MachineState *ms,
+ struct loongarch_boot_info *info, void *p, void *start)
{
void *bp_tables_start;
struct efi_system_table *systab = p;
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms);
info->a2 = p - start;
@@ -146,10 +169,10 @@ static void init_systab(struct loongarch_boot_info *info, void *p, void *start)
systab->tables = p;
bp_tables_start = p;
- init_efi_boot_memmap(systab, p, start);
+ init_efi_boot_memmap(ms, systab, p, start);
p += ROUND_UP(sizeof(struct efi_boot_memmap) +
- sizeof(efi_memory_desc_t) * memmap_entries, 64 * KiB);
- init_efi_initrd_table(systab, p, start);
+ sizeof(efi_memory_desc_t) * lvms->memmap_entries, 64 * KiB);
+ init_efi_initrd_table(info, systab, p, start);
p += ROUND_UP(sizeof(struct efi_initrd), 64 * KiB);
init_efi_fdt_table(systab);
@@ -171,16 +194,105 @@ static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr)
return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS);
}
+static int64_t load_loongarch_linux_image(const char *filename,
+ uint64_t *kernel_entry,
+ uint64_t *kernel_low,
+ uint64_t *kernel_high)
+{
+ gsize len;
+ ssize_t size;
+ uint8_t *buffer;
+ struct loongarch_linux_hdr *hdr;
+
+ /* Load as raw file otherwise */
+ if (!g_file_get_contents(filename, (char **)&buffer, &len, NULL)) {
+ return -1;
+ }
+ size = len;
+
+ /* Unpack the image if it is a EFI zboot image */
+ if (unpack_efi_zboot_image(&buffer, &size) < 0) {
+ g_free(buffer);
+ return -1;
+ }
+
+ hdr = (struct loongarch_linux_hdr *)buffer;
+
+ if (extract32(le32_to_cpu(hdr->mz_magic), 0, 16) != MZ_MAGIC ||
+ le32_to_cpu(hdr->linux_pe_magic) != LINUX_PE_MAGIC) {
+ g_free(buffer);
+ return -1;
+ }
+
+ /* Early kernel versions may have these fields as virtual addresses */
+ *kernel_entry = extract64(le64_to_cpu(hdr->kernel_entry),
+ 0, TARGET_PHYS_ADDR_SPACE_BITS);
+ *kernel_low = extract64(le64_to_cpu(hdr->load_offset),
+ 0, TARGET_PHYS_ADDR_SPACE_BITS);
+ *kernel_high = *kernel_low + size;
+
+ rom_add_blob_fixed(filename, buffer, size, *kernel_low);
+
+ g_free(buffer);
+
+ return size;
+}
+
+static ram_addr_t alloc_initrd_memory(struct loongarch_boot_info *info,
+ uint64_t advice_start, ssize_t rd_size)
+{
+ hwaddr base, ram_size, gap, low_end;
+ ram_addr_t initrd_end, initrd_start;
+
+ base = VIRT_LOWMEM_BASE;
+ gap = VIRT_LOWMEM_SIZE;
+ initrd_start = advice_start;
+ initrd_end = initrd_start + rd_size;
+
+ ram_size = info->ram_size;
+ low_end = base + MIN(ram_size, gap);
+ if (initrd_end <= low_end) {
+ return initrd_start;
+ }
+
+ if (ram_size <= gap) {
+ error_report("The low memory is too small for initial ram disk '%s', "
"you need to expand the RAM",
info->initrd_filename);
+ exit(1);
+ }
+
+ /*
* Try to load the initrd in high memory
+ */
+ ram_size -= gap;
+ initrd_start = VIRT_HIGHMEM_BASE;
+ if (rd_size <= ram_size) {
+ return initrd_start;
+ }
+
+ error_report("The high memory is too small for initial ram disk '%s', "
+ "you need to expand the RAM",
+ info->initrd_filename);
+ exit(1);
+}
+
static int64_t load_kernel_info(struct loongarch_boot_info *info)
{
- uint64_t kernel_entry, kernel_low, kernel_high;
- ssize_t kernel_size;
+ uint64_t kernel_entry, kernel_low, kernel_high, initrd_offset = 0;
+ ssize_t kernel_size, initrd_size;
kernel_size = load_elf(info->kernel_filename, NULL,
cpu_loongarch_virt_to_phys, NULL,
&kernel_entry, &kernel_low,
- &kernel_high, NULL, 0,
+ &kernel_high, NULL, ELFDATA2LSB,
EM_LOONGARCH, 1, 0);
+ kernel_entry = cpu_loongarch_virt_to_phys(NULL, kernel_entry);
+ if (kernel_size < 0) {
+ kernel_size = load_loongarch_linux_image(info->kernel_filename,
+ &kernel_entry, &kernel_low,
+ &kernel_high);
+ }
if (kernel_size < 0) {
error_report("could not load kernel '%s': %s",
@@ -193,15 +305,10 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info)
initrd_size = get_image_size(info->initrd_filename);
if (initrd_size > 0) {
initrd_offset = ROUND_UP(kernel_high + 4 * kernel_size, 64 * KiB);
-
- if (initrd_offset + initrd_size > info->ram_size) {
- error_report("memory too small for initial ram disk '%s'",
- info->initrd_filename);
- exit(1);
- }
-
- initrd_size = load_image_targphys(info->initrd_filename, initrd_offset,
- info->ram_size - initrd_offset);
+ initrd_offset = alloc_initrd_memory(info, initrd_offset,
+ initrd_size);
+ initrd_size = load_image_targphys(info->initrd_filename,
+ initrd_offset, initrd_size);
}
if (initrd_size == (target_ulong)-1) {
@@ -209,8 +316,9 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info)
info->initrd_filename);
exit(1);
}
- } else {
- initrd_size = 0;
+
+ info->initrd_addr = initrd_offset;
+ info->initrd_size = initrd_size;
}
return kernel_entry;
@@ -223,7 +331,7 @@ static void reset_load_elf(void *opaque)
cpu_reset(CPU(cpu));
if (env->load_elf) {
- if (cpu == LOONGARCH_CPU(first_cpu)) {
+ if (cpu == LOONGARCH_CPU(first_cpu)) {
env->gpr[4] = env->boot_info->a0;
env->gpr[5] = env->boot_info->a1;
env->gpr[6] = env->boot_info->a2;
@@ -265,36 +373,37 @@ static void loongarch_firmware_boot(LoongArchVirtMachineState *lvms,
fw_cfg_add_kernel_info(info, lvms->fw_cfg);
}
-static void init_boot_rom(struct loongarch_boot_info *info, void *p)
+static void init_boot_rom(MachineState *ms,
+ struct loongarch_boot_info *info, void *p)
{
void *start = p;
init_cmdline(info, p, start);
p += COMMAND_LINE_SIZE;
- init_systab(info, p, start);
+ init_systab(ms, info, p, start);
}
-static void loongarch_direct_kernel_boot(struct loongarch_boot_info *info)
+static void loongarch_direct_kernel_boot(MachineState *ms,
+ struct loongarch_boot_info *info)
{
void *p, *bp;
- int64_t kernel_addr = 0;
+ int64_t kernel_addr = VIRT_FLASH0_BASE;
LoongArchCPU *lacpu;
CPUState *cs;
if (info->kernel_filename) {
kernel_addr = load_kernel_info(info);
} else {
- if(!qtest_enabled()) {
- error_report("Need kernel filename\n");
- exit(1);
+ if (!qtest_enabled()) {
+ warn_report("No kernel provided, booting from flash drive.");
}
}
/* Load cmdline and system tables at [0 - 1 MiB] */
p = g_malloc0(1 * MiB);
bp = p;
- init_boot_rom(info, p);
+ init_boot_rom(ms, info, p);
rom_add_blob_fixed_as("boot_info", bp, 1 * MiB, 0, &address_space_memory);
/* Load slave boot code at pflash0 . */
@@ -334,6 +443,6 @@ void loongarch_load_kernel(MachineState *ms, struct loongarch_boot_info *info)
if (lvms->bios_loaded) {
loongarch_firmware_boot(lvms, info);
} else {
- loongarch_direct_kernel_boot(info);
+ loongarch_direct_kernel_boot(ms, info);
}
}
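
The new raw-image path in boot.c accepts a file only when both magics of the LoongArch Linux boot header match, falling back from the ELF loader. A standalone probe sketch built on the struct and constants declared in the hunk above (the helper name is invented, and it assumes the same headers boot.c already includes; buffer-length checking is elided here but required in practice):

static bool is_loongarch_linux_image(const uint8_t *buffer)
{
    const struct loongarch_linux_hdr *hdr =
        (const struct loongarch_linux_hdr *)buffer;

    /* low 16 bits of the first word must be "MZ", plus the PE magic */
    return extract32(le32_to_cpu(hdr->mz_magic), 0, 16) == MZ_MAGIC &&
           le32_to_cpu(hdr->linux_pe_magic) == LINUX_PE_MAGIC;
}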
diff --git a/hw/loongarch/fw_cfg.c b/hw/loongarch/fw_cfg.c
index 35aeb2d..4935636 100644
--- a/hw/loongarch/fw_cfg.c
+++ b/hw/loongarch/fw_cfg.c
@@ -9,7 +9,7 @@
#include "hw/loongarch/fw_cfg.h"
#include "hw/loongarch/virt.h"
#include "hw/nvram/fw_cfg.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build
index bce7eba..d494d1e 100644
--- a/hw/loongarch/meson.build
+++ b/hw/loongarch/meson.build
@@ -1,9 +1,11 @@
loongarch_ss = ss.source_set()
loongarch_ss.add(files(
- 'fw_cfg.c',
'boot.c',
))
-loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files('virt.c'))
-loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-build.c'))
+common_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files('fw_cfg.c'))
+loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files(
+ 'virt-fdt-build.c',
+ 'virt.c'))
+loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
hw_arch += {'loongarch': loongarch_ss}
diff --git a/hw/loongarch/virt-acpi-build.c b/hw/loongarch/virt-acpi-build.c
new file mode 100644
index 0000000..2cd2d9d
--- /dev/null
+++ b/hw/loongarch/virt-acpi-build.c
@@ -0,0 +1,742 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Support for generating ACPI tables and passing them to Guests
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/bitmap.h"
+#include "hw/pci/pci.h"
+#include "hw/core/cpu.h"
+#include "target/loongarch/cpu.h"
+#include "hw/acpi/acpi-defs.h"
+#include "hw/acpi/acpi.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/acpi/bios-linker-loader.h"
+#include "migration/vmstate.h"
+#include "hw/mem/memory-device.h"
+#include "system/reset.h"
+
+/* Supported chipsets: */
+#include "hw/pci-host/ls7a.h"
+#include "hw/loongarch/virt.h"
+
+#include "hw/acpi/utils.h"
+#include "hw/acpi/pci.h"
+
+#include "qom/qom-qobject.h"
+
+#include "hw/acpi/generic_event_device.h"
+#include "hw/pci-host/gpex.h"
+#include "system/system.h"
+#include "system/tpm.h"
+#include "hw/platform-bus.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/hmat.h"
+
+#define ACPI_BUILD_ALIGN_SIZE 0x1000
+#define ACPI_BUILD_TABLE_SIZE 0x20000
+
+#ifdef DEBUG_ACPI_BUILD
+#define ACPI_BUILD_DPRINTF(fmt, ...) \
+ do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define ACPI_BUILD_DPRINTF(fmt, ...)
+#endif
+
+static void virt_madt_cpu_entry(int uid,
+ const CPUArchIdList *apic_ids,
+ GArray *entry, bool force_enabled)
+{
+ uint32_t flags, apic_id = apic_ids->cpus[uid].arch_id;
+
+ flags = apic_ids->cpus[uid].cpu || force_enabled ? 1 /* Enabled */ : 0;
+
+ /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */
+ build_append_int_noprefix(entry, 0, 1); /* Type */
+ build_append_int_noprefix(entry, 8, 1); /* Length */
+ build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */
+ build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */
+ build_append_int_noprefix(entry, flags, 4); /* Flags */
+}
+
+/* build FADT */
+static void init_common_fadt_data(AcpiFadtData *data)
+{
+ AcpiFadtData fadt = {
+ /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */
+ .rev = 5,
+ .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) |
+ (1 << ACPI_FADT_F_RESET_REG_SUP)),
+
+ /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */
+ .sleep_ctl = {
+ .space_id = AML_AS_SYSTEM_MEMORY,
+ .bit_width = 8,
+ .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL,
+ },
+ .sleep_sts = {
+ .space_id = AML_AS_SYSTEM_MEMORY,
+ .bit_width = 8,
+ .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS,
+ },
+
+ /* ACPI 5.0: 4.8.3.6 Reset Register */
+ .reset_reg = {
+ .space_id = AML_AS_SYSTEM_MEMORY,
+ .bit_width = 8,
+ .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET,
+ },
+ .reset_val = ACPI_GED_RESET_VALUE,
+ };
+ *data = fadt;
+}
+
+static void acpi_align_size(GArray *blob, unsigned align)
+{
+ /*
+ * Align size to multiple of given size. This reduces the chance
+ * we need to change size in the future (breaking cross version migration).
+ */
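+ /* e.g. a 0x1234-byte blob aligned to 0x1000 is padded to 0x2000 bytes. */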
+ g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
+}
+
+/* build FACS */
+static void
+build_facs(GArray *table_data)
+{
+ const char *sig = "FACS";
+ const uint8_t reserved[40] = {};
+
+ g_array_append_vals(table_data, sig, 4); /* Signature */
+ build_append_int_noprefix(table_data, 64, 4); /* Length */
+ build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */
+ build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */
+ build_append_int_noprefix(table_data, 0, 4); /* Global Lock */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ g_array_append_vals(table_data, reserved, 40); /* Reserved */
+}
+
+/* build MADT */
+static void
+build_madt(GArray *table_data, BIOSLinker *linker,
+ LoongArchVirtMachineState *lvms)
+{
+ MachineState *ms = MACHINE(lvms);
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
+ int i, arch_id, flags;
+ AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lvms->oem_id,
+ .oem_table_id = lvms->oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+
+ /* Local APIC Address */
+ build_append_int_noprefix(table_data, 0, 4);
+ build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */
+
+ for (i = 0; i < arch_ids->len; i++) {
+ /* Processor Core Interrupt Controller Structure */
+ arch_id = arch_ids->cpus[i].arch_id;
+ flags = arch_ids->cpus[i].cpu ? 1 : 0;
+ build_append_int_noprefix(table_data, 17, 1); /* Type */
+ build_append_int_noprefix(table_data, 15, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, i, 4); /* ACPI Processor ID */
+ build_append_int_noprefix(table_data, arch_id, 4); /* Core ID */
+ build_append_int_noprefix(table_data, flags, 4); /* Flags */
+ }
+
+ /* Extend I/O Interrupt Controller Structure */
+ build_append_int_noprefix(table_data, 20, 1); /* Type */
+ build_append_int_noprefix(table_data, 13, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, 3, 1); /* Cascade */
+ build_append_int_noprefix(table_data, 0, 1); /* Node */
+ build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */
+
+ /* MSI Interrupt Controller Structure */
+ build_append_int_noprefix(table_data, 21, 1); /* Type */
+ build_append_int_noprefix(table_data, 19, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, VIRT_PCH_MSI_ADDR_LOW, 8);/* Address */
+ build_append_int_noprefix(table_data, 0x40, 4); /* Start */
+ build_append_int_noprefix(table_data, 0xc0, 4); /* Count */
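+ /* MSI message vectors 0x40..0xff: the 192 vectors above the 64 used by the pch-pic. */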
+
+ /* Bridge I/O Interrupt Controller Structure */
+ build_append_int_noprefix(table_data, 22, 1); /* Type */
+ build_append_int_noprefix(table_data, 17, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, VIRT_PCH_REG_BASE, 8);/* Address */
+ build_append_int_noprefix(table_data, 0x1000, 2); /* Size */
+ build_append_int_noprefix(table_data, 0, 2); /* Id */
+ build_append_int_noprefix(table_data, 0x40, 2); /* Base */
+
+ acpi_table_end(linker, &table);
+}
+
+/* build SRAT */
+static void
+build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
+{
+ int i, arch_id, node_id;
+ hwaddr len, base, gap;
+ NodeInfo *numa_info;
+ int nodes, nb_numa_nodes = machine->numa_state->num_nodes;
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
+ MachineClass *mc = MACHINE_GET_CLASS(lvms);
+ const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
+ AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lvms->oem_id,
+ .oem_table_id = lvms->oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ build_append_int_noprefix(table_data, 1, 4); /* Reserved */
+ build_append_int_noprefix(table_data, 0, 8); /* Reserved */
+
+ for (i = 0; i < arch_ids->len; ++i) {
+ arch_id = arch_ids->cpus[i].arch_id;
+ node_id = arch_ids->cpus[i].props.node_id;
+
+ /* Processor Local APIC/SAPIC Affinity Structure */
+ build_append_int_noprefix(table_data, 0, 1); /* Type */
+ build_append_int_noprefix(table_data, 16, 1); /* Length */
+ /* Proximity Domain [7:0] */
+ build_append_int_noprefix(table_data, node_id, 1);
+ build_append_int_noprefix(table_data, arch_id, 1); /* APIC ID */
+ /* Flags, Table 5-36 */
+ build_append_int_noprefix(table_data, 1, 4);
+ build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */
+ /* Proximity Domain [31:8] */
+ build_append_int_noprefix(table_data, 0, 3);
+ build_append_int_noprefix(table_data, 0, 4); /* Reserved */
+ }
+
+ base = VIRT_LOWMEM_BASE;
+ gap = VIRT_LOWMEM_SIZE;
+ numa_info = machine->numa_state->nodes;
+ nodes = nb_numa_nodes;
+ if (!nodes) {
+ nodes = 1;
+ }
+
+ for (i = 0; i < nodes; i++) {
+ if (nb_numa_nodes) {
+ len = numa_info[i].node_mem;
+ } else {
+ len = machine->ram_size;
+ }
+
+ /*
+ * Memory for the node is split into two parts:
+ * lowram: [base, +gap)
+ * highram: [VIRT_HIGHMEM_BASE, +(len - gap))
+ */
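+ /*
+ * Worked example, assuming VIRT_LOWMEM_SIZE is 256 MiB: a 2 GiB node
+ * reports 256 MiB at VIRT_LOWMEM_BASE and the remaining 1.75 GiB at
+ * VIRT_HIGHMEM_BASE.
+ */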
+ if (len >= gap) {
+ build_srat_memory(table_data, base, gap, i, MEM_AFFINITY_ENABLED);
+ len -= gap;
+ base = VIRT_HIGHMEM_BASE;
+ gap = machine->ram_size - VIRT_LOWMEM_SIZE;
+ }
+
+ if (len) {
+ build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED);
+ base += len;
+ gap -= len;
+ }
+ }
+
+ if (machine->device_memory) {
+ build_srat_memory(table_data, machine->device_memory->base,
+ memory_region_size(&machine->device_memory->mr),
+ nodes - 1,
+ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
+ }
+
+ acpi_table_end(linker, &table);
+}
+
+/*
+ * Serial Port Console Redirection Table (SPCR)
+ * https://learn.microsoft.com/en-us/windows-hardware/drivers/serports/serial-port-console-redirection-table
+ */
+static void
+spcr_setup(GArray *table_data, BIOSLinker *linker, MachineState *machine)
+{
+ LoongArchVirtMachineState *lvms;
+ AcpiSpcrData serial = {
+ .interface_type = 0, /* 16550 compatible */
+ .base_addr.id = AML_AS_SYSTEM_MEMORY,
+ .base_addr.width = 32,
+ .base_addr.offset = 0,
+ .base_addr.size = 1,
+ .base_addr.addr = VIRT_UART_BASE,
+ .interrupt_type = 0, /* Interrupt not supported */
+ .pc_interrupt = 0,
+ .interrupt = VIRT_UART_IRQ,
+ .baud_rate = 7, /* 115200 */
+ .parity = 0,
+ .stop_bits = 1,
+ .flow_control = 0,
+ .terminal_type = 3, /* ANSI */
+ .language = 0, /* Language */
+ .pci_device_id = 0xffff, /* not a PCI device */
+ .pci_vendor_id = 0xffff, /* not a PCI device */
+ .pci_bus = 0,
+ .pci_device = 0,
+ .pci_function = 0,
+ .pci_flags = 0,
+ .pci_segment = 0,
+ };
+
+ lvms = LOONGARCH_VIRT_MACHINE(machine);
+ /*
+ * Pass NULL for the NamespaceString: SPCR Table Revision 2 does not
+ * support it.
+ */
+ build_spcr(table_data, linker, &serial, 2, lvms->oem_id,
+ lvms->oem_table_id, NULL);
+}
+
+typedef
+struct AcpiBuildState {
+ /* Copy of table in RAM (for patching). */
+ MemoryRegion *table_mr;
+ /* Is table patched? */
+ uint8_t patched;
+ void *rsdp;
+ MemoryRegion *rsdp_mr;
+ MemoryRegion *linker_mr;
+} AcpiBuildState;
+
+static void build_uart_device_aml(Aml *table, int index)
+{
+ Aml *dev;
+ Aml *crs;
+ Aml *pkg0, *pkg1, *pkg2;
+ Aml *scope;
+ uint32_t uart_irq;
+ uint64_t base;
+
+ uart_irq = VIRT_UART_IRQ + index;
+ base = VIRT_UART_BASE + index * VIRT_UART_SIZE;
+ scope = aml_scope("_SB");
+ dev = aml_device("COM%d", index);
+ aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(index)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE,
+ 0, base, base + VIRT_UART_SIZE - 1,
+ 0, VIRT_UART_SIZE));
+ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
+ AML_SHARED, &uart_irq, 1));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ pkg0 = aml_package(0x2);
+ aml_append(pkg0, aml_int(0x05F5E100));
+ aml_append(pkg0, aml_string("clock-frenquency"));
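+ /* 0x05F5E100 == 100000000 (100 MHz), matching the clock-frequency used in the fdt. */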
+ pkg1 = aml_package(0x1);
+ aml_append(pkg1, pkg0);
+ pkg2 = aml_package(0x2);
+ aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
+ aml_append(pkg2, pkg1);
+ aml_append(dev, aml_name_decl("_DSD", pkg2));
+ aml_append(scope, dev);
+ aml_append(table, scope);
+}
+
+static void
+build_la_ged_aml(Aml *dsdt, MachineState *machine)
+{
+ uint32_t event;
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
+ CPUHotplugFeatures opts;
+
+ build_ged_aml(dsdt, "\\_SB."GED_DEVICE,
+ HOTPLUG_HANDLER(lvms->acpi_ged),
+ VIRT_SCI_IRQ, AML_SYSTEM_MEMORY,
+ VIRT_GED_EVT_ADDR);
+ event = object_property_get_uint(OBJECT(lvms->acpi_ged),
+ "ged-event", &error_abort);
+ if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
+ build_memory_hotplug_aml(dsdt, machine->ram_slots, "\\_SB", NULL,
+ AML_SYSTEM_MEMORY,
+ VIRT_GED_MEM_ADDR);
+ }
+
+ if (event & ACPI_GED_CPU_HOTPLUG_EVT) {
+ opts.acpi_1_compatible = false;
+ opts.has_legacy_cphp = false;
+ opts.fw_unplugs_cpu = false;
+ opts.smi_path = NULL;
+
+ build_cpus_aml(dsdt, machine, opts, virt_madt_cpu_entry,
+ VIRT_GED_CPUHP_ADDR, "\\_SB",
+ AML_GED_EVT_CPU_SCAN_METHOD, AML_SYSTEM_MEMORY);
+ }
+
+ acpi_dsdt_add_power_button(dsdt);
+}
+
+static void build_pci_device_aml(Aml *scope, LoongArchVirtMachineState *lvms)
+{
+ struct GPEXConfig cfg = {
+ .mmio64.base = VIRT_PCI_MEM_BASE,
+ .mmio64.size = VIRT_PCI_MEM_SIZE,
+ .pio.base = VIRT_PCI_IO_BASE,
+ .pio.size = VIRT_PCI_IO_SIZE,
+ .ecam.base = VIRT_PCI_CFG_BASE,
+ .ecam.size = VIRT_PCI_CFG_SIZE,
+ .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS,
+ .bus = lvms->pci_bus,
+ };
+
+ acpi_dsdt_add_gpex(scope, &cfg);
+}
+
+static void build_flash_aml(Aml *scope, LoongArchVirtMachineState *lvms)
+{
+ Aml *dev, *crs;
+ MemoryRegion *flash_mem;
+
+ hwaddr flash0_base;
+ hwaddr flash0_size;
+
+ hwaddr flash1_base;
+ hwaddr flash1_size;
+
+ flash_mem = pflash_cfi01_get_memory(lvms->flash[0]);
+ flash0_base = flash_mem->addr;
+ flash0_size = memory_region_size(flash_mem);
+
+ flash_mem = pflash_cfi01_get_memory(lvms->flash[1]);
+ flash1_base = flash_mem->addr;
+ flash1_size = memory_region_size(flash_mem);
+
+ dev = aml_device("FLS0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
+
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(flash0_base, flash0_size,
+ AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+
+ dev = aml_device("FLS1");
+ aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
+
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(flash1_base, flash1_size,
+ AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+}
+
+#ifdef CONFIG_TPM
+static void acpi_dsdt_add_tpm(Aml *scope, LoongArchVirtMachineState *vms)
+{
+ PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev);
+ hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS;
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find());
+ MemoryRegion *sbdev_mr;
+ hwaddr tpm_base;
+
+ if (!sbdev) {
+ return;
+ }
+
+ tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
+ assert(tpm_base != -1);
+
+ tpm_base += pbus_base;
+
+ sbdev_mr = sysbus_mmio_get_region(sbdev, 0);
+
+ Aml *dev = aml_device("TPM0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs,
+ aml_memory32_fixed(tpm_base,
+ (uint32_t)memory_region_size(sbdev_mr),
+ AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+}
+#endif
+
+/* build DSDT */
+static void
+build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine)
+{
+ int i;
+ Aml *dsdt, *scope, *pkg;
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
+ AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lvms->oem_id,
+ .oem_table_id = lvms->oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ dsdt = init_aml_allocator();
+ for (i = 0; i < VIRT_UART_COUNT; i++) {
+ build_uart_device_aml(dsdt, i);
+ }
+ build_pci_device_aml(dsdt, lvms);
+ build_la_ged_aml(dsdt, machine);
+ build_flash_aml(dsdt, lvms);
+#ifdef CONFIG_TPM
+ acpi_dsdt_add_tpm(dsdt, lvms);
+#endif
+ /* System State Package */
+ scope = aml_scope("\\");
+ pkg = aml_package(4);
+ aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5));
+ aml_append(pkg, aml_int(0)); /* ignored */
+ aml_append(pkg, aml_int(0)); /* reserved */
+ aml_append(pkg, aml_int(0)); /* reserved */
+ aml_append(scope, aml_name_decl("_S5", pkg));
+ aml_append(dsdt, scope);
+ /* Copy AML table into ACPI tables blob and patch header there */
+ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
+ acpi_table_end(linker, &table);
+ free_aml_allocator();
+}
+
+static void acpi_build(AcpiBuildTables *tables, MachineState *machine)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
+ GArray *table_offsets;
+ AcpiFadtData fadt_data;
+ unsigned facs, xsdt, dsdt;
+ uint8_t *u;
+ GArray *tables_blob = tables->table_data;
+
+ init_common_fadt_data(&fadt_data);
+
+ table_offsets = g_array_new(false, true, sizeof(uint32_t));
+ ACPI_BUILD_DPRINTF("init ACPI tables\n");
+
+ bios_linker_loader_alloc(tables->linker,
+ ACPI_BUILD_TABLE_FILE, tables_blob,
+ 64, false);
+
+ /*
+ * FACS is pointed to by FADT.
+ * We place it first since it's the only table that has alignment
+ * requirements.
+ */
+ facs = tables_blob->len;
+ build_facs(tables_blob);
+
+ /* DSDT is pointed to by FADT */
+ dsdt = tables_blob->len;
+ build_dsdt(tables_blob, tables->linker, machine);
+
+ /* ACPI tables pointed to by RSDT */
+ acpi_add_table(table_offsets, tables_blob);
+ fadt_data.facs_tbl_offset = &facs;
+ fadt_data.dsdt_tbl_offset = &dsdt;
+ fadt_data.xdsdt_tbl_offset = &dsdt;
+ build_fadt(tables_blob, tables->linker, &fadt_data,
+ lvms->oem_id, lvms->oem_table_id);
+
+ acpi_add_table(table_offsets, tables_blob);
+ build_madt(tables_blob, tables->linker, lvms);
+
+ acpi_add_table(table_offsets, tables_blob);
+ build_pptt(tables_blob, tables->linker, machine,
+ lvms->oem_id, lvms->oem_table_id);
+
+ acpi_add_table(table_offsets, tables_blob);
+ build_srat(tables_blob, tables->linker, machine);
+ acpi_add_table(table_offsets, tables_blob);
+ spcr_setup(tables_blob, tables->linker, machine);
+
+ if (machine->numa_state->num_nodes) {
+ if (machine->numa_state->have_numa_distance) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_slit(tables_blob, tables->linker, machine, lvms->oem_id,
+ lvms->oem_table_id);
+ }
+ if (machine->numa_state->hmat_enabled) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_hmat(tables_blob, tables->linker, machine->numa_state,
+ lvms->oem_id, lvms->oem_table_id);
+ }
+ }
+
+ acpi_add_table(table_offsets, tables_blob);
+ {
+ AcpiMcfgInfo mcfg = {
+ .base = VIRT_PCI_CFG_BASE,
+ .size = VIRT_PCI_CFG_SIZE,
+ };
+ build_mcfg(tables_blob, tables->linker, &mcfg, lvms->oem_id,
+ lvms->oem_table_id);
+ }
+
+#ifdef CONFIG_TPM
+ /* TPM info */
+ if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_tpm2(tables_blob, tables->linker,
+ tables->tcpalog, lvms->oem_id,
+ lvms->oem_table_id);
+ }
+#endif
+ /* Add tables supplied by user (if any) */
+ for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
+ unsigned len = acpi_table_len(u);
+
+ acpi_add_table(table_offsets, tables_blob);
+ g_array_append_vals(tables_blob, u, len);
+ }
+
+ /* RSDT is pointed to by RSDP */
+ xsdt = tables_blob->len;
+ build_xsdt(tables_blob, tables->linker, table_offsets,
+ lvms->oem_id, lvms->oem_table_id);
+
+ /* RSDP is in FSEG memory, so allocate it separately */
+ {
+ AcpiRsdpData rsdp_data = {
+ .revision = 2,
+ .oem_id = lvms->oem_id,
+ .xsdt_tbl_offset = &xsdt,
+ .rsdt_tbl_offset = NULL,
+ };
+ build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
+ }
+
+ /*
+ * Warn if the tables exceed half of ACPI_BUILD_TABLE_SIZE (64 KiB);
+ * growing past that point may force the reserved blob to be resized,
+ * which would break cross-version migration.
+ */
+ if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
+ warn_report("ACPI table size %u exceeds %d bytes,"
+ " migration may not work",
+ tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
+ error_printf("Try removing CPUs, NUMA nodes, memory slots"
+ " or PCI bridges.\n");
+ }
+
+ acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE);
+
+ /* Cleanup memory that's no longer used. */
+ g_array_free(table_offsets, true);
+}
+
+static void acpi_ram_update(MemoryRegion *mr, GArray *data)
+{
+ uint32_t size = acpi_data_len(data);
+
+ /*
+ * Make sure RAM size is correct - in case it got changed
+ * e.g. by migration
+ */
+ memory_region_ram_resize(mr, size, &error_abort);
+
+ memcpy(memory_region_get_ram_ptr(mr), data->data, size);
+ memory_region_set_dirty(mr, 0, size);
+}
+
+static void acpi_build_update(void *build_opaque)
+{
+ AcpiBuildState *build_state = build_opaque;
+ AcpiBuildTables tables;
+
+ /* No state to update or already patched? Nothing to do. */
+ if (!build_state || build_state->patched) {
+ return;
+ }
+ build_state->patched = 1;
+
+ acpi_build_tables_init(&tables);
+
+ acpi_build(&tables, MACHINE(qdev_get_machine()));
+
+ acpi_ram_update(build_state->table_mr, tables.table_data);
+ acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
+ acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
+
+ acpi_build_tables_cleanup(&tables, true);
+}
+
+static void acpi_build_reset(void *build_opaque)
+{
+ AcpiBuildState *build_state = build_opaque;
+ build_state->patched = 0;
+}
+
+static const VMStateDescription vmstate_acpi_build = {
+ .name = "acpi_build",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT8(patched, AcpiBuildState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool virt_is_acpi_enabled(LoongArchVirtMachineState *lvms)
+{
+ if (lvms->acpi == ON_OFF_AUTO_OFF) {
+ return false;
+ }
+ return true;
+}
+
+void virt_acpi_setup(LoongArchVirtMachineState *lvms)
+{
+ AcpiBuildTables tables;
+ AcpiBuildState *build_state;
+
+ if (!lvms->fw_cfg) {
+ ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
+ return;
+ }
+
+ if (!virt_is_acpi_enabled(lvms)) {
+ ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
+ return;
+ }
+
+ build_state = g_malloc0(sizeof *build_state);
+
+ acpi_build_tables_init(&tables);
+ acpi_build(&tables, MACHINE(lvms));
+
+ /* Now expose it all to Guest */
+ build_state->table_mr = acpi_add_rom_blob(acpi_build_update,
+ build_state, tables.table_data,
+ ACPI_BUILD_TABLE_FILE);
+ assert(build_state->table_mr != NULL);
+
+ build_state->linker_mr =
+ acpi_add_rom_blob(acpi_build_update, build_state,
+ tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE);
+
+ build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update,
+ build_state, tables.rsdp,
+ ACPI_BUILD_RSDP_FILE);
+
+ fw_cfg_add_file(lvms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
+ acpi_data_len(tables.tcpalog));
+
+ qemu_register_reset(acpi_build_reset, build_state);
+ acpi_build_reset(build_state);
+ vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);
+
+ /*
+ * Cleanup tables but don't free the memory: we track it
+ * in build_state.
+ */
+ acpi_build_tables_cleanup(&tables, false);
+}
diff --git a/hw/loongarch/virt-fdt-build.c b/hw/loongarch/virt-fdt-build.c
new file mode 100644
index 0000000..728ce46
--- /dev/null
+++ b/hw/loongarch/virt-fdt-build.c
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/guest-random.h"
+#include <libfdt.h>
+#include "hw/acpi/generic_event_device.h"
+#include "hw/core/sysbus-fdt.h"
+#include "hw/intc/loongarch_extioi.h"
+#include "hw/loader.h"
+#include "hw/loongarch/virt.h"
+#include "hw/pci-host/gpex.h"
+#include "hw/pci-host/ls7a.h"
+#include "system/device_tree.h"
+#include "system/reset.h"
+#include "target/loongarch/cpu.h"
+
+static void create_fdt(LoongArchVirtMachineState *lvms)
+{
+ MachineState *ms = MACHINE(lvms);
+ uint8_t rng_seed[32];
+
+ ms->fdt = create_device_tree(&lvms->fdt_size);
+ if (!ms->fdt) {
+ error_report("create_device_tree() failed");
+ exit(1);
+ }
+
+ /* Header */
+ qemu_fdt_setprop_string(ms->fdt, "/", "compatible",
+ "linux,dummy-loongson3");
+ qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2);
+ qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2);
+ qemu_fdt_add_subnode(ms->fdt, "/chosen");
+
+ /* Pass seed to RNG */
+ qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed));
+ qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed));
+}
+
+static void fdt_add_cpu_nodes(const LoongArchVirtMachineState *lvms)
+{
+ int num;
+ MachineState *ms = MACHINE(lvms);
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus;
+ LoongArchCPU *cpu;
+ CPUState *cs;
+ char *nodename, *map_path;
+
+ qemu_fdt_add_subnode(ms->fdt, "/cpus");
+ qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
+ qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
+
+ /* cpu nodes */
+ possible_cpus = mc->possible_cpu_arch_ids(ms);
+ for (num = 0; num < possible_cpus->len; num++) {
+ cs = possible_cpus->cpus[num].cpu;
+ if (cs == NULL) {
+ continue;
+ }
+
+ nodename = g_strdup_printf("/cpus/cpu@%d", num);
+ cpu = LOONGARCH_CPU(cs);
+
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ cpu->dtb_compatible);
+ if (possible_cpus->cpus[num].props.has_node_id) {
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id",
+ possible_cpus->cpus[num].props.node_id);
+ }
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(ms->fdt));
+ g_free(nodename);
+ }
+
+ /* cpu map */
+ qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
+ for (num = 0; num < possible_cpus->len; num++) {
+ cs = possible_cpus->cpus[num].cpu;
+ if (cs == NULL) {
+ continue;
+ }
+
+ nodename = g_strdup_printf("/cpus/cpu@%d", num);
+ if (ms->smp.threads > 1) {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/core%d/thread%d",
+ num / (ms->smp.cores * ms->smp.threads),
+ (num / ms->smp.threads) % ms->smp.cores,
+ num % ms->smp.threads);
+ } else {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/core%d",
+ num / ms->smp.cores,
+ num % ms->smp.cores);
+ }
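+ /* e.g. with cores=2 and threads=2, cpu 5 maps to socket1/core0/thread1. */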
+ qemu_fdt_add_path(ms->fdt, map_path);
+ qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", nodename);
+
+ g_free(map_path);
+ g_free(nodename);
+ }
+}
+
+static void fdt_add_memory_node(MachineState *ms,
+ uint64_t base, uint64_t size, int node_id)
+{
+ char *nodename = g_strdup_printf("/memory@%" PRIx64, base);
+
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", base >> 32, base,
+ size >> 32, size);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory");
+
+ if (ms->numa_state && ms->numa_state->num_nodes) {
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", node_id);
+ }
+
+ g_free(nodename);
+}
+
+static void fdt_add_memory_nodes(MachineState *ms)
+{
+ hwaddr base, size, ram_size, gap;
+ int i, nb_numa_nodes, nodes;
+ NodeInfo *numa_info;
+
+ ram_size = ms->ram_size;
+ base = VIRT_LOWMEM_BASE;
+ gap = VIRT_LOWMEM_SIZE;
+ nodes = nb_numa_nodes = ms->numa_state->num_nodes;
+ numa_info = ms->numa_state->nodes;
+ if (!nodes) {
+ nodes = 1;
+ }
+
+ for (i = 0; i < nodes; i++) {
+ if (nb_numa_nodes) {
+ size = numa_info[i].node_mem;
+ } else {
+ size = ram_size;
+ }
+
+ /*
+ * Memory for the node is split into two parts:
+ * lowram: [base, +gap)
+ * highram: [VIRT_HIGHMEM_BASE, +(len - gap))
+ */
+ if (size >= gap) {
+ fdt_add_memory_node(ms, base, gap, i);
+ size -= gap;
+ base = VIRT_HIGHMEM_BASE;
+ gap = ram_size - VIRT_LOWMEM_SIZE;
+ }
+
+ if (size) {
+ fdt_add_memory_node(ms, base, size, i);
+ base += size;
+ gap -= size;
+ }
+ }
+}
+
+static void fdt_add_fw_cfg_node(const LoongArchVirtMachineState *lvms)
+{
+ char *nodename;
+ hwaddr base = VIRT_FWCFG_BASE;
+ const MachineState *ms = MACHINE(lvms);
+
+ nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename,
+ "compatible", "qemu,fw-cfg-mmio");
+ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
+ 2, base, 2, 0x18);
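+ /* The 0x18-byte window covers the fw_cfg data, control and DMA registers. */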
+ qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
+ g_free(nodename);
+}
+
+static void fdt_add_flash_node(LoongArchVirtMachineState *lvms)
+{
+ MachineState *ms = MACHINE(lvms);
+ char *nodename;
+ MemoryRegion *flash_mem;
+
+ hwaddr flash0_base;
+ hwaddr flash0_size;
+
+ hwaddr flash1_base;
+ hwaddr flash1_size;
+
+ flash_mem = pflash_cfi01_get_memory(lvms->flash[0]);
+ flash0_base = flash_mem->addr;
+ flash0_size = memory_region_size(flash_mem);
+
+ flash_mem = pflash_cfi01_get_memory(lvms->flash[1]);
+ flash1_base = flash_mem->addr;
+ flash1_size = memory_region_size(flash_mem);
+
+ nodename = g_strdup_printf("/flash@%" PRIx64, flash0_base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
+ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
+ 2, flash0_base, 2, flash0_size,
+ 2, flash1_base, 2, flash1_size);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
+ g_free(nodename);
+}
+
+static void fdt_add_cpuic_node(LoongArchVirtMachineState *lvms,
+ uint32_t *cpuintc_phandle)
+{
+ MachineState *ms = MACHINE(lvms);
+ char *nodename;
+
+ *cpuintc_phandle = qemu_fdt_alloc_phandle(ms->fdt);
+ nodename = g_strdup_printf("/cpuic");
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *cpuintc_phandle);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ "loongson,cpu-interrupt-controller");
+ qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
+ g_free(nodename);
+}
+
+static void fdt_add_eiointc_node(LoongArchVirtMachineState *lvms,
+ uint32_t *cpuintc_phandle,
+ uint32_t *eiointc_phandle)
+{
+ MachineState *ms = MACHINE(lvms);
+ char *nodename;
+ hwaddr extioi_base = APIC_BASE;
+ hwaddr extioi_size = EXTIOI_SIZE;
+
+ *eiointc_phandle = qemu_fdt_alloc_phandle(ms->fdt);
+ nodename = g_strdup_printf("/eiointc@%" PRIx64, extioi_base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *eiointc_phandle);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ "loongson,ls2k2000-eiointc");
+ qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
+ *cpuintc_phandle);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupts", 3);
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0,
+ extioi_base, 0x0, extioi_size);
+ g_free(nodename);
+}
+
+static void fdt_add_pch_pic_node(LoongArchVirtMachineState *lvms,
+ uint32_t *eiointc_phandle,
+ uint32_t *pch_pic_phandle)
+{
+ MachineState *ms = MACHINE(lvms);
+ char *nodename;
+ hwaddr pch_pic_base = VIRT_PCH_REG_BASE;
+ hwaddr pch_pic_size = VIRT_PCH_REG_SIZE;
+
+ *pch_pic_phandle = qemu_fdt_alloc_phandle(ms->fdt);
+ nodename = g_strdup_printf("/platic@%" PRIx64, pch_pic_base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_pic_phandle);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ "loongson,pch-pic-1.0");
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0,
+ pch_pic_base, 0, pch_pic_size);
+ qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 2);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
+ *eiointc_phandle);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,pic-base-vec", 0);
+ g_free(nodename);
+}
+
+static void fdt_add_pch_msi_node(LoongArchVirtMachineState *lvms,
+ uint32_t *eiointc_phandle,
+ uint32_t *pch_msi_phandle)
+{
+ MachineState *ms = MACHINE(lvms);
+ char *nodename;
+ hwaddr pch_msi_base = VIRT_PCH_MSI_ADDR_LOW;
+ hwaddr pch_msi_size = VIRT_PCH_MSI_SIZE;
+
+ *pch_msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
+ nodename = g_strdup_printf("/msi@%" PRIx64, pch_msi_base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_msi_phandle);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ "loongson,pch-msi-1.0");
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "reg",
+ 0, pch_msi_base,
+ 0, pch_msi_size);
+ qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
+ *eiointc_phandle);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-base-vec",
+ VIRT_PCH_PIC_IRQ_NUM);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-num-vecs",
+ EXTIOI_IRQS - VIRT_PCH_PIC_IRQ_NUM);
+ g_free(nodename);
+}
+
+static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms,
+ char *nodename,
+ uint32_t *pch_pic_phandle)
+{
+ int pin, dev;
+ uint32_t irq_map_stride = 0;
+ uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 10] = {};
+ uint32_t *irq_map = full_irq_map;
+ const MachineState *ms = MACHINE(lvms);
+
+ /*
+ * This code creates a standard swizzle of interrupts such that
+ * each device's first interrupt is based on its PCI_SLOT number.
+ * (See pci_swizzle_map_irq_fn())
+ *
+ * We only need one entry per interrupt in the table (not one per
+ * possible slot), since the interrupt-map-mask allows the table
+ * to wrap to any number of devices.
+ */
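+ /*
+ * For example, slot 2 pin INTA (pin 0) maps to 16 + ((0 + 2) % 4) = 18,
+ * while slot 3 pin INTB (pin 1) wraps to 16 + ((1 + 3) % 4) = 16.
+ */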
+
+ for (dev = 0; dev < PCI_NUM_PINS; dev++) {
+ int devfn = dev * 0x8;
+
+ for (pin = 0; pin < PCI_NUM_PINS; pin++) {
+ int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
+ int i = 0;
+
+ /* Fill PCI address cells */
+ irq_map[i] = cpu_to_be32(devfn << 8);
+ i += 3;
+
+ /* Fill PCI Interrupt cells */
+ irq_map[i] = cpu_to_be32(pin + 1);
+ i += 1;
+
+ /* Fill interrupt controller phandle and cells */
+ irq_map[i++] = cpu_to_be32(*pch_pic_phandle);
+ irq_map[i++] = cpu_to_be32(irq_nr);
+
+ if (!irq_map_stride) {
+ irq_map_stride = i;
+ }
+ irq_map += irq_map_stride;
+ }
+ }
+
+
+ qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map,
+ PCI_NUM_PINS * PCI_NUM_PINS *
+ irq_map_stride * sizeof(uint32_t));
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask",
+ 0x1800, 0, 0, 0x7);
+}
+
+static void fdt_add_pcie_node(const LoongArchVirtMachineState *lvms,
+ uint32_t *pch_pic_phandle,
+ uint32_t *pch_msi_phandle)
+{
+ char *nodename;
+ hwaddr base_mmio = VIRT_PCI_MEM_BASE;
+ hwaddr size_mmio = VIRT_PCI_MEM_SIZE;
+ hwaddr base_pio = VIRT_PCI_IO_BASE;
+ hwaddr size_pio = VIRT_PCI_IO_SIZE;
+ hwaddr base_pcie = VIRT_PCI_CFG_BASE;
+ hwaddr size_pcie = VIRT_PCI_CFG_SIZE;
+ hwaddr base = base_pcie;
+ const MachineState *ms = MACHINE(lvms);
+
+ nodename = g_strdup_printf("/pcie@%" PRIx64, base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename,
+ "compatible", "pci-host-ecam-generic");
+ qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci");
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0);
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0,
+ PCIE_MMCFG_BUS(VIRT_PCI_CFG_SIZE - 1));
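+ /* ECAM reserves 1 MiB per bus, so this is the highest bus number the window can reach. */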
+ qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
+ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
+ 2, base_pcie, 2, size_pcie);
+ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
+ 1, FDT_PCI_RANGE_IOPORT, 2, VIRT_PCI_IO_OFFSET,
+ 2, base_pio, 2, size_pio,
+ 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
+ 2, base_mmio, 2, size_mmio);
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map",
+ 0, *pch_msi_phandle, 0, 0x10000);
+ fdt_add_pcie_irq_map_node(lvms, nodename, pch_pic_phandle);
+ g_free(nodename);
+}
+
+static void fdt_add_uart_node(LoongArchVirtMachineState *lvms,
+ uint32_t *pch_pic_phandle, hwaddr base,
+ int irq, bool chosen)
+{
+ char *nodename;
+ hwaddr size = VIRT_UART_SIZE;
+ MachineState *ms = MACHINE(lvms);
+
+ nodename = g_strdup_printf("/serial@%" PRIx64, base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "ns16550a");
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, base, 0x0, size);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000);
+ if (chosen) {
+ qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename);
+ }
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", irq, 0x4);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
+ *pch_pic_phandle);
+ g_free(nodename);
+}
+
+static void fdt_add_rtc_node(LoongArchVirtMachineState *lvms,
+ uint32_t *pch_pic_phandle)
+{
+ char *nodename;
+ hwaddr base = VIRT_RTC_REG_BASE;
+ hwaddr size = VIRT_RTC_LEN;
+ MachineState *ms = MACHINE(lvms);
+
+ nodename = g_strdup_printf("/rtc@%" PRIx64, base);
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
+ "loongson,ls7a-rtc");
+ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size);
+ qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
+ VIRT_RTC_IRQ - VIRT_GSI_BASE, 0x4);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
+ *pch_pic_phandle);
+ g_free(nodename);
+}
+
+static void fdt_add_ged_reset(LoongArchVirtMachineState *lvms)
+{
+ char *name;
+ uint32_t ged_handle;
+ MachineState *ms = MACHINE(lvms);
+ hwaddr base = VIRT_GED_REG_ADDR;
+ hwaddr size = ACPI_GED_REG_COUNT;
+
+ ged_handle = qemu_fdt_alloc_phandle(ms->fdt);
+ name = g_strdup_printf("/ged@%" PRIx64, base);
+ qemu_fdt_add_subnode(ms->fdt, name);
+ qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon");
+ qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0x0, base, 0x0, size);
+ /* 8 bit registers */
+ qemu_fdt_setprop_cell(ms->fdt, name, "reg-shift", 0);
+ qemu_fdt_setprop_cell(ms->fdt, name, "reg-io-width", 1);
+ qemu_fdt_setprop_cell(ms->fdt, name, "phandle", ged_handle);
+ ged_handle = qemu_fdt_get_phandle(ms->fdt, name);
+ g_free(name);
+
+ name = g_strdup_printf("/reboot");
+ qemu_fdt_add_subnode(ms->fdt, name);
+ qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-reboot");
+ qemu_fdt_setprop_cell(ms->fdt, name, "regmap", ged_handle);
+ qemu_fdt_setprop_cell(ms->fdt, name, "offset", ACPI_GED_REG_RESET);
+ qemu_fdt_setprop_cell(ms->fdt, name, "value", ACPI_GED_RESET_VALUE);
+ g_free(name);
+
+ name = g_strdup_printf("/poweroff");
+ qemu_fdt_add_subnode(ms->fdt, name);
+ qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-poweroff");
+ qemu_fdt_setprop_cell(ms->fdt, name, "regmap", ged_handle);
+ qemu_fdt_setprop_cell(ms->fdt, name, "offset", ACPI_GED_REG_SLEEP_CTL);
+ qemu_fdt_setprop_cell(ms->fdt, name, "value", ACPI_GED_SLP_EN |
+ (ACPI_GED_SLP_TYP_S5 << ACPI_GED_SLP_TYP_POS));
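+ /*
+ * With the usual GED register encoding (SLP_EN at bit 5, SLP_TYP in
+ * bits 2..4, S5 == 5) this writes 0x34; see generic_event_device.h
+ * for the authoritative constants.
+ */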
+ g_free(name);
+}
+
+void virt_fdt_setup(LoongArchVirtMachineState *lvms)
+{
+ MachineState *machine = MACHINE(lvms);
+ uint32_t cpuintc_phandle, eiointc_phandle, pch_pic_phandle, pch_msi_phandle;
+ int i;
+
+ create_fdt(lvms);
+ fdt_add_cpu_nodes(lvms);
+ fdt_add_memory_nodes(machine);
+ fdt_add_fw_cfg_node(lvms);
+ fdt_add_flash_node(lvms);
+
+ /* Add cpu interrupt-controller */
+ fdt_add_cpuic_node(lvms, &cpuintc_phandle);
+ /* Add Extend I/O Interrupt Controller node */
+ fdt_add_eiointc_node(lvms, &cpuintc_phandle, &eiointc_phandle);
+ /* Add PCH PIC node */
+ fdt_add_pch_pic_node(lvms, &eiointc_phandle, &pch_pic_phandle);
+ /* Add PCH MSI node */
+ fdt_add_pch_msi_node(lvms, &eiointc_phandle, &pch_msi_phandle);
+ /* Add pcie node */
+ fdt_add_pcie_node(lvms, &pch_pic_phandle, &pch_msi_phandle);
+
+ /*
+ * Create the uart fdt nodes in reverse order so that they appear
+ * in the finished device tree lowest address first.
+ */
+ for (i = VIRT_UART_COUNT; i-- > 0;) {
+ hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE;
+ int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE;
+ fdt_add_uart_node(lvms, &pch_pic_phandle, base, irq, i == 0);
+ }
+
+ fdt_add_rtc_node(lvms, &pch_pic_phandle);
+ fdt_add_ged_reset(lvms);
+ platform_bus_add_all_fdt_nodes(machine->fdt, "/platic",
+ VIRT_PLATFORM_BUS_BASEADDRESS,
+ VIRT_PLATFORM_BUS_SIZE,
+ VIRT_PLATFORM_BUS_IRQ);
+
+ /*
+ * Since the lowmem region starts at 0 and the Linux kernel's legacy start
+ * address is 2 MiB, the FDT base address is placed at 1 MiB to avoid NULL
+ * pointer access. The FDT size is limited to 1 MiB.
+ * Put the FDT into the memory map as a ROM image: this will ensure
+ * the FDT is copied again upon reset, even if addr points into RAM.
+ */
+ rom_add_blob_fixed_as("fdt", machine->fdt, lvms->fdt_size, FDT_BASE,
+ &address_space_memory);
+ qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds,
+ rom_ptr_for_as(&address_space_memory, FDT_BASE, lvms->fdt_size));
+}
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index e592b1b..b15ada2 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -8,22 +8,23 @@
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
+#include "exec/target_page.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
-#include "sysemu/reset.h"
-#include "sysemu/rtc.h"
+#include "hw/char/serial-mm.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
+#include "system/reset.h"
+#include "system/rtc.h"
#include "hw/loongarch/virt.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/irq.h"
#include "net/net.h"
#include "hw/loader.h"
#include "elf.h"
-#include "hw/intc/loongson_ipi.h"
+#include "hw/intc/loongarch_ipi.h"
#include "hw/intc/loongarch_extioi.h"
#include "hw/intc/loongarch_pch_pic.h"
#include "hw/intc/loongarch_pch_msi.h"
@@ -33,30 +34,19 @@
#include "hw/loongarch/fw_cfg.h"
#include "target/loongarch/cpu.h"
#include "hw/firmware/smbios.h"
-#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/mem/nvdimm.h"
-#include "sysemu/device_tree.h"
-#include <libfdt.h>
-#include "hw/core/sysbus-fdt.h"
#include "hw/platform-bus.h"
#include "hw/display/ramfb.h"
+#include "hw/uefi/var-service-api.h"
#include "hw/mem/pc-dimm.h"
-#include "sysemu/tpm.h"
-#include "sysemu/block-backend.h"
+#include "system/tpm.h"
+#include "system/block-backend.h"
#include "hw/block/flash.h"
#include "hw/virtio/virtio-iommu.h"
#include "qemu/error-report.h"
-static bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms)
-{
- if (lvms->veiointc == ON_OFF_AUTO_OFF) {
- return false;
- }
- return true;
-}
-
static void virt_get_veiointc(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -134,416 +124,6 @@ static void virt_flash_map(LoongArchVirtMachineState *lvms,
virt_flash_map1(flash1, VIRT_FLASH1_BASE, VIRT_FLASH1_SIZE, sysmem);
}
-static void fdt_add_cpuic_node(LoongArchVirtMachineState *lvms,
- uint32_t *cpuintc_phandle)
-{
- MachineState *ms = MACHINE(lvms);
- char *nodename;
-
- *cpuintc_phandle = qemu_fdt_alloc_phandle(ms->fdt);
- nodename = g_strdup_printf("/cpuic");
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *cpuintc_phandle);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- "loongson,cpu-interrupt-controller");
- qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
- g_free(nodename);
-}
-
-static void fdt_add_eiointc_node(LoongArchVirtMachineState *lvms,
- uint32_t *cpuintc_phandle,
- uint32_t *eiointc_phandle)
-{
- MachineState *ms = MACHINE(lvms);
- char *nodename;
- hwaddr extioi_base = APIC_BASE;
- hwaddr extioi_size = EXTIOI_SIZE;
-
- *eiointc_phandle = qemu_fdt_alloc_phandle(ms->fdt);
- nodename = g_strdup_printf("/eiointc@%" PRIx64, extioi_base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *eiointc_phandle);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- "loongson,ls2k2000-eiointc");
- qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
- *cpuintc_phandle);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupts", 3);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0,
- extioi_base, 0x0, extioi_size);
- g_free(nodename);
-}
-
-static void fdt_add_pch_pic_node(LoongArchVirtMachineState *lvms,
- uint32_t *eiointc_phandle,
- uint32_t *pch_pic_phandle)
-{
- MachineState *ms = MACHINE(lvms);
- char *nodename;
- hwaddr pch_pic_base = VIRT_PCH_REG_BASE;
- hwaddr pch_pic_size = VIRT_PCH_REG_SIZE;
-
- *pch_pic_phandle = qemu_fdt_alloc_phandle(ms->fdt);
- nodename = g_strdup_printf("/platic@%" PRIx64, pch_pic_base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_pic_phandle);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- "loongson,pch-pic-1.0");
- qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0,
- pch_pic_base, 0, pch_pic_size);
- qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 2);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
- *eiointc_phandle);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,pic-base-vec", 0);
- g_free(nodename);
-}
-
-static void fdt_add_pch_msi_node(LoongArchVirtMachineState *lvms,
- uint32_t *eiointc_phandle,
- uint32_t *pch_msi_phandle)
-{
- MachineState *ms = MACHINE(lvms);
- char *nodename;
- hwaddr pch_msi_base = VIRT_PCH_MSI_ADDR_LOW;
- hwaddr pch_msi_size = VIRT_PCH_MSI_SIZE;
-
- *pch_msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
- nodename = g_strdup_printf("/msi@%" PRIx64, pch_msi_base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_msi_phandle);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- "loongson,pch-msi-1.0");
- qemu_fdt_setprop_cells(ms->fdt, nodename, "reg",
- 0, pch_msi_base,
- 0, pch_msi_size);
- qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
- *eiointc_phandle);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-base-vec",
- VIRT_PCH_PIC_IRQ_NUM);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-num-vecs",
- EXTIOI_IRQS - VIRT_PCH_PIC_IRQ_NUM);
- g_free(nodename);
-}
-
-static void fdt_add_flash_node(LoongArchVirtMachineState *lvms)
-{
- MachineState *ms = MACHINE(lvms);
- char *nodename;
- MemoryRegion *flash_mem;
-
- hwaddr flash0_base;
- hwaddr flash0_size;
-
- hwaddr flash1_base;
- hwaddr flash1_size;
-
- flash_mem = pflash_cfi01_get_memory(lvms->flash[0]);
- flash0_base = flash_mem->addr;
- flash0_size = memory_region_size(flash_mem);
-
- flash_mem = pflash_cfi01_get_memory(lvms->flash[1]);
- flash1_base = flash_mem->addr;
- flash1_size = memory_region_size(flash_mem);
-
- nodename = g_strdup_printf("/flash@%" PRIx64, flash0_base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
- qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
- 2, flash0_base, 2, flash0_size,
- 2, flash1_base, 2, flash1_size);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
- g_free(nodename);
-}
-
-static void fdt_add_rtc_node(LoongArchVirtMachineState *lvms,
- uint32_t *pch_pic_phandle)
-{
- char *nodename;
- hwaddr base = VIRT_RTC_REG_BASE;
- hwaddr size = VIRT_RTC_LEN;
- MachineState *ms = MACHINE(lvms);
-
- nodename = g_strdup_printf("/rtc@%" PRIx64, base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- "loongson,ls7a-rtc");
- qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
- VIRT_RTC_IRQ - VIRT_GSI_BASE , 0x4);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
- *pch_pic_phandle);
- g_free(nodename);
-}
-
-static void fdt_add_uart_node(LoongArchVirtMachineState *lvms,
- uint32_t *pch_pic_phandle)
-{
- char *nodename;
- hwaddr base = VIRT_UART_BASE;
- hwaddr size = VIRT_UART_SIZE;
- MachineState *ms = MACHINE(lvms);
-
- nodename = g_strdup_printf("/serial@%" PRIx64, base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "ns16550a");
- qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, base, 0x0, size);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000);
- qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
- VIRT_UART_IRQ - VIRT_GSI_BASE, 0x4);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent",
- *pch_pic_phandle);
- g_free(nodename);
-}
-
-static void create_fdt(LoongArchVirtMachineState *lvms)
-{
- MachineState *ms = MACHINE(lvms);
-
- ms->fdt = create_device_tree(&lvms->fdt_size);
- if (!ms->fdt) {
- error_report("create_device_tree() failed");
- exit(1);
- }
-
- /* Header */
- qemu_fdt_setprop_string(ms->fdt, "/", "compatible",
- "linux,dummy-loongson3");
- qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2);
- qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2);
- qemu_fdt_add_subnode(ms->fdt, "/chosen");
-}
-
-static void fdt_add_cpu_nodes(const LoongArchVirtMachineState *lvms)
-{
- int num;
- const MachineState *ms = MACHINE(lvms);
- int smp_cpus = ms->smp.cpus;
-
- qemu_fdt_add_subnode(ms->fdt, "/cpus");
- qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
- qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
-
- /* cpu nodes */
- for (num = smp_cpus - 1; num >= 0; num--) {
- char *nodename = g_strdup_printf("/cpus/cpu@%d", num);
- LoongArchCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num));
- CPUState *cs = CPU(cpu);
-
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
- qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
- cpu->dtb_compatible);
- if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
- qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id",
- ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
- }
- qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
- qemu_fdt_alloc_phandle(ms->fdt));
- g_free(nodename);
- }
-
- /*cpu map */
- qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
-
- for (num = smp_cpus - 1; num >= 0; num--) {
- char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num);
- char *map_path;
-
- if (ms->smp.threads > 1) {
- map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/core%d/thread%d",
- num / (ms->smp.cores * ms->smp.threads),
- (num / ms->smp.threads) % ms->smp.cores,
- num % ms->smp.threads);
- } else {
- map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/core%d",
- num / ms->smp.cores,
- num % ms->smp.cores);
- }
- qemu_fdt_add_path(ms->fdt, map_path);
- qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);
-
- g_free(map_path);
- g_free(cpu_path);
- }
-}
-
-static void fdt_add_fw_cfg_node(const LoongArchVirtMachineState *lvms)
-{
- char *nodename;
- hwaddr base = VIRT_FWCFG_BASE;
- const MachineState *ms = MACHINE(lvms);
-
- nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename,
- "compatible", "qemu,fw-cfg-mmio");
- qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
- 2, base, 2, 0x18);
- qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
- g_free(nodename);
-}
-
-static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms,
- char *nodename,
- uint32_t *pch_pic_phandle)
-{
- int pin, dev;
- uint32_t irq_map_stride = 0;
- uint32_t full_irq_map[GPEX_NUM_IRQS *GPEX_NUM_IRQS * 10] = {};
- uint32_t *irq_map = full_irq_map;
- const MachineState *ms = MACHINE(lvms);
-
- /* This code creates a standard swizzle of interrupts such that
- * each device's first interrupt is based on it's PCI_SLOT number.
- * (See pci_swizzle_map_irq_fn())
- *
- * We only need one entry per interrupt in the table (not one per
- * possible slot) seeing the interrupt-map-mask will allow the table
- * to wrap to any number of devices.
- */
-
- for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
- int devfn = dev * 0x8;
-
- for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
- int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
- int i = 0;
-
- /* Fill PCI address cells */
- irq_map[i] = cpu_to_be32(devfn << 8);
- i += 3;
-
- /* Fill PCI Interrupt cells */
- irq_map[i] = cpu_to_be32(pin + 1);
- i += 1;
-
- /* Fill interrupt controller phandle and cells */
- irq_map[i++] = cpu_to_be32(*pch_pic_phandle);
- irq_map[i++] = cpu_to_be32(irq_nr);
-
- if (!irq_map_stride) {
- irq_map_stride = i;
- }
- irq_map += irq_map_stride;
- }
- }
-
-
- qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map,
- GPEX_NUM_IRQS * GPEX_NUM_IRQS *
- irq_map_stride * sizeof(uint32_t));
- qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask",
- 0x1800, 0, 0, 0x7);
-}
-
-static void fdt_add_pcie_node(const LoongArchVirtMachineState *lvms,
- uint32_t *pch_pic_phandle,
- uint32_t *pch_msi_phandle)
-{
- char *nodename;
- hwaddr base_mmio = VIRT_PCI_MEM_BASE;
- hwaddr size_mmio = VIRT_PCI_MEM_SIZE;
- hwaddr base_pio = VIRT_PCI_IO_BASE;
- hwaddr size_pio = VIRT_PCI_IO_SIZE;
- hwaddr base_pcie = VIRT_PCI_CFG_BASE;
- hwaddr size_pcie = VIRT_PCI_CFG_SIZE;
- hwaddr base = base_pcie;
-
- const MachineState *ms = MACHINE(lvms);
-
- nodename = g_strdup_printf("/pcie@%" PRIx64, base);
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_string(ms->fdt, nodename,
- "compatible", "pci-host-ecam-generic");
- qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci");
- qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0,
- PCIE_MMCFG_BUS(VIRT_PCI_CFG_SIZE - 1));
- qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
- qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
- 2, base_pcie, 2, size_pcie);
- qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
- 1, FDT_PCI_RANGE_IOPORT, 2, VIRT_PCI_IO_OFFSET,
- 2, base_pio, 2, size_pio,
- 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
- 2, base_mmio, 2, size_mmio);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map",
- 0, *pch_msi_phandle, 0, 0x10000);
-
- fdt_add_pcie_irq_map_node(lvms, nodename, pch_pic_phandle);
-
- g_free(nodename);
-}
-
-static void fdt_add_memory_node(MachineState *ms,
- uint64_t base, uint64_t size, int node_id)
-{
- char *nodename = g_strdup_printf("/memory@%" PRIx64, base);
-
- qemu_fdt_add_subnode(ms->fdt, nodename);
- qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", base >> 32, base,
- size >> 32, size);
- qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory");
-
- if (ms->numa_state && ms->numa_state->num_nodes) {
- qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", node_id);
- }
-
- g_free(nodename);
-}
-
-static void fdt_add_memory_nodes(MachineState *ms)
-{
- hwaddr base, size, ram_size, gap;
- int i, nb_numa_nodes, nodes;
- NodeInfo *numa_info;
-
- ram_size = ms->ram_size;
- base = VIRT_LOWMEM_BASE;
- gap = VIRT_LOWMEM_SIZE;
- nodes = nb_numa_nodes = ms->numa_state->num_nodes;
- numa_info = ms->numa_state->nodes;
- if (!nodes) {
- nodes = 1;
- }
-
- for (i = 0; i < nodes; i++) {
- if (nb_numa_nodes) {
- size = numa_info[i].node_mem;
- } else {
- size = ram_size;
- }
-
- /*
- * memory for the node splited into two part
- * lowram: [base, +gap)
- * highram: [VIRT_HIGHMEM_BASE, +(len - gap))
- */
- if (size >= gap) {
- fdt_add_memory_node(ms, base, gap, i);
- size -= gap;
- base = VIRT_HIGHMEM_BASE;
- gap = ram_size - VIRT_LOWMEM_SIZE;
- }
-
- if (size) {
- fdt_add_memory_node(ms, base, size, i);
- base += size;
- gap -= size;
- }
- }
-}
-
static void virt_build_smbios(LoongArchVirtMachineState *lvms)
{
MachineState *ms = MACHINE(lvms);
@@ -556,6 +136,10 @@ static void virt_build_smbios(LoongArchVirtMachineState *lvms)
return;
}
+ if (kvm_enabled()) {
+ product = "KVM Virtual Machine";
+ }
+
smbios_set_defaults("QEMU", product, mc->name);
smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64,
@@ -576,7 +160,8 @@ static void virt_done(Notifier *notifier, void *data)
LoongArchVirtMachineState *lvms = container_of(notifier,
LoongArchVirtMachineState, machine_done);
virt_build_smbios(lvms);
- loongarch_acpi_setup(lvms);
+ virt_acpi_setup(lvms);
+ virt_fdt_setup(lvms);
}
static void virt_powerdown_req(Notifier *notifier, void *opaque)
@@ -587,8 +172,15 @@ static void virt_powerdown_req(Notifier *notifier, void *opaque)
acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS);
}
-static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type)
+static void memmap_add_entry(MachineState *ms, uint64_t address,
+ uint64_t length, uint32_t type)
{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms);
+ struct memmap_entry *memmap_table;
+ unsigned int memmap_entries;
+
+ memmap_table = lvms->memmap_table;
+ memmap_entries = lvms->memmap_entries;
/* Ensure there are no duplicate entries. */
for (unsigned i = 0; i < memmap_entries; i++) {
assert(memmap_table[i].address != address);
@@ -601,6 +193,8 @@ static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type)
memmap_table[memmap_entries].type = cpu_to_le32(type);
memmap_table[memmap_entries].reserved = 0;
memmap_entries++;
+ lvms->memmap_table = memmap_table;
+ lvms->memmap_entries = memmap_entries;
}
static DeviceState *create_acpi_ged(DeviceState *pch_pic,
@@ -608,11 +202,17 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic,
{
DeviceState *dev;
MachineState *ms = MACHINE(lvms);
+ MachineClass *mc = MACHINE_GET_CLASS(lvms);
uint32_t event = ACPI_GED_PWR_DOWN_EVT;
if (ms->ram_slots) {
event |= ACPI_GED_MEM_HOTPLUG_EVT;
}
+
+ if (mc->has_hotpluggable_cpus) {
+ event |= ACPI_GED_CPU_HOTPLUG_EVT;
+ }
+
dev = qdev_new(TYPE_ACPI_GED);
qdev_prop_set_uint32(dev, "ged-event", event);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -624,6 +224,10 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic,
/* ged regs used for reset and power down */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR);
+ if (mc->has_hotpluggable_cpus) {
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 3, VIRT_GED_CPUHP_ADDR);
+ }
+
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE));
return dev;
@@ -655,9 +259,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic)
}
static void virt_devices_init(DeviceState *pch_pic,
- LoongArchVirtMachineState *lvms,
- uint32_t *pch_pic_phandle,
- uint32_t *pch_msi_phandle)
+ LoongArchVirtMachineState *lvms)
{
MachineClass *mc = MACHINE_GET_CLASS(lvms);
DeviceState *gpex_dev;
@@ -697,20 +299,23 @@ static void virt_devices_init(DeviceState *pch_pic,
memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE,
pio_alias);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
sysbus_connect_irq(d, i,
qdev_get_gpio_in(pch_pic, 16 + i));
gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i);
}
- /* Add pcie node */
- fdt_add_pcie_node(lvms, pch_pic_phandle, pch_msi_phandle);
-
- serial_mm_init(get_system_memory(), VIRT_UART_BASE, 0,
- qdev_get_gpio_in(pch_pic,
- VIRT_UART_IRQ - VIRT_GSI_BASE),
- 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN);
- fdt_add_uart_node(lvms, pch_pic_phandle);
+ /*
+ * Create uart fdt node in reverse order so that they appear
+ * in the finished device tree lowest address first
+ */
+ for (i = VIRT_UART_COUNT; i-- > 0;) {
+ hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE;
+ int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE;
+ serial_mm_init(get_system_memory(), base, 0,
+ qdev_get_gpio_in(pch_pic, irq),
+ 115200, serial_hd(i), DEVICE_LITTLE_ENDIAN);
+ }
/* Network init */
pci_init_nic_devices(pci_bus, mc->default_nic);
@@ -723,7 +328,6 @@ static void virt_devices_init(DeviceState *pch_pic,
sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE,
qdev_get_gpio_in(pch_pic,
VIRT_RTC_IRQ - VIRT_GSI_BASE));
- fdt_add_rtc_node(lvms, pch_pic_phandle);
/* acpi ged */
lvms->acpi_ged = create_acpi_ged(pch_pic, lvms);
@@ -731,17 +335,35 @@ static void virt_devices_init(DeviceState *pch_pic,
lvms->platform_bus_dev = create_platform_bus(pch_pic);
}
-static void virt_irq_init(LoongArchVirtMachineState *lvms)
+static void virt_cpu_irq_init(LoongArchVirtMachineState *lvms)
{
+ int num;
MachineState *ms = MACHINE(lvms);
- DeviceState *pch_pic, *pch_msi, *cpudev;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus;
+ CPUState *cs;
+
+ /* cpu nodes */
+ possible_cpus = mc->possible_cpu_arch_ids(ms);
+ for (num = 0; num < possible_cpus->len; num++) {
+ cs = possible_cpus->cpus[num].cpu;
+ if (cs == NULL) {
+ continue;
+ }
+
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), DEVICE(cs),
+ &error_abort);
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), DEVICE(cs),
+ &error_abort);
+ }
+}
+
+static void virt_irq_init(LoongArchVirtMachineState *lvms)
+{
+ DeviceState *pch_pic, *pch_msi;
DeviceState *ipi, *extioi;
SysBusDevice *d;
- LoongArchCPU *lacpu;
- CPULoongArchState *env;
- CPUState *cpu_state;
- int cpu, pin, i, start, num;
- uint32_t cpuintc_phandle, eiointc_phandle, pch_pic_phandle, pch_msi_phandle;
+ int i, start, num;
/*
* Extended IRQ model.
@@ -788,81 +410,24 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms)
*/
/* Create IPI device */
- ipi = qdev_new(TYPE_LOONGSON_IPI);
- qdev_prop_set_uint32(ipi, "num-cpu", ms->smp.cpus);
+ ipi = qdev_new(TYPE_LOONGARCH_IPI);
+ lvms->ipi = ipi;
sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal);
- /* IPI iocsr memory region */
- memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0));
- memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1));
-
- /* Add cpu interrupt-controller */
- fdt_add_cpuic_node(lvms, &cpuintc_phandle);
-
- for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
- cpu_state = qemu_get_cpu(cpu);
- cpudev = DEVICE(cpu_state);
- lacpu = LOONGARCH_CPU(cpu_state);
- env = &(lacpu->env);
- env->address_space_iocsr = &lvms->as_iocsr;
-
- /* connect ipi irq to cpu irq */
- qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI));
- env->ipistate = ipi;
- }
-
/* Create EXTIOI device */
extioi = qdev_new(TYPE_LOONGARCH_EXTIOI);
- qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.cpus);
+ lvms->extioi = extioi;
if (virt_is_veiointc_enabled(lvms)) {
qdev_prop_set_bit(extioi, "has-virtualization-extension", true);
}
sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal);
- memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0));
- if (virt_is_veiointc_enabled(lvms)) {
- memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1));
- }
- /*
- * connect ext irq to the cpu irq
- * cpu_pin[9:2] <= intc_pin[7:0]
- */
- for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
- cpudev = DEVICE(qemu_get_cpu(cpu));
- for (pin = 0; pin < LS3A_INTC_IP; pin++) {
- qdev_connect_gpio_out(extioi, (cpu * 8 + pin),
- qdev_get_gpio_in(cpudev, pin + 2));
- }
- }
-
- /* Add Extend I/O Interrupt Controller node */
- fdt_add_eiointc_node(lvms, &cpuintc_phandle, &eiointc_phandle);
-
- pch_pic = qdev_new(TYPE_LOONGARCH_PCH_PIC);
+ virt_cpu_irq_init(lvms);
+ pch_pic = qdev_new(TYPE_LOONGARCH_PIC);
num = VIRT_PCH_PIC_IRQ_NUM;
qdev_prop_set_uint32(pch_pic, "pch_pic_irq_num", num);
d = SYS_BUS_DEVICE(pch_pic);
sysbus_realize_and_unref(d, &error_fatal);
- memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE,
- sysbus_mmio_get_region(d, 0));
- memory_region_add_subregion(get_system_memory(),
- VIRT_IOAPIC_REG_BASE + PCH_PIC_ROUTE_ENTRY_OFFSET,
- sysbus_mmio_get_region(d, 1));
- memory_region_add_subregion(get_system_memory(),
- VIRT_IOAPIC_REG_BASE + PCH_PIC_INT_STATUS_LO,
- sysbus_mmio_get_region(d, 2));
-
- /* Connect pch_pic irqs to extioi */
- for (i = 0; i < num; i++) {
- qdev_connect_gpio_out(DEVICE(d), i, qdev_get_gpio_in(extioi, i));
- }
-
- /* Add PCH PIC node */
- fdt_add_pch_pic_node(lvms, &eiointc_phandle, &pch_pic_phandle);
pch_msi = qdev_new(TYPE_LOONGARCH_PCH_MSI);
start = num;
@@ -872,16 +437,41 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms)
d = SYS_BUS_DEVICE(pch_msi);
sysbus_realize_and_unref(d, &error_fatal);
sysbus_mmio_map(d, 0, VIRT_PCH_MSI_ADDR_LOW);
- for (i = 0; i < num; i++) {
- /* Connect pch_msi irqs to extioi */
- qdev_connect_gpio_out(DEVICE(d), i,
- qdev_get_gpio_in(extioi, i + start));
- }
- /* Add PCH MSI node */
- fdt_add_pch_msi_node(lvms, &eiointc_phandle, &pch_msi_phandle);
+ if (kvm_irqchip_in_kernel()) {
+ kvm_loongarch_init_irq_routing();
+ } else {
+ /* IPI iocsr memory region */
+ memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0));
+ memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1));
+
+ /* EXTIOI iocsr memory region */
+ memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0));
+ if (virt_is_veiointc_enabled(lvms)) {
+ memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1));
+ }
+
+ /* PCH_PIC memory region */
+ memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(pch_pic), 0));
+
+ /* Connect pch_pic irqs to extioi */
+ for (i = 0; i < VIRT_PCH_PIC_IRQ_NUM; i++) {
+ qdev_connect_gpio_out(DEVICE(pch_pic), i,
+ qdev_get_gpio_in(extioi, i));
+ }
- virt_devices_init(pch_pic, lvms, &pch_pic_phandle, &pch_msi_phandle);
+ for (i = VIRT_PCH_PIC_IRQ_NUM; i < EXTIOI_IRQS; i++) {
+ /* Connect pch_msi irqs to extioi */
+ qdev_connect_gpio_out(DEVICE(pch_msi), i - VIRT_PCH_PIC_IRQ_NUM,
+ qdev_get_gpio_in(extioi, i));
+ }
+ }
+ virt_devices_init(pch_pic, lvms);
}
static void virt_firmware_init(LoongArchVirtMachineState *lvms)
@@ -941,6 +531,10 @@ static MemTxResult virt_iocsr_misc_write(void *opaque, hwaddr addr,
switch (addr) {
case MISC_FUNC_REG:
+ if (kvm_irqchip_in_kernel()) {
+ return MEMTX_OK;
+ }
+
if (!virt_is_veiointc_enabled(lvms)) {
return MEMTX_OK;
}
@@ -991,6 +585,10 @@ static MemTxResult virt_iocsr_misc_read(void *opaque, hwaddr addr,
ret = 0x303030354133ULL; /* "3A5000" */
break;
case MISC_FUNC_REG:
+ if (kvm_irqchip_in_kernel()) {
+ return MEMTX_OK;
+ }
+
if (!virt_is_veiointc_enabled(lvms)) {
ret |= BIT_ULL(IOCSRM_EXTIOI_EN);
break;
@@ -1051,13 +649,13 @@ static void fw_cfg_add_memory(MachineState *ms)
}
if (size >= gap) {
- memmap_add_entry(base, gap, 1);
+ memmap_add_entry(ms, base, gap, 1);
size -= gap;
base = VIRT_HIGHMEM_BASE;
}
if (size) {
- memmap_add_entry(base, size, 1);
+ memmap_add_entry(ms, base, size, 1);
base += size;
}
@@ -1072,35 +670,32 @@ static void fw_cfg_add_memory(MachineState *ms)
* lowram: [base, +(gap - numa_info[0].node_mem))
* highram: [VIRT_HIGHMEM_BASE, +(ram_size - gap))
*/
- memmap_add_entry(base, gap - numa_info[0].node_mem, 1);
+ memmap_add_entry(ms, base, gap - numa_info[0].node_mem, 1);
size = ram_size - gap;
base = VIRT_HIGHMEM_BASE;
} else {
size = ram_size - numa_info[0].node_mem;
}
- if (size)
- memmap_add_entry(base, size, 1);
+ if (size) {
+ memmap_add_entry(ms, base, size, 1);
+ }
}
static void virt_init(MachineState *machine)
{
- LoongArchCPU *lacpu;
const char *cpu_model = machine->cpu_type;
MemoryRegion *address_space_mem = get_system_memory();
LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
int i;
hwaddr base, size, ram_size = machine->ram_size;
- const CPUArchIdList *possible_cpus;
MachineClass *mc = MACHINE_GET_CLASS(machine);
- CPUState *cpu;
+ Object *cpuobj;
if (!cpu_model) {
cpu_model = LOONGARCH_CPU_TYPE_NAME("la464");
}
- create_fdt(lvms);
-
/* Create IOCSR space */
memory_region_init_io(&lvms->system_iocsr, OBJECT(machine), NULL,
machine, "iocsr", UINT64_MAX);
@@ -1111,16 +706,16 @@ static void virt_init(MachineState *machine)
memory_region_add_subregion(&lvms->system_iocsr, 0, &lvms->iocsr_mem);
/* Init CPUs */
- possible_cpus = mc->possible_cpu_arch_ids(machine);
- for (i = 0; i < possible_cpus->len; i++) {
- cpu = cpu_create(machine->cpu_type);
- cpu->cpu_index = i;
- machine->possible_cpus->cpus[i].cpu = cpu;
- lacpu = LOONGARCH_CPU(cpu);
- lacpu->phy_id = machine->possible_cpus->cpus[i].arch_id;
+ mc->possible_cpu_arch_ids(machine);
+ for (i = 0; i < machine->smp.cpus; i++) {
+ cpuobj = object_new(machine->cpu_type);
+ if (cpuobj == NULL) {
+            error_report("Failed to create object with type %s",
+ machine->cpu_type);
+ exit(EXIT_FAILURE);
+ }
+ qdev_realize_and_unref(DEVICE(cpuobj), NULL, &error_fatal);
}
- fdt_add_cpu_nodes(lvms);
- fdt_add_memory_nodes(machine);
fw_cfg_add_memory(machine);
/* Node0 memory */
@@ -1169,37 +764,18 @@ static void virt_init(MachineState *machine)
rom_set_fw(lvms->fw_cfg);
if (lvms->fw_cfg != NULL) {
fw_cfg_add_file(lvms->fw_cfg, "etc/memmap",
- memmap_table,
- sizeof(struct memmap_entry) * (memmap_entries));
+ lvms->memmap_table,
+ sizeof(struct memmap_entry) * lvms->memmap_entries);
}
- fdt_add_fw_cfg_node(lvms);
- fdt_add_flash_node(lvms);
/* Initialize the IO interrupt subsystem */
virt_irq_init(lvms);
- platform_bus_add_all_fdt_nodes(machine->fdt, "/platic",
- VIRT_PLATFORM_BUS_BASEADDRESS,
- VIRT_PLATFORM_BUS_SIZE,
- VIRT_PLATFORM_BUS_IRQ);
lvms->machine_done.notify = virt_done;
qemu_add_machine_init_done_notifier(&lvms->machine_done);
/* connect powerdown request */
lvms->powerdown_notifier.notify = virt_powerdown_req;
qemu_register_powerdown_notifier(&lvms->powerdown_notifier);
- /*
- * Since lowmem region starts from 0 and Linux kernel legacy start address
- * at 2 MiB, FDT base address is located at 1 MiB to avoid NULL pointer
- * access. FDT size limit with 1 MiB.
- * Put the FDT into the memory map as a ROM image: this will ensure
- * the FDT is copied again upon reset, even if addr points into RAM.
- */
- qemu_fdt_dumpdtb(machine->fdt, lvms->fdt_size);
- rom_add_blob_fixed_as("fdt", machine->fdt, lvms->fdt_size, FDT_BASE,
- &address_space_memory);
- qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds,
- rom_ptr_for_as(&address_space_memory, FDT_BASE, lvms->fdt_size));
-
lvms->bootinfo.ram_size = ram_size;
loongarch_load_kernel(machine, &lvms->bootinfo);
}
@@ -1221,6 +797,48 @@ static void virt_set_acpi(Object *obj, Visitor *v, const char *name,
visit_type_OnOffAuto(v, name, &lvms->acpi, errp);
}
+static char *virt_get_oem_id(Object *obj, Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
+
+ return g_strdup(lvms->oem_id);
+}
+
+static void virt_set_oem_id(Object *obj, const char *value, Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
+ size_t len = strlen(value);
+
+ if (len > 6) {
+ error_setg(errp,
+ "User specified oem-id value is bigger than 6 bytes in size");
+ return;
+ }
+
+ strncpy(lvms->oem_id, value, 6);
+}
+
+static char *virt_get_oem_table_id(Object *obj, Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
+
+ return g_strdup(lvms->oem_table_id);
+}
+
+static void virt_set_oem_table_id(Object *obj, const char *value,
+ Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
+ size_t len = strlen(value);
+
+ if (len > 8) {
+ error_setg(errp,
+ "User specified oem-table-id value is bigger than 8 bytes in size");
+ return;
+ }
+ strncpy(lvms->oem_table_id, value, 8);
+}
+
static void virt_initfn(Object *obj)
{
LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
@@ -1234,6 +852,195 @@ static void virt_initfn(Object *obj)
virt_flash_create(lvms);
}
+static void virt_get_topo_from_index(MachineState *ms,
+ LoongArchCPUTopo *topo, int index)
+{
+ topo->socket_id = index / (ms->smp.cores * ms->smp.threads);
+ topo->core_id = index / ms->smp.threads % ms->smp.cores;
+ topo->thread_id = index % ms->smp.threads;
+}
+
+static unsigned int topo_align_up(unsigned int count)
+{
+ g_assert(count >= 1);
+ count -= 1;
+ return BIT(count ? 32 - clz32(count) : 0);
+}
+
+/*
+ * LoongArch Reference Manual Vol1, Chapter 7.4.12 CPU Identity
+ * For CPU architecture, bit0 .. bit8 is valid for CPU id, max cpuid is 512
+ * However for IPI/Eiointc interrupt controller, max supported cpu id for
+ * irq routing is 256
+ *
+ * Here max cpu id is 256 for virt machine
+ */
+static int virt_get_arch_id_from_topo(MachineState *ms, LoongArchCPUTopo *topo)
+{
+ int arch_id, threads, cores, sockets;
+
+ threads = topo_align_up(ms->smp.threads);
+ cores = topo_align_up(ms->smp.cores);
+ sockets = topo_align_up(ms->smp.sockets);
+ if ((threads * cores * sockets) > 256) {
+ error_report("Exceeding max cpuid 256 with sockets[%d] cores[%d]"
+ " threads[%d]", ms->smp.sockets, ms->smp.cores,
+ ms->smp.threads);
+ exit(1);
+ }
+
+ arch_id = topo->thread_id + topo->core_id * threads;
+ arch_id += topo->socket_id * threads * cores;
+ return arch_id;
+}
+
+/* Find cpu slot in machine->possible_cpus by arch_id */
+static CPUArchId *virt_find_cpu_slot(MachineState *ms, int arch_id)
+{
+ int n;
+ for (n = 0; n < ms->possible_cpus->len; n++) {
+ if (ms->possible_cpus->cpus[n].arch_id == arch_id) {
+ return &ms->possible_cpus->cpus[n];
+ }
+ }
+
+ return NULL;
+}
+
+/* Find cpu slot for cold-plugged CPU object where cpu is NULL */
+static CPUArchId *virt_find_empty_cpu_slot(MachineState *ms)
+{
+ int n;
+ for (n = 0; n < ms->possible_cpus->len; n++) {
+ if (ms->possible_cpus->cpus[n].cpu == NULL) {
+ return &ms->possible_cpus->cpus[n];
+ }
+ }
+
+ return NULL;
+}
+
+static void virt_cpu_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
+ MachineState *ms = MACHINE(OBJECT(hotplug_dev));
+ LoongArchCPU *cpu = LOONGARCH_CPU(dev);
+ CPUState *cs = CPU(dev);
+ CPUArchId *cpu_slot;
+ LoongArchCPUTopo topo;
+ int arch_id;
+
+ if (lvms->acpi_ged) {
+ if ((cpu->thread_id < 0) || (cpu->thread_id >= ms->smp.threads)) {
+ error_setg(errp,
+                   "Invalid thread-id %u specified, must be in range 0:%u",
+ cpu->thread_id, ms->smp.threads - 1);
+ return;
+ }
+
+ if ((cpu->core_id < 0) || (cpu->core_id >= ms->smp.cores)) {
+ error_setg(errp,
+                   "Invalid core-id %u specified, must be in range 0:%u",
+ cpu->core_id, ms->smp.cores - 1);
+ return;
+ }
+
+ if ((cpu->socket_id < 0) || (cpu->socket_id >= ms->smp.sockets)) {
+ error_setg(errp,
+                   "Invalid socket-id %u specified, must be in range 0:%u",
+ cpu->socket_id, ms->smp.sockets - 1);
+ return;
+ }
+
+ topo.socket_id = cpu->socket_id;
+ topo.core_id = cpu->core_id;
+ topo.thread_id = cpu->thread_id;
+ arch_id = virt_get_arch_id_from_topo(ms, &topo);
+ cpu_slot = virt_find_cpu_slot(ms, arch_id);
+ if (CPU(cpu_slot->cpu)) {
+ error_setg(errp,
+ "cpu(id%d=%d:%d:%d) with arch-id %" PRIu64 " exists",
+ cs->cpu_index, cpu->socket_id, cpu->core_id,
+ cpu->thread_id, cpu_slot->arch_id);
+ return;
+ }
+ } else {
+ /* For cold-add cpu, find empty cpu slot */
+ cpu_slot = virt_find_empty_cpu_slot(ms);
+ topo.socket_id = cpu_slot->props.socket_id;
+ topo.core_id = cpu_slot->props.core_id;
+ topo.thread_id = cpu_slot->props.thread_id;
+ object_property_set_int(OBJECT(dev), "socket-id", topo.socket_id, NULL);
+ object_property_set_int(OBJECT(dev), "core-id", topo.core_id, NULL);
+ object_property_set_int(OBJECT(dev), "thread-id", topo.thread_id, NULL);
+ }
+
+ cpu->env.address_space_iocsr = &lvms->as_iocsr;
+ cpu->phy_id = cpu_slot->arch_id;
+ cs->cpu_index = cpu_slot - ms->possible_cpus->cpus;
+ numa_cpu_pre_plug(cpu_slot, dev, errp);
+}
+
+static void virt_cpu_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
+ LoongArchCPU *cpu = LOONGARCH_CPU(dev);
+ CPUState *cs = CPU(dev);
+
+ if (cs->cpu_index == 0) {
+ error_setg(errp, "hot-unplug of boot cpu(id%d=%d:%d:%d) not supported",
+ cs->cpu_index, cpu->socket_id,
+ cpu->core_id, cpu->thread_id);
+ return;
+ }
+
+ hotplug_handler_unplug_request(HOTPLUG_HANDLER(lvms->acpi_ged), dev, errp);
+}
+
+static void virt_cpu_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ CPUArchId *cpu_slot;
+ LoongArchCPU *cpu = LOONGARCH_CPU(dev);
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
+
+ /* Notify ipi and extioi irqchip to remove interrupt routing to CPU */
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort);
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort);
+
+ /* Notify acpi ged CPU removed */
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &error_abort);
+
+ cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
+ cpu_slot->cpu = NULL;
+}
+
+static void virt_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ CPUArchId *cpu_slot;
+ LoongArchCPU *cpu = LOONGARCH_CPU(dev);
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
+
+ if (lvms->ipi) {
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort);
+ }
+
+ if (lvms->extioi) {
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort);
+ }
+
+ if (lvms->acpi_ged) {
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev,
+ &error_abort);
+ }
+
+ cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
+ cpu_slot->cpu = CPU(dev);
+}
+
static bool memhp_type_supported(DeviceState *dev)
{
/* we only support pc dimm now */
@@ -1252,6 +1059,8 @@ static void virt_device_pre_plug(HotplugHandler *hotplug_dev,
{
if (memhp_type_supported(dev)) {
virt_mem_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ virt_cpu_pre_plug(hotplug_dev, dev, errp);
}
}
@@ -1270,6 +1079,8 @@ static void virt_device_unplug_request(HotplugHandler *hotplug_dev,
{
if (memhp_type_supported(dev)) {
virt_mem_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ virt_cpu_unplug_request(hotplug_dev, dev, errp);
}
}
@@ -1288,6 +1099,8 @@ static void virt_device_unplug(HotplugHandler *hotplug_dev,
{
if (memhp_type_supported(dev)) {
virt_mem_unplug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ virt_cpu_unplug(hotplug_dev, dev, errp);
}
}
@@ -1315,6 +1128,8 @@ static void virt_device_plug_cb(HotplugHandler *hotplug_dev,
}
} else if (memhp_type_supported(dev)) {
virt_mem_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ virt_cpu_plug(hotplug_dev, dev, errp);
}
}
@@ -1324,6 +1139,7 @@ static HotplugHandler *virt_get_hotplug_handler(MachineState *machine,
MachineClass *mc = MACHINE_GET_CLASS(machine);
if (device_is_dynamic_sysbus(mc, dev) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
memhp_type_supported(dev)) {
return HOTPLUG_HANDLER(machine);
@@ -1333,8 +1149,9 @@ static HotplugHandler *virt_get_hotplug_handler(MachineState *machine,
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
- int n;
+ int n, arch_id;
unsigned int max_cpus = ms->smp.max_cpus;
+ LoongArchCPUTopo topo;
if (ms->possible_cpus) {
assert(ms->possible_cpus->len == max_cpus);
@@ -1345,17 +1162,17 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
sizeof(CPUArchId) * max_cpus);
ms->possible_cpus->len = max_cpus;
for (n = 0; n < ms->possible_cpus->len; n++) {
+ virt_get_topo_from_index(ms, &topo, n);
+ arch_id = virt_get_arch_id_from_topo(ms, &topo);
ms->possible_cpus->cpus[n].type = ms->cpu_type;
- ms->possible_cpus->cpus[n].arch_id = n;
-
+ ms->possible_cpus->cpus[n].arch_id = arch_id;
+ ms->possible_cpus->cpus[n].vcpus_count = 1;
ms->possible_cpus->cpus[n].props.has_socket_id = true;
- ms->possible_cpus->cpus[n].props.socket_id =
- n / (ms->smp.cores * ms->smp.threads);
+ ms->possible_cpus->cpus[n].props.socket_id = topo.socket_id;
ms->possible_cpus->cpus[n].props.has_core_id = true;
- ms->possible_cpus->cpus[n].props.core_id =
- n / ms->smp.threads % ms->smp.cores;
+ ms->possible_cpus->cpus[n].props.core_id = topo.core_id;
ms->possible_cpus->cpus[n].props.has_thread_id = true;
- ms->possible_cpus->cpus[n].props.thread_id = n % ms->smp.threads;
+ ms->possible_cpus->cpus[n].props.thread_id = topo.thread_id;
}
return ms->possible_cpus;
}
@@ -1382,7 +1199,7 @@ static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
}
}
-static void virt_class_init(ObjectClass *oc, void *data)
+static void virt_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -1390,6 +1207,7 @@ static void virt_class_init(ObjectClass *oc, void *data)
mc->init = virt_init;
mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464");
mc->default_ram_id = "loongarch.ram";
+ mc->desc = "QEMU LoongArch Virtual Machine";
mc->max_cpus = LOONGARCH_MAX_CPUS;
mc->is_default = 1;
mc->default_kernel_irqchip_split = false;
@@ -1402,6 +1220,7 @@ static void virt_class_init(ObjectClass *oc, void *data)
mc->numa_mem_supported = true;
mc->auto_enable_numa_with_memhp = true;
mc->auto_enable_numa_with_memdev = true;
+ mc->has_hotpluggable_cpus = true;
mc->get_hotplug_handler = virt_get_hotplug_handler;
mc->default_nic = "virtio-net-pci";
hc->plug = virt_device_plug_cb;
@@ -1420,9 +1239,26 @@ static void virt_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "v-eiointc",
"Enable Virt Extend I/O Interrupt Controller.");
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS);
#ifdef CONFIG_TPM
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
+ object_class_property_add_str(oc, "x-oem-id",
+ virt_get_oem_id,
+ virt_set_oem_id);
+ object_class_property_set_description(oc, "x-oem-id",
+ "Override the default value of field OEMID "
+                                          "in ACPI table header. "
+                                          "The string may be up to 6 bytes in size");
+
+ object_class_property_add_str(oc, "x-oem-table-id",
+ virt_get_oem_table_id,
+ virt_set_oem_table_id);
+ object_class_property_set_description(oc, "x-oem-table-id",
+ "Override the default value of field OEM Table ID "
+                                          "in ACPI table header. "
+                                          "The string may be up to 8 bytes in size");
}
static const TypeInfo virt_machine_types[] = {
@@ -1432,7 +1268,7 @@ static const TypeInfo virt_machine_types[] = {
.instance_size = sizeof(LoongArchVirtMachineState),
.class_init = virt_class_init,
.instance_init = virt_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
diff --git a/hw/m68k/Kconfig b/hw/m68k/Kconfig
index 0092cda..aff769b 100644
--- a/hw/m68k/Kconfig
+++ b/hw/m68k/Kconfig
@@ -18,6 +18,7 @@ config NEXTCUBE
depends on M68K
select FRAMEBUFFER
select ESCC
+ select EMPTY_SLOT
config Q800
bool
diff --git a/hw/m68k/an5206.c b/hw/m68k/an5206.c
index 1e8e64f..d97399b 100644
--- a/hw/m68k/an5206.c
+++ b/hw/m68k/an5206.c
@@ -14,7 +14,7 @@
#include "hw/loader.h"
#include "elf.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#define KERNEL_LOAD_ADDR 0x10000
#define AN5206_MBAR_ADDR 0x10000000
@@ -74,7 +74,7 @@ static void an5206_init(MachineState *machine)
}
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry,
- NULL, NULL, NULL, 1, EM_68K, 0, 0);
+ NULL, NULL, NULL, ELFDATA2MSB, EM_68K, 0, 0);
entry = elf_entry;
if (kernel_size < 0) {
kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL,
diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h
index 0e6e3ee..0b3e7c4 100644
--- a/hw/m68k/bootinfo.h
+++ b/hw/m68k/bootinfo.h
@@ -1,5 +1,5 @@
/*
- * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ * SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
*
* Bootinfo tags from linux bootinfo.h and bootinfo-mac.h:
* This is an easily parsable and extendable structure containing all
@@ -14,39 +14,39 @@
#define BOOTINFO0(base, id) \
do { \
- stw_p(base, id); \
+ stw_be_p(base, id); \
base += 2; \
- stw_p(base, sizeof(struct bi_record)); \
+ stw_be_p(base, sizeof(struct bi_record)); \
base += 2; \
} while (0)
#define BOOTINFO1(base, id, value) \
do { \
- stw_p(base, id); \
+ stw_be_p(base, id); \
base += 2; \
- stw_p(base, sizeof(struct bi_record) + 4); \
+ stw_be_p(base, sizeof(struct bi_record) + 4); \
base += 2; \
- stl_p(base, value); \
+ stl_be_p(base, value); \
base += 4; \
} while (0)
#define BOOTINFO2(base, id, value1, value2) \
do { \
- stw_p(base, id); \
+ stw_be_p(base, id); \
base += 2; \
- stw_p(base, sizeof(struct bi_record) + 8); \
+ stw_be_p(base, sizeof(struct bi_record) + 8); \
base += 2; \
- stl_p(base, value1); \
+ stl_be_p(base, value1); \
base += 4; \
- stl_p(base, value2); \
+ stl_be_p(base, value2); \
base += 4; \
} while (0)
#define BOOTINFOSTR(base, id, string) \
do { \
- stw_p(base, id); \
+ stw_be_p(base, id); \
base += 2; \
- stw_p(base, \
+ stw_be_p(base, \
(sizeof(struct bi_record) + strlen(string) + \
1 /* null termination */ + 3 /* padding */) & ~3); \
base += 2; \
@@ -59,13 +59,13 @@
#define BOOTINFODATA(base, id, data, len) \
do { \
- stw_p(base, id); \
+ stw_be_p(base, id); \
base += 2; \
- stw_p(base, \
+ stw_be_p(base, \
(sizeof(struct bi_record) + len + \
2 /* length field */ + 3 /* padding */) & ~3); \
base += 2; \
- stw_p(base, len); \
+ stw_be_p(base, len); \
base += 2; \
for (unsigned i_ = 0; i_ < len; ++i_) { \
stb_p(base++, data[i_]); \
diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c
index 183fd3c..a25e782 100644
--- a/hw/m68k/mcf5206.c
+++ b/hw/m68k/mcf5206.c
@@ -16,7 +16,7 @@
#include "hw/m68k/mcf.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/sysbus.h"
/* General purpose timer module. */
@@ -582,7 +582,7 @@ static const MemoryRegionOps m5206_mbar_ops = {
.write = m5206_mbar_writefn,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
static void mcf5206_mbar_realize(DeviceState *dev, Error **errp)
@@ -600,13 +600,12 @@ static void mcf5206_mbar_realize(DeviceState *dev, Error **errp)
s->uart[1] = mcf_uart_create(s->pic[13], serial_hd(1));
}
-static Property mcf5206_mbar_properties[] = {
+static const Property mcf5206_mbar_properties[] = {
DEFINE_PROP_LINK("m68k-cpu", m5206_mbar_state, cpu,
TYPE_M68K_CPU, M68kCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mcf5206_mbar_class_init(ObjectClass *oc, void *data)
+static void mcf5206_mbar_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -614,7 +613,7 @@ static void mcf5206_mbar_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "MCF5206 system integration module";
dc->realize = mcf5206_mbar_realize;
- dc->reset = m5206_mbar_reset;
+ device_class_set_legacy_reset(dc, m5206_mbar_reset);
}
static const TypeInfo mcf5206_mbar_info = {
diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c
index ec14096..75cc076 100644
--- a/hw/m68k/mcf5208.c
+++ b/hw/m68k/mcf5208.c
@@ -4,6 +4,14 @@
* Copyright (c) 2007 CodeSourcery.
*
* This code is licensed under the GPL
+ *
+ * This file models both the MCF5208 SoC, and the
+ * MCF5208EVB evaluation board. For details see
+ *
+ * "MCF5208 Reference Manual"
+ * https://www.nxp.com/docs/en/reference-manual/MCF5208RM.pdf
+ * "M5208EVB-RevB 32-bit Microcontroller User Manual"
+ * https://www.nxp.com/docs/en/reference-manual/M5208EVBUM.pdf
*/
#include "qemu/osdep.h"
@@ -18,8 +26,8 @@
#include "hw/m68k/mcf_fec.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
+#include "system/system.h"
+#include "system/qtest.h"
#include "net/net.h"
#include "hw/boards.h"
#include "hw/loader.h"
@@ -147,7 +155,7 @@ static uint64_t m5208_timer_read(void *opaque, hwaddr addr,
static const MemoryRegionOps m5208_timer_ops = {
.read = m5208_timer_read,
.write = m5208_timer_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
static uint64_t m5208_sys_read(void *opaque, hwaddr addr,
@@ -158,7 +166,7 @@ static uint64_t m5208_sys_read(void *opaque, hwaddr addr,
{
int n;
for (n = 0; n < 32; n++) {
- if (current_machine->ram_size < (2u << n)) {
+ if (current_machine->ram_size < (2ULL << n)) {
break;
}
}
@@ -184,7 +192,7 @@ static void m5208_sys_write(void *opaque, hwaddr addr,
static const MemoryRegionOps m5208_sys_ops = {
.read = m5208_sys_read,
.write = m5208_sys_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
static uint64_t m5208_rcm_read(void *opaque, hwaddr addr,
@@ -216,7 +224,7 @@ static void m5208_rcm_write(void *opaque, hwaddr addr,
static const MemoryRegionOps m5208_rcm_ops = {
.read = m5208_rcm_read,
.write = m5208_rcm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic,
@@ -351,7 +359,7 @@ static void mcf5208evb_init(MachineState *machine)
/* Initial PC is always at offset 4 in firmware binaries */
ptr = rom_ptr(0x4, 4);
assert(ptr != NULL);
- env->pc = ldl_p(ptr);
+ env->pc = ldl_be_p(ptr);
}
/* Load kernel. */
@@ -364,7 +372,7 @@ static void mcf5208evb_init(MachineState *machine)
}
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry,
- NULL, NULL, NULL, 1, EM_68K, 0, 0);
+ NULL, NULL, NULL, ELFDATA2MSB, EM_68K, 0, 0);
entry = elf_entry;
if (kernel_size < 0) {
kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL,
diff --git a/hw/m68k/mcf_intc.c b/hw/m68k/mcf_intc.c
index 1d3b34e..e3055b8 100644
--- a/hw/m68k/mcf_intc.c
+++ b/hw/m68k/mcf_intc.c
@@ -166,7 +166,7 @@ static void mcf_intc_reset(DeviceState *dev)
static const MemoryRegionOps mcf_intc_ops = {
.read = mcf_intc_read,
.write = mcf_intc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
static void mcf_intc_instance_init(Object *obj)
@@ -177,19 +177,18 @@ static void mcf_intc_instance_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
}
-static Property mcf_intc_properties[] = {
+static const Property mcf_intc_properties[] = {
DEFINE_PROP_LINK("m68k-cpu", mcf_intc_state, cpu,
TYPE_M68K_CPU, M68kCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mcf_intc_class_init(ObjectClass *oc, void *data)
+static void mcf_intc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
device_class_set_props(dc, mcf_intc_properties);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = mcf_intc_reset;
+ device_class_set_legacy_reset(dc, mcf_intc_reset);
}
static const TypeInfo mcf_intc_gate_info = {
diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c
index 9f6f90d..957644b 100644
--- a/hw/m68k/next-cube.c
+++ b/hw/m68k/next-cube.c
@@ -2,6 +2,7 @@
* NeXT Cube System Driver
*
* Copyright (c) 2011 Bryce Lanham
+ * Copyright (c) 2024 Mark Cave-Ayland
*
* This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -11,8 +12,9 @@
#include "qemu/osdep.h"
#include "exec/hwaddr.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
+#include "exec/cpu-interrupt.h"
+#include "system/system.h"
+#include "system/qtest.h"
#include "hw/irq.h"
#include "hw/m68k/next-cube.h"
#include "hw/boards.h"
@@ -22,6 +24,7 @@
#include "qom/object.h"
#include "hw/char/escc.h" /* ZILOG 8530 Serial Emulation */
#include "hw/block/fdc.h"
+#include "hw/misc/empty_slot.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -37,31 +40,17 @@
#define DPRINTF(fmt, ...) do { } while (0)
#endif
-#define TYPE_NEXT_MACHINE MACHINE_TYPE_NAME("next-cube")
-OBJECT_DECLARE_SIMPLE_TYPE(NeXTState, NEXT_MACHINE)
-
#define ENTRY 0x0100001e
#define RAM_SIZE 0x4000000
#define ROM_FILE "Rev_2.5_v66.bin"
-typedef struct next_dma {
- uint32_t csr;
- uint32_t saved_next;
- uint32_t saved_limit;
- uint32_t saved_start;
- uint32_t saved_stop;
+#define TYPE_NEXT_RTC "next-rtc"
+     * Create uart fdt nodes in reverse order so that they appear
- uint32_t next;
- uint32_t limit;
- uint32_t start;
- uint32_t stop;
-
- uint32_t next_initbuf;
- uint32_t size;
-} next_dma;
+struct NeXTRTC {
+ SysBusDevice parent_obj;
-typedef struct NextRtc {
int8_t phase;
uint8_t ram[32];
uint8_t command;
@@ -69,18 +58,25 @@ typedef struct NextRtc {
uint8_t status;
uint8_t control;
uint8_t retval;
-} NextRtc;
-struct NeXTState {
- MachineState parent;
+ qemu_irq data_out_irq;
+ qemu_irq power_irq;
+};
- MemoryRegion rom;
- MemoryRegion rom2;
- MemoryRegion dmamem;
- MemoryRegion bmapm1;
- MemoryRegion bmapm2;
+#define TYPE_NEXT_SCSI "next-scsi"
+OBJECT_DECLARE_SIMPLE_TYPE(NeXTSCSI, NEXT_SCSI)
- next_dma dma[10];
+/* NeXT SCSI Controller */
+struct NeXTSCSI {
+ SysBusDevice parent_obj;
+
+ MemoryRegion scsi_mem;
+
+ SysBusESPState sysbus_esp;
+
+ MemoryRegion scsi_csr_mem;
+ uint8_t scsi_csr_1;
+ uint8_t scsi_csr_2;
};
#define TYPE_NEXT_PC "next-pc"
@@ -92,6 +88,9 @@ struct NeXTPC {
M68kCPU *cpu;
+ MemoryRegion floppy_mem;
+ MemoryRegion timer_mem;
+ MemoryRegion dummyen_mem;
MemoryRegion mmiomem;
MemoryRegion scrmem;
@@ -101,13 +100,49 @@ struct NeXTPC {
uint32_t int_mask;
uint32_t int_status;
uint32_t led;
- uint8_t scsi_csr_1;
- uint8_t scsi_csr_2;
+
+ NeXTSCSI next_scsi;
qemu_irq scsi_reset;
qemu_irq scsi_dma;
- NextRtc rtc;
+ ESCCState escc;
+
+ NeXTRTC rtc;
+ qemu_irq rtc_data_irq;
+ qemu_irq rtc_cmd_reset_irq;
+};
+
+typedef struct next_dma {
+ uint32_t csr;
+
+ uint32_t saved_next;
+ uint32_t saved_limit;
+ uint32_t saved_start;
+ uint32_t saved_stop;
+
+ uint32_t next;
+ uint32_t limit;
+ uint32_t start;
+ uint32_t stop;
+
+ uint32_t next_initbuf;
+ uint32_t size;
+} next_dma;
+
+#define TYPE_NEXT_MACHINE MACHINE_TYPE_NAME("next-cube")
+OBJECT_DECLARE_SIMPLE_TYPE(NeXTState, NEXT_MACHINE)
+
+struct NeXTState {
+ MachineState parent;
+
+ MemoryRegion rom;
+ MemoryRegion rom2;
+ MemoryRegion dmamem;
+ MemoryRegion bmapm1;
+ MemoryRegion bmapm2;
+
+ next_dma dma[10];
};
/* Thanks to NeXT forums for this */
@@ -144,120 +179,26 @@ static void next_scr2_led_update(NeXTPC *s)
static void next_scr2_rtc_update(NeXTPC *s)
{
- uint8_t old_scr2, scr2_2;
- NextRtc *rtc = &s->rtc;
+ uint8_t old_scr2_rtc, scr2_rtc;
- old_scr2 = extract32(s->old_scr2, 8, 8);
- scr2_2 = extract32(s->scr2, 8, 8);
+ old_scr2_rtc = extract32(s->old_scr2, 8, 8);
+ scr2_rtc = extract32(s->scr2, 8, 8);
- if (scr2_2 & 0x1) {
+ if (scr2_rtc & 0x1) {
/* DPRINTF("RTC %x phase %i\n", scr2_2, rtc->phase); */
- if (rtc->phase == -1) {
- rtc->phase = 0;
- }
/* If we are in going down clock... do something */
- if (((old_scr2 & SCR2_RTCLK) != (scr2_2 & SCR2_RTCLK)) &&
- ((scr2_2 & SCR2_RTCLK) == 0)) {
- if (rtc->phase < 8) {
- rtc->command = (rtc->command << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
- }
- if (rtc->phase >= 8 && rtc->phase < 16) {
- rtc->value = (rtc->value << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
-
- /* if we read RAM register, output RT_DATA bit */
- if (rtc->command <= 0x1F) {
- scr2_2 = scr2_2 & (~SCR2_RTDATA);
- if (rtc->ram[rtc->command] & (0x80 >> (rtc->phase - 8))) {
- scr2_2 |= SCR2_RTDATA;
- }
-
- rtc->retval = (rtc->retval << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
- }
- /* read the status 0x30 */
- if (rtc->command == 0x30) {
- scr2_2 = scr2_2 & (~SCR2_RTDATA);
- /* for now status = 0x98 (new rtc + FTU) */
- if (rtc->status & (0x80 >> (rtc->phase - 8))) {
- scr2_2 |= SCR2_RTDATA;
- }
-
- rtc->retval = (rtc->retval << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
- }
- /* read the status 0x31 */
- if (rtc->command == 0x31) {
- scr2_2 = scr2_2 & (~SCR2_RTDATA);
- if (rtc->control & (0x80 >> (rtc->phase - 8))) {
- scr2_2 |= SCR2_RTDATA;
- }
- rtc->retval = (rtc->retval << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
- }
-
- if ((rtc->command >= 0x20) && (rtc->command <= 0x2F)) {
- scr2_2 = scr2_2 & (~SCR2_RTDATA);
- /* for now 0x00 */
- time_t time_h = time(NULL);
- struct tm *info = localtime(&time_h);
- int ret = 0;
-
- switch (rtc->command) {
- case 0x20:
- ret = SCR2_TOBCD(info->tm_sec);
- break;
- case 0x21:
- ret = SCR2_TOBCD(info->tm_min);
- break;
- case 0x22:
- ret = SCR2_TOBCD(info->tm_hour);
- break;
- case 0x24:
- ret = SCR2_TOBCD(info->tm_mday);
- break;
- case 0x25:
- ret = SCR2_TOBCD((info->tm_mon + 1));
- break;
- case 0x26:
- ret = SCR2_TOBCD((info->tm_year - 100));
- break;
-
- }
-
- if (ret & (0x80 >> (rtc->phase - 8))) {
- scr2_2 |= SCR2_RTDATA;
- }
- rtc->retval = (rtc->retval << 1) |
- ((scr2_2 & SCR2_RTDATA) ? 1 : 0);
- }
-
- }
-
- rtc->phase++;
- if (rtc->phase == 16) {
- if (rtc->command >= 0x80 && rtc->command <= 0x9F) {
- rtc->ram[rtc->command - 0x80] = rtc->value;
- }
- /* write to x30 register */
- if (rtc->command == 0xB1) {
- /* clear FTU */
- if (rtc->value & 0x04) {
- rtc->status = rtc->status & (~0x18);
- s->int_status = s->int_status & (~0x04);
- }
- }
+ if (((old_scr2_rtc & SCR2_RTCLK) != (scr2_rtc & SCR2_RTCLK)) &&
+ ((scr2_rtc & SCR2_RTCLK) == 0)) {
+ if (scr2_rtc & SCR2_RTDATA) {
+ qemu_irq_raise(s->rtc_data_irq);
+ } else {
+ qemu_irq_lower(s->rtc_data_irq);
}
}
} else {
/* else end or abort */
- rtc->phase = -1;
- rtc->command = 0;
- rtc->value = 0;
+ qemu_irq_raise(s->rtc_cmd_reset_irq);
}
-
- s->scr2 = deposit32(s->scr2, 8, 8, scr2_2);
}
static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size)
@@ -266,30 +207,26 @@ static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size)
uint64_t val;
switch (addr) {
- case 0x7000:
+ case 0x2000: /* 0x2007000 */
/* DPRINTF("Read INT status: %x\n", s->int_status); */
val = s->int_status;
break;
- case 0x7800:
+ case 0x2800: /* 0x2007800 */
DPRINTF("MMIO Read INT mask: %x\n", s->int_mask);
val = s->int_mask;
break;
- case 0xc000 ... 0xc003:
- val = extract32(s->scr1, (4 - (addr - 0xc000) - size) << 3,
+ case 0x7000 ... 0x7003: /* 0x200c000 */
+ val = extract32(s->scr1, (4 - (addr - 0x7000) - size) << 3,
size << 3);
break;
- case 0xd000 ... 0xd003:
- val = extract32(s->scr2, (4 - (addr - 0xd000) - size) << 3,
+ case 0x8000 ... 0x8003: /* 0x200d000 */
+ val = extract32(s->scr2, (4 - (addr - 0x8000) - size) << 3,
size << 3);
break;
- case 0x14020:
- val = 0x7f;
- break;
-
default:
val = 0;
DPRINTF("MMIO Read @ 0x%"HWADDR_PRIx" size %d\n", addr, size);
@@ -305,25 +242,25 @@ static void next_mmio_write(void *opaque, hwaddr addr, uint64_t val,
NeXTPC *s = NEXT_PC(opaque);
switch (addr) {
- case 0x7000:
+ case 0x2000: /* 0x2007000 */
DPRINTF("INT Status old: %x new: %x\n", s->int_status,
(unsigned int)val);
s->int_status = val;
break;
- case 0x7800:
+ case 0x2800: /* 0x2007800 */
DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, (unsigned int)val);
s->int_mask = val;
break;
- case 0xc000 ... 0xc003:
+ case 0x7000 ... 0x7003: /* 0x200c000 */
DPRINTF("SCR1 Write: %x\n", (unsigned int)val);
- s->scr1 = deposit32(s->scr1, (4 - (addr - 0xc000) - size) << 3,
+ s->scr1 = deposit32(s->scr1, (4 - (addr - 0x7000) - size) << 3,
size << 3, val);
break;
- case 0xd000 ... 0xd003:
- s->scr2 = deposit32(s->scr2, (4 - (addr - 0xd000) - size) << 3,
+ case 0x8000 ... 0x8003: /* 0x200d000 */
+ s->scr2 = deposit32(s->scr2, (4 - (addr - 0x8000) - size) << 3,
size << 3, val);
next_scr2_led_update(s);
next_scr2_rtc_update(s);
@@ -351,143 +288,6 @@ static const MemoryRegionOps next_mmio_ops = {
#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */
#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */
-static uint64_t next_scr_readfn(void *opaque, hwaddr addr, unsigned size)
-{
- NeXTPC *s = NEXT_PC(opaque);
- uint64_t val;
-
- switch (addr) {
- case 0x14108:
- DPRINTF("FD read @ %x\n", (unsigned int)addr);
- val = 0x40 | 0x04 | 0x2 | 0x1;
- break;
-
- case 0x14020:
- DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1);
- val = s->scsi_csr_1;
- break;
-
- case 0x14021:
- DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2);
- val = 0x40;
- break;
-
- /*
- * These 4 registers are the hardware timer, not sure which register
- * is the latch instead of data, but no problems so far.
- *
- * Hack: We need to have the LSB change consistently to make it work
- */
- case 0x1a000 ... 0x1a003:
- val = extract32(clock(), (4 - (addr - 0x1a000) - size) << 3,
- size << 3);
- break;
-
- /* For now return dummy byte to allow the Ethernet test to timeout */
- case 0x6000:
- val = 0xff;
- break;
-
- default:
- DPRINTF("BMAP Read @ 0x%x size %u\n", (unsigned int)addr, size);
- val = 0;
- break;
- }
-
- return val;
-}
-
-static void next_scr_writefn(void *opaque, hwaddr addr, uint64_t val,
- unsigned size)
-{
- NeXTPC *s = NEXT_PC(opaque);
-
- switch (addr) {
- case 0x14108:
- DPRINTF("FDCSR Write: %x\n", value);
- if (val == 0x0) {
- /* qemu_irq_raise(s->fd_irq[0]); */
- }
- break;
-
- case 0x14020: /* SCSI Control Register */
- if (val & SCSICSR_FIFOFL) {
- DPRINTF("SCSICSR FIFO Flush\n");
- /* will have to add another irq to the esp if this is needed */
- /* esp_puflush_fifo(esp_g); */
- }
-
- if (val & SCSICSR_ENABLE) {
- DPRINTF("SCSICSR Enable\n");
- /*
- * qemu_irq_raise(s->scsi_dma);
- * s->scsi_csr_1 = 0xc0;
- * s->scsi_csr_1 |= 0x1;
- * qemu_irq_pulse(s->scsi_dma);
- */
- }
- /*
- * else
- * s->scsi_csr_1 &= ~SCSICSR_ENABLE;
- */
-
- if (val & SCSICSR_RESET) {
- DPRINTF("SCSICSR Reset\n");
- /* I think this should set DMADIR. CPUDMA and INTMASK to 0 */
- qemu_irq_raise(s->scsi_reset);
- s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1);
- qemu_irq_lower(s->scsi_reset);
- }
- if (val & SCSICSR_DMADIR) {
- DPRINTF("SCSICSR DMAdir\n");
- }
- if (val & SCSICSR_CPUDMA) {
- DPRINTF("SCSICSR CPUDMA\n");
- /* qemu_irq_raise(s->scsi_dma); */
- s->int_status |= 0x4000000;
- } else {
- /* fprintf(stderr,"SCSICSR CPUDMA disabled\n"); */
- s->int_status &= ~(0x4000000);
- /* qemu_irq_lower(s->scsi_dma); */
- }
- if (val & SCSICSR_INTMASK) {
- DPRINTF("SCSICSR INTMASK\n");
- /*
- * int_mask &= ~0x1000;
- * s->scsi_csr_1 |= val;
- * s->scsi_csr_1 &= ~SCSICSR_INTMASK;
- * if (s->scsi_queued) {
- * s->scsi_queued = 0;
- * next_irq(s, NEXT_SCSI_I, level);
- * }
- */
- } else {
- /* int_mask |= 0x1000; */
- }
- if (val & 0x80) {
- /* int_mask |= 0x1000; */
- /* s->scsi_csr_1 |= 0x80; */
- }
- DPRINTF("SCSICSR Write: %x\n", val);
- /* s->scsi_csr_1 = val; */
- break;
-
- /* Hardware timer latch - not implemented yet */
- case 0x1a000:
- default:
- DPRINTF("BMAP Write @ 0x%x with 0x%x size %u\n", (unsigned int)addr,
- val, size);
- }
-}
-
-static const MemoryRegionOps next_scr_ops = {
- .read = next_scr_readfn,
- .write = next_scr_writefn,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
#define NEXTDMA_SCSI(x) (0x10 + x)
#define NEXTDMA_FD(x) (0x10 + x)
#define NEXTDMA_ENTX(x) (0x110 + x)
@@ -585,7 +385,7 @@ static void next_dma_write(void *opaque, hwaddr addr, uint64_t val,
break;
default:
- DPRINTF("DMA write @ %x w/ %x\n", (unsigned)addr, (unsigned)value);
+ DPRINTF("DMA write @ %x w/ %x\n", (unsigned)addr, (unsigned)val);
}
}
@@ -828,84 +628,579 @@ static void nextscsi_write(void *opaque, uint8_t *buf, int size)
nextdma_write(opaque, buf, size, NEXTDMA_SCSI);
}
-static void next_scsi_init(DeviceState *pcdev, M68kCPU *cpu)
+static void next_scsi_csr_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ NeXTSCSI *s = NEXT_SCSI(opaque);
+ NeXTPC *pc = NEXT_PC(container_of(s, NeXTPC, next_scsi));
+
+ switch (addr) {
+ case 0:
+ if (val & SCSICSR_FIFOFL) {
+ DPRINTF("SCSICSR FIFO Flush\n");
+ /* will have to add another irq to the esp if this is needed */
+ /* esp_puflush_fifo(esp_g); */
+ }
+
+ if (val & SCSICSR_ENABLE) {
+ DPRINTF("SCSICSR Enable\n");
+ /*
+ * qemu_irq_raise(s->scsi_dma);
+ * s->scsi_csr_1 = 0xc0;
+ * s->scsi_csr_1 |= 0x1;
+ * qemu_irq_pulse(s->scsi_dma);
+ */
+ }
+ /*
+ * else
+ * s->scsi_csr_1 &= ~SCSICSR_ENABLE;
+ */
+
+ if (val & SCSICSR_RESET) {
+ DPRINTF("SCSICSR Reset\n");
+ /* I think this should set DMADIR. CPUDMA and INTMASK to 0 */
+ qemu_irq_raise(pc->scsi_reset);
+ s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1);
+ qemu_irq_lower(pc->scsi_reset);
+ }
+ if (val & SCSICSR_DMADIR) {
+ DPRINTF("SCSICSR DMAdir\n");
+ }
+ if (val & SCSICSR_CPUDMA) {
+ DPRINTF("SCSICSR CPUDMA\n");
+ /* qemu_irq_raise(s->scsi_dma); */
+ pc->int_status |= 0x4000000;
+ } else {
+ /* fprintf(stderr,"SCSICSR CPUDMA disabled\n"); */
+ pc->int_status &= ~(0x4000000);
+ /* qemu_irq_lower(s->scsi_dma); */
+ }
+ if (val & SCSICSR_INTMASK) {
+ DPRINTF("SCSICSR INTMASK\n");
+ /*
+ * int_mask &= ~0x1000;
+ * s->scsi_csr_1 |= val;
+ * s->scsi_csr_1 &= ~SCSICSR_INTMASK;
+ * if (s->scsi_queued) {
+ * s->scsi_queued = 0;
+ * next_irq(s, NEXT_SCSI_I, level);
+ * }
+ */
+ } else {
+ /* int_mask |= 0x1000; */
+ }
+ if (val & 0x80) {
+ /* int_mask |= 0x1000; */
+ /* s->scsi_csr_1 |= 0x80; */
+ }
+ DPRINTF("SCSICSR1 Write: %"PRIx64 "\n", val);
+ s->scsi_csr_1 = val;
+ break;
+
+ case 1:
+ DPRINTF("SCSICSR2 Write: %"PRIx64 "\n", val);
+ s->scsi_csr_2 = val;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint64_t next_scsi_csr_read(void *opaque, hwaddr addr, unsigned size)
+{
+ NeXTSCSI *s = NEXT_SCSI(opaque);
+ uint64_t val;
+
+ switch (addr) {
+ case 0:
+ DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1);
+ val = s->scsi_csr_1;
+ break;
+
+ case 1:
+ DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2);
+ val = s->scsi_csr_2;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps next_scsi_csr_ops = {
+ .read = next_scsi_csr_read,
+ .write = next_scsi_csr_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 1,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void next_scsi_init(Object *obj)
{
- struct NeXTPC *next_pc = NEXT_PC(pcdev);
- DeviceState *dev;
- SysBusDevice *sysbusdev;
+ NeXTSCSI *s = NEXT_SCSI(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ object_initialize_child(obj, "esp", &s->sysbus_esp, TYPE_SYSBUS_ESP);
+
+ memory_region_init_io(&s->scsi_csr_mem, obj, &next_scsi_csr_ops,
+ s, "csrs", 2);
+
+ memory_region_init(&s->scsi_mem, obj, "next.scsi", 0x40);
+ sysbus_init_mmio(sbd, &s->scsi_mem);
+}
+
+static void next_scsi_realize(DeviceState *dev, Error **errp)
+{
+ NeXTSCSI *s = NEXT_SCSI(dev);
SysBusESPState *sysbus_esp;
+ SysBusDevice *sbd;
ESPState *esp;
+ NeXTPC *pcdev;
+
+ pcdev = NEXT_PC(container_of(s, NeXTPC, next_scsi));
- dev = qdev_new(TYPE_SYSBUS_ESP);
- sysbus_esp = SYSBUS_ESP(dev);
+ /* ESP */
+ sysbus_esp = SYSBUS_ESP(&s->sysbus_esp);
esp = &sysbus_esp->esp;
esp->dma_memory_read = nextscsi_read;
esp->dma_memory_write = nextscsi_write;
esp->dma_opaque = pcdev;
sysbus_esp->it_shift = 0;
esp->dma_enabled = 1;
- sysbusdev = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(sysbusdev, &error_fatal);
- sysbus_connect_irq(sysbusdev, 0, qdev_get_gpio_in(pcdev, NEXT_SCSI_I));
- sysbus_mmio_map(sysbusdev, 0, 0x2114000);
+ sbd = SYS_BUS_DEVICE(sysbus_esp);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ memory_region_add_subregion(&s->scsi_mem, 0x0,
+ sysbus_mmio_get_region(sbd, 0));
- next_pc->scsi_reset = qdev_get_gpio_in(dev, 0);
- next_pc->scsi_dma = qdev_get_gpio_in(dev, 1);
+ /* SCSI CSRs */
+ memory_region_add_subregion(&s->scsi_mem, 0x20, &s->scsi_csr_mem);
- scsi_bus_legacy_handle_cmdline(&esp->bus);
+ scsi_bus_legacy_handle_cmdline(&s->sysbus_esp.esp.bus);
}
-static void next_escc_init(DeviceState *pcdev)
+static const VMStateDescription next_scsi_vmstate = {
+ .name = "next-scsi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT8(scsi_csr_1, NeXTSCSI),
+ VMSTATE_UINT8(scsi_csr_2, NeXTSCSI),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void next_scsi_class_init(ObjectClass *klass, const void *data)
{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new(TYPE_ESCC);
- qdev_prop_set_uint32(dev, "disabled", 0);
- qdev_prop_set_uint32(dev, "frequency", 9600 * 384);
- qdev_prop_set_uint32(dev, "it_shift", 0);
- qdev_prop_set_bit(dev, "bit_swap", true);
- qdev_prop_set_chr(dev, "chrB", serial_hd(1));
- qdev_prop_set_chr(dev, "chrA", serial_hd(0));
- qdev_prop_set_uint32(dev, "chnBtype", escc_serial);
- qdev_prop_set_uint32(dev, "chnAtype", escc_serial);
-
- s = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_connect_irq(s, 0, qdev_get_gpio_in(pcdev, NEXT_SCC_I));
- sysbus_connect_irq(s, 1, qdev_get_gpio_in(pcdev, NEXT_SCC_DMA_I));
- sysbus_mmio_map(s, 0, 0x2118000);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NeXT SCSI Controller";
+ dc->realize = next_scsi_realize;
+ dc->vmsd = &next_scsi_vmstate;
}
-static void next_pc_reset(DeviceState *dev)
+static const TypeInfo next_scsi_info = {
+ .name = TYPE_NEXT_SCSI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = next_scsi_init,
+ .instance_size = sizeof(NeXTSCSI),
+ .class_init = next_scsi_class_init,
+};
+
+static void next_floppy_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
{
- NeXTPC *s = NEXT_PC(dev);
+ switch (addr) {
+ case 0:
+ DPRINTF("FDCSR Write: %"PRIx64 "\n", val);
+ if (val == 0x0) {
+ /* qemu_irq_raise(s->fd_irq[0]); */
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint64_t next_floppy_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val;
+
+ switch (addr) {
+ case 0:
+ DPRINTF("FD read @ %x\n", (unsigned int)addr);
+ val = 0x40 | 0x04 | 0x2 | 0x1;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps next_floppy_ops = {
+ .read = next_floppy_read,
+ .write = next_floppy_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void next_timer_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ switch (addr) {
+ case 0 ... 3:
+ /* Hardware timer latch - not implemented yet */
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint64_t next_timer_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val;
+
+ switch (addr) {
+ case 0 ... 3:
+ /*
+ * These 4 registers are the hardware timer, not sure which register
+ * is the latch instead of data, but no problems so far.
+ *
+ * Hack: We need to have the LSB change consistently to make it work
+ */
+ val = extract32(clock(), (4 - addr - size) << 3,
+ size << 3);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps next_timer_ops = {
+ .read = next_timer_read,
+ .write = next_timer_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void next_dummy_en_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ /* Do nothing */
+}
+
+static uint64_t next_dummy_en_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val;
+
+ switch (addr) {
+ case 0:
+ /* For now return dummy byte to allow the Ethernet test to timeout */
+ val = 0xff;
+ break;
+
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps next_dummy_en_ops = {
+ .read = next_dummy_en_read,
+ .write = next_dummy_en_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static bool next_rtc_cmd_is_write(uint8_t cmd)
+{
+ return (cmd >= 0x80 && cmd <= 0x9f) ||
+ (cmd == 0xb1);
+}
+
+static void next_rtc_data_in_irq(void *opaque, int n, int level)
+{
+ NeXTRTC *rtc = NEXT_RTC(opaque);
+
+ if (rtc->phase < 8) {
+ rtc->command = (rtc->command << 1) | level;
+
+ if (rtc->phase == 7 && !next_rtc_cmd_is_write(rtc->command)) {
+ if (rtc->command <= 0x1f) {
+ /* RAM registers */
+ rtc->retval = rtc->ram[rtc->command];
+ }
+ if ((rtc->command >= 0x20) && (rtc->command <= 0x2f)) {
+ /* RTC */
+ time_t time_h = time(NULL);
+ struct tm *info = localtime(&time_h);
+ rtc->retval = 0;
+
+ switch (rtc->command) {
+ case 0x20:
+ rtc->retval = SCR2_TOBCD(info->tm_sec);
+ break;
+ case 0x21:
+ rtc->retval = SCR2_TOBCD(info->tm_min);
+ break;
+ case 0x22:
+ rtc->retval = SCR2_TOBCD(info->tm_hour);
+ break;
+ case 0x24:
+ rtc->retval = SCR2_TOBCD(info->tm_mday);
+ break;
+ case 0x25:
+ rtc->retval = SCR2_TOBCD((info->tm_mon + 1));
+ break;
+ case 0x26:
+ rtc->retval = SCR2_TOBCD((info->tm_year - 100));
+ break;
+ }
+ }
+ if (rtc->command == 0x30) {
+ /* read the status 0x30 */
+ rtc->retval = rtc->status;
+ }
+ if (rtc->command == 0x31) {
+ /* read the control 0x31 */
+ rtc->retval = rtc->control;
+ }
+ }
+ }
+ if (rtc->phase >= 8 && rtc->phase < 16) {
+ if (next_rtc_cmd_is_write(rtc->command)) {
+ /* Shift in value to write */
+ rtc->value = (rtc->value << 1) | level;
+ } else {
+ /* Shift out value to read */
+ if (rtc->retval & (0x80 >> (rtc->phase - 8))) {
+ qemu_irq_raise(rtc->data_out_irq);
+ } else {
+ qemu_irq_lower(rtc->data_out_irq);
+ }
+ }
+ }
+
+ rtc->phase++;
+ if (rtc->phase == 16 && next_rtc_cmd_is_write(rtc->command)) {
+ if (rtc->command >= 0x80 && rtc->command <= 0x9f) {
+ /* RAM registers */
+ rtc->ram[rtc->command - 0x80] = rtc->value;
+ }
+ if (rtc->command == 0xb1) {
+ /* write to 0x30 register */
+ if (rtc->value & 0x04) {
+ /* clear FTU */
+ rtc->status = rtc->status & (~0x18);
+ qemu_irq_lower(rtc->power_irq);
+ }
+ }
+ }
+}
+
+static void next_rtc_cmd_reset_irq(void *opaque, int n, int level)
+{
+ NeXTRTC *rtc = NEXT_RTC(opaque);
+
+ if (level) {
+ rtc->phase = 0;
+ rtc->command = 0;
+ rtc->value = 0;
+ }
+}
+
+static void next_rtc_reset_hold(Object *obj, ResetType type)
+{
+ NeXTRTC *rtc = NEXT_RTC(obj);
+
+ rtc->status = 0x90;
+
+ /* Load RTC RAM - TODO: provide a way to load the contents from a file */
+ memcpy(rtc->ram, rtc_ram2, 32);
+}
+
+static void next_rtc_init(Object *obj)
+{
+ NeXTRTC *rtc = NEXT_RTC(obj);
+
+ qdev_init_gpio_in_named(DEVICE(obj), next_rtc_data_in_irq,
+ "rtc-data-in", 1);
+ qdev_init_gpio_out_named(DEVICE(obj), &rtc->data_out_irq,
+ "rtc-data-out", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), next_rtc_cmd_reset_irq,
+ "rtc-cmd-reset", 1);
+ qdev_init_gpio_out_named(DEVICE(obj), &rtc->power_irq,
+ "rtc-power-out", 1);
+}
+
+static const VMStateDescription next_rtc_vmstate = {
+ .name = "next-rtc",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .fields = (const VMStateField[]) {
+ VMSTATE_INT8(phase, NeXTRTC),
+ VMSTATE_UINT8_ARRAY(ram, NeXTRTC, 32),
+ VMSTATE_UINT8(command, NeXTRTC),
+ VMSTATE_UINT8(value, NeXTRTC),
+ VMSTATE_UINT8(status, NeXTRTC),
+ VMSTATE_UINT8(control, NeXTRTC),
+ VMSTATE_UINT8(retval, NeXTRTC),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void next_rtc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->desc = "NeXT RTC";
+ dc->vmsd = &next_rtc_vmstate;
+ rc->phases.hold = next_rtc_reset_hold;
+}
+
+static const TypeInfo next_rtc_info = {
+ .name = TYPE_NEXT_RTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = next_rtc_init,
+ .instance_size = sizeof(NeXTRTC),
+ .class_init = next_rtc_class_init,
+};
+
+static void next_pc_rtc_data_in_irq(void *opaque, int n, int level)
+{
+ NeXTPC *s = NEXT_PC(opaque);
+ uint8_t scr2_2 = extract32(s->scr2, 8, 8);
+
+ if (level) {
+ scr2_2 |= SCR2_RTDATA;
+ } else {
+ scr2_2 &= ~SCR2_RTDATA;
+ }
+
+ s->scr2 = deposit32(s->scr2, 8, 8, scr2_2);
+}
+
+static void next_pc_reset_hold(Object *obj, ResetType type)
+{
+ NeXTPC *s = NEXT_PC(obj);
/* Set internal registers to initial values */
/* 0x0000XX00 << vital bits */
s->scr1 = 0x00011102;
s->scr2 = 0x00ff0c80;
s->old_scr2 = s->scr2;
-
- s->rtc.status = 0x90;
-
- /* Load RTC RAM - TODO: provide possibility to load contents from file */
- memcpy(s->rtc.ram, rtc_ram2, 32);
}
static void next_pc_realize(DeviceState *dev, Error **errp)
{
NeXTPC *s = NEXT_PC(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ SysBusDevice *sbd;
+ DeviceState *d;
+
+ /* SCSI */
+ sbd = SYS_BUS_DEVICE(&s->next_scsi);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
- qdev_init_gpio_in(dev, next_irq, NEXT_NUM_IRQS);
+ d = DEVICE(object_resolve_path_component(OBJECT(&s->next_scsi), "esp"));
+ sysbus_connect_irq(SYS_BUS_DEVICE(d), 0,
+ qdev_get_gpio_in(DEVICE(s), NEXT_SCSI_I));
+
+ s->scsi_reset = qdev_get_gpio_in(d, 0);
+ s->scsi_dma = qdev_get_gpio_in(d, 1);
+
+ /* ESCC */
+ d = DEVICE(&s->escc);
+ qdev_prop_set_uint32(d, "disabled", 0);
+ qdev_prop_set_uint32(d, "frequency", 9600 * 384);
+ qdev_prop_set_uint32(d, "it_shift", 0);
+ qdev_prop_set_bit(d, "bit_swap", true);
+ qdev_prop_set_chr(d, "chrB", serial_hd(1));
+ qdev_prop_set_chr(d, "chrA", serial_hd(0));
+ qdev_prop_set_uint32(d, "chnBtype", escc_serial);
+ qdev_prop_set_uint32(d, "chnAtype", escc_serial);
+
+ sbd = SYS_BUS_DEVICE(d);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(dev, NEXT_SCC_I));
+ sysbus_connect_irq(sbd, 1, qdev_get_gpio_in(dev, NEXT_SCC_DMA_I));
+
+ /* RTC */
+ d = DEVICE(&s->rtc);
+ if (!sysbus_realize(SYS_BUS_DEVICE(d), errp)) {
+ return;
+ }
+ /* Data from NeXTPC to RTC */
+ qdev_connect_gpio_out_named(dev, "rtc-data-out", 0,
+ qdev_get_gpio_in_named(d, "rtc-data-in", 0));
+ /* Data from RTC to NeXTPC */
+ qdev_connect_gpio_out_named(d, "rtc-data-out", 0,
+ qdev_get_gpio_in_named(dev,
+ "rtc-data-in", 0));
+ qdev_connect_gpio_out_named(dev, "rtc-cmd-reset", 0,
+ qdev_get_gpio_in_named(d, "rtc-cmd-reset", 0));
+ qdev_connect_gpio_out_named(d, "rtc-power-out", 0,
+ qdev_get_gpio_in(dev, NEXT_PWR_I));
+}
+
+static void next_pc_init(Object *obj)
+{
+ NeXTPC *s = NEXT_PC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(DEVICE(obj), next_irq, NEXT_NUM_IRQS);
memory_region_init_io(&s->mmiomem, OBJECT(s), &next_mmio_ops, s,
- "next.mmio", 0xd0000);
- memory_region_init_io(&s->scrmem, OBJECT(s), &next_scr_ops, s,
- "next.scr", 0x20000);
+ "next.mmio", 0x9000);
sysbus_init_mmio(sbd, &s->mmiomem);
- sysbus_init_mmio(sbd, &s->scrmem);
+
+ memory_region_init_io(&s->dummyen_mem, OBJECT(s), &next_dummy_en_ops, s,
+ "next.en", 0x20);
+ sysbus_init_mmio(sbd, &s->dummyen_mem);
+
+ object_initialize_child(obj, "next-scsi", &s->next_scsi, TYPE_NEXT_SCSI);
+ sysbus_init_mmio(sbd,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->next_scsi), 0));
+
+ memory_region_init_io(&s->floppy_mem, OBJECT(s), &next_floppy_ops, s,
+ "next.floppy", 4);
+ sysbus_init_mmio(sbd, &s->floppy_mem);
+
+ object_initialize_child(obj, "escc", &s->escc, TYPE_ESCC);
+ sysbus_init_mmio(sbd,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->escc), 0));
+
+ memory_region_init_io(&s->timer_mem, OBJECT(s), &next_timer_ops, s,
+ "next.timer", 4);
+ sysbus_init_mmio(sbd, &s->timer_mem);
+
+ object_initialize_child(obj, "rtc", &s->rtc, TYPE_NEXT_RTC);
+
+ qdev_init_gpio_in_named(DEVICE(obj), next_pc_rtc_data_in_irq,
+ "rtc-data-in", 1);
+ qdev_init_gpio_out_named(DEVICE(obj), &s->rtc_data_irq,
+ "rtc-data-out", 1);
+ qdev_init_gpio_out_named(DEVICE(obj), &s->rtc_cmd_reset_irq,
+ "rtc-cmd-reset", 1);
}
/*
@@ -914,31 +1209,14 @@ static void next_pc_realize(DeviceState *dev, Error **errp)
* this cpu link property and could instead provide outbound IRQ lines
* that the board could wire up to the CPU.
*/
-static Property next_pc_properties[] = {
+static const Property next_pc_properties[] = {
DEFINE_PROP_LINK("cpu", NeXTPC, cpu, TYPE_M68K_CPU, M68kCPU *),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static const VMStateDescription next_rtc_vmstate = {
- .name = "next-rtc",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_INT8(phase, NextRtc),
- VMSTATE_UINT8_ARRAY(ram, NextRtc, 32),
- VMSTATE_UINT8(command, NextRtc),
- VMSTATE_UINT8(value, NextRtc),
- VMSTATE_UINT8(status, NextRtc),
- VMSTATE_UINT8(control, NextRtc),
- VMSTATE_UINT8(retval, NextRtc),
- VMSTATE_END_OF_LIST()
- },
};
static const VMStateDescription next_pc_vmstate = {
.name = "next-pc",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 4,
+ .minimum_version_id = 4,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(scr1, NeXTPC),
VMSTATE_UINT32(scr2, NeXTPC),
@@ -946,27 +1224,26 @@ static const VMStateDescription next_pc_vmstate = {
VMSTATE_UINT32(int_mask, NeXTPC),
VMSTATE_UINT32(int_status, NeXTPC),
VMSTATE_UINT32(led, NeXTPC),
- VMSTATE_UINT8(scsi_csr_1, NeXTPC),
- VMSTATE_UINT8(scsi_csr_2, NeXTPC),
- VMSTATE_STRUCT(rtc, NeXTPC, 0, next_rtc_vmstate, NextRtc),
VMSTATE_END_OF_LIST()
},
};
-static void next_pc_class_init(ObjectClass *klass, void *data)
+static void next_pc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
dc->desc = "NeXT Peripheral Controller";
dc->realize = next_pc_realize;
- dc->reset = next_pc_reset;
device_class_set_props(dc, next_pc_properties);
dc->vmsd = &next_pc_vmstate;
+ rc->phases.hold = next_pc_reset_hold;
}
static const TypeInfo next_pc_info = {
.name = TYPE_NEXT_PC,
.parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = next_pc_init,
.instance_size = sizeof(NeXTPC),
.class_init = next_pc_class_init,
};
@@ -1004,11 +1281,32 @@ static void next_cube_init(MachineState *machine)
sysbus_create_simple(TYPE_NEXTFB, 0x0B000000, NULL);
/* MMIO */
- sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 0, 0x02000000);
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 0, 0x02005000);
/* BMAP IO - acts as a catch-all for now */
sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02100000);
+ /* en network (dummy) */
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02106000);
+
+ /* unknown: Brightness control register? */
+ empty_slot_init("next.unknown.0", 0x02110000, 0x10);
+ /* unknown: Magneto-Optical drive controller? */
+ empty_slot_init("next.unknown.1", 0x02112000, 0x10);
+
+ /* SCSI */
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 2, 0x02114000);
+ /* Floppy */
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 3, 0x02114108);
+ /* ESCC */
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 4, 0x02118000);
+
+ /* unknown: Serial clock configuration register? */
+ empty_slot_init("next.unknown.2", 0x02118004, 0x10);
+
+ /* Timer */
+ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 5, 0x0211a000);
+
/* BMAP memory */
memory_region_init_ram_flags_nomigrate(&m->bmapm1, NULL, "next.bmapmem",
64, RAM_SHARED, &error_fatal);
@@ -1036,7 +1334,7 @@ static void next_cube_init(MachineState *machine)
/* Initial PC is always at offset 4 in firmware binaries */
ptr = rom_ptr(0x01000004, 4);
g_assert(ptr != NULL);
- env->pc = ldl_p(ptr);
+ env->pc = ldl_be_p(ptr);
if (env->pc >= 0x01020000) {
error_report("'%s' does not seem to be a valid firmware image.",
bios_name);
@@ -1044,21 +1342,13 @@ static void next_cube_init(MachineState *machine)
}
}
- /* Serial */
- next_escc_init(pcdev);
-
- /* TODO: */
- /* Network */
- /* SCSI */
- next_scsi_init(pcdev, cpu);
-
/* DMA */
memory_region_init_io(&m->dmamem, NULL, &next_dma_ops, machine,
"next.dma", 0x5000);
memory_region_add_subregion(sysmem, 0x02000000, &m->dmamem);
}
-static void next_machine_class_init(ObjectClass *oc, void *data)
+static void next_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1068,6 +1358,7 @@ static void next_machine_class_init(ObjectClass *oc, void *data)
mc->default_ram_size = RAM_SIZE;
mc->default_ram_id = "next.ram";
mc->default_cpu_type = M68K_CPU_TYPE_NAME("m68040");
+ mc->no_cdrom = true;
}
static const TypeInfo next_typeinfo = {
@@ -1081,6 +1372,8 @@ static void next_register_type(void)
{
type_register_static(&next_typeinfo);
type_register_static(&next_pc_info);
+ type_register_static(&next_scsi_info);
+ type_register_static(&next_rtc_info);
}
type_init(next_register_type)
diff --git a/hw/m68k/next-kbd.c b/hw/m68k/next-kbd.c
index 0c348c1..2bec945 100644
--- a/hw/m68k/next-kbd.c
+++ b/hw/m68k/next-kbd.c
@@ -68,7 +68,6 @@ struct NextKBDState {
uint16_t shift;
};
-static void queue_code(void *opaque, int code);
/* lots of magic numbers here */
static uint32_t kbd_read_byte(void *opaque, hwaddr addr)
@@ -163,71 +162,73 @@ static const MemoryRegionOps kbd_ops = {
.write = kbd_writefn,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
};
-static void nextkbd_event(void *opaque, int ch)
-{
- /*
- * Will want to set vars for caps/num lock
- * if (ch & 0x80) -> key release
- * there's also e0 escaped scancodes that might need to be handled
- */
- queue_code(opaque, ch);
-}
-
-static const unsigned char next_keycodes[128] = {
- 0x00, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x50, 0x4F,
- 0x4E, 0x1E, 0x1F, 0x20, 0x1D, 0x1C, 0x1B, 0x00,
- 0x42, 0x43, 0x44, 0x45, 0x48, 0x47, 0x46, 0x06,
- 0x07, 0x08, 0x00, 0x00, 0x2A, 0x00, 0x39, 0x3A,
- 0x3B, 0x3C, 0x3D, 0x40, 0x3F, 0x3E, 0x2D, 0x2C,
- 0x2B, 0x26, 0x00, 0x00, 0x31, 0x32, 0x33, 0x34,
- 0x35, 0x37, 0x36, 0x2e, 0x2f, 0x30, 0x00, 0x00,
- 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+static const int qcode_to_nextkbd_keycode[] = {
+ [Q_KEY_CODE_ESC] = 0x49,
+ [Q_KEY_CODE_1] = 0x4a,
+ [Q_KEY_CODE_2] = 0x4b,
+ [Q_KEY_CODE_3] = 0x4c,
+ [Q_KEY_CODE_4] = 0x4d,
+ [Q_KEY_CODE_5] = 0x50,
+ [Q_KEY_CODE_6] = 0x4f,
+ [Q_KEY_CODE_7] = 0x4e,
+ [Q_KEY_CODE_8] = 0x1e,
+ [Q_KEY_CODE_9] = 0x1f,
+ [Q_KEY_CODE_0] = 0x20,
+ [Q_KEY_CODE_MINUS] = 0x1d,
+ [Q_KEY_CODE_EQUAL] = 0x1c,
+ [Q_KEY_CODE_BACKSPACE] = 0x1b,
+
+ [Q_KEY_CODE_Q] = 0x42,
+ [Q_KEY_CODE_W] = 0x43,
+ [Q_KEY_CODE_E] = 0x44,
+ [Q_KEY_CODE_R] = 0x45,
+ [Q_KEY_CODE_T] = 0x48,
+ [Q_KEY_CODE_Y] = 0x47,
+ [Q_KEY_CODE_U] = 0x46,
+ [Q_KEY_CODE_I] = 0x06,
+ [Q_KEY_CODE_O] = 0x07,
+ [Q_KEY_CODE_P] = 0x08,
+ [Q_KEY_CODE_RET] = 0x2a,
+ [Q_KEY_CODE_A] = 0x39,
+ [Q_KEY_CODE_S] = 0x3a,
+
+ [Q_KEY_CODE_D] = 0x3b,
+ [Q_KEY_CODE_F] = 0x3c,
+ [Q_KEY_CODE_G] = 0x3d,
+ [Q_KEY_CODE_H] = 0x40,
+ [Q_KEY_CODE_J] = 0x3f,
+ [Q_KEY_CODE_K] = 0x3e,
+ [Q_KEY_CODE_L] = 0x2d,
+ [Q_KEY_CODE_SEMICOLON] = 0x2c,
+ [Q_KEY_CODE_APOSTROPHE] = 0x2b,
+ [Q_KEY_CODE_GRAVE_ACCENT] = 0x26,
+ [Q_KEY_CODE_Z] = 0x31,
+ [Q_KEY_CODE_X] = 0x32,
+ [Q_KEY_CODE_C] = 0x33,
+ [Q_KEY_CODE_V] = 0x34,
+
+ [Q_KEY_CODE_B] = 0x35,
+ [Q_KEY_CODE_N] = 0x37,
+ [Q_KEY_CODE_M] = 0x36,
+ [Q_KEY_CODE_COMMA] = 0x2e,
+ [Q_KEY_CODE_DOT] = 0x2f,
+ [Q_KEY_CODE_SLASH] = 0x30,
+
+ [Q_KEY_CODE_SPC] = 0x38,
};
-static void queue_code(void *opaque, int code)
+static void nextkbd_put_keycode(NextKBDState *s, int keycode)
{
- NextKBDState *s = NEXTKBD(opaque);
KBDQueue *q = &s->queue;
- int key = code & KD_KEYMASK;
- int release = code & 0x80;
- static int ext;
-
- if (code == 0xE0) {
- ext = 1;
- }
-
- if (code == 0x2A || code == 0x1D || code == 0x36) {
- if (code == 0x2A) {
- s->shift = KD_LSHIFT;
- } else if (code == 0x36) {
- s->shift = KD_RSHIFT;
- ext = 0;
- } else if (code == 0x1D && !ext) {
- s->shift = KD_LCOMM;
- } else if (code == 0x1D && ext) {
- ext = 0;
- s->shift = KD_RCOMM;
- }
- return;
- } else if (code == (0x2A | 0x80) || code == (0x1D | 0x80) ||
- code == (0x36 | 0x80)) {
- s->shift = 0;
- return;
- }
if (q->count >= KBD_QUEUE_SIZE) {
return;
}
- q->data[q->wptr] = next_keycodes[key] | release;
-
+ q->data[q->wptr] = keycode;
if (++q->wptr == KBD_QUEUE_SIZE) {
q->wptr = 0;
}
@@ -241,6 +242,53 @@ static void queue_code(void *opaque, int code)
/* s->update_irq(s->update_arg, 1); */
}
+static void nextkbd_event(DeviceState *dev, QemuConsole *src, InputEvent *evt)
+{
+ NextKBDState *s = NEXTKBD(dev);
+ int qcode, keycode;
+ bool key_down = evt->u.key.data->down;
+
+ qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key);
+ if (qcode >= ARRAY_SIZE(qcode_to_nextkbd_keycode)) {
+ return;
+ }
+
+ /* The Shift keys have no entry in the keycode table, so handle them separately */
+ if (qcode == Q_KEY_CODE_SHIFT) {
+ if (key_down) {
+ s->shift |= KD_LSHIFT;
+ } else {
+ s->shift &= ~KD_LSHIFT;
+ }
+ }
+
+ if (qcode == Q_KEY_CODE_SHIFT_R) {
+ if (key_down) {
+ s->shift |= KD_RSHIFT;
+ } else {
+ s->shift &= ~KD_RSHIFT;
+ }
+ }
+
+ keycode = qcode_to_nextkbd_keycode[qcode];
+ if (!keycode) {
+ return;
+ }
+
+ /* If key release event, create keyboard break code */
+ if (!key_down) {
+ keycode |= 0x80;
+ }
+
+ nextkbd_put_keycode(s, keycode);
+}
+
+static const QemuInputHandler nextkbd_handler = {
+ .name = "QEMU NeXT Keyboard",
+ .mask = INPUT_EVENT_MASK_KEY,
+ .event = nextkbd_event,
+};
+
static void nextkbd_reset(DeviceState *dev)
{
NextKBDState *nks = NEXTKBD(dev);
@@ -256,7 +304,7 @@ static void nextkbd_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->mr, OBJECT(dev), &kbd_ops, s, "next.kbd", 0x1000);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr);
- qemu_add_kbd_event_handler(nextkbd_event, s);
+ qemu_input_handler_register(dev, &nextkbd_handler);
}
static const VMStateDescription nextkbd_vmstate = {
@@ -264,14 +312,14 @@ static const VMStateDescription nextkbd_vmstate = {
.unmigratable = 1, /* TODO: Implement this when m68k CPU is migratable */
};
-static void nextkbd_class_init(ObjectClass *oc, void *data)
+static void nextkbd_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
dc->vmsd = &nextkbd_vmstate;
dc->realize = nextkbd_realize;
- dc->reset = nextkbd_reset;
+ device_class_set_legacy_reset(dc, nextkbd_reset);
}
static const TypeInfo nextkbd_info = {
diff --git a/hw/m68k/q800-glue.c b/hw/m68k/q800-glue.c
index e2ae7c3..36de67c 100644
--- a/hw/m68k/q800-glue.c
+++ b/hw/m68k/q800-glue.c
@@ -203,9 +203,8 @@ static const VMStateDescription vmstate_glue = {
* this cpu link property and could instead provide outbound IRQ lines
* that the board could wire up to the CPU.
*/
-static Property glue_properties[] = {
+static const Property glue_properties[] = {
DEFINE_PROP_LINK("cpu", GLUEState, cpu, TYPE_M68K_CPU, M68kCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
static void glue_finalize(Object *obj)
@@ -229,7 +228,7 @@ static void glue_init(Object *obj)
s->nmi_release = timer_new_ms(QEMU_CLOCK_VIRTUAL, glue_nmi_release, s);
}
-static void glue_class_init(ObjectClass *klass, void *data)
+static void glue_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -249,7 +248,7 @@ static const TypeInfo glue_info_types[] = {
.instance_init = glue_init,
.instance_finalize = glue_finalize,
.class_init = glue_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ }
},
diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c
index fa7683b..793b23f 100644
--- a/hw/m68k/q800.c
+++ b/hw/m68k/q800.c
@@ -24,7 +24,8 @@
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qemu/guest-random.h"
-#include "sysemu/sysemu.h"
+#include "exec/target_page.h"
+#include "system/system.h"
#include "cpu.h"
#include "hw/boards.h"
#include "hw/or-irq.h"
@@ -51,9 +52,9 @@
#include "net/util.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
+#include "system/reset.h"
#include "migration/vmstate.h"
#define MACROM_ADDR 0x40800000
@@ -210,7 +211,6 @@ static uint64_t machine_id_read(void *opaque, hwaddr addr, unsigned size)
static void machine_id_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
- return;
}
static const MemoryRegionOps machine_id_ops = {
@@ -231,7 +231,6 @@ static uint64_t ramio_read(void *opaque, hwaddr addr, unsigned size)
static void ramio_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
- return;
}
static const MemoryRegionOps ramio_ops = {
@@ -585,7 +584,7 @@ static void q800_machine_init(MachineState *machine)
}
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
- &elf_entry, NULL, &high, NULL, 1,
+ &elf_entry, NULL, &high, NULL, ELFDATA2MSB,
EM_68K, 0, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s'", kernel_filename);
@@ -684,9 +683,9 @@ static void q800_machine_init(MachineState *machine)
ptr = rom_ptr(MACROM_ADDR, bios_size);
assert(ptr != NULL);
- stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */
+ stl_phys(cs->as, 0, ldl_be_p(ptr)); /* reset initial SP */
stl_phys(cs->as, 4,
- MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */
+ MACROM_ADDR + ldl_be_p(ptr + 4)); /* reset initial PC */
}
}
}
@@ -728,7 +727,7 @@ static GlobalProperty hw_compat_q800[] = {
};
static const size_t hw_compat_q800_len = G_N_ELEMENTS(hw_compat_q800);
-static void q800_machine_class_init(ObjectClass *oc, void *data)
+static void q800_machine_class_init(ObjectClass *oc, const void *data)
{
static const char * const valid_cpu_types[] = {
M68K_CPU_TYPE_NAME("m68040"),
diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c
index cda199a..875fd00 100644
--- a/hw/m68k/virt.c
+++ b/hw/m68k/virt.c
@@ -10,7 +10,8 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/guest-random.h"
-#include "sysemu/sysemu.h"
+#include "exec/target_page.h"
+#include "system/system.h"
#include "cpu.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
@@ -24,9 +25,9 @@
#include "net/net.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
+#include "system/reset.h"
#include "hw/intc/m68k_irqc.h"
#include "hw/misc/virt_ctrl.h"
@@ -228,7 +229,7 @@ static void virt_init(MachineState *machine)
}
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
- &elf_entry, NULL, &high, NULL, 1,
+ &elf_entry, NULL, &high, NULL, ELFDATA2MSB,
EM_68K, 0, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s'", kernel_filename);
@@ -309,7 +310,7 @@ static void virt_init(MachineState *machine)
}
}
-static void virt_machine_class_init(ObjectClass *oc, void *data)
+static void virt_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->desc = "QEMU M68K Virtual Machine";
@@ -338,7 +339,7 @@ type_init(virt_machine_register_types)
#define DEFINE_VIRT_MACHINE_IMPL(latest, ...) \
static void MACHINE_VER_SYM(class_init, virt, __VA_ARGS__)( \
ObjectClass *oc, \
- void *data) \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
MACHINE_VER_SYM(options, virt, __VA_ARGS__)(mc); \
@@ -366,10 +367,31 @@ type_init(virt_machine_register_types)
#define DEFINE_VIRT_MACHINE(major, minor) \
DEFINE_VIRT_MACHINE_IMPL(false, major, minor)
+static void virt_machine_10_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(10, 1)
+
+static void virt_machine_10_0_options(MachineClass *mc)
+{
+ virt_machine_10_1_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+}
+DEFINE_VIRT_MACHINE(10, 0)
+
+static void virt_machine_9_2_options(MachineClass *mc)
+{
+ virt_machine_10_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+}
+DEFINE_VIRT_MACHINE(9, 2)
+
static void virt_machine_9_1_options(MachineClass *mc)
{
+ virt_machine_9_2_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}
-DEFINE_VIRT_MACHINE_AS_LATEST(9, 1)
+DEFINE_VIRT_MACHINE(9, 1)
static void virt_machine_9_0_options(MachineClass *mc)
{
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 35ac598..94e7274 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -17,6 +17,7 @@
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -24,11 +25,19 @@
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
+/* type3 device private */
+enum CXL_T3_MSIX_VECTOR {
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS = 0,
+ CXL_T3_MSIX_EVENT_START = 2,
+ CXL_T3_MSIX_MBOX = CXL_T3_MSIX_EVENT_START + CXL_EVENT_TYPE_MAX,
+ CXL_T3_MSIX_VECTOR_NR
+};
+
#define DWORD_BYTE 4
#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
@@ -737,6 +746,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "volatile memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostvmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostvmem)));
+ return false;
+ }
memory_region_set_nonvolatile(vmr, false);
memory_region_set_enabled(vmr, true);
host_memory_backend_set_mapped(ct3d->hostvmem, true);
@@ -760,6 +774,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "persistent memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostpmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostpmem)));
+ return false;
+ }
memory_region_set_nonvolatile(pmr, true);
memory_region_set_enabled(pmr, true);
host_memory_backend_set_mapped(ct3d->hostpmem, true);
@@ -790,6 +809,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->dc.host_dc)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->dc.host_dc)));
+ return false;
+ }
/*
* Set DC regions as volatile for now, non-volatile support can
* be added in the future if needed.
@@ -819,6 +843,19 @@ static DOEProtocol doe_cdat_prot[] = {
{ }
};
+/* Initialize CXL device alerts with default threshold values. */
+static void init_alert_config(CXLType3Dev *ct3d)
+{
+ ct3d->alert_config = (CXLAlertConfig) {
+ .life_used_crit_alert_thresh = 75,
+ .life_used_warn_thresh = 40,
+ .over_temp_crit_alert_thresh = 35,
+ .under_temp_crit_alert_thresh = 10,
+ .over_temp_warn_thresh = 25,
+ .under_temp_warn_thresh = 20
+ };
+}
+
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
ERRP_GUARD();
@@ -827,8 +864,8 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
ComponentRegisters *regs = &cxl_cstate->crb;
MemoryRegion *mr = &regs->component_registers;
uint8_t *pci_conf = pci_dev->config;
- unsigned short msix_num = 6;
int i, rc;
+ uint16_t count;
QTAILQ_INIT(&ct3d->error_list);
@@ -867,39 +904,63 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
&ct3d->cxl_dstate.device_registers);
/* MSI(-X) Initialization */
- rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
+ rc = msix_init_exclusive_bar(pci_dev, CXL_T3_MSIX_VECTOR_NR, 4, errp);
if (rc) {
- goto err_address_space_free;
+ goto err_free_special_ops;
}
- for (i = 0; i < msix_num; i++) {
+ for (i = 0; i < CXL_T3_MSIX_VECTOR_NR; i++) {
msix_vector_use(pci_dev, i);
}
/* DOE Initialization */
- pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);
+ pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true,
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS);
cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
if (!cxl_doe_cdat_init(cxl_cstate, errp)) {
- goto err_free_special_ops;
+ goto err_msix_uninit;
}
+ init_alert_config(ct3d);
pcie_cap_deverr_init(pci_dev);
/* Leave a bit of room for expansion */
- rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
+ rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, errp);
if (rc) {
goto err_release_cdat;
}
- cxl_event_init(&ct3d->cxl_dstate, 2);
+ cxl_event_init(&ct3d->cxl_dstate, CXL_T3_MSIX_EVENT_START);
+
+ /* Set default value for patrol scrub attributes */
+ ct3d->patrol_scrub_attrs.scrub_cycle_cap =
+ CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
+ CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->patrol_scrub_attrs.scrub_cycle =
+ CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
+ (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8);
+ ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT;
+
+ /* Set default value for DDR5 ECS read attributes */
+ ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT;
+ for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
+ ct3d->ecs_attrs.fru_attrs[count].ecs_cap =
+ CXL_ECS_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->ecs_attrs.fru_attrs[count].ecs_config =
+ CXL_ECS_THRESHOLD_COUNT_DEFAULT |
+ (CXL_ECS_MODE_DEFAULT << 3);
+ /* Reserved */
+ ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0;
+ }
return;
err_release_cdat:
cxl_doe_cdat_release(cxl_cstate);
+err_msix_uninit:
+ msix_uninit_exclusive_bar(pci_dev);
err_free_special_ops:
g_free(regs->special_ops);
-err_address_space_free:
if (ct3d->dc.host_dc) {
cxl_destroy_dc_regions(ct3d);
address_space_destroy(&ct3d->dc.host_dc_as);
@@ -910,7 +971,6 @@ err_address_space_free:
if (ct3d->hostvmem) {
address_space_destroy(&ct3d->hostvmem_as);
}
- return;
}
static void ct3_exit(PCIDevice *pci_dev)
@@ -921,7 +981,9 @@ static void ct3_exit(PCIDevice *pci_dev)
pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate);
+ msix_uninit_exclusive_bar(pci_dev);
g_free(regs->special_ops);
+ cxl_destroy_cci(&ct3d->cci);
if (ct3d->dc.host_dc) {
cxl_destroy_dc_regions(ct3d);
address_space_destroy(&ct3d->dc.host_dc_as);
@@ -1052,10 +1114,17 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
continue;
}
- *dpa = dpa_base +
- ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
- ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
- >> iw));
+ if (iw < 8) {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
+ >> iw));
+ } else {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((((MAKE_64BIT_MASK(ig + iw, 64 - ig - iw) & hpa_offset)
+ >> (ig + iw)) / 3) << (ig + 8)));
+ }
return true;
}
@@ -1127,7 +1196,7 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
qemu_guest_getrandom_nofail(data, size);
return MEMTX_OK;
}
@@ -1149,7 +1218,7 @@ MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
return MEMTX_OK;
}
@@ -1162,22 +1231,28 @@ static void ct3d_reset(DeviceState *dev)
uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
+ pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed);
cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
- cxl_device_register_init_t3(ct3d);
+ cxl_device_register_init_t3(ct3d, CXL_T3_MSIX_MBOX);
/*
* Bring up an endpoint to target with MCTP over VDM.
* This device is emulating an MLD with single LD for now.
*/
+ if (ct3d->vdm_fm_owned_ld_mctp_cci.initialized) {
+ cxl_destroy_cci(&ct3d->vdm_fm_owned_ld_mctp_cci);
+ }
cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
+ if (ct3d->ld0_cci.initialized) {
+ cxl_destroy_cci(&ct3d->ld0_cci);
+ }
cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
-
}
-static Property ct3_props[] = {
+static const Property ct3_props[] = {
DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
HostMemoryBackend *), /* for backward compatibility */
DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
@@ -1191,7 +1266,10 @@ static Property ct3_props[] = {
DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev,
+ speed, PCIE_LINK_SPEED_32),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev,
+ width, PCIE_LINK_WIDTH_16),
};
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
@@ -1304,6 +1382,12 @@ void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
+void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d)
+{
+ ct3d->poison_list_overflowed = false;
+ ct3d->poison_list_overflow_ts = 0;
+}
+
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
Error **errp)
{
@@ -1331,28 +1415,28 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
ct3d = CXL_TYPE3(obj);
QLIST_FOREACH(p, &ct3d->poison_list, node) {
- if (((start >= p->start) && (start < p->start + p->length)) ||
- ((start + length > p->start) &&
- (start + length <= p->start + p->length))) {
+ if ((start < p->start + p->length) && (start + length > p->start)) {
error_setg(errp,
"Overlap with existing poisoned region not supported");
return;
}
}
- if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
- cxl_set_poison_list_overflowed(ct3d);
- return;
- }
-
p = g_new0(CXLPoison, 1);
p->length = length;
p->start = start;
/* Different from injected via the mbox */
p->type = CXL_POISON_TYPE_INTERNAL;
- QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
- ct3d->poison_list_cnt++;
+ if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
+ QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
+ ct3d->poison_list_cnt++;
+ } else {
+ if (!ct3d->poison_list_overflowed) {
+ cxl_set_poison_list_overflowed(ct3d);
+ }
+ QLIST_INSERT_HEAD(&ct3d->poison_list_bkp, p, node);
+ }
}
/* For uncorrectable errors include support for multiple header recording */
@@ -1446,8 +1530,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path,
stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
pcie_aer_inject_error(PCI_DEVICE(obj), &err);
-
- return;
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
@@ -1723,7 +1805,6 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
cxl_event_irq_assert(ct3d);
}
- return;
}
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
@@ -2014,11 +2095,11 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
stw_le_p(&dCap.host_id, hid);
/* only valid for DC_REGION_CONFIG_UPDATED event */
dCap.updated_region_id = 0;
- dCap.flags = 0;
for (i = 0; i < num_extents; i++) {
memcpy(&dCap.dynamic_capacity_extent, &extents[i],
sizeof(CXLDCExtentRaw));
+ dCap.flags = 0;
if (i < num_extents - 1) {
/* Set "More" flag */
dCap.flags |= BIT(0);
@@ -2080,7 +2161,7 @@ void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id,
}
}
-static void ct3_class_init(ObjectClass *oc, void *data)
+static void ct3_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -2098,7 +2179,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "CXL Memory Device (Type 3)";
- dc->reset = ct3d_reset;
+ device_class_set_legacy_reset(dc, ct3d_reset);
device_class_set_props(dc, ct3_props);
cvc->get_lsa_size = get_lsa_size;
@@ -2113,7 +2194,7 @@ static const TypeInfo ct3d_info = {
.class_size = sizeof(struct CXLType3Class),
.class_init = ct3_class_init,
.instance_size = sizeof(CXLType3Dev),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CXL_DEVICE },
{ INTERFACE_PCIE_DEVICE },
{}
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index a5f279a..1a432e9 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -16,8 +16,8 @@
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
-#include "sysemu/kvm.h"
-#include "exec/address-spaces.h"
+#include "system/kvm.h"
+#include "system/address-spaces.h"
#include "trace.h"
static bool memory_device_is_empty(const MemoryDeviceState *md)
diff --git a/hw/mem/npcm7xx_mc.c b/hw/mem/npcm7xx_mc.c
index abc5af5..07fc108 100644
--- a/hw/mem/npcm7xx_mc.c
+++ b/hw/mem/npcm7xx_mc.c
@@ -65,7 +65,7 @@ static void npcm7xx_mc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
}
-static void npcm7xx_mc_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_mc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 1631a7d..23ab143e 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -30,7 +30,7 @@
#include "hw/mem/nvdimm.h"
#include "hw/qdev-properties.h"
#include "hw/mem/memory-device.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
@@ -246,12 +246,11 @@ static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf,
memory_region_set_dirty(mr, backend_offset, size);
}
-static Property nvdimm_properties[] = {
+static const Property nvdimm_properties[] = {
DEFINE_PROP_BOOL(NVDIMM_UNARMED_PROP, NVDIMMDevice, unarmed, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvdimm_class_init(ObjectClass *oc, void *data)
+static void nvdimm_class_init(ObjectClass *oc, const void *data)
{
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 27919ca..f701d5b 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -28,8 +28,8 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "trace.h"
static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
@@ -150,14 +150,13 @@ out:
return slot;
}
-static Property pc_dimm_properties[] = {
+static const Property pc_dimm_properties[] = {
DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0),
DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0),
DEFINE_PROP_INT32(PC_DIMM_SLOT_PROP, PCDIMMDevice, slot,
PC_DIMM_UNASSIGNED_SLOT),
DEFINE_PROP_LINK(PC_DIMM_MEMDEV_PROP, PCDIMMDevice, hostmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name,
@@ -277,7 +276,7 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md,
}
}
-static void pc_dimm_class_init(ObjectClass *oc, void *data)
+static void pc_dimm_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
@@ -302,7 +301,7 @@ static const TypeInfo pc_dimm_info = {
.instance_init = pc_dimm_init,
.class_init = pc_dimm_class_init,
.class_size = sizeof(PCDIMMDeviceClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_MEMORY_DEVICE },
{ }
},
diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c
index 6e8f4f8..d7b00e5 100644
--- a/hw/mem/sparse-mem.c
+++ b/hw/mem/sparse-mem.c
@@ -17,7 +17,7 @@
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/units.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/mem/sparse-mem.h"
#define SPARSE_MEM(obj) OBJECT_CHECK(SparseMemState, (obj), TYPE_SPARSE_MEM)
@@ -82,7 +82,6 @@ static void sparse_mem_enter_reset(Object *obj, ResetType type)
{
SparseMemState *s = SPARSE_MEM(obj);
g_hash_table_remove_all(s->mapped);
- return;
}
static const MemoryRegionOps sparse_mem_ops = {
@@ -96,14 +95,13 @@ static const MemoryRegionOps sparse_mem_ops = {
},
};
-static Property sparse_mem_properties[] = {
+static const Property sparse_mem_properties[] = {
/* The base address of the memory */
DEFINE_PROP_UINT64("baseaddr", SparseMemState, baseaddr, 0x0),
/* The length of the sparse memory region */
DEFINE_PROP_UINT64("length", SparseMemState, length, UINT64_MAX),
/* Max amount of actual memory that can be used to back the sparse memory */
DEFINE_PROP_UINT64("maxsize", SparseMemState, maxsize, 10 * MiB),
- DEFINE_PROP_END_OF_LIST(),
};
MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length)
@@ -138,7 +136,7 @@ static void sparse_mem_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->mmio);
}
-static void sparse_mem_class_init(ObjectClass *klass, void *data)
+static void sparse_mem_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/meson.build b/hw/meson.build
index 1c6308f..791ce21 100644
--- a/hw/meson.build
+++ b/hw/meson.build
@@ -27,7 +27,6 @@ subdir('nvram')
subdir('pci')
subdir('pci-bridge')
subdir('pci-host')
-subdir('pcmcia')
subdir('rtc')
subdir('scsi')
subdir('sd')
@@ -36,10 +35,13 @@ subdir('smbios')
subdir('ssi')
subdir('timer')
subdir('tpm')
+subdir('uefi')
subdir('ufs')
subdir('usb')
subdir('vfio')
+subdir('vfio-user')
subdir('virtio')
+subdir('vmapple')
subdir('watchdog')
subdir('xen')
subdir('xenpv')
@@ -48,7 +50,6 @@ subdir('fsi')
subdir('alpha')
subdir('arm')
subdir('avr')
-subdir('cris')
subdir('hppa')
subdir('i386')
subdir('loongarch')
diff --git a/hw/microblaze/Kconfig b/hw/microblaze/Kconfig
index d78ba84..b0214b2 100644
--- a/hw/microblaze/Kconfig
+++ b/hw/microblaze/Kconfig
@@ -13,7 +13,7 @@ config PETALOGIX_ML605
default y
depends on MICROBLAZE
select PFLASH_CFI01
- select SERIAL
+ select SERIAL_MM
select SSI_M25P80
select XILINX
select XILINX_AXI
diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c
index ed61e48..4a9c9df 100644
--- a/hw/microblaze/boot.c
+++ b/hw/microblaze/boot.c
@@ -31,8 +31,8 @@
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/guest-random.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/reset.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "elf.h"
@@ -114,8 +114,8 @@ static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
return addr - 0x30000000LL;
}
-void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base,
- uint32_t ramsize,
+void microblaze_load_kernel(MicroBlazeCPU *cpu, bool is_little_endian,
+ hwaddr ddr_base, uint32_t ramsize,
const char *initrd_filename,
const char *dtb_filename,
void (*machine_cpu_reset)(MicroBlazeCPU *))
@@ -130,7 +130,7 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base,
dtb_arg = current_machine->dtb;
/* default to pcbios dtb as passed by machine_init */
if (!dtb_arg && dtb_filename) {
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename);
+ filename = qemu_find_file(QEMU_FILE_TYPE_DTB, dtb_filename);
}
boot_info.machine_cpu_reset = machine_cpu_reset;
@@ -144,13 +144,15 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base,
/* Boots a kernel elf binary. */
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
&entry, NULL, &high, NULL,
- TARGET_BIG_ENDIAN, EM_MICROBLAZE, 0, 0);
+ is_little_endian ? ELFDATA2LSB : ELFDATA2MSB,
+ EM_MICROBLAZE, 0, 0);
base32 = entry;
if (base32 == 0xc0000000) {
kernel_size = load_elf(kernel_filename, NULL,
translate_kernel_address, NULL,
&entry, NULL, NULL, NULL,
- TARGET_BIG_ENDIAN, EM_MICROBLAZE, 0, 0);
+ is_little_endian ? ELFDATA2LSB : ELFDATA2MSB,
+ EM_MICROBLAZE, 0, 0);
}
/* Always boot into physical ram. */
boot_info.bootstrap_pc = (uint32_t)entry;
diff --git a/hw/microblaze/boot.h b/hw/microblaze/boot.h
index 5a8c2f7..d179a55 100644
--- a/hw/microblaze/boot.h
+++ b/hw/microblaze/boot.h
@@ -2,8 +2,8 @@
#define MICROBLAZE_BOOT_H
-void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base,
- uint32_t ramsize,
+void microblaze_load_kernel(MicroBlazeCPU *cpu, bool is_little_endian,
+ hwaddr ddr_base, uint32_t ramsize,
const char *initrd_filename,
const char *dtb_filename,
void (*machine_cpu_reset)(MicroBlazeCPU *));
diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c
index 0f5fabc..6e923c4 100644
--- a/hw/microblaze/petalogix_ml605_mmu.c
+++ b/hw/microblaze/petalogix_ml605_mmu.c
@@ -32,11 +32,11 @@
#include "hw/sysbus.h"
#include "net/net.h"
#include "hw/block/flash.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/qdev-properties.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/ssi/ssi.h"
#include "boot.h"
@@ -90,7 +90,7 @@ petalogix_ml605_init(MachineState *machine)
object_property_set_int(OBJECT(cpu), "use-fpu", 1, &error_abort);
object_property_set_bool(OBJECT(cpu), "dcache-writeback", true,
&error_abort);
- object_property_set_bool(OBJECT(cpu), "endianness", true, &error_abort);
+ object_property_set_bool(OBJECT(cpu), "little-endian", true, &error_abort);
qdev_realize(DEVICE(cpu), NULL, &error_abort);
/* Attach emulated BRAM through the LMB. */
@@ -111,6 +111,7 @@ petalogix_ml605_init(MachineState *machine)
dev = qdev_new("xlnx.xps-intc");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
qdev_prop_set_uint32(dev, "kind-of-intr", 1 << TIMER_IRQ);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
@@ -126,6 +127,7 @@ petalogix_ml605_init(MachineState *machine)
/* 2 timers at irq 2 @ 100 Mhz. */
dev = qdev_new("xlnx.xps-timer");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
qdev_prop_set_uint32(dev, "one-timer-only", 0);
qdev_prop_set_uint32(dev, "clock-frequency", 100 * 1000000);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -173,6 +175,7 @@ petalogix_ml605_init(MachineState *machine)
SSIBus *spi;
dev = qdev_new("xlnx.xps-spi");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
qdev_prop_set_uint8(dev, "num-ss-bits", NUM_SPI_FLASHES);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
@@ -204,7 +207,7 @@ petalogix_ml605_init(MachineState *machine)
cpu->cfg.pvr_regs[5] = 0xc56be000;
cpu->cfg.pvr_regs[10] = 0x0e000000; /* virtex 6 */
- microblaze_load_kernel(cpu, MEMORY_BASEADDR, ram_size,
+ microblaze_load_kernel(cpu, true, MEMORY_BASEADDR, ram_size,
machine->initrd_filename,
BINARY_DEVICE_TREE_FILE,
NULL);
@@ -213,7 +216,7 @@ petalogix_ml605_init(MachineState *machine)
static void petalogix_ml605_machine_init(MachineClass *mc)
{
- mc->desc = "PetaLogix linux refdesign for xilinx ml605 little endian";
+ mc->desc = "PetaLogix linux refdesign for xilinx ml605 (little endian)";
mc->init = petalogix_ml605_init;
}
diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c
index dad46bd..e8d0ddf 100644
--- a/hw/microblaze/petalogix_s3adsp1800_mmu.c
+++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c
@@ -30,10 +30,10 @@
#include "hw/sysbus.h"
#include "net/net.h"
#include "hw/block/flash.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/misc/unimp.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/char/xilinx_uartlite.h"
#include "boot.h"
@@ -55,9 +55,23 @@
#define ETHLITE_IRQ 1
#define UARTLITE_IRQ 3
+#define TYPE_PETALOGIX_S3ADSP1800_MACHINE \
+ MACHINE_TYPE_NAME("petalogix-s3adsp1800")
+
+struct S3Adsp1800MachineState {
+ MachineState parent_class;
+
+ EndianMode endianness;
+};
+
+OBJECT_DECLARE_TYPE(S3Adsp1800MachineState, MachineClass,
+ PETALOGIX_S3ADSP1800_MACHINE)
+
+
static void
petalogix_s3adsp1800_init(MachineState *machine)
{
+ S3Adsp1800MachineState *psms = PETALOGIX_S3ADSP1800_MACHINE(machine);
ram_addr_t ram_size = machine->ram_size;
DeviceState *dev;
MicroBlazeCPU *cpu;
@@ -68,9 +82,12 @@ petalogix_s3adsp1800_init(MachineState *machine)
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
qemu_irq irq[32];
MemoryRegion *sysmem = get_system_memory();
+ EndianMode endianness = psms->endianness;
cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
object_property_set_str(OBJECT(cpu), "version", "7.10.d", &error_abort);
+ object_property_set_bool(OBJECT(cpu), "little-endian",
+ endianness == ENDIAN_MODE_LITTLE, &error_abort);
qdev_realize(DEVICE(cpu), NULL, &error_abort);
/* Attach emulated BRAM through the LMB. */
@@ -90,6 +107,7 @@ petalogix_s3adsp1800_init(MachineState *machine)
64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1);
dev = qdev_new("xlnx.xps-intc");
+ qdev_prop_set_enum(dev, "endianness", endianness);
qdev_prop_set_uint32(dev, "kind-of-intr",
1 << ETHLITE_IRQ | 1 << UARTLITE_IRQ);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -101,6 +119,7 @@ petalogix_s3adsp1800_init(MachineState *machine)
}
dev = qdev_new(TYPE_XILINX_UARTLITE);
+ qdev_prop_set_enum(dev, "endianness", endianness);
qdev_prop_set_chr(dev, "chardev", serial_hd(0));
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR);
@@ -108,6 +127,7 @@ petalogix_s3adsp1800_init(MachineState *machine)
/* 2 timers at irq 2 @ 62 Mhz. */
dev = qdev_new("xlnx.xps-timer");
+ qdev_prop_set_enum(dev, "endianness", endianness);
qdev_prop_set_uint32(dev, "one-timer-only", 0);
qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -115,6 +135,7 @@ petalogix_s3adsp1800_init(MachineState *machine)
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]);
dev = qdev_new("xlnx.xps-ethernetlite");
+ qdev_prop_set_enum(dev, "endianness", endianness);
qemu_configure_nic_device(dev, true, NULL);
qdev_prop_set_uint32(dev, "tx-ping-pong", 0);
qdev_prop_set_uint32(dev, "rx-ping-pong", 0);
@@ -122,19 +143,52 @@ petalogix_s3adsp1800_init(MachineState *machine)
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]);
- create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000);
+ create_unimplemented_device("xps_gpio", GPIO_BASEADDR, 0x10000);
- microblaze_load_kernel(cpu, ddr_base, ram_size,
- machine->initrd_filename,
+ microblaze_load_kernel(cpu, endianness == ENDIAN_MODE_LITTLE, ddr_base,
+ ram_size, machine->initrd_filename,
BINARY_DEVICE_TREE_FILE,
NULL);
}
-static void petalogix_s3adsp1800_machine_init(MachineClass *mc)
+static int machine_get_endianness(Object *obj, Error **errp G_GNUC_UNUSED)
+{
+ S3Adsp1800MachineState *ms = PETALOGIX_S3ADSP1800_MACHINE(obj);
+ return ms->endianness;
+}
+
+static void machine_set_endianness(Object *obj, int endianness, Error **errp)
+{
+ S3Adsp1800MachineState *ms = PETALOGIX_S3ADSP1800_MACHINE(obj);
+ ms->endianness = endianness;
+}
+
+static void petalogix_s3adsp1800_machine_class_init(ObjectClass *oc,
+ const void *data)
{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ ObjectProperty *prop;
+
mc->desc = "PetaLogix linux refdesign for xilinx Spartan 3ADSP1800";
mc->init = petalogix_s3adsp1800_init;
mc->is_default = true;
+
+ prop = object_class_property_add_enum(oc, "endianness", "EndianMode",
+ &EndianMode_lookup,
+ machine_get_endianness,
+ machine_set_endianness);
+ object_property_set_default_str(prop, TARGET_BIG_ENDIAN ? "big" : "little");
+ object_class_property_set_description(oc, "endianness",
+ "Defines whether the machine runs in big or little endian mode");
}
-DEFINE_MACHINE("petalogix-s3adsp1800", petalogix_s3adsp1800_machine_init)
+static const TypeInfo petalogix_s3adsp1800_machine_types[] = {
+ {
+ .name = TYPE_PETALOGIX_S3ADSP1800_MACHINE,
+ .parent = TYPE_MACHINE,
+ .class_init = petalogix_s3adsp1800_machine_class_init,
+ .instance_size = sizeof(S3Adsp1800MachineState),
+ },
+};
+
+DEFINE_TYPES(petalogix_s3adsp1800_machine_types)
diff --git a/hw/microblaze/xlnx-zynqmp-pmu.c b/hw/microblaze/xlnx-zynqmp-pmu.c
index 1bfc964..e909802 100644
--- a/hw/microblaze/xlnx-zynqmp-pmu.c
+++ b/hw/microblaze/xlnx-zynqmp-pmu.c
@@ -17,7 +17,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "cpu.h"
#include "boot.h"
@@ -90,7 +90,7 @@ static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp)
object_property_set_bool(OBJECT(&s->cpu), "use-pcmp-instr", true,
&error_abort);
object_property_set_bool(OBJECT(&s->cpu), "use-mmu", false, &error_abort);
- object_property_set_bool(OBJECT(&s->cpu), "endianness", true,
+ object_property_set_bool(OBJECT(&s->cpu), "little-endian", true,
&error_abort);
object_property_set_str(OBJECT(&s->cpu), "version", "8.40.b",
&error_abort);
@@ -121,7 +121,7 @@ static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp)
}
}
-static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, void *data)
+static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -172,7 +172,7 @@ static void xlnx_zynqmp_pmu_init(MachineState *machine)
qdev_realize(DEVICE(pmu), NULL, &error_fatal);
/* Load the kernel */
- microblaze_load_kernel(&pmu->cpu, XLNX_ZYNQMP_PMU_RAM_ADDR,
+ microblaze_load_kernel(&pmu->cpu, true, XLNX_ZYNQMP_PMU_RAM_ADDR,
machine->ram_size,
machine->initrd_filename,
machine->dtb,
@@ -181,9 +181,8 @@ static void xlnx_zynqmp_pmu_init(MachineState *machine)
static void xlnx_zynqmp_pmu_machine_init(MachineClass *mc)
{
- mc->desc = "Xilinx ZynqMP PMU machine";
+ mc->desc = "Xilinx ZynqMP PMU machine (little endian)";
mc->init = xlnx_zynqmp_pmu_init;
}
DEFINE_MACHINE("xlnx-zynqmp-pmu", xlnx_zynqmp_pmu_machine_init)
-
diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig
index 692bede..b09c89a 100644
--- a/hw/mips/Kconfig
+++ b/hw/mips/Kconfig
@@ -10,14 +10,14 @@ config MALTA
select MIPS_CPS
select PIIX
select PFLASH_CFI01
- select SERIAL
+ select SERIAL_MM
select SMBUS_EEPROM
config MIPSSIM
bool
default y
depends on MIPS
- select SERIAL
+ select SERIAL_MM
select MIPSNET
config JAZZ
@@ -37,7 +37,7 @@ config JAZZ
select FDC_SYSBUS
select MC146818RTC
select PCKBD
- select SERIAL
+ select SERIAL_MM
select PARALLEL
select DS1225Y
select JAZZ_LED
@@ -65,7 +65,7 @@ config LOONGSON3V
imply VIRTIO_VGA
imply QXL if SPICE
imply USB_OHCI_PCI
- select SERIAL
+ select SERIAL_MM
select GOLDFISH_RTC
select LOONGSON_IPI
select LOONGSON_LIOINTC
@@ -89,7 +89,7 @@ config MIPS_BOSTON
select MIPS_CPS
select PCI_EXPRESS_XILINX
select AHCI_ICH9
- select SERIAL
+ select SERIAL_MM
config FW_CFG_MIPS
bool
diff --git a/hw/mips/boston.c b/hw/mips/boston.c
index 1b44fb3..149a263 100644
--- a/hw/mips/boston.c
+++ b/hw/mips/boston.c
@@ -22,7 +22,7 @@
#include "elf.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/ide/pci.h"
#include "hw/ide/ahci-pci.h"
#include "hw/loader.h"
@@ -37,11 +37,11 @@
#include "qemu/guest-random.h"
#include "qemu/log.h"
#include "chardev/char.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
+#include "system/reset.h"
#include <libfdt.h>
#include "qom/object.h"
@@ -220,7 +220,7 @@ static void boston_lcd_write(void *opaque, hwaddr addr,
static const MemoryRegionOps boston_lcd_ops = {
.read = boston_lcd_read,
.write = boston_lcd_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t boston_platreg_read(void *opaque, hwaddr addr,
@@ -299,7 +299,7 @@ static void boston_platreg_write(void *opaque, hwaddr addr,
static const MemoryRegionOps boston_platreg_ops = {
.read = boston_platreg_read,
.write = boston_platreg_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void mips_boston_instance_init(Object *obj)
@@ -358,8 +358,8 @@ static void gen_firmware(void *p, hwaddr kernel_entry, hwaddr fdt_addr)
kernel_entry);
}
-static const void *boston_fdt_filter(void *opaque, const void *fdt_orig,
- const void *match_data, hwaddr *load_addr)
+static void *boston_fdt_filter(void *opaque, const void *fdt_orig,
+ const void *match_data, hwaddr *load_addr)
{
BostonState *s = BOSTON(opaque);
MachineState *machine = s->mach;
@@ -395,7 +395,6 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig,
1, ram_high_sz);
fdt = g_realloc(fdt, fdt_totalsize(fdt));
- qemu_fdt_dumpdtb(fdt, fdt_sz);
s->fdt_base = *load_addr;
@@ -758,7 +757,7 @@ static void boston_mach_init(MachineState *machine)
s->uart = serial_mm_init(sys_mem, boston_memmap[BOSTON_UART].base, 2,
get_cps_irq(&s->cps, 3), 10000000,
- serial_hd(0), DEVICE_NATIVE_ENDIAN);
+ serial_hd(0), DEVICE_LITTLE_ENDIAN);
lcd = g_new(MemoryRegion, 1);
memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8);
@@ -792,12 +791,12 @@ static void boston_mach_init(MachineState *machine)
kernel_size = load_elf(machine->kernel_filename, NULL,
cpu_mips_kseg0_to_phys, NULL,
&kernel_entry, NULL, &kernel_high,
- NULL, 0, EM_MIPS, 1, 0);
+ NULL, ELFDATA2LSB, EM_MIPS, 1, 0);
if (kernel_size > 0) {
int dt_size;
g_autofree const void *dtb_file_data = NULL;
- g_autofree const void *dtb_load_data = NULL;
+ void *dtb_load_data = NULL;
hwaddr dtb_paddr = QEMU_ALIGN_UP(kernel_high, 64 * KiB);
hwaddr dtb_vaddr = cpu_mips_phys_to_kseg0(NULL, dtb_paddr);
@@ -810,6 +809,12 @@ static void boston_mach_init(MachineState *machine)
dtb_load_data = boston_fdt_filter(s, dtb_file_data,
NULL, &dtb_vaddr);
+ if (!dtb_load_data) {
+ /* boston_fdt_filter() already printed the error for us */
+ exit(1);
+ }
+
+ machine->fdt = dtb_load_data;
/* Calculate real fdt size after filter */
dt_size = fdt_totalsize(dtb_load_data);
@@ -818,7 +823,8 @@ static void boston_mach_init(MachineState *machine)
rom_ptr(dtb_paddr, dt_size));
} else {
/* Try to load file as FIT */
- fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s);
+ fit_err = load_fit(&boston_fit_loader, machine->kernel_filename,
+ &machine->fdt, s);
if (fit_err) {
error_report("unable to load kernel image");
exit(1);
diff --git a/hw/mips/cps.c b/hw/mips/cps.c
index 07b73b0..2a3ba3f 100644
--- a/hw/mips/cps.c
+++ b/hw/mips/cps.c
@@ -24,8 +24,8 @@
#include "hw/mips/mips.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/kvm.h"
+#include "system/reset.h"
qemu_irq get_cps_irq(MIPSCPSState *s, int pin_number)
{
@@ -77,6 +77,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp)
MIPSCPU *cpu = MIPS_CPU(object_new(s->cpu_type));
CPUMIPSState *env = &cpu->env;
+ object_property_set_bool(OBJECT(cpu), "big-endian", s->cpu_is_bigendian,
+ &error_abort);
+
/* All VPs are halted on reset. Leave powering up to CPC. */
object_property_set_bool(OBJECT(cpu), "start-powered-off", true,
&error_abort);
@@ -163,14 +166,14 @@ static void mips_cps_realize(DeviceState *dev, Error **errp)
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gcr), 0));
}
-static Property mips_cps_properties[] = {
+static const Property mips_cps_properties[] = {
DEFINE_PROP_UINT32("num-vp", MIPSCPSState, num_vp, 1),
DEFINE_PROP_UINT32("num-irq", MIPSCPSState, num_irq, 256),
DEFINE_PROP_STRING("cpu-type", MIPSCPSState, cpu_type),
- DEFINE_PROP_END_OF_LIST()
+ DEFINE_PROP_BOOL("cpu-big-endian", MIPSCPSState, cpu_is_bigendian, false),
};
-static void mips_cps_class_init(ObjectClass *klass, void *data)
+static void mips_cps_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c
index 6e4303b..2a8507b 100644
--- a/hw/mips/fuloong2e.c
+++ b/hw/mips/fuloong2e.c
@@ -36,10 +36,11 @@
#include "hw/qdev-properties.h"
#include "elf.h"
#include "hw/isa/vt82c686.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
+#include "system/qtest.h"
+#include "system/reset.h"
+#include "system/system.h"
#include "qemu/error-report.h"
+#include "exec/tswap.h"
#define ENVP_PADDR 0x2000
#define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR)
@@ -105,7 +106,7 @@ static uint64_t load_kernel(MIPSCPU *cpu)
cpu_mips_kseg0_to_phys, NULL,
&kernel_entry, NULL,
&kernel_high, NULL,
- 0, EM_MIPS, 1, 0);
+ ELFDATA2LSB, EM_MIPS, 1, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s': %s",
loaderparams.kernel_filename,
@@ -229,7 +230,7 @@ static void mips_fuloong2e_init(MachineState *machine)
clock_set_hz(cpuclk, 533080000); /* ~533 MHz */
/* init CPUs */
- cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
+ cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false);
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);
@@ -333,7 +334,6 @@ static void mips_fuloong2e_machine_init(MachineClass *mc)
mc->default_cpu_type = MIPS_CPU_TYPE_NAME("Loongson-2E");
mc->default_ram_size = 256 * MiB;
mc->default_ram_id = "fuloong2e.ram";
- mc->minimum_page_bits = 14;
machine_add_audiodev_property(mc);
}
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index 1bc17e6..7fb0b97 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -28,11 +28,11 @@
#include "hw/mips/mips.h"
#include "hw/intc/i8259.h"
#include "hw/dma/i8257.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/char/parallel.h"
#include "hw/isa/isa.h"
#include "hw/block/fdc.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "net/net.h"
#include "hw/scsi/esp.h"
@@ -44,13 +44,13 @@
#include "hw/audio/pcspk.h"
#include "hw/input/i8042.h"
#include "hw/sysbus.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/help_option.h"
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#endif /* CONFIG_TCG */
#include "cpu.h"
@@ -59,12 +59,6 @@ enum jazz_model_e {
JAZZ_PICA61,
};
-#if TARGET_BIG_ENDIAN
-#define BIOS_FILENAME "mips_bios.bin"
-#else
-#define BIOS_FILENAME "mipsel_bios.bin"
-#endif
-
static void main_cpu_reset(void *opaque)
{
MIPSCPU *cpu = opaque;
@@ -128,7 +122,7 @@ static void mips_jazz_init_net(IOMMUMemoryRegion *rc4030_dma_mr,
uint8_t *prom;
NICInfo *nd;
- nd = qemu_find_nic_info("dp8393x", true, "dp82932");
+ nd = qemu_find_nic_info("dp8393x", true, "dp83932");
if (!nd) {
return;
}
@@ -168,6 +162,8 @@ static void mips_jazz_init_net(IOMMUMemoryRegion *rc4030_dma_mr,
static void mips_jazz_init(MachineState *machine,
enum jazz_model_e jazz_model)
{
+ const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin"
+ : "mipsel_bios.bin";
MemoryRegion *address_space = get_system_memory();
char *filename;
int bios_size, n;
@@ -212,7 +208,8 @@ static void mips_jazz_init(MachineState *machine,
* ext_clk[jazz_model].pll_mult);
/* init CPUs */
- cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
+ cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk,
+ TARGET_BIG_ENDIAN);
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);
@@ -244,7 +241,8 @@ static void mips_jazz_init(MachineState *machine,
memory_region_add_subregion(address_space, 0xfff00000LL, bios2);
/* load the BIOS image. */
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME);
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
+ machine->firmware ?: bios_name);
if (filename) {
bios_size = load_image_targphys(filename, 0xfff00000LL,
MAGNUM_BIOS_SIZE);
@@ -414,7 +412,7 @@ void mips_pica61_init(MachineState *machine)
mips_jazz_init(machine, JAZZ_PICA61);
}
-static void mips_magnum_class_init(ObjectClass *oc, void *data)
+static void mips_magnum_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -431,7 +429,7 @@ static const TypeInfo mips_magnum_type = {
.class_init = mips_magnum_class_init,
};
-static void mips_pica61_class_init(ObjectClass *oc, void *data)
+static void mips_pica61_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/mips/loongson3_bootp.c b/hw/mips/loongson3_bootp.c
index b97b819..6781266 100644
--- a/hw/mips/loongson3_bootp.c
+++ b/hw/mips/loongson3_bootp.c
@@ -21,16 +21,17 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
-#include "cpu.h"
-#include "hw/boards.h"
+#include "qemu/bswap.h"
+#include "exec/hwaddr.h"
#include "hw/mips/loongson3_bootp.h"
-static void init_cpu_info(void *g_cpuinfo, uint64_t cpu_freq)
+static void init_cpu_info(void *g_cpuinfo, uint32_t cpu_count,
+ uint32_t processor_id, uint64_t cpu_freq)
{
struct efi_cpuinfo_loongson *c = g_cpuinfo;
c->cputype = cpu_to_le32(Loongson_3A);
- c->processor_id = cpu_to_le32(MIPS_CPU(first_cpu)->env.CP0_PRid);
+ c->processor_id = cpu_to_le32(processor_id);
if (cpu_freq > UINT_MAX) {
c->cpu_clock_freq = cpu_to_le32(UINT_MAX);
} else {
@@ -38,8 +39,8 @@ static void init_cpu_info(void *g_cpuinfo, uint64_t cpu_freq)
}
c->cpu_startup_core_id = cpu_to_le16(0);
- c->nr_cpus = cpu_to_le32(current_machine->smp.cpus);
- c->total_node = cpu_to_le32(DIV_ROUND_UP(current_machine->smp.cpus,
+ c->nr_cpus = cpu_to_le32(cpu_count);
+ c->total_node = cpu_to_le32(DIV_ROUND_UP(cpu_count,
LOONGSON3_CORE_PER_NODE));
}
@@ -110,9 +111,10 @@ static void init_special_info(void *g_special)
}
void init_loongson_params(struct loongson_params *lp, void *p,
+ uint32_t cpu_count, uint32_t processor_id,
uint64_t cpu_freq, uint64_t ram_size)
{
- init_cpu_info(p, cpu_freq);
+ init_cpu_info(p, cpu_count, processor_id, cpu_freq);
lp->cpu_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp);
p += ROUND_UP(sizeof(struct efi_cpuinfo_loongson), 64);
diff --git a/hw/mips/loongson3_bootp.h b/hw/mips/loongson3_bootp.h
index 9091265..9dc325a 100644
--- a/hw/mips/loongson3_bootp.h
+++ b/hw/mips/loongson3_bootp.h
@@ -233,6 +233,7 @@ enum {
extern const MemMapEntry virt_memmap[];
void init_loongson_params(struct loongson_params *lp, void *p,
+ uint32_t cpu_count, uint32_t processor_id,
uint64_t cpu_freq, uint64_t ram_size);
void init_reset_system(struct efi_reset_system_t *reset);
diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c
index 4ad36f0..de6fbcc 100644
--- a/hw/mips/loongson3_virt.c
+++ b/hw/mips/loongson3_virt.c
@@ -29,7 +29,7 @@
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "elf.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/intc/loongson_liointc.h"
#include "hw/mips/mips.h"
#include "hw/mips/fw_cfg.h"
@@ -45,10 +45,10 @@
#include "hw/pci-host/gpex.h"
#include "hw/usb.h"
#include "net/net.h"
-#include "sysemu/kvm.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/kvm.h"
+#include "system/qtest.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qemu/error-report.h"
#define PM_CNTL_MODE 0x10
@@ -97,6 +97,7 @@ struct LoongsonMachineState {
MemoryRegion *pio_alias;
MemoryRegion *mmio_alias;
MemoryRegion *ecam_alias;
+ MemoryRegion *core_iocsr[LOONGSON_MAX_VCPUS];
};
typedef struct LoongsonMachineState LoongsonMachineState;
@@ -143,7 +144,7 @@ static void loongson3_pm_write(void *opaque, hwaddr addr,
static const MemoryRegionOps loongson3_pm_ops = {
.read = loongson3_pm_read,
.write = loongson3_pm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
.max_access_size = 1
@@ -152,7 +153,7 @@ static const MemoryRegionOps loongson3_pm_ops = {
#define DEF_LOONGSON3_FREQ (800 * 1000 * 1000)
-static uint64_t get_cpu_freq_hz(void)
+static uint64_t get_cpu_freq_hz(const MIPSCPU *cpu)
{
#ifdef CONFIG_KVM
int ret;
@@ -163,7 +164,7 @@ static uint64_t get_cpu_freq_hz(void)
};
if (kvm_enabled()) {
- ret = kvm_vcpu_ioctl(first_cpu, KVM_GET_ONE_REG, &freq_reg);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_ONE_REG, &freq_reg);
if (ret >= 0) {
return freq * 2;
}
@@ -172,7 +173,7 @@ static uint64_t get_cpu_freq_hz(void)
return DEF_LOONGSON3_FREQ;
}
-static void init_boot_param(void)
+static void init_boot_param(unsigned cpu_count, uint32_t processor_id)
{
static void *p;
struct boot_params *bp;
@@ -183,7 +184,7 @@ static void init_boot_param(void)
bp->efi.smbios.vers = cpu_to_le16(1);
init_reset_system(&(bp->reset_system));
p += ROUND_UP(sizeof(struct boot_params), 64);
- init_loongson_params(&(bp->efi.smbios.lp), p,
+ init_loongson_params(&(bp->efi.smbios.lp), p, cpu_count, processor_id,
loaderparams.cpu_freq, loaderparams.ram_size);
rom_add_blob_fixed("params_rom", bp,
@@ -279,7 +280,7 @@ static void fw_cfg_boot_set(void *opaque, const char *boot_device,
fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
-static void fw_conf_init(unsigned long ram_size)
+static void fw_conf_init(void)
{
static const uint8_t suspend[6] = {128, 0, 0, 129, 128, 128};
FWCfgState *fw_cfg;
@@ -288,9 +289,9 @@ static void fw_conf_init(unsigned long ram_size)
fw_cfg = fw_cfg_init_mem_wide(cfg_addr, cfg_addr + 8, 8, 0, NULL);
fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)current_machine->smp.cpus);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)current_machine->smp.max_cpus);
- fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, loaderparams.ram_size);
fw_cfg_add_i32(fw_cfg, FW_CFG_MACHINE_VERSION, 1);
- fw_cfg_add_i64(fw_cfg, FW_CFG_CPU_FREQ, get_cpu_freq_hz());
+ fw_cfg_add_i64(fw_cfg, FW_CFG_CPU_FREQ, loaderparams.cpu_freq);
fw_cfg_add_file(fw_cfg, "etc/system-states",
g_memdup2(suspend, sizeof(suspend)), sizeof(suspend));
@@ -355,9 +356,9 @@ static uint64_t load_kernel(CPUMIPSState *env)
kernel_size = load_elf(loaderparams.kernel_filename, NULL,
cpu_mips_kseg0_to_phys, NULL,
- (uint64_t *)&kernel_entry,
- (uint64_t *)&kernel_low, (uint64_t *)&kernel_high,
- NULL, 0, EM_MIPS, 1, 0);
+ &kernel_entry,
+ &kernel_low, &kernel_high,
+ NULL, ELFDATA2LSB, EM_MIPS, 1, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s': %s",
loaderparams.kernel_filename,
@@ -398,25 +399,33 @@ static uint64_t load_kernel(CPUMIPSState *env)
return kernel_entry;
}
-static void main_cpu_reset(void *opaque)
+static void generic_cpu_reset(void *opaque)
{
MIPSCPU *cpu = opaque;
CPUMIPSState *env = &cpu->env;
cpu_reset(CPU(cpu));
- /* Loongson-3 reset stuff */
if (loaderparams.kernel_filename) {
- if (cpu == MIPS_CPU(first_cpu)) {
- env->active_tc.gpr[4] = loaderparams.a0;
- env->active_tc.gpr[5] = loaderparams.a1;
- env->active_tc.gpr[6] = loaderparams.a2;
- env->active_tc.PC = loaderparams.kernel_entry;
- }
env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL));
}
}
+static void main_cpu_reset(void *opaque)
+{
+ generic_cpu_reset(opaque);
+
+ if (loaderparams.kernel_filename) {
+ MIPSCPU *cpu = opaque;
+ CPUMIPSState *env = &cpu->env;
+
+ env->active_tc.gpr[4] = loaderparams.a0;
+ env->active_tc.gpr[5] = loaderparams.a1;
+ env->active_tc.gpr[6] = loaderparams.a2;
+ env->active_tc.PC = loaderparams.kernel_entry;
+ }
+}
+
static inline void loongson3_virt_devices_init(MachineState *machine,
DeviceState *pic)
{
@@ -457,7 +466,7 @@ static inline void loongson3_virt_devices_init(MachineState *machine,
virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i);
@@ -483,9 +492,8 @@ static void mips_loongson3_virt_init(MachineState *machine)
{
int i;
long bios_size;
- MIPSCPU *cpu;
+ MIPSCPU *cpu = NULL;
Clock *cpuclk;
- CPUMIPSState *env;
DeviceState *liointc;
DeviceState *ipi = NULL;
char *filename;
@@ -493,6 +501,7 @@ static void mips_loongson3_virt_init(MachineState *machine)
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
ram_addr_t ram_size = machine->ram_size;
+ LoongsonMachineState *s = LOONGSON_MACHINE(machine);
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *bios = g_new(MemoryRegion, 1);
@@ -551,7 +560,7 @@ static void mips_loongson3_virt_init(MachineState *machine)
serial_mm_init(address_space_mem, virt_memmap[VIRT_UART].base, 0,
qdev_get_gpio_in(liointc, UART_IRQ), 115200, serial_hd(0),
- DEVICE_NATIVE_ENDIAN);
+ DEVICE_LITTLE_ENDIAN);
sysbus_create_simple("goldfish_rtc", virt_memmap[VIRT_RTC].base,
qdev_get_gpio_in(liointc, RTC_IRQ));
@@ -559,20 +568,20 @@ static void mips_loongson3_virt_init(MachineState *machine)
cpuclk = clock_new(OBJECT(machine), "cpu-refclk");
clock_set_hz(cpuclk, DEF_LOONGSON3_FREQ);
- for (i = 0; i < machine->smp.cpus; i++) {
+ for (i = machine->smp.cpus - 1; i >= 0; --i) {
int node = i / LOONGSON3_CORE_PER_NODE;
int core = i % LOONGSON3_CORE_PER_NODE;
int ip;
/* init CPUs */
- cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
+ cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false);
/* Init internal devices */
cpu_mips_irq_init_cpu(cpu);
cpu_mips_clock_init(cpu);
- qemu_register_reset(main_cpu_reset, cpu);
+ qemu_register_reset(i ? generic_cpu_reset : main_cpu_reset, cpu);
- if (ipi) {
+ if (!kvm_enabled()) {
hwaddr base = ((hwaddr)node << 44) + virt_memmap[VIRT_IPI].base;
base += core * 0x100;
qdev_connect_gpio_out(ipi, i, cpu->env.irq[6]);
@@ -586,6 +595,7 @@ static void mips_loongson3_virt_init(MachineState *machine)
iocsr, 0, UINT32_MAX);
memory_region_add_subregion(&MIPS_CPU(cpu)->env.iocsr.mr,
0, core_iocsr);
+ s->core_iocsr[i] = core_iocsr;
}
if (node > 0) {
@@ -598,7 +608,7 @@ static void mips_loongson3_virt_init(MachineState *machine)
pin, cpu->env.irq[ip + 2]);
}
}
- env = &MIPS_CPU(first_cpu)->env;
+ assert(cpu); /* This variable points to the first created cpu. */
/* Allocate RAM/BIOS, 0x00000000~0x10000000 is alias of 0x80000000~0x90000000 */
memory_region_init_rom(bios, NULL, "loongson3.bios",
@@ -623,16 +633,16 @@ static void mips_loongson3_virt_init(MachineState *machine)
* Please use -L to set the BIOS path and -bios to set bios name.
*/
+ loaderparams.cpu_freq = get_cpu_freq_hz(cpu);
+ loaderparams.ram_size = ram_size;
if (kernel_filename) {
- loaderparams.cpu_freq = get_cpu_freq_hz();
- loaderparams.ram_size = ram_size;
loaderparams.kernel_filename = kernel_filename;
loaderparams.kernel_cmdline = kernel_cmdline;
loaderparams.initrd_filename = initrd_filename;
- loaderparams.kernel_entry = load_kernel(env);
+ loaderparams.kernel_entry = load_kernel(&cpu->env);
init_boot_rom();
- init_boot_param();
+ init_boot_param(machine->smp.cpus, cpu->env.CP0_PRid);
} else {
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
machine->firmware ?: LOONGSON3_BIOSNAME);
@@ -651,13 +661,13 @@ static void mips_loongson3_virt_init(MachineState *machine)
exit(1);
}
- fw_conf_init(ram_size);
+ fw_conf_init();
}
loongson3_virt_devices_init(machine, liointc);
}
-static void loongson3v_machine_class_init(ObjectClass *oc, void *data)
+static void loongson3v_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -667,7 +677,6 @@ static void loongson3v_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = LOONGSON_MAX_VCPUS;
mc->default_ram_id = "loongson3.highram";
mc->default_ram_size = 1600 * MiB;
- mc->minimum_page_bits = 14;
mc->default_nic = "virtio-net-pci";
}
diff --git a/hw/mips/malta.c b/hw/mips/malta.c
index 664a2ae..cbdbb21 100644
--- a/hw/mips/malta.c
+++ b/hw/mips/malta.c
@@ -28,10 +28,11 @@
#include "qemu/datadir.h"
#include "qemu/cutils.h"
#include "qemu/guest-random.h"
+#include "exec/tswap.h"
#include "hw/clock.h"
#include "hw/southbridge/piix.h"
#include "hw/isa/superio.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "net/net.h"
#include "hw/boards.h"
#include "hw/i2c/smbus_eeprom.h"
@@ -48,12 +49,12 @@
#include "qom/object.h"
#include "hw/sysbus.h" /* SysBusDevice */
#include "qemu/host-utils.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/qtest.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "semihosting/semihost.h"
#include "hw/mips/cps.h"
#include "hw/qdev-clock.h"
@@ -93,12 +94,6 @@ typedef struct {
bool display_inited;
} MaltaFPGAState;
-#if TARGET_BIG_ENDIAN
-#define BIOS_FILENAME "mips_bios.bin"
-#else
-#define BIOS_FILENAME "mipsel_bios.bin"
-#endif
-
#define TYPE_MIPS_MALTA "mips-malta"
OBJECT_DECLARE_SIMPLE_TYPE(MaltaState, MIPS_MALTA)
@@ -382,11 +377,7 @@ static uint64_t malta_fpga_read(void *opaque, hwaddr addr,
/* STATUS Register */
case 0x00208:
-#if TARGET_BIG_ENDIAN
- val = 0x00000012;
-#else
- val = 0x00000010;
-#endif
+ val = TARGET_BIG_ENDIAN ? 0x00000012 : 0x00000010;
break;
/* JMPRS Register */
@@ -879,8 +870,9 @@ static uint64_t load_kernel(void)
kernel_size = load_elf(loaderparams.kernel_filename, NULL,
cpu_mips_kseg0_to_phys, NULL,
&kernel_entry, NULL,
- &kernel_high, NULL, TARGET_BIG_ENDIAN, EM_MIPS,
- 1, 0);
+ &kernel_high, NULL,
+ TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB,
+ EM_MIPS, 1, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s': %s",
loaderparams.kernel_filename,
@@ -1034,7 +1026,8 @@ static void create_cpu_without_cps(MachineState *ms, MaltaState *s,
int i;
for (i = 0; i < ms->smp.cpus; i++) {
- cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk);
+ cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk,
+ TARGET_BIG_ENDIAN);
/* Init internal devices */
cpu_mips_irq_init_cpu(cpu);
@@ -1054,6 +1047,8 @@ static void create_cps(MachineState *ms, MaltaState *s,
object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS);
object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type,
&error_fatal);
+ object_property_set_bool(OBJECT(&s->cps), "cpu-big-endian",
+ TARGET_BIG_ENDIAN, &error_abort);
object_property_set_uint(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
&error_fatal);
qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk);
@@ -1172,9 +1167,12 @@ void mips_malta_init(MachineState *machine)
target_long bios_size = FLASH_SIZE;
/* Load firmware from flash. */
if (!dinfo) {
+ const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin"
+ : "mipsel_bios.bin";
+
/* Load a BIOS image. */
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
- machine->firmware ?: BIOS_FILENAME);
+ machine->firmware ?: bios_name);
if (filename) {
bios_size = load_image_targphys(filename, FLASH_ADDRESS,
BIOS_SIZE);
@@ -1192,8 +1190,7 @@ void mips_malta_init(MachineState *machine)
* In little endian mode the 32bit words in the bios are swapped,
* a neat trick which allows bi-endian firmware.
*/
-#if !TARGET_BIG_ENDIAN
- {
+ if (!TARGET_BIG_ENDIAN) {
uint32_t *end, *addr;
const size_t swapsize = MIN(bios_size, 0x3e0000);
addr = rom_ptr(FLASH_ADDRESS, swapsize);
@@ -1206,7 +1203,6 @@ void mips_malta_init(MachineState *machine)
addr++;
}
}
-#endif
}
/*
diff --git a/hw/mips/meson.build b/hw/mips/meson.build
index ca37c42..31dbd2b 100644
--- a/hw/mips/meson.build
+++ b/hw/mips/meson.build
@@ -1,7 +1,8 @@
mips_ss = ss.source_set()
mips_ss.add(files('bootloader.c', 'mips_int.c'))
-mips_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c'))
-mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loongson3_virt.c'))
+common_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c'))
+common_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c'))
+mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_virt.c'))
mips_ss.add(when: 'CONFIG_MALTA', if_true: files('malta.c'))
mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c'))
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
index eef2fd2..26fdb93 100644
--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c
@@ -23,7 +23,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/irq.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_mips.h"
static void cpu_mips_irq_request(void *opaque, int irq, int level)
diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c
index 9170d6c..e843307 100644
--- a/hw/mips/mipssim.c
+++ b/hw/mips/mipssim.c
@@ -28,30 +28,24 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/datadir.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/clock.h"
#include "hw/mips/mips.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "elf.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "cpu.h"
#define BIOS_SIZE (4 * MiB)
-#if TARGET_BIG_ENDIAN
-#define BIOS_FILENAME "mips_bios.bin"
-#else
-#define BIOS_FILENAME "mipsel_bios.bin"
-#endif
-
static struct _loaderparams {
int ram_size;
const char *kernel_filename;
@@ -73,7 +67,8 @@ static uint64_t load_kernel(void)
kernel_size = load_elf(loaderparams.kernel_filename, NULL,
cpu_mips_kseg0_to_phys, NULL,
&entry, NULL,
- &kernel_high, NULL, TARGET_BIG_ENDIAN,
+ &kernel_high, NULL,
+ TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB,
EM_MIPS, 1, 0);
if (kernel_size < 0) {
error_report("could not load kernel '%s': %s",
@@ -142,6 +137,8 @@ mips_mipssim_init(MachineState *machine)
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
+ const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin"
+ : "mipsel_bios.bin";
char *filename;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *isa = g_new(MemoryRegion, 1);
@@ -160,7 +157,8 @@ mips_mipssim_init(MachineState *machine)
#endif
/* Init CPUs. */
- cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk);
+ cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk,
+ TARGET_BIG_ENDIAN);
env = &cpu->env;
reset_info = g_new0(ResetData, 1);
@@ -177,7 +175,8 @@ mips_mipssim_init(MachineState *machine)
/* Map the BIOS / boot exception handler. */
memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios);
/* Load a BIOS / boot exception handler image. */
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME);
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
+ machine->firmware ?: bios_name);
if (filename) {
bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE);
g_free(filename);
diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig
index 1e08785..ec0fa5a 100644
--- a/hw/misc/Kconfig
+++ b/hw/misc/Kconfig
@@ -30,14 +30,6 @@ config EDU
default y if TEST_DEVICES
depends on PCI && MSI_NONBROKEN
-config PCA9552
- bool
- depends on I2C
-
-config PCA9554
- bool
- depends on I2C
-
config I2C_ECHO
bool
default y if TEST_DEVICES
@@ -72,9 +64,13 @@ config IVSHMEM_DEVICE
default y if PCI_DEVICES
depends on PCI && LINUX && IVSHMEM && MSI_NONBROKEN
+config IVSHMEM_FLAT_DEVICE
+ bool
+ default y
+ depends on LINUX && IVSHMEM
+
config ECCMEMCTL
bool
- select ECC
config IMX
bool
@@ -82,6 +78,15 @@ config IMX
select SSI
select USB_EHCI_SYSBUS
+config FSL_IMX8MP_ANALOG
+ bool
+
+config FSL_IMX8MP_CCM
+ bool
+
+config STM32_RCC
+ bool
+
config STM32F2XX_SYSCFG
bool
@@ -143,6 +148,10 @@ config PVPANIC_ISA
depends on ISA_BUS
select PVPANIC_COMMON
+config PVPANIC_MMIO
+ bool
+ select PVPANIC_COMMON
+
config AUX
bool
select I2C
diff --git a/hw/misc/a9scu.c b/hw/misc/a9scu.c
index 04225df..bb00ae2 100644
--- a/hw/misc/a9scu.c
+++ b/hw/misc/a9scu.c
@@ -123,18 +123,17 @@ static const VMStateDescription vmstate_a9_scu = {
}
};
-static Property a9_scu_properties[] = {
+static const Property a9_scu_properties[] = {
DEFINE_PROP_UINT32("num-cpu", A9SCUState, num_cpu, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void a9_scu_class_init(ObjectClass *klass, void *data)
+static void a9_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, a9_scu_properties);
dc->vmsd = &vmstate_a9_scu;
- dc->reset = a9_scu_reset;
+ device_class_set_legacy_reset(dc, a9_scu_reset);
dc->realize = a9_scu_realize;
}
diff --git a/hw/misc/allwinner-a10-ccm.c b/hw/misc/allwinner-a10-ccm.c
index 575b018..6b188c2 100644
--- a/hw/misc/allwinner-a10-ccm.c
+++ b/hw/misc/allwinner-a10-ccm.c
@@ -147,7 +147,7 @@ static void allwinner_a10_ccm_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_a10_ccm_ops = {
.read = allwinner_a10_ccm_read,
.write = allwinner_a10_ccm_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -199,7 +199,7 @@ static const VMStateDescription allwinner_a10_ccm_vmstate = {
}
};
-static void allwinner_a10_ccm_class_init(ObjectClass *klass, void *data)
+static void allwinner_a10_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/misc/allwinner-a10-dramc.c b/hw/misc/allwinner-a10-dramc.c
index a7c58fa..c16814c 100644
--- a/hw/misc/allwinner-a10-dramc.c
+++ b/hw/misc/allwinner-a10-dramc.c
@@ -114,7 +114,7 @@ static void allwinner_a10_dramc_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_a10_dramc_ops = {
.read = allwinner_a10_dramc_read,
.write = allwinner_a10_dramc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -154,7 +154,7 @@ static const VMStateDescription allwinner_a10_dramc_vmstate = {
}
};
-static void allwinner_a10_dramc_class_init(ObjectClass *klass, void *data)
+static void allwinner_a10_dramc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/misc/allwinner-cpucfg.c b/hw/misc/allwinner-cpucfg.c
index 31b9780..90dd872 100644
--- a/hw/misc/allwinner-cpucfg.c
+++ b/hw/misc/allwinner-cpucfg.c
@@ -217,7 +217,7 @@ static void allwinner_cpucfg_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_cpucfg_ops = {
.read = allwinner_cpucfg_read,
.write = allwinner_cpucfg_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -258,11 +258,11 @@ static const VMStateDescription allwinner_cpucfg_vmstate = {
}
};
-static void allwinner_cpucfg_class_init(ObjectClass *klass, void *data)
+static void allwinner_cpucfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_cpucfg_reset;
+ device_class_set_legacy_reset(dc, allwinner_cpucfg_reset);
dc->vmsd = &allwinner_cpucfg_vmstate;
}
diff --git a/hw/misc/allwinner-h3-ccu.c b/hw/misc/allwinner-h3-ccu.c
index cfc6852..be91c0c 100644
--- a/hw/misc/allwinner-h3-ccu.c
+++ b/hw/misc/allwinner-h3-ccu.c
@@ -155,7 +155,7 @@ static void allwinner_h3_ccu_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_h3_ccu_ops = {
.read = allwinner_h3_ccu_read,
.write = allwinner_h3_ccu_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -218,11 +218,11 @@ static const VMStateDescription allwinner_h3_ccu_vmstate = {
}
};
-static void allwinner_h3_ccu_class_init(ObjectClass *klass, void *data)
+static void allwinner_h3_ccu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_h3_ccu_reset;
+ device_class_set_legacy_reset(dc, allwinner_h3_ccu_reset);
dc->vmsd = &allwinner_h3_ccu_vmstate;
}
diff --git a/hw/misc/allwinner-h3-dramc.c b/hw/misc/allwinner-h3-dramc.c
index e168ffe..8834524 100644
--- a/hw/misc/allwinner-h3-dramc.c
+++ b/hw/misc/allwinner-h3-dramc.c
@@ -24,7 +24,7 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "hw/misc/allwinner-h3-dramc.h"
@@ -219,7 +219,7 @@ static void allwinner_h3_dramphy_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_h3_dramcom_ops = {
.read = allwinner_h3_dramcom_read,
.write = allwinner_h3_dramcom_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -230,7 +230,7 @@ static const MemoryRegionOps allwinner_h3_dramcom_ops = {
static const MemoryRegionOps allwinner_h3_dramctl_ops = {
.read = allwinner_h3_dramctl_read,
.write = allwinner_h3_dramctl_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -241,7 +241,7 @@ static const MemoryRegionOps allwinner_h3_dramctl_ops = {
static const MemoryRegionOps allwinner_h3_dramphy_ops = {
.read = allwinner_h3_dramphy_read,
.write = allwinner_h3_dramphy_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -314,10 +314,9 @@ static void allwinner_h3_dramc_init(Object *obj)
sysbus_init_mmio(sbd, &s->dramphy_iomem);
}
-static Property allwinner_h3_dramc_properties[] = {
+static const Property allwinner_h3_dramc_properties[] = {
DEFINE_PROP_UINT64("ram-addr", AwH3DramCtlState, ram_addr, 0x0),
DEFINE_PROP_UINT32("ram-size", AwH3DramCtlState, ram_size, 256 * MiB),
- DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription allwinner_h3_dramc_vmstate = {
@@ -332,11 +331,11 @@ static const VMStateDescription allwinner_h3_dramc_vmstate = {
}
};
-static void allwinner_h3_dramc_class_init(ObjectClass *klass, void *data)
+static void allwinner_h3_dramc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_h3_dramc_reset;
+ device_class_set_legacy_reset(dc, allwinner_h3_dramc_reset);
dc->vmsd = &allwinner_h3_dramc_vmstate;
dc->realize = allwinner_h3_dramc_realize;
device_class_set_props(dc, allwinner_h3_dramc_properties);
diff --git a/hw/misc/allwinner-h3-sysctrl.c b/hw/misc/allwinner-h3-sysctrl.c
index 2d29be8..6b86524 100644
--- a/hw/misc/allwinner-h3-sysctrl.c
+++ b/hw/misc/allwinner-h3-sysctrl.c
@@ -78,7 +78,7 @@ static void allwinner_h3_sysctrl_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_h3_sysctrl_ops = {
.read = allwinner_h3_sysctrl_read,
.write = allwinner_h3_sysctrl_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -116,11 +116,12 @@ static const VMStateDescription allwinner_h3_sysctrl_vmstate = {
}
};
-static void allwinner_h3_sysctrl_class_init(ObjectClass *klass, void *data)
+static void allwinner_h3_sysctrl_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_h3_sysctrl_reset;
+ device_class_set_legacy_reset(dc, allwinner_h3_sysctrl_reset);
dc->vmsd = &allwinner_h3_sysctrl_vmstate;
}
diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c
index 33baf44..4e21eea 100644
--- a/hw/misc/allwinner-r40-ccu.c
+++ b/hw/misc/allwinner-r40-ccu.c
@@ -129,7 +129,7 @@ static void allwinner_r40_ccu_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_r40_ccu_ops = {
.read = allwinner_r40_ccu_read,
.write = allwinner_r40_ccu_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -185,11 +185,11 @@ static const VMStateDescription allwinner_r40_ccu_vmstate = {
}
};
-static void allwinner_r40_ccu_class_init(ObjectClass *klass, void *data)
+static void allwinner_r40_ccu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_r40_ccu_reset;
+ device_class_set_legacy_reset(dc, allwinner_r40_ccu_reset);
dc->vmsd = &allwinner_r40_ccu_vmstate;
}
diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c
index 75b0bef..1c8e17e 100644
--- a/hw/misc/allwinner-r40-dramc.c
+++ b/hw/misc/allwinner-r40-dramc.c
@@ -24,7 +24,7 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
@@ -297,7 +297,7 @@ static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_r40_dramcom_ops = {
.read = allwinner_r40_dramcom_read,
.write = allwinner_r40_dramcom_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -308,7 +308,7 @@ static const MemoryRegionOps allwinner_r40_dramcom_ops = {
static const MemoryRegionOps allwinner_r40_dramctl_ops = {
.read = allwinner_r40_dramctl_read,
.write = allwinner_r40_dramctl_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -319,7 +319,7 @@ static const MemoryRegionOps allwinner_r40_dramctl_ops = {
static const MemoryRegionOps allwinner_r40_dramphy_ops = {
.read = allwinner_r40_dramphy_read,
.write = allwinner_r40_dramphy_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -358,7 +358,7 @@ static void allwinner_r40_detect_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_r40_detect_ops = {
.read = allwinner_r40_detect_read,
.write = allwinner_r40_detect_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -393,7 +393,7 @@ static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = {
.read = allwinner_r40_dualrank_detect_read,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -464,10 +464,9 @@ static void allwinner_r40_dramc_init(Object *obj)
sysbus_init_mmio(sbd, &s->dramphy_iomem);
}
-static Property allwinner_r40_dramc_properties[] = {
+static const Property allwinner_r40_dramc_properties[] = {
DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0),
DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */
- DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription allwinner_r40_dramc_vmstate = {
@@ -485,11 +484,11 @@ static const VMStateDescription allwinner_r40_dramc_vmstate = {
}
};
-static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data)
+static void allwinner_r40_dramc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_r40_dramc_reset;
+ device_class_set_legacy_reset(dc, allwinner_r40_dramc_reset);
dc->vmsd = &allwinner_r40_dramc_vmstate;
dc->realize = allwinner_r40_dramc_realize;
device_class_set_props(dc, allwinner_r40_dramc_properties);
diff --git a/hw/misc/allwinner-sid.c b/hw/misc/allwinner-sid.c
index e5cd431..1e66c14 100644
--- a/hw/misc/allwinner-sid.c
+++ b/hw/misc/allwinner-sid.c
@@ -99,7 +99,7 @@ static void allwinner_sid_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_sid_ops = {
.read = allwinner_sid_read,
.write = allwinner_sid_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -127,9 +127,8 @@ static void allwinner_sid_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property allwinner_sid_properties[] = {
+static const Property allwinner_sid_properties[] = {
DEFINE_PROP_UUID_NODEFAULT("identifier", AwSidState, identifier),
- DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription allwinner_sid_vmstate = {
@@ -144,11 +143,11 @@ static const VMStateDescription allwinner_sid_vmstate = {
}
};
-static void allwinner_sid_class_init(ObjectClass *klass, void *data)
+static void allwinner_sid_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_sid_reset;
+ device_class_set_legacy_reset(dc, allwinner_sid_reset);
dc->vmsd = &allwinner_sid_vmstate;
device_class_set_props(dc, allwinner_sid_properties);
}
diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c
index cf10ca8..ed299ec 100644
--- a/hw/misc/allwinner-sramc.c
+++ b/hw/misc/allwinner-sramc.c
@@ -104,7 +104,7 @@ static void allwinner_sramc_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_sramc_ops = {
.read = allwinner_sramc_read,
.write = allwinner_sramc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -135,11 +135,11 @@ static void allwinner_sramc_reset(DeviceState *dev)
}
}
-static void allwinner_sramc_class_init(ObjectClass *klass, void *data)
+static void allwinner_sramc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_sramc_reset;
+ device_class_set_legacy_reset(dc, allwinner_sramc_reset);
dc->vmsd = &allwinner_sramc_vmstate;
}
@@ -163,7 +163,7 @@ static const TypeInfo allwinner_sramc_info = {
.class_init = allwinner_sramc_class_init,
};
-static void allwinner_r40_sramc_class_init(ObjectClass *klass, void *data)
+static void allwinner_r40_sramc_class_init(ObjectClass *klass, const void *data)
{
AwSRAMCClass *sc = AW_SRAMC_CLASS(klass);
diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c
index 59a4899..a015d4a 100644
--- a/hw/misc/applesmc.c
+++ b/hw/misc/applesmc.c
@@ -350,11 +350,10 @@ static void applesmc_unrealize(DeviceState *dev)
}
}
-static Property applesmc_isa_properties[] = {
+static const Property applesmc_isa_properties[] = {
DEFINE_PROP_UINT32(APPLESMC_PROP_IO_BASE, AppleSMCState, iobase,
APPLESMC_DEFAULT_IOBASE),
DEFINE_PROP_STRING("osk", AppleSMCState, osk),
- DEFINE_PROP_END_OF_LIST(),
};
static void build_applesmc_aml(AcpiDevAmlIf *adev, Aml *scope)
@@ -376,14 +375,14 @@ static void build_applesmc_aml(AcpiDevAmlIf *adev, Aml *scope)
aml_append(scope, dev);
}
-static void qdev_applesmc_class_init(ObjectClass *klass, void *data)
+static void qdev_applesmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
dc->realize = applesmc_isa_realize;
dc->unrealize = applesmc_unrealize;
- dc->reset = qdev_applesmc_isa_reset;
+ device_class_set_legacy_reset(dc, qdev_applesmc_isa_reset);
device_class_set_props(dc, applesmc_isa_properties);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
adevc->build_dev_aml = build_applesmc_aml;
@@ -394,7 +393,7 @@ static const TypeInfo applesmc_isa_info = {
.parent = TYPE_ISA_DEVICE,
.instance_size = sizeof(AppleSMCState),
.class_init = qdev_applesmc_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/misc/arm11scu.c b/hw/misc/arm11scu.c
index 17c36a0..2ad4fd1 100644
--- a/hw/misc/arm11scu.c
+++ b/hw/misc/arm11scu.c
@@ -75,12 +75,11 @@ static void arm11_scu_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property arm11_scu_properties[] = {
+static const Property arm11_scu_properties[] = {
DEFINE_PROP_UINT32("num-cpu", ARM11SCUState, num_cpu, 1),
- DEFINE_PROP_END_OF_LIST()
};
-static void arm11_scu_class_init(ObjectClass *oc, void *data)
+static void arm11_scu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c
index b14d0a2..8b4b61e 100644
--- a/hw/misc/arm_l2x0.c
+++ b/hw/misc/arm_l2x0.c
@@ -173,18 +173,17 @@ static void l2x0_priv_init(Object *obj)
sysbus_init_mmio(dev, &s->iomem);
}
-static Property l2x0_properties[] = {
+static const Property l2x0_properties[] = {
DEFINE_PROP_UINT32("cache-type", L2x0State, cache_type, 0x1c100100),
- DEFINE_PROP_END_OF_LIST(),
};
-static void l2x0_class_init(ObjectClass *klass, void *data)
+static void l2x0_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_l2x0;
device_class_set_props(dc, l2x0_properties);
- dc->reset = l2x0_priv_reset;
+ device_class_set_legacy_reset(dc, l2x0_priv_reset);
}
static const TypeInfo l2x0_info = {
diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c
index 5108f3e..0f4e37c 100644
--- a/hw/misc/arm_sysctl.c
+++ b/hw/misc/arm_sysctl.c
@@ -11,7 +11,7 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qemu/bitops.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
@@ -520,7 +520,7 @@ static void arm_sysctl_write(void *opaque, hwaddr offset,
* as zero.
*/
s->sys_cfgctrl = val & ~((3 << 18) | (1 << 31));
- if (val & (1 << 31)) {
+ if (extract64(val, 31, 1)) {
/* Start bit set -- actually do something */
unsigned int dcc = extract32(s->sys_cfgctrl, 26, 4);
unsigned int function = extract32(s->sys_cfgctrl, 20, 6);
@@ -623,7 +623,7 @@ static void arm_sysctl_finalize(Object *obj)
g_free(s->db_clock_reset);
}
-static Property arm_sysctl_properties[] = {
+static const Property arm_sysctl_properties[] = {
DEFINE_PROP_UINT32("sys_id", arm_sysctl_state, sys_id, 0),
DEFINE_PROP_UINT32("proc_id", arm_sysctl_state, proc_id, 0),
/* Daughterboard power supply voltages (as reported via SYS_CFG) */
@@ -632,15 +632,14 @@ static Property arm_sysctl_properties[] = {
/* Daughterboard clock reset values (as reported via SYS_CFG) */
DEFINE_PROP_ARRAY("db-clock", arm_sysctl_state, db_num_clocks,
db_clock_reset, qdev_prop_uint32, uint32_t),
- DEFINE_PROP_END_OF_LIST(),
};
-static void arm_sysctl_class_init(ObjectClass *klass, void *data)
+static void arm_sysctl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = arm_sysctl_realize;
- dc->reset = arm_sysctl_reset;
+ device_class_set_legacy_reset(dc, arm_sysctl_reset);
dc->vmsd = &vmstate_arm_sysctl;
device_class_set_props(dc, arm_sysctl_properties);
}
diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c
index bfc51d1..66e9218 100644
--- a/hw/misc/armsse-cpu-pwrctrl.c
+++ b/hw/misc/armsse-cpu-pwrctrl.c
@@ -125,11 +125,11 @@ static void pwrctrl_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void pwrctrl_class_init(ObjectClass *klass, void *data)
+static void pwrctrl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pwrctrl_reset;
+ device_class_set_legacy_reset(dc, pwrctrl_reset);
dc->vmsd = &pwrctrl_vmstate;
}
diff --git a/hw/misc/armsse-cpuid.c b/hw/misc/armsse-cpuid.c
index e785a09..a57764d 100644
--- a/hw/misc/armsse-cpuid.c
+++ b/hw/misc/armsse-cpuid.c
@@ -92,9 +92,8 @@ static const MemoryRegionOps armsse_cpuid_ops = {
.valid.max_access_size = 4,
};
-static Property armsse_cpuid_props[] = {
+static const Property armsse_cpuid_props[] = {
DEFINE_PROP_UINT32("CPUID", ARMSSECPUID, cpuid, 0),
- DEFINE_PROP_END_OF_LIST()
};
static void armsse_cpuid_init(Object *obj)
@@ -107,7 +106,7 @@ static void armsse_cpuid_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void armsse_cpuid_class_init(ObjectClass *klass, void *data)
+static void armsse_cpuid_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/armsse-mhu.c b/hw/misc/armsse-mhu.c
index 55625b2..d5d307a 100644
--- a/hw/misc/armsse-mhu.c
+++ b/hw/misc/armsse-mhu.c
@@ -176,11 +176,11 @@ static void armsse_mhu_init(Object *obj)
sysbus_init_irq(sbd, &s->cpu1irq);
}
-static void armsse_mhu_class_init(ObjectClass *klass, void *data)
+static void armsse_mhu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = armsse_mhu_reset;
+ device_class_set_legacy_reset(dc, armsse_mhu_reset);
dc->vmsd = &armsse_mhu_vmstate;
}
diff --git a/hw/misc/armv7m_ras.c b/hw/misc/armv7m_ras.c
index de24922..7bf5acd 100644
--- a/hw/misc/armv7m_ras.c
+++ b/hw/misc/armv7m_ras.c
@@ -72,7 +72,7 @@ static void armv7m_ras_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void armv7m_ras_class_init(ObjectClass *klass, void *data)
+static void armv7m_ras_class_init(ObjectClass *klass, const void *data)
{
/* This device has no state: no need for vmstate or reset */
}
diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index c06c04d..726368f 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -1,6 +1,7 @@
/*
* ASPEED Hash and Crypto Engine
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (C) 2021 IBM Corp.
*
* Joel Stanley <joel@jms.id.au>
@@ -9,14 +10,17 @@
*/
#include "qemu/osdep.h"
+#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
+#include "qemu/iov.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
+#include "trace.h"
#define R_CRYPT_CMD (0x10 / 4)
@@ -26,9 +30,12 @@
#define TAG_IRQ BIT(15)
#define R_HASH_SRC (0x20 / 4)
-#define R_HASH_DEST (0x24 / 4)
+#define R_HASH_DIGEST (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN (0x2c / 4)
+#define R_HASH_SRC_HI (0x90 / 4)
+#define R_HASH_DIGEST_HI (0x94 / 4)
+#define R_HASH_KEY_BUFF_HI (0x98 / 4)
#define R_HASH_CMD (0x30 / 4)
/* Hash algorithm selection */
@@ -58,6 +65,7 @@
/* Other cmd bits */
#define HASH_IRQ_EN BIT(9)
#define HASH_SG_EN BIT(18)
+#define CRYPT_IRQ_EN BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE 4
#define SG_LIST_LEN_MASK 0x0FFFFFFF
@@ -68,17 +76,56 @@
static const struct {
uint32_t mask;
- QCryptoHashAlgorithm algo;
+ QCryptoHashAlgo algo;
} hash_algo_map[] = {
- { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
- { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
- { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
- { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
- { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, QCRYPTO_HASH_ALG_SHA512 },
- { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, QCRYPTO_HASH_ALG_SHA384 },
- { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, QCRYPTO_HASH_ALG_SHA256 },
+ { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
+ { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
+ { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
+ { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
+ { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
+ QCRYPTO_HASH_ALGO_SHA512 },
+ { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
+ QCRYPTO_HASH_ALGO_SHA384 },
+ { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
+ QCRYPTO_HASH_ALGO_SHA256 },
};
+static void hace_hexdump(const char *desc, const char *buf, size_t size)
+{
+ g_autoptr(GString) str = g_string_sized_new(64);
+ size_t len;
+ size_t i;
+
+ for (i = 0; i < size; i += len) {
+ len = MIN(16, size - i);
+ g_string_truncate(str, 0);
+ qemu_hexdump_line(str, buf + i, len, 1, 4);
+ trace_aspeed_hace_hexdump(desc, i, str->str);
+ }
+}
+
+static void hace_iov_hexdump(const char *desc, const struct iovec *iov,
+ const unsigned int iov_cnt)
+{
+ size_t size = 0;
+ char *buf;
+ int i;
+
+ for (i = 0; i < iov_cnt; i++) {
+ size += iov[i].iov_len;
+ }
+
+ buf = g_malloc(size);
+
+ if (!buf) {
+ return;
+ }
+
+ iov_to_buf(iov, iov_cnt, 0, buf, size);
+ hace_hexdump(desc, buf, size);
+ g_free(buf);
+}
+
static int hash_algo_lookup(uint32_t reg)
{
int i;
@@ -123,6 +170,11 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov,
if (*total_msg_len <= s->total_req_len) {
uint32_t padding_size = s->total_req_len - *total_msg_len;
uint8_t *padding = iov->iov_base;
+
+ if (padding_size > req_len) {
+ return false;
+ }
+
*pad_offset = req_len - padding_size;
if (padding[*pad_offset] == 0x80) {
return true;
@@ -132,162 +184,269 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov,
return false;
}
-static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
- uint32_t *pad_offset)
+static uint64_t hash_get_source_addr(AspeedHACEState *s)
{
- int i, iov_count;
- if (*pad_offset != 0) {
- s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
- s->iov_cache[s->iov_count].iov_len = *pad_offset;
- ++s->iov_count;
- }
- for (i = 0; i < s->iov_count; i++) {
- iov[i].iov_base = s->iov_cache[i].iov_base;
- iov[i].iov_len = s->iov_cache[i].iov_len;
+ AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
+ uint64_t src_addr = 0;
+
+ src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]);
+ if (ahc->has_dma64) {
+ src_addr = deposit64(src_addr, 32, 32, s->regs[R_HASH_SRC_HI]);
}
- iov_count = s->iov_count;
- s->iov_count = 0;
- s->total_req_len = 0;
- return iov_count;
+
+ return src_addr;
}
-/**
- * Generate iov for accumulative mode.
- *
- * @param s aspeed hace state object
- * @param iov iov of the current request
- * @param id index of the current iov
- * @param req_len length of the current request
- *
- * @return count of iov
- */
-static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
- hwaddr *req_len)
+static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov,
+ bool acc_mode, bool *acc_final_request)
{
- uint32_t pad_offset;
uint32_t total_msg_len;
- s->total_req_len += *req_len;
+ uint32_t pad_offset;
+ uint64_t src;
+ void *haddr;
+ hwaddr plen;
+ int iov_idx;
+
+ plen = s->regs[R_HASH_SRC_LEN];
+ src = hash_get_source_addr(s);
+ trace_aspeed_hace_hash_addr("src", src);
+ haddr = address_space_map(&s->dram_as, src, &plen, false,
+ MEMTXATTRS_UNSPECIFIED);
+ if (haddr == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unable to map address, addr=0x%" HWADDR_PRIx
+ " ,plen=0x%" HWADDR_PRIx "\n",
+ __func__, src, plen);
+ return -1;
+ }
- if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
- if (s->iov_count) {
- return reconstruct_iov(s, iov, id, &pad_offset);
- }
+ iov[0].iov_base = haddr;
+ iov_idx = 1;
- *req_len -= s->total_req_len - total_msg_len;
- s->total_req_len = 0;
- iov[id].iov_len = *req_len;
+ if (acc_mode) {
+ s->total_req_len += plen;
+
+ if (has_padding(s, &iov[0], plen, &total_msg_len,
+ &pad_offset)) {
+ /* Padding being present indicates the final request */
+ *acc_final_request = true;
+ iov[0].iov_len = pad_offset;
+ } else {
+ iov[0].iov_len = plen;
+ }
} else {
- s->iov_cache[s->iov_count].iov_base = iov->iov_base;
- s->iov_cache[s->iov_count].iov_len = *req_len;
- ++s->iov_count;
+ iov[0].iov_len = plen;
}
- return id + 1;
+ return iov_idx;
}
-static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
- bool acc_mode)
+static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov,
+ bool acc_mode, bool *acc_final_request)
{
- struct iovec iov[ASPEED_HACE_MAX_SG];
- g_autofree uint8_t *digest_buf = NULL;
- size_t digest_len = 0;
- int niov = 0;
- int i;
+ uint32_t total_msg_len;
+ uint32_t pad_offset;
+ uint32_t len = 0;
+ uint32_t sg_addr;
+ uint64_t src;
+ int iov_idx;
+ hwaddr plen;
void *haddr;
- if (sg_mode) {
- uint32_t len = 0;
-
- for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
- uint32_t addr, src;
- hwaddr plen;
-
- if (i == ASPEED_HACE_MAX_SG) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "aspeed_hace: guest failed to set end of sg list marker\n");
- break;
- }
-
- src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);
+ src = hash_get_source_addr(s);
+ for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
+ if (iov_idx == ASPEED_HACE_MAX_SG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Failed to set end of sg list marker\n",
+ __func__);
+ return -1;
+ }
- len = address_space_ldl_le(&s->dram_as, src,
+ len = address_space_ldl_le(&s->dram_as, src,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
MEMTXATTRS_UNSPECIFIED, NULL);
+ sg_addr &= SG_LIST_ADDR_MASK;
+ trace_aspeed_hace_hash_sg(iov_idx, src, sg_addr, len);
+ /*
+ * To maintain compatibility with older SoCs such as the AST2600,
+         * the AST2700 HW automatically sets bit 34 of the 64-bit sg_addr.
+ * As a result, the firmware only needs to provide a 32-bit sg_addr
+ * containing bits [31:0]. This is sufficient for the AST2700, as
+ * it uses a DRAM offset rather than a DRAM address.
+ */
+ plen = len & SG_LIST_LEN_MASK;
+ haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
+ MEMTXATTRS_UNSPECIFIED);
- addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
- MEMTXATTRS_UNSPECIFIED, NULL);
- addr &= SG_LIST_ADDR_MASK;
+ if (haddr == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unable to map address, sg_addr=0x%x, "
+ "plen=0x%" HWADDR_PRIx "\n",
+ __func__, sg_addr, plen);
+ return -1;
+ }
- plen = len & SG_LIST_LEN_MASK;
- haddr = address_space_map(&s->dram_as, addr, &plen, false,
- MEMTXATTRS_UNSPECIFIED);
- if (haddr == NULL) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
- return;
- }
- iov[i].iov_base = haddr;
- if (acc_mode) {
- niov = gen_acc_mode_iov(s, iov, i, &plen);
+ src += SG_LIST_ENTRY_SIZE;
+
+ iov[iov_idx].iov_base = haddr;
+ if (acc_mode) {
+ s->total_req_len += plen;
+ if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
+ &pad_offset)) {
+ /* Padding being present indicates the final request */
+ *acc_final_request = true;
+ iov[iov_idx].iov_len = pad_offset;
} else {
- iov[i].iov_len = plen;
+ iov[iov_idx].iov_len = plen;
}
+ } else {
+ iov[iov_idx].iov_len = plen;
}
- } else {
- hwaddr len = s->regs[R_HASH_SRC_LEN];
+ }
- haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
- &len, false, MEMTXATTRS_UNSPECIFIED);
- if (haddr == NULL) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
+ return iov_idx;
+}
+
+static uint64_t hash_get_digest_addr(AspeedHACEState *s)
+{
+ AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
+ uint64_t digest_addr = 0;
+
+ digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]);
+ if (ahc->has_dma64) {
+ digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DIGEST_HI]);
+ }
+
+ return digest_addr;
+}
+
+static void hash_write_digest_and_unmap_iov(AspeedHACEState *s,
+ struct iovec *iov,
+ int iov_idx,
+ uint8_t *digest_buf,
+ size_t digest_len)
+{
+ uint64_t digest_addr = 0;
+
+ digest_addr = hash_get_digest_addr(s);
+ trace_aspeed_hace_hash_addr("digest", digest_addr);
+ if (address_space_write(&s->dram_as, digest_addr,
+ MEMTXATTRS_UNSPECIFIED,
+ digest_buf, digest_len)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n",
+ __func__, digest_addr);
+ }
+
+ if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) {
+ hace_hexdump("digest", (char *)digest_buf, digest_len);
+ }
+
+ for (; iov_idx > 0; iov_idx--) {
+ address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base,
+ iov[iov_idx - 1].iov_len, false,
+ iov[iov_idx - 1].iov_len);
+ }
+}
+
+static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo,
+ struct iovec *iov, int iov_idx)
+{
+ g_autofree uint8_t *digest_buf = NULL;
+ Error *local_err = NULL;
+ size_t digest_len = 0;
+
+ if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf,
+ &digest_len, &local_err) < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: qcrypto hash bytesv failed : %s",
+ __func__, error_get_pretty(local_err));
+ error_free(local_err);
+ return;
+ }
+
+ hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
+}
+
+static void hash_execute_acc_mode(AspeedHACEState *s, int algo,
+ struct iovec *iov, int iov_idx,
+ bool final_request)
+{
+ g_autofree uint8_t *digest_buf = NULL;
+ Error *local_err = NULL;
+ size_t digest_len = 0;
+
+ trace_aspeed_hace_hash_execute_acc_mode(final_request);
+
+ if (s->hash_ctx == NULL) {
+ s->hash_ctx = qcrypto_hash_new(algo, &local_err);
+ if (s->hash_ctx == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash new failed : %s",
+ __func__, error_get_pretty(local_err));
+ error_free(local_err);
return;
}
- iov[0].iov_base = haddr;
- iov[0].iov_len = len;
- i = 1;
-
- if (s->iov_count) {
- /*
- * In aspeed sdk kernel driver, sg_mode is disabled in hash_final().
- * Thus if we received a request with sg_mode disabled, it is
- * required to check whether cache is empty. If no, we should
- * combine cached iov and the current iov.
- */
- uint32_t total_msg_len;
- uint32_t pad_offset;
- s->total_req_len += len;
- if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
- niov = reconstruct_iov(s, iov, 0, &pad_offset);
- }
- }
}
- if (niov) {
- i = niov;
+ if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash updatev failed : %s",
+ __func__, error_get_pretty(local_err));
+ error_free(local_err);
+ return;
}
- if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
- return;
+ if (final_request) {
+ if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
+ &digest_len, &local_err)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: qcrypto hash finalize bytes failed : %s",
+ __func__, error_get_pretty(local_err));
+ error_free(local_err);
+ local_err = NULL;
+ }
+
+ qcrypto_hash_free(s->hash_ctx);
+
+ s->hash_ctx = NULL;
+ s->total_req_len = 0;
}
- if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
- MEMTXATTRS_UNSPECIFIED,
- digest_buf, digest_len)) {
+ hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
+}
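+
+/*
+ * Summary of the accumulative-mode flow modelled above (drawn from this
+ * code, not from a datasheet): the guest issues a series of hash commands
+ * with the HASH_DIGEST_ACCUM function; the first request lazily allocates
+ * s->hash_ctx, every request is fed through qcrypto_hash_updatev(), and the
+ * request carrying the final padding (flagged by has_padding() while the
+ * iov is built) triggers qcrypto_hash_finalize_bytes(), after which the
+ * context is freed and total_req_len is reset.
+ */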
+
+static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
+ bool acc_mode)
+{
+ QEMU_UNINITIALIZED struct iovec iov[ASPEED_HACE_MAX_SG];
+ bool acc_final_request = false;
+ int iov_idx = -1;
+
+ /* Prepares the iov for hashing operations based on the selected mode */
+ if (sg_mode) {
+ iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request);
+ } else {
+ iov_idx = hash_prepare_direct_iov(s, iov, acc_mode,
+ &acc_final_request);
+ }
+
+ if (iov_idx <= 0) {
qemu_log_mask(LOG_GUEST_ERROR,
- "aspeed_hace: address space write failed\n");
+ "%s: Failed to prepare iov\n", __func__);
+ return;
}
- for (; i > 0; i--) {
- address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
- iov[i - 1].iov_len, false,
- iov[i - 1].iov_len);
+ if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) {
+ hace_iov_hexdump("plaintext", iov, iov_idx);
}
- /*
- * Set status bits to indicate completion. Testing shows hardware sets
- * these irrespective of HASH_IRQ_EN.
- */
- s->regs[R_STATUS] |= HASH_IRQ;
+ /* Executes the hash operation */
+ if (acc_mode) {
+ hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request);
+ } else {
+ hash_execute_non_acc_mode(s, algo, iov, iov_idx);
+ }
}
static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
@@ -296,12 +455,7 @@ static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
addr >>= 2;
- if (addr >= ASPEED_HACE_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
- __func__, addr << 2);
- return 0;
- }
+ trace_aspeed_hace_read(addr << 2, s->regs[addr]);
return s->regs[addr];
}
@@ -314,12 +468,7 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
addr >>= 2;
- if (addr >= ASPEED_HACE_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
- __func__, addr << 2);
- return;
- }
+ trace_aspeed_hace_write(addr << 2, data);
switch (addr) {
case R_STATUS:
@@ -330,11 +479,20 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
qemu_irq_lower(s->irq);
}
}
+ if (ahc->raise_crypt_interrupt_workaround) {
+ if (data & CRYPT_IRQ) {
+ data &= ~CRYPT_IRQ;
+
+ if (s->regs[addr] & CRYPT_IRQ) {
+ qemu_irq_lower(s->irq);
+ }
+ }
+ }
break;
case R_HASH_SRC:
data &= ahc->src_mask;
break;
- case R_HASH_DEST:
+ case R_HASH_DIGEST:
data &= ahc->dest_mask;
break;
case R_HASH_KEY_BUFF:
@@ -362,10 +520,16 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
__func__, data & ahc->hash_mask);
- break;
+ } else {
+ do_hash_operation(s, algo, data & HASH_SG_EN,
+ ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
}
- do_hash_operation(s, algo, data & HASH_SG_EN,
- ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
+
+ /*
+ * Set status bits to indicate completion. Testing shows hardware sets
+ * these irrespective of HASH_IRQ_EN.
+ */
+ s->regs[R_STATUS] |= HASH_IRQ;
if (data & HASH_IRQ_EN) {
qemu_irq_raise(s->irq);
@@ -375,6 +539,21 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
case R_CRYPT_CMD:
qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
__func__);
+ if (ahc->raise_crypt_interrupt_workaround) {
+ s->regs[R_STATUS] |= CRYPT_IRQ;
+ if (data & CRYPT_IRQ_EN) {
+ qemu_irq_raise(s->irq);
+ }
+ }
+ break;
+ case R_HASH_SRC_HI:
+ data &= ahc->src_hi_mask;
+ break;
+ case R_HASH_DIGEST_HI:
+ data &= ahc->dest_hi_mask;
+ break;
+ case R_HASH_KEY_BUFF_HI:
+ data &= ahc->key_hi_mask;
break;
default:
break;
@@ -396,9 +575,14 @@ static const MemoryRegionOps aspeed_hace_ops = {
static void aspeed_hace_reset(DeviceState *dev)
{
struct AspeedHACEState *s = ASPEED_HACE(dev);
+ AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
- memset(s->regs, 0, sizeof(s->regs));
- s->iov_count = 0;
+ if (s->hash_ctx != NULL) {
+ qcrypto_hash_free(s->hash_ctx);
+ s->hash_ctx = NULL;
+ }
+
+ memset(s->regs, 0, ahc->nr_regs << 2);
s->total_req_len = 0;
}
@@ -406,11 +590,13 @@ static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
AspeedHACEState *s = ASPEED_HACE(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
sysbus_init_irq(sbd, &s->irq);
+ s->regs = g_new(uint32_t, ahc->nr_regs);
memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
- TYPE_ASPEED_HACE, 0x1000);
+ TYPE_ASPEED_HACE, ahc->nr_regs << 2);
if (!s->dram_mr) {
error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
@@ -422,31 +608,37 @@ static void aspeed_hace_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property aspeed_hace_properties[] = {
+static const Property aspeed_hace_properties[] = {
DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_aspeed_hace = {
.name = TYPE_ASPEED_HACE,
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
VMSTATE_UINT32(total_req_len, AspeedHACEState),
- VMSTATE_UINT32(iov_count, AspeedHACEState),
VMSTATE_END_OF_LIST(),
}
};
-static void aspeed_hace_class_init(ObjectClass *klass, void *data)
+static void aspeed_hace_unrealize(DeviceState *dev)
+{
+ AspeedHACEState *s = ASPEED_HACE(dev);
+
+ g_free(s->regs);
+ s->regs = NULL;
+}
+
+static void aspeed_hace_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_hace_realize;
- dc->reset = aspeed_hace_reset;
+ dc->unrealize = aspeed_hace_unrealize;
+ device_class_set_legacy_reset(dc, aspeed_hace_reset);
device_class_set_props(dc, aspeed_hace_properties);
dc->vmsd = &vmstate_aspeed_hace;
}
@@ -459,13 +651,14 @@ static const TypeInfo aspeed_hace_info = {
.class_size = sizeof(AspeedHACEClass)
};
-static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
+static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
dc->desc = "AST2400 Hash and Crypto Engine";
+ ahc->nr_regs = 0x64 >> 2;
ahc->src_mask = 0x0FFFFFFF;
ahc->dest_mask = 0x0FFFFFF8;
ahc->key_mask = 0x0FFFFFC0;
@@ -478,13 +671,14 @@ static const TypeInfo aspeed_ast2400_hace_info = {
.class_init = aspeed_ast2400_hace_class_init,
};
-static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
+static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
dc->desc = "AST2500 Hash and Crypto Engine";
+ ahc->nr_regs = 0x64 >> 2;
ahc->src_mask = 0x3fffffff;
ahc->dest_mask = 0x3ffffff8;
ahc->key_mask = 0x3FFFFFC0;
@@ -497,13 +691,14 @@ static const TypeInfo aspeed_ast2500_hace_info = {
.class_init = aspeed_ast2500_hace_class_init,
};
-static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
+static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
dc->desc = "AST2600 Hash and Crypto Engine";
+ ahc->nr_regs = 0x64 >> 2;
ahc->src_mask = 0x7FFFFFFF;
ahc->dest_mask = 0x7FFFFFF8;
ahc->key_mask = 0x7FFFFFF8;
@@ -516,13 +711,14 @@ static const TypeInfo aspeed_ast2600_hace_info = {
.class_init = aspeed_ast2600_hace_class_init,
};
-static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
+static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
dc->desc = "AST1030 Hash and Crypto Engine";
+ ahc->nr_regs = 0x64 >> 2;
ahc->src_mask = 0x7FFFFFFF;
ahc->dest_mask = 0x7FFFFFF8;
ahc->key_mask = 0x7FFFFFF8;
@@ -535,12 +731,58 @@ static const TypeInfo aspeed_ast1030_hace_info = {
.class_init = aspeed_ast1030_hace_class_init,
};
+static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
+
+ dc->desc = "AST2700 Hash and Crypto Engine";
+
+ ahc->nr_regs = 0x9C >> 2;
+ ahc->src_mask = 0x7FFFFFFF;
+ ahc->dest_mask = 0x7FFFFFF8;
+ ahc->key_mask = 0x7FFFFFF8;
+ ahc->hash_mask = 0x00147FFF;
+
+ /*
+ * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM
+ * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range
+ * fits within 34 bits, only bits [33:0] are needed to store the DRAM
+ * offset. To optimize address storage, the high physical address bits
+ * [1:0] of the source, digest and key buffer addresses are stored as
+ * dram_offset bits [33:32].
+ *
+ * This approach eliminates the need to reduce the high part of the DRAM
+ * physical address for DMA operations. Previously, that high part had to be
+ * calculated as (high physical address bits [7:0] - 4), because the DRAM
+ * start address is 0x4_0000_0000.
+ */
+ ahc->src_hi_mask = 0x00000003;
+ ahc->dest_hi_mask = 0x00000003;
+ ahc->key_hi_mask = 0x00000003;
+
+ /*
+ * The CRYPT command is currently not supported. Instead, the model only
+ * raises an interrupt to notify the firmware that the crypt command has
+ * completed. This is a temporary workaround.
+ */
+ ahc->raise_crypt_interrupt_workaround = true;
+ ahc->has_dma64 = true;
+}
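+
+/*
+ * Worked example of the offset scheme described in the comment above
+ * (illustrative values derived from that comment, not from a datasheet):
+ * a source buffer at DRAM physical address 0x5_0000_0000 corresponds to
+ * DRAM offset 0x1_0000_0000 (the physical address minus the 0x4_0000_0000
+ * DRAM base). The firmware writes offset bits [31:0] (0x0000_0000) to
+ * R_HASH_SRC and bits [33:32] (0x1) to R_HASH_SRC_HI, which src_hi_mask
+ * restricts to two bits.
+ */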
+
+static const TypeInfo aspeed_ast2700_hace_info = {
+ .name = TYPE_ASPEED_AST2700_HACE,
+ .parent = TYPE_ASPEED_HACE,
+ .class_init = aspeed_ast2700_hace_class_init,
+};
+
static void aspeed_hace_register_types(void)
{
type_register_static(&aspeed_ast2400_hace_info);
type_register_static(&aspeed_ast2500_hace_info);
type_register_static(&aspeed_ast2600_hace_info);
type_register_static(&aspeed_ast1030_hace_info);
+ type_register_static(&aspeed_ast2700_hace_info);
type_register_static(&aspeed_hace_info);
}
diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c
index 827c9e5..3bef1c8 100644
--- a/hw/misc/aspeed_i3c.c
+++ b/hw/misc/aspeed_i3c.c
@@ -323,18 +323,17 @@ static void aspeed_i3c_realize(DeviceState *dev, Error **errp)
}
-static Property aspeed_i3c_device_properties[] = {
+static const Property aspeed_i3c_device_properties[] = {
DEFINE_PROP_UINT8("device-id", AspeedI3CDevice, id, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_i3c_device_class_init(ObjectClass *klass, void *data)
+static void aspeed_i3c_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "Aspeed I3C Device";
dc->realize = aspeed_i3c_device_realize;
- dc->reset = aspeed_i3c_device_reset;
+ device_class_set_legacy_reset(dc, aspeed_i3c_device_reset);
device_class_set_props(dc, aspeed_i3c_device_properties);
}
@@ -357,12 +356,12 @@ static const VMStateDescription vmstate_aspeed_i3c = {
}
};
-static void aspeed_i3c_class_init(ObjectClass *klass, void *data)
+static void aspeed_i3c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_i3c_realize;
- dc->reset = aspeed_i3c_reset;
+ device_class_set_legacy_reset(dc, aspeed_i3c_reset);
dc->desc = "Aspeed I3C Controller";
dc->vmsd = &vmstate_aspeed_i3c;
}
diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c
index 193f0de..78406da 100644
--- a/hw/misc/aspeed_lpc.c
+++ b/hw/misc/aspeed_lpc.c
@@ -454,17 +454,16 @@ static const VMStateDescription vmstate_aspeed_lpc = {
}
};
-static Property aspeed_lpc_properties[] = {
+static const Property aspeed_lpc_properties[] = {
DEFINE_PROP_UINT32("hicr7", AspeedLPCState, hicr7, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_lpc_class_init(ObjectClass *klass, void *data)
+static void aspeed_lpc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_lpc_realize;
- dc->reset = aspeed_lpc_reset;
+ device_class_set_legacy_reset(dc, aspeed_lpc_reset);
dc->desc = "Aspeed LPC Controller",
dc->vmsd = &vmstate_aspeed_lpc;
device_class_set_props(dc, aspeed_lpc_properties);
diff --git a/hw/misc/aspeed_peci.c b/hw/misc/aspeed_peci.c
index 93cc672..a7a449a 100644
--- a/hw/misc/aspeed_peci.c
+++ b/hw/misc/aspeed_peci.c
@@ -130,12 +130,12 @@ static void aspeed_peci_reset(DeviceState *dev)
memset(s->regs, 0, sizeof(s->regs));
}
-static void aspeed_peci_class_init(ObjectClass *klass, void *data)
+static void aspeed_peci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_peci_realize;
- dc->reset = aspeed_peci_reset;
+ device_class_set_legacy_reset(dc, aspeed_peci_reset);
dc->desc = "Aspeed PECI Controller";
}
diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c
index 8bb1f90..a7d101b 100644
--- a/hw/misc/aspeed_sbc.c
+++ b/hw/misc/aspeed_sbc.c
@@ -136,18 +136,17 @@ static const VMStateDescription vmstate_aspeed_sbc = {
}
};
-static Property aspeed_sbc_properties[] = {
+static const Property aspeed_sbc_properties[] = {
DEFINE_PROP_BOOL("emmc-abr", AspeedSBCState, emmc_abr, 0),
DEFINE_PROP_UINT32("signing-settings", AspeedSBCState, signing_settings, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_sbc_class_init(ObjectClass *klass, void *data)
+static void aspeed_sbc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_sbc_realize;
- dc->reset = aspeed_sbc_reset;
+ device_class_set_legacy_reset(dc, aspeed_sbc_reset);
dc->vmsd = &vmstate_aspeed_sbc;
device_class_set_props(dc, aspeed_sbc_properties);
}
@@ -160,7 +159,7 @@ static const TypeInfo aspeed_sbc_info = {
.class_size = sizeof(AspeedSBCClass)
};
-static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, void *data)
+static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index 451e837..4930e00 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -157,6 +157,7 @@
#define AST2700_SCU_FREQ_CNTR TO_REG(0x3b0)
#define AST2700_SCU_CPU_SCRATCH_0 TO_REG(0x780)
#define AST2700_SCU_CPU_SCRATCH_1 TO_REG(0x784)
+#define AST2700_SCU_VGA_SCRATCH_0 TO_REG(0x900)
#define AST2700_SCUIO_CLK_STOP_CTL_1 TO_REG(0x240)
#define AST2700_SCUIO_CLK_STOP_CLR_1 TO_REG(0x244)
@@ -426,6 +427,10 @@ static const MemoryRegionOps aspeed_ast2400_scu_ops = {
.read = aspeed_scu_read,
.write = aspeed_ast2400_scu_write,
.endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
.valid = {
.min_access_size = 1,
.max_access_size = 4,
@@ -436,7 +441,9 @@ static const MemoryRegionOps aspeed_ast2500_scu_ops = {
.read = aspeed_scu_read,
.write = aspeed_ast2500_scu_write,
.endianness = DEVICE_LITTLE_ENDIAN,
- .valid.min_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 1,
.valid.max_access_size = 4,
.valid.unaligned = false,
};
@@ -559,6 +566,8 @@ static uint32_t aspeed_silicon_revs[] = {
AST2700_A0_SILICON_REV,
AST2720_A0_SILICON_REV,
AST2750_A0_SILICON_REV,
+ AST2700_A1_SILICON_REV,
+ AST2750_A1_SILICON_REV,
};
bool is_supported_silicon_rev(uint32_t silicon_rev)
@@ -602,19 +611,18 @@ static const VMStateDescription vmstate_aspeed_scu = {
}
};
-static Property aspeed_scu_properties[] = {
+static const Property aspeed_scu_properties[] = {
DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0),
DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0),
DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap2, 0),
DEFINE_PROP_UINT32("hw-prot-key", AspeedSCUState, hw_prot_key, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_scu_realize;
- dc->reset = aspeed_scu_reset;
+ device_class_set_legacy_reset(dc, aspeed_scu_reset);
dc->desc = "ASPEED System Control Unit";
dc->vmsd = &vmstate_aspeed_scu;
device_class_set_props(dc, aspeed_scu_properties);
@@ -629,7 +637,7 @@ static const TypeInfo aspeed_scu_info = {
.abstract = true,
};
-static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
@@ -651,7 +659,7 @@ static const TypeInfo aspeed_2400_scu_info = {
.class_init = aspeed_2400_scu_class_init,
};
-static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
@@ -777,7 +785,9 @@ static const MemoryRegionOps aspeed_ast2600_scu_ops = {
.read = aspeed_ast2600_scu_read,
.write = aspeed_ast2600_scu_write,
.endianness = DEVICE_LITTLE_ENDIAN,
- .valid.min_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 1,
.valid.max_access_size = 4,
.valid.unaligned = false,
};
@@ -825,13 +835,13 @@ static void aspeed_ast2600_scu_reset(DeviceState *dev)
s->regs[PROT_KEY] = s->hw_prot_key;
}
-static void aspeed_2600_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
dc->desc = "ASPEED 2600 System Control Unit";
- dc->reset = aspeed_ast2600_scu_reset;
+ device_class_set_legacy_reset(dc, aspeed_ast2600_scu_reset);
asc->resets = ast2600_a3_resets;
asc->calc_hpll = aspeed_2600_scu_calc_hpll;
asc->get_apb = aspeed_2600_scu_get_apb_freq;
@@ -904,14 +914,14 @@ static const MemoryRegionOps aspeed_ast2700_scu_ops = {
.read = aspeed_ast2700_scu_read,
.write = aspeed_ast2700_scu_write,
.endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
.valid.min_access_size = 1,
.valid.max_access_size = 8,
.valid.unaligned = false,
};
static const uint32_t ast2700_a0_resets[ASPEED_AST2700_SCU_NR_REGS] = {
- [AST2700_SILICON_REV] = AST2700_A0_SILICON_REV,
- [AST2700_HW_STRAP1] = 0x00000800,
[AST2700_HW_STRAP1_CLR] = 0xFFF0FFF0,
[AST2700_HW_STRAP1_LOCK] = 0x00000FFF,
[AST2700_HW_STRAP1_SEC1] = 0x000000FF,
@@ -931,6 +941,7 @@ static const uint32_t ast2700_a0_resets[ASPEED_AST2700_SCU_NR_REGS] = {
[AST2700_SCU_FREQ_CNTR] = 0x000375eb,
[AST2700_SCU_CPU_SCRATCH_0] = 0x00000000,
[AST2700_SCU_CPU_SCRATCH_1] = 0x00000004,
+ [AST2700_SCU_VGA_SCRATCH_0] = 0x00000040,
};
static void aspeed_ast2700_scu_reset(DeviceState *dev)
@@ -939,15 +950,17 @@ static void aspeed_ast2700_scu_reset(DeviceState *dev)
AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev);
memcpy(s->regs, asc->resets, asc->nr_regs * 4);
+ s->regs[AST2700_SILICON_REV] = s->silicon_rev;
+ s->regs[AST2700_HW_STRAP1] = s->hw_strap1;
}
-static void aspeed_2700_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
dc->desc = "ASPEED 2700 System Control Unit";
- dc->reset = aspeed_ast2700_scu_reset;
+ device_class_set_legacy_reset(dc, aspeed_ast2700_scu_reset);
asc->resets = ast2700_a0_resets;
asc->calc_hpll = aspeed_2600_scu_calc_hpll;
asc->get_apb = aspeed_2700_scu_get_apb_freq;
@@ -1025,14 +1038,14 @@ static const MemoryRegionOps aspeed_ast2700_scuio_ops = {
.read = aspeed_ast2700_scuio_read,
.write = aspeed_ast2700_scuio_write,
.endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
.valid.min_access_size = 1,
.valid.max_access_size = 8,
.valid.unaligned = false,
};
static const uint32_t ast2700_a0_resets_io[ASPEED_AST2700_SCU_NR_REGS] = {
- [AST2700_SILICON_REV] = 0x06000003,
- [AST2700_HW_STRAP1] = 0x00000504,
[AST2700_HW_STRAP1_CLR] = 0xFFF0FFF0,
[AST2700_HW_STRAP1_LOCK] = 0x00000FFF,
[AST2700_HW_STRAP1_SEC1] = 0x000000FF,
@@ -1055,13 +1068,13 @@ static const uint32_t ast2700_a0_resets_io[ASPEED_AST2700_SCU_NR_REGS] = {
[AST2700_SCUIO_CLK_DUTY_MEAS_RST] = 0x0c9100d2,
};
-static void aspeed_2700_scuio_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_scuio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
dc->desc = "ASPEED 2700 System Control Unit I/O";
- dc->reset = aspeed_ast2700_scu_reset;
+ device_class_set_legacy_reset(dc, aspeed_ast2700_scu_reset);
asc->resets = ast2700_a0_resets_io;
asc->calc_hpll = aspeed_2600_scu_calc_hpll;
asc->get_apb = aspeed_2700_scuio_get_apb_freq;
@@ -1113,13 +1126,13 @@ static void aspeed_ast1030_scu_reset(DeviceState *dev)
s->regs[PROT_KEY] = s->hw_prot_key;
}
-static void aspeed_1030_scu_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_scu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass);
dc->desc = "ASPEED 1030 System Control Unit";
- dc->reset = aspeed_ast1030_scu_reset;
+ device_class_set_legacy_reset(dc, aspeed_ast1030_scu_reset);
asc->resets = ast1030_a1_resets;
asc->calc_hpll = aspeed_2600_scu_calc_hpll;
asc->get_apb = aspeed_1030_scu_get_apb_freq;
diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c
index ebf139c..f04d993 100644
--- a/hw/misc/aspeed_sdmc.c
+++ b/hw/misc/aspeed_sdmc.c
@@ -294,17 +294,16 @@ static const VMStateDescription vmstate_aspeed_sdmc = {
}
};
-static Property aspeed_sdmc_properties[] = {
+static const Property aspeed_sdmc_properties[] = {
DEFINE_PROP_UINT64("max-ram-size", AspeedSDMCState, max_ram_size, 0),
DEFINE_PROP_BOOL("unlocked", AspeedSDMCState, unlocked, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_sdmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_sdmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_sdmc_realize;
- dc->reset = aspeed_sdmc_reset;
+ device_class_set_legacy_reset(dc, aspeed_sdmc_reset);
dc->desc = "ASPEED SDRAM Memory Controller";
dc->vmsd = &vmstate_aspeed_sdmc;
device_class_set_props(dc, aspeed_sdmc_properties);
@@ -381,7 +380,7 @@ static void aspeed_2400_sdmc_write(AspeedSDMCState *s, uint32_t reg,
static const uint64_t
aspeed_2400_ram_sizes[] = { 64 * MiB, 128 * MiB, 256 * MiB, 512 * MiB, 0};
-static void aspeed_2400_sdmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_sdmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass);
@@ -449,7 +448,7 @@ static void aspeed_2500_sdmc_write(AspeedSDMCState *s, uint32_t reg,
static const uint64_t
aspeed_2500_ram_sizes[] = { 128 * MiB, 256 * MiB, 512 * MiB, 1024 * MiB, 0};
-static void aspeed_2500_sdmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_sdmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass);
@@ -543,7 +542,7 @@ static void aspeed_2600_sdmc_write(AspeedSDMCState *s, uint32_t reg,
static const uint64_t
aspeed_2600_ram_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB, 2048 * MiB, 0};
-static void aspeed_2600_sdmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_sdmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass);
@@ -671,13 +670,13 @@ static const uint64_t
aspeed_2700_ram_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB,
2048 * MiB, 4096 * MiB, 8192 * MiB, 0};
-static void aspeed_2700_sdmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_sdmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass);
dc->desc = "ASPEED 2700 SDRAM Memory Controller";
- dc->reset = aspeed_2700_sdmc_reset;
+ device_class_set_legacy_reset(dc, aspeed_2700_sdmc_reset);
asc->is_bus64bit = true;
asc->max_ram_size = 8 * GiB;
diff --git a/hw/misc/aspeed_sli.c b/hw/misc/aspeed_sli.c
index fe720ea..c514840 100644
--- a/hw/misc/aspeed_sli.c
+++ b/hw/misc/aspeed_sli.c
@@ -124,7 +124,7 @@ static void aspeed_sliio_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void aspeed_sli_class_init(ObjectClass *klass, void *data)
+static void aspeed_sli_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -140,14 +140,14 @@ static const TypeInfo aspeed_sli_info = {
.abstract = true,
};
-static void aspeed_2700_sli_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_sli_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "AST2700 SLI Controller";
}
-static void aspeed_2700_sliio_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_sliio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c
index 76ab846..cc03c42 100644
--- a/hw/misc/aspeed_xdma.c
+++ b/hw/misc/aspeed_xdma.c
@@ -150,7 +150,7 @@ static const VMStateDescription aspeed_xdma_vmstate = {
},
};
-static void aspeed_2600_xdma_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_xdma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
@@ -173,7 +173,7 @@ static const TypeInfo aspeed_2600_xdma_info = {
.class_init = aspeed_2600_xdma_class_init,
};
-static void aspeed_2500_xdma_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_xdma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
@@ -195,7 +195,7 @@ static const TypeInfo aspeed_2500_xdma_info = {
.class_init = aspeed_2500_xdma_class_init,
};
-static void aspeed_2400_xdma_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_xdma_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
@@ -217,12 +217,12 @@ static const TypeInfo aspeed_2400_xdma_info = {
.class_init = aspeed_2400_xdma_class_init,
};
-static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
+static void aspeed_xdma_class_init(ObjectClass *classp, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(classp);
dc->realize = aspeed_xdma_realize;
- dc->reset = aspeed_xdma_reset;
+ device_class_set_legacy_reset(dc, aspeed_xdma_reset);
dc->vmsd = &aspeed_xdma_vmstate;
}
diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c
index 28d50d9..877f345 100644
--- a/hw/misc/auxbus.c
+++ b/hw/misc/auxbus.c
@@ -50,7 +50,7 @@ static void aux_slave_dev_print(Monitor *mon, DeviceState *dev, int indent);
static inline I2CBus *aux_bridge_get_i2c_bus(AUXTOI2CState *bridge);
/* aux-bus implementation (internal not public) */
-static void aux_bus_class_init(ObjectClass *klass, void *data)
+static void aux_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -256,7 +256,7 @@ struct AUXTOI2CState {
I2CBus *i2c_bus;
};
-static void aux_bridge_class_init(ObjectClass *oc, void *data)
+static void aux_bridge_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -311,7 +311,7 @@ void aux_init_mmio(AUXSlave *aux_slave, MemoryRegion *mmio)
aux_slave->mmio = mmio;
}
-static void aux_slave_class_init(ObjectClass *klass, void *data)
+static void aux_slave_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
diff --git a/hw/misc/avr_power.c b/hw/misc/avr_power.c
index a5412f2..411f016 100644
--- a/hw/misc/avr_power.c
+++ b/hw/misc/avr_power.c
@@ -90,11 +90,11 @@ static void avr_mask_init(Object *dev)
s->val = 0x00;
}
-static void avr_mask_class_init(ObjectClass *klass, void *data)
+static void avr_mask_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = avr_mask_reset;
+ device_class_set_legacy_reset(dc, avr_mask_reset);
}
static const TypeInfo avr_mask_info = {
diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c
index af64687..46d1771 100644
--- a/hw/misc/axp2xx.c
+++ b/hw/misc/axp2xx.c
@@ -225,7 +225,7 @@ static const VMStateDescription vmstate_axp2xx = {
}
};
-static void axp2xx_class_init(ObjectClass *oc, void *data)
+static void axp2xx_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc);
@@ -247,7 +247,7 @@ static const TypeInfo axp2xx_info = {
.abstract = true,
};
-static void axp209_class_init(ObjectClass *oc, void *data)
+static void axp209_class_init(ObjectClass *oc, const void *data)
{
AXP2xxClass *sc = AXP2XX_CLASS(oc);
@@ -260,7 +260,7 @@ static const TypeInfo axp209_info = {
.class_init = axp209_class_init
};
-static void axp221_class_init(ObjectClass *oc, void *data)
+static void axp221_class_init(ObjectClass *oc, const void *data)
{
AXP2xxClass *sc = AXP2XX_CLASS(oc);
diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c
index 91c8f7b..efe6f90 100644
--- a/hw/misc/bcm2835_cprman.c
+++ b/hw/misc/bcm2835_cprman.c
@@ -131,12 +131,14 @@ static const VMStateDescription pll_vmstate = {
}
};
-static void pll_class_init(ObjectClass *klass, void *data)
+static void pll_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pll_reset;
+ device_class_set_legacy_reset(dc, pll_reset);
dc->vmsd = &pll_vmstate;
+ /* Reason: Part of BCM2835CprmanState component */
+ dc->user_creatable = false;
}
static const TypeInfo cprman_pll_info = {
@@ -235,12 +237,14 @@ static const VMStateDescription pll_channel_vmstate = {
}
};
-static void pll_channel_class_init(ObjectClass *klass, void *data)
+static void pll_channel_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pll_channel_reset;
+ device_class_set_legacy_reset(dc, pll_channel_reset);
dc->vmsd = &pll_channel_vmstate;
+ /* Reason: Part of BCM2835CprmanState component */
+ dc->user_creatable = false;
}
static const TypeInfo cprman_pll_channel_info = {
@@ -356,12 +360,14 @@ static const VMStateDescription clock_mux_vmstate = {
}
};
-static void clock_mux_class_init(ObjectClass *klass, void *data)
+static void clock_mux_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = clock_mux_reset;
+ device_class_set_legacy_reset(dc, clock_mux_reset);
dc->vmsd = &clock_mux_vmstate;
+ /* Reason: Part of BCM2835CprmanState component */
+ dc->user_creatable = false;
}
static const TypeInfo cprman_clock_mux_info = {
@@ -411,11 +417,13 @@ static const VMStateDescription dsi0hsck_mux_vmstate = {
}
};
-static void dsi0hsck_mux_class_init(ObjectClass *klass, void *data)
+static void dsi0hsck_mux_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &dsi0hsck_mux_vmstate;
+ /* Reason: Part of BCM2835CprmanState component */
+ dc->user_creatable = false;
}
static const TypeInfo cprman_dsi0hsck_mux_info = {
@@ -778,17 +786,16 @@ static const VMStateDescription cprman_vmstate = {
}
};
-static Property cprman_properties[] = {
+static const Property cprman_properties[] = {
DEFINE_PROP_UINT32("xosc-freq-hz", BCM2835CprmanState, xosc_freq, 19200000),
- DEFINE_PROP_END_OF_LIST()
};
-static void cprman_class_init(ObjectClass *klass, void *data)
+static void cprman_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cprman_realize;
- dc->reset = cprman_reset;
+ device_class_set_legacy_reset(dc, cprman_reset);
dc->vmsd = &cprman_vmstate;
device_class_set_props(dc, cprman_properties);
}
diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c
index 67bfc3b..603eaaa 100644
--- a/hw/misc/bcm2835_mbox.c
+++ b/hw/misc/bcm2835_mbox.c
@@ -314,12 +314,12 @@ static void bcm2835_mbox_realize(DeviceState *dev, Error **errp)
bcm2835_mbox_reset(dev);
}
-static void bcm2835_mbox_class_init(ObjectClass *klass, void *data)
+static void bcm2835_mbox_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = bcm2835_mbox_realize;
- dc->reset = bcm2835_mbox_reset;
+ device_class_set_legacy_reset(dc, bcm2835_mbox_reset);
dc->vmsd = &vmstate_bcm2835_mbox;
}
diff --git a/hw/misc/bcm2835_mphi.c b/hw/misc/bcm2835_mphi.c
index f1eeda2..55d79e7 100644
--- a/hw/misc/bcm2835_mphi.c
+++ b/hw/misc/bcm2835_mphi.c
@@ -166,12 +166,12 @@ const VMStateDescription vmstate_mphi_state = {
}
};
-static void mphi_class_init(ObjectClass *klass, void *data)
+static void mphi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mphi_realize;
- dc->reset = mphi_reset;
+ device_class_set_legacy_reset(dc, mphi_reset);
dc->vmsd = &vmstate_mphi_state;
}
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
index 1649da8..3ec7aba 100644
--- a/hw/misc/bcm2835_powermgt.c
+++ b/hw/misc/bcm2835_powermgt.c
@@ -13,7 +13,7 @@
#include "qemu/module.h"
#include "hw/misc/bcm2835_powermgt.h"
#include "migration/vmstate.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#define PASSWORD 0x5a000000
#define PASSWORD_MASK 0xff000000
@@ -136,11 +136,11 @@ static void bcm2835_powermgt_reset(DeviceState *dev)
s->wdog = 0x00000000;
}
-static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
+static void bcm2835_powermgt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_powermgt_reset;
+ device_class_set_legacy_reset(dc, bcm2835_powermgt_reset);
dc->vmsd = &vmstate_bcm2835_powermgt;
}
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
index 63de3db..a21c6a5 100644
--- a/hw/misc/bcm2835_property.c
+++ b/hw/misc/bcm2835_property.c
@@ -13,7 +13,7 @@
#include "hw/irq.h"
#include "hw/misc/bcm2835_mbox_defs.h"
#include "hw/arm/raspberrypi-fw-defs.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@@ -25,14 +25,7 @@
static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
{
- uint32_t tag;
- uint32_t bufsize;
uint32_t tot_len;
- size_t resplen;
- uint32_t tmp;
- int n;
- uint32_t offset, length, color;
- uint32_t start_num, number, otp_row;
/*
* Copy the current state of the framebuffer config; we will update
@@ -51,10 +44,10 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
/* @(addr + 4) : Buffer response code */
value = s->addr + 8;
while (value + 8 <= s->addr + tot_len) {
- tag = ldl_le_phys(&s->dma_as, value);
- bufsize = ldl_le_phys(&s->dma_as, value + 4);
+ uint32_t tag = ldl_le_phys(&s->dma_as, value);
+ uint32_t bufsize = ldl_le_phys(&s->dma_as, value + 4);
/* @(value + 8) : Request/response indicator */
- resplen = 0;
+ size_t resplen = 0;
switch (tag) {
case RPI_FWREQ_PROPERTY_END:
break;
@@ -98,13 +91,16 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 8;
break;
case RPI_FWREQ_SET_POWER_STATE:
- /* Assume that whatever device they asked for exists,
- * and we'll just claim we set it to the desired state
+ {
+ /*
+ * Assume that whatever device they asked for exists,
+ * and we'll just claim we set it to the desired state.
*/
- tmp = ldl_le_phys(&s->dma_as, value + 16);
- stl_le_phys(&s->dma_as, value + 16, (tmp & 1));
+ uint32_t state = ldl_le_phys(&s->dma_as, value + 16);
+ stl_le_phys(&s->dma_as, value + 16, (state & 1));
resplen = 8;
break;
+ }
/* Clocks */
@@ -274,19 +270,25 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 16;
break;
case RPI_FWREQ_FRAMEBUFFER_SET_PALETTE:
- offset = ldl_le_phys(&s->dma_as, value + 12);
- length = ldl_le_phys(&s->dma_as, value + 16);
- n = 0;
- while (n < length - offset) {
- color = ldl_le_phys(&s->dma_as, value + 20 + (n << 2));
- stl_le_phys(&s->dma_as,
- s->fbdev->vcram_base + ((offset + n) << 2), color);
- n++;
+ {
+ uint32_t offset = ldl_le_phys(&s->dma_as, value + 12);
+ uint32_t length = ldl_le_phys(&s->dma_as, value + 16);
+ int resp;
+
+ if (offset > 255 || length < 1 || length > 256) {
+ resp = 1; /* invalid request */
+ } else {
+ for (uint32_t e = 0; e < length; e++) {
+ uint32_t color = ldl_le_phys(&s->dma_as, value + 20 + (e << 2));
+ stl_le_phys(&s->dma_as,
+ s->fbdev->vcram_base + ((offset + e) << 2), color);
+ }
+ resp = 0;
}
- stl_le_phys(&s->dma_as, value + 12, 0);
+ stl_le_phys(&s->dma_as, value + 12, resp);
resplen = 4;
break;
-
+ }
case RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS:
stl_le_phys(&s->dma_as, value + 12, 1);
resplen = 4;
@@ -327,22 +329,25 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
/* Customer OTP */
case RPI_FWREQ_GET_CUSTOMER_OTP:
- start_num = ldl_le_phys(&s->dma_as, value + 12);
- number = ldl_le_phys(&s->dma_as, value + 16);
+ {
+ uint32_t start_num = ldl_le_phys(&s->dma_as, value + 12);
+ uint32_t number = ldl_le_phys(&s->dma_as, value + 16);
resplen = 8 + 4 * number;
- for (n = start_num; n < start_num + number &&
+ for (uint32_t n = start_num; n < start_num + number &&
n < BCM2835_OTP_CUSTOMER_OTP_LEN; n++) {
- otp_row = bcm2835_otp_get_row(s->otp,
+ uint32_t otp_row = bcm2835_otp_get_row(s->otp,
BCM2835_OTP_CUSTOMER_OTP + n);
stl_le_phys(&s->dma_as,
value + 20 + ((n - start_num) << 2), otp_row);
}
break;
+ }
case RPI_FWREQ_SET_CUSTOMER_OTP:
- start_num = ldl_le_phys(&s->dma_as, value + 12);
- number = ldl_le_phys(&s->dma_as, value + 16);
+ {
+ uint32_t start_num = ldl_le_phys(&s->dma_as, value + 12);
+ uint32_t number = ldl_le_phys(&s->dma_as, value + 16);
resplen = 4;
@@ -361,34 +366,37 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
break;
}
- for (n = start_num; n < start_num + number &&
+ for (uint32_t n = start_num; n < start_num + number &&
n < BCM2835_OTP_CUSTOMER_OTP_LEN; n++) {
- otp_row = ldl_le_phys(&s->dma_as,
+ uint32_t otp_row = ldl_le_phys(&s->dma_as,
value + 20 + ((n - start_num) << 2));
bcm2835_otp_set_row(s->otp,
BCM2835_OTP_CUSTOMER_OTP + n, otp_row);
}
break;
+ }
/* Device-specific private key */
-
case RPI_FWREQ_GET_PRIVATE_KEY:
- start_num = ldl_le_phys(&s->dma_as, value + 12);
- number = ldl_le_phys(&s->dma_as, value + 16);
+ {
+ uint32_t start_num = ldl_le_phys(&s->dma_as, value + 12);
+ uint32_t number = ldl_le_phys(&s->dma_as, value + 16);
resplen = 8 + 4 * number;
- for (n = start_num; n < start_num + number &&
+ for (uint32_t n = start_num; n < start_num + number &&
n < BCM2835_OTP_PRIVATE_KEY_LEN; n++) {
- otp_row = bcm2835_otp_get_row(s->otp,
+ uint32_t otp_row = bcm2835_otp_get_row(s->otp,
BCM2835_OTP_PRIVATE_KEY + n);
stl_le_phys(&s->dma_as,
value + 20 + ((n - start_num) << 2), otp_row);
}
break;
+ }
case RPI_FWREQ_SET_PRIVATE_KEY:
- start_num = ldl_le_phys(&s->dma_as, value + 12);
- number = ldl_le_phys(&s->dma_as, value + 16);
+ {
+ uint32_t start_num = ldl_le_phys(&s->dma_as, value + 12);
+ uint32_t number = ldl_le_phys(&s->dma_as, value + 16);
resplen = 4;
@@ -398,14 +406,15 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
break;
}
- for (n = start_num; n < start_num + number &&
+ for (uint32_t n = start_num; n < start_num + number &&
n < BCM2835_OTP_PRIVATE_KEY_LEN; n++) {
- otp_row = ldl_le_phys(&s->dma_as,
+ uint32_t otp_row = ldl_le_phys(&s->dma_as,
value + 20 + ((n - start_num) << 2));
bcm2835_otp_set_row(s->otp,
BCM2835_OTP_PRIVATE_KEY + n, otp_row);
}
break;
+ }
default:
qemu_log_mask(LOG_UNIMP,
"bcm2835_property: unhandled tag 0x%08x\n", tag);
@@ -542,13 +551,12 @@ static void bcm2835_property_realize(DeviceState *dev, Error **errp)
bcm2835_property_reset(dev);
}
-static Property bcm2835_property_props[] = {
+static const Property bcm2835_property_props[] = {
DEFINE_PROP_UINT32("board-rev", BCM2835PropertyState, board_rev, 0),
DEFINE_PROP_STRING("command-line", BCM2835PropertyState, command_line),
- DEFINE_PROP_END_OF_LIST()
};
-static void bcm2835_property_class_init(ObjectClass *klass, void *data)
+static void bcm2835_property_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c
index 10e741b..e4d2c22 100644
--- a/hw/misc/bcm2835_rng.c
+++ b/hw/misc/bcm2835_rng.c
@@ -123,11 +123,11 @@ static void bcm2835_rng_reset(DeviceState *dev)
s->rng_status = 0;
}
-static void bcm2835_rng_class_init(ObjectClass *klass, void *data)
+static void bcm2835_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_rng_reset;
+ device_class_set_legacy_reset(dc, bcm2835_rng_reset);
dc->vmsd = &vmstate_bcm2835_rng;
}
diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c
index 0c49c08..33bfc91 100644
--- a/hw/misc/bcm2835_thermal.c
+++ b/hw/misc/bcm2835_thermal.c
@@ -113,12 +113,12 @@ static const VMStateDescription bcm2835_thermal_vmstate = {
}
};
-static void bcm2835_thermal_class_init(ObjectClass *klass, void *data)
+static void bcm2835_thermal_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = bcm2835_thermal_realize;
- dc->reset = bcm2835_thermal_reset;
+ device_class_set_legacy_reset(dc, bcm2835_thermal_reset);
dc->vmsd = &bcm2835_thermal_vmstate;
}
diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c
deleted file mode 100644
index 653e8dd..0000000
--- a/hw/misc/cbus.c
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma /
- * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms.
- * Based on reverse-engineering of a linux driver.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "hw/misc/cbus.h"
-#include "sysemu/runstate.h"
-
-//#define DEBUG
-
-typedef struct {
- void *opaque;
- void (*io)(void *opaque, int rw, int reg, uint16_t *val);
- int addr;
-} CBusSlave;
-
-typedef struct {
- CBus cbus;
-
- int sel;
- int dat;
- int clk;
- int bit;
- int dir;
- uint16_t val;
- qemu_irq dat_out;
-
- int addr;
- int reg;
- int rw;
- enum {
- cbus_address,
- cbus_value,
- } cycle;
-
- CBusSlave *slave[8];
-} CBusPriv;
-
-static void cbus_io(CBusPriv *s)
-{
- if (s->slave[s->addr])
- s->slave[s->addr]->io(s->slave[s->addr]->opaque,
- s->rw, s->reg, &s->val);
- else
- hw_error("%s: bad slave address %i\n", __func__, s->addr);
-}
-
-static void cbus_cycle(CBusPriv *s)
-{
- switch (s->cycle) {
- case cbus_address:
- s->addr = (s->val >> 6) & 7;
- s->rw = (s->val >> 5) & 1;
- s->reg = (s->val >> 0) & 0x1f;
-
- s->cycle = cbus_value;
- s->bit = 15;
- s->dir = !s->rw;
- s->val = 0;
-
- if (s->rw)
- cbus_io(s);
- break;
-
- case cbus_value:
- if (!s->rw)
- cbus_io(s);
-
- s->cycle = cbus_address;
- s->bit = 8;
- s->dir = 1;
- s->val = 0;
- break;
- }
-}
-
-static void cbus_clk(void *opaque, int line, int level)
-{
- CBusPriv *s = (CBusPriv *) opaque;
-
- if (!s->sel && level && !s->clk) {
- if (s->dir)
- s->val |= s->dat << (s->bit --);
- else
- qemu_set_irq(s->dat_out, (s->val >> (s->bit --)) & 1);
-
- if (s->bit < 0)
- cbus_cycle(s);
- }
-
- s->clk = level;
-}
-
-static void cbus_dat(void *opaque, int line, int level)
-{
- CBusPriv *s = (CBusPriv *) opaque;
-
- s->dat = level;
-}
-
-static void cbus_sel(void *opaque, int line, int level)
-{
- CBusPriv *s = (CBusPriv *) opaque;
-
- if (!level) {
- s->dir = 1;
- s->bit = 8;
- s->val = 0;
- }
-
- s->sel = level;
-}
-
-CBus *cbus_init(qemu_irq dat)
-{
- CBusPriv *s = g_malloc0(sizeof(*s));
-
- s->dat_out = dat;
- s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0);
- s->cbus.dat = qemu_allocate_irq(cbus_dat, s, 0);
- s->cbus.sel = qemu_allocate_irq(cbus_sel, s, 0);
-
- s->sel = 1;
- s->clk = 0;
- s->dat = 0;
-
- return &s->cbus;
-}
-
-void cbus_attach(CBus *bus, void *slave_opaque)
-{
- CBusSlave *slave = (CBusSlave *) slave_opaque;
- CBusPriv *s = (CBusPriv *) bus;
-
- s->slave[slave->addr] = slave;
-}
-
-/* Retu/Vilma */
-typedef struct {
- uint16_t irqst;
- uint16_t irqen;
- uint16_t cc[2];
- int channel;
- uint16_t result[16];
- uint16_t sample;
- uint16_t status;
-
- struct {
- uint16_t cal;
- } rtc;
-
- int is_vilma;
- qemu_irq irq;
- CBusSlave cbus;
-} CBusRetu;
-
-static void retu_interrupt_update(CBusRetu *s)
-{
- qemu_set_irq(s->irq, s->irqst & ~s->irqen);
-}
-
-#define RETU_REG_ASICR 0x00 /* (RO) ASIC ID & revision */
-#define RETU_REG_IDR 0x01 /* (T) Interrupt ID */
-#define RETU_REG_IMR 0x02 /* (RW) Interrupt mask */
-#define RETU_REG_RTCDSR 0x03 /* (RW) RTC seconds register */
-#define RETU_REG_RTCHMR 0x04 /* (RO) RTC hours and minutes reg */
-#define RETU_REG_RTCHMAR 0x05 /* (RW) RTC hours and minutes set reg */
-#define RETU_REG_RTCCALR 0x06 /* (RW) RTC calibration register */
-#define RETU_REG_ADCR 0x08 /* (RW) ADC result register */
-#define RETU_REG_ADCSCR 0x09 /* (RW) ADC sample control register */
-#define RETU_REG_AFCR 0x0a /* (RW) AFC register */
-#define RETU_REG_ANTIFR 0x0b /* (RW) AntiF register */
-#define RETU_REG_CALIBR 0x0c /* (RW) CalibR register*/
-#define RETU_REG_CCR1 0x0d /* (RW) Common control register 1 */
-#define RETU_REG_CCR2 0x0e /* (RW) Common control register 2 */
-#define RETU_REG_RCTRL_CLR 0x0f /* (T) Regulator clear register */
-#define RETU_REG_RCTRL_SET 0x10 /* (T) Regulator set register */
-#define RETU_REG_TXCR 0x11 /* (RW) TxC register */
-#define RETU_REG_STATUS 0x16 /* (RO) Status register */
-#define RETU_REG_WATCHDOG 0x17 /* (RW) Watchdog register */
-#define RETU_REG_AUDTXR 0x18 /* (RW) Audio Codec Tx register */
-#define RETU_REG_AUDPAR 0x19 /* (RW) AudioPA register */
-#define RETU_REG_AUDRXR1 0x1a /* (RW) Audio receive register 1 */
-#define RETU_REG_AUDRXR2 0x1b /* (RW) Audio receive register 2 */
-#define RETU_REG_SGR1 0x1c /* (RW) */
-#define RETU_REG_SCR1 0x1d /* (RW) */
-#define RETU_REG_SGR2 0x1e /* (RW) */
-#define RETU_REG_SCR2 0x1f /* (RW) */
-
-/* Retu Interrupt sources */
-enum {
- retu_int_pwr = 0, /* Power button */
- retu_int_char = 1, /* Charger */
- retu_int_rtcs = 2, /* Seconds */
- retu_int_rtcm = 3, /* Minutes */
- retu_int_rtcd = 4, /* Days */
- retu_int_rtca = 5, /* Alarm */
- retu_int_hook = 6, /* Hook */
- retu_int_head = 7, /* Headset */
- retu_int_adcs = 8, /* ADC sample */
-};
-
-/* Retu ADC channel wiring */
-enum {
- retu_adc_bsi = 1, /* BSI */
- retu_adc_batt_temp = 2, /* Battery temperature */
- retu_adc_chg_volt = 3, /* Charger voltage */
- retu_adc_head_det = 4, /* Headset detection */
- retu_adc_hook_det = 5, /* Hook detection */
- retu_adc_rf_gp = 6, /* RF GP */
- retu_adc_tx_det = 7, /* Wideband Tx detection */
- retu_adc_batt_volt = 8, /* Battery voltage */
- retu_adc_sens = 10, /* Light sensor */
- retu_adc_sens_temp = 11, /* Light sensor temperature */
- retu_adc_bbatt_volt = 12, /* Backup battery voltage */
- retu_adc_self_temp = 13, /* RETU temperature */
-};
-
-static inline uint16_t retu_read(CBusRetu *s, int reg)
-{
-#ifdef DEBUG
- printf("RETU read at %02x\n", reg);
-#endif
-
- switch (reg) {
- case RETU_REG_ASICR:
- return 0x0215 | (s->is_vilma << 7);
-
- case RETU_REG_IDR: /* TODO: Or is this ffs(s->irqst)? */
- return s->irqst;
-
- case RETU_REG_IMR:
- return s->irqen;
-
- case RETU_REG_RTCDSR:
- case RETU_REG_RTCHMR:
- case RETU_REG_RTCHMAR:
- /* TODO */
- return 0x0000;
-
- case RETU_REG_RTCCALR:
- return s->rtc.cal;
-
- case RETU_REG_ADCR:
- return (s->channel << 10) | s->result[s->channel];
- case RETU_REG_ADCSCR:
- return s->sample;
-
- case RETU_REG_AFCR:
- case RETU_REG_ANTIFR:
- case RETU_REG_CALIBR:
- /* TODO */
- return 0x0000;
-
- case RETU_REG_CCR1:
- return s->cc[0];
- case RETU_REG_CCR2:
- return s->cc[1];
-
- case RETU_REG_RCTRL_CLR:
- case RETU_REG_RCTRL_SET:
- case RETU_REG_TXCR:
- /* TODO */
- return 0x0000;
-
- case RETU_REG_STATUS:
- return s->status;
-
- case RETU_REG_WATCHDOG:
- case RETU_REG_AUDTXR:
- case RETU_REG_AUDPAR:
- case RETU_REG_AUDRXR1:
- case RETU_REG_AUDRXR2:
- case RETU_REG_SGR1:
- case RETU_REG_SCR1:
- case RETU_REG_SGR2:
- case RETU_REG_SCR2:
- /* TODO */
- return 0x0000;
-
- default:
- hw_error("%s: bad register %02x\n", __func__, reg);
- }
-}
-
-static inline void retu_write(CBusRetu *s, int reg, uint16_t val)
-{
-#ifdef DEBUG
- printf("RETU write of %04x at %02x\n", val, reg);
-#endif
-
- switch (reg) {
- case RETU_REG_IDR:
- s->irqst ^= val;
- retu_interrupt_update(s);
- break;
-
- case RETU_REG_IMR:
- s->irqen = val;
- retu_interrupt_update(s);
- break;
-
- case RETU_REG_RTCDSR:
- case RETU_REG_RTCHMAR:
- /* TODO */
- break;
-
- case RETU_REG_RTCCALR:
- s->rtc.cal = val;
- break;
-
- case RETU_REG_ADCR:
- s->channel = (val >> 10) & 0xf;
- s->irqst |= 1 << retu_int_adcs;
- retu_interrupt_update(s);
- break;
- case RETU_REG_ADCSCR:
- s->sample &= ~val;
- break;
-
- case RETU_REG_AFCR:
- case RETU_REG_ANTIFR:
- case RETU_REG_CALIBR:
-
- case RETU_REG_CCR1:
- s->cc[0] = val;
- break;
- case RETU_REG_CCR2:
- s->cc[1] = val;
- break;
-
- case RETU_REG_RCTRL_CLR:
- case RETU_REG_RCTRL_SET:
- /* TODO */
- break;
-
- case RETU_REG_WATCHDOG:
- if (val == 0 && (s->cc[0] & 2))
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- break;
-
- case RETU_REG_TXCR:
- case RETU_REG_AUDTXR:
- case RETU_REG_AUDPAR:
- case RETU_REG_AUDRXR1:
- case RETU_REG_AUDRXR2:
- case RETU_REG_SGR1:
- case RETU_REG_SCR1:
- case RETU_REG_SGR2:
- case RETU_REG_SCR2:
- /* TODO */
- break;
-
- default:
- hw_error("%s: bad register %02x\n", __func__, reg);
- }
-}
-
-static void retu_io(void *opaque, int rw, int reg, uint16_t *val)
-{
- CBusRetu *s = (CBusRetu *) opaque;
-
- if (rw)
- *val = retu_read(s, reg);
- else
- retu_write(s, reg, *val);
-}
-
-void *retu_init(qemu_irq irq, int vilma)
-{
- CBusRetu *s = g_malloc0(sizeof(*s));
-
- s->irq = irq;
- s->irqen = 0xffff;
- s->irqst = 0x0000;
- s->status = 0x0020;
- s->is_vilma = !!vilma;
- s->rtc.cal = 0x01;
- s->result[retu_adc_bsi] = 0x3c2;
- s->result[retu_adc_batt_temp] = 0x0fc;
- s->result[retu_adc_chg_volt] = 0x165;
- s->result[retu_adc_head_det] = 123;
- s->result[retu_adc_hook_det] = 1023;
- s->result[retu_adc_rf_gp] = 0x11;
- s->result[retu_adc_tx_det] = 0x11;
- s->result[retu_adc_batt_volt] = 0x250;
- s->result[retu_adc_sens] = 2;
- s->result[retu_adc_sens_temp] = 0x11;
- s->result[retu_adc_bbatt_volt] = 0x3d0;
- s->result[retu_adc_self_temp] = 0x330;
-
- s->cbus.opaque = s;
- s->cbus.io = retu_io;
- s->cbus.addr = 1;
-
- return &s->cbus;
-}
-
-void retu_key_event(void *retu, int state)
-{
- CBusSlave *slave = (CBusSlave *) retu;
- CBusRetu *s = (CBusRetu *) slave->opaque;
-
- s->irqst |= 1 << retu_int_pwr;
- retu_interrupt_update(s);
-
- if (state)
- s->status &= ~(1 << 5);
- else
- s->status |= 1 << 5;
-}
-
-#if 0
-static void retu_head_event(void *retu, int state)
-{
- CBusSlave *slave = (CBusSlave *) retu;
- CBusRetu *s = (CBusRetu *) slave->opaque;
-
- if ((s->cc[0] & 0x500) == 0x500) { /* TODO: Which bits? */
- /* TODO: reissue the interrupt every 100ms or so. */
- s->irqst |= 1 << retu_int_head;
- retu_interrupt_update(s);
- }
-
- if (state)
- s->result[retu_adc_head_det] = 50;
- else
- s->result[retu_adc_head_det] = 123;
-}
-
-static void retu_hook_event(void *retu, int state)
-{
- CBusSlave *slave = (CBusSlave *) retu;
- CBusRetu *s = (CBusRetu *) slave->opaque;
-
- if ((s->cc[0] & 0x500) == 0x500) {
- /* TODO: reissue the interrupt every 100ms or so. */
- s->irqst |= 1 << retu_int_hook;
- retu_interrupt_update(s);
- }
-
- if (state)
- s->result[retu_adc_hook_det] = 50;
- else
- s->result[retu_adc_hook_det] = 123;
-}
-#endif
-
-/* Tahvo/Betty */
-typedef struct {
- uint16_t irqst;
- uint16_t irqen;
- uint8_t charger;
- uint8_t backlight;
- uint16_t usbr;
- uint16_t power;
-
- int is_betty;
- qemu_irq irq;
- CBusSlave cbus;
-} CBusTahvo;
-
-static void tahvo_interrupt_update(CBusTahvo *s)
-{
- qemu_set_irq(s->irq, s->irqst & ~s->irqen);
-}
-
-#define TAHVO_REG_ASICR 0x00 /* (RO) ASIC ID & revision */
-#define TAHVO_REG_IDR 0x01 /* (T) Interrupt ID */
-#define TAHVO_REG_IDSR 0x02 /* (RO) Interrupt status */
-#define TAHVO_REG_IMR 0x03 /* (RW) Interrupt mask */
-#define TAHVO_REG_CHAPWMR 0x04 /* (RW) Charger PWM */
-#define TAHVO_REG_LEDPWMR 0x05 /* (RW) LED PWM */
-#define TAHVO_REG_USBR 0x06 /* (RW) USB control */
-#define TAHVO_REG_RCR 0x07 /* (RW) Some kind of power management */
-#define TAHVO_REG_CCR1 0x08 /* (RW) Common control register 1 */
-#define TAHVO_REG_CCR2 0x09 /* (RW) Common control register 2 */
-#define TAHVO_REG_TESTR1 0x0a /* (RW) Test register 1 */
-#define TAHVO_REG_TESTR2 0x0b /* (RW) Test register 2 */
-#define TAHVO_REG_NOPR 0x0c /* (RW) Number of periods */
-#define TAHVO_REG_FRR 0x0d /* (RO) FR */
-
-static inline uint16_t tahvo_read(CBusTahvo *s, int reg)
-{
-#ifdef DEBUG
- printf("TAHVO read at %02x\n", reg);
-#endif
-
- switch (reg) {
- case TAHVO_REG_ASICR:
- return 0x0021 | (s->is_betty ? 0x0b00 : 0x0300); /* 22 in N810 */
-
- case TAHVO_REG_IDR:
- case TAHVO_REG_IDSR: /* XXX: what does this do? */
- return s->irqst;
-
- case TAHVO_REG_IMR:
- return s->irqen;
-
- case TAHVO_REG_CHAPWMR:
- return s->charger;
-
- case TAHVO_REG_LEDPWMR:
- return s->backlight;
-
- case TAHVO_REG_USBR:
- return s->usbr;
-
- case TAHVO_REG_RCR:
- return s->power;
-
- case TAHVO_REG_CCR1:
- case TAHVO_REG_CCR2:
- case TAHVO_REG_TESTR1:
- case TAHVO_REG_TESTR2:
- case TAHVO_REG_NOPR:
- case TAHVO_REG_FRR:
- return 0x0000;
-
- default:
- hw_error("%s: bad register %02x\n", __func__, reg);
- }
-}
-
-static inline void tahvo_write(CBusTahvo *s, int reg, uint16_t val)
-{
-#ifdef DEBUG
- printf("TAHVO write of %04x at %02x\n", val, reg);
-#endif
-
- switch (reg) {
- case TAHVO_REG_IDR:
- s->irqst ^= val;
- tahvo_interrupt_update(s);
- break;
-
- case TAHVO_REG_IMR:
- s->irqen = val;
- tahvo_interrupt_update(s);
- break;
-
- case TAHVO_REG_CHAPWMR:
- s->charger = val;
- break;
-
- case TAHVO_REG_LEDPWMR:
- if (s->backlight != (val & 0x7f)) {
- s->backlight = val & 0x7f;
- printf("%s: LCD backlight now at %i / 127\n",
- __func__, s->backlight);
- }
- break;
-
- case TAHVO_REG_USBR:
- s->usbr = val;
- break;
-
- case TAHVO_REG_RCR:
- s->power = val;
- break;
-
- case TAHVO_REG_CCR1:
- case TAHVO_REG_CCR2:
- case TAHVO_REG_TESTR1:
- case TAHVO_REG_TESTR2:
- case TAHVO_REG_NOPR:
- case TAHVO_REG_FRR:
- break;
-
- default:
- hw_error("%s: bad register %02x\n", __func__, reg);
- }
-}
-
-static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val)
-{
- CBusTahvo *s = (CBusTahvo *) opaque;
-
- if (rw)
- *val = tahvo_read(s, reg);
- else
- tahvo_write(s, reg, *val);
-}
-
-void *tahvo_init(qemu_irq irq, int betty)
-{
- CBusTahvo *s = g_malloc0(sizeof(*s));
-
- s->irq = irq;
- s->irqen = 0xffff;
- s->irqst = 0x0000;
- s->is_betty = !!betty;
-
- s->cbus.opaque = s;
- s->cbus.io = tahvo_io;
- s->cbus.addr = 2;
-
- return &s->cbus;
-}
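
The deleted Retu/Tahvo code above drives its outgoing IRQ line as "pending and not masked": retu_interrupt_update()/tahvo_interrupt_update() call qemu_set_irq(irq, irqst & ~irqen), so a set bit in irqen means the source is masked. A minimal standalone sketch of just that level computation (plain C, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* IRQ line is high when any pending bit (irqst) is not masked (irqen). */
static int irq_level(uint16_t irqst, uint16_t irqen)
{
    return (irqst & ~irqen) != 0;
}

int main(void)
{
    printf("%d\n", irq_level(0x0004, 0xffff));  /* 0: everything masked */
    printf("%d\n", irq_level(0x0004, 0xfffb));  /* 1: bit 2 unmasked    */
    return 0;
}
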
diff --git a/hw/misc/debugexit.c b/hw/misc/debugexit.c
index c5c562f..04a9fc3 100644
--- a/hw/misc/debugexit.c
+++ b/hw/misc/debugexit.c
@@ -12,7 +12,7 @@
#include "hw/qdev-properties.h"
#include "qemu/module.h"
#include "qom/object.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#define TYPE_ISA_DEBUG_EXIT_DEVICE "isa-debug-exit"
OBJECT_DECLARE_SIMPLE_TYPE(ISADebugExitState, ISA_DEBUG_EXIT_DEVICE)
@@ -56,13 +56,12 @@ static void debug_exit_realizefn(DeviceState *d, Error **errp)
isa->iobase, &isa->io);
}
-static Property debug_exit_properties[] = {
+static const Property debug_exit_properties[] = {
DEFINE_PROP_UINT32("iobase", ISADebugExitState, iobase, 0x501),
DEFINE_PROP_UINT32("iosize", ISADebugExitState, iosize, 0x02),
- DEFINE_PROP_END_OF_LIST(),
};
-static void debug_exit_class_initfn(ObjectClass *klass, void *data)
+static void debug_exit_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/djmemc.c b/hw/misc/djmemc.c
index 96d5efb..c5b09f5 100644
--- a/hw/misc/djmemc.c
+++ b/hw/misc/djmemc.c
@@ -113,7 +113,7 @@ static const VMStateDescription vmstate_djmemc = {
}
};
-static void djmemc_class_init(ObjectClass *oc, void *data)
+static void djmemc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c
index 5a14a48..81fc536 100644
--- a/hw/misc/eccmemctl.c
+++ b/hw/misc/eccmemctl.c
@@ -325,17 +325,16 @@ static void ecc_realize(DeviceState *dev, Error **errp)
}
}
-static Property ecc_properties[] = {
+static const Property ecc_properties[] = {
DEFINE_PROP_UINT32("version", ECCState, version, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ecc_class_init(ObjectClass *klass, void *data)
+static void ecc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ecc_realize;
- dc->reset = ecc_reset;
+ device_class_set_legacy_reset(dc, ecc_reset);
dc->vmsd = &vmstate_ecc;
device_class_set_props(dc, ecc_properties);
}
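
The eccmemctl.c hunk above shows the conversion pattern repeated throughout this series: property arrays become const and drop DEFINE_PROP_END_OF_LIST(), class_init takes const void *data, and the legacy DeviceClass::reset assignment goes through device_class_set_legacy_reset(). A condensed before/after sketch for a hypothetical device (FooState, foo_reset and the property are illustrative, not taken from this patch):

/* Before */
static Property foo_properties[] = {
    DEFINE_PROP_UINT32("version", FooState, version, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void foo_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = foo_reset;
    device_class_set_props(dc, foo_properties);
}

/* After: const data, no end-of-list sentinel, reset wrapped for Resettable */
static const Property foo_properties[] = {
    DEFINE_PROP_UINT32("version", FooState, version, -1),
};

static void foo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, foo_reset);
    device_class_set_props(dc, foo_properties);
}
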
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
index 504178b..cece633 100644
--- a/hw/misc/edu.c
+++ b/hw/misc/edu.c
@@ -415,7 +415,7 @@ static void edu_instance_init(Object *obj)
&edu->dma_mask, OBJ_PROP_FLAG_READWRITE);
}
-static void edu_class_init(ObjectClass *class, void *data)
+static void edu_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *k = PCI_DEVICE_CLASS(class);
@@ -429,21 +429,18 @@ static void edu_class_init(ObjectClass *class, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
-static void pci_edu_register_types(void)
-{
- static InterfaceInfo interfaces[] = {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- };
- static const TypeInfo edu_info = {
+static const TypeInfo edu_types[] = {
+ {
.name = TYPE_PCI_EDU_DEVICE,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(EduState),
.instance_init = edu_instance_init,
.class_init = edu_class_init,
- .interfaces = interfaces,
- };
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+ }
+};
- type_register_static(&edu_info);
-}
-type_init(pci_edu_register_types)
+DEFINE_TYPES(edu_types)
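
The edu.c hunk above also replaces the hand-written type_init()/type_register_static() boilerplate with a DEFINE_TYPES() array. Roughly, the array form stands in for a loop like the following sketch (hypothetical foo_types; this is the equivalent hand-written shape, not the actual macro expansion):

static void foo_register_types(void)
{
    size_t i;

    /* Register every TypeInfo in the array, as DEFINE_TYPES() arranges. */
    for (i = 0; i < ARRAY_SIZE(foo_types); i++) {
        type_register_static(&foo_types[i]);
    }
}

type_init(foo_register_types)
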
diff --git a/hw/misc/empty_slot.c b/hw/misc/empty_slot.c
index 37b0ddf..239d760 100644
--- a/hw/misc/empty_slot.c
+++ b/hw/misc/empty_slot.c
@@ -79,13 +79,12 @@ static void empty_slot_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
-static Property empty_slot_properties[] = {
+static const Property empty_slot_properties[] = {
DEFINE_PROP_UINT64("size", EmptySlot, size, 0),
DEFINE_PROP_STRING("name", EmptySlot, name),
- DEFINE_PROP_END_OF_LIST(),
};
-static void empty_slot_class_init(ObjectClass *klass, void *data)
+static void empty_slot_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c
index 4566a42..fdf5bdd 100644
--- a/hw/misc/exynos4210_clk.c
+++ b/hw/misc/exynos4210_clk.c
@@ -141,11 +141,11 @@ static const VMStateDescription exynos4210_clk_vmstate = {
}
};
-static void exynos4210_clk_class_init(ObjectClass *klass, void *data)
+static void exynos4210_clk_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_clk_reset;
+ device_class_set_legacy_reset(dc, exynos4210_clk_reset);
dc->vmsd = &exynos4210_clk_vmstate;
}
diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c
index 7e28e79..a86ec9a 100644
--- a/hw/misc/exynos4210_pmu.c
+++ b/hw/misc/exynos4210_pmu.c
@@ -28,7 +28,7 @@
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qom/object.h"
#ifndef DEBUG_PMU
@@ -498,11 +498,11 @@ static const VMStateDescription exynos4210_pmu_vmstate = {
}
};
-static void exynos4210_pmu_class_init(ObjectClass *klass, void *data)
+static void exynos4210_pmu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_pmu_reset;
+ device_class_set_legacy_reset(dc, exynos4210_pmu_reset);
dc->vmsd = &exynos4210_pmu_vmstate;
}
diff --git a/hw/misc/exynos4210_rng.c b/hw/misc/exynos4210_rng.c
index 674d8ee..2d0ebc4 100644
--- a/hw/misc/exynos4210_rng.c
+++ b/hw/misc/exynos4210_rng.c
@@ -255,11 +255,11 @@ static const VMStateDescription exynos4210_rng_vmstate = {
}
};
-static void exynos4210_rng_class_init(ObjectClass *klass, void *data)
+static void exynos4210_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_rng_reset;
+ device_class_set_legacy_reset(dc, exynos4210_rng_reset);
dc->vmsd = &exynos4210_rng_vmstate;
}
diff --git a/hw/misc/grlib_ahb_apb_pnp.c b/hw/misc/grlib_ahb_apb_pnp.c
index 5b05f15..cdca00a 100644
--- a/hw/misc/grlib_ahb_apb_pnp.c
+++ b/hw/misc/grlib_ahb_apb_pnp.c
@@ -168,7 +168,7 @@ static void grlib_ahb_pnp_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &ahb_pnp->iomem);
}
-static void grlib_ahb_pnp_class_init(ObjectClass *klass, void *data)
+static void grlib_ahb_pnp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -280,7 +280,7 @@ static void grlib_apb_pnp_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &apb_pnp->iomem);
}
-static void grlib_apb_pnp_class_init(ObjectClass *klass, void *data)
+static void grlib_apb_pnp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/i2c-echo.c b/hw/misc/i2c-echo.c
index 5ae3d08..2bb99ec 100644
--- a/hw/misc/i2c-echo.c
+++ b/hw/misc/i2c-echo.c
@@ -13,6 +13,7 @@
#include "qemu/main-loop.h"
#include "block/aio.h"
#include "hw/i2c/i2c.h"
+#include "trace.h"
#define TYPE_I2C_ECHO "i2c-echo"
OBJECT_DECLARE_SIMPLE_TYPE(I2CEchoState, I2C_ECHO)
@@ -80,11 +81,13 @@ static int i2c_echo_event(I2CSlave *s, enum i2c_event event)
case I2C_START_RECV:
state->pos = 0;
+ trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_START_RECV");
break;
case I2C_START_SEND:
state->pos = 0;
+ trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_START_SEND");
break;
case I2C_FINISH:
@@ -92,12 +95,15 @@ static int i2c_echo_event(I2CSlave *s, enum i2c_event event)
state->state = I2C_ECHO_STATE_START_SEND;
i2c_bus_master(state->bus, state->bh);
+ trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_FINISH");
break;
case I2C_NACK:
+ trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_NACK");
break;
default:
+ trace_i2c_echo_event(DEVICE(s)->canonical_path, "UNHANDLED");
return -1;
}
@@ -112,6 +118,7 @@ static uint8_t i2c_echo_recv(I2CSlave *s)
return 0xff;
}
+ trace_i2c_echo_recv(DEVICE(s)->canonical_path, state->data[state->pos]);
return state->data[state->pos++];
}
@@ -119,6 +126,7 @@ static int i2c_echo_send(I2CSlave *s, uint8_t data)
{
I2CEchoState *state = I2C_ECHO(s);
+ trace_i2c_echo_send(DEVICE(s)->canonical_path, data);
if (state->pos > 2) {
return -1;
}
@@ -135,11 +143,9 @@ static void i2c_echo_realize(DeviceState *dev, Error **errp)
state->bus = I2C_BUS(bus);
state->bh = qemu_bh_new(i2c_echo_bh, state);
-
- return;
}
-static void i2c_echo_class_init(ObjectClass *oc, void *data)
+static void i2c_echo_class_init(ObjectClass *oc, const void *data)
{
I2CSlaveClass *sc = I2C_SLAVE_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c
index faa726a..a6665d5 100644
--- a/hw/misc/imx25_ccm.c
+++ b/hw/misc/imx25_ccm.c
@@ -292,12 +292,12 @@ static void imx25_ccm_init(Object *obj)
sysbus_init_mmio(sd, &s->iomem);
}
-static void imx25_ccm_class_init(ObjectClass *klass, void *data)
+static void imx25_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
- dc->reset = imx25_ccm_reset;
+ device_class_set_legacy_reset(dc, imx25_ccm_reset);
dc->vmsd = &vmstate_imx25_ccm;
dc->desc = "i.MX25 Clock Control Module";
diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c
index 125d4fc..339458e 100644
--- a/hw/misc/imx31_ccm.c
+++ b/hw/misc/imx31_ccm.c
@@ -319,12 +319,12 @@ static void imx31_ccm_init(Object *obj)
sysbus_init_mmio(sd, &s->iomem);
}
-static void imx31_ccm_class_init(ObjectClass *klass, void *data)
+static void imx31_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
- dc->reset = imx31_ccm_reset;
+ device_class_set_legacy_reset(dc, imx31_ccm_reset);
dc->vmsd = &vmstate_imx31_ccm;
dc->desc = "i.MX31 Clock Control Module";
diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c
index b1def7f..a10b67d 100644
--- a/hw/misc/imx6_ccm.c
+++ b/hw/misc/imx6_ccm.c
@@ -301,7 +301,6 @@ static uint64_t imx6_analog_get_periph_clk(IMX6CCMState *dev)
default:
/* We should never get there */
g_assert_not_reached();
- break;
}
trace_imx6_analog_get_periph_clk(freq);
@@ -742,12 +741,12 @@ static void imx6_ccm_init(Object *obj)
sysbus_init_mmio(sd, &s->container);
}
-static void imx6_ccm_class_init(ObjectClass *klass, void *data)
+static void imx6_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
- dc->reset = imx6_ccm_reset;
+ device_class_set_legacy_reset(dc, imx6_ccm_reset);
dc->vmsd = &vmstate_imx6_ccm;
dc->desc = "i.MX6 Clock Control Module";
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
index 3766bdf..8d2c417 100644
--- a/hw/misc/imx6_src.c
+++ b/hw/misc/imx6_src.c
@@ -17,18 +17,7 @@
#include "qemu/module.h"
#include "target/arm/arm-powerctl.h"
#include "hw/core/cpu.h"
-
-#ifndef DEBUG_IMX6_SRC
-#define DEBUG_IMX6_SRC 0
-#endif
-
-#define DPRINTF(fmt, args...) \
- do { \
- if (DEBUG_IMX6_SRC) { \
- fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX6_SRC, \
- __func__, ##args); \
- } \
- } while (0)
+#include "trace.h"
static const char *imx6_src_reg_name(uint32_t reg)
{
@@ -87,7 +76,7 @@ static void imx6_src_reset(DeviceState *dev)
{
IMX6SRCState *s = IMX6_SRC(dev);
- DPRINTF("\n");
+ trace_imx6_src_reset();
memset(s->regs, 0, sizeof(s->regs));
@@ -111,7 +100,7 @@ static uint64_t imx6_src_read(void *opaque, hwaddr offset, unsigned size)
}
- DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_src_reg_name(index), value);
+ trace_imx6_src_read(imx6_src_reg_name(index), value);
return value;
}
@@ -134,8 +123,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
assert(bql_locked());
s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
- DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
- imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]);
+ trace_imx6_clear_reset_bit(imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]);
g_free(ri);
}
@@ -173,8 +161,7 @@ static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
return;
}
- DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_src_reg_name(index),
- (uint32_t)current_value);
+ trace_imx6_src_write(imx6_src_reg_name(index), value);
change_mask = s->regs[index] ^ (uint32_t)current_value;
@@ -286,12 +273,12 @@ static void imx6_src_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
-static void imx6_src_class_init(ObjectClass *klass, void *data)
+static void imx6_src_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx6_src_realize;
- dc->reset = imx6_src_reset;
+ device_class_set_legacy_reset(dc, imx6_src_reset);
dc->vmsd = &vmstate_imx6_src;
dc->desc = "i.MX6 System Reset Controller";
}
diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c
index 0ac49ea..7f3ae61 100644
--- a/hw/misc/imx6ul_ccm.c
+++ b/hw/misc/imx6ul_ccm.c
@@ -904,12 +904,12 @@ static void imx6ul_ccm_init(Object *obj)
sysbus_init_mmio(sd, &s->container);
}
-static void imx6ul_ccm_class_init(ObjectClass *klass, void *data)
+static void imx6ul_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
- dc->reset = imx6ul_ccm_reset;
+ device_class_set_legacy_reset(dc, imx6ul_ccm_reset);
dc->vmsd = &vmstate_imx6ul_ccm;
dc->desc = "i.MX6UL Clock Control Module";
diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c
index 88354f0..c061a58 100644
--- a/hw/misc/imx7_ccm.c
+++ b/hw/misc/imx7_ccm.c
@@ -262,12 +262,12 @@ static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
return freq;
}
-static void imx7_ccm_class_init(ObjectClass *klass, void *data)
+static void imx7_ccm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
- dc->reset = imx7_ccm_reset;
+ device_class_set_legacy_reset(dc, imx7_ccm_reset);
dc->vmsd = &vmstate_imx7_ccm;
dc->desc = "i.MX7 Clock Control Module";
@@ -293,11 +293,11 @@ static const VMStateDescription vmstate_imx7_analog = {
},
};
-static void imx7_analog_class_init(ObjectClass *klass, void *data)
+static void imx7_analog_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = imx7_analog_reset;
+ device_class_set_legacy_reset(dc, imx7_analog_reset);
dc->vmsd = &vmstate_imx7_analog;
dc->desc = "i.MX7 Analog Module";
}
diff --git a/hw/misc/imx7_gpr.c b/hw/misc/imx7_gpr.c
index b03341a..e12b496 100644
--- a/hw/misc/imx7_gpr.c
+++ b/hw/misc/imx7_gpr.c
@@ -102,7 +102,7 @@ static void imx7_gpr_init(Object *obj)
sysbus_init_mmio(sd, &s->mmio);
}
-static void imx7_gpr_class_init(ObjectClass *klass, void *data)
+static void imx7_gpr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c
index edb2df2..6a8733d 100644
--- a/hw/misc/imx7_snvs.c
+++ b/hw/misc/imx7_snvs.c
@@ -19,9 +19,9 @@
#include "hw/misc/imx7_snvs.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/rtc.h"
+#include "system/runstate.h"
#include "trace.h"
#define RTC_FREQ 32768ULL
@@ -143,11 +143,11 @@ static void imx7_snvs_init(Object *obj)
qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
}
-static void imx7_snvs_class_init(ObjectClass *klass, void *data)
+static void imx7_snvs_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = imx7_snvs_reset;
+ device_class_set_legacy_reset(dc, imx7_snvs_reset);
dc->vmsd = &vmstate_imx7_snvs;
dc->desc = "i.MX7 Secure Non-Volatile Storage Module";
}
diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c
index d19f045..df0b0a6 100644
--- a/hw/misc/imx7_src.c
+++ b/hw/misc/imx7_src.c
@@ -251,12 +251,12 @@ static void imx7_src_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
-static void imx7_src_class_init(ObjectClass *klass, void *data)
+static void imx7_src_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx7_src_realize;
- dc->reset = imx7_src_reset;
+ device_class_set_legacy_reset(dc, imx7_src_reset);
dc->vmsd = &vmstate_imx7_src;
dc->desc = "i.MX6 System Reset Controller";
}
diff --git a/hw/misc/imx8mp_analog.c b/hw/misc/imx8mp_analog.c
new file mode 100644
index 0000000..23ffae8
--- /dev/null
+++ b/hw/misc/imx8mp_analog.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * i.MX 8M Plus ANALOG IP block emulation code
+ *
+ * Based on hw/misc/imx7_ccm.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "hw/misc/imx8mp_analog.h"
+#include "migration/vmstate.h"
+
+#define ANALOG_PLL_LOCK BIT(31)
+
+static void imx8mp_analog_reset(DeviceState *dev)
+{
+ IMX8MPAnalogState *s = IMX8MP_ANALOG(dev);
+
+ memset(s->analog, 0, sizeof(s->analog));
+
+ s->analog[ANALOG_AUDIO_PLL1_GEN_CTRL] = 0x00002010;
+ s->analog[ANALOG_AUDIO_PLL1_FDIV_CTL0] = 0x00145032;
+ s->analog[ANALOG_AUDIO_PLL1_FDIV_CTL1] = 0x00000000;
+ s->analog[ANALOG_AUDIO_PLL1_SSCG_CTRL] = 0x00000000;
+ s->analog[ANALOG_AUDIO_PLL1_MNIT_CTRL] = 0x00100103;
+ s->analog[ANALOG_AUDIO_PLL2_GEN_CTRL] = 0x00002010;
+ s->analog[ANALOG_AUDIO_PLL2_FDIV_CTL0] = 0x00145032;
+ s->analog[ANALOG_AUDIO_PLL2_FDIV_CTL1] = 0x00000000;
+ s->analog[ANALOG_AUDIO_PLL2_SSCG_CTRL] = 0x00000000;
+ s->analog[ANALOG_AUDIO_PLL2_MNIT_CTRL] = 0x00100103;
+ s->analog[ANALOG_VIDEO_PLL1_GEN_CTRL] = 0x00002010;
+ s->analog[ANALOG_VIDEO_PLL1_FDIV_CTL0] = 0x00145032;
+ s->analog[ANALOG_VIDEO_PLL1_FDIV_CTL1] = 0x00000000;
+ s->analog[ANALOG_VIDEO_PLL1_SSCG_CTRL] = 0x00000000;
+ s->analog[ANALOG_VIDEO_PLL1_MNIT_CTRL] = 0x00100103;
+ s->analog[ANALOG_DRAM_PLL_GEN_CTRL] = 0x00002010;
+ s->analog[ANALOG_DRAM_PLL_FDIV_CTL0] = 0x0012c032;
+ s->analog[ANALOG_DRAM_PLL_FDIV_CTL1] = 0x00000000;
+ s->analog[ANALOG_DRAM_PLL_SSCG_CTRL] = 0x00000000;
+ s->analog[ANALOG_DRAM_PLL_MNIT_CTRL] = 0x00100103;
+ s->analog[ANALOG_GPU_PLL_GEN_CTRL] = 0x00000810;
+ s->analog[ANALOG_GPU_PLL_FDIV_CTL0] = 0x000c8031;
+ s->analog[ANALOG_GPU_PLL_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_GPU_PLL_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_VPU_PLL_GEN_CTRL] = 0x00000810;
+ s->analog[ANALOG_VPU_PLL_FDIV_CTL0] = 0x0012c032;
+ s->analog[ANALOG_VPU_PLL_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_VPU_PLL_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_ARM_PLL_GEN_CTRL] = 0x00000810;
+ s->analog[ANALOG_ARM_PLL_FDIV_CTL0] = 0x000fa031;
+ s->analog[ANALOG_ARM_PLL_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_ARM_PLL_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_SYS_PLL1_GEN_CTRL] = 0x0aaaa810;
+ s->analog[ANALOG_SYS_PLL1_FDIV_CTL0] = 0x00190032;
+ s->analog[ANALOG_SYS_PLL1_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_SYS_PLL1_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_SYS_PLL2_GEN_CTRL] = 0x0aaaa810;
+ s->analog[ANALOG_SYS_PLL2_FDIV_CTL0] = 0x000fa031;
+ s->analog[ANALOG_SYS_PLL2_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_SYS_PLL2_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_SYS_PLL3_GEN_CTRL] = 0x00000810;
+ s->analog[ANALOG_SYS_PLL3_FDIV_CTL0] = 0x000fa031;
+ s->analog[ANALOG_SYS_PLL3_LOCKD_CTRL] = 0x0010003f;
+ s->analog[ANALOG_SYS_PLL3_MNIT_CTRL] = 0x00280081;
+ s->analog[ANALOG_OSC_MISC_CFG] = 0x00000000;
+ s->analog[ANALOG_ANAMIX_PLL_MNIT_CTL] = 0x00000000;
+ s->analog[ANALOG_DIGPROG] = 0x00824010;
+
+ /* all PLLs need to be locked */
+ s->analog[ANALOG_AUDIO_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_AUDIO_PLL2_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_VIDEO_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_DRAM_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_GPU_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_VPU_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_ARM_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_SYS_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_SYS_PLL2_GEN_CTRL] |= ANALOG_PLL_LOCK;
+ s->analog[ANALOG_SYS_PLL3_GEN_CTRL] |= ANALOG_PLL_LOCK;
+}
+
+static uint64_t imx8mp_analog_read(void *opaque, hwaddr offset, unsigned size)
+{
+ IMX8MPAnalogState *s = opaque;
+
+ return s->analog[offset >> 2];
+}
+
+static void imx8mp_analog_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ IMX8MPAnalogState *s = opaque;
+
+ if (offset >> 2 == ANALOG_DIGPROG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Guest write to read-only ANALOG_DIGPROG register\n");
+ } else {
+ s->analog[offset >> 2] = value;
+ }
+}
+
+static const struct MemoryRegionOps imx8mp_analog_ops = {
+ .read = imx8mp_analog_read,
+ .write = imx8mp_analog_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void imx8mp_analog_init(Object *obj)
+{
+ IMX8MPAnalogState *s = IMX8MP_ANALOG(obj);
+ SysBusDevice *sd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init(&s->mmio.container, obj, TYPE_IMX8MP_ANALOG, 0x10000);
+
+ memory_region_init_io(&s->mmio.analog, obj, &imx8mp_analog_ops, s,
+ TYPE_IMX8MP_ANALOG, sizeof(s->analog));
+ memory_region_add_subregion(&s->mmio.container, 0, &s->mmio.analog);
+
+ sysbus_init_mmio(sd, &s->mmio.container);
+}
+
+static const VMStateDescription imx8mp_analog_vmstate = {
+ .name = TYPE_IMX8MP_ANALOG,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(analog, IMX8MPAnalogState, ANALOG_MAX),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void imx8mp_analog_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_legacy_reset(dc, imx8mp_analog_reset);
+ dc->vmsd = &imx8mp_analog_vmstate;
+ dc->desc = "i.MX 8M Plus Analog Module";
+}
+
+static const TypeInfo imx8mp_analog_types[] = {
+ {
+ .name = TYPE_IMX8MP_ANALOG,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMX8MPAnalogState),
+ .instance_init = imx8mp_analog_init,
+ .class_init = imx8mp_analog_class_init,
+ }
+};
+
+DEFINE_TYPES(imx8mp_analog_types);
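
imx8mp_analog_read()/imx8mp_analog_write() above index a flat array of 32-bit registers by offset >> 2 and refuse writes to ANALOG_DIGPROG. A standalone model of that access path (plain C, compilable on its own; NUM_REGS and DIGPROG_IX are stand-ins, the real values come from the device header):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_REGS   64      /* stand-in for ANALOG_MAX */
#define DIGPROG_IX 0x20    /* stand-in for the ANALOG_DIGPROG index */

static uint32_t regs[NUM_REGS];

static uint32_t mmio_read(uint32_t offset)
{
    return regs[offset >> 2];               /* one 32-bit register per 4 bytes */
}

static void mmio_write(uint32_t offset, uint32_t value)
{
    if ((offset >> 2) == DIGPROG_IX) {
        fprintf(stderr, "write to read-only DIGPROG ignored\n");
        return;
    }
    regs[offset >> 2] = value;
}

int main(void)
{
    mmio_write(DIGPROG_IX * 4, 0xdeadbeef); /* rejected, register unchanged */
    mmio_write(0x0, 0x00002010);
    printf("reg[0] = 0x%08" PRIx32 "\n", mmio_read(0x0));
    return 0;
}
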
diff --git a/hw/misc/imx8mp_ccm.c b/hw/misc/imx8mp_ccm.c
new file mode 100644
index 0000000..911911e
--- /dev/null
+++ b/hw/misc/imx8mp_ccm.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * i.MX 8M Plus CCM IP block emulation code
+ *
+ * Based on hw/misc/imx7_ccm.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "hw/misc/imx8mp_ccm.h"
+#include "migration/vmstate.h"
+
+#include "trace.h"
+
+#define CKIH_FREQ 16000000 /* 16MHz crystal input */
+
+static void imx8mp_ccm_reset(DeviceState *dev)
+{
+ IMX8MPCCMState *s = IMX8MP_CCM(dev);
+
+ memset(s->ccm, 0, sizeof(s->ccm));
+}
+
+#define CCM_INDEX(offset) (((offset) & ~(hwaddr)0xF) / sizeof(uint32_t))
+#define CCM_BITOP(offset) ((offset) & (hwaddr)0xF)
+
+enum {
+ CCM_BITOP_NONE = 0x00,
+ CCM_BITOP_SET = 0x04,
+ CCM_BITOP_CLR = 0x08,
+ CCM_BITOP_TOG = 0x0C,
+};
+
+static uint64_t imx8mp_set_clr_tog_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ const uint32_t *mmio = opaque;
+
+ return mmio[CCM_INDEX(offset)];
+}
+
+static void imx8mp_set_clr_tog_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ const uint8_t bitop = CCM_BITOP(offset);
+ const uint32_t index = CCM_INDEX(offset);
+ uint32_t *mmio = opaque;
+
+ switch (bitop) {
+ case CCM_BITOP_NONE:
+ mmio[index] = value;
+ break;
+ case CCM_BITOP_SET:
+ mmio[index] |= value;
+ break;
+ case CCM_BITOP_CLR:
+ mmio[index] &= ~value;
+ break;
+ case CCM_BITOP_TOG:
+ mmio[index] ^= value;
+ break;
+ };
+}
+
+static const struct MemoryRegionOps imx8mp_set_clr_tog_ops = {
+ .read = imx8mp_set_clr_tog_read,
+ .write = imx8mp_set_clr_tog_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ /*
+ * Our device would not work correctly if the guest was doing
+ * unaligned access. This might not be a limitation on the real
+ * device but in practice there is no reason for a guest to access
+ * this device unaligned.
+ */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void imx8mp_ccm_init(Object *obj)
+{
+ SysBusDevice *sd = SYS_BUS_DEVICE(obj);
+ IMX8MPCCMState *s = IMX8MP_CCM(obj);
+
+ memory_region_init_io(&s->iomem,
+ obj,
+ &imx8mp_set_clr_tog_ops,
+ s->ccm,
+ TYPE_IMX8MP_CCM ".ccm",
+ sizeof(s->ccm));
+
+ sysbus_init_mmio(sd, &s->iomem);
+}
+
+static const VMStateDescription imx8mp_ccm_vmstate = {
+ .name = TYPE_IMX8MP_CCM,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(ccm, IMX8MPCCMState, CCM_MAX),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static uint32_t imx8mp_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
+{
+ /*
+ * This function is "consumed" by GPT emulation code. Some clocks
+ * have fixed frequencies and we can provide requested frequency
+ * easily. However for CCM provided clocks (like IPG) each GPT
+ * timer can have its own clock root.
+ * This means we need additional information when calling this
+ * function to know the requester's identity.
+ */
+ uint32_t freq = 0;
+
+ switch (clock) {
+ case CLK_NONE:
+ break;
+ case CLK_32k:
+ freq = CKIL_FREQ;
+ break;
+ case CLK_HIGH:
+ freq = CKIH_FREQ;
+ break;
+ case CLK_IPG:
+ case CLK_IPG_HIGH:
+ /*
+ * For now we don't have a way to figure out the device this
+ * function is called for. Until then the IPG derived clocks
+ * are left unimplemented.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Clock %d Not implemented\n",
+ TYPE_IMX8MP_CCM, __func__, clock);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n",
+ TYPE_IMX8MP_CCM, __func__, clock);
+ break;
+ }
+
+ trace_ccm_clock_freq(clock, freq);
+
+ return freq;
+}
+
+static void imx8mp_ccm_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
+
+ device_class_set_legacy_reset(dc, imx8mp_ccm_reset);
+ dc->vmsd = &imx8mp_ccm_vmstate;
+ dc->desc = "i.MX 8M Plus Clock Control Module";
+
+ ccm->get_clock_frequency = imx8mp_ccm_get_clock_frequency;
+}
+
+static const TypeInfo imx8mp_ccm_types[] = {
+ {
+ .name = TYPE_IMX8MP_CCM,
+ .parent = TYPE_IMX_CCM,
+ .instance_size = sizeof(IMX8MPCCMState),
+ .instance_init = imx8mp_ccm_init,
+ .class_init = imx8mp_ccm_class_init,
+ },
+};
+
+DEFINE_TYPES(imx8mp_ccm_types);
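
The CCM_INDEX()/CCM_BITOP() pair above implements the usual i.MX "set/clear/toggle" register aliasing: bits [3:0] of the byte offset select the bit operation and the remaining bits select the register. A standalone model of that write path (plain C, compilable on its own; the register file size here is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define REG_INDEX(off) (((off) & ~0xFu) / sizeof(uint32_t))
#define REG_BITOP(off) ((off) & 0xFu)

static uint32_t ccm[256];

static void ccm_write(uint32_t offset, uint32_t value)
{
    uint32_t *reg = &ccm[REG_INDEX(offset)];

    switch (REG_BITOP(offset)) {
    case 0x0: *reg  = value;  break;  /* plain store            */
    case 0x4: *reg |= value;  break;  /* SET alias: set bits    */
    case 0x8: *reg &= ~value; break;  /* CLR alias: clear bits  */
    case 0xC: *reg ^= value;  break;  /* TOG alias: toggle bits */
    }
}

int main(void)
{
    ccm_write(0x80, 0xff);   /* plain write to register 8      */
    ccm_write(0x88, 0x0f);   /* CLR alias of the same register */
    printf("reg[8] = 0x%" PRIx32 "\n", ccm[REG_INDEX(0x80)]);  /* prints 0xf0 */
    return 0;
}
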
diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c
index ab7775e..630f6cb 100644
--- a/hw/misc/imx_rngc.c
+++ b/hw/misc/imx_rngc.c
@@ -254,12 +254,12 @@ static const VMStateDescription vmstate_imx_rngc = {
}
};
-static void imx_rngc_class_init(ObjectClass *klass, void *data)
+static void imx_rngc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_rngc_realize;
- dc->reset = imx_rngc_reset;
+ device_class_set_legacy_reset(dc, imx_rngc_reset);
dc->desc = RNGC_NAME,
dc->vmsd = &vmstate_imx_rngc;
}
diff --git a/hw/misc/iosb.c b/hw/misc/iosb.c
index 31927ea..96221e1 100644
--- a/hw/misc/iosb.c
+++ b/hw/misc/iosb.c
@@ -111,7 +111,7 @@ static const VMStateDescription vmstate_iosb = {
}
};
-static void iosb_class_init(ObjectClass *oc, void *data)
+static void iosb_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c
index f9c45f6..afd9ab4 100644
--- a/hw/misc/iotkit-secctl.c
+++ b/hw/misc/iotkit-secctl.c
@@ -814,17 +814,16 @@ static const VMStateDescription iotkit_secctl_vmstate = {
},
};
-static Property iotkit_secctl_props[] = {
+static const Property iotkit_secctl_props[] = {
DEFINE_PROP_UINT32("sse-version", IoTKitSecCtl, sse_version, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void iotkit_secctl_class_init(ObjectClass *klass, void *data)
+static void iotkit_secctl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &iotkit_secctl_vmstate;
- dc->reset = iotkit_secctl_reset;
+ device_class_set_legacy_reset(dc, iotkit_secctl_reset);
dc->realize = iotkit_secctl_realize;
device_class_set_props(dc, iotkit_secctl_props);
}
diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c
index 45393e8..d70e51a 100644
--- a/hw/misc/iotkit-sysctl.c
+++ b/hw/misc/iotkit-sysctl.c
@@ -20,7 +20,7 @@
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
@@ -835,22 +835,21 @@ static const VMStateDescription iotkit_sysctl_vmstate = {
}
};
-static Property iotkit_sysctl_props[] = {
+static const Property iotkit_sysctl_props[] = {
DEFINE_PROP_UINT32("sse-version", IoTKitSysCtl, sse_version, 0),
DEFINE_PROP_UINT32("CPUWAIT_RST", IoTKitSysCtl, cpuwait_rst, 0),
DEFINE_PROP_UINT32("INITSVTOR0_RST", IoTKitSysCtl, initsvtor0_rst,
0x10000000),
DEFINE_PROP_UINT32("INITSVTOR1_RST", IoTKitSysCtl, initsvtor1_rst,
0x10000000),
- DEFINE_PROP_END_OF_LIST()
};
-static void iotkit_sysctl_class_init(ObjectClass *klass, void *data)
+static void iotkit_sysctl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &iotkit_sysctl_vmstate;
- dc->reset = iotkit_sysctl_reset;
+ device_class_set_legacy_reset(dc, iotkit_sysctl_reset);
device_class_set_props(dc, iotkit_sysctl_props);
dc->realize = iotkit_sysctl_realize;
}
diff --git a/hw/misc/iotkit-sysinfo.c b/hw/misc/iotkit-sysinfo.c
index aaa9305..57405cb 100644
--- a/hw/misc/iotkit-sysinfo.c
+++ b/hw/misc/iotkit-sysinfo.c
@@ -131,12 +131,11 @@ static const MemoryRegionOps iotkit_sysinfo_ops = {
.valid.max_access_size = 4,
};
-static Property iotkit_sysinfo_props[] = {
+static const Property iotkit_sysinfo_props[] = {
DEFINE_PROP_UINT32("SYS_VERSION", IoTKitSysInfo, sys_version, 0),
DEFINE_PROP_UINT32("SYS_CONFIG", IoTKitSysInfo, sys_config, 0),
DEFINE_PROP_UINT32("sse-version", IoTKitSysInfo, sse_version, 0),
DEFINE_PROP_UINT32("IIDR", IoTKitSysInfo, iidr, 0),
- DEFINE_PROP_END_OF_LIST()
};
static void iotkit_sysinfo_init(Object *obj)
@@ -159,7 +158,7 @@ static void iotkit_sysinfo_realize(DeviceState *dev, Error **errp)
}
}
-static void iotkit_sysinfo_class_init(ObjectClass *klass, void *data)
+static void iotkit_sysinfo_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/ivshmem-flat.c b/hw/misc/ivshmem-flat.c
new file mode 100644
index 0000000..be28c24
--- /dev/null
+++ b/hw/misc/ivshmem-flat.c
@@ -0,0 +1,458 @@
+/*
+ * Inter-VM Shared Memory Flat Device
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (c) 2023 Linaro Ltd.
+ * Authors:
+ * Gustavo Romero
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "system/address-spaces.h"
+#include "trace.h"
+
+#include "hw/misc/ivshmem-flat.h"
+
+static int64_t ivshmem_flat_recv_msg(IvshmemFTState *s, int *pfd)
+{
+ int64_t msg;
+ int n, ret;
+
+ n = 0;
+ do {
+ ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n,
+ sizeof(msg) - n);
+ if (ret < 0) {
+ if (ret == -EINTR) {
+ continue;
+ }
+ exit(1);
+ }
+ n += ret;
+ } while (n < sizeof(msg));
+
+ if (pfd) {
+ *pfd = qemu_chr_fe_get_msgfd(&s->server_chr);
+ }
+ return le64_to_cpu(msg);
+}
+
+static void ivshmem_flat_irq_handler(void *opaque)
+{
+ VectorInfo *vi = opaque;
+ EventNotifier *e = &vi->event_notifier;
+ uint16_t vector_id;
+ const VectorInfo (*v)[64];
+
+ assert(e->initialized);
+
+ vector_id = vi->id;
+
+ /*
+ * The vector info struct is passed to the handler via the 'opaque' pointer.
+ * This struct pointer allows the retrieval of the vector ID and its
+ * associated event notifier. However, for triggering an interrupt using
+ * qemu_set_irq, it's necessary to also have a pointer to the device state,
+ * i.e., a pointer to the IvshmemFTState struct. Since the vector info
+ * struct is contained within the IvshmemFTState struct, its pointer can be
+ * used to obtain the pointer to IvshmemFTState through simple pointer math.
+ */
+ v = (void *)(vi - vector_id); /* v = &IvshmemPeer->vector[0] */
+ IvshmemPeer *own_peer = container_of(v, IvshmemPeer, vector);
+ IvshmemFTState *s = container_of(own_peer, IvshmemFTState, own);
+
+ /* Clear event */
+ if (!event_notifier_test_and_clear(e)) {
+ return;
+ }
+
+ trace_ivshmem_flat_irq_handler(vector_id);
+
+ /*
+ * Toggle device's output line, which is connected to interrupt controller,
+ * generating an interrupt request to the CPU.
+ */
+ qemu_irq_pulse(s->irq);
+}
+
+static IvshmemPeer *ivshmem_flat_find_peer(IvshmemFTState *s, uint16_t peer_id)
+{
+ IvshmemPeer *peer;
+
+ /* Own ID */
+ if (s->own.id == peer_id) {
+ return &s->own;
+ }
+
+ /* Peer ID */
+ QTAILQ_FOREACH(peer, &s->peer, next) {
+ if (peer->id == peer_id) {
+ return peer;
+ }
+ }
+
+ return NULL;
+}
+
+static IvshmemPeer *ivshmem_flat_add_peer(IvshmemFTState *s, uint16_t peer_id)
+{
+ IvshmemPeer *new_peer;
+
+ new_peer = g_malloc0(sizeof(*new_peer));
+ new_peer->id = peer_id;
+ new_peer->vector_counter = 0;
+
+ QTAILQ_INSERT_TAIL(&s->peer, new_peer, next);
+
+ trace_ivshmem_flat_new_peer(peer_id);
+
+ return new_peer;
+}
+
+static void ivshmem_flat_remove_peer(IvshmemFTState *s, uint16_t peer_id)
+{
+ IvshmemPeer *peer;
+
+ peer = ivshmem_flat_find_peer(s, peer_id);
+ assert(peer);
+
+ QTAILQ_REMOVE(&s->peer, peer, next);
+ for (int n = 0; n < peer->vector_counter; n++) {
+ int efd;
+ efd = event_notifier_get_fd(&(peer->vector[n].event_notifier));
+ close(efd);
+ }
+
+ g_free(peer);
+}
+
+static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer,
+ int vector_fd)
+{
+ if (peer->vector_counter >= IVSHMEM_MAX_VECTOR_NUM) {
+ trace_ivshmem_flat_add_vector_failure(peer->vector_counter,
+ vector_fd, peer->id);
+ close(vector_fd);
+
+ return;
+ }
+
+ trace_ivshmem_flat_add_vector_success(peer->vector_counter,
+ vector_fd, peer->id);
+
+ /*
+ * Set vector ID and its associated eventfd notifier and add them to the
+ * peer.
+ */
+ peer->vector[peer->vector_counter].id = peer->vector_counter;
+ g_unix_set_fd_nonblocking(vector_fd, true, NULL);
+ event_notifier_init_fd(&peer->vector[peer->vector_counter].event_notifier,
+ vector_fd);
+
+ /*
+ * If it's the device's own ID, also register the handler for the eventfd
+ * so the device can be notified by the other peers.
+ */
+ if (peer == &s->own) {
+ qemu_set_fd_handler(vector_fd, ivshmem_flat_irq_handler, NULL,
+ &peer->vector);
+ }
+
+ peer->vector_counter++;
+}
+
+static void ivshmem_flat_process_msg(IvshmemFTState *s, uint64_t msg, int fd)
+{
+ uint16_t peer_id;
+ IvshmemPeer *peer;
+
+ peer_id = msg & 0xFFFF;
+ peer = ivshmem_flat_find_peer(s, peer_id);
+
+ if (!peer) {
+ peer = ivshmem_flat_add_peer(s, peer_id);
+ }
+
+ if (fd >= 0) {
+ ivshmem_flat_add_vector(s, peer, fd);
+ } else { /* fd == -1, which is received when peers disconnect. */
+ ivshmem_flat_remove_peer(s, peer_id);
+ }
+}
+
+static int ivshmem_flat_can_receive_data(void *opaque)
+{
+ IvshmemFTState *s = opaque;
+
+ assert(s->msg_buffered_bytes < sizeof(s->msg_buf));
+ return sizeof(s->msg_buf) - s->msg_buffered_bytes;
+}
+
+static void ivshmem_flat_read_msg(void *opaque, const uint8_t *buf, int size)
+{
+ IvshmemFTState *s = opaque;
+ int fd;
+ int64_t msg;
+
+ assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf));
+ memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size);
+ s->msg_buffered_bytes += size;
+ if (s->msg_buffered_bytes < sizeof(s->msg_buf)) {
+ return;
+ }
+ msg = le64_to_cpu(s->msg_buf);
+ s->msg_buffered_bytes = 0;
+
+ fd = qemu_chr_fe_get_msgfd(&s->server_chr);
+
+ ivshmem_flat_process_msg(s, msg, fd);
+}
+
+static uint64_t ivshmem_flat_iomem_read(void *opaque,
+ hwaddr offset, unsigned size)
+{
+ IvshmemFTState *s = opaque;
+ uint32_t ret;
+
+ trace_ivshmem_flat_read_mmr(offset);
+
+ switch (offset) {
+ case INTMASK:
+ ret = 0; /* Ignore read since all bits are reserved in rev 1. */
+ break;
+ case INTSTATUS:
+ ret = 0; /* Ignore read since all bits are reserved in rev 1. */
+ break;
+ case IVPOSITION:
+ ret = s->own.id;
+ break;
+ case DOORBELL:
+ trace_ivshmem_flat_read_mmr_doorbell(); /* DOORBELL is write-only */
+ ret = 0;
+ break;
+ default:
+ /* Should never reach here since the iomem map range is exact */
+ trace_ivshmem_flat_read_write_mmr_invalid(offset);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int ivshmem_flat_interrupt_peer(IvshmemFTState *s,
+ uint16_t peer_id, uint16_t vector_id)
+{
+ IvshmemPeer *peer;
+
+ peer = ivshmem_flat_find_peer(s, peer_id);
+ if (!peer) {
+ trace_ivshmem_flat_interrupt_invalid_peer(peer_id);
+ return 1;
+ }
+
+ event_notifier_set(&(peer->vector[vector_id].event_notifier));
+
+ return 0;
+}
+
+static void ivshmem_flat_iomem_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ IvshmemFTState *s = opaque;
+ uint16_t peer_id = (value >> 16) & 0xFFFF;
+ uint16_t vector_id = value & 0xFFFF;
+
+ trace_ivshmem_flat_write_mmr(offset);
+
+ switch (offset) {
+ case INTMASK:
+ break;
+ case INTSTATUS:
+ break;
+ case IVPOSITION:
+ break;
+ case DOORBELL:
+ trace_ivshmem_flat_interrupt_peer(peer_id, vector_id);
+ ivshmem_flat_interrupt_peer(s, peer_id, vector_id);
+ break;
+ default:
+ /* Should never reach here since the iomem map range is exact. */
+ trace_ivshmem_flat_read_write_mmr_invalid(offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps ivshmem_flat_ops = {
+ .read = ivshmem_flat_iomem_read,
+ .write = ivshmem_flat_iomem_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = { /* Read/write aligned at 32 bits. */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void ivshmem_flat_instance_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ IvshmemFTState *s = IVSHMEM_FLAT(obj);
+
+ /*
+ * Init mem region for 4 MMRs (ivshmem_registers),
+ * 32 bits each => 16 bytes (0x10).
+ */
+ memory_region_init_io(&s->iomem, obj, &ivshmem_flat_ops, s,
+ "ivshmem-mmio", 0x10);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ /*
+ * Create one output IRQ that will be connected to the
+ * machine's interrupt controller.
+ */
+ sysbus_init_irq(sbd, &s->irq);
+
+ QTAILQ_INIT(&s->peer);
+}
+
+static bool ivshmem_flat_connect_server(DeviceState *dev, Error **errp)
+{
+ IvshmemFTState *s = IVSHMEM_FLAT(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ int64_t protocol_version, msg;
+ int shmem_fd;
+ uint16_t peer_id;
+ struct stat fdstat;
+
+ /* Check ivshmem server connection. */
+ if (!qemu_chr_fe_backend_connected(&s->server_chr)) {
+ error_setg(errp, "ivshmem server socket not specified or incorret."
+ " Can't create device.");
+ return false;
+ }
+
+ /*
+ * Message sequence from server on new connection:
+ * _____________________________________
+ * |STEP| uint64_t msg | int fd |
+ * -------------------------------------
+ *
+ * 0 PROTOCOL -1 \
+ * 1 OWN PEER ID -1 |-- Header/Greeting
+ * 2 -1 shmem fd /
+ *
+ * 3 PEER IDx Other peer's Vector 0 eventfd
+ * 4 PEER IDx Other peer's Vector 1 eventfd
+ * . .
+ * . .
+ * . .
+ * N PEER IDy Other peer's Vector 0 eventfd
+ * N+1 PEER IDy Other peer's Vector 1 eventfd
+ * . .
+ * . .
+ * . .
+ *
+ * ivshmem_flat_recv_msg() calls return 'msg' and 'fd'.
+ *
+ * See ./docs/specs/ivshmem-spec.txt for details on the protocol.
+ */
+
+ /* Step 0 */
+ protocol_version = ivshmem_flat_recv_msg(s, NULL);
+
+ /* Step 1 */
+ msg = ivshmem_flat_recv_msg(s, NULL);
+ peer_id = 0xFFFF & msg;
+ s->own.id = peer_id;
+ s->own.vector_counter = 0;
+
+ trace_ivshmem_flat_proto_ver_own_id(protocol_version, s->own.id);
+
+ /* Step 2 */
+ msg = ivshmem_flat_recv_msg(s, &shmem_fd);
+ /* Map shmem fd and MMRs into memory regions. */
+ if (msg != -1 || shmem_fd < 0) {
+ error_setg(errp, "Could not receive valid shmem fd."
+ " Can't create device!");
+ return false;
+ }
+
+ if (fstat(shmem_fd, &fdstat) != 0) {
+ error_setg(errp, "Could not determine shmem fd size."
+ " Can't create device!");
+ return false;
+ }
+ trace_ivshmem_flat_shmem_size(shmem_fd, fdstat.st_size);
+
+ /*
+ * The shmem size provided by the ivshmem server must be equal to
+ * the device's shmem size.
+ */
+ if (fdstat.st_size != s->shmem_size) {
+ error_setg(errp, "Can't map shmem fd: shmem size different"
+ " from device size!");
+ return false;
+ }
+
+ /*
+ * Beyond step 2, ivshmem_flat_process_msg(), called by the
+ * ivshmem_flat_read_msg() handler whenever data is available on the
+ * server socket, handles the additional messages generated by the
+ * server as peers connect or disconnect.
+ */
+ qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_flat_can_receive_data,
+ ivshmem_flat_read_msg, NULL, NULL, s, NULL, true);
+
+ memory_region_init_ram_from_fd(&s->shmem, OBJECT(s),
+ "ivshmem-shmem", s->shmem_size,
+ RAM_SHARED, shmem_fd, 0, NULL);
+ sysbus_init_mmio(sbd, &s->shmem);
+
+ return true;
+}
+
+static void ivshmem_flat_realize(DeviceState *dev, Error **errp)
+{
+ if (!ivshmem_flat_connect_server(dev, errp)) {
+ return;
+ }
+}
+
+static const Property ivshmem_flat_props[] = {
+ DEFINE_PROP_CHR("chardev", IvshmemFTState, server_chr),
+ DEFINE_PROP_UINT32("shmem-size", IvshmemFTState, shmem_size, 4 * MiB),
+};
+
+static void ivshmem_flat_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->hotpluggable = true;
+ dc->realize = ivshmem_flat_realize;
+
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ device_class_set_props(dc, ivshmem_flat_props);
+
+ /* Reason: Must be wired up in code (sysbus MRs and IRQ) */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo ivshmem_flat_types[] = {
+ {
+ .name = TYPE_IVSHMEM_FLAT,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IvshmemFTState),
+ .instance_init = ivshmem_flat_instance_init,
+ .class_init = ivshmem_flat_class_init,
+ },
+};
+
+DEFINE_TYPES(ivshmem_flat_types)
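
In ivshmem_flat_iomem_write() above, a DOORBELL store packs the target peer ID into the upper 16 bits of the 32-bit value and the vector number into the lower 16 bits, which is then used to pick the peer's event notifier. A standalone sketch of just that encoding (plain C, not QEMU code):

#include <stdint.h>
#include <stdio.h>

static uint32_t doorbell_pack(uint16_t peer_id, uint16_t vector_id)
{
    return ((uint32_t)peer_id << 16) | vector_id;
}

static void doorbell_unpack(uint32_t value, uint16_t *peer_id,
                            uint16_t *vector_id)
{
    *peer_id = (value >> 16) & 0xFFFF;
    *vector_id = value & 0xFFFF;
}

int main(void)
{
    uint16_t peer, vector;

    doorbell_unpack(doorbell_pack(3, 1), &peer, &vector);
    printf("notify peer %u on vector %u\n", (unsigned)peer, (unsigned)vector);
    return 0;
}
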
diff --git a/hw/misc/ivshmem-pci.c b/hw/misc/ivshmem-pci.c
new file mode 100644
index 0000000..5a10bca
--- /dev/null
+++ b/hw/misc/ivshmem-pci.c
@@ -0,0 +1,1131 @@
+/*
+ * Inter-VM Shared Memory PCI device.
+ *
+ * Author:
+ * Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based On: cirrus_vga.c
+ * Copyright (c) 2004 Fabrice Bellard
+ * Copyright (c) 2004 Makoto Suzuki (suzu)
+ *
+ * and rtl8139.c
+ * Copyright (c) 2006 Igor Kovalenko
+ *
+ * This code is licensed under the GNU GPL v2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "hw/pci/pci.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "system/kvm.h"
+#include "migration/blocker.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/event_notifier.h"
+#include "qemu/module.h"
+#include "qom/object_interfaces.h"
+#include "chardev/char-fe.h"
+#include "system/hostmem.h"
+#include "qapi/visitor.h"
+
+#include "hw/misc/ivshmem.h"
+#include "qom/object.h"
+
+#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
+#define PCI_DEVICE_ID_IVSHMEM 0x1110
+
+#define IVSHMEM_MAX_PEERS UINT16_MAX
+#define IVSHMEM_IOEVENTFD 0
+#define IVSHMEM_MSI 1
+
+#define IVSHMEM_REG_BAR_SIZE 0x100
+
+#define IVSHMEM_DEBUG 0
+#define IVSHMEM_DPRINTF(fmt, ...) \
+ do { \
+ if (IVSHMEM_DEBUG) { \
+ printf("IVSHMEM: " fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define TYPE_IVSHMEM_COMMON "ivshmem-common"
+typedef struct IVShmemState IVShmemState;
+DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_COMMON,
+ TYPE_IVSHMEM_COMMON)
+
+#define TYPE_IVSHMEM_PLAIN "ivshmem-plain"
+DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_PLAIN,
+ TYPE_IVSHMEM_PLAIN)
+
+#define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell"
+DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_DOORBELL,
+ TYPE_IVSHMEM_DOORBELL)
+
+#define TYPE_IVSHMEM "ivshmem"
+DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM,
+ TYPE_IVSHMEM)
+
+typedef struct Peer {
+ int nb_eventfds;
+ EventNotifier *eventfds;
+} Peer;
+
+typedef struct MSIVector {
+ PCIDevice *pdev;
+ int virq;
+ bool unmasked;
+} MSIVector;
+
+struct IVShmemState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ uint32_t features;
+
+ /* exactly one of these two may be set */
+ HostMemoryBackend *hostmem; /* with interrupts */
+ CharBackend server_chr; /* without interrupts */
+
+ /* registers */
+ uint32_t intrmask;
+ uint32_t intrstatus;
+ int vm_id;
+
+ /* BARs */
+ MemoryRegion ivshmem_mmio; /* BAR 0 (registers) */
+ MemoryRegion *ivshmem_bar2; /* BAR 2 (shared memory) */
+ MemoryRegion server_bar2; /* used with server_chr */
+
+ /* interrupt support */
+ Peer *peers;
+ int nb_peers; /* space in @peers[] */
+ uint32_t vectors;
+ MSIVector *msi_vectors;
+ uint64_t msg_buf; /* buffer for receiving server messages */
+ int msg_buffered_bytes; /* #bytes in @msg_buf */
+
+ /* migration stuff */
+ OnOffAuto master;
+ Error *migration_blocker;
+};
+
+/* registers for the Inter-VM shared memory device */
+enum ivshmem_registers {
+ INTRMASK = 0,
+ INTRSTATUS = 4,
+ IVPOSITION = 8,
+ DOORBELL = 12,
+};
+
+static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
+ unsigned int feature) {
+ return (ivs->features & (1 << feature));
+}
+
+static inline bool ivshmem_is_master(IVShmemState *s)
+{
+ assert(s->master != ON_OFF_AUTO_AUTO);
+ return s->master == ON_OFF_AUTO_ON;
+}
+
+static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
+{
+ IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
+
+ s->intrmask = val;
+}
+
+static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
+{
+ uint32_t ret = s->intrmask;
+
+ IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
+ return ret;
+}
+
+static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
+{
+ IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
+
+ s->intrstatus = val;
+}
+
+static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
+{
+ uint32_t ret = s->intrstatus;
+
+ /* reading ISR clears all interrupts */
+ s->intrstatus = 0;
+ return ret;
+}
+
+static void ivshmem_io_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ IVShmemState *s = opaque;
+
+ uint16_t dest = val >> 16;
+ uint16_t vector = val & 0xff;
+
+ addr &= 0xfc;
+
+ IVSHMEM_DPRINTF("writing to addr " HWADDR_FMT_plx "\n", addr);
+ switch (addr)
+ {
+ case INTRMASK:
+ ivshmem_IntrMask_write(s, val);
+ break;
+
+ case INTRSTATUS:
+ ivshmem_IntrStatus_write(s, val);
+ break;
+
+ case DOORBELL:
+ /* check that dest VM ID is reasonable */
+ if (dest >= s->nb_peers) {
+ IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
+ break;
+ }
+
+ /* check doorbell range */
+ if (vector < s->peers[dest].nb_eventfds) {
+ IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+ event_notifier_set(&s->peers[dest].eventfds[vector]);
+ } else {
+ IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
+ vector, dest);
+ }
+ break;
+ default:
+ IVSHMEM_DPRINTF("Unhandled write " HWADDR_FMT_plx "\n", addr);
+ }
+}
+
+static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+
+ IVShmemState *s = opaque;
+ uint32_t ret;
+
+ switch (addr)
+ {
+ case INTRMASK:
+ ret = ivshmem_IntrMask_read(s);
+ break;
+
+ case INTRSTATUS:
+ ret = ivshmem_IntrStatus_read(s);
+ break;
+
+ case IVPOSITION:
+ ret = s->vm_id;
+ break;
+
+ default:
+ IVSHMEM_DPRINTF("why are we reading " HWADDR_FMT_plx "\n", addr);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const MemoryRegionOps ivshmem_mmio_ops = {
+ .read = ivshmem_io_read,
+ .write = ivshmem_io_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void ivshmem_vector_notify(void *opaque)
+{
+ MSIVector *entry = opaque;
+ PCIDevice *pdev = entry->pdev;
+ IVShmemState *s = IVSHMEM_COMMON(pdev);
+ int vector = entry - s->msi_vectors;
+ EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+
+ if (!event_notifier_test_and_clear(n)) {
+ return;
+ }
+
+ IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ if (msix_enabled(pdev)) {
+ msix_notify(pdev, vector);
+ }
+ } else {
+ ivshmem_IntrStatus_write(s, 1);
+ }
+}
+
+static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
+ MSIMessage msg)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+ EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+ MSIVector *v = &s->msi_vectors[vector];
+ int ret;
+
+ IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);
+ if (!v->pdev) {
+ error_report("ivshmem: vector %d route does not exist", vector);
+ return -EINVAL;
+ }
+ assert(!v->unmasked);
+
+ ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
+ if (ret < 0) {
+ return ret;
+ }
+ kvm_irqchip_commit_routes(kvm_state);
+
+ ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
+ if (ret < 0) {
+ return ret;
+ }
+ v->unmasked = true;
+
+ return 0;
+}
+
+static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+ EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+ MSIVector *v = &s->msi_vectors[vector];
+ int ret;
+
+ IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);
+ if (!v->pdev) {
+ error_report("ivshmem: vector %d route does not exist", vector);
+ return;
+ }
+ assert(v->unmasked);
+
+ ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq);
+ if (ret < 0) {
+ error_report("remove_irqfd_notifier_gsi failed");
+ return;
+ }
+ v->unmasked = false;
+}
+
+static void ivshmem_vector_poll(PCIDevice *dev,
+ unsigned int vector_start,
+ unsigned int vector_end)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+ unsigned int vector;
+
+ IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);
+
+ vector_end = MIN(vector_end, s->vectors);
+
+ for (vector = vector_start; vector < vector_end; vector++) {
+ EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];
+
+ if (!msix_is_masked(dev, vector)) {
+ continue;
+ }
+
+ if (event_notifier_test_and_clear(notifier)) {
+ msix_set_pending(dev, vector);
+ }
+ }
+}
+
+static void watch_vector_notifier(IVShmemState *s, EventNotifier *n,
+ int vector)
+{
+ int eventfd = event_notifier_get_fd(n);
+
+ assert(!s->msi_vectors[vector].pdev);
+ s->msi_vectors[vector].pdev = PCI_DEVICE(s);
+
+ qemu_set_fd_handler(eventfd, ivshmem_vector_notify,
+ NULL, &s->msi_vectors[vector]);
+}
+
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_add_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_del_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
+static void close_peer_eventfds(IVShmemState *s, int posn)
+{
+ int i, n;
+
+ assert(posn >= 0 && posn < s->nb_peers);
+ n = s->peers[posn].nb_eventfds;
+
+ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
+ memory_region_transaction_begin();
+ for (i = 0; i < n; i++) {
+ ivshmem_del_eventfd(s, posn, i);
+ }
+ memory_region_transaction_commit();
+ }
+
+ for (i = 0; i < n; i++) {
+ event_notifier_cleanup(&s->peers[posn].eventfds[i]);
+ }
+
+ g_free(s->peers[posn].eventfds);
+ s->peers[posn].nb_eventfds = 0;
+}
+
+static void resize_peers(IVShmemState *s, int nb_peers)
+{
+ int old_nb_peers = s->nb_peers;
+ int i;
+
+ assert(nb_peers > old_nb_peers);
+ IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers);
+
+ s->peers = g_renew(Peer, s->peers, nb_peers);
+ s->nb_peers = nb_peers;
+
+ for (i = old_nb_peers; i < nb_peers; i++) {
+ s->peers[i].eventfds = g_new0(EventNotifier, s->vectors);
+ s->peers[i].nb_eventfds = 0;
+ }
+}
+
+static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector,
+ Error **errp)
+{
+ PCIDevice *pdev = PCI_DEVICE(s);
+ KVMRouteChange c;
+ int ret;
+
+ IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
+ assert(!s->msi_vectors[vector].pdev);
+
+ c = kvm_irqchip_begin_route_changes(kvm_state);
+ ret = kvm_irqchip_add_msi_route(&c, vector, pdev);
+ if (ret < 0) {
+ error_setg(errp, "kvm_irqchip_add_msi_route failed");
+ return;
+ }
+ kvm_irqchip_commit_route_changes(&c);
+
+ s->msi_vectors[vector].virq = ret;
+ s->msi_vectors[vector].pdev = pdev;
+}
+
+static void setup_interrupt(IVShmemState *s, int vector, Error **errp)
+{
+ EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+ bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
+ ivshmem_has_feature(s, IVSHMEM_MSI);
+ PCIDevice *pdev = PCI_DEVICE(s);
+ Error *err = NULL;
+
+ IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);
+
+ if (!with_irqfd) {
+ IVSHMEM_DPRINTF("with eventfd\n");
+ watch_vector_notifier(s, n, vector);
+ } else if (msix_enabled(pdev)) {
+ IVSHMEM_DPRINTF("with irqfd\n");
+ ivshmem_add_kvm_msi_virq(s, vector, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ if (!msix_is_masked(pdev, vector)) {
+ kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
+ s->msi_vectors[vector].virq);
+ /* TODO handle error */
+ }
+ } else {
+ /* it will be delayed until msix is enabled, in write_config */
+ IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
+ }
+}
+
+static void process_msg_shmem(IVShmemState *s, int fd, Error **errp)
+{
+ struct stat buf;
+ size_t size;
+
+ if (s->ivshmem_bar2) {
+ error_setg(errp, "server sent unexpected shared memory message");
+ close(fd);
+ return;
+ }
+
+ if (fstat(fd, &buf) < 0) {
+ error_setg_errno(errp, errno,
+ "can't determine size of shared memory sent by server");
+ close(fd);
+ return;
+ }
+
+ size = buf.st_size;
+
+ /* mmap the region and map into the BAR2 */
+ if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s),
+ "ivshmem.bar2", size, RAM_SHARED,
+ fd, 0, errp)) {
+ return;
+ }
+
+ s->ivshmem_bar2 = &s->server_bar2;
+}
+
+static void process_msg_disconnect(IVShmemState *s, uint16_t posn,
+ Error **errp)
+{
+ IVSHMEM_DPRINTF("posn %d has gone away\n", posn);
+ if (posn >= s->nb_peers || posn == s->vm_id) {
+ error_setg(errp, "invalid peer %d", posn);
+ return;
+ }
+ close_peer_eventfds(s, posn);
+}
+
+static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd,
+ Error **errp)
+{
+ Peer *peer = &s->peers[posn];
+ int vector;
+
+ /*
+ * The N-th connect message for this peer comes with the file
+ * descriptor for vector N-1. Count messages to find the vector.
+ */
+ if (peer->nb_eventfds >= s->vectors) {
+ error_setg(errp, "Too many eventfd received, device has %d vectors",
+ s->vectors);
+ close(fd);
+ return;
+ }
+ vector = peer->nb_eventfds++;
+
+ IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd);
+ event_notifier_init_fd(&peer->eventfds[vector], fd);
+ g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */
+
+ if (posn == s->vm_id) {
+ setup_interrupt(s, vector, errp);
+ /* TODO do we need to handle the error? */
+ }
+
+ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
+ ivshmem_add_eventfd(s, posn, vector);
+ }
+}
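+
+/*
+ * Worked example (illustrative): for a device with vectors=2, the server
+ * sends peer 3's ID twice, each time with one eventfd attached:
+ *
+ *   msg = 3, fd = A   ->  peers[3].eventfds[0] = A
+ *   msg = 3, fd = B   ->  peers[3].eventfds[1] = B
+ *
+ * A third such message would exceed s->vectors and is rejected above.
+ */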
+
+static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp)
+{
+ IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd);
+
+ if (msg < -1 || msg > IVSHMEM_MAX_PEERS) {
+ error_setg(errp, "server sent invalid message %" PRId64, msg);
+ close(fd);
+ return;
+ }
+
+ if (msg == -1) {
+ process_msg_shmem(s, fd, errp);
+ return;
+ }
+
+ if (msg >= s->nb_peers) {
+ resize_peers(s, msg + 1);
+ }
+
+ if (fd >= 0) {
+ process_msg_connect(s, msg, fd, errp);
+ } else {
+ process_msg_disconnect(s, msg, errp);
+ }
+}
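+
+/*
+ * Message encoding as dispatched above: each server message is a signed
+ * 64-bit value, optionally accompanied by a file descriptor.
+ *
+ *   msg == -1, fd = shm    ->  the shared memory region (BAR 2)
+ *   msg == ID, fd >= 0     ->  one eventfd for peer ID (connect)
+ *   msg == ID, no fd       ->  peer ID disconnected
+ *
+ * Anything outside [-1, IVSHMEM_MAX_PEERS] is rejected as invalid.
+ */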
+
+static int ivshmem_can_receive(void *opaque)
+{
+ IVShmemState *s = opaque;
+
+ assert(s->msg_buffered_bytes < sizeof(s->msg_buf));
+ return sizeof(s->msg_buf) - s->msg_buffered_bytes;
+}
+
+static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
+{
+ IVShmemState *s = opaque;
+ Error *err = NULL;
+ int fd;
+ int64_t msg;
+
+ assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf));
+ memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size);
+ s->msg_buffered_bytes += size;
+ if (s->msg_buffered_bytes < sizeof(s->msg_buf)) {
+ return;
+ }
+ msg = le64_to_cpu(s->msg_buf);
+ s->msg_buffered_bytes = 0;
+
+ fd = qemu_chr_fe_get_msgfd(&s->server_chr);
+
+ process_msg(s, msg, fd, &err);
+ if (err) {
+ error_report_err(err);
+ }
+}
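+
+/*
+ * Note: the chardev may hand us a message in arbitrary chunks, so
+ * ivshmem_read() accumulates bytes in s->msg_buf until a full 8-byte,
+ * little-endian message is available and only then calls process_msg().
+ * ivshmem_can_receive() advertises the remaining buffer space, so we are
+ * never offered more than one message's worth of data at a time.
+ */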
+
+static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
+{
+ int64_t msg;
+ int n, ret;
+
+ n = 0;
+ do {
+ ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n,
+ sizeof(msg) - n);
+ if (ret < 0) {
+ if (ret == -EINTR) {
+ continue;
+ }
+ error_setg_errno(errp, -ret, "read from server failed");
+ return INT64_MIN;
+ }
+ n += ret;
+ } while (n < sizeof(msg));
+
+ *pfd = qemu_chr_fe_get_msgfd(&s->server_chr);
+ return le64_to_cpu(msg);
+}
+
+static void ivshmem_recv_setup(IVShmemState *s, Error **errp)
+{
+ Error *err = NULL;
+ int64_t msg;
+ int fd;
+
+ msg = ivshmem_recv_msg(s, &fd, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ if (msg != IVSHMEM_PROTOCOL_VERSION) {
+ error_setg(errp, "server sent version %" PRId64 ", expecting %d",
+ msg, IVSHMEM_PROTOCOL_VERSION);
+ return;
+ }
+ if (fd != -1) {
+ error_setg(errp, "server sent invalid version message");
+ return;
+ }
+
+ /*
+ * ivshmem-server sends the remaining initial messages in a fixed
+ * order, but the device has always accepted them in any order.
+ * Stay as compatible as practical, just in case people use
+ * servers that behave differently.
+ */
+
+ /*
+ * ivshmem_device_spec.txt has always required the ID message
+ * right here, and ivshmem-server has always complied. Older
+ * versions of the device accepted it out of order, but broke
+ * when an interrupt setup message arrived before it.
+ */
+ msg = ivshmem_recv_msg(s, &fd, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) {
+ error_setg(errp, "server sent invalid ID message");
+ return;
+ }
+ s->vm_id = msg;
+
+ /*
+ * Keep receiving messages until the shared memory arrives.
+ */
+ do {
+ msg = ivshmem_recv_msg(s, &fd, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ process_msg(s, msg, fd, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ } while (msg != -1);
+
+ /*
+ * This function must either map the shared memory or fail. The
+ * loop above ensures that: it terminates normally only after it
+ * has successfully processed the server's shared memory message.
+ * Assert that the shared memory really is mapped:
+ */
+ assert(s->ivshmem_bar2);
+}
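+
+/*
+ * Setup sequence accepted above, in the order the code requires it
+ * (a sketch; as the comments note, the server's ordering of the later
+ * messages may vary and the device tolerates that):
+ *
+ *   1. protocol version, no fd
+ *   2. our own ID (s->vm_id), no fd
+ *   3. zero or more peer connect messages (ID + eventfd)
+ *   4. -1 with the shared memory fd, which ends the loop
+ *
+ * Anything arriving after that is handled asynchronously by
+ * ivshmem_read() once ivshmem_common_realize() installs the chardev
+ * handlers.
+ */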
+
+/* Select the MSI-X vectors used by the device.
+ * ivshmem maps events to vectors statically, so
+ * we just enable all vectors on init and after reset. */
+static void ivshmem_msix_vector_use(IVShmemState *s)
+{
+ PCIDevice *d = PCI_DEVICE(s);
+ int i;
+
+ for (i = 0; i < s->vectors; i++) {
+ msix_vector_use(d, i);
+ }
+}
+
+static void ivshmem_disable_irqfd(IVShmemState *s);
+
+static void ivshmem_reset(DeviceState *d)
+{
+ IVShmemState *s = IVSHMEM_COMMON(d);
+
+ ivshmem_disable_irqfd(s);
+
+ s->intrstatus = 0;
+ s->intrmask = 0;
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ ivshmem_msix_vector_use(s);
+ }
+}
+
+static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp)
+{
+ /* allocate QEMU callback data for receiving interrupts */
+ s->msi_vectors = g_new0(MSIVector, s->vectors);
+
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) {
+ return -1;
+ }
+
+ IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
+ ivshmem_msix_vector_use(s);
+ }
+
+ return 0;
+}
+
+static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
+{
+ IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);
+
+ if (s->msi_vectors[vector].pdev == NULL) {
+ return;
+ }
+
+ /* the irqfd was already cleaned up when the vector was masked in the frontend */
+ kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);
+
+ s->msi_vectors[vector].pdev = NULL;
+}
+
+static void ivshmem_enable_irqfd(IVShmemState *s)
+{
+ PCIDevice *pdev = PCI_DEVICE(s);
+ int i;
+
+ for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
+ Error *err = NULL;
+
+ ivshmem_add_kvm_msi_virq(s, i, &err);
+ if (err) {
+ error_report_err(err);
+ goto undo;
+ }
+ }
+
+ if (msix_set_vector_notifiers(pdev,
+ ivshmem_vector_unmask,
+ ivshmem_vector_mask,
+ ivshmem_vector_poll)) {
+ error_report("ivshmem: msix_set_vector_notifiers failed");
+ goto undo;
+ }
+ return;
+
+undo:
+ while (--i >= 0) {
+ ivshmem_remove_kvm_msi_virq(s, i);
+ }
+}
+
+static void ivshmem_disable_irqfd(IVShmemState *s)
+{
+ PCIDevice *pdev = PCI_DEVICE(s);
+ int i;
+
+ if (!pdev->msix_vector_use_notifier) {
+ return;
+ }
+
+ msix_unset_vector_notifiers(pdev);
+
+ for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
+ /*
+ * MSI-X is already disabled here so msix_unset_vector_notifiers()
+ * didn't call our release notifier. Do it now to keep our masks and
+ * unmasks balanced.
+ */
+ if (s->msi_vectors[i].unmasked) {
+ ivshmem_vector_mask(pdev, i);
+ }
+ ivshmem_remove_kvm_msi_virq(s, i);
+ }
+}
+
+static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
+ uint32_t val, int len)
+{
+ IVShmemState *s = IVSHMEM_COMMON(pdev);
+ int is_enabled, was_enabled = msix_enabled(pdev);
+
+ pci_default_write_config(pdev, address, val, len);
+ is_enabled = msix_enabled(pdev);
+
+ if (kvm_msi_via_irqfd_enabled()) {
+ if (!was_enabled && is_enabled) {
+ ivshmem_enable_irqfd(s);
+ } else if (was_enabled && !is_enabled) {
+ ivshmem_disable_irqfd(s);
+ }
+ }
+}
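+
+/*
+ * This config-space hook completes the deferred case in
+ * setup_interrupt(): when the guest enables MSI-X, the per-vector
+ * eventfds are wired to KVM irqfds (ivshmem_enable_irqfd); when the
+ * guest disables MSI-X again, the irqfds are torn down
+ * (ivshmem_disable_irqfd).
+ */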
+
+static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
+{
+ ERRP_GUARD();
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+ Error *err = NULL;
+ uint8_t *pci_conf;
+
+ /* IRQFD requires MSI */
+ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
+ !ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ error_setg(errp, "ioeventfd/irqfd requires MSI");
+ return;
+ }
+
+ pci_conf = dev->config;
+ pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+ memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
+ "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
+
+ /* region for registers */
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &s->ivshmem_mmio);
+
+ if (s->hostmem != NULL) {
+ IVSHMEM_DPRINTF("using hostmem\n");
+
+ s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem);
+ host_memory_backend_set_mapped(s->hostmem, true);
+ } else {
+ Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr);
+ assert(chr);
+
+ IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
+ chr->filename);
+
+ /* we allocate enough space for 16 peers and grow as needed */
+ resize_peers(s, 16);
+
+ /*
+ * Receive setup messages from server synchronously.
+ * Older versions did it asynchronously, but that creates a
+ * number of entertaining race conditions.
+ */
+ ivshmem_recv_setup(s, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) {
+ error_setg(errp,
+ "master must connect to the server before any peers");
+ return;
+ }
+
+ qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive,
+ ivshmem_read, NULL, NULL, s, NULL, true);
+
+ if (ivshmem_setup_interrupts(s, errp) < 0) {
+ error_prepend(errp, "Failed to initialize interrupts: ");
+ return;
+ }
+ }
+
+ if (s->master == ON_OFF_AUTO_AUTO) {
+ s->master = s->vm_id == 0 ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+ }
+
+ if (!ivshmem_is_master(s)) {
+ error_setg(&s->migration_blocker,
+ "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
+ if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
+ return;
+ }
+ }
+
+ vmstate_register_ram(s->ivshmem_bar2, DEVICE(s));
+ pci_register_bar(PCI_DEVICE(s), 2,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ s->ivshmem_bar2);
+}
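+
+/*
+ * Role selection above: with master=auto, the instance that received
+ * ID 0 from the server becomes the master; every other instance is a
+ * peer and registers a migration blocker, because 'peer' devices are
+ * not migratable (see ivshmem_pre_load()).
+ */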
+
+static void ivshmem_exit(PCIDevice *dev)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+ int i;
+
+ migrate_del_blocker(&s->migration_blocker);
+
+ if (memory_region_is_mapped(s->ivshmem_bar2)) {
+ if (!s->hostmem) {
+ void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2);
+ int fd;
+
+ if (munmap(addr, memory_region_size(s->ivshmem_bar2)) == -1) {
+ error_report("Failed to munmap shared memory %s",
+ strerror(errno));
+ }
+
+ fd = memory_region_get_fd(s->ivshmem_bar2);
+ close(fd);
+ }
+
+ vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev));
+ }
+
+ if (s->hostmem) {
+ host_memory_backend_set_mapped(s->hostmem, false);
+ }
+
+ if (s->peers) {
+ for (i = 0; i < s->nb_peers; i++) {
+ close_peer_eventfds(s, i);
+ }
+ g_free(s->peers);
+ }
+
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ msix_uninit_exclusive_bar(dev);
+ }
+
+ g_free(s->msi_vectors);
+}
+
+static int ivshmem_pre_load(void *opaque)
+{
+ IVShmemState *s = opaque;
+
+ if (!ivshmem_is_master(s)) {
+ error_report("'peer' devices are not migratable");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ivshmem_post_load(void *opaque, int version_id)
+{
+ IVShmemState *s = opaque;
+
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ ivshmem_msix_vector_use(s);
+ }
+ return 0;
+}
+
+static void ivshmem_common_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = ivshmem_common_realize;
+ k->exit = ivshmem_exit;
+ k->config_write = ivshmem_write_config;
+ k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
+ k->device_id = PCI_DEVICE_ID_IVSHMEM;
+ k->class_id = PCI_CLASS_MEMORY_RAM;
+ k->revision = 1;
+ device_class_set_legacy_reset(dc, ivshmem_reset);
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->desc = "Inter-VM shared memory";
+}
+
+static const TypeInfo ivshmem_common_info = {
+ .name = TYPE_IVSHMEM_COMMON,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(IVShmemState),
+ .abstract = true,
+ .class_init = ivshmem_common_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static const VMStateDescription ivshmem_plain_vmsd = {
+ .name = TYPE_IVSHMEM_PLAIN,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .pre_load = ivshmem_pre_load,
+ .post_load = ivshmem_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
+ VMSTATE_UINT32(intrstatus, IVShmemState),
+ VMSTATE_UINT32(intrmask, IVShmemState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const Property ivshmem_plain_properties[] = {
+ DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF),
+ DEFINE_PROP_LINK("memdev", IVShmemState, hostmem, TYPE_MEMORY_BACKEND,
+ HostMemoryBackend *),
+};
+
+static void ivshmem_plain_realize(PCIDevice *dev, Error **errp)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+
+ if (!s->hostmem) {
+ error_setg(errp, "You must specify a 'memdev'");
+ return;
+ } else if (host_memory_backend_is_mapped(s->hostmem)) {
+ error_setg(errp, "can't use already busy memdev: %s",
+ object_get_canonical_path_component(OBJECT(s->hostmem)));
+ return;
+ }
+
+ ivshmem_common_realize(dev, errp);
+}
+
+static void ivshmem_plain_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = ivshmem_plain_realize;
+ device_class_set_props(dc, ivshmem_plain_properties);
+ dc->vmsd = &ivshmem_plain_vmsd;
+}
+
+static const TypeInfo ivshmem_plain_info = {
+ .name = TYPE_IVSHMEM_PLAIN,
+ .parent = TYPE_IVSHMEM_COMMON,
+ .instance_size = sizeof(IVShmemState),
+ .class_init = ivshmem_plain_class_init,
+};
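+
+/*
+ * Typical command line for ivshmem-plain (illustrative; the backend id,
+ * path and size below are placeholders):
+ *
+ *   -object memory-backend-file,id=hostmem,share=on,
+ *           mem-path=/dev/shm/ivshmem,size=4M
+ *   -device ivshmem-plain,memdev=hostmem
+ *
+ * The memdev must not already be mapped by another device, as checked
+ * in ivshmem_plain_realize().
+ */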
+
+static const VMStateDescription ivshmem_doorbell_vmsd = {
+ .name = TYPE_IVSHMEM_DOORBELL,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .pre_load = ivshmem_pre_load,
+ .post_load = ivshmem_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
+ VMSTATE_MSIX(parent_obj, IVShmemState),
+ VMSTATE_UINT32(intrstatus, IVShmemState),
+ VMSTATE_UINT32(intrmask, IVShmemState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const Property ivshmem_doorbell_properties[] = {
+ DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
+ DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
+ DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD,
+ true),
+ DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF),
+};
+
+static void ivshmem_doorbell_init(Object *obj)
+{
+ IVShmemState *s = IVSHMEM_DOORBELL(obj);
+
+ s->features |= (1 << IVSHMEM_MSI);
+}
+
+static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp)
+{
+ IVShmemState *s = IVSHMEM_COMMON(dev);
+
+ if (!qemu_chr_fe_backend_connected(&s->server_chr)) {
+ error_setg(errp, "You must specify a 'chardev'");
+ return;
+ }
+
+ ivshmem_common_realize(dev, errp);
+}
+
+static void ivshmem_doorbell_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = ivshmem_doorbell_realize;
+ device_class_set_props(dc, ivshmem_doorbell_properties);
+ dc->vmsd = &ivshmem_doorbell_vmsd;
+}
+
+static const TypeInfo ivshmem_doorbell_info = {
+ .name = TYPE_IVSHMEM_DOORBELL,
+ .parent = TYPE_IVSHMEM_COMMON,
+ .instance_size = sizeof(IVShmemState),
+ .instance_init = ivshmem_doorbell_init,
+ .class_init = ivshmem_doorbell_class_init,
+};
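+
+/*
+ * Typical command line for ivshmem-doorbell (illustrative; the socket
+ * path is a placeholder and must point at a running ivshmem-server):
+ *
+ *   -chardev socket,path=/tmp/ivshmem_socket,id=ivsh
+ *   -device ivshmem-doorbell,chardev=ivsh,vectors=2
+ *
+ * Writes of (peer ID << 16) | vector to the doorbell register then kick
+ * the corresponding peer's eventfd.
+ */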
+
+static void ivshmem_register_types(void)
+{
+ type_register_static(&ivshmem_common_info);
+ type_register_static(&ivshmem_plain_info);
+ type_register_static(&ivshmem_doorbell_info);
+}
+
+type_init(ivshmem_register_types)
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
deleted file mode 100644
index de49d1b..0000000
--- a/hw/misc/ivshmem.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * Inter-VM Shared Memory PCI device.
- *
- * Author:
- * Cam Macdonell <cam@cs.ualberta.ca>
- *
- * Based On: cirrus_vga.c
- * Copyright (c) 2004 Fabrice Bellard
- * Copyright (c) 2004 Makoto Suzuki (suzu)
- *
- * and rtl8139.c
- * Copyright (c) 2006 Igor Kovalenko
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/cutils.h"
-#include "hw/pci/pci.h"
-#include "hw/qdev-properties.h"
-#include "hw/qdev-properties-system.h"
-#include "hw/pci/msi.h"
-#include "hw/pci/msix.h"
-#include "sysemu/kvm.h"
-#include "migration/blocker.h"
-#include "migration/vmstate.h"
-#include "qemu/error-report.h"
-#include "qemu/event_notifier.h"
-#include "qemu/module.h"
-#include "qom/object_interfaces.h"
-#include "chardev/char-fe.h"
-#include "sysemu/hostmem.h"
-#include "qapi/visitor.h"
-
-#include "hw/misc/ivshmem.h"
-#include "qom/object.h"
-
-#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
-#define PCI_DEVICE_ID_IVSHMEM 0x1110
-
-#define IVSHMEM_MAX_PEERS UINT16_MAX
-#define IVSHMEM_IOEVENTFD 0
-#define IVSHMEM_MSI 1
-
-#define IVSHMEM_REG_BAR_SIZE 0x100
-
-#define IVSHMEM_DEBUG 0
-#define IVSHMEM_DPRINTF(fmt, ...) \
- do { \
- if (IVSHMEM_DEBUG) { \
- printf("IVSHMEM: " fmt, ## __VA_ARGS__); \
- } \
- } while (0)
-
-#define TYPE_IVSHMEM_COMMON "ivshmem-common"
-typedef struct IVShmemState IVShmemState;
-DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_COMMON,
- TYPE_IVSHMEM_COMMON)
-
-#define TYPE_IVSHMEM_PLAIN "ivshmem-plain"
-DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_PLAIN,
- TYPE_IVSHMEM_PLAIN)
-
-#define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell"
-DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_DOORBELL,
- TYPE_IVSHMEM_DOORBELL)
-
-#define TYPE_IVSHMEM "ivshmem"
-DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM,
- TYPE_IVSHMEM)
-
-typedef struct Peer {
- int nb_eventfds;
- EventNotifier *eventfds;
-} Peer;
-
-typedef struct MSIVector {
- PCIDevice *pdev;
- int virq;
- bool unmasked;
-} MSIVector;
-
-struct IVShmemState {
- /*< private >*/
- PCIDevice parent_obj;
- /*< public >*/
-
- uint32_t features;
-
- /* exactly one of these two may be set */
- HostMemoryBackend *hostmem; /* with interrupts */
- CharBackend server_chr; /* without interrupts */
-
- /* registers */
- uint32_t intrmask;
- uint32_t intrstatus;
- int vm_id;
-
- /* BARs */
- MemoryRegion ivshmem_mmio; /* BAR 0 (registers) */
- MemoryRegion *ivshmem_bar2; /* BAR 2 (shared memory) */
- MemoryRegion server_bar2; /* used with server_chr */
-
- /* interrupt support */
- Peer *peers;
- int nb_peers; /* space in @peers[] */
- uint32_t vectors;
- MSIVector *msi_vectors;
- uint64_t msg_buf; /* buffer for receiving server messages */
- int msg_buffered_bytes; /* #bytes in @msg_buf */
-
- /* migration stuff */
- OnOffAuto master;
- Error *migration_blocker;
-};
-
-/* registers for the Inter-VM shared memory device */
-enum ivshmem_registers {
- INTRMASK = 0,
- INTRSTATUS = 4,
- IVPOSITION = 8,
- DOORBELL = 12,
-};
-
-static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
- unsigned int feature) {
- return (ivs->features & (1 << feature));
-}
-
-static inline bool ivshmem_is_master(IVShmemState *s)
-{
- assert(s->master != ON_OFF_AUTO_AUTO);
- return s->master == ON_OFF_AUTO_ON;
-}
-
-static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
-{
- IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
-
- s->intrmask = val;
-}
-
-static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
-{
- uint32_t ret = s->intrmask;
-
- IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
- return ret;
-}
-
-static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
-{
- IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
-
- s->intrstatus = val;
-}
-
-static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
-{
- uint32_t ret = s->intrstatus;
-
- /* reading ISR clears all interrupts */
- s->intrstatus = 0;
- return ret;
-}
-
-static void ivshmem_io_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- IVShmemState *s = opaque;
-
- uint16_t dest = val >> 16;
- uint16_t vector = val & 0xff;
-
- addr &= 0xfc;
-
- IVSHMEM_DPRINTF("writing to addr " HWADDR_FMT_plx "\n", addr);
- switch (addr)
- {
- case INTRMASK:
- ivshmem_IntrMask_write(s, val);
- break;
-
- case INTRSTATUS:
- ivshmem_IntrStatus_write(s, val);
- break;
-
- case DOORBELL:
- /* check that dest VM ID is reasonable */
- if (dest >= s->nb_peers) {
- IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
- break;
- }
-
- /* check doorbell range */
- if (vector < s->peers[dest].nb_eventfds) {
- IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
- event_notifier_set(&s->peers[dest].eventfds[vector]);
- } else {
- IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
- vector, dest);
- }
- break;
- default:
- IVSHMEM_DPRINTF("Unhandled write " HWADDR_FMT_plx "\n", addr);
- }
-}
-
-static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
- unsigned size)
-{
-
- IVShmemState *s = opaque;
- uint32_t ret;
-
- switch (addr)
- {
- case INTRMASK:
- ret = ivshmem_IntrMask_read(s);
- break;
-
- case INTRSTATUS:
- ret = ivshmem_IntrStatus_read(s);
- break;
-
- case IVPOSITION:
- ret = s->vm_id;
- break;
-
- default:
- IVSHMEM_DPRINTF("why are we reading " HWADDR_FMT_plx "\n", addr);
- ret = 0;
- }
-
- return ret;
-}
-
-static const MemoryRegionOps ivshmem_mmio_ops = {
- .read = ivshmem_io_read,
- .write = ivshmem_io_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .impl = {
- .min_access_size = 4,
- .max_access_size = 4,
- },
-};
-
-static void ivshmem_vector_notify(void *opaque)
-{
- MSIVector *entry = opaque;
- PCIDevice *pdev = entry->pdev;
- IVShmemState *s = IVSHMEM_COMMON(pdev);
- int vector = entry - s->msi_vectors;
- EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
-
- if (!event_notifier_test_and_clear(n)) {
- return;
- }
-
- IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
- if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
- if (msix_enabled(pdev)) {
- msix_notify(pdev, vector);
- }
- } else {
- ivshmem_IntrStatus_write(s, 1);
- }
-}
-
-static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
- MSIMessage msg)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
- EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
- MSIVector *v = &s->msi_vectors[vector];
- int ret;
-
- IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);
- if (!v->pdev) {
- error_report("ivshmem: vector %d route does not exist", vector);
- return -EINVAL;
- }
- assert(!v->unmasked);
-
- ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
- if (ret < 0) {
- return ret;
- }
- kvm_irqchip_commit_routes(kvm_state);
-
- ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
- if (ret < 0) {
- return ret;
- }
- v->unmasked = true;
-
- return 0;
-}
-
-static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
- EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
- MSIVector *v = &s->msi_vectors[vector];
- int ret;
-
- IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);
- if (!v->pdev) {
- error_report("ivshmem: vector %d route does not exist", vector);
- return;
- }
- assert(v->unmasked);
-
- ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq);
- if (ret < 0) {
- error_report("remove_irqfd_notifier_gsi failed");
- return;
- }
- v->unmasked = false;
-}
-
-static void ivshmem_vector_poll(PCIDevice *dev,
- unsigned int vector_start,
- unsigned int vector_end)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
- unsigned int vector;
-
- IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);
-
- vector_end = MIN(vector_end, s->vectors);
-
- for (vector = vector_start; vector < vector_end; vector++) {
- EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];
-
- if (!msix_is_masked(dev, vector)) {
- continue;
- }
-
- if (event_notifier_test_and_clear(notifier)) {
- msix_set_pending(dev, vector);
- }
- }
-}
-
-static void watch_vector_notifier(IVShmemState *s, EventNotifier *n,
- int vector)
-{
- int eventfd = event_notifier_get_fd(n);
-
- assert(!s->msi_vectors[vector].pdev);
- s->msi_vectors[vector].pdev = PCI_DEVICE(s);
-
- qemu_set_fd_handler(eventfd, ivshmem_vector_notify,
- NULL, &s->msi_vectors[vector]);
-}
-
-static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
-{
- memory_region_add_eventfd(&s->ivshmem_mmio,
- DOORBELL,
- 4,
- true,
- (posn << 16) | i,
- &s->peers[posn].eventfds[i]);
-}
-
-static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
-{
- memory_region_del_eventfd(&s->ivshmem_mmio,
- DOORBELL,
- 4,
- true,
- (posn << 16) | i,
- &s->peers[posn].eventfds[i]);
-}
-
-static void close_peer_eventfds(IVShmemState *s, int posn)
-{
- int i, n;
-
- assert(posn >= 0 && posn < s->nb_peers);
- n = s->peers[posn].nb_eventfds;
-
- if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
- memory_region_transaction_begin();
- for (i = 0; i < n; i++) {
- ivshmem_del_eventfd(s, posn, i);
- }
- memory_region_transaction_commit();
- }
-
- for (i = 0; i < n; i++) {
- event_notifier_cleanup(&s->peers[posn].eventfds[i]);
- }
-
- g_free(s->peers[posn].eventfds);
- s->peers[posn].nb_eventfds = 0;
-}
-
-static void resize_peers(IVShmemState *s, int nb_peers)
-{
- int old_nb_peers = s->nb_peers;
- int i;
-
- assert(nb_peers > old_nb_peers);
- IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers);
-
- s->peers = g_renew(Peer, s->peers, nb_peers);
- s->nb_peers = nb_peers;
-
- for (i = old_nb_peers; i < nb_peers; i++) {
- s->peers[i].eventfds = g_new0(EventNotifier, s->vectors);
- s->peers[i].nb_eventfds = 0;
- }
-}
-
-static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector,
- Error **errp)
-{
- PCIDevice *pdev = PCI_DEVICE(s);
- KVMRouteChange c;
- int ret;
-
- IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
- assert(!s->msi_vectors[vector].pdev);
-
- c = kvm_irqchip_begin_route_changes(kvm_state);
- ret = kvm_irqchip_add_msi_route(&c, vector, pdev);
- if (ret < 0) {
- error_setg(errp, "kvm_irqchip_add_msi_route failed");
- return;
- }
- kvm_irqchip_commit_route_changes(&c);
-
- s->msi_vectors[vector].virq = ret;
- s->msi_vectors[vector].pdev = pdev;
-}
-
-static void setup_interrupt(IVShmemState *s, int vector, Error **errp)
-{
- EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
- bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
- ivshmem_has_feature(s, IVSHMEM_MSI);
- PCIDevice *pdev = PCI_DEVICE(s);
- Error *err = NULL;
-
- IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);
-
- if (!with_irqfd) {
- IVSHMEM_DPRINTF("with eventfd\n");
- watch_vector_notifier(s, n, vector);
- } else if (msix_enabled(pdev)) {
- IVSHMEM_DPRINTF("with irqfd\n");
- ivshmem_add_kvm_msi_virq(s, vector, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
-
- if (!msix_is_masked(pdev, vector)) {
- kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
- s->msi_vectors[vector].virq);
- /* TODO handle error */
- }
- } else {
- /* it will be delayed until msix is enabled, in write_config */
- IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
- }
-}
-
-static void process_msg_shmem(IVShmemState *s, int fd, Error **errp)
-{
- struct stat buf;
- size_t size;
-
- if (s->ivshmem_bar2) {
- error_setg(errp, "server sent unexpected shared memory message");
- close(fd);
- return;
- }
-
- if (fstat(fd, &buf) < 0) {
- error_setg_errno(errp, errno,
- "can't determine size of shared memory sent by server");
- close(fd);
- return;
- }
-
- size = buf.st_size;
-
- /* mmap the region and map into the BAR2 */
- if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s),
- "ivshmem.bar2", size, RAM_SHARED,
- fd, 0, errp)) {
- return;
- }
-
- s->ivshmem_bar2 = &s->server_bar2;
-}
-
-static void process_msg_disconnect(IVShmemState *s, uint16_t posn,
- Error **errp)
-{
- IVSHMEM_DPRINTF("posn %d has gone away\n", posn);
- if (posn >= s->nb_peers || posn == s->vm_id) {
- error_setg(errp, "invalid peer %d", posn);
- return;
- }
- close_peer_eventfds(s, posn);
-}
-
-static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd,
- Error **errp)
-{
- Peer *peer = &s->peers[posn];
- int vector;
-
- /*
- * The N-th connect message for this peer comes with the file
- * descriptor for vector N-1. Count messages to find the vector.
- */
- if (peer->nb_eventfds >= s->vectors) {
- error_setg(errp, "Too many eventfd received, device has %d vectors",
- s->vectors);
- close(fd);
- return;
- }
- vector = peer->nb_eventfds++;
-
- IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd);
- event_notifier_init_fd(&peer->eventfds[vector], fd);
- g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */
-
- if (posn == s->vm_id) {
- setup_interrupt(s, vector, errp);
- /* TODO do we need to handle the error? */
- }
-
- if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
- ivshmem_add_eventfd(s, posn, vector);
- }
-}
-
-static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp)
-{
- IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd);
-
- if (msg < -1 || msg > IVSHMEM_MAX_PEERS) {
- error_setg(errp, "server sent invalid message %" PRId64, msg);
- close(fd);
- return;
- }
-
- if (msg == -1) {
- process_msg_shmem(s, fd, errp);
- return;
- }
-
- if (msg >= s->nb_peers) {
- resize_peers(s, msg + 1);
- }
-
- if (fd >= 0) {
- process_msg_connect(s, msg, fd, errp);
- } else {
- process_msg_disconnect(s, msg, errp);
- }
-}
-
-static int ivshmem_can_receive(void *opaque)
-{
- IVShmemState *s = opaque;
-
- assert(s->msg_buffered_bytes < sizeof(s->msg_buf));
- return sizeof(s->msg_buf) - s->msg_buffered_bytes;
-}
-
-static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
-{
- IVShmemState *s = opaque;
- Error *err = NULL;
- int fd;
- int64_t msg;
-
- assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf));
- memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size);
- s->msg_buffered_bytes += size;
- if (s->msg_buffered_bytes < sizeof(s->msg_buf)) {
- return;
- }
- msg = le64_to_cpu(s->msg_buf);
- s->msg_buffered_bytes = 0;
-
- fd = qemu_chr_fe_get_msgfd(&s->server_chr);
-
- process_msg(s, msg, fd, &err);
- if (err) {
- error_report_err(err);
- }
-}
-
-static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
-{
- int64_t msg;
- int n, ret;
-
- n = 0;
- do {
- ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n,
- sizeof(msg) - n);
- if (ret < 0) {
- if (ret == -EINTR) {
- continue;
- }
- error_setg_errno(errp, -ret, "read from server failed");
- return INT64_MIN;
- }
- n += ret;
- } while (n < sizeof(msg));
-
- *pfd = qemu_chr_fe_get_msgfd(&s->server_chr);
- return le64_to_cpu(msg);
-}
-
-static void ivshmem_recv_setup(IVShmemState *s, Error **errp)
-{
- Error *err = NULL;
- int64_t msg;
- int fd;
-
- msg = ivshmem_recv_msg(s, &fd, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- if (msg != IVSHMEM_PROTOCOL_VERSION) {
- error_setg(errp, "server sent version %" PRId64 ", expecting %d",
- msg, IVSHMEM_PROTOCOL_VERSION);
- return;
- }
- if (fd != -1) {
- error_setg(errp, "server sent invalid version message");
- return;
- }
-
- /*
- * ivshmem-server sends the remaining initial messages in a fixed
- * order, but the device has always accepted them in any order.
- * Stay as compatible as practical, just in case people use
- * servers that behave differently.
- */
-
- /*
- * ivshmem_device_spec.txt has always required the ID message
- * right here, and ivshmem-server has always complied. However,
- * older versions of the device accepted it out of order, but
- * broke when an interrupt setup message arrived before it.
- */
- msg = ivshmem_recv_msg(s, &fd, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) {
- error_setg(errp, "server sent invalid ID message");
- return;
- }
- s->vm_id = msg;
-
- /*
- * Receive more messages until we got shared memory.
- */
- do {
- msg = ivshmem_recv_msg(s, &fd, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- process_msg(s, msg, fd, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- } while (msg != -1);
-
- /*
- * This function must either map the shared memory or fail. The
- * loop above ensures that: it terminates normally only after it
- * successfully processed the server's shared memory message.
- * Assert that actually mapped the shared memory:
- */
- assert(s->ivshmem_bar2);
-}
-
-/* Select the MSI-X vectors used by device.
- * ivshmem maps events to vectors statically, so
- * we just enable all vectors on init and after reset. */
-static void ivshmem_msix_vector_use(IVShmemState *s)
-{
- PCIDevice *d = PCI_DEVICE(s);
- int i;
-
- for (i = 0; i < s->vectors; i++) {
- msix_vector_use(d, i);
- }
-}
-
-static void ivshmem_disable_irqfd(IVShmemState *s);
-
-static void ivshmem_reset(DeviceState *d)
-{
- IVShmemState *s = IVSHMEM_COMMON(d);
-
- ivshmem_disable_irqfd(s);
-
- s->intrstatus = 0;
- s->intrmask = 0;
- if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
- ivshmem_msix_vector_use(s);
- }
-}
-
-static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp)
-{
- /* allocate QEMU callback data for receiving interrupts */
- s->msi_vectors = g_new0(MSIVector, s->vectors);
-
- if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
- if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) {
- return -1;
- }
-
- IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
- ivshmem_msix_vector_use(s);
- }
-
- return 0;
-}
-
-static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
-{
- IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);
-
- if (s->msi_vectors[vector].pdev == NULL) {
- return;
- }
-
- /* it was cleaned when masked in the frontend. */
- kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);
-
- s->msi_vectors[vector].pdev = NULL;
-}
-
-static void ivshmem_enable_irqfd(IVShmemState *s)
-{
- PCIDevice *pdev = PCI_DEVICE(s);
- int i;
-
- for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
- Error *err = NULL;
-
- ivshmem_add_kvm_msi_virq(s, i, &err);
- if (err) {
- error_report_err(err);
- goto undo;
- }
- }
-
- if (msix_set_vector_notifiers(pdev,
- ivshmem_vector_unmask,
- ivshmem_vector_mask,
- ivshmem_vector_poll)) {
- error_report("ivshmem: msix_set_vector_notifiers failed");
- goto undo;
- }
- return;
-
-undo:
- while (--i >= 0) {
- ivshmem_remove_kvm_msi_virq(s, i);
- }
-}
-
-static void ivshmem_disable_irqfd(IVShmemState *s)
-{
- PCIDevice *pdev = PCI_DEVICE(s);
- int i;
-
- if (!pdev->msix_vector_use_notifier) {
- return;
- }
-
- msix_unset_vector_notifiers(pdev);
-
- for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
- /*
- * MSI-X is already disabled here so msix_unset_vector_notifiers()
- * didn't call our release notifier. Do it now to keep our masks and
- * unmasks balanced.
- */
- if (s->msi_vectors[i].unmasked) {
- ivshmem_vector_mask(pdev, i);
- }
- ivshmem_remove_kvm_msi_virq(s, i);
- }
-
-}
-
-static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
- uint32_t val, int len)
-{
- IVShmemState *s = IVSHMEM_COMMON(pdev);
- int is_enabled, was_enabled = msix_enabled(pdev);
-
- pci_default_write_config(pdev, address, val, len);
- is_enabled = msix_enabled(pdev);
-
- if (kvm_msi_via_irqfd_enabled()) {
- if (!was_enabled && is_enabled) {
- ivshmem_enable_irqfd(s);
- } else if (was_enabled && !is_enabled) {
- ivshmem_disable_irqfd(s);
- }
- }
-}
-
-static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
-{
- ERRP_GUARD();
- IVShmemState *s = IVSHMEM_COMMON(dev);
- Error *err = NULL;
- uint8_t *pci_conf;
-
- /* IRQFD requires MSI */
- if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
- !ivshmem_has_feature(s, IVSHMEM_MSI)) {
- error_setg(errp, "ioeventfd/irqfd requires MSI");
- return;
- }
-
- pci_conf = dev->config;
- pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
-
- memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
- "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
-
- /* region for registers*/
- pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
- &s->ivshmem_mmio);
-
- if (s->hostmem != NULL) {
- IVSHMEM_DPRINTF("using hostmem\n");
-
- s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem);
- host_memory_backend_set_mapped(s->hostmem, true);
- } else {
- Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr);
- assert(chr);
-
- IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
- chr->filename);
-
- /* we allocate enough space for 16 peers and grow as needed */
- resize_peers(s, 16);
-
- /*
- * Receive setup messages from server synchronously.
- * Older versions did it asynchronously, but that creates a
- * number of entertaining race conditions.
- */
- ivshmem_recv_setup(s, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
-
- if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) {
- error_setg(errp,
- "master must connect to the server before any peers");
- return;
- }
-
- qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive,
- ivshmem_read, NULL, NULL, s, NULL, true);
-
- if (ivshmem_setup_interrupts(s, errp) < 0) {
- error_prepend(errp, "Failed to initialize interrupts: ");
- return;
- }
- }
-
- if (s->master == ON_OFF_AUTO_AUTO) {
- s->master = s->vm_id == 0 ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
- }
-
- if (!ivshmem_is_master(s)) {
- error_setg(&s->migration_blocker,
- "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
- if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
- return;
- }
- }
-
- vmstate_register_ram(s->ivshmem_bar2, DEVICE(s));
- pci_register_bar(PCI_DEVICE(s), 2,
- PCI_BASE_ADDRESS_SPACE_MEMORY |
- PCI_BASE_ADDRESS_MEM_PREFETCH |
- PCI_BASE_ADDRESS_MEM_TYPE_64,
- s->ivshmem_bar2);
-}
-
-static void ivshmem_exit(PCIDevice *dev)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
- int i;
-
- migrate_del_blocker(&s->migration_blocker);
-
- if (memory_region_is_mapped(s->ivshmem_bar2)) {
- if (!s->hostmem) {
- void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2);
- int fd;
-
- if (munmap(addr, memory_region_size(s->ivshmem_bar2) == -1)) {
- error_report("Failed to munmap shared memory %s",
- strerror(errno));
- }
-
- fd = memory_region_get_fd(s->ivshmem_bar2);
- close(fd);
- }
-
- vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev));
- }
-
- if (s->hostmem) {
- host_memory_backend_set_mapped(s->hostmem, false);
- }
-
- if (s->peers) {
- for (i = 0; i < s->nb_peers; i++) {
- close_peer_eventfds(s, i);
- }
- g_free(s->peers);
- }
-
- if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
- msix_uninit_exclusive_bar(dev);
- }
-
- g_free(s->msi_vectors);
-}
-
-static int ivshmem_pre_load(void *opaque)
-{
- IVShmemState *s = opaque;
-
- if (!ivshmem_is_master(s)) {
- error_report("'peer' devices are not migratable");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ivshmem_post_load(void *opaque, int version_id)
-{
- IVShmemState *s = opaque;
-
- if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
- ivshmem_msix_vector_use(s);
- }
- return 0;
-}
-
-static void ivshmem_common_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-
- k->realize = ivshmem_common_realize;
- k->exit = ivshmem_exit;
- k->config_write = ivshmem_write_config;
- k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
- k->device_id = PCI_DEVICE_ID_IVSHMEM;
- k->class_id = PCI_CLASS_MEMORY_RAM;
- k->revision = 1;
- dc->reset = ivshmem_reset;
- set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->desc = "Inter-VM shared memory";
-}
-
-static const TypeInfo ivshmem_common_info = {
- .name = TYPE_IVSHMEM_COMMON,
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(IVShmemState),
- .abstract = true,
- .class_init = ivshmem_common_class_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static const VMStateDescription ivshmem_plain_vmsd = {
- .name = TYPE_IVSHMEM_PLAIN,
- .version_id = 0,
- .minimum_version_id = 0,
- .pre_load = ivshmem_pre_load,
- .post_load = ivshmem_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
- VMSTATE_UINT32(intrstatus, IVShmemState),
- VMSTATE_UINT32(intrmask, IVShmemState),
- VMSTATE_END_OF_LIST()
- },
-};
-
-static Property ivshmem_plain_properties[] = {
- DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF),
- DEFINE_PROP_LINK("memdev", IVShmemState, hostmem, TYPE_MEMORY_BACKEND,
- HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void ivshmem_plain_realize(PCIDevice *dev, Error **errp)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
-
- if (!s->hostmem) {
- error_setg(errp, "You must specify a 'memdev'");
- return;
- } else if (host_memory_backend_is_mapped(s->hostmem)) {
- error_setg(errp, "can't use already busy memdev: %s",
- object_get_canonical_path_component(OBJECT(s->hostmem)));
- return;
- }
-
- ivshmem_common_realize(dev, errp);
-}
-
-static void ivshmem_plain_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-
- k->realize = ivshmem_plain_realize;
- device_class_set_props(dc, ivshmem_plain_properties);
- dc->vmsd = &ivshmem_plain_vmsd;
-}
-
-static const TypeInfo ivshmem_plain_info = {
- .name = TYPE_IVSHMEM_PLAIN,
- .parent = TYPE_IVSHMEM_COMMON,
- .instance_size = sizeof(IVShmemState),
- .class_init = ivshmem_plain_class_init,
-};
-
-static const VMStateDescription ivshmem_doorbell_vmsd = {
- .name = TYPE_IVSHMEM_DOORBELL,
- .version_id = 0,
- .minimum_version_id = 0,
- .pre_load = ivshmem_pre_load,
- .post_load = ivshmem_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
- VMSTATE_MSIX(parent_obj, IVShmemState),
- VMSTATE_UINT32(intrstatus, IVShmemState),
- VMSTATE_UINT32(intrmask, IVShmemState),
- VMSTATE_END_OF_LIST()
- },
-};
-
-static Property ivshmem_doorbell_properties[] = {
- DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
- DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
- DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD,
- true),
- DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void ivshmem_doorbell_init(Object *obj)
-{
- IVShmemState *s = IVSHMEM_DOORBELL(obj);
-
- s->features |= (1 << IVSHMEM_MSI);
-}
-
-static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp)
-{
- IVShmemState *s = IVSHMEM_COMMON(dev);
-
- if (!qemu_chr_fe_backend_connected(&s->server_chr)) {
- error_setg(errp, "You must specify a 'chardev'");
- return;
- }
-
- ivshmem_common_realize(dev, errp);
-}
-
-static void ivshmem_doorbell_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-
- k->realize = ivshmem_doorbell_realize;
- device_class_set_props(dc, ivshmem_doorbell_properties);
- dc->vmsd = &ivshmem_doorbell_vmsd;
-}
-
-static const TypeInfo ivshmem_doorbell_info = {
- .name = TYPE_IVSHMEM_DOORBELL,
- .parent = TYPE_IVSHMEM_COMMON,
- .instance_size = sizeof(IVShmemState),
- .instance_init = ivshmem_doorbell_init,
- .class_init = ivshmem_doorbell_class_init,
-};
-
-static void ivshmem_register_types(void)
-{
- type_register_static(&ivshmem_common_info);
- type_register_static(&ivshmem_plain_info);
- type_register_static(&ivshmem_doorbell_info);
-}
-
-type_init(ivshmem_register_types)
diff --git a/hw/misc/lasi.c b/hw/misc/lasi.c
index 970fc98..9f758c6 100644
--- a/hw/misc/lasi.c
+++ b/hw/misc/lasi.c
@@ -15,8 +15,8 @@
#include "qapi/error.h"
#include "trace.h"
#include "hw/irq.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "migration/vmstate.h"
#include "qom/object.h"
#include "hw/misc/lasi.h"
@@ -263,11 +263,11 @@ static void lasi_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), lasi_set_irq, LASI_IRQS);
}
-static void lasi_class_init(ObjectClass *klass, void *data)
+static void lasi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = lasi_reset;
+ device_class_set_legacy_reset(dc, lasi_reset);
dc->vmsd = &vmstate_lasi;
}
diff --git a/hw/misc/led.c b/hw/misc/led.c
index d9998ab..f7f7090 100644
--- a/hw/misc/led.c
+++ b/hw/misc/led.c
@@ -101,20 +101,19 @@ static void led_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(DEVICE(s), led_set_state_gpio_handler, 1);
}
-static Property led_properties[] = {
+static const Property led_properties[] = {
DEFINE_PROP_STRING("color", LEDState, color),
DEFINE_PROP_STRING("description", LEDState, description),
DEFINE_PROP_BOOL("gpio-active-high", LEDState, gpio_active_high, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void led_class_init(ObjectClass *klass, void *data)
+static void led_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "LED";
dc->vmsd = &vmstate_led;
- dc->reset = led_reset;
+ device_class_set_legacy_reset(dc, led_reset);
dc->realize = led_realize;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
device_class_set_props(dc, led_properties);
diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c
index 652395b..bc37e2a 100644
--- a/hw/misc/mac_via.c
+++ b/hw/misc/mac_via.c
@@ -16,7 +16,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
@@ -24,13 +24,13 @@
#include "hw/misc/mac_via.h"
#include "hw/misc/mos6522.h"
#include "hw/input/adb.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/rtc.h"
+#include "system/block-backend.h"
+#include "system/rtc.h"
#include "trace.h"
#include "qemu/log.h"
@@ -495,7 +495,6 @@ static void via1_rtc_update(MOS6522Q800VIA1State *v1s)
break;
default:
g_assert_not_reached();
- break;
}
return;
}
@@ -556,7 +555,6 @@ static void via1_rtc_update(MOS6522Q800VIA1State *v1s)
break;
default:
g_assert_not_reached();
- break;
}
return;
}
@@ -1324,12 +1322,11 @@ static const VMStateDescription vmstate_q800_via1 = {
}
};
-static Property mos6522_q800_via1_properties[] = {
+static const Property mos6522_q800_via1_properties[] = {
DEFINE_PROP_DRIVE("drive", MOS6522Q800VIA1State, blk),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mos6522_q800_via1_class_init(ObjectClass *oc, void *data)
+static void mos6522_q800_via1_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
@@ -1418,7 +1415,7 @@ static const VMStateDescription vmstate_q800_via2 = {
}
};
-static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data)
+static void mos6522_q800_via2_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index beab0ff..bcd00c9 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -29,8 +29,8 @@
#include "migration/vmstate.h"
#include "hw/misc/macio/cuda.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/rtc.h"
+#include "system/runstate.h"
+#include "system/rtc.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
@@ -554,17 +554,16 @@ static void cuda_init(Object *obj)
DEVICE(obj), "adb.0");
}
-static Property cuda_properties[] = {
+static const Property cuda_properties[] = {
DEFINE_PROP_UINT64("timebase-frequency", CUDAState, tb_frequency, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void cuda_class_init(ObjectClass *oc, void *data)
+static void cuda_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = cuda_realize;
- dc->reset = cuda_reset;
+ device_class_set_legacy_reset(dc, cuda_reset);
dc->vmsd = &vmstate_cuda;
device_class_set_props(dc, cuda_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
@@ -599,7 +598,7 @@ static void mos6522_cuda_reset_hold(Object *obj, ResetType type)
ms->timers[1].frequency = (SCALE_US * 6000) / 4700;
}
-static void mos6522_cuda_class_init(ObjectClass *oc, void *data)
+static void mos6522_cuda_class_init(ObjectClass *oc, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(oc);
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c
index 5495637..990551f 100644
--- a/hw/misc/macio/gpio.c
+++ b/hw/misc/macio/gpio.c
@@ -34,6 +34,11 @@
#include "qemu/module.h"
#include "trace.h"
+enum MacioGPIORegisterBits {
+ OUT_DATA = 1,
+ IN_DATA = 2,
+ OUT_ENABLE = 4,
+};
void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state)
{
@@ -41,14 +46,14 @@ void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state)
trace_macio_set_gpio(gpio, state);
- if (s->gpio_regs[gpio] & 4) {
+ if (s->gpio_regs[gpio] & OUT_ENABLE) {
qemu_log_mask(LOG_GUEST_ERROR,
"GPIO: Setting GPIO %d while it's an output\n", gpio);
}
- new_reg = s->gpio_regs[gpio] & ~2;
+ new_reg = s->gpio_regs[gpio] & ~IN_DATA;
if (state) {
- new_reg |= 2;
+ new_reg |= IN_DATA;
}
if (new_reg == s->gpio_regs[gpio]) {
@@ -107,12 +112,12 @@ static void macio_gpio_write(void *opaque, hwaddr addr, uint64_t value,
addr -= 8;
if (addr < 36) {
- value &= ~2;
+ value &= ~IN_DATA;
- if (value & 4) {
- ibit = (value & 1) << 1;
+ if (value & OUT_ENABLE) {
+ ibit = (value & OUT_DATA) << 1;
} else {
- ibit = s->gpio_regs[addr] & 2;
+ ibit = s->gpio_regs[addr] & IN_DATA;
}
s->gpio_regs[addr] = value | ibit;
@@ -135,7 +140,7 @@ static uint64_t macio_gpio_read(void *opaque, hwaddr addr, unsigned size)
}
}
- trace_macio_gpio_write(addr, val);
+ trace_macio_gpio_read(addr, val);
return val;
}
@@ -189,12 +194,12 @@ static void macio_gpio_nmi(NMIState *n, int cpu_index, Error **errp)
macio_set_gpio(MACIO_GPIO(n), 9, false);
}
-static void macio_gpio_class_init(ObjectClass *oc, void *data)
+static void macio_gpio_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
NMIClass *nc = NMI_CLASS(oc);
- dc->reset = macio_gpio_reset;
+ device_class_set_legacy_reset(dc, macio_gpio_reset);
dc->vmsd = &vmstate_macio_gpio;
nc->nmi_monitor_handler = macio_gpio_nmi;
}
@@ -205,7 +210,7 @@ static const TypeInfo macio_gpio_init_info = {
.instance_size = sizeof(MacIOGPIOState),
.instance_init = macio_gpio_init,
.class_init = macio_gpio_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ }
},
diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
index 2a528ea..b2b42dd 100644
--- a/hw/misc/macio/mac_dbdma.c
+++ b/hw/misc/macio/mac_dbdma.c
@@ -44,7 +44,7 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/log.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
/* debug DBDMA */
#define DEBUG_DBDMA 0
@@ -917,12 +917,12 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp)
s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard);
}
-static void mac_dbdma_class_init(ObjectClass *oc, void *data)
+static void mac_dbdma_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = mac_dbdma_realize;
- dc->reset = mac_dbdma_reset;
+ device_class_set_legacy_reset(dc, mac_dbdma_reset);
dc->vmsd = &vmstate_dbdma;
}
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index 3f449f9..6710485 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -385,7 +385,7 @@ static const VMStateDescription vmstate_macio_oldworld = {
}
};
-static void macio_oldworld_class_init(ObjectClass *oc, void *data)
+static void macio_oldworld_class_init(ObjectClass *oc, const void *data)
{
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -405,13 +405,12 @@ static const VMStateDescription vmstate_macio_newworld = {
}
};
-static Property macio_newworld_properties[] = {
+static const Property macio_newworld_properties[] = {
DEFINE_PROP_BOOL("has-pmu", NewWorldMacIOState, has_pmu, false),
DEFINE_PROP_BOOL("has-adb", NewWorldMacIOState, has_adb, false),
- DEFINE_PROP_END_OF_LIST()
};
-static void macio_newworld_class_init(ObjectClass *oc, void *data)
+static void macio_newworld_class_init(ObjectClass *oc, const void *data)
{
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -422,12 +421,11 @@ static void macio_newworld_class_init(ObjectClass *oc, void *data)
device_class_set_props(dc, macio_newworld_properties);
}
-static Property macio_properties[] = {
+static const Property macio_properties[] = {
DEFINE_PROP_UINT64("frequency", MacIOState, frequency, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void macio_class_init(ObjectClass *klass, void *data)
+static void macio_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -467,7 +465,7 @@ static const TypeInfo macio_type_info = {
.instance_init = macio_instance_init,
.abstract = true,
.class_init = macio_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c
index 238da58..3734913 100644
--- a/hw/misc/macio/pmu.c
+++ b/hw/misc/macio/pmu.c
@@ -34,8 +34,8 @@
#include "hw/irq.h"
#include "hw/misc/macio/pmu.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/rtc.h"
+#include "system/runstate.h"
+#include "system/rtc.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
@@ -760,17 +760,16 @@ static void pmu_init(Object *obj)
sysbus_init_mmio(d, &s->mem);
}
-static Property pmu_properties[] = {
+static const Property pmu_properties[] = {
DEFINE_PROP_BOOL("has-adb", PMUState, has_adb, true),
- DEFINE_PROP_END_OF_LIST()
};
-static void pmu_class_init(ObjectClass *oc, void *data)
+static void pmu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = pmu_realize;
- dc->reset = pmu_reset;
+ device_class_set_legacy_reset(dc, pmu_reset);
dc->vmsd = &vmstate_pmu;
device_class_set_props(dc, pmu_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
@@ -809,7 +808,7 @@ static void mos6522_pmu_reset_hold(Object *obj, ResetType type)
s->last_b = ms->b = TACK | TREQ;
}
-static void mos6522_pmu_class_init(ObjectClass *oc, void *data)
+static void mos6522_pmu_class_init(ObjectClass *oc, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(oc);
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
diff --git a/hw/misc/macio/trace-events b/hw/misc/macio/trace-events
index ad4b9d1..055a407 100644
--- a/hw/misc/macio/trace-events
+++ b/hw/misc/macio/trace-events
@@ -18,7 +18,8 @@ macio_timer_read(uint64_t addr, unsigned len, uint32_t val) "read addr 0x%"PRIx6
macio_set_gpio(int gpio, bool state) "setting GPIO %d to %d"
macio_gpio_irq_assert(int gpio) "asserting GPIO %d"
macio_gpio_irq_deassert(int gpio) "deasserting GPIO %d"
-macio_gpio_write(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64" value: 0x%"PRIx64
+macio_gpio_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64
+macio_gpio_read(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64
# pmu.c
pmu_adb_poll(int olen) "ADB autopoll, olen=%d"
diff --git a/hw/misc/mchp_pfsoc_dmc.c b/hw/misc/mchp_pfsoc_dmc.c
index 43d8e97..599f845 100644
--- a/hw/misc/mchp_pfsoc_dmc.c
+++ b/hw/misc/mchp_pfsoc_dmc.c
@@ -110,7 +110,8 @@ static void mchp_pfsoc_ddr_sgmii_phy_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sgmii_phy);
}
-static void mchp_pfsoc_ddr_sgmii_phy_class_init(ObjectClass *klass, void *data)
+static void mchp_pfsoc_ddr_sgmii_phy_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -192,7 +193,7 @@ static void mchp_pfsoc_ddr_cfg_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->cfg);
}
-static void mchp_pfsoc_ddr_cfg_class_init(ObjectClass *klass, void *data)
+static void mchp_pfsoc_ddr_cfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/mchp_pfsoc_ioscb.c b/hw/misc/mchp_pfsoc_ioscb.c
index a71d134..10fc7ea 100644
--- a/hw/misc/mchp_pfsoc_ioscb.c
+++ b/hw/misc/mchp_pfsoc_ioscb.c
@@ -292,7 +292,7 @@ static void mchp_pfsoc_ioscb_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
}
-static void mchp_pfsoc_ioscb_class_init(ObjectClass *klass, void *data)
+static void mchp_pfsoc_ioscb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/mchp_pfsoc_sysreg.c b/hw/misc/mchp_pfsoc_sysreg.c
index 7876fe0..f47c835 100644
--- a/hw/misc/mchp_pfsoc_sysreg.c
+++ b/hw/misc/mchp_pfsoc_sysreg.c
@@ -27,7 +27,9 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/misc/mchp_pfsoc_sysreg.h"
+#include "system/runstate.h"
+#define MSS_RESET_CR 0x18
#define ENVM_CR 0xb8
#define MESSAGE_INT 0x118c
@@ -56,6 +58,11 @@ static void mchp_pfsoc_sysreg_write(void *opaque, hwaddr offset,
{
MchpPfSoCSysregState *s = opaque;
switch (offset) {
+ case MSS_RESET_CR:
+ if (value == 0xdead) {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ }
+ break;
case MESSAGE_INT:
qemu_irq_lower(s->irq);
break;
@@ -85,7 +92,7 @@ static void mchp_pfsoc_sysreg_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
}
-static void mchp_pfsoc_sysreg_class_init(ObjectClass *klass, void *data)
+static void mchp_pfsoc_sysreg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 2ca8717..6d47de4 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -37,7 +37,9 @@ system_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c'))
subdir('macio')
-system_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c'))
+# ivshmem devices
+system_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem-pci.c'))
+system_ss.add(when: 'CONFIG_IVSHMEM_FLAT_DEVICE', if_true: files('ivshmem-flat.c'))
system_ss.add(when: 'CONFIG_ALLWINNER_SRAMC', if_true: files('allwinner-sramc.c'))
system_ss.add(when: 'CONFIG_ALLWINNER_A10_CCM', if_true: files('allwinner-a10-ccm.c'))
@@ -51,9 +53,10 @@ system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c'
system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-dramc.c'))
system_ss.add(when: 'CONFIG_AXP2XX_PMU', if_true: files('axp2xx.c'))
system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c'))
-system_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c'))
system_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pmu.c', 'exynos4210_clk.c', 'exynos4210_rng.c'))
+system_ss.add(when: 'CONFIG_FSL_IMX8MP_ANALOG', if_true: files('imx8mp_analog.c'))
+system_ss.add(when: 'CONFIG_FSL_IMX8MP_CCM', if_true: files('imx8mp_ccm.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files(
'imx25_ccm.c',
'imx31_ccm.c',
@@ -67,20 +70,15 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files(
'imx_ccm.c',
'imx_rngc.c',
))
-system_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mst_fpga.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files(
- 'npcm7xx_clk.c',
- 'npcm7xx_gcr.c',
+ 'npcm_clk.c',
+ 'npcm_gcr.c',
'npcm7xx_mft.c',
'npcm7xx_pwm.c',
'npcm7xx_rng.c',
))
system_ss.add(when: 'CONFIG_OMAP', if_true: files(
'omap_clk.c',
- 'omap_gpmc.c',
- 'omap_l4.c',
- 'omap_sdrc.c',
- 'omap_tap.c',
))
system_ss.add(when: 'CONFIG_RASPI', if_true: files(
'bcm2835_mbox.c',
@@ -106,6 +104,7 @@ system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
system_ss.add(when: 'CONFIG_XLNX_VERSAL_TRNG', if_true: files(
'xlnx-versal-trng.c',
))
+system_ss.add(when: 'CONFIG_STM32_RCC', if_true: files('stm32_rcc.c'))
system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c'))
system_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c'))
@@ -127,6 +126,7 @@ system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c'))
system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c'))
system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c'))
+system_ss.add(when: 'CONFIG_PVPANIC_MMIO', if_true: files('pvpanic-mmio.c'))
system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
'aspeed_hace.c',
diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c
index 2703040..5484b73 100644
--- a/hw/misc/mips_cmgcr.c
+++ b/hw/misc/mips_cmgcr.c
@@ -211,7 +211,7 @@ static const VMStateDescription vmstate_mips_gcr = {
},
};
-static Property mips_gcr_properties[] = {
+static const Property mips_gcr_properties[] = {
DEFINE_PROP_UINT32("num-vp", MIPSGCRState, num_vps, 1),
DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800),
DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR),
@@ -219,7 +219,6 @@ static Property mips_gcr_properties[] = {
MemoryRegion *),
DEFINE_PROP_LINK("cpc", MIPSGCRState, cpc_mr, TYPE_MEMORY_REGION,
MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static void mips_gcr_realize(DeviceState *dev, Error **errp)
@@ -230,12 +229,12 @@ static void mips_gcr_realize(DeviceState *dev, Error **errp)
s->vps = g_new(MIPSGCRVPState, s->num_vps);
}
-static void mips_gcr_class_init(ObjectClass *klass, void *data)
+static void mips_gcr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, mips_gcr_properties);
dc->vmsd = &vmstate_mips_gcr;
- dc->reset = mips_gcr_reset;
+ device_class_set_legacy_reset(dc, mips_gcr_reset);
dc->realize = mips_gcr_realize;
}
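
The two mechanical conversions in the hunk above recur through the rest of this section: Property arrays become const and drop their DEFINE_PROP_END_OF_LIST() terminator, and direct dc->reset assignments go through device_class_set_legacy_reset(). A sketch of the resulting shape, using a made-up device (everything named demo_* is hypothetical; the qdev helpers are the real ones used in these hunks):

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"
    #include "hw/sysbus.h"

    typedef struct DemoDevState {
        SysBusDevice parent_obj;
        uint32_t num_things;
    } DemoDevState;

    static void demo_dev_reset(DeviceState *dev)
    {
        /* single-function legacy reset handler, unchanged by the conversion */
    }

    static const Property demo_dev_properties[] = {
        DEFINE_PROP_UINT32("num-things", DemoDevState, num_things, 1),
        /* no DEFINE_PROP_END_OF_LIST() terminator in the const array style */
    };

    static void demo_dev_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_props(dc, demo_dev_properties);
        /* replaces the old direct "dc->reset = demo_dev_reset;" assignment */
        device_class_set_legacy_reset(dc, demo_dev_reset);
    }
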
diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c
index 1e8fd2e..9bfb7c9 100644
--- a/hw/misc/mips_cpc.c
+++ b/hw/misc/mips_cpc.c
@@ -92,8 +92,6 @@ static void cpc_write(void *opaque, hwaddr offset, uint64_t data,
"%s: Bad offset 0x%x\n", __func__, (int)offset);
break;
}
-
- return;
}
static uint64_t cpc_read(void *opaque, hwaddr offset, unsigned size)
@@ -163,18 +161,17 @@ static const VMStateDescription vmstate_mips_cpc = {
},
};
-static Property mips_cpc_properties[] = {
+static const Property mips_cpc_properties[] = {
DEFINE_PROP_UINT32("num-vp", MIPSCPCState, num_vp, 0x1),
DEFINE_PROP_UINT64("vp-start-running", MIPSCPCState, vp_start_running, 0x1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mips_cpc_class_init(ObjectClass *klass, void *data)
+static void mips_cpc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mips_cpc_realize;
- dc->reset = mips_cpc_reset;
+ device_class_set_legacy_reset(dc, mips_cpc_reset);
dc->vmsd = &vmstate_mips_cpc;
device_class_set_props(dc, mips_cpc_properties);
}
diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c
index f8acfb3..fc17385 100644
--- a/hw/misc/mips_itu.c
+++ b/hw/misc/mips_itu.c
@@ -533,21 +533,20 @@ static void mips_itu_reset(DeviceState *dev)
itc_reset_cells(s);
}
-static Property mips_itu_properties[] = {
+static const Property mips_itu_properties[] = {
DEFINE_PROP_UINT32("num-fifo", MIPSITUState, num_fifo,
ITC_FIFO_NUM_MAX),
DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores,
ITC_SEMAPH_NUM_MAX),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mips_itu_class_init(ObjectClass *klass, void *data)
+static void mips_itu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, mips_itu_properties);
dc->realize = mips_itu_realize;
- dc->reset = mips_itu_reset;
+ device_class_set_legacy_reset(dc, mips_itu_reset);
}
static const TypeInfo mips_itu_info = {
diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c
index 515f62e..8dd6b82 100644
--- a/hw/misc/mos6522.c
+++ b/hw/misc/mos6522.c
@@ -696,12 +696,11 @@ static void mos6522_finalize(Object *obj)
timer_free(s->timers[1].timer);
}
-static Property mos6522_properties[] = {
+static const Property mos6522_properties[] = {
DEFINE_PROP_UINT64("frequency", MOS6522State, frequency, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void mos6522_class_init(ObjectClass *oc, void *data)
+static void mos6522_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
ResettableClass *rc = RESETTABLE_CLASS(oc);
diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c
index aa1bb83..bee1309 100644
--- a/hw/misc/mps2-fpgaio.c
+++ b/hw/misc/mps2-fpgaio.c
@@ -198,7 +198,7 @@ static void mps2_fpgaio_write(void *opaque, hwaddr offset, uint64_t value,
s->led0 = value & MAKE_64BIT_MASK(0, s->num_leds);
for (i = 0; i < s->num_leds; i++) {
- led_set_state(s->led[i], value & (1 << i));
+ led_set_state(s->led[i], extract64(value, i, 1));
}
}
break;
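
In the LED loop above, extract64() pulls a single bit out of the 64-bit written value; the open-coded equivalent below shows the intent. The old form, value & (1 << i), does the shift in plain int, which is no longer well defined once i reaches 31 on targets with 32-bit int (sketch for illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdbool.h>

    /* Equivalent of extract64(value, i, 1): test bit i of a 64-bit word,
     * keeping the shift in 64-bit arithmetic. */
    static inline bool bit_is_set(uint64_t value, unsigned int i)
    {
        return (value >> i) & 1u;
    }
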
@@ -319,23 +319,22 @@ static const VMStateDescription mps2_fpgaio_vmstate = {
},
};
-static Property mps2_fpgaio_properties[] = {
+static const Property mps2_fpgaio_properties[] = {
/* Frequency of the prescale counter */
DEFINE_PROP_UINT32("prescale-clk", MPS2FPGAIO, prescale_clk, 20000000),
/* Number of LEDs controlled by LED0 register */
DEFINE_PROP_UINT32("num-leds", MPS2FPGAIO, num_leds, 2),
DEFINE_PROP_BOOL("has-switches", MPS2FPGAIO, has_switches, false),
DEFINE_PROP_BOOL("has-dbgctrl", MPS2FPGAIO, has_dbgctrl, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mps2_fpgaio_class_init(ObjectClass *klass, void *data)
+static void mps2_fpgaio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &mps2_fpgaio_vmstate;
dc->realize = mps2_fpgaio_realize;
- dc->reset = mps2_fpgaio_reset;
+ device_class_set_legacy_reset(dc, mps2_fpgaio_reset);
device_class_set_props(dc, mps2_fpgaio_properties);
}
diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c
index 18be741..a9a5d4a 100644
--- a/hw/misc/mps2-scc.c
+++ b/hw/misc/mps2-scc.c
@@ -456,7 +456,7 @@ static const VMStateDescription mps2_scc_vmstate = {
}
};
-static Property mps2_scc_properties[] = {
+static const Property mps2_scc_properties[] = {
/* Values for various read-only ID registers (which are specific
* to the board model or FPGA image)
*/
@@ -472,16 +472,15 @@ static Property mps2_scc_properties[] = {
*/
DEFINE_PROP_ARRAY("oscclk", MPS2SCC, num_oscclk, oscclk_reset,
qdev_prop_uint32, uint32_t),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mps2_scc_class_init(ObjectClass *klass, void *data)
+static void mps2_scc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mps2_scc_realize;
dc->vmsd = &mps2_scc_vmstate;
- dc->reset = mps2_scc_reset;
+ device_class_set_legacy_reset(dc, mps2_scc_reset);
device_class_set_props(dc, mps2_scc_properties);
}
diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c
index f54382a..ce0ad50 100644
--- a/hw/misc/msf2-sysreg.c
+++ b/hw/misc/msf2-sysreg.c
@@ -118,11 +118,10 @@ static const VMStateDescription vmstate_msf2_sysreg = {
}
};
-static Property msf2_sysreg_properties[] = {
+static const Property msf2_sysreg_properties[] = {
/* default divisors in Libero GUI */
DEFINE_PROP_UINT8("apb0divisor", MSF2SysregState, apb0div, 2),
DEFINE_PROP_UINT8("apb1divisor", MSF2SysregState, apb1div, 2),
- DEFINE_PROP_END_OF_LIST(),
};
static void msf2_sysreg_realize(DeviceState *dev, Error **errp)
@@ -137,12 +136,12 @@ static void msf2_sysreg_realize(DeviceState *dev, Error **errp)
}
}
-static void msf2_sysreg_class_init(ObjectClass *klass, void *data)
+static void msf2_sysreg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_msf2_sysreg;
- dc->reset = msf2_sysreg_reset;
+ device_class_set_legacy_reset(dc, msf2_sysreg_reset);
device_class_set_props(dc, msf2_sysreg_properties);
dc->realize = msf2_sysreg_realize;
}
diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c
deleted file mode 100644
index 2d7bfa5..0000000
--- a/hw/misc/mst_fpga.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * PXA270-based Intel Mainstone platforms.
- * FPGA driver
- *
- * Copyright (c) 2007 by Armin Kuster <akuster@kama-aina.net> or
- * <akuster@mvista.com>
- *
- * This code is licensed under the GNU GPL v2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-/* Mainstone FPGA for extern irqs */
-#define FPGA_GPIO_PIN 0
-#define MST_NUM_IRQS 16
-#define MST_LEDDAT1 0x10
-#define MST_LEDDAT2 0x14
-#define MST_LEDCTRL 0x40
-#define MST_GPSWR 0x60
-#define MST_MSCWR1 0x80
-#define MST_MSCWR2 0x84
-#define MST_MSCWR3 0x88
-#define MST_MSCRD 0x90
-#define MST_INTMSKENA 0xc0
-#define MST_INTSETCLR 0xd0
-#define MST_PCMCIA0 0xe0
-#define MST_PCMCIA1 0xe4
-
-#define MST_PCMCIAx_READY (1 << 10)
-#define MST_PCMCIAx_nCD (1 << 5)
-
-#define MST_PCMCIA_CD0_IRQ 9
-#define MST_PCMCIA_CD1_IRQ 13
-
-#define TYPE_MAINSTONE_FPGA "mainstone-fpga"
-OBJECT_DECLARE_SIMPLE_TYPE(mst_irq_state, MAINSTONE_FPGA)
-
-struct mst_irq_state {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
-
- qemu_irq parent;
-
- uint32_t prev_level;
- uint32_t leddat1;
- uint32_t leddat2;
- uint32_t ledctrl;
- uint32_t gpswr;
- uint32_t mscwr1;
- uint32_t mscwr2;
- uint32_t mscwr3;
- uint32_t mscrd;
- uint32_t intmskena;
- uint32_t intsetclr;
- uint32_t pcmcia0;
- uint32_t pcmcia1;
-};
-
-static void
-mst_fpga_set_irq(void *opaque, int irq, int level)
-{
- mst_irq_state *s = (mst_irq_state *)opaque;
- uint32_t oldint = s->intsetclr & s->intmskena;
-
- if (level)
- s->prev_level |= 1u << irq;
- else
- s->prev_level &= ~(1u << irq);
-
- switch(irq) {
- case MST_PCMCIA_CD0_IRQ:
- if (level)
- s->pcmcia0 &= ~MST_PCMCIAx_nCD;
- else
- s->pcmcia0 |= MST_PCMCIAx_nCD;
- break;
- case MST_PCMCIA_CD1_IRQ:
- if (level)
- s->pcmcia1 &= ~MST_PCMCIAx_nCD;
- else
- s->pcmcia1 |= MST_PCMCIAx_nCD;
- break;
- }
-
- if ((s->intmskena & (1u << irq)) && level)
- s->intsetclr |= 1u << irq;
-
- if (oldint != (s->intsetclr & s->intmskena))
- qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
-}
-
-
-static uint64_t
-mst_fpga_readb(void *opaque, hwaddr addr, unsigned size)
-{
- mst_irq_state *s = (mst_irq_state *) opaque;
-
- switch (addr) {
- case MST_LEDDAT1:
- return s->leddat1;
- case MST_LEDDAT2:
- return s->leddat2;
- case MST_LEDCTRL:
- return s->ledctrl;
- case MST_GPSWR:
- return s->gpswr;
- case MST_MSCWR1:
- return s->mscwr1;
- case MST_MSCWR2:
- return s->mscwr2;
- case MST_MSCWR3:
- return s->mscwr3;
- case MST_MSCRD:
- return s->mscrd;
- case MST_INTMSKENA:
- return s->intmskena;
- case MST_INTSETCLR:
- return s->intsetclr;
- case MST_PCMCIA0:
- return s->pcmcia0;
- case MST_PCMCIA1:
- return s->pcmcia1;
- default:
- printf("Mainstone - mst_fpga_readb: Bad register offset "
- "0x" HWADDR_FMT_plx "\n", addr);
- }
- return 0;
-}
-
-static void
-mst_fpga_writeb(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- mst_irq_state *s = (mst_irq_state *) opaque;
- value &= 0xffffffff;
-
- switch (addr) {
- case MST_LEDDAT1:
- s->leddat1 = value;
- break;
- case MST_LEDDAT2:
- s->leddat2 = value;
- break;
- case MST_LEDCTRL:
- s->ledctrl = value;
- break;
- case MST_GPSWR:
- s->gpswr = value;
- break;
- case MST_MSCWR1:
- s->mscwr1 = value;
- break;
- case MST_MSCWR2:
- s->mscwr2 = value;
- break;
- case MST_MSCWR3:
- s->mscwr3 = value;
- break;
- case MST_MSCRD:
- s->mscrd = value;
- break;
- case MST_INTMSKENA: /* Mask interrupt */
- s->intmskena = (value & 0xFEEFF);
- qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
- break;
- case MST_INTSETCLR: /* clear or set interrupt */
- s->intsetclr = (value & 0xFEEFF);
- qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
- break;
- /* For PCMCIAx allow the to change only power and reset */
- case MST_PCMCIA0:
- s->pcmcia0 = (value & 0x1f) | (s->pcmcia0 & ~0x1f);
- break;
- case MST_PCMCIA1:
- s->pcmcia1 = (value & 0x1f) | (s->pcmcia1 & ~0x1f);
- break;
- default:
- printf("Mainstone - mst_fpga_writeb: Bad register offset "
- "0x" HWADDR_FMT_plx "\n", addr);
- }
-}
-
-static const MemoryRegionOps mst_fpga_ops = {
- .read = mst_fpga_readb,
- .write = mst_fpga_writeb,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static int mst_fpga_post_load(void *opaque, int version_id)
-{
- mst_irq_state *s = (mst_irq_state *) opaque;
-
- qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
- return 0;
-}
-
-static void mst_fpga_init(Object *obj)
-{
- DeviceState *dev = DEVICE(obj);
- mst_irq_state *s = MAINSTONE_FPGA(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-
- s->pcmcia0 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD;
- s->pcmcia1 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD;
-
- sysbus_init_irq(sbd, &s->parent);
-
- /* alloc the external 16 irqs */
- qdev_init_gpio_in(dev, mst_fpga_set_irq, MST_NUM_IRQS);
-
- memory_region_init_io(&s->iomem, obj, &mst_fpga_ops, s,
- "fpga", 0x00100000);
- sysbus_init_mmio(sbd, &s->iomem);
-}
-
-static const VMStateDescription vmstate_mst_fpga_regs = {
- .name = "mainstone_fpga",
- .version_id = 0,
- .minimum_version_id = 0,
- .post_load = mst_fpga_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(prev_level, mst_irq_state),
- VMSTATE_UINT32(leddat1, mst_irq_state),
- VMSTATE_UINT32(leddat2, mst_irq_state),
- VMSTATE_UINT32(ledctrl, mst_irq_state),
- VMSTATE_UINT32(gpswr, mst_irq_state),
- VMSTATE_UINT32(mscwr1, mst_irq_state),
- VMSTATE_UINT32(mscwr2, mst_irq_state),
- VMSTATE_UINT32(mscwr3, mst_irq_state),
- VMSTATE_UINT32(mscrd, mst_irq_state),
- VMSTATE_UINT32(intmskena, mst_irq_state),
- VMSTATE_UINT32(intsetclr, mst_irq_state),
- VMSTATE_UINT32(pcmcia0, mst_irq_state),
- VMSTATE_UINT32(pcmcia1, mst_irq_state),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static void mst_fpga_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "Mainstone II FPGA";
- dc->vmsd = &vmstate_mst_fpga_regs;
-}
-
-static const TypeInfo mst_fpga_info = {
- .name = TYPE_MAINSTONE_FPGA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(mst_irq_state),
- .instance_init = mst_fpga_init,
- .class_init = mst_fpga_class_init,
-};
-
-static void mst_fpga_register_types(void)
-{
- type_register_static(&mst_fpga_info);
-}
-
-type_init(mst_fpga_register_types)
diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c
deleted file mode 100644
index 2098c85..0000000
--- a/hw/misc/npcm7xx_clk.c
+++ /dev/null
@@ -1,1088 +0,0 @@
-/*
- * Nuvoton NPCM7xx Clock Control Registers.
- *
- * Copyright 2020 Google LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include "qemu/osdep.h"
-
-#include "hw/misc/npcm7xx_clk.h"
-#include "hw/timer/npcm7xx_timer.h"
-#include "hw/qdev-clock.h"
-#include "migration/vmstate.h"
-#include "qemu/error-report.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qemu/timer.h"
-#include "qemu/units.h"
-#include "trace.h"
-#include "sysemu/watchdog.h"
-
-/*
- * The reference clock hz, and the SECCNT and CNTR25M registers in this module,
- * is always 25 MHz.
- */
-#define NPCM7XX_CLOCK_REF_HZ (25000000)
-
-/* Register Field Definitions */
-#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */
-
-#define PLLCON_LOKI BIT(31)
-#define PLLCON_LOKS BIT(30)
-#define PLLCON_PWDEN BIT(12)
-#define PLLCON_FBDV(con) extract32((con), 16, 12)
-#define PLLCON_OTDV2(con) extract32((con), 13, 3)
-#define PLLCON_OTDV1(con) extract32((con), 8, 3)
-#define PLLCON_INDV(con) extract32((con), 0, 6)
-
-enum NPCM7xxCLKRegisters {
- NPCM7XX_CLK_CLKEN1,
- NPCM7XX_CLK_CLKSEL,
- NPCM7XX_CLK_CLKDIV1,
- NPCM7XX_CLK_PLLCON0,
- NPCM7XX_CLK_PLLCON1,
- NPCM7XX_CLK_SWRSTR,
- NPCM7XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t),
- NPCM7XX_CLK_IPSRST2,
- NPCM7XX_CLK_CLKEN2,
- NPCM7XX_CLK_CLKDIV2,
- NPCM7XX_CLK_CLKEN3,
- NPCM7XX_CLK_IPSRST3,
- NPCM7XX_CLK_WD0RCR,
- NPCM7XX_CLK_WD1RCR,
- NPCM7XX_CLK_WD2RCR,
- NPCM7XX_CLK_SWRSTC1,
- NPCM7XX_CLK_SWRSTC2,
- NPCM7XX_CLK_SWRSTC3,
- NPCM7XX_CLK_SWRSTC4,
- NPCM7XX_CLK_PLLCON2,
- NPCM7XX_CLK_CLKDIV3,
- NPCM7XX_CLK_CORSTC,
- NPCM7XX_CLK_PLLCONG,
- NPCM7XX_CLK_AHBCKFI,
- NPCM7XX_CLK_SECCNT,
- NPCM7XX_CLK_CNTR25M,
- NPCM7XX_CLK_REGS_END,
-};
-
-/*
- * These reset values were taken from version 0.91 of the NPCM750R data sheet.
- *
- * All are loaded on power-up reset. CLKENx and SWRSTR should also be loaded on
- * core domain reset, but this reset type is not yet supported by QEMU.
- */
-static const uint32_t cold_reset_values[NPCM7XX_CLK_NR_REGS] = {
- [NPCM7XX_CLK_CLKEN1] = 0xffffffff,
- [NPCM7XX_CLK_CLKSEL] = 0x004aaaaa,
- [NPCM7XX_CLK_CLKDIV1] = 0x5413f855,
- [NPCM7XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI,
- [NPCM7XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI,
- [NPCM7XX_CLK_IPSRST1] = 0x00001000,
- [NPCM7XX_CLK_IPSRST2] = 0x80000000,
- [NPCM7XX_CLK_CLKEN2] = 0xffffffff,
- [NPCM7XX_CLK_CLKDIV2] = 0xaa4f8f9f,
- [NPCM7XX_CLK_CLKEN3] = 0xffffffff,
- [NPCM7XX_CLK_IPSRST3] = 0x03000000,
- [NPCM7XX_CLK_WD0RCR] = 0xffffffff,
- [NPCM7XX_CLK_WD1RCR] = 0xffffffff,
- [NPCM7XX_CLK_WD2RCR] = 0xffffffff,
- [NPCM7XX_CLK_SWRSTC1] = 0x00000003,
- [NPCM7XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI,
- [NPCM7XX_CLK_CORSTC] = 0x04000003,
- [NPCM7XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI,
- [NPCM7XX_CLK_AHBCKFI] = 0x000000c8,
-};
-
-/* The number of watchdogs that can trigger a reset. */
-#define NPCM7XX_NR_WATCHDOGS (3)
-
-/* Clock converter functions */
-
-#define TYPE_NPCM7XX_CLOCK_PLL "npcm7xx-clock-pll"
-#define NPCM7XX_CLOCK_PLL(obj) OBJECT_CHECK(NPCM7xxClockPLLState, \
- (obj), TYPE_NPCM7XX_CLOCK_PLL)
-#define TYPE_NPCM7XX_CLOCK_SEL "npcm7xx-clock-sel"
-#define NPCM7XX_CLOCK_SEL(obj) OBJECT_CHECK(NPCM7xxClockSELState, \
- (obj), TYPE_NPCM7XX_CLOCK_SEL)
-#define TYPE_NPCM7XX_CLOCK_DIVIDER "npcm7xx-clock-divider"
-#define NPCM7XX_CLOCK_DIVIDER(obj) OBJECT_CHECK(NPCM7xxClockDividerState, \
- (obj), TYPE_NPCM7XX_CLOCK_DIVIDER)
-
-static void npcm7xx_clk_update_pll(void *opaque)
-{
- NPCM7xxClockPLLState *s = opaque;
- uint32_t con = s->clk->regs[s->reg];
- uint64_t freq;
-
- /* The PLL is grounded if it is not locked yet. */
- if (con & PLLCON_LOKI) {
- freq = clock_get_hz(s->clock_in);
- freq *= PLLCON_FBDV(con);
- freq /= PLLCON_INDV(con) * PLLCON_OTDV1(con) * PLLCON_OTDV2(con);
- } else {
- freq = 0;
- }
-
- clock_update_hz(s->clock_out, freq);
-}
-
-static void npcm7xx_clk_update_sel(void *opaque)
-{
- NPCM7xxClockSELState *s = opaque;
- uint32_t index = extract32(s->clk->regs[NPCM7XX_CLK_CLKSEL], s->offset,
- s->len);
-
- if (index >= s->input_size) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: SEL index: %u out of range\n",
- __func__, index);
- index = 0;
- }
- clock_update_hz(s->clock_out, clock_get_hz(s->clock_in[index]));
-}
-
-static void npcm7xx_clk_update_divider(void *opaque)
-{
- NPCM7xxClockDividerState *s = opaque;
- uint32_t freq;
-
- freq = s->divide(s);
- clock_update_hz(s->clock_out, freq);
-}
-
-static uint32_t divide_by_constant(NPCM7xxClockDividerState *s)
-{
- return clock_get_hz(s->clock_in) / s->divisor;
-}
-
-static uint32_t divide_by_reg_divisor(NPCM7xxClockDividerState *s)
-{
- return clock_get_hz(s->clock_in) /
- (extract32(s->clk->regs[s->reg], s->offset, s->len) + 1);
-}
-
-static uint32_t divide_by_reg_divisor_times_2(NPCM7xxClockDividerState *s)
-{
- return divide_by_reg_divisor(s) / 2;
-}
-
-static uint32_t shift_by_reg_divisor(NPCM7xxClockDividerState *s)
-{
- return clock_get_hz(s->clock_in) >>
- extract32(s->clk->regs[s->reg], s->offset, s->len);
-}
-
-static NPCM7xxClockPLL find_pll_by_reg(enum NPCM7xxCLKRegisters reg)
-{
- switch (reg) {
- case NPCM7XX_CLK_PLLCON0:
- return NPCM7XX_CLOCK_PLL0;
- case NPCM7XX_CLK_PLLCON1:
- return NPCM7XX_CLOCK_PLL1;
- case NPCM7XX_CLK_PLLCON2:
- return NPCM7XX_CLOCK_PLL2;
- case NPCM7XX_CLK_PLLCONG:
- return NPCM7XX_CLOCK_PLLG;
- default:
- g_assert_not_reached();
- }
-}
-
-static void npcm7xx_clk_update_all_plls(NPCM7xxCLKState *clk)
-{
- int i;
-
- for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
- npcm7xx_clk_update_pll(&clk->plls[i]);
- }
-}
-
-static void npcm7xx_clk_update_all_sels(NPCM7xxCLKState *clk)
-{
- int i;
-
- for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
- npcm7xx_clk_update_sel(&clk->sels[i]);
- }
-}
-
-static void npcm7xx_clk_update_all_dividers(NPCM7xxCLKState *clk)
-{
- int i;
-
- for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
- npcm7xx_clk_update_divider(&clk->dividers[i]);
- }
-}
-
-static void npcm7xx_clk_update_all_clocks(NPCM7xxCLKState *clk)
-{
- clock_update_hz(clk->clkref, NPCM7XX_CLOCK_REF_HZ);
- npcm7xx_clk_update_all_plls(clk);
- npcm7xx_clk_update_all_sels(clk);
- npcm7xx_clk_update_all_dividers(clk);
-}
-
-/* Types of clock sources. */
-typedef enum ClockSrcType {
- CLKSRC_REF,
- CLKSRC_PLL,
- CLKSRC_SEL,
- CLKSRC_DIV,
-} ClockSrcType;
-
-typedef struct PLLInitInfo {
- const char *name;
- ClockSrcType src_type;
- int src_index;
- int reg;
- const char *public_name;
-} PLLInitInfo;
-
-typedef struct SELInitInfo {
- const char *name;
- uint8_t input_size;
- ClockSrcType src_type[NPCM7XX_CLK_SEL_MAX_INPUT];
- int src_index[NPCM7XX_CLK_SEL_MAX_INPUT];
- int offset;
- int len;
- const char *public_name;
-} SELInitInfo;
-
-typedef struct DividerInitInfo {
- const char *name;
- ClockSrcType src_type;
- int src_index;
- uint32_t (*divide)(NPCM7xxClockDividerState *s);
- int reg; /* not used when type == CONSTANT */
- int offset; /* not used when type == CONSTANT */
- int len; /* not used when type == CONSTANT */
- int divisor; /* used only when type == CONSTANT */
- const char *public_name;
-} DividerInitInfo;
-
-static const PLLInitInfo pll_init_info_list[] = {
- [NPCM7XX_CLOCK_PLL0] = {
- .name = "pll0",
- .src_type = CLKSRC_REF,
- .reg = NPCM7XX_CLK_PLLCON0,
- },
- [NPCM7XX_CLOCK_PLL1] = {
- .name = "pll1",
- .src_type = CLKSRC_REF,
- .reg = NPCM7XX_CLK_PLLCON1,
- },
- [NPCM7XX_CLOCK_PLL2] = {
- .name = "pll2",
- .src_type = CLKSRC_REF,
- .reg = NPCM7XX_CLK_PLLCON2,
- },
- [NPCM7XX_CLOCK_PLLG] = {
- .name = "pllg",
- .src_type = CLKSRC_REF,
- .reg = NPCM7XX_CLK_PLLCONG,
- },
-};
-
-static const SELInitInfo sel_init_info_list[] = {
- [NPCM7XX_CLOCK_PIXCKSEL] = {
- .name = "pixcksel",
- .input_size = 2,
- .src_type = {CLKSRC_PLL, CLKSRC_REF},
- .src_index = {NPCM7XX_CLOCK_PLLG, 0},
- .offset = 5,
- .len = 1,
- .public_name = "pixel-clock",
- },
- [NPCM7XX_CLOCK_MCCKSEL] = {
- .name = "mccksel",
- .input_size = 4,
- .src_type = {CLKSRC_DIV, CLKSRC_REF, CLKSRC_REF,
- /*MCBPCK, shouldn't be used in normal operation*/
- CLKSRC_REF},
- .src_index = {NPCM7XX_CLOCK_PLL1D2, 0, 0, 0},
- .offset = 12,
- .len = 2,
- .public_name = "mc-phy-clock",
- },
- [NPCM7XX_CLOCK_CPUCKSEL] = {
- .name = "cpucksel",
- .input_size = 4,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF,
- /*SYSBPCK, shouldn't be used in normal operation*/
- CLKSRC_REF},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, 0},
- .offset = 0,
- .len = 2,
- .public_name = "system-clock",
- },
- [NPCM7XX_CLOCK_CLKOUTSEL] = {
- .name = "clkoutsel",
- .input_size = 5,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF,
- CLKSRC_PLL, CLKSRC_DIV},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
- NPCM7XX_CLOCK_PLLG, NPCM7XX_CLOCK_PLL2D2},
- .offset = 18,
- .len = 3,
- .public_name = "tock",
- },
- [NPCM7XX_CLOCK_UARTCKSEL] = {
- .name = "uartcksel",
- .input_size = 4,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
- NPCM7XX_CLOCK_PLL2D2},
- .offset = 8,
- .len = 2,
- },
- [NPCM7XX_CLOCK_TIMCKSEL] = {
- .name = "timcksel",
- .input_size = 4,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
- NPCM7XX_CLOCK_PLL2D2},
- .offset = 14,
- .len = 2,
- },
- [NPCM7XX_CLOCK_SDCKSEL] = {
- .name = "sdcksel",
- .input_size = 4,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
- NPCM7XX_CLOCK_PLL2D2},
- .offset = 6,
- .len = 2,
- },
- [NPCM7XX_CLOCK_GFXMSEL] = {
- .name = "gfxmksel",
- .input_size = 2,
- .src_type = {CLKSRC_REF, CLKSRC_PLL},
- .src_index = {0, NPCM7XX_CLOCK_PLL2},
- .offset = 21,
- .len = 1,
- },
- [NPCM7XX_CLOCK_SUCKSEL] = {
- .name = "sucksel",
- .input_size = 4,
- .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
- .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
- NPCM7XX_CLOCK_PLL2D2},
- .offset = 10,
- .len = 2,
- },
-};
-
-static const DividerInitInfo divider_init_info_list[] = {
- [NPCM7XX_CLOCK_PLL1D2] = {
- .name = "pll1d2",
- .src_type = CLKSRC_PLL,
- .src_index = NPCM7XX_CLOCK_PLL1,
- .divide = divide_by_constant,
- .divisor = 2,
- },
- [NPCM7XX_CLOCK_PLL2D2] = {
- .name = "pll2d2",
- .src_type = CLKSRC_PLL,
- .src_index = NPCM7XX_CLOCK_PLL2,
- .divide = divide_by_constant,
- .divisor = 2,
- },
- [NPCM7XX_CLOCK_MC_DIVIDER] = {
- .name = "mc-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_MCCKSEL,
- .divide = divide_by_constant,
- .divisor = 2,
- .public_name = "mc-clock"
- },
- [NPCM7XX_CLOCK_AXI_DIVIDER] = {
- .name = "axi-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_CPUCKSEL,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 0,
- .len = 1,
- .public_name = "clk2"
- },
- [NPCM7XX_CLOCK_AHB_DIVIDER] = {
- .name = "ahb-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AXI_DIVIDER,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 26,
- .len = 2,
- .public_name = "clk4"
- },
- [NPCM7XX_CLOCK_AHB3_DIVIDER] = {
- .name = "ahb3-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 6,
- .len = 5,
- .public_name = "ahb3-spi3-clock"
- },
- [NPCM7XX_CLOCK_SPI0_DIVIDER] = {
- .name = "spi0-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV3,
- .offset = 6,
- .len = 5,
- .public_name = "spi0-clock",
- },
- [NPCM7XX_CLOCK_SPIX_DIVIDER] = {
- .name = "spix-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV3,
- .offset = 1,
- .len = 5,
- .public_name = "spix-clock",
- },
- [NPCM7XX_CLOCK_APB1_DIVIDER] = {
- .name = "apb1-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 24,
- .len = 2,
- .public_name = "apb1-clock",
- },
- [NPCM7XX_CLOCK_APB2_DIVIDER] = {
- .name = "apb2-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 26,
- .len = 2,
- .public_name = "apb2-clock",
- },
- [NPCM7XX_CLOCK_APB3_DIVIDER] = {
- .name = "apb3-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 28,
- .len = 2,
- .public_name = "apb3-clock",
- },
- [NPCM7XX_CLOCK_APB4_DIVIDER] = {
- .name = "apb4-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 30,
- .len = 2,
- .public_name = "apb4-clock",
- },
- [NPCM7XX_CLOCK_APB5_DIVIDER] = {
- .name = "apb5-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 22,
- .len = 2,
- .public_name = "apb5-clock",
- },
- [NPCM7XX_CLOCK_CLKOUT_DIVIDER] = {
- .name = "clkout-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_CLKOUTSEL,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 16,
- .len = 5,
- .public_name = "clkout",
- },
- [NPCM7XX_CLOCK_UART_DIVIDER] = {
- .name = "uart-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_UARTCKSEL,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 16,
- .len = 5,
- .public_name = "uart-clock",
- },
- [NPCM7XX_CLOCK_TIMER_DIVIDER] = {
- .name = "timer-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_TIMCKSEL,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 21,
- .len = 5,
- .public_name = "timer-clock",
- },
- [NPCM7XX_CLOCK_ADC_DIVIDER] = {
- .name = "adc-divider",
- .src_type = CLKSRC_DIV,
- .src_index = NPCM7XX_CLOCK_TIMER_DIVIDER,
- .divide = shift_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 28,
- .len = 3,
- .public_name = "adc-clock",
- },
- [NPCM7XX_CLOCK_MMC_DIVIDER] = {
- .name = "mmc-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_SDCKSEL,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV1,
- .offset = 11,
- .len = 5,
- .public_name = "mmc-clock",
- },
- [NPCM7XX_CLOCK_SDHC_DIVIDER] = {
- .name = "sdhc-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_SDCKSEL,
- .divide = divide_by_reg_divisor_times_2,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 0,
- .len = 4,
- .public_name = "sdhc-clock",
- },
- [NPCM7XX_CLOCK_GFXM_DIVIDER] = {
- .name = "gfxm-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_GFXMSEL,
- .divide = divide_by_constant,
- .divisor = 3,
- .public_name = "gfxm-clock",
- },
- [NPCM7XX_CLOCK_UTMI_DIVIDER] = {
- .name = "utmi-divider",
- .src_type = CLKSRC_SEL,
- .src_index = NPCM7XX_CLOCK_SUCKSEL,
- .divide = divide_by_reg_divisor,
- .reg = NPCM7XX_CLK_CLKDIV2,
- .offset = 8,
- .len = 5,
- .public_name = "utmi-clock",
- },
-};
-
-static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event)
-{
- npcm7xx_clk_update_pll(opaque);
-}
-
-static void npcm7xx_clk_pll_init(Object *obj)
-{
- NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj);
-
- pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in",
- npcm7xx_clk_update_pll_cb, pll,
- ClockUpdate);
- pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out");
-}
-
-static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event)
-{
- npcm7xx_clk_update_sel(opaque);
-}
-
-static void npcm7xx_clk_sel_init(Object *obj)
-{
- int i;
- NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj);
-
- for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) {
- g_autofree char *s = g_strdup_printf("clock-in[%d]", i);
- sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), s,
- npcm7xx_clk_update_sel_cb, sel, ClockUpdate);
- }
- sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out");
-}
-
-static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event)
-{
- npcm7xx_clk_update_divider(opaque);
-}
-
-static void npcm7xx_clk_divider_init(Object *obj)
-{
- NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj);
-
- div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in",
- npcm7xx_clk_update_divider_cb,
- div, ClockUpdate);
- div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out");
-}
-
-static void npcm7xx_init_clock_pll(NPCM7xxClockPLLState *pll,
- NPCM7xxCLKState *clk, const PLLInitInfo *init_info)
-{
- pll->name = init_info->name;
- pll->clk = clk;
- pll->reg = init_info->reg;
- if (init_info->public_name != NULL) {
- qdev_alias_clock(DEVICE(pll), "clock-out", DEVICE(clk),
- init_info->public_name);
- }
-}
-
-static void npcm7xx_init_clock_sel(NPCM7xxClockSELState *sel,
- NPCM7xxCLKState *clk, const SELInitInfo *init_info)
-{
- int input_size = init_info->input_size;
-
- sel->name = init_info->name;
- sel->clk = clk;
- sel->input_size = init_info->input_size;
- g_assert(input_size <= NPCM7XX_CLK_SEL_MAX_INPUT);
- sel->offset = init_info->offset;
- sel->len = init_info->len;
- if (init_info->public_name != NULL) {
- qdev_alias_clock(DEVICE(sel), "clock-out", DEVICE(clk),
- init_info->public_name);
- }
-}
-
-static void npcm7xx_init_clock_divider(NPCM7xxClockDividerState *div,
- NPCM7xxCLKState *clk, const DividerInitInfo *init_info)
-{
- div->name = init_info->name;
- div->clk = clk;
-
- div->divide = init_info->divide;
- if (div->divide == divide_by_constant) {
- div->divisor = init_info->divisor;
- } else {
- div->reg = init_info->reg;
- div->offset = init_info->offset;
- div->len = init_info->len;
- }
- if (init_info->public_name != NULL) {
- qdev_alias_clock(DEVICE(div), "clock-out", DEVICE(clk),
- init_info->public_name);
- }
-}
-
-static Clock *npcm7xx_get_clock(NPCM7xxCLKState *clk, ClockSrcType type,
- int index)
-{
- switch (type) {
- case CLKSRC_REF:
- return clk->clkref;
- case CLKSRC_PLL:
- return clk->plls[index].clock_out;
- case CLKSRC_SEL:
- return clk->sels[index].clock_out;
- case CLKSRC_DIV:
- return clk->dividers[index].clock_out;
- default:
- g_assert_not_reached();
- }
-}
-
-static void npcm7xx_connect_clocks(NPCM7xxCLKState *clk)
-{
- int i, j;
- Clock *src;
-
- for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
- src = npcm7xx_get_clock(clk, pll_init_info_list[i].src_type,
- pll_init_info_list[i].src_index);
- clock_set_source(clk->plls[i].clock_in, src);
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
- for (j = 0; j < sel_init_info_list[i].input_size; ++j) {
- src = npcm7xx_get_clock(clk, sel_init_info_list[i].src_type[j],
- sel_init_info_list[i].src_index[j]);
- clock_set_source(clk->sels[i].clock_in[j], src);
- }
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
- src = npcm7xx_get_clock(clk, divider_init_info_list[i].src_type,
- divider_init_info_list[i].src_index);
- clock_set_source(clk->dividers[i].clock_in, src);
- }
-}
-
-static uint64_t npcm7xx_clk_read(void *opaque, hwaddr offset, unsigned size)
-{
- uint32_t reg = offset / sizeof(uint32_t);
- NPCM7xxCLKState *s = opaque;
- int64_t now_ns;
- uint32_t value = 0;
-
- if (reg >= NPCM7XX_CLK_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
- __func__, offset);
- return 0;
- }
-
- switch (reg) {
- case NPCM7XX_CLK_SWRSTR:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n",
- __func__, offset);
- break;
-
- case NPCM7XX_CLK_SECCNT:
- now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- value = (now_ns - s->ref_ns) / NANOSECONDS_PER_SECOND;
- break;
-
- case NPCM7XX_CLK_CNTR25M:
- now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- /*
- * This register counts 25 MHz cycles, updating every 640 ns. It rolls
- * over to zero every second.
- *
- * The 4 LSBs are always zero: (1e9 / 640) << 4 = 25000000.
- */
- value = (((now_ns - s->ref_ns) / 640) << 4) % NPCM7XX_CLOCK_REF_HZ;
- break;
-
- default:
- value = s->regs[reg];
- break;
- };
-
- trace_npcm7xx_clk_read(offset, value);
-
- return value;
-}
-
-static void npcm7xx_clk_write(void *opaque, hwaddr offset,
- uint64_t v, unsigned size)
-{
- uint32_t reg = offset / sizeof(uint32_t);
- NPCM7xxCLKState *s = opaque;
- uint32_t value = v;
-
- trace_npcm7xx_clk_write(offset, value);
-
- if (reg >= NPCM7XX_CLK_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
- __func__, offset);
- return;
- }
-
- switch (reg) {
- case NPCM7XX_CLK_SWRSTR:
- qemu_log_mask(LOG_UNIMP, "%s: SW reset not implemented: 0x%02x\n",
- __func__, value);
- value = 0;
- break;
-
- case NPCM7XX_CLK_PLLCON0:
- case NPCM7XX_CLK_PLLCON1:
- case NPCM7XX_CLK_PLLCON2:
- case NPCM7XX_CLK_PLLCONG:
- if (value & PLLCON_PWDEN) {
- /* Power down -- clear lock and indicate loss of lock */
- value &= ~PLLCON_LOKI;
- value |= PLLCON_LOKS;
- } else {
- /* Normal mode -- assume always locked */
- value |= PLLCON_LOKI;
- /* Keep LOKS unchanged unless cleared by writing 1 */
- if (value & PLLCON_LOKS) {
- value &= ~PLLCON_LOKS;
- } else {
- value |= (value & PLLCON_LOKS);
- }
- }
- /* Only update PLL when it is locked. */
- if (value & PLLCON_LOKI) {
- npcm7xx_clk_update_pll(&s->plls[find_pll_by_reg(reg)]);
- }
- break;
-
- case NPCM7XX_CLK_CLKSEL:
- npcm7xx_clk_update_all_sels(s);
- break;
-
- case NPCM7XX_CLK_CLKDIV1:
- case NPCM7XX_CLK_CLKDIV2:
- case NPCM7XX_CLK_CLKDIV3:
- npcm7xx_clk_update_all_dividers(s);
- break;
-
- case NPCM7XX_CLK_CNTR25M:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n",
- __func__, offset);
- return;
- }
-
- s->regs[reg] = value;
-}
-
-/* Perform reset action triggered by a watchdog */
-static void npcm7xx_clk_perform_watchdog_reset(void *opaque, int n,
- int level)
-{
- NPCM7xxCLKState *clk = NPCM7XX_CLK(opaque);
- uint32_t rcr;
-
- g_assert(n >= 0 && n <= NPCM7XX_NR_WATCHDOGS);
- rcr = clk->regs[NPCM7XX_CLK_WD0RCR + n];
- if (rcr & NPCM7XX_CLK_WDRCR_CA9C) {
- watchdog_perform_action();
- } else {
- qemu_log_mask(LOG_UNIMP,
- "%s: only CPU reset is implemented. (requested 0x%" PRIx32")\n",
- __func__, rcr);
- }
-}
-
-static const struct MemoryRegionOps npcm7xx_clk_ops = {
- .read = npcm7xx_clk_read,
- .write = npcm7xx_clk_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4,
- .unaligned = false,
- },
-};
-
-static void npcm7xx_clk_enter_reset(Object *obj, ResetType type)
-{
- NPCM7xxCLKState *s = NPCM7XX_CLK(obj);
-
- QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values));
-
- memcpy(s->regs, cold_reset_values, sizeof(cold_reset_values));
- s->ref_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- npcm7xx_clk_update_all_clocks(s);
- /*
- * A small number of registers need to be reset on a core domain reset,
- * but no such reset type exists yet.
- */
-}
-
-static void npcm7xx_clk_init_clock_hierarchy(NPCM7xxCLKState *s)
-{
- int i;
-
- s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0);
-
- /* First pass: init all converter modules */
- QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS);
- QEMU_BUILD_BUG_ON(ARRAY_SIZE(sel_init_info_list) != NPCM7XX_CLOCK_NR_SELS);
- QEMU_BUILD_BUG_ON(ARRAY_SIZE(divider_init_info_list)
- != NPCM7XX_CLOCK_NR_DIVIDERS);
- for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
- object_initialize_child(OBJECT(s), pll_init_info_list[i].name,
- &s->plls[i], TYPE_NPCM7XX_CLOCK_PLL);
- npcm7xx_init_clock_pll(&s->plls[i], s,
- &pll_init_info_list[i]);
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
- object_initialize_child(OBJECT(s), sel_init_info_list[i].name,
- &s->sels[i], TYPE_NPCM7XX_CLOCK_SEL);
- npcm7xx_init_clock_sel(&s->sels[i], s,
- &sel_init_info_list[i]);
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
- object_initialize_child(OBJECT(s), divider_init_info_list[i].name,
- &s->dividers[i], TYPE_NPCM7XX_CLOCK_DIVIDER);
- npcm7xx_init_clock_divider(&s->dividers[i], s,
- &divider_init_info_list[i]);
- }
-
- /* Second pass: connect converter modules */
- npcm7xx_connect_clocks(s);
-
- clock_update_hz(s->clkref, NPCM7XX_CLOCK_REF_HZ);
-}
-
-static void npcm7xx_clk_init(Object *obj)
-{
- NPCM7xxCLKState *s = NPCM7XX_CLK(obj);
-
- memory_region_init_io(&s->iomem, obj, &npcm7xx_clk_ops, s,
- TYPE_NPCM7XX_CLK, 4 * KiB);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
-}
-
-static int npcm7xx_clk_post_load(void *opaque, int version_id)
-{
- if (version_id >= 1) {
- NPCM7xxCLKState *clk = opaque;
-
- npcm7xx_clk_update_all_clocks(clk);
- }
-
- return 0;
-}
-
-static void npcm7xx_clk_realize(DeviceState *dev, Error **errp)
-{
- int i;
- NPCM7xxCLKState *s = NPCM7XX_CLK(dev);
-
- qdev_init_gpio_in_named(DEVICE(s), npcm7xx_clk_perform_watchdog_reset,
- NPCM7XX_WATCHDOG_RESET_GPIO_IN, NPCM7XX_NR_WATCHDOGS);
- npcm7xx_clk_init_clock_hierarchy(s);
-
- /* Realize child devices */
- for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
- if (!qdev_realize(DEVICE(&s->plls[i]), NULL, errp)) {
- return;
- }
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
- if (!qdev_realize(DEVICE(&s->sels[i]), NULL, errp)) {
- return;
- }
- }
- for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
- if (!qdev_realize(DEVICE(&s->dividers[i]), NULL, errp)) {
- return;
- }
- }
-}
-
-static const VMStateDescription vmstate_npcm7xx_clk_pll = {
- .name = "npcm7xx-clock-pll",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static const VMStateDescription vmstate_npcm7xx_clk_sel = {
- .name = "npcm7xx-clock-sel",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState,
- NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static const VMStateDescription vmstate_npcm7xx_clk_divider = {
- .name = "npcm7xx-clock-divider",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static const VMStateDescription vmstate_npcm7xx_clk = {
- .name = "npcm7xx-clk",
- .version_id = 1,
- .minimum_version_id = 1,
- .post_load = npcm7xx_clk_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(regs, NPCM7xxCLKState, NPCM7XX_CLK_NR_REGS),
- VMSTATE_INT64(ref_ns, NPCM7xxCLKState),
- VMSTATE_CLOCK(clkref, NPCM7xxCLKState),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static void npcm7xx_clk_pll_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "NPCM7xx Clock PLL Module";
- dc->vmsd = &vmstate_npcm7xx_clk_pll;
-}
-
-static void npcm7xx_clk_sel_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "NPCM7xx Clock SEL Module";
- dc->vmsd = &vmstate_npcm7xx_clk_sel;
-}
-
-static void npcm7xx_clk_divider_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "NPCM7xx Clock Divider Module";
- dc->vmsd = &vmstate_npcm7xx_clk_divider;
-}
-
-static void npcm7xx_clk_class_init(ObjectClass *klass, void *data)
-{
- ResettableClass *rc = RESETTABLE_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- QEMU_BUILD_BUG_ON(NPCM7XX_CLK_REGS_END > NPCM7XX_CLK_NR_REGS);
-
- dc->desc = "NPCM7xx Clock Control Registers";
- dc->vmsd = &vmstate_npcm7xx_clk;
- dc->realize = npcm7xx_clk_realize;
- rc->phases.enter = npcm7xx_clk_enter_reset;
-}
-
-static const TypeInfo npcm7xx_clk_pll_info = {
- .name = TYPE_NPCM7XX_CLOCK_PLL,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(NPCM7xxClockPLLState),
- .instance_init = npcm7xx_clk_pll_init,
- .class_init = npcm7xx_clk_pll_class_init,
-};
-
-static const TypeInfo npcm7xx_clk_sel_info = {
- .name = TYPE_NPCM7XX_CLOCK_SEL,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(NPCM7xxClockSELState),
- .instance_init = npcm7xx_clk_sel_init,
- .class_init = npcm7xx_clk_sel_class_init,
-};
-
-static const TypeInfo npcm7xx_clk_divider_info = {
- .name = TYPE_NPCM7XX_CLOCK_DIVIDER,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(NPCM7xxClockDividerState),
- .instance_init = npcm7xx_clk_divider_init,
- .class_init = npcm7xx_clk_divider_class_init,
-};
-
-static const TypeInfo npcm7xx_clk_info = {
- .name = TYPE_NPCM7XX_CLK,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(NPCM7xxCLKState),
- .instance_init = npcm7xx_clk_init,
- .class_init = npcm7xx_clk_class_init,
-};
-
-static void npcm7xx_clk_register_type(void)
-{
- type_register_static(&npcm7xx_clk_pll_info);
- type_register_static(&npcm7xx_clk_sel_info);
- type_register_static(&npcm7xx_clk_divider_info);
- type_register_static(&npcm7xx_clk_info);
-}
-type_init(npcm7xx_clk_register_type);
diff --git a/hw/misc/npcm7xx_gcr.c b/hw/misc/npcm7xx_gcr.c
deleted file mode 100644
index c4c4e24..0000000
--- a/hw/misc/npcm7xx_gcr.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Nuvoton NPCM7xx System Global Control Registers.
- *
- * Copyright 2020 Google LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include "qemu/osdep.h"
-
-#include "hw/misc/npcm7xx_gcr.h"
-#include "hw/qdev-properties.h"
-#include "migration/vmstate.h"
-#include "qapi/error.h"
-#include "qemu/cutils.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "qemu/units.h"
-
-#include "trace.h"
-
-#define NPCM7XX_GCR_MIN_DRAM_SIZE (128 * MiB)
-#define NPCM7XX_GCR_MAX_DRAM_SIZE (2 * GiB)
-
-enum NPCM7xxGCRRegisters {
- NPCM7XX_GCR_PDID,
- NPCM7XX_GCR_PWRON,
- NPCM7XX_GCR_MFSEL1 = 0x0c / sizeof(uint32_t),
- NPCM7XX_GCR_MFSEL2,
- NPCM7XX_GCR_MISCPE,
- NPCM7XX_GCR_SPSWC = 0x038 / sizeof(uint32_t),
- NPCM7XX_GCR_INTCR,
- NPCM7XX_GCR_INTSR,
- NPCM7XX_GCR_HIFCR = 0x050 / sizeof(uint32_t),
- NPCM7XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t),
- NPCM7XX_GCR_MFSEL3,
- NPCM7XX_GCR_SRCNT,
- NPCM7XX_GCR_RESSR,
- NPCM7XX_GCR_RLOCKR1,
- NPCM7XX_GCR_FLOCKR1,
- NPCM7XX_GCR_DSCNT,
- NPCM7XX_GCR_MDLR,
- NPCM7XX_GCR_SCRPAD3,
- NPCM7XX_GCR_SCRPAD2,
- NPCM7XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t),
- NPCM7XX_GCR_INTCR3,
- NPCM7XX_GCR_VSINTR = 0x0ac / sizeof(uint32_t),
- NPCM7XX_GCR_MFSEL4,
- NPCM7XX_GCR_CPBPNTR = 0x0c4 / sizeof(uint32_t),
- NPCM7XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t),
- NPCM7XX_GCR_CP2BST,
- NPCM7XX_GCR_B2CPNT,
- NPCM7XX_GCR_CPPCTL,
- NPCM7XX_GCR_I2CSEGSEL,
- NPCM7XX_GCR_I2CSEGCTL,
- NPCM7XX_GCR_VSRCR,
- NPCM7XX_GCR_MLOCKR,
- NPCM7XX_GCR_SCRPAD = 0x013c / sizeof(uint32_t),
- NPCM7XX_GCR_USB1PHYCTL,
- NPCM7XX_GCR_USB2PHYCTL,
- NPCM7XX_GCR_REGS_END,
-};
-
-static const uint32_t cold_reset_values[NPCM7XX_GCR_NR_REGS] = {
- [NPCM7XX_GCR_PDID] = 0x04a92750, /* Poleg A1 */
- [NPCM7XX_GCR_MISCPE] = 0x0000ffff,
- [NPCM7XX_GCR_SPSWC] = 0x00000003,
- [NPCM7XX_GCR_INTCR] = 0x0000035e,
- [NPCM7XX_GCR_HIFCR] = 0x0000004e,
- [NPCM7XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */
- [NPCM7XX_GCR_RESSR] = 0x80000000,
- [NPCM7XX_GCR_DSCNT] = 0x000000c0,
- [NPCM7XX_GCR_DAVCLVLR] = 0x5a00f3cf,
- [NPCM7XX_GCR_SCRPAD] = 0x00000008,
- [NPCM7XX_GCR_USB1PHYCTL] = 0x034730e4,
- [NPCM7XX_GCR_USB2PHYCTL] = 0x034730e4,
-};
-
-static uint64_t npcm7xx_gcr_read(void *opaque, hwaddr offset, unsigned size)
-{
- uint32_t reg = offset / sizeof(uint32_t);
- NPCM7xxGCRState *s = opaque;
-
- if (reg >= NPCM7XX_GCR_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
- __func__, offset);
- return 0;
- }
-
- trace_npcm7xx_gcr_read(offset, s->regs[reg]);
-
- return s->regs[reg];
-}
-
-static void npcm7xx_gcr_write(void *opaque, hwaddr offset,
- uint64_t v, unsigned size)
-{
- uint32_t reg = offset / sizeof(uint32_t);
- NPCM7xxGCRState *s = opaque;
- uint32_t value = v;
-
- trace_npcm7xx_gcr_write(offset, value);
-
- if (reg >= NPCM7XX_GCR_NR_REGS) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
- __func__, offset);
- return;
- }
-
- switch (reg) {
- case NPCM7XX_GCR_PDID:
- case NPCM7XX_GCR_PWRON:
- case NPCM7XX_GCR_INTSR:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n",
- __func__, offset);
- return;
-
- case NPCM7XX_GCR_RESSR:
- case NPCM7XX_GCR_CP2BST:
- /* Write 1 to clear */
- value = s->regs[reg] & ~value;
- break;
-
- case NPCM7XX_GCR_RLOCKR1:
- case NPCM7XX_GCR_MDLR:
- /* Write 1 to set */
- value |= s->regs[reg];
- break;
- };
-
- s->regs[reg] = value;
-}
-
-static const struct MemoryRegionOps npcm7xx_gcr_ops = {
- .read = npcm7xx_gcr_read,
- .write = npcm7xx_gcr_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4,
- .unaligned = false,
- },
-};
-
-static void npcm7xx_gcr_enter_reset(Object *obj, ResetType type)
-{
- NPCM7xxGCRState *s = NPCM7XX_GCR(obj);
-
- QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values));
-
- memcpy(s->regs, cold_reset_values, sizeof(s->regs));
- s->regs[NPCM7XX_GCR_PWRON] = s->reset_pwron;
- s->regs[NPCM7XX_GCR_MDLR] = s->reset_mdlr;
- s->regs[NPCM7XX_GCR_INTCR3] = s->reset_intcr3;
-}
-
-static void npcm7xx_gcr_realize(DeviceState *dev, Error **errp)
-{
- ERRP_GUARD();
- NPCM7xxGCRState *s = NPCM7XX_GCR(dev);
- uint64_t dram_size;
- Object *obj;
-
- obj = object_property_get_link(OBJECT(dev), "dram-mr", errp);
- if (!obj) {
- error_prepend(errp, "%s: required dram-mr link not found: ", __func__);
- return;
- }
- dram_size = memory_region_size(MEMORY_REGION(obj));
- if (!is_power_of_2(dram_size) ||
- dram_size < NPCM7XX_GCR_MIN_DRAM_SIZE ||
- dram_size > NPCM7XX_GCR_MAX_DRAM_SIZE) {
- g_autofree char *sz = size_to_str(dram_size);
- g_autofree char *min_sz = size_to_str(NPCM7XX_GCR_MIN_DRAM_SIZE);
- g_autofree char *max_sz = size_to_str(NPCM7XX_GCR_MAX_DRAM_SIZE);
- error_setg(errp, "%s: unsupported DRAM size %s", __func__, sz);
- error_append_hint(errp,
- "DRAM size must be a power of two between %s and %s,"
- " inclusive.\n", min_sz, max_sz);
- return;
- }
-
- /* Power-on reset value */
- s->reset_intcr3 = 0x00001002;
-
- /*
- * The GMMAP (Graphics Memory Map) field is used by u-boot to detect the
- * DRAM size, and is normally initialized by the boot block as part of DRAM
- * training. However, since we don't have a complete emulation of the
- * memory controller and try to make it look like it has already been
- * initialized, the boot block will skip this initialization, and we need
- * to make sure this field is set correctly up front.
- *
- * WARNING: some versions of u-boot only looks at bits 8 and 9, so 2 GiB of
- * DRAM will be interpreted as 128 MiB.
- *
- * https://github.com/Nuvoton-Israel/u-boot/blob/2aef993bd2aafeb5408dbaad0f3ce099ee40c4aa/board/nuvoton/poleg/poleg.c#L244
- */
- s->reset_intcr3 |= ctz64(dram_size / NPCM7XX_GCR_MIN_DRAM_SIZE) << 8;
-}
-
-static void npcm7xx_gcr_init(Object *obj)
-{
- NPCM7xxGCRState *s = NPCM7XX_GCR(obj);
-
- memory_region_init_io(&s->iomem, obj, &npcm7xx_gcr_ops, s,
- TYPE_NPCM7XX_GCR, 4 * KiB);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
-}
-
-static const VMStateDescription vmstate_npcm7xx_gcr = {
- .name = "npcm7xx-gcr",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(regs, NPCM7xxGCRState, NPCM7XX_GCR_NR_REGS),
- VMSTATE_END_OF_LIST(),
- },
-};
-
-static Property npcm7xx_gcr_properties[] = {
- DEFINE_PROP_UINT32("disabled-modules", NPCM7xxGCRState, reset_mdlr, 0),
- DEFINE_PROP_UINT32("power-on-straps", NPCM7xxGCRState, reset_pwron, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void npcm7xx_gcr_class_init(ObjectClass *klass, void *data)
-{
- ResettableClass *rc = RESETTABLE_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- QEMU_BUILD_BUG_ON(NPCM7XX_GCR_REGS_END > NPCM7XX_GCR_NR_REGS);
-
- dc->desc = "NPCM7xx System Global Control Registers";
- dc->realize = npcm7xx_gcr_realize;
- dc->vmsd = &vmstate_npcm7xx_gcr;
- rc->phases.enter = npcm7xx_gcr_enter_reset;
-
- device_class_set_props(dc, npcm7xx_gcr_properties);
-}
-
-static const TypeInfo npcm7xx_gcr_info = {
- .name = TYPE_NPCM7XX_GCR,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(NPCM7xxGCRState),
- .instance_init = npcm7xx_gcr_init,
- .class_init = npcm7xx_gcr_class_init,
-};
-
-static void npcm7xx_gcr_register_type(void)
-{
- type_register_static(&npcm7xx_gcr_info);
-}
-type_init(npcm7xx_gcr_register_type);
diff --git a/hw/misc/npcm7xx_mft.c b/hw/misc/npcm7xx_mft.c
index 9fcc69f..b35e971 100644
--- a/hw/misc/npcm7xx_mft.c
+++ b/hw/misc/npcm7xx_mft.c
@@ -172,8 +172,9 @@ static NPCM7xxMFTCaptureState npcm7xx_mft_compute_cnt(
* RPM = revolution/min. The time for one revolution (in ns) is
* MINUTE_TO_NANOSECOND / RPM.
*/
- count = clock_ns_to_ticks(clock, (60 * NANOSECONDS_PER_SECOND) /
- (rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION));
+ count = clock_ns_to_ticks(clock,
+ (uint64_t)(60 * NANOSECONDS_PER_SECOND) /
+ ((uint64_t)rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION));
}
if (count > NPCM7XX_MFT_MAX_CNT) {
@@ -514,7 +515,7 @@ static const VMStateDescription vmstate_npcm7xx_mft = {
},
};
-static void npcm7xx_mft_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_mft_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
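
The casts in the first hunk keep the period computation for the formula quoted in the comment, period = 60 s / (rpm * pulses per revolution), entirely in 64-bit arithmetic. A worked number as a sketch, assuming 2 pulses per revolution (the actual NPCM7XX_MFT_PULSE_PER_REVOLUTION value is not shown in this patch):

    #include <stdint.h>

    /* 64-bit version of the pulse-period formula; "2" is the assumed
     * pulses-per-revolution, and the caller must ensure rpm != 0. */
    static inline uint64_t pulse_period_ns(uint32_t rpm)
    {
        return (uint64_t)60 * 1000000000ULL / ((uint64_t)rpm * 2);
    }

    /* Example: pulse_period_ns(19800) == 1515151 ns, which clock_ns_to_ticks()
     * then converts into timer counts in the hunk above. */
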
diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c
index f7f77e3..2de18d0 100644
--- a/hw/misc/npcm7xx_pwm.c
+++ b/hw/misc/npcm7xx_pwm.c
@@ -543,7 +543,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm_module = {
},
};
-static void npcm7xx_pwm_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_pwm_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/npcm7xx_rng.c b/hw/misc/npcm7xx_rng.c
index 7f7e5ec..7d47a1c 100644
--- a/hw/misc/npcm7xx_rng.c
+++ b/hw/misc/npcm7xx_rng.c
@@ -158,7 +158,7 @@ static const VMStateDescription vmstate_npcm7xx_rng = {
},
};
-static void npcm7xx_rng_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_rng_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/npcm_clk.c b/hw/misc/npcm_clk.c
new file mode 100644
index 0000000..c48d40b
--- /dev/null
+++ b/hw/misc/npcm_clk.c
@@ -0,0 +1,1220 @@
+/*
+ * Nuvoton NPCM7xx/8xx Clock Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/misc/npcm_clk.h"
+#include "hw/timer/npcm7xx_timer.h"
+#include "hw/qdev-clock.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/timer.h"
+#include "qemu/units.h"
+#include "trace.h"
+#include "system/watchdog.h"
+
+/*
+ * The reference clock used by the SECCNT and CNTR25M registers in this module
+ * always runs at 25 MHz.
+ */
+#define NPCM7XX_CLOCK_REF_HZ (25000000)
+
+/* Register Field Definitions */
+#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */
+
+#define PLLCON_LOKI BIT(31)
+#define PLLCON_LOKS BIT(30)
+#define PLLCON_PWDEN BIT(12)
+#define PLLCON_FBDV(con) extract32((con), 16, 12)
+#define PLLCON_OTDV2(con) extract32((con), 13, 3)
+#define PLLCON_OTDV1(con) extract32((con), 8, 3)
+#define PLLCON_INDV(con) extract32((con), 0, 6)
+
+enum NPCM7xxCLKRegisters {
+ NPCM7XX_CLK_CLKEN1,
+ NPCM7XX_CLK_CLKSEL,
+ NPCM7XX_CLK_CLKDIV1,
+ NPCM7XX_CLK_PLLCON0,
+ NPCM7XX_CLK_PLLCON1,
+ NPCM7XX_CLK_SWRSTR,
+ NPCM7XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t),
+ NPCM7XX_CLK_IPSRST2,
+ NPCM7XX_CLK_CLKEN2,
+ NPCM7XX_CLK_CLKDIV2,
+ NPCM7XX_CLK_CLKEN3,
+ NPCM7XX_CLK_IPSRST3,
+ NPCM7XX_CLK_WD0RCR,
+ NPCM7XX_CLK_WD1RCR,
+ NPCM7XX_CLK_WD2RCR,
+ NPCM7XX_CLK_SWRSTC1,
+ NPCM7XX_CLK_SWRSTC2,
+ NPCM7XX_CLK_SWRSTC3,
+ NPCM7XX_CLK_SWRSTC4,
+ NPCM7XX_CLK_PLLCON2,
+ NPCM7XX_CLK_CLKDIV3,
+ NPCM7XX_CLK_CORSTC,
+ NPCM7XX_CLK_PLLCONG,
+ NPCM7XX_CLK_AHBCKFI,
+ NPCM7XX_CLK_SECCNT,
+ NPCM7XX_CLK_CNTR25M,
+};
+
+enum NPCM8xxCLKRegisters {
+ NPCM8XX_CLK_CLKEN1,
+ NPCM8XX_CLK_CLKSEL,
+ NPCM8XX_CLK_CLKDIV1,
+ NPCM8XX_CLK_PLLCON0,
+ NPCM8XX_CLK_PLLCON1,
+ NPCM8XX_CLK_SWRSTR,
+ NPCM8XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t),
+ NPCM8XX_CLK_IPSRST2,
+ NPCM8XX_CLK_CLKEN2,
+ NPCM8XX_CLK_CLKDIV2,
+ NPCM8XX_CLK_CLKEN3,
+ NPCM8XX_CLK_IPSRST3,
+ NPCM8XX_CLK_WD0RCR,
+ NPCM8XX_CLK_WD1RCR,
+ NPCM8XX_CLK_WD2RCR,
+ NPCM8XX_CLK_SWRSTC1,
+ NPCM8XX_CLK_SWRSTC2,
+ NPCM8XX_CLK_SWRSTC3,
+ NPCM8XX_CLK_TIPRSTC,
+ NPCM8XX_CLK_PLLCON2,
+ NPCM8XX_CLK_CLKDIV3,
+ NPCM8XX_CLK_CORSTC,
+ NPCM8XX_CLK_PLLCONG,
+ NPCM8XX_CLK_AHBCKFI,
+ NPCM8XX_CLK_SECCNT,
+ NPCM8XX_CLK_CNTR25M,
+ /* Registers unique to NPCM8XX SoC */
+ NPCM8XX_CLK_CLKEN4,
+ NPCM8XX_CLK_IPSRST4,
+ NPCM8XX_CLK_BUSTO,
+ NPCM8XX_CLK_CLKDIV4,
+ NPCM8XX_CLK_WD0RCRB,
+ NPCM8XX_CLK_WD1RCRB,
+ NPCM8XX_CLK_WD2RCRB,
+ NPCM8XX_CLK_SWRSTC1B,
+ NPCM8XX_CLK_SWRSTC2B,
+ NPCM8XX_CLK_SWRSTC3B,
+ NPCM8XX_CLK_TIPRSTCB,
+ NPCM8XX_CLK_CORSTCB,
+ NPCM8XX_CLK_IPSRSTDIS1,
+ NPCM8XX_CLK_IPSRSTDIS2,
+ NPCM8XX_CLK_IPSRSTDIS3,
+ NPCM8XX_CLK_IPSRSTDIS4,
+ NPCM8XX_CLK_CLKENDIS1,
+ NPCM8XX_CLK_CLKENDIS2,
+ NPCM8XX_CLK_CLKENDIS3,
+ NPCM8XX_CLK_CLKENDIS4,
+ NPCM8XX_CLK_THRTL_CNT,
+};
+
+/*
+ * These reset values were taken from version 0.91 of the NPCM750R data sheet.
+ *
+ * All are loaded on power-up reset. CLKENx and SWRSTR should also be loaded on
+ * core domain reset, but this reset type is not yet supported by QEMU.
+ */
+static const uint32_t npcm7xx_cold_reset_values[NPCM7XX_CLK_NR_REGS] = {
+ [NPCM7XX_CLK_CLKEN1] = 0xffffffff,
+ [NPCM7XX_CLK_CLKSEL] = 0x004aaaaa,
+ [NPCM7XX_CLK_CLKDIV1] = 0x5413f855,
+ [NPCM7XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI,
+ [NPCM7XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI,
+ [NPCM7XX_CLK_IPSRST1] = 0x00001000,
+ [NPCM7XX_CLK_IPSRST2] = 0x80000000,
+ [NPCM7XX_CLK_CLKEN2] = 0xffffffff,
+ [NPCM7XX_CLK_CLKDIV2] = 0xaa4f8f9f,
+ [NPCM7XX_CLK_CLKEN3] = 0xffffffff,
+ [NPCM7XX_CLK_IPSRST3] = 0x03000000,
+ [NPCM7XX_CLK_WD0RCR] = 0xffffffff,
+ [NPCM7XX_CLK_WD1RCR] = 0xffffffff,
+ [NPCM7XX_CLK_WD2RCR] = 0xffffffff,
+ [NPCM7XX_CLK_SWRSTC1] = 0x00000003,
+ [NPCM7XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI,
+ [NPCM7XX_CLK_CORSTC] = 0x04000003,
+ [NPCM7XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI,
+ [NPCM7XX_CLK_AHBCKFI] = 0x000000c8,
+};
+
+/*
+ * These reset values were taken from version 0.92 of the NPCM8xx data sheet.
+ */
+static const uint32_t npcm8xx_cold_reset_values[NPCM8XX_CLK_NR_REGS] = {
+ [NPCM8XX_CLK_CLKEN1] = 0xffffffff,
+ [NPCM8XX_CLK_CLKSEL] = 0x154aaaaa,
+ [NPCM8XX_CLK_CLKDIV1] = 0x5413f855,
+ [NPCM8XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI,
+ [NPCM8XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI,
+ [NPCM8XX_CLK_IPSRST1] = 0x00001000,
+ [NPCM8XX_CLK_IPSRST2] = 0x80000000,
+ [NPCM8XX_CLK_CLKEN2] = 0xffffffff,
+ [NPCM8XX_CLK_CLKDIV2] = 0xaa4f8f9f,
+ [NPCM8XX_CLK_CLKEN3] = 0xffffffff,
+ [NPCM8XX_CLK_IPSRST3] = 0x03000000,
+ [NPCM8XX_CLK_WD0RCR] = 0xffffffff,
+ [NPCM8XX_CLK_WD1RCR] = 0xffffffff,
+ [NPCM8XX_CLK_WD2RCR] = 0xffffffff,
+ [NPCM8XX_CLK_SWRSTC1] = 0x00000003,
+ [NPCM8XX_CLK_SWRSTC2] = 0x00000001,
+ [NPCM8XX_CLK_SWRSTC3] = 0x00000001,
+ [NPCM8XX_CLK_TIPRSTC] = 0x00000001,
+ [NPCM8XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI,
+ [NPCM8XX_CLK_CLKDIV3] = 0x00009100,
+ [NPCM8XX_CLK_CORSTC] = 0x04000003,
+ [NPCM8XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI,
+ [NPCM8XX_CLK_AHBCKFI] = 0x000000c8,
+ [NPCM8XX_CLK_CLKEN4] = 0xffffffff,
+ [NPCM8XX_CLK_CLKDIV4] = 0x70009000,
+ [NPCM8XX_CLK_IPSRST4] = 0x02000000,
+ [NPCM8XX_CLK_WD0RCRB] = 0xfffffe71,
+ [NPCM8XX_CLK_WD1RCRB] = 0xfffffe71,
+ [NPCM8XX_CLK_WD2RCRB] = 0xfffffe71,
+ [NPCM8XX_CLK_SWRSTC1B] = 0xfffffe71,
+ [NPCM8XX_CLK_SWRSTC2B] = 0xfffffe71,
+ [NPCM8XX_CLK_SWRSTC3B] = 0xfffffe71,
+ [NPCM8XX_CLK_TIPRSTCB] = 0xfffffe71,
+ [NPCM8XX_CLK_CORSTCB] = 0xfffffe71,
+};
+
+/* The number of watchdogs that can trigger a reset. */
+#define NPCM7XX_NR_WATCHDOGS (3)
+
+/* Clock converter functions */
+
+#define TYPE_NPCM7XX_CLOCK_PLL "npcm7xx-clock-pll"
+#define NPCM7XX_CLOCK_PLL(obj) OBJECT_CHECK(NPCM7xxClockPLLState, \
+ (obj), TYPE_NPCM7XX_CLOCK_PLL)
+#define TYPE_NPCM7XX_CLOCK_SEL "npcm7xx-clock-sel"
+#define NPCM7XX_CLOCK_SEL(obj) OBJECT_CHECK(NPCM7xxClockSELState, \
+ (obj), TYPE_NPCM7XX_CLOCK_SEL)
+#define TYPE_NPCM7XX_CLOCK_DIVIDER "npcm7xx-clock-divider"
+#define NPCM7XX_CLOCK_DIVIDER(obj) OBJECT_CHECK(NPCM7xxClockDividerState, \
+ (obj), TYPE_NPCM7XX_CLOCK_DIVIDER)
+
+static void npcm7xx_clk_update_pll(void *opaque)
+{
+ NPCM7xxClockPLLState *s = opaque;
+ uint32_t con = s->clk->regs[s->reg];
+ uint64_t freq;
+
+ /* The PLL is grounded if it is not locked yet. */
+ if (con & PLLCON_LOKI) {
+ freq = clock_get_hz(s->clock_in);
+ freq *= PLLCON_FBDV(con);
+ freq /= PLLCON_INDV(con) * PLLCON_OTDV1(con) * PLLCON_OTDV2(con);
+ } else {
+ freq = 0;
+ }
+
+ clock_update_hz(s->clock_out, freq);
+}
+
+static void npcm7xx_clk_update_sel(void *opaque)
+{
+ NPCM7xxClockSELState *s = opaque;
+ uint32_t index = extract32(s->clk->regs[NPCM7XX_CLK_CLKSEL], s->offset,
+ s->len);
+
+ if (index >= s->input_size) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SEL index: %u out of range\n",
+ __func__, index);
+ index = 0;
+ }
+ clock_update_hz(s->clock_out, clock_get_hz(s->clock_in[index]));
+}
+
+static void npcm7xx_clk_update_divider(void *opaque)
+{
+ NPCM7xxClockDividerState *s = opaque;
+ uint32_t freq;
+
+ freq = s->divide(s);
+ clock_update_hz(s->clock_out, freq);
+}
+
+static uint32_t divide_by_constant(NPCM7xxClockDividerState *s)
+{
+ return clock_get_hz(s->clock_in) / s->divisor;
+}
+
+static uint32_t divide_by_reg_divisor(NPCM7xxClockDividerState *s)
+{
+ return clock_get_hz(s->clock_in) /
+ (extract32(s->clk->regs[s->reg], s->offset, s->len) + 1);
+}
+
+static uint32_t divide_by_reg_divisor_times_2(NPCM7xxClockDividerState *s)
+{
+ return divide_by_reg_divisor(s) / 2;
+}
+
+static uint32_t shift_by_reg_divisor(NPCM7xxClockDividerState *s)
+{
+ return clock_get_hz(s->clock_in) >>
+ extract32(s->clk->regs[s->reg], s->offset, s->len);
+}
+
+static NPCM7xxClockPLL find_pll_by_reg(enum NPCM7xxCLKRegisters reg)
+{
+ switch (reg) {
+ case NPCM7XX_CLK_PLLCON0:
+ return NPCM7XX_CLOCK_PLL0;
+ case NPCM7XX_CLK_PLLCON1:
+ return NPCM7XX_CLOCK_PLL1;
+ case NPCM7XX_CLK_PLLCON2:
+ return NPCM7XX_CLOCK_PLL2;
+ case NPCM7XX_CLK_PLLCONG:
+ return NPCM7XX_CLOCK_PLLG;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void npcm7xx_clk_update_all_plls(NPCMCLKState *clk)
+{
+ int i;
+
+ for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
+ npcm7xx_clk_update_pll(&clk->plls[i]);
+ }
+}
+
+static void npcm7xx_clk_update_all_sels(NPCMCLKState *clk)
+{
+ int i;
+
+ for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
+ npcm7xx_clk_update_sel(&clk->sels[i]);
+ }
+}
+
+static void npcm7xx_clk_update_all_dividers(NPCMCLKState *clk)
+{
+ int i;
+
+ for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
+ npcm7xx_clk_update_divider(&clk->dividers[i]);
+ }
+}
+
+static void npcm7xx_clk_update_all_clocks(NPCMCLKState *clk)
+{
+ clock_update_hz(clk->clkref, NPCM7XX_CLOCK_REF_HZ);
+ npcm7xx_clk_update_all_plls(clk);
+ npcm7xx_clk_update_all_sels(clk);
+ npcm7xx_clk_update_all_dividers(clk);
+}
+
+/* Types of clock sources. */
+typedef enum ClockSrcType {
+ CLKSRC_REF,
+ CLKSRC_PLL,
+ CLKSRC_SEL,
+ CLKSRC_DIV,
+} ClockSrcType;
+
+typedef struct PLLInitInfo {
+ const char *name;
+ ClockSrcType src_type;
+ int src_index;
+ int reg;
+ const char *public_name;
+} PLLInitInfo;
+
+typedef struct SELInitInfo {
+ const char *name;
+ uint8_t input_size;
+ ClockSrcType src_type[NPCM7XX_CLK_SEL_MAX_INPUT];
+ int src_index[NPCM7XX_CLK_SEL_MAX_INPUT];
+ int offset;
+ int len;
+ const char *public_name;
+} SELInitInfo;
+
+typedef struct DividerInitInfo {
+ const char *name;
+ ClockSrcType src_type;
+ int src_index;
+ uint32_t (*divide)(NPCM7xxClockDividerState *s);
+ int reg; /* not used when type == CONSTANT */
+ int offset; /* not used when type == CONSTANT */
+ int len; /* not used when type == CONSTANT */
+ int divisor; /* used only when type == CONSTANT */
+ const char *public_name;
+} DividerInitInfo;
+
+static const PLLInitInfo pll_init_info_list[] = {
+ [NPCM7XX_CLOCK_PLL0] = {
+ .name = "pll0",
+ .src_type = CLKSRC_REF,
+ .reg = NPCM7XX_CLK_PLLCON0,
+ },
+ [NPCM7XX_CLOCK_PLL1] = {
+ .name = "pll1",
+ .src_type = CLKSRC_REF,
+ .reg = NPCM7XX_CLK_PLLCON1,
+ },
+ [NPCM7XX_CLOCK_PLL2] = {
+ .name = "pll2",
+ .src_type = CLKSRC_REF,
+ .reg = NPCM7XX_CLK_PLLCON2,
+ },
+ [NPCM7XX_CLOCK_PLLG] = {
+ .name = "pllg",
+ .src_type = CLKSRC_REF,
+ .reg = NPCM7XX_CLK_PLLCONG,
+ },
+};
+
+static const SELInitInfo sel_init_info_list[] = {
+ [NPCM7XX_CLOCK_PIXCKSEL] = {
+ .name = "pixcksel",
+ .input_size = 2,
+ .src_type = {CLKSRC_PLL, CLKSRC_REF},
+ .src_index = {NPCM7XX_CLOCK_PLLG, 0},
+ .offset = 5,
+ .len = 1,
+ .public_name = "pixel-clock",
+ },
+ [NPCM7XX_CLOCK_MCCKSEL] = {
+ .name = "mccksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_DIV, CLKSRC_REF, CLKSRC_REF,
+ /* MCBPCK, shouldn't be used in normal operation */
+ CLKSRC_REF},
+ .src_index = {NPCM7XX_CLOCK_PLL1D2, 0, 0, 0},
+ .offset = 12,
+ .len = 2,
+ .public_name = "mc-phy-clock",
+ },
+ [NPCM7XX_CLOCK_CPUCKSEL] = {
+ .name = "cpucksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF,
+ /* SYSBPCK, shouldn't be used in normal operation */
+ CLKSRC_REF},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, 0},
+ .offset = 0,
+ .len = 2,
+ .public_name = "system-clock",
+ },
+ [NPCM7XX_CLOCK_CLKOUTSEL] = {
+ .name = "clkoutsel",
+ .input_size = 5,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF,
+ CLKSRC_PLL, CLKSRC_DIV},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
+ NPCM7XX_CLOCK_PLLG, NPCM7XX_CLOCK_PLL2D2},
+ .offset = 18,
+ .len = 3,
+ .public_name = "tock",
+ },
+ [NPCM7XX_CLOCK_UARTCKSEL] = {
+ .name = "uartcksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
+ NPCM7XX_CLOCK_PLL2D2},
+ .offset = 8,
+ .len = 2,
+ },
+ [NPCM7XX_CLOCK_TIMCKSEL] = {
+ .name = "timcksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
+ NPCM7XX_CLOCK_PLL2D2},
+ .offset = 14,
+ .len = 2,
+ },
+ [NPCM7XX_CLOCK_SDCKSEL] = {
+ .name = "sdcksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
+ NPCM7XX_CLOCK_PLL2D2},
+ .offset = 6,
+ .len = 2,
+ },
+ [NPCM7XX_CLOCK_GFXMSEL] = {
+ .name = "gfxmksel",
+ .input_size = 2,
+ .src_type = {CLKSRC_REF, CLKSRC_PLL},
+ .src_index = {0, NPCM7XX_CLOCK_PLL2},
+ .offset = 21,
+ .len = 1,
+ },
+ [NPCM7XX_CLOCK_SUCKSEL] = {
+ .name = "sucksel",
+ .input_size = 4,
+ .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV},
+ .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0,
+ NPCM7XX_CLOCK_PLL2D2},
+ .offset = 10,
+ .len = 2,
+ },
+};
+
+static const DividerInitInfo divider_init_info_list[] = {
+ [NPCM7XX_CLOCK_PLL1D2] = {
+ .name = "pll1d2",
+ .src_type = CLKSRC_PLL,
+ .src_index = NPCM7XX_CLOCK_PLL1,
+ .divide = divide_by_constant,
+ .divisor = 2,
+ },
+ [NPCM7XX_CLOCK_PLL2D2] = {
+ .name = "pll2d2",
+ .src_type = CLKSRC_PLL,
+ .src_index = NPCM7XX_CLOCK_PLL2,
+ .divide = divide_by_constant,
+ .divisor = 2,
+ },
+ [NPCM7XX_CLOCK_MC_DIVIDER] = {
+ .name = "mc-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_MCCKSEL,
+ .divide = divide_by_constant,
+ .divisor = 2,
+ .public_name = "mc-clock"
+ },
+ [NPCM7XX_CLOCK_AXI_DIVIDER] = {
+ .name = "axi-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_CPUCKSEL,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 0,
+ .len = 1,
+ .public_name = "clk2"
+ },
+ [NPCM7XX_CLOCK_AHB_DIVIDER] = {
+ .name = "ahb-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AXI_DIVIDER,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 26,
+ .len = 2,
+ .public_name = "clk4"
+ },
+ [NPCM7XX_CLOCK_AHB3_DIVIDER] = {
+ .name = "ahb3-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 6,
+ .len = 5,
+ .public_name = "ahb3-spi3-clock"
+ },
+ [NPCM7XX_CLOCK_SPI0_DIVIDER] = {
+ .name = "spi0-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV3,
+ .offset = 6,
+ .len = 5,
+ .public_name = "spi0-clock",
+ },
+ [NPCM7XX_CLOCK_SPIX_DIVIDER] = {
+ .name = "spix-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV3,
+ .offset = 1,
+ .len = 5,
+ .public_name = "spix-clock",
+ },
+ [NPCM7XX_CLOCK_APB1_DIVIDER] = {
+ .name = "apb1-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 24,
+ .len = 2,
+ .public_name = "apb1-clock",
+ },
+ [NPCM7XX_CLOCK_APB2_DIVIDER] = {
+ .name = "apb2-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 26,
+ .len = 2,
+ .public_name = "apb2-clock",
+ },
+ [NPCM7XX_CLOCK_APB3_DIVIDER] = {
+ .name = "apb3-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 28,
+ .len = 2,
+ .public_name = "apb3-clock",
+ },
+ [NPCM7XX_CLOCK_APB4_DIVIDER] = {
+ .name = "apb4-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 30,
+ .len = 2,
+ .public_name = "apb4-clock",
+ },
+ [NPCM7XX_CLOCK_APB5_DIVIDER] = {
+ .name = "apb5-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_AHB_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 22,
+ .len = 2,
+ .public_name = "apb5-clock",
+ },
+ [NPCM7XX_CLOCK_CLKOUT_DIVIDER] = {
+ .name = "clkout-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_CLKOUTSEL,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 16,
+ .len = 5,
+ .public_name = "clkout",
+ },
+ [NPCM7XX_CLOCK_UART_DIVIDER] = {
+ .name = "uart-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_UARTCKSEL,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 16,
+ .len = 5,
+ .public_name = "uart-clock",
+ },
+ [NPCM7XX_CLOCK_TIMER_DIVIDER] = {
+ .name = "timer-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_TIMCKSEL,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 21,
+ .len = 5,
+ .public_name = "timer-clock",
+ },
+ [NPCM7XX_CLOCK_ADC_DIVIDER] = {
+ .name = "adc-divider",
+ .src_type = CLKSRC_DIV,
+ .src_index = NPCM7XX_CLOCK_TIMER_DIVIDER,
+ .divide = shift_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 28,
+ .len = 3,
+ .public_name = "adc-clock",
+ },
+ [NPCM7XX_CLOCK_MMC_DIVIDER] = {
+ .name = "mmc-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_SDCKSEL,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV1,
+ .offset = 11,
+ .len = 5,
+ .public_name = "mmc-clock",
+ },
+ [NPCM7XX_CLOCK_SDHC_DIVIDER] = {
+ .name = "sdhc-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_SDCKSEL,
+ .divide = divide_by_reg_divisor_times_2,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 0,
+ .len = 4,
+ .public_name = "sdhc-clock",
+ },
+ [NPCM7XX_CLOCK_GFXM_DIVIDER] = {
+ .name = "gfxm-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_GFXMSEL,
+ .divide = divide_by_constant,
+ .divisor = 3,
+ .public_name = "gfxm-clock",
+ },
+ [NPCM7XX_CLOCK_UTMI_DIVIDER] = {
+ .name = "utmi-divider",
+ .src_type = CLKSRC_SEL,
+ .src_index = NPCM7XX_CLOCK_SUCKSEL,
+ .divide = divide_by_reg_divisor,
+ .reg = NPCM7XX_CLK_CLKDIV2,
+ .offset = 8,
+ .len = 5,
+ .public_name = "utmi-clock",
+ },
+};
+
+static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_pll(opaque);
+}
+
+static void npcm7xx_clk_pll_init(Object *obj)
+{
+ NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj);
+
+ pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in",
+ npcm7xx_clk_update_pll_cb, pll,
+ ClockUpdate);
+ pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out");
+}
+
+static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_sel(opaque);
+}
+
+static void npcm7xx_clk_sel_init(Object *obj)
+{
+ int i;
+ NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj);
+
+ for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) {
+ g_autofree char *s = g_strdup_printf("clock-in[%d]", i);
+ sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), s,
+ npcm7xx_clk_update_sel_cb, sel, ClockUpdate);
+ }
+ sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out");
+}
+
+static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_divider(opaque);
+}
+
+static void npcm7xx_clk_divider_init(Object *obj)
+{
+ NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj);
+
+ div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in",
+ npcm7xx_clk_update_divider_cb,
+ div, ClockUpdate);
+ div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out");
+}
+
+static void npcm7xx_init_clock_pll(NPCM7xxClockPLLState *pll,
+ NPCMCLKState *clk, const PLLInitInfo *init_info)
+{
+ pll->name = init_info->name;
+ pll->clk = clk;
+ pll->reg = init_info->reg;
+ if (init_info->public_name != NULL) {
+ qdev_alias_clock(DEVICE(pll), "clock-out", DEVICE(clk),
+ init_info->public_name);
+ }
+}
+
+static void npcm7xx_init_clock_sel(NPCM7xxClockSELState *sel,
+ NPCMCLKState *clk, const SELInitInfo *init_info)
+{
+ int input_size = init_info->input_size;
+
+ sel->name = init_info->name;
+ sel->clk = clk;
+ sel->input_size = init_info->input_size;
+ g_assert(input_size <= NPCM7XX_CLK_SEL_MAX_INPUT);
+ sel->offset = init_info->offset;
+ sel->len = init_info->len;
+ if (init_info->public_name != NULL) {
+ qdev_alias_clock(DEVICE(sel), "clock-out", DEVICE(clk),
+ init_info->public_name);
+ }
+}
+
+static void npcm7xx_init_clock_divider(NPCM7xxClockDividerState *div,
+ NPCMCLKState *clk, const DividerInitInfo *init_info)
+{
+ div->name = init_info->name;
+ div->clk = clk;
+
+ div->divide = init_info->divide;
+ if (div->divide == divide_by_constant) {
+ div->divisor = init_info->divisor;
+ } else {
+ div->reg = init_info->reg;
+ div->offset = init_info->offset;
+ div->len = init_info->len;
+ }
+ if (init_info->public_name != NULL) {
+ qdev_alias_clock(DEVICE(div), "clock-out", DEVICE(clk),
+ init_info->public_name);
+ }
+}
+
+static Clock *npcm7xx_get_clock(NPCMCLKState *clk, ClockSrcType type,
+ int index)
+{
+ switch (type) {
+ case CLKSRC_REF:
+ return clk->clkref;
+ case CLKSRC_PLL:
+ return clk->plls[index].clock_out;
+ case CLKSRC_SEL:
+ return clk->sels[index].clock_out;
+ case CLKSRC_DIV:
+ return clk->dividers[index].clock_out;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void npcm7xx_connect_clocks(NPCMCLKState *clk)
+{
+ int i, j;
+ Clock *src;
+
+ for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
+ src = npcm7xx_get_clock(clk, pll_init_info_list[i].src_type,
+ pll_init_info_list[i].src_index);
+ clock_set_source(clk->plls[i].clock_in, src);
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
+ for (j = 0; j < sel_init_info_list[i].input_size; ++j) {
+ src = npcm7xx_get_clock(clk, sel_init_info_list[i].src_type[j],
+ sel_init_info_list[i].src_index[j]);
+ clock_set_source(clk->sels[i].clock_in[j], src);
+ }
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
+ src = npcm7xx_get_clock(clk, divider_init_info_list[i].src_type,
+ divider_init_info_list[i].src_index);
+ clock_set_source(clk->dividers[i].clock_in, src);
+ }
+}
+
+static uint64_t npcm_clk_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint32_t reg = offset / sizeof(uint32_t);
+ NPCMCLKState *s = opaque;
+ NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s);
+ int64_t now_ns;
+ uint32_t value = 0;
+
+ if (reg >= c->nr_regs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (reg) {
+ case NPCM7XX_CLK_SWRSTR:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n",
+ __func__, offset);
+ break;
+
+ case NPCM7XX_CLK_SECCNT:
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ value = (now_ns - s->ref_ns) / NANOSECONDS_PER_SECOND;
+ break;
+
+ case NPCM7XX_CLK_CNTR25M:
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ /*
+ * This register counts 25 MHz cycles, updating every 640 ns. It rolls
+ * over to zero every second.
+ *
+ * The 4 LSBs are always zero: (1e9 / 640) << 4 = 25000000.
+ */
+ value = (((now_ns - s->ref_ns) / 640) << 4) % NPCM7XX_CLOCK_REF_HZ;
+ break;
+
+ default:
+ value = s->regs[reg];
+ break;
+ };
+
+ trace_npcm_clk_read(offset, value);
+
+ return value;
+}
+
+static void npcm_clk_write(void *opaque, hwaddr offset,
+ uint64_t v, unsigned size)
+{
+ uint32_t reg = offset / sizeof(uint32_t);
+ NPCMCLKState *s = opaque;
+ NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s);
+ uint32_t value = v;
+
+ trace_npcm_clk_write(offset, value);
+
+ if (reg >= c->nr_regs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
+ __func__, offset);
+ return;
+ }
+
+ switch (reg) {
+ case NPCM7XX_CLK_SWRSTR:
+ qemu_log_mask(LOG_UNIMP, "%s: SW reset not implemented: 0x%02x\n",
+ __func__, value);
+ value = 0;
+ break;
+
+ case NPCM7XX_CLK_PLLCON0:
+ case NPCM7XX_CLK_PLLCON1:
+ case NPCM7XX_CLK_PLLCON2:
+ case NPCM7XX_CLK_PLLCONG:
+ if (value & PLLCON_PWDEN) {
+ /* Power down -- clear lock and indicate loss of lock */
+ value &= ~PLLCON_LOKI;
+ value |= PLLCON_LOKS;
+ } else {
+ /* Normal mode -- assume always locked */
+ value |= PLLCON_LOKI;
+ /* Keep LOKS unchanged unless cleared by writing 1 */
+ if (value & PLLCON_LOKS) {
+ value &= ~PLLCON_LOKS;
+ } else {
+ value |= (s->regs[reg] & PLLCON_LOKS);
+ }
+ }
+ /* Only update PLL when it is locked. */
+ if (value & PLLCON_LOKI) {
+ npcm7xx_clk_update_pll(&s->plls[find_pll_by_reg(reg)]);
+ }
+ break;
+
+ case NPCM7XX_CLK_CLKSEL:
+ npcm7xx_clk_update_all_sels(s);
+ break;
+
+ case NPCM7XX_CLK_CLKDIV1:
+ case NPCM7XX_CLK_CLKDIV2:
+ case NPCM7XX_CLK_CLKDIV3:
+ npcm7xx_clk_update_all_dividers(s);
+ break;
+
+ case NPCM7XX_CLK_CNTR25M:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n",
+ __func__, offset);
+ return;
+ }
+
+ s->regs[reg] = value;
+}
+
+/* Perform reset action triggered by a watchdog */
+static void npcm7xx_clk_perform_watchdog_reset(void *opaque, int n,
+ int level)
+{
+ NPCMCLKState *clk = NPCM_CLK(opaque);
+ uint32_t rcr;
+
+ g_assert(n >= 0 && n <= NPCM7XX_NR_WATCHDOGS);
+ rcr = clk->regs[NPCM7XX_CLK_WD0RCR + n];
+ if (rcr & NPCM7XX_CLK_WDRCR_CA9C) {
+ watchdog_perform_action();
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%s: only CPU reset is implemented. (requested 0x%" PRIx32")\n",
+ __func__, rcr);
+ }
+}
+
+static const struct MemoryRegionOps npcm_clk_ops = {
+ .read = npcm_clk_read,
+ .write = npcm_clk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void npcm_clk_enter_reset(Object *obj, ResetType type)
+{
+ NPCMCLKState *s = NPCM_CLK(obj);
+ NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s);
+
+ size_t sizeof_regs = c->nr_regs * sizeof(uint32_t);
+ g_assert(sizeof(s->regs) >= sizeof_regs);
+ memcpy(s->regs, c->cold_reset_values, sizeof_regs);
+ s->ref_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ npcm7xx_clk_update_all_clocks(s);
+ /*
+ * A small number of registers need to be reset on a core domain reset,
+ * but no such reset type exists yet.
+ */
+}
+
+static void npcm7xx_clk_init_clock_hierarchy(NPCMCLKState *s)
+{
+ int i;
+
+ s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0);
+
+ /* First pass: init all converter modules */
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS);
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(sel_init_info_list) != NPCM7XX_CLOCK_NR_SELS);
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(divider_init_info_list)
+ != NPCM7XX_CLOCK_NR_DIVIDERS);
+ for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
+ object_initialize_child(OBJECT(s), pll_init_info_list[i].name,
+ &s->plls[i], TYPE_NPCM7XX_CLOCK_PLL);
+ npcm7xx_init_clock_pll(&s->plls[i], s,
+ &pll_init_info_list[i]);
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
+ object_initialize_child(OBJECT(s), sel_init_info_list[i].name,
+ &s->sels[i], TYPE_NPCM7XX_CLOCK_SEL);
+ npcm7xx_init_clock_sel(&s->sels[i], s,
+ &sel_init_info_list[i]);
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
+ object_initialize_child(OBJECT(s), divider_init_info_list[i].name,
+ &s->dividers[i], TYPE_NPCM7XX_CLOCK_DIVIDER);
+ npcm7xx_init_clock_divider(&s->dividers[i], s,
+ &divider_init_info_list[i]);
+ }
+
+ /* Second pass: connect converter modules */
+ npcm7xx_connect_clocks(s);
+
+ clock_update_hz(s->clkref, NPCM7XX_CLOCK_REF_HZ);
+}
+
+static void npcm_clk_init(Object *obj)
+{
+ NPCMCLKState *s = NPCM_CLK(obj);
+
+ memory_region_init_io(&s->iomem, obj, &npcm_clk_ops, s,
+ TYPE_NPCM_CLK, 4 * KiB);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static int npcm_clk_post_load(void *opaque, int version_id)
+{
+ if (version_id >= 1) {
+ NPCMCLKState *clk = opaque;
+
+ npcm7xx_clk_update_all_clocks(clk);
+ }
+
+ return 0;
+}
+
+static void npcm_clk_realize(DeviceState *dev, Error **errp)
+{
+ int i;
+ NPCMCLKState *s = NPCM_CLK(dev);
+
+ qdev_init_gpio_in_named(DEVICE(s), npcm7xx_clk_perform_watchdog_reset,
+ NPCM7XX_WATCHDOG_RESET_GPIO_IN, NPCM7XX_NR_WATCHDOGS);
+ npcm7xx_clk_init_clock_hierarchy(s);
+
+ /* Realize child devices */
+ for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) {
+ if (!qdev_realize(DEVICE(&s->plls[i]), NULL, errp)) {
+ return;
+ }
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) {
+ if (!qdev_realize(DEVICE(&s->sels[i]), NULL, errp)) {
+ return;
+ }
+ }
+ for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) {
+ if (!qdev_realize(DEVICE(&s->dividers[i]), NULL, errp)) {
+ return;
+ }
+ }
+}
+
+static const VMStateDescription vmstate_npcm7xx_clk_pll = {
+ .name = "npcm7xx-clock-pll",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static const VMStateDescription vmstate_npcm7xx_clk_sel = {
+ .name = "npcm7xx-clock-sel",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState,
+ NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static const VMStateDescription vmstate_npcm7xx_clk_divider = {
+ .name = "npcm7xx-clock-divider",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static const VMStateDescription vmstate_npcm_clk = {
+ .name = "npcm-clk",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .post_load = npcm_clk_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, NPCMCLKState, NPCM_CLK_MAX_NR_REGS),
+ VMSTATE_INT64(ref_ns, NPCMCLKState),
+ VMSTATE_CLOCK(clkref, NPCMCLKState),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static void npcm7xx_clk_pll_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NPCM7xx Clock PLL Module";
+ dc->vmsd = &vmstate_npcm7xx_clk_pll;
+ /* Reason: Part of NPCMCLKState component */
+ dc->user_creatable = false;
+}
+
+static void npcm7xx_clk_sel_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NPCM7xx Clock SEL Module";
+ dc->vmsd = &vmstate_npcm7xx_clk_sel;
+ /* Reason: Part of NPCMCLKState component */
+ dc->user_creatable = false;
+}
+
+static void npcm7xx_clk_divider_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NPCM7xx Clock Divider Module";
+ dc->vmsd = &vmstate_npcm7xx_clk_divider;
+ /* Reason: Part of NPCMCLKState component */
+ dc->user_creatable = false;
+}
+
+static void npcm_clk_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_npcm_clk;
+ dc->realize = npcm_clk_realize;
+ rc->phases.enter = npcm_clk_enter_reset;
+}
+
+static void npcm7xx_clk_class_init(ObjectClass *klass, const void *data)
+{
+ NPCMCLKClass *c = NPCM_CLK_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NPCM7xx Clock Control Registers";
+ c->nr_regs = NPCM7XX_CLK_NR_REGS;
+ c->cold_reset_values = npcm7xx_cold_reset_values;
+}
+
+static void npcm8xx_clk_class_init(ObjectClass *klass, const void *data)
+{
+ NPCMCLKClass *c = NPCM_CLK_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "NPCM8xx Clock Control Registers";
+ c->nr_regs = NPCM8XX_CLK_NR_REGS;
+ c->cold_reset_values = npcm8xx_cold_reset_values;
+}
+
+static const TypeInfo npcm7xx_clk_pll_info = {
+ .name = TYPE_NPCM7XX_CLOCK_PLL,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(NPCM7xxClockPLLState),
+ .instance_init = npcm7xx_clk_pll_init,
+ .class_init = npcm7xx_clk_pll_class_init,
+};
+
+static const TypeInfo npcm7xx_clk_sel_info = {
+ .name = TYPE_NPCM7XX_CLOCK_SEL,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(NPCM7xxClockSELState),
+ .instance_init = npcm7xx_clk_sel_init,
+ .class_init = npcm7xx_clk_sel_class_init,
+};
+
+static const TypeInfo npcm7xx_clk_divider_info = {
+ .name = TYPE_NPCM7XX_CLOCK_DIVIDER,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(NPCM7xxClockDividerState),
+ .instance_init = npcm7xx_clk_divider_init,
+ .class_init = npcm7xx_clk_divider_class_init,
+};
+
+static const TypeInfo npcm_clk_info = {
+ .name = TYPE_NPCM_CLK,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(NPCMCLKState),
+ .instance_init = npcm_clk_init,
+ .class_size = sizeof(NPCMCLKClass),
+ .class_init = npcm_clk_class_init,
+ .abstract = true,
+};
+
+static const TypeInfo npcm7xx_clk_info = {
+ .name = TYPE_NPCM7XX_CLK,
+ .parent = TYPE_NPCM_CLK,
+ .class_init = npcm7xx_clk_class_init,
+};
+
+static const TypeInfo npcm8xx_clk_info = {
+ .name = TYPE_NPCM8XX_CLK,
+ .parent = TYPE_NPCM_CLK,
+ .class_init = npcm8xx_clk_class_init,
+};
+
+static void npcm7xx_clk_register_type(void)
+{
+ type_register_static(&npcm7xx_clk_pll_info);
+ type_register_static(&npcm7xx_clk_sel_info);
+ type_register_static(&npcm7xx_clk_divider_info);
+ type_register_static(&npcm_clk_info);
+ type_register_static(&npcm7xx_clk_info);
+ type_register_static(&npcm8xx_clk_info);
+}
+type_init(npcm7xx_clk_register_type);
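
The PLL arithmetic in npcm7xx_clk_update_pll() can be checked by hand against the PLLCON0 cold-reset value and the 25 MHz reference defined in this file. The standalone sketch below reproduces just that field-extraction and multiply/divide sequence (field positions copied from the PLLCON_* macros above); it is an illustration, not part of the device model:

    #include <inttypes.h>
    #include <stdio.h>

    /* Same field positions as the PLLCON_FBDV/INDV/OTDV1/OTDV2 macros. */
    static uint32_t field(uint32_t v, int pos, int len)
    {
        return (v >> pos) & ((1u << len) - 1);
    }

    int main(void)
    {
        const uint64_t ref_hz = 25000000;   /* NPCM7XX_CLOCK_REF_HZ */
        const uint32_t con    = 0x00222101; /* PLLCON0 cold-reset value; the
                                               OR'd-in LOKI bit 31 lies outside
                                               every extracted field */
        uint64_t freq = ref_hz;

        freq *= field(con, 16, 12);         /* FBDV  = 34 */
        freq /= field(con, 0, 6) *          /* INDV  = 1  */
                field(con, 8, 3) *          /* OTDV1 = 1  */
                field(con, 13, 3);          /* OTDV2 = 1  */

        /* Prints 850000000: PLL0 comes out of cold reset at 850 MHz. */
        printf("PLL0 output: %" PRIu64 " Hz\n", freq);
        return 0;
    }

The same trace applies to PLLCON1, PLLCON2 and PLLCONG with their respective reset values.
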
diff --git a/hw/misc/npcm_gcr.c b/hw/misc/npcm_gcr.c
new file mode 100644
index 0000000..2acaa16
--- /dev/null
+++ b/hw/misc/npcm_gcr.c
@@ -0,0 +1,482 @@
+/*
+ * Nuvoton NPCM7xx/8xx System Global Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/misc/npcm_gcr.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/units.h"
+
+#include "trace.h"
+
+#define NPCM7XX_GCR_MIN_DRAM_SIZE (128 * MiB)
+#define NPCM7XX_GCR_MAX_DRAM_SIZE (2 * GiB)
+
+enum NPCM7xxGCRRegisters {
+ NPCM7XX_GCR_PDID,
+ NPCM7XX_GCR_PWRON,
+ NPCM7XX_GCR_MFSEL1 = 0x0c / sizeof(uint32_t),
+ NPCM7XX_GCR_MFSEL2,
+ NPCM7XX_GCR_MISCPE,
+ NPCM7XX_GCR_SPSWC = 0x038 / sizeof(uint32_t),
+ NPCM7XX_GCR_INTCR,
+ NPCM7XX_GCR_INTSR,
+ NPCM7XX_GCR_HIFCR = 0x050 / sizeof(uint32_t),
+ NPCM7XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t),
+ NPCM7XX_GCR_MFSEL3,
+ NPCM7XX_GCR_SRCNT,
+ NPCM7XX_GCR_RESSR,
+ NPCM7XX_GCR_RLOCKR1,
+ NPCM7XX_GCR_FLOCKR1,
+ NPCM7XX_GCR_DSCNT,
+ NPCM7XX_GCR_MDLR,
+ NPCM7XX_GCR_SCRPAD3,
+ NPCM7XX_GCR_SCRPAD2,
+ NPCM7XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t),
+ NPCM7XX_GCR_INTCR3,
+ NPCM7XX_GCR_VSINTR = 0x0ac / sizeof(uint32_t),
+ NPCM7XX_GCR_MFSEL4,
+ NPCM7XX_GCR_CPBPNTR = 0x0c4 / sizeof(uint32_t),
+ NPCM7XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t),
+ NPCM7XX_GCR_CP2BST,
+ NPCM7XX_GCR_B2CPNT,
+ NPCM7XX_GCR_CPPCTL,
+ NPCM7XX_GCR_I2CSEGSEL,
+ NPCM7XX_GCR_I2CSEGCTL,
+ NPCM7XX_GCR_VSRCR,
+ NPCM7XX_GCR_MLOCKR,
+ NPCM7XX_GCR_SCRPAD = 0x013c / sizeof(uint32_t),
+ NPCM7XX_GCR_USB1PHYCTL,
+ NPCM7XX_GCR_USB2PHYCTL,
+};
+
+static const uint32_t npcm7xx_cold_reset_values[NPCM7XX_GCR_NR_REGS] = {
+ [NPCM7XX_GCR_PDID] = 0x04a92750, /* Poleg A1 */
+ [NPCM7XX_GCR_MISCPE] = 0x0000ffff,
+ [NPCM7XX_GCR_SPSWC] = 0x00000003,
+ [NPCM7XX_GCR_INTCR] = 0x0000035e,
+ [NPCM7XX_GCR_HIFCR] = 0x0000004e,
+ [NPCM7XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */
+ [NPCM7XX_GCR_RESSR] = 0x80000000,
+ [NPCM7XX_GCR_DSCNT] = 0x000000c0,
+ [NPCM7XX_GCR_DAVCLVLR] = 0x5a00f3cf,
+ [NPCM7XX_GCR_SCRPAD] = 0x00000008,
+ [NPCM7XX_GCR_USB1PHYCTL] = 0x034730e4,
+ [NPCM7XX_GCR_USB2PHYCTL] = 0x034730e4,
+};
+
+enum NPCM8xxGCRRegisters {
+ NPCM8XX_GCR_PDID,
+ NPCM8XX_GCR_PWRON,
+ NPCM8XX_GCR_MISCPE = 0x014 / sizeof(uint32_t),
+ NPCM8XX_GCR_FLOCKR2 = 0x020 / sizeof(uint32_t),
+ NPCM8XX_GCR_FLOCKR3,
+ NPCM8XX_GCR_A35_MODE = 0x034 / sizeof(uint32_t),
+ NPCM8XX_GCR_SPSWC,
+ NPCM8XX_GCR_INTCR,
+ NPCM8XX_GCR_INTSR,
+ NPCM8XX_GCR_HIFCR = 0x050 / sizeof(uint32_t),
+ NPCM8XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t),
+ NPCM8XX_GCR_SRCNT = 0x068 / sizeof(uint32_t),
+ NPCM8XX_GCR_RESSR,
+ NPCM8XX_GCR_RLOCKR1,
+ NPCM8XX_GCR_FLOCKR1,
+ NPCM8XX_GCR_DSCNT,
+ NPCM8XX_GCR_MDLR,
+ NPCM8XX_GCR_SCRPAD_C = 0x080 / sizeof(uint32_t),
+ NPCM8XX_GCR_SCRPAD_B,
+ NPCM8XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t),
+ NPCM8XX_GCR_INTCR3,
+ NPCM8XX_GCR_PCIRCTL = 0x0a0 / sizeof(uint32_t),
+ NPCM8XX_GCR_VSINTR,
+ NPCM8XX_GCR_SD2SUR1 = 0x0b4 / sizeof(uint32_t),
+ NPCM8XX_GCR_SD2SUR2,
+ NPCM8XX_GCR_INTCR4 = 0x0c0 / sizeof(uint32_t),
+ NPCM8XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t),
+ NPCM8XX_GCR_CP2BST,
+ NPCM8XX_GCR_B2CPNT,
+ NPCM8XX_GCR_CPPCTL,
+ NPCM8XX_GCR_I2CSEGSEL = 0x0e0 / sizeof(uint32_t),
+ NPCM8XX_GCR_I2CSEGCTL,
+ NPCM8XX_GCR_VSRCR,
+ NPCM8XX_GCR_MLOCKR,
+ NPCM8XX_GCR_SCRPAD = 0x13c / sizeof(uint32_t),
+ NPCM8XX_GCR_USB1PHYCTL,
+ NPCM8XX_GCR_USB2PHYCTL,
+ NPCM8XX_GCR_USB3PHYCTL,
+ NPCM8XX_GCR_MFSEL1 = 0x260 / sizeof(uint32_t),
+ NPCM8XX_GCR_MFSEL2,
+ NPCM8XX_GCR_MFSEL3,
+ NPCM8XX_GCR_MFSEL4,
+ NPCM8XX_GCR_MFSEL5,
+ NPCM8XX_GCR_MFSEL6,
+ NPCM8XX_GCR_MFSEL7,
+ NPCM8XX_GCR_MFSEL_LK1 = 0x280 / sizeof(uint32_t),
+ NPCM8XX_GCR_MFSEL_LK2,
+ NPCM8XX_GCR_MFSEL_LK3,
+ NPCM8XX_GCR_MFSEL_LK4,
+ NPCM8XX_GCR_MFSEL_LK5,
+ NPCM8XX_GCR_MFSEL_LK6,
+ NPCM8XX_GCR_MFSEL_LK7,
+ NPCM8XX_GCR_MFSEL_SET1 = 0x2a0 / sizeof(uint32_t),
+ NPCM8XX_GCR_MFSEL_SET2,
+ NPCM8XX_GCR_MFSEL_SET3,
+ NPCM8XX_GCR_MFSEL_SET4,
+ NPCM8XX_GCR_MFSEL_SET5,
+ NPCM8XX_GCR_MFSEL_SET6,
+ NPCM8XX_GCR_MFSEL_SET7,
+ NPCM8XX_GCR_MFSEL_CLR1 = 0x2c0 / sizeof(uint32_t),
+ NPCM8XX_GCR_MFSEL_CLR2,
+ NPCM8XX_GCR_MFSEL_CLR3,
+ NPCM8XX_GCR_MFSEL_CLR4,
+ NPCM8XX_GCR_MFSEL_CLR5,
+ NPCM8XX_GCR_MFSEL_CLR6,
+ NPCM8XX_GCR_MFSEL_CLR7,
+ NPCM8XX_GCR_WD0RCRLK = 0x400 / sizeof(uint32_t),
+ NPCM8XX_GCR_WD1RCRLK,
+ NPCM8XX_GCR_WD2RCRLK,
+ NPCM8XX_GCR_SWRSTC1LK,
+ NPCM8XX_GCR_SWRSTC2LK,
+ NPCM8XX_GCR_SWRSTC3LK,
+ NPCM8XX_GCR_TIPRSTCLK,
+ NPCM8XX_GCR_CORSTCLK,
+ NPCM8XX_GCR_WD0RCRBLK,
+ NPCM8XX_GCR_WD1RCRBLK,
+ NPCM8XX_GCR_WD2RCRBLK,
+ NPCM8XX_GCR_SWRSTC1BLK,
+ NPCM8XX_GCR_SWRSTC2BLK,
+ NPCM8XX_GCR_SWRSTC3BLK,
+ NPCM8XX_GCR_TIPRSTCBLK,
+ NPCM8XX_GCR_CORSTCBLK,
+ /* 64 scratch pad registers start here. 0xe00 ~ 0xefc */
+ NPCM8XX_GCR_SCRPAD_00 = 0xe00 / sizeof(uint32_t),
+ /* 32 semaphore registers start here. 0xf00 ~ 0xf7c */
+ NPCM8XX_GCR_GP_SEMFR_00 = 0xf00 / sizeof(uint32_t),
+ NPCM8XX_GCR_GP_SEMFR_31 = 0xf7c / sizeof(uint32_t),
+};
+
+static const uint32_t npcm8xx_cold_reset_values[NPCM8XX_GCR_NR_REGS] = {
+ [NPCM8XX_GCR_PDID] = 0x04a35850, /* Arbel A1 */
+ [NPCM8XX_GCR_MISCPE] = 0x0000ffff,
+ [NPCM8XX_GCR_A35_MODE] = 0xfff4ff30,
+ [NPCM8XX_GCR_SPSWC] = 0x00000003,
+ [NPCM8XX_GCR_INTCR] = 0x0010035e,
+ [NPCM8XX_GCR_HIFCR] = 0x0000004e,
+ [NPCM8XX_GCR_SD2SUR1] = 0xfdc80000,
+ [NPCM8XX_GCR_SD2SUR2] = 0x5200b130,
+ [NPCM8XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */
+ [NPCM8XX_GCR_RESSR] = 0x80000000,
+ [NPCM8XX_GCR_DAVCLVLR] = 0x5a00f3cf,
+ [NPCM8XX_GCR_INTCR3] = 0x5e001002,
+ [NPCM8XX_GCR_VSRCR] = 0x00004800,
+ [NPCM8XX_GCR_SCRPAD] = 0x00000008,
+ [NPCM8XX_GCR_USB1PHYCTL] = 0x034730e4,
+ [NPCM8XX_GCR_USB2PHYCTL] = 0x034730e4,
+ [NPCM8XX_GCR_USB3PHYCTL] = 0x034730e4,
+ /* All 32 semaphores should be initialized to 1. */
+ [NPCM8XX_GCR_GP_SEMFR_00...NPCM8XX_GCR_GP_SEMFR_31] = 0x00000001,
+};
+
+static uint64_t npcm_gcr_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint32_t reg = offset / sizeof(uint32_t);
+ NPCMGCRState *s = opaque;
+ NPCMGCRClass *c = NPCM_GCR_GET_CLASS(s);
+ uint64_t value;
+
+ if (reg >= c->nr_regs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (size) {
+ case 4:
+ value = s->regs[reg];
+ break;
+
+ case 8:
+ g_assert(!(reg & 1));
+ value = deposit64(s->regs[reg], 32, 32, s->regs[reg + 1]);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ trace_npcm_gcr_read(offset, value);
+ return value;
+}
+
+static void npcm_gcr_write(void *opaque, hwaddr offset,
+ uint64_t v, unsigned size)
+{
+ uint32_t reg = offset / sizeof(uint32_t);
+ NPCMGCRState *s = opaque;
+ NPCMGCRClass *c = NPCM_GCR_GET_CLASS(s);
+ uint32_t value = v;
+
+ trace_npcm_gcr_write(offset, v);
+
+ if (reg >= c->nr_regs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%04" HWADDR_PRIx " out of range\n",
+ __func__, offset);
+ return;
+ }
+
+ switch (size) {
+ case 4:
+ switch (reg) {
+ case NPCM7XX_GCR_PDID:
+ case NPCM7XX_GCR_PWRON:
+ case NPCM7XX_GCR_INTSR:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n",
+ __func__, offset);
+ return;
+
+ case NPCM7XX_GCR_RESSR:
+ case NPCM7XX_GCR_CP2BST:
+ /* Write 1 to clear */
+ value = s->regs[reg] & ~value;
+ break;
+
+ case NPCM7XX_GCR_RLOCKR1:
+ case NPCM7XX_GCR_MDLR:
+ /* Write 1 to set */
+ value |= s->regs[reg];
+ break;
+ };
+ s->regs[reg] = value;
+ break;
+
+ case 8:
+ g_assert(!(reg & 1));
+ s->regs[reg] = value;
+ s->regs[reg + 1] = extract64(v, 32, 32);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool npcm_gcr_check_mem_op(void *opaque, hwaddr offset,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
+{
+ NPCMGCRClass *c = NPCM_GCR_GET_CLASS(opaque);
+
+ if (offset >= c->nr_regs * sizeof(uint32_t)) {
+ return false;
+ }
+
+ switch (size) {
+ case 4:
+ return true;
+ case 8:
+ if (offset >= NPCM8XX_GCR_SCRPAD_00 * sizeof(uint32_t) &&
+ offset < (NPCM8XX_GCR_NR_REGS - 1) * sizeof(uint32_t)) {
+ return true;
+ } else {
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static const struct MemoryRegionOps npcm_gcr_ops = {
+ .read = npcm_gcr_read,
+ .write = npcm_gcr_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ .accepts = npcm_gcr_check_mem_op,
+ .unaligned = false,
+ },
+};
+
+static void npcm7xx_gcr_enter_reset(Object *obj, ResetType type)
+{
+ NPCMGCRState *s = NPCM_GCR(obj);
+ NPCMGCRClass *c = NPCM_GCR_GET_CLASS(obj);
+
+ g_assert(sizeof(s->regs) >= sizeof(c->cold_reset_values));
+ g_assert(sizeof(s->regs) >= c->nr_regs * sizeof(uint32_t));
+ memcpy(s->regs, c->cold_reset_values, c->nr_regs * sizeof(uint32_t));
+ /* These 3 registers are at the same location in both 7xx and 8xx. */
+ s->regs[NPCM7XX_GCR_PWRON] = s->reset_pwron;
+ s->regs[NPCM7XX_GCR_MDLR] = s->reset_mdlr;
+ s->regs[NPCM7XX_GCR_INTCR3] = s->reset_intcr3;
+}
+
+static void npcm8xx_gcr_enter_reset(Object *obj, ResetType type)
+{
+ NPCMGCRState *s = NPCM_GCR(obj);
+ NPCMGCRClass *c = NPCM_GCR_GET_CLASS(obj);
+
+ memcpy(s->regs, c->cold_reset_values, c->nr_regs * sizeof(uint32_t));
+ /* These 3 registers are at the same location in both 7xx and 8xx. */
+ s->regs[NPCM8XX_GCR_PWRON] = s->reset_pwron;
+ s->regs[NPCM8XX_GCR_MDLR] = s->reset_mdlr;
+ s->regs[NPCM8XX_GCR_INTCR3] = s->reset_intcr3;
+ s->regs[NPCM8XX_GCR_SCRPAD_B] = s->reset_scrpad_b;
+}
+
+static void npcm_gcr_realize(DeviceState *dev, Error **errp)
+{
+ ERRP_GUARD();
+ NPCMGCRState *s = NPCM_GCR(dev);
+ uint64_t dram_size;
+ Object *obj;
+
+ obj = object_property_get_link(OBJECT(dev), "dram-mr", errp);
+ if (!obj) {
+ error_prepend(errp, "%s: required dram-mr link not found: ", __func__);
+ return;
+ }
+ dram_size = memory_region_size(MEMORY_REGION(obj));
+ if (!is_power_of_2(dram_size) ||
+ dram_size < NPCM7XX_GCR_MIN_DRAM_SIZE ||
+ dram_size > NPCM7XX_GCR_MAX_DRAM_SIZE) {
+ g_autofree char *sz = size_to_str(dram_size);
+ g_autofree char *min_sz = size_to_str(NPCM7XX_GCR_MIN_DRAM_SIZE);
+ g_autofree char *max_sz = size_to_str(NPCM7XX_GCR_MAX_DRAM_SIZE);
+ error_setg(errp, "%s: unsupported DRAM size %s", __func__, sz);
+ error_append_hint(errp,
+ "DRAM size must be a power of two between %s and %s,"
+ " inclusive.\n", min_sz, max_sz);
+ return;
+ }
+
+ /* Power-on reset value */
+ s->reset_intcr3 = 0x00001002;
+
+ /*
+ * The GMMAP (Graphics Memory Map) field is used by u-boot to detect the
+ * DRAM size, and is normally initialized by the boot block as part of DRAM
+ * training. However, since we don't have a complete emulation of the
+ * memory controller and try to make it look like it has already been
+ * initialized, the boot block will skip this initialization, and we need
+ * to make sure this field is set correctly up front.
+ *
+ * WARNING: some versions of u-boot only look at bits 8 and 9, so 2 GiB of
+ * DRAM will be interpreted as 128 MiB.
+ *
+ * https://github.com/Nuvoton-Israel/u-boot/blob/2aef993bd2aafeb5408dbaad0f3ce099ee40c4aa/board/nuvoton/poleg/poleg.c#L244
+ */
+ s->reset_intcr3 |= ctz64(dram_size / NPCM7XX_GCR_MIN_DRAM_SIZE) << 8;
+
+ /*
+ * The boot block starting from 0.0.6 for NPCM8xx SoCs stores the DRAM size
+ * in the SCRPAD2 registers. We need to set this field correctly since
+ * the initialization is skipped as we mentioned above.
+ * https://github.com/Nuvoton-Israel/u-boot/blob/npcm8mnx-v2019.01_tmp/board/nuvoton/arbel/arbel.c#L737
+ */
+ s->reset_scrpad_b = dram_size;
+}
+
+static void npcm_gcr_init(Object *obj)
+{
+ NPCMGCRState *s = NPCM_GCR(obj);
+
+ memory_region_init_io(&s->iomem, obj, &npcm_gcr_ops, s,
+ TYPE_NPCM_GCR, 4 * KiB);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static const VMStateDescription vmstate_npcm_gcr = {
+ .name = "npcm-gcr",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, NPCMGCRState, NPCM_GCR_MAX_NR_REGS),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static const Property npcm_gcr_properties[] = {
+ DEFINE_PROP_UINT32("disabled-modules", NPCMGCRState, reset_mdlr, 0),
+ DEFINE_PROP_UINT32("power-on-straps", NPCMGCRState, reset_pwron, 0),
+};
+
+static void npcm_gcr_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = npcm_gcr_realize;
+ dc->vmsd = &vmstate_npcm_gcr;
+
+ device_class_set_props(dc, npcm_gcr_properties);
+}
+
+static void npcm7xx_gcr_class_init(ObjectClass *klass, const void *data)
+{
+ NPCMGCRClass *c = NPCM_GCR_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->desc = "NPCM7xx System Global Control Registers";
+ c->nr_regs = NPCM7XX_GCR_NR_REGS;
+ c->cold_reset_values = npcm7xx_cold_reset_values;
+ rc->phases.enter = npcm7xx_gcr_enter_reset;
+}
+
+static void npcm8xx_gcr_class_init(ObjectClass *klass, const void *data)
+{
+ NPCMGCRClass *c = NPCM_GCR_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->desc = "NPCM8xx System Global Control Registers";
+ c->nr_regs = NPCM8XX_GCR_NR_REGS;
+ c->cold_reset_values = npcm8xx_cold_reset_values;
+ rc->phases.enter = npcm8xx_gcr_enter_reset;
+}
+
+static const TypeInfo npcm_gcr_info[] = {
+ {
+ .name = TYPE_NPCM_GCR,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(NPCMGCRState),
+ .instance_init = npcm_gcr_init,
+ .class_size = sizeof(NPCMGCRClass),
+ .class_init = npcm_gcr_class_init,
+ .abstract = true,
+ },
+ {
+ .name = TYPE_NPCM7XX_GCR,
+ .parent = TYPE_NPCM_GCR,
+ .class_init = npcm7xx_gcr_class_init,
+ },
+ {
+ .name = TYPE_NPCM8XX_GCR,
+ .parent = TYPE_NPCM_GCR,
+ .class_init = npcm8xx_gcr_class_init,
+ },
+};
+DEFINE_TYPES(npcm_gcr_info)
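
The INTCR3 value computed in npcm_gcr_realize() is easy to verify by hand. Below is a minimal sketch of the same GMMAP encoding, using __builtin_ctzll() as a stand-in for the ctz64() helper called in the patch, with two illustrative DRAM sizes:

    #include <inttypes.h>
    #include <stdio.h>

    /* Mirrors the INTCR3/GMMAP computation in npcm_gcr_realize(). */
    static uint32_t reset_intcr3_for(uint64_t dram_size)
    {
        const uint64_t min_dram = 128 * 1024 * 1024; /* NPCM7XX_GCR_MIN_DRAM_SIZE */
        uint32_t intcr3 = 0x00001002;                /* power-on reset value */

        /* __builtin_ctzll() stands in for the ctz64() helper used above. */
        intcr3 |= (uint32_t)__builtin_ctzll(dram_size / min_dram) << 8;
        return intcr3;
    }

    int main(void)
    {
        printf("512 MiB -> INTCR3 0x%08" PRIx32 "\n",
               reset_intcr3_for(512ull << 20)); /* 0x00001202 */
        printf("  2 GiB -> INTCR3 0x%08" PRIx32 "\n",
               reset_intcr3_for(2ull << 30));   /* 0x00001402 */
        return 0;
    }

With 2 GiB the encoded field is 4 (bit 10 of INTCR3), so a u-boot build that only decodes bits 8 and 9 reads 0 and falls back to 128 MiB, which is exactly the situation the WARNING comment in npcm_gcr_realize() describes.
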
diff --git a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c
index 2d76c45..8cd7ffe 100644
--- a/hw/misc/nrf51_rng.c
+++ b/hw/misc/nrf51_rng.c
@@ -107,25 +107,25 @@ static void rng_write(void *opaque, hwaddr offset,
break;
case NRF51_RNG_REG_SHORTS:
s->shortcut_stop_on_valrdy =
- (value & BIT_MASK(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0;
+ (value & BIT(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0;
break;
case NRF51_RNG_REG_INTEN:
s->interrupt_enabled =
- (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0;
+ (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0;
break;
case NRF51_RNG_REG_INTENSET:
- if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) {
+ if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) {
s->interrupt_enabled = 1;
}
break;
case NRF51_RNG_REG_INTENCLR:
- if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) {
+ if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) {
s->interrupt_enabled = 0;
}
break;
case NRF51_RNG_REG_CONFIG:
s->filter_enabled =
- (value & BIT_MASK(NRF51_RNG_REG_CONFIG_DECEN)) ? 1 : 0;
+ (value & BIT(NRF51_RNG_REG_CONFIG_DECEN)) ? 1 : 0;
break;
default:
@@ -219,12 +219,11 @@ static void nrf51_rng_reset(DeviceState *dev)
}
-static Property nrf51_rng_properties[] = {
+static const Property nrf51_rng_properties[] = {
DEFINE_PROP_UINT16("period_unfiltered_us", NRF51RNGState,
period_unfiltered_us, 167),
DEFINE_PROP_UINT16("period_filtered_us", NRF51RNGState,
period_filtered_us, 660),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_rng = {
@@ -241,13 +240,13 @@ static const VMStateDescription vmstate_rng = {
}
};
-static void nrf51_rng_class_init(ObjectClass *klass, void *data)
+static void nrf51_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, nrf51_rng_properties);
dc->vmsd = &vmstate_rng;
- dc->reset = nrf51_rng_reset;
+ device_class_set_legacy_reset(dc, nrf51_rng_reset);
}
static const TypeInfo nrf51_rng_info = {
diff --git a/hw/misc/omap_clk.c b/hw/misc/omap_clk.c
index c77ca2f..da95c4a 100644
--- a/hw/misc/omap_clk.c
+++ b/hw/misc/omap_clk.c
@@ -30,174 +30,170 @@ struct clk {
struct clk *parent;
struct clk *child1;
struct clk *sibling;
-#define ALWAYS_ENABLED (1 << 0)
-#define CLOCK_IN_OMAP310 (1 << 10)
-#define CLOCK_IN_OMAP730 (1 << 11)
-#define CLOCK_IN_OMAP1510 (1 << 12)
-#define CLOCK_IN_OMAP16XX (1 << 13)
-#define CLOCK_IN_OMAP242X (1 << 14)
-#define CLOCK_IN_OMAP243X (1 << 15)
-#define CLOCK_IN_OMAP343X (1 << 16)
+#define ALWAYS_ENABLED (1 << 0)
+#define CLOCK_IN_OMAP310 (1 << 10)
+#define CLOCK_IN_OMAP730 (1 << 11)
+#define CLOCK_IN_OMAP1510 (1 << 12)
+#define CLOCK_IN_OMAP16XX (1 << 13)
uint32_t flags;
int id;
- int running; /* Is currently ticking */
- int enabled; /* Is enabled, regardless of its input clk */
- unsigned long rate; /* Current rate (if .running) */
- unsigned int divisor; /* Rate relative to input (if .enabled) */
- unsigned int multiplier; /* Rate relative to input (if .enabled) */
- qemu_irq users[16]; /* Who to notify on change */
- int usecount; /* Automatically idle when unused */
+ int running; /* Is currently ticking */
+ int enabled; /* Is enabled, regardless of its input clk */
+ unsigned long rate; /* Current rate (if .running) */
+ unsigned int divisor; /* Rate relative to input (if .enabled) */
+ unsigned int multiplier; /* Rate relative to input (if .enabled) */
+ qemu_irq users[16]; /* Who to notify on change */
+ int usecount; /* Automatically idle when unused */
};
static struct clk xtal_osc12m = {
- .name = "xtal_osc_12m",
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "xtal_osc_12m",
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk xtal_osc32k = {
- .name = "xtal_osc_32k",
- .rate = 32768,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
- CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .name = "xtal_osc_32k",
+ .rate = 32768,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk ck_ref = {
- .name = "ck_ref",
- .alias = "clkin",
- .parent = &xtal_osc12m,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "ck_ref",
+ .alias = "clkin",
+ .parent = &xtal_osc12m,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
/* If a dpll is disabled it becomes a bypass, child clocks don't stop */
static struct clk dpll1 = {
- .name = "dpll1",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "dpll1",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk dpll2 = {
- .name = "dpll2",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .name = "dpll2",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
};
static struct clk dpll3 = {
- .name = "dpll3",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .name = "dpll3",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
};
static struct clk dpll4 = {
- .name = "dpll4",
- .parent = &ck_ref,
- .multiplier = 4,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "dpll4",
+ .parent = &ck_ref,
+ .multiplier = 4,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk apll = {
- .name = "apll",
- .parent = &ck_ref,
- .multiplier = 48,
- .divisor = 12,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "apll",
+ .parent = &ck_ref,
+ .multiplier = 48,
+ .divisor = 12,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk ck_48m = {
- .name = "ck_48m",
- .parent = &dpll4, /* either dpll4 or apll */
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "ck_48m",
+ .parent = &dpll4, /* either dpll4 or apll */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk ck_dpll1out = {
- .name = "ck_dpll1out",
- .parent = &dpll1,
- .flags = CLOCK_IN_OMAP16XX,
+ .name = "ck_dpll1out",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk sossi_ck = {
- .name = "ck_sossi",
- .parent = &ck_dpll1out,
- .flags = CLOCK_IN_OMAP16XX,
+ .name = "ck_sossi",
+ .parent = &ck_dpll1out,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk clkm1 = {
- .name = "clkm1",
- .alias = "ck_gen1",
- .parent = &dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "clkm1",
+ .alias = "ck_gen1",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk clkm2 = {
- .name = "clkm2",
- .alias = "ck_gen2",
- .parent = &dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "clkm2",
+ .alias = "ck_gen2",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk clkm3 = {
- .name = "clkm3",
- .alias = "ck_gen3",
- .parent = &dpll1, /* either dpll1 or ck_ref */
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "clkm3",
+ .alias = "ck_gen3",
+ .parent = &dpll1, /* either dpll1 or ck_ref */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk arm_ck = {
- .name = "arm_ck",
- .alias = "mpu_ck",
- .parent = &clkm1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "arm_ck",
+ .alias = "mpu_ck",
+ .parent = &clkm1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk armper_ck = {
- .name = "armper_ck",
- .alias = "mpuper_ck",
- .parent = &clkm1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "armper_ck",
+ .alias = "mpuper_ck",
+ .parent = &clkm1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk arm_gpio_ck = {
- .name = "arm_gpio_ck",
- .alias = "mpu_gpio_ck",
- .parent = &clkm1,
- .divisor = 1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .name = "arm_gpio_ck",
+ .alias = "mpu_gpio_ck",
+ .parent = &clkm1,
+ .divisor = 1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk armxor_ck = {
- .name = "armxor_ck",
- .alias = "mpuxor_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "armxor_ck",
+ .alias = "mpuxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk armtim_ck = {
- .name = "armtim_ck",
- .alias = "mputim_ck",
- .parent = &ck_ref, /* either CLKIN or DPLL1 */
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "armtim_ck",
+ .alias = "mputim_ck",
+ .parent = &ck_ref, /* either CLKIN or DPLL1 */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk armwdt_ck = {
- .name = "armwdt_ck",
- .alias = "mpuwd_ck",
- .parent = &clkm1,
- .divisor = 14,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "armwdt_ck",
+ .alias = "mpuwd_ck",
+ .parent = &clkm1,
+ .divisor = 14,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk arminth_ck16xx = {
- .name = "arminth_ck",
- .parent = &arm_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .name = "arminth_ck",
+ .parent = &arm_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
/* Note: On 16xx the frequency can be divided by 2 by programming
* ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
*
@@ -206,48 +202,48 @@ static struct clk arminth_ck16xx = {
};
static struct clk dsp_ck = {
- .name = "dsp_ck",
- .parent = &clkm2,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "dsp_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk dspmmu_ck = {
- .name = "dspmmu_ck",
- .parent = &clkm2,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ .name = "dspmmu_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
ALWAYS_ENABLED,
};
static struct clk dspper_ck = {
- .name = "dspper_ck",
- .parent = &clkm2,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "dspper_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk dspxor_ck = {
- .name = "dspxor_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "dspxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk dsptim_ck = {
- .name = "dsptim_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "dsptim_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk tc_ck = {
- .name = "tc_ck",
- .parent = &clkm3,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ .name = "tc_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
CLOCK_IN_OMAP730 | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk arminth_ck15xx = {
- .name = "arminth_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .name = "arminth_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
/* Note: On 1510 the frequency follows TC_CK
*
* 16xx version is in MPU clocks.
@@ -256,698 +252,259 @@ static struct clk arminth_ck15xx = {
static struct clk tipb_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "tipb_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .name = "tipb_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
};
static struct clk l3_ocpi_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "l3_ocpi_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
+ .name = "l3_ocpi_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk tc1_ck = {
- .name = "tc1_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
+ .name = "tc1_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk tc2_ck = {
- .name = "tc2_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
+ .name = "tc2_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk dma_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "dma_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .name = "dma_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk dma_lcdfree_ck = {
- .name = "dma_lcdfree_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .name = "dma_lcdfree_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
};
static struct clk api_ck = {
- .name = "api_ck",
- .alias = "mpui_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .name = "api_ck",
+ .alias = "mpui_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk lb_ck = {
- .name = "lb_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .name = "lb_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk lbfree_ck = {
- .name = "lbfree_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .name = "lbfree_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk hsab_ck = {
- .name = "hsab_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .name = "hsab_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk rhea1_ck = {
- .name = "rhea1_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .name = "rhea1_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
};
static struct clk rhea2_ck = {
- .name = "rhea2_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .name = "rhea2_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
};
static struct clk lcd_ck_16xx = {
- .name = "lcd_ck",
- .parent = &clkm3,
- .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730,
+ .name = "lcd_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730,
};
static struct clk lcd_ck_1510 = {
- .name = "lcd_ck",
- .parent = &clkm3,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .name = "lcd_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk uart1_1510 = {
- .name = "uart1_ck",
+ .name = "uart1_ck",
/* Direct from ULPD, no real parent */
- .parent = &armper_ck, /* either armper_ck or dpll4 */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
};
static struct clk uart1_16xx = {
- .name = "uart1_ck",
+ .name = "uart1_ck",
/* Direct from ULPD, no real parent */
- .parent = &armper_ck,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX,
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk uart2_ck = {
- .name = "uart2_ck",
+ .name = "uart2_ck",
/* Direct from ULPD, no real parent */
- .parent = &armper_ck, /* either armper_ck or dpll4 */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
ALWAYS_ENABLED,
};
static struct clk uart3_1510 = {
- .name = "uart3_ck",
+ .name = "uart3_ck",
/* Direct from ULPD, no real parent */
- .parent = &armper_ck, /* either armper_ck or dpll4 */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
};
static struct clk uart3_16xx = {
- .name = "uart3_ck",
+ .name = "uart3_ck",
/* Direct from ULPD, no real parent */
- .parent = &armper_ck,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX,
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
};
-static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */
- .name = "usb_clk0",
- .alias = "usb.clko",
+static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */
+ .name = "usb_clk0",
+ .alias = "usb.clko",
/* Direct from ULPD, no parent */
- .rate = 6000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .rate = 6000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk usb_hhc_ck1510 = {
- .name = "usb_hhc_ck",
+ .name = "usb_hhc_ck",
/* Direct from ULPD, no parent */
- .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+ .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
};
static struct clk usb_hhc_ck16xx = {
- .name = "usb_hhc_ck",
+ .name = "usb_hhc_ck",
/* Direct from ULPD, no parent */
- .rate = 48000000,
+ .rate = 48000000,
/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
- .flags = CLOCK_IN_OMAP16XX,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk usb_w2fc_mclk = {
- .name = "usb_w2fc_mclk",
- .alias = "usb_w2fc_ck",
- .parent = &ck_48m,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "usb_w2fc_mclk",
+ .alias = "usb_w2fc_ck",
+ .parent = &ck_48m,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk mclk_1510 = {
- .name = "mclk",
+ .name = "mclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510,
};
static struct clk bclk_310 = {
- .name = "bt_mclk_out", /* Alias midi_mclk_out? */
- .parent = &armper_ck,
- .flags = CLOCK_IN_OMAP310,
+ .name = "bt_mclk_out", /* Alias midi_mclk_out? */
+ .parent = &armper_ck,
+ .flags = CLOCK_IN_OMAP310,
};
static struct clk mclk_310 = {
- .name = "com_mclk_out",
- .parent = &armper_ck,
- .flags = CLOCK_IN_OMAP310,
+ .name = "com_mclk_out",
+ .parent = &armper_ck,
+ .flags = CLOCK_IN_OMAP310,
};
static struct clk mclk_16xx = {
- .name = "mclk",
+ .name = "mclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .flags = CLOCK_IN_OMAP16XX,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk bclk_1510 = {
- .name = "bclk",
+ .name = "bclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510,
};
static struct clk bclk_16xx = {
- .name = "bclk",
+ .name = "bclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .flags = CLOCK_IN_OMAP16XX,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk mmc1_ck = {
- .name = "mmc_ck",
- .id = 1,
+ .name = "mmc_ck",
+ .id = 1,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck, /* either armper_ck or dpll4 */
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
};
static struct clk mmc2_ck = {
- .name = "mmc_ck",
- .id = 2,
+ .name = "mmc_ck",
+ .id = 2,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX,
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
};
static struct clk cam_mclk = {
- .name = "cam.mclk",
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
- .rate = 12000000,
+ .name = "cam.mclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .rate = 12000000,
};
static struct clk cam_exclk = {
- .name = "cam.exclk",
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "cam.exclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
/* Either 12M from cam.mclk or 48M from dpll4 */
- .parent = &cam_mclk,
+ .parent = &cam_mclk,
};
static struct clk cam_lclk = {
- .name = "cam.lclk",
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .name = "cam.lclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
};
static struct clk i2c_fck = {
- .name = "i2c_fck",
- .id = 1,
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ .name = "i2c_fck",
+ .id = 1,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
ALWAYS_ENABLED,
- .parent = &armxor_ck,
+ .parent = &armxor_ck,
};
static struct clk i2c_ick = {
- .name = "i2c_ick",
- .id = 1,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
- .parent = &armper_ck,
+ .name = "i2c_ick",
+ .id = 1,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .parent = &armper_ck,
};
static struct clk clk32k = {
- .name = "clk32-kHz",
- .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .parent = &xtal_osc32k,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 12000000, /* 12 MHz or 13 MHz or 19.2 MHz */
- /*.parent = sys.xtalin */
-};
-
-static struct clk apll_96m = {
- .name = "apll_96m",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 96000000,
- /*.parent = ref_clk */
-};
-
-static struct clk apll_54m = {
- .name = "apll_54m",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 54000000,
- /*.parent = ref_clk */
-};
-
-static struct clk sys_clk = {
- .name = "sys_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 32768,
- /*.parent = sys.xtalin */
-};
-
-static struct clk sleep_clk = {
- .name = "sleep_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 32768,
- /*.parent = sys.xtalin */
-};
-
-static struct clk dpll_ck = {
- .name = "dpll",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .parent = &ref_clk,
-};
-
-static struct clk dpll_x2_ck = {
- .name = "dpll_x2",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .parent = &ref_clk,
-};
-
-static struct clk wdt1_sys_clk = {
- .name = "wdt1_sys_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
- .rate = 32768,
- /*.parent = sys.xtalin */
-};
-
-static struct clk func_96m_clk = {
- .name = "func_96m_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .divisor = 1,
- .parent = &apll_96m,
-};
-
-static struct clk func_48m_clk = {
- .name = "func_48m_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .divisor = 2,
- .parent = &apll_96m,
-};
-
-static struct clk func_12m_clk = {
- .name = "func_12m_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .divisor = 8,
- .parent = &apll_96m,
-};
-
-static struct clk func_54m_clk = {
- .name = "func_54m_clk",
- .flags = CLOCK_IN_OMAP242X,
- .divisor = 1,
- .parent = &apll_54m,
-};
-
-static struct clk sys_clkout = {
- .name = "clkout",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk sys_clkout2 = {
- .name = "clkout2",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_clk = {
- .name = "core_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &dpll_x2_ck, /* Switchable between dpll_ck and clk32k */
-};
-
-static struct clk l3_clk = {
- .name = "l3_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk core_l4_iclk = {
- .name = "core_l4_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &l3_clk,
-};
-
-static struct clk wu_l4_iclk = {
- .name = "wu_l4_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &l3_clk,
-};
-
-static struct clk core_l3_iclk = {
- .name = "core_l3_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk core_l4_usb_clk = {
- .name = "core_l4_usb_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &l3_clk,
-};
-
-static struct clk wu_gpt1_clk = {
- .name = "wu_gpt1_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk wu_32k_clk = {
- .name = "wu_32k_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk uart1_fclk = {
- .name = "uart1_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
-};
-
-static struct clk uart1_iclk = {
- .name = "uart1_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk uart2_fclk = {
- .name = "uart2_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
-};
-
-static struct clk uart2_iclk = {
- .name = "uart2_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk uart3_fclk = {
- .name = "uart3_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
-};
-
-static struct clk uart3_iclk = {
- .name = "uart3_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk mpu_fclk = {
- .name = "mpu_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk mpu_iclk = {
- .name = "mpu_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk int_m_fclk = {
- .name = "int_m_fclk",
- .alias = "mpu_intc_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk int_m_iclk = {
- .name = "int_m_iclk",
- .alias = "mpu_intc_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
-};
-
-static struct clk core_gpt2_clk = {
- .name = "core_gpt2_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt3_clk = {
- .name = "core_gpt3_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt4_clk = {
- .name = "core_gpt4_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt5_clk = {
- .name = "core_gpt5_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt6_clk = {
- .name = "core_gpt6_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt7_clk = {
- .name = "core_gpt7_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt8_clk = {
- .name = "core_gpt8_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt9_clk = {
- .name = "core_gpt9_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt10_clk = {
- .name = "core_gpt10_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt11_clk = {
- .name = "core_gpt11_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk core_gpt12_clk = {
- .name = "core_gpt12_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
-};
-
-static struct clk mcbsp1_clk = {
- .name = "mcbsp1_cg",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .divisor = 2,
- .parent = &func_96m_clk,
-};
-
-static struct clk mcbsp2_clk = {
- .name = "mcbsp2_cg",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .divisor = 2,
- .parent = &func_96m_clk,
-};
-
-static struct clk emul_clk = {
- .name = "emul_ck",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_54m_clk,
-};
-
-static struct clk sdma_fclk = {
- .name = "sdma_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &l3_clk,
-};
-
-static struct clk sdma_iclk = {
- .name = "sdma_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l3_iclk, /* core_l4_iclk for the configuration port */
-};
-
-static struct clk i2c1_fclk = {
- .name = "i2c1.fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_12m_clk,
- .divisor = 1,
-};
-
-static struct clk i2c1_iclk = {
- .name = "i2c1.iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk i2c2_fclk = {
- .name = "i2c2.fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_12m_clk,
- .divisor = 1,
-};
-
-static struct clk i2c2_iclk = {
- .name = "i2c2.iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk gpio_dbclk[5] = {
- {
- .name = "gpio1_dbclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &wu_32k_clk,
- }, {
- .name = "gpio2_dbclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &wu_32k_clk,
- }, {
- .name = "gpio3_dbclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &wu_32k_clk,
- }, {
- .name = "gpio4_dbclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &wu_32k_clk,
- }, {
- .name = "gpio5_dbclk",
- .flags = CLOCK_IN_OMAP243X,
- .parent = &wu_32k_clk,
- },
-};
-
-static struct clk gpio_iclk = {
- .name = "gpio_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &wu_l4_iclk,
-};
-
-static struct clk mmc_fck = {
- .name = "mmc_fclk",
- .flags = CLOCK_IN_OMAP242X,
- .parent = &func_96m_clk,
-};
-
-static struct clk mmc_ick = {
- .name = "mmc_iclk",
- .flags = CLOCK_IN_OMAP242X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk spi_fclk[3] = {
- {
- .name = "spi1_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
- }, {
- .name = "spi2_fclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
- }, {
- .name = "spi3_fclk",
- .flags = CLOCK_IN_OMAP243X,
- .parent = &func_48m_clk,
- },
-};
-
-static struct clk dss_clk[2] = {
- {
- .name = "dss_clk1",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_clk,
- }, {
- .name = "dss_clk2",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &sys_clk,
- },
-};
-
-static struct clk dss_54m_clk = {
- .name = "dss_54m_clk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &func_54m_clk,
-};
-
-static struct clk dss_l3_iclk = {
- .name = "dss_l3_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l3_iclk,
-};
-
-static struct clk dss_l4_iclk = {
- .name = "dss_l4_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
-};
-
-static struct clk spi_iclk[3] = {
- {
- .name = "spi1_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
- }, {
- .name = "spi2_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
- }, {
- .name = "spi3_iclk",
- .flags = CLOCK_IN_OMAP243X,
- .parent = &core_l4_iclk,
- },
-};
-
-static struct clk omapctrl_clk = {
- .name = "omapctrl_iclk",
- .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
- /* XXX Should be in WKUP domain */
- .parent = &core_l4_iclk,
+ .name = "clk32-kHz",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ALWAYS_ENABLED,
+ .parent = &xtal_osc32k,
};
static struct clk *onchip_clks[] = {
@@ -1019,80 +576,6 @@ static struct clk *onchip_clks[] = {
&i2c_fck,
&i2c_ick,
- /* OMAP 2 */
-
- &ref_clk,
- &apll_96m,
- &apll_54m,
- &sys_clk,
- &sleep_clk,
- &dpll_ck,
- &dpll_x2_ck,
- &wdt1_sys_clk,
- &func_96m_clk,
- &func_48m_clk,
- &func_12m_clk,
- &func_54m_clk,
- &sys_clkout,
- &sys_clkout2,
- &core_clk,
- &l3_clk,
- &core_l4_iclk,
- &wu_l4_iclk,
- &core_l3_iclk,
- &core_l4_usb_clk,
- &wu_gpt1_clk,
- &wu_32k_clk,
- &uart1_fclk,
- &uart1_iclk,
- &uart2_fclk,
- &uart2_iclk,
- &uart3_fclk,
- &uart3_iclk,
- &mpu_fclk,
- &mpu_iclk,
- &int_m_fclk,
- &int_m_iclk,
- &core_gpt2_clk,
- &core_gpt3_clk,
- &core_gpt4_clk,
- &core_gpt5_clk,
- &core_gpt6_clk,
- &core_gpt7_clk,
- &core_gpt8_clk,
- &core_gpt9_clk,
- &core_gpt10_clk,
- &core_gpt11_clk,
- &core_gpt12_clk,
- &mcbsp1_clk,
- &mcbsp2_clk,
- &emul_clk,
- &sdma_fclk,
- &sdma_iclk,
- &i2c1_fclk,
- &i2c1_iclk,
- &i2c2_fclk,
- &i2c2_iclk,
- &gpio_dbclk[0],
- &gpio_dbclk[1],
- &gpio_dbclk[2],
- &gpio_dbclk[3],
- &gpio_iclk,
- &mmc_fck,
- &mmc_ick,
- &spi_fclk[0],
- &spi_iclk[0],
- &spi_fclk[1],
- &spi_iclk[1],
- &spi_fclk[2],
- &spi_iclk[2],
- &dss_clk[0],
- &dss_clk[1],
- &dss_54m_clk,
- &dss_l3_iclk,
- &dss_l4_iclk,
- &omapctrl_clk,
-
NULL
};
@@ -1230,12 +713,6 @@ void omap_clk_init(struct omap_mpu_state_s *mpu)
flag = CLOCK_IN_OMAP310;
else if (cpu_is_omap1510(mpu))
flag = CLOCK_IN_OMAP1510;
- else if (cpu_is_omap2410(mpu) || cpu_is_omap2420(mpu))
- flag = CLOCK_IN_OMAP242X;
- else if (cpu_is_omap2430(mpu))
- flag = CLOCK_IN_OMAP243X;
- else if (cpu_is_omap3430(mpu))
- flag = CLOCK_IN_OMAP243X;
else
return;
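
The clock tables above only ever initialize a handful of fields, so a minimal sketch of the struct clk shape they assume is enough to read them; the field names below are taken from the initializers in this file, not from the full definition in the OMAP headers:

    struct clk {
        const char *name;      /* canonical name, e.g. "arm_ck" */
        const char *alias;     /* optional second lookup name, e.g. "mpu_ck" */
        struct clk *parent;    /* source clock; NULL for external/ULPD inputs */
        int id;                /* instance number when several clocks share a name */
        unsigned long rate;    /* fixed rate in Hz (used for ULPD/external clocks) */
        unsigned int divisor;  /* fixed divide from the parent rate */
        unsigned int flags;    /* CLOCK_IN_OMAP* chip mask, optionally ALWAYS_ENABLED */
    };

Each entry is then gated by matching .flags against the single CLOCK_IN_OMAP* flag chosen in omap_clk_init() above for the emulated chip.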
diff --git a/hw/misc/omap_gpmc.c b/hw/misc/omap_gpmc.c
deleted file mode 100644
index 67158eb..0000000
--- a/hw/misc/omap_gpmc.c
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * TI OMAP general purpose memory controller emulation.
- *
- * Copyright (C) 2007-2009 Nokia Corporation
- * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
- * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "hw/block/flash.h"
-#include "hw/arm/omap.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
-
-/* General-Purpose Memory Controller */
-struct omap_gpmc_s {
- qemu_irq irq;
- qemu_irq drq;
- MemoryRegion iomem;
- int accept_256;
-
- uint8_t revision;
- uint8_t sysconfig;
- uint16_t irqst;
- uint16_t irqen;
- uint16_t lastirq;
- uint16_t timeout;
- uint16_t config;
- struct omap_gpmc_cs_file_s {
- uint32_t config[7];
- MemoryRegion *iomem;
- MemoryRegion container;
- MemoryRegion nandiomem;
- DeviceState *dev;
- } cs_file[8];
- int ecc_cs;
- int ecc_ptr;
- uint32_t ecc_cfg;
- ECCState ecc[9];
- struct prefetch {
- uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
- uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
- int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
- int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
- int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
- MemoryRegion iomem;
- uint8_t fifo[64];
- } prefetch;
-};
-
-#define OMAP_GPMC_8BIT 0
-#define OMAP_GPMC_16BIT 1
-#define OMAP_GPMC_NOR 0
-#define OMAP_GPMC_NAND 2
-
-static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
-{
- return (f->config[0] >> 10) & 3;
-}
-
-static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
-{
- /* devsize field is really 2 bits but we ignore the high
- * bit to ensure consistent behaviour if the guest sets
- * it (values 2 and 3 are reserved in the TRM)
- */
- return (f->config[0] >> 12) & 1;
-}
-
-/* Extract the chip-select value from the prefetch config1 register */
-static int prefetch_cs(uint32_t config1)
-{
- return (config1 >> 24) & 7;
-}
-
-static int prefetch_threshold(uint32_t config1)
-{
- return (config1 >> 8) & 0x7f;
-}
-
-static void omap_gpmc_int_update(struct omap_gpmc_s *s)
-{
- /* The TRM is a bit unclear, but it seems to say that
- * the TERMINALCOUNTSTATUS bit is set only on the
- * transition when the prefetch engine goes from
- * active to inactive, whereas the FIFOEVENTSTATUS
- * bit is held high as long as the fifo has at
- * least THRESHOLD bytes available.
- * So we do the latter here, but TERMINALCOUNTSTATUS
- * is set elsewhere.
- */
- if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
- s->irqst |= 1;
- }
- if ((s->irqen & s->irqst) != s->lastirq) {
- s->lastirq = s->irqen & s->irqst;
- qemu_set_irq(s->irq, s->lastirq);
- }
-}
-
-static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
-{
- if (s->prefetch.config1 & 4) {
- qemu_set_irq(s->drq, value);
- }
-}
-
-/* Access functions for when a NAND-like device is mapped into memory:
- * all addresses in the region behave like accesses to the relevant
- * GPMC_NAND_DATA_i register (which is actually implemented to call these)
- */
-static uint64_t omap_nand_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_gpmc_cs_file_s *f = opaque;
- uint64_t v;
- nand_setpins(f->dev, 0, 0, 0, 1, 0);
- switch (omap_gpmc_devsize(f)) {
- case OMAP_GPMC_8BIT:
- v = nand_getio(f->dev);
- if (size == 1) {
- return v;
- }
- v |= (nand_getio(f->dev) << 8);
- if (size == 2) {
- return v;
- }
- v |= (nand_getio(f->dev) << 16);
- v |= (nand_getio(f->dev) << 24);
- return v;
- case OMAP_GPMC_16BIT:
- v = nand_getio(f->dev);
- if (size == 1) {
- /* 8 bit read from 16 bit device : probably a guest bug */
- return v & 0xff;
- }
- if (size == 2) {
- return v;
- }
- v |= (nand_getio(f->dev) << 16);
- return v;
- default:
- abort();
- }
-}
-
-static void omap_nand_setio(DeviceState *dev, uint64_t value,
- int nandsize, int size)
-{
- /* Write the specified value to the NAND device, respecting
- * both size of the NAND device and size of the write access.
- */
- switch (nandsize) {
- case OMAP_GPMC_8BIT:
- switch (size) {
- case 1:
- nand_setio(dev, value & 0xff);
- break;
- case 2:
- nand_setio(dev, value & 0xff);
- nand_setio(dev, (value >> 8) & 0xff);
- break;
- case 4:
- default:
- nand_setio(dev, value & 0xff);
- nand_setio(dev, (value >> 8) & 0xff);
- nand_setio(dev, (value >> 16) & 0xff);
- nand_setio(dev, (value >> 24) & 0xff);
- break;
- }
- break;
- case OMAP_GPMC_16BIT:
- switch (size) {
- case 1:
- /* writing to a 16bit device with 8bit access is probably a guest
- * bug; pass the value through anyway.
- */
- case 2:
- nand_setio(dev, value & 0xffff);
- break;
- case 4:
- default:
- nand_setio(dev, value & 0xffff);
- nand_setio(dev, (value >> 16) & 0xffff);
- break;
- }
- break;
- }
-}
-
-static void omap_nand_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_gpmc_cs_file_s *f = opaque;
- nand_setpins(f->dev, 0, 0, 0, 1, 0);
- omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
-}
-
-static const MemoryRegionOps omap_nand_ops = {
- .read = omap_nand_read,
- .write = omap_nand_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void fill_prefetch_fifo(struct omap_gpmc_s *s)
-{
- /* Fill the prefetch FIFO by reading data from NAND.
- * We do this synchronously, unlike the hardware which
- * will do this asynchronously. We refill when the
- * FIFO has THRESHOLD bytes free, and we always refill
- * as much data as possible starting at the top end
- * of the FIFO.
- * (We have to refill at THRESHOLD rather than waiting
- * for the FIFO to empty to allow for the case where
- * the FIFO size isn't an exact multiple of THRESHOLD
- * and we're doing DMA transfers.)
- * This means we never need to handle wrap-around in
- * the fifo-reading code, and the next byte of data
- * to read is always fifo[63 - fifopointer].
- */
- int fptr;
- int cs = prefetch_cs(s->prefetch.config1);
- int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
- int bytes;
- /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
- * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
- * Instead believe the bit that says it is always a byte count.
- */
- bytes = 64 - s->prefetch.fifopointer;
- if (bytes > s->prefetch.count) {
- bytes = s->prefetch.count;
- }
- if (is16bit) {
- bytes &= ~1;
- }
-
- s->prefetch.count -= bytes;
- s->prefetch.fifopointer += bytes;
- fptr = 64 - s->prefetch.fifopointer;
- /* Move the existing data in the FIFO so it sits just
- * before what we're about to read in
- */
- while (fptr < (64 - bytes)) {
- s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
- fptr++;
- }
- while (fptr < 64) {
- if (is16bit) {
- uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
- s->prefetch.fifo[fptr++] = v & 0xff;
- s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
- } else {
- s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
- }
- }
- if (s->prefetch.startengine && (s->prefetch.count == 0)) {
- /* This was the final transfer: raise TERMINALCOUNTSTATUS */
- s->irqst |= 2;
- s->prefetch.startengine = 0;
- }
- /* If there are any bytes in the FIFO at this point then
- * we must raise a DMA request (either this is a final part
- * transfer, or we filled the FIFO in which case we certainly
- * have THRESHOLD bytes available)
- */
- if (s->prefetch.fifopointer != 0) {
- omap_gpmc_dma_update(s, 1);
- }
- omap_gpmc_int_update(s);
-}
-
-/* Access functions for a NAND-like device when the prefetch/postwrite
- * engine is enabled -- all addresses in the region behave alike:
- * data is read or written to the FIFO.
- */
-static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_gpmc_s *s = opaque;
- uint32_t data;
- if (s->prefetch.config1 & 1) {
- /* The TRM doesn't define the behaviour if you read from the
- * FIFO when the prefetch engine is in write mode. We choose
- * to always return zero.
- */
- return 0;
- }
- /* Note that trying to read an empty fifo repeats the last byte */
- if (s->prefetch.fifopointer) {
- s->prefetch.fifopointer--;
- }
- data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
- if (s->prefetch.fifopointer ==
- (64 - prefetch_threshold(s->prefetch.config1))) {
- /* We've drained THRESHOLD bytes now. So deassert the
- * DMA request, then refill the FIFO (which will probably
- * assert it again.)
- */
- omap_gpmc_dma_update(s, 0);
- fill_prefetch_fifo(s);
- }
- omap_gpmc_int_update(s);
- return data;
-}
-
-static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_gpmc_s *s = opaque;
- int cs = prefetch_cs(s->prefetch.config1);
- if ((s->prefetch.config1 & 1) == 0) {
- /* The TRM doesn't define the behaviour of writing to the
- * FIFO when the prefetch engine is in read mode. We
- * choose to ignore the write.
- */
- return;
- }
- if (s->prefetch.count == 0) {
- /* The TRM doesn't define the behaviour of writing to the
- * FIFO if the transfer is complete. We choose to ignore.
- */
- return;
- }
- /* The only reason we do any data buffering in postwrite
- * mode is if we are talking to a 16 bit NAND device, in
- * which case we need to buffer the first byte of the
- * 16 bit word until the other byte arrives.
- */
- int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
- if (is16bit) {
- /* fifopointer alternates between 64 (waiting for first
- * byte of word) and 63 (waiting for second byte)
- */
- if (s->prefetch.fifopointer == 64) {
- s->prefetch.fifo[0] = value;
- s->prefetch.fifopointer--;
- } else {
- value = (value << 8) | s->prefetch.fifo[0];
- omap_nand_write(&s->cs_file[cs], 0, value, 2);
- s->prefetch.count--;
- s->prefetch.fifopointer = 64;
- }
- } else {
- /* Just write the byte : fifopointer remains 64 at all times */
- omap_nand_write(&s->cs_file[cs], 0, value, 1);
- s->prefetch.count--;
- }
- if (s->prefetch.count == 0) {
- /* Final transfer: raise TERMINALCOUNTSTATUS */
- s->irqst |= 2;
- s->prefetch.startengine = 0;
- }
- omap_gpmc_int_update(s);
-}
-
-static const MemoryRegionOps omap_prefetch_ops = {
- .read = omap_gpmc_prefetch_read,
- .write = omap_gpmc_prefetch_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .impl.min_access_size = 1,
- .impl.max_access_size = 1,
-};
-
-static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
-{
- /* Return the MemoryRegion* to map/unmap for this chipselect */
- struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
- if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
- return f->iomem;
- }
- if ((s->prefetch.config1 & 0x80) &&
- (prefetch_cs(s->prefetch.config1) == cs)) {
- /* The prefetch engine is enabled for this CS: map the FIFO */
- return &s->prefetch.iomem;
- }
- return &f->nandiomem;
-}
-
-static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
-{
- struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
- uint32_t mask = (f->config[6] >> 8) & 0xf;
- uint32_t base = f->config[6] & 0x3f;
- uint32_t size;
-
- if (!f->iomem && !f->dev) {
- return;
- }
-
- if (!(f->config[6] & (1 << 6))) {
- /* Do nothing unless CSVALID */
- return;
- }
-
- /* TODO: check for overlapping regions and report access errors */
- if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
- && !(s->accept_256 && !mask)) {
- fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
- __func__, mask);
- }
-
- base <<= 24;
- size = (0x0fffffff & ~(mask << 24)) + 1;
- /* TODO: rather than setting the size of the mapping (which should be
- * constant), the mask should cause wrapping of the address space, so
- * that the same memory becomes accessible at every <i>size</i> bytes
- * starting from <i>base</i>. */
- memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
- memory_region_add_subregion(&f->container, 0,
- omap_gpmc_cs_memregion(s, cs));
- memory_region_add_subregion(get_system_memory(), base,
- &f->container);
-}
-
-static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
-{
- struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
- if (!(f->config[6] & (1 << 6))) {
- /* Do nothing unless CSVALID */
- return;
- }
- if (!f->iomem && !f->dev) {
- return;
- }
- memory_region_del_subregion(get_system_memory(), &f->container);
- memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
- object_unparent(OBJECT(&f->container));
-}
-
-void omap_gpmc_reset(struct omap_gpmc_s *s)
-{
- int i;
-
- s->sysconfig = 0;
- s->irqst = 0;
- s->irqen = 0;
- omap_gpmc_int_update(s);
- for (i = 0; i < 8; i++) {
- /* This has to happen before we change any of the config
- * used to determine which memory regions are mapped or unmapped.
- */
- omap_gpmc_cs_unmap(s, i);
- }
- s->timeout = 0;
- s->config = 0xa00;
- s->prefetch.config1 = 0x00004000;
- s->prefetch.transfercount = 0x00000000;
- s->prefetch.startengine = 0;
- s->prefetch.fifopointer = 0;
- s->prefetch.count = 0;
- for (i = 0; i < 8; i ++) {
- s->cs_file[i].config[1] = 0x101001;
- s->cs_file[i].config[2] = 0x020201;
- s->cs_file[i].config[3] = 0x10031003;
- s->cs_file[i].config[4] = 0x10f1111;
- s->cs_file[i].config[5] = 0;
- s->cs_file[i].config[6] = 0xf00;
- /* In theory we could probe attached devices for some CFG1
- * bits here, but we just retain them across resets as they
- * were set initially by omap_gpmc_attach().
- */
- if (i == 0) {
- s->cs_file[i].config[0] &= 0x00433e00;
- s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
- omap_gpmc_cs_map(s, i);
- } else {
- s->cs_file[i].config[0] &= 0x00403c00;
- }
- }
- s->ecc_cs = 0;
- s->ecc_ptr = 0;
- s->ecc_cfg = 0x3fcff000;
- for (i = 0; i < 9; i ++)
- ecc_reset(&s->ecc[i]);
-}
-
-static int gpmc_wordaccess_only(hwaddr addr)
-{
- /* Return true if the register offset is to a register that
- * only permits word width accesses.
- * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
- * for any chipselect.
- */
- if (addr >= 0x60 && addr <= 0x1d4) {
- int cs = (addr - 0x60) / 0x30;
- addr -= cs * 0x30;
- if (addr >= 0x7c && addr < 0x88) {
- /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
- return 0;
- }
- }
- return 1;
-}
-
-static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- struct omap_gpmc_s *s = opaque;
- int cs;
- struct omap_gpmc_cs_file_s *f;
-
- if (size != 4 && gpmc_wordaccess_only(addr)) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x000: /* GPMC_REVISION */
- return s->revision;
-
- case 0x010: /* GPMC_SYSCONFIG */
- return s->sysconfig;
-
- case 0x014: /* GPMC_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x018: /* GPMC_IRQSTATUS */
- return s->irqst;
-
- case 0x01c: /* GPMC_IRQENABLE */
- return s->irqen;
-
- case 0x040: /* GPMC_TIMEOUT_CONTROL */
- return s->timeout;
-
- case 0x044: /* GPMC_ERR_ADDRESS */
- case 0x048: /* GPMC_ERR_TYPE */
- return 0;
-
- case 0x050: /* GPMC_CONFIG */
- return s->config;
-
- case 0x054: /* GPMC_STATUS */
- return 0x001;
-
- case 0x060 ... 0x1d4:
- cs = (addr - 0x060) / 0x30;
- addr -= cs * 0x30;
- f = s->cs_file + cs;
- switch (addr) {
- case 0x60: /* GPMC_CONFIG1 */
- return f->config[0];
- case 0x64: /* GPMC_CONFIG2 */
- return f->config[1];
- case 0x68: /* GPMC_CONFIG3 */
- return f->config[2];
- case 0x6c: /* GPMC_CONFIG4 */
- return f->config[3];
- case 0x70: /* GPMC_CONFIG5 */
- return f->config[4];
- case 0x74: /* GPMC_CONFIG6 */
- return f->config[5];
- case 0x78: /* GPMC_CONFIG7 */
- return f->config[6];
- case 0x84 ... 0x87: /* GPMC_NAND_DATA */
- if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
- return omap_nand_read(f, 0, size);
- }
- return 0;
- }
- break;
-
- case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
- return s->prefetch.config1;
- case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
- return s->prefetch.transfercount;
- case 0x1ec: /* GPMC_PREFETCH_CONTROL */
- return s->prefetch.startengine;
- case 0x1f0: /* GPMC_PREFETCH_STATUS */
- /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
- * FIFOTHRESHOLDSTATUS bit should be set when
- * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
- * Apparently the underlying functional spec from which the TRM was
- * created states that the behaviour is ">=", and this also
- * makes more conceptual sense.
- */
- return (s->prefetch.fifopointer << 24) |
- ((s->prefetch.fifopointer >=
- ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
- s->prefetch.count;
-
- case 0x1f4: /* GPMC_ECC_CONFIG */
- return s->ecc_cs;
- case 0x1f8: /* GPMC_ECC_CONTROL */
- return s->ecc_ptr;
- case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
- return s->ecc_cfg;
- case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
- cs = (addr & 0x1f) >> 2;
- /* TODO: check correctness */
- return
- ((s->ecc[cs].cp & 0x07) << 0) |
- ((s->ecc[cs].cp & 0x38) << 13) |
- ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
- ((s->ecc[cs].lp[1] & 0x1ff) << 19);
-
- case 0x230: /* GPMC_TESTMODE_CTRL */
- return 0;
- case 0x234: /* GPMC_PSA_LSB */
- case 0x238: /* GPMC_PSA_MSB */
- return 0x00000000;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_gpmc_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_gpmc_s *s = opaque;
- int cs;
- struct omap_gpmc_cs_file_s *f;
-
- if (size != 4 && gpmc_wordaccess_only(addr)) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x000: /* GPMC_REVISION */
- case 0x014: /* GPMC_SYSSTATUS */
- case 0x054: /* GPMC_STATUS */
- case 0x1f0: /* GPMC_PREFETCH_STATUS */
- case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
- case 0x234: /* GPMC_PSA_LSB */
- case 0x238: /* GPMC_PSA_MSB */
- OMAP_RO_REG(addr);
- break;
-
- case 0x010: /* GPMC_SYSCONFIG */
- if ((value >> 3) == 0x3)
- fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
- __func__, value >> 3);
- if (value & 2)
- omap_gpmc_reset(s);
- s->sysconfig = value & 0x19;
- break;
-
- case 0x018: /* GPMC_IRQSTATUS */
- s->irqst &= ~value;
- omap_gpmc_int_update(s);
- break;
-
- case 0x01c: /* GPMC_IRQENABLE */
- s->irqen = value & 0xf03;
- omap_gpmc_int_update(s);
- break;
-
- case 0x040: /* GPMC_TIMEOUT_CONTROL */
- s->timeout = value & 0x1ff1;
- break;
-
- case 0x044: /* GPMC_ERR_ADDRESS */
- case 0x048: /* GPMC_ERR_TYPE */
- break;
-
- case 0x050: /* GPMC_CONFIG */
- s->config = value & 0xf13;
- break;
-
- case 0x060 ... 0x1d4:
- cs = (addr - 0x060) / 0x30;
- addr -= cs * 0x30;
- f = s->cs_file + cs;
- switch (addr) {
- case 0x60: /* GPMC_CONFIG1 */
- f->config[0] = value & 0xffef3e13;
- break;
- case 0x64: /* GPMC_CONFIG2 */
- f->config[1] = value & 0x001f1f8f;
- break;
- case 0x68: /* GPMC_CONFIG3 */
- f->config[2] = value & 0x001f1f8f;
- break;
- case 0x6c: /* GPMC_CONFIG4 */
- f->config[3] = value & 0x1f8f1f8f;
- break;
- case 0x70: /* GPMC_CONFIG5 */
- f->config[4] = value & 0x0f1f1f1f;
- break;
- case 0x74: /* GPMC_CONFIG6 */
- f->config[5] = value & 0x00000fcf;
- break;
- case 0x78: /* GPMC_CONFIG7 */
- if ((f->config[6] ^ value) & 0xf7f) {
- omap_gpmc_cs_unmap(s, cs);
- f->config[6] = value & 0x00000f7f;
- omap_gpmc_cs_map(s, cs);
- }
- break;
- case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
- if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
- nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
- omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
- }
- break;
- case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
- if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
- nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
- omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
- }
- break;
- case 0x84 ... 0x87: /* GPMC_NAND_DATA */
- if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
- omap_nand_write(f, 0, value, size);
- }
- break;
- default:
- goto bad_reg;
- }
- break;
-
- case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
- if (!s->prefetch.startengine) {
- uint32_t newconfig1 = value & 0x7f8f7fbf;
- uint32_t changed;
- changed = newconfig1 ^ s->prefetch.config1;
- if (changed & (0x80 | 0x7000000)) {
- /* Turning the engine on or off, or mapping it somewhere else.
- * cs_map() and cs_unmap() check the prefetch config and
- * overall CSVALID bits, so it is sufficient to unmap-and-map
- * both the old cs and the new one. Note that we adhere to
- * the "unmap/change config/map" order (and not unmap twice
- * if newcs == oldcs), otherwise we'll try to delete the wrong
- * memory region.
- */
- int oldcs = prefetch_cs(s->prefetch.config1);
- int newcs = prefetch_cs(newconfig1);
- omap_gpmc_cs_unmap(s, oldcs);
- if (oldcs != newcs) {
- omap_gpmc_cs_unmap(s, newcs);
- }
- s->prefetch.config1 = newconfig1;
- omap_gpmc_cs_map(s, oldcs);
- if (oldcs != newcs) {
- omap_gpmc_cs_map(s, newcs);
- }
- } else {
- s->prefetch.config1 = newconfig1;
- }
- }
- break;
-
- case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
- if (!s->prefetch.startengine) {
- s->prefetch.transfercount = value & 0x3fff;
- }
- break;
-
- case 0x1ec: /* GPMC_PREFETCH_CONTROL */
- if (s->prefetch.startengine != (value & 1)) {
- s->prefetch.startengine = value & 1;
- if (s->prefetch.startengine) {
- /* Prefetch engine start */
- s->prefetch.count = s->prefetch.transfercount;
- if (s->prefetch.config1 & 1) {
- /* Write */
- s->prefetch.fifopointer = 64;
- } else {
- /* Read */
- s->prefetch.fifopointer = 0;
- fill_prefetch_fifo(s);
- }
- } else {
- /* Prefetch engine forcibly stopped. The TRM
- * doesn't define the behaviour if you do this.
- * We clear the prefetch count, which means that
- * we permit no more writes, and don't read any
- * more data from NAND. The CPU can still drain
- * the FIFO of unread data.
- */
- s->prefetch.count = 0;
- }
- omap_gpmc_int_update(s);
- }
- break;
-
- case 0x1f4: /* GPMC_ECC_CONFIG */
- s->ecc_cs = 0x8f;
- break;
- case 0x1f8: /* GPMC_ECC_CONTROL */
- if (value & (1 << 8))
- for (cs = 0; cs < 9; cs ++)
- ecc_reset(&s->ecc[cs]);
- s->ecc_ptr = value & 0xf;
- if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
- s->ecc_ptr = 0;
- s->ecc_cs &= ~1;
- }
- break;
- case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
- s->ecc_cfg = value & 0x3fcff1ff;
- break;
- case 0x230: /* GPMC_TESTMODE_CTRL */
- if (value & 7)
- fprintf(stderr, "%s: test mode enable attempt\n", __func__);
- break;
-
- default:
- bad_reg:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_gpmc_ops = {
- .read = omap_gpmc_read,
- .write = omap_gpmc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
- hwaddr base,
- qemu_irq irq, qemu_irq drq)
-{
- int cs;
- struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1);
-
- memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
- memory_region_add_subregion(get_system_memory(), base, &s->iomem);
-
- s->irq = irq;
- s->drq = drq;
- s->accept_256 = cpu_is_omap3630(mpu);
- s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
- s->lastirq = 0;
- omap_gpmc_reset(s);
-
- /* We have to register a different IO memory handler for each
- * chip select region in case a NAND device is mapped there. We
- * make the region the worst-case size of 256MB and rely on the
- * container memory region in cs_map to chop it down to the actual
- * guest-requested size.
- */
- for (cs = 0; cs < 8; cs++) {
- memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
- &omap_nand_ops,
- &s->cs_file[cs],
- "omap-nand",
- 256 * 1024 * 1024);
- }
-
- memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
- "omap-gpmc-prefetch", 256 * 1024 * 1024);
- return s;
-}
-
-void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
-{
- struct omap_gpmc_cs_file_s *f;
- assert(iomem);
-
- if (cs < 0 || cs >= 8) {
- fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
- exit(-1);
- }
- f = &s->cs_file[cs];
-
- omap_gpmc_cs_unmap(s, cs);
- f->config[0] &= ~(0xf << 10);
- f->iomem = iomem;
- omap_gpmc_cs_map(s, cs);
-}
-
-void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
-{
- struct omap_gpmc_cs_file_s *f;
- assert(nand);
-
- if (cs < 0 || cs >= 8) {
- fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
- exit(-1);
- }
- f = &s->cs_file[cs];
-
- omap_gpmc_cs_unmap(s, cs);
- f->config[0] &= ~(0xf << 10);
- f->config[0] |= (OMAP_GPMC_NAND << 10);
- f->dev = nand;
- if (nand_getbuswidth(f->dev) == 16) {
- f->config[0] |= OMAP_GPMC_16BIT << 12;
- }
- omap_gpmc_cs_map(s, cs);
-}
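
The address-window arithmetic in omap_gpmc_cs_map() above is terse, so a stand-alone sketch of the same GPMC_CONFIG7 decode may help; the helper name and the example register values are invented purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode GPMC_CONFIG7 the way omap_gpmc_cs_map() does:
     *   bits [5:0]  BASEADDRESS -> window base  = BASEADDRESS << 24
     *   bit  6      CSVALID     -> mapping enabled
     *   bits [11:8] MASKADDRESS -> window size  = (0x0fffffff & ~(mask << 24)) + 1
     */
    static void gpmc_decode_config7(uint32_t config7)
    {
        uint32_t mask = (config7 >> 8) & 0xf;
        uint32_t base = (config7 & 0x3f) << 24;
        uint32_t size = (0x0fffffff & ~(mask << 24)) + 1;

        printf("CSVALID=%u base=0x%08x size=%uMB\n",
               (config7 >> 6) & 1, base, size >> 20);
    }

    int main(void)
    {
        gpmc_decode_config7(0x00000f40); /* CS0 reset value: mask 0xf -> 16MB at 0x00000000 */
        gpmc_decode_config7(0x00000844); /* mask 0x8 -> 128MB window at 0x04000000 */
        return 0;
    }

Only masks 0x8, 0xc, 0xe and 0xf (128, 64, 32 and 16 MB windows) pass the validity check in the deleted code, plus mask 0 (256 MB) on chips where accept_256 is set.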
diff --git a/hw/misc/omap_l4.c b/hw/misc/omap_l4.c
deleted file mode 100644
index b787548..0000000
--- a/hw/misc/omap_l4.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * TI OMAP L4 interconnect emulation.
- *
- * Copyright (C) 2007-2009 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "hw/arm/omap.h"
-
-struct omap_l4_s {
- MemoryRegion *address_space;
- hwaddr base;
- int ta_num;
- struct omap_target_agent_s ta[];
-};
-
-struct omap_l4_s *omap_l4_init(MemoryRegion *address_space,
- hwaddr base, int ta_num)
-{
- struct omap_l4_s *bus = g_malloc0(
- sizeof(*bus) + ta_num * sizeof(*bus->ta));
-
- bus->address_space = address_space;
- bus->ta_num = ta_num;
- bus->base = base;
-
- return bus;
-}
-
-hwaddr omap_l4_region_base(struct omap_target_agent_s *ta,
- int region)
-{
- return ta->bus->base + ta->start[region].offset;
-}
-
-hwaddr omap_l4_region_size(struct omap_target_agent_s *ta,
- int region)
-{
- return ta->start[region].size;
-}
-
-static uint64_t omap_l4ta_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_target_agent_s *s = opaque;
-
- if (size != 2) {
- return omap_badwidth_read16(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* COMPONENT */
- return s->component;
-
- case 0x20: /* AGENT_CONTROL */
- return s->control;
-
- case 0x28: /* AGENT_STATUS */
- return s->status;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_l4ta_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_target_agent_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x00: /* COMPONENT */
- case 0x28: /* AGENT_STATUS */
- OMAP_RO_REG(addr);
- break;
-
- case 0x20: /* AGENT_CONTROL */
- s->control = value & 0x01000700;
- if (value & 1) /* OCP_RESET */
- s->status &= ~1; /* REQ_TIMEOUT */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static const MemoryRegionOps omap_l4ta_ops = {
- .read = omap_l4ta_read,
- .write = omap_l4ta_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_target_agent_s *omap_l4ta_get(struct omap_l4_s *bus,
- const struct omap_l4_region_s *regions,
- const struct omap_l4_agent_info_s *agents,
- int cs)
-{
- int i;
- struct omap_target_agent_s *ta = NULL;
- const struct omap_l4_agent_info_s *info = NULL;
-
- for (i = 0; i < bus->ta_num; i ++)
- if (agents[i].ta == cs) {
- ta = &bus->ta[i];
- info = &agents[i];
- break;
- }
- if (!ta) {
- fprintf(stderr, "%s: bad target agent (%i)\n", __func__, cs);
- exit(-1);
- }
-
- ta->bus = bus;
- ta->start = &regions[info->region];
- ta->regions = info->regions;
-
- ta->component = ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
- ta->status = 0x00000000;
- ta->control = 0x00000200; /* XXX 01000200 for L4TAO */
-
- memory_region_init_io(&ta->iomem, NULL, &omap_l4ta_ops, ta, "omap.l4ta",
- omap_l4_region_size(ta, info->ta_region));
- omap_l4_attach(ta, info->ta_region, &ta->iomem);
-
- return ta;
-}
-
-hwaddr omap_l4_attach(struct omap_target_agent_s *ta,
- int region, MemoryRegion *mr)
-{
- hwaddr base;
-
- if (region < 0 || region >= ta->regions) {
- fprintf(stderr, "%s: bad io region (%i)\n", __func__, region);
- exit(-1);
- }
-
- base = ta->bus->base + ta->start[region].offset;
- if (mr) {
- memory_region_add_subregion(ta->bus->address_space, base, mr);
- }
-
- return base;
-}
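
The deleted omap_l4.c boils down to one address computation: an attached region lands at the L4 bus base plus a per-region offset, and omap_l4_attach() adds the MemoryRegion there. A toy sketch of that lookup, with invented base/offset values (the real region tables lived with the OMAP2 machine code):

    #include <stdint.h>
    #include <stdio.h>

    struct l4_region { uint64_t offset; uint64_t size; };

    /* Equivalent of omap_l4_region_base(): bus base + region offset. */
    static uint64_t l4_region_base(uint64_t l4_base, const struct l4_region *r)
    {
        return l4_base + r->offset;
    }

    int main(void)
    {
        struct l4_region tap = { .offset = 0x2000, .size = 0x1000 }; /* made-up numbers */
        uint64_t l4_base = 0x48000000;                               /* hypothetical L4 base */

        printf("region at 0x%08llx..0x%08llx\n",
               (unsigned long long)l4_region_base(l4_base, &tap),
               (unsigned long long)(l4_region_base(l4_base, &tap) + tap.size - 1));
        return 0;
    }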
diff --git a/hw/misc/omap_sdrc.c b/hw/misc/omap_sdrc.c
deleted file mode 100644
index 6aa1b3e..0000000
--- a/hw/misc/omap_sdrc.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * TI OMAP SDRAM controller emulation.
- *
- * Copyright (C) 2007-2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "hw/arm/omap.h"
-
-/* SDRAM Controller Subsystem */
-struct omap_sdrc_s {
- MemoryRegion iomem;
- uint8_t config;
-};
-
-void omap_sdrc_reset(struct omap_sdrc_s *s)
-{
- s->config = 0x10;
-}
-
-static uint64_t omap_sdrc_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_sdrc_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* SDRC_REVISION */
- return 0x20;
-
- case 0x10: /* SDRC_SYSCONFIG */
- return s->config;
-
- case 0x14: /* SDRC_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x40: /* SDRC_CS_CFG */
- case 0x44: /* SDRC_SHARING */
- case 0x48: /* SDRC_ERR_ADDR */
- case 0x4c: /* SDRC_ERR_TYPE */
- case 0x60: /* SDRC_DLLA_SCTRL */
- case 0x64: /* SDRC_DLLA_STATUS */
- case 0x68: /* SDRC_DLLB_CTRL */
- case 0x6c: /* SDRC_DLLB_STATUS */
- case 0x70: /* SDRC_POWER */
- case 0x80: /* SDRC_MCFG_0 */
- case 0x84: /* SDRC_MR_0 */
- case 0x88: /* SDRC_EMR1_0 */
- case 0x8c: /* SDRC_EMR2_0 */
- case 0x90: /* SDRC_EMR3_0 */
- case 0x94: /* SDRC_DCDL1_CTRL */
- case 0x98: /* SDRC_DCDL2_CTRL */
- case 0x9c: /* SDRC_ACTIM_CTRLA_0 */
- case 0xa0: /* SDRC_ACTIM_CTRLB_0 */
- case 0xa4: /* SDRC_RFR_CTRL_0 */
- case 0xa8: /* SDRC_MANUAL_0 */
- case 0xb0: /* SDRC_MCFG_1 */
- case 0xb4: /* SDRC_MR_1 */
- case 0xb8: /* SDRC_EMR1_1 */
- case 0xbc: /* SDRC_EMR2_1 */
- case 0xc0: /* SDRC_EMR3_1 */
- case 0xc4: /* SDRC_ACTIM_CTRLA_1 */
- case 0xc8: /* SDRC_ACTIM_CTRLB_1 */
- case 0xd4: /* SDRC_RFR_CTRL_1 */
- case 0xd8: /* SDRC_MANUAL_1 */
- return 0x00;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_sdrc_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_sdrc_s *s = opaque;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x00: /* SDRC_REVISION */
- case 0x14: /* SDRC_SYSSTATUS */
- case 0x48: /* SDRC_ERR_ADDR */
- case 0x64: /* SDRC_DLLA_STATUS */
- case 0x6c: /* SDRC_DLLB_STATUS */
- OMAP_RO_REG(addr);
- return;
-
- case 0x10: /* SDRC_SYSCONFIG */
- if ((value >> 3) != 0x2)
- fprintf(stderr, "%s: bad SDRAM idle mode %i\n",
- __func__, (unsigned)value >> 3);
- if (value & 2)
- omap_sdrc_reset(s);
- s->config = value & 0x18;
- break;
-
- case 0x40: /* SDRC_CS_CFG */
- case 0x44: /* SDRC_SHARING */
- case 0x4c: /* SDRC_ERR_TYPE */
- case 0x60: /* SDRC_DLLA_SCTRL */
- case 0x68: /* SDRC_DLLB_CTRL */
- case 0x70: /* SDRC_POWER */
- case 0x80: /* SDRC_MCFG_0 */
- case 0x84: /* SDRC_MR_0 */
- case 0x88: /* SDRC_EMR1_0 */
- case 0x8c: /* SDRC_EMR2_0 */
- case 0x90: /* SDRC_EMR3_0 */
- case 0x94: /* SDRC_DCDL1_CTRL */
- case 0x98: /* SDRC_DCDL2_CTRL */
- case 0x9c: /* SDRC_ACTIM_CTRLA_0 */
- case 0xa0: /* SDRC_ACTIM_CTRLB_0 */
- case 0xa4: /* SDRC_RFR_CTRL_0 */
- case 0xa8: /* SDRC_MANUAL_0 */
- case 0xb0: /* SDRC_MCFG_1 */
- case 0xb4: /* SDRC_MR_1 */
- case 0xb8: /* SDRC_EMR1_1 */
- case 0xbc: /* SDRC_EMR2_1 */
- case 0xc0: /* SDRC_EMR3_1 */
- case 0xc4: /* SDRC_ACTIM_CTRLA_1 */
- case 0xc8: /* SDRC_ACTIM_CTRLB_1 */
- case 0xd4: /* SDRC_RFR_CTRL_1 */
- case 0xd8: /* SDRC_MANUAL_1 */
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_sdrc_ops = {
- .read = omap_sdrc_read,
- .write = omap_sdrc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem,
- hwaddr base)
-{
- struct omap_sdrc_s *s = g_new0(struct omap_sdrc_s, 1);
-
- omap_sdrc_reset(s);
-
- memory_region_init_io(&s->iomem, NULL, &omap_sdrc_ops, s, "omap.sdrc", 0x1000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- return s;
-}
diff --git a/hw/misc/omap_tap.c b/hw/misc/omap_tap.c
deleted file mode 100644
index 4d7fb7d..0000000
--- a/hw/misc/omap_tap.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * TI OMAP TEST-Chip-level TAP emulation.
- *
- * Copyright (C) 2007-2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "hw/arm/omap.h"
-
-/* TEST-Chip-level TAP */
-static uint64_t omap_tap_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_mpu_state_s *s = opaque;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x204: /* IDCODE_reg */
- switch (s->mpu_model) {
- case omap2420:
- case omap2422:
- case omap2423:
- return 0x5b5d902f; /* ES 2.2 */
- case omap2430:
- return 0x5b68a02f; /* ES 2.2 */
- case omap3430:
- return 0x1b7ae02f; /* ES 2 */
- default:
- hw_error("%s: Bad mpu model\n", __func__);
- }
-
- case 0x208: /* PRODUCTION_ID_reg for OMAP2 */
- case 0x210: /* PRODUCTION_ID_reg for OMAP3 */
- switch (s->mpu_model) {
- case omap2420:
- return 0x000254f0; /* POP ESHS2.1.1 in N91/93/95, ES2 in N800 */
- case omap2422:
- return 0x000400f0;
- case omap2423:
- return 0x000800f0;
- case omap2430:
- return 0x000000f0;
- case omap3430:
- return 0x000000f0;
- default:
- hw_error("%s: Bad mpu model\n", __func__);
- }
-
- case 0x20c:
- switch (s->mpu_model) {
- case omap2420:
- case omap2422:
- case omap2423:
- return 0xcafeb5d9; /* ES 2.2 */
- case omap2430:
- return 0xcafeb68a; /* ES 2.2 */
- case omap3430:
- return 0xcafeb7ae; /* ES 2 */
- default:
- hw_error("%s: Bad mpu model\n", __func__);
- }
-
- case 0x218: /* DIE_ID_reg */
- return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
- case 0x21c: /* DIE_ID_reg */
- return 0x54 << 24;
- case 0x220: /* DIE_ID_reg */
- return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
- case 0x224: /* DIE_ID_reg */
- return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_tap_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- OMAP_BAD_REG(addr);
-}
-
-static const MemoryRegionOps omap_tap_ops = {
- .read = omap_tap_read,
- .write = omap_tap_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-void omap_tap_init(struct omap_target_agent_s *ta,
- struct omap_mpu_state_s *mpu)
-{
- memory_region_init_io(&mpu->tap_iomem, NULL, &omap_tap_ops, mpu, "omap.tap",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &mpu->tap_iomem);
-}
diff --git a/hw/misc/pc-testdev.c b/hw/misc/pc-testdev.c
index e389651..67c486f 100644
--- a/hw/misc/pc-testdev.c
+++ b/hw/misc/pc-testdev.c
@@ -193,7 +193,7 @@ static void testdev_realizefn(DeviceState *d, Error **errp)
memory_region_add_subregion(mem, 0xff000000, &dev->iomem);
}
-static void testdev_class_init(ObjectClass *klass, void *data)
+static void testdev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/pci-testdev.c b/hw/misc/pci-testdev.c
index acedd0f..ba71c50 100644
--- a/hw/misc/pci-testdev.c
+++ b/hw/misc/pci-testdev.c
@@ -23,7 +23,7 @@
#include "hw/qdev-properties.h"
#include "qemu/event_notifier.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qom/object.h"
typedef struct PCITestDevHdr {
@@ -90,6 +90,7 @@ struct PCITestDevState {
int current;
uint64_t membar_size;
+ bool membar_backed;
MemoryRegion membar;
};
@@ -258,8 +259,14 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp)
pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->portio);
if (d->membar_size) {
- memory_region_init(&d->membar, OBJECT(d), "pci-testdev-membar",
- d->membar_size);
+ if (d->membar_backed)
+ memory_region_init_ram(&d->membar, OBJECT(d),
+ "pci-testdev-membar-backed",
+ d->membar_size, NULL);
+ else
+ memory_region_init(&d->membar, OBJECT(d),
+ "pci-testdev-membar",
+ d->membar_size);
pci_register_bar(pci_dev, 2,
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_PREFETCH |
@@ -319,12 +326,12 @@ static void qdev_pci_testdev_reset(DeviceState *dev)
pci_testdev_reset(d);
}
-static Property pci_testdev_properties[] = {
+static const Property pci_testdev_properties[] = {
DEFINE_PROP_SIZE("membar", PCITestDevState, membar_size, 0),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("membar-backed", PCITestDevState, membar_backed, false),
};
-static void pci_testdev_class_init(ObjectClass *klass, void *data)
+static void pci_testdev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -337,7 +344,7 @@ static void pci_testdev_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_OTHERS;
dc->desc = "PCI Test Device";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = qdev_pci_testdev_reset;
+ device_class_set_legacy_reset(dc, qdev_pci_testdev_reset);
device_class_set_props(dc, pci_testdev_properties);
}
@@ -346,7 +353,7 @@ static const TypeInfo pci_testdev_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCITestDevState),
.class_init = pci_testdev_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/misc/pvpanic-isa.c b/hw/misc/pvpanic-isa.c
index 9a923b7..f7b421c 100644
--- a/hw/misc/pvpanic-isa.c
+++ b/hw/misc/pvpanic-isa.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
@@ -98,14 +98,13 @@ static void build_pvpanic_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
aml_append(scope, dev);
}
-static Property pvpanic_isa_properties[] = {
+static const Property pvpanic_isa_properties[] = {
DEFINE_PROP_UINT16(PVPANIC_IOPORT_PROP, PVPanicISAState, ioport, 0x505),
DEFINE_PROP_UINT8("events", PVPanicISAState, pvpanic.events,
PVPANIC_EVENTS),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pvpanic_isa_class_init(ObjectClass *klass, void *data)
+static void pvpanic_isa_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
@@ -122,7 +121,7 @@ static const TypeInfo pvpanic_isa_info = {
.instance_size = sizeof(PVPanicISAState),
.instance_init = pvpanic_isa_initfn,
.class_init = pvpanic_isa_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/misc/pvpanic-mmio.c b/hw/misc/pvpanic-mmio.c
new file mode 100644
index 0000000..2a36310
--- /dev/null
+++ b/hw/misc/pvpanic-mmio.c
@@ -0,0 +1,60 @@
+/*
+ * QEMU simulated pvpanic device (MMIO frontend)
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/misc/pvpanic.h"
+#include "hw/sysbus.h"
+#include "standard-headers/misc/pvpanic.h"
+
+OBJECT_DECLARE_SIMPLE_TYPE(PVPanicMMIOState, PVPANIC_MMIO_DEVICE)
+
+#define PVPANIC_MMIO_SIZE 0x2
+
+struct PVPanicMMIOState {
+ SysBusDevice parent_obj;
+
+ PVPanicState pvpanic;
+};
+
+static void pvpanic_mmio_initfn(Object *obj)
+{
+ PVPanicMMIOState *s = PVPANIC_MMIO_DEVICE(obj);
+
+ pvpanic_setup_io(&s->pvpanic, DEVICE(s), PVPANIC_MMIO_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->pvpanic.mr);
+}
+
+static const Property pvpanic_mmio_properties[] = {
+ DEFINE_PROP_UINT8("events", PVPanicMMIOState, pvpanic.events,
+ PVPANIC_PANICKED | PVPANIC_CRASH_LOADED),
+};
+
+static void pvpanic_mmio_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, pvpanic_mmio_properties);
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo pvpanic_mmio_info = {
+ .name = TYPE_PVPANIC_MMIO_DEVICE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PVPanicMMIOState),
+ .instance_init = pvpanic_mmio_initfn,
+ .class_init = pvpanic_mmio_class_init,
+};
+
+static void pvpanic_register_types(void)
+{
+ type_register_static(&pvpanic_mmio_info);
+}
+
+type_init(pvpanic_register_types)
diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c
index 106d03c..2869b6a 100644
--- a/hw/misc/pvpanic-pci.c
+++ b/hw/misc/pvpanic-pci.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
@@ -53,13 +53,12 @@ static void pvpanic_pci_realizefn(PCIDevice *dev, Error **errp)
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &ps->mr);
}
-static Property pvpanic_pci_properties[] = {
+static const Property pvpanic_pci_properties[] = {
DEFINE_PROP_UINT8("events", PVPanicPCIState, pvpanic.events,
PVPANIC_EVENTS),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pvpanic_pci_class_init(ObjectClass *klass, void *data)
+static void pvpanic_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
@@ -81,7 +80,7 @@ static const TypeInfo pvpanic_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PVPanicPCIState),
.class_init = pvpanic_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
}
diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c
index 3b89334..c83247c 100644
--- a/hw/misc/pvpanic.c
+++ b/hw/misc/pvpanic.c
@@ -15,7 +15,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c
index 86b23a5..dfee1af 100644
--- a/hw/misc/sbsa_ec.c
+++ b/hw/misc/sbsa_ec.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/sysbus.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
typedef struct SECUREECState {
SysBusDevice parent_obj;
@@ -73,7 +73,7 @@ static void sbsa_ec_init(Object *obj)
sysbus_init_mmio(dev, &s->iomem);
}
-static void sbsa_ec_class_init(ObjectClass *klass, void *data)
+static void sbsa_ec_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/sifive_e_aon.c b/hw/misc/sifive_e_aon.c
index 4656457..6eef38d 100644
--- a/hw/misc/sifive_e_aon.c
+++ b/hw/misc/sifive_e_aon.c
@@ -24,7 +24,7 @@
#include "hw/misc/sifive_e_aon.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/qdev-properties.h"
REG32(AON_WDT_WDOGCFG, 0x0)
@@ -289,17 +289,16 @@ static void sifive_e_aon_init(Object *obj)
sysbus_init_irq(sbd, &r->wdog_irq);
}
-static Property sifive_e_aon_properties[] = {
+static const Property sifive_e_aon_properties[] = {
DEFINE_PROP_UINT64("wdogclk-frequency", SiFiveEAONState, wdogclk_freq,
SIFIVE_E_LFCLK_DEFAULT_FREQ),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sifive_e_aon_class_init(ObjectClass *oc, void *data)
+static void sifive_e_aon_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- dc->reset = sifive_e_aon_reset;
+ device_class_set_legacy_reset(dc, sifive_e_aon_reset);
device_class_set_props(dc, sifive_e_aon_properties);
}
diff --git a/hw/misc/sifive_test.c b/hw/misc/sifive_test.c
index ad68807..b94bb2d 100644
--- a/hw/misc/sifive_test.c
+++ b/hw/misc/sifive_test.c
@@ -23,9 +23,9 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/misc/sifive_test.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static uint64_t sifive_test_read(void *opaque, hwaddr addr, unsigned int size)
{
diff --git a/hw/misc/sifive_u_otp.c b/hw/misc/sifive_u_otp.c
index 8965f5c..1ebed2f 100644
--- a/hw/misc/sifive_u_otp.c
+++ b/hw/misc/sifive_u_otp.c
@@ -27,8 +27,8 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/misc/sifive_u_otp.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/block-backend.h"
+#include "system/blockdev.h"
+#include "system/block-backend.h"
#define WRITTEN_BIT_ON 0x1
@@ -194,10 +194,9 @@ static const MemoryRegionOps sifive_u_otp_ops = {
}
};
-static Property sifive_u_otp_properties[] = {
+static const Property sifive_u_otp_properties[] = {
DEFINE_PROP_UINT32("serial", SiFiveUOTPState, serial, 0),
DEFINE_PROP_DRIVE("drive", SiFiveUOTPState, blk),
- DEFINE_PROP_END_OF_LIST(),
};
static void sifive_u_otp_realize(DeviceState *dev, Error **errp)
@@ -271,7 +270,7 @@ static void sifive_u_otp_realize(DeviceState *dev, Error **errp)
memset(s->fuse_wo, 0x00, sizeof(s->fuse_wo));
}
-static void sifive_u_otp_class_init(ObjectClass *klass, void *data)
+static void sifive_u_otp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/sifive_u_prci.c b/hw/misc/sifive_u_prci.c
index 5d9d446..6e75cb6 100644
--- a/hw/misc/sifive_u_prci.c
+++ b/hw/misc/sifive_u_prci.c
@@ -146,12 +146,12 @@ static void sifive_u_prci_reset(DeviceState *dev)
s->coreclksel = SIFIVE_U_PRCI_CORECLKSEL_HFCLK;
}
-static void sifive_u_prci_class_init(ObjectClass *klass, void *data)
+static void sifive_u_prci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sifive_u_prci_realize;
- dc->reset = sifive_u_prci_reset;
+ device_class_set_legacy_reset(dc, sifive_u_prci_reset);
}
static const TypeInfo sifive_u_prci_info = {
diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c
index 94369e4..a034df3 100644
--- a/hw/misc/slavio_misc.c
+++ b/hw/misc/slavio_misc.c
@@ -27,7 +27,7 @@
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
#include "qom/object.h"
@@ -483,11 +483,11 @@ static void slavio_misc_init(Object *obj)
qdev_init_gpio_in(dev, slavio_set_power_fail, 1);
}
-static void slavio_misc_class_init(ObjectClass *klass, void *data)
+static void slavio_misc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = slavio_misc_reset;
+ device_class_set_legacy_reset(dc, slavio_misc_reset);
dc->vmsd = &vmstate_misc;
}
diff --git a/hw/misc/stm32_rcc.c b/hw/misc/stm32_rcc.c
new file mode 100644
index 0000000..5815b3e
--- /dev/null
+++ b/hw/misc/stm32_rcc.c
@@ -0,0 +1,162 @@
+/*
+ * STM32 RCC (only reset and enable registers are implemented)
+ *
+ * Copyright (c) 2024 Román Cárdenas <rcardenas.rod@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/stm32_rcc.h"
+
+static void stm32_rcc_reset(DeviceState *dev)
+{
+ STM32RccState *s = STM32_RCC(dev);
+
+ for (int i = 0; i < STM32_RCC_NREGS; i++) {
+ s->regs[i] = 0;
+ }
+}
+
+static uint64_t stm32_rcc_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ STM32RccState *s = STM32_RCC(opaque);
+
+ uint32_t value = 0;
+ if (addr > STM32_RCC_DCKCFGR2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, addr);
+ } else {
+ value = s->regs[addr >> 2];
+ }
+ trace_stm32_rcc_read(addr, value);
+ return value;
+}
+
+static void stm32_rcc_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ STM32RccState *s = STM32_RCC(opaque);
+ uint32_t value = val64;
+ uint32_t prev_value, new_value, irq_offset;
+
+ trace_stm32_rcc_write(addr, value);
+
+ if (addr > STM32_RCC_DCKCFGR2) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, addr);
+ return;
+ }
+
+ switch (addr) {
+ case STM32_RCC_AHB1_RSTR...STM32_RCC_APB2_RSTR:
+ prev_value = s->regs[addr / 4];
+ s->regs[addr / 4] = value;
+
+ irq_offset = ((addr - STM32_RCC_AHB1_RSTR) / 4) * 32;
+ for (int i = 0; i < 32; i++) {
+ new_value = extract32(value, i, 1);
+ if (extract32(prev_value, i, 1) && !new_value) {
+ trace_stm32_rcc_pulse_reset(irq_offset + i, new_value);
+ qemu_set_irq(s->reset_irq[irq_offset + i], new_value);
+ }
+ }
+ return;
+ case STM32_RCC_AHB1_ENR...STM32_RCC_APB2_ENR:
+ prev_value = s->regs[addr / 4];
+ s->regs[addr / 4] = value;
+
+ irq_offset = ((addr - STM32_RCC_AHB1_ENR) / 4) * 32;
+ for (int i = 0; i < 32; i++) {
+ new_value = extract32(value, i, 1);
+ if (!extract32(prev_value, i, 1) && new_value) {
+ trace_stm32_rcc_pulse_enable(irq_offset + i, new_value);
+ qemu_set_irq(s->enable_irq[irq_offset + i], new_value);
+ }
+ }
+ return;
+ default:
+ qemu_log_mask(
+ LOG_UNIMP,
+ "%s: The RCC peripheral only supports enable and reset in QEMU\n",
+ __func__
+ );
+ s->regs[addr >> 2] = value;
+ }
+}
+
+static const MemoryRegionOps stm32_rcc_ops = {
+ .read = stm32_rcc_read,
+ .write = stm32_rcc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void stm32_rcc_init(Object *obj)
+{
+ STM32RccState *s = STM32_RCC(obj);
+
+ memory_region_init_io(&s->mmio, obj, &stm32_rcc_ops, s,
+ TYPE_STM32_RCC, STM32_RCC_PERIPHERAL_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+
+ qdev_init_gpio_out(DEVICE(obj), s->reset_irq, STM32_RCC_NIRQS);
+ qdev_init_gpio_out(DEVICE(obj), s->enable_irq, STM32_RCC_NIRQS);
+
+ for (int i = 0; i < STM32_RCC_NIRQS; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->reset_irq[i]);
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->enable_irq[i]);
+ }
+}
+
+static const VMStateDescription vmstate_stm32_rcc = {
+ .name = TYPE_STM32_RCC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, STM32RccState, STM32_RCC_NREGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void stm32_rcc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_stm32_rcc;
+ device_class_set_legacy_reset(dc, stm32_rcc_reset);
+}
+
+static const TypeInfo stm32_rcc_info = {
+ .name = TYPE_STM32_RCC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(STM32RccState),
+ .instance_init = stm32_rcc_init,
+ .class_init = stm32_rcc_class_init,
+};
+
+static void stm32_rcc_register_types(void)
+{
+ type_register_static(&stm32_rcc_info);
+}
+
+type_init(stm32_rcc_register_types)
diff --git a/hw/misc/stm32f2xx_syscfg.c b/hw/misc/stm32f2xx_syscfg.c
index 19c1e86..d285896 100644
--- a/hw/misc/stm32f2xx_syscfg.c
+++ b/hw/misc/stm32f2xx_syscfg.c
@@ -138,11 +138,11 @@ static void stm32f2xx_syscfg_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}
-static void stm32f2xx_syscfg_class_init(ObjectClass *klass, void *data)
+static void stm32f2xx_syscfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f2xx_syscfg_reset;
+ device_class_set_legacy_reset(dc, stm32f2xx_syscfg_reset);
}
static const TypeInfo stm32f2xx_syscfg_info = {
diff --git a/hw/misc/stm32f4xx_exti.c b/hw/misc/stm32f4xx_exti.c
index 7bd3afc..0688e6e 100644
--- a/hw/misc/stm32f4xx_exti.c
+++ b/hw/misc/stm32f4xx_exti.c
@@ -164,11 +164,11 @@ static const VMStateDescription vmstate_stm32f4xx_exti = {
}
};
-static void stm32f4xx_exti_class_init(ObjectClass *klass, void *data)
+static void stm32f4xx_exti_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f4xx_exti_reset;
+ device_class_set_legacy_reset(dc, stm32f4xx_exti_reset);
dc->vmsd = &vmstate_stm32f4xx_exti;
}
diff --git a/hw/misc/stm32f4xx_syscfg.c b/hw/misc/stm32f4xx_syscfg.c
index 854fce6..addfb03 100644
--- a/hw/misc/stm32f4xx_syscfg.c
+++ b/hw/misc/stm32f4xx_syscfg.c
@@ -147,11 +147,11 @@ static const VMStateDescription vmstate_stm32f4xx_syscfg = {
}
};
-static void stm32f4xx_syscfg_class_init(ObjectClass *klass, void *data)
+static void stm32f4xx_syscfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f4xx_syscfg_reset;
+ device_class_set_legacy_reset(dc, stm32f4xx_syscfg_reset);
dc->vmsd = &vmstate_stm32f4xx_syscfg;
}
diff --git a/hw/misc/stm32l4x5_exti.c b/hw/misc/stm32l4x5_exti.c
index e281841..9c00216 100644
--- a/hw/misc/stm32l4x5_exti.c
+++ b/hw/misc/stm32l4x5_exti.c
@@ -271,7 +271,7 @@ static const VMStateDescription vmstate_stm32l4x5_exti = {
}
};
-static void stm32l4x5_exti_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_exti_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/misc/stm32l4x5_rcc.c b/hw/misc/stm32l4x5_rcc.c
index 417bd5e..0e1f27f 100644
--- a/hw/misc/stm32l4x5_rcc.c
+++ b/hw/misc/stm32l4x5_rcc.c
@@ -141,7 +141,7 @@ static const VMStateDescription clock_mux_vmstate = {
}
};
-static void clock_mux_class_init(ObjectClass *klass, void *data)
+static void clock_mux_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -150,6 +150,8 @@ static void clock_mux_class_init(ObjectClass *klass, void *data)
rc->phases.hold = clock_mux_reset_hold;
rc->phases.exit = clock_mux_reset_exit;
dc->vmsd = &clock_mux_vmstate;
+ /* Reason: Part of Stm32l4x5RccState component */
+ dc->user_creatable = false;
}
static void clock_mux_set_enable(RccClockMuxState *mux, bool enabled)
@@ -293,7 +295,7 @@ static const VMStateDescription pll_vmstate = {
}
};
-static void pll_class_init(ObjectClass *klass, void *data)
+static void pll_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -302,6 +304,8 @@ static void pll_class_init(ObjectClass *klass, void *data)
rc->phases.hold = pll_reset_hold;
rc->phases.exit = pll_reset_exit;
dc->vmsd = &pll_vmstate;
+ /* Reason: Part of Stm32l4x5RccState component */
+ dc->user_creatable = false;
}
static void pll_set_vco_multiplier(RccPllState *pll, uint32_t vco_multiplier)
@@ -543,19 +547,31 @@ static void rcc_update_cfgr_register(Stm32l4x5RccState *s)
uint32_t val;
/* MCOPRE */
val = FIELD_EX32(s->cfgr, CFGR, MCOPRE);
- assert(val <= 0b100);
- clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
- 1, 1 << val);
+ if (val > 0b100) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid MCOPRE value: 0x%"PRIx32"\n",
+ __func__, val);
+ clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
+ } else {
+ clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
+ 1, 1 << val);
+ }
/* MCOSEL */
val = FIELD_EX32(s->cfgr, CFGR, MCOSEL);
- assert(val <= 0b111);
- if (val == 0) {
+ if (val > 0b111) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid MCOSEL value: 0x%"PRIx32"\n",
+ __func__, val);
clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
} else {
- clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true);
- clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
- val - 1);
+ if (val == 0) {
+ clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
+ } else {
+ clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true);
+ clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
+ val - 1);
+ }
}
/* STOPWUCK */
@@ -1414,17 +1430,16 @@ static void stm32l4x5_rcc_realize(DeviceState *dev, Error **errp)
clock_update(s->gnd, 0);
}
-static Property stm32l4x5_rcc_properties[] = {
+static const Property stm32l4x5_rcc_properties[] = {
DEFINE_PROP_UINT64("hse_frequency", Stm32l4x5RccState,
hse_frequency, HSE_DEFAULT_FRQ),
DEFINE_PROP_UINT64("sai1_extclk_frequency", Stm32l4x5RccState,
sai1_extclk_frequency, 0),
DEFINE_PROP_UINT64("sai2_extclk_frequency", Stm32l4x5RccState,
sai2_extclk_frequency, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void stm32l4x5_rcc_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_rcc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/misc/stm32l4x5_syscfg.c b/hw/misc/stm32l4x5_syscfg.c
index a5a1ce2..4e21756 100644
--- a/hw/misc/stm32l4x5_syscfg.c
+++ b/hw/misc/stm32l4x5_syscfg.c
@@ -26,6 +26,9 @@
#include "trace.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
+#include "hw/clock.h"
+#include "hw/qdev-clock.h"
+#include "qapi/error.h"
#include "hw/misc/stm32l4x5_syscfg.h"
#include "hw/gpio/stm32l4x5_gpio.h"
@@ -225,12 +228,22 @@ static void stm32l4x5_syscfg_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), stm32l4x5_syscfg_set_irq,
GPIO_NUM_PINS * NUM_GPIOS);
qdev_init_gpio_out(DEVICE(obj), s->gpio_out, GPIO_NUM_PINS);
+ s->clk = qdev_init_clock_in(DEVICE(s), "clk", NULL, s, 0);
+}
+
+static void stm32l4x5_syscfg_realize(DeviceState *dev, Error **errp)
+{
+ Stm32l4x5SyscfgState *s = STM32L4X5_SYSCFG(dev);
+ if (!clock_has_source(s->clk)) {
+ error_setg(errp, "SYSCFG: clk input must be connected");
+ return;
+ }
}
static const VMStateDescription vmstate_stm32l4x5_syscfg = {
.name = TYPE_STM32L4X5_SYSCFG,
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (VMStateField[]) {
VMSTATE_UINT32(memrmp, Stm32l4x5SyscfgState),
VMSTATE_UINT32(cfgr1, Stm32l4x5SyscfgState),
@@ -241,16 +254,18 @@ static const VMStateDescription vmstate_stm32l4x5_syscfg = {
VMSTATE_UINT32(swpr, Stm32l4x5SyscfgState),
VMSTATE_UINT32(skr, Stm32l4x5SyscfgState),
VMSTATE_UINT32(swpr2, Stm32l4x5SyscfgState),
+ VMSTATE_CLOCK(clk, Stm32l4x5SyscfgState),
VMSTATE_END_OF_LIST()
}
};
-static void stm32l4x5_syscfg_class_init(ObjectClass *klass, void *data)
+static void stm32l4x5_syscfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
dc->vmsd = &vmstate_stm32l4x5_syscfg;
+ dc->realize = stm32l4x5_syscfg_realize;
rc->phases.hold = stm32l4x5_syscfg_hold_reset;
}
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index 1be0717..e3f64c0 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -130,13 +130,13 @@ mos6522_set_sr_int(void) "set sr_int"
mos6522_write(uint64_t addr, const char *name, uint64_t val) "reg=0x%"PRIx64 " [%s] val=0x%"PRIx64
mos6522_read(uint64_t addr, const char *name, unsigned val) "reg=0x%"PRIx64 " [%s] val=0x%x"
-# npcm7xx_clk.c
-npcm7xx_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
-npcm7xx_clk_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
+# npcm_clk.c
+npcm_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
+npcm_clk_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
-# npcm7xx_gcr.c
-npcm7xx_gcr_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
-npcm7xx_gcr_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
+# npcm_gcr.c
+npcm_gcr_read(uint64_t offset, uint64_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx64
+npcm_gcr_write(uint64_t offset, uint64_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx64
# npcm7xx_mft.c
npcm7xx_mft_read(const char *name, uint64_t offset, uint16_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx16
@@ -156,6 +156,12 @@ npcm7xx_pwm_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0
npcm7xx_pwm_update_freq(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Freq: old_freq: %u, new_freq: %u"
npcm7xx_pwm_update_duty(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Duty: old_duty: %u, new_duty: %u"
+# stm32_rcc.c
+stm32_rcc_read(uint64_t addr, uint64_t data) "reg read: addr: 0x%" PRIx64 " val: 0x%" PRIx64 ""
+stm32_rcc_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 ""
+stm32_rcc_pulse_enable(int line, int level) "Enable: %d to %d"
+stm32_rcc_pulse_reset(int line, int level) "Reset: %d to %d"
+
# stm32f4xx_syscfg.c
stm32f4xx_syscfg_set_irq(int gpio, int line, int level) "Interrupt: GPIO: %d, Line: %d; Level: %d"
stm32f4xx_pulse_exti(int irq) "Pulse EXTI: %d"
@@ -247,6 +253,12 @@ ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d"
ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32
ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32
+# imx6_src.c
+imx6_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32
+imx6_src_write(const char *reg_name, uint64_t value) "reg[%s] <= 0x%" PRIx64
+imx6_clear_reset_bit(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32
+imx6_src_reset(void) ""
+
# imx7_src.c
imx7_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32
imx7_src_write(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32
@@ -290,6 +302,14 @@ aspeed_peci_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%"
aspeed_peci_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64
aspeed_peci_raise_interrupt(uint32_t ctrl, uint32_t status) "ctrl 0x%" PRIx32 " status 0x%" PRIx32
+# aspeed_hace.c
+aspeed_hace_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64
+aspeed_hace_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64
+aspeed_hace_hash_sg(int index, uint64_t list_addr, uint64_t buf_addr, uint32_t len) "%d: list_addr 0x%" PRIx64 " buf_addr 0x%" PRIx64 " len 0x%" PRIx32
+aspeed_hace_hash_addr(const char *s, uint64_t addr) "%s: 0x%" PRIx64
+aspeed_hace_hash_execute_acc_mode(bool final_request) "final request: %d"
+aspeed_hace_hexdump(const char *desc, uint32_t offset, char *s) "%s: 0x%08x: %s"
+
# bcm2835_property.c
bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu"
@@ -362,3 +382,24 @@ aspeed_sli_read(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx
aspeed_sliio_write(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
aspeed_sliio_read(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
+# ivshmem-flat.c
+ivshmem_flat_irq_handler(uint16_t vector_id) "Caught interrupt request: vector %d"
+ivshmem_flat_new_peer(uint16_t peer_id) "New peer ID: %d"
+ivshmem_flat_add_vector_failure(uint16_t vector_id, uint32_t vector_fd, uint16_t peer_id) "Failed to add vector %u (fd = %u) to peer ID %u, maximum number of vectors reached"
+ivshmem_flat_add_vector_success(uint16_t vector_id, uint32_t vector_fd, uint16_t peer_id) "Successful addition of vector %u (fd = %u) to peer ID %u"
+ivshmem_flat_irq_resolved(const char *irq_qompath) "IRQ QOM path '%s' correctly resolved"
+ivshmem_flat_proto_ver_own_id(uint64_t proto_ver, uint16_t peer_id) "Protocol Version = 0x%"PRIx64", Own Peer ID = %u"
+ivshmem_flat_shmem_size(int fd, uint64_t size) "Shmem fd (%d) total size is %"PRIu64" byte(s)"
+ivshmem_flat_shmem_map(uint64_t addr) "Mapping shmem @ 0x%"PRIx64
+ivshmem_flat_mmio_map(uint64_t addr) "Mapping MMRs @ 0x%"PRIx64
+ivshmem_flat_read_mmr(uint64_t addr_offset) "Read access at offset %"PRIu64
+ivshmem_flat_read_mmr_doorbell(void) "DOORBELL register is write-only!"
+ivshmem_flat_read_write_mmr_invalid(uint64_t addr_offset) "No ivshmem register mapped at offset %"PRIu64
+ivshmem_flat_interrupt_invalid_peer(uint16_t peer_id) "Can't interrupt non-existing peer %u"
+ivshmem_flat_write_mmr(uint64_t addr_offset) "Write access at offset %"PRIu64
+ivshmem_flat_interrupt_peer(uint16_t peer_id, uint16_t vector_id) "Interrupting peer ID %u, vector %u..."
+
+# i2c-echo.c
+i2c_echo_event(const char *id, const char *event) "%s: %s"
+i2c_echo_recv(const char *id, uint8_t data) "%s: recv 0x%02" PRIx8
+i2c_echo_send(const char *id, uint8_t data) "%s: send 0x%02" PRIx8
diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c
index 92b9949..a158d4a 100644
--- a/hw/misc/tz-mpc.c
+++ b/hw/misc/tz-mpc.c
@@ -587,19 +587,18 @@ static const VMStateDescription tz_mpc_vmstate = {
}
};
-static Property tz_mpc_properties[] = {
+static const Property tz_mpc_properties[] = {
DEFINE_PROP_LINK("downstream", TZMPC, downstream,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void tz_mpc_class_init(ObjectClass *klass, void *data)
+static void tz_mpc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tz_mpc_realize;
dc->vmsd = &tz_mpc_vmstate;
- dc->reset = tz_mpc_reset;
+ device_class_set_legacy_reset(dc, tz_mpc_reset);
device_class_set_props(dc, tz_mpc_properties);
}
@@ -612,7 +611,7 @@ static const TypeInfo tz_mpc_info = {
};
static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/misc/tz-msc.c b/hw/misc/tz-msc.c
index de5a312..af0cc5d 100644
--- a/hw/misc/tz-msc.c
+++ b/hw/misc/tz-msc.c
@@ -278,21 +278,20 @@ static const VMStateDescription tz_msc_vmstate = {
}
};
-static Property tz_msc_properties[] = {
+static const Property tz_msc_properties[] = {
DEFINE_PROP_LINK("downstream", TZMSC, downstream,
TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_LINK("idau", TZMSC, idau,
TYPE_IDAU_INTERFACE, IDAUInterface *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void tz_msc_class_init(ObjectClass *klass, void *data)
+static void tz_msc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tz_msc_realize;
dc->vmsd = &tz_msc_vmstate;
- dc->reset = tz_msc_reset;
+ device_class_set_legacy_reset(dc, tz_msc_reset);
device_class_set_props(dc, tz_msc_properties);
}
diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c
index 6450778..e4235a8 100644
--- a/hw/misc/tz-ppc.c
+++ b/hw/misc/tz-ppc.c
@@ -305,7 +305,7 @@ static const VMStateDescription tz_ppc_vmstate = {
DEFINE_PROP_LINK("port[" #N "]", TZPPC, port[N].downstream, \
TYPE_MEMORY_REGION, MemoryRegion *)
-static Property tz_ppc_properties[] = {
+static const Property tz_ppc_properties[] = {
DEFINE_PROP_UINT32("NONSEC_MASK", TZPPC, nonsec_mask, 0),
DEFINE_PORT(0),
DEFINE_PORT(1),
@@ -323,16 +323,15 @@ static Property tz_ppc_properties[] = {
DEFINE_PORT(13),
DEFINE_PORT(14),
DEFINE_PORT(15),
- DEFINE_PROP_END_OF_LIST(),
};
-static void tz_ppc_class_init(ObjectClass *klass, void *data)
+static void tz_ppc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tz_ppc_realize;
dc->vmsd = &tz_ppc_vmstate;
- dc->reset = tz_ppc_reset;
+ device_class_set_legacy_reset(dc, tz_ppc_reset);
device_class_set_props(dc, tz_ppc_properties);
}
diff --git a/hw/misc/unimp.c b/hw/misc/unimp.c
index 6cfc572..4370c14 100644
--- a/hw/misc/unimp.c
+++ b/hw/misc/unimp.c
@@ -70,13 +70,12 @@ static void unimp_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
}
-static Property unimp_properties[] = {
+static const Property unimp_properties[] = {
DEFINE_PROP_UINT64("size", UnimplementedDeviceState, size, 0),
DEFINE_PROP_STRING("name", UnimplementedDeviceState, name),
- DEFINE_PROP_END_OF_LIST(),
};
-static void unimp_class_init(ObjectClass *klass, void *data)
+static void unimp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c
index 1a6c744..9f16093 100644
--- a/hw/misc/virt_ctrl.c
+++ b/hw/misc/virt_ctrl.c
@@ -10,7 +10,7 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "trace.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/misc/virt_ctrl.h"
enum {
@@ -125,11 +125,11 @@ static void virt_ctrl_instance_init(Object *obj)
sysbus_init_irq(dev, &s->irq);
}
-static void virt_ctrl_class_init(ObjectClass *oc, void *data)
+static void virt_ctrl_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- dc->reset = virt_ctrl_reset;
+ device_class_set_legacy_reset(dc, virt_ctrl_reset);
dc->realize = virt_ctrl_realize;
dc->vmsd = &vmstate_virt_ctrl;
}
diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c
index 833773a..9c2e900 100644
--- a/hw/misc/vmcoreinfo.c
+++ b/hw/misc/vmcoreinfo.c
@@ -13,22 +13,22 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/nvram/fw_cfg.h"
#include "migration/vmstate.h"
#include "hw/misc/vmcoreinfo.h"
-static void fw_cfg_vmci_write(void *dev, off_t offset, size_t len)
+static void fw_cfg_vmci_write(void *opaque, off_t offset, size_t len)
{
- VMCoreInfoState *s = VMCOREINFO(dev);
+ VMCoreInfoState *s = opaque;
s->has_vmcoreinfo = offset == 0 && len == sizeof(s->vmcoreinfo)
&& s->vmcoreinfo.guest_format != FW_CFG_VMCOREINFO_FORMAT_NONE;
}
-static void vmcoreinfo_reset(void *dev)
+static void vmcoreinfo_reset_hold(Object *obj, ResetType type)
{
- VMCoreInfoState *s = VMCOREINFO(dev);
+ VMCoreInfoState *s = VMCOREINFO(obj);
s->has_vmcoreinfo = false;
memset(&s->vmcoreinfo, 0, sizeof(s->vmcoreinfo));
@@ -47,13 +47,13 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp)
*/
if (!vmcoreinfo_find()) {
error_setg(errp, "at most one %s device is permitted",
- VMCOREINFO_DEVICE);
+ TYPE_VMCOREINFO);
return;
}
if (!fw_cfg || !fw_cfg->dma_enabled) {
error_setg(errp, "%s device requires fw_cfg with DMA",
- VMCOREINFO_DEVICE);
+ TYPE_VMCOREINFO);
return;
}
@@ -65,7 +65,7 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp)
* This device requires to register a global reset because it is
* not plugged to a bus (which, as its QOM parent, would reset it).
*/
- qemu_register_reset(vmcoreinfo_reset, dev);
+ qemu_register_resettable(OBJECT(s));
vmcoreinfo_state = s;
}
@@ -83,26 +83,25 @@ static const VMStateDescription vmstate_vmcoreinfo = {
},
};
-static void vmcoreinfo_device_class_init(ObjectClass *klass, void *data)
+static void vmcoreinfo_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
dc->vmsd = &vmstate_vmcoreinfo;
dc->realize = vmcoreinfo_realize;
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ rc->phases.hold = vmcoreinfo_reset_hold;
}
-static const TypeInfo vmcoreinfo_device_info = {
- .name = VMCOREINFO_DEVICE,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(VMCoreInfoState),
- .class_init = vmcoreinfo_device_class_init,
+static const TypeInfo vmcoreinfo_types[] = {
+ {
+ .name = TYPE_VMCOREINFO,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VMCoreInfoState),
+ .class_init = vmcoreinfo_device_class_init,
+ }
};
-static void vmcoreinfo_register_types(void)
-{
- type_register_static(&vmcoreinfo_device_info);
-}
-
-type_init(vmcoreinfo_register_types)
+DEFINE_TYPES(vmcoreinfo_types)
diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c
index 3fc838b..1ce083e 100644
--- a/hw/misc/xlnx-versal-cframe-reg.c
+++ b/hw/misc/xlnx-versal-cframe-reg.c
@@ -720,7 +720,7 @@ static const VMStateDescription vmstate_cframe_reg = {
}
};
-static Property cframe_regs_props[] = {
+static const Property cframe_regs_props[] = {
DEFINE_PROP_LINK("cfu-fdro", XlnxVersalCFrameReg, cfg.cfu_fdro,
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
DEFINE_PROP_UINT32("blktype0-frames", XlnxVersalCFrameReg,
@@ -737,7 +737,6 @@ static Property cframe_regs_props[] = {
cfg.blktype_num_frames[5], 0),
DEFINE_PROP_UINT32("blktype6-frames", XlnxVersalCFrameReg,
cfg.blktype_num_frames[6], 0),
- DEFINE_PROP_END_OF_LIST(),
};
static void cframe_bcast_reg_init(Object *obj)
@@ -771,7 +770,7 @@ static const VMStateDescription vmstate_cframe_bcast_reg = {
}
};
-static Property cframe_bcast_regs_props[] = {
+static const Property cframe_bcast_regs_props[] = {
DEFINE_PROP_LINK("cframe0", XlnxVersalCFrameBcastReg, cfg.cframe[0],
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
DEFINE_PROP_LINK("cframe1", XlnxVersalCFrameBcastReg, cfg.cframe[1],
@@ -802,10 +801,9 @@ static Property cframe_bcast_regs_props[] = {
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
DEFINE_PROP_LINK("cframe14", XlnxVersalCFrameBcastReg, cfg.cframe[14],
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void cframe_reg_class_init(ObjectClass *klass, void *data)
+static void cframe_reg_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -819,7 +817,7 @@ static void cframe_reg_class_init(ObjectClass *klass, void *data)
xcic->cfi_transfer_packet = cframe_reg_cfi_transfer_packet;
}
-static void cframe_bcast_reg_class_init(ObjectClass *klass, void *data)
+static void cframe_bcast_reg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -835,7 +833,7 @@ static const TypeInfo cframe_reg_info = {
.instance_size = sizeof(XlnxVersalCFrameReg),
.class_init = cframe_reg_class_init,
.instance_init = cframe_reg_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XLNX_CFI_IF },
{ }
}
diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c
index 6bb82e5..b920fc7 100644
--- a/hw/misc/xlnx-versal-cfu.c
+++ b/hw/misc/xlnx-versal-cfu.c
@@ -397,6 +397,13 @@ static void cfu_fdro_init(Object *obj)
fifo32_create(&s->fdro_data, 8 * KiB / sizeof(uint32_t));
}
+static void cfu_fdro_finalize(Object *obj)
+{
+ XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
+
+ fifo32_destroy(&s->fdro_data);
+}
+
static void cfu_fdro_reset_enter(Object *obj, ResetType type)
{
XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
@@ -419,7 +426,7 @@ static void cfu_fdro_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
}
}
-static Property cfu_props[] = {
+static const Property cfu_props[] = {
DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0],
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
DEFINE_PROP_LINK("cframe1", XlnxVersalCFUAPB, cfg.cframe[1],
@@ -450,13 +457,11 @@ static Property cfu_props[] = {
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
DEFINE_PROP_LINK("cframe14", XlnxVersalCFUAPB, cfg.cframe[14],
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property cfu_sfr_props[] = {
+static const Property cfu_sfr_props[] = {
DEFINE_PROP_LINK("cfu", XlnxVersalCFUSFR, cfg.cfu,
TYPE_XLNX_VERSAL_CFU_APB, XlnxVersalCFUAPB *),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_cfu_apb = {
@@ -491,16 +496,16 @@ static const VMStateDescription vmstate_cfu_sfr = {
}
};
-static void cfu_apb_class_init(ObjectClass *klass, void *data)
+static void cfu_apb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = cfu_apb_reset;
+ device_class_set_legacy_reset(dc, cfu_apb_reset);
dc->vmsd = &vmstate_cfu_apb;
device_class_set_props(dc, cfu_props);
}
-static void cfu_fdro_class_init(ObjectClass *klass, void *data)
+static void cfu_fdro_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -511,7 +516,7 @@ static void cfu_fdro_class_init(ObjectClass *klass, void *data)
rc->phases.enter = cfu_fdro_reset_enter;
}
-static void cfu_sfr_class_init(ObjectClass *klass, void *data)
+static void cfu_sfr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -527,7 +532,7 @@ static const TypeInfo cfu_apb_info = {
.instance_size = sizeof(XlnxVersalCFUAPB),
.class_init = cfu_apb_class_init,
.instance_init = cfu_apb_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XLNX_CFI_IF },
{ }
}
@@ -539,7 +544,8 @@ static const TypeInfo cfu_fdro_info = {
.instance_size = sizeof(XlnxVersalCFUFDRO),
.class_init = cfu_fdro_class_init,
.instance_init = cfu_fdro_init,
- .interfaces = (InterfaceInfo[]) {
+ .instance_finalize = cfu_fdro_finalize,
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XLNX_CFI_IF },
{ }
}
diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c
index f143900..08ff2fc 100644
--- a/hw/misc/xlnx-versal-crl.c
+++ b/hw/misc/xlnx-versal-crl.c
@@ -394,7 +394,7 @@ static const VMStateDescription vmstate_crl = {
}
};
-static void crl_class_init(ObjectClass *klass, void *data)
+static void crl_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c
index e469c04..d76df46 100644
--- a/hw/misc/xlnx-versal-pmc-iou-slcr.c
+++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c
@@ -1419,7 +1419,8 @@ static const VMStateDescription vmstate_pmc_iou_slcr = {
}
};
-static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass, void *data)
+static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/misc/xlnx-versal-trng.c b/hw/misc/xlnx-versal-trng.c
index 51eb760..f34dd3e 100644
--- a/hw/misc/xlnx-versal-trng.c
+++ b/hw/misc/xlnx-versal-trng.c
@@ -608,9 +608,8 @@ static void trng_init(Object *obj)
{
XlnxVersalTRng *s = XLNX_VERSAL_TRNG(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- RegisterInfoArray *reg_array;
- reg_array =
+ s->reg_array =
register_init_block32(DEVICE(obj), trng_regs_info,
ARRAY_SIZE(trng_regs_info),
s->regs_info, s->regs,
@@ -618,16 +617,17 @@ static void trng_init(Object *obj)
XLNX_VERSAL_TRNG_ERR_DEBUG,
R_MAX * 4);
- sysbus_init_mmio(sbd, &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->reg_array->mem);
sysbus_init_irq(sbd, &s->irq);
s->prng = g_rand_new();
}
-static void trng_unrealize(DeviceState *dev)
+static void trng_finalize(Object *obj)
{
- XlnxVersalTRng *s = XLNX_VERSAL_TRNG(dev);
+ XlnxVersalTRng *s = XLNX_VERSAL_TRNG(obj);
+ register_finalize_block(s->reg_array);
g_rand_free(s->prng);
s->prng = NULL;
}
@@ -641,7 +641,7 @@ static void trng_prop_fault_event_set(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *events = object_field_prop_ptr(obj, prop);
if (!visit_type_uint32(v, name, events, errp)) {
@@ -652,7 +652,7 @@ static void trng_prop_fault_event_set(Object *obj, Visitor *v,
}
static const PropertyInfo trng_prop_fault_events = {
- .name = "uint32:bits",
+ .type = "uint32",
.description = "Set to trigger TRNG fault events",
.set = trng_prop_fault_event_set,
.realized_set_allowed = true,
@@ -660,13 +660,12 @@ static const PropertyInfo trng_prop_fault_events = {
static PropertyInfo trng_prop_uint64; /* to extend qdev_prop_uint64 */
-static Property trng_props[] = {
- DEFINE_PROP_UINT64("forced-prng", XlnxVersalTRng, forced_prng_seed, 0),
+static const Property trng_props[] = {
+ DEFINE_PROP_UNSIGNED("forced-prng", XlnxVersalTRng, forced_prng_seed,
+ 0, trng_prop_uint64, uint64_t),
DEFINE_PROP_UINT32("hw-version", XlnxVersalTRng, hw_version, 0x0200),
DEFINE_PROP("fips-fault-events", XlnxVersalTRng, forced_faults,
trng_prop_fault_events, uint32_t),
-
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_trng = {
@@ -683,19 +682,17 @@ static const VMStateDescription vmstate_trng = {
}
};
-static void trng_class_init(ObjectClass *klass, void *data)
+static void trng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
dc->vmsd = &vmstate_trng;
- dc->unrealize = trng_unrealize;
rc->phases.hold = trng_reset_hold;
/* Clone uint64 property with set allowed after realized */
trng_prop_uint64 = qdev_prop_uint64;
trng_prop_uint64.realized_set_allowed = true;
- trng_props[0].info = &trng_prop_uint64;
device_class_set_props(dc, trng_props);
}
@@ -706,6 +703,7 @@ static const TypeInfo trng_info = {
.instance_size = sizeof(XlnxVersalTRng),
.class_init = trng_class_init,
.instance_init = trng_init,
+ .instance_finalize = trng_finalize,
};
static void trng_register_types(void)
diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c
index ad839ce..07370b8 100644
--- a/hw/misc/xlnx-versal-xramc.c
+++ b/hw/misc/xlnx-versal-xramc.c
@@ -218,12 +218,11 @@ static const VMStateDescription vmstate_xram_ctrl = {
}
};
-static Property xram_ctrl_properties[] = {
+static const Property xram_ctrl_properties[] = {
DEFINE_PROP_UINT64("size", XlnxXramCtrl, cfg.size, 1 * MiB),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xram_ctrl_class_init(ObjectClass *klass, void *data)
+static void xram_ctrl_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c
index 87e4a14..e85da32 100644
--- a/hw/misc/xlnx-zynqmp-apu-ctrl.c
+++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c
@@ -224,7 +224,7 @@ static const VMStateDescription vmstate_zynqmp_apu = {
}
};
-static void zynqmp_apu_class_init(ObjectClass *klass, void *data)
+static void zynqmp_apu_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c
index e5aba56..cccca0e 100644
--- a/hw/misc/xlnx-zynqmp-crf.c
+++ b/hw/misc/xlnx-zynqmp-crf.c
@@ -239,7 +239,7 @@ static const VMStateDescription vmstate_crf = {
}
};
-static void crf_class_init(ObjectClass *klass, void *data)
+static void crf_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index ad814c3..010387b 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -16,7 +16,7 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
@@ -623,12 +623,11 @@ static const VMStateDescription vmstate_zynq_slcr = {
}
};
-static Property zynq_slcr_props[] = {
+static const Property zynq_slcr_props[] = {
DEFINE_PROP_UINT8("boot-mode", ZynqSLCRState, boot_mode, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void zynq_slcr_class_init(ObjectClass *klass, void *data)
+static void zynq_slcr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/net/Kconfig b/hw/net/Kconfig
index 7fcc0d7..7f80218 100644
--- a/hw/net/Kconfig
+++ b/hw/net/Kconfig
@@ -62,8 +62,12 @@ config VMXNET3_PCI
config SMC91C111
bool
+config LAN9118_PHY
+ bool
+
config LAN9118
bool
+ select LAN9118_PHY
select PTIMER
config NE2000_ISA
@@ -89,6 +93,7 @@ config ALLWINNER_SUN8I_EMAC
config IMX_FEC
bool
+ select LAN9118_PHY
config CADENCE
bool
diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c
index 108ae9c..30a8157 100644
--- a/hw/net/allwinner-sun8i-emac.c
+++ b/hw/net/allwinner-sun8i-emac.c
@@ -30,7 +30,7 @@
#include "net/checksum.h"
#include "qemu/module.h"
#include "exec/cpu-common.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/net/allwinner-sun8i-emac.h"
/* EMAC register offsets */
@@ -784,7 +784,7 @@ static void allwinner_sun8i_emac_set_link(NetClientState *nc)
static const MemoryRegionOps allwinner_sun8i_emac_mem_ops = {
.read = allwinner_sun8i_emac_read,
.write = allwinner_sun8i_emac_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -829,12 +829,11 @@ static void allwinner_sun8i_emac_realize(DeviceState *dev, Error **errp)
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
-static Property allwinner_sun8i_emac_properties[] = {
+static const Property allwinner_sun8i_emac_properties[] = {
DEFINE_NIC_PROPERTIES(AwSun8iEmacState, conf),
DEFINE_PROP_UINT8("phy-addr", AwSun8iEmacState, mii_phy_addr, 0),
DEFINE_PROP_LINK("dma-memory", AwSun8iEmacState, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static int allwinner_sun8i_emac_post_load(void *opaque, int version_id)
@@ -876,12 +875,13 @@ static const VMStateDescription vmstate_aw_emac = {
}
};
-static void allwinner_sun8i_emac_class_init(ObjectClass *klass, void *data)
+static void allwinner_sun8i_emac_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = allwinner_sun8i_emac_realize;
- dc->reset = allwinner_sun8i_emac_reset;
+ device_class_set_legacy_reset(dc, allwinner_sun8i_emac_reset);
dc->vmsd = &vmstate_aw_emac;
device_class_set_props(dc, allwinner_sun8i_emac_properties);
}
diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c
index 9898397..77d089d 100644
--- a/hw/net/allwinner_emac.c
+++ b/hw/net/allwinner_emac.c
@@ -349,7 +349,7 @@ static void aw_emac_write(void *opaque, hwaddr offset, uint64_t value,
"allwinner_emac: TX length > fifo data length\n");
}
if (len > 0) {
- data = fifo8_pop_buf(fifo, len, &ret);
+ data = fifo8_pop_bufptr(fifo, len, &ret);
qemu_send_packet(nc, data, ret);
aw_emac_tx_reset(s, chan);
/* Raise TX interrupt */
@@ -421,7 +421,7 @@ static void aw_emac_set_link(NetClientState *nc)
static const MemoryRegionOps aw_emac_mem_ops = {
.read = aw_emac_read,
.write = aw_emac_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -462,10 +462,9 @@ static void aw_emac_realize(DeviceState *dev, Error **errp)
fifo8_create(&s->tx_fifo[1], TX_FIFO_SIZE);
}
-static Property aw_emac_properties[] = {
+static const Property aw_emac_properties[] = {
DEFINE_NIC_PROPERTIES(AwEmacState, conf),
DEFINE_PROP_UINT8("phy-addr", AwEmacState, phy_addr, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_mii = {
@@ -515,13 +514,13 @@ static const VMStateDescription vmstate_aw_emac = {
}
};
-static void aw_emac_class_init(ObjectClass *klass, void *data)
+static void aw_emac_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aw_emac_realize;
device_class_set_props(dc, aw_emac_properties);
- dc->reset = aw_emac_reset;
+ device_class_set_legacy_reset(dc, aw_emac_reset);
dc->vmsd = &vmstate_aw_emac;
}
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index ec7bf56..50025d5 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -23,7 +23,7 @@
*/
#include "qemu/osdep.h"
-#include <zlib.h> /* For crc32 */
+#include <zlib.h> /* for crc32 */
#include "hw/irq.h"
#include "hw/net/cadence_gem.h"
@@ -33,7 +33,7 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "net/checksum.h"
#include "net/eth.h"
@@ -909,8 +909,8 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
/* Compare A, B, C */
for (j = 0; j < 3; j++) {
- uint32_t cr0, cr1, mask, compare;
- uint16_t rx_cmp;
+ uint32_t cr0, cr1, mask, compare, disable_mask;
+ uint32_t rx_cmp;
int offset;
int cr_idx = extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT + j * 6,
R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH);
@@ -946,9 +946,25 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
break;
}
- rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset];
- mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE);
- compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE);
+ disable_mask =
+ FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, DISABLE_MASK);
+ if (disable_mask) {
+ /*
+ * If disable_mask is set, mask_value is used as an
+ * additional 2 byte Compare Value; that is equivalent
+ * to using the whole cr0 register as the comparison value.
+ * Load 32 bits of data from rx_buf, and set mask to
+ * all-ones so we compare all 32 bits.
+ */
+ rx_cmp = ldl_le_p(rxbuf_ptr + offset);
+ mask = 0xFFFFFFFF;
+ compare = cr0;
+ } else {
+ rx_cmp = lduw_le_p(rxbuf_ptr + offset);
+ mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE);
+ compare =
+ FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE);
+ }
if ((rx_cmp & mask) == (compare & mask)) {
matched = true;
@@ -1784,7 +1800,7 @@ static const VMStateDescription vmstate_cadence_gem = {
}
};
-static Property gem_properties[] = {
+static const Property gem_properties[] = {
DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
GEM_MODID_VALUE),
@@ -1799,17 +1815,16 @@ static Property gem_properties[] = {
jumbo_max_len, 10240),
DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void gem_class_init(ObjectClass *klass, void *data)
+static void gem_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = gem_realize;
device_class_set_props(dc, gem_properties);
dc->vmsd = &vmstate_cadence_gem;
- dc->reset = gem_reset;
+ device_class_set_legacy_reset(dc, gem_reset);
}
static const TypeInfo gem_info = {
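The get_queue_from_screen() change above widens the type 2 compare: when DISABLE_MASK is set in compare word 1, the mask field doubles as two extra compare bytes, so the model loads 32 bits of packet data little-endian and matches them against the whole cr0 word; otherwise it keeps the 16-bit compare with a 16-bit mask. A self-contained illustration of how the two loads interpret the same bytes (the lduw_le/ldl_le helpers below only mimic QEMU's lduw_le_p()/ldl_le_p(); they are not taken from this patch):

#include <stdint.h>
#include <stdio.h>

static uint16_t lduw_le(const uint8_t *p)
{
    return p[0] | (p[1] << 8);
}

static uint32_t ldl_le(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    const uint8_t rxbuf[4] = { 0x34, 0x12, 0x78, 0x56 };

    /* 16-bit compare path: masked against the CR0 MASK_VALUE field */
    printf("lduw_le -> 0x%04x\n", lduw_le(rxbuf));  /* 0x1234 */
    /* DISABLE_MASK path: all 32 bits compared against the whole cr0 */
    printf("ldl_le  -> 0x%08x\n", ldl_le(rxbuf));   /* 0x56781234 */
    return 0;
}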
diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c
index bf41e6b..be16769 100644
--- a/hw/net/can/can_kvaser_pci.c
+++ b/hw/net/can/can_kvaser_pci.c
@@ -30,12 +30,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/event_notifier.h"
#include "qemu/module.h"
-#include "qemu/thread.h"
-#include "qemu/sockets.h"
#include "qapi/error.h"
-#include "chardev/char.h"
#include "hw/irq.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
@@ -286,7 +282,7 @@ static void kvaser_pci_instance_init(Object *obj)
0);
}
-static void kvaser_pci_class_init(ObjectClass *klass, void *data)
+static void kvaser_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -299,7 +295,7 @@ static void kvaser_pci_class_init(ObjectClass *klass, void *data)
k->class_id = 0x00ff00;
dc->desc = "Kvaser PCICANx";
dc->vmsd = &vmstate_kvaser_pci;
- dc->reset = kvaser_pci_reset;
+ device_class_set_legacy_reset(dc, kvaser_pci_reset);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
@@ -309,7 +305,7 @@ static const TypeInfo kvaser_pci_info = {
.instance_size = sizeof(KvaserPCIState),
.class_init = kvaser_pci_class_init,
.instance_init = kvaser_pci_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c
index 308b17e..44f3ba3 100644
--- a/hw/net/can/can_mioe3680_pci.c
+++ b/hw/net/can/can_mioe3680_pci.c
@@ -26,12 +26,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/event_notifier.h"
#include "qemu/module.h"
-#include "qemu/thread.h"
-#include "qemu/sockets.h"
#include "qapi/error.h"
-#include "chardev/char.h"
#include "hw/irq.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
@@ -227,7 +223,7 @@ static void mioe3680_pci_instance_init(Object *obj)
0);
}
-static void mioe3680_pci_class_init(ObjectClass *klass, void *data)
+static void mioe3680_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -243,7 +239,7 @@ static void mioe3680_pci_class_init(ObjectClass *klass, void *data)
dc->desc = "Mioe3680 PCICANx";
dc->vmsd = &vmstate_mioe3680_pci;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = mioe3680_pci_reset;
+ device_class_set_legacy_reset(dc, mioe3680_pci_reset);
}
static const TypeInfo mioe3680_pci_info = {
@@ -252,7 +248,7 @@ static const TypeInfo mioe3680_pci_info = {
.instance_size = sizeof(Mioe3680PCIState),
.class_init = mioe3680_pci_class_init,
.instance_init = mioe3680_pci_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c
index e4c8d93..7296d63 100644
--- a/hw/net/can/can_pcm3680_pci.c
+++ b/hw/net/can/can_pcm3680_pci.c
@@ -26,12 +26,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/event_notifier.h"
#include "qemu/module.h"
-#include "qemu/thread.h"
-#include "qemu/sockets.h"
#include "qapi/error.h"
-#include "chardev/char.h"
#include "hw/irq.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
@@ -228,7 +224,7 @@ static void pcm3680i_pci_instance_init(Object *obj)
0);
}
-static void pcm3680i_pci_class_init(ObjectClass *klass, void *data)
+static void pcm3680i_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -244,7 +240,7 @@ static void pcm3680i_pci_class_init(ObjectClass *klass, void *data)
dc->desc = "Pcm3680i PCICANx";
dc->vmsd = &vmstate_pcm3680i_pci;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = pcm3680i_pci_reset;
+ device_class_set_legacy_reset(dc, pcm3680i_pci_reset);
}
static const TypeInfo pcm3680i_pci_info = {
@@ -253,7 +249,7 @@ static const TypeInfo pcm3680i_pci_info = {
.instance_size = sizeof(Pcm3680iPCIState),
.class_init = pcm3680i_pci_class_init,
.instance_init = pcm3680i_pci_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c
index 6694d7b..5b6ba9d 100644
--- a/hw/net/can/can_sja1000.c
+++ b/hw/net/can/can_sja1000.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
-#include "chardev/char.h"
+#include "qemu/bitops.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "net/can_emu.h"
diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c
index 812b83e..17131a4 100644
--- a/hw/net/can/ctucan_core.c
+++ b/hw/net/can/ctucan_core.c
@@ -28,7 +28,8 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
-#include "chardev/char.h"
+#include "qemu/bswap.h"
+#include "qemu/bitops.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "net/can_emu.h"
@@ -399,8 +400,6 @@ void ctucan_mem_write(CtuCanCoreState *s, hwaddr addr, uint64_t val,
ctucan_update_irq(s);
}
-
- return;
}
uint64_t ctucan_mem_read(CtuCanCoreState *s, hwaddr addr, unsigned size)
diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c
index d8f7344d..bed6785 100644
--- a/hw/net/can/ctucan_pci.c
+++ b/hw/net/can/ctucan_pci.c
@@ -27,12 +27,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/event_notifier.h"
#include "qemu/module.h"
-#include "qemu/thread.h"
-#include "qemu/sockets.h"
#include "qapi/error.h"
-#include "chardev/char.h"
#include "hw/irq.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
@@ -241,7 +237,7 @@ static void ctucan_pci_instance_init(Object *obj)
#endif
}
-static void ctucan_pci_class_init(ObjectClass *klass, void *data)
+static void ctucan_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -257,7 +253,7 @@ static void ctucan_pci_class_init(ObjectClass *klass, void *data)
dc->desc = "CTU CAN PCI";
dc->vmsd = &vmstate_ctucan_pci;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = ctucan_pci_reset;
+ device_class_set_legacy_reset(dc, ctucan_pci_reset);
}
static const TypeInfo ctucan_pci_info = {
@@ -266,7 +262,7 @@ static const TypeInfo ctucan_pci_info = {
.instance_size = sizeof(CtuCanPCIState),
.class_init = ctucan_pci_class_init,
.instance_init = ctucan_pci_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c
index 5f083c2..3eb1119 100644
--- a/hw/net/can/xlnx-versal-canfd.c
+++ b/hw/net/can/xlnx-versal-canfd.c
@@ -678,12 +678,10 @@ REG32(RB_DW15_REGISTER_1, 0x4144)
FIELD(RB_DW15_REGISTER_1, DATA_BYTES62, 8, 8)
FIELD(RB_DW15_REGISTER_1, DATA_BYTES63, 0, 8)
-static uint8_t canfd_dlc_array[8] = {8, 12, 16, 20, 24, 32, 48, 64};
-
static void canfd_update_irq(XlnxVersalCANFDState *s)
{
- unsigned int irq = s->regs[R_INTERRUPT_STATUS_REGISTER] &
- s->regs[R_INTERRUPT_ENABLE_REGISTER];
+ const bool irq = (s->regs[R_INTERRUPT_STATUS_REGISTER] &
+ s->regs[R_INTERRUPT_ENABLE_REGISTER]) != 0;
g_autofree char *path = object_get_canonical_path(OBJECT(s));
/* RX watermark interrupts. */
@@ -869,6 +867,10 @@ static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame,
uint32_t val = 0;
uint32_t dlc_reg_val = 0;
uint32_t dlc_value = 0;
+ uint32_t id_reg_val = 0;
+ bool is_rtr = false;
+
+ frame->flags = 0;
/* Check that reg_num should be within TX register space. */
assert(reg_num <= R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE *
@@ -877,56 +879,37 @@ static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame,
dlc_reg_val = s->regs[reg_num + 1];
dlc_value = FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, DLC);
- frame->can_id = s->regs[reg_num];
+ id_reg_val = s->regs[reg_num];
+ if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, IDE)) {
+ frame->can_id = (FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID) << 18) |
+ (FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID_EXT)) |
+ QEMU_CAN_EFF_FLAG;
+ if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, RTR_RRS)) {
+ is_rtr = true;
+ }
+ } else {
+ frame->can_id = FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID);
+ if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, SRR_RTR_RRS)) {
+ is_rtr = true;
+ }
+ }
if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, FDF)) {
- /*
- * CANFD frame.
- * Converting dlc(0 to 15) 4 Byte data to plain length(i.e. 0 to 64)
- * 1 Byte data. This is done to make it work with SocketCAN.
- * On actual CANFD frame, this value can't be more than 0xF.
- * Conversion table for DLC to plain length:
- *
- * DLC Plain Length
- * 0 - 8 0 - 8
- * 9 9 - 12
- * 10 13 - 16
- * 11 17 - 20
- * 12 21 - 24
- * 13 25 - 32
- * 14 33 - 48
- * 15 49 - 64
- */
-
- frame->flags = QEMU_CAN_FRMF_TYPE_FD;
+ frame->flags |= QEMU_CAN_FRMF_TYPE_FD;
- if (dlc_value < 8) {
- frame->can_dlc = dlc_value;
- } else {
- assert((dlc_value - 8) < ARRAY_SIZE(canfd_dlc_array));
- frame->can_dlc = canfd_dlc_array[dlc_value - 8];
+ if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, BRS)) {
+ frame->flags |= QEMU_CAN_FRMF_BRS;
}
} else {
- /*
- * FD Format bit not set that means it is a CAN Frame.
- * Conversion table for classic CAN:
- *
- * DLC Plain Length
- * 0 - 7 0 - 7
- * 8 - 15 8
- */
-
- if (dlc_value > 8) {
- frame->can_dlc = 8;
- qemu_log_mask(LOG_GUEST_ERROR, "Maximum DLC value for Classic CAN"
- " frame is 8. Only 8 byte data will be sent.\n");
- } else {
- frame->can_dlc = dlc_value;
+ if (is_rtr) {
+ frame->can_id |= QEMU_CAN_RTR_FLAG;
}
}
+ frame->can_dlc = can_dlc2len(dlc_value);
+
for (j = 0; j < frame->can_dlc; j++) {
- val = 8 * i;
+ val = 8 * (3 - i);
frame->data[j] = extract32(s->regs[reg_num + 2 + (j / 4)], val, 8);
i++;
@@ -948,6 +931,33 @@ static void process_cancellation_requests(XlnxVersalCANFDState *s)
canfd_update_irq(s);
}
+static uint32_t frame_to_reg_id(const qemu_can_frame *frame)
+{
+ uint32_t id_reg_val = 0;
+ const bool is_canfd_frame = frame->flags & QEMU_CAN_FRMF_TYPE_FD;
+ const bool is_rtr = !is_canfd_frame && (frame->can_id & QEMU_CAN_RTR_FLAG);
+
+ if (frame->can_id & QEMU_CAN_EFF_FLAG) {
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID,
+ (frame->can_id & QEMU_CAN_EFF_MASK) >> 18);
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID_EXT,
+ frame->can_id & QEMU_CAN_EFF_MASK);
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, IDE, 1);
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, SRR_RTR_RRS, 1);
+ if (is_rtr) {
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, RTR_RRS, 1);
+ }
+ } else {
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID,
+ frame->can_id & QEMU_CAN_SFF_MASK);
+ if (is_rtr) {
+ id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, SRR_RTR_RRS, 1);
+ }
+ }
+
+ return id_reg_val;
+}
+
static void store_rx_sequential(XlnxVersalCANFDState *s,
const qemu_can_frame *frame,
uint32_t fill_level, uint32_t read_index,
@@ -955,7 +965,6 @@ static void store_rx_sequential(XlnxVersalCANFDState *s,
bool rx_fifo_id, uint8_t filter_index)
{
int i;
- bool is_canfd_frame;
uint8_t dlc = frame->can_dlc;
uint8_t rx_reg_num = 0;
uint32_t dlc_reg_val = 0;
@@ -999,30 +1008,21 @@ static void store_rx_sequential(XlnxVersalCANFDState *s,
NUM_REGS_PER_MSG_SPACE));
}
- s->regs[store_location] = frame->can_id;
+ s->regs[store_location] = frame_to_reg_id(frame);
- dlc = frame->can_dlc;
+ dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, can_len2dlc(dlc));
- if (frame->flags == QEMU_CAN_FRMF_TYPE_FD) {
- is_canfd_frame = true;
+ if (frame->flags & QEMU_CAN_FRMF_TYPE_FD) {
+ dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, 1);
- /* Store dlc value in Xilinx specific format. */
- for (i = 0; i < ARRAY_SIZE(canfd_dlc_array); i++) {
- if (canfd_dlc_array[i] == frame->can_dlc) {
- dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, 8 + i);
- }
+ if (frame->flags & QEMU_CAN_FRMF_BRS) {
+ dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, BRS, 1);
}
- } else {
- is_canfd_frame = false;
-
- if (frame->can_dlc > 8) {
- dlc = 8;
+ if (frame->flags & QEMU_CAN_FRMF_ESI) {
+ dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, ESI, 1);
}
-
- dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, dlc);
}
- dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, is_canfd_frame);
dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, TIMESTAMP, rx_timestamp);
dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, MATCHED_FILTER_INDEX,
filter_index);
@@ -1034,19 +1034,19 @@ static void store_rx_sequential(XlnxVersalCANFDState *s,
case 0:
rx_reg_num = i / 4;
- data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3,
+ data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0,
frame->data[i]);
break;
case 1:
- data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2,
+ data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1,
frame->data[i]);
break;
case 2:
- data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1,
+ data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2,
frame->data[i]);
break;
case 3:
- data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0,
+ data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3,
frame->data[i]);
/*
* Last Bytes data which means we have all 4 bytes ready to
@@ -1090,11 +1090,12 @@ static void update_rx_sequential(XlnxVersalCANFDState *s,
if (s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]) {
uint32_t acceptance_filter_status =
s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER];
+ const uint32_t reg_id = frame_to_reg_id(frame);
for (i = 0; i < 32; i++) {
if (acceptance_filter_status & 0x1) {
uint32_t msg_id_masked = s->regs[R_AFMR_REGISTER + 2 * i] &
- frame->can_id;
+ reg_id;
uint32_t afir_id_masked = s->regs[R_AFIR_REGISTER + 2 * i] &
s->regs[R_AFMR_REGISTER + 2 * i];
uint16_t std_msg_id_masked = FIELD_EX32(msg_id_masked,
@@ -1143,18 +1144,8 @@ static void update_rx_sequential(XlnxVersalCANFDState *s,
read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, RI);
store_index = read_index + fill_level;
- if (read_index == s->cfg.rx0_fifo - 1) {
- /*
- * When ri is s->cfg.rx0_fifo - 1 i.e. max, it goes cyclic that
- * means we reset the ri to 0x0.
- */
- read_index = 0;
- ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI,
- read_index);
- }
-
if (store_index > s->cfg.rx0_fifo - 1) {
- store_index -= s->cfg.rx0_fifo - 1;
+ store_index -= s->cfg.rx0_fifo;
}
store_location = R_RB_ID_REGISTER +
@@ -1171,18 +1162,8 @@ static void update_rx_sequential(XlnxVersalCANFDState *s,
RI_1);
store_index = read_index + fill_level;
- if (read_index == s->cfg.rx1_fifo - 1) {
- /*
- * When ri is s->cfg.rx1_fifo - 1 i.e. max, it goes cyclic that
- * means we reset the ri to 0x0.
- */
- read_index = 0;
- ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1,
- read_index);
- }
-
if (store_index > s->cfg.rx1_fifo - 1) {
- store_index -= s->cfg.rx1_fifo - 1;
+ store_index -= s->cfg.rx1_fifo;
}
store_location = R_RB_ID_REGISTER_1 +
@@ -1264,18 +1245,8 @@ static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid)
" Discarding the message\n");
ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEOFLW, 1);
} else {
- if (read_index == s->cfg.tx_fifo - 1) {
- /*
- * When ri is s->cfg.tx_fifo - 1 i.e. max, it goes cyclic that
- * means we reset the ri to 0x0.
- */
- read_index = 0;
- ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI,
- read_index);
- }
-
if (store_index > s->cfg.tx_fifo - 1) {
- store_index -= s->cfg.tx_fifo - 1;
+ store_index -= s->cfg.tx_fifo;
}
assert(store_index < s->cfg.tx_fifo);
@@ -1307,7 +1278,7 @@ static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid)
}
}
-static gint g_cmp_ids(gconstpointer data1, gconstpointer data2)
+static gint g_cmp_ids(gconstpointer data1, gconstpointer data2, gpointer d)
{
tx_ready_reg_info *tx_reg_1 = (tx_ready_reg_info *) data1;
tx_ready_reg_info *tx_reg_2 = (tx_ready_reg_info *) data2;
@@ -1327,8 +1298,6 @@ static void free_list(GSList *list)
}
g_slist_free(list);
-
- return;
}
static GSList *prepare_tx_data(XlnxVersalCANFDState *s)
@@ -1347,7 +1316,7 @@ static GSList *prepare_tx_data(XlnxVersalCANFDState *s)
temp->can_id = s->regs[reg_num];
temp->reg_num = reg_num;
list = g_slist_prepend(list, temp);
- list = g_slist_sort(list, g_cmp_ids);
+ list = g_slist_sort_with_data(list, g_cmp_ids, NULL);
}
reg_ready >>= 1;
@@ -2071,7 +2040,7 @@ static const VMStateDescription vmstate_canfd = {
}
};
-static Property canfd_core_properties[] = {
+static const Property canfd_core_properties[] = {
DEFINE_PROP_UINT8("rx-fifo0", XlnxVersalCANFDState, cfg.rx0_fifo, 0x40),
DEFINE_PROP_UINT8("rx-fifo1", XlnxVersalCANFDState, cfg.rx1_fifo, 0x40),
DEFINE_PROP_UINT8("tx-fifo", XlnxVersalCANFDState, cfg.tx_fifo, 0x20),
@@ -2081,14 +2050,13 @@ static Property canfd_core_properties[] = {
CANFD_DEFAULT_CLOCK),
DEFINE_PROP_LINK("canfdbus", XlnxVersalCANFDState, canfdbus, TYPE_CAN_BUS,
CanBusState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void canfd_class_init(ObjectClass *klass, void *data)
+static void canfd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = canfd_reset;
+ device_class_set_legacy_reset(dc, canfd_reset);
dc->realize = canfd_realize;
device_class_set_props(dc, canfd_core_properties);
dc->vmsd = &vmstate_canfd;
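xlnx-versal-canfd now leans on the generic can_dlc2len()/can_len2dlc() helpers instead of its private canfd_dlc_array, so TX and RX share one CAN FD DLC mapping. A sketch of that mapping, reconstructed from the conversion table in the removed comment (the sketch_* helpers are illustrative, not the actual net/can implementation):

#include <stdint.h>

static const uint8_t dlc_to_len[16] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

static uint8_t sketch_dlc2len(uint8_t dlc)
{
    return dlc_to_len[dlc & 0x0f];
}

static uint8_t sketch_len2dlc(uint8_t len)
{
    uint8_t dlc;

    if (len <= 8) {
        return len;
    }
    /* round the payload length up to the next representable size */
    for (dlc = 9; dlc < 15; dlc++) {
        if (len <= dlc_to_len[dlc]) {
            return dlc;
        }
    }
    return 15;   /* 49..64 bytes map to DLC 15 */
}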
diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c
index 58f1432..ca9edd4 100644
--- a/hw/net/can/xlnx-zynqmp-can.c
+++ b/hw/net/can/xlnx-zynqmp-can.c
@@ -1169,15 +1169,14 @@ static const VMStateDescription vmstate_can = {
}
};
-static Property xlnx_zynqmp_can_properties[] = {
+static const Property xlnx_zynqmp_can_properties[] = {
DEFINE_PROP_UINT32("ext_clk_freq", XlnxZynqMPCANState, cfg.ext_clk_freq,
CAN_DEFAULT_CLOCK),
DEFINE_PROP_LINK("canbus", XlnxZynqMPCANState, canbus, TYPE_CAN_BUS,
CanBusState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xlnx_zynqmp_can_class_init(ObjectClass *klass, void *data)
+static void xlnx_zynqmp_can_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
index bf0652d..d490320 100644
--- a/hw/net/dp8393x.c
+++ b/hw/net/dp8393x.c
@@ -27,7 +27,7 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/timer.h"
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "qom/object.h"
#include "trace.h"
@@ -931,22 +931,21 @@ static const VMStateDescription vmstate_dp8393x = {
}
};
-static Property dp8393x_properties[] = {
+static const Property dp8393x_properties[] = {
DEFINE_NIC_PROPERTIES(dp8393xState, conf),
DEFINE_PROP_LINK("dma_mr", dp8393xState, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_UINT8("it_shift", dp8393xState, it_shift, 0),
DEFINE_PROP_BOOL("big_endian", dp8393xState, big_endian, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void dp8393x_class_init(ObjectClass *klass, void *data)
+static void dp8393x_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->realize = dp8393x_realize;
- dc->reset = dp8393x_reset;
+ device_class_set_legacy_reset(dc, dp8393x_reset);
dc->vmsd = &vmstate_dp8393x;
device_class_set_props(dc, dp8393x_properties);
}
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 5012b96..a80a7b0 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -33,8 +33,8 @@
#include "net/eth.h"
#include "net/net.h"
#include "net/checksum.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/dma.h"
+#include "system/system.h"
+#include "system/dma.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/range.h"
@@ -127,10 +127,8 @@ struct E1000State_st {
QEMUTimer *flush_queue_timer;
/* Compatibility flags for migration to/from qemu 1.3.0 and older */
-#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_VET_BIT 4
-#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
#define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT)
@@ -1212,52 +1210,51 @@ enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };
-#define markflag(x) ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
* f - flag bits (up to 6 possible flags)
* n - flag needed
- * p - partially implenented */
+ * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
- [IPAV] = markflag(MAC), [WUC] = markflag(MAC),
- [IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC),
- [FFVT] = markflag(MAC), [WUPM] = markflag(MAC),
- [ECOL] = markflag(MAC), [MCC] = markflag(MAC),
- [DC] = markflag(MAC), [TNCRS] = markflag(MAC),
- [RLEC] = markflag(MAC), [XONRXC] = markflag(MAC),
- [XOFFTXC] = markflag(MAC), [RFC] = markflag(MAC),
- [TSCTFC] = markflag(MAC), [MGTPRC] = markflag(MAC),
- [WUS] = markflag(MAC), [AIT] = markflag(MAC),
- [FFLT] = markflag(MAC), [FFMT] = markflag(MAC),
- [SCC] = markflag(MAC), [FCRUC] = markflag(MAC),
- [LATECOL] = markflag(MAC), [COLC] = markflag(MAC),
- [SEQEC] = markflag(MAC), [CEXTERR] = markflag(MAC),
- [XONTXC] = markflag(MAC), [XOFFRXC] = markflag(MAC),
- [RJC] = markflag(MAC), [RNBC] = markflag(MAC),
- [MGTPDC] = markflag(MAC), [MGTPTC] = markflag(MAC),
- [RUC] = markflag(MAC), [ROC] = markflag(MAC),
- [GORCL] = markflag(MAC), [GORCH] = markflag(MAC),
- [GOTCL] = markflag(MAC), [GOTCH] = markflag(MAC),
- [BPRC] = markflag(MAC), [MPRC] = markflag(MAC),
- [TSCTC] = markflag(MAC), [PRC64] = markflag(MAC),
- [PRC127] = markflag(MAC), [PRC255] = markflag(MAC),
- [PRC511] = markflag(MAC), [PRC1023] = markflag(MAC),
- [PRC1522] = markflag(MAC), [PTC64] = markflag(MAC),
- [PTC127] = markflag(MAC), [PTC255] = markflag(MAC),
- [PTC511] = markflag(MAC), [PTC1023] = markflag(MAC),
- [PTC1522] = markflag(MAC), [MPTC] = markflag(MAC),
- [BPTC] = markflag(MAC),
-
- [TDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [TDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [RDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [RDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
- [PBM] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [IPAV] = MAC_ACCESS_FLAG_NEEDED, [WUC] = MAC_ACCESS_FLAG_NEEDED,
+ [IP6AT] = MAC_ACCESS_FLAG_NEEDED, [IP4AT] = MAC_ACCESS_FLAG_NEEDED,
+ [FFVT] = MAC_ACCESS_FLAG_NEEDED, [WUPM] = MAC_ACCESS_FLAG_NEEDED,
+ [ECOL] = MAC_ACCESS_FLAG_NEEDED, [MCC] = MAC_ACCESS_FLAG_NEEDED,
+ [DC] = MAC_ACCESS_FLAG_NEEDED, [TNCRS] = MAC_ACCESS_FLAG_NEEDED,
+ [RLEC] = MAC_ACCESS_FLAG_NEEDED, [XONRXC] = MAC_ACCESS_FLAG_NEEDED,
+ [XOFFTXC] = MAC_ACCESS_FLAG_NEEDED, [RFC] = MAC_ACCESS_FLAG_NEEDED,
+ [TSCTFC] = MAC_ACCESS_FLAG_NEEDED, [MGTPRC] = MAC_ACCESS_FLAG_NEEDED,
+ [WUS] = MAC_ACCESS_FLAG_NEEDED, [AIT] = MAC_ACCESS_FLAG_NEEDED,
+ [FFLT] = MAC_ACCESS_FLAG_NEEDED, [FFMT] = MAC_ACCESS_FLAG_NEEDED,
+ [SCC] = MAC_ACCESS_FLAG_NEEDED, [FCRUC] = MAC_ACCESS_FLAG_NEEDED,
+ [LATECOL] = MAC_ACCESS_FLAG_NEEDED, [COLC] = MAC_ACCESS_FLAG_NEEDED,
+ [SEQEC] = MAC_ACCESS_FLAG_NEEDED, [CEXTERR] = MAC_ACCESS_FLAG_NEEDED,
+ [XONTXC] = MAC_ACCESS_FLAG_NEEDED, [XOFFRXC] = MAC_ACCESS_FLAG_NEEDED,
+ [RJC] = MAC_ACCESS_FLAG_NEEDED, [RNBC] = MAC_ACCESS_FLAG_NEEDED,
+ [MGTPDC] = MAC_ACCESS_FLAG_NEEDED, [MGTPTC] = MAC_ACCESS_FLAG_NEEDED,
+ [RUC] = MAC_ACCESS_FLAG_NEEDED, [ROC] = MAC_ACCESS_FLAG_NEEDED,
+ [GORCL] = MAC_ACCESS_FLAG_NEEDED, [GORCH] = MAC_ACCESS_FLAG_NEEDED,
+ [GOTCL] = MAC_ACCESS_FLAG_NEEDED, [GOTCH] = MAC_ACCESS_FLAG_NEEDED,
+ [BPRC] = MAC_ACCESS_FLAG_NEEDED, [MPRC] = MAC_ACCESS_FLAG_NEEDED,
+ [TSCTC] = MAC_ACCESS_FLAG_NEEDED, [PRC64] = MAC_ACCESS_FLAG_NEEDED,
+ [PRC127] = MAC_ACCESS_FLAG_NEEDED, [PRC255] = MAC_ACCESS_FLAG_NEEDED,
+ [PRC511] = MAC_ACCESS_FLAG_NEEDED, [PRC1023] = MAC_ACCESS_FLAG_NEEDED,
+ [PRC1522] = MAC_ACCESS_FLAG_NEEDED, [PTC64] = MAC_ACCESS_FLAG_NEEDED,
+ [PTC127] = MAC_ACCESS_FLAG_NEEDED, [PTC255] = MAC_ACCESS_FLAG_NEEDED,
+ [PTC511] = MAC_ACCESS_FLAG_NEEDED, [PTC1023] = MAC_ACCESS_FLAG_NEEDED,
+ [PTC1522] = MAC_ACCESS_FLAG_NEEDED, [MPTC] = MAC_ACCESS_FLAG_NEEDED,
+ [BPTC] = MAC_ACCESS_FLAG_NEEDED,
+
+ [TDFH] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [TDFT] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [TDFHS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [TDFTS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [TDFPC] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [RDFH] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [RDFT] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [RDFHS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [RDFTS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [RDFPC] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
+ [PBM] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL,
};
static void
@@ -1419,13 +1416,6 @@ static int e1000_tx_tso_post_load(void *opaque, int version_id)
return 0;
}
-static bool e1000_full_mac_needed(void *opaque)
-{
- E1000State *s = opaque;
-
- return chkflag(MAC);
-}
-
static bool e1000_tso_state_needed(void *opaque)
{
E1000State *s = opaque;
@@ -1451,7 +1441,6 @@ static const VMStateDescription vmstate_e1000_full_mac_state = {
.name = "e1000/full_mac_state",
.version_id = 1,
.minimum_version_id = 1,
- .needed = e1000_full_mac_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
VMSTATE_END_OF_LIST()
@@ -1677,15 +1666,12 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
e1000_flush_queue_timer, d);
}
-static Property e1000_properties[] = {
+static const Property e1000_properties[] = {
DEFINE_NIC_PROPERTIES(E1000State, conf),
- DEFINE_PROP_BIT("extra_mac_registers", E1000State,
- compat_flags, E1000_FLAG_MAC_BIT, true),
DEFINE_PROP_BIT("migrate_tso_props", E1000State,
compat_flags, E1000_FLAG_TSO_BIT, true),
DEFINE_PROP_BIT("init-vet", E1000State,
compat_flags, E1000_FLAG_VET_BIT, true),
- DEFINE_PROP_END_OF_LIST(),
};
typedef struct E1000Info {
@@ -1695,7 +1681,7 @@ typedef struct E1000Info {
uint16_t phy_id2;
} E1000Info;
-static void e1000_class_init(ObjectClass *klass, void *data)
+static void e1000_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1733,7 +1719,7 @@ static const TypeInfo e1000_base_info = {
.instance_init = e1000_instance_init,
.class_size = sizeof(E1000BaseClass),
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -1771,10 +1757,10 @@ static void e1000_register_types(void)
type_info.name = info->name;
type_info.parent = TYPE_E1000_BASE;
- type_info.class_data = (void *)info;
+ type_info.class_data = info;
type_info.class_init = e1000_class_init;
- type_register(&type_info);
+ type_register_static(&type_info);
}
}
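With the extra_mac_registers compat property gone, the full_mac_state subsection loses its .needed callback and is migrated unconditionally, and the access table no longer packs a compat-flag bit above the two low status bits, which is why every markflag(MAC) collapses to a plain MAC_ACCESS_FLAG_NEEDED. A quick standalone check of what the removed macro expanded to, using only the definitions visible in this patch:

#include <assert.h>
#include <stdio.h>

#define E1000_FLAG_MAC_BIT      2
#define E1000_FLAG_MAC          (1 << E1000_FLAG_MAC_BIT)
#define MAC_ACCESS_PARTIAL      1
#define MAC_ACCESS_FLAG_NEEDED  2
#define markflag(x)             ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)

int main(void)
{
    /* old entries: compat-flag bit shifted above the two status bits */
    assert(markflag(MAC) == 0x12);
    assert((markflag(MAC) | MAC_ACCESS_PARTIAL) == 0x13);
    /* new entries keep only the status bits */
    printf("markflag(MAC) = 0x%02x, new value = 0x%02x\n",
           markflag(MAC), MAC_ACCESS_FLAG_NEEDED);
    return 0;
}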
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index 843892c..89e6d52 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -40,7 +40,7 @@
#include "net/tap.h"
#include "qemu/module.h"
#include "qemu/range.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/hw.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
@@ -372,8 +372,7 @@ static int
e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
{
Error *local_err = NULL;
- int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset,
- PCI_PM_SIZEOF, &local_err);
+ int ret = pci_pm_init(pdev, offset, &local_err);
if (local_err) {
error_report_err(local_err);
@@ -661,7 +660,7 @@ static PropertyInfo e1000e_prop_disable_vnet,
e1000e_prop_subsys_ven,
e1000e_prop_subsys;
-static Property e1000e_properties[] = {
+static const Property e1000e_properties[] = {
DEFINE_NIC_PROPERTIES(E1000EState, conf),
DEFINE_PROP_SIGNED("disable_vnet_hdr", E1000EState, disable_vnet, false,
e1000e_prop_disable_vnet, bool),
@@ -672,10 +671,9 @@ static Property e1000e_properties[] = {
e1000e_prop_subsys, uint16_t),
DEFINE_PROP_BOOL("init-vet", E1000EState, init_vet, true),
DEFINE_PROP_BOOL("migrate-timadj", E1000EState, timadj, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void e1000e_class_init(ObjectClass *class, void *data)
+static void e1000e_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
ResettableClass *rc = RESETTABLE_CLASS(class);
@@ -723,7 +721,7 @@ static const TypeInfo e1000e_info = {
.instance_size = sizeof(E1000EState),
.class_init = e1000e_class_init,
.instance_init = e1000e_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index 3ae2a18..2413858 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -40,7 +40,7 @@
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "net_tx_pkt.h"
#include "net_rx_pkt.h"
@@ -561,8 +561,7 @@ e1000e_rss_calc_hash(E1000ECore *core,
type = NetPktRssIpV6Ex;
break;
default:
- assert(false);
- return 0;
+ g_assert_not_reached();
}
return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
@@ -841,7 +840,6 @@ e1000e_ring_free_descr_num(E1000ECore *core, const E1000ERingInfo *r)
}
g_assert_not_reached();
- return 0;
}
static inline bool
diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h
index cd896fc..e9a74de 100644
--- a/hw/net/e1000x_regs.h
+++ b/hw/net/e1000x_regs.h
@@ -900,7 +900,7 @@ struct e1000_context_desc {
uint16_t tucse; /* TCP checksum end */
} tcp_fields;
} upper_setup;
- uint32_t cmd_and_length; /* */
+ uint32_t cmd_and_length;
union {
uint32_t data;
struct {
diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
index d9a70c4..d47df5a 100644
--- a/hw/net/eepro100.c
+++ b/hw/net/eepro100.c
@@ -6,10 +6,12 @@
* Portions of the code are copies from grub / etherboot eepro100.c
* and linux e100.c.
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
- * (at your option) version 3 or any later version.
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -48,9 +50,9 @@
#include "net/net.h"
#include "net/eth.h"
#include "hw/nvram/eeprom93xx.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/dma.h"
+#include "system/reset.h"
#include "qemu/bitops.h"
#include "qemu/module.h"
#include "qapi/error.h"
@@ -549,9 +551,7 @@ static void e100_pci_reset(EEPRO100State *s, Error **errp)
if (info->power_management) {
/* Power Management Capabilities */
int cfg_offset = 0xdc;
- int r = pci_add_capability(&s->dev, PCI_CAP_ID_PM,
- cfg_offset, PCI_PM_SIZEOF,
- errp);
+ int r = pci_pm_init(&s->dev, cfg_offset, errp);
if (r < 0) {
return;
}
@@ -2056,12 +2056,11 @@ static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s)
return eepro100_get_class_by_name(object_get_typename(OBJECT(s)));
}
-static Property e100_properties[] = {
+static const Property e100_properties[] = {
DEFINE_NIC_PROPERTIES(EEPRO100State, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void eepro100_class_init(ObjectClass *klass, void *data)
+static void eepro100_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2095,12 +2094,12 @@ static void eepro100_register_types(void)
type_info.class_init = eepro100_class_init;
type_info.instance_size = sizeof(EEPRO100State);
type_info.instance_init = eepro100_instance_init;
- type_info.interfaces = (InterfaceInfo[]) {
+ type_info.interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
};
- type_register(&type_info);
+ type_register_static(&type_info);
}
}
diff --git a/hw/net/etraxfs_eth.c b/hw/net/etraxfs_eth.c
deleted file mode 100644
index 5faf20c..0000000
--- a/hw/net/etraxfs_eth.c
+++ /dev/null
@@ -1,688 +0,0 @@
-/*
- * QEMU ETRAX Ethernet Controller.
- *
- * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/sysbus.h"
-#include "net/net.h"
-#include "hw/cris/etraxfs.h"
-#include "qemu/error-report.h"
-#include "qemu/module.h"
-#include "trace.h"
-#include "qom/object.h"
-
-#define D(x)
-
-/* Advertisement control register. */
-#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
-#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
-#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
-#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
-
-/*
- * The MDIO extensions in the TDK PHY model were reversed engineered from the
- * linux driver (PHYID and Diagnostics reg).
- * TODO: Add friendly names for the register nums.
- */
-struct qemu_phy
-{
- uint32_t regs[32];
-
- int link;
-
- unsigned int (*read)(struct qemu_phy *phy, unsigned int req);
- void (*write)(struct qemu_phy *phy, unsigned int req, unsigned int data);
-};
-
-static unsigned int tdk_read(struct qemu_phy *phy, unsigned int req)
-{
- int regnum;
- unsigned r = 0;
-
- regnum = req & 0x1f;
-
- switch (regnum) {
- case 1:
- if (!phy->link) {
- break;
- }
- /* MR1. */
- /* Speeds and modes. */
- r |= (1 << 13) | (1 << 14);
- r |= (1 << 11) | (1 << 12);
- r |= (1 << 5); /* Autoneg complete. */
- r |= (1 << 3); /* Autoneg able. */
- r |= (1 << 2); /* link. */
- break;
- case 5:
- /* Link partner ability.
- We are kind; always agree with whatever best mode
- the guest advertises. */
- r = 1 << 14; /* Success. */
- /* Copy advertised modes. */
- r |= phy->regs[4] & (15 << 5);
- /* Autoneg support. */
- r |= 1;
- break;
- case 18:
- {
- /* Diagnostics reg. */
- int duplex = 0;
- int speed_100 = 0;
-
- if (!phy->link) {
- break;
- }
-
- /* Are we advertising 100 half or 100 duplex ? */
- speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
- speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);
-
- /* Are we advertising 10 duplex or 100 duplex ? */
- duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
- duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
- r = (speed_100 << 10) | (duplex << 11);
- }
- break;
-
- default:
- r = phy->regs[regnum];
- break;
- }
- trace_mdio_phy_read(regnum, r);
- return r;
-}
-
-static void
-tdk_write(struct qemu_phy *phy, unsigned int req, unsigned int data)
-{
- int regnum;
-
- regnum = req & 0x1f;
- trace_mdio_phy_write(regnum, data);
- switch (regnum) {
- default:
- phy->regs[regnum] = data;
- break;
- }
-}
-
-static void
-tdk_reset(struct qemu_phy *phy)
-{
- phy->regs[0] = 0x3100;
- /* PHY Id. */
- phy->regs[2] = 0x0300;
- phy->regs[3] = 0xe400;
- /* Autonegotiation advertisement reg. */
- phy->regs[4] = 0x01E1;
- phy->link = 1;
-}
-
-struct qemu_mdio
-{
- /* bus. */
- int mdc;
- int mdio;
-
- /* decoder. */
- enum {
- PREAMBLE,
- SOF,
- OPC,
- ADDR,
- REQ,
- TURNAROUND,
- DATA
- } state;
- unsigned int drive;
-
- unsigned int cnt;
- unsigned int addr;
- unsigned int opc;
- unsigned int req;
- unsigned int data;
-
- struct qemu_phy *devs[32];
-};
-
-static void
-mdio_attach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr)
-{
- bus->devs[addr & 0x1f] = phy;
-}
-
-#ifdef USE_THIS_DEAD_CODE
-static void
-mdio_detach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr)
-{
- bus->devs[addr & 0x1f] = NULL;
-}
-#endif
-
-static void mdio_read_req(struct qemu_mdio *bus)
-{
- struct qemu_phy *phy;
-
- phy = bus->devs[bus->addr];
- if (phy && phy->read) {
- bus->data = phy->read(phy, bus->req);
- } else {
- bus->data = 0xffff;
- }
-}
-
-static void mdio_write_req(struct qemu_mdio *bus)
-{
- struct qemu_phy *phy;
-
- phy = bus->devs[bus->addr];
- if (phy && phy->write) {
- phy->write(phy, bus->req, bus->data);
- }
-}
-
-static void mdio_cycle(struct qemu_mdio *bus)
-{
- bus->cnt++;
-
- trace_mdio_bitbang(bus->mdc, bus->mdio, bus->state, bus->cnt, bus->drive);
-#if 0
- if (bus->mdc) {
- printf("%d", bus->mdio);
- }
-#endif
- switch (bus->state) {
- case PREAMBLE:
- if (bus->mdc) {
- if (bus->cnt >= (32 * 2) && !bus->mdio) {
- bus->cnt = 0;
- bus->state = SOF;
- bus->data = 0;
- }
- }
- break;
- case SOF:
- if (bus->mdc) {
- if (bus->mdio != 1) {
- printf("WARNING: no SOF\n");
- }
- if (bus->cnt == 1*2) {
- bus->cnt = 0;
- bus->opc = 0;
- bus->state = OPC;
- }
- }
- break;
- case OPC:
- if (bus->mdc) {
- bus->opc <<= 1;
- bus->opc |= bus->mdio & 1;
- if (bus->cnt == 2*2) {
- bus->cnt = 0;
- bus->addr = 0;
- bus->state = ADDR;
- }
- }
- break;
- case ADDR:
- if (bus->mdc) {
- bus->addr <<= 1;
- bus->addr |= bus->mdio & 1;
-
- if (bus->cnt == 5*2) {
- bus->cnt = 0;
- bus->req = 0;
- bus->state = REQ;
- }
- }
- break;
- case REQ:
- if (bus->mdc) {
- bus->req <<= 1;
- bus->req |= bus->mdio & 1;
- if (bus->cnt == 5*2) {
- bus->cnt = 0;
- bus->state = TURNAROUND;
- }
- }
- break;
- case TURNAROUND:
- if (bus->mdc && bus->cnt == 2*2) {
- bus->mdio = 0;
- bus->cnt = 0;
-
- if (bus->opc == 2) {
- bus->drive = 1;
- mdio_read_req(bus);
- bus->mdio = bus->data & 1;
- }
- bus->state = DATA;
- }
- break;
- case DATA:
- if (!bus->mdc) {
- if (bus->drive) {
- bus->mdio = !!(bus->data & (1 << 15));
- bus->data <<= 1;
- }
- } else {
- if (!bus->drive) {
- bus->data <<= 1;
- bus->data |= bus->mdio;
- }
- if (bus->cnt == 16 * 2) {
- bus->cnt = 0;
- bus->state = PREAMBLE;
- if (!bus->drive) {
- mdio_write_req(bus);
- }
- bus->drive = 0;
- }
- }
- break;
- default:
- break;
- }
-}
-
-/* ETRAX-FS Ethernet MAC block starts here. */
-
-#define RW_MA0_LO 0x00
-#define RW_MA0_HI 0x01
-#define RW_MA1_LO 0x02
-#define RW_MA1_HI 0x03
-#define RW_GA_LO 0x04
-#define RW_GA_HI 0x05
-#define RW_GEN_CTRL 0x06
-#define RW_REC_CTRL 0x07
-#define RW_TR_CTRL 0x08
-#define RW_CLR_ERR 0x09
-#define RW_MGM_CTRL 0x0a
-#define R_STAT 0x0b
-#define FS_ETH_MAX_REGS 0x17
-
-#define TYPE_ETRAX_FS_ETH "etraxfs-eth"
-OBJECT_DECLARE_SIMPLE_TYPE(ETRAXFSEthState, ETRAX_FS_ETH)
-
-struct ETRAXFSEthState {
- SysBusDevice parent_obj;
-
- MemoryRegion mmio;
- NICState *nic;
- NICConf conf;
-
- /* Two addrs in the filter. */
- uint8_t macaddr[2][6];
- uint32_t regs[FS_ETH_MAX_REGS];
-
- struct etraxfs_dma_client *dma_out;
- struct etraxfs_dma_client *dma_in;
-
- /* MDIO bus. */
- struct qemu_mdio mdio_bus;
- unsigned int phyaddr;
- int duplex_mismatch;
-
- /* PHY. */
- struct qemu_phy phy;
-};
-
-static void eth_validate_duplex(ETRAXFSEthState *eth)
-{
- struct qemu_phy *phy;
- unsigned int phy_duplex;
- unsigned int mac_duplex;
- int new_mm = 0;
-
- phy = eth->mdio_bus.devs[eth->phyaddr];
- phy_duplex = !!(phy->read(phy, 18) & (1 << 11));
- mac_duplex = !!(eth->regs[RW_REC_CTRL] & 128);
-
- if (mac_duplex != phy_duplex) {
- new_mm = 1;
- }
-
- if (eth->regs[RW_GEN_CTRL] & 1) {
- if (new_mm != eth->duplex_mismatch) {
- if (new_mm) {
- printf("HW: WARNING ETH duplex mismatch MAC=%d PHY=%d\n",
- mac_duplex, phy_duplex);
- } else {
- printf("HW: ETH duplex ok.\n");
- }
- }
- eth->duplex_mismatch = new_mm;
- }
-}
-
-static uint64_t
-eth_read(void *opaque, hwaddr addr, unsigned int size)
-{
- ETRAXFSEthState *eth = opaque;
- uint32_t r = 0;
-
- addr >>= 2;
-
- switch (addr) {
- case R_STAT:
- r = eth->mdio_bus.mdio & 1;
- break;
- default:
- r = eth->regs[addr];
- D(printf("%s %x\n", __func__, addr * 4));
- break;
- }
- return r;
-}
-
-static void eth_update_ma(ETRAXFSEthState *eth, int ma)
-{
- int reg;
- int i = 0;
-
- ma &= 1;
-
- reg = RW_MA0_LO;
- if (ma) {
- reg = RW_MA1_LO;
- }
-
- eth->macaddr[ma][i++] = eth->regs[reg];
- eth->macaddr[ma][i++] = eth->regs[reg] >> 8;
- eth->macaddr[ma][i++] = eth->regs[reg] >> 16;
- eth->macaddr[ma][i++] = eth->regs[reg] >> 24;
- eth->macaddr[ma][i++] = eth->regs[reg + 1];
- eth->macaddr[ma][i] = eth->regs[reg + 1] >> 8;
-
- D(printf("set mac%d=%x.%x.%x.%x.%x.%x\n", ma,
- eth->macaddr[ma][0], eth->macaddr[ma][1],
- eth->macaddr[ma][2], eth->macaddr[ma][3],
- eth->macaddr[ma][4], eth->macaddr[ma][5]));
-}
-
-static void
-eth_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
-{
- ETRAXFSEthState *eth = opaque;
- uint32_t value = val64;
-
- addr >>= 2;
- switch (addr) {
- case RW_MA0_LO:
- case RW_MA0_HI:
- eth->regs[addr] = value;
- eth_update_ma(eth, 0);
- break;
- case RW_MA1_LO:
- case RW_MA1_HI:
- eth->regs[addr] = value;
- eth_update_ma(eth, 1);
- break;
-
- case RW_MGM_CTRL:
- /* Attach an MDIO/PHY abstraction. */
- if (value & 2) {
- eth->mdio_bus.mdio = value & 1;
- }
- if (eth->mdio_bus.mdc != (value & 4)) {
- mdio_cycle(&eth->mdio_bus);
- eth_validate_duplex(eth);
- }
- eth->mdio_bus.mdc = !!(value & 4);
- eth->regs[addr] = value;
- break;
-
- case RW_REC_CTRL:
- eth->regs[addr] = value;
- eth_validate_duplex(eth);
- break;
-
- default:
- eth->regs[addr] = value;
- D(printf("%s %x %x\n", __func__, addr, value));
- break;
- }
-}
-
-/* The ETRAX FS has a groupt address table (GAT) which works like a k=1 bloom
- filter dropping group addresses we have not joined. The filter has 64
- bits (m). The has function is a simple nible xor of the group addr. */
-static int eth_match_groupaddr(ETRAXFSEthState *eth, const unsigned char *sa)
-{
- unsigned int hsh;
- int m_individual = eth->regs[RW_REC_CTRL] & 4;
- int match;
-
- /* First bit on the wire of a MAC address signals multicast or
- physical address. */
- if (!m_individual && !(sa[0] & 1)) {
- return 0;
- }
-
- /* Calculate the hash index for the GA registers. */
- hsh = 0;
- hsh ^= (*sa) & 0x3f;
- hsh ^= ((*sa) >> 6) & 0x03;
- ++sa;
- hsh ^= ((*sa) << 2) & 0x03c;
- hsh ^= ((*sa) >> 4) & 0xf;
- ++sa;
- hsh ^= ((*sa) << 4) & 0x30;
- hsh ^= ((*sa) >> 2) & 0x3f;
- ++sa;
- hsh ^= (*sa) & 0x3f;
- hsh ^= ((*sa) >> 6) & 0x03;
- ++sa;
- hsh ^= ((*sa) << 2) & 0x03c;
- hsh ^= ((*sa) >> 4) & 0xf;
- ++sa;
- hsh ^= ((*sa) << 4) & 0x30;
- hsh ^= ((*sa) >> 2) & 0x3f;
-
- hsh &= 63;
- if (hsh > 31) {
- match = eth->regs[RW_GA_HI] & (1 << (hsh - 32));
- } else {
- match = eth->regs[RW_GA_LO] & (1 << hsh);
- }
- D(printf("hsh=%x ga=%x.%x mtch=%d\n", hsh,
- eth->regs[RW_GA_HI], eth->regs[RW_GA_LO], match));
- return match;
-}
-
-static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
-{
- unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- ETRAXFSEthState *eth = qemu_get_nic_opaque(nc);
- int use_ma0 = eth->regs[RW_REC_CTRL] & 1;
- int use_ma1 = eth->regs[RW_REC_CTRL] & 2;
- int r_bcast = eth->regs[RW_REC_CTRL] & 8;
-
- if (size < 12) {
- return -1;
- }
-
- D(printf("%x.%x.%x.%x.%x.%x ma=%d %d bc=%d\n",
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
- use_ma0, use_ma1, r_bcast));
-
- /* Does the frame get through the address filters? */
- if ((!use_ma0 || memcmp(buf, eth->macaddr[0], 6))
- && (!use_ma1 || memcmp(buf, eth->macaddr[1], 6))
- && (!r_bcast || memcmp(buf, sa_bcast, 6))
- && !eth_match_groupaddr(eth, buf)) {
- return size;
- }
-
- /* FIXME: Find another way to pass on the fake csum. */
- etraxfs_dmac_input(eth->dma_in, (void *)buf, size + 4, 1);
-
- return size;
-}
-
-static int eth_tx_push(void *opaque, unsigned char *buf, int len, bool eop)
-{
- ETRAXFSEthState *eth = opaque;
-
- D(printf("%s buf=%p len=%d\n", __func__, buf, len));
- qemu_send_packet(qemu_get_queue(eth->nic), buf, len);
- return len;
-}
-
-static void eth_set_link(NetClientState *nc)
-{
- ETRAXFSEthState *eth = qemu_get_nic_opaque(nc);
- D(printf("%s %d\n", __func__, nc->link_down));
- eth->phy.link = !nc->link_down;
-}
-
-static const MemoryRegionOps eth_ops = {
- .read = eth_read,
- .write = eth_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
-};
-
-static NetClientInfo net_etraxfs_info = {
- .type = NET_CLIENT_DRIVER_NIC,
- .size = sizeof(NICState),
- .receive = eth_receive,
- .link_status_changed = eth_set_link,
-};
-
-static void etraxfs_eth_reset(DeviceState *dev)
-{
- ETRAXFSEthState *s = ETRAX_FS_ETH(dev);
-
- memset(s->regs, 0, sizeof(s->regs));
- memset(s->macaddr, 0, sizeof(s->macaddr));
- s->duplex_mismatch = 0;
-
- s->mdio_bus.mdc = 0;
- s->mdio_bus.mdio = 0;
- s->mdio_bus.state = 0;
- s->mdio_bus.drive = 0;
- s->mdio_bus.cnt = 0;
- s->mdio_bus.addr = 0;
- s->mdio_bus.opc = 0;
- s->mdio_bus.req = 0;
- s->mdio_bus.data = 0;
-
- tdk_reset(&s->phy);
-}
-
-static void etraxfs_eth_realize(DeviceState *dev, Error **errp)
-{
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- ETRAXFSEthState *s = ETRAX_FS_ETH(dev);
-
- if (!s->dma_out || !s->dma_in) {
- error_setg(errp, "Unconnected ETRAX-FS Ethernet MAC");
- return;
- }
-
- s->dma_out->client.push = eth_tx_push;
- s->dma_out->client.opaque = s;
- s->dma_in->client.opaque = s;
- s->dma_in->client.pull = NULL;
-
- memory_region_init_io(&s->mmio, OBJECT(dev), &eth_ops, s,
- "etraxfs-eth", 0x5c);
- sysbus_init_mmio(sbd, &s->mmio);
-
- qemu_macaddr_default_if_unset(&s->conf.macaddr);
- s->nic = qemu_new_nic(&net_etraxfs_info, &s->conf,
- object_get_typename(OBJECT(s)), dev->id,
- &dev->mem_reentrancy_guard, s);
- qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
-
- s->phy.read = tdk_read;
- s->phy.write = tdk_write;
- mdio_attach(&s->mdio_bus, &s->phy, s->phyaddr);
-}
-
-static Property etraxfs_eth_properties[] = {
- DEFINE_PROP_UINT32("phyaddr", ETRAXFSEthState, phyaddr, 1),
- DEFINE_NIC_PROPERTIES(ETRAXFSEthState, conf),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void etraxfs_eth_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = etraxfs_eth_realize;
- dc->reset = etraxfs_eth_reset;
- device_class_set_props(dc, etraxfs_eth_properties);
- /* Reason: dma_out, dma_in are not user settable */
- dc->user_creatable = false;
-}
-
-
-/* Instantiate an ETRAXFS Ethernet MAC. */
-DeviceState *
-etraxfs_eth_init(hwaddr base, int phyaddr,
- struct etraxfs_dma_client *dma_out,
- struct etraxfs_dma_client *dma_in)
-{
- DeviceState *dev;
-
- dev = qdev_new("etraxfs-eth");
- qemu_configure_nic_device(dev, true, "fseth");
- qdev_prop_set_uint32(dev, "phyaddr", phyaddr);
-
- /*
- * TODO: QOM design, define a QOM interface for "I am an etraxfs
- * DMA client" (which replaces the current 'struct
- * etraxfs_dma_client' ad-hoc interface), implement it on the
- * ethernet device, and then have QOM link properties on the DMA
- * controller device so that you can pass the interface
- * implementations to it.
- */
- ETRAX_FS_ETH(dev)->dma_out = dma_out;
- ETRAX_FS_ETH(dev)->dma_in = dma_in;
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- return dev;
-}
-
-static const TypeInfo etraxfs_eth_info = {
- .name = TYPE_ETRAX_FS_ETH,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(ETRAXFSEthState),
- .class_init = etraxfs_eth_class_init,
-};
-
-static void etraxfs_eth_register_types(void)
-{
- type_register_static(&etraxfs_eth_info);
-}
-
-type_init(etraxfs_eth_register_types)
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index 00315f3..846f6cb 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -36,7 +36,6 @@
#include "registers.h"
#include "qapi/error.h"
#include "qemu/log.h"
-#include "qemu/module.h"
/* #define HEX_DUMP */
/* #define DEBUG_REGISTER */
@@ -390,6 +389,7 @@ static void etsec_realize(DeviceState *dev, Error **errp)
{
eTSEC *etsec = ETSEC_COMMON(dev);
+ qemu_macaddr_default_if_unset(&etsec->conf.macaddr);
etsec->nic = qemu_new_nic(&net_etsec_info, &etsec->conf,
object_get_typename(OBJECT(dev)), dev->id,
&dev->mem_reentrancy_guard, etsec);
@@ -415,33 +415,29 @@ static void etsec_instance_init(Object *obj)
sysbus_init_irq(sbd, &etsec->err_irq);
}
-static Property etsec_properties[] = {
+static const Property etsec_properties[] = {
DEFINE_NIC_PROPERTIES(eTSEC, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void etsec_class_init(ObjectClass *klass, void *data)
+static void etsec_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = etsec_realize;
- dc->reset = etsec_reset;
+ dc->desc = "Freescale Enhanced Three-Speed Ethernet Controller";
+ device_class_set_legacy_reset(dc, etsec_reset);
device_class_set_props(dc, etsec_properties);
- /* Supported by ppce500 machine */
- dc->user_creatable = true;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
-static const TypeInfo etsec_info = {
- .name = TYPE_ETSEC_COMMON,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(eTSEC),
- .class_init = etsec_class_init,
- .instance_init = etsec_instance_init,
+static const TypeInfo etsec_types[] = {
+ {
+ .name = TYPE_ETSEC_COMMON,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
+ .instance_size = sizeof(eTSEC),
+ .class_init = etsec_class_init,
+ .instance_init = etsec_instance_init,
+ },
};
-static void etsec_register_types(void)
-{
- type_register_static(&etsec_info);
-}
-
-type_init(etsec_register_types)
+DEFINE_TYPES(etsec_types)
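etsec also switches from an explicit type_init()/type_register_static() pair to the DEFINE_TYPES() array form. For another device the same shape would look roughly like this (TYPE_BAR, BarState and bar_class_init are placeholders, and a different parent type may apply):

static const TypeInfo bar_types[] = {
    {
        .name          = TYPE_BAR,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(BarState),
        .class_init    = bar_class_init,
    },
};

DEFINE_TYPES(bar_types)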
diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c
index b48d2cb..4e91699 100644
--- a/hw/net/fsl_etsec/miim.c
+++ b/hw/net/fsl_etsec/miim.c
@@ -29,13 +29,6 @@
/* #define DEBUG_MIIM */
-#define MIIM_CONTROL 0
-#define MIIM_STATUS 1
-#define MIIM_PHY_ID_1 2
-#define MIIM_PHY_ID_2 3
-#define MIIM_T2_STATUS 10
-#define MIIM_EXT_STATUS 15
-
static void miim_read_cycle(eTSEC *etsec)
{
uint8_t phy;
@@ -47,14 +40,14 @@ static void miim_read_cycle(eTSEC *etsec)
addr = etsec->regs[MIIMADD].value & 0x1F;
switch (addr) {
- case MIIM_CONTROL:
+ case MII_BMCR:
value = etsec->phy_control;
break;
- case MIIM_STATUS:
+ case MII_BMSR:
value = etsec->phy_status;
break;
- case MIIM_T2_STATUS:
- value = 0x1800; /* Local and remote receivers OK */
+ case MII_STAT1000:
+ value = MII_STAT1000_LOK | MII_STAT1000_ROK;
break;
default:
value = 0x0;
@@ -84,8 +77,8 @@ static void miim_write_cycle(eTSEC *etsec)
#endif
switch (addr) {
- case MIIM_CONTROL:
- etsec->phy_control = value & ~(0x8100);
+ case MII_BMCR:
+ etsec->phy_control = value & ~(MII_BMCR_RESET | MII_BMCR_FD);
break;
default:
break;
diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c
index 80f9cd5..c41ce88 100644
--- a/hw/net/ftgmac100.c
+++ b/hw/net/ftgmac100.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/ftgmac100.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -24,8 +24,7 @@
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
/*
* FTGMAC100 registers
@@ -1255,19 +1254,18 @@ static const VMStateDescription vmstate_ftgmac100 = {
}
};
-static Property ftgmac100_properties[] = {
+static const Property ftgmac100_properties[] = {
DEFINE_PROP_BOOL("aspeed", FTGMAC100State, aspeed, false),
DEFINE_NIC_PROPERTIES(FTGMAC100State, conf),
DEFINE_PROP_BOOL("dma64", FTGMAC100State, dma64, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ftgmac100_class_init(ObjectClass *klass, void *data)
+static void ftgmac100_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_ftgmac100;
- dc->reset = ftgmac100_reset;
+ device_class_set_legacy_reset(dc, ftgmac100_reset);
device_class_set_props(dc, ftgmac100_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->realize = ftgmac100_realize;
@@ -1416,18 +1414,17 @@ static const VMStateDescription vmstate_aspeed_mii = {
}
};
-static Property aspeed_mii_properties[] = {
+static const Property aspeed_mii_properties[] = {
DEFINE_PROP_LINK("nic", AspeedMiiState, nic, TYPE_FTGMAC100,
FTGMAC100State *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_mii_class_init(ObjectClass *klass, void *data)
+static void aspeed_mii_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_aspeed_mii;
- dc->reset = aspeed_mii_reset;
+ device_class_set_legacy_reset(dc, aspeed_mii_reset);
dc->realize = aspeed_mii_realize;
dc->desc = "Aspeed MII controller";
device_class_set_props(dc, aspeed_mii_properties);
diff --git a/hw/net/i82596.c b/hw/net/i82596.c
index 6cc8292..c1ff3e6 100644
--- a/hw/net/i82596.c
+++ b/hw/net/i82596.c
@@ -5,7 +5,7 @@
* This work is licensed under the GNU GPL license version 2 or later.
*
* This software was written to be compatible with the specification:
- * https://www.intel.com/assets/pdf/general/82596ca.pdf
+ * https://parisc.docs.kernel.org/en/latest/_downloads/96672be0650d9fc046bbcea40b92482f/82596CA.pdf
*/
#include "qemu/osdep.h"
@@ -15,11 +15,11 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qemu/module.h"
#include "trace.h"
#include "i82596.h"
-#include <zlib.h> /* For crc32 */
+#include <zlib.h> /* for crc32 */
#if defined(ENABLE_DEBUG)
#define DBG(x) x
@@ -177,6 +177,26 @@ static void set_individual_address(I82596State *s, uint32_t addr)
trace_i82596_new_mac(nc->info_str);
}
+static void i82596_configure(I82596State *s, uint32_t addr)
+{
+ uint8_t byte_cnt;
+ byte_cnt = get_byte(addr + 8) & 0x0f;
+
+ byte_cnt = MAX(byte_cnt, 4);
+ byte_cnt = MIN(byte_cnt, sizeof(s->config));
+ /* copy byte_cnt max. */
+ address_space_read(&address_space_memory, addr + 8,
+ MEMTXATTRS_UNSPECIFIED, s->config, byte_cnt);
+ /* config byte according to page 35ff */
+ s->config[2] &= 0x82; /* mask valid bits */
+ s->config[2] |= 0x40;
+ s->config[7] &= 0xf7; /* clear zero bit */
+ assert(I596_NOCRC_INS == 0); /* do CRC insertion */
+ s->config[10] = MAX(s->config[10], 5); /* min frame length */
+ s->config[12] &= 0x40; /* only full duplex field valid */
+ s->config[13] |= 0x3f; /* set ones in byte 13 */
+}
+
static void set_multicast_list(I82596State *s, uint32_t addr)
{
uint16_t mc_count, i;
@@ -234,7 +254,6 @@ static void command_loop(I82596State *s)
{
uint16_t cmd;
uint16_t status;
- uint8_t byte_cnt;
DBG(printf("STARTING COMMAND LOOP cmd_p=%08x\n", s->cmd_p));
@@ -254,20 +273,7 @@ static void command_loop(I82596State *s)
set_individual_address(s, s->cmd_p);
break;
case CmdConfigure:
- byte_cnt = get_byte(s->cmd_p + 8) & 0x0f;
- byte_cnt = MAX(byte_cnt, 4);
- byte_cnt = MIN(byte_cnt, sizeof(s->config));
- /* copy byte_cnt max. */
- address_space_read(&address_space_memory, s->cmd_p + 8,
- MEMTXATTRS_UNSPECIFIED, s->config, byte_cnt);
- /* config byte according to page 35ff */
- s->config[2] &= 0x82; /* mask valid bits */
- s->config[2] |= 0x40;
- s->config[7] &= 0xf7; /* clear zero bit */
- assert(I596_NOCRC_INS == 0); /* do CRC insertion */
- s->config[10] = MAX(s->config[10], 5); /* min frame length */
- s->config[12] &= 0x40; /* only full duplex field valid */
- s->config[13] |= 0x3f; /* set ones in byte 13 */
+ i82596_configure(s, s->cmd_p);
break;
case CmdTDR:
/* get signal LINK */
@@ -282,7 +288,7 @@ static void command_loop(I82596State *s)
case CmdDump:
case CmdDiagnose:
printf("FIXME Command %d !!\n", cmd & 7);
- assert(0);
+ g_assert_not_reached();
}
/* update status */
diff --git a/hw/net/i82596.h b/hw/net/i82596.h
index f0bbe81..dc1fa1a 100644
--- a/hw/net/i82596.h
+++ b/hw/net/i82596.h
@@ -3,8 +3,8 @@
#define I82596_IOPORT_SIZE 0x20
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
#define PORT_RESET 0x00 /* reset 82596 */
#define PORT_SELFTEST 0x01 /* selftest */
diff --git a/hw/net/igb.c b/hw/net/igb.c
index b6ca2f1..e4c0236 100644
--- a/hw/net/igb.c
+++ b/hw/net/igb.c
@@ -44,7 +44,7 @@
#include "net/tap.h"
#include "qemu/module.h"
#include "qemu/range.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/hw.h"
#include "hw/net/mii.h"
#include "hw/pci/pci.h"
@@ -356,8 +356,7 @@ static int
igb_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
{
Error *local_err = NULL;
- int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset,
- PCI_PM_SIZEOF, &local_err);
+ int ret = pci_pm_init(pdev, offset, &local_err);
if (local_err) {
error_report_err(local_err);
@@ -446,14 +445,11 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
pcie_ari_init(pci_dev, 0x150);
- if (!pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET,
- TYPE_IGBVF, IGB_82576_VF_DEV_ID,
- IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
- IGB_VF_OFFSET, IGB_VF_STRIDE,
+ if (!pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF,
+ IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS,
+ IGB_MAX_VF_FUNCTIONS, IGB_VF_OFFSET, IGB_VF_STRIDE,
errp)) {
- pcie_cap_exit(pci_dev);
igb_cleanup_msix(s);
- msi_uninit(pci_dev);
return;
}
@@ -598,13 +594,12 @@ static const VMStateDescription igb_vmstate = {
}
};
-static Property igb_properties[] = {
+static const Property igb_properties[] = {
DEFINE_NIC_PROPERTIES(IGBState, conf),
DEFINE_PROP_BOOL("x-pcie-flr-init", IGBState, has_flr, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void igb_class_init(ObjectClass *class, void *data)
+static void igb_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
ResettableClass *rc = RESETTABLE_CLASS(class);
@@ -640,7 +635,7 @@ static const TypeInfo igb_info = {
.instance_size = sizeof(IGBState),
.class_init = igb_class_init,
.instance_init = igb_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index bcd5f6c..39e3ce1 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -44,7 +44,7 @@
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "net_tx_pkt.h"
#include "net_rx_pkt.h"
@@ -397,8 +397,7 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
type = NetPktRssIpV6Udp;
break;
default:
- assert(false);
- return 0;
+ g_assert_not_reached();
}
return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
@@ -747,7 +746,6 @@ igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r)
}
g_assert_not_reached();
- return 0;
}
static inline bool
diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
index e5a47ea..4dc4c31 100644
--- a/hw/net/igb_regs.h
+++ b/hw/net/igb_regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This is copied + edited from kernel header files in
* drivers/net/ethernet/intel/igb
diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c
index 21a97d4..31d72c4 100644
--- a/hw/net/igbvf.c
+++ b/hw/net/igbvf.c
@@ -299,7 +299,7 @@ static void igbvf_pci_uninit(PCIDevice *dev)
msix_uninit(dev, &s->msix, &s->msix);
}
-static void igbvf_class_init(ObjectClass *class, void *data)
+static void igbvf_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
@@ -325,7 +325,7 @@ static const TypeInfo igbvf_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(IgbVfState),
.class_init = igbvf_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index 8c91d20..e5e34dd 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -26,15 +26,14 @@
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "trace.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#define IMX_MAX_DESC 1024
@@ -204,17 +203,12 @@ static const VMStateDescription vmstate_imx_eth_txdescs = {
static const VMStateDescription vmstate_imx_eth = {
.name = TYPE_IMX_FEC,
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
VMSTATE_UINT32(rx_descriptor, IMXFECState),
VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
- VMSTATE_UINT32(phy_status, IMXFECState),
- VMSTATE_UINT32(phy_control, IMXFECState),
- VMSTATE_UINT32(phy_advertise, IMXFECState),
- VMSTATE_UINT32(phy_int, IMXFECState),
- VMSTATE_UINT32(phy_int_mask, IMXFECState),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
@@ -223,14 +217,6 @@ static const VMStateDescription vmstate_imx_eth = {
},
};
-#define PHY_INT_ENERGYON (1 << 7)
-#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
-#define PHY_INT_FAULT (1 << 5)
-#define PHY_INT_DOWN (1 << 4)
-#define PHY_INT_AUTONEG_LP (1 << 3)
-#define PHY_INT_PARFAULT (1 << 2)
-#define PHY_INT_AUTONEG_PAGE (1 << 1)
-
static void imx_eth_update(IMXFECState *s);
/*
@@ -239,47 +225,19 @@ static void imx_eth_update(IMXFECState *s);
* For now we don't handle any GPIO/interrupt line, so the OS will
* have to poll for the PHY status.
*/
-static void imx_phy_update_irq(IMXFECState *s)
-{
- imx_eth_update(s);
-}
-
-static void imx_phy_update_link(IMXFECState *s)
+static void imx_phy_update_irq(void *opaque, int n, int level)
{
- /* Autonegotiation status mirrors link status. */
- if (qemu_get_queue(s->nic)->link_down) {
- trace_imx_phy_update_link("down");
- s->phy_status &= ~0x0024;
- s->phy_int |= PHY_INT_DOWN;
- } else {
- trace_imx_phy_update_link("up");
- s->phy_status |= 0x0024;
- s->phy_int |= PHY_INT_ENERGYON;
- s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
- }
- imx_phy_update_irq(s);
+ imx_eth_update(opaque);
}
static void imx_eth_set_link(NetClientState *nc)
{
- imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
-}
-
-static void imx_phy_reset(IMXFECState *s)
-{
- trace_imx_phy_reset();
-
- s->phy_status = 0x7809;
- s->phy_control = 0x3000;
- s->phy_advertise = 0x01e1;
- s->phy_int_mask = 0;
- s->phy_int = 0;
- imx_phy_update_link(s);
+ lan9118_phy_update_link(&IMX_FEC(qemu_get_nic_opaque(nc))->mii,
+ nc->link_down);
}
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
- uint32_t val;
uint32_t phy = reg / 32;
if (!s->phy_connected) {
@@ -297,54 +255,7 @@ static uint32_t imx_phy_read(IMXFECState *s, int reg)
reg %= 32;
- switch (reg) {
- case 0: /* Basic Control */
- val = s->phy_control;
- break;
- case 1: /* Basic Status */
- val = s->phy_status;
- break;
- case 2: /* ID1 */
- val = 0x0007;
- break;
- case 3: /* ID2 */
- val = 0xc0d1;
- break;
- case 4: /* Auto-neg advertisement */
- val = s->phy_advertise;
- break;
- case 5: /* Auto-neg Link Partner Ability */
- val = 0x0f71;
- break;
- case 6: /* Auto-neg Expansion */
- val = 1;
- break;
- case 29: /* Interrupt source. */
- val = s->phy_int;
- s->phy_int = 0;
- imx_phy_update_irq(s);
- break;
- case 30: /* Interrupt mask */
- val = s->phy_int_mask;
- break;
- case 17:
- case 18:
- case 27:
- case 31:
- qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
- TYPE_IMX_FEC, __func__, reg);
- val = 0;
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
- TYPE_IMX_FEC, __func__, reg);
- val = 0;
- break;
- }
-
- trace_imx_phy_read(val, phy, reg);
-
- return val;
+ return lan9118_phy_read(&s->mii, reg);
}
static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
@@ -366,39 +277,7 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
reg %= 32;
- trace_imx_phy_write(val, phy, reg);
-
- switch (reg) {
- case 0: /* Basic Control */
- if (val & 0x8000) {
- imx_phy_reset(s);
- } else {
- s->phy_control = val & 0x7980;
- /* Complete autonegotiation immediately. */
- if (val & 0x1000) {
- s->phy_status |= 0x0020;
- }
- }
- break;
- case 4: /* Auto-neg advertisement */
- s->phy_advertise = (val & 0x2d7f) | 0x80;
- break;
- case 30: /* Interrupt mask */
- s->phy_int_mask = val & 0xff;
- imx_phy_update_irq(s);
- break;
- case 17:
- case 18:
- case 27:
- case 31:
- qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
- TYPE_IMX_FEC, __func__, reg);
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
- TYPE_IMX_FEC, __func__, reg);
- break;
- }
+ lan9118_phy_write(&s->mii, reg, val);
}
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
@@ -683,9 +562,6 @@ static void imx_eth_reset(DeviceState *d)
s->rx_descriptor = 0;
memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
-
- /* We also reset the PHY */
- imx_phy_reset(s);
}
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
@@ -792,7 +668,6 @@ static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
- return;
}
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
@@ -1330,6 +1205,13 @@ static void imx_eth_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(sbd, &s->irq[0]);
sysbus_init_irq(sbd, &s->irq[1]);
+ qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0);
+ object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
+ if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
+
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
@@ -1339,22 +1221,21 @@ static void imx_eth_realize(DeviceState *dev, Error **errp)
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
-static Property imx_eth_properties[] = {
+static const Property imx_eth_properties[] = {
DEFINE_NIC_PROPERTIES(IMXFECState, conf),
DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
IMXFECState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void imx_eth_class_init(ObjectClass *klass, void *data)
+static void imx_eth_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_imx_eth;
- dc->reset = imx_eth_reset;
+ device_class_set_legacy_reset(dc, imx_eth_reset);
device_class_set_props(dc, imx_eth_properties);
dc->realize = imx_eth_realize;
dc->desc = "i.MX FEC/ENET Ethernet Controller";
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 91d81b4..6dda1e5 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -16,14 +16,14 @@
#include "net/net.h"
#include "net/eth.h"
#include "hw/irq.h"
+#include "hw/net/lan9118_phy.h"
#include "hw/net/lan9118.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "qom/object.h"
//#define DEBUG_LAN9118
@@ -140,14 +140,6 @@ do { printf("lan9118: " fmt , ## __VA_ARGS__); } while (0)
#define MAC_CR_RXEN 0x00000004
#define MAC_CR_RESERVED 0x7f404213
-#define PHY_INT_ENERGYON 0x80
-#define PHY_INT_AUTONEG_COMPLETE 0x40
-#define PHY_INT_FAULT 0x20
-#define PHY_INT_DOWN 0x10
-#define PHY_INT_AUTONEG_LP 0x08
-#define PHY_INT_PARFAULT 0x04
-#define PHY_INT_AUTONEG_PAGE 0x02
-
#define GPT_TIMER_EN 0x20000000
/*
@@ -229,11 +221,8 @@ struct lan9118_state {
uint32_t mac_mii_data;
uint32_t mac_flow;
- uint32_t phy_status;
- uint32_t phy_control;
- uint32_t phy_advertise;
- uint32_t phy_int;
- uint32_t phy_int_mask;
+ Lan9118PhyState mii;
+ IRQState mii_irq;
int32_t eeprom_writable;
uint8_t eeprom[128];
@@ -275,8 +264,8 @@ struct lan9118_state {
static const VMStateDescription vmstate_lan9118 = {
.name = "lan9118",
- .version_id = 2,
- .minimum_version_id = 1,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (const VMStateField[]) {
VMSTATE_PTIMER(timer, lan9118_state),
VMSTATE_UINT32(irq_cfg, lan9118_state),
@@ -302,11 +291,6 @@ static const VMStateDescription vmstate_lan9118 = {
VMSTATE_UINT32(mac_mii_acc, lan9118_state),
VMSTATE_UINT32(mac_mii_data, lan9118_state),
VMSTATE_UINT32(mac_flow, lan9118_state),
- VMSTATE_UINT32(phy_status, lan9118_state),
- VMSTATE_UINT32(phy_control, lan9118_state),
- VMSTATE_UINT32(phy_advertise, lan9118_state),
- VMSTATE_UINT32(phy_int, lan9118_state),
- VMSTATE_UINT32(phy_int_mask, lan9118_state),
VMSTATE_INT32(eeprom_writable, lan9118_state),
VMSTATE_UINT8_ARRAY(eeprom, lan9118_state, 128),
VMSTATE_INT32(tx_fifo_size, lan9118_state),
@@ -386,9 +370,11 @@ static void lan9118_reload_eeprom(lan9118_state *s)
lan9118_mac_changed(s);
}
-static void phy_update_irq(lan9118_state *s)
+static void lan9118_update_irq(void *opaque, int n, int level)
{
- if (s->phy_int & s->phy_int_mask) {
+ lan9118_state *s = opaque;
+
+ if (level) {
s->int_sts |= PHY_INT;
} else {
s->int_sts &= ~PHY_INT;
@@ -396,33 +382,10 @@ static void phy_update_irq(lan9118_state *s)
lan9118_update(s);
}
-static void phy_update_link(lan9118_state *s)
-{
- /* Autonegotiation status mirrors link status. */
- if (qemu_get_queue(s->nic)->link_down) {
- s->phy_status &= ~0x0024;
- s->phy_int |= PHY_INT_DOWN;
- } else {
- s->phy_status |= 0x0024;
- s->phy_int |= PHY_INT_ENERGYON;
- s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
- }
- phy_update_irq(s);
-}
-
static void lan9118_set_link(NetClientState *nc)
{
- phy_update_link(qemu_get_nic_opaque(nc));
-}
-
-static void phy_reset(lan9118_state *s)
-{
- s->phy_status = 0x7809;
- s->phy_control = 0x3000;
- s->phy_advertise = 0x01e1;
- s->phy_int_mask = 0;
- s->phy_int = 0;
- phy_update_link(s);
+ lan9118_phy_update_link(&LAN9118(qemu_get_nic_opaque(nc))->mii,
+ nc->link_down);
}
static void lan9118_reset(DeviceState *d)
@@ -479,8 +442,6 @@ static void lan9118_reset(DeviceState *d)
s->read_word_n = 0;
s->write_word_n = 0;
- phy_reset(s);
-
s->eeprom_writable = 0;
lan9118_reload_eeprom(s);
}
@@ -679,7 +640,7 @@ static void do_tx_packet(lan9118_state *s)
uint32_t status;
/* FIXME: Honor TX disable, and allow queueing of packets. */
- if (s->phy_control & 0x4000) {
+ if (s->mii.control & 0x4000) {
/* This assumes the receive routine doesn't touch the VLANClient. */
qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len);
} else {
@@ -835,68 +796,6 @@ static void tx_fifo_push(lan9118_state *s, uint32_t val)
}
}
-static uint32_t do_phy_read(lan9118_state *s, int reg)
-{
- uint32_t val;
-
- switch (reg) {
- case 0: /* Basic Control */
- return s->phy_control;
- case 1: /* Basic Status */
- return s->phy_status;
- case 2: /* ID1 */
- return 0x0007;
- case 3: /* ID2 */
- return 0xc0d1;
- case 4: /* Auto-neg advertisement */
- return s->phy_advertise;
- case 5: /* Auto-neg Link Partner Ability */
- return 0x0f71;
- case 6: /* Auto-neg Expansion */
- return 1;
- /* TODO 17, 18, 27, 29, 30, 31 */
- case 29: /* Interrupt source. */
- val = s->phy_int;
- s->phy_int = 0;
- phy_update_irq(s);
- return val;
- case 30: /* Interrupt mask */
- return s->phy_int_mask;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "do_phy_read: PHY read reg %d\n", reg);
- return 0;
- }
-}
-
-static void do_phy_write(lan9118_state *s, int reg, uint32_t val)
-{
- switch (reg) {
- case 0: /* Basic Control */
- if (val & 0x8000) {
- phy_reset(s);
- break;
- }
- s->phy_control = val & 0x7980;
- /* Complete autonegotiation immediately. */
- if (val & 0x1000) {
- s->phy_status |= 0x0020;
- }
- break;
- case 4: /* Auto-neg advertisement */
- s->phy_advertise = (val & 0x2d7f) | 0x80;
- break;
- /* TODO 17, 18, 27, 31 */
- case 30: /* Interrupt mask */
- s->phy_int_mask = val & 0xff;
- phy_update_irq(s);
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "do_phy_write: PHY write reg %d = 0x%04x\n", reg, val);
- }
-}
-
static void do_mac_write(lan9118_state *s, int reg, uint32_t val)
{
switch (reg) {
@@ -930,9 +829,9 @@ static void do_mac_write(lan9118_state *s, int reg, uint32_t val)
if (val & 2) {
DPRINTF("PHY write %d = 0x%04x\n",
(val >> 6) & 0x1f, s->mac_mii_data);
- do_phy_write(s, (val >> 6) & 0x1f, s->mac_mii_data);
+ lan9118_phy_write(&s->mii, (val >> 6) & 0x1f, s->mac_mii_data);
} else {
- s->mac_mii_data = do_phy_read(s, (val >> 6) & 0x1f);
+ s->mac_mii_data = lan9118_phy_read(&s->mii, (val >> 6) & 0x1f);
DPRINTF("PHY read %d = 0x%04x\n",
(val >> 6) & 0x1f, s->mac_mii_data);
}
@@ -1127,7 +1026,7 @@ static void lan9118_writel(void *opaque, hwaddr offset,
break;
case CSR_PMT_CTRL:
if (val & 0x400) {
- phy_reset(s);
+ lan9118_phy_reset(&s->mii);
}
s->pmt_ctrl &= ~0x34e;
s->pmt_ctrl |= (val & 0x34e);
@@ -1374,6 +1273,13 @@ static void lan9118_realize(DeviceState *dev, Error **errp)
const MemoryRegionOps *mem_ops =
s->mode_16bit ? &lan9118_16bit_mem_ops : &lan9118_mem_ops;
+ qemu_init_irq(&s->mii_irq, lan9118_update_irq, s, 0);
+ object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
+ if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
+
memory_region_init_io(&s->mmio, OBJECT(dev), mem_ops, s,
"lan9118-mmio", 0x100);
sysbus_init_mmio(sbd, &s->mmio);
@@ -1398,17 +1304,16 @@ static void lan9118_realize(DeviceState *dev, Error **errp)
ptimer_transaction_commit(s->timer);
}
-static Property lan9118_properties[] = {
+static const Property lan9118_properties[] = {
DEFINE_NIC_PROPERTIES(lan9118_state, conf),
DEFINE_PROP_UINT32("mode_16bit", lan9118_state, mode_16bit, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void lan9118_class_init(ObjectClass *klass, void *data)
+static void lan9118_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = lan9118_reset;
+ device_class_set_legacy_reset(dc, lan9118_reset);
device_class_set_props(dc, lan9118_properties);
dc->vmsd = &vmstate_lan9118;
dc->realize = lan9118_realize;
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
new file mode 100644
index 0000000..4c4e03d
--- /dev/null
+++ b/hw/net/lan9118_phy.c
@@ -0,0 +1,222 @@
+/*
+ * SMSC LAN9118 PHY emulation
+ *
+ * Copyright (c) 2009 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
+ *
+ * This code is licensed under the GNU GPL v2
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/net/lan9118_phy.h"
+#include "hw/net/mii.h"
+#include "hw/irq.h"
+#include "hw/resettable.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "trace.h"
+
+#define PHY_INT_ENERGYON (1 << 7)
+#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
+#define PHY_INT_FAULT (1 << 5)
+#define PHY_INT_DOWN (1 << 4)
+#define PHY_INT_AUTONEG_LP (1 << 3)
+#define PHY_INT_PARFAULT (1 << 2)
+#define PHY_INT_AUTONEG_PAGE (1 << 1)
+
+static void lan9118_phy_update_irq(Lan9118PhyState *s)
+{
+ qemu_set_irq(s->irq, !!(s->ints & s->int_mask));
+}
+
+uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg)
+{
+ uint16_t val;
+
+ switch (reg) {
+ case MII_BMCR:
+ val = s->control;
+ break;
+ case MII_BMSR:
+ val = s->status;
+ break;
+ case MII_PHYID1:
+ val = SMSCLAN9118_PHYID1;
+ break;
+ case MII_PHYID2:
+ val = SMSCLAN9118_PHYID2;
+ break;
+ case MII_ANAR:
+ val = s->advertise;
+ break;
+ case MII_ANLPAR:
+ val = MII_ANLPAR_PAUSEASY | MII_ANLPAR_PAUSE | MII_ANLPAR_T4 |
+ MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD |
+ MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
+ break;
+ case MII_ANER:
+ val = MII_ANER_NWAY;
+ break;
+ case 29: /* Interrupt source. */
+ val = s->ints;
+ s->ints = 0;
+ lan9118_phy_update_irq(s);
+ break;
+ case 30: /* Interrupt mask */
+ val = s->int_mask;
+ break;
+ case 17:
+ case 18:
+ case 27:
+ case 31:
+ qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n",
+ __func__, reg);
+ val = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n",
+ __func__, reg);
+ val = 0;
+ break;
+ }
+
+ trace_lan9118_phy_read(val, reg);
+
+ return val;
+}
+
+void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val)
+{
+ trace_lan9118_phy_write(val, reg);
+
+ switch (reg) {
+ case MII_BMCR:
+ if (val & MII_BMCR_RESET) {
+ lan9118_phy_reset(s);
+ } else {
+ s->control = val & (MII_BMCR_LOOPBACK | MII_BMCR_SPEED100 |
+ MII_BMCR_AUTOEN | MII_BMCR_PDOWN | MII_BMCR_FD |
+ MII_BMCR_CTST);
+ /* Complete autonegotiation immediately. */
+ if (val & MII_BMCR_AUTOEN) {
+ s->status |= MII_BMSR_AN_COMP;
+ }
+ }
+ break;
+ case MII_ANAR:
+ s->advertise = (val & (MII_ANAR_RFAULT | MII_ANAR_PAUSE_ASYM |
+ MII_ANAR_PAUSE | MII_ANAR_TXFD | MII_ANAR_10FD |
+ MII_ANAR_10 | MII_ANAR_SELECT))
+ | MII_ANAR_TX;
+ break;
+ case 30: /* Interrupt mask */
+ s->int_mask = val & 0xff;
+ lan9118_phy_update_irq(s);
+ break;
+ case 17:
+ case 18:
+ case 27:
+ case 31:
+ qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n",
+ __func__, reg);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n",
+ __func__, reg);
+ break;
+ }
+}
+
+void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down)
+{
+ s->link_down = link_down;
+
+ /* Autonegotiation status mirrors link status. */
+ if (link_down) {
+ trace_lan9118_phy_update_link("down");
+ s->status &= ~(MII_BMSR_AN_COMP | MII_BMSR_LINK_ST);
+ s->ints |= PHY_INT_DOWN;
+ } else {
+ trace_lan9118_phy_update_link("up");
+ s->status |= MII_BMSR_AN_COMP | MII_BMSR_LINK_ST;
+ s->ints |= PHY_INT_ENERGYON;
+ s->ints |= PHY_INT_AUTONEG_COMPLETE;
+ }
+ lan9118_phy_update_irq(s);
+}
+
+void lan9118_phy_reset(Lan9118PhyState *s)
+{
+ trace_lan9118_phy_reset();
+
+ s->control = MII_BMCR_AUTOEN | MII_BMCR_SPEED100;
+ s->status = MII_BMSR_100TX_FD
+ | MII_BMSR_100TX_HD
+ | MII_BMSR_10T_FD
+ | MII_BMSR_10T_HD
+ | MII_BMSR_AUTONEG
+ | MII_BMSR_EXTCAP;
+ s->advertise = MII_ANAR_TXFD
+ | MII_ANAR_TX
+ | MII_ANAR_10FD
+ | MII_ANAR_10
+ | MII_ANAR_CSMACD;
+ s->int_mask = 0;
+ s->ints = 0;
+ lan9118_phy_update_link(s, s->link_down);
+}
+
+static void lan9118_phy_reset_hold(Object *obj, ResetType type)
+{
+ Lan9118PhyState *s = LAN9118_PHY(obj);
+
+ lan9118_phy_reset(s);
+}
+
+static void lan9118_phy_init(Object *obj)
+{
+ Lan9118PhyState *s = LAN9118_PHY(obj);
+
+ qdev_init_gpio_out(DEVICE(s), &s->irq, 1);
+}
+
+static const VMStateDescription vmstate_lan9118_phy = {
+ .name = "lan9118-phy",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT16(status, Lan9118PhyState),
+ VMSTATE_UINT16(control, Lan9118PhyState),
+ VMSTATE_UINT16(advertise, Lan9118PhyState),
+ VMSTATE_UINT16(ints, Lan9118PhyState),
+ VMSTATE_UINT16(int_mask, Lan9118PhyState),
+ VMSTATE_BOOL(link_down, Lan9118PhyState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void lan9118_phy_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ rc->phases.hold = lan9118_phy_reset_hold;
+ dc->vmsd = &vmstate_lan9118_phy;
+}
+
+static const TypeInfo types[] = {
+ {
+ .name = TYPE_LAN9118_PHY,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Lan9118PhyState),
+ .instance_init = lan9118_phy_init,
+ .class_init = lan9118_phy_class_init,
+ }
+};
+
+DEFINE_TYPES(types)
diff --git a/hw/net/lance.c b/hw/net/lance.c
index e1ed24c..dfb855c 100644
--- a/hw/net/lance.c
+++ b/hw/net/lance.c
@@ -43,7 +43,7 @@
#include "hw/net/lance.h"
#include "hw/qdev-properties.h"
#include "trace.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static void parent_lance_reset(void *opaque, int irq, int level)
@@ -137,21 +137,20 @@ static void lance_instance_init(Object *obj)
DEVICE(obj));
}
-static Property lance_properties[] = {
+static const Property lance_properties[] = {
DEFINE_PROP_LINK("dma", SysBusPCNetState, state.dma_opaque,
TYPE_DEVICE, DeviceState *),
DEFINE_NIC_PROPERTIES(SysBusPCNetState, state.conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void lance_class_init(ObjectClass *klass, void *data)
+static void lance_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = lance_realize;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->fw_name = "ethernet";
- dc->reset = lance_reset;
+ device_class_set_legacy_reset(dc, lance_reset);
dc->vmsd = &vmstate_lance;
device_class_set_props(dc, lance_properties);
}
diff --git a/hw/net/lasi_i82596.c b/hw/net/lasi_i82596.c
index fcf7fae..9e1dd21 100644
--- a/hw/net/lasi_i82596.c
+++ b/hw/net/lasi_i82596.c
@@ -14,7 +14,7 @@
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/sysbus.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "net/eth.h"
#include "hw/net/lasi_82596.h"
#include "hw/net/i82596.h"
@@ -158,19 +158,18 @@ static void lasi_82596_instance_init(Object *obj)
DEVICE(obj));
}
-static Property lasi_82596_properties[] = {
+static const Property lasi_82596_properties[] = {
DEFINE_NIC_PROPERTIES(SysBusI82596State, state.conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void lasi_82596_class_init(ObjectClass *klass, void *data)
+static void lasi_82596_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = lasi_82596_realize;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->fw_name = "ethernet";
- dc->reset = lasi_82596_reset;
+ device_class_set_legacy_reset(dc, lasi_82596_reset);
dc->vmsd = &vmstate_lasi_82596;
dc->user_creatable = false;
device_class_set_props(dc, lasi_82596_properties);
diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c
index e690271..ae128fa 100644
--- a/hw/net/mcf_fec.c
+++ b/hw/net/mcf_fec.c
@@ -16,8 +16,7 @@
#include "hw/net/mii.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
//#define DEBUG_FEC 1
@@ -661,19 +660,18 @@ static void mcf_fec_instance_init(Object *obj)
}
}
-static Property mcf_fec_properties[] = {
+static const Property mcf_fec_properties[] = {
DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mcf_fec_class_init(ObjectClass *oc, void *data)
+static void mcf_fec_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->realize = mcf_fec_realize;
dc->desc = "MCF Fast Ethernet Controller network device";
- dc->reset = mcf_fec_reset;
+ device_class_set_legacy_reset(dc, mcf_fec_reset);
device_class_set_props(dc, mcf_fec_properties);
}
diff --git a/hw/net/meson.build b/hw/net/meson.build
index b742687..e6759e2 100644
--- a/hw/net/meson.build
+++ b/hw/net/meson.build
@@ -19,6 +19,7 @@ system_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('vmxnet3.c'))
system_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c'))
system_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c'))
+system_ss.add(when: 'CONFIG_LAN9118_PHY', if_true: files('lan9118_phy.c'))
system_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c'))
system_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c'))
system_ss.add(when: 'CONFIG_XGMAC', if_true: files('xgmac.c'))
@@ -39,8 +40,8 @@ system_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c'))
system_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c'))
system_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c', 'npcm_gmac.c'))
+system_ss.add(when: 'CONFIG_NPCM8XX', if_true: files('npcm_pcs.c'))
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c'))
system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c'))
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_llan.c'))
system_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c'))
diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c
index df5101a..583aa1c 100644
--- a/hw/net/mipsnet.c
+++ b/hw/net/mipsnet.c
@@ -266,19 +266,18 @@ static void mipsnet_sysbus_reset(DeviceState *dev)
mipsnet_reset(s);
}
-static Property mipsnet_properties[] = {
+static const Property mipsnet_properties[] = {
DEFINE_NIC_PROPERTIES(MIPSnetState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mipsnet_class_init(ObjectClass *klass, void *data)
+static void mipsnet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mipsnet_realize;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->desc = "MIPS Simulator network device";
- dc->reset = mipsnet_sysbus_reset;
+ device_class_set_legacy_reset(dc, mipsnet_sysbus_reset);
dc->vmsd = &vmstate_mipsnet;
device_class_set_props(dc, mipsnet_properties);
}
diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c
index c1fc10d..5904597 100644
--- a/hw/net/msf2-emac.c
+++ b/hw/net/msf2-emac.c
@@ -546,11 +546,10 @@ static void msf2_emac_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}
-static Property msf2_emac_properties[] = {
+static const Property msf2_emac_properties[] = {
DEFINE_PROP_LINK("ahb-bus", MSF2EmacState, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_NIC_PROPERTIES(MSF2EmacState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_msf2_emac = {
@@ -566,12 +565,12 @@ static const VMStateDescription vmstate_msf2_emac = {
}
};
-static void msf2_emac_class_init(ObjectClass *klass, void *data)
+static void msf2_emac_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = msf2_emac_realize;
- dc->reset = msf2_emac_reset;
+ device_class_set_legacy_reset(dc, msf2_emac_reset);
dc->vmsd = &vmstate_msf2_emac;
device_class_set_props(dc, msf2_emac_properties);
}
diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c
index 96c65f4..6f08846 100644
--- a/hw/net/mv88w8618_eth.c
+++ b/hw/net/mv88w8618_eth.c
@@ -12,7 +12,7 @@
#include "hw/irq.h"
#include "hw/net/mv88w8618_eth.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "net/net.h"
#define MP_ETH_SIZE 0x00001000
@@ -371,14 +371,13 @@ static const VMStateDescription mv88w8618_eth_vmsd = {
}
};
-static Property mv88w8618_eth_properties[] = {
+static const Property mv88w8618_eth_properties[] = {
DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf),
DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mv88w8618_eth_class_init(ObjectClass *klass, void *data)
+static void mv88w8618_eth_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c
index 26980e0..673c785 100644
--- a/hw/net/ne2000-isa.c
+++ b/hw/net/ne2000-isa.c
@@ -27,7 +27,7 @@
#include "hw/net/ne2000-isa.h"
#include "migration/vmstate.h"
#include "ne2000.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
@@ -79,14 +79,13 @@ static void isa_ne2000_realizefn(DeviceState *dev, Error **errp)
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a);
}
-static Property ne2000_isa_properties[] = {
+static const Property ne2000_isa_properties[] = {
DEFINE_PROP_UINT32("iobase", ISANE2000State, iobase, 0x300),
DEFINE_PROP_UINT32("irq", ISANE2000State, isairq, 9),
DEFINE_NIC_PROPERTIES(ISANE2000State, ne2000.c),
- DEFINE_PROP_END_OF_LIST(),
};
-static void isa_ne2000_class_initfn(ObjectClass *klass, void *data)
+static void isa_ne2000_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c
index 7477306..ce937e1 100644
--- a/hw/net/ne2000-pci.c
+++ b/hw/net/ne2000-pci.c
@@ -28,7 +28,7 @@
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "ne2000.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
typedef struct PCINE2000State {
PCIDevice dev;
@@ -96,12 +96,11 @@ static void ne2000_instance_init(Object *obj)
&pci_dev->qdev);
}
-static Property ne2000_properties[] = {
+static const Property ne2000_properties[] = {
DEFINE_NIC_PROPERTIES(PCINE2000State, ne2000.c),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ne2000_class_init(ObjectClass *klass, void *data)
+static void ne2000_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -123,7 +122,7 @@ static const TypeInfo ne2000_info = {
.instance_size = sizeof(PCINE2000State),
.class_init = ne2000_class_init,
.instance_init = ne2000_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c
index b482c5f..b1923c8 100644
--- a/hw/net/ne2000.c
+++ b/hw/net/ne2000.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "net/eth.h"
#include "qemu/module.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "ne2000.h"
diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c
index 32e5f3f..f87b6f0 100644
--- a/hw/net/net_rx_pkt.c
+++ b/hw/net/net_rx_pkt.c
@@ -209,12 +209,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
*l4hdr_proto = pkt->l4hdr_info.proto;
}
-size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
-{
- assert(pkt);
- return pkt->l3hdr_off;
-}
-
size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
{
assert(pkt);
@@ -375,8 +369,7 @@ net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt,
_net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
break;
default:
- assert(false);
- break;
+ g_assert_not_reached();
}
net_toeplitz_key_init(&key_data, key);
@@ -427,13 +420,6 @@ struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
return pkt->vec;
}
-uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt)
-{
- assert(pkt);
-
- return pkt->vec_len;
-}
-
void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
struct virtio_net_hdr *vhdr)
{
diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h
index 55ec67a..ea077f5 100644
--- a/hw/net/net_rx_pkt.h
+++ b/hw/net/net_rx_pkt.h
@@ -78,14 +78,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
EthL4HdrProto *l4hdr_proto);
/**
-* fetches L3 header offset
-*
-* @pkt: packet
-*
-*/
-size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt);
-
-/**
* fetches L4 header offset
*
* @pkt: packet
@@ -268,15 +260,6 @@ net_rx_pkt_attach_data(struct NetRxPkt *pkt, const void *data,
struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt);
/**
-* returns io vector length that holds the attached data
-*
-* @pkt: packet
-* @ret: IOVec length
-*
-*/
-uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt);
-
-/**
* prints rx packet data if debug is enabled
*
* @pkt: packet
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index 1f79b82..903238d 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -141,10 +141,6 @@ bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt)
uint32_t csum = 0;
struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG;
- if (iov_size(pl_start_frag, pkt->payload_frags) < 8 + sizeof(csum)) {
- return false;
- }
-
if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) {
return false;
}
diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c
index d1583b6..9ba35e2 100644
--- a/hw/net/npcm7xx_emc.c
+++ b/hw/net/npcm7xx_emc.c
@@ -29,8 +29,7 @@
#include "qemu/osdep.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "hw/irq.h"
#include "hw/qdev-clock.h"
@@ -43,7 +42,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/units.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "trace.h"
#define CRC_LENGTH 4
@@ -846,12 +845,11 @@ static const VMStateDescription vmstate_npcm7xx_emc = {
},
};
-static Property npcm7xx_emc_properties[] = {
+static const Property npcm7xx_emc_properties[] = {
DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_emc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -859,7 +857,7 @@ static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
dc->desc = "NPCM7xx EMC Controller";
dc->realize = npcm7xx_emc_realize;
dc->unrealize = npcm7xx_emc_unrealize;
- dc->reset = npcm7xx_emc_reset;
+ device_class_set_legacy_reset(dc, npcm7xx_emc_reset);
dc->vmsd = &vmstate_npcm7xx_emc;
device_class_set_props(dc, npcm7xx_emc_properties);
}
diff --git a/hw/net/npcm_gmac.c b/hw/net/npcm_gmac.c
index 1b71e25..a434112 100644
--- a/hw/net/npcm_gmac.c
+++ b/hw/net/npcm_gmac.c
@@ -33,7 +33,7 @@
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "trace.h"
REG32(NPCM_DMA_BUS_MODE, 0x1000)
@@ -546,9 +546,8 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac)
/* 1 = DMA Owned, 0 = Software Owned */
if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "TX Descriptor @ 0x%x is owned by software\n",
- desc_addr);
+ trace_npcm_gmac_tx_desc_owner(DEVICE(gmac)->canonical_path,
+ desc_addr);
gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU;
gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
NPCM_DMA_STATUS_TX_SUSPENDED_STATE);
@@ -913,12 +912,11 @@ static const VMStateDescription vmstate_npcm_gmac = {
},
};
-static Property npcm_gmac_properties[] = {
+static const Property npcm_gmac_properties[] = {
DEFINE_NIC_PROPERTIES(NPCMGMACState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void npcm_gmac_class_init(ObjectClass *klass, void *data)
+static void npcm_gmac_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -926,7 +924,7 @@ static void npcm_gmac_class_init(ObjectClass *klass, void *data)
dc->desc = "NPCM GMAC Controller";
dc->realize = npcm_gmac_realize;
dc->unrealize = npcm_gmac_unrealize;
- dc->reset = npcm_gmac_reset;
+ device_class_set_legacy_reset(dc, npcm_gmac_reset);
dc->vmsd = &vmstate_npcm_gmac;
device_class_set_props(dc, npcm_gmac_properties);
}
diff --git a/hw/net/npcm_pcs.c b/hw/net/npcm_pcs.c
new file mode 100644
index 0000000..6aec105
--- /dev/null
+++ b/hw/net/npcm_pcs.c
@@ -0,0 +1,410 @@
+/*
+ * Nuvoton NPCM8xx PCS Module
+ *
+ * Copyright 2022 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/*
+ * Disclaimer:
+ * Currently we only implemented the default values of the registers and
+ * the soft reset feature. These are required to boot up the GMAC module
+ * in Linux kernel for NPCM845 boards. Other functionalities are not modeled.
+ */
+
+#include "qemu/osdep.h"
+
+#include "exec/hwaddr.h"
+#include "hw/registerfields.h"
+#include "hw/net/npcm_pcs.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/units.h"
+#include "trace.h"
+
+#define NPCM_PCS_IND_AC_BA 0x1fe
+#define NPCM_PCS_IND_SR_CTL 0x1e00
+#define NPCM_PCS_IND_SR_MII 0x1f00
+#define NPCM_PCS_IND_SR_TIM 0x1f07
+#define NPCM_PCS_IND_VR_MII 0x1f80
+
+REG16(NPCM_PCS_SR_CTL_ID1, 0x08)
+REG16(NPCM_PCS_SR_CTL_ID2, 0x0a)
+REG16(NPCM_PCS_SR_CTL_STS, 0x10)
+
+REG16(NPCM_PCS_SR_MII_CTRL, 0x00)
+REG16(NPCM_PCS_SR_MII_STS, 0x02)
+REG16(NPCM_PCS_SR_MII_DEV_ID1, 0x04)
+REG16(NPCM_PCS_SR_MII_DEV_ID2, 0x06)
+REG16(NPCM_PCS_SR_MII_AN_ADV, 0x08)
+REG16(NPCM_PCS_SR_MII_LP_BABL, 0x0a)
+REG16(NPCM_PCS_SR_MII_AN_EXPN, 0x0c)
+REG16(NPCM_PCS_SR_MII_EXT_STS, 0x1e)
+
+REG16(NPCM_PCS_SR_TIM_SYNC_ABL, 0x10)
+REG16(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR, 0x12)
+REG16(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_UPR, 0x14)
+REG16(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR, 0x16)
+REG16(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_UPR, 0x18)
+REG16(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR, 0x1a)
+REG16(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_UPR, 0x1c)
+REG16(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR, 0x1e)
+REG16(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_UPR, 0x20)
+
+REG16(NPCM_PCS_VR_MII_MMD_DIG_CTRL1, 0x000)
+REG16(NPCM_PCS_VR_MII_AN_CTRL, 0x002)
+REG16(NPCM_PCS_VR_MII_AN_INTR_STS, 0x004)
+REG16(NPCM_PCS_VR_MII_TC, 0x006)
+REG16(NPCM_PCS_VR_MII_DBG_CTRL, 0x00a)
+REG16(NPCM_PCS_VR_MII_EEE_MCTRL0, 0x00c)
+REG16(NPCM_PCS_VR_MII_EEE_TXTIMER, 0x010)
+REG16(NPCM_PCS_VR_MII_EEE_RXTIMER, 0x012)
+REG16(NPCM_PCS_VR_MII_LINK_TIMER_CTRL, 0x014)
+REG16(NPCM_PCS_VR_MII_EEE_MCTRL1, 0x016)
+REG16(NPCM_PCS_VR_MII_DIG_STS, 0x020)
+REG16(NPCM_PCS_VR_MII_ICG_ERRCNT1, 0x022)
+REG16(NPCM_PCS_VR_MII_MISC_STS, 0x030)
+REG16(NPCM_PCS_VR_MII_RX_LSTS, 0x040)
+REG16(NPCM_PCS_VR_MII_MP_TX_BSTCTRL0, 0x070)
+REG16(NPCM_PCS_VR_MII_MP_TX_LVLCTRL0, 0x074)
+REG16(NPCM_PCS_VR_MII_MP_TX_GENCTRL0, 0x07a)
+REG16(NPCM_PCS_VR_MII_MP_TX_GENCTRL1, 0x07c)
+REG16(NPCM_PCS_VR_MII_MP_TX_STS, 0x090)
+REG16(NPCM_PCS_VR_MII_MP_RX_GENCTRL0, 0x0b0)
+REG16(NPCM_PCS_VR_MII_MP_RX_GENCTRL1, 0x0b2)
+REG16(NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0, 0x0ba)
+REG16(NPCM_PCS_VR_MII_MP_MPLL_CTRL0, 0x0f0)
+REG16(NPCM_PCS_VR_MII_MP_MPLL_CTRL1, 0x0f2)
+REG16(NPCM_PCS_VR_MII_MP_MPLL_STS, 0x110)
+REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL2, 0x126)
+REG16(NPCM_PCS_VR_MII_MP_LVL_CTRL, 0x130)
+REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL0, 0x132)
+REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL1, 0x134)
+REG16(NPCM_PCS_VR_MII_DIG_CTRL2, 0x1c2)
+REG16(NPCM_PCS_VR_MII_DIG_ERRCNT_SEL, 0x1c4)
+
+/* Register Fields */
+#define NPCM_PCS_SR_MII_CTRL_RST BIT(15)
+
+static const uint16_t npcm_pcs_sr_ctl_cold_reset_values[NPCM_PCS_NR_SR_CTLS] = {
+ [R_NPCM_PCS_SR_CTL_ID1] = 0x699e,
+ [R_NPCM_PCS_SR_CTL_STS] = 0x8000,
+};
+
+static const uint16_t npcm_pcs_sr_mii_cold_reset_values[NPCM_PCS_NR_SR_MIIS] = {
+ [R_NPCM_PCS_SR_MII_CTRL] = 0x1140,
+ [R_NPCM_PCS_SR_MII_STS] = 0x0109,
+ [R_NPCM_PCS_SR_MII_DEV_ID1] = 0x699e,
+ [R_NPCM_PCS_SR_MII_DEV_ID2] = 0xced0,
+ [R_NPCM_PCS_SR_MII_AN_ADV] = 0x0020,
+ [R_NPCM_PCS_SR_MII_EXT_STS] = 0xc000,
+};
+
+static const uint16_t npcm_pcs_sr_tim_cold_reset_values[NPCM_PCS_NR_SR_TIMS] = {
+ [R_NPCM_PCS_SR_TIM_SYNC_ABL] = 0x0003,
+ [R_NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR] = 0x0038,
+ [R_NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR] = 0x0038,
+ [R_NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR] = 0x0058,
+ [R_NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR] = 0x0048,
+};
+
+static const uint16_t npcm_pcs_vr_mii_cold_reset_values[NPCM_PCS_NR_VR_MIIS] = {
+ [R_NPCM_PCS_VR_MII_MMD_DIG_CTRL1] = 0x2400,
+ [R_NPCM_PCS_VR_MII_AN_INTR_STS] = 0x000a,
+ [R_NPCM_PCS_VR_MII_EEE_MCTRL0] = 0x899c,
+ [R_NPCM_PCS_VR_MII_DIG_STS] = 0x0010,
+ [R_NPCM_PCS_VR_MII_MP_TX_BSTCTRL0] = 0x000a,
+ [R_NPCM_PCS_VR_MII_MP_TX_LVLCTRL0] = 0x007f,
+ [R_NPCM_PCS_VR_MII_MP_TX_GENCTRL0] = 0x0001,
+ [R_NPCM_PCS_VR_MII_MP_RX_GENCTRL0] = 0x0100,
+ [R_NPCM_PCS_VR_MII_MP_RX_GENCTRL1] = 0x1100,
+ [R_NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0] = 0x000e,
+ [R_NPCM_PCS_VR_MII_MP_MPLL_CTRL0] = 0x0100,
+ [R_NPCM_PCS_VR_MII_MP_MPLL_CTRL1] = 0x0032,
+ [R_NPCM_PCS_VR_MII_MP_MPLL_STS] = 0x0001,
+ [R_NPCM_PCS_VR_MII_MP_LVL_CTRL] = 0x0019,
+};
+
+static void npcm_pcs_soft_reset(NPCMPCSState *s)
+{
+ memcpy(s->sr_ctl, npcm_pcs_sr_ctl_cold_reset_values,
+ NPCM_PCS_NR_SR_CTLS * sizeof(uint16_t));
+ memcpy(s->sr_mii, npcm_pcs_sr_mii_cold_reset_values,
+ NPCM_PCS_NR_SR_MIIS * sizeof(uint16_t));
+ memcpy(s->sr_tim, npcm_pcs_sr_tim_cold_reset_values,
+ NPCM_PCS_NR_SR_TIMS * sizeof(uint16_t));
+ memcpy(s->vr_mii, npcm_pcs_vr_mii_cold_reset_values,
+ NPCM_PCS_NR_VR_MIIS * sizeof(uint16_t));
+}
+
+static uint16_t npcm_pcs_read_sr_ctl(NPCMPCSState *s, hwaddr offset)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_CTLS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_CTL read offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return 0;
+ }
+
+ return s->sr_ctl[regno];
+}
+
+static uint16_t npcm_pcs_read_sr_mii(NPCMPCSState *s, hwaddr offset)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_MIIS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_MII read offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return 0;
+ }
+
+ return s->sr_mii[regno];
+}
+
+static uint16_t npcm_pcs_read_sr_tim(NPCMPCSState *s, hwaddr offset)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_TIMS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_TIM read offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return 0;
+ }
+
+ return s->sr_tim[regno];
+}
+
+static uint16_t npcm_pcs_read_vr_mii(NPCMPCSState *s, hwaddr offset)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_VR_MIIS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VR_MII read offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return 0;
+ }
+
+ return s->vr_mii[regno];
+}
+
+static void npcm_pcs_write_sr_ctl(NPCMPCSState *s, hwaddr offset, uint16_t v)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_CTLS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_CTL write offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return;
+ }
+
+ s->sr_ctl[regno] = v;
+}
+
+static void npcm_pcs_write_sr_mii(NPCMPCSState *s, hwaddr offset, uint16_t v)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_MIIS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_MII write offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return;
+ }
+
+ s->sr_mii[regno] = v;
+
+ if ((offset == A_NPCM_PCS_SR_MII_CTRL) && (v & NPCM_PCS_SR_MII_CTRL_RST)) {
+ /* Trigger a soft reset */
+ npcm_pcs_soft_reset(s);
+ }
+}
+
+static void npcm_pcs_write_sr_tim(NPCMPCSState *s, hwaddr offset, uint16_t v)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_SR_TIMS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: SR_TIM write offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return;
+ }
+
+ s->sr_tim[regno] = v;
+}
+
+static void npcm_pcs_write_vr_mii(NPCMPCSState *s, hwaddr offset, uint16_t v)
+{
+ hwaddr regno = offset / sizeof(uint16_t);
+
+ if (regno >= NPCM_PCS_NR_VR_MIIS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VR_MII write offset 0x%04" HWADDR_PRIx
+ " is out of range.\n",
+ DEVICE(s)->canonical_path, offset);
+ return;
+ }
+
+ s->vr_mii[regno] = v;
+}
+
+static uint64_t npcm_pcs_read(void *opaque, hwaddr offset, unsigned size)
+{
+ NPCMPCSState *s = opaque;
+ uint16_t v = 0;
+
+ if (offset == NPCM_PCS_IND_AC_BA) {
+ v = s->indirect_access_base;
+ } else {
+ switch (s->indirect_access_base) {
+ case NPCM_PCS_IND_SR_CTL:
+ v = npcm_pcs_read_sr_ctl(s, offset);
+ break;
+
+ case NPCM_PCS_IND_SR_MII:
+ v = npcm_pcs_read_sr_mii(s, offset);
+ break;
+
+ case NPCM_PCS_IND_SR_TIM:
+ v = npcm_pcs_read_sr_tim(s, offset);
+ break;
+
+ case NPCM_PCS_IND_VR_MII:
+ v = npcm_pcs_read_vr_mii(s, offset);
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Read with invalid indirect address base: 0x%"
+ PRIx16 "\n", DEVICE(s)->canonical_path,
+ s->indirect_access_base);
+ }
+ }
+
+ trace_npcm_pcs_reg_read(DEVICE(s)->canonical_path, s->indirect_access_base,
+ offset, v);
+ return v;
+}
+
+static void npcm_pcs_write(void *opaque, hwaddr offset,
+ uint64_t v, unsigned size)
+{
+ NPCMPCSState *s = opaque;
+
+ trace_npcm_pcs_reg_write(DEVICE(s)->canonical_path, s->indirect_access_base,
+ offset, v);
+ if (offset == NPCM_PCS_IND_AC_BA) {
+ s->indirect_access_base = v;
+ } else {
+ switch (s->indirect_access_base) {
+ case NPCM_PCS_IND_SR_CTL:
+ npcm_pcs_write_sr_ctl(s, offset, v);
+ break;
+
+ case NPCM_PCS_IND_SR_MII:
+ npcm_pcs_write_sr_mii(s, offset, v);
+ break;
+
+ case NPCM_PCS_IND_SR_TIM:
+ npcm_pcs_write_sr_tim(s, offset, v);
+ break;
+
+ case NPCM_PCS_IND_VR_MII:
+ npcm_pcs_write_vr_mii(s, offset, v);
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Write with invalid indirect address base: 0x%02"
+ PRIx16 "\n", DEVICE(s)->canonical_path,
+ s->indirect_access_base);
+ }
+ }
+}
+
+static void npcm_pcs_enter_reset(Object *obj, ResetType type)
+{
+ NPCMPCSState *s = NPCM_PCS(obj);
+
+ npcm_pcs_soft_reset(s);
+}
+
+static const struct MemoryRegionOps npcm_pcs_ops = {
+ .read = npcm_pcs_read,
+ .write = npcm_pcs_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 2,
+ .max_access_size = 2,
+ .unaligned = false,
+ },
+};
+
+static void npcm_pcs_realize(DeviceState *dev, Error **errp)
+{
+ NPCMPCSState *pcs = NPCM_PCS(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ memory_region_init_io(&pcs->iomem, OBJECT(pcs), &npcm_pcs_ops, pcs,
+ TYPE_NPCM_PCS, 8 * KiB);
+ sysbus_init_mmio(sbd, &pcs->iomem);
+}
+
+static const VMStateDescription vmstate_npcm_pcs = {
+ .name = TYPE_NPCM_PCS,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(indirect_access_base, NPCMPCSState),
+ VMSTATE_UINT16_ARRAY(sr_ctl, NPCMPCSState, NPCM_PCS_NR_SR_CTLS),
+ VMSTATE_UINT16_ARRAY(sr_mii, NPCMPCSState, NPCM_PCS_NR_SR_MIIS),
+ VMSTATE_UINT16_ARRAY(sr_tim, NPCMPCSState, NPCM_PCS_NR_SR_TIMS),
+ VMSTATE_UINT16_ARRAY(vr_mii, NPCMPCSState, NPCM_PCS_NR_VR_MIIS),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static void npcm_pcs_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->desc = "NPCM PCS Controller";
+ dc->realize = npcm_pcs_realize;
+ dc->vmsd = &vmstate_npcm_pcs;
+ rc->phases.enter = npcm_pcs_enter_reset;
+}
+
+static const TypeInfo npcm_pcs_types[] = {
+ {
+ .name = TYPE_NPCM_PCS,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(NPCMPCSState),
+ .class_init = npcm_pcs_class_init,
+ },
+};
+DEFINE_TYPES(npcm_pcs_types)
diff --git a/hw/net/opencores_eth.c b/hw/net/opencores_eth.c
index f96d6ea..7e955c0 100644
--- a/hw/net/opencores_eth.c
+++ b/hw/net/opencores_eth.c
@@ -743,19 +743,18 @@ static void qdev_open_eth_reset(DeviceState *dev)
open_eth_reset(d);
}
-static Property open_eth_properties[] = {
+static const Property open_eth_properties[] = {
DEFINE_NIC_PROPERTIES(OpenEthState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void open_eth_class_init(ObjectClass *klass, void *data)
+static void open_eth_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sysbus_open_eth_realize;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->desc = "Opencores 10/100 Mbit Ethernet";
- dc->reset = qdev_open_eth_reset;
+ device_class_set_legacy_reset(dc, qdev_open_eth_reset);
device_class_set_props(dc, open_eth_properties);
}
diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c
index fe1a845..0ca5bc2 100644
--- a/hw/net/pcnet-pci.c
+++ b/hw/net/pcnet-pci.c
@@ -35,8 +35,8 @@
#include "net/net.h"
#include "qemu/module.h"
#include "qemu/timer.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
+#include "system/dma.h"
+#include "system/system.h"
#include "trace.h"
#include "pcnet.h"
@@ -252,12 +252,11 @@ static void pcnet_instance_init(Object *obj)
DEVICE(obj));
}
-static Property pcnet_properties[] = {
+static const Property pcnet_properties[] = {
DEFINE_NIC_PROPERTIES(PCIPCNetState, state.conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pcnet_class_init(ObjectClass *klass, void *data)
+static void pcnet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -269,7 +268,7 @@ static void pcnet_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_AMD_LANCE;
k->revision = 0x10;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
- dc->reset = pci_reset;
+ device_class_set_legacy_reset(dc, pci_reset);
dc->vmsd = &vmstate_pci_pcnet;
device_class_set_props(dc, pcnet_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
@@ -281,7 +280,7 @@ static const TypeInfo pcnet_info = {
.instance_size = sizeof(PCIPCNetState),
.class_init = pcnet_class_init,
.instance_init = pcnet_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/pcnet.h b/hw/net/pcnet.h
index eb7f46a..a94356e 100644
--- a/hw/net/pcnet.h
+++ b/hw/net/pcnet.h
@@ -7,7 +7,7 @@
#define PCNET_LOOPTEST_CRC 1
#define PCNET_LOOPTEST_NOCRC 2
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/irq.h"
/* BUS CONFIGURATION REGISTERS */
diff --git a/hw/net/rocker/rocker-hmp-cmds.c b/hw/net/rocker/rocker-hmp-cmds.c
index 197c6e2..df40991 100644
--- a/hw/net/rocker/rocker-hmp-cmds.c
+++ b/hw/net/rocker/rocker-hmp-cmds.c
@@ -18,7 +18,7 @@
#include "monitor/monitor.h"
#include "net/eth.h"
#include "qapi/qapi-commands-rocker.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
void hmp_rocker(Monitor *mon, const QDict *qdict)
{
diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c
index 7ea8eb6..cc49701 100644
--- a/hw/net/rocker/rocker.c
+++ b/hw/net/rocker/rocker.c
@@ -134,11 +134,6 @@ RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
return list;
}
-uint32_t rocker_fp_ports(Rocker *r)
-{
- return r->fp_ports;
-}
-
static uint32_t rocker_get_pport_by_tx_ring(Rocker *r,
DescRing *ring)
{
@@ -1464,7 +1459,7 @@ static void rocker_reset(DeviceState *dev)
DPRINTF("Reset done\n");
}
-static Property rocker_properties[] = {
+static const Property rocker_properties[] = {
DEFINE_PROP_STRING("name", Rocker, name),
DEFINE_PROP_STRING("world", Rocker, world_name),
DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker,
@@ -1473,7 +1468,6 @@ static Property rocker_properties[] = {
switch_id, 0),
DEFINE_PROP_ARRAY("ports", Rocker, fp_ports,
fp_ports_peers, qdev_prop_netdev, NICPeers),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription rocker_vmsd = {
@@ -1481,7 +1475,7 @@ static const VMStateDescription rocker_vmsd = {
.unmigratable = 1,
};
-static void rocker_class_init(ObjectClass *klass, void *data)
+static void rocker_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1494,7 +1488,7 @@ static void rocker_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_NETWORK_OTHER;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->desc = "Rocker Switch";
- dc->reset = rocker_reset;
+ device_class_set_legacy_reset(dc, rocker_reset);
device_class_set_props(dc, rocker_properties);
dc->vmsd = &rocker_vmsd;
}
@@ -1504,7 +1498,7 @@ static const TypeInfo rocker_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(Rocker),
.class_init = rocker_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/rocker/rocker.h b/hw/net/rocker/rocker.h
index f85354d..ae06c1c 100644
--- a/hw/net/rocker/rocker.h
+++ b/hw/net/rocker/rocker.h
@@ -36,15 +36,7 @@ static inline G_GNUC_PRINTF(1, 2) int DPRINTF(const char *fmt, ...)
}
#endif
-#define __le16 uint16_t
-#define __le32 uint32_t
-#define __le64 uint64_t
-
-#define __be16 uint16_t
-#define __be32 uint32_t
-#define __be64 uint64_t
-
-static inline bool ipv4_addr_is_multicast(__be32 addr)
+static inline bool ipv4_addr_is_multicast(uint32_t addr)
{
return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
}
@@ -52,8 +44,8 @@ static inline bool ipv4_addr_is_multicast(__be32 addr)
typedef struct ipv6_addr {
union {
uint8_t addr8[16];
- __be16 addr16[8];
- __be32 addr32[4];
+ uint16_t addr16[8];
+ uint32_t addr32[4];
};
} Ipv6Addr;
@@ -72,7 +64,6 @@ DECLARE_INSTANCE_CHECKER(Rocker, ROCKER,
TYPE_ROCKER)
Rocker *rocker_find(const char *name);
-uint32_t rocker_fp_ports(Rocker *r);
int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up);
int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr,
uint16_t vlan_id);
diff --git a/hw/net/rocker/rocker_hw.h b/hw/net/rocker/rocker_hw.h
index 1786323..7ec6bfb 100644
--- a/hw/net/rocker/rocker_hw.h
+++ b/hw/net/rocker/rocker_hw.h
@@ -9,10 +9,6 @@
#ifndef ROCKER_HW_H
#define ROCKER_HW_H
-#define __le16 uint16_t
-#define __le32 uint32_t
-#define __le64 uint64_t
-
/*
* Return codes
*/
@@ -124,12 +120,12 @@ enum {
*/
typedef struct rocker_desc {
- __le64 buf_addr;
+ uint64_t buf_addr;
uint64_t cookie;
- __le16 buf_size;
- __le16 tlv_size;
- __le16 rsvd[5]; /* pad to 32 bytes */
- __le16 comp_err;
+ uint16_t buf_size;
+ uint16_t tlv_size;
+ uint16_t rsvd[5]; /* pad to 32 bytes */
+ uint16_t comp_err;
} __attribute__((packed, aligned(8))) RockerDesc;
/*
@@ -137,9 +133,9 @@ typedef struct rocker_desc {
*/
typedef struct rocker_tlv {
- __le32 type;
- __le16 len;
- __le16 rsvd;
+ uint32_t type;
+ uint16_t len;
+ uint16_t rsvd;
} __attribute__((packed, aligned(8))) RockerTlv;
/* cmd msg */
diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c
index 5e16056..4aed178 100644
--- a/hw/net/rocker/rocker_of_dpa.c
+++ b/hw/net/rocker/rocker_of_dpa.c
@@ -52,10 +52,10 @@ typedef struct of_dpa_flow_key {
uint32_t tunnel_id; /* overlay tunnel id */
uint32_t tbl_id; /* table id */
struct {
- __be16 vlan_id; /* 0 if no VLAN */
+ uint16_t vlan_id; /* 0 if no VLAN */
MACAddr src; /* ethernet source address */
MACAddr dst; /* ethernet destination address */
- __be16 type; /* ethernet frame type */
+ uint16_t type; /* ethernet frame type */
} eth;
struct {
uint8_t proto; /* IP protocol or ARP opcode */
@@ -66,14 +66,14 @@ typedef struct of_dpa_flow_key {
union {
struct {
struct {
- __be32 src; /* IP source address */
- __be32 dst; /* IP destination address */
+ uint32_t src; /* IP source address */
+ uint32_t dst; /* IP destination address */
} addr;
union {
struct {
- __be16 src; /* TCP/UDP/SCTP source port */
- __be16 dst; /* TCP/UDP/SCTP destination port */
- __be16 flags; /* TCP flags */
+ uint16_t src; /* TCP/UDP/SCTP source port */
+ uint16_t dst; /* TCP/UDP/SCTP destination port */
+ uint16_t flags; /* TCP flags */
} tp;
struct {
MACAddr sha; /* ARP source hardware address */
@@ -86,11 +86,11 @@ typedef struct of_dpa_flow_key {
Ipv6Addr src; /* IPv6 source address */
Ipv6Addr dst; /* IPv6 destination address */
} addr;
- __be32 label; /* IPv6 flow label */
+ uint32_t label; /* IPv6 flow label */
struct {
- __be16 src; /* TCP/UDP/SCTP source port */
- __be16 dst; /* TCP/UDP/SCTP destination port */
- __be16 flags; /* TCP flags */
+ uint16_t src; /* TCP/UDP/SCTP source port */
+ uint16_t dst; /* TCP/UDP/SCTP destination port */
+ uint16_t flags; /* TCP flags */
} tp;
struct {
Ipv6Addr target; /* ND target address */
@@ -112,13 +112,13 @@ typedef struct of_dpa_flow_action {
struct {
uint32_t group_id;
uint32_t tun_log_lport;
- __be16 vlan_id;
+ uint16_t vlan_id;
} write;
struct {
- __be16 new_vlan_id;
+ uint16_t new_vlan_id;
uint32_t out_pport;
uint8_t copy_to_cpu;
- __be16 vlan_id;
+ uint16_t vlan_id;
} apply;
} OfDpaFlowAction;
@@ -143,7 +143,7 @@ typedef struct of_dpa_flow {
typedef struct of_dpa_flow_pkt_fields {
uint32_t tunnel_id;
struct eth_header *ethhdr;
- __be16 *h_proto;
+ uint16_t *h_proto;
struct vlan_header *vlanhdr;
struct ip_header *ipv4hdr;
struct ip6_header *ipv6hdr;
@@ -180,7 +180,7 @@ typedef struct of_dpa_group {
uint32_t group_id;
MACAddr src_mac;
MACAddr dst_mac;
- __be16 vlan_id;
+ uint16_t vlan_id;
} l2_rewrite;
struct {
uint16_t group_count;
@@ -190,13 +190,13 @@ typedef struct of_dpa_group {
uint32_t group_id;
MACAddr src_mac;
MACAddr dst_mac;
- __be16 vlan_id;
+ uint16_t vlan_id;
uint8_t ttl_check;
} l3_unicast;
};
} OfDpaGroup;
-static int of_dpa_mask2prefix(__be32 mask)
+static int of_dpa_mask2prefix(uint32_t mask)
{
int i;
int count = 32;
@@ -451,7 +451,7 @@ static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
fc->iovcnt = iovcnt + 2;
}
-static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
+static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, uint16_t vlan_id)
{
OfDpaFlowPktFields *fields = &fc->fields;
uint16_t h_proto = fields->ethhdr->h_proto;
@@ -486,7 +486,7 @@ static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
uint8_t *src_mac, uint8_t *dst_mac,
- __be16 vlan_id)
+ uint16_t vlan_id)
{
OfDpaFlowPktFields *fields = &fc->fields;
@@ -1635,8 +1635,8 @@ static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
return ROCKER_OK;
}
-static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
- RockerTlv **flow_tlvs)
+static void of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
+ RockerTlv **flow_tlvs)
{
key->width = FLOW_KEY_WIDTH(ip.tos);
@@ -1669,8 +1669,6 @@ static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
mask->ip.tos |=
rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
}
-
- return ROCKER_OK;
}
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
@@ -1689,7 +1687,6 @@ static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
ACL_MODE_ANY_VLAN,
ACL_MODE_ANY_TENANT,
} mode = ACL_MODE_UNKNOWN;
- int err = ROCKER_OK;
if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
@@ -1776,14 +1773,10 @@ static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
switch (ntohs(key->eth.type)) {
case 0x0800:
case 0x86dd:
- err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
+ of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
break;
}
- if (err) {
- return err;
- }
-
if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
action->write.group_id =
rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
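For context on the of_dpa_mask2prefix() signature change above: the helper reduces a network-byte-order IPv4 netmask to a prefix length, which is why plain uint32_t is an adequate drop-in for the removed __be32 alias. A self-contained sketch of that computation (an illustration, not the file's implementation):

/*
 * Count the leading one bits of a network-byte-order IPv4 netmask,
 * assuming a contiguous mask. Illustration only.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int mask_to_prefix(uint32_t be_mask)
{
    uint32_t m = ntohl(be_mask);   /* work in host order */
    int prefix = 0;

    while (m & 0x80000000u) {      /* count leading ones */
        prefix++;
        m <<= 1;
    }
    return prefix;
}

int main(void)
{
    printf("%d\n", mask_to_prefix(htonl(0xffffff00u)));   /* prints 24 */
    return 0;
}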
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index 897c86e..654a087 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -48,20 +48,18 @@
* 2011-Mar-22 Benjamin Poirier: Implemented VLAN offloading
*/
-/* For crc32 */
-
#include "qemu/osdep.h"
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "net/net.h"
#include "net/eth.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qom/object.h"
/* debug RTL8139 card */
@@ -1818,7 +1816,7 @@ static int rtl8139_transmit_one(RTL8139State *s, int descriptor)
PCIDevice *d = PCI_DEVICE(s);
int txsize = s->TxStatus[descriptor] & 0x1fff;
- uint8_t txbuffer[0x2000];
+ QEMU_UNINITIALIZED uint8_t txbuffer[0x2000];
DPRINTF("+++ transmit reading %d bytes from host memory at 0x%08x\n",
txsize, s->TxAddr[descriptor]);
@@ -2738,7 +2736,11 @@ static void rtl8139_io_writeb(void *opaque, uint8_t addr, uint32_t val)
}
break;
-
+ case RxConfig:
+ DPRINTF("RxConfig write(b) val=0x%02x\n", val);
+ rtl8139_RxConfig_write(s,
+ (rtl8139_RxConfig_read(s) & 0xFFFFFF00) | val);
+ break;
default:
DPRINTF("not implemented write(b) addr=0x%x val=0x%02x\n", addr,
val);
@@ -3408,12 +3410,11 @@ static void rtl8139_instance_init(Object *obj)
DEVICE(obj));
}
-static Property rtl8139_properties[] = {
+static const Property rtl8139_properties[] = {
DEFINE_NIC_PROPERTIES(RTL8139State, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rtl8139_class_init(ObjectClass *klass, void *data)
+static void rtl8139_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -3425,7 +3426,7 @@ static void rtl8139_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_REALTEK_8139;
k->revision = RTL8139_PCI_REVID; /* >=0x20 is for 8139C+ */
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
- dc->reset = rtl8139_reset;
+ device_class_set_legacy_reset(dc, rtl8139_reset);
dc->vmsd = &vmstate_rtl8139;
device_class_set_props(dc, rtl8139_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
@@ -3437,7 +3438,7 @@ static const TypeInfo rtl8139_info = {
.instance_size = sizeof(RTL8139State),
.class_init = rtl8139_class_init,
.instance_init = rtl8139_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
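The new RxConfig byte-write case above is a read-modify-write that replaces only the low byte lane of the 32-bit register. A self-contained sketch of the same merge, with invented names:

/*
 * Merge a byte-wide write into one lane of a 32-bit register,
 * preserving the other three bytes.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_byte(uint32_t reg, unsigned lane, uint8_t val)
{
    uint32_t shift = lane * 8;
    uint32_t mask = 0xffu << shift;

    return (reg & ~mask) | ((uint32_t)val << shift);
}

int main(void)
{
    uint32_t rxconfig = 0x12345678;

    /* A byte write to offset 0 replaces only bits [7:0], as in the hunk. */
    printf("0x%08x\n", merge_byte(rxconfig, 0, 0xAA));  /* 0x123456aa */
    return 0;
}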
diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c
index 702d0e8..5cd78e3 100644
--- a/hw/net/smc91c111.c
+++ b/hw/net/smc91c111.c
@@ -13,16 +13,23 @@
#include "net/net.h"
#include "hw/irq.h"
#include "hw/net/smc91c111.h"
+#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
-/* For crc32 */
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "qom/object.h"
/* Number of 2k memory pages available. */
#define NUM_PACKETS 4
+/*
+ * Maximum size of a data frame, including the leading status word
+ * and byte count fields and the trailing CRC, last data byte
+ * and control byte (per figure 8-1 in the Microchip Technology
+ * LAN91C111 datasheet).
+ */
+#define MAX_PACKET_SIZE 2048
#define TYPE_SMC91C111 "smc91c111"
OBJECT_DECLARE_SIMPLE_TYPE(smc91c111_state, SMC91C111)
@@ -52,7 +59,7 @@ struct smc91c111_state {
int tx_fifo_done_len;
int tx_fifo_done[NUM_PACKETS];
/* Packet buffer memory. */
- uint8_t data[NUM_PACKETS][2048];
+ uint8_t data[NUM_PACKETS][MAX_PACKET_SIZE];
uint8_t int_level;
uint8_t int_mask;
MemoryRegion mmio;
@@ -80,7 +87,8 @@ static const VMStateDescription vmstate_smc91c111 = {
VMSTATE_INT32_ARRAY(rx_fifo, smc91c111_state, NUM_PACKETS),
VMSTATE_INT32(tx_fifo_done_len, smc91c111_state),
VMSTATE_INT32_ARRAY(tx_fifo_done, smc91c111_state, NUM_PACKETS),
- VMSTATE_BUFFER_UNSAFE(data, smc91c111_state, 0, NUM_PACKETS * 2048),
+ VMSTATE_BUFFER_UNSAFE(data, smc91c111_state, 0,
+ NUM_PACKETS * MAX_PACKET_SIZE),
VMSTATE_UINT8(int_level, smc91c111_state),
VMSTATE_UINT8(int_mask, smc91c111_state),
VMSTATE_END_OF_LIST()
@@ -119,6 +127,18 @@ static const VMStateDescription vmstate_smc91c111 = {
#define RS_TOOSHORT 0x0400
#define RS_MULTICAST 0x0001
+FIELD(PTR, PTR, 0, 11)
+FIELD(PTR, NOT_EMPTY, 11, 1)
+FIELD(PTR, RESERVED, 12, 1)
+FIELD(PTR, READ, 13, 1)
+FIELD(PTR, AUTOINCR, 14, 1)
+FIELD(PTR, RCV, 15, 1)
+
+static inline bool packetnum_valid(int packet_num)
+{
+ return packet_num >= 0 && packet_num < NUM_PACKETS;
+}
+
/* Update interrupt status. */
static void smc91c111_update(smc91c111_state *s)
{
@@ -183,6 +203,15 @@ static void smc91c111_pop_rx_fifo(smc91c111_state *s)
{
int i;
+ if (s->rx_fifo_len == 0) {
+ /*
+ * The datasheet doesn't document what the behaviour is if the
+ * guest tries to pop an empty RX FIFO, and there's no obvious
+ * error status register to report it. Just ignore the attempt.
+ */
+ return;
+ }
+
s->rx_fifo_len--;
if (s->rx_fifo_len) {
for (i = 0; i < s->rx_fifo_len; i++)
@@ -210,12 +239,33 @@ static void smc91c111_pop_tx_fifo_done(smc91c111_state *s)
/* Release the memory allocated to a packet. */
static void smc91c111_release_packet(smc91c111_state *s, int packet)
{
+ if (!packetnum_valid(packet)) {
+ /*
+ * Data sheet doesn't document behaviour in this guest error
+ * case, and there is no error status register to report it.
+ * Log and ignore the attempt.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "smc91c111: attempt to release invalid packet %d\n",
+ packet);
+ return;
+ }
s->allocated &= ~(1 << packet);
if (s->tx_alloc == 0x80)
smc91c111_tx_alloc(s);
smc91c111_flush_queued_packets(s);
}
+static void smc91c111_complete_tx_packet(smc91c111_state *s, int packetnum)
+{
+ if (s->ctr & CTR_AUTO_RELEASE) {
+ /* Race? */
+ smc91c111_release_packet(s, packetnum);
+ } else if (s->tx_fifo_done_len < NUM_PACKETS) {
+ s->tx_fifo_done[s->tx_fifo_done_len++] = packetnum;
+ }
+}
+
/* Flush the TX FIFO. */
static void smc91c111_do_tx(smc91c111_state *s)
{
@@ -231,12 +281,25 @@ static void smc91c111_do_tx(smc91c111_state *s)
return;
for (i = 0; i < s->tx_fifo_len; i++) {
packetnum = s->tx_fifo[i];
+ /* queue_tx checked the packet number was valid */
+ assert(packetnum_valid(packetnum));
p = &s->data[packetnum][0];
/* Set status word. */
*(p++) = 0x01;
*(p++) = 0x40;
len = *(p++);
len |= ((int)*(p++)) << 8;
+ if (len > MAX_PACKET_SIZE) {
+ /*
+ * Datasheet doesn't say what to do here, and there is no
+ * relevant tx error condition listed. Log, and drop the packet.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "smc91c111: tx packet with bad length %d, dropping\n",
+ len);
+ smc91c111_complete_tx_packet(s, packetnum);
+ continue;
+ }
len -= 6;
control = p[len + 1];
if (control & 0x20)
@@ -265,11 +328,7 @@ static void smc91c111_do_tx(smc91c111_state *s)
}
}
#endif
- if (s->ctr & CTR_AUTO_RELEASE)
- /* Race? */
- smc91c111_release_packet(s, packetnum);
- else if (s->tx_fifo_done_len < NUM_PACKETS)
- s->tx_fifo_done[s->tx_fifo_done_len++] = packetnum;
+ smc91c111_complete_tx_packet(s, packetnum);
qemu_send_packet(qemu_get_queue(s->nic), p, len);
}
s->tx_fifo_len = 0;
@@ -279,6 +338,17 @@ static void smc91c111_do_tx(smc91c111_state *s)
/* Add a packet to the TX FIFO. */
static void smc91c111_queue_tx(smc91c111_state *s, int packet)
{
+ if (!packetnum_valid(packet)) {
+ /*
+ * Datasheet doesn't document behaviour in this error case, and
+ * there's no error status register we could report it in.
+ * Log and ignore.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "smc91c111: attempt to queue invalid packet %d\n",
+ packet);
+ return;
+ }
if (s->tx_fifo_len == NUM_PACKETS)
return;
s->tx_fifo[s->tx_fifo_len++] = packet;
@@ -310,6 +380,49 @@ static void smc91c111_reset(DeviceState *dev)
#define SET_LOW(name, val) s->name = (s->name & 0xff00) | val
#define SET_HIGH(name, val) s->name = (s->name & 0xff) | (val << 8)
+/*
+ * The pointer register's pointer is an 11 bit value (so it exactly
+ * indexes a 2048-byte data frame). Add the specified offset to it,
+ * wrapping around at the 2048 byte mark, and return the resulting
+ * wrapped value. There are flag bits in the top part of the register,
+ * but we can ignore them here as applying the mask strips them out.
+ */
+static int ptr_reg_add(smc91c111_state *s, int offset)
+{
+ return (s->ptr + offset) & R_PTR_PTR_MASK;
+}
+
+/*
+ * For an access to the Data Register at @offset, return the
+ * required offset into the packet's data frame. This will
+ * perform the pointer register autoincrement if required, and
+ * guarantees to return an in-bounds offset.
+ */
+static int data_reg_ptr(smc91c111_state *s, int offset)
+{
+ int p;
+
+ if (s->ptr & R_PTR_AUTOINCR_MASK) {
+ /*
+ * Autoincrement: use the current pointer value, and
+ * increment the pointer register's pointer field.
+ */
+ p = FIELD_EX32(s->ptr, PTR, PTR);
+ s->ptr = FIELD_DP32(s->ptr, PTR, PTR, ptr_reg_add(s, 1));
+ } else {
+ /*
+ * No autoincrement: register offset determines which
+ * byte we're addressing. Setting the pointer to the top
+ * of the data buffer and then using the pointer wrapping
+ * to read the bottom byte of the buffer is not something
+ * sensible guest software will do, but the datasheet
+ * doesn't say what the behaviour is, so we don't forbid it.
+ */
+ p = ptr_reg_add(s, offset & 3);
+ }
+ return p;
+}
+
static void smc91c111_writeb(void *opaque, hwaddr offset,
uint32_t value)
{
@@ -449,12 +562,14 @@ static void smc91c111_writeb(void *opaque, hwaddr offset,
n = s->rx_fifo[0];
else
n = s->packet_num;
- p = s->ptr & 0x07ff;
- if (s->ptr & 0x4000) {
- s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x7ff);
- } else {
- p += (offset & 3);
+ if (!packetnum_valid(n)) {
+ /* Datasheet doesn't document what to do here */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "smc91c111: attempt to write data to invalid packet %d\n",
+ n);
+ return;
}
+ p = data_reg_ptr(s, offset);
s->data[n][p] = value;
}
return;
@@ -597,12 +712,14 @@ static uint32_t smc91c111_readb(void *opaque, hwaddr offset)
n = s->rx_fifo[0];
else
n = s->packet_num;
- p = s->ptr & 0x07ff;
- if (s->ptr & 0x4000) {
- s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x07ff);
- } else {
- p += (offset & 3);
+ if (!packetnum_valid(n)) {
+ /* Datasheet doesn't document what to do here */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "smc91c111: attempt to read data from invalid packet %d\n",
+ n);
+ return 0;
}
+ p = data_reg_ptr(s, offset);
return s->data[n][p];
}
case 12: /* Interrupt status. */
@@ -698,13 +815,16 @@ static ssize_t smc91c111_receive(NetClientState *nc, const uint8_t *buf, size_t
if (crc)
packetsize += 4;
/* TODO: Flag overrun and receive errors. */
- if (packetsize > 2048)
+ if (packetsize > MAX_PACKET_SIZE) {
return -1;
+ }
packetnum = smc91c111_allocate_packet(s);
if (packetnum == 0x80)
return -1;
s->rx_fifo[s->rx_fifo_len++] = packetnum;
+ /* allocate_packet() will not hand us back an invalid packet number */
+ assert(packetnum_valid(packetnum));
p = &s->data[packetnum][0];
/* ??? Multicast packets? */
status = 0;
@@ -789,17 +909,16 @@ static void smc91c111_realize(DeviceState *dev, Error **errp)
/* ??? Save/restore. */
}
-static Property smc91c111_properties[] = {
+static const Property smc91c111_properties[] = {
DEFINE_NIC_PROPERTIES(smc91c111_state, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void smc91c111_class_init(ObjectClass *klass, void *data)
+static void smc91c111_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = smc91c111_realize;
- dc->reset = smc91c111_reset;
+ device_class_set_legacy_reset(dc, smc91c111_reset);
dc->vmsd = &vmstate_smc91c111;
device_class_set_props(dc, smc91c111_properties);
}
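To make the smc91c111 pointer-register handling above concrete: bits [10:0] of the register index a 2048-byte frame and wrap, and bit 14 selects auto-increment, matching the FIELD() definitions introduced in this patch. A toy model of that behaviour (standalone C, not the device code):

/*
 * Toy model of the pointer register: an 11-bit pointer field that
 * wraps within a 2048-byte frame, plus an auto-increment flag.
 * Mask values mirror FIELD(PTR, PTR, 0, 11) and FIELD(PTR, AUTOINCR, 14, 1).
 */
#include <stdint.h>
#include <stdio.h>

#define PTR_PTR_MASK      0x07ffu
#define PTR_AUTOINCR_MASK 0x4000u

/* Return the data-frame offset for an access, updating *ptr_reg when the
 * auto-increment flag is set; the result is always within 0..2047. */
static unsigned data_offset(uint16_t *ptr_reg, unsigned reg_offset)
{
    unsigned p;

    if (*ptr_reg & PTR_AUTOINCR_MASK) {
        p = *ptr_reg & PTR_PTR_MASK;
        *ptr_reg = (*ptr_reg & ~PTR_PTR_MASK) | ((p + 1) & PTR_PTR_MASK);
    } else {
        p = (*ptr_reg + (reg_offset & 3)) & PTR_PTR_MASK;
    }
    return p;
}

int main(void)
{
    uint16_t ptr = PTR_AUTOINCR_MASK | 0x07ff;      /* at the last byte */

    printf("%u\n", data_offset(&ptr, 0));           /* 2047 */
    printf("%u\n", data_offset(&ptr, 0));           /* wraps to 0 */
    return 0;
}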
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index 8af33d9..f6f217d 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -33,7 +33,7 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "trace.h"
#include <libfdt.h>
@@ -786,12 +786,11 @@ static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
return H_SUCCESS;
}
-static Property spapr_vlan_properties[] = {
+static const Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev),
DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf),
DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan,
compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
- DEFINE_PROP_END_OF_LIST(),
};
static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
@@ -849,7 +848,7 @@ static const VMStateDescription vmstate_spapr_llan = {
}
};
-static void spapr_vlan_class_init(ObjectClass *klass, void *data)
+static void spapr_vlan_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c
index db95766..2fc51e1 100644
--- a/hw/net/stellaris_enet.c
+++ b/hw/net/stellaris_enet.c
@@ -15,7 +15,7 @@
#include "net/net.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "qom/object.h"
//#define DEBUG_STELLARIS_ENET 1
@@ -497,17 +497,16 @@ static void stellaris_enet_realize(DeviceState *dev, Error **errp)
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
-static Property stellaris_enet_properties[] = {
+static const Property stellaris_enet_properties[] = {
DEFINE_NIC_PROPERTIES(stellaris_enet_state, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void stellaris_enet_class_init(ObjectClass *klass, void *data)
+static void stellaris_enet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = stellaris_enet_realize;
- dc->reset = stellaris_enet_reset;
+ device_class_set_legacy_reset(dc, stellaris_enet_reset);
device_class_set_props(dc, stellaris_enet_properties);
dc->vmsd = &vmstate_stellaris_enet;
}
diff --git a/hw/net/sungem.c b/hw/net/sungem.c
index dd1b4a1..b405eb8 100644
--- a/hw/net/sungem.c
+++ b/hw/net/sungem.c
@@ -17,7 +17,7 @@
#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "trace.h"
#include "qom/object.h"
@@ -1420,14 +1420,13 @@ static void sungem_instance_init(Object *obj)
DEVICE(obj));
}
-static Property sungem_properties[] = {
+static const Property sungem_properties[] = {
DEFINE_NIC_PROPERTIES(SunGEMState, conf),
/* Phy address should be 0 for most Apple machines except
* for K2 in which case it's 1. Will be set by a machine
* override.
*/
DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_sungem = {
@@ -1455,7 +1454,7 @@ static const VMStateDescription vmstate_sungem = {
}
};
-static void sungem_class_init(ObjectClass *klass, void *data)
+static void sungem_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1467,7 +1466,7 @@ static void sungem_class_init(ObjectClass *klass, void *data)
k->revision = 0x01;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
dc->vmsd = &vmstate_sungem;
- dc->reset = sungem_reset;
+ device_class_set_legacy_reset(dc, sungem_reset);
device_class_set_props(dc, sungem_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
@@ -1478,7 +1477,7 @@ static const TypeInfo sungem_info = {
.instance_size = sizeof(SunGEMState),
.class_init = sungem_class_init,
.instance_init = sungem_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
}
diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c
index ae8452e..c2f7a84 100644
--- a/hw/net/sunhme.c
+++ b/hw/net/sunhme.c
@@ -31,7 +31,7 @@
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "trace.h"
#include "qom/object.h"
@@ -177,9 +177,8 @@ struct SunHMEState {
uint16_t miiregs[HME_MII_REGS_SIZE];
};
-static Property sunhme_properties[] = {
+static const Property sunhme_properties[] = {
DEFINE_NIC_PROPERTIES(SunHMEState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
static void sunhme_reset_tx(SunHMEState *s)
@@ -938,7 +937,7 @@ static const VMStateDescription vmstate_hme = {
}
};
-static void sunhme_class_init(ObjectClass *klass, void *data)
+static void sunhme_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -948,7 +947,7 @@ static void sunhme_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_SUN_HME;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
dc->vmsd = &vmstate_hme;
- dc->reset = sunhme_reset;
+ device_class_set_legacy_reset(dc, sunhme_reset);
device_class_set_props(dc, sunhme_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
@@ -959,7 +958,7 @@ static const TypeInfo sunhme_info = {
.class_init = sunhme_class_init,
.instance_size = sizeof(SunHMEState),
.instance_init = sunhme_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
}
diff --git a/hw/net/trace-events b/hw/net/trace-events
index 78efa2e..72b69c4 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -10,10 +10,11 @@ allwinner_sun8i_emac_set_link(bool active) "Set link: active=%u"
allwinner_sun8i_emac_read(uint64_t offset, uint64_t val) "MMIO read: offset=0x%" PRIx64 " value=0x%" PRIx64
allwinner_sun8i_emac_write(uint64_t offset, uint64_t val) "MMIO write: offset=0x%" PRIx64 " value=0x%" PRIx64
-# etraxfs_eth.c
-mdio_phy_read(int regnum, uint16_t value) "read phy_reg:%d value:0x%04x"
-mdio_phy_write(int regnum, uint16_t value) "write phy_reg:%d value:0x%04x"
-mdio_bitbang(bool mdc, bool mdio, int state, uint16_t cnt, unsigned int drive) "bitbang mdc=%u mdio=%u state=%d cnt=%u drv=%d"
+# lan9118_phy.c
+lan9118_phy_read(uint16_t val, int reg) "[0x%02x] -> 0x%04" PRIx16
+lan9118_phy_write(uint16_t val, int reg) "[0x%02x] <- 0x%04" PRIx16
+lan9118_phy_update_link(const char *s) "%s"
+lan9118_phy_reset(void) ""
# lance.c
lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64"val=0x%04x"
@@ -399,9 +400,11 @@ virtio_net_announce_notify(void) ""
virtio_net_announce_timer(int round) "%d"
virtio_net_handle_announce(int round) "%d"
virtio_net_post_load_device(void)
-virtio_net_rss_disable(void)
-virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x"
-virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d"
+virtio_net_rss_load(void *nic, size_t nfds, void *fds) "nic=%p nfds=%zu fds=%p"
+virtio_net_rss_attach_ebpf(void *nic, int prog_fd) "nic=%p prog-fd=%d"
+virtio_net_rss_disable(void *nic) "nic=%p"
+virtio_net_rss_error(void *nic, const char *msg, uint32_t value) "nic=%p msg=%s, value 0x%08x"
+virtio_net_rss_enable(void *nic, uint32_t p1, uint16_t p2, uint8_t p3) "nic=%p hashes 0x%x, table of %d, key of %d"
# tulip.c
tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64
@@ -431,12 +434,8 @@ i82596_set_multicast(uint16_t count) "Added %d multicast entries"
i82596_channel_attention(void *s) "%p: Received CHANNEL ATTENTION"
# imx_fec.c
-imx_phy_read(uint32_t val, int phy, int reg) "0x%04"PRIx32" <= phy[%d].reg[%d]"
imx_phy_read_num(int phy, int configured) "read request from unconfigured phy %d (configured %d)"
-imx_phy_write(uint32_t val, int phy, int reg) "0x%04"PRIx32" => phy[%d].reg[%d]"
imx_phy_write_num(int phy, int configured) "write request to unconfigured phy %d (configured %d)"
-imx_phy_update_link(const char *s) "%s"
-imx_phy_reset(void) ""
imx_fec_read_bd(uint64_t addr, int flags, int len, int data) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x"
imx_enet_read_bd(uint64_t addr, int flags, int len, int data, int options, int status) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x option 0x%04x status 0x%04x"
imx_eth_tx_bd_busy(void) "tx_bd ran out of descriptors to transmit"
@@ -481,10 +480,11 @@ npcm_gmac_packet_received(const char* name, uint32_t len) "%s: Reception finishe
npcm_gmac_packet_sent(const char* name, uint16_t len) "%s: TX packet sent!, length: 0x%04" PRIX16
npcm_gmac_debug_desc_data(const char* name, void* addr, uint32_t des0, uint32_t des1, uint32_t des2, uint32_t des3)"%s: Address: %p Descriptor 0: 0x%04" PRIX32 " Descriptor 1: 0x%04" PRIX32 "Descriptor 2: 0x%04" PRIX32 " Descriptor 3: 0x%04" PRIX32
npcm_gmac_packet_tx_desc_data(const char* name, uint32_t tdes0, uint32_t tdes1) "%s: Tdes0: 0x%04" PRIX32 " Tdes1: 0x%04" PRIX32
+npcm_gmac_tx_desc_owner(const char* name, uint32_t desc_addr) "%s: TX Descriptor @0x%04" PRIX32 " is owned by software"
# npcm_pcs.c
-npcm_pcs_reg_read(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16
-npcm_pcs_reg_write(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16
+npcm_pcs_reg_read(const char *name, uint16_t indirect_access_base, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16
+npcm_pcs_reg_write(const char *name, uint16_t indirect_access_base, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16
# dp8393x.c
dp8393x_raise_irq(int isr) "raise irq, isr is 0x%04x"
@@ -513,3 +513,7 @@ xen_netdev_connect(int dev, unsigned int tx, unsigned int rx, int port) "vif%u t
xen_netdev_frontend_changed(const char *dev, int state) "vif%s state %d"
xen_netdev_tx(int dev, int ref, int off, int len, unsigned int flags, const char *c, const char *d, const char *m, const char *e) "vif%u ref %u off %u len %u flags 0x%x%s%s%s%s"
xen_netdev_rx(int dev, int idx, int status, int flags) "vif%u idx %d status %d flags 0x%x"
+
+# xilinx_ethlite.c
+ethlite_pkt_lost(uint32_t rx_ctrl) "rx_ctrl:0x%" PRIx32
+ethlite_pkt_size_too_big(uint64_t size) "size:0x%" PRIx64
diff --git a/hw/net/tulip.c b/hw/net/tulip.c
index 1f2ef20..319af90 100644
--- a/hw/net/tulip.c
+++ b/hw/net/tulip.c
@@ -13,7 +13,7 @@
#include "hw/qdev-properties.h"
#include "hw/nvram/eeprom93xx.h"
#include "migration/vmstate.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "tulip.h"
#include "trace.h"
#include "net/eth.h"
@@ -629,7 +629,7 @@ static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n)
static void tulip_setup_frame(TULIPState *s,
struct tulip_descriptor *desc)
{
- uint8_t buf[4096];
+ QEMU_UNINITIALIZED uint8_t buf[4096];
int len = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
int i;
@@ -1007,12 +1007,11 @@ static void tulip_instance_init(Object *obj)
&pci_dev->qdev);
}
-static Property tulip_properties[] = {
+static const Property tulip_properties[] = {
DEFINE_NIC_PROPERTIES(TULIPState, c),
- DEFINE_PROP_END_OF_LIST(),
};
-static void tulip_class_init(ObjectClass *klass, void *data)
+static void tulip_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1026,7 +1025,7 @@ static void tulip_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
dc->vmsd = &vmstate_pci_tulip;
device_class_set_props(dc, tulip_properties);
- dc->reset = tulip_qdev_reset;
+ device_class_set_legacy_reset(dc, tulip_qdev_reset);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
@@ -1036,7 +1035,7 @@ static const TypeInfo tulip_info = {
.instance_size = sizeof(TULIPState),
.class_init = tulip_class_init,
.instance_init = tulip_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 18898af..891f235 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -48,7 +48,9 @@ static const int kernel_feature_bits[] = {
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_PACKED,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
+ VIRTIO_NET_F_RSC_EXT,
VIRTIO_NET_F_HASH_REPORT,
VHOST_INVALID_FEATURE_BIT
};
@@ -78,7 +80,9 @@ static const int user_feature_bits[] = {
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_PACKED,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_NET_F_RSS,
+ VIRTIO_NET_F_RSC_EXT,
VIRTIO_NET_F_HASH_REPORT,
VIRTIO_NET_F_GUEST_USO4,
VIRTIO_NET_F_GUEST_USO6,
@@ -158,6 +162,148 @@ void vhost_net_save_acked_features(NetClientState *nc)
#endif
}
+static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev,
+ NetClientState *ncs, int data_queue_pairs, int nvhosts)
+{
+ VirtIONet *n = VIRTIO_NET(dev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+ struct vhost_net *net;
+ struct vhost_dev *hdev;
+ int r, i, j;
+ NetClientState *peer;
+
+ /*
+ * Batch all the host notifiers in a single transaction to avoid
+ * quadratic time complexity in address_space_update_ioeventfds().
+ */
+ memory_region_transaction_begin();
+
+ for (i = 0; i < nvhosts; i++) {
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else {
+ peer = qemu_get_peer(ncs, n->max_queue_pairs);
+ }
+
+ net = get_vhost_net(peer);
+ hdev = &net->dev;
+ for (j = 0; j < hdev->nvqs; j++) {
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
+ hdev->vq_index + j,
+ false);
+ if (r < 0) {
+ error_report("vhost %d VQ %d notifier cleanup failed: %d",
+ i, j, -r);
+ }
+ assert(r >= 0);
+ }
+ }
+ /*
+ * The transaction expects the ioeventfds to be open when it
+ * commits. Do it now, before the cleanup loop.
+ */
+ memory_region_transaction_commit();
+
+ for (i = 0; i < nvhosts; i++) {
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else {
+ peer = qemu_get_peer(ncs, n->max_queue_pairs);
+ }
+
+ net = get_vhost_net(peer);
+ hdev = &net->dev;
+ for (j = 0; j < hdev->nvqs; j++) {
+ virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus),
+ hdev->vq_index + j);
+ }
+ virtio_device_release_ioeventfd(dev);
+ }
+}
+
+static int vhost_net_enable_notifiers(VirtIODevice *dev,
+ NetClientState *ncs, int data_queue_pairs, int cvq)
+{
+ VirtIONet *n = VIRTIO_NET(dev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+ int nvhosts = data_queue_pairs + cvq;
+ struct vhost_net *net;
+ struct vhost_dev *hdev;
+ int r, i, j, k;
+ NetClientState *peer;
+
+ /*
+     * We will pass the notifiers to the kernel; make sure that QEMU
+ * doesn't interfere.
+ */
+ for (i = 0; i < nvhosts; i++) {
+ r = virtio_device_grab_ioeventfd(dev);
+ if (r < 0) {
+ error_report("vhost %d binding does not support host notifiers", i);
+ for (k = 0; k < i; k++) {
+ virtio_device_release_ioeventfd(dev);
+ }
+ return r;
+ }
+ }
+
+ /*
+ * Batch all the host notifiers in a single transaction to avoid
+ * quadratic time complexity in address_space_update_ioeventfds().
+ */
+ memory_region_transaction_begin();
+
+ for (i = 0; i < nvhosts; i++) {
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else {
+ peer = qemu_get_peer(ncs, n->max_queue_pairs);
+ }
+
+ net = get_vhost_net(peer);
+ hdev = &net->dev;
+
+ for (j = 0; j < hdev->nvqs; j++) {
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
+ hdev->vq_index + j,
+ true);
+ if (r < 0) {
+ error_report("vhost %d VQ %d notifier binding failed: %d",
+ i, j, -r);
+ memory_region_transaction_commit();
+ vhost_dev_disable_notifiers_nvqs(hdev, dev, j);
+ goto fail_nvhosts;
+ }
+ }
+ }
+
+ memory_region_transaction_commit();
+
+ return 0;
+fail_nvhosts:
+ vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
+ /*
+ * This for loop starts from i+1, not i, because the i-th ioeventfd
+ * has already been released in vhost_dev_disable_notifiers_nvqs().
+ */
+ for (k = i + 1; k < nvhosts; k++) {
+ virtio_device_release_ioeventfd(dev);
+ }
+
+ return r;
+}
+
+/*
+ * Stop processing guest IO notifications in vhost in the kernel.
+ * Start processing them in qemu.
+ */
+static void vhost_net_disable_notifiers(VirtIODevice *dev,
+ NetClientState *ncs, int data_queue_pairs, int cvq)
+{
+ vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs,
+ data_queue_pairs + cvq);
+}
+
static int vhost_net_get_fd(NetClientState *backend)
{
switch (backend->info->type) {
@@ -268,11 +414,6 @@ static int vhost_net_start_one(struct vhost_net *net,
}
}
- r = vhost_dev_enable_notifiers(&net->dev, dev);
- if (r < 0) {
- goto fail_notifiers;
- }
-
r = vhost_dev_start(&net->dev, dev, false);
if (r < 0) {
goto fail_start;
@@ -324,8 +465,6 @@ fail:
}
vhost_dev_stop(&net->dev, dev, false);
fail_start:
- vhost_dev_disable_notifiers(&net->dev, dev);
-fail_notifiers:
return r;
}
@@ -347,7 +486,6 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->stop) {
net->nc->info->stop(net->nc);
}
- vhost_dev_disable_notifiers(&net->dev, dev);
}
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
@@ -392,10 +530,16 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
}
+ r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq);
+ if (r < 0) {
+ error_report("Error enabling host notifiers: %d", -r);
+ goto err;
+ }
+
r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
- goto err;
+ goto err_host_notifiers;
}
for (i = 0; i < nvhosts; i++) {
@@ -410,19 +554,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
r = vhost_set_vring_enable(peer, peer->vring_enable);
if (r < 0) {
- goto err_start;
+ goto err_guest_notifiers;
}
}
r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) {
- goto err_start;
+ goto err_guest_notifiers;
}
}
return 0;
-err_start:
+err_guest_notifiers:
while (--i >= 0) {
peer = qemu_get_peer(ncs, i < data_queue_pairs ?
i : n->max_queue_pairs);
@@ -433,6 +577,8 @@ err_start:
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr);
}
+err_host_notifiers:
+ vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
err:
return r;
}
@@ -464,6 +610,8 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
fflush(stderr);
}
assert(r >= 0);
+
+ vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
}
void vhost_net_cleanup(struct vhost_net *net)
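The comments in the new vhost_net notifier helpers explain that all host-notifier changes are batched inside one memory-region transaction so the expensive ioeventfd recomputation runs once per commit rather than once per change. A toy model of that begin/commit batching (invented names, not the QEMU memory API):

/*
 * Nested begin/commit batching: the expensive recompute step runs once
 * per outermost commit instead of once per individual change.
 */
#include <stdio.h>

static int depth;
static int pending;
static int recomputes;

static void txn_begin(void) { depth++; }

static void change_one_notifier(void)
{
    pending++;
    if (depth == 0) {
        recomputes++;        /* unbatched: recompute per change */
        pending = 0;
    }
}

static void txn_commit(void)
{
    if (--depth == 0 && pending) {
        recomputes++;        /* batched: one recompute for all changes */
        pending = 0;
    }
}

int main(void)
{
    for (int i = 0; i < 8; i++) {
        change_one_notifier();          /* 8 recomputes */
    }
    txn_begin();
    for (int i = 0; i < 8; i++) {
        change_one_notifier();
    }
    txn_commit();                       /* 1 more recompute */

    printf("recomputes: %d\n", recomputes);   /* prints 9 */
    return 0;
}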
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 8f30972..eb93607 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -26,7 +26,7 @@
#include "qemu/option.h"
#include "qemu/option_int.h"
#include "qemu/config-file.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
@@ -39,14 +39,15 @@
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
+#include "system/replay.h"
#include "trace.h"
#include "monitor/qdev.h"
#include "monitor/monitor.h"
#include "hw/pci/pci_device.h"
#include "net_rx_pkt.h"
#include "hw/virtio/vhost.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#define VIRTIO_NET_VM_VERSION 11
@@ -381,7 +382,7 @@ static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
}
}
-static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+static int virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q;
@@ -417,7 +418,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
} else {
- qemu_bh_schedule(q->tx_bh);
+ replay_bh_schedule_event(q->tx_bh);
}
} else {
if (q->tx_timer) {
@@ -436,6 +437,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
}
}
}
+ return 0;
}
static void virtio_net_set_link_status(NetClientState *nc)
@@ -1240,6 +1242,7 @@ static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
return false;
}
+ trace_virtio_net_rss_attach_ebpf(nic, prog_fd);
return nc->info->set_steering_ebpf(nc, prog_fd);
}
@@ -1253,7 +1256,7 @@ static void rss_data_to_rss_config(struct VirtioNetRssData *data,
config->default_queue = data->default_queue;
}
-static bool virtio_net_attach_epbf_rss(VirtIONet *n)
+static bool virtio_net_attach_ebpf_rss(VirtIONet *n)
{
struct EBPFRSSConfig config = {};
@@ -1264,7 +1267,8 @@ static bool virtio_net_attach_epbf_rss(VirtIONet *n)
rss_data_to_rss_config(&n->rss_data, &config);
if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
- n->rss_data.indirections_table, n->rss_data.key)) {
+ n->rss_data.indirections_table, n->rss_data.key,
+ NULL)) {
return false;
}
@@ -1275,7 +1279,7 @@ static bool virtio_net_attach_epbf_rss(VirtIONet *n)
return true;
}
-static void virtio_net_detach_epbf_rss(VirtIONet *n)
+static void virtio_net_detach_ebpf_rss(VirtIONet *n)
{
virtio_net_attach_ebpf_to_backend(n->nic, -1);
}
@@ -1285,8 +1289,8 @@ static void virtio_net_commit_rss_config(VirtIONet *n)
if (n->rss_data.enabled) {
n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
if (n->rss_data.populate_hash) {
- virtio_net_detach_epbf_rss(n);
- } else if (!virtio_net_attach_epbf_rss(n)) {
+ virtio_net_detach_ebpf_rss(n);
+ } else if (!virtio_net_attach_ebpf_rss(n)) {
if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
warn_report("Can't load eBPF RSS for vhost");
} else {
@@ -1295,12 +1299,13 @@ static void virtio_net_commit_rss_config(VirtIONet *n)
}
}
- trace_virtio_net_rss_enable(n->rss_data.hash_types,
+ trace_virtio_net_rss_enable(n,
+ n->rss_data.hash_types,
n->rss_data.indirections_len,
sizeof(n->rss_data.key));
} else {
- virtio_net_detach_epbf_rss(n);
- trace_virtio_net_rss_disable();
+ virtio_net_detach_ebpf_rss(n);
+ trace_virtio_net_rss_disable(n);
}
}
@@ -1314,28 +1319,27 @@ static void virtio_net_disable_rss(VirtIONet *n)
virtio_net_commit_rss_config(n);
}
-static bool virtio_net_load_ebpf_fds(VirtIONet *n)
+static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp)
{
int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1};
int ret = true;
int i = 0;
if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) {
- warn_report("Expected %d file descriptors but got %d",
- EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
- return false;
- }
+ error_setg(errp, "Expected %d file descriptors but got %d",
+ EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
+ return false;
+ }
for (i = 0; i < n->nr_ebpf_rss_fds; i++) {
- fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i],
- &error_warn);
+ fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp);
if (fds[i] < 0) {
ret = false;
goto exit;
}
}
- ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3]);
+ ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3], errp);
exit:
if (!ret) {
@@ -1347,17 +1351,27 @@ exit:
return ret;
}
-static bool virtio_net_load_ebpf(VirtIONet *n)
+static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
{
- bool ret = false;
+ if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
+ return true;
+ }
- if (virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
- if (!(n->ebpf_rss_fds && virtio_net_load_ebpf_fds(n))) {
- ret = ebpf_rss_load(&n->ebpf_rss);
- }
+ trace_virtio_net_rss_load(n, n->nr_ebpf_rss_fds, n->ebpf_rss_fds);
+
+ /*
+     * If the user explicitly gave QEMU RSS FDs to use, then
+ * failing to use them must be considered a fatal
+ * error. If no RSS FDs were provided, QEMU is trying
+ * eBPF on a "best effort" basis only, so report a
+ * warning and allow fallback to software RSS.
+ */
+ if (n->ebpf_rss_fds) {
+ return virtio_net_load_ebpf_fds(n, errp);
}
- return ret;
+ ebpf_rss_load(&n->ebpf_rss, &error_warn);
+ return true;
}
static void virtio_net_unload_ebpf(VirtIONet *n)
@@ -1400,17 +1414,17 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
n->rss_data.indirections_len =
virtio_lduw_p(vdev, &cfg.indirection_table_mask);
- n->rss_data.indirections_len++;
if (!do_rss) {
- n->rss_data.indirections_len = 1;
+ n->rss_data.indirections_len = 0;
}
- if (!is_power_of_2(n->rss_data.indirections_len)) {
- err_msg = "Invalid size of indirection table";
+ if (n->rss_data.indirections_len >= VIRTIO_NET_RSS_MAX_TABLE_LEN) {
+ err_msg = "Too large indirection table";
err_value = n->rss_data.indirections_len;
goto error;
}
- if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
- err_msg = "Too large indirection table";
+ n->rss_data.indirections_len++;
+ if (!is_power_of_2(n->rss_data.indirections_len)) {
+ err_msg = "Invalid size of indirection table";
err_value = n->rss_data.indirections_len;
goto error;
}
@@ -1481,7 +1495,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
virtio_net_commit_rss_config(n);
return queue_pairs;
error:
- trace_virtio_net_rss_error(err_msg, err_value);
+ trace_virtio_net_rss_error(n, err_msg, err_value);
virtio_net_disable_rss(n);
return 0;
}
@@ -1641,24 +1655,28 @@ static bool virtio_net_can_receive(NetClientState *nc)
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
+ int opaque;
+ unsigned int in_bytes;
VirtIONet *n = q->n;
- if (virtio_queue_empty(q->rx_vq) ||
- (n->mergeable_rx_bufs &&
- !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
- virtio_queue_set_notification(q->rx_vq, 1);
-
- /* To avoid a race condition where the guest has made some buffers
- * available after the above check but before notification was
- * enabled, check for available buffers again.
- */
- if (virtio_queue_empty(q->rx_vq) ||
- (n->mergeable_rx_bufs &&
- !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
+
+ while (virtio_queue_empty(q->rx_vq) || n->mergeable_rx_bufs) {
+ opaque = virtqueue_get_avail_bytes(q->rx_vq, &in_bytes, NULL,
+ bufsize, 0);
+        /* Enough buffer space is available; disable notification */
+ if (bufsize <= in_bytes) {
+ break;
+ }
+
+ if (virtio_queue_enable_notification_and_check(q->rx_vq, opaque)) {
+ /* Guest has added some buffers, try again */
+ continue;
+ } else {
return 0;
}
}
virtio_queue_set_notification(q->rx_vq, 0);
+
return 1;
}
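The reworked virtio_net_has_buffers() above closes a lost-wakeup race: it snapshots the available bytes while notifications are off, and only reports the queue as empty if re-enabling notifications confirms nothing arrived after the snapshot. A single-threaded stand-in for that check/enable/re-check pattern (the names below are invented and only mirror the shape of virtio_queue_enable_notification_and_check()):

/* Toy check/enable/re-check loop against a lost wakeup. */
#include <stdbool.h>
#include <stdio.h>

static unsigned avail_idx;      /* what the guest has published */
static bool notify_enabled;

/* Snapshot availability while notifications are off. */
static unsigned snapshot_avail(void)
{
    notify_enabled = false;
    return avail_idx;
}

/* Re-enable notifications, then report whether the ring moved past the
 * snapshot so the caller loops instead of missing new buffers. */
static bool enable_notification_and_check(unsigned snap)
{
    notify_enabled = true;
    return avail_idx != snap;
}

int main(void)
{
    unsigned snap = snapshot_avail();

    avail_idx++;    /* guest adds a buffer right after our check */

    if (enable_notification_and_check(snap)) {
        printf("raced: new buffers arrived, check again\n");
    } else {
        printf("really empty: wait for a notification (enabled=%d)\n",
               notify_enabled);
    }
    return 0;
}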
@@ -1687,8 +1705,11 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
uint8_t *buf, size_t size)
{
+ size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
+ sizeof(struct udp_header);
+
if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
- (size > 27 && size < 1500) && /* normal sized MTU */
+ (size >= csum_size && size < 1500) && /* normal sized MTU */
(buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
(buf[23] == 17) && /* ip.protocol == UDP */
(buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
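Worked numbers for the new csum_size bound above, assuming the usual minimum header sizes (the struct names in the hunk are QEMU's; the constants below are only illustrative): Ethernet 14 + IPv4 20 + UDP 8 = 42 bytes, so every header byte the workaround inspects (ethertype at 12..13, ip.protocol at 23, udp.srcport at 34..35) is guaranteed present and the whole UDP header is in bounds, which the old magic bound of 27 did not ensure.

/* The arithmetic behind csum_size, spelled out. */
#include <stdio.h>

enum {
    ETH_HDR_LEN  = 14,
    IPV4_HDR_LEN = 20,   /* minimum IPv4 header, no options */
    UDP_HDR_LEN  = 8,
};

int main(void)
{
    printf("csum_size = %d\n", ETH_HDR_LEN + IPV4_HDR_LEN + UDP_HDR_LEN);
    return 0;
}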
@@ -1885,31 +1906,34 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
}
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
- size_t size, bool no_rss)
+ size_t size)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
- VirtIONetQueue *q = virtio_net_get_subqueue(nc);
+ VirtIONetQueue *q;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
- size_t lens[VIRTQUEUE_MAX_SIZE];
- struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
+ QEMU_UNINITIALIZED VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
+ QEMU_UNINITIALIZED size_t lens[VIRTQUEUE_MAX_SIZE];
+ QEMU_UNINITIALIZED struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
struct virtio_net_hdr_v1_hash extra_hdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset, j;
ssize_t err;
- if (!virtio_net_can_receive(nc)) {
- return -1;
- }
+ memset(&extra_hdr, 0, sizeof(extra_hdr));
- if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) {
+ if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
if (index >= 0) {
- NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
- return virtio_net_receive_rcu(nc2, buf, size, true);
+ nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
}
}
+ if (!virtio_net_can_receive(nc)) {
+ return -1;
+ }
+
+ q = virtio_net_get_subqueue(nc);
+
/* hdr_len refers to the header we supply to the guest */
if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
return 0;
@@ -1965,6 +1989,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
sg, elem->in_num,
offsetof(typeof(extra_hdr), hdr.num_buffers),
sizeof(extra_hdr.hdr.num_buffers));
+ } else {
+ extra_hdr.hdr.num_buffers = cpu_to_le16(1);
}
receive_header(n, sg, elem->in_num, buf, size);
@@ -2035,7 +2061,22 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
{
RCU_READ_LOCK_GUARD();
- return virtio_net_receive_rcu(nc, buf, size, false);
+ return virtio_net_receive_rcu(nc, buf, size);
+}
+
+/*
+ * Accessors to read and write the IP packet data length field. This
+ * is a potentially unaligned network-byte-order 16 bit unsigned integer
+ * pointed to by unit->ip_len.
+ */
+static uint16_t read_unit_ip_len(VirtioNetRscUnit *unit)
+{
+ return lduw_be_p(unit->ip_plen);
+}
+
+static void write_unit_ip_len(VirtioNetRscUnit *unit, uint16_t l)
+{
+ stw_be_p(unit->ip_plen, l);
}
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
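The new read_unit_ip_len()/write_unit_ip_len() accessors above lean on QEMU's lduw_be_p()/stw_be_p(), which handle a big-endian 16-bit field that may sit unaligned inside the packet buffer. A portable, self-contained equivalent of that access pattern (illustration only, not the QEMU helpers):

/* memcpy-based unaligned big-endian 16-bit load/store. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint16_t load_be16(const void *p)
{
    uint8_t b[2];

    memcpy(b, p, 2);                       /* no alignment assumption */
    return (uint16_t)((b[0] << 8) | b[1]); /* big endian: MSB first */
}

static void store_be16(void *p, uint16_t v)
{
    uint8_t b[2] = { (uint8_t)(v >> 8), (uint8_t)v };

    memcpy(p, b, 2);
}

int main(void)
{
    uint8_t pkt[5] = { 0 };

    store_be16(&pkt[1], 1500);             /* odd offset on purpose */
    printf("%u\n", load_be16(&pkt[1]));    /* prints 1500 */
    return 0;
}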
@@ -2052,7 +2093,7 @@ static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
unit->ip_plen = &ip->ip_len;
unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
- unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
+ unit->payload = read_unit_ip_len(unit) - ip_hdrlen - unit->tcp_hdrlen;
}
static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
@@ -2071,7 +2112,7 @@ static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
/* There is a difference between payload length in ipv4 and v6,
ip header is excluded in ipv6 */
- unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
+ unit->payload = read_unit_ip_len(unit) - unit->tcp_hdrlen;
}
static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
@@ -2118,7 +2159,7 @@ static void virtio_net_rsc_purge(void *opq)
chain->stat.timer++;
if (!QTAILQ_EMPTY(&chain->buffers)) {
timer_mod(chain->drain_timer,
- qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
}
}
@@ -2220,7 +2261,7 @@ static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
VirtioNetRscUnit *o_unit;
o_unit = &seg->unit;
- o_ip_len = htons(*o_unit->ip_plen);
+ o_ip_len = read_unit_ip_len(o_unit);
nseq = htonl(n_unit->tcp->th_seq);
oseq = htonl(o_unit->tcp->th_seq);
@@ -2256,7 +2297,7 @@ coalesce:
o_unit->payload += n_unit->payload; /* update new data len */
/* update field in ip header */
- *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
+ write_unit_ip_len(o_unit, o_ip_len + n_unit->payload);
/* Bring 'PUSH' big, the whql test guide says 'PUSH' can be coalesced
for windows guest, while this may change the behavior for linux
@@ -2354,7 +2395,7 @@ static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
chain->stat.empty_cache++;
virtio_net_rsc_cache_buf(chain, nc, buf, size);
timer_mod(chain->drain_timer,
- qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout);
return size;
}
@@ -2592,7 +2633,7 @@ static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
}
- chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
+ chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_net_rsc_purge, chain);
memset(&chain->stat, 0, sizeof(chain->stat));
@@ -2667,7 +2708,7 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
*/
virtio_queue_set_notification(q->tx_vq, 0);
if (q->tx_bh) {
- qemu_bh_schedule(q->tx_bh);
+ replay_bh_schedule_event(q->tx_bh);
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -2833,7 +2874,7 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
return;
}
virtio_queue_set_notification(vq, 0);
- qemu_bh_schedule(q->tx_bh);
+ replay_bh_schedule_event(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
@@ -2916,7 +2957,7 @@ static void virtio_net_tx_bh(void *opaque)
/* If we flush a full burst of packets, assume there are
* more coming and immediately reschedule */
if (ret >= n->tx_burst) {
- qemu_bh_schedule(q->tx_bh);
+ replay_bh_schedule_event(q->tx_bh);
q->tx_waiting = 1;
return;
}
@@ -2930,7 +2971,7 @@ static void virtio_net_tx_bh(void *opaque)
return;
} else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
- qemu_bh_schedule(q->tx_bh);
+ replay_bh_schedule_event(q->tx_bh);
q->tx_waiting = 1;
}
}
@@ -3026,6 +3067,15 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
virtio_net_set_queue_pairs(n);
}
+static int virtio_net_pre_load_queues(VirtIODevice *vdev)
+{
+ virtio_net_set_multiqueue(VIRTIO_NET(vdev),
+ virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_RSS) ||
+ virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MQ));
+
+ return 0;
+}
+
static int virtio_net_post_load_device(void *opaque, int version_id)
{
VirtIONet *n = opaque;
@@ -3290,6 +3340,117 @@ static const VMStateDescription vmstate_virtio_net_rss = {
},
};
+static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ NetClientState *nc;
+ struct vhost_net *net;
+
+ if (!n->nic) {
+ return NULL;
+ }
+
+ nc = qemu_get_queue(n->nic);
+ if (!nc) {
+ return NULL;
+ }
+
+ net = get_vhost_net(nc->peer);
+ if (!net) {
+ return NULL;
+ }
+
+ return &net->dev;
+}
+
+static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field,
+ JSONWriter *vmdesc)
+{
+ VirtIONet *n = pv;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ struct vhost_dev *vhdev;
+ Error *local_error = NULL;
+ int ret;
+
+ vhdev = virtio_net_get_vhost(vdev);
+ if (vhdev == NULL) {
+ error_reportf_err(local_error,
+ "Error getting vhost back-end of %s device %s: ",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return -1;
+ }
+
+ ret = vhost_save_backend_state(vhdev, f, &local_error);
+ if (ret < 0) {
+ error_reportf_err(local_error,
+ "Error saving back-end state of %s device %s: ",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ VirtIONet *n = pv;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ struct vhost_dev *vhdev;
+ Error *local_error = NULL;
+ int ret;
+
+ vhdev = virtio_net_get_vhost(vdev);
+ if (vhdev == NULL) {
+ error_reportf_err(local_error,
+ "Error getting vhost back-end of %s device %s: ",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return -1;
+ }
+
+ ret = vhost_load_backend_state(vhdev, f, &local_error);
+ if (ret < 0) {
+ error_reportf_err(local_error,
+ "Error loading back-end state of %s device %s: ",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool vhost_user_net_is_internal_migration(void *opaque)
+{
+ VirtIONet *n = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ struct vhost_dev *vhdev;
+
+ vhdev = virtio_net_get_vhost(vdev);
+ if (vhdev == NULL) {
+ return false;
+ }
+
+ return vhost_supports_device_state(vhdev);
+}
+
+static const VMStateDescription vhost_user_net_backend_state = {
+ .name = "virtio-net-device/backend",
+ .version_id = 0,
+ .needed = vhost_user_net_is_internal_migration,
+ .fields = (const VMStateField[]) {
+ {
+ .name = "backend",
+ .info = &(const VMStateInfo) {
+ .name = "virtio-net vhost-user backend state",
+ .get = vhost_user_net_load_state,
+ .put = vhost_user_net_save_state,
+ },
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_virtio_net_device = {
.name = "virtio-net-device",
.version_id = VIRTIO_NET_VM_VERSION,
@@ -3342,6 +3503,7 @@ static const VMStateDescription vmstate_virtio_net_device = {
},
.subsections = (const VMStateDescription * const []) {
&vmstate_virtio_net_rss,
+ &vhost_user_net_backend_state,
NULL
}
};
@@ -3754,7 +3916,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
net_rx_pkt_init(&n->rx_pkt);
if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
- virtio_net_load_ebpf(n);
+ virtio_net_load_ebpf(n, errp);
}
}
@@ -3887,14 +4049,6 @@ static bool dev_unplug_pending(void *opaque)
return vdc->primary_unplug_pending(dev);
}
-static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
-{
- VirtIONet *n = VIRTIO_NET(vdev);
- NetClientState *nc = qemu_get_queue(n->nic);
- struct vhost_net *net = get_vhost_net(nc->peer);
- return &net->dev;
-}
-
static const VMStateDescription vmstate_virtio_net = {
.name = "virtio-net",
.minimum_version_id = VIRTIO_NET_VM_VERSION,
@@ -3907,7 +4061,7 @@ static const VMStateDescription vmstate_virtio_net = {
.dev_unplug_pending = dev_unplug_pending,
};
-static Property virtio_net_properties[] = {
+static const Property virtio_net_properties[] = {
DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
VIRTIO_NET_F_CSUM, true),
DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
@@ -3979,10 +4133,9 @@ static Property virtio_net_properties[] = {
VIRTIO_NET_F_GUEST_USO6, true),
DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
VIRTIO_NET_F_HOST_USO, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_net_class_init(ObjectClass *klass, void *data)
+static void virtio_net_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -4004,6 +4157,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
+ vdc->pre_load_queues = virtio_net_pre_load_queues;
vdc->post_load = virtio_net_post_load_virtio;
vdc->vmsd = &vmstate_virtio_net_device;
vdc->primary_unplug_pending = primary_unplug_pending;
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 63a9187..7c0ca56 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -21,7 +21,7 @@
#include "hw/qdev-properties.h"
#include "net/tap.h"
#include "net/checksum.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -41,19 +41,9 @@
#define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1
#define VMXNET3_MSIX_BAR_SIZE 0x2000
-/* Compatibility flags for migration */
-#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0
-#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \
- (1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT)
-#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT 1
-#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE \
- (1 << VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT)
-
#define VMXNET3_EXP_EP_OFFSET (0x48)
-#define VMXNET3_MSI_OFFSET(s) \
- ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x50 : 0x84)
-#define VMXNET3_MSIX_OFFSET(s) \
- ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0 : 0x9c)
+#define VMXNET3_MSI_OFFSET (0x84)
+#define VMXNET3_MSIX_OFFSET (0x9c)
#define VMXNET3_DSN_OFFSET (0x100)
#define VMXNET3_BAR0_IDX (0)
@@ -61,8 +51,7 @@
#define VMXNET3_MSIX_BAR_IDX (2)
#define VMXNET3_OFF_MSIX_TABLE (0x000)
-#define VMXNET3_OFF_MSIX_PBA(s) \
- ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x800 : 0x1000)
+#define VMXNET3_OFF_MSIX_PBA (0x1000)
/* Link speed in Mbps should be shifted by 16 */
#define VMXNET3_LINK_SPEED (1000 << 16)
@@ -456,7 +445,6 @@ vmxnet3_setup_tx_offloads(VMXNET3State *s)
default:
g_assert_not_reached();
- return false;
}
return true;
@@ -933,7 +921,6 @@ static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt,
nocsum:
rxcd->cnc = 1;
- return;
}
static void
@@ -2124,8 +2111,8 @@ vmxnet3_init_msix(VMXNET3State *s)
&s->msix_bar,
VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE,
&s->msix_bar,
- VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA(s),
- VMXNET3_MSIX_OFFSET(s), NULL);
+ VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA,
+ VMXNET3_MSIX_OFFSET, NULL);
if (0 > res) {
VMW_WRPRN("Failed to initialize MSI-X, error %d", res);
@@ -2223,7 +2210,7 @@ static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp)
/* Interrupt pin A */
pci_dev->config[PCI_INTERRUPT_PIN] = 0x01;
- ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS,
+ ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET, VMXNET3_MAX_NMSIX_INTRS,
VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL);
/* Any error other than -ENOTSUP(board's MSI support is broken)
* is a programming error. Fall back to INTx silently on -ENOTSUP */
@@ -2251,6 +2238,7 @@ static void vmxnet3_instance_init(Object *obj)
device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", "/ethernet-phy@0",
DEVICE(obj));
+ PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
static void vmxnet3_pci_uninit(PCIDevice *pci_dev)
@@ -2472,33 +2460,14 @@ static const VMStateDescription vmstate_vmxnet3 = {
}
};
-static Property vmxnet3_properties[] = {
+static const Property vmxnet3_properties[] = {
DEFINE_NIC_PROPERTIES(VMXNET3State, conf),
- DEFINE_PROP_BIT("x-old-msi-offsets", VMXNET3State, compat_flags,
- VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT, false),
- DEFINE_PROP_BIT("x-disable-pcie", VMXNET3State, compat_flags,
- VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vmxnet3_realize(DeviceState *qdev, Error **errp)
-{
- VMXNET3Class *vc = VMXNET3_DEVICE_GET_CLASS(qdev);
- PCIDevice *pci_dev = PCI_DEVICE(qdev);
- VMXNET3State *s = VMXNET3(qdev);
-
- if (!(s->compat_flags & VMXNET3_COMPAT_FLAG_DISABLE_PCIE)) {
- pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
- }
-
- vc->parent_dc_realize(qdev, errp);
-}
-
-static void vmxnet3_class_init(ObjectClass *class, void *data)
+static void vmxnet3_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
- VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class);
c->realize = vmxnet3_pci_realize;
c->exit = vmxnet3_pci_uninit;
@@ -2509,10 +2478,8 @@ static void vmxnet3_class_init(ObjectClass *class, void *data)
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
- device_class_set_parent_realize(dc, vmxnet3_realize,
- &vc->parent_dc_realize);
dc->desc = "VMWare Paravirtualized Ethernet v3";
- dc->reset = vmxnet3_qdev_reset;
+ device_class_set_legacy_reset(dc, vmxnet3_qdev_reset);
dc->vmsd = &vmstate_vmxnet3;
device_class_set_props(dc, vmxnet3_properties);
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
@@ -2525,7 +2492,7 @@ static const TypeInfo vmxnet3_info = {
.instance_size = sizeof(VMXNET3State),
.class_init = vmxnet3_class_init,
.instance_init = vmxnet3_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
diff --git a/hw/net/vmxnet3.h b/hw/net/vmxnet3.h
index f9283f9..dbc69d5 100644
--- a/hw/net/vmxnet3.h
+++ b/hw/net/vmxnet3.h
@@ -63,8 +63,8 @@
* details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * along with this program; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 89487b4..34c6a1d 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -24,7 +24,7 @@
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
#include <sys/socket.h>
@@ -510,23 +510,22 @@ static char *xen_netdev_get_name(XenDevice *xendev, Error **errp)
if (netdev->dev == -1) {
XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
- char fe_path[XENSTORE_ABS_PATH_MAX + 1];
int idx = (xen_mode == XEN_EMULATE) ? 0 : 1;
+ Error *local_err = NULL;
char *value;
/* Theoretically we could go up to INT_MAX here but that's overkill */
while (idx < 100) {
- snprintf(fe_path, sizeof(fe_path),
- "/local/domain/%u/device/vif/%u",
- xendev->frontend_id, idx);
- value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL);
+ value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err,
+ "/local/domain/%u/device/vif/%u",
+ xendev->frontend_id, idx);
if (!value) {
if (errno == ENOENT) {
netdev->dev = idx;
+ error_free(local_err);
goto found;
}
- error_setg(errp, "cannot read %s: %s", fe_path,
- strerror(errno));
+ error_propagate(errp, local_err);
return NULL;
}
free(value);
@@ -555,13 +554,12 @@ static void xen_netdev_unrealize(XenDevice *xendev)
/* ------------------------------------------------------------- */
-static Property xen_netdev_properties[] = {
+static const Property xen_netdev_properties[] = {
DEFINE_NIC_PROPERTIES(XenNetDev, conf),
DEFINE_PROP_INT32("idx", XenNetDev, dev, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xen_netdev_class_init(ObjectClass *class, void *data)
+static void xen_netdev_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class);
diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c
index ffe3fc8..d45f872 100644
--- a/hw/net/xgmac.c
+++ b/hw/net/xgmac.c
@@ -207,7 +207,7 @@ static void xgmac_enet_send(XgmacState *s)
struct desc bd;
int frame_size;
int len;
- uint8_t frame[8192];
+ QEMU_UNINITIALIZED uint8_t frame[8192];
uint8_t *ptr;
ptr = frame;
@@ -414,12 +414,11 @@ static void xgmac_enet_realize(DeviceState *dev, Error **errp)
s->conf.macaddr.a[0];
}
-static Property xgmac_properties[] = {
+static const Property xgmac_properties[] = {
DEFINE_NIC_PROPERTIES(XgmacState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xgmac_enet_class_init(ObjectClass *klass, void *data)
+static void xgmac_enet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 05d41bd..1f5c748 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -996,7 +996,7 @@ static void xilinx_enet_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property xilinx_enet_properties[] = {
+static const Property xilinx_enet_properties[] = {
DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
@@ -1005,27 +1005,27 @@ static Property xilinx_enet_properties[] = {
tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_enet_class_init(ObjectClass *klass, void *data)
+static void xilinx_enet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = xilinx_enet_realize;
device_class_set_props(dc, xilinx_enet_properties);
- dc->reset = xilinx_axienet_reset;
+ device_class_set_legacy_reset(dc, xilinx_axienet_reset);
}
static void xilinx_enet_control_stream_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
ssc->push = xilinx_axienet_control_stream_push;
}
-static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data)
+static void xilinx_enet_data_stream_class_init(ObjectClass *klass,
+ const void *data)
{
StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
@@ -1045,7 +1045,7 @@ static const TypeInfo xilinx_enet_data_stream_info = {
.parent = TYPE_OBJECT,
.instance_size = sizeof(XilinxAXIEnetStreamSink),
.class_init = xilinx_enet_data_stream_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_STREAM_SINK },
{ }
}
@@ -1056,7 +1056,7 @@ static const TypeInfo xilinx_enet_control_stream_info = {
.parent = TYPE_OBJECT,
.instance_size = sizeof(XilinxAXIEnetStreamSink),
.class_init = xilinx_enet_control_stream_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_STREAM_SINK },
{ }
}
diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c
index 989afaf..42b19d0 100644
--- a/hw/net/xilinx_ethlite.c
+++ b/hw/net/xilinx_ethlite.c
@@ -2,6 +2,10 @@
* QEMU model of the Xilinx Ethernet Lite MAC.
*
* Copyright (c) 2009 Edgar E. Iglesias.
+ * Copyright (c) 2024 Linaro, Ltd
+ *
+ * DS580: https://docs.amd.com/v/u/en-US/xps_ethernetlite
+ * LogiCORE IP XPS Ethernet Lite Media Access Controller
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -24,27 +28,35 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
+#include "qemu/bitops.h"
#include "qom/object.h"
-#include "exec/tswap.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/misc/unimp.h"
#include "net/net.h"
+#include "trace.h"
+
+#define BUFSZ_MAX 0x07e4
+#define A_MDIO_BASE 0x07e4
+#define A_TX_BASE0 0x07f4
+#define A_TX_BASE1 0x0ff4
+#define A_RX_BASE0 0x17fc
+#define A_RX_BASE1 0x1ffc
+
+enum {
+ TX_LEN = 0,
+ TX_GIE = 1,
+ TX_CTRL = 2,
+ TX_MAX
+};
-#define D(x)
-#define R_TX_BUF0 0
-#define R_TX_LEN0 (0x07f4 / 4)
-#define R_TX_GIE0 (0x07f8 / 4)
-#define R_TX_CTRL0 (0x07fc / 4)
-#define R_TX_BUF1 (0x0800 / 4)
-#define R_TX_LEN1 (0x0ff4 / 4)
-#define R_TX_CTRL1 (0x0ffc / 4)
-
-#define R_RX_BUF0 (0x1000 / 4)
-#define R_RX_CTRL0 (0x17fc / 4)
-#define R_RX_BUF1 (0x1800 / 4)
-#define R_RX_CTRL1 (0x1ffc / 4)
-#define R_MAX (0x2000 / 4)
+enum {
+ RX_CTRL = 0,
+ RX_MAX
+};
#define GIE_GIE 0x80000000
@@ -52,174 +64,238 @@
#define CTRL_P 0x2
#define CTRL_S 0x1
+typedef struct XlnxXpsEthLitePort {
+ MemoryRegion txio;
+ MemoryRegion rxio;
+ MemoryRegion txbuf;
+ MemoryRegion rxbuf;
+
+ struct {
+ uint32_t tx_len;
+ uint32_t tx_gie;
+ uint32_t tx_ctrl;
+
+ uint32_t rx_ctrl;
+ } reg;
+} XlnxXpsEthLitePort;
+
#define TYPE_XILINX_ETHLITE "xlnx.xps-ethernetlite"
-DECLARE_INSTANCE_CHECKER(struct xlx_ethlite, XILINX_ETHLITE,
- TYPE_XILINX_ETHLITE)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxXpsEthLite, XILINX_ETHLITE)
-struct xlx_ethlite
+struct XlnxXpsEthLite
{
SysBusDevice parent_obj;
- MemoryRegion mmio;
+ EndianMode model_endianness;
+ MemoryRegion container;
qemu_irq irq;
NICState *nic;
NICConf conf;
uint32_t c_tx_pingpong;
uint32_t c_rx_pingpong;
- unsigned int txbuf;
- unsigned int rxbuf;
+ unsigned int port_index; /* dual port RAM index */
- uint32_t regs[R_MAX];
+ UnimplementedDeviceState rsvd;
+ UnimplementedDeviceState mdio;
+ XlnxXpsEthLitePort port[2];
};
-static inline void eth_pulse_irq(struct xlx_ethlite *s)
+static inline void eth_pulse_irq(XlnxXpsEthLite *s)
{
/* Only the first gie reg is active. */
- if (s->regs[R_TX_GIE0] & GIE_GIE) {
+ if (s->port[0].reg.tx_gie & GIE_GIE) {
qemu_irq_pulse(s->irq);
}
}
-static uint64_t
-eth_read(void *opaque, hwaddr addr, unsigned int size)
+static unsigned addr_to_port_index(hwaddr addr)
{
- struct xlx_ethlite *s = opaque;
- uint32_t r = 0;
+ return extract64(addr, 11, 1);
+}
- addr >>= 2;
+static void *txbuf_ptr(XlnxXpsEthLite *s, unsigned port_index)
+{
+ return memory_region_get_ram_ptr(&s->port[port_index].txbuf);
+}
- switch (addr)
- {
- case R_TX_GIE0:
- case R_TX_LEN0:
- case R_TX_LEN1:
- case R_TX_CTRL1:
- case R_TX_CTRL0:
- case R_RX_CTRL1:
- case R_RX_CTRL0:
- r = s->regs[addr];
- D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr * 4, r));
- break;
-
- default:
- r = tswap32(s->regs[addr]);
- break;
+static void *rxbuf_ptr(XlnxXpsEthLite *s, unsigned port_index)
+{
+ return memory_region_get_ram_ptr(&s->port[port_index].rxbuf);
+}
+
+static uint64_t port_tx_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ XlnxXpsEthLite *s = opaque;
+ unsigned port_index = addr_to_port_index(addr);
+ uint32_t r = 0;
+
+ switch (addr >> 2) {
+ case TX_LEN:
+ r = s->port[port_index].reg.tx_len;
+ break;
+ case TX_GIE:
+ r = s->port[port_index].reg.tx_gie;
+ break;
+ case TX_CTRL:
+ r = s->port[port_index].reg.tx_ctrl;
+ break;
+ default:
+ g_assert_not_reached();
}
+
return r;
}
-static void
-eth_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
+static void port_tx_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned int size)
{
- struct xlx_ethlite *s = opaque;
- unsigned int base = 0;
- uint32_t value = val64;
-
- addr >>= 2;
- switch (addr)
- {
- case R_TX_CTRL0:
- case R_TX_CTRL1:
- if (addr == R_TX_CTRL1)
- base = 0x800 / 4;
-
- D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n",
- __func__, addr * 4, value));
- if ((value & (CTRL_P | CTRL_S)) == CTRL_S) {
- qemu_send_packet(qemu_get_queue(s->nic),
- (void *) &s->regs[base],
- s->regs[base + R_TX_LEN0]);
- D(qemu_log("eth_tx %d\n", s->regs[base + R_TX_LEN0]));
- if (s->regs[base + R_TX_CTRL0] & CTRL_I)
- eth_pulse_irq(s);
- } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) {
- memcpy(&s->conf.macaddr.a[0], &s->regs[base], 6);
- if (s->regs[base + R_TX_CTRL0] & CTRL_I)
- eth_pulse_irq(s);
+ XlnxXpsEthLite *s = opaque;
+ unsigned port_index = addr_to_port_index(addr);
+
+ switch (addr >> 2) {
+ case TX_LEN:
+ s->port[port_index].reg.tx_len = value;
+ break;
+ case TX_GIE:
+ s->port[port_index].reg.tx_gie = value;
+ break;
+ case TX_CTRL:
+ if ((value & (CTRL_P | CTRL_S)) == CTRL_S) {
+ qemu_send_packet(qemu_get_queue(s->nic),
+ txbuf_ptr(s, port_index),
+ s->port[port_index].reg.tx_len);
+ if (s->port[port_index].reg.tx_ctrl & CTRL_I) {
+ eth_pulse_irq(s);
+ }
+ } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) {
+ memcpy(&s->conf.macaddr.a[0], txbuf_ptr(s, port_index), 6);
+ if (s->port[port_index].reg.tx_ctrl & CTRL_I) {
+ eth_pulse_irq(s);
}
+ }
+ /*
+ * We are fast and get ready pretty much immediately
+ * so we actually never flip the S nor P bits to one.
+ */
+ s->port[port_index].reg.tx_ctrl = value & ~(CTRL_P | CTRL_S);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
- /* We are fast and get ready pretty much immediately so
- we actually never flip the S nor P bits to one. */
- s->regs[addr] = value & ~(CTRL_P | CTRL_S);
- break;
+static const MemoryRegionOps eth_porttx_ops[2] = {
+ [0 ... 1] = {
+ .read = port_tx_read,
+ .write = port_tx_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
+};
- /* Keep these native. */
- case R_RX_CTRL0:
- case R_RX_CTRL1:
- if (!(value & CTRL_S)) {
- qemu_flush_queued_packets(qemu_get_queue(s->nic));
- }
- /* fall through */
- case R_TX_LEN0:
- case R_TX_LEN1:
- case R_TX_GIE0:
- D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n",
- __func__, addr * 4, value));
- s->regs[addr] = value;
- break;
-
- default:
- s->regs[addr] = tswap32(value);
- break;
+static uint64_t port_rx_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ XlnxXpsEthLite *s = opaque;
+ unsigned port_index = addr_to_port_index(addr);
+ uint32_t r = 0;
+
+ switch (addr >> 2) {
+ case RX_CTRL:
+ r = s->port[port_index].reg.rx_ctrl;
+ break;
+ default:
+ g_assert_not_reached();
}
+
+ return r;
}
-static const MemoryRegionOps eth_ops = {
- .read = eth_read,
- .write = eth_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
+static void port_rx_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned int size)
+{
+ XlnxXpsEthLite *s = opaque;
+ unsigned port_index = addr_to_port_index(addr);
+
+ switch (addr >> 2) {
+ case RX_CTRL:
+ if (!(value & CTRL_S)) {
+ qemu_flush_queued_packets(qemu_get_queue(s->nic));
+ }
+ s->port[port_index].reg.rx_ctrl = value;
+ break;
+ default:
+ g_assert_not_reached();
}
+}
+
+static const MemoryRegionOps eth_portrx_ops[2] = {
+ [0 ... 1] = {
+ .read = port_rx_read,
+ .write = port_rx_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
};
static bool eth_can_rx(NetClientState *nc)
{
- struct xlx_ethlite *s = qemu_get_nic_opaque(nc);
- unsigned int rxbase = s->rxbuf * (0x800 / 4);
+ XlnxXpsEthLite *s = qemu_get_nic_opaque(nc);
- return !(s->regs[rxbase + R_RX_CTRL0] & CTRL_S);
+ return !(s->port[s->port_index].reg.rx_ctrl & CTRL_S);
}
static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
- struct xlx_ethlite *s = qemu_get_nic_opaque(nc);
- unsigned int rxbase = s->rxbuf * (0x800 / 4);
+ XlnxXpsEthLite *s = qemu_get_nic_opaque(nc);
+ unsigned int port_index = s->port_index;
/* DA filter. */
if (!(buf[0] & 0x80) && memcmp(&s->conf.macaddr.a[0], buf, 6))
return size;
- if (s->regs[rxbase + R_RX_CTRL0] & CTRL_S) {
- D(qemu_log("ethlite lost packet %x\n", s->regs[R_RX_CTRL0]));
+ if (s->port[port_index].reg.rx_ctrl & CTRL_S) {
+ trace_ethlite_pkt_lost(s->port[port_index].reg.rx_ctrl);
return -1;
}
- D(qemu_log("%s %zd rxbase=%x\n", __func__, size, rxbase));
- if (size > (R_MAX - R_RX_BUF0 - rxbase) * 4) {
- D(qemu_log("ethlite packet is too big, size=%x\n", size));
+ if (size >= BUFSZ_MAX) {
+ trace_ethlite_pkt_size_too_big(size);
return -1;
}
- memcpy(&s->regs[rxbase + R_RX_BUF0], buf, size);
+ memcpy(rxbuf_ptr(s, port_index), buf, size);
- s->regs[rxbase + R_RX_CTRL0] |= CTRL_S;
- if (s->regs[R_RX_CTRL0] & CTRL_I) {
+ s->port[port_index].reg.rx_ctrl |= CTRL_S;
+ if (s->port[port_index].reg.rx_ctrl & CTRL_I) {
eth_pulse_irq(s);
}
/* If c_rx_pingpong was set flip buffers. */
- s->rxbuf ^= s->c_rx_pingpong;
+ s->port_index ^= s->c_rx_pingpong;
return size;
}
static void xilinx_ethlite_reset(DeviceState *dev)
{
- struct xlx_ethlite *s = XILINX_ETHLITE(dev);
+ XlnxXpsEthLite *s = XILINX_ETHLITE(dev);
- s->rxbuf = 0;
+ s->port_index = 0;
}
static NetClientInfo net_xilinx_ethlite_info = {
@@ -231,7 +307,61 @@ static NetClientInfo net_xilinx_ethlite_info = {
static void xilinx_ethlite_realize(DeviceState *dev, Error **errp)
{
- struct xlx_ethlite *s = XILINX_ETHLITE(dev);
+ XlnxXpsEthLite *s = XILINX_ETHLITE(dev);
+ unsigned ops_index;
+
+ if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) {
+ error_setg(errp, TYPE_XILINX_ETHLITE " property 'endianness'"
+ " must be set to 'big' or 'little'");
+ return;
+ }
+ ops_index = s->model_endianness == ENDIAN_MODE_BIG ? 1 : 0;
+
+ memory_region_init(&s->container, OBJECT(dev),
+ "xlnx.xps-ethernetlite", 0x2000);
+
+ object_initialize_child(OBJECT(dev), "ethlite.reserved", &s->rsvd,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ qdev_prop_set_string(DEVICE(&s->rsvd), "name", "ethlite.reserved");
+ qdev_prop_set_uint64(DEVICE(&s->rsvd), "size",
+ memory_region_size(&s->container));
+ sysbus_realize(SYS_BUS_DEVICE(&s->rsvd), &error_fatal);
+ memory_region_add_subregion_overlap(&s->container, 0,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rsvd), 0),
+ -1);
+
+ object_initialize_child(OBJECT(dev), "ethlite.mdio", &s->mdio,
+ TYPE_UNIMPLEMENTED_DEVICE);
+ qdev_prop_set_string(DEVICE(&s->mdio), "name", "ethlite.mdio");
+ qdev_prop_set_uint64(DEVICE(&s->mdio), "size", 4 * 4);
+ sysbus_realize(SYS_BUS_DEVICE(&s->mdio), &error_fatal);
+ memory_region_add_subregion(&s->container, A_MDIO_BASE,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mdio), 0));
+
+ for (unsigned i = 0; i < 2; i++) {
+ memory_region_init_ram(&s->port[i].txbuf, OBJECT(dev),
+ i ? "ethlite.tx[1]buf" : "ethlite.tx[0]buf",
+ BUFSZ_MAX, &error_abort);
+ memory_region_add_subregion(&s->container, 0x0800 * i, &s->port[i].txbuf);
+ memory_region_init_io(&s->port[i].txio, OBJECT(dev),
+ &eth_porttx_ops[ops_index], s,
+ i ? "ethlite.tx[1]io" : "ethlite.tx[0]io",
+ 4 * TX_MAX);
+ memory_region_add_subregion(&s->container, i ? A_TX_BASE1 : A_TX_BASE0,
+ &s->port[i].txio);
+
+ memory_region_init_ram(&s->port[i].rxbuf, OBJECT(dev),
+ i ? "ethlite.rx[1]buf" : "ethlite.rx[0]buf",
+ BUFSZ_MAX, &error_abort);
+ memory_region_add_subregion(&s->container, 0x1000 + 0x0800 * i,
+ &s->port[i].rxbuf);
+ memory_region_init_io(&s->port[i].rxio, OBJECT(dev),
+ &eth_portrx_ops[ops_index], s,
+ i ? "ethlite.rx[1]io" : "ethlite.rx[0]io",
+ 4 * RX_MAX);
+ memory_region_add_subregion(&s->container, i ? A_RX_BASE1 : A_RX_BASE0,
+ &s->port[i].rxio);
+ }
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(&net_xilinx_ethlite_info, &s->conf,
@@ -242,42 +372,36 @@ static void xilinx_ethlite_realize(DeviceState *dev, Error **errp)
static void xilinx_ethlite_init(Object *obj)
{
- struct xlx_ethlite *s = XILINX_ETHLITE(obj);
+ XlnxXpsEthLite *s = XILINX_ETHLITE(obj);
sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
-
- memory_region_init_io(&s->mmio, obj, &eth_ops, s,
- "xlnx.xps-ethernetlite", R_MAX * 4);
- sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->container);
}
-static Property xilinx_ethlite_properties[] = {
- DEFINE_PROP_UINT32("tx-ping-pong", struct xlx_ethlite, c_tx_pingpong, 1),
- DEFINE_PROP_UINT32("rx-ping-pong", struct xlx_ethlite, c_rx_pingpong, 1),
- DEFINE_NIC_PROPERTIES(struct xlx_ethlite, conf),
- DEFINE_PROP_END_OF_LIST(),
+static const Property xilinx_ethlite_properties[] = {
+ DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XlnxXpsEthLite, model_endianness),
+ DEFINE_PROP_UINT32("tx-ping-pong", XlnxXpsEthLite, c_tx_pingpong, 1),
+ DEFINE_PROP_UINT32("rx-ping-pong", XlnxXpsEthLite, c_rx_pingpong, 1),
+ DEFINE_NIC_PROPERTIES(XlnxXpsEthLite, conf),
};
-static void xilinx_ethlite_class_init(ObjectClass *klass, void *data)
+static void xilinx_ethlite_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = xilinx_ethlite_realize;
- dc->reset = xilinx_ethlite_reset;
+ device_class_set_legacy_reset(dc, xilinx_ethlite_reset);
device_class_set_props(dc, xilinx_ethlite_properties);
}
-static const TypeInfo xilinx_ethlite_info = {
- .name = TYPE_XILINX_ETHLITE,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(struct xlx_ethlite),
- .instance_init = xilinx_ethlite_init,
- .class_init = xilinx_ethlite_class_init,
+static const TypeInfo xilinx_ethlite_types[] = {
+ {
+ .name = TYPE_XILINX_ETHLITE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxXpsEthLite),
+ .instance_init = xilinx_ethlite_init,
+ .class_init = xilinx_ethlite_class_init,
+ },
};
-static void xilinx_ethlite_register_types(void)
-{
- type_register_static(&xilinx_ethlite_info);
-}
-
-type_init(xilinx_ethlite_register_types)
+DEFINE_TYPES(xilinx_ethlite_types)
diff --git a/hw/nubus/mac-nubus-bridge.c b/hw/nubus/mac-nubus-bridge.c
index a0da5a8..0dac8d1 100644
--- a/hw/nubus/mac-nubus-bridge.c
+++ b/hw/nubus/mac-nubus-bridge.c
@@ -40,7 +40,7 @@ static void mac_nubus_bridge_init(Object *obj)
sysbus_init_mmio(sbd, &s->slot_alias);
}
-static void mac_nubus_bridge_class_init(ObjectClass *klass, void *data)
+static void mac_nubus_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c
index a42c860..fb14402 100644
--- a/hw/nubus/nubus-bridge.c
+++ b/hw/nubus/nubus-bridge.c
@@ -23,13 +23,12 @@ static void nubus_bridge_init(Object *obj)
qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS);
}
-static Property nubus_bridge_properties[] = {
+static const Property nubus_bridge_properties[] = {
DEFINE_PROP_UINT16("slot-available-mask", NubusBridge,
bus.slot_available_mask, 0xffff),
- DEFINE_PROP_END_OF_LIST()
};
-static void nubus_bridge_class_init(ObjectClass *klass, void *data)
+static void nubus_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/nubus/nubus-bus.c b/hw/nubus/nubus-bus.c
index 07c279b..44820f1 100644
--- a/hw/nubus/nubus-bus.c
+++ b/hw/nubus/nubus-bus.c
@@ -162,7 +162,7 @@ static bool nubus_check_address(BusState *bus, DeviceState *dev, Error **errp)
return true;
}
-static void nubus_class_init(ObjectClass *oc, void *data)
+static void nubus_class_init(ObjectClass *oc, const void *data)
{
BusClass *bc = BUS_CLASS(oc);
diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c
index be4cb24..7797e61 100644
--- a/hw/nubus/nubus-device.c
+++ b/hw/nubus/nubus-device.c
@@ -35,6 +35,13 @@ static void nubus_device_realize(DeviceState *dev, Error **errp)
uint8_t *rom_ptr;
int ret;
+ if (nd->slot < 0 || nd->slot >= NUBUS_SLOT_NB) {
+ error_setg(errp,
+ "'slot' value %d out of range (must be between 0 and %d)",
+ nd->slot, NUBUS_SLOT_NB - 1);
+ return;
+ }
+
/* Super */
slot_offset = nd->slot * NUBUS_SUPER_SLOT_SIZE;
@@ -100,13 +107,12 @@ static void nubus_device_realize(DeviceState *dev, Error **errp)
}
}
-static Property nubus_device_properties[] = {
+static const Property nubus_device_properties[] = {
DEFINE_PROP_INT32("slot", NubusDevice, slot, -1),
DEFINE_PROP_STRING("romfile", NubusDevice, romfile),
- DEFINE_PROP_END_OF_LIST()
};
-static void nubus_device_class_init(ObjectClass *oc, void *data)
+static void nubus_device_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/nubus/nubus-virtio-mmio.c b/hw/nubus/nubus-virtio-mmio.c
index 58a63c8..63aeca5 100644
--- a/hw/nubus/nubus-virtio-mmio.c
+++ b/hw/nubus/nubus-virtio-mmio.c
@@ -7,6 +7,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/nubus/nubus-virtio-mmio.h"
@@ -23,6 +24,7 @@ static void nubus_virtio_mmio_set_input_irq(void *opaque, int n, int level)
static void nubus_virtio_mmio_realize(DeviceState *dev, Error **errp)
{
+ ERRP_GUARD();
NubusVirtioMMIODeviceClass *nvmdc = NUBUS_VIRTIO_MMIO_GET_CLASS(dev);
NubusVirtioMMIO *s = NUBUS_VIRTIO_MMIO(dev);
NubusDevice *nd = NUBUS_DEVICE(dev);
@@ -79,7 +81,7 @@ static void nubus_virtio_mmio_init(Object *obj)
"pic-input-irq", 1);
}
-static void nubus_virtio_mmio_class_init(ObjectClass *oc, void *data)
+static void nubus_virtio_mmio_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
NubusVirtioMMIODeviceClass *nvmdc = NUBUS_VIRTIO_MMIO_CLASS(oc);
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 5b1b0ca..2200028 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -40,6 +40,9 @@
* sriov_vi_flexible=<N[optional]> \
* sriov_max_vi_per_vf=<N[optional]> \
* sriov_max_vq_per_vf=<N[optional]> \
+ * atomic.dn=<on|off[optional]>, \
+ * atomic.awun=<N[optional]>, \
+ * atomic.awupf=<N[optional]>, \
* subsys=<subsys_id>
* -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
* zoned=<true|false[optional]>, \
@@ -198,11 +201,12 @@
#include "qemu/range.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/hostmem.h"
+#include "system/system.h"
+#include "system/block-backend.h"
+#include "system/hostmem.h"
#include "hw/pci/msix.h"
#include "hw/pci/pcie_sriov.h"
+#include "system/spdm-socket.h"
#include "migration/vmstate.h"
#include "nvme.h"
@@ -253,6 +257,7 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
[NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
[NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE,
[NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE,
+ [NVME_WRITE_ATOMICITY] = NVME_FEAT_CAP_CHANGE,
[NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE,
[NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE,
[NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE,
@@ -261,7 +266,7 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
[NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
};
-static const uint32_t nvme_cse_acs[256] = {
+static const uint32_t nvme_cse_acs_default[256] = {
[NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP,
@@ -272,17 +277,14 @@ static const uint32_t nvme_cse_acs[256] = {
[NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
- [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
- [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP,
- [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP,
+ [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC |
+ NVME_CMD_EFF_CCC,
[NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP,
};
-static const uint32_t nvme_cse_iocs_none[256];
-
-static const uint32_t nvme_cse_iocs_nvm[256] = {
+static const uint32_t nvme_cse_iocs_nvm_default[256] = {
[NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
@@ -295,7 +297,7 @@ static const uint32_t nvme_cse_iocs_nvm[256] = {
[NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};
-static const uint32_t nvme_cse_iocs_zoned[256] = {
+static const uint32_t nvme_cse_iocs_zoned_default[256] = {
[NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
@@ -304,6 +306,9 @@ static const uint32_t nvme_cse_iocs_zoned[256] = {
[NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP,
[NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
+ [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP,
+ [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+
[NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP,
@@ -651,6 +656,12 @@ static void nvme_irq_check(NvmeCtrl *n)
if (msix_enabled(pci)) {
return;
}
+
+ /* VFs do not implement INTx */
+ if (pci_is_vf(pci)) {
+ return;
+ }
+
if (~intms & n->irq_status) {
pci_irq_assert(pci);
} else {
@@ -1046,7 +1057,8 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
*/
#define SEG_CHUNK_SIZE 256
- NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
+ QEMU_UNINITIALIZED NvmeSglDescriptor segment[SEG_CHUNK_SIZE];
+ NvmeSglDescriptor *sgld, *last_sgld;
uint64_t nsgld;
uint32_t seg_len;
uint16_t status;
@@ -1515,9 +1527,16 @@ static void nvme_post_cqes(void *opaque)
stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
break;
}
+
QTAILQ_REMOVE(&cq->req_list, req, entry);
+
nvme_inc_cq_tail(cq);
nvme_sg_unmap(&req->sg);
+
+ if (QTAILQ_EMPTY(&sq->req_list) && !nvme_sq_empty(sq)) {
+ qemu_bh_schedule(sq->bh);
+ }
+
QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
}
if (cq->tail != cq->head) {
@@ -1648,9 +1667,16 @@ static void nvme_smart_event(NvmeCtrl *n, uint8_t event)
static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
{
+ NvmeAsyncEvent *event, *next;
+
n->aer_mask &= ~(1 << event_type);
- if (!QTAILQ_EMPTY(&n->aer_queue)) {
- nvme_process_aers(n);
+
+ QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
+ if (event->result.event_type == event_type) {
+ QTAILQ_REMOVE(&n->aer_queue, event, entry);
+ n->aer_queued--;
+ g_free(event);
+ }
}
}
@@ -1737,43 +1763,6 @@ static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
return NVME_SUCCESS;
}
-static void nvme_aio_err(NvmeRequest *req, int ret)
-{
- uint16_t status = NVME_SUCCESS;
- Error *local_err = NULL;
-
- switch (req->cmd.opcode) {
- case NVME_CMD_READ:
- status = NVME_UNRECOVERED_READ;
- break;
- case NVME_CMD_FLUSH:
- case NVME_CMD_WRITE:
- case NVME_CMD_WRITE_ZEROES:
- case NVME_CMD_ZONE_APPEND:
- case NVME_CMD_COPY:
- status = NVME_WRITE_FAULT;
- break;
- default:
- status = NVME_INTERNAL_DEV_ERROR;
- break;
- }
-
- trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status);
-
- error_setg_errno(&local_err, -ret, "aio failed");
- error_report_err(local_err);
-
- /*
- * Set the command status code to the first encountered error but allow a
- * subsequent Internal Device Error to trump it.
- */
- if (req->status && status != NVME_INTERNAL_DEV_ERROR) {
- return;
- }
-
- req->status = status;
-}
-
static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba)
{
return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 :
@@ -1811,7 +1800,7 @@ static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone)
trace_pci_nvme_err_zone_is_read_only(zslba);
return NVME_ZONE_READ_ONLY;
default:
- assert(false);
+ g_assert_not_reached();
}
return NVME_INTERNAL_DEV_ERROR;
@@ -1865,7 +1854,7 @@ static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
trace_pci_nvme_err_zone_is_offline(zone->d.zslba);
return NVME_ZONE_OFFLINE;
default:
- assert(false);
+ g_assert_not_reached();
}
return NVME_INTERNAL_DEV_ERROR;
@@ -2132,11 +2121,16 @@ static inline bool nvme_is_write(NvmeRequest *req)
static void nvme_misc_cb(void *opaque, int ret)
{
NvmeRequest *req = opaque;
+ uint16_t cid = nvme_cid(req);
- trace_pci_nvme_misc_cb(nvme_cid(req));
+ trace_pci_nvme_misc_cb(cid);
if (ret) {
- nvme_aio_err(req, ret);
+ if (!req->status) {
+ req->status = NVME_INTERNAL_DEV_ERROR;
+ }
+
+ trace_pci_nvme_err_aio(cid, strerror(-ret), req->status);
}
nvme_enqueue_req_completion(nvme_cq(req), req);
@@ -2153,8 +2147,30 @@ void nvme_rw_complete_cb(void *opaque, int ret)
trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk));
if (ret) {
+ Error *err = NULL;
+
block_acct_failed(stats, acct);
- nvme_aio_err(req, ret);
+
+ switch (req->cmd.opcode) {
+ case NVME_CMD_READ:
+ req->status = NVME_UNRECOVERED_READ;
+ break;
+
+ case NVME_CMD_WRITE:
+ case NVME_CMD_WRITE_ZEROES:
+ case NVME_CMD_ZONE_APPEND:
+ req->status = NVME_WRITE_FAULT;
+ break;
+
+ default:
+ req->status = NVME_INTERNAL_DEV_ERROR;
+ break;
+ }
+
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status);
+
+ error_setg_errno(&err, -ret, "aio failed");
+ error_report_err(err);
} else {
block_acct_done(stats, acct);
}
@@ -2239,7 +2255,10 @@ static void nvme_verify_cb(void *opaque, int ret)
if (ret) {
block_acct_failed(stats, acct);
- nvme_aio_err(req, ret);
+ req->status = NVME_UNRECOVERED_READ;
+
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status);
+
goto out;
}
@@ -2338,7 +2357,10 @@ static void nvme_compare_mdata_cb(void *opaque, int ret)
if (ret) {
block_acct_failed(stats, acct);
- nvme_aio_err(req, ret);
+ req->status = NVME_UNRECOVERED_READ;
+
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status);
+
goto out;
}
@@ -2420,7 +2442,10 @@ static void nvme_compare_data_cb(void *opaque, int ret)
if (ret) {
block_acct_failed(stats, acct);
- nvme_aio_err(req, ret);
+ req->status = NVME_UNRECOVERED_READ;
+
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status);
+
goto out;
}
@@ -2591,6 +2616,7 @@ next:
done:
iocb->aiocb = NULL;
iocb->common.cb(iocb->common.opaque, iocb->ret);
+ g_free(iocb->range);
qemu_aio_unref(iocb);
}
@@ -2640,6 +2666,7 @@ static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
uint64_t slba = le64_to_cpu(rw->slba);
uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
size_t len = nvme_l2b(ns, nlb);
+ size_t data_len = len;
int64_t offset = nvme_l2b(ns, slba);
uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
uint32_t reftag = le32_to_cpu(rw->reftag);
@@ -2659,7 +2686,11 @@ static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
}
}
- if (len > n->page_size << n->params.vsl) {
+ if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) {
+ data_len += nvme_m2b(ns, nlb);
+ }
+
+ if (data_len > (n->page_size << n->params.vsl)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -2695,6 +2726,7 @@ typedef struct NvmeCopyAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
+ NvmeCtrl *n;
int ret;
void *ranges;
@@ -2713,6 +2745,8 @@ typedef struct NvmeCopyAIOCB {
uint64_t slba;
NvmeZone *zone;
+ NvmeNamespace *sns;
+ uint32_t tcl;
} NvmeCopyAIOCB;
static void nvme_copy_cancel(BlockAIOCB *aiocb)
@@ -2759,13 +2793,19 @@ static void nvme_copy_done(NvmeCopyAIOCB *iocb)
static void nvme_do_copy(NvmeCopyAIOCB *iocb);
-static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
- uint64_t *slba, uint32_t *nlb,
- uint16_t *apptag,
- uint16_t *appmask,
- uint64_t *reftag)
+static void nvme_copy_source_range_parse_format0_2(void *ranges,
+ int idx, uint64_t *slba,
+ uint32_t *nlb,
+ uint32_t *snsid,
+ uint16_t *apptag,
+ uint16_t *appmask,
+ uint64_t *reftag)
{
- NvmeCopySourceRangeFormat0 *_ranges = ranges;
+ NvmeCopySourceRangeFormat0_2 *_ranges = ranges;
+
+ if (snsid) {
+ *snsid = le32_to_cpu(_ranges[idx].sparams);
+ }
if (slba) {
*slba = le64_to_cpu(_ranges[idx].slba);
@@ -2788,13 +2828,19 @@ static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
}
}
-static void nvme_copy_source_range_parse_format1(void *ranges, int idx,
- uint64_t *slba, uint32_t *nlb,
- uint16_t *apptag,
- uint16_t *appmask,
- uint64_t *reftag)
+static void nvme_copy_source_range_parse_format1_3(void *ranges, int idx,
+ uint64_t *slba,
+ uint32_t *nlb,
+ uint32_t *snsid,
+ uint16_t *apptag,
+ uint16_t *appmask,
+ uint64_t *reftag)
{
- NvmeCopySourceRangeFormat1 *_ranges = ranges;
+ NvmeCopySourceRangeFormat1_3 *_ranges = ranges;
+
+ if (snsid) {
+ *snsid = le32_to_cpu(_ranges[idx].sparams);
+ }
if (slba) {
*slba = le64_to_cpu(_ranges[idx].slba);
@@ -2826,18 +2872,20 @@ static void nvme_copy_source_range_parse_format1(void *ranges, int idx,
static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format,
uint64_t *slba, uint32_t *nlb,
- uint16_t *apptag, uint16_t *appmask,
- uint64_t *reftag)
+ uint32_t *snsid, uint16_t *apptag,
+ uint16_t *appmask, uint64_t *reftag)
{
switch (format) {
case NVME_COPY_FORMAT_0:
- nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag,
- appmask, reftag);
+ case NVME_COPY_FORMAT_2:
+ nvme_copy_source_range_parse_format0_2(ranges, idx, slba, nlb, snsid,
+ apptag, appmask, reftag);
break;
case NVME_COPY_FORMAT_1:
- nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag,
- appmask, reftag);
+ case NVME_COPY_FORMAT_3:
+ nvme_copy_source_range_parse_format1_3(ranges, idx, slba, nlb, snsid,
+ apptag, appmask, reftag);
break;
default:
@@ -2853,10 +2901,10 @@ static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns,
for (int idx = 0; idx < nr; idx++) {
uint32_t nlb;
nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL,
- &nlb, NULL, NULL, NULL);
+ &nlb, NULL, NULL, NULL, NULL);
copy_len += nlb;
}
-
+ iocb->tcl = copy_len;
if (copy_len > ns->id_ns.mcl) {
return NVME_CMD_SIZE_LIMIT | NVME_DNR;
}
@@ -2868,21 +2916,22 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
- NvmeNamespace *ns = req->ns;
+ NvmeNamespace *dns = req->ns;
uint32_t nlb;
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
- &nlb, NULL, NULL, NULL);
+ &nlb, NULL, NULL, NULL, NULL);
if (ret < 0) {
iocb->ret = ret;
+ req->status = NVME_WRITE_FAULT;
goto out;
} else if (iocb->ret < 0) {
goto out;
}
- if (ns->params.zoned) {
- nvme_advance_zone_wp(ns, iocb->zone, nlb);
+ if (dns->params.zoned) {
+ nvme_advance_zone_wp(dns, iocb->zone, nlb);
}
iocb->idx++;
@@ -2895,25 +2944,25 @@ static void nvme_copy_out_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
- NvmeNamespace *ns = req->ns;
+ NvmeNamespace *dns = req->ns;
uint32_t nlb;
size_t mlen;
uint8_t *mbounce;
- if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
+ if (ret < 0 || iocb->ret < 0 || !dns->lbaf.ms) {
goto out;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
- &nlb, NULL, NULL, NULL);
+ &nlb, NULL, NULL, NULL, NULL);
- mlen = nvme_m2b(ns, nlb);
- mbounce = iocb->bounce + nvme_l2b(ns, nlb);
+ mlen = nvme_m2b(dns, nlb);
+ mbounce = iocb->bounce + nvme_l2b(dns, nlb);
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, mbounce, mlen);
- iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba),
+ iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_moff(dns, iocb->slba),
&iocb->iov, 0, nvme_copy_out_completed_cb,
iocb);
@@ -2927,59 +2976,71 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
- NvmeNamespace *ns = req->ns;
+ NvmeNamespace *sns = iocb->sns;
+ NvmeNamespace *dns = req->ns;
+ NvmeCopyCmd *copy = NULL;
+ uint8_t *mbounce = NULL;
uint32_t nlb;
uint64_t slba;
uint16_t apptag, appmask;
uint64_t reftag;
- size_t len;
+ size_t len, mlen;
uint16_t status;
if (ret < 0) {
iocb->ret = ret;
+ req->status = NVME_UNRECOVERED_READ;
goto out;
} else if (iocb->ret < 0) {
goto out;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
- &nlb, &apptag, &appmask, &reftag);
- len = nvme_l2b(ns, nlb);
+ &nlb, NULL, &apptag, &appmask, &reftag);
trace_pci_nvme_copy_out(iocb->slba, nlb);
- if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
- NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ len = nvme_l2b(sns, nlb);
+
+ if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps)) {
+ copy = (NvmeCopyCmd *)&req->cmd;
uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
- uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
- size_t mlen = nvme_m2b(ns, nlb);
- uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb);
+ mlen = nvme_m2b(sns, nlb);
+ mbounce = iocb->bounce + nvme_l2b(sns, nlb);
- status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba);
+ status = nvme_dif_mangle_mdata(sns, mbounce, mlen, slba);
if (status) {
goto invalid;
}
- status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor,
+ status = nvme_dif_check(sns, iocb->bounce, len, mbounce, mlen, prinfor,
slba, apptag, appmask, &reftag);
if (status) {
goto invalid;
}
+ }
+
+ if (NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
+ copy = (NvmeCopyCmd *)&req->cmd;
+ uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
+
+ mlen = nvme_m2b(dns, nlb);
+ mbounce = iocb->bounce + nvme_l2b(dns, nlb);
apptag = le16_to_cpu(copy->apptag);
appmask = le16_to_cpu(copy->appmask);
if (prinfow & NVME_PRINFO_PRACT) {
- status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag);
+ status = nvme_check_prinfo(dns, prinfow, iocb->slba, iocb->reftag);
if (status) {
goto invalid;
}
- nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen,
+ nvme_dif_pract_generate_dif(dns, iocb->bounce, len, mbounce, mlen,
apptag, &iocb->reftag);
} else {
- status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen,
+ status = nvme_dif_check(dns, iocb->bounce, len, mbounce, mlen,
prinfow, iocb->slba, apptag, appmask,
&iocb->reftag);
if (status) {
@@ -2988,13 +3049,13 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
}
}
- status = nvme_check_bounds(ns, iocb->slba, nlb);
+ status = nvme_check_bounds(dns, iocb->slba, nlb);
if (status) {
goto invalid;
}
- if (ns->params.zoned) {
- status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);
+ if (dns->params.zoned) {
+ status = nvme_check_zone_write(dns, iocb->zone, iocb->slba, nlb);
if (status) {
goto invalid;
}
@@ -3007,7 +3068,10 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, iocb->bounce, len);
- iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba),
+ block_acct_start(blk_get_stats(dns->blkconf.blk), &iocb->acct.write, 0,
+ BLOCK_ACCT_WRITE);
+
+ iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_l2b(dns, iocb->slba),
&iocb->iov, 0, nvme_copy_out_cb, iocb);
return;
@@ -3022,23 +3086,22 @@ out:
static void nvme_copy_in_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
- NvmeRequest *req = iocb->req;
- NvmeNamespace *ns = req->ns;
+ NvmeNamespace *sns = iocb->sns;
uint64_t slba;
uint32_t nlb;
- if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
+ if (ret < 0 || iocb->ret < 0 || !sns->lbaf.ms) {
goto out;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
- &nlb, NULL, NULL, NULL);
+ &nlb, NULL, NULL, NULL, NULL);
qemu_iovec_reset(&iocb->iov);
- qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb),
- nvme_m2b(ns, nlb));
+ qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(sns, nlb),
+ nvme_m2b(sns, nlb));
- iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba),
+ iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_moff(sns, slba),
&iocb->iov, 0, nvme_copy_in_completed_cb,
iocb);
return;
@@ -3047,14 +3110,78 @@ out:
nvme_copy_in_completed_cb(iocb, ret);
}
+static inline bool nvme_csi_supports_copy(uint8_t csi)
+{
+ return csi == NVME_CSI_NVM || csi == NVME_CSI_ZONED;
+}
+
+static inline bool nvme_copy_ns_format_match(NvmeNamespace *sns,
+ NvmeNamespace *dns)
+{
+ return sns->lbaf.ds == dns->lbaf.ds && sns->lbaf.ms == dns->lbaf.ms;
+}
+
+static bool nvme_copy_matching_ns_format(NvmeNamespace *sns, NvmeNamespace *dns,
+ bool pi_enable)
+{
+ if (!nvme_csi_supports_copy(sns->csi) ||
+ !nvme_csi_supports_copy(dns->csi)) {
+ return false;
+ }
+
+ if (!pi_enable && !nvme_copy_ns_format_match(sns, dns)) {
+ return false;
+ }
+
+ if (pi_enable && (!nvme_copy_ns_format_match(sns, dns) ||
+ sns->id_ns.dps != dns->id_ns.dps)) {
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool nvme_copy_corresp_pi_match(NvmeNamespace *sns,
+ NvmeNamespace *dns)
+{
+ return sns->lbaf.ms == 0 &&
+ ((dns->lbaf.ms == 8 && dns->pif == 0) ||
+ (dns->lbaf.ms == 16 && dns->pif == 1));
+}
+
+static bool nvme_copy_corresp_pi_format(NvmeNamespace *sns, NvmeNamespace *dns,
+ bool sns_pi_en)
+{
+ if (!nvme_csi_supports_copy(sns->csi) ||
+ !nvme_csi_supports_copy(dns->csi)) {
+ return false;
+ }
+
+ if (!sns_pi_en && !nvme_copy_corresp_pi_match(sns, dns)) {
+ return false;
+ }
+
+ if (sns_pi_en && !nvme_copy_corresp_pi_match(dns, sns)) {
+ return false;
+ }
+
+ return true;
+}
+
static void nvme_do_copy(NvmeCopyAIOCB *iocb)
{
NvmeRequest *req = iocb->req;
- NvmeNamespace *ns = req->ns;
+ NvmeNamespace *sns;
+ NvmeNamespace *dns = req->ns;
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
+ uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
uint64_t slba;
uint32_t nlb;
size_t len;
uint16_t status;
+ uint32_t dnsid = le32_to_cpu(req->cmd.nsid);
+ uint32_t snsid = dnsid;
if (iocb->ret < 0) {
goto done;
@@ -3064,40 +3191,124 @@ static void nvme_do_copy(NvmeCopyAIOCB *iocb)
goto done;
}
- nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
- &nlb, NULL, NULL, NULL);
- len = nvme_l2b(ns, nlb);
+ if (iocb->format == 2 || iocb->format == 3) {
+ nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format,
+ &slba, &nlb, &snsid, NULL, NULL, NULL);
+ if (snsid != dnsid) {
+ if (snsid == NVME_NSID_BROADCAST ||
+ !nvme_nsid_valid(iocb->n, snsid)) {
+ status = NVME_INVALID_NSID | NVME_DNR;
+ goto invalid;
+ }
+ iocb->sns = nvme_ns(iocb->n, snsid);
+ if (unlikely(!iocb->sns)) {
+ status = NVME_INVALID_FIELD | NVME_DNR;
+ goto invalid;
+ }
+ } else {
+ if (((slba + nlb) > iocb->slba) &&
+ ((slba + nlb) < (iocb->slba + iocb->tcl))) {
+ status = NVME_CMD_OVERLAP_IO_RANGE | NVME_DNR;
+ goto invalid;
+ }
+ }
+ } else {
+ nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format,
+ &slba, &nlb, NULL, NULL, NULL, NULL);
+ }
+
+ sns = iocb->sns;
+ if ((snsid == dnsid) && NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
+ ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
+ status = NVME_INVALID_FIELD | NVME_DNR;
+ goto invalid;
+ } else if (snsid != dnsid) {
+ if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
+ !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
+ if (!nvme_copy_matching_ns_format(sns, dns, false)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ }
+ }
+ if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
+ NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
+ if ((prinfor & NVME_PRINFO_PRACT) !=
+ (prinfow & NVME_PRINFO_PRACT)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ } else {
+ if (!nvme_copy_matching_ns_format(sns, dns, true)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ }
+ }
+ }
+
+ if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
+ NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
+ if (!(prinfow & NVME_PRINFO_PRACT)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ } else {
+ if (!nvme_copy_corresp_pi_format(sns, dns, false)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ }
+ }
+ }
+
+ if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
+ !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
+ if (!(prinfor & NVME_PRINFO_PRACT)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ } else {
+ if (!nvme_copy_corresp_pi_format(sns, dns, true)) {
+ status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
+ goto invalid;
+ }
+ }
+ }
+ }
+ len = nvme_l2b(sns, nlb);
trace_pci_nvme_copy_source_range(slba, nlb);
- if (nlb > le16_to_cpu(ns->id_ns.mssrl)) {
+ if (nlb > le16_to_cpu(sns->id_ns.mssrl)) {
status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
goto invalid;
}
- status = nvme_check_bounds(ns, slba, nlb);
+ status = nvme_check_bounds(sns, slba, nlb);
if (status) {
goto invalid;
}
- if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
- status = nvme_check_dulbe(ns, slba, nlb);
+ if (NVME_ERR_REC_DULBE(sns->features.err_rec)) {
+ status = nvme_check_dulbe(sns, slba, nlb);
if (status) {
goto invalid;
}
}
- if (ns->params.zoned) {
- status = nvme_check_zone_read(ns, slba, nlb);
+ if (sns->params.zoned) {
+ status = nvme_check_zone_read(sns, slba, nlb);
if (status) {
goto invalid;
}
}
+ g_free(iocb->bounce);
+ iocb->bounce = g_malloc_n(le16_to_cpu(sns->id_ns.mssrl),
+ sns->lbasz + sns->lbaf.ms);
+
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, iocb->bounce, len);
- iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
+ block_acct_start(blk_get_stats(sns->blkconf.blk), &iocb->acct.read, 0,
+ BLOCK_ACCT_READ);
+
+ iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba),
&iocb->iov, 0, nvme_copy_in_cb, iocb);
return;
@@ -3116,9 +3327,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
nvme_misc_cb, req);
uint16_t nr = copy->nr + 1;
uint8_t format = copy->control[0] & 0xf;
- uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
- uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
- size_t len = sizeof(NvmeCopySourceRangeFormat0);
+ size_t len = sizeof(NvmeCopySourceRangeFormat0_2);
uint16_t status;
@@ -3127,13 +3336,9 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
iocb->ranges = NULL;
iocb->zone = NULL;
- if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
- ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
- status = NVME_INVALID_FIELD | NVME_DNR;
- goto invalid;
- }
-
- if (!(n->id_ctrl.ocfs & (1 << format))) {
+ if (!(n->id_ctrl.ocfs & (1 << format)) ||
+ ((format == 2 || format == 3) &&
+ !(n->features.hbs.cdfe & (1 << format)))) {
trace_pci_nvme_err_copy_invalid_format(format);
status = NVME_INVALID_FIELD | NVME_DNR;
goto invalid;
@@ -3144,14 +3349,14 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
goto invalid;
}
- if ((ns->pif == 0x0 && format != 0x0) ||
- (ns->pif != 0x0 && format != 0x1)) {
+ if ((ns->pif == 0x0 && (format != 0x0 && format != 0x2)) ||
+ (ns->pif != 0x0 && (format != 0x1 && format != 0x3))) {
status = NVME_INVALID_FORMAT | NVME_DNR;
goto invalid;
}
if (ns->pif) {
- len = sizeof(NvmeCopySourceRangeFormat1);
+ len = sizeof(NvmeCopySourceRangeFormat1_3);
}
iocb->format = format;
@@ -3187,17 +3392,13 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
iocb->idx = 0;
iocb->reftag = le32_to_cpu(copy->reftag);
iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32;
- iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl),
- ns->lbasz + ns->lbaf.ms);
qemu_iovec_init(&iocb->iov, 1);
- block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0,
- BLOCK_ACCT_READ);
- block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0,
- BLOCK_ACCT_WRITE);
-
req->aiocb = &iocb->common;
+ iocb->sns = req->ns;
+ iocb->n = n;
+ iocb->bounce = NULL;
nvme_do_copy(iocb);
return NVME_NO_COMPLETE;
@@ -3232,7 +3433,11 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
len += nvme_m2b(ns, nlb);
}
- status = nvme_check_mdts(n, len);
+ if (NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt)) {
+ status = nvme_check_mdts(n, data_len);
+ } else {
+ status = nvme_check_mdts(n, len);
+ }
if (status) {
return status;
}
@@ -3307,6 +3512,7 @@ static void nvme_flush_ns_cb(void *opaque, int ret)
if (ret < 0) {
iocb->ret = ret;
+ iocb->req->status = NVME_WRITE_FAULT;
goto out;
} else if (iocb->ret < 0) {
goto out;
@@ -3409,7 +3615,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
- if (nvme_ns_ext(ns)) {
+ if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) {
mapped_size += nvme_m2b(ns, nlb);
if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
@@ -3521,7 +3727,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
- if (nvme_ns_ext(ns)) {
+ if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) {
mapped_size += nvme_m2b(ns, nlb);
if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
@@ -4167,7 +4373,7 @@ static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
+ NvmeCmd *cmd = &req->cmd;
NvmeNamespace *ns = req->ns;
/* cdw12 is zero-based number of dwords to return. Convert to bytes */
uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
@@ -4300,7 +4506,7 @@ static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req,
nruhsd = ns->fdp.nphs * endgrp->fdp.nrg;
trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr);
- buf = g_malloc(trans_len);
+ buf = g_malloc0(trans_len);
trans_len = MIN(trans_len, len);
@@ -4398,6 +4604,61 @@ static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
};
}
+static uint16_t __nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req)
+{
+ switch (req->cmd.opcode) {
+ case NVME_CMD_WRITE:
+ return nvme_write(n, req);
+ case NVME_CMD_READ:
+ return nvme_read(n, req);
+ case NVME_CMD_COMPARE:
+ return nvme_compare(n, req);
+ case NVME_CMD_WRITE_ZEROES:
+ return nvme_write_zeroes(n, req);
+ case NVME_CMD_DSM:
+ return nvme_dsm(n, req);
+ case NVME_CMD_VERIFY:
+ return nvme_verify(n, req);
+ case NVME_CMD_COPY:
+ return nvme_copy(n, req);
+ case NVME_CMD_IO_MGMT_RECV:
+ return nvme_io_mgmt_recv(n, req);
+ case NVME_CMD_IO_MGMT_SEND:
+ return nvme_io_mgmt_send(n, req);
+ }
+
+ g_assert_not_reached();
+}
+
+static uint16_t nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req)
+{
+ if (!(n->cse.iocs.nvm[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
+ trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
+ return NVME_INVALID_OPCODE | NVME_DNR;
+ }
+
+ return __nvme_io_cmd_nvm(n, req);
+}
+
+static uint16_t nvme_io_cmd_zoned(NvmeCtrl *n, NvmeRequest *req)
+{
+ if (!(n->cse.iocs.zoned[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
+ trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
+ return NVME_INVALID_OPCODE | NVME_DNR;
+ }
+
+ switch (req->cmd.opcode) {
+ case NVME_CMD_ZONE_APPEND:
+ return nvme_zone_append(n, req);
+ case NVME_CMD_ZONE_MGMT_SEND:
+ return nvme_zone_mgmt_send(n, req);
+ case NVME_CMD_ZONE_MGMT_RECV:
+ return nvme_zone_mgmt_recv(n, req);
+ }
+
+ return __nvme_io_cmd_nvm(n, req);
+}
+
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns;
@@ -4406,10 +4667,6 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));
- if (!nvme_nsid_valid(n, nsid)) {
- return NVME_INVALID_NSID | NVME_DNR;
- }
-
/*
* In the base NVM command set, Flush may apply to all namespaces
* (indicated by NSID being set to FFFFFFFFh). But if that feature is used
@@ -4429,20 +4686,20 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
     * device only supports namespace types that include the NVM Flush command
* (NVM and Zoned), so always do an NVM Flush.
*/
+
if (req->cmd.opcode == NVME_CMD_FLUSH) {
return nvme_flush(n, req);
}
+ if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
- trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
- return NVME_INVALID_OPCODE | NVME_DNR;
- }
-
if (ns->status) {
return ns->status;
}
@@ -4453,36 +4710,14 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
req->ns = ns;
- switch (req->cmd.opcode) {
- case NVME_CMD_WRITE_ZEROES:
- return nvme_write_zeroes(n, req);
- case NVME_CMD_ZONE_APPEND:
- return nvme_zone_append(n, req);
- case NVME_CMD_WRITE:
- return nvme_write(n, req);
- case NVME_CMD_READ:
- return nvme_read(n, req);
- case NVME_CMD_COMPARE:
- return nvme_compare(n, req);
- case NVME_CMD_DSM:
- return nvme_dsm(n, req);
- case NVME_CMD_VERIFY:
- return nvme_verify(n, req);
- case NVME_CMD_COPY:
- return nvme_copy(n, req);
- case NVME_CMD_ZONE_MGMT_SEND:
- return nvme_zone_mgmt_send(n, req);
- case NVME_CMD_ZONE_MGMT_RECV:
- return nvme_zone_mgmt_recv(n, req);
- case NVME_CMD_IO_MGMT_RECV:
- return nvme_io_mgmt_recv(n, req);
- case NVME_CMD_IO_MGMT_SEND:
- return nvme_io_mgmt_send(n, req);
- default:
- assert(false);
+ switch (ns->csi) {
+ case NVME_CSI_NVM:
+ return nvme_io_cmd_nvm(n, req);
+ case NVME_CSI_ZONED:
+ return nvme_io_cmd_zoned(n, req);
}
- return NVME_INVALID_OPCODE | NVME_DNR;
+ g_assert_not_reached();
}
static void nvme_cq_notifier(EventNotifier *e)
@@ -4591,6 +4826,7 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
r = QTAILQ_FIRST(&sq->out_req_list);
assert(r->aiocb);
+ r->status = NVME_CMD_ABORT_SQ_DEL;
blk_aio_cancel(r->aiocb);
}
@@ -4709,6 +4945,45 @@ static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
+static uint16_t nvme_ocp_extended_smart_info(NvmeCtrl *n, uint8_t rae,
+ uint32_t buf_len, uint64_t off,
+ NvmeRequest *req)
+{
+ NvmeNamespace *ns = NULL;
+ NvmeSmartLogExtended smart_l = { 0 };
+ struct nvme_stats stats = { 0 };
+ uint32_t trans_len;
+
+ if (off >= sizeof(smart_l)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ /* accumulate all stats from all namespaces */
+ for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ ns = nvme_ns(n, i);
+ if (ns) {
+ nvme_set_blk_stats(ns, &stats);
+ }
+ }
+
+ smart_l.physical_media_units_written[0] = cpu_to_le64(stats.units_written);
+ smart_l.physical_media_units_read[0] = cpu_to_le64(stats.units_read);
+ smart_l.log_page_version = 0x0005;
+
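+    /* GUID identifying the OCP SMART / Health Information Extended log page */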
+ static const uint8_t guid[16] = {
+ 0xC5, 0xAF, 0x10, 0x28, 0xEA, 0xBF, 0xF2, 0xA4,
+ 0x9C, 0x4F, 0x6F, 0x7C, 0xC9, 0x14, 0xD5, 0xAF
+ };
+ memcpy(smart_l.log_page_guid, guid, sizeof(smart_l.log_page_guid));
+
+ if (!rae) {
+ nvme_clear_events(n, NVME_AER_TYPE_SMART);
+ }
+
+ trans_len = MIN(sizeof(smart_l) - off, buf_len);
+ return nvme_c2h(n, (uint8_t *) &smart_l + off, trans_len, req);
+}
+
static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
uint64_t off, NvmeRequest *req)
{
@@ -4854,7 +5129,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
uint64_t off, NvmeRequest *req)
{
- uint32_t nslist[1024];
+ uint32_t nslist[1024] = {};
uint32_t trans_len;
int i = 0;
uint32_t nsid;
@@ -4864,7 +5139,6 @@ static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
return NVME_INVALID_FIELD | NVME_DNR;
}
- memset(nslist, 0x0, sizeof(nslist));
trans_len = MIN(sizeof(nslist) - off, buf_len);
while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
@@ -4902,7 +5176,7 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
uint64_t off, NvmeRequest *req)
{
NvmeEffectsLog log = {};
- const uint32_t *src_iocs = NULL;
+ const uint32_t *iocs = NULL;
uint32_t trans_len;
if (off >= sizeof(log)) {
@@ -4912,25 +5186,26 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) {
case NVME_CC_CSS_NVM:
- src_iocs = nvme_cse_iocs_nvm;
- /* fall through */
- case NVME_CC_CSS_ADMIN_ONLY:
+ iocs = n->cse.iocs.nvm;
break;
- case NVME_CC_CSS_CSI:
+
+ case NVME_CC_CSS_ALL:
switch (csi) {
case NVME_CSI_NVM:
- src_iocs = nvme_cse_iocs_nvm;
+ iocs = n->cse.iocs.nvm;
break;
case NVME_CSI_ZONED:
- src_iocs = nvme_cse_iocs_zoned;
+ iocs = n->cse.iocs.zoned;
break;
}
+
+ break;
}
- memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));
+ memcpy(log.acs, n->cse.acs, sizeof(log.acs));
- if (src_iocs) {
- memcpy(log.iocs, src_iocs, sizeof(log.iocs));
+ if (iocs) {
+ memcpy(log.iocs, iocs, sizeof(log.iocs));
}
trans_len = MIN(sizeof(log) - off, buf_len);
@@ -4938,6 +5213,23 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
}
+static uint16_t nvme_vendor_specific_log(NvmeCtrl *n, uint8_t rae,
+ uint32_t buf_len, uint64_t off,
+ NvmeRequest *req, uint8_t lid)
+{
+ switch (lid) {
+ case NVME_OCP_EXTENDED_SMART_INFO:
+ if (n->params.ocp) {
+ return nvme_ocp_extended_smart_info(n, rae, buf_len, off, req);
+ }
+ break;
+    /* add a case for each additional vendor-specific log id */
+ }
+
+ trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
+ return NVME_INVALID_FIELD | NVME_DNR;
+}
+
static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss)
{
size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr)
@@ -5188,6 +5480,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
return nvme_smart_info(n, rae, len, off, req);
case NVME_LOG_FW_SLOT_INFO:
return nvme_fw_log_info(n, len, off, req);
+ case NVME_LOG_VENDOR_START...NVME_LOG_VENDOR_END:
+ return nvme_vendor_specific_log(n, rae, len, off, req, lid);
case NVME_LOG_CHANGED_NSLIST:
return nvme_changed_nslist(n, rae, len, off, req);
case NVME_LOG_CMD_EFFECTS:
@@ -5221,7 +5515,7 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
event_notifier_set_handler(&cq->notifier, NULL);
event_notifier_cleanup(&cq->notifier);
}
- if (msix_enabled(pci)) {
+ if (msix_enabled(pci) && cq->irq_enabled) {
msix_vector_unuse(pci, cq->vector);
}
if (cq->cqid) {
@@ -5262,9 +5556,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
{
PCIDevice *pci = PCI_DEVICE(n);
- if (msix_enabled(pci)) {
+ if (msix_enabled(pci) && irq_enabled) {
msix_vector_use(pci, vector);
}
+
cq->ctrl = n;
cq->cqid = cqid;
cq->size = size;
@@ -5374,7 +5669,9 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
switch (c->csi) {
case NVME_CSI_NVM:
id_nvm->vsl = n->params.vsl;
+ id_nvm->dmrl = NVME_ID_CTRL_NVM_DMRL_MAX;
id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
+ id_nvm->dmsl = NVME_ID_CTRL_NVM_DMRL_MAX * n->dmrsl;
break;
case NVME_CSI_ZONED:
@@ -5416,7 +5713,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
}
- return NVME_INVALID_CMD_SET | NVME_DNR;
+ return NVME_INVALID_IOCS | NVME_DNR;
}
static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req,
@@ -5497,6 +5794,33 @@ static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req)
return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req);
}
+static uint16_t nvme_identify_ns_ind(NvmeCtrl *n, NvmeRequest *req, bool alloc)
+{
+ NvmeNamespace *ns;
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
+ uint32_t nsid = le32_to_cpu(c->nsid);
+
+ trace_pci_nvme_identify_ns_ind(nsid);
+
+ if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
+ ns = nvme_ns(n, nsid);
+ if (unlikely(!ns)) {
+ if (alloc) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ }
+
+ return nvme_c2h(n, (uint8_t *)&ns->id_ns_ind, sizeof(NvmeIdNsInd), req);
+}
+
static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
bool active)
{
@@ -5751,6 +6075,10 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
return nvme_identify_sec_ctrl_list(n, req);
case NVME_ID_CNS_CS_NS:
return nvme_identify_ns_csi(n, req, true);
+ case NVME_ID_CNS_CS_IND_NS:
+ return nvme_identify_ns_ind(n, req, false);
+ case NVME_ID_CNS_CS_IND_NS_ALLOCATED:
+ return nvme_identify_ns_ind(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT:
return nvme_identify_ns_csi(n, req, false);
case NVME_ID_CNS_CTRL:
@@ -5780,12 +6108,41 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
+ uint16_t cid = (le32_to_cpu(req->cmd.cdw10) >> 16) & 0xffff;
+ NvmeSQueue *sq = n->sq[sqid];
+ NvmeRequest *r, *next;
+ int i;
req->cqe.result = 1;
if (nvme_check_sqid(n, sqid)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
+ if (sqid == 0) {
+ for (i = 0; i < n->outstanding_aers; i++) {
+ NvmeRequest *re = n->aer_reqs[i];
+ if (re->cqe.cid == cid) {
+ memmove(n->aer_reqs + i, n->aer_reqs + i + 1,
+ (n->outstanding_aers - i - 1) * sizeof(NvmeRequest *));
+ n->outstanding_aers--;
+ re->status = NVME_CMD_ABORT_REQ;
+ req->cqe.result = 0;
+ nvme_enqueue_req_completion(&n->admin_cq, re);
+ return NVME_SUCCESS;
+ }
+ }
+ }
+
+ QTAILQ_FOREACH_SAFE(r, &sq->out_req_list, entry, next) {
+ if (r->cqe.cid == cid) {
+ if (r->aiocb) {
+ r->status = NVME_CMD_ABORT_REQ;
+ blk_aio_cancel_async(r->aiocb);
+ }
+ break;
+ }
+ }
+
return NVME_SUCCESS;
}
@@ -6090,8 +6447,10 @@ defaults:
if (ret) {
return ret;
}
- goto out;
+ break;
+ case NVME_WRITE_ATOMICITY:
+ result = n->dn;
break;
default:
result = nvme_feature_default[fid];
@@ -6175,6 +6534,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
uint8_t save = NVME_SETFEAT_SAVE(dw10);
uint16_t status;
int i;
+ NvmeIdCtrl *id = &n->id_ctrl;
+ NvmeAtomic *atomic = &n->atomic;
trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);
@@ -6319,7 +6680,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
case NVME_COMMAND_SET_PROFILE:
if (dw11 & 0x1ff) {
trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
- return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
+ return NVME_IOCS_COMBINATION_REJECTED | NVME_DNR;
}
break;
case NVME_FDP_MODE:
@@ -6327,6 +6688,22 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
return NVME_CMD_SEQ_ERROR | NVME_DNR;
case NVME_FDP_EVENTS:
return nvme_set_feature_fdp_events(n, ns, req);
+ case NVME_WRITE_ATOMICITY:
+
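+        /* bit 0 of Command Dword 11 is the Disable Normal (DN) bit */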
+ n->dn = 0x1 & dw11;
+
+ if (n->dn) {
+ atomic->atomic_max_write_size = le16_to_cpu(id->awupf) + 1;
+ } else {
+ atomic->atomic_max_write_size = le16_to_cpu(id->awun) + 1;
+ }
+
+ if (atomic->atomic_max_write_size == 1) {
+ atomic->atomic_writes = 0;
+ } else {
+ atomic->atomic_writes = 1;
+ }
+ break;
default:
return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
}
@@ -6352,40 +6729,49 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static void nvme_update_dmrsl(NvmeCtrl *n)
+static void nvme_update_dsm_limits(NvmeCtrl *n, NvmeNamespace *ns)
{
- int nsid;
+ if (ns) {
+ n->dmrsl =
+ MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
- for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
- NvmeNamespace *ns = nvme_ns(n, nsid);
+ return;
+ }
+
+ for (uint32_t nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
+ ns = nvme_ns(n, nsid);
if (!ns) {
continue;
}
- n->dmrsl = MIN_NON_ZERO(n->dmrsl,
- BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
+ n->dmrsl =
+ MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
}
}
-static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
+static bool nvme_csi_supported(NvmeCtrl *n, uint8_t csi)
{
- uint32_t cc = ldl_le_p(&n->bar.cc);
+ uint32_t cc;
- ns->iocs = nvme_cse_iocs_none;
- switch (ns->csi) {
+ switch (csi) {
case NVME_CSI_NVM:
- if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) {
- ns->iocs = nvme_cse_iocs_nvm;
- }
- break;
+ return true;
+
case NVME_CSI_ZONED:
- if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) {
- ns->iocs = nvme_cse_iocs_zoned;
- } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) {
- ns->iocs = nvme_cse_iocs_nvm;
- }
- break;
+ cc = ldl_le_p(&n->bar.cc);
+
+ return NVME_CC_CSS(cc) == NVME_CC_CSS_ALL;
}
+
+ g_assert_not_reached();
+}
+
+static void nvme_detach_ns(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ assert(ns->attached > 0);
+
+ n->namespaces[ns->params.nsid] = NULL;
+ ns->attached--;
}
static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
@@ -6430,7 +6816,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
switch (sel) {
case NVME_NS_ATTACHMENT_ATTACH:
- if (nvme_ns(ctrl, nsid)) {
+ if (nvme_ns(n, nsid)) {
return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
}
@@ -6438,20 +6824,18 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
return NVME_NS_PRIVATE | NVME_DNR;
}
+ if (!nvme_csi_supported(n, ns->csi)) {
+ return NVME_IOCS_NOT_SUPPORTED | NVME_DNR;
+ }
+
nvme_attach_ns(ctrl, ns);
- nvme_select_iocs_ns(ctrl, ns);
+ nvme_update_dsm_limits(ctrl, ns);
break;
case NVME_NS_ATTACHMENT_DETACH:
- if (!nvme_ns(ctrl, nsid)) {
- return NVME_NS_NOT_ATTACHED | NVME_DNR;
- }
-
- ctrl->namespaces[nsid] = NULL;
- ns->attached--;
-
- nvme_update_dmrsl(ctrl);
+ nvme_detach_ns(ctrl, ns);
+ nvme_update_dsm_limits(ctrl, NULL);
break;
@@ -6954,7 +7338,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
nvme_adm_opc_str(req->cmd.opcode));
- if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
+ if (!(n->cse.acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
@@ -7002,7 +7386,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
case NVME_ADM_CMD_DIRECTIVE_RECV:
return nvme_directive_receive(n, req);
default:
- assert(false);
+ g_assert_not_reached();
}
return NVME_INVALID_OPCODE | NVME_DNR;
@@ -7024,6 +7408,81 @@ static void nvme_update_sq_tail(NvmeSQueue *sq)
trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail);
}
+#define NVME_ATOMIC_NO_START 0
+#define NVME_ATOMIC_START_ATOMIC 1
+#define NVME_ATOMIC_START_NONATOMIC 2
+
+static int nvme_atomic_write_check(NvmeCtrl *n, NvmeCmd *cmd,
+ NvmeAtomic *atomic)
+{
+ NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+ uint64_t slba = le64_to_cpu(rw->slba);
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb);
+ uint64_t elba = slba + nlb;
+ bool cmd_atomic_wr = true;
+ int i;
+
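+    /*
+     * Reads never require atomic handling; writes larger than the atomic
+     * write limit proceed, but without atomicity guarantees.
+     */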
+ if ((cmd->opcode == NVME_CMD_READ) || ((cmd->opcode == NVME_CMD_WRITE) &&
+ ((rw->nlb + 1) > atomic->atomic_max_write_size))) {
+ cmd_atomic_wr = false;
+ }
+
+ /*
+ * Walk the queues to see if there are any atomic conflicts.
+ */
+ for (i = 1; i < n->params.max_ioqpairs + 1; i++) {
+ NvmeSQueue *sq;
+ NvmeRequest *req;
+ NvmeRwCmd *req_rw;
+ uint64_t req_slba;
+ uint32_t req_nlb;
+ uint64_t req_elba;
+
+ sq = n->sq[i];
+ if (!sq) {
+ continue;
+ }
+
+ /*
+ * Walk all the requests on a given queue.
+ */
+ QTAILQ_FOREACH(req, &sq->out_req_list, entry) {
+ req_rw = (NvmeRwCmd *)&req->cmd;
+
+ if (((req_rw->opcode == NVME_CMD_WRITE) ||
+ (req_rw->opcode == NVME_CMD_READ)) &&
+ (cmd->nsid == req->ns->params.nsid)) {
+ req_slba = le64_to_cpu(req_rw->slba);
+ req_nlb = (uint32_t)le16_to_cpu(req_rw->nlb);
+ req_elba = req_slba + req_nlb;
+
+ if (cmd_atomic_wr) {
+ if ((elba >= req_slba) && (slba <= req_elba)) {
+ return NVME_ATOMIC_NO_START;
+ }
+ } else {
+ if (req->atomic_write && ((elba >= req_slba) &&
+ (slba <= req_elba))) {
+ return NVME_ATOMIC_NO_START;
+ }
+ }
+ }
+ }
+ }
+ if (cmd_atomic_wr) {
+ return NVME_ATOMIC_START_ATOMIC;
+ }
+ return NVME_ATOMIC_START_NONATOMIC;
+}
+
+static NvmeAtomic *nvme_get_atomic(NvmeCtrl *n, NvmeCmd *cmd)
+{
+ if (n->atomic.atomic_writes) {
+ return &n->atomic;
+ }
+ return NULL;
+}
+
static void nvme_process_sq(void *opaque)
{
NvmeSQueue *sq = opaque;
@@ -7040,6 +7499,9 @@ static void nvme_process_sq(void *opaque)
}
while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
+ NvmeAtomic *atomic;
+ bool cmd_is_atomic;
+
addr = sq->dma_addr + (sq->head << NVME_SQES);
if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
trace_pci_nvme_err_addr_read(addr);
@@ -7047,6 +7509,26 @@ static void nvme_process_sq(void *opaque)
stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
break;
}
+
+ atomic = nvme_get_atomic(n, &cmd);
+
+ cmd_is_atomic = false;
+ if (sq->sqid && atomic) {
+ int ret;
+
+ ret = nvme_atomic_write_check(n, &cmd, atomic);
+ switch (ret) {
+ case NVME_ATOMIC_NO_START:
+ qemu_bh_schedule(sq->bh);
+ return;
+ case NVME_ATOMIC_START_ATOMIC:
+ cmd_is_atomic = true;
+ break;
+ case NVME_ATOMIC_START_NONATOMIC:
+ default:
+ break;
+ }
+ }
nvme_inc_sq_head(sq);
req = QTAILQ_FIRST(&sq->req_list);
@@ -7056,6 +7538,10 @@ static void nvme_process_sq(void *opaque)
req->cqe.cid = cmd.cid;
memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
+ if (sq->sqid && atomic) {
+ req->atomic_write = cmd_is_atomic;
+ }
+
status = sq->sqid ? nvme_io_cmd(n, req) :
nvme_admin_cmd(n, req);
if (status != NVME_NO_COMPLETE) {
@@ -7159,6 +7645,8 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
n->outstanding_aers = 0;
n->qs_created = false;
+ n->dn = n->params.atomic_dn; /* Set Disable Normal */
+
nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
if (pci_is_vf(pci_dev)) {
@@ -7197,21 +7685,6 @@ static void nvme_ctrl_shutdown(NvmeCtrl *n)
}
}
-static void nvme_select_iocs(NvmeCtrl *n)
-{
- NvmeNamespace *ns;
- int i;
-
- for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
- ns = nvme_ns(n, i);
- if (!ns) {
- continue;
- }
-
- nvme_select_iocs_ns(n, ns);
- }
-}
-
static int nvme_start_ctrl(NvmeCtrl *n)
{
uint64_t cap = ldq_le_p(&n->bar.cap);
@@ -7278,7 +7751,22 @@ static int nvme_start_ctrl(NvmeCtrl *n)
nvme_set_timestamp(n, 0ULL);
- nvme_select_iocs(n);
+ /* verify that the command sets of attached namespaces are supported */
+ for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
+
+ if (!ns || (!ns->params.shared && ns->ctrl != n)) {
+ continue;
+ }
+
+ if (nvme_csi_supported(n, ns->csi) && !ns->params.detached) {
+ if (!ns->attached || ns->params.shared) {
+ nvme_attach_ns(n, ns);
+ }
+ }
+ }
+
+ nvme_update_dsm_limits(n, NULL);
return 0;
}
@@ -7603,7 +8091,6 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
/* Completion queue doorbell write */
uint16_t new_head = val & 0xffff;
- int start_sqs;
NvmeCQueue *cq;
qid = (addr - (0x1000 + (1 << 2))) >> 3;
@@ -7654,18 +8141,15 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
- start_sqs = nvme_cq_full(cq) ? 1 : 0;
+    /* schedule deferred cqe posting if the queue was previously full */
+ if (nvme_cq_full(cq)) {
+ qemu_bh_schedule(cq->bh);
+ }
+
cq->head = new_head;
if (!qid && n->dbbuf_enabled) {
stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED);
}
- if (start_sqs) {
- NvmeSQueue *sq;
- QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
- qemu_bh_schedule(sq->bh);
- }
- qemu_bh_schedule(cq->bh);
- }
if (cq->tail == cq->head) {
if (cq->irq_enabled) {
@@ -7935,6 +8419,8 @@ static void nvme_init_state(NvmeCtrl *n)
NvmeSecCtrlEntry *list = n->sec_ctrl_list;
NvmeSecCtrlEntry *sctrl;
PCIDevice *pci = PCI_DEVICE(n);
+ NvmeAtomic *atomic = &n->atomic;
+ NvmeIdCtrl *id = &n->id_ctrl;
uint8_t max_vfs;
int i;
@@ -7992,6 +8478,29 @@ static void nvme_init_state(NvmeCtrl *n)
cpu_to_le16(n->params.sriov_max_vi_per_vf) :
cap->vifrt / MAX(max_vfs, 1);
}
+
+ /* Atomic Write */
+ id->awun = cpu_to_le16(n->params.atomic_awun);
+ id->awupf = cpu_to_le16(n->params.atomic_awupf);
+ n->dn = n->params.atomic_dn;
+
+ if (id->awun || id->awupf) {
+ if (id->awupf > id->awun) {
+ id->awupf = 0;
+ }
+
+ if (n->dn) {
+ atomic->atomic_max_write_size = id->awupf + 1;
+ } else {
+ atomic->atomic_max_write_size = id->awun + 1;
+ }
+
+ if (atomic->atomic_max_write_size == 1) {
+ atomic->atomic_writes = 0;
+ } else {
+ atomic->atomic_writes = 1;
+ }
+ }
}
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
@@ -8080,8 +8589,7 @@ static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
if (!pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
n->params.sriov_max_vfs, n->params.sriov_max_vfs,
- NVME_VF_OFFSET, NVME_VF_STRIDE,
- errp)) {
+ NVME_VF_OFFSET, NVME_VF_STRIDE, errp)) {
return false;
}
@@ -8096,8 +8604,7 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
Error *err = NULL;
int ret;
- ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset,
- PCI_PM_SIZEOF, &err);
+ ret = pci_pm_init(pci_dev, offset, &err);
if (err) {
error_report_err(err);
return ret;
@@ -8113,6 +8620,27 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
return 0;
}
+static bool pcie_doe_spdm_rsp(DOECap *doe_cap)
+{
+ void *req = pcie_doe_get_write_mbox_ptr(doe_cap);
+ uint32_t req_len = pcie_doe_get_obj_len(req) * 4;
+ void *rsp = doe_cap->read_mbox;
+ uint32_t rsp_len = SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE;
+
+ uint32_t recvd = spdm_socket_rsp(doe_cap->spdm_socket,
+ SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
+ req, req_len, rsp, rsp_len);
+ doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4);
+
+ return recvd != 0;
+}
+
+static DOEProtocol doe_spdm_prot[] = {
+ { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_CMA, pcie_doe_spdm_rsp },
+ { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_SECURED_CMA, pcie_doe_spdm_rsp },
+ { }
+};
+
static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
ERRP_GUARD();
@@ -8122,7 +8650,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
unsigned nr_vectors;
int ret;
- pci_conf[PCI_INTERRUPT_PIN] = 1;
+ pci_conf[PCI_INTERRUPT_PIN] = pci_is_vf(pci_dev) ? 0 : 1;
pci_config_set_prog_interface(pci_conf, 0x2);
if (n->params.use_intel_id) {
@@ -8194,12 +8722,30 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs &&
!nvme_init_sriov(n, pci_dev, 0x120, errp)) {
- msix_uninit(pci_dev, &n->bar0, &n->bar0);
return false;
}
nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
+ pcie_cap_deverr_init(pci_dev);
+
+ /* DOE Initialisation */
+ if (pci_dev->spdm_port) {
+ uint16_t doe_offset = n->params.sriov_max_vfs ?
+ PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
+ : PCI_CONFIG_SPACE_SIZE;
+
+ pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
+ doe_spdm_prot, true, 0);
+
+ pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port,
+ errp);
+
+ if (pci_dev->doe_spdm.spdm_socket < 0) {
+ return false;
+ }
+ }
+
if (n->params.cmb_size_mb) {
nvme_init_cmb(n, pci_dev);
}
@@ -8231,6 +8777,12 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
uint64_t cap = ldq_le_p(&n->bar.cap);
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
uint32_t ctratt;
+ uint16_t oacs;
+
+ memcpy(n->cse.acs, nvme_cse_acs_default, sizeof(n->cse.acs));
+ memcpy(n->cse.iocs.nvm, nvme_cse_iocs_nvm_default, sizeof(n->cse.iocs.nvm));
+ memcpy(n->cse.iocs.zoned, nvme_cse_iocs_zoned_default,
+ sizeof(n->cse.iocs.zoned));
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -8241,7 +8793,11 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->cntlid = cpu_to_le16(n->cntlid);
id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
+
ctratt = NVME_CTRATT_ELBAS;
+ if (n->params.ctratt.mem) {
+ ctratt |= NVME_CTRATT_MEM;
+ }
id->rab = 6;
@@ -8257,9 +8813,23 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->mdts = n->params.mdts;
id->ver = cpu_to_le32(NVME_SPEC_VER);
- id->oacs =
- cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF |
- NVME_OACS_DIRECTIVES);
+
+ oacs = NVME_OACS_NMS | NVME_OACS_FORMAT | NVME_OACS_DIRECTIVES;
+
+ if (n->params.dbcs) {
+ oacs |= NVME_OACS_DBCS;
+
+ n->cse.acs[NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP;
+ }
+
+ if (n->params.sriov_max_vfs) {
+ oacs |= NVME_OACS_VMS;
+
+ n->cse.acs[NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP;
+ }
+
+ id->oacs = cpu_to_le16(oacs);
+
id->cntrltype = 0x1;
/*
@@ -8287,7 +8857,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->nn = cpu_to_le32(NVME_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
NVME_ONCS_FEATURES | NVME_ONCS_DSM |
- NVME_ONCS_COMPARE | NVME_ONCS_COPY);
+ NVME_ONCS_COMPARE | NVME_ONCS_COPY |
+ NVME_ONCS_NVMCSA | NVME_ONCS_NVMAFC);
/*
* NOTE: If this device ever supports a command set that does NOT use 0x0
@@ -8298,8 +8869,10 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
*/
id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
- id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1);
- id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);
+ id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1 |
+ NVME_OCFS_COPY_FORMAT_2 | NVME_OCFS_COPY_FORMAT_3);
+ id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
+ NVME_CTRL_SGLS_MPTR_SGL);
nvme_init_subnqn(n);
@@ -8307,15 +8880,13 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->psd[0].enlat = cpu_to_le32(0x10);
id->psd[0].exlat = cpu_to_le32(0x4);
- if (n->subsys) {
- id->cmic |= NVME_CMIC_MULTI_CTRL;
- ctratt |= NVME_CTRATT_ENDGRPS;
+ id->cmic |= NVME_CMIC_MULTI_CTRL;
+ ctratt |= NVME_CTRATT_ENDGRPS;
- id->endgidmax = cpu_to_le16(0x1);
+ id->endgidmax = cpu_to_le16(0x1);
- if (n->subsys->endgrp.fdp.enabled) {
- ctratt |= NVME_CTRATT_FDPS;
- }
+ if (n->subsys->endgrp.fdp.enabled) {
+ ctratt |= NVME_CTRATT_FDPS;
}
id->ctratt = cpu_to_le32(ctratt);
@@ -8323,9 +8894,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
NVME_CAP_SET_MQES(cap, n->params.mqes);
NVME_CAP_SET_CQR(cap, 1);
NVME_CAP_SET_TO(cap, 0xf);
- NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM);
- NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP);
- NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY);
+ NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NCSS);
+ NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_IOCSS);
NVME_CAP_SET_MPSMAX(cap, 4);
NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0);
NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0);
@@ -8344,7 +8914,15 @@ static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
int cntlid;
if (!n->subsys) {
- return 0;
+ DeviceState *dev = qdev_new(TYPE_NVME_SUBSYS);
+
+ qdev_prop_set_string(dev, "nqn", n->params.serial);
+
+ if (!qdev_realize(dev, NULL, errp)) {
+ return -1;
+ }
+
+ n->subsys = NVME_SUBSYS(dev);
}
cntlid = nvme_subsys_register_ctrl(n, errp);
@@ -8364,9 +8942,6 @@ void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
n->namespaces[nsid] = ns;
ns->attached++;
-
- n->dmrsl = MIN_NON_ZERO(n->dmrsl,
- BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
}
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
@@ -8389,6 +8964,13 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
*/
n->params.serial = g_strdup(pn->params.serial);
n->subsys = pn->subsys;
+
+ /*
+ * Assigning this link (strong link) causes an `object_unref` later in
+ * `object_release_link_property`. Increment the refcount to balance
+ * this out.
+ */
+ object_ref(OBJECT(pn->subsys));
}
if (!nvme_check_params(n, errp)) {
@@ -8410,12 +8992,13 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
if (n->namespace.blkconf.blk) {
ns = &n->namespace;
ns->params.nsid = 1;
+ ns->ctrl = n;
if (nvme_ns_setup(ns, errp)) {
return;
}
- nvme_attach_ns(n, ns);
+ n->subsys->namespaces[ns->params.nsid] = ns;
}
}
@@ -8427,17 +9010,15 @@ static void nvme_exit(PCIDevice *pci_dev)
nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
- if (n->subsys) {
- for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
- ns = nvme_ns(n, i);
- if (ns) {
- ns->attached--;
- }
+ for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ ns = nvme_ns(n, i);
+ if (ns) {
+ ns->attached--;
}
-
- nvme_subsys_unregister_ctrl(n->subsys, n);
}
+ nvme_subsys_unregister_ctrl(n->subsys, n);
+
g_free(n->cq);
g_free(n->sq);
g_free(n->aer_reqs);
@@ -8446,6 +9027,11 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->cmb.buf);
}
+ if (pci_dev->doe_spdm.spdm_socket > 0) {
+ spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
+ SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
+ }
+
if (n->pmr.dev) {
host_memory_backend_set_mapped(n->pmr.dev, false);
}
@@ -8454,11 +9040,16 @@ static void nvme_exit(PCIDevice *pci_dev)
pcie_sriov_pf_exit(pci_dev);
}
- msix_uninit(pci_dev, &n->bar0, &n->bar0);
+ if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) {
+ msix_uninit_exclusive_bar(pci_dev);
+ } else {
+ msix_uninit(pci_dev, &n->bar0, &n->bar0);
+ }
+
memory_region_del_subregion(&n->bar0, &n->iomem);
}
-static Property nvme_props[] = {
+static const Property nvme_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
@@ -8476,6 +9067,7 @@ static Property nvme_props[] = {
DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false),
+ DEFINE_PROP_BOOL("dbcs", NvmeCtrl, params.dbcs, true),
DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
params.auto_transition_zones, true),
@@ -8491,7 +9083,12 @@ static Property nvme_props[] = {
DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar,
false),
DEFINE_PROP_UINT16("mqes", NvmeCtrl, params.mqes, 0x7ff),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_UINT16("spdm_port", PCIDevice, spdm_port, 0),
+ DEFINE_PROP_BOOL("ctratt.mem", NvmeCtrl, params.ctratt.mem, false),
+ DEFINE_PROP_BOOL("atomic.dn", NvmeCtrl, params.atomic_dn, 0),
+ DEFINE_PROP_UINT16("atomic.awun", NvmeCtrl, params.atomic_awun, 0),
+ DEFINE_PROP_UINT16("atomic.awupf", NvmeCtrl, params.atomic_awupf, 0),
+ DEFINE_PROP_BOOL("ocp", NvmeCtrl, params.ocp, false),
};
static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
@@ -8562,23 +9159,38 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
{
uint16_t old_num_vfs = pcie_sriov_num_vfs(dev);
+ if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+ pcie_doe_write_config(&dev->doe_spdm, address, val, len);
+ }
pci_default_write_config(dev, address, val, len);
pcie_cap_flr_write_config(dev, address, val, len);
nvme_sriov_post_write_config(dev, old_num_vfs);
}
+static uint32_t nvme_pci_read_config(PCIDevice *dev, uint32_t address, int len)
+{
+ uint32_t val;
+ if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+ if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) {
+ return val;
+ }
+ }
+ return pci_default_read_config(dev, address, len);
+}
+
static const VMStateDescription nvme_vmstate = {
.name = "nvme",
.unmigratable = 1,
};
-static void nvme_class_init(ObjectClass *oc, void *data)
+static void nvme_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
pc->realize = nvme_realize;
pc->config_write = nvme_pci_write_config;
+ pc->config_read = nvme_pci_read_config;
pc->exit = nvme_exit;
pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
pc->revision = 2;
@@ -8587,7 +9199,7 @@ static void nvme_class_init(ObjectClass *oc, void *data)
dc->desc = "Non-Volatile Memory Express";
device_class_set_props(dc, nvme_props);
dc->vmsd = &nvme_vmstate;
- dc->reset = nvme_pci_reset;
+ device_class_set_legacy_reset(dc, nvme_pci_reset);
}
static void nvme_instance_init(Object *obj)
@@ -8609,7 +9221,7 @@ static const TypeInfo nvme_info = {
.instance_size = sizeof(NvmeCtrl),
.instance_init = nvme_instance_init,
.class_init = nvme_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c
index 01b19c3..4e7874f 100644
--- a/hw/nvme/dif.c
+++ b/hw/nvme/dif.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "nvme.h"
#include "dif.h"
@@ -575,11 +575,6 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req)
uint8_t *mbuf, *end;
int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
- status = nvme_check_prinfo(ns, prinfo, slba, reftag);
- if (status) {
- goto err;
- }
-
flags = 0;
ctx->mdata.bounce = g_malloc0(mlen);
diff --git a/hw/nvme/nguid.c b/hw/nvme/nguid.c
index 829832b..4cd6fad 100644
--- a/hw/nvme/nguid.c
+++ b/hw/nvme/nguid.c
@@ -149,7 +149,7 @@ static void nvme_nguid_stringify(const NvmeNGUID *nguid, char *out)
static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
NvmeNGUID *nguid = object_field_prop_ptr(obj, prop);
char buffer[NGUID_STR_LEN];
char *p = buffer;
@@ -162,7 +162,7 @@ static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque,
static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
NvmeNGUID *nguid = object_field_prop_ptr(obj, prop);
char *str;
@@ -179,7 +179,7 @@ static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque,
}
const PropertyInfo qdev_prop_nguid = {
- .name = "str",
+ .type = "str",
.description =
"NGUID or \"" NGUID_VALUE_AUTO "\" for random value",
.get = get_nguid,
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index ea8db17..6df2e8e 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -18,8 +18,8 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
+#include "system/system.h"
+#include "system/block-backend.h"
#include "nvme.h"
#include "trace.h"
@@ -30,6 +30,7 @@
void nvme_ns_init_format(NvmeNamespace *ns)
{
NvmeIdNs *id_ns = &ns->id_ns;
+ NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm;
BlockDriverInfo bdi;
int npdg, ret;
int64_t nlbas;
@@ -55,6 +56,8 @@ void nvme_ns_init_format(NvmeNamespace *ns)
}
id_ns->npda = id_ns->npdg = npdg - 1;
+ id_ns_nvm->npdal = npdg;
+ id_ns_nvm->npdgl = npdg;
}
static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
@@ -62,6 +65,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
static uint64_t ns_count;
NvmeIdNs *id_ns = &ns->id_ns;
NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm;
+ NvmeIdNsInd *id_ns_ind = &ns->id_ns_ind;
uint8_t ds;
uint16_t ms;
int i;
@@ -72,10 +76,12 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
ns->id_ns.dlfeat = 0x1;
/* support DULBE and I/O optimization fields */
- id_ns->nsfeat |= (0x4 | 0x10);
+ id_ns->nsfeat |= (NVME_ID_NS_NSFEAT_DAE | NVME_ID_NS_NSFEAT_OPTPERF_ALL);
if (ns->params.shared) {
- id_ns->nmic |= NVME_NMIC_NS_SHARED;
+ id_ns->nmic |= NVME_ID_NS_IND_NMIC_SHRNS;
+ id_ns_ind->nmic = NVME_ID_NS_IND_NMIC_SHRNS;
+ id_ns_ind->nstat = NVME_ID_NS_IND_NSTAT_NRDY;
}
/* Substitute a missing EUI-64 by an autogenerated one */
@@ -721,25 +727,14 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
uint32_t nsid = ns->params.nsid;
int i;
- if (!n->subsys) {
- /* If no subsys, the ns cannot be attached to more than one ctrl. */
- ns->params.shared = false;
- if (ns->params.detached) {
- error_setg(errp, "detached requires that the nvme device is "
- "linked to an nvme-subsys device");
- return;
- }
- } else {
- /*
- * If this namespace belongs to a subsystem (through a link on the
- * controller device), reparent the device.
- */
- if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
- return;
- }
- ns->subsys = subsys;
- ns->endgrp = &subsys->endgrp;
+ assert(subsys);
+
+ /* reparent to subsystem bus */
+ if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
+ return;
}
+ ns->subsys = subsys;
+ ns->endgrp = &subsys->endgrp;
if (nvme_ns_setup(ns, errp)) {
return;
@@ -747,7 +742,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
if (!nsid) {
for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
- if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) {
+ if (nvme_subsys_ns(subsys, i)) {
continue;
}
@@ -759,40 +754,22 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
error_setg(errp, "no free namespace id");
return;
}
- } else {
- if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) {
- error_setg(errp, "namespace id '%d' already allocated", nsid);
- return;
- }
+ } else if (nvme_subsys_ns(subsys, nsid)) {
+ error_setg(errp, "namespace id '%d' already allocated", nsid);
+ return;
}
- if (subsys) {
- subsys->namespaces[nsid] = ns;
+ subsys->namespaces[nsid] = ns;
- ns->id_ns.endgid = cpu_to_le16(0x1);
-
- if (ns->params.detached) {
- return;
- }
-
- if (ns->params.shared) {
- for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) {
- NvmeCtrl *ctrl = subsys->ctrls[i];
-
- if (ctrl && ctrl != SUBSYS_SLOT_RSVD) {
- nvme_attach_ns(ctrl, ns);
- }
- }
-
- return;
- }
+ ns->id_ns.endgid = cpu_to_le16(0x1);
+ ns->id_ns_ind.endgrpid = cpu_to_le16(0x1);
+ if (!ns->params.shared) {
+ ns->ctrl = n;
}
-
- nvme_attach_ns(n, ns);
}
-static Property nvme_ns_props[] = {
+static const Property nvme_ns_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf),
DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false),
DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true),
@@ -827,10 +804,9 @@ static Property nvme_ns_props[] = {
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
false),
DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvme_ns_class_init(ObjectClass *oc, void *data)
+static void nvme_ns_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 7819857..b5c9378 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -220,6 +220,11 @@ typedef struct NvmeNamespaceParams {
} fdp;
} NvmeNamespaceParams;
+typedef struct NvmeAtomic {
+ uint32_t atomic_max_write_size;
+ bool atomic_writes;
+} NvmeAtomic;
+
typedef struct NvmeNamespace {
DeviceState parent_obj;
BlockConf blkconf;
@@ -228,10 +233,10 @@ typedef struct NvmeNamespace {
int64_t moff;
NvmeIdNs id_ns;
NvmeIdNsNvm id_ns_nvm;
+ NvmeIdNsInd id_ns_ind;
NvmeLBAF lbaf;
unsigned int nlbaf;
size_t lbasz;
- const uint32_t *iocs;
uint8_t csi;
uint16_t status;
int attached;
@@ -263,6 +268,9 @@ typedef struct NvmeNamespace {
NvmeSubsystem *subsys;
NvmeEnduranceGroup *endgrp;
+ /* NULL for shared namespaces; set to specific controller if private */
+ NvmeCtrl *ctrl;
+
struct {
uint32_t err_rec;
} features;
@@ -421,6 +429,7 @@ typedef struct NvmeRequest {
NvmeCmd cmd;
BlockAcctCookie acct;
NvmeSg sg;
+ bool atomic_write;
QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;
@@ -532,12 +541,22 @@ typedef struct NvmeParams {
bool auto_transition_zones;
bool legacy_cmb;
bool ioeventfd;
+ bool dbcs;
uint16_t sriov_max_vfs;
uint16_t sriov_vq_flexible;
uint16_t sriov_vi_flexible;
uint32_t sriov_max_vq_per_vf;
uint32_t sriov_max_vi_per_vf;
bool msix_exclusive_bar;
+ bool ocp;
+
+ struct {
+ bool mem;
+ } ctratt;
+
+ uint16_t atomic_awun;
+ uint16_t atomic_awupf;
+ bool atomic_dn;
} NvmeParams;
typedef struct NvmeCtrl {
@@ -569,6 +588,14 @@ typedef struct NvmeCtrl {
bool dbbuf_enabled;
struct {
+ uint32_t acs[256];
+ struct {
+ uint32_t nvm[256];
+ uint32_t zoned[256];
+ } iocs;
+ } cse;
+
+ struct {
MemoryRegion mem;
uint8_t *buf;
bool cmse;
@@ -619,6 +646,8 @@ typedef struct NvmeCtrl {
uint16_t vqrfap;
uint16_t virfap;
} next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */
+ uint32_t dn; /* Disable Normal */
+ NvmeAtomic atomic;
} NvmeCtrl;
typedef enum NvmeResetType {
diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c
index 77deaf2..777e1c6 100644
--- a/hw/nvme/subsys.c
+++ b/hw/nvme/subsys.c
@@ -56,7 +56,7 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
{
NvmeSubsystem *subsys = n->subsys;
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
- int cntlid, nsid, num_rsvd, num_vfs = n->params.sriov_max_vfs;
+ int cntlid, num_rsvd, num_vfs = n->params.sriov_max_vfs;
if (pci_is_vf(&n->parent_obj)) {
cntlid = le16_to_cpu(sctrl->scid);
@@ -92,13 +92,6 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
subsys->ctrls[cntlid] = n;
- for (nsid = 1; nsid < ARRAY_SIZE(subsys->namespaces); nsid++) {
- NvmeNamespace *ns = subsys->namespaces[nsid];
- if (ns && ns->params.shared && !ns->params.detached) {
- nvme_attach_ns(n, ns);
- }
- }
-
return cntlid;
}
@@ -216,17 +209,16 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp)
nvme_subsys_setup(subsys, errp);
}
-static Property nvme_subsystem_props[] = {
+static const Property nvme_subsystem_props[] = {
DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn),
DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false),
DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs,
NVME_DEFAULT_RU_SIZE),
DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1),
DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvme_subsys_class_init(ObjectClass *oc, void *data)
+static void nvme_subsys_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -234,7 +226,6 @@ static void nvme_subsys_class_init(ObjectClass *oc, void *data)
dc->realize = nvme_subsys_realize;
dc->desc = "Virtual NVMe subsystem";
- dc->hotpluggable = false;
device_class_set_props(dc, nvme_subsystem_props);
}
diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events
index 3a67680..6be0bfa 100644
--- a/hw/nvme/trace-events
+++ b/hw/nvme/trace-events
@@ -56,6 +56,7 @@ pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid
pci_nvme_identify_ctrl(void) "identify controller"
pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8""
pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
+pci_nvme_identify_ns_ind(uint32_t nsid) "nsid %"PRIu32""
pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16""
pci_nvme_identify_pri_ctrl_cap(uint16_t cntlid) "identify primary controller capabilities cntlid=%"PRIu16""
pci_nvme_identify_sec_ctrl_list(uint16_t cntlid, uint8_t numcntl) "identify secondary controller list cntlid=%"PRIu16" numcntl=%"PRIu8""
diff --git a/hw/nvram/bcm2835_otp.c b/hw/nvram/bcm2835_otp.c
index c4aed28..6816b53 100644
--- a/hw/nvram/bcm2835_otp.c
+++ b/hw/nvram/bcm2835_otp.c
@@ -164,7 +164,7 @@ static const VMStateDescription vmstate_bcm2835_otp = {
}
};
-static void bcm2835_otp_class_init(ObjectClass *klass, void *data)
+static void bcm2835_otp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/nvram/chrp_nvram.c b/hw/nvram/chrp_nvram.c
index d4d10a7..0b204e3 100644
--- a/hw/nvram/chrp_nvram.c
+++ b/hw/nvram/chrp_nvram.c
@@ -23,7 +23,7 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "hw/nvram/chrp_nvram.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str,
int max_len)
diff --git a/hw/nvram/ds1225y.c b/hw/nvram/ds1225y.c
index 6d510dc..dbfd0d2 100644
--- a/hw/nvram/ds1225y.c
+++ b/hw/nvram/ds1225y.c
@@ -142,13 +142,12 @@ static void nvram_sysbus_realize(DeviceState *dev, Error **errp)
nvram_post_load(s, 0);
}
-static Property nvram_sysbus_properties[] = {
+static const Property nvram_sysbus_properties[] = {
DEFINE_PROP_UINT32("size", SysBusNvRamState, nvram.chip_size, 0x2000),
DEFINE_PROP_STRING("filename", SysBusNvRamState, nvram.filename),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvram_sysbus_class_init(ObjectClass *klass, void *data)
+static void nvram_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c
index 3272068..82ea97e 100644
--- a/hw/nvram/eeprom_at24c.c
+++ b/hw/nvram/eeprom_at24c.c
@@ -10,12 +10,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/i2c/i2c.h"
#include "hw/nvram/eeprom_at24c.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qom/object.h"
/* #define DEBUG_AT24C */
@@ -26,13 +27,8 @@
#define DPRINTK(FMT, ...) do {} while (0)
#endif
-#define ERR(FMT, ...) fprintf(stderr, TYPE_AT24C_EE " : " FMT, \
- ## __VA_ARGS__)
-
#define TYPE_AT24C_EE "at24c-eeprom"
-typedef struct EEPROMState EEPROMState;
-DECLARE_INSTANCE_CHECKER(EEPROMState, AT24C_EE,
- TYPE_AT24C_EE)
+OBJECT_DECLARE_SIMPLE_TYPE(EEPROMState, AT24C_EE)
struct EEPROMState {
I2CSlave parent_obj;
@@ -77,8 +73,7 @@ int at24c_eeprom_event(I2CSlave *s, enum i2c_event event)
if (ee->blk && ee->changed) {
int ret = blk_pwrite(ee->blk, 0, ee->rsize, ee->mem, 0);
if (ret < 0) {
- ERR(TYPE_AT24C_EE
- " : failed to write backing file\n");
+ error_report("%s: failed to write backing file", __func__);
}
DPRINTK("Wrote to backing file\n");
}
@@ -195,20 +190,18 @@ static void at24c_eeprom_realize(DeviceState *dev, Error **errp)
}
ee->mem = g_malloc0(ee->rsize);
- memset(ee->mem, 0, ee->rsize);
-
- if (ee->init_rom) {
- memcpy(ee->mem, ee->init_rom, MIN(ee->init_rom_size, ee->rsize));
- }
if (ee->blk) {
int ret = blk_pread(ee->blk, 0, ee->rsize, ee->mem, 0);
if (ret < 0) {
- ERR(TYPE_AT24C_EE
- " : Failed initial sync with backing file\n");
+ error_setg(errp, "%s: Failed initial sync with backing file",
+ TYPE_AT24C_EE);
+ return;
}
DPRINTK("Reset read backing file\n");
+ } else if (ee->init_rom) {
+ memcpy(ee->mem, ee->init_rom, MIN(ee->init_rom_size, ee->rsize));
}
/*
@@ -234,16 +227,15 @@ void at24c_eeprom_reset(DeviceState *state)
ee->haveaddr = 0;
}
-static Property at24c_eeprom_props[] = {
+static const Property at24c_eeprom_props[] = {
DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0),
DEFINE_PROP_UINT8("address-size", EEPROMState, asize, 0),
DEFINE_PROP_BOOL("writable", EEPROMState, writable, true),
DEFINE_PROP_DRIVE("drive", EEPROMState, blk),
- DEFINE_PROP_END_OF_LIST()
};
static
-void at24c_eeprom_class_init(ObjectClass *klass, void *data)
+void at24c_eeprom_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -254,7 +246,7 @@ void at24c_eeprom_class_init(ObjectClass *klass, void *data)
k->send = &at24c_eeprom_send;
device_class_set_props(dc, at24c_eeprom_props);
- dc->reset = at24c_eeprom_reset;
+ device_class_set_legacy_reset(dc, at24c_eeprom_reset);
}
static
diff --git a/hw/nvram/fw_cfg-acpi.c b/hw/nvram/fw_cfg-acpi.c
index 58cdcd3..2e6ef89 100644
--- a/hw/nvram/fw_cfg-acpi.c
+++ b/hw/nvram/fw_cfg-acpi.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Add fw_cfg device in DSDT
*
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index fc0263f..aa24050 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -24,10 +24,10 @@
#include "qemu/osdep.h"
#include "qemu/datadir.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/dma.h"
-#include "sysemu/reset.h"
-#include "exec/address-spaces.h"
+#include "system/system.h"
+#include "system/dma.h"
+#include "system/reset.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
@@ -41,7 +41,6 @@
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "hw/acpi/aml-build.h"
-#include "hw/pci/pci_bus.h"
#include "hw/loader.h"
#define FW_CFG_FILE_SLOTS_DFLT 0x20
@@ -730,7 +729,6 @@ static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
ptr = s->entries[arch][key].data;
s->entries[arch][key].data = data;
s->entries[arch][key].len = len;
- s->entries[arch][key].callback_opaque = NULL;
s->entries[arch][key].allow_write = false;
return ptr;
@@ -819,62 +817,6 @@ void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value)
g_free(old);
}
-void fw_cfg_set_order_override(FWCfgState *s, int order)
-{
- assert(s->fw_cfg_order_override == 0);
- s->fw_cfg_order_override = order;
-}
-
-void fw_cfg_reset_order_override(FWCfgState *s)
-{
- assert(s->fw_cfg_order_override != 0);
- s->fw_cfg_order_override = 0;
-}
-
-/*
- * This is the legacy order list. For legacy systems, files are in
- * the fw_cfg in the order defined below, by the "order" value. Note
- * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a
- * specific area, but there may be more than one and they occur in the
- * order that the user specifies them on the command line. Those are
- * handled in a special manner, using the order override above.
- *
- * For non-legacy, the files are sorted by filename to avoid this kind
- * of complexity in the future.
- *
- * This is only for x86, other arches don't implement versioning so
- * they won't set legacy mode.
- */
-static struct {
- const char *name;
- int order;
-} fw_cfg_order[] = {
- { "etc/boot-menu-wait", 10 },
- { "bootsplash.jpg", 11 },
- { "bootsplash.bmp", 12 },
- { "etc/boot-fail-wait", 15 },
- { "etc/smbios/smbios-tables", 20 },
- { "etc/smbios/smbios-anchor", 30 },
- { "etc/e820", 40 },
- { "etc/reserved-memory-end", 50 },
- { "genroms/kvmvapic.bin", 55 },
- { "genroms/linuxboot.bin", 60 },
- { }, /* VGA ROMs from pc_vga_init come here, 70. */
- { }, /* NIC option ROMs from pc_nic_init come here, 80. */
- { "etc/system-states", 90 },
- { }, /* User ROMs come here, 100. */
- { }, /* Device FW comes here, 110. */
- { "etc/extra-pci-roots", 120 },
- { "etc/acpi/tables", 130 },
- { "etc/table-loader", 140 },
- { "etc/tpm/log", 150 },
- { "etc/acpi/rsdp", 160 },
- { "bootorder", 170 },
- { "etc/msr_feature_control", 180 },
-
-#define FW_CFG_ORDER_OVERRIDE_LAST 200
-};
-
/*
* Any sub-page size update to these table MRs will be lost during migration,
* as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path.
@@ -892,29 +834,6 @@ static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len)
}
}
-static int get_fw_cfg_order(FWCfgState *s, const char *name)
-{
- int i;
-
- if (s->fw_cfg_order_override > 0) {
- return s->fw_cfg_order_override;
- }
-
- for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) {
- if (fw_cfg_order[i].name == NULL) {
- continue;
- }
-
- if (strcmp(name, fw_cfg_order[i].name) == 0) {
- return fw_cfg_order[i].order;
- }
- }
-
- /* Stick unknown stuff at the end. */
- warn_report("Unknown firmware file in legacy mode: %s", name);
- return FW_CFG_ORDER_OVERRIDE_LAST;
-}
-
void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
@@ -923,7 +842,6 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
{
int i, index, count;
size_t dsize;
- MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
int order = 0;
if (!s->files) {
@@ -935,22 +853,11 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
count = be32_to_cpu(s->files->count);
assert(count < fw_cfg_file_slots(s));
- /* Find the insertion point. */
- if (mc->legacy_fw_cfg_order) {
- /*
- * Sort by order. For files with the same order, we keep them
- * in the sequence in which they were added.
- */
- order = get_fw_cfg_order(s, filename);
- for (index = count;
- index > 0 && order < s->entry_order[index - 1];
- index--);
- } else {
- /* Sort by file name. */
- for (index = count;
- index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
- index--);
- }
+ /* Find the insertion point, sorting by file name. */
+ for (index = count;
+ index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
+ index--)
+ ;
/*
* Move all the entries from the index point and after down one
@@ -1027,27 +934,29 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
return NULL;
}
-bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
- const char *gen_id, Error **errp)
+bool fw_cfg_add_file_from_generator(FWCfgState *s,
+ Object *parent, const char *part,
+ const char *filename, Error **errp)
{
+ ERRP_GUARD();
FWCfgDataGeneratorClass *klass;
GByteArray *array;
Object *obj;
gsize size;
- obj = object_resolve_path_component(object_get_objects_root(), gen_id);
+ obj = object_resolve_path_component(parent, part);
if (!obj) {
- error_setg(errp, "Cannot find object ID '%s'", gen_id);
+ error_setg(errp, "Cannot find object ID '%s'", part);
return false;
}
if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) {
error_setg(errp, "Object ID '%s' is not a '%s' subclass",
- gen_id, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE);
+ part, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE);
return false;
}
klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj);
array = klass->get_data(obj, errp);
- if (!array) {
+ if (*errp || !array) {
return false;
}
size = array->len;
@@ -1056,31 +965,8 @@ bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
return true;
}
-void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s)
-{
- int extra_hosts = 0;
-
- if (!bus) {
- return;
- }
-
- QLIST_FOREACH(bus, &bus->child, sibling) {
- /* look for expander root buses */
- if (pci_bus_is_root(bus)) {
- extra_hosts++;
- }
- }
-
- if (extra_hosts && s) {
- uint64_t *val = g_malloc(sizeof(*val));
- *val = cpu_to_le64(extra_hosts);
- fw_cfg_add_file(s, "etc/extra-pci-roots", val, sizeof(*val));
- }
-}
-
static void fw_cfg_machine_reset(void *opaque)
{
- MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
FWCfgState *s = opaque;
void *ptr;
size_t len;
@@ -1090,11 +976,9 @@ static void fw_cfg_machine_reset(void *opaque)
ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len);
g_free(ptr);
- if (!mc->legacy_fw_cfg_order) {
- buf = get_boot_devices_lchs_list(&len);
- ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len);
- g_free(ptr);
- }
+ buf = get_boot_devices_lchs_list(&len);
+ ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len);
+ g_free(ptr);
}
static void fw_cfg_machine_ready(struct Notifier *n, void *data)
@@ -1103,9 +987,8 @@ static void fw_cfg_machine_ready(struct Notifier *n, void *data)
qemu_register_reset(fw_cfg_machine_reset, s);
}
-static Property fw_cfg_properties[] = {
+static const Property fw_cfg_properties[] = {
DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void fw_cfg_common_realize(DeviceState *dev, Error **errp)
@@ -1171,11 +1054,6 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
return s;
}
-FWCfgState *fw_cfg_init_io(uint32_t iobase)
-{
- return fw_cfg_init_io_dma(iobase, 0, NULL);
-}
-
FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
hwaddr data_addr, uint32_t data_width,
hwaddr dma_addr, AddressSpace *dma_as)
@@ -1256,11 +1134,11 @@ void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
fw_cfg_add_bytes(fw_cfg, data_key, data, size);
}
-static void fw_cfg_class_init(ObjectClass *klass, void *data)
+static void fw_cfg_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = fw_cfg_reset;
+ device_class_set_legacy_reset(dc, fw_cfg_reset);
dc->vmsd = &vmstate_fw_cfg;
device_class_set_props(dc, fw_cfg_properties);
@@ -1299,12 +1177,11 @@ static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
s->entry_order = g_new0(int, fw_cfg_max_entry(s));
}
-static Property fw_cfg_io_properties[] = {
+static const Property fw_cfg_io_properties[] = {
DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
true),
DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
FW_CFG_FILE_SLOTS_DFLT),
- DEFINE_PROP_END_OF_LIST(),
};
static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
@@ -1332,7 +1209,7 @@ static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
fw_cfg_common_realize(dev, errp);
}
-static void fw_cfg_io_class_init(ObjectClass *klass, void *data)
+static void fw_cfg_io_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1348,13 +1225,12 @@ static const TypeInfo fw_cfg_io_info = {
};
-static Property fw_cfg_mem_properties[] = {
+static const Property fw_cfg_mem_properties[] = {
DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
true),
DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
FW_CFG_FILE_SLOTS_DFLT),
- DEFINE_PROP_END_OF_LIST(),
};
static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
@@ -1394,7 +1270,7 @@ static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
fw_cfg_common_realize(dev, errp);
}
-static void fw_cfg_mem_class_init(ObjectClass *klass, void *data)
+static void fw_cfg_mem_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
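
The fw_cfg.c hunk above rewrites the file-insertion search as an empty-bodied for loop that scans backwards for the slot where the new file name belongs, then moves the later entries along by one slot. A standalone sketch of the same search-and-shift idea in plain C (the entry struct and file names here are illustrative, not QEMU's FWCfgFiles layout):

    #include <stdio.h>
    #include <string.h>

    struct entry {
        const char *name;
    };

    /* Insert 'name' into the ascending-sorted array e[0..count-1]; return new count. */
    static int sorted_insert(struct entry *e, int count, const char *name)
    {
        int index;

        /* Find the insertion point, sorting by file name (as in the patch). */
        for (index = count;
             index > 0 && strcmp(name, e[index - 1].name) < 0;
             index--)
            ;

        /* Shift the tail up by one index to open a slot for the new entry. */
        memmove(&e[index + 1], &e[index], (count - index) * sizeof(e[0]));
        e[index].name = name;
        return count + 1;
    }

    int main(void)
    {
        struct entry files[4] = { { "etc/acpi/tables" }, { "etc/smbios" } };
        int n = 2;

        n = sorted_insert(files, n, "bootorder");
        for (int i = 0; i < n; i++) {
            printf("%s\n", files[i].name);
        }
        return 0;
    }
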
diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c
index fe9df9f..66526a2 100644
--- a/hw/nvram/mac_nvram.c
+++ b/hw/nvram/mac_nvram.c
@@ -29,13 +29,13 @@
#include "hw/nvram/mac_nvram.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "migration/vmstate.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "trace.h"
-#include <zlib.h>
+#include <zlib.h> /* for adler32 */
#define DEF_SYSTEM_SIZE 0xc10
@@ -134,20 +134,19 @@ static void macio_nvram_unrealizefn(DeviceState *dev)
g_free(s->data);
}
-static Property macio_nvram_properties[] = {
+static const Property macio_nvram_properties[] = {
DEFINE_PROP_UINT32("size", MacIONVRAMState, size, 0),
DEFINE_PROP_UINT32("it_shift", MacIONVRAMState, it_shift, 0),
DEFINE_PROP_DRIVE("drive", MacIONVRAMState, blk),
- DEFINE_PROP_END_OF_LIST()
};
-static void macio_nvram_class_init(ObjectClass *oc, void *data)
+static void macio_nvram_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = macio_nvram_realizefn;
dc->unrealize = macio_nvram_unrealizefn;
- dc->reset = macio_nvram_reset;
+ device_class_set_legacy_reset(dc, macio_nvram_reset);
dc->vmsd = &vmstate_macio_nvram;
device_class_set_props(dc, macio_nvram_properties);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
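
The only non-mechanical change in mac_nvram.c is the clarifying comment on the zlib include: the file pulls in <zlib.h> for adler32(). For reference, a minimal standalone use of that zlib call (link with -lz; the buffer contents are arbitrary example data, not the NVRAM format):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>   /* for adler32() */

    int main(void)
    {
        const unsigned char data[] = "nvram image contents";
        uLong sum = adler32(0L, Z_NULL, 0);   /* canonical initial value */

        sum = adler32(sum, data, (uInt)strlen((const char *)data));
        printf("adler32 = 0x%08lx\n", sum);
        return 0;
    }
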
diff --git a/hw/nvram/npcm7xx_otp.c b/hw/nvram/npcm7xx_otp.c
index f00ebfa..1fb752b 100644
--- a/hw/nvram/npcm7xx_otp.c
+++ b/hw/nvram/npcm7xx_otp.c
@@ -391,7 +391,7 @@ static const VMStateDescription vmstate_npcm7xx_otp = {
},
};
-static void npcm7xx_otp_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_otp_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -403,14 +403,14 @@ static void npcm7xx_otp_class_init(ObjectClass *klass, void *data)
rc->phases.enter = npcm7xx_otp_enter_reset;
}
-static void npcm7xx_key_storage_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_key_storage_class_init(ObjectClass *klass, const void *data)
{
NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass);
oc->mmio_ops = &npcm7xx_key_storage_ops;
}
-static void npcm7xx_fuse_array_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_fuse_array_class_init(ObjectClass *klass, const void *data)
{
NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass);
diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c
index 73564f7..23cc9fe 100644
--- a/hw/nvram/nrf51_nvm.c
+++ b/hw/nvram/nrf51_nvm.c
@@ -354,9 +354,8 @@ static void nrf51_nvm_reset(DeviceState *dev)
memset(s->uicr_content, 0xFF, sizeof(s->uicr_content));
}
-static Property nrf51_nvm_properties[] = {
+static const Property nrf51_nvm_properties[] = {
DEFINE_PROP_UINT32("flash-size", NRF51NVMState, flash_size, 0x40000),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_nvm = {
@@ -371,14 +370,14 @@ static const VMStateDescription vmstate_nvm = {
}
};
-static void nrf51_nvm_class_init(ObjectClass *klass, void *data)
+static void nrf51_nvm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, nrf51_nvm_properties);
dc->vmsd = &vmstate_nvm;
dc->realize = nrf51_nvm_realize;
- dc->reset = nrf51_nvm_reset;
+ device_class_set_legacy_reset(dc, nrf51_nvm_reset);
}
static const TypeInfo nrf51_nvm_info = {
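
nrf51_nvm.c shows, in one small file, the two mechanical conversions this series applies across hw/: property arrays become const and drop their DEFINE_PROP_END_OF_LIST() terminator, and the direct dc->reset assignment is replaced with device_class_set_legacy_reset(). A condensed sketch of the resulting class_init shape, assuming QEMU's qdev headers; FooState, foo_reset and the property name are placeholders, not code from this patch:

    /* Sketch only -- assumes "hw/qdev-properties.h" and FooState/foo_reset defined elsewhere. */
    static const Property foo_properties[] = {
        DEFINE_PROP_UINT32("flash-size", FooState, flash_size, 0x40000),
        /* no DEFINE_PROP_END_OF_LIST() terminator any more */
    };

    static void foo_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_props(dc, foo_properties);
        device_class_set_legacy_reset(dc, foo_reset);   /* was: dc->reset = foo_reset; */
    }
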
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index bfd8aa3..d0ac4e57 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -28,10 +28,10 @@
#include "qapi/error.h"
#include <libfdt.h>
-#include "sysemu/block-backend.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend.h"
+#include "system/device_tree.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "migration/vmstate.h"
#include "hw/nvram/chrp_nvram.h"
#include "hw/ppc/spapr.h"
@@ -252,13 +252,12 @@ static const VMStateDescription vmstate_spapr_nvram = {
},
};
-static Property spapr_nvram_properties[] = {
+static const Property spapr_nvram_properties[] = {
DEFINE_SPAPR_PROPERTIES(SpaprNvram, sdev),
DEFINE_PROP_DRIVE("drive", SpaprNvram, blk),
- DEFINE_PROP_END_OF_LIST(),
};
-static void spapr_nvram_class_init(ObjectClass *klass, void *data)
+static void spapr_nvram_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c
index 09575a7..5702bb3 100644
--- a/hw/nvram/xlnx-bbram.c
+++ b/hw/nvram/xlnx-bbram.c
@@ -29,7 +29,7 @@
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
@@ -456,9 +456,8 @@ static void bbram_ctrl_init(Object *obj)
{
XlnxBBRam *s = XLNX_BBRAM(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- RegisterInfoArray *reg_array;
- reg_array =
+ s->reg_array =
register_init_block32(DEVICE(obj), bbram_ctrl_regs_info,
ARRAY_SIZE(bbram_ctrl_regs_info),
s->regs_info, s->regs,
@@ -466,10 +465,17 @@ static void bbram_ctrl_init(Object *obj)
XLNX_BBRAM_ERR_DEBUG,
R_MAX * 4);
- sysbus_init_mmio(sbd, &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->reg_array->mem);
sysbus_init_irq(sbd, &s->irq_bbram);
}
+static void bbram_ctrl_finalize(Object *obj)
+{
+ XlnxBBRam *s = XLNX_BBRAM(obj);
+
+ register_finalize_block(s->reg_array);
+}
+
static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -496,7 +502,7 @@ static void bbram_prop_release_drive(Object *obj, const char *name,
}
static const PropertyInfo bbram_prop_drive = {
- .name = "str",
+ .type = "str",
.description = "Node name or ID of a block device to use as BBRAM backend",
.realized_set_allowed = true,
.get = bbram_prop_get_drive,
@@ -514,13 +520,12 @@ static const VMStateDescription vmstate_bbram_ctrl = {
}
};
-static Property bbram_ctrl_props[] = {
+static const Property bbram_ctrl_props[] = {
DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *),
DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void bbram_ctrl_class_init(ObjectClass *klass, void *data)
+static void bbram_ctrl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -537,6 +542,7 @@ static const TypeInfo bbram_ctrl_info = {
.instance_size = sizeof(XlnxBBRam),
.class_init = bbram_ctrl_class_init,
.instance_init = bbram_ctrl_init,
+ .instance_finalize = bbram_ctrl_finalize,
};
static void bbram_ctrl_register_types(void)
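
The xlnx-bbram.c change keeps the RegisterInfoArray handle in the device state instead of a local variable, so the new instance_finalize hook can release it with register_finalize_block() when the object is torn down. The generic shape of that fix, as a standalone sketch (plain C; Foo/foo_* names are invented, not QEMU's register API):

    #include <stdlib.h>
    #include <stdio.h>

    struct block { int regs[4]; };

    struct foo {
        struct block *blk;   /* kept in the state so teardown can find it */
    };

    static void foo_init(struct foo *s)
    {
        /* Before the fix this pointer lived only in a local variable. */
        s->blk = calloc(1, sizeof(*s->blk));
    }

    static void foo_finalize(struct foo *s)
    {
        free(s->blk);        /* mirrors register_finalize_block(s->reg_array) */
        s->blk = NULL;
    }

    int main(void)
    {
        struct foo f;

        foo_init(&f);
        foo_finalize(&f);
        printf("allocation released on finalize\n");
        return 0;
    }
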
diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index f7b849f..4c23f8b 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -30,7 +30,7 @@
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
@@ -257,7 +257,7 @@ static void efuse_prop_release_drive(Object *obj, const char *name,
}
static const PropertyInfo efuse_prop_drive = {
- .name = "str",
+ .type = "str",
.description = "Node name or ID of a block device to use as eFUSE backend",
.realized_set_allowed = true,
.get = efuse_prop_get_drive,
@@ -265,22 +265,23 @@ static const PropertyInfo efuse_prop_drive = {
.release = efuse_prop_release_drive,
};
-static Property efuse_properties[] = {
+static const Property efuse_properties[] = {
DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *),
DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3),
DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32),
DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true),
DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits,
qdev_prop_uint32, uint32_t),
- DEFINE_PROP_END_OF_LIST(),
};
-static void efuse_class_init(ObjectClass *klass, void *data)
+static void efuse_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = efuse_realize;
device_class_set_props(dc, efuse_properties);
+ /* Reason: Part of Xilinx SoC */
+ dc->user_creatable = false;
}
static const TypeInfo efuse_info = {
diff --git a/hw/nvram/xlnx-versal-efuse-cache.c b/hw/nvram/xlnx-versal-efuse-cache.c
index eaec64d..d4ec96a 100644
--- a/hw/nvram/xlnx-versal-efuse-cache.c
+++ b/hw/nvram/xlnx-versal-efuse-cache.c
@@ -83,15 +83,13 @@ static void efuse_cache_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property efuse_cache_props[] = {
+static const Property efuse_cache_props[] = {
DEFINE_PROP_LINK("efuse",
XlnxVersalEFuseCache, efuse,
TYPE_XLNX_EFUSE, XlnxEFuse *),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void efuse_cache_class_init(ObjectClass *klass, void *data)
+static void efuse_cache_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c
index def6fe33..9096219 100644
--- a/hw/nvram/xlnx-versal-efuse-ctrl.c
+++ b/hw/nvram/xlnx-versal-efuse-ctrl.c
@@ -494,7 +494,6 @@ static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64)
ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1);
efuse_imr_update_irq(s);
- return;
}
static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64)
@@ -712,9 +711,8 @@ static void efuse_ctrl_init(Object *obj)
{
XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- RegisterInfoArray *reg_array;
- reg_array =
+ s->reg_array =
register_init_block32(DEVICE(obj), efuse_ctrl_regs_info,
ARRAY_SIZE(efuse_ctrl_regs_info),
s->regs_info, s->regs,
@@ -722,7 +720,7 @@ static void efuse_ctrl_init(Object *obj)
XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG,
R_MAX * 4);
- sysbus_init_mmio(sbd, &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->reg_array->mem);
sysbus_init_irq(sbd, &s->irq_efuse_imr);
}
@@ -730,6 +728,7 @@ static void efuse_ctrl_finalize(Object *obj)
{
XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
+ register_finalize_block(s->reg_array);
g_free(s->extra_pg0_lock_spec);
}
@@ -743,18 +742,16 @@ static const VMStateDescription vmstate_efuse_ctrl = {
}
};
-static Property efuse_ctrl_props[] = {
+static const Property efuse_ctrl_props[] = {
DEFINE_PROP_LINK("efuse",
XlnxVersalEFuseCtrl, efuse,
TYPE_XLNX_EFUSE, XlnxEFuse *),
DEFINE_PROP_ARRAY("pg0-lock",
XlnxVersalEFuseCtrl, extra_pg0_lock_n16,
extra_pg0_lock_spec, qdev_prop_uint16, uint16_t),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void efuse_ctrl_class_init(ObjectClass *klass, void *data)
+static void efuse_ctrl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c
index 2d465f0..5a218c3 100644
--- a/hw/nvram/xlnx-zynqmp-efuse.c
+++ b/hw/nvram/xlnx-zynqmp-efuse.c
@@ -803,9 +803,8 @@ static void zynqmp_efuse_init(Object *obj)
{
XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- RegisterInfoArray *reg_array;
- reg_array =
+ s->reg_array =
register_init_block32(DEVICE(obj), zynqmp_efuse_regs_info,
ARRAY_SIZE(zynqmp_efuse_regs_info),
s->regs_info, s->regs,
@@ -813,10 +812,17 @@ static void zynqmp_efuse_init(Object *obj)
ZYNQMP_EFUSE_ERR_DEBUG,
R_MAX * 4);
- sysbus_init_mmio(sbd, &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->reg_array->mem);
sysbus_init_irq(sbd, &s->irq);
}
+static void zynqmp_efuse_finalize(Object *obj)
+{
+ XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj);
+
+ register_finalize_block(s->reg_array);
+}
+
static const VMStateDescription vmstate_efuse = {
.name = TYPE_XLNX_ZYNQMP_EFUSE,
.version_id = 1,
@@ -827,15 +833,13 @@ static const VMStateDescription vmstate_efuse = {
}
};
-static Property zynqmp_efuse_props[] = {
+static const Property zynqmp_efuse_props[] = {
DEFINE_PROP_LINK("efuse",
XlnxZynqMPEFuse, efuse,
TYPE_XLNX_EFUSE, XlnxEFuse *),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void zynqmp_efuse_class_init(ObjectClass *klass, void *data)
+static void zynqmp_efuse_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -853,6 +857,7 @@ static const TypeInfo efuse_info = {
.instance_size = sizeof(XlnxZynqMPEFuse),
.class_init = zynqmp_efuse_class_init,
.instance_init = zynqmp_efuse_init,
+ .instance_finalize = zynqmp_efuse_finalize,
};
static void efuse_register_types(void)
diff --git a/hw/openrisc/Kconfig b/hw/openrisc/Kconfig
index 76b953c..0702f62 100644
--- a/hw/openrisc/Kconfig
+++ b/hw/openrisc/Kconfig
@@ -3,7 +3,7 @@ config OR1K_SIM
default y
depends on OPENRISC
select DEVICE_TREE
- select SERIAL
+ select SERIAL_MM
select OPENCORES_ETH
select OMPIC
select SPLIT_IRQ
@@ -19,6 +19,6 @@ config OR1K_VIRT
select PCI
select PCI_EXPRESS_GENERIC_BRIDGE
select GOLDFISH_RTC
- select SERIAL
+ select SERIAL_MM
select SIFIVE_TEST
select VIRTIO_MMIO
diff --git a/hw/openrisc/boot.c b/hw/openrisc/boot.c
index 55475aa..c81efe8 100644
--- a/hw/openrisc/boot.c
+++ b/hw/openrisc/boot.c
@@ -9,12 +9,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-defs.h"
+#include "exec/target_page.h"
#include "elf.h"
#include "hw/loader.h"
#include "hw/openrisc/boot.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "qemu/error-report.h"
#include <libfdt.h>
@@ -32,7 +33,7 @@ hwaddr openrisc_load_kernel(ram_addr_t ram_size,
if (kernel_filename && !qtest_enabled()) {
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
- &elf_entry, NULL, &high_addr, NULL, 1,
+ &elf_entry, NULL, &high_addr, NULL, ELFDATA2MSB,
EM_OPENRISC, 1, 0);
entry = elf_entry;
if (kernel_size < 0) {
@@ -90,8 +91,8 @@ hwaddr openrisc_load_initrd(void *fdt, const char *filename,
return start + size;
}
-uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start,
- uint64_t mem_size)
+uint32_t openrisc_load_fdt(MachineState *ms, void *fdt,
+ hwaddr load_start, uint64_t mem_size)
{
uint32_t fdt_addr;
int ret;
@@ -109,7 +110,9 @@ uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start,
/* Should only fail if we've built a corrupted tree */
g_assert(ret == 0);
/* copy in the device tree */
- qemu_fdt_dumpdtb(fdt, fdtsize);
+
+ /* Save FDT for dumpdtb monitor command */
+ ms->fdt = fdt;
rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr,
&address_space_memory);
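
In boot.c the bare 1 passed to load_elf() is replaced by the named constant ELFDATA2MSB, spelling out that the OpenRISC kernel ELF is expected to be big-endian. A tiny standalone illustration of what that constant denotes, reading the data-encoding byte out of an ELF header with the host's <elf.h> (the "vmlinux" path is hypothetical):

    #include <elf.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char ident[EI_NIDENT] = { 0 };
        FILE *f = fopen("vmlinux", "rb");   /* hypothetical kernel image path */

        if (!f) {
            perror("vmlinux");
            return 1;
        }
        if (fread(ident, 1, sizeof(ident), f) != sizeof(ident)) {
            fprintf(stderr, "short read\n");
            fclose(f);
            return 1;
        }
        fclose(f);

        printf("%s-endian\n",
               ident[EI_DATA] == ELFDATA2MSB ? "big" :
               ident[EI_DATA] == ELFDATA2LSB ? "little" : "unknown");
        return 0;
    }
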
diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c
index 835986c..6331997 100644
--- a/hw/openrisc/cputimer.c
+++ b/hw/openrisc/cputimer.c
@@ -22,14 +22,15 @@
#include "cpu.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */
/* Tick Timer global state to allow all cores to be in sync */
typedef struct OR1KTimerState {
uint32_t ttcr;
- uint64_t last_clk;
+ uint32_t ttcr_offset;
+ uint64_t clk_offset;
} OR1KTimerState;
static OR1KTimerState *or1k_timer;
@@ -37,6 +38,8 @@ static OR1KTimerState *or1k_timer;
void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
{
or1k_timer->ttcr = val;
+ or1k_timer->ttcr_offset = val;
+ or1k_timer->clk_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
@@ -53,9 +56,8 @@ void cpu_openrisc_count_update(OpenRISCCPU *cpu)
return;
}
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- or1k_timer->ttcr += (uint32_t)((now - or1k_timer->last_clk)
- / TIMER_PERIOD);
- or1k_timer->last_clk = now;
+ or1k_timer->ttcr = or1k_timer->ttcr_offset +
+ DIV_ROUND_UP(now - or1k_timer->clk_offset, TIMER_PERIOD);
}
/* Update the next timeout time as difference between ttmr and ttcr */
@@ -69,7 +71,7 @@ void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
}
cpu_openrisc_count_update(cpu);
- now = or1k_timer->last_clk;
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) {
wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1;
@@ -110,7 +112,8 @@ static void openrisc_timer_cb(void *opaque)
case TIMER_NONE:
break;
case TIMER_INTR:
- or1k_timer->ttcr = 0;
+ /* Zero the count by applying a negative offset to the counter */
+ or1k_timer->ttcr_offset -= (cpu->env.ttmr & TTMR_TP);
break;
case TIMER_SHOT:
cpu_openrisc_count_stop(cpu);
@@ -137,17 +140,18 @@ static void openrisc_count_reset(void *opaque)
/* Reset the global timer state. */
static void openrisc_timer_reset(void *opaque)
{
- or1k_timer->ttcr = 0x00000000;
- or1k_timer->last_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ OpenRISCCPU *cpu = opaque;
+ cpu_openrisc_count_set(cpu, 0);
}
static const VMStateDescription vmstate_or1k_timer = {
.name = "or1k_timer",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(ttcr, OR1KTimerState),
- VMSTATE_UINT64(last_clk, OR1KTimerState),
+ VMSTATE_UINT32(ttcr_offset, OR1KTimerState),
+ VMSTATE_UINT64(clk_offset, OR1KTimerState),
VMSTATE_END_OF_LIST()
}
};
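
The cputimer.c rework stops accumulating TTCR incrementally (the old scheme could lose sub-period remainders at every update) and instead derives it on demand from a fixed reference: the counter value and virtual-clock time captured when the counter was last set. A standalone sketch of that derivation with a 50 ns period, mirroring the DIV_ROUND_UP rounding in the patch (plain C, no QEMU clock API; the nanosecond values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define TIMER_PERIOD 50   /* ns, 20 MHz tick timer as in the patch */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct or1k_timer {
        uint32_t ttcr_offset;   /* counter value when it was last set */
        uint64_t clk_offset;    /* virtual-clock ns when it was last set */
    };

    static void count_set(struct or1k_timer *t, uint32_t val, uint64_t now_ns)
    {
        t->ttcr_offset = val;
        t->clk_offset = now_ns;
    }

    static uint32_t count_get(const struct or1k_timer *t, uint64_t now_ns)
    {
        return t->ttcr_offset +
               (uint32_t)DIV_ROUND_UP(now_ns - t->clk_offset, TIMER_PERIOD);
    }

    int main(void)
    {
        struct or1k_timer t;

        count_set(&t, 0, 1000);                  /* counter reset at t = 1000 ns */
        printf("%u\n", count_get(&t, 1000));     /* 0 ticks elapsed */
        printf("%u\n", count_get(&t, 1250));     /* 250 ns -> 5 ticks */
        printf("%u\n", count_get(&t, 1251));     /* partial period rounds up -> 6 */
        return 0;
    }
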
diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c
index bffd6f7..880c8eb 100644
--- a/hw/openrisc/openrisc_sim.c
+++ b/hw/openrisc/openrisc_sim.c
@@ -24,16 +24,16 @@
#include "cpu.h"
#include "hw/irq.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "net/net.h"
#include "hw/openrisc/boot.h"
#include "hw/qdev-properties.h"
-#include "exec/address-spaces.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
+#include "system/address-spaces.h"
+#include "system/device_tree.h"
+#include "system/system.h"
#include "hw/sysbus.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "hw/core/split-irq.h"
#include <libfdt.h>
@@ -250,7 +250,7 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base,
void *fdt = state->fdt;
char *nodename;
qemu_irq serial_irq;
- char alias[sizeof("uart0")];
+ char alias[sizeof("serial0")];
int i;
if (num_cpus > 1) {
@@ -265,8 +265,8 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base,
serial_irq = get_cpu_irq(cpus, 0, irq_pin);
}
serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200,
- serial_hd(OR1KSIM_UART_COUNT - uart_idx - 1),
- DEVICE_NATIVE_ENDIAN);
+ serial_hd(uart_idx),
+ DEVICE_BIG_ENDIAN);
/* Add device tree node for serial. */
nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base);
@@ -277,10 +277,13 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base,
qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", OR1KSIM_CLK_MHZ);
qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0);
- /* The /chosen node is created during fdt creation. */
- qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename);
- snprintf(alias, sizeof(alias), "uart%d", uart_idx);
+ if (uart_idx == 0) {
+ /* The /chosen node is created during fdt creation. */
+ qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename);
+ }
+ snprintf(alias, sizeof(alias), "serial%d", uart_idx);
qemu_fdt_setprop_string(fdt, "/aliases", alias, nodename);
+
g_free(nodename);
}
@@ -303,8 +306,6 @@ static void openrisc_sim_init(MachineState *machine)
exit(1);
}
- cpu_openrisc_clock_init(cpus[n]);
-
qemu_register_reset(main_cpu_reset, cpus[n]);
}
@@ -326,11 +327,22 @@ static void openrisc_sim_init(MachineState *machine)
smp_cpus, cpus, OR1KSIM_OMPIC_IRQ);
}
- for (n = 0; n < OR1KSIM_UART_COUNT; ++n)
+ /*
+ * We create the UART nodes starting with the highest address and
+ * working downwards, because in QEMU the DTB nodes end up in the
+ * DTB in reverse order of creation. Correctly-written guest software
+ * will not care about the node order (it will look at stdout-path
+ * or the alias nodes), but for the benefit of guest software which
+ * just looks for the first UART node in the DTB, make sure the
+ * lowest-address UART (which is QEMU's first serial port) appears
+ * first in the DTB.
+ */
+ for (n = OR1KSIM_UART_COUNT - 1; n >= 0; n--) {
openrisc_sim_serial_init(state, or1ksim_memmap[OR1KSIM_UART].base +
or1ksim_memmap[OR1KSIM_UART].size * n,
or1ksim_memmap[OR1KSIM_UART].size,
smp_cpus, cpus, OR1KSIM_UART_IRQ, n);
+ }
load_addr = openrisc_load_kernel(ram_size, kernel_filename,
&boot_info.bootstrap_pc);
@@ -340,12 +352,12 @@ static void openrisc_sim_init(MachineState *machine)
machine->initrd_filename,
load_addr, machine->ram_size);
}
- boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr,
+ boot_info.fdt_addr = openrisc_load_fdt(machine, state->fdt, load_addr,
machine->ram_size);
}
}
-static void openrisc_sim_machine_init(ObjectClass *oc, void *data)
+static void openrisc_sim_machine_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
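
openrisc_sim.c now sizes its alias scratch buffer with sizeof("serial0"), which counts the terminating NUL and so is exactly big enough for "serialN" as long as the index stays a single digit, and it creates the UART nodes in descending address order for the DTB reason given in the comment above. A standalone look at those two details together (plain C; the count of 4 is just an example):

    #include <stdio.h>

    int main(void)
    {
        /* sizeof counts the NUL, so this is 8 bytes: "serial" + one digit + NUL. */
        char alias[sizeof("serial0")];

        for (int n = 3; n >= 0; n--) {           /* reverse order, as in the patch */
            snprintf(alias, sizeof(alias), "serial%d", n);
            printf("%s\n", alias);
        }
        return 0;
    }
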
diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c
index f8a68a6..a98071c 100644
--- a/hw/openrisc/virt.c
+++ b/hw/openrisc/virt.c
@@ -11,10 +11,10 @@
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "cpu.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/irq.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/core/split-irq.h"
#include "hw/openrisc/boot.h"
#include "hw/misc/sifive_test.h"
@@ -24,10 +24,10 @@
#include "hw/rtc/goldfish_rtc.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio-mmio.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include <libfdt.h>
@@ -236,7 +236,7 @@ static void openrisc_virt_serial_init(OR1KVirtState *state, hwaddr base,
qemu_irq serial_irq = get_per_cpu_irq(cpus, num_cpus, irq_pin);
serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200,
- serial_hd(0), DEVICE_NATIVE_ENDIAN);
+ serial_hd(0), DEVICE_BIG_ENDIAN);
/* Add device tree node for serial. */
nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base);
@@ -318,7 +318,7 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base,
{
int pin, dev;
uint32_t irq_map_stride = 0;
- uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {};
+ uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 6] = {};
uint32_t *irq_map = full_irq_map;
/*
@@ -330,11 +330,11 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base,
* possible slot) seeing the interrupt-map-mask will allow the table
* to wrap to any number of devices.
*/
- for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
+ for (dev = 0; dev < PCI_NUM_PINS; dev++) {
int devfn = dev << 3;
- for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
- int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
+ for (pin = 0; pin < PCI_NUM_PINS; pin++) {
+ int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
int i = 0;
/* Fill PCI address cells */
@@ -357,7 +357,7 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base,
}
qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
- GPEX_NUM_IRQS * GPEX_NUM_IRQS *
+ PCI_NUM_PINS * PCI_NUM_PINS *
irq_map_stride * sizeof(uint32_t));
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
@@ -409,7 +409,7 @@ static void openrisc_virt_pcie_init(OR1KVirtState *state,
memory_region_add_subregion(get_system_memory(), pio_base, alias);
/* Connect IRQ lines. */
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq);
@@ -487,8 +487,6 @@ static void openrisc_virt_init(MachineState *machine)
exit(1);
}
- cpu_openrisc_clock_init(cpus[n]);
-
qemu_register_reset(main_cpu_reset, cpus[n]);
}
@@ -540,12 +538,12 @@ static void openrisc_virt_init(MachineState *machine)
machine->initrd_filename,
load_addr, machine->ram_size);
}
- boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr,
+ boot_info.fdt_addr = openrisc_load_fdt(machine, state->fdt, load_addr,
machine->ram_size);
}
}
-static void openrisc_virt_machine_init(ObjectClass *oc, void *data)
+static void openrisc_virt_machine_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
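
The virt.c hunks swap GPEX_NUM_IRQS for the generic PCI_NUM_PINS when building the interrupt-map, since the loop bounds are really the four conventional PCI interrupt pins (both constants are 4). The swizzle itself is unchanged: pin INTA..INTD of slot N maps to irq_base + ((pin + slot) mod 4). A standalone sketch of that calculation (plain C; the irq_base value is arbitrary):

    #include <stdio.h>

    #define PCI_NUM_PINS 4     /* INTA..INTD */

    static int swizzle(int irq_base, int slot, int pin)
    {
        return irq_base + ((pin + slot) % PCI_NUM_PINS);
    }

    int main(void)
    {
        const int irq_base = 5;   /* arbitrary example value */

        for (int slot = 0; slot < PCI_NUM_PINS; slot++) {
            for (int pin = 0; pin < PCI_NUM_PINS; pin++) {
                printf("slot %d INT%c -> irq %d\n",
                       slot, 'A' + pin, swizzle(irq_base, slot, pin));
            }
        }
        return 0;
    }
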
diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig
index 6707736..449ec98 100644
--- a/hw/pci-bridge/Kconfig
+++ b/hw/pci-bridge/Kconfig
@@ -1,3 +1,8 @@
+config PCI_BRIDGE
+ bool
+ default y if PCI_DEVICES
+ depends on PCI
+
config PCIE_PORT
bool
default y if PCI_DEVICES
diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c
index 742da07..1065245 100644
--- a/hw/pci-bridge/cxl_downstream.c
+++ b/hw/pci-bridge/cxl_downstream.c
@@ -13,6 +13,8 @@
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "hw/cxl/cxl.h"
#include "qapi/error.h"
@@ -210,24 +212,19 @@ static void cxl_dsp_exitfn(PCIDevice *d)
pci_bridge_exitfn(d);
}
-static void cxl_dsp_instance_post_init(Object *obj)
-{
- PCIESlot *s = PCIE_SLOT(obj);
-
- if (!s->speed) {
- s->speed = QEMU_PCI_EXP_LNK_2_5GT;
- }
-
- if (!s->width) {
- s->width = QEMU_PCI_EXP_LNK_X1;
- }
-}
+static const Property cxl_dsp_props[] = {
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot,
+ speed, PCIE_LINK_SPEED_64),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot,
+ width, PCIE_LINK_WIDTH_16),
+};
-static void cxl_dsp_class_init(ObjectClass *oc, void *data)
+static void cxl_dsp_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
+ device_class_set_props(dc, cxl_dsp_props);
k->config_write = cxl_dsp_config_write;
k->realize = cxl_dsp_realize;
k->exit = cxl_dsp_exitfn;
@@ -236,16 +233,15 @@ static void cxl_dsp_class_init(ObjectClass *oc, void *data)
k->revision = 0;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "CXL Switch Downstream Port";
- dc->reset = cxl_dsp_reset;
+ device_class_set_legacy_reset(dc, cxl_dsp_reset);
}
static const TypeInfo cxl_dsp_info = {
.name = TYPE_CXL_DSP,
.instance_size = sizeof(CXLDownstreamPort),
.parent = TYPE_PCIE_SLOT,
- .instance_post_init = cxl_dsp_instance_post_init,
.class_init = cxl_dsp_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CXL_DEVICE },
{ }
diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c
index 2dd1023..e6a4035 100644
--- a/hw/pci-bridge/cxl_root_port.c
+++ b/hw/pci-bridge/cxl_root_port.c
@@ -24,6 +24,7 @@
#include "hw/pci/pcie_port.h"
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "hw/cxl/cxl.h"
@@ -198,7 +199,7 @@ static void cxl_rp_reset_hold(Object *obj, ResetType type)
latch_registers(crp);
}
-static Property gen_rp_props[] = {
+static const Property gen_rp_props[] = {
DEFINE_PROP_UINT32("bus-reserve", CXLRootPort, res_reserve.bus, -1),
DEFINE_PROP_SIZE("io-reserve", CXLRootPort, res_reserve.io, -1),
DEFINE_PROP_SIZE("mem-reserve", CXLRootPort, res_reserve.mem_non_pref, -1),
@@ -206,7 +207,10 @@ static Property gen_rp_props[] = {
-1),
DEFINE_PROP_SIZE("pref64-reserve", CXLRootPort, res_reserve.mem_pref_64,
-1),
- DEFINE_PROP_END_OF_LIST()
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot,
+ speed, PCIE_LINK_SPEED_64),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot,
+ width, PCIE_LINK_WIDTH_32),
};
static void cxl_rp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
@@ -258,7 +262,7 @@ static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val,
cxl_rp_dvsec_write_config(d, address, val, len);
}
-static void cxl_root_port_class_init(ObjectClass *oc, void *data)
+static void cxl_root_port_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
@@ -290,7 +294,7 @@ static const TypeInfo cxl_root_port_info = {
.parent = TYPE_PCIE_ROOT_PORT,
.instance_size = sizeof(CXLRootPort),
.class_init = cxl_root_port_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CXL_DEVICE },
{ }
},
diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c
index e51221a..208e0c6 100644
--- a/hw/pci-bridge/cxl_upstream.c
+++ b/hw/pci-bridge/cxl_upstream.c
@@ -11,6 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"
@@ -100,6 +101,7 @@ static void cxl_usp_reset(DeviceState *qdev)
pci_bridge_reset(qdev);
pcie_cap_deverr_reset(d);
+ pcie_cap_fill_link_ep_usp(d, usp->width, usp->speed);
latch_registers(usp);
}
@@ -234,7 +236,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
.type = CDAT_TYPE_SSLBIS,
.length = sslbis_size,
},
- .data_type = HMATLB_DATA_TYPE_ACCESS_LATENCY,
+ .data_type = HMAT_LB_DATA_TYPE_ACCESS_LATENCY,
.entry_base_unit = 10000,
},
};
@@ -254,7 +256,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
.type = CDAT_TYPE_SSLBIS,
.length = sslbis_size,
},
- .data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH,
+ .data_type = HMAT_LB_DATA_TYPE_ACCESS_BANDWIDTH,
.entry_base_unit = 1024,
},
};
@@ -360,13 +362,16 @@ static void cxl_usp_exitfn(PCIDevice *d)
pci_bridge_exitfn(d);
}
-static Property cxl_upstream_props[] = {
+static const Property cxl_upstream_props[] = {
DEFINE_PROP_UINT64("sn", CXLUpstreamPort, sn, UI64_NULL),
DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename),
- DEFINE_PROP_END_OF_LIST()
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLUpstreamPort,
+ speed, PCIE_LINK_SPEED_32),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLUpstreamPort,
+ width, PCIE_LINK_WIDTH_16),
};
-static void cxl_upstream_class_init(ObjectClass *oc, void *data)
+static void cxl_upstream_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
@@ -380,7 +385,7 @@ static void cxl_upstream_class_init(ObjectClass *oc, void *data)
k->revision = 0;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "CXL Switch Upstream Port";
- dc->reset = cxl_usp_reset;
+ device_class_set_legacy_reset(dc, cxl_usp_reset);
device_class_set_props(dc, cxl_upstream_props);
}
@@ -389,7 +394,7 @@ static const TypeInfo cxl_usp_info = {
.parent = TYPE_PCIE_PORT,
.instance_size = sizeof(CXLUpstreamPort),
.class_init = cxl_upstream_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CXL_DEVICE },
{ }
diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c
index 784507c..d9078e7 100644
--- a/hw/pci-bridge/gen_pcie_root_port.c
+++ b/hw/pci-bridge/gen_pcie_root_port.c
@@ -128,7 +128,7 @@ static const VMStateDescription vmstate_rp_dev = {
}
};
-static Property gen_rp_props[] = {
+static const Property gen_rp_props[] = {
DEFINE_PROP_BOOL("x-migrate-msix", GenPCIERootPort,
migrate_msix, true),
DEFINE_PROP_UINT32("bus-reserve", GenPCIERootPort,
@@ -145,10 +145,9 @@ static Property gen_rp_props[] = {
speed, PCIE_LINK_SPEED_16),
DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot,
width, PCIE_LINK_WIDTH_32),
- DEFINE_PROP_END_OF_LIST()
};
-static void gen_rp_dev_class_init(ObjectClass *klass, void *data)
+static void gen_rp_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c
index c140919..1d73c14 100644
--- a/hw/pci-bridge/i82801b11.c
+++ b/hw/pci-bridge/i82801b11.c
@@ -87,7 +87,7 @@ static const VMStateDescription i82801b11_bridge_dev_vmstate = {
}
};
-static void i82801b11_bridge_class_init(ObjectClass *klass, void *data)
+static void i82801b11_bridge_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -98,7 +98,7 @@ static void i82801b11_bridge_class_init(ObjectClass *klass, void *data)
k->realize = i82801b11_bridge_realize;
k->config_write = pci_bridge_write_config;
dc->vmsd = &i82801b11_bridge_dev_vmstate;
- dc->reset = pci_bridge_reset;
+ device_class_set_legacy_reset(dc, pci_bridge_reset);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
@@ -107,7 +107,7 @@ static const TypeInfo i82801b11_bridge_info = {
.parent = TYPE_PCI_BRIDGE,
.instance_size = sizeof(I82801b11Bridge),
.class_init = i82801b11_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c
index be752a4..bba640f 100644
--- a/hw/pci-bridge/ioh3420.c
+++ b/hw/pci-bridge/ioh3420.c
@@ -96,7 +96,7 @@ static const VMStateDescription vmstate_ioh3420 = {
}
};
-static void ioh3420_class_init(ObjectClass *klass, void *data)
+static void ioh3420_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build
index f2a6043..2e0eb0d 100644
--- a/hw/pci-bridge/meson.build
+++ b/hw/pci-bridge/meson.build
@@ -1,5 +1,5 @@
pci_ss = ss.source_set()
-pci_ss.add(files('pci_bridge_dev.c'))
+pci_ss.add(when: 'CONFIG_PCI_BRIDGE', if_true: files('pci_bridge_dev.c'))
pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c'))
pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c'))
pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c'))
diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c
index 089f91e..b328e50 100644
--- a/hw/pci-bridge/pci_bridge_dev.c
+++ b/hw/pci-bridge/pci_bridge_dev.c
@@ -28,7 +28,7 @@
#include "hw/pci/shpc.h"
#include "hw/pci/slotid_cap.h"
#include "hw/qdev-properties.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/pci/pci_bus.h"
#include "hw/hotplug.h"
#include "qom/object.h"
@@ -168,7 +168,7 @@ static void qdev_pci_bridge_dev_reset(DeviceState *qdev)
}
}
-static Property pci_bridge_dev_properties[] = {
+static const Property pci_bridge_dev_properties[] = {
/* Note: 0 is not a legal chassis number. */
DEFINE_PROP_UINT8(PCI_BRIDGE_DEV_PROP_CHASSIS_NR, PCIBridgeDev, chassis_nr,
0),
@@ -186,7 +186,6 @@ static Property pci_bridge_dev_properties[] = {
res_reserve.mem_pref_32, -1),
DEFINE_PROP_SIZE("pref64-reserve", PCIBridgeDev,
res_reserve.mem_pref_64, -1),
- DEFINE_PROP_END_OF_LIST(),
};
static bool pci_device_shpc_present(void *opaque, int version_id)
@@ -241,7 +240,7 @@ void pci_bridge_dev_unplug_request_cb(HotplugHandler *hotplug_dev,
shpc_device_unplug_request_cb(hotplug_dev, dev, errp);
}
-static void pci_bridge_dev_class_init(ObjectClass *klass, void *data)
+static void pci_bridge_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -254,7 +253,7 @@ static void pci_bridge_dev_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_REDHAT_BRIDGE;
k->class_id = PCI_CLASS_BRIDGE_PCI;
dc->desc = "Standard PCI Bridge";
- dc->reset = qdev_pci_bridge_dev_reset;
+ device_class_set_legacy_reset(dc, qdev_pci_bridge_dev_reset);
device_class_set_props(dc, pci_bridge_dev_properties);
dc->vmsd = &pci_bridge_dev_vmstate;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
@@ -269,7 +268,7 @@ static const TypeInfo pci_bridge_dev_info = {
.instance_size = sizeof(PCIBridgeDev),
.class_init = pci_bridge_dev_class_init,
.instance_finalize = pci_bridge_dev_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
@@ -281,7 +280,7 @@ static const TypeInfo pci_bridge_dev_info = {
* different pci id, so we can match it easily in the guest for
* automagic multiseat configuration. See docs/multiseat.txt for more.
*/
-static void pci_bridge_dev_seat_class_init(ObjectClass *klass, void *data)
+static void pci_bridge_dev_seat_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index 0411ad3..3a29dfe 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -23,7 +23,7 @@
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
#include "hw/boards.h"
#include "qom/object.h"
@@ -38,7 +38,6 @@ DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS,
DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS,
TYPE_PXB_PCIE_BUS)
-#define TYPE_PXB_CXL_BUS "pxb-cxl-bus"
DECLARE_INSTANCE_CHECKER(PXBBus, PXB_CXL_BUS,
TYPE_PXB_CXL_BUS)
@@ -85,12 +84,25 @@ static uint16_t pxb_bus_numa_node(PCIBus *bus)
return pxb->numa_node;
}
-static void pxb_bus_class_init(ObjectClass *class, void *data)
+static void prop_pxb_uid_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint32_t uid = pci_bus_num(PCI_BUS(obj));
+
+ visit_type_uint32(v, name, &uid, errp);
+}
+
+static void pxb_bus_class_init(ObjectClass *class, const void *data)
{
PCIBusClass *pbc = PCI_BUS_CLASS(class);
pbc->bus_num = pxb_bus_num;
pbc->numa_node = pxb_bus_numa_node;
+
+ object_class_property_add(class, "acpi_uid", "uint32",
+ prop_pxb_uid_get, NULL, NULL, NULL);
+ object_class_property_set_description(class, "acpi_uid",
+ "ACPI Unique ID used to distinguish this PCI Host Bridge / ACPI00016");
}
static const TypeInfo pxb_bus_info = {
@@ -157,7 +169,7 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice *dev)
return NULL;
}
-static void pxb_host_class_init(ObjectClass *class, void *data)
+static void pxb_host_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(class);
@@ -212,7 +224,7 @@ void pxb_cxl_hook_up_registers(CXLState *cxl_state, PCIBus *bus, Error **errp)
cxl_state->next_mr_idx++;
}
-static void pxb_cxl_host_class_init(ObjectClass *class, void *data)
+static void pxb_cxl_host_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class);
@@ -318,7 +330,7 @@ static gint pxb_compare(gconstpointer a, gconstpointer b)
0;
}
-static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type,
+static bool pxb_dev_realize_common(PCIDevice *dev, enum BusType type,
Error **errp)
{
PXBDev *pxb = PXB_DEV(dev);
@@ -330,13 +342,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type,
if (ms->numa_state == NULL) {
error_setg(errp, "NUMA is not supported by this machine-type");
- return;
+ return false;
}
if (pxb->numa_node != NUMA_NODE_UNASSIGNED &&
pxb->numa_node >= ms->numa_state->num_nodes) {
error_setg(errp, "Illegal numa node %d", pxb->numa_node);
- return;
+ return false;
}
if (dev->qdev.id && *dev->qdev.id) {
@@ -382,12 +394,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type,
pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST);
pxb_dev_list = g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare);
- return;
+ return true;
err_register_bus:
object_unref(OBJECT(bds));
object_unparent(OBJECT(bus));
object_unref(OBJECT(ds));
+ return false;
}
static void pxb_dev_realize(PCIDevice *dev, Error **errp)
@@ -407,15 +420,14 @@ static void pxb_dev_exitfn(PCIDevice *pci_dev)
pxb_dev_list = g_list_remove(pxb_dev_list, pxb);
}
-static Property pxb_dev_properties[] = {
+static const Property pxb_dev_properties[] = {
/* Note: 0 is not a legal PXB bus number. */
DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0),
DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED),
DEFINE_PROP_BOOL("bypass_iommu", PXBDev, bypass_iommu, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pxb_dev_class_init(ObjectClass *klass, void *data)
+static void pxb_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -437,7 +449,7 @@ static const TypeInfo pxb_dev_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PXBDev),
.class_init = pxb_dev_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -453,7 +465,7 @@ static void pxb_pcie_dev_realize(PCIDevice *dev, Error **errp)
pxb_dev_realize_common(dev, PCIE, errp);
}
-static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data)
+static void pxb_pcie_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -474,7 +486,7 @@ static const TypeInfo pxb_pcie_dev_info = {
.parent = TYPE_PXB_DEV,
.instance_size = sizeof(PXBPCIEDev),
.class_init = pxb_pcie_dev_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -488,16 +500,17 @@ static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp)
return;
}
- pxb_dev_realize_common(dev, CXL, errp);
+ if (!pxb_dev_realize_common(dev, CXL, errp)) {
+ return;
+ }
pxb_cxl_dev_reset(DEVICE(dev));
}
-static Property pxb_cxl_dev_properties[] = {
+static const Property pxb_cxl_dev_properties[] = {
DEFINE_PROP_BOOL("hdm_for_passthrough", PXBCXLDev, hdm_for_passthrough, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data)
+static void pxb_cxl_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -515,7 +528,7 @@ static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data)
/* Host bridges aren't hotpluggable. FIXME: spec reference */
dc->hotpluggable = false;
- dc->reset = pxb_cxl_dev_reset;
+ device_class_set_legacy_reset(dc, pxb_cxl_dev_reset);
}
static const TypeInfo pxb_cxl_dev_info = {
@@ -524,7 +537,7 @@ static const TypeInfo pxb_cxl_dev_info = {
.instance_size = sizeof(PXBCXLDev),
.class_init = pxb_cxl_dev_class_init,
.interfaces =
- (InterfaceInfo[]){
+ (const InterfaceInfo[]){
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{},
},
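
pci_expander_bridge.c changes pxb_dev_realize_common() to report success as a bool, so the CXL variant can skip its post-realize reset when setup failed part-way instead of running it unconditionally. The shape of that pattern, as a standalone sketch with invented names (the errp plumbing and QOM unref calls of the real code are left out):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev { int *bus; bool ready; };

    static bool dev_realize_common(struct dev *d, bool fail_bus)
    {
        d->bus = malloc(sizeof(*d->bus));
        if (!d->bus) {
            return false;
        }
        if (fail_bus) {
            goto err_register_bus;     /* mirrors the patch's unwind label */
        }
        d->ready = true;
        return true;

    err_register_bus:
        free(d->bus);
        d->bus = NULL;
        return false;                  /* caller must not touch the half-built device */
    }

    static void dev_realize(struct dev *d, bool fail_bus)
    {
        if (!dev_realize_common(d, fail_bus)) {
            return;                    /* previously the reset ran even on failure */
        }
        d->bus[0] = 0;                 /* "reset", only reached on success */
    }

    int main(void)
    {
        struct dev d = { 0 };

        dev_realize(&d, true);
        printf("ready=%d\n", d.ready);
        dev_realize(&d, false);
        printf("ready=%d\n", d.ready);
        free(d.bus);
        return 0;
    }
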
diff --git a/hw/pci-bridge/pcie_pci_bridge.c b/hw/pci-bridge/pcie_pci_bridge.c
index 7646ac2..fce292a 100644
--- a/hw/pci-bridge/pcie_pci_bridge.c
+++ b/hw/pci-bridge/pcie_pci_bridge.c
@@ -52,11 +52,10 @@ static void pcie_pci_bridge_realize(PCIDevice *d, Error **errp)
goto cap_error;
}
- pos = pci_add_capability(d, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF, errp);
+ pos = pci_pm_init(d, 0, errp);
if (pos < 0) {
goto pm_error;
}
- d->exp.pm_cap = pos;
pci_set_word(d->config + pos + PCI_PM_PMC, 0x3);
pcie_cap_arifwd_init(d);
@@ -124,9 +123,8 @@ static void pcie_pci_bridge_write_config(PCIDevice *d,
shpc_cap_write_config(d, address, val, len);
}
-static Property pcie_pci_bridge_dev_properties[] = {
+static const Property pcie_pci_bridge_dev_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("msi", PCIEPCIBridge, msi, ON_OFF_AUTO_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription pcie_pci_bridge_dev_vmstate = {
@@ -139,7 +137,7 @@ static const VMStateDescription pcie_pci_bridge_dev_vmstate = {
}
};
-static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data)
+static void pcie_pci_bridge_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -152,7 +150,7 @@ static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data)
k->config_write = pcie_pci_bridge_write_config;
dc->vmsd = &pcie_pci_bridge_dev_vmstate;
device_class_set_props(dc, pcie_pci_bridge_dev_properties);
- dc->reset = &pcie_pci_bridge_reset;
+ device_class_set_legacy_reset(dc, pcie_pci_bridge_reset);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
hc->plug = pci_bridge_dev_plug_cb;
hc->unplug = pci_bridge_dev_unplug_cb;
@@ -164,7 +162,7 @@ static const TypeInfo pcie_pci_bridge_info = {
.parent = TYPE_PCI_BRIDGE,
.instance_size = sizeof(PCIEPCIBridge),
.class_init = pcie_pci_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ INTERFACE_PCIE_DEVICE },
{ },
diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c
index 09a3478..22c2fdb 100644
--- a/hw/pci-bridge/pcie_root_port.c
+++ b/hw/pci-bridge/pcie_root_port.c
@@ -148,11 +148,10 @@ static void rp_exit(PCIDevice *d)
pci_bridge_exitfn(d);
}
-static Property rp_props[] = {
+static const Property rp_props[] = {
DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present,
QEMU_PCIE_SLTCAP_PCP_BITNR, true),
DEFINE_PROP_BOOL("disable-acs", PCIESlot, disable_acs, false),
- DEFINE_PROP_END_OF_LIST()
};
static void rp_instance_post_init(Object *obj)
@@ -168,7 +167,7 @@ static void rp_instance_post_init(Object *obj)
}
}
-static void rp_class_init(ObjectClass *klass, void *data)
+static void rp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -189,7 +188,7 @@ static const TypeInfo rp_info = {
.class_init = rp_class_init,
.abstract = true,
.class_size = sizeof(PCIERootPortClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/pci-bridge/simba.c b/hw/pci-bridge/simba.c
index 17aa0d7..bbae594 100644
--- a/hw/pci-bridge/simba.c
+++ b/hw/pci-bridge/simba.c
@@ -66,7 +66,7 @@ static void simba_pci_bridge_realize(PCIDevice *dev, Error **errp)
pci_bridge_update_mappings(PCI_BRIDGE(br));
}
-static void simba_pci_bridge_class_init(ObjectClass *klass, void *data)
+static void simba_pci_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -78,7 +78,7 @@ static void simba_pci_bridge_class_init(ObjectClass *klass, void *data)
k->revision = 0x11;
k->config_write = pci_bridge_write_config;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- dc->reset = pci_bridge_reset;
+ device_class_set_legacy_reset(dc, pci_bridge_reset);
dc->vmsd = &vmstate_pci_device;
}
@@ -87,7 +87,7 @@ static const TypeInfo simba_pci_bridge_info = {
.parent = TYPE_PCI_BRIDGE,
.class_init = simba_pci_bridge_class_init,
.instance_size = sizeof(SimbaPCIBridge),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c
index 907d510..dc7d1aa 100644
--- a/hw/pci-bridge/xio3130_downstream.c
+++ b/hw/pci-bridge/xio3130_downstream.c
@@ -134,10 +134,9 @@ static void xio3130_downstream_exitfn(PCIDevice *d)
pci_bridge_exitfn(d);
}
-static Property xio3130_downstream_props[] = {
+static const Property xio3130_downstream_props[] = {
DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present,
QEMU_PCIE_SLTCAP_PCP_BITNR, true),
- DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription vmstate_xio3130_downstream = {
@@ -154,7 +153,7 @@ static const VMStateDescription vmstate_xio3130_downstream = {
}
};
-static void xio3130_downstream_class_init(ObjectClass *klass, void *data)
+static void xio3130_downstream_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -167,7 +166,7 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data)
k->revision = XIO3130_REVISION;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "TI X3130 Downstream Port of PCI Express Switch";
- dc->reset = xio3130_downstream_reset;
+ device_class_set_legacy_reset(dc, xio3130_downstream_reset);
dc->vmsd = &vmstate_xio3130_downstream;
device_class_set_props(dc, xio3130_downstream_props);
}
@@ -176,7 +175,7 @@ static const TypeInfo xio3130_downstream_info = {
.name = TYPE_XIO3130_DOWNSTREAM,
.parent = TYPE_PCIE_SLOT,
.class_init = xio3130_downstream_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c
index 2a6cff6..40057b7 100644
--- a/hw/pci-bridge/xio3130_upstream.c
+++ b/hw/pci-bridge/xio3130_upstream.c
@@ -123,7 +123,7 @@ static const VMStateDescription vmstate_xio3130_upstream = {
}
};
-static void xio3130_upstream_class_init(ObjectClass *klass, void *data)
+static void xio3130_upstream_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -136,7 +136,7 @@ static void xio3130_upstream_class_init(ObjectClass *klass, void *data)
k->revision = XIO3130_REVISION;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "TI X3130 Upstream Port of PCI Express Switch";
- dc->reset = xio3130_upstream_reset;
+ device_class_set_legacy_reset(dc, xio3130_upstream_reset);
dc->vmsd = &vmstate_xio3130_upstream;
}
@@ -144,7 +144,7 @@ static const TypeInfo xio3130_upstream_info = {
.name = "x3130-upstream",
.parent = TYPE_PCIE_PORT,
.class_init = xio3130_upstream_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
index c91880b..35c0415 100644
--- a/hw/pci-host/Kconfig
+++ b/hw/pci-host/Kconfig
@@ -99,6 +99,9 @@ config ASTRO
bool
select PCI
+config PCI_EXPRESS_FSL_IMX8M_PHY
+ bool
+
config GT64120
bool
select PCI
diff --git a/hw/pci-host/articia.c b/hw/pci-host/articia.c
index f3fcc49..cc65aac 100644
--- a/hw/pci-host/articia.c
+++ b/hw/pci-host/articia.c
@@ -195,7 +195,7 @@ static void articia_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, s->irq, ARRAY_SIZE(s->irq));
}
-static void articia_class_init(ObjectClass *klass, void *data)
+static void articia_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -228,7 +228,7 @@ static void articia_pci_host_cfg_write(PCIDevice *d, uint32_t addr,
}
}
-static void articia_pci_host_class_init(ObjectClass *klass, void *data)
+static void articia_pci_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -246,7 +246,7 @@ static void articia_pci_host_class_init(ObjectClass *klass, void *data)
/* TYPE_ARTICIA_PCI_BRIDGE */
-static void articia_pci_bridge_class_init(ObjectClass *klass, void *data)
+static void articia_pci_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -273,7 +273,7 @@ static const TypeInfo articia_types[] = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(ArticiaHostState),
.class_init = articia_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -283,7 +283,7 @@ static const TypeInfo articia_types[] = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = articia_pci_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c
index e3e589c..859e308 100644
--- a/hw/pci-host/astro.c
+++ b/hw/pci-host/astro.c
@@ -35,6 +35,7 @@
#include "target/hppa/cpu.h"
#include "trace.h"
#include "qom/object.h"
+#include "exec/target_page.h"
/*
* Helper functions
@@ -461,10 +462,6 @@ static void elroy_pcihost_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), elroy_set_irq, ELROY_IRQS);
}
-static Property elroy_pcihost_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
static const VMStateDescription vmstate_elroy = {
.name = "Elroy",
.version_id = 1,
@@ -485,12 +482,11 @@ static const VMStateDescription vmstate_elroy = {
}
};
-static void elroy_pcihost_class_init(ObjectClass *klass, void *data)
+static void elroy_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = elroy_reset;
- device_class_set_props(dc, elroy_pcihost_properties);
+ device_class_set_legacy_reset(dc, elroy_reset);
dc->vmsd = &vmstate_elroy;
dc->user_creatable = false;
}
@@ -526,6 +522,53 @@ static ElroyState *elroy_init(int num)
* Astro Runway chip.
*/
+static void adjust_LMMIO_DIRECT_mapping(AstroState *s, unsigned int reg_index)
+{
+ MemoryRegion *lmmio_alias;
+ unsigned int lmmio_index, map_route;
+ hwaddr map_addr;
+ uint32_t map_size;
+ struct ElroyState *elroy;
+
+ /* pointer to LMMIO_DIRECT entry */
+ lmmio_index = reg_index / 3;
+ lmmio_alias = &s->lmmio_direct[lmmio_index];
+
+ map_addr = s->ioc_ranges[3 * lmmio_index + 0];
+ map_size = s->ioc_ranges[3 * lmmio_index + 1];
+ map_route = s->ioc_ranges[3 * lmmio_index + 2];
+
+ /* find elroy to which this address is routed */
+ map_route &= (ELROY_NUM - 1);
+ elroy = s->elroy[map_route];
+
+ if (lmmio_alias->enabled) {
+ memory_region_set_enabled(lmmio_alias, false);
+ }
+
+ map_addr = F_EXTEND(map_addr);
+ map_addr &= TARGET_PAGE_MASK;
+ map_size = (~map_size) + 1;
+ map_size &= TARGET_PAGE_MASK;
+
+ /* exit if disabled or zero map size */
+ if (!(map_addr & 1) || !map_size) {
+ return;
+ }
+
+ if (!memory_region_size(lmmio_alias)) {
+ memory_region_init_alias(lmmio_alias, OBJECT(elroy),
+ "pci-lmmmio-alias", &elroy->pci_mmio,
+ (uint32_t) map_addr, map_size);
+ memory_region_add_subregion(get_system_memory(), map_addr,
+ lmmio_alias);
+ } else {
+ memory_region_set_alias_offset(lmmio_alias, map_addr);
+ memory_region_set_size(lmmio_alias, map_size);
+ memory_region_set_enabled(lmmio_alias, true);
+ }
+}
+
static MemTxResult astro_chip_read_with_attrs(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
@@ -633,6 +676,11 @@ static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr,
break;
case 0x0300 ... 0x03d8 - 1: /* LMMIO_DIRECT0_BASE... */
put_val_in_arrary(s->ioc_ranges, 0x300, addr, size, val);
+ unsigned int index = (addr - 0x300) / 8;
+ /* check if one of the 4 LMMIO_DIRECT regs, each using 3 entries. */
+ if (index < LMMIO_DIRECT_RANGES * 3) {
+ adjust_LMMIO_DIRECT_mapping(s, index);
+ }
break;
case 0x10200:
case 0x10220:
@@ -861,11 +909,11 @@ static void astro_realize(DeviceState *obj, Error **errp)
}
}
-static void astro_class_init(ObjectClass *klass, void *data)
+static void astro_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = astro_reset;
+ device_class_set_legacy_reset(dc, astro_reset);
dc->vmsd = &vmstate_astro;
dc->realize = astro_realize;
/*
@@ -884,7 +932,7 @@ static const TypeInfo astro_chip_info = {
};
static void astro_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
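The new adjust_LMMIO_DIRECT_mapping() helper above derives the window size from the LMMIO_DIRECT mask register by taking its two's complement and page-aligning the result. Below is a minimal standalone sketch of that arithmetic; the mask value and the 4 KiB page mask are illustrative assumptions, not values taken from the Astro documentation.

    #include <stdint.h>
    #include <stdio.h>

    #define LMMIO_PAGE_MASK (~(uint32_t)0xfff)   /* illustrative 4 KiB page mask */

    int main(void)
    {
        /* A hypothetical LMMIO_DIRECT mask register value covering 16 MiB. */
        uint32_t mask_reg = 0xff000000;

        /* Same arithmetic as adjust_LMMIO_DIRECT_mapping(): the two's
         * complement of the mask gives the window size, then page-align it. */
        uint32_t map_size = (~mask_reg) + 1;
        map_size &= LMMIO_PAGE_MASK;

        printf("window size = 0x%x (%u MiB)\n",
               (unsigned)map_size, (unsigned)(map_size >> 20));
        return 0;
    }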
diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c
index 1516d00..7d6251a 100644
--- a/hw/pci-host/bonito.c
+++ b/hw/pci-host/bonito.c
@@ -48,7 +48,7 @@
#include "hw/pci-host/bonito.h"
#include "hw/pci/pci_host.h"
#include "migration/vmstate.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/misc/unimp.h"
#include "hw/registerfields.h"
#include "qom/object.h"
@@ -757,7 +757,7 @@ PCIBus *bonito_init(qemu_irq *pic)
return phb->bus;
}
-static void bonito_pci_class_init(ObjectClass *klass, void *data)
+static void bonito_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -783,13 +783,13 @@ static const TypeInfo bonito_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIBonitoState),
.class_init = bonito_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void bonito_host_class_init(ObjectClass *klass, void *data)
+static void bonito_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c
index c25d50f..f6e49ce 100644
--- a/hw/pci-host/designware.c
+++ b/hw/pci-host/designware.c
@@ -20,8 +20,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qemu/module.h"
#include "qemu/log.h"
+#include "qemu/bitops.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_host.h"
@@ -55,7 +55,17 @@
#define DESIGNWARE_PCIE_ATU_DEVFN(x) (((x) >> 16) & 0xff)
#define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C
-#define DESIGNWARE_PCIE_IRQ_MSI 3
+static void designware_pcie_root_bus_class_init(ObjectClass *klass,
+ const void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ /*
+ * Designware has only a single root complex. Enforce the limit on the
+ * parent bus.
+ */
+ k->max_dev = 1;
+}
static DesignwarePCIEHost *
designware_pcie_root_to_host(DesignwarePCIERoot *root)
@@ -90,7 +100,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable;
if (root->msi.intr[0].status & ~root->msi.intr[0].mask) {
- qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 1);
+ qemu_set_irq(host->pci.msi, 1);
}
}
@@ -153,11 +163,9 @@ designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len)
break;
case DESIGNWARE_PCIE_MSI_ADDR_LO:
- val = root->msi.base;
- break;
-
case DESIGNWARE_PCIE_MSI_ADDR_HI:
- val = root->msi.base >> 32;
+ val = extract64(root->msi.base,
+ address == DESIGNWARE_PCIE_MSI_ADDR_LO ? 0 : 32, 32);
break;
case DESIGNWARE_PCIE_MSI_INTR0_ENABLE:
@@ -181,19 +189,16 @@ designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len)
break;
case DESIGNWARE_PCIE_ATU_LOWER_BASE:
- val = viewport->base;
- break;
-
case DESIGNWARE_PCIE_ATU_UPPER_BASE:
- val = viewport->base >> 32;
+ val = extract64(viewport->base,
+ address == DESIGNWARE_PCIE_ATU_LOWER_BASE ? 0 : 32, 32);
break;
case DESIGNWARE_PCIE_ATU_LOWER_TARGET:
- val = viewport->target;
- break;
-
case DESIGNWARE_PCIE_ATU_UPPER_TARGET:
- val = viewport->target >> 32;
+ val = extract64(viewport->target,
+ address == DESIGNWARE_PCIE_ATU_LOWER_TARGET ? 0 : 32,
+ 32);
break;
case DESIGNWARE_PCIE_ATU_LIMIT:
@@ -312,14 +317,10 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
break;
case DESIGNWARE_PCIE_MSI_ADDR_LO:
- root->msi.base &= 0xFFFFFFFF00000000ULL;
- root->msi.base |= val;
- designware_pcie_root_update_msi_mapping(root);
- break;
-
case DESIGNWARE_PCIE_MSI_ADDR_HI:
- root->msi.base &= 0x00000000FFFFFFFFULL;
- root->msi.base |= (uint64_t)val << 32;
+ root->msi.base = deposit64(root->msi.base,
+ address == DESIGNWARE_PCIE_MSI_ADDR_LO
+ ? 0 : 32, 32, val);
designware_pcie_root_update_msi_mapping(root);
break;
@@ -335,7 +336,7 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
case DESIGNWARE_PCIE_MSI_INTR0_STATUS:
root->msi.intr[0].status ^= val;
if (!root->msi.intr[0].status) {
- qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 0);
+ qemu_set_irq(host->pci.msi, 0);
}
break;
@@ -346,23 +347,17 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
break;
case DESIGNWARE_PCIE_ATU_LOWER_BASE:
- viewport->base &= 0xFFFFFFFF00000000ULL;
- viewport->base |= val;
- break;
-
case DESIGNWARE_PCIE_ATU_UPPER_BASE:
- viewport->base &= 0x00000000FFFFFFFFULL;
- viewport->base |= (uint64_t)val << 32;
+ viewport->base = deposit64(viewport->base,
+ address == DESIGNWARE_PCIE_ATU_LOWER_BASE
+ ? 0 : 32, 32, val);
break;
case DESIGNWARE_PCIE_ATU_LOWER_TARGET:
- viewport->target &= 0xFFFFFFFF00000000ULL;
- viewport->target |= val;
- break;
-
case DESIGNWARE_PCIE_ATU_UPPER_TARGET:
- viewport->target &= 0x00000000FFFFFFFFULL;
- viewport->target |= val;
+ viewport->target = deposit64(viewport->target,
+ address == DESIGNWARE_PCIE_ATU_LOWER_TARGET
+ ? 0 : 32, 32, val);
break;
case DESIGNWARE_PCIE_ATU_LIMIT:
@@ -395,6 +390,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)
{
DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(dev);
DesignwarePCIEHost *host = designware_pcie_root_to_host(root);
+ MemoryRegion *host_mem = get_system_memory();
MemoryRegion *address_space = &host->pci.memory;
PCIBridge *br = PCI_BRIDGE(dev);
DesignwarePCIEViewport *viewport;
@@ -435,7 +431,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)
viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM;
source = &host->pci.address_space_root;
- destination = get_system_memory();
+ destination = host_mem;
direction = "Inbound";
/*
@@ -460,7 +456,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)
destination = &host->pci.memory;
direction = "Outbound";
- source = get_system_memory();
+ source = host_mem;
/*
* Configure MemoryRegion implementing CPU -> PCI memory
@@ -591,7 +587,8 @@ static const VMStateDescription vmstate_designware_pcie_root = {
}
};
-static void designware_pcie_root_class_init(ObjectClass *klass, void *data)
+static void designware_pcie_root_class_init(ObjectClass *klass,
+ const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -607,7 +604,7 @@ static void designware_pcie_root_class_init(ObjectClass *klass, void *data)
k->config_read = designware_pcie_root_config_read;
k->config_write = designware_pcie_root_config_write;
- dc->reset = pci_bridge_reset;
+ device_class_set_legacy_reset(dc, pci_bridge_reset);
/*
* PCI-facing part of the host bridge, not usable without the
* host-facing part, which can't be device_add'ed, yet.
@@ -679,6 +676,7 @@ static void designware_pcie_host_realize(DeviceState *dev, Error **errp)
for (i = 0; i < ARRAY_SIZE(s->pci.irqs); i++) {
sysbus_init_irq(sbd, &s->pci.irqs[i]);
}
+ sysbus_init_irq(sbd, &s->pci.msi);
memory_region_init_io(&s->mmio,
OBJECT(s),
@@ -699,7 +697,7 @@ static void designware_pcie_host_realize(DeviceState *dev, Error **errp)
&s->pci.memory,
&s->pci.io,
0, 4,
- TYPE_PCIE_BUS);
+ TYPE_DESIGNWARE_PCIE_ROOT_BUS);
pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
memory_region_init(&s->pci.address_space_root,
@@ -730,7 +728,8 @@ static const VMStateDescription vmstate_designware_pcie_host = {
}
};
-static void designware_pcie_host_class_init(ObjectClass *klass, void *data)
+static void designware_pcie_host_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
@@ -752,28 +751,28 @@ static void designware_pcie_host_init(Object *obj)
qdev_prop_set_bit(DEVICE(root), "multifunction", false);
}
-static const TypeInfo designware_pcie_root_info = {
- .name = TYPE_DESIGNWARE_PCIE_ROOT,
- .parent = TYPE_PCI_BRIDGE,
- .instance_size = sizeof(DesignwarePCIERoot),
- .class_init = designware_pcie_root_class_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_PCIE_DEVICE },
- { }
+static const TypeInfo designware_pcie_types[] = {
+ {
+ .name = TYPE_DESIGNWARE_PCIE_ROOT_BUS,
+ .parent = TYPE_PCIE_BUS,
+ .instance_size = sizeof(DesignwarePCIERootBus),
+ .class_init = designware_pcie_root_bus_class_init,
+ }, {
+ .name = TYPE_DESIGNWARE_PCIE_HOST,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(DesignwarePCIEHost),
+ .instance_init = designware_pcie_host_init,
+ .class_init = designware_pcie_host_class_init,
+ }, {
+ .name = TYPE_DESIGNWARE_PCIE_ROOT,
+ .parent = TYPE_PCI_BRIDGE,
+ .instance_size = sizeof(DesignwarePCIERoot),
+ .class_init = designware_pcie_root_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { }
+ },
},
};
-static const TypeInfo designware_pcie_host_info = {
- .name = TYPE_DESIGNWARE_PCIE_HOST,
- .parent = TYPE_PCI_HOST_BRIDGE,
- .instance_size = sizeof(DesignwarePCIEHost),
- .instance_init = designware_pcie_host_init,
- .class_init = designware_pcie_host_class_init,
-};
-
-static void designware_pcie_register(void)
-{
- type_register_static(&designware_pcie_root_info);
- type_register_static(&designware_pcie_host_info);
-}
-type_init(designware_pcie_register)
+DEFINE_TYPES(designware_pcie_types)
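The config-space read/write paths above fold each ..._LOWER/..._UPPER register pair into a single fall-through case built on extract64()/deposit64(). Below is a self-contained sketch of the same bit manipulation; the two helpers mirror the semantics of QEMU's extract64()/deposit64() from qemu/bitops.h but are re-implemented locally so the example compiles on its own.

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for QEMU's extract64()/deposit64(), re-implemented
     * here so the example is self-contained. */
    static uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint64_t base = 0;
        int is_lo = 1;  /* pretend the guest wrote the ..._LOWER_BASE register */

        /* Writing the LO half and then the HI half, as the fall-through
         * write case now does. */
        base = deposit64(base, is_lo ? 0 : 32, 32, 0x12345678);
        base = deposit64(base, 32, 32, 0xabcd0000);
        printf("base = 0x%016llx\n", (unsigned long long)base);

        /* Reading either half back with one expression. */
        printf("lo = 0x%08llx hi = 0x%08llx\n",
               (unsigned long long)extract64(base, 0, 32),
               (unsigned long long)extract64(base, 32, 32));
        return 0;
    }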
diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c
index d992c4b..11b353b 100644
--- a/hw/pci-host/dino.c
+++ b/hw/pci-host/dino.c
@@ -492,17 +492,16 @@ static void dino_pcihost_init(Object *obj)
qdev_init_gpio_in(DEVICE(obj), dino_set_irq, DINO_IRQS);
}
-static Property dino_pcihost_properties[] = {
+static const Property dino_pcihost_properties[] = {
DEFINE_PROP_LINK("memory-as", DinoState, memory_as, TYPE_MEMORY_REGION,
MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void dino_pcihost_class_init(ObjectClass *klass, void *data)
+static void dino_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = dino_pcihost_reset;
+ device_class_set_legacy_reset(dc, dino_pcihost_reset);
dc->realize = dino_pcihost_realize;
dc->unrealize = dino_pcihost_unrealize;
device_class_set_props(dc, dino_pcihost_properties);
diff --git a/hw/pci-host/fsl_imx8m_phy.c b/hw/pci-host/fsl_imx8m_phy.c
new file mode 100644
index 0000000..04da3f9
--- /dev/null
+++ b/hw/pci-host/fsl_imx8m_phy.c
@@ -0,0 +1,98 @@
+/*
+ * i.MX8 PCIe PHY emulation
+ *
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci-host/fsl_imx8m_phy.h"
+#include "hw/resettable.h"
+#include "migration/vmstate.h"
+
+#define CMN_REG075 0x1d4
+#define ANA_PLL_LOCK_DONE BIT(1)
+#define ANA_PLL_AFC_DONE BIT(0)
+
+static uint64_t fsl_imx8m_pcie_phy_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ FslImx8mPciePhyState *s = opaque;
+
+ if (offset == CMN_REG075) {
+ return s->data[offset] | ANA_PLL_LOCK_DONE | ANA_PLL_AFC_DONE;
+ }
+
+ return s->data[offset];
+}
+
+static void fsl_imx8m_pcie_phy_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ FslImx8mPciePhyState *s = opaque;
+
+ s->data[offset] = value;
+}
+
+static const MemoryRegionOps fsl_imx8m_pcie_phy_ops = {
+ .read = fsl_imx8m_pcie_phy_read,
+ .write = fsl_imx8m_pcie_phy_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void fsl_imx8m_pcie_phy_realize(DeviceState *dev, Error **errp)
+{
+ FslImx8mPciePhyState *s = FSL_IMX8M_PCIE_PHY(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &fsl_imx8m_pcie_phy_ops, s,
+ TYPE_FSL_IMX8M_PCIE_PHY, ARRAY_SIZE(s->data));
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static void fsl_imx8m_pcie_phy_reset_hold(Object *obj, ResetType type)
+{
+ FslImx8mPciePhyState *s = FSL_IMX8M_PCIE_PHY(obj);
+
+ memset(s->data, 0, sizeof(s->data));
+}
+
+static const VMStateDescription fsl_imx8m_pcie_phy_vmstate = {
+ .name = "fsl-imx8m-pcie-phy",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data, FslImx8mPciePhyState,
+ FSL_IMX8M_PCIE_PHY_DATA_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void fsl_imx8m_pcie_phy_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->realize = fsl_imx8m_pcie_phy_realize;
+ dc->vmsd = &fsl_imx8m_pcie_phy_vmstate;
+ rc->phases.hold = fsl_imx8m_pcie_phy_reset_hold;
+}
+
+static const TypeInfo fsl_imx8m_pcie_phy_types[] = {
+ {
+ .name = TYPE_FSL_IMX8M_PCIE_PHY,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(FslImx8mPciePhyState),
+ .class_init = fsl_imx8m_pcie_phy_class_init,
+ }
+};
+
+DEFINE_TYPES(fsl_imx8m_pcie_phy_types)
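The new PHY model above is a plain byte-wide register file whose only special behaviour is on reads of CMN_REG075, where the PLL lock and AFC done bits are always reported as set so a guest polling for PLL lock makes progress. A condensed model of that read path, reusing the register offset and bit values from the file above:

    #include <stdint.h>
    #include <stdio.h>

    #define CMN_REG075        0x1d4
    #define ANA_PLL_LOCK_DONE (1u << 1)
    #define ANA_PLL_AFC_DONE  (1u << 0)

    /* Whatever the guest stored, reads of CMN_REG075 report the PLL as
     * locked, mirroring fsl_imx8m_pcie_phy_read(). */
    static uint8_t phy_read(const uint8_t *regs, unsigned offset)
    {
        uint8_t val = regs[offset];

        if (offset == CMN_REG075) {
            val |= ANA_PLL_LOCK_DONE | ANA_PLL_AFC_DONE;
        }
        return val;
    }

    int main(void)
    {
        uint8_t regs[0x800] = { 0 };

        printf("CMN_REG075 reads as 0x%02x\n", phy_read(regs, CMN_REG075));
        return 0;
    }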
diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c
index f69413e..0aba47c 100644
--- a/hw/pci-host/gpex-acpi.c
+++ b/hw/pci-host/gpex-acpi.c
@@ -7,7 +7,8 @@
#include "hw/pci/pcie_host.h"
#include "hw/acpi/cxl.h"
-static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq)
+static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq,
+ Aml *scope, uint8_t bus_num)
{
Aml *method, *crs;
int i, slot_no;
@@ -20,7 +21,7 @@ static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq)
Aml *pkg = aml_package(4);
aml_append(pkg, aml_int((slot_no << 16) | 0xFFFF));
aml_append(pkg, aml_int(i));
- aml_append(pkg, aml_name("GSI%d", gsi));
+ aml_append(pkg, aml_name("L%.02X%X", bus_num, gsi));
aml_append(pkg, aml_int(0));
aml_append(rt_pkg, pkg);
}
@@ -30,7 +31,7 @@ static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq)
/* Create GSI link device */
for (i = 0; i < PCI_NUM_PINS; i++) {
uint32_t irqs = irq + i;
- Aml *dev_gsi = aml_device("GSI%d", i);
+ Aml *dev_gsi = aml_device("L%.02X%X", bus_num, i);
aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
aml_append(dev_gsi, aml_name_decl("_UID", aml_int(i)));
crs = aml_resource_template();
@@ -45,7 +46,7 @@ static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq)
aml_append(dev_gsi, aml_name_decl("_CRS", crs));
method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
aml_append(dev_gsi, method);
- aml_append(dev, dev_gsi);
+ aml_append(scope, dev_gsi);
}
}
@@ -140,6 +141,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
QLIST_FOREACH(bus, &bus->child, sibling) {
uint8_t bus_num = pci_bus_num(bus);
uint8_t numa_node = pci_bus_numa_node(bus);
+ uint32_t uid;
bool is_cxl = pci_bus_is_cxl(bus);
if (!pci_bus_is_root(bus)) {
@@ -155,6 +157,8 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
nr_pcie_buses = bus_num;
}
+ uid = object_property_get_uint(OBJECT(bus), "acpi_uid",
+ &error_fatal);
dev = aml_device("PC%.02X", bus_num);
if (is_cxl) {
struct Aml *pkg = aml_package(2);
@@ -167,18 +171,18 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
}
aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
- aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(uid)));
aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device")));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
if (numa_node != NUMA_NODE_UNASSIGNED) {
aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node)));
}
- acpi_dsdt_add_pci_route_table(dev, cfg->irq);
+ acpi_dsdt_add_pci_route_table(dev, cfg->irq, scope, bus_num);
/*
* Resources defined for PXBs are composed of the following parts:
- * 1. The resources the pci-brige/pcie-root-port need.
+ * 1. The resources the pci-bridge/pcie-root-port need.
* 2. The resources the devices behind pxb need.
*/
crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set,
@@ -205,7 +209,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
- acpi_dsdt_add_pci_route_table(dev, cfg->irq);
+ acpi_dsdt_add_pci_route_table(dev, cfg->irq, scope, 0);
method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(cfg->ecam.base)));
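The route-table change above moves the GSI link devices out of the bridge's device node into the surrounding scope and names them per bus with the "L%.02X%X" pattern (one letter, two hex digits of bus number, one hex digit of INTx pin), presumably so that link devices from different root bridges can coexist in the shared scope without clashing on the old GSI%d names. A small sketch of the name generation, using a hypothetical bus number:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 'L' + two hex digits of bus + one hex digit of pin = 4 characters,
         * which fits the 4-character ACPI name slot. */
        uint8_t bus_num = 0x21;   /* hypothetical PXB bus number */

        for (int pin = 0; pin < 4; pin++) {
            char name[5];
            snprintf(name, sizeof(name), "L%.02X%X",
                     (unsigned)bus_num, (unsigned)pin);
            printf("bus 0x%02x pin %d -> %s\n", (unsigned)bus_num, pin, name);
        }
        return 0;
    }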
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index e9cf455..b806a22 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -32,6 +32,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/irq.h"
+#include "hw/pci/pci_bus.h"
#include "hw/pci-host/gpex.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -41,20 +42,25 @@
* GPEX host
*/
+struct GPEXIrq {
+ qemu_irq irq;
+ int irq_num;
+};
+
static void gpex_set_irq(void *opaque, int irq_num, int level)
{
GPEXHost *s = opaque;
- qemu_set_irq(s->irq[irq_num], level);
+ qemu_set_irq(s->irq[irq_num].irq, level);
}
int gpex_set_irq_num(GPEXHost *s, int index, int gsi)
{
- if (index >= GPEX_NUM_IRQS) {
+ if (index >= s->num_irqs) {
return -EINVAL;
}
- s->irq_num[index] = gsi;
+ s->irq[index].irq_num = gsi;
return 0;
}
@@ -62,7 +68,7 @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin)
{
PCIINTxRoute route;
GPEXHost *s = opaque;
- int gsi = s->irq_num[pin];
+ int gsi = s->irq[pin].irq_num;
route.irq = gsi;
if (gsi < 0) {
@@ -74,6 +80,13 @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin)
return route;
}
+static int gpex_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
+{
+ PCIBus *bus = pci_device_root_bus(pci_dev);
+
+ return (PCI_SLOT(pci_dev->devfn) + pin) % bus->nirq;
+}
+
static void gpex_host_realize(DeviceState *dev, Error **errp)
{
PCIHostState *pci = PCI_HOST_BRIDGE(dev);
@@ -82,6 +95,8 @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev);
int i;
+ s->irq = g_malloc0_n(s->num_irqs, sizeof(*s->irq));
+
pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
sysbus_init_mmio(sbd, &pex->mmio);
@@ -128,26 +143,34 @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->io_ioport);
}
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
- sysbus_init_irq(sbd, &s->irq[i]);
- s->irq_num[i] = -1;
+ for (i = 0; i < s->num_irqs; i++) {
+ sysbus_init_irq(sbd, &s->irq[i].irq);
+ s->irq[i].irq_num = -1;
}
pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq,
- pci_swizzle_map_irq_fn, s, &s->io_mmio,
- &s->io_ioport, 0, 4, TYPE_PCIE_BUS);
+ gpex_swizzle_map_irq_fn,
+ s, &s->io_mmio, &s->io_ioport, 0,
+ s->num_irqs, TYPE_PCIE_BUS);
pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq);
qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal);
}
+static void gpex_host_unrealize(DeviceState *dev)
+{
+ GPEXHost *s = GPEX_HOST(dev);
+
+ g_free(s->irq);
+}
+
static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
{
return "0000:00";
}
-static Property gpex_host_properties[] = {
+static const Property gpex_host_properties[] = {
/*
* Permit CPU accesses to unmapped areas of the PIO and MMIO windows
* (discarding writes and returning -1 for reads) rather than aborting.
@@ -166,16 +189,17 @@ static Property gpex_host_properties[] = {
gpex_cfg.mmio64.base, 0),
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost,
gpex_cfg.mmio64.size, 0),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_UINT8("num-irqs", GPEXHost, num_irqs, PCI_NUM_PINS),
};
-static void gpex_host_class_init(ObjectClass *klass, void *data)
+static void gpex_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
hc->root_bus_path = gpex_host_root_bus_path;
dc->realize = gpex_host_realize;
+ dc->unrealize = gpex_host_unrealize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
device_class_set_props(dc, gpex_host_properties);
@@ -213,7 +237,7 @@ static const VMStateDescription vmstate_gpex_root = {
}
};
-static void gpex_root_class_init(ObjectClass *klass, void *data)
+static void gpex_root_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -237,7 +261,7 @@ static const TypeInfo gpex_root_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(GPEXRootState),
.class_init = gpex_root_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
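gpex_swizzle_map_irq_fn() above generalises the usual INTx swizzle to the bus's configured interrupt count instead of the fixed four pins, so a host created with a larger "num-irqs" spreads devices across all of its lines. A sketch of the mapping under an assumed num-irqs=8 configuration:

    #include <stdio.h>

    /* Standard PCI INTx swizzle, generalised to an arbitrary number of host
     * IRQ lines (nirq is 4 by default, configurable via "num-irqs"). */
    static int swizzle(int slot, int pin, int nirq)
    {
        return (slot + pin) % nirq;
    }

    int main(void)
    {
        int nirq = 8;  /* hypothetical host configured with num-irqs=8 */

        for (int slot = 0; slot < 4; slot++) {
            for (int pin = 0; pin < 4; pin++) {
                printf("slot %d INT%c -> host irq %d\n",
                       slot, 'A' + pin, swizzle(slot, pin, nirq));
            }
        }
        return 0;
    }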
diff --git a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c
index 8e589ff..f9da5a9 100644
--- a/hw/pci-host/grackle.c
+++ b/hw/pci-host/grackle.c
@@ -94,7 +94,7 @@ static void grackle_pci_realize(PCIDevice *d, Error **errp)
d->config[PCI_CLASS_PROG] = 0x01;
}
-static void grackle_pci_class_init(ObjectClass *klass, void *data)
+static void grackle_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -116,7 +116,7 @@ static const TypeInfo grackle_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = grackle_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -129,12 +129,11 @@ static char *grackle_ofw_unit_address(const SysBusDevice *dev)
return g_strdup_printf("%x", s->ofw_addr);
}
-static Property grackle_properties[] = {
+static const Property grackle_properties[] = {
DEFINE_PROP_UINT32("ofw-addr", GrackleState, ofw_addr, -1),
- DEFINE_PROP_END_OF_LIST()
};
-static void grackle_class_init(ObjectClass *klass, void *data)
+static void grackle_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c
index e02efc9..b12a256 100644
--- a/hw/pci-host/gt64120.c
+++ b/hw/pci-host/gt64120.c
@@ -1,6 +1,8 @@
/*
* QEMU GT64120 PCI host
*
+ * (Datasheet GT-64120 Rev 1.4 from Sep 14, 1999)
+ *
* Copyright (c) 2006,2007 Aurelien Jarno
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -318,38 +320,6 @@ static void gt64120_isd_mapping(GT64120State *s)
memory_region_transaction_commit();
}
-static void gt64120_update_pci_cfgdata_mapping(GT64120State *s)
-{
- /* Indexed on MByteSwap bit, see Table 158: PCI_0 Command, Offset: 0xc00 */
- static const MemoryRegionOps *pci_host_data_ops[] = {
- &pci_host_data_be_ops, &pci_host_data_le_ops
- };
- PCIHostState *phb = PCI_HOST_BRIDGE(s);
-
- memory_region_transaction_begin();
-
- /*
- * The setting of the MByteSwap bit and MWordSwap bit in the PCI Internal
- * Command Register determines how data transactions from the CPU to/from
- * PCI are handled along with the setting of the Endianness bit in the CPU
- * Configuration Register. See:
- * - Table 16: 32-bit PCI Transaction Endianness
- * - Table 158: PCI_0 Command, Offset: 0xc00
- */
-
- if (memory_region_is_mapped(&phb->data_mem)) {
- memory_region_del_subregion(&s->ISD_mem, &phb->data_mem);
- object_unparent(OBJECT(&phb->data_mem));
- }
- memory_region_init_io(&phb->data_mem, OBJECT(phb),
- pci_host_data_ops[s->regs[GT_PCI0_CMD] & 1],
- s, "pci-conf-data", 4);
- memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGDATA << 2,
- &phb->data_mem, 1);
-
- memory_region_transaction_commit();
-}
-
static void gt64120_pci_mapping(GT64120State *s)
{
memory_region_transaction_begin();
@@ -643,7 +613,6 @@ static void gt64120_writel(void *opaque, hwaddr addr,
case GT_PCI0_CMD:
case GT_PCI1_CMD:
s->regs[saddr] = val & 0x0401fc0f;
- gt64120_update_pci_cfgdata_mapping(s);
break;
case GT_PCI0_TOR:
case GT_PCI0_BS_SCS10:
@@ -687,7 +656,6 @@ static void gt64120_writel(void *opaque, hwaddr addr,
case GT_PCI0_CFGDATA:
/* Mapped via in gt64120_pci_mapping() */
g_assert_not_reached();
- break;
/* Interrupts */
case GT_INTRCAUSE:
@@ -931,7 +899,6 @@ static uint64_t gt64120_readl(void *opaque,
case GT_PCI0_CFGDATA:
/* Mapped via in gt64120_pci_mapping() */
g_assert_not_reached();
- break;
case GT_PCI0_CMD:
case GT_PCI0_TOR:
@@ -1024,6 +991,48 @@ static const MemoryRegionOps isd_mem_ops = {
},
};
+static bool bswap(const GT64120State *s)
+{
+ PCIHostState *phb = PCI_HOST_BRIDGE(s);
+ /* Check for bus == 0 && device == 0: bits 11:15 = device, bits 16:23 = bus */
+ bool is_phb_dev0 = extract32(phb->config_reg, 11, 13) == 0;
+ bool le_mode = FIELD_EX32(s->regs[GT_PCI0_CMD], GT_PCI0_CMD, MByteSwap);
+ /* Only swap for non-bridge devices in big-endian mode */
+ return !le_mode && !is_phb_dev0;
+}
+
+static uint64_t gt64120_pci_data_read(void *opaque, hwaddr addr, unsigned size)
+{
+ GT64120State *s = opaque;
+ uint32_t val = pci_host_data_le_ops.read(opaque, addr, size);
+
+ if (bswap(s)) {
+ val = bswap32(val);
+ }
+ return val;
+}
+
+static void gt64120_pci_data_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ GT64120State *s = opaque;
+
+ if (bswap(s)) {
+ val = bswap32(val);
+ }
+ pci_host_data_le_ops.write(opaque, addr, val, size);
+}
+
+static const MemoryRegionOps gt64120_pci_data_ops = {
+ .read = gt64120_pci_data_read,
+ .write = gt64120_pci_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
static void gt64120_reset(DeviceState *dev)
{
GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev);
@@ -1178,7 +1187,6 @@ static void gt64120_reset(DeviceState *dev)
gt64120_isd_mapping(s);
gt64120_pci_mapping(s);
- gt64120_update_pci_cfgdata_mapping(s);
}
static void gt64120_realize(DeviceState *dev, Error **errp)
@@ -1202,6 +1210,12 @@ static void gt64120_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2,
&phb->conf_mem, 1);
+ memory_region_init_io(&phb->data_mem, OBJECT(phb),
+ &gt64120_pci_data_ops,
+ s, "pci-conf-data", 4);
+ memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGDATA << 2,
+ &phb->data_mem, 1);
+
/*
* The whole address space decoded by the GT-64120A doesn't generate
@@ -1213,25 +1227,44 @@ static void gt64120_realize(DeviceState *dev, Error **errp)
static void gt64120_pci_realize(PCIDevice *d, Error **errp)
{
- /* FIXME: Malta specific hw assumptions ahead */
+ /* Values from chapter 17.16 "PCI Configuration" */
+
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_0, 0xfffff008); /* SCS[1:0] */
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_1, 0xfffff008); /* SCS[3:2] */
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_2, 0xfffff008); /* CS[2:0] */
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_3, 0xfffff008); /* CS[3], BootCS */
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_4, 0xfffff000); /* ISD MMIO */
+ pci_set_long(d->wmask + PCI_BASE_ADDRESS_5, 0xfffff001); /* ISD I/O */
+}
+
+static void gt64120_pci_reset_hold(Object *obj, ResetType type)
+{
+ PCIDevice *d = PCI_DEVICE(obj);
+
+ /* Values from chapter 17.16 "PCI Configuration" */
+
pci_set_word(d->config + PCI_COMMAND, 0);
pci_set_word(d->config + PCI_STATUS,
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
pci_config_set_prog_interface(d->config, 0);
+
pci_set_long(d->config + PCI_BASE_ADDRESS_0, 0x00000008);
pci_set_long(d->config + PCI_BASE_ADDRESS_1, 0x01000008);
pci_set_long(d->config + PCI_BASE_ADDRESS_2, 0x1c000000);
pci_set_long(d->config + PCI_BASE_ADDRESS_3, 0x1f000000);
pci_set_long(d->config + PCI_BASE_ADDRESS_4, 0x14000000);
pci_set_long(d->config + PCI_BASE_ADDRESS_5, 0x14000001);
+
pci_set_byte(d->config + 0x3d, 0x01);
}
-static void gt64120_pci_class_init(ObjectClass *klass, void *data)
+static void gt64120_pci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ rc->phases.hold = gt64120_pci_reset_hold;
k->realize = gt64120_pci_realize;
k->vendor_id = PCI_VENDOR_ID_MARVELL;
k->device_id = PCI_DEVICE_ID_MARVELL_GT6412X;
@@ -1249,26 +1282,25 @@ static const TypeInfo gt64120_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = gt64120_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static Property gt64120_properties[] = {
+static const Property gt64120_properties[] = {
DEFINE_PROP_BOOL("cpu-little-endian", GT64120State,
cpu_little_endian, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void gt64120_class_init(ObjectClass *klass, void *data)
+static void gt64120_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
device_class_set_props(dc, gt64120_properties);
dc->realize = gt64120_realize;
- dc->reset = gt64120_reset;
+ device_class_set_legacy_reset(dc, gt64120_reset);
dc->vmsd = &vmstate_gt64120;
}
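The new bswap() predicate above decides whether PCI config data cycles are byte-swapped: swapping happens only when MByteSwap selects the big-endian CPU view and the access does not target the host bridge itself, which is detected by checking that the device and bus fields of the latched config address (bits 11:23) are all zero. A standalone sketch of that decision, with a hypothetical config address:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-swap a 32-bit value, as gt64120_pci_data_read/write do when the
     * bridge is in big-endian mode and the access is not for the bridge
     * itself (bus 0, device 0). */
    static uint32_t bswap32_local(uint32_t v)
    {
        return ((v & 0x000000ffu) << 24) |
               ((v & 0x0000ff00u) << 8)  |
               ((v & 0x00ff0000u) >> 8)  |
               ((v & 0xff000000u) >> 24);
    }

    int main(void)
    {
        /* Hypothetical CONFIG_ADDR value: bus 1, device 2, function 0, reg 0. */
        uint32_t config_reg = (1u << 16) | (2u << 11);

        /* Bits 11:23 cover device (11:15) and bus (16:23); zero means the
         * access targets the host bridge at bus 0, device 0. */
        int is_phb_dev0 = ((config_reg >> 11) & 0x1fff) == 0;
        int le_mode = 0;   /* MByteSwap clear: big-endian CPU view */
        int swap = !le_mode && !is_phb_dev0;

        uint32_t val = 0x12345678;
        printf("swap=%d value on the bus: 0x%08x\n",
               swap, swap ? bswap32_local(val) : val);
        return 0;
    }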
diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c
index 4f0a043..e13bb1b 100644
--- a/hw/pci-host/i440fx.c
+++ b/hw/pci-host/i440fx.c
@@ -315,7 +315,7 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
i440fx_update_memory_mappings(f);
}
-static void i440fx_class_init(ObjectClass *klass, void *data)
+static void i440fx_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -341,7 +341,7 @@ static const TypeInfo i440fx_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCII440FXState),
.class_init = i440fx_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -353,7 +353,7 @@ static const char *i440fx_pcihost_root_bus_path(PCIHostState *host_bridge,
return "0000:00";
}
-static Property i440fx_props[] = {
+static const Property i440fx_props[] = {
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState,
pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT),
DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, I440FXState,
@@ -362,10 +362,9 @@ static Property i440fx_props[] = {
above_4g_mem_size, 0),
DEFINE_PROP_BOOL("x-pci-hole64-fix", I440FXState, pci_hole64_fix, true),
DEFINE_PROP_STRING(I440FX_HOST_PROP_PCI_TYPE, I440FXState, pci_type),
- DEFINE_PROP_END_OF_LIST(),
};
-static void i440fx_pcihost_class_init(ObjectClass *klass, void *data)
+static void i440fx_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
index 3001e93..937a0f7 100644
--- a/hw/pci-host/meson.build
+++ b/hw/pci-host/meson.build
@@ -28,6 +28,7 @@ pci_ss.add(when: 'CONFIG_ARTICIA', if_true: files('articia.c'))
pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c'))
# ARM devices
+pci_ss.add(when: 'CONFIG_PCI_EXPRESS_FSL_IMX8M_PHY', if_true: files('fsl_imx8m_phy.c'))
pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c'))
# HPPA devices
diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c
index 01bd8c8..e05b677 100644
--- a/hw/pci-host/mv64361.c
+++ b/hw/pci-host/mv64361.c
@@ -17,7 +17,7 @@
#include "hw/irq.h"
#include "hw/intc/i8259.h"
#include "hw/qdev-properties.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "trace.h"
@@ -26,7 +26,7 @@
#define TYPE_MV64361_PCI_BRIDGE "mv64361-pcibridge"
-static void mv64361_pcibridge_class_init(ObjectClass *klass, void *data)
+static void mv64361_pcibridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -46,7 +46,7 @@ static const TypeInfo mv64361_pcibridge_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = mv64361_pcibridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -95,14 +95,14 @@ static void mv64361_pcihost_realize(DeviceState *dev, Error **errp)
&s->mem, &s->io, 0, 4, TYPE_PCI_BUS);
g_free(name);
pci_create_simple(h->bus, 0, TYPE_MV64361_PCI_BRIDGE);
+ qdev_init_gpio_out(dev, s->irq, ARRAY_SIZE(s->irq));
}
-static Property mv64361_pcihost_props[] = {
+static const Property mv64361_pcihost_props[] = {
DEFINE_PROP_UINT8("index", MV64361PCIState, index, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void mv64361_pcihost_class_init(ObjectClass *klass, void *data)
+static void mv64361_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -923,12 +923,12 @@ static void mv64361_reset(DeviceState *dev)
set_mem_windows(s, 0xfbfff);
}
-static void mv64361_class_init(ObjectClass *klass, void *data)
+static void mv64361_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mv64361_realize;
- dc->reset = mv64361_reset;
+ device_class_set_legacy_reset(dc, mv64361_reset);
}
static const TypeInfo mv64361_type_info = {
diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c
index d4c118d..4b0ced7 100644
--- a/hw/pci-host/pnv_phb.c
+++ b/hw/pci-host/pnv_phb.c
@@ -17,7 +17,7 @@
#include "hw/ppc/pnv.h"
#include "hw/qdev-properties.h"
#include "qom/object.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/*
@@ -183,7 +183,7 @@ static const char *pnv_phb_root_bus_path(PCIHostState *host_bridge,
return phb->bus_path;
}
-static Property pnv_phb_properties[] = {
+static const Property pnv_phb_properties[] = {
DEFINE_PROP_UINT32("index", PnvPHB, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB, chip_id, 0),
DEFINE_PROP_UINT32("version", PnvPHB, version, 0),
@@ -192,11 +192,9 @@ static Property pnv_phb_properties[] = {
DEFINE_PROP_LINK("pec", PnvPHB, pec, TYPE_PNV_PHB4_PEC,
PnvPhb4PecState *),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_phb_class_init(ObjectClass *klass, void *data)
+static void pnv_phb_class_init(ObjectClass *klass, const void *data)
{
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -302,13 +300,11 @@ static void pnv_phb_root_port_realize(DeviceState *dev, Error **errp)
pci_config_set_interrupt_pin(pci->config, 0);
}
-static Property pnv_phb_root_port_properties[] = {
+static const Property pnv_phb_root_port_properties[] = {
DEFINE_PROP_UINT32("version", PnvPHBRootPort, version, 0),
-
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_phb_root_port_class_init(ObjectClass *klass, void *data)
+static void pnv_phb_root_port_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c
index 2a74dbe..a4335f4 100644
--- a/hw/pci-host/pnv_phb3.c
+++ b/hw/pci-host/pnv_phb3.c
@@ -20,7 +20,7 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qom/object.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#define phb3_error(phb, fmt, ...) \
qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \
@@ -888,7 +888,7 @@ DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB3_IOMMU_MEMORY_REGION,
TYPE_PNV_PHB3_IOMMU_MEMORY_REGION)
static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
@@ -1090,15 +1090,14 @@ void pnv_phb3_update_regions(PnvPHB3 *phb)
pnv_phb3_check_all_m64s(phb);
}
-static Property pnv_phb3_properties[] = {
+static const Property pnv_phb3_properties[] = {
DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0),
DEFINE_PROP_LINK("chip", PnvPHB3, chip, TYPE_PNV_CHIP, PnvChip *),
DEFINE_PROP_LINK("phb-base", PnvPHB3, phb_base, TYPE_PNV_PHB, PnvPHB *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_phb3_class_init(ObjectClass *klass, void *data)
+static void pnv_phb3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1150,7 +1149,7 @@ static void pnv_phb3_root_bus_set_prop(Object *obj, Visitor *v,
}
}
-static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data)
+static void pnv_phb3_root_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c
index 77d673d..3a83311 100644
--- a/hw/pci-host/pnv_phb3_msi.c
+++ b/hw/pci-host/pnv_phb3_msi.c
@@ -15,7 +15,7 @@
#include "hw/pci/msi.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
static uint64_t phb3_msi_ive_addr(PnvPHB3 *phb, int srcno)
{
@@ -284,7 +284,7 @@ static void phb3_msi_instance_init(Object *obj)
ics->offset = 0;
}
-static void phb3_msi_class_init(ObjectClass *klass, void *data)
+static void phb3_msi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ICSStateClass *isc = ICS_CLASS(klass);
diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c
index 82f70ef..1f7a149 100644
--- a/hw/pci-host/pnv_phb3_pbcq.c
+++ b/hw/pci-host/pnv_phb3_pbcq.c
@@ -337,7 +337,7 @@ static void phb3_pbcq_instance_init(Object *obj)
OBJ_PROP_LINK_STRONG);
}
-static void pnv_pbcq_class_init(ObjectClass *klass, void *data)
+static void pnv_pbcq_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -354,7 +354,7 @@ static const TypeInfo pnv_pbcq_type_info = {
.instance_size = sizeof(PnvPBCQState),
.instance_init = phb3_pbcq_instance_init,
.class_init = pnv_pbcq_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c
index 9999100..77ea352 100644
--- a/hw/pci-host/pnv_phb4.c
+++ b/hw/pci-host/pnv_phb4.c
@@ -1362,7 +1362,7 @@ DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION,
TYPE_PNV_PHB4_IOMMU_MEMORY_REGION)
static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
@@ -1688,16 +1688,15 @@ static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno,
}
}
-static Property pnv_phb4_properties[] = {
+static const Property pnv_phb4_properties[] = {
DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
DEFINE_PROP_LINK("pec", PnvPHB4, pec, TYPE_PNV_PHB4_PEC,
PnvPhb4PecState *),
DEFINE_PROP_LINK("phb-base", PnvPHB4, phb_base, TYPE_PNV_PHB, PnvPHB *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_phb4_class_init(ObjectClass *klass, void *data)
+static void pnv_phb4_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);
@@ -1715,7 +1714,7 @@ static const TypeInfo pnv_phb4_type_info = {
.instance_init = pnv_phb4_instance_init,
.instance_size = sizeof(PnvPHB4),
.class_init = pnv_phb4_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_NOTIFIER },
{ },
}
@@ -1762,7 +1761,7 @@ static void pnv_phb4_root_bus_set_prop(Object *obj, Visitor *v,
}
}
-static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
+static void pnv_phb4_root_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c
index ce8e228..5bac1c4 100644
--- a/hw/pci-host/pnv_phb4_pec.c
+++ b/hw/pci-host/pnv_phb4_pec.c
@@ -19,7 +19,7 @@
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include <libfdt.h>
@@ -197,6 +197,9 @@ static PnvPHB *pnv_pec_default_phb_realize(PnvPhb4PecState *pec,
return phb;
}
+#define XPEC_P9_PCI_LANE_CFG PPC_BITMASK(10, 11)
+#define XPEC_P10_PCI_LANE_CFG PPC_BITMASK(0, 1)
+
static void pnv_pec_realize(DeviceState *dev, Error **errp)
{
PnvPhb4PecState *pec = PNV_PHB4_PEC(dev);
@@ -211,6 +214,43 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp)
pec->num_phbs = pecc->num_phbs[pec->index];
+ /* Pervasive chiplet */
+ object_initialize_child(OBJECT(pec), "nest-pervasive-common",
+ &pec->nest_pervasive,
+ TYPE_PNV_NEST_CHIPLET_PERVASIVE);
+ if (!qdev_realize(DEVICE(&pec->nest_pervasive), NULL, errp)) {
+ return;
+ }
+
+ /* Set up pervasive chiplet registers */
+ /*
+ * Most registers are not set up, this just sets the PCI CONF1 link-width
+ * field because skiboot probes it.
+ */
+ if (pecc->version == PNV_PHB4_VERSION) {
+ /*
+ * On P9, PEC2 has configurable 1/2/3-furcation.
+ * Make it trifurcated (x8, x4, x4) to match pnv_pec_num_phbs.
+ */
+ if (pec->index == 2) {
+ pec->nest_pervasive.control_regs.cplt_cfg1 =
+ SETFIELD(XPEC_P9_PCI_LANE_CFG,
+ pec->nest_pervasive.control_regs.cplt_cfg1,
+ 0b10);
+ }
+ } else if (pecc->version == PNV_PHB5_VERSION) {
+ /*
+ * On P10, both PECs have configurable 1/2/3-furcation.
+ * Both are trifurcated to match pnv_phb5_pec_num_stacks.
+ */
+ pec->nest_pervasive.control_regs.cplt_cfg1 =
+ SETFIELD(XPEC_P10_PCI_LANE_CFG,
+ pec->nest_pervasive.control_regs.cplt_cfg1,
+ 0b10);
+ } else {
+ g_assert_not_reached();
+ }
+
/* Create PHBs if running with defaults */
if (defaults_enabled()) {
g_assert(pec->num_phbs <= MAX_PHBS_PER_PEC);
@@ -283,17 +323,23 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt,
return 0;
}
-static Property pnv_pec_properties[] = {
+static const Property pnv_pec_properties[] = {
DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0),
DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0),
DEFINE_PROP_LINK("chip", PnvPhb4PecState, chip, TYPE_PNV_CHIP,
PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
+#define XPEC_PCI_CPLT_OFFSET 0x1000000ULL
+
+static uint32_t pnv_pec_xscom_cplt_base(PnvPhb4PecState *pec)
+{
+ return PNV9_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
+}
+
static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec)
{
- return PNV9_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index;
+ return PNV9_XSCOM_PEC_PCI_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
}
static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec)
@@ -308,7 +354,7 @@ static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec)
*/
static const uint32_t pnv_pec_num_phbs[] = { 1, 2, 3 };
-static void pnv_pec_class_init(ObjectClass *klass, void *data)
+static void pnv_pec_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -322,6 +368,7 @@ static void pnv_pec_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, pnv_pec_properties);
dc->user_creatable = false;
+ pecc->xscom_cplt_base = pnv_pec_xscom_cplt_base;
pecc->xscom_nest_base = pnv_pec_xscom_nest_base;
pecc->xscom_pci_base = pnv_pec_xscom_pci_base;
pecc->xscom_nest_size = PNV9_XSCOM_PEC_NEST_SIZE;
@@ -341,7 +388,7 @@ static const TypeInfo pnv_pec_type_info = {
.instance_size = sizeof(PnvPhb4PecState),
.class_init = pnv_pec_class_init,
.class_size = sizeof(PnvPhb4PecClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
@@ -350,6 +397,10 @@ static const TypeInfo pnv_pec_type_info = {
/*
* POWER10 definitions
*/
+static uint32_t pnv_phb5_pec_xscom_cplt_base(PnvPhb4PecState *pec)
+{
+ return PNV10_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index;
+}
static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec)
{
@@ -368,12 +419,13 @@ static uint32_t pnv_phb5_pec_xscom_nest_base(PnvPhb4PecState *pec)
*/
static const uint32_t pnv_phb5_pec_num_stacks[] = { 3, 3 };
-static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data)
+static void pnv_phb5_pec_class_init(ObjectClass *klass, const void *data)
{
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass);
static const char compat[] = "ibm,power10-pbcq";
static const char stk_compat[] = "ibm,power10-phb-stack";
+ pecc->xscom_cplt_base = pnv_phb5_pec_xscom_cplt_base;
pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base;
pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base;
pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE;
@@ -393,7 +445,7 @@ static const TypeInfo pnv_phb5_pec_type_info = {
.instance_size = sizeof(PnvPhb4PecState),
.class_init = pnv_phb5_pec_class_init,
.class_size = sizeof(PnvPhb4PecClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
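The realize hook above seeds the pervasive chiplet's CPLT_CFG1 register with a lane-configuration value of 0b10 (trifurcation) by inserting it into a PPC_BITMASK() field with SETFIELD(), where bit numbering is MSB0 (bit 0 is the most significant bit of the 64-bit doubleword). A self-contained sketch of that field insertion; the two macros below are local illustrative copies of the idea, not the QEMU macros themselves.

    #include <stdint.h>
    #include <stdio.h>

    /* IBM (MSB0) bit numbering helpers in the spirit of PPC_BIT/PPC_BITMASK:
     * bit 0 is the most significant bit of a 64-bit doubleword. */
    #define BIT_MSB0(n)         (1ULL << (63 - (n)))
    #define BITMASK_MSB0(s, e)  ((BIT_MSB0(s) - BIT_MSB0(e)) + BIT_MSB0(s))

    int main(void)
    {
        /* P9 PEC lane-config field occupies MSB0 bits 10:11 of CPLT_CFG1. */
        uint64_t lane_cfg_mask = BITMASK_MSB0(10, 11);
        uint64_t cplt_cfg1 = 0;

        /* Insert 0b10 ("trifurcated") into the field, SETFIELD-style. */
        int shift = __builtin_ctzll(lane_cfg_mask);
        cplt_cfg1 = (cplt_cfg1 & ~lane_cfg_mask) |
                    (((uint64_t)0b10 << shift) & lane_cfg_mask);

        printf("mask = 0x%016llx cfg1 = 0x%016llx\n",
               (unsigned long long)lane_cfg_mask,
               (unsigned long long)cplt_cfg1);
        return 0;
    }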
diff --git a/hw/pci-host/ppc440_pcix.c b/hw/pci-host/ppc440_pcix.c
index ef212d9..744b85e 100644
--- a/hw/pci-host/ppc440_pcix.c
+++ b/hw/pci-host/ppc440_pcix.c
@@ -519,12 +519,12 @@ static void ppc440_pcix_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void ppc440_pcix_class_init(ObjectClass *klass, void *data)
+static void ppc440_pcix_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ppc440_pcix_realize;
- dc->reset = ppc440_pcix_reset;
+ device_class_set_legacy_reset(dc, ppc440_pcix_reset);
}
static const TypeInfo ppc440_pcix_info = {
diff --git a/hw/pci-host/ppc4xx_pci.c b/hw/pci-host/ppc4xx_pci.c
index b6c6c89..2547817 100644
--- a/hw/pci-host/ppc4xx_pci.c
+++ b/hw/pci-host/ppc4xx_pci.c
@@ -27,7 +27,7 @@
#include "hw/pci-host/ppc4xx.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
#include "trace.h"
@@ -349,7 +349,7 @@ static void ppc4xx_pcihost_realize(DeviceState *dev, Error **errp)
qemu_register_reset(ppc4xx_pci_reset, s);
}
-static void ppc4xx_host_bridge_class_init(ObjectClass *klass, void *data)
+static void ppc4xx_host_bridge_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -370,13 +370,13 @@ static const TypeInfo ppc4xx_host_bridge_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = ppc4xx_host_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void ppc4xx_pcihost_class_init(ObjectClass *klass, void *data)
+static void ppc4xx_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c
index 95b983b..52269b0 100644
--- a/hw/pci-host/ppce500.c
+++ b/hw/pci-host/ppce500.c
@@ -16,13 +16,11 @@
#include "qemu/osdep.h"
#include "hw/irq.h"
-#include "hw/ppc/e500-ccsr.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
#include "qemu/bswap.h"
-#include "qemu/module.h"
#include "hw/pci-host/ppce500.h"
#include "qom/object.h"
@@ -419,11 +417,12 @@ static const VMStateDescription vmstate_ppce500_pci = {
static void e500_pcihost_bridge_realize(PCIDevice *d, Error **errp)
{
PPCE500PCIBridgeState *b = PPC_E500_PCI_BRIDGE(d);
- PPCE500CCSRState *ccsr = CCSR(container_get(qdev_get_machine(),
- "/e500-ccsr"));
+ SysBusDevice *ccsr = SYS_BUS_DEVICE(
+ object_resolve_path_component(qdev_get_machine(), "e500-ccsr"));
+ MemoryRegion *ccsr_space = sysbus_mmio_get_region(ccsr, 0);
- memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0", &ccsr->ccsr_space,
- 0, int128_get64(ccsr->ccsr_space.size));
+ memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0",
+ ccsr_space, 0, int128_get64(ccsr_space->size));
pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &b->bar0);
}
@@ -475,7 +474,7 @@ static void e500_pcihost_realize(DeviceState *dev, Error **errp)
address_space_init(&s->bm_as, &s->bm, "pci-bm");
pci_setup_iommu(b, &ppce500_iommu_ops, s);
- pci_create_simple(b, 0, "e500-host-bridge");
+ pci_create_simple(b, 0, TYPE_PPC_E500_PCI_BRIDGE);
memory_region_init(&s->container, OBJECT(h), "pci-container", PCIE500_ALL_SIZE);
memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_be_ops, h,
@@ -491,7 +490,7 @@ static void e500_pcihost_realize(DeviceState *dev, Error **errp)
pci_bus_set_route_irq_fn(b, e500_route_intx_pin_to_irq);
}
-static void e500_host_bridge_class_init(ObjectClass *klass, void *data)
+static void e500_host_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -508,24 +507,12 @@ static void e500_host_bridge_class_init(ObjectClass *klass, void *data)
dc->user_creatable = false;
}
-static const TypeInfo e500_host_bridge_info = {
- .name = TYPE_PPC_E500_PCI_BRIDGE,
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PPCE500PCIBridgeState),
- .class_init = e500_host_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static Property pcihost_properties[] = {
+static const Property pcihost_properties[] = {
DEFINE_PROP_UINT32("first_slot", PPCE500PCIState, first_slot, 0x11),
DEFINE_PROP_UINT32("first_pin_irq", PPCE500PCIState, first_pin_irq, 0x1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void e500_pcihost_class_init(ObjectClass *klass, void *data)
+static void e500_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -535,17 +522,23 @@ static void e500_pcihost_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_ppce500_pci;
}
-static const TypeInfo e500_pcihost_info = {
- .name = TYPE_PPC_E500_PCI_HOST_BRIDGE,
- .parent = TYPE_PCI_HOST_BRIDGE,
- .instance_size = sizeof(PPCE500PCIState),
- .class_init = e500_pcihost_class_init,
+static const TypeInfo e500_pci_types[] = {
+ {
+ .name = TYPE_PPC_E500_PCI_BRIDGE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PPCE500PCIBridgeState),
+ .class_init = e500_host_bridge_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+ },
+ {
+ .name = TYPE_PPC_E500_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(PPCE500PCIState),
+ .class_init = e500_pcihost_class_init,
+ },
};
-static void e500_pci_register_types(void)
-{
- type_register_static(&e500_pcihost_info);
- type_register_static(&e500_host_bridge_info);
-}
-
-type_init(e500_pci_register_types)
+DEFINE_TYPES(e500_pci_types)
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index 0b6cbae..1951ae4 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -170,7 +170,7 @@ static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v,
* properties need to be initialized manually by
* q35_host_initfn() after the object_initialize() call.
*/
-static Property q35_host_props[] = {
+static const Property q35_host_props[] = {
DEFINE_PROP_UINT64(PCIE_HOST_MCFG_BASE, Q35PCIHost, parent_obj.base_addr,
MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT),
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost,
@@ -182,10 +182,9 @@ static Property q35_host_props[] = {
DEFINE_PROP_BOOL(PCI_HOST_PROP_SMM_RANGES, Q35PCIHost,
mch.has_smm_ranges, true),
DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void q35_host_class_init(ObjectClass *klass, void *data)
+static void q35_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
@@ -662,31 +661,20 @@ static void mch_realize(PCIDevice *d, Error **errp)
OBJECT(&mch->smram));
}
-uint64_t mch_mcfg_base(void)
-{
- bool ambiguous;
- Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous);
- if (!o) {
- return 0;
- }
- return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT;
-}
-
-static Property mch_props[] = {
+static const Property mch_props[] = {
DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes,
16),
DEFINE_PROP_BOOL("smbase-smram", MCHPCIState, has_smram_at_smbase, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mch_class_init(ObjectClass *klass, void *data)
+static void mch_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
k->realize = mch_realize;
k->config_write = mch_write_config;
- dc->reset = mch_reset;
+ device_class_set_legacy_reset(dc, mch_reset);
device_class_set_props(dc, mch_props);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "Host bridge";
@@ -715,7 +703,7 @@ static const TypeInfo mch_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(MCHPCIState),
.class_init = mch_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c
index a7dfddd..f8c0be5 100644
--- a/hw/pci-host/raven.c
+++ b/hw/pci-host/raven.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qapi/error.h"
@@ -35,9 +34,7 @@
#include "migration/vmstate.h"
#include "hw/intc/i8259.h"
#include "hw/irq.h"
-#include "hw/loader.h"
#include "hw/or-irq.h"
-#include "elf.h"
#include "qom/object.h"
#define TYPE_RAVEN_PCI_DEVICE "raven"
@@ -47,10 +44,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(RavenPCIState, RAVEN_PCI_DEVICE)
struct RavenPCIState {
PCIDevice dev;
-
- uint32_t elf_machine;
- char *bios_name;
- MemoryRegion bios;
};
typedef struct PRePPCIState PREPPCIState;
@@ -75,11 +68,8 @@ struct PRePPCIState {
RavenPCIState pci_dev;
int contiguous_map;
- bool is_legacy_prep;
};
-#define BIOS_SIZE (1 * MiB)
-
#define PCI_IO_BASE_ADDR 0x80000000 /* Physical address on main bus */
static inline uint32_t raven_pci_io_config(hwaddr addr)
@@ -243,22 +233,18 @@ static void raven_pcihost_realizefn(DeviceState *d, Error **errp)
MemoryRegion *address_space_mem = get_system_memory();
int i;
- if (s->is_legacy_prep) {
- for (i = 0; i < PCI_NUM_PINS; i++) {
- sysbus_init_irq(dev, &s->pci_irqs[i]);
- }
- } else {
- /* According to PReP specification section 6.1.6 "System Interrupt
- * Assignments", all PCI interrupts are routed via IRQ 15 */
- s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ));
- object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS,
- &error_fatal);
- qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal);
- sysbus_init_irq(dev, &s->or_irq->out_irq);
-
- for (i = 0; i < PCI_NUM_PINS; i++) {
- s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i);
- }
+ /*
+ * According to PReP specification section 6.1.6 "System Interrupt
+ * Assignments", all PCI interrupts are routed via IRQ 15
+ */
+ s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ));
+ object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS,
+ &error_fatal);
+ qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal);
+ sysbus_init_irq(dev, &s->or_irq->out_irq);
+
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i);
}
qdev_init_gpio_in(d, raven_change_gpio, 1);
@@ -338,48 +324,9 @@ static void raven_pcihost_initfn(Object *obj)
static void raven_realize(PCIDevice *d, Error **errp)
{
- RavenPCIState *s = RAVEN_PCI_DEVICE(d);
- char *filename;
- int bios_size = -1;
-
d->config[PCI_CACHE_LINE_SIZE] = 0x08;
d->config[PCI_LATENCY_TIMER] = 0x10;
d->config[PCI_CAPABILITY_LIST] = 0x00;
-
- if (!memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios",
- BIOS_SIZE, errp)) {
- return;
- }
- memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE),
- &s->bios);
- if (s->bios_name) {
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name);
- if (filename) {
- if (s->elf_machine != EM_NONE) {
- bios_size = load_elf(filename, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, 1, s->elf_machine,
- 0, 0);
- }
- if (bios_size < 0) {
- bios_size = get_image_size(filename);
- if (bios_size > 0 && bios_size <= BIOS_SIZE) {
- hwaddr bios_addr;
- bios_size = (bios_size + 0xfff) & ~0xfff;
- bios_addr = (uint32_t)(-BIOS_SIZE);
- bios_size = load_image_targphys(filename, bios_addr,
- bios_size);
- }
- }
- }
- g_free(filename);
- if (bios_size < 0 || bios_size > BIOS_SIZE) {
- memory_region_del_subregion(get_system_memory(), &s->bios);
- error_setg(errp, "Could not load bios image '%s'", s->bios_name);
- return;
- }
- }
-
- vmstate_register_ram_global(&s->bios);
}
static const VMStateDescription vmstate_raven = {
@@ -392,7 +339,7 @@ static const VMStateDescription vmstate_raven = {
},
};
-static void raven_class_init(ObjectClass *klass, void *data)
+static void raven_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -416,29 +363,18 @@ static const TypeInfo raven_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(RavenPCIState),
.class_init = raven_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static Property raven_pcihost_properties[] = {
- DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine,
- EM_NONE),
- DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name),
- /* Temporary workaround until legacy prep machine is removed */
- DEFINE_PROP_BOOL("is-legacy-prep", PREPPCIState, is_legacy_prep,
- false),
- DEFINE_PROP_END_OF_LIST()
-};
-
-static void raven_pcihost_class_init(ObjectClass *klass, void *data)
+static void raven_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->realize = raven_pcihost_realizefn;
- device_class_set_props(dc, raven_pcihost_properties);
dc->fw_name = "pci";
}
diff --git a/hw/pci-host/remote.c b/hw/pci-host/remote.c
index bfb25ef..e6d2af4 100644
--- a/hw/pci-host/remote.c
+++ b/hw/pci-host/remote.c
@@ -28,7 +28,7 @@
#include "hw/pci/pcie_host.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/remote.h"
-#include "exec/memory.h"
+#include "system/memory.h"
static const char *remote_pcihost_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
@@ -46,7 +46,7 @@ static void remote_pcihost_realize(DeviceState *dev, Error **errp)
0, TYPE_PCIE_BUS);
}
-static void remote_pcihost_class_init(ObjectClass *klass, void *data)
+static void remote_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
diff --git a/hw/pci-host/sabre.c b/hw/pci-host/sabre.c
index d0851b4..538624c 100644
--- a/hw/pci-host/sabre.c
+++ b/hw/pci-host/sabre.c
@@ -37,7 +37,7 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace.h"
/*
@@ -456,7 +456,7 @@ static void sabre_pci_realize(PCIDevice *d, Error **errp)
PCI_STATUS_DEVSEL_MEDIUM);
}
-static void sabre_pci_class_init(ObjectClass *klass, void *data)
+static void sabre_pci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -477,7 +477,7 @@ static const TypeInfo sabre_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(SabrePCIState),
.class_init = sabre_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -492,19 +492,18 @@ static char *sabre_ofw_unit_address(const SysBusDevice *dev)
(uint32_t)(s->special_base & 0xffffffff));
}
-static Property sabre_properties[] = {
+static const Property sabre_properties[] = {
DEFINE_PROP_UINT64("special-base", SabreState, special_base, 0),
DEFINE_PROP_UINT64("mem-base", SabreState, mem_base, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sabre_class_init(ObjectClass *klass, void *data)
+static void sabre_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
dc->realize = sabre_realize;
- dc->reset = sabre_reset;
+ device_class_set_legacy_reset(dc, sabre_reset);
device_class_set_props(dc, sabre_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c
index 4edebce..de8f6a8 100644
--- a/hw/pci-host/sh_pci.c
+++ b/hw/pci-host/sh_pci.c
@@ -153,7 +153,7 @@ static void sh_pcic_pci_realize(PCIDevice *d, Error **errp)
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
}
-static void sh_pcic_pci_class_init(ObjectClass *klass, void *data)
+static void sh_pcic_pci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -168,7 +168,7 @@ static void sh_pcic_pci_class_init(ObjectClass *klass, void *data)
dc->user_creatable = false;
}
-static void sh_pcic_host_class_init(ObjectClass *klass, void *data)
+static void sh_pcic_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -186,7 +186,7 @@ static const TypeInfo sh_pcic_types[] = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = sh_pcic_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c
index e4c1abd..194037d 100644
--- a/hw/pci-host/uninorth.c
+++ b/hw/pci-host/uninorth.c
@@ -311,7 +311,7 @@ static void unin_internal_pci_host_realize(PCIDevice *d, Error **errp)
d->config[PCI_CAPABILITY_LIST] = 0x00;
}
-static void unin_main_pci_host_class_init(ObjectClass *klass, void *data)
+static void unin_main_pci_host_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -333,13 +333,13 @@ static const TypeInfo unin_main_pci_host_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = unin_main_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void u3_agp_pci_host_class_init(ObjectClass *klass, void *data)
+static void u3_agp_pci_host_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -361,13 +361,13 @@ static const TypeInfo u3_agp_pci_host_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = u3_agp_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void unin_agp_pci_host_class_init(ObjectClass *klass, void *data)
+static void unin_agp_pci_host_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -389,13 +389,14 @@ static const TypeInfo unin_agp_pci_host_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = unin_agp_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void unin_internal_pci_host_class_init(ObjectClass *klass, void *data)
+static void unin_internal_pci_host_class_init(ObjectClass *klass,
+ const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -417,18 +418,17 @@ static const TypeInfo unin_internal_pci_host_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = unin_internal_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static Property pci_unin_main_pci_host_props[] = {
+static const Property pci_unin_main_pci_host_props[] = {
DEFINE_PROP_UINT32("ofw-addr", UNINHostState, ofw_addr, -1),
- DEFINE_PROP_END_OF_LIST()
};
-static void pci_unin_main_class_init(ObjectClass *klass, void *data)
+static void pci_unin_main_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
@@ -448,7 +448,7 @@ static const TypeInfo pci_unin_main_info = {
.class_init = pci_unin_main_class_init,
};
-static void pci_u3_agp_class_init(ObjectClass *klass, void *data)
+static void pci_u3_agp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -464,7 +464,7 @@ static const TypeInfo pci_u3_agp_info = {
.class_init = pci_u3_agp_class_init,
};
-static void pci_unin_agp_class_init(ObjectClass *klass, void *data)
+static void pci_unin_agp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -480,7 +480,7 @@ static const TypeInfo pci_unin_agp_info = {
.class_init = pci_unin_agp_class_init,
};
-static void pci_unin_internal_class_init(ObjectClass *klass, void *data)
+static void pci_unin_internal_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -536,7 +536,7 @@ static void unin_init(Object *obj)
sysbus_init_mmio(sbd, &s->mem);
}
-static void unin_class_init(ObjectClass *klass, void *data)
+static void unin_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c
index 0e65deb..8ea26e3 100644
--- a/hw/pci-host/versatile.c
+++ b/hw/pci-host/versatile.c
@@ -246,7 +246,7 @@ static uint64_t pci_vpb_reg_read(void *opaque, hwaddr addr,
static const MemoryRegionOps pci_vpb_reg_ops = {
.read = pci_vpb_reg_read,
.write = pci_vpb_reg_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -312,7 +312,7 @@ static uint64_t pci_vpb_config_read(void *opaque, hwaddr addr,
static const MemoryRegionOps pci_vpb_config_ops = {
.read = pci_vpb_config_read,
.write = pci_vpb_config_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static int pci_vpb_map_irq(PCIDevice *d, int irq_num)
@@ -471,7 +471,7 @@ static void versatile_pci_host_realize(PCIDevice *d, Error **errp)
pci_set_byte(d->config + PCI_LATENCY_TIMER, 0x10);
}
-static void versatile_pci_host_class_init(ObjectClass *klass, void *data)
+static void versatile_pci_host_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -492,24 +492,23 @@ static const TypeInfo versatile_pci_host_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = versatile_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static Property pci_vpb_properties[] = {
+static const Property pci_vpb_properties[] = {
DEFINE_PROP_UINT8("broken-irq-mapping", PCIVPBState, irq_mapping_prop,
PCI_VPB_IRQMAP_ASSUME_OK),
- DEFINE_PROP_END_OF_LIST()
};
-static void pci_vpb_class_init(ObjectClass *klass, void *data)
+static void pci_vpb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_vpb_realize;
- dc->reset = pci_vpb_reset;
+ device_class_set_legacy_reset(dc, pci_vpb_reset);
dc->vmsd = &pci_vpb_vmstate;
device_class_set_props(dc, pci_vpb_properties);
}
diff --git a/hw/pci-host/xen_igd_pt.c b/hw/pci-host/xen_igd_pt.c
index d094b67..5dd17ef 100644
--- a/hw/pci-host/xen_igd_pt.c
+++ b/hw/pci-host/xen_igd_pt.c
@@ -95,7 +95,8 @@ static void igd_pt_i440fx_realize(PCIDevice *pci_dev, Error **errp)
}
}
-static void igd_passthrough_i440fx_class_init(ObjectClass *klass, void *data)
+static void igd_passthrough_i440fx_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/pci-host/xilinx-pcie.c b/hw/pci-host/xilinx-pcie.c
index c9ab705..c71492d 100644
--- a/hw/pci-host/xilinx-pcie.c
+++ b/hw/pci-host/xilinx-pcie.c
@@ -156,17 +156,16 @@ static void xilinx_pcie_host_init(Object *obj)
qdev_prop_set_bit(DEVICE(root), "multifunction", false);
}
-static Property xilinx_pcie_host_props[] = {
+static const Property xilinx_pcie_host_props[] = {
DEFINE_PROP_UINT32("bus_nr", XilinxPCIEHost, bus_nr, 0),
DEFINE_PROP_SIZE("cfg_base", XilinxPCIEHost, cfg_base, 0),
DEFINE_PROP_SIZE("cfg_size", XilinxPCIEHost, cfg_size, 32 * MiB),
DEFINE_PROP_SIZE("mmio_base", XilinxPCIEHost, mmio_base, 0),
DEFINE_PROP_SIZE("mmio_size", XilinxPCIEHost, mmio_size, 1 * MiB),
DEFINE_PROP_BOOL("link_up", XilinxPCIEHost, link_up, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_pcie_host_class_init(ObjectClass *klass, void *data)
+static void xilinx_pcie_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
@@ -287,7 +286,7 @@ static void xilinx_pcie_root_realize(PCIDevice *pci_dev, Error **errp)
}
}
-static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data)
+static void xilinx_pcie_root_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -300,7 +299,7 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_BRIDGE_HOST;
k->realize = xilinx_pcie_root_realize;
k->exit = pci_bridge_exitfn;
- dc->reset = pci_bridge_reset;
+ device_class_set_legacy_reset(dc, pci_bridge_reset);
k->config_read = xilinx_pcie_root_config_read;
k->config_write = xilinx_pcie_root_config_write;
/*
@@ -315,7 +314,7 @@ static const TypeInfo xilinx_pcie_root_info = {
.parent = TYPE_PCI_BRIDGE,
.instance_size = sizeof(XilinxPCIERoot),
.class_init = xilinx_pcie_root_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index 8104ac1..b9f5b45 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -23,7 +23,7 @@
#include "hw/xen/xen.h"
#include "qemu/range.h"
#include "qapi/error.h"
-#include "sysemu/xen.h"
+#include "system/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 487e498..8c7f670 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -15,11 +15,12 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
-#include "sysemu/xen.h"
+#include "system/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
@@ -71,7 +72,7 @@ static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
return dev->msix_pba + vector / 8;
}
-static int msix_is_pending(PCIDevice *dev, int vector)
+int msix_is_pending(PCIDevice *dev, unsigned int vector)
{
return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}
@@ -250,7 +251,7 @@ static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
PCIDevice *dev = opaque;
if (dev->msix_vector_poll_notifier) {
unsigned vector_start = addr * 8;
- unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
+ unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
}
@@ -260,6 +261,14 @@ static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
static void msix_pba_mmio_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
+ PCIDevice *dev = opaque;
+
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PCI [%s:%02x:%02x.%x] attempt to write to MSI-X "
+ "PBA at 0x%" FMT_PCIBUS ", ignoring.\n",
+ pci_root_bus_path(dev), pci_dev_bus_num(dev),
+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ addr);
}
static const MemoryRegionOps msix_pba_mmio_ops = {
diff --git a/hw/pci/pci-hmp-cmds.c b/hw/pci/pci-hmp-cmds.c
index b09fce9..a5f6483 100644
--- a/hw/pci/pci-hmp-cmds.c
+++ b/hw/pci/pci-hmp-cmds.c
@@ -20,7 +20,7 @@
#include "monitor/monitor.h"
#include "pci-internal.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qapi-commands-pci.h"
#include "qemu/cutils.h"
@@ -83,15 +83,25 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev)
monitor_printf(mon, " BAR%" PRId64 ": ", region->value->bar);
if (!strcmp(region->value->type, "io")) {
- monitor_printf(mon, "I/O at 0x%04" PRIx64
- " [0x%04" PRIx64 "].\n",
- addr, addr + size - 1);
+ if (addr != PCI_BAR_UNMAPPED) {
+ monitor_printf(mon, "I/O at 0x%04" PRIx64
+ " [0x%04" PRIx64 "]\n",
+ addr, addr + size - 1);
+ } else {
+ monitor_printf(mon, "I/O (not mapped)\n");
+ }
} else {
- monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64
- " [0x%08" PRIx64 "].\n",
- region->value->mem_type_64 ? 64 : 32,
- region->value->prefetch ? " prefetchable" : "",
- addr, addr + size - 1);
+ if (addr != PCI_BAR_UNMAPPED) {
+ monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64
+ " [0x%08" PRIx64 "]\n",
+ region->value->mem_type_64 ? 64 : 32,
+ region->value->prefetch ? " prefetchable" : "",
+ addr, addr + size - 1);
+ } else {
+ monitor_printf(mon, "%d bit%s memory (not mapped)\n",
+ region->value->mem_type_64 ? 64 : 32,
+ region->value->prefetch ? " prefetchable" : "");
+ }
}
}
diff --git a/hw/pci/pci-stub.c b/hw/pci/pci-stub.c
index f050868..3397d0c 100644
--- a/hw/pci/pci-stub.c
+++ b/hw/pci/pci-stub.c
@@ -46,14 +46,12 @@ void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict)
/* kvm-all wants this */
MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
{
- g_assert(false);
- return (MSIMessage){};
+ g_assert_not_reached();
}
uint16_t pci_requester_id(PCIDevice *dev)
{
- g_assert(false);
- return 0;
+ g_assert_not_reached();
}
/* Required by ahci.c */
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 4c7be52..c70b5ce 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -32,12 +32,13 @@
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
+#include "migration/cpr.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "net/net.h"
-#include "sysemu/numa.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/numa.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
@@ -46,6 +47,7 @@
#include "hw/pci/msix.h"
#include "hw/hotplug.h"
#include "hw/boards.h"
+#include "hw/nvram/fw_cfg.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "pci-internal.h"
@@ -53,13 +55,6 @@
#include "hw/xen/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"
-//#define DEBUG_PCI
-#ifdef DEBUG_PCI
-# define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
-#else
-# define PCI_DPRINTF(format, ...) do { } while (0)
-#endif
-
bool pci_available = true;
static char *pcibus_get_dev_path(DeviceState *dev);
@@ -67,11 +62,24 @@ static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset_hold(Object *obj, ResetType type);
static bool pcie_has_upstream_port(PCIDevice *dev);
-static Property pci_props[] = {
+static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));
+
+ visit_type_uint8(v, name, &busnr, errp);
+}
+
+static const PropertyInfo prop_pci_busnr = {
+ .type = "busnr",
+ .get = prop_pci_busnr_get,
+};
+
+static const Property pci_props[] = {
DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
- DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
+ DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1),
DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
@@ -85,7 +93,12 @@ static Property pci_props[] = {
QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
- DEFINE_PROP_END_OF_LIST()
+ DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
+ max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
+ DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
+ DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
+ QEMU_PCIE_EXT_TAG_BITNR, true),
+ { .name = "busnr", .info = &prop_pci_busnr },
};
static const VMStateDescription vmstate_pcibus = {
@@ -116,6 +129,12 @@ static GSequence *pci_acpi_index_list(void)
return used_acpi_index_list;
}
+static void pci_set_master(PCIDevice *d, bool enable)
+{
+ memory_region_set_enabled(&d->bus_master_enable_region, enable);
+ d->is_master = enable; /* cache the status */
+}
+
static void pci_init_bus_master(PCIDevice *pci_dev)
{
AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
@@ -123,7 +142,7 @@ static void pci_init_bus_master(PCIDevice *pci_dev)
memory_region_init_alias(&pci_dev->bus_master_enable_region,
OBJECT(pci_dev), "bus master",
dma_as->root, 0, memory_region_size(dma_as->root));
- memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
+ pci_set_master(pci_dev, false);
memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
&pci_dev->bus_master_enable_region);
}
@@ -198,11 +217,57 @@ static uint16_t pcibus_numa_node(PCIBus *bus)
return NUMA_NODE_UNASSIGNED;
}
-static void pci_bus_class_init(ObjectClass *klass, void *data)
+bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
+ PCIBus *bus,
+ Error **errp)
+{
+ Object *obj;
+
+ if (!bus) {
+ return true;
+ }
+ obj = OBJECT(bus);
+
+ return fw_cfg_add_file_from_generator(fw_cfg, obj->parent,
+ object_get_canonical_path_component(obj),
+ "etc/extra-pci-roots", errp);
+}
+
+static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
+{
+ PCIBus *bus = PCI_BUS(obj);
+ GByteArray *byte_array;
+ uint64_t extra_hosts = 0;
+
+ if (!bus) {
+ return NULL;
+ }
+
+ QLIST_FOREACH(bus, &bus->child, sibling) {
+ /* look for expander root buses */
+ if (pci_bus_is_root(bus)) {
+ extra_hosts++;
+ }
+ }
+
+ if (!extra_hosts) {
+ return NULL;
+ }
+ extra_hosts = cpu_to_le64(extra_hosts);
+
+ byte_array = g_byte_array_new();
+ g_byte_array_append(byte_array,
+ (const void *)&extra_hosts, sizeof(extra_hosts));
+
+ return byte_array;
+}
+
+static void pci_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
PCIBusClass *pbc = PCI_BUS_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
+ FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);
k->print_dev = pcibus_dev_print;
k->get_dev_path = pcibus_get_dev_path;
@@ -214,6 +279,8 @@ static void pci_bus_class_init(ObjectClass *klass, void *data)
pbc->bus_num = pcibus_num;
pbc->numa_node = pcibus_numa_node;
+
+ fwgc->get_data = pci_bus_fw_cfg_gen_data;
}
static const TypeInfo pci_bus_info = {
@@ -222,6 +289,10 @@ static const TypeInfo pci_bus_info = {
.instance_size = sizeof(PCIBus),
.class_size = sizeof(PCIBusClass),
.class_init = pci_bus_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
+ { }
+ }
};
static const TypeInfo cxl_interface_info = {
@@ -239,7 +310,7 @@ static const TypeInfo conventional_pci_interface_info = {
.parent = TYPE_INTERFACE,
};
-static void pcie_bus_class_init(ObjectClass *klass, void *data)
+static void pcie_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -365,6 +436,84 @@ static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
attrs, NULL);
}
+/*
+ * Register and track a PM capability. If wmask is also enabled for the power
+ * state field of the pmcsr register, guest writes may change the device PM
+ * state. BAR access is only enabled while the device is in the D0 state.
+ * Return the capability offset or negative error code.
+ */
+int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp)
+{
+ int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp);
+
+ if (cap < 0) {
+ return cap;
+ }
+
+ d->pm_cap = cap;
+ d->cap_present |= QEMU_PCI_CAP_PM;
+
+ return cap;
+}
+
+static uint8_t pci_pm_state(PCIDevice *d)
+{
+ uint16_t pmcsr;
+
+ if (!(d->cap_present & QEMU_PCI_CAP_PM)) {
+ return 0;
+ }
+
+ pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL);
+
+ return pmcsr & PCI_PM_CTRL_STATE_MASK;
+}
+
+/*
+ * Update the PM capability state based on the new value stored in config
+ * space, relative to the old, pre-write state provided. If the new value
+ * is rejected (unsupported or invalid transition), restore the old value.
+ * Return the resulting PM state.
+ */
+static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old)
+{
+ uint16_t pmc;
+ uint8_t new;
+
+ if (!(d->cap_present & QEMU_PCI_CAP_PM) ||
+ !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) {
+ return old;
+ }
+
+ new = pci_pm_state(d);
+ if (new == old) {
+ return old;
+ }
+
+ pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC);
+
+ /*
+ * Transitions to D1 & D2 are only allowed if supported. Devices may
+ * only transition to higher D-states or to D0.
+ */
+ if ((!(pmc & PCI_PM_CAP_D1) && new == 1) ||
+ (!(pmc & PCI_PM_CAP_D2) && new == 2) ||
+ (old && new && new < old)) {
+ pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL,
+ PCI_PM_CTRL_STATE_MASK);
+ pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL,
+ old);
+ trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d),
+ PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
+ old, new);
+ return old;
+ }
+
+ trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
+ PCI_FUNC(d->devfn), old, new);
+ return new;
+}
+
static void pci_reset_regions(PCIDevice *dev)
{
int r;
@@ -389,6 +538,10 @@ static void pci_reset_regions(PCIDevice *dev)
static void pci_do_device_reset(PCIDevice *dev)
{
+ if ((dev->cap_present & QEMU_PCI_SKIP_RESET_ON_CPR) && cpr_is_incoming()) {
+ return;
+ }
+
pci_device_deassert_intx(dev);
assert(dev->irq_state == 0);
@@ -404,6 +557,11 @@ static void pci_do_device_reset(PCIDevice *dev)
pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
+ /* Default PM state is D0 */
+ if (dev->cap_present & QEMU_PCI_CAP_PM) {
+ pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL,
+ PCI_PM_CTRL_STATE_MASK);
+ }
pci_reset_regions(dev);
pci_update_mappings(dev);
@@ -657,9 +815,8 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
pci_bridge_update_mappings(PCI_BRIDGE(s));
}
- memory_region_set_enabled(&s->bus_master_enable_region,
- pci_get_word(s->config + PCI_COMMAND)
- & PCI_COMMAND_MASTER);
+ pci_set_master(s, pci_get_word(s->config + PCI_COMMAND)
+ & PCI_COMMAND_MASTER);
g_free(config);
return 0;
@@ -959,13 +1116,8 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
}
- /*
- * With SR/IOV and ARI, a device at function 0 need not be a multifunction
- * device, as it may just be a VF that ended up with function 0 in
- * the legacy PCI interpretation. Avoid failing in such cases:
- */
- if (pci_is_vf(dev) &&
- dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
+ /* SR/IOV is not handled here. */
+ if (pci_is_vf(dev)) {
return;
}
@@ -998,7 +1150,8 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
}
/* function 0 indicates single function, so function > 0 must be NULL */
for (func = 1; func < PCI_FUNC_MAX; ++func) {
- if (bus->devices[PCI_DEVFN(slot, func)]) {
+ PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)];
+ if (device && !pci_is_vf(device)) {
error_setg(errp, "PCI: %x.0 indicates single function, "
"but %x.%x is already populated.",
slot, slot, func);
@@ -1186,14 +1339,15 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
PCI_SLOT(devfn), PCI_FUNC(devfn), name,
bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
return NULL;
- } /*
- * Populating function 0 triggers a scan from the guest that
- * exposes other non-zero functions. Hence we need to ensure that
- * function 0 wasn't added yet.
- */
- else if (dev->hotplugged &&
- !pci_is_vf(pci_dev) &&
- pci_get_function_0(pci_dev)) {
+ }
+
+ /*
+ * Populating function 0 triggers a scan from the guest that
+ * exposes other non-zero functions. Hence we need to ensure that
+ * function 0 wasn't added yet.
+ */
+ if (dev->hotplugged && !pci_is_vf(pci_dev) &&
+ pci_get_function_0(pci_dev)) {
error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
" new func %s cannot be exposed to guest.",
PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
@@ -1211,6 +1365,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
"bus master container", UINT64_MAX);
address_space_init(&pci_dev->bus_master_as,
&pci_dev->bus_master_container_region, pci_dev->name);
+ pci_dev->bus_master_as.max_bounce_buffer_size =
+ pci_dev->max_bounce_buffer_size;
if (phase_check(PHASE_MACHINE_READY)) {
pci_init_bus_master(pci_dev);
@@ -1283,6 +1439,7 @@ static void pci_qdev_unrealize(DeviceState *dev)
pci_unregister_io_regions(pci_dev);
pci_del_option_rom(pci_dev);
+ pcie_sriov_unregister_device(pci_dev);
if (pc->exit) {
pc->exit(pci_dev);
@@ -1314,7 +1471,6 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
pcibus_t size = memory_region_size(memory);
uint8_t hdr_type;
- assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
assert(region_num >= 0);
assert(region_num < PCI_NUM_REGIONS);
assert(is_power_of_2(size));
@@ -1325,7 +1481,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);
r = &pci_dev->io_regions[region_num];
- r->addr = PCI_BAR_UNMAPPED;
+ assert(!r->size);
r->size = size;
r->type = type;
r->memory = memory;
@@ -1333,22 +1489,35 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
? pci_get_bus(pci_dev)->address_space_io
: pci_get_bus(pci_dev)->address_space_mem;
- wmask = ~(size - 1);
- if (region_num == PCI_ROM_SLOT) {
- /* ROM enable bit is writable */
- wmask |= PCI_ROM_ADDRESS_ENABLE;
- }
-
- addr = pci_bar(pci_dev, region_num);
- pci_set_long(pci_dev->config + addr, type);
+ if (pci_is_vf(pci_dev)) {
+ PCIDevice *pf = pci_dev->exp.sriov_vf.pf;
+ assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]);
- if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
- r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- pci_set_quad(pci_dev->wmask + addr, wmask);
- pci_set_quad(pci_dev->cmask + addr, ~0ULL);
+ r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size);
+ if (r->addr != PCI_BAR_UNMAPPED) {
+ memory_region_add_subregion_overlap(r->address_space,
+ r->addr, r->memory, 1);
+ }
} else {
- pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
- pci_set_long(pci_dev->cmask + addr, 0xffffffff);
+ r->addr = PCI_BAR_UNMAPPED;
+
+ wmask = ~(size - 1);
+ if (region_num == PCI_ROM_SLOT) {
+ /* ROM enable bit is writable */
+ wmask |= PCI_ROM_ADDRESS_ENABLE;
+ }
+
+ addr = pci_bar(pci_dev, region_num);
+ pci_set_long(pci_dev->config + addr, type);
+
+ if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
+ r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_set_quad(pci_dev->wmask + addr, wmask);
+ pci_set_quad(pci_dev->cmask + addr, ~0ULL);
+ } else {
+ pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
+ pci_set_long(pci_dev->cmask + addr, 0xffffffff);
+ }
}
}
@@ -1437,7 +1606,11 @@ static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
uint16_t vf_stride =
pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
- uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;
+ uint32_t vf_num = d->devfn - (pf->devfn + vf_offset);
+
+ if (vf_num) {
+ vf_num /= vf_stride;
+ }
if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
new_addr = pci_get_quad(pf->config + bar);
@@ -1532,7 +1705,7 @@ static void pci_update_mappings(PCIDevice *d)
continue;
new_addr = pci_bar_address(d, i, r->type, r->size);
- if (!d->enabled) {
+ if (!d->enabled || pci_pm_state(d)) {
new_addr = PCI_BAR_UNMAPPED;
}
@@ -1562,7 +1735,7 @@ static void pci_update_mappings(PCIDevice *d)
pci_update_vga(d);
}
-static inline int pci_irq_disabled(PCIDevice *d)
+int pci_irq_disabled(PCIDevice *d)
{
return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}
@@ -1598,6 +1771,7 @@ uint32_t pci_default_read_config(PCIDevice *d,
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
+ uint8_t new_pm_state, old_pm_state = pci_pm_state(d);
int i, was_irq_disabled = pci_irq_disabled(d);
uint32_t val = val_in;
@@ -1610,17 +1784,21 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int
d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
}
+
+ new_pm_state = pci_pm_update(d, addr, l, old_pm_state);
+
if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
- range_covers_byte(addr, l, PCI_COMMAND))
+ range_covers_byte(addr, l, PCI_COMMAND) ||
+ !!new_pm_state != !!old_pm_state) {
pci_update_mappings(d);
+ }
if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
pci_update_irq_disabled(d, was_irq_disabled);
- memory_region_set_enabled(&d->bus_master_enable_region,
- (pci_get_word(d->config + PCI_COMMAND)
- & PCI_COMMAND_MASTER) && d->enabled);
+ pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) &
+ PCI_COMMAND_MASTER) && d->enabled);
}
msi_write_config(d, addr, val_in, l);
@@ -2105,6 +2283,11 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
}
}
+ if (!pcie_sriov_register_device(pci_dev, errp)) {
+ pci_qdev_unrealize(DEVICE(pci_dev));
+ return;
+ }
+
/*
* A PCIe Downstream Port that does not have ARI Forwarding enabled must
* associate only Device 0 with the device attached to the bus
@@ -2276,12 +2459,12 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
/* Only a valid rom will be patched. */
rom_magic = pci_get_word(ptr);
if (rom_magic != 0xaa55) {
- PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
+ trace_pci_bad_rom_magic(rom_magic, 0xaa55);
return;
}
pcir_offset = pci_get_word(ptr + 0x18);
if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
- PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
+ trace_pci_bad_pcir_offset(pcir_offset);
return;
}
@@ -2290,8 +2473,8 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
rom_device_id = pci_get_word(ptr + pcir_offset + 6);
- PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
- vendor_id, device_id, rom_vendor_id, rom_device_id);
+ trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id,
+ rom_vendor_id, rom_device_id);
checksum = ptr[6];
@@ -2299,7 +2482,7 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
/* Patch vendor id and checksum (at offset 6 for etherboot roms). */
checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
- PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
+ trace_pci_rom_checksum_change(ptr[6], checksum);
ptr[6] = checksum;
pci_set_word(ptr + pcir_offset + 4, vendor_id);
}
@@ -2308,7 +2491,7 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
/* Patch device id and checksum (at offset 6 for etherboot roms). */
checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
- PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
+ trace_pci_rom_checksum_change(ptr[6], checksum);
ptr[6] = checksum;
pci_set_word(ptr + pcir_offset + 6, device_id);
}
@@ -2359,6 +2542,14 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
return;
}
+ if (pci_is_vf(pdev)) {
+ if (pdev->rom_bar > 0) {
+ error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF");
+ }
+
+ return;
+ }
+
if (load_file || pdev->romsize == UINT32_MAX) {
path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
if (path == NULL) {
@@ -2632,7 +2823,7 @@ MemoryRegion *pci_address_space_io(PCIDevice *dev)
return pci_get_bus(dev)->address_space_io;
}
-static void pci_device_class_init(ObjectClass *klass, void *data)
+static void pci_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
@@ -2640,9 +2831,13 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
k->unrealize = pci_qdev_unrealize;
k->bus_type = TYPE_PCI_BUS;
device_class_set_props(k, pci_props);
+ object_class_property_set_description(
+ klass, "x-max-bounce-buffer-size",
+ "Maximum buffer size allocated for bounce buffers used for mapped "
+ "access to indirect DMA memory");
}
-static void pci_device_class_base_init(ObjectClass *klass, void *data)
+static void pci_device_class_base_init(ObjectClass *klass, const void *data)
{
if (!object_class_is_abstract(klass)) {
ObjectClass *conventional =
@@ -2749,6 +2944,23 @@ AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
return &address_space_memory;
}
+int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n,
+ IOMMUNotify fn, void *opaque)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) {
+ iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque,
+ devfn, n, fn, opaque);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
Error **errp)
{
@@ -2780,6 +2992,170 @@ void pci_device_unset_iommu_device(PCIDevice *dev)
}
}
+int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req,
+ bool exec_req, hwaddr addr, bool lpig,
+ uint16_t prgi, bool is_read, bool is_write)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ if (!dev->is_master ||
+ ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
+ return -EPERM;
+ }
+
+ if (!pcie_pri_enabled(dev)) {
+ return -EPERM;
+ }
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) {
+ return iommu_bus->iommu_ops->pri_request_page(bus,
+ iommu_bus->iommu_opaque,
+ devfn, pasid, priv_req,
+ exec_req, addr, lpig, prgi,
+ is_read, is_write);
+ }
+
+ return -ENODEV;
+}
+
+int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUPRINotifier *notifier)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ if (!dev->is_master ||
+ ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
+ return -EPERM;
+ }
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) {
+ iommu_bus->iommu_ops->pri_register_notifier(bus,
+ iommu_bus->iommu_opaque,
+ devfn, pasid, notifier);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) {
+ iommu_bus->iommu_ops->pri_unregister_notifier(bus,
+ iommu_bus->iommu_opaque,
+ devfn, pasid);
+ }
+}
+
+ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
+ bool priv_req, bool exec_req,
+ hwaddr addr, size_t length,
+ bool no_write, IOMMUTLBEntry *result,
+ size_t result_length,
+ uint32_t *err_count)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ if (!dev->is_master ||
+ ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
+ return -EPERM;
+ }
+
+ if (result_length == 0) {
+ return -ENOSPC;
+ }
+
+ if (!pcie_ats_enabled(dev)) {
+ return -EPERM;
+ }
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) {
+ return iommu_bus->iommu_ops->ats_request_translation(bus,
+ iommu_bus->iommu_opaque,
+ devfn, pasid, priv_req,
+ exec_req, addr, length,
+ no_write, result,
+ result_length, err_count);
+ }
+
+ return -ENODEV;
+}
+
+int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUNotifier *n)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
+ return -EPERM;
+ }
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) {
+ iommu_bus->iommu_ops->register_iotlb_notifier(bus,
+ iommu_bus->iommu_opaque, devfn,
+ pasid, n);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUNotifier *n)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
+ return -EPERM;
+ }
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) {
+ iommu_bus->iommu_ops->unregister_iotlb_notifier(bus,
+ iommu_bus->iommu_opaque,
+ devfn, pasid, n);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width,
+ uint32_t *min_page_size)
+{
+ PCIBus *bus;
+ PCIBus *iommu_bus;
+ int devfn;
+
+ pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
+ if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) {
+ iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque,
+ addr_width, min_page_size);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
{
/*
@@ -2891,6 +3267,21 @@ MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
return msg;
}
+void pci_set_power(PCIDevice *d, bool state)
+{
+ /*
+ * Don't change the enabled state of VFs when powering on/off the device.
+ *
+ * When powering on, VFs must not be enabled immediately but they must
+ * wait until the guest configures SR-IOV.
+ * When powering off, the corresponding PF will be reset and will
+ * disable its VFs.
+ */
+ if (!pci_is_vf(d)) {
+ pci_set_enabled(d, state);
+ }
+}
+
void pci_set_enabled(PCIDevice *d, bool state)
{
if (d->enabled == state) {
@@ -2899,10 +3290,9 @@ void pci_set_enabled(PCIDevice *d, bool state)
d->enabled = state;
pci_update_mappings(d);
- memory_region_set_enabled(&d->bus_master_enable_region,
- (pci_get_word(d->config + PCI_COMMAND)
- & PCI_COMMAND_MASTER) && d->enabled);
- if (d->qdev.realized) {
+ pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND)
+ & PCI_COMMAND_MASTER) && d->enabled);
+ if (qdev_is_realized(&d->qdev)) {
pci_device_reset(d);
}
}
diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
index 6a4e388..76255c4 100644
--- a/hw/pci/pci_bridge.c
+++ b/hw/pci/pci_bridge.c
@@ -380,9 +380,12 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename)
sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn;
sec_bus->address_space_mem = &br->address_space_mem;
memory_region_init(&br->address_space_mem, OBJECT(br), "pci_bridge_pci", UINT64_MAX);
+ address_space_init(&br->as_mem, &br->address_space_mem,
+ "pci_bridge_pci_mem");
sec_bus->address_space_io = &br->address_space_io;
memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io",
4 * GiB);
+ address_space_init(&br->as_io, &br->address_space_io, "pci_bridge_pci_io");
pci_bridge_region_init(br);
QLIST_INIT(&sec_bus->child);
QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
@@ -399,6 +402,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev)
PCIBridge *s = PCI_BRIDGE(pci_dev);
assert(QLIST_EMPTY(&s->sec_bus.child));
QLIST_REMOVE(&s->sec_bus, sibling);
+ address_space_destroy(&s->as_mem);
+ address_space_destroy(&s->as_io);
pci_bridge_region_del(s, &s->windows);
pci_bridge_region_cleanup(s, &s->windows);
/* object_unparent() is called automatically during device deletion */
@@ -472,13 +477,12 @@ int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset,
return 0;
}
-static Property pci_bridge_properties[] = {
+static const Property pci_bridge_properties[] = {
DEFINE_PROP_BOOL("x-pci-express-writeable-slt-bug", PCIBridge,
pcie_writeable_slt_bug, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pci_bridge_class_init(ObjectClass *klass, void *data)
+static void pci_bridge_class_init(ObjectClass *klass, const void *data)
{
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
DeviceClass *k = DEVICE_CLASS(klass);
@@ -493,7 +497,7 @@ static const TypeInfo pci_bridge_type_info = {
.instance_size = sizeof(PCIBridge),
.class_init = pci_bridge_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index 0d82727..7179d99 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -217,12 +217,6 @@ const MemoryRegionOps pci_host_data_le_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-const MemoryRegionOps pci_host_data_be_ops = {
- .read = pci_host_data_read,
- .write = pci_host_data_write,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
static bool pci_host_needed(void *opaque)
{
PCIHostState *s = opaque;
@@ -240,14 +234,13 @@ const VMStateDescription vmstate_pcihost = {
}
};
-static Property pci_host_properties_common[] = {
+static const Property pci_host_properties_common[] = {
DEFINE_PROP_BOOL("x-config-reg-migration-enabled", PCIHostState,
mig_enabled, true),
DEFINE_PROP_BOOL(PCI_HOST_BYPASS_IOMMU, PCIHostState, bypass_iommu, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pci_host_class_init(ObjectClass *klass, void *data)
+static void pci_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, pci_host_properties_common);
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index 4b2f080..eaeb688 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -86,7 +86,13 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
* Specification, Revision 1.1., or subsequent PCI Express Base
* Specification revisions.
*/
- pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);
+ uint32_t devcap = PCI_EXP_DEVCAP_RBER;
+
+ if (dev->cap_present & QEMU_PCIE_EXT_TAG) {
+ devcap = PCI_EXP_DEVCAP_RBER | PCI_EXP_DEVCAP_EXT_TAG;
+ }
+
+ pci_set_long(exp_cap + PCI_EXP_DEVCAP, devcap);
pci_set_long(exp_cap + PCI_EXP_LNKCAP,
(port << PCI_EXP_LNKCAP_PN_SHIFT) |
@@ -105,46 +111,18 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
}
-static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
+/* Includes setting the target speed default */
+static void pcie_cap_fill_lnk(uint8_t *exp_cap, PCIExpLinkWidth width,
+ PCIExpLinkSpeed speed)
{
- PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT);
- uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
-
- /* Skip anything that isn't a PCIESlot */
- if (!s) {
- return;
- }
-
/* Clear and fill LNKCAP from what was configured above */
pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP,
PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
- QEMU_PCI_EXP_LNKCAP_MLW(s->width) |
- QEMU_PCI_EXP_LNKCAP_MLS(s->speed));
-
- /*
- * Link bandwidth notification is required for all root ports and
- * downstream ports supporting links wider than x1 or multiple link
- * speeds.
- */
- if (s->width > QEMU_PCI_EXP_LNK_X1 ||
- s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
- pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
- PCI_EXP_LNKCAP_LBNC);
- }
-
- if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
- /*
- * Hot-plug capable downstream ports and downstream ports supporting
- * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
- * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
- * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also
- * technically implement this, but it's not done here for compatibility.
- */
- pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
- PCI_EXP_LNKCAP_DLLLARC);
- /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */
+ QEMU_PCI_EXP_LNKCAP_MLW(width) |
+ QEMU_PCI_EXP_LNKCAP_MLS(speed));
+ if (speed > QEMU_PCI_EXP_LNK_2_5GT) {
/*
* Target Link Speed defaults to the highest link speed supported by
* the component. 2.5GT/s devices are permitted to hardwire to zero.
@@ -152,7 +130,7 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS);
pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2,
- QEMU_PCI_EXP_LNKCAP_MLS(s->speed) &
+ QEMU_PCI_EXP_LNKCAP_MLS(speed) &
PCI_EXP_LNKCTL2_TLS);
}
@@ -161,27 +139,82 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
* actually a reference to the highest bit supported in this register.
* We assume the device supports all link speeds.
*/
- if (s->speed > QEMU_PCI_EXP_LNK_5GT) {
+ if (speed > QEMU_PCI_EXP_LNK_5GT) {
pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U);
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
PCI_EXP_LNKCAP2_SLS_2_5GB |
PCI_EXP_LNKCAP2_SLS_5_0GB |
PCI_EXP_LNKCAP2_SLS_8_0GB);
- if (s->speed > QEMU_PCI_EXP_LNK_8GT) {
+ if (speed > QEMU_PCI_EXP_LNK_8GT) {
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
PCI_EXP_LNKCAP2_SLS_16_0GB);
}
- if (s->speed > QEMU_PCI_EXP_LNK_16GT) {
+ if (speed > QEMU_PCI_EXP_LNK_16GT) {
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
PCI_EXP_LNKCAP2_SLS_32_0GB);
}
- if (s->speed > QEMU_PCI_EXP_LNK_32GT) {
+ if (speed > QEMU_PCI_EXP_LNK_32GT) {
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
PCI_EXP_LNKCAP2_SLS_64_0GB);
}
}
}
+void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width,
+ PCIExpLinkSpeed speed)
+{
+ uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
+
+ /*
+ * For an endpoint or USP we need to set the current status as well
+ * as the capabilities.
+ */
+ pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
+ PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
+ pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
+ QEMU_PCI_EXP_LNKSTA_NLW(width) |
+ QEMU_PCI_EXP_LNKSTA_CLS(speed));
+
+ pcie_cap_fill_lnk(exp_cap, width, speed);
+}
+
+static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
+{
+ PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT);
+ uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
+
+ /* Skip anything that isn't a PCIESlot */
+ if (!s) {
+ return;
+ }
+
+ /*
+ * Link bandwidth notification is required for all root ports and
+ * downstream ports supporting links wider than x1 or multiple link
+ * speeds.
+ */
+ if (s->width > QEMU_PCI_EXP_LNK_X1 ||
+ s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
+ pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
+ PCI_EXP_LNKCAP_LBNC);
+ }
+
+ if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
+ /*
+ * Hot-plug capable downstream ports and downstream ports supporting
+ * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
+ * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
+ * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also
+ * technically implement this, but it's not done here for compatibility.
+ */
+ pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
+ PCI_EXP_LNKCAP_DLLLARC);
+ /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */
+ }
+
+ pcie_cap_fill_lnk(exp_cap, s->width, s->speed);
+}
+
int pcie_cap_init(PCIDevice *dev, uint8_t offset,
uint8_t type, uint8_t port,
Error **errp)
@@ -1080,18 +1113,22 @@ void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) {
lnksta &= ~PCI_EXP_LNKSTA_NLW;
lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW;
- } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
- lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
}
if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) {
lnksta &= ~PCI_EXP_LNKSTA_CLS;
lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS;
- } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
- lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
}
}
+ if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
+ lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
+ }
+
+ if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
+ lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
+ }
+
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta &
@@ -1177,3 +1214,81 @@ void pcie_acs_reset(PCIDevice *dev)
pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0);
}
}
+
+/* PASID */
+void pcie_pasid_init(PCIDevice *dev, uint16_t offset, uint8_t pasid_width,
+ bool exec_perm, bool priv_mod)
+{
+ static const uint16_t control_reg_rw_mask = 0x07;
+ uint16_t capability_reg;
+
+ assert(pasid_width <= PCI_EXT_CAP_PASID_MAX_WIDTH);
+
+ pcie_add_capability(dev, PCI_EXT_CAP_ID_PASID, PCI_PASID_VER, offset,
+ PCI_EXT_CAP_PASID_SIZEOF);
+
+ capability_reg = ((uint16_t)pasid_width) << PCI_PASID_CAP_WIDTH_SHIFT;
+ capability_reg |= exec_perm ? PCI_PASID_CAP_EXEC : 0;
+ capability_reg |= priv_mod ? PCI_PASID_CAP_PRIV : 0;
+ pci_set_word(dev->config + offset + PCI_PASID_CAP, capability_reg);
+
+ /* Everything is disabled by default */
+ pci_set_word(dev->config + offset + PCI_PASID_CTRL, 0);
+
+ pci_set_word(dev->wmask + offset + PCI_PASID_CTRL, control_reg_rw_mask);
+
+ dev->exp.pasid_cap = offset;
+}
+
+/* PRI */
+void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
+ bool prg_response_pasid_req)
+{
+ static const uint16_t control_reg_rw_mask = 0x3;
+ static const uint16_t status_reg_rw1_mask = 0x3;
+ static const uint32_t pr_alloc_reg_rw_mask = 0xffffffff;
+ uint16_t status_reg;
+
+ status_reg = prg_response_pasid_req ? PCI_PRI_STATUS_PASID : 0;
+ status_reg |= PCI_PRI_STATUS_STOPPED; /* Stopped by default */
+
+ pcie_add_capability(dev, PCI_EXT_CAP_ID_PRI, PCI_PRI_VER, offset,
+ PCI_EXT_CAP_PRI_SIZEOF);
+ /* Disabled by default */
+
+ pci_set_word(dev->config + offset + PCI_PRI_STATUS, status_reg);
+ pci_set_long(dev->config + offset + PCI_PRI_MAX_REQ, outstanding_pr_cap);
+
+ pci_set_word(dev->wmask + offset + PCI_PRI_CTRL, control_reg_rw_mask);
+ pci_set_word(dev->w1cmask + offset + PCI_PRI_STATUS, status_reg_rw1_mask);
+ pci_set_long(dev->wmask + offset + PCI_PRI_ALLOC_REQ, pr_alloc_reg_rw_mask);
+
+ dev->exp.pri_cap = offset;
+}
+
+bool pcie_pri_enabled(const PCIDevice *dev)
+{
+ if (!pci_is_express(dev) || !dev->exp.pri_cap) {
+ return false;
+ }
+ return (pci_get_word(dev->config + dev->exp.pri_cap + PCI_PRI_CTRL) &
+ PCI_PRI_CTRL_ENABLE) != 0;
+}
+
+bool pcie_pasid_enabled(const PCIDevice *dev)
+{
+ if (!pci_is_express(dev) || !dev->exp.pasid_cap) {
+ return false;
+ }
+ return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) &
+ PCI_PASID_CTRL_ENABLE) != 0;
+}
+
+bool pcie_ats_enabled(const PCIDevice *dev)
+{
+ if (!pci_is_express(dev) || !dev->exp.ats_cap) {
+ return false;
+ }
+ return (pci_get_word(dev->config + dev->exp.ats_cap + PCI_ATS_CTRL) &
+ PCI_ATS_CTRL_ENABLE) != 0;
+}
diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c
index 20ff2b3..f3841a2 100644
--- a/hw/pci/pcie_port.c
+++ b/hw/pci/pcie_port.c
@@ -92,16 +92,6 @@ static PCIESlot *pcie_chassis_find_slot_with_chassis(struct PCIEChassis *c,
return s;
}
-PCIESlot *pcie_chassis_find_slot(uint8_t chassis_number, uint16_t slot)
-{
- struct PCIEChassis *c;
- c = pcie_chassis_find(chassis_number);
- if (!c) {
- return NULL;
- }
- return pcie_chassis_find_slot_with_chassis(c, slot);
-}
-
int pcie_chassis_add_slot(struct PCIESlot *slot)
{
struct PCIEChassis *c;
@@ -121,15 +111,14 @@ void pcie_chassis_del_slot(PCIESlot *s)
QLIST_REMOVE(s, next);
}
-static Property pcie_port_props[] = {
+static const Property pcie_port_props[] = {
DEFINE_PROP_UINT8("port", PCIEPort, port, 0),
DEFINE_PROP_UINT16("aer_log_max", PCIEPort,
parent_obj.parent_obj.exp.aer_log.log_max,
PCIE_AER_LOG_MAX_DEFAULT),
- DEFINE_PROP_END_OF_LIST()
};
-static void pcie_port_class_init(ObjectClass *oc, void *data)
+static void pcie_port_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -199,7 +188,7 @@ int pcie_count_ds_ports(PCIBus *bus)
return dsp_count;
}
-static bool pcie_slot_is_hotpluggbale_bus(HotplugHandler *plug_handler,
+static bool pcie_slot_is_hotpluggable_bus(HotplugHandler *plug_handler,
BusState *bus)
{
PCIESlot *s = PCIE_SLOT(bus->parent);
@@ -214,16 +203,15 @@ static const TypeInfo pcie_port_type_info = {
.class_init = pcie_port_class_init,
};
-static Property pcie_slot_props[] = {
+static const Property pcie_slot_props[] = {
DEFINE_PROP_UINT8("chassis", PCIESlot, chassis, 0),
DEFINE_PROP_UINT16("slot", PCIESlot, slot, 0),
DEFINE_PROP_BOOL("hotplug", PCIESlot, hotplug, true),
DEFINE_PROP_BOOL("x-do-not-expose-native-hotplug-cap", PCIESlot,
hide_native_hotplug_cap, false),
- DEFINE_PROP_END_OF_LIST()
};
-static void pcie_slot_class_init(ObjectClass *oc, void *data)
+static void pcie_slot_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -233,7 +221,7 @@ static void pcie_slot_class_init(ObjectClass *oc, void *data)
hc->plug = pcie_cap_slot_plug_cb;
hc->unplug = pcie_cap_slot_unplug_cb;
hc->unplug_request = pcie_cap_slot_unplug_request_cb;
- hc->is_hotpluggable_bus = pcie_slot_is_hotpluggbale_bus;
+ hc->is_hotpluggable_bus = pcie_slot_is_hotpluggable_bus;
}
static const TypeInfo pcie_slot_type_info = {
@@ -242,7 +230,7 @@ static const TypeInfo pcie_slot_type_info = {
.instance_size = sizeof(PCIESlot),
.abstract = true,
.class_init = pcie_slot_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
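
The pcie_port.c hunks above follow the tree-wide conversion to const Property arrays without a DEFINE_PROP_END_OF_LIST() terminator, plus class_init callbacks taking const void *data. A hedged sketch of the same pattern on a made-up device; MyDevState and the my_dev_* names are illustrative only.

/*
 * Hedged sketch, not from the patch: in this tree the terminating
 * DEFINE_PROP_END_OF_LIST() entry is no longer needed because
 * device_class_set_props() derives the element count from the array itself.
 */
typedef struct MyDevState {
    DeviceState parent_obj;
    uint8_t port;
} MyDevState;

static const Property my_dev_props[] = {
    DEFINE_PROP_UINT8("port", MyDevState, port, 0),
};

static void my_dev_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_props(dc, my_dev_props);
}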
diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c
index 56523ab..3ad1874 100644
--- a/hw/pci/pcie_sriov.c
+++ b/hw/pci/pcie_sriov.c
@@ -15,11 +15,12 @@
#include "hw/pci/pcie.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
-#include "qemu/error-report.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"
+static GHashTable *pfs;
+
static void unparent_vfs(PCIDevice *dev, uint16_t total_vfs)
{
for (uint16_t i = 0; i < total_vfs; i++) {
@@ -31,27 +32,61 @@ static void unparent_vfs(PCIDevice *dev, uint16_t total_vfs)
dev->exp.sriov_pf.vf = NULL;
}
-bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
- const char *vfname, uint16_t vf_dev_id,
- uint16_t init_vfs, uint16_t total_vfs,
- uint16_t vf_offset, uint16_t vf_stride,
- Error **errp)
+static void register_vfs(PCIDevice *dev)
+{
+ uint16_t num_vfs;
+ uint16_t i;
+ uint16_t sriov_cap = dev->exp.sriov_cap;
+
+ assert(sriov_cap > 0);
+ num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
+
+ trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), num_vfs);
+ for (i = 0; i < num_vfs; i++) {
+ pci_set_enabled(dev->exp.sriov_pf.vf[i], true);
+ }
+
+ pci_set_word(dev->wmask + sriov_cap + PCI_SRIOV_NUM_VF, 0);
+}
+
+static void unregister_vfs(PCIDevice *dev)
+{
+ uint8_t *cfg = dev->config + dev->exp.sriov_cap;
+ uint16_t i;
+
+ trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ for (i = 0; i < pci_get_word(cfg + PCI_SRIOV_TOTAL_VF); i++) {
+ pci_set_enabled(dev->exp.sriov_pf.vf[i], false);
+ }
+
+ pci_set_word(dev->wmask + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0xffff);
+}
+
+static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset,
+ uint16_t vf_dev_id, uint16_t init_vfs,
+ uint16_t total_vfs, uint16_t vf_offset,
+ uint16_t vf_stride, Error **errp)
{
- BusState *bus = qdev_get_parent_bus(&dev->qdev);
int32_t devfn = dev->devfn + vf_offset;
uint8_t *cfg = dev->config + offset;
uint8_t *wmask;
- if (total_vfs) {
- uint16_t ari_cap = pcie_find_capability(dev, PCI_EXT_CAP_ID_ARI);
- uint16_t first_vf_devfn = dev->devfn + vf_offset;
- uint16_t last_vf_devfn = first_vf_devfn + vf_stride * (total_vfs - 1);
+ if (!pci_is_express(dev)) {
+ error_setg(errp, "PCI Express is required for SR-IOV PF");
+ return false;
+ }
- if ((!ari_cap && PCI_SLOT(dev->devfn) != PCI_SLOT(last_vf_devfn)) ||
- last_vf_devfn >= PCI_DEVFN_MAX) {
- error_setg(errp, "VF function number overflows");
- return false;
- }
+ if (pci_is_vf(dev)) {
+ error_setg(errp, "a device cannot be a SR-IOV PF and a VF at the same time");
+ return false;
+ }
+
+ if (total_vfs &&
+ (uint32_t)devfn + (uint32_t)(total_vfs - 1) * vf_stride >= PCI_DEVFN_MAX) {
+ error_setg(errp, "VF addr overflows");
+ return false;
}
pcie_add_capability(dev, PCI_EXT_CAP_ID_SRIOV, 1,
@@ -90,6 +125,28 @@ bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
qdev_prop_set_bit(&dev->qdev, "multifunction", true);
+ return true;
+}
+
+bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
+ const char *vfname, uint16_t vf_dev_id,
+ uint16_t init_vfs, uint16_t total_vfs,
+ uint16_t vf_offset, uint16_t vf_stride,
+ Error **errp)
+{
+ BusState *bus = qdev_get_parent_bus(&dev->qdev);
+ int32_t devfn = dev->devfn + vf_offset;
+
+ if (pfs && g_hash_table_contains(pfs, dev->qdev.id)) {
+ error_setg(errp, "attaching user-created SR-IOV VF unsupported");
+ return false;
+ }
+
+ if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, init_vfs,
+ total_vfs, vf_offset, vf_stride, errp)) {
+ return false;
+ }
+
dev->exp.sriov_pf.vf = g_new(PCIDevice *, total_vfs);
for (uint16_t i = 0; i < total_vfs; i++) {
@@ -119,7 +176,22 @@ void pcie_sriov_pf_exit(PCIDevice *dev)
{
uint8_t *cfg = dev->config + dev->exp.sriov_cap;
- unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF));
+ if (dev->exp.sriov_pf.vf_user_created) {
+ uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
+ uint16_t total_vfs = pci_get_word(dev->config + PCI_SRIOV_TOTAL_VF);
+ uint16_t vf_dev_id = pci_get_word(dev->config + PCI_SRIOV_VF_DID);
+
+ unregister_vfs(dev);
+
+ for (uint16_t i = 0; i < total_vfs; i++) {
+ dev->exp.sriov_pf.vf[i]->exp.sriov_vf.pf = NULL;
+
+ pci_config_set_vendor_id(dev->exp.sriov_pf.vf[i]->config, ven_id);
+ pci_config_set_device_id(dev->exp.sriov_pf.vf[i]->config, vf_dev_id);
+ }
+ } else {
+ unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF));
+ }
}
void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
@@ -152,74 +224,178 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
MemoryRegion *memory)
{
- PCIIORegion *r;
- PCIBus *bus = pci_get_bus(dev);
uint8_t type;
- pcibus_t size = memory_region_size(memory);
- assert(pci_is_vf(dev)); /* PFs must use pci_register_bar */
- assert(region_num >= 0);
- assert(region_num < PCI_NUM_REGIONS);
+ assert(dev->exp.sriov_vf.pf);
type = dev->exp.sriov_vf.pf->exp.sriov_pf.vf_bar_type[region_num];
- if (!is_power_of_2(size)) {
- error_report("%s: PCI region size must be a power"
- " of two - type=0x%x, size=0x%"FMT_PCIBUS,
- __func__, type, size);
- exit(1);
- }
-
- r = &dev->io_regions[region_num];
- r->memory = memory;
- r->address_space =
- type & PCI_BASE_ADDRESS_SPACE_IO
- ? bus->address_space_io
- : bus->address_space_mem;
- r->size = size;
- r->type = type;
-
- r->addr = pci_bar_address(dev, region_num, r->type, r->size);
- if (r->addr != PCI_BAR_UNMAPPED) {
- memory_region_add_subregion_overlap(r->address_space,
- r->addr, r->memory, 1);
- }
+ return pci_register_bar(dev, region_num, type, memory);
}
-static void clear_ctrl_vfe(PCIDevice *dev)
+static gint compare_vf_devfns(gconstpointer a, gconstpointer b)
{
- uint8_t *ctrl = dev->config + dev->exp.sriov_cap + PCI_SRIOV_CTRL;
- pci_set_word(ctrl, pci_get_word(ctrl) & ~PCI_SRIOV_CTRL_VFE);
+ return (*(PCIDevice **)a)->devfn - (*(PCIDevice **)b)->devfn;
}
-static void register_vfs(PCIDevice *dev)
+int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev,
+ uint16_t offset,
+ Error **errp)
{
- uint16_t num_vfs;
+ GPtrArray *pf;
+ PCIDevice **vfs;
+ BusState *bus = qdev_get_parent_bus(DEVICE(dev));
+ uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
+ uint16_t size = PCI_EXT_CAP_SRIOV_SIZEOF;
+ uint16_t vf_dev_id;
+ uint16_t vf_offset;
+ uint16_t vf_stride;
uint16_t i;
- uint16_t sriov_cap = dev->exp.sriov_cap;
- assert(sriov_cap > 0);
- num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
- if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) {
- clear_ctrl_vfe(dev);
- return;
+ if (!pfs || !dev->qdev.id) {
+ return 0;
}
- trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), num_vfs);
- for (i = 0; i < num_vfs; i++) {
- pci_set_enabled(dev->exp.sriov_pf.vf[i], true);
+ pf = g_hash_table_lookup(pfs, dev->qdev.id);
+ if (!pf) {
+ return 0;
+ }
+
+ if (pf->len > UINT16_MAX) {
+ error_setg(errp, "too many VFs");
+ return -1;
}
+
+ g_ptr_array_sort(pf, compare_vf_devfns);
+ vfs = (void *)pf->pdata;
+
+ if (vfs[0]->devfn <= dev->devfn) {
+ error_setg(errp, "a VF function number is less than the PF function number");
+ return -1;
+ }
+
+ vf_dev_id = pci_get_word(vfs[0]->config + PCI_DEVICE_ID);
+ vf_offset = vfs[0]->devfn - dev->devfn;
+ vf_stride = pf->len < 2 ? 0 : vfs[1]->devfn - vfs[0]->devfn;
+
+ for (i = 0; i < pf->len; i++) {
+ if (bus != qdev_get_parent_bus(&vfs[i]->qdev)) {
+ error_setg(errp, "SR-IOV VF parent bus mismatches with PF");
+ return -1;
+ }
+
+ if (ven_id != pci_get_word(vfs[i]->config + PCI_VENDOR_ID)) {
+ error_setg(errp, "SR-IOV VF vendor ID mismatches with PF");
+ return -1;
+ }
+
+ if (vf_dev_id != pci_get_word(vfs[i]->config + PCI_DEVICE_ID)) {
+ error_setg(errp, "inconsistent SR-IOV VF device IDs");
+ return -1;
+ }
+
+ for (size_t j = 0; j < PCI_NUM_REGIONS; j++) {
+ if (vfs[i]->io_regions[j].size != vfs[0]->io_regions[j].size ||
+ vfs[i]->io_regions[j].type != vfs[0]->io_regions[j].type) {
+ error_setg(errp, "inconsistent SR-IOV BARs");
+ return -1;
+ }
+ }
+
+ if (vfs[i]->devfn - vfs[0]->devfn != vf_stride * i) {
+ error_setg(errp, "inconsistent SR-IOV stride");
+ return -1;
+ }
+ }
+
+ if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, pf->len,
+ pf->len, vf_offset, vf_stride, errp)) {
+ return -1;
+ }
+
+ if (!pcie_find_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ pcie_ari_init(dev, offset + size);
+ size += PCI_ARI_SIZEOF;
+ }
+
+ for (i = 0; i < pf->len; i++) {
+ vfs[i]->exp.sriov_vf.pf = dev;
+ vfs[i]->exp.sriov_vf.vf_number = i;
+
+ /* set vid/did according to sr/iov spec - they are not used */
+ pci_config_set_vendor_id(vfs[i]->config, 0xffff);
+ pci_config_set_device_id(vfs[i]->config, 0xffff);
+ }
+
+ dev->exp.sriov_pf.vf = vfs;
+ dev->exp.sriov_pf.vf_user_created = true;
+
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ PCIIORegion *region = &vfs[0]->io_regions[i];
+
+ if (region->size) {
+ pcie_sriov_pf_init_vf_bar(dev, i, region->type, region->size);
+ }
+ }
+
+ return size;
}
-static void unregister_vfs(PCIDevice *dev)
+bool pcie_sriov_register_device(PCIDevice *dev, Error **errp)
{
- uint16_t i;
- uint8_t *cfg = dev->config + dev->exp.sriov_cap;
+ if (!dev->exp.sriov_pf.vf && dev->qdev.id &&
+ pfs && g_hash_table_contains(pfs, dev->qdev.id)) {
+ error_setg(errp, "attaching user-created SR-IOV VF unsupported");
+ return false;
+ }
- trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
- for (i = 0; i < pci_get_word(cfg + PCI_SRIOV_TOTAL_VF); i++) {
- pci_set_enabled(dev->exp.sriov_pf.vf[i], false);
+ if (dev->sriov_pf) {
+ PCIDevice *pci_pf;
+ GPtrArray *pf;
+
+ if (!PCI_DEVICE_GET_CLASS(dev)->sriov_vf_user_creatable) {
+ error_setg(errp, "user cannot create SR-IOV VF with this device type");
+ return false;
+ }
+
+ if (!pci_is_express(dev)) {
+ error_setg(errp, "PCI Express is required for SR-IOV VF");
+ return false;
+ }
+
+ if (!pci_qdev_find_device(dev->sriov_pf, &pci_pf)) {
+ error_setg(errp, "PCI device specified as SR-IOV PF already exists");
+ return false;
+ }
+
+ if (!pfs) {
+ pfs = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
+ }
+
+ pf = g_hash_table_lookup(pfs, dev->sriov_pf);
+ if (!pf) {
+ pf = g_ptr_array_new();
+ g_hash_table_insert(pfs, g_strdup(dev->sriov_pf), pf);
+ }
+
+ g_ptr_array_add(pf, dev);
+ }
+
+ return true;
+}
+
+void pcie_sriov_unregister_device(PCIDevice *dev)
+{
+ if (dev->sriov_pf && pfs) {
+ GPtrArray *pf = g_hash_table_lookup(pfs, dev->sriov_pf);
+
+ if (pf) {
+ g_ptr_array_remove_fast(pf, dev);
+
+ if (!pf->len) {
+ g_hash_table_remove(pfs, dev->sriov_pf);
+ g_ptr_array_free(pf, FALSE);
+ }
+ }
}
}
@@ -247,8 +423,16 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
unregister_vfs(dev);
}
} else if (range_covers_byte(off, len, PCI_SRIOV_NUM_VF)) {
- clear_ctrl_vfe(dev);
- unregister_vfs(dev);
+ uint8_t *cfg = dev->config + sriov_cap;
+ uint8_t *wmask = dev->wmask + sriov_cap;
+ uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF);
+ uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI;
+
+ if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) {
+ wmask_val |= PCI_SRIOV_CTRL_VFE;
+ }
+
+ pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val);
}
}
@@ -272,6 +456,8 @@ void pcie_sriov_pf_reset(PCIDevice *dev)
unregister_vfs(dev);
pci_set_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF, 0);
+ pci_set_word(dev->wmask + sriov_cap + PCI_SRIOV_CTRL,
+ PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI);
/*
* Default is to use 4K pages, software can modify it
@@ -306,7 +492,7 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize)
uint16_t pcie_sriov_vf_number(PCIDevice *dev)
{
- assert(pci_is_vf(dev));
+ assert(dev->exp.sriov_vf.pf);
return dev->exp.sriov_vf.vf_number;
}
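
The new pcie_sriov_pf_init_from_user_created_vfs() entry point lets a PF model adopt VFs that were created separately and linked through the dev->sriov_pf reference handled by pcie_sriov_register_device(). A hedged sketch of a caller follows; MY_SRIOV_CAP_OFFSET and my_pf_realize() are illustrative names, not taken from the patch.

/*
 * Hedged sketch, not part of the patch.  Per the code above, the helper
 * returns the number of config-space bytes it consumed (the SR-IOV
 * capability, plus an ARI capability when one was added), 0 when no
 * user-created VFs reference this PF, and -1 on error.
 */
#define MY_SRIOV_CAP_OFFSET 0x160   /* hypothetical extended-capability offset */

static void my_pf_realize(PCIDevice *pci_dev, Error **errp)
{
    int16_t cap_size = pcie_sriov_pf_init_from_user_created_vfs(
        pci_dev, MY_SRIOV_CAP_OFFSET, errp);

    if (cap_size < 0) {
        return;                     /* errp is already set */
    }
    if (cap_size > 0) {
        /* cap_size bytes at MY_SRIOV_CAP_OFFSET are now in use. */
    }
    /* ...continue with the rest of device realization... */
}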
diff --git a/hw/pci/trace-events b/hw/pci/trace-events
index e98f575..02c80d3 100644
--- a/hw/pci/trace-events
+++ b/hw/pci/trace-events
@@ -1,9 +1,15 @@
# See docs/devel/tracing.rst for syntax documentation.
# pci.c
+pci_pm_bad_transition(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, uint8_t old, uint8_t new) "%s %02x:%02x.%x REJECTED PM transition D%d->D%d"
+pci_pm_transition(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, uint8_t old, uint8_t new) "%s %02x:%02x.%x PM transition D%d->D%d"
pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s"
+pci_bad_rom_magic(uint16_t bad_rom_magic, uint16_t good_rom_magic) "Bad ROM magic number: %04"PRIX16". Should be: %04"PRIX16
+pci_bad_pcir_offset(uint16_t pcir_offset) "Bad PCIR offset 0x%"PRIx16" or signature"
+pci_rom_and_pci_ids(char *romfile, uint16_t vendor_id, uint16_t device_id, uint16_t rom_vendor_id, uint16_t rom_device_id) "%s: ROM ID %04"PRIx16":%04"PRIx16" | PCI ID %04"PRIx16":%04"PRIx16
+pci_rom_checksum_change(uint8_t old_checksum, uint8_t new_checksum) "ROM checksum changed from %02"PRIx8" to %02"PRIx8
# pci_host.c
pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x"
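
Each trace-events line above generates a trace_<name>() helper with matching arguments (see docs/devel/tracing.rst). A hedged sketch of emitting the new PM-transition event from pci.c; the wrapper and its variable names are illustrative, not part of the patch.

/*
 * Hedged sketch, not from the patch: my_log_pm_transition() and the
 * old_state/new_state variables are made-up; trace_pci_pm_transition()
 * is the helper generated from the trace-events entry above.
 */
#include "trace.h"

static void my_log_pm_transition(PCIDevice *d, uint8_t old_state,
                                 uint8_t new_state)
{
    trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
                            PCI_FUNC(d->devfn), old_state, new_state);
}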
diff --git a/hw/pcmcia/Kconfig b/hw/pcmcia/Kconfig
deleted file mode 100644
index 41f2df9..0000000
--- a/hw/pcmcia/Kconfig
+++ /dev/null
@@ -1,2 +0,0 @@
-config PCMCIA
- bool
diff --git a/hw/pcmcia/meson.build b/hw/pcmcia/meson.build
deleted file mode 100644
index 04e29c1..0000000
--- a/hw/pcmcia/meson.build
+++ /dev/null
@@ -1,2 +0,0 @@
-system_ss.add(when: 'CONFIG_PCMCIA', if_true: files('pcmcia.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c'))
diff --git a/hw/pcmcia/pcmcia.c b/hw/pcmcia/pcmcia.c
deleted file mode 100644
index 03d13e7..0000000
--- a/hw/pcmcia/pcmcia.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * PCMCIA emulation
- *
- * Copyright 2013 SUSE LINUX Products GmbH
- */
-
-#include "qemu/osdep.h"
-#include "qemu/module.h"
-#include "hw/pcmcia.h"
-
-static const TypeInfo pcmcia_card_type_info = {
- .name = TYPE_PCMCIA_CARD,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(PCMCIACardState),
- .abstract = true,
- .class_size = sizeof(PCMCIACardClass),
-};
-
-static void pcmcia_register_types(void)
-{
- type_register_static(&pcmcia_card_type_info);
-}
-
-type_init(pcmcia_register_types)
diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c
deleted file mode 100644
index e3111fd..0000000
--- a/hw/pcmcia/pxa2xx.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Intel XScale PXA255/270 PC Card and CompactFlash Interface.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "hw/sysbus.h"
-#include "qapi/error.h"
-#include "qemu/module.h"
-#include "hw/pcmcia.h"
-#include "hw/arm/pxa.h"
-
-struct PXA2xxPCMCIAState {
- SysBusDevice parent_obj;
-
- PCMCIASocket slot;
- MemoryRegion container_mem;
- MemoryRegion common_iomem;
- MemoryRegion attr_iomem;
- MemoryRegion iomem;
-
- qemu_irq irq;
- qemu_irq cd_irq;
-
- PCMCIACardState *card;
-};
-
-static uint64_t pxa2xx_pcmcia_common_read(void *opaque,
- hwaddr offset, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- return pcc->common_read(s->card, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_pcmcia_common_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- pcc->common_write(s->card, offset, value);
- }
-}
-
-static uint64_t pxa2xx_pcmcia_attr_read(void *opaque,
- hwaddr offset, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- return pcc->attr_read(s->card, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_pcmcia_attr_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- pcc->attr_write(s->card, offset, value);
- }
-}
-
-static uint64_t pxa2xx_pcmcia_io_read(void *opaque,
- hwaddr offset, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- return pcc->io_read(s->card, offset);
- }
-
- return 0;
-}
-
-static void pxa2xx_pcmcia_io_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- pcc->io_write(s->card, offset, value);
- }
-}
-
-static const MemoryRegionOps pxa2xx_pcmcia_common_ops = {
- .read = pxa2xx_pcmcia_common_read,
- .write = pxa2xx_pcmcia_common_write,
- .endianness = DEVICE_NATIVE_ENDIAN
-};
-
-static const MemoryRegionOps pxa2xx_pcmcia_attr_ops = {
- .read = pxa2xx_pcmcia_attr_read,
- .write = pxa2xx_pcmcia_attr_write,
- .endianness = DEVICE_NATIVE_ENDIAN
-};
-
-static const MemoryRegionOps pxa2xx_pcmcia_io_ops = {
- .read = pxa2xx_pcmcia_io_read,
- .write = pxa2xx_pcmcia_io_write,
- .endianness = DEVICE_NATIVE_ENDIAN
-};
-
-static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- if (!s->irq)
- return;
-
- qemu_set_irq(s->irq, level);
-}
-
-static void pxa2xx_pcmcia_initfn(Object *obj)
-{
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- PXA2xxPCMCIAState *s = PXA2XX_PCMCIA(obj);
-
- memory_region_init(&s->container_mem, obj, "container", 0x10000000);
- sysbus_init_mmio(sbd, &s->container_mem);
-
- /* Socket I/O Memory Space */
- memory_region_init_io(&s->iomem, obj, &pxa2xx_pcmcia_io_ops, s,
- "pxa2xx-pcmcia-io", 0x04000000);
- memory_region_add_subregion(&s->container_mem, 0x00000000,
- &s->iomem);
-
- /* Then next 64 MB is reserved */
-
- /* Socket Attribute Memory Space */
- memory_region_init_io(&s->attr_iomem, obj, &pxa2xx_pcmcia_attr_ops, s,
- "pxa2xx-pcmcia-attribute", 0x04000000);
- memory_region_add_subregion(&s->container_mem, 0x08000000,
- &s->attr_iomem);
-
- /* Socket Common Memory Space */
- memory_region_init_io(&s->common_iomem, obj, &pxa2xx_pcmcia_common_ops, s,
- "pxa2xx-pcmcia-common", 0x04000000);
- memory_region_add_subregion(&s->container_mem, 0x0c000000,
- &s->common_iomem);
-
- s->slot.irq = qemu_allocate_irq(pxa2xx_pcmcia_set_irq, s, 0);
-
- object_property_add_link(obj, "card", TYPE_PCMCIA_CARD,
- (Object **)&s->card,
- NULL, /* read-only property */
- 0);
-}
-
-/* Insert a new card into a slot */
-int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (s->slot.attached) {
- return -EEXIST;
- }
-
- if (s->cd_irq) {
- qemu_irq_raise(s->cd_irq);
- }
-
- s->card = card;
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
-
- s->slot.attached = true;
- s->card->slot = &s->slot;
- pcc->attach(s->card);
-
- return 0;
-}
-
-/* Eject card from the slot */
-int pxa2xx_pcmcia_detach(void *opaque)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- PCMCIACardClass *pcc;
-
- if (!s->slot.attached) {
- return -ENOENT;
- }
-
- pcc = PCMCIA_CARD_GET_CLASS(s->card);
- pcc->detach(s->card);
- s->card->slot = NULL;
- s->card = NULL;
-
- s->slot.attached = false;
-
- if (s->irq) {
- qemu_irq_lower(s->irq);
- }
- if (s->cd_irq) {
- qemu_irq_lower(s->cd_irq);
- }
-
- return 0;
-}
-
-/* Who to notify on card events */
-void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq)
-{
- PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
- s->irq = irq;
- s->cd_irq = cd_irq;
-}
-
-static const TypeInfo pxa2xx_pcmcia_type_info = {
- .name = TYPE_PXA2XX_PCMCIA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxPCMCIAState),
- .instance_init = pxa2xx_pcmcia_initfn,
-};
-
-static void pxa2xx_pcmcia_register_types(void)
-{
- type_register_static(&pxa2xx_pcmcia_type_info);
-}
-
-type_init(pxa2xx_pcmcia_register_types)
diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig
index 347212f..ced6bbc 100644
--- a/hw/ppc/Kconfig
+++ b/hw/ppc/Kconfig
@@ -39,15 +39,10 @@ config POWERNV
select PCI_POWERNV
select PCA9552
select PCA9554
-
-config PPC405
- bool
- default y
- depends on PPC
- select M48T59
- select PFLASH_CFI02
- select PPC4XX
- select SERIAL
+ select SERIAL_ISA
+ select SSI
+ select SSI_M25P80
+ select PNV_SPI
config PPC440
bool
@@ -59,7 +54,7 @@ config PPC440
select PCI_EXPRESS
select PPC440_PCIX
select PPC4XX
- select SERIAL
+ select SERIAL_MM
select FDT_PPC
config PPC4XX
@@ -76,7 +71,7 @@ config SAM460EX
select IDE_SII3112
select M41T80
select PPC440
- select SERIAL
+ select SERIAL_MM
select SM501
select SMBUS_EEPROM
select USB_EHCI_SYSBUS
@@ -159,7 +154,7 @@ config E500
select PLATFORM_BUS
select PPCE500_PCI
select SDHCI
- select SERIAL
+ select SERIAL_MM
select MPC_I2C
select FDT_PPC
select DS1338
@@ -183,7 +178,7 @@ config VIRTEX
depends on PPC && FDT
select PPC4XX
select PFLASH_CFI01
- select SERIAL
+ select SERIAL_MM
select XILINX
select XILINX_ETHLITE
select FDT_PPC
diff --git a/hw/ppc/amigaone.c b/hw/ppc/amigaone.c
index 900f93c..12279f4 100644
--- a/hw/ppc/amigaone.c
+++ b/hw/ppc/amigaone.c
@@ -21,12 +21,26 @@
#include "hw/ide/pci.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/ppc/ppc.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/block-backend.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "kvm_ppc.h"
+#include "elf.h"
+
+#include <zlib.h> /* for crc32 */
#define BUS_FREQ_HZ 100000000
+#define INITRD_MIN_ADDR 0x600000
+#define INIT_RAM_ADDR 0x40000000
+
+#define PCI_HIGH_ADDR 0x80000000
+#define PCI_HIGH_SIZE 0x7d000000
+#define PCI_LOW_ADDR 0xfd000000
+#define PCI_LOW_SIZE 0xe0000
+
+#define ARTICIA_ADDR 0xfe000000
+
/*
* Firmware binary available at
* https://www.hyperion-entertainment.com/index.php/downloads?view=files&parent=28
@@ -41,20 +55,204 @@
/* AmigaOS calls this routine from ROM, use this if no firmware loaded */
static const char dummy_fw[] = {
- 0x38, 0x00, 0x00, 0x08, /* li r0,8 */
- 0x7c, 0x09, 0x03, 0xa6, /* mtctr r0 */
- 0x54, 0x63, 0xf8, 0x7e, /* srwi r3,r3,1 */
- 0x42, 0x00, 0xff, 0xfc, /* bdnz 0x8 */
+ 0x54, 0x63, 0xc2, 0x3e, /* srwi r3,r3,8 */
0x7c, 0x63, 0x18, 0xf8, /* not r3,r3 */
0x4e, 0x80, 0x00, 0x20, /* blr */
};
+#define NVRAM_ADDR 0xfd0e0000
+#define NVRAM_SIZE (4 * KiB)
+
+static const char default_env[] =
+ "baudrate=115200\0"
+ "stdout=vga\0"
+ "stdin=ps2kbd\0"
+ "bootcmd=boota; menu; run menuboot_cmd\0"
+ "boot1=ide\0"
+ "boot2=cdrom\0"
+ "boota_timeout=3\0"
+ "ide_doreset=on\0"
+ "pci_irqa=9\0"
+ "pci_irqa_select=level\0"
+ "pci_irqb=10\0"
+ "pci_irqb_select=level\0"
+ "pci_irqc=11\0"
+ "pci_irqc_select=level\0"
+ "pci_irqd=7\0"
+ "pci_irqd_select=level\0"
+ "a1ide_irq=1111\0"
+ "a1ide_xfer=FFFF\0";
+#define CRC32_DEFAULT_ENV 0xb5548481
+#define CRC32_ALL_ZEROS 0x603b0489
+
+#define TYPE_A1_NVRAM "a1-nvram"
+OBJECT_DECLARE_SIMPLE_TYPE(A1NVRAMState, A1_NVRAM)
+
+struct A1NVRAMState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mr;
+ BlockBackend *blk;
+};
+
+static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ /* read callback not used because of romd mode */
+ g_assert_not_reached();
+}
+
+static void nvram_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned int size)
+{
+ A1NVRAMState *s = opaque;
+ uint8_t *p = memory_region_get_ram_ptr(&s->mr);
+
+ p[addr] = val;
+ if (s->blk && blk_pwrite(s->blk, addr, 1, &val, 0) < 0) {
+ error_report("%s: could not write %s", __func__, blk_name(s->blk));
+ }
+}
+
+static const MemoryRegionOps nvram_ops = {
+ .read = nvram_read,
+ .write = nvram_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void nvram_realize(DeviceState *dev, Error **errp)
+{
+ A1NVRAMState *s = A1_NVRAM(dev);
+ void *p;
+ uint32_t crc, *c;
+
+ memory_region_init_rom_device(&s->mr, NULL, &nvram_ops, s, "nvram",
+ NVRAM_SIZE, &error_fatal);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr);
+ c = p = memory_region_get_ram_ptr(&s->mr);
+ if (s->blk) {
+ if (blk_getlength(s->blk) != NVRAM_SIZE) {
+ error_setg(errp, "NVRAM backing file size must be %" PRId64 "bytes",
+ NVRAM_SIZE);
+ return;
+ }
+ blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
+ BLK_PERM_ALL, &error_fatal);
+ if (blk_pread(s->blk, 0, NVRAM_SIZE, p, 0) < 0) {
+ error_setg(errp, "Cannot read NVRAM contents from backing file");
+ return;
+ }
+ }
+ crc = crc32(0, p + 4, NVRAM_SIZE - 4);
+ if (crc == CRC32_ALL_ZEROS) { /* If env is uninitialized set default */
+ *c = cpu_to_be32(CRC32_DEFAULT_ENV);
+ /* Also copies terminating \0 as env is terminated by \0\0 */
+ memcpy(p + 4, default_env, sizeof(default_env));
+ if (s->blk &&
+ blk_pwrite(s->blk, 0, sizeof(crc) + sizeof(default_env), p, 0) < 0
+ ) {
+ error_report("%s: could not write %s", __func__, blk_name(s->blk));
+ }
+ return;
+ }
+ if (*c == 0) {
+ *c = cpu_to_be32(crc32(0, p + 4, NVRAM_SIZE - 4));
+ if (s->blk && blk_pwrite(s->blk, 0, 4, p, 0) < 0) {
+ error_report("%s: could not write %s", __func__, blk_name(s->blk));
+ }
+ }
+ if (be32_to_cpu(*c) != crc) {
+ warn_report("NVRAM checksum mismatch");
+ }
+}
+
+static const Property nvram_properties[] = {
+ DEFINE_PROP_DRIVE("drive", A1NVRAMState, blk),
+};
+
+static void nvram_class_init(ObjectClass *oc, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = nvram_realize;
+ device_class_set_props(dc, nvram_properties);
+}
+
+static const TypeInfo nvram_types[] = {
+ {
+ .name = TYPE_A1_NVRAM,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(A1NVRAMState),
+ .class_init = nvram_class_init,
+ },
+};
+DEFINE_TYPES(nvram_types)
+
+struct boot_info {
+ hwaddr entry;
+ hwaddr stack;
+ hwaddr bd_info;
+ hwaddr initrd_start;
+ hwaddr initrd_end;
+ hwaddr cmdline_start;
+ hwaddr cmdline_end;
+};
+
+/* Board info struct from U-Boot */
+struct bd_info {
+ uint32_t bi_memstart;
+ uint32_t bi_memsize;
+ uint32_t bi_flashstart;
+ uint32_t bi_flashsize;
+ uint32_t bi_flashoffset;
+ uint32_t bi_sramstart;
+ uint32_t bi_sramsize;
+ uint32_t bi_bootflags;
+ uint32_t bi_ip_addr;
+ uint8_t bi_enetaddr[6];
+ uint16_t bi_ethspeed;
+ uint32_t bi_intfreq;
+ uint32_t bi_busfreq;
+ uint32_t bi_baudrate;
+} QEMU_PACKED;
+
+static void create_bd_info(hwaddr addr, ram_addr_t ram_size)
+{
+ struct bd_info *bd = g_new0(struct bd_info, 1);
+
+ bd->bi_memsize = cpu_to_be32(ram_size);
+ bd->bi_flashstart = cpu_to_be32(PROM_ADDR);
+ bd->bi_flashsize = cpu_to_be32(1); /* match what U-Boot detects */
+ bd->bi_bootflags = cpu_to_be32(1);
+ bd->bi_intfreq = cpu_to_be32(11.5 * BUS_FREQ_HZ);
+ bd->bi_busfreq = cpu_to_be32(BUS_FREQ_HZ);
+ bd->bi_baudrate = cpu_to_be32(115200);
+
+ cpu_physical_memory_write(addr, bd, sizeof(*bd));
+}
+
static void amigaone_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
cpu_reset(CPU(cpu));
- cpu_ppc_tb_reset(&cpu->env);
+ if (env->load_info) {
+ struct boot_info *bi = env->load_info;
+
+ env->gpr[1] = bi->stack;
+ env->gpr[2] = 1024;
+ env->gpr[3] = bi->bd_info;
+ env->gpr[4] = bi->initrd_start;
+ env->gpr[5] = bi->initrd_end;
+ env->gpr[6] = bi->cmdline_start;
+ env->gpr[7] = bi->cmdline_end;
+ env->nip = bi->entry;
+ }
+ cpu_ppc_tb_reset(env);
}
static void fix_spd_data(uint8_t *spd)
@@ -75,7 +273,9 @@ static void amigaone_init(MachineState *machine)
DeviceState *dev;
I2CBus *i2c_bus;
uint8_t *spd_data;
- int i;
+ DriveInfo *di;
+ hwaddr loadaddr;
+ struct boot_info *bi = NULL;
/* init CPU */
cpu = POWERPC_CPU(cpu_create(machine->cpu_type));
@@ -97,9 +297,19 @@ static void amigaone_init(MachineState *machine)
/* Firmware uses this area for startup */
mr = g_new(MemoryRegion, 1);
memory_region_init_ram(mr, NULL, "init-cache", 32 * KiB, &error_fatal);
- memory_region_add_subregion(get_system_memory(), 0x40000000, mr);
+ memory_region_add_subregion(get_system_memory(), INIT_RAM_ADDR, mr);
}
+ /* nvram */
+ dev = qdev_new(TYPE_A1_NVRAM);
+ di = drive_get(IF_MTD, 0, 0);
+ if (di) {
+ qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(di));
+ }
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ memory_region_add_subregion(get_system_memory(), NVRAM_ADDR,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0));
+
/* allocate and load firmware */
rom = g_new(MemoryRegion, 1);
memory_region_init_rom(rom, NULL, "rom", PROM_SIZE, &error_fatal);
@@ -122,7 +332,7 @@ static void amigaone_init(MachineState *machine)
}
/* Articia S */
- dev = sysbus_create_simple(TYPE_ARTICIA, 0xfe000000, NULL);
+ dev = sysbus_create_simple(TYPE_ARTICIA, ARTICIA_ADDR, NULL);
i2c_bus = I2C_BUS(qdev_get_child_bus(dev, "smbus"));
if (machine->ram_size > 512 * MiB) {
@@ -139,12 +349,12 @@ static void amigaone_init(MachineState *machine)
pci_mem = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
mr = g_new(MemoryRegion, 1);
memory_region_init_alias(mr, OBJECT(dev), "pci-mem-low", pci_mem,
- 0, 0x1000000);
- memory_region_add_subregion(get_system_memory(), 0xfd000000, mr);
+ 0, PCI_LOW_SIZE);
+ memory_region_add_subregion(get_system_memory(), PCI_LOW_ADDR, mr);
mr = g_new(MemoryRegion, 1);
memory_region_init_alias(mr, OBJECT(dev), "pci-mem-high", pci_mem,
- 0x80000000, 0x7d000000);
- memory_region_add_subregion(get_system_memory(), 0x80000000, mr);
+ PCI_HIGH_ADDR, PCI_HIGH_SIZE);
+ memory_region_add_subregion(get_system_memory(), PCI_HIGH_ADDR, mr);
pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0"));
/* VIA VT82c686B South Bridge (multifunction PCI device) */
@@ -156,12 +366,62 @@ static void amigaone_init(MachineState *machine)
qdev_connect_gpio_out_named(DEVICE(via), "intr", 0,
qdev_get_gpio_in(DEVICE(cpu),
PPC6xx_INPUT_INT));
- for (i = 0; i < PCI_NUM_PINS; i++) {
+ for (int i = 0; i < PCI_NUM_PINS; i++) {
qdev_connect_gpio_out(dev, i, qdev_get_gpio_in_named(DEVICE(via),
"pirq", i));
}
pci_ide_create_devs(PCI_DEVICE(object_resolve_path_component(via, "ide")));
pci_vga_init(pci_bus);
+
+ if (!machine->kernel_filename) {
+ return;
+ }
+
+ /* handle -kernel, -initrd, -append options and emulate U-Boot */
+ bi = g_new0(struct boot_info, 1);
+ cpu->env.load_info = bi;
+
+ loadaddr = MIN(machine->ram_size, 256 * MiB);
+ bi->bd_info = loadaddr - 8 * MiB;
+ create_bd_info(bi->bd_info, machine->ram_size);
+ bi->stack = bi->bd_info - 64 * KiB - 8;
+
+ if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
+ size_t len = strlen(machine->kernel_cmdline);
+
+ loadaddr = bi->bd_info + 1 * MiB;
+ cpu_physical_memory_write(loadaddr, machine->kernel_cmdline, len + 1);
+ bi->cmdline_start = loadaddr;
+ bi->cmdline_end = loadaddr + len + 1; /* including terminating '\0' */
+ }
+
+ sz = load_elf(machine->kernel_filename, NULL, NULL, NULL,
+ &bi->entry, &loadaddr, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
+ if (sz <= 0) {
+ sz = load_uimage(machine->kernel_filename, &bi->entry, &loadaddr,
+ NULL, NULL, NULL);
+ }
+ if (sz <= 0) {
+ error_report("Could not load kernel '%s'",
+ machine->kernel_filename);
+ exit(1);
+ }
+ loadaddr += sz;
+
+ if (machine->initrd_filename) {
+ loadaddr = ROUND_UP(loadaddr + 4 * MiB, 4 * KiB);
+ loadaddr = MAX(loadaddr, INITRD_MIN_ADDR);
+ sz = load_image_targphys(machine->initrd_filename, loadaddr,
+ bi->bd_info - loadaddr);
+ if (sz <= 0) {
+ error_report("Could not load initrd '%s'",
+ machine->initrd_filename);
+ exit(1);
+ }
+ bi->initrd_start = loadaddr;
+ bi->initrd_end = loadaddr + sz;
+ }
}
static void amigaone_machine_init(MachineClass *mc)
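
nvram_realize() above treats the 4 KiB NVRAM image as a big-endian CRC32 of bytes 4..4095 stored in the first word, followed by a U-Boot-style environment of NUL-separated key=value pairs terminated by an empty string. A hedged, standalone sketch of checking a backing file on the host; the "nvram.img" file name and exit codes are illustrative.

/*
 * Hedged sketch, illustration only: verify the checksum that nvram_realize()
 * maintains.  Build with -lz; "nvram.img" is a hypothetical backing file.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <zlib.h>

int main(void)
{
    uint8_t buf[4096];
    FILE *f = fopen("nvram.img", "rb");

    if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
        return 1;
    }
    fclose(f);

    /* First word is the big-endian CRC32 of the remaining 4092 bytes. */
    uint32_t stored = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                      ((uint32_t)buf[2] << 8) | buf[3];
    uint32_t computed = crc32(0, buf + 4, sizeof(buf) - 4);

    printf("stored %08" PRIx32 " computed %08" PRIx32 " -> %s\n",
           stored, computed, stored == computed ? "OK" : "mismatch");
    return stored == computed ? 0 : 1;
}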
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 3bd12b5..723c97f 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -18,21 +18,22 @@
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qemu/guest-random.h"
+#include "exec/target_page.h"
#include "qapi/error.h"
#include "e500.h"
#include "e500-ccsr.h"
#include "net/net.h"
#include "qemu/config-file.h"
#include "hw/block/flash.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/pci/pci.h"
-#include "sysemu/block-backend-io.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend-io.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "kvm_ppc.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/openpic_kvm.h"
#include "hw/ppc/ppc.h"
@@ -78,8 +79,6 @@
#define MPC85XX_ESDHC_IRQ 72
#define RTC_REGS_OFFSET 0x68
-#define PLATFORM_CLK_FREQ_HZ (400 * 1000 * 1000)
-
struct boot_info
{
uint32_t dt_base;
@@ -119,7 +118,7 @@ static uint32_t *pci_map_create(void *fdt, uint32_t mpic, int first_slot,
}
static void dt_serial_create(void *fdt, unsigned long long offset,
- const char *soc, const char *mpic,
+ const char *soc, uint32_t freq, const char *mpic,
const char *alias, int idx, bool defcon)
{
char *ser;
@@ -130,7 +129,7 @@ static void dt_serial_create(void *fdt, unsigned long long offset,
qemu_fdt_setprop_string(fdt, ser, "compatible", "ns16550");
qemu_fdt_setprop_cells(fdt, ser, "reg", offset, 0x100);
qemu_fdt_setprop_cell(fdt, ser, "cell-index", idx);
- qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", PLATFORM_CLK_FREQ_HZ);
+ qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", freq);
qemu_fdt_setprop_cells(fdt, ser, "interrupts", 42, 2);
qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic);
qemu_fdt_setprop_string(fdt, "/aliases", alias, ser);
@@ -203,6 +202,8 @@ static void dt_i2c_create(void *fdt, const char *soc, const char *mpic,
qemu_fdt_setprop_cells(fdt, i2c, "cell-index", 0);
qemu_fdt_setprop_cells(fdt, i2c, "interrupts", irq0, 0x2);
qemu_fdt_setprop_phandle(fdt, i2c, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cell(fdt, i2c, "#size-cells", 0);
+ qemu_fdt_setprop_cell(fdt, i2c, "#address-cells", 1);
qemu_fdt_setprop_string(fdt, "/aliases", alias, i2c);
g_free(i2c);
@@ -379,8 +380,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
int fdt_size;
void *fdt;
uint8_t hypercall[16];
- uint32_t clock_freq = PLATFORM_CLK_FREQ_HZ;
- uint32_t tb_freq = PLATFORM_CLK_FREQ_HZ;
+ uint32_t clock_freq, tb_freq;
int i;
char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus";
char *soc;
@@ -408,7 +408,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
if (dtb_file) {
char *filename;
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file);
+ filename = qemu_find_file(QEMU_FILE_TYPE_DTB, dtb_file);
if (!filename) {
goto out;
}
@@ -481,6 +481,9 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
if (kvmppc_get_hasidle(env)) {
qemu_fdt_setprop(fdt, "/hypervisor", "has-idle", NULL, 0);
}
+ } else {
+ clock_freq = pmc->clock_freq;
+ tb_freq = pmc->tb_freq;
}
/* Create CPU nodes */
@@ -561,12 +564,12 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
*/
if (serial_hd(1)) {
dt_serial_create(fdt, MPC8544_SERIAL1_REGS_OFFSET,
- soc, mpic, "serial1", 1, false);
+ soc, pmc->clock_freq, mpic, "serial1", 1, false);
}
if (serial_hd(0)) {
dt_serial_create(fdt, MPC8544_SERIAL0_REGS_OFFSET,
- soc, mpic, "serial0", 0, true);
+ soc, pmc->clock_freq, mpic, "serial0", 0, true);
}
/* i2c */
@@ -656,7 +659,6 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
done:
if (!dry_run) {
- qemu_fdt_dumpdtb(fdt, fdt_size);
cpu_physical_memory_write(addr, fdt, fdt_size);
/* Set machine->fdt for 'dumpdtb' QMP/HMP command */
@@ -721,11 +723,21 @@ static int ppce500_prep_device_tree(PPCE500MachineState *machine,
kernel_base, kernel_size, true);
}
-hwaddr booke206_page_size_to_tlb(uint64_t size)
+static hwaddr booke206_page_size_to_tlb(uint64_t size)
{
return 63 - clz64(size / KiB);
}
+void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa,
+ hwaddr len)
+{
+ tlb->mas1 = booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT;
+ tlb->mas1 |= MAS1_VALID;
+ tlb->mas2 = va & TARGET_PAGE_MASK;
+ tlb->mas7_3 = pa & TARGET_PAGE_MASK;
+ tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
+}
+
static int booke206_initial_map_tsize(CPUPPCState *env)
{
struct boot_info *bi = env->load_info;
@@ -751,25 +763,6 @@ static uint64_t mmubooke_initial_mapsize(CPUPPCState *env)
return (1ULL << 10 << tsize);
}
-/* Create -kernel TLB entries for BookE. */
-static void mmubooke_create_initial_mapping(CPUPPCState *env)
-{
- ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);
- hwaddr size;
- int ps;
-
- ps = booke206_initial_map_tsize(env);
- size = (ps << MAS1_TSIZE_SHIFT);
- tlb->mas1 = MAS1_VALID | size;
- tlb->mas2 = 0;
- tlb->mas7_3 = 0;
- tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
-
-#ifdef CONFIG_KVM
- env->tlb_dirty = true;
-#endif
-}
-
static void ppce500_cpu_reset_sec(void *opaque)
{
PowerPCCPU *cpu = opaque;
@@ -786,6 +779,8 @@ static void ppce500_cpu_reset(void *opaque)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
struct boot_info *bi = env->load_info;
+ uint64_t map_size = mmubooke_initial_mapsize(env);
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);
cpu_reset(cs);
@@ -796,11 +791,15 @@ static void ppce500_cpu_reset(void *opaque)
env->gpr[4] = 0;
env->gpr[5] = 0;
env->gpr[6] = EPAPR_MAGIC;
- env->gpr[7] = mmubooke_initial_mapsize(env);
+ env->gpr[7] = map_size;
env->gpr[8] = 0;
env->gpr[9] = 0;
env->nip = bi->entry;
- mmubooke_create_initial_mapping(env);
+ /* create initial mapping */
+ booke206_set_tlb(tlb, 0, 0, map_size);
+#ifdef CONFIG_KVM
+ env->tlb_dirty = true;
+#endif
}
static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms,
@@ -832,7 +831,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms,
}
static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc,
- IrqLines *irqs, Error **errp)
+ Error **errp)
{
#ifdef CONFIG_KVM
DeviceState *dev;
@@ -872,7 +871,7 @@ static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms,
Error *err = NULL;
if (kvm_kernel_irqchip_allowed()) {
- dev = ppce500_init_mpic_kvm(pmc, irqs, &err);
+ dev = ppce500_init_mpic_kvm(pmc, &err);
}
if (kvm_kernel_irqchip_required() && !dev) {
error_reportf_err(err,
@@ -932,7 +931,6 @@ void ppce500_init(MachineState *machine)
CPUPPCState *firstenv = NULL;
MemoryRegion *ccsr_addr_space;
SysBusDevice *s;
- PPCE500CCSRState *ccsr;
I2CBus *i2c;
irqs = g_new0(IrqLines, smp_cpus);
@@ -969,7 +967,7 @@ void ppce500_init(MachineState *machine)
env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i;
env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0;
- ppc_booke_timers_init(cpu, PLATFORM_CLK_FREQ_HZ, PPC_TIMER_E500);
+ ppc_booke_timers_init(cpu, pmc->tb_freq, PPC_TIMER_E500);
/* Register reset handler */
if (!i) {
@@ -994,10 +992,10 @@ void ppce500_init(MachineState *machine)
memory_region_add_subregion(address_space_mem, 0, machine->ram);
dev = qdev_new("e500-ccsr");
+ s = SYS_BUS_DEVICE(dev);
object_property_add_child(OBJECT(machine), "e500-ccsr", OBJECT(dev));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- ccsr = CCSR(dev);
- ccsr_addr_space = &ccsr->ccsr_space;
+ sysbus_realize_and_unref(s, &error_fatal);
+ ccsr_addr_space = sysbus_mmio_get_region(s, 0);
memory_region_add_subregion(address_space_mem, pmc->ccsrbar_base,
ccsr_addr_space);
@@ -1024,7 +1022,7 @@ void ppce500_init(MachineState *machine)
sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8544_I2C_IRQ));
memory_region_add_subregion(ccsr_addr_space, MPC8544_I2C_REGS_OFFSET,
sysbus_mmio_get_region(s, 0));
- i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
+ i2c = I2C_BUS(qdev_get_child_bus(dev, "i2c"));
i2c_slave_create_simple(i2c, "ds1338", RTC_REGS_OFFSET);
/* eSDHC */
@@ -1045,6 +1043,7 @@ void ppce500_init(MachineState *machine)
dev = qdev_new(TYPE_SYSBUS_SDHCI);
qdev_prop_set_uint8(dev, "sd-spec-version", 2);
qdev_prop_set_uint8(dev, "endianness", DEVICE_BIG_ENDIAN);
+ qdev_prop_set_uint8(dev, "vendor", SDHCI_VENDOR_FSL);
s = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(s, &error_fatal);
sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC85XX_ESDHC_IRQ));
@@ -1073,7 +1072,7 @@ void ppce500_init(MachineState *machine)
memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET,
sysbus_mmio_get_region(s, 0));
- pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0");
+ pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0"));
if (!pci_bus)
printf("couldn't create PCI controller!\n");
@@ -1195,7 +1194,7 @@ void ppce500_init(MachineState *machine)
payload_size = load_elf(filename, NULL, NULL, NULL,
&bios_entry, &loadaddr, NULL, NULL,
- 1, PPC_ELF_MACHINE, 0, 0);
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (payload_size < 0) {
/*
* Hrm. No ELF image? Try a uImage, maybe someone is giving us an
@@ -1284,6 +1283,7 @@ static void e500_ccsr_initfn(Object *obj)
PPCE500CCSRState *ccsr = CCSR(obj);
memory_region_init(&ccsr->ccsr_space, obj, "e500-ccsr",
MPC8544_CCSRBAR_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(ccsr), &ccsr->ccsr_space);
}
static const TypeInfo e500_ccsr_info = {
diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h
index 8c09ef9..00f4905 100644
--- a/hw/ppc/e500.h
+++ b/hw/ppc/e500.h
@@ -5,6 +5,8 @@
#include "hw/platform-bus.h"
#include "qom/object.h"
+#define PLATFORM_CLK_FREQ_HZ (400 * 1000 * 1000)
+
struct PPCE500MachineState {
/*< private >*/
MachineState parent_obj;
@@ -37,12 +39,12 @@ struct PPCE500MachineClass {
hwaddr pci_mmio_base;
hwaddr pci_mmio_bus_base;
hwaddr spin_base;
+ uint32_t clock_freq;
+ uint32_t tb_freq;
};
void ppce500_init(MachineState *machine);
-hwaddr booke206_page_size_to_tlb(uint64_t size);
-
#define TYPE_PPCE500_MACHINE "ppce500-base-machine"
OBJECT_DECLARE_TYPE(PPCE500MachineState, PPCE500MachineClass, PPCE500_MACHINE)
diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c
index 7aa2f21..4f1d659 100644
--- a/hw/ppc/e500plat.c
+++ b/hw/ppc/e500plat.c
@@ -13,8 +13,8 @@
#include "qemu/units.h"
#include "e500.h"
#include "hw/net/fsl_etsec/etsec.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/kvm.h"
+#include "system/device_tree.h"
+#include "system/kvm.h"
#include "hw/sysbus.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
@@ -68,7 +68,7 @@ HotplugHandler *e500plat_machine_get_hotpug_handler(MachineState *machine,
#define TYPE_E500PLAT_MACHINE MACHINE_TYPE_NAME("ppce500")
-static void e500plat_machine_class_init(ObjectClass *oc, void *data)
+static void e500plat_machine_class_init(ObjectClass *oc, const void *data)
{
PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -93,6 +93,8 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data)
pmc->pci_mmio_base = 0xC00000000ULL;
pmc->pci_mmio_bus_base = 0xE0000000ULL;
pmc->spin_base = 0xFEF000000ULL;
+ pmc->clock_freq = PLATFORM_CLK_FREQ_HZ;
+ pmc->tb_freq = PLATFORM_CLK_FREQ_HZ;
mc->desc = "generic paravirt e500 platform";
mc->init = e500plat_init;
@@ -107,7 +109,7 @@ static const TypeInfo e500plat_info = {
.name = TYPE_E500PLAT_MACHINE,
.parent = TYPE_PPCE500_MACHINE,
.class_init = e500plat_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index ff9e490..0b6e096 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -50,6 +50,7 @@
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qapi/error.h"
+#include "exec/target_page.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "hw/nvram/mac_nvram.h"
@@ -59,7 +60,7 @@
#include "hw/ppc/mac_dbdma.h"
#include "hw/pci/pci.h"
#include "net/net.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/char/escc.h"
#include "hw/misc/macio/macio.h"
@@ -68,8 +69,8 @@
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include "kvm_ppc.h"
#include "hw/usb.h"
#include "hw/sysbus.h"
@@ -182,7 +183,8 @@ static void ppc_core99_init(MachineState *machine)
if (filename) {
/* Load OpenBIOS (ELF) */
bios_size = load_elf(filename, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0);
+ NULL, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (bios_size <= 0) {
/* or load binary ROM image */
@@ -196,19 +198,14 @@ static void ppc_core99_init(MachineState *machine)
}
if (machine->kernel_filename) {
- int bswap_needed = 0;
-
-#ifdef BSWAP_NEEDED
- bswap_needed = 1;
-#endif
kernel_base = KERNEL_LOAD_ADDR;
kernel_size = load_elf(machine->kernel_filename, NULL,
translate_kernel_address, NULL, NULL, NULL,
- NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0);
+ NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (kernel_size < 0) {
kernel_size = load_aout(machine->kernel_filename, kernel_base,
machine->ram_size - kernel_base,
- bswap_needed, TARGET_PAGE_SIZE);
+ true, TARGET_PAGE_SIZE);
}
if (kernel_size < 0) {
kernel_size = load_image_targphys(machine->kernel_filename,
@@ -566,12 +563,12 @@ static int core99_kvm_type(MachineState *machine, const char *arg)
return 2;
}
-static void core99_machine_class_init(ObjectClass *oc, void *data)
+static void core99_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
- mc->desc = "Mac99 based PowerMAC";
+ mc->desc = "Mac99 based PowerMac";
mc->init = ppc_core99_init;
mc->block_default_type = IF_IDE;
/* SMP is not supported currently */
@@ -634,8 +631,6 @@ static void core99_instance_init(Object *obj)
object_property_set_description(obj, "via",
"Set VIA configuration. "
"Valid values are cuda, pmu and pmu-adb");
-
- return;
}
static const TypeInfo core99_machine_info = {
@@ -644,7 +639,7 @@ static const TypeInfo core99_machine_info = {
.class_init = core99_machine_class_init,
.instance_init = core99_instance_init,
.instance_size = sizeof(Core99MachineState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
index 1981d3d..40ae936 100644
--- a/hw/ppc/mac_oldworld.c
+++ b/hw/ppc/mac_oldworld.c
@@ -28,11 +28,12 @@
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qapi/error.h"
+#include "exec/target_page.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "hw/input/adb.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "net/net.h"
#include "hw/isa/isa.h"
#include "hw/pci/pci.h"
@@ -45,8 +46,8 @@
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include "kvm_ppc.h"
#define MAX_IDE_BUS 2
@@ -136,7 +137,7 @@ static void ppc_heathrow_init(MachineState *machine)
if (filename) {
/* Load OpenBIOS (ELF) */
bios_size = load_elf(filename, NULL, NULL, NULL, NULL, &bios_addr,
- NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0);
+ NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
/* Unfortunately, load_elf sign-extends reading elf32 */
bios_addr = (uint32_t)bios_addr;
@@ -153,19 +154,14 @@ static void ppc_heathrow_init(MachineState *machine)
}
if (machine->kernel_filename) {
- int bswap_needed = 0;
-
-#ifdef BSWAP_NEEDED
- bswap_needed = 1;
-#endif
kernel_base = KERNEL_LOAD_ADDR;
kernel_size = load_elf(machine->kernel_filename, NULL,
translate_kernel_address, NULL, NULL, NULL,
- NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0);
+ NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (kernel_size < 0) {
kernel_size = load_aout(machine->kernel_filename, kernel_base,
machine->ram_size - kernel_base,
- bswap_needed, TARGET_PAGE_SIZE);
+ true, TARGET_PAGE_SIZE);
}
if (kernel_size < 0) {
kernel_size = load_image_targphys(machine->kernel_filename,
@@ -406,12 +402,12 @@ static int heathrow_kvm_type(MachineState *machine, const char *arg)
return 2;
}
-static void heathrow_class_init(ObjectClass *oc, void *data)
+static void heathrow_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
- mc->desc = "Heathrow based PowerMAC";
+ mc->desc = "Heathrow based PowerMac";
mc->init = ppc_heathrow_init;
mc->block_default_type = IF_IDE;
/* SMP is not supported currently */
@@ -434,7 +430,7 @@ static const TypeInfo ppc_heathrow_machine_info = {
.name = MACHINE_TYPE_NAME("g3beige"),
.parent = TYPE_MACHINE,
.class_init = heathrow_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build
index 3ebbf32..9893f8a 100644
--- a/hw/ppc/meson.build
+++ b/hw/ppc/meson.build
@@ -42,6 +42,7 @@ endif
ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files(
'pnv.c',
'pnv_xscom.c',
+ 'pnv_adu.c',
'pnv_core.c',
'pnv_i2c.c',
'pnv_lpc.c',
@@ -56,9 +57,6 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files(
'pnv_n1_chiplet.c',
))
# PowerPC 4xx boards
-ppc_ss.add(when: 'CONFIG_PPC405', if_true: files(
- 'ppc405_boards.c',
- 'ppc405_uc.c'))
ppc_ss.add(when: 'CONFIG_PPC440', if_true: files(
'ppc440_bamboo.c',
'ppc440_uc.c'))
diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c
index e3540b0..a25041e 100644
--- a/hw/ppc/mpc8544_guts.c
+++ b/hw/ppc/mpc8544_guts.c
@@ -18,9 +18,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/module.h"
#include "qemu/log.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "cpu.h"
#include "hw/sysbus.h"
#include "qom/object.h"
@@ -29,6 +28,12 @@
#define MPC8544_GUTS_RSTCR_RESET 0x02
#define MPC8544_GUTS_ADDR_PORPLLSR 0x00
+REG32(GUTS_PORPLLSR, 0x00)
+ FIELD(GUTS_PORPLLSR, E500_1_RATIO, 24, 6)
+ FIELD(GUTS_PORPLLSR, E500_0_RATIO, 16, 6)
+ FIELD(GUTS_PORPLLSR, DDR_RATIO, 9, 5)
+ FIELD(GUTS_PORPLLSR, PLAT_RATIO, 1, 5)
+
#define MPC8544_GUTS_ADDR_PORBMSR 0x04
#define MPC8544_GUTS_ADDR_PORIMPSCR 0x08
#define MPC8544_GUTS_ADDR_PORDEVSR 0x0C
@@ -75,6 +80,12 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr,
addr &= MPC8544_GUTS_MMIO_SIZE - 1;
switch (addr) {
+ case MPC8544_GUTS_ADDR_PORPLLSR:
+ value = FIELD_DP32(value, GUTS_PORPLLSR, E500_1_RATIO, 6); /* 3:1 */
+ value = FIELD_DP32(value, GUTS_PORPLLSR, E500_0_RATIO, 6); /* 3:1 */
+ value = FIELD_DP32(value, GUTS_PORPLLSR, DDR_RATIO, 12); /* 12:1 */
+ value = FIELD_DP32(value, GUTS_PORPLLSR, PLAT_RATIO, 6); /* 6:1 */
+ break;
case MPC8544_GUTS_ADDR_PVR:
value = env->spr[SPR_PVR];
break;
@@ -129,16 +140,13 @@ static void mpc8544_guts_initfn(Object *obj)
sysbus_init_mmio(d, &s->iomem);
}
-static const TypeInfo mpc8544_guts_info = {
- .name = TYPE_MPC8544_GUTS,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(GutsState),
- .instance_init = mpc8544_guts_initfn,
+static const TypeInfo mpc8544_guts_types[] = {
+ {
+ .name = TYPE_MPC8544_GUTS,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GutsState),
+ .instance_init = mpc8544_guts_initfn,
+ },
};
-static void mpc8544_guts_register_types(void)
-{
- type_register_static(&mpc8544_guts_info);
-}
-
-type_init(mpc8544_guts_register_types)
+DEFINE_TYPES(mpc8544_guts_types)
diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c
index b713090..5826985 100644
--- a/hw/ppc/mpc8544ds.c
+++ b/hw/ppc/mpc8544ds.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "e500.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/ppc/openpic.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
@@ -37,7 +37,7 @@ static void mpc8544ds_init(MachineState *machine)
ppce500_init(machine);
}
-static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data)
+static void mpc8544ds_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc);
@@ -55,6 +55,8 @@ static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data)
pmc->pci_mmio_bus_base = 0xC0000000ULL;
pmc->pci_pio_base = 0xE1000000ULL;
pmc->spin_base = 0xEF000000ULL;
+ pmc->clock_freq = PLATFORM_CLK_FREQ_HZ;
+ pmc->tb_freq = PLATFORM_CLK_FREQ_HZ;
mc->desc = "mpc8544ds";
mc->init = mpc8544ds_init;
diff --git a/hw/ppc/pef.c b/hw/ppc/pef.c
index 4755334..254f570 100644
--- a/hw/ppc/pef.c
+++ b/hw/ppc/pef.c
@@ -12,9 +12,9 @@
#include "qapi/error.h"
#include "qom/object_interfaces.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "migration/blocker.h"
-#include "exec/confidential-guest-support.h"
+#include "system/confidential-guest-support.h"
#define TYPE_PEF_GUEST "pef-guest"
OBJECT_DECLARE_SIMPLE_TYPE(PefGuest, PEF_GUEST)
@@ -128,7 +128,7 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(PefGuest,
{ TYPE_USER_CREATABLE },
{ NULL })
-static void pef_guest_class_init(ObjectClass *oc, void *data)
+static void pef_guest_class_init(ObjectClass *oc, const void *data)
{
ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index 9b0a6b7..e15cf96 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -14,28 +14,29 @@
#include "hw/sysbus.h"
#include "hw/pci/pci_host.h"
#include "hw/irq.h"
+#include "hw/or-irq.h"
#include "hw/pci-host/mv64361.h"
#include "hw/isa/vt82c686.h"
#include "hw/ide/pci.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/qdev-properties.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/qtest.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/qtest.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/qom-qobject.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "trace.h"
#include "qemu/datadir.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/ppc/vof.h"
#include <libfdt.h>
@@ -73,8 +74,11 @@ OBJECT_DECLARE_TYPE(Pegasos2MachineState, MachineClass, PEGASOS2_MACHINE)
struct Pegasos2MachineState {
MachineState parent_obj;
+
PowerPCCPU *cpu;
DeviceState *mv;
+ IRQState pci_irqs[PCI_NUM_PINS];
+ OrIRQState orirq[PCI_NUM_PINS];
qemu_irq mv_pirq[PCI_NUM_PINS];
qemu_irq via_pirq[PCI_NUM_PINS];
Vof *vof;
@@ -156,8 +160,8 @@ static void pegasos2_init(MachineState *machine)
}
memory_region_init_rom(rom, NULL, "pegasos2.rom", PROM_SIZE, &error_fatal);
memory_region_add_subregion(get_system_memory(), PROM_ADDR, rom);
- sz = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1,
- PPC_ELF_MACHINE, 0, 0);
+ sz = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (sz <= 0) {
sz = load_image_targphys(filename, pm->vof ? 0 : PROM_ADDR, PROM_SIZE);
}
@@ -177,7 +181,6 @@ static void pegasos2_init(MachineState *machine)
pm->mv_pirq[i] = qdev_get_gpio_in_named(pm->mv, "gpp", 12 + i);
}
pci_bus = mv64361_get_pci_bus(pm->mv, 1);
- pci_bus_irqs(pci_bus, pegasos2_pci_irq, pm, PCI_NUM_PINS);
/* VIA VT8231 South Bridge (multifunction PCI device) */
via = OBJECT(pci_new_multifunction(PCI_DEVFN(12, 0), TYPE_VT8231_ISA));
@@ -209,10 +212,35 @@ static void pegasos2_init(MachineState *machine)
/* other PC hardware */
pci_vga_init(pci_bus);
+ /* PCI interrupt routing: lines from pci.0 and pci.1 are ORed */
+ for (int h = 0; h < 2; h++) {
+ DeviceState *pd;
+ g_autofree const char *pn = g_strdup_printf("pcihost%d", h);
+
+ pd = DEVICE(object_resolve_path_component(OBJECT(pm->mv), pn));
+ assert(pd);
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ OrIRQState *ori = &pm->orirq[i];
+
+ if (h == 0) {
+ g_autofree const char *n = g_strdup_printf("pci-orirq[%d]", i);
+
+ object_initialize_child_with_props(OBJECT(pm), n,
+ ori, sizeof(*ori),
+ TYPE_OR_IRQ, &error_fatal,
+ "num-lines", "2", NULL);
+ qdev_realize(DEVICE(ori), NULL, &error_fatal);
+ qemu_init_irq(&pm->pci_irqs[i], pegasos2_pci_irq, pm, i);
+ qdev_connect_gpio_out(DEVICE(ori), 0, &pm->pci_irqs[i]);
+ }
+ qdev_connect_gpio_out(pd, i, qdev_get_gpio_in(DEVICE(ori), h));
+ }
+ }
+
if (machine->kernel_filename) {
sz = load_elf(machine->kernel_filename, NULL, NULL, NULL,
- &pm->kernel_entry, &pm->kernel_addr, NULL, NULL, 1,
- PPC_ELF_MACHINE, 0, 0);
+ &pm->kernel_entry, &pm->kernel_addr, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (sz <= 0) {
error_report("Could not load kernel '%s'",
machine->kernel_filename);
@@ -291,14 +319,14 @@ static void pegasos2_superio_write(uint8_t addr, uint8_t val)
cpu_physical_memory_write(PCI1_IO_BASE + 0x3f1, &val, 1);
}
-static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason)
+static void pegasos2_machine_reset(MachineState *machine, ResetType type)
{
Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine);
void *fdt;
uint64_t d[2];
int sz;
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
if (!pm->vof) {
return; /* Firmware should set up machine so nothing to do */
}
@@ -389,7 +417,6 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason)
d[1] = cpu_to_be64(pm->kernel_size - (pm->kernel_entry - pm->kernel_addr));
qemu_fdt_setprop(fdt, "/chosen", "qemu,boot-kernel", d, sizeof(d));
- qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
g_free(pm->fdt_blob);
pm->fdt_blob = fdt;
@@ -561,7 +588,7 @@ static bool pegasos2_setprop(MachineState *ms, const char *path,
return true;
}
-static void pegasos2_machine_class_init(ObjectClass *oc, void *data)
+static void pegasos2_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
@@ -592,7 +619,7 @@ static const TypeInfo pegasos2_machine_info = {
.parent = TYPE_MACHINE,
.class_init = pegasos2_machine_class_init,
.instance_size = sizeof(Pegasos2MachineState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PPC_VIRTUAL_HYPERVISOR },
{ TYPE_VOF_MACHINE_IF },
{ }
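The pegasos2 hunks above route each PCI interrupt pin from both host bridges (pci.0 and pci.1) through a two-input TYPE_OR_IRQ gate before it reaches pegasos2_pci_irq(). The stand-alone sketch below only illustrates the gate behaviour the board now relies on -- either bridge can hold the line asserted -- and is not the QEMU or-irq implementation; the names and values are made up for the example.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a 2-input OR gate with latched input levels, i.e. the
 * behaviour TYPE_OR_IRQ provides for the shared PCI pins on pegasos2. */
typedef struct {
    bool level[2];   /* last level seen on each input line */
} OrGate;

static bool or_gate_set(OrGate *g, int input, bool level)
{
    g->level[input] = level;
    return g->level[0] || g->level[1];   /* output driven to the controller */
}

int main(void)
{
    OrGate inta = { { false, false } };

    or_gate_set(&inta, 0, true);              /* pci.0 asserts INTA */
    or_gate_set(&inta, 1, true);              /* pci.1 asserts INTA too */
    bool out = or_gate_set(&inta, 0, false);  /* pci.0 deasserts */
    printf("INTA still asserted: %d\n", out); /* prints 1: pci.1 holds it */
    return 0;
}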
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 6b41d1d..4a49e9d 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -1,7 +1,9 @@
/*
* QEMU PowerPC PowerNV machine model
*
- * Copyright (c) 2016, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -22,14 +24,14 @@
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "sysemu/qtest.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/numa.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/hw_accel.h"
+#include "system/qtest.h"
+#include "system/system.h"
+#include "system/numa.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
+#include "system/device_tree.h"
+#include "system/hw_accel.h"
#include "target/ppc/cpu.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/ppc.h"
@@ -53,7 +55,7 @@
#include "hw/ppc/pnv_pnor.h"
#include "hw/isa/isa.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
#include "hw/rtc/mc146818rtc.h"
#include <libfdt.h>
@@ -64,6 +66,8 @@
#define FW_LOAD_ADDR 0x0
#define FW_MAX_SIZE (16 * MiB)
+#define PNOR_FILE_NAME "pnv-pnor.bin"
+
#define KERNEL_LOAD_ADDR 0x20000000
#define KERNEL_MAX_SIZE (128 * MiB)
#define INITRD_LOAD_ADDR 0x28000000
@@ -141,9 +145,9 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
PnvChipClass *pnv_cc = PNV_CHIP_GET_CLASS(chip);
- g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
+ uint32_t *servers_prop;
int i;
- uint32_t pir;
+ uint32_t pir, tir;
uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
0xffffffff, 0xffffffff};
uint32_t tbfreq = PNV_TIMEBASE_FREQ;
@@ -154,7 +158,10 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
char *nodename;
int cpus_offset = get_cpus_node(fdt);
- pir = pnv_cc->chip_pir(chip, pc->hwid, 0);
+ pnv_cc->get_pir_tir(chip, pc->hwid, 0, &pir, &tir);
+
+ /* Only one DT node per (big) core */
+ g_assert(tir == 0);
nodename = g_strdup_printf("%s@%x", dc->fw_name, pir);
offset = fdt_add_subnode(fdt, cpus_offset, nodename);
@@ -235,11 +242,28 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
}
/* Build interrupt servers properties */
- for (i = 0; i < smt_threads; i++) {
- servers_prop[i] = cpu_to_be32(pnv_cc->chip_pir(chip, pc->hwid, i));
+ if (pc->big_core) {
+ servers_prop = g_new(uint32_t, smt_threads * 2);
+ for (i = 0; i < smt_threads; i++) {
+ pnv_cc->get_pir_tir(chip, pc->hwid, i, &pir, NULL);
+ servers_prop[i * 2] = cpu_to_be32(pir);
+
+ pnv_cc->get_pir_tir(chip, pc->hwid + 1, i, &pir, NULL);
+ servers_prop[i * 2 + 1] = cpu_to_be32(pir);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(*servers_prop) * smt_threads
+ * 2)));
+ } else {
+ servers_prop = g_new(uint32_t, smt_threads);
+ for (i = 0; i < smt_threads; i++) {
+ pnv_cc->get_pir_tir(chip, pc->hwid, i, &pir, NULL);
+ servers_prop[i] = cpu_to_be32(pir);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(*servers_prop) * smt_threads)));
}
- _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
- servers_prop, sizeof(*servers_prop) * smt_threads)));
+ g_free(servers_prop);
return offset;
}
@@ -248,14 +272,17 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t hwid,
uint32_t nr_threads)
{
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
- uint32_t pir = pcc->chip_pir(chip, hwid, 0);
- uint64_t addr = PNV_ICP_BASE(chip) | (pir << 12);
+ uint32_t pir;
+ uint64_t addr;
char *name;
const char compat[] = "IBM,power8-icp\0IBM,ppc-xicp";
uint32_t irange[2], i, rsize;
uint64_t *reg;
int offset;
+ pcc->get_pir_tir(chip, hwid, 0, &pir, NULL);
+ addr = PNV_ICP_BASE(chip) | (pir << 12);
+
irange[0] = cpu_to_be32(pir);
irange[1] = cpu_to_be32(nr_threads);
@@ -385,6 +412,10 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt)
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features",
pa_features_300, sizeof(pa_features_300))));
+
+ if (pnv_core->big_core) {
+ i++; /* Big-core groups two QEMU cores */
+ }
}
if (chip->ram_size) {
@@ -446,6 +477,10 @@ static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt)
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features",
pa_features_31, sizeof(pa_features_31))));
+
+ if (pnv_core->big_core) {
+ i++; /* Big-core groups two QEMU cores */
+ }
}
if (chip->ram_size) {
@@ -678,13 +713,13 @@ static void pnv_powerdown_notify(Notifier *n, void *opaque)
}
}
-static void pnv_reset(MachineState *machine, ShutdownCause reason)
+static void pnv_reset(MachineState *machine, ResetType type)
{
PnvMachineState *pnv = PNV_MACHINE(machine);
IPMIBmc *bmc;
void *fdt;
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
/*
* The machine should provide by default an internal BMC simulator.
@@ -705,21 +740,26 @@ static void pnv_reset(MachineState *machine, ShutdownCause reason)
}
}
- fdt = pnv_dt_create(machine);
-
- /* Pack resulting tree */
- _FDT((fdt_pack(fdt)));
+ if (machine->fdt) {
+ fdt = machine->fdt;
+ } else {
+ fdt = pnv_dt_create(machine);
+ /* Pack resulting tree */
+ _FDT((fdt_pack(fdt)));
+ }
- qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt));
- /*
- * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free
- * the existing machine->fdt to avoid leaking it during
- * a reset.
- */
- g_free(machine->fdt);
- machine->fdt = fdt;
+ /* Update machine->fdt with latest fdt */
+ if (machine->fdt != fdt) {
+ /*
+ * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free
+ * the existing machine->fdt to avoid leaking it during
+ * a reset.
+ */
+ g_free(machine->fdt);
+ machine->fdt = fdt;
+ }
}
static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp)
@@ -727,7 +767,8 @@ static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp)
Pnv8Chip *chip8 = PNV8_CHIP(chip);
qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_EXTERNAL);
- qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq);
+ qdev_connect_gpio_out_named(DEVICE(&chip8->lpc), "LPCHC", 0, irq);
+
return pnv_lpc_isa_create(&chip8->lpc, true, errp);
}
@@ -736,25 +777,48 @@ static ISABus *pnv_chip_power8nvl_isa_create(PnvChip *chip, Error **errp)
Pnv8Chip *chip8 = PNV8_CHIP(chip);
qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_LPC_I2C);
- qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq);
+ qdev_connect_gpio_out_named(DEVICE(&chip8->lpc), "LPCHC", 0, irq);
+
return pnv_lpc_isa_create(&chip8->lpc, false, errp);
}
static ISABus *pnv_chip_power9_isa_create(PnvChip *chip, Error **errp)
{
Pnv9Chip *chip9 = PNV9_CHIP(chip);
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPCHC);
+ qemu_irq irq;
+
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPCHC);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "LPCHC", 0, irq);
+
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ0);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 0, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ1);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 1, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ2);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 2, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ3);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 3, irq);
- qdev_connect_gpio_out(DEVICE(&chip9->lpc), 0, irq);
return pnv_lpc_isa_create(&chip9->lpc, false, errp);
}
static ISABus *pnv_chip_power10_isa_create(PnvChip *chip, Error **errp)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPCHC);
+ qemu_irq irq;
+
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPCHC);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "LPCHC", 0, irq);
+
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ0);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 0, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ1);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 1, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ2);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 2, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ3);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 3, irq);
- qdev_connect_gpio_out(DEVICE(&chip10->lpc), 0, irq);
return pnv_lpc_isa_create(&chip10->lpc, false, errp);
}
@@ -875,12 +939,13 @@ static void pnv_init(MachineState *machine)
PnvMachineState *pnv = PNV_MACHINE(machine);
MachineClass *mc = MACHINE_GET_CLASS(machine);
PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(machine);
+ int max_smt_threads = pmc->max_smt_threads;
char *fw_filename;
long fw_size;
uint64_t chip_ram_start = 0;
int i;
char *chip_typename;
- DriveInfo *pnor = drive_get(IF_MTD, 0, 0);
+ DriveInfo *pnor;
DeviceState *dev;
if (kvm_enabled()) {
@@ -896,12 +961,32 @@ static void pnv_init(MachineState *machine)
g_free(sz);
exit(EXIT_FAILURE);
}
+
+ /* checks for invalid option combinations */
+ if (machine->dtb && (strlen(machine->kernel_cmdline) != 0)) {
+ error_report("-append and -dtb cannot be used together, as passed"
+ " command line is ignored in case of custom dtb");
+ exit(EXIT_FAILURE);
+ }
+
memory_region_add_subregion(get_system_memory(), 0, machine->ram);
/*
* Create our simple PNOR device
*/
dev = qdev_new(TYPE_PNV_PNOR);
+ pnor = drive_get(IF_MTD, 0, 0);
+ if (!pnor && defaults_enabled()) {
+ fw_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, PNOR_FILE_NAME);
+ if (!fw_filename) {
+ warn_report("Could not find PNOR '%s'", PNOR_FILE_NAME);
+ } else {
+ QemuOpts *opts;
+ opts = drive_add(IF_MTD, -1, fw_filename, "format=raw,readonly=on");
+ pnor = drive_new(opts, IF_MTD, &error_fatal);
+ g_free(fw_filename);
+ }
+ }
if (pnor) {
qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(pnor));
}
@@ -947,6 +1032,21 @@ static void pnv_init(MachineState *machine)
}
}
+ /* load dtb if passed */
+ if (machine->dtb) {
+ int fdt_size;
+
+ warn_report("with manually passed dtb, some options like '-append'"
+ " will get ignored and the dtb passed will be used as-is");
+
+ /* read the file 'machine->dtb', and load it into 'fdt' buffer */
+ machine->fdt = load_device_tree(machine->dtb, &fdt_size);
+ if (!machine->fdt) {
+ error_report("Could not load dtb '%s'", machine->dtb);
+ exit(1);
+ }
+ }
+
/* MSIs are supported on this platform */
msi_nonbroken = true;
@@ -970,20 +1070,52 @@ static void pnv_init(MachineState *machine)
exit(1);
}
+ /* Set lpar-per-core mode if lpar-per-thread is not supported */
+ if (!pmc->has_lpar_per_thread) {
+ pnv->lpar_per_core = true;
+ }
+
pnv->num_chips =
machine->smp.max_cpus / (machine->smp.cores * machine->smp.threads);
- if (machine->smp.threads > 8) {
- error_report("Cannot support more than 8 threads/core "
- "on a powernv machine");
+ if (pnv->big_core) {
+ if (machine->smp.threads % 2 == 1) {
+ error_report("Cannot support %d threads with big-core option "
+ "because it must be an even number",
+ machine->smp.threads);
+ exit(1);
+ }
+ max_smt_threads *= 2;
+ }
+
+ if (machine->smp.threads > max_smt_threads) {
+ error_report("Cannot support more than %d threads/core "
+ "on %s machine", max_smt_threads, mc->desc);
+ if (pmc->max_smt_threads == 4) {
+ error_report("(use big-core=on for 8 threads per core)");
+ }
exit(1);
}
+
+ if (pnv->big_core) {
+ /*
+ * powernv models PnvCore as an SMT4 core. Big-core requires 2xPnvCore
+ * per core, so adjust topology here. pnv_dt_core() processor
+ * device-tree and TCG SMT code make the 2 cores appear as one big core
+ * from the software point of view. pnv pervasive models and xscoms tend to
+ * see the big core as 2 small core halves.
+ */
+ machine->smp.cores *= 2;
+ machine->smp.threads /= 2;
+ }
+
if (!is_power_of_2(machine->smp.threads)) {
- error_report("Cannot support %d threads/core on a powernv"
+ error_report("Cannot support %d threads/core on a powernv "
"machine because it must be a power of 2",
machine->smp.threads);
exit(1);
}
+
/*
* TODO: should we decide on how many chips we can create based
* on #cores and Venice vs. Murano vs. Naples chip type etc...,
@@ -1017,6 +1149,10 @@ static void pnv_init(MachineState *machine)
&error_fatal);
object_property_set_int(chip, "nr-threads", machine->smp.threads,
&error_fatal);
+ object_property_set_bool(chip, "big-core", pnv->big_core,
+ &error_fatal);
+ object_property_set_bool(chip, "lpar-per-core", pnv->lpar_per_core,
+ &error_fatal);
/*
* The POWER8 machine use the XICS interrupt interface.
* Propagate the XICS fabric to the chip and its controllers.
@@ -1055,7 +1191,7 @@ static void pnv_init(MachineState *machine)
* Since we can not reach the remote BMC machine with LPC memops,
* map it always for now.
*/
- memory_region_add_subregion(pnv->chips[0]->fw_mr, PNOR_SPI_OFFSET,
+ memory_region_add_subregion(pnv->chips[0]->fw_mr, pnv->pnor->lpc_address,
&pnv->pnor->mmio);
/*
@@ -1079,10 +1215,16 @@ static void pnv_init(MachineState *machine)
* 25:28 Core number
* 29:31 Thread ID
*/
-static uint32_t pnv_chip_pir_p8(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
+static void pnv_get_pir_tir_p8(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
{
- return (chip->chip_id << 7) | (core_id << 3) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 7) | (core_id << 3) | thread_id;
+ }
+ if (tir) {
+ *tir = thread_id;
+ }
}
static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu,
@@ -1134,14 +1276,26 @@ static void pnv_chip_power8_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
*
* We only care about the lower bits. uint32_t is fine for the moment.
*/
-static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
-{
- if (chip->nr_threads == 8) {
- return (chip->chip_id << 8) | ((thread_id & 1) << 2) | (core_id << 3) |
- (thread_id >> 1);
+static void pnv_get_pir_tir_p9(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
+{
+ if (chip->big_core) {
+ /* Big-core interleaves thread ID between small-cores */
+ thread_id <<= 1;
+ thread_id |= core_id & 1;
+ core_id >>= 1;
+
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 3) | thread_id;
+ }
} else {
- return (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ }
+ }
+ if (tir) {
+ *tir = thread_id;
}
}
@@ -1156,14 +1310,26 @@ static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id,
*
* We only care about the lower bits. uint32_t is fine for the moment.
*/
-static uint32_t pnv_chip_pir_p10(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
-{
- if (chip->nr_threads == 8) {
- return (chip->chip_id << 8) | ((core_id / 4) << 4) |
- ((core_id % 2) << 3) | thread_id;
+static void pnv_get_pir_tir_p10(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
+{
+ if (chip->big_core) {
+ /* Big-core interleaves thread ID between small-cores */
+ thread_id <<= 1;
+ thread_id |= core_id & 1;
+ core_id >>= 1;
+
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 3) | thread_id;
+ }
} else {
- return (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ }
+ }
+ if (tir) {
+ *tir = thread_id;
}
}
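For reference, the new get_pir_tir hooks can be exercised with plain integer arithmetic. The sketch below mirrors the POWER10 layout from the hunk above (shift constants copied from the patch); the chip/core/thread numbers are hypothetical and the helper is not the QEMU function itself.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copy of the P10 PIR/TIR derivation for illustration only. */
static void get_pir_tir_p10(uint32_t chip_id, uint32_t core_id,
                            uint32_t thread_id, int big_core,
                            uint32_t *pir, uint32_t *tir)
{
    if (big_core) {
        /* Big-core interleaves the thread ID between the two core halves */
        thread_id = (thread_id << 1) | (core_id & 1);
        core_id >>= 1;
        *pir = (chip_id << 8) | (core_id << 3) | thread_id;
    } else {
        *pir = (chip_id << 8) | (core_id << 2) | thread_id;
    }
    *tir = thread_id;
}

int main(void)
{
    uint32_t pir, tir;

    get_pir_tir_p10(0, 5, 2, 1, &pir, &tir);
    printf("big-core:   pir=%u tir=%u\n", pir, tir);  /* pir=21 tir=5 */
    get_pir_tir_p10(0, 5, 2, 0, &pir, &tir);
    printf("small-core: pir=%u tir=%u\n", pir, tir);  /* pir=22 tir=2 */
    return 0;
}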
@@ -1343,8 +1509,11 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp)
int core_hwid = CPU_CORE(pnv_core)->core_id;
for (j = 0; j < CPU_CORE(pnv_core)->nr_threads; j++) {
- uint32_t pir = pcc->chip_pir(chip, core_hwid, j);
- PnvICPState *icp = PNV_ICP(xics_icp_get(chip8->xics, pir));
+ uint32_t pir;
+ PnvICPState *icp;
+
+ pcc->get_pir_tir(chip, core_hwid, j, &pir, NULL);
+ icp = PNV_ICP(xics_icp_get(chip8->xics, pir));
memory_region_add_subregion(&chip8->icp_mmio, pir << 12,
&icp->mmio);
@@ -1402,7 +1571,21 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
return;
}
+ /* HOMER (must be created before OCC) */
+ object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) {
+ return;
+ }
+ /* Homer Xscom region */
+ pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip8->homer.base,
+ &chip8->homer.mem);
+
/* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip8->occ), "homer",
+ OBJECT(&chip8->homer), &error_abort);
if (!qdev_realize(DEVICE(&chip8->occ), NULL, errp)) {
return;
}
@@ -1414,19 +1597,6 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip),
&chip8->occ.sram_regs);
- /* HOMER */
- object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip),
- &error_abort);
- if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) {
- return;
- }
- /* Homer Xscom region */
- pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs);
-
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip),
- &chip8->homer.regs);
-
/* PHB controllers */
for (i = 0; i < chip8->num_phbs; i++) {
PnvPHB *phb = chip8->phbs[i];
@@ -1448,7 +1618,7 @@ static uint32_t pnv_chip_power8_xscom_pcba(PnvChip *chip, uint64_t addr)
return ((addr >> 4) & ~0xfull) | ((addr >> 3) & 0xf);
}
-static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_power8e_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvChipClass *k = PNV_CHIP_CLASS(klass);
@@ -1456,7 +1626,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */
k->cores_mask = POWER8E_CORE_MASK;
k->num_phbs = 3;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1472,7 +1642,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
&k->parent_realize);
}
-static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_power8_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvChipClass *k = PNV_CHIP_CLASS(klass);
@@ -1480,7 +1650,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */
k->cores_mask = POWER8_CORE_MASK;
k->num_phbs = 3;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1496,7 +1666,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
&k->parent_realize);
}
-static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_power8nvl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvChipClass *k = PNV_CHIP_CLASS(klass);
@@ -1504,7 +1674,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */
k->cores_mask = POWER8_CORE_MASK;
k->num_phbs = 4;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1527,6 +1697,7 @@ static void pnv_chip_power9_instance_init(Object *obj)
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
int i;
+ object_initialize_child(obj, "adu", &chip9->adu, TYPE_PNV_ADU);
object_initialize_child(obj, "xive", &chip9->xive, TYPE_PNV_XIVE);
object_property_add_alias(obj, "xive-fabric", OBJECT(&chip9->xive),
"xive-fabric");
@@ -1599,6 +1770,7 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp)
for (i = 0; i < chip->num_pecs; i++) {
PnvPhb4PecState *pec = &chip9->pecs[i];
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ uint32_t pec_cplt_base;
uint32_t pec_nest_base;
uint32_t pec_pci_base;
@@ -1611,9 +1783,12 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp)
return;
}
+ pec_cplt_base = pecc->xscom_cplt_base(pec);
pec_nest_base = pecc->xscom_nest_base(pec);
pec_pci_base = pecc->xscom_pci_base(pec);
+ pnv_xscom_add_subregion(chip, pec_cplt_base,
+ &pec->nest_pervasive.xscom_ctrl_regs_mr);
pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
}
@@ -1637,6 +1812,15 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
return;
}
+ /* ADU */
+ object_property_set_link(OBJECT(&chip9->adu), "lpc", OBJECT(&chip9->lpc),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip9->adu), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV9_XSCOM_ADU_BASE,
+ &chip9->adu.xscom_regs);
+
pnv_chip_quad_realize(chip9, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1696,18 +1880,6 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
pnv_xscom_add_subregion(chip, PNV9_XSCOM_CHIPTOD_BASE,
&chip9->chiptod.xscom_regs);
- /* Create the simplified OCC model */
- if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) {
- return;
- }
- pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs);
- qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in(
- DEVICE(psi9), PSIHB9_IRQ_OCC));
-
- /* OCC SRAM model */
- memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip),
- &chip9->occ.sram_regs);
-
/* SBE */
if (!qdev_realize(DEVICE(&chip9->sbe), NULL, errp)) {
return;
@@ -1719,7 +1891,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out(DEVICE(&chip9->sbe), 0, qdev_get_gpio_in(
DEVICE(psi9), PSIHB9_IRQ_PSU));
- /* HOMER */
+ /* HOMER (must be created before OCC) */
object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip),
&error_abort);
if (!qdev_realize(DEVICE(&chip9->homer), NULL, errp)) {
@@ -1727,10 +1899,23 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
}
/* Homer Xscom region */
pnv_xscom_add_subregion(chip, PNV9_XSCOM_PBA_BASE, &chip9->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip9->homer.base,
+ &chip9->homer.mem);
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip),
- &chip9->homer.regs);
+ /* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip9->occ), "homer",
+ OBJECT(&chip9->homer), &error_abort);
+ if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs);
+ qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in(
+ DEVICE(psi9), PSIHB9_IRQ_OCC));
+
+ /* OCC SRAM model */
+ memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip),
+ &chip9->occ.sram_regs);
/* PEC PHBs */
pnv_chip_power9_pec_realize(chip, &local_err);
@@ -1769,7 +1954,7 @@ static uint32_t pnv_chip_power9_xscom_pcba(PnvChip *chip, uint64_t addr)
return addr >> 3;
}
-static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_power9_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvChipClass *k = PNV_CHIP_CLASS(klass);
@@ -1777,7 +1962,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x220d104900008000ull; /* P9 Nimbus DD2.0 */
k->cores_mask = POWER9_CORE_MASK;
- k->chip_pir = pnv_chip_pir_p9;
+ k->get_pir_tir = pnv_get_pir_tir_p9;
k->intc_create = pnv_chip_power9_intc_create;
k->intc_reset = pnv_chip_power9_intc_reset;
k->intc_destroy = pnv_chip_power9_intc_destroy;
@@ -1803,6 +1988,7 @@ static void pnv_chip_power10_instance_init(Object *obj)
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
int i;
+ object_initialize_child(obj, "adu", &chip10->adu, TYPE_PNV_ADU);
object_initialize_child(obj, "xive", &chip10->xive, TYPE_PNV_XIVE2);
object_property_add_alias(obj, "xive-fabric", OBJECT(&chip10->xive),
"xive-fabric");
@@ -1826,6 +2012,11 @@ static void pnv_chip_power10_instance_init(Object *obj)
for (i = 0; i < pcc->i2c_num_engines; i++) {
object_initialize_child(obj, "i2c[*]", &chip10->i2c[i], TYPE_PNV_I2C);
}
+
+ for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) {
+ object_initialize_child(obj, "pib_spic[*]", &chip10->pib_spic[i],
+ TYPE_PNV_SPI);
+ }
}
static void pnv_chip_power10_quad_realize(Pnv10Chip *chip10, Error **errp)
@@ -1858,6 +2049,7 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
for (i = 0; i < chip->num_pecs; i++) {
PnvPhb4PecState *pec = &chip10->pecs[i];
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ uint32_t pec_cplt_base;
uint32_t pec_nest_base;
uint32_t pec_pci_base;
@@ -1870,9 +2062,12 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
return;
}
+ pec_cplt_base = pecc->xscom_cplt_base(pec);
pec_nest_base = pecc->xscom_nest_base(pec);
pec_pci_base = pecc->xscom_pci_base(pec);
+ pnv_xscom_add_subregion(chip, pec_cplt_base,
+ &pec->nest_pervasive.xscom_ctrl_regs_mr);
pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
}
@@ -1895,6 +2090,15 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
return;
}
+ /* ADU */
+ object_property_set_link(OBJECT(&chip10->adu), "lpc", OBJECT(&chip10->lpc),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->adu), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_ADU_BASE,
+ &chip10->adu.xscom_regs);
+
pnv_chip_power10_quad_realize(chip10, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1958,7 +2162,22 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
pnv_xscom_add_subregion(chip, PNV10_XSCOM_CHIPTOD_BASE,
&chip10->chiptod.xscom_regs);
+ /* HOMER (must be created before OCC) */
+ object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
+ return;
+ }
+ /* Homer Xscom region */
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
+ &chip10->homer.pba_regs);
+ /* Homer RAM region */
+ memory_region_add_subregion(get_system_memory(), chip10->homer.base,
+ &chip10->homer.mem);
+
/* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip10->occ), "homer",
+ OBJECT(&chip10->homer), &error_abort);
if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) {
return;
}
@@ -1983,20 +2202,6 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out(DEVICE(&chip10->sbe), 0, qdev_get_gpio_in(
DEVICE(&chip10->psi), PSIHB9_IRQ_PSU));
- /* HOMER */
- object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
- &error_abort);
- if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
- return;
- }
- /* Homer Xscom region */
- pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
- &chip10->homer.pba_regs);
-
- /* Homer mmio region */
- memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip),
- &chip10->homer.regs);
-
/* N1 chiplet */
if (!qdev_realize(DEVICE(&chip10->n1_chiplet), NULL, errp)) {
return;
@@ -2040,7 +2245,23 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(DEVICE(&chip10->psi),
PSIHB9_IRQ_SBE_I2C));
}
-
+ /* PIB SPI Controller */
+ for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) {
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "spic_num",
+ i, &error_fatal);
+ /* pib_spic[2] connected to 25csm04 which implements 1 byte transfer */
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "transfer_len",
+ (i == 2) ? 1 : 4, &error_fatal);
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "chip-id",
+ chip->chip_id, &error_fatal);
+ if (!sysbus_realize(SYS_BUS_DEVICE(OBJECT
+ (&chip10->pib_spic[i])), errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_PIB_SPIC_BASE +
+ i * PNV10_XSCOM_PIB_SPIC_SIZE,
+ &chip10->pib_spic[i].xscom_spic_regs);
+ }
}
static void pnv_rainier_i2c_init(PnvMachineState *pnv)
@@ -2081,15 +2302,15 @@ static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr)
return addr >> 3;
}
-static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_power10_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvChipClass *k = PNV_CHIP_CLASS(klass);
static const int i2c_ports_per_engine[PNV10_CHIP_MAX_I2C] = {14, 14, 2, 16};
- k->chip_cfam_id = 0x120da04900008000ull; /* P10 DD1.0 (with NX) */
+ k->chip_cfam_id = 0x220da04980000000ull; /* P10 DD2.0 (with NX) */
k->cores_mask = POWER10_CORE_MASK;
- k->chip_pir = pnv_chip_pir_p10;
+ k->get_pir_tir = pnv_get_pir_tir_p10;
k->intc_create = pnv_chip_power10_intc_create;
k->intc_reset = pnv_chip_power10_intc_reset;
k->intc_destroy = pnv_chip_power10_intc_destroy;
@@ -2108,7 +2329,8 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
&k->parent_realize);
}
-static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
+static void pnv_chip_core_sanitize(PnvMachineState *pnv, PnvChip *chip,
+ Error **errp)
{
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
int cores_max;
@@ -2129,6 +2351,17 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
}
chip->cores_mask &= pcc->cores_mask;
+ /* Ensure small-cores are paired up in big-core mode */
+ if (pnv->big_core) {
+ uint64_t even_cores = chip->cores_mask & 0x5555555555555555ULL;
+ uint64_t odd_cores = chip->cores_mask & 0xaaaaaaaaaaaaaaaaULL;
+
+ if (even_cores ^ (odd_cores >> 1)) {
+ error_setg(errp, "warning: unpaired cores in big-core mode!");
+ return;
+ }
+ }
+
/* now that we have a sane layout, let check the number of cores */
cores_max = ctpop64(chip->cores_mask);
if (chip->nr_cores > cores_max) {
@@ -2140,11 +2373,12 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
{
+ PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
+ PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(pnv);
Error *error = NULL;
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
const char *typename = pnv_chip_core_typename(chip);
int i, core_hwid;
- PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
if (!object_class_by_name(typename)) {
error_setg(errp, "Unable to find PowerNV CPU Core '%s'", typename);
@@ -2152,7 +2386,7 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
}
/* Cores */
- pnv_chip_core_sanitize(chip, &error);
+ pnv_chip_core_sanitize(pnv, chip, &error);
if (error) {
error_propagate(errp, error);
return;
@@ -2183,8 +2417,15 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
&error_fatal);
object_property_set_int(OBJECT(pnv_core), "hrmor", pnv->fw_load_addr,
&error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "big-core", chip->big_core,
+ &error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "quirk-tb-big-core",
+ pmc->quirk_tb_big_core, &error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "lpar-per-core",
+ chip->lpar_per_core, &error_fatal);
object_property_set_link(OBJECT(pnv_core), "chip", OBJECT(chip),
&error_abort);
+
qdev_realize(DEVICE(pnv_core), NULL, &error_fatal);
/* Each core has an XSCOM MMIO region */
@@ -2209,17 +2450,18 @@ static void pnv_chip_realize(DeviceState *dev, Error **errp)
}
}
-static Property pnv_chip_properties[] = {
+static const Property pnv_chip_properties[] = {
DEFINE_PROP_UINT32("chip-id", PnvChip, chip_id, 0),
DEFINE_PROP_UINT64("ram-start", PnvChip, ram_start, 0),
DEFINE_PROP_UINT64("ram-size", PnvChip, ram_size, 0),
DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1),
DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0),
DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("big-core", PnvChip, big_core, false),
+ DEFINE_PROP_BOOL("lpar-per-core", PnvChip, lpar_per_core, false),
};
-static void pnv_chip_class_init(ObjectClass *klass, void *data)
+static void pnv_chip_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -2368,7 +2610,7 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf)
static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
@@ -2382,8 +2624,8 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
- priority, logic_serv, match);
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
+ cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
@@ -2397,7 +2639,7 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
@@ -2411,8 +2653,8 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
- priority, logic_serv, match);
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
+ cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
@@ -2424,7 +2666,65 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
return total_count;
}
-static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
+static int pnv10_xive_broadcast(XiveFabric *xfb,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore,
+ uint8_t priority)
+{
+ PnvMachineState *pnv = PNV_MACHINE(xfb);
+ int i;
+
+ for (i = 0; i < pnv->num_chips; i++) {
+ Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
+ XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
+ XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
+
+ xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority);
+ }
+ return 0;
+}
+
+static bool pnv_machine_get_big_core(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ return pnv->big_core;
+}
+
+static void pnv_machine_set_big_core(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ pnv->big_core = value;
+}
+
+static bool pnv_machine_get_lpar_per_core(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ return pnv->lpar_per_core;
+}
+
+static void pnv_machine_set_lpar_per_core(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ pnv->lpar_per_core = value;
+}
+
+static bool pnv_machine_get_hb(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+
+ return !!pnv->fw_load_addr;
+}
+
+static void pnv_machine_set_hb(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+
+ if (value) {
+ pnv->fw_load_addr = 0x8000000;
+ }
+}
+
+static void pnv_machine_power8_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
@@ -2446,11 +2746,14 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 8;
+ /* POWER8 is always lpar-per-core mode */
+ pmc->has_lpar_per_thread = false;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
}
-static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
+static void pnv_machine_power9_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
@@ -2470,12 +2773,26 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 4;
+ pmc->has_lpar_per_thread = true;
pmc->dt_power_mgt = pnv_dt_power_mgt;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
+
+ object_class_property_add_bool(oc, "big-core",
+ pnv_machine_get_big_core,
+ pnv_machine_set_big_core);
+ object_class_property_set_description(oc, "big-core",
+ "Use big-core (aka fused-core) mode");
+
+ object_class_property_add_bool(oc, "lpar-per-core",
+ pnv_machine_get_lpar_per_core,
+ pnv_machine_set_lpar_per_core);
+ object_class_property_set_description(oc, "lpar-per-core",
+ "Use 1 LPAR per core mode");
}
-static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
+static void pnv_machine_p10_common_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc);
@@ -2494,22 +2811,44 @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 4;
+ pmc->has_lpar_per_thread = true;
+ pmc->quirk_tb_big_core = true;
pmc->dt_power_mgt = pnv_dt_power_mgt;
xfc->match_nvt = pnv10_xive_match_nvt;
+ xfc->broadcast = pnv10_xive_broadcast;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
}
-static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
+static void pnv_machine_power10_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
pnv_machine_p10_common_class_init(oc, data);
mc->desc = "IBM PowerNV (Non-Virtualized) POWER10";
+
+ /*
+ * This is the parent of the POWER10 Rainier class, so properties go here
+ * rather than common init (which would add them to both parent and
+ * child which is invalid).
+ */
+ object_class_property_add_bool(oc, "big-core",
+ pnv_machine_get_big_core,
+ pnv_machine_set_big_core);
+ object_class_property_set_description(oc, "big-core",
+ "Use big-core (aka fused-core) mode");
+
+ object_class_property_add_bool(oc, "lpar-per-core",
+ pnv_machine_get_lpar_per_core,
+ pnv_machine_set_lpar_per_core);
+ object_class_property_set_description(oc, "lpar-per-core",
+ "Use 1 LPAR per core mode");
}
-static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data)
+static void pnv_machine_p10_rainier_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc);
@@ -2519,22 +2858,6 @@ static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data)
pmc->i2c_init = pnv_rainier_i2c_init;
}
-static bool pnv_machine_get_hb(Object *obj, Error **errp)
-{
- PnvMachineState *pnv = PNV_MACHINE(obj);
-
- return !!pnv->fw_load_addr;
-}
-
-static void pnv_machine_set_hb(Object *obj, bool value, Error **errp)
-{
- PnvMachineState *pnv = PNV_MACHINE(obj);
-
- if (value) {
- pnv->fw_load_addr = 0x8000000;
- }
-}
-
static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
CPUPPCState *env = cpu_env(cs);
@@ -2561,11 +2884,23 @@ static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
*/
env->spr[SPR_SRR1] |= SRR1_WAKESCOM;
}
+ if (arg.host_int == 1) {
+ cpu_resume(cs);
+ }
+}
+
+/*
+ * Send a SRESET (NMI) interrupt to the CPU, and resume execution if it was
+ * paused.
+ */
+void pnv_cpu_do_nmi_resume(CPUState *cs)
+{
+ async_run_on_cpu(cs, pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_HOST_INT(1));
}
static void pnv_cpu_do_nmi(PnvChip *chip, PowerPCCPU *cpu, void *opaque)
{
- async_run_on_cpu(CPU(cpu), pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
+ async_run_on_cpu(CPU(cpu), pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_HOST_INT(0));
}
static void pnv_nmi(NMIState *n, int cpu_index, Error **errp)
@@ -2578,7 +2913,7 @@ static void pnv_nmi(NMIState *n, int cpu_index, Error **errp)
}
}
-static void pnv_machine_class_init(ObjectClass *oc, void *data)
+static void pnv_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
@@ -2638,7 +2973,7 @@ static const TypeInfo types[] = {
.name = MACHINE_TYPE_NAME("powernv10"),
.parent = TYPE_PNV_MACHINE,
.class_init = pnv_machine_power10_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_FABRIC },
{ },
},
@@ -2647,7 +2982,7 @@ static const TypeInfo types[] = {
.name = MACHINE_TYPE_NAME("powernv9"),
.parent = TYPE_PNV_MACHINE,
.class_init = pnv_machine_power9_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_FABRIC },
{ },
},
@@ -2656,7 +2991,7 @@ static const TypeInfo types[] = {
.name = MACHINE_TYPE_NAME("powernv8"),
.parent = TYPE_PNV_MACHINE,
.class_init = pnv_machine_power8_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XICS_FABRIC },
{ },
},
@@ -2668,7 +3003,7 @@ static const TypeInfo types[] = {
.instance_size = sizeof(PnvMachineState),
.class_init = pnv_machine_class_init,
.class_size = sizeof(PnvMachineClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ TYPE_NMI },
{ },
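The big-core handling added to pnv_init() above first doubles the per-core thread limit, then splits each fused core into two SMT4 PnvCore halves. A stand-alone sketch of that arithmetic, using a hypothetical '-smp cores=2,threads=8' request on a POWER10 machine:

#include <stdio.h>

int main(void)
{
    int cores = 2, threads = 8;    /* hypothetical -smp cores=2,threads=8 */
    int max_smt_threads = 4;       /* a POWER10 PnvCore models an SMT4 core */
    int big_core = 1;              /* big-core=on */

    if (big_core) {
        if (threads % 2) {
            printf("big-core needs an even thread count\n");
            return 1;
        }
        max_smt_threads *= 2;      /* a fused core exposes up to 8 threads */
    }
    if (threads > max_smt_threads) {
        printf("too many threads per core\n");
        return 1;
    }
    if (big_core) {
        cores *= 2;                /* one PnvCore per small-core half */
        threads /= 2;              /* each half runs SMT4 */
    }
    printf("modelled as %d PnvCore objects x %d threads\n", cores, threads);
    return 0;                      /* prints: 4 PnvCore objects x 4 threads */
}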
diff --git a/hw/ppc/pnv_adu.c b/hw/ppc/pnv_adu.c
new file mode 100644
index 0000000..005fbda
--- /dev/null
+++ b/hw/ppc/pnv_adu.c
@@ -0,0 +1,217 @@
+/*
+ * QEMU PowerPC PowerNV ADU unit
+ *
+ * The ADU unit actually implements XSCOM, which is the bridge between MMIO
+ * and PIB. However it also includes control and status registers and other
+ * functions that are exposed as PIB (xscom) registers.
+ *
+ * To keep things simple, pnv_xscom.c remains the XSCOM bridge
+ * implementation, and pnv_adu.c implements the ADU registers and other
+ * functions.
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_adu.h"
+#include "hw/ppc/pnv_chip.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "trace.h"
+
+#define ADU_LPC_BASE_REG 0x40
+#define ADU_LPC_CMD_REG 0x41
+#define ADU_LPC_DATA_REG 0x42
+#define ADU_LPC_STATUS_REG 0x43
+
+static uint64_t pnv_adu_xscom_read(void *opaque, hwaddr addr, unsigned width)
+{
+ PnvADU *adu = PNV_ADU(opaque);
+ uint32_t offset = addr >> 3;
+ uint64_t val = 0;
+
+ switch (offset) {
+ case 0x18: /* Receive status reg */
+ case 0x12: /* log register */
+ case 0x13: /* error register */
+ break;
+ case ADU_LPC_BASE_REG:
+ /*
+ * LPC Address Map in Pervasive ADU Workbook
+ *
+ * return PNV10_LPCM_BASE(chip) & PPC_BITMASK(8, 31);
+ * XXX: implement as class property, or get from LPC?
+ */
+ qemu_log_mask(LOG_UNIMP, "ADU: LPC_BASE_REG is not implemented\n");
+ break;
+ case ADU_LPC_CMD_REG:
+ val = adu->lpc_cmd_reg;
+ break;
+ case ADU_LPC_DATA_REG:
+ val = adu->lpc_data_reg;
+ break;
+ case ADU_LPC_STATUS_REG:
+ val = PPC_BIT(0); /* ack / done */
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "ADU Unimplemented read register: 0x%08x\n",
+ offset);
+ }
+
+ trace_pnv_adu_xscom_read(addr, val);
+
+ return val;
+}
+
+static bool lpc_cmd_read(PnvADU *adu)
+{
+ return !!(adu->lpc_cmd_reg & PPC_BIT(0));
+}
+
+static bool lpc_cmd_write(PnvADU *adu)
+{
+ return !lpc_cmd_read(adu);
+}
+
+static uint32_t lpc_cmd_addr(PnvADU *adu)
+{
+ return (adu->lpc_cmd_reg & PPC_BITMASK(32, 63)) >> PPC_BIT_NR(63);
+}
+
+static uint32_t lpc_cmd_size(PnvADU *adu)
+{
+ return (adu->lpc_cmd_reg & PPC_BITMASK(5, 11)) >> PPC_BIT_NR(11);
+}
+
+static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ PnvADU *adu = PNV_ADU(opaque);
+ uint32_t offset = addr >> 3;
+
+ trace_pnv_adu_xscom_write(addr, val);
+
+ switch (offset) {
+ case 0x18: /* Receive status reg */
+ case 0x12: /* log register */
+ case 0x13: /* error register */
+ break;
+
+ case ADU_LPC_BASE_REG:
+ qemu_log_mask(LOG_UNIMP,
+ "ADU: Changing LPC_BASE_REG is not implemented\n");
+ break;
+
+ case ADU_LPC_CMD_REG:
+ adu->lpc_cmd_reg = val;
+ if (lpc_cmd_read(adu)) {
+ uint32_t lpc_addr = lpc_cmd_addr(adu);
+ uint32_t lpc_size = lpc_cmd_size(adu);
+ uint64_t data = 0;
+
+ if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access "
+ "size:%" PRId32 "\n", lpc_size);
+ break;
+ }
+
+ pnv_lpc_opb_read(adu->lpc, lpc_addr, (void *)&data, lpc_size);
+
+ /*
+ * ADU access is performed within 8-byte aligned sectors. Smaller
+ * access sizes don't get formatted to the least significant byte,
+ * but rather appear in the data reg at the same offset as the
+ * address in memory. This shifts them into that position.
+ */
+ adu->lpc_data_reg = be64_to_cpu(data) >> ((lpc_addr & 7) * 8);
+ }
+ break;
+
+ case ADU_LPC_DATA_REG:
+ adu->lpc_data_reg = val;
+ if (lpc_cmd_write(adu)) {
+ uint32_t lpc_addr = lpc_cmd_addr(adu);
+ uint32_t lpc_size = lpc_cmd_size(adu);
+ uint64_t data;
+
+ if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access "
+ "size:%" PRId32 "\n", lpc_size);
+ break;
+ }
+
+ data = cpu_to_be64(val) >> ((lpc_addr & 7) * 8); /* See above */
+ pnv_lpc_opb_write(adu->lpc, lpc_addr, (void *)&data, lpc_size);
+ }
+ break;
+
+ case ADU_LPC_STATUS_REG:
+ qemu_log_mask(LOG_UNIMP,
+ "ADU: Changing LPC_STATUS_REG is not implemented\n");
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "ADU Unimplemented write register: 0x%08x\n",
+ offset);
+ }
+}
+
+const MemoryRegionOps pnv_adu_xscom_ops = {
+ .read = pnv_adu_xscom_read,
+ .write = pnv_adu_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_adu_realize(DeviceState *dev, Error **errp)
+{
+ PnvADU *adu = PNV_ADU(dev);
+
+ assert(adu->lpc);
+
+ /* XScom regions for ADU registers */
+ pnv_xscom_region_init(&adu->xscom_regs, OBJECT(dev),
+ &pnv_adu_xscom_ops, adu, "xscom-adu",
+ PNV9_XSCOM_ADU_SIZE);
+}
+
+static const Property pnv_adu_properties[] = {
+ DEFINE_PROP_LINK("lpc", PnvADU, lpc, TYPE_PNV_LPC, PnvLpcController *),
+};
+
+static void pnv_adu_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = pnv_adu_realize;
+ dc->desc = "PowerNV ADU";
+ device_class_set_props(dc, pnv_adu_properties);
+ dc->user_creatable = false;
+}
+
+static const TypeInfo pnv_adu_type_info = {
+ .name = TYPE_PNV_ADU,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(PnvADU),
+ .class_init = pnv_adu_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { } },
+};
+
+static void pnv_adu_register_types(void)
+{
+ type_register_static(&pnv_adu_type_info);
+}
+
+type_init(pnv_adu_register_types);
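The comment in pnv_adu_xscom_write() above notes that sub-8-byte LPC data is not right-justified in ADU_LPC_DATA_REG but keeps the byte offset it has inside its 8-byte sector. The stand-alone sketch below illustrates that placement for a hypothetical 4-byte read at LPC address 0x1004; it only models the register layout, not the OPB access itself.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lpc_addr = 0x1004;    /* hypothetical 4-byte read, sector offset 4 */
    uint8_t sector[8] = { 0 };     /* big-endian image of the 8-byte sector */
    uint64_t data_reg = 0;

    /* pretend the LPC read at lpc_addr returned the bytes CA FE BA BE */
    sector[(lpc_addr & 7) + 0] = 0xCA;
    sector[(lpc_addr & 7) + 1] = 0xFE;
    sector[(lpc_addr & 7) + 2] = 0xBA;
    sector[(lpc_addr & 7) + 3] = 0xBE;

    /* fold the sector into the 64-bit data register, MSB first */
    for (unsigned i = 0; i < 8; i++) {
        data_reg = (data_reg << 8) | sector[i];
    }
    printf("ADU_LPC_DATA_REG = 0x%016llx\n", (unsigned long long)data_reg);
    /* prints 0x00000000cafebabe: the value sits at sector offset 4 rather
     * than in the least significant bytes */
    return 0;
}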
diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c
index 0c1274d..fb70a8c 100644
--- a/hw/ppc/pnv_bmc.c
+++ b/hw/ppc/pnv_bmc.c
@@ -174,8 +174,8 @@ static void hiomap_cmd(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len,
{
PnvPnor *pnor = PNV_PNOR(object_property_get_link(OBJECT(ibs), "pnor",
&error_abort));
+ uint32_t pnor_addr = pnor->lpc_address;
uint32_t pnor_size = pnor->size;
- uint32_t pnor_addr = PNOR_SPI_OFFSET;
bool readonly = false;
rsp_buffer_push(rsp, cmd[2]);
@@ -251,10 +251,38 @@ static const IPMINetfn hiomap_netfn = {
void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor)
{
+ uint32_t pnor_addr = pnor->lpc_address;
+ uint32_t pnor_size = pnor->size;
+
if (!pnv_bmc_is_simulator(bmc)) {
return;
}
+ /*
+ * The HIOMAP protocol uses block units and 16-bit addressing.
+ * Prevent overflow or misalignment.
+ */
+ if (pnor_addr >= 1U << (BLOCK_SHIFT + 16)) {
+ warn_report("PNOR address is larger than 2^%d, disabling PNOR",
+ BLOCK_SHIFT + 16);
+ return;
+ }
+ if (pnor_addr & ((1U << BLOCK_SHIFT) - 1)) {
+ warn_report("PNOR address is not aligned to 2^%d, disabling PNOR",
+ BLOCK_SHIFT);
+ return;
+ }
+ if (pnor_size > 1U << (BLOCK_SHIFT + 16)) {
+ warn_report("PNOR size is larger than 2^%d, disabling PNOR",
+ BLOCK_SHIFT + 16);
+ return;
+ }
+ if (pnor_size & ((1U << BLOCK_SHIFT) - 1)) {
+ warn_report("PNOR size is not aligned to 2^%d, disabling PNOR",
+ BLOCK_SHIFT);
+ return;
+ }
+
object_ref(OBJECT(pnor));
object_property_add_const_link(OBJECT(bmc), "pnor", OBJECT(pnor));
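The new checks in pnv_bmc_set_pnor() exist because HIOMAP exchanges PNOR addresses and sizes as 16-bit block counts. Below is a stand-alone sketch of the same acceptance test; the block_shift value and the PNOR geometry are made-up example values, the real block size comes from BLOCK_SHIFT in pnv_bmc.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept a PNOR mapping only if it is block aligned and fits in a 16-bit
 * block count, mirroring the checks added above. */
static bool hiomap_pnor_ok(uint32_t addr, uint32_t size, unsigned block_shift)
{
    uint32_t block = 1U << block_shift;
    uint64_t limit = (uint64_t)block << 16;   /* 2^(block_shift + 16) */

    if (addr >= limit || size > limit) {
        return false;                         /* overflows 16-bit block count */
    }
    if ((addr | size) & (block - 1)) {
        return false;                         /* not block aligned */
    }
    return true;
}

int main(void)
{
    /* hypothetical 64 MiB PNOR mapped at 0x0c000000, 4 KiB blocks */
    printf("%s\n", hiomap_pnor_ok(0x0c000000, 64 << 20, 12) ? "ok" : "rejected");
    printf("%s\n", hiomap_pnor_ok(0x0c000800, 64 << 20, 12) ? "ok" : "rejected");
    return 0;
}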
diff --git a/hw/ppc/pnv_chiptod.c b/hw/ppc/pnv_chiptod.c
index 3831a72..b9e9c7b 100644
--- a/hw/ppc/pnv_chiptod.c
+++ b/hw/ppc/pnv_chiptod.c
@@ -23,7 +23,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "target/ppc/cpu.h"
#include "qapi/error.h"
#include "qemu/log.h"
@@ -364,8 +364,7 @@ static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr,
qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg"
" TOD_MOVE_TOD_TO_TB_REG with no slave target\n");
} else {
- PowerPCCPU *cpu = chiptod->slave_pc_target->threads[0];
- CPUPPCState *env = &cpu->env;
+ PnvCore *pc = chiptod->slave_pc_target;
/*
* Moving TOD to TB will set the TB of all threads in a
@@ -377,8 +376,8 @@ static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr,
* thread 0.
*/
- if (env->pnv_tod_tbst.tb_ready_for_tod) {
- env->pnv_tod_tbst.tod_sent_to_tb = 1;
+ if (pc->tod_state.tb_ready_for_tod) {
+ pc->tod_state.tod_sent_to_tb = 1;
} else {
qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg"
" TOD_MOVE_TOD_TO_TB_REG with TB not ready to"
@@ -451,14 +450,13 @@ static int pnv_chiptod_power9_dt_xscom(PnvXScomInterface *dev, void *fdt,
return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat));
}
-static Property pnv_chiptod_properties[] = {
+static const Property pnv_chiptod_properties[] = {
DEFINE_PROP_BOOL("primary", PnvChipTOD, primary, false),
DEFINE_PROP_BOOL("secondary", PnvChipTOD, secondary, false),
DEFINE_PROP_LINK("chip", PnvChipTOD , chip, TYPE_PNV_CHIP, PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_chiptod_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_chiptod_power9_class_init(ObjectClass *klass, const void *data)
{
PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -480,7 +478,7 @@ static const TypeInfo pnv_chiptod_power9_type_info = {
.parent = TYPE_PNV_CHIPTOD,
.instance_size = sizeof(PnvChipTOD),
.class_init = pnv_chiptod_power9_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
@@ -494,7 +492,7 @@ static int pnv_chiptod_power10_dt_xscom(PnvXScomInterface *dev, void *fdt,
return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat));
}
-static void pnv_chiptod_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_chiptod_power10_class_init(ObjectClass *klass, const void *data)
{
PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -516,7 +514,7 @@ static const TypeInfo pnv_chiptod_power10_type_info = {
.parent = TYPE_PNV_CHIPTOD,
.instance_size = sizeof(PnvChipTOD),
.class_init = pnv_chiptod_power10_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
@@ -557,7 +555,7 @@ static void pnv_chiptod_unrealize(DeviceState *dev)
qemu_unregister_reset(pnv_chiptod_reset, chiptod);
}
-static void pnv_chiptod_class_init(ObjectClass *klass, void *data)
+static void pnv_chiptod_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c
index f40ab72..08c2022 100644
--- a/hw/ppc/pnv_core.c
+++ b/hw/ppc/pnv_core.c
@@ -18,7 +18,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -58,6 +58,10 @@ static void pnv_core_cpu_reset(PnvCore *pc, PowerPCCPU *cpu)
env->nip = 0x10;
env->msr |= MSR_HVB; /* Hypervisor mode */
env->spr[SPR_HRMOR] = pc->hrmor;
+ if (pc->big_core) {
+ /* Clear "small core" bit on Power9/10 (this is set in default PVR) */
+ env->spr[SPR_PVR] &= ~PPC_BIT(51);
+ }
hreg_compute_hflags(env);
ppc_maybe_interrupt(env);
@@ -181,16 +185,43 @@ static const MemoryRegionOps pnv_core_power9_xscom_ops = {
*/
#define PNV10_XSCOM_EC_CORE_THREAD_STATE 0x412
+#define PNV10_XSCOM_EC_CORE_THREAD_INFO 0x413
+#define PNV10_XSCOM_EC_CORE_DIRECT_CONTROLS 0x449
+#define PNV10_XSCOM_EC_CORE_RAS_STATUS 0x454
static uint64_t pnv_core_power10_xscom_read(void *opaque, hwaddr addr,
unsigned int width)
{
+ PnvCore *pc = PNV_CORE(opaque);
+ int nr_threads = CPU_CORE(pc)->nr_threads;
+ int i;
uint32_t offset = addr >> 3;
uint64_t val = 0;
switch (offset) {
case PNV10_XSCOM_EC_CORE_THREAD_STATE:
- val = 0;
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUState *cs = CPU(cpu);
+
+ if (cs->halted) {
+ val |= PPC_BIT(56 + i);
+ }
+ }
+ if (pc->lpar_per_core) {
+ val |= PPC_BIT(62);
+ }
+ break;
+ case PNV10_XSCOM_EC_CORE_THREAD_INFO:
+ break;
+ case PNV10_XSCOM_EC_CORE_RAS_STATUS:
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUPPCState *env = &cpu->env;
+ if (env->quiesced) {
+ val |= PPC_BIT(0 + 8 * i) | PPC_BIT(1 + 8 * i);
+ }
+ }
break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp read 0x%08x\n", __func__,
@@ -203,9 +234,55 @@ static uint64_t pnv_core_power10_xscom_read(void *opaque, hwaddr addr,
static void pnv_core_power10_xscom_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int width)
{
+ PnvCore *pc = PNV_CORE(opaque);
+ int nr_threads = CPU_CORE(pc)->nr_threads;
+ int i;
uint32_t offset = addr >> 3;
switch (offset) {
+ case PNV10_XSCOM_EC_CORE_DIRECT_CONTROLS:
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ if (val & PPC_BIT(7 + 8 * i)) { /* stop */
+ val &= ~PPC_BIT(7 + 8 * i);
+ env->quiesced = true;
+ ppc_maybe_interrupt(env);
+ cpu_pause(cs);
+ }
+ if (val & PPC_BIT(6 + 8 * i)) { /* start */
+ val &= ~PPC_BIT(6 + 8 * i);
+ env->quiesced = false;
+ ppc_maybe_interrupt(env);
+ cpu_resume(cs);
+ }
+ if (val & PPC_BIT(4 + 8 * i)) { /* sreset */
+ val &= ~PPC_BIT(4 + 8 * i);
+ env->quiesced = false;
+ ppc_maybe_interrupt(env);
+ pnv_cpu_do_nmi_resume(cs);
+ }
+ if (val & PPC_BIT(3 + 8 * i)) { /* clear maint */
+ env->quiesced = false;
+ ppc_maybe_interrupt(env);
+ /*
+ * Hardware has very particular cases for where clear maint
+ * must be used and where start must be used to resume a
+ * thread. These are not modelled exactly; just treat
+ * clear maint the same as start.
+ */
+ val &= ~PPC_BIT(3 + 8 * i);
+ cpu_resume(cs);
+ }
+ }
+ if (val) {
+ qemu_log_mask(LOG_UNIMP, "%s: unimp bits in DIRECT_CONTROLS "
+ "0x%016" PRIx64 "\n", __func__, val);
+ }
+ break;
+
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp write 0x%08x\n", __func__,
offset);
@@ -227,8 +304,9 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp,
{
CPUPPCState *env = &cpu->env;
int core_hwid;
- ppc_spr_t *pir = &env->spr_cb[SPR_PIR];
- ppc_spr_t *tir = &env->spr_cb[SPR_TIR];
+ ppc_spr_t *pir_spr = &env->spr_cb[SPR_PIR];
+ ppc_spr_t *tir_spr = &env->spr_cb[SPR_TIR];
+ uint32_t pir, tir;
Error *local_err = NULL;
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(pc->chip);
@@ -244,8 +322,22 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp,
core_hwid = object_property_get_uint(OBJECT(pc), "hwid", &error_abort);
- tir->default_value = thread_index;
- pir->default_value = pcc->chip_pir(pc->chip, core_hwid, thread_index);
+ pcc->get_pir_tir(pc->chip, core_hwid, thread_index, &pir, &tir);
+ pir_spr->default_value = pir;
+ tir_spr->default_value = tir;
+
+ env->chip_index = pc->chip->chip_id;
+
+ if (pc->big_core) {
+ /* 2 "small cores" get the same core index for SMT operations */
+ env->core_index = core_hwid >> 1;
+ } else {
+ env->core_index = core_hwid;
+ }
+
+ if (pc->lpar_per_core) {
+ cpu_ppc_set_1lpar(cpu);
+ }
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ);
@@ -278,16 +370,22 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
pc->threads = g_new(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
PowerPCCPU *cpu;
+ PnvCPUState *pnv_cpu;
obj = object_new(typename);
cpu = POWERPC_CPU(obj);
pc->threads[i] = POWERPC_CPU(obj);
+ if (cc->nr_threads > 1) {
+ cpu->env.has_smt_siblings = true;
+ }
snprintf(name, sizeof(name), "thread[%d]", i);
object_property_add_child(OBJECT(pc), name, obj);
cpu->machine_data = g_new0(PnvCPUState, 1);
+ pnv_cpu = pnv_cpu_state(cpu);
+ pnv_cpu->pnv_core = pc;
object_unref(obj);
}
@@ -341,14 +439,17 @@ static void pnv_core_unrealize(DeviceState *dev)
g_free(pc->threads);
}
-static Property pnv_core_properties[] = {
+static const Property pnv_core_properties[] = {
DEFINE_PROP_UINT32("hwid", PnvCore, hwid, 0),
DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0),
+ DEFINE_PROP_BOOL("big-core", PnvCore, big_core, false),
+ DEFINE_PROP_BOOL("quirk-tb-big-core", PnvCore, tod_state.big_core_quirk,
+ false),
+ DEFINE_PROP_BOOL("lpar-per-core", PnvCore, lpar_per_core, false),
DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_core_power8_class_init(ObjectClass *oc, void *data)
+static void pnv_core_power8_class_init(ObjectClass *oc, const void *data)
{
PnvCoreClass *pcc = PNV_CORE_CLASS(oc);
@@ -356,7 +457,7 @@ static void pnv_core_power8_class_init(ObjectClass *oc, void *data)
pcc->xscom_size = PNV_XSCOM_EX_SIZE;
}
-static void pnv_core_power9_class_init(ObjectClass *oc, void *data)
+static void pnv_core_power9_class_init(ObjectClass *oc, const void *data)
{
PnvCoreClass *pcc = PNV_CORE_CLASS(oc);
@@ -364,7 +465,7 @@ static void pnv_core_power9_class_init(ObjectClass *oc, void *data)
pcc->xscom_size = PNV_XSCOM_EX_SIZE;
}
-static void pnv_core_power10_class_init(ObjectClass *oc, void *data)
+static void pnv_core_power10_class_init(ObjectClass *oc, const void *data)
{
PnvCoreClass *pcc = PNV_CORE_CLASS(oc);
@@ -372,7 +473,7 @@ static void pnv_core_power10_class_init(ObjectClass *oc, void *data)
pcc->xscom_size = PNV10_XSCOM_EC_SIZE;
}
-static void pnv_core_class_init(ObjectClass *oc, void *data)
+static void pnv_core_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -504,6 +605,7 @@ static const MemoryRegionOps pnv_quad_power10_xscom_ops = {
static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
unsigned int width)
{
+ PnvQuad *eq = PNV_QUAD(opaque);
uint32_t offset = addr >> 3;
uint64_t val = -1;
@@ -511,10 +613,14 @@ static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
* Fourth nibble selects the core within a quad; mask it to process the read
* for any core.
*/
- switch (offset & ~0xf000) {
- case P10_QME_SPWU_HYP:
+ switch (offset & ~PPC_BITMASK32(16, 19)) {
case P10_QME_SSH_HYP:
- return 0;
+ val = 0;
+ if (eq->special_wakeup_done) {
+ val |= PPC_BIT(1); /* SPWU DONE */
+ val |= PPC_BIT(4); /* SSH SPWU DONE */
+ }
+ break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp read 0x%08x\n", __func__,
offset);
@@ -526,9 +632,22 @@ static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
static void pnv_qme_power10_xscom_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int width)
{
+ PnvQuad *eq = PNV_QUAD(opaque);
uint32_t offset = addr >> 3;
+ bool set;
+ int i;
- switch (offset) {
+ switch (offset & ~PPC_BITMASK32(16, 19)) {
+ case P10_QME_SPWU_HYP:
+ set = !!(val & PPC_BIT(0));
+ eq->special_wakeup_done = set;
+ for (i = 0; i < 4; i++) {
+ /* These bits select cores in the quad */
+ if (offset & PPC_BIT32(16 + i)) {
+ eq->special_wakeup[i] = set;
+ }
+ }
+ break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp write 0x%08x\n", __func__,
offset);
@@ -577,12 +696,11 @@ static void pnv_quad_power10_realize(DeviceState *dev, Error **errp)
pqc->xscom_qme_size);
}
-static Property pnv_quad_properties[] = {
+static const Property pnv_quad_properties[] = {
DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_quad_power9_class_init(ObjectClass *oc, void *data)
+static void pnv_quad_power9_class_init(ObjectClass *oc, const void *data)
{
PnvQuadClass *pqc = PNV_QUAD_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -593,7 +711,7 @@ static void pnv_quad_power9_class_init(ObjectClass *oc, void *data)
pqc->xscom_size = PNV9_XSCOM_EQ_SIZE;
}
-static void pnv_quad_power10_class_init(ObjectClass *oc, void *data)
+static void pnv_quad_power10_class_init(ObjectClass *oc, const void *data)
{
PnvQuadClass *pqc = PNV_QUAD_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -607,7 +725,7 @@ static void pnv_quad_power10_class_init(ObjectClass *oc, void *data)
pqc->xscom_qme_size = PNV10_XSCOM_QME_SIZE;
}
-static void pnv_quad_class_init(ObjectClass *oc, void *data)
+static void pnv_quad_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c
index f9a203d..2208ffe 100644
--- a/hw/ppc/pnv_homer.c
+++ b/hw/ppc/pnv_homer.c
@@ -20,8 +20,8 @@
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/hwaddr.h"
-#include "exec/memory.h"
-#include "sysemu/cpus.h"
+#include "system/memory.h"
+#include "system/cpus.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/pnv.h"
@@ -29,94 +29,6 @@
#include "hw/ppc/pnv_homer.h"
#include "hw/ppc/pnv_xscom.h"
-
-static bool core_max_array(PnvHomer *homer, hwaddr addr)
-{
- int i;
- PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
-
- for (i = 0; i <= homer->chip->nr_cores; i++) {
- if (addr == (hmrc->core_max_base + i)) {
- return true;
- }
- }
- return false;
-}
-
-/* P8 Pstate table */
-
-#define PNV8_OCC_PSTATE_VERSION 0x1f8001
-#define PNV8_OCC_PSTATE_MIN 0x1f8003
-#define PNV8_OCC_PSTATE_VALID 0x1f8000
-#define PNV8_OCC_PSTATE_THROTTLE 0x1f8002
-#define PNV8_OCC_PSTATE_NOM 0x1f8004
-#define PNV8_OCC_PSTATE_TURBO 0x1f8005
-#define PNV8_OCC_PSTATE_ULTRA_TURBO 0x1f8006
-#define PNV8_OCC_PSTATE_DATA 0x1f8008
-#define PNV8_OCC_PSTATE_ID_ZERO 0x1f8010
-#define PNV8_OCC_PSTATE_ID_ONE 0x1f8018
-#define PNV8_OCC_PSTATE_ID_TWO 0x1f8020
-#define PNV8_OCC_VDD_VOLTAGE_IDENTIFIER 0x1f8012
-#define PNV8_OCC_VCS_VOLTAGE_IDENTIFIER 0x1f8013
-#define PNV8_OCC_PSTATE_ZERO_FREQUENCY 0x1f8014
-#define PNV8_OCC_PSTATE_ONE_FREQUENCY 0x1f801c
-#define PNV8_OCC_PSTATE_TWO_FREQUENCY 0x1f8024
-#define PNV8_CORE_MAX_BASE 0x1f8810
-
-
-static uint64_t pnv_power8_homer_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PnvHomer *homer = PNV_HOMER(opaque);
-
- switch (addr) {
- case PNV8_OCC_PSTATE_VERSION:
- case PNV8_OCC_PSTATE_MIN:
- case PNV8_OCC_PSTATE_ID_ZERO:
- return 0;
- case PNV8_OCC_PSTATE_VALID:
- case PNV8_OCC_PSTATE_THROTTLE:
- case PNV8_OCC_PSTATE_NOM:
- case PNV8_OCC_PSTATE_TURBO:
- case PNV8_OCC_PSTATE_ID_ONE:
- case PNV8_OCC_VDD_VOLTAGE_IDENTIFIER:
- case PNV8_OCC_VCS_VOLTAGE_IDENTIFIER:
- return 1;
- case PNV8_OCC_PSTATE_ULTRA_TURBO:
- case PNV8_OCC_PSTATE_ID_TWO:
- return 2;
- case PNV8_OCC_PSTATE_DATA:
- return 0x1000000000000000;
- /* P8 frequency for 0, 1, and 2 pstates */
- case PNV8_OCC_PSTATE_ZERO_FREQUENCY:
- case PNV8_OCC_PSTATE_ONE_FREQUENCY:
- case PNV8_OCC_PSTATE_TWO_FREQUENCY:
- return 3000;
- }
- /* pstate table core max array */
- if (core_max_array(homer, addr)) {
- return 1;
- }
- return 0;
-}
-
-static void pnv_power8_homer_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- /* callback function defined to homer write */
- return;
-}
-
-static const MemoryRegionOps pnv_power8_homer_ops = {
- .read = pnv_power8_homer_read,
- .write = pnv_power8_homer_write,
- .valid.min_access_size = 1,
- .valid.max_access_size = 8,
- .impl.min_access_size = 1,
- .impl.max_access_size = 8,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
/* P8 PBA BARs */
#define PBA_BAR0 0x00
#define PBA_BAR1 0x01
@@ -131,16 +43,16 @@ static uint64_t pnv_homer_power8_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P8 homer region mask */
- val = (PNV_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR3: /* P8 occ common area */
val = PNV_OCC_COMMON_AREA_BASE;
@@ -172,15 +84,19 @@ static const MemoryRegionOps pnv_homer_power8_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_homer_power8_class_init(ObjectClass *klass, void *data)
+static hwaddr pnv_homer_power8_get_base(PnvChip *chip)
+{
+ return PNV_HOMER_BASE(chip);
+}
+
+static void pnv_homer_power8_class_init(ObjectClass *klass, const void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power8_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power8_pba_ops;
- homer->homer_size = PNV_HOMER_SIZE;
- homer->homer_ops = &pnv_power8_homer_ops;
- homer->core_max_base = PNV8_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power8_type_info = {
@@ -190,100 +106,20 @@ static const TypeInfo pnv_homer_power8_type_info = {
.class_init = pnv_homer_power8_class_init,
};
-/* P9 Pstate table */
-
-#define PNV9_OCC_PSTATE_ID_ZERO 0xe2018
-#define PNV9_OCC_PSTATE_ID_ONE 0xe2020
-#define PNV9_OCC_PSTATE_ID_TWO 0xe2028
-#define PNV9_OCC_PSTATE_DATA 0xe2000
-#define PNV9_OCC_PSTATE_DATA_AREA 0xe2008
-#define PNV9_OCC_PSTATE_MIN 0xe2003
-#define PNV9_OCC_PSTATE_NOM 0xe2004
-#define PNV9_OCC_PSTATE_TURBO 0xe2005
-#define PNV9_OCC_PSTATE_ULTRA_TURBO 0xe2818
-#define PNV9_OCC_MAX_PSTATE_ULTRA_TURBO 0xe2006
-#define PNV9_OCC_PSTATE_MAJOR_VERSION 0xe2001
-#define PNV9_OCC_OPAL_RUNTIME_DATA 0xe2b85
-#define PNV9_CHIP_HOMER_IMAGE_POINTER 0x200008
-#define PNV9_CHIP_HOMER_BASE 0x0
-#define PNV9_OCC_PSTATE_ZERO_FREQUENCY 0xe201c
-#define PNV9_OCC_PSTATE_ONE_FREQUENCY 0xe2024
-#define PNV9_OCC_PSTATE_TWO_FREQUENCY 0xe202c
-#define PNV9_OCC_ROLE_MASTER_OR_SLAVE 0xe2002
-#define PNV9_CORE_MAX_BASE 0xe2819
-
-
-static uint64_t pnv_power9_homer_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- PnvHomer *homer = PNV_HOMER(opaque);
-
- switch (addr) {
- case PNV9_OCC_MAX_PSTATE_ULTRA_TURBO:
- case PNV9_OCC_PSTATE_ID_ZERO:
- return 0;
- case PNV9_OCC_PSTATE_DATA:
- case PNV9_OCC_ROLE_MASTER_OR_SLAVE:
- case PNV9_OCC_PSTATE_NOM:
- case PNV9_OCC_PSTATE_TURBO:
- case PNV9_OCC_PSTATE_ID_ONE:
- case PNV9_OCC_PSTATE_ULTRA_TURBO:
- case PNV9_OCC_OPAL_RUNTIME_DATA:
- return 1;
- case PNV9_OCC_PSTATE_MIN:
- case PNV9_OCC_PSTATE_ID_TWO:
- return 2;
-
- /* 3000 khz frequency for 0, 1, and 2 pstates */
- case PNV9_OCC_PSTATE_ZERO_FREQUENCY:
- case PNV9_OCC_PSTATE_ONE_FREQUENCY:
- case PNV9_OCC_PSTATE_TWO_FREQUENCY:
- return 3000;
- case PNV9_OCC_PSTATE_MAJOR_VERSION:
- return 0x90;
- case PNV9_CHIP_HOMER_BASE:
- case PNV9_OCC_PSTATE_DATA_AREA:
- case PNV9_CHIP_HOMER_IMAGE_POINTER:
- return 0x1000000000000000;
- }
- /* pstate table core max array */
- if (core_max_array(homer, addr)) {
- return 1;
- }
- return 0;
-}
-
-static void pnv_power9_homer_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- /* callback function defined to homer write */
- return;
-}
-
-static const MemoryRegionOps pnv_power9_homer_ops = {
- .read = pnv_power9_homer_read,
- .write = pnv_power9_homer_write,
- .valid.min_access_size = 1,
- .valid.max_access_size = 8,
- .impl.min_access_size = 1,
- .impl.max_access_size = 8,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
static uint64_t pnv_homer_power9_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV9_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P9 homer region mask */
- val = (PNV9_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR2: /* P9 occ common area */
val = PNV9_OCC_COMMON_AREA_BASE;
@@ -315,15 +151,19 @@ static const MemoryRegionOps pnv_homer_power9_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_homer_power9_class_init(ObjectClass *klass, void *data)
+static hwaddr pnv_homer_power9_get_base(PnvChip *chip)
+{
+ return PNV9_HOMER_BASE(chip);
+}
+
+static void pnv_homer_power9_class_init(ObjectClass *klass, const void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power9_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV9_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power9_pba_ops;
- homer->homer_size = PNV9_HOMER_SIZE;
- homer->homer_ops = &pnv_power9_homer_ops;
- homer->core_max_base = PNV9_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power9_type_info = {
@@ -337,16 +177,16 @@ static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
- PnvChip *chip = homer->chip;
+ PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
- val = PNV10_HOMER_BASE(chip);
+ val = homer->base;
break;
case PBA_BARMASK0: /* P10 homer region mask */
- val = (PNV10_HOMER_SIZE - 1) & 0x300000;
+ val = (hmrc->size - 1) & 0x300000;
break;
case PBA_BAR2: /* P10 occ common area */
val = PNV10_OCC_COMMON_AREA_BASE;
@@ -378,15 +218,19 @@ static const MemoryRegionOps pnv_homer_power10_pba_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_homer_power10_class_init(ObjectClass *klass, void *data)
+static hwaddr pnv_homer_power10_get_base(PnvChip *chip)
+{
+ return PNV10_HOMER_BASE(chip);
+}
+
+static void pnv_homer_power10_class_init(ObjectClass *klass, const void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+ homer->get_base = pnv_homer_power10_get_base;
+ homer->size = PNV_HOMER_SIZE;
homer->pba_size = PNV10_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power10_pba_ops;
- homer->homer_size = PNV10_HOMER_SIZE;
- homer->homer_ops = &pnv_power9_homer_ops; /* TODO */
- homer->core_max_base = PNV9_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power10_type_info = {
@@ -400,24 +244,29 @@ static void pnv_homer_realize(DeviceState *dev, Error **errp)
{
PnvHomer *homer = PNV_HOMER(dev);
PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer);
+ char homer_str[32];
assert(homer->chip);
pnv_xscom_region_init(&homer->pba_regs, OBJECT(dev), hmrc->pba_ops,
homer, "xscom-pba", hmrc->pba_size);
- /* homer region */
- memory_region_init_io(&homer->regs, OBJECT(dev),
- hmrc->homer_ops, homer, "homer-main-memory",
- hmrc->homer_size);
+ /* Homer RAM region */
+ homer->base = hmrc->get_base(homer->chip);
+
+ snprintf(homer_str, sizeof(homer_str), "homer-chip%d-memory",
+ homer->chip->chip_id);
+ if (!memory_region_init_ram(&homer->mem, OBJECT(homer),
+ homer_str, hmrc->size, errp)) {
+ return;
+ }
}
-static Property pnv_homer_properties[] = {
+static const Property pnv_homer_properties[] = {
DEFINE_PROP_LINK("chip", PnvHomer, chip, TYPE_PNV_CHIP, PnvChip *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_homer_class_init(ObjectClass *klass, void *data)
+static void pnv_homer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/pnv_i2c.c b/hw/ppc/pnv_i2c.c
index eec5047..60de479 100644
--- a/hw/ppc/pnv_i2c.c
+++ b/hw/ppc/pnv_i2c.c
@@ -9,7 +9,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/log.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
@@ -543,14 +543,13 @@ static void pnv_i2c_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(DEVICE(dev), &i2c->psi_irq, 1);
}
-static Property pnv_i2c_properties[] = {
+static const Property pnv_i2c_properties[] = {
DEFINE_PROP_LINK("chip", PnvI2C, chip, TYPE_PNV_CHIP, PnvChip *),
DEFINE_PROP_UINT32("engine", PnvI2C, engine, 1),
DEFINE_PROP_UINT32("num-busses", PnvI2C, num_busses, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_i2c_class_init(ObjectClass *klass, void *data)
+static void pnv_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -570,7 +569,7 @@ static const TypeInfo pnv_i2c_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(PnvI2C),
.class_init = pnv_i2c_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
index d692858..f6beba0 100644
--- a/hw/ppc/pnv_lpc.c
+++ b/hw/ppc/pnv_lpc.c
@@ -64,6 +64,7 @@ enum {
#define LPC_HC_IRQSER_START_4CLK 0x00000000
#define LPC_HC_IRQSER_START_6CLK 0x01000000
#define LPC_HC_IRQSER_START_8CLK 0x02000000
+#define LPC_HC_IRQSER_AUTO_CLEAR 0x00800000
#define LPC_HC_IRQMASK 0x34 /* same bit defs as LPC_HC_IRQSTAT */
#define LPC_HC_IRQSTAT 0x38
#define LPC_HC_IRQ_SERIRQ0 0x80000000 /* all bits down to ... */
@@ -84,7 +85,7 @@ enum {
#define ISA_IO_SIZE 0x00010000
#define ISA_MEM_SIZE 0x10000000
-#define ISA_FW_SIZE 0x10000000
+#define ISA_FW_SIZE 0x100000000
#define LPC_IO_OPB_ADDR 0xd0010000
#define LPC_IO_OPB_SIZE 0x00010000
#define LPC_MEM_OPB_ADDR 0xe0000000
@@ -235,16 +236,16 @@ int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset, uint64_t lpcm_addr,
* TODO: rework to use address_space_stq() and address_space_ldq()
* instead.
*/
-static bool opb_read(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
- int sz)
+bool pnv_lpc_opb_read(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz)
{
/* XXX Handle access size limits and FW read caching here */
return !address_space_read(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
data, sz);
}
-static bool opb_write(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
- int sz)
+bool pnv_lpc_opb_write(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz)
{
/* XXX Handle access size limits here */
return !address_space_write(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
@@ -276,7 +277,7 @@ static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
}
if (cmd & ECCB_CTL_READ) {
- success = opb_read(lpc, opb_addr, data, sz);
+ success = pnv_lpc_opb_read(lpc, opb_addr, data, sz);
if (success) {
lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
(((uint64_t)data[0]) << 24 |
@@ -293,7 +294,7 @@ static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
data[2] = lpc->eccb_data_reg >> 8;
data[3] = lpc->eccb_data_reg;
- success = opb_write(lpc, opb_addr, data, sz);
+ success = pnv_lpc_opb_write(lpc, opb_addr, data, sz);
lpc->eccb_stat_reg = ECCB_STAT_OP_DONE;
}
/* XXX Which error bit (if any) to signal OPB error ? */
@@ -352,6 +353,8 @@ static const MemoryRegionOps pnv_lpc_xscom_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+static void pnv_lpc_opb_noresponse(PnvLpcController *lpc);
+
static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
PnvLpcController *lpc = PNV_LPC(opaque);
@@ -375,6 +378,7 @@ static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size)
}
if (result != MEMTX_OK) {
+ pnv_lpc_opb_noresponse(lpc);
qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%"
HWADDR_PRIx "\n", addr);
}
@@ -405,6 +409,7 @@ static void pnv_lpc_mmio_write(void *opaque, hwaddr addr,
}
if (result != MEMTX_OK) {
+ pnv_lpc_opb_noresponse(lpc);
qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%"
HWADDR_PRIx "\n", addr);
}
@@ -420,22 +425,85 @@ static const MemoryRegionOps pnv_lpc_mmio_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
+/* Program the POWER9 LPC irq to PSI serirq routing table */
+static void pnv_lpc_eval_serirq_routes(PnvLpcController *lpc)
+{
+ int irq;
+
+ if (!lpc->psi_has_serirq) {
+ if ((lpc->opb_irq_route0 & PPC_BITMASK32(8, 13)) ||
+ (lpc->opb_irq_route1 & PPC_BITMASK32(4, 31))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OPB: setting serirq routing on POWER8 system, ignoring.\n");
+ }
+ return;
+ }
+
+ /*
+ * Each of the ISA irqs is routed to one of the 4 SERIRQ irqs with 2
+ * bits, split across 2 OPB registers.
+ */
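+ /* ISA IRQs 0-13: 2-bit route fields in the OPB ROUTE1 register */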
+ for (irq = 0; irq <= 13; irq++) {
+ int serirq = extract32(lpc->opb_irq_route1,
+ PPC_BIT32_NR(5 + irq * 2), 2);
+ lpc->irq_to_serirq_route[irq] = serirq;
+ }
+
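+ /* ISA IRQs 14-15: 2-bit route fields in the OPB ROUTE0 register */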
+ for (irq = 14; irq < ISA_NUM_IRQS; irq++) {
+ int serirq = extract32(lpc->opb_irq_route0,
+ PPC_BIT32_NR(9 + (irq - 14) * 2), 2);
+ lpc->irq_to_serirq_route[irq] = serirq;
+ }
+}
+
static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
{
- bool lpc_to_opb_irq = false;
+ uint32_t active_irqs = 0;
+
+ active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
+ if (!(lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN)) {
+ active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL;
+ }
+
+ /* Reflect the interrupt */
+ if (lpc->psi_has_serirq) {
+ /*
+ * POWER9 and later have routing fields in OPB master registers that
+ * send LPC irqs to 4 output lines that raise the PSI SERIRQ irqs.
+ * These don't appear to get latched into an OPB register like the
+ * LPCHC irqs.
+ */
+ bool serirq_out[4] = { false, false, false, false };
+ int irq;
+
+ for (irq = 0; irq < ISA_NUM_IRQS; irq++) {
+ if (active_irqs & (LPC_HC_IRQ_SERIRQ0 >> irq)) {
+ serirq_out[lpc->irq_to_serirq_route[irq]] = true;
+ }
+ }
- /* Update LPC controller to OPB line */
- if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) {
- uint32_t irqs;
+ qemu_set_irq(lpc->psi_irq_serirq[0], serirq_out[0]);
+ qemu_set_irq(lpc->psi_irq_serirq[1], serirq_out[1]);
+ qemu_set_irq(lpc->psi_irq_serirq[2], serirq_out[2]);
+ qemu_set_irq(lpc->psi_irq_serirq[3], serirq_out[3]);
- irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
- lpc_to_opb_irq = (irqs != 0);
+ /*
+ * On POWER9 and later, the LPC controller's internal irqs still go via
+ * the OPB and LPCHC PSI irq as on P8, so mask out the SERIRQs and continue.
+ */
+ active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL;
}
- /* We don't honor the polarity register, it's pointless and unused
+ /*
+ * POWER8 ORs all irqs together (including the LPCHC internal interrupt
+ * sources) into a single line that is latched in the OPB IRQ status
+ * register, which in turn raises the PSI LPCHC irq.
+ *
+ * We don't honor the polarity register, it's pointless and unused
* anyway
*/
- if (lpc_to_opb_irq) {
+ if (active_irqs) {
lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
} else {
lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
@@ -444,8 +512,13 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
/* Update OPB internal latch */
lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;
- /* Reflect the interrupt */
- qemu_set_irq(lpc->psi_irq, lpc->opb_irq_stat != 0);
+ qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0);
+}
+
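+/* An OPB access that got no response raises the LPCHC sync no-response error irq */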
+static void pnv_lpc_opb_noresponse(PnvLpcController *lpc)
+{
+ lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SYNC_NORESP_ERR;
+ pnv_lpc_eval_irqs(lpc);
}
static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size)
@@ -488,10 +561,13 @@ static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
switch (addr) {
case LPC_HC_FW_SEG_IDSEL:
- /* XXX Actually figure out how that works as this impact
- * memory regions/aliases
+ /*
+ * ISA FW "devices" are modeled as 16x256MB windows into a
+ * 4GB LPC FW address space.
*/
+ val &= 0xf; /* Selects device 0-15 */
lpc->lpc_hc_fw_seg_idsel = val;
+ memory_region_set_alias_offset(&lpc->opb_isa_fw, val * LPC_FW_OPB_SIZE);
break;
case LPC_HC_FW_RD_ACC_SIZE:
lpc->lpc_hc_fw_rd_acc_size = val;
@@ -505,7 +581,14 @@ static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
pnv_lpc_eval_irqs(lpc);
break;
case LPC_HC_IRQSTAT:
- lpc->lpc_hc_irqstat &= ~val;
+ /*
+ * This register is write-to-clear for the IRQSER (LPC device IRQ)
+ * status. However, if the device has not de-asserted its interrupt,
+ * the status bit would simply be raised again. Model this by keeping
+ * track of the irq inputs and only clearing bits whose inputs are
+ * de-asserted.
+ */
+ lpc->lpc_hc_irqstat &= ~(val & ~lpc->lpc_hc_irq_inputs);
pnv_lpc_eval_irqs(lpc);
break;
case LPC_HC_ERROR_ADDRESS:
@@ -536,10 +619,10 @@ static uint64_t opb_master_read(void *opaque, hwaddr addr, unsigned size)
uint64_t val = 0xfffffffffffffffful;
switch (addr) {
- case OPB_MASTER_LS_ROUTE0: /* TODO */
+ case OPB_MASTER_LS_ROUTE0:
val = lpc->opb_irq_route0;
break;
- case OPB_MASTER_LS_ROUTE1: /* TODO */
+ case OPB_MASTER_LS_ROUTE1:
val = lpc->opb_irq_route1;
break;
case OPB_MASTER_LS_IRQ_STAT:
@@ -568,11 +651,15 @@ static void opb_master_write(void *opaque, hwaddr addr,
PnvLpcController *lpc = opaque;
switch (addr) {
- case OPB_MASTER_LS_ROUTE0: /* TODO */
+ case OPB_MASTER_LS_ROUTE0:
lpc->opb_irq_route0 = val;
+ pnv_lpc_eval_serirq_routes(lpc);
+ pnv_lpc_eval_irqs(lpc);
break;
- case OPB_MASTER_LS_ROUTE1: /* TODO */
+ case OPB_MASTER_LS_ROUTE1:
lpc->opb_irq_route1 = val;
+ pnv_lpc_eval_serirq_routes(lpc);
+ pnv_lpc_eval_irqs(lpc);
break;
case OPB_MASTER_LS_IRQ_STAT:
lpc->opb_irq_stat &= ~val;
@@ -627,7 +714,7 @@ static void pnv_lpc_power8_realize(DeviceState *dev, Error **errp)
PNV_XSCOM_LPC_SIZE);
}
-static void pnv_lpc_power8_class_init(ObjectClass *klass, void *data)
+static void pnv_lpc_power8_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -645,7 +732,7 @@ static const TypeInfo pnv_lpc_power8_info = {
.name = TYPE_PNV8_LPC,
.parent = TYPE_PNV_LPC,
.class_init = pnv_lpc_power8_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
@@ -657,6 +744,8 @@ static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp)
PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev);
Error *local_err = NULL;
+ object_property_set_bool(OBJECT(lpc), "psi-serirq", true, &error_abort);
+
plc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -666,9 +755,12 @@ static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp)
/* P9 uses a MMIO region */
memory_region_init_io(&lpc->xscom_regs, OBJECT(lpc), &pnv_lpc_mmio_ops,
lpc, "lpcm", PNV9_LPCM_SIZE);
+
+ /* P9 LPC routes ISA irqs to 4 PSI SERIRQ lines */
+ qdev_init_gpio_out_named(dev, lpc->psi_irq_serirq, "SERIRQ", 4);
}
-static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_lpc_power9_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvLpcClass *plc = PNV_LPC_CLASS(klass);
@@ -685,7 +777,7 @@ static const TypeInfo pnv_lpc_power9_info = {
.class_init = pnv_lpc_power9_class_init,
};
-static void pnv_lpc_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_lpc_power10_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -709,9 +801,9 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp)
memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull);
address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb");
- /* Create ISA IO and Mem space regions which are the root of
- * the ISA bus (ie, ISA address spaces). We don't create a
- * separate one for FW which we alias to memory.
+ /*
+ * Create ISA IO, Mem, and FW space regions which are the root of
+ * the ISA bus (ie, ISA address spaces).
*/
memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE);
memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE);
@@ -744,13 +836,18 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR,
&lpc->lpc_hc_regs);
- qdev_init_gpio_out(dev, &lpc->psi_irq, 1);
+ qdev_init_gpio_out_named(dev, &lpc->psi_irq_lpchc, "LPCHC", 1);
}
-static void pnv_lpc_class_init(ObjectClass *klass, void *data)
+static const Property pnv_lpc_properties[] = {
+ DEFINE_PROP_BOOL("psi-serirq", PnvLpcController, psi_has_serirq, false),
+};
+
+static void pnv_lpc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ device_class_set_props(dc, pnv_lpc_properties);
dc->realize = pnv_lpc_realize;
dc->desc = "PowerNV LPC Controller";
dc->user_creatable = false;
@@ -796,18 +893,34 @@ static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level)
}
if (pnv->cpld_irqstate != old_state) {
- qemu_set_irq(lpc->psi_irq, pnv->cpld_irqstate != 0);
+ qemu_set_irq(lpc->psi_irq_lpchc, pnv->cpld_irqstate != 0);
}
}
static void pnv_lpc_isa_irq_handler(void *opaque, int n, int level)
{
PnvLpcController *lpc = PNV_LPC(opaque);
+ uint32_t irq_bit = LPC_HC_IRQ_SERIRQ0 >> n;
- /* The Naples HW latches the 1 levels, clearing is done by SW */
if (level) {
- lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SERIRQ0 >> n;
+ lpc->lpc_hc_irq_inputs |= irq_bit;
+
+ /*
+ * The LPC HC in Naples and later latches LPC IRQs into a bit field in
+ * the IRQSTAT register, and that drives the PSI IRQ to the IC.
+ * Software clears this bit manually (see LPC_HC_IRQSTAT handler).
+ */
+ lpc->lpc_hc_irqstat |= irq_bit;
pnv_lpc_eval_irqs(lpc);
+ } else {
+ lpc->lpc_hc_irq_inputs &= ~irq_bit;
+
+ /* POWER9 adds an auto-clear mode that clears IRQSTAT bits on EOI */
+ if (lpc->psi_has_serirq &&
+ (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_AUTO_CLEAR)) {
+ lpc->lpc_hc_irqstat &= ~irq_bit;
+ pnv_lpc_eval_irqs(lpc);
+ }
}
}
@@ -838,6 +951,7 @@ ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp)
handler = pnv_lpc_isa_irq_handler;
}
+ /* POWER has a 17th irq; QEMU only implements the 16 regular device irqs */
irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS);
isa_bus_register_input_irqs(isa_bus, irqs);
diff --git a/hw/ppc/pnv_n1_chiplet.c b/hw/ppc/pnv_n1_chiplet.c
index 03ff9fb..053f647 100644
--- a/hw/ppc/pnv_n1_chiplet.c
+++ b/hw/ppc/pnv_n1_chiplet.c
@@ -136,7 +136,7 @@ static void pnv_n1_chiplet_realize(DeviceState *dev, Error **errp)
PNV10_XSCOM_N1_PB_SCOM_ES_SIZE);
}
-static void pnv_n1_chiplet_class_init(ObjectClass *klass, void *data)
+static void pnv_n1_chiplet_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -159,7 +159,7 @@ static const TypeInfo pnv_n1_chiplet_info = {
.instance_init = pnv_n1_chiplet_instance_init,
.instance_size = sizeof(PnvN1Chiplet),
.class_init = pnv_n1_chiplet_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
diff --git a/hw/ppc/pnv_nest_pervasive.c b/hw/ppc/pnv_nest_pervasive.c
index 7747675..1b1b14f 100644
--- a/hw/ppc/pnv_nest_pervasive.c
+++ b/hw/ppc/pnv_nest_pervasive.c
@@ -177,11 +177,11 @@ static void pnv_nest_pervasive_realize(DeviceState *dev, Error **errp)
pnv_xscom_region_init(&nest_pervasive->xscom_ctrl_regs_mr,
OBJECT(nest_pervasive),
&pnv_nest_pervasive_control_xscom_ops,
- nest_pervasive, "pervasive-control",
+ nest_pervasive, "xscom-pervasive-control",
PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE);
}
-static void pnv_nest_pervasive_class_init(ObjectClass *klass, void *data)
+static void pnv_nest_pervasive_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -194,7 +194,7 @@ static const TypeInfo pnv_nest_pervasive_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(PnvNestChipletPervasive),
.class_init = pnv_nest_pervasive_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c
index 48123ce..24b789c 100644
--- a/hw/ppc/pnv_occ.c
+++ b/hw/ppc/pnv_occ.c
@@ -24,40 +24,53 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_occ.h"
+#define P8_HOMER_OPAL_DATA_OFFSET 0x1F8000
+#define P9_HOMER_OPAL_DATA_OFFSET 0x0E2000
+
#define OCB_OCI_OCCMISC 0x4020
#define OCB_OCI_OCCMISC_AND 0x4021
#define OCB_OCI_OCCMISC_OR 0x4022
+#define OCCMISC_PSI_IRQ PPC_BIT(0)
+#define OCCMISC_IRQ_SHMEM PPC_BIT(3)
/* OCC sensors */
-#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x580000
-#define OCC_SENSOR_DATA_VALID 0x580001
-#define OCC_SENSOR_DATA_VERSION 0x580002
-#define OCC_SENSOR_DATA_READING_VERSION 0x580004
-#define OCC_SENSOR_DATA_NR_SENSORS 0x580008
-#define OCC_SENSOR_DATA_NAMES_OFFSET 0x580010
-#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x580014
-#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x58000c
-#define OCC_SENSOR_DATA_NAME_LENGTH 0x58000d
-#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x580023
-#define OCC_SENSOR_LOC_CORE 0x580022
-#define OCC_SENSOR_LOC_GPU 0x580020
-#define OCC_SENSOR_TYPE_POWER 0x580003
-#define OCC_SENSOR_NAME 0x580005
-#define HWMON_SENSORS_MASK 0x58001e
-#define SLW_IMAGE_BASE 0x0
+#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x0000
+#define OCC_SENSOR_DATA_VALID 0x0001
+#define OCC_SENSOR_DATA_VERSION 0x0002
+#define OCC_SENSOR_DATA_READING_VERSION 0x0004
+#define OCC_SENSOR_DATA_NR_SENSORS 0x0008
+#define OCC_SENSOR_DATA_NAMES_OFFSET 0x0010
+#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x0014
+#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x000c
+#define OCC_SENSOR_DATA_NAME_LENGTH 0x000d
+#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x0023
+#define OCC_SENSOR_LOC_CORE 0x0022
+#define OCC_SENSOR_LOC_GPU 0x0020
+#define OCC_SENSOR_TYPE_POWER 0x0003
+#define OCC_SENSOR_NAME 0x0005
+#define HWMON_SENSORS_MASK 0x001e
static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val)
{
- bool irq_state;
-
- val &= 0xffff000000000000ull;
+ val &= PPC_BITMASK(0, 18); /* Mask out unimplemented bits */
occ->occmisc = val;
- irq_state = !!(val >> 63);
- qemu_set_irq(occ->psi_irq, irq_state);
+
+ /*
+ * The OCCMISC IRQ bit triggers the interrupt on a 0->1 edge, but it is
+ * not clear how that is handled in PSI, so it is modelled as level
+ * triggered here, which is not really correct (but skiboot is okay
+ * with it).
+ */
+ qemu_set_irq(occ->psi_irq, !!(val & OCCMISC_PSI_IRQ));
+}
+
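+/* Notify OPAL of a new message in shared memory by raising the OCC PSI irq */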
+static void pnv_occ_raise_msg_irq(PnvOCC *occ)
+{
+ pnv_occ_set_misc(occ, occ->occmisc | OCCMISC_PSI_IRQ | OCCMISC_IRQ_SHMEM);
}
static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr,
@@ -129,8 +142,6 @@ static uint64_t pnv_occ_common_area_read(void *opaque, hwaddr addr,
case HWMON_SENSORS_MASK:
case OCC_SENSOR_LOC_GPU:
return 0x8e00;
- case SLW_IMAGE_BASE:
- return 0x1000000000000000;
}
return 0;
}
@@ -139,7 +150,6 @@ static void pnv_occ_common_area_write(void *opaque, hwaddr addr,
uint64_t val, unsigned width)
{
/* callback function defined to occ common area write */
- return;
}
static const MemoryRegionOps pnv_occ_power8_xscom_ops = {
@@ -162,10 +172,14 @@ const MemoryRegionOps pnv_occ_sram_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_occ_power8_class_init(ObjectClass *klass, void *data)
+static void pnv_occ_power8_class_init(ObjectClass *klass, const void *data)
{
PnvOCCClass *poc = PNV_OCC_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->desc = "PowerNV OCC Controller (POWER8)";
+ poc->opal_shared_memory_offset = P8_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0x02;
poc->xscom_size = PNV_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power8_xscom_ops;
}
@@ -232,14 +246,17 @@ static const MemoryRegionOps pnv_occ_power9_xscom_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_occ_power9_class_init(ObjectClass *klass, const void *data)
{
PnvOCCClass *poc = PNV_OCC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER9)";
+ poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0x90;
poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power9_xscom_ops;
+ assert(!dc->user_creatable);
}
static const TypeInfo pnv_occ_power9_type_info = {
@@ -249,23 +266,52 @@ static const TypeInfo pnv_occ_power9_type_info = {
.class_init = pnv_occ_power9_class_init,
};
-static void pnv_occ_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_occ_power10_class_init(ObjectClass *klass, const void *data)
{
+ PnvOCCClass *poc = PNV_OCC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER10)";
+ poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET;
+ poc->opal_shared_memory_version = 0xA0;
+ poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
+ poc->xscom_ops = &pnv_occ_power9_xscom_ops;
+ assert(!dc->user_creatable);
}
static const TypeInfo pnv_occ_power10_type_info = {
.name = TYPE_PNV10_OCC,
- .parent = TYPE_PNV9_OCC,
+ .parent = TYPE_PNV_OCC,
.class_init = pnv_occ_power10_class_init,
};
+static bool occ_init_homer_memory(PnvOCC *occ, Error **errp);
+static bool occ_model_tick(PnvOCC *occ);
+
+/* Relatively arbitrary */
+#define OCC_POLL_MS 100
+
+static void occ_state_machine_timer(void *opaque)
+{
+ PnvOCC *occ = opaque;
+ uint64_t next = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + OCC_POLL_MS;
+
+ if (occ_model_tick(occ)) {
+ timer_mod(&occ->state_machine_timer, next);
+ }
+}
+
static void pnv_occ_realize(DeviceState *dev, Error **errp)
{
PnvOCC *occ = PNV_OCC(dev);
PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+
+ assert(homer);
+
+ if (!occ_init_homer_memory(occ, errp)) {
+ return;
+ }
occ->occmisc = 0;
@@ -279,14 +325,22 @@ static void pnv_occ_realize(DeviceState *dev, Error **errp)
PNV_OCC_SENSOR_DATA_BLOCK_SIZE);
qdev_init_gpio_out(dev, &occ->psi_irq, 1);
+
+ timer_init_ms(&occ->state_machine_timer, QEMU_CLOCK_VIRTUAL,
+ occ_state_machine_timer, occ);
+ timer_mod(&occ->state_machine_timer, OCC_POLL_MS);
}
-static void pnv_occ_class_init(ObjectClass *klass, void *data)
+static const Property pnv_occ_properties[] = {
+ DEFINE_PROP_LINK("homer", PnvOCC, homer, TYPE_PNV_HOMER, PnvHomer *),
+};
+
+static void pnv_occ_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pnv_occ_realize;
- dc->desc = "PowerNV OCC Controller";
+ device_class_set_props(dc, pnv_occ_properties);
dc->user_creatable = false;
}
@@ -308,3 +362,561 @@ static void pnv_occ_register_types(void)
}
type_init(pnv_occ_register_types);
+
+/*
+ * From skiboot/hw/occ.c with following changes:
+ * - tab to space conversion
+ * - Type conversions u8->uint8_t s8->int8_t __be16->uint16_t etc
+ * - __packed -> QEMU_PACKED
+ */
+/* OCC Communication Area for PStates */
+
+#define OPAL_DYNAMIC_DATA_OFFSET 0x0B80 /* relative to HOMER_OPAL_DATA_OFFSET */
+
+#define MAX_PSTATES 256
+#define MAX_P8_CORES 12
+#define MAX_P9_CORES 24
+#define MAX_P10_CORES 32
+
+#define MAX_OPAL_CMD_DATA_LENGTH 4090
+#define MAX_OCC_RSP_DATA_LENGTH 8698
+
+#define P8_PIR_CORE_MASK 0xFFF8
+#define P9_PIR_QUAD_MASK 0xFFF0
+#define P10_PIR_CHIP_MASK 0x0000
+#define FREQ_MAX_IN_DOMAIN 0
+#define FREQ_MOST_RECENTLY_SET 1
+
+/**
+ * OCC-OPAL Shared Memory Region
+ *
+ * Reference document :
+ * https://github.com/open-power/docs/blob/master/occ/OCC_OpenPwr_FW_Interfaces.pdf
+ *
+ * Supported layout versions:
+ * - 0x01, 0x02 : P8
+ * https://github.com/open-power/occ/blob/master_p8/src/occ/proc/proc_pstate.h
+ *
+ * - 0x90 : P9
+ * https://github.com/open-power/occ/blob/master/src/occ_405/proc/proc_pstate.h
+ * In 0x90 the data is separated into :-
+ * -- Static Data (struct occ_pstate_table): Data is written once by OCC
+ * -- Dynamic Data (struct occ_dynamic_data): Data is updated at runtime
+ *
+ * struct occ_pstate_table - Pstate table layout
+ * @valid: Indicates if data is valid
+ * @version: Layout version [Major/Minor]
+ * @v2.throttle: Reason for limiting the max pstate
+ * @v9.occ_role: OCC role (Master/Slave)
+ * @v#.pstate_min: Minimum pstate ever allowed
+ * @v#.pstate_nom: Nominal pstate
+ * @v#.pstate_turbo: Maximum turbo pstate
+ * @v#.pstate_ultra_turbo: Maximum ultra turbo pstate and the maximum
+ * pstate ever allowed
+ * @v#.pstates: Pstate-id and frequency list from Pmax to Pmin
+ * @v#.pstates.id: Pstate-id
+ * @v#.pstates.flags: Pstate-flag(reserved)
+ * @v2.pstates.vdd: Voltage Identifier
+ * @v2.pstates.vcs: Voltage Identifier
+ * @v#.pstates.freq_khz: Frequency in KHz
+ * @v#.core_max[1..N]: Max pstate with N active cores
+ * @spare/reserved/pad: Unused data
+ */
+struct occ_pstate_table {
+ uint8_t valid;
+ uint8_t version;
+ union QEMU_PACKED {
+ struct QEMU_PACKED { /* Version 0x01 and 0x02 */
+ uint8_t throttle;
+ int8_t pstate_min;
+ int8_t pstate_nom;
+ int8_t pstate_turbo;
+ int8_t pstate_ultra_turbo;
+ uint8_t spare;
+ uint64_t reserved;
+ struct QEMU_PACKED {
+ int8_t id;
+ uint8_t flags;
+ uint8_t vdd;
+ uint8_t vcs;
+ uint32_t freq_khz;
+ } pstates[MAX_PSTATES];
+ int8_t core_max[MAX_P8_CORES];
+ uint8_t pad[100];
+ } v2;
+ struct QEMU_PACKED { /* Version 0x90 */
+ uint8_t occ_role;
+ uint8_t pstate_min;
+ uint8_t pstate_nom;
+ uint8_t pstate_turbo;
+ uint8_t pstate_ultra_turbo;
+ uint8_t spare;
+ uint64_t reserved1;
+ uint64_t reserved2;
+ struct QEMU_PACKED {
+ uint8_t id;
+ uint8_t flags;
+ uint16_t reserved;
+ uint32_t freq_khz;
+ } pstates[MAX_PSTATES];
+ uint8_t core_max[MAX_P9_CORES];
+ uint8_t pad[56];
+ } v9;
+ struct QEMU_PACKED { /* Version 0xA0 */
+ uint8_t occ_role;
+ uint8_t pstate_min;
+ uint8_t pstate_fixed_freq;
+ uint8_t pstate_base;
+ uint8_t pstate_ultra_turbo;
+ uint8_t pstate_fmax;
+ uint8_t minor;
+ uint8_t pstate_bottom_throttle;
+ uint8_t spare;
+ uint8_t spare1;
+ uint32_t reserved_32;
+ uint64_t reserved_64;
+ struct QEMU_PACKED {
+ uint8_t id;
+ uint8_t valid;
+ uint16_t reserved;
+ uint32_t freq_khz;
+ } pstates[MAX_PSTATES];
+ uint8_t core_max[MAX_P10_CORES];
+ uint8_t pad[48];
+ } v10;
+ };
+} QEMU_PACKED;
+
+/**
+ * OPAL-OCC Command Response Interface
+ *
+ * OPAL-OCC Command Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OPAL | Cmd | OPAL | | Cmd Data | Cmd Data | OPAL |
+ * | Cmd | Request | OCC | Reserved | Length | Length | Cmd |
+ * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... |
+ * ---------------------------------------------------------------------
+ * | ….OPAL Command Data up to max of Cmd Data Length 4090 bytes |
+ * | |
+ * ---------------------------------------------------------------------
+ *
+ * OPAL Command Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) | | | | | | | (lsb) |
+ * -----------------------------------------------------------------
+ * |Cmd | | | | | | | |
+ * |Ready | | | | | | | |
+ * -----------------------------------------------------------------
+ *
+ * struct opal_command_buffer - Defines the layout of OPAL command buffer
+ * @flag: Provides general status of the command
+ * @request_id: Token to identify request
+ * @cmd: Command sent
+ * @data_size: Command data length
+ * @data: Command specific data
+ * @spare: Unused byte
+ */
+struct opal_command_buffer {
+ uint8_t flag;
+ uint8_t request_id;
+ uint8_t cmd;
+ uint8_t spare;
+ uint16_t data_size;
+ uint8_t data[MAX_OPAL_CMD_DATA_LENGTH];
+} QEMU_PACKED;
+
+/**
+ * OPAL-OCC Response Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OCC | Cmd | OPAL | Response | Rsp Data | Rsp Data | OPAL |
+ * | Rsp | Request | OCC | Status | Length | Length | Rsp |
+ * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... |
+ * ---------------------------------------------------------------------
+ * | ….OPAL Response Data up to max of Rsp Data Length 8698 bytes |
+ * | |
+ * ---------------------------------------------------------------------
+ *
+ * OCC Response Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) | | | | | | | (lsb) |
+ * -----------------------------------------------------------------
+ * | | | | | | |OCC in | Rsp |
+ * | | | | | | |progress|Ready |
+ * -----------------------------------------------------------------
+ *
+ * struct occ_response_buffer - Defines the layout of OCC response buffer
+ * @flag: Provides general status of the response
+ * @request_id: Token to identify request
+ * @cmd: Command requested
+ * @status: Indicates success/failure status of
+ * the command
+ * @data_size: Response data length
+ * @data: Response specific data
+ */
+struct occ_response_buffer {
+ uint8_t flag;
+ uint8_t request_id;
+ uint8_t cmd;
+ uint8_t status;
+ uint16_t data_size;
+ uint8_t data[MAX_OCC_RSP_DATA_LENGTH];
+} QEMU_PACKED;
+
+/**
+ * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
+ *
+ * struct occ_dynamic_data - Contains runtime attributes
+ * @occ_state: Current state of OCC
+ * @major_version: Major version number
+ * @minor_version: Minor version number (backwards compatible)
+ * Version 1 indicates GPU presence populated
+ * @gpus_present: Bitmask of GPUs present (on systems where GPU
+ * presence is detected through APSS)
+ * @cpu_throttle: Reason for limiting the max pstate
+ * @mem_throttle: Reason for throttling memory
+ * @quick_pwr_drop: Indicates if QPD is asserted
+ * @pwr_shifting_ratio: Indicates the current percentage of power to
+ * take away from the CPU vs GPU when shifting
+ * power to maintain a power cap. Value of 100
+ * means take all power from CPU.
+ * @pwr_cap_type: Indicates type of power cap in effect
+ * @hard_min_pwr_cap: Hard minimum system power cap in Watts.
+ * Guaranteed unless hardware failure
+ * @max_pwr_cap: Maximum allowed system power cap in Watts
+ * @cur_pwr_cap: Current system power cap
+ * @soft_min_pwr_cap: Soft powercap minimum. OCC may or may not be
+ * able to maintain this
+ * @spare/reserved: Unused data
+ * @cmd: Opal Command Buffer
+ * @rsp: OCC Response Buffer
+ */
+struct occ_dynamic_data {
+ uint8_t occ_state;
+ uint8_t major_version;
+ uint8_t minor_version;
+ uint8_t gpus_present;
+ union QEMU_PACKED {
+ struct QEMU_PACKED { /* Version 0x90 */
+ uint8_t spare1;
+ } v9;
+ struct QEMU_PACKED { /* Version 0xA0 */
+ uint8_t wof_enabled;
+ } v10;
+ };
+ uint8_t cpu_throttle;
+ uint8_t mem_throttle;
+ uint8_t quick_pwr_drop;
+ uint8_t pwr_shifting_ratio;
+ uint8_t pwr_cap_type;
+ uint16_t hard_min_pwr_cap;
+ uint16_t max_pwr_cap;
+ uint16_t cur_pwr_cap;
+ uint16_t soft_min_pwr_cap;
+ uint8_t pad[110];
+ struct opal_command_buffer cmd;
+ struct occ_response_buffer rsp;
+} QEMU_PACKED;
+
+enum occ_response_status {
+ OCC_RSP_SUCCESS = 0x00,
+ OCC_RSP_INVALID_COMMAND = 0x11,
+ OCC_RSP_INVALID_CMD_DATA_LENGTH = 0x12,
+ OCC_RSP_INVALID_DATA = 0x13,
+ OCC_RSP_INTERNAL_ERROR = 0x15,
+};
+
+#define OCC_ROLE_SLAVE 0x00
+#define OCC_ROLE_MASTER 0x01
+
+#define OCC_FLAG_RSP_READY 0x01
+#define OCC_FLAG_CMD_IN_PROGRESS 0x02
+#define OPAL_FLAG_CMD_READY 0x80
+
+#define PCAP_MAX_POWER_W 100
+#define PCAP_SOFT_MIN_POWER_W 20
+#define PCAP_HARD_MIN_POWER_W 10
+
+static bool occ_write_static_data(PnvOCC *occ,
+ struct occ_pstate_table *static_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ MemTxResult ret;
+
+ ret = address_space_write(&address_space_memory, static_addr,
+ MEMTXATTRS_UNSPECIFIED, static_data,
+ sizeof(*static_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot write OCC-OPAL static data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_read_dynamic_data(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET;
+ MemTxResult ret;
+
+ ret = address_space_read(&address_space_memory, dynamic_addr,
+ MEMTXATTRS_UNSPECIFIED, dynamic_data,
+ sizeof(*dynamic_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot read OCC-OPAL dynamic data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_write_dynamic_data(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ hwaddr static_addr = homer->base + poc->opal_shared_memory_offset;
+ hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET;
+ MemTxResult ret;
+
+ ret = address_space_write(&address_space_memory, dynamic_addr,
+ MEMTXATTRS_UNSPECIFIED, dynamic_data,
+ sizeof(*dynamic_data));
+ if (ret != MEMTX_OK) {
+ error_setg(errp, "OCC: cannot write OCC-OPAL dynamic data");
+ return false;
+ }
+
+ return true;
+}
+
+static bool occ_opal_send_response(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data,
+ enum occ_response_status status,
+ uint8_t *data, uint16_t datalen)
+{
+ struct opal_command_buffer *cmd = &dynamic_data->cmd;
+ struct occ_response_buffer *rsp = &dynamic_data->rsp;
+
+ rsp->request_id = cmd->request_id;
+ rsp->cmd = cmd->cmd;
+ rsp->status = status;
+ rsp->data_size = cpu_to_be16(datalen);
+ if (datalen) {
+ memcpy(rsp->data, data, datalen);
+ }
+ if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) {
+ return false;
+ }
+ /* Would be a memory barrier here */
+ rsp->flag = OCC_FLAG_RSP_READY;
+ cmd->flag = 0;
+ if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) {
+ return false;
+ }
+
+ pnv_occ_raise_msg_irq(occ);
+
+ return true;
+}
+
+/* Returns false if the OCC-OPAL shared memory could not be updated */
+static bool occ_opal_process_command(PnvOCC *occ,
+ struct occ_dynamic_data *dynamic_data)
+{
+ struct opal_command_buffer *cmd = &dynamic_data->cmd;
+ struct occ_response_buffer *rsp = &dynamic_data->rsp;
+
+ if (rsp->flag == 0) {
+ /* Spend one "tick" in the in-progress state */
+ rsp->flag = OCC_FLAG_CMD_IN_PROGRESS;
+ return occ_write_dynamic_data(occ, dynamic_data, NULL);
+ } else if (rsp->flag != OCC_FLAG_CMD_IN_PROGRESS) {
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INTERNAL_ERROR,
+ NULL, 0);
+ }
+
+ switch (cmd->cmd) {
+ case 0xD1: { /* SET_POWER_CAP */
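+ /* 2-byte power cap in Watts; zero clears the cap, otherwise clamp to the supported range */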
+ uint16_t data;
+ if (be16_to_cpu(cmd->data_size) != 2) {
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INVALID_CMD_DATA_LENGTH,
+ (uint8_t *)&dynamic_data->cur_pwr_cap,
+ 2);
+ }
+ data = be16_to_cpu(*(uint16_t *)cmd->data);
+ if (data == 0) { /* clear power cap */
+ dynamic_data->pwr_cap_type = 0x00; /* none */
+ data = PCAP_MAX_POWER_W;
+ } else {
+ dynamic_data->pwr_cap_type = 0x02; /* user set in-band */
+ if (data < PCAP_HARD_MIN_POWER_W) {
+ data = PCAP_HARD_MIN_POWER_W;
+ } else if (data > PCAP_MAX_POWER_W) {
+ data = PCAP_MAX_POWER_W;
+ }
+ }
+ dynamic_data->cur_pwr_cap = cpu_to_be16(data);
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_SUCCESS,
+ (uint8_t *)&dynamic_data->cur_pwr_cap, 2);
+ }
+
+ default:
+ return occ_opal_send_response(occ, dynamic_data,
+ OCC_RSP_INVALID_COMMAND,
+ NULL, 0);
+ }
+ g_assert_not_reached();
+}
+
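+/*
+ * Poll the OPAL command buffer in HOMER shared memory and process any
+ * command that OPAL has flagged ready. Returns false if HOMER cannot be
+ * accessed, which stops the OCC state machine timer.
+ */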
+static bool occ_model_tick(PnvOCC *occ)
+{
+ QEMU_UNINITIALIZED struct occ_dynamic_data dynamic_data;
+
+ if (!occ_read_dynamic_data(occ, &dynamic_data, NULL)) {
+ /* Can't move the OCC state field to "safe" because we can't access it */
+ qemu_log("OCC: failed to read HOMER data, shutting down OCC\n");
+ return false;
+ }
+ if (dynamic_data.cmd.flag == OPAL_FLAG_CMD_READY) {
+ if (!occ_opal_process_command(occ, &dynamic_data)) {
+ qemu_log("OCC: failed to write HOMER data, shutting down OCC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
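+/*
+ * Populate the OCC-OPAL shared memory in HOMER with an initial static
+ * pstate table and dynamic data block for the modelled chip generation.
+ */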
+static bool occ_init_homer_memory(PnvOCC *occ, Error **errp)
+{
+ PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+ PnvHomer *homer = occ->homer;
+ PnvChip *chip = homer->chip;
+ struct occ_pstate_table static_data;
+ struct occ_dynamic_data dynamic_data;
+ int i;
+
+ memset(&static_data, 0, sizeof(static_data));
+ static_data.valid = 1;
+ static_data.version = poc->opal_shared_memory_version;
+ switch (poc->opal_shared_memory_version) {
+ case 0x02:
+ static_data.v2.throttle = 0;
+ static_data.v2.pstate_min = -2;
+ static_data.v2.pstate_nom = -1;
+ static_data.v2.pstate_turbo = -1;
+ static_data.v2.pstate_ultra_turbo = 0;
+ static_data.v2.pstates[0].id = 0;
+ static_data.v2.pstates[0].freq_khz = cpu_to_be32(4000000);
+ static_data.v2.pstates[1].id = -1;
+ static_data.v2.pstates[1].freq_khz = cpu_to_be32(3000000);
+ static_data.v2.pstates[2].id = -2;
+ static_data.v2.pstates[2].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v2.core_max[i] = 1;
+ }
+ break;
+ case 0x90:
+ if (chip->chip_id == 0) {
+ static_data.v9.occ_role = OCC_ROLE_MASTER;
+ } else {
+ static_data.v9.occ_role = OCC_ROLE_SLAVE;
+ }
+ static_data.v9.pstate_min = 2;
+ static_data.v9.pstate_nom = 1;
+ static_data.v9.pstate_turbo = 1;
+ static_data.v9.pstate_ultra_turbo = 0;
+ static_data.v9.pstates[0].id = 0;
+ static_data.v9.pstates[0].freq_khz = cpu_to_be32(4000000);
+ static_data.v9.pstates[1].id = 1;
+ static_data.v9.pstates[1].freq_khz = cpu_to_be32(3000000);
+ static_data.v9.pstates[2].id = 2;
+ static_data.v9.pstates[2].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v9.core_max[i] = 1;
+ }
+ break;
+ case 0xA0:
+ if (chip->chip_id == 0) {
+ static_data.v10.occ_role = OCC_ROLE_MASTER;
+ } else {
+ static_data.v10.occ_role = OCC_ROLE_SLAVE;
+ }
+ static_data.v10.pstate_min = 4;
+ static_data.v10.pstate_fixed_freq = 3;
+ static_data.v10.pstate_base = 2;
+ static_data.v10.pstate_ultra_turbo = 0;
+ static_data.v10.pstate_fmax = 1;
+ static_data.v10.minor = 0x01;
+ static_data.v10.pstates[0].valid = 1;
+ static_data.v10.pstates[0].id = 0;
+ static_data.v10.pstates[0].freq_khz = cpu_to_be32(4200000);
+ static_data.v10.pstates[1].valid = 1;
+ static_data.v10.pstates[1].id = 1;
+ static_data.v10.pstates[1].freq_khz = cpu_to_be32(4000000);
+ static_data.v10.pstates[2].valid = 1;
+ static_data.v10.pstates[2].id = 2;
+ static_data.v10.pstates[2].freq_khz = cpu_to_be32(3800000);
+ static_data.v10.pstates[3].valid = 1;
+ static_data.v10.pstates[3].id = 3;
+ static_data.v10.pstates[3].freq_khz = cpu_to_be32(3000000);
+ static_data.v10.pstates[4].valid = 1;
+ static_data.v10.pstates[4].id = 4;
+ static_data.v10.pstates[4].freq_khz = cpu_to_be32(2000000);
+ for (i = 0; i < chip->nr_cores; i++) {
+ static_data.v10.core_max[i] = 1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (!occ_write_static_data(occ, &static_data, errp)) {
+ return false;
+ }
+
+ memset(&dynamic_data, 0, sizeof(dynamic_data));
+ dynamic_data.occ_state = 0x3; /* active */
+ dynamic_data.major_version = 0x0;
+ dynamic_data.hard_min_pwr_cap = cpu_to_be16(PCAP_HARD_MIN_POWER_W);
+ dynamic_data.max_pwr_cap = cpu_to_be16(PCAP_MAX_POWER_W);
+ dynamic_data.cur_pwr_cap = cpu_to_be16(PCAP_MAX_POWER_W);
+ dynamic_data.soft_min_pwr_cap = cpu_to_be16(PCAP_SOFT_MIN_POWER_W);
+ switch (poc->opal_shared_memory_version) {
+ case 0xA0:
+ dynamic_data.minor_version = 0x1;
+ dynamic_data.v10.wof_enabled = 0x1;
+ break;
+ case 0x90:
+ dynamic_data.minor_version = 0x1;
+ break;
+ case 0x02:
+ dynamic_data.minor_version = 0x0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (!occ_write_dynamic_data(occ, &dynamic_data, errp)) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c
index 6280408..af7cfd0 100644
--- a/hw/ppc/pnv_pnor.c
+++ b/hw/ppc/pnv_pnor.c
@@ -11,8 +11,8 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "hw/loader.h"
#include "hw/ppc/pnv_pnor.h"
#include "hw/qdev-properties.h"
@@ -108,17 +108,18 @@ static void pnv_pnor_realize(DeviceState *dev, Error **errp)
memset(s->storage, 0xFF, s->size);
}
+ s->lpc_address = PNOR_SPI_OFFSET;
+
memory_region_init_io(&s->mmio, OBJECT(s), &pnv_pnor_ops, s,
TYPE_PNV_PNOR, s->size);
}
-static Property pnv_pnor_properties[] = {
+static const Property pnv_pnor_properties[] = {
DEFINE_PROP_INT64("size", PnvPnor, size, 128 * MiB),
DEFINE_PROP_DRIVE("drive", PnvPnor, blk),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_pnor_class_init(ObjectClass *klass, void *data)
+static void pnv_pnor_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
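
The property-array change above recurs throughout this series: the array becomes const, the DEFINE_PROP_END_OF_LIST() terminator is dropped, and class_init takes a const void *data. Below is a small sketch of the resulting style for a hypothetical DemoDeviceState; it assumes QEMU's qdev headers and that device_class_set_props() now derives the array length from the array itself, so it illustrates the pattern rather than reproducing code from this patch.

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/qdev-properties.h"

typedef struct DemoDeviceState {     /* hypothetical device, illustration only */
    DeviceState parent_obj;
    int64_t size;
    uint64_t bar;
} DemoDeviceState;

static const Property demo_properties[] = {
    DEFINE_PROP_INT64("size", DemoDeviceState, size, 128 * MiB),
    DEFINE_PROP_UINT64("bar", DemoDeviceState, bar, 0),
    /* no DEFINE_PROP_END_OF_LIST(): the length comes from the array itself */
};

static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, demo_properties);
}
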
diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c
index 18cc76a..5d947d8 100644
--- a/hw/ppc/pnv_psi.c
+++ b/hw/ppc/pnv_psi.c
@@ -18,12 +18,12 @@
*/
#include "qemu/osdep.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/irq.h"
#include "target/ppc/cpu.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "qapi/error.h"
@@ -552,13 +552,12 @@ static int pnv_psi_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset)
return 0;
}
-static Property pnv_psi_properties[] = {
+static const Property pnv_psi_properties[] = {
DEFINE_PROP_UINT64("bar", PnvPsi, bar, 0),
DEFINE_PROP_UINT64("fsp-bar", PnvPsi, fsp_bar, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pnv_psi_power8_class_init(ObjectClass *klass, void *data)
+static void pnv_psi_power8_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvPsiClass *ppc = PNV_PSI_CLASS(klass);
@@ -888,7 +887,7 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
pnv_psi_realize(dev, errp);
}
-static void pnv_psi_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_psi_power9_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvPsiClass *ppc = PNV_PSI_CLASS(klass);
@@ -897,7 +896,7 @@ static void pnv_psi_power9_class_init(ObjectClass *klass, void *data)
dc->desc = "PowerNV PSI Controller POWER9";
dc->realize = pnv_psi_power9_realize;
- dc->reset = pnv_psi_power9_reset;
+ device_class_set_legacy_reset(dc, pnv_psi_power9_reset);
ppc->xscom_pcba = PNV9_XSCOM_PSIHB_BASE;
ppc->xscom_size = PNV9_XSCOM_PSIHB_SIZE;
@@ -914,13 +913,13 @@ static const TypeInfo pnv_psi_power9_info = {
.instance_size = sizeof(Pnv9Psi),
.instance_init = pnv_psi_power9_instance_init,
.class_init = pnv_psi_power9_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_XIVE_NOTIFIER },
{ },
},
};
-static void pnv_psi_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_psi_power10_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvPsiClass *ppc = PNV_PSI_CLASS(klass);
@@ -940,7 +939,7 @@ static const TypeInfo pnv_psi_power10_info = {
.class_init = pnv_psi_power10_class_init,
};
-static void pnv_psi_class_init(ObjectClass *klass, void *data)
+static void pnv_psi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
@@ -949,7 +948,7 @@ static void pnv_psi_class_init(ObjectClass *klass, void *data)
dc->desc = "PowerNV PSI Controller";
device_class_set_props(dc, pnv_psi_properties);
- dc->reset = pnv_psi_reset;
+ device_class_set_legacy_reset(dc, pnv_psi_reset);
dc->user_creatable = false;
}
@@ -960,7 +959,7 @@ static const TypeInfo pnv_psi_info = {
.class_init = pnv_psi_class_init,
.class_size = sizeof(PnvPsiClass),
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
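
The PSI hunks above also convert direct dc->reset assignments to device_class_set_legacy_reset(). A minimal sketch of that conversion for a hypothetical demo device class is shown below; the demo_* names are invented, while the helper is the same one used in the hunks and is assumed to come from QEMU's qdev-core header.

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

static void demo_reset(DeviceState *dev)
{
    /* return the device's registers to their power-on values */
}

static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* previously: dc->reset = demo_reset; */
    device_class_set_legacy_reset(dc, demo_reset);
}
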
diff --git a/hw/ppc/pnv_sbe.c b/hw/ppc/pnv_sbe.c
index 74cee4e..34dc013 100644
--- a/hw/ppc/pnv_sbe.c
+++ b/hw/ppc/pnv_sbe.c
@@ -331,7 +331,7 @@ static const MemoryRegionOps pnv_sbe_power9_xscom_mbox_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_sbe_power9_class_init(ObjectClass *klass, void *data)
+static void pnv_sbe_power9_class_init(ObjectClass *klass, const void *data)
{
PnvSBEClass *psc = PNV_SBE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -350,7 +350,7 @@ static const TypeInfo pnv_sbe_power9_type_info = {
.class_init = pnv_sbe_power9_class_init,
};
-static void pnv_sbe_power10_class_init(ObjectClass *klass, void *data)
+static void pnv_sbe_power10_class_init(ObjectClass *klass, const void *data)
{
PnvSBEClass *psc = PNV_SBE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -386,7 +386,7 @@ static void pnv_sbe_realize(DeviceState *dev, Error **errp)
sbe->timer = timer_new_us(QEMU_CLOCK_VIRTUAL, sbe_timer, sbe);
}
-static void pnv_sbe_class_init(ObjectClass *klass, void *data)
+static void pnv_sbe_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
index a17816d..fbfec82 100644
--- a/hw/ppc/pnv_xscom.c
+++ b/hw/ppc/pnv_xscom.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
#include "target/ppc/cpu.h"
#include "hw/sysbus.h"
@@ -75,11 +75,6 @@ static uint64_t xscom_read_default(PnvChip *chip, uint32_t pcba)
case PRD_P9_IPOLL_REG_MASK:
case PRD_P9_IPOLL_REG_STATUS:
- /* P9 xscom reset */
- case 0x0090018: /* Receive status reg */
- case 0x0090012: /* log register */
- case 0x0090013: /* error register */
-
/* P8 xscom reset */
case 0x2020007: /* ADU stuff, log register */
case 0x2020009: /* ADU stuff, error register */
@@ -119,10 +114,6 @@ static bool xscom_write_default(PnvChip *chip, uint32_t pcba, uint64_t val)
case 0x1010c03: /* PIBAM FIR MASK */
case 0x1010c04: /* PIBAM FIR MASK */
case 0x1010c05: /* PIBAM FIR MASK */
- /* P9 xscom reset */
- case 0x0090018: /* Receive status reg */
- case 0x0090012: /* log register */
- case 0x0090013: /* error register */
/* P8 xscom reset */
case 0x2020007: /* ADU stuff, log register */
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index e6fa558..43d0d0e 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -27,13 +27,14 @@
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc_e500.h"
#include "qemu/timer.h"
-#include "sysemu/cpus.h"
+#include "exec/cpu-interrupt.h"
+#include "system/cpus.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/kvm.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "trace.h"
@@ -267,7 +268,6 @@ static void power9_set_irq(void *opaque, int pin, int level)
break;
default:
g_assert_not_reached();
- return;
}
}
@@ -729,7 +729,9 @@ static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
int64_t decr;
n = ns_to_tb(tb_env->decr_freq, now);
- if (next > n && tb_env->flags & PPC_TIMER_BOOKE) {
+
+ /* BookE timers stop when reaching 0. */
+ if (next < n && tb_env->flags & PPC_TIMER_BOOKE) {
decr = 0;
} else {
decr = next - n;
@@ -1122,16 +1124,21 @@ void cpu_ppc_tb_reset(CPUPPCState *env)
timer_del(tb_env->hdecr_timer);
ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
tb_env->hdecr_next = 0;
+ _cpu_ppc_store_hdecr(cpu, 0, 0, 0, 64);
}
/*
* There is a bug in Linux 2.4 kernels:
* if a decrementer exception is pending when it enables msr_ee at startup,
* it's not ready to handle it...
+ *
+ * On machine reset, this is called before icount is reset, so for
+ * icount-mode, setting TB registers using now == qemu_clock_get_ns()
+ * results in them being garbage after icount is reset. Use an
+ * explicit now == 0 to get a consistent reset state.
*/
- cpu_ppc_store_decr(env, -1);
- cpu_ppc_store_hdecr(env, -1);
- cpu_ppc_store_purr(env, 0x0000000000000000ULL);
+ _cpu_ppc_store_decr(cpu, 0, 0, -1, 64);
+ _cpu_ppc_store_purr(env, 0, 0);
}
void cpu_ppc_tb_free(CPUPPCState *env)
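
The decrementer hunk above flips the comparison from next > n to next < n: a BookE decrementer stops at zero, so when the expiry tick (next) is already behind the current tick count (n) the register must read 0 rather than a stale difference. A self-contained sketch of just that arithmetic, with names mirroring the hunk but no QEMU types:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* next: tick value at which the decrementer reaches zero; n: current ticks */
static int64_t demo_load_decr(int64_t next, int64_t n, bool booke)
{
    if (booke && next < n) {
        return 0;                /* BookE timers stop when reaching 0 */
    }
    return next - n;             /* ticks remaining until expiry */
}

int main(void)
{
    assert(demo_load_decr(100, 150, true)  == 0);   /* already expired on BookE */
    assert(demo_load_decr(100, 150, false) == -50); /* non-BookE keeps counting */
    assert(demo_load_decr(200, 150, true)  == 50);  /* still pending */
    return 0;
}
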
diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h
deleted file mode 100644
index 9a43126..0000000
--- a/hw/ppc/ppc405.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * QEMU PowerPC 405 shared definitions
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef PPC405_H
-#define PPC405_H
-
-#include "qom/object.h"
-#include "hw/ppc/ppc4xx.h"
-#include "hw/intc/ppc-uic.h"
-#include "hw/i2c/ppc4xx_i2c.h"
-
-/* PLB to OPB bridge */
-#define TYPE_PPC405_POB "ppc405-pob"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405PobState, PPC405_POB);
-struct Ppc405PobState {
- Ppc4xxDcrDeviceState parent_obj;
-
- uint32_t bear;
- uint32_t besr0;
- uint32_t besr1;
-};
-
-/* OPB arbitrer */
-#define TYPE_PPC405_OPBA "ppc405-opba"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OpbaState, PPC405_OPBA);
-struct Ppc405OpbaState {
- SysBusDevice parent_obj;
-
- MemoryRegion io;
- uint8_t cr;
- uint8_t pr;
-};
-
-/* DMA controller */
-#define TYPE_PPC405_DMA "ppc405-dma"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405DmaState, PPC405_DMA);
-struct Ppc405DmaState {
- Ppc4xxDcrDeviceState parent_obj;
-
- qemu_irq irqs[4];
- uint32_t cr[4];
- uint32_t ct[4];
- uint32_t da[4];
- uint32_t sa[4];
- uint32_t sg[4];
- uint32_t sr;
- uint32_t sgc;
- uint32_t slp;
- uint32_t pol;
-};
-
-/* GPIO */
-#define TYPE_PPC405_GPIO "ppc405-gpio"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GpioState, PPC405_GPIO);
-struct Ppc405GpioState {
- SysBusDevice parent_obj;
-
- MemoryRegion io;
- uint32_t or;
- uint32_t tcr;
- uint32_t osrh;
- uint32_t osrl;
- uint32_t tsrh;
- uint32_t tsrl;
- uint32_t odr;
- uint32_t ir;
- uint32_t rr1;
- uint32_t isr1h;
- uint32_t isr1l;
-};
-
-/* On Chip Memory */
-#define TYPE_PPC405_OCM "ppc405-ocm"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OcmState, PPC405_OCM);
-struct Ppc405OcmState {
- Ppc4xxDcrDeviceState parent_obj;
-
- MemoryRegion ram;
- MemoryRegion isarc_ram;
- MemoryRegion dsarc_ram;
- uint32_t isarc;
- uint32_t isacntl;
- uint32_t dsarc;
- uint32_t dsacntl;
-};
-
-/* General purpose timers */
-#define TYPE_PPC405_GPT "ppc405-gpt"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GptState, PPC405_GPT);
-struct Ppc405GptState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
-
- int64_t tb_offset;
- uint32_t tb_freq;
- QEMUTimer *timer;
- qemu_irq irqs[5];
- uint32_t oe;
- uint32_t ol;
- uint32_t im;
- uint32_t is;
- uint32_t ie;
- uint32_t comp[5];
- uint32_t mask[5];
-};
-
-#define TYPE_PPC405_CPC "ppc405-cpc"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405CpcState, PPC405_CPC);
-
-enum {
- PPC405EP_CPU_CLK = 0,
- PPC405EP_PLB_CLK = 1,
- PPC405EP_OPB_CLK = 2,
- PPC405EP_EBC_CLK = 3,
- PPC405EP_MAL_CLK = 4,
- PPC405EP_PCI_CLK = 5,
- PPC405EP_UART0_CLK = 6,
- PPC405EP_UART1_CLK = 7,
- PPC405EP_CLK_NB = 8,
-};
-
-struct Ppc405CpcState {
- Ppc4xxDcrDeviceState parent_obj;
-
- uint32_t sysclk;
- clk_setup_t clk_setup[PPC405EP_CLK_NB];
- uint32_t boot;
- uint32_t epctl;
- uint32_t pllmr[2];
- uint32_t ucr;
- uint32_t srr;
- uint32_t jtagid;
- uint32_t pci;
- /* Clock and power management */
- uint32_t er;
- uint32_t fr;
- uint32_t sr;
-};
-
-#define TYPE_PPC405_SOC "ppc405-soc"
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405SoCState, PPC405_SOC);
-
-struct Ppc405SoCState {
- /* Private */
- DeviceState parent_obj;
-
- /* Public */
- PowerPCCPU cpu;
- PPCUIC uic;
- Ppc405CpcState cpc;
- Ppc405GptState gpt;
- Ppc405OcmState ocm;
- Ppc405GpioState gpio;
- Ppc405DmaState dma;
- PPC4xxI2CState i2c;
- Ppc4xxEbcState ebc;
- Ppc405OpbaState opba;
- Ppc405PobState pob;
- Ppc4xxPlbState plb;
- Ppc4xxMalState mal;
- Ppc4xxSdramDdrState sdram;
-};
-
-#endif /* PPC405_H */
diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c
deleted file mode 100644
index c44e7ed..0000000
--- a/hw/ppc/ppc405_boards.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * QEMU PowerPC 405 evaluation boards emulation
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/datadir.h"
-#include "cpu.h"
-#include "hw/ppc/ppc.h"
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
-#include "ppc405.h"
-#include "hw/rtc/m48t59.h"
-#include "hw/block/flash.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/block-backend.h"
-#include "hw/boards.h"
-#include "qemu/error-report.h"
-#include "hw/loader.h"
-#include "qemu/cutils.h"
-#include "elf.h"
-
-#define BIOS_FILENAME "ppc405_rom.bin"
-#define BIOS_SIZE (2 * MiB)
-
-#define KERNEL_LOAD_ADDR 0x01000000
-#define INITRD_LOAD_ADDR 0x01800000
-
-#define PPC405EP_SDRAM_BASE 0x00000000
-#define PPC405EP_SRAM_BASE 0xFFF00000
-#define PPC405EP_SRAM_SIZE (512 * KiB)
-
-#define USE_FLASH_BIOS
-
-#define TYPE_PPC405_MACHINE MACHINE_TYPE_NAME("ppc405")
-OBJECT_DECLARE_SIMPLE_TYPE(Ppc405MachineState, PPC405_MACHINE);
-
-struct Ppc405MachineState {
- /* Private */
- MachineState parent_obj;
- /* Public */
-
- Ppc405SoCState soc;
-};
-
-/* CPU reset handler when booting directly from a loaded kernel */
-static struct boot_info {
- uint32_t entry;
- uint32_t bdloc;
- uint32_t initrd_base;
- uint32_t initrd_size;
- uint32_t cmdline_base;
- uint32_t cmdline_size;
-} boot_info;
-
-static void main_cpu_reset(void *opaque)
-{
- PowerPCCPU *cpu = opaque;
- CPUPPCState *env = &cpu->env;
- struct boot_info *bi = env->load_info;
-
- cpu_reset(CPU(cpu));
-
- /* stack: top of sram */
- env->gpr[1] = PPC405EP_SRAM_BASE + PPC405EP_SRAM_SIZE - 8;
-
- /* Tune our boot state */
- env->gpr[3] = bi->bdloc;
- env->gpr[4] = bi->initrd_base;
- env->gpr[5] = bi->initrd_base + bi->initrd_size;
- env->gpr[6] = bi->cmdline_base;
- env->gpr[7] = bi->cmdline_size;
-
- env->nip = bi->entry;
-}
-
-/* Bootinfo as set-up by u-boot */
-typedef struct {
- uint32_t bi_memstart;
- uint32_t bi_memsize;
- uint32_t bi_flashstart;
- uint32_t bi_flashsize;
- uint32_t bi_flashoffset; /* 0x10 */
- uint32_t bi_sramstart;
- uint32_t bi_sramsize;
- uint32_t bi_bootflags;
- uint32_t bi_ipaddr; /* 0x20 */
- uint8_t bi_enetaddr[6];
- uint16_t bi_ethspeed;
- uint32_t bi_intfreq;
- uint32_t bi_busfreq; /* 0x30 */
- uint32_t bi_baudrate;
- uint8_t bi_s_version[4];
- uint8_t bi_r_version[32];
- uint32_t bi_procfreq;
- uint32_t bi_plb_busfreq;
- uint32_t bi_pci_busfreq;
- uint8_t bi_pci_enetaddr[6];
- uint8_t bi_pci_enetaddr2[6]; /* PPC405EP specific */
- uint32_t bi_opbfreq;
- uint32_t bi_iic_fast[2];
-} ppc4xx_bd_info_t;
-
-static void ppc405_set_default_bootinfo(ppc4xx_bd_info_t *bd,
- ram_addr_t ram_size)
-{
- memset(bd, 0, sizeof(*bd));
-
- bd->bi_memstart = PPC405EP_SDRAM_BASE;
- bd->bi_memsize = ram_size;
- bd->bi_sramstart = PPC405EP_SRAM_BASE;
- bd->bi_sramsize = PPC405EP_SRAM_SIZE;
- bd->bi_bootflags = 0;
- bd->bi_intfreq = 133333333;
- bd->bi_busfreq = 33333333;
- bd->bi_baudrate = 115200;
- bd->bi_s_version[0] = 'Q';
- bd->bi_s_version[1] = 'M';
- bd->bi_s_version[2] = 'U';
- bd->bi_s_version[3] = '\0';
- bd->bi_r_version[0] = 'Q';
- bd->bi_r_version[1] = 'E';
- bd->bi_r_version[2] = 'M';
- bd->bi_r_version[3] = 'U';
- bd->bi_r_version[4] = '\0';
- bd->bi_procfreq = 133333333;
- bd->bi_plb_busfreq = 33333333;
- bd->bi_pci_busfreq = 33333333;
- bd->bi_opbfreq = 33333333;
-}
-
-static ram_addr_t __ppc405_set_bootinfo(CPUPPCState *env, ppc4xx_bd_info_t *bd)
-{
- CPUState *cs = env_cpu(env);
- ram_addr_t bdloc;
- int i, n;
-
- /* We put the bd structure at the top of memory */
- if (bd->bi_memsize >= 0x01000000UL) {
- bdloc = 0x01000000UL - sizeof(ppc4xx_bd_info_t);
- } else {
- bdloc = bd->bi_memsize - sizeof(ppc4xx_bd_info_t);
- }
- stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart);
- stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize);
- stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart);
- stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize);
- stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset);
- stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart);
- stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize);
- stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags);
- stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr);
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]);
- }
- stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed);
- stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq);
- stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq);
- stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate);
- for (i = 0; i < 4; i++) {
- stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]);
- }
- for (i = 0; i < 32; i++) {
- stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]);
- }
- stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_procfreq);
- stl_be_phys(cs->as, bdloc + 0x60, bd->bi_plb_busfreq);
- stl_be_phys(cs->as, bdloc + 0x64, bd->bi_pci_busfreq);
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + 0x68 + i, bd->bi_pci_enetaddr[i]);
- }
- n = 0x70; /* includes 2 bytes hole */
- for (i = 0; i < 6; i++) {
- stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]);
- }
- stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq);
- n += 4;
- for (i = 0; i < 2; i++) {
- stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]);
- n += 4;
- }
-
- return bdloc;
-}
-
-static ram_addr_t ppc405_set_bootinfo(CPUPPCState *env, ram_addr_t ram_size)
-{
- ppc4xx_bd_info_t bd;
-
- memset(&bd, 0, sizeof(bd));
-
- ppc405_set_default_bootinfo(&bd, ram_size);
-
- return __ppc405_set_bootinfo(env, &bd);
-}
-
-static void boot_from_kernel(MachineState *machine, PowerPCCPU *cpu)
-{
- CPUPPCState *env = &cpu->env;
- hwaddr boot_entry;
- hwaddr kernel_base;
- int kernel_size;
- hwaddr initrd_base;
- int initrd_size;
- ram_addr_t bdloc;
- int len;
-
- bdloc = ppc405_set_bootinfo(env, machine->ram_size);
- boot_info.bdloc = bdloc;
-
- kernel_size = load_elf(machine->kernel_filename, NULL, NULL, NULL,
- &boot_entry, &kernel_base, NULL, NULL,
- 1, PPC_ELF_MACHINE, 0, 0);
- if (kernel_size < 0) {
- error_report("Could not load kernel '%s' : %s",
- machine->kernel_filename, load_elf_strerror(kernel_size));
- exit(1);
- }
- boot_info.entry = boot_entry;
-
- /* load initrd */
- if (machine->initrd_filename) {
- initrd_base = INITRD_LOAD_ADDR;
- initrd_size = load_image_targphys(machine->initrd_filename, initrd_base,
- machine->ram_size - initrd_base);
- if (initrd_size < 0) {
- error_report("could not load initial ram disk '%s'",
- machine->initrd_filename);
- exit(1);
- }
-
- boot_info.initrd_base = initrd_base;
- boot_info.initrd_size = initrd_size;
- }
-
- if (machine->kernel_cmdline) {
- len = strlen(machine->kernel_cmdline);
- bdloc -= ((len + 255) & ~255);
- cpu_physical_memory_write(bdloc, machine->kernel_cmdline, len + 1);
- boot_info.cmdline_base = bdloc;
- boot_info.cmdline_size = bdloc + len;
- }
-
- /* Install our custom reset handler to start from Linux */
- qemu_register_reset(main_cpu_reset, cpu);
- env->load_info = &boot_info;
-}
-
-static void ppc405_init(MachineState *machine)
-{
- Ppc405MachineState *ppc405 = PPC405_MACHINE(machine);
- const char *kernel_filename = machine->kernel_filename;
- MemoryRegion *sysmem = get_system_memory();
-
- object_initialize_child(OBJECT(machine), "soc", &ppc405->soc,
- TYPE_PPC405_SOC);
- object_property_set_link(OBJECT(&ppc405->soc), "dram",
- OBJECT(machine->ram), &error_abort);
- object_property_set_uint(OBJECT(&ppc405->soc), "sys-clk", 33333333,
- &error_abort);
- qdev_realize(DEVICE(&ppc405->soc), NULL, &error_fatal);
-
- /* allocate and load BIOS */
- if (machine->firmware) {
- MemoryRegion *bios = g_new(MemoryRegion, 1);
- g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
- machine->firmware);
- long bios_size;
-
- memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE,
- &error_fatal);
-
- if (!filename) {
- error_report("Could not find firmware '%s'", machine->firmware);
- exit(1);
- }
-
- bios_size = load_image_size(filename,
- memory_region_get_ram_ptr(bios),
- BIOS_SIZE);
- if (bios_size < 0) {
- error_report("Could not load PowerPC BIOS '%s'", machine->firmware);
- exit(1);
- }
-
- bios_size = (bios_size + 0xfff) & ~0xfff;
- memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
- }
-
- /* Load kernel and initrd using U-Boot images */
- if (kernel_filename && machine->firmware) {
- target_ulong kernel_base, initrd_base;
- long kernel_size, initrd_size;
-
- kernel_base = KERNEL_LOAD_ADDR;
- kernel_size = load_image_targphys(kernel_filename, kernel_base,
- machine->ram_size - kernel_base);
- if (kernel_size < 0) {
- error_report("could not load kernel '%s'", kernel_filename);
- exit(1);
- }
-
- /* load initrd */
- if (machine->initrd_filename) {
- initrd_base = INITRD_LOAD_ADDR;
- initrd_size = load_image_targphys(machine->initrd_filename,
- initrd_base,
- machine->ram_size - initrd_base);
- if (initrd_size < 0) {
- error_report("could not load initial ram disk '%s'",
- machine->initrd_filename);
- exit(1);
- }
- }
-
- /* Load ELF kernel and rootfs.cpio */
- } else if (kernel_filename && !machine->firmware) {
- ppc4xx_sdram_ddr_enable(&ppc405->soc.sdram);
- boot_from_kernel(machine, &ppc405->soc.cpu);
- }
-}
-
-static void ppc405_machine_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "PPC405 generic machine";
- mc->init = ppc405_init;
- mc->default_ram_size = 128 * MiB;
- mc->default_ram_id = "ppc405.ram";
- mc->deprecation_reason = "machine is old and unmaintained";
-}
-
-static const TypeInfo ppc405_machine_type = {
- .name = TYPE_PPC405_MACHINE,
- .parent = TYPE_MACHINE,
- .instance_size = sizeof(Ppc405MachineState),
- .class_init = ppc405_machine_class_init,
- .abstract = true,
-};
-
-/*****************************************************************************/
-/* PPC405EP reference board (IBM) */
-/*
- * Standalone board with:
- * - PowerPC 405EP CPU
- * - SDRAM (0x00000000)
- * - Flash (0xFFF80000)
- * - SRAM (0xFFF00000)
- * - NVRAM (0xF0000000)
- * - FPGA (0xF0300000)
- */
-
-#define PPC405EP_NVRAM_BASE 0xF0000000
-#define PPC405EP_FPGA_BASE 0xF0300000
-#define PPC405EP_FLASH_BASE 0xFFF80000
-
-#define TYPE_REF405EP_FPGA "ref405ep-fpga"
-OBJECT_DECLARE_SIMPLE_TYPE(Ref405epFpgaState, REF405EP_FPGA);
-struct Ref405epFpgaState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
-
- uint8_t reg0;
- uint8_t reg1;
-};
-
-static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size)
-{
- Ref405epFpgaState *fpga = opaque;
- uint32_t ret;
-
- switch (addr) {
- case 0x0:
- ret = fpga->reg0;
- break;
- case 0x1:
- ret = fpga->reg1;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void ref405ep_fpga_writeb(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ref405epFpgaState *fpga = opaque;
-
- switch (addr) {
- case 0x0:
- /* Read only */
- break;
- case 0x1:
- fpga->reg1 = value;
- break;
- default:
- break;
- }
-}
-
-static const MemoryRegionOps ref405ep_fpga_ops = {
- .read = ref405ep_fpga_readb,
- .write = ref405ep_fpga_writeb,
- .impl.min_access_size = 1,
- .impl.max_access_size = 1,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
-static void ref405ep_fpga_reset(DeviceState *dev)
-{
- Ref405epFpgaState *fpga = REF405EP_FPGA(dev);
-
- fpga->reg0 = 0x00;
- fpga->reg1 = 0x0F;
-}
-
-static void ref405ep_fpga_realize(DeviceState *dev, Error **errp)
-{
- Ref405epFpgaState *s = REF405EP_FPGA(dev);
-
- memory_region_init_io(&s->iomem, OBJECT(s), &ref405ep_fpga_ops, s,
- "fpga", 0x00000100);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
-}
-
-static void ref405ep_fpga_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ref405ep_fpga_realize;
- dc->reset = ref405ep_fpga_reset;
- /* Reason: only works as part of a ppc405 board */
- dc->user_creatable = false;
-}
-
-static const TypeInfo ref405ep_fpga_type = {
- .name = TYPE_REF405EP_FPGA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ref405epFpgaState),
- .class_init = ref405ep_fpga_class_init,
-};
-
-static void ref405ep_init(MachineState *machine)
-{
- DeviceState *dev;
- SysBusDevice *s;
- MemoryRegion *sram = g_new(MemoryRegion, 1);
-
- ppc405_init(machine);
-
- /* allocate SRAM */
- memory_region_init_ram(sram, NULL, "ref405ep.sram", PPC405EP_SRAM_SIZE,
- &error_fatal);
- memory_region_add_subregion(get_system_memory(), PPC405EP_SRAM_BASE, sram);
-
- /* Register FPGA */
- dev = qdev_new(TYPE_REF405EP_FPGA);
- object_property_add_child(OBJECT(machine), "fpga", OBJECT(dev));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, PPC405EP_FPGA_BASE);
-
- /* Register NVRAM */
- dev = qdev_new("sysbus-m48t08");
- qdev_prop_set_int32(dev, "base-year", 1968);
- s = SYS_BUS_DEVICE(dev);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, PPC405EP_NVRAM_BASE);
-}
-
-static void ref405ep_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "ref405ep";
- mc->init = ref405ep_init;
-}
-
-static const TypeInfo ref405ep_type = {
- .name = MACHINE_TYPE_NAME("ref405ep"),
- .parent = TYPE_PPC405_MACHINE,
- .class_init = ref405ep_class_init,
-};
-
-static void ppc405_machine_init(void)
-{
- type_register_static(&ppc405_machine_type);
- type_register_static(&ref405ep_type);
- type_register_static(&ref405ep_fpga_type);
-}
-
-type_init(ppc405_machine_init)
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
deleted file mode 100644
index 0cc6817..0000000
--- a/hw/ppc/ppc405_uc.c
+++ /dev/null
@@ -1,1217 +0,0 @@
-/*
- * QEMU PowerPC 405 embedded processors emulation
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "hw/ppc/ppc.h"
-#include "hw/i2c/ppc4xx_i2c.h"
-#include "hw/irq.h"
-#include "hw/qdev-properties.h"
-#include "ppc405.h"
-#include "hw/char/serial.h"
-#include "qemu/timer.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
-#include "exec/address-spaces.h"
-#include "hw/intc/ppc-uic.h"
-#include "trace.h"
-
-/*****************************************************************************/
-/* Shared peripherals */
-
-/*****************************************************************************/
-/* PLB to OPB bridge */
-enum {
- POB0_BESR0 = 0x0A0,
- POB0_BESR1 = 0x0A2,
- POB0_BEAR = 0x0A4,
-};
-
-static uint32_t dcr_read_pob(void *opaque, int dcrn)
-{
- Ppc405PobState *pob = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case POB0_BEAR:
- ret = pob->bear;
- break;
- case POB0_BESR0:
- ret = pob->besr0;
- break;
- case POB0_BESR1:
- ret = pob->besr1;
- break;
- default:
- /* Avoid gcc warning */
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_pob(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405PobState *pob = opaque;
-
- switch (dcrn) {
- case POB0_BEAR:
- /* Read only */
- break;
- case POB0_BESR0:
- /* Write-clear */
- pob->besr0 &= ~val;
- break;
- case POB0_BESR1:
- /* Write-clear */
- pob->besr1 &= ~val;
- break;
- }
-}
-
-static void ppc405_pob_reset(DeviceState *dev)
-{
- Ppc405PobState *pob = PPC405_POB(dev);
-
- /* No error */
- pob->bear = 0x00000000;
- pob->besr0 = 0x0000000;
- pob->besr1 = 0x0000000;
-}
-
-static void ppc405_pob_realize(DeviceState *dev, Error **errp)
-{
- Ppc405PobState *pob = PPC405_POB(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- ppc4xx_dcr_register(dcr, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob);
- ppc4xx_dcr_register(dcr, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob);
- ppc4xx_dcr_register(dcr, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob);
-}
-
-static void ppc405_pob_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_pob_realize;
- dc->reset = ppc405_pob_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* OPB arbitrer */
-static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size)
-{
- Ppc405OpbaState *opba = opaque;
- uint32_t ret;
-
- switch (addr) {
- case 0x00:
- ret = opba->cr;
- break;
- case 0x01:
- ret = opba->pr;
- break;
- default:
- ret = 0x00;
- break;
- }
-
- trace_opba_readb(addr, ret);
- return ret;
-}
-
-static void opba_writeb(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ppc405OpbaState *opba = opaque;
-
- trace_opba_writeb(addr, value);
-
- switch (addr) {
- case 0x00:
- opba->cr = value & 0xF8;
- break;
- case 0x01:
- opba->pr = value & 0xFF;
- break;
- default:
- break;
- }
-}
-static const MemoryRegionOps opba_ops = {
- .read = opba_readb,
- .write = opba_writeb,
- .impl.min_access_size = 1,
- .impl.max_access_size = 1,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_BIG_ENDIAN,
-};
-
-static void ppc405_opba_reset(DeviceState *dev)
-{
- Ppc405OpbaState *opba = PPC405_OPBA(dev);
-
- opba->cr = 0x00; /* No dynamic priorities - park disabled */
- opba->pr = 0x11;
-}
-
-static void ppc405_opba_realize(DeviceState *dev, Error **errp)
-{
- Ppc405OpbaState *s = PPC405_OPBA(dev);
-
- memory_region_init_io(&s->io, OBJECT(s), &opba_ops, s, "opba", 2);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io);
-}
-
-static void ppc405_opba_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_opba_realize;
- dc->reset = ppc405_opba_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* Code decompression controller */
-/* XXX: TODO */
-
-/*****************************************************************************/
-/* DMA controller */
-enum {
- DMA0_CR0 = 0x100,
- DMA0_CT0 = 0x101,
- DMA0_DA0 = 0x102,
- DMA0_SA0 = 0x103,
- DMA0_SG0 = 0x104,
- DMA0_CR1 = 0x108,
- DMA0_CT1 = 0x109,
- DMA0_DA1 = 0x10A,
- DMA0_SA1 = 0x10B,
- DMA0_SG1 = 0x10C,
- DMA0_CR2 = 0x110,
- DMA0_CT2 = 0x111,
- DMA0_DA2 = 0x112,
- DMA0_SA2 = 0x113,
- DMA0_SG2 = 0x114,
- DMA0_CR3 = 0x118,
- DMA0_CT3 = 0x119,
- DMA0_DA3 = 0x11A,
- DMA0_SA3 = 0x11B,
- DMA0_SG3 = 0x11C,
- DMA0_SR = 0x120,
- DMA0_SGC = 0x123,
- DMA0_SLP = 0x125,
- DMA0_POL = 0x126,
-};
-
-static uint32_t dcr_read_dma(void *opaque, int dcrn)
-{
- return 0;
-}
-
-static void dcr_write_dma(void *opaque, int dcrn, uint32_t val)
-{
-}
-
-static void ppc405_dma_reset(DeviceState *dev)
-{
- Ppc405DmaState *dma = PPC405_DMA(dev);
- int i;
-
- for (i = 0; i < 4; i++) {
- dma->cr[i] = 0x00000000;
- dma->ct[i] = 0x00000000;
- dma->da[i] = 0x00000000;
- dma->sa[i] = 0x00000000;
- dma->sg[i] = 0x00000000;
- }
- dma->sr = 0x00000000;
- dma->sgc = 0x00000000;
- dma->slp = 0x7C000000;
- dma->pol = 0x00000000;
-}
-
-static void ppc405_dma_realize(DeviceState *dev, Error **errp)
-{
- Ppc405DmaState *dma = PPC405_DMA(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dma->irqs); i++) {
- sysbus_init_irq(SYS_BUS_DEVICE(dma), &dma->irqs[i]);
- }
-
- ppc4xx_dcr_register(dcr, DMA0_CR0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG0, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG1, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG2, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CR3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_CT3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_DA3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SA3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SG3, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SR, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SGC, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_SLP, dma, &dcr_read_dma, &dcr_write_dma);
- ppc4xx_dcr_register(dcr, DMA0_POL, dma, &dcr_read_dma, &dcr_write_dma);
-}
-
-static void ppc405_dma_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_dma_realize;
- dc->reset = ppc405_dma_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* GPIO */
-static uint64_t ppc405_gpio_read(void *opaque, hwaddr addr, unsigned size)
-{
- trace_ppc405_gpio_read(addr, size);
- return 0;
-}
-
-static void ppc405_gpio_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- trace_ppc405_gpio_write(addr, size, value);
-}
-
-static const MemoryRegionOps ppc405_gpio_ops = {
- .read = ppc405_gpio_read,
- .write = ppc405_gpio_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void ppc405_gpio_realize(DeviceState *dev, Error **errp)
-{
- Ppc405GpioState *s = PPC405_GPIO(dev);
-
- memory_region_init_io(&s->io, OBJECT(s), &ppc405_gpio_ops, s, "gpio",
- 0x38);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io);
-}
-
-static void ppc405_gpio_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_gpio_realize;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* On Chip Memory */
-enum {
- OCM0_ISARC = 0x018,
- OCM0_ISACNTL = 0x019,
- OCM0_DSARC = 0x01A,
- OCM0_DSACNTL = 0x01B,
-};
-
-static void ocm_update_mappings(Ppc405OcmState *ocm,
- uint32_t isarc, uint32_t isacntl,
- uint32_t dsarc, uint32_t dsacntl)
-{
- trace_ocm_update_mappings(isarc, isacntl, dsarc, dsacntl, ocm->isarc,
- ocm->isacntl, ocm->dsarc, ocm->dsacntl);
-
- if (ocm->isarc != isarc ||
- (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) {
- if (ocm->isacntl & 0x80000000) {
- /* Unmap previously assigned memory region */
- trace_ocm_unmap("ISA", ocm->isarc);
- memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram);
- }
- if (isacntl & 0x80000000) {
- /* Map new instruction memory region */
- trace_ocm_map("ISA", isarc);
- memory_region_add_subregion(get_system_memory(), isarc,
- &ocm->isarc_ram);
- }
- }
- if (ocm->dsarc != dsarc ||
- (ocm->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) {
- if (ocm->dsacntl & 0x80000000) {
- /* Beware not to unmap the region we just mapped */
- if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) {
- /* Unmap previously assigned memory region */
- trace_ocm_unmap("DSA", ocm->dsarc);
- memory_region_del_subregion(get_system_memory(),
- &ocm->dsarc_ram);
- }
- }
- if (dsacntl & 0x80000000) {
- /* Beware not to remap the region we just mapped */
- if (!(isacntl & 0x80000000) || dsarc != isarc) {
- /* Map new data memory region */
- trace_ocm_map("DSA", dsarc);
- memory_region_add_subregion(get_system_memory(), dsarc,
- &ocm->dsarc_ram);
- }
- }
- }
-}
-
-static uint32_t dcr_read_ocm(void *opaque, int dcrn)
-{
- Ppc405OcmState *ocm = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case OCM0_ISARC:
- ret = ocm->isarc;
- break;
- case OCM0_ISACNTL:
- ret = ocm->isacntl;
- break;
- case OCM0_DSARC:
- ret = ocm->dsarc;
- break;
- case OCM0_DSACNTL:
- ret = ocm->dsacntl;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_ocm(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405OcmState *ocm = opaque;
- uint32_t isarc, dsarc, isacntl, dsacntl;
-
- isarc = ocm->isarc;
- dsarc = ocm->dsarc;
- isacntl = ocm->isacntl;
- dsacntl = ocm->dsacntl;
- switch (dcrn) {
- case OCM0_ISARC:
- isarc = val & 0xFC000000;
- break;
- case OCM0_ISACNTL:
- isacntl = val & 0xC0000000;
- break;
- case OCM0_DSARC:
- isarc = val & 0xFC000000;
- break;
- case OCM0_DSACNTL:
- isacntl = val & 0xC0000000;
- break;
- }
- ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
- ocm->isarc = isarc;
- ocm->dsarc = dsarc;
- ocm->isacntl = isacntl;
- ocm->dsacntl = dsacntl;
-}
-
-static void ppc405_ocm_reset(DeviceState *dev)
-{
- Ppc405OcmState *ocm = PPC405_OCM(dev);
- uint32_t isarc, dsarc, isacntl, dsacntl;
-
- isarc = 0x00000000;
- isacntl = 0x00000000;
- dsarc = 0x00000000;
- dsacntl = 0x00000000;
- ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
- ocm->isarc = isarc;
- ocm->dsarc = dsarc;
- ocm->isacntl = isacntl;
- ocm->dsacntl = dsacntl;
-}
-
-static void ppc405_ocm_realize(DeviceState *dev, Error **errp)
-{
- Ppc405OcmState *ocm = PPC405_OCM(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- /* XXX: Size is 4096 or 0x04000000 */
- memory_region_init_ram(&ocm->isarc_ram, OBJECT(ocm), "ppc405.ocm", 4 * KiB,
- &error_fatal);
- memory_region_init_alias(&ocm->dsarc_ram, OBJECT(ocm), "ppc405.dsarc",
- &ocm->isarc_ram, 0, 4 * KiB);
-
- ppc4xx_dcr_register(dcr, OCM0_ISARC, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_ISACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_DSARC, ocm, &dcr_read_ocm, &dcr_write_ocm);
- ppc4xx_dcr_register(dcr, OCM0_DSACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm);
-}
-
-static void ppc405_ocm_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_ocm_realize;
- dc->reset = ppc405_ocm_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* General purpose timers */
-static int ppc4xx_gpt_compare(Ppc405GptState *gpt, int n)
-{
- /* XXX: TODO */
- return 0;
-}
-
-static void ppc4xx_gpt_set_output(Ppc405GptState *gpt, int n, int level)
-{
- /* XXX: TODO */
-}
-
-static void ppc4xx_gpt_set_outputs(Ppc405GptState *gpt)
-{
- uint32_t mask;
- int i;
-
- mask = 0x80000000;
- for (i = 0; i < 5; i++) {
- if (gpt->oe & mask) {
- /* Output is enabled */
- if (ppc4xx_gpt_compare(gpt, i)) {
- /* Comparison is OK */
- ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask);
- } else {
- /* Comparison is KO */
- ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask ? 0 : 1);
- }
- }
- mask = mask >> 1;
- }
-}
-
-static void ppc4xx_gpt_set_irqs(Ppc405GptState *gpt)
-{
- uint32_t mask;
- int i;
-
- mask = 0x00008000;
- for (i = 0; i < 5; i++) {
- if (gpt->is & gpt->im & mask) {
- qemu_irq_raise(gpt->irqs[i]);
- } else {
- qemu_irq_lower(gpt->irqs[i]);
- }
- mask = mask >> 1;
- }
-}
-
-static void ppc4xx_gpt_compute_timer(Ppc405GptState *gpt)
-{
- /* XXX: TODO */
-}
-
-static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size)
-{
- Ppc405GptState *gpt = opaque;
- uint32_t ret;
- int idx;
-
- trace_ppc4xx_gpt_read(addr, size);
-
- switch (addr) {
- case 0x00:
- /* Time base counter */
- ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset,
- gpt->tb_freq, NANOSECONDS_PER_SECOND);
- break;
- case 0x10:
- /* Output enable */
- ret = gpt->oe;
- break;
- case 0x14:
- /* Output level */
- ret = gpt->ol;
- break;
- case 0x18:
- /* Interrupt mask */
- ret = gpt->im;
- break;
- case 0x1C:
- case 0x20:
- /* Interrupt status */
- ret = gpt->is;
- break;
- case 0x24:
- /* Interrupt enable */
- ret = gpt->ie;
- break;
- case 0x80 ... 0x90:
- /* Compare timer */
- idx = (addr - 0x80) >> 2;
- ret = gpt->comp[idx];
- break;
- case 0xC0 ... 0xD0:
- /* Compare mask */
- idx = (addr - 0xC0) >> 2;
- ret = gpt->mask[idx];
- break;
- default:
- ret = -1;
- break;
- }
-
- return ret;
-}
-
-static void ppc4xx_gpt_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- Ppc405GptState *gpt = opaque;
- int idx;
-
- trace_ppc4xx_gpt_write(addr, size, value);
-
- switch (addr) {
- case 0x00:
- /* Time base counter */
- gpt->tb_offset = muldiv64(value, NANOSECONDS_PER_SECOND, gpt->tb_freq)
- - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ppc4xx_gpt_compute_timer(gpt);
- break;
- case 0x10:
- /* Output enable */
- gpt->oe = value & 0xF8000000;
- ppc4xx_gpt_set_outputs(gpt);
- break;
- case 0x14:
- /* Output level */
- gpt->ol = value & 0xF8000000;
- ppc4xx_gpt_set_outputs(gpt);
- break;
- case 0x18:
- /* Interrupt mask */
- gpt->im = value & 0x0000F800;
- break;
- case 0x1C:
- /* Interrupt status set */
- gpt->is |= value & 0x0000F800;
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x20:
- /* Interrupt status clear */
- gpt->is &= ~(value & 0x0000F800);
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x24:
- /* Interrupt enable */
- gpt->ie = value & 0x0000F800;
- ppc4xx_gpt_set_irqs(gpt);
- break;
- case 0x80 ... 0x90:
- /* Compare timer */
- idx = (addr - 0x80) >> 2;
- gpt->comp[idx] = value & 0xF8000000;
- ppc4xx_gpt_compute_timer(gpt);
- break;
- case 0xC0 ... 0xD0:
- /* Compare mask */
- idx = (addr - 0xC0) >> 2;
- gpt->mask[idx] = value & 0xF8000000;
- ppc4xx_gpt_compute_timer(gpt);
- break;
- }
-}
-
-static const MemoryRegionOps gpt_ops = {
- .read = ppc4xx_gpt_read,
- .write = ppc4xx_gpt_write,
- .valid.min_access_size = 4,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void ppc4xx_gpt_cb(void *opaque)
-{
- Ppc405GptState *gpt = opaque;
-
- ppc4xx_gpt_set_irqs(gpt);
- ppc4xx_gpt_set_outputs(gpt);
- ppc4xx_gpt_compute_timer(gpt);
-}
-
-static void ppc405_gpt_reset(DeviceState *dev)
-{
- Ppc405GptState *gpt = PPC405_GPT(dev);
- int i;
-
- timer_del(gpt->timer);
- gpt->oe = 0x00000000;
- gpt->ol = 0x00000000;
- gpt->im = 0x00000000;
- gpt->is = 0x00000000;
- gpt->ie = 0x00000000;
- for (i = 0; i < 5; i++) {
- gpt->comp[i] = 0x00000000;
- gpt->mask[i] = 0x00000000;
- }
-}
-
-static void ppc405_gpt_realize(DeviceState *dev, Error **errp)
-{
- Ppc405GptState *s = PPC405_GPT(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- int i;
-
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, s);
- memory_region_init_io(&s->iomem, OBJECT(s), &gpt_ops, s, "gpt", 0xd4);
- sysbus_init_mmio(sbd, &s->iomem);
-
- for (i = 0; i < ARRAY_SIZE(s->irqs); i++) {
- sysbus_init_irq(sbd, &s->irqs[i]);
- }
-}
-
-static void ppc405_gpt_finalize(Object *obj)
-{
- /* timer will be NULL if the GPT wasn't realized */
- if (PPC405_GPT(obj)->timer) {
- timer_del(PPC405_GPT(obj)->timer);
- }
-}
-
-static void ppc405_gpt_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_gpt_realize;
- dc->reset = ppc405_gpt_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
-}
-
-/*****************************************************************************/
-/* PowerPC 405EP */
-/* CPU control */
-enum {
- PPC405EP_CPC0_PLLMR0 = 0x0F0,
- PPC405EP_CPC0_BOOT = 0x0F1,
- PPC405EP_CPC0_EPCTL = 0x0F3,
- PPC405EP_CPC0_PLLMR1 = 0x0F4,
- PPC405EP_CPC0_UCR = 0x0F5,
- PPC405EP_CPC0_SRR = 0x0F6,
- PPC405EP_CPC0_JTAGID = 0x0F7,
- PPC405EP_CPC0_PCI = 0x0F9,
-#if 0
- PPC405EP_CPC0_ER = xxx,
- PPC405EP_CPC0_FR = xxx,
- PPC405EP_CPC0_SR = xxx,
-#endif
-};
-
-static void ppc405ep_compute_clocks(Ppc405CpcState *cpc)
-{
- uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk;
- uint32_t UART0_clk, UART1_clk;
- uint64_t VCO_out, PLL_out;
- int M, D;
-
- VCO_out = 0;
- if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) {
- M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */
- trace_ppc405ep_clocks_compute("FBMUL", (cpc->pllmr[1] >> 20) & 0xF, M);
- D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */
- trace_ppc405ep_clocks_compute("FWDA", (cpc->pllmr[1] >> 16) & 0x7, D);
- VCO_out = (uint64_t)cpc->sysclk * M * D;
- if (VCO_out < 500000000UL || VCO_out > 1000000000UL) {
- /* Error - unlock the PLL */
- qemu_log_mask(LOG_GUEST_ERROR, "VCO out of range %" PRIu64 "\n",
- VCO_out);
-#if 0
- cpc->pllmr[1] &= ~0x80000000;
- goto pll_bypass;
-#endif
- }
- PLL_out = VCO_out / D;
- /* Pretend the PLL is locked */
- cpc->boot |= 0x00000001;
- } else {
-#if 0
- pll_bypass:
-#endif
- PLL_out = cpc->sysclk;
- if (cpc->pllmr[1] & 0x40000000) {
- /* Pretend the PLL is not locked */
- cpc->boot &= ~0x00000001;
- }
- }
- /* Now, compute all other clocks */
- D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */
- trace_ppc405ep_clocks_compute("CCDV", (cpc->pllmr[0] >> 20) & 0x3, D);
- CPU_clk = PLL_out / D;
- D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */
- trace_ppc405ep_clocks_compute("CBDV", (cpc->pllmr[0] >> 16) & 0x3, D);
- PLB_clk = CPU_clk / D;
- D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */
- trace_ppc405ep_clocks_compute("OPDV", (cpc->pllmr[0] >> 12) & 0x3, D);
- OPB_clk = PLB_clk / D;
- D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */
- trace_ppc405ep_clocks_compute("EPDV", (cpc->pllmr[0] >> 8) & 0x3, D);
- EBC_clk = PLB_clk / D;
- D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */
- trace_ppc405ep_clocks_compute("MPDV", (cpc->pllmr[0] >> 4) & 0x3, D);
- MAL_clk = PLB_clk / D;
- D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */
- trace_ppc405ep_clocks_compute("PPDV", cpc->pllmr[0] & 0x3, D);
- PCI_clk = PLB_clk / D;
- D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */
- trace_ppc405ep_clocks_compute("U0DIV", cpc->ucr & 0x7F, D);
- UART0_clk = PLL_out / D;
- D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */
- trace_ppc405ep_clocks_compute("U1DIV", (cpc->ucr >> 8) & 0x7F, D);
- UART1_clk = PLL_out / D;
-
- if (trace_event_get_state_backends(TRACE_PPC405EP_CLOCKS_SETUP)) {
- g_autofree char *trace = g_strdup_printf(
- "Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64
- " PLL out %" PRIu64 " Hz\n"
- "CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32
- " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32
- " UART1 %" PRIu32 "\n",
- cpc->sysclk, VCO_out, PLL_out,
- CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk,
- UART0_clk, UART1_clk);
- trace_ppc405ep_clocks_setup(trace);
- }
-
- /* Setup CPU clocks */
- clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk);
- /* Setup PLB clock */
- clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk);
- /* Setup OPB clock */
- clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk);
- /* Setup external clock */
- clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk);
- /* Setup MAL clock */
- clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk);
- /* Setup PCI clock */
- clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk);
- /* Setup UART0 clock */
- clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk);
- /* Setup UART1 clock */
- clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk);
-}
-
-static uint32_t dcr_read_epcpc(void *opaque, int dcrn)
-{
- Ppc405CpcState *cpc = opaque;
- uint32_t ret;
-
- switch (dcrn) {
- case PPC405EP_CPC0_BOOT:
- ret = cpc->boot;
- break;
- case PPC405EP_CPC0_EPCTL:
- ret = cpc->epctl;
- break;
- case PPC405EP_CPC0_PLLMR0:
- ret = cpc->pllmr[0];
- break;
- case PPC405EP_CPC0_PLLMR1:
- ret = cpc->pllmr[1];
- break;
- case PPC405EP_CPC0_UCR:
- ret = cpc->ucr;
- break;
- case PPC405EP_CPC0_SRR:
- ret = cpc->srr;
- break;
- case PPC405EP_CPC0_JTAGID:
- ret = cpc->jtagid;
- break;
- case PPC405EP_CPC0_PCI:
- ret = cpc->pci;
- break;
- default:
- /* Avoid gcc warning */
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static void dcr_write_epcpc(void *opaque, int dcrn, uint32_t val)
-{
- Ppc405CpcState *cpc = opaque;
-
- switch (dcrn) {
- case PPC405EP_CPC0_BOOT:
- /* Read-only register */
- break;
- case PPC405EP_CPC0_EPCTL:
- /* Don't care for now */
- cpc->epctl = val & 0xC00000F3;
- break;
- case PPC405EP_CPC0_PLLMR0:
- cpc->pllmr[0] = val & 0x00633333;
- ppc405ep_compute_clocks(cpc);
- break;
- case PPC405EP_CPC0_PLLMR1:
- cpc->pllmr[1] = val & 0xC0F73FFF;
- ppc405ep_compute_clocks(cpc);
- break;
- case PPC405EP_CPC0_UCR:
- /* UART control - don't care for now */
- cpc->ucr = val & 0x003F7F7F;
- break;
- case PPC405EP_CPC0_SRR:
- cpc->srr = val;
- break;
- case PPC405EP_CPC0_JTAGID:
- /* Read-only */
- break;
- case PPC405EP_CPC0_PCI:
- cpc->pci = val;
- break;
- }
-}
-
-static void ppc405_cpc_reset(DeviceState *dev)
-{
- Ppc405CpcState *cpc = PPC405_CPC(dev);
-
- cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */
- cpc->epctl = 0x00000000;
- cpc->pllmr[0] = 0x00021002;
- cpc->pllmr[1] = 0x80a552be;
- cpc->ucr = 0x00004646;
- cpc->srr = 0x00040000;
- cpc->pci = 0x00000000;
- cpc->er = 0x00000000;
- cpc->fr = 0x00000000;
- cpc->sr = 0x00000000;
- cpc->jtagid = 0x20267049;
- ppc405ep_compute_clocks(cpc);
-}
-
-/* XXX: sysclk should be between 25 and 100 MHz */
-static void ppc405_cpc_realize(DeviceState *dev, Error **errp)
-{
- Ppc405CpcState *cpc = PPC405_CPC(dev);
- Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);
-
- assert(dcr->cpu);
- cpc->clk_setup[PPC405EP_CPU_CLK].cb =
- ppc_40x_timers_init(&dcr->cpu->env, cpc->sysclk, PPC_INTERRUPT_PIT);
- cpc->clk_setup[PPC405EP_CPU_CLK].opaque = &dcr->cpu->env;
-
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_BOOT, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_EPCTL, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR0, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR1, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_UCR, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_SRR, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_JTAGID, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
- ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PCI, cpc,
- &dcr_read_epcpc, &dcr_write_epcpc);
-}
-
-static Property ppc405_cpc_properties[] = {
- DEFINE_PROP_UINT32("sys-clk", Ppc405CpcState, sysclk, 0),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void ppc405_cpc_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_cpc_realize;
- dc->reset = ppc405_cpc_reset;
- /* Reason: only works as function of a ppc4xx SoC */
- dc->user_creatable = false;
- device_class_set_props(dc, ppc405_cpc_properties);
-}
-
-/* PPC405_SOC */
-
-static void ppc405_soc_instance_init(Object *obj)
-{
- Ppc405SoCState *s = PPC405_SOC(obj);
-
- object_initialize_child(obj, "cpu", &s->cpu,
- POWERPC_CPU_TYPE_NAME("405ep"));
-
- object_initialize_child(obj, "uic", &s->uic, TYPE_PPC_UIC);
-
- object_initialize_child(obj, "cpc", &s->cpc, TYPE_PPC405_CPC);
- object_property_add_alias(obj, "sys-clk", OBJECT(&s->cpc), "sys-clk");
-
- object_initialize_child(obj, "gpt", &s->gpt, TYPE_PPC405_GPT);
-
- object_initialize_child(obj, "ocm", &s->ocm, TYPE_PPC405_OCM);
-
- object_initialize_child(obj, "gpio", &s->gpio, TYPE_PPC405_GPIO);
-
- object_initialize_child(obj, "dma", &s->dma, TYPE_PPC405_DMA);
-
- object_initialize_child(obj, "i2c", &s->i2c, TYPE_PPC4xx_I2C);
-
- object_initialize_child(obj, "ebc", &s->ebc, TYPE_PPC4xx_EBC);
-
- object_initialize_child(obj, "opba", &s->opba, TYPE_PPC405_OPBA);
-
- object_initialize_child(obj, "pob", &s->pob, TYPE_PPC405_POB);
-
- object_initialize_child(obj, "plb", &s->plb, TYPE_PPC4xx_PLB);
-
- object_initialize_child(obj, "mal", &s->mal, TYPE_PPC4xx_MAL);
-
- object_initialize_child(obj, "sdram", &s->sdram, TYPE_PPC4xx_SDRAM_DDR);
- object_property_add_alias(obj, "dram", OBJECT(&s->sdram), "dram");
-}
-
-static void ppc405_reset(void *opaque)
-{
- cpu_reset(CPU(opaque));
-}
-
-static void ppc405_soc_realize(DeviceState *dev, Error **errp)
-{
- Ppc405SoCState *s = PPC405_SOC(dev);
- CPUPPCState *env;
- SysBusDevice *sbd;
- int i;
-
- /* init CPUs */
- if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) {
- return;
- }
- qemu_register_reset(ppc405_reset, &s->cpu);
-
- env = &s->cpu.env;
-
- ppc_dcr_init(env, NULL, NULL);
-
- /* CPU control */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->cpc), &s->cpu, errp)) {
- return;
- }
-
- /* PLB arbitrer */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->plb), &s->cpu, errp)) {
- return;
- }
-
- /* PLB to OPB bridge */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->pob), &s->cpu, errp)) {
- return;
- }
-
- /* OBP arbitrer */
- sbd = SYS_BUS_DEVICE(&s->opba);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600600);
-
- /* Universal interrupt controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->uic), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->uic);
- sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT,
- qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_INT));
- sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT,
- qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_CINT));
-
- /* SDRAM controller */
- /*
- * We use the 440 DDR SDRAM controller which has more regs and features
- * but it's compatible enough for now
- */
- object_property_set_int(OBJECT(&s->sdram), "nbanks", 2, &error_abort);
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->sdram), &s->cpu, errp)) {
- return;
- }
- /* XXX 405EP has no ECC interrupt */
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdram), 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 17));
-
- /* External bus controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ebc), &s->cpu, errp)) {
- return;
- }
-
- /* DMA controller */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->dma), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->dma);
- for (i = 0; i < ARRAY_SIZE(s->dma.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 5 + i));
- }
-
- /* I2C controller */
- sbd = SYS_BUS_DEVICE(&s->i2c);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600500);
- sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(&s->uic), 2));
-
- /* GPIO */
- sbd = SYS_BUS_DEVICE(&s->gpio);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600700);
-
- /* Serial ports */
- if (serial_hd(0) != NULL) {
- serial_mm_init(get_system_memory(), 0xef600300, 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 0),
- PPC_SERIAL_MM_BAUDBASE, serial_hd(0),
- DEVICE_BIG_ENDIAN);
- }
- if (serial_hd(1) != NULL) {
- serial_mm_init(get_system_memory(), 0xef600400, 0,
- qdev_get_gpio_in(DEVICE(&s->uic), 1),
- PPC_SERIAL_MM_BAUDBASE, serial_hd(1),
- DEVICE_BIG_ENDIAN);
- }
-
- /* OCM */
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ocm), &s->cpu, errp)) {
- return;
- }
-
- /* GPT */
- sbd = SYS_BUS_DEVICE(&s->gpt);
- if (!sysbus_realize(sbd, errp)) {
- return;
- }
- sysbus_mmio_map(sbd, 0, 0xef600000);
- for (i = 0; i < ARRAY_SIZE(s->gpt.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 19 + i));
- }
-
- /* MAL */
- object_property_set_int(OBJECT(&s->mal), "txc-num", 4, &error_abort);
- object_property_set_int(OBJECT(&s->mal), "rxc-num", 2, &error_abort);
- if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->mal), &s->cpu, errp)) {
- return;
- }
- sbd = SYS_BUS_DEVICE(&s->mal);
- for (i = 0; i < ARRAY_SIZE(s->mal.irqs); i++) {
- sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 11 + i));
- }
-
- /* Ethernet */
- /* Uses UIC IRQs 9, 15, 17 */
-}
-
-static void ppc405_soc_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ppc405_soc_realize;
- /* Reason: only works as part of a ppc405 board/machine */
- dc->user_creatable = false;
-}
-
-static const TypeInfo ppc405_types[] = {
- {
- .name = TYPE_PPC405_POB,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405PobState),
- .class_init = ppc405_pob_class_init,
- }, {
- .name = TYPE_PPC405_OPBA,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405OpbaState),
- .class_init = ppc405_opba_class_init,
- }, {
- .name = TYPE_PPC405_DMA,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405DmaState),
- .class_init = ppc405_dma_class_init,
- }, {
- .name = TYPE_PPC405_GPIO,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405GpioState),
- .class_init = ppc405_gpio_class_init,
- }, {
- .name = TYPE_PPC405_OCM,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405OcmState),
- .class_init = ppc405_ocm_class_init,
- }, {
- .name = TYPE_PPC405_GPT,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(Ppc405GptState),
- .instance_finalize = ppc405_gpt_finalize,
- .class_init = ppc405_gpt_class_init,
- }, {
- .name = TYPE_PPC405_CPC,
- .parent = TYPE_PPC4xx_DCR_DEVICE,
- .instance_size = sizeof(Ppc405CpcState),
- .class_init = ppc405_cpc_class_init,
- }, {
- .name = TYPE_PPC405_SOC,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(Ppc405SoCState),
- .instance_init = ppc405_soc_instance_init,
- .class_init = ppc405_soc_class_init,
- }
-};
-
-DEFINE_TYPES(ppc405_types)
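For reference, the SoC code removed above follows QEMU's usual container idiom: child devices are embedded in the state struct and pre-initialized in instance_init with object_initialize_child(), then realized and wired up only in the SoC's own realize hook. A minimal sketch of that shape, with hypothetical names (MY_SOC, MyChildState and TYPE_MY_CHILD are placeholders, not real QEMU types):

    /* Sketch only: container device embedding one sysbus child. */
    typedef struct MySoCState {
        DeviceState parent_obj;
        MyChildState child;                 /* embedded, not pointer-allocated */
    } MySoCState;

    static void my_soc_instance_init(Object *obj)
    {
        MySoCState *s = MY_SOC(obj);

        /* No side effects here; just register the child as a QOM child. */
        object_initialize_child(obj, "child", &s->child, TYPE_MY_CHILD);
    }

    static void my_soc_realize(DeviceState *dev, Error **errp)
    {
        MySoCState *s = MY_SOC(dev);
        SysBusDevice *sbd = SYS_BUS_DEVICE(&s->child);

        /* Realize and map the child only when the SoC itself realizes. */
        if (!sysbus_realize(sbd, errp)) {
            return;
        }
        sysbus_mmio_map(sbd, 0, 0xef600600);
    }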
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index 73f80cf..6fff0d8 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -19,15 +19,15 @@
#include "net/net.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
-#include "sysemu/kvm.h"
-#include "sysemu/device_tree.h"
+#include "system/kvm.h"
+#include "system/device_tree.h"
#include "hw/loader.h"
#include "elf.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/ppc/ppc.h"
#include "hw/pci-host/ppc4xx.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/reset.h"
#include "hw/sysbus.h"
#include "hw/intc/ppc-uic.h"
#include "hw/qdev-properties.h"
@@ -64,7 +64,7 @@ static int bamboo_load_device_tree(MachineState *machine,
uint32_t tb_freq = 400000000;
uint32_t clock_freq = 400000000;
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ filename = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE);
if (!filename) {
return -1;
}
@@ -110,29 +110,6 @@ static int bamboo_load_device_tree(MachineState *machine,
return 0;
}
-/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
-static void mmubooke_create_initial_mapping(CPUPPCState *env,
- target_ulong va,
- hwaddr pa)
-{
- ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
-
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1U << 31; /* up to 0x80000000 */
- tlb->EPN = va & TARGET_PAGE_MASK;
- tlb->RPN = pa & TARGET_PAGE_MASK;
- tlb->PID = 0;
-
- tlb = &env->tlb.tlbe[1];
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1U << 31; /* up to 0xffffffff */
- tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
- tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
- tlb->PID = 0;
-}
-
static void main_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
@@ -143,8 +120,9 @@ static void main_cpu_reset(void *opaque)
env->gpr[3] = FDT_ADDR;
env->nip = entry;
- /* Create a mapping for the kernel. */
- mmubooke_create_initial_mapping(env, 0, 0);
+ /* Create a mapping spanning the 32bit addr space. */
+ booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
+ booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
}
static void bamboo_init(MachineState *machine)
@@ -250,7 +228,8 @@ static void bamboo_init(MachineState *machine)
if (success < 0) {
uint64_t elf_entry;
success = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry,
- NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0);
+ NULL, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
entry = elf_entry;
}
/* XXX try again as binary */
diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c
index 1312aa2..89e3fae 100644
--- a/hw/ppc/ppc440_uc.c
+++ b/hw/ppc/ppc440_uc.c
@@ -17,7 +17,7 @@
#include "hw/pci-host/ppc4xx.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "cpu.h"
#include "ppc440.h"
@@ -1020,15 +1020,14 @@ static void ppc460ex_pcie_realize(DeviceState *dev, Error **errp)
ppc460ex_pcie_register_dcrs(s);
}
-static Property ppc460ex_pcie_props[] = {
+static const Property ppc460ex_pcie_props[] = {
DEFINE_PROP_INT32("busnum", PPC460EXPCIEState, num, -1),
DEFINE_PROP_INT32("dcrn-base", PPC460EXPCIEState, dcrn_base, -1),
DEFINE_PROP_LINK("cpu", PPC460EXPCIEState, cpu, TYPE_POWERPC_CPU,
PowerPCCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ppc460ex_pcie_class_init(ObjectClass *klass, void *data)
+static void ppc460ex_pcie_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/ppc4xx_devs.c b/hw/ppc/ppc4xx_devs.c
index c1d1114..f36c519 100644
--- a/hw/ppc/ppc4xx_devs.c
+++ b/hw/ppc/ppc4xx_devs.c
@@ -231,18 +231,17 @@ static void ppc4xx_mal_finalize(Object *obj)
g_free(mal->txctpr);
}
-static Property ppc4xx_mal_properties[] = {
+static const Property ppc4xx_mal_properties[] = {
DEFINE_PROP_UINT8("txc-num", Ppc4xxMalState, txcnum, 0),
DEFINE_PROP_UINT8("rxc-num", Ppc4xxMalState, rxcnum, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ppc4xx_mal_class_init(ObjectClass *oc, void *data)
+static void ppc4xx_mal_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = ppc4xx_mal_realize;
- dc->reset = ppc4xx_mal_reset;
+ device_class_set_legacy_reset(dc, ppc4xx_mal_reset);
/* Reason: only works as function of a ppc4xx SoC */
dc->user_creatable = false;
device_class_set_props(dc, ppc4xx_mal_properties);
@@ -327,12 +326,12 @@ static void ppc405_plb_realize(DeviceState *dev, Error **errp)
ppc4xx_dcr_register(dcr, PLB4A1_ACR, plb, &dcr_read_plb, &dcr_write_plb);
}
-static void ppc405_plb_class_init(ObjectClass *oc, void *data)
+static void ppc405_plb_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = ppc405_plb_realize;
- dc->reset = ppc405_plb_reset;
+ device_class_set_legacy_reset(dc, ppc405_plb_reset);
/* Reason: only works as function of a ppc4xx SoC */
dc->user_creatable = false;
}
@@ -513,12 +512,12 @@ static void ppc405_ebc_realize(DeviceState *dev, Error **errp)
ppc4xx_dcr_register(dcr, EBC0_CFGDATA, ebc, &dcr_read_ebc, &dcr_write_ebc);
}
-static void ppc405_ebc_class_init(ObjectClass *oc, void *data)
+static void ppc405_ebc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = ppc405_ebc_realize;
- dc->reset = ppc405_ebc_reset;
+ device_class_set_legacy_reset(dc, ppc405_ebc_reset);
/* Reason: only works as function of a ppc4xx SoC */
dc->user_creatable = false;
}
@@ -539,13 +538,12 @@ bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu,
return sysbus_realize(SYS_BUS_DEVICE(dev), errp);
}
-static Property ppc4xx_dcr_properties[] = {
+static const Property ppc4xx_dcr_properties[] = {
DEFINE_PROP_LINK("cpu", Ppc4xxDcrDeviceState, cpu, TYPE_POWERPC_CPU,
PowerPCCPU *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ppc4xx_dcr_class_init(ObjectClass *oc, void *data)
+static void ppc4xx_dcr_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
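The same three-step modernization repeats across the 4xx device files: Property arrays become const and lose their DEFINE_PROP_END_OF_LIST() terminator, class_init callbacks take a const void *data, and direct dc->reset assignments go through device_class_set_legacy_reset(). A minimal sketch of the resulting shape for a hypothetical device (MyDevState and its callbacks are placeholders):

    static const Property my_dev_properties[] = {
        DEFINE_PROP_UINT8("bank", MyDevState, bank, 0),
        /* no DEFINE_PROP_END_OF_LIST() terminator any more */
    };

    static void my_dev_class_init(ObjectClass *oc, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(oc);

        dc->realize = my_dev_realize;
        /* Old-style reset hook, flagged as legacy until converted to Resettable. */
        device_class_set_legacy_reset(dc, my_dev_reset);
        device_class_set_props(dc, my_dev_properties);
    }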
diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c
index c0c87ff..5927698 100644
--- a/hw/ppc/ppc4xx_sdram.c
+++ b/hw/ppc/ppc4xx_sdram.c
@@ -34,7 +34,7 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
-#include "exec/address-spaces.h" /* get_system_memory() */
+#include "system/address-spaces.h" /* get_system_memory() */
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc4xx.h"
@@ -425,19 +425,18 @@ static void ppc4xx_sdram_ddr_realize(DeviceState *dev, Error **errp)
s, &sdram_ddr_dcr_read, &sdram_ddr_dcr_write);
}
-static Property ppc4xx_sdram_ddr_props[] = {
+static const Property ppc4xx_sdram_ddr_props[] = {
DEFINE_PROP_LINK("dram", Ppc4xxSdramDdrState, dram_mr, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdrState, nbanks, 4),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ppc4xx_sdram_ddr_class_init(ObjectClass *oc, void *data)
+static void ppc4xx_sdram_ddr_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = ppc4xx_sdram_ddr_realize;
- dc->reset = ppc4xx_sdram_ddr_reset;
+ device_class_set_legacy_reset(dc, ppc4xx_sdram_ddr_reset);
/* Reason: only works as function of a ppc4xx SoC */
dc->user_creatable = false;
device_class_set_props(dc, ppc4xx_sdram_ddr_props);
@@ -710,19 +709,18 @@ static void ppc4xx_sdram_ddr2_realize(DeviceState *dev, Error **errp)
s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write);
}
-static Property ppc4xx_sdram_ddr2_props[] = {
+static const Property ppc4xx_sdram_ddr2_props[] = {
DEFINE_PROP_LINK("dram", Ppc4xxSdramDdr2State, dram_mr, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdr2State, nbanks, 4),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ppc4xx_sdram_ddr2_class_init(ObjectClass *oc, void *data)
+static void ppc4xx_sdram_ddr2_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = ppc4xx_sdram_ddr2_realize;
- dc->reset = ppc4xx_sdram_ddr2_reset;
+ device_class_set_legacy_reset(dc, ppc4xx_sdram_ddr2_reset);
/* Reason: only works as function of a ppc4xx SoC */
dc->user_creatable = false;
device_class_set_props(dc, ppc4xx_sdram_ddr2_props);
diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c
index ca22da1..3872ae2 100644
--- a/hw/ppc/ppc_booke.c
+++ b/hw/ppc/ppc_booke.c
@@ -24,13 +24,24 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "hw/ppc/ppc.h"
#include "qemu/timer.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "hw/loader.h"
#include "kvm_ppc.h"
+void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa,
+ target_ulong size)
+{
+ tlb->attr = 0;
+ tlb->prot = PAGE_RWX << 4 | PAGE_VALID;
+ tlb->size = size;
+ tlb->EPN = va & TARGET_PAGE_MASK;
+ tlb->RPN = pa & TARGET_PAGE_MASK;
+ tlb->PID = 0;
+}
/* Timer Control Register */
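booke_set_tlb() centralizes the ppcemb_tlb_t setup that each BookE board used to open-code: it marks the entry valid with RWX permission and fills EPN/RPN from the page-aligned addresses. A board reset hook can then map the whole 32-bit space with two 2 GiB entries, as the bamboo conversion above does (sketch, board name hypothetical):

    static void my_board_cpu_reset(void *opaque)
    {
        PowerPCCPU *cpu = opaque;
        CPUPPCState *env = &cpu->env;

        cpu_reset(CPU(cpu));
        /* Two 2 GiB entries give a 1:1 mapping of the full 32-bit space. */
        booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
        booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
    }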
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index dfbe759..2310f62 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -32,7 +32,8 @@
#include "qemu/units.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
+#include "hw/ppc/ppc.h"
#include "e500.h"
#include "qom/object.h"
@@ -70,30 +71,12 @@ static void spin_reset(DeviceState *dev)
}
}
-static void mmubooke_create_initial_mapping(CPUPPCState *env,
- target_ulong va,
- hwaddr pa,
- hwaddr len)
-{
- ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);
- hwaddr size;
-
- size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT);
- tlb->mas1 = MAS1_VALID | size;
- tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M;
- tlb->mas7_3 = pa & TARGET_PAGE_MASK;
- tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
-#ifdef CONFIG_KVM
- env->tlb_dirty = true;
-#endif
-}
-
static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
CPUPPCState *env = cpu_env(cs);
SpinInfo *curspin = data.host_ptr;
- hwaddr map_size = 64 * MiB;
- hwaddr map_start;
+ hwaddr map_start, map_size = 64 * MiB;
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);
cpu_synchronize_state(cs);
stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
@@ -107,7 +90,12 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data)
env->gpr[9] = 0;
map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
- mmubooke_create_initial_mapping(env, 0, map_start, map_size);
+ /* create initial mapping */
+ booke206_set_tlb(tlb, 0, map_start, map_size);
+ tlb->mas2 |= MAS2_M;
+#ifdef CONFIG_KVM
+ env->tlb_dirty = true;
+#endif
cs->halted = 0;
cs->exception_index = -1;
@@ -187,11 +175,11 @@ static void ppce500_spin_initfn(Object *obj)
sysbus_init_mmio(dev, &s->iomem);
}
-static void ppce500_spin_class_init(ObjectClass *klass, void *data)
+static void ppce500_spin_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = spin_reset;
+ device_class_set_legacy_reset(dc, spin_reset);
}
static const TypeInfo ppce500_spin_info = {
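The e500 spin code above does the equivalent with the MAS-based TLB: booke206_get_tlbm() selects a TLB1 entry and booke206_set_tlb() fills it, with MAS2_M OR-ed in afterwards, apparently because the generic helper leaves the attribute bits alone. Condensed, the kick path amounts to the following sequence (sketch of the hunk above):

    ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);

    booke206_set_tlb(tlb, 0, map_start, map_size);   /* 64 MiB window */
    tlb->mas2 |= MAS2_M;                             /* memory-coherent mapping */
    #ifdef CONFIG_KVM
        env->tlb_dirty = true;                       /* resync the KVM shadow TLB */
    #endif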
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index 4eb5477..982e40e 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -25,7 +25,6 @@
#include "qemu/osdep.h"
#include "hw/rtc/m48t59.h"
-#include "hw/char/serial.h"
#include "hw/block/fdc.h"
#include "net/net.h"
#include "hw/isa/isa.h"
@@ -36,12 +35,14 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
+#include "qemu/datadir.h"
#include "hw/loader.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/isa/pc87312.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "exec/target_page.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include "trace.h"
#include "elf.h"
#include "qemu/units.h"
@@ -55,6 +56,8 @@
#define KERNEL_LOAD_ADDR 0x01000000
#define INITRD_LOAD_ADDR 0x01800000
+#define BIOS_ADDR 0xfff00000
+#define BIOS_SIZE (1 * MiB)
#define NVRAM_SIZE 0x2000
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
@@ -241,6 +244,9 @@ static void ibm_40p_init(MachineState *machine)
ISADevice *isa_dev;
ISABus *isa_bus;
void *fw_cfg;
+ MemoryRegion *bios = g_new(MemoryRegion, 1);
+ char *filename;
+ ssize_t bios_size = -1;
uint32_t kernel_base = 0, initrd_base = 0;
long kernel_size = 0, initrd_size = 0;
char boot_device;
@@ -263,10 +269,27 @@ static void ibm_40p_init(MachineState *machine)
cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL);
qemu_register_reset(ppc_prep_reset, cpu);
+ /* allocate and load firmware */
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!filename) {
+ error_report("Could not find bios image '%s'", bios_name);
+ exit(1);
+ }
+ memory_region_init_rom(bios, NULL, "bios", BIOS_SIZE, &error_fatal);
+ memory_region_add_subregion(get_system_memory(), BIOS_ADDR, bios);
+ bios_size = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
+ if (bios_size < 0) {
+ bios_size = load_image_targphys(filename, BIOS_ADDR, BIOS_SIZE);
+ }
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ error_report("Could not load bios image '%s'", filename);
+ return;
+ }
+ g_free(filename);
+
/* PCI host */
dev = qdev_new("raven-pcihost");
- qdev_prop_set_string(dev, "bios-name", bios_name);
- qdev_prop_set_uint32(dev, "elf-machine", PPC_ELF_MACHINE);
pcihost = SYS_BUS_DEVICE(dev);
object_property_add_child(qdev_get_machine(), "raven", OBJECT(dev));
sysbus_realize_and_unref(pcihost, &error_fatal);
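The 40p machine now loads its firmware directly rather than via properties on the raven PCI host: a ROM region is mapped at the 0xfff00000 firmware window, an ELF image is tried first, and a flat binary copy is the fallback. The fallback path boils down to the following (sketch, using the constants defined above):

    MemoryRegion *bios = g_new(MemoryRegion, 1);
    ssize_t bios_size;

    memory_region_init_rom(bios, NULL, "bios", BIOS_SIZE, &error_fatal);
    memory_region_add_subregion(get_system_memory(), BIOS_ADDR, bios);

    /* Raw image fallback when the ELF loader rejects the file. */
    bios_size = load_image_targphys(filename, BIOS_ADDR, BIOS_SIZE);
    if (bios_size < 0 || bios_size > BIOS_SIZE) {
        error_report("Could not load bios image '%s'", filename);
    }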
diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c
index 4d3a251..41cd923 100644
--- a/hw/ppc/prep_systemio.c
+++ b/hw/ppc/prep_systemio.c
@@ -28,11 +28,11 @@
#include "hw/isa/isa.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#include "qemu/error-report.h" /* for error_report() */
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "cpu.h"
#include "trace.h"
@@ -285,13 +285,12 @@ static const VMStateDescription vmstate_prep_systemio = {
},
};
-static Property prep_systemio_properties[] = {
+static const Property prep_systemio_properties[] = {
DEFINE_PROP_UINT8("ibm-planar-id", PrepSystemIoState, ibm_planar_id, 0),
DEFINE_PROP_UINT8("equipment", PrepSystemIoState, equipment, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void prep_systemio_class_initfn(ObjectClass *klass, void *data)
+static void prep_systemio_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/rs6000_mc.c b/hw/ppc/rs6000_mc.c
index e6ec4b4..a096405 100644
--- a/hw/ppc/rs6000_mc.c
+++ b/hw/ppc/rs6000_mc.c
@@ -3,10 +3,12 @@
*
 * Copyright (c) 2017 Hervé Poussineau
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
- * (at your option) version 3 or any later version.
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,7 +24,7 @@
#include "hw/isa/isa.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qapi/error.h"
#include "trace.h"
#include "qom/object.h"
@@ -205,13 +207,12 @@ static const VMStateDescription vmstate_rs6000mc = {
},
};
-static Property rs6000mc_properties[] = {
+static const Property rs6000mc_properties[] = {
DEFINE_PROP_UINT32("ram-size", RS6000MCState, ram_size, 0),
DEFINE_PROP_BOOL("auto-configure", RS6000MCState, autoconfigure, true),
- DEFINE_PROP_END_OF_LIST()
};
-static void rs6000mc_class_initfn(ObjectClass *klass, void *data)
+static void rs6000mc_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c
index 8dc75fb..ee31bd8 100644
--- a/hw/ppc/sam460ex.c
+++ b/hw/ppc/sam460ex.c
@@ -17,21 +17,21 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/block-backend.h"
+#include "system/device_tree.h"
+#include "system/block-backend.h"
#include "exec/page-protection.h"
#include "hw/loader.h"
#include "elf.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "ppc440.h"
#include "hw/pci-host/ppc4xx.h"
#include "hw/block/flash.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/reset.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/i2c/ppc4xx_i2c.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/ide/pci.h"
@@ -142,7 +142,7 @@ static int sam460ex_load_device_tree(MachineState *machine,
uint32_t clock_freq = CPU_FREQ;
int offset;
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ filename = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE);
if (!filename) {
error_report("Couldn't find dtb file `%s'", BINARY_DEVICE_TREE_FILE);
exit(1);
@@ -213,38 +213,6 @@ static int sam460ex_load_device_tree(MachineState *machine,
return fdt_size;
}
-/* Create reset TLB entries for BookE, mapping only the flash memory. */
-static void mmubooke_create_initial_mapping_uboot(CPUPPCState *env)
-{
- ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
-
- /* on reset the flash is mapped by a shadow TLB,
- * but since we don't implement them we need to use
- * the same values U-Boot will use to avoid a fault.
- */
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 0x10000000; /* up to 0xffffffff */
- tlb->EPN = 0xf0000000 & TARGET_PAGE_MASK;
- tlb->RPN = (0xf0000000 & TARGET_PAGE_MASK) | 0x4;
- tlb->PID = 0;
-}
-
-/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
-static void mmubooke_create_initial_mapping(CPUPPCState *env,
- target_ulong va,
- hwaddr pa)
-{
- ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
-
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1 << 31; /* up to 0x80000000 */
- tlb->EPN = va & TARGET_PAGE_MASK;
- tlb->RPN = pa & TARGET_PAGE_MASK;
- tlb->PID = 0;
-}
-
static void main_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
@@ -253,20 +221,27 @@ static void main_cpu_reset(void *opaque)
cpu_reset(CPU(cpu));
- /* either we have a kernel to boot or we jump to U-Boot */
+ /*
+ * On reset the flash is mapped by a shadow TLB, but since we
+ * don't implement them we need to use the same values U-Boot
+ * will use to avoid a fault.
+ * either we have a kernel to boot or we jump to U-Boot
+ */
if (bi->entry != UBOOT_ENTRY) {
env->gpr[1] = (16 * MiB) - 8;
env->gpr[3] = FDT_ADDR;
env->nip = bi->entry;
/* Create a mapping for the kernel. */
- mmubooke_create_initial_mapping(env, 0, 0);
- env->gpr[6] = tswap32(EPAPR_MAGIC);
+ booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1 << 31);
+ env->gpr[6] = EPAPR_MAGIC;
env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */
} else {
env->nip = UBOOT_ENTRY;
- mmubooke_create_initial_mapping_uboot(env);
+ /* Create a mapping for U-Boot. */
+ booke_set_tlb(&env->tlb.tlbe[0], 0xf0000000, 0xf0000000, 0x10000000);
+ env->tlb.tlbe[0].RPN |= 4;
}
}
@@ -504,7 +479,7 @@ static void sam460ex_init(MachineState *machine)
success = load_elf(machine->kernel_filename, NULL, NULL, NULL,
&elf_entry, NULL, NULL, NULL,
- 1, PPC_ELF_MACHINE, 0, 0);
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
entry = elf_entry;
}
/* XXX try again as binary */
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 98fa3aa..702f774 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4,6 +4,9 @@
* Copyright (c) 2004-2007 Fabrice Bellard
* Copyright (c) 2007 Jocelyn Mayer
* Copyright (c) 2010 David Gibson, IBM Corporation.
+ * Copyright (c) 2010-2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -32,20 +35,20 @@
#include "qapi/qapi-events-machine.h"
#include "qapi/qapi-events-qdev.h"
#include "qapi/visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
-#include "sysemu/tcg.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
+#include "system/tcg.h"
+#include "system/qtest.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/cpus.h"
-#include "sysemu/hw_accel.h"
+#include "system/device_tree.h"
+#include "system/cpus.h"
+#include "system/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/misc.h"
#include "migration/qemu-file-types.h"
@@ -74,8 +77,8 @@
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"
-#include "exec/ram_addr.h"
-#include "exec/confidential-guest-support.h"
+#include "system/ram_addr.h"
+#include "system/confidential-guest-support.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
@@ -132,61 +135,6 @@ static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}
-static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
-{
- /* Dummy entries correspond to unused ICPState objects in older QEMUs,
- * and newer QEMUs don't even have them. In both cases, we don't want
- * to send anything on the wire.
- */
- return false;
-}
-
-static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
- /*
- * Hack ahead. We can't have two devices with the same name and
- * instance id. So I rename this to pass make check.
- * Real help from people who knows the hardware is needed.
- */
- .name = "icp/server",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = pre_2_10_vmstate_dummy_icp_needed,
- .fields = (const VMStateField[]) {
- VMSTATE_UNUSED(4), /* uint32_t xirr */
- VMSTATE_UNUSED(1), /* uint8_t pending_priority */
- VMSTATE_UNUSED(1), /* uint8_t mfrr */
- VMSTATE_END_OF_LIST()
- },
-};
-
-/*
- * See comment in hw/intc/xics.c:icp_realize()
- *
- * You have to remove vmstate_replace_hack_for_ppc() when you remove
- * the machine types that need the following function.
- */
-static void pre_2_10_vmstate_register_dummy_icp(int i)
-{
- vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
- (void *)(uintptr_t) i);
-}
-
-/*
- * See comment in hw/intc/xics.c:icp_realize()
- *
- * You have to remove vmstate_replace_hack_for_ppc() when you remove
- * the machine types that need the following function.
- */
-static void pre_2_10_vmstate_unregister_dummy_icp(int i)
-{
- /*
- * This used to be:
- *
- * vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
- * (void *)(uintptr_t) i);
- */
-}
-
int spapr_max_server_number(SpaprMachineState *spapr)
{
MachineState *ms = MACHINE(spapr);
@@ -298,7 +246,7 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr,
0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
/* 54: DecFP, 56: DecI, 58: SHA */
0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
- /* 60: NM atomic, 62: RNG */
+ /* 60: NM atomic, 62: RNG, 64: DAWR1 (ISA 3.1) */
0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
/* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
@@ -347,6 +295,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr,
* in pa-features. So hide it from them. */
pa_features[40 + 2] &= ~0x80; /* Radix MMU */
}
+ if (spapr_get_cap(spapr, SPAPR_CAP_DAWR1)) {
+ g_assert(pa_size > 66);
+ pa_features[66] |= 0x80;
+ }
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
@@ -682,7 +634,6 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
MachineState *machine = MACHINE(spapr);
- SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
hwaddr mem_start, node_size;
int i, nb_nodes = machine->numa_state->num_nodes;
NodeInfo *nodes = machine->numa_state->nodes;
@@ -724,7 +675,6 @@ static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
int ret;
- g_assert(smc->dr_lmb_enabled);
ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
if (ret) {
return ret;
@@ -758,7 +708,7 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
int i;
- drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, env->core_index);
if (drc) {
drc_index = spapr_drc_index(drc);
_FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
@@ -1307,9 +1257,7 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
spapr_dt_cpus(fdt, spapr);
/* ibm,drc-indexes and friends */
- if (smc->dr_lmb_enabled) {
- root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
- }
+ root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
if (smc->dr_phb_enabled) {
root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
}
@@ -1458,11 +1406,34 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
}
}
-#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
-#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
-#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
-#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
-#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
+static uint64_t *hpte_get_ptr(SpaprMachineState *s, unsigned index)
+{
+ uint64_t *table = s->htab;
+
+ return &table[2 * index];
+}
+
+static bool hpte_is_valid(SpaprMachineState *s, unsigned index)
+{
+ return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_VALID;
+}
+
+static bool hpte_is_dirty(SpaprMachineState *s, unsigned index)
+{
+ return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_HPTE_DIRTY;
+}
+
+static void hpte_set_clean(SpaprMachineState *s, unsigned index)
+{
+ stq_be_p(hpte_get_ptr(s, index),
+ ldq_be_p(hpte_get_ptr(s, index)) & ~HPTE64_V_HPTE_DIRTY);
+}
+
+static void hpte_set_dirty(SpaprMachineState *s, unsigned index)
+{
+ stq_be_p(hpte_get_ptr(s, index),
+ ldq_be_p(hpte_get_ptr(s, index)) | HPTE64_V_HPTE_DIRTY);
+}
/*
* Get the fd to access the kernel htab, re-opening it if necessary
@@ -1673,7 +1644,7 @@ int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
spapr->htab_shift = shift;
for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
- DIRTY_HPTE(HPTE(spapr->htab, i));
+ hpte_set_dirty(spapr, i);
}
}
/* We're setting up a hash table, so that means we're not radix */
@@ -1725,7 +1696,7 @@ void spapr_check_mmu_mode(bool guest_radix)
}
}
-static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
+static void spapr_machine_reset(MachineState *machine, ResetType type)
{
SpaprMachineState *spapr = SPAPR_MACHINE(machine);
PowerPCCPU *first_ppc_cpu;
@@ -1733,7 +1704,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
void *fdt;
int rc;
- if (reason != SHUTDOWN_CAUSE_SNAPSHOT_LOAD) {
+ if (type != RESET_TYPE_SNAPSHOT_LOAD) {
/*
* Record-replay snapshot load must not consume random, this was
* already replayed from initial machine reset.
@@ -1762,7 +1733,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
spapr_setup_hpt(spapr);
}
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
spapr_ovec_cleanup(spapr->ov5_cas);
spapr->ov5_cas = spapr_ovec_new();
@@ -1819,7 +1790,6 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
0, fdt_addr, 0);
cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
}
- qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
g_free(spapr->fdt_blob);
spapr->fdt_size = fdt_totalsize(fdt);
@@ -2195,7 +2165,9 @@ static const VMStateDescription vmstate_spapr = {
&vmstate_spapr_cap_fwnmi,
&vmstate_spapr_fwnmi,
&vmstate_spapr_cap_rpt_invalidate,
+ &vmstate_spapr_cap_ail_mode_3,
&vmstate_spapr_cap_nested_papr,
+ &vmstate_spapr_cap_dawr1,
NULL
}
};
@@ -2230,7 +2202,7 @@ static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
qemu_put_be32(f, chunkstart);
qemu_put_be16(f, n_valid);
qemu_put_be16(f, n_invalid);
- qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
+ qemu_put_buffer(f, (void *)hpte_get_ptr(spapr, chunkstart),
HASH_PTE_SIZE_64 * n_valid);
}
@@ -2256,16 +2228,16 @@ static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
/* Consume invalid HPTEs */
while ((index < htabslots)
- && !HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && !hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
}
/* Consume valid HPTEs */
chunkstart = index;
while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
- && HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
}
@@ -2305,7 +2277,7 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
/* Consume non-dirty HPTEs */
while ((index < htabslots)
- && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
+ && !hpte_is_dirty(spapr, index)) {
index++;
examined++;
}
@@ -2313,9 +2285,9 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
chunkstart = index;
/* Consume valid dirty HPTEs */
while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
- && HPTE_DIRTY(HPTE(spapr->htab, index))
- && HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_dirty(spapr, index)
+ && hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
examined++;
}
@@ -2323,9 +2295,9 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
invalidstart = index;
/* Consume invalid dirty HPTEs */
while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
- && HPTE_DIRTY(HPTE(spapr->htab, index))
- && !HPTE_VALID(HPTE(spapr->htab, index))) {
- CLEAN_HPTE(HPTE(spapr->htab, index));
+ && hpte_is_dirty(spapr, index)
+ && !hpte_is_valid(spapr, index)) {
+ hpte_set_clean(spapr, index);
index++;
examined++;
}
@@ -2507,11 +2479,11 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
if (spapr->htab) {
if (n_valid) {
- qemu_get_buffer(f, HPTE(spapr->htab, index),
+ qemu_get_buffer(f, (void *)hpte_get_ptr(spapr, index),
HASH_PTE_SIZE_64 * n_valid);
}
if (n_invalid) {
- memset(HPTE(spapr->htab, index + n_valid), 0,
+ memset(hpte_get_ptr(spapr, index + n_valid), 0,
HASH_PTE_SIZE_64 * n_invalid);
}
} else {
@@ -2714,7 +2686,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
MachineClass *mc = MACHINE_GET_CLASS(machine);
- SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
const char *type = spapr_get_cpu_core_type(machine->cpu_type);
const CPUArchIdList *possible_cpus;
unsigned int smp_cpus = machine->smp.cpus;
@@ -2743,15 +2714,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr)
boot_cores_nr = possible_cpus->len;
}
- if (smc->pre_2_10_has_unused_icps) {
- for (i = 0; i < spapr_max_server_number(spapr); i++) {
- /* Dummy entries get deregistered when real ICPState objects
- * are registered during CPU core hotplug.
- */
- pre_2_10_vmstate_register_dummy_icp(i);
- }
- }
-
for (i = 0; i < possible_cpus->len; i++) {
int core_id = i * smp_threads;
@@ -2928,10 +2890,8 @@ static void spapr_machine_init(MachineState *machine)
spapr->ov5 = spapr_ovec_new();
spapr->ov5_cas = spapr_ovec_new();
- if (smc->dr_lmb_enabled) {
- spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
- spapr_validate_node_memory(machine, &error_fatal);
- }
+ spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
+ spapr_validate_node_memory(machine, &error_fatal);
spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
@@ -2958,6 +2918,9 @@ static void spapr_machine_init(MachineState *machine)
spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
}
+ qemu_guest_getrandom_nofail(&spapr->hashpkey_val,
+ sizeof(spapr->hashpkey_val));
+
/* init CPUs */
spapr_init_cpus(spapr);
@@ -3015,9 +2978,7 @@ static void spapr_machine_init(MachineState *machine)
machine_memory_devices_init(machine, device_mem_base, device_mem_size);
}
- if (smc->dr_lmb_enabled) {
- spapr_create_lmb_dr_connectors(spapr);
- }
+ spapr_create_lmb_dr_connectors(spapr);
if (mc->nvdimm_supported) {
spapr_create_nvdimm_dr_connectors(spapr);
@@ -3077,11 +3038,7 @@ static void spapr_machine_init(MachineState *machine)
}
if (machine->usb) {
- if (smc->use_ohci_by_default) {
- pci_create_simple(phb->bus, -1, "pci-ohci");
- } else {
- pci_create_simple(phb->bus, -1, "nec-usb-xhci");
- }
+ pci_create_simple(phb->bus, -1, "nec-usb-xhci");
if (has_vga) {
USBBus *usb_bus;
@@ -3098,13 +3055,13 @@ static void spapr_machine_init(MachineState *machine)
spapr->kernel_size = load_elf(kernel_filename, NULL,
translate_kernel_address, spapr,
- NULL, &loaded_addr, NULL, NULL, 1,
- PPC_ELF_MACHINE, 0, 0);
+ NULL, &loaded_addr, NULL, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
spapr->kernel_size = load_elf(kernel_filename, NULL,
translate_kernel_address, spapr,
- NULL, &loaded_addr, NULL, NULL, 0,
- PPC_ELF_MACHINE, 0, 0);
+ NULL, &loaded_addr, NULL, NULL,
+ ELFDATA2LSB, PPC_ELF_MACHINE, 0, 0);
spapr->kernel_le = spapr->kernel_size > 0;
}
if (spapr->kernel_size < 0) {
@@ -3661,7 +3618,6 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
- const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
PCDIMMDevice *dimm = PC_DIMM(dev);
@@ -3670,11 +3626,6 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Object *memdev;
hwaddr pagesize;
- if (!smc->dr_lmb_enabled) {
- error_setg(errp, "Memory hotplug not supported for this machine");
- return;
- }
-
size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -3931,21 +3882,9 @@ void spapr_core_release(DeviceState *dev)
static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
MachineState *ms = MACHINE(hotplug_dev);
- SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
CPUCore *cc = CPU_CORE(dev);
CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
- if (smc->pre_2_10_has_unused_icps) {
- SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
- int i;
-
- for (i = 0; i < cc->nr_threads; i++) {
- CPUState *cs = CPU(sc->threads[i]);
-
- pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
- }
- }
-
assert(core_slot);
core_slot->cpu = NULL;
qdev_unrealize(dev);
@@ -4026,7 +3965,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
MachineClass *mc = MACHINE_GET_CLASS(spapr);
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
SpaprDrc *drc;
@@ -4076,12 +4014,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
}
}
- if (smc->pre_2_10_has_unused_icps) {
- for (i = 0; i < cc->nr_threads; i++) {
- CPUState *cs = CPU(core->threads[i]);
- pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
- }
- }
}
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@@ -4538,7 +4470,7 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
*/
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
@@ -4546,7 +4478,7 @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
- count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, match);
if (count < 0) {
return count;
@@ -4663,7 +4595,7 @@ static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
}
}
-static void spapr_machine_class_init(ObjectClass *oc, void *data)
+static void spapr_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
@@ -4712,7 +4644,6 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
hc->unplug_request = spapr_machine_device_unplug_request;
hc->unplug = spapr_machine_device_unplug;
- smc->dr_lmb_enabled = true;
smc->update_dt_enabled = true;
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
mc->has_hotpluggable_cpus = true;
@@ -4757,6 +4688,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
+ smc->default_caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_ON;
/*
* This cap specifies whether the AIL 3 mode for
@@ -4785,7 +4717,7 @@ static const TypeInfo spapr_machine_info = {
.instance_finalize = spapr_machine_finalizefn,
.class_size = sizeof(SpaprMachineClass),
.class_init = spapr_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ TYPE_NMI },
{ TYPE_HOTPLUG_HANDLER },
@@ -4807,7 +4739,7 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
#define DEFINE_SPAPR_MACHINE_IMPL(latest, ...) \
static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)( \
ObjectClass *oc, \
- void *data) \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc); \
@@ -4825,7 +4757,7 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void) \
{ \
MACHINE_VER_DELETION(__VA_ARGS__); \
- type_register(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \
+ type_register_static(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \
} \
type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__))
@@ -4833,18 +4765,49 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
#define DEFINE_SPAPR_MACHINE(major, minor) \
DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)
-#define DEFINE_SPAPR_MACHINE_TAGGED(major, minor, tag) \
- DEFINE_SPAPR_MACHINE_IMPL(false, major, minor, _, tag)
+
+/*
+ * pseries-10.1
+ */
+static void spapr_machine_10_1_class_options(MachineClass *mc)
+{
+ /* Defaults for the latest behaviour inherited from the base class */
+}
+
+DEFINE_SPAPR_MACHINE_AS_LATEST(10, 1);
+
+/*
+ * pseries-10.0
+ */
+static void spapr_machine_10_0_class_options(MachineClass *mc)
+{
+ spapr_machine_10_1_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+}
+
+DEFINE_SPAPR_MACHINE(10, 0);
+
+/*
+ * pseries-9.2
+ */
+static void spapr_machine_9_2_class_options(MachineClass *mc)
+{
+ spapr_machine_10_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+}
+
+DEFINE_SPAPR_MACHINE(9, 2);
/*
* pseries-9.1
*/
static void spapr_machine_9_1_class_options(MachineClass *mc)
{
- /* Defaults for the latest behaviour inherited from the base class */
+ spapr_machine_9_2_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}
-DEFINE_SPAPR_MACHINE_AS_LATEST(9, 1);
+DEFINE_SPAPR_MACHINE(9, 1);
/*
* pseries-9.0
@@ -4864,6 +4827,7 @@ static void spapr_machine_8_2_class_options(MachineClass *mc)
{
spapr_machine_9_0_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
+ mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2");
}
DEFINE_SPAPR_MACHINE(8, 2);
@@ -5108,278 +5072,6 @@ static void spapr_machine_3_0_class_options(MachineClass *mc)
DEFINE_SPAPR_MACHINE(3, 0);
-/*
- * pseries-2.12
- */
-static void spapr_machine_2_12_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
- static GlobalProperty compat[] = {
- { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
- { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
- };
-
- spapr_machine_3_0_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-
- /* We depend on kvm_enabled() to choose a default value for the
- * hpt-max-page-size capability. Of course we can't do it here
- * because this is too early and the HW accelerator isn't initialized
- * yet. Postpone this to machine init (see default_caps_with_cpu()).
- */
- smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
-}
-
-DEFINE_SPAPR_MACHINE(2, 12);
-
-static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
-
- spapr_machine_2_12_class_options(mc);
- smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
- smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
- smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
-}
-
-DEFINE_SPAPR_MACHINE_TAGGED(2, 12, sxxm);
-
-/*
- * pseries-2.11
- */
-
-static void spapr_machine_2_11_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
-
- spapr_machine_2_12_class_options(mc);
- smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
- compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
-}
-
-DEFINE_SPAPR_MACHINE(2, 11);
-
-/*
- * pseries-2.10
- */
-
-static void spapr_machine_2_10_class_options(MachineClass *mc)
-{
- spapr_machine_2_11_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
-}
-
-DEFINE_SPAPR_MACHINE(2, 10);
-
-/*
- * pseries-2.9
- */
-
-static void spapr_machine_2_9_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
- static GlobalProperty compat[] = {
- { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
- };
-
- spapr_machine_2_10_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- smc->pre_2_10_has_unused_icps = true;
- smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
-}
-
-DEFINE_SPAPR_MACHINE(2, 9);
-
-/*
- * pseries-2.8
- */
-
-static void spapr_machine_2_8_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
- };
-
- spapr_machine_2_9_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- mc->numa_mem_align_shift = 23;
-}
-
-DEFINE_SPAPR_MACHINE(2, 8);
-
-/*
- * pseries-2.7
- */
-
-static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
- uint64_t *buid, hwaddr *pio,
- hwaddr *mmio32, hwaddr *mmio64,
- unsigned n_dma, uint32_t *liobns, Error **errp)
-{
- /* Legacy PHB placement for pseries-2.7 and earlier machine types */
- const uint64_t base_buid = 0x800000020000000ULL;
- const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
- const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
- const hwaddr pio_offset = 0x80000000; /* 2 GiB */
- const uint32_t max_index = 255;
- const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
-
- uint64_t ram_top = MACHINE(spapr)->ram_size;
- hwaddr phb0_base, phb_base;
- int i;
-
- /* Do we have device memory? */
- if (MACHINE(spapr)->device_memory) {
- /* Can't just use maxram_size, because there may be an
- * alignment gap between normal and device memory regions
- */
- ram_top = MACHINE(spapr)->device_memory->base +
- memory_region_size(&MACHINE(spapr)->device_memory->mr);
- }
-
- phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
-
- if (index > max_index) {
- error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
- max_index);
- return false;
- }
-
- *buid = base_buid + index;
- for (i = 0; i < n_dma; ++i) {
- liobns[i] = SPAPR_PCI_LIOBN(index, i);
- }
-
- phb_base = phb0_base + index * phb_spacing;
- *pio = phb_base + pio_offset;
- *mmio32 = phb_base + mmio_offset;
- /*
- * We don't set the 64-bit MMIO window, relying on the PHB's
- * fallback behaviour of automatically splitting a large "32-bit"
- * window into contiguous 32-bit and 64-bit windows
- */
-
- return true;
-}
-
-static void spapr_machine_2_7_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
- static GlobalProperty compat[] = {
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
- { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
- };
-
- spapr_machine_2_8_class_options(mc);
- mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
- mc->default_machine_opts = "modern-hotplug-events=off";
- compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- smc->phb_placement = phb_placement_2_7;
-}
-
-DEFINE_SPAPR_MACHINE(2, 7);
-
-/*
- * pseries-2.6
- */
-
-static void spapr_machine_2_6_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
- };
-
- spapr_machine_2_7_class_options(mc);
- mc->has_hotpluggable_cpus = false;
- compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-
-DEFINE_SPAPR_MACHINE(2, 6);
-
-/*
- * pseries-2.5
- */
-
-static void spapr_machine_2_5_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
- static GlobalProperty compat[] = {
- { "spapr-vlan", "use-rx-buffer-pools", "off" },
- };
-
- spapr_machine_2_6_class_options(mc);
- smc->use_ohci_by_default = true;
- compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-
-DEFINE_SPAPR_MACHINE(2, 5);
-
-/*
- * pseries-2.4
- */
-
-static void spapr_machine_2_4_class_options(MachineClass *mc)
-{
- SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
-
- spapr_machine_2_5_class_options(mc);
- smc->dr_lmb_enabled = false;
- compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
-}
-
-DEFINE_SPAPR_MACHINE(2, 4);
-
-/*
- * pseries-2.3
- */
-
-static void spapr_machine_2_3_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
- };
- spapr_machine_2_4_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-DEFINE_SPAPR_MACHINE(2, 3);
-
-/*
- * pseries-2.2
- */
-
-static void spapr_machine_2_2_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
- };
-
- spapr_machine_2_3_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
-}
-DEFINE_SPAPR_MACHINE(2, 2);
-
-/*
- * pseries-2.1
- */
-
-static void spapr_machine_2_1_class_options(MachineClass *mc)
-{
- spapr_machine_2_2_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
-}
-DEFINE_SPAPR_MACHINE(2, 1);
-
static void spapr_machine_register_types(void)
{
type_register_static(&spapr_machine_info);
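The hpte_get_ptr()/hpte_is_valid()/hpte_set_dirty() helpers added earlier in this file replace the old pointer-casting macros with explicit big-endian accessors: the hash table kept in spapr->htab is stored big-endian, so every access goes through ldq_be_p()/stq_be_p() instead of hand-rolled tswap64(). The flag updates are plain read-modify-write operations, roughly:

    /* Sketch: generic flag update over one big-endian HPTE. */
    static void hpte_update_flag(SpaprMachineState *s, unsigned index,
                                 uint64_t flag, bool set)
    {
        uint64_t *hpte = hpte_get_ptr(s, index);
        uint64_t v = ldq_be_p(hpte);

        stq_be_p(hpte, set ? (v | flag) : (v & ~flag));
    }

With this shape, hpte_set_dirty(s, i) is simply hpte_update_flag(s, i, HPTE64_V_HPTE_DIRTY, true), and hpte_set_clean() is the same call with false.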
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index 0a15415..f2f5722 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -26,14 +26,15 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
-#include "sysemu/hw_accel.h"
-#include "exec/ram_addr.h"
+#include "system/hw_accel.h"
+#include "system/ram_addr.h"
#include "target/ppc/cpu.h"
#include "target/ppc/mmu-hash64.h"
#include "cpu-models.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
+#include "system/hostmem.h"
#include "hw/ppc/spapr.h"
@@ -696,6 +697,34 @@ static void cap_ail_mode_3_apply(SpaprMachineState *spapr,
}
}
+static void cap_dawr1_apply(SpaprMachineState *spapr, uint8_t val,
+ Error **errp)
+{
+ ERRP_GUARD();
+
+ if (!val) {
+ return; /* Disable by default */
+ }
+
+ if (!ppc_type_check_compat(MACHINE(spapr)->cpu_type,
+ CPU_POWERPC_LOGICAL_3_10, 0,
+ spapr->max_compat_pvr)) {
+ error_setg(errp, "DAWR1 supported only on POWER10 and later CPUs");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off\n");
+ return;
+ }
+
+ if (kvm_enabled()) {
+ if (!kvmppc_has_cap_dawr1()) {
+ error_setg(errp, "DAWR1 not supported by KVM.");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off");
+ } else if (kvmppc_set_cap_dawr1(val) < 0) {
+ error_setg(errp, "Error enabling cap-dawr1 with KVM.");
+ error_append_hint(errp, "Try appending -machine cap-dawr1=off");
+ }
+ }
+}
+
SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
[SPAPR_CAP_HTM] = {
.name = "htm",
@@ -831,6 +860,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
.type = "bool",
.apply = cap_ail_mode_3_apply,
},
+ [SPAPR_CAP_DAWR1] = {
+ .name = "dawr1",
+ .description = "Allow 2nd Data Address Watchpoint Register (DAWR1)",
+ .index = SPAPR_CAP_DAWR1,
+ .get = spapr_cap_get_bool,
+ .set = spapr_cap_set_bool,
+ .type = "bool",
+ .apply = cap_dawr1_apply,
+ },
};
static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
@@ -841,6 +879,11 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
caps = smc->default_caps;
+ if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_10,
+ 0, spapr->max_compat_pvr)) {
+ caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_OFF;
+ }
+
if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_00,
0, spapr->max_compat_pvr)) {
caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
@@ -974,6 +1017,8 @@ SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER);
SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST);
SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI);
SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE);
+SPAPR_CAP_MIG_STATE(ail_mode_3, SPAPR_CAP_AIL_MODE_3);
+SPAPR_CAP_MIG_STATE(dawr1, SPAPR_CAP_DAWR1);
void spapr_caps_init(SpaprMachineState *spapr)
{
@@ -1033,7 +1078,7 @@ void spapr_caps_add_properties(SpaprMachineClass *smc)
for (i = 0; i < ARRAY_SIZE(capability_table); i++) {
SpaprCapabilityInfo *cap = &capability_table[i];
g_autofree char *name = g_strdup_printf("cap-%s", cap->name);
- g_autofree char *desc = g_strdup_printf("%s", cap->description);
+ g_autofree char *desc = g_strdup(cap->description);
object_class_property_add(klass, name, cap->type,
cap->get, cap->set,
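cap-dawr1 follows the existing spapr capability pattern: it defaults to on for the new machine type, is forced off when the CPU or compat mode predates POWER10 (see default_caps_with_cpu above), and the apply hook refuses to start when KVM lacks the capability; under TCG the hook only validates the CPU compatibility mode. As the error hints suggest, a user on a KVM host without DAWR1 support would disable it explicitly, e.g.:

    qemu-system-ppc64 -machine pseries,cap-dawr1=off ...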
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index e7c9edd..4952f9b 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -15,15 +15,15 @@
#include "target/ppc/cpu.h"
#include "hw/ppc/spapr.h"
#include "qapi/error.h"
-#include "sysemu/cpus.h"
-#include "sysemu/kvm.h"
+#include "system/cpus.h"
+#include "system/kvm.h"
#include "target/ppc/kvm_ppc.h"
#include "hw/ppc/ppc.h"
#include "target/ppc/mmu-hash64.h"
#include "target/ppc/power8-pmu.h"
-#include "sysemu/numa.h"
-#include "sysemu/reset.h"
-#include "sysemu/hw_accel.h"
+#include "system/numa.h"
+#include "system/reset.h"
+#include "system/hw_accel.h"
#include "qemu/error-report.h"
static void spapr_reset_vcpu(PowerPCCPU *cpu)
@@ -37,6 +37,9 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu)
cpu_reset(cs);
+ env->quiesced = true; /* set "RTAS stopped" state. */
+ ppc_maybe_interrupt(env);
+
/*
* "PowerPC Processor binding to IEEE 1275" defines the initial MSR state
* as 32bit (MSR_SF=0) with MSR_ME=1 and MSR_FP=1 in "8.2.1. Initial
@@ -98,6 +101,9 @@ void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
CPU(cpu)->halted = 0;
/* Enable Power-saving mode Exit Cause exceptions */
ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm);
+
+ env->quiesced = false; /* clear "RTAS stopped" state. */
+ ppc_maybe_interrupt(env);
}
/*
@@ -197,9 +203,7 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
{
CPUPPCState *env = &cpu->env;
- if (!sc->pre_3_0_migration) {
- vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
- }
+ vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu);
cpu_ppc_tb_free(env);
qdev_unrealize(DEVICE(cpu));
@@ -275,6 +279,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
env->spr_cb[SPR_PIR].default_value = cs->cpu_index;
env->spr_cb[SPR_TIR].default_value = thread_index;
+ env->spr_cb[SPR_HASHPKEYR].default_value = spapr->hashpkey_val;
+
cpu_ppc_set_1lpar(cpu);
/* Set time-base frequency to 512 MHz. vhyp must be set first. */
@@ -285,10 +291,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
return false;
}
- if (!sc->pre_3_0_migration) {
- vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state,
- cpu->machine_data);
- }
+ vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state,
+ cpu->machine_data);
return true;
}
@@ -300,11 +304,13 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp)
g_autofree char *id = NULL;
CPUState *cs;
PowerPCCPU *cpu;
+ CPUPPCState *env;
obj = object_new(scc->cpu_type);
cs = CPU(obj);
cpu = POWERPC_CPU(obj);
+ env = &cpu->env;
/*
* All CPUs start halted. CPU0 is unhalted from the machine level reset code
* and the rest are explicitly started up by the guest using an RTAS call.
@@ -315,6 +321,9 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp)
return NULL;
}
+ env->chip_index = sc->node_id;
+ env->core_index = cc->core_id;
+
cpu->node_id = sc->node_id;
id = g_strdup_printf("thread[%d]", i);
@@ -345,30 +354,33 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
qemu_register_reset(spapr_cpu_core_reset_handler, sc);
sc->threads = g_new0(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
- sc->threads[i] = spapr_create_vcpu(sc, i, errp);
- if (!sc->threads[i] ||
- !spapr_realize_vcpu(sc->threads[i], spapr, sc, i, errp)) {
+ PowerPCCPU *cpu;
+
+ cpu = spapr_create_vcpu(sc, i, errp);
+ sc->threads[i] = cpu;
+ if (cpu && cc->nr_threads > 1) {
+ cpu->env.has_smt_siblings = true;
+ }
+
+ if (!cpu || !spapr_realize_vcpu(cpu, spapr, sc, i, errp)) {
spapr_cpu_core_unrealize(dev);
return;
}
}
}
-static Property spapr_cpu_core_properties[] = {
+static const Property spapr_cpu_core_properties[] = {
DEFINE_PROP_INT32("node-id", SpaprCpuCore, node_id, CPU_UNSET_NUMA_NODE_ID),
- DEFINE_PROP_BOOL("pre-3.0-migration", SpaprCpuCore, pre_3_0_migration,
- false),
- DEFINE_PROP_END_OF_LIST()
};
-static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
+static void spapr_cpu_core_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
SpaprCpuCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);
dc->realize = spapr_cpu_core_realize;
dc->unrealize = spapr_cpu_core_unrealize;
- dc->reset = spapr_cpu_core_reset;
+ device_class_set_legacy_reset(dc, spapr_cpu_core_reset);
device_class_set_props(dc, spapr_cpu_core_properties);
scc->cpu_type = data;
}
@@ -376,7 +388,7 @@ static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
#define DEFINE_SPAPR_CPU_CORE_TYPE(cpu_model) \
{ \
.parent = TYPE_SPAPR_CPU_CORE, \
- .class_data = (void *) POWERPC_CPU_TYPE_NAME(cpu_model), \
+ .class_data = POWERPC_CPU_TYPE_NAME(cpu_model), \
.class_init = spapr_cpu_core_class_init, \
.name = SPAPR_CPU_CORE_TYPE_NAME(cpu_model), \
}
@@ -401,6 +413,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = {
DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"),
DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"),
DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"),
+ DEFINE_SPAPR_CPU_CORE_TYPE("power11_v2.0"),
#ifdef CONFIG_KVM
DEFINE_SPAPR_CPU_CORE_TYPE("host"),
#endif
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 1484e32..d2044b4 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qemu/cutils.h"
#include "hw/ppc/spapr_drc.h"
#include "qom/object.h"
@@ -23,11 +23,11 @@
#include "hw/ppc/spapr.h" /* for RTAS return codes */
#include "hw/pci-host/spapr.h" /* spapr_phb_remove_pci_device_cb callback */
#include "hw/ppc/spapr_nvdimm.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/reset.h"
#include "trace.h"
-#define DRC_CONTAINER_PATH "/dr-connector"
+#define DRC_CONTAINER_PATH "dr-connector"
#define DRC_INDEX_TYPE_SHIFT 28
#define DRC_INDEX_ID_MASK ((1ULL << DRC_INDEX_TYPE_SHIFT) - 1)
@@ -514,6 +514,16 @@ static const VMStateDescription vmstate_spapr_drc = {
}
};
+static void drc_container_create(void)
+{
+ object_property_add_new_container(object_get_root(), DRC_CONTAINER_PATH);
+}
+
+static Object *drc_container_get(void)
+{
+ return object_resolve_path_component(object_get_root(), DRC_CONTAINER_PATH);
+}
+
static void drc_realize(DeviceState *d, Error **errp)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
@@ -529,7 +539,7 @@ static void drc_realize(DeviceState *d, Error **errp)
* inaccessible by the guest, since lookups rely on this path
* existing in the composition tree
*/
- root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ root_container = drc_container_get();
child_name = object_get_canonical_path_component(OBJECT(drc));
trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name);
object_property_add_alias(root_container, link_name,
@@ -543,12 +553,10 @@ static void drc_unrealize(DeviceState *d)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
g_autofree gchar *name = g_strdup_printf("%x", spapr_drc_index(drc));
- Object *root_container;
trace_spapr_drc_unrealize(spapr_drc_index(drc));
vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc);
- root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
- object_property_del(root_container, name);
+ object_property_del(drc_container_get(), name);
}
SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
@@ -581,10 +589,12 @@ static void spapr_dr_connector_instance_init(Object *obj)
drc->state = drck->empty_state;
}
-static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
+static void spapr_dr_connector_class_init(ObjectClass *k, const void *data)
{
DeviceClass *dk = DEVICE_CLASS(k);
+ drc_container_create();
+
dk->realize = drc_realize;
dk->unrealize = drc_unrealize;
/*
@@ -655,7 +665,7 @@ static void unrealize_physical(DeviceState *d)
qemu_unregister_reset(drc_physical_reset, drcp);
}
-static void spapr_drc_physical_class_init(ObjectClass *k, void *data)
+static void spapr_drc_physical_class_init(ObjectClass *k, const void *data)
{
DeviceClass *dk = DEVICE_CLASS(k);
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -669,7 +679,7 @@ static void spapr_drc_physical_class_init(ObjectClass *k, void *data)
drck->empty_state = SPAPR_DRC_STATE_PHYSICAL_POWERON;
}
-static void spapr_drc_logical_class_init(ObjectClass *k, void *data)
+static void spapr_drc_logical_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -680,7 +690,7 @@ static void spapr_drc_logical_class_init(ObjectClass *k, void *data)
drck->empty_state = SPAPR_DRC_STATE_LOGICAL_UNUSABLE;
}
-static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
+static void spapr_drc_cpu_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -691,7 +701,7 @@ static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
drck->dt_populate = spapr_core_dt_populate;
}
-static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
+static void spapr_drc_pci_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -702,7 +712,7 @@ static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
drck->dt_populate = spapr_pci_dt_populate;
}
-static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
+static void spapr_drc_lmb_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -713,7 +723,7 @@ static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
drck->dt_populate = spapr_lmb_dt_populate;
}
-static void spapr_drc_phb_class_init(ObjectClass *k, void *data)
+static void spapr_drc_phb_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -724,7 +734,7 @@ static void spapr_drc_phb_class_init(ObjectClass *k, void *data)
drck->dt_populate = spapr_phb_dt_populate;
}
-static void spapr_drc_pmem_class_init(ObjectClass *k, void *data)
+static void spapr_drc_pmem_class_init(ObjectClass *k, const void *data)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
@@ -796,9 +806,8 @@ static const TypeInfo spapr_drc_pmem_info = {
SpaprDrc *spapr_drc_by_index(uint32_t index)
{
Object *obj;
- g_autofree gchar *name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH,
- index);
- obj = object_resolve_path(name, NULL);
+ g_autofree gchar *name = g_strdup_printf("%x", index);
+ obj = object_resolve_path_component(drc_container_get(), name);
return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
}
@@ -860,7 +869,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
/* aliases for all DRConnector objects will be rooted in QOM
* composition tree at DRC_CONTAINER_PATH
*/
- root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ root_container = drc_container_get();
object_property_iter_init(&iter, root_container);
while ((prop = object_property_iter_next(&iter))) {
@@ -953,7 +962,7 @@ void spapr_drc_reset_all(SpaprMachineState *spapr)
ObjectProperty *prop;
ObjectPropertyIterator iter;
- drc_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ drc_container = drc_container_get();
restart:
object_property_iter_init(&iter, drc_container);
while ((prop = object_property_iter_next(&iter))) {
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index cb0eeee..832b021 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -27,8 +27,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/runstate.h"
+#include "system/device_tree.h"
+#include "system/runstate.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
@@ -645,8 +645,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
/* we shouldn't be signaling hotplug events for resources
* that don't support them
*/
- g_assert(false);
- return;
+ g_assert_not_reached();
}
if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 5e1d020..1e936f3 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1,14 +1,15 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
-#include "sysemu/tcg.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
+#include "system/tcg.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "exec/tb-flush.h"
+#include "exec/target_page.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
@@ -299,8 +300,10 @@ static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
if (kvm_enabled()) {
kvmppc_icbi_range(cpu, pdst, len);
- } else {
+ } else if (tcg_enabled()) {
tb_flush(CPU(cpu));
+ } else {
+ g_assert_not_reached();
}
}
@@ -578,6 +581,8 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
CPUState *cs = CPU(cpu);
SpaprCpuState *spapr_cpu;
+ assert(tcg_enabled()); /* KVM will have handled this */
+
/*
* -1 means confer to all other CPUs without dispatch counter check,
* otherwise it's a targeted confer.
@@ -818,11 +823,12 @@ static target_ulong h_set_mode_resource_set_ciabr(PowerPCCPU *cpu,
return H_SUCCESS;
}
-static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong mflags,
- target_ulong value1,
- target_ulong value2)
+static target_ulong h_set_mode_resource_set_dawr(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong mflags,
+ target_ulong resource,
+ target_ulong value1,
+ target_ulong value2)
{
CPUPPCState *env = &cpu->env;
@@ -835,8 +841,15 @@ static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu,
return H_P4;
}
- ppc_store_dawr0(env, value1);
- ppc_store_dawrx0(env, value2);
+ if (resource == H_SET_MODE_RESOURCE_SET_DAWR0) {
+ ppc_store_dawr0(env, value1);
+ ppc_store_dawrx0(env, value2);
+ } else if (resource == H_SET_MODE_RESOURCE_SET_DAWR1) {
+ ppc_store_dawr1(env, value1);
+ ppc_store_dawrx1(env, value2);
+ } else {
+ g_assert_not_reached();
+ }
return H_SUCCESS;
}
@@ -915,8 +928,9 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
args[3]);
break;
case H_SET_MODE_RESOURCE_SET_DAWR0:
- ret = h_set_mode_resource_set_dawr0(cpu, spapr, args[0], args[2],
- args[3]);
+ case H_SET_MODE_RESOURCE_SET_DAWR1:
+ ret = h_set_mode_resource_set_dawr(cpu, spapr, args[0], args[1],
+ args[2], args[3]);
break;
case H_SET_MODE_RESOURCE_LE:
ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
@@ -968,7 +982,6 @@ static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
/* RADIX->HASH || NOTHING->HASH : Allocate HPT */
spapr_setup_hpt(spapr);
}
- return;
}
#define FLAGS_MASK 0x01FULL
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index e3c01ef..c2432a0 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -21,10 +21,10 @@
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "trace.h"
#include "hw/ppc/spapr.h"
@@ -668,11 +668,11 @@ int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}
-static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
+static void spapr_tce_table_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = spapr_tce_table_realize;
- dc->reset = spapr_tce_reset;
+ device_class_set_legacy_reset(dc, spapr_tce_reset);
dc->unrealize = spapr_tce_table_unrealize;
/* Reason: This is just an internal device for handling the hypercalls */
dc->user_creatable = false;
@@ -693,7 +693,8 @@ static const TypeInfo spapr_tce_table_info = {
.class_init = spapr_tce_table_class_init,
};
-static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+static void spapr_iommu_memory_region_class_init(ObjectClass *klass,
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index aebd7ea..d6d368d 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -19,7 +19,7 @@
#include "hw/ppc/xics_spapr.h"
#include "hw/qdev-properties.h"
#include "cpu-models.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "trace.h"
diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c
index c027857..10cf634 100644
--- a/hw/ppc/spapr_nested.c
+++ b/hw/ppc/spapr_nested.c
@@ -1,6 +1,7 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "exec/target_long.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
@@ -64,10 +65,9 @@ static
SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
target_ulong guestid)
{
- SpaprMachineStateNestedGuest *guest;
-
- guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
- return guest;
+ return spapr->nested.guests ?
+ g_hash_table_lookup(spapr->nested.guests,
+ GINT_TO_POINTER(guestid)) : NULL;
}
bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
@@ -593,26 +593,37 @@ static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest,
return false;
}
-static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest,
- target_ulong vcpuid)
+static void *get_vcpu_state_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
{
assert(spapr_nested_vcpu_check(guest, vcpuid, false));
return &guest->vcpus[vcpuid].state;
}
-static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest,
- target_ulong vcpuid)
+static void *get_vcpu_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
{
assert(spapr_nested_vcpu_check(guest, vcpuid, false));
return &guest->vcpus[vcpuid];
}
-static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest,
+static void *get_guest_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
target_ulong vcpuid)
{
return guest; /* for GSBE_NESTED */
}
+static void *get_machine_ptr(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid)
+{
+ /* ignore guest and vcpuid for this */
+ return &spapr->nested;
+}
+
/*
* set=1 means the L1 is trying to set some state
* set=0 means the L1 is trying to get some state
@@ -771,6 +782,7 @@ static void copy_logical_pvr(void *a, void *b, bool set)
if (*pvr_logical_ptr) {
switch (*pvr_logical_ptr) {
+ case CPU_POWERPC_LOGICAL_3_10_P11:
case CPU_POWERPC_LOGICAL_3_10:
pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00;
break;
@@ -982,6 +994,7 @@ struct guest_state_element_type guest_state_element_types[] = {
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb),
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl),
+ GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DPDES, dpdes),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave),
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr),
@@ -1010,7 +1023,15 @@ struct guest_state_element_type guest_state_element_types[] = {
GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf),
GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size),
GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb,
- copy_state_hdecr)
+ copy_state_hdecr),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_INUSE, l0_guest_heap_inuse),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_MAX, l0_guest_heap_max),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_INUSE,
+ l0_guest_pgtable_size_inuse),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_MAX,
+ l0_guest_pgtable_size_max),
+ GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_RECLAIMED,
+ l0_guest_pgtable_reclaimed),
};
void spapr_nested_gsb_init(void)
@@ -1028,8 +1049,13 @@ void spapr_nested_gsb_init(void)
else if (type->id >= GSB_VCPU_IN_BUFFER)
/* 0x0c00 - 0xf000 Thread + RW */
type->flags = 0;
+ else if (type->id >= GSB_L0_GUEST_HEAP_INUSE)
+
+ /* 0x0800 - 0x0804 Hostwide Counters + RO */
+ type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE |
+ GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY;
else if (type->id >= GSB_VCPU_LPVR)
- /* 0x0003 - 0x0bff Guest + RW */
+ /* 0x0003 - 0x07ff Guest + RW */
type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
else if (type->id >= GSB_HV_VCPU_STATE_SIZE)
/* 0x0001 - 0x0002 Guest + RO */
@@ -1136,18 +1162,26 @@ static bool guest_state_request_check(struct guest_state_request *gsr)
return false;
}
- if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
+ if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE) {
+ /* Hostwide elements can't be combined with other types */
+ if (!(gsr->flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a host wide "
+ "Element ID:%04x.\n", id);
+ return false;
+ }
+ } else if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
/* guest wide element type */
if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) {
- qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide "
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a guest wide "
"Element ID:%04x.\n", id);
return false;
}
} else {
/* thread wide element type */
- if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) {
- qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide "
- "Element ID:%04x.\n", id);
+ if (gsr->flags & (GUEST_STATE_REQUEST_GUEST_WIDE |
+ GUEST_STATE_REQUEST_HOST_WIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a thread wide"
+ " Element ID:%04x.\n", id);
return false;
}
}
@@ -1184,6 +1218,12 @@ static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
return H_PARAMETER;
}
+ /* P11 capabilities */
+ if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
+ spapr->max_compat_pvr)) {
+ env->gpr[4] |= H_GUEST_CAPABILITIES_P11_MODE;
+ }
+
/* P10 capabilities */
if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
spapr->max_compat_pvr)) {
@@ -1226,7 +1266,10 @@ static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu,
env->gpr[4] = 1;
/* set R5 to the first supported Power Processor Mode */
- if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
+ if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
+ spapr->max_compat_pvr)) {
+ env->gpr[5] = H_GUEST_CAP_P11_MODE_BMAP;
+ } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
spapr->max_compat_pvr)) {
env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP;
} else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
@@ -1407,7 +1450,8 @@ static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
return H_SUCCESS;
}
-static target_ulong getset_state(SpaprMachineStateNestedGuest *guest,
+static target_ulong getset_state(SpaprMachineState *spapr,
+ SpaprMachineStateNestedGuest *guest,
uint64_t vcpuid,
struct guest_state_request *gsr)
{
@@ -1440,7 +1484,7 @@ static target_ulong getset_state(SpaprMachineStateNestedGuest *guest,
/* Get pointer to guest data to get/set */
if (type->location && type->copy) {
- ptr = type->location(guest, vcpuid);
+ ptr = type->location(spapr, guest, vcpuid);
assert(ptr);
if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) {
return H_INVALID_ELEMENT_VALUE;
@@ -1457,6 +1501,7 @@ next_element:
}
static target_ulong map_and_getset_state(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
SpaprMachineStateNestedGuest *guest,
uint64_t vcpuid,
struct guest_state_request *gsr)
@@ -1480,7 +1525,7 @@ static target_ulong map_and_getset_state(PowerPCCPU *cpu,
goto out1;
}
- rc = getset_state(guest, vcpuid, gsr);
+ rc = getset_state(spapr, guest, vcpuid, gsr);
out1:
address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len);
@@ -1498,27 +1543,46 @@ static target_ulong h_guest_getset_state(PowerPCCPU *cpu,
target_ulong buf = args[3];
target_ulong buflen = args[4];
struct guest_state_request gsr;
- SpaprMachineStateNestedGuest *guest;
+ SpaprMachineStateNestedGuest *guest = NULL;
- guest = spapr_get_nested_guest(spapr, lpid);
- if (!guest) {
- return H_P2;
- }
gsr.buf = buf;
assert(buflen <= GSB_MAX_BUF_SIZE);
gsr.len = buflen;
gsr.flags = 0;
- if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
+
+ /* Works for both get/set state */
+ if ((flags & H_GUEST_GET_STATE_FLAGS_GUEST_WIDE) ||
+ (flags & H_GUEST_SET_STATE_FLAGS_GUEST_WIDE)) {
gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE;
}
- if (flags & ~H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
- return H_PARAMETER; /* flag not supported yet */
- }
if (set) {
+ if (flags & ~H_GUEST_SET_STATE_FLAGS_MASK) {
+ return H_PARAMETER;
+ }
gsr.flags |= GUEST_STATE_REQUEST_SET;
+ } else {
+ /*
+ * Reserved fields must not be set in flags, nor may the
+ * GUEST and HOST wide bits both be set
+ */
+ if ((flags & ~H_GUEST_GET_STATE_FLAGS_MASK) ||
+ (flags == H_GUEST_GET_STATE_FLAGS_MASK)) {
+ return H_PARAMETER;
+ }
+
+ if (flags & H_GUEST_GET_STATE_FLAGS_HOST_WIDE) {
+ gsr.flags |= GUEST_STATE_REQUEST_HOST_WIDE;
+ }
+ }
+
+ if (!(gsr.flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
+ guest = spapr_get_nested_guest(spapr, lpid);
+ if (!guest) {
+ return H_P2;
+ }
}
- return map_and_getset_state(cpu, guest, vcpuid, &gsr);
+ return map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
}
static target_ulong h_guest_set_state(PowerPCCPU *cpu,
@@ -1629,7 +1693,8 @@ static int get_exit_ids(uint64_t srr0, uint16_t ids[16])
return nr;
}
-static void exit_process_output_buffer(PowerPCCPU *cpu,
+static void exit_process_output_buffer(SpaprMachineState *spapr,
+ PowerPCCPU *cpu,
SpaprMachineStateNestedGuest *guest,
target_ulong vcpuid,
target_ulong *r3)
@@ -1667,10 +1732,9 @@ static void exit_process_output_buffer(PowerPCCPU *cpu,
gsr.gsb = gsb;
gsr.len = VCPU_OUT_BUF_MIN_SZ;
gsr.flags = 0; /* get + never guest wide */
- getset_state(guest, vcpuid, &gsr);
+ getset_state(spapr, guest, vcpuid, &gsr);
address_space_unmap(CPU(cpu)->as, gsb, len, true, len);
- return;
}
static
@@ -1693,7 +1757,7 @@ void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp)
exit_nested_store_l2(cpu, excp, vcpu);
/* do the output buffer for run_vcpu*/
- exit_process_output_buffer(cpu, guest, vcpuid, &r3_return);
+ exit_process_output_buffer(spapr, cpu, guest, vcpuid, &r3_return);
assert(env->spr[SPR_LPIDR] != 0);
nested_load_state(cpu, spapr_cpu->nested_host_state);
@@ -1808,7 +1872,7 @@ static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu,
gsr.buf = vcpu->runbufin.addr;
gsr.len = vcpu->runbufin.size;
gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */
- rc = map_and_getset_state(cpu, guest, vcpuid, &gsr);
+ rc = map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
if (rc == H_SUCCESS) {
nested_papr_run_vcpu(cpu, lpid, vcpu);
} else {
diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c
index 7d2dfe5..72b4a63 100644
--- a/hw/ppc/spapr_nvdimm.c
+++ b/hw/ppc/spapr_nvdimm.c
@@ -235,8 +235,6 @@ void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt)
spapr_dt_nvdimm(spapr, fdt, offset, nvdimm);
}
g_slist_free(nvdimms);
-
- return;
}
static target_ulong h_scm_read_metadata(PowerPCCPU *cpu,
@@ -884,22 +882,22 @@ static void spapr_nvdimm_unrealize(NVDIMMDevice *dimm)
vmstate_unregister(NULL, &vmstate_spapr_nvdimm_states, dimm);
}
-static Property spapr_nvdimm_properties[] = {
#ifdef CONFIG_LIBPMEM
+static const Property spapr_nvdimm_properties[] = {
DEFINE_PROP_BOOL("pmem-override", SpaprNVDIMMDevice, pmem_override, false),
-#endif
- DEFINE_PROP_END_OF_LIST(),
};
+#endif
-static void spapr_nvdimm_class_init(ObjectClass *oc, void *data)
+static void spapr_nvdimm_class_init(ObjectClass *oc, const void *data)
{
- DeviceClass *dc = DEVICE_CLASS(oc);
NVDIMMClass *nvc = NVDIMM_CLASS(oc);
nvc->realize = spapr_nvdimm_realize;
nvc->unrealize = spapr_nvdimm_unrealize;
- device_class_set_props(dc, spapr_nvdimm_properties);
+#ifdef CONFIG_LIBPMEM
+ device_class_set_props(DEVICE_CLASS(oc), spapr_nvdimm_properties);
+#endif
}
static void spapr_nvdimm_init(Object *obj)
diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c
index 88e2953..75ab4fe 100644
--- a/hw/ppc/spapr_ovec.c
+++ b/hw/ppc/spapr_ovec.c
@@ -15,7 +15,8 @@
#include "hw/ppc/spapr_ovec.h"
#include "migration/vmstate.h"
#include "qemu/bitmap.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "qemu/error-report.h"
#include "trace.h"
#include <libfdt.h>
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index ed4454b..1ac1185 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -34,7 +34,7 @@
#include "hw/pci/pci_host.h"
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"
@@ -45,10 +45,10 @@
#include "hw/pci/pci_ids.h"
#include "hw/ppc/spapr_drc.h"
#include "hw/qdev-properties.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/kvm.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/device_tree.h"
+#include "system/kvm.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "hw/ppc/spapr_numa.h"
#include "qemu/log.h"
@@ -1237,10 +1237,6 @@ static void add_drcs(SpaprPhbState *phb, PCIBus *bus)
int i;
uint8_t chassis;
- if (!phb->dr_enabled) {
- return;
- }
-
chassis = chassis_from_bus(bus);
if (pci_bus_is_root(bus)) {
@@ -1260,10 +1256,6 @@ static void remove_drcs(SpaprPhbState *phb, PCIBus *bus)
int i;
uint8_t chassis;
- if (!phb->dr_enabled) {
- return;
- }
-
chassis = chassis_from_bus(bus);
for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) {
@@ -1291,12 +1283,7 @@ static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev,
PciWalkFdt *p = opaque;
int err;
- if (p->err) {
- /* Something's already broken, don't keep going */
- return;
- }
-
- if (!pdev->enabled) {
+ if (p->err || !pdev->enabled) {
return;
}
@@ -1552,17 +1539,6 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
uint32_t slotnr = PCI_SLOT(pdev->devfn);
- if (!phb->dr_enabled) {
- /* if this is a hotplug operation initiated by the user
- * we need to let them know it's not enabled
- */
- if (plugged_dev->hotplugged) {
- error_setg(errp, "Bus '%s' does not support hotplugging",
- phb->parent_obj.bus->qbus.name);
- return;
- }
- }
-
if (IS_PCI_BRIDGE(plugged_dev)) {
if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) {
return;
@@ -1598,10 +1574,10 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
uint32_t slotnr = PCI_SLOT(pdev->devfn);
/*
- * If DR is disabled we don't need to do anything in the case of
- * hotplug or coldplug callbacks.
+ * If DR or the PCI device is disabled we don't need to do anything
+ * in the case of hotplug or coldplug callbacks.
*/
- if (!phb->dr_enabled) {
+ if (!pdev->enabled) {
return;
}
@@ -1679,13 +1655,12 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
SpaprDrc *drc = drc_from_dev(phb, pdev);
- if (!phb->dr_enabled) {
- error_setg(errp, "Bus '%s' does not support hotplugging",
- phb->parent_obj.bus->qbus.name);
+ g_assert(drc);
+
+ if (!drc->dev) {
return;
}
- g_assert(drc);
g_assert(drc->dev == plugged_dev);
if (!spapr_drc_unplug_requested(drc)) {
@@ -1853,30 +1828,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */
- if (sphb->mem64_win_size != 0) {
- if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
- error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
- " (max 2 GiB)", sphb->mem_win_size);
- return;
- }
-
- /* 64-bit window defaults to identity mapping */
- sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
- } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
- /*
- * For compatibility with old configuration, if no 64-bit MMIO
- * window is specified, but the ordinary (32-bit) memory
- * window is specified as > 2GiB, we treat it as a 2GiB 32-bit
- * window, with a 64-bit MMIO window following on immediately
- * afterwards
- */
- sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE;
- sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE;
- sphb->mem64_win_pciaddr =
- SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE;
- sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE;
+ if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
+ error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
+ " (max 2 GiB)", sphb->mem_win_size);
+ return;
}
+ /* 64-bit window defaults to identity mapping */
+ sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
+
if (spapr_pci_find_phb(spapr, sphb->buid)) {
SpaprPhbState *s;
@@ -2087,7 +2047,7 @@ static void spapr_phb_reset(DeviceState *qdev)
g_hash_table_remove_all(sphb->msi);
}
-static Property spapr_phb_properties[] = {
+static const Property spapr_phb_properties[] = {
DEFINE_PROP_UINT32("index", SpaprPhbState, index, -1),
DEFINE_PROP_UINT64("mem_win_size", SpaprPhbState, mem_win_size,
SPAPR_PCI_MEM32_WIN_SIZE),
@@ -2095,8 +2055,6 @@ static Property spapr_phb_properties[] = {
SPAPR_PCI_MEM64_WIN_SIZE),
DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
- DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled,
- true),
/* Default DMA window is 0..1GB */
DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0),
DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000),
@@ -2107,13 +2065,10 @@ static Property spapr_phb_properties[] = {
(1ULL << 12) | (1ULL << 16)
| (1ULL << 21) | (1ULL << 24)),
DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1),
- DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState,
- pre_2_8_migration, false),
DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
pcie_ecs, true),
DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState,
pre_5_1_assoc, false),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_spapr_pci_lsi = {
@@ -2146,20 +2101,6 @@ static int spapr_pci_pre_save(void *opaque)
gpointer key, value;
int i;
- if (sphb->pre_2_8_migration) {
- sphb->mig_liobn = sphb->dma_liobn[0];
- sphb->mig_mem_win_addr = sphb->mem_win_addr;
- sphb->mig_mem_win_size = sphb->mem_win_size;
- sphb->mig_io_win_addr = sphb->io_win_addr;
- sphb->mig_io_win_size = sphb->io_win_size;
-
- if ((sphb->mem64_win_size != 0)
- && (sphb->mem64_win_addr
- == (sphb->mem_win_addr + sphb->mem_win_size))) {
- sphb->mig_mem_win_size += sphb->mem64_win_size;
- }
- }
-
g_free(sphb->msi_devs);
sphb->msi_devs = NULL;
sphb->msi_devs_num = g_hash_table_size(sphb->msi);
@@ -2206,13 +2147,6 @@ static int spapr_pci_post_load(void *opaque, int version_id)
return 0;
}
-static bool pre_2_8_migration(void *opaque, int version_id)
-{
- SpaprPhbState *sphb = opaque;
-
- return sphb->pre_2_8_migration;
-}
-
static const VMStateDescription vmstate_spapr_pci = {
.name = "spapr_pci",
.version_id = 2,
@@ -2222,11 +2156,6 @@ static const VMStateDescription vmstate_spapr_pci = {
.post_load = spapr_pci_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL),
- VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration),
VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0,
vmstate_spapr_pci_lsi, SpaprPciLsi),
VMSTATE_INT32(msi_devs_num, SpaprPhbState),
@@ -2244,7 +2173,7 @@ static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
return sphb->dtbusname;
}
-static void spapr_phb_class_init(ObjectClass *klass, void *data)
+static void spapr_phb_class_init(ObjectClass *klass, const void *data)
{
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -2254,7 +2183,7 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
dc->realize = spapr_phb_realize;
dc->unrealize = spapr_phb_unrealize;
device_class_set_props(dc, spapr_phb_properties);
- dc->reset = spapr_phb_reset;
+ device_class_set_legacy_reset(dc, spapr_phb_reset);
dc->vmsd = &vmstate_spapr_pci;
/* Supported by TYPE_SPAPR_MACHINE */
dc->user_creatable = true;
@@ -2271,7 +2200,7 @@ static const TypeInfo spapr_phb_info = {
.instance_size = sizeof(SpaprPhbState),
.instance_finalize = spapr_phb_finalizefn,
.class_init = spapr_phb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c
index 76b2a34..e318d0d 100644
--- a/hw/ppc/spapr_pci_vfio.c
+++ b/hw/ppc/spapr_pci_vfio.c
@@ -24,7 +24,7 @@
#include "hw/pci-host/spapr.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_device.h"
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-container.h"
#include "qemu/error-report.h"
#include CONFIG_DEVICES /* CONFIG_VFIO_PCI */
@@ -85,7 +85,7 @@ static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
- VFIOAddressSpace *space = vfio_get_address_space(as);
+ VFIOAddressSpace *space = vfio_address_space_get(as);
VFIOContainerBase *bcontainer = NULL;
if (QLIST_EMPTY(&space->containers)) {
@@ -105,7 +105,7 @@ static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
}
out:
- vfio_put_address_space(space);
+ vfio_address_space_put(space);
return container_of(bcontainer, VFIOContainer, bcontainer);
}
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
index c2fda7a..6fec607 100644
--- a/hw/ppc/spapr_rng.c
+++ b/hw/ppc/spapr_rng.c
@@ -22,8 +22,8 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/rng.h"
+#include "system/device_tree.h"
+#include "system/rng.h"
#include "hw/ppc/spapr.h"
#include "hw/qdev-properties.h"
#include "kvm_ppc.h"
@@ -130,14 +130,13 @@ static void spapr_rng_realize(DeviceState *dev, Error **errp)
}
}
-static Property spapr_rng_properties[] = {
+static const Property spapr_rng_properties[] = {
DEFINE_PROP_BOOL("use-kvm", SpaprRngState, use_kvm, false),
DEFINE_PROP_LINK("rng", SpaprRngState, backend, TYPE_RNG_BACKEND,
RngBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void spapr_rng_class_init(ObjectClass *oc, void *data)
+static void spapr_rng_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index f329693..78309db 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -28,12 +28,12 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/cpus.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
-#include "sysemu/qtest.h"
+#include "system/system.h"
+#include "system/device_tree.h"
+#include "system/cpus.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
+#include "system/qtest.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr.h"
@@ -110,7 +110,8 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
id = rtas_ld(args, 0);
cpu = spapr_find_cpu(id);
if (cpu != NULL) {
- if (CPU(cpu)->halted) {
+ CPUPPCState *env = &cpu->env;
+ if (env->quiesced) {
rtas_st(rets, 1, 0);
} else {
rtas_st(rets, 1, 2);
@@ -215,6 +216,8 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr,
* For the same reason, set PSSCR_EC.
*/
env->spr[SPR_PSSCR] |= PSSCR_EC;
+ env->quiesced = true; /* set "RTAS stopped" state. */
+ ppc_maybe_interrupt(env);
cs->halted = 1;
ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm);
kvmppc_set_reg_ppc_online(cpu, 0);
@@ -565,7 +568,6 @@ static bool spapr_qtest_callback(CharBackend *chr, gchar **words)
g_assert(rc == 0);
res = qtest_rtas_call(words[1], nargs, args, nret, ret);
- qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIu64"\n", res);
return true;
diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c
index deb3ea4..1f7d2d8 100644
--- a/hw/ppc/spapr_rtc.c
+++ b/hw/ppc/spapr_rtc.c
@@ -27,8 +27,8 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "hw/ppc/spapr.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
@@ -163,7 +163,7 @@ static const VMStateDescription vmstate_spapr_rtc = {
},
};
-static void spapr_rtc_class_init(ObjectClass *oc, void *data)
+static void spapr_rtc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/ppc/spapr_tpm_proxy.c b/hw/ppc/spapr_tpm_proxy.c
index e10af35..1297b3a 100644
--- a/hw/ppc/spapr_tpm_proxy.c
+++ b/hw/ppc/spapr_tpm_proxy.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/ppc/spapr.h"
#include "hw/qdev-properties.h"
#include "trace.h"
@@ -41,8 +41,8 @@ static ssize_t tpm_execute(SpaprTpmProxy *tpm_proxy, target_ulong *args)
target_ulong data_in_size = args[2];
uint64_t data_out = ppc64_phys_to_real(args[3]);
target_ulong data_out_size = args[4];
- uint8_t buf_in[TPM_SPAPR_BUFSIZE];
- uint8_t buf_out[TPM_SPAPR_BUFSIZE];
+ QEMU_UNINITIALIZED uint8_t buf_in[TPM_SPAPR_BUFSIZE];
+ QEMU_UNINITIALIZED uint8_t buf_out[TPM_SPAPR_BUFSIZE];
ssize_t ret;
trace_spapr_tpm_execute(data_in, data_in_size, data_out, data_out_size);
@@ -145,12 +145,11 @@ static void spapr_tpm_proxy_unrealize(DeviceState *d)
qemu_unregister_reset(spapr_tpm_proxy_reset, tpm_proxy);
}
-static Property spapr_tpm_proxy_properties[] = {
+static const Property spapr_tpm_proxy_properties[] = {
DEFINE_PROP_STRING("host-path", SpaprTpmProxy, host_path),
- DEFINE_PROP_END_OF_LIST(),
};
-static void spapr_tpm_proxy_class_init(ObjectClass *k, void *data)
+static void spapr_tpm_proxy_class_init(ObjectClass *k, const void *data)
{
DeviceClass *dk = DEVICE_CLASS(k);
diff --git a/hw/ppc/spapr_vhyp_mmu.c b/hw/ppc/spapr_vhyp_mmu.c
index b3dd8b3..2d41d7f 100644
--- a/hw/ppc/spapr_vhyp_mmu.c
+++ b/hw/ppc/spapr_vhyp_mmu.c
@@ -15,19 +15,6 @@
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
-#include "mmu-book3s-v3.h"
-
-
-static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
-{
- /*
- * hash value/pteg group index is normalized by HPT mask
- */
- if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
- return false;
- }
- return true;
-}
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
@@ -70,7 +57,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
pteh &= ~0x60ULL;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
@@ -119,7 +106,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return REMOVE_PARM;
}
@@ -250,7 +237,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
@@ -287,7 +274,7 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
int i, ridx, n_entries = 1;
const ppc_hash_pte64_t *hptes;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 3221874..7759436 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -27,8 +27,8 @@
#include "hw/loader.h"
#include "elf.h"
#include "hw/sysbus.h"
-#include "sysemu/kvm.h"
-#include "sysemu/device_tree.h"
+#include "system/kvm.h"
+#include "system/device_tree.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
@@ -50,7 +50,7 @@ static char *spapr_vio_get_dev_name(DeviceState *qdev)
return g_strdup_printf("%s@%x", pc->dt_name, dev->reg);
}
-static void spapr_vio_bus_class_init(ObjectClass *klass, void *data)
+static void spapr_vio_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -599,7 +599,7 @@ SpaprVioBus *spapr_vio_bus_init(void)
return bus;
}
-static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data)
+static void spapr_vio_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -631,11 +631,11 @@ const VMStateDescription vmstate_spapr_vio = {
},
};
-static void vio_spapr_device_class_init(ObjectClass *klass, void *data)
+static void vio_spapr_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->realize = spapr_vio_busdev_realize;
- k->reset = spapr_vio_busdev_reset;
+ device_class_set_legacy_reset(k, spapr_vio_busdev_reset);
k->bus_type = TYPE_SPAPR_VIO_BUS;
}
diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c
index 09f29be..46d7875 100644
--- a/hw/ppc/spapr_vof.c
+++ b/hw/ppc/spapr_vof.c
@@ -10,7 +10,7 @@
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/vof.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qom/qom-qobject.h"
#include "trace.h"
@@ -28,7 +28,7 @@ target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr,
void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt)
{
- char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
+ g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
vof_build_dt(fdt, spapr->vof);
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index bf29bbf..1f125ce 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -95,6 +95,10 @@ vof_write(uint32_t ih, unsigned cb, const char *msg) "ih=0x%x [%u] \"%s\""
vof_avail(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64
vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64
+# pnv_adu.c
+pnv_adu_xscom_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_adu_xscom_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+
# pnv_chiptod.c
pnv_chiptod_xscom_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
pnv_chiptod_xscom_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index c49da1f..c9969ae 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -28,12 +28,12 @@
#include "exec/page-protection.h"
#include "cpu.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/block/flash.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/reset.h"
#include "hw/boards.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/loader.h"
#include "elf.h"
#include "qapi/error.h"
@@ -67,29 +67,6 @@ static struct boot_info
void *vfdt;
} boot_info;
-/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
-static void mmubooke_create_initial_mapping(CPUPPCState *env,
- target_ulong va,
- hwaddr pa)
-{
- ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
-
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1U << 31; /* up to 0x80000000 */
- tlb->EPN = va & TARGET_PAGE_MASK;
- tlb->RPN = pa & TARGET_PAGE_MASK;
- tlb->PID = 0;
-
- tlb = &env->tlb.tlbe[1];
- tlb->attr = 0;
- tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1U << 31; /* up to 0xffffffff */
- tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
- tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
- tlb->PID = 0;
-}
-
static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk)
{
PowerPCCPU *cpu;
@@ -139,9 +116,10 @@ static void main_cpu_reset(void *opaque)
env->gpr[3] = bi->fdt;
env->nip = bi->bootstrap_pc;
- /* Create a mapping for the kernel. */
- mmubooke_create_initial_mapping(env, 0, 0);
- env->gpr[6] = tswap32(EPAPR_MAGIC);
+ /* Create a mapping spanning the 32bit addr space. */
+ booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
+ booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
+ env->gpr[6] = EPAPR_MAGIC;
env->gpr[7] = bi->ima_size;
}
@@ -168,7 +146,7 @@ static int xilinx_load_device_tree(MachineState *machine,
/* Try the local "ppc.dtb" override. */
fdt = load_device_tree("ppc.dtb", &fdt_size);
if (!fdt) {
- path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ path = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE);
if (path) {
fdt = load_device_tree(path, &fdt_size);
g_free(path);
@@ -239,6 +217,7 @@ static void virtex_init(MachineState *machine)
cpu_irq = qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT);
dev = qdev_new("xlnx.xps-intc");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_BIG);
qdev_prop_set_uint32(dev, "kind-of-intr", 0);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
@@ -252,6 +231,7 @@ static void virtex_init(MachineState *machine)
/* 2 timers at irq 2 @ 62 Mhz. */
dev = qdev_new("xlnx.xps-timer");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_BIG);
qdev_prop_set_uint32(dev, "one-timer-only", 0);
qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -264,8 +244,8 @@ static void virtex_init(MachineState *machine)
/* Boots a kernel elf binary. */
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
- &entry, NULL, &high, NULL, 1, PPC_ELF_MACHINE,
- 0, 0);
+ &entry, NULL, &high, NULL,
+ ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
boot_info.bootstrap_pc = entry & 0x00ffffff;
if (kernel_size < 0) {
diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c
index e3b430a..f14efa3 100644
--- a/hw/ppc/vof.c
+++ b/hw/ppc/vof.c
@@ -15,10 +15,10 @@
#include "qemu/units.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/ppc/vof.h"
#include "hw/ppc/fdt.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qom/qom-qobject.h"
#include "trace.h"
@@ -646,7 +646,7 @@ static void vof_dt_memory_available(void *fdt, GArray *claimed, uint64_t base)
mem0_reg = fdt_getprop(fdt, offset, "reg", &proplen);
g_assert(mem0_reg && proplen == sizeof(uint32_t) * (ac + sc));
if (sc == 2) {
- mem0_end = be64_to_cpu(*(uint64_t *)(mem0_reg + sizeof(uint32_t) * ac));
+ mem0_end = ldq_be_p(mem0_reg + sizeof(uint32_t) * ac);
} else {
mem0_end = be32_to_cpu(*(uint32_t *)(mem0_reg + sizeof(uint32_t) * ac));
}
diff --git a/hw/remote/iohub.c b/hw/remote/iohub.c
index 40dfee4..988d328 100644
--- a/hw/remote/iohub.c
+++ b/hw/remote/iohub.c
@@ -33,19 +33,6 @@ void remote_iohub_init(RemoteIOHubState *iohub)
}
}
-void remote_iohub_finalize(RemoteIOHubState *iohub)
-{
- int pirq;
-
- for (pirq = 0; pirq < REMOTE_IOHUB_NB_PIRQS; pirq++) {
- qemu_set_fd_handler(event_notifier_get_fd(&iohub->resamplefds[pirq]),
- NULL, NULL, NULL);
- event_notifier_cleanup(&iohub->irqfds[pirq]);
- event_notifier_cleanup(&iohub->resamplefds[pirq]);
- qemu_mutex_destroy(&iohub->irq_level_lock[pirq]);
- }
-}
-
int remote_iohub_map_irq(PCIDevice *pci_dev, int intx)
{
return pci_dev->devfn;
diff --git a/hw/remote/iommu.c b/hw/remote/iommu.c
index 7c56aad..3e0758a 100644
--- a/hw/remote/iommu.c
+++ b/hw/remote/iommu.c
@@ -13,8 +13,8 @@
#include "hw/remote/iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
#include "trace.h"
/**
diff --git a/hw/remote/machine.c b/hw/remote/machine.c
index fdc6c44..e4b4783 100644
--- a/hw/remote/machine.c
+++ b/hw/remote/machine.c
@@ -16,7 +16,7 @@
#include "qemu/osdep.h"
#include "hw/remote/machine.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qapi/error.h"
#include "hw/pci/pci_host.h"
#include "hw/remote/iohub.h"
@@ -121,7 +121,7 @@ static void remote_machine_dev_unplug_cb(HotplugHandler *hotplug_dev,
}
}
-static void remote_machine_class_init(ObjectClass *oc, void *data)
+static void remote_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -146,7 +146,7 @@ static const TypeInfo remote_machine = {
.instance_size = sizeof(RemoteMachineState),
.instance_init = remote_machine_instance_init,
.class_init = remote_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/remote/memory.c b/hw/remote/memory.c
index 6d60da9..00193a5 100644
--- a/hw/remote/memory.c
+++ b/hw/remote/memory.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "hw/remote/memory.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "qapi/error.h"
static void remote_sysmem_reset(void)
diff --git a/hw/remote/message.c b/hw/remote/message.c
index 50f6bf2..273f1e0 100644
--- a/hw/remote/message.c
+++ b/hw/remote/message.c
@@ -13,12 +13,12 @@
#include "io/channel.h"
#include "hw/remote/mpqemu-link.h"
#include "qapi/error.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "hw/remote/memory.h"
#include "hw/remote/iohub.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
static void process_config_write(QIOChannel *ioc, PCIDevice *dev,
MPQemuMsg *msg, Error **errp);
@@ -215,13 +215,10 @@ fail:
static void process_device_reset_msg(QIOChannel *ioc, PCIDevice *dev,
Error **errp)
{
- DeviceClass *dc = DEVICE_GET_CLASS(dev);
DeviceState *s = DEVICE(dev);
MPQemuMsg ret = { 0 };
- if (dc->reset) {
- dc->reset(s);
- }
+ device_cold_reset(s);
ret.cmd = MPQEMU_CMD_RET;
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
index 4394dc4..49885a1 100644
--- a/hw/remote/mpqemu-link.c
+++ b/hw/remote/mpqemu-link.c
@@ -17,7 +17,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "io/channel.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "trace.h"
/*
@@ -110,7 +110,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
bql_unlock();
}
- ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
+ ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, 0, errp);
if (drop_bql && !iothread && !qemu_in_coroutine()) {
bql_lock();
diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c
index a926f61..30ac749 100644
--- a/hw/remote/proxy-memory-listener.c
+++ b/hw/remote/proxy-memory-listener.c
@@ -10,9 +10,9 @@
#include "qemu/int128.h"
#include "qemu/range.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "exec/cpu-common.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/remote/mpqemu-link.h"
diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c
index fbc85a8..b0165aa 100644
--- a/hw/remote/proxy.c
+++ b/hw/remote/proxy.c
@@ -21,7 +21,7 @@
#include "hw/remote/proxy-memory-listener.h"
#include "qom/object.h"
#include "qemu/event_notifier.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
static void probe_pci_info(PCIDevice *dev, Error **errp);
static void proxy_device_reset(DeviceState *dev);
@@ -191,12 +191,11 @@ static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val,
config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE);
}
-static Property proxy_properties[] = {
+static const Property proxy_properties[] = {
DEFINE_PROP_STRING("fd", PCIProxyDev, fd),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pci_proxy_dev_class_init(ObjectClass *klass, void *data)
+static void pci_proxy_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -206,7 +205,7 @@ static void pci_proxy_dev_class_init(ObjectClass *klass, void *data)
k->config_read = pci_proxy_read_config;
k->config_write = pci_proxy_write_config;
- dc->reset = proxy_device_reset;
+ device_class_set_legacy_reset(dc, proxy_device_reset);
device_class_set_props(dc, proxy_properties);
}
@@ -216,7 +215,7 @@ static const TypeInfo pci_proxy_dev_type_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIProxyDev),
.class_init = pci_proxy_dev_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c
index dc27cc8..8588290 100644
--- a/hw/remote/remote-obj.c
+++ b/hw/remote/remote-obj.c
@@ -17,7 +17,7 @@
#include "hw/remote/machine.h"
#include "io/channel-util.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/pci/pci.h"
#include "qemu/sockets.h"
#include "monitor/monitor.h"
@@ -163,7 +163,7 @@ static void remote_object_finalize(Object *obj)
g_free(o->devid);
}
-static void remote_object_class_init(ObjectClass *klass, void *data)
+static void remote_object_class_init(ObjectClass *klass, const void *data)
{
RemoteObjectClass *k = REMOTE_OBJECT_CLASS(klass);
@@ -188,7 +188,7 @@ static const TypeInfo remote_object_info = {
.instance_finalize = remote_object_finalize,
.class_size = sizeof(RemoteObjectClass),
.class_init = remote_object_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
index 8dbafaf..ea6165e 100644
--- a/hw/remote/vfio-user-obj.c
+++ b/hw/remote/vfio-user-obj.c
@@ -43,7 +43,7 @@
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "trace.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/boards.h"
#include "hw/remote/machine.h"
#include "qapi/error.h"
@@ -52,12 +52,12 @@
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "libvfio-user.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "qemu/timer.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/remote/vfio-user-obj.h"
@@ -358,7 +358,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
int access_size;
uint64_t val;
- if (memory_access_is_direct(mr, is_write)) {
+ if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) {
/**
* Some devices expose a PCI expansion ROM, which could be buffer
* based as compared to other regions which are primarily based on
@@ -917,7 +917,7 @@ static void vfu_object_finalize(Object *obj)
}
}
-static void vfu_object_class_init(ObjectClass *klass, void *data)
+static void vfu_object_class_init(ObjectClass *klass, const void *data)
{
VfuObjectClass *k = VFU_OBJECT_CLASS(klass);
@@ -944,7 +944,7 @@ static const TypeInfo vfu_object_info = {
.instance_finalize = vfu_object_finalize,
.class_size = sizeof(VfuObjectClass),
.class_init = vfu_object_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
index a2030e3..e6a0ac1 100644
--- a/hw/riscv/Kconfig
+++ b/hw/riscv/Kconfig
@@ -1,3 +1,6 @@
+config RISCV_IOMMU
+ bool
+
config RISCV_NUMA
bool
@@ -22,6 +25,14 @@ config MICROCHIP_PFSOC
select SIFIVE_PLIC
select UNIMP
+config MICROBLAZE_V
+ bool
+ default y
+ depends on RISCV32 || RISCV64
+ select XILINX
+ select XILINX_AXI
+ select XILINX_ETHLITE
+
config OPENTITAN
bool
default y
@@ -44,9 +55,10 @@ config RISCV_VIRT
select PCI
select PCI_EXPRESS_GENERIC_BRIDGE
select PFLASH_CFI01
- select SERIAL
+ select SERIAL_MM
select RISCV_ACLINT
select RISCV_APLIC
+ select RISCV_IOMMU
select RISCV_IMSIC
select SIFIVE_PLIC
select SIFIVE_TEST
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index 47281ca..828a867 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -27,17 +27,17 @@
#include "hw/riscv/boot.h"
#include "hw/riscv/boot_opensbi.h"
#include "elf.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/qtest.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
+#include "system/device_tree.h"
+#include "system/qtest.h"
+#include "system/kvm.h"
+#include "system/reset.h"
#include <libfdt.h>
bool riscv_is_32bit(RISCVHartArrayState *harts)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(&harts->harts[0]);
- return mcc->misa_mxl_max == MXL_RV32;
+ return mcc->def->misa_mxl_max == MXL_RV32;
}
/*
@@ -67,9 +67,16 @@ char *riscv_plic_hart_config_string(int hart_count)
return g_strjoinv(",", (char **)vals);
}
-target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts,
+void riscv_boot_info_init(RISCVBootInfo *info, RISCVHartArrayState *harts)
+{
+ info->kernel_size = 0;
+ info->initrd_size = 0;
+ info->is_32bit = riscv_is_32bit(harts);
+}
+
+target_ulong riscv_calc_kernel_start_addr(RISCVBootInfo *info,
target_ulong firmware_end_addr) {
- if (riscv_is_32bit(harts)) {
+ if (info->is_32bit) {
return QEMU_ALIGN_UP(firmware_end_addr, 4 * MiB);
} else {
return QEMU_ALIGN_UP(firmware_end_addr, 2 * MiB);
@@ -128,11 +135,11 @@ char *riscv_find_firmware(const char *firmware_filename,
target_ulong riscv_find_and_load_firmware(MachineState *machine,
const char *default_machine_firmware,
- hwaddr firmware_load_addr,
+ hwaddr *firmware_load_addr,
symbol_fn_t sym_cb)
{
char *firmware_filename;
- target_ulong firmware_end_addr = firmware_load_addr;
+ target_ulong firmware_end_addr = *firmware_load_addr;
firmware_filename = riscv_find_firmware(machine->firmware,
default_machine_firmware);
@@ -148,7 +155,7 @@ target_ulong riscv_find_and_load_firmware(MachineState *machine,
}
target_ulong riscv_load_firmware(const char *firmware_filename,
- hwaddr firmware_load_addr,
+ hwaddr *firmware_load_addr,
symbol_fn_t sym_cb)
{
uint64_t firmware_entry, firmware_end;
@@ -159,22 +166,23 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL,
&firmware_entry, NULL, &firmware_end, NULL,
0, EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
+ *firmware_load_addr = firmware_entry;
return firmware_end;
}
firmware_size = load_image_targphys_as(firmware_filename,
- firmware_load_addr,
+ *firmware_load_addr,
current_machine->ram_size, NULL);
if (firmware_size > 0) {
- return firmware_load_addr + firmware_size;
+ return *firmware_load_addr + firmware_size;
}
error_report("could not load firmware '%s'", firmware_filename);
exit(1);
}
-static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
+static void riscv_load_initrd(MachineState *machine, RISCVBootInfo *info)
{
const char *filename = machine->initrd_filename;
uint64_t mem_size = machine->ram_size;
@@ -195,7 +203,7 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
* halfway into RAM, and for boards with 1GB of RAM or more we put
* the initrd at 512MB.
*/
- start = kernel_entry + MIN(mem_size / 2, 512 * MiB);
+ start = info->image_low_addr + MIN(mem_size / 2, 512 * MiB);
size = load_ramdisk(filename, start, mem_size - start);
if (size == -1) {
@@ -206,6 +214,9 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
}
}
+ info->initrd_start = start;
+ info->initrd_size = size;
+
/* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
if (fdt) {
end = start + size;
@@ -214,14 +225,14 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
}
}
-target_ulong riscv_load_kernel(MachineState *machine,
- RISCVHartArrayState *harts,
- target_ulong kernel_start_addr,
- bool load_initrd,
- symbol_fn_t sym_cb)
+void riscv_load_kernel(MachineState *machine,
+ RISCVBootInfo *info,
+ target_ulong kernel_start_addr,
+ bool load_initrd,
+ symbol_fn_t sym_cb)
{
const char *kernel_filename = machine->kernel_filename;
- uint64_t kernel_load_base, kernel_entry;
+ ssize_t kernel_size;
void *fdt = machine->fdt;
g_assert(kernel_filename != NULL);
@@ -233,21 +244,29 @@ target_ulong riscv_load_kernel(MachineState *machine,
 * the (expected) load address. This allows kernels to have
* separate SBI and ELF entry points (used by FreeBSD, for example).
*/
- if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
- NULL, &kernel_load_base, NULL, NULL, 0,
- EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
- kernel_entry = kernel_load_base;
+ kernel_size = load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, NULL,
+ &info->image_low_addr, &info->image_high_addr,
+ NULL, ELFDATA2LSB, EM_RISCV,
+ 1, 0, NULL, true, sym_cb);
+ if (kernel_size > 0) {
+ info->kernel_size = kernel_size;
goto out;
}
- if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
- NULL, NULL, NULL) > 0) {
+ kernel_size = load_uimage_as(kernel_filename, &info->image_low_addr,
+ NULL, NULL, NULL, NULL, NULL);
+ if (kernel_size > 0) {
+ info->kernel_size = kernel_size;
+ info->image_high_addr = info->image_low_addr + kernel_size;
goto out;
}
- if (load_image_targphys_as(kernel_filename, kernel_start_addr,
- current_machine->ram_size, NULL) > 0) {
- kernel_entry = kernel_start_addr;
+ kernel_size = load_image_targphys_as(kernel_filename, kernel_start_addr,
+ current_machine->ram_size, NULL);
+ if (kernel_size > 0) {
+ info->kernel_size = kernel_size;
+ info->image_low_addr = kernel_start_addr;
+ info->image_high_addr = info->image_low_addr + kernel_size;
goto out;
}
@@ -256,23 +275,21 @@ target_ulong riscv_load_kernel(MachineState *machine,
out:
/*
- * For 32 bit CPUs 'kernel_entry' can be sign-extended by
+ * For 32 bit CPUs 'image_low_addr' can be sign-extended by
* load_elf_ram_sym().
*/
- if (riscv_is_32bit(harts)) {
- kernel_entry = extract64(kernel_entry, 0, 32);
+ if (info->is_32bit) {
+ info->image_low_addr = extract64(info->image_low_addr, 0, 32);
}
if (load_initrd && machine->initrd_filename) {
- riscv_load_initrd(machine, kernel_entry);
+ riscv_load_initrd(machine, info);
}
if (fdt && machine->kernel_cmdline && *machine->kernel_cmdline) {
qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
machine->kernel_cmdline);
}
-
- return kernel_entry;
}
/*
@@ -292,11 +309,12 @@ out:
* The FDT is fdt_packed() during the calculation.
*/
uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size,
- MachineState *ms)
+ MachineState *ms, RISCVBootInfo *info)
{
int ret = fdt_pack(ms->fdt);
hwaddr dram_end, temp;
int fdtsize;
+ uint64_t dtb_start, dtb_start_limit;
/* Should only fail if we've built a corrupted tree */
g_assert(ret == 0);
@@ -307,6 +325,17 @@ uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size,
exit(1);
}
+ if (info->initrd_size) {
+ /* If initrd is successfully loaded, place DTB after it. */
+ dtb_start_limit = info->initrd_start + info->initrd_size;
+ } else if (info->kernel_size) {
+ /* If only kernel is successfully loaded, place DTB after it. */
+ dtb_start_limit = info->image_high_addr;
+ } else {
+ /* Otherwise, do not check DTB overlapping */
+ dtb_start_limit = 0;
+ }
+
/*
* A dram_size == 0, usually from a MemMapEntry[].size element,
* means that the DRAM block goes all the way to ms->ram_size.
@@ -316,13 +345,24 @@ uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size,
/*
* We should put fdt as far as possible to avoid kernel/initrd overwriting
- * its content. But it should be addressable by 32 bit system as well.
- * Thus, put it at an 2MB aligned address that less than fdt size from the
- * end of dram or 3GB whichever is lesser.
+ * its content. But it must also be addressable by a 32-bit system in RV32.
+ * Thus, place it near the end of dram in RV64, and near the end of dram or
+ * 3GB, whichever is lower, in RV32.
*/
- temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end;
+ if (!info->is_32bit) {
+ temp = dram_end;
+ } else {
+ temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end;
+ }
- return QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB);
+ dtb_start = QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB);
+
+ if (dtb_start_limit && (dtb_start < dtb_start_limit)) {
+ error_report("Not enough memory to place DTB after kernel/initrd");
+ exit(1);
+ }
+
+ return dtb_start;
}
/*
@@ -334,35 +374,39 @@ void riscv_load_fdt(hwaddr fdt_addr, void *fdt)
uint32_t fdtsize = fdt_totalsize(fdt);
/* copy in the device tree */
- qemu_fdt_dumpdtb(fdt, fdtsize);
-
rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr,
&address_space_memory);
qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds,
rom_ptr_for_as(&address_space_memory, fdt_addr, fdtsize));
}
-void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
- hwaddr rom_size, uint32_t reset_vec_size,
+void riscv_rom_copy_firmware_info(MachineState *machine,
+ RISCVHartArrayState *harts,
+ hwaddr rom_base, hwaddr rom_size,
+ uint32_t reset_vec_size,
uint64_t kernel_entry)
{
+ struct fw_dynamic_info32 dinfo32;
struct fw_dynamic_info dinfo;
size_t dinfo_len;
- if (sizeof(dinfo.magic) == 4) {
- dinfo.magic = cpu_to_le32(FW_DYNAMIC_INFO_MAGIC_VALUE);
- dinfo.version = cpu_to_le32(FW_DYNAMIC_INFO_VERSION);
- dinfo.next_mode = cpu_to_le32(FW_DYNAMIC_INFO_NEXT_MODE_S);
- dinfo.next_addr = cpu_to_le32(kernel_entry);
+ if (riscv_is_32bit(harts)) {
+ dinfo32.magic = cpu_to_le32(FW_DYNAMIC_INFO_MAGIC_VALUE);
+ dinfo32.version = cpu_to_le32(FW_DYNAMIC_INFO_VERSION);
+ dinfo32.next_mode = cpu_to_le32(FW_DYNAMIC_INFO_NEXT_MODE_S);
+ dinfo32.next_addr = cpu_to_le32(kernel_entry);
+ dinfo32.options = 0;
+ dinfo32.boot_hart = 0;
+ dinfo_len = sizeof(dinfo32);
} else {
dinfo.magic = cpu_to_le64(FW_DYNAMIC_INFO_MAGIC_VALUE);
dinfo.version = cpu_to_le64(FW_DYNAMIC_INFO_VERSION);
dinfo.next_mode = cpu_to_le64(FW_DYNAMIC_INFO_NEXT_MODE_S);
dinfo.next_addr = cpu_to_le64(kernel_entry);
+ dinfo.options = 0;
+ dinfo.boot_hart = 0;
+ dinfo_len = sizeof(dinfo);
}
- dinfo.options = 0;
- dinfo.boot_hart = 0;
- dinfo_len = sizeof(dinfo);
/**
* copy the dynamic firmware info. This information is specific to
@@ -374,7 +418,10 @@ void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
exit(1);
}
- rom_add_blob_fixed_as("mrom.finfo", &dinfo, dinfo_len,
+ rom_add_blob_fixed_as("mrom.finfo",
+ riscv_is_32bit(harts) ?
+ (void *)&dinfo32 : (void *)&dinfo,
+ dinfo_len,
rom_base + reset_vec_size,
&address_space_memory);
}
@@ -430,7 +477,9 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
}
rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec),
rom_base, &address_space_memory);
- riscv_rom_copy_firmware_info(machine, rom_base, rom_size, sizeof(reset_vec),
+ riscv_rom_copy_firmware_info(machine, harts,
+ rom_base, rom_size,
+ sizeof(reset_vec),
kernel_entry);
}
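
The boot.c changes above rework the helper contracts: the firmware load address becomes an in/out parameter, riscv_load_kernel() no longer returns the entry point, and kernel/initrd placement is tracked in a RISCVBootInfo that riscv_compute_fdt_addr() uses to reject DTB placements overlapping the loaded images. A condensed sketch of the resulting board-side flow, using only the signatures introduced above (demo_board_boot() is hypothetical, error handling omitted):

static void demo_board_boot(MachineState *machine, RISCVHartArrayState *harts,
                            hwaddr dram_base, hwaddr dram_size)
{
    RISCVBootInfo boot_info;
    hwaddr fw_load_addr = dram_base;     /* updated if the firmware is an ELF */
    target_ulong fw_end, kernel_start;
    uint64_t fdt_addr;

    fw_end = riscv_find_and_load_firmware(machine, RISCV64_BIOS_BIN,
                                          &fw_load_addr, NULL);

    riscv_boot_info_init(&boot_info, harts); /* zeroes sizes, records is_32bit */

    if (machine->kernel_filename) {
        kernel_start = riscv_calc_kernel_start_addr(&boot_info, fw_end);
        riscv_load_kernel(machine, &boot_info, kernel_start, true, NULL);
        /* The entry point is now read back from boot_info.image_low_addr. */
    }

    /*
     * Places the DTB 2MB-aligned near the end of dram (capped at 3GB on
     * RV32) and bails out if that would overlap the kernel/initrd ranges
     * recorded in boot_info.
     */
    fdt_addr = riscv_compute_fdt_addr(dram_base, dram_size, machine, &boot_info);
    riscv_load_fdt(fdt_addr, machine->fdt);
}
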
diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build
index f872674..c22f3a7 100644
--- a/hw/riscv/meson.build
+++ b/hw/riscv/meson.build
@@ -10,5 +10,8 @@ riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c'))
riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c'))
riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c'))
riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
+riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files(
+ 'riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c', 'riscv-iommu-hpm.c'))
+riscv_ss.add(when: 'CONFIG_MICROBLAZE_V', if_true: files('microblaze-v-generic.c'))
hw_arch += {'riscv': riscv_ss}
diff --git a/hw/riscv/microblaze-v-generic.c b/hw/riscv/microblaze-v-generic.c
new file mode 100644
index 0000000..e863c50
--- /dev/null
+++ b/hw/riscv/microblaze-v-generic.c
@@ -0,0 +1,189 @@
+/*
+ * QEMU model of Microblaze V generic board.
+ *
+ * based on hw/microblaze/petalogix_ml605_mmu.c
+ *
+ * Copyright (c) 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2011 PetaLogix
+ * Copyright (c) 2009 Edgar E. Iglesias.
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Written by Sai Pavan Boddu <sai.pavan.boddu@amd.com>
+ * and by Michal Simek <michal.simek@amd.com>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "hw/sysbus.h"
+#include "system/system.h"
+#include "net/net.h"
+#include "hw/boards.h"
+#include "hw/char/serial-mm.h"
+#include "system/address-spaces.h"
+#include "hw/char/xilinx_uartlite.h"
+#include "hw/misc/unimp.h"
+
+#define LMB_BRAM_SIZE (128 * KiB)
+#define MEMORY_BASEADDR 0x80000000
+#define INTC_BASEADDR 0x41200000
+#define TIMER_BASEADDR 0x41c00000
+#define TIMER_BASEADDR2 0x41c10000
+#define UARTLITE_BASEADDR 0x40600000
+#define ETHLITE_BASEADDR 0x40e00000
+#define UART16550_BASEADDR 0x44a10000
+#define AXIENET_BASEADDR 0x40c00000
+#define AXIDMA_BASEADDR 0x41e00000
+#define GPIO_BASEADDR 0x40000000
+#define GPIO_BASEADDR2 0x40010000
+#define GPIO_BASEADDR3 0x40020000
+#define I2C_BASEADDR 0x40800000
+#define QSPI_BASEADDR 0x44a00000
+
+#define TIMER_IRQ 0
+#define UARTLITE_IRQ 1
+#define UART16550_IRQ 4
+#define ETHLITE_IRQ 5
+#define TIMER_IRQ2 6
+#define AXIENET_IRQ 7
+#define AXIDMA_IRQ1 8
+#define AXIDMA_IRQ0 9
+
+static void mb_v_generic_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ DeviceState *dev, *dma, *eth0;
+ Object *ds, *cs;
+ int i;
+ RISCVCPU *cpu;
+ hwaddr ddr_base = MEMORY_BASEADDR;
+ MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1);
+ MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
+ qemu_irq irq[32];
+ MemoryRegion *sysmem = get_system_memory();
+
+ cpu = RISCV_CPU(object_new(machine->cpu_type));
+ object_property_set_bool(OBJECT(cpu), "h", false, NULL);
+ object_property_set_bool(OBJECT(cpu), "d", false, NULL);
+ qdev_realize(DEVICE(cpu), NULL, &error_abort);
+ /* Attach emulated BRAM through the LMB. */
+ memory_region_init_ram(phys_lmb_bram, NULL,
+ "mb_v.lmb_bram", LMB_BRAM_SIZE,
+ &error_fatal);
+ memory_region_add_subregion(sysmem, 0x00000000, phys_lmb_bram);
+
+ memory_region_init_ram(phys_ram, NULL, "mb_v.ram",
+ ram_size, &error_fatal);
+ memory_region_add_subregion(sysmem, ddr_base, phys_ram);
+
+ dev = qdev_new("xlnx.xps-intc");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
+ qdev_prop_set_uint32(dev, "kind-of-intr",
+ 1 << UARTLITE_IRQ);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
+ qdev_get_gpio_in(DEVICE(cpu), 11));
+ for (i = 0; i < 32; i++) {
+ irq[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ /* Uartlite */
+ dev = qdev_new(TYPE_XILINX_UARTLITE);
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
+ qdev_prop_set_chr(dev, "chardev", serial_hd(0));
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[UARTLITE_IRQ]);
+
+ /* Full uart */
+ serial_mm_init(sysmem, UART16550_BASEADDR + 0x1000, 2,
+ irq[UART16550_IRQ], 115200, serial_hd(1),
+ DEVICE_LITTLE_ENDIAN);
+
+ /* 2 timers at irq 0 @ 100 MHz. */
+ dev = qdev_new("xlnx.xps-timer");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
+ qdev_prop_set_uint32(dev, "one-timer-only", 0);
+ qdev_prop_set_uint32(dev, "clock-frequency", 100000000);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]);
+
+ /* 2 timers at irq 6 @ 100 MHz. */
+ dev = qdev_new("xlnx.xps-timer");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
+ qdev_prop_set_uint32(dev, "one-timer-only", 0);
+ qdev_prop_set_uint32(dev, "clock-frequency", 100000000);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR2);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ2]);
+
+ /* Emaclite */
+ dev = qdev_new("xlnx.xps-ethernetlite");
+ qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE);
+ qemu_configure_nic_device(dev, true, NULL);
+ qdev_prop_set_uint32(dev, "tx-ping-pong", 0);
+ qdev_prop_set_uint32(dev, "rx-ping-pong", 0);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]);
+
+ /* axi ethernet and dma initialization. */
+ eth0 = qdev_new("xlnx.axi-ethernet");
+ dma = qdev_new("xlnx.axi-dma");
+
+ /* FIXME: attach to the sysbus instead */
+ object_property_add_child(qdev_get_machine(), "xilinx-eth", OBJECT(eth0));
+ object_property_add_child(qdev_get_machine(), "xilinx-dma", OBJECT(dma));
+
+ ds = object_property_get_link(OBJECT(dma),
+ "axistream-connected-target", NULL);
+ cs = object_property_get_link(OBJECT(dma),
+ "axistream-control-connected-target", NULL);
+ qemu_configure_nic_device(eth0, true, NULL);
+ qdev_prop_set_uint32(eth0, "rxmem", 0x1000);
+ qdev_prop_set_uint32(eth0, "txmem", 0x1000);
+ object_property_set_link(OBJECT(eth0), "axistream-connected", ds,
+ &error_abort);
+ object_property_set_link(OBJECT(eth0), "axistream-control-connected", cs,
+ &error_abort);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(eth0), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(eth0), 0, AXIENET_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(eth0), 0, irq[AXIENET_IRQ]);
+
+ ds = object_property_get_link(OBJECT(eth0),
+ "axistream-connected-target", NULL);
+ cs = object_property_get_link(OBJECT(eth0),
+ "axistream-control-connected-target", NULL);
+ qdev_prop_set_uint32(dma, "freqhz", 100000000);
+ object_property_set_link(OBJECT(dma), "axistream-connected", ds,
+ &error_abort);
+ object_property_set_link(OBJECT(dma), "axistream-control-connected", cs,
+ &error_abort);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, AXIDMA_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, irq[AXIDMA_IRQ0]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dma), 1, irq[AXIDMA_IRQ1]);
+
+ /* unimplemented devices */
+ create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000);
+ create_unimplemented_device("gpio2", GPIO_BASEADDR2, 0x10000);
+ create_unimplemented_device("gpio3", GPIO_BASEADDR3, 0x10000);
+ create_unimplemented_device("i2c", I2C_BASEADDR, 0x10000);
+ create_unimplemented_device("qspi", QSPI_BASEADDR, 0x10000);
+}
+
+static void mb_v_generic_machine_init(MachineClass *mc)
+{
+ mc->desc = "AMD Microblaze-V generic platform";
+ mc->init = mb_v_generic_init;
+ mc->min_cpus = 1;
+ mc->max_cpus = 1;
+ mc->default_cpu_type = TYPE_RISCV_CPU_BASE;
+ mc->default_cpus = 1;
+}
+
+DEFINE_MACHINE("amd-microblaze-v-generic", mb_v_generic_machine_init)
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index 7725dfb..2e74783 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -39,6 +39,7 @@
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
+#include "qapi/visitor.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
@@ -51,8 +52,8 @@
#include "hw/riscv/microchip_pfsoc.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/sifive_plic.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
+#include "system/device_tree.h"
+#include "system/system.h"
/*
* The BIOS image used by this machine is called Hart Software Services (HSS).
@@ -61,9 +62,6 @@
#define BIOS_FILENAME "hss.bin"
#define RESET_VECTOR 0x20220000
-/* CLINT timebase frequency */
-#define CLINT_TIMEBASE_FREQ 1000000
-
/* GEM version */
#define GEM_REVISION 0x0107010c
@@ -193,6 +191,7 @@ static void microchip_pfsoc_soc_instance_init(Object *obj)
static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
+ MicrochipIcicleKitState *iks = MICROCHIP_ICICLE_KIT_MACHINE(ms);
MicrochipPFSoCState *s = MICROCHIP_PFSOC(dev);
const MemMapEntry *memmap = microchip_pfsoc_memmap;
MemoryRegion *system_memory = get_system_memory();
@@ -253,7 +252,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
memmap[MICROCHIP_PFSOC_CLINT].base + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
- CLINT_TIMEBASE_FREQ, false);
+ iks->clint_timebase_freq, false);
/* L2 cache controller */
create_unimplemented_device("microchip.pfsoc.l2cc",
@@ -479,7 +478,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
qspi_xip_mem);
}
-static void microchip_pfsoc_soc_class_init(ObjectClass *oc, void *data)
+static void microchip_pfsoc_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -516,11 +515,11 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
uint64_t mem_low_size, mem_high_size;
hwaddr firmware_load_addr;
const char *firmware_name;
- bool kernel_as_payload = false;
target_ulong firmware_end_addr, kernel_start_addr;
uint64_t kernel_entry;
- uint32_t fdt_load_addr;
+ uint64_t fdt_load_addr;
DriveInfo *dinfo = drive_get(IF_SD, 0, 0);
+ RISCVBootInfo boot_info;
/* Sanity check on RAM size */
if (machine->ram_size < mc->default_ram_size) {
@@ -578,65 +577,135 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
}
/*
- * We follow the following table to select which payload we execute.
- *
- * -bios | -kernel | payload
- * -------+------------+--------
- * N | N | HSS
- * Y | don't care | HSS
- * N | Y | kernel
+ * We follow the following table to select which firmware we use.
*
- * This ensures backwards compatibility with how we used to expose -bios
- * to users but allows them to run through direct kernel booting as well.
- *
- * When -kernel is used for direct boot, -dtb must be present to provide
- * a valid device tree for the board, as we don't generate device tree.
+ * -bios | -kernel | firmware
+ * --------------+------------+--------
+ * none | N | error
+ * none | Y | kernel
+ * NULL, default | N | BIOS_FILENAME
+ * NULL, default | Y | RISCV64_BIOS_BIN
+ * other | don't care | other
*/
-
- if (machine->kernel_filename && machine->dtb) {
- int fdt_size;
- machine->fdt = load_device_tree(machine->dtb, &fdt_size);
- if (!machine->fdt) {
- error_report("load_device_tree() failed");
+ if (machine->firmware && !strcmp(machine->firmware, "none")) {
+ if (!machine->kernel_filename) {
+ error_report("for -bios none, a kernel is required");
exit(1);
}
- firmware_name = RISCV64_BIOS_BIN;
- firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
- kernel_as_payload = true;
- }
-
- if (!kernel_as_payload) {
- firmware_name = BIOS_FILENAME;
+ firmware_name = NULL;
+ firmware_load_addr = RESET_VECTOR;
+ } else if (!machine->firmware || !strcmp(machine->firmware, "default")) {
+ if (machine->kernel_filename) {
+ firmware_name = RISCV64_BIOS_BIN;
+ firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
+ } else {
+ firmware_name = BIOS_FILENAME;
+ firmware_load_addr = RESET_VECTOR;
+ }
+ } else {
+ firmware_name = machine->firmware;
firmware_load_addr = RESET_VECTOR;
}
- /* Load the firmware */
- firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
- firmware_load_addr, NULL);
+ /* Load the firmware if necessary */
+ firmware_end_addr = firmware_load_addr;
+ if (firmware_name) {
+ char *filename = riscv_find_firmware(firmware_name, NULL);
+ if (filename) {
+ firmware_end_addr = riscv_load_firmware(filename,
+ &firmware_load_addr, NULL);
+ g_free(filename);
+ }
+ }
- if (kernel_as_payload) {
- kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
+ riscv_boot_info_init(&boot_info, &s->soc.u_cpus);
+ if (machine->kernel_filename) {
+ kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
- kernel_start_addr, true, NULL);
+ riscv_load_kernel(machine, &boot_info, kernel_start_addr,
+ true, NULL);
+ kernel_entry = boot_info.image_low_addr;
+
+ if (machine->dtb) {
+ int fdt_size;
+ machine->fdt = load_device_tree(machine->dtb, &fdt_size);
+ if (!machine->fdt) {
+ error_report("load_device_tree() failed");
+ exit(1);
+ }
+
+ /* Compute the FDT load address in DRAM */
+ hwaddr kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
+ hwaddr kernel_ram_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size;
+
+ if (kernel_entry - kernel_ram_base >= kernel_ram_size) {
+ kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_HI].base;
+ kernel_ram_size = mem_high_size;
+ }
+
+ fdt_load_addr = riscv_compute_fdt_addr(kernel_ram_base, kernel_ram_size,
+ machine, &boot_info);
+ riscv_load_fdt(fdt_load_addr, machine->fdt);
+ } else {
+ warn_report_once("The QEMU microchip-icicle-kit machine does not "
+ "generate a device tree, so no device tree is "
+ "being provided to the guest.");
+ fdt_load_addr = 0;
+ }
- /* Compute the fdt load address in dram */
- fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
- memmap[MICROCHIP_PFSOC_DRAM_LO].size,
- machine);
- riscv_load_fdt(fdt_load_addr, machine->fdt);
+ hwaddr start_addr;
+ if (firmware_name) {
+ start_addr = firmware_load_addr;
+ } else {
+ start_addr = kernel_entry;
+ }
/* Load the reset vector */
- riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, firmware_load_addr,
+ riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, start_addr,
memmap[MICROCHIP_PFSOC_ENVM_DATA].base,
memmap[MICROCHIP_PFSOC_ENVM_DATA].size,
kernel_entry, fdt_load_addr);
}
}
-static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data)
+static void microchip_icicle_kit_set_clint_timebase_freq(Object *obj,
+ Visitor *v,
+ const char *name,
+ void *opaque,
+ Error **errp)
+{
+ MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj);
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ s->clint_timebase_freq = value;
+}
+
+static void microchip_icicle_kit_get_clint_timebase_freq(Object *obj,
+ Visitor *v,
+ const char *name,
+ void *opaque,
+ Error **errp)
+{
+ MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj);
+ uint32_t value = s->clint_timebase_freq;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void microchip_icicle_kit_machine_instance_init(Object *obj)
+{
+ MicrochipIcicleKitState *m = MICROCHIP_ICICLE_KIT_MACHINE(obj);
+ m->clint_timebase_freq = 1000000;
+}
+
+static void microchip_icicle_kit_machine_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -647,6 +716,7 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data)
mc->min_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + 1;
mc->default_cpus = mc->min_cpus;
mc->default_ram_id = "microchip.icicle.kit.ram";
+ mc->auto_create_sdcard = true;
/*
* Map 513 MiB high memory, the minimum required high memory size, because
@@ -656,12 +726,20 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data)
* See memory_tests() in mss_ddr.c in the HSS source code.
*/
mc->default_ram_size = 1537 * MiB;
+
+ object_class_property_add(oc, "clint-timebase-frequency", "uint32_t",
+ microchip_icicle_kit_get_clint_timebase_freq,
+ microchip_icicle_kit_set_clint_timebase_freq,
+ NULL, NULL);
+ object_class_property_set_description(oc, "clint-timebase-frequency",
+ "Set CLINT timebase frequency in Hz.");
}
static const TypeInfo microchip_icicle_kit_machine_typeinfo = {
.name = MACHINE_TYPE_NAME("microchip-icicle-kit"),
.parent = TYPE_MACHINE,
.class_init = microchip_icicle_kit_machine_class_init,
+ .instance_init = microchip_icicle_kit_machine_instance_init,
.instance_size = sizeof(MicrochipIcicleKitState),
};
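
The hunks above replace the fixed CLINT_TIMEBASE_FREQ with a writable machine property. A small sketch of driving that property from C through the standard QOM accessors; the demo_* wrapper and the 50 MHz value are arbitrary, and it is assumed the same knob is reachable on the command line as -machine microchip-icicle-kit,clint-timebase-frequency=<Hz>:

static void demo_set_clint_timebase_freq(void)
{
    Object *m = qdev_get_machine();

    /* Defaults to 1000000, per the instance_init hunk above. */
    object_property_set_uint(m, "clint-timebase-frequency", 50 * 1000 * 1000,
                             &error_fatal);

    g_assert(object_property_get_uint(m, "clint-timebase-frequency",
                                      &error_abort) == 50 * 1000 * 1000);
}
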
diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c
index cf686f4..7a7b012 100644
--- a/hw/riscv/numa.c
+++ b/hw/riscv/numa.c
@@ -23,7 +23,7 @@
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/numa.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
static bool numa_enabled(const MachineState *ms)
{
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 436503f..d369a8a 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -27,7 +27,8 @@
#include "hw/misc/unimp.h"
#include "hw/riscv/boot.h"
#include "qemu/units.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
+#include "system/address-spaces.h"
/*
* This version of the OpenTitan machine currently supports
@@ -81,6 +82,7 @@ static void opentitan_machine_init(MachineState *machine)
OpenTitanState *s = OPENTITAN_MACHINE(machine);
const MemMapEntry *memmap = ibex_memmap;
MemoryRegion *sys_mem = get_system_memory();
+ RISCVBootInfo boot_info;
if (machine->ram_size != mc->default_ram_size) {
char *sz = size_to_str(mc->default_ram_size);
@@ -98,17 +100,19 @@ static void opentitan_machine_init(MachineState *machine)
memmap[IBEX_DEV_RAM].base, machine->ram);
if (machine->firmware) {
- riscv_load_firmware(machine->firmware, memmap[IBEX_DEV_RAM].base, NULL);
+ hwaddr firmware_load_addr = memmap[IBEX_DEV_RAM].base;
+ riscv_load_firmware(machine->firmware, &firmware_load_addr, NULL);
}
+ riscv_boot_info_init(&boot_info, &s->soc.cpus);
if (machine->kernel_filename) {
- riscv_load_kernel(machine, &s->soc.cpus,
+ riscv_load_kernel(machine, &boot_info,
memmap[IBEX_DEV_RAM].base,
false, NULL);
}
}
-static void opentitan_machine_class_init(ObjectClass *oc, void *data)
+static void opentitan_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -305,12 +309,11 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
memmap[IBEX_DEV_IBEX_CFG].base, memmap[IBEX_DEV_IBEX_CFG].size);
}
-static Property lowrisc_ibex_soc_props[] = {
+static const Property lowrisc_ibex_soc_props[] = {
DEFINE_PROP_UINT32("resetvec", LowRISCIbexSoCState, resetvec, 0x20000400),
- DEFINE_PROP_END_OF_LIST()
};
-static void lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data)
+static void lowrisc_ibex_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
new file mode 100644
index 0000000..1017d73
--- /dev/null
+++ b/hw/riscv/riscv-iommu-bits.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2022-2023 Rivos Inc.
+ * Copyright © 2023 FORTH-ICS/CARV
+ * Copyright © 2023 RISC-V IOMMU Task Group
+ *
+ * RISC-V IOMMU - Register Layout and Data Structures.
+ *
+ * Based on the IOMMU spec version 1.0, 3/2023
+ * https://github.com/riscv-non-isa/riscv-iommu
+ */
+
+#ifndef HW_RISCV_IOMMU_BITS_H
+#define HW_RISCV_IOMMU_BITS_H
+
+#define RISCV_IOMMU_SPEC_DOT_VER 0x010
+
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l))
+#endif
+
+/*
+ * struct riscv_iommu_fq_record - Fault/Event Queue Record
+ * See section 3.2 for more info.
+ */
+struct riscv_iommu_fq_record {
+ uint64_t hdr;
+ uint64_t _reserved;
+ uint64_t iotval;
+ uint64_t iotval2;
+};
+/* Header fields */
+#define RISCV_IOMMU_FQ_HDR_CAUSE GENMASK_ULL(11, 0)
+#define RISCV_IOMMU_FQ_HDR_PID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_FQ_HDR_PV BIT_ULL(32)
+#define RISCV_IOMMU_FQ_HDR_TTYPE GENMASK_ULL(39, 34)
+#define RISCV_IOMMU_FQ_HDR_DID GENMASK_ULL(63, 40)
+
+/*
+ * struct riscv_iommu_pq_record - PCIe Page Request record
+ * For more info on the PCIe Page Request queue, see chapter 3.3.
+ */
+struct riscv_iommu_pq_record {
+ uint64_t hdr;
+ uint64_t payload;
+};
+/* Header fields */
+#define RISCV_IOMMU_PREQ_HDR_PID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_PREQ_HDR_PV BIT_ULL(32)
+#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
+#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
+#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
+
+/* Payload fields */
+#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
+#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
+#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
+#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
+#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
+#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
+
+/* Common field positions */
+#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10)
+#define RISCV_IOMMU_QUEUE_LOGSZ_FIELD GENMASK_ULL(4, 0)
+#define RISCV_IOMMU_QUEUE_INDEX_FIELD GENMASK_ULL(31, 0)
+#define RISCV_IOMMU_QUEUE_ENABLE BIT(0)
+#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1)
+#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8)
+#define RISCV_IOMMU_QUEUE_OVERFLOW BIT(9)
+#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16)
+#define RISCV_IOMMU_QUEUE_BUSY BIT(17)
+#define RISCV_IOMMU_ATP_PPN_FIELD GENMASK_ULL(43, 0)
+#define RISCV_IOMMU_ATP_MODE_FIELD GENMASK_ULL(63, 60)
+
+/* 5.3 IOMMU Capabilities (64bits) */
+#define RISCV_IOMMU_REG_CAP 0x0000
+#define RISCV_IOMMU_CAP_VERSION GENMASK_ULL(7, 0)
+#define RISCV_IOMMU_CAP_SV32 BIT_ULL(8)
+#define RISCV_IOMMU_CAP_SV39 BIT_ULL(9)
+#define RISCV_IOMMU_CAP_SV48 BIT_ULL(10)
+#define RISCV_IOMMU_CAP_SV57 BIT_ULL(11)
+#define RISCV_IOMMU_CAP_SV32X4 BIT_ULL(16)
+#define RISCV_IOMMU_CAP_SV39X4 BIT_ULL(17)
+#define RISCV_IOMMU_CAP_SV48X4 BIT_ULL(18)
+#define RISCV_IOMMU_CAP_SV57X4 BIT_ULL(19)
+#define RISCV_IOMMU_CAP_MSI_FLAT BIT_ULL(22)
+#define RISCV_IOMMU_CAP_MSI_MRIF BIT_ULL(23)
+#define RISCV_IOMMU_CAP_ATS BIT_ULL(25)
+#define RISCV_IOMMU_CAP_T2GPA BIT_ULL(26)
+#define RISCV_IOMMU_CAP_IGS GENMASK_ULL(29, 28)
+#define RISCV_IOMMU_CAP_HPM BIT_ULL(30)
+#define RISCV_IOMMU_CAP_DBG BIT_ULL(31)
+#define RISCV_IOMMU_CAP_PAS GENMASK_ULL(37, 32)
+#define RISCV_IOMMU_CAP_PD8 BIT_ULL(38)
+#define RISCV_IOMMU_CAP_PD17 BIT_ULL(39)
+#define RISCV_IOMMU_CAP_PD20 BIT_ULL(40)
+
+enum riscv_iommu_igs_modes {
+ RISCV_IOMMU_CAP_IGS_MSI = 0,
+ RISCV_IOMMU_CAP_IGS_WSI,
+ RISCV_IOMMU_CAP_IGS_BOTH
+};
+
+/* 5.4 Features control register (32bits) */
+#define RISCV_IOMMU_REG_FCTL 0x0008
+#define RISCV_IOMMU_FCTL_BE BIT(0)
+#define RISCV_IOMMU_FCTL_WSI BIT(1)
+#define RISCV_IOMMU_FCTL_GXL BIT(2)
+
+/* 5.5 Device-directory-table pointer (64bits) */
+#define RISCV_IOMMU_REG_DDTP 0x0010
+#define RISCV_IOMMU_DDTP_MODE GENMASK_ULL(3, 0)
+#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4)
+#define RISCV_IOMMU_DDTP_PPN RISCV_IOMMU_PPN_FIELD
+
+enum riscv_iommu_ddtp_modes {
+ RISCV_IOMMU_DDTP_MODE_OFF = 0,
+ RISCV_IOMMU_DDTP_MODE_BARE = 1,
+ RISCV_IOMMU_DDTP_MODE_1LVL = 2,
+ RISCV_IOMMU_DDTP_MODE_2LVL = 3,
+ RISCV_IOMMU_DDTP_MODE_3LVL = 4,
+ RISCV_IOMMU_DDTP_MODE_MAX = 4
+};
+
+/* 5.6 Command Queue Base (64bits) */
+#define RISCV_IOMMU_REG_CQB 0x0018
+#define RISCV_IOMMU_CQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD
+#define RISCV_IOMMU_CQB_PPN RISCV_IOMMU_PPN_FIELD
+
+/* 5.7 Command Queue head (32bits) */
+#define RISCV_IOMMU_REG_CQH 0x0020
+
+/* 5.8 Command Queue tail (32bits) */
+#define RISCV_IOMMU_REG_CQT 0x0024
+
+/* 5.9 Fault Queue Base (64bits) */
+#define RISCV_IOMMU_REG_FQB 0x0028
+#define RISCV_IOMMU_FQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD
+#define RISCV_IOMMU_FQB_PPN RISCV_IOMMU_PPN_FIELD
+
+/* 5.10 Fault Queue Head (32bits) */
+#define RISCV_IOMMU_REG_FQH 0x0030
+
+/* 5.11 Fault Queue tail (32bits) */
+#define RISCV_IOMMU_REG_FQT 0x0034
+
+/* 5.12 Page Request Queue base (64bits) */
+#define RISCV_IOMMU_REG_PQB 0x0038
+#define RISCV_IOMMU_PQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD
+#define RISCV_IOMMU_PQB_PPN RISCV_IOMMU_PPN_FIELD
+
+/* 5.13 Page Request Queue head (32bits) */
+#define RISCV_IOMMU_REG_PQH 0x0040
+
+/* 5.14 Page Request Queue tail (32bits) */
+#define RISCV_IOMMU_REG_PQT 0x0044
+
+/* 5.15 Command Queue CSR (32bits) */
+#define RISCV_IOMMU_REG_CQCSR 0x0048
+#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_CQCSR_CQMF RISCV_IOMMU_QUEUE_MEM_FAULT
+#define RISCV_IOMMU_CQCSR_CMD_TO BIT(9)
+#define RISCV_IOMMU_CQCSR_CMD_ILL BIT(10)
+#define RISCV_IOMMU_CQCSR_FENCE_W_IP BIT(11)
+#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+/* 5.16 Fault Queue CSR (32bits) */
+#define RISCV_IOMMU_REG_FQCSR 0x004C
+#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_FQCSR_FQMF RISCV_IOMMU_QUEUE_MEM_FAULT
+#define RISCV_IOMMU_FQCSR_FQOF RISCV_IOMMU_QUEUE_OVERFLOW
+#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+/* 5.17 Page Request Queue CSR (32bits) */
+#define RISCV_IOMMU_REG_PQCSR 0x0050
+#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_PQCSR_PQMF RISCV_IOMMU_QUEUE_MEM_FAULT
+#define RISCV_IOMMU_PQCSR_PQOF RISCV_IOMMU_QUEUE_OVERFLOW
+#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+/* 5.18 Interrupt Pending Status (32bits) */
+#define RISCV_IOMMU_REG_IPSR 0x0054
+#define RISCV_IOMMU_IPSR_CIP BIT(0)
+#define RISCV_IOMMU_IPSR_FIP BIT(1)
+#define RISCV_IOMMU_IPSR_PIP BIT(3)
+
+enum {
+ RISCV_IOMMU_INTR_CQ,
+ RISCV_IOMMU_INTR_FQ,
+ RISCV_IOMMU_INTR_PM,
+ RISCV_IOMMU_INTR_PQ,
+ RISCV_IOMMU_INTR_COUNT
+};
+
+#define RISCV_IOMMU_IOCOUNT_NUM 31
+
+/* 5.19 Performance monitoring counter overflow status (32bits) */
+#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058
+#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0)
+
+/* 5.20 Performance monitoring counter inhibits (32bits) */
+#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C
+#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0)
+
+/* 5.21 Performance monitoring cycles counter (64bits) */
+#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060
+#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0)
+#define RISCV_IOMMU_IOHPMCYCLES_OVF BIT_ULL(63)
+
+/* 5.22 Performance monitoring event counters (31 * 64bits) */
+#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068
+#define RISCV_IOMMU_REG_IOHPMCTR(_n) \
+ (RISCV_IOMMU_REG_IOHPMCTR_BASE + (_n * 0x8))
+
+/* 5.23 Performance monitoring event selectors (31 * 64bits) */
+#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160
+#define RISCV_IOMMU_REG_IOHPMEVT(_n) \
+ (RISCV_IOMMU_REG_IOHPMEVT_BASE + (_n * 0x8))
+#define RISCV_IOMMU_IOHPMEVT_EVENT_ID GENMASK_ULL(14, 0)
+#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15)
+#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16)
+#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36)
+#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60)
+#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61)
+#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62)
+#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63)
+
+enum RISCV_IOMMU_HPMEVENT_id {
+ RISCV_IOMMU_HPMEVENT_INVALID = 0,
+ RISCV_IOMMU_HPMEVENT_URQ = 1,
+ RISCV_IOMMU_HPMEVENT_TRQ = 2,
+ RISCV_IOMMU_HPMEVENT_ATS_RQ = 3,
+ RISCV_IOMMU_HPMEVENT_TLB_MISS = 4,
+ RISCV_IOMMU_HPMEVENT_DD_WALK = 5,
+ RISCV_IOMMU_HPMEVENT_PD_WALK = 6,
+ RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7,
+ RISCV_IOMMU_HPMEVENT_G_WALKS = 8,
+ RISCV_IOMMU_HPMEVENT_MAX = 9
+};
+
+/* 5.24 Translation request IOVA (64bits) */
+#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258
+
+/* 5.25 Translation request control (64bits) */
+#define RISCV_IOMMU_REG_TR_REQ_CTL 0x0260
+#define RISCV_IOMMU_TR_REQ_CTL_GO_BUSY BIT_ULL(0)
+#define RISCV_IOMMU_TR_REQ_CTL_NW BIT_ULL(3)
+#define RISCV_IOMMU_TR_REQ_CTL_PID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_TR_REQ_CTL_DID GENMASK_ULL(63, 40)
+
+/* 5.26 Translation request response (64bits) */
+#define RISCV_IOMMU_REG_TR_RESPONSE 0x0268
+#define RISCV_IOMMU_TR_RESPONSE_FAULT BIT_ULL(0)
+#define RISCV_IOMMU_TR_RESPONSE_S BIT_ULL(9)
+#define RISCV_IOMMU_TR_RESPONSE_PPN RISCV_IOMMU_PPN_FIELD
+
+/* 5.27 Interrupt cause to vector (64bits) */
+#define RISCV_IOMMU_REG_ICVEC 0x02F8
+#define RISCV_IOMMU_ICVEC_CIV GENMASK_ULL(3, 0)
+#define RISCV_IOMMU_ICVEC_FIV GENMASK_ULL(7, 4)
+#define RISCV_IOMMU_ICVEC_PMIV GENMASK_ULL(11, 8)
+#define RISCV_IOMMU_ICVEC_PIV GENMASK_ULL(15, 12)
+
+/* 5.28 MSI Configuration table (32 * 64bits) */
+#define RISCV_IOMMU_REG_MSI_CONFIG 0x0300
+
+#define RISCV_IOMMU_REG_SIZE 0x1000
+
+#define RISCV_IOMMU_DDTE_VALID BIT_ULL(0)
+#define RISCV_IOMMU_DDTE_PPN RISCV_IOMMU_PPN_FIELD
+
+/* Struct riscv_iommu_dc - Device Context - section 2.1 */
+struct riscv_iommu_dc {
+ uint64_t tc;
+ uint64_t iohgatp;
+ uint64_t ta;
+ uint64_t fsc;
+ uint64_t msiptp;
+ uint64_t msi_addr_mask;
+ uint64_t msi_addr_pattern;
+ uint64_t _reserved;
+};
+
+/* Translation control fields */
+#define RISCV_IOMMU_DC_TC_V BIT_ULL(0)
+#define RISCV_IOMMU_DC_TC_EN_ATS BIT_ULL(1)
+#define RISCV_IOMMU_DC_TC_EN_PRI BIT_ULL(2)
+#define RISCV_IOMMU_DC_TC_T2GPA BIT_ULL(3)
+#define RISCV_IOMMU_DC_TC_DTF BIT_ULL(4)
+#define RISCV_IOMMU_DC_TC_PDTV BIT_ULL(5)
+#define RISCV_IOMMU_DC_TC_PRPR BIT_ULL(6)
+#define RISCV_IOMMU_DC_TC_GADE BIT_ULL(7)
+#define RISCV_IOMMU_DC_TC_SADE BIT_ULL(8)
+#define RISCV_IOMMU_DC_TC_DPE BIT_ULL(9)
+#define RISCV_IOMMU_DC_TC_SBE BIT_ULL(10)
+#define RISCV_IOMMU_DC_TC_SXL BIT_ULL(11)
+
+/* Second-stage (aka G-stage) context fields */
+#define RISCV_IOMMU_DC_IOHGATP_PPN RISCV_IOMMU_ATP_PPN_FIELD
+#define RISCV_IOMMU_DC_IOHGATP_GSCID GENMASK_ULL(59, 44)
+#define RISCV_IOMMU_DC_IOHGATP_MODE RISCV_IOMMU_ATP_MODE_FIELD
+
+enum riscv_iommu_dc_iohgatp_modes {
+ RISCV_IOMMU_DC_IOHGATP_MODE_BARE = 0,
+ RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4 = 8,
+ RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4 = 8,
+ RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4 = 9,
+ RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4 = 10
+};
+
+/* Translation attributes fields */
+#define RISCV_IOMMU_DC_TA_PSCID GENMASK_ULL(31, 12)
+
+/* First-stage context fields */
+#define RISCV_IOMMU_DC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD
+#define RISCV_IOMMU_DC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD
+
+/* Generic I/O MMU command structure - check section 3.1 */
+struct riscv_iommu_command {
+ uint64_t dword0;
+ uint64_t dword1;
+};
+
+#define RISCV_IOMMU_CMD_OPCODE GENMASK_ULL(6, 0)
+#define RISCV_IOMMU_CMD_FUNC GENMASK_ULL(9, 7)
+
+#define RISCV_IOMMU_CMD_IOTINVAL_OPCODE 1
+#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA 0
+#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA 1
+#define RISCV_IOMMU_CMD_IOTINVAL_AV BIT_ULL(10)
+#define RISCV_IOMMU_CMD_IOTINVAL_PSCID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_CMD_IOTINVAL_PSCV BIT_ULL(32)
+#define RISCV_IOMMU_CMD_IOTINVAL_GV BIT_ULL(33)
+#define RISCV_IOMMU_CMD_IOTINVAL_GSCID GENMASK_ULL(59, 44)
+
+#define RISCV_IOMMU_CMD_IOFENCE_OPCODE 2
+#define RISCV_IOMMU_CMD_IOFENCE_FUNC_C 0
+#define RISCV_IOMMU_CMD_IOFENCE_AV BIT_ULL(10)
+#define RISCV_IOMMU_CMD_IOFENCE_DATA GENMASK_ULL(63, 32)
+
+#define RISCV_IOMMU_CMD_IODIR_OPCODE 3
+#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT 0
+#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT 1
+#define RISCV_IOMMU_CMD_IODIR_PID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_CMD_IODIR_DV BIT_ULL(33)
+#define RISCV_IOMMU_CMD_IODIR_DID GENMASK_ULL(63, 40)
+
+/* 3.1.4 I/O MMU PCIe ATS */
+#define RISCV_IOMMU_CMD_ATS_OPCODE 4
+#define RISCV_IOMMU_CMD_ATS_FUNC_INVAL 0
+#define RISCV_IOMMU_CMD_ATS_FUNC_PRGR 1
+#define RISCV_IOMMU_CMD_ATS_PID GENMASK_ULL(31, 12)
+#define RISCV_IOMMU_CMD_ATS_PV BIT_ULL(32)
+#define RISCV_IOMMU_CMD_ATS_DSV BIT_ULL(33)
+#define RISCV_IOMMU_CMD_ATS_RID GENMASK_ULL(55, 40)
+#define RISCV_IOMMU_CMD_ATS_DSEG GENMASK_ULL(63, 56)
+/* dword1 is the ATS payload, two different payload types for INVAL and PRGR */
+
+/* ATS.PRGR payload */
+#define RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE GENMASK_ULL(47, 44)
+
+enum riscv_iommu_dc_fsc_atp_modes {
+ RISCV_IOMMU_DC_FSC_MODE_BARE = 0,
+ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 = 8,
+ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 = 8,
+ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48 = 9,
+ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57 = 10,
+ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8 = 1,
+ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17 = 2,
+ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20 = 3
+};
+
+enum riscv_iommu_fq_causes {
+ RISCV_IOMMU_FQ_CAUSE_INST_FAULT = 1,
+ RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED = 4,
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT = 5,
+ RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED = 6,
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT = 7,
+ RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S = 12,
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S = 13,
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S = 15,
+ RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS = 20,
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS = 21,
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS = 23,
+ RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED = 256,
+ RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT = 257,
+ RISCV_IOMMU_FQ_CAUSE_DDT_INVALID = 258,
+ RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED = 259,
+ RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED = 260,
+ RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT = 261,
+ RISCV_IOMMU_FQ_CAUSE_MSI_INVALID = 262,
+ RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED = 263,
+ RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT = 264,
+ RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT = 265,
+ RISCV_IOMMU_FQ_CAUSE_PDT_INVALID = 266,
+ RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED = 267,
+ RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED = 268,
+ RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED = 269,
+ RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED = 270,
+ RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED = 271,
+ RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR = 272,
+ RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT = 273,
+ RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED = 274
+};
+
+/* MSI page table pointer */
+#define RISCV_IOMMU_DC_MSIPTP_PPN RISCV_IOMMU_ATP_PPN_FIELD
+#define RISCV_IOMMU_DC_MSIPTP_MODE RISCV_IOMMU_ATP_MODE_FIELD
+#define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0
+#define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1
+
+/* 2.2 Process Directory Table */
+#define RISCV_IOMMU_PDTE_VALID BIT_ULL(0)
+#define RISCV_IOMMU_PDTE_PPN RISCV_IOMMU_PPN_FIELD
+
+/* Translation attributes fields */
+#define RISCV_IOMMU_PC_TA_V BIT_ULL(0)
+#define RISCV_IOMMU_PC_TA_RESERVED GENMASK_ULL(63, 32)
+
+/* First stage context fields */
+#define RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD
+#define RISCV_IOMMU_PC_FSC_RESERVED GENMASK_ULL(59, 44)
+
+enum riscv_iommu_fq_ttypes {
+ RISCV_IOMMU_FQ_TTYPE_NONE = 0,
+ RISCV_IOMMU_FQ_TTYPE_UADDR_INST_FETCH = 1,
+ RISCV_IOMMU_FQ_TTYPE_UADDR_RD = 2,
+ RISCV_IOMMU_FQ_TTYPE_UADDR_WR = 3,
+ RISCV_IOMMU_FQ_TTYPE_TADDR_INST_FETCH = 5,
+ RISCV_IOMMU_FQ_TTYPE_TADDR_RD = 6,
+ RISCV_IOMMU_FQ_TTYPE_TADDR_WR = 7,
+ RISCV_IOMMU_FQ_TTYPE_PCIE_ATS_REQ = 8,
+ RISCV_IOMMU_FW_TTYPE_PCIE_MSG_REQ = 9,
+};
+
+/*
+ * struct riscv_iommu_msi_pte - MSI Page Table Entry
+ */
+struct riscv_iommu_msi_pte {
+ uint64_t pte;
+ uint64_t mrif_info;
+};
+
+/* Fields on pte */
+#define RISCV_IOMMU_MSI_PTE_V BIT_ULL(0)
+#define RISCV_IOMMU_MSI_PTE_M GENMASK_ULL(2, 1)
+
+#define RISCV_IOMMU_MSI_PTE_M_MRIF 1
+#define RISCV_IOMMU_MSI_PTE_M_BASIC 3
+
+/* When M == 1 (MRIF mode) */
+#define RISCV_IOMMU_MSI_PTE_MRIF_ADDR GENMASK_ULL(53, 7)
+/* When M == 3 (basic mode) */
+#define RISCV_IOMMU_MSI_PTE_PPN RISCV_IOMMU_PPN_FIELD
+#define RISCV_IOMMU_MSI_PTE_C BIT_ULL(63)
+
+/* Fields on mrif_info */
+#define RISCV_IOMMU_MSI_MRIF_NID GENMASK_ULL(9, 0)
+#define RISCV_IOMMU_MSI_MRIF_NPPN RISCV_IOMMU_PPN_FIELD
+#define RISCV_IOMMU_MSI_MRIF_NID_MSB BIT_ULL(60)
+
+#endif /* HW_RISCV_IOMMU_BITS_H */
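
The header above describes every register and in-memory record as 64-bit words carved up with GENMASK_ULL() field masks. A small self-contained sketch of packing and unpacking such a field; field_get()/field_set() are illustrative stand-ins for the get_field()/set_field() helpers the IOMMU code itself uses:

#include <assert.h>
#include <stdint.h>
#include "riscv-iommu-bits.h"

static inline uint64_t field_get(uint64_t word, uint64_t mask)
{
    return (word & mask) >> __builtin_ctzll(mask);
}

static inline uint64_t field_set(uint64_t word, uint64_t mask, uint64_t val)
{
    return (word & ~mask) | ((val << __builtin_ctzll(mask)) & mask);
}

/* Build and decode a fault-queue record header. */
static uint64_t demo_fq_hdr(void)
{
    uint64_t hdr = 0;

    hdr = field_set(hdr, RISCV_IOMMU_FQ_HDR_CAUSE,
                    RISCV_IOMMU_FQ_CAUSE_DDT_INVALID);   /* bits 11:0  = 258  */
    hdr = field_set(hdr, RISCV_IOMMU_FQ_HDR_DID, 0x10);  /* bits 63:40 = 0x10 */

    assert(field_get(hdr, RISCV_IOMMU_FQ_HDR_CAUSE) == 258);
    return hdr;
}
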
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
new file mode 100644
index 0000000..c5034bf
--- /dev/null
+++ b/hw/riscv/riscv-iommu-hpm.c
@@ -0,0 +1,381 @@
+/*
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "cpu_bits.h"
+#include "riscv-iommu-hpm.h"
+#include "riscv-iommu.h"
+#include "riscv-iommu-bits.h"
+#include "trace.h"
+
+/* For now we assume a 1GHz IOMMU HPM frequency, so one cycle is 1ns. */
+static inline uint64_t get_cycles(void)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
+{
+ const uint64_t cycle = riscv_iommu_reg_get64(
+ s, RISCV_IOMMU_REG_IOHPMCYCLES);
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+ const uint64_t ctr_prev = s->hpmcycle_prev;
+ const uint64_t ctr_val = s->hpmcycle_val;
+
+ trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);
+
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
+ /*
+ * Counter should not increment if inhibit bit is set. We can't really
+ * stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated
+ * counter value to indicate that counter was not incremented.
+ */
+ return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
+ }
+
+ return (ctr_val + get_cycles() - ctr_prev) |
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
+}
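/*
 * Illustrative worked example for the read above, with assumed values:
 * hpmcycle_val = 50 and hpmcycle_prev = 1000ns snapshotted at the last
 * write/enable, a read at virtual time 1300ns yields 50 + (1300 - 1000) =
 * 350, OR-ed with whatever OVF bit is latched in IOHPMCYCLES.  With
 * IOCOUNTINH.CY set, the stored snapshot (50) is returned instead, so the
 * counter appears frozen without stopping QEMU_CLOCK_VIRTUAL.
 */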
+
+static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
+{
+ const uint32_t off = ctr_idx << 3;
+ uint64_t cntr_val;
+
+ cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
+
+ trace_riscv_iommu_hpm_incr_ctr(cntr_val);
+
+ /* Handle the overflow scenario. */
+ if (cntr_val == UINT64_MAX) {
+ /*
+ * Generate interrupt only if OF bit is clear. +1 to offset the cycle
+ * register OF bit.
+ */
+ const uint32_t ovf =
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
+ BIT(ctr_idx + 1), 0);
+ if (!get_field(ovf, BIT(ctr_idx + 1))) {
+ riscv_iommu_reg_mod64(s,
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
+ RISCV_IOMMU_IOHPMEVT_OF,
+ 0);
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
+ }
+ }
+}
+
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ unsigned event_id)
+{
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+ uint32_t did_gscid;
+ uint32_t pid_pscid;
+ uint32_t ctr_idx;
+ gpointer value;
+ uint32_t ctrs;
+ uint64_t evt;
+
+ if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
+ return;
+ }
+
+ value = g_hash_table_lookup(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id));
+ if (value == NULL) {
+ return;
+ }
+
+ for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
+ ctr_idx = ctz32(ctrs);
+ if (get_field(inhibit, BIT(ctr_idx + 1))) {
+ continue;
+ }
+
+ evt = riscv_iommu_reg_get64(s,
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
+
+ /*
+ * It's quite possible that the event ID has been changed in the counter
+ * but the hash table hasn't been updated yet. We don't want to increment
+ * the counter for the old event ID.
+ */
+ if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
+ continue;
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
+ did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
+ pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
+ } else {
+ did_gscid = ctx->devid;
+ pid_pscid = ctx->process_id;
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
+ /*
+ * If the transaction does not have a valid process_id, counter
+ * increments if device_id matches DID_GSCID. If the transaction
+ * has a valid process_id, counter increments if device_id
+ * matches DID_GSCID and process_id matches PID_PSCID. See
+ * IOMMU Specification, Chapter 5.23. Performance-monitoring
+ * event selector.
+ */
+ if (ctx->process_id &&
+ get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
+ continue;
+ }
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
+ uint32_t mask = ~0;
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
+ /*
+ * 1001 1011 mask = GSCID
+ * 0000 0111 mask = mask ^ (mask + 1)
+ * 1111 1000 mask = ~mask;
+ */
+ mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
+ mask = mask ^ (mask + 1);
+ mask = ~mask;
+ }
+
+ if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
+ (did_gscid & mask)) {
+ continue;
+ }
+ }
+
+ hpm_incr_ctr(s, ctr_idx);
+ }
+}
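/*
 * Illustrative worked example for the DMASK partial match above, with an
 * assumed programmed DID_GSCID of 0x9b:
 *
 *     0x9b              = 1001 1011
 *     0x9b ^ (0x9b + 1) = 0000 0111   (trailing ones plus the first zero)
 *     mask = ~that      = 1111 1000
 *
 * Only the bits above the trailing-ones run are compared, so the event
 * matches device/GSC IDs 0x98 through 0x9f.
 */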
+
+/* Timer callback for cycle counter overflow. */
+void riscv_iommu_hpm_timer_cb(void *priv)
+{
+ RISCVIOMMUState *s = priv;
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+ uint32_t ovf;
+
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
+ return;
+ }
+
+ if (s->irq_overflow_left > 0) {
+ uint64_t irq_trigger_at =
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
+ timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
+ s->irq_overflow_left = 0;
+ return;
+ }
+
+ ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
+ if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
+ /*
+ * We don't need to set hpmcycle_val to zero and update hpmcycle_prev to
+ * current clock value. The way we calculate iohpmcycs will overflow
+ * and return the correct value. This avoids the need to synchronize
+ * timer callback and write callback.
+ */
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
+ RISCV_IOMMU_IOCOUNTOVF_CY, 0);
+ riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
+ RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
+ }
+}
+
+static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
+{
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+ uint64_t overflow_at, overflow_ns;
+
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
+ return;
+ }
+
+ /*
+ * We are using INT64_MAX here instead of UINT64_MAX because the cycle counter
+ * has 63-bit precision and INT64_MAX is the maximum value it can store.
+ */
+ if (value) {
+ overflow_ns = INT64_MAX - value + 1;
+ } else {
+ overflow_ns = INT64_MAX;
+ }
+
+ overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
+
+ if (overflow_at > INT64_MAX) {
+ s->irq_overflow_left = overflow_at - INT64_MAX;
+ overflow_at = INT64_MAX;
+ }
+
+ timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
+}
+
+/* Updates the internal cycle counter state when iocntinh:CY is changed. */
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
+{
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+
+ /* We only need to process CY bit toggle. */
+ if (!(inhibit ^ prev_cy_inh)) {
+ return;
+ }
+
+ trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);
+
+ if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
+ /*
+ * Cycle counter is enabled. Just start the timer again and update
+ * the clock snapshot value to point to the current time to make
+ * sure iohpmcycles read is correct.
+ */
+ s->hpmcycle_prev = get_cycles();
+ hpm_setup_timer(s, s->hpmcycle_val);
+ } else {
+ /*
+ * Cycle counter is disabled. Stop the timer and update the cycle
+ * counter to record the current value which is last programmed
+ * value + the cycles passed so far.
+ */
+ s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
+ timer_del(s->hpm_timer);
+ }
+}
+
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
+{
+ const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
+
+ trace_riscv_iommu_hpm_cycle_write(ovf, val);
+
+ /*
+ * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
+ */
+ if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
+ !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
+ RISCV_IOMMU_IOCOUNTOVF_CY);
+ }
+
+ s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
+ s->hpmcycle_prev = get_cycles();
+ hpm_setup_timer(s, s->hpmcycle_val);
+}
+
+static inline bool check_valid_event_id(unsigned event_id)
+{
+ return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
+ event_id < RISCV_IOMMU_HPMEVENT_MAX;
+}
+
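+/*
+ * s->hpm_event_ctr_map maps an event id (key) to a bitmask of counters
+ * (value) programmed with that event. This g_hash_table_find() callback
+ * matches the event currently using counter pair[0] and returns its event
+ * id through pair[1].
+ */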
+static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
+{
+ uint32_t *pair = udata;
+
+ if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
+ pair[1] = GPOINTER_TO_UINT(key);
+ return true;
+ }
+
+ return false;
+}
+
+/* Caller must check ctr_idx against hpm_cntrs to see if it's supported. */
+static void update_event_map(RISCVIOMMUState *s, uint64_t value,
+ uint32_t ctr_idx)
+{
+ unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
+ uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
+ uint32_t new_value = 1 << ctr_idx;
+ gpointer data;
+
+ /*
+     * If the EventID field is RISCV_IOMMU_HPMEVENT_INVALID,
+ * remove the current mapping.
+ */
+ if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
+ data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
+
+ new_value = GPOINTER_TO_UINT(data) & ~(new_value);
+ if (new_value != 0) {
+ g_hash_table_replace(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(pair[1]),
+ GUINT_TO_POINTER(new_value));
+ } else {
+ g_hash_table_remove(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(pair[1]));
+ }
+
+ return;
+ }
+
+ /* Update the counter mask if the event is already enabled. */
+ if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id),
+ NULL,
+ &data)) {
+ new_value |= GPOINTER_TO_UINT(data);
+ }
+
+ g_hash_table_insert(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id),
+ GUINT_TO_POINTER(new_value));
+}
+
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
+{
+ const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
+ uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
+
+ if (ctr_idx >= s->hpm_cntrs) {
+ return;
+ }
+
+ trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);
+
+ /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
+ if (get_field(ovf, BIT(ctr_idx + 1)) &&
+ !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
+ /* +1 to offset CYCLE register OF bit. */
+ riscv_iommu_reg_mod32(
+ s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
+ }
+
+ if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
+ /* Reset EventID (WARL) field to invalid. */
+ val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
+ RISCV_IOMMU_HPMEVENT_INVALID);
+ riscv_iommu_reg_set64(s, evt_reg, val);
+ }
+
+ update_event_map(s, val, ctr_idx);
+}
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
new file mode 100644
index 0000000..5fc4ef2
--- /dev/null
+++ b/hw/riscv/riscv-iommu-hpm.h
@@ -0,0 +1,33 @@
+/*
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_IOMMU_HPM_H
+#define HW_RISCV_IOMMU_HPM_H
+
+#include "qom/object.h"
+#include "hw/riscv/riscv-iommu.h"
+
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ unsigned event_id);
+void riscv_iommu_hpm_timer_cb(void *priv);
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
+
+#endif
diff --git a/hw/riscv/riscv-iommu-pci.c b/hw/riscv/riscv-iommu-pci.c
new file mode 100644
index 0000000..cdb4a7a
--- /dev/null
+++ b/hw/riscv/riscv-iommu-pci.c
@@ -0,0 +1,217 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "exec/target_page.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/qdev-properties.h"
+#include "hw/riscv/riscv_hart.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/host-utils.h"
+#include "qom/object.h"
+
+#include "cpu_bits.h"
+#include "riscv-iommu.h"
+#include "riscv-iommu-bits.h"
+#include "trace.h"
+
+/* RISC-V IOMMU PCI Device Emulation */
+#define RISCV_PCI_CLASS_SYSTEM_IOMMU 0x0806
+
+/*
+ * 4 MSIx vectors for ICVEC, one for MRIF. The spec mentions in
+ * the "Placement and data flow" section that:
+ *
+ * "The interfaces related to recording an incoming MSI in a memory-resident
+ * interrupt file (MRIF) are implementation-specific. The partitioning of
+ * responsibility between the IOMMU and the IO bridge for recording the
+ * incoming MSI in an MRIF and generating the associated notice MSI are
+ * implementation-specific."
+ *
+ * We're making a design decision to create the MSIx for MRIF in the
+ * IOMMU MSIx emulation.
+ */
+#define RISCV_IOMMU_PCI_MSIX_VECTORS 5
+
+/*
+ * 4 vectors that can be used by civ, fiv, pmiv and piv. The number of
+ * vectors is 2^N, where N is the number of writable bits in each cause
+ * field. For 4 vectors we'll write 0b11 (3) in each field.
+ */
+#define RISCV_IOMMU_PCI_ICVEC_VECTORS 0x3333
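+/*
+ * Illustrative breakdown of 0x3333, assuming the same nibble layout as the
+ * ICVEC fields decoded by riscv_iommu_get_icvec_vector() in riscv-iommu.c:
+ * civ in bits [3:0], fiv in [7:4], pmiv in [11:8] and piv in [15:12],
+ * each set to 0x3 (two writable bits, i.e. 4 vectors).
+ */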
+
+typedef struct RISCVIOMMUStatePci {
+ PCIDevice pci; /* Parent PCIe device state */
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint8_t revision;
+ MemoryRegion bar0; /* PCI BAR (including MSI-x config) */
+ RISCVIOMMUState iommu; /* common IOMMU state */
+} RISCVIOMMUStatePci;
+
+/* interrupt delivery callback */
+static void riscv_iommu_pci_notify(RISCVIOMMUState *iommu, unsigned vector)
+{
+ RISCVIOMMUStatePci *s = container_of(iommu, RISCVIOMMUStatePci, iommu);
+
+ if (msix_enabled(&(s->pci))) {
+ msix_notify(&(s->pci), vector);
+ }
+}
+
+static void riscv_iommu_pci_realize(PCIDevice *dev, Error **errp)
+{
+ RISCVIOMMUStatePci *s = DO_UPCAST(RISCVIOMMUStatePci, pci, dev);
+ RISCVIOMMUState *iommu = &s->iommu;
+ uint8_t *pci_conf = dev->config;
+ Error *err = NULL;
+
+ pci_set_word(pci_conf + PCI_VENDOR_ID, s->vendor_id);
+ pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, s->vendor_id);
+ pci_set_word(pci_conf + PCI_DEVICE_ID, s->device_id);
+ pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, s->device_id);
+ pci_set_byte(pci_conf + PCI_REVISION_ID, s->revision);
+
+ /* Set device id for trace / debug */
+ DEVICE(iommu)->id = g_strdup_printf("%02x:%02x.%01x",
+ pci_dev_bus_num(dev), PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+ qdev_realize(DEVICE(iommu), NULL, errp);
+
+ memory_region_init(&s->bar0, OBJECT(s), "riscv-iommu-bar0",
+ QEMU_ALIGN_UP(memory_region_size(&iommu->regs_mr), TARGET_PAGE_SIZE));
+ memory_region_add_subregion(&s->bar0, 0, &iommu->regs_mr);
+
+ pcie_endpoint_cap_init(dev, 0);
+
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64, &s->bar0);
+
+ int ret = msix_init(dev, RISCV_IOMMU_PCI_MSIX_VECTORS,
+ &s->bar0, 0, RISCV_IOMMU_REG_MSI_CONFIG,
+ &s->bar0, 0, RISCV_IOMMU_REG_MSI_CONFIG + 256, 0, &err);
+
+ if (ret == -ENOTSUP) {
+ /*
+         * MSI-X is not supported by the platform.
+         * The driver should use timer/polling-based notification handlers.
+ */
+ warn_report_err(err);
+ } else if (ret < 0) {
+ error_propagate(errp, err);
+ return;
+ } else {
+        /* Mark all MSI-X vectors as used */
+ for (int i = 0; i < RISCV_IOMMU_PCI_MSIX_VECTORS; i++) {
+ msix_vector_use(dev, i);
+ }
+
+ iommu->notify = riscv_iommu_pci_notify;
+ }
+
+ PCIBus *bus = pci_device_root_bus(dev);
+ if (!bus) {
+ error_setg(errp, "can't find PCIe root port for %02x:%02x.%x",
+ pci_bus_num(pci_get_bus(dev)), PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ return;
+ }
+
+ riscv_iommu_pci_setup_iommu(iommu, bus, errp);
+}
+
+static void riscv_iommu_pci_exit(PCIDevice *pci_dev)
+{
+ pci_setup_iommu(pci_device_root_bus(pci_dev), NULL, NULL);
+}
+
+static const VMStateDescription riscv_iommu_vmstate = {
+ .name = "riscv-iommu",
+ .unmigratable = 1
+};
+
+static void riscv_iommu_pci_init(Object *obj)
+{
+ RISCVIOMMUStatePci *s = RISCV_IOMMU_PCI(obj);
+ RISCVIOMMUState *iommu = &s->iommu;
+
+ object_initialize_child(obj, "iommu", iommu, TYPE_RISCV_IOMMU);
+ qdev_alias_all_properties(DEVICE(iommu), obj);
+
+ iommu->icvec_avail_vectors = RISCV_IOMMU_PCI_ICVEC_VECTORS;
+ riscv_iommu_set_cap_igs(iommu, RISCV_IOMMU_CAP_IGS_MSI);
+}
+
+static const Property riscv_iommu_pci_properties[] = {
+ DEFINE_PROP_UINT16("vendor-id", RISCVIOMMUStatePci, vendor_id,
+ PCI_VENDOR_ID_REDHAT),
+ DEFINE_PROP_UINT16("device-id", RISCVIOMMUStatePci, device_id,
+ PCI_DEVICE_ID_REDHAT_RISCV_IOMMU),
+ DEFINE_PROP_UINT8("revision", RISCVIOMMUStatePci, revision, 0x01),
+};
+
+static void riscv_iommu_pci_reset_hold(Object *obj, ResetType type)
+{
+ RISCVIOMMUStatePci *pci = RISCV_IOMMU_PCI(obj);
+ RISCVIOMMUState *iommu = &pci->iommu;
+
+ riscv_iommu_reset(iommu);
+
+ trace_riscv_iommu_pci_reset_hold(type);
+}
+
+static void riscv_iommu_pci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = riscv_iommu_pci_reset_hold;
+
+ k->realize = riscv_iommu_pci_realize;
+ k->exit = riscv_iommu_pci_exit;
+ k->class_id = RISCV_PCI_CLASS_SYSTEM_IOMMU;
+ dc->desc = "RISCV-IOMMU DMA Remapping device";
+ dc->vmsd = &riscv_iommu_vmstate;
+ dc->hotpluggable = false;
+ dc->user_creatable = true;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ device_class_set_props(dc, riscv_iommu_pci_properties);
+}
+
+static const TypeInfo riscv_iommu_pci = {
+ .name = TYPE_RISCV_IOMMU_PCI,
+ .parent = TYPE_PCI_DEVICE,
+ .class_init = riscv_iommu_pci_class_init,
+ .instance_init = riscv_iommu_pci_init,
+ .instance_size = sizeof(RISCVIOMMUStatePci),
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { },
+ },
+};
+
+static void riscv_iommu_register_pci_types(void)
+{
+ type_register_static(&riscv_iommu_pci);
+}
+
+type_init(riscv_iommu_register_pci_types);
diff --git a/hw/riscv/riscv-iommu-sys.c b/hw/riscv/riscv-iommu-sys.c
new file mode 100644
index 0000000..e34d00a
--- /dev/null
+++ b/hw/riscv/riscv-iommu-sys.c
@@ -0,0 +1,248 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU Platform Device
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+#include "trace.h"
+
+#include "riscv-iommu.h"
+
+#define RISCV_IOMMU_SYSDEV_ICVEC_VECTORS 0x3333
+
+#define RISCV_IOMMU_PCI_MSIX_VECTORS 5
+
+/* RISC-V IOMMU System Platform Device Emulation */
+
+struct RISCVIOMMUStateSys {
+ SysBusDevice parent;
+ uint64_t addr;
+ uint32_t base_irq;
+ DeviceState *irqchip;
+ RISCVIOMMUState iommu;
+
+ /* Wired int support */
+ qemu_irq irqs[RISCV_IOMMU_INTR_COUNT];
+
+ /* Memory Regions for MSIX table and pending bit entries. */
+ MemoryRegion msix_table_mmio;
+ MemoryRegion msix_pba_mmio;
+ uint8_t *msix_table;
+ uint8_t *msix_pba;
+};
+
+static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ RISCVIOMMUStateSys *s = opaque;
+
+ g_assert(addr + size <= RISCV_IOMMU_PCI_MSIX_VECTORS * PCI_MSIX_ENTRY_SIZE);
+ return pci_get_long(s->msix_table + addr);
+}
+
+static void msix_table_mmio_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ RISCVIOMMUStateSys *s = opaque;
+
+ g_assert(addr + size <= RISCV_IOMMU_PCI_MSIX_VECTORS * PCI_MSIX_ENTRY_SIZE);
+ pci_set_long(s->msix_table + addr, val);
+}
+
+static const MemoryRegionOps msix_table_mmio_ops = {
+ .read = msix_table_mmio_read,
+ .write = msix_table_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ RISCVIOMMUStateSys *s = opaque;
+
+ return pci_get_long(s->msix_pba + addr);
+}
+
+static void msix_pba_mmio_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+}
+
+static const MemoryRegionOps msix_pba_mmio_ops = {
+ .read = msix_pba_mmio_read,
+ .write = msix_pba_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .max_access_size = 4,
+ },
+};
+
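+/*
+ * Lay out a minimal MSI-X table and PBA inside the IOMMU register window,
+ * at the same offsets the PCI variant passes to msix_init():
+ * RISCV_IOMMU_REG_MSI_CONFIG and RISCV_IOMMU_REG_MSI_CONFIG + 256.
+ */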
+static void riscv_iommu_sysdev_init_msi(RISCVIOMMUStateSys *s,
+ uint32_t n_vectors)
+{
+ RISCVIOMMUState *iommu = &s->iommu;
+ uint32_t table_size = n_vectors * PCI_MSIX_ENTRY_SIZE;
+ uint32_t table_offset = RISCV_IOMMU_REG_MSI_CONFIG;
+ uint32_t pba_size = QEMU_ALIGN_UP(n_vectors, 64) / 8;
+ uint32_t pba_offset = RISCV_IOMMU_REG_MSI_CONFIG + 256;
+
+ s->msix_table = g_malloc0(table_size);
+ s->msix_pba = g_malloc0(pba_size);
+
+ memory_region_init_io(&s->msix_table_mmio, OBJECT(s), &msix_table_mmio_ops,
+ s, "msix-table", table_size);
+ memory_region_add_subregion(&iommu->regs_mr, table_offset,
+ &s->msix_table_mmio);
+
+ memory_region_init_io(&s->msix_pba_mmio, OBJECT(s), &msix_pba_mmio_ops, s,
+ "msix-pba", pba_size);
+ memory_region_add_subregion(&iommu->regs_mr, pba_offset,
+ &s->msix_pba_mmio);
+}
+
+static void riscv_iommu_sysdev_send_MSI(RISCVIOMMUStateSys *s,
+ uint32_t vector)
+{
+ uint8_t *table_entry = s->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
+ uint64_t msi_addr = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
+ uint32_t msi_data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
+ MemTxResult result;
+
+ address_space_stl_le(&address_space_memory, msi_addr,
+ msi_data, MEMTXATTRS_UNSPECIFIED, &result);
+ trace_riscv_iommu_sys_msi_sent(vector, msi_addr, msi_data, result);
+}
+
+static void riscv_iommu_sysdev_notify(RISCVIOMMUState *iommu,
+ unsigned vector)
+{
+ RISCVIOMMUStateSys *s = container_of(iommu, RISCVIOMMUStateSys, iommu);
+ uint32_t fctl = riscv_iommu_reg_get32(iommu, RISCV_IOMMU_REG_FCTL);
+
+ if (fctl & RISCV_IOMMU_FCTL_WSI) {
+ qemu_irq_pulse(s->irqs[vector]);
+ trace_riscv_iommu_sys_irq_sent(vector);
+ return;
+ }
+
+ riscv_iommu_sysdev_send_MSI(s, vector);
+}
+
+static void riscv_iommu_sys_realize(DeviceState *dev, Error **errp)
+{
+ RISCVIOMMUStateSys *s = RISCV_IOMMU_SYS(dev);
+ SysBusDevice *sysdev = SYS_BUS_DEVICE(s);
+ PCIBus *pci_bus;
+ qemu_irq irq;
+
+ qdev_realize(DEVICE(&s->iommu), NULL, errp);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iommu.regs_mr);
+ if (s->addr) {
+ sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, s->addr);
+ }
+
+ pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);
+ if (pci_bus) {
+ riscv_iommu_pci_setup_iommu(&s->iommu, pci_bus, errp);
+ }
+
+ s->iommu.notify = riscv_iommu_sysdev_notify;
+
+ /* 4 IRQs are defined starting from s->base_irq */
+ for (int i = 0; i < RISCV_IOMMU_INTR_COUNT; i++) {
+ sysbus_init_irq(sysdev, &s->irqs[i]);
+ irq = qdev_get_gpio_in(s->irqchip, s->base_irq + i);
+ sysbus_connect_irq(sysdev, i, irq);
+ }
+
+ riscv_iommu_sysdev_init_msi(s, RISCV_IOMMU_PCI_MSIX_VECTORS);
+}
+
+static void riscv_iommu_sys_init(Object *obj)
+{
+ RISCVIOMMUStateSys *s = RISCV_IOMMU_SYS(obj);
+ RISCVIOMMUState *iommu = &s->iommu;
+
+ object_initialize_child(obj, "iommu", iommu, TYPE_RISCV_IOMMU);
+ qdev_alias_all_properties(DEVICE(iommu), obj);
+
+ iommu->icvec_avail_vectors = RISCV_IOMMU_SYSDEV_ICVEC_VECTORS;
+ riscv_iommu_set_cap_igs(iommu, RISCV_IOMMU_CAP_IGS_BOTH);
+}
+
+static const Property riscv_iommu_sys_properties[] = {
+ DEFINE_PROP_UINT64("addr", RISCVIOMMUStateSys, addr, 0),
+ DEFINE_PROP_UINT32("base-irq", RISCVIOMMUStateSys, base_irq, 0),
+ DEFINE_PROP_LINK("irqchip", RISCVIOMMUStateSys, irqchip,
+ TYPE_DEVICE, DeviceState *),
+};
+
+static void riscv_iommu_sys_reset_hold(Object *obj, ResetType type)
+{
+ RISCVIOMMUStateSys *sys = RISCV_IOMMU_SYS(obj);
+ RISCVIOMMUState *iommu = &sys->iommu;
+
+ riscv_iommu_reset(iommu);
+
+ trace_riscv_iommu_sys_reset_hold(type);
+}
+
+static void riscv_iommu_sys_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = riscv_iommu_sys_reset_hold;
+
+ dc->realize = riscv_iommu_sys_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ device_class_set_props(dc, riscv_iommu_sys_properties);
+}
+
+static const TypeInfo riscv_iommu_sys = {
+ .name = TYPE_RISCV_IOMMU_SYS,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .class_init = riscv_iommu_sys_class_init,
+ .instance_init = riscv_iommu_sys_init,
+ .instance_size = sizeof(RISCVIOMMUStateSys),
+};
+
+static void riscv_iommu_register_sys(void)
+{
+ type_register_static(&riscv_iommu_sys);
+}
+
+type_init(riscv_iommu_register_sys)
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
new file mode 100644
index 0000000..a877e5d
--- /dev/null
+++ b/hw/riscv/riscv-iommu.c
@@ -0,0 +1,2679 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU
+ *
+ * Copyright (C) 2021-2023, Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_device.h"
+#include "hw/qdev-properties.h"
+#include "hw/riscv/riscv_hart.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+
+#include "cpu_bits.h"
+#include "riscv-iommu.h"
+#include "riscv-iommu-bits.h"
+#include "riscv-iommu-hpm.h"
+#include "trace.h"
+
+#define LIMIT_CACHE_CTX (1U << 7)
+#define LIMIT_CACHE_IOT (1U << 20)
+
+/* Physical page number conversions */
+#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
+#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
+
+typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
+
+/* Device assigned I/O address space */
+struct RISCVIOMMUSpace {
+ IOMMUMemoryRegion iova_mr; /* IOVA memory region for attached device */
+ AddressSpace iova_as; /* IOVA address space for attached device */
+ RISCVIOMMUState *iommu; /* Managing IOMMU device state */
+ uint32_t devid; /* Requester identifier, AKA device_id */
+ bool notifier; /* IOMMU unmap notifier enabled */
+ QLIST_ENTRY(RISCVIOMMUSpace) list;
+};
+
+typedef enum RISCVIOMMUTransTag {
+ RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
+ RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
+ RISCV_IOMMU_TRANS_TAG_VG, /* G-stage only */
+ RISCV_IOMMU_TRANS_TAG_VN, /* Nested translation */
+} RISCVIOMMUTransTag;
+
+/* Address translation cache entry */
+struct RISCVIOMMUEntry {
+ RISCVIOMMUTransTag tag; /* Translation Tag */
+ uint64_t iova:44; /* IOVA Page Number */
+ uint64_t pscid:20; /* Process Soft-Context identifier */
+ uint64_t phys:44; /* Physical Page Number */
+ uint64_t gscid:16; /* Guest Soft-Context identifier */
+ uint64_t perm:2; /* IOMMU_RW flags */
+};
+
+/* IOMMU index for transactions without process_id specified. */
+#define RISCV_IOMMU_NOPROCID 0
+
+static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
+{
+ switch (vec_type) {
+ case RISCV_IOMMU_INTR_CQ:
+ return icvec & RISCV_IOMMU_ICVEC_CIV;
+ case RISCV_IOMMU_INTR_FQ:
+ return (icvec & RISCV_IOMMU_ICVEC_FIV) >> 4;
+ case RISCV_IOMMU_INTR_PM:
+ return (icvec & RISCV_IOMMU_ICVEC_PMIV) >> 8;
+ case RISCV_IOMMU_INTR_PQ:
+ return (icvec & RISCV_IOMMU_ICVEC_PIV) >> 12;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
+{
+ uint32_t ipsr, icvec, vector;
+
+ if (!s->notify) {
+ return;
+ }
+
+ icvec = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_ICVEC);
+ ipsr = riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, (1 << vec_type), 0);
+
+ if (!(ipsr & (1 << vec_type))) {
+ vector = riscv_iommu_get_icvec_vector(icvec, vec_type);
+ s->notify(s, vector);
+ trace_riscv_iommu_notify_int_vector(vec_type, vector);
+ }
+}
+
+static void riscv_iommu_fault(RISCVIOMMUState *s,
+ struct riscv_iommu_fq_record *ev)
+{
+ uint32_t ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR);
+ uint32_t head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQH) & s->fq_mask;
+ uint32_t tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQT) & s->fq_mask;
+ uint32_t next = (tail + 1) & s->fq_mask;
+ uint32_t devid = get_field(ev->hdr, RISCV_IOMMU_FQ_HDR_DID);
+
+ trace_riscv_iommu_flt(s->parent_obj.id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), ev->hdr, ev->iotval);
+
+ if (!(ctrl & RISCV_IOMMU_FQCSR_FQON) ||
+ !!(ctrl & (RISCV_IOMMU_FQCSR_FQOF | RISCV_IOMMU_FQCSR_FQMF))) {
+ return;
+ }
+
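+    /* The queue is full when advancing the tail would reach the head. */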
+ if (head == next) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR,
+ RISCV_IOMMU_FQCSR_FQOF, 0);
+ } else {
+ dma_addr_t addr = s->fq_addr + tail * sizeof(*ev);
+ if (dma_memory_write(s->target_as, addr, ev, sizeof(*ev),
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR,
+ RISCV_IOMMU_FQCSR_FQMF, 0);
+ } else {
+ riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_FQT, next);
+ }
+ }
+
+ if (ctrl & RISCV_IOMMU_FQCSR_FIE) {
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_FQ);
+ }
+}
+
+static void riscv_iommu_pri(RISCVIOMMUState *s,
+ struct riscv_iommu_pq_record *pr)
+{
+ uint32_t ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR);
+ uint32_t head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQH) & s->pq_mask;
+ uint32_t tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQT) & s->pq_mask;
+ uint32_t next = (tail + 1) & s->pq_mask;
+ uint32_t devid = get_field(pr->hdr, RISCV_IOMMU_PREQ_HDR_DID);
+
+ trace_riscv_iommu_pri(s->parent_obj.id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), pr->payload);
+
+ if (!(ctrl & RISCV_IOMMU_PQCSR_PQON) ||
+ !!(ctrl & (RISCV_IOMMU_PQCSR_PQOF | RISCV_IOMMU_PQCSR_PQMF))) {
+ return;
+ }
+
+ if (head == next) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR,
+ RISCV_IOMMU_PQCSR_PQOF, 0);
+ } else {
+ dma_addr_t addr = s->pq_addr + tail * sizeof(*pr);
+ if (dma_memory_write(s->target_as, addr, pr, sizeof(*pr),
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR,
+ RISCV_IOMMU_PQCSR_PQMF, 0);
+ } else {
+ riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_PQT, next);
+ }
+ }
+
+ if (ctrl & RISCV_IOMMU_PQCSR_PIE) {
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PQ);
+ }
+}
+
+/*
+ * Discards all bits from 'val' whose matching bits in the same
+ * positions in the mask 'ext' are zeros, and packs the remaining
+ * bits from 'val' contiguously at the least-significant end of the
+ * result, keeping the same bit order as 'val' and filling any
+ * other bits at the most-significant end of the result with zeros.
+ *
+ * For example, for the following 'val' and 'ext', the return 'ret'
+ * will be:
+ *
+ * val = a b c d e f g h
+ * ext = 1 0 1 0 0 1 1 0
+ * ret = 0 0 0 0 a c f g
+ *
+ * This function, taken from the riscv-iommu 1.0 spec, section 2.3.3
+ * "Process to translate addresses of MSIs", is similar to the x86
+ * bit-manipulation instruction PEXT (parallel bits extract).
+ */
+static uint64_t riscv_iommu_pext_u64(uint64_t val, uint64_t ext)
+{
+ uint64_t ret = 0;
+ uint64_t rot = 1;
+
+ while (ext) {
+ if (ext & 1) {
+ if (val & 1) {
+ ret |= rot;
+ }
+ rot <<= 1;
+ }
+ val >>= 1;
+ ext >>= 1;
+ }
+
+ return ret;
+}
+
+/* Check if GPA matches MSI/MRIF pattern. */
+static bool riscv_iommu_msi_check(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ dma_addr_t gpa)
+{
+ if (!s->enable_msi) {
+ return false;
+ }
+
+ if (get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_MODE) !=
+ RISCV_IOMMU_DC_MSIPTP_MODE_FLAT) {
+ return false; /* Invalid MSI/MRIF mode */
+ }
+
+ if ((PPN_DOWN(gpa) ^ ctx->msi_addr_pattern) & ~ctx->msi_addr_mask) {
+ return false; /* GPA not in MSI range defined by AIA IMSIC rules. */
+ }
+
+ return true;
+}
+
+/*
+ * RISCV IOMMU Address Translation Lookup - Page Table Walk
+ *
+ * Note: Code is based on get_physical_address() from target/riscv/cpu_helper.c.
+ * Both implementations could be merged into a single helper function in the
+ * future. They are kept separate for now, as error reporting and flow
+ * specifics are sufficiently different to warrant separate implementations.
+ *
+ * @s : IOMMU Device State
+ * @ctx : Translation context for device id and process address space id.
+ * @iotlb : translation data: physical address and access mode.
+ * @return : success or fault cause code.
+ */
+static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ IOMMUTLBEntry *iotlb)
+{
+ dma_addr_t addr, base;
+ uint64_t satp, gatp, pte;
+ bool en_s, en_g;
+ struct {
+ unsigned char step;
+ unsigned char levels;
+ unsigned char ptidxbits;
+ unsigned char ptesize;
+ } sc[2];
+ /* Translation stage phase */
+ enum {
+ S_STAGE = 0,
+ G_STAGE = 1,
+ } pass;
+ MemTxResult ret;
+
+ satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD);
+ gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
+
+ en_s = satp != RISCV_IOMMU_DC_FSC_MODE_BARE;
+ en_g = gatp != RISCV_IOMMU_DC_IOHGATP_MODE_BARE;
+
+ /*
+ * Early check for MSI address match when IOVA == GPA.
+ * Note that the (!en_s) condition means that the MSI
+ * page table may only be used when guest pages are
+ * mapped using the g-stage page table, whether single-
+ * or two-stage paging is enabled. It's unavoidable though,
+ * because the spec mandates that we do a first-stage
+ * translation before we check the MSI page table, which
+ * means we can't do an early MSI check unless we have
+ * strictly !en_s.
+ */
+ if (!en_s && (iotlb->perm & IOMMU_WO) &&
+ riscv_iommu_msi_check(s, ctx, iotlb->iova)) {
+ iotlb->target_as = &s->trap_as;
+ iotlb->translated_addr = iotlb->iova;
+ iotlb->addr_mask = ~TARGET_PAGE_MASK;
+ return 0;
+ }
+
+ /* Exit early for pass-through mode. */
+ if (!(en_s || en_g)) {
+ iotlb->translated_addr = iotlb->iova;
+ iotlb->addr_mask = ~TARGET_PAGE_MASK;
+ /* Allow R/W in pass-through mode */
+ iotlb->perm = IOMMU_RW;
+ return 0;
+ }
+
+ /* S/G translation parameters. */
+ for (pass = 0; pass < 2; pass++) {
+ uint32_t sv_mode;
+
+ sc[pass].step = 0;
+ if (pass ? (s->fctl & RISCV_IOMMU_FCTL_GXL) :
+ (ctx->tc & RISCV_IOMMU_DC_TC_SXL)) {
+ /* 32bit mode for GXL/SXL == 1 */
+ switch (pass ? gatp : satp) {
+ case RISCV_IOMMU_DC_IOHGATP_MODE_BARE:
+ sc[pass].levels = 0;
+ sc[pass].ptidxbits = 0;
+ sc[pass].ptesize = 0;
+ break;
+ case RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4:
+ sv_mode = pass ? RISCV_IOMMU_CAP_SV32X4 : RISCV_IOMMU_CAP_SV32;
+ if (!(s->cap & sv_mode)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ sc[pass].levels = 2;
+ sc[pass].ptidxbits = 10;
+ sc[pass].ptesize = 4;
+ break;
+ default:
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ } else {
+ /* 64bit mode for GXL/SXL == 0 */
+ switch (pass ? gatp : satp) {
+ case RISCV_IOMMU_DC_IOHGATP_MODE_BARE:
+ sc[pass].levels = 0;
+ sc[pass].ptidxbits = 0;
+ sc[pass].ptesize = 0;
+ break;
+ case RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4:
+ sv_mode = pass ? RISCV_IOMMU_CAP_SV39X4 : RISCV_IOMMU_CAP_SV39;
+ if (!(s->cap & sv_mode)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ sc[pass].levels = 3;
+ sc[pass].ptidxbits = 9;
+ sc[pass].ptesize = 8;
+ break;
+ case RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4:
+ sv_mode = pass ? RISCV_IOMMU_CAP_SV48X4 : RISCV_IOMMU_CAP_SV48;
+ if (!(s->cap & sv_mode)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ sc[pass].levels = 4;
+ sc[pass].ptidxbits = 9;
+ sc[pass].ptesize = 8;
+ break;
+ case RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4:
+ sv_mode = pass ? RISCV_IOMMU_CAP_SV57X4 : RISCV_IOMMU_CAP_SV57;
+ if (!(s->cap & sv_mode)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ sc[pass].levels = 5;
+ sc[pass].ptidxbits = 9;
+ sc[pass].ptesize = 8;
+ break;
+ default:
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ }
+    }
+
+ /* S/G stages translation tables root pointers */
+ gatp = PPN_PHYS(get_field(ctx->gatp, RISCV_IOMMU_ATP_PPN_FIELD));
+ satp = PPN_PHYS(get_field(ctx->satp, RISCV_IOMMU_ATP_PPN_FIELD));
+ addr = (en_s && en_g) ? satp : iotlb->iova;
+ base = en_g ? gatp : satp;
+ pass = en_g ? G_STAGE : S_STAGE;
+
+ do {
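+        /*
+         * The "x4" G-stage modes use a root page table four times larger
+         * than the S-stage one, so the first G-stage lookup consumes 2 extra
+         * index bits; hence the widening only when pass != 0 and step == 0.
+         */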
+ const unsigned widened = (pass && !sc[pass].step) ? 2 : 0;
+ const unsigned va_bits = widened + sc[pass].ptidxbits;
+ const unsigned va_skip = TARGET_PAGE_BITS + sc[pass].ptidxbits *
+ (sc[pass].levels - 1 - sc[pass].step);
+ const unsigned idx = (addr >> va_skip) & ((1 << va_bits) - 1);
+ const dma_addr_t pte_addr = base + idx * sc[pass].ptesize;
+ const bool ade =
+ ctx->tc & (pass ? RISCV_IOMMU_DC_TC_GADE : RISCV_IOMMU_DC_TC_SADE);
+
+ /* Address range check before first level lookup */
+ if (!sc[pass].step) {
+ const uint64_t va_len = va_skip + va_bits;
+ const uint64_t va_mask = (1ULL << va_len) - 1;
+
+ if (pass == S_STAGE && va_len > 32) {
+ target_ulong mask, masked_msbs;
+
+ mask = (1L << (TARGET_LONG_BITS - (va_len - 1))) - 1;
+ masked_msbs = (addr >> (va_len - 1)) & mask;
+
+ if (masked_msbs != 0 && masked_msbs != mask) {
+ return (iotlb->perm & IOMMU_WO) ?
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S :
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S;
+ }
+ } else {
+ if ((addr & va_mask) != addr) {
+ return (iotlb->perm & IOMMU_WO) ?
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS :
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS;
+ }
+ }
+ }
+
+ if (pass == S_STAGE) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
+ } else {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
+ }
+
+ /* Read page table entry */
+ if (sc[pass].ptesize == 4) {
+ uint32_t pte32 = 0;
+ ret = ldl_le_dma(s->target_as, pte_addr, &pte32,
+ MEMTXATTRS_UNSPECIFIED);
+ pte = pte32;
+ } else {
+ ret = ldq_le_dma(s->target_as, pte_addr, &pte,
+ MEMTXATTRS_UNSPECIFIED);
+ }
+ if (ret != MEMTX_OK) {
+ return (iotlb->perm & IOMMU_WO) ? RISCV_IOMMU_FQ_CAUSE_WR_FAULT
+ : RISCV_IOMMU_FQ_CAUSE_RD_FAULT;
+ }
+
+ sc[pass].step++;
+ hwaddr ppn = pte >> PTE_PPN_SHIFT;
+
+ if (!(pte & PTE_V)) {
+ break; /* Invalid PTE */
+ } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
+ base = PPN_PHYS(ppn); /* Inner PTE, continue walking */
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+ break; /* Reserved leaf PTE flags: PTE_W */
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+ break; /* Reserved leaf PTE flags: PTE_W + PTE_X */
+ } else if (ppn & ((1ULL << (va_skip - TARGET_PAGE_BITS)) - 1)) {
+ break; /* Misaligned PPN */
+ } else if ((iotlb->perm & IOMMU_RO) && !(pte & PTE_R)) {
+ break; /* Read access check failed */
+ } else if ((iotlb->perm & IOMMU_WO) && !(pte & PTE_W)) {
+ break; /* Write access check failed */
+ } else if ((iotlb->perm & IOMMU_RO) && !ade && !(pte & PTE_A)) {
+ break; /* Access bit not set */
+ } else if ((iotlb->perm & IOMMU_WO) && !ade && !(pte & PTE_D)) {
+ break; /* Dirty bit not set */
+ } else {
+ /* Leaf PTE, translation completed. */
+ sc[pass].step = sc[pass].levels;
+ base = PPN_PHYS(ppn) | (addr & ((1ULL << va_skip) - 1));
+ /* Update address mask based on smallest translation granularity */
+ iotlb->addr_mask &= (1ULL << va_skip) - 1;
+ /* Continue with S-Stage translation? */
+ if (pass && sc[0].step != sc[0].levels) {
+ pass = S_STAGE;
+ addr = iotlb->iova;
+ continue;
+ }
+ /* Translation phase completed (GPA or SPA) */
+ iotlb->translated_addr = base;
+ iotlb->perm = (pte & PTE_W) ? ((pte & PTE_R) ? IOMMU_RW : IOMMU_WO)
+ : IOMMU_RO;
+
+ /* Check MSI GPA address match */
+ if (pass == S_STAGE && (iotlb->perm & IOMMU_WO) &&
+ riscv_iommu_msi_check(s, ctx, base)) {
+ /* Trap MSI writes and return GPA address. */
+ iotlb->target_as = &s->trap_as;
+ iotlb->addr_mask = ~TARGET_PAGE_MASK;
+ return 0;
+ }
+
+ /* Continue with G-Stage translation? */
+ if (!pass && en_g) {
+ pass = G_STAGE;
+ addr = base;
+ base = gatp;
+ sc[pass].step = 0;
+ continue;
+ }
+
+ return 0;
+ }
+
+ if (sc[pass].step == sc[pass].levels) {
+ break; /* Can't find leaf PTE */
+ }
+
+ /* Continue with G-Stage translation? */
+ if (!pass && en_g) {
+ pass = G_STAGE;
+ addr = base;
+ base = gatp;
+ sc[pass].step = 0;
+ }
+ } while (1);
+
+ return (iotlb->perm & IOMMU_WO) ?
+ (pass ? RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS :
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S) :
+ (pass ? RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS :
+ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S);
+}
+
+static void riscv_iommu_report_fault(RISCVIOMMUState *s,
+ RISCVIOMMUContext *ctx,
+ uint32_t fault_type, uint32_t cause,
+ bool pv,
+ uint64_t iotval, uint64_t iotval2)
+{
+ struct riscv_iommu_fq_record ev = { 0 };
+
+ if (ctx->tc & RISCV_IOMMU_DC_TC_DTF) {
+ switch (cause) {
+ case RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED:
+ case RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT:
+ case RISCV_IOMMU_FQ_CAUSE_DDT_INVALID:
+ case RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED:
+ case RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED:
+ case RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR:
+ case RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT:
+ break;
+ default:
+ /* DTF prevents reporting a fault for this given cause */
+ return;
+ }
+ }
+
+ ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_CAUSE, cause);
+ ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_TTYPE, fault_type);
+ ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_DID, ctx->devid);
+ ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_PV, true);
+
+ if (pv) {
+ ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_PID, ctx->process_id);
+ }
+
+ ev.iotval = iotval;
+ ev.iotval2 = iotval2;
+
+ riscv_iommu_fault(s, &ev);
+}
+
+/* Redirect MSI write for given GPA. */
+static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s,
+ RISCVIOMMUContext *ctx, uint64_t gpa, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ MemTxResult res;
+ dma_addr_t addr;
+ uint64_t intn;
+ uint32_t n190;
+ uint64_t pte[2];
+ int fault_type = RISCV_IOMMU_FQ_TTYPE_UADDR_WR;
+ int cause;
+
+ /* Interrupt File Number */
+ intn = riscv_iommu_pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask);
+ if (intn >= 256) {
+ /* Interrupt file number out of range */
+ res = MEMTX_ACCESS_ERROR;
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT;
+ goto err;
+ }
+
+ /* fetch MSI PTE */
+ addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN));
+ addr = addr | (intn * sizeof(pte));
+ res = dma_memory_read(s->target_as, addr, &pte, sizeof(pte),
+ MEMTXATTRS_UNSPECIFIED);
+ if (res != MEMTX_OK) {
+ if (res == MEMTX_DECODE_ERROR) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED;
+ } else {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT;
+ }
+ goto err;
+ }
+
+ le64_to_cpus(&pte[0]);
+ le64_to_cpus(&pte[1]);
+
+ if (!(pte[0] & RISCV_IOMMU_MSI_PTE_V) || (pte[0] & RISCV_IOMMU_MSI_PTE_C)) {
+ /*
+ * The spec mentions that: "If msipte.C == 1, then further
+ * processing to interpret the PTE is implementation
+         * defined." We'll abort with cause = 262 for this
+ * case too.
+ */
+ res = MEMTX_ACCESS_ERROR;
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_INVALID;
+ goto err;
+ }
+
+ switch (get_field(pte[0], RISCV_IOMMU_MSI_PTE_M)) {
+ case RISCV_IOMMU_MSI_PTE_M_BASIC:
+ /* MSI Pass-through mode */
+ addr = PPN_PHYS(get_field(pte[0], RISCV_IOMMU_MSI_PTE_PPN));
+
+ trace_riscv_iommu_msi(s->parent_obj.id, PCI_BUS_NUM(ctx->devid),
+ PCI_SLOT(ctx->devid), PCI_FUNC(ctx->devid),
+ gpa, addr);
+
+ res = dma_memory_write(s->target_as, addr, &data, size, attrs);
+ if (res != MEMTX_OK) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT;
+ goto err;
+ }
+
+ return MEMTX_OK;
+ case RISCV_IOMMU_MSI_PTE_M_MRIF:
+ /* MRIF mode, continue. */
+ break;
+ default:
+ res = MEMTX_ACCESS_ERROR;
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED;
+ goto err;
+ }
+
+ /*
+     * Report an error for interrupt identities exceeding the maximum allowed
+     * for an IMSIC interrupt file (2047), or when the destination address is
+     * not 32-bit aligned. See IOMMU Specification, Chapter 2.3,
+     * "MSI page tables".
+ */
+ if ((data > 2047) || (gpa & 3)) {
+ res = MEMTX_ACCESS_ERROR;
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED;
+ goto err;
+ }
+
+    /* MSI MRIF mode, non-atomic pending bit update */
+
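+    /*
+     * The interrupt identity selects the pending bit: bits [10:6] of the MSI
+     * data pick the 64-bit doubleword within the MRIF, bits [5:0] pick the
+     * bit within that doubleword.
+     */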
+ /* MRIF pending bit address */
+ addr = get_field(pte[0], RISCV_IOMMU_MSI_PTE_MRIF_ADDR) << 9;
+ addr = addr | ((data & 0x7c0) >> 3);
+
+ trace_riscv_iommu_msi(s->parent_obj.id, PCI_BUS_NUM(ctx->devid),
+ PCI_SLOT(ctx->devid), PCI_FUNC(ctx->devid),
+ gpa, addr);
+
+ /* MRIF pending bit mask */
+ data = 1ULL << (data & 0x03f);
+ res = dma_memory_read(s->target_as, addr, &intn, sizeof(intn), attrs);
+ if (res != MEMTX_OK) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT;
+ goto err;
+ }
+
+ intn = intn | data;
+ res = dma_memory_write(s->target_as, addr, &intn, sizeof(intn), attrs);
+ if (res != MEMTX_OK) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT;
+ goto err;
+ }
+
+ /* Get MRIF enable bits */
+ addr = addr + sizeof(intn);
+ res = dma_memory_read(s->target_as, addr, &intn, sizeof(intn), attrs);
+ if (res != MEMTX_OK) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT;
+ goto err;
+ }
+
+ if (!(intn & data)) {
+ /* notification disabled, MRIF update completed. */
+ return MEMTX_OK;
+ }
+
+ /* Send notification message */
+ addr = PPN_PHYS(get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NPPN));
+ n190 = get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NID) |
+ (get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NID_MSB) << 10);
+
+ res = dma_memory_write(s->target_as, addr, &n190, sizeof(n190), attrs);
+ if (res != MEMTX_OK) {
+ cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT;
+ goto err;
+ }
+
+ trace_riscv_iommu_mrif_notification(s->parent_obj.id, n190, addr);
+
+ return MEMTX_OK;
+
+err:
+ riscv_iommu_report_fault(s, ctx, fault_type, cause,
+ !!ctx->process_id, 0, 0);
+ return res;
+}
+
+/*
+ * Check device context configuration as described by the
+ * riscv-iommu spec section "Device-context configuration
+ * checks".
+ */
+static bool riscv_iommu_validate_device_ctx(RISCVIOMMUState *s,
+ RISCVIOMMUContext *ctx)
+{
+ uint32_t fsc_mode, msi_mode;
+ uint64_t gatp;
+
+ if (!(s->cap & RISCV_IOMMU_CAP_ATS) &&
+ (ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS ||
+ ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI ||
+ ctx->tc & RISCV_IOMMU_DC_TC_PRPR)) {
+ return false;
+ }
+
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS) &&
+ (ctx->tc & RISCV_IOMMU_DC_TC_T2GPA ||
+ ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI)) {
+ return false;
+ }
+
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI) &&
+ ctx->tc & RISCV_IOMMU_DC_TC_PRPR) {
+ return false;
+ }
+
+ if (!(s->cap & RISCV_IOMMU_CAP_T2GPA) &&
+ ctx->tc & RISCV_IOMMU_DC_TC_T2GPA) {
+ return false;
+ }
+
+ if (s->cap & RISCV_IOMMU_CAP_MSI_FLAT) {
+ msi_mode = get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_MODE);
+
+ if (msi_mode != RISCV_IOMMU_DC_MSIPTP_MODE_OFF &&
+ msi_mode != RISCV_IOMMU_DC_MSIPTP_MODE_FLAT) {
+ return false;
+ }
+ }
+
+ gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
+ if (ctx->tc & RISCV_IOMMU_DC_TC_T2GPA &&
+ gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) {
+ return false;
+ }
+
+ fsc_mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE);
+
+ if (ctx->tc & RISCV_IOMMU_DC_TC_PDTV) {
+ switch (fsc_mode) {
+ case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8:
+ if (!(s->cap & RISCV_IOMMU_CAP_PD8)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17:
+ if (!(s->cap & RISCV_IOMMU_CAP_PD17)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20:
+ if (!(s->cap & RISCV_IOMMU_CAP_PD20)) {
+ return false;
+ }
+ break;
+ }
+ } else {
+ /* DC.tc.PDTV is 0 */
+ if (ctx->tc & RISCV_IOMMU_DC_TC_DPE) {
+ return false;
+ }
+
+ if (ctx->tc & RISCV_IOMMU_DC_TC_SXL) {
+ if (fsc_mode == RISCV_IOMMU_CAP_SV32 &&
+ !(s->cap & RISCV_IOMMU_CAP_SV32)) {
+ return false;
+ }
+ } else {
+ switch (fsc_mode) {
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV39)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV48)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV57)) {
+ return false;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+     * CAP_END is always zero (only one endianness). FCTL_BE is
+ * always zero (little-endian accesses). Thus TC_SBE must
+ * always be LE, i.e. zero.
+ */
+ if (ctx->tc & RISCV_IOMMU_DC_TC_SBE) {
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Validate process context (PC) according to section
+ * "Process-context configuration checks".
+ */
+static bool riscv_iommu_validate_process_ctx(RISCVIOMMUState *s,
+ RISCVIOMMUContext *ctx)
+{
+ uint32_t mode;
+
+ if (get_field(ctx->ta, RISCV_IOMMU_PC_TA_RESERVED)) {
+ return false;
+ }
+
+ if (get_field(ctx->satp, RISCV_IOMMU_PC_FSC_RESERVED)) {
+ return false;
+ }
+
+ mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE);
+ switch (mode) {
+ case RISCV_IOMMU_DC_FSC_MODE_BARE:
+ /* sv39 and sv32 modes have the same value (8) */
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39:
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48:
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57:
+ break;
+ default:
+ return false;
+ }
+
+ if (ctx->tc & RISCV_IOMMU_DC_TC_SXL) {
+ if (mode == RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 &&
+ !(s->cap & RISCV_IOMMU_CAP_SV32)) {
+ return false;
+ }
+ } else {
+ switch (mode) {
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV39)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV48)) {
+ return false;
+ }
+ break;
+ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57:
+ if (!(s->cap & RISCV_IOMMU_CAP_SV57)) {
+ return false;
+ }
+ break;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * RISC-V IOMMU Device Context Lookup - Device Directory Tree Walk
+ *
+ * @s : IOMMU Device State
+ * @ctx : Device Translation Context with devid and process_id set.
+ * @return : success or fault code.
+ */
+static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
+{
+ const uint64_t ddtp = s->ddtp;
+ unsigned mode = get_field(ddtp, RISCV_IOMMU_DDTP_MODE);
+ dma_addr_t addr = PPN_PHYS(get_field(ddtp, RISCV_IOMMU_DDTP_PPN));
+ struct riscv_iommu_dc dc;
+ /* Device Context format: 0: extended (64 bytes) | 1: base (32 bytes) */
+ const int dc_fmt = !s->enable_msi;
+ const size_t dc_len = sizeof(dc) >> dc_fmt;
+ int depth;
+ uint64_t de;
+
+ switch (mode) {
+ case RISCV_IOMMU_DDTP_MODE_OFF:
+ return RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED;
+
+ case RISCV_IOMMU_DDTP_MODE_BARE:
+ /* mock up pass-through translation context */
+ ctx->gatp = set_field(0, RISCV_IOMMU_ATP_MODE_FIELD,
+ RISCV_IOMMU_DC_IOHGATP_MODE_BARE);
+ ctx->satp = set_field(0, RISCV_IOMMU_ATP_MODE_FIELD,
+ RISCV_IOMMU_DC_FSC_MODE_BARE);
+
+ ctx->tc = RISCV_IOMMU_DC_TC_V;
+ if (s->enable_ats) {
+ ctx->tc |= RISCV_IOMMU_DC_TC_EN_ATS;
+ }
+
+ ctx->ta = 0;
+ ctx->msiptp = 0;
+ return 0;
+
+ case RISCV_IOMMU_DDTP_MODE_1LVL:
+ depth = 0;
+ break;
+
+ case RISCV_IOMMU_DDTP_MODE_2LVL:
+ depth = 1;
+ break;
+
+ case RISCV_IOMMU_DDTP_MODE_3LVL:
+ depth = 2;
+ break;
+
+ default:
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+
+ /*
+ * Check supported device id width (in bits).
+ * See IOMMU Specification, Chapter 6. Software guidelines.
+ * - if extended device-context format is used:
+ * 1LVL: 6, 2LVL: 15, 3LVL: 24
+ * - if base device-context format is used:
+ * 1LVL: 7, 2LVL: 16, 3LVL: 24
+ */
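+    /*
+     * For example, with the base format (dc_fmt == 1) in 2LVL mode
+     * (depth == 1), the check below limits device ids to below
+     * 1 << (1 * 9 + 6 + 1) == 1 << 16, matching the 16-bit width above.
+     */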
+ if (ctx->devid >= (1 << (depth * 9 + 6 + (dc_fmt && depth != 2)))) {
+ return RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED;
+ }
+
+ /* Device directory tree walk */
+ for (; depth-- > 0; ) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
+ /*
+ * Select device id index bits based on device directory tree level
+ * and device context format.
+ * See IOMMU Specification, Chapter 2. Data Structures.
+ * - if extended device-context format is used:
+ * device index: [23:15][14:6][5:0]
+ * - if base device-context format is used:
+ * device index: [23:16][15:7][6:0]
+ */
+ const int split = depth * 9 + 6 + dc_fmt;
+ addr |= ((ctx->devid >> split) << 3) & ~TARGET_PAGE_MASK;
+ if (dma_memory_read(s->target_as, addr, &de, sizeof(de),
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT;
+ }
+ le64_to_cpus(&de);
+ if (!(de & RISCV_IOMMU_DDTE_VALID)) {
+ /* invalid directory entry */
+ return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID;
+ }
+ if (de & ~(RISCV_IOMMU_DDTE_PPN | RISCV_IOMMU_DDTE_VALID)) {
+ /* reserved bits set */
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+ addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
+ }
+
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
+
+ /* index into device context entry page */
+ addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
+
+ memset(&dc, 0, sizeof(dc));
+ if (dma_memory_read(s->target_as, addr, &dc, dc_len,
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT;
+ }
+
+ /* Set translation context. */
+ ctx->tc = le64_to_cpu(dc.tc);
+ ctx->gatp = le64_to_cpu(dc.iohgatp);
+ ctx->satp = le64_to_cpu(dc.fsc);
+ ctx->ta = le64_to_cpu(dc.ta);
+ ctx->msiptp = le64_to_cpu(dc.msiptp);
+ ctx->msi_addr_mask = le64_to_cpu(dc.msi_addr_mask);
+ ctx->msi_addr_pattern = le64_to_cpu(dc.msi_addr_pattern);
+
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_V)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID;
+ }
+
+ if (!riscv_iommu_validate_device_ctx(s, ctx)) {
+ return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED;
+ }
+
+ /* FSC field checks */
+ mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE);
+ addr = PPN_PHYS(get_field(ctx->satp, RISCV_IOMMU_DC_FSC_PPN));
+
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_PDTV)) {
+ if (ctx->process_id != RISCV_IOMMU_NOPROCID) {
+ /* PID is disabled */
+ return RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED;
+ }
+ if (mode > RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57) {
+ /* Invalid translation mode */
+ return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID;
+ }
+ return 0;
+ }
+
+ if (ctx->process_id == RISCV_IOMMU_NOPROCID) {
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_DPE)) {
+ /* No default process_id enabled, set BARE mode */
+ ctx->satp = 0ULL;
+ return 0;
+ } else {
+ /* Use default process_id #0 */
+ ctx->process_id = 0;
+ }
+ }
+
+ if (mode == RISCV_IOMMU_DC_FSC_MODE_BARE) {
+ /* No S-Stage translation, done. */
+ return 0;
+ }
+
+    /* DC.tc.PDTV enabled */
+ if (mode > RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20) {
+ /* Invalid PDTP.MODE */
+ return RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED;
+ }
+
+ for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
+
+ /*
+ * Select process id index bits based on process directory tree
+ * level. See IOMMU Specification, 2.2. Process-Directory-Table.
+ */
+ const int split = depth * 9 + 8;
+ addr |= ((ctx->process_id >> split) << 3) & ~TARGET_PAGE_MASK;
+ if (dma_memory_read(s->target_as, addr, &de, sizeof(de),
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
+ }
+ le64_to_cpus(&de);
+ if (!(de & RISCV_IOMMU_PDTE_VALID)) {
+ return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID;
+ }
+ addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PDTE_PPN));
+ }
+
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
+
+ /* Leaf entry in PDT */
+ addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
+ if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
+ MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+ return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
+ }
+
+ /* Use FSC and TA from process directory entry. */
+ ctx->ta = le64_to_cpu(dc.ta);
+ ctx->satp = le64_to_cpu(dc.fsc);
+
+ if (!(ctx->ta & RISCV_IOMMU_PC_TA_V)) {
+ return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID;
+ }
+
+ if (!riscv_iommu_validate_process_ctx(s, ctx)) {
+ return RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED;
+ }
+
+ return 0;
+}
+
+/* Translation Context cache support */
+static gboolean riscv_iommu_ctx_equal(gconstpointer v1, gconstpointer v2)
+{
+ RISCVIOMMUContext *c1 = (RISCVIOMMUContext *) v1;
+ RISCVIOMMUContext *c2 = (RISCVIOMMUContext *) v2;
+ return c1->devid == c2->devid &&
+ c1->process_id == c2->process_id;
+}
+
+static guint riscv_iommu_ctx_hash(gconstpointer v)
+{
+ RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) v;
+ /*
+ * Generate simple hash of (process_id, devid)
+ * assuming 24-bit wide devid.
+ */
+ return (guint)(ctx->devid) + ((guint)(ctx->process_id) << 24);
+}
+
+static void riscv_iommu_ctx_inval_devid_procid(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value;
+ RISCVIOMMUContext *arg = (RISCVIOMMUContext *) data;
+ if (ctx->tc & RISCV_IOMMU_DC_TC_V &&
+ ctx->devid == arg->devid &&
+ ctx->process_id == arg->process_id) {
+ ctx->tc &= ~RISCV_IOMMU_DC_TC_V;
+ }
+}
+
+static void riscv_iommu_ctx_inval_devid(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value;
+ RISCVIOMMUContext *arg = (RISCVIOMMUContext *) data;
+ if (ctx->tc & RISCV_IOMMU_DC_TC_V &&
+ ctx->devid == arg->devid) {
+ ctx->tc &= ~RISCV_IOMMU_DC_TC_V;
+ }
+}
+
+static void riscv_iommu_ctx_inval_all(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value;
+ if (ctx->tc & RISCV_IOMMU_DC_TC_V) {
+ ctx->tc &= ~RISCV_IOMMU_DC_TC_V;
+ }
+}
+
+static void riscv_iommu_ctx_inval(RISCVIOMMUState *s, GHFunc func,
+ uint32_t devid, uint32_t process_id)
+{
+ GHashTable *ctx_cache;
+ RISCVIOMMUContext key = {
+ .devid = devid,
+ .process_id = process_id,
+ };
+ ctx_cache = g_hash_table_ref(s->ctx_cache);
+ g_hash_table_foreach(ctx_cache, func, &key);
+ g_hash_table_unref(ctx_cache);
+}
+
+/* Find or allocate translation context for a given {device_id, process_id} */
+static RISCVIOMMUContext *riscv_iommu_ctx(RISCVIOMMUState *s,
+ unsigned devid, unsigned process_id,
+ void **ref)
+{
+ GHashTable *ctx_cache;
+ RISCVIOMMUContext *ctx;
+ RISCVIOMMUContext key = {
+ .devid = devid,
+ .process_id = process_id,
+ };
+
+ ctx_cache = g_hash_table_ref(s->ctx_cache);
+ ctx = g_hash_table_lookup(ctx_cache, &key);
+
+ if (ctx && (ctx->tc & RISCV_IOMMU_DC_TC_V)) {
+ *ref = ctx_cache;
+ return ctx;
+ }
+
+ ctx = g_new0(RISCVIOMMUContext, 1);
+ ctx->devid = devid;
+ ctx->process_id = process_id;
+
+ int fault = riscv_iommu_ctx_fetch(s, ctx);
+ if (!fault) {
+ if (g_hash_table_size(ctx_cache) >= LIMIT_CACHE_CTX) {
+ g_hash_table_unref(ctx_cache);
+ ctx_cache = g_hash_table_new_full(riscv_iommu_ctx_hash,
+ riscv_iommu_ctx_equal,
+ g_free, NULL);
+ g_hash_table_ref(ctx_cache);
+ g_hash_table_unref(qatomic_xchg(&s->ctx_cache, ctx_cache));
+ }
+ g_hash_table_add(ctx_cache, ctx);
+ *ref = ctx_cache;
+ return ctx;
+ }
+
+ g_hash_table_unref(ctx_cache);
+ *ref = NULL;
+
+ riscv_iommu_report_fault(s, ctx, RISCV_IOMMU_FQ_TTYPE_UADDR_RD,
+ fault, !!process_id, 0, 0);
+
+ g_free(ctx);
+ return NULL;
+}
+
+static void riscv_iommu_ctx_put(RISCVIOMMUState *s, void *ref)
+{
+ if (ref) {
+ g_hash_table_unref((GHashTable *)ref);
+ }
+}
+
+/* Find or allocate address space for a given device */
+static AddressSpace *riscv_iommu_space(RISCVIOMMUState *s, uint32_t devid)
+{
+ RISCVIOMMUSpace *as;
+
+ /* FIXME: PCIe bus remapping for attached endpoints. */
+ devid |= s->bus << 8;
+
+ QLIST_FOREACH(as, &s->spaces, list) {
+ if (as->devid == devid) {
+ break;
+ }
+ }
+
+ if (as == NULL) {
+ char name[64];
+ as = g_new0(RISCVIOMMUSpace, 1);
+
+ as->iommu = s;
+ as->devid = devid;
+
+ snprintf(name, sizeof(name), "riscv-iommu-%04x:%02x.%d-iova",
+ PCI_BUS_NUM(as->devid), PCI_SLOT(as->devid), PCI_FUNC(as->devid));
+
+ /* IOVA address space, untranslated addresses */
+ memory_region_init_iommu(&as->iova_mr, sizeof(as->iova_mr),
+ TYPE_RISCV_IOMMU_MEMORY_REGION,
+ OBJECT(as), "riscv_iommu", UINT64_MAX);
+ address_space_init(&as->iova_as, MEMORY_REGION(&as->iova_mr), name);
+
+ QLIST_INSERT_HEAD(&s->spaces, as, list);
+
+ trace_riscv_iommu_new(s->parent_obj.id, PCI_BUS_NUM(as->devid),
+ PCI_SLOT(as->devid), PCI_FUNC(as->devid));
+ }
+ return &as->iova_as;
+}
+
+/* Translation Object cache support */
+static gboolean riscv_iommu_iot_equal(gconstpointer v1, gconstpointer v2)
+{
+ RISCVIOMMUEntry *t1 = (RISCVIOMMUEntry *) v1;
+ RISCVIOMMUEntry *t2 = (RISCVIOMMUEntry *) v2;
+ return t1->gscid == t2->gscid && t1->pscid == t2->pscid &&
+ t1->iova == t2->iova && t1->tag == t2->tag;
+}
+
+static guint riscv_iommu_iot_hash(gconstpointer v)
+{
+ RISCVIOMMUEntry *t = (RISCVIOMMUEntry *) v;
+ return (guint)t->iova;
+}
+
+/* GV: 0 AV: 0 PSCV: 0 GVMA: 0 */
+/* GV: 0 AV: 0 GVMA: 1 */
+static
+void riscv_iommu_iot_inval_all(gpointer key, gpointer value, gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 0 AV: 0 PSCV: 1 GVMA: 0 */
+static
+void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value, gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->pscid == arg->pscid) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 0 AV: 1 PSCV: 0 GVMA: 0 */
+static
+void riscv_iommu_iot_inval_iova(gpointer key, gpointer value, gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->iova == arg->iova) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 0 AV: 1 PSCV: 1 GVMA: 0 */
+static void riscv_iommu_iot_inval_pscid_iova(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->pscid == arg->pscid &&
+ iot->iova == arg->iova) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 1 AV: 0 PSCV: 0 GVMA: 0 */
+/* GV: 1 AV: 0 GVMA: 1 */
+static
+void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value, gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->gscid == arg->gscid) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 1 AV: 0 PSCV: 1 GVMA: 0 */
+static void riscv_iommu_iot_inval_gscid_pscid(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->gscid == arg->gscid &&
+ iot->pscid == arg->pscid) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 1 AV: 1 PSCV: 0 GVMA: 0 */
+/* GV: 1 AV: 1 GVMA: 1 */
+static void riscv_iommu_iot_inval_gscid_iova(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->gscid == arg->gscid &&
+ iot->iova == arg->iova) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* GV: 1 AV: 1 PSCV: 1 GVMA: 0 */
+static void riscv_iommu_iot_inval_gscid_pscid_iova(gpointer key, gpointer value,
+ gpointer data)
+{
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
+ if (iot->tag == arg->tag &&
+ iot->gscid == arg->gscid &&
+ iot->pscid == arg->pscid &&
+ iot->iova == arg->iova) {
+ iot->perm = IOMMU_NONE;
+ }
+}
+
+/* caller should keep ref-count for iot_cache object */
+static RISCVIOMMUEntry *riscv_iommu_iot_lookup(RISCVIOMMUContext *ctx,
+ GHashTable *iot_cache, hwaddr iova, RISCVIOMMUTransTag transtag)
+{
+ RISCVIOMMUEntry key = {
+ .tag = transtag,
+ .gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID),
+ .pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID),
+ .iova = PPN_DOWN(iova),
+ };
+ return g_hash_table_lookup(iot_cache, &key);
+}
+
+/* caller should keep ref-count for iot_cache object */
+static void riscv_iommu_iot_update(RISCVIOMMUState *s,
+ GHashTable *iot_cache, RISCVIOMMUEntry *iot)
+{
+ if (!s->iot_limit) {
+ return;
+ }
+
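+ /*
+ * Once the cache reaches its size limit, replace it with a fresh
+ * empty table instead of evicting individual entries.
+ */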
+ if (g_hash_table_size(s->iot_cache) >= s->iot_limit) {
+ iot_cache = g_hash_table_new_full(riscv_iommu_iot_hash,
+ riscv_iommu_iot_equal,
+ g_free, NULL);
+ g_hash_table_unref(qatomic_xchg(&s->iot_cache, iot_cache));
+ }
+ g_hash_table_add(iot_cache, iot);
+}
+
+static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func,
+ uint32_t gscid, uint32_t pscid, hwaddr iova, RISCVIOMMUTransTag transtag)
+{
+ GHashTable *iot_cache;
+ RISCVIOMMUEntry key = {
+ .tag = transtag,
+ .gscid = gscid,
+ .pscid = pscid,
+ .iova = PPN_DOWN(iova),
+ };
+
+ iot_cache = g_hash_table_ref(s->iot_cache);
+ g_hash_table_foreach(iot_cache, func, &key);
+ g_hash_table_unref(iot_cache);
+}
+
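+/*
+ * Classify the translation by the active stages: BY (both stages BARE),
+ * VG (G-stage only), SS (S-stage only) or VN (nested, both stages).
+ */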
+static RISCVIOMMUTransTag riscv_iommu_get_transtag(RISCVIOMMUContext *ctx)
+{
+ uint64_t satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD);
+ uint64_t gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
+
+ if (satp == RISCV_IOMMU_DC_FSC_MODE_BARE) {
+ return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
+ RISCV_IOMMU_TRANS_TAG_BY : RISCV_IOMMU_TRANS_TAG_VG;
+ } else {
+ return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
+ RISCV_IOMMU_TRANS_TAG_SS : RISCV_IOMMU_TRANS_TAG_VN;
+ }
+}
+
+static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ IOMMUTLBEntry *iotlb, bool enable_cache)
+{
+ RISCVIOMMUTransTag transtag = riscv_iommu_get_transtag(ctx);
+ RISCVIOMMUEntry *iot;
+ IOMMUAccessFlags perm;
+ bool enable_pid;
+ bool enable_pri;
+ GHashTable *iot_cache;
+ int fault;
+
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
+
+ iot_cache = g_hash_table_ref(s->iot_cache);
+ /*
+ * TC[32] is reserved for custom extensions, used here to temporarily
+ * enable automatic page-request generation for ATS queries.
+ */
+ enable_pri = (iotlb->perm == IOMMU_NONE) && (ctx->tc & BIT_ULL(32));
+ enable_pid = (ctx->tc & RISCV_IOMMU_DC_TC_PDTV);
+
+ /* Check for ATS request. */
+ if (iotlb->perm == IOMMU_NONE) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
+ /* Check if ATS is disabled. */
+ if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
+ enable_pri = false;
+ fault = RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED;
+ goto done;
+ }
+ }
+
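+ /* Fast path: consult the IOATC before walking the directory/page tables. */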
+ iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova, transtag);
+ perm = iot ? iot->perm : IOMMU_NONE;
+ if (perm != IOMMU_NONE) {
+ iotlb->translated_addr = PPN_PHYS(iot->phys);
+ iotlb->addr_mask = ~TARGET_PAGE_MASK;
+ iotlb->perm = perm;
+ fault = 0;
+ goto done;
+ }
+
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
+
+ /* Translate using device directory / page table information. */
+ fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
+
+ if (!fault && iotlb->target_as == &s->trap_as) {
+ /* Do not cache trapped MSI translations */
+ goto done;
+ }
+
+ /*
+ * We made an implementation choice to not cache identity-mapped
+ * translations, as allowed by the specification, to avoid
+ * translation cache evictions for other devices sharing the
+ * IOMMU hardware model.
+ */
+ if (!fault && iotlb->translated_addr != iotlb->iova && enable_cache) {
+ iot = g_new0(RISCVIOMMUEntry, 1);
+ iot->iova = PPN_DOWN(iotlb->iova);
+ iot->phys = PPN_DOWN(iotlb->translated_addr);
+ iot->gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
+ iot->pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
+ iot->perm = iotlb->perm;
+ iot->tag = transtag;
+ riscv_iommu_iot_update(s, iot_cache, iot);
+ }
+
+done:
+ g_hash_table_unref(iot_cache);
+
+ if (enable_pri && fault) {
+ struct riscv_iommu_pq_record pr = {0};
+ if (enable_pid) {
+ pr.hdr = set_field(RISCV_IOMMU_PREQ_HDR_PV,
+ RISCV_IOMMU_PREQ_HDR_PID, ctx->process_id);
+ }
+ pr.hdr = set_field(pr.hdr, RISCV_IOMMU_PREQ_HDR_DID, ctx->devid);
+ pr.payload = (iotlb->iova & TARGET_PAGE_MASK) |
+ RISCV_IOMMU_PREQ_PAYLOAD_M;
+ riscv_iommu_pri(s, &pr);
+ return fault;
+ }
+
+ if (fault) {
+ unsigned ttype = RISCV_IOMMU_FQ_TTYPE_PCIE_ATS_REQ;
+
+ if (iotlb->perm & IOMMU_RW) {
+ ttype = RISCV_IOMMU_FQ_TTYPE_UADDR_WR;
+ } else if (iotlb->perm & IOMMU_RO) {
+ ttype = RISCV_IOMMU_FQ_TTYPE_UADDR_RD;
+ }
+
+ riscv_iommu_report_fault(s, ctx, ttype, fault, enable_pid,
+ iotlb->iova, iotlb->translated_addr);
+ return fault;
+ }
+
+ return 0;
+}
+
+/* IOMMU Command Interface */
+static MemTxResult riscv_iommu_iofence(RISCVIOMMUState *s, bool notify,
+ uint64_t addr, uint32_t data)
+{
+ /*
+ * ATS processing in this implementation of the IOMMU is synchronous;
+ * there is no need to wait for completions here.
+ */
+ if (!notify) {
+ return MEMTX_OK;
+ }
+
+ return dma_memory_write(s->target_as, addr, &data, sizeof(data),
+ MEMTXATTRS_UNSPECIFIED);
+}
+
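+/*
+ * Common helper for ATS.INVAL and ATS.PRGR: find the address space
+ * matching the requester id and fire its registered IOMMU notifiers,
+ * optionally filtered by process id.
+ */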
+static void riscv_iommu_ats(RISCVIOMMUState *s,
+ struct riscv_iommu_command *cmd, IOMMUNotifierFlag flag,
+ IOMMUAccessFlags perm,
+ void (*trace_fn)(const char *id))
+{
+ RISCVIOMMUSpace *as = NULL;
+ IOMMUNotifier *n;
+ IOMMUTLBEvent event;
+ uint32_t pid;
+ uint32_t devid;
+ const bool pv = cmd->dword0 & RISCV_IOMMU_CMD_ATS_PV;
+
+ if (cmd->dword0 & RISCV_IOMMU_CMD_ATS_DSV) {
+ /* Use device segment and requester id */
+ devid = get_field(cmd->dword0,
+ RISCV_IOMMU_CMD_ATS_DSEG | RISCV_IOMMU_CMD_ATS_RID);
+ } else {
+ devid = get_field(cmd->dword0, RISCV_IOMMU_CMD_ATS_RID);
+ }
+
+ pid = get_field(cmd->dword0, RISCV_IOMMU_CMD_ATS_PID);
+
+ QLIST_FOREACH(as, &s->spaces, list) {
+ if (as->devid == devid) {
+ break;
+ }
+ }
+
+ if (!as || !as->notifier) {
+ return;
+ }
+
+ event.type = flag;
+ event.entry.perm = perm;
+ event.entry.target_as = s->target_as;
+
+ IOMMU_NOTIFIER_FOREACH(n, &as->iova_mr) {
+ if (!pv || n->iommu_idx == pid) {
+ event.entry.iova = n->start;
+ event.entry.addr_mask = n->end - n->start;
+ trace_fn(as->iova_mr.parent_obj.name);
+ memory_region_notify_iommu_one(n, &event);
+ }
+ }
+}
+
+static void riscv_iommu_ats_inval(RISCVIOMMUState *s,
+ struct riscv_iommu_command *cmd)
+{
+ return riscv_iommu_ats(s, cmd, IOMMU_NOTIFIER_DEVIOTLB_UNMAP, IOMMU_NONE,
+ trace_riscv_iommu_ats_inval);
+}
+
+static void riscv_iommu_ats_prgr(RISCVIOMMUState *s,
+ struct riscv_iommu_command *cmd)
+{
+ unsigned resp_code = get_field(cmd->dword1,
+ RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE);
+
+ /* Using the access flag to carry response code information */
+ IOMMUAccessFlags perm = resp_code ? IOMMU_NONE : IOMMU_RW;
+ return riscv_iommu_ats(s, cmd, IOMMU_NOTIFIER_MAP, perm,
+ trace_riscv_iommu_ats_prgr);
+}
+
+static void riscv_iommu_process_ddtp(RISCVIOMMUState *s)
+{
+ uint64_t old_ddtp = s->ddtp;
+ uint64_t new_ddtp = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_DDTP);
+ unsigned new_mode = get_field(new_ddtp, RISCV_IOMMU_DDTP_MODE);
+ unsigned old_mode = get_field(old_ddtp, RISCV_IOMMU_DDTP_MODE);
+ bool ok = false;
+
+ /*
+ * Check for allowed DDTP.MODE transitions:
+ * {OFF, BARE} -> {OFF, BARE, 1LVL, 2LVL, 3LVL}
+ * {1LVL, 2LVL, 3LVL} -> {OFF, BARE}
+ */
+ if (new_mode == old_mode ||
+ new_mode == RISCV_IOMMU_DDTP_MODE_OFF ||
+ new_mode == RISCV_IOMMU_DDTP_MODE_BARE) {
+ ok = true;
+ } else if (new_mode == RISCV_IOMMU_DDTP_MODE_1LVL ||
+ new_mode == RISCV_IOMMU_DDTP_MODE_2LVL ||
+ new_mode == RISCV_IOMMU_DDTP_MODE_3LVL) {
+ ok = old_mode == RISCV_IOMMU_DDTP_MODE_OFF ||
+ old_mode == RISCV_IOMMU_DDTP_MODE_BARE;
+ }
+
+ if (ok) {
+ /* clear reserved and busy bits, report back sanitized version */
+ new_ddtp = set_field(new_ddtp & RISCV_IOMMU_DDTP_PPN,
+ RISCV_IOMMU_DDTP_MODE, new_mode);
+ } else {
+ new_ddtp = old_ddtp;
+ }
+ s->ddtp = new_ddtp;
+
+ riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_DDTP, new_ddtp);
+}
+
+/* Command function and opcode field. */
+#define RISCV_IOMMU_CMD(func, op) (((func) << 7) | (op))
+
+static void riscv_iommu_process_cq_tail(RISCVIOMMUState *s)
+{
+ struct riscv_iommu_command cmd;
+ MemTxResult res;
+ dma_addr_t addr;
+ uint32_t tail, head, ctrl;
+ uint64_t cmd_opcode;
+ GHFunc func;
+
+ ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR);
+ tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQT) & s->cq_mask;
+ head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQH) & s->cq_mask;
+
+ /* Check for pending error or queue processing disabled */
+ if (!(ctrl & RISCV_IOMMU_CQCSR_CQON) ||
+ !!(ctrl & (RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_CQMF))) {
+ return;
+ }
+
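+ /* Fetch and execute commands from CQH up to the producer index CQT. */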
+ while (tail != head) {
+ addr = s->cq_addr + head * sizeof(cmd);
+ res = dma_memory_read(s->target_as, addr, &cmd, sizeof(cmd),
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (res != MEMTX_OK) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR,
+ RISCV_IOMMU_CQCSR_CQMF, 0);
+ goto fault;
+ }
+
+ trace_riscv_iommu_cmd(s->parent_obj.id, cmd.dword0, cmd.dword1);
+
+ cmd_opcode = get_field(cmd.dword0,
+ RISCV_IOMMU_CMD_OPCODE | RISCV_IOMMU_CMD_FUNC);
+
+ switch (cmd_opcode) {
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOFENCE_FUNC_C,
+ RISCV_IOMMU_CMD_IOFENCE_OPCODE):
+ res = riscv_iommu_iofence(s,
+ cmd.dword0 & RISCV_IOMMU_CMD_IOFENCE_AV, cmd.dword1 << 2,
+ get_field(cmd.dword0, RISCV_IOMMU_CMD_IOFENCE_DATA));
+
+ if (res != MEMTX_OK) {
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR,
+ RISCV_IOMMU_CQCSR_CQMF, 0);
+ goto fault;
+ }
+ break;
+
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA,
+ RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
+ {
+ bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
+ bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
+ bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
+ uint32_t gscid = get_field(cmd.dword0,
+ RISCV_IOMMU_CMD_IOTINVAL_GSCID);
+ uint32_t pscid = get_field(cmd.dword0,
+ RISCV_IOMMU_CMD_IOTINVAL_PSCID);
+ hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
+
+ if (pscv) {
+ /* Illegal command arguments: IOTINVAL.GVMA with PSCV == 1 */
+ goto cmd_ill;
+ }
+
+ func = riscv_iommu_iot_inval_all;
+
+ if (gv) {
+ func = (av) ? riscv_iommu_iot_inval_gscid_iova :
+ riscv_iommu_iot_inval_gscid;
+ }
+
+ riscv_iommu_iot_inval(
+ s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VG);
+
+ riscv_iommu_iot_inval(
+ s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VN);
+ break;
+ }
+
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA,
+ RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
+ {
+ bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
+ bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
+ bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
+ uint32_t gscid = get_field(cmd.dword0,
+ RISCV_IOMMU_CMD_IOTINVAL_GSCID);
+ uint32_t pscid = get_field(cmd.dword0,
+ RISCV_IOMMU_CMD_IOTINVAL_PSCID);
+ hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
+ RISCVIOMMUTransTag transtag;
+
+ if (gv) {
+ transtag = RISCV_IOMMU_TRANS_TAG_VN;
+ if (pscv) {
+ func = (av) ? riscv_iommu_iot_inval_gscid_pscid_iova :
+ riscv_iommu_iot_inval_gscid_pscid;
+ } else {
+ func = (av) ? riscv_iommu_iot_inval_gscid_iova :
+ riscv_iommu_iot_inval_gscid;
+ }
+ } else {
+ transtag = RISCV_IOMMU_TRANS_TAG_SS;
+ if (pscv) {
+ func = (av) ? riscv_iommu_iot_inval_pscid_iova :
+ riscv_iommu_iot_inval_pscid;
+ } else {
+ func = (av) ? riscv_iommu_iot_inval_iova :
+ riscv_iommu_iot_inval_all;
+ }
+ }
+
+ riscv_iommu_iot_inval(s, func, gscid, pscid, iova, transtag);
+ break;
+ }
+
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT,
+ RISCV_IOMMU_CMD_IODIR_OPCODE):
+ if (!(cmd.dword0 & RISCV_IOMMU_CMD_IODIR_DV)) {
+ /* invalidate all device context cache mappings */
+ func = riscv_iommu_ctx_inval_all;
+ } else {
+ /* invalidate all device context matching DID */
+ func = riscv_iommu_ctx_inval_devid;
+ }
+ riscv_iommu_ctx_inval(s, func,
+ get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_DID), 0);
+ break;
+
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT,
+ RISCV_IOMMU_CMD_IODIR_OPCODE):
+ if (!(cmd.dword0 & RISCV_IOMMU_CMD_IODIR_DV)) {
+ /* Illegal command arguments: IODIR.INVAL_PDT with DV == 0 */
+ goto cmd_ill;
+ } else {
+ func = riscv_iommu_ctx_inval_devid_procid;
+ }
+ riscv_iommu_ctx_inval(s, func,
+ get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_DID),
+ get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_PID));
+ break;
+
+ /* ATS commands */
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_ATS_FUNC_INVAL,
+ RISCV_IOMMU_CMD_ATS_OPCODE):
+ if (!s->enable_ats) {
+ goto cmd_ill;
+ }
+
+ riscv_iommu_ats_inval(s, &cmd);
+ break;
+
+ case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_ATS_FUNC_PRGR,
+ RISCV_IOMMU_CMD_ATS_OPCODE):
+ if (!s->enable_ats) {
+ goto cmd_ill;
+ }
+
+ riscv_iommu_ats_prgr(s, &cmd);
+ break;
+
+ default:
+ cmd_ill:
+ /* Invalid command, do not advance the command queue head. */
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR,
+ RISCV_IOMMU_CQCSR_CMD_ILL, 0);
+ goto fault;
+ }
+
+ /* Advance and update head pointer after command completes. */
+ head = (head + 1) & s->cq_mask;
+ riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_CQH, head);
+ }
+ return;
+
+fault:
+ if (ctrl & RISCV_IOMMU_CQCSR_CIE) {
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_CQ);
+ }
+}
+
+static void riscv_iommu_process_cq_control(RISCVIOMMUState *s)
+{
+ uint64_t base;
+ uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR);
+ uint32_t ctrl_clr;
+ bool enable = !!(ctrl_set & RISCV_IOMMU_CQCSR_CQEN);
+ bool active = !!(ctrl_set & RISCV_IOMMU_CQCSR_CQON);
+
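+ /*
+ * Enable: latch the queue base address and size and clear stale error
+ * bits before reporting CQON. Disable: make the tail register
+ * read-only and clear CQON.
+ */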
+ if (enable && !active) {
+ base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_CQB);
+ s->cq_mask = (2ULL << get_field(base, RISCV_IOMMU_CQB_LOG2SZ)) - 1;
+ s->cq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_CQB_PPN));
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQT], ~s->cq_mask);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_CQH], 0);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_CQT], 0);
+ ctrl_set = RISCV_IOMMU_CQCSR_CQON;
+ ctrl_clr = RISCV_IOMMU_CQCSR_BUSY | RISCV_IOMMU_CQCSR_CQMF |
+ RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_CMD_TO |
+ RISCV_IOMMU_CQCSR_FENCE_W_IP;
+ } else if (!enable && active) {
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQT], ~0);
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_CQCSR_BUSY | RISCV_IOMMU_CQCSR_CQON;
+ } else {
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_CQCSR_BUSY;
+ }
+
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, ctrl_set, ctrl_clr);
+}
+
+static void riscv_iommu_process_fq_control(RISCVIOMMUState *s)
+{
+ uint64_t base;
+ uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR);
+ uint32_t ctrl_clr;
+ bool enable = !!(ctrl_set & RISCV_IOMMU_FQCSR_FQEN);
+ bool active = !!(ctrl_set & RISCV_IOMMU_FQCSR_FQON);
+
+ if (enable && !active) {
+ base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_FQB);
+ s->fq_mask = (2ULL << get_field(base, RISCV_IOMMU_FQB_LOG2SZ)) - 1;
+ s->fq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_FQB_PPN));
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQH], ~s->fq_mask);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_FQH], 0);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_FQT], 0);
+ ctrl_set = RISCV_IOMMU_FQCSR_FQON;
+ ctrl_clr = RISCV_IOMMU_FQCSR_BUSY | RISCV_IOMMU_FQCSR_FQMF |
+ RISCV_IOMMU_FQCSR_FQOF;
+ } else if (!enable && active) {
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQH], ~0);
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_FQCSR_BUSY | RISCV_IOMMU_FQCSR_FQON;
+ } else {
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_FQCSR_BUSY;
+ }
+
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, ctrl_set, ctrl_clr);
+}
+
+static void riscv_iommu_process_pq_control(RISCVIOMMUState *s)
+{
+ uint64_t base;
+ uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR);
+ uint32_t ctrl_clr;
+ bool enable = !!(ctrl_set & RISCV_IOMMU_PQCSR_PQEN);
+ bool active = !!(ctrl_set & RISCV_IOMMU_PQCSR_PQON);
+
+ if (enable && !active) {
+ base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_PQB);
+ s->pq_mask = (2ULL << get_field(base, RISCV_IOMMU_PQB_LOG2SZ)) - 1;
+ s->pq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_PQB_PPN));
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQH], ~s->pq_mask);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_PQH], 0);
+ stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_PQT], 0);
+ ctrl_set = RISCV_IOMMU_PQCSR_PQON;
+ ctrl_clr = RISCV_IOMMU_PQCSR_BUSY | RISCV_IOMMU_PQCSR_PQMF |
+ RISCV_IOMMU_PQCSR_PQOF;
+ } else if (!enable && active) {
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQH], ~0);
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_PQCSR_BUSY | RISCV_IOMMU_PQCSR_PQON;
+ } else {
+ ctrl_set = 0;
+ ctrl_clr = RISCV_IOMMU_PQCSR_BUSY;
+ }
+
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, ctrl_set, ctrl_clr);
+}
+
+static void riscv_iommu_process_dbg(RISCVIOMMUState *s)
+{
+ uint64_t iova = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_IOVA);
+ uint64_t ctrl = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_CTL);
+ unsigned devid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_DID);
+ unsigned pid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_PID);
+ RISCVIOMMUContext *ctx;
+ void *ref;
+
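+ /* Debug translation requests are processed only when Go/Busy is set. */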
+ if (!(ctrl & RISCV_IOMMU_TR_REQ_CTL_GO_BUSY)) {
+ return;
+ }
+
+ ctx = riscv_iommu_ctx(s, devid, pid, &ref);
+ if (ctx == NULL) {
+ riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE,
+ RISCV_IOMMU_TR_RESPONSE_FAULT |
+ (RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED << 10));
+ } else {
+ IOMMUTLBEntry iotlb = {
+ .iova = iova,
+ .perm = ctrl & RISCV_IOMMU_TR_REQ_CTL_NW ? IOMMU_RO : IOMMU_RW,
+ .addr_mask = ~0,
+ .target_as = NULL,
+ };
+ int fault = riscv_iommu_translate(s, ctx, &iotlb, false);
+ if (fault) {
+ iova = RISCV_IOMMU_TR_RESPONSE_FAULT | (((uint64_t) fault) << 10);
+ } else {
+ iova = iotlb.translated_addr & ~iotlb.addr_mask;
+ iova >>= TARGET_PAGE_BITS;
+ iova &= RISCV_IOMMU_TR_RESPONSE_PPN;
+
+ /* We do not support superpages (> 4 KiB) for now */
+ iova &= ~RISCV_IOMMU_TR_RESPONSE_S;
+ }
+ riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE, iova);
+ }
+
+ riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_TR_REQ_CTL, 0,
+ RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
+ riscv_iommu_ctx_put(s, ref);
+}
+
+typedef void riscv_iommu_process_fn(RISCVIOMMUState *s);
+
+static void riscv_iommu_update_icvec(RISCVIOMMUState *s, uint64_t data)
+{
+ uint64_t icvec = 0;
+
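+ /* Clamp each requested cause vector to the vectors actually available. */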
+ icvec |= MIN(data & RISCV_IOMMU_ICVEC_CIV,
+ s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_CIV);
+
+ icvec |= MIN(data & RISCV_IOMMU_ICVEC_FIV,
+ s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_FIV);
+
+ icvec |= MIN(data & RISCV_IOMMU_ICVEC_PMIV,
+ s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_PMIV);
+
+ icvec |= MIN(data & RISCV_IOMMU_ICVEC_PIV,
+ s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_PIV);
+
+ trace_riscv_iommu_icvec_write(data, icvec);
+
+ riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_ICVEC, icvec);
+}
+
+static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
+{
+ uint32_t cqcsr, fqcsr, pqcsr;
+ uint32_t ipsr_set = 0;
+ uint32_t ipsr_clr = 0;
+
+ if (data & RISCV_IOMMU_IPSR_CIP) {
+ cqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR);
+
+ if (cqcsr & RISCV_IOMMU_CQCSR_CIE &&
+ (cqcsr & RISCV_IOMMU_CQCSR_FENCE_W_IP ||
+ cqcsr & RISCV_IOMMU_CQCSR_CMD_ILL ||
+ cqcsr & RISCV_IOMMU_CQCSR_CMD_TO ||
+ cqcsr & RISCV_IOMMU_CQCSR_CQMF)) {
+ ipsr_set |= RISCV_IOMMU_IPSR_CIP;
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_CIP;
+ }
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_CIP;
+ }
+
+ if (data & RISCV_IOMMU_IPSR_FIP) {
+ fqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR);
+
+ if (fqcsr & RISCV_IOMMU_FQCSR_FIE &&
+ (fqcsr & RISCV_IOMMU_FQCSR_FQOF ||
+ fqcsr & RISCV_IOMMU_FQCSR_FQMF)) {
+ ipsr_set |= RISCV_IOMMU_IPSR_FIP;
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_FIP;
+ }
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_FIP;
+ }
+
+ if (data & RISCV_IOMMU_IPSR_PIP) {
+ pqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR);
+
+ if (pqcsr & RISCV_IOMMU_PQCSR_PIE &&
+ (pqcsr & RISCV_IOMMU_PQCSR_PQOF ||
+ pqcsr & RISCV_IOMMU_PQCSR_PQMF)) {
+ ipsr_set |= RISCV_IOMMU_IPSR_PIP;
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_PIP;
+ }
+ } else {
+ ipsr_clr |= RISCV_IOMMU_IPSR_PIP;
+ }
+
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr);
+}
+
+static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
+ uint32_t regb,
+ bool prev_cy_inh)
+{
+ switch (regb) {
+ case RISCV_IOMMU_REG_IOCOUNTINH:
+ riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
+ break;
+
+ case RISCV_IOMMU_REG_IOHPMCYCLES:
+ case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
+ riscv_iommu_process_hpmcycle_write(s);
+ break;
+
+ case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
+ RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
+ riscv_iommu_process_hpmevt_write(s, regb & ~7);
+ break;
+ }
+}
+
+/*
+ * Write the value of 'data' for the register specified by 'reg_addr'
+ * into the location pointed to by 'dest', after applying the
+ * read-only/read-write/write-1-to-clear masks.
+ *
+ * The result is stored in little-endian byte order.
+ */
+static void riscv_iommu_write_reg_val(RISCVIOMMUState *s,
+ void *dest, hwaddr reg_addr,
+ int size, uint64_t data)
+{
+ uint64_t ro = ldn_le_p(&s->regs_ro[reg_addr], size);
+ uint64_t wc = ldn_le_p(&s->regs_wc[reg_addr], size);
+ uint64_t rw = ldn_le_p(&s->regs_rw[reg_addr], size);
+
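+ /*
+ * Keep read-only bits from the current value, take the remaining bits
+ * from 'data', then clear any write-1-to-clear bits set in 'data'.
+ */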
+ stn_le_p(dest, size, ((rw & ro) | (data & ~ro)) & ~(data & wc));
+}
+
+static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size,
+ MemTxAttrs attrs)
+{
+ riscv_iommu_process_fn *process_fn = NULL;
+ RISCVIOMMUState *s = opaque;
+ uint32_t regb = addr & ~3;
+ uint32_t busy = 0;
+ uint64_t val = 0;
+ bool cy_inh = false;
+
+ if ((addr & (size - 1)) != 0) {
+ /* Unsupported MMIO alignment or access size */
+ return MEMTX_ERROR;
+ }
+
+ if (addr + size > RISCV_IOMMU_REG_MSI_CONFIG) {
+ /* Unsupported MMIO access location. */
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ /* Track actionable MMIO write. */
+ switch (regb) {
+ case RISCV_IOMMU_REG_DDTP:
+ case RISCV_IOMMU_REG_DDTP + 4:
+ process_fn = riscv_iommu_process_ddtp;
+ regb = RISCV_IOMMU_REG_DDTP;
+ busy = RISCV_IOMMU_DDTP_BUSY;
+ break;
+
+ case RISCV_IOMMU_REG_CQT:
+ process_fn = riscv_iommu_process_cq_tail;
+ break;
+
+ case RISCV_IOMMU_REG_CQCSR:
+ process_fn = riscv_iommu_process_cq_control;
+ busy = RISCV_IOMMU_CQCSR_BUSY;
+ break;
+
+ case RISCV_IOMMU_REG_FQCSR:
+ process_fn = riscv_iommu_process_fq_control;
+ busy = RISCV_IOMMU_FQCSR_BUSY;
+ break;
+
+ case RISCV_IOMMU_REG_PQCSR:
+ process_fn = riscv_iommu_process_pq_control;
+ busy = RISCV_IOMMU_PQCSR_BUSY;
+ break;
+
+ case RISCV_IOMMU_REG_ICVEC:
+ case RISCV_IOMMU_REG_IPSR:
+ /*
+ * ICVEC and IPSR have special read/write procedures. We'll
+ * call their respective helpers and exit.
+ */
+ riscv_iommu_write_reg_val(s, &val, addr, size, data);
+
+ /*
+ * 'val' is stored as LE. Switch to host endianness
+ * before using it.
+ */
+ val = le64_to_cpu(val);
+
+ if (regb == RISCV_IOMMU_REG_ICVEC) {
+ riscv_iommu_update_icvec(s, val);
+ } else {
+ riscv_iommu_update_ipsr(s, val);
+ }
+
+ return MEMTX_OK;
+
+ case RISCV_IOMMU_REG_TR_REQ_CTL:
+ process_fn = riscv_iommu_process_dbg;
+ regb = RISCV_IOMMU_REG_TR_REQ_CTL;
+ busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
+ break;
+
+ case RISCV_IOMMU_REG_IOCOUNTINH:
+ if (addr != RISCV_IOMMU_REG_IOCOUNTINH) {
+ break;
+ }
+ /* Store previous value of CY bit. */
+ cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) &
+ RISCV_IOMMU_IOCOUNTINH_CY);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Register updates might not be synchronized with the core logic.
+ * If system software updates a register while the relevant BUSY bit
+ * is set, the IOMMU's behavior for the additional writes is
+ * UNSPECIFIED.
+ */
+ riscv_iommu_write_reg_val(s, &s->regs_rw[addr], addr, size, data);
+
+ /* Busy flag update, MSB 4-byte register. */
+ if (busy) {
+ uint32_t rw = ldl_le_p(&s->regs_rw[regb]);
+ stl_le_p(&s->regs_rw[regb], rw | busy);
+ }
+
+ /* Process HPM writes and update any internal state if needed. */
+ if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF &&
+ regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) {
+ riscv_iommu_process_hpm_writes(s, regb, cy_inh);
+ }
+
+ if (process_fn) {
+ process_fn(s);
+ }
+
+ return MEMTX_OK;
+}
+
+static MemTxResult riscv_iommu_mmio_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size, MemTxAttrs attrs)
+{
+ RISCVIOMMUState *s = opaque;
+ uint64_t val = -1;
+ uint8_t *ptr;
+
+ if ((addr & (size - 1)) != 0) {
+ /* Unsupported MMIO alignment. */
+ return MEMTX_ERROR;
+ }
+
+ if (addr + size > RISCV_IOMMU_REG_MSI_CONFIG) {
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ /* Compute cycle register value. */
+ if ((addr & ~7) == RISCV_IOMMU_REG_IOHPMCYCLES) {
+ val = riscv_iommu_hpmcycle_read(s);
+ ptr = (uint8_t *)&val + (addr & 7);
+ } else if ((addr & ~3) == RISCV_IOMMU_REG_IOCOUNTOVF) {
+ /*
+ * Software can read RISCV_IOMMU_REG_IOCOUNTOVF before the timer
+ * callback completes, in which case the CY_OF bit in
+ * RISCV_IOMMU_IOHPMCYCLES_OVF would still be 0. Take the CY_OF bit
+ * state from the RISCV_IOMMU_REG_IOHPMCYCLES register instead, as it
+ * does not depend on the timer callback and is computed directly from
+ * the cycle overflow.
+ */
+ val = ldq_le_p(&s->regs_rw[addr]);
+ val |= (riscv_iommu_hpmcycle_read(s) & RISCV_IOMMU_IOHPMCYCLES_OVF)
+ ? RISCV_IOMMU_IOCOUNTOVF_CY
+ : 0;
+ ptr = (uint8_t *)&val + (addr & 3);
+ } else {
+ ptr = &s->regs_rw[addr];
+ }
+
+ val = ldn_le_p(ptr, size);
+
+ *data = val;
+
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps riscv_iommu_mmio_ops = {
+ .read_with_attrs = riscv_iommu_mmio_read,
+ .write_with_attrs = riscv_iommu_mmio_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ }
+};
+
+/*
+ * Translations matching MSI pattern check are redirected to "riscv-iommu-trap"
+ * memory region as untranslated address, for additional MSI/MRIF interception
+ * by IOMMU interrupt remapping implementation.
+ * Note: device emulation code generating an MSI is expected to provide
+ * valid memory transaction attributes with requester_id set.
+ */
+static MemTxResult riscv_iommu_trap_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size, MemTxAttrs attrs)
+{
+ RISCVIOMMUState* s = (RISCVIOMMUState *)opaque;
+ RISCVIOMMUContext *ctx;
+ MemTxResult res;
+ void *ref;
+ uint32_t devid = attrs.requester_id;
+
+ if (attrs.unspecified) {
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ /* FIXME: PCIe bus remapping for attached endpoints. */
+ devid |= s->bus << 8;
+
+ ctx = riscv_iommu_ctx(s, devid, 0, &ref);
+ if (ctx == NULL) {
+ res = MEMTX_ACCESS_ERROR;
+ } else {
+ res = riscv_iommu_msi_write(s, ctx, addr, data, size, attrs);
+ }
+ riscv_iommu_ctx_put(s, ref);
+ return res;
+}
+
+static MemTxResult riscv_iommu_trap_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size, MemTxAttrs attrs)
+{
+ return MEMTX_ACCESS_ERROR;
+}
+
+static const MemoryRegionOps riscv_iommu_trap_ops = {
+ .read_with_attrs = riscv_iommu_trap_read,
+ .write_with_attrs = riscv_iommu_trap_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ .unaligned = true,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ }
+};
+
+void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode)
+{
+ s->cap = set_field(s->cap, RISCV_IOMMU_CAP_IGS, mode);
+}
+
+static void riscv_iommu_instance_init(Object *obj)
+{
+ RISCVIOMMUState *s = RISCV_IOMMU(obj);
+
+ /* Enable translation debug interface */
+ s->cap = RISCV_IOMMU_CAP_DBG;
+
+ /* Report QEMU target physical address space limits */
+ s->cap = set_field(s->cap, RISCV_IOMMU_CAP_PAS,
+ TARGET_PHYS_ADDR_SPACE_BITS);
+
+ /* TODO: method to report supported PID bits */
+ s->pid_bits = 8; /* restricted to size of MemTxAttrs.pid */
+ s->cap |= RISCV_IOMMU_CAP_PD8;
+
+ /* Register storage: live values, read-only and write-1-to-clear masks */
+ s->regs_rw = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE);
+ s->regs_ro = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE);
+ s->regs_wc = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE);
+
+ /* Mark all registers read-only */
+ memset(s->regs_ro, 0xff, RISCV_IOMMU_REG_SIZE);
+
+ /* Device translation context cache */
+ s->ctx_cache = g_hash_table_new_full(riscv_iommu_ctx_hash,
+ riscv_iommu_ctx_equal,
+ g_free, NULL);
+
+ s->iot_cache = g_hash_table_new_full(riscv_iommu_iot_hash,
+ riscv_iommu_iot_equal,
+ g_free, NULL);
+
+ s->iommus.le_next = NULL;
+ s->iommus.le_prev = NULL;
+ QLIST_INIT(&s->spaces);
+}
+
+static void riscv_iommu_realize(DeviceState *dev, Error **errp)
+{
+ RISCVIOMMUState *s = RISCV_IOMMU(dev);
+
+ s->cap |= s->version & RISCV_IOMMU_CAP_VERSION;
+ if (s->enable_msi) {
+ s->cap |= RISCV_IOMMU_CAP_MSI_FLAT | RISCV_IOMMU_CAP_MSI_MRIF;
+ }
+ if (s->enable_ats) {
+ s->cap |= RISCV_IOMMU_CAP_ATS;
+ }
+ if (s->enable_s_stage) {
+ s->cap |= RISCV_IOMMU_CAP_SV32 | RISCV_IOMMU_CAP_SV39 |
+ RISCV_IOMMU_CAP_SV48 | RISCV_IOMMU_CAP_SV57;
+ }
+ if (s->enable_g_stage) {
+ s->cap |= RISCV_IOMMU_CAP_SV32X4 | RISCV_IOMMU_CAP_SV39X4 |
+ RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4;
+ }
+
+ if (s->hpm_cntrs > 0) {
+ /* Clip number of HPM counters to maximum supported (31). */
+ if (s->hpm_cntrs > RISCV_IOMMU_IOCOUNT_NUM) {
+ s->hpm_cntrs = RISCV_IOMMU_IOCOUNT_NUM;
+ }
+ /* Enable hardware performance monitor interface */
+ s->cap |= RISCV_IOMMU_CAP_HPM;
+ }
+
+ /* Out-of-reset translation mode: OFF (DMA disabled) or BARE (passthrough) */
+ s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, s->enable_off ?
+ RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE);
+
+ /*
+ * Register complete MMIO space, including MSI/PBA registers.
+ * Note: the PCIDevice implementation will add an overlapping MR for
+ * MSI/PBA and manage it directly.
+ */
+ memory_region_init_io(&s->regs_mr, OBJECT(dev), &riscv_iommu_mmio_ops, s,
+ "riscv-iommu-regs", RISCV_IOMMU_REG_SIZE);
+
+ /* Set power-on register state */
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_CAP], s->cap);
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_FCTL], 0);
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_FCTL],
+ ~(RISCV_IOMMU_FCTL_BE | RISCV_IOMMU_FCTL_WSI));
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_DDTP],
+ ~(RISCV_IOMMU_DDTP_PPN | RISCV_IOMMU_DDTP_MODE));
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQB],
+ ~(RISCV_IOMMU_CQB_LOG2SZ | RISCV_IOMMU_CQB_PPN));
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQB],
+ ~(RISCV_IOMMU_FQB_LOG2SZ | RISCV_IOMMU_FQB_PPN));
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQB],
+ ~(RISCV_IOMMU_PQB_LOG2SZ | RISCV_IOMMU_PQB_PPN));
+ stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_CQCSR], RISCV_IOMMU_CQCSR_CQMF |
+ RISCV_IOMMU_CQCSR_CMD_TO | RISCV_IOMMU_CQCSR_CMD_ILL);
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQCSR], RISCV_IOMMU_CQCSR_CQON |
+ RISCV_IOMMU_CQCSR_BUSY);
+ stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_FQCSR], RISCV_IOMMU_FQCSR_FQMF |
+ RISCV_IOMMU_FQCSR_FQOF);
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQCSR], RISCV_IOMMU_FQCSR_FQON |
+ RISCV_IOMMU_FQCSR_BUSY);
+ stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_PQCSR], RISCV_IOMMU_PQCSR_PQMF |
+ RISCV_IOMMU_PQCSR_PQOF);
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQCSR], RISCV_IOMMU_PQCSR_PQON |
+ RISCV_IOMMU_PQCSR_BUSY);
+ stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_IPSR], ~0);
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_ICVEC], 0);
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_DDTP], s->ddtp);
+ /* If debug registers enabled. */
+ if (s->cap & RISCV_IOMMU_CAP_DBG) {
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_IOVA], 0);
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_CTL],
+ RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
+ }
+
+ /* If HPM registers are enabled. */
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
+ /* +1 for cycle counter bit. */
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOCOUNTINH],
+ ~((2 << s->hpm_cntrs) - 1));
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCYCLES], 0);
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCTR_BASE],
+ 0x00, s->hpm_cntrs * 8);
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMEVT_BASE],
+ 0x00, s->hpm_cntrs * 8);
+ }
+
+ /* Memory region for downstream access, if specified. */
+ if (s->target_mr) {
+ s->target_as = g_new0(AddressSpace, 1);
+ address_space_init(s->target_as, s->target_mr,
+ "riscv-iommu-downstream");
+ } else {
+ /* Fallback to global system memory. */
+ s->target_as = &address_space_memory;
+ }
+
+ /* Memory region for untranslated MRIF/MSI writes */
+ memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
+ "riscv-iommu-trap", ~0ULL);
+ address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
+
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
+ s->hpm_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, riscv_iommu_hpm_timer_cb, s);
+ s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
+ }
+}
+
+static void riscv_iommu_unrealize(DeviceState *dev)
+{
+ RISCVIOMMUState *s = RISCV_IOMMU(dev);
+
+ g_hash_table_unref(s->iot_cache);
+ g_hash_table_unref(s->ctx_cache);
+
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
+ g_hash_table_unref(s->hpm_event_ctr_map);
+ timer_free(s->hpm_timer);
+ }
+}
+
+void riscv_iommu_reset(RISCVIOMMUState *s)
+{
+ uint32_t reg_clr;
+ int ddtp_mode;
+
+ /*
+ * Clear DDTP while setting DDTP.MODE back to its user-configured
+ * initial setting.
+ */
+ ddtp_mode = s->enable_off ?
+ RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE;
+ s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, ddtp_mode);
+ riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_DDTP, s->ddtp);
+
+ reg_clr = RISCV_IOMMU_CQCSR_CQEN | RISCV_IOMMU_CQCSR_CIE |
+ RISCV_IOMMU_CQCSR_CQON | RISCV_IOMMU_CQCSR_BUSY;
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, 0, reg_clr);
+
+ reg_clr = RISCV_IOMMU_FQCSR_FQEN | RISCV_IOMMU_FQCSR_FIE |
+ RISCV_IOMMU_FQCSR_FQON | RISCV_IOMMU_FQCSR_BUSY;
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, 0, reg_clr);
+
+ reg_clr = RISCV_IOMMU_PQCSR_PQEN | RISCV_IOMMU_PQCSR_PIE |
+ RISCV_IOMMU_PQCSR_PQON | RISCV_IOMMU_PQCSR_BUSY;
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, 0, reg_clr);
+
+ riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_TR_REQ_CTL, 0,
+ RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
+
+ riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_IPSR, 0);
+
+ g_hash_table_remove_all(s->ctx_cache);
+ g_hash_table_remove_all(s->iot_cache);
+}
+
+static const Property riscv_iommu_properties[] = {
+ DEFINE_PROP_UINT32("version", RISCVIOMMUState, version,
+ RISCV_IOMMU_SPEC_DOT_VER),
+ DEFINE_PROP_UINT32("bus", RISCVIOMMUState, bus, 0x0),
+ DEFINE_PROP_UINT32("ioatc-limit", RISCVIOMMUState, iot_limit,
+ LIMIT_CACHE_IOT),
+ DEFINE_PROP_BOOL("intremap", RISCVIOMMUState, enable_msi, TRUE),
+ DEFINE_PROP_BOOL("ats", RISCVIOMMUState, enable_ats, TRUE),
+ DEFINE_PROP_BOOL("off", RISCVIOMMUState, enable_off, TRUE),
+ DEFINE_PROP_BOOL("s-stage", RISCVIOMMUState, enable_s_stage, TRUE),
+ DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
+ DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
+ DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
+ RISCV_IOMMU_IOCOUNT_NUM),
+};
+
+static void riscv_iommu_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ /* internal device for riscv-iommu-{pci/sys}, not user-creatable */
+ dc->user_creatable = false;
+ dc->realize = riscv_iommu_realize;
+ dc->unrealize = riscv_iommu_unrealize;
+ device_class_set_props(dc, riscv_iommu_properties);
+}
+
+static const TypeInfo riscv_iommu_info = {
+ .name = TYPE_RISCV_IOMMU,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(RISCVIOMMUState),
+ .instance_init = riscv_iommu_instance_init,
+ .class_init = riscv_iommu_class_init,
+};
+
+static const char *IOMMU_FLAG_STR[] = {
+ "NA",
+ "RO",
+ "WR",
+ "RW",
+};
+
+/* RISC-V IOMMU Memory Region - Address Translation Space */
+static IOMMUTLBEntry riscv_iommu_memory_region_translate(
+ IOMMUMemoryRegion *iommu_mr, hwaddr addr,
+ IOMMUAccessFlags flag, int iommu_idx)
+{
+ RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr);
+ RISCVIOMMUContext *ctx;
+ void *ref;
+ IOMMUTLBEntry iotlb = {
+ .iova = addr,
+ .target_as = as->iommu->target_as,
+ .addr_mask = ~0ULL,
+ .perm = flag,
+ };
+
+ ctx = riscv_iommu_ctx(as->iommu, as->devid, iommu_idx, &ref);
+ if (ctx == NULL) {
+ /* Translation disabled or invalid. */
+ iotlb.addr_mask = 0;
+ iotlb.perm = IOMMU_NONE;
+ } else if (riscv_iommu_translate(as->iommu, ctx, &iotlb, true)) {
+ /* Translation disabled or fault reported. */
+ iotlb.addr_mask = 0;
+ iotlb.perm = IOMMU_NONE;
+ }
+
+ /* Trace all DMA translations with the original access flags. */
+ trace_riscv_iommu_dma(as->iommu->parent_obj.id, PCI_BUS_NUM(as->devid),
+ PCI_SLOT(as->devid), PCI_FUNC(as->devid), iommu_idx,
+ IOMMU_FLAG_STR[flag & IOMMU_RW], iotlb.iova,
+ iotlb.translated_addr);
+
+ riscv_iommu_ctx_put(as->iommu, ref);
+
+ return iotlb;
+}
+
+static int riscv_iommu_memory_region_notify(
+ IOMMUMemoryRegion *iommu_mr, IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new, Error **errp)
+{
+ RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr);
+
+ if (old == IOMMU_NOTIFIER_NONE) {
+ as->notifier = true;
+ trace_riscv_iommu_notifier_add(iommu_mr->parent_obj.name);
+ } else if (new == IOMMU_NOTIFIER_NONE) {
+ as->notifier = false;
+ trace_riscv_iommu_notifier_del(iommu_mr->parent_obj.name);
+ }
+
+ return 0;
+}
+
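+/* PCI class 0x08 (base system peripheral), sub-class 0x06: IOMMU */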
+static inline bool pci_is_iommu(PCIDevice *pdev)
+{
+ return pci_get_word(pdev->config + PCI_CLASS_DEVICE) == 0x0806;
+}
+
+static AddressSpace *riscv_iommu_find_as(PCIBus *bus, void *opaque, int devfn)
+{
+ RISCVIOMMUState *s = (RISCVIOMMUState *) opaque;
+ PCIDevice *pdev = pci_find_device(bus, pci_bus_num(bus), devfn);
+ AddressSpace *as = NULL;
+
+ if (pdev && pci_is_iommu(pdev)) {
+ return s->target_as;
+ }
+
+ /* Find first registered IOMMU device */
+ while (s->iommus.le_prev) {
+ s = *(s->iommus.le_prev);
+ }
+
+ /* Find first matching IOMMU */
+ while (s != NULL && as == NULL) {
+ as = riscv_iommu_space(s, PCI_BUILD_BDF(pci_bus_num(bus), devfn));
+ s = s->iommus.le_next;
+ }
+
+ return as ? as : &address_space_memory;
+}
+
+static const PCIIOMMUOps riscv_iommu_ops = {
+ .get_address_space = riscv_iommu_find_as,
+};
+
+void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
+ Error **errp)
+{
+ if (bus->iommu_ops &&
+ bus->iommu_ops->get_address_space == riscv_iommu_find_as) {
+ /* Allow multiple IOMMUs on the same PCIe bus, link known devices */
+ RISCVIOMMUState *last = (RISCVIOMMUState *)bus->iommu_opaque;
+ QLIST_INSERT_AFTER(last, iommu, iommus);
+ } else if (!bus->iommu_ops && !bus->iommu_opaque) {
+ pci_setup_iommu(bus, &riscv_iommu_ops, iommu);
+ } else {
+ error_setg(errp, "can't register secondary IOMMU for PCI bus #%d",
+ pci_bus_num(bus));
+ }
+}
+
+static int riscv_iommu_memory_region_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs)
+{
+ return attrs.unspecified ? RISCV_IOMMU_NOPROCID : (int)attrs.pid;
+}
+
+static int riscv_iommu_memory_region_index_len(IOMMUMemoryRegion *iommu_mr)
+{
+ RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr);
+ return 1 << as->iommu->pid_bits;
+}
+
+static void riscv_iommu_memory_region_init(ObjectClass *klass, const void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = riscv_iommu_memory_region_translate;
+ imrc->notify_flag_changed = riscv_iommu_memory_region_notify;
+ imrc->attrs_to_index = riscv_iommu_memory_region_index;
+ imrc->num_indexes = riscv_iommu_memory_region_index_len;
+}
+
+static const TypeInfo riscv_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_RISCV_IOMMU_MEMORY_REGION,
+ .class_init = riscv_iommu_memory_region_init,
+};
+
+static void riscv_iommu_register_mr_types(void)
+{
+ type_register_static(&riscv_iommu_memory_region_info);
+ type_register_static(&riscv_iommu_info);
+}
+
+type_init(riscv_iommu_register_mr_types);
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
new file mode 100644
index 0000000..a31aa62
--- /dev/null
+++ b/hw/riscv/riscv-iommu.h
@@ -0,0 +1,157 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_IOMMU_STATE_H
+#define HW_RISCV_IOMMU_STATE_H
+
+#include "qom/object.h"
+#include "hw/qdev-properties.h"
+#include "system/dma.h"
+#include "hw/riscv/iommu.h"
+#include "hw/riscv/riscv-iommu-bits.h"
+
+typedef enum riscv_iommu_igs_modes riscv_iommu_igs_mode;
+
+struct RISCVIOMMUState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ uint32_t version; /* Reported interface version number */
+ uint32_t pid_bits; /* process identifier width */
+ uint32_t bus; /* PCI bus mapping for non-root endpoints */
+
+ uint64_t cap; /* IOMMU supported capabilities */
+ uint64_t fctl; /* IOMMU enabled features */
+ uint64_t icvec_avail_vectors; /* Available interrupt vectors in ICVEC */
+
+ bool enable_off; /* Enable out-of-reset OFF mode (DMA disabled) */
+ bool enable_msi; /* Enable MSI remapping */
+ bool enable_ats; /* Enable ATS support */
+ bool enable_s_stage; /* Enable S/VS-Stage translation */
+ bool enable_g_stage; /* Enable G-Stage translation */
+
+ /* IOMMU Internal State */
+ uint64_t ddtp; /* Validated Device Directory Tree Root Pointer */
+
+ dma_addr_t cq_addr; /* Command queue base physical address */
+ dma_addr_t fq_addr; /* Fault/event queue base physical address */
+ dma_addr_t pq_addr; /* Page request queue base physical address */
+
+ uint32_t cq_mask; /* Command queue index bit mask */
+ uint32_t fq_mask; /* Fault/event queue index bit mask */
+ uint32_t pq_mask; /* Page request queue index bit mask */
+
+ /* interrupt notifier */
+ void (*notify)(RISCVIOMMUState *iommu, unsigned vector);
+
+ /* IOMMU target address space */
+ AddressSpace *target_as;
+ MemoryRegion *target_mr;
+
+ /* MSI / MRIF access trap */
+ AddressSpace trap_as;
+ MemoryRegion trap_mr;
+
+ GHashTable *ctx_cache; /* Device translation Context Cache */
+
+ GHashTable *iot_cache; /* IO Translated Address Cache */
+ unsigned iot_limit; /* IO Translation Cache size limit */
+
+ /* MMIO Hardware Interface */
+ MemoryRegion regs_mr;
+ uint8_t *regs_rw; /* register state (user write) */
+ uint8_t *regs_wc; /* write-1-to-clear mask */
+ uint8_t *regs_ro; /* read-only mask */
+
+ QLIST_ENTRY(RISCVIOMMUState) iommus;
+ QLIST_HEAD(, RISCVIOMMUSpace) spaces;
+
+ /* HPM cycle counter */
+ QEMUTimer *hpm_timer;
+ uint64_t hpmcycle_val; /* Current value of cycle register */
+ uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
+ uint64_t irq_overflow_left; /* Value beyond INT64_MAX after overflow */
+
+ /* HPM event counters */
+ GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
+ uint8_t hpm_cntrs;
+};
+
+void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
+ Error **errp);
+void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
+void riscv_iommu_reset(RISCVIOMMUState *s);
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
+
+typedef struct RISCVIOMMUContext RISCVIOMMUContext;
+/* Device translation context state. */
+struct RISCVIOMMUContext {
+ uint64_t devid:24; /* Requester Id, AKA device_id */
+ uint64_t process_id:20; /* Process ID. PASID for PCIe */
+ uint64_t tc; /* Translation Control */
+ uint64_t ta; /* Translation Attributes */
+ uint64_t satp; /* S-Stage address translation and protection */
+ uint64_t gatp; /* G-Stage address translation and protection */
+ uint64_t msi_addr_mask; /* MSI filtering - address mask */
+ uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
+ uint64_t msiptp; /* MSI redirection page table pointer */
+};
+
+/* private helpers */
+
+/* Register helper functions */
+static inline uint32_t riscv_iommu_reg_mod32(RISCVIOMMUState *s,
+ unsigned idx, uint32_t set, uint32_t clr)
+{
+ uint32_t val = ldl_le_p(s->regs_rw + idx);
+ stl_le_p(s->regs_rw + idx, (val & ~clr) | set);
+ return val;
+}
+
+static inline void riscv_iommu_reg_set32(RISCVIOMMUState *s, unsigned idx,
+ uint32_t set)
+{
+ stl_le_p(s->regs_rw + idx, set);
+}
+
+static inline uint32_t riscv_iommu_reg_get32(RISCVIOMMUState *s, unsigned idx)
+{
+ return ldl_le_p(s->regs_rw + idx);
+}
+
+static inline uint64_t riscv_iommu_reg_mod64(RISCVIOMMUState *s, unsigned idx,
+ uint64_t set, uint64_t clr)
+{
+ uint64_t val = ldq_le_p(s->regs_rw + idx);
+ stq_le_p(s->regs_rw + idx, (val & ~clr) | set);
+ return val;
+}
+
+static inline void riscv_iommu_reg_set64(RISCVIOMMUState *s, unsigned idx,
+ uint64_t set)
+{
+ stq_le_p(s->regs_rw + idx, set);
+}
+
+static inline uint64_t riscv_iommu_reg_get64(RISCVIOMMUState *s,
+ unsigned idx)
+{
+ return ldq_le_p(s->regs_rw + idx);
+}
+#endif
diff --git a/hw/riscv/riscv_hart.c b/hw/riscv/riscv_hart.c
index 613ea2a..7f26760 100644
--- a/hw/riscv/riscv_hart.c
+++ b/hw/riscv/riscv_hart.c
@@ -21,19 +21,38 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
+#include "system/qtest.h"
+#include "qemu/cutils.h"
#include "hw/sysbus.h"
#include "target/riscv/cpu.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/riscv_hart.h"
+#include "qemu/error-report.h"
-static Property riscv_harts_props[] = {
+static const Property riscv_harts_props[] = {
DEFINE_PROP_UINT32("num-harts", RISCVHartArrayState, num_harts, 1),
DEFINE_PROP_UINT32("hartid-base", RISCVHartArrayState, hartid_base, 0),
DEFINE_PROP_STRING("cpu-type", RISCVHartArrayState, cpu_type),
DEFINE_PROP_UINT64("resetvec", RISCVHartArrayState, resetvec,
DEFAULT_RSTVEC),
- DEFINE_PROP_END_OF_LIST(),
+
+ /*
+ * Smrnmi implementation-defined interrupt and exception trap handlers.
+ *
+ * When an RNMI interrupt is detected, the hart then enters M-mode and
+ * jumps to the address defined by "rnmi-interrupt-vector".
+ *
+ * When the hart encounters an exception while executing in M-mode with
+ * the mnstatus.NMIE bit clear, the hart then jumps to the address
+ * defined by "rnmi-exception-vector".
+ */
+ DEFINE_PROP_ARRAY("rnmi-interrupt-vector", RISCVHartArrayState,
+ num_rnmi_irqvec, rnmi_irqvec, qdev_prop_uint64,
+ uint64_t),
+ DEFINE_PROP_ARRAY("rnmi-exception-vector", RISCVHartArrayState,
+ num_rnmi_excpvec, rnmi_excpvec, qdev_prop_uint64,
+ uint64_t),
};
static void riscv_harts_cpu_reset(void *opaque)
@@ -42,11 +61,85 @@ static void riscv_harts_cpu_reset(void *opaque)
cpu_reset(CPU(cpu));
}
+#ifndef CONFIG_USER_ONLY
+static void csr_call(char *cmd, uint64_t cpu_num, int csrno, uint64_t *val)
+{
+ RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(cpu_num));
+ CPURISCVState *env = &cpu->env;
+
+ int ret = RISCV_EXCP_NONE;
+ if (strcmp(cmd, "get_csr") == 0) {
+ ret = riscv_csrr(env, csrno, (target_ulong *)val);
+ } else if (strcmp(cmd, "set_csr") == 0) {
+ ret = riscv_csrrw(env, csrno, NULL, *(target_ulong *)val,
+ MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
+ }
+
+ g_assert(ret == RISCV_EXCP_NONE);
+}
+
+static bool csr_qtest_callback(CharBackend *chr, gchar **words)
+{
+ if (strcmp(words[0], "csr") == 0) {
+
+ uint64_t cpu;
+ uint64_t val;
+ int rc, csr;
+
+ rc = qemu_strtou64(words[2], NULL, 0, &cpu);
+ g_assert(rc == 0);
+ rc = qemu_strtoi(words[3], NULL, 0, &csr);
+ g_assert(rc == 0);
+ rc = qemu_strtou64(words[4], NULL, 0, &val);
+ g_assert(rc == 0);
+ csr_call(words[1], cpu, csr, &val);
+
+ qtest_sendf(chr, "OK 0 "TARGET_FMT_lx"\n", (target_ulong)val);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void riscv_cpu_register_csr_qtest_callback(void)
+{
+ static bool first = true;
+ if (first) {
+ first = false;
+ qtest_set_command_cb(csr_qtest_callback);
+ }
+}
+#endif
+
static bool riscv_hart_realize(RISCVHartArrayState *s, int idx,
char *cpu_type, Error **errp)
{
object_initialize_child(OBJECT(s), "harts[*]", &s->harts[idx], cpu_type);
qdev_prop_set_uint64(DEVICE(&s->harts[idx]), "resetvec", s->resetvec);
+
+ if (s->harts[idx].cfg.ext_smrnmi) {
+ if (idx < s->num_rnmi_irqvec) {
+ qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
+ "rnmi-interrupt-vector", s->rnmi_irqvec[idx]);
+ }
+
+ if (idx < s->num_rnmi_excpvec) {
+ qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
+ "rnmi-exception-vector", s->rnmi_excpvec[idx]);
+ }
+ } else {
+ if (s->num_rnmi_irqvec > 0) {
+ warn_report_once("rnmi-interrupt-vector property is ignored "
+ "because Smrnmi extension is not enabled.");
+ }
+
+ if (s->num_rnmi_excpvec > 0) {
+ warn_report_once("rnmi-exception-vector property is ignored "
+ "because Smrnmi extension is not enabled.");
+ }
+ }
+
s->harts[idx].env.mhartid = s->hartid_base + idx;
qemu_register_reset(riscv_harts_cpu_reset, &s->harts[idx]);
return qdev_realize(DEVICE(&s->harts[idx]), NULL, errp);
@@ -59,6 +152,10 @@ static void riscv_harts_realize(DeviceState *dev, Error **errp)
s->harts = g_new0(RISCVCPU, s->num_harts);
+#ifndef CONFIG_USER_ONLY
+ riscv_cpu_register_csr_qtest_callback();
+#endif
+
for (n = 0; n < s->num_harts; n++) {
if (!riscv_hart_realize(s, n, s->cpu_type, errp)) {
return;
@@ -66,7 +163,7 @@ static void riscv_harts_realize(DeviceState *dev, Error **errp)
}
}
-static void riscv_harts_class_init(ObjectClass *klass, void *data)
+static void riscv_harts_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c
index 3888034..3e7f441 100644
--- a/hw/riscv/shakti_c.c
+++ b/hw/riscv/shakti_c.c
@@ -23,9 +23,9 @@
#include "qemu/error-report.h"
#include "hw/intc/sifive_plic.h"
#include "hw/intc/riscv_aclint.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/qdev-properties.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/riscv/boot.h"
static const struct MemmapEntry {
@@ -45,6 +45,7 @@ static void shakti_c_machine_state_init(MachineState *mstate)
{
ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate);
MemoryRegion *system_memory = get_system_memory();
+ hwaddr firmware_load_addr = shakti_c_memmap[SHAKTI_C_RAM].base;
/* Initialize SoC */
object_initialize_child(OBJECT(mstate), "soc", &sms->soc,
@@ -56,23 +57,21 @@ static void shakti_c_machine_state_init(MachineState *mstate)
shakti_c_memmap[SHAKTI_C_RAM].base,
mstate->ram);
+ if (mstate->firmware) {
+ riscv_load_firmware(mstate->firmware, &firmware_load_addr, NULL);
+ }
+
/* ROM reset vector */
- riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus,
- shakti_c_memmap[SHAKTI_C_RAM].base,
+ riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, firmware_load_addr,
shakti_c_memmap[SHAKTI_C_ROM].base,
shakti_c_memmap[SHAKTI_C_ROM].size, 0, 0);
- if (mstate->firmware) {
- riscv_load_firmware(mstate->firmware,
- shakti_c_memmap[SHAKTI_C_RAM].base,
- NULL);
- }
}
static void shakti_c_machine_instance_init(Object *obj)
{
}
-static void shakti_c_machine_class_init(ObjectClass *klass, void *data)
+static void shakti_c_machine_class_init(ObjectClass *klass, const void *data)
{
MachineClass *mc = MACHINE_CLASS(klass);
static const char * const valid_cpu_types[] = {
@@ -143,7 +142,7 @@ static void shakti_c_soc_state_realize(DeviceState *dev, Error **errp)
shakti_c_memmap[SHAKTI_C_ROM].base, &sss->rom);
}
-static void shakti_c_soc_class_init(ObjectClass *klass, void *data)
+static void shakti_c_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = shakti_c_soc_state_realize;
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index 87d9602..7baed19 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -35,7 +35,6 @@
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
#include "hw/misc/unimp.h"
#include "target/riscv/cpu.h"
#include "hw/riscv/riscv_hart.h"
@@ -47,7 +46,7 @@
#include "hw/misc/sifive_e_prci.h"
#include "hw/misc/sifive_e_aon.h"
#include "chardev/char.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static const MemMapEntry sifive_e_memmap[] = {
[SIFIVE_E_DEV_DEBUG] = { 0x0, 0x1000 },
@@ -79,6 +78,7 @@ static void sifive_e_machine_init(MachineState *machine)
SiFiveEState *s = RISCV_E_MACHINE(machine);
MemoryRegion *sys_mem = get_system_memory();
int i;
+ RISCVBootInfo boot_info;
if (machine->ram_size != mc->default_ram_size) {
char *sz = size_to_str(mc->default_ram_size);
@@ -114,8 +114,9 @@ static void sifive_e_machine_init(MachineState *machine)
rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec),
memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);
+ riscv_boot_info_init(&boot_info, &s->soc.cpus);
if (machine->kernel_filename) {
- riscv_load_kernel(machine, &s->soc.cpus,
+ riscv_load_kernel(machine, &boot_info,
memmap[SIFIVE_E_DEV_DTIM].base,
false, NULL);
}
@@ -142,7 +143,7 @@ static void sifive_e_machine_instance_init(Object *obj)
s->revb = false;
}
-static void sifive_e_machine_class_init(ObjectClass *oc, void *data)
+static void sifive_e_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -283,7 +284,7 @@ static void sifive_e_soc_realize(DeviceState *dev, Error **errp)
&s->xip_mem);
}
-static void sifive_e_soc_class_init(ObjectClass *oc, void *data)
+static void sifive_e_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index af5f923..d69f942 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -43,7 +43,6 @@
#include "hw/irq.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
#include "hw/cpu/cluster.h"
#include "hw/misc/unimp.h"
#include "hw/sd/sd.h"
@@ -57,9 +56,9 @@
#include "hw/intc/sifive_plic.h"
#include "chardev/char.h"
#include "net/eth.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/device_tree.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include <libfdt.h>
@@ -515,17 +514,19 @@ static void sifive_u_machine_init(MachineState *machine)
SiFiveUState *s = RISCV_U_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *flash0 = g_new(MemoryRegion, 1);
- target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base;
+ hwaddr start_addr = memmap[SIFIVE_U_DEV_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
const char *firmware_name;
uint32_t start_addr_hi32 = 0x00000000;
+ uint32_t fdt_load_addr_hi32 = 0x00000000;
int i;
- uint32_t fdt_load_addr;
+ uint64_t fdt_load_addr;
uint64_t kernel_entry;
DriveInfo *dinfo;
BlockBackend *blk;
DeviceState *flash_dev, *sd_dev, *card_dev;
qemu_irq flash_cs, sd_cs;
+ RISCVBootInfo boot_info;
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_U_SOC);
@@ -589,14 +590,15 @@ static void sifive_u_machine_init(MachineState *machine)
firmware_name = riscv_default_firmware_name(&s->soc.u_cpus);
firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
- start_addr, NULL);
+ &start_addr, NULL);
+ riscv_boot_info_init(&boot_info, &s->soc.u_cpus);
if (machine->kernel_filename) {
- kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
+ kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info,
firmware_end_addr);
-
- kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
- kernel_start_addr, true, NULL);
+ riscv_load_kernel(machine, &boot_info, kernel_start_addr,
+ true, NULL);
+ kernel_entry = boot_info.image_low_addr;
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
@@ -607,11 +609,12 @@ static void sifive_u_machine_init(MachineState *machine)
fdt_load_addr = riscv_compute_fdt_addr(memmap[SIFIVE_U_DEV_DRAM].base,
memmap[SIFIVE_U_DEV_DRAM].size,
- machine);
+ machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
if (!riscv_is_32bit(&s->soc.u_cpus)) {
start_addr_hi32 = (uint64_t)start_addr >> 32;
+ fdt_load_addr_hi32 = fdt_load_addr >> 32;
}
/* reset vector */
@@ -626,7 +629,7 @@ static void sifive_u_machine_init(MachineState *machine)
start_addr, /* start: .dword */
start_addr_hi32,
fdt_load_addr, /* fdt_laddr: .dword */
- 0x00000000,
+ fdt_load_addr_hi32,
0x00000000,
/* fw_dyn: */
};
@@ -646,7 +649,8 @@ static void sifive_u_machine_init(MachineState *machine)
rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec),
memmap[SIFIVE_U_DEV_MROM].base, &address_space_memory);
- riscv_rom_copy_firmware_info(machine, memmap[SIFIVE_U_DEV_MROM].base,
+ riscv_rom_copy_firmware_info(machine, &s->soc.u_cpus,
+ memmap[SIFIVE_U_DEV_MROM].base,
memmap[SIFIVE_U_DEV_MROM].size,
sizeof(reset_vec), kernel_entry);
@@ -709,7 +713,7 @@ static void sifive_u_machine_instance_init(Object *obj)
object_property_set_description(obj, "serial", "Board serial number");
}
-static void sifive_u_machine_class_init(ObjectClass *oc, void *data)
+static void sifive_u_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -720,6 +724,7 @@ static void sifive_u_machine_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = SIFIVE_U_CPU;
mc->default_cpus = mc->min_cpus;
mc->default_ram_id = "riscv.sifive.u.ram";
+ mc->auto_create_sdcard = true;
object_class_property_add_bool(oc, "start-in-flash",
sifive_u_machine_get_start_in_flash,
@@ -936,13 +941,12 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_QSPI2_IRQ));
}
-static Property sifive_u_soc_props[] = {
+static const Property sifive_u_soc_props[] = {
DEFINE_PROP_UINT32("serial", SiFiveUSoCState, serial, OTP_SERIAL),
DEFINE_PROP_STRING("cpu-type", SiFiveUSoCState, cpu_type),
- DEFINE_PROP_END_OF_LIST()
};
-static void sifive_u_soc_class_init(ObjectClass *oc, void *data)
+static void sifive_u_soc_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
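Editor's note: the sifive_u hunks above widen fdt_load_addr from uint32_t to uint64_t and feed its upper half into the reset vector (fdt_load_addr_hi32) instead of a hard-coded zero, so an FDT placed above 4 GiB keeps its full address. A minimal standalone sketch of that lo/hi split; the address value is made up for illustration:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* example FDT load address that does not fit in 32 bits */
        uint64_t fdt_load_addr = 0x1fff00000ULL;

        /* the reset vector's fdt_laddr .dword is emitted as two 32-bit
         * words, low half first, matching the reset_vec[] layout above */
        uint32_t fdt_laddr_lo = (uint32_t)fdt_load_addr;
        uint32_t fdt_laddr_hi = (uint32_t)(fdt_load_addr >> 32);

        printf("fdt_laddr lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n",
               fdt_laddr_lo, fdt_laddr_hi);
        return 0;
    }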
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index 6407439..641aae8 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -36,8 +36,8 @@
#include "hw/char/riscv_htif.h"
#include "hw/intc/riscv_aclint.h"
#include "chardev/char.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
+#include "system/device_tree.h"
+#include "system/system.h"
#include <libfdt.h>
@@ -198,13 +198,15 @@ static void spike_board_init(MachineState *machine)
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
target_ulong firmware_end_addr = memmap[SPIKE_DRAM].base;
+ hwaddr firmware_load_addr = memmap[SPIKE_DRAM].base;
target_ulong kernel_start_addr;
char *firmware_name;
- uint32_t fdt_load_addr;
+ uint64_t fdt_load_addr;
uint64_t kernel_entry;
char *soc_name;
int i, base_hartid, hart_count;
bool htif_custom_base = false;
+ RISCVBootInfo boot_info;
/* Check socket count limit */
if (SPIKE_SOCKETS_MAX < riscv_socket_count(machine)) {
@@ -290,7 +292,7 @@ static void spike_board_init(MachineState *machine)
/* Load firmware */
if (firmware_name) {
firmware_end_addr = riscv_load_firmware(firmware_name,
- memmap[SPIKE_DRAM].base,
+ &firmware_load_addr,
htif_symbol_callback);
g_free(firmware_name);
}
@@ -299,13 +301,14 @@ static void spike_board_init(MachineState *machine)
create_fdt(s, memmap, riscv_is_32bit(&s->soc[0]), htif_custom_base);
/* Load kernel */
+ riscv_boot_info_init(&boot_info, &s->soc[0]);
if (machine->kernel_filename) {
- kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
+ kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, &s->soc[0],
- kernel_start_addr,
- true, htif_symbol_callback);
+ riscv_load_kernel(machine, &boot_info, kernel_start_addr,
+ true, htif_symbol_callback);
+ kernel_entry = boot_info.image_low_addr;
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
@@ -316,11 +319,11 @@ static void spike_board_init(MachineState *machine)
fdt_load_addr = riscv_compute_fdt_addr(memmap[SPIKE_DRAM].base,
memmap[SPIKE_DRAM].size,
- machine);
+ machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
/* load the reset vector */
- riscv_setup_rom_reset_vec(machine, &s->soc[0], memmap[SPIKE_DRAM].base,
+ riscv_setup_rom_reset_vec(machine, &s->soc[0], firmware_load_addr,
memmap[SPIKE_MROM].base,
memmap[SPIKE_MROM].size, kernel_entry,
fdt_load_addr);
@@ -339,7 +342,7 @@ static void spike_machine_instance_init(Object *obj)
{
}
-static void spike_machine_class_init(ObjectClass *oc, void *data)
+static void spike_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
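Editor's note: spike.c now follows the same boot flow as sifive_u.c above: a RISCVBootInfo is initialized, passed through the loaders, and the entry point is read back from boot_info.image_low_addr rather than returned by riscv_load_kernel(). A toy analogue of that out-parameter pattern is sketched below; the struct layout and helper are invented for illustration (only image_low_addr is visible in the patch, and the real RISCVBootInfo has other fields):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    typedef struct {
        uint64_t image_low_addr;   /* lowest address the kernel was loaded at */
        uint64_t image_size;       /* hypothetical extra field */
    } ToyBootInfo;

    static void toy_load_kernel(ToyBootInfo *info, uint64_t start_addr)
    {
        /* pretend a 2 MiB image was loaded at start_addr */
        info->image_low_addr = start_addr;
        info->image_size = 2 << 20;
    }

    int main(void)
    {
        ToyBootInfo boot_info = { 0 };

        toy_load_kernel(&boot_info, 0x80200000ULL);
        /* the board reads the entry point back, as spike does with
         * kernel_entry = boot_info.image_low_addr */
        printf("entry = 0x%" PRIx64 "\n", boot_info.image_low_addr);
        return 0;
    }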
diff --git a/hw/riscv/trace-events b/hw/riscv/trace-events
new file mode 100644
index 0000000..b50b14a
--- /dev/null
+++ b/hw/riscv/trace-events
@@ -0,0 +1,26 @@
+# See documentation at docs/devel/tracing.rst
+
+# riscv-iommu.c
+riscv_iommu_new(const char *id, unsigned b, unsigned d, unsigned f) "%s: device attached %04x:%02x.%d"
+riscv_iommu_flt(const char *id, unsigned b, unsigned d, unsigned f, uint64_t reason, uint64_t iova) "%s: fault %04x:%02x.%u reason: 0x%"PRIx64" iova: 0x%"PRIx64
+riscv_iommu_pri(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova) "%s: page request %04x:%02x.%u iova: 0x%"PRIx64
+riscv_iommu_dma(const char *id, unsigned b, unsigned d, unsigned f, unsigned pasid, const char *dir, uint64_t iova, uint64_t phys) "%s: translate %04x:%02x.%u #%u %s 0x%"PRIx64" -> 0x%"PRIx64
+riscv_iommu_msi(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova, uint64_t phys) "%s: translate %04x:%02x.%u MSI 0x%"PRIx64" -> 0x%"PRIx64
+riscv_iommu_mrif_notification(const char *id, uint32_t nid, uint64_t phys) "%s: sent MRIF notification 0x%x to 0x%"PRIx64
+riscv_iommu_cmd(const char *id, uint64_t l, uint64_t u) "%s: command 0x%"PRIx64" 0x%"PRIx64
+riscv_iommu_notifier_add(const char *id) "%s: dev-iotlb notifier added"
+riscv_iommu_notifier_del(const char *id) "%s: dev-iotlb notifier removed"
+riscv_iommu_notify_int_vector(uint32_t cause, uint32_t vector) "Interrupt cause 0x%x sent via vector 0x%x"
+riscv_iommu_icvec_write(uint32_t orig, uint32_t actual) "ICVEC write: incoming 0x%x actual 0x%x"
+riscv_iommu_ats(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova) "%s: translate request %04x:%02x.%u iova: 0x%"PRIx64
+riscv_iommu_ats_inval(const char *id) "%s: dev-iotlb invalidate"
+riscv_iommu_ats_prgr(const char *id) "%s: dev-iotlb page request group response"
+riscv_iommu_sys_irq_sent(uint32_t vector) "IRQ sent to vector %u"
+riscv_iommu_sys_msi_sent(uint32_t vector, uint64_t msi_addr, uint32_t msi_data, uint32_t result) "MSI sent to vector %u msi_addr 0x%"PRIx64" msi_data 0x%x result %u"
+riscv_iommu_sys_reset_hold(int reset_type) "reset type %d"
+riscv_iommu_pci_reset_hold(int reset_type) "reset type %d"
+riscv_iommu_hpm_read(uint64_t cycle, uint32_t inhibit, uint64_t ctr_prev, uint64_t ctr_val) "cycle 0x%"PRIx64" inhibit 0x%x ctr_prev 0x%"PRIx64" ctr_val 0x%"PRIx64
+riscv_iommu_hpm_incr_ctr(uint64_t cntr_val) "cntr_val 0x%"PRIx64
+riscv_iommu_hpm_iocntinh_cy(bool prev_cy_inh) "prev_cy_inh %d"
+riscv_iommu_hpm_cycle_write(uint32_t ovf, uint64_t val) "ovf 0x%x val 0x%"PRIx64
+riscv_iommu_hpm_evt_write(uint32_t ctr_idx, uint32_t ovf, uint64_t val) "ctr_idx 0x%x ovf 0x%x val 0x%"PRIx64
diff --git a/hw/riscv/trace.h b/hw/riscv/trace.h
new file mode 100644
index 0000000..8c0e3ca
--- /dev/null
+++ b/hw/riscv/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_riscv.h"
diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
index 0925528..8b5683d 100644
--- a/hw/riscv/virt-acpi-build.c
+++ b/hw/riscv/virt-acpi-build.c
@@ -38,7 +38,7 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#define ACPI_BUILD_TABLE_SIZE 0x20000
#define ACPI_BUILD_INTC_ID(socket, index) ((socket << 24) | (index))
@@ -141,12 +141,36 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
}
}
+static void acpi_dsdt_add_plic_aplic(Aml *scope, uint8_t socket_count,
+ uint64_t mmio_base, uint64_t mmio_size,
+ const char *hid)
+{
+ uint64_t plic_aplic_addr;
+ uint32_t gsi_base;
+ uint8_t socket;
+
+ for (socket = 0; socket < socket_count; socket++) {
+ plic_aplic_addr = mmio_base + mmio_size * socket;
+ gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
+ Aml *dev = aml_device("IC%.02X", socket);
+ aml_append(dev, aml_name_decl("_HID", aml_string("%s", hid)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(socket)));
+ aml_append(dev, aml_name_decl("_GSB", aml_int(gsi_base)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(plic_aplic_addr, mmio_size,
+ AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+ }
+}
+
static void
acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
uint32_t uart_irq)
{
Aml *dev = aml_device("COM0");
- aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
+ aml_append(dev, aml_name_decl("_HID", aml_string("RSCV0003")));
aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
@@ -175,15 +199,42 @@ acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
}
/*
+ * Add DSDT entry for the IOMMU platform device.
+ * The ACPI ID for the IOMMU is defined in section 6.2 of the RISC-V BRS spec.
+ * https://github.com/riscv-non-isa/riscv-brs/releases/download/v0.8/riscv-brs-spec.pdf
+ */
+static void acpi_dsdt_add_iommu_sys(Aml *scope, const MemMapEntry *iommu_memmap,
+ uint32_t iommu_irq)
+{
+ uint32_t i;
+
+ Aml *dev = aml_device("IMU0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("RSCV0004")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(iommu_memmap->base,
+ iommu_memmap->size, AML_READ_WRITE));
+ for (i = iommu_irq; i < iommu_irq + 4; i++) {
+ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_EDGE, AML_ACTIVE_LOW,
+ AML_EXCLUSIVE, &i, 1));
+ }
+
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+}
+
+/*
* Serial Port Console Redirection Table (SPCR)
- * Rev: 1.07
+ * Rev: 1.10
*/
static void
spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s)
{
+ const char name[] = ".";
AcpiSpcrData serial = {
- .interface_type = 0, /* 16550 compatible */
+ .interface_type = 0x12, /* 16550 compatible */
.base_addr.id = AML_AS_SYSTEM_MEMORY,
.base_addr.width = 32,
.base_addr.offset = 0,
@@ -205,9 +256,14 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s)
.pci_function = 0,
.pci_flags = 0,
.pci_segment = 0,
+ .uart_clk_freq = 0,
+ .precise_baudrate = 0,
+ .namespace_string_length = sizeof(name),
+ .namespace_string_offset = 88,
};
- build_spcr(table_data, linker, &serial, 2, s->oem_id, s->oem_table_id);
+ build_spcr(table_data, linker, &serial, 4, s->oem_id, s->oem_table_id,
+ name);
}
/* RHCT Node[N] starts at offset 56 */
@@ -231,7 +287,7 @@ static void build_rhct(GArray *table_data,
uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0;
RISCVCPU *cpu = &s->soc[0].harts[0];
uint32_t mmu_offset = 0;
- uint8_t satp_mode_max;
+ bool rv32 = riscv_cpu_is_32bit(cpu);
g_autofree char *isa = NULL;
AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id,
@@ -251,7 +307,7 @@ static void build_rhct(GArray *table_data,
num_rhct_nodes++;
}
- if (cpu->cfg.satp_mode.supported != 0) {
+ if (!rv32 && cpu->cfg.max_satp_mode >= VM_1_10_SV39) {
num_rhct_nodes++;
}
@@ -311,22 +367,21 @@ static void build_rhct(GArray *table_data,
}
/* MMU node structure */
- if (cpu->cfg.satp_mode.supported != 0) {
- satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
+ if (!rv32 && cpu->cfg.max_satp_mode >= VM_1_10_SV39) {
mmu_offset = table_data->len - table.table_offset;
build_append_int_noprefix(table_data, 2, 2); /* Type */
build_append_int_noprefix(table_data, 8, 2); /* Length */
build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
build_append_int_noprefix(table_data, 0, 1); /* Reserved */
/* MMU Type */
- if (satp_mode_max == VM_1_10_SV57) {
+ if (cpu->cfg.max_satp_mode == VM_1_10_SV57) {
build_append_int_noprefix(table_data, 2, 1); /* Sv57 */
- } else if (satp_mode_max == VM_1_10_SV48) {
+ } else if (cpu->cfg.max_satp_mode == VM_1_10_SV48) {
build_append_int_noprefix(table_data, 1, 1); /* Sv48 */
- } else if (satp_mode_max == VM_1_10_SV39) {
+ } else if (cpu->cfg.max_satp_mode == VM_1_10_SV39) {
build_append_int_noprefix(table_data, 0, 1); /* Sv39 */
} else {
- assert(1);
+ g_assert_not_reached();
}
}
@@ -411,7 +466,18 @@ static void build_dsdt(GArray *table_data,
socket_count = riscv_socket_count(ms);
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
+ acpi_dsdt_add_plic_aplic(scope, socket_count, memmap[VIRT_PLIC].base,
+ memmap[VIRT_PLIC].size, "RSCV0001");
+ } else {
+ acpi_dsdt_add_plic_aplic(scope, socket_count, memmap[VIRT_APLIC_S].base,
+ memmap[VIRT_APLIC_S].size, "RSCV0002");
+ }
+
acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ);
+ if (virt_is_iommu_sys_enabled(s)) {
+ acpi_dsdt_add_iommu_sys(scope, &memmap[VIRT_IOMMU_SYS], IOMMU_SYS_IRQ);
+ }
if (socket_count == 1) {
virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
@@ -564,6 +630,187 @@ static void build_madt(GArray *table_data,
acpi_table_end(linker, &table);
}
+#define ID_MAPPING_ENTRY_SIZE 20
+#define IOMMU_ENTRY_SIZE 40
+#define RISCV_INTERRUPT_WIRE_OFFSSET 40
+#define ROOT_COMPLEX_ENTRY_SIZE 20
+#define RIMT_NODE_OFFSET 48
+
+/*
+ * ID Mapping Structure
+ */
+static void build_rimt_id_mapping(GArray *table_data, uint32_t source_id_base,
+ uint32_t num_ids, uint32_t dest_id_base)
+{
+ /* Source ID Base */
+ build_append_int_noprefix(table_data, source_id_base, 4);
+ /* Number of IDs */
+ build_append_int_noprefix(table_data, num_ids, 4);
+ /* Destination Device ID Base */
+ build_append_int_noprefix(table_data, source_id_base, 4);
+ /* Destination IOMMU Offset */
+ build_append_int_noprefix(table_data, dest_id_base, 4);
+ /* Flags */
+ build_append_int_noprefix(table_data, 0, 4);
+}
+
+struct AcpiRimtIdMapping {
+ uint32_t source_id_base;
+ uint32_t num_ids;
+};
+typedef struct AcpiRimtIdMapping AcpiRimtIdMapping;
+
+/* Build the rimt ID mapping to IOMMU for a given PCI host bridge */
+static int rimt_host_bridges(Object *obj, void *opaque)
+{
+ GArray *idmap_blob = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
+ PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
+
+ if (bus && !pci_bus_bypass_iommu(bus)) {
+ int min_bus, max_bus;
+
+ pci_bus_range(bus, &min_bus, &max_bus);
+
+ AcpiRimtIdMapping idmap = {
+ .source_id_base = min_bus << 8,
+ .num_ids = (max_bus - min_bus + 1) << 8,
+ };
+ g_array_append_val(idmap_blob, idmap);
+ }
+ }
+
+ return 0;
+}
+
+static int rimt_idmap_compare(gconstpointer a, gconstpointer b)
+{
+ AcpiRimtIdMapping *idmap_a = (AcpiRimtIdMapping *)a;
+ AcpiRimtIdMapping *idmap_b = (AcpiRimtIdMapping *)b;
+
+ return idmap_a->source_id_base - idmap_b->source_id_base;
+}
+
+/*
+ * RISC-V IO Mapping Table (RIMT)
+ * https://github.com/riscv-non-isa/riscv-acpi-rimt/releases/download/v0.99/rimt-spec.pdf
+ */
+static void build_rimt(GArray *table_data, BIOSLinker *linker,
+ RISCVVirtState *s)
+{
+ int i, nb_nodes, rc_mapping_count;
+ size_t node_size, iommu_offset = 0;
+ uint32_t id = 0;
+ g_autoptr(GArray) iommu_idmaps = g_array_new(false, true,
+ sizeof(AcpiRimtIdMapping));
+
+ AcpiTable table = { .sig = "RIMT", .rev = 1, .oem_id = s->oem_id,
+ .oem_table_id = s->oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+
+ object_child_foreach_recursive(object_get_root(),
+ rimt_host_bridges, iommu_idmaps);
+
+    /* Sort the ID mapping by Source ID Base */
+ g_array_sort(iommu_idmaps, rimt_idmap_compare);
+
+ nb_nodes = 2; /* RC, IOMMU */
+ rc_mapping_count = iommu_idmaps->len;
+ /* Number of RIMT Nodes */
+ build_append_int_noprefix(table_data, nb_nodes, 4);
+
+ /* Offset to Array of RIMT Nodes */
+ build_append_int_noprefix(table_data, RIMT_NODE_OFFSET, 4);
+ build_append_int_noprefix(table_data, 0, 4); /* Reserved */
+
+ iommu_offset = table_data->len - table.table_offset;
+ /* IOMMU Device Structure */
+    build_append_int_noprefix(table_data, 0, 1); /* Type - IOMMU */
+ build_append_int_noprefix(table_data, 1, 1); /* Revision */
+ node_size = IOMMU_ENTRY_SIZE;
+ build_append_int_noprefix(table_data, node_size, 2); /* Length */
+ build_append_int_noprefix(table_data, 0, 2); /* Reserved */
+ build_append_int_noprefix(table_data, id++, 2); /* ID */
+ if (virt_is_iommu_sys_enabled(s)) {
+ /* Hardware ID */
+ build_append_int_noprefix(table_data, 'R', 1);
+ build_append_int_noprefix(table_data, 'S', 1);
+ build_append_int_noprefix(table_data, 'C', 1);
+ build_append_int_noprefix(table_data, 'V', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '4', 1);
+ /* Base Address */
+ build_append_int_noprefix(table_data,
+ s->memmap[VIRT_IOMMU_SYS].base, 8);
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ } else {
+ /* Hardware ID */
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '1', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '0', 1);
+ build_append_int_noprefix(table_data, '1', 1);
+ build_append_int_noprefix(table_data, '4', 1);
+
+ build_append_int_noprefix(table_data, 0, 8); /* Base Address */
+ build_append_int_noprefix(table_data, 1, 4); /* Flags */
+ }
+
+ build_append_int_noprefix(table_data, 0, 4); /* Proximity Domain */
+ build_append_int_noprefix(table_data, 0, 2); /* PCI Segment number */
+ /* PCIe B/D/F */
+ if (virt_is_iommu_sys_enabled(s)) {
+ build_append_int_noprefix(table_data, 0, 2);
+ } else {
+ build_append_int_noprefix(table_data, s->pci_iommu_bdf, 2);
+ }
+ /* Number of interrupt wires */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Interrupt wire array offset */
+ build_append_int_noprefix(table_data, RISCV_INTERRUPT_WIRE_OFFSSET, 2);
+
+ /* PCIe Root Complex Node */
+ build_append_int_noprefix(table_data, 1, 1); /* Type */
+ build_append_int_noprefix(table_data, 1, 1); /* Revision */
+ node_size = ROOT_COMPLEX_ENTRY_SIZE +
+ ID_MAPPING_ENTRY_SIZE * rc_mapping_count;
+ build_append_int_noprefix(table_data, node_size, 2); /* Length */
+ build_append_int_noprefix(table_data, 0, 2); /* Reserved */
+ build_append_int_noprefix(table_data, id++, 2); /* ID */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ build_append_int_noprefix(table_data, 0, 2); /* Reserved */
+ /* PCI Segment number */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* ID mapping array offset */
+ build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 2);
+ /* Number of ID mappings */
+ build_append_int_noprefix(table_data, rc_mapping_count, 2);
+
+ /* Output Reference */
+ AcpiRimtIdMapping *range;
+
+ /* ID mapping array */
+ for (i = 0; i < iommu_idmaps->len; i++) {
+ range = &g_array_index(iommu_idmaps, AcpiRimtIdMapping, i);
+ if (virt_is_iommu_sys_enabled(s)) {
+ range->source_id_base = 0;
+ } else {
+ range->source_id_base = s->pci_iommu_bdf + 1;
+ }
+ range->num_ids = 0xffff - s->pci_iommu_bdf;
+ build_rimt_id_mapping(table_data, range->source_id_base,
+ range->num_ids, iommu_offset);
+ }
+
+ acpi_table_end(linker, &table);
+}
+
/*
* ACPI spec, Revision 6.5+
* 5.2.16 System Resource Affinity Table (SRAT)
@@ -641,6 +888,11 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_rhct(tables_blob, tables->linker, s);
+ if (virt_is_iommu_sys_enabled(s) || s->pci_iommu_bdf) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_rimt(tables_blob, tables->linker, s);
+ }
+
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, s);
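Editor's note: build_rimt_id_mapping() above emits the five 4-byte fields of a RIMT ID Mapping Structure in the order given by its comments, and ID_MAPPING_ENTRY_SIZE (20) is used when sizing the root-complex node. As a sanity check, the same layout expressed as a packed struct; this is an illustrative sketch, not code from the patch or from the RIMT spec headers:

    #include <stdint.h>

    /* field order mirrors the build_append_int_noprefix() calls above */
    struct rimt_id_mapping {
        uint32_t source_id_base;
        uint32_t num_ids;
        uint32_t dest_device_id_base;
        uint32_t dest_iommu_offset;
        uint32_t flags;
    } __attribute__((packed));

    _Static_assert(sizeof(struct rimt_id_mapping) == 20,
                   "must match ID_MAPPING_ENTRY_SIZE");

    int main(void)
    {
        return 0;
    }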
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 9981e0f..cf280a9 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -27,11 +27,13 @@
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "target/riscv/cpu.h"
#include "hw/core/sysbus-fdt.h"
#include "target/riscv/pmu.h"
#include "hw/riscv/riscv_hart.h"
+#include "hw/riscv/iommu.h"
+#include "hw/riscv/riscv-iommu-bits.h"
#include "hw/riscv/virt.h"
#include "hw/riscv/boot.h"
#include "hw/riscv/numa.h"
@@ -43,23 +45,33 @@
#include "hw/misc/sifive_test.h"
#include "hw/platform-bus.h"
#include "chardev/char.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/tcg.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tpm.h"
-#include "sysemu/qtest.h"
+#include "system/device_tree.h"
+#include "system/system.h"
+#include "system/tcg.h"
+#include "system/kvm.h"
+#include "system/tpm.h"
+#include "system/qtest.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"
#include "hw/display/ramfb.h"
#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
#include "hw/virtio/virtio-iommu.h"
+#include "hw/uefi/var-service-api.h"
/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
-static bool virt_use_kvm_aia(RISCVVirtState *s)
+static bool virt_use_kvm_aia_aplic_imsic(RISCVVirtAIAType aia_type)
{
- return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
+ bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
+
+ return riscv_is_kvm_aia_aplic_imsic(msimode);
+}
+
+static bool virt_use_emulated_aplic(RISCVVirtAIAType aia_type)
+{
+ bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
+
+ return riscv_use_emulated_aplic(msimode);
}
static bool virt_aclint_allowed(void)
@@ -75,6 +87,7 @@ static const MemMapEntry virt_memmap[] = {
[VIRT_CLINT] = { 0x2000000, 0x10000 },
[VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 },
[VIRT_PCIE_PIO] = { 0x3000000, 0x10000 },
+ [VIRT_IOMMU_SYS] = { 0x3010000, 0x1000 },
[VIRT_PLATFORM_BUS] = { 0x4000000, 0x2000000 },
[VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
[VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) },
@@ -153,8 +166,8 @@ static void virt_flash_map1(PFlashCFI01 *flash,
static void virt_flash_map(RISCVVirtState *s,
MemoryRegion *sysmem)
{
- hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
- hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
+ hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2;
+ hwaddr flashbase = s->memmap[VIRT_FLASH].base;
virt_flash_map1(s->flash[0], flashbase, flashsize,
sysmem);
@@ -167,7 +180,7 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
{
int pin, dev;
uint32_t irq_map_stride = 0;
- uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS *
+ uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS *
FDT_MAX_INT_MAP_WIDTH] = {};
uint32_t *irq_map = full_irq_map;
@@ -179,11 +192,11 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
* possible slot) seeing the interrupt-map-mask will allow the table
* to wrap to any number of devices.
*/
- for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
+ for (dev = 0; dev < PCI_NUM_PINS; dev++) {
int devfn = dev * 0x8;
- for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
- int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
+ for (pin = 0; pin < PCI_NUM_PINS; pin++) {
+ int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
int i = 0;
/* Fill PCI address cells */
@@ -209,7 +222,7 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
}
qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
- GPEX_NUM_IRQS * GPEX_NUM_IRQS *
+ PCI_NUM_PINS * PCI_NUM_PINS *
irq_map_stride * sizeof(uint32_t));
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
@@ -224,10 +237,10 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
uint32_t cpu_phandle;
MachineState *ms = MACHINE(s);
bool is_32_bit = riscv_is_32bit(&s->soc[0]);
- uint8_t satp_mode_max;
for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu];
+ int8_t satp_mode_max = cpu_ptr->cfg.max_satp_mode;
g_autofree char *cpu_name = NULL;
g_autofree char *core_name = NULL;
g_autofree char *intc_name = NULL;
@@ -239,8 +252,7 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
s->soc[socket].hartid_base + cpu);
qemu_fdt_add_subnode(ms->fdt, cpu_name);
- if (cpu_ptr->cfg.satp_mode.supported != 0) {
- satp_mode_max = satp_mode_max_from_map(cpu_ptr->cfg.satp_mode.map);
+ if (satp_mode_max != -1) {
sv_name = g_strdup_printf("riscv,%s",
satp_mode_str(satp_mode_max, is_32_bit));
qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name);
@@ -288,16 +300,16 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
}
}
-static void create_fdt_socket_memory(RISCVVirtState *s,
- const MemMapEntry *memmap, int socket)
+static void create_fdt_socket_memory(RISCVVirtState *s, int socket)
{
g_autofree char *mem_name = NULL;
- uint64_t addr, size;
+ hwaddr addr;
+ uint64_t size;
MachineState *ms = MACHINE(s);
- addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket);
+ addr = s->memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket);
size = riscv_socket_mem_size(ms, socket);
- mem_name = g_strdup_printf("/memory@%lx", (long)addr);
+ mem_name = g_strdup_printf("/memory@%"HWADDR_PRIx, addr);
qemu_fdt_add_subnode(ms->fdt, mem_name);
qemu_fdt_setprop_cells(ms->fdt, mem_name, "reg",
addr >> 32, addr, size >> 32, size);
@@ -306,7 +318,7 @@ static void create_fdt_socket_memory(RISCVVirtState *s,
}
static void create_fdt_socket_clint(RISCVVirtState *s,
- const MemMapEntry *memmap, int socket,
+ int socket,
uint32_t *intc_phandles)
{
int cpu;
@@ -327,21 +339,22 @@ static void create_fdt_socket_clint(RISCVVirtState *s,
clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
}
- clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
+ clint_addr = s->memmap[VIRT_CLINT].base +
+ (s->memmap[VIRT_CLINT].size * socket);
clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
qemu_fdt_add_subnode(ms->fdt, clint_name);
qemu_fdt_setprop_string_array(ms->fdt, clint_name, "compatible",
(char **)&clint_compat,
ARRAY_SIZE(clint_compat));
qemu_fdt_setprop_cells(ms->fdt, clint_name, "reg",
- 0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size);
+ 0x0, clint_addr, 0x0, s->memmap[VIRT_CLINT].size);
qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended",
clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
riscv_socket_fdt_write_id(ms, clint_name, socket);
}
static void create_fdt_socket_aclint(RISCVVirtState *s,
- const MemMapEntry *memmap, int socket,
+ int socket,
uint32_t *intc_phandles)
{
int cpu;
@@ -368,8 +381,10 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
- addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
+ addr = s->memmap[VIRT_CLINT].base +
+ (s->memmap[VIRT_CLINT].size * socket);
name = g_strdup_printf("/soc/mswi@%lx", addr);
+
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"riscv,aclint-mswi");
@@ -384,13 +399,13 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
- addr = memmap[VIRT_CLINT].base +
+ addr = s->memmap[VIRT_CLINT].base +
(RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
} else {
- addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
- (memmap[VIRT_CLINT].size * socket);
- size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
+ addr = s->memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
+ (s->memmap[VIRT_CLINT].size * socket);
+ size = s->memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
}
name = g_strdup_printf("/soc/mtimer@%lx", addr);
qemu_fdt_add_subnode(ms->fdt, name);
@@ -407,14 +422,15 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
g_free(name);
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
- addr = memmap[VIRT_ACLINT_SSWI].base +
- (memmap[VIRT_ACLINT_SSWI].size * socket);
+ addr = s->memmap[VIRT_ACLINT_SSWI].base +
+ (s->memmap[VIRT_ACLINT_SSWI].size * socket);
+
name = g_strdup_printf("/soc/sswi@%lx", addr);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"riscv,aclint-sswi");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
- 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
+ 0x0, addr, 0x0, s->memmap[VIRT_ACLINT_SSWI].size);
qemu_fdt_setprop(ms->fdt, name, "interrupts-extended",
aclint_sswi_cells, aclint_cells_size);
qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0);
@@ -425,7 +441,7 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
}
static void create_fdt_socket_plic(RISCVVirtState *s,
- const MemMapEntry *memmap, int socket,
+ int socket,
uint32_t *phandle, uint32_t *intc_phandles,
uint32_t *plic_phandles)
{
@@ -439,7 +455,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
};
plic_phandles[socket] = (*phandle)++;
- plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
+ plic_addr = s->memmap[VIRT_PLIC].base +
+ (s->memmap[VIRT_PLIC].size * socket);
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
qemu_fdt_add_subnode(ms->fdt, plic_name);
qemu_fdt_setprop_cell(ms->fdt, plic_name,
@@ -478,7 +495,7 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
}
qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg",
- 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
+ 0x0, plic_addr, 0x0, s->memmap[VIRT_PLIC].size);
qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev",
VIRT_IRQCHIP_NUM_SOURCES - 1);
riscv_socket_fdt_write_id(ms, plic_name, socket);
@@ -487,8 +504,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
if (!socket) {
platform_bus_add_all_fdt_nodes(ms->fdt, plic_name,
- memmap[VIRT_PLATFORM_BUS].base,
- memmap[VIRT_PLATFORM_BUS].size,
+ s->memmap[VIRT_PLATFORM_BUS].base,
+ s->memmap[VIRT_PLATFORM_BUS].size,
VIRT_PLATFORM_BUS_IRQ);
}
}
@@ -552,7 +569,6 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
FDT_IMSIC_INT_CELLS);
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#msi-cells", 0);
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
@@ -576,7 +592,7 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
}
-static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
+static void create_fdt_imsic(RISCVVirtState *s,
uint32_t *phandle, uint32_t *intc_phandles,
uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
{
@@ -585,12 +601,12 @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
if (!kvm_enabled()) {
/* M-level IMSIC node */
- create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
+ create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_M].base, intc_phandles,
*msi_m_phandle, true, 0);
}
/* S-level IMSIC node */
- create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
+ create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_S].base, intc_phandles,
*msi_s_phandle, false,
imsic_num_bits(s->aia_guests + 1));
@@ -667,7 +683,7 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
}
static void create_fdt_socket_aplic(RISCVVirtState *s,
- const MemMapEntry *memmap, int socket,
+ int socket,
uint32_t msi_m_phandle,
uint32_t msi_s_phandle,
uint32_t *phandle,
@@ -684,18 +700,19 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
if (!kvm_enabled()) {
/* M-level APLIC node */
- aplic_addr = memmap[VIRT_APLIC_M].base +
- (memmap[VIRT_APLIC_M].size * socket);
- create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
+ aplic_addr = s->memmap[VIRT_APLIC_M].base +
+ (s->memmap[VIRT_APLIC_M].size * socket);
+ create_fdt_one_aplic(s, socket, aplic_addr,
+ s->memmap[VIRT_APLIC_M].size,
msi_m_phandle, intc_phandles,
aplic_m_phandle, aplic_s_phandle,
true, num_harts);
}
/* S-level APLIC node */
- aplic_addr = memmap[VIRT_APLIC_S].base +
- (memmap[VIRT_APLIC_S].size * socket);
- create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
+ aplic_addr = s->memmap[VIRT_APLIC_S].base +
+ (s->memmap[VIRT_APLIC_S].size * socket);
+ create_fdt_one_aplic(s, socket, aplic_addr, s->memmap[VIRT_APLIC_S].size,
msi_s_phandle, intc_phandles,
aplic_s_phandle, 0,
false, num_harts);
@@ -703,8 +720,8 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
if (!socket) {
g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
- memmap[VIRT_PLATFORM_BUS].base,
- memmap[VIRT_PLATFORM_BUS].size,
+ s->memmap[VIRT_PLATFORM_BUS].base,
+ s->memmap[VIRT_PLATFORM_BUS].size,
VIRT_PLATFORM_BUS_IRQ);
}
@@ -722,7 +739,7 @@ static void create_fdt_pmu(RISCVVirtState *s)
riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name);
}
-static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
+static void create_fdt_sockets(RISCVVirtState *s,
uint32_t *phandle,
uint32_t *irq_mmio_phandle,
uint32_t *irq_pcie_phandle,
@@ -739,7 +756,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_add_subnode(ms->fdt, "/cpus");
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency",
kvm_enabled() ?
- kvm_riscv_get_timebase_frequency(first_cpu) :
+ kvm_riscv_get_timebase_frequency(&s->soc->harts[0]) :
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ);
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
@@ -758,53 +775,54 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
create_fdt_socket_cpus(s, socket, clust_name, phandle,
&intc_phandles[phandle_pos]);
- create_fdt_socket_memory(s, memmap, socket);
+ create_fdt_socket_memory(s, socket);
if (virt_aclint_allowed() && s->have_aclint) {
- create_fdt_socket_aclint(s, memmap, socket,
+ create_fdt_socket_aclint(s, socket,
&intc_phandles[phandle_pos]);
} else if (tcg_enabled()) {
- create_fdt_socket_clint(s, memmap, socket,
+ create_fdt_socket_clint(s, socket,
&intc_phandles[phandle_pos]);
}
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
- create_fdt_imsic(s, memmap, phandle, intc_phandles,
- &msi_m_phandle, &msi_s_phandle);
+ create_fdt_imsic(s, phandle, intc_phandles,
+ &msi_m_phandle, &msi_s_phandle);
*msi_pcie_phandle = msi_s_phandle;
}
- /* KVM AIA only has one APLIC instance */
- if (kvm_enabled() && virt_use_kvm_aia(s)) {
- create_fdt_socket_aplic(s, memmap, 0,
+ /*
+ * With KVM AIA aplic-imsic, using an irqchip without split
+ * mode, we'll use only one APLIC instance.
+ */
+ if (!virt_use_emulated_aplic(s->aia_type)) {
+ create_fdt_socket_aplic(s, 0,
msi_m_phandle, msi_s_phandle, phandle,
&intc_phandles[0], xplic_phandles,
ms->smp.cpus);
+
+ *irq_mmio_phandle = xplic_phandles[0];
+ *irq_virtio_phandle = xplic_phandles[0];
+ *irq_pcie_phandle = xplic_phandles[0];
} else {
phandle_pos = ms->smp.cpus;
for (socket = (socket_count - 1); socket >= 0; socket--) {
phandle_pos -= s->soc[socket].num_harts;
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
- create_fdt_socket_plic(s, memmap, socket, phandle,
+ create_fdt_socket_plic(s, socket, phandle,
&intc_phandles[phandle_pos],
xplic_phandles);
} else {
- create_fdt_socket_aplic(s, memmap, socket,
+ create_fdt_socket_aplic(s, socket,
msi_m_phandle, msi_s_phandle, phandle,
&intc_phandles[phandle_pos],
xplic_phandles,
s->soc[socket].num_harts);
}
}
- }
- if (kvm_enabled() && virt_use_kvm_aia(s)) {
- *irq_mmio_phandle = xplic_phandles[0];
- *irq_virtio_phandle = xplic_phandles[0];
- *irq_pcie_phandle = xplic_phandles[0];
- } else {
for (socket = 0; socket < socket_count; socket++) {
if (socket == 0) {
*irq_mmio_phandle = xplic_phandles[socket];
@@ -824,21 +842,24 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
riscv_socket_fdt_write_distance_matrix(ms);
}
-static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
- uint32_t irq_virtio_phandle)
+static void create_fdt_virtio(RISCVVirtState *s, uint32_t irq_virtio_phandle)
{
int i;
MachineState *ms = MACHINE(s);
+ hwaddr virtio_base = s->memmap[VIRT_VIRTIO].base;
for (i = 0; i < VIRTIO_COUNT; i++) {
- g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx",
- (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));
+ g_autofree char *name = NULL;
+ uint64_t size = s->memmap[VIRT_VIRTIO].size;
+ hwaddr addr = virtio_base + i * size;
+
+ name = g_strdup_printf("/soc/virtio_mmio@%"HWADDR_PRIx, addr);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
- 0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
- 0x0, memmap[VIRT_VIRTIO].size);
+ 0x0, addr,
+ 0x0, size);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
irq_virtio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@@ -851,15 +872,16 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
}
}
-static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
+static void create_fdt_pcie(RISCVVirtState *s,
uint32_t irq_pcie_phandle,
- uint32_t msi_pcie_phandle)
+ uint32_t msi_pcie_phandle,
+ uint32_t iommu_sys_phandle)
{
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
- name = g_strdup_printf("/soc/pci@%lx",
- (long) memmap[VIRT_PCIE_ECAM].base);
+ name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
+ s->memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells",
FDT_PCI_ADDR_CELLS);
qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells",
@@ -870,36 +892,41 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_string(ms->fdt, name, "device_type", "pci");
qemu_fdt_setprop_cell(ms->fdt, name, "linux,pci-domain", 0);
qemu_fdt_setprop_cells(ms->fdt, name, "bus-range", 0,
- memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
+ s->memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
qemu_fdt_setprop(ms->fdt, name, "dma-coherent", NULL, 0);
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
qemu_fdt_setprop_cell(ms->fdt, name, "msi-parent", msi_pcie_phandle);
}
qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0,
- memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
+ s->memmap[VIRT_PCIE_ECAM].base, 0, s->memmap[VIRT_PCIE_ECAM].size);
qemu_fdt_setprop_sized_cells(ms->fdt, name, "ranges",
1, FDT_PCI_RANGE_IOPORT, 2, 0,
- 2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size,
+ 2, s->memmap[VIRT_PCIE_PIO].base, 2, s->memmap[VIRT_PCIE_PIO].size,
1, FDT_PCI_RANGE_MMIO,
- 2, memmap[VIRT_PCIE_MMIO].base,
- 2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size,
+ 2, s->memmap[VIRT_PCIE_MMIO].base,
+ 2, s->memmap[VIRT_PCIE_MMIO].base, 2, s->memmap[VIRT_PCIE_MMIO].size,
1, FDT_PCI_RANGE_MMIO_64BIT,
2, virt_high_pcie_memmap.base,
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
+ if (virt_is_iommu_sys_enabled(s)) {
+ qemu_fdt_setprop_cells(ms->fdt, name, "iommu-map",
+ 0, iommu_sys_phandle, 0, 0, 0,
+ iommu_sys_phandle, 0, 0xffff);
+ }
+
create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle);
}
-static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
- uint32_t *phandle)
+static void create_fdt_reset(RISCVVirtState *s, uint32_t *phandle)
{
char *name;
uint32_t test_phandle;
MachineState *ms = MACHINE(s);
test_phandle = (*phandle)++;
- name = g_strdup_printf("/soc/test@%lx",
- (long)memmap[VIRT_TEST].base);
+ name = g_strdup_printf("/soc/test@%"HWADDR_PRIx,
+ s->memmap[VIRT_TEST].base);
qemu_fdt_add_subnode(ms->fdt, name);
{
static const char * const compat[3] = {
@@ -909,7 +936,7 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
(char **)&compat, ARRAY_SIZE(compat));
}
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
- 0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size);
+ 0x0, s->memmap[VIRT_TEST].base, 0x0, s->memmap[VIRT_TEST].size);
qemu_fdt_setprop_cell(ms->fdt, name, "phandle", test_phandle);
test_phandle = qemu_fdt_get_phandle(ms->fdt, name);
g_free(name);
@@ -931,18 +958,19 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
g_free(name);
}
-static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
+static void create_fdt_uart(RISCVVirtState *s,
uint32_t irq_mmio_phandle)
{
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
- name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base);
+ name = g_strdup_printf("/soc/serial@%"HWADDR_PRIx,
+ s->memmap[VIRT_UART0].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "ns16550a");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
- 0x0, memmap[VIRT_UART0].base,
- 0x0, memmap[VIRT_UART0].size);
+ 0x0, s->memmap[VIRT_UART0].base,
+ 0x0, s->memmap[VIRT_UART0].size);
qemu_fdt_setprop_cell(ms->fdt, name, "clock-frequency", 3686400);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_mmio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@@ -952,20 +980,22 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
}
qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
+ qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name);
}
-static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
+static void create_fdt_rtc(RISCVVirtState *s,
uint32_t irq_mmio_phandle)
{
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
- name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base);
+ name = g_strdup_printf("/soc/rtc@%"HWADDR_PRIx,
+ s->memmap[VIRT_RTC].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"google,goldfish-rtc");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
- 0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
+ 0x0, s->memmap[VIRT_RTC].base, 0x0, s->memmap[VIRT_RTC].size);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
irq_mmio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@@ -975,11 +1005,11 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
}
}
-static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
+static void create_fdt_flash(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
- hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
- hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
+ hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2;
+ hwaddr flashbase = s->memmap[VIRT_FLASH].base;
g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase);
qemu_fdt_add_subnode(ms->fdt, name);
@@ -990,11 +1020,11 @@ static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4);
}
-static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap)
+static void create_fdt_fw_cfg(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
- hwaddr base = memmap[VIRT_FW_CFG].base;
- hwaddr size = memmap[VIRT_FW_CFG].size;
+ hwaddr base = s->memmap[VIRT_FW_CFG].base;
+ hwaddr size = s->memmap[VIRT_FW_CFG].size;
g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
qemu_fdt_add_subnode(ms->fdt, nodename);
@@ -1013,8 +1043,8 @@ static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf)
g_autofree char *iommu_node = NULL;
g_autofree char *pci_node = NULL;
- pci_node = g_strdup_printf("/soc/pci@%lx",
- (long) virt_memmap[VIRT_PCIE_ECAM].base);
+ pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
+ s->memmap[VIRT_PCIE_ECAM].base);
iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node,
PCI_SLOT(bdf), PCI_FUNC(bdf));
iommu_phandle = qemu_fdt_alloc_phandle(fdt);
@@ -1033,27 +1063,99 @@ static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf)
bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf);
}
+static void create_fdt_iommu_sys(RISCVVirtState *s, uint32_t irq_chip,
+ uint32_t msi_phandle,
+ uint32_t *iommu_sys_phandle)
+{
+ const char comp[] = "riscv,iommu";
+ void *fdt = MACHINE(s)->fdt;
+ uint32_t iommu_phandle;
+ g_autofree char *iommu_node = NULL;
+ hwaddr addr = s->memmap[VIRT_IOMMU_SYS].base;
+ hwaddr size = s->memmap[VIRT_IOMMU_SYS].size;
+ uint32_t iommu_irq_map[RISCV_IOMMU_INTR_COUNT] = {
+ IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_CQ,
+ IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_FQ,
+ IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_PM,
+ IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_PQ,
+ };
+
+ iommu_node = g_strdup_printf("/soc/iommu@%x",
+ (unsigned int) s->memmap[VIRT_IOMMU_SYS].base);
+ iommu_phandle = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_add_subnode(fdt, iommu_node);
+
+ qemu_fdt_setprop(fdt, iommu_node, "compatible", comp, sizeof(comp));
+ qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1);
+ qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle);
+
+ qemu_fdt_setprop_cells(fdt, iommu_node, "reg",
+ addr >> 32, addr, size >> 32, size);
+ qemu_fdt_setprop_cell(fdt, iommu_node, "interrupt-parent", irq_chip);
+
+ qemu_fdt_setprop_cells(fdt, iommu_node, "interrupts",
+ iommu_irq_map[0], FDT_IRQ_TYPE_EDGE_LOW,
+ iommu_irq_map[1], FDT_IRQ_TYPE_EDGE_LOW,
+ iommu_irq_map[2], FDT_IRQ_TYPE_EDGE_LOW,
+ iommu_irq_map[3], FDT_IRQ_TYPE_EDGE_LOW);
+
+ qemu_fdt_setprop_cell(fdt, iommu_node, "msi-parent", msi_phandle);
+
+ *iommu_sys_phandle = iommu_phandle;
+}
+
+static void create_fdt_iommu(RISCVVirtState *s, uint16_t bdf)
+{
+ const char comp[] = "riscv,pci-iommu";
+ void *fdt = MACHINE(s)->fdt;
+ uint32_t iommu_phandle;
+ g_autofree char *iommu_node = NULL;
+ g_autofree char *pci_node = NULL;
+
+ pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
+ s->memmap[VIRT_PCIE_ECAM].base);
+ iommu_node = g_strdup_printf("%s/iommu@%x", pci_node, bdf);
+ iommu_phandle = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_add_subnode(fdt, iommu_node);
+
+ qemu_fdt_setprop(fdt, iommu_node, "compatible", comp, sizeof(comp));
+ qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1);
+ qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle);
+ qemu_fdt_setprop_cells(fdt, iommu_node, "reg",
+ bdf << 8, 0, 0, 0, 0);
+ qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map",
+ 0, iommu_phandle, 0, bdf,
+ bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf);
+ s->pci_iommu_bdf = bdf;
+}
+
static void finalize_fdt(RISCVVirtState *s)
{
uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
+ uint32_t iommu_sys_phandle = 1;
- create_fdt_sockets(s, virt_memmap, &phandle, &irq_mmio_phandle,
+ create_fdt_sockets(s, &phandle, &irq_mmio_phandle,
&irq_pcie_phandle, &irq_virtio_phandle,
&msi_pcie_phandle);
- create_fdt_virtio(s, virt_memmap, irq_virtio_phandle);
+ create_fdt_virtio(s, irq_virtio_phandle);
- create_fdt_pcie(s, virt_memmap, irq_pcie_phandle, msi_pcie_phandle);
+ if (virt_is_iommu_sys_enabled(s)) {
+ create_fdt_iommu_sys(s, irq_mmio_phandle, msi_pcie_phandle,
+ &iommu_sys_phandle);
+ }
+ create_fdt_pcie(s, irq_pcie_phandle, msi_pcie_phandle,
+ iommu_sys_phandle);
- create_fdt_reset(s, virt_memmap, &phandle);
+ create_fdt_reset(s, &phandle);
- create_fdt_uart(s, virt_memmap, irq_mmio_phandle);
+ create_fdt_uart(s, irq_mmio_phandle);
- create_fdt_rtc(s, virt_memmap, irq_mmio_phandle);
+ create_fdt_rtc(s, irq_mmio_phandle);
}
-static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
+static void create_fdt(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
uint8_t rng_seed[32];
@@ -1080,7 +1182,8 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
* The "/soc/pci@..." node is needed for PCIE hotplugs
* that might happen before finalize_fdt().
*/
- name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base);
+ name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
+ s->memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_add_subnode(ms->fdt, "/chosen");
@@ -1090,8 +1193,10 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed",
rng_seed, sizeof(rng_seed));
- create_fdt_flash(s, memmap);
- create_fdt_fw_cfg(s, memmap);
+ qemu_fdt_add_subnode(ms->fdt, "/aliases");
+
+ create_fdt_flash(s);
+ create_fdt_fw_cfg(s);
create_fdt_pmu(s);
}
@@ -1116,23 +1221,21 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
dev = qdev_new(TYPE_GPEX_HOST);
/* Set GPEX object properties for the virt machine */
- object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE,
+ object_property_set_uint(OBJECT(dev), PCI_HOST_ECAM_BASE,
ecam_base, NULL);
- object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE,
+ object_property_set_int(OBJECT(dev), PCI_HOST_ECAM_SIZE,
ecam_size, NULL);
- object_property_set_uint(OBJECT(GPEX_HOST(dev)),
- PCI_HOST_BELOW_4G_MMIO_BASE,
+ object_property_set_uint(OBJECT(dev), PCI_HOST_BELOW_4G_MMIO_BASE,
mmio_base, NULL);
- object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE,
+ object_property_set_int(OBJECT(dev), PCI_HOST_BELOW_4G_MMIO_SIZE,
mmio_size, NULL);
- object_property_set_uint(OBJECT(GPEX_HOST(dev)),
- PCI_HOST_ABOVE_4G_MMIO_BASE,
+ object_property_set_uint(OBJECT(dev), PCI_HOST_ABOVE_4G_MMIO_BASE,
high_mmio_base, NULL);
- object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE,
+ object_property_set_int(OBJECT(dev), PCI_HOST_ABOVE_4G_MMIO_SIZE,
high_mmio_size, NULL);
- object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE,
+ object_property_set_uint(OBJECT(dev), PCI_HOST_PIO_BASE,
pio_base, NULL);
- object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE,
+ object_property_set_int(OBJECT(dev), PCI_HOST_PIO_SIZE,
pio_size, NULL);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
@@ -1158,20 +1261,19 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
}
- GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus;
+ GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(dev)->bus;
return dev;
}
-static FWCfgState *create_fw_cfg(const MachineState *ms)
+static FWCfgState *create_fw_cfg(const MachineState *ms, hwaddr base)
{
- hwaddr base = virt_memmap[VIRT_FW_CFG].base;
FWCfgState *fw_cfg;
fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16,
@@ -1184,27 +1286,22 @@ static FWCfgState *create_fw_cfg(const MachineState *ms)
static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
int base_hartid, int hart_count)
{
- DeviceState *ret;
g_autofree char *plic_hart_config = NULL;
/* Per-socket PLIC hart topology configuration string */
plic_hart_config = riscv_plic_hart_config_string(hart_count);
/* Per-socket PLIC */
- ret = sifive_plic_create(
- memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
- plic_hart_config, hart_count, base_hartid,
- VIRT_IRQCHIP_NUM_SOURCES,
- ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
- VIRT_PLIC_PRIORITY_BASE,
- VIRT_PLIC_PENDING_BASE,
- VIRT_PLIC_ENABLE_BASE,
- VIRT_PLIC_ENABLE_STRIDE,
- VIRT_PLIC_CONTEXT_BASE,
- VIRT_PLIC_CONTEXT_STRIDE,
- memmap[VIRT_PLIC].size);
-
- return ret;
+ return sifive_plic_create(
+ memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
+ plic_hart_config, hart_count, base_hartid,
+ VIRT_IRQCHIP_NUM_SOURCES,
+ ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
+ VIRT_PLIC_PRIORITY_BASE, VIRT_PLIC_PENDING_BASE,
+ VIRT_PLIC_ENABLE_BASE, VIRT_PLIC_ENABLE_STRIDE,
+ VIRT_PLIC_CONTEXT_BASE,
+ VIRT_PLIC_CONTEXT_STRIDE,
+ memmap[VIRT_PLIC].size);
}
static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
@@ -1212,7 +1309,7 @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
int base_hartid, int hart_count)
{
int i;
- hwaddr addr;
+ hwaddr addr = 0;
uint32_t guest_bits;
DeviceState *aplic_s = NULL;
DeviceState *aplic_m = NULL;
@@ -1262,6 +1359,10 @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
VIRT_IRQCHIP_NUM_PRIO_BITS,
msimode, false, aplic_m);
+ if (kvm_enabled() && msimode) {
+ riscv_aplic_set_kvm_msicfgaddr(RISCV_APLIC(aplic_s), addr);
+ }
+
return kvm_enabled() ? aplic_s : aplic_m;
}
@@ -1269,14 +1370,13 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
{
DeviceState *dev;
SysBusDevice *sysbus;
- const MemMapEntry *memmap = virt_memmap;
int i;
MemoryRegion *sysmem = get_system_memory();
dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
- qdev_prop_set_uint32(dev, "mmio_size", memmap[VIRT_PLATFORM_BUS].size);
+ qdev_prop_set_uint32(dev, "mmio_size", s->memmap[VIRT_PLATFORM_BUS].size);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
s->platform_bus_dev = dev;
@@ -1287,7 +1387,7 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
}
memory_region_add_subregion(sysmem,
- memmap[VIRT_PLATFORM_BUS].base,
+ s->memmap[VIRT_PLATFORM_BUS].base,
sysbus_mmio_get_region(sysbus, 0));
}
@@ -1334,14 +1434,14 @@ static void virt_machine_done(Notifier *notifier, void *data)
{
RISCVVirtState *s = container_of(notifier, RISCVVirtState,
machine_done);
- const MemMapEntry *memmap = virt_memmap;
MachineState *machine = MACHINE(s);
- target_ulong start_addr = memmap[VIRT_DRAM].base;
+ hwaddr start_addr = s->memmap[VIRT_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
uint64_t fdt_load_addr;
uint64_t kernel_entry = 0;
BlockBackend *pflash_blk0;
+ RISCVBootInfo boot_info;
/*
* A user provided dtb must include everything, including
@@ -1368,7 +1468,7 @@ static void virt_machine_done(Notifier *notifier, void *data)
}
firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
- start_addr, NULL);
+ &start_addr, NULL);
pflash_blk0 = pflash_cfi01_get_blk(s->flash[0]);
if (pflash_blk0) {
@@ -1379,34 +1479,36 @@ static void virt_machine_done(Notifier *notifier, void *data)
* let's overwrite the address we jump to after reset to
* the base of the flash.
*/
- start_addr = virt_memmap[VIRT_FLASH].base;
+ start_addr = s->memmap[VIRT_FLASH].base;
} else {
/*
* Pflash was supplied but either KVM guest or bios is not none.
* In this case, base of the flash would contain S-mode payload.
*/
riscv_setup_firmware_boot(machine);
- kernel_entry = virt_memmap[VIRT_FLASH].base;
+ kernel_entry = s->memmap[VIRT_FLASH].base;
}
}
+ riscv_boot_info_init(&boot_info, &s->soc[0]);
+
if (machine->kernel_filename && !kernel_entry) {
- kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
+ kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info,
firmware_end_addr);
-
- kernel_entry = riscv_load_kernel(machine, &s->soc[0],
- kernel_start_addr, true, NULL);
+ riscv_load_kernel(machine, &boot_info, kernel_start_addr,
+ true, NULL);
+ kernel_entry = boot_info.image_low_addr;
}
- fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base,
- memmap[VIRT_DRAM].size,
- machine);
+ fdt_load_addr = riscv_compute_fdt_addr(s->memmap[VIRT_DRAM].base,
+ s->memmap[VIRT_DRAM].size,
+ machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
/* load the reset vector */
riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr,
- virt_memmap[VIRT_MROM].base,
- virt_memmap[VIRT_MROM].size, kernel_entry,
+ s->memmap[VIRT_MROM].base,
+ s->memmap[VIRT_MROM].size, kernel_entry,
fdt_load_addr);
/*
@@ -1427,7 +1529,6 @@ static void virt_machine_done(Notifier *notifier, void *data)
static void virt_machine_init(MachineState *machine)
{
- const MemMapEntry *memmap = virt_memmap;
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
@@ -1435,6 +1536,8 @@ static void virt_machine_init(MachineState *machine)
int i, base_hartid, hart_count;
int socket_count = riscv_socket_count(machine);
+ s->memmap = virt_memmap;
+
/* Check socket count limit */
if (VIRT_SOCKETS_MAX < socket_count) {
error_report("number of sockets/nodes should be less than %d",
@@ -1482,7 +1585,7 @@ static void virt_machine_init(MachineState *machine)
if (virt_aclint_allowed() && s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
@@ -1491,28 +1594,28 @@ static void virt_machine_init(MachineState *machine)
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
- riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size,
+ riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size +
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
- riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
- i * memmap[VIRT_ACLINT_SSWI].size,
+ riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base +
+ i * s->memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else if (tcg_enabled()) {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
+ s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
@@ -1520,11 +1623,11 @@ static void virt_machine_init(MachineState *machine)
/* Per-socket interrupt controller */
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
- s->irqchip[i] = virt_create_plic(memmap, i,
+ s->irqchip[i] = virt_create_plic(s->memmap, i,
base_hartid, hart_count);
} else {
s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
- memmap, i, base_hartid,
+ s->memmap, i, base_hartid,
hart_count);
}
@@ -1543,11 +1646,11 @@ static void virt_machine_init(MachineState *machine)
}
}
- if (kvm_enabled() && virt_use_kvm_aia(s)) {
+ if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) {
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
- memmap[VIRT_APLIC_S].base,
- memmap[VIRT_IMSIC_S].base,
+ s->memmap[VIRT_APLIC_S].base,
+ s->memmap[VIRT_IMSIC_S].base,
s->aia_guests);
}
@@ -1563,37 +1666,36 @@ static void virt_machine_init(MachineState *machine)
virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
} else {
virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
- virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size;
+ virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base +
+ machine->ram_size;
virt_high_pcie_memmap.base =
ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
}
- s->memmap = virt_memmap;
-
/* register system main memory (actual RAM) */
- memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
- machine->ram);
+ memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base,
+ machine->ram);
/* boot rom */
memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
- memmap[VIRT_MROM].size, &error_fatal);
- memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
+ s->memmap[VIRT_MROM].size, &error_fatal);
+ memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base,
mask_rom);
/*
* Init fw_cfg. Must be done before riscv_load_fdt, otherwise the
* device tree cannot be altered and we get FDT_ERR_NOSPACE.
*/
- s->fw_cfg = create_fw_cfg(machine);
+ s->fw_cfg = create_fw_cfg(machine, s->memmap[VIRT_FW_CFG].base);
rom_set_fw(s->fw_cfg);
/* SiFive Test MMIO device */
- sifive_test_create(memmap[VIRT_TEST].base);
+ sifive_test_create(s->memmap[VIRT_TEST].base);
/* VirtIO MMIO devices */
for (i = 0; i < VIRTIO_COUNT; i++) {
sysbus_create_simple("virtio-mmio",
- memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
+ s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size,
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}
@@ -1601,11 +1703,11 @@ static void virt_machine_init(MachineState *machine)
create_platform_bus(s, mmio_irqchip);
- serial_mm_init(system_memory, memmap[VIRT_UART0].base,
+ serial_mm_init(system_memory, s->memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
- sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
+ sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base,
qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));
for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
@@ -1623,7 +1725,23 @@ static void virt_machine_init(MachineState *machine)
exit(1);
}
} else {
- create_fdt(s, memmap);
+ create_fdt(s);
+ }
+
+ if (virt_is_iommu_sys_enabled(s)) {
+ DeviceState *iommu_sys = qdev_new(TYPE_RISCV_IOMMU_SYS);
+
+ object_property_set_uint(OBJECT(iommu_sys), "addr",
+ s->memmap[VIRT_IOMMU_SYS].base,
+ &error_fatal);
+ object_property_set_uint(OBJECT(iommu_sys), "base-irq",
+ IOMMU_SYS_IRQ,
+ &error_fatal);
+ object_property_set_link(OBJECT(iommu_sys), "irqchip",
+ OBJECT(mmio_irqchip),
+ &error_fatal);
+
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(iommu_sys), &error_fatal);
}
s->machine_done.notify = virt_machine_done;
@@ -1639,6 +1757,7 @@ static void virt_machine_instance_init(Object *obj)
s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
s->acpi = ON_OFF_AUTO_AUTO;
+ s->iommu_sys = ON_OFF_AUTO_AUTO;
}
static char *virt_get_aia_guests(Object *obj, Error **errp)
@@ -1711,6 +1830,28 @@ static void virt_set_aclint(Object *obj, bool value, Error **errp)
s->have_aclint = value;
}
+bool virt_is_iommu_sys_enabled(RISCVVirtState *s)
+{
+ return s->iommu_sys == ON_OFF_AUTO_ON;
+}
+
+static void virt_get_iommu_sys(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
+ OnOffAuto iommu_sys = s->iommu_sys;
+
+ visit_type_OnOffAuto(v, name, &iommu_sys, errp);
+}
+
+static void virt_set_iommu_sys(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
+
+ visit_type_OnOffAuto(v, name, &s->iommu_sys, errp);
+}
+
bool virt_is_acpi_enabled(RISCVVirtState *s)
{
return s->acpi != ON_OFF_AUTO_OFF;
@@ -1737,11 +1878,15 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
DeviceState *dev)
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
if (device_is_dynamic_sysbus(mc, dev) ||
- object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_RISCV_IOMMU_PCI)) {
+ s->iommu_sys = ON_OFF_AUTO_OFF;
return HOTPLUG_HANDLER(machine);
}
+
return NULL;
}
@@ -1762,9 +1907,14 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev)));
}
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_IOMMU_PCI)) {
+ create_fdt_iommu(s, pci_get_bdf(PCI_DEVICE(dev)));
+ s->iommu_sys = ON_OFF_AUTO_OFF;
+ }
}
-static void virt_machine_class_init(ObjectClass *oc, void *data)
+static void virt_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -1789,6 +1939,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
hc->plug = virt_machine_device_plug_cb;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS);
#ifdef CONFIG_TPM
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
@@ -1823,6 +1974,12 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
NULL, NULL);
object_class_property_set_description(oc, "acpi",
"Enable ACPI");
+
+ object_class_property_add(oc, "iommu-sys", "OnOffAuto",
+ virt_get_iommu_sys, virt_set_iommu_sys,
+ NULL, NULL);
+ object_class_property_set_description(oc, "iommu-sys",
+ "Enable IOMMU platform device");
}
static const TypeInfo virt_machine_typeinfo = {
@@ -1831,7 +1988,7 @@ static const TypeInfo virt_machine_typeinfo = {
.class_init = virt_machine_class_init,
.instance_init = virt_machine_instance_init,
.instance_size = sizeof(RISCVVirtState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
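
Editor's note on the virt board changes above: the new "iommu-sys" machine property is an OnOffAuto tri-state that defaults to AUTO in virt_machine_instance_init(). Only an explicit ON makes virt_is_iommu_sys_enabled() true and causes the platform IOMMU device to be realized, and the hotplug paths force the property to OFF once a PCI IOMMU (virtio-iommu-pci or riscv-iommu-pci) is plugged, so the two flavours never coexist. Presumably the property is set as -machine virt,iommu-sys=on; the command-line plumbing is not part of this diff. A minimal standalone sketch of that resolution logic (a local re-declaration of the enum, not QEMU code):

/* Standalone sketch: models the tri-state semantics of the "iommu-sys"
 * property shown above. AUTO currently behaves like OFF, since only an
 * explicit ON creates the platform IOMMU, and plugging a PCI IOMMU device
 * forces the setting to OFF. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { ON_OFF_AUTO_AUTO, ON_OFF_AUTO_ON, ON_OFF_AUTO_OFF } OnOffAuto;

/* mirrors virt_is_iommu_sys_enabled(): only an explicit ON enables it */
static bool iommu_sys_enabled(OnOffAuto v)
{
    return v == ON_OFF_AUTO_ON;
}

int main(void)
{
    OnOffAuto iommu_sys = ON_OFF_AUTO_AUTO;  /* default from instance_init */

    printf("default (auto): %d\n", iommu_sys_enabled(iommu_sys));  /* 0 */

    iommu_sys = ON_OFF_AUTO_ON;              /* user asked for the device */
    printf("explicit on:    %d\n", iommu_sys_enabled(iommu_sys));  /* 1 */

    iommu_sys = ON_OFF_AUTO_OFF;             /* a PCI IOMMU was plugged */
    printf("pci iommu plug: %d\n", iommu_sys_enabled(iommu_sys));  /* 0 */
    return 0;
}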
diff --git a/hw/rtc/Kconfig b/hw/rtc/Kconfig
index d0d8dda..315b0e4 100644
--- a/hw/rtc/Kconfig
+++ b/hw/rtc/Kconfig
@@ -14,10 +14,6 @@ config M48T59
config PL031
bool
-config TWL92230
- bool
- depends on I2C
-
config MC146818RTC
depends on ISA_BUS
bool
@@ -30,3 +26,8 @@ config GOLDFISH_RTC
config LS7A_RTC
bool
+
+config RS5C372_RTC
+ bool
+ depends on I2C
+ default y if I2C_DEVICES
diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c
index 2ac50b3..a747bff 100644
--- a/hw/rtc/allwinner-rtc.c
+++ b/hw/rtc/allwinner-rtc.c
@@ -25,7 +25,7 @@
#include "qemu/module.h"
#include "hw/qdev-properties.h"
#include "hw/rtc/allwinner-rtc.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
#include "trace.h"
/* RTC registers */
@@ -259,7 +259,7 @@ static void allwinner_rtc_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_rtc_ops = {
.read = allwinner_rtc_read,
.write = allwinner_rtc_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -311,16 +311,15 @@ static const VMStateDescription allwinner_rtc_vmstate = {
}
};
-static Property allwinner_rtc_properties[] = {
+static const Property allwinner_rtc_properties[] = {
DEFINE_PROP_INT32("base-year", AwRtcState, base_year, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void allwinner_rtc_class_init(ObjectClass *klass, void *data)
+static void allwinner_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_rtc_reset;
+ device_class_set_legacy_reset(dc, allwinner_rtc_reset);
dc->vmsd = &allwinner_rtc_vmstate;
device_class_set_props(dc, allwinner_rtc_properties);
}
@@ -331,7 +330,7 @@ static void allwinner_rtc_sun4i_init(Object *obj)
s->base_year = 2010;
}
-static void allwinner_rtc_sun4i_class_init(ObjectClass *klass, void *data)
+static void allwinner_rtc_sun4i_class_init(ObjectClass *klass, const void *data)
{
AwRtcClass *arc = AW_RTC_CLASS(klass);
@@ -347,7 +346,7 @@ static void allwinner_rtc_sun6i_init(Object *obj)
s->base_year = 1970;
}
-static void allwinner_rtc_sun6i_class_init(ObjectClass *klass, void *data)
+static void allwinner_rtc_sun6i_class_init(ObjectClass *klass, const void *data)
{
AwRtcClass *arc = AW_RTC_CLASS(klass);
@@ -363,7 +362,7 @@ static void allwinner_rtc_sun7i_init(Object *obj)
s->base_year = 1970;
}
-static void allwinner_rtc_sun7i_class_init(ObjectClass *klass, void *data)
+static void allwinner_rtc_sun7i_class_init(ObjectClass *klass, const void *data)
{
AwRtcClass *arc = AW_RTC_CLASS(klass);
allwinner_rtc_sun4i_class_init(klass, arc);
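
Editor's note: a pattern repeated throughout the hw/rtc changes in this series (allwinner, goldfish, m48t59, mc146818, pl031) is that Property arrays become const and drop the DEFINE_PROP_END_OF_LIST() sentinel, and direct dc->reset assignments are replaced by device_class_set_legacy_reset(). Removing the sentinel implies the consumer now learns the array length some other way, presumably an element count such as ARRAY_SIZE taken at the device_class_set_props() call site (an assumption; the counting side is not visible in this diff). A standalone sketch of the difference between sentinel-terminated and counted iteration:

/* Standalone sketch (plain C, not QEMU's qdev code): sentinel-terminated vs.
 * counted property lists. The counted form is what dropping
 * DEFINE_PROP_END_OF_LIST() enables; the count is assumed to be taken with
 * something like ARRAY_SIZE at the registration call site. */
#include <stdio.h>

typedef struct { const char *name; int def; } Property;

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const Property props_sentinel[] = {
    { "base-year", 0 },
    { NULL, 0 },                 /* the old END_OF_LIST-style terminator */
};

static const Property props_counted[] = {
    { "base-year", 0 },          /* no terminator needed */
};

int main(void)
{
    for (const Property *p = props_sentinel; p->name; p++) {
        printf("sentinel walk: %s\n", p->name);
    }
    for (size_t i = 0; i < ARRAY_SIZE(props_counted); i++) {
        printf("counted walk:  %s\n", props_counted[i].name);
    }
    return 0;
}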
diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c
index 589d9a5..c4feea2 100644
--- a/hw/rtc/aspeed_rtc.c
+++ b/hw/rtc/aspeed_rtc.c
@@ -11,7 +11,7 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/timer.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
#include "trace.h"
@@ -156,13 +156,13 @@ static void aspeed_rtc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void aspeed_rtc_class_init(ObjectClass *klass, void *data)
+static void aspeed_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_rtc_realize;
dc->vmsd = &vmstate_aspeed_rtc;
- dc->reset = aspeed_rtc_reset;
+ device_class_set_legacy_reset(dc, aspeed_rtc_reset);
}
static const TypeInfo aspeed_rtc_info = {
diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c
index e479661..5f1ee2e 100644
--- a/hw/rtc/ds1338.c
+++ b/hw/rtc/ds1338.c
@@ -14,9 +14,9 @@
#include "hw/i2c/i2c.h"
#include "migration/vmstate.h"
#include "qemu/bcd.h"
-#include "qemu/module.h"
#include "qom/object.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
+#include "trace.h"
/* Size of NVRAM including both the user-accessible area and the
* secondary register area.
@@ -126,6 +126,9 @@ static uint8_t ds1338_recv(I2CSlave *i2c)
uint8_t res;
res = s->nvram[s->ptr];
+
+ trace_ds1338_recv(s->ptr, res);
+
inc_regptr(s);
return res;
}
@@ -134,6 +137,8 @@ static int ds1338_send(I2CSlave *i2c, uint8_t data)
{
DS1338State *s = DS1338(i2c);
+ trace_ds1338_send(s->ptr, data);
+
if (s->addr_byte) {
s->ptr = data & (NVRAM_SIZE - 1);
s->addr_byte = false;
@@ -215,7 +220,7 @@ static void ds1338_reset(DeviceState *dev)
s->addr_byte = false;
}
-static void ds1338_class_init(ObjectClass *klass, void *data)
+static void ds1338_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -223,20 +228,17 @@ static void ds1338_class_init(ObjectClass *klass, void *data)
k->event = ds1338_event;
k->recv = ds1338_recv;
k->send = ds1338_send;
- dc->reset = ds1338_reset;
+ device_class_set_legacy_reset(dc, ds1338_reset);
dc->vmsd = &vmstate_ds1338;
}
-static const TypeInfo ds1338_info = {
- .name = TYPE_DS1338,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(DS1338State),
- .class_init = ds1338_class_init,
+static const TypeInfo ds1338_types[] = {
+ {
+ .name = TYPE_DS1338,
+ .parent = TYPE_I2C_SLAVE,
+ .instance_size = sizeof(DS1338State),
+ .class_init = ds1338_class_init,
+ },
};
-static void ds1338_register_types(void)
-{
- type_register_static(&ds1338_info);
-}
-
-type_init(ds1338_register_types)
+DEFINE_TYPES(ds1338_types)
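
Editor's note: the ds1338 change above adds trace points on register reads and writes; their format strings are declared later in this series in hw/rtc/trace-events (ds1338_recv/ds1338_send). A standalone sketch of the formatting those declarations describe, using the standard inttypes.h macros directly rather than QEMU's generated trace helpers:

/* Standalone illustration (not the QEMU tracing backend): formats a register
 * access the same way as the ds1338_recv/ds1338_send trace-events entries
 * added further down in this series. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void trace_recv(uint32_t addr, uint8_t value)
{
    printf("ds1338_recv [0x%" PRIx32 "] -> 0x%02" PRIx8 "\n", addr, value);
}

static void trace_send(uint32_t addr, uint8_t value)
{
    printf("ds1338_send [0x%" PRIx32 "] <- 0x%02" PRIx8 "\n", addr, value);
}

int main(void)
{
    trace_recv(0x00, 0x59);   /* e.g. guest read of the seconds register */
    trace_send(0x07, 0x80);   /* e.g. guest write to the control register */
    return 0;
}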
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c
index 319371f..624b4f6 100644
--- a/hw/rtc/exynos4210_rtc.c
+++ b/hw/rtc/exynos4210_rtc.c
@@ -38,7 +38,7 @@
#include "hw/arm/exynos4210.h"
#include "qom/object.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
#define DEBUG_RTC 0
@@ -592,11 +592,11 @@ static void exynos4210_rtc_finalize(Object *obj)
ptimer_free(s->ptimer_1Hz);
}
-static void exynos4210_rtc_class_init(ObjectClass *klass, void *data)
+static void exynos4210_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_rtc_reset;
+ device_class_set_legacy_reset(dc, exynos4210_rtc_reset);
dc->vmsd = &vmstate_exynos4210_rtc_state;
}
diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c
index 01acf30..78df031 100644
--- a/hw/rtc/goldfish_rtc.c
+++ b/hw/rtc/goldfish_rtc.c
@@ -27,8 +27,8 @@
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
@@ -178,38 +178,21 @@ static void goldfish_rtc_write(void *opaque, hwaddr offset,
trace_goldfish_rtc_write(offset, value);
}
-static int goldfish_rtc_pre_save(void *opaque)
-{
- uint64_t delta;
- GoldfishRTCState *s = opaque;
-
- /*
- * We want to migrate this offset, which sounds straightforward.
- * Unfortunately, we cannot directly pass tick_offset because
- * rtc_clock on destination Host might not be same source Host.
- *
- * To tackle, this we pass tick_offset relative to vm_clock from
- * source Host and make it relative to rtc_clock at destination Host.
- */
- delta = qemu_clock_get_ns(rtc_clock) -
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- s->tick_offset_vmstate = s->tick_offset + delta;
-
- return 0;
-}
-
static int goldfish_rtc_post_load(void *opaque, int version_id)
{
- uint64_t delta;
GoldfishRTCState *s = opaque;
- /*
- * We extract tick_offset from tick_offset_vmstate by doing
- * reverse math compared to pre_save() function.
- */
- delta = qemu_clock_get_ns(rtc_clock) -
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- s->tick_offset = s->tick_offset_vmstate - delta;
+ if (version_id < 3) {
+ /*
+ * Previous versions didn't migrate tick_offset directly. Instead, they
+ * migrated tick_offset_vmstate, which is a recalculation based on
+ * QEMU_CLOCK_VIRTUAL. We use tick_offset_vmstate when migrating from
+ * older versions.
+ */
+ uint64_t delta = qemu_clock_get_ns(rtc_clock) -
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ s->tick_offset = s->tick_offset_vmstate - delta;
+ }
goldfish_rtc_set_alarm(s);
@@ -239,8 +222,7 @@ static const MemoryRegionOps goldfish_rtc_ops[2] = {
static const VMStateDescription goldfish_rtc_vmstate = {
.name = TYPE_GOLDFISH_RTC,
- .version_id = 2,
- .pre_save = goldfish_rtc_pre_save,
+ .version_id = 3,
.post_load = goldfish_rtc_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState),
@@ -249,6 +231,7 @@ static const VMStateDescription goldfish_rtc_vmstate = {
VMSTATE_UINT32(irq_pending, GoldfishRTCState),
VMSTATE_UINT32(irq_enabled, GoldfishRTCState),
VMSTATE_UINT32(time_high, GoldfishRTCState),
+ VMSTATE_UINT64_V(tick_offset, GoldfishRTCState, 3),
VMSTATE_END_OF_LIST()
}
};
@@ -256,15 +239,8 @@ static const VMStateDescription goldfish_rtc_vmstate = {
static void goldfish_rtc_reset(DeviceState *dev)
{
GoldfishRTCState *s = GOLDFISH_RTC(dev);
- struct tm tm;
timer_del(s->timer);
-
- qemu_get_timedate(&tm, 0);
- s->tick_offset = mktimegm(&tm);
- s->tick_offset *= NANOSECONDS_PER_SECOND;
- s->tick_offset -= qemu_clock_get_ns(rtc_clock);
- s->tick_offset_vmstate = 0;
s->alarm_next = 0;
s->alarm_running = 0;
s->irq_pending = 0;
@@ -275,6 +251,7 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp)
{
SysBusDevice *dev = SYS_BUS_DEVICE(d);
GoldfishRTCState *s = GOLDFISH_RTC(d);
+ struct tm tm;
memory_region_init_io(&s->iomem, OBJECT(s),
&goldfish_rtc_ops[s->big_endian], s,
@@ -284,21 +261,25 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp)
sysbus_init_irq(dev, &s->irq);
s->timer = timer_new_ns(rtc_clock, goldfish_rtc_interrupt, s);
+
+ qemu_get_timedate(&tm, 0);
+ s->tick_offset = mktimegm(&tm);
+ s->tick_offset *= NANOSECONDS_PER_SECOND;
+ s->tick_offset -= qemu_clock_get_ns(rtc_clock);
}
-static Property goldfish_rtc_properties[] = {
+static const Property goldfish_rtc_properties[] = {
DEFINE_PROP_BOOL("big-endian", GoldfishRTCState, big_endian,
false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void goldfish_rtc_class_init(ObjectClass *klass, void *data)
+static void goldfish_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, goldfish_rtc_properties);
dc->realize = goldfish_rtc_realize;
- dc->reset = goldfish_rtc_reset;
+ device_class_set_legacy_reset(dc, goldfish_rtc_reset);
dc->vmsd = &goldfish_rtc_vmstate;
}
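
Editor's note: the goldfish_rtc change above bumps the vmstate to version 3 and migrates tick_offset directly; only when loading a pre-3 stream does post_load() reconstruct it from tick_offset_vmstate, which older QEMU versions had rebased onto QEMU_CLOCK_VIRTUAL in the now-removed pre_save(). A minimal standalone sketch of that old rebasing arithmetic (made-up clock values, not QEMU code):

/* Standalone sketch of the offset rebasing the pre-version-3 goldfish RTC
 * migration format performed in pre_save()/post_load(). Clock readings are
 * invented; in QEMU they come from qemu_clock_get_ns(rtc_clock) and
 * qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical clock readings on the source and destination hosts */
    int64_t src_rtc = 1000, src_virtual = 400;
    int64_t dst_rtc = 9000, dst_virtual = 300;
    int64_t tick_offset = 123456;

    /* old pre_save(): rebase tick_offset onto QEMU_CLOCK_VIRTUAL */
    int64_t tick_offset_vmstate = tick_offset + (src_rtc - src_virtual);

    /* old post_load(): reverse the math with the destination's clocks.
     * With version 3 the stream carries tick_offset itself, so this
     * reconstruction is only applied to streams older than version 3,
     * exactly as the new post_load() above does. */
    int64_t restored = tick_offset_vmstate - (dst_rtc - dst_virtual);

    printf("vmstate=%lld restored=%lld\n",
           (long long)tick_offset_vmstate, (long long)restored);
    return 0;
}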
diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c
index 052201c..10097b2 100644
--- a/hw/rtc/ls7a_rtc.c
+++ b/hw/rtc/ls7a_rtc.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Loongarch LS7A Real Time Clock emulation
+ * LoongArch LS7A Real Time Clock emulation
*
* Copyright (C) 2021 Loongson Technology Corporation Limited
*/
@@ -10,12 +10,12 @@
#include "hw/irq.h"
#include "hw/register.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "migration/vmstate.h"
#include "hw/misc/unimp.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
#include "hw/registerfields.h"
#define SYS_TOYTRIM 0x20
@@ -464,12 +464,12 @@ static const VMStateDescription vmstate_ls7a_rtc = {
}
};
-static void ls7a_rtc_class_init(ObjectClass *klass, void *data)
+static void ls7a_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_ls7a_rtc;
dc->realize = ls7a_rtc_realize;
- dc->reset = ls7a_rtc_reset;
+ device_class_set_legacy_reset(dc, ls7a_rtc_reset);
dc->desc = "ls7a rtc";
}
diff --git a/hw/rtc/m41t80.c b/hw/rtc/m41t80.c
index e045c86..c631ec3 100644
--- a/hw/rtc/m41t80.c
+++ b/hw/rtc/m41t80.c
@@ -14,7 +14,7 @@
#include "qemu/bcd.h"
#include "hw/i2c/i2c.h"
#include "qom/object.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
#define TYPE_M41T80 "m41t80"
OBJECT_DECLARE_SIMPLE_TYPE(M41t80State, M41T80)
@@ -94,7 +94,7 @@ static int m41t80_event(I2CSlave *i2c, enum i2c_event event)
return 0;
}
-static void m41t80_class_init(ObjectClass *klass, void *data)
+static void m41t80_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c
index 5bb46f2..9e2f656 100644
--- a/hw/rtc/m48t59-isa.c
+++ b/hw/rtc/m48t59-isa.c
@@ -77,11 +77,10 @@ static void m48txx_isa_toggle_lock(Nvram *obj, int lock)
m48t59_toggle_lock(&d->state, lock);
}
-static Property m48t59_isa_properties[] = {
+static const Property m48t59_isa_properties[] = {
DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0),
DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74),
DEFINE_PROP_UINT8("irq", M48txxISAState, isairq, 8),
- DEFINE_PROP_END_OF_LIST(),
};
static void m48t59_reset_isa(DeviceState *d)
@@ -114,23 +113,23 @@ static void m48t59_isa_realize(DeviceState *dev, Error **errp)
}
}
-static void m48txx_isa_class_init(ObjectClass *klass, void *data)
+static void m48txx_isa_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
NvramClass *nc = NVRAM_CLASS(klass);
dc->realize = m48t59_isa_realize;
- dc->reset = m48t59_reset_isa;
+ device_class_set_legacy_reset(dc, m48t59_reset_isa);
device_class_set_props(dc, m48t59_isa_properties);
nc->read = m48txx_isa_read;
nc->write = m48txx_isa_write;
nc->toggle_lock = m48txx_isa_toggle_lock;
}
-static void m48txx_isa_concrete_class_init(ObjectClass *klass, void *data)
+static void m48txx_isa_concrete_class_init(ObjectClass *klass, const void *data)
{
M48txxISADeviceClass *u = M48TXX_ISA_CLASS(klass);
- M48txxInfo *info = data;
+ const M48txxInfo *info = data;
u->info = *info;
}
@@ -141,7 +140,7 @@ static const TypeInfo m48txx_isa_type_info = {
.instance_size = sizeof(M48txxISAState),
.abstract = true,
.class_init = m48txx_isa_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NVRAM },
{ }
}
@@ -161,7 +160,7 @@ static void m48t59_isa_register_types(void)
for (i = 0; i < ARRAY_SIZE(m48txx_isa_info); i++) {
isa_type_info.name = m48txx_isa_info[i].bus_name;
isa_type_info.class_data = &m48txx_isa_info[i];
- type_register(&isa_type_info);
+ type_register_static(&isa_type_info);
}
}
diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c
index 1585a2d..68be2da 100644
--- a/hw/rtc/m48t59.c
+++ b/hw/rtc/m48t59.c
@@ -28,15 +28,15 @@
#include "hw/qdev-properties.h"
#include "hw/rtc/m48t59.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/rtc.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/rtc.h"
+#include "system/system.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/bcd.h"
#include "qemu/module.h"
#include "trace.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "m48t59-internal.h"
#include "migration/vmstate.h"
@@ -618,18 +618,17 @@ static void m48txx_sysbus_toggle_lock(Nvram *obj, int lock)
m48t59_toggle_lock(&d->state, lock);
}
-static Property m48t59_sysbus_properties[] = {
+static const Property m48t59_sysbus_properties[] = {
DEFINE_PROP_INT32("base-year", M48txxSysBusState, state.base_year, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void m48txx_sysbus_class_init(ObjectClass *klass, void *data)
+static void m48txx_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
NvramClass *nc = NVRAM_CLASS(klass);
dc->realize = m48t59_realize;
- dc->reset = m48t59_reset_sysbus;
+ device_class_set_legacy_reset(dc, m48t59_reset_sysbus);
device_class_set_props(dc, m48t59_sysbus_properties);
dc->vmsd = &vmstate_m48t59;
nc->read = m48txx_sysbus_read;
@@ -637,10 +636,11 @@ static void m48txx_sysbus_class_init(ObjectClass *klass, void *data)
nc->toggle_lock = m48txx_sysbus_toggle_lock;
}
-static void m48txx_sysbus_concrete_class_init(ObjectClass *klass, void *data)
+static void m48txx_sysbus_concrete_class_init(ObjectClass *klass,
+ const void *data)
{
M48txxSysBusDeviceClass *u = M48TXX_SYS_BUS_CLASS(klass);
- M48txxInfo *info = data;
+ const M48txxInfo *info = data;
u->info = *info;
}
@@ -658,7 +658,7 @@ static const TypeInfo m48txx_sysbus_type_info = {
.instance_init = m48t59_init1,
.abstract = true,
.class_init = m48txx_sysbus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NVRAM },
{ }
}
@@ -679,7 +679,7 @@ static void m48t59_register_types(void)
for (i = 0; i < ARRAY_SIZE(m48txx_sysbus_info); i++) {
sysbus_type_info.name = m48txx_sysbus_info[i].bus_name;
sysbus_type_info.class_data = &m48txx_sysbus_info[i];
- type_register(&sysbus_type_info);
+ type_register_static(&sysbus_type_info);
}
}
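
Editor's note: both m48t59-isa.c and m48t59.c register a family of device variants from a table, patching .name and .class_data per entry and switching from type_register() to type_register_static(); the concrete class_init then copies the per-variant info out of the now-const class_data pointer. A standalone sketch of that data-driven pattern (the struct fields and table entries below are placeholders, not the real M48txxInfo contents):

/* Standalone sketch (not QOM): one class_init receives a per-variant const
 * info record through class_data and copies it into the class, as the
 * m48txx_*_concrete_class_init() functions above do. */
#include <stdio.h>

typedef struct { const char *bus_name; int model; } VariantInfo;   /* placeholder */
typedef struct { char name[32]; VariantInfo info; } VariantClass;

static const VariantInfo table[] = {      /* placeholder entries */
    { "variant-a", 1 },
    { "variant-b", 2 },
};

/* plays the role of m48txx_sysbus_concrete_class_init(): data stays const */
static void concrete_class_init(VariantClass *klass, const void *data)
{
    const VariantInfo *info = data;
    klass->info = *info;
}

int main(void)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        VariantClass klass;
        snprintf(klass.name, sizeof(klass.name), "%s", table[i].bus_name);
        concrete_class_init(&klass, &table[i]);
        printf("registered %s (model %d)\n", klass.name, klass.info.model);
    }
    return 0;
}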
diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c
index 8ccee9a..f9f5cf3 100644
--- a/hw/rtc/mc146818rtc.c
+++ b/hw/rtc/mc146818rtc.c
@@ -32,11 +32,11 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/replay.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/replay.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/rtc.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/rtc/mc146818rtc_regs.h"
#include "migration/vmstate.h"
@@ -819,7 +819,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = {
static const VMStateDescription vmstate_rtc = {
.name = "mc146818rtc",
.version_id = 3,
- .minimum_version_id = 1,
+ .minimum_version_id = 3,
.pre_save = rtc_pre_save,
.post_load = rtc_post_load,
.fields = (const VMStateField[]) {
@@ -829,13 +829,13 @@ static const VMStateDescription vmstate_rtc = {
VMSTATE_TIMER_PTR(periodic_timer, MC146818RtcState),
VMSTATE_INT64(next_periodic_time, MC146818RtcState),
VMSTATE_UNUSED(3*8),
- VMSTATE_UINT32_V(irq_coalesced, MC146818RtcState, 2),
- VMSTATE_UINT32_V(period, MC146818RtcState, 2),
- VMSTATE_UINT64_V(base_rtc, MC146818RtcState, 3),
- VMSTATE_UINT64_V(last_update, MC146818RtcState, 3),
- VMSTATE_INT64_V(offset, MC146818RtcState, 3),
- VMSTATE_TIMER_PTR_V(update_timer, MC146818RtcState, 3),
- VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3),
+ VMSTATE_UINT32(irq_coalesced, MC146818RtcState),
+ VMSTATE_UINT32(period, MC146818RtcState),
+ VMSTATE_UINT64(base_rtc, MC146818RtcState),
+ VMSTATE_UINT64(last_update, MC146818RtcState),
+ VMSTATE_INT64(offset, MC146818RtcState),
+ VMSTATE_TIMER_PTR(update_timer, MC146818RtcState),
+ VMSTATE_UINT64(next_alarm_time, MC146818RtcState),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
@@ -929,8 +929,6 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
memory_region_add_subregion(&s->io, 0, &s->coalesced_io);
memory_region_add_coalescing(&s->coalesced_io, 0, 1);
- qdev_set_legacy_instance_id(dev, s->io_base, 3);
-
object_property_add_tm(OBJECT(s), "date", rtc_get_date);
qdev_init_gpio_out(dev, &s->irq, 1);
@@ -960,13 +958,12 @@ MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year,
return s;
}
-static Property mc146818rtc_properties[] = {
+static const Property mc146818rtc_properties[] = {
DEFINE_PROP_INT32("base_year", MC146818RtcState, base_year, 1980),
DEFINE_PROP_UINT16("iobase", MC146818RtcState, io_base, RTC_ISA_BASE),
DEFINE_PROP_UINT8("irq", MC146818RtcState, isairq, RTC_ISA_IRQ),
DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", MC146818RtcState,
lost_tick_policy, LOST_TICK_POLICY_DISCARD),
- DEFINE_PROP_END_OF_LIST(),
};
static void rtc_reset_enter(Object *obj, ResetType type)
@@ -1019,7 +1016,7 @@ static void rtc_build_aml(AcpiDevAmlIf *adev, Aml *scope)
aml_append(scope, dev);
}
-static void rtc_class_initfn(ObjectClass *klass, void *data)
+static void rtc_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -1039,7 +1036,7 @@ static const TypeInfo mc146818rtc_info = {
.parent = TYPE_ISA_DEVICE,
.instance_size = sizeof(MC146818RtcState),
.class_init = rtc_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
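
Editor's note: in the mc146818 vmstate above, minimum_version_id rises from 1 to 3 and the version-gated field macros (VMSTATE_UINT32_V, VMSTATE_UINT64_V, ...) lose their version argument. Once every accepted stream is at least version 3, a "present since version 2" or "since version 3" gate is always true, so the plain macros are equivalent. A standalone sketch of that gating logic, modeled outside QEMU:

/* Standalone sketch (not the QEMU vmstate engine): a field tagged with a
 * minimum field version N is only loaded when the incoming stream version
 * is >= N; with minimum_version_id = 3 every gate at 2 or 3 always passes. */
#include <stdbool.h>
#include <stdio.h>

static bool field_present(int stream_version, int field_since)
{
    return stream_version >= field_since;
}

int main(void)
{
    const int minimum_version_id = 3;   /* new value in vmstate_rtc */

    for (int v = 1; v <= 3; v++) {
        if (v < minimum_version_id) {
            printf("stream v%d: rejected outright\n", v);
            continue;
        }
        printf("stream v%d: irq_coalesced(since 2)=%d base_rtc(since 3)=%d\n",
               v, field_present(v, 2), field_present(v, 3));
    }
    return 0;
}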
diff --git a/hw/rtc/meson.build b/hw/rtc/meson.build
index 3ea2aff..6c87864 100644
--- a/hw/rtc/meson.build
+++ b/hw/rtc/meson.build
@@ -3,7 +3,6 @@ system_ss.add(when: 'CONFIG_DS1338', if_true: files('ds1338.c'))
system_ss.add(when: 'CONFIG_M41T80', if_true: files('m41t80.c'))
system_ss.add(when: 'CONFIG_M48T59', if_true: files('m48t59.c'))
system_ss.add(when: 'CONFIG_PL031', if_true: files('pl031.c'))
-system_ss.add(when: 'CONFIG_TWL92230', if_true: files('twl92230.c'))
system_ss.add(when: ['CONFIG_ISA_BUS', 'CONFIG_M48T59'], if_true: files('m48t59-isa.c'))
system_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-rtc.c'))
@@ -14,3 +13,4 @@ system_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c'))
system_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c'))
system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c'))
system_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c'))
+system_ss.add(when: 'CONFIG_RS5C372_RTC', if_true: files('rs5c372.c'))
diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c
index 563bb4b..e545b9d 100644
--- a/hw/rtc/pl031.c
+++ b/hw/rtc/pl031.c
@@ -18,8 +18,8 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -319,7 +319,7 @@ static const VMStateDescription vmstate_pl031 = {
}
};
-static Property pl031_properties[] = {
+static const Property pl031_properties[] = {
/*
* True to correctly migrate the tick offset of the RTC. False to
* obtain backward migration compatibility with older QEMU versions,
@@ -330,10 +330,9 @@ static Property pl031_properties[] = {
*/
DEFINE_PROP_BOOL("migrate-tick-offset",
PL031State, migrate_tick_offset, true),
- DEFINE_PROP_END_OF_LIST()
};
-static void pl031_class_init(ObjectClass *klass, void *data)
+static void pl031_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/rtc/rs5c372.c b/hw/rtc/rs5c372.c
new file mode 100644
index 0000000..bb92453
--- /dev/null
+++ b/hw/rtc/rs5c372.c
@@ -0,0 +1,236 @@
+/*
+ * Ricoh RS5C372, R222x I2C RTC
+ *
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * Based on hw/rtc/ds1338.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/i2c/i2c.h"
+#include "hw/qdev-properties.h"
+#include "hw/resettable.h"
+#include "migration/vmstate.h"
+#include "qemu/bcd.h"
+#include "qom/object.h"
+#include "system/rtc.h"
+#include "trace.h"
+
+#define NVRAM_SIZE 0x10
+
+/* Flags definitions */
+#define SECONDS_CH 0x80
+#define HOURS_PM 0x20
+#define CTRL2_24 0x20
+
+#define TYPE_RS5C372 "rs5c372"
+OBJECT_DECLARE_SIMPLE_TYPE(RS5C372State, RS5C372)
+
+struct RS5C372State {
+ I2CSlave parent_obj;
+
+ int64_t offset;
+ uint8_t wday_offset;
+ uint8_t nvram[NVRAM_SIZE];
+ uint8_t ptr;
+ uint8_t tx_format;
+ bool addr_byte;
+};
+
+static void capture_current_time(RS5C372State *s)
+{
+ /*
+ * Capture the current time into the secondary registers, which will
+ * actually be read by the data transfer operation.
+ */
+ struct tm now;
+ qemu_get_timedate(&now, s->offset);
+ s->nvram[0] = to_bcd(now.tm_sec);
+ s->nvram[1] = to_bcd(now.tm_min);
+ if (s->nvram[0xf] & CTRL2_24) {
+ s->nvram[2] = to_bcd(now.tm_hour);
+ } else {
+ int tmp = now.tm_hour;
+ if (tmp % 12 == 0) {
+ tmp += 12;
+ }
+ if (tmp <= 12) {
+ s->nvram[2] = to_bcd(tmp);
+ } else {
+ s->nvram[2] = HOURS_PM | to_bcd(tmp - 12);
+ }
+ }
+ s->nvram[3] = (now.tm_wday + s->wday_offset) % 7 + 1;
+ s->nvram[4] = to_bcd(now.tm_mday);
+ s->nvram[5] = to_bcd(now.tm_mon + 1);
+ s->nvram[6] = to_bcd(now.tm_year - 100);
+}
+
+static void inc_regptr(RS5C372State *s)
+{
+ s->ptr = (s->ptr + 1) & (NVRAM_SIZE - 1);
+}
+
+static int rs5c372_event(I2CSlave *i2c, enum i2c_event event)
+{
+ RS5C372State *s = RS5C372(i2c);
+
+ switch (event) {
+ case I2C_START_RECV:
+ /*
+ * In h/w, capture happens on any START condition, not just a
+ * START_RECV, but there is no need to actually capture on
+ * START_SEND, because the guest can't get at that data
+ * without going through a START_RECV which would overwrite it.
+ */
+ capture_current_time(s);
+ s->ptr = 0xf;
+ break;
+ case I2C_START_SEND:
+ s->addr_byte = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static uint8_t rs5c372_recv(I2CSlave *i2c)
+{
+ RS5C372State *s = RS5C372(i2c);
+ uint8_t res;
+
+ res = s->nvram[s->ptr];
+
+ trace_rs5c372_recv(s->ptr, res);
+
+ inc_regptr(s);
+ return res;
+}
+
+static int rs5c372_send(I2CSlave *i2c, uint8_t data)
+{
+ RS5C372State *s = RS5C372(i2c);
+
+ if (s->addr_byte) {
+ s->ptr = data >> 4;
+ s->tx_format = data & 0xf;
+ s->addr_byte = false;
+ return 0;
+ }
+
+ trace_rs5c372_send(s->ptr, data);
+
+ if (s->ptr < 7) {
+ /* Time register. */
+ struct tm now;
+ qemu_get_timedate(&now, s->offset);
+ switch (s->ptr) {
+ case 0:
+ now.tm_sec = from_bcd(data & 0x7f);
+ break;
+ case 1:
+ now.tm_min = from_bcd(data & 0x7f);
+ break;
+ case 2:
+ if (s->nvram[0xf] & CTRL2_24) {
+ now.tm_hour = from_bcd(data & 0x3f);
+ } else {
+ int tmp = from_bcd(data & (HOURS_PM - 1));
+ if (data & HOURS_PM) {
+ tmp += 12;
+ }
+ if (tmp % 12 == 0) {
+ tmp -= 12;
+ }
+ now.tm_hour = tmp;
+ }
+ break;
+ case 3:
+ {
+ /*
+ * The day field is supposed to contain a value in the range
+ * 1-7. Otherwise behavior is undefined.
+ */
+ int user_wday = (data & 7) - 1;
+ s->wday_offset = (user_wday - now.tm_wday + 7) % 7;
+ }
+ break;
+ case 4:
+ now.tm_mday = from_bcd(data & 0x3f);
+ break;
+ case 5:
+ now.tm_mon = from_bcd(data & 0x1f) - 1;
+ break;
+ case 6:
+ now.tm_year = from_bcd(data) + 100;
+ break;
+ }
+ s->offset = qemu_timedate_diff(&now);
+ } else {
+ s->nvram[s->ptr] = data;
+ }
+ inc_regptr(s);
+ return 0;
+}
+
+static void rs5c372_reset_hold(Object *obj, ResetType type)
+{
+ RS5C372State *s = RS5C372(obj);
+
+ /* The clock is running and synchronized with the host */
+ s->offset = 0;
+ s->wday_offset = 0;
+ memset(s->nvram, 0, NVRAM_SIZE);
+ s->ptr = 0;
+ s->addr_byte = false;
+}
+
+static const VMStateDescription rs5c372_vmstate = {
+ .name = "rs5c372",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_I2C_SLAVE(parent_obj, RS5C372State),
+ VMSTATE_INT64(offset, RS5C372State),
+ VMSTATE_UINT8_V(wday_offset, RS5C372State, 2),
+ VMSTATE_UINT8_ARRAY(nvram, RS5C372State, NVRAM_SIZE),
+ VMSTATE_UINT8(ptr, RS5C372State),
+ VMSTATE_UINT8(tx_format, RS5C372State),
+ VMSTATE_BOOL(addr_byte, RS5C372State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void rs5c372_init(Object *obj)
+{
+ qdev_prop_set_uint8(DEVICE(obj), "address", 0x32);
+}
+
+static void rs5c372_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ k->event = rs5c372_event;
+ k->recv = rs5c372_recv;
+ k->send = rs5c372_send;
+ dc->vmsd = &rs5c372_vmstate;
+ rc->phases.hold = rs5c372_reset_hold;
+}
+
+static const TypeInfo rs5c372_types[] = {
+ {
+ .name = TYPE_RS5C372,
+ .parent = TYPE_I2C_SLAVE,
+ .instance_size = sizeof(RS5C372State),
+ .instance_init = rs5c372_init,
+ .class_init = rs5c372_class_init,
+ },
+};
+
+DEFINE_TYPES(rs5c372_types)
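
Editor's note: the new rs5c372 model stores its time registers in BCD and supports both 24-hour and 12-hour modes (CTRL2_24); in 12-hour mode the hour register carries an AM/PM flag in bit 5 (HOURS_PM). A self-contained sketch of that encode/decode round trip, with to_bcd()/from_bcd() re-implemented locally rather than taken from qemu/bcd.h:

/* Standalone sketch of the BCD and 12-hour conversions used by
 * capture_current_time() and rs5c372_send() above. */
#include <stdint.h>
#include <stdio.h>

#define HOURS_PM 0x20

static uint8_t to_bcd(int val)   { return ((val / 10) << 4) | (val % 10); }
static int from_bcd(uint8_t val) { return ((val >> 4) * 10) + (val & 0x0f); }

/* 24-hour value -> 12-hour register encoding with the PM flag */
static uint8_t hour_to_12h_reg(int hour)
{
    int tmp = hour;
    if (tmp % 12 == 0) {
        tmp += 12;                      /* 0 -> "12" AM, 12 -> "12" PM */
    }
    return tmp <= 12 ? to_bcd(tmp) : (uint8_t)(HOURS_PM | to_bcd(tmp - 12));
}

/* 12-hour register encoding -> 24-hour value (mirrors rs5c372_send case 2) */
static int hour_from_12h_reg(uint8_t reg)
{
    int tmp = from_bcd(reg & (HOURS_PM - 1));
    if (reg & HOURS_PM) {
        tmp += 12;
    }
    if (tmp % 12 == 0) {
        tmp -= 12;
    }
    return tmp;
}

int main(void)
{
    for (int h = 0; h < 24; h++) {
        uint8_t reg = hour_to_12h_reg(h);
        printf("%02d -> reg 0x%02x -> %02d\n", h, (unsigned)reg,
               hour_from_12h_reg(reg));
    }
    return 0;
}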
diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c
index ffcc0aa..29e24ef 100644
--- a/hw/rtc/sun4v-rtc.c
+++ b/hw/rtc/sun4v-rtc.c
@@ -75,7 +75,7 @@ static void sun4v_rtc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void sun4v_rtc_class_init(ObjectClass *klass, void *data)
+static void sun4v_rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/rtc/trace-events b/hw/rtc/trace-events
index ebb311a..b9f2852 100644
--- a/hw/rtc/trace-events
+++ b/hw/rtc/trace-events
@@ -22,6 +22,10 @@ pl031_set_alarm(uint32_t ticks) "alarm set for %u ticks"
aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+# ds1338.c
+ds1338_recv(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] -> 0x%02" PRIx8
+ds1338_send(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] <- 0x%02" PRIx8
+
# m48t59.c
m48txx_nvram_io_read(uint64_t addr, uint64_t value) "io read addr:0x%04" PRIx64 " value:0x%02" PRIx64
m48txx_nvram_io_write(uint64_t addr, uint64_t value) "io write addr:0x%04" PRIx64 " value:0x%02" PRIx64
@@ -31,3 +35,7 @@ m48txx_nvram_mem_write(uint32_t addr, uint32_t value) "mem write addr:0x%04x val
# goldfish_rtc.c
goldfish_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
goldfish_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+
+# rs5c372.c
+rs5c372_recv(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] -> 0x%02" PRIx8
+rs5c372_send(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] <- 0x%02" PRIx8
diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c
deleted file mode 100644
index efd19a7..0000000
--- a/hw/rtc/twl92230.c
+++ /dev/null
@@ -1,882 +0,0 @@
-/*
- * TI TWL92230C energy-management companion device for the OMAP24xx.
- * Aka. Menelaus (N4200 MENELAUS1_V2.2)
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/timer.h"
-#include "hw/i2c/i2c.h"
-#include "hw/irq.h"
-#include "migration/qemu-file-types.h"
-#include "migration/vmstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
-#include "qemu/bcd.h"
-#include "qemu/module.h"
-#include "qom/object.h"
-
-#define VERBOSE 1
-
-#define TYPE_TWL92230 "twl92230"
-OBJECT_DECLARE_SIMPLE_TYPE(MenelausState, TWL92230)
-
-struct MenelausState {
- I2CSlave parent_obj;
-
- int firstbyte;
- uint8_t reg;
-
- uint8_t vcore[5];
- uint8_t dcdc[3];
- uint8_t ldo[8];
- uint8_t sleep[2];
- uint8_t osc;
- uint8_t detect;
- uint16_t mask;
- uint16_t status;
- uint8_t dir;
- uint8_t inputs;
- uint8_t outputs;
- uint8_t bbsms;
- uint8_t pull[4];
- uint8_t mmc_ctrl[3];
- uint8_t mmc_debounce;
- struct {
- uint8_t ctrl;
- uint16_t comp;
- QEMUTimer *hz_tm;
- int64_t next;
- struct tm tm;
- struct tm new;
- struct tm alm;
- int64_t sec_offset;
- int64_t alm_sec;
- int next_comp;
- } rtc;
- uint16_t rtc_next_vmstate;
- qemu_irq out[4];
- uint8_t pwrbtn_state;
-};
-
-static inline void menelaus_update(MenelausState *s)
-{
- qemu_set_irq(s->out[3], s->status & ~s->mask);
-}
-
-static inline void menelaus_rtc_start(MenelausState *s)
-{
- s->rtc.next += qemu_clock_get_ms(rtc_clock);
- timer_mod(s->rtc.hz_tm, s->rtc.next);
-}
-
-static inline void menelaus_rtc_stop(MenelausState *s)
-{
- timer_del(s->rtc.hz_tm);
- s->rtc.next -= qemu_clock_get_ms(rtc_clock);
- if (s->rtc.next < 1)
- s->rtc.next = 1;
-}
-
-static void menelaus_rtc_update(MenelausState *s)
-{
- qemu_get_timedate(&s->rtc.tm, s->rtc.sec_offset);
-}
-
-static void menelaus_alm_update(MenelausState *s)
-{
- if ((s->rtc.ctrl & 3) == 3)
- s->rtc.alm_sec = qemu_timedate_diff(&s->rtc.alm) - s->rtc.sec_offset;
-}
-
-static void menelaus_rtc_hz(void *opaque)
-{
- MenelausState *s = (MenelausState *) opaque;
-
- s->rtc.next_comp --;
- s->rtc.alm_sec --;
- s->rtc.next += 1000;
- timer_mod(s->rtc.hz_tm, s->rtc.next);
- if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */
- menelaus_rtc_update(s);
- if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec)
- s->status |= 1 << 8; /* RTCTMR */
- else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min)
- s->status |= 1 << 8; /* RTCTMR */
- else if (!s->rtc.tm.tm_hour)
- s->status |= 1 << 8; /* RTCTMR */
- } else
- s->status |= 1 << 8; /* RTCTMR */
- if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */
- if (s->rtc.alm_sec == 0)
- s->status |= 1 << 9; /* RTCALM */
- /* TODO: wake-up */
- }
- if (s->rtc.next_comp <= 0) {
- s->rtc.next -= muldiv64((int16_t) s->rtc.comp, 1000, 0x8000);
- s->rtc.next_comp = 3600;
- }
- menelaus_update(s);
-}
-
-static void menelaus_reset(I2CSlave *i2c)
-{
- MenelausState *s = TWL92230(i2c);
-
- s->reg = 0x00;
-
- s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */
- s->vcore[1] = 0x05;
- s->vcore[2] = 0x02;
- s->vcore[3] = 0x0c;
- s->vcore[4] = 0x03;
- s->dcdc[0] = 0x33; /* Depends on wiring */
- s->dcdc[1] = 0x03;
- s->dcdc[2] = 0x00;
- s->ldo[0] = 0x95;
- s->ldo[1] = 0x7e;
- s->ldo[2] = 0x00;
- s->ldo[3] = 0x00; /* Depends on wiring */
- s->ldo[4] = 0x03; /* Depends on wiring */
- s->ldo[5] = 0x00;
- s->ldo[6] = 0x00;
- s->ldo[7] = 0x00;
- s->sleep[0] = 0x00;
- s->sleep[1] = 0x00;
- s->osc = 0x01;
- s->detect = 0x09;
- s->mask = 0x0fff;
- s->status = 0;
- s->dir = 0x07;
- s->outputs = 0x00;
- s->bbsms = 0x00;
- s->pull[0] = 0x00;
- s->pull[1] = 0x00;
- s->pull[2] = 0x00;
- s->pull[3] = 0x00;
- s->mmc_ctrl[0] = 0x03;
- s->mmc_ctrl[1] = 0xc0;
- s->mmc_ctrl[2] = 0x00;
- s->mmc_debounce = 0x05;
-
- if (s->rtc.ctrl & 1)
- menelaus_rtc_stop(s);
- s->rtc.ctrl = 0x00;
- s->rtc.comp = 0x0000;
- s->rtc.next = 1000;
- s->rtc.sec_offset = 0;
- s->rtc.next_comp = 1800;
- s->rtc.alm_sec = 1800;
- s->rtc.alm.tm_sec = 0x00;
- s->rtc.alm.tm_min = 0x00;
- s->rtc.alm.tm_hour = 0x00;
- s->rtc.alm.tm_mday = 0x01;
- s->rtc.alm.tm_mon = 0x00;
- s->rtc.alm.tm_year = 2004;
- menelaus_update(s);
-}
-
-static void menelaus_gpio_set(void *opaque, int line, int level)
-{
- MenelausState *s = (MenelausState *) opaque;
-
- if (line < 3) {
- /* No interrupt generated */
- s->inputs &= ~(1 << line);
- s->inputs |= level << line;
- return;
- }
-
- if (!s->pwrbtn_state && level) {
- s->status |= 1 << 11; /* PSHBTN */
- menelaus_update(s);
- }
- s->pwrbtn_state = level;
-}
-
-#define MENELAUS_REV 0x01
-#define MENELAUS_VCORE_CTRL1 0x02
-#define MENELAUS_VCORE_CTRL2 0x03
-#define MENELAUS_VCORE_CTRL3 0x04
-#define MENELAUS_VCORE_CTRL4 0x05
-#define MENELAUS_VCORE_CTRL5 0x06
-#define MENELAUS_DCDC_CTRL1 0x07
-#define MENELAUS_DCDC_CTRL2 0x08
-#define MENELAUS_DCDC_CTRL3 0x09
-#define MENELAUS_LDO_CTRL1 0x0a
-#define MENELAUS_LDO_CTRL2 0x0b
-#define MENELAUS_LDO_CTRL3 0x0c
-#define MENELAUS_LDO_CTRL4 0x0d
-#define MENELAUS_LDO_CTRL5 0x0e
-#define MENELAUS_LDO_CTRL6 0x0f
-#define MENELAUS_LDO_CTRL7 0x10
-#define MENELAUS_LDO_CTRL8 0x11
-#define MENELAUS_SLEEP_CTRL1 0x12
-#define MENELAUS_SLEEP_CTRL2 0x13
-#define MENELAUS_DEVICE_OFF 0x14
-#define MENELAUS_OSC_CTRL 0x15
-#define MENELAUS_DETECT_CTRL 0x16
-#define MENELAUS_INT_MASK1 0x17
-#define MENELAUS_INT_MASK2 0x18
-#define MENELAUS_INT_STATUS1 0x19
-#define MENELAUS_INT_STATUS2 0x1a
-#define MENELAUS_INT_ACK1 0x1b
-#define MENELAUS_INT_ACK2 0x1c
-#define MENELAUS_GPIO_CTRL 0x1d
-#define MENELAUS_GPIO_IN 0x1e
-#define MENELAUS_GPIO_OUT 0x1f
-#define MENELAUS_BBSMS 0x20
-#define MENELAUS_RTC_CTRL 0x21
-#define MENELAUS_RTC_UPDATE 0x22
-#define MENELAUS_RTC_SEC 0x23
-#define MENELAUS_RTC_MIN 0x24
-#define MENELAUS_RTC_HR 0x25
-#define MENELAUS_RTC_DAY 0x26
-#define MENELAUS_RTC_MON 0x27
-#define MENELAUS_RTC_YR 0x28
-#define MENELAUS_RTC_WKDAY 0x29
-#define MENELAUS_RTC_AL_SEC 0x2a
-#define MENELAUS_RTC_AL_MIN 0x2b
-#define MENELAUS_RTC_AL_HR 0x2c
-#define MENELAUS_RTC_AL_DAY 0x2d
-#define MENELAUS_RTC_AL_MON 0x2e
-#define MENELAUS_RTC_AL_YR 0x2f
-#define MENELAUS_RTC_COMP_MSB 0x30
-#define MENELAUS_RTC_COMP_LSB 0x31
-#define MENELAUS_S1_PULL_EN 0x32
-#define MENELAUS_S1_PULL_DIR 0x33
-#define MENELAUS_S2_PULL_EN 0x34
-#define MENELAUS_S2_PULL_DIR 0x35
-#define MENELAUS_MCT_CTRL1 0x36
-#define MENELAUS_MCT_CTRL2 0x37
-#define MENELAUS_MCT_CTRL3 0x38
-#define MENELAUS_MCT_PIN_ST 0x39
-#define MENELAUS_DEBOUNCE1 0x3a
-
-static uint8_t menelaus_read(void *opaque, uint8_t addr)
-{
- MenelausState *s = (MenelausState *) opaque;
-
- switch (addr) {
- case MENELAUS_REV:
- return 0x22;
-
- case MENELAUS_VCORE_CTRL1 ... MENELAUS_VCORE_CTRL5:
- return s->vcore[addr - MENELAUS_VCORE_CTRL1];
-
- case MENELAUS_DCDC_CTRL1 ... MENELAUS_DCDC_CTRL3:
- return s->dcdc[addr - MENELAUS_DCDC_CTRL1];
-
- case MENELAUS_LDO_CTRL1 ... MENELAUS_LDO_CTRL8:
- return s->ldo[addr - MENELAUS_LDO_CTRL1];
-
- case MENELAUS_SLEEP_CTRL1:
- case MENELAUS_SLEEP_CTRL2:
- return s->sleep[addr - MENELAUS_SLEEP_CTRL1];
-
- case MENELAUS_DEVICE_OFF:
- return 0;
-
- case MENELAUS_OSC_CTRL:
- return s->osc | (1 << 7); /* CLK32K_GOOD */
-
- case MENELAUS_DETECT_CTRL:
- return s->detect;
-
- case MENELAUS_INT_MASK1:
- return (s->mask >> 0) & 0xff;
- case MENELAUS_INT_MASK2:
- return (s->mask >> 8) & 0xff;
-
- case MENELAUS_INT_STATUS1:
- return (s->status >> 0) & 0xff;
- case MENELAUS_INT_STATUS2:
- return (s->status >> 8) & 0xff;
-
- case MENELAUS_INT_ACK1:
- case MENELAUS_INT_ACK2:
- return 0;
-
- case MENELAUS_GPIO_CTRL:
- return s->dir;
- case MENELAUS_GPIO_IN:
- return s->inputs | (~s->dir & s->outputs);
- case MENELAUS_GPIO_OUT:
- return s->outputs;
-
- case MENELAUS_BBSMS:
- return s->bbsms;
-
- case MENELAUS_RTC_CTRL:
- return s->rtc.ctrl;
- case MENELAUS_RTC_UPDATE:
- return 0x00;
- case MENELAUS_RTC_SEC:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_sec);
- case MENELAUS_RTC_MIN:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_min);
- case MENELAUS_RTC_HR:
- menelaus_rtc_update(s);
- if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
- return to_bcd((s->rtc.tm.tm_hour % 12) + 1) |
- (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */
- else
- return to_bcd(s->rtc.tm.tm_hour);
- case MENELAUS_RTC_DAY:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_mday);
- case MENELAUS_RTC_MON:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_mon + 1);
- case MENELAUS_RTC_YR:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_year - 2000);
- case MENELAUS_RTC_WKDAY:
- menelaus_rtc_update(s);
- return to_bcd(s->rtc.tm.tm_wday);
- case MENELAUS_RTC_AL_SEC:
- return to_bcd(s->rtc.alm.tm_sec);
- case MENELAUS_RTC_AL_MIN:
- return to_bcd(s->rtc.alm.tm_min);
- case MENELAUS_RTC_AL_HR:
- if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
- return to_bcd((s->rtc.alm.tm_hour % 12) + 1) |
- (!!(s->rtc.alm.tm_hour >= 12) << 7);/* AL_PM_nAM */
- else
- return to_bcd(s->rtc.alm.tm_hour);
- case MENELAUS_RTC_AL_DAY:
- return to_bcd(s->rtc.alm.tm_mday);
- case MENELAUS_RTC_AL_MON:
- return to_bcd(s->rtc.alm.tm_mon + 1);
- case MENELAUS_RTC_AL_YR:
- return to_bcd(s->rtc.alm.tm_year - 2000);
- case MENELAUS_RTC_COMP_MSB:
- return (s->rtc.comp >> 8) & 0xff;
- case MENELAUS_RTC_COMP_LSB:
- return (s->rtc.comp >> 0) & 0xff;
-
- case MENELAUS_S1_PULL_EN:
- return s->pull[0];
- case MENELAUS_S1_PULL_DIR:
- return s->pull[1];
- case MENELAUS_S2_PULL_EN:
- return s->pull[2];
- case MENELAUS_S2_PULL_DIR:
- return s->pull[3];
-
- case MENELAUS_MCT_CTRL1 ... MENELAUS_MCT_CTRL3:
- return s->mmc_ctrl[addr - MENELAUS_MCT_CTRL1];
- case MENELAUS_MCT_PIN_ST:
- /* TODO: return the real Card Detect */
- return 0;
- case MENELAUS_DEBOUNCE1:
- return s->mmc_debounce;
-
- default:
-#ifdef VERBOSE
- printf("%s: unknown register %02x\n", __func__, addr);
-#endif
- break;
- }
- return 0;
-}
-
-static void menelaus_write(void *opaque, uint8_t addr, uint8_t value)
-{
- MenelausState *s = (MenelausState *) opaque;
- int line;
- struct tm tm;
-
- switch (addr) {
- case MENELAUS_VCORE_CTRL1:
- s->vcore[0] = (value & 0xe) | MIN(value & 0x1f, 0x12);
- break;
- case MENELAUS_VCORE_CTRL2:
- s->vcore[1] = value;
- break;
- case MENELAUS_VCORE_CTRL3:
- s->vcore[2] = MIN(value & 0x1f, 0x12);
- break;
- case MENELAUS_VCORE_CTRL4:
- s->vcore[3] = MIN(value & 0x1f, 0x12);
- break;
- case MENELAUS_VCORE_CTRL5:
- s->vcore[4] = value & 3;
- /* XXX
- * auto set to 3 on M_Active, nRESWARM
- * auto set to 0 on M_WaitOn, M_Backup
- */
- break;
-
- case MENELAUS_DCDC_CTRL1:
- s->dcdc[0] = value & 0x3f;
- break;
- case MENELAUS_DCDC_CTRL2:
- s->dcdc[1] = value & 0x07;
- /* XXX
- * auto set to 3 on M_Active, nRESWARM
- * auto set to 0 on M_WaitOn, M_Backup
- */
- break;
- case MENELAUS_DCDC_CTRL3:
- s->dcdc[2] = value & 0x07;
- break;
-
- case MENELAUS_LDO_CTRL1:
- s->ldo[0] = value;
- break;
- case MENELAUS_LDO_CTRL2:
- s->ldo[1] = value & 0x7f;
- /* XXX
- * auto set to 0x7e on M_WaitOn, M_Backup
- */
- break;
- case MENELAUS_LDO_CTRL3:
- s->ldo[2] = value & 3;
- /* XXX
- * auto set to 3 on M_Active, nRESWARM
- * auto set to 0 on M_WaitOn, M_Backup
- */
- break;
- case MENELAUS_LDO_CTRL4:
- s->ldo[3] = value & 3;
- /* XXX
- * auto set to 3 on M_Active, nRESWARM
- * auto set to 0 on M_WaitOn, M_Backup
- */
- break;
- case MENELAUS_LDO_CTRL5:
- s->ldo[4] = value & 3;
- /* XXX
- * auto set to 3 on M_Active, nRESWARM
- * auto set to 0 on M_WaitOn, M_Backup
- */
- break;
- case MENELAUS_LDO_CTRL6:
- s->ldo[5] = value & 3;
- break;
- case MENELAUS_LDO_CTRL7:
- s->ldo[6] = value & 3;
- break;
- case MENELAUS_LDO_CTRL8:
- s->ldo[7] = value & 3;
- break;
-
- case MENELAUS_SLEEP_CTRL1:
- case MENELAUS_SLEEP_CTRL2:
- s->sleep[addr - MENELAUS_SLEEP_CTRL1] = value;
- break;
-
- case MENELAUS_DEVICE_OFF:
- if (value & 1) {
- menelaus_reset(I2C_SLAVE(s));
- }
- break;
-
- case MENELAUS_OSC_CTRL:
- s->osc = value & 7;
- break;
-
- case MENELAUS_DETECT_CTRL:
- s->detect = value & 0x7f;
- break;
-
- case MENELAUS_INT_MASK1:
- s->mask &= 0xf00;
- s->mask |= value << 0;
- menelaus_update(s);
- break;
- case MENELAUS_INT_MASK2:
- s->mask &= 0x0ff;
- s->mask |= value << 8;
- menelaus_update(s);
- break;
-
- case MENELAUS_INT_ACK1:
- s->status &= ~(((uint16_t) value) << 0);
- menelaus_update(s);
- break;
- case MENELAUS_INT_ACK2:
- s->status &= ~(((uint16_t) value) << 8);
- menelaus_update(s);
- break;
-
- case MENELAUS_GPIO_CTRL:
- for (line = 0; line < 3; line ++) {
- if (((s->dir ^ value) >> line) & 1) {
- qemu_set_irq(s->out[line],
- ((s->outputs & ~s->dir) >> line) & 1);
- }
- }
- s->dir = value & 0x67;
- break;
- case MENELAUS_GPIO_OUT:
- for (line = 0; line < 3; line ++) {
- if ((((s->outputs ^ value) & ~s->dir) >> line) & 1) {
- qemu_set_irq(s->out[line], (s->outputs >> line) & 1);
- }
- }
- s->outputs = value & 0x07;
- break;
-
- case MENELAUS_BBSMS:
- s->bbsms = 0x0d;
- break;
-
- case MENELAUS_RTC_CTRL:
- if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */
- if (value & 1)
- menelaus_rtc_start(s);
- else
- menelaus_rtc_stop(s);
- }
- s->rtc.ctrl = value & 0x1f;
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_UPDATE:
- menelaus_rtc_update(s);
- memcpy(&tm, &s->rtc.tm, sizeof(tm));
- switch (value & 0xf) {
- case 0:
- break;
- case 1:
- tm.tm_sec = s->rtc.new.tm_sec;
- break;
- case 2:
- tm.tm_min = s->rtc.new.tm_min;
- break;
- case 3:
- if (s->rtc.new.tm_hour > 23)
- goto rtc_badness;
- tm.tm_hour = s->rtc.new.tm_hour;
- break;
- case 4:
- if (s->rtc.new.tm_mday < 1)
- goto rtc_badness;
- /* TODO check range */
- tm.tm_mday = s->rtc.new.tm_mday;
- break;
- case 5:
- if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11)
- goto rtc_badness;
- tm.tm_mon = s->rtc.new.tm_mon;
- break;
- case 6:
- tm.tm_year = s->rtc.new.tm_year;
- break;
- case 7:
- /* TODO set .tm_mday instead */
- tm.tm_wday = s->rtc.new.tm_wday;
- break;
- case 8:
- if (s->rtc.new.tm_hour > 23)
- goto rtc_badness;
- if (s->rtc.new.tm_mday < 1)
- goto rtc_badness;
- if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11)
- goto rtc_badness;
- tm.tm_sec = s->rtc.new.tm_sec;
- tm.tm_min = s->rtc.new.tm_min;
- tm.tm_hour = s->rtc.new.tm_hour;
- tm.tm_mday = s->rtc.new.tm_mday;
- tm.tm_mon = s->rtc.new.tm_mon;
- tm.tm_year = s->rtc.new.tm_year;
- break;
- rtc_badness:
- default:
- fprintf(stderr, "%s: bad RTC_UPDATE value %02x\n",
- __func__, value);
- s->status |= 1 << 10; /* RTCERR */
- menelaus_update(s);
- }
- s->rtc.sec_offset = qemu_timedate_diff(&tm);
- break;
- case MENELAUS_RTC_SEC:
- s->rtc.tm.tm_sec = from_bcd(value & 0x7f);
- break;
- case MENELAUS_RTC_MIN:
- s->rtc.tm.tm_min = from_bcd(value & 0x7f);
- break;
- case MENELAUS_RTC_HR:
- s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
- MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) :
- from_bcd(value & 0x3f);
- break;
- case MENELAUS_RTC_DAY:
- s->rtc.tm.tm_mday = from_bcd(value);
- break;
- case MENELAUS_RTC_MON:
- s->rtc.tm.tm_mon = MAX(1, from_bcd(value)) - 1;
- break;
- case MENELAUS_RTC_YR:
- s->rtc.tm.tm_year = 2000 + from_bcd(value);
- break;
- case MENELAUS_RTC_WKDAY:
- s->rtc.tm.tm_mday = from_bcd(value);
- break;
- case MENELAUS_RTC_AL_SEC:
- s->rtc.alm.tm_sec = from_bcd(value & 0x7f);
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_AL_MIN:
- s->rtc.alm.tm_min = from_bcd(value & 0x7f);
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_AL_HR:
- s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
- MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) :
- from_bcd(value & 0x3f);
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_AL_DAY:
- s->rtc.alm.tm_mday = from_bcd(value);
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_AL_MON:
- s->rtc.alm.tm_mon = MAX(1, from_bcd(value)) - 1;
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_AL_YR:
- s->rtc.alm.tm_year = 2000 + from_bcd(value);
- menelaus_alm_update(s);
- break;
- case MENELAUS_RTC_COMP_MSB:
- s->rtc.comp &= 0xff;
- s->rtc.comp |= value << 8;
- break;
- case MENELAUS_RTC_COMP_LSB:
- s->rtc.comp &= 0xff << 8;
- s->rtc.comp |= value;
- break;
-
- case MENELAUS_S1_PULL_EN:
- s->pull[0] = value;
- break;
- case MENELAUS_S1_PULL_DIR:
- s->pull[1] = value & 0x1f;
- break;
- case MENELAUS_S2_PULL_EN:
- s->pull[2] = value;
- break;
- case MENELAUS_S2_PULL_DIR:
- s->pull[3] = value & 0x1f;
- break;
-
- case MENELAUS_MCT_CTRL1:
- s->mmc_ctrl[0] = value & 0x7f;
- break;
- case MENELAUS_MCT_CTRL2:
- s->mmc_ctrl[1] = value;
- /* TODO update Card Detect interrupts */
- break;
- case MENELAUS_MCT_CTRL3:
- s->mmc_ctrl[2] = value & 0xf;
- break;
- case MENELAUS_DEBOUNCE1:
- s->mmc_debounce = value & 0x3f;
- break;
-
- default:
-#ifdef VERBOSE
- printf("%s: unknown register %02x\n", __func__, addr);
-#endif
- break;
- }
-}
-
-static int menelaus_event(I2CSlave *i2c, enum i2c_event event)
-{
- MenelausState *s = TWL92230(i2c);
-
- if (event == I2C_START_SEND)
- s->firstbyte = 1;
-
- return 0;
-}
-
-static int menelaus_tx(I2CSlave *i2c, uint8_t data)
-{
- MenelausState *s = TWL92230(i2c);
-
- /* Interpret register address byte */
- if (s->firstbyte) {
- s->reg = data;
- s->firstbyte = 0;
- } else
- menelaus_write(s, s->reg ++, data);
-
- return 0;
-}
-
-static uint8_t menelaus_rx(I2CSlave *i2c)
-{
- MenelausState *s = TWL92230(i2c);
-
- return menelaus_read(s, s->reg ++);
-}
-
-/* Save restore 32 bit int as uint16_t
- This is a Big hack, but it is how the old state did it.
- Or we broke compatibility in the state, or we can't use struct tm
- */
-
-static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field)
-{
- int *v = pv;
- *v = qemu_get_be16(f);
- return 0;
-}
-
-static int put_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field, JSONWriter *vmdesc)
-{
- int *v = pv;
- qemu_put_be16(f, *v);
-
- return 0;
-}
-
-static const VMStateInfo vmstate_hack_int32_as_uint16 = {
- .name = "int32_as_uint16",
- .get = get_int32_as_uint16,
- .put = put_int32_as_uint16,
-};
-
-#define VMSTATE_UINT16_HACK(_f, _s) \
- VMSTATE_SINGLE(_f, _s, 0, vmstate_hack_int32_as_uint16, int32_t)
-
-
-static const VMStateDescription vmstate_menelaus_tm = {
- .name = "menelaus_tm",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT16_HACK(tm_sec, struct tm),
- VMSTATE_UINT16_HACK(tm_min, struct tm),
- VMSTATE_UINT16_HACK(tm_hour, struct tm),
- VMSTATE_UINT16_HACK(tm_mday, struct tm),
- VMSTATE_UINT16_HACK(tm_min, struct tm),
- VMSTATE_UINT16_HACK(tm_year, struct tm),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static int menelaus_pre_save(void *opaque)
-{
- MenelausState *s = opaque;
- /* Should be <= 1000 */
- s->rtc_next_vmstate = s->rtc.next - qemu_clock_get_ms(rtc_clock);
-
- return 0;
-}
-
-static int menelaus_post_load(void *opaque, int version_id)
-{
- MenelausState *s = opaque;
-
- if (s->rtc.ctrl & 1) /* RTC_EN */
- menelaus_rtc_stop(s);
-
- s->rtc.next = s->rtc_next_vmstate;
-
- menelaus_alm_update(s);
- menelaus_update(s);
- if (s->rtc.ctrl & 1) /* RTC_EN */
- menelaus_rtc_start(s);
- return 0;
-}
-
-static const VMStateDescription vmstate_menelaus = {
- .name = "menelaus",
- .version_id = 0,
- .minimum_version_id = 0,
- .pre_save = menelaus_pre_save,
- .post_load = menelaus_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(firstbyte, MenelausState),
- VMSTATE_UINT8(reg, MenelausState),
- VMSTATE_UINT8_ARRAY(vcore, MenelausState, 5),
- VMSTATE_UINT8_ARRAY(dcdc, MenelausState, 3),
- VMSTATE_UINT8_ARRAY(ldo, MenelausState, 8),
- VMSTATE_UINT8_ARRAY(sleep, MenelausState, 2),
- VMSTATE_UINT8(osc, MenelausState),
- VMSTATE_UINT8(detect, MenelausState),
- VMSTATE_UINT16(mask, MenelausState),
- VMSTATE_UINT16(status, MenelausState),
- VMSTATE_UINT8(dir, MenelausState),
- VMSTATE_UINT8(inputs, MenelausState),
- VMSTATE_UINT8(outputs, MenelausState),
- VMSTATE_UINT8(bbsms, MenelausState),
- VMSTATE_UINT8_ARRAY(pull, MenelausState, 4),
- VMSTATE_UINT8_ARRAY(mmc_ctrl, MenelausState, 3),
- VMSTATE_UINT8(mmc_debounce, MenelausState),
- VMSTATE_UINT8(rtc.ctrl, MenelausState),
- VMSTATE_UINT16(rtc.comp, MenelausState),
- VMSTATE_UINT16(rtc_next_vmstate, MenelausState),
- VMSTATE_STRUCT(rtc.new, MenelausState, 0, vmstate_menelaus_tm,
- struct tm),
- VMSTATE_STRUCT(rtc.alm, MenelausState, 0, vmstate_menelaus_tm,
- struct tm),
- VMSTATE_UINT8(pwrbtn_state, MenelausState),
- VMSTATE_I2C_SLAVE(parent_obj, MenelausState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void twl92230_realize(DeviceState *dev, Error **errp)
-{
- MenelausState *s = TWL92230(dev);
-
- s->rtc.hz_tm = timer_new_ms(rtc_clock, menelaus_rtc_hz, s);
- /* Three output pins plus one interrupt pin. */
- qdev_init_gpio_out(dev, s->out, 4);
-
- /* Three input pins plus one power-button pin. */
- qdev_init_gpio_in(dev, menelaus_gpio_set, 4);
-
- menelaus_reset(I2C_SLAVE(dev));
-}
-
-static void twl92230_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
-
- dc->realize = twl92230_realize;
- sc->event = menelaus_event;
- sc->recv = menelaus_rx;
- sc->send = menelaus_tx;
- dc->vmsd = &vmstate_menelaus;
-}
-
-static const TypeInfo twl92230_info = {
- .name = TYPE_TWL92230,
- .parent = TYPE_I2C_SLAVE,
- .instance_size = sizeof(MenelausState),
- .class_init = twl92230_class_init,
-};
-
-static void twl92230_register_types(void)
-{
- type_register_static(&twl92230_info);
-}
-
-type_init(twl92230_register_types)
diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c
index 613c640..500982a 100644
--- a/hw/rtc/xlnx-zynqmp-rtc.c
+++ b/hw/rtc/xlnx-zynqmp-rtc.c
@@ -32,8 +32,8 @@
#include "qemu/module.h"
#include "hw/irq.h"
#include "qemu/cutils.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "trace.h"
#include "hw/rtc/xlnx-zynqmp-rtc.h"
#include "migration/vmstate.h"
@@ -251,11 +251,11 @@ static const VMStateDescription vmstate_rtc = {
}
};
-static void rtc_class_init(ObjectClass *klass, void *data)
+static void rtc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = rtc_reset;
+ device_class_set_legacy_reset(dc, rtc_reset);
dc->vmsd = &vmstate_rtc;
}
diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c
index bb4746c..5b9004e 100644
--- a/hw/rx/rx-gdbsim.c
+++ b/hw/rx/rx-gdbsim.c
@@ -24,9 +24,9 @@
#include "qapi/error.h"
#include "hw/loader.h"
#include "hw/rx/rx62n.h"
-#include "sysemu/qtest.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/reset.h"
+#include "system/qtest.h"
+#include "system/device_tree.h"
+#include "system/reset.h"
#include "hw/boards.h"
#include "qom/object.h"
@@ -110,9 +110,6 @@ static void rx_gdbsim_init(MachineState *machine)
if (!kernel_filename) {
if (machine->firmware) {
rom_add_file_fixed(machine->firmware, RX62N_CFLASH_BASE, 0);
- } else if (!qtest_enabled()) {
- error_report("No bios or kernel specified");
- exit(1);
}
}
@@ -127,7 +124,7 @@ static void rx_gdbsim_init(MachineState *machine)
* the latter half of the SDRAM space.
*/
kernel_offset = machine->ram_size / 2;
- rx_load_image(RX_CPU(first_cpu), kernel_filename,
+ rx_load_image(&s->mcu.cpu, kernel_filename,
SDRAM_BASE + kernel_offset, kernel_offset);
if (dtb_filename) {
ram_addr_t dtb_offset;
@@ -153,12 +150,12 @@ static void rx_gdbsim_init(MachineState *machine)
qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds,
rom_ptr(SDRAM_BASE + dtb_offset, dtb_size));
/* Set dtb address to R1 */
- RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
+ s->mcu.cpu.env.regs[1] = SDRAM_BASE + dtb_offset;
}
}
}
-static void rx_gdbsim_class_init(ObjectClass *oc, void *data)
+static void rx_gdbsim_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -168,7 +165,7 @@ static void rx_gdbsim_class_init(ObjectClass *oc, void *data)
mc->default_ram_id = "ext-sdram";
}
-static void rx62n7_class_init(ObjectClass *oc, void *data)
+static void rx62n7_class_init(ObjectClass *oc, const void *data)
{
RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
@@ -178,7 +175,7 @@ static void rx62n7_class_init(ObjectClass *oc, void *data)
mc->desc = "gdb simulator (R5F562N7 MCU and external RAM)";
};
-static void rx62n8_class_init(ObjectClass *oc, void *data)
+static void rx62n8_class_init(ObjectClass *oc, const void *data)
{
RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c
index 560f53a..a2a243a 100644
--- a/hw/rx/rx62n.c
+++ b/hw/rx/rx62n.c
@@ -28,8 +28,8 @@
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
-#include "qapi/qmp/qlist.h"
+#include "system/system.h"
+#include "qobject/qlist.h"
#include "qom/object.h"
/*
@@ -257,15 +257,14 @@ static void rx62n_realize(DeviceState *dev, Error **errp)
register_sci(s, 0);
}
-static Property rx62n_properties[] = {
+static const Property rx62n_properties[] = {
DEFINE_PROP_LINK("main-bus", RX62NState, sysmem, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_BOOL("load-kernel", RX62NState, kernel, false),
DEFINE_PROP_UINT32("xtal-frequency-hz", RX62NState, xtal_freq_hz, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rx62n_class_init(ObjectClass *klass, void *data)
+static void rx62n_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -273,7 +272,7 @@ static void rx62n_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, rx62n_properties);
}
-static void r5f562n7_class_init(ObjectClass *oc, void *data)
+static void r5f562n7_class_init(ObjectClass *oc, const void *data)
{
RX62NClass *rxc = RX62N_MCU_CLASS(oc);
@@ -282,7 +281,7 @@ static void r5f562n7_class_init(ObjectClass *oc, void *data)
rxc->data_flash_size = 32 * KiB;
};
-static void r5f562n8_class_init(ObjectClass *oc, void *data)
+static void r5f562n8_class_init(ObjectClass *oc, const void *data)
{
RX62NClass *rxc = RX62N_MCU_CLASS(oc);
diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c
index 69e6783..3f0d384 100644
--- a/hw/s390x/3270-ccw.c
+++ b/hw/s390x/3270-ccw.c
@@ -150,15 +150,10 @@ out_err:
g_free(sch);
}
-static Property emulated_ccw_3270_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void emulated_ccw_3270_class_init(ObjectClass *klass, void *data)
+static void emulated_ccw_3270_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- device_class_set_props(dc, emulated_ccw_3270_properties);
dc->realize = emulated_ccw_3270_realize;
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig
index 3bbf4ae..02ea199 100644
--- a/hw/s390x/Kconfig
+++ b/hw/s390x/Kconfig
@@ -7,6 +7,7 @@ config S390_CCW_VIRTIO
imply VFIO_AP
imply VFIO_CCW
imply WDT_DIAG288
+ imply PCI_BRIDGE
imply PCIE_DEVICES
imply IOMMUFD
select PCI_EXPRESS
@@ -15,3 +16,4 @@ config S390_CCW_VIRTIO
select SCLPCONSOLE
select VIRTIO_CCW
select MSI_NONBROKEN
+ select VIRTIO_MEM_SUPPORTED
diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c
index ef8fa2b..edeb3db 100644
--- a/hw/s390x/ap-bridge.c
+++ b/hw/s390x/ap-bridge.c
@@ -22,7 +22,7 @@ static char *ap_bus_get_dev_path(DeviceState *dev)
return g_strdup_printf("/1");
}
-static void ap_bus_class_init(ObjectClass *oc, void *data)
+static void ap_bus_class_init(ObjectClass *oc, const void *data)
{
BusClass *k = BUS_CLASS(oc);
@@ -61,7 +61,7 @@ void s390_init_ap(void)
qbus_set_hotplug_handler(bus, OBJECT(dev));
}
-static void ap_bridge_class_init(ObjectClass *oc, void *data)
+static void ap_bridge_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
@@ -75,7 +75,7 @@ static const TypeInfo ap_bridge_info = {
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = 0,
.class_init = ap_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/s390x/ap-device.c b/hw/s390x/ap-device.c
index 237d1f1..7331044 100644
--- a/hw/s390x/ap-device.c
+++ b/hw/s390x/ap-device.c
@@ -12,7 +12,7 @@
#include "qapi/error.h"
#include "hw/s390x/ap-device.h"
-static void ap_class_init(ObjectClass *klass, void *data)
+static void ap_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/s390x/ap-stub.c b/hw/s390x/ap-stub.c
new file mode 100644
index 0000000..001fe5f
--- /dev/null
+++ b/hw/s390x/ap-stub.c
@@ -0,0 +1,21 @@
+/*
+ * VFIO based AP matrix device assignment
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Rorie Reyes <rreyes@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/s390x/ap-bridge.h"
+
+int ap_chsc_sei_nt0_get_event(void *res)
+{
+ return EVENT_INFORMATION_NOT_STORED;
+}
+
+bool ap_chsc_sei_nt0_have_event(void)
+{
+ return false;
+}
diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c
index a7d682e..8be1813 100644
--- a/hw/s390x/ccw-device.c
+++ b/hw/s390x/ccw-device.c
@@ -13,6 +13,10 @@
#include "ccw-device.h"
#include "hw/qdev-properties.h"
#include "qemu/module.h"
+#include "ipl.h"
+#include "qapi/visitor.h"
+#include "qemu/ctype.h"
+#include "qapi/error.h"
static void ccw_device_refill_ids(CcwDevice *dev)
{
@@ -37,29 +41,69 @@ static bool ccw_device_realize(CcwDevice *dev, Error **errp)
return true;
}
-static Property ccw_device_properties[] = {
+static void ccw_device_get_loadparm(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CcwDevice *dev = CCW_DEVICE(obj);
+ char *str = g_strndup((char *) dev->loadparm, sizeof(dev->loadparm));
+
+ visit_type_str(v, name, &str, errp);
+ g_free(str);
+}
+
+static void ccw_device_set_loadparm(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CcwDevice *dev = CCW_DEVICE(obj);
+ g_autofree char *val = NULL;
+ int index;
+
+ index = object_property_get_int(obj, "bootindex", NULL);
+
+ if (index < 0) {
+ error_setg(errp, "LOADPARM is only valid for boot devices!");
+ }
+
+ if (!visit_type_str(v, name, &val, errp)) {
+ return;
+ }
+
+ s390_ipl_fmt_loadparm(dev->loadparm, val, errp);
+}
+
+const PropertyInfo ccw_loadparm = {
+ .type = "str",
+ .description = "Up to 8 chars in set of [A-Za-z0-9. ] to select"
+ " a guest kernel",
+ .get = ccw_device_get_loadparm,
+ .set = ccw_device_set_loadparm,
+};
+
+static const Property ccw_device_properties[] = {
DEFINE_PROP_CSS_DEV_ID("devno", CcwDevice, devno),
DEFINE_PROP_CSS_DEV_ID_RO("dev_id", CcwDevice, dev_id),
DEFINE_PROP_CSS_DEV_ID_RO("subch_id", CcwDevice, subch_id),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ccw_device_reset(DeviceState *d)
+static void ccw_device_reset_hold(Object *obj, ResetType type)
{
- CcwDevice *ccw_dev = CCW_DEVICE(d);
+ CcwDevice *ccw_dev = CCW_DEVICE(obj);
css_reset_sch(ccw_dev->sch);
}
-static void ccw_device_class_init(ObjectClass *klass, void *data)
+static void ccw_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCWDeviceClass *k = CCW_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
k->realize = ccw_device_realize;
k->refill_ids = ccw_device_refill_ids;
device_class_set_props(dc, ccw_device_properties);
- dc->reset = ccw_device_reset;
+ rc->phases.hold = ccw_device_reset_hold;
dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
}
diff --git a/hw/s390x/ccw-device.h b/hw/s390x/ccw-device.h
index 5feeb0e..4439feb 100644
--- a/hw/s390x/ccw-device.h
+++ b/hw/s390x/ccw-device.h
@@ -26,6 +26,8 @@ struct CcwDevice {
CssDevId dev_id;
/* The actual busid of the virtual subchannel. */
CssDevId subch_id;
+ /* If set, use this loadparm value when device is boot target */
+ uint8_t loadparm[8];
};
typedef struct CcwDevice CcwDevice;
@@ -49,4 +51,9 @@ static inline CcwDevice *to_ccw_dev_fast(DeviceState *d)
OBJECT_DECLARE_TYPE(CcwDevice, CCWDeviceClass, CCW_DEVICE)
+extern const PropertyInfo ccw_loadparm;
+
+#define DEFINE_PROP_CCW_LOADPARM(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, ccw_loadparm, typeof(uint8_t[8]))
+
#endif
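
Editor's note: a hedged sketch of how a CCW device type could attach the new per-device loadparm property via DEFINE_PROP_CCW_LOADPARM; the device and state names below are placeholders, not types from this patch:

    /* Illustrative only: "MyCcwDevice" embeds CcwDevice as parent_obj. */
    typedef struct MyCcwDevice {
        CcwDevice parent_obj;
        /* ... device-specific fields ... */
    } MyCcwDevice;

    static const Property my_ccw_device_properties[] = {
        DEFINE_PROP_CCW_LOADPARM("loadparm", MyCcwDevice, parent_obj.loadparm),
    };

If the device-level loadparm is left empty, s390_build_iplb() in the ipl.c hunks below falls back to the machine-wide loadparm.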
diff --git a/hw/s390x/cpu-topology.c b/hw/s390x/cpu-topology.c
index f16bdf6..b513f89 100644
--- a/hw/s390x/cpu-topology.c
+++ b/hw/s390x/cpu-topology.c
@@ -23,8 +23,8 @@
#include "target/s390x/cpu.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/cpu-topology.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qapi-events-machine-target.h"
+#include "qapi/qapi-commands-machine-s390x.h"
+#include "qapi/qapi-events-machine-s390x.h"
/*
* s390_topology is used to keep the topology information.
@@ -105,7 +105,7 @@ static void s390_topology_init(MachineState *ms)
*/
void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra)
{
- CpuS390Polarization polarization;
+ S390CpuPolarization polarization;
CPUS390XState *env = &cpu->env;
uint64_t reg = env->regs[r1];
int fc = reg & S390_TOPO_FC_MASK;
@@ -357,7 +357,7 @@ static void s390_change_topology(uint16_t core_id,
bool has_book_id, uint16_t book_id,
bool has_drawer_id, uint16_t drawer_id,
bool has_entitlement,
- CpuS390Entitlement entitlement,
+ S390CpuEntitlement entitlement,
bool has_dedicated, bool dedicated,
Error **errp)
{
@@ -446,7 +446,7 @@ void qmp_set_cpu_topology(uint16_t core,
bool has_socket, uint16_t socket,
bool has_book, uint16_t book,
bool has_drawer, uint16_t drawer,
- bool has_entitlement, CpuS390Entitlement entitlement,
+ bool has_entitlement, S390CpuEntitlement entitlement,
bool has_dedicated, bool dedicated,
Error **errp)
{
diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c
index 8657ff7..0f87b8c 100644
--- a/hw/s390x/css-bridge.c
+++ b/hw/s390x/css-bridge.c
@@ -66,19 +66,11 @@ static char *virtual_css_bus_get_dev_path(DeviceState *dev)
{
CcwDevice *ccw_dev = CCW_DEVICE(dev);
SubchDev *sch = ccw_dev->sch;
- VirtualCssBridge *bridge =
- VIRTUAL_CSS_BRIDGE(qdev_get_parent_bus(dev)->parent);
- /*
- * We can't provide a dev path for backward compatibility on
- * older machines, as it is visible in the migration stream.
- */
- return bridge->css_dev_path ?
- g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno) :
- NULL;
+ return g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno);
}
-static void virtual_css_bus_class_init(ObjectClass *klass, void *data)
+static void virtual_css_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -120,25 +112,18 @@ VirtualCssBus *virtual_css_bus_init(void)
/***************** Virtual-css Bus Bridge Device ********************/
-static Property virtual_css_bridge_properties[] = {
- DEFINE_PROP_BOOL("css_dev_path", VirtualCssBridge, css_dev_path,
- true),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static bool prop_get_true(Object *obj, Error **errp)
{
return true;
}
-static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
+static void virtual_css_bridge_class_init(ObjectClass *klass, const void *data)
{
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
hc->unplug = ccw_device_unplug;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- device_class_set_props(dc, virtual_css_bridge_properties);
object_class_property_add_bool(klass, "cssid-unrestricted",
prop_get_true, NULL);
object_class_property_set_description(klass, "cssid-unrestricted",
@@ -151,7 +136,7 @@ static const TypeInfo virtual_css_bridge_info = {
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(VirtualCssBridge),
.class_init = virtual_css_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index b2d5327..53444f6 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -14,7 +14,7 @@
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/css.h"
@@ -23,8 +23,6 @@
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-ccw.h"
-bool css_migration_enabled = true;
-
typedef struct CrwContainer {
CRW crw;
QTAILQ_ENTRY(CrwContainer) sibling;
@@ -180,16 +178,10 @@ static const VMStateDescription vmstate_orb = {
}
};
-static bool vmstate_schdev_orb_needed(void *opaque)
-{
- return css_migration_enabled;
-}
-
static const VMStateDescription vmstate_schdev_orb = {
.name = "s390_subch_dev/orb",
.version_id = 1,
.minimum_version_id = 1,
- .needed = vmstate_schdev_orb_needed,
.fields = (const VMStateField[]) {
VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB),
VMSTATE_END_OF_LIST()
@@ -390,33 +382,12 @@ static int subch_dev_post_load(void *opaque, int version_id)
css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
}
- if (css_migration_enabled) {
- /* No compat voodoo to do ;) */
- return 0;
- }
- /*
- * Hack alert. If we don't migrate the channel subsystem status
- * we still need to find out if the guest enabled mss/mcss-e.
- * If the subchannel is enabled, it certainly was able to access it,
- * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
- * values. This is not watertight, but better than nothing.
- */
- if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
- if (s->ssid) {
- channel_subsys.max_ssid = MAX_SSID;
- }
- if (s->cssid != channel_subsys.default_cssid) {
- channel_subsys.max_cssid = MAX_CSSID;
- }
- }
return 0;
}
void css_register_vmstate(void)
{
- if (css_migration_enabled) {
- vmstate_register(NULL, 0, &vmstate_css, &channel_subsys);
- }
+ vmstate_register(NULL, 0, &vmstate_css, &channel_subsys);
}
IndAddr *get_indicator(hwaddr ind_addr, int len)
@@ -2463,7 +2434,7 @@ void css_reset(void)
static void get_css_devid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
CssDevId *dev_id = object_field_prop_ptr(obj, prop);
char buffer[] = "xx.x.xxxx";
char *p = buffer;
@@ -2492,7 +2463,7 @@ static void get_css_devid(Object *obj, Visitor *v, const char *name,
static void set_css_devid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
CssDevId *dev_id = object_field_prop_ptr(obj, prop);
char *str;
int num, n1, n2;
@@ -2523,7 +2494,7 @@ out:
}
const PropertyInfo css_devid_propinfo = {
- .name = "str",
+ .type = "str",
.description = "Identifier of an I/O device in the channel "
"subsystem, example: fe.1.23ab",
.get = get_css_devid,
@@ -2531,7 +2502,7 @@ const PropertyInfo css_devid_propinfo = {
};
const PropertyInfo css_devid_ro_propinfo = {
- .name = "str",
+ .type = "str",
.description = "Read-only identifier of an I/O device in the channel "
"subsystem, example: fe.1.23ab",
.get = get_css_devid,
diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c
index 06c1da0..fee286e 100644
--- a/hw/s390x/event-facility.c
+++ b/hw/s390x/event-facility.c
@@ -4,6 +4,7 @@
* handles SCLP event types
* - Signal Quiesce - system power down
* - ASCII Console Data - VT220 read and write
+ * - Control-Program Identification - Send OS data from guest to host
*
* Copyright IBM, Corp. 2012
*
@@ -40,18 +41,12 @@ struct SCLPEventFacility {
SysBusDevice parent_obj;
SCLPEventsBus sbus;
SCLPEvent quiesce, cpu_hotplug;
+ SCLPEventCPI cpi;
/* guest's receive mask */
union {
uint32_t receive_mask_pieces[2];
sccb_mask_t receive_mask;
};
- /*
- * when false, we keep the same broken, backwards compatible behaviour as
- * before, allowing only masks of size exactly 4; when true, we implement
- * the architecture correctly, allowing all valid mask sizes. Needed for
- * migration toward older versions.
- */
- bool allow_all_mask_sizes;
/* length of the receive mask */
uint16_t mask_length;
};
@@ -294,8 +289,7 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
sccb_mask_t tmp_mask;
- if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
- ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
+ if (!mask_length || mask_length > SCLP_EVENT_MASK_LEN_MAX) {
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
return;
}
@@ -355,13 +349,6 @@ static bool vmstate_event_facility_mask64_needed(void *opaque)
return (ef->receive_mask & 0xFFFFFFFF) != 0;
}
-static bool vmstate_event_facility_mask_length_needed(void *opaque)
-{
- SCLPEventFacility *ef = opaque;
-
- return ef->allow_all_mask_sizes;
-}
-
static const VMStateDescription vmstate_event_facility_mask64 = {
.name = "vmstate-event-facility/mask64",
.version_id = 0,
@@ -377,7 +364,6 @@ static const VMStateDescription vmstate_event_facility_mask_length = {
.name = "vmstate-event-facility/mask_length",
.version_id = 0,
.minimum_version_id = 0,
- .needed = vmstate_event_facility_mask_length_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT16(mask_length, SCLPEventFacility),
VMSTATE_END_OF_LIST()
@@ -399,31 +385,12 @@ static const VMStateDescription vmstate_event_facility = {
}
};
-static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
- Error **errp)
-{
- SCLPEventFacility *ef = (SCLPEventFacility *)obj;
-
- ef->allow_all_mask_sizes = value;
-}
-
-static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp)
-{
- SCLPEventFacility *ef = (SCLPEventFacility *)obj;
-
- return ef->allow_all_mask_sizes;
-}
-
static void init_event_facility(Object *obj)
{
SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
DeviceState *sdev = DEVICE(obj);
event_facility->mask_length = 4;
- event_facility->allow_all_mask_sizes = true;
- object_property_add_bool(obj, "allow_all_mask_sizes",
- sclp_event_get_allow_all_mask_sizes,
- sclp_event_set_allow_all_mask_sizes);
/* Spawn a new bus for SCLP events */
qbus_init(&event_facility->sbus, sizeof(event_facility->sbus),
@@ -460,14 +427,14 @@ static void reset_event_facility(DeviceState *dev)
sdev->receive_mask = 0;
}
-static void init_event_facility_class(ObjectClass *klass, void *data)
+static void init_event_facility_class(ObjectClass *klass, const void *data)
{
SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(sbdc);
SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);
dc->realize = realize_event_facility;
- dc->reset = reset_event_facility;
+ device_class_set_legacy_reset(dc, reset_event_facility);
dc->vmsd = &vmstate_event_facility;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
k->command_handler = command_handler;
@@ -497,7 +464,7 @@ static void event_realize(DeviceState *qdev, Error **errp)
}
}
-static void event_class_init(ObjectClass *klass, void *data)
+static void event_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index e934bf8..2f08239 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -15,9 +15,9 @@
#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/tcg.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/tcg.h"
#include "elf.h"
#include "hw/loader.h"
#include "hw/qdev-properties.h"
@@ -26,7 +26,6 @@
#include "hw/s390x/vfio-ccw.h"
#include "hw/s390x/css.h"
#include "hw/s390x/ebcdic.h"
-#include "target/s390x/kvm/pv.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-net.h"
#include "ipl.h"
@@ -34,6 +33,7 @@
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
+#include "qemu/ctype.h"
#include "standard-headers/linux/virtio_ids.h"
#define KERN_IMAGE_START 0x010000UL
@@ -45,20 +45,20 @@
#define INITRD_PARM_START 0x010408UL
#define PARMFILE_START 0x001000UL
#define ZIPL_IMAGE_START 0x009000UL
+#define BIOS_MAX_SIZE 0x300000UL
#define IPL_PSW_MASK (PSW_MASK_32 | PSW_MASK_64)
-static bool iplb_extended_needed(void *opaque)
+/* Place the IPLB chain immediately before the BIOS in memory */
+static uint64_t find_iplb_chain_addr(uint64_t bios_addr, uint16_t count)
{
- S390IPLState *ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL));
-
- return ipl->iplbext_migration;
+ return (bios_addr & TARGET_PAGE_MASK)
+ - (count * sizeof(IplParameterBlock));
}
static const VMStateDescription vmstate_iplb_extended = {
.name = "ipl/iplb_extended",
.version_id = 0,
.minimum_version_id = 0,
- .needed = iplb_extended_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT8_ARRAY(reserved_ext, IplParameterBlock, 4096 - 200),
VMSTATE_END_OF_LIST()
@@ -144,7 +144,14 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp)
* even if an external kernel has been defined.
*/
if (!ipl->kernel || ipl->enforce_bios) {
- uint64_t fwbase = (MIN(ms->ram_size, 0x80000000U) - 0x200000) & ~0xffffUL;
+ uint64_t fwbase;
+
+ if (ms->ram_size < BIOS_MAX_SIZE) {
+ error_setg(errp, "not enough RAM to load the BIOS file");
+ return;
+ }
+
+ fwbase = (MIN(ms->ram_size, 0x80000000U) - BIOS_MAX_SIZE) & ~0xffffUL;
bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->firmware);
if (bios_filename == NULL) {
@@ -154,8 +161,8 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp)
bios_size = load_elf(bios_filename, NULL,
bios_translate_addr, &fwbase,
- &ipl->bios_start_addr, NULL, NULL, NULL, 1,
- EM_S390, 0, 0);
+ &ipl->bios_start_addr, NULL, NULL, NULL,
+ ELFDATA2MSB, EM_S390, 0, 0);
if (bios_size > 0) {
/* Adjust ELF start address to final location */
ipl->bios_start_addr += fwbase;
@@ -179,7 +186,7 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp)
if (ipl->kernel) {
kernel_size = load_elf(ipl->kernel, NULL, NULL, NULL,
&pentry, NULL,
- NULL, NULL, 1, EM_S390, 0, 0);
+ NULL, NULL, ELFDATA2MSB, EM_S390, 0, 0);
if (kernel_size < 0) {
kernel_size = load_image_targphys(ipl->kernel, 0, ms->ram_size);
if (kernel_size < 0) {
@@ -252,8 +259,8 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp)
*/
romptr = rom_ptr(INITRD_PARM_START, 16);
if (romptr) {
- stq_p(romptr, initrd_offset);
- stq_p(romptr + 1, initrd_size);
+ stq_be_p(romptr, initrd_offset);
+ stq_be_p(romptr + 1, initrd_size);
}
}
}
@@ -275,16 +282,12 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp)
qemu_register_reset(resettable_cold_reset_fn, dev);
}
-static Property s390_ipl_properties[] = {
+static const Property s390_ipl_properties[] = {
DEFINE_PROP_STRING("kernel", S390IPLState, kernel),
DEFINE_PROP_STRING("initrd", S390IPLState, initrd),
DEFINE_PROP_STRING("cmdline", S390IPLState, cmdline),
DEFINE_PROP_STRING("firmware", S390IPLState, firmware),
- DEFINE_PROP_STRING("netboot_fw", S390IPLState, netboot_fw),
DEFINE_PROP_BOOL("enforce_bios", S390IPLState, enforce_bios, false),
- DEFINE_PROP_BOOL("iplbext_migration", S390IPLState, iplbext_migration,
- true),
- DEFINE_PROP_END_OF_LIST(),
};
static void s390_ipl_set_boot_menu(S390IPLState *ipl)
@@ -390,174 +393,162 @@ static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype)
return ccw_dev;
}
-static bool s390_gen_initial_iplb(S390IPLState *ipl)
+static uint64_t s390_ipl_map_iplb_chain(IplParameterBlock *iplb_chain)
+{
+ S390IPLState *ipl = get_ipl_device();
+ uint16_t count = be16_to_cpu(ipl->qipl.chain_len);
+ uint64_t len = sizeof(IplParameterBlock) * count;
+ uint64_t chain_addr = find_iplb_chain_addr(ipl->bios_start_addr, count);
+
+ cpu_physical_memory_write(chain_addr, iplb_chain, len);
+ return chain_addr;
+}
+
+void s390_ipl_fmt_loadparm(uint8_t *loadparm, char *str, Error **errp)
+{
+ /* Initialize the loadparm with spaces */
+ memset(loadparm, ' ', LOADPARM_LEN);
+ qdev_prop_sanitize_s390x_loadparm(loadparm, str, errp);
+}
+
+void s390_ipl_convert_loadparm(char *ascii_lp, uint8_t *ebcdic_lp)
+{
+ int i;
+
+ /* Initialize the loadparm with EBCDIC spaces (0x40) */
+ memset(ebcdic_lp, '@', LOADPARM_LEN);
+ for (i = 0; i < LOADPARM_LEN && ascii_lp[i]; i++) {
+ ebcdic_lp[i] = ascii2ebcdic[(uint8_t) ascii_lp[i]];
+ }
+}
+
+static bool s390_build_iplb(DeviceState *dev_st, IplParameterBlock *iplb)
{
- DeviceState *dev_st;
CcwDevice *ccw_dev = NULL;
SCSIDevice *sd;
int devtype;
-
- dev_st = get_boot_device(0);
- if (dev_st) {
- ccw_dev = s390_get_ccw_device(dev_st, &devtype);
- }
+ uint8_t *lp;
+ g_autofree void *scsi_lp = NULL;
/*
* Currently allow IPL only from CCW devices.
*/
+ ccw_dev = s390_get_ccw_device(dev_st, &devtype);
if (ccw_dev) {
+ lp = ccw_dev->loadparm;
+
switch (devtype) {
case CCW_DEVTYPE_SCSI:
sd = SCSI_DEVICE(dev_st);
- ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN);
- ipl->iplb.blk0_len =
+ scsi_lp = object_property_get_str(OBJECT(sd), "loadparm", NULL);
+ if (scsi_lp && strlen(scsi_lp) > 0) {
+ lp = scsi_lp;
+ }
+ iplb->len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN);
+ iplb->blk0_len =
cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN);
- ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI;
- ipl->iplb.scsi.lun = cpu_to_be32(sd->lun);
- ipl->iplb.scsi.target = cpu_to_be16(sd->id);
- ipl->iplb.scsi.channel = cpu_to_be16(sd->channel);
- ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno);
- ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3;
+ iplb->pbt = S390_IPL_TYPE_QEMU_SCSI;
+ iplb->scsi.lun = cpu_to_be32(sd->lun);
+ iplb->scsi.target = cpu_to_be16(sd->id);
+ iplb->scsi.channel = cpu_to_be16(sd->channel);
+ iplb->scsi.devno = cpu_to_be16(ccw_dev->sch->devno);
+ iplb->scsi.ssid = ccw_dev->sch->ssid & 3;
break;
case CCW_DEVTYPE_VFIO:
- ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
- ipl->iplb.pbt = S390_IPL_TYPE_CCW;
- ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
- ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
+ iplb->len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
+ iplb->pbt = S390_IPL_TYPE_CCW;
+ iplb->ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
+ iplb->ccw.ssid = ccw_dev->sch->ssid & 3;
break;
case CCW_DEVTYPE_VIRTIO_NET:
- ipl->netboot = true;
- /* Fall through to CCW_DEVTYPE_VIRTIO case */
case CCW_DEVTYPE_VIRTIO:
- ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
- ipl->iplb.blk0_len =
+ iplb->len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
+ iplb->blk0_len =
cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN);
- ipl->iplb.pbt = S390_IPL_TYPE_CCW;
- ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
- ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
+ iplb->pbt = S390_IPL_TYPE_CCW;
+ iplb->ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
+ iplb->ccw.ssid = ccw_dev->sch->ssid & 3;
break;
}
- if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) {
- ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID;
+ /* If the device loadparm is empty use the global machine loadparm */
+ if (memcmp(lp, NO_LOADPARM, 8) == 0) {
+ lp = S390_CCW_MACHINE(qdev_get_machine())->loadparm;
}
+ s390_ipl_convert_loadparm((char *)lp, iplb->loadparm);
+ iplb->flags |= DIAG308_FLAGS_LP_VALID;
+
return true;
}
return false;
}
-int s390_ipl_set_loadparm(uint8_t *loadparm)
+void s390_rebuild_iplb(uint16_t dev_index, IplParameterBlock *iplb)
{
- MachineState *machine = MACHINE(qdev_get_machine());
- char *lp = object_property_get_str(OBJECT(machine), "loadparm", NULL);
-
- if (lp) {
- int i;
-
- /* lp is an uppercase string without leading/embedded spaces */
- for (i = 0; i < 8 && lp[i]; i++) {
- loadparm[i] = ascii2ebcdic[(uint8_t) lp[i]];
- }
-
- if (i < 8) {
- memset(loadparm + i, 0x40, 8 - i); /* fill with EBCDIC spaces */
- }
-
- g_free(lp);
- return 0;
- }
+ S390IPLState *ipl = get_ipl_device();
+ uint16_t index;
+ index = ipl->rebuilt_iplb ? ipl->iplb_index : dev_index;
- return -1;
+ ipl->rebuilt_iplb = s390_build_iplb(get_boot_device(index), iplb);
+ ipl->iplb_index = index;
}
-static int load_netboot_image(Error **errp)
+static bool s390_init_all_iplbs(S390IPLState *ipl)
{
- MachineState *ms = MACHINE(qdev_get_machine());
- S390IPLState *ipl = get_ipl_device();
- char *netboot_filename;
- MemoryRegion *sysmem = get_system_memory();
- MemoryRegion *mr = NULL;
- void *ram_ptr = NULL;
- int img_size = -1;
-
- mr = memory_region_find(sysmem, 0, 1).mr;
- if (!mr) {
- error_setg(errp, "Failed to find memory region at address 0");
- return -1;
- }
+ int iplb_num = 0;
+ IplParameterBlock iplb_chain[7];
+ DeviceState *dev_st = get_boot_device(0);
+ Object *machine = qdev_get_machine();
- ram_ptr = memory_region_get_ram_ptr(mr);
- if (!ram_ptr) {
- error_setg(errp, "No RAM found");
- goto unref_mr;
+ /*
+ * Parse the boot devices. Generate an IPLB for only the first boot device
+ * which will later be set with DIAG308.
+ */
+ if (!dev_st) {
+ ipl->qipl.chain_len = 0;
+ return false;
}
- netboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->netboot_fw);
- if (netboot_filename == NULL) {
- error_setg(errp, "Could not find network bootloader '%s'",
- ipl->netboot_fw);
- goto unref_mr;
+ /* If no machine loadparm was defined fill it with spaces */
+ if (memcmp(S390_CCW_MACHINE(machine)->loadparm, NO_LOADPARM, 8) == 0) {
+ object_property_set_str(machine, "loadparm", " ", NULL);
}
- img_size = load_elf_ram(netboot_filename, NULL, NULL, NULL,
- &ipl->start_addr,
- NULL, NULL, NULL, 1, EM_S390, 0, 0, NULL,
- false);
-
- if (img_size < 0) {
- img_size = load_image_size(netboot_filename, ram_ptr, ms->ram_size);
- ipl->start_addr = KERN_IMAGE_START;
- }
+ iplb_num = 1;
+ s390_build_iplb(dev_st, &ipl->iplb);
- if (img_size < 0) {
- error_setg(errp, "Failed to load network bootloader");
+ /* Index any fallback boot devices */
+ while (get_boot_device(iplb_num)) {
+ iplb_num++;
}
- g_free(netboot_filename);
-
-unref_mr:
- memory_region_unref(mr);
- return img_size;
-}
-
-static bool is_virtio_ccw_device_of_type(IplParameterBlock *iplb,
- int virtio_id)
-{
- uint8_t cssid;
- uint8_t ssid;
- uint16_t devno;
- uint16_t schid;
- SubchDev *sch = NULL;
+ if (iplb_num > MAX_BOOT_DEVS) {
+ warn_report("Excess boot devices defined! %d boot devices found, "
+ "but only the first %d will be considered.",
+ iplb_num, MAX_BOOT_DEVS);
- if (iplb->pbt != S390_IPL_TYPE_CCW) {
- return false;
+ iplb_num = MAX_BOOT_DEVS;
}
- devno = be16_to_cpu(iplb->ccw.devno);
- ssid = iplb->ccw.ssid & 3;
-
- for (schid = 0; schid < MAX_SCHID; schid++) {
- for (cssid = 0; cssid < MAX_CSSID; cssid++) {
- sch = css_find_subch(1, cssid, ssid, schid);
+ ipl->qipl.chain_len = cpu_to_be16(iplb_num - 1);
- if (sch && sch->devno == devno) {
- return sch->id.cu_model == virtio_id;
- }
+ /*
+     * Build fallback IPLBs for any boot devices above index 0, up to the
+     * maximum number of boot devices (MAX_BOOT_DEVS) defined in ipl.h.

+ */
+ if (iplb_num > 1) {
+ /* Start at 1 because the IPLB for boot index 0 is not chained */
+ for (int i = 1; i < iplb_num; i++) {
+ dev_st = get_boot_device(i);
+ s390_build_iplb(dev_st, &iplb_chain[i - 1]);
}
- }
- return false;
-}
-static bool is_virtio_net_device(IplParameterBlock *iplb)
-{
- return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_NET);
-}
+ ipl->qipl.next_iplb = cpu_to_be64(s390_ipl_map_iplb_chain(iplb_chain));
+ }
-static bool is_virtio_scsi_device(IplParameterBlock *iplb)
-{
- return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_SCSI);
+ return iplb_num;
}
static void update_machine_ipl_properties(IplParameterBlock *iplb)
@@ -577,7 +568,7 @@ static void update_machine_ipl_properties(IplParameterBlock *iplb)
ascii_loadparm[i] = 0;
object_property_set_str(machine, "loadparm", ascii_loadparm, &err);
} else {
- object_property_set_str(machine, "loadparm", "", &err);
+ object_property_set_str(machine, "loadparm", " ", &err);
}
if (err) {
warn_report_err(err);
@@ -599,7 +590,7 @@ void s390_ipl_update_diag308(IplParameterBlock *iplb)
ipl->iplb = *iplb;
ipl->iplb_valid = true;
}
- ipl->netboot = is_virtio_net_device(iplb);
+
update_machine_ipl_properties(iplb);
}
@@ -626,32 +617,14 @@ IplParameterBlock *s390_ipl_get_iplb(void)
void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type)
{
S390IPLState *ipl = get_ipl_device();
-
if (reset_type == S390_RESET_EXTERNAL || reset_type == S390_RESET_REIPL) {
/* use CPU 0 for full resets */
ipl->reset_cpu_index = 0;
} else {
ipl->reset_cpu_index = cs->cpu_index;
}
- ipl->reset_type = reset_type;
- if (reset_type == S390_RESET_REIPL &&
- ipl->iplb_valid &&
- !ipl->netboot &&
- ipl->iplb.pbt == S390_IPL_TYPE_CCW &&
- is_virtio_scsi_device(&ipl->iplb)) {
- CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0), NULL);
-
- if (ccw_dev &&
- cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno &&
- (ccw_dev->sch->ssid & 3) == ipl->iplb.ccw.ssid) {
- /*
- * this is the original boot device's SCSI
- * so restore IPL parameter info from it
- */
- ipl->iplb_valid = s390_gen_initial_iplb(ipl);
- }
- }
+ ipl->reset_type = reset_type;
if (reset_type == S390_RESET_MODIFIED_CLEAR ||
reset_type == S390_RESET_LOAD_NORMAL ||
reset_type == S390_RESET_PV) {
@@ -702,7 +675,7 @@ static void s390_ipl_prepare_qipl(S390CPU *cpu)
cpu_physical_memory_unmap(addr, len, 1, len);
}
-int s390_ipl_prepare_pv_header(Error **errp)
+int s390_ipl_prepare_pv_header(struct S390PVResponse *pv_resp, Error **errp)
{
IplParameterBlock *ipib = s390_ipl_get_iplb_pv();
IPLBlockPV *ipib_pv = &ipib->pv;
@@ -711,12 +684,13 @@ int s390_ipl_prepare_pv_header(Error **errp)
cpu_physical_memory_read(ipib_pv->pv_header_addr, hdr,
ipib_pv->pv_header_len);
- rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len, errp);
+ rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len,
+ pv_resp, errp);
g_free(hdr);
return rc;
}
-int s390_ipl_pv_unpack(void)
+int s390_ipl_pv_unpack(struct S390PVResponse *pv_resp)
{
IplParameterBlock *ipib = s390_ipl_get_iplb_pv();
IPLBlockPV *ipib_pv = &ipib->pv;
@@ -725,7 +699,8 @@ int s390_ipl_pv_unpack(void)
for (i = 0; i < ipib_pv->num_comp; i++) {
rc = s390_pv_unpack(ipib_pv->components[i].addr,
TARGET_PAGE_ALIGN(ipib_pv->components[i].size),
- ipib_pv->components[i].tweak_pref);
+ ipib_pv->components[i].tweak_pref,
+ pv_resp);
if (rc) {
break;
}
@@ -743,13 +718,11 @@ void s390_ipl_prepare_cpu(S390CPU *cpu)
if (!ipl->kernel || ipl->iplb_valid) {
cpu->env.psw.addr = ipl->bios_start_addr;
if (!ipl->iplb_valid) {
- ipl->iplb_valid = s390_gen_initial_iplb(ipl);
+ ipl->iplb_valid = s390_init_all_iplbs(ipl);
+ } else {
+ ipl->qipl.chain_len = 0;
}
}
- if (ipl->netboot) {
- load_netboot_image(&error_fatal);
- ipl->qipl.netboot_start_addr = cpu_to_be64(ipl->start_addr);
- }
s390_ipl_set_boot_menu(ipl);
s390_ipl_prepare_qipl(cpu);
}
@@ -764,13 +737,13 @@ static void s390_ipl_reset(DeviceState *dev)
}
}
-static void s390_ipl_class_init(ObjectClass *klass, void *data)
+static void s390_ipl_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = s390_ipl_realize;
device_class_set_props(dc, s390_ipl_properties);
- dc->reset = s390_ipl_reset;
+ device_class_set_legacy_reset(dc, s390_ipl_reset);
dc->vmsd = &vmstate_ipl;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
/* Reason: Loads the ROMs and thus can only be used one time - internally */
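
Editor's note: a standalone illustration of the find_iplb_chain_addr() placement arithmetic introduced above. The 4 KiB page size and the 4096-byte IplParameterBlock size are assumptions made to keep the example self-contained (the union layout removed from ipl.h adds up to 4096 bytes):

    #include <inttypes.h>
    #include <stdio.h>

    /* Same arithmetic as find_iplb_chain_addr(), with the page size and
     * sizeof(IplParameterBlock) hard-coded as 4096 for this example. */
    static uint64_t chain_addr(uint64_t bios_addr, uint16_t count)
    {
        return (bios_addr & ~(uint64_t)0xfff) - (uint64_t)count * 4096;
    }

    int main(void)
    {
        /* e.g. BIOS loaded at 0x3fe00000 with three fallback IPLBs chained */
        printf("0x%" PRIx64 "\n", chain_addr(0x3fe00000, 3));
        /* prints 0x3fdfd000: the chain occupies the 12 KiB just below the BIOS */
        return 0;
    }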
diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h
index 57cd125..505cded 100644
--- a/hw/s390x/ipl.h
+++ b/hw/s390x/ipl.h
@@ -14,101 +14,24 @@
#define HW_S390_IPL_H
#include "cpu.h"
-#include "exec/address-spaces.h"
+#include "exec/target_page.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
+#include "hw/s390x/ipl/qipl.h"
#include "qom/object.h"
-
-struct IPLBlockPVComp {
- uint64_t tweak_pref;
- uint64_t addr;
- uint64_t size;
-} QEMU_PACKED;
-typedef struct IPLBlockPVComp IPLBlockPVComp;
-
-struct IPLBlockPV {
- uint8_t reserved18[87]; /* 0x18 */
- uint8_t version; /* 0x6f */
- uint32_t reserved70; /* 0x70 */
- uint32_t num_comp; /* 0x74 */
- uint64_t pv_header_addr; /* 0x78 */
- uint64_t pv_header_len; /* 0x80 */
- struct IPLBlockPVComp components[0];
-} QEMU_PACKED;
-typedef struct IPLBlockPV IPLBlockPV;
-
-struct IplBlockCcw {
- uint8_t reserved0[85];
- uint8_t ssid;
- uint16_t devno;
- uint8_t vm_flags;
- uint8_t reserved3[3];
- uint32_t vm_parm_len;
- uint8_t nss_name[8];
- uint8_t vm_parm[64];
- uint8_t reserved4[8];
-} QEMU_PACKED;
-typedef struct IplBlockCcw IplBlockCcw;
-
-struct IplBlockFcp {
- uint8_t reserved1[305 - 1];
- uint8_t opt;
- uint8_t reserved2[3];
- uint16_t reserved3;
- uint16_t devno;
- uint8_t reserved4[4];
- uint64_t wwpn;
- uint64_t lun;
- uint32_t bootprog;
- uint8_t reserved5[12];
- uint64_t br_lba;
- uint32_t scp_data_len;
- uint8_t reserved6[260];
- uint8_t scp_data[0];
-} QEMU_PACKED;
-typedef struct IplBlockFcp IplBlockFcp;
-
-struct IplBlockQemuScsi {
- uint32_t lun;
- uint16_t target;
- uint16_t channel;
- uint8_t reserved0[77];
- uint8_t ssid;
- uint16_t devno;
-} QEMU_PACKED;
-typedef struct IplBlockQemuScsi IplBlockQemuScsi;
+#include "target/s390x/kvm/pv.h"
#define DIAG308_FLAGS_LP_VALID 0x80
+#define MAX_BOOT_DEVS 8 /* Max number of devices that may have a bootindex */
-union IplParameterBlock {
- struct {
- uint32_t len;
- uint8_t reserved0[3];
- uint8_t version;
- uint32_t blk0_len;
- uint8_t pbt;
- uint8_t flags;
- uint16_t reserved01;
- uint8_t loadparm[8];
- union {
- IplBlockCcw ccw;
- IplBlockFcp fcp;
- IPLBlockPV pv;
- IplBlockQemuScsi scsi;
- };
- } QEMU_PACKED;
- struct {
- uint8_t reserved1[110];
- uint16_t devno;
- uint8_t reserved2[88];
- uint8_t reserved_ext[4096 - 200];
- } QEMU_PACKED;
-} QEMU_PACKED;
-typedef union IplParameterBlock IplParameterBlock;
-
-int s390_ipl_set_loadparm(uint8_t *loadparm);
+void s390_ipl_convert_loadparm(char *ascii_lp, uint8_t *ebcdic_lp);
+void s390_ipl_fmt_loadparm(uint8_t *loadparm, char *str, Error **errp);
+void s390_rebuild_iplb(uint16_t index, IplParameterBlock *iplb);
void s390_ipl_update_diag308(IplParameterBlock *iplb);
-int s390_ipl_prepare_pv_header(Error **errp);
-int s390_ipl_pv_unpack(void);
+int s390_ipl_prepare_pv_header(struct S390PVResponse *pv_resp,
+ Error **errp);
+int s390_ipl_pv_unpack(struct S390PVResponse *pv_resp);
void s390_ipl_prepare_cpu(S390CPU *cpu);
IplParameterBlock *s390_ipl_get_iplb(void);
IplParameterBlock *s390_ipl_get_iplb_pv(void);
@@ -131,27 +54,6 @@ void s390_ipl_clear_reset_request(void);
#define QIPL_FLAG_BM_OPTS_CMD 0x80
#define QIPL_FLAG_BM_OPTS_ZIPL 0x40
-/*
- * The QEMU IPL Parameters will be stored at absolute address
- * 204 (0xcc) which means it is 32-bit word aligned but not
- * double-word aligned.
- * Placement of data fields in this area must account for
- * their alignment needs. E.g., netboot_start_address must
- * have an offset of 4 + n * 8 bytes within the struct in order
- * to keep it double-word aligned.
- * The total size of the struct must never exceed 28 bytes.
- * This definition must be kept in sync with the definition
- * in pc-bios/s390-ccw/iplb.h.
- */
-struct QemuIplParameters {
- uint8_t qipl_flags;
- uint8_t reserved1[3];
- uint64_t netboot_start_addr;
- uint32_t boot_menu_timeout;
- uint8_t reserved2[12];
-} QEMU_PACKED;
-typedef struct QemuIplParameters QemuIplParameters;
-
#define TYPE_S390_IPL "s390-ipl"
OBJECT_DECLARE_SIMPLE_TYPE(S390IPLState, S390_IPL)
@@ -168,7 +70,8 @@ struct S390IPLState {
bool enforce_bios;
bool iplb_valid;
bool iplb_valid_pv;
- bool netboot;
+ bool rebuilt_iplb;
+ uint16_t iplb_index;
/* reset related properties don't have to be migrated or reset */
enum s390_reset reset_type;
int reset_cpu_index;
@@ -178,11 +81,9 @@ struct S390IPLState {
char *initrd;
char *cmdline;
char *firmware;
- char *netboot_fw;
uint8_t cssid;
uint8_t ssid;
uint16_t devno;
- bool iplbext_migration;
};
QEMU_BUILD_BUG_MSG(offsetof(S390IPLState, iplb) & 3, "alignment of iplb wrong");
@@ -276,11 +177,14 @@ static inline bool iplb_valid_pv(IplParameterBlock *iplb)
static inline bool iplb_valid(IplParameterBlock *iplb)
{
+ uint32_t len = be32_to_cpu(iplb->len);
+
switch (iplb->pbt) {
case S390_IPL_TYPE_FCP:
- return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_FCP_LEN;
+ return len >= S390_IPLB_MIN_FCP_LEN;
case S390_IPL_TYPE_CCW:
- return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_CCW_LEN;
+ return len >= S390_IPLB_MIN_CCW_LEN;
+ case S390_IPL_TYPE_QEMU_SCSI:
default:
return false;
}
diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build
index 482fd13..1bc8583 100644
--- a/hw/s390x/meson.build
+++ b/hw/s390x/meson.build
@@ -12,8 +12,8 @@ s390x_ss.add(files(
's390-pci-inst.c',
's390-skeys.c',
's390-stattrib.c',
- 's390-virtio-hcall.c',
'sclp.c',
+ 'sclpcpi.c',
'sclpcpu.c',
'sclpquiesce.c',
'tod.c',
@@ -28,9 +28,13 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
'tod-tcg.c',
))
-s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: files('s390-virtio-ccw.c'))
+s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: files(
+ 's390-virtio-ccw.c',
+ 's390-hypercall.c',
+))
s390x_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('3270-ccw.c'))
s390x_ss.add(when: 'CONFIG_VFIO', if_true: files('s390-pci-vfio.c'))
+s390x_ss.add(when: 'CONFIG_VFIO_AP', if_false: files('ap-stub.c'))
virtio_ss = ss.source_set()
virtio_ss.add(files('virtio-ccw.c'))
@@ -48,8 +52,12 @@ endif
virtio_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-ccw.c'))
virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_MD', if_true: files('virtio-ccw-md.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-ccw-mem.c'))
s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss)
+s390x_ss.add(when: 'CONFIG_VIRTIO_MD', if_false: files('virtio-ccw-md-stubs.c'))
+
hw_arch += {'s390x': s390x_ss}
hw_s390x_modules = {}
diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c
index 3c09750..10c81a4 100644
--- a/hw/s390x/s390-ccw.c
+++ b/hw/s390x/s390-ccw.c
@@ -18,7 +18,7 @@
#include "hw/s390x/css.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-ccw.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
IOInstEnding s390_ccw_cmd_request(SubchDev *sch)
{
@@ -175,7 +175,7 @@ static void s390_ccw_instance_init(Object *obj)
"/disk@0,0", DEVICE(obj));
}
-static void s390_ccw_class_init(ObjectClass *klass, void *data)
+static void s390_ccw_class_init(ObjectClass *klass, const void *data)
{
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/s390-hypercall.c b/hw/s390x/s390-hypercall.c
new file mode 100644
index 0000000..ac1b08b
--- /dev/null
+++ b/hw/s390x/s390-hypercall.c
@@ -0,0 +1,85 @@
+/*
+ * Support for QEMU/KVM hypercalls on s390
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/s390-hypercall.h"
+#include "hw/s390x/ioinst.h"
+#include "hw/s390x/css.h"
+#include "virtio-ccw.h"
+
+static int handle_virtio_notify(uint64_t mem)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+
+ if (mem < ms->ram_size) {
+ /* Early printk */
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int handle_virtio_ccw_notify(uint64_t subch_id, uint64_t data)
+{
+ SubchDev *sch;
+ VirtIODevice *vdev;
+ int cssid, ssid, schid, m;
+ uint16_t vq_idx = data;
+
+ if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
+ return -EINVAL;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ return -EINVAL;
+ }
+
+ vdev = virtio_ccw_get_vdev(sch);
+ if (vq_idx >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, vq_idx)) {
+ return -EINVAL;
+ }
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
+ virtio_queue_set_shadow_avail_idx(virtio_get_queue(vdev, vq_idx),
+ (data >> 16) & 0xFFFF);
+ }
+
+ virtio_queue_notify(vdev, vq_idx);
+ return 0;
+}
+
+static uint64_t handle_storage_limit(void)
+{
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine());
+
+ return s390_get_memory_limit(s390ms) - 1;
+}
+
+void handle_diag_500(S390CPU *cpu, uintptr_t ra)
+{
+ CPUS390XState *env = &cpu->env;
+ const uint64_t subcode = env->regs[1];
+
+ switch (subcode) {
+ case DIAG500_VIRTIO_NOTIFY:
+ env->regs[2] = handle_virtio_notify(env->regs[2]);
+ break;
+ case DIAG500_VIRTIO_CCW_NOTIFY:
+ env->regs[2] = handle_virtio_ccw_notify(env->regs[2], env->regs[3]);
+ break;
+ case DIAG500_STORAGE_LIMIT:
+ env->regs[2] = handle_storage_limit();
+ break;
+ default:
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+}
diff --git a/hw/s390x/s390-hypercall.h b/hw/s390x/s390-hypercall.h
new file mode 100644
index 0000000..4f07209
--- /dev/null
+++ b/hw/s390x/s390-hypercall.h
@@ -0,0 +1,25 @@
+/*
+ * Support for QEMU/KVM hypercalls on s390x
+ *
+ * Copyright IBM Corp. 2012, 2017
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_HYPERCALL_H
+#define HW_S390_HYPERCALL_H
+
+#include "cpu.h"
+
+#define DIAG500_VIRTIO_NOTIFY 0 /* legacy, implemented as a NOP */
+#define DIAG500_VIRTIO_RESET 1 /* legacy */
+#define DIAG500_VIRTIO_SET_STATUS 2 /* legacy */
+#define DIAG500_VIRTIO_CCW_NOTIFY 3 /* KVM_S390_VIRTIO_CCW_NOTIFY */
+#define DIAG500_STORAGE_LIMIT 4
+
+void handle_diag_500(S390CPU *cpu, uintptr_t ra);
+
+#endif /* HW_S390_HYPERCALL_H */
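
Editor's note: when VIRTIO_F_NOTIFICATION_DATA is negotiated, the DIAG500_VIRTIO_CCW_NOTIFY path in s390-hypercall.c packs two values into the guest's second argument. A small sketch of that decoding, mirroring handle_virtio_ccw_notify() above rather than defining any new interface:

    #include <stdint.h>

    /* Low 16 bits: virtqueue index; bits 16..31: the driver's shadow
     * avail index, used to update QEMU's view of the ring. */
    struct ccw_notify_data {
        uint16_t vq_idx;
        uint16_t avail_idx;
    };

    static struct ccw_notify_data decode_ccw_notify(uint64_t data)
    {
        struct ccw_notify_data d = {
            .vq_idx    = (uint16_t)(data & 0xffff),
            .avail_idx = (uint16_t)((data >> 16) & 0xffff),
        };
        return d;
    }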
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 3e57d5f..e6aa445 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -14,18 +14,21 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
+#include "exec/target_page.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/boards.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "trace.h"
@@ -595,7 +598,6 @@ static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
* zpci device" construct. But when we support migration of vfio-pci
* devices in future, we need to revisit this.
*/
- return;
}
static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
@@ -724,12 +726,42 @@ void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
g_free(name);
}
+void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(ms);
+
+ /*
+ * For direct-mapping we must map the entire guest address space. Rather
+ * than using an iommu, create a memory region alias that maps GPA X to
+ * IOVA X + SDMA. VFIO will handle pinning via its memory listener.
+ */
+ g_autofree char *name = g_strdup_printf("iommu-dm-s390-%04x",
+ iommu->pbdev->uid);
+
+ iommu->dm_mr = g_malloc0(sizeof(*iommu->dm_mr));
+ memory_region_init_alias(iommu->dm_mr, OBJECT(&iommu->mr), name,
+ get_system_memory(), 0,
+ s390_get_memory_limit(s390ms));
+ iommu->enabled = true;
+ memory_region_add_subregion(&iommu->mr, iommu->pbdev->zpci_fn.sdma,
+ iommu->dm_mr);
+}
+
void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
iommu->enabled = false;
g_hash_table_remove_all(iommu->iotlb);
- memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
- object_unparent(OBJECT(&iommu->iommu_mr));
+ if (iommu->dm_mr) {
+ memory_region_del_subregion(&iommu->mr, iommu->dm_mr);
+ object_unparent(OBJECT(iommu->dm_mr));
+ g_free(iommu->dm_mr);
+ iommu->dm_mr = NULL;
+ } else {
+ memory_region_del_subregion(&iommu->mr,
+ MEMORY_REGION(&iommu->iommu_mr));
+ object_unparent(OBJECT(&iommu->iommu_mr));
+ }
}
static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
@@ -971,14 +1003,7 @@ static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
"this device");
}
- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
- PCIDevice *pdev = PCI_DEVICE(dev);
-
- if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
- error_setg(errp, "multifunction not supported in s390");
- return;
- }
- } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
+ if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);
if (!s390_pci_alloc_idx(s, pbdev)) {
@@ -1069,6 +1094,18 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
pdev = PCI_DEVICE(dev);
+ /*
+         * Multifunction is not supported due to the lack of CLP. However, do
+         * not check the multifunction capability for SR-IOV devices, because
+         * SR-IOV devices set it automatically, regardless of whether the user
+         * intends to use functions other than the PF.
+ */
+ if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION &&
+ !pdev->exp.sriov_cap) {
+ error_setg(errp, "multifunction not supported in s390");
+ return;
+ }
+
if (!dev->id) {
/* In the case the PCI device does not define an id */
/* we generate one based on the PCI address */
@@ -1080,6 +1117,16 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
pbdev = s390_pci_find_dev_by_target(s, dev->id);
if (!pbdev) {
+ /*
+             * VFs are created automatically by the PF, and creating zpci
+             * devices for them would consume fids unexpectedly. Since QEMU
+             * does not currently support multifunction on s390x, no zpci
+             * device is needed for VFs anyway.
+ */
+ if (pci_is_vf(pdev)) {
+ return;
+ }
+
pbdev = s390_pci_device_new(s, dev->id, errp);
if (!pbdev) {
return;
@@ -1130,6 +1177,7 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
/* Always intercept emulated devices */
pbdev->interp = false;
pbdev->forwarding_assist = false;
+ pbdev->rtr_avail = false;
}
if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
@@ -1167,7 +1215,10 @@ static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
int32_t devfn;
pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
- g_assert(pbdev);
+ if (!pbdev) {
+ g_assert(pci_is_vf(pci_dev));
+ return;
+ }
s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
pbdev->fh, pbdev->fid);
@@ -1206,7 +1257,11 @@ static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
* we've checked the PCI device already (to prevent endless recursion).
*/
pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
- g_assert(pbdev);
+ if (!pbdev) {
+ g_assert(pci_is_vf(PCI_DEVICE(dev)));
+ return;
+ }
+
pbdev->pci_unplug_request_processed = true;
qdev_unplug(DEVICE(pbdev), errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
@@ -1318,12 +1373,12 @@ static void s390_pcihost_reset(DeviceState *dev)
pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}
-static void s390_pcihost_class_init(ObjectClass *klass, void *data)
+static void s390_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
- dc->reset = s390_pcihost_reset;
+ device_class_set_legacy_reset(dc, s390_pcihost_reset);
dc->realize = s390_pcihost_realize;
dc->unrealize = s390_pcihost_unrealize;
hc->pre_plug = s390_pcihost_pre_plug;
@@ -1338,7 +1393,7 @@ static const TypeInfo s390_pcihost_info = {
.parent = TYPE_PCI_HOST_BRIDGE,
.instance_size = sizeof(S390pciState),
.class_init = s390_pcihost_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
@@ -1453,7 +1508,7 @@ static void s390_pci_device_reset(DeviceState *dev)
static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint32(v, name, ptr, errp);
@@ -1463,7 +1518,7 @@ static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
- Property *prop = opaque;
+ const Property *prop = opaque;
uint32_t *ptr = object_field_prop_ptr(obj, prop);
if (!visit_type_uint32(v, name, ptr, errp)) {
@@ -1473,7 +1528,8 @@ static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo s390_pci_fid_propinfo = {
- .name = "zpci_fid",
+ .type = "uint32",
+ .description = "zpci_fid",
.get = s390_pci_get_fid,
.set = s390_pci_set_fid,
};
@@ -1481,14 +1537,15 @@ static const PropertyInfo s390_pci_fid_propinfo = {
#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)
-static Property s390_pci_device_properties[] = {
+static const Property s390_pci_device_properties[] = {
DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
true),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("relaxed-translation", S390PCIBusDevice, rtr_avail,
+ true),
};
static const VMStateDescription s390_pci_device_vmstate = {
@@ -1500,13 +1557,13 @@ static const VMStateDescription s390_pci_device_vmstate = {
.unmigratable = 1,
};
-static void s390_pci_device_class_init(ObjectClass *klass, void *data)
+static void s390_pci_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "zpci device";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- dc->reset = s390_pci_device_reset;
+ device_class_set_legacy_reset(dc, s390_pci_device_reset);
dc->bus_type = TYPE_S390_PCI_BUS;
dc->realize = s390_pci_device_realize;
device_class_set_props(dc, s390_pci_device_properties);
@@ -1526,7 +1583,8 @@ static const TypeInfo s390_pci_iommu_info = {
.instance_size = sizeof(S390PCIIOMMU),
};
-static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+static void s390_iommu_memory_region_class_init(ObjectClass *klass,
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
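The plug and unplug paths in this file now treat SR-IOV virtual functions specially: no zpci proxy device is created for a VF, and a missing proxy at unplug time is only tolerated when the PCI device really is a VF. The following is a minimal standalone sketch of that guard pattern; the toy_* structs and helpers are invented stand-ins for QEMU's PCIDevice/S390PCIBusDevice types, not the real API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the QEMU structures involved. */
struct toy_pci_dev {
    struct toy_pci_dev *sriov_pf;   /* non-NULL iff this device is a VF */
    bool multifunction;
    bool has_sriov_cap;
};

struct toy_zpci_proxy {
    int fid;
};

static bool toy_pci_is_vf(const struct toy_pci_dev *d)
{
    return d->sriov_pf != NULL;
}

/* Mirrors the plug-time checks: reject multifunction unless it comes from
 * SR-IOV, and never create a zpci proxy for a VF. */
static struct toy_zpci_proxy *toy_plug(const struct toy_pci_dev *d, int next_fid)
{
    static struct toy_zpci_proxy proxy;

    if (d->multifunction && !d->has_sriov_cap) {
        fprintf(stderr, "multifunction not supported\n");
        return NULL;
    }
    if (toy_pci_is_vf(d)) {
        return NULL;            /* VFs consume no fid and get no proxy */
    }
    proxy.fid = next_fid;
    return &proxy;
}

int main(void)
{
    struct toy_pci_dev pf = { .has_sriov_cap = true, .multifunction = true };
    struct toy_pci_dev vf = { .sriov_pf = &pf };

    assert(toy_plug(&pf, 1) != NULL);   /* PF gets a proxy */
    assert(toy_plug(&vf, 2) == NULL);   /* VF is silently skipped */
    puts("ok");
    return 0;
}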
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 3014954..b5dddb2 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -13,9 +13,11 @@
#include "qemu/osdep.h"
#include "exec/memop.h"
-#include "exec/memory.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
#include "qemu/error-report.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
+#include "hw/boards.h"
#include "hw/pci/pci_device.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
@@ -55,26 +57,26 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
uint64_t resume_token;
rc = 0;
- if (lduw_p(&rrb->request.hdr.len) != 32) {
+ if (lduw_be_p(&rrb->request.hdr.len) != 32) {
res_code = CLP_RC_LEN;
rc = -EINVAL;
goto out;
}
- if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
+ if ((ldl_be_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
res_code = CLP_RC_FMT;
rc = -EINVAL;
goto out;
}
- if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
- ldq_p(&rrb->request.reserved1) != 0) {
+ if ((ldl_be_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
+ ldq_be_p(&rrb->request.reserved1) != 0) {
res_code = CLP_RC_RESNOT0;
rc = -EINVAL;
goto out;
}
- resume_token = ldq_p(&rrb->request.resume_token);
+ resume_token = ldq_be_p(&rrb->request.resume_token);
if (resume_token) {
pbdev = s390_pci_find_dev_by_idx(s, resume_token);
@@ -87,13 +89,13 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
pbdev = s390_pci_find_next_avail_dev(s, NULL);
}
- if (lduw_p(&rrb->response.hdr.len) < 48) {
+ if (lduw_be_p(&rrb->response.hdr.len) < 48) {
res_code = CLP_RC_8K;
rc = -EINVAL;
goto out;
}
- initial_l2 = lduw_p(&rrb->response.hdr.len);
+ initial_l2 = lduw_be_p(&rrb->response.hdr.len);
if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
!= 0) {
res_code = CLP_RC_LEN;
@@ -102,33 +104,33 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
goto out;
}
- stl_p(&rrb->response.fmt, 0);
- stq_p(&rrb->response.reserved1, 0);
- stl_p(&rrb->response.mdd, FH_MASK_SHM);
- stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
+ stl_be_p(&rrb->response.fmt, 0);
+ stq_be_p(&rrb->response.reserved1, 0);
+ stl_be_p(&rrb->response.mdd, FH_MASK_SHM);
+ stw_be_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
rrb->response.flags = UID_CHECKING_ENABLED;
rrb->response.entry_size = sizeof(ClpFhListEntry);
i = 0;
g_l2 = LIST_PCI_HDR_LEN;
while (g_l2 < initial_l2 && pbdev) {
- stw_p(&rrb->response.fh_list[i].device_id,
+ stw_be_p(&rrb->response.fh_list[i].device_id,
pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
- stw_p(&rrb->response.fh_list[i].vendor_id,
+ stw_be_p(&rrb->response.fh_list[i].vendor_id,
pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
/* Ignore RESERVED devices. */
- stl_p(&rrb->response.fh_list[i].config,
+ stl_be_p(&rrb->response.fh_list[i].config,
pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
- stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
- stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);
+ stl_be_p(&rrb->response.fh_list[i].fid, pbdev->fid);
+ stl_be_p(&rrb->response.fh_list[i].fh, pbdev->fh);
g_l2 += sizeof(ClpFhListEntry);
/* Add endian check for DPRINTF? */
trace_s390_pci_list_entry(g_l2,
- lduw_p(&rrb->response.fh_list[i].vendor_id),
- lduw_p(&rrb->response.fh_list[i].device_id),
- ldl_p(&rrb->response.fh_list[i].fid),
- ldl_p(&rrb->response.fh_list[i].fh));
+ lduw_be_p(&rrb->response.fh_list[i].vendor_id),
+ lduw_be_p(&rrb->response.fh_list[i].device_id),
+ ldl_be_p(&rrb->response.fh_list[i].fid),
+ ldl_be_p(&rrb->response.fh_list[i].fh));
pbdev = s390_pci_find_next_avail_dev(s, pbdev);
i++;
}
@@ -138,13 +140,13 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
} else {
resume_token = pbdev->fh & FH_MASK_INDEX;
}
- stq_p(&rrb->response.resume_token, resume_token);
- stw_p(&rrb->response.hdr.len, g_l2);
- stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
+ stq_be_p(&rrb->response.resume_token, resume_token);
+ stw_be_p(&rrb->response.hdr.len, g_l2);
+ stw_be_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
if (rc) {
trace_s390_pci_list(rc);
- stw_p(&rrb->response.hdr.rsp, res_code);
+ stw_be_p(&rrb->response.hdr.rsp, res_code);
}
return rc;
}
@@ -172,7 +174,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
return 0;
}
reqh = (ClpReqHdr *)buffer;
- req_len = lduw_p(&reqh->len);
+ req_len = lduw_be_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
@@ -184,7 +186,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
return 0;
}
resh = (ClpRspHdr *)(buffer + req_len);
- res_len = lduw_p(&resh->len);
+ res_len = lduw_be_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
@@ -201,11 +203,11 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
}
if (req_len != 32) {
- stw_p(&resh->rsp, CLP_RC_LEN);
+ stw_be_p(&resh->rsp, CLP_RC_LEN);
goto out;
}
- switch (lduw_p(&reqh->cmd)) {
+ switch (lduw_be_p(&reqh->cmd)) {
case CLP_LIST_PCI: {
ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
list_pci(rrb, &cc);
@@ -215,9 +217,9 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
- pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
+ pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqsetpci->fh));
if (!pbdev) {
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
}
@@ -225,17 +227,17 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
case CLP_SET_ENABLE_PCI_FN:
switch (reqsetpci->ndas) {
case 0:
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
goto out;
case 1:
break;
default:
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
goto out;
}
if (pbdev->fh & FH_MASK_ENABLE) {
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
goto out;
}
@@ -249,29 +251,29 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
/* Take this opportunity to make sure we are sync'd with host */
if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
!(pbdev->fh & FH_MASK_ENABLE)) {
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
}
}
pbdev->fh |= FH_MASK_ENABLE;
pbdev->state = ZPCI_FS_ENABLED;
- stl_p(&ressetpci->fh, pbdev->fh);
- stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ stl_be_p(&ressetpci->fh, pbdev->fh);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
break;
case CLP_SET_DISABLE_PCI_FN:
if (!(pbdev->fh & FH_MASK_ENABLE)) {
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
goto out;
}
device_cold_reset(DEVICE(pbdev));
pbdev->fh &= ~FH_MASK_ENABLE;
pbdev->state = ZPCI_FS_DISABLED;
- stl_p(&ressetpci->fh, pbdev->fh);
- stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ stl_be_p(&ressetpci->fh, pbdev->fh);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
break;
default:
trace_s390_pci_unknown("set-pci", reqsetpci->oc);
- stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
+ stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
break;
}
break;
@@ -280,23 +282,23 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
- pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
+ pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqquery->fh));
if (!pbdev) {
- trace_s390_pci_nodev("query", ldl_p(&reqquery->fh));
- stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ trace_s390_pci_nodev("query", ldl_be_p(&reqquery->fh));
+ stw_be_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
}
- stq_p(&resquery->sdma, pbdev->zpci_fn.sdma);
- stq_p(&resquery->edma, pbdev->zpci_fn.edma);
- stw_p(&resquery->pchid, pbdev->zpci_fn.pchid);
- stw_p(&resquery->vfn, pbdev->zpci_fn.vfn);
+ stq_be_p(&resquery->sdma, pbdev->zpci_fn.sdma);
+ stq_be_p(&resquery->edma, pbdev->zpci_fn.edma);
+ stw_be_p(&resquery->pchid, pbdev->zpci_fn.pchid);
+ stw_be_p(&resquery->vfn, pbdev->zpci_fn.vfn);
resquery->flags = pbdev->zpci_fn.flags;
resquery->pfgid = pbdev->zpci_fn.pfgid;
resquery->pft = pbdev->zpci_fn.pft;
resquery->fmbl = pbdev->zpci_fn.fmbl;
- stl_p(&resquery->fid, pbdev->zpci_fn.fid);
- stl_p(&resquery->uid, pbdev->zpci_fn.uid);
+ stl_be_p(&resquery->fid, pbdev->zpci_fn.fid);
+ stl_be_p(&resquery->uid, pbdev->zpci_fn.uid);
memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);
@@ -304,16 +306,16 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
uint32_t data = pci_get_long(pbdev->pdev->config +
PCI_BASE_ADDRESS_0 + (i * 4));
- stl_p(&resquery->bar[i], data);
+ stl_be_p(&resquery->bar[i], data);
resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
ctz64(pbdev->pdev->io_regions[i].size) : 0;
trace_s390_pci_bar(i,
- ldl_p(&resquery->bar[i]),
+ ldl_be_p(&resquery->bar[i]),
pbdev->pdev->io_regions[i].size,
resquery->bar_size[i]);
}
- stw_p(&resquery->hdr.rsp, CLP_RC_OK);
+ stw_be_p(&resquery->hdr.rsp, CLP_RC_OK);
break;
}
case CLP_QUERY_PCI_FNGRP: {
@@ -326,23 +328,23 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
if (!group) {
/* We do not allow access to unknown groups */
/* The group must have been obtained with a vfio device */
- stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
+ stw_be_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
goto out;
}
resgrp->fr = group->zpci_group.fr;
- stq_p(&resgrp->dasm, group->zpci_group.dasm);
- stq_p(&resgrp->msia, group->zpci_group.msia);
- stw_p(&resgrp->mui, group->zpci_group.mui);
- stw_p(&resgrp->i, group->zpci_group.i);
- stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
+ stq_be_p(&resgrp->dasm, group->zpci_group.dasm);
+ stq_be_p(&resgrp->msia, group->zpci_group.msia);
+ stw_be_p(&resgrp->mui, group->zpci_group.mui);
+ stw_be_p(&resgrp->i, group->zpci_group.i);
+ stw_be_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
resgrp->version = group->zpci_group.version;
resgrp->dtsm = group->zpci_group.dtsm;
- stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
+ stw_be_p(&resgrp->hdr.rsp, CLP_RC_OK);
break;
}
default:
- trace_s390_pci_unknown("clp", lduw_p(&reqh->cmd));
- stw_p(&resh->rsp, CLP_RC_CMD);
+ trace_s390_pci_unknown("clp", lduw_be_p(&reqh->cmd));
+ stw_be_p(&resh->rsp, CLP_RC_CMD);
break;
}
@@ -914,7 +916,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
for (i = 0; i < len / 8; i++) {
result = memory_region_dispatch_write(mr, offset + i * 8,
- ldq_p(buffer + i * 8),
+ ldq_be_p(buffer + i * 8),
MO_64, MEMTXATTRS_UNSPECIFIED);
if (result != MEMTX_OK) {
s390_program_interrupt(env, PGM_OPERAND, ra);
@@ -935,13 +937,13 @@ specification_error:
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
int ret, len;
- uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));
+ uint8_t isc = FIB_DATA_ISC(ldl_be_p(&fib.data));
pbdev->routes.adapter.adapter_id = css_get_adapter_id(
CSS_IO_ADAPTER_PCI, isc);
- pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
- len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
- pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);
+ pbdev->summary_ind = get_indicator(ldq_be_p(&fib.aisb), sizeof(uint64_t));
+ len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_be_p(&fib.data))) * sizeof(unsigned long);
+ pbdev->indicator = get_indicator(ldq_be_p(&fib.aibv), len);
ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
if (ret) {
@@ -953,13 +955,13 @@ static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
goto out;
}
- pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
- pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
- pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
- pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
+ pbdev->routes.adapter.summary_addr = ldq_be_p(&fib.aisb);
+ pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_be_p(&fib.data));
+ pbdev->routes.adapter.ind_addr = ldq_be_p(&fib.aibv);
+ pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_be_p(&fib.data));
pbdev->isc = isc;
- pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
- pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));
+ pbdev->noi = FIB_DATA_NOI(ldl_be_p(&fib.data));
+ pbdev->sum = FIB_DATA_SUM(ldl_be_p(&fib.data));
trace_s390_pci_irqs("register", pbdev->routes.adapter.adapter_id);
return 0;
@@ -994,9 +996,9 @@ static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
uintptr_t ra)
{
S390PCIIOMMU *iommu = pbdev->iommu;
- uint64_t pba = ldq_p(&fib.pba);
- uint64_t pal = ldq_p(&fib.pal);
- uint64_t g_iota = ldq_p(&fib.iota);
+ uint64_t pba = ldq_be_p(&fib.pba);
+ uint64_t pal = ldq_be_p(&fib.pal);
+ uint64_t g_iota = ldq_be_p(&fib.iota);
uint8_t dt = (g_iota >> 2) & 0x7;
uint8_t t = (g_iota >> 11) & 0x1;
@@ -1008,17 +1010,25 @@ static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
}
/* currently we only support designation type 1 with translation */
- if (!(dt == ZPCI_IOTA_RTTO && t)) {
+ if (t && dt != ZPCI_IOTA_RTTO) {
error_report("unsupported ioat dt %d t %d", dt, t);
s390_program_interrupt(env, PGM_OPERAND, ra);
return -EINVAL;
+ } else if (!t && !pbdev->rtr_avail) {
+ error_report("relaxed translation not allowed");
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return -EINVAL;
}
iommu->pba = pba;
iommu->pal = pal;
iommu->g_iota = g_iota;
- s390_pci_iommu_enable(iommu);
+ if (t) {
+ s390_pci_iommu_enable(iommu);
+ } else {
+ s390_pci_iommu_direct_map_enable(iommu);
+ }
return 0;
}
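The reworked reg_ioat() check accepts either a guest I/O translation table with a region-table designation (RTTO), or, when the device allows it, "relaxed translation", i.e. no guest translation at all with a direct mapping instead. A small sketch of just that decision table follows; the names and the placeholder value of DT_RTTO are chosen for illustration (the real code calls s390_pci_iommu_enable() or s390_pci_iommu_direct_map_enable()).

#include <stdbool.h>
#include <stdio.h>

enum ioat_mode { IOAT_REJECT, IOAT_TRANSLATE, IOAT_DIRECT_MAP };

/* t: guest asked for translation; dt: designation type from the IOTA;
 * rtr_avail: device/host allow running without guest translation. */
static enum ioat_mode pick_ioat_mode(bool t, int dt, bool rtr_avail)
{
    const int DT_RTTO = 1;   /* placeholder, not the architected encoding */

    if (t && dt != DT_RTTO) {
        return IOAT_REJECT;      /* translation requested, unsupported table */
    }
    if (!t && !rtr_avail) {
        return IOAT_REJECT;      /* relaxed translation not allowed */
    }
    return t ? IOAT_TRANSLATE : IOAT_DIRECT_MAP;
}

int main(void)
{
    printf("%d %d %d\n",
           pick_ioat_mode(true, 1, false),    /* -> IOAT_TRANSLATE */
           pick_ioat_mode(false, 0, true),    /* -> IOAT_DIRECT_MAP */
           pick_ioat_mode(false, 0, false));  /* -> IOAT_REJECT */
    return 0;
}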
@@ -1289,7 +1299,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
}
break;
case ZPCI_MOD_FC_SET_MEASURE: {
- uint64_t fmb_addr = ldq_p(&fib.fmb_addr);
+ uint64_t fmb_addr = ldq_be_p(&fib.fmb_addr);
if (fmb_addr & FMBK_MASK) {
cc = ZPCI_PCI_LS_ERR;
@@ -1399,17 +1409,17 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
return 0;
}
- stq_p(&fib.pba, pbdev->iommu->pba);
- stq_p(&fib.pal, pbdev->iommu->pal);
- stq_p(&fib.iota, pbdev->iommu->g_iota);
- stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
- stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
- stq_p(&fib.fmb_addr, pbdev->fmb_addr);
+ stq_be_p(&fib.pba, pbdev->iommu->pba);
+ stq_be_p(&fib.pal, pbdev->iommu->pal);
+ stq_be_p(&fib.iota, pbdev->iommu->g_iota);
+ stq_be_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
+ stq_be_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
+ stq_be_p(&fib.fmb_addr, pbdev->fmb_addr);
data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
- stl_p(&fib.data, data);
+ stl_be_p(&fib.data, data);
out:
if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
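Most of this file's hunks replace the target-endian ld*_p/st*_p accessors with the explicit big-endian ld*_be_p/st*_be_p variants: the CLP request/response blocks and the FIB are architecturally big-endian, so the accesses must not depend on the build target's endianness. The following is a minimal sketch of what an explicit big-endian 32-bit load/store amounts to; the real helpers live in QEMU's bswap header, and these my_* stand-ins exist only for illustration.

#include <assert.h>
#include <stdint.h>

/* Store a 32-bit value into a byte buffer in big-endian order. */
static void my_stl_be(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24;
    p[1] = v >> 16;
    p[2] = v >> 8;
    p[3] = v;
}

/* Load a 32-bit big-endian value from a byte buffer, regardless of the
 * host's native byte order. */
static uint32_t my_ldl_be(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    uint8_t buf[4];

    my_stl_be(buf, 0x12345678u);
    assert(buf[0] == 0x12 && buf[3] == 0x78);   /* big-endian layout */
    assert(my_ldl_be(buf) == 0x12345678u);
    return 0;
}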
diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c
index 7dbbc76..aaf9131 100644
--- a/hw/s390x/s390-pci-vfio.c
+++ b/hw/s390x/s390-pci-vfio.c
@@ -20,7 +20,8 @@
#include "hw/s390x/s390-pci-clp.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/vfio/pci.h"
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-container.h"
+#include "hw/vfio/vfio-helpers.h"
/*
* Get the current DMA available count from vfio. Returns true if vfio is
@@ -132,12 +133,27 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev,
pbdev->pft = cap->pft;
/*
+ * If the device is a passthrough ISM device, disallow relaxed
+ * translation.
+ */
+ if (pbdev->pft == ZPCI_PFT_ISM) {
+ pbdev->rtr_avail = false;
+ }
+
+ /*
* If appropriate, reduce the size of the supported DMA aperture reported
- * to the guest based upon the vfio DMA limit.
+ * to the guest based upon the vfio DMA limit. This is applicable to
+ * devices that are guaranteed not to use relaxed translation. If the
+ * device is capable of relaxed translation, we must advertise the full
+ * aperture. In that case, if translation is used, we rely on the vfio
+ * DMA limit counting and use RPCIT CC1 / status 16 to request that the
+ * guest free DMA mappings as necessary.
*/
- vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS;
- if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) {
- pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1;
+ if (!pbdev->rtr_avail) {
+ vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS;
+ if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) {
+ pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1;
+ }
}
}
@@ -223,8 +239,11 @@ static void s390_pci_read_group(S390PCIBusDevice *pbdev,
pbdev->pci_group = s390_group_create(pbdev->zpci_fn.pfgid, start_gid);
resgrp = &pbdev->pci_group->zpci_group;
+ if (pbdev->rtr_avail) {
+ resgrp->fr |= CLP_RSP_QPCIG_MASK_RTR;
+ }
if (cap->flags & VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH) {
- resgrp->fr = 1;
+ resgrp->fr |= CLP_RSP_QPCIG_MASK_REFRESH;
}
resgrp->dasm = cap->dasm;
resgrp->msia = cap->msi_addr;
@@ -349,6 +368,4 @@ void s390_pci_get_clp_info(S390PCIBusDevice *pbdev)
s390_pci_read_group(pbdev, info);
s390_pci_read_util(pbdev, info);
s390_pci_read_pfip(pbdev, info);
-
- return;
}
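The aperture clamp above now only applies when relaxed translation is unavailable: the advertised end of the DMA aperture (edma) is pulled in so that the guest cannot map more than the vfio DMA limit allows. A small sketch of that arithmetic, assuming a 4 KiB page size and illustrative parameter names:

#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12   /* assume 4 KiB pages for this sketch */

/* Clamp the reported aperture end so it covers at most dma_limit_pages
 * mappings, but only for devices that cannot use relaxed translation. */
static uint64_t clamp_edma(uint64_t start_dma, uint64_t end_dma,
                           uint64_t dma_limit_pages, int rtr_avail)
{
    uint64_t vfio_size = dma_limit_pages << PAGE_BITS;

    if (rtr_avail) {
        return end_dma;                 /* advertise the full aperture */
    }
    if (vfio_size > 0 && vfio_size < end_dma - start_dma + 1) {
        return start_dma + vfio_size - 1;
    }
    return end_dma;
}

int main(void)
{
    /* 1 GiB aperture, but vfio only allows 65536 mappings (256 MiB). */
    uint64_t edma = clamp_edma(0x100000000ULL, 0x13fffffffULL, 65536, 0);

    printf("clamped edma: 0x%llx\n", (unsigned long long)edma);
    return 0;
}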
diff --git a/hw/s390x/s390-skeys-kvm.c b/hw/s390x/s390-skeys-kvm.c
index 3ff9d94..f3056d6 100644
--- a/hw/s390x/s390-skeys-kvm.c
+++ b/hw/s390x/s390-skeys-kvm.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "hw/s390x/storage-keys.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
@@ -52,7 +52,7 @@ static int kvm_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn,
return kvm_vm_ioctl(kvm_state, KVM_S390_SET_SKEYS, &args);
}
-static void kvm_s390_skeys_class_init(ObjectClass *oc, void *data)
+static void kvm_s390_skeys_class_init(ObjectClass *oc, const void *data)
{
S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c
index bf22d68..8eeecfd 100644
--- a/hw/s390x/s390-skeys.c
+++ b/hw/s390x/s390-skeys.c
@@ -11,16 +11,17 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "hw/boards.h"
+#include "exec/target_page.h"
+#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/storage-keys.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qobject/qdict.h"
#include "qemu/error-report.h"
-#include "sysemu/memory_mapping.h"
-#include "exec/address-spaces.h"
-#include "sysemu/kvm.h"
+#include "system/memory_mapping.h"
+#include "system/address-spaces.h"
+#include "system/kvm.h"
#include "migration/qemu-file-types.h"
#include "migration/register.h"
#include "trace.h"
@@ -142,7 +143,7 @@ void hmp_dump_skeys(Monitor *mon, const QDict *qdict)
}
}
-void qmp_dump_skeys(const char *filename, Error **errp)
+void s390_qmp_dump_skeys(const char *filename, Error **errp)
{
S390SKeysState *ss = s390_get_skeys_device();
S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
@@ -251,9 +252,9 @@ static bool qemu_s390_enable_skeys(S390SKeysState *ss)
* g_once_init_enter() is good enough.
*/
if (g_once_init_enter(&initialized)) {
- MachineState *machine = MACHINE(qdev_get_machine());
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine());
- skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE;
+ skeys->key_count = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE;
skeys->keydata = g_malloc0(skeys->key_count);
g_once_init_leave(&initialized, 1);
}
@@ -302,7 +303,7 @@ static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn,
return 0;
}
-static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data)
+static void qemu_s390_skeys_class_init(ObjectClass *oc, const void *data)
{
S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -316,14 +317,6 @@ static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data)
dc->user_creatable = false;
}
-static const TypeInfo qemu_s390_skeys_info = {
- .name = TYPE_QEMU_S390_SKEYS,
- .parent = TYPE_S390_SKEYS,
- .instance_size = sizeof(QEMUS390SKeysState),
- .class_init = qemu_s390_skeys_class_init,
- .class_size = sizeof(S390SKeysClass),
-};
-
static void s390_storage_keys_save(QEMUFile *f, void *opaque)
{
S390SKeysState *ss = S390_SKEYS(opaque);
@@ -469,40 +462,39 @@ static void s390_skeys_realize(DeviceState *dev, Error **errp)
{
S390SKeysState *ss = S390_SKEYS(dev);
- if (ss->migration_enabled) {
- register_savevm_live(TYPE_S390_SKEYS, 0, 1,
- &savevm_s390_storage_keys, ss);
- }
+ register_savevm_live(TYPE_S390_SKEYS, 0, 1, &savevm_s390_storage_keys, ss);
}
-static Property s390_skeys_props[] = {
- DEFINE_PROP_BOOL("migration-enabled", S390SKeysState, migration_enabled, true),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void s390_skeys_class_init(ObjectClass *oc, void *data)
+static void s390_skeys_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->hotpluggable = false;
dc->realize = s390_skeys_realize;
- device_class_set_props(dc, s390_skeys_props);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
-static const TypeInfo s390_skeys_info = {
- .name = TYPE_S390_SKEYS,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(S390SKeysState),
- .class_init = s390_skeys_class_init,
- .class_size = sizeof(S390SKeysClass),
- .abstract = true,
+static const TypeInfo s390_skeys_types[] = {
+ {
+ .name = TYPE_DUMP_SKEYS_INTERFACE,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(DumpSKeysInterface),
+ },
+ {
+ .name = TYPE_S390_SKEYS,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(S390SKeysState),
+ .class_init = s390_skeys_class_init,
+ .class_size = sizeof(S390SKeysClass),
+ .abstract = true,
+ },
+ {
+ .name = TYPE_QEMU_S390_SKEYS,
+ .parent = TYPE_S390_SKEYS,
+ .instance_size = sizeof(QEMUS390SKeysState),
+ .class_init = qemu_s390_skeys_class_init,
+ .class_size = sizeof(S390SKeysClass),
+ },
};
-static void qemu_s390_skeys_register_types(void)
-{
- type_register_static(&s390_skeys_info);
- type_register_static(&qemu_s390_skeys_info);
-}
-
-type_init(qemu_s390_skeys_register_types)
+DEFINE_TYPES(s390_skeys_types)
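The skeys rework above registers a TYPE_DUMP_SKEYS_INTERFACE, and the machine's class_init later points dsi->qmp_dump_skeys at s390_qmp_dump_skeys, so the generic QMP command can dispatch without target-specific knowledge. Stripped of QOM, the pattern is an interface struct carrying a function pointer that the implementer fills in; a minimal sketch with invented names follows.

#include <stdio.h>

/* "Interface class": the operations a machine may choose to implement. */
struct dump_skeys_ops {
    void (*dump_skeys)(const char *filename);
};

/* One implementation, provided by an s390-like machine. */
static void s390_dump_skeys(const char *filename)
{
    printf("dumping storage keys to %s\n", filename);
}

static const struct dump_skeys_ops s390_ops = {
    .dump_skeys = s390_dump_skeys,
};

/* Generic caller: knows only the interface, not the machine behind it. */
static void generic_dump_skeys(const struct dump_skeys_ops *ops,
                               const char *filename)
{
    if (ops && ops->dump_skeys) {
        ops->dump_skeys(filename);
    }
}

int main(void)
{
    generic_dump_skeys(&s390_ops, "/tmp/skeys.bin");
    return 0;
}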
diff --git a/hw/s390x/s390-stattrib-kvm.c b/hw/s390x/s390-stattrib-kvm.c
index eeaa811..e1fee36 100644
--- a/hw/s390x/s390-stattrib-kvm.c
+++ b/hw/s390x/s390-stattrib-kvm.c
@@ -10,12 +10,13 @@
*/
#include "qemu/osdep.h"
-#include "hw/boards.h"
+#include "hw/s390x/s390-virtio-ccw.h"
#include "migration/qemu-file.h"
#include "hw/s390x/storage-attributes.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "exec/ram_addr.h"
+#include "system/kvm.h"
+#include "system/memory_mapping.h"
+#include "system/ram_addr.h"
#include "kvm/kvm_s390x.h"
#include "qapi/error.h"
@@ -84,8 +85,8 @@ static int kvm_s390_stattrib_set_stattr(S390StAttribState *sa,
uint8_t *values)
{
KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa);
- MachineState *machine = MACHINE(qdev_get_machine());
- unsigned long max = machine->ram_size / TARGET_PAGE_SIZE;
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine());
+ unsigned long max = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE;
if (start_gfn + count > max) {
error_report("Out of memory bounds when setting storage attributes");
@@ -103,39 +104,57 @@ static int kvm_s390_stattrib_set_stattr(S390StAttribState *sa,
static void kvm_s390_stattrib_synchronize(S390StAttribState *sa)
{
KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa);
- MachineState *machine = MACHINE(qdev_get_machine());
- unsigned long max = machine->ram_size / TARGET_PAGE_SIZE;
- /* We do not need to reach the maximum buffer size allowed */
- unsigned long cx, len = KVM_S390_SKEYS_MAX / 2;
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine());
+ unsigned long max = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE;
+ unsigned long start_gfn, end_gfn, pages;
+ GuestPhysBlockList guest_phys_blocks;
+ GuestPhysBlock *block;
int r;
struct kvm_s390_cmma_log clog = {
.flags = 0,
.mask = ~0ULL,
};
- if (sas->incoming_buffer) {
- for (cx = 0; cx + len <= max; cx += len) {
- clog.start_gfn = cx;
- clog.count = len;
- clog.values = (uint64_t)(sas->incoming_buffer + cx);
- r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog);
- if (r) {
- error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r));
- return;
- }
- }
- if (cx < max) {
- clog.start_gfn = cx;
- clog.count = max - cx;
- clog.values = (uint64_t)(sas->incoming_buffer + cx);
+ if (!sas->incoming_buffer) {
+ return;
+ }
+ guest_phys_blocks_init(&guest_phys_blocks);
+ guest_phys_blocks_append(&guest_phys_blocks);
+
+ QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) {
+ assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE));
+ assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE));
+
+ start_gfn = block->target_start / TARGET_PAGE_SIZE;
+ end_gfn = block->target_end / TARGET_PAGE_SIZE;
+
+ while (start_gfn < end_gfn) {
+ /* Don't exceed the maximum buffer size. */
+ pages = MIN(end_gfn - start_gfn, KVM_S390_SKEYS_MAX / 2);
+
+ /*
+ * If we ever get guest physical memory beyond the configured
+ * memory limit, something went very wrong.
+ */
+ assert(start_gfn + pages <= max);
+
+ clog.start_gfn = start_gfn;
+ clog.count = pages;
+ clog.values = (uint64_t)(sas->incoming_buffer + start_gfn);
r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog);
if (r) {
error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r));
+ goto out;
}
+
+ start_gfn += pages;
}
- g_free(sas->incoming_buffer);
- sas->incoming_buffer = NULL;
}
+
+out:
+ guest_phys_blocks_free(&guest_phys_blocks);
+ g_free(sas->incoming_buffer);
+ sas->incoming_buffer = NULL;
}
static int kvm_s390_stattrib_set_migrationmode(S390StAttribState *sa, bool val,
@@ -166,10 +185,10 @@ static long long kvm_s390_stattrib_get_dirtycount(S390StAttribState *sa)
static int kvm_s390_stattrib_get_active(S390StAttribState *sa)
{
- return kvm_s390_cmma_active() && sa->migration_enabled;
+ return kvm_s390_cmma_active();
}
-static void kvm_s390_stattrib_class_init(ObjectClass *oc, void *data)
+static void kvm_s390_stattrib_class_init(ObjectClass *oc, const void *data)
{
S390StAttribClass *sac = S390_STATTRIB_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
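The synchronize path above now walks the list of guest-physical blocks and feeds each one to KVM in chunks of at most KVM_S390_SKEYS_MAX / 2 pages, rather than assuming one contiguous RAM range. The chunking itself is just a bounded loop over [start, end); here is a standalone sketch, with a made-up callback standing in for the KVM_S390_SET_CMMA_BITS ioctl and a placeholder chunk size.

#include <stdint.h>
#include <stdio.h>

#define MAX_PAGES_PER_CALL 512   /* placeholder for KVM_S390_SKEYS_MAX / 2 */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

typedef int (*set_bits_fn)(uint64_t start_gfn, uint64_t count);

/* Push the attribute bits for [start_gfn, end_gfn) in bounded chunks,
 * stopping early if a call fails. */
static int sync_range(uint64_t start_gfn, uint64_t end_gfn, set_bits_fn set_bits)
{
    while (start_gfn < end_gfn) {
        uint64_t pages = MIN(end_gfn - start_gfn, MAX_PAGES_PER_CALL);
        int r = set_bits(start_gfn, pages);

        if (r) {
            return r;
        }
        start_gfn += pages;
    }
    return 0;
}

static int print_chunk(uint64_t start_gfn, uint64_t count)
{
    printf("set bits for gfn %llu..%llu\n",
           (unsigned long long)start_gfn,
           (unsigned long long)(start_gfn + count - 1));
    return 0;
}

int main(void)
{
    return sync_range(0, 1300, print_chunk);   /* chunks of 512, 512, 276 */
}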
diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c
index c4259b5..f74cf32 100644
--- a/hw/s390x/s390-stattrib.c
+++ b/hw/s390x/s390-stattrib.c
@@ -16,9 +16,9 @@
#include "hw/qdev-properties.h"
#include "hw/s390x/storage-attributes.h"
#include "qemu/error-report.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "cpu.h"
/* 512KiB cover 2GB of guest memory */
@@ -304,10 +304,10 @@ static int qemu_s390_set_migrationmode_stub(S390StAttribState *sa, bool value,
static int qemu_s390_get_active(S390StAttribState *sa)
{
- return sa->migration_enabled;
+ return true;
}
-static void qemu_s390_stattrib_class_init(ObjectClass *oc, void *data)
+static void qemu_s390_stattrib_class_init(ObjectClass *oc, const void *data)
{
S390StAttribClass *sa_cl = S390_STATTRIB_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -360,19 +360,13 @@ static void s390_stattrib_realize(DeviceState *dev, Error **errp)
&savevm_s390_stattrib_handlers, dev);
}
-static Property s390_stattrib_props[] = {
- DEFINE_PROP_BOOL("migration-enabled", S390StAttribState, migration_enabled, true),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void s390_stattrib_class_init(ObjectClass *oc, void *data)
+static void s390_stattrib_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->realize = s390_stattrib_realize;
- device_class_set_props(dc, s390_stattrib_props);
}
static void s390_stattrib_instance_init(Object *obj)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index c483ff8..a79bd13 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -13,14 +13,11 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/ram_addr.h"
-#include "exec/confidential-guest-support.h"
+#include "system/ram_addr.h"
+#include "system/confidential-guest-support.h"
#include "hw/boards.h"
-#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
-#include "hw/s390x/ioinst.h"
-#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "qemu/config-file.h"
#include "qemu/ctype.h"
@@ -29,7 +26,7 @@
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "hw/s390x/s390-pci-bus.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/s390x/storage-keys.h"
#include "hw/s390x/storage-attributes.h"
#include "hw/s390x/event-facility.h"
@@ -38,16 +35,21 @@
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/ap-bridge.h"
#include "migration/register.h"
-#include "cpu_models.h"
+#include "target/s390x/cpu_models.h"
#include "hw/nmi.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/tod.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/cpus.h"
+#include "system/system.h"
+#include "system/cpus.h"
+#include "system/hostmem.h"
#include "target/s390x/kvm/pv.h"
#include "migration/blocker.h"
#include "qapi/visitor.h"
#include "hw/s390x/cpu-topology.h"
+#include "kvm/kvm_s390x.h"
+#include "hw/virtio/virtio-md-pci.h"
+#include "hw/s390x/virtio-ccw-md.h"
+#include "system/replay.h"
#include CONFIG_DEVICES
static Error *pv_mig_blocker;
@@ -124,70 +126,86 @@ static void subsystem_reset(void)
}
}
-static int virtio_ccw_hcall_notify(const uint64_t *args)
+static void s390_set_memory_limit(S390CcwMachineState *s390ms,
+ uint64_t new_limit)
{
- uint64_t subch_id = args[0];
- uint64_t data = args[1];
- SubchDev *sch;
- VirtIODevice *vdev;
- int cssid, ssid, schid, m;
- uint16_t vq_idx = data;
+ uint64_t hw_limit = 0;
+ int ret = 0;
- if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
- return -EINVAL;
+ assert(!s390ms->memory_limit && new_limit);
+ if (kvm_enabled()) {
+ ret = kvm_s390_set_mem_limit(new_limit, &hw_limit);
}
- sch = css_find_subch(m, cssid, ssid, schid);
- if (!sch || !css_subch_visible(sch)) {
- return -EINVAL;
- }
-
- vdev = virtio_ccw_get_vdev(sch);
- if (vq_idx >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, vq_idx)) {
- return -EINVAL;
+ if (ret == -E2BIG) {
+ error_report("host supports a maximum of %" PRIu64 " GB",
+ hw_limit / GiB);
+ exit(EXIT_FAILURE);
+ } else if (ret) {
+ error_report("setting the guest size failed");
+ exit(EXIT_FAILURE);
}
+ s390ms->memory_limit = new_limit;
+}
- if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
- virtio_queue_set_shadow_avail_idx(virtio_get_queue(vdev, vq_idx),
- (data >> 16) & 0xFFFF);
+static void s390_set_max_pagesize(S390CcwMachineState *s390ms,
+ uint64_t pagesize)
+{
+ assert(!s390ms->max_pagesize && pagesize);
+ if (kvm_enabled()) {
+ kvm_s390_set_max_pagesize(pagesize, &error_fatal);
}
-
- virtio_queue_notify(vdev, vq_idx);
- return 0;
+ s390ms->max_pagesize = pagesize;
}
-static int virtio_ccw_hcall_early_printk(const uint64_t *args)
+static void s390_memory_init(MachineState *machine)
{
- uint64_t mem = args[0];
- MachineState *ms = MACHINE(qdev_get_machine());
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(machine);
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *ram = machine->ram;
+ uint64_t ram_size = memory_region_size(ram);
+ uint64_t devmem_base, devmem_size;
- if (mem < ms->ram_size) {
- /* Early printk */
- return 0;
+ if (!QEMU_IS_ALIGNED(ram_size, 1 * MiB)) {
+ /*
+ * SCLP cannot currently expose a smaller granularity, and KVM cannot
+ * handle one either. As we don't support NUMA, the region size
+ * corresponds directly to machine->ram_size, and the region is a single
+ * RAM memory region.
+ */
+ error_report("ram size must be multiples of 1 MiB");
+ exit(EXIT_FAILURE);
}
- return -EINVAL;
-}
-static void virtio_ccw_register_hcalls(void)
-{
- s390_register_virtio_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY,
- virtio_ccw_hcall_notify);
- /* Tolerate early printk. */
- s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY,
- virtio_ccw_hcall_early_printk);
-}
+ devmem_size = 0;
+ devmem_base = ram_size;
+#ifdef CONFIG_MEM_DEVICE
+ if (machine->ram_size < machine->maxram_size) {
-static void s390_memory_init(MemoryRegion *ram)
-{
- MemoryRegion *sysmem = get_system_memory();
+ /*
+ * Make sure memory devices have a sane default alignment, even
+ * when weird initial memory sizes are specified.
+ */
+ devmem_base = QEMU_ALIGN_UP(devmem_base, 1 * GiB);
+ devmem_size = machine->maxram_size - machine->ram_size;
+ }
+#endif
+ s390_set_memory_limit(s390ms, devmem_base + devmem_size);
- /* allocate RAM for core */
+ /* Map the initial memory. Must happen after setting the memory limit. */
memory_region_add_subregion(sysmem, 0, ram);
+ /* Initialize address space for memory devices. */
+#ifdef CONFIG_MEM_DEVICE
+ if (devmem_size) {
+ machine_memory_devices_init(machine, devmem_base, devmem_size);
+ }
+#endif /* CONFIG_MEM_DEVICE */
+
/*
* Configure the maximum page size. As no memory devices were created
* yet, this is the page size of initial memory only.
*/
- s390_set_max_pagesize(qemu_maxrampagesize(), &error_fatal);
+ s390_set_max_pagesize(s390ms, qemu_maxrampagesize());
/* Initialize storage key device */
s390_skeys_init();
/* Initialize storage attributes device */
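s390_memory_init() now derives the machine's memory limit from the layout it creates: initial RAM starts at 0, the device-memory region (present when maxram_size exceeds ram_size) is placed above it aligned up to 1 GiB, and the limit handed to KVM is the end of that region. A quick sketch of the arithmetic, with a local ALIGN_UP macro standing in for QEMU_ALIGN_UP:

#include <stdint.h>
#include <stdio.h>

#define GiB (1024ULL * 1024 * 1024)
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))   /* stand-in for QEMU_ALIGN_UP */

/* Compute the memory limit: end of initial RAM, or end of the 1 GiB aligned
 * device-memory area when memory devices are possible. */
static uint64_t memory_limit(uint64_t ram_size, uint64_t maxram_size)
{
    uint64_t devmem_base = ram_size;
    uint64_t devmem_size = 0;

    if (ram_size < maxram_size) {
        devmem_base = ALIGN_UP(devmem_base, 1 * GiB);
        devmem_size = maxram_size - ram_size;
    }
    return devmem_base + devmem_size;
}

int main(void)
{
    /* 2.5 GiB initial RAM, 8 GiB maximum: device area starts at 3 GiB. */
    printf("limit = %llu MiB\n",
           (unsigned long long)(memory_limit(2560ULL << 20, 8ULL * GiB) >> 20));
    return 0;
}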
@@ -197,11 +215,10 @@ static void s390_memory_init(MemoryRegion *ram)
static void s390_init_ipl_dev(const char *kernel_filename,
const char *kernel_cmdline,
const char *initrd_filename, const char *firmware,
- const char *netboot_fw, bool enforce_bios)
+ bool enforce_bios)
{
Object *new = object_new(TYPE_S390_IPL);
DeviceState *dev = DEVICE(new);
- char *netboot_fw_prop;
if (kernel_filename) {
qdev_prop_set_string(dev, "kernel", kernel_filename);
@@ -212,11 +229,6 @@ static void s390_init_ipl_dev(const char *kernel_filename,
qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
qdev_prop_set_string(dev, "firmware", firmware);
qdev_prop_set_bit(dev, "enforce_bios", enforce_bios);
- netboot_fw_prop = object_property_get_str(new, "netboot_fw", &error_abort);
- if (!strlen(netboot_fw_prop)) {
- qdev_prop_set_string(dev, "netboot_fw", netboot_fw);
- }
- g_free(netboot_fw_prop);
object_property_add_child(qdev_get_machine(), TYPE_S390_IPL,
new);
object_unref(new);
@@ -248,9 +260,21 @@ static void s390_create_sclpconsole(SCLPDevice *sclp,
qdev_realize_and_unref(dev, ev_fac_bus, &error_fatal);
}
+static void s390_create_sclpcpi(SCLPDevice *sclp)
+{
+ SCLPEventFacility *ef = sclp->event_facility;
+ BusState *ev_fac_bus = sclp_get_event_facility_bus(ef);
+ DeviceState *dev;
+
+ dev = qdev_new(TYPE_SCLP_EVENT_CPI);
+ object_property_add_child(OBJECT(ef), "sclpcpi", OBJECT(dev));
+ qdev_realize_and_unref(dev, ev_fac_bus, &error_fatal);
+}
+
static void ccw_init(MachineState *machine)
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
+ S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
S390CcwMachineState *ms = S390_CCW_MACHINE(machine);
int ret;
VirtualCssBus *css_bus;
@@ -261,7 +285,7 @@ static void ccw_init(MachineState *machine)
qdev_realize_and_unref(DEVICE(ms->sclp), NULL, &error_fatal);
/* init memory + setup max page size. Required for the CPU model */
- s390_memory_init(machine->ram);
+ s390_memory_init(machine);
/* init CPUs (incl. CPU model) early so s390_has_feature() works */
s390_init_cpus(machine);
@@ -284,16 +308,13 @@ static void ccw_init(MachineState *machine)
s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
machine->initrd_filename,
machine->firmware ?: "s390-ccw.img",
- "s390-netboot.img", true);
+ true);
dev = qdev_new(TYPE_S390_PCI_HOST_BRIDGE);
object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE,
OBJECT(dev));
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- /* register hypercalls */
- virtio_ccw_register_hcalls();
-
s390_enable_css_support(s390_cpu_addr2state(0));
ret = css_create_css_image(VIRTUAL_CSSID, true);
@@ -314,6 +335,12 @@ static void ccw_init(MachineState *machine)
/* init the TOD clock */
s390_init_tod();
+
+ /* init SCLP event Control-Program Identification */
+ if (s390mc->use_cpi) {
+ s390_create_sclpcpi(ms->sclp);
+ }
+
}
static void s390_cpu_plug(HotplugHandler *hotplug_dev,
@@ -356,7 +383,8 @@ static void s390_machine_unprotect(S390CcwMachineState *ms)
ram_block_discard_disable(false);
}
-static int s390_machine_protect(S390CcwMachineState *ms)
+static int s390_machine_protect(S390CcwMachineState *ms,
+ struct S390PVResponse *pv_resp)
{
Error *local_err = NULL;
int rc;
@@ -399,19 +427,19 @@ static int s390_machine_protect(S390CcwMachineState *ms)
}
/* Set SE header and unpack */
- rc = s390_ipl_prepare_pv_header(&local_err);
+ rc = s390_ipl_prepare_pv_header(pv_resp, &local_err);
if (rc) {
goto out_err;
}
/* Decrypt image */
- rc = s390_ipl_pv_unpack();
+ rc = s390_ipl_pv_unpack(pv_resp);
if (rc) {
goto out_err;
}
/* Verify integrity */
- rc = s390_pv_verify();
+ rc = s390_pv_verify(pv_resp);
if (rc) {
goto out_err;
}
@@ -440,13 +468,26 @@ static void s390_pv_prepare_reset(S390CcwMachineState *ms)
s390_pv_prep_reset();
}
-static void s390_machine_reset(MachineState *machine, ShutdownCause reason)
+static void s390_machine_reset(MachineState *machine, ResetType type)
{
S390CcwMachineState *ms = S390_CCW_MACHINE(machine);
+ struct S390PVResponse pv_resp;
enum s390_reset reset_type;
CPUState *cs, *t;
S390CPU *cpu;
+ /*
+ * Temporarily drop the record/replay mutex to let rr_cpu_thread_fn()
+ * process the run_on_cpu() requests below. This is safe, because at this
+ * point one of the following is true:
+ * - No CPU threads are running, either because the machine is being
+ * initialized or because the guest requested a reset using diag 308.
+ * There is no risk of desynchronizing the record/replay state.
+ * - A snapshot is about to be loaded, in which case the consistency of
+ * the record/replay state does not matter.
+ */
+ replay_mutex_unlock();
+
/* get the reset parameters, reset them once done */
s390_ipl_get_reset_request(&cs, &reset_type);
@@ -472,7 +513,7 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason)
* Device reset includes CPU clear resets so this has to be
* done AFTER the unprotect call above.
*/
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
s390_crypto_reset();
/* configure and start the ipl CPU only */
@@ -519,14 +560,14 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason)
}
run_on_cpu(cs, s390_do_cpu_reset, RUN_ON_CPU_NULL);
- if (s390_machine_protect(ms)) {
- s390_pv_inject_reset_error(cs);
+ if (s390_machine_protect(ms, &pv_resp)) {
+ s390_pv_inject_reset_error(cs, pv_resp);
/*
* Continue after the diag308 so the guest knows something
* went wrong.
*/
s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
- return;
+ goto out_lock;
}
run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL);
@@ -539,13 +580,54 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason)
run_on_cpu(t, s390_do_cpu_set_diag318, RUN_ON_CPU_HOST_ULONG(0));
}
s390_ipl_clear_reset_request();
+
+out_lock:
+ /*
+ * Re-take the record/replay mutex, temporarily dropping the BQL in order
+ * to satisfy the ordering requirements.
+ */
+ bql_unlock();
+ replay_mutex_lock();
+ bql_lock();
+}
+
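The reset path above drops the record/replay mutex before issuing the run_on_cpu() work and re-acquires it at the end; because that mutex must be taken before the BQL, the re-acquisition briefly drops the BQL as well. Reduced to plain pthreads locks with invented names, the ordering pattern looks like this sketch (here `outer` plays the role of the replay mutex and `inner` the BQL).

#include <pthread.h>
#include <stdio.h>

/* Ordering rule for this sketch: outer must always be taken before inner. */
static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

static void do_reset_work(void)
{
    puts("reset work; in QEMU, other threads make progress here");
}

/* Called with both locks held, in the required order (outer, then inner). */
static void machine_reset(void)
{
    pthread_mutex_unlock(&outer);   /* release the outer lock for the duration */

    do_reset_work();

    /* Re-acquire in the correct order: drop inner, take outer, retake inner. */
    pthread_mutex_unlock(&inner);
    pthread_mutex_lock(&outer);
    pthread_mutex_lock(&inner);
}

int main(void)
{
    pthread_mutex_lock(&outer);
    pthread_mutex_lock(&inner);
    machine_reset();
    pthread_mutex_unlock(&inner);
    pthread_mutex_unlock(&outer);
    return 0;
}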
+static void s390_machine_device_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) {
+ virtio_ccw_md_pre_plug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
+ }
}
static void s390_machine_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
+ S390CcwMachineState *s390ms = S390_CCW_MACHINE(hotplug_dev);
+
if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
s390_cpu_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ /*
+ * At this point the device is realized and all of its memdevs are mapped,
+ * so qemu_maxrampagesize() will pick up the page sizes of those memdevs
+ * as well. Before we plug the device and expose any RAM memory regions
+ * to the system, make sure we don't exceed the previously set maximum
+ * page size. While this is only relevant for KVM, there is no real use
+ * case for it with TCG either, so we reject it unconditionally.
+ */
+ if (qemu_maxrampagesize() != s390ms->max_pagesize) {
+ error_setg(errp, "Memory device uses a bigger page size than"
+ " initial memory");
+ return;
+ }
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) {
+ virtio_ccw_md_plug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp);
+ } else {
+ virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
+ }
}
}
@@ -554,10 +636,25 @@ static void s390_machine_device_unplug_request(HotplugHandler *hotplug_dev,
{
if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
error_setg(errp, "CPU hot unplug not supported on this machine");
- return;
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) {
+ virtio_ccw_md_unplug_request(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev),
+ errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug_request(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev),
+ errp);
}
}
+static void s390_machine_device_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) {
+ virtio_ccw_md_unplug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
+ virtio_md_pci_unplug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
+ }
+ }
+
static CpuInstanceProperties s390_cpu_index_to_props(MachineState *ms,
unsigned cpu_index)
{
@@ -604,7 +701,9 @@ static const CPUArchIdList *s390_possible_cpu_arch_ids(MachineState *ms)
static HotplugHandler *s390_get_hotplug_handler(MachineState *machine,
DeviceState *dev)
{
- if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
return HOTPLUG_HANDLER(machine);
}
return NULL;
@@ -667,50 +766,6 @@ static inline void machine_set_dea_key_wrap(Object *obj, bool value,
ms->dea_key_wrap = value;
}
-static S390CcwMachineClass *current_mc;
-
-/*
- * Get the class of the s390-ccw-virtio machine that is currently in use.
- * Note: libvirt is using the "none" machine to probe for the features of the
- * host CPU, so in case this is called with the "none" machine, the function
- * returns the TYPE_S390_CCW_MACHINE base class. In this base class, all the
- * various "*_allowed" variables are enabled, so that the *_allowed() wrappers
- * below return the correct default value for the "none" machine.
- *
- * Attention! Do *not* add additional new wrappers for CPU features (e.g. like
- * the ri_allowed() wrapper) via this mechanism anymore. CPU features should
- * be handled via the CPU models, i.e. checking with cpu_model_allowed() during
- * CPU initialization and s390_has_feat() later should be sufficient.
- */
-static S390CcwMachineClass *get_machine_class(void)
-{
- if (unlikely(!current_mc)) {
- /*
- * No s390 ccw machine was instantiated, we are likely to
- * be called for the 'none' machine. The properties will
- * have their after-initialization values.
- */
- current_mc = S390_CCW_MACHINE_CLASS(
- object_class_by_name(TYPE_S390_CCW_MACHINE));
- }
- return current_mc;
-}
-
-bool ri_allowed(void)
-{
- return get_machine_class()->ri_allowed;
-}
-
-bool cpu_model_allowed(void)
-{
- return get_machine_class()->cpu_model_allowed;
-}
-
-bool hpage_1m_allowed(void)
-{
- return get_machine_class()->hpage_1m_allowed;
-}
-
static void machine_get_loadparm(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
@@ -728,48 +783,30 @@ static void machine_set_loadparm(Object *obj, Visitor *v,
{
S390CcwMachineState *ms = S390_CCW_MACHINE(obj);
char *val;
- int i;
if (!visit_type_str(v, name, &val, errp)) {
return;
}
- for (i = 0; i < sizeof(ms->loadparm) && val[i]; i++) {
- uint8_t c = qemu_toupper(val[i]); /* mimic HMC */
-
- if (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || (c == '.') ||
- (c == ' ')) {
- ms->loadparm[i] = c;
- } else {
- error_setg(errp, "LOADPARM: invalid character '%c' (ASCII 0x%02x)",
- c, c);
- return;
- }
- }
-
- for (; i < sizeof(ms->loadparm); i++) {
- ms->loadparm[i] = ' '; /* pad right with spaces */
- }
+ s390_ipl_fmt_loadparm(ms->loadparm, val, errp);
+ g_free(val);
}
-static void ccw_machine_class_init(ObjectClass *oc, void *data)
+static void ccw_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
NMIClass *nc = NMI_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
+ DumpSKeysInterface *dsi = DUMP_SKEYS_INTERFACE_CLASS(oc);
- s390mc->ri_allowed = true;
- s390mc->cpu_model_allowed = true;
- s390mc->hpage_1m_allowed = true;
s390mc->max_threads = 1;
- mc->init = ccw_init;
+ s390mc->use_cpi = true;
mc->reset = s390_machine_reset;
mc->block_default_type = IF_VIRTIO;
mc->no_cdrom = 1;
mc->no_floppy = 1;
mc->no_parallel = 1;
- mc->no_sdcard = 1;
mc->max_cpus = S390_MAX_CPUS;
mc->has_hotpluggable_cpus = true;
mc->smp_props.books_supported = true;
@@ -780,11 +817,14 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids;
/* it is overridden with 'host' cpu *in kvm_arch_init* */
mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu");
+ hc->pre_plug = s390_machine_device_pre_plug;
hc->plug = s390_machine_device_plug;
hc->unplug_request = s390_machine_device_unplug_request;
+ hc->unplug = s390_machine_device_unplug;
nc->nmi_monitor_handler = s390_nmi;
mc->default_ram_id = "s390.ram";
mc->default_nic = "virtio-net-ccw";
+ dsi->qmp_dump_skeys = s390_qmp_dump_skeys;
object_class_property_add_bool(oc, "aes-key-wrap",
machine_get_aes_key_wrap,
@@ -823,39 +863,39 @@ static const TypeInfo ccw_machine_info = {
.instance_init = s390_machine_initfn,
.class_size = sizeof(S390CcwMachineClass),
.class_init = ccw_machine_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_NMI },
{ TYPE_HOTPLUG_HANDLER},
+ { TYPE_DUMP_SKEYS_INTERFACE},
{ }
},
};
#define DEFINE_CCW_MACHINE_IMPL(latest, ...) \
+ static void MACHINE_VER_SYM(mach_init, ccw, __VA_ARGS__)(MachineState *mach) \
+ { \
+ MACHINE_VER_SYM(instance_options, ccw, __VA_ARGS__)(mach); \
+ ccw_init(mach); \
+ } \
static void MACHINE_VER_SYM(class_init, ccw, __VA_ARGS__)( \
ObjectClass *oc, \
- void *data) \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
MACHINE_VER_SYM(class_options, ccw, __VA_ARGS__)(mc); \
mc->desc = "Virtual s390x machine (version " MACHINE_VER_STR(__VA_ARGS__) ")"; \
+ mc->init = MACHINE_VER_SYM(mach_init, ccw, __VA_ARGS__); \
MACHINE_VER_DEPRECATION(__VA_ARGS__); \
if (latest) { \
mc->alias = "s390-ccw-virtio"; \
mc->is_default = true; \
} \
} \
- static void MACHINE_VER_SYM(instance_init, ccw, __VA_ARGS__)(Object *obj) \
- { \
- MachineState *machine = MACHINE(obj); \
- current_mc = S390_CCW_MACHINE_CLASS(MACHINE_GET_CLASS(machine)); \
- MACHINE_VER_SYM(instance_options, ccw, __VA_ARGS__)(machine); \
- } \
static const TypeInfo MACHINE_VER_SYM(info, ccw, __VA_ARGS__) = \
{ \
.name = MACHINE_VER_TYPE_NAME("s390-ccw-virtio", __VA_ARGS__), \
.parent = TYPE_S390_CCW_MACHINE, \
.class_init = MACHINE_VER_SYM(class_init, ccw, __VA_ARGS__), \
- .instance_init = MACHINE_VER_SYM(instance_init, ccw, __VA_ARGS__), \
}; \
static void MACHINE_VER_SYM(register, ccw, __VA_ARGS__)(void) \
{ \
@@ -871,14 +911,58 @@ static const TypeInfo ccw_machine_info = {
DEFINE_CCW_MACHINE_IMPL(false, major, minor)
+static void ccw_machine_10_1_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_10_1_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE_AS_LATEST(10, 1);
+
+static void ccw_machine_10_0_instance_options(MachineState *machine)
+{
+ ccw_machine_10_1_instance_options(machine);
+}
+
+static void ccw_machine_10_0_class_options(MachineClass *mc)
+{
+ S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
+ s390mc->use_cpi = false;
+
+ ccw_machine_10_1_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
+}
+DEFINE_CCW_MACHINE(10, 0);
+
+static void ccw_machine_9_2_instance_options(MachineState *machine)
+{
+ ccw_machine_10_0_instance_options(machine);
+}
+
+static void ccw_machine_9_2_class_options(MachineClass *mc)
+{
+ static GlobalProperty compat[] = {
+ { TYPE_S390_PCI_DEVICE, "relaxed-translation", "off", },
+ };
+
+ ccw_machine_10_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
+ compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
+}
+DEFINE_CCW_MACHINE(9, 2);
+
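Each versioned machine's class_options() calls the next newer version first and then appends its own compatibility properties, so older machine types accumulate every tweak added since; the 9.2 options above inherit the 10.0 behavior (CPI off) and additionally force the new zPCI "relaxed-translation" property off. A stripped-down sketch of that chaining pattern, with an invented option struct in place of MachineClass and compat properties:

#include <stdbool.h>
#include <stdio.h>

/* Invented option set standing in for MachineClass + compat properties. */
struct opts {
    bool use_cpi;               /* new default in 10.1 */
    bool relaxed_translation;   /* new zPCI default, forced off for <= 9.2 */
};

static void options_10_1(struct opts *o)
{
    o->use_cpi = true;
    o->relaxed_translation = true;
}

static void options_10_0(struct opts *o)
{
    options_10_1(o);            /* start from the newer defaults ... */
    o->use_cpi = false;         /* ... then roll back what 10.0 lacked */
}

static void options_9_2(struct opts *o)
{
    options_10_0(o);
    o->relaxed_translation = false;
}

int main(void)
{
    struct opts o;

    options_9_2(&o);
    printf("9.2: cpi=%d relaxed=%d\n", o.use_cpi, o.relaxed_translation);
    return 0;
}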
static void ccw_machine_9_1_instance_options(MachineState *machine)
{
+ ccw_machine_9_2_instance_options(machine);
}
static void ccw_machine_9_1_class_options(MachineClass *mc)
{
+ ccw_machine_9_2_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}
-DEFINE_CCW_MACHINE_AS_LATEST(9, 1);
+DEFINE_CCW_MACHINE(9, 1);
static void ccw_machine_9_0_instance_options(MachineState *machine)
{
@@ -1083,229 +1167,6 @@ static void ccw_machine_4_2_class_options(MachineClass *mc)
}
DEFINE_CCW_MACHINE(4, 2);
-static void ccw_machine_4_1_instance_options(MachineState *machine)
-{
- static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_1 };
- ccw_machine_4_2_instance_options(machine);
- s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat);
-}
-
-static void ccw_machine_4_1_class_options(MachineClass *mc)
-{
- ccw_machine_4_2_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
-}
-DEFINE_CCW_MACHINE(4, 1);
-
-static void ccw_machine_4_0_instance_options(MachineState *machine)
-{
- static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_0 };
- ccw_machine_4_1_instance_options(machine);
- s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat);
-}
-
-static void ccw_machine_4_0_class_options(MachineClass *mc)
-{
- ccw_machine_4_1_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
-}
-DEFINE_CCW_MACHINE(4, 0);
-
-static void ccw_machine_3_1_instance_options(MachineState *machine)
-{
- static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V3_1 };
- ccw_machine_4_0_instance_options(machine);
- s390_cpudef_featoff_greater(14, 1, S390_FEAT_MULTIPLE_EPOCH);
- s390_cpudef_group_featoff_greater(14, 1, S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF);
- s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat);
-}
-
-static void ccw_machine_3_1_class_options(MachineClass *mc)
-{
- ccw_machine_4_0_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
-}
-DEFINE_CCW_MACHINE(3, 1);
-
-static void ccw_machine_3_0_instance_options(MachineState *machine)
-{
- ccw_machine_3_1_instance_options(machine);
-}
-
-static void ccw_machine_3_0_class_options(MachineClass *mc)
-{
- S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
-
- s390mc->hpage_1m_allowed = false;
- ccw_machine_3_1_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
-}
-DEFINE_CCW_MACHINE(3, 0);
-
-static void ccw_machine_2_12_instance_options(MachineState *machine)
-{
- ccw_machine_3_0_instance_options(machine);
- s390_cpudef_featoff_greater(11, 1, S390_FEAT_PPA15);
- s390_cpudef_featoff_greater(11, 1, S390_FEAT_BPB);
-}
-
-static void ccw_machine_2_12_class_options(MachineClass *mc)
-{
- ccw_machine_3_0_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
-}
-DEFINE_CCW_MACHINE(2, 12);
-
-#ifdef CONFIG_S390X_LEGACY_CPUS
-
-static void ccw_machine_2_11_instance_options(MachineState *machine)
-{
- static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V2_11 };
- ccw_machine_2_12_instance_options(machine);
-
- /* before 2.12 we emulated the very first z900 */
- s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat);
-}
-
-static void ccw_machine_2_11_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_SCLP_EVENT_FACILITY, "allow_all_mask_sizes", "off", },
- };
-
- ccw_machine_2_12_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-DEFINE_CCW_MACHINE(2, 11);
-
-static void ccw_machine_2_10_instance_options(MachineState *machine)
-{
- ccw_machine_2_11_instance_options(machine);
-}
-
-static void ccw_machine_2_10_class_options(MachineClass *mc)
-{
- ccw_machine_2_11_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
-}
-DEFINE_CCW_MACHINE(2, 10);
-
-static void ccw_machine_2_9_instance_options(MachineState *machine)
-{
- ccw_machine_2_10_instance_options(machine);
- s390_cpudef_featoff_greater(12, 1, S390_FEAT_ESOP);
- s390_cpudef_featoff_greater(12, 1, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2);
- s390_cpudef_featoff_greater(12, 1, S390_FEAT_ZPCI);
- s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_INT_SUPPRESSION);
- s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_EVENT_NOTIFICATION);
-}
-
-static void ccw_machine_2_9_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_S390_STATTRIB, "migration-enabled", "off", },
- { TYPE_S390_FLIC_COMMON, "migration-enabled", "off", },
- };
-
- ccw_machine_2_10_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- css_migration_enabled = false;
-}
-DEFINE_CCW_MACHINE(2, 9);
-
-static void ccw_machine_2_8_instance_options(MachineState *machine)
-{
- ccw_machine_2_9_instance_options(machine);
-}
-
-static void ccw_machine_2_8_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_S390_FLIC_COMMON, "adapter_routes_max_batch", "64", },
- };
-
- ccw_machine_2_9_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-DEFINE_CCW_MACHINE(2, 8);
-
-static void ccw_machine_2_7_instance_options(MachineState *machine)
-{
- ccw_machine_2_8_instance_options(machine);
-}
-
-static void ccw_machine_2_7_class_options(MachineClass *mc)
-{
- S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
-
- s390mc->cpu_model_allowed = false;
- ccw_machine_2_8_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
-}
-DEFINE_CCW_MACHINE(2, 7);
-
-static void ccw_machine_2_6_instance_options(MachineState *machine)
-{
- ccw_machine_2_7_instance_options(machine);
-}
-
-static void ccw_machine_2_6_class_options(MachineClass *mc)
-{
- S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc);
- static GlobalProperty compat[] = {
- { TYPE_S390_IPL, "iplbext_migration", "off", },
- { TYPE_VIRTUAL_CSS_BRIDGE, "css_dev_path", "off", },
- };
-
- s390mc->ri_allowed = false;
- ccw_machine_2_7_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-DEFINE_CCW_MACHINE(2, 6);
-
-static void ccw_machine_2_5_instance_options(MachineState *machine)
-{
- ccw_machine_2_6_instance_options(machine);
-}
-
-static void ccw_machine_2_5_class_options(MachineClass *mc)
-{
- ccw_machine_2_6_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
-}
-DEFINE_CCW_MACHINE(2, 5);
-
-static void ccw_machine_2_4_instance_options(MachineState *machine)
-{
- ccw_machine_2_5_instance_options(machine);
-}
-
-static void ccw_machine_2_4_class_options(MachineClass *mc)
-{
- static GlobalProperty compat[] = {
- { TYPE_S390_SKEYS, "migration-enabled", "off", },
- { "virtio-blk-ccw", "max_revision", "0", },
- { "virtio-balloon-ccw", "max_revision", "0", },
- { "virtio-serial-ccw", "max_revision", "0", },
- { "virtio-9p-ccw", "max_revision", "0", },
- { "virtio-rng-ccw", "max_revision", "0", },
- { "virtio-net-ccw", "max_revision", "0", },
- { "virtio-scsi-ccw", "max_revision", "0", },
- { "vhost-scsi-ccw", "max_revision", "0", },
- };
-
- ccw_machine_2_5_class_options(mc);
- compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
- compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
-}
-DEFINE_CCW_MACHINE(2, 4);
-
-#endif
-
static void ccw_machine_register_types(void)
{
type_register_static(&ccw_machine_info);
diff --git a/hw/s390x/s390-virtio-hcall.c b/hw/s390x/s390-virtio-hcall.c
deleted file mode 100644
index ec7cf8b..0000000
--- a/hw/s390x/s390-virtio-hcall.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Support for virtio hypercalls on s390
- *
- * Copyright 2012 IBM Corp.
- * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "hw/s390x/s390-virtio-hcall.h"
-
-#define MAX_DIAG_SUBCODES 255
-
-static s390_virtio_fn s390_diag500_table[MAX_DIAG_SUBCODES];
-
-void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn)
-{
- assert(code < MAX_DIAG_SUBCODES);
- assert(!s390_diag500_table[code]);
-
- s390_diag500_table[code] = fn;
-}
-
-int s390_virtio_hypercall(CPUS390XState *env)
-{
- s390_virtio_fn fn;
-
- if (env->regs[1] < MAX_DIAG_SUBCODES) {
- fn = s390_diag500_table[env->regs[1]];
- if (fn) {
- env->regs[2] = fn(&env->regs[2]);
- return 0;
- }
- }
-
- return -EINVAL;
-}
diff --git a/hw/s390x/s390-virtio-hcall.h b/hw/s390x/s390-virtio-hcall.h
deleted file mode 100644
index 3ae6d6a..0000000
--- a/hw/s390x/s390-virtio-hcall.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Support for virtio hypercalls on s390x
- *
- * Copyright IBM Corp. 2012, 2017
- * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-
-#ifndef HW_S390_VIRTIO_HCALL_H
-#define HW_S390_VIRTIO_HCALL_H
-
-#include "standard-headers/asm-s390/virtio-ccw.h"
-#include "cpu.h"
-
-/* The only thing that we need from the old kvm_virtio.h file */
-#define KVM_S390_VIRTIO_NOTIFY 0
-
-typedef int (*s390_virtio_fn)(const uint64_t *args);
-void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn);
-int s390_virtio_hypercall(CPUS390XState *env);
-
-#endif /* HW_S390_VIRTIO_HCALL_H */
diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c
index e725dcd..9718564 100644
--- a/hw/s390x/sclp.c
+++ b/hw/s390x/sclp.c
@@ -110,7 +110,6 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
MachineState *machine = MACHINE(qdev_get_machine());
int cpu_count;
int rnsize, rnmax;
- IplParameterBlock *ipib = s390_ipl_get_iplb();
int required_len = SCCB_REQ_LEN(ReadInfo, machine->possible_cpus->len);
int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ?
offsetof(ReadInfo, entries) :
@@ -162,7 +161,11 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
read_info->rnsize2 = cpu_to_be32(rnsize);
}
- /* we don't support standby memory, maxram_size is never exposed */
+ /*
+ * We don't support standby memory. maxram_size is used for sizing the
+ * memory device region, which is not exposed through SCLP but through
+ * diag500.
+ */
rnmax = machine->ram_size >> sclp->increment_size;
if (rnmax < 0x10000) {
read_info->rnmax = cpu_to_be16(rnmax);
@@ -171,12 +174,8 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
read_info->rnmax2 = cpu_to_be64(rnmax);
}
- if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) {
- memcpy(&read_info->loadparm, &ipib->loadparm,
- sizeof(read_info->loadparm));
- } else {
- s390_ipl_set_loadparm(read_info->loadparm);
- }
+ s390_ipl_convert_loadparm((char *)S390_CCW_MACHINE(machine)->loadparm,
+ read_info->loadparm);
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
@@ -381,10 +380,7 @@ void sclp_service_interrupt(uint32_t sccb)
/* qemu object creation and initialization functions */
static void sclp_realize(DeviceState *dev, Error **errp)
{
- MachineState *machine = MACHINE(qdev_get_machine());
SCLPDevice *sclp = SCLP(dev);
- uint64_t hw_limit;
- int ret;
/*
* qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
@@ -394,14 +390,6 @@ static void sclp_realize(DeviceState *dev, Error **errp)
if (!sysbus_realize(SYS_BUS_DEVICE(sclp->event_facility), errp)) {
return;
}
-
- ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
- if (ret == -E2BIG) {
- error_setg(errp, "host supports a maximum of %" PRIu64 " GB",
- hw_limit / GiB);
- } else if (ret) {
- error_setg(errp, "setting the guest size failed");
- }
}
static void sclp_memory_init(SCLPDevice *sclp)
@@ -436,7 +424,7 @@ static void sclp_init(Object *obj)
sclp_memory_init(sclp);
}
-static void sclp_class_init(ObjectClass *oc, void *data)
+static void sclp_class_init(ObjectClass *oc, const void *data)
{
SCLPDeviceClass *sc = SCLP_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/s390x/sclpcpi.c b/hw/s390x/sclpcpi.c
new file mode 100644
index 0000000..7aa039d
--- /dev/null
+++ b/hw/s390x/sclpcpi.c
@@ -0,0 +1,212 @@
+ /*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * SCLP event type 11 - Control-Program Identification (CPI):
+ * CPI is used to send program identifiers from the guest to the
+ * Service-Call Logical Processor (SCLP). It is not sent by the SCLP.
+ *
+ * Control-program identifiers provide data about the guest operating
+ * system. The control-program identifiers are: system type, system name,
+ * system level and sysplex name.
+ *
+ * In Linux, all the control-program identifiers are user-configurable. The
+ * system type, system name, and sysplex name use EBCDIC characters from
+ * this set: capital A-Z, 0-9, $, @, #, and blank. In Linux, the system
+ * type, system name, and sysplex name are arbitrary free-form text.
+ *
+ * In Linux, the 8-byte hexadecimal system-level has the format
+ * 0x<a><b><cc><dd><eeee><ff><gg><hh>, where:
+ * <a>: is a 4-bit digit whose most significant bit indicates hypervisor use
+ * <b>: is one digit that represents Linux distributions as follows
+ * 0: generic Linux
+ * 1: Red Hat Enterprise Linux
+ * 2: SUSE Linux Enterprise Server
+ * 3: Canonical Ubuntu
+ * 4: Fedora
+ * 5: openSUSE Leap
+ * 6: Debian GNU/Linux
+ * 7: Red Hat Enterprise Linux CoreOS
+ * <cc>: are two digits for a distribution-specific encoding of the major
+ * version of the distribution
+ * <dd>: are two digits for a distribution-specific encoding of the minor
+ * version of the distribution
+ * <eeee>: are four digits for the patch level of the distribution
+ * <ff>: are two digits for the major version of the kernel
+ * <gg>: are two digits for the minor version of the kernel
+ * <hh>: are two digits for the stable version of the kernel
+ * (e.g. 74872343805430528, which is 0x010a000000060b00 in hex). On
+ * machines prior to z16, some of the values are not available to display.
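+ *
+ * Worked example of the layout above: 74872343805430528 == 0x010a000000060b00
+ * splits into <a>=0x0, <b>=0x1 (Red Hat Enterprise Linux), <cc>=0x0a,
+ * <dd>=0x00, <eeee>=0x0000, <ff>=0x06, <gg>=0x0b, <hh>=0x00, i.e. presumably
+ * a RHEL 10.0 system (patch level 0) running a 6.11.0 kernel.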
+ *
+ * Sysplex refers to a cluster of logical partitions that communicate and
+ * cooperate with each other.
+ *
+ * The CPI feature is supported since 10.1.
+ *
+ * Copyright IBM, Corp. 2024
+ *
+ * Authors:
+ * Shalini Chellathurai Saroja <shalini@linux.ibm.com>
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "hw/s390x/event-facility.h"
+#include "hw/s390x/ebcdic.h"
+#include "qapi/qapi-visit-machine.h"
+#include "migration/vmstate.h"
+
+typedef struct Data {
+ uint8_t id_format;
+ uint8_t reserved0;
+ uint8_t system_type[8];
+ uint64_t reserved1;
+ uint8_t system_name[8];
+ uint64_t reserved2;
+ uint64_t system_level;
+ uint64_t reserved3;
+ uint8_t sysplex_name[8];
+ uint8_t reserved4[16];
+} QEMU_PACKED Data;
+
+typedef struct ControlProgramIdMsg {
+ EventBufferHeader ebh;
+ Data data;
+} QEMU_PACKED ControlProgramIdMsg;
+
+static bool can_handle_event(uint8_t type)
+{
+ return type == SCLP_EVENT_CTRL_PGM_ID;
+}
+
+static sccb_mask_t send_mask(void)
+{
+ return 0;
+}
+
+/* Enable SCLP to accept buffers of event type CPI from the control-program. */
+static sccb_mask_t receive_mask(void)
+{
+ return SCLP_EVENT_MASK_CTRL_PGM_ID;
+}
+
+static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr)
+{
+ ControlProgramIdMsg *cpim = container_of(evt_buf_hdr, ControlProgramIdMsg,
+ ebh);
+ SCLPEventCPI *e = SCLP_EVENT_CPI(event);
+
+ ascii_put(e->system_type, (char *)cpim->data.system_type,
+ sizeof(cpim->data.system_type));
+ ascii_put(e->system_name, (char *)cpim->data.system_name,
+ sizeof(cpim->data.system_name));
+ ascii_put(e->sysplex_name, (char *)cpim->data.sysplex_name,
+ sizeof(cpim->data.sysplex_name));
+ e->system_level = ldq_be_p(&cpim->data.system_level);
+ e->timestamp = qemu_clock_get_ns(QEMU_CLOCK_HOST);
+
+ cpim->ebh.flags = SCLP_EVENT_BUFFER_ACCEPTED;
+ return SCLP_RC_NORMAL_COMPLETION;
+}
+
+static char *get_system_type(Object *obj, Error **errp)
+{
+ SCLPEventCPI *e = SCLP_EVENT_CPI(obj);
+
+ return g_strndup((char *) e->system_type, sizeof(e->system_type));
+}
+
+static char *get_system_name(Object *obj, Error **errp)
+{
+ SCLPEventCPI *e = SCLP_EVENT_CPI(obj);
+
+ return g_strndup((char *) e->system_name, sizeof(e->system_name));
+}
+
+static char *get_sysplex_name(Object *obj, Error **errp)
+{
+ SCLPEventCPI *e = SCLP_EVENT_CPI(obj);
+
+ return g_strndup((char *) e->sysplex_name, sizeof(e->sysplex_name));
+}
+
+static void get_system_level(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ SCLPEventCPI *e = SCLP_EVENT_CPI(obj);
+
+ visit_type_uint64(v, name, &e->system_level, errp);
+}
+
+static void get_timestamp(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ SCLPEventCPI *e = SCLP_EVENT_CPI(obj);
+
+ visit_type_uint64(v, name, &e->timestamp, errp);
+}
+
+static const VMStateDescription vmstate_sclpcpi = {
+ .name = "s390_control_program_id",
+ .version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(system_type, SCLPEventCPI, 8),
+ VMSTATE_UINT8_ARRAY(system_name, SCLPEventCPI, 8),
+ VMSTATE_UINT64(system_level, SCLPEventCPI),
+ VMSTATE_UINT8_ARRAY(sysplex_name, SCLPEventCPI, 8),
+ VMSTATE_UINT64(timestamp, SCLPEventCPI),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void cpi_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SCLPEventClass *k = SCLP_EVENT_CLASS(klass);
+
+ dc->user_creatable = false;
+ dc->vmsd = &vmstate_sclpcpi;
+
+ k->can_handle_event = can_handle_event;
+ k->get_send_mask = send_mask;
+ k->get_receive_mask = receive_mask;
+ k->write_event_data = write_event_data;
+
+ object_class_property_add_str(klass, "system_type", get_system_type, NULL);
+ object_class_property_set_description(klass, "system_type",
+ "operating system e.g. \"LINUX \"");
+
+ object_class_property_add_str(klass, "system_name", get_system_name, NULL);
+ object_class_property_set_description(klass, "system_name",
+ "user configurable name of the VM e.g. \"TESTVM \"");
+
+ object_class_property_add_str(klass, "sysplex_name", get_sysplex_name,
+ NULL);
+ object_class_property_set_description(klass, "sysplex_name",
+ "name of the cluster which the VM belongs to, if any"
+ " e.g. \"PLEX \"");
+
+ object_class_property_add(klass, "system_level", "uint64", get_system_level,
+ NULL, NULL, NULL);
+ object_class_property_set_description(klass, "system_level",
+ "distribution and kernel version in Linux e.g. 74872343805430528");
+
+ object_class_property_add(klass, "timestamp", "uint64", get_timestamp,
+ NULL, NULL, NULL);
+ object_class_property_set_description(klass, "timestamp",
+ "latest update of CPI data in nanoseconds since the UNIX EPOCH");
+}
+
+static const TypeInfo sclp_cpi_info = {
+ .name = TYPE_SCLP_EVENT_CPI,
+ .parent = TYPE_SCLP_EVENT,
+ .instance_size = sizeof(SCLPEventCPI),
+ .class_init = cpi_class_init,
+};
+
+static void sclp_cpi_register_types(void)
+{
+ type_register_static(&sclp_cpi_info);
+}
+
+type_init(sclp_cpi_register_types)
diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c
index fa79891..4b6ebfe 100644
--- a/hw/s390x/sclpcpu.c
+++ b/hw/s390x/sclpcpu.c
@@ -17,7 +17,7 @@
#include "hw/s390x/sclp.h"
#include "qemu/module.h"
#include "hw/s390x/event-facility.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
typedef struct ConfigMgtData {
EventBufferHeader ebh;
@@ -73,7 +73,7 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
return 1;
}
-static void sclp_cpu_class_init(ObjectClass *oc, void *data)
+static void sclp_cpu_class_init(ObjectClass *oc, const void *data)
{
SCLPEventClass *k = SCLP_EVENT_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c
index 14936aa..da4c8f3 100644
--- a/hw/s390x/sclpquiesce.c
+++ b/hw/s390x/sclpquiesce.c
@@ -16,7 +16,7 @@
#include "hw/s390x/sclp.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/s390x/event-facility.h"
typedef struct SignalQuiesce {
@@ -112,12 +112,12 @@ static void quiesce_reset(DeviceState *dev)
event->event_pending = false;
}
-static void quiesce_class_init(ObjectClass *klass, void *data)
+static void quiesce_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCLPEventClass *k = SCLP_EVENT_CLASS(klass);
- dc->reset = quiesce_reset;
+ device_class_set_legacy_reset(dc, quiesce_reset);
dc->vmsd = &vmstate_sclpquiesce;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
/*
diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c
index 9588b90..c9b8896 100644
--- a/hw/s390x/tod-kvm.c
+++ b/hw/s390x/tod-kvm.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/s390x/tod.h"
#include "target/s390x/kvm/pv.h"
#include "kvm/kvm_s390x.h"
@@ -133,7 +133,7 @@ static void kvm_s390_tod_realize(DeviceState *dev, Error **errp)
qemu_add_vm_change_state_handler(kvm_s390_tod_vm_state_change, td);
}
-static void kvm_s390_tod_class_init(ObjectClass *oc, void *data)
+static void kvm_s390_tod_class_init(ObjectClass *oc, const void *data)
{
S390TODClass *tdc = S390_TOD_CLASS(oc);
diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c
index 2d540db..0cc9662 100644
--- a/hw/s390x/tod-tcg.c
+++ b/hw/s390x/tod-tcg.c
@@ -16,7 +16,7 @@
#include "qemu/module.h"
#include "cpu.h"
#include "tcg/tcg_s390x.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod,
Error **errp)
@@ -52,7 +52,7 @@ static void qemu_s390_tod_set(S390TODState *td, const S390TOD *tod,
}
}
-static void qemu_s390_tod_class_init(ObjectClass *oc, void *data)
+static void qemu_s390_tod_class_init(ObjectClass *oc, const void *data)
{
S390TODClass *tdc = S390_TOD_CLASS(oc);
diff --git a/hw/s390x/tod.c b/hw/s390x/tod.c
index c81b1c0..3f913cc 100644
--- a/hw/s390x/tod.c
+++ b/hw/s390x/tod.c
@@ -13,9 +13,9 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
-#include "sysemu/qtest.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
+#include "system/qtest.h"
#include "migration/qemu-file-types.h"
#include "migration/register.h"
@@ -111,7 +111,7 @@ static void s390_tod_realize(DeviceState *dev, Error **errp)
register_savevm_live("todclock", 0, 1, &savevm_tod, td);
}
-static void s390_tod_class_init(ObjectClass *oc, void *data)
+static void s390_tod_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/s390x/vhost-scsi-ccw.c b/hw/s390x/vhost-scsi-ccw.c
index 40dc14b..8341b23 100644
--- a/hw/s390x/vhost-scsi-ccw.c
+++ b/hw/s390x/vhost-scsi-ccw.c
@@ -41,13 +41,12 @@ static void vhost_ccw_scsi_instance_init(Object *obj)
TYPE_VHOST_SCSI);
}
-static Property vhost_ccw_scsi_properties[] = {
+static const Property vhost_ccw_scsi_properties[] = {
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data)
+static void vhost_ccw_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c
index 6c6f269..cc1b822 100644
--- a/hw/s390x/vhost-user-fs-ccw.c
+++ b/hw/s390x/vhost-user-fs-ccw.c
@@ -23,12 +23,11 @@ typedef struct VHostUserFSCcw {
OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW)
-static Property vhost_user_fs_ccw_properties[] = {
+static const Property vhost_user_fs_ccw_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp)
@@ -49,7 +48,7 @@ static void vhost_user_fs_ccw_instance_init(Object *obj)
TYPE_VHOST_USER_FS);
}
-static void vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data)
+static void vhost_user_fs_ccw_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/vhost-vsock-ccw.c b/hw/s390x/vhost-vsock-ccw.c
index 07845a9..552e9e8 100644
--- a/hw/s390x/vhost-vsock-ccw.c
+++ b/hw/s390x/vhost-vsock-ccw.c
@@ -22,10 +22,9 @@ struct VHostVSockCCWState {
VHostVSock vdev;
};
-static Property vhost_vsock_ccw_properties[] = {
+static const Property vhost_vsock_ccw_properties[] = {
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp)
@@ -36,7 +35,7 @@ static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp)
qdev_realize(vdev, BUS(&ccw_dev->bus), errp);
}
-static void vhost_vsock_ccw_class_init(ObjectClass *klass, void *data)
+static void vhost_vsock_ccw_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-9p.c b/hw/s390x/virtio-ccw-9p.c
index 6f931f5..72bf6ec 100644
--- a/hw/s390x/virtio-ccw-9p.c
+++ b/hw/s390x/virtio-ccw-9p.c
@@ -41,15 +41,14 @@ static void virtio_ccw_9p_instance_init(Object *obj)
TYPE_VIRTIO_9P);
}
-static Property virtio_ccw_9p_properties[] = {
+static const Property virtio_ccw_9p_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_9p_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-balloon.c b/hw/s390x/virtio-ccw-balloon.c
index 44287b9..399b40f 100644
--- a/hw/s390x/virtio-ccw-balloon.c
+++ b/hw/s390x/virtio-ccw-balloon.c
@@ -46,15 +46,14 @@ static void virtio_ccw_balloon_instance_init(Object *obj)
"guest-stats-polling-interval");
}
-static Property virtio_ccw_balloon_properties[] = {
+static const Property virtio_ccw_balloon_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_balloon_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-blk.c b/hw/s390x/virtio-ccw-blk.c
index 8e0e58b..7d8c4a75 100644
--- a/hw/s390x/virtio-ccw-blk.c
+++ b/hw/s390x/virtio-ccw-blk.c
@@ -43,15 +43,15 @@ static void virtio_ccw_blk_instance_init(Object *obj)
"bootindex");
}
-static Property virtio_ccw_blk_properties[] = {
+static const Property virtio_ccw_blk_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm),
};
-static void virtio_ccw_blk_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_blk_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-crypto.c b/hw/s390x/virtio-ccw-crypto.c
index 0fa2f89..75e7146 100644
--- a/hw/s390x/virtio-ccw-crypto.c
+++ b/hw/s390x/virtio-ccw-crypto.c
@@ -44,15 +44,14 @@ static void virtio_ccw_crypto_instance_init(Object *obj)
TYPE_VIRTIO_CRYPTO);
}
-static Property virtio_ccw_crypto_properties[] = {
+static const Property virtio_ccw_crypto_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_crypto_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_crypto_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-gpu.c b/hw/s390x/virtio-ccw-gpu.c
index 0642c52..edb6a47 100644
--- a/hw/s390x/virtio-ccw-gpu.c
+++ b/hw/s390x/virtio-ccw-gpu.c
@@ -42,15 +42,14 @@ static void virtio_ccw_gpu_instance_init(Object *obj)
TYPE_VIRTIO_GPU);
}
-static Property virtio_ccw_gpu_properties[] = {
+static const Property virtio_ccw_gpu_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_gpu_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_gpu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-input.c b/hw/s390x/virtio-ccw-input.c
index 61a07ba..2250d8c 100644
--- a/hw/s390x/virtio-ccw-input.c
+++ b/hw/s390x/virtio-ccw-input.c
@@ -43,15 +43,14 @@ static void virtio_ccw_input_realize(VirtioCcwDevice *ccw_dev, Error **errp)
qdev_realize(vdev, BUS(&ccw_dev->bus), errp);
}
-static Property virtio_ccw_input_properties[] = {
+static const Property virtio_ccw_input_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_input_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_input_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-md-stubs.c b/hw/s390x/virtio-ccw-md-stubs.c
new file mode 100644
index 0000000..e937865
--- /dev/null
+++ b/hw/s390x/virtio-ccw-md-stubs.c
@@ -0,0 +1,24 @@
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/s390x/virtio-ccw-md.h"
+
+void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms,
+ Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
+
+void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ error_setg(errp, "virtio based memory devices not supported");
+}
diff --git a/hw/s390x/virtio-ccw-md.c b/hw/s390x/virtio-ccw-md.c
new file mode 100644
index 0000000..0370f58
--- /dev/null
+++ b/hw/s390x/virtio-ccw-md.c
@@ -0,0 +1,153 @@
+/*
+ * Virtio CCW support for abstract virtio based memory device
+ *
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/s390x/virtio-ccw-md.h"
+#include "hw/mem/memory-device.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ if (!bus_handler && dev->hotplugged) {
+ /*
+ * Without a bus hotplug handler, we cannot control the plug/unplug
+ * order. We should never reach this point when hotplugging, but
+ * better add a safety net.
+ */
+ error_setg(errp, "hotplug of virtio based memory devices not supported"
+ " on this bus.");
+ return;
+ }
+
+ /*
+ * First, see if we can plug this memory device at all. If that
+ * succeeds, branch off to the actual hotplug handler.
+ */
+ memory_device_pre_plug(md, ms, &local_err);
+ if (!local_err && bus_handler) {
+ hotplug_handler_pre_plug(bus_handler, dev, &local_err);
+ }
+ error_propagate(errp, local_err);
+}
+
+void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ /*
+ * Plug the memory device first and then branch off to the actual
+ * hotplug handler. If that one fails, we can easily undo the memory
+ * device bits.
+ */
+ memory_device_plug(md, ms);
+ if (bus_handler) {
+ hotplug_handler_plug(bus_handler, dev, &local_err);
+ if (local_err) {
+ memory_device_unplug(md, ms);
+ }
+ }
+ error_propagate(errp, local_err);
+}
+
+void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms,
+ Error **errp)
+{
+ VirtIOMDCcwClass *vmdc = VIRTIO_MD_CCW_GET_CLASS(vmd);
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ HotplugHandlerClass *hdc;
+ Error *local_err = NULL;
+
+ if (!vmdc->unplug_request_check) {
+ error_setg(errp,
+ "this virtio based memory devices cannot be unplugged");
+ return;
+ }
+
+ if (!bus_handler) {
+ error_setg(errp, "hotunplug of virtio based memory devices not"
+ "supported on this bus");
+ return;
+ }
+
+ vmdc->unplug_request_check(vmd, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /*
+ * Forward the async request or turn it into a sync request (handling it
+ * like qdev_unplug()).
+ */
+ hdc = HOTPLUG_HANDLER_GET_CLASS(bus_handler);
+ if (hdc->unplug_request) {
+ hotplug_handler_unplug_request(bus_handler, dev, &local_err);
+ } else {
+ virtio_ccw_md_unplug(vmd, ms, &local_err);
+ if (!local_err) {
+ object_unparent(OBJECT(dev));
+ }
+ }
+}
+
+void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp)
+{
+ DeviceState *dev = DEVICE(vmd);
+ HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev);
+ MemoryDeviceState *md = MEMORY_DEVICE(vmd);
+ Error *local_err = NULL;
+
+ /* Unplug the memory device while it is still realized. */
+ memory_device_unplug(md, ms);
+
+ if (bus_handler) {
+ hotplug_handler_unplug(bus_handler, dev, &local_err);
+ if (local_err) {
+ /* Not expected to fail ... but still try to recover. */
+ memory_device_plug(md, ms);
+ error_propagate(errp, local_err);
+ return;
+ }
+ } else {
+ /* Very unexpected, but let's just try to do the right thing. */
+ warn_report("Unexpected unplug of virtio based memory device");
+ qdev_unrealize(dev);
+ }
+}
+
+static const TypeInfo virtio_ccw_md_info = {
+ .name = TYPE_VIRTIO_MD_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIOMDCcw),
+ .class_size = sizeof(VirtIOMDCcwClass),
+ .abstract = true,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_MEMORY_DEVICE },
+ { }
+ },
+};
+
+static void virtio_ccw_md_register(void)
+{
+ type_register_static(&virtio_ccw_md_info);
+}
+type_init(virtio_ccw_md_register)
diff --git a/hw/s390x/virtio-ccw-md.h b/hw/s390x/virtio-ccw-md.h
new file mode 100644
index 0000000..39ba864
--- /dev/null
+++ b/hw/s390x/virtio-ccw-md.h
@@ -0,0 +1,44 @@
+/*
+ * Virtio CCW support for abstract virtio based memory device
+ *
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_S390X_VIRTIO_CCW_MD_H
+#define HW_S390X_VIRTIO_CCW_MD_H
+
+#include "virtio-ccw.h"
+#include "qom/object.h"
+
+/*
+ * virtio-md-ccw: This extends VirtioCcwDevice.
+ */
+#define TYPE_VIRTIO_MD_CCW "virtio-md-ccw"
+
+OBJECT_DECLARE_TYPE(VirtIOMDCcw, VirtIOMDCcwClass, VIRTIO_MD_CCW)
+
+struct VirtIOMDCcwClass {
+ /* private */
+ VirtIOCCWDeviceClass parent;
+
+ /* public */
+ void (*unplug_request_check)(VirtIOMDCcw *vmd, Error **errp);
+};
+
+struct VirtIOMDCcw {
+ VirtioCcwDevice parent_obj;
+};
+
+void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp);
+void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp);
+void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms,
+ Error **errp);
+void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp);
+
+#endif /* HW_S390X_VIRTIO_CCW_MD_H */
diff --git a/hw/s390x/virtio-ccw-mem.c b/hw/s390x/virtio-ccw-mem.c
new file mode 100644
index 0000000..daa485d
--- /dev/null
+++ b/hw/s390x/virtio-ccw-mem.c
@@ -0,0 +1,225 @@
+/*
+ * virtio-mem CCW implementation
+ *
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "virtio-ccw-mem.h"
+#include "hw/mem/memory-device.h"
+#include "qapi/qapi-events-machine.h"
+#include "qapi/qapi-events-misc.h"
+
+static void virtio_ccw_mem_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_realize(vdev, BUS(&ccw_dev->bus), errp);
+}
+
+static void virtio_ccw_mem_set_addr(MemoryDeviceState *md, uint64_t addr,
+ Error **errp)
+{
+ object_property_set_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP, addr, errp);
+}
+
+static uint64_t virtio_ccw_mem_get_addr(const MemoryDeviceState *md)
+{
+ return object_property_get_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP,
+ &error_abort);
+}
+
+static MemoryRegion *virtio_ccw_mem_get_memory_region(MemoryDeviceState *md,
+ Error **errp)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md);
+ VirtIOMEM *vmem = &dev->vdev;
+ VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem);
+
+ return vmc->get_memory_region(vmem, errp);
+}
+
+static void virtio_ccw_mem_decide_memslots(MemoryDeviceState *md,
+ unsigned int limit)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md);
+ VirtIOMEM *vmem = VIRTIO_MEM(&dev->vdev);
+ VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem);
+
+ vmc->decide_memslots(vmem, limit);
+}
+
+static unsigned int virtio_ccw_mem_get_memslots(MemoryDeviceState *md)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md);
+ VirtIOMEM *vmem = VIRTIO_MEM(&dev->vdev);
+ VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem);
+
+ return vmc->get_memslots(vmem);
+}
+
+static uint64_t virtio_ccw_mem_get_plugged_size(const MemoryDeviceState *md,
+ Error **errp)
+{
+ return object_property_get_uint(OBJECT(md), VIRTIO_MEM_SIZE_PROP,
+ errp);
+}
+
+static void virtio_ccw_mem_fill_device_info(const MemoryDeviceState *md,
+ MemoryDeviceInfo *info)
+{
+ VirtioMEMDeviceInfo *vi = g_new0(VirtioMEMDeviceInfo, 1);
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md);
+ VirtIOMEM *vmem = &dev->vdev;
+ VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem);
+ DeviceState *vdev = DEVICE(md);
+
+ if (vdev->id) {
+ vi->id = g_strdup(vdev->id);
+ }
+
+ /* let the real device handle everything else */
+ vpc->fill_device_info(vmem, vi);
+
+ info->u.virtio_mem.data = vi;
+ info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM;
+}
+
+static uint64_t virtio_ccw_mem_get_min_alignment(const MemoryDeviceState *md)
+{
+ return object_property_get_uint(OBJECT(md), VIRTIO_MEM_BLOCK_SIZE_PROP,
+ &error_abort);
+}
+
+static void virtio_ccw_mem_size_change_notify(Notifier *notifier, void *data)
+{
+ VirtIOMEMCcw *dev = container_of(notifier, VirtIOMEMCcw,
+ size_change_notifier);
+ DeviceState *vdev = DEVICE(dev);
+ char *qom_path = object_get_canonical_path(OBJECT(dev));
+ const uint64_t * const size_p = data;
+
+ qapi_event_send_memory_device_size_change(vdev->id, *size_p, qom_path);
+ g_free(qom_path);
+}
+
+static void virtio_ccw_mem_unplug_request_check(VirtIOMDCcw *vmd, Error **errp)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(vmd);
+ VirtIOMEM *vmem = &dev->vdev;
+ VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem);
+
+ vpc->unplug_request_check(vmem, errp);
+}
+
+static void virtio_ccw_mem_get_requested_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj);
+
+ object_property_get(OBJECT(&dev->vdev), name, v, errp);
+}
+
+static void virtio_ccw_mem_set_requested_size(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj);
+ DeviceState *vdev = DEVICE(obj);
+
+ /*
+ * Once virtio_ccw_mem_unplug_request_check() has passed (which ensures
+ * that the requested size is 0), don't allow modifying the requested size
+ * anymore; otherwise the VM might end up hotplugging memory before
+ * handling the unplug request.
+ */
+ if (vdev->pending_deleted_event) {
+ error_setg(errp, "'%s' cannot be changed if the device is in the"
+ " process of unplug", name);
+ return;
+ }
+
+ object_property_set(OBJECT(&dev->vdev), name, v, errp);
+}
+
+static const Property virtio_ccw_mem_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+};
+
+static void virtio_ccw_mem_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+ MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass);
+ VirtIOMDCcwClass *vmdc = VIRTIO_MD_CCW_CLASS(klass);
+
+ k->realize = virtio_ccw_mem_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ device_class_set_props(dc, virtio_ccw_mem_properties);
+
+ mdc->get_addr = virtio_ccw_mem_get_addr;
+ mdc->set_addr = virtio_ccw_mem_set_addr;
+ mdc->get_plugged_size = virtio_ccw_mem_get_plugged_size;
+ mdc->get_memory_region = virtio_ccw_mem_get_memory_region;
+ mdc->decide_memslots = virtio_ccw_mem_decide_memslots;
+ mdc->get_memslots = virtio_ccw_mem_get_memslots;
+ mdc->fill_device_info = virtio_ccw_mem_fill_device_info;
+ mdc->get_min_alignment = virtio_ccw_mem_get_min_alignment;
+
+ vmdc->unplug_request_check = virtio_ccw_mem_unplug_request_check;
+}
+
+static void virtio_ccw_mem_instance_init(Object *obj)
+{
+ VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj);
+ VirtIOMEMClass *vmc;
+ VirtIOMEM *vmem;
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_MEM);
+
+ dev->size_change_notifier.notify = virtio_ccw_mem_size_change_notify;
+ vmem = &dev->vdev;
+ vmc = VIRTIO_MEM_GET_CLASS(vmem);
+ /*
+ * We never remove the notifier again, as we expect both devices to
+ * disappear at the same time.
+ */
+ vmc->add_size_change_notifier(vmem, &dev->size_change_notifier);
+
+ object_property_add_alias(obj, VIRTIO_MEM_BLOCK_SIZE_PROP,
+ OBJECT(&dev->vdev), VIRTIO_MEM_BLOCK_SIZE_PROP);
+ object_property_add_alias(obj, VIRTIO_MEM_SIZE_PROP, OBJECT(&dev->vdev),
+ VIRTIO_MEM_SIZE_PROP);
+ object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size",
+ virtio_ccw_mem_get_requested_size,
+ virtio_ccw_mem_set_requested_size, NULL, NULL);
+}
+
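+/*
+ * Rough usage sketch (assumed command line, for illustration only):
+ *
+ *   qemu-system-s390x -M s390-ccw-virtio -m 4G,maxmem=8G \
+ *     -object memory-backend-ram,id=mem0,size=4G \
+ *     -device virtio-mem-ccw,id=vmem0,memdev=mem0,requested-size=1G
+ *
+ * "requested-size" can then be adjusted at runtime (e.g. via qom-set) to
+ * grow or shrink the guest memory, subject to the unplug restrictions
+ * implemented above.
+ */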
+static const TypeInfo virtio_ccw_mem = {
+ .name = TYPE_VIRTIO_MEM_CCW,
+ .parent = TYPE_VIRTIO_MD_CCW,
+ .instance_size = sizeof(VirtIOMEMCcw),
+ .instance_init = virtio_ccw_mem_instance_init,
+ .class_init = virtio_ccw_mem_class_init,
+};
+
+static void virtio_ccw_mem_register_types(void)
+{
+ type_register_static(&virtio_ccw_mem);
+}
+type_init(virtio_ccw_mem_register_types)
diff --git a/hw/s390x/virtio-ccw-mem.h b/hw/s390x/virtio-ccw-mem.h
new file mode 100644
index 0000000..738ab2c
--- /dev/null
+++ b/hw/s390x/virtio-ccw-mem.h
@@ -0,0 +1,34 @@
+/*
+ * Virtio MEM CCW device
+ *
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_S390X_VIRTIO_CCW_MEM_H
+#define HW_S390X_VIRTIO_CCW_MEM_H
+
+#include "virtio-ccw-md.h"
+#include "hw/virtio/virtio-mem.h"
+#include "qom/object.h"
+
+typedef struct VirtIOMEMCcw VirtIOMEMCcw;
+
+/*
+ * virtio-mem-ccw: This extends VirtIOMDCcw
+ */
+#define TYPE_VIRTIO_MEM_CCW "virtio-mem-ccw"
+DECLARE_INSTANCE_CHECKER(VirtIOMEMCcw, VIRTIO_MEM_CCW, TYPE_VIRTIO_MEM_CCW)
+
+struct VirtIOMEMCcw {
+ VirtIOMDCcw parent_obj;
+ VirtIOMEM vdev;
+ Notifier size_change_notifier;
+};
+
+#endif /* HW_S390X_VIRTIO_CCW_MEM_H */
diff --git a/hw/s390x/virtio-ccw-net.c b/hw/s390x/virtio-ccw-net.c
index 484e617..a7d4afb 100644
--- a/hw/s390x/virtio-ccw-net.c
+++ b/hw/s390x/virtio-ccw-net.c
@@ -46,15 +46,15 @@ static void virtio_ccw_net_instance_init(Object *obj)
"bootindex");
}
-static Property virtio_ccw_net_properties[] = {
+static const Property virtio_ccw_net_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm),
};
-static void virtio_ccw_net_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_net_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-rng.c b/hw/s390x/virtio-ccw-rng.c
index a3fffb5..3263287 100644
--- a/hw/s390x/virtio-ccw-rng.c
+++ b/hw/s390x/virtio-ccw-rng.c
@@ -43,15 +43,14 @@ static void virtio_ccw_rng_instance_init(Object *obj)
TYPE_VIRTIO_RNG);
}
-static Property virtio_ccw_rng_properties[] = {
+static const Property virtio_ccw_rng_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_rng_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-scsi.c b/hw/s390x/virtio-ccw-scsi.c
index d003f89..06b4c6c 100644
--- a/hw/s390x/virtio-ccw-scsi.c
+++ b/hw/s390x/virtio-ccw-scsi.c
@@ -53,15 +53,14 @@ static void virtio_ccw_scsi_instance_init(Object *obj)
TYPE_VIRTIO_SCSI);
}
-static Property virtio_ccw_scsi_properties[] = {
+static const Property virtio_ccw_scsi_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw-serial.c b/hw/s390x/virtio-ccw-serial.c
index 8f8d230..0dac590 100644
--- a/hw/s390x/virtio-ccw-serial.c
+++ b/hw/s390x/virtio-ccw-serial.c
@@ -53,15 +53,14 @@ static void virtio_ccw_serial_instance_init(Object *obj)
TYPE_VIRTIO_SERIAL);
}
-static Property virtio_ccw_serial_properties[] = {
+static const Property virtio_ccw_serial_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
VIRTIO_CCW_MAX_REV),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_serial_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index b467690..d2f85b3 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -12,8 +12,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
-#include "sysemu/kvm.h"
+#include "system/address-spaces.h"
+#include "system/kvm.h"
#include "net/net.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
@@ -32,7 +32,7 @@
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#define NR_CLASSIC_INDICATOR_BITS 64
@@ -913,14 +913,15 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
}
}
-static void virtio_ccw_reset(DeviceState *d)
+static void virtio_ccw_reset_hold(Object *obj, ResetType type)
{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(obj);
VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
virtio_ccw_reset_virtio(dev);
- if (vdc->parent_reset) {
- vdc->parent_reset(d);
+
+ if (vdc->parent_phases.hold) {
+ vdc->parent_phases.hold(obj, type);
}
}
@@ -1156,7 +1157,6 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
CcwDevice *ccw_dev = CCW_DEVICE(d);
SubchDev *sch = ccw_dev->sch;
int n = virtio_get_num_queues(vdev);
- S390FLICState *flic = s390_get_flic();
if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
dev->max_rev = 0;
@@ -1183,10 +1183,10 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
VIRTIO_QUEUE_MAX);
return;
}
- if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
+ if (virtio_get_num_queues(vdev) > ADAPTER_ROUTES_MAX_GSI) {
error_setg(errp, "The number of virtqueues %d "
"exceeds flic adapter route limit %d", n,
- flic->adapter_routes_max_batch);
+ ADAPTER_ROUTES_MAX_GSI);
return;
}
@@ -1228,16 +1228,18 @@ static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
virtio_ccw_stop_ioeventfd(_dev);
}
-static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
k->unplug = virtio_ccw_busdev_unplug;
dc->realize = virtio_ccw_busdev_realize;
dc->unrealize = virtio_ccw_busdev_unrealize;
- device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
+ resettable_class_set_parent_phases(rc, NULL, virtio_ccw_reset_hold, NULL,
+ &vdc->parent_phases);
}
static const TypeInfo virtio_ccw_device_info = {
@@ -1260,7 +1262,7 @@ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
}
-static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
+static void virtio_ccw_bus_class_init(ObjectClass *klass, const void *data)
{
VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
BusClass *bus_class = BUS_CLASS(klass);
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index fac186c..c7a830a 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -57,7 +57,7 @@ struct VirtIOCCWDeviceClass {
CCWDeviceClass parent_class;
void (*realize)(VirtioCcwDevice *dev, Error **errp);
void (*unrealize)(VirtioCcwDevice *dev);
- void (*parent_reset)(DeviceState *dev);
+ ResettablePhases parent_phases;
};
/* Performance improves when virtqueue kick processing is decoupled from the
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
index 42d9d2e..12c86eb 100644
--- a/hw/scsi/esp-pci.c
+++ b/hw/scsi/esp-pci.c
@@ -427,7 +427,7 @@ static void esp_pci_init(Object *obj)
object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}
-static void esp_pci_class_init(ObjectClass *klass, void *data)
+static void esp_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -440,7 +440,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_STORAGE_SCSI;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
- dc->reset = esp_pci_hard_reset;
+ device_class_set_legacy_reset(dc, esp_pci_hard_reset);
dc->vmsd = &vmstate_esp_pci_scsi;
}
@@ -450,7 +450,7 @@ static const TypeInfo esp_pci_info = {
.instance_init = esp_pci_init,
.instance_size = sizeof(PCIESPState),
.class_init = esp_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -557,7 +557,7 @@ static void dc390_scsi_realize(PCIDevice *dev, Error **errp)
contents[EE_CHKSUM2] = chksum >> 8;
}
-static void dc390_class_init(ObjectClass *klass, void *data)
+static void dc390_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 8504dd3..f24991f 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -197,39 +197,9 @@ static uint8_t esp_fifo_pop(ESPState *s)
return val;
}
-static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
-{
- const uint8_t *buf;
- uint32_t n, n2;
- int len;
-
- if (maxlen == 0) {
- return 0;
- }
-
- len = maxlen;
- buf = fifo8_pop_buf(fifo, len, &n);
- if (dest) {
- memcpy(dest, buf, n);
- }
-
- /* Add FIFO wraparound if needed */
- len -= n;
- len = MIN(len, fifo8_num_used(fifo));
- if (len) {
- buf = fifo8_pop_buf(fifo, len, &n2);
- if (dest) {
- memcpy(&dest[n], buf, n2);
- }
- n += n2;
- }
-
- return n;
-}
-
static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
- uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen);
+ uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);
esp_update_drq(s);
return len;
@@ -272,10 +242,7 @@ static uint32_t esp_get_stc(ESPState *s)
static uint8_t esp_pdma_read(ESPState *s)
{
- uint8_t val;
-
- val = esp_fifo_pop(s);
- return val;
+ return esp_fifo_pop(s);
}
static void esp_pdma_write(ESPState *s, uint8_t val)
@@ -335,7 +302,7 @@ static void do_command_phase(ESPState *s)
if (!cmdlen || !s->current_dev) {
return;
}
- esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);
+ fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);
current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
if (!current_lun) {
@@ -381,7 +348,7 @@ static void do_message_phase(ESPState *s)
/* Ignore extended messages for now */
if (s->cmdfifo_cdb_offset) {
int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
- esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
+ fifo8_drop(&s->cmdfifo, len);
s->cmdfifo_cdb_offset = 0;
}
}
@@ -486,7 +453,7 @@ static bool esp_cdb_ready(ESPState *s)
return false;
}
- pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
+ pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
if (n < len) {
/*
* In normal use the cmdfifo should never wrap, but include this check
@@ -1601,12 +1568,12 @@ static const VMStateDescription vmstate_sysbus_esp_scsi = {
}
};
-static void sysbus_esp_class_init(ObjectClass *klass, void *data)
+static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sysbus_esp_realize;
- dc->reset = sysbus_esp_hard_reset;
+ device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
dc->vmsd = &vmstate_sysbus_esp_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -1627,7 +1594,7 @@ static void esp_init(Object *obj)
fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
-static void esp_class_init(ObjectClass *klass, void *data)
+static void esp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index f1935e5..9ea4aa0 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -19,7 +19,7 @@
#include "hw/pci/pci_device.h"
#include "hw/scsi/scsi.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@@ -1112,7 +1112,7 @@ bad:
static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count)
{
int n;
- uint8_t buf[LSI_BUF_SIZE];
+ QEMU_UNINITIALIZED uint8_t buf[LSI_BUF_SIZE];
trace_lsi_memcpy(dest, src, count);
while (count) {
@@ -2372,10 +2372,10 @@ static void lsi_scsi_exit(PCIDevice *dev)
LSIState *s = LSI53C895A(dev);
address_space_destroy(&s->pci_io_as);
- timer_del(s->scripts_timer);
+ timer_free(s->scripts_timer);
}
-static void lsi_class_init(ObjectClass *klass, void *data)
+static void lsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2386,7 +2386,7 @@ static void lsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_LSI_53C895A;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
- dc->reset = lsi_scsi_reset;
+ device_class_set_legacy_reset(dc, lsi_scsi_reset);
dc->vmsd = &vmstate_lsi_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -2396,13 +2396,13 @@ static const TypeInfo lsi_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(LSIState),
.class_init = lsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void lsi53c810_class_init(ObjectClass *klass, void *data)
+static void lsi53c810_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index 2d0c607..844643d 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -21,9 +21,9 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
-#include "sysemu/dma.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/rtc.h"
+#include "system/dma.h"
+#include "system/block-backend.h"
+#include "system/rtc.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "qemu/iov.h"
@@ -981,13 +981,11 @@ static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd)
static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd)
{
- struct mfi_pd_list info;
- size_t dcmd_size = sizeof(info);
+ struct mfi_pd_list info = {};
BusChild *kid;
uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks;
dma_addr_t residual;
- memset(&info, 0, dcmd_size);
offset = 8;
dcmd_limit = offset + sizeof(struct mfi_pd_address);
if (cmd->iov_size < dcmd_limit) {
@@ -1429,11 +1427,10 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd)
static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
{
- struct mfi_ctrl_props info;
+ struct mfi_ctrl_props info = {};
size_t dcmd_size = sizeof(info);
dma_addr_t residual;
- memset(&info, 0x0, dcmd_size);
if (cmd->iov_size < dcmd_size) {
trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
dcmd_size);
@@ -1781,7 +1778,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
uint8_t cdb[16];
int len;
struct SCSIDevice *sdev = NULL;
- int target_id, lun_id, cdb_len;
+ int target_id, lun_id;
lba_count = le32_to_cpu(cmd->frame->io.header.data_len);
lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo);
@@ -1790,7 +1787,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
target_id = cmd->frame->header.target_id;
lun_id = cmd->frame->header.lun_id;
- cdb_len = cmd->frame->header.cdb_len;
if (target_id < MFI_MAX_LD && lun_id == 0) {
sdev = scsi_device_find(&s->bus, 0, target_id, lun_id);
@@ -1805,15 +1801,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
return MFI_STAT_DEVICE_NOT_FOUND;
}
- if (cdb_len > 16) {
- trace_megasas_scsi_invalid_cdb_len(
- mfi_frame_desc(frame_cmd), 1, target_id, lun_id, cdb_len);
- megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE));
- cmd->frame->header.scsi_status = CHECK_CONDITION;
- s->event_count++;
- return MFI_STAT_SCSI_DONE_WITH_ERROR;
- }
-
cmd->iov_size = lba_count * sdev->blocksize;
if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) {
megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE));
@@ -1824,7 +1811,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
megasas_encode_lba(cdb, lba_start, lba_count, is_write);
cmd->req = scsi_req_new(sdev, cmd->index,
- lun_id, cdb, cdb_len, cmd);
+ lun_id, cdb, sizeof(cdb), cmd);
if (!cmd->req) {
trace_megasas_scsi_req_alloc_failed(
mfi_frame_desc(frame_cmd), target_id, lun_id);
@@ -2236,7 +2223,6 @@ static uint64_t megasas_queue_read(void *opaque, hwaddr addr,
static void megasas_queue_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- return;
}
static const MemoryRegionOps megasas_queue_ops = {
@@ -2458,7 +2444,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info);
}
-static Property megasas_properties_gen1[] = {
+static const Property megasas_properties_gen1[] = {
DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge,
MEGASAS_DEFAULT_SGE),
DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds,
@@ -2469,10 +2455,9 @@ static Property megasas_properties_gen1[] = {
DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BIT("use_jbod", MegasasState, flags,
MEGASAS_FLAG_USE_JBOD, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property megasas_properties_gen2[] = {
+static const Property megasas_properties_gen2[] = {
DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge,
MEGASAS_DEFAULT_SGE),
DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds,
@@ -2483,7 +2468,6 @@ static Property megasas_properties_gen2[] = {
DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BIT("use_jbod", MegasasState, flags,
MEGASAS_FLAG_USE_JBOD, false),
- DEFINE_PROP_END_OF_LIST(),
};
typedef struct MegasasInfo {
@@ -2497,8 +2481,9 @@ typedef struct MegasasInfo {
int mmio_bar;
int osts;
const VMStateDescription *vmsd;
- Property *props;
- InterfaceInfo *interfaces;
+ const Property *props;
+ size_t props_count;
+ const InterfaceInfo *interfaces;
} MegasasInfo;
static struct MegasasInfo megasas_devices[] = {
@@ -2514,7 +2499,8 @@ static struct MegasasInfo megasas_devices[] = {
.osts = MFI_1078_RM | 1,
.vmsd = &vmstate_megasas_gen1,
.props = megasas_properties_gen1,
- .interfaces = (InterfaceInfo[]) {
+ .props_count = ARRAY_SIZE(megasas_properties_gen1),
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -2530,14 +2516,15 @@ static struct MegasasInfo megasas_devices[] = {
.osts = MFI_GEN2_RM,
.vmsd = &vmstate_megasas_gen2,
.props = megasas_properties_gen2,
- .interfaces = (InterfaceInfo[]) {
+ .props_count = ARRAY_SIZE(megasas_properties_gen2),
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
}
};
-static void megasas_class_init(ObjectClass *oc, void *data)
+static void megasas_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -2556,8 +2543,8 @@ static void megasas_class_init(ObjectClass *oc, void *data)
e->osts = info->osts;
e->product_name = info->product_name;
e->product_version = info->product_version;
- device_class_set_props(dc, info->props);
- dc->reset = megasas_scsi_reset;
+ device_class_set_props_n(dc, info->props, info->props_count);
+ device_class_set_legacy_reset(dc, megasas_scsi_reset);
dc->vmsd = info->vmsd;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = info->desc;
@@ -2582,11 +2569,11 @@ static void megasas_register_types(void)
type_info.name = info->name;
type_info.parent = TYPE_MEGASAS_BASE;
- type_info.class_data = (void *)info;
+ type_info.class_data = info;
type_info.class_init = megasas_class_init;
type_info.interfaces = info->interfaces;
- type_register(&type_info);
+ type_register_static(&type_info);
}
}
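The megasas hunks above show the recurring property-table conversion in this series: the DEFINE_PROP_END_OF_LIST() sentinel goes away, the arrays become const, and the element count is passed explicitly via ARRAY_SIZE() and device_class_set_props_n(). The following is a minimal standalone C sketch of that table shape only; Prop and the print loops are illustrative stand-ins, not QEMU code.

/* Standalone sketch: sentinel-terminated vs. counted property tables. */
#include <stddef.h>
#include <stdio.h>

typedef struct { const char *name; unsigned def; } Prop;   /* stand-in type */

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Old shape: a sentinel entry marks the end of the list. */
static const Prop old_style[] = {
    { "max_sge", 80 },
    { "max_cmds", 1000 },
    { NULL, 0 },                        /* DEFINE_PROP_END_OF_LIST() analogue */
};

/* New shape: no sentinel; the element count travels with the table. */
static const Prop new_style[] = {
    { "max_sge", 80 },
    { "max_cmds", 1000 },
};
static const size_t new_style_count = ARRAY_SIZE(new_style);

int main(void)
{
    for (const Prop *p = old_style; p->name; p++) {
        printf("old: %s=%u\n", p->name, p->def);
    }
    for (size_t i = 0; i < new_style_count; i++) {
        printf("new: %s=%u\n", new_style[i].name, new_style[i].def);
    }
    return 0;
}

Passing the count alongside the array is what the new props_count field in MegasasInfo carries for each device variant.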
diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c
index 0d5abb4..6cba92f 100644
--- a/hw/scsi/mptendian.c
+++ b/hw/scsi/mptendian.c
@@ -22,7 +22,7 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/pci/msi.h"
#include "qemu/iov.h"
#include "hw/scsi/scsi.h"
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index c5d3138..1ebe0b8 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/pci/msi.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
@@ -1410,14 +1410,13 @@ static const VMStateDescription vmstate_mptsas = {
}
};
-static Property mptsas_properties[] = {
+static const Property mptsas_properties[] = {
DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0),
/* TODO: test MSI support under Windows */
DEFINE_PROP_ON_OFF_AUTO("msi", MPTSASState, msi, ON_OFF_AUTO_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mptsas1068_class_init(ObjectClass *oc, void *data)
+static void mptsas1068_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -1431,7 +1430,7 @@ static void mptsas1068_class_init(ObjectClass *oc, void *data)
pc->subsystem_id = 0x8000;
pc->class_id = PCI_CLASS_STORAGE_SCSI;
device_class_set_props(dc, mptsas_properties);
- dc->reset = mptsas_reset;
+ device_class_set_legacy_reset(dc, mptsas_reset);
dc->vmsd = &vmstate_mptsas;
dc->desc = "LSI SAS 1068";
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
@@ -1442,7 +1441,7 @@ static const TypeInfo mptsas_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(MPTSASState),
.class_init = mptsas1068_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -1450,7 +1449,7 @@ static const TypeInfo mptsas_info = {
static void mptsas_register_types(void)
{
- type_register(&mptsas_info);
+ type_register_static(&mptsas_info);
}
type_init(mptsas_register_types)
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 53eff5d..9b12ee7 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -9,12 +9,12 @@
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "trace.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/cutils.h"
static char *scsibus_get_dev_path(DeviceState *dev);
@@ -100,8 +100,15 @@ static void scsi_device_for_each_req_sync(SCSIDevice *s,
assert(!runstate_is_running());
assert(qemu_in_main_thread());
- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
- fn(req, opaque);
+ /*
+ * Locking is not necessary because the guest is stopped and no other
+ * threads can be accessing the requests list, but take the lock for
+ * consistency.
+ */
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
+ fn(req, opaque);
+ }
}
}
@@ -115,21 +122,29 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
{
g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
SCSIDevice *s = data->s;
- AioContext *ctx;
- SCSIRequest *req;
- SCSIRequest *next;
+ g_autoptr(GList) reqs = NULL;
/*
- * The BB cannot have changed contexts between this BH being scheduled and
- * now: BBs' AioContexts, when they have a node attached, can only be
- * changed via bdrv_try_change_aio_context(), in a drained section. While
- * we have the in-flight counter incremented, that drain must block.
+ * Build a list of requests in this AioContext so fn() can be invoked later
+ * outside requests_lock.
*/
- ctx = blk_get_aio_context(s->conf.blk);
- assert(ctx == qemu_get_current_aio_context());
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ AioContext *ctx = qemu_get_current_aio_context();
+ SCSIRequest *req;
+ SCSIRequest *next;
+
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
+ if (req->ctx == ctx) {
+ scsi_req_ref(req); /* dropped after calling fn() */
+ reqs = g_list_prepend(reqs, req);
+ }
+ }
+ }
- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
- data->fn(req, data->fn_opaque);
+ /* Call fn() on each request */
+ for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) {
+ data->fn(elem->data, data->fn_opaque);
+ scsi_req_unref(elem->data);
}
/* Drop the reference taken by scsi_device_for_each_req_async() */
@@ -139,9 +154,35 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
blk_dec_in_flight(s->conf.blk);
}
+static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ AioContext *ctx = key;
+ SCSIDeviceForEachReqAsyncData *params = user_data;
+ SCSIDeviceForEachReqAsyncData *data;
+
+ data = g_new(SCSIDeviceForEachReqAsyncData, 1);
+ data->s = params->s;
+ data->fn = params->fn;
+ data->fn_opaque = params->fn_opaque;
+
+ /*
+ * Hold a reference to the SCSIDevice until
+ * scsi_device_for_each_req_async_bh() finishes.
+ */
+ object_ref(OBJECT(data->s));
+
+ /* Paired with scsi_device_for_each_req_async_bh() */
+ blk_inc_in_flight(data->s->conf.blk);
+
+ aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data);
+}
+
/*
* Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
- * runs in the AioContext that is executing the request.
+ * must be thread-safe because it runs concurrently in each AioContext that is
+ * executing a request.
+ *
* Keeps the BlockBackend's in-flight counter incremented until everything is
* done, so draining it will settle all scheduled @fn() calls.
*/
@@ -151,24 +192,26 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
{
assert(qemu_in_main_thread());
- SCSIDeviceForEachReqAsyncData *data =
- g_new(SCSIDeviceForEachReqAsyncData, 1);
-
- data->s = s;
- data->fn = fn;
- data->fn_opaque = opaque;
-
- /*
- * Hold a reference to the SCSIDevice until
- * scsi_device_for_each_req_async_bh() finishes.
- */
- object_ref(OBJECT(s));
+ /* The set of AioContexts where the requests are being processed */
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ SCSIRequest *req;
+ QTAILQ_FOREACH(req, &s->requests, next) {
+ g_hash_table_add(aio_contexts, req->ctx);
+ }
+ }
- /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
- blk_inc_in_flight(s->conf.blk);
- aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
- scsi_device_for_each_req_async_bh,
- data);
+ /* Schedule a BH for each AioContext */
+ SCSIDeviceForEachReqAsyncData params = {
+ .s = s,
+ .fn = fn,
+ .fn_opaque = opaque,
+ };
+ g_hash_table_foreach(
+ aio_contexts,
+ scsi_device_for_each_req_async_do_ctx,
+ &params
+ );
}
static void scsi_device_realize(SCSIDevice *s, Error **errp)
@@ -349,6 +392,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
dev->lun = lun;
}
+ qemu_mutex_init(&dev->requests_lock);
QTAILQ_INIT(&dev->requests);
scsi_device_realize(dev, &local_err);
if (local_err) {
@@ -356,7 +400,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
return;
}
dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
- scsi_dma_restart_cb, dev);
+ scsi_dma_restart_cb, NULL, dev);
}
static void scsi_qdev_unrealize(DeviceState *qdev)
@@ -369,6 +413,8 @@ static void scsi_qdev_unrealize(DeviceState *qdev)
scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
+ qemu_mutex_destroy(&dev->requests_lock);
+
scsi_device_unrealize(dev);
blockdev_mark_auto_del(dev->conf.blk);
@@ -868,6 +914,7 @@ invalid_opcode:
}
}
+ req->ctx = qemu_get_current_aio_context();
req->cmd = cmd;
req->residual = req->cmd.xfer;
@@ -964,7 +1011,10 @@ static void scsi_req_enqueue_internal(SCSIRequest *req)
req->sg = NULL;
}
req->enqueued = true;
- QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+ }
}
int32_t scsi_req_enqueue(SCSIRequest *req)
@@ -984,7 +1034,9 @@ static void scsi_req_dequeue(SCSIRequest *req)
trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
req->retry = false;
if (req->enqueued) {
- QTAILQ_REMOVE(&req->dev->requests, req, next);
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_REMOVE(&req->dev->requests, req, next);
+ }
req->enqueued = false;
scsi_req_unref(req);
}
@@ -1943,14 +1995,13 @@ const VMStateDescription vmstate_scsi_device = {
}
};
-static Property scsi_props[] = {
+static const Property scsi_props[] = {
DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_device_class_init(ObjectClass *klass, void *data)
+static void scsi_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
@@ -1962,8 +2013,7 @@ static void scsi_device_class_init(ObjectClass *klass, void *data)
static void scsi_dev_instance_init(Object *obj)
{
- DeviceState *dev = DEVICE(obj);
- SCSIDevice *s = SCSI_DEVICE(dev);
+ SCSIDevice *s = SCSI_DEVICE(obj);
device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", NULL,
@@ -1980,7 +2030,7 @@ static const TypeInfo scsi_device_type_info = {
.instance_init = scsi_dev_instance_init,
};
-static void scsi_bus_class_init(ObjectClass *klass, void *data)
+static void scsi_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@@ -1996,7 +2046,7 @@ static const TypeInfo scsi_bus_info = {
.parent = TYPE_BUS,
.instance_size = sizeof(SCSIBus),
.class_init = scsi_bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
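The scsi-bus.c changes above replace the single BH on the BlockBackend's AioContext with a per-context fan-out: under the new requests_lock, the distinct AioContexts of all enqueued requests are collected into a GHashTable used as a set, and one oneshot BH is then scheduled per context. The following standalone glib sketch shows only that collect-then-fan-out pattern; Context and the printed scheduling step are illustrative stand-ins for AioContext and aio_bh_schedule_oneshot().

/* Standalone glib sketch of the collect-then-fan-out pattern. */
#include <glib.h>
#include <stdio.h>

typedef struct { int id; } Context;     /* stand-in for AioContext */

/* One callback per distinct context, like scsi_device_for_each_req_async_do_ctx(). */
static void schedule_on_context(gpointer key, gpointer value, gpointer user_data)
{
    Context *ctx = key;
    (void)value;
    (void)user_data;
    printf("scheduling oneshot BH on context %d\n", ctx->id);
}

int main(void)
{
    Context a = { 1 }, b = { 2 };
    Context *req_ctx[] = { &a, &b, &a, &b, &a };    /* contexts of enqueued requests */
    g_autoptr(GHashTable) contexts = g_hash_table_new(NULL, NULL);

    /* Deduplicate: one set entry per distinct context (done under requests_lock above). */
    for (gsize i = 0; i < G_N_ELEMENTS(req_ctx); i++) {
        g_hash_table_add(contexts, req_ctx[i]);
    }

    /* Fan out: one scheduled callback per context. */
    g_hash_table_foreach(contexts, schedule_on_context, NULL);
    return 0;
}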
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index a67092d..b4782c6 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -32,13 +32,14 @@
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/arch_init.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
+#include "system/dma.h"
+#include "system/system.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "qom/object.h"
@@ -65,9 +66,15 @@ OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
struct SCSIDiskClass {
SCSIDeviceClass parent_class;
+ /*
+ * Callbacks receive ret == 0 for success. Errors are represented either as
+ * negative errno values, or as positive SAM status codes. For host_status
+ * errors, the function passes ret == -ENODEV and sets the host_status field
+ * of the SCSIRequest.
+ */
DMAIOFunc *dma_readv;
DMAIOFunc *dma_writev;
- bool (*need_fua_emulation)(SCSICommand *cmd);
+ bool (*need_fua)(SCSICommand *cmd);
void (*update_sense)(SCSIRequest *r);
};
@@ -78,7 +85,7 @@ typedef struct SCSIDiskReq {
uint32_t sector_count;
uint32_t buflen;
bool started;
- bool need_fua_emulation;
+ bool need_fua;
struct iovec iov;
QEMUIOVector qiov;
BlockAcctCookie acct;
@@ -98,12 +105,12 @@ struct SCSIDiskState {
uint64_t max_unmap_size;
uint64_t max_io_size;
uint32_t quirks;
- QEMUBH *bh;
char *version;
char *serial;
char *vendor;
char *product;
char *device_id;
+ char *loadparm; /* only for s390x */
bool tray_open;
bool tray_locked;
/*
@@ -217,22 +224,61 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
SCSISense sense = SENSE_CODE(NO_SENSE);
- int error = 0;
+ int16_t host_status;
+ int error;
bool req_has_sense = false;
BlockErrorAction action;
int status;
+ /*
+ * host_status should only be set for SG_IO requests that came back with a
+ * host_status error in scsi_block_sgio_complete(). This error path passes
+ * -ENODEV as the return value.
+ *
+ * Reset host_status in the request because we may still want to complete
+ * the request successfully with the 'stop' or 'ignore' error policy.
+ */
+ host_status = r->req.host_status;
+ if (host_status != -1) {
+ assert(ret == -ENODEV);
+ r->req.host_status = -1;
+ }
+
if (ret < 0) {
status = scsi_sense_from_errno(-ret, &sense);
error = -ret;
} else {
/* A passthrough command has completed with nonzero status. */
status = ret;
- if (status == CHECK_CONDITION) {
+ switch (status) {
+ case CHECK_CONDITION:
req_has_sense = true;
error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
- } else {
+ break;
+ case RESERVATION_CONFLICT:
+ /*
+ * Don't apply the error policy, always report to the guest.
+ *
+ * This is a passthrough code path, so it's not a backend error, but
+ * a response to an invalid guest request.
+ *
+ * Windows Failover Cluster validation intentionally sends invalid
+ * requests to verify that reservations work as intended. It is
+ * crucial that it sees the resulting errors.
+ *
+ * Treating a reservation conflict as a guest-side error is obvious
+ * when a pr-manager is in use. Without one, the situation is less
+ * clear, but there might be nothing that can be fixed on the host
+ * (like in the above example), and we don't want to be stuck in a
+ * loop where resuming the VM and retrying the request immediately
+ * stops it again. So always reporting is still the safer option in
+ * this case, too.
+ */
+ error = 0;
+ break;
+ default:
error = EINVAL;
+ break;
}
}
@@ -242,8 +288,9 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
* are usually retried immediately, so do not post them to QMP and
* do not account them as failed I/O.
*/
- if (req_has_sense &&
- scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
+ if (!error || (req_has_sense &&
+ scsi_sense_buf_is_guest_recoverable(r->req.sense,
+ sizeof(r->req.sense)))) {
action = BLOCK_ERROR_ACTION_REPORT;
acct_failed = false;
} else {
@@ -256,6 +303,10 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
if (acct_failed) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
+ if (host_status != -1) {
+ scsi_req_complete_failed(&r->req, host_status);
+ return true;
+ }
if (req_has_sense) {
sdc->update_sense(&r->req);
} else if (status == CHECK_CONDITION) {
@@ -283,7 +334,7 @@ static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
return true;
}
- if (ret < 0) {
+ if (ret != 0) {
return scsi_handle_rw_error(r, ret, acct_failed);
}
@@ -295,9 +346,8 @@ static void scsi_aio_complete(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -339,39 +389,16 @@ static bool scsi_is_cmd_fua(SCSICommand *cmd)
}
}
-static void scsi_write_do_fua(SCSIDiskReq *r)
-{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
-
- assert(r->req.aiocb == NULL);
- assert(!r->req.io_canceled);
-
- if (r->need_fua_emulation) {
- block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
- BLOCK_ACCT_FLUSH);
- r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
- return;
- }
-
- scsi_req_complete(&r->req, GOOD);
- scsi_req_unref(&r->req);
-}
-
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
assert(r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
r->sector += r->sector_count;
r->sector_count = 0;
- if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
- scsi_write_do_fua(r);
- return;
- } else {
- scsi_req_complete(&r->req, GOOD);
- }
+ scsi_req_complete(&r->req, GOOD);
done:
scsi_req_unref(&r->req);
@@ -385,9 +412,10 @@ static void scsi_dma_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_dma_complete_noio(r, ret);
@@ -395,15 +423,13 @@ static void scsi_dma_complete(void *opaque, int ret)
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert(r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
@@ -424,9 +450,10 @@ static void scsi_read_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
}
@@ -450,8 +477,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_readv, r, scsi_dma_complete, r,
DMA_DIRECTION_FROM_DEVICE);
@@ -515,7 +541,7 @@ static void scsi_read_data(SCSIRequest *req)
first = !r->started;
r->started = true;
- if (first && r->need_fua_emulation) {
+ if (first && r->need_fua) {
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
@@ -526,15 +552,13 @@ static void scsi_read_data(SCSIRequest *req)
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert (r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
@@ -542,8 +566,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
r->sector += n;
r->sector_count -= n;
if (r->sector_count == 0) {
- scsi_write_do_fua(r);
- return;
+ scsi_req_complete(&r->req, GOOD);
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
@@ -562,9 +585,10 @@ static void scsi_write_complete(void * opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_write_complete_noio(r, ret);
@@ -575,6 +599,7 @@ static void scsi_write_data(SCSIRequest *req)
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
+ BlockCompletionFunc *cb;
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
@@ -600,19 +625,17 @@ static void scsi_write_data(SCSIRequest *req)
if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
r->req.cmd.buf[0] == VERIFY_16) {
- if (r->req.sg) {
- scsi_dma_complete_noio(r, 0);
- } else {
- scsi_write_complete_noio(r, 0);
- }
+ block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ cb = r->req.sg ? scsi_dma_complete : scsi_write_complete;
+ r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, cb, r);
return;
}
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_writev, r, scsi_dma_complete, r,
DMA_DIRECTION_TO_DEVICE);
@@ -2344,7 +2367,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
- r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
+ r->need_fua = sdc->need_fua(&r->req.cmd);
if (r->sector_count == 0) {
scsi_req_complete(&r->req, GOOD);
}
@@ -2815,26 +2838,13 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
if (ret == 0) {
if (io_hdr->host_status != SCSI_HOST_OK) {
- scsi_req_complete_failed(&r->req, io_hdr->host_status);
- scsi_req_unref(&r->req);
- return;
- }
-
- if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ r->req.host_status = io_hdr->host_status;
+ ret = -ENODEV;
+ } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
ret = BUSY;
} else {
ret = io_hdr->status;
}
-
- if (ret > 0) {
- if (scsi_handle_rw_error(r, ret, true)) {
- scsi_req_unref(&r->req);
- return;
- }
-
- /* Ignore error. */
- ret = 0;
- }
}
req->cb(req->cb_opaque, ret);
@@ -3103,19 +3113,57 @@ BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
{
SCSIDiskReq *r = opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+ int flags = r->need_fua ? BDRV_REQ_FUA : 0;
+ return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, flags, cb, cb_opaque);
}
-static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
+static char *scsi_property_get_loadparm(Object *obj, Error **errp)
+{
+ return g_strdup(SCSI_DISK_BASE(obj)->loadparm);
+}
+
+static void scsi_property_set_loadparm(Object *obj, const char *value,
+ Error **errp)
+{
+ void *lp_str;
+
+ if (object_property_get_int(obj, "bootindex", NULL) < 0) {
+ error_setg(errp, "'loadparm' is only valid for boot devices");
+ return;
+ }
+
+ lp_str = g_malloc0(strlen(value) + 1);
+ if (!qdev_prop_sanitize_s390x_loadparm(lp_str, value, errp)) {
+ g_free(lp_str);
+ return;
+ }
+ SCSI_DISK_BASE(obj)->loadparm = lp_str;
+}
+
+static void scsi_property_add_specifics(DeviceClass *dc)
+{
+ ObjectClass *oc = OBJECT_CLASS(dc);
+
+ /* The loadparm property is only supported on s390x */
+ if (qemu_arch_available(QEMU_ARCH_S390X)) {
+ object_class_property_add_str(oc, "loadparm",
+ scsi_property_get_loadparm,
+ scsi_property_set_loadparm);
+ object_class_property_set_description(oc, "loadparm",
+ "load parameter (s390x only)");
+ }
+}
+
+static void scsi_disk_base_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
dc->fw_name = "disk";
- dc->reset = scsi_disk_reset;
+ device_class_set_legacy_reset(dc, scsi_disk_reset);
sdc->dma_readv = scsi_dma_readv;
sdc->dma_writev = scsi_dma_writev;
- sdc->need_fua_emulation = scsi_is_cmd_fua;
+ sdc->need_fua = scsi_is_cmd_fua;
}
static const TypeInfo scsi_disk_base_info = {
@@ -3139,12 +3187,12 @@ static const TypeInfo scsi_disk_base_info = {
DEFINE_PROP_BOOL("migrate-emulated-scsi-request", SCSIDiskState, migrate_emulated_scsi_request, true)
-static Property scsi_hd_properties[] = {
+static const Property scsi_hd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
DEFINE_PROP_BIT("removable", SCSIDiskState, features,
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
- SCSI_DISK_F_DPOFUA, false),
+ SCSI_DISK_F_DPOFUA, true),
DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
@@ -3159,7 +3207,6 @@ static Property scsi_hd_properties[] = {
quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
0),
DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_scsi_disk_state = {
@@ -3177,7 +3224,7 @@ static const VMStateDescription vmstate_scsi_disk_state = {
}
};
-static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
+static void scsi_hd_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3189,6 +3236,8 @@ static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI disk";
device_class_set_props(dc, scsi_hd_properties);
dc->vmsd = &vmstate_scsi_disk_state;
+
+ scsi_property_add_specifics(dc);
}
static const TypeInfo scsi_hd_info = {
@@ -3197,7 +3246,7 @@ static const TypeInfo scsi_hd_info = {
.class_init = scsi_hd_class_initfn,
};
-static Property scsi_cd_properties[] = {
+static const Property scsi_cd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
@@ -3215,10 +3264,9 @@ static Property scsi_cd_properties[] = {
0),
DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
+static void scsi_cd_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3229,6 +3277,8 @@ static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI CD-ROM";
device_class_set_props(dc, scsi_cd_properties);
dc->vmsd = &vmstate_scsi_disk_state;
+
+ scsi_property_add_specifics(dc);
}
static const TypeInfo scsi_cd_info = {
@@ -3238,7 +3288,7 @@ static const TypeInfo scsi_cd_info = {
};
#ifdef __linux__
-static Property scsi_block_properties[] = {
+static const Property scsi_block_properties[] = {
DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
@@ -3251,10 +3301,9 @@ static Property scsi_block_properties[] = {
-1),
DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
DEFAULT_IO_TIMEOUT),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_block_class_initfn(ObjectClass *klass, void *data)
+static void scsi_block_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3266,7 +3315,7 @@ static void scsi_block_class_initfn(ObjectClass *klass, void *data)
sdc->dma_readv = scsi_block_dma_readv;
sdc->dma_writev = scsi_block_dma_writev;
sdc->update_sense = scsi_block_update_sense;
- sdc->need_fua_emulation = scsi_block_no_fua;
+ sdc->need_fua = scsi_block_no_fua;
dc->desc = "SCSI block device passthrough";
device_class_set_props(dc, scsi_block_properties);
dc->vmsd = &vmstate_scsi_disk_state;
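The new SCSIDiskClass comment above pins down one convention for DMA callback completion values: ret == 0 means success, negative values are errnos from the backend, positive values are SAM status codes from passthrough, and -ENODEV together with a stashed host_status stands for a transport-level error. Below is a standalone sketch of that classification only; the SAM_* constant is illustrative and this is not QEMU code, just the decision table scsi_handle_rw_error() applies.

/* Standalone sketch of the completion-value convention. */
#include <errno.h>
#include <stdio.h>

enum { SAM_CHECK_CONDITION = 0x02 };    /* illustrative constant */

static const char *classify(int ret, int host_status)
{
    if (host_status != -1) {
        /* Transport error path: ret is expected to be -ENODEV here. */
        return ret == -ENODEV ? "host/transport error" : "inconsistent";
    }
    if (ret == 0) {
        return "success";
    }
    if (ret < 0) {
        return "backend errno";
    }
    return "SAM status from passthrough";
}

int main(void)
{
    printf("%s\n", classify(0, -1));                    /* success */
    printf("%s\n", classify(-EIO, -1));                 /* backend errno */
    printf("%s\n", classify(SAM_CHECK_CONDITION, -1));  /* SAM status */
    printf("%s\n", classify(-ENODEV, 0x07));            /* transport error */
    return 0;
}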
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index ee945f8..9e380a2 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -21,7 +21,7 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "trace.h"
#ifdef __linux__
@@ -772,12 +772,11 @@ static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}
-static Property scsi_generic_properties[] = {
+static const Property scsi_generic_properties[] = {
DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
DEFAULT_IO_TIMEOUT),
- DEFINE_PROP_END_OF_LIST(),
};
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
@@ -787,7 +786,7 @@ static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}
-static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
+static void scsi_generic_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -797,7 +796,7 @@ static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
sc->parse_cdb = scsi_generic_parse_cdb;
dc->fw_name = "disk";
dc->desc = "pass through generic scsi device (/dev/sg*)";
- dc->reset = scsi_generic_reset;
+ device_class_set_legacy_reset(dc, scsi_generic_reset);
device_class_set_props(dc, scsi_generic_properties);
dc->vmsd = &vmstate_scsi_device;
}
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index c75a6c8..20f70fb 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -1250,9 +1250,8 @@ static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
return 0;
}
-static Property spapr_vscsi_properties[] = {
+static const Property spapr_vscsi_properties[] = {
DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_spapr_vscsi = {
@@ -1268,7 +1267,7 @@ static const VMStateDescription vmstate_spapr_vscsi = {
},
};
-static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
+static void spapr_vscsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index 4c86370..43525ba 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -101,24 +101,25 @@ err_host_notifiers:
return ret;
}
-void vhost_scsi_common_stop(VHostSCSICommon *vsc)
+int vhost_scsi_common_stop(VHostSCSICommon *vsc)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vsc);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
- vhost_dev_stop(&vsc->dev, vdev, true);
+ ret = vhost_dev_stop(&vsc->dev, vdev, true);
if (k->set_guest_notifiers) {
- ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
+ int r = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
+ if (r < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", r);
+ return r;
}
}
- assert(ret >= 0);
vhost_dev_disable_notifiers(&vsc->dev, vdev);
+ return ret;
}
uint64_t vhost_scsi_common_get_features(VirtIODevice *vdev, uint64_t features,
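vhost_scsi_common_stop() above now returns a value instead of asserting: the result of stopping the vhost device is kept, a guest-notifier cleanup failure is reported and returned immediately, and notifier teardown only runs when cleanup succeeded. Below is a standalone sketch of that stop-path shape; backend_stop() and notifiers_cleanup() are hypothetical stand-ins for vhost_dev_stop() and set_guest_notifiers().

/* Standalone sketch of the error-propagating stop path. */
#include <stdio.h>

static int backend_stop(void)      { return 0; }    /* stand-in, succeeds */
static int notifiers_cleanup(void) { return -1; }   /* stand-in, forced failure */

static int common_stop(void)
{
    int ret = backend_stop();       /* keep the main teardown result */

    int r = notifiers_cleanup();
    if (r < 0) {
        fprintf(stderr, "notifier cleanup failed: %d\n", r);
        return r;                   /* later teardown depends on this step */
    }
    /* disabling notifiers etc. would follow here */
    return ret;
}

int main(void)
{
    printf("stop returned %d\n", common_stop());
    return 0;
}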
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 3d5fe09..cdf405b 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -29,7 +29,7 @@
#include "hw/fw-path-provider.h"
#include "hw/qdev-properties.h"
#include "qemu/cutils.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
@@ -38,6 +38,7 @@ static const int kernel_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -113,7 +114,7 @@ static void vhost_scsi_stop(VHostSCSI *s)
vhost_scsi_common_stop(vsc);
}
-static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
+static int vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
{
VHostSCSI *s = VHOST_SCSI(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
@@ -124,7 +125,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
}
if (vhost_dev_is_started(&vsc->dev) == start) {
- return;
+ return 0;
}
if (start) {
@@ -138,6 +139,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
} else {
vhost_scsi_stop(s);
}
+ return 0;
}
static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -171,7 +173,7 @@ static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
struct vhost_dev *dev = &vsc->dev;
struct vhost_vring_worker vq_worker;
struct vhost_worker_state worker;
- int i, ret;
+ int i, ret = 0;
/* Use default worker */
if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
@@ -313,7 +315,6 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
if (vhostfd >= 0) {
close(vhostfd);
}
- return;
}
static void vhost_scsi_unrealize(DeviceState *dev)
@@ -342,7 +343,7 @@ static struct vhost_dev *vhost_scsi_get_vhost(VirtIODevice *vdev)
return &vsc->dev;
}
-static Property vhost_scsi_properties[] = {
+static const Property vhost_scsi_properties[] = {
DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd),
DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn),
DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
@@ -358,13 +359,15 @@ static Property vhost_scsi_properties[] = {
DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
VIRTIO_SCSI_F_T10_PI,
false),
+ DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features,
+ VIRTIO_SCSI_F_HOTPLUG,
+ false),
DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon,
conf.worker_per_virtqueue, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_scsi_class_init(ObjectClass *klass, void *data)
+static void vhost_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -398,7 +401,7 @@ static const TypeInfo vhost_scsi_info = {
.instance_size = sizeof(VHostSCSI),
.class_init = vhost_scsi_class_init,
.instance_init = vhost_scsi_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index cc91ade..25f2d89 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -27,7 +27,7 @@
#include "hw/virtio/vhost-user-scsi.h"
#include "hw/virtio/virtio.h"
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Features supported by the host application */
static const int user_feature_bits[] = {
@@ -36,6 +36,7 @@ static const int user_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -51,19 +52,19 @@ static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp)
return ret;
}
-static void vhost_user_scsi_stop(VHostUserSCSI *s)
+static int vhost_user_scsi_stop(VHostUserSCSI *s)
{
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
if (!s->started_vu) {
- return;
+ return 0;
}
s->started_vu = false;
- vhost_scsi_common_stop(vsc);
+ return vhost_scsi_common_stop(vsc);
}
-static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
+static int vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserSCSI *s = (VHostUserSCSI *)vdev;
DeviceState *dev = DEVICE(vdev);
@@ -74,11 +75,11 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
int ret;
if (!s->connected) {
- return;
+ return -1;
}
if (vhost_dev_is_started(&vsc->dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
@@ -90,8 +91,12 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
qemu_chr_fe_disconnect(&vs->conf.chardev);
}
} else {
- vhost_user_scsi_stop(s);
+ ret = vhost_user_scsi_stop(s);
+ if (ret) {
+ return ret;
+ }
}
+ return 0;
}
static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -340,7 +345,7 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
virtio_scsi_common_unrealize(dev);
}
-static Property vhost_user_scsi_properties[] = {
+static const Property vhost_user_scsi_properties[] = {
DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev),
DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues,
@@ -359,7 +364,6 @@ static Property vhost_user_scsi_properties[] = {
DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
VIRTIO_SCSI_F_T10_PI,
false),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_scsi_reset(VirtIODevice *vdev)
@@ -386,7 +390,7 @@ static const VMStateDescription vmstate_vhost_scsi = {
},
};
-static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
+static void vhost_user_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -422,7 +426,7 @@ static const TypeInfo vhost_user_scsi_info = {
.instance_size = sizeof(VHostUserSCSI),
.class_init = vhost_user_scsi_class_init,
.instance_init = vhost_user_scsi_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 2806a12..95f13fb 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -15,9 +15,10 @@
#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
+#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-bus.h"
/* Context: BQL held */
@@ -28,7 +29,14 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- if (vs->conf.iothread) {
+ if (vs->conf.iothread && vs->conf.iothread_vq_mapping_list) {
+ error_setg(errp,
+ "iothread and iothread-vq-mapping properties cannot be set "
+ "at the same time");
+ return;
+ }
+
+ if (vs->conf.iothread || vs->conf.iothread_vq_mapping_list) {
if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
error_setg(errp,
"device is incompatible with iothread "
@@ -39,15 +47,64 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
error_setg(errp, "ioeventfd is required for iothread");
return;
}
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
- } else {
- if (!virtio_device_ioeventfd_enabled(vdev)) {
+ }
+
+ s->vq_aio_context = g_new(AioContext *, vs->conf.num_queues +
+ VIRTIO_SCSI_VQ_NUM_FIXED);
+
+ /*
+ * Handle the ctrl virtqueue in the main loop thread where device resets
+ * can be performed.
+ */
+ s->vq_aio_context[0] = qemu_get_aio_context();
+
+ /*
+ * Handle the event virtqueue in the main loop thread where its no_poll
+ * behavior won't stop IOThread polling.
+ */
+ s->vq_aio_context[1] = qemu_get_aio_context();
+
+ if (vs->conf.iothread_vq_mapping_list) {
+ if (!iothread_vq_mapping_apply(vs->conf.iothread_vq_mapping_list,
+ &s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED],
+ vs->conf.num_queues, errp)) {
+ g_free(s->vq_aio_context);
+ s->vq_aio_context = NULL;
return;
}
- s->ctx = qemu_get_aio_context();
+ } else if (vs->conf.iothread) {
+ AioContext *ctx = iothread_get_aio_context(vs->conf.iothread);
+ for (uint16_t i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
+ }
+
+ /* Released in virtio_scsi_dataplane_cleanup() */
+ object_ref(OBJECT(vs->conf.iothread));
+ } else {
+ AioContext *ctx = qemu_get_aio_context();
+ for (unsigned i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
+ }
}
}
+/* Context: BQL held */
+void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s)
+{
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+
+ if (vs->conf.iothread_vq_mapping_list) {
+ iothread_vq_mapping_cleanup(vs->conf.iothread_vq_mapping_list);
+ }
+
+ if (vs->conf.iothread) {
+ object_unref(OBJECT(vs->conf.iothread));
+ }
+
+ g_free(s->vq_aio_context);
+ s->vq_aio_context = NULL;
+}
+
static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -66,31 +123,20 @@ static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
}
/* Context: BH in IOThread */
-static void virtio_scsi_dataplane_stop_bh(void *opaque)
+static void virtio_scsi_dataplane_stop_vq_bh(void *opaque)
{
- VirtIOSCSI *s = opaque;
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ AioContext *ctx = qemu_get_current_aio_context();
+ VirtQueue *vq = opaque;
EventNotifier *host_notifier;
- int i;
- virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq);
+ virtio_queue_aio_detach_host_notifier(vq, ctx);
+ host_notifier = virtio_queue_get_host_notifier(vq);
/*
* Test and clear notifier after disabling event, in case poll callback
* didn't have time to run.
*/
virtio_queue_host_notifier_read(host_notifier);
-
- virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->event_vq);
- virtio_queue_host_notifier_read(host_notifier);
-
- for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]);
- virtio_queue_host_notifier_read(host_notifier);
- }
}
/* Context: BQL held */
@@ -154,11 +200,14 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
smp_wmb(); /* paired with aio_notify_accept() */
if (s->bus.drain_count == 0) {
- virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
- virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier(vs->ctrl_vq,
+ s->vq_aio_context[0]);
+ virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq,
+ s->vq_aio_context[1]);
for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
+ virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], ctx);
}
}
return 0;
@@ -207,7 +256,11 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
s->dataplane_stopping = true;
if (s->bus.drain_count == 0) {
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
+ for (i = 0; i < vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED; i++) {
+ VirtQueue *vq = virtio_get_queue(&vs->parent_obj, i);
+ AioContext *ctx = s->vq_aio_context[i];
+ aio_wait_bh_oneshot(ctx, virtio_scsi_dataplane_stop_vq_bh, vq);
+ }
}
blk_drain_all(); /* ensure there are no in-flight requests */
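The dataplane setup above now builds a per-virtqueue AioContext table instead of a single s->ctx: the two fixed virtqueues (ctrl and event) stay on the main loop, and each command virtqueue is assigned either a context from the iothread-vq-mapping list or one shared context. Below is a standalone sketch of building such a table; the strings and the round-robin assignment are illustrative stand-ins for AioContext pointers and the mapping property.

/* Standalone sketch of the vq-to-context table. */
#include <stdio.h>

#define VQ_NUM_FIXED 2                  /* ctrl + event, as in the diff */
#define NUM_QUEUES   4                  /* illustrative command vq count */

int main(void)
{
    const char *main_ctx = "main-loop";
    const char *io_ctx[] = { "iothread0", "iothread1" };
    const char *vq_ctx[VQ_NUM_FIXED + NUM_QUEUES];

    vq_ctx[0] = main_ctx;               /* ctrl vq: device resets run here */
    vq_ctx[1] = main_ctx;               /* event vq: no_poll stays here */
    for (unsigned i = 0; i < NUM_QUEUES; i++) {
        /* round-robin stands in for iothread_vq_mapping_apply() */
        vq_ctx[VQ_NUM_FIXED + i] = io_ctx[i % 2];
    }

    for (unsigned i = 0; i < VQ_NUM_FIXED + NUM_QUEUES; i++) {
        printf("vq %u -> %s\n", i, vq_ctx[i]);
    }
    return 0;
}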
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 9f02cee..34ae14f 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -22,11 +22,12 @@
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/dma.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
+#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"
@@ -47,7 +48,7 @@ typedef struct VirtIOSCSIReq {
/* Used for two-stage request submission and TMFs deferred to BH */
QTAILQ_ENTRY(VirtIOSCSIReq) next;
- /* Used for cancellation of request during TMFs */
+ /* Used for cancellation of request during TMFs. Atomic. */
int remaining;
SCSIRequest *sreq;
@@ -102,13 +103,18 @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req)
g_free(req);
}
-static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
+static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
VirtIOSCSI *s = req->dev;
VirtQueue *vq = req->vq;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
if (s->dataplane_started && !s->dataplane_fenced) {
virtio_notify_irqfd(vdev, vq);
@@ -116,6 +122,10 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_notify(vdev, vq);
}
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (req->sreq) {
req->sreq->hba_private = NULL;
scsi_req_unref(req->sreq);
@@ -123,34 +133,20 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
-static void virtio_scsi_complete_req_bh(void *opaque)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
- VirtIOSCSIReq *req = opaque;
+ virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
- virtio_scsi_complete_req(req);
-}
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
-/*
- * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
- * thread cannot touch the virtqueue since that could race with an IOThread.
- */
-static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
-{
- VirtIOSCSI *s = req->dev;
+ virtqueue_detach_element(req->vq, &req->elem, 0);
- if (!s->ctx || s->ctx == qemu_get_aio_context()) {
- /* No need to schedule a BH when there is no IOThread */
- virtio_scsi_complete_req(req);
- } else {
- /* Run request completion in the IOThread */
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
}
-}
-static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
-{
- virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
- virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
}
@@ -235,12 +231,21 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
return 0;
}
-static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
+static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
{
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
VirtIOSCSIReq *req;
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (!req) {
return NULL;
}
@@ -294,137 +299,157 @@ typedef struct {
VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;
+static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
+{
+ if (qatomic_fetch_dec(&tmf->remaining) == 1) {
+ trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
+ tmf->req.tmf.tag, tmf->resp.tmf.response);
+
+ virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
+ }
+}
+
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
VirtIOSCSICancelNotifier *n = container_of(notifier,
VirtIOSCSICancelNotifier,
notifier);
- if (--n->tmf_req->remaining == 0) {
- VirtIOSCSIReq *req = n->tmf_req;
-
- trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
- req->req.tmf.tag, req->resp.tmf.response);
- virtio_scsi_complete_req(req);
- }
+ virtio_scsi_tmf_dec_remaining(n->tmf_req);
g_free(n);
}
-static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
+static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
{
- if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
- assert(blk_get_aio_context(d->conf.blk) == s->ctx);
- }
+ VirtIOSCSICancelNotifier *notifier;
+
+ assert(r->ctx == qemu_get_current_aio_context());
+
+ /* Decremented in virtio_scsi_cancel_notify() */
+ qatomic_inc(&tmf->remaining);
+
+ notifier = g_new(VirtIOSCSICancelNotifier, 1);
+ notifier->notifier.notify = virtio_scsi_cancel_notify;
+ notifier->tmf_req = tmf;
+ scsi_req_cancel_async(r, &notifier->notifier);
}
-static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
+/* Execute a TMF on the requests in the current AioContext */
+static void virtio_scsi_do_tmf_aio_context(void *opaque)
{
- VirtIOSCSI *s = req->dev;
- SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
- BusChild *kid;
- int target;
+ AioContext *ctx = qemu_get_current_aio_context();
+ VirtIOSCSIReq *tmf = opaque;
+ VirtIOSCSI *s = tmf->dev;
+ SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
+ SCSIRequest *r;
+ bool match_tag;
- switch (req->req.tmf.subtype) {
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- if (!d) {
- req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
- goto out;
- }
- if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
- req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
- goto out;
- }
- qatomic_inc(&s->resetting);
- device_cold_reset(&d->qdev);
- qatomic_dec(&s->resetting);
+ if (!d) {
+ tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ virtio_scsi_tmf_dec_remaining(tmf);
+ return;
+ }
+
+ /*
+ * This function could handle other subtypes that need to be processed in
+ * the request's AioContext in the future, but for now only request
+ * cancellation subtypes are handled here.
+ */
+ switch (tmf->req.tmf.subtype) {
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+ match_tag = true;
+ break;
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+ match_tag = false;
break;
+ default:
+ g_assert_not_reached();
+ }
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- target = req->req.tmf.lun[1];
- qatomic_inc(&s->resetting);
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
- rcu_read_lock();
- QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
- SCSIDevice *d1 = SCSI_DEVICE(kid->child);
- if (d1->channel == 0 && d1->id == target) {
- device_cold_reset(&d1->qdev);
+ if (r->ctx != ctx) {
+ continue;
+ }
+ if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
+ continue;
}
+ virtio_scsi_tmf_cancel_req(tmf, r);
}
- rcu_read_unlock();
-
- qatomic_dec(&s->resetting);
- break;
-
- default:
- g_assert_not_reached();
- break;
}
-out:
- object_unref(OBJECT(d));
- virtio_scsi_complete_req_from_main_loop(req);
+ /* Incremented by virtio_scsi_do_tmf() */
+ virtio_scsi_tmf_dec_remaining(tmf);
+
+ object_unref(d);
}
-/* Some TMFs must be processed from the main loop thread */
-static void virtio_scsi_do_tmf_bh(void *opaque)
+static void dummy_bh(void *opaque)
{
- VirtIOSCSI *s = opaque;
- QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
+ /* Do nothing */
+}
+/*
+ * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
+ */
+static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
+{
GLOBAL_STATE_CODE();
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
- QTAILQ_INSERT_TAIL(&reqs, req, next);
- }
+ assert(!s->dataplane_started);
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
- QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
- QTAILQ_REMOVE(&reqs, req, next);
- virtio_scsi_do_one_tmf_bh(req);
+ /* Our BH only runs after previously scheduled BHs */
+ aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
}
}
-static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
+/*
+ * Run the TMF in a specific AioContext, handling only requests in that
+ * AioContext. This is necessary because requests can run in different
+ * AioContexts and it is only possible to cancel them from the AioContext where
+ * they are running.
+ */
+static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
+ AioContext *ctx)
{
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
+ /* Decremented in virtio_scsi_do_tmf_aio_context() */
+ qatomic_inc(&tmf->remaining);
- GLOBAL_STATE_CODE();
-
- /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
- if (s->tmf_bh) {
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
-
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
-
- /* SAM-6 6.3.2 Hard reset */
- req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
- virtio_scsi_complete_req(req);
- }
+ /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
+ aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
}
-static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
+/*
+ * Returns the AioContext for a given TMF's tag field or NULL. Note that the
+ * request identified by the tag may have completed by the time you can execute
+ * a BH in the AioContext, so don't assume the request still exists in your BH.
+ */
+static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
+ VirtIOSCSIReq *tmf)
{
- VirtIOSCSI *s = req->dev;
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ SCSIRequest *r;
+ SCSIRequest *next;
+
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
+ /* hba_private is non-NULL while the request is enqueued */
+ assert(cmd_req);
- if (!s->tmf_bh) {
- s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
- qemu_bh_schedule(s->tmf_bh);
+ if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
+ return r->ctx;
+ }
}
}
+ return NULL;
}
/* Return 0 if the request is ready to be completed and return to guest;
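The hunk above introduces the counting scheme that lets a TMF span several AioContexts: the remaining counter starts with one reference held by the dispatcher, each deferred context and each cancelled request adds one, and whoever drops the count to zero completes the TMF. Below is a standalone C11 sketch of that scheme; tmf_complete() is an illustrative stand-in for virtio_scsi_complete_req().

/* Standalone C11 sketch of the 'remaining' reference-count pattern. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int remaining; } Tmf;

static void tmf_complete(Tmf *t)
{
    (void)t;
    printf("TMF complete\n");
}

static void tmf_dec_remaining(Tmf *t)
{
    if (atomic_fetch_sub(&t->remaining, 1) == 1) {
        tmf_complete(t);            /* last holder completes the request */
    }
}

int main(void)
{
    Tmf t = { .remaining = 1 };     /* reference held by the dispatcher */
    int contexts = 3;

    for (int i = 0; i < contexts; i++) {
        atomic_fetch_add(&t.remaining, 1);  /* one per deferred AioContext */
    }
    for (int i = 0; i < contexts; i++) {
        tmf_dec_remaining(&t);              /* each context finishes */
    }
    tmf_dec_remaining(&t);                  /* dispatcher drops its reference */
    return 0;
}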
@@ -434,9 +459,9 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
SCSIRequest *r, *next;
+ AioContext *ctx;
int ret = 0;
- virtio_scsi_ctx_check(s, d);
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
req->resp.tmf.response = VIRTIO_SCSI_S_OK;
@@ -451,7 +476,22 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->req.tmf.tag, req->req.tmf.subtype);
switch (req->req.tmf.subtype) {
- case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
+ ctx = find_aio_context_for_tmf_tag(d, req);
+ if (ctx) {
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+ ret = -EINPROGRESS;
+ }
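+ /*
+ * No pending request carries this tag, so there is nothing left to abort;
+ * the default VIRTIO_SCSI_S_OK ("FUNCTION COMPLETE") response set above
+ * stands.
+ */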
+ break;
+ }
+
case VIRTIO_SCSI_T_TMF_QUERY_TASK:
if (!d) {
goto fail;
@@ -459,44 +499,82 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- VirtIOSCSIReq *cmd_req = r->hba_private;
- if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
- break;
+
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
+
+ if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
+ /*
+ * "If the specified command is present in the task set,
+ * then return a service response set to FUNCTION
+ * SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ }
}
}
- if (r) {
- /*
- * Assert that the request has not been completed yet, we
- * check for it in the loop above.
- */
- assert(r->hba_private);
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
- /* "If the specified command is present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining = 1;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->tmf_req = req;
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- scsi_req_cancel_async(r, &notifier->notifier);
- ret = -EINPROGRESS;
+ break;
+
+ case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+ qatomic_inc(&s->resetting);
+ device_cold_reset(&d->qdev);
+ qatomic_dec(&s->resetting);
+ break;
+
+ case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: {
+ BusChild *kid;
+ int target = req->req.tmf.lun[1];
+ qatomic_inc(&s->resetting);
+
+ rcu_read_lock();
+ QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *d1 = SCSI_DEVICE(kid->child);
+ if (d1->channel == 0 && d1->id == target) {
+ device_cold_reset(&d1->qdev);
}
}
+ rcu_read_unlock();
+
+ qatomic_dec(&s->resetting);
break;
+ }
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- virtio_scsi_defer_tmf_to_bh(req);
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
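+ /*
+ * Hold an extra reference on "remaining" until every per-AioContext BH has
+ * been scheduled, so that completions arriving while we are still in the
+ * loop cannot complete the TMF too early.
+ */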
+ qatomic_inc(&req->remaining);
+
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+ ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
+
+ if (!g_hash_table_add(aio_contexts, ctx)) {
+ continue; /* skip previously added AioContext */
+ }
+
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+ }
+
+ virtio_scsi_tmf_dec_remaining(req);
ret = -EINPROGRESS;
break;
+ }
- case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
- case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
if (!d) {
goto fail;
@@ -505,34 +583,19 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
goto incorrect_lun;
}
- /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
- * This way, if the bus starts calling back to the notifiers
- * even before we finish the loop, virtio_scsi_cancel_notify
- * will not complete the TMF too early.
- */
- req->remaining = 1;
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- if (r->hba_private) {
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
- /* "If there is any command present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- break;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining++;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- notifier->tmf_req = req;
- scsi_req_cancel_async(r, &notifier->notifier);
- }
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ /* Request has hba_private while enqueued */
+ assert(r->hba_private);
+
+ /*
+ * "If there is any command present in the task set, then
+ * return a service response set to FUNCTION SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ break;
}
}
- if (--req->remaining > 0) {
- ret = -EINPROGRESS;
- }
break;
case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
@@ -563,7 +626,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
}
@@ -571,7 +634,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (type == VIRTIO_SCSI_T_TMF) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
r = virtio_scsi_do_tmf(s, req);
@@ -581,7 +644,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
sizeof(VirtIOSCSICtrlANResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
req->req.an.event_requested =
@@ -601,7 +664,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
req->resp.an.response);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->ctrl_lock);
} else {
assert(r == -EINPROGRESS);
}
@@ -611,7 +674,7 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;
- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
virtio_scsi_handle_ctrl_req(s, req);
}
}
@@ -626,9 +689,12 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
*/
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
- if (!s->ctx || s->dataplane_started) {
+ if (s->dataplane_started) {
return false;
}
+ if (s->vq_aio_context[0] == qemu_get_aio_context()) {
+ return false; /* not using IOThreads */
+ }
virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
return !s->dataplane_fenced;
@@ -655,7 +721,7 @@ static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
* in virtio_scsi_command_complete.
*/
req->resp_size = sizeof(VirtIOSCSICmdResp);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, NULL);
}
static void virtio_scsi_command_failed(SCSIRequest *r)
@@ -789,7 +855,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
virtio_scsi_fail_cmd_req(req);
return -ENOTSUP;
} else {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, NULL);
return -EINVAL;
}
}
@@ -802,7 +868,6 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
virtio_scsi_complete_cmd_req(req);
return -ENOENT;
}
- virtio_scsi_ctx_check(s, d);
req->sreq = scsi_req_new(d, req->req.cmd.tag,
virtio_scsi_get_lun(req->req.cmd.lun),
req->req.cmd.cdb, vs->cdb_size, req);
@@ -844,7 +909,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
virtio_queue_set_notification(vq, 0);
}
- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -937,7 +1002,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
assert(!s->dataplane_started);
- virtio_scsi_reset_tmf_bh(s);
+ virtio_scsi_flush_defer_tmf_to_aio_context(s);
qatomic_inc(&s->resetting);
bus_cold_reset(BUS(&s->bus));
@@ -945,7 +1010,10 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->events_dropped = false;
+
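+ /*
+ * events_dropped is protected by event_lock everywhere else (see
+ * virtio_scsi_push_event()), so take the lock here too even though the
+ * dataplane has already been stopped at this point.
+ */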
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ s->events_dropped = false;
+ }
}
typedef struct {
@@ -974,19 +1042,21 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
return;
}
- req = virtio_scsi_pop_req(s, vs->event_vq);
- if (!req) {
- s->events_dropped = true;
- return;
- }
+ req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ if (!req) {
+ s->events_dropped = true;
+ return;
+ }
- if (s->events_dropped) {
- event |= VIRTIO_SCSI_T_EVENTS_MISSED;
- s->events_dropped = false;
+ if (s->events_dropped) {
+ event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ s->events_dropped = false;
+ }
}
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->event_lock);
return;
}
@@ -1006,12 +1076,18 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
}
trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->event_lock);
}
static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
- if (s->events_dropped) {
+ bool events_dropped;
+
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ events_dropped = s->events_dropped;
+ }
+
+ if (events_dropped) {
VirtIOSCSIEventInfo info = {
.event = VIRTIO_SCSI_T_NO_EVENT,
};
@@ -1062,17 +1138,16 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
{
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED];
SCSIDevice *sd = SCSI_DEVICE(dev);
- int ret;
- if (s->ctx && !s->dataplane_fenced) {
- if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- return;
- }
- ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
- if (ret < 0) {
- return;
- }
+ if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) {
+ /*
+ * Try to make the BlockBackend's AioContext match ours. Ignore failure
+ * because I/O will still work, although block jobs and other users
+ * might be slower when multiple AioContexts use a BlockBackend.
+ */
+ blk_set_aio_context(sd->conf.blk, ctx, NULL);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
@@ -1107,7 +1182,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
- if (s->ctx) {
+ if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) {
/* If other users keep the BlockBackend in the iothread, that's ok */
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
}
@@ -1141,7 +1216,7 @@ static void virtio_scsi_drained_begin(SCSIBus *bus)
for (uint32_t i = 0; i < total_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
- virtio_queue_aio_detach_host_notifier(vq, s->ctx);
+ virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
}
}
@@ -1167,10 +1242,12 @@ static void virtio_scsi_drained_end(SCSIBus *bus)
for (uint32_t i = 0; i < total_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
+ AioContext *ctx = s->vq_aio_context[i];
+
if (vq == vs->event_vq) {
- virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx);
} else {
- virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier(vq, ctx);
}
}
}
@@ -1239,8 +1316,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
VirtIOSCSI *s = VIRTIO_SCSI(dev);
Error *err = NULL;
- QTAILQ_INIT(&s->tmf_bh_list);
- qemu_mutex_init(&s->tmf_bh_lock);
+ qemu_mutex_init(&s->ctrl_lock);
+ qemu_mutex_init(&s->event_lock);
virtio_scsi_common_realize(dev,
virtio_scsi_handle_ctrl,
@@ -1275,18 +1352,19 @@ void virtio_scsi_common_unrealize(DeviceState *dev)
virtio_cleanup(vdev);
}
+/* main loop */
static void virtio_scsi_device_unrealize(DeviceState *dev)
{
VirtIOSCSI *s = VIRTIO_SCSI(dev);
- virtio_scsi_reset_tmf_bh(s);
-
+ virtio_scsi_dataplane_cleanup(s);
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
virtio_scsi_common_unrealize(dev);
- qemu_mutex_destroy(&s->tmf_bh_lock);
+ qemu_mutex_destroy(&s->event_lock);
+ qemu_mutex_destroy(&s->ctrl_lock);
}
-static Property virtio_scsi_properties[] = {
+static const Property virtio_scsi_properties[] = {
DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
VIRTIO_SCSI_AUTO_NUM_QUEUES),
DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
@@ -1303,7 +1381,8 @@ static Property virtio_scsi_properties[] = {
VIRTIO_SCSI_F_CHANGE, true),
DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
TYPE_IOTHREAD, IOThread *),
- DEFINE_PROP_END_OF_LIST(),
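+ /*
+ * Assumed usage (mirrors virtio-blk's property of the same name): with JSON
+ * -device syntax, e.g. "iothread-vq-mapping": [{"iothread": "iot0"}], each
+ * virtqueue can be assigned to an IOThread.
+ */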
+ DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI,
+ parent_obj.conf.iothread_vq_mapping_list),
};
static const VMStateDescription vmstate_virtio_scsi = {
@@ -1316,7 +1395,7 @@ static const VMStateDescription vmstate_virtio_scsi = {
},
};
-static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
+static void virtio_scsi_common_class_init(ObjectClass *klass, const void *data)
{
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1325,7 +1404,7 @@ static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
-static void virtio_scsi_class_init(ObjectClass *klass, void *data)
+static void virtio_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -1359,7 +1438,7 @@ static const TypeInfo virtio_scsi_info = {
.parent = TYPE_VIRTIO_SCSI_COMMON,
.instance_size = sizeof(VirtIOSCSI),
.class_init = virtio_scsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index cd7bf6a..7c98b1b 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -68,18 +68,7 @@ struct PVSCSIClass {
OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI)
-/* Compatibility flags for migration */
-#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
-#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
- (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
-#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1
-#define PVSCSI_COMPAT_DISABLE_PCIE \
- (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT)
-
-#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \
- ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION)
-#define PVSCSI_MSI_OFFSET(s) \
- (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c)
+#define PVSCSI_MSI_OFFSET (0x7c)
#define PVSCSI_EXP_EP_OFFSET (0x40)
typedef struct PVSCSIRingInfo {
@@ -129,8 +118,6 @@ struct PVSCSIState {
uint8_t msi_used; /* For migration compatibility */
PVSCSIRingInfo rings; /* Data transfer rings manager */
uint32_t resetting; /* Reset in progress */
-
- uint32_t compat_flags;
};
typedef struct PVSCSIRequest {
@@ -1110,7 +1097,7 @@ pvscsi_init_msi(PVSCSIState *s)
int res;
PCIDevice *d = PCI_DEVICE(s);
- res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS,
+ res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS,
PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL);
if (res < 0) {
trace_pvscsi_init_msi_fail(res);
@@ -1158,15 +1145,11 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
trace_pvscsi_state("init");
/* PCI subsystem ID, subsystem vendor ID, revision */
- if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) {
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000);
- } else {
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
- PCI_VENDOR_ID_VMWARE);
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
- PCI_DEVICE_ID_VMWARE_PVSCSI);
- pci_config_set_revision(pci_dev->config, 0x2);
- }
+ pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
+ PCI_VENDOR_ID_VMWARE);
+ pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
+ PCI_DEVICE_ID_VMWARE_PVSCSI);
+ pci_config_set_revision(pci_dev->config, 0x2);
/* PCI latency timer = 255 */
pci_dev->config[PCI_LATENCY_TIMER] = 0xff;
@@ -1234,21 +1217,8 @@ pvscsi_post_load(void *opaque, int version_id)
return 0;
}
-static bool pvscsi_vmstate_need_pcie_device(void *opaque)
-{
- PVSCSIState *s = PVSCSI(opaque);
-
- return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE);
-}
-
-static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id)
-{
- return !pvscsi_vmstate_need_pcie_device(opaque);
-}
-
static const VMStateDescription vmstate_pvscsi_pcie_device = {
.name = "pvscsi/pcie",
- .needed = pvscsi_vmstate_need_pcie_device,
.fields = (const VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
VMSTATE_END_OF_LIST()
@@ -1262,9 +1232,6 @@ static const VMStateDescription vmstate_pvscsi = {
.pre_save = pvscsi_pre_save,
.post_load = pvscsi_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState,
- pvscsi_vmstate_test_pci_device, 0,
- vmstate_pci_device, PCIDevice),
VMSTATE_UINT8(msi_used, PVSCSIState),
VMSTATE_UINT32(resetting, PVSCSIState),
VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
@@ -1296,33 +1263,19 @@ static const VMStateDescription vmstate_pvscsi = {
}
};
-static Property pvscsi_properties[] = {
+static const Property pvscsi_properties[] = {
DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
- DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
- PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
- DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags,
- PVSCSI_COMPAT_DISABLE_PCIE_BIT, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pvscsi_realize(DeviceState *qdev, Error **errp)
+static void pvscsi_instance_init(Object *obj)
{
- PVSCSIClass *pvs_c = PVSCSI_GET_CLASS(qdev);
- PCIDevice *pci_dev = PCI_DEVICE(qdev);
- PVSCSIState *s = PVSCSI(qdev);
-
- if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) {
- pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
- }
-
- pvs_c->parent_dc_realize(qdev, errp);
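+ /*
+ * The x-old-pci-configuration and x-disable-pcie compat properties are
+ * gone, so the device is now unconditionally a PCI Express device.
+ */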
+ PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
-static void pvscsi_class_init(ObjectClass *klass, void *data)
+static void pvscsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- PVSCSIClass *pvs_k = PVSCSI_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
k->realize = pvscsi_realizefn;
@@ -1331,9 +1284,7 @@ static void pvscsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
- device_class_set_parent_realize(dc, pvscsi_realize,
- &pvs_k->parent_dc_realize);
- dc->reset = pvscsi_reset;
+ device_class_set_legacy_reset(dc, pvscsi_reset);
dc->vmsd = &vmstate_pvscsi;
device_class_set_props(dc, pvscsi_properties);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
@@ -1347,7 +1298,8 @@ static const TypeInfo pvscsi_info = {
.class_size = sizeof(PVSCSIClass),
.instance_size = sizeof(PVSCSIState),
.class_init = pvscsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .instance_init = pvscsi_instance_init,
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
diff --git a/hw/scsi/vmw_pvscsi.h b/hw/scsi/vmw_pvscsi.h
index 17fcf66..a3ae517 100644
--- a/hw/scsi/vmw_pvscsi.h
+++ b/hw/scsi/vmw_pvscsi.h
@@ -14,8 +14,8 @@
* details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * along with this program; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c
index a1b7230..b31da5c 100644
--- a/hw/sd/allwinner-sdhost.c
+++ b/hw/sd/allwinner-sdhost.c
@@ -22,8 +22,8 @@
#include "qemu/module.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/dma.h"
+#include "system/blockdev.h"
+#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
#include "hw/sd/allwinner-sdhost.h"
@@ -761,7 +761,7 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_sdhost_ops = {
.read = allwinner_sdhost_read,
.write = allwinner_sdhost_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
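+ /*
+ * Allwinner SoCs are little-endian; model the registers explicitly rather
+ * than following the target CPU's endianness.
+ */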
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -808,10 +808,9 @@ static const VMStateDescription vmstate_allwinner_sdhost = {
}
};
-static Property allwinner_sdhost_properties[] = {
+static const Property allwinner_sdhost_properties[] = {
DEFINE_PROP_LINK("dma-memory", AwSdHostState, dma_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
static void allwinner_sdhost_init(Object *obj)
@@ -889,24 +888,26 @@ static void allwinner_sdhost_reset(DeviceState *dev)
}
}
-static void allwinner_sdhost_bus_class_init(ObjectClass *klass, void *data)
+static void allwinner_sdhost_bus_class_init(ObjectClass *klass,
+ const void *data)
{
SDBusClass *sbc = SD_BUS_CLASS(klass);
sbc->set_inserted = allwinner_sdhost_set_inserted;
}
-static void allwinner_sdhost_class_init(ObjectClass *klass, void *data)
+static void allwinner_sdhost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = allwinner_sdhost_reset;
+ device_class_set_legacy_reset(dc, allwinner_sdhost_reset);
dc->vmsd = &vmstate_allwinner_sdhost;
dc->realize = allwinner_sdhost_realize;
device_class_set_props(dc, allwinner_sdhost_properties);
}
-static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data)
+static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass,
+ const void *data)
{
AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
sc->max_desc_size = 8 * KiB;
@@ -914,7 +915,8 @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data)
sc->can_calibrate = false;
}
-static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data)
+static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass,
+ const void *data)
{
AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
sc->max_desc_size = 64 * KiB;
@@ -923,7 +925,7 @@ static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data)
}
static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
sc->max_desc_size = 64 * KiB;
@@ -932,7 +934,7 @@ static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass,
}
static void allwinner_sdhost_sun50i_a64_emmc_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
sc->max_desc_size = 8 * KiB;
diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c
index 3b63926..fc38ad3 100644
--- a/hw/sd/aspeed_sdhci.c
+++ b/hw/sd/aspeed_sdhci.c
@@ -24,8 +24,10 @@
#define ASPEED_SDHCI_DEBOUNCE_RESET 0x00000005
#define ASPEED_SDHCI_BUS 0x08
#define ASPEED_SDHCI_SDIO_140 0x10
+#define ASPEED_SDHCI_SDIO_144 0x14
#define ASPEED_SDHCI_SDIO_148 0x18
#define ASPEED_SDHCI_SDIO_240 0x20
+#define ASPEED_SDHCI_SDIO_244 0x24
#define ASPEED_SDHCI_SDIO_248 0x28
#define ASPEED_SDHCI_WP_POL 0xec
#define ASPEED_SDHCI_CARD_DET 0xf0
@@ -35,21 +37,27 @@
static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size)
{
- uint32_t val = 0;
+ uint64_t val = 0;
AspeedSDHCIState *sdhci = opaque;
switch (addr) {
case ASPEED_SDHCI_SDIO_140:
- val = (uint32_t)sdhci->slots[0].capareg;
+ val = extract64(sdhci->slots[0].capareg, 0, 32);
+ break;
+ case ASPEED_SDHCI_SDIO_144:
+ val = extract64(sdhci->slots[0].capareg, 32, 32);
break;
case ASPEED_SDHCI_SDIO_148:
- val = (uint32_t)sdhci->slots[0].maxcurr;
+ val = extract64(sdhci->slots[0].maxcurr, 0, 32);
break;
case ASPEED_SDHCI_SDIO_240:
- val = (uint32_t)sdhci->slots[1].capareg;
+ val = extract64(sdhci->slots[1].capareg, 0, 32);
+ break;
+ case ASPEED_SDHCI_SDIO_244:
+ val = extract64(sdhci->slots[1].capareg, 32, 32);
break;
case ASPEED_SDHCI_SDIO_248:
- val = (uint32_t)sdhci->slots[1].maxcurr;
+ val = extract64(sdhci->slots[1].maxcurr, 0, 32);
break;
default:
if (addr < ASPEED_SDHCI_REG_SIZE) {
@@ -61,9 +69,9 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size)
}
}
- trace_aspeed_sdhci_read(addr, size, (uint64_t) val);
+ trace_aspeed_sdhci_read(addr, size, val);
- return (uint64_t)val;
+ return val;
}
static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val,
@@ -79,16 +87,28 @@ static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val,
sdhci->regs[TO_REG(addr)] = (uint32_t)val & ~ASPEED_SDHCI_INFO_RESET;
break;
case ASPEED_SDHCI_SDIO_140:
- sdhci->slots[0].capareg = (uint64_t)(uint32_t)val;
+ sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg,
+ 0, 32, val);
+ break;
+ case ASPEED_SDHCI_SDIO_144:
+ sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg,
+ 32, 32, val);
break;
case ASPEED_SDHCI_SDIO_148:
- sdhci->slots[0].maxcurr = (uint64_t)(uint32_t)val;
+ sdhci->slots[0].maxcurr = deposit64(sdhci->slots[0].maxcurr,
+ 0, 32, val);
break;
case ASPEED_SDHCI_SDIO_240:
- sdhci->slots[1].capareg = (uint64_t)(uint32_t)val;
+ sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg,
+ 0, 32, val);
+ break;
+ case ASPEED_SDHCI_SDIO_244:
+ sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg,
+ 32, 32, val);
break;
case ASPEED_SDHCI_SDIO_248:
- sdhci->slots[1].maxcurr = (uint64_t)(uint32_t)val;
+ sdhci->slots[1].maxcurr = deposit64(sdhci->slots[1].maxcurr,
+ 0, 32, val);
break;
default:
if (addr < ASPEED_SDHCI_REG_SIZE) {
@@ -128,6 +148,7 @@ static void aspeed_sdhci_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
AspeedSDHCIState *sdhci = ASPEED_SDHCI(dev);
+ AspeedSDHCIClass *asc = ASPEED_SDHCI_GET_CLASS(sdhci);
/* Create input irqs for the slots */
qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_sdhci_set_irq,
@@ -147,7 +168,7 @@ static void aspeed_sdhci_realize(DeviceState *dev, Error **errp)
}
if (!object_property_set_uint(sdhci_slot, "capareg",
- ASPEED_SDHCI_CAPABILITIES, errp)) {
+ asc->capareg, errp)) {
return;
}
@@ -183,27 +204,84 @@ static const VMStateDescription vmstate_aspeed_sdhci = {
},
};
-static Property aspeed_sdhci_properties[] = {
+static const Property aspeed_sdhci_properties[] = {
DEFINE_PROP_UINT8("num-slots", AspeedSDHCIState, num_slots, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_sdhci_class_init(ObjectClass *classp, void *data)
+static void aspeed_sdhci_class_init(ObjectClass *classp, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(classp);
dc->realize = aspeed_sdhci_realize;
- dc->reset = aspeed_sdhci_reset;
+ device_class_set_legacy_reset(dc, aspeed_sdhci_reset);
dc->vmsd = &vmstate_aspeed_sdhci;
device_class_set_props(dc, aspeed_sdhci_properties);
}
+static void aspeed_2400_sdhci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass);
+
+ dc->desc = "ASPEED 2400 SDHCI Controller";
+ asc->capareg = 0x0000000001e80080;
+}
+
+static void aspeed_2500_sdhci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass);
+
+ dc->desc = "ASPEED 2500 SDHCI Controller";
+ asc->capareg = 0x0000000001e80080;
+}
+
+static void aspeed_2600_sdhci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass);
+
+ dc->desc = "ASPEED 2600 SDHCI Controller";
+ asc->capareg = 0x0000000701f80080;
+}
+
+static void aspeed_2700_sdhci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 SDHCI Controller";
+ asc->capareg = 0x0000000719f80080;
+}
+
static const TypeInfo aspeed_sdhci_types[] = {
{
.name = TYPE_ASPEED_SDHCI,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(AspeedSDHCIState),
.class_init = aspeed_sdhci_class_init,
+ .class_size = sizeof(AspeedSDHCIClass),
+ .abstract = true,
+ },
+ {
+ .name = TYPE_ASPEED_2400_SDHCI,
+ .parent = TYPE_ASPEED_SDHCI,
+ .class_init = aspeed_2400_sdhci_class_init,
+ },
+ {
+ .name = TYPE_ASPEED_2500_SDHCI,
+ .parent = TYPE_ASPEED_SDHCI,
+ .class_init = aspeed_2500_sdhci_class_init,
+ },
+ {
+ .name = TYPE_ASPEED_2600_SDHCI,
+ .parent = TYPE_ASPEED_SDHCI,
+ .class_init = aspeed_2600_sdhci_class_init,
+ },
+ {
+ .name = TYPE_ASPEED_2700_SDHCI,
+ .parent = TYPE_ASPEED_SDHCI,
+ .class_init = aspeed_2700_sdhci_class_init,
},
};
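The SDIO_140/SDIO_144 and SDIO_240/SDIO_244 pairs now expose the low and high 32-bit halves of each slot's 64-bit capareg, with extract64() on reads and deposit64() on writes (maxcurr keeps only its low half). A minimal standalone sketch of the same split, using the AST2600 capareg value from this patch; it uses plain shifts and masks rather than QEMU's bitops helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* AST2600 capability value taken from aspeed_2600_sdhci_class_init() */
    uint64_t capareg = 0x0000000701f80080ULL;

    /* What the guest reads from the two 32-bit windows */
    uint32_t lo = (uint32_t)(capareg & 0xffffffffULL);  /* extract64(capareg, 0, 32)  */
    uint32_t hi = (uint32_t)(capareg >> 32);            /* extract64(capareg, 32, 32) */

    /* Two 32-bit guest writes are merged back the way deposit64() merges them */
    uint64_t merged = ((uint64_t)hi << 32) | lo;

    printf("lo=0x%08x hi=0x%08x merged=0x%016llx\n",
           lo, hi, (unsigned long long)merged);
    return 0;
}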
diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c
index 11c54dd..29debdf 100644
--- a/hw/sd/bcm2835_sdhost.c
+++ b/hw/sd/bcm2835_sdhost.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "hw/irq.h"
#include "hw/sd/bcm2835_sdhost.h"
#include "migration/vmstate.h"
@@ -428,11 +428,11 @@ static void bcm2835_sdhost_reset(DeviceState *dev)
s->fifo_len = 0;
}
-static void bcm2835_sdhost_class_init(ObjectClass *klass, void *data)
+static void bcm2835_sdhost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_sdhost_reset;
+ device_class_set_legacy_reset(dc, bcm2835_sdhost_reset);
dc->vmsd = &vmstate_bcm2835_sdhost;
}
diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c
index 7c8bc54..d576855 100644
--- a/hw/sd/cadence_sdhci.c
+++ b/hw/sd/cadence_sdhci.c
@@ -165,13 +165,13 @@ static const VMStateDescription vmstate_cadence_sdhci = {
},
};
-static void cadence_sdhci_class_init(ObjectClass *classp, void *data)
+static void cadence_sdhci_class_init(ObjectClass *classp, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(classp);
dc->desc = "Cadence SD/SDIO/eMMC Host Controller (SD4HC)";
dc->realize = cadence_sdhci_realize;
- dc->reset = cadence_sdhci_reset;
+ device_class_set_legacy_reset(dc, cadence_sdhci_reset);
dc->vmsd = &vmstate_cadence_sdhci;
}
diff --git a/hw/sd/meson.build b/hw/sd/meson.build
index bbb75af..b43d45b 100644
--- a/hw/sd/meson.build
+++ b/hw/sd/meson.build
@@ -5,7 +5,6 @@ system_ss.add(when: 'CONFIG_SDHCI_PCI', if_true: files('sdhci-pci.c'))
system_ss.add(when: 'CONFIG_SSI_SD', if_true: files('ssi-sd.c'))
system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_mmc.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c'))
system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c'))
system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c'))
diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c
index fb51821..0233d7b 100644
--- a/hw/sd/npcm7xx_sdhci.c
+++ b/hw/sd/npcm7xx_sdhci.c
@@ -149,13 +149,13 @@ static const VMStateDescription vmstate_npcm7xx_sdhci = {
},
};
-static void npcm7xx_sdhci_class_init(ObjectClass *classp, void *data)
+static void npcm7xx_sdhci_class_init(ObjectClass *classp, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(classp);
dc->desc = "NPCM7xx SD/eMMC Host Controller";
dc->realize = npcm7xx_sdhci_realize;
- dc->reset = npcm7xx_sdhci_reset;
+ device_class_set_legacy_reset(dc, npcm7xx_sdhci_reset);
dc->vmsd = &vmstate_npcm7xx_sdhci;
}
diff --git a/hw/sd/omap_mmc.c b/hw/sd/omap_mmc.c
index edd3cf2..b7648d4 100644
--- a/hw/sd/omap_mmc.c
+++ b/hw/sd/omap_mmc.c
@@ -21,17 +21,22 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "qapi/error.h"
#include "hw/irq.h"
+#include "hw/sysbus.h"
#include "hw/arm/omap.h"
-#include "hw/sd/sdcard_legacy.h"
+#include "hw/sd/sd.h"
+
+typedef struct OMAPMMCState {
+ SysBusDevice parent_obj;
+
+ SDBus sdbus;
-struct omap_mmc_s {
qemu_irq irq;
- qemu_irq *dma;
- qemu_irq coverswitch;
+ qemu_irq dma_tx_gpio;
+ qemu_irq dma_rx_gpio;
MemoryRegion iomem;
omap_clk clk;
- SDState *card;
uint16_t last_cmd;
uint16_t sdio;
uint16_t rsp[8];
@@ -64,16 +69,15 @@ struct omap_mmc_s {
int cdet_wakeup;
int cdet_enable;
- int cdet_state;
qemu_irq cdet;
-};
+} OMAPMMCState;
-static void omap_mmc_interrupts_update(struct omap_mmc_s *s)
+static void omap_mmc_interrupts_update(OMAPMMCState *s)
{
qemu_set_irq(s->irq, !!(s->status & s->mask));
}
-static void omap_mmc_fifolevel_update(struct omap_mmc_s *host)
+static void omap_mmc_fifolevel_update(OMAPMMCState *host)
{
if (!host->transfer && !host->fifo_len) {
host->status &= 0xf3ff;
@@ -83,37 +87,47 @@ static void omap_mmc_fifolevel_update(struct omap_mmc_s *host)
if (host->fifo_len > host->af_level && host->ddir) {
if (host->rx_dma) {
host->status &= 0xfbff;
- qemu_irq_raise(host->dma[1]);
+ qemu_irq_raise(host->dma_rx_gpio);
} else
host->status |= 0x0400;
} else {
host->status &= 0xfbff;
- qemu_irq_lower(host->dma[1]);
+ qemu_irq_lower(host->dma_rx_gpio);
}
if (host->fifo_len < host->ae_level && !host->ddir) {
if (host->tx_dma) {
host->status &= 0xf7ff;
- qemu_irq_raise(host->dma[0]);
+ qemu_irq_raise(host->dma_tx_gpio);
} else
host->status |= 0x0800;
} else {
- qemu_irq_lower(host->dma[0]);
+ qemu_irq_lower(host->dma_tx_gpio);
host->status &= 0xf7ff;
}
}
+/* These must match the encoding of the MMC_CMD Response field */
typedef enum {
- sd_nore = 0, /* no response */
- sd_r1, /* normal response command */
- sd_r2, /* CID, CSD registers */
- sd_r3, /* OCR register */
- sd_r6 = 6, /* Published RCA response */
+ sd_nore = 0, /* no response */
+ sd_r1, /* normal response command */
+ sd_r2, /* CID, CSD registers */
+ sd_r3, /* OCR register */
+ sd_r6 = 6, /* Published RCA response */
sd_r1b = -1,
} sd_rsp_type_t;
-static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir,
- sd_cmd_type_t type, int busy, sd_rsp_type_t resptype, int init)
+/* These must match the encoding of the MMC_CMD Type field */
+typedef enum {
+ SD_TYPE_BC = 0, /* broadcast -- no response */
+ SD_TYPE_BCR = 1, /* broadcast with response */
+ SD_TYPE_AC = 2, /* addressed -- no data transfer */
+ SD_TYPE_ADTC = 3, /* addressed with data transfer */
+} MMCCmdType;
+
+static void omap_mmc_command(OMAPMMCState *host, int cmd, int dir,
+ MMCCmdType type, int busy,
+ sd_rsp_type_t resptype, int init)
{
uint32_t rspstatus, mask;
int rsplen, timeout;
@@ -128,7 +142,7 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir,
if (resptype == sd_r1 && busy)
resptype = sd_r1b;
- if (type == sd_adtc) {
+ if (type == SD_TYPE_ADTC) {
host->fifo_start = 0;
host->fifo_len = 0;
host->transfer = 1;
@@ -143,7 +157,7 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir,
request.arg = host->arg;
request.crc = 0; /* FIXME */
- rsplen = sd_do_command(host->card, &request, response);
+ rsplen = sdbus_do_command(&host->sdbus, &request, response);
/* TODO: validate CRCs */
switch (resptype) {
@@ -215,12 +229,12 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir,
if (timeout)
host->status |= 0x0080;
else if (cmd == 12)
- host->status |= 0x0005; /* Makes it more real */
+ host->status |= 0x0005; /* Makes it more real */
else
host->status |= 0x0001;
}
-static void omap_mmc_transfer(struct omap_mmc_s *host)
+static void omap_mmc_transfer(OMAPMMCState *host)
{
uint8_t value;
@@ -232,10 +246,10 @@ static void omap_mmc_transfer(struct omap_mmc_s *host)
if (host->fifo_len > host->af_level)
break;
- value = sd_read_byte(host->card);
+ value = sdbus_read_byte(&host->sdbus);
host->fifo[(host->fifo_start + host->fifo_len) & 31] = value;
if (-- host->blen_counter) {
- value = sd_read_byte(host->card);
+ value = sdbus_read_byte(&host->sdbus);
host->fifo[(host->fifo_start + host->fifo_len) & 31] |=
value << 8;
host->blen_counter --;
@@ -247,10 +261,10 @@ static void omap_mmc_transfer(struct omap_mmc_s *host)
break;
value = host->fifo[host->fifo_start] & 0xff;
- sd_write_byte(host->card, value);
+ sdbus_write_byte(&host->sdbus, value);
if (-- host->blen_counter) {
value = host->fifo[host->fifo_start] >> 8;
- sd_write_byte(host->card, value);
+ sdbus_write_byte(&host->sdbus, value);
host->blen_counter --;
}
@@ -275,19 +289,19 @@ static void omap_mmc_transfer(struct omap_mmc_s *host)
static void omap_mmc_update(void *opaque)
{
- struct omap_mmc_s *s = opaque;
+ OMAPMMCState *s = opaque;
omap_mmc_transfer(s);
omap_mmc_fifolevel_update(s);
omap_mmc_interrupts_update(s);
}
-static void omap_mmc_pseudo_reset(struct omap_mmc_s *host)
+static void omap_mmc_pseudo_reset(OMAPMMCState *host)
{
host->status = 0;
host->fifo_len = 0;
}
-void omap_mmc_reset(struct omap_mmc_s *host)
+static void omap_mmc_reset(OMAPMMCState *host)
{
host->last_cmd = 0;
memset(host->rsp, 0, sizeof(host->rsp));
@@ -309,54 +323,47 @@ void omap_mmc_reset(struct omap_mmc_s *host)
host->transfer = 0;
host->cdet_wakeup = 0;
host->cdet_enable = 0;
- qemu_set_irq(host->coverswitch, host->cdet_state);
host->clkdiv = 0;
omap_mmc_pseudo_reset(host);
-
- /* Since we're still using the legacy SD API the card is not plugged
- * into any bus, and we must reset it manually. When omap_mmc is
- * QOMified this must move into the QOM reset function.
- */
- device_cold_reset(DEVICE(host->card));
}
static uint64_t omap_mmc_read(void *opaque, hwaddr offset, unsigned size)
{
uint16_t i;
- struct omap_mmc_s *s = opaque;
+ OMAPMMCState *s = opaque;
if (size != 2) {
return omap_badwidth_read16(opaque, offset);
}
switch (offset) {
- case 0x00: /* MMC_CMD */
+ case 0x00: /* MMC_CMD */
return s->last_cmd;
- case 0x04: /* MMC_ARGL */
+ case 0x04: /* MMC_ARGL */
return s->arg & 0x0000ffff;
- case 0x08: /* MMC_ARGH */
+ case 0x08: /* MMC_ARGH */
return s->arg >> 16;
- case 0x0c: /* MMC_CON */
+ case 0x0c: /* MMC_CON */
return (s->dw << 15) | (s->mode << 12) | (s->enable << 11) |
(s->be << 10) | s->clkdiv;
- case 0x10: /* MMC_STAT */
+ case 0x10: /* MMC_STAT */
return s->status;
- case 0x14: /* MMC_IE */
+ case 0x14: /* MMC_IE */
return s->mask;
- case 0x18: /* MMC_CTO */
+ case 0x18: /* MMC_CTO */
return s->cto;
- case 0x1c: /* MMC_DTO */
+ case 0x1c: /* MMC_DTO */
return s->dto;
- case 0x20: /* MMC_DATA */
+ case 0x20: /* MMC_DATA */
/* TODO: support 8-bit access */
i = s->fifo[s->fifo_start];
if (s->fifo_len == 0) {
@@ -371,42 +378,42 @@ static uint64_t omap_mmc_read(void *opaque, hwaddr offset, unsigned size)
omap_mmc_interrupts_update(s);
return i;
- case 0x24: /* MMC_BLEN */
+ case 0x24: /* MMC_BLEN */
return s->blen_counter;
- case 0x28: /* MMC_NBLK */
+ case 0x28: /* MMC_NBLK */
return s->nblk_counter;
- case 0x2c: /* MMC_BUF */
+ case 0x2c: /* MMC_BUF */
return (s->rx_dma << 15) | (s->af_level << 8) |
(s->tx_dma << 7) | s->ae_level;
- case 0x30: /* MMC_SPI */
+ case 0x30: /* MMC_SPI */
return 0x0000;
- case 0x34: /* MMC_SDIO */
+ case 0x34: /* MMC_SDIO */
return (s->cdet_wakeup << 2) | (s->cdet_enable) | s->sdio;
- case 0x38: /* MMC_SYST */
+ case 0x38: /* MMC_SYST */
return 0x0000;
- case 0x3c: /* MMC_REV */
+ case 0x3c: /* MMC_REV */
return s->rev;
- case 0x40: /* MMC_RSP0 */
- case 0x44: /* MMC_RSP1 */
- case 0x48: /* MMC_RSP2 */
- case 0x4c: /* MMC_RSP3 */
- case 0x50: /* MMC_RSP4 */
- case 0x54: /* MMC_RSP5 */
- case 0x58: /* MMC_RSP6 */
- case 0x5c: /* MMC_RSP7 */
+ case 0x40: /* MMC_RSP0 */
+ case 0x44: /* MMC_RSP1 */
+ case 0x48: /* MMC_RSP2 */
+ case 0x4c: /* MMC_RSP3 */
+ case 0x50: /* MMC_RSP4 */
+ case 0x54: /* MMC_RSP5 */
+ case 0x58: /* MMC_RSP6 */
+ case 0x5c: /* MMC_RSP7 */
return s->rsp[(offset - 0x40) >> 2];
/* OMAP2-specific */
- case 0x60: /* MMC_IOSR */
- case 0x64: /* MMC_SYSC */
+ case 0x60: /* MMC_IOSR */
+ case 0x64: /* MMC_SYSC */
return 0;
- case 0x68: /* MMC_SYSS */
- return 1; /* RSTD */
+ case 0x68: /* MMC_SYSS */
+ return 1; /* RSTD */
}
OMAP_BAD_REG(offset);
@@ -417,7 +424,7 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
int i;
- struct omap_mmc_s *s = opaque;
+ OMAPMMCState *s = opaque;
if (size != 2) {
omap_badwidth_write16(opaque, offset, value);
@@ -425,7 +432,7 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
}
switch (offset) {
- case 0x00: /* MMC_CMD */
+ case 0x00: /* MMC_CMD */
if (!s->enable)
break;
@@ -433,24 +440,24 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
for (i = 0; i < 8; i ++)
s->rsp[i] = 0x0000;
omap_mmc_command(s, value & 63, (value >> 15) & 1,
- (sd_cmd_type_t) ((value >> 12) & 3),
- (value >> 11) & 1,
- (sd_rsp_type_t) ((value >> 8) & 7),
- (value >> 7) & 1);
+ (MMCCmdType)((value >> 12) & 3),
+ (value >> 11) & 1,
+ (sd_rsp_type_t) ((value >> 8) & 7),
+ (value >> 7) & 1);
omap_mmc_update(s);
break;
- case 0x04: /* MMC_ARGL */
+ case 0x04: /* MMC_ARGL */
s->arg &= 0xffff0000;
s->arg |= 0x0000ffff & value;
break;
- case 0x08: /* MMC_ARGH */
+ case 0x08: /* MMC_ARGH */
s->arg &= 0x0000ffff;
s->arg |= value << 16;
break;
- case 0x0c: /* MMC_CON */
+ case 0x0c: /* MMC_CON */
s->dw = (value >> 15) & 1;
s->mode = (value >> 12) & 3;
s->enable = (value >> 11) & 1;
@@ -470,27 +477,27 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
omap_mmc_pseudo_reset(s);
break;
- case 0x10: /* MMC_STAT */
+ case 0x10: /* MMC_STAT */
s->status &= ~value;
omap_mmc_interrupts_update(s);
break;
- case 0x14: /* MMC_IE */
+ case 0x14: /* MMC_IE */
s->mask = value & 0x7fff;
omap_mmc_interrupts_update(s);
break;
- case 0x18: /* MMC_CTO */
+ case 0x18: /* MMC_CTO */
s->cto = value & 0xff;
if (s->cto > 0xfd && s->rev <= 1)
printf("MMC: CTO of 0xff and 0xfe cannot be used!\n");
break;
- case 0x1c: /* MMC_DTO */
+ case 0x1c: /* MMC_DTO */
s->dto = value & 0xffff;
break;
- case 0x20: /* MMC_DATA */
+ case 0x20: /* MMC_DATA */
/* TODO: support 8-bit access */
if (s->fifo_len == 32)
break;
@@ -501,18 +508,18 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
omap_mmc_interrupts_update(s);
break;
- case 0x24: /* MMC_BLEN */
+ case 0x24: /* MMC_BLEN */
s->blen = (value & 0x07ff) + 1;
s->blen_counter = s->blen;
break;
- case 0x28: /* MMC_NBLK */
+ case 0x28: /* MMC_NBLK */
s->nblk = (value & 0x07ff) + 1;
s->nblk_counter = s->nblk;
s->blen_counter = s->blen;
break;
- case 0x2c: /* MMC_BUF */
+ case 0x2c: /* MMC_BUF */
s->rx_dma = (value >> 15) & 1;
s->af_level = (value >> 8) & 0x1f;
s->tx_dma = (value >> 7) & 1;
@@ -527,38 +534,38 @@ static void omap_mmc_write(void *opaque, hwaddr offset,
break;
/* SPI, SDIO and TEST modes unimplemented */
- case 0x30: /* MMC_SPI (OMAP1 only) */
+ case 0x30: /* MMC_SPI (OMAP1 only) */
break;
- case 0x34: /* MMC_SDIO */
+ case 0x34: /* MMC_SDIO */
s->sdio = value & (s->rev >= 2 ? 0xfbf3 : 0x2020);
s->cdet_wakeup = (value >> 9) & 1;
s->cdet_enable = (value >> 2) & 1;
break;
- case 0x38: /* MMC_SYST */
+ case 0x38: /* MMC_SYST */
break;
- case 0x3c: /* MMC_REV */
- case 0x40: /* MMC_RSP0 */
- case 0x44: /* MMC_RSP1 */
- case 0x48: /* MMC_RSP2 */
- case 0x4c: /* MMC_RSP3 */
- case 0x50: /* MMC_RSP4 */
- case 0x54: /* MMC_RSP5 */
- case 0x58: /* MMC_RSP6 */
- case 0x5c: /* MMC_RSP7 */
+ case 0x3c: /* MMC_REV */
+ case 0x40: /* MMC_RSP0 */
+ case 0x44: /* MMC_RSP1 */
+ case 0x48: /* MMC_RSP2 */
+ case 0x4c: /* MMC_RSP3 */
+ case 0x50: /* MMC_RSP4 */
+ case 0x54: /* MMC_RSP5 */
+ case 0x58: /* MMC_RSP6 */
+ case 0x5c: /* MMC_RSP7 */
OMAP_RO_REG(offset);
break;
/* OMAP2-specific */
- case 0x60: /* MMC_IOSR */
+ case 0x60: /* MMC_IOSR */
if (value & 0xf)
printf("MMC: SDIO bits used!\n");
break;
- case 0x64: /* MMC_SYSC */
- if (value & (1 << 2)) /* SRTS */
+ case 0x64: /* MMC_SYSC */
+ if (value & (1 << 2)) /* SRTS */
omap_mmc_reset(s);
break;
- case 0x68: /* MMC_SYSS */
+ case 0x68: /* MMC_SYSS */
OMAP_RO_REG(offset);
break;
@@ -573,92 +580,56 @@ static const MemoryRegionOps omap_mmc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void omap_mmc_cover_cb(void *opaque, int line, int level)
+void omap_mmc_set_clk(DeviceState *dev, omap_clk clk)
{
- struct omap_mmc_s *host = opaque;
+ OMAPMMCState *s = OMAP_MMC(dev);
- if (!host->cdet_state && level) {
- host->status |= 0x0002;
- omap_mmc_interrupts_update(host);
- if (host->cdet_wakeup) {
- /* TODO: Assert wake-up */
- }
- }
-
- if (host->cdet_state != level) {
- qemu_set_irq(host->coverswitch, level);
- host->cdet_state = level;
- }
+ s->clk = clk;
}
-struct omap_mmc_s *omap_mmc_init(hwaddr base,
- MemoryRegion *sysmem,
- BlockBackend *blk,
- qemu_irq irq, qemu_irq dma[], omap_clk clk)
+static void omap_mmc_reset_hold(Object *obj, ResetType type)
{
- struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1);
-
- s->irq = irq;
- s->dma = dma;
- s->clk = clk;
- s->lines = 1; /* TODO: needs to be settable per-board */
- s->rev = 1;
-
- memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800);
- memory_region_add_subregion(sysmem, base, &s->iomem);
-
- /* Instantiate the storage */
- s->card = sd_init(blk, false);
- if (s->card == NULL) {
- exit(1);
- }
+ OMAPMMCState *s = OMAP_MMC(obj);
omap_mmc_reset(s);
-
- return s;
}
-struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta,
- BlockBackend *blk, qemu_irq irq, qemu_irq dma[],
- omap_clk fclk, omap_clk iclk)
+static void omap_mmc_initfn(Object *obj)
{
- struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1);
-
- s->irq = irq;
- s->dma = dma;
- s->clk = fclk;
- s->lines = 4;
- s->rev = 2;
-
- memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- /* Instantiate the storage */
- s->card = sd_init(blk, false);
- if (s->card == NULL) {
- exit(1);
- }
+ OMAPMMCState *s = OMAP_MMC(obj);
- s->cdet = qemu_allocate_irq(omap_mmc_cover_cb, s, 0);
- sd_set_cb(s->card, NULL, s->cdet);
+ /* In theory these could be settable per-board */
+ s->lines = 1;
+ s->rev = 1;
- omap_mmc_reset(s);
+ memory_region_init_io(&s->iomem, obj, &omap_mmc_ops, s, "omap.mmc", 0x800);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
- return s;
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+ qdev_init_gpio_out_named(DEVICE(obj), &s->dma_tx_gpio, "dma-tx", 1);
+ qdev_init_gpio_out_named(DEVICE(obj), &s->dma_rx_gpio, "dma-rx", 1);
+
+ qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(obj), "sd-bus");
}
-void omap_mmc_handlers(struct omap_mmc_s *s, qemu_irq ro, qemu_irq cover)
+static void omap_mmc_class_init(ObjectClass *oc, const void *data)
{
- if (s->cdet) {
- sd_set_cb(s->card, ro, s->cdet);
- s->coverswitch = cover;
- qemu_set_irq(cover, s->cdet_state);
- } else
- sd_set_cb(s->card, ro, cover);
+ ResettableClass *rc = RESETTABLE_CLASS(oc);
+
+ rc->phases.hold = omap_mmc_reset_hold;
}
-void omap_mmc_enable(struct omap_mmc_s *s, int enable)
+static const TypeInfo omap_mmc_info = {
+ .name = TYPE_OMAP_MMC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(OMAPMMCState),
+ .instance_init = omap_mmc_initfn,
+ .class_init = omap_mmc_class_init,
+};
+
+static void omap_mmc_register_types(void)
{
- sd_enable(s->card, enable);
+ type_register_static(&omap_mmc_info);
}
+
+type_init(omap_mmc_register_types)
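Now that omap_mmc is a QOM sysbus device with an SD bus, a board would create and wire it with the standard qdev/sysbus helpers instead of the removed omap_mmc_init()/omap2_mmc_init(). A rough sketch under that assumption, using the "dma-tx"/"dma-rx" GPIO names and omap_mmc_set_clk() added above; base, irq, dma_tx, dma_rx and clk stand in for board-specific values:

    DeviceState *dev = qdev_new(TYPE_OMAP_MMC);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);           /* "omap.mmc" region */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
    qdev_connect_gpio_out_named(dev, "dma-tx", 0, dma_tx);   /* names from omap_mmc_initfn() */
    qdev_connect_gpio_out_named(dev, "dma-rx", 0, dma_rx);
    omap_mmc_set_clk(dev, clk);
    /* The SD card itself is then plugged onto the device's "sd-bus" bus. */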
diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c
index e3633c2..b8fc9f8 100644
--- a/hw/sd/pl181.c
+++ b/hw/sd/pl181.c
@@ -8,7 +8,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
@@ -509,17 +509,17 @@ static void pl181_init(Object *obj)
qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus");
}
-static void pl181_class_init(ObjectClass *klass, void *data)
+static void pl181_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->vmsd = &vmstate_pl181;
- k->reset = pl181_reset;
+ device_class_set_legacy_reset(k, pl181_reset);
/* Reason: output IRQs should be wired up */
k->user_creatable = false;
}
-static void pl181_bus_class_init(ObjectClass *klass, void *data)
+static void pl181_bus_class_init(ObjectClass *klass, const void *data)
{
SDBusClass *sbc = SD_BUS_CLASS(klass);
diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c
deleted file mode 100644
index 8252970..0000000
--- a/hw/sd/pxa2xx_mmci.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * Intel XScale PXA255/270 MultiMediaCard/SD/SDIO Controller emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/irq.h"
-#include "hw/sysbus.h"
-#include "migration/vmstate.h"
-#include "hw/arm/pxa.h"
-#include "hw/sd/sd.h"
-#include "hw/qdev-properties.h"
-#include "qemu/log.h"
-#include "qemu/module.h"
-#include "trace.h"
-#include "qom/object.h"
-
-#define TYPE_PXA2XX_MMCI_BUS "pxa2xx-mmci-bus"
-/* This is reusing the SDBus typedef from SD_BUS */
-DECLARE_INSTANCE_CHECKER(SDBus, PXA2XX_MMCI_BUS,
- TYPE_PXA2XX_MMCI_BUS)
-
-struct PXA2xxMMCIState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem;
- qemu_irq irq;
- qemu_irq rx_dma;
- qemu_irq tx_dma;
- qemu_irq inserted;
- qemu_irq readonly;
-
- BlockBackend *blk;
- SDBus sdbus;
-
- uint32_t status;
- uint32_t clkrt;
- uint32_t spi;
- uint32_t cmdat;
- uint32_t resp_tout;
- uint32_t read_tout;
- int32_t blklen;
- int32_t numblk;
- uint32_t intmask;
- uint32_t intreq;
- int32_t cmd;
- uint32_t arg;
-
- int32_t active;
- int32_t bytesleft;
- uint8_t tx_fifo[64];
- uint32_t tx_start;
- uint32_t tx_len;
- uint8_t rx_fifo[32];
- uint32_t rx_start;
- uint32_t rx_len;
- uint16_t resp_fifo[9];
- uint32_t resp_len;
-
- int32_t cmdreq;
-};
-
-static bool pxa2xx_mmci_vmstate_validate(void *opaque, int version_id)
-{
- PXA2xxMMCIState *s = opaque;
-
- return s->tx_start < ARRAY_SIZE(s->tx_fifo)
- && s->rx_start < ARRAY_SIZE(s->rx_fifo)
- && s->tx_len <= ARRAY_SIZE(s->tx_fifo)
- && s->rx_len <= ARRAY_SIZE(s->rx_fifo)
- && s->resp_len <= ARRAY_SIZE(s->resp_fifo);
-}
-
-
-static const VMStateDescription vmstate_pxa2xx_mmci = {
- .name = "pxa2xx-mmci",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(status, PXA2xxMMCIState),
- VMSTATE_UINT32(clkrt, PXA2xxMMCIState),
- VMSTATE_UINT32(spi, PXA2xxMMCIState),
- VMSTATE_UINT32(cmdat, PXA2xxMMCIState),
- VMSTATE_UINT32(resp_tout, PXA2xxMMCIState),
- VMSTATE_UINT32(read_tout, PXA2xxMMCIState),
- VMSTATE_INT32(blklen, PXA2xxMMCIState),
- VMSTATE_INT32(numblk, PXA2xxMMCIState),
- VMSTATE_UINT32(intmask, PXA2xxMMCIState),
- VMSTATE_UINT32(intreq, PXA2xxMMCIState),
- VMSTATE_INT32(cmd, PXA2xxMMCIState),
- VMSTATE_UINT32(arg, PXA2xxMMCIState),
- VMSTATE_INT32(cmdreq, PXA2xxMMCIState),
- VMSTATE_INT32(active, PXA2xxMMCIState),
- VMSTATE_INT32(bytesleft, PXA2xxMMCIState),
- VMSTATE_UINT32(tx_start, PXA2xxMMCIState),
- VMSTATE_UINT32(tx_len, PXA2xxMMCIState),
- VMSTATE_UINT32(rx_start, PXA2xxMMCIState),
- VMSTATE_UINT32(rx_len, PXA2xxMMCIState),
- VMSTATE_UINT32(resp_len, PXA2xxMMCIState),
- VMSTATE_VALIDATE("fifo size incorrect", pxa2xx_mmci_vmstate_validate),
- VMSTATE_UINT8_ARRAY(tx_fifo, PXA2xxMMCIState, 64),
- VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxMMCIState, 32),
- VMSTATE_UINT16_ARRAY(resp_fifo, PXA2xxMMCIState, 9),
- VMSTATE_END_OF_LIST()
- }
-};
-
-#define MMC_STRPCL 0x00 /* MMC Clock Start/Stop register */
-#define MMC_STAT 0x04 /* MMC Status register */
-#define MMC_CLKRT 0x08 /* MMC Clock Rate register */
-#define MMC_SPI 0x0c /* MMC SPI Mode register */
-#define MMC_CMDAT 0x10 /* MMC Command/Data register */
-#define MMC_RESTO 0x14 /* MMC Response Time-Out register */
-#define MMC_RDTO 0x18 /* MMC Read Time-Out register */
-#define MMC_BLKLEN 0x1c /* MMC Block Length register */
-#define MMC_NUMBLK 0x20 /* MMC Number of Blocks register */
-#define MMC_PRTBUF 0x24 /* MMC Buffer Partly Full register */
-#define MMC_I_MASK 0x28 /* MMC Interrupt Mask register */
-#define MMC_I_REG 0x2c /* MMC Interrupt Request register */
-#define MMC_CMD 0x30 /* MMC Command register */
-#define MMC_ARGH 0x34 /* MMC Argument High register */
-#define MMC_ARGL 0x38 /* MMC Argument Low register */
-#define MMC_RES 0x3c /* MMC Response FIFO */
-#define MMC_RXFIFO 0x40 /* MMC Receive FIFO */
-#define MMC_TXFIFO 0x44 /* MMC Transmit FIFO */
-#define MMC_RDWAIT 0x48 /* MMC RD_WAIT register */
-#define MMC_BLKS_REM 0x4c /* MMC Blocks Remaining register */
-
-/* Bitfield masks */
-#define STRPCL_STOP_CLK (1 << 0)
-#define STRPCL_STRT_CLK (1 << 1)
-#define STAT_TOUT_RES (1 << 1)
-#define STAT_CLK_EN (1 << 8)
-#define STAT_DATA_DONE (1 << 11)
-#define STAT_PRG_DONE (1 << 12)
-#define STAT_END_CMDRES (1 << 13)
-#define SPI_SPI_MODE (1 << 0)
-#define CMDAT_RES_TYPE (3 << 0)
-#define CMDAT_DATA_EN (1 << 2)
-#define CMDAT_WR_RD (1 << 3)
-#define CMDAT_DMA_EN (1 << 7)
-#define CMDAT_STOP_TRAN (1 << 10)
-#define INT_DATA_DONE (1 << 0)
-#define INT_PRG_DONE (1 << 1)
-#define INT_END_CMD (1 << 2)
-#define INT_STOP_CMD (1 << 3)
-#define INT_CLK_OFF (1 << 4)
-#define INT_RXFIFO_REQ (1 << 5)
-#define INT_TXFIFO_REQ (1 << 6)
-#define INT_TINT (1 << 7)
-#define INT_DAT_ERR (1 << 8)
-#define INT_RES_ERR (1 << 9)
-#define INT_RD_STALLED (1 << 10)
-#define INT_SDIO_INT (1 << 11)
-#define INT_SDIO_SACK (1 << 12)
-#define PRTBUF_PRT_BUF (1 << 0)
-
-/* Route internal interrupt lines to the global IC and DMA */
-static void pxa2xx_mmci_int_update(PXA2xxMMCIState *s)
-{
- uint32_t mask = s->intmask;
- if (s->cmdat & CMDAT_DMA_EN) {
- mask |= INT_RXFIFO_REQ | INT_TXFIFO_REQ;
-
- qemu_set_irq(s->rx_dma, !!(s->intreq & INT_RXFIFO_REQ));
- qemu_set_irq(s->tx_dma, !!(s->intreq & INT_TXFIFO_REQ));
- }
-
- qemu_set_irq(s->irq, !!(s->intreq & ~mask));
-}
-
-static void pxa2xx_mmci_fifo_update(PXA2xxMMCIState *s)
-{
- if (!s->active)
- return;
-
- if (s->cmdat & CMDAT_WR_RD) {
- while (s->bytesleft && s->tx_len) {
- sdbus_write_byte(&s->sdbus, s->tx_fifo[s->tx_start++]);
- s->tx_start &= 0x1f;
- s->tx_len --;
- s->bytesleft --;
- }
- if (s->bytesleft)
- s->intreq |= INT_TXFIFO_REQ;
- } else
- while (s->bytesleft && s->rx_len < 32) {
- s->rx_fifo[(s->rx_start + (s->rx_len ++)) & 0x1f] =
- sdbus_read_byte(&s->sdbus);
- s->bytesleft --;
- s->intreq |= INT_RXFIFO_REQ;
- }
-
- if (!s->bytesleft) {
- s->active = 0;
- s->intreq |= INT_DATA_DONE;
- s->status |= STAT_DATA_DONE;
-
- if (s->cmdat & CMDAT_WR_RD) {
- s->intreq |= INT_PRG_DONE;
- s->status |= STAT_PRG_DONE;
- }
- }
-
- pxa2xx_mmci_int_update(s);
-}
-
-static void pxa2xx_mmci_wakequeues(PXA2xxMMCIState *s)
-{
- int rsplen, i;
- SDRequest request;
- uint8_t response[16];
-
- s->active = 1;
- s->rx_len = 0;
- s->tx_len = 0;
- s->cmdreq = 0;
-
- request.cmd = s->cmd;
- request.arg = s->arg;
- request.crc = 0; /* FIXME */
-
- rsplen = sdbus_do_command(&s->sdbus, &request, response);
- s->intreq |= INT_END_CMD;
-
- memset(s->resp_fifo, 0, sizeof(s->resp_fifo));
- switch (s->cmdat & CMDAT_RES_TYPE) {
-#define PXAMMCI_RESP(wd, value0, value1) \
- s->resp_fifo[(wd) + 0] |= (value0); \
- s->resp_fifo[(wd) + 1] |= (value1) << 8;
- case 0: /* No response */
- goto complete;
-
- case 1: /* R1, R4, R5 or R6 */
- if (rsplen < 4)
- goto timeout;
- goto complete;
-
- case 2: /* R2 */
- if (rsplen < 16)
- goto timeout;
- goto complete;
-
- case 3: /* R3 */
- if (rsplen < 4)
- goto timeout;
- goto complete;
-
- complete:
- for (i = 0; rsplen > 0; i ++, rsplen -= 2) {
- PXAMMCI_RESP(i, response[i * 2], response[i * 2 + 1]);
- }
- s->status |= STAT_END_CMDRES;
-
- if (!(s->cmdat & CMDAT_DATA_EN))
- s->active = 0;
- else
- s->bytesleft = s->numblk * s->blklen;
-
- s->resp_len = 0;
- break;
-
- timeout:
- s->active = 0;
- s->status |= STAT_TOUT_RES;
- break;
- }
-
- pxa2xx_mmci_fifo_update(s);
-}
-
-static uint64_t pxa2xx_mmci_read(void *opaque, hwaddr offset, unsigned size)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- uint32_t ret = 0;
-
- switch (offset) {
- case MMC_STRPCL:
- break;
- case MMC_STAT:
- ret = s->status;
- break;
- case MMC_CLKRT:
- ret = s->clkrt;
- break;
- case MMC_SPI:
- ret = s->spi;
- break;
- case MMC_CMDAT:
- ret = s->cmdat;
- break;
- case MMC_RESTO:
- ret = s->resp_tout;
- break;
- case MMC_RDTO:
- ret = s->read_tout;
- break;
- case MMC_BLKLEN:
- ret = s->blklen;
- break;
- case MMC_NUMBLK:
- ret = s->numblk;
- break;
- case MMC_PRTBUF:
- break;
- case MMC_I_MASK:
- ret = s->intmask;
- break;
- case MMC_I_REG:
- ret = s->intreq;
- break;
- case MMC_CMD:
- ret = s->cmd | 0x40;
- break;
- case MMC_ARGH:
- ret = s->arg >> 16;
- break;
- case MMC_ARGL:
- ret = s->arg & 0xffff;
- break;
- case MMC_RES:
- ret = (s->resp_len < 9) ? s->resp_fifo[s->resp_len++] : 0;
- break;
- case MMC_RXFIFO:
- while (size-- && s->rx_len) {
- ret |= s->rx_fifo[s->rx_start++] << (size << 3);
- s->rx_start &= 0x1f;
- s->rx_len --;
- }
- s->intreq &= ~INT_RXFIFO_REQ;
- pxa2xx_mmci_fifo_update(s);
- break;
- case MMC_RDWAIT:
- break;
- case MMC_BLKS_REM:
- ret = s->numblk;
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: incorrect register 0x%02" HWADDR_PRIx "\n",
- __func__, offset);
- }
- trace_pxa2xx_mmci_read(size, offset, ret);
-
- return ret;
-}
-
-static void pxa2xx_mmci_write(void *opaque,
- hwaddr offset, uint64_t value, unsigned size)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
-
- trace_pxa2xx_mmci_write(size, offset, value);
- switch (offset) {
- case MMC_STRPCL:
- if (value & STRPCL_STRT_CLK) {
- s->status |= STAT_CLK_EN;
- s->intreq &= ~INT_CLK_OFF;
-
- if (s->cmdreq && !(s->cmdat & CMDAT_STOP_TRAN)) {
- s->status &= STAT_CLK_EN;
- pxa2xx_mmci_wakequeues(s);
- }
- }
-
- if (value & STRPCL_STOP_CLK) {
- s->status &= ~STAT_CLK_EN;
- s->intreq |= INT_CLK_OFF;
- s->active = 0;
- }
-
- pxa2xx_mmci_int_update(s);
- break;
-
- case MMC_CLKRT:
- s->clkrt = value & 7;
- break;
-
- case MMC_SPI:
- s->spi = value & 0xf;
- if (value & SPI_SPI_MODE) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: attempted to use card in SPI mode\n", __func__);
- }
- break;
-
- case MMC_CMDAT:
- s->cmdat = value & 0x3dff;
- s->active = 0;
- s->cmdreq = 1;
- if (!(value & CMDAT_STOP_TRAN)) {
- s->status &= STAT_CLK_EN;
-
- if (s->status & STAT_CLK_EN)
- pxa2xx_mmci_wakequeues(s);
- }
-
- pxa2xx_mmci_int_update(s);
- break;
-
- case MMC_RESTO:
- s->resp_tout = value & 0x7f;
- break;
-
- case MMC_RDTO:
- s->read_tout = value & 0xffff;
- break;
-
- case MMC_BLKLEN:
- s->blklen = value & 0xfff;
- break;
-
- case MMC_NUMBLK:
- s->numblk = value & 0xffff;
- break;
-
- case MMC_PRTBUF:
- if (value & PRTBUF_PRT_BUF) {
- s->tx_start ^= 32;
- s->tx_len = 0;
- }
- pxa2xx_mmci_fifo_update(s);
- break;
-
- case MMC_I_MASK:
- s->intmask = value & 0x1fff;
- pxa2xx_mmci_int_update(s);
- break;
-
- case MMC_CMD:
- s->cmd = value & 0x3f;
- break;
-
- case MMC_ARGH:
- s->arg &= 0x0000ffff;
- s->arg |= value << 16;
- break;
-
- case MMC_ARGL:
- s->arg &= 0xffff0000;
- s->arg |= value & 0x0000ffff;
- break;
-
- case MMC_TXFIFO:
- while (size-- && s->tx_len < 0x20)
- s->tx_fifo[(s->tx_start + (s->tx_len ++)) & 0x1f] =
- (value >> (size << 3)) & 0xff;
- s->intreq &= ~INT_TXFIFO_REQ;
- pxa2xx_mmci_fifo_update(s);
- break;
-
- case MMC_RDWAIT:
- case MMC_BLKS_REM:
- break;
-
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: incorrect reg 0x%02" HWADDR_PRIx " "
- "(value 0x%08" PRIx64 ")\n", __func__, offset, value);
- }
-}
-
-static const MemoryRegionOps pxa2xx_mmci_ops = {
- .read = pxa2xx_mmci_read,
- .write = pxa2xx_mmci_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma)
-{
- DeviceState *dev;
-
- dev = sysbus_create_simple(TYPE_PXA2XX_MMCI, base, irq);
- qdev_connect_gpio_out_named(dev, "rx-dma", 0, rx_dma);
- qdev_connect_gpio_out_named(dev, "tx-dma", 0, tx_dma);
-
- return PXA2XX_MMCI(dev);
-}
-
-static void pxa2xx_mmci_set_inserted(DeviceState *dev, bool inserted)
-{
- PXA2xxMMCIState *s = PXA2XX_MMCI(dev);
-
- qemu_set_irq(s->inserted, inserted);
-}
-
-static void pxa2xx_mmci_set_readonly(DeviceState *dev, bool readonly)
-{
- PXA2xxMMCIState *s = PXA2XX_MMCI(dev);
-
- qemu_set_irq(s->readonly, readonly);
-}
-
-void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly,
- qemu_irq coverswitch)
-{
- DeviceState *dev = DEVICE(s);
-
- s->readonly = readonly;
- s->inserted = coverswitch;
-
- pxa2xx_mmci_set_inserted(dev, sdbus_get_inserted(&s->sdbus));
- pxa2xx_mmci_set_readonly(dev, sdbus_get_readonly(&s->sdbus));
-}
-
-static void pxa2xx_mmci_reset(DeviceState *d)
-{
- PXA2xxMMCIState *s = PXA2XX_MMCI(d);
-
- s->status = 0;
- s->clkrt = 0;
- s->spi = 0;
- s->cmdat = 0;
- s->resp_tout = 0;
- s->read_tout = 0;
- s->blklen = 0;
- s->numblk = 0;
- s->intmask = 0;
- s->intreq = 0;
- s->cmd = 0;
- s->arg = 0;
- s->active = 0;
- s->bytesleft = 0;
- s->tx_start = 0;
- s->tx_len = 0;
- s->rx_start = 0;
- s->rx_len = 0;
- s->resp_len = 0;
- s->cmdreq = 0;
- memset(s->tx_fifo, 0, sizeof(s->tx_fifo));
- memset(s->rx_fifo, 0, sizeof(s->rx_fifo));
- memset(s->resp_fifo, 0, sizeof(s->resp_fifo));
-}
-
-static void pxa2xx_mmci_instance_init(Object *obj)
-{
- PXA2xxMMCIState *s = PXA2XX_MMCI(obj);
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- DeviceState *dev = DEVICE(obj);
-
- memory_region_init_io(&s->iomem, obj, &pxa2xx_mmci_ops, s,
- "pxa2xx-mmci", 0x00100000);
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
- qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1);
- qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1);
-
- qbus_init(&s->sdbus, sizeof(s->sdbus),
- TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus");
-}
-
-static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->vmsd = &vmstate_pxa2xx_mmci;
- dc->reset = pxa2xx_mmci_reset;
-}
-
-static void pxa2xx_mmci_bus_class_init(ObjectClass *klass, void *data)
-{
- SDBusClass *sbc = SD_BUS_CLASS(klass);
-
- sbc->set_inserted = pxa2xx_mmci_set_inserted;
- sbc->set_readonly = pxa2xx_mmci_set_readonly;
-}
-
-static const TypeInfo pxa2xx_mmci_types[] = {
- {
- .name = TYPE_PXA2XX_MMCI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(PXA2xxMMCIState),
- .instance_init = pxa2xx_mmci_instance_init,
- .class_init = pxa2xx_mmci_class_init,
- },
- {
- .name = TYPE_PXA2XX_MMCI_BUS,
- .parent = TYPE_SD_BUS,
- .instance_size = sizeof(SDBus),
- .class_init = pxa2xx_mmci_bus_class_init,
- },
-};
-
-DEFINE_TYPES(pxa2xx_mmci_types)
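
The controller model deleted above drives the card entirely through the SDBus wrappers visible in its FIFO and command paths (sdbus_do_command(), sdbus_write_byte(), sdbus_read_byte()); after the hw/sd/sd.c changes below, those wrappers are the only way left to reach a card. A minimal sketch of that access pattern, where MyCtrlState and send_one_block() are hypothetical stand-ins for a real controller:

    #include "qemu/osdep.h"
    #include "hw/sd/sd.h"

    typedef struct MyCtrlState {
        SDBus sdbus;                    /* the "sd-bus" the card sits on */
    } MyCtrlState;

    static void send_one_block(MyCtrlState *s, uint8_t cmd, uint32_t arg,
                               const uint8_t *buf, size_t len)
    {
        SDRequest request = { .cmd = cmd, .arg = arg, .crc = 0 };
        uint8_t response[16];

        /* The return value is the length of the response placed in
         * response[], as used by pxa2xx_mmci_wakequeues() above. */
        int rsplen = sdbus_do_command(&s->sdbus, &request, response);
        (void)rsplen;

        /* Data moves one byte at a time, as in the FIFO loops above. */
        for (size_t i = 0; i < len; i++) {
            sdbus_write_byte(&s->sdbus, buf[i]);
        }
    }
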
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index 07cb97d..c275fdd 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -37,9 +37,8 @@
#include "qemu/cutils.h"
#include "hw/irq.h"
#include "hw/registerfields.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/sd/sd.h"
-#include "hw/sd/sdcard_legacy.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
@@ -71,6 +70,14 @@ typedef enum {
sd_illegal = -2,
} sd_rsp_type_t;
+typedef enum {
+ sd_spi,
+ sd_bc, /* broadcast -- no response */
+ sd_bcr, /* broadcast with response */
+ sd_ac, /* addressed -- no data transfer */
+ sd_adtc, /* addressed with data transfer */
+} sd_cmd_type_t;
+
enum SDCardModes {
sd_inactive,
sd_card_identification_mode,
@@ -112,10 +119,6 @@ typedef struct SDProto {
struct SDState {
DeviceState parent_obj;
- /* If true, created by sd_init() for a non-qdevified caller */
- /* TODO purge them with fire */
- bool me_no_qdev_me_kill_mammoth_with_rocks;
-
/* SD Memory Card Registers */
uint32_t ocr;
uint8_t scr[8];
@@ -169,10 +172,7 @@ struct SDState {
uint32_t data_offset;
size_t data_size;
uint8_t data[512];
- qemu_irq readonly_cb;
- qemu_irq inserted_cb;
QEMUTimer *ocr_power_timer;
- bool enable;
uint8_t dat_lines;
bool cmd_line;
};
@@ -291,12 +291,12 @@ static const char *sd_acmd_name(SDState *sd, uint8_t cmd)
static uint8_t sd_get_dat_lines(SDState *sd)
{
- return sd->enable ? sd->dat_lines : 0;
+ return sd->dat_lines;
}
static bool sd_get_cmd_line(SDState *sd)
{
- return sd->enable ? sd->cmd_line : false;
+ return sd->cmd_line;
}
static void sd_set_voltage(SDState *sd, uint16_t millivolts)
@@ -774,19 +774,12 @@ static uint32_t sd_blk_len(SDState *sd)
*/
static uint32_t sd_bootpart_offset(SDState *sd)
{
- bool partitions_enabled;
unsigned partition_access;
if (!sd->boot_part_size || !sd_is_emmc(sd)) {
return 0;
}
- partitions_enabled = sd->ext_csd[EXT_CSD_PART_CONFIG]
- & EXT_CSD_PART_CONFIG_EN_MASK;
- if (!partitions_enabled) {
- return 0;
- }
-
partition_access = sd->ext_csd[EXT_CSD_PART_CONFIG]
& EXT_CSD_PART_CONFIG_ACC_MASK;
switch (partition_access) {
@@ -833,7 +826,9 @@ static void sd_reset(DeviceState *dev)
sect = 0;
}
size = sect << HWBLOCK_SHIFT;
- size -= sd_bootpart_offset(sd);
+ if (sd_is_emmc(sd)) {
+ size -= sd->boot_part_size * 2;
+ }
sect = sd_addr_to_wpnum(size) + 1;
@@ -889,17 +884,10 @@ static void sd_cardchange(void *opaque, bool load, Error **errp)
trace_sdcard_ejected();
}
- if (sd->me_no_qdev_me_kill_mammoth_with_rocks) {
- qemu_set_irq(sd->inserted_cb, inserted);
- if (inserted) {
- qemu_set_irq(sd->readonly_cb, readonly);
- }
- } else {
- sdbus = SD_BUS(qdev_get_parent_bus(dev));
- sdbus_set_inserted(sdbus, inserted);
- if (inserted) {
- sdbus_set_readonly(sdbus, readonly);
- }
+ sdbus = SD_BUS(qdev_get_parent_bus(dev));
+ sdbus_set_inserted(sdbus, inserted);
+ if (inserted) {
+ sdbus_set_readonly(sdbus, readonly);
}
}
@@ -987,7 +975,7 @@ static const VMStateDescription sd_vmstate = {
VMSTATE_UINT32(data_offset, SDState),
VMSTATE_UINT8_ARRAY(data, SDState, 512),
VMSTATE_UNUSED_V(1, 512),
- VMSTATE_BOOL(enable, SDState),
+ VMSTATE_UNUSED(1),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
@@ -997,48 +985,6 @@ static const VMStateDescription sd_vmstate = {
},
};
-/* Legacy initialization function for use by non-qdevified callers */
-SDState *sd_init(BlockBackend *blk, bool is_spi)
-{
- Object *obj;
- DeviceState *dev;
- SDState *sd;
- Error *err = NULL;
-
- obj = object_new(is_spi ? TYPE_SD_CARD_SPI : TYPE_SD_CARD);
- dev = DEVICE(obj);
- if (!qdev_prop_set_drive_err(dev, "drive", blk, &err)) {
- error_reportf_err(err, "sd_init failed: ");
- return NULL;
- }
-
- /*
- * Realizing the device properly would put it into the QOM
- * composition tree even though it is not plugged into an
- * appropriate bus. That's a no-no. Hide the device from
- * QOM/qdev, and call its qdev realize callback directly.
- */
- object_ref(obj);
- object_unparent(obj);
- sd_realize(dev, &err);
- if (err) {
- error_reportf_err(err, "sd_init failed: ");
- return NULL;
- }
-
- sd = SD_CARD(dev);
- sd->me_no_qdev_me_kill_mammoth_with_rocks = true;
- return sd;
-}
-
-void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert)
-{
- sd->readonly_cb = readonly;
- sd->inserted_cb = insert;
- qemu_set_irq(readonly, sd->blk ? !blk_is_writable(sd->blk) : 0);
- qemu_set_irq(insert, sd->blk ? blk_is_inserted(sd->blk) : 0);
-}
-
static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len)
{
trace_sdcard_read_block(addr, len);
@@ -2193,13 +2139,13 @@ static bool cmd_valid_while_locked(SDState *sd, unsigned cmd)
return cmd_class == 0 || cmd_class == 7;
}
-int sd_do_command(SDState *sd, SDRequest *req,
- uint8_t *response) {
+static int sd_do_command(SDState *sd, SDRequest *req,
+ uint8_t *response) {
int last_state;
sd_rsp_type_t rtype;
int rsplen;
- if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) {
+ if (!sd->blk || !blk_is_inserted(sd->blk)) {
return 0;
}
@@ -2346,12 +2292,13 @@ static bool sd_generic_read_byte(SDState *sd, uint8_t *value)
return false;
}
-void sd_write_byte(SDState *sd, uint8_t value)
+static void sd_write_byte(SDState *sd, uint8_t value)
{
int i;
- if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable)
+ if (!sd->blk || !blk_is_inserted(sd->blk)) {
return;
+ }
if (sd->state != sd_receivingdata_state) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -2475,23 +2422,26 @@ void sd_write_byte(SDState *sd, uint8_t value)
}
}
-uint8_t sd_read_byte(SDState *sd)
+static uint8_t sd_read_byte(SDState *sd)
{
/* TODO: Append CRCs */
+ const uint8_t dummy_byte = 0x00;
uint8_t ret;
uint32_t io_len;
- if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable)
- return 0x00;
+ if (!sd->blk || !blk_is_inserted(sd->blk)) {
+ return dummy_byte;
+ }
if (sd->state != sd_sendingdata_state) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: not in Sending-Data state\n", __func__);
- return 0x00;
+ return dummy_byte;
}
- if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
- return 0x00;
+ if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) {
+ return dummy_byte;
+ }
io_len = sd_blk_len(sd);
@@ -2517,7 +2467,7 @@ uint8_t sd_read_byte(SDState *sd)
if (sd->data_offset == 0) {
if (!address_in_range(sd, "READ_MULTIPLE_BLOCK",
sd->data_start, io_len)) {
- return 0x00;
+ return dummy_byte;
}
sd_blk_read(sd, sd->data_start, io_len);
}
@@ -2538,7 +2488,9 @@ uint8_t sd_read_byte(SDState *sd)
break;
default:
- g_assert_not_reached();
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: DAT read illegal for command %s\n",
+ __func__, sd->last_cmd_name);
+ return dummy_byte;
}
return ret;
@@ -2554,11 +2506,6 @@ static bool sd_data_ready(SDState *sd)
return sd->state == sd_sendingdata_state;
}
-void sd_enable(SDState *sd, bool enable)
-{
- sd->enable = enable;
-}
-
static const SDProto sd_proto_spi = {
.name = "SPI",
.cmd = {
@@ -2718,7 +2665,6 @@ static void sd_instance_init(Object *obj)
sd->proto = sc->proto;
sd->last_cmd_name = "UNSET";
- sd->enable = true;
sd->ocr_power_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sd_ocr_powerup, sd);
}
@@ -2791,31 +2737,28 @@ static void emmc_realize(DeviceState *dev, Error **errp)
sd_realize(dev, errp);
}
-static Property sdmmc_common_properties[] = {
+static const Property sdmmc_common_properties[] = {
DEFINE_PROP_DRIVE("drive", SDState, blk),
- DEFINE_PROP_END_OF_LIST()
};
-static Property sd_properties[] = {
+static const Property sd_properties[] = {
DEFINE_PROP_UINT8("spec_version", SDState,
spec_version, SD_PHY_SPECv3_01_VERS),
- DEFINE_PROP_END_OF_LIST()
};
-static Property emmc_properties[] = {
+static const Property emmc_properties[] = {
DEFINE_PROP_UINT64("boot-partition-size", SDState, boot_part_size, 0),
DEFINE_PROP_UINT8("boot-config", SDState, boot_config, 0x0),
- DEFINE_PROP_END_OF_LIST()
};
-static void sdmmc_common_class_init(ObjectClass *klass, void *data)
+static void sdmmc_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SDCardClass *sc = SDMMC_COMMON_CLASS(klass);
device_class_set_props(dc, sdmmc_common_properties);
dc->vmsd = &sd_vmstate;
- dc->reset = sd_reset;
+ device_class_set_legacy_reset(dc, sd_reset);
dc->bus_type = TYPE_SD_BUS;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
@@ -2827,12 +2770,11 @@ static void sdmmc_common_class_init(ObjectClass *klass, void *data)
sc->read_byte = sd_read_byte;
sc->receive_ready = sd_receive_ready;
sc->data_ready = sd_data_ready;
- sc->enable = sd_enable;
sc->get_inserted = sd_get_inserted;
sc->get_readonly = sd_get_readonly;
}
-static void sd_class_init(ObjectClass *klass, void *data)
+static void sd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SDCardClass *sc = SDMMC_COMMON_CLASS(klass);
@@ -2851,7 +2793,7 @@ static void sd_class_init(ObjectClass *klass, void *data)
* board to ensure that ssi transfers only occur when the chip select
* is asserted.
*/
-static void sd_spi_class_init(ObjectClass *klass, void *data)
+static void sd_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SDCardClass *sc = SDMMC_COMMON_CLASS(klass);
@@ -2860,7 +2802,7 @@ static void sd_spi_class_init(ObjectClass *klass, void *data)
sc->proto = &sd_proto_spi;
}
-static void emmc_class_init(ObjectClass *klass, void *data)
+static void emmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SDCardClass *sc = SDMMC_COMMON_CLASS(klass);
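
With sd_init() and sd_set_cb() gone, a card is created like any other qdev device: instantiate TYPE_SD_CARD, set its "drive" property, and realize it on the controller's "sd-bus". A sketch of that path, assuming an already-realized controller ("ctrl") and a BlockBackend ("blk") as placeholders:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "hw/qdev-properties.h"
    #include "hw/sd/sd.h"
    #include "system/block-backend.h"

    static void plug_sd_card(DeviceState *ctrl, BlockBackend *blk)
    {
        DeviceState *card = qdev_new(TYPE_SD_CARD);
        BusState *bus = qdev_get_child_bus(ctrl, "sd-bus");

        /* Attach the backing image, then plug the card onto the bus. */
        qdev_prop_set_drive_err(card, "drive", blk, &error_fatal);
        qdev_realize_and_unref(card, bus, &error_fatal);
    }
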
diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h
index 5f3765f..9f768c4 100644
--- a/hw/sd/sdhci-internal.h
+++ b/hw/sd/sdhci-internal.h
@@ -322,6 +322,6 @@ void sdhci_initfn(SDHCIState *s);
void sdhci_uninitfn(SDHCIState *s);
void sdhci_common_realize(SDHCIState *s, Error **errp);
void sdhci_common_unrealize(SDHCIState *s);
-void sdhci_common_class_init(ObjectClass *klass, void *data);
+void sdhci_common_class_init(ObjectClass *klass, const void *data);
#endif
diff --git a/hw/sd/sdhci-pci.c b/hw/sd/sdhci-pci.c
index 9b7bee8..c18b91f 100644
--- a/hw/sd/sdhci-pci.c
+++ b/hw/sd/sdhci-pci.c
@@ -18,13 +18,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
+#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sd/sdhci.h"
#include "sdhci-internal.h"
-static Property sdhci_pci_properties[] = {
+static const Property sdhci_pci_properties[] = {
DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState),
- DEFINE_PROP_END_OF_LIST(),
};
static void sdhci_pci_realize(PCIDevice *dev, Error **errp)
@@ -49,11 +49,12 @@ static void sdhci_pci_exit(PCIDevice *dev)
{
SDHCIState *s = PCI_SDHCI(dev);
+ qemu_free_irq(s->irq);
sdhci_common_unrealize(s);
sdhci_uninitfn(s);
}
-static void sdhci_pci_class_init(ObjectClass *klass, void *data)
+static void sdhci_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -74,7 +75,7 @@ static const TypeInfo sdhci_pci_types[] = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(SDHCIState),
.class_init = sdhci_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
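
The property tables in this series drop DEFINE_PROP_END_OF_LIST() and become const arrays, which assumes device_class_set_props() now derives the element count from the array itself. A minimal sketch of the resulting shape, with FooState and the "bar" property as hypothetical names:

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"

    typedef struct FooState {
        DeviceState parent_obj;
        uint8_t bar;
    } FooState;

    static const Property foo_properties[] = {
        DEFINE_PROP_UINT8("bar", FooState, bar, 0),
    };

    static void foo_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        /* No end-of-list marker: the helper sees the whole array. */
        device_class_set_props(dc, foo_properties);
    }
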
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index d02c3e3..226ff13 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -30,14 +30,13 @@
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "hw/sd/sdhci.h"
#include "migration/vmstate.h"
#include "sdhci-internal.h"
#include "qemu/log.h"
-#include "qemu/module.h"
#include "trace.h"
#include "qom/object.h"
@@ -234,7 +233,7 @@ static void sdhci_raise_insertion_irq(void *opaque)
if (s->norintsts & SDHC_NIS_REMOVE) {
timer_mod(s->insert_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
} else {
s->prnsts = 0x1ff0000;
if (s->norintstsen & SDHC_NISEN_INSERT) {
@@ -252,7 +251,7 @@ static void sdhci_set_inserted(DeviceState *dev, bool level)
if ((s->norintsts & SDHC_NIS_REMOVE) && level) {
/* Give target some time to notice card ejection */
timer_mod(s->insert_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
} else {
if (level) {
s->prnsts = 0x1ff0000;
@@ -275,6 +274,10 @@ static void sdhci_set_readonly(DeviceState *dev, bool level)
{
SDHCIState *s = (SDHCIState *)dev;
+ if (s->wp_inverted) {
+ level = !level;
+ }
+
if (level) {
s->prnsts &= ~SDHC_WRITE_PROTECT;
} else {
@@ -290,9 +293,11 @@ static void sdhci_reset(SDHCIState *s)
timer_del(s->insert_timer);
timer_del(s->transfer_timer);
- /* Set all registers to 0. Capabilities/Version registers are not cleared
+ /*
+ * Set all registers to 0. Capabilities/Version registers are not cleared
* and assumed to always preserve their value, given to them during
- * initialization */
+ * initialization
+ */
memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad);
/* Reset other state based on current card insertion/readonly status */
@@ -302,11 +307,16 @@ static void sdhci_reset(SDHCIState *s)
s->data_count = 0;
s->stopped_state = sdhc_not_stopped;
s->pending_insert_state = false;
+ if (s->vendor == SDHCI_VENDOR_FSL) {
+ s->norintstsen = 0x013f;
+ s->errintstsen = 0x117f;
+ }
}
static void sdhci_poweron_reset(DeviceState *dev)
{
- /* QOM (ie power-on) reset. This is identical to reset
+ /*
+ * QOM (ie power-on) reset. This is identical to reset
* commanded via device register apart from handling of the
* 'pending insert on powerup' quirk.
*/
@@ -446,8 +456,10 @@ static void sdhci_read_block_from_card(SDHCIState *s)
s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
}
- /* If stop at block gap request was set and it's not the last block of
- * data - generate Block Event interrupt */
+ /*
+ * If stop at block gap request was set and it's not the last block of
+ * data - generate Block Event interrupt
+ */
if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) &&
s->blkcnt != 1) {
s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
@@ -549,8 +561,10 @@ static void sdhci_write_block_to_card(SDHCIState *s)
sdhci_update_irq(s);
}
-/* Write @size bytes of @value data to host controller @s Buffer Data Port
- * register */
+/*
+ * Write @size bytes of @value data to host controller @s Buffer Data Port
+ * register
+ */
static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
{
unsigned i;
@@ -595,9 +609,11 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
return;
}
- /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
+ /*
+ * XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
* possible stop at page boundary if initial address is not page aligned,
- * allow them to work properly */
+ * allow them to work properly
+ */
if ((s->sdmasysad % boundary_chk) == 0) {
page_aligned = true;
}
@@ -657,12 +673,13 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
}
}
+ if (s->norintstsen & SDHC_NISEN_DMA) {
+ s->norintsts |= SDHC_NIS_DMA;
+ }
+
if (s->blkcnt == 0) {
sdhci_end_transfer(s);
} else {
- if (s->norintstsen & SDHC_NISEN_DMA) {
- s->norintsts |= SDHC_NIS_DMA;
- }
sdhci_update_irq(s);
}
}
@@ -683,9 +700,22 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s)
}
s->blkcnt--;
+ if (s->norintstsen & SDHC_NISEN_DMA) {
+ s->norintsts |= SDHC_NIS_DMA;
+ }
+
sdhci_end_transfer(s);
}
+static void sdhci_sdma_transfer(SDHCIState *s)
+{
+ if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
+ sdhci_sdma_transfer_single_block(s);
+ } else {
+ sdhci_sdma_transfer_multi_blocks(s);
+ }
+}
+
typedef struct ADMADescr {
hwaddr addr;
uint16_t length;
@@ -703,7 +733,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2),
MEMTXATTRS_UNSPECIFIED);
adma2 = le64_to_cpu(adma2);
- /* The spec does not specify endianness of descriptor table.
+ /*
+ * The spec does not specify endianness of descriptor table.
* We currently assume that it is LE.
*/
dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull;
@@ -747,7 +778,7 @@ static void sdhci_do_adma(SDHCIState *s)
const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
const MemTxAttrs attrs = { .memory = true };
ADMADescr dscr = {};
- MemTxResult res;
+ MemTxResult res = MEMTX_ERROR;
int i;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {
@@ -846,6 +877,7 @@ static void sdhci_do_adma(SDHCIState *s)
}
}
if (res != MEMTX_OK) {
+ s->data_count = 0;
if (s->errintstsen & SDHC_EISEN_ADMAERR) {
trace_sdhci_error("Set ADMA error flag");
s->errintsts |= SDHC_EIS_ADMAERR;
@@ -915,12 +947,7 @@ static void sdhci_data_transfer(void *opaque)
if (s->trnmod & SDHC_TRNS_DMA) {
switch (SDHC_DMA_TYPE(s->hostctl1)) {
case SDHC_CTRL_SDMA:
- if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
- sdhci_sdma_transfer_single_block(s);
- } else {
- sdhci_sdma_transfer_multi_blocks(s);
- }
-
+ sdhci_sdma_transfer(s);
break;
case SDHC_CTRL_ADMA1_32:
if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) {
@@ -977,8 +1004,10 @@ static bool sdhci_can_issue_command(SDHCIState *s)
return true;
}
-/* The Buffer Data Port register must be accessed in sequential and
- * continuous manner */
+/*
+ * The Buffer Data Port register must be accessed in sequential and
+ * continuous manner
+ */
static inline bool
sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
{
@@ -1162,11 +1191,7 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
if (!(mask & 0xFF000000) && s->blkcnt &&
(s->blksize & BLOCK_SIZE_MASK) &&
SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
- if (s->trnmod & SDHC_TRNS_MULTI) {
- sdhci_sdma_transfer_multi_blocks(s);
- } else {
- sdhci_sdma_transfer_single_block(s);
- }
+ sdhci_sdma_transfer(s);
}
}
break;
@@ -1206,8 +1231,10 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
MASKED_WRITE(s->argument, mask, value);
break;
case SDHC_TRNMOD:
- /* DMA can be enabled only if it is supported as indicated by
- * capabilities register */
+ /*
+ * DMA can be enabled only if it is supported as indicated by
+ * capabilities register
+ */
if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) {
value &= ~SDHC_TRNS_DMA;
}
@@ -1279,8 +1306,10 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
} else {
s->norintsts &= ~SDHC_NIS_ERR;
}
- /* Quirk for Raspberry Pi: pending card insert interrupt
- * appears when first enabled after power on */
+ /*
+ * Quirk for Raspberry Pi: pending card insert interrupt
+ * appears when first enabled after power on
+ */
if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) {
assert(s->pending_insert_quirk);
s->norintsts |= SDHC_NIS_INSERT;
@@ -1396,8 +1425,10 @@ void sdhci_initfn(SDHCIState *s)
{
qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");
- s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
- s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);
+ s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ sdhci_raise_insertion_irq, s);
+ s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ sdhci_data_transfer, s);
s->io_ops = &sdhci_mmio_le_ops;
}
@@ -1445,11 +1476,13 @@ void sdhci_common_realize(SDHCIState *s, Error **errp)
void sdhci_common_unrealize(SDHCIState *s)
{
- /* This function is expected to be called only once for each class:
+ /*
+ * This function is expected to be called only once for each class:
* - SysBus: via DeviceClass->unrealize(),
* - PCI: via PCIDeviceClass->exit().
* However to avoid double-free and/or use-after-free we still nullify
- * this variable (better safe than sorry!). */
+ * this variable (better safe than sorry!).
+ */
g_free(s->fifo_buffer);
s->fifo_buffer = NULL;
}
@@ -1513,24 +1546,25 @@ const VMStateDescription sdhci_vmstate = {
},
};
-void sdhci_common_class_init(ObjectClass *klass, void *data)
+void sdhci_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->vmsd = &sdhci_vmstate;
- dc->reset = sdhci_poweron_reset;
+ device_class_set_legacy_reset(dc, sdhci_poweron_reset);
}
/* --- qdev SysBus --- */
-static Property sdhci_sysbus_properties[] = {
+static const Property sdhci_sysbus_properties[] = {
DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState),
DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk,
false),
DEFINE_PROP_LINK("dma", SDHCIState,
dma_mr, TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BOOL("wp-inverted", SDHCIState,
+ wp_inverted, false),
};
static void sdhci_sysbus_init(Object *obj)
@@ -1586,7 +1620,7 @@ static void sdhci_sysbus_unrealize(DeviceState *dev)
}
}
-static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
+static void sdhci_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1597,18 +1631,9 @@ static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
sdhci_common_class_init(klass, data);
}
-static const TypeInfo sdhci_sysbus_info = {
- .name = TYPE_SYSBUS_SDHCI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SDHCIState),
- .instance_init = sdhci_sysbus_init,
- .instance_finalize = sdhci_sysbus_finalize,
- .class_init = sdhci_sysbus_class_init,
-};
-
/* --- qdev bus master --- */
-static void sdhci_bus_class_init(ObjectClass *klass, void *data)
+static void sdhci_bus_class_init(ObjectClass *klass, const void *data)
{
SDBusClass *sbc = SD_BUS_CLASS(klass);
@@ -1616,13 +1641,6 @@ static void sdhci_bus_class_init(ObjectClass *klass, void *data)
sbc->set_readonly = sdhci_set_readonly;
}
-static const TypeInfo sdhci_bus_info = {
- .name = TYPE_SDHCI_BUS,
- .parent = TYPE_SD_BUS,
- .instance_size = sizeof(SDBus),
- .class_init = sdhci_bus_class_init,
-};
-
/* --- qdev i.MX eSDHC --- */
#define USDHC_MIX_CTRL 0x48
@@ -1717,16 +1735,10 @@ usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
case USDHC_VENDOR_SPEC:
s->vendor_spec = value;
- switch (s->vendor) {
- case SDHCI_VENDOR_IMX:
- if (value & USDHC_IMX_FRC_SDCLK_ON) {
- s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF;
- } else {
- s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF;
- }
- break;
- default:
- break;
+ if (value & USDHC_IMX_FRC_SDCLK_ON) {
+ s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF;
+ } else {
+ s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF;
}
break;
@@ -1881,12 +1893,6 @@ static void imx_usdhc_init(Object *obj)
s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
}
-static const TypeInfo imx_usdhc_info = {
- .name = TYPE_IMX_USDHC,
- .parent = TYPE_SYSBUS_SDHCI,
- .instance_init = imx_usdhc_init,
-};
-
/* --- qdev Samsung s3c --- */
#define S3C_SDHCI_CONTROL2 0x80
@@ -1945,18 +1951,31 @@ static void sdhci_s3c_init(Object *obj)
s->io_ops = &sdhci_s3c_mmio_ops;
}
-static const TypeInfo sdhci_s3c_info = {
- .name = TYPE_S3C_SDHCI ,
- .parent = TYPE_SYSBUS_SDHCI,
- .instance_init = sdhci_s3c_init,
+static const TypeInfo sdhci_types[] = {
+ {
+ .name = TYPE_SDHCI_BUS,
+ .parent = TYPE_SD_BUS,
+ .instance_size = sizeof(SDBus),
+ .class_init = sdhci_bus_class_init,
+ },
+ {
+ .name = TYPE_SYSBUS_SDHCI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SDHCIState),
+ .instance_init = sdhci_sysbus_init,
+ .instance_finalize = sdhci_sysbus_finalize,
+ .class_init = sdhci_sysbus_class_init,
+ },
+ {
+ .name = TYPE_IMX_USDHC,
+ .parent = TYPE_SYSBUS_SDHCI,
+ .instance_init = imx_usdhc_init,
+ },
+ {
+ .name = TYPE_S3C_SDHCI,
+ .parent = TYPE_SYSBUS_SDHCI,
+ .instance_init = sdhci_s3c_init,
+ },
};
-static void sdhci_register_types(void)
-{
- type_register_static(&sdhci_sysbus_info);
- type_register_static(&sdhci_bus_info);
- type_register_static(&imx_usdhc_info);
- type_register_static(&sdhci_s3c_info);
-}
-
-type_init(sdhci_register_types)
+DEFINE_TYPES(sdhci_types)
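
One user-visible addition in the sdhci.c hunks above is the "wp-inverted" sysbus-SDHCI property, which flips the sense of the write-protect line in sdhci_set_readonly(). A sketch of how a board might set it when wiring the controller; the base address, the IRQ, and the assumption that the default capabilities suffice at realize time are all placeholders, not taken from this patch:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "hw/qdev-properties.h"
    #include "hw/sysbus.h"
    #include "hw/sd/sdhci.h"

    static void create_sdhci(hwaddr base, qemu_irq irq)
    {
        DeviceState *dev = qdev_new(TYPE_SYSBUS_SDHCI);

        /* This board wiring inverts the card's write-protect signal. */
        qdev_prop_set_bit(dev, "wp-inverted", true);
        sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
    }
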
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index 2dd070f..6c90a86 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -16,7 +16,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "hw/ssi/ssi.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
@@ -389,7 +389,7 @@ static void ssi_sd_reset(DeviceState *dev)
s->stopping = 0;
}
-static void ssi_sd_class_init(ObjectClass *klass, void *data)
+static void ssi_sd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass);
@@ -398,7 +398,7 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
k->transfer = ssi_sd_transfer;
k->cs_polarity = SSI_CS_LOW;
dc->vmsd = &vmstate_ssi_sd;
- dc->reset = ssi_sd_reset;
+ device_class_set_legacy_reset(dc, ssi_sd_reset);
/* Reason: GPIO chip-select line should be wired up */
dc->user_creatable = false;
}
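
Every dc->reset assignment touched by this series becomes a device_class_set_legacy_reset() call, which registers the old DeviceState-style handler through the Resettable machinery instead of the raw field. A sketch of the pattern, with foo_reset() and foo_class_init() as hypothetical names:

    #include "qemu/osdep.h"
    #include "hw/qdev-core.h"

    static void foo_reset(DeviceState *dev)
    {
        /* return the device to its power-on state */
    }

    static void foo_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        /* Replaces the former "dc->reset = foo_reset;" assignment. */
        device_class_set_legacy_reset(dc, foo_reset);
    }
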
diff --git a/hw/sd/trace-events b/hw/sd/trace-events
index 43671dc..db06442 100644
--- a/hw/sd/trace-events
+++ b/hw/sd/trace-events
@@ -60,10 +60,6 @@ sdcard_set_voltage(uint16_t millivolts) "%u mV"
sdcard_ext_csd_update(unsigned index, uint8_t oval, uint8_t nval) "index %u: 0x%02x -> 0x%02x"
sdcard_switch(unsigned access, unsigned index, unsigned value, unsigned set) "SWITCH acc:%u idx:%u val:%u set:%u"
-# pxa2xx_mmci.c
-pxa2xx_mmci_read(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x"
-pxa2xx_mmci_write(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x"
-
# pl181.c
pl181_command_send(uint8_t cmd, uint32_t arg) "sending CMD%02d arg 0x%08" PRIx32
pl181_command_sent(void) "command sent"
diff --git a/hw/sensor/adm1266.c b/hw/sensor/adm1266.c
index 25b87a7..9017ce6 100644
--- a/hw/sensor/adm1266.c
+++ b/hw/sensor/adm1266.c
@@ -223,7 +223,7 @@ static void adm1266_init(Object *obj)
}
}
-static void adm1266_class_init(ObjectClass *klass, void *data)
+static void adm1266_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/sensor/adm1272.c b/hw/sensor/adm1272.c
index 3fc1e5d..0c739aa 100644
--- a/hw/sensor/adm1272.c
+++ b/hw/sensor/adm1272.c
@@ -511,7 +511,7 @@ static void adm1272_init(Object *obj)
}
-static void adm1272_class_init(ObjectClass *klass, void *data)
+static void adm1272_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c
index 01c776d..bcf6154 100644
--- a/hw/sensor/dps310.c
+++ b/hw/sensor/dps310.c
@@ -197,7 +197,7 @@ static const VMStateDescription vmstate_dps310 = {
}
};
-static void dps310_class_init(ObjectClass *klass, void *data)
+static void dps310_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -205,7 +205,7 @@ static void dps310_class_init(ObjectClass *klass, void *data)
k->event = dps310_event;
k->recv = dps310_rx;
k->send = dps310_tx;
- dc->reset = dps310_reset;
+ device_class_set_legacy_reset(dc, dps310_reset);
dc->vmsd = &vmstate_dps310;
}
diff --git a/hw/sensor/emc141x.c b/hw/sensor/emc141x.c
index 9507955..7b2ce38 100644
--- a/hw/sensor/emc141x.c
+++ b/hw/sensor/emc141x.c
@@ -265,19 +265,19 @@ static void emc141x_initfn(Object *obj)
emc141x_set_temperature, NULL, NULL);
}
-static void emc141x_class_init(ObjectClass *klass, void *data)
+static void emc141x_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- dc->reset = emc141x_reset;
+ device_class_set_legacy_reset(dc, emc141x_reset);
k->event = emc141x_event;
k->recv = emc141x_rx;
k->send = emc141x_tx;
dc->vmsd = &vmstate_emc141x;
}
-static void emc1413_class_init(ObjectClass *klass, void *data)
+static void emc1413_class_init(ObjectClass *klass, const void *data)
{
EMC141XClass *ec = EMC141X_CLASS(klass);
@@ -286,7 +286,7 @@ static void emc1413_class_init(ObjectClass *klass, void *data)
ec->sensors_count = 3;
}
-static void emc1414_class_init(ObjectClass *klass, void *data)
+static void emc1414_class_init(ObjectClass *klass, const void *data)
{
EMC141XClass *ec = EMC141X_CLASS(klass);
diff --git a/hw/sensor/isl_pmbus_vr.c b/hw/sensor/isl_pmbus_vr.c
index 304a66e..e8d29b0 100644
--- a/hw/sensor/isl_pmbus_vr.c
+++ b/hw/sensor/isl_pmbus_vr.c
@@ -233,7 +233,7 @@ static void raa228000_init(Object *obj)
isl_pmbus_vr_add_props(obj, flags, 1);
}
-static void isl_pmbus_vr_class_init(ObjectClass *klass, void *data,
+static void isl_pmbus_vr_class_init(ObjectClass *klass, const void *data,
uint8_t pages)
{
PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass);
@@ -242,7 +242,7 @@ static void isl_pmbus_vr_class_init(ObjectClass *klass, void *data,
k->device_num_pages = pages;
}
-static void isl69260_class_init(ObjectClass *klass, void *data)
+static void isl69260_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -251,7 +251,7 @@ static void isl69260_class_init(ObjectClass *klass, void *data)
isl_pmbus_vr_class_init(klass, data, 2);
}
-static void raa228000_class_init(ObjectClass *klass, void *data)
+static void raa228000_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -260,7 +260,7 @@ static void raa228000_class_init(ObjectClass *klass, void *data)
isl_pmbus_vr_class_init(klass, data, 1);
}
-static void raa229004_class_init(ObjectClass *klass, void *data)
+static void raa229004_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -269,7 +269,7 @@ static void raa229004_class_init(ObjectClass *klass, void *data)
isl_pmbus_vr_class_init(klass, data, 2);
}
-static void isl69259_class_init(ObjectClass *klass, void *data)
+static void isl69259_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c
index 343ff98..f9e501d 100644
--- a/hw/sensor/lsm303dlhc_mag.c
+++ b/hw/sensor/lsm303dlhc_mag.c
@@ -530,12 +530,12 @@ static void lsm303dlhc_mag_initfn(Object *obj)
/*
* Set the virtual method pointers (bus state change, tx/rx, etc.).
*/
-static void lsm303dlhc_mag_class_init(ObjectClass *klass, void *data)
+static void lsm303dlhc_mag_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- dc->reset = lsm303dlhc_mag_reset;
+ device_class_set_legacy_reset(dc, lsm303dlhc_mag_reset);
dc->vmsd = &vmstate_lsm303dlhc_mag;
k->event = lsm303dlhc_mag_event;
k->recv = lsm303dlhc_mag_recv;
diff --git a/hw/sensor/max31785.c b/hw/sensor/max31785.c
index 3577a7c..c755814 100644
--- a/hw/sensor/max31785.c
+++ b/hw/sensor/max31785.c
@@ -544,7 +544,7 @@ static void max31785_init(Object *obj)
}
}
-static void max31785_class_init(ObjectClass *klass, void *data)
+static void max31785_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/sensor/max34451.c b/hw/sensor/max34451.c
index 93b53f3..a369d2b 100644
--- a/hw/sensor/max34451.c
+++ b/hw/sensor/max34451.c
@@ -746,7 +746,7 @@ static void max34451_init(Object *obj)
}
-static void max34451_class_init(ObjectClass *klass, void *data)
+static void max34451_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c
index a8730d0..f5b6110 100644
--- a/hw/sensor/tmp105.c
+++ b/hw/sensor/tmp105.c
@@ -26,22 +26,28 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
+#include "hw/registerfields.h"
+#include "trace.h"
+
+FIELD(CONFIG, SHUTDOWN_MODE, 0, 1)
+FIELD(CONFIG, THERMOSTAT_MODE, 1, 1)
+FIELD(CONFIG, POLARITY, 2, 1)
+FIELD(CONFIG, FAULT_QUEUE, 3, 2)
+FIELD(CONFIG, CONVERTER_RESOLUTION, 5, 2)
+FIELD(CONFIG, ONE_SHOT, 7, 1)
static void tmp105_interrupt_update(TMP105State *s)
{
- qemu_set_irq(s->pin, s->alarm ^ ((~s->config >> 2) & 1)); /* POL */
+ qemu_set_irq(s->pin, s->alarm ^ FIELD_EX8(~s->config, CONFIG, POLARITY));
}
-static void tmp105_alarm_update(TMP105State *s)
+static void tmp105_alarm_update(TMP105State *s, bool one_shot)
{
- if ((s->config >> 0) & 1) { /* SD */
- if ((s->config >> 7) & 1) /* OS */
- s->config &= ~(1 << 7); /* OS */
- else
- return;
+ if (FIELD_EX8(s->config, CONFIG, SHUTDOWN_MODE) && !one_shot) {
+ return;
}
- if (s->config >> 1 & 1) {
+ if (FIELD_EX8(s->config, CONFIG, THERMOSTAT_MODE)) {
/*
* TM == 1 : Interrupt mode. We signal Alert when the
* temperature rises above T_high, and expect the guest to clear
@@ -89,7 +95,8 @@ static void tmp105_get_temperature(Object *obj, Visitor *v, const char *name,
visit_type_int(v, name, &value, errp);
}
-/* Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8
+/*
+ * Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8
* fixed point, so units are 1/256 centigrades. A simple ratio will do.
*/
static void tmp105_set_temperature(Object *obj, Visitor *v, const char *name,
@@ -109,7 +116,7 @@ static void tmp105_set_temperature(Object *obj, Visitor *v, const char *name,
s->temperature = (int16_t) (temp * 256 / 1000);
- tmp105_alarm_update(s);
+ tmp105_alarm_update(s, false);
}
static const int tmp105_faultq[4] = { 1, 2, 4, 6 };
@@ -118,54 +125,60 @@ static void tmp105_read(TMP105State *s)
{
s->len = 0;
- if ((s->config >> 1) & 1) { /* TM */
+ if (FIELD_EX8(s->config, CONFIG, THERMOSTAT_MODE)) {
s->alarm = 0;
tmp105_interrupt_update(s);
}
switch (s->pointer & 3) {
case TMP105_REG_TEMPERATURE:
- s->buf[s->len ++] = (((uint16_t) s->temperature) >> 8);
- s->buf[s->len ++] = (((uint16_t) s->temperature) >> 0) &
- (0xf0 << ((~s->config >> 5) & 3)); /* R */
+ s->buf[s->len++] = (((uint16_t) s->temperature) >> 8);
+ s->buf[s->len++] = (((uint16_t) s->temperature) >> 0) &
+ (0xf0 << (FIELD_EX8(~s->config, CONFIG, CONVERTER_RESOLUTION)));
break;
case TMP105_REG_CONFIG:
- s->buf[s->len ++] = s->config;
+ s->buf[s->len++] = s->config;
break;
case TMP105_REG_T_LOW:
- s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 8;
- s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 0;
+ s->buf[s->len++] = ((uint16_t) s->limit[0]) >> 8;
+ s->buf[s->len++] = ((uint16_t) s->limit[0]) >> 0;
break;
case TMP105_REG_T_HIGH:
- s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 8;
- s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 0;
+ s->buf[s->len++] = ((uint16_t) s->limit[1]) >> 8;
+ s->buf[s->len++] = ((uint16_t) s->limit[1]) >> 0;
break;
}
+
+ trace_tmp105_read(s->i2c.address, s->pointer);
}
static void tmp105_write(TMP105State *s)
{
+ trace_tmp105_write(s->i2c.address, s->pointer);
+
switch (s->pointer & 3) {
case TMP105_REG_TEMPERATURE:
break;
case TMP105_REG_CONFIG:
- if (s->buf[0] & ~s->config & (1 << 0)) /* SD */
- printf("%s: TMP105 shutdown\n", __func__);
- s->config = s->buf[0];
- s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */
- tmp105_alarm_update(s);
+ if (FIELD_EX8(s->buf[0] & ~s->config, CONFIG, SHUTDOWN_MODE)) {
+ trace_tmp105_write_shutdown(s->i2c.address);
+ }
+ s->config = FIELD_DP8(s->buf[0], CONFIG, ONE_SHOT, 0);
+ s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)];
+ tmp105_alarm_update(s, FIELD_EX8(s->buf[0], CONFIG, ONE_SHOT));
break;
case TMP105_REG_T_LOW:
case TMP105_REG_T_HIGH:
- if (s->len >= 3)
+ if (s->len >= 3) {
s->limit[s->pointer & 1] = (int16_t)
- ((((uint16_t) s->buf[0]) << 8) | s->buf[1]);
- tmp105_alarm_update(s);
+ ((((uint16_t) s->buf[0]) << 8) | (s->buf[1] & 0xf0));
+ }
+ tmp105_alarm_update(s, false);
break;
}
}
@@ -175,7 +188,7 @@ static uint8_t tmp105_rx(I2CSlave *i2c)
TMP105State *s = TMP105(i2c);
if (s->len < 2) {
- return s->buf[s->len ++];
+ return s->buf[s->len++];
} else {
return 0xff;
}
@@ -215,7 +228,7 @@ static int tmp105_post_load(void *opaque, int version_id)
{
TMP105State *s = opaque;
- s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */
+ s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)];
tmp105_interrupt_update(s);
return 0;
@@ -273,7 +286,7 @@ static void tmp105_reset(I2CSlave *i2c)
s->temperature = 0;
s->pointer = 0;
s->config = 0;
- s->faults = tmp105_faultq[(s->config >> 3) & 3];
+ s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)];
s->alarm = 0;
s->detect_falling = false;
@@ -300,7 +313,7 @@ static void tmp105_initfn(Object *obj)
tmp105_set_temperature, NULL, NULL);
}
-static void tmp105_class_init(ObjectClass *klass, void *data)
+static void tmp105_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
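
The tmp105.c hunks above replace open-coded shifts and masks with the hw/registerfields.h helpers: FIELD() declares a named bit-field once, FIELD_EX8() extracts it, and FIELD_DP8() deposits a new value. A small worked example using the CONFIG layout declared above (the example() wrapper and the 0x99 value are only for illustration):

    #include "qemu/osdep.h"
    #include "hw/registerfields.h"

    FIELD(CONFIG, SHUTDOWN_MODE, 0, 1)
    FIELD(CONFIG, FAULT_QUEUE, 3, 2)
    FIELD(CONFIG, ONE_SHOT, 7, 1)

    static void example(void)
    {
        uint8_t config = 0x99;              /* 1001 1001b */

        /* bits [4:3] of 0x99 are 0b11, so faults == 3 */
        uint8_t faults = FIELD_EX8(config, CONFIG, FAULT_QUEUE);

        /* clearing the one-shot bit (bit 7) leaves 0x19 */
        config = FIELD_DP8(config, CONFIG, ONE_SHOT, 0);

        (void)faults;
        (void)config;
    }
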
diff --git a/hw/sensor/tmp421.c b/hw/sensor/tmp421.c
index b6f0b62..3421c44 100644
--- a/hw/sensor/tmp421.c
+++ b/hw/sensor/tmp421.c
@@ -68,7 +68,7 @@ struct TMP421State {
struct TMP421Class {
I2CSlaveClass parent_class;
- DeviceInfo *dev;
+ const DeviceInfo *dev;
};
#define TYPE_TMP421 "tmp421-generic"
@@ -337,7 +337,7 @@ static void tmp421_realize(DeviceState *dev, Error **errp)
tmp421_reset(&s->i2c);
}
-static void tmp421_class_init(ObjectClass *klass, void *data)
+static void tmp421_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
@@ -382,9 +382,9 @@ static void tmp421_register_types(void)
.name = devices[i].name,
.parent = TYPE_TMP421,
.class_init = tmp421_class_init,
- .class_data = (void *) &devices[i],
+ .class_data = &devices[i],
};
- type_register(&ti);
+ type_register_static(&ti);
}
}
diff --git a/hw/sensor/trace-events b/hw/sensor/trace-events
new file mode 100644
index 0000000..a3fe54f
--- /dev/null
+++ b/hw/sensor/trace-events
@@ -0,0 +1,6 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# tmp105.c
+tmp105_read(uint8_t dev, uint8_t addr) "device: 0x%02x, addr: 0x%02x"
+tmp105_write(uint8_t dev, uint8_t addr) "device: 0x%02x, addr 0x%02x"
+tmp105_write_shutdown(uint8_t dev) "device: 0x%02x"
diff --git a/hw/sensor/trace.h b/hw/sensor/trace.h
new file mode 100644
index 0000000..e472156
--- /dev/null
+++ b/hw/sensor/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_sensor.h"
diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig
index 99a76a9..1660d29 100644
--- a/hw/sh4/Kconfig
+++ b/hw/sh4/Kconfig
@@ -13,13 +13,6 @@ config R2D
select SH7750
select SH_PCI
-config SHIX
- bool
- default y
- depends on SH4
- select SH7750
- select TC58128
-
config SH7750
bool
select SH_INTC
diff --git a/hw/sh4/meson.build b/hw/sh4/meson.build
index 70e814c..7d27839 100644
--- a/hw/sh4/meson.build
+++ b/hw/sh4/meson.build
@@ -4,6 +4,5 @@ sh4_ss.add(when: 'CONFIG_SH7750', if_true: files(
'sh7750_regnames.c',
))
sh4_ss.add(when: 'CONFIG_R2D', if_true: files('r2d.c'))
-sh4_ss.add(when: 'CONFIG_SHIX', if_true: files('shix.c'))
hw_arch += {'sh4': sh4_ss}
diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c
index e5ac675..d68c94e 100644
--- a/hw/sh4/r2d.c
+++ b/hw/sh4/r2d.c
@@ -30,9 +30,9 @@
#include "cpu.h"
#include "hw/sysbus.h"
#include "hw/sh4/sh.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
@@ -43,6 +43,7 @@
#include "hw/loader.h"
#include "hw/usb.h"
#include "hw/block/flash.h"
+#include "exec/tswap.h"
#define FLASH_BASE 0x00000000
#define FLASH_SIZE (16 * MiB)
@@ -62,6 +63,12 @@
#define PA_VERREG 0x32
#define PA_OUTPORT 0x36
+enum r2d_fpga_irq {
+ PCI_INTD, CF_IDE, CF_CD, PCI_INTC, SM501, KEY, RTC_A, RTC_T,
+ SDCARD, PCI_INTA, PCI_INTB, EXT, TP,
+ NR_IRQS
+};
+
typedef struct {
uint16_t bcr;
uint16_t irlmsk;
@@ -87,15 +94,10 @@ typedef struct {
/* output pin */
qemu_irq irl;
+ IRQState irq[NR_IRQS];
MemoryRegion iomem;
} r2d_fpga_t;
-enum r2d_fpga_irq {
- PCI_INTD, CF_IDE, CF_CD, PCI_INTC, SM501, KEY, RTC_A, RTC_T,
- SDCARD, PCI_INTA, PCI_INTB, EXT, TP,
- NR_IRQS
-};
-
static const struct { short irl; uint16_t msk; } irqtab[NR_IRQS] = {
[CF_IDE] = { 1, 1 << 9 },
[CF_CD] = { 2, 1 << 8 },
@@ -185,8 +187,8 @@ static const MemoryRegionOps r2d_fpga_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem,
- hwaddr base, qemu_irq irl)
+static r2d_fpga_t *r2d_fpga_init(MemoryRegion *sysmem,
+ hwaddr base, qemu_irq irl)
{
r2d_fpga_t *s;
@@ -196,7 +198,10 @@ static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem,
memory_region_init_io(&s->iomem, NULL, &r2d_fpga_ops, s, "r2d-fpga", 0x40);
memory_region_add_subregion(sysmem, base, &s->iomem);
- return qemu_allocate_irqs(r2d_fpga_irq_set, s, NR_IRQS);
+
+ qemu_init_irqs(s->irq, NR_IRQS, r2d_fpga_irq_set, s);
+
+ return s;
}
typedef struct ResetData {
@@ -238,13 +243,13 @@ static void r2d_init(MachineState *machine)
ResetData *reset_info;
struct SH7750State *s;
MemoryRegion *sdram = g_new(MemoryRegion, 1);
- qemu_irq *irq;
DriveInfo *dinfo;
DeviceState *dev;
SysBusDevice *busdev;
MemoryRegion *address_space_mem = get_system_memory();
PCIBus *pci_bus;
USBBus *usb_bus;
+ r2d_fpga_t *fpga;
cpu = SUPERH_CPU(cpu_create(machine->cpu_type));
env = &cpu->env;
@@ -259,7 +264,7 @@ static void r2d_init(MachineState *machine)
memory_region_add_subregion(address_space_mem, SDRAM_BASE, sdram);
/* Register peripherals */
s = sh7750_init(cpu, address_space_mem);
- irq = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s));
+ fpga = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s));
dev = qdev_new("sh_pci");
busdev = SYS_BUS_DEVICE(dev);
@@ -267,10 +272,10 @@ static void r2d_init(MachineState *machine)
pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci"));
sysbus_mmio_map(busdev, 0, P4ADDR(0x1e200000));
sysbus_mmio_map(busdev, 1, A7ADDR(0x1e200000));
- sysbus_connect_irq(busdev, 0, irq[PCI_INTA]);
- sysbus_connect_irq(busdev, 1, irq[PCI_INTB]);
- sysbus_connect_irq(busdev, 2, irq[PCI_INTC]);
- sysbus_connect_irq(busdev, 3, irq[PCI_INTD]);
+ sysbus_connect_irq(busdev, 0, &fpga->irq[PCI_INTA]);
+ sysbus_connect_irq(busdev, 1, &fpga->irq[PCI_INTB]);
+ sysbus_connect_irq(busdev, 2, &fpga->irq[PCI_INTC]);
+ sysbus_connect_irq(busdev, 3, &fpga->irq[PCI_INTD]);
dev = qdev_new("sysbus-sm501");
busdev = SYS_BUS_DEVICE(dev);
@@ -280,15 +285,15 @@ static void r2d_init(MachineState *machine)
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, 0x10000000);
sysbus_mmio_map(busdev, 1, 0x13e00000);
- sysbus_connect_irq(busdev, 0, irq[SM501]);
+ sysbus_connect_irq(busdev, 0, &fpga->irq[SM501]);
/* onboard CF (True IDE mode, Master only). */
dinfo = drive_get(IF_IDE, 0, 0);
dev = qdev_new("mmio-ide");
busdev = SYS_BUS_DEVICE(dev);
+ sysbus_connect_irq(busdev, 0, &fpga->irq[CF_IDE]);
qdev_prop_set_uint32(dev, "shift", 1);
sysbus_realize_and_unref(busdev, &error_fatal);
- sysbus_connect_irq(busdev, 0, irq[CF_IDE]);
sysbus_mmio_map(busdev, 0, 0x14001000);
sysbus_mmio_map(busdev, 1, 0x1400080c);
mmio_ide_init_drives(dev, dinfo, NULL);
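
The r2d.c change above drops the heap-allocated array from qemu_allocate_irqs() and instead embeds the IRQState objects in the FPGA state, initializing them in place with qemu_init_irqs() and wiring &fpga->irq[n] directly. A sketch of the same pattern with hypothetical names (FooDevice, N_IRQS, foo_irq_handler), assuming hw/irq.h exposes IRQState for embedding as it does here:

    #include "qemu/osdep.h"
    #include "hw/irq.h"

    #define N_IRQS 4

    typedef struct FooDevice {
        IRQState irq[N_IRQS];       /* embedded, nothing to allocate or free */
    } FooDevice;

    static void foo_irq_handler(void *opaque, int n, int level)
    {
        /* react to input line n changing to level */
    }

    static void foo_init(FooDevice *s)
    {
        /* Initializes all N_IRQS embedded IRQState objects in place. */
        qemu_init_irqs(s->irq, N_IRQS, foo_irq_handler, s);
    }
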
diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c
index ebe0fd9..300eabc 100644
--- a/hw/sh4/sh7750.c
+++ b/hw/sh4/sh7750.c
@@ -28,18 +28,17 @@
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/sh4/sh.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
+#include "target/sh4/cpu.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "sh7750_regs.h"
#include "sh7750_regnames.h"
#include "hw/sh4/sh_intc.h"
#include "hw/timer/tmu012.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "trace.h"
-#define NB_DEVICES 4
-
typedef struct SH7750State {
MemoryRegion iomem;
MemoryRegion iomem_1f0;
@@ -75,7 +74,6 @@ typedef struct SH7750State {
uint16_t periph_portdira; /* Direction seen from the peripherals */
uint16_t periph_pdtrb; /* Imposed by the peripherals */
uint16_t periph_portdirb; /* Direction seen from the peripherals */
- sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */
/* Cache */
uint32_t ccr;
@@ -92,19 +90,6 @@ static inline int has_bcr3_and_bcr4(SH7750State *s)
* I/O ports
*/
-int sh7750_register_io_device(SH7750State *s, sh7750_io_device *device)
-{
- int i;
-
- for (i = 0; i < NB_DEVICES; i++) {
- if (s->devices[i] == NULL) {
- s->devices[i] = device;
- return 0;
- }
- }
- return -1;
-}
-
static uint16_t portdir(uint32_t v)
{
#define EVENPORTMASK(n) ((v & (1 << ((n) << 1))) >> (n))
@@ -142,63 +127,26 @@ static uint16_t portb_lines(SH7750State *s)
(~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */
}
-static void gen_port_interrupts(SH7750State *s)
-{
- /* XXXXX interrupts not generated */
-}
-
static void porta_changed(SH7750State *s, uint16_t prev)
{
- uint16_t currenta, changes;
- int i, r = 0;
+ uint16_t currenta;
currenta = porta_lines(s);
if (currenta == prev) {
return;
}
trace_sh7750_porta(prev, currenta, s->pdtra, s->pctra);
- changes = currenta ^ prev;
-
- for (i = 0; i < NB_DEVICES; i++) {
- if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) {
- r |= s->devices[i]->port_change_cb(currenta, portb_lines(s),
- &s->periph_pdtra,
- &s->periph_portdira,
- &s->periph_pdtrb,
- &s->periph_portdirb);
- }
- }
-
- if (r) {
- gen_port_interrupts(s);
- }
}
static void portb_changed(SH7750State *s, uint16_t prev)
{
- uint16_t currentb, changes;
- int i, r = 0;
+ uint16_t currentb;
currentb = portb_lines(s);
if (currentb == prev) {
return;
}
trace_sh7750_portb(prev, currentb, s->pdtrb, s->pctrb);
- changes = currentb ^ prev;
-
- for (i = 0; i < NB_DEVICES; i++) {
- if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) {
- r |= s->devices[i]->port_change_cb(portb_lines(s), currentb,
- &s->periph_pdtra,
- &s->periph_portdira,
- &s->periph_pdtrb,
- &s->periph_portdirb);
- }
- }
-
- if (r) {
- gen_port_interrupts(s);
- }
}
/*
diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c
deleted file mode 100644
index eb3150b..0000000
--- a/hw/sh4/shix.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * SHIX 2.0 board description
- *
- * Copyright (c) 2005 Samuel Tardieu
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-/*
- * Shix 2.0 board by Alexis Polti, described at
- * https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20
- *
- * More information in target/sh4/README.sh4
- */
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "cpu.h"
-#include "hw/sh4/sh.h"
-#include "sysemu/qtest.h"
-#include "hw/boards.h"
-#include "hw/loader.h"
-#include "qemu/error-report.h"
-
-#define BIOS_FILENAME "shix_bios.bin"
-#define BIOS_ADDRESS 0xA0000000
-
-static void shix_init(MachineState *machine)
-{
- int ret;
- SuperHCPU *cpu;
- struct SH7750State *s;
- MemoryRegion *sysmem = get_system_memory();
- MemoryRegion *rom = g_new(MemoryRegion, 1);
- MemoryRegion *sdram = g_new(MemoryRegion, 2);
- const char *bios_name = machine->firmware ?: BIOS_FILENAME;
-
- cpu = SUPERH_CPU(cpu_create(machine->cpu_type));
-
- /* Allocate memory space */
- memory_region_init_rom(rom, NULL, "shix.rom", 0x4000, &error_fatal);
- memory_region_add_subregion(sysmem, 0x00000000, rom);
- memory_region_init_ram(&sdram[0], NULL, "shix.sdram1", 0x01000000,
- &error_fatal);
- memory_region_add_subregion(sysmem, 0x08000000, &sdram[0]);
- memory_region_init_ram(&sdram[1], NULL, "shix.sdram2", 0x01000000,
- &error_fatal);
- memory_region_add_subregion(sysmem, 0x0c000000, &sdram[1]);
-
- /* Load BIOS in 0 (and access it through P2, 0xA0000000) */
- ret = load_image_targphys(bios_name, 0, 0x4000);
- if (ret < 0 && !qtest_enabled()) {
- error_report("Could not load SHIX bios '%s'", bios_name);
- exit(1);
- }
-
- /* Register peripherals */
- s = sh7750_init(cpu, sysmem);
- /* XXXXX Check success */
- tc58128_init(s, "shix_linux_nand.bin", NULL);
-}
-
-static void shix_machine_init(MachineClass *mc)
-{
- mc->desc = "shix card";
- mc->init = shix_init;
- mc->is_default = true;
- mc->default_cpu_type = TYPE_SH7750R_CPU;
- mc->deprecation_reason = "old and unmaintained";
-}
-
-DEFINE_MACHINE("shix", shix_machine_init)
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 3b77034..ad4cd67 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -21,7 +21,7 @@
#include "qemu/config-file.h"
#include "qemu/module.h"
#include "qemu/option.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/uuid.h"
#include "hw/firmware/smbios.h"
#include "hw/loader.h"
@@ -1093,6 +1093,7 @@ static bool smbios_get_tables_ep(MachineState *ms,
Error **errp)
{
unsigned i, dimm_cnt, offset;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
ERRP_GUARD();
assert(ep_type == SMBIOS_ENTRY_POINT_TYPE_32 ||
@@ -1123,12 +1124,12 @@ static bool smbios_get_tables_ep(MachineState *ms,
smbios_build_type_9_table(errp);
smbios_build_type_11_table();
-#define MAX_DIMM_SZ (16 * GiB)
-#define GET_DIMM_SZ ((i < dimm_cnt - 1) ? MAX_DIMM_SZ \
- : ((current_machine->ram_size - 1) % MAX_DIMM_SZ) + 1)
+#define GET_DIMM_SZ ((i < dimm_cnt - 1) ? mc->smbios_memory_device_size \
+ : ((current_machine->ram_size - 1) % mc->smbios_memory_device_size) + 1)
- dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) /
- MAX_DIMM_SZ;
+ dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size,
+ mc->smbios_memory_device_size) /
+ mc->smbios_memory_device_size;
/*
* The offset determines if we need to keep additional space between
@@ -1284,6 +1285,9 @@ static int save_opt_one(void *opaque,
g_byte_array_append(data, (guint8 *)buf, ret);
}
+ buf[0] = '\0';
+ g_byte_array_append(data, (guint8 *)buf, 1);
+
qemu_close(fd);
*opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1);
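
As a worked example of the type-17 sizing hunk above, assuming the machine class keeps the previous 16 GiB device size: with 40 GiB of guest RAM, dimm_cnt = QEMU_ALIGN_UP(40 GiB, 16 GiB) / 16 GiB = 3, devices 0 and 1 report 16 GiB each via GET_DIMM_SZ, and the last device reports ((40 GiB - 1) % 16 GiB) + 1 = 8 GiB, so the three entries still sum to the full 40 GiB.
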
diff --git a/hw/smbios/smbios_legacy.c b/hw/smbios/smbios_legacy.c
index c37a8ee..14319d4 100644
--- a/hw/smbios/smbios_legacy.c
+++ b/hw/smbios/smbios_legacy.c
@@ -18,7 +18,7 @@
#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "hw/firmware/smbios.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
struct smbios_header {
diff --git a/hw/smbios/smbios_type_38.c b/hw/smbios/smbios_type_38.c
index 168b886..e9b856f 100644
--- a/hw/smbios/smbios_type_38.c
+++ b/hw/smbios/smbios_type_38.c
@@ -72,7 +72,12 @@ static void smbios_build_one_type_38(IPMIFwInfo *info)
" SMBIOS, ignoring this entry.", info->register_spacing);
return;
}
- t->interrupt_number = info->interrupt_number;
+ if (info->irq_source == IPMI_ISA_IRQ) {
+ t->interrupt_number = info->interrupt_number;
+ } else {
+ /* TODO: How to handle PCI? */
+ t->interrupt_number = 0;
+ }
SMBIOS_BUILD_TABLE_POST;
}
diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c
index 6aaa04c..0aeaad3 100644
--- a/hw/sparc/leon3.c
+++ b/hw/sparc/leon3.c
@@ -34,9 +34,9 @@
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/reset.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/reset.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "elf.h"
@@ -380,7 +380,7 @@ static void leon3_generic_hw_init(MachineState *machine)
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL,
&entry, NULL, NULL, NULL,
- 1 /* big endian */, EM_SPARC, 0, 0);
+ ELFDATA2MSB, EM_SPARC, 0, 0);
if (kernel_size < 0) {
kernel_size = load_uimage(kernel_filename, NULL, &entry,
NULL, NULL, NULL);
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index d52e6a7..8ac7e62 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -27,6 +27,7 @@
#include "qapi/error.h"
#include "qemu/datadir.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "hw/sysbus.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
@@ -35,9 +36,9 @@
#include "migration/vmstate.h"
#include "hw/sparc/sparc32_dma.h"
#include "hw/block/fdc.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "net/net.h"
#include "hw/boards.h"
#include "hw/scsi/esp.h"
@@ -233,19 +234,13 @@ static unsigned long sun4m_load_kernel(const char *kernel_filename,
kernel_size = 0;
if (linux_boot) {
- int bswap_needed;
-
-#ifdef BSWAP_NEEDED
- bswap_needed = 1;
-#else
- bswap_needed = 0;
-#endif
kernel_size = load_elf(kernel_filename, NULL,
translate_kernel_address, NULL,
- NULL, NULL, NULL, NULL, 1, EM_SPARC, 0, 0);
+ NULL, NULL, NULL, NULL,
+ ELFDATA2MSB, EM_SPARC, 0, 0);
if (kernel_size < 0)
kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR,
- RAM_size - KERNEL_LOAD_ADDR, bswap_needed,
+ RAM_size - KERNEL_LOAD_ADDR, true,
TARGET_PAGE_SIZE);
if (kernel_size < 0)
kernel_size = load_image_targphys(kernel_filename,
@@ -600,7 +595,7 @@ static void idreg_realize(DeviceState *ds, Error **errp)
sysbus_init_mmio(dev, &s->mem);
}
-static void idreg_class_init(ObjectClass *oc, void *data)
+static void idreg_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -650,7 +645,7 @@ static void afx_realize(DeviceState *ds, Error **errp)
sysbus_init_mmio(dev, &s->mem);
}
-static void afx_class_init(ObjectClass *oc, void *data)
+static void afx_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -703,7 +698,7 @@ static void prom_init(hwaddr addr, const char *bios_name)
if (filename) {
ret = load_elf(filename, NULL,
translate_prom_address, &addr, NULL,
- NULL, NULL, NULL, 1, EM_SPARC, 0, 0);
+ NULL, NULL, NULL, ELFDATA2MSB, EM_SPARC, 0, 0);
if (ret < 0 || ret > PROM_SIZE_MAX) {
ret = load_image_targphys(filename, addr, PROM_SIZE_MAX);
}
@@ -732,15 +727,10 @@ static void prom_realize(DeviceState *ds, Error **errp)
sysbus_init_mmio(dev, &s->prom);
}
-static Property prom_properties[] = {
- {/* end of property list */},
-};
-
-static void prom_class_init(ObjectClass *klass, void *data)
+static void prom_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- device_class_set_props(dc, prom_properties);
dc->realize = prom_realize;
}
@@ -781,7 +771,7 @@ static void ram_initfn(Object *obj)
"Valid value is ID of a hostmem backend");
}
-static void ram_class_init(ObjectClass *klass, void *data)
+static void ram_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -979,7 +969,7 @@ static void sun4m_hw_init(MachineState *machine)
sysbus_mmio_map(s, 0, hwdef->ms_kb_base);
/* Logically OR both its IRQs together */
- ms_kb_orgate = DEVICE(object_new(TYPE_OR_IRQ));
+ ms_kb_orgate = qdev_new(TYPE_OR_IRQ);
object_property_set_int(OBJECT(ms_kb_orgate), "num-lines", 2, &error_fatal);
qdev_realize_and_unref(ms_kb_orgate, NULL, &error_fatal);
sysbus_connect_irq(s, 0, qdev_get_gpio_in(ms_kb_orgate, 0));
@@ -1000,7 +990,7 @@ static void sun4m_hw_init(MachineState *machine)
sysbus_mmio_map(s, 0, hwdef->serial_base);
/* Logically OR both its IRQs together */
- serial_orgate = DEVICE(object_new(TYPE_OR_IRQ));
+ serial_orgate = qdev_new(TYPE_OR_IRQ);
object_property_set_int(OBJECT(serial_orgate), "num-lines", 2,
&error_fatal);
qdev_realize_and_unref(serial_orgate, NULL, &error_fatal);
@@ -1108,7 +1098,7 @@ enum {
ss600mp_id,
};
-static void sun4m_machine_class_init(ObjectClass *oc, void *data)
+static void sun4m_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1119,7 +1109,7 @@ static void sun4m_machine_class_init(ObjectClass *oc, void *data)
mc->default_ram_id = "sun4m.ram";
}
-static void ss5_class_init(ObjectClass *oc, void *data)
+static void ss5_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1156,7 +1146,7 @@ static void ss5_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss5_hwdef;
}
-static void ss10_class_init(ObjectClass *oc, void *data)
+static void ss10_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1191,7 +1181,7 @@ static void ss10_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss10_hwdef;
}
-static void ss600mp_class_init(ObjectClass *oc, void *data)
+static void ss600mp_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1224,7 +1214,7 @@ static void ss600mp_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss600mp_hwdef;
}
-static void ss20_class_init(ObjectClass *oc, void *data)
+static void ss20_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1275,7 +1265,7 @@ static void ss20_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss20_hwdef;
}
-static void voyager_class_init(ObjectClass *oc, void *data)
+static void voyager_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1307,7 +1297,7 @@ static void voyager_class_init(ObjectClass *oc, void *data)
smc->hwdef = &voyager_hwdef;
}
-static void ss_lx_class_init(ObjectClass *oc, void *data)
+static void ss_lx_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1340,7 +1330,7 @@ static void ss_lx_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss_lx_hwdef;
}
-static void ss4_class_init(ObjectClass *oc, void *data)
+static void ss4_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1373,7 +1363,7 @@ static void ss4_class_init(ObjectClass *oc, void *data)
smc->hwdef = &ss4_hwdef;
}
-static void scls_class_init(ObjectClass *oc, void *data)
+static void scls_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
@@ -1405,7 +1395,7 @@ static void scls_class_init(ObjectClass *oc, void *data)
smc->hwdef = &scls_hwdef;
}
-static void sbook_class_init(ObjectClass *oc, void *data)
+static void sbook_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc);
diff --git a/hw/sparc/sun4m_iommu.c b/hw/sparc/sun4m_iommu.c
index 06703b1..a7ff36e 100644
--- a/hw/sparc/sun4m_iommu.c
+++ b/hw/sparc/sun4m_iommu.c
@@ -29,7 +29,7 @@
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "trace.h"
/*
@@ -238,7 +238,7 @@ static void iommu_mem_write(void *opaque, hwaddr addr,
static const MemoryRegionOps iommu_mem_ops = {
.read = iommu_mem_read,
.write = iommu_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -368,16 +368,15 @@ static void iommu_init(Object *obj)
sysbus_init_mmio(dev, &s->iomem);
}
-static Property iommu_properties[] = {
+static const Property iommu_properties[] = {
DEFINE_PROP_UINT32("version", IOMMUState, version, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void iommu_class_init(ObjectClass *klass, void *data)
+static void iommu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = iommu_reset;
+ device_class_set_legacy_reset(dc, iommu_reset);
dc->vmsd = &vmstate_iommu;
device_class_set_props(dc, iommu_properties);
}
@@ -390,7 +389,8 @@ static const TypeInfo iommu_info = {
.class_init = iommu_class_init,
};
-static void sun4m_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+static void sun4m_iommu_memory_region_class_init(ObjectClass *klass,
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/sparc64/Kconfig b/hw/sparc64/Kconfig
index 3b948a2..f764c8a 100644
--- a/hw/sparc64/Kconfig
+++ b/hw/sparc64/Kconfig
@@ -10,6 +10,7 @@ config SUN4U
select ISA_BUS
select FDC_ISA
select SERIAL_ISA
+ select SERIAL_MM
select PCI_SABRE
select IDE_CMD646
select PCKBD
diff --git a/hw/sparc64/niagara.c b/hw/sparc64/niagara.c
index ab3c4ec..1ffe920 100644
--- a/hw/sparc64/niagara.c
+++ b/hw/sparc64/niagara.c
@@ -27,15 +27,15 @@
#include "qemu/units.h"
#include "cpu.h"
#include "hw/boards.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/misc/unimp.h"
#include "hw/loader.h"
#include "hw/sparc/sparc64.h"
#include "hw/rtc/sun4v-rtc.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
-#include "sysemu/sysemu.h"
+#include "system/qtest.h"
+#include "system/system.h"
#include "qapi/error.h"
typedef struct NiagaraBoardState {
@@ -157,7 +157,7 @@ static void niagara_init(MachineState *machine)
sun4v_rtc_init(NIAGARA_RTC_BASE);
}
-static void niagara_class_init(ObjectClass *oc, void *data)
+static void niagara_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c
index 3091cde..9cffc92 100644
--- a/hw/sparc64/sparc64.c
+++ b/hw/sparc64/sparc64.c
@@ -29,7 +29,7 @@
#include "hw/boards.h"
#include "hw/sparc/sparc64.h"
#include "qemu/timer.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "trace.h"
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 4ece1ac..e9f9b0a 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -28,13 +28,15 @@
#include "qapi/error.h"
#include "qemu/datadir.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/sabre.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-isa.h"
+#include "hw/char/serial-mm.h"
#include "hw/char/parallel-isa.h"
#include "hw/rtc/m48t59.h"
#include "migration/vmstate.h"
@@ -42,8 +44,8 @@
#include "hw/block/fdc.h"
#include "net/net.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/nvram/sun_nvram.h"
#include "hw/nvram/chrp_nvram.h"
@@ -167,21 +169,14 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename,
kernel_size = 0;
if (linux_boot) {
- int bswap_needed;
-
-#ifdef BSWAP_NEEDED
- bswap_needed = 1;
-#else
- bswap_needed = 0;
-#endif
kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, kernel_entry,
- kernel_addr, &kernel_top, NULL, 1, EM_SPARCV9, 0,
- 0);
+ kernel_addr, &kernel_top, NULL,
+ ELFDATA2MSB, EM_SPARCV9, 0, 0);
if (kernel_size < 0) {
*kernel_addr = KERNEL_LOAD_ADDR;
*kernel_entry = KERNEL_LOAD_ADDR;
kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR,
- RAM_size - KERNEL_LOAD_ADDR, bswap_needed,
+ RAM_size - KERNEL_LOAD_ADDR, true,
TARGET_PAGE_SIZE);
}
if (kernel_size < 0) {
@@ -253,7 +248,7 @@ static void power_mem_write(void *opaque, hwaddr addr,
static const MemoryRegionOps power_mem_ops = {
.read = power_mem_read,
.write = power_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_BIG_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -271,7 +266,7 @@ static void power_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &d->power_mmio);
}
-static void power_class_init(ObjectClass *klass, void *data)
+static void power_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -373,13 +368,12 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp)
pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->bar1);
}
-static Property ebus_properties[] = {
+static const Property ebus_properties[] = {
DEFINE_PROP_UINT64("console-serial-base", EbusState,
console_serial_base, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ebus_class_init(ObjectClass *klass, void *data)
+static void ebus_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -397,7 +391,7 @@ static const TypeInfo ebus_info = {
.parent = TYPE_PCI_DEVICE,
.class_init = ebus_class_init,
.instance_size = sizeof(EbusState),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -441,7 +435,7 @@ static void prom_init(hwaddr addr, const char *bios_name)
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (filename) {
ret = load_elf(filename, NULL, translate_prom_address, &addr,
- NULL, NULL, NULL, NULL, 1, EM_SPARCV9, 0, 0);
+ NULL, NULL, NULL, NULL, ELFDATA2MSB, EM_SPARCV9, 0, 0);
if (ret < 0 || ret > PROM_SIZE_MAX) {
ret = load_image_targphys(filename, addr, PROM_SIZE_MAX);
}
@@ -470,15 +464,10 @@ static void prom_realize(DeviceState *ds, Error **errp)
sysbus_init_mmio(dev, &s->prom);
}
-static Property prom_properties[] = {
- {/* end of property list */},
-};
-
-static void prom_class_init(ObjectClass *klass, void *data)
+static void prom_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- device_class_set_props(dc, prom_properties);
dc->realize = prom_realize;
}
@@ -531,12 +520,11 @@ static void ram_init(hwaddr addr, ram_addr_t RAM_size)
sysbus_mmio_map(s, 0, addr);
}
-static Property ram_properties[] = {
+static const Property ram_properties[] = {
DEFINE_PROP_UINT64("size", RamDevice, size, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ram_class_init(ObjectClass *klass, void *data)
+static void ram_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -799,7 +787,7 @@ static GlobalProperty hw_compat_sparc64[] = {
};
static const size_t hw_compat_sparc64_len = G_N_ELEMENTS(hw_compat_sparc64);
-static void sun4u_class_init(ObjectClass *oc, void *data)
+static void sun4u_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
@@ -823,13 +811,13 @@ static const TypeInfo sun4u_type = {
.name = MACHINE_TYPE_NAME("sun4u"),
.parent = TYPE_MACHINE,
.class_init = sun4u_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
};
-static void sun4v_class_init(ObjectClass *oc, void *data)
+static void sun4v_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/sparc64/sun4u_iommu.c b/hw/sparc64/sun4u_iommu.c
index 1c1dca7..14645f4 100644
--- a/hw/sparc64/sun4u_iommu.c
+++ b/hw/sparc64/sun4u_iommu.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/sparc/sun4u_iommu.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@@ -305,11 +305,11 @@ static void iommu_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
-static void iommu_class_init(ObjectClass *klass, void *data)
+static void iommu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = iommu_reset;
+ device_class_set_legacy_reset(dc, iommu_reset);
}
static const TypeInfo iommu_info = {
@@ -320,7 +320,8 @@ static const TypeInfo iommu_info = {
.class_init = iommu_class_init,
};
-static void sun4u_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+static void sun4u_iommu_memory_region_class_init(ObjectClass *klass,
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/ssi/Kconfig b/hw/ssi/Kconfig
index 83ee53c..1bd5646 100644
--- a/hw/ssi/Kconfig
+++ b/hw/ssi/Kconfig
@@ -24,3 +24,11 @@ config STM32F2XX_SPI
config BCM2835_SPI
bool
select SSI
+
+config PNV_SPI
+ bool
+ select SSI
+
+config ALLWINNER_A10_SPI
+ bool
+ select SSI
diff --git a/hw/ssi/allwinner-a10-spi.c b/hw/ssi/allwinner-a10-spi.c
new file mode 100644
index 0000000..6b7cca8
--- /dev/null
+++ b/hw/ssi/allwinner-a10-spi.c
@@ -0,0 +1,561 @@
+/*
+ * Allwinner SPI Bus Serial Interface Emulation
+ *
+ * Copyright (C) 2024 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/ssi/allwinner-a10-spi.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+/* Allwinner SPI memory map */
+#define SPI_RXDATA_REG 0x00 /* receive data register */
+#define SPI_TXDATA_REG 0x04 /* transmit data register */
+#define SPI_CTL_REG 0x08 /* control register */
+#define SPI_INTCTL_REG 0x0c /* interrupt control register */
+#define SPI_INT_STA_REG 0x10 /* interrupt status register */
+#define SPI_DMACTL_REG 0x14 /* DMA control register */
+#define SPI_WAIT_REG 0x18 /* wait clock counter register */
+#define SPI_CCTL_REG 0x1c /* clock rate control register */
+#define SPI_BC_REG 0x20 /* burst control register */
+#define SPI_TC_REG 0x24 /* transmit counter register */
+#define SPI_FIFO_STA_REG 0x28 /* FIFO status register */
+
+/* Data register */
+#define SPI_DATA_RESET 0
+
+/* Control register */
+#define SPI_CTL_SDC (1 << 19)
+#define SPI_CTL_TP_EN (1 << 18)
+#define SPI_CTL_SS_LEVEL (1 << 17)
+#define SPI_CTL_SS_CTRL (1 << 16)
+#define SPI_CTL_DHB (1 << 15)
+#define SPI_CTL_DDB (1 << 14)
+#define SPI_CTL_SS (3 << 12)
+#define SPI_CTL_SS_SHIFT 12
+#define SPI_CTL_RPSM (1 << 11)
+#define SPI_CTL_XCH (1 << 10)
+#define SPI_CTL_RF_RST (1 << 9)
+#define SPI_CTL_TF_RST (1 << 8)
+#define SPI_CTL_SSCTL (1 << 7)
+#define SPI_CTL_LMTF (1 << 6)
+#define SPI_CTL_DMAMC (1 << 5)
+#define SPI_CTL_SSPOL (1 << 4)
+#define SPI_CTL_POL (1 << 3)
+#define SPI_CTL_PHA (1 << 2)
+#define SPI_CTL_MODE (1 << 1)
+#define SPI_CTL_EN (1 << 0)
+#define SPI_CTL_MASK 0xFFFFFu
+#define SPI_CTL_RESET 0x0002001Cu
+
+/* Interrupt control register */
+#define SPI_INTCTL_SS_INT_EN (1 << 17)
+#define SPI_INTCTL_TX_INT_EN (1 << 16)
+#define SPI_INTCTL_TF_UR_INT_EN (1 << 14)
+#define SPI_INTCTL_TF_OF_INT_EN (1 << 13)
+#define SPI_INTCTL_TF_E34_INT_EN (1 << 12)
+#define SPI_INTCTL_TF_E14_INT_EN (1 << 11)
+#define SPI_INTCTL_TF_FL_INT_EN (1 << 10)
+#define SPI_INTCTL_TF_HALF_EMP_INT_EN (1 << 9)
+#define SPI_INTCTL_TF_EMP_INT_EN (1 << 8)
+#define SPI_INTCTL_RF_UR_INT_EN (1 << 6)
+#define SPI_INTCTL_RF_OF_INT_EN (1 << 5)
+#define SPI_INTCTL_RF_E34_INT_EN (1 << 4)
+#define SPI_INTCTL_RF_E14_INT_EN (1 << 3)
+#define SPI_INTCTL_RF_FU_INT_EN (1 << 2)
+#define SPI_INTCTL_RF_HALF_FU_INT_EN (1 << 1)
+#define SPI_INTCTL_RF_RDY_INT_EN (1 << 0)
+#define SPI_INTCTL_MASK 0x37F7Fu
+#define SPI_INTCTL_RESET 0
+
+/* Interrupt status register */
+#define SPI_INT_STA_INT_CBF (1 << 31)
+#define SPI_INT_STA_SSI (1 << 17)
+#define SPI_INT_STA_TC (1 << 16)
+#define SPI_INT_STA_TU (1 << 14)
+#define SPI_INT_STA_TO (1 << 13)
+#define SPI_INT_STA_TE34 (1 << 12)
+#define SPI_INT_STA_TE14 (1 << 11)
+#define SPI_INT_STA_TF (1 << 10)
+#define SPI_INT_STA_THE (1 << 9)
+#define SPI_INT_STA_TE (1 << 8)
+#define SPI_INT_STA_RU (1 << 6)
+#define SPI_INT_STA_RO (1 << 5)
+#define SPI_INT_STA_RF34 (1 << 4)
+#define SPI_INT_STA_RF14 (1 << 3)
+#define SPI_INT_STA_RF (1 << 2)
+#define SPI_INT_STA_RHF (1 << 1)
+#define SPI_INT_STA_RR (1 << 0)
+#define SPI_INT_STA_MASK 0x80037F7Fu
+#define SPI_INT_STA_RESET 0x00001B00u
+
+/* DMA control register - not implemented */
+#define SPI_DMACTL_RESET 0
+
+/* Wait clock register */
+#define SPI_WAIT_REG_WCC_MASK 0xFFFFu
+#define SPI_WAIT_RESET 0
+
+/* Clock control register - not implemented */
+#define SPI_CCTL_RESET 2
+
+/* Burst count register */
+#define SPI_BC_BC_MASK 0xFFFFFFu
+#define SPI_BC_RESET 0
+
+/* Transmit counter register */
+#define SPI_TC_WTC_MASK 0xFFFFFFu
+#define SPI_TC_RESET 0
+
+/* FIFO status register */
+#define SPI_FIFO_STA_CNT_MASK 0x7F
+#define SPI_FIFO_STA_TF_CNT_SHIFT 16
+#define SPI_FIFO_STA_RF_CNT_SHIFT 0
+#define SPI_FIFO_STA_RESET 0
+
+#define REG_INDEX(offset) (offset / sizeof(uint32_t))
+
+
+static const char *allwinner_a10_spi_get_regname(unsigned offset)
+{
+ switch (offset) {
+ case SPI_RXDATA_REG:
+ return "RXDATA";
+ case SPI_TXDATA_REG:
+ return "TXDATA";
+ case SPI_CTL_REG:
+ return "CTL";
+ case SPI_INTCTL_REG:
+ return "INTCTL";
+ case SPI_INT_STA_REG:
+ return "INT_STA";
+ case SPI_DMACTL_REG:
+ return "DMACTL";
+ case SPI_WAIT_REG:
+ return "WAIT";
+ case SPI_CCTL_REG:
+ return "CCTL";
+ case SPI_BC_REG:
+ return "BC";
+ case SPI_TC_REG:
+ return "TC";
+ case SPI_FIFO_STA_REG:
+ return "FIFO_STA";
+ default:
+ return "[?]";
+ }
+}
+
+static bool allwinner_a10_spi_is_enabled(AWA10SPIState *s)
+{
+ return s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_EN;
+}
+
+static void allwinner_a10_spi_txfifo_reset(AWA10SPIState *s)
+{
+ fifo8_reset(&s->tx_fifo);
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= (SPI_INT_STA_TE | SPI_INT_STA_TE14 |
+ SPI_INT_STA_THE | SPI_INT_STA_TE34);
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~(SPI_INT_STA_TU | SPI_INT_STA_TO);
+}
+
+static void allwinner_a10_spi_rxfifo_reset(AWA10SPIState *s)
+{
+ fifo8_reset(&s->rx_fifo);
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &=
+ ~(SPI_INT_STA_RU | SPI_INT_STA_RO | SPI_INT_STA_RF | SPI_INT_STA_RR |
+ SPI_INT_STA_RHF | SPI_INT_STA_RF14 | SPI_INT_STA_RF34);
+}
+
+static uint8_t allwinner_a10_spi_selected_channel(AWA10SPIState *s)
+{
+ return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SS) >> SPI_CTL_SS_SHIFT;
+}
+
+static void allwinner_a10_spi_reset_hold(Object *obj, ResetType type)
+{
+ AWA10SPIState *s = AW_A10_SPI(obj);
+
+ s->regs[REG_INDEX(SPI_RXDATA_REG)] = SPI_DATA_RESET;
+ s->regs[REG_INDEX(SPI_TXDATA_REG)] = SPI_DATA_RESET;
+ s->regs[REG_INDEX(SPI_CTL_REG)] = SPI_CTL_RESET;
+ s->regs[REG_INDEX(SPI_INTCTL_REG)] = SPI_INTCTL_RESET;
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] = SPI_INT_STA_RESET;
+ s->regs[REG_INDEX(SPI_DMACTL_REG)] = SPI_DMACTL_RESET;
+ s->regs[REG_INDEX(SPI_WAIT_REG)] = SPI_WAIT_RESET;
+ s->regs[REG_INDEX(SPI_CCTL_REG)] = SPI_CCTL_RESET;
+ s->regs[REG_INDEX(SPI_BC_REG)] = SPI_BC_RESET;
+ s->regs[REG_INDEX(SPI_TC_REG)] = SPI_TC_RESET;
+ s->regs[REG_INDEX(SPI_FIFO_STA_REG)] = SPI_FIFO_STA_RESET;
+
+ allwinner_a10_spi_txfifo_reset(s);
+ allwinner_a10_spi_rxfifo_reset(s);
+}
+
+static void allwinner_a10_spi_update_irq(AWA10SPIState *s)
+{
+ bool level;
+
+ if (fifo8_is_empty(&s->rx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RR;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RR;
+ }
+
+ if (fifo8_num_used(&s->rx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 2)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF14;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF14;
+ }
+
+ if (fifo8_num_used(&s->rx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 1)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RHF;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RHF;
+ }
+
+ if (fifo8_num_free(&s->rx_fifo) <= (AW_A10_SPI_FIFO_SIZE >> 2)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF34;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF34;
+ }
+
+ if (fifo8_is_full(&s->rx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF;
+ }
+
+ if (fifo8_is_empty(&s->tx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE;
+ }
+
+ if (fifo8_num_free(&s->tx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 2)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE14;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE14;
+ }
+
+ if (fifo8_num_free(&s->tx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 1)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_THE;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_THE;
+ }
+
+ if (fifo8_num_used(&s->tx_fifo) <= (AW_A10_SPI_FIFO_SIZE >> 2)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE34;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE34;
+ }
+
+ if (fifo8_is_full(&s->rx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TF;
+ } else {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TF;
+ }
+
+ level = (s->regs[REG_INDEX(SPI_INT_STA_REG)] &
+ s->regs[REG_INDEX(SPI_INTCTL_REG)]) != 0;
+
+ qemu_set_irq(s->irq, level);
+
+ trace_allwinner_a10_spi_update_irq(level);
+}
+
+static void allwinner_a10_spi_flush_txfifo(AWA10SPIState *s)
+{
+ uint32_t burst_count = s->regs[REG_INDEX(SPI_BC_REG)];
+ uint32_t tx_burst = s->regs[REG_INDEX(SPI_TC_REG)];
+ trace_allwinner_a10_spi_burst_length(tx_burst);
+
+ trace_allwinner_a10_spi_flush_txfifo_begin(fifo8_num_used(&s->tx_fifo),
+ fifo8_num_used(&s->rx_fifo));
+
+ while (!fifo8_is_empty(&s->tx_fifo)) {
+ uint8_t tx = fifo8_pop(&s->tx_fifo);
+ uint8_t rx = 0;
+ bool fill_rx = true;
+
+ trace_allwinner_a10_spi_tx(tx);
+
+ /* Write one byte at a time */
+ rx = ssi_transfer(s->bus, tx);
+
+ trace_allwinner_a10_spi_rx(rx);
+
+ /* Check DHB here to determine if RX bytes should be stored */
+ if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_DHB) {
+ /* Store rx bytes only after WTC transfers */
+ if (tx_burst > 0u) {
+ fill_rx = false;
+ tx_burst--;
+ }
+ }
+
+ if (fill_rx) {
+ if (fifo8_is_full(&s->rx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF;
+ } else {
+ fifo8_push(&s->rx_fifo, rx);
+ }
+ }
+
+ allwinner_a10_spi_update_irq(s);
+
+ burst_count--;
+
+ if (burst_count == 0) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TC;
+ s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_XCH;
+ break;
+ }
+ }
+
+ if (fifo8_is_empty(&s->tx_fifo)) {
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TC;
+ s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_XCH;
+ }
+
+ trace_allwinner_a10_spi_flush_txfifo_end(fifo8_num_used(&s->tx_fifo),
+ fifo8_num_used(&s->rx_fifo));
+}
+
+static uint64_t allwinner_a10_spi_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ uint32_t value = 0;
+ AWA10SPIState *s = opaque;
+ uint32_t index = offset >> 2;
+
+ if (offset > SPI_FIFO_STA_REG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[%s]%s: Bad register at offset 0x%" HWADDR_PRIx "\n",
+ TYPE_AW_A10_SPI, __func__, offset);
+ return 0;
+ }
+
+ value = s->regs[index];
+
+ if (allwinner_a10_spi_is_enabled(s)) {
+ switch (offset) {
+ case SPI_RXDATA_REG:
+ if (fifo8_is_empty(&s->rx_fifo)) {
+ /* value is undefined */
+ value = 0xdeadbeef;
+ } else {
+ /* read from the RX FIFO */
+ value = fifo8_pop(&s->rx_fifo);
+ }
+ break;
+ case SPI_TXDATA_REG:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[%s]%s: Trying to read from TX FIFO\n",
+ TYPE_AW_A10_SPI, __func__);
+
+ /* Reading from TXDATA gives 0 */
+ break;
+ case SPI_FIFO_STA_REG:
+ /* Read current tx/rx fifo data count */
+ value = fifo8_num_used(&s->tx_fifo) << SPI_FIFO_STA_TF_CNT_SHIFT |
+ fifo8_num_used(&s->rx_fifo) << SPI_FIFO_STA_RF_CNT_SHIFT;
+ break;
+ case SPI_CTL_REG:
+ case SPI_INTCTL_REG:
+ case SPI_INT_STA_REG:
+ case SPI_DMACTL_REG:
+ case SPI_WAIT_REG:
+ case SPI_CCTL_REG:
+ case SPI_BC_REG:
+ case SPI_TC_REG:
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: bad offset 0x%x\n", __func__,
+ (uint32_t)offset);
+ break;
+ }
+
+ allwinner_a10_spi_update_irq(s);
+ }
+ trace_allwinner_a10_spi_read(allwinner_a10_spi_get_regname(offset), value);
+
+ return value;
+}
+
+static bool allwinner_a10_spi_update_cs_level(AWA10SPIState *s, int cs_line_nr)
+{
+ if (cs_line_nr == allwinner_a10_spi_selected_channel(s)) {
+ return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SS_LEVEL) != 0;
+ } else {
+ return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SSPOL) != 0;
+ }
+}
+
+static void allwinner_a10_spi_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AWA10SPIState *s = opaque;
+ uint32_t index = offset >> 2;
+ int i = 0;
+
+ if (offset > SPI_FIFO_STA_REG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[%s]%s: Bad register at offset 0x%" HWADDR_PRIx "\n",
+ TYPE_AW_A10_SPI, __func__, offset);
+ return;
+ }
+
+ trace_allwinner_a10_spi_write(allwinner_a10_spi_get_regname(offset),
+ (uint32_t)value);
+
+ if (!allwinner_a10_spi_is_enabled(s)) {
+ /* Block is disabled */
+ if (offset != SPI_CTL_REG) {
+ /* Ignore access */
+ return;
+ }
+ }
+
+ switch (offset) {
+ case SPI_RXDATA_REG:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to write to RX FIFO\n",
+ TYPE_AW_A10_SPI, __func__);
+ break;
+ case SPI_TXDATA_REG:
+ if (fifo8_is_full(&s->tx_fifo)) {
+ /* Ignore writes if queue is full */
+ break;
+ }
+
+ fifo8_push(&s->tx_fifo, (uint8_t)value);
+
+ break;
+ case SPI_INT_STA_REG:
+ /* Handle W1C bits - everything except SPI_INT_STA_INT_CBF. */
+ value &= ~SPI_INT_STA_INT_CBF;
+ s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~(value & SPI_INT_STA_MASK);
+ break;
+ case SPI_CTL_REG:
+ s->regs[REG_INDEX(SPI_CTL_REG)] = value;
+
+ for (i = 0; i < AW_A10_SPI_CS_LINES_NR; i++) {
+ qemu_set_irq(
+ s->cs_lines[i],
+ allwinner_a10_spi_update_cs_level(s, i));
+ }
+
+ if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_XCH) {
+ /* Request to start emitting */
+ allwinner_a10_spi_flush_txfifo(s);
+ }
+ if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_TF_RST) {
+ allwinner_a10_spi_txfifo_reset(s);
+ s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_TF_RST;
+ }
+ if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_RF_RST) {
+ allwinner_a10_spi_rxfifo_reset(s);
+ s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_RF_RST;
+ }
+ break;
+ case SPI_INTCTL_REG:
+ case SPI_DMACTL_REG:
+ case SPI_WAIT_REG:
+ case SPI_CCTL_REG:
+ case SPI_BC_REG:
+ case SPI_TC_REG:
+ case SPI_FIFO_STA_REG:
+ s->regs[index] = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: bad offset 0x%x\n", __func__,
+ (uint32_t)offset);
+ break;
+ }
+
+ allwinner_a10_spi_update_irq(s);
+}
+
+static const MemoryRegionOps allwinner_a10_spi_ops = {
+ .read = allwinner_a10_spi_read,
+ .write = allwinner_a10_spi_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static const VMStateDescription allwinner_a10_spi_vmstate = {
+ .name = TYPE_AW_A10_SPI,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_FIFO8(tx_fifo, AWA10SPIState),
+ VMSTATE_FIFO8(rx_fifo, AWA10SPIState),
+ VMSTATE_UINT32_ARRAY(regs, AWA10SPIState, AW_A10_SPI_REGS_NUM),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void allwinner_a10_spi_realize(DeviceState *dev, Error **errp)
+{
+ AWA10SPIState *s = AW_A10_SPI(dev);
+ int i = 0;
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_a10_spi_ops, s,
+ TYPE_AW_A10_SPI, AW_A10_SPI_IOSIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+
+ s->bus = ssi_create_bus(dev, "spi");
+ for (i = 0; i < AW_A10_SPI_CS_LINES_NR; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
+ }
+ fifo8_create(&s->tx_fifo, AW_A10_SPI_FIFO_SIZE);
+ fifo8_create(&s->rx_fifo, AW_A10_SPI_FIFO_SIZE);
+}
+
+static void allwinner_a10_spi_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = allwinner_a10_spi_reset_hold;
+ dc->vmsd = &allwinner_a10_spi_vmstate;
+ dc->realize = allwinner_a10_spi_realize;
+ dc->desc = "Allwinner A10 SPI Controller";
+}
+
+static const TypeInfo allwinner_a10_spi_type_info = {
+ .name = TYPE_AW_A10_SPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AWA10SPIState),
+ .class_init = allwinner_a10_spi_class_init,
+};
+
+static void allwinner_a10_spi_register_types(void)
+{
+ type_register_static(&allwinner_a10_spi_type_info);
+}
+
+type_init(allwinner_a10_spi_register_types)
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index 49205ab..614528b 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -359,7 +359,7 @@ static const MemoryRegionOps aspeed_smc_flash_default_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
- .max_access_size = 4,
+ .max_access_size = 8,
},
};
@@ -417,7 +417,7 @@ static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect)
AspeedSMCState *s = fl->controller;
trace_aspeed_smc_flash_select(fl->cs, unselect ? "un" : "");
-
+ s->unselect = unselect;
qemu_set_irq(s->cs_lines[fl->cs], unselect);
}
@@ -670,29 +670,42 @@ static const MemoryRegionOps aspeed_smc_flash_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
- .max_access_size = 4,
+ .max_access_size = 8,
},
};
static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value)
{
AspeedSMCState *s = fl->controller;
- bool unselect;
+ bool unselect = false;
+ uint32_t old_mode;
+ uint32_t new_mode;
- /* User mode selects the CS, other modes unselect */
- unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE;
+ old_mode = s->regs[s->r_ctrl0 + fl->cs] & CTRL_CMD_MODE_MASK;
+ new_mode = value & CTRL_CMD_MODE_MASK;
- /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */
- if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) &&
- value & CTRL_CE_STOP_ACTIVE) {
- unselect = true;
+ if (old_mode == CTRL_USERMODE) {
+ if (new_mode != CTRL_USERMODE) {
+ unselect = true;
+ }
+
+ /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */
+ if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) &&
+ value & CTRL_CE_STOP_ACTIVE) {
+ unselect = true;
+ }
+ } else {
+ if (new_mode != CTRL_USERMODE) {
+ unselect = true;
+ }
}
s->regs[s->r_ctrl0 + fl->cs] = value;
- s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START;
-
- aspeed_smc_flash_do_select(fl, unselect);
+ if (unselect != s->unselect) {
+ s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START;
+ aspeed_smc_flash_do_select(fl, unselect);
+ }
}
static void aspeed_smc_reset(DeviceState *d)
@@ -729,6 +742,8 @@ static void aspeed_smc_reset(DeviceState *d)
qemu_set_irq(s->cs_lines[i], true);
}
+ s->unselect = true;
+
/* setup the default segment register values and regions for all */
for (i = 0; i < asc->cs_num_max; ++i) {
aspeed_smc_flash_set_segment_region(s, i,
@@ -789,8 +804,7 @@ static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask)
}
}
- aspeed_smc_error("invalid HCLK mask %x", hclk_mask);
- return 0;
+ g_assert_not_reached();
}
/*
@@ -1262,30 +1276,30 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp)
static const VMStateDescription vmstate_aspeed_smc = {
.name = "aspeed.smc",
- .version_id = 2,
+ .version_id = 3,
.minimum_version_id = 2,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX),
VMSTATE_UINT8(snoop_index, AspeedSMCState),
VMSTATE_UINT8(snoop_dummies, AspeedSMCState),
+ VMSTATE_BOOL_V(unselect, AspeedSMCState, 3),
VMSTATE_END_OF_LIST()
}
};
-static Property aspeed_smc_properties[] = {
+static const Property aspeed_smc_properties[] = {
DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false),
DEFINE_PROP_UINT64("dram-base", AspeedSMCState, dram_base, 0),
DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_smc_class_init(ObjectClass *klass, void *data)
+static void aspeed_smc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_smc_realize;
- dc->reset = aspeed_smc_reset;
+ device_class_set_legacy_reset(dc, aspeed_smc_reset);
device_class_set_props(dc, aspeed_smc_properties);
dc->vmsd = &vmstate_aspeed_smc;
}
@@ -1321,14 +1335,13 @@ static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
}
-static Property aspeed_smc_flash_properties[] = {
+static const Property aspeed_smc_flash_properties[] = {
DEFINE_PROP_UINT8("cs", AspeedSMCFlash, cs, 0),
DEFINE_PROP_LINK("controller", AspeedSMCFlash, controller, TYPE_ASPEED_SMC,
AspeedSMCState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_smc_flash_class_init(ObjectClass *klass, void *data)
+static void aspeed_smc_flash_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1370,7 +1383,7 @@ static const AspeedSegments aspeed_2400_smc_segments[] = {
{ 0x10000000, 32 * MiB },
};
-static void aspeed_2400_smc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_smc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1416,7 +1429,7 @@ static const AspeedSegments aspeed_2400_fmc_segments[] = {
{ 0x2A000000, 32 * MiB }
};
-static void aspeed_2400_fmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_fmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1460,7 +1473,7 @@ static int aspeed_2400_spi1_addr_width(const AspeedSMCState *s)
return s->regs[R_SPI_CTRL0] & CTRL_AST2400_SPI_4BYTE ? 4 : 3;
}
-static void aspeed_2400_spi1_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_spi1_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1502,7 +1515,7 @@ static const AspeedSegments aspeed_2500_fmc_segments[] = {
{ 0x2A000000, 32 * MiB },
};
-static void aspeed_2500_fmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_fmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1542,7 +1555,7 @@ static const AspeedSegments aspeed_2500_spi1_segments[] = {
{ 0x32000000, 96 * MiB }, /* end address is readonly */
};
-static void aspeed_2500_spi1_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_spi1_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1578,7 +1591,7 @@ static const AspeedSegments aspeed_2500_spi2_segments[] = {
{ 0x3A000000, 96 * MiB }, /* end address is readonly */
};
-static void aspeed_2500_spi2_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_spi2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1661,7 +1674,7 @@ static const AspeedSegments aspeed_2600_fmc_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2600_fmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_fmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1702,7 +1715,7 @@ static const AspeedSegments aspeed_2600_spi1_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2600_spi1_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_spi1_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1743,7 +1756,7 @@ static const AspeedSegments aspeed_2600_spi2_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2600_spi2_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_spi2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1826,7 +1839,7 @@ static const AspeedSegments aspeed_1030_fmc_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_1030_fmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_fmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1866,7 +1879,7 @@ static const AspeedSegments aspeed_1030_spi1_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_1030_spi1_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_spi1_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -1904,7 +1917,7 @@ static const AspeedSegments aspeed_1030_spi2_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_1030_spi2_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_spi2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -2009,7 +2022,7 @@ static const AspeedSegments aspeed_2700_fmc_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2700_fmc_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_fmc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -2051,7 +2064,7 @@ static const AspeedSegments aspeed_2700_spi0_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2700_spi0_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_spi0_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -2091,7 +2104,7 @@ static const AspeedSegments aspeed_2700_spi1_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2700_spi1_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_spi1_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
@@ -2131,7 +2144,7 @@ static const AspeedSegments aspeed_2700_spi2_segments[] = {
{ 0x0, 0 }, /* disabled */
};
-static void aspeed_2700_spi2_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_spi2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
diff --git a/hw/ssi/bcm2835_spi.c b/hw/ssi/bcm2835_spi.c
index 6ecb42d..bf8ba35 100644
--- a/hw/ssi/bcm2835_spi.c
+++ b/hw/ssi/bcm2835_spi.c
@@ -264,11 +264,11 @@ static const VMStateDescription vmstate_bcm2835_spi = {
}
};
-static void bcm2835_spi_class_init(ObjectClass *klass, void *data)
+static void bcm2835_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = bcm2835_spi_reset;
+ device_class_set_legacy_reset(dc, bcm2835_spi_reset);
dc->realize = bcm2835_spi_realize;
dc->vmsd = &vmstate_bcm2835_spi;
}
diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c
index 863b5fd..f05be68 100644
--- a/hw/ssi/ibex_spi_host.c
+++ b/hw/ssi/ibex_spi_host.c
@@ -154,7 +154,6 @@ static void ibex_spi_host_reset(DeviceState *dev)
ibex_spi_txfifo_reset(s);
s->init_status = true;
- return;
}
/*
@@ -561,9 +560,8 @@ static const MemoryRegionOps ibex_spi_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static Property ibex_spi_properties[] = {
+static const Property ibex_spi_properties[] = {
DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_ibex = {
@@ -624,11 +622,11 @@ static void ibex_spi_host_init(Object *obj)
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}
-static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
+static void ibex_spi_host_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ibex_spi_host_realize;
- dc->reset = ibex_spi_host_reset;
+ device_class_set_legacy_reset(dc, ibex_spi_host_reset);
dc->vmsd = &vmstate_ibex;
device_class_set_props(dc, ibex_spi_properties);
}
diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c
index 12d897d..1312f58 100644
--- a/hw/ssi/imx_spi.c
+++ b/hw/ssi/imx_spi.c
@@ -475,13 +475,13 @@ static void imx_spi_realize(DeviceState *dev, Error **errp)
fifo32_create(&s->rx_fifo, ECSPI_FIFO_SIZE);
}
-static void imx_spi_class_init(ObjectClass *klass, void *data)
+static void imx_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_spi_realize;
dc->vmsd = &vmstate_imx_spi;
- dc->reset = imx_spi_reset;
+ device_class_set_legacy_reset(dc, imx_spi_reset);
dc->desc = "i.MX SPI Controller";
}
diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build
index b999aeb..6afb1ea 100644
--- a/hw/ssi/meson.build
+++ b/hw/ssi/meson.build
@@ -1,3 +1,4 @@
+system_ss.add(when: 'CONFIG_ALLWINNER_A10_SPI', if_true: files('allwinner-a10-spi.c'))
system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_smc.c'))
system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-spi.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_fiu.c', 'npcm_pspi.c'))
@@ -9,6 +10,6 @@ system_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c'))
system_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c'))
system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c'))
-system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c'))
system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c'))
system_ss.add(when: 'CONFIG_BCM2835_SPI', if_true: files('bcm2835_spi.c'))
+system_ss.add(when: 'CONFIG_PNV_SPI', if_true: files('pnv_spi.c'))
diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c
index 1d25ba2..fd7ba7e 100644
--- a/hw/ssi/mss-spi.c
+++ b/hw/ssi/mss-spi.c
@@ -398,12 +398,12 @@ static const VMStateDescription vmstate_mss_spi = {
}
};
-static void mss_spi_class_init(ObjectClass *klass, void *data)
+static void mss_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = mss_spi_realize;
- dc->reset = mss_spi_reset;
+ device_class_set_legacy_reset(dc, mss_spi_reset);
dc->vmsd = &vmstate_mss_spi;
}
diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c
index 119c38c..056ce13 100644
--- a/hw/ssi/npcm7xx_fiu.c
+++ b/hw/ssi/npcm7xx_fiu.c
@@ -29,7 +29,7 @@
#include "trace.h"
/* Up to 128 MiB of flash may be accessed directly as memory. */
-#define NPCM7XX_FIU_FLASH_WINDOW_SIZE (128 * MiB)
+#define NPCM7XX_FIU_MAX_FLASH_WINDOW_SIZE (128 * MiB)
/* Each module has 4 KiB of register space. Only a fraction of it is used. */
#define NPCM7XX_FIU_CTRL_REGS_SIZE (4 * KiB)
@@ -507,6 +507,17 @@ static void npcm7xx_fiu_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->flash_size == 0) {
+ error_setg(errp, "%s: flash size must be set", dev->canonical_path);
+ return;
+ }
+
+ if (s->flash_size > NPCM7XX_FIU_MAX_FLASH_WINDOW_SIZE) {
+ error_setg(errp, "%s: flash size should not exceed 128 MiB",
+ dev->canonical_path);
+ return;
+ }
+
s->spi = ssi_create_bus(dev, "spi");
s->cs_lines = g_new0(qemu_irq, s->cs_count);
qdev_init_gpio_out_named(DEVICE(s), s->cs_lines, "cs", s->cs_count);
@@ -525,7 +536,7 @@ static void npcm7xx_fiu_realize(DeviceState *dev, Error **errp)
flash->fiu = s;
memory_region_init_io(&flash->direct_access, OBJECT(s),
&npcm7xx_fiu_flash_ops, &s->flash[i], "flash",
- NPCM7XX_FIU_FLASH_WINDOW_SIZE);
+ s->flash_size);
sysbus_init_mmio(sbd, &flash->direct_access);
}
}
@@ -541,12 +552,12 @@ static const VMStateDescription vmstate_npcm7xx_fiu = {
},
};
-static Property npcm7xx_fiu_properties[] = {
+static const Property npcm7xx_fiu_properties[] = {
DEFINE_PROP_INT32("cs-count", NPCM7xxFIUState, cs_count, 0),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_SIZE("flash-size", NPCM7xxFIUState, flash_size, 0),
};
-static void npcm7xx_fiu_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_fiu_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ssi/npcm_pspi.c b/hw/ssi/npcm_pspi.c
index 41a5323..a31dcc0 100644
--- a/hw/ssi/npcm_pspi.c
+++ b/hw/ssi/npcm_pspi.c
@@ -199,7 +199,7 @@ static const VMStateDescription vmstate_npcm_pspi = {
};
-static void npcm_pspi_class_init(ObjectClass *klass, void *data)
+static void npcm_pspi_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ssi/omap_spi.c b/hw/ssi/omap_spi.c
deleted file mode 100644
index 8f85c3e..0000000
--- a/hw/ssi/omap_spi.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * TI OMAP processor's Multichannel SPI emulation.
- *
- * Copyright (C) 2007-2009 Nokia Corporation
- *
- * Original code for OMAP2 by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "hw/arm/omap.h"
-
-/* Multichannel SPI */
-struct omap_mcspi_s {
- MemoryRegion iomem;
- qemu_irq irq;
- int chnum;
-
- uint32_t sysconfig;
- uint32_t systest;
- uint32_t irqst;
- uint32_t irqen;
- uint32_t wken;
- uint32_t control;
-
- struct omap_mcspi_ch_s {
- qemu_irq txdrq;
- qemu_irq rxdrq;
- uint32_t (*txrx)(void *opaque, uint32_t, int);
- void *opaque;
-
- uint32_t tx;
- uint32_t rx;
-
- uint32_t config;
- uint32_t status;
- uint32_t control;
- } ch[4];
-};
-
-static inline void omap_mcspi_interrupt_update(struct omap_mcspi_s *s)
-{
- qemu_set_irq(s->irq, s->irqst & s->irqen);
-}
-
-static inline void omap_mcspi_dmarequest_update(struct omap_mcspi_ch_s *ch)
-{
- qemu_set_irq(ch->txdrq,
- (ch->control & 1) && /* EN */
- (ch->config & (1 << 14)) && /* DMAW */
- (ch->status & (1 << 1)) && /* TXS */
- ((ch->config >> 12) & 3) != 1); /* TRM */
- qemu_set_irq(ch->rxdrq,
- (ch->control & 1) && /* EN */
- (ch->config & (1 << 15)) && /* DMAW */
- (ch->status & (1 << 0)) && /* RXS */
- ((ch->config >> 12) & 3) != 2); /* TRM */
-}
-
-static void omap_mcspi_transfer_run(struct omap_mcspi_s *s, int chnum)
-{
- struct omap_mcspi_ch_s *ch = s->ch + chnum;
-
- if (!(ch->control & 1)) /* EN */
- return;
- if ((ch->status & (1 << 0)) && /* RXS */
- ((ch->config >> 12) & 3) != 2 && /* TRM */
- !(ch->config & (1 << 19))) /* TURBO */
- goto intr_update;
- if ((ch->status & (1 << 1)) && /* TXS */
- ((ch->config >> 12) & 3) != 1) /* TRM */
- goto intr_update;
-
- if (!(s->control & 1) || /* SINGLE */
- (ch->config & (1 << 20))) { /* FORCE */
- if (ch->txrx)
- ch->rx = ch->txrx(ch->opaque, ch->tx, /* WL */
- 1 + (0x1f & (ch->config >> 7)));
- }
-
- ch->tx = 0;
- ch->status |= 1 << 2; /* EOT */
- ch->status |= 1 << 1; /* TXS */
- if (((ch->config >> 12) & 3) != 2) /* TRM */
- ch->status |= 1 << 0; /* RXS */
-
-intr_update:
- if ((ch->status & (1 << 0)) && /* RXS */
- ((ch->config >> 12) & 3) != 2 && /* TRM */
- !(ch->config & (1 << 19))) /* TURBO */
- s->irqst |= 1 << (2 + 4 * chnum); /* RX_FULL */
- if ((ch->status & (1 << 1)) && /* TXS */
- ((ch->config >> 12) & 3) != 1) /* TRM */
- s->irqst |= 1 << (0 + 4 * chnum); /* TX_EMPTY */
- omap_mcspi_interrupt_update(s);
- omap_mcspi_dmarequest_update(ch);
-}
-
-void omap_mcspi_reset(struct omap_mcspi_s *s)
-{
- int ch;
-
- s->sysconfig = 0;
- s->systest = 0;
- s->irqst = 0;
- s->irqen = 0;
- s->wken = 0;
- s->control = 4;
-
- for (ch = 0; ch < 4; ch ++) {
- s->ch[ch].config = 0x060000;
- s->ch[ch].status = 2; /* TXS */
- s->ch[ch].control = 0;
-
- omap_mcspi_dmarequest_update(s->ch + ch);
- }
-
- omap_mcspi_interrupt_update(s);
-}
-
-static uint64_t omap_mcspi_read(void *opaque, hwaddr addr, unsigned size)
-{
- struct omap_mcspi_s *s = opaque;
- int ch = 0;
- uint32_t ret;
-
- if (size != 4) {
- return omap_badwidth_read32(opaque, addr);
- }
-
- switch (addr) {
- case 0x00: /* MCSPI_REVISION */
- return 0x91;
-
- case 0x10: /* MCSPI_SYSCONFIG */
- return s->sysconfig;
-
- case 0x14: /* MCSPI_SYSSTATUS */
- return 1; /* RESETDONE */
-
- case 0x18: /* MCSPI_IRQSTATUS */
- return s->irqst;
-
- case 0x1c: /* MCSPI_IRQENABLE */
- return s->irqen;
-
- case 0x20: /* MCSPI_WAKEUPENABLE */
- return s->wken;
-
- case 0x24: /* MCSPI_SYST */
- return s->systest;
-
- case 0x28: /* MCSPI_MODULCTRL */
- return s->control;
-
- case 0x68: ch ++;
- /* fall through */
- case 0x54: ch ++;
- /* fall through */
- case 0x40: ch ++;
- /* fall through */
- case 0x2c: /* MCSPI_CHCONF */
- return s->ch[ch].config;
-
- case 0x6c: ch ++;
- /* fall through */
- case 0x58: ch ++;
- /* fall through */
- case 0x44: ch ++;
- /* fall through */
- case 0x30: /* MCSPI_CHSTAT */
- return s->ch[ch].status;
-
- case 0x70: ch ++;
- /* fall through */
- case 0x5c: ch ++;
- /* fall through */
- case 0x48: ch ++;
- /* fall through */
- case 0x34: /* MCSPI_CHCTRL */
- return s->ch[ch].control;
-
- case 0x74: ch ++;
- /* fall through */
- case 0x60: ch ++;
- /* fall through */
- case 0x4c: ch ++;
- /* fall through */
- case 0x38: /* MCSPI_TX */
- return s->ch[ch].tx;
-
- case 0x78: ch ++;
- /* fall through */
- case 0x64: ch ++;
- /* fall through */
- case 0x50: ch ++;
- /* fall through */
- case 0x3c: /* MCSPI_RX */
- s->ch[ch].status &= ~(1 << 0); /* RXS */
- ret = s->ch[ch].rx;
- omap_mcspi_transfer_run(s, ch);
- return ret;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static void omap_mcspi_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- struct omap_mcspi_s *s = opaque;
- int ch = 0;
-
- if (size != 4) {
- omap_badwidth_write32(opaque, addr, value);
- return;
- }
-
- switch (addr) {
- case 0x00: /* MCSPI_REVISION */
- case 0x14: /* MCSPI_SYSSTATUS */
- case 0x30: /* MCSPI_CHSTAT0 */
- case 0x3c: /* MCSPI_RX0 */
- case 0x44: /* MCSPI_CHSTAT1 */
- case 0x50: /* MCSPI_RX1 */
- case 0x58: /* MCSPI_CHSTAT2 */
- case 0x64: /* MCSPI_RX2 */
- case 0x6c: /* MCSPI_CHSTAT3 */
- case 0x78: /* MCSPI_RX3 */
- OMAP_RO_REG(addr);
- return;
-
- case 0x10: /* MCSPI_SYSCONFIG */
- if (value & (1 << 1)) /* SOFTRESET */
- omap_mcspi_reset(s);
- s->sysconfig = value & 0x31d;
- break;
-
- case 0x18: /* MCSPI_IRQSTATUS */
- if (!((s->control & (1 << 3)) && (s->systest & (1 << 11)))) {
- s->irqst &= ~value;
- omap_mcspi_interrupt_update(s);
- }
- break;
-
- case 0x1c: /* MCSPI_IRQENABLE */
- s->irqen = value & 0x1777f;
- omap_mcspi_interrupt_update(s);
- break;
-
- case 0x20: /* MCSPI_WAKEUPENABLE */
- s->wken = value & 1;
- break;
-
- case 0x24: /* MCSPI_SYST */
- if (s->control & (1 << 3)) /* SYSTEM_TEST */
- if (value & (1 << 11)) { /* SSB */
- s->irqst |= 0x1777f;
- omap_mcspi_interrupt_update(s);
- }
- s->systest = value & 0xfff;
- break;
-
- case 0x28: /* MCSPI_MODULCTRL */
- if (value & (1 << 3)) /* SYSTEM_TEST */
- if (s->systest & (1 << 11)) { /* SSB */
- s->irqst |= 0x1777f;
- omap_mcspi_interrupt_update(s);
- }
- s->control = value & 0xf;
- break;
-
- case 0x68: ch ++;
- /* fall through */
- case 0x54: ch ++;
- /* fall through */
- case 0x40: ch ++;
- /* fall through */
- case 0x2c: /* MCSPI_CHCONF */
- if ((value ^ s->ch[ch].config) & (3 << 14)) /* DMAR | DMAW */
- omap_mcspi_dmarequest_update(s->ch + ch);
- if (((value >> 12) & 3) == 3) { /* TRM */
- qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid TRM value (3)\n",
- __func__);
- }
- if (((value >> 7) & 0x1f) < 3) { /* WL */
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid WL value (%" PRIx64 ")\n",
- __func__, (value >> 7) & 0x1f);
- }
- s->ch[ch].config = value & 0x7fffff;
- break;
-
- case 0x70: ch ++;
- /* fall through */
- case 0x5c: ch ++;
- /* fall through */
- case 0x48: ch ++;
- /* fall through */
- case 0x34: /* MCSPI_CHCTRL */
- if (value & ~s->ch[ch].control & 1) { /* EN */
- s->ch[ch].control |= 1;
- omap_mcspi_transfer_run(s, ch);
- } else
- s->ch[ch].control = value & 1;
- break;
-
- case 0x74: ch ++;
- /* fall through */
- case 0x60: ch ++;
- /* fall through */
- case 0x4c: ch ++;
- /* fall through */
- case 0x38: /* MCSPI_TX */
- s->ch[ch].tx = value;
- s->ch[ch].status &= ~(1 << 1); /* TXS */
- omap_mcspi_transfer_run(s, ch);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- return;
- }
-}
-
-static const MemoryRegionOps omap_mcspi_ops = {
- .read = omap_mcspi_read,
- .write = omap_mcspi_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum,
- qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk)
-{
- struct omap_mcspi_s *s = g_new0(struct omap_mcspi_s, 1);
- struct omap_mcspi_ch_s *ch = s->ch;
-
- s->irq = irq;
- s->chnum = chnum;
- while (chnum --) {
- ch->txdrq = *drq ++;
- ch->rxdrq = *drq ++;
- ch ++;
- }
- omap_mcspi_reset(s);
-
- memory_region_init_io(&s->iomem, NULL, &omap_mcspi_ops, s, "omap.mcspi",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- return s;
-}
-
-void omap_mcspi_attach(struct omap_mcspi_s *s,
- uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque,
- int chipselect)
-{
- if (chipselect < 0 || chipselect >= s->chnum)
- hw_error("%s: Bad chipselect %i\n", __func__, chipselect);
-
- s->ch[chipselect].txrx = txrx;
- s->ch[chipselect].opaque = opaque;
-}
diff --git a/hw/ssi/pl022.c b/hw/ssi/pl022.c
index b8be8dd..1dc0bcb 100644
--- a/hw/ssi/pl022.c
+++ b/hw/ssi/pl022.c
@@ -292,11 +292,11 @@ static void pl022_realize(DeviceState *dev, Error **errp)
s->ssi = ssi_create_bus(dev, "ssi");
}
-static void pl022_class_init(ObjectClass *klass, void *data)
+static void pl022_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = pl022_reset;
+ device_class_set_legacy_reset(dc, pl022_reset);
dc->vmsd = &vmstate_pl022;
dc->realize = pl022_realize;
}
diff --git a/hw/ssi/pnv_spi.c b/hw/ssi/pnv_spi.c
new file mode 100644
index 0000000..f40e883
--- /dev/null
+++ b/hw/ssi/pnv_spi.c
@@ -0,0 +1,1231 @@
+/*
+ * QEMU PowerPC SPI model
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/ssi/pnv_spi.h"
+#include "hw/ssi/pnv_spi_regs.h"
+#include "hw/ssi/ssi.h"
+#include <libfdt.h>
+#include "hw/irq.h"
+#include "trace.h"
+
+#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
+#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
+#define PNV_SPI_FIFO_SIZE 16
+#define RDR_MATCH_FAILURE_LIMIT 16
+
+/*
+ * Macro from include/hw/ppc/fdt.h
+ * fdt.h cannot be included here as it contains ppc target-specific dependencies.
+ */
+#define _FDT(exp) \
+ do { \
+ int _ret = (exp); \
+ if (_ret < 0) { \
+ qemu_log_mask(LOG_GUEST_ERROR, \
+ "error creating device tree: %s: %s", \
+ #exp, fdt_strerror(_ret)); \
+ exit(1); \
+ } \
+ } while (0)
+
+static bool does_rdr_match(PnvSpi *s)
+{
+ /*
+ * According to spec, the mask bits that are 0 are compared and the
+ * bits that are 1 are ignored.
+ */
+ uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]);
+ uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]);
+
+ if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
+ GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
+ return true;
+ }
+ return false;
+}
+
+static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
+{
+ uint8_t byte;
+
+ /*
+ * Offset is an index between 0 and PNV_SPI_REG_SIZE - 1
+ * Check the offset before using it.
+ */
+ if (offset < PNV_SPI_REG_SIZE) {
+ byte = (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
+ } else {
+ /*
+ * Log an error and return a 0xFF since we have to assign something
+ * to byte before returning.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
+ "from TDR\n", offset);
+ byte = 0xff;
+ }
+ return byte;
+}
+
+static uint8_t read_from_frame(PnvSpi *s, uint8_t nr_bytes, uint8_t ecc_count,
+ uint8_t shift_in_count)
+{
+ uint8_t byte;
+ int count = 0;
+
+ while (count < nr_bytes) {
+ shift_in_count++;
+ if ((ecc_count != 0) &&
+ (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
+ shift_in_count = 0;
+ } else if (!fifo8_is_empty(&s->rx_fifo)) {
+ byte = fifo8_pop(&s->rx_fifo);
+ trace_pnv_spi_shift_rx(byte, count);
+ s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty RX_FIFO\n");
+ }
+ count++;
+ } /* end of while */
+ return shift_in_count;
+}
+
+static void spi_response(PnvSpi *s)
+{
+ uint8_t ecc_count;
+ uint8_t shift_in_count;
+ uint32_t rx_len;
+ int i;
+
+ /*
+ * Processing here must handle:
+ * - Which bytes in the payload we should move to the RDR
+ * - Explicit mode counter configuration settings
+ * - RDR full and RDR overrun status
+ */
+
+ /*
+ * First check that the response payload is the exact same
+ * number of bytes as the request payload was
+ */
+ rx_len = fifo8_num_used(&s->rx_fifo);
+ if (rx_len != (s->N1_bytes + s->N2_bytes)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
+ "bytes, expected %d, got %d\n",
+ (s->N1_bytes + s->N2_bytes), rx_len);
+ } else {
+ uint8_t ecc_control;
+ trace_pnv_spi_rx_received(rx_len);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+         * Adding an ECC count lets us know when we have found a payload byte
+ * that was shifted in but cannot be loaded into RDR. Bits 29-30 of
+ * clock_config_reset_control register equal to either 0b00 or 0b10
+ * indicate that we are taking in data with ECC and either applying
+ * the ECC or discarding it.
+ */
+ ecc_count = 0;
+ ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ ecc_count = 1;
+ }
+ /*
+ * Use the N1_rx and N2_rx counts to control shifting data from the
+ * payload into the RDR. Keep an overall count of the number of bytes
+ * shifted into RDR so we can discard every 9th byte when ECC is
+ * enabled.
+ */
+ shift_in_count = 0;
+ /* Handle the N1 portion of the frame first */
+ if (s->N1_rx != 0) {
+ trace_pnv_spi_rx_read_N1frame();
+ shift_in_count = read_from_frame(s, s->N1_bytes, ecc_count, shift_in_count);
+ }
+ /* Handle the N2 portion of the frame */
+ if (s->N2_rx != 0) {
+ /* pop out N1_bytes from rx_fifo if not already */
+ if (s->N1_rx == 0) {
+ for (i = 0; i < s->N1_bytes; i++) {
+ if (!fifo8_is_empty(&s->rx_fifo)) {
+ fifo8_pop(&s->rx_fifo);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty"
+ " RX_FIFO\n");
+ }
+ }
+ }
+ trace_pnv_spi_rx_read_N2frame();
+ shift_in_count = read_from_frame(s, s->N2_bytes, ecc_count, shift_in_count);
+ }
+ if ((s->N1_rx + s->N2_rx) > 0) {
+ /*
+ * Data was received so handle RDR status.
+ * It is easier to handle RDR_full and RDR_overrun status here
+ * since the RDR register's shift_byte_in method is called
+ * multiple times in a row. Controlling RDR status is done here
+ * instead of in the RDR scoped methods for that reason.
+ */
+ if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
+ /*
+ * Data was shifted into the RDR before having been read
+ * causing previous data to have been overrun.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
+ } else {
+ /*
+ * Set status to indicate that the received data register is
+ * full. This flag is only cleared once the RDR is unloaded.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
+ }
+ }
+ } /* end of else */
+} /* end of spi_response() */
+
+static void transfer(PnvSpi *s)
+{
+ uint32_t tx, rx, payload_len;
+ uint8_t rx_byte;
+
+ payload_len = fifo8_num_used(&s->tx_fifo);
+ for (int offset = 0; offset < payload_len; offset += s->transfer_len) {
+ tx = 0;
+ for (int i = 0; i < s->transfer_len; i++) {
+ if ((offset + i) >= payload_len) {
+ tx <<= 8;
+ } else if (!fifo8_is_empty(&s->tx_fifo)) {
+ tx = (tx << 8) | fifo8_pop(&s->tx_fifo);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO underflow\n");
+ }
+ }
+ rx = ssi_transfer(s->ssi_bus, tx);
+ for (int i = 0; i < s->transfer_len; i++) {
+ if ((offset + i) >= payload_len) {
+ break;
+ }
+ rx_byte = (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
+ if (!fifo8_is_full(&s->rx_fifo)) {
+ fifo8_push(&s->rx_fifo, rx_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RX_FIFO is full\n");
+ break;
+ }
+ }
+ }
+ spi_response(s);
+ /* Reset fifo for next frame */
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_fifo);
+}
+
+/*
+ * Calculate the N1 counters based on passed in opcode and
+ * internal register values.
+ * The method assumes that the opcode is a Shift_N1 opcode
+ * and doesn't test it.
+ * The counters returned are:
+ * N1 bits: Number of bits in the payload data that are significant
+ * to the responder.
+ * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
+ * N1_tx: Total number of bytes taken from TDR for N1
+ * N1_rx: Total number of bytes taken from the payload for N1
+ */
+static void calculate_N1(PnvSpi *s, uint8_t opcode)
+{
+ /*
+ * Shift_N1 opcode form: 0x3M
+ * Implicit mode:
+ * If M != 0 the shift count is M bytes and M is the number of tx bytes.
+ * Forced Implicit mode:
+     *    M is the shift count but tx and rx are determined by the count control
+     *    register fields. Note that we only check for Forced Implicit mode when
+ * M != 0 since the mode doesn't make sense when M = 0.
+ * Explicit mode:
+ * If M == 0 then shift count is number of bits defined in the
+ * Counter Configuration Register's shift_count_N1 field.
+ */
+ if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
+ /* Explicit mode */
+ s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
+ s->N1_bytes = (s->N1_bits + 7) / 8;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ /* If tx count control for N1 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_tx = s->N1_bytes;
+ }
+ /* If rx count control for N1 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_rx = s->N1_bytes;
+ }
+ } else {
+ /* Implicit mode/Forced Implicit mode, use M field from opcode */
+ s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->N1_bits = s->N1_bytes * 8;
+ /*
+ * Assume that we are going to transmit the count
+ * (pure Implicit only)
+ */
+ s->N1_tx = s->N1_bytes;
+ s->N1_rx = 0;
+ /* Let Forced Implicit mode have an effect on the counts */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ /*
+ * If Forced Implicit mode and count control doesn't
+ * indicate transmit then reset the tx count to 0
+ */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 0) {
+ s->N1_tx = 0;
+ }
+ /* If rx count control for N1 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_rx = s->N1_bytes;
+ }
+ }
+ }
+ /*
+ * Enforce an upper limit on the size of N1 that is equal to the known size
+ * of the shift register, 64 bits or 72 bits if ECC is enabled.
+ * If the size exceeds 72 bits it is a user error so log an error,
+ * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
+ * error bit.
+ */
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
+ "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
+ s->N1_bytes, s->N1_bits);
+ s->N1_bytes = PNV_SPI_REG_SIZE + 1;
+ s->N1_bits = s->N1_bytes * 8;
+ }
+ } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
+ "bytes = 0x%x, bits = 0x%x\n", s->N1_bytes, s->N1_bits);
+ s->N1_bytes = PNV_SPI_REG_SIZE;
+ s->N1_bits = s->N1_bytes * 8;
+ }
+} /* end of calculate_N1 */
+
+/*
+ * Shift_N1 operation handler method
+ */
+static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, bool send_n1_alone)
+{
+ uint8_t n1_count;
+ bool stop = false;
+ /*
+ * Use a combination of N1 counters to build the N1 portion of the
+ * transmit payload.
+ * We only care about transmit at this time since the request payload
+ * only represents data going out on the controller output line.
+ * Leave mode specific considerations in the calculate function since
+     * all we really care about are counters that tell us exactly how
+ * many bytes are in the payload and how many of those bytes to
+ * include from the TDR into the payload.
+ */
+ calculate_N1(s, opcode);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+ * Zero out the N2 counters here in case there is no N2 operation following
+ * the N1 operation in the sequencer. This keeps leftover N2 information
+ * from interfering with spi_response logic.
+ */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ /*
+ * N1_bytes is the overall size of the N1 portion of the frame regardless of
+ * whether N1 is used for tx, rx or both. Loop over the size to build a
+ * payload that is N1_bytes long.
+ * N1_tx is the count of bytes to take from the TDR and "shift" into the
+ * frame which means append those bytes to the payload for the N1 portion
+ * of the frame.
+ * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
+ * the frame until the overall N1 count is reached.
+ */
+ n1_count = 0;
+ while (n1_count < s->N1_bytes) {
+ /*
+ * Assuming that if N1_tx is not equal to 0 then it is the same as
+ * N1_bytes.
+ */
+ if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {
+
+ if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
+ /*
+                 * Note that we only append to the payload IF the TDR is
+                 * full; otherwise we don't touch the payload because we are
+                 * NOT going to send the payload and will instead tell the
+                 * sequencer that called us to stop and wait for a TDR write
+                 * so we have data to load into the payload.
+ */
+ uint8_t n1_byte = 0x00;
+ n1_byte = get_from_offset(s, n1_count);
+ if (!fifo8_is_full(&s->tx_fifo)) {
+ trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
+ fifo8_push(&s->tx_fifo, n1_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
+ } else {
+ /*
+ * We hit a shift_n1 opcode TX but the TDR is empty, tell the
+ * sequencer to stop and break this loop.
+ */
+                trace_pnv_spi_sequencer_stop_requested("Shift N1 "
+                        "set for transmit but TDR is empty");
+ stop = true;
+ break;
+ }
+ } else {
+ /*
+ * Cases here:
+ * - we are receiving during the N1 frame segment and the RDR
+ * is full so we need to stop until the RDR is read
+ * - we are transmitting and we don't care about RDR status
+ * since we won't be loading RDR during the frame segment.
+ * - we are receiving and the RDR is empty so we allow the operation
+ * to proceed.
+ */
+ if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
+                trace_pnv_spi_sequencer_stop_requested("shift N1 "
+                        "set for receive but RDR is full");
+ stop = true;
+ break;
+ } else if (!fifo8_is_full(&s->tx_fifo)) {
+ trace_pnv_spi_tx_append_FF("n1_byte");
+ fifo8_push(&s->tx_fifo, 0xff);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
+ }
+ n1_count++;
+ } /* end of while */
+ /*
+ * If we are not stopping due to an empty TDR and we are doing an N1 TX
+ * and the TDR is full we need to clear the TDR_full status.
+ * Do this here instead of up in the loop above so we don't log the message
+ * in every loop iteration.
+     * Ignore the send_n1_alone flag; all that does is defer the TX until the N2
+ * operation, which was found immediately after the current opcode. The TDR
+ * was unloaded and will be shifted so we have to clear the TDR_full status.
+ */
+ if (!stop && (s->N1_tx != 0) &&
+ (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
+ }
+ /*
+ * There are other reasons why the shifter would stop, such as a TDR empty
+ * or RDR full condition with N1 set to receive. If we haven't stopped due
+ * to either one of those conditions then check if the send_n1_alone flag is
+ * equal to False, indicating the next opcode is an N2 operation, AND if
+ * the N2 counter reload switch (bit 0 of the N2 count control field) is
+ * set. This condition requires a pacing write to "kick" off the N2
+ * shift which includes the N1 shift as well when send_n1_alone is False.
+ */
+ if (!stop && !send_n1_alone &&
+ (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
+ trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
+ "active, stop N1 shift, TDR_underrun set to 1");
+ stop = true;
+ s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
+ }
+ /*
+ * If send_n1_alone is set AND we have a full TDR then this is the first and
+ * last payload to send and we don't have an N2 frame segment to add to the
+ * payload.
+ */
+ if (send_n1_alone && !stop) {
+ /* We have a TX and a full TDR or an RX and an empty RDR */
+ trace_pnv_spi_tx_request("Shifting N1 frame", fifo8_num_used(&s->tx_fifo));
+ transfer(s);
+ /* The N1 frame shift is complete so reset the N1 counters */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ }
+ return stop;
+} /* end of operation_shiftn1() */
+
+/*
+ * Calculate the N2 counters based on passed in opcode and
+ * internal register values.
+ * The method assumes that the opcode is a Shift_N2 opcode
+ * and doesn't test it.
+ * The counters returned are:
+ * N2 bits: Number of bits in the payload data that are significant
+ * to the responder.
+ * N2_bytes: Total count of payload bytes for the N2 frame.
+ * N2_tx: Total number of bytes taken from TDR for N2
+ * N2_rx: Total number of bytes taken from the payload for N2
+ */
+static void calculate_N2(PnvSpi *s, uint8_t opcode)
+{
+ /*
+ * Shift_N2 opcode form: 0x4M
+ * Implicit mode:
+ * If M!=0 the shift count is M bytes and M is the number of rx bytes.
+ * Forced Implicit mode:
+     *    M is the shift count but tx and rx are determined by the count control
+ * register fields. Note that we only check for Forced Implicit mode when
+ * M != 0 since the mode doesn't make sense when M = 0.
+ * Explicit mode:
+ * If M==0 then shift count is number of bits defined in the
+     *    Counter Configuration Register's shift_count_N2 field.
+ */
+ if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
+ /* Explicit mode */
+ s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
+ s->N2_bytes = (s->N2_bits + 7) / 8;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ /* If tx count control for N2 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_tx = s->N2_bytes;
+ }
+ /* If rx count control for N2 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_rx = s->N2_bytes;
+ }
+ } else {
+ /* Implicit mode/Forced Implicit mode, use M field from opcode */
+ s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->N2_bits = s->N2_bytes * 8;
+ /* Assume that we are going to receive the count */
+ s->N2_rx = s->N2_bytes;
+ s->N2_tx = 0;
+ /* Let Forced Implicit mode have an effect on the counts */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ /*
+ * If Forced Implicit mode and count control doesn't
+ * indicate a receive then reset the rx count to 0
+ */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 0) {
+ s->N2_rx = 0;
+ }
+ /* If tx count control for N2 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_tx = s->N2_bytes;
+ }
+ }
+ }
+ /*
+     * Enforce an upper limit on the size of N2 that is equal to the
+ * known size of the shift register, 64 bits or 72 bits if ECC
+ * is enabled.
+ * If the size exceeds 72 bits it is a user error so log an error,
+ * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
+ * error bit.
+ */
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
+ /* Unsupported N2 shift size when ECC enabled */
+ s->N2_bytes = PNV_SPI_REG_SIZE + 1;
+ s->N2_bits = s->N2_bytes * 8;
+ }
+ } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
+ /* Unsupported N2 shift size */
+ s->N2_bytes = PNV_SPI_REG_SIZE;
+ s->N2_bits = s->N2_bytes * 8;
+ }
+} /* end of calculate_N2 */
+
+/*
+ * Shift_N2 operation handler method
+ */
+
+static bool operation_shiftn2(PnvSpi *s, uint8_t opcode)
+{
+ uint8_t n2_count;
+ bool stop = false;
+ /*
+ * Use a combination of N2 counters to build the N2 portion of the
+ * transmit payload.
+ */
+ calculate_N2(s, opcode);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+ * The only difference between this code and the code for shift N1 is
+ * that this code has to account for the possible presence of N1 transmit
+ * bytes already taken from the TDR.
+ * If there are bytes to be transmitted for the N2 portion of the frame
+ * and there are still bytes in TDR that have not been copied into the
+ * TX data of the payload, this code will handle transmitting those
+ * remaining bytes.
+ * If for some reason the transmit count(s) add up to more than the size
+ * of the TDR we will just append 0xFF to the transmit payload data until
+ * the payload is N1 + N2 bytes long.
+ */
+ n2_count = 0;
+ while (n2_count < s->N2_bytes) {
+ /*
+         * If the RDR is full and we need to RX, just bail out; letting the
+         * code continue would end up building the payload twice in the same
+         * buffer since RDR full causes a sequence stop and restart.
+ */
+ if ((s->N2_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
+            trace_pnv_spi_sequencer_stop_requested("shift N2 set "
+                    "for receive but RDR is full");
+ stop = true;
+ break;
+ }
+ if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < PNV_SPI_REG_SIZE)) {
+ /* Always append data for the N2 segment if it is set for TX */
+ uint8_t n2_byte = 0x00;
+ n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
+ if (!fifo8_is_full(&s->tx_fifo)) {
+ trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
+ fifo8_push(&s->tx_fifo, n2_byte);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
+ } else if (!fifo8_is_full(&s->tx_fifo)) {
+ /*
+ * Regardless of whether or not N2 is set for TX or RX, we need
+ * the number of bytes in the payload to match the overall length
+ * of the operation.
+ */
+ trace_pnv_spi_tx_append_FF("n2_byte");
+ fifo8_push(&s->tx_fifo, 0xff);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
+ break;
+ }
+ n2_count++;
+ } /* end of while */
+ if (!stop) {
+ /* We have a TX and a full TDR or an RX and an empty RDR */
+ trace_pnv_spi_tx_request("Shifting N2 frame", fifo8_num_used(&s->tx_fifo));
+ transfer(s);
+ /*
+ * If we are doing an N2 TX and the TDR is full we need to clear the
+ * TDR_full status. Do this here instead of up in the loop above so we
+ * don't log the message in every loop iteration.
+ */
+ if ((s->N2_tx != 0) && (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
+ }
+ /*
+ * The N2 frame shift is complete so reset the N2 counters.
+ * Reset the N1 counters also in case the frame was a combination of
+ * N1 and N2 segments.
+ */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ s->N1_bits = 0;
+ s->N1_bytes = 0;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ }
+ return stop;
+} /* end of operation_shiftn2()*/
+
+static void operation_sequencer(PnvSpi *s)
+{
+ /*
+ * Loop through each sequencer operation ID and perform the requested
+ * operations.
+ * Flag for indicating if we should send the N1 frame or wait to combine
+ * it with a preceding N2 frame.
+ */
+ bool send_n1_alone = true;
+ bool stop = false; /* Flag to stop the sequencer */
+ uint8_t opcode = 0;
+ uint8_t masked_opcode = 0;
+ uint8_t seq_index;
+
+ /*
+ * Clear the sequencer FSM error bit - general_SPI_status[3]
+ * before starting a sequence.
+ */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
+ /*
+ * If the FSM is idle set the sequencer index to 0
+ * (new/restarted sequence)
+ */
+ if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
+ }
+ /*
+ * SPI_STS_SEQ_INDEX of status register is kept in seq_index variable and
+ * updated back to status register at the end of operation_sequencer().
+ */
+ seq_index = GETFIELD(SPI_STS_SEQ_INDEX, s->status);
+ /*
+     * There are only 8 possible operation IDs to iterate through, though
+ * some operations may cause more than one frame to be sequenced.
+ */
+ while (seq_index < NUM_SEQ_OPS) {
+ opcode = s->seq_op[seq_index];
+ /* Set sequencer state to decode */
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
+ /*
+ * Only the upper nibble of the operation ID is needed to know what
+ * kind of operation is requested.
+ */
+ masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
+ switch (masked_opcode) {
+ /*
+ * Increment the operation index in each case instead of just
+ * once at the end in case an operation like the branch
+ * operation needs to change the index.
+ */
+ case SEQ_OP_STOP:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ /* A stop operation in any position stops the sequencer */
+ trace_pnv_spi_sequencer_op("STOP", seq_index);
+
+ stop = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
+ break;
+
+ case SEQ_OP_SELECT_SLAVE:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SELECT_SLAVE", seq_index);
+ /*
+ * This device currently only supports a single responder
+ * connection at position 0. De-selecting a responder is fine
+ * and expected at the end of a sequence but selecting any
+ * responder other than 0 should cause an error.
+ */
+ s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ if (s->responder_select == 0) {
+ trace_pnv_spi_shifter_done();
+ qemu_set_irq(s->cs_line[0], 1);
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
+ } else if (s->responder_select != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
+ "not supported, select = 0x%x\n", s->responder_select);
+ trace_pnv_spi_sequencer_stop_requested("invalid responder select");
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ stop = true;
+ } else {
+ /*
+ * Only allow an FSM_START state when a responder is
+ * selected
+ */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
+ trace_pnv_spi_shifter_stating();
+ qemu_set_irq(s->cs_line[0], 0);
+ /*
+ * A Shift_N2 operation is only valid after a Shift_N1
+                 * according to the spec. The spec doesn't say whether that means
+                 * immediately after or simply at any point afterwards. We will track
+ * the occurrence of a Shift_N1 to enforce this requirement in
+ * the most generic way possible by assuming that the rule
+ * applies once a valid responder select has occurred.
+ */
+ s->shift_n1_done = false;
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ }
+ break;
+
+ case SEQ_OP_SHIFT_N1:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SHIFT_N1", seq_index);
+ /*
+ * Only allow a shift_n1 when the state is not IDLE or DONE.
+ * In either of those two cases the sequencer is not in a proper
+ * state to perform shift operations because the sequencer has:
+ * - processed a responder deselect (DONE)
+ * - processed a stop opcode (IDLE)
+ * - encountered an error (IDLE)
+ */
+ if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
+ (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
+ "shifter state = 0x%llx", GETFIELD(
+ SPI_STS_SHIFTER_FSM, s->status));
+ /*
+ * Set sequencer FSM error bit 3 (general_SPI_status[3])
+ * in status reg.
+ */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
+ trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
+ stop = true;
+ } else {
+ /*
+ * Look for the special case where there is a shift_n1 set for
+ * transmit and it is followed by a shift_n2 set for transmit
+ * AND the combined transmit length of the two operations is
+ * less than or equal to the size of the TDR register. In this
+ * case we want to use both this current shift_n1 opcode and the
+ * following shift_n2 opcode to assemble the frame for
+ * transmission to the responder without requiring a refill of
+ * the TDR between the two operations.
+ */
+ if ((seq_index != 7) &&
+ PNV_SPI_MASKED_OPCODE(s->seq_op[(seq_index + 1)]) ==
+ SEQ_OP_SHIFT_N2) {
+ send_n1_alone = false;
+ }
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N1);
+ stop = operation_shiftn1(s, opcode, send_n1_alone);
+ if (stop) {
+ /*
+ * The operation code says to stop, this can occur if:
+ * (1) RDR is full and the N1 shift is set for receive
+ * (2) TDR was empty at the time of the N1 shift so we need
+ * to wait for data.
+                     *  (3) Neither 1 nor 2 is occurring and we aren't sending
+ * N1 alone and N2 counter reload is set (bit 0 of the N2
+                     *  counter reload field). In this case TDR_underrun will
+                     *  be set and the payload has been loaded so it is
+ * ok to advance the sequencer.
+ */
+ if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
+ s->shift_n1_done = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_SHIFT_N2);
+ seq_index++;
+ } else {
+ /*
+ * This is case (1) or (2) so the sequencer needs to
+ * wait and NOT go to the next sequence yet.
+ */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
+ }
+ } else {
+ /* Ok to move on to the next index */
+ s->shift_n1_done = true;
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ }
+ }
+ break;
+
+ case SEQ_OP_SHIFT_N2:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SHIFT_N2", seq_index);
+ if (!s->shift_n1_done) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
+ "Shift_N1 is not done, shifter state = 0x%llx",
+ GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
+ /*
+                 * The sequencer stops if an N2 shift is requested before
+                 * any N1 shift has been done. Set sequencer FSM
+                 * error bit 3 (general_SPI_status[3]) in the status reg.
+ */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
+ trace_pnv_spi_sequencer_stop_requested("shift_n2 w/no shift_n1 done");
+ stop = true;
+ } else {
+ /* Ok to do a Shift_N2 */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2);
+ stop = operation_shiftn2(s, opcode);
+ /*
+ * If the operation code says to stop set the shifter state to
+ * wait and stop
+ */
+ if (stop) {
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
+ } else {
+ /* Ok to move on to the next index */
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ }
+ }
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_RDR:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", seq_index);
+ /*
+ * The memory mapping register RDR match value is compared against
+             * the 16 rightmost bits of the RDR (potentially with masking).
+             * Since this comparison is performed against the contents of the
+             * RDR, a receive must have previously occurred; otherwise there
+             * is no data to compare, the operation cannot be completed, and
+             * the sequencer will stop until RDR full is set to 1.
+ */
+ if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
+ bool rdr_matched = false;
+ rdr_matched = does_rdr_match(s);
+ if (rdr_matched) {
+ trace_pnv_spi_RDR_match("success");
+ s->fail_count = 0;
+ /* A match occurred, increment the sequencer index. */
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ } else {
+ trace_pnv_spi_RDR_match("failed");
+ s->fail_count++;
+ /*
+ * Branch the sequencer to the index coded into the op
+ * code.
+ */
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ }
+ if (s->fail_count >= RDR_MATCH_FAILURE_LIMIT) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RDR match failure"
+ " limit crossed %d times hence requesting "
+ "sequencer to stop.\n",
+ RDR_MATCH_FAILURE_LIMIT);
+ stop = true;
+ }
+ /*
+ * Regardless of where the branch ended up we want the
+ * sequencer to continue shifting so we have to clear
+ * RDR_full.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
+ } else {
+                trace_pnv_spi_sequencer_stop_requested("RDR not "
+                        "full for 0x6x opcode");
+ stop = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
+ }
+ break;
+
+ case SEQ_OP_TRANSFER_TDR:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_INC_1:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", seq_index);
+ /*
+ * The spec says the loop should execute count compare + 1 times.
+             * However, we learned from engineering that we really only loop
+             * count_compare times; count compare = 0 makes this op code a
+             * no-op.
+ */
+ if (s->loop_counter_1 !=
+ GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
+ /*
+ * Next index is the lower nibble of the branch operation ID,
+ * mask off all but the first three bits so we don't try to
+ * access beyond the sequencer_operation_reg boundary.
+ */
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->loop_counter_1++;
+ } else {
+ /* Continue to next index if loop counter is reached */
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ }
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_INC_2:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", seq_index);
+ uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
+ s->regs[SPI_CTR_CFG_REG]);
+ /*
+ * The spec says the loop should execute count compare + 1 times.
+             * However, we learned from engineering that we really only loop
+             * count_compare times; count compare = 0 makes this op code a
+             * no-op.
+ */
+ if (s->loop_counter_2 != condition2) {
+ /*
+ * Next index is the lower nibble of the branch operation ID,
+ * mask off all but the first three bits so we don't try to
+ * access beyond the sequencer_operation_reg boundary.
+ */
+ seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->loop_counter_2++;
+ } else {
+ /* Continue to next index if loop counter is reached */
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
+ SEQ_STATE_INDEX_INCREMENT);
+ }
+ break;
+
+ default:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ /* Ignore unsupported operations. */
+ seq_index++;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
+ break;
+ } /* end of switch */
+ /*
+ * If we used all 8 opcodes without seeing a 00 - STOP in the sequence
+ * we need to go ahead and end things as if there was a STOP at the
+ * end.
+ */
+ if (seq_index == NUM_SEQ_OPS) {
+ /* All 8 opcodes completed, sequencer idling */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ seq_index = 0;
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
+ break;
+ }
+ /* Break the loop if a stop was requested */
+ if (stop) {
+ break;
+ }
+ } /* end of while */
+ /* Update sequencer index field in status.*/
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, seq_index);
+} /* end of operation_sequencer() */
+
+/*
+ * The SPIC engine and its internal sequencer can be interrupted and reset by
+ * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
+ * Miscellaneous Register of sbe_register_bo device.
+ * Reset immediately aborts any SPI transaction in progress and returns the
+ * sequencer and state machines to idle state.
+ * The configuration register values are not changed. The status register is
+ * not reset. The engine registers are not reset.
+ * The SPIC engine reset does not have any effect on the attached devices.
+ * Reset handling of any attached devices is beyond the scope of the engine.
+ */
+static void do_reset(DeviceState *dev)
+{
+ PnvSpi *s = PNV_SPI(dev);
+ DeviceState *ssi_dev;
+
+ trace_pnv_spi_reset();
+
+ /* Connect cs irq */
+ ssi_dev = ssi_get_cs(s->ssi_bus, 0);
+ if (ssi_dev) {
+ qemu_irq cs_line = qdev_get_gpio_in_named(ssi_dev, SSI_GPIO_CS, 0);
+ qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_line);
+ }
+
+ /* Reset all N1 and N2 counters, and other constants */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ s->N1_bits = 0;
+ s->N1_bytes = 0;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ /* Disconnected from responder */
+ qemu_set_irq(s->cs_line[0], 1);
+}
+
+static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvSpi *s = PNV_SPI(opaque);
+ uint32_t reg = addr >> 3;
+ uint64_t val = ~0ull;
+
+ switch (reg) {
+ case ERROR_REG:
+ case SPI_CTR_CFG_REG:
+ case CONFIG_REG1:
+ case SPI_CLK_CFG_REG:
+ case SPI_MM_REG:
+ case SPI_XMIT_DATA_REG:
+ val = s->regs[reg];
+ break;
+ case SPI_RCV_DATA_REG:
+ val = s->regs[reg];
+ trace_pnv_spi_read_RDR(val);
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
+ if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
+ trace_pnv_spi_start_sequencer();
+ operation_sequencer(s);
+ }
+ break;
+ case SPI_SEQ_OP_REG:
+ val = 0;
+ for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
+ val = (val << 8) | s->seq_op[i];
+ }
+ break;
+ case SPI_STS_REG:
+ val = s->status;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
+ "read at 0x%" PRIx32 "\n", reg);
+ }
+
+ trace_pnv_spi_read(addr, val);
+ return val;
+}
+
+static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PnvSpi *s = PNV_SPI(opaque);
+ uint32_t reg = addr >> 3;
+
+ trace_pnv_spi_write(addr, val);
+
+ switch (reg) {
+ case ERROR_REG:
+ case SPI_CTR_CFG_REG:
+ case CONFIG_REG1:
+ case SPI_MM_REG:
+ case SPI_RCV_DATA_REG:
+ s->regs[reg] = val;
+ break;
+ case SPI_CLK_CFG_REG:
+ /*
+ * To reset the SPI controller write the sequence 0x5 0xA to
+         * To reset the SPI controller, write the sequence 0x5 0xA to the
+         * reset_control field.
+ if ((GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5)
+ && (GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA)) {
+ /* SPI controller reset sequence completed, resetting */
+ s->regs[reg] = SPI_CLK_CFG_HARD_RST;
+ } else {
+ s->regs[reg] = val;
+ }
+ break;
+ case SPI_XMIT_DATA_REG:
+ /*
+ * Writing to the transmit data register causes the transmit data
+ * register full status bit in the status register to be set. Writing
+ * when the transmit data register full status bit is already set
+ * causes a "Resource Not Available" condition. This is not possible
+ * in the model since writes to this register are not asynchronous to
+         * the operation sequence as they would be in hardware.
+ */
+ s->regs[reg] = val;
+ trace_pnv_spi_write_TDR(val);
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
+ s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
+ trace_pnv_spi_start_sequencer();
+ operation_sequencer(s);
+ break;
+ case SPI_SEQ_OP_REG:
+ for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
+ s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
+ }
+ break;
+ case SPI_STS_REG:
+ /* other fields are ignore_write */
+ s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
+ GETFIELD(SPI_STS_RDR, val));
+ s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
+ GETFIELD(SPI_STS_TDR, val));
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
+ "write at 0x%" PRIx32 "\n", reg);
+ }
+}
+
+static const MemoryRegionOps pnv_spi_xscom_ops = {
+ .read = pnv_spi_xscom_read,
+ .write = pnv_spi_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static const Property pnv_spi_properties[] = {
+ DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
+ DEFINE_PROP_UINT32("chip-id", PnvSpi, chip_id, 0),
+ DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
+};
+
+static void pnv_spi_realize(DeviceState *dev, Error **errp)
+{
+ PnvSpi *s = PNV_SPI(dev);
+ g_autofree char *name = g_strdup_printf("chip%d." TYPE_PNV_SPI_BUS ".%d",
+ s->chip_id, s->spic_num);
+ s->ssi_bus = ssi_create_bus(dev, name);
+ s->cs_line = g_new0(qemu_irq, 1);
+ qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
+
+ fifo8_create(&s->tx_fifo, PNV_SPI_FIFO_SIZE);
+ fifo8_create(&s->rx_fifo, PNV_SPI_FIFO_SIZE);
+
+ /* spi scoms */
+ pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
+ s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
+}
+
+static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
+ int offset)
+{
+ PnvSpi *s = PNV_SPI(dev);
+ g_autofree char *name;
+ int s_offset;
+ const char compat[] = "ibm,power10-spi";
+ uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
+ s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
+ uint32_t reg[] = {
+ cpu_to_be32(spic_pcba),
+ cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
+ };
+ name = g_strdup_printf("pnv_spi@%x", spic_pcba);
+ s_offset = fdt_add_subnode(fdt, offset, name);
+ _FDT(s_offset);
+
+ _FDT(fdt_setprop(fdt, s_offset, "reg", reg, sizeof(reg)));
+ _FDT(fdt_setprop(fdt, s_offset, "compatible", compat, sizeof(compat)));
+ _FDT((fdt_setprop_cell(fdt, s_offset, "spic_num#", s->spic_num)));
+ return 0;
+}
+
+static void pnv_spi_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
+
+ xscomc->dt_xscom = pnv_spi_dt_xscom;
+
+ dc->desc = "PowerNV SPI";
+ dc->realize = pnv_spi_realize;
+ device_class_set_legacy_reset(dc, do_reset);
+ device_class_set_props(dc, pnv_spi_properties);
+}
+
+static const TypeInfo pnv_spi_info = {
+ .name = TYPE_PNV_SPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PnvSpi),
+ .class_init = pnv_spi_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { }
+ }
+};
+
+static void pnv_spi_register_types(void)
+{
+ type_register_static(&pnv_spi_info);
+}
+
+type_init(pnv_spi_register_types);
diff --git a/hw/ssi/sifive_spi.c b/hw/ssi/sifive_spi.c
index 1b4a401..3e01fef 100644
--- a/hw/ssi/sifive_spi.c
+++ b/hw/ssi/sifive_spi.c
@@ -328,17 +328,16 @@ static void sifive_spi_realize(DeviceState *dev, Error **errp)
fifo8_create(&s->rx_fifo, FIFO_CAPACITY);
}
-static Property sifive_spi_properties[] = {
+static const Property sifive_spi_properties[] = {
DEFINE_PROP_UINT32("num-cs", SiFiveSPIState, num_cs, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sifive_spi_class_init(ObjectClass *klass, void *data)
+static void sifive_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, sifive_spi_properties);
- dc->reset = sifive_spi_reset;
+ device_class_set_legacy_reset(dc, sifive_spi_reset);
dc->realize = sifive_spi_realize;
}
diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c
index 3f357e8..d0de640 100644
--- a/hw/ssi/ssi.c
+++ b/hw/ssi/ssi.c
@@ -55,7 +55,7 @@ static bool ssi_bus_check_address(BusState *b, DeviceState *dev, Error **errp)
return true;
}
-static void ssi_bus_class_init(ObjectClass *klass, void *data)
+static void ssi_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -108,12 +108,11 @@ static void ssi_peripheral_realize(DeviceState *dev, Error **errp)
ssc->realize(s, errp);
}
-static Property ssi_peripheral_properties[] = {
+static const Property ssi_peripheral_properties[] = {
DEFINE_PROP_UINT8("cs", SSIPeripheral, cs_index, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ssi_peripheral_class_init(ObjectClass *klass, void *data)
+static void ssi_peripheral_class_init(ObjectClass *klass, const void *data)
{
SSIPeripheralClass *ssc = SSI_PERIPHERAL_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c
index a37139f..871d573 100644
--- a/hw/ssi/stm32f2xx_spi.c
+++ b/hw/ssi/stm32f2xx_spi.c
@@ -202,11 +202,11 @@ static void stm32f2xx_spi_init(Object *obj)
s->ssi = ssi_create_bus(dev, "ssi");
}
-static void stm32f2xx_spi_class_init(ObjectClass *klass, void *data)
+static void stm32f2xx_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f2xx_spi_reset;
+ device_class_set_legacy_reset(dc, stm32f2xx_spi_reset);
dc->vmsd = &vmstate_stm32f2xx_spi;
}
diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events
index 7b5ad6a..2f36cf9 100644
--- a/hw/ssi/trace-events
+++ b/hw/ssi/trace-events
@@ -32,3 +32,34 @@ ibex_spi_host_reset(const char *msg) "%s"
ibex_spi_host_transfer(uint32_t tx_data, uint32_t rx_data) "tx_data: 0x%" PRIx32 " rx_data: @0x%" PRIx32
ibex_spi_host_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64
ibex_spi_host_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size %u:"
+
+# pnv_spi.c
+pnv_spi_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_spi_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_spi_read_RDR(uint64_t val) "data extracted = 0x%" PRIx64
+pnv_spi_write_TDR(uint64_t val) "being written, data written = 0x%" PRIx64
+pnv_spi_start_sequencer(void) ""
+pnv_spi_reset(void) "spic engine sequencer configuration and spi communication"
+pnv_spi_sequencer_op(const char* op, uint8_t index) "%s at index = 0x%x"
+pnv_spi_shifter_stating(void) "pull CS line low"
+pnv_spi_shifter_done(void) "pull the CS line high"
+pnv_spi_log_Ncounts(uint8_t N1_bits, uint8_t N1_bytes, uint8_t N1_tx, uint8_t N1_rx, uint8_t N2_bits, uint8_t N2_bytes, uint8_t N2_tx, uint8_t N2_rx) "N1_bits = %d, N1_bytes = %d, N1_tx = %d, N1_rx = %d, N2_bits = %d, N2_bytes = %d, N2_tx = %d, N2_rx = %d"
+pnv_spi_tx_append(const char* frame, uint8_t byte, uint8_t tdr_index) "%s = 0x%2.2x to payload from TDR at index %d"
+pnv_spi_tx_append_FF(const char* frame) "%s to Payload"
+pnv_spi_tx_request(const char* frame, uint32_t payload_len) "%s, payload len = %d"
+pnv_spi_rx_received(uint32_t payload_len) "payload len = %d"
+pnv_spi_rx_read_N1frame(void) ""
+pnv_spi_rx_read_N2frame(void) ""
+pnv_spi_shift_rx(uint8_t byte, uint32_t index) "byte = 0x%2.2x into RDR from payload index %d"
+pnv_spi_sequencer_stop_requested(const char* reason) "due to %s"
+pnv_spi_RDR_match(const char* result) "%s"
+
+# allwinner_a10_spi.c
+allwinner_a10_spi_update_irq(uint32_t level) "IRQ level is %d"
+allwinner_a10_spi_flush_txfifo_begin(uint32_t tx, uint32_t rx) "Begin: TX Fifo Size = %d, RX Fifo Size = %d"
+allwinner_a10_spi_flush_txfifo_end(uint32_t tx, uint32_t rx) "End: TX Fifo Size = %d, RX Fifo Size = %d"
+allwinner_a10_spi_burst_length(uint32_t len) "Burst length = %d"
+allwinner_a10_spi_tx(uint8_t byte) "write 0x%02x"
+allwinner_a10_spi_rx(uint8_t byte) "read 0x%02x"
+allwinner_a10_spi_read(const char* regname, uint32_t value) "reg[%s] => 0x%08x"
+allwinner_a10_spi_write(const char* regname, uint32_t value) "reg[%s] <= 0x%08x"
diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c
index 2e0687a..4144c8a 100644
--- a/hw/ssi/xilinx_spi.c
+++ b/hw/ssi/xilinx_spi.c
@@ -25,6 +25,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
@@ -32,6 +33,7 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "hw/ssi/ssi.h"
#include "qom/object.h"
@@ -83,6 +85,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(XilinxSPI, XILINX_SPI)
struct XilinxSPI {
SysBusDevice parent_obj;
+ EndianMode model_endianness;
MemoryRegion mmio;
qemu_irq irq;
@@ -313,14 +316,17 @@ done:
xlx_spi_update_irq(s);
}
-static const MemoryRegionOps spi_ops = {
- .read = spi_read,
- .write = spi_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
+static const MemoryRegionOps spi_ops[2] = {
+ [0 ... 1] = {
+ .read = spi_read,
+ .write = spi_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
};
static void xilinx_spi_realize(DeviceState *dev, Error **errp)
@@ -329,6 +335,12 @@ static void xilinx_spi_realize(DeviceState *dev, Error **errp)
XilinxSPI *s = XILINX_SPI(dev);
int i;
+ if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) {
+ error_setg(errp, TYPE_XILINX_SPI " property 'endianness'"
+ " must be set to 'big' or 'little'");
+ return;
+ }
+
DB_PRINT("\n");
s->spi = ssi_create_bus(dev, "spi");
@@ -339,7 +351,8 @@ static void xilinx_spi_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(sbd, &s->cs_lines[i]);
}
- memory_region_init_io(&s->mmio, OBJECT(s), &spi_ops, s,
+ memory_region_init_io(&s->mmio, OBJECT(s),
+ &spi_ops[s->model_endianness == ENDIAN_MODE_BIG], s,
"xilinx-spi", R_MAX * 4);
sysbus_init_mmio(sbd, &s->mmio);
@@ -361,17 +374,17 @@ static const VMStateDescription vmstate_xilinx_spi = {
}
};
-static Property xilinx_spi_properties[] = {
+static const Property xilinx_spi_properties[] = {
+ DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XilinxSPI, model_endianness),
DEFINE_PROP_UINT8("num-ss-bits", XilinxSPI, num_cs, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_spi_class_init(ObjectClass *klass, void *data)
+static void xilinx_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = xilinx_spi_realize;
- dc->reset = xlx_spi_reset;
+ device_class_set_legacy_reset(dc, xlx_spi_reset);
device_class_set_props(dc, xilinx_spi_properties);
dc->vmsd = &vmstate_xilinx_spi;
}
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index 71952a4..a79f3b8 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -33,7 +33,7 @@
#include "hw/ssi/xilinx_spips.h"
#include "qapi/error.h"
#include "hw/register.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "migration/blocker.h"
#include "migration/vmstate.h"
@@ -620,7 +620,9 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
} else if (s->snoop_state == SNOOP_STRIPING ||
s->snoop_state == SNOOP_NONE) {
for (i = 0; i < num_effective_busses(s); ++i) {
- tx_rx[i] = fifo8_pop(&s->tx_fifo);
+ if (!fifo8_is_empty(&s->tx_fifo)) {
+ tx_rx[i] = fifo8_pop(&s->tx_fifo);
+ }
}
stripe8(tx_rx, num_effective_busses(s), false);
} else if (s->snoop_state >= SNOOP_ADDR) {
@@ -1418,19 +1420,17 @@ static const VMStateDescription vmstate_xlnx_zynqmp_qspips = {
}
};
-static Property xilinx_zynqmp_qspips_properties[] = {
+static const Property xilinx_zynqmp_qspips_properties[] = {
DEFINE_PROP_UINT32("dma-burst-size", XlnxZynqMPQSPIPS, dma_burst_size, 64),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property xilinx_spips_properties[] = {
+static const Property xilinx_spips_properties[] = {
DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1),
DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4),
DEFINE_PROP_UINT8("num-txrx-bytes", XilinxSPIPS, num_txrx_bytes, 1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_qspips_class_init(ObjectClass *klass, void * data)
+static void xilinx_qspips_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass);
@@ -1442,13 +1442,13 @@ static void xilinx_qspips_class_init(ObjectClass *klass, void * data)
xsc->tx_fifo_size = TXFF_A_Q;
}
-static void xilinx_spips_class_init(ObjectClass *klass, void *data)
+static void xilinx_spips_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass);
dc->realize = xilinx_spips_realize;
- dc->reset = xilinx_spips_reset;
+ device_class_set_legacy_reset(dc, xilinx_spips_reset);
device_class_set_props(dc, xilinx_spips_properties);
dc->vmsd = &vmstate_xilinx_spips;
@@ -1458,13 +1458,13 @@ static void xilinx_spips_class_init(ObjectClass *klass, void *data)
xsc->tx_fifo_size = TXFF_A;
}
-static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data)
+static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass);
dc->realize = xlnx_zynqmp_qspips_realize;
- dc->reset = xlnx_zynqmp_qspips_reset;
+ device_class_set_legacy_reset(dc, xlnx_zynqmp_qspips_reset);
dc->vmsd = &vmstate_xlnx_zynqmp_qspips;
device_class_set_props(dc, xilinx_zynqmp_qspips_properties);
xsc->reg_ops = &xlnx_zynqmp_qspips_ops;
diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c
index c479138..56d51ce 100644
--- a/hw/ssi/xlnx-versal-ospi.c
+++ b/hw/ssi/xlnx-versal-ospi.c
@@ -1825,18 +1825,17 @@ static const VMStateDescription vmstate_xlnx_versal_ospi = {
}
};
-static Property xlnx_versal_ospi_properties[] = {
+static const Property xlnx_versal_ospi_properties[] = {
DEFINE_PROP_BOOL("dac-with-indac", XlnxVersalOspi, dac_with_indac, false),
DEFINE_PROP_BOOL("indac-write-disabled", XlnxVersalOspi,
ind_write_disabled, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xlnx_versal_ospi_class_init(ObjectClass *klass, void *data)
+static void xlnx_versal_ospi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xlnx_versal_ospi_reset;
+ device_class_set_legacy_reset(dc, xlnx_versal_ospi_reset);
dc->realize = xlnx_versal_ospi_realize;
dc->vmsd = &vmstate_xlnx_versal_ospi;
device_class_set_props(dc, xlnx_versal_ospi_properties);
diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig
index 61fbb62..b3d823c 100644
--- a/hw/timer/Kconfig
+++ b/hw/timer/Kconfig
@@ -12,6 +12,12 @@ config A9_GTIMER
config HPET
bool
default y if PC
+ # The HPET has both a Rust and a C implementation
+ select HPET_C if !HAVE_RUST
+ select X_HPET_RUST if HAVE_RUST
+
+config HPET_C
+ bool
config I8254
bool
@@ -21,6 +27,9 @@ config ALLWINNER_A10_PIT
bool
select PTIMER
+config PXA2XX_TIMER
+ bool
+
config SIFIVE_PWM
bool
diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c
index 64d80cd..690140f 100644
--- a/hw/timer/a9gtimer.c
+++ b/hw/timer/a9gtimer.c
@@ -32,7 +32,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/core/cpu.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#ifndef A9_GTIMER_ERR_DEBUG
#define A9_GTIMER_ERR_DEBUG 0
@@ -373,18 +373,17 @@ static const VMStateDescription vmstate_a9_gtimer = {
}
};
-static Property a9_gtimer_properties[] = {
+static const Property a9_gtimer_properties[] = {
DEFINE_PROP_UINT32("num-cpu", A9GTimerState, num_cpu, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void a9_gtimer_class_init(ObjectClass *klass, void *data)
+static void a9_gtimer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = a9_gtimer_realize;
dc->vmsd = &vmstate_a9_gtimer;
- dc->reset = a9_gtimer_reset;
+ device_class_set_legacy_reset(dc, a9_gtimer_reset);
device_class_set_props(dc, a9_gtimer_properties);
}
diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c
index a524de1..e4c3532 100644
--- a/hw/timer/allwinner-a10-pit.c
+++ b/hw/timer/allwinner-a10-pit.c
@@ -185,15 +185,14 @@ static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
static const MemoryRegionOps a10_pit_ops = {
.read = a10_pit_read,
.write = a10_pit_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
-static Property a10_pit_properties[] = {
+static const Property a10_pit_properties[] = {
DEFINE_PROP_UINT32("clk0-freq", AwA10PITState, clk_freq[0], 0),
DEFINE_PROP_UINT32("clk1-freq", AwA10PITState, clk_freq[1], 0),
DEFINE_PROP_UINT32("clk2-freq", AwA10PITState, clk_freq[2], 0),
DEFINE_PROP_UINT32("clk3-freq", AwA10PITState, clk_freq[3], 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_a10_pit = {
@@ -289,11 +288,11 @@ static void a10_pit_finalize(Object *obj)
}
}
-static void a10_pit_class_init(ObjectClass *klass, void *data)
+static void a10_pit_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = a10_pit_reset;
+ device_class_set_legacy_reset(dc, a10_pit_reset);
device_class_set_props(dc, a10_pit_properties);
dc->desc = "allwinner a10 timer";
dc->vmsd = &vmstate_a10_pit;
diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c
index bca4cee..7cc5915 100644
--- a/hw/timer/arm_mptimer.c
+++ b/hw/timer/arm_mptimer.c
@@ -300,18 +300,17 @@ static const VMStateDescription vmstate_arm_mptimer = {
}
};
-static Property arm_mptimer_properties[] = {
+static const Property arm_mptimer_properties[] = {
DEFINE_PROP_UINT32("num-cpu", ARMMPTimerState, num_cpu, 0),
- DEFINE_PROP_END_OF_LIST()
};
-static void arm_mptimer_class_init(ObjectClass *klass, void *data)
+static void arm_mptimer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = arm_mptimer_realize;
dc->vmsd = &vmstate_arm_mptimer;
- dc->reset = arm_mptimer_reset;
+ device_class_set_legacy_reset(dc, arm_mptimer_reset);
device_class_set_props(dc, arm_mptimer_properties);
}
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
index 0940e03..56638ff 100644
--- a/hw/timer/arm_timer.c
+++ b/hw/timer/arm_timer.c
@@ -387,13 +387,12 @@ static const TypeInfo icp_pit_info = {
.instance_init = icp_pit_init,
};
-static Property sp804_properties[] = {
+static const Property sp804_properties[] = {
DEFINE_PROP_UINT32("freq0", SP804State, freq0, 1000000),
DEFINE_PROP_UINT32("freq1", SP804State, freq1, 1000000),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sp804_class_init(ObjectClass *klass, void *data)
+static void sp804_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c
index f6b1ace..7e4ddcd 100644
--- a/hw/timer/armv7m_systick.c
+++ b/hw/timer/armv7m_systick.c
@@ -285,12 +285,12 @@ static const VMStateDescription vmstate_systick = {
}
};
-static void systick_class_init(ObjectClass *klass, void *data)
+static void systick_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_systick;
- dc->reset = systick_reset;
+ device_class_set_legacy_reset(dc, systick_reset);
dc->realize = systick_realize;
}
diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c
index fc5c94b..57db035 100644
--- a/hw/timer/aspeed_timer.c
+++ b/hw/timer/aspeed_timer.c
@@ -239,9 +239,8 @@ static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg)
return value;
}
-static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size)
+static uint64_t aspeed_timer_read_common(AspeedTimerCtrlState *s, hwaddr offset)
{
- AspeedTimerCtrlState *s = opaque;
const int reg = (offset & 0xf) / 4;
uint64_t value;
@@ -256,10 +255,11 @@ static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size)
value = aspeed_timer_get_value(&s->timers[(offset >> 4) - 1], reg);
break;
default:
- value = ASPEED_TIMER_GET_CLASS(s)->read(s, offset);
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ value = 0;
break;
}
- trace_aspeed_timer_read(offset, size, value);
return value;
}
@@ -276,7 +276,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg,
old_reload = t->reload;
t->reload = calculate_min_ticks(t, value);
- /* If the reload value was not previously set, or zero, and
+ /*
+ * If the reload value was not previously set, or zero, and
* the current value is valid, try to start the timer if it is
* enabled.
*/
@@ -312,7 +313,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg,
}
}
-/* Control register operations are broken out into helpers that can be
+/*
+ * Control register operations are broken out into helpers that can be
* explicitly called on aspeed_timer_reset(), but also from
* aspeed_timer_ctrl_op().
*/
@@ -396,7 +398,8 @@ static void aspeed_timer_set_ctrl(AspeedTimerCtrlState *s, uint32_t reg)
AspeedTimer *t;
const uint8_t enable_mask = BIT(op_enable);
- /* Handle a dependency between the 'enable' and remaining three
+ /*
+ * Handle a dependency between the 'enable' and remaining three
* configuration bits - i.e. if more than one bit in the control set has
* changed, including the 'enable' bit, then we want either disable the
* timer and perform configuration, or perform configuration and then
@@ -428,12 +431,11 @@ static void aspeed_timer_set_ctrl2(AspeedTimerCtrlState *s, uint32_t value)
trace_aspeed_timer_set_ctrl2(value);
}
-static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value,
- unsigned size)
+static void aspeed_timer_write_common(AspeedTimerCtrlState *s, hwaddr offset,
+ uint64_t value)
{
const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF);
const int reg = (offset & 0xf) / 4;
- AspeedTimerCtrlState *s = opaque;
switch (offset) {
/* Control Registers */
@@ -448,11 +450,25 @@ static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value,
aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS) - 1, reg, tv);
break;
default:
- ASPEED_TIMER_GET_CLASS(s)->write(s, offset, value);
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
break;
}
}
+static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AspeedTimerCtrlState *s = ASPEED_TIMER(opaque);
+ return ASPEED_TIMER_GET_CLASS(s)->read(s, offset);
+}
+
+static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AspeedTimerCtrlState *s = ASPEED_TIMER(opaque);
+ ASPEED_TIMER_GET_CLASS(s)->write(s, offset, value);
+}
+
static const MemoryRegionOps aspeed_timer_ops = {
.read = aspeed_timer_read,
.write = aspeed_timer_write,
@@ -472,12 +488,15 @@ static uint64_t aspeed_2400_timer_read(AspeedTimerCtrlState *s, hwaddr offset)
break;
case 0x38:
case 0x3C:
- default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
value = 0;
break;
+ default:
+ value = aspeed_timer_read_common(s, offset);
+ break;
}
+ trace_aspeed_timer_read(offset, value);
return value;
}
@@ -492,10 +511,12 @@ static void aspeed_2400_timer_write(AspeedTimerCtrlState *s, hwaddr offset,
break;
case 0x38:
case 0x3C:
- default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
break;
+ default:
+ aspeed_timer_write_common(s, offset, value);
+ break;
}
}
@@ -511,12 +532,15 @@ static uint64_t aspeed_2500_timer_read(AspeedTimerCtrlState *s, hwaddr offset)
value = s->ctrl3 & BIT(0);
break;
case 0x3C:
- default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
value = 0;
break;
+ default:
+ value = aspeed_timer_read_common(s, offset);
+ break;
}
+ trace_aspeed_timer_read(offset, value);
return value;
}
@@ -545,8 +569,7 @@ static void aspeed_2500_timer_write(AspeedTimerCtrlState *s, hwaddr offset,
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
- __func__, offset);
+ aspeed_timer_write_common(s, offset, value);
break;
}
}
@@ -561,12 +584,15 @@ static uint64_t aspeed_2600_timer_read(AspeedTimerCtrlState *s, hwaddr offset)
break;
case 0x38:
case 0x3C:
- default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
value = 0;
break;
+ default:
+ value = aspeed_timer_read_common(s, offset);
+ break;
}
+ trace_aspeed_timer_read(offset, value);
return value;
}
@@ -577,17 +603,209 @@ static void aspeed_2600_timer_write(AspeedTimerCtrlState *s, hwaddr offset,
switch (offset) {
case 0x34:
- s->irq_sts &= tv;
+ s->irq_sts &= ~tv;
break;
case 0x3C:
aspeed_timer_set_ctrl(s, s->ctrl & ~tv);
break;
-
case 0x38:
- default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
break;
+ default:
+ aspeed_timer_write_common(s, offset, value);
+ break;
+ }
+}
+
+static void aspeed_2700_timer_set_ctrl(AspeedTimerCtrlState *s, int index,
+ uint32_t reg)
+{
+ const uint8_t overflow_interrupt_mask = BIT(op_overflow_interrupt);
+ const uint8_t external_clock_mask = BIT(op_external_clock);
+ const uint8_t pulse_enable_mask = BIT(op_pulse_enable);
+ const uint8_t enable_mask = BIT(op_enable);
+ AspeedTimer *t;
+ uint8_t t_old;
+ uint8_t t_new;
+ int shift;
+
+ /*
+ * Writing 1 sets the corresponding control bits.
+ * Handle the dependency between the 'enable' bit and the remaining three
+ * configuration bits - i.e. if more than one bit in the control set is
+ * being set, including the 'enable' bit, perform the configuration first
+ * and then enable the timer.
+ * The Interrupt Status bit cannot be set through this register.
+ */
+
+ t = &s->timers[index];
+ shift = index * TIMER_CTRL_BITS;
+
+ t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK;
+ t_new = reg & TIMER_CTRL_MASK;
+
+ if (!(t_old & external_clock_mask) &&
+ (t_new & external_clock_mask)) {
+ aspeed_timer_ctrl_external_clock(t, true);
+ s->ctrl = deposit32(s->ctrl, shift + op_external_clock, 1, 1);
+ }
+
+ if (!(t_old & overflow_interrupt_mask) &&
+ (t_new & overflow_interrupt_mask)) {
+ aspeed_timer_ctrl_overflow_interrupt(t, true);
+ s->ctrl = deposit32(s->ctrl, shift + op_overflow_interrupt, 1, 1);
+ }
+
+ if (!(t_old & pulse_enable_mask) &&
+ (t_new & pulse_enable_mask)) {
+ aspeed_timer_ctrl_pulse_enable(t, true);
+ s->ctrl = deposit32(s->ctrl, shift + op_pulse_enable, 1, 1);
+ }
+
+ /* If we are enabling, do so last */
+ if (!(t_old & enable_mask) &&
+ (t_new & enable_mask)) {
+ aspeed_timer_ctrl_enable(t, true);
+ s->ctrl = deposit32(s->ctrl, shift + op_enable, 1, 1);
+ }
+}
+
+static void aspeed_2700_timer_clear_ctrl(AspeedTimerCtrlState *s, int index,
+ uint32_t reg)
+{
+ const uint8_t overflow_interrupt_mask = BIT(op_overflow_interrupt);
+ const uint8_t external_clock_mask = BIT(op_external_clock);
+ const uint8_t pulse_enable_mask = BIT(op_pulse_enable);
+ const uint8_t enable_mask = BIT(op_enable);
+ AspeedTimer *t;
+ uint8_t t_old;
+ uint8_t t_new;
+ int shift;
+
+ /*
+ * Writing 1 clears the corresponding control bits.
+ * Handle the dependency between the 'enable' bit and the remaining three
+ * configuration bits - i.e. if more than one bit in the control set is
+ * being cleared, including the 'enable' bit, disable the timer first and
+ * then perform the configuration.
+ */
+
+ t = &s->timers[index];
+ shift = index * TIMER_CTRL_BITS;
+
+ t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK;
+ t_new = reg & TIMER_CTRL_MASK;
+
+ /* If we are disabling, do so first */
+ if ((t_old & enable_mask) &&
+ (t_new & enable_mask)) {
+ aspeed_timer_ctrl_enable(t, false);
+ s->ctrl = deposit32(s->ctrl, shift + op_enable, 1, 0);
+ }
+
+ if ((t_old & external_clock_mask) &&
+ (t_new & external_clock_mask)) {
+ aspeed_timer_ctrl_external_clock(t, false);
+ s->ctrl = deposit32(s->ctrl, shift + op_external_clock, 1, 0);
+ }
+
+ if ((t_old & overflow_interrupt_mask) &&
+ (t_new & overflow_interrupt_mask)) {
+ aspeed_timer_ctrl_overflow_interrupt(t, false);
+ s->ctrl = deposit32(s->ctrl, shift + op_overflow_interrupt, 1, 0);
+ }
+
+ if ((t_old & pulse_enable_mask) &&
+ (t_new & pulse_enable_mask)) {
+ aspeed_timer_ctrl_pulse_enable(t, false);
+ s->ctrl = deposit32(s->ctrl, shift + op_pulse_enable, 1, 0);
+ }
+
+ /* Clear interrupt status */
+ if (reg & 0x10000) {
+ s->irq_sts = deposit32(s->irq_sts, index, 1, 0);
+ }
+}
+
+static uint64_t aspeed_2700_timer_read(AspeedTimerCtrlState *s, hwaddr offset)
+{
+ uint32_t timer_offset = offset & 0x3f;
+ int timer_index = offset >> 6;
+ uint64_t value = 0;
+
+ if (timer_index >= ASPEED_TIMER_NR_TIMERS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%" PRIx64 " out of bounds\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (timer_offset) {
+ /*
+ * Counter Status
+ * Counter Reload
+ * Counter First Matching
+ * Counter Second Matching
+ */
+ case 0x00 ... 0x0C:
+ value = aspeed_timer_get_value(&s->timers[timer_index],
+ timer_offset >> 2);
+ break;
+ /* Counter Control and Interrupt Status */
+ case 0x10:
+ value = deposit64(value, 0, 4,
+ extract32(s->ctrl, timer_index * 4, 4));
+ value = deposit64(value, 16, 1,
+ extract32(s->irq_sts, timer_index, 1));
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%"
+ PRIx64"\n", __func__, offset);
+ value = 0;
+ break;
+ }
+ trace_aspeed_timer_read(offset, value);
+ return value;
+}
+
+static void aspeed_2700_timer_write(AspeedTimerCtrlState *s, hwaddr offset,
+ uint64_t value)
+{
+ const uint32_t timer_value = (uint32_t)(value & 0xFFFFFFFF);
+ uint32_t timer_offset = offset & 0x3f;
+ int timer_index = offset >> 6;
+
+ if (timer_index >= ASPEED_TIMER_NR_TIMERS) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: offset 0x%" PRIx64 " out of bounds\n",
+ __func__, offset);
+ return;
+ }
+
+ switch (timer_offset) {
+ /*
+ * Counter Status
+ * Counter Reload
+ * Counter First Matching
+ * Counter Second Matching
+ */
+ case 0x00 ... 0x0C:
+ aspeed_timer_set_value(s, timer_index, timer_offset >> 2,
+ timer_value);
+ break;
+ /* Counter Control Set and Interrupt Status */
+ case 0x10:
+ aspeed_2700_timer_set_ctrl(s, timer_index, timer_value);
+ break;
+ /* Counter Control Clear and Interrupt Status */
+ case 0x14:
+ aspeed_2700_timer_clear_ctrl(s, timer_index, timer_value);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%"
+ PRIx64"\n", __func__, offset);
+ break;
}
}
@@ -623,7 +841,8 @@ static void aspeed_timer_reset(DeviceState *dev)
for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
AspeedTimer *t = &s->timers[i];
- /* Explicitly call helpers to avoid any conditional behaviour through
+ /*
+ * Explicitly call helpers to avoid any conditional behaviour through
* aspeed_timer_set_ctrl().
*/
aspeed_timer_ctrl_enable(t, false);
@@ -671,18 +890,17 @@ static const VMStateDescription vmstate_aspeed_timer_state = {
}
};
-static Property aspeed_timer_properties[] = {
+static const Property aspeed_timer_properties[] = {
DEFINE_PROP_LINK("scu", AspeedTimerCtrlState, scu, TYPE_ASPEED_SCU,
AspeedSCUState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void timer_class_init(ObjectClass *klass, void *data)
+static void timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = aspeed_timer_realize;
- dc->reset = aspeed_timer_reset;
+ device_class_set_legacy_reset(dc, aspeed_timer_reset);
dc->desc = "ASPEED Timer";
dc->vmsd = &vmstate_aspeed_timer_state;
device_class_set_props(dc, aspeed_timer_properties);
@@ -697,7 +915,7 @@ static const TypeInfo aspeed_timer_info = {
.abstract = true,
};
-static void aspeed_2400_timer_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass);
@@ -713,7 +931,7 @@ static const TypeInfo aspeed_2400_timer_info = {
.class_init = aspeed_2400_timer_class_init,
};
-static void aspeed_2500_timer_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass);
@@ -729,7 +947,7 @@ static const TypeInfo aspeed_2500_timer_info = {
.class_init = aspeed_2500_timer_class_init,
};
-static void aspeed_2600_timer_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass);
@@ -745,7 +963,7 @@ static const TypeInfo aspeed_2600_timer_info = {
.class_init = aspeed_2600_timer_class_init,
};
-static void aspeed_1030_timer_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass);
@@ -761,6 +979,22 @@ static const TypeInfo aspeed_1030_timer_info = {
.class_init = aspeed_1030_timer_class_init,
};
+static void aspeed_2700_timer_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass);
+
+ dc->desc = "ASPEED 2700 Timer";
+ awc->read = aspeed_2700_timer_read;
+ awc->write = aspeed_2700_timer_write;
+}
+
+static const TypeInfo aspeed_2700_timer_info = {
+ .name = TYPE_ASPEED_2700_TIMER,
+ .parent = TYPE_ASPEED_TIMER,
+ .class_init = aspeed_2700_timer_class_init,
+};
+
static void aspeed_timer_register_types(void)
{
type_register_static(&aspeed_timer_info);
@@ -768,6 +1002,7 @@ static void aspeed_timer_register_types(void)
type_register_static(&aspeed_2500_timer_info);
type_register_static(&aspeed_2600_timer_info);
type_register_static(&aspeed_1030_timer_info);
+ type_register_static(&aspeed_2700_timer_info);
}
type_init(aspeed_timer_register_types)
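
The aspeed_timer refactoring above inverts the old dispatch: the MemoryRegionOps handlers now always call the per-SoC class hook, and each hook handles only its generation-specific offsets before deferring to aspeed_timer_read_common()/aspeed_timer_write_common() for the shared registers. A stand-alone sketch of that hook-plus-common-fallback shape, with illustrative types and offsets rather than the device's real register map:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct TimerCtrl TimerCtrl;
    typedef uint64_t (*ReadHook)(TimerCtrl *s, uint64_t offset);

    struct TimerCtrl {
        ReadHook read;   /* per-SoC hook, like AspeedTimerClass::read */
        uint32_t ctrl;
    };

    /* Registers shared by every generation. */
    static uint64_t timer_read_common(TimerCtrl *s, uint64_t offset)
    {
        return offset == 0x30 ? s->ctrl : 0;
    }

    /* One generation's hook: special-case its own registers, then fall back. */
    static uint64_t soc_a_read(TimerCtrl *s, uint64_t offset)
    {
        if (offset == 0x38) {
            return 0x1234;
        }
        return timer_read_common(s, offset);
    }

    int main(void)
    {
        TimerCtrl s = { .read = soc_a_read, .ctrl = 0xff };
        printf("0x30 -> 0x%" PRIx64 "\n", s.read(&s, 0x30));
        printf("0x38 -> 0x%" PRIx64 "\n", s.read(&s, 0x38));
        return 0;
    }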
diff --git a/hw/timer/avr_timer16.c b/hw/timer/avr_timer16.c
index c48555d..012d829 100644
--- a/hw/timer/avr_timer16.c
+++ b/hw/timer/avr_timer16.c
@@ -542,11 +542,10 @@ static const MemoryRegionOps avr_timer16_ifr_ops = {
.impl = {.max_access_size = 1}
};
-static Property avr_timer16_properties[] = {
+static const Property avr_timer16_properties[] = {
DEFINE_PROP_UINT8("id", struct AVRTimer16State, id, 0),
DEFINE_PROP_UINT64("cpu-frequency-hz", struct AVRTimer16State,
cpu_freq_hz, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static void avr_timer16_pr(void *opaque, int irq, int level)
@@ -596,11 +595,11 @@ static void avr_timer16_realize(DeviceState *dev, Error **errp)
s->enabled = true;
}
-static void avr_timer16_class_init(ObjectClass *klass, void *data)
+static void avr_timer16_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = avr_timer16_reset;
+ device_class_set_legacy_reset(dc, avr_timer16_reset);
dc->realize = avr_timer16_realize;
device_class_set_props(dc, avr_timer16_properties);
}
diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c
index 3ec6460..7929aaa 100644
--- a/hw/timer/bcm2835_systmr.c
+++ b/hw/timer/bcm2835_systmr.c
@@ -154,12 +154,12 @@ static const VMStateDescription bcm2835_systmr_vmstate = {
}
};
-static void bcm2835_systmr_class_init(ObjectClass *klass, void *data)
+static void bcm2835_systmr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = bcm2835_systmr_realize;
- dc->reset = bcm2835_systmr_reset;
+ device_class_set_legacy_reset(dc, bcm2835_systmr_reset);
dc->vmsd = &bcm2835_systmr_vmstate;
}
diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c
index 54dbd4c..9c7ba16 100644
--- a/hw/timer/cadence_ttc.c
+++ b/hw/timer/cadence_ttc.c
@@ -451,7 +451,7 @@ static const VMStateDescription vmstate_cadence_ttc = {
}
};
-static void cadence_ttc_class_init(ObjectClass *klass, void *data)
+static void cadence_ttc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c
index ddf9070..34c550a 100644
--- a/hw/timer/cmsdk-apb-dualtimer.c
+++ b/hw/timer/cmsdk-apb-dualtimer.c
@@ -534,13 +534,13 @@ static const VMStateDescription cmsdk_apb_dualtimer_vmstate = {
}
};
-static void cmsdk_apb_dualtimer_class_init(ObjectClass *klass, void *data)
+static void cmsdk_apb_dualtimer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cmsdk_apb_dualtimer_realize;
dc->vmsd = &cmsdk_apb_dualtimer_vmstate;
- dc->reset = cmsdk_apb_dualtimer_reset;
+ device_class_set_legacy_reset(dc, cmsdk_apb_dualtimer_reset);
}
static const TypeInfo cmsdk_apb_dualtimer_info = {
diff --git a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c
index 814545c..4095267 100644
--- a/hw/timer/cmsdk-apb-timer.c
+++ b/hw/timer/cmsdk-apb-timer.c
@@ -261,13 +261,13 @@ static const VMStateDescription cmsdk_apb_timer_vmstate = {
}
};
-static void cmsdk_apb_timer_class_init(ObjectClass *klass, void *data)
+static void cmsdk_apb_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cmsdk_apb_timer_realize;
dc->vmsd = &cmsdk_apb_timer_vmstate;
- dc->reset = cmsdk_apb_timer_reset;
+ device_class_set_legacy_reset(dc, cmsdk_apb_timer_reset);
}
static const TypeInfo cmsdk_apb_timer_info = {
diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c
index 9fc5c1d..355138d 100644
--- a/hw/timer/digic-timer.c
+++ b/hw/timer/digic-timer.c
@@ -161,11 +161,11 @@ static void digic_timer_finalize(Object *obj)
ptimer_free(s->ptimer);
}
-static void digic_timer_class_init(ObjectClass *klass, void *class_data)
+static void digic_timer_class_init(ObjectClass *klass, const void *class_data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = digic_timer_reset;
+ device_class_set_legacy_reset(dc, digic_timer_reset);
dc->vmsd = &vmstate_digic_timer;
}
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
deleted file mode 100644
index dd6d96b..0000000
--- a/hw/timer/etraxfs_timer.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * QEMU ETRAX Timers
- *
- * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/sysbus.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "migration/vmstate.h"
-#include "qemu/module.h"
-#include "qemu/timer.h"
-#include "hw/irq.h"
-#include "hw/ptimer.h"
-#include "qom/object.h"
-
-#define D(x)
-
-#define RW_TMR0_DIV 0x00
-#define R_TMR0_DATA 0x04
-#define RW_TMR0_CTRL 0x08
-#define RW_TMR1_DIV 0x10
-#define R_TMR1_DATA 0x14
-#define RW_TMR1_CTRL 0x18
-#define R_TIME 0x38
-#define RW_WD_CTRL 0x40
-#define R_WD_STAT 0x44
-#define RW_INTR_MASK 0x48
-#define RW_ACK_INTR 0x4c
-#define R_INTR 0x50
-#define R_MASKED_INTR 0x54
-
-#define TYPE_ETRAX_FS_TIMER "etraxfs-timer"
-typedef struct ETRAXTimerState ETRAXTimerState;
-DECLARE_INSTANCE_CHECKER(ETRAXTimerState, ETRAX_TIMER,
- TYPE_ETRAX_FS_TIMER)
-
-struct ETRAXTimerState {
- SysBusDevice parent_obj;
-
- MemoryRegion mmio;
- qemu_irq irq;
- qemu_irq nmi;
-
- ptimer_state *ptimer_t0;
- ptimer_state *ptimer_t1;
- ptimer_state *ptimer_wd;
-
- uint32_t wd_hits;
-
- /* Control registers. */
- uint32_t rw_tmr0_div;
- uint32_t r_tmr0_data;
- uint32_t rw_tmr0_ctrl;
-
- uint32_t rw_tmr1_div;
- uint32_t r_tmr1_data;
- uint32_t rw_tmr1_ctrl;
-
- uint32_t rw_wd_ctrl;
-
- uint32_t rw_intr_mask;
- uint32_t rw_ack_intr;
- uint32_t r_intr;
- uint32_t r_masked_intr;
-};
-
-static const VMStateDescription vmstate_etraxfs = {
- .name = "etraxfs",
- .version_id = 0,
- .minimum_version_id = 0,
- .fields = (const VMStateField[]) {
- VMSTATE_PTIMER(ptimer_t0, ETRAXTimerState),
- VMSTATE_PTIMER(ptimer_t1, ETRAXTimerState),
- VMSTATE_PTIMER(ptimer_wd, ETRAXTimerState),
-
- VMSTATE_UINT32(wd_hits, ETRAXTimerState),
-
- VMSTATE_UINT32(rw_tmr0_div, ETRAXTimerState),
- VMSTATE_UINT32(r_tmr0_data, ETRAXTimerState),
- VMSTATE_UINT32(rw_tmr0_ctrl, ETRAXTimerState),
-
- VMSTATE_UINT32(rw_tmr1_div, ETRAXTimerState),
- VMSTATE_UINT32(r_tmr1_data, ETRAXTimerState),
- VMSTATE_UINT32(rw_tmr1_ctrl, ETRAXTimerState),
-
- VMSTATE_UINT32(rw_wd_ctrl, ETRAXTimerState),
-
- VMSTATE_UINT32(rw_intr_mask, ETRAXTimerState),
- VMSTATE_UINT32(rw_ack_intr, ETRAXTimerState),
- VMSTATE_UINT32(r_intr, ETRAXTimerState),
- VMSTATE_UINT32(r_masked_intr, ETRAXTimerState),
-
- VMSTATE_END_OF_LIST()
- }
-};
-
-static uint64_t
-timer_read(void *opaque, hwaddr addr, unsigned int size)
-{
- ETRAXTimerState *t = opaque;
- uint32_t r = 0;
-
- switch (addr) {
- case R_TMR0_DATA:
- r = ptimer_get_count(t->ptimer_t0);
- break;
- case R_TMR1_DATA:
- r = ptimer_get_count(t->ptimer_t1);
- break;
- case R_TIME:
- r = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 10;
- break;
- case RW_INTR_MASK:
- r = t->rw_intr_mask;
- break;
- case R_MASKED_INTR:
- r = t->r_intr & t->rw_intr_mask;
- break;
- default:
- D(printf ("%s %x\n", __func__, addr));
- break;
- }
- return r;
-}
-
-static void update_ctrl(ETRAXTimerState *t, int tnum)
-{
- unsigned int op;
- unsigned int freq;
- unsigned int freq_hz;
- unsigned int div;
- uint32_t ctrl;
-
- ptimer_state *timer;
-
- if (tnum == 0) {
- ctrl = t->rw_tmr0_ctrl;
- div = t->rw_tmr0_div;
- timer = t->ptimer_t0;
- } else {
- ctrl = t->rw_tmr1_ctrl;
- div = t->rw_tmr1_div;
- timer = t->ptimer_t1;
- }
-
-
- op = ctrl & 3;
- freq = ctrl >> 2;
- freq_hz = 32000000;
-
- switch (freq)
- {
- case 0:
- case 1:
- D(printf ("extern or disabled timer clock?\n"));
- break;
- case 4: freq_hz = 29493000; break;
- case 5: freq_hz = 32000000; break;
- case 6: freq_hz = 32768000; break;
- case 7: freq_hz = 100000000; break;
- default:
- abort();
- break;
- }
-
- D(printf ("freq_hz=%d div=%d\n", freq_hz, div));
- ptimer_transaction_begin(timer);
- ptimer_set_freq(timer, freq_hz);
- ptimer_set_limit(timer, div, 0);
-
- switch (op)
- {
- case 0:
- /* Load. */
- ptimer_set_limit(timer, div, 1);
- break;
- case 1:
- /* Hold. */
- ptimer_stop(timer);
- break;
- case 2:
- /* Run. */
- ptimer_run(timer, 0);
- break;
- default:
- abort();
- break;
- }
- ptimer_transaction_commit(timer);
-}
-
-static void timer_update_irq(ETRAXTimerState *t)
-{
- t->r_intr &= ~(t->rw_ack_intr);
- t->r_masked_intr = t->r_intr & t->rw_intr_mask;
-
- D(printf("%s: masked_intr=%x\n", __func__, t->r_masked_intr));
- qemu_set_irq(t->irq, !!t->r_masked_intr);
-}
-
-static void timer0_hit(void *opaque)
-{
- ETRAXTimerState *t = opaque;
- t->r_intr |= 1;
- timer_update_irq(t);
-}
-
-static void timer1_hit(void *opaque)
-{
- ETRAXTimerState *t = opaque;
- t->r_intr |= 2;
- timer_update_irq(t);
-}
-
-static void watchdog_hit(void *opaque)
-{
- ETRAXTimerState *t = opaque;
- if (t->wd_hits == 0) {
- /* real hw gives a single tick before resetting but we are
- a bit friendlier to compensate for our slower execution. */
- ptimer_set_count(t->ptimer_wd, 10);
- ptimer_run(t->ptimer_wd, 1);
- qemu_irq_raise(t->nmi);
- }
- else
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
-
- t->wd_hits++;
-}
-
-static inline void timer_watchdog_update(ETRAXTimerState *t, uint32_t value)
-{
- unsigned int wd_en = t->rw_wd_ctrl & (1 << 8);
- unsigned int wd_key = t->rw_wd_ctrl >> 9;
- unsigned int wd_cnt = t->rw_wd_ctrl & 511;
- unsigned int new_key = value >> 9 & ((1 << 7) - 1);
- unsigned int new_cmd = (value >> 8) & 1;
-
- /* If the watchdog is enabled, they written key must match the
- complement of the previous. */
- wd_key = ~wd_key & ((1 << 7) - 1);
-
- if (wd_en && wd_key != new_key)
- return;
-
- D(printf("en=%d new_key=%x oldkey=%x cmd=%d cnt=%d\n",
- wd_en, new_key, wd_key, new_cmd, wd_cnt));
-
- if (t->wd_hits)
- qemu_irq_lower(t->nmi);
-
- t->wd_hits = 0;
-
- ptimer_transaction_begin(t->ptimer_wd);
- ptimer_set_freq(t->ptimer_wd, 760);
- if (wd_cnt == 0)
- wd_cnt = 256;
- ptimer_set_count(t->ptimer_wd, wd_cnt);
- if (new_cmd)
- ptimer_run(t->ptimer_wd, 1);
- else
- ptimer_stop(t->ptimer_wd);
-
- t->rw_wd_ctrl = value;
- ptimer_transaction_commit(t->ptimer_wd);
-}
-
-static void
-timer_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
-{
- ETRAXTimerState *t = opaque;
- uint32_t value = val64;
-
- switch (addr)
- {
- case RW_TMR0_DIV:
- t->rw_tmr0_div = value;
- break;
- case RW_TMR0_CTRL:
- D(printf ("RW_TMR0_CTRL=%x\n", value));
- t->rw_tmr0_ctrl = value;
- update_ctrl(t, 0);
- break;
- case RW_TMR1_DIV:
- t->rw_tmr1_div = value;
- break;
- case RW_TMR1_CTRL:
- D(printf ("RW_TMR1_CTRL=%x\n", value));
- t->rw_tmr1_ctrl = value;
- update_ctrl(t, 1);
- break;
- case RW_INTR_MASK:
- D(printf ("RW_INTR_MASK=%x\n", value));
- t->rw_intr_mask = value;
- timer_update_irq(t);
- break;
- case RW_WD_CTRL:
- timer_watchdog_update(t, value);
- break;
- case RW_ACK_INTR:
- t->rw_ack_intr = value;
- timer_update_irq(t);
- t->rw_ack_intr = 0;
- break;
- default:
- printf("%s " HWADDR_FMT_plx " %x\n", __func__, addr, value);
- break;
- }
-}
-
-static const MemoryRegionOps timer_ops = {
- .read = timer_read,
- .write = timer_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
-};
-
-static void etraxfs_timer_reset_enter(Object *obj, ResetType type)
-{
- ETRAXTimerState *t = ETRAX_TIMER(obj);
-
- ptimer_transaction_begin(t->ptimer_t0);
- ptimer_stop(t->ptimer_t0);
- ptimer_transaction_commit(t->ptimer_t0);
- ptimer_transaction_begin(t->ptimer_t1);
- ptimer_stop(t->ptimer_t1);
- ptimer_transaction_commit(t->ptimer_t1);
- ptimer_transaction_begin(t->ptimer_wd);
- ptimer_stop(t->ptimer_wd);
- ptimer_transaction_commit(t->ptimer_wd);
- t->rw_wd_ctrl = 0;
- t->r_intr = 0;
- t->rw_intr_mask = 0;
-}
-
-static void etraxfs_timer_reset_hold(Object *obj, ResetType type)
-{
- ETRAXTimerState *t = ETRAX_TIMER(obj);
-
- qemu_irq_lower(t->irq);
-}
-
-static void etraxfs_timer_realize(DeviceState *dev, Error **errp)
-{
- ETRAXTimerState *t = ETRAX_TIMER(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
-
- t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_LEGACY);
- t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_LEGACY);
- t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_LEGACY);
-
- sysbus_init_irq(sbd, &t->irq);
- sysbus_init_irq(sbd, &t->nmi);
-
- memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t,
- "etraxfs-timer", 0x5c);
- sysbus_init_mmio(sbd, &t->mmio);
-}
-
-static void etraxfs_timer_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- ResettableClass *rc = RESETTABLE_CLASS(klass);
-
- dc->realize = etraxfs_timer_realize;
- dc->vmsd = &vmstate_etraxfs;
- rc->phases.enter = etraxfs_timer_reset_enter;
- rc->phases.hold = etraxfs_timer_reset_hold;
-}
-
-static const TypeInfo etraxfs_timer_info = {
- .name = TYPE_ETRAX_FS_TIMER,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(ETRAXTimerState),
- .class_init = etraxfs_timer_class_init,
-};
-
-static void etraxfs_timer_register_types(void)
-{
- type_register_static(&etraxfs_timer_info);
-}
-
-type_init(etraxfs_timer_register_types)
diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c
index 75098cd..bb0f9c8 100644
--- a/hw/timer/exynos4210_mct.c
+++ b/hw/timer/exynos4210_mct.c
@@ -815,7 +815,7 @@ static uint32_t exynos4210_ltick_cnt_get_cnto(struct tick_timer *s)
/* Both are counting */
icnto = remain / s->tcntb;
if (icnto) {
- tcnto = remain % (icnto * s->tcntb);
+ tcnto = remain % ((uint64_t)icnto * s->tcntb);
} else {
tcnto = remain % s->tcntb;
}
@@ -1546,11 +1546,11 @@ static void exynos4210_mct_finalize(Object *obj)
}
}
-static void exynos4210_mct_class_init(ObjectClass *klass, void *data)
+static void exynos4210_mct_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_mct_reset;
+ device_class_set_legacy_reset(dc, exynos4210_mct_reset);
dc->vmsd = &vmstate_exynos4210_mct_state;
}
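
The small-looking change in exynos4210_ltick_cnt_get_cnto() above is an integer-promotion fix: icnto and tcntb are 32-bit values, so their product was computed in 32 bits and could wrap before being used as the modulus. A small self-contained demonstration with made-up values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t icnto = 100000;            /* made-up values */
        uint32_t tcntb = 100000;
        uint64_t remain = 20000000000ULL;

        /* 32-bit multiply: 10^10 wraps modulo 2^32 before the division */
        uint64_t wrapped = remain % (icnto * tcntb);
        /* 64-bit multiply: the product 10^10 is kept intact */
        uint64_t correct = remain % ((uint64_t)icnto * tcntb);

        printf("wrapped modulus: %" PRIu64 "\n", wrapped);   /* divisor 1410065408 */
        printf("correct modulus: %" PRIu64 "\n", correct);   /* divisor 10000000000 */
        return 0;
    }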
diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c
index ca330e9..69f737a 100644
--- a/hw/timer/exynos4210_pwm.c
+++ b/hw/timer/exynos4210_pwm.c
@@ -420,11 +420,11 @@ static void exynos4210_pwm_finalize(Object *obj)
}
}
-static void exynos4210_pwm_class_init(ObjectClass *klass, void *data)
+static void exynos4210_pwm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = exynos4210_pwm_reset;
+ device_class_set_legacy_reset(dc, exynos4210_pwm_reset);
dc->vmsd = &vmstate_exynos4210_pwm_state;
}
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
index 4990885..0e06fa0 100644
--- a/hw/timer/grlib_gptimer.c
+++ b/hw/timer/grlib_gptimer.c
@@ -403,19 +403,18 @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &unit->iomem);
}
-static Property grlib_gptimer_properties[] = {
+static const Property grlib_gptimer_properties[] = {
DEFINE_PROP_UINT32("frequency", GPTimerUnit, freq_hz, 40000000),
DEFINE_PROP_UINT32("irq-line", GPTimerUnit, irq_line, 8),
DEFINE_PROP_UINT32("nr-timers", GPTimerUnit, nr_timers, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void grlib_gptimer_class_init(ObjectClass *klass, void *data)
+static void grlib_gptimer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = grlib_gptimer_realize;
- dc->reset = grlib_gptimer_reset;
+ device_class_set_legacy_reset(dc, grlib_gptimer_reset);
device_class_set_props(dc, grlib_gptimer_properties);
}
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 4cb5393..cb48cc1 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -36,10 +36,12 @@
#include "hw/rtc/mc146818rtc_regs.h"
#include "migration/vmstate.h"
#include "hw/timer/i8254.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#include "trace.h"
+struct hpet_fw_config hpet_fw_cfg = {.count = UINT8_MAX};
+
#define HPET_MSI_SUPPORT 0
OBJECT_DECLARE_SIMPLE_TYPE(HPETState, HPET)
@@ -54,10 +56,12 @@ typedef struct HPETTimer { /* timers */
uint64_t cmp; /* comparator */
uint64_t fsb; /* FSB route */
/* Hidden register state */
+ uint64_t cmp64; /* comparator (extended to counter width) */
uint64_t period; /* Last value written to comparator */
uint8_t wrap_flag; /* timer pop will indicate wrap for one-shot 32-bit
* mode. Next pop will be actual timer expiration.
*/
+ uint64_t last; /* last value armed, to avoid timer storms */
} HPETTimer;
struct HPETState {
@@ -73,6 +77,7 @@ struct HPETState {
uint8_t rtc_irq_level;
qemu_irq pit_enabled;
uint8_t num_timers;
+ uint8_t num_timers_save;
uint32_t intcap;
HPETTimer timer[HPET_MAX_TIMERS];
@@ -116,11 +121,6 @@ static uint32_t timer_enabled(HPETTimer *t)
static uint32_t hpet_time_after(uint64_t a, uint64_t b)
{
- return ((int32_t)(b - a) < 0);
-}
-
-static uint32_t hpet_time_after64(uint64_t a, uint64_t b)
-{
return ((int64_t)(b - a) < 0);
}
@@ -156,29 +156,34 @@ static uint64_t hpet_get_ticks(HPETState *s)
return ns_to_ticks(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->hpet_offset);
}
+static uint64_t hpet_get_ns(HPETState *s, uint64_t tick)
+{
+ return ticks_to_ns(tick) - s->hpet_offset;
+}
+
/*
- * calculate diff between comparator value and current ticks
+ * calculate next value of the general counter that matches the
+ * target (either entirely, or the low 32-bit only depending on
+ * the timer mode).
*/
-static inline uint64_t hpet_calculate_diff(HPETTimer *t, uint64_t current)
+static uint64_t hpet_calculate_cmp64(HPETTimer *t, uint64_t cur_tick, uint64_t target)
{
-
if (t->config & HPET_TN_32BIT) {
- uint32_t diff, cmp;
-
- cmp = (uint32_t)t->cmp;
- diff = cmp - (uint32_t)current;
- diff = (int32_t)diff > 0 ? diff : (uint32_t)1;
- return (uint64_t)diff;
+ uint64_t result = deposit64(cur_tick, 0, 32, target);
+ if (result < cur_tick) {
+ result += 0x100000000ULL;
+ }
+ return result;
} else {
- uint64_t diff, cmp;
-
- cmp = t->cmp;
- diff = cmp - current;
- diff = (int64_t)diff > 0 ? diff : (uint64_t)1;
- return diff;
+ return target;
}
}
+static uint64_t hpet_next_wrap(uint64_t cur_tick)
+{
+ return (cur_tick | 0xffffffffU) + 1;
+}
+
static void update_irq(struct HPETTimer *timer, int set)
{
uint64_t mask;
@@ -196,21 +201,31 @@ static void update_irq(struct HPETTimer *timer, int set)
}
s = timer->state;
mask = 1 << timer->tn;
- if (!set || !timer_enabled(timer) || !hpet_enabled(timer->state)) {
+
+ if (set && (timer->config & HPET_TN_TYPE_LEVEL)) {
+ /*
+ * If HPET_TN_ENABLE bit is 0, "the timer will still operate and
+ * generate appropriate status bits, but will not cause an interrupt"
+ */
+ s->isr |= mask;
+ } else {
s->isr &= ~mask;
+ }
+
+ if (set && timer_enabled(timer) && hpet_enabled(s)) {
+ if (timer_fsb_route(timer)) {
+ address_space_stl_le(&address_space_memory, timer->fsb >> 32,
+ timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ } else if (timer->config & HPET_TN_TYPE_LEVEL) {
+ qemu_irq_raise(s->irqs[route]);
+ } else {
+ qemu_irq_pulse(s->irqs[route]);
+ }
+ } else {
if (!timer_fsb_route(timer)) {
qemu_irq_lower(s->irqs[route]);
}
- } else if (timer_fsb_route(timer)) {
- address_space_stl_le(&address_space_memory, timer->fsb >> 32,
- timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED,
- NULL);
- } else if (timer->config & HPET_TN_TYPE_LEVEL) {
- s->isr |= mask;
- qemu_irq_raise(s->irqs[route]);
- } else {
- s->isr &= ~mask;
- qemu_irq_pulse(s->irqs[route]);
}
}
@@ -223,15 +238,12 @@ static int hpet_pre_save(void *opaque)
s->hpet_counter = hpet_get_ticks(s);
}
- return 0;
-}
-
-static int hpet_pre_load(void *opaque)
-{
- HPETState *s = opaque;
-
- /* version 1 only supports 3, later versions will load the actual value */
- s->num_timers = HPET_MIN_TIMERS;
+ /*
+ * The number of timers must match on source and destination, but it was
+ * also added to the migration stream. Check that it matches the value
+ * that was configured.
+ */
+ s->num_timers_save = s->num_timers;
return 0;
}
@@ -239,34 +251,25 @@ static bool hpet_validate_num_timers(void *opaque, int version_id)
{
HPETState *s = opaque;
- if (s->num_timers < HPET_MIN_TIMERS) {
- return false;
- } else if (s->num_timers > HPET_MAX_TIMERS) {
- return false;
- }
- return true;
+ return s->num_timers == s->num_timers_save;
}
static int hpet_post_load(void *opaque, int version_id)
{
HPETState *s = opaque;
+ int i;
+ for (i = 0; i < s->num_timers; i++) {
+ HPETTimer *t = &s->timer[i];
+ t->cmp64 = hpet_calculate_cmp64(t, s->hpet_counter, t->cmp);
+ t->last = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - NANOSECONDS_PER_SECOND;
+ }
/* Recalculate the offset between the main counter and guest time */
if (!s->hpet_offset_saved) {
s->hpet_offset = ticks_to_ns(s->hpet_counter)
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
- /* Push number of timers into capability returned via HPET_ID */
- s->capability &= ~HPET_ID_NUM_TIM_MASK;
- s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT;
- hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability;
-
- /* Derive HPET_MSI_SUPPORT from the capability of the first timer. */
- s->flags &= ~(1 << HPET_MSI_SUPPORT);
- if (s->timer[0].config & HPET_TN_FSB_CAP) {
- s->flags |= 1 << HPET_MSI_SUPPORT;
- }
return 0;
}
@@ -325,17 +328,16 @@ static const VMStateDescription vmstate_hpet_timer = {
static const VMStateDescription vmstate_hpet = {
.name = "hpet",
.version_id = 2,
- .minimum_version_id = 1,
+ .minimum_version_id = 2,
.pre_save = hpet_pre_save,
- .pre_load = hpet_pre_load,
.post_load = hpet_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(config, HPETState),
VMSTATE_UINT64(isr, HPETState),
VMSTATE_UINT64(hpet_counter, HPETState),
- VMSTATE_UINT8_V(num_timers, HPETState, 2),
- VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers),
- VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0,
+ VMSTATE_UINT8(num_timers_save, HPETState),
+ VMSTATE_VALIDATE("num_timers must match", hpet_validate_num_timers),
+ VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers_save, 0,
vmstate_hpet_timer, HPETTimer),
VMSTATE_END_OF_LIST()
},
@@ -346,14 +348,17 @@ static const VMStateDescription vmstate_hpet = {
}
};
-static void hpet_arm(HPETTimer *t, uint64_t ticks)
+static void hpet_arm(HPETTimer *t, uint64_t tick)
{
- if (ticks < ns_to_ticks(INT64_MAX / 2)) {
- timer_mod(t->qemu_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks));
- } else {
- timer_del(t->qemu_timer);
+ uint64_t ns = hpet_get_ns(t->state, tick);
+
+ /* Clamp period to reasonable min value (1 us) */
+ if (timer_is_periodic(t) && ns - t->last < 1000) {
+ ns = t->last + 1000;
}
+
+ t->last = ns;
+ timer_mod(t->qemu_timer, ns);
}
/*
@@ -362,72 +367,89 @@ static void hpet_arm(HPETTimer *t, uint64_t ticks)
static void hpet_timer(void *opaque)
{
HPETTimer *t = opaque;
- uint64_t diff;
-
uint64_t period = t->period;
uint64_t cur_tick = hpet_get_ticks(t->state);
if (timer_is_periodic(t) && period != 0) {
+ while (hpet_time_after(cur_tick, t->cmp64)) {
+ t->cmp64 += period;
+ }
if (t->config & HPET_TN_32BIT) {
- while (hpet_time_after(cur_tick, t->cmp)) {
- t->cmp = (uint32_t)(t->cmp + t->period);
- }
+ t->cmp = (uint32_t)t->cmp64;
} else {
- while (hpet_time_after64(cur_tick, t->cmp)) {
- t->cmp += period;
- }
- }
- diff = hpet_calculate_diff(t, cur_tick);
- hpet_arm(t, diff);
- } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) {
- if (t->wrap_flag) {
- diff = hpet_calculate_diff(t, cur_tick);
- hpet_arm(t, diff);
- t->wrap_flag = 0;
+ t->cmp = t->cmp64;
}
+ hpet_arm(t, t->cmp64);
+ } else if (t->wrap_flag) {
+ t->wrap_flag = 0;
+ hpet_arm(t, t->cmp64);
}
update_irq(t, 1);
}
static void hpet_set_timer(HPETTimer *t)
{
- uint64_t diff;
- uint32_t wrap_diff; /* how many ticks until we wrap? */
uint64_t cur_tick = hpet_get_ticks(t->state);
- /* whenever new timer is being set up, make sure wrap_flag is 0 */
t->wrap_flag = 0;
- diff = hpet_calculate_diff(t, cur_tick);
+ t->cmp64 = hpet_calculate_cmp64(t, cur_tick, t->cmp);
+ if (t->config & HPET_TN_32BIT) {
- /* hpet spec says in one-shot 32-bit mode, generate an interrupt when
- * counter wraps in addition to an interrupt with comparator match.
- */
- if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) {
- wrap_diff = 0xffffffff - (uint32_t)cur_tick;
- if (wrap_diff < (uint32_t)diff) {
- diff = wrap_diff;
+ /* hpet spec says in one-shot 32-bit mode, generate an interrupt when
+ * counter wraps in addition to an interrupt with comparator match.
+ */
+ if (!timer_is_periodic(t) && t->cmp64 > hpet_next_wrap(cur_tick)) {
t->wrap_flag = 1;
+ hpet_arm(t, hpet_next_wrap(cur_tick));
+ return;
}
}
- hpet_arm(t, diff);
+ hpet_arm(t, t->cmp64);
}
static void hpet_del_timer(HPETTimer *t)
{
+ HPETState *s = t->state;
timer_del(t->qemu_timer);
- update_irq(t, 0);
+
+ if (s->isr & (1 << t->tn)) {
+ /* For level-triggered interrupt, this leaves ISR set but lowers irq. */
+ update_irq(t, 1);
+ }
}
static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
unsigned size)
{
HPETState *s = opaque;
- uint64_t cur_tick, index;
+ int shift = (addr & 4) * 8;
+ uint64_t cur_tick;
trace_hpet_ram_read(addr);
- index = addr;
- /*address range of all TN regs*/
- if (index >= 0x100 && index <= 0x3ff) {
+ addr &= ~4;
+
+ /*address range of all global regs*/
+ if (addr <= 0xff) {
+ switch (addr) {
+ case HPET_ID: // including HPET_PERIOD
+ return s->capability >> shift;
+ case HPET_CFG:
+ return s->config >> shift;
+ case HPET_COUNTER:
+ if (hpet_enabled(s)) {
+ cur_tick = hpet_get_ticks(s);
+ } else {
+ cur_tick = s->hpet_counter;
+ }
+ trace_hpet_ram_read_reading_counter(addr & 4, cur_tick);
+ return cur_tick >> shift;
+ case HPET_STATUS:
+ return s->isr >> shift;
+ default:
+ trace_hpet_ram_read_invalid();
+ break;
+ }
+ } else {
uint8_t timer_id = (addr - 0x100) / 0x20;
HPETTimer *timer = &s->timer[timer_id];
@@ -436,52 +458,13 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
return 0;
}
- switch ((addr - 0x100) % 0x20) {
- case HPET_TN_CFG:
- return timer->config;
- case HPET_TN_CFG + 4: // Interrupt capabilities
- return timer->config >> 32;
+ switch (addr & 0x1f) {
+ case HPET_TN_CFG: // including interrupt capabilities
+ return timer->config >> shift;
case HPET_TN_CMP: // comparator register
- return timer->cmp;
- case HPET_TN_CMP + 4:
- return timer->cmp >> 32;
+ return timer->cmp >> shift;
case HPET_TN_ROUTE:
- return timer->fsb;
- case HPET_TN_ROUTE + 4:
- return timer->fsb >> 32;
- default:
- trace_hpet_ram_read_invalid();
- break;
- }
- } else {
- switch (index) {
- case HPET_ID:
- return s->capability;
- case HPET_PERIOD:
- return s->capability >> 32;
- case HPET_CFG:
- return s->config;
- case HPET_CFG + 4:
- trace_hpet_invalid_hpet_cfg(4);
- return 0;
- case HPET_COUNTER:
- if (hpet_enabled(s)) {
- cur_tick = hpet_get_ticks(s);
- } else {
- cur_tick = s->hpet_counter;
- }
- trace_hpet_ram_read_reading_counter(0, cur_tick);
- return cur_tick;
- case HPET_COUNTER + 4:
- if (hpet_enabled(s)) {
- cur_tick = hpet_get_ticks(s);
- } else {
- cur_tick = s->hpet_counter;
- }
- trace_hpet_ram_read_reading_counter(4, cur_tick);
- return cur_tick >> 32;
- case HPET_STATUS:
- return s->isr;
+ return timer->fsb >> shift;
default:
trace_hpet_ram_read_invalid();
break;
@@ -495,120 +478,32 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
{
int i;
HPETState *s = opaque;
- uint64_t old_val, new_val, val, index;
+ int shift = (addr & 4) * 8;
+ int len = MIN(size * 8, 64 - shift);
+ uint64_t old_val, new_val, cleared;
trace_hpet_ram_write(addr, value);
- index = addr;
- old_val = hpet_ram_read(opaque, addr, 4);
- new_val = value;
-
- /*address range of all TN regs*/
- if (index >= 0x100 && index <= 0x3ff) {
- uint8_t timer_id = (addr - 0x100) / 0x20;
- HPETTimer *timer = &s->timer[timer_id];
+ addr &= ~4;
- trace_hpet_ram_write_timer_id(timer_id);
- if (timer_id > s->num_timers) {
- trace_hpet_timer_id_out_of_range(timer_id);
- return;
- }
- switch ((addr - 0x100) % 0x20) {
- case HPET_TN_CFG:
- trace_hpet_ram_write_tn_cfg();
- if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) {
- update_irq(timer, 0);
- }
- val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK);
- timer->config = (timer->config & 0xffffffff00000000ULL) | val;
- if (new_val & HPET_TN_32BIT) {
- timer->cmp = (uint32_t)timer->cmp;
- timer->period = (uint32_t)timer->period;
- }
- if (activating_bit(old_val, new_val, HPET_TN_ENABLE) &&
- hpet_enabled(s)) {
- hpet_set_timer(timer);
- } else if (deactivating_bit(old_val, new_val, HPET_TN_ENABLE)) {
- hpet_del_timer(timer);
- }
- break;
- case HPET_TN_CFG + 4: // Interrupt capabilities
- trace_hpet_ram_write_invalid_tn_cfg(4);
- break;
- case HPET_TN_CMP: // comparator register
- trace_hpet_ram_write_tn_cmp(0);
- if (timer->config & HPET_TN_32BIT) {
- new_val = (uint32_t)new_val;
- }
- if (!timer_is_periodic(timer)
- || (timer->config & HPET_TN_SETVAL)) {
- timer->cmp = (timer->cmp & 0xffffffff00000000ULL) | new_val;
- }
- if (timer_is_periodic(timer)) {
- /*
- * FIXME: Clamp period to reasonable min value?
- * Clamp period to reasonable max value
- */
- if (timer->config & HPET_TN_32BIT) {
- new_val = MIN(new_val, ~0u >> 1);
- }
- timer->period =
- (timer->period & 0xffffffff00000000ULL) | new_val;
- }
- /*
- * FIXME: on a 64-bit write, HPET_TN_SETVAL should apply to the
- * high bits part as well.
- */
- timer->config &= ~HPET_TN_SETVAL;
- if (hpet_enabled(s)) {
- hpet_set_timer(timer);
- }
- break;
- case HPET_TN_CMP + 4: // comparator register high order
- trace_hpet_ram_write_tn_cmp(4);
- if (!timer_is_periodic(timer)
- || (timer->config & HPET_TN_SETVAL)) {
- timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32;
- }
- if (timer_is_periodic(timer)) {
- /*
- * FIXME: Clamp period to reasonable min value?
- * Clamp period to reasonable max value
- */
- new_val = MIN(new_val, ~0u >> 1);
- timer->period =
- (timer->period & 0xffffffffULL) | new_val << 32;
- }
- timer->config &= ~HPET_TN_SETVAL;
- if (hpet_enabled(s)) {
- hpet_set_timer(timer);
- }
- break;
- case HPET_TN_ROUTE:
- timer->fsb = (timer->fsb & 0xffffffff00000000ULL) | new_val;
- break;
- case HPET_TN_ROUTE + 4:
- timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff);
- break;
- default:
- trace_hpet_ram_write_invalid();
- break;
- }
- return;
- } else {
- switch (index) {
+ /*address range of all global regs*/
+ if (addr <= 0xff) {
+ switch (addr) {
case HPET_ID:
return;
case HPET_CFG:
- val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK);
- s->config = (s->config & 0xffffffff00000000ULL) | val;
+ old_val = s->config;
+ new_val = deposit64(old_val, shift, len, value);
+ new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK);
+ s->config = new_val;
if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) {
/* Enable main counter and interrupt generation. */
s->hpet_offset =
ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
for (i = 0; i < s->num_timers; i++) {
- if ((&s->timer[i])->cmp != ~0ULL) {
- hpet_set_timer(&s->timer[i]);
+ if (timer_enabled(&s->timer[i]) && (s->isr & (1 << i))) {
+ update_irq(&s->timer[i], 1);
}
+ hpet_set_timer(&s->timer[i]);
}
} else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) {
/* Halt main counter and disable interrupt generation. */
@@ -629,13 +524,11 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level);
}
break;
- case HPET_CFG + 4:
- trace_hpet_invalid_hpet_cfg(4);
- break;
case HPET_STATUS:
- val = new_val & s->isr;
+ new_val = value << shift;
+ cleared = new_val & s->isr;
for (i = 0; i < s->num_timers; i++) {
- if (val & (1 << i)) {
+ if (cleared & (1 << i)) {
update_irq(&s->timer[i], 0);
}
}
@@ -644,20 +537,78 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
if (hpet_enabled(s)) {
trace_hpet_ram_write_counter_write_while_enabled();
}
- s->hpet_counter =
- (s->hpet_counter & 0xffffffff00000000ULL) | value;
- trace_hpet_ram_write_counter_written(0, value, s->hpet_counter);
+ s->hpet_counter = deposit64(s->hpet_counter, shift, len, value);
break;
- case HPET_COUNTER + 4:
- trace_hpet_ram_write_counter_write_while_enabled();
- s->hpet_counter =
- (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32);
- trace_hpet_ram_write_counter_written(4, value, s->hpet_counter);
+ default:
+ trace_hpet_ram_write_invalid();
+ break;
+ }
+ } else {
+ uint8_t timer_id = (addr - 0x100) / 0x20;
+ HPETTimer *timer = &s->timer[timer_id];
+
+ trace_hpet_ram_write_timer_id(timer_id);
+ if (timer_id > s->num_timers) {
+ trace_hpet_timer_id_out_of_range(timer_id);
+ return;
+ }
+ switch (addr & 0x18) {
+ case HPET_TN_CFG:
+ trace_hpet_ram_write_tn_cfg(addr & 4);
+ old_val = timer->config;
+ new_val = deposit64(old_val, shift, len, value);
+ new_val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK);
+ if (deactivating_bit(old_val, new_val, HPET_TN_TYPE_LEVEL)) {
+ /*
+ * Do this before changing timer->config; otherwise, if
+ * HPET_TN_FSB is set, update_irq will not lower the qemu_irq.
+ */
+ update_irq(timer, 0);
+ }
+ timer->config = new_val;
+ if (activating_bit(old_val, new_val, HPET_TN_ENABLE)
+ && (s->isr & (1 << timer_id))) {
+ update_irq(timer, 1);
+ }
+ if (new_val & HPET_TN_32BIT) {
+ timer->cmp = (uint32_t)timer->cmp;
+ timer->period = (uint32_t)timer->period;
+ }
+ if (hpet_enabled(s)) {
+ hpet_set_timer(timer);
+ }
+ break;
+ case HPET_TN_CMP: // comparator register
+ if (timer->config & HPET_TN_32BIT) {
+ /* High 32-bits are zero, leave them untouched. */
+ if (shift) {
+ trace_hpet_ram_write_invalid_tn_cmp();
+ break;
+ }
+ len = 64;
+ value = (uint32_t) value;
+ }
+ trace_hpet_ram_write_tn_cmp(addr & 4);
+ if (!timer_is_periodic(timer)
+ || (timer->config & HPET_TN_SETVAL)) {
+ timer->cmp = deposit64(timer->cmp, shift, len, value);
+ }
+ if (timer_is_periodic(timer)) {
+ timer->period = deposit64(timer->period, shift, len, value);
+ }
+ timer->config &= ~HPET_TN_SETVAL;
+ if (hpet_enabled(s)) {
+ hpet_set_timer(timer);
+ }
+ break;
+ case HPET_TN_ROUTE:
+ timer->fsb = deposit64(timer->fsb, shift, len, value);
break;
default:
trace_hpet_ram_write_invalid();
break;
}
+ return;
}
}
@@ -666,7 +617,11 @@ static const MemoryRegionOps hpet_ram_ops = {
.write = hpet_ram_write,
.valid = {
.min_access_size = 4,
- .max_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
},
.endianness = DEVICE_NATIVE_ENDIAN,
};
@@ -696,8 +651,8 @@ static void hpet_reset(DeviceState *d)
s->hpet_counter = 0ULL;
s->hpet_offset = 0ULL;
s->config = 0ULL;
- hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability;
- hpet_cfg.hpet[s->hpet_id].address = sbd->mmio[0].addr;
+ hpet_fw_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability;
+ hpet_fw_cfg.hpet[s->hpet_id].address = sbd->mmio[0].addr;
/* to document that the RTC lowers its output on reset as well */
s->rtc_irq_level = 0;
@@ -736,30 +691,31 @@ static void hpet_realize(DeviceState *dev, Error **errp)
int i;
HPETTimer *timer;
+ if (s->num_timers < HPET_MIN_TIMERS || s->num_timers > HPET_MAX_TIMERS) {
+ error_setg(errp, "hpet.num_timers must be between %d and %d",
+ HPET_MIN_TIMERS, HPET_MAX_TIMERS);
+ return;
+ }
if (!s->intcap) {
- warn_report("Hpet's intcap not initialized");
+ error_setg(errp, "hpet.hpet-intcap not initialized");
+ return;
}
- if (hpet_cfg.count == UINT8_MAX) {
+ if (hpet_fw_cfg.count == UINT8_MAX) {
/* first instance */
- hpet_cfg.count = 0;
+ hpet_fw_cfg.count = 0;
}
- if (hpet_cfg.count == 8) {
- error_setg(errp, "Only 8 instances of HPET is allowed");
+ if (hpet_fw_cfg.count == 8) {
+ error_setg(errp, "Only 8 instances of HPET are allowed");
return;
}
- s->hpet_id = hpet_cfg.count++;
+ s->hpet_id = hpet_fw_cfg.count++;
for (i = 0; i < HPET_NUM_IRQ_ROUTES; i++) {
sysbus_init_irq(sbd, &s->irqs[i]);
}
- if (s->num_timers < HPET_MIN_TIMERS) {
- s->num_timers = HPET_MIN_TIMERS;
- } else if (s->num_timers > HPET_MAX_TIMERS) {
- s->num_timers = HPET_MAX_TIMERS;
- }
for (i = 0; i < HPET_MAX_TIMERS; i++) {
timer = &s->timer[i];
timer->qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hpet_timer, timer);
@@ -767,7 +723,7 @@ static void hpet_realize(DeviceState *dev, Error **errp)
timer->state = s;
}
- /* 64-bit main counter; LegacyReplacementRoute. */
+ /* 64-bit General Capabilities and ID Register; LegacyReplacementRoute. */
s->capability = 0x8086a001ULL;
s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT;
s->capability |= ((uint64_t)(HPET_CLK_PERIOD * FS_PER_NS) << 32);
@@ -776,20 +732,19 @@ static void hpet_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, &s->pit_enabled, 1);
}
-static Property hpet_device_properties[] = {
+static const Property hpet_device_properties[] = {
DEFINE_PROP_UINT8("timers", HPETState, num_timers, HPET_MIN_TIMERS),
DEFINE_PROP_BIT("msi", HPETState, flags, HPET_MSI_SUPPORT, false),
DEFINE_PROP_UINT32(HPET_INTCAP, HPETState, intcap, 0),
DEFINE_PROP_BOOL("hpet-offset-saved", HPETState, hpet_offset_saved, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void hpet_device_class_init(ObjectClass *klass, void *data)
+static void hpet_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = hpet_realize;
- dc->reset = hpet_reset;
+ device_class_set_legacy_reset(dc, hpet_reset);
dc->vmsd = &vmstate_hpet;
device_class_set_props(dc, hpet_device_properties);
}
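
Much of the HPET rework above hinges on hpet_calculate_cmp64(): in 32-bit timer mode the comparator only constrains the low 32 bits of the main counter, so the next matching 64-bit tick is built by splicing the target into the current counter value and, if that lands in the past, moving to the next 2^32 window. A sketch of that calculation, assuming a local deposit64() with the same insert-bitfield semantics as QEMU's helper:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for QEMU's deposit64(): insert 'len' bits at 'start'. */
    static uint64_t deposit64(uint64_t value, int start, int len, uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    static uint64_t calc_cmp64(uint64_t cur_tick, uint32_t target)
    {
        uint64_t result = deposit64(cur_tick, 0, 32, target);
        if (result < cur_tick) {
            result += 0x100000000ULL;   /* target already passed in this window */
        }
        return result;
    }

    int main(void)
    {
        uint64_t cur = 0x580000000ULL;   /* counter mid-way through a window */
        printf("future target: %#" PRIx64 "\n", calc_cmp64(cur, 0x90000000u));
        printf("past target:   %#" PRIx64 "\n", calc_cmp64(cur, 0x10000000u));
        return 0;
    }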
diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c
index c235496..4b25c48 100644
--- a/hw/timer/i8254.c
+++ b/hw/timer/i8254.c
@@ -350,7 +350,7 @@ static void pit_realizefn(DeviceState *dev, Error **errp)
pc->parent_realize(dev, errp);
}
-static void pit_class_initfn(ObjectClass *klass, void *data)
+static void pit_class_initfn(ObjectClass *klass, const void *data)
{
PITClass *pc = PIT_CLASS(klass);
PITCommonClass *k = PIT_COMMON_CLASS(klass);
@@ -360,7 +360,7 @@ static void pit_class_initfn(ObjectClass *klass, void *data)
k->set_channel_gate = pit_set_channel_gate;
k->get_channel_info = pit_get_channel_info_common;
k->post_load = pit_post_load;
- dc->reset = pit_reset;
+ device_class_set_legacy_reset(dc, pit_reset);
}
static const TypeInfo pit_info = {
diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c
index 28fdabc..ad09159 100644
--- a/hw/timer/i8254_common.c
+++ b/hw/timer/i8254_common.c
@@ -238,12 +238,11 @@ static const VMStateDescription vmstate_pit_common = {
}
};
-static Property pit_common_properties[] = {
+static const Property pit_common_properties[] = {
DEFINE_PROP_UINT32("iobase", PITCommonState, iobase, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pit_common_class_init(ObjectClass *klass, void *data)
+static void pit_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c
index 4917388..c7320ef 100644
--- a/hw/timer/ibex_timer.c
+++ b/hw/timer/ibex_timer.c
@@ -263,9 +263,8 @@ static const VMStateDescription vmstate_ibex_timer = {
}
};
-static Property ibex_timer_properties[] = {
+static const Property ibex_timer_properties[] = {
DEFINE_PROP_UINT32("timebase-freq", IbexTimerState, timebase_freq, 10000),
- DEFINE_PROP_END_OF_LIST(),
};
static void ibex_timer_init(Object *obj)
@@ -287,11 +286,11 @@ static void ibex_timer_realize(DeviceState *dev, Error **errp)
}
-static void ibex_timer_class_init(ObjectClass *klass, void *data)
+static void ibex_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = ibex_timer_reset;
+ device_class_set_legacy_reset(dc, ibex_timer_reset);
dc->vmsd = &vmstate_ibex_timer;
dc->realize = ibex_timer_realize;
device_class_set_props(dc, ibex_timer_properties);
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index bd62520..6123321 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -427,12 +427,12 @@ static void imx_epit_dev_reset(DeviceState *dev)
imx_epit_reset(s, true);
}
-static void imx_epit_class_init(ObjectClass *klass, void *data)
+static void imx_epit_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_epit_realize;
- dc->reset = imx_epit_dev_reset;
+ device_class_set_legacy_reset(dc, imx_epit_dev_reset);
dc->vmsd = &vmstate_imx_timer_epit;
dc->desc = "i.MX periodic timer";
}
diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c
index a8edaec..8c7cbfd 100644
--- a/hw/timer/imx_gpt.c
+++ b/hw/timer/imx_gpt.c
@@ -18,18 +18,7 @@
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "qemu/log.h"
-
-#ifndef DEBUG_IMX_GPT
-#define DEBUG_IMX_GPT 0
-#endif
-
-#define DPRINTF(fmt, args...) \
- do { \
- if (DEBUG_IMX_GPT) { \
- fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPT, \
- __func__, ##args); \
- } \
- } while (0)
+#include "trace.h"
static const char *imx_gpt_reg_name(uint32_t reg)
{
@@ -137,6 +126,17 @@ static const IMXClk imx7_gpt_clocks[] = {
CLK_NONE, /* 111 not defined */
};
+static const IMXClk imx8mp_gpt_clocks[] = {
+ CLK_NONE, /* 000 No clock source */
+ CLK_IPG, /* 001 ipg_clk, 532MHz */
+ CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */
+ CLK_EXT, /* 011 External clock */
+ CLK_32k, /* 100 ipg_clk_32k */
+ CLK_HIGH, /* 101 ipg_clk_16M */
+ CLK_NONE, /* 110 not defined */
+ CLK_NONE, /* 111 not defined */
+};
+
/* Must be called from within ptimer_transaction_begin/commit block */
static void imx_gpt_set_freq(IMXGPTState *s)
{
@@ -145,7 +145,7 @@ static void imx_gpt_set_freq(IMXGPTState *s)
s->freq = imx_ccm_get_clock_frequency(s->ccm,
s->clocks[clksrc]) / (1 + s->pr);
- DPRINTF("Setting clksrc %d to frequency %d\n", clksrc, s->freq);
+ trace_imx_gpt_set_freq(clksrc, s->freq);
if (s->freq) {
ptimer_set_freq(s->timer, s->freq);
@@ -317,7 +317,7 @@ static uint64_t imx_gpt_read(void *opaque, hwaddr offset, unsigned size)
break;
}
- DPRINTF("(%s) = 0x%08x\n", imx_gpt_reg_name(offset >> 2), reg_value);
+ trace_imx_gpt_read(imx_gpt_reg_name(offset >> 2), reg_value);
return reg_value;
}
@@ -384,8 +384,7 @@ static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value,
IMXGPTState *s = IMX_GPT(opaque);
uint32_t oldreg;
- DPRINTF("(%s, value = 0x%08x)\n", imx_gpt_reg_name(offset >> 2),
- (uint32_t)value);
+ trace_imx_gpt_write(imx_gpt_reg_name(offset >> 2), (uint32_t)value);
switch (offset >> 2) {
case 0:
@@ -485,7 +484,7 @@ static void imx_gpt_timeout(void *opaque)
{
IMXGPTState *s = IMX_GPT(opaque);
- DPRINTF("\n");
+ trace_imx_gpt_timeout();
s->sr |= s->next_int;
s->next_int = 0;
@@ -519,12 +518,12 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp)
s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_LEGACY);
}
-static void imx_gpt_class_init(ObjectClass *klass, void *data)
+static void imx_gpt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = imx_gpt_realize;
- dc->reset = imx_gpt_reset;
+ device_class_set_legacy_reset(dc, imx_gpt_reset);
dc->vmsd = &vmstate_imx_timer_gpt;
dc->desc = "i.MX general timer";
}
@@ -564,6 +563,13 @@ static void imx7_gpt_init(Object *obj)
s->clocks = imx7_gpt_clocks;
}
+static void imx8mp_gpt_init(Object *obj)
+{
+ IMXGPTState *s = IMX_GPT(obj);
+
+ s->clocks = imx8mp_gpt_clocks;
+}
+
static const TypeInfo imx25_gpt_info = {
.name = TYPE_IMX25_GPT,
.parent = TYPE_SYS_BUS_DEVICE,
@@ -596,6 +602,12 @@ static const TypeInfo imx7_gpt_info = {
.instance_init = imx7_gpt_init,
};
+static const TypeInfo imx8mp_gpt_info = {
+ .name = TYPE_IMX8MP_GPT,
+ .parent = TYPE_IMX25_GPT,
+ .instance_init = imx8mp_gpt_init,
+};
+
static void imx_gpt_register_types(void)
{
type_register_static(&imx25_gpt_info);
@@ -603,6 +615,7 @@ static void imx_gpt_register_types(void)
type_register_static(&imx6_gpt_info);
type_register_static(&imx6ul_gpt_info);
type_register_static(&imx7_gpt_info);
+ type_register_static(&imx8mp_gpt_info);
}
type_init(imx_gpt_register_types)
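
The imx_gpt hunks above replace the file-local DPRINTF macro with trace events; the matching declarations are added to hw/timer/trace-events later in this diff, and tracetool turns each declaration into a trace_<name>() helper with the same arguments. A condensed view of the pairing, with both lines copied from hunks in this diff:

    /* hw/timer/trace-events declaration:
     *   imx_gpt_read(const char *name, uint64_t value) "%s -> 0x%08" PRIx64
     * generated helper used at the converted call site in imx_gpt_read():
     */
    trace_imx_gpt_read(imx_gpt_reg_name(offset >> 2), reg_value);
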
diff --git a/hw/timer/meson.build b/hw/timer/meson.build
index 8042785..178321c 100644
--- a/hw/timer/meson.build
+++ b/hw/timer/meson.build
@@ -10,11 +10,10 @@ system_ss.add(when: 'CONFIG_CMSDK_APB_TIMER', if_true: files('cmsdk-apb-timer.c'
system_ss.add(when: 'CONFIG_RENESAS_TMR', if_true: files('renesas_tmr.c'))
system_ss.add(when: 'CONFIG_RENESAS_CMT', if_true: files('renesas_cmt.c'))
system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-timer.c'))
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_timer.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c'))
system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c'))
-system_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c'))
+system_ss.add(when: 'CONFIG_HPET_C', if_true: files('hpet.c'))
system_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c'))
system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c'))
@@ -22,9 +21,7 @@ system_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gictimer.c'))
system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-timer.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_timer.c'))
system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_timer.c'))
-system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gptimer.c'))
-system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c'))
-system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c'))
+system_ss.add(when: 'CONFIG_PXA2XX_TIMER', if_true: files('pxa2xx_timer.c'))
system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c'))
system_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c'))
system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c'))
diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c
index b66aed5..2ce8211 100644
--- a/hw/timer/mss-timer.c
+++ b/hw/timer/mss-timer.c
@@ -279,14 +279,13 @@ static const VMStateDescription vmstate_mss_timer = {
}
};
-static Property mss_timer_properties[] = {
+static const Property mss_timer_properties[] = {
/* Libero GUI shows 100Mhz as default for clocks */
DEFINE_PROP_UINT32("clock-frequency", MSSTimerState, freq_hz,
100 * 1000000),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mss_timer_class_init(ObjectClass *klass, void *data)
+static void mss_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c
index c55ba02..6a116ad 100644
--- a/hw/timer/npcm7xx_timer.c
+++ b/hw/timer/npcm7xx_timer.c
@@ -689,7 +689,7 @@ static const VMStateDescription vmstate_npcm7xx_timer_ctrl = {
},
};
-static void npcm7xx_timer_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_timer_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c
index a33166a..e228fde 100644
--- a/hw/timer/nrf51_timer.c
+++ b/hw/timer/nrf51_timer.c
@@ -379,16 +379,15 @@ static const VMStateDescription vmstate_nrf51_timer = {
}
};
-static Property nrf51_timer_properties[] = {
+static const Property nrf51_timer_properties[] = {
DEFINE_PROP_UINT8("id", NRF51TimerState, id, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nrf51_timer_class_init(ObjectClass *klass, void *data)
+static void nrf51_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = nrf51_timer_reset;
+ device_class_set_legacy_reset(dc, nrf51_timer_reset);
dc->vmsd = &vmstate_nrf51_timer;
device_class_set_props(dc, nrf51_timer_properties);
}
diff --git a/hw/timer/omap_gptimer.c b/hw/timer/omap_gptimer.c
deleted file mode 100644
index 34e6af7..0000000
--- a/hw/timer/omap_gptimer.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * TI OMAP2 general purpose timers emulation.
- *
- * Copyright (C) 2007-2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "qemu/timer.h"
-#include "hw/arm/omap.h"
-
-/* GP timers */
-struct omap_gp_timer_s {
- MemoryRegion iomem;
- qemu_irq irq;
- qemu_irq wkup;
- qemu_irq in;
- qemu_irq out;
- omap_clk clk;
- QEMUTimer *timer;
- QEMUTimer *match;
- struct omap_target_agent_s *ta;
-
- int in_val;
- int out_val;
- int64_t time;
- int64_t rate;
- int64_t ticks_per_sec;
-
- int16_t config;
- int status;
- int it_ena;
- int wu_ena;
- int enable;
- int inout;
- int capt2;
- int pt;
- enum {
- gpt_trigger_none, gpt_trigger_overflow, gpt_trigger_both
- } trigger;
- enum {
- gpt_capture_none, gpt_capture_rising,
- gpt_capture_falling, gpt_capture_both
- } capture;
- int scpwm;
- int ce;
- int pre;
- int ptv;
- int ar;
- int st;
- int posted;
- uint32_t val;
- uint32_t load_val;
- uint32_t capture_val[2];
- uint32_t match_val;
- int capt_num;
-
- uint16_t writeh; /* LSB */
- uint16_t readh; /* MSB */
-};
-
-#define GPT_TCAR_IT (1 << 2)
-#define GPT_OVF_IT (1 << 1)
-#define GPT_MAT_IT (1 << 0)
-
-static inline void omap_gp_timer_intr(struct omap_gp_timer_s *timer, int it)
-{
- if (timer->it_ena & it) {
- if (!timer->status)
- qemu_irq_raise(timer->irq);
-
- timer->status |= it;
- /* Or are the status bits set even when masked?
- * i.e. is masking applied before or after the status register? */
- }
-
- if (timer->wu_ena & it)
- qemu_irq_pulse(timer->wkup);
-}
-
-static inline void omap_gp_timer_out(struct omap_gp_timer_s *timer, int level)
-{
- if (!timer->inout && timer->out_val != level) {
- timer->out_val = level;
- qemu_set_irq(timer->out, level);
- }
-}
-
-static inline uint32_t omap_gp_timer_read(struct omap_gp_timer_s *timer)
-{
- uint64_t distance;
-
- if (timer->st && timer->rate) {
- distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time;
- distance = muldiv64(distance, timer->rate, timer->ticks_per_sec);
-
- if (distance >= 0xffffffff - timer->val)
- return 0xffffffff;
- else
- return timer->val + distance;
- } else
- return timer->val;
-}
-
-static inline void omap_gp_timer_sync(struct omap_gp_timer_s *timer)
-{
- if (timer->st) {
- timer->val = omap_gp_timer_read(timer);
- timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- }
-}
-
-static inline void omap_gp_timer_update(struct omap_gp_timer_s *timer)
-{
- int64_t expires, matches;
-
- if (timer->st && timer->rate) {
- expires = muldiv64(0x100000000ll - timer->val,
- timer->ticks_per_sec, timer->rate);
- timer_mod(timer->timer, timer->time + expires);
-
- if (timer->ce && timer->match_val >= timer->val) {
- matches = muldiv64(timer->ticks_per_sec,
- timer->match_val - timer->val, timer->rate);
- timer_mod(timer->match, timer->time + matches);
- } else
- timer_del(timer->match);
- } else {
- timer_del(timer->timer);
- timer_del(timer->match);
- omap_gp_timer_out(timer, timer->scpwm);
- }
-}
-
-static inline void omap_gp_timer_trigger(struct omap_gp_timer_s *timer)
-{
- if (timer->pt)
- /* TODO in overflow-and-match mode if the first event to
- * occur is the match, don't toggle. */
- omap_gp_timer_out(timer, !timer->out_val);
- else
- /* TODO inverted pulse on timer->out_val == 1? */
- qemu_irq_pulse(timer->out);
-}
-
-static void omap_gp_timer_tick(void *opaque)
-{
- struct omap_gp_timer_s *timer = opaque;
-
- if (!timer->ar) {
- timer->st = 0;
- timer->val = 0;
- } else {
- timer->val = timer->load_val;
- timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- }
-
- if (timer->trigger == gpt_trigger_overflow ||
- timer->trigger == gpt_trigger_both)
- omap_gp_timer_trigger(timer);
-
- omap_gp_timer_intr(timer, GPT_OVF_IT);
- omap_gp_timer_update(timer);
-}
-
-static void omap_gp_timer_match(void *opaque)
-{
- struct omap_gp_timer_s *timer = opaque;
-
- if (timer->trigger == gpt_trigger_both)
- omap_gp_timer_trigger(timer);
-
- omap_gp_timer_intr(timer, GPT_MAT_IT);
-}
-
-static void omap_gp_timer_input(void *opaque, int line, int on)
-{
- struct omap_gp_timer_s *s = opaque;
- int trigger;
-
- switch (s->capture) {
- default:
- case gpt_capture_none:
- trigger = 0;
- break;
- case gpt_capture_rising:
- trigger = !s->in_val && on;
- break;
- case gpt_capture_falling:
- trigger = s->in_val && !on;
- break;
- case gpt_capture_both:
- trigger = (s->in_val == !on);
- break;
- }
- s->in_val = on;
-
- if (s->inout && trigger && s->capt_num < 2) {
- s->capture_val[s->capt_num] = omap_gp_timer_read(s);
-
- if (s->capt2 == s->capt_num ++)
- omap_gp_timer_intr(s, GPT_TCAR_IT);
- }
-}
-
-static void omap_gp_timer_clk_update(void *opaque, int line, int on)
-{
- struct omap_gp_timer_s *timer = opaque;
-
- omap_gp_timer_sync(timer);
- timer->rate = on ? omap_clk_getrate(timer->clk) : 0;
- omap_gp_timer_update(timer);
-}
-
-static void omap_gp_timer_clk_setup(struct omap_gp_timer_s *timer)
-{
- omap_clk_adduser(timer->clk,
- qemu_allocate_irq(omap_gp_timer_clk_update, timer, 0));
- timer->rate = omap_clk_getrate(timer->clk);
-}
-
-void omap_gp_timer_reset(struct omap_gp_timer_s *s)
-{
- s->config = 0x000;
- s->status = 0;
- s->it_ena = 0;
- s->wu_ena = 0;
- s->inout = 0;
- s->capt2 = 0;
- s->capt_num = 0;
- s->pt = 0;
- s->trigger = gpt_trigger_none;
- s->capture = gpt_capture_none;
- s->scpwm = 0;
- s->ce = 0;
- s->pre = 0;
- s->ptv = 0;
- s->ar = 0;
- s->st = 0;
- s->posted = 1;
- s->val = 0x00000000;
- s->load_val = 0x00000000;
- s->capture_val[0] = 0x00000000;
- s->capture_val[1] = 0x00000000;
- s->match_val = 0x00000000;
- omap_gp_timer_update(s);
-}
-
-static uint32_t omap_gp_timer_readw(void *opaque, hwaddr addr)
-{
- struct omap_gp_timer_s *s = opaque;
-
- switch (addr) {
- case 0x00: /* TIDR */
- return 0x21;
-
- case 0x10: /* TIOCP_CFG */
- return s->config;
-
- case 0x14: /* TISTAT */
- /* ??? When's this bit reset? */
- return 1; /* RESETDONE */
-
- case 0x18: /* TISR */
- return s->status;
-
- case 0x1c: /* TIER */
- return s->it_ena;
-
- case 0x20: /* TWER */
- return s->wu_ena;
-
- case 0x24: /* TCLR */
- return (s->inout << 14) |
- (s->capt2 << 13) |
- (s->pt << 12) |
- (s->trigger << 10) |
- (s->capture << 8) |
- (s->scpwm << 7) |
- (s->ce << 6) |
- (s->pre << 5) |
- (s->ptv << 2) |
- (s->ar << 1) |
- (s->st << 0);
-
- case 0x28: /* TCRR */
- return omap_gp_timer_read(s);
-
- case 0x2c: /* TLDR */
- return s->load_val;
-
- case 0x30: /* TTGR */
- return 0xffffffff;
-
- case 0x34: /* TWPS */
- return 0x00000000; /* No posted writes pending. */
-
- case 0x38: /* TMAR */
- return s->match_val;
-
- case 0x3c: /* TCAR1 */
- return s->capture_val[0];
-
- case 0x40: /* TSICR */
- return s->posted << 2;
-
- case 0x44: /* TCAR2 */
- return s->capture_val[1];
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static uint32_t omap_gp_timer_readh(void *opaque, hwaddr addr)
-{
- struct omap_gp_timer_s *s = opaque;
- uint32_t ret;
-
- if (addr & 2)
- return s->readh;
- else {
- ret = omap_gp_timer_readw(opaque, addr);
- s->readh = ret >> 16;
- return ret & 0xffff;
- }
-}
-
-static void omap_gp_timer_write(void *opaque, hwaddr addr, uint32_t value)
-{
- struct omap_gp_timer_s *s = opaque;
-
- switch (addr) {
- case 0x00: /* TIDR */
- case 0x14: /* TISTAT */
- case 0x34: /* TWPS */
- case 0x3c: /* TCAR1 */
- case 0x44: /* TCAR2 */
- OMAP_RO_REG(addr);
- break;
-
- case 0x10: /* TIOCP_CFG */
- s->config = value & 0x33d;
- if (((value >> 3) & 3) == 3) /* IDLEMODE */
- fprintf(stderr, "%s: illegal IDLEMODE value in TIOCP_CFG\n",
- __func__);
- if (value & 2) /* SOFTRESET */
- omap_gp_timer_reset(s);
- break;
-
- case 0x18: /* TISR */
- if (value & GPT_TCAR_IT)
- s->capt_num = 0;
- if (s->status && !(s->status &= ~value))
- qemu_irq_lower(s->irq);
- break;
-
- case 0x1c: /* TIER */
- s->it_ena = value & 7;
- break;
-
- case 0x20: /* TWER */
- s->wu_ena = value & 7;
- break;
-
- case 0x24: /* TCLR */
- omap_gp_timer_sync(s);
- s->inout = (value >> 14) & 1;
- s->capt2 = (value >> 13) & 1;
- s->pt = (value >> 12) & 1;
- s->trigger = (value >> 10) & 3;
- if (s->capture == gpt_capture_none &&
- ((value >> 8) & 3) != gpt_capture_none)
- s->capt_num = 0;
- s->capture = (value >> 8) & 3;
- s->scpwm = (value >> 7) & 1;
- s->ce = (value >> 6) & 1;
- s->pre = (value >> 5) & 1;
- s->ptv = (value >> 2) & 7;
- s->ar = (value >> 1) & 1;
- s->st = (value >> 0) & 1;
- if (s->inout && s->trigger != gpt_trigger_none)
- fprintf(stderr, "%s: GP timer pin must be an output "
- "for this trigger mode\n", __func__);
- if (!s->inout && s->capture != gpt_capture_none)
- fprintf(stderr, "%s: GP timer pin must be an input "
- "for this capture mode\n", __func__);
- if (s->trigger == gpt_trigger_none)
- omap_gp_timer_out(s, s->scpwm);
- /* TODO: make sure this doesn't overflow 32-bits */
- s->ticks_per_sec = NANOSECONDS_PER_SECOND << (s->pre ? s->ptv + 1 : 0);
- omap_gp_timer_update(s);
- break;
-
- case 0x28: /* TCRR */
- s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- s->val = value;
- omap_gp_timer_update(s);
- break;
-
- case 0x2c: /* TLDR */
- s->load_val = value;
- break;
-
- case 0x30: /* TTGR */
- s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- s->val = s->load_val;
- omap_gp_timer_update(s);
- break;
-
- case 0x38: /* TMAR */
- omap_gp_timer_sync(s);
- s->match_val = value;
- omap_gp_timer_update(s);
- break;
-
- case 0x40: /* TSICR */
- s->posted = (value >> 2) & 1;
- if (value & 2) /* How much exactly are we supposed to reset? */
- omap_gp_timer_reset(s);
- break;
-
- default:
- OMAP_BAD_REG(addr);
- }
-}
-
-static void omap_gp_timer_writeh(void *opaque, hwaddr addr, uint32_t value)
-{
- struct omap_gp_timer_s *s = opaque;
-
- if (addr & 2)
- omap_gp_timer_write(opaque, addr, (value << 16) | s->writeh);
- else
- s->writeh = (uint16_t) value;
-}
-
-static uint64_t omap_gp_timer_readfn(void *opaque, hwaddr addr,
- unsigned size)
-{
- switch (size) {
- case 1:
- return omap_badwidth_read32(opaque, addr);
- case 2:
- return omap_gp_timer_readh(opaque, addr);
- case 4:
- return omap_gp_timer_readw(opaque, addr);
- default:
- g_assert_not_reached();
- }
-}
-
-static void omap_gp_timer_writefn(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- switch (size) {
- case 1:
- omap_badwidth_write32(opaque, addr, value);
- break;
- case 2:
- omap_gp_timer_writeh(opaque, addr, value);
- break;
- case 4:
- omap_gp_timer_write(opaque, addr, value);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static const MemoryRegionOps omap_gp_timer_ops = {
- .read = omap_gp_timer_readfn,
- .write = omap_gp_timer_writefn,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta,
- qemu_irq irq, omap_clk fclk, omap_clk iclk)
-{
- struct omap_gp_timer_s *s = g_new0(struct omap_gp_timer_s, 1);
-
- s->ta = ta;
- s->irq = irq;
- s->clk = fclk;
- s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_tick, s);
- s->match = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_match, s);
- s->in = qemu_allocate_irq(omap_gp_timer_input, s, 0);
- omap_gp_timer_reset(s);
- omap_gp_timer_clk_setup(s);
-
- memory_region_init_io(&s->iomem, NULL, &omap_gp_timer_ops, s, "omap.gptimer",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- return s;
-}
diff --git a/hw/timer/omap_synctimer.c b/hw/timer/omap_synctimer.c
deleted file mode 100644
index d93a934..0000000
--- a/hw/timer/omap_synctimer.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * TI OMAP2 32kHz sync timer emulation.
- *
- * Copyright (C) 2007-2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) any later version of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "qemu/timer.h"
-#include "hw/arm/omap.h"
-struct omap_synctimer_s {
- MemoryRegion iomem;
- uint32_t val;
- uint16_t readh;
-};
-
-/* 32-kHz Sync Timer of the OMAP2 */
-static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) {
- return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000,
- NANOSECONDS_PER_SECOND);
-}
-
-void omap_synctimer_reset(struct omap_synctimer_s *s)
-{
- s->val = omap_synctimer_read(s);
-}
-
-static uint32_t omap_synctimer_readw(void *opaque, hwaddr addr)
-{
- struct omap_synctimer_s *s = opaque;
-
- switch (addr) {
- case 0x00: /* 32KSYNCNT_REV */
- return 0x21;
-
- case 0x10: /* CR */
- return omap_synctimer_read(s) - s->val;
- }
-
- OMAP_BAD_REG(addr);
- return 0;
-}
-
-static uint32_t omap_synctimer_readh(void *opaque, hwaddr addr)
-{
- struct omap_synctimer_s *s = opaque;
- uint32_t ret;
-
- if (addr & 2)
- return s->readh;
- else {
- ret = omap_synctimer_readw(opaque, addr);
- s->readh = ret >> 16;
- return ret & 0xffff;
- }
-}
-
-static uint64_t omap_synctimer_readfn(void *opaque, hwaddr addr,
- unsigned size)
-{
- switch (size) {
- case 1:
- return omap_badwidth_read32(opaque, addr);
- case 2:
- return omap_synctimer_readh(opaque, addr);
- case 4:
- return omap_synctimer_readw(opaque, addr);
- default:
- g_assert_not_reached();
- }
-}
-
-static void omap_synctimer_writefn(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- OMAP_BAD_REG(addr);
-}
-
-static const MemoryRegionOps omap_synctimer_ops = {
- .read = omap_synctimer_readfn,
- .write = omap_synctimer_writefn,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta,
- struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk)
-{
- struct omap_synctimer_s *s = g_malloc0(sizeof(*s));
-
- omap_synctimer_reset(s);
- memory_region_init_io(&s->iomem, NULL, &omap_synctimer_ops, s, "omap.synctimer",
- omap_l4_region_size(ta, 0));
- omap_l4_attach(ta, 0, &s->iomem);
-
- return s;
-}
diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c
index 6479ab1..6d4ac31 100644
--- a/hw/timer/pxa2xx_timer.c
+++ b/hw/timer/pxa2xx_timer.c
@@ -11,51 +11,49 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "hw/arm/pxa.h"
+#include "system/runstate.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"
-#include "sysemu/watchdog.h"
-
-#define OSMR0 0x00
-#define OSMR1 0x04
-#define OSMR2 0x08
-#define OSMR3 0x0c
-#define OSMR4 0x80
-#define OSMR5 0x84
-#define OSMR6 0x88
-#define OSMR7 0x8c
-#define OSMR8 0x90
-#define OSMR9 0x94
-#define OSMR10 0x98
-#define OSMR11 0x9c
-#define OSCR 0x10 /* OS Timer Count */
-#define OSCR4 0x40
-#define OSCR5 0x44
-#define OSCR6 0x48
-#define OSCR7 0x4c
-#define OSCR8 0x50
-#define OSCR9 0x54
-#define OSCR10 0x58
-#define OSCR11 0x5c
-#define OSSR 0x14 /* Timer status register */
-#define OWER 0x18
-#define OIER 0x1c /* Interrupt enable register 3-0 to E3-E0 */
-#define OMCR4 0xc0 /* OS Match Control registers */
-#define OMCR5 0xc4
-#define OMCR6 0xc8
-#define OMCR7 0xcc
-#define OMCR8 0xd0
-#define OMCR9 0xd4
-#define OMCR10 0xd8
-#define OMCR11 0xdc
-#define OSNR 0x20
-
-#define PXA25X_FREQ 3686400 /* 3.6864 MHz */
-#define PXA27X_FREQ 3250000 /* 3.25 MHz */
+#include "system/watchdog.h"
+
+#define OSMR0 0x00
+#define OSMR1 0x04
+#define OSMR2 0x08
+#define OSMR3 0x0c
+#define OSMR4 0x80
+#define OSMR5 0x84
+#define OSMR6 0x88
+#define OSMR7 0x8c
+#define OSMR8 0x90
+#define OSMR9 0x94
+#define OSMR10 0x98
+#define OSMR11 0x9c
+#define OSCR 0x10 /* OS Timer Count */
+#define OSCR4 0x40
+#define OSCR5 0x44
+#define OSCR6 0x48
+#define OSCR7 0x4c
+#define OSCR8 0x50
+#define OSCR9 0x54
+#define OSCR10 0x58
+#define OSCR11 0x5c
+#define OSSR 0x14 /* Timer status register */
+#define OWER 0x18
+#define OIER 0x1c /* Interrupt enable register 3-0 to E3-E0 */
+#define OMCR4 0xc0 /* OS Match Control registers */
+#define OMCR5 0xc4
+#define OMCR6 0xc8
+#define OMCR7 0xcc
+#define OMCR8 0xd0
+#define OMCR9 0xd4
+#define OMCR10 0xd8
+#define OMCR11 0xdc
+#define OSNR 0x20
+
+#define PXA25X_FREQ 3686400 /* 3.6864 MHz */
static int pxa2xx_timer4_freq[8] = {
[0] = 0,
@@ -108,7 +106,7 @@ struct PXA2xxTimerInfo {
PXA2xxTimer4 tm4[8];
};
-#define PXA2XX_TIMER_HAVE_TM4 0
+#define PXA2XX_TIMER_HAVE_TM4 0
static inline int pxa2xx_timer_has_tm4(PXA2xxTimerInfo *s)
{
@@ -232,7 +230,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
NANOSECONDS_PER_SECOND);
case OIER:
return s->irq_enabled;
- case OSSR: /* Status register */
+ case OSSR: /* Status register */
return s->events;
case OWER:
return s->reset3;
@@ -338,7 +336,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
case OIER:
s->irq_enabled = value & 0xfff;
break;
- case OSSR: /* Status register */
+ case OSSR: /* Status register */
value &= s->events;
s->events &= ~value;
for (i = 0; i < 4; i ++, value >>= 1)
@@ -347,7 +345,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
if (pxa2xx_timer_has_tm4(s) && !(s->events & 0xff0) && value)
qemu_irq_lower(s->irq4);
break;
- case OWER: /* XXX: Reset on OSMR3 match? */
+ case OWER: /* XXX: Reset on OSMR3 match? */
s->reset3 = value;
break;
case OMCR7: tm ++;
@@ -551,14 +549,13 @@ static const VMStateDescription vmstate_pxa2xx_timer_regs = {
}
};
-static Property pxa25x_timer_dev_properties[] = {
+static const Property pxa25x_timer_dev_properties[] = {
DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA25X_FREQ),
DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags,
PXA2XX_TIMER_HAVE_TM4, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pxa25x_timer_dev_class_init(ObjectClass *klass, void *data)
+static void pxa25x_timer_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -573,29 +570,7 @@ static const TypeInfo pxa25x_timer_dev_info = {
.class_init = pxa25x_timer_dev_class_init,
};
-static Property pxa27x_timer_dev_properties[] = {
- DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA27X_FREQ),
- DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags,
- PXA2XX_TIMER_HAVE_TM4, true),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void pxa27x_timer_dev_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->desc = "PXA27x timer";
- device_class_set_props(dc, pxa27x_timer_dev_properties);
-}
-
-static const TypeInfo pxa27x_timer_dev_info = {
- .name = "pxa27x-timer",
- .parent = TYPE_PXA2XX_TIMER,
- .instance_size = sizeof(PXA2xxTimerInfo),
- .class_init = pxa27x_timer_dev_class_init,
-};
-
-static void pxa2xx_timer_class_init(ObjectClass *oc, void *data)
+static void pxa2xx_timer_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -616,7 +591,6 @@ static void pxa2xx_timer_register_types(void)
{
type_register_static(&pxa2xx_timer_type_info);
type_register_static(&pxa25x_timer_dev_info);
- type_register_static(&pxa27x_timer_dev_info);
}
type_init(pxa2xx_timer_register_types)
diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c
index 0883293..cdff7f4 100644
--- a/hw/timer/renesas_cmt.c
+++ b/hw/timer/renesas_cmt.c
@@ -253,17 +253,16 @@ static const VMStateDescription vmstate_rcmt = {
}
};
-static Property rcmt_properties[] = {
+static const Property rcmt_properties[] = {
DEFINE_PROP_UINT64("input-freq", RCMTState, input_freq, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rcmt_class_init(ObjectClass *klass, void *data)
+static void rcmt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_rcmt;
- dc->reset = rcmt_reset;
+ device_class_set_legacy_reset(dc, rcmt_reset);
device_class_set_props(dc, rcmt_properties);
}
diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c
index 1d47d06..95707f2 100644
--- a/hw/timer/renesas_tmr.c
+++ b/hw/timer/renesas_tmr.c
@@ -463,17 +463,16 @@ static const VMStateDescription vmstate_rtmr = {
}
};
-static Property rtmr_properties[] = {
+static const Property rtmr_properties[] = {
DEFINE_PROP_UINT64("input-freq", RTMRState, input_freq, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void rtmr_class_init(ObjectClass *klass, void *data)
+static void rtmr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_rtmr;
- dc->reset = rtmr_reset;
+ device_class_set_legacy_reset(dc, rtmr_reset);
device_class_set_props(dc, rtmr_properties);
}
diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c
index 7788939..d4fa32c 100644
--- a/hw/timer/sh_timer.c
+++ b/hw/timer/sh_timer.c
@@ -9,7 +9,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/log.h"
#include "hw/irq.h"
#include "hw/sh4/sh.h"
diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c
index e8610c3..e85e389 100644
--- a/hw/timer/sifive_pwm.c
+++ b/hw/timer/sifive_pwm.c
@@ -404,11 +404,10 @@ static const VMStateDescription vmstate_sifive_pwm = {
}
};
-static Property sifive_pwm_properties[] = {
+static const Property sifive_pwm_properties[] = {
/* 0.5Ghz per spec after FSBL */
DEFINE_PROP_UINT64("clock-frequency", struct SiFivePwmState,
freq_hz, 500000000ULL),
- DEFINE_PROP_END_OF_LIST(),
};
static void sifive_pwm_init(Object *obj)
@@ -442,11 +441,11 @@ static void sifive_pwm_realize(DeviceState *dev, Error **errp)
sifive_pwm_interrupt_3, s);
}
-static void sifive_pwm_class_init(ObjectClass *klass, void *data)
+static void sifive_pwm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = sifive_pwm_reset;
+ device_class_set_legacy_reset(dc, sifive_pwm_reset);
device_class_set_props(dc, sifive_pwm_properties);
dc->vmsd = &vmstate_sifive_pwm;
dc->realize = sifive_pwm_realize;
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
index 5507b01..3e071fb 100644
--- a/hw/timer/slavio_timer.c
+++ b/hw/timer/slavio_timer.c
@@ -420,16 +420,15 @@ static void slavio_timer_init(Object *obj)
}
}
-static Property slavio_timer_properties[] = {
+static const Property slavio_timer_properties[] = {
DEFINE_PROP_UINT32("num_cpus", SLAVIO_TIMERState, num_cpus, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void slavio_timer_class_init(ObjectClass *klass, void *data)
+static void slavio_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = slavio_timer_reset;
+ device_class_set_legacy_reset(dc, slavio_timer_reset);
dc->vmsd = &vmstate_slavio_timer;
device_class_set_props(dc, slavio_timer_properties);
}
diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c
index daceedf..31f77ac 100644
--- a/hw/timer/sse-counter.c
+++ b/hw/timer/sse-counter.c
@@ -448,13 +448,13 @@ static const VMStateDescription sse_counter_vmstate = {
}
};
-static void sse_counter_class_init(ObjectClass *klass, void *data)
+static void sse_counter_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sse_counter_realize;
dc->vmsd = &sse_counter_vmstate;
- dc->reset = sse_counter_reset;
+ device_class_set_legacy_reset(dc, sse_counter_reset);
}
static const TypeInfo sse_counter_info = {
diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c
index cb20a9e..866d5ee 100644
--- a/hw/timer/sse-timer.c
+++ b/hw/timer/sse-timer.c
@@ -440,18 +440,17 @@ static const VMStateDescription sse_timer_vmstate = {
}
};
-static Property sse_timer_properties[] = {
+static const Property sse_timer_properties[] = {
DEFINE_PROP_LINK("counter", SSETimer, counter, TYPE_SSE_COUNTER, SSECounter *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void sse_timer_class_init(ObjectClass *klass, void *data)
+static void sse_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sse_timer_realize;
dc->vmsd = &sse_timer_vmstate;
- dc->reset = sse_timer_reset;
+ device_class_set_legacy_reset(dc, sse_timer_reset);
device_class_set_props(dc, sse_timer_properties);
}
diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c
index f28958c..d97b2f8 100644
--- a/hw/timer/stellaris-gptm.c
+++ b/hw/timer/stellaris-gptm.c
@@ -308,7 +308,7 @@ static void stellaris_gptm_realize(DeviceState *dev, Error **errp)
s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]);
}
-static void stellaris_gptm_class_init(ObjectClass *klass, void *data)
+static void stellaris_gptm_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c
index de4208b..be844e7 100644
--- a/hw/timer/stm32f2xx_timer.c
+++ b/hw/timer/stm32f2xx_timer.c
@@ -298,10 +298,9 @@ static const VMStateDescription vmstate_stm32f2xx_timer = {
}
};
-static Property stm32f2xx_timer_properties[] = {
+static const Property stm32f2xx_timer_properties[] = {
DEFINE_PROP_UINT64("clock-frequency", struct STM32F2XXTimerState,
freq_hz, 1000000000),
- DEFINE_PROP_END_OF_LIST(),
};
static void stm32f2xx_timer_init(Object *obj)
@@ -321,11 +320,11 @@ static void stm32f2xx_timer_realize(DeviceState *dev, Error **errp)
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, stm32f2xx_timer_interrupt, s);
}
-static void stm32f2xx_timer_class_init(ObjectClass *klass, void *data)
+static void stm32f2xx_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = stm32f2xx_timer_reset;
+ device_class_set_legacy_reset(dc, stm32f2xx_timer_reset);
device_class_set_props(dc, stm32f2xx_timer_properties);
dc->vmsd = &vmstate_stm32f2xx_timer;
dc->realize = stm32f2xx_timer_realize;
diff --git a/hw/timer/trace-events b/hw/timer/trace-events
index de769f4..c5b6db4 100644
--- a/hw/timer/trace-events
+++ b/hw/timer/trace-events
@@ -31,7 +31,7 @@ aspeed_timer_ctrl_overflow_interrupt(uint8_t i, bool enable) "Timer %" PRIu8 ":
aspeed_timer_ctrl_pulse_enable(uint8_t i, bool enable) "Timer %" PRIu8 ": %d"
aspeed_timer_set_ctrl2(uint32_t value) "Value: 0x%" PRIx32
aspeed_timer_set_value(int timer, int reg, uint32_t value) "Timer %d register %d: 0x%" PRIx32
-aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRIx64 ": of size %u: 0x%" PRIx64
+aspeed_timer_read(uint64_t offset, uint64_t value) "From 0x%" PRIx64 ": 0x%" PRIx64
# armv7m_systick.c
systick_reload(void) "systick reload"
@@ -49,6 +49,12 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A
cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset"
+# imx_gpt.c
+imx_gpt_set_freq(uint32_t clksrc, uint32_t freq) "Setting clksrc %u to %u Hz"
+imx_gpt_read(const char *name, uint64_t value) "%s -> 0x%08" PRIx64
+imx_gpt_write(const char *name, uint64_t value) "%s <- 0x%08" PRIx64
+imx_gpt_timeout(void) ""
+
# npcm7xx_timer.c
npcm7xx_timer_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
npcm7xx_timer_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
@@ -108,9 +114,9 @@ hpet_ram_read_reading_counter(uint8_t reg_off, uint64_t cur_tick) "reading count
hpet_ram_read_invalid(void) "invalid hpet_ram_readl"
hpet_ram_write(uint64_t addr, uint64_t value) "enter hpet_ram_writel at 0x%" PRIx64 " = 0x%" PRIx64
hpet_ram_write_timer_id(uint64_t timer_id) "hpet_ram_writel timer_id = 0x%" PRIx64
-hpet_ram_write_tn_cfg(void) "hpet_ram_writel HPET_TN_CFG"
-hpet_ram_write_invalid_tn_cfg(uint8_t reg_off) "invalid HPET_TN_CFG + %" PRIu8 " write"
+hpet_ram_write_tn_cfg(uint8_t reg_off) "hpet_ram_writel HPET_TN_CFG + %" PRIu8
hpet_ram_write_tn_cmp(uint8_t reg_off) "hpet_ram_writel HPET_TN_CMP + %" PRIu8
+hpet_ram_write_invalid_tn_cmp(void) "invalid HPET_TN_CMP + 4 write"
hpet_ram_write_invalid(void) "invalid hpet_ram_writel"
hpet_ram_write_counter_write_while_enabled(void) "Writing counter while HPET enabled!"
hpet_ram_write_counter_written(uint8_t reg_off, uint64_t value, uint64_t counter) "HPET counter + %" PRIu8 "written. crt = 0x%" PRIx64 " -> 0x%" PRIx64
diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c
index 32a9df6..ff4a224 100644
--- a/hw/timer/xilinx_timer.c
+++ b/hw/timer/xilinx_timer.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2009 Edgar E. Iglesias.
*
+ * DS573: https://docs.amd.com/v/u/en-US/xps_timer
+ * LogiCORE IP XPS Timer/Counter (v1.02a)
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
@@ -23,10 +26,12 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"
@@ -69,6 +74,7 @@ struct XpsTimerState
{
SysBusDevice parent_obj;
+ EndianMode model_endianness;
MemoryRegion mmio;
qemu_irq irq;
uint8_t one_timer_only;
@@ -189,14 +195,21 @@ timer_write(void *opaque, hwaddr addr,
timer_update_irq(t);
}
-static const MemoryRegionOps timer_ops = {
- .read = timer_read,
- .write = timer_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 4,
- .max_access_size = 4
- }
+static const MemoryRegionOps timer_ops[2] = {
+ [0 ... 1] = {
+ .read = timer_read,
+ .write = timer_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ },
+ [0].endianness = DEVICE_LITTLE_ENDIAN,
+ [1].endianness = DEVICE_BIG_ENDIAN,
};
static void timer_hit(void *opaque)
@@ -216,6 +229,12 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
XpsTimerState *t = XILINX_TIMER(dev);
unsigned int i;
+ if (t->model_endianness == ENDIAN_MODE_UNSPECIFIED) {
+ error_setg(errp, TYPE_XILINX_TIMER " property 'endianness'"
+ " must be set to 'big' or 'little'");
+ return;
+ }
+
/* Init all the ptimers. */
t->timers = g_malloc0(sizeof t->timers[0] * num_timers(t));
for (i = 0; i < num_timers(t); i++) {
@@ -229,8 +248,9 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
ptimer_transaction_commit(xt->ptimer);
}
- memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, "xlnx.xps-timer",
- R_MAX * 4 * num_timers(t));
+ memory_region_init_io(&t->mmio, OBJECT(t),
+ &timer_ops[t->model_endianness == ENDIAN_MODE_BIG],
+ t, "xlnx.xps-timer", R_MAX * 4 * num_timers(t));
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &t->mmio);
}
@@ -242,13 +262,13 @@ static void xilinx_timer_init(Object *obj)
sysbus_init_irq(SYS_BUS_DEVICE(obj), &t->irq);
}
-static Property xilinx_timer_properties[] = {
+static const Property xilinx_timer_properties[] = {
+ DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XpsTimerState, model_endianness),
DEFINE_PROP_UINT32("clock-frequency", XpsTimerState, freq_hz, 62 * 1000000),
DEFINE_PROP_UINT8("one-timer-only", XpsTimerState, one_timer_only, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xilinx_timer_class_init(ObjectClass *klass, void *data)
+static void xilinx_timer_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
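
With the xilinx_timer.c hunks above, the device no longer has a build-time endianness: realize fails unless the new "endianness" property is set to 'big' or 'little', and the matching entry of the timer_ops[] pair is chosen when the MMIO region is created. A hedged sketch of how board code might wire this up; the device type name, property name and values come from the hunks, while the base address and the surrounding qdev/sysbus calls are illustrative:

    DeviceState *dev = qdev_new("xlnx.xps-timer");

    /* mandatory after this change; realize reports an error otherwise */
    object_property_set_str(OBJECT(dev), "endianness", "little", &error_fatal);
    qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x41c00000 /* example base */);
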
diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c
index 5cd5a25..bc7a78f 100644
--- a/hw/tpm/tpm_crb.c
+++ b/hw/tpm/tpm_crb.c
@@ -18,15 +18,15 @@
#include "qemu/module.h"
#include "qapi/error.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_ids.h"
#include "hw/acpi/tpm.h"
#include "migration/vmstate.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
-#include "sysemu/reset.h"
-#include "sysemu/xen.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
+#include "system/reset.h"
+#include "system/xen.h"
#include "tpm_prop.h"
#include "tpm_ppi.h"
#include "trace.h"
@@ -226,10 +226,9 @@ static const VMStateDescription vmstate_tpm_crb = {
}
};
-static Property tpm_crb_properties[] = {
+static const Property tpm_crb_properties[] = {
DEFINE_PROP_TPMBE("tpmdev", CRBState, tpmbe),
DEFINE_PROP_BOOL("ppi", CRBState, ppi_enabled, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void tpm_crb_reset(void *dev)
@@ -316,7 +315,7 @@ static void tpm_crb_realize(DeviceState *dev, Error **errp)
}
}
-static void tpm_crb_class_init(ObjectClass *klass, void *data)
+static void tpm_crb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
TPMIfClass *tc = TPM_IF_CLASS(klass);
@@ -338,7 +337,7 @@ static const TypeInfo tpm_crb_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(CRBState),
.class_init = tpm_crb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c
index f27ed6c..984d3d1 100644
--- a/hw/tpm/tpm_ppi.c
+++ b/hw/tpm/tpm_ppi.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
-#include "sysemu/memory_mapping.h"
+#include "system/memory_mapping.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/acpi/tpm.h"
diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h
index bf5d4a3..88f316e 100644
--- a/hw/tpm/tpm_ppi.h
+++ b/hw/tpm/tpm_ppi.h
@@ -12,7 +12,7 @@
#ifndef TPM_TPM_PPI_H
#define TPM_TPM_PPI_H
-#include "exec/memory.h"
+#include "system/memory.h"
typedef struct TPMPPI {
MemoryRegion ram;
diff --git a/hw/tpm/tpm_prop.h b/hw/tpm/tpm_prop.h
index bbd4225..c4df748 100644
--- a/hw/tpm/tpm_prop.h
+++ b/hw/tpm/tpm_prop.h
@@ -22,7 +22,7 @@
#ifndef HW_TPM_PROP_H
#define HW_TPM_PROP_H
-#include "sysemu/tpm_backend.h"
+#include "system/tpm_backend.h"
#include "hw/qdev-properties.h"
extern const PropertyInfo qdev_prop_tpm;
diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
index e084e98..ea608ba 100644
--- a/hw/tpm/tpm_spapr.c
+++ b/hw/tpm/tpm_spapr.c
@@ -19,8 +19,8 @@
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
#include "tpm_prop.h"
#include "hw/ppc/spapr.h"
@@ -206,7 +206,6 @@ static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
break;
default:
g_assert_not_reached();
- break;
}
trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
spapr_tpm_send_crq(dev, &local_crq);
@@ -365,10 +364,9 @@ static const VMStateDescription vmstate_spapr_vtpm = {
}
};
-static Property tpm_spapr_properties[] = {
+static const Property tpm_spapr_properties[] = {
DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
- DEFINE_PROP_END_OF_LIST(),
};
static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
@@ -389,7 +387,7 @@ static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}
-static void tpm_spapr_class_init(ObjectClass *klass, void *data)
+static void tpm_spapr_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
@@ -416,7 +414,7 @@ static const TypeInfo tpm_spapr_info = {
.parent = TYPE_VIO_SPAPR_DEVICE,
.instance_size = sizeof(SpaprTpmState),
.class_init = tpm_spapr_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
diff --git a/hw/tpm/tpm_tis.h b/hw/tpm/tpm_tis.h
index 6f14896..184632f 100644
--- a/hw/tpm/tpm_tis.h
+++ b/hw/tpm/tpm_tis.h
@@ -24,7 +24,7 @@
#ifndef TPM_TPM_TIS_H
#define TPM_TPM_TIS_H
-#include "sysemu/tpm_backend.h"
+#include "system/tpm_backend.h"
#include "tpm_ppi.h"
#define TPM_TIS_NUM_LOCALITIES 5 /* per spec */
diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c
index 1bfa28b..cdd0df1 100644
--- a/hw/tpm/tpm_tis_common.c
+++ b/hw/tpm/tpm_tis_common.c
@@ -34,8 +34,8 @@
#include "hw/pci/pci_ids.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm_util.h"
+#include "system/tpm_backend.h"
+#include "system/tpm_util.h"
#include "tpm_ppi.h"
#include "trace.h"
diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c
index 4bb0965..5ce84dc 100644
--- a/hw/tpm/tpm_tis_i2c.c
+++ b/hw/tpm/tpm_tis_i2c.c
@@ -211,8 +211,6 @@ static inline void tpm_tis_i2c_clear_data(TPMStateI2C *i2cst)
i2cst->tis_addr = 0xffffffff;
i2cst->reg_name = NULL;
memset(i2cst->data, 0, sizeof(i2cst->data));
-
- return;
}
/* Send data to TPM */
@@ -281,8 +279,6 @@ static inline void tpm_tis_i2c_tpm_send(TPMStateI2C *i2cst)
tpm_tis_i2c_clear_data(i2cst);
}
-
- return;
}
/* Callback from TPM to indicate that response is copied */
@@ -491,9 +487,8 @@ static int tpm_tis_i2c_send(I2CSlave *i2c, uint8_t data)
return 1;
}
-static Property tpm_tis_i2c_properties[] = {
+static const Property tpm_tis_i2c_properties[] = {
DEFINE_PROP_TPMBE("tpmdev", TPMStateI2C, state.be_driver),
- DEFINE_PROP_END_OF_LIST(),
};
static void tpm_tis_i2c_realizefn(DeviceState *dev, Error **errp)
@@ -531,14 +526,14 @@ static void tpm_tis_i2c_reset(DeviceState *dev)
return tpm_tis_reset(s);
}
-static void tpm_tis_i2c_class_init(ObjectClass *klass, void *data)
+static void tpm_tis_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
TPMIfClass *tc = TPM_IF_CLASS(klass);
dc->realize = tpm_tis_i2c_realizefn;
- dc->reset = tpm_tis_i2c_reset;
+ device_class_set_legacy_reset(dc, tpm_tis_i2c_reset);
dc->vmsd = &vmstate_tpm_tis_i2c;
device_class_set_props(dc, tpm_tis_i2c_properties);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
@@ -557,7 +552,7 @@ static const TypeInfo tpm_tis_i2c_info = {
.parent = TYPE_I2C_SLAVE,
.instance_size = sizeof(TPMStateI2C),
.class_init = tpm_tis_i2c_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c
index 8887b3c..dce8305 100644
--- a/hw/tpm/tpm_tis_isa.c
+++ b/hw/tpm/tpm_tis_isa.c
@@ -91,11 +91,10 @@ static void tpm_tis_isa_reset(DeviceState *dev)
return tpm_tis_reset(s);
}
-static Property tpm_tis_isa_properties[] = {
+static const Property tpm_tis_isa_properties[] = {
DEFINE_PROP_UINT32("irq", TPMStateISA, state.irq_num, TPM_TIS_IRQ),
DEFINE_PROP_TPMBE("tpmdev", TPMStateISA, state.be_driver),
DEFINE_PROP_BOOL("ppi", TPMStateISA, state.ppi_enabled, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void tpm_tis_isa_initfn(Object *obj)
@@ -167,7 +166,7 @@ static void build_tpm_tis_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
aml_append(scope, dev);
}
-static void tpm_tis_isa_class_init(ObjectClass *klass, void *data)
+static void tpm_tis_isa_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
TPMIfClass *tc = TPM_IF_CLASS(klass);
@@ -177,7 +176,7 @@ static void tpm_tis_isa_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_tpm_tis_isa;
tc->model = TPM_MODEL_TPM_TIS;
dc->realize = tpm_tis_isa_realizefn;
- dc->reset = tpm_tis_isa_reset;
+ device_class_set_legacy_reset(dc, tpm_tis_isa_reset);
tc->request_completed = tpm_tis_isa_request_completed;
tc->get_version = tpm_tis_isa_get_tpm_version;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
@@ -190,7 +189,7 @@ static const TypeInfo tpm_tis_isa_info = {
.instance_size = sizeof(TPMStateISA),
.instance_init = tpm_tis_isa_initfn,
.class_init = tpm_tis_isa_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ TYPE_ACPI_DEV_AML_IF },
{ }
diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c
index 941f7f7..2ffa858 100644
--- a/hw/tpm/tpm_tis_sysbus.c
+++ b/hw/tpm/tpm_tis_sysbus.c
@@ -90,10 +90,9 @@ static void tpm_tis_sysbus_reset(DeviceState *dev)
return tpm_tis_reset(s);
}
-static Property tpm_tis_sysbus_properties[] = {
+static const Property tpm_tis_sysbus_properties[] = {
DEFINE_PROP_UINT32("irq", TPMStateSysBus, state.irq_num, TPM_TIS_IRQ),
DEFINE_PROP_TPMBE("tpmdev", TPMStateSysBus, state.be_driver),
- DEFINE_PROP_END_OF_LIST(),
};
static void tpm_tis_sysbus_initfn(Object *obj)
@@ -125,7 +124,7 @@ static void tpm_tis_sysbus_realizefn(DeviceState *dev, Error **errp)
}
}
-static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data)
+static void tpm_tis_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
TPMIfClass *tc = TPM_IF_CLASS(klass);
@@ -134,8 +133,7 @@ static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_tpm_tis_sysbus;
tc->model = TPM_MODEL_TPM_TIS;
dc->realize = tpm_tis_sysbus_realizefn;
- dc->user_creatable = true;
- dc->reset = tpm_tis_sysbus_reset;
+ device_class_set_legacy_reset(dc, tpm_tis_sysbus_reset);
tc->request_completed = tpm_tis_sysbus_request_completed;
tc->get_version = tpm_tis_sysbus_get_tpm_version;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
@@ -143,11 +141,11 @@ static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data)
static const TypeInfo tpm_tis_sysbus_info = {
.name = TYPE_TPM_TIS_SYSBUS,
- .parent = TYPE_SYS_BUS_DEVICE,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
.instance_size = sizeof(TPMStateSysBus),
.instance_init = tpm_tis_sysbus_initfn,
.class_init = tpm_tis_sysbus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
diff --git a/hw/tricore/tc27x_soc.c b/hw/tricore/tc27x_soc.c
index ecd9271..f3b8498 100644
--- a/hw/tricore/tc27x_soc.c
+++ b/hw/tricore/tc27x_soc.c
@@ -201,19 +201,14 @@ static void tc27x_soc_init(Object *obj)
object_initialize_child(obj, "tc27x", &s->cpu, sc->cpu_type);
}
-static Property tc27x_soc_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void tc27x_soc_class_init(ObjectClass *klass, void *data)
+static void tc27x_soc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tc27x_soc_realize;
- device_class_set_props(dc, tc27x_soc_properties);
}
-static void tc277d_soc_class_init(ObjectClass *oc, void *data)
+static void tc277d_soc_class_init(ObjectClass *oc, const void *data)
{
TC27XSoCClass *sc = TC27X_SOC_CLASS(oc);
diff --git a/hw/tricore/triboard.c b/hw/tricore/triboard.c
index 4dba025..cb45b01 100644
--- a/hw/tricore/triboard.c
+++ b/hw/tricore/triboard.c
@@ -31,22 +31,20 @@
#include "hw/tricore/triboard.h"
#include "hw/tricore/tc27x_soc.h"
-static void tricore_load_kernel(const char *kernel_filename)
+static void tricore_load_kernel(TriCoreCPU *cpu, const char *kernel_filename)
{
uint64_t entry;
long kernel_size;
- TriCoreCPU *cpu;
CPUTriCoreState *env;
kernel_size = load_elf(kernel_filename, NULL,
NULL, NULL, &entry, NULL,
- NULL, NULL, 0,
+ NULL, NULL, ELFDATA2LSB,
EM_TRICORE, 1, 0);
if (kernel_size <= 0) {
error_report("no kernel file '%s'", kernel_filename);
exit(1);
}
- cpu = TRICORE_CPU(first_cpu);
env = &cpu->env;
env->PC = entry;
}
@@ -62,12 +60,12 @@ static void triboard_machine_init(MachineState *machine)
sysbus_realize(SYS_BUS_DEVICE(&ms->tc27x_soc), &error_fatal);
if (machine->kernel_filename) {
- tricore_load_kernel(machine->kernel_filename);
+ tricore_load_kernel(&ms->tc27x_soc.cpu, machine->kernel_filename);
}
}
static void triboard_machine_tc277d_class_init(ObjectClass *oc,
- void *data)
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
TriBoardMachineClass *amc = TRIBOARD_MACHINE_CLASS(oc);
diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c
index c29db8b..3facfdf 100644
--- a/hw/tricore/tricore_testboard.c
+++ b/hw/tricore/tricore_testboard.c
@@ -42,7 +42,7 @@ static void tricore_load_kernel(CPUTriCoreState *env)
kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL,
NULL, NULL, &entry, NULL,
- NULL, NULL, 0,
+ NULL, NULL, ELFDATA2LSB,
EM_TRICORE, 1, 0);
if (kernel_size <= 0) {
error_report("no kernel file '%s'",
diff --git a/hw/tricore/tricore_testdevice.c b/hw/tricore/tricore_testdevice.c
index 9028d97..e8daf95 100644
--- a/hw/tricore/tricore_testdevice.c
+++ b/hw/tricore/tricore_testdevice.c
@@ -47,7 +47,7 @@ static const MemoryRegionOps tricore_testdevice_ops = {
.min_access_size = 4,
.max_access_size = 4,
},
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void tricore_testdevice_init(Object *obj)
@@ -58,16 +58,11 @@ static void tricore_testdevice_init(Object *obj)
"tricore_testdevice", 0x4);
}
-static Property tricore_testdevice_properties[] = {
- DEFINE_PROP_END_OF_LIST()
-};
-
-static void tricore_testdevice_class_init(ObjectClass *klass, void *data)
+static void tricore_testdevice_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- device_class_set_props(dc, tricore_testdevice_properties);
- dc->reset = tricore_testdevice_reset;
+ device_class_set_legacy_reset(dc, tricore_testdevice_reset);
}
static const TypeInfo tricore_testdevice_info = {
diff --git a/hw/uefi/Kconfig b/hw/uefi/Kconfig
new file mode 100644
index 0000000..046d553
--- /dev/null
+++ b/hw/uefi/Kconfig
@@ -0,0 +1,3 @@
+config UEFI_VARS
+ bool
+ default y if X86_64 || AARCH64 || RISCV64 || LOONGARCH64
diff --git a/hw/uefi/LIMITATIONS.md b/hw/uefi/LIMITATIONS.md
new file mode 100644
index 0000000..29308bd
--- /dev/null
+++ b/hw/uefi/LIMITATIONS.md
@@ -0,0 +1,7 @@
+known issues and limitations
+----------------------------
+
+* works only on little endian hosts
+ - accessing structs in guest ram is done without endian conversion.
+* works only for 64-bit guests
+ - UINTN is mapped to uint64_t, for 32-bit guests that would be uint32_t
diff --git a/hw/uefi/hardware-info.c b/hw/uefi/hardware-info.c
new file mode 100644
index 0000000..930502a
--- /dev/null
+++ b/hw/uefi/hardware-info.c
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * pass hardware information to uefi
+ *
+ * see OvmfPkg/Library/HardwareInfoLib/ in edk2
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/nvram/fw_cfg.h"
+#include "hw/uefi/hardware-info.h"
+
+static void *blob;
+static uint64_t blobsize;
+
+void hardware_info_register(HARDWARE_INFO_TYPE type, void *info, uint64_t infosize)
+{
+ HARDWARE_INFO_HEADER hdr = {
+ .type.value = cpu_to_le64(type),
+ .size = cpu_to_le64(infosize),
+ };
+
+ blob = g_realloc(blob, blobsize + sizeof(hdr) + infosize);
+ memcpy(blob + blobsize, &hdr, sizeof(hdr));
+ blobsize += sizeof(hdr);
+ memcpy(blob + blobsize, info, infosize);
+ blobsize += infosize;
+
+ fw_cfg_modify_file(fw_cfg_find(), "etc/hardware-info", blob, blobsize);
+}
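
hardware_info_register() appends one (type, size) header plus payload to a single growing blob and republishes the whole blob as the fw_cfg file etc/hardware-info. A minimal caller sketch, mirroring what the x64 sysbus variant further down in this patch does (the address is just an example value):

HARDWARE_INFO_SIMPLE_DEVICE hwinfo = {
    .mmio_address = cpu_to_le64(0xfef10000),
};

/* guest firmware picks this up via fw_cfg and maps the device there */
hardware_info_register(HardwareInfoQemuUefiVars, &hwinfo, sizeof(hwinfo));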
diff --git a/hw/uefi/meson.build b/hw/uefi/meson.build
new file mode 100644
index 0000000..91eb95f
--- /dev/null
+++ b/hw/uefi/meson.build
@@ -0,0 +1,21 @@
+system_ss.add(files('hardware-info.c'))
+
+uefi_vars_ss = ss.source_set()
+if (config_all_devices.has_key('CONFIG_UEFI_VARS'))
+ uefi_vars_ss.add(files('var-service-core.c',
+ 'var-service-json.c',
+ 'var-service-vars.c',
+ 'var-service-auth.c',
+ 'var-service-guid.c',
+ 'var-service-utils.c',
+ 'var-service-policy.c',
+ 'var-service-sysbus.c'))
+ uefi_vars_ss.add(when: gnutls,
+ if_true: files('var-service-pkcs7.c'),
+ if_false: files('var-service-pkcs7-stub.c'))
+ uefi_vars_ss.add(files('var-service-siglist.c'))
+endif
+
+modules += { 'hw-uefi' : {
+ 'vars' : uefi_vars_ss,
+}}
diff --git a/hw/uefi/trace-events b/hw/uefi/trace-events
new file mode 100644
index 0000000..3694712
--- /dev/null
+++ b/hw/uefi/trace-events
@@ -0,0 +1,17 @@
+# device
+uefi_reg_read(uint64_t addr, unsigned size) "addr 0x%" PRIx64 ", size %u"
+uefi_reg_write(uint64_t addr, uint64_t val, unsigned size) "addr 0x%" PRIx64 ", val 0x%" PRIx64 ", size %u"
+uefi_hard_reset(void) ""
+
+# generic uefi
+uefi_variable(const char *context, const char *name, uint64_t size, const char *uuid) "context %s, name %s, size %" PRIu64 ", uuid %s"
+uefi_status(const char *context, const char *name) "context %s, status %s"
+uefi_event(const char *name) "event %s"
+
+# variable protocol
+uefi_vars_proto_cmd(const char *cmd) "cmd %s"
+uefi_vars_security_violation(const char *reason) "reason %s"
+
+# variable policy protocol
+uefi_vars_policy_cmd(const char *cmd) "cmd %s"
+uefi_vars_policy_deny(const char *reason) "reason %s"
diff --git a/hw/uefi/var-service-auth.c b/hw/uefi/var-service-auth.c
new file mode 100644
index 0000000..fba5a09
--- /dev/null
+++ b/hw/uefi/var-service-auth.c
@@ -0,0 +1,361 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - AuthVariableLib
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+static const uint16_t name_pk[] = u"PK";
+static const uint16_t name_kek[] = u"KEK";
+static const uint16_t name_db[] = u"db";
+static const uint16_t name_dbx[] = u"dbx";
+static const uint16_t name_setup_mode[] = u"SetupMode";
+static const uint16_t name_sigs_support[] = u"SignatureSupport";
+static const uint16_t name_sb[] = u"SecureBoot";
+static const uint16_t name_sb_enable[] = u"SecureBootEnable";
+static const uint16_t name_custom_mode[] = u"CustomMode";
+static const uint16_t name_vk[] = u"VendorKeys";
+static const uint16_t name_vk_nv[] = u"VendorKeysNv";
+
+static const uint32_t sigdb_attrs =
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS |
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS;
+
+static void set_secure_boot(uefi_vars_state *uv, uint8_t sb)
+{
+ uefi_vars_set_variable(uv, EfiGlobalVariable,
+ name_sb, sizeof(name_sb),
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ &sb, sizeof(sb));
+}
+
+static void set_secure_boot_enable(uefi_vars_state *uv, uint8_t sbe)
+{
+ uefi_vars_set_variable(uv, EfiSecureBootEnableDisable,
+ name_sb_enable, sizeof(name_sb_enable),
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS,
+ &sbe, sizeof(sbe));
+}
+
+static void set_setup_mode(uefi_vars_state *uv, uint8_t sm)
+{
+ uefi_vars_set_variable(uv, EfiGlobalVariable,
+ name_setup_mode, sizeof(name_setup_mode),
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ &sm, sizeof(sm));
+}
+
+static void set_custom_mode(uefi_vars_state *uv, uint8_t cm)
+{
+ uefi_vars_set_variable(uv, EfiCustomModeEnable,
+ name_custom_mode, sizeof(name_custom_mode),
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS,
+ &cm, sizeof(cm));
+}
+
+static void set_signature_support(uefi_vars_state *uv)
+{
+ QemuUUID sigs_support[5];
+
+ sigs_support[0] = EfiCertSha256Guid;
+ sigs_support[1] = EfiCertSha384Guid;
+ sigs_support[2] = EfiCertSha512Guid;
+ sigs_support[3] = EfiCertRsa2048Guid;
+ sigs_support[4] = EfiCertX509Guid;
+
+ uefi_vars_set_variable(uv, EfiGlobalVariable,
+ name_sigs_support, sizeof(name_sigs_support),
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ sigs_support, sizeof(sigs_support));
+}
+
+static bool setup_mode_is_active(uefi_vars_state *uv)
+{
+ uefi_variable *var;
+ uint8_t *value;
+
+ var = uefi_vars_find_variable(uv, EfiGlobalVariable,
+ name_setup_mode, sizeof(name_setup_mode));
+ if (var) {
+ value = var->data;
+ if (value[0] == SETUP_MODE) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool custom_mode_is_active(uefi_vars_state *uv)
+{
+ uefi_variable *var;
+ uint8_t *value;
+
+ var = uefi_vars_find_variable(uv, EfiCustomModeEnable,
+ name_custom_mode, sizeof(name_custom_mode));
+ if (var) {
+ value = var->data;
+ if (value[0] == CUSTOM_SECURE_BOOT_MODE) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool uefi_vars_is_sb_pk(uefi_variable *var)
+{
+ if (qemu_uuid_is_equal(&var->guid, &EfiGlobalVariable) &&
+ uefi_str_equal(var->name, var->name_size, name_pk, sizeof(name_pk))) {
+ return true;
+ }
+ return false;
+}
+
+static bool uefi_vars_is_sb_kek(uefi_variable *var)
+{
+ if (qemu_uuid_is_equal(&var->guid, &EfiGlobalVariable) &&
+ uefi_str_equal(var->name, var->name_size, name_kek, sizeof(name_kek))) {
+ return true;
+ }
+ return false;
+}
+
+static bool uefi_vars_is_sb_db(uefi_variable *var)
+{
+ if (!qemu_uuid_is_equal(&var->guid, &EfiImageSecurityDatabase)) {
+ return false;
+ }
+ if (uefi_str_equal(var->name, var->name_size, name_db, sizeof(name_db))) {
+ return true;
+ }
+ if (uefi_str_equal(var->name, var->name_size, name_dbx, sizeof(name_dbx))) {
+ return true;
+ }
+ return false;
+}
+
+bool uefi_vars_is_sb_any(uefi_variable *var)
+{
+ if (uefi_vars_is_sb_pk(var) ||
+ uefi_vars_is_sb_kek(var) ||
+ uefi_vars_is_sb_db(var)) {
+ return true;
+ }
+ return false;
+}
+
+static uefi_variable *uefi_vars_find_siglist(uefi_vars_state *uv,
+ uefi_variable *var)
+{
+ if (uefi_vars_is_sb_pk(var)) {
+ return uefi_vars_find_variable(uv, EfiGlobalVariable,
+ name_pk, sizeof(name_pk));
+ }
+ if (uefi_vars_is_sb_kek(var)) {
+ return uefi_vars_find_variable(uv, EfiGlobalVariable,
+ name_pk, sizeof(name_pk));
+ }
+ if (uefi_vars_is_sb_db(var)) {
+ return uefi_vars_find_variable(uv, EfiGlobalVariable,
+ name_kek, sizeof(name_kek));
+ }
+
+ return NULL;
+}
+
+static efi_status uefi_vars_check_auth_2_sb(uefi_vars_state *uv,
+ uefi_variable *var,
+ mm_variable_access *va,
+ void *data,
+ uint64_t data_offset)
+{
+ variable_auth_2 *auth = data;
+ uefi_variable *siglist;
+
+ if (custom_mode_is_active(uv)) {
+ /* no authentication in custom mode */
+ return EFI_SUCCESS;
+ }
+
+ if (setup_mode_is_active(uv) && !uefi_vars_is_sb_pk(var)) {
+ /* no authentication in setup mode (except PK) */
+ return EFI_SUCCESS;
+ }
+
+ if (auth->hdr_length == 24) {
+ /* no signature (auth->cert_data is empty) */
+ return EFI_SECURITY_VIOLATION;
+ }
+
+ siglist = uefi_vars_find_siglist(uv, var);
+ if (!siglist && setup_mode_is_active(uv) && uefi_vars_is_sb_pk(var)) {
+ /* check PK is self-signed */
+ uefi_variable tmp = {
+ .guid = EfiGlobalVariable,
+ .name = (uint16_t *)name_pk,
+ .name_size = sizeof(name_pk),
+ .attributes = sigdb_attrs,
+ .data = data + data_offset,
+ .data_size = va->data_size - data_offset,
+ };
+ return uefi_vars_check_pkcs7_2(&tmp, NULL, NULL, va, data);
+ }
+
+ return uefi_vars_check_pkcs7_2(siglist, NULL, NULL, va, data);
+}
+
+efi_status uefi_vars_check_auth_2(uefi_vars_state *uv, uefi_variable *var,
+ mm_variable_access *va, void *data)
+{
+ variable_auth_2 *auth = data;
+ uint64_t data_offset;
+ efi_status status;
+
+ if (va->data_size < sizeof(*auth)) {
+ return EFI_SECURITY_VIOLATION;
+ }
+ if (uadd64_overflow(sizeof(efi_time), auth->hdr_length, &data_offset)) {
+ return EFI_SECURITY_VIOLATION;
+ }
+ if (va->data_size < data_offset) {
+ return EFI_SECURITY_VIOLATION;
+ }
+
+ if (auth->hdr_revision != 0x0200 ||
+ auth->hdr_cert_type != WIN_CERT_TYPE_EFI_GUID ||
+ !qemu_uuid_is_equal(&auth->guid_cert_type, &EfiCertTypePkcs7Guid)) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if (uefi_vars_is_sb_any(var)) {
+ /* secure boot variables */
+ status = uefi_vars_check_auth_2_sb(uv, var, va, data, data_offset);
+ if (status != EFI_SUCCESS) {
+ return status;
+ }
+ } else {
+ /* other authenticated variables */
+ status = uefi_vars_check_pkcs7_2(NULL,
+ &var->digest, &var->digest_size,
+ va, data);
+ if (status != EFI_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* checks passed, set variable data */
+ var->time = auth->timestamp;
+ if (va->data_size - data_offset > 0) {
+ var->data = g_malloc(va->data_size - data_offset);
+ memcpy(var->data, data + data_offset, va->data_size - data_offset);
+ var->data_size = va->data_size - data_offset;
+ }
+
+ return EFI_SUCCESS;
+}
+
+efi_status uefi_vars_check_secure_boot(uefi_vars_state *uv, uefi_variable *var)
+{
+ uint8_t *value = var->data;
+
+ if (uefi_vars_is_sb_any(var)) {
+ if (var->attributes != sigdb_attrs) {
+ return EFI_INVALID_PARAMETER;
+ }
+ }
+
+ /* reject SecureBootEnable updates if force_secure_boot is set */
+ if (qemu_uuid_is_equal(&var->guid, &EfiSecureBootEnableDisable) &&
+ uefi_str_equal(var->name, var->name_size,
+ name_sb_enable, sizeof(name_sb_enable)) &&
+ uv->force_secure_boot &&
+ value[0] != SECURE_BOOT_ENABLE) {
+ return EFI_WRITE_PROTECTED;
+ }
+
+ /* reject CustomMode updates if disable_custom_mode is set */
+ if (qemu_uuid_is_equal(&var->guid, &EfiCustomModeEnable) &&
+ uefi_str_equal(var->name, var->name_size,
+ name_custom_mode, sizeof(name_custom_mode)) &&
+ uv->disable_custom_mode) {
+ return EFI_WRITE_PROTECTED;
+ }
+
+ return EFI_SUCCESS;
+}
+
+/* AuthVariableLibInitialize */
+void uefi_vars_auth_init(uefi_vars_state *uv)
+{
+ uefi_variable *pk_var, *sbe_var;
+ uint8_t platform_mode, sb, sbe, vk;
+
+ /* SetupMode */
+ pk_var = uefi_vars_find_variable(uv, EfiGlobalVariable,
+ name_pk, sizeof(name_pk));
+ if (!pk_var) {
+ platform_mode = SETUP_MODE;
+ } else {
+ platform_mode = USER_MODE;
+ }
+ set_setup_mode(uv, platform_mode);
+
+ /* SignatureSupport */
+ set_signature_support(uv);
+
+ /* SecureBootEnable */
+ sbe = SECURE_BOOT_DISABLE;
+ sbe_var = uefi_vars_find_variable(uv, EfiSecureBootEnableDisable,
+ name_sb_enable, sizeof(name_sb_enable));
+ if (sbe_var) {
+ if (platform_mode == USER_MODE) {
+ sbe = ((uint8_t *)sbe_var->data)[0];
+ }
+ } else if (platform_mode == USER_MODE) {
+ sbe = SECURE_BOOT_ENABLE;
+ set_secure_boot_enable(uv, sbe);
+ }
+
+ if (uv->force_secure_boot && sbe != SECURE_BOOT_ENABLE) {
+ sbe = SECURE_BOOT_ENABLE;
+ set_secure_boot_enable(uv, sbe);
+ }
+
+ /* SecureBoot */
+ if ((sbe == SECURE_BOOT_ENABLE) && (platform_mode == USER_MODE)) {
+ sb = SECURE_BOOT_MODE_ENABLE;
+ } else {
+ sb = SECURE_BOOT_MODE_DISABLE;
+ }
+ set_secure_boot(uv, sb);
+
+ /* CustomMode */
+ set_custom_mode(uv, STANDARD_SECURE_BOOT_MODE);
+
+ vk = 0;
+ uefi_vars_set_variable(uv, EfiGlobalVariable,
+ name_vk_nv, sizeof(name_vk_nv),
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS,
+ &vk, sizeof(vk));
+ uefi_vars_set_variable(uv, EfiGlobalVariable,
+ name_vk, sizeof(name_vk),
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ &vk, sizeof(vk));
+
+ /* flush to disk */
+ uefi_vars_json_save(uv);
+}
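
To make the initialization flow above concrete, the end state of uefi_vars_auth_init() for the two common cases, derived directly from the code above:

/*
 * fresh varstore, no PK enrolled:
 *   SetupMode        = SETUP_MODE
 *   SecureBootEnable = SECURE_BOOT_DISABLE (forced to ENABLE only with
 *                      force_secure_boot)
 *   SecureBoot       = SECURE_BOOT_MODE_DISABLE
 *
 * PK enrolled, SecureBootEnable not present yet:
 *   SetupMode        = USER_MODE
 *   SecureBootEnable = SECURE_BOOT_ENABLE (written to the varstore)
 *   SecureBoot       = SECURE_BOOT_MODE_ENABLE
 *
 * CustomMode is always reset to STANDARD_SECURE_BOOT_MODE and both
 * VendorKeys variables are set to 0.
 */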
diff --git a/hw/uefi/var-service-core.c b/hw/uefi/var-service-core.c
new file mode 100644
index 0000000..4836a0c
--- /dev/null
+++ b/hw/uefi/var-service-core.c
@@ -0,0 +1,322 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device
+ */
+#include "qemu/osdep.h"
+#include "qemu/crc32c.h"
+#include "system/dma.h"
+#include "migration/vmstate.h"
+
+#include "hw/uefi/var-service.h"
+#include "hw/uefi/var-service-api.h"
+#include "hw/uefi/var-service-edk2.h"
+
+#include "trace/trace-hw_uefi.h"
+
+static int uefi_vars_pre_load(void *opaque)
+{
+ uefi_vars_state *uv = opaque;
+
+ uefi_vars_clear_all(uv);
+ uefi_vars_policies_clear(uv);
+ g_free(uv->buffer);
+ return 0;
+}
+
+static int uefi_vars_post_load(void *opaque, int version_id)
+{
+ uefi_vars_state *uv = opaque;
+
+ uefi_vars_update_storage(uv);
+ uefi_vars_json_save(uv);
+ uv->buffer = g_malloc(uv->buf_size);
+ return 0;
+}
+
+const VMStateDescription vmstate_uefi_vars = {
+ .name = "uefi-vars",
+ .pre_load = uefi_vars_pre_load,
+ .post_load = uefi_vars_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(sts, uefi_vars_state),
+ VMSTATE_UINT32(buf_size, uefi_vars_state),
+ VMSTATE_UINT32(buf_addr_lo, uefi_vars_state),
+ VMSTATE_UINT32(buf_addr_hi, uefi_vars_state),
+ VMSTATE_UINT32(pio_xfer_offset, uefi_vars_state),
+ VMSTATE_VBUFFER_ALLOC_UINT32(pio_xfer_buffer, uefi_vars_state,
+ 0, NULL, buf_size),
+ VMSTATE_BOOL(end_of_dxe, uefi_vars_state),
+ VMSTATE_BOOL(ready_to_boot, uefi_vars_state),
+ VMSTATE_BOOL(exit_boot_service, uefi_vars_state),
+ VMSTATE_BOOL(policy_locked, uefi_vars_state),
+ VMSTATE_UINT64(used_storage, uefi_vars_state),
+ VMSTATE_QTAILQ_V(variables, uefi_vars_state, 0,
+ vmstate_uefi_variable, uefi_variable, next),
+ VMSTATE_QTAILQ_V(var_policies, uefi_vars_state, 0,
+ vmstate_uefi_var_policy, uefi_var_policy, next),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static uint32_t uefi_vars_cmd_mm(uefi_vars_state *uv, bool dma_mode)
+{
+ hwaddr dma;
+ mm_header *mhdr;
+ uint64_t size;
+ uint32_t retval;
+
+ dma = uv->buf_addr_lo | ((hwaddr)uv->buf_addr_hi << 32);
+ mhdr = (mm_header *) uv->buffer;
+
+ if (!uv->buffer || uv->buf_size < sizeof(*mhdr)) {
+ return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE;
+ }
+
+ /* read header */
+ if (dma_mode) {
+ dma_memory_read(&address_space_memory, dma,
+ uv->buffer, sizeof(*mhdr),
+ MEMTXATTRS_UNSPECIFIED);
+ } else {
+ memcpy(uv->buffer, uv->pio_xfer_buffer, sizeof(*mhdr));
+ }
+
+ if (uadd64_overflow(sizeof(*mhdr), mhdr->length, &size)) {
+ return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE;
+ }
+ if (uv->buf_size < size) {
+ return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE;
+ }
+
+ /* read buffer (excl header) */
+ if (dma_mode) {
+ dma_memory_read(&address_space_memory, dma + sizeof(*mhdr),
+ uv->buffer + sizeof(*mhdr), mhdr->length,
+ MEMTXATTRS_UNSPECIFIED);
+ } else {
+ memcpy(uv->buffer + sizeof(*mhdr),
+ uv->pio_xfer_buffer + sizeof(*mhdr),
+ mhdr->length);
+ }
+ memset(uv->buffer + size, 0, uv->buf_size - size);
+
+ /* dispatch */
+ if (qemu_uuid_is_equal(&mhdr->guid, &EfiSmmVariableProtocolGuid)) {
+ retval = uefi_vars_mm_vars_proto(uv);
+
+ } else if (qemu_uuid_is_equal(&mhdr->guid, &VarCheckPolicyLibMmiHandlerGuid)) {
+ retval = uefi_vars_mm_check_policy_proto(uv);
+
+ } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEndOfDxeEventGroupGuid)) {
+ trace_uefi_event("end-of-dxe");
+ uv->end_of_dxe = true;
+ retval = UEFI_VARS_STS_SUCCESS;
+
+ } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEventReadyToBootGuid)) {
+ trace_uefi_event("ready-to-boot");
+ uv->ready_to_boot = true;
+ retval = UEFI_VARS_STS_SUCCESS;
+
+ } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEventExitBootServicesGuid)) {
+ trace_uefi_event("exit-boot-service");
+ uv->exit_boot_service = true;
+ retval = UEFI_VARS_STS_SUCCESS;
+
+ } else {
+ retval = UEFI_VARS_STS_ERR_NOT_SUPPORTED;
+ }
+
+ /* write buffer */
+ if (dma_mode) {
+ dma_memory_write(&address_space_memory, dma,
+ uv->buffer, sizeof(*mhdr) + mhdr->length,
+ MEMTXATTRS_UNSPECIFIED);
+ } else {
+ memcpy(uv->pio_xfer_buffer + sizeof(*mhdr),
+ uv->buffer + sizeof(*mhdr),
+ mhdr->length);
+ }
+
+ return retval;
+}
+
+static void uefi_vars_soft_reset(uefi_vars_state *uv)
+{
+ g_free(uv->buffer);
+ uv->buffer = NULL;
+ uv->buf_size = 0;
+ uv->buf_addr_lo = 0;
+ uv->buf_addr_hi = 0;
+}
+
+void uefi_vars_hard_reset(uefi_vars_state *uv)
+{
+ trace_uefi_hard_reset();
+ uefi_vars_soft_reset(uv);
+
+ uv->end_of_dxe = false;
+ uv->ready_to_boot = false;
+ uv->exit_boot_service = false;
+ uv->policy_locked = false;
+
+ uefi_vars_clear_volatile(uv);
+ uefi_vars_policies_clear(uv);
+ uefi_vars_auth_init(uv);
+}
+
+static uint32_t uefi_vars_cmd(uefi_vars_state *uv, uint32_t cmd)
+{
+ switch (cmd) {
+ case UEFI_VARS_CMD_RESET:
+ uefi_vars_soft_reset(uv);
+ return UEFI_VARS_STS_SUCCESS;
+ case UEFI_VARS_CMD_DMA_MM:
+ return uefi_vars_cmd_mm(uv, true);
+ case UEFI_VARS_CMD_PIO_MM:
+ return uefi_vars_cmd_mm(uv, false);
+ case UEFI_VARS_CMD_PIO_ZERO_OFFSET:
+ uv->pio_xfer_offset = 0;
+ return UEFI_VARS_STS_SUCCESS;
+ default:
+ return UEFI_VARS_STS_ERR_NOT_SUPPORTED;
+ }
+}
+
+static uint64_t uefi_vars_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uefi_vars_state *uv = opaque;
+ uint64_t retval = -1;
+ void *xfer_ptr;
+
+ trace_uefi_reg_read(addr, size);
+
+ switch (addr) {
+ case UEFI_VARS_REG_MAGIC:
+ retval = UEFI_VARS_MAGIC_VALUE;
+ break;
+ case UEFI_VARS_REG_CMD_STS:
+ retval = uv->sts;
+ break;
+ case UEFI_VARS_REG_BUFFER_SIZE:
+ retval = uv->buf_size;
+ break;
+ case UEFI_VARS_REG_DMA_BUFFER_ADDR_LO:
+ retval = uv->buf_addr_lo;
+ break;
+ case UEFI_VARS_REG_DMA_BUFFER_ADDR_HI:
+ retval = uv->buf_addr_hi;
+ break;
+ case UEFI_VARS_REG_PIO_BUFFER_TRANSFER:
+ if (uv->pio_xfer_offset + size > uv->buf_size) {
+ retval = 0;
+ break;
+ }
+ xfer_ptr = uv->pio_xfer_buffer + uv->pio_xfer_offset;
+ switch (size) {
+ case 1:
+ retval = *(uint8_t *)xfer_ptr;
+ break;
+ case 2:
+ retval = *(uint16_t *)xfer_ptr;
+ break;
+ case 4:
+ retval = *(uint32_t *)xfer_ptr;
+ break;
+ case 8:
+ retval = *(uint64_t *)xfer_ptr;
+ break;
+ }
+ uv->pio_xfer_offset += size;
+ break;
+ case UEFI_VARS_REG_PIO_BUFFER_CRC32C:
+ retval = crc32c(0xffffffff, uv->pio_xfer_buffer, uv->pio_xfer_offset);
+ break;
+ case UEFI_VARS_REG_FLAGS:
+ retval = 0;
+ if (uv->use_pio) {
+ retval |= UEFI_VARS_FLAG_USE_PIO;
+ }
+ }
+ return retval;
+}
+
+static void uefi_vars_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ uefi_vars_state *uv = opaque;
+ void *xfer_ptr;
+
+ trace_uefi_reg_write(addr, val, size);
+
+ switch (addr) {
+ case UEFI_VARS_REG_CMD_STS:
+ uv->sts = uefi_vars_cmd(uv, val);
+ break;
+ case UEFI_VARS_REG_BUFFER_SIZE:
+ if (val > MAX_BUFFER_SIZE) {
+ val = MAX_BUFFER_SIZE;
+ }
+ uv->buf_size = val;
+ g_free(uv->buffer);
+ g_free(uv->pio_xfer_buffer);
+ uv->buffer = g_malloc(uv->buf_size);
+ uv->pio_xfer_buffer = g_malloc(uv->buf_size);
+ break;
+ case UEFI_VARS_REG_DMA_BUFFER_ADDR_LO:
+ uv->buf_addr_lo = val;
+ break;
+ case UEFI_VARS_REG_DMA_BUFFER_ADDR_HI:
+ uv->buf_addr_hi = val;
+ break;
+ case UEFI_VARS_REG_PIO_BUFFER_TRANSFER:
+ if (uv->pio_xfer_offset + size > uv->buf_size) {
+ break;
+ }
+ xfer_ptr = uv->pio_xfer_buffer + uv->pio_xfer_offset;
+ switch (size) {
+ case 1:
+ *(uint8_t *)xfer_ptr = val;
+ break;
+ case 2:
+ *(uint16_t *)xfer_ptr = val;
+ break;
+ case 4:
+ *(uint32_t *)xfer_ptr = val;
+ break;
+ case 8:
+ *(uint64_t *)xfer_ptr = val;
+ break;
+ }
+ uv->pio_xfer_offset += size;
+ break;
+ case UEFI_VARS_REG_PIO_BUFFER_CRC32C:
+ case UEFI_VARS_REG_FLAGS:
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps uefi_vars_ops = {
+ .read = uefi_vars_read,
+ .write = uefi_vars_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 2,
+ .max_access_size = 4,
+ },
+};
+
+void uefi_vars_init(Object *obj, uefi_vars_state *uv)
+{
+ QTAILQ_INIT(&uv->variables);
+ QTAILQ_INIT(&uv->var_policies);
+ uv->jsonfd = -1;
+ memory_region_init_io(&uv->mr, obj, &uefi_vars_ops, uv,
+ "uefi-vars", UEFI_VARS_REGS_SIZE);
+}
+
+void uefi_vars_realize(uefi_vars_state *uv, Error **errp)
+{
+ uefi_vars_json_init(uv, errp);
+ uefi_vars_json_load(uv, errp);
+}
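
For orientation, the DMA-mode call sequence a guest driver is expected to perform against the register block above looks roughly like this. mmio_read32()/mmio_write32() are hypothetical stand-ins for the firmware's MMIO accessors, and buf is assumed to be the guest-physical address of the request buffer:

/* sketch of one DMA-mode management-mode call, guest point of view */
static uint32_t uefi_vars_call_sketch(uint64_t regs, uint64_t buf, uint32_t bufsize)
{
    if (mmio_read32(regs + UEFI_VARS_REG_MAGIC) != UEFI_VARS_MAGIC_VALUE) {
        return UEFI_VARS_STS_ERR_NOT_SUPPORTED;
    }

    /* negotiate the buffer size, point the device at the request buffer */
    mmio_write32(regs + UEFI_VARS_REG_BUFFER_SIZE, bufsize);
    mmio_write32(regs + UEFI_VARS_REG_DMA_BUFFER_ADDR_LO, (uint32_t)buf);
    mmio_write32(regs + UEFI_VARS_REG_DMA_BUFFER_ADDR_HI, (uint32_t)(buf >> 32));

    /* kick the command; the device processes it synchronously */
    mmio_write32(regs + UEFI_VARS_REG_CMD_STS, UEFI_VARS_CMD_DMA_MM);
    return mmio_read32(regs + UEFI_VARS_REG_CMD_STS);
}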
diff --git a/hw/uefi/var-service-guid.c b/hw/uefi/var-service-guid.c
new file mode 100644
index 0000000..eba3655
--- /dev/null
+++ b/hw/uefi/var-service-guid.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - GUIDs
+ */
+
+#include "qemu/osdep.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+/* variable namespaces */
+
+const QemuUUID EfiGlobalVariable = {
+ .data = UUID_LE(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d,
+ 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
+};
+
+const QemuUUID EfiImageSecurityDatabase = {
+ .data = UUID_LE(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc,
+ 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
+};
+
+const QemuUUID EfiCustomModeEnable = {
+ .data = UUID_LE(0xc076ec0c, 0x7028, 0x4399, 0xa0, 0x72,
+ 0x71, 0xee, 0x5c, 0x44, 0x8b, 0x9f)
+};
+
+const QemuUUID EfiSecureBootEnableDisable = {
+ .data = UUID_LE(0xf0a30bc7, 0xaf08, 0x4556, 0x99, 0xc4,
+ 0x0, 0x10, 0x9, 0xc9, 0x3a, 0x44)
+};
+
+/* signatures */
+
+const QemuUUID EfiCertSha256Guid = {
+ .data = UUID_LE(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9,
+ 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
+};
+
+const QemuUUID EfiCertSha384Guid = {
+ .data = UUID_LE(0xff3e5307, 0x9fd0, 0x48c9, 0x85, 0xf1,
+ 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x1)
+};
+
+const QemuUUID EfiCertSha512Guid = {
+ .data = UUID_LE(0x93e0fae, 0xa6c4, 0x4f50, 0x9f, 0x1b,
+ 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a)
+};
+
+const QemuUUID EfiCertRsa2048Guid = {
+ .data = UUID_LE(0x3c5766e8, 0x269c, 0x4e34, 0xaa, 0x14,
+ 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6)
+};
+
+const QemuUUID EfiCertX509Guid = {
+ .data = UUID_LE(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5,
+ 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
+};
+
+const QemuUUID EfiCertTypePkcs7Guid = {
+ .data = UUID_LE(0x4aafd29d, 0x68df, 0x49ee, 0x8a, 0xa9,
+ 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7)
+};
+
+/*
+ * mm_header.guid values that the guest DXE/BDS phases use for
+ * sending requests to management mode
+ */
+
+const QemuUUID EfiSmmVariableProtocolGuid = {
+ .data = UUID_LE(0xed32d533, 0x99e6, 0x4209, 0x9c, 0xc0,
+ 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7)
+};
+
+const QemuUUID VarCheckPolicyLibMmiHandlerGuid = {
+ .data = UUID_LE(0xda1b0d11, 0xd1a7, 0x46c4, 0x9d, 0xc9,
+ 0xf3, 0x71, 0x48, 0x75, 0xc6, 0xeb)
+};
+
+/*
+ * mm_header.guid values that the guest DXE/BDS phases use for
+ * reporting event groups being signaled to management mode
+ */
+
+const QemuUUID EfiEndOfDxeEventGroupGuid = {
+ .data = UUID_LE(0x02ce967a, 0xdd7e, 0x4FFc, 0x9e, 0xe7,
+ 0x81, 0x0c, 0xF0, 0x47, 0x08, 0x80)
+};
+
+const QemuUUID EfiEventReadyToBootGuid = {
+ .data = UUID_LE(0x7ce88Fb3, 0x4bd7, 0x4679, 0x87, 0xa8,
+ 0xa8, 0xd8, 0xde, 0xe5, 0x0d, 0x2b)
+};
+
+const QemuUUID EfiEventExitBootServicesGuid = {
+ .data = UUID_LE(0x27abF055, 0xb1b8, 0x4c26, 0x80, 0x48,
+ 0x74, 0x8F, 0x37, 0xba, 0xa2, 0xdF)
+};
diff --git a/hw/uefi/var-service-json.c b/hw/uefi/var-service-json.c
new file mode 100644
index 0000000..ad3462c
--- /dev/null
+++ b/hw/uefi/var-service-json.c
@@ -0,0 +1,257 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - serialize non-volatile varstore from/to json,
+ * using qapi
+ *
+ * tools which can read/write these json files:
+ * - https://gitlab.com/kraxel/virt-firmware
+ * - https://github.com/awslabs/python-uefivars
+ */
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+#include "qobject/qobject.h"
+#include "qobject/qjson.h"
+
+#include "qapi/dealloc-visitor.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qobject-output-visitor.h"
+#include "qapi/qapi-types-uefi.h"
+#include "qapi/qapi-visit-uefi.h"
+
+static char *generate_hexstr(void *data, size_t len)
+{
+ static const char hex[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
+ };
+ uint8_t *src = data;
+ char *dest;
+ size_t i;
+
+ dest = g_malloc(len * 2 + 1);
+ for (i = 0; i < len * 2;) {
+ dest[i++] = hex[*src >> 4];
+ dest[i++] = hex[*src & 15];
+ src++;
+ }
+ dest[i++] = 0;
+
+ return dest;
+}
+
+static UefiVarStore *uefi_vars_to_qapi(uefi_vars_state *uv)
+{
+ UefiVarStore *vs;
+ UefiVariableList **tail;
+ UefiVariable *v;
+ QemuUUID be;
+ uefi_variable *var;
+
+ vs = g_new0(UefiVarStore, 1);
+ vs->version = 2;
+ tail = &vs->variables;
+
+ QTAILQ_FOREACH(var, &uv->variables, next) {
+ if (!(var->attributes & EFI_VARIABLE_NON_VOLATILE)) {
+ continue;
+ }
+
+ v = g_new0(UefiVariable, 1);
+ be = qemu_uuid_bswap(var->guid);
+ v->guid = qemu_uuid_unparse_strdup(&be);
+ v->name = uefi_ucs2_to_ascii(var->name, var->name_size);
+ v->attr = var->attributes;
+
+ v->data = generate_hexstr(var->data, var->data_size);
+
+ if (var->attributes &
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) {
+ v->time = generate_hexstr(&var->time, sizeof(var->time));
+ if (var->digest && var->digest_size) {
+ v->digest = generate_hexstr(var->digest, var->digest_size);
+ }
+ }
+
+ QAPI_LIST_APPEND(tail, v);
+ }
+ return vs;
+}
+
+static unsigned parse_hexchar(char c)
+{
+ switch (c) {
+ case '0' ... '9': return c - '0';
+ case 'a' ... 'f': return c - 'a' + 0xa;
+ case 'A' ... 'F': return c - 'A' + 0xA;
+ default: return 0;
+ }
+}
+
+static void parse_hexstr(void *dest, char *src, int len)
+{
+ uint8_t *data = dest;
+ size_t i;
+
+ for (i = 0; i < len; i += 2) {
+ *(data++) =
+ parse_hexchar(src[i]) << 4 |
+ parse_hexchar(src[i + 1]);
+ }
+}
+
+static void uefi_vars_from_qapi(uefi_vars_state *uv, UefiVarStore *vs)
+{
+ UefiVariableList *item;
+ UefiVariable *v;
+ QemuUUID be;
+ uefi_variable *var;
+ uint8_t *data;
+ size_t i, len;
+
+ for (item = vs->variables; item != NULL; item = item->next) {
+ v = item->value;
+
+ var = g_new0(uefi_variable, 1);
+ var->attributes = v->attr;
+ qemu_uuid_parse(v->guid, &be);
+ var->guid = qemu_uuid_bswap(be);
+
+ len = strlen(v->name);
+ var->name_size = len * 2 + 2;
+ var->name = g_malloc(var->name_size);
+ for (i = 0; i <= len; i++) {
+ var->name[i] = v->name[i];
+ }
+
+ len = strlen(v->data);
+ var->data_size = len / 2;
+ var->data = data = g_malloc(var->data_size);
+ parse_hexstr(var->data, v->data, len);
+
+ if (v->time && strlen(v->time) == 32) {
+ parse_hexstr(&var->time, v->time, 32);
+ }
+
+ if (v->digest) {
+ len = strlen(v->digest);
+ var->digest_size = len / 2;
+ var->digest = g_malloc(var->digest_size);
+ parse_hexstr(var->digest, v->digest, len);
+ }
+
+ QTAILQ_INSERT_TAIL(&uv->variables, var, next);
+ }
+}
+
+static GString *uefi_vars_to_json(uefi_vars_state *uv)
+{
+ UefiVarStore *vs = uefi_vars_to_qapi(uv);
+ QObject *qobj = NULL;
+ Visitor *v;
+ GString *gstr;
+
+ v = qobject_output_visitor_new(&qobj);
+ if (visit_type_UefiVarStore(v, NULL, &vs, NULL)) {
+ visit_complete(v, &qobj);
+ }
+ visit_free(v);
+ qapi_free_UefiVarStore(vs);
+
+ gstr = qobject_to_json_pretty(qobj, true);
+ qobject_unref(qobj);
+
+ return gstr;
+}
+
+void uefi_vars_json_init(uefi_vars_state *uv, Error **errp)
+{
+ if (uv->jsonfile) {
+ uv->jsonfd = qemu_create(uv->jsonfile, O_RDWR, 0666, errp);
+ }
+}
+
+void uefi_vars_json_save(uefi_vars_state *uv)
+{
+ g_autoptr(GString) gstr = NULL;
+ int rc;
+
+ if (uv->jsonfd == -1) {
+ return;
+ }
+
+ gstr = uefi_vars_to_json(uv);
+
+ rc = lseek(uv->jsonfd, 0, SEEK_SET);
+ if (rc < 0) {
+ warn_report("%s: lseek error", __func__);
+ return;
+ }
+
+ rc = ftruncate(uv->jsonfd, 0);
+ if (rc != 0) {
+ warn_report("%s: ftruncate error", __func__);
+ return;
+ }
+
+ rc = write(uv->jsonfd, gstr->str, gstr->len);
+ if (rc != gstr->len) {
+ warn_report("%s: write error", __func__);
+ return;
+ }
+
+ fsync(uv->jsonfd);
+}
+
+void uefi_vars_json_load(uefi_vars_state *uv, Error **errp)
+{
+ UefiVarStore *vs;
+ QObject *qobj;
+ Visitor *v;
+ char *str;
+ ssize_t len;
+ int rc;
+
+ if (uv->jsonfd == -1) {
+ return;
+ }
+
+ len = lseek(uv->jsonfd, 0, SEEK_END);
+ if (len < 0) {
+ warn_report("%s: lseek error", __func__);
+ return;
+ }
+ if (len == 0) {
+ /* empty file */
+ return;
+ }
+
+ str = g_malloc(len + 1);
+ lseek(uv->jsonfd, 0, SEEK_SET);
+ rc = read(uv->jsonfd, str, len);
+ if (rc != len) {
+ warn_report("%s: read error", __func__);
+ g_free(str);
+ return;
+ }
+ str[len] = 0;
+
+ qobj = qobject_from_json(str, errp);
+ v = qobject_input_visitor_new(qobj);
+ visit_type_UefiVarStore(v, NULL, &vs, errp);
+ visit_free(v);
+
+ if (!(*errp)) {
+ uefi_vars_from_qapi(uv, vs);
+ uefi_vars_update_storage(uv);
+ }
+
+ qapi_free_UefiVarStore(vs);
+ qobject_unref(qobj);
+ g_free(str);
+}
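
The two hex helpers above are exact inverses, so variable payloads survive a save/load round trip unchanged. A tiny illustration with example values:

uint8_t payload[2] = { 0xde, 0xad };
char *hex = generate_hexstr(payload, sizeof(payload));   /* -> "dead" */
uint8_t copy[2];
parse_hexstr(copy, hex, strlen(hex));                     /* copy == payload */
g_free(hex);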
diff --git a/hw/uefi/var-service-pkcs7-stub.c b/hw/uefi/var-service-pkcs7-stub.c
new file mode 100644
index 0000000..118cba4
--- /dev/null
+++ b/hw/uefi/var-service-pkcs7-stub.c
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - pkcs7 stubs
+ */
+#include "qemu/osdep.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist,
+ void **digest, uint32_t *digest_size,
+ mm_variable_access *va, void *data)
+{
+ return EFI_WRITE_PROTECTED;
+}
diff --git a/hw/uefi/var-service-pkcs7.c b/hw/uefi/var-service-pkcs7.c
new file mode 100644
index 0000000..32accf4
--- /dev/null
+++ b/hw/uefi/var-service-pkcs7.c
@@ -0,0 +1,436 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - pkcs7 verification
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/dma.h"
+
+#include <gnutls/gnutls.h>
+#include <gnutls/pkcs7.h>
+#include <gnutls/crypto.h>
+
+#include "hw/uefi/var-service.h"
+
+#define AUTHVAR_DIGEST_ALGO GNUTLS_DIG_SHA256
+#define AUTHVAR_DIGEST_SIZE 32
+
+/*
+ * Replicate the signed data for signature verification.
+ */
+static gnutls_datum_t *build_signed_data(mm_variable_access *va, void *data)
+{
+ variable_auth_2 *auth = data;
+ uint64_t data_offset = sizeof(efi_time) + auth->hdr_length;
+ uint16_t *name = (void *)va + sizeof(mm_variable_access);
+ gnutls_datum_t *sdata;
+ uint64_t pos = 0;
+
+ sdata = g_new(gnutls_datum_t, 1);
+ sdata->size = (va->name_size - 2
+ + sizeof(QemuUUID)
+ + sizeof(va->attributes)
+ + sizeof(auth->timestamp)
+ + va->data_size - data_offset);
+ sdata->data = g_malloc(sdata->size);
+
+ /* Variable Name (without terminating \0) */
+ memcpy(sdata->data + pos, name, va->name_size - 2);
+ pos += va->name_size - 2;
+
+ /* Variable Namespace Guid */
+ memcpy(sdata->data + pos, &va->guid, sizeof(va->guid));
+ pos += sizeof(va->guid);
+
+ /* Attributes */
+ memcpy(sdata->data + pos, &va->attributes, sizeof(va->attributes));
+ pos += sizeof(va->attributes);
+
+ /* TimeStamp */
+ memcpy(sdata->data + pos, &auth->timestamp, sizeof(auth->timestamp));
+ pos += sizeof(auth->timestamp);
+
+ /* Variable Content */
+ memcpy(sdata->data + pos, data + data_offset, va->data_size - data_offset);
+ pos += va->data_size - data_offset;
+
+ assert(pos == sdata->size);
+ return sdata;
+}
+
+/*
+ * See WrapPkcs7Data() in edk2.
+ *
+ * The UEFI spec allows pkcs7 signatures to be used without the envelope that
+ * identifies them as pkcs7 signatures. openssl and gnutls will not parse
+ * them without the envelope though, so add it if needed.
+ */
+static void wrap_pkcs7(gnutls_datum_t *pkcs7)
+{
+ static uint8_t signed_data_oid[9] = {
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02
+ };
+ gnutls_datum_t wrap;
+
+ if (pkcs7->data[4] == 0x06 &&
+ pkcs7->data[5] == 0x09 &&
+ memcmp(pkcs7->data + 6, signed_data_oid, sizeof(signed_data_oid)) == 0 &&
+ pkcs7->data[15] == 0x0a &&
+ pkcs7->data[16] == 0x82) {
+ return;
+ }
+
+ wrap.size = pkcs7->size + 19;
+ wrap.data = g_malloc(wrap.size);
+
+ wrap.data[0] = 0x30;
+ wrap.data[1] = 0x82;
+ wrap.data[2] = (wrap.size - 4) >> 8;
+ wrap.data[3] = (wrap.size - 4) & 0xff;
+ wrap.data[4] = 0x06;
+ wrap.data[5] = 0x09;
+ memcpy(wrap.data + 6, signed_data_oid, sizeof(signed_data_oid));
+
+ wrap.data[15] = 0xa0;
+ wrap.data[16] = 0x82;
+ wrap.data[17] = pkcs7->size >> 8;
+ wrap.data[18] = pkcs7->size & 0xff;
+ memcpy(wrap.data + 19, pkcs7->data, pkcs7->size);
+
+ g_free(pkcs7->data);
+ *pkcs7 = wrap;
+}
+
+static gnutls_datum_t *build_pkcs7(void *data)
+{
+ variable_auth_2 *auth = data;
+ gnutls_datum_t *pkcs7;
+
+ pkcs7 = g_new(gnutls_datum_t, 1);
+ pkcs7->size = auth->hdr_length - 24;
+ pkcs7->data = g_malloc(pkcs7->size);
+ memcpy(pkcs7->data, data + 16 + 24, pkcs7->size);
+
+ wrap_pkcs7(pkcs7);
+
+ return pkcs7;
+}
+
+/*
+ * Read UEFI signature database, store all x509 certificates found in a
+ * gnutls_x509_trust_list_t.
+ */
+static gnutls_x509_trust_list_t build_trust_list_sb(uefi_variable *var)
+{
+ gnutls_x509_trust_list_t tlist;
+ gnutls_datum_t cert_data;
+ gnutls_x509_crt_t cert;
+ uefi_vars_siglist siglist;
+ uefi_vars_cert *c;
+ int rc;
+
+ rc = gnutls_x509_trust_list_init(&tlist, 0);
+ if (rc < 0) {
+ warn_report("gnutls_x509_trust_list_init error: %s",
+ gnutls_strerror(rc));
+ return NULL;
+ }
+
+ uefi_vars_siglist_init(&siglist);
+ uefi_vars_siglist_parse(&siglist, var->data, var->data_size);
+
+ QTAILQ_FOREACH(c, &siglist.x509, next) {
+ cert_data.size = c->size;
+ cert_data.data = c->data;
+
+ rc = gnutls_x509_crt_init(&cert);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc));
+ break;
+ }
+ rc = gnutls_x509_crt_import(cert, &cert_data, GNUTLS_X509_FMT_DER);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_import error: %s",
+ gnutls_strerror(rc));
+ gnutls_x509_crt_deinit(cert);
+ break;
+ }
+ rc = gnutls_x509_trust_list_add_cas(tlist, &cert, 1, 0);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_import error: %s",
+ gnutls_strerror(rc));
+ gnutls_x509_crt_deinit(cert);
+ break;
+ }
+ }
+
+ uefi_vars_siglist_free(&siglist);
+
+ return tlist;
+}
+
+static int build_digest_authvar(gnutls_x509_crt_t signer,
+ gnutls_x509_crt_t root,
+ uint8_t *hash_digest)
+{
+ char *cn;
+ size_t cn_size = 0;
+ uint8_t fp[AUTHVAR_DIGEST_SIZE];
+ size_t fp_size = sizeof(fp);
+ gnutls_hash_hd_t hash;
+ int rc;
+
+ /* get signer CN */
+ rc = gnutls_x509_crt_get_dn_by_oid(signer, GNUTLS_OID_X520_COMMON_NAME,
+ 0, 0, NULL, &cn_size);
+ if (rc != GNUTLS_E_SHORT_MEMORY_BUFFER) {
+ warn_report("gnutls_x509_crt_get_dn_by_oid error #1: %s",
+ gnutls_strerror(rc));
+ return rc;
+ }
+
+ cn = g_malloc(cn_size);
+ rc = gnutls_x509_crt_get_dn_by_oid(signer, GNUTLS_OID_X520_COMMON_NAME,
+ 0, 0, cn, &cn_size);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_get_dn_by_oid error #2: %s",
+ gnutls_strerror(rc));
+ goto err;
+ }
+
+ /* get root certificate fingerprint */
+ rc = gnutls_x509_crt_get_fingerprint(root, AUTHVAR_DIGEST_ALGO,
+ fp, &fp_size);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_get_fingerprint error: %s",
+ gnutls_strerror(rc));
+ goto err;
+ }
+
+ /* digest both items */
+ rc = gnutls_hash_init(&hash, AUTHVAR_DIGEST_ALGO);
+ if (rc < 0) {
+ warn_report("gnutls_hash_init error: %s",
+ gnutls_strerror(rc));
+ goto err;
+ }
+ rc = gnutls_hash(hash, cn, cn_size);
+ if (rc < 0) {
+ warn_report("gnutls_hash error: %s",
+ gnutls_strerror(rc));
+ goto err;
+ }
+ rc = gnutls_hash(hash, fp, fp_size);
+ if (rc < 0) {
+ warn_report("gnutls_hash error: %s",
+ gnutls_strerror(rc));
+ goto err;
+ }
+ gnutls_hash_deinit(hash, hash_digest);
+
+ return 0;
+
+err:
+ g_free(cn);
+ return rc;
+}
+
+/*
+ * uefi spec 2.9, section 8.2.2
+ *
+ * For EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS variables which are
+ * NOT secure boot variables we should track the root certificate of the trust
+ * chain, and the subject CN of the signer certificate.
+ *
+ * So we store a digest of these two items so we can verify this. We also
+ * create a gnutls_x509_trust_list_t with the root certificate, so
+ * gnutls_pkcs7_verify() will pass (assuming the signature is otherwise
+ * correct).
+ */
+static gnutls_x509_trust_list_t build_trust_list_authvar(gnutls_pkcs7_t pkcs7,
+ uint8_t *hash_digest)
+{
+ gnutls_datum_t signer_data = { 0 };
+ gnutls_datum_t root_data = { 0 };
+ gnutls_x509_crt_t signer = NULL;
+ gnutls_x509_crt_t root = NULL;
+ gnutls_x509_trust_list_t tlist = NULL;
+ int n, rc;
+
+ n = gnutls_pkcs7_get_crt_count(pkcs7);
+
+ /* first is signer certificate */
+ rc = gnutls_pkcs7_get_crt_raw2(pkcs7, 0, &signer_data);
+ if (rc < 0) {
+ warn_report("gnutls_pkcs7_get_crt_raw2(0) error: %s",
+ gnutls_strerror(rc));
+ goto done;
+ }
+ rc = gnutls_x509_crt_init(&signer);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc));
+ goto done;
+ }
+ rc = gnutls_x509_crt_import(signer, &signer_data, GNUTLS_X509_FMT_DER);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_import error: %s",
+ gnutls_strerror(rc));
+ gnutls_x509_crt_deinit(signer);
+ goto done;
+ }
+
+ /* last is root-of-trust certificate (can be identical to signer) */
+ rc = gnutls_pkcs7_get_crt_raw2(pkcs7, n - 1, &root_data);
+ if (rc < 0) {
+ warn_report("gnutls_pkcs7_get_crt_raw2(%d) error: %s",
+ n - 1, gnutls_strerror(rc));
+ goto done;
+ }
+ rc = gnutls_x509_crt_init(&root);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc));
+ goto done;
+ }
+ rc = gnutls_x509_crt_import(root, &root_data, GNUTLS_X509_FMT_DER);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_import error: %s",
+ gnutls_strerror(rc));
+ goto done;
+ }
+
+ /* calc digest for signer CN + root cert */
+ rc = build_digest_authvar(signer, root, hash_digest);
+ if (rc < 0) {
+ goto done;
+ }
+
+ /* add root to trust list */
+ rc = gnutls_x509_trust_list_init(&tlist, 0);
+ if (rc < 0) {
+ warn_report("gnutls_x509_trust_list_init error: %s",
+ gnutls_strerror(rc));
+ goto done;
+ }
+ rc = gnutls_x509_trust_list_add_cas(tlist, &root, 1, 0);
+ if (rc < 0) {
+ warn_report("gnutls_x509_crt_import error: %s",
+ gnutls_strerror(rc));
+ gnutls_x509_trust_list_deinit(tlist, 1);
+ tlist = NULL;
+ goto done;
+ } else {
+ /* ownership passed to tlist */
+ root = NULL;
+ }
+
+done:
+ if (signer_data.data) {
+ gnutls_free(signer_data.data);
+ }
+ if (root_data.data) {
+ gnutls_free(root_data.data);
+ }
+ if (signer) {
+ gnutls_x509_crt_deinit(signer);
+ }
+ if (root) {
+ gnutls_x509_crt_deinit(root);
+ }
+ return tlist;
+}
+
+static void free_datum(gnutls_datum_t *ptr)
+{
+ if (!ptr) {
+ return;
+ }
+ g_free(ptr->data);
+ g_free(ptr);
+}
+
+static void gnutls_log_stderr(int level, const char *msg)
+{
+ if (strncmp(msg, "ASSERT:", 7) == 0) {
+ return;
+ }
+ fprintf(stderr, " %d: %s", level, msg);
+}
+
+/*
+ * pkcs7 signature verification (EFI_VARIABLE_AUTHENTICATION_2).
+ */
+efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist,
+ void **digest, uint32_t *digest_size,
+ mm_variable_access *va, void *data)
+{
+ gnutls_x509_trust_list_t tlist = NULL;
+ gnutls_datum_t *signed_data = NULL;
+ gnutls_datum_t *pkcs7_data = NULL;
+ gnutls_pkcs7_t pkcs7 = NULL;
+ efi_status status = EFI_SECURITY_VIOLATION;
+ int rc;
+
+ if (0) {
+ /* gnutls debug logging */
+ static bool first = true;
+
+ if (first) {
+ first = false;
+ gnutls_global_set_log_function(gnutls_log_stderr);
+ gnutls_global_set_log_level(99);
+ }
+ }
+
+ signed_data = build_signed_data(va, data);
+ pkcs7_data = build_pkcs7(data);
+
+ rc = gnutls_pkcs7_init(&pkcs7);
+ if (rc < 0) {
+ warn_report("gnutls_pkcs7_init error: %s", gnutls_strerror(rc));
+ goto out;
+ }
+
+ rc = gnutls_pkcs7_import(pkcs7, pkcs7_data, GNUTLS_X509_FMT_DER);
+ if (rc < 0) {
+ warn_report("gnutls_pkcs7_import error: %s", gnutls_strerror(rc));
+ goto out;
+ }
+
+ if (siglist) {
+ /* secure boot variables */
+ tlist = build_trust_list_sb(siglist);
+ } else if (digest && digest_size) {
+ /* other authenticated variables */
+ *digest_size = AUTHVAR_DIGEST_SIZE;
+ *digest = g_malloc(*digest_size);
+ tlist = build_trust_list_authvar(pkcs7, *digest);
+ } else {
+ /* should not happen */
+ goto out;
+ }
+
+ rc = gnutls_pkcs7_verify(pkcs7, tlist,
+ NULL, 0,
+ 0, signed_data,
+ GNUTLS_VERIFY_DISABLE_TIME_CHECKS |
+ GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS);
+ if (rc < 0) {
+ warn_report("gnutls_pkcs7_verify error: %s", gnutls_strerror(rc));
+ goto out;
+ }
+
+ /* check passed */
+ status = EFI_SUCCESS;
+
+out:
+ free_datum(signed_data);
+ free_datum(pkcs7_data);
+ if (tlist) {
+ gnutls_x509_trust_list_deinit(tlist, 1);
+ }
+ if (pkcs7) {
+ gnutls_pkcs7_deinit(pkcs7);
+ }
+ return status;
+}
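
The 19 bytes that wrap_pkcs7() prepends form a plain DER ContentInfo envelope around the raw signedData blob; spelled out byte by byte (derived from the code above, with N = the original pkcs7->size):

/*
 * 30 82 xx xx                  SEQUENCE, length = wrap.size - 4 = N + 15
 * 06 09 2a 86 48 86 f7 0d
 *       01 07 02               OID 1.2.840.113549.1.7.2 (pkcs7 signedData)
 * a0 82 yy yy                  [0] EXPLICIT, length = N
 * ...                          N bytes: the original pkcs7 data
 */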
diff --git a/hw/uefi/var-service-policy.c b/hw/uefi/var-service-policy.c
new file mode 100644
index 0000000..3b1155f
--- /dev/null
+++ b/hw/uefi/var-service-policy.c
@@ -0,0 +1,370 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - VarCheckPolicyLibMmiHandler implementation
+ *
+ * variable policy specs:
+ * https://github.com/tianocore/edk2/blob/master/MdeModulePkg/Library/VariablePolicyLib/ReadMe.md
+ */
+#include "qemu/osdep.h"
+#include "system/dma.h"
+#include "migration/vmstate.h"
+
+#include "hw/uefi/var-service.h"
+#include "hw/uefi/var-service-api.h"
+#include "hw/uefi/var-service-edk2.h"
+
+#include "trace/trace-hw_uefi.h"
+
+static void calc_policy(uefi_var_policy *pol);
+
+static int uefi_var_policy_post_load(void *opaque, int version_id)
+{
+ uefi_var_policy *pol = opaque;
+
+ calc_policy(pol);
+ return 0;
+}
+
+const VMStateDescription vmstate_uefi_var_policy = {
+ .name = "uefi-var-policy",
+ .post_load = uefi_var_policy_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(entry_size, uefi_var_policy),
+ VMSTATE_VBUFFER_ALLOC_UINT32(entry, uefi_var_policy,
+ 0, NULL, entry_size),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void print_policy_entry(variable_policy_entry *pe)
+{
+ uint16_t *name = (void *)pe + pe->offset_to_name;
+
+ fprintf(stderr, "%s:\n", __func__);
+
+ fprintf(stderr, " name Ā“");
+ while (*name) {
+ fprintf(stderr, "%c", *name);
+ name++;
+ }
+ fprintf(stderr, "', version=%d.%d, size=%d\n",
+ pe->version >> 16, pe->version & 0xffff, pe->size);
+
+ if (pe->min_size) {
+ fprintf(stderr, " size min=%d\n", pe->min_size);
+ }
+ if (pe->max_size != UINT32_MAX) {
+ fprintf(stderr, " size max=%u\n", pe->max_size);
+ }
+ if (pe->attributes_must_have) {
+ fprintf(stderr, " attr must=0x%x\n", pe->attributes_must_have);
+ }
+ if (pe->attributes_cant_have) {
+ fprintf(stderr, " attr cant=0x%x\n", pe->attributes_cant_have);
+ }
+ if (pe->lock_policy_type) {
+ fprintf(stderr, " lock policy type %d\n", pe->lock_policy_type);
+ }
+}
+
+static gboolean wildcard_str_equal(uefi_var_policy *pol,
+ uefi_variable *var)
+{
+ return uefi_str_equal_ex(pol->name, pol->name_size,
+ var->name, var->name_size,
+ true);
+}
+
+static uefi_var_policy *find_policy(uefi_vars_state *uv, QemuUUID guid,
+ uint16_t *name, uint64_t name_size)
+{
+ uefi_var_policy *pol;
+
+ QTAILQ_FOREACH(pol, &uv->var_policies, next) {
+ if (!qemu_uuid_is_equal(&pol->entry->namespace, &guid)) {
+ continue;
+ }
+ if (!uefi_str_equal(pol->name, pol->name_size,
+ name, name_size)) {
+ continue;
+ }
+ return pol;
+ }
+ return NULL;
+}
+
+static uefi_var_policy *wildcard_find_policy(uefi_vars_state *uv,
+ uefi_variable *var)
+{
+ uefi_var_policy *pol;
+
+ QTAILQ_FOREACH(pol, &uv->var_policies, next) {
+ if (!qemu_uuid_is_equal(&pol->entry->namespace, &var->guid)) {
+ continue;
+ }
+ if (!wildcard_str_equal(pol, var)) {
+ continue;
+ }
+ return pol;
+ }
+ return NULL;
+}
+
+static void calc_policy(uefi_var_policy *pol)
+{
+ variable_policy_entry *pe = pol->entry;
+ unsigned int i;
+
+ pol->name = (void *)pol->entry + pe->offset_to_name;
+ pol->name_size = pe->size - pe->offset_to_name;
+
+ for (i = 0; i < pol->name_size / 2; i++) {
+ if (pol->name[i] == '#') {
+ pol->hashmarks++;
+ }
+ }
+}
+
+uefi_var_policy *uefi_vars_add_policy(uefi_vars_state *uv,
+ variable_policy_entry *pe)
+{
+ uefi_var_policy *pol, *p;
+
+ pol = g_new0(uefi_var_policy, 1);
+ pol->entry = g_malloc(pe->size);
+ memcpy(pol->entry, pe, pe->size);
+ pol->entry_size = pe->size;
+
+ calc_policy(pol);
+
+ /* keep list sorted by priority, add to tail of priority group */
+ QTAILQ_FOREACH(p, &uv->var_policies, next) {
+ if ((p->hashmarks > pol->hashmarks) ||
+ (!p->name_size && pol->name_size)) {
+ QTAILQ_INSERT_BEFORE(p, pol, next);
+ return pol;
+ }
+ }
+
+ QTAILQ_INSERT_TAIL(&uv->var_policies, pol, next);
+ return pol;
+}
+
+efi_status uefi_vars_policy_check(uefi_vars_state *uv,
+ uefi_variable *var,
+ gboolean is_newvar)
+{
+ uefi_var_policy *pol;
+ variable_policy_entry *pe;
+ variable_lock_on_var_state *lvarstate;
+ uint16_t *lvarname;
+ size_t lvarnamesize;
+ uefi_variable *lvar;
+
+ if (!uv->end_of_dxe) {
+ return EFI_SUCCESS;
+ }
+
+ pol = wildcard_find_policy(uv, var);
+ if (!pol) {
+ return EFI_SUCCESS;
+ }
+ pe = pol->entry;
+
+ uefi_trace_variable(__func__, var->guid, var->name, var->name_size);
+ print_policy_entry(pe);
+
+ if ((var->attributes & pe->attributes_must_have) != pe->attributes_must_have) {
+ trace_uefi_vars_policy_deny("must-have-attr");
+ return EFI_INVALID_PARAMETER;
+ }
+ if ((var->attributes & pe->attributes_cant_have) != 0) {
+ trace_uefi_vars_policy_deny("cant-have-attr");
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (var->data_size < pe->min_size) {
+ trace_uefi_vars_policy_deny("min-size");
+ return EFI_INVALID_PARAMETER;
+ }
+ if (var->data_size > pe->max_size) {
+ trace_uefi_vars_policy_deny("max-size");
+ return EFI_INVALID_PARAMETER;
+ }
+
+ switch (pe->lock_policy_type) {
+ case VARIABLE_POLICY_TYPE_NO_LOCK:
+ break;
+
+ case VARIABLE_POLICY_TYPE_LOCK_NOW:
+ trace_uefi_vars_policy_deny("lock-now");
+ return EFI_WRITE_PROTECTED;
+
+ case VARIABLE_POLICY_TYPE_LOCK_ON_CREATE:
+ if (!is_newvar) {
+ trace_uefi_vars_policy_deny("lock-on-create");
+ return EFI_WRITE_PROTECTED;
+ }
+ break;
+
+ case VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE:
+ lvarstate = (void *)pol->entry + sizeof(*pe);
+ lvarname = (void *)pol->entry + sizeof(*pe) + sizeof(*lvarstate);
+ lvarnamesize = pe->offset_to_name - sizeof(*pe) - sizeof(*lvarstate);
+
+ uefi_trace_variable(__func__, lvarstate->namespace,
+ lvarname, lvarnamesize);
+ lvar = uefi_vars_find_variable(uv, lvarstate->namespace,
+ lvarname, lvarnamesize);
+ if (lvar && lvar->data_size == 1) {
+ uint8_t *value = lvar->data;
+ if (lvarstate->value == *value) {
+ return EFI_WRITE_PROTECTED;
+ }
+ }
+ break;
+ }
+
+ return EFI_SUCCESS;
+}
+
+void uefi_vars_policies_clear(uefi_vars_state *uv)
+{
+ uefi_var_policy *pol;
+
+ while (!QTAILQ_EMPTY(&uv->var_policies)) {
+ pol = QTAILQ_FIRST(&uv->var_policies);
+ QTAILQ_REMOVE(&uv->var_policies, pol, next);
+ g_free(pol->entry);
+ g_free(pol);
+ }
+}
+
+static size_t uefi_vars_mm_policy_error(mm_header *mhdr,
+ mm_check_policy *mchk,
+ uint64_t status)
+{
+ mchk->result = status;
+ return sizeof(*mchk);
+}
+
+static uint32_t uefi_vars_mm_check_policy_is_enabled(uefi_vars_state *uv,
+ mm_header *mhdr,
+ mm_check_policy *mchk,
+ void *func)
+{
+ mm_check_policy_is_enabled *mpar = func;
+ size_t length;
+
+ length = sizeof(*mchk) + sizeof(*mpar);
+ if (mhdr->length < length) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+
+ mpar->state = TRUE;
+ mchk->result = EFI_SUCCESS;
+ return sizeof(*mchk);
+}
+
+static uint32_t uefi_vars_mm_check_policy_register(uefi_vars_state *uv,
+ mm_header *mhdr,
+ mm_check_policy *mchk,
+ void *func)
+{
+ variable_policy_entry *pe = func;
+ uefi_var_policy *pol;
+ uint64_t length;
+
+ if (uadd64_overflow(sizeof(*mchk), pe->size, &length)) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+ if (pe->size < sizeof(*pe)) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+ if (pe->offset_to_name < sizeof(*pe)) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (pe->lock_policy_type == VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE &&
+ pe->offset_to_name < sizeof(*pe) + sizeof(variable_lock_on_var_state)) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+
+ /* check space for minimum string length */
+ if (pe->size < (size_t)pe->offset_to_name) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (!uefi_str_is_valid((void *)pe + pe->offset_to_name,
+ pe->size - pe->offset_to_name,
+ false)) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_INVALID_PARAMETER);
+ }
+
+ pol = find_policy(uv, pe->namespace,
+ (void *)pe + pe->offset_to_name,
+ pe->size - pe->offset_to_name);
+ if (pol) {
+ return uefi_vars_mm_policy_error(mhdr, mchk, EFI_ALREADY_STARTED);
+ }
+
+ uefi_vars_add_policy(uv, pe);
+
+ mchk->result = EFI_SUCCESS;
+ return sizeof(*mchk);
+}
+
+uint32_t uefi_vars_mm_check_policy_proto(uefi_vars_state *uv)
+{
+ static const char *fnames[] = {
+ "zero",
+ "disable",
+ "is-enabled",
+ "register",
+ "dump",
+ "lock",
+ };
+ const char *fname;
+ mm_header *mhdr = (mm_header *) uv->buffer;
+ mm_check_policy *mchk = (mm_check_policy *) (uv->buffer + sizeof(*mhdr));
+ void *func = (uv->buffer + sizeof(*mhdr) + sizeof(*mchk));
+
+ if (mhdr->length < sizeof(*mchk)) {
+ return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE;
+ }
+
+ fname = mchk->command < ARRAY_SIZE(fnames)
+ ? fnames[mchk->command]
+ : "unknown";
+ trace_uefi_vars_policy_cmd(fname);
+
+ switch (mchk->command) {
+ case VAR_CHECK_POLICY_COMMAND_DISABLE:
+ mchk->result = EFI_UNSUPPORTED;
+ break;
+ case VAR_CHECK_POLICY_COMMAND_IS_ENABLED:
+ uefi_vars_mm_check_policy_is_enabled(uv, mhdr, mchk, func);
+ break;
+ case VAR_CHECK_POLICY_COMMAND_REGISTER:
+ if (uv->policy_locked) {
+ mchk->result = EFI_WRITE_PROTECTED;
+ } else {
+ uefi_vars_mm_check_policy_register(uv, mhdr, mchk, func);
+ }
+ break;
+ case VAR_CHECK_POLICY_COMMAND_LOCK:
+ uv->policy_locked = true;
+ mchk->result = EFI_SUCCESS;
+ break;
+ default:
+ mchk->result = EFI_UNSUPPORTED;
+ break;
+ }
+
+ uefi_trace_status(__func__, mchk->result);
+ return UEFI_VARS_STS_SUCCESS;
+}
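
Policy names may contain '#' wildcards. calc_policy() counts them and uefi_vars_add_policy() keeps the list ordered so that wildcard_find_policy() hits the most specific entry first. An illustrative ordering, assuming '#' matches a single hex digit and that an entry with an empty name covers the whole namespace, as described in the edk2 variable-policy ReadMe linked above:

/*
 * registered entries (same namespace)    matches              lookup order
 *   "SecureBoot"    (no '#')             that exact name      first
 *   "Boot####"      (four '#')           Boot0000..BootFFFF   second
 *   ""              (empty name)         any variable         last
 */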
diff --git a/hw/uefi/var-service-siglist.c b/hw/uefi/var-service-siglist.c
new file mode 100644
index 0000000..8948f1b
--- /dev/null
+++ b/hw/uefi/var-service-siglist.c
@@ -0,0 +1,212 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - parse and generate efi signature databases
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+/*
+ * Add x509 certificate to list (with duplicate check).
+ */
+static void uefi_vars_siglist_add_x509(uefi_vars_siglist *siglist,
+ QemuUUID *owner,
+ void *data, uint64_t size)
+{
+ uefi_vars_cert *c;
+
+ QTAILQ_FOREACH(c, &siglist->x509, next) {
+ if (c->size != size) {
+ continue;
+ }
+ if (memcmp(c->data, data, size) != 0) {
+ continue;
+ }
+ return;
+ }
+
+ c = g_malloc(sizeof(*c) + size);
+ c->owner = *owner;
+ c->size = size;
+ memcpy(c->data, data, size);
+ QTAILQ_INSERT_TAIL(&siglist->x509, c, next);
+}
+
+/*
+ * Add sha256 hash to list (with duplicate check).
+ */
+static void uefi_vars_siglist_add_sha256(uefi_vars_siglist *siglist,
+ QemuUUID *owner,
+ void *data)
+{
+ uefi_vars_hash *h;
+
+ QTAILQ_FOREACH(h, &siglist->sha256, next) {
+ if (memcmp(h->data, data, 32) != 0) {
+ continue;
+ }
+ return;
+ }
+
+ h = g_malloc(sizeof(*h) + 32);
+ h->owner = *owner;
+ memcpy(h->data, data, 32);
+ QTAILQ_INSERT_TAIL(&siglist->sha256, h, next);
+}
+
+void uefi_vars_siglist_init(uefi_vars_siglist *siglist)
+{
+ memset(siglist, 0, sizeof(*siglist));
+ QTAILQ_INIT(&siglist->x509);
+ QTAILQ_INIT(&siglist->sha256);
+}
+
+void uefi_vars_siglist_free(uefi_vars_siglist *siglist)
+{
+ uefi_vars_cert *c, *cs;
+ uefi_vars_hash *h, *hs;
+
+ QTAILQ_FOREACH_SAFE(c, &siglist->x509, next, cs) {
+ QTAILQ_REMOVE(&siglist->x509, c, next);
+ g_free(c);
+ }
+ QTAILQ_FOREACH_SAFE(h, &siglist->sha256, next, hs) {
+ QTAILQ_REMOVE(&siglist->sha256, h, next);
+ g_free(h);
+ }
+}
+
+/*
+ * Parse UEFI signature list.
+ */
+void uefi_vars_siglist_parse(uefi_vars_siglist *siglist,
+ void *data, uint64_t size)
+{
+ efi_siglist *efilist;
+ uint64_t start;
+
+ while (size) {
+ if (size < sizeof(*efilist)) {
+ break;
+ }
+ efilist = data;
+ if (size < efilist->siglist_size) {
+ break;
+ }
+
+ if (uadd64_overflow(sizeof(*efilist), efilist->header_size, &start)) {
+ break;
+ }
+ if (efilist->sig_size <= sizeof(QemuUUID)) {
+ break;
+ }
+
+ if (qemu_uuid_is_equal(&efilist->guid_type, &EfiCertX509Guid)) {
+ if (start + efilist->sig_size != efilist->siglist_size) {
+ break;
+ }
+ uefi_vars_siglist_add_x509(siglist,
+ (QemuUUID *)(data + start),
+ data + start + sizeof(QemuUUID),
+ efilist->sig_size - sizeof(QemuUUID));
+
+ } else if (qemu_uuid_is_equal(&efilist->guid_type, &EfiCertSha256Guid)) {
+ if (efilist->sig_size != sizeof(QemuUUID) + 32) {
+ break;
+ }
+ if (start + efilist->sig_size > efilist->siglist_size) {
+ break;
+ }
+ while (start <= efilist->siglist_size - efilist->sig_size) {
+ uefi_vars_siglist_add_sha256(siglist,
+ (QemuUUID *)(data + start),
+ data + start + sizeof(QemuUUID));
+ start += efilist->sig_size;
+ }
+
+ } else {
+ QemuUUID be = qemu_uuid_bswap(efilist->guid_type);
+ char *str_uuid = qemu_uuid_unparse_strdup(&be);
+ warn_report("%s: unknown type (%s)", __func__, str_uuid);
+ g_free(str_uuid);
+ }
+
+ data += efilist->siglist_size;
+ size -= efilist->siglist_size;
+ }
+}
+
+uint64_t uefi_vars_siglist_blob_size(uefi_vars_siglist *siglist)
+{
+ uefi_vars_cert *c;
+ uefi_vars_hash *h;
+ uint64_t size = 0;
+
+ QTAILQ_FOREACH(c, &siglist->x509, next) {
+ size += sizeof(efi_siglist) + sizeof(QemuUUID) + c->size;
+ }
+
+ if (!QTAILQ_EMPTY(&siglist->sha256)) {
+ size += sizeof(efi_siglist);
+ QTAILQ_FOREACH(h, &siglist->sha256, next) {
+ size += sizeof(QemuUUID) + 32;
+ }
+ }
+
+ return size;
+}
+
+/*
+ * Generate UEFI signature list.
+ */
+void uefi_vars_siglist_blob_generate(uefi_vars_siglist *siglist,
+ void *data, uint64_t size)
+{
+ uefi_vars_cert *c;
+ uefi_vars_hash *h;
+ efi_siglist *efilist;
+ uint64_t pos = 0, start;
+ uint32_t i;
+
+ QTAILQ_FOREACH(c, &siglist->x509, next) {
+ efilist = data + pos;
+ efilist->guid_type = EfiCertX509Guid;
+ efilist->sig_size = sizeof(QemuUUID) + c->size;
+ efilist->header_size = 0;
+
+ start = pos + sizeof(efi_siglist);
+ memcpy(data + start,
+ &c->owner, sizeof(QemuUUID));
+ memcpy(data + start + sizeof(QemuUUID),
+ c->data, c->size);
+
+ efilist->siglist_size = sizeof(efi_siglist) + efilist->sig_size;
+ pos += efilist->siglist_size;
+ }
+
+ if (!QTAILQ_EMPTY(&siglist->sha256)) {
+ efilist = data + pos;
+ efilist->guid_type = EfiCertSha256Guid;
+ efilist->sig_size = sizeof(QemuUUID) + 32;
+ efilist->header_size = 0;
+
+ i = 0;
+ start = pos + sizeof(efi_siglist);
+ QTAILQ_FOREACH(h, &siglist->sha256, next) {
+ memcpy(data + start + efilist->sig_size * i,
+ &h->owner, sizeof(QemuUUID));
+ memcpy(data + start + efilist->sig_size * i + sizeof(QemuUUID),
+ h->data, 32);
+ i++;
+ }
+
+ efilist->siglist_size = sizeof(efi_siglist) + efilist->sig_size * i;
+ pos += efilist->siglist_size;
+ }
+
+ assert(pos == size);
+}
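
For reference, the layout uefi_vars_siglist_blob_generate() produces, matching the size computed by uefi_vars_siglist_blob_size() above: one signature list per x509 certificate, plus (if any hashes are present) a single trailing list holding all sha256 entries:

/*
 * for each x509 certificate:
 *   efi_siglist  { guid_type = EfiCertX509Guid, header_size = 0,
 *                  sig_size = sizeof(QemuUUID) + cert size }
 *   owner guid, certificate data
 *
 * if the sha256 list is not empty:
 *   efi_siglist  { guid_type = EfiCertSha256Guid, header_size = 0,
 *                  sig_size = sizeof(QemuUUID) + 32 }
 *   owner guid, 32-byte hash      <- repeated once per hash
 */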
diff --git a/hw/uefi/var-service-sysbus.c b/hw/uefi/var-service-sysbus.c
new file mode 100644
index 0000000..a5aa218
--- /dev/null
+++ b/hw/uefi/var-service-sysbus.c
@@ -0,0 +1,124 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - sysbus variant.
+ */
+#include "qemu/osdep.h"
+#include "migration/vmstate.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+
+#include "hw/uefi/hardware-info.h"
+#include "hw/uefi/var-service.h"
+#include "hw/uefi/var-service-api.h"
+
+OBJECT_DECLARE_SIMPLE_TYPE(uefi_vars_sysbus_state, UEFI_VARS_SYSBUS)
+
+struct uefi_vars_sysbus_state {
+ SysBusDevice parent_obj;
+ struct uefi_vars_state state;
+};
+
+static const VMStateDescription vmstate_uefi_vars_sysbus = {
+ .name = TYPE_UEFI_VARS_SYSBUS,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(state, uefi_vars_sysbus_state, 0,
+ vmstate_uefi_vars, uefi_vars_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const Property uefi_vars_sysbus_properties[] = {
+ DEFINE_PROP_SIZE("size", uefi_vars_sysbus_state, state.max_storage,
+ 256 * 1024),
+ DEFINE_PROP_STRING("jsonfile", uefi_vars_sysbus_state, state.jsonfile),
+ DEFINE_PROP_BOOL("force-secure-boot", uefi_vars_sysbus_state,
+ state.force_secure_boot, false),
+ DEFINE_PROP_BOOL("disable-custom-mode", uefi_vars_sysbus_state,
+ state.disable_custom_mode, false),
+ DEFINE_PROP_BOOL("use-pio", uefi_vars_sysbus_state,
+ state.use_pio, false),
+};
+
+static void uefi_vars_sysbus_init(Object *obj)
+{
+ uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(obj);
+
+ uefi_vars_init(obj, &uv->state);
+}
+
+static void uefi_vars_sysbus_reset(DeviceState *dev)
+{
+ uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(dev);
+
+ uefi_vars_hard_reset(&uv->state);
+}
+
+static void uefi_vars_sysbus_realize(DeviceState *dev, Error **errp)
+{
+ uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(dev);
+ SysBusDevice *sysbus = SYS_BUS_DEVICE(dev);
+
+ sysbus_init_mmio(sysbus, &uv->state.mr);
+ uefi_vars_realize(&uv->state, errp);
+}
+
+static void uefi_vars_sysbus_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = uefi_vars_sysbus_realize;
+ dc->vmsd = &vmstate_uefi_vars_sysbus;
+ dc->user_creatable = true;
+ device_class_set_legacy_reset(dc, uefi_vars_sysbus_reset);
+ device_class_set_props(dc, uefi_vars_sysbus_properties);
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+/* generic: hardware discovery via FDT */
+static const TypeInfo uefi_vars_sysbus_info = {
+ .name = TYPE_UEFI_VARS_SYSBUS,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(uefi_vars_sysbus_state),
+ .instance_init = uefi_vars_sysbus_init,
+ .class_init = uefi_vars_sysbus_class_init,
+};
+module_obj(TYPE_UEFI_VARS_SYSBUS);
+
+static void uefi_vars_x64_realize(DeviceState *dev, Error **errp)
+{
+ HARDWARE_INFO_SIMPLE_DEVICE hwinfo = {
+ .mmio_address = cpu_to_le64(0xfef10000),
+ };
+ SysBusDevice *sysbus = SYS_BUS_DEVICE(dev);
+
+ uefi_vars_sysbus_realize(dev, errp);
+
+ hardware_info_register(HardwareInfoQemuUefiVars,
+ &hwinfo, sizeof(hwinfo));
+ sysbus_mmio_map(sysbus, 0, hwinfo.mmio_address);
+}
+
+static void uefi_vars_x64_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = uefi_vars_x64_realize;
+}
+
+/* x64: hardware discovery via etc/hardware-info fw_cfg */
+static const TypeInfo uefi_vars_x64_info = {
+ .name = TYPE_UEFI_VARS_X64,
+ .parent = TYPE_UEFI_VARS_SYSBUS,
+ .class_init = uefi_vars_x64_class_init,
+};
+module_obj(TYPE_UEFI_VARS_X64);
+
+static void uefi_vars_sysbus_register_types(void)
+{
+ type_register_static(&uefi_vars_sysbus_info);
+ type_register_static(&uefi_vars_x64_info);
+}
+
+type_init(uefi_vars_sysbus_register_types)
diff --git a/hw/uefi/var-service-utils.c b/hw/uefi/var-service-utils.c
new file mode 100644
index 0000000..c9ef465
--- /dev/null
+++ b/hw/uefi/var-service-utils.c
@@ -0,0 +1,241 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - helper functions for ucs2 strings and tracing
+ */
+#include "qemu/osdep.h"
+#include "system/dma.h"
+
+#include "hw/uefi/var-service.h"
+
+#include "trace/trace-hw_uefi.h"
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * string helper functions.
+ *
+ * Most of the time UEFI UCS-2 strings are NUL-terminated, but not
+ * always (variable policies are one example).
+ */
+
+gboolean uefi_str_is_valid(const uint16_t *str, size_t len,
+ gboolean must_be_null_terminated)
+{
+ size_t pos = 0;
+
+ for (;;) {
+ if (pos == len) {
+ if (must_be_null_terminated) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+ switch (str[pos]) {
+ case 0:
+ /* end of string */
+ return true;
+ case 0xd800 ... 0xdfff:
+ /* reject surrogates */
+ return false;
+ default:
+ /* char is good, check next */
+ break;
+ }
+ pos++;
+ }
+}
+
+size_t uefi_strlen(const uint16_t *str, size_t len)
+{
+ size_t pos = 0;
+
+ for (;;) {
+ if (pos == len) {
+ return pos;
+ }
+ if (str[pos] == 0) {
+ return pos;
+ }
+ pos++;
+ }
+}
+
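+/*
+ * Compare two UCS-2 strings, sizes given in bytes.  With
+ * wildcards_in_a enabled a '#' character in string a matches any hex
+ * digit in string b.
+ */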
+gboolean uefi_str_equal_ex(const uint16_t *a, size_t alen,
+ const uint16_t *b, size_t blen,
+ gboolean wildcards_in_a)
+{
+ size_t pos = 0;
+
+ alen = alen / 2;
+ blen = blen / 2;
+ for (;;) {
+ if (pos == alen && pos == blen) {
+ return true;
+ }
+ if (pos == alen && b[pos] == 0) {
+ return true;
+ }
+ if (pos == blen && a[pos] == 0) {
+ return true;
+ }
+ if (pos == alen || pos == blen) {
+ return false;
+ }
+ if (a[pos] == 0 && b[pos] == 0) {
+ return true;
+ }
+
+ if (wildcards_in_a && a[pos] == '#') {
+ if (!isxdigit(b[pos])) {
+ return false;
+ }
+ } else {
+ if (a[pos] != b[pos]) {
+ return false;
+ }
+ }
+ pos++;
+ }
+}
+
+gboolean uefi_str_equal(const uint16_t *a, size_t alen,
+ const uint16_t *b, size_t blen)
+{
+ return uefi_str_equal_ex(a, alen, b, blen, false);
+}
+
+char *uefi_ucs2_to_ascii(const uint16_t *ucs2, uint64_t ucs2_size)
+{
+ char *str = g_malloc0(ucs2_size / 2 + 1);
+ int i;
+
+ for (i = 0; i * 2 < ucs2_size; i++) {
+ if (ucs2[i] == 0) {
+ break;
+ }
+ if (ucs2[i] < 128) {
+ str[i] = ucs2[i];
+ } else {
+ str[i] = '?';
+ }
+ }
+ str[i] = 0;
+ return str;
+}
+
+/* ------------------------------------------------------------------ */
+/* time helper functions */
+
+int uefi_time_compare(efi_time *a, efi_time *b)
+{
+ if (a->year < b->year) {
+ return -1;
+ }
+ if (a->year > b->year) {
+ return 1;
+ }
+
+ if (a->month < b->month) {
+ return -1;
+ }
+ if (a->month > b->month) {
+ return 1;
+ }
+
+ if (a->day < b->day) {
+ return -1;
+ }
+ if (a->day > b->day) {
+ return 1;
+ }
+
+ if (a->hour < b->hour) {
+ return -1;
+ }
+ if (a->hour > b->hour) {
+ return 1;
+ }
+
+ if (a->minute < b->minute) {
+ return -1;
+ }
+ if (a->minute > b->minute) {
+ return 1;
+ }
+
+ if (a->second < b->second) {
+ return -1;
+ }
+ if (a->second > b->second) {
+ return 1;
+ }
+
+ if (a->nanosecond < b->nanosecond) {
+ return -1;
+ }
+ if (a->nanosecond > b->nanosecond) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* tracing helper functions */
+
+void uefi_trace_variable(const char *action, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size)
+{
+ QemuUUID be = qemu_uuid_bswap(guid);
+ char *str_uuid = qemu_uuid_unparse_strdup(&be);
+ char *str_name = uefi_ucs2_to_ascii(name, name_size);
+
+ trace_uefi_variable(action, str_name, name_size, str_uuid);
+
+ g_free(str_name);
+ g_free(str_uuid);
+}
+
+void uefi_trace_status(const char *action, efi_status status)
+{
+ switch (status) {
+ case EFI_SUCCESS:
+ trace_uefi_status(action, "success");
+ break;
+ case EFI_INVALID_PARAMETER:
+ trace_uefi_status(action, "invalid parameter");
+ break;
+ case EFI_UNSUPPORTED:
+ trace_uefi_status(action, "unsupported");
+ break;
+ case EFI_BAD_BUFFER_SIZE:
+ trace_uefi_status(action, "bad buffer size");
+ break;
+ case EFI_BUFFER_TOO_SMALL:
+ trace_uefi_status(action, "buffer too small");
+ break;
+ case EFI_WRITE_PROTECTED:
+ trace_uefi_status(action, "write protected");
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ trace_uefi_status(action, "out of resources");
+ break;
+ case EFI_NOT_FOUND:
+ trace_uefi_status(action, "not found");
+ break;
+ case EFI_ACCESS_DENIED:
+ trace_uefi_status(action, "access denied");
+ break;
+ case EFI_ALREADY_STARTED:
+ trace_uefi_status(action, "already started");
+ break;
+ case EFI_SECURITY_VIOLATION:
+ trace_uefi_status(action, "security violation");
+ break;
+ default:
+ trace_uefi_status(action, "unknown error");
+ break;
+ }
+}
diff --git a/hw/uefi/var-service-vars.c b/hw/uefi/var-service-vars.c
new file mode 100644
index 0000000..7f98d77
--- /dev/null
+++ b/hw/uefi/var-service-vars.c
@@ -0,0 +1,725 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi vars device - EfiSmmVariableProtocol implementation
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/dma.h"
+#include "migration/vmstate.h"
+
+#include "hw/uefi/var-service.h"
+#include "hw/uefi/var-service-api.h"
+#include "hw/uefi/var-service-edk2.h"
+
+#include "trace/trace-hw_uefi.h"
+
+#define EFI_VARIABLE_ATTRIBUTE_SUPPORTED \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS | \
+ EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
+ EFI_VARIABLE_APPEND_WRITE)
+
+
+const VMStateDescription vmstate_uefi_time = {
+ .name = "uefi-time",
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(year, efi_time),
+ VMSTATE_UINT8(month, efi_time),
+ VMSTATE_UINT8(day, efi_time),
+ VMSTATE_UINT8(hour, efi_time),
+ VMSTATE_UINT8(minute, efi_time),
+ VMSTATE_UINT8(second, efi_time),
+ VMSTATE_UINT32(nanosecond, efi_time),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+const VMStateDescription vmstate_uefi_variable = {
+ .name = "uefi-variable",
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY_V(guid.data, uefi_variable, sizeof(QemuUUID), 0),
+ VMSTATE_UINT32(name_size, uefi_variable),
+ VMSTATE_UINT32(data_size, uefi_variable),
+ VMSTATE_UINT32(attributes, uefi_variable),
+ VMSTATE_VBUFFER_ALLOC_UINT32(name, uefi_variable, 0, NULL, name_size),
+ VMSTATE_VBUFFER_ALLOC_UINT32(data, uefi_variable, 0, NULL, data_size),
+ VMSTATE_STRUCT(time, uefi_variable, 0, vmstate_uefi_time, efi_time),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+uefi_variable *uefi_vars_find_variable(uefi_vars_state *uv, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size)
+{
+ uefi_variable *var;
+
+ QTAILQ_FOREACH(var, &uv->variables, next) {
+ if (!uefi_str_equal(var->name, var->name_size,
+ name, name_size)) {
+ continue;
+ }
+ if (!qemu_uuid_is_equal(&var->guid, &guid)) {
+ continue;
+ }
+ if (!var->data_size) {
+ /* in process of being created/updated */
+ continue;
+ }
+ return var;
+ }
+ return NULL;
+}
+
+static uefi_variable *add_variable(uefi_vars_state *uv, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size,
+ uint32_t attributes)
+{
+ uefi_variable *var;
+
+ var = g_new0(uefi_variable, 1);
+ var->guid = guid;
+ var->name = g_malloc(name_size);
+ memcpy(var->name, name, name_size);
+ var->name_size = name_size;
+ var->attributes = attributes;
+
+ var->attributes &= ~EFI_VARIABLE_APPEND_WRITE;
+
+ QTAILQ_INSERT_TAIL(&uv->variables, var, next);
+ return var;
+}
+
+static void del_variable(uefi_vars_state *uv, uefi_variable *var)
+{
+ if (!var) {
+ return;
+ }
+
+ QTAILQ_REMOVE(&uv->variables, var, next);
+ g_free(var->data);
+ g_free(var->name);
+ g_free(var->digest);
+ g_free(var);
+}
+
+static size_t variable_size(uefi_variable *var)
+{
+ size_t size;
+
+ size = sizeof(*var);
+ size += var->name_size;
+ size += var->data_size;
+ size += var->digest_size;
+ return size;
+}
+
+void uefi_vars_set_variable(uefi_vars_state *uv, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size,
+ uint32_t attributes,
+ void *data, uint64_t data_size)
+{
+ uefi_variable *old_var, *new_var;
+
+ uefi_trace_variable(__func__, guid, name, name_size);
+
+ old_var = uefi_vars_find_variable(uv, guid, name, name_size);
+ if (old_var) {
+ uv->used_storage -= variable_size(old_var);
+ del_variable(uv, old_var);
+ }
+
+ new_var = add_variable(uv, guid, name, name_size, attributes);
+ new_var->data = g_malloc(data_size);
+ new_var->data_size = data_size;
+ memcpy(new_var->data, data, data_size);
+ uv->used_storage += variable_size(new_var);
+}
+
+void uefi_vars_clear_volatile(uefi_vars_state *uv)
+{
+ uefi_variable *var, *n;
+
+ QTAILQ_FOREACH_SAFE(var, &uv->variables, next, n) {
+ if (var->attributes & EFI_VARIABLE_NON_VOLATILE) {
+ continue;
+ }
+ uv->used_storage -= variable_size(var);
+ del_variable(uv, var);
+ }
+}
+
+void uefi_vars_clear_all(uefi_vars_state *uv)
+{
+ uefi_variable *var, *n;
+
+ QTAILQ_FOREACH_SAFE(var, &uv->variables, next, n) {
+ del_variable(uv, var);
+ }
+ uv->used_storage = 0;
+}
+
+void uefi_vars_update_storage(uefi_vars_state *uv)
+{
+ uefi_variable *var;
+
+ uv->used_storage = 0;
+ QTAILQ_FOREACH(var, &uv->variables, next) {
+ uv->used_storage += variable_size(var);
+ }
+}
+
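+/*
+ * Check variable access permissions: before exit-boot-service only
+ * variables with BOOTSERVICE_ACCESS are visible, afterwards only
+ * variables with RUNTIME_ACCESS.
+ */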
+static gboolean check_access(uefi_vars_state *uv, uefi_variable *var)
+{
+ if (!uv->exit_boot_service) {
+ if (!(var->attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)) {
+ return false;
+ }
+ } else {
+ if (!(var->attributes & EFI_VARIABLE_RUNTIME_ACCESS)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static efi_status check_update(uefi_vars_state *uv, uefi_variable *old_var,
+ uefi_variable *new_var)
+{
+ efi_status status;
+
+ if (old_var) {
+ if (!check_access(uv, old_var)) {
+ return EFI_ACCESS_DENIED;
+ }
+ }
+
+ if (new_var) {
+ if (new_var->attributes & ~EFI_VARIABLE_ATTRIBUTE_SUPPORTED) {
+ return EFI_UNSUPPORTED;
+ }
+ if (!check_access(uv, new_var)) {
+ return EFI_ACCESS_DENIED;
+ }
+ }
+
+ if (old_var && new_var) {
+ if (old_var->attributes != new_var->attributes) {
+ return EFI_INVALID_PARAMETER;
+ }
+ }
+
+ if (new_var) {
+ /* create + update */
+ status = uefi_vars_policy_check(uv, new_var, old_var == NULL);
+ } else {
+ /* delete */
+ g_assert(old_var);
+ status = uefi_vars_policy_check(uv, old_var, false);
+ }
+ if (status != EFI_SUCCESS) {
+ return status;
+ }
+
+ status = uefi_vars_check_secure_boot(uv, new_var ?: old_var);
+ if (status != EFI_SUCCESS) {
+ return status;
+ }
+
+ return EFI_SUCCESS;
+}
+
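+/*
+ * Handle APPEND_WRITE for signature databases: parse both the old and
+ * the new variable content, merge the signature lists and replace
+ * new_var->data with the combined blob.
+ */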
+static void append_write(uefi_variable *old_var,
+ uefi_variable *new_var)
+{
+ uefi_vars_siglist siglist;
+ uint64_t size;
+ void *data;
+
+ uefi_vars_siglist_init(&siglist);
+ uefi_vars_siglist_parse(&siglist, old_var->data, old_var->data_size);
+ uefi_vars_siglist_parse(&siglist, new_var->data, new_var->data_size);
+
+ size = uefi_vars_siglist_blob_size(&siglist);
+ data = g_malloc(size);
+ uefi_vars_siglist_blob_generate(&siglist, data, size);
+
+ g_free(new_var->data);
+ new_var->data = data;
+ new_var->data_size = size;
+
+ uefi_vars_siglist_free(&siglist);
+}
+
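+/* set the error status and return the length of a header-only reply */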
+static size_t uefi_vars_mm_error(mm_header *mhdr, mm_variable *mvar,
+ uint64_t status)
+{
+ mvar->status = status;
+ return sizeof(*mvar);
+}
+
+static size_t uefi_vars_mm_get_variable(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_variable_access *va = func;
+ uint16_t *name;
+ void *data;
+ uefi_variable *var;
+ uint64_t length;
+
+ length = sizeof(*mvar) + sizeof(*va);
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (va->name_size > uv->max_storage ||
+ va->data_size > uv->max_storage) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES);
+ }
+
+ name = func + sizeof(*va);
+ if (uadd64_overflow(length, va->name_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (!uefi_str_is_valid(name, va->name_size, true)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER);
+ }
+
+ uefi_trace_variable(__func__, va->guid, name, va->name_size);
+
+ var = uefi_vars_find_variable(uv, va->guid, name, va->name_size);
+ if (!var) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND);
+ }
+
+ /* check permissions etc. */
+ if (!check_access(uv, var)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_ACCESS_DENIED);
+ }
+
+ data = func + sizeof(*va) + va->name_size;
+ if (uadd64_overflow(length, va->data_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (uv->buf_size < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ va->attributes = var->attributes;
+ if (va->data_size < var->data_size) {
+ va->data_size = var->data_size;
+ length -= va->data_size;
+ mvar->status = EFI_BUFFER_TOO_SMALL;
+ } else {
+ va->data_size = var->data_size;
+ memcpy(data, var->data, var->data_size);
+ mvar->status = EFI_SUCCESS;
+ }
+ return length;
+}
+
+static size_t
+uefi_vars_mm_get_next_variable(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_next_variable *nv = func;
+ uefi_variable *var;
+ uint16_t *name;
+ uint64_t length;
+
+ length = sizeof(*mvar) + sizeof(*nv);
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (nv->name_size > uv->max_storage) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES);
+ }
+
+ name = func + sizeof(*nv);
+ if (uadd64_overflow(length, nv->name_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (!uefi_str_is_valid(name, nv->name_size, true)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER);
+ }
+
+ if (uefi_strlen(name, nv->name_size) == 0) {
+ /* empty string -> first */
+ var = QTAILQ_FIRST(&uv->variables);
+ if (!var) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND);
+ }
+ } else {
+ var = uefi_vars_find_variable(uv, nv->guid, name, nv->name_size);
+ if (!var) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER);
+ }
+ do {
+ var = QTAILQ_NEXT(var, next);
+ } while (var && !check_access(uv, var));
+ if (!var) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND);
+ }
+ }
+
+ length = sizeof(*mvar) + sizeof(*nv) + var->name_size;
+ if (uv->buf_size < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ nv->guid = var->guid;
+ nv->name_size = var->name_size;
+ memcpy(name, var->name, var->name_size);
+ mvar->status = EFI_SUCCESS;
+ return length;
+}
+
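+/*
+ * Compare the stored payload digests of two authenticated variables,
+ * logging any mismatch as a security violation.
+ */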
+static bool uefi_vars_mm_digest_compare(uefi_variable *old_var,
+ uefi_variable *new_var)
+{
+ if (!old_var->digest ||
+ !new_var->digest ||
+ !old_var->digest_size ||
+ !new_var->digest_size) {
+ /* should not happen */
+ trace_uefi_vars_security_violation("inconsistent authvar digest state");
+ return false;
+ }
+ if (old_var->digest_size != new_var->digest_size) {
+ trace_uefi_vars_security_violation("authvar digest size mismatch");
+ return false;
+ }
+ if (memcmp(old_var->digest, new_var->digest,
+ old_var->digest_size) != 0) {
+ trace_uefi_vars_security_violation("authvar digest data mismatch");
+ return false;
+ }
+ return true;
+}
+
+static size_t uefi_vars_mm_set_variable(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_variable_access *va = func;
+ uint32_t attributes = 0;
+ uint16_t *name;
+ void *data;
+ uefi_variable *old_var, *new_var;
+ uint64_t length;
+ size_t new_storage;
+ efi_status status;
+
+ length = sizeof(*mvar) + sizeof(*va);
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ if (va->name_size > uv->max_storage ||
+ va->data_size > uv->max_storage) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES);
+ }
+
+ name = func + sizeof(*va);
+ if (uadd64_overflow(length, va->name_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ data = func + sizeof(*va) + va->name_size;
+ if (uadd64_overflow(length, va->data_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ g_assert(va->name_size < G_MAXUINT32);
+ g_assert(va->data_size < G_MAXUINT32);
+
+ if (!uefi_str_is_valid(name, va->name_size, true)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER);
+ }
+
+ uefi_trace_variable(__func__, va->guid, name, va->name_size);
+
+ old_var = uefi_vars_find_variable(uv, va->guid, name, va->name_size);
+ if (va->data_size) {
+ new_var = add_variable(uv, va->guid, name, va->name_size,
+ va->attributes);
+ if (va->attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) {
+ /* not implemented (deprecated in uefi spec) */
+ warn_report("%s: AUTHENTICATED_WRITE_ACCESS", __func__);
+ mvar->status = EFI_UNSUPPORTED;
+ goto rollback;
+ } else if (va->attributes &
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) {
+ status = uefi_vars_check_auth_2(uv, new_var, va, data);
+ if (status != EFI_SUCCESS) {
+ mvar->status = status;
+ goto rollback;
+ }
+ if (old_var && new_var) {
+ if (uefi_time_compare(&old_var->time, &new_var->time) > 0) {
+ trace_uefi_vars_security_violation("time check failed");
+ mvar->status = EFI_SECURITY_VIOLATION;
+ goto rollback;
+ }
+ if (old_var->digest_size || new_var->digest_size) {
+ if (!uefi_vars_mm_digest_compare(old_var, new_var)) {
+ mvar->status = EFI_SECURITY_VIOLATION;
+ goto rollback;
+ }
+ }
+ }
+ } else {
+ new_var->data = g_malloc(va->data_size);
+ memcpy(new_var->data, data, va->data_size);
+ new_var->data_size = va->data_size;
+ }
+ if (!new_var->data) {
+ /* we land here when deleting authenticated variables */
+ del_variable(uv, new_var);
+ new_var = NULL;
+ }
+ } else {
+ new_var = NULL;
+ }
+
+ if (!old_var && !new_var) {
+ /* delete non-existing variable -> nothing to do */
+ mvar->status = EFI_SUCCESS;
+ return sizeof(*mvar);
+ }
+
+ /* check permissions etc. */
+ status = check_update(uv, old_var, new_var);
+ if (status != EFI_SUCCESS) {
+ mvar->status = status;
+ goto rollback;
+ }
+
+ if (va->attributes & EFI_VARIABLE_APPEND_WRITE && old_var && new_var) {
+ /* merge signature databases */
+ if (!uefi_vars_is_sb_any(new_var)) {
+ mvar->status = EFI_UNSUPPORTED;
+ goto rollback;
+ }
+ append_write(old_var, new_var);
+ }
+
+ /* check storage space */
+ new_storage = uv->used_storage;
+ if (old_var) {
+ new_storage -= variable_size(old_var);
+ }
+ if (new_var) {
+ new_storage += variable_size(new_var);
+ }
+ if (new_storage > uv->max_storage) {
+ mvar->status = EFI_OUT_OF_RESOURCES;
+ goto rollback;
+ }
+
+ attributes = new_var
+ ? new_var->attributes
+ : old_var->attributes;
+
+ /* all good, commit */
+ del_variable(uv, old_var);
+ uv->used_storage = new_storage;
+
+ if (attributes & EFI_VARIABLE_NON_VOLATILE) {
+ uefi_vars_json_save(uv);
+ }
+
+ if (new_var && uefi_vars_is_sb_pk(new_var)) {
+ uefi_vars_auth_init(uv);
+ }
+
+ mvar->status = EFI_SUCCESS;
+ return sizeof(*mvar);
+
+rollback:
+ del_variable(uv, new_var);
+ return sizeof(*mvar);
+}
+
+static size_t uefi_vars_mm_variable_info(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_variable_info *vi = func;
+ uint64_t length;
+
+ length = sizeof(*mvar) + sizeof(*vi);
+ if (uv->buf_size < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ vi->max_storage_size = uv->max_storage;
+ vi->free_storage_size = uv->max_storage - uv->used_storage;
+ vi->max_variable_size = uv->max_storage >> 2;
+ vi->attributes = 0;
+
+ mvar->status = EFI_SUCCESS;
+ return length;
+}
+
+static size_t
+uefi_vars_mm_get_payload_size(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_get_payload_size *ps = func;
+ uint64_t length;
+
+ length = sizeof(*mvar) + sizeof(*ps);
+ if (uv->buf_size < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ ps->payload_size = uv->buf_size;
+ mvar->status = EFI_SUCCESS;
+ return length;
+}
+
+static size_t
+uefi_vars_mm_lock_variable(uefi_vars_state *uv, mm_header *mhdr,
+ mm_variable *mvar, void *func)
+{
+ mm_lock_variable *lv = func;
+ variable_policy_entry *pe;
+ uint16_t *name, *dest;
+ uint64_t length;
+
+ length = sizeof(*mvar) + sizeof(*lv);
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ name = func + sizeof(*lv);
+ if (uadd64_overflow(length, lv->name_size, &length)) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+ if (mhdr->length < length) {
+ return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE);
+ }
+
+ uefi_trace_variable(__func__, lv->guid, name, lv->name_size);
+
+ pe = g_malloc0(sizeof(*pe) + lv->name_size);
+ pe->version = VARIABLE_POLICY_ENTRY_REVISION;
+ pe->size = sizeof(*pe) + lv->name_size;
+ pe->offset_to_name = sizeof(*pe);
+ pe->namespace = lv->guid;
+ pe->min_size = 0;
+ pe->max_size = UINT32_MAX;
+ pe->attributes_must_have = 0;
+ pe->attributes_cant_have = 0;
+ pe->lock_policy_type = VARIABLE_POLICY_TYPE_LOCK_NOW;
+
+ dest = (void *)pe + pe->offset_to_name;
+ memcpy(dest, name, lv->name_size);
+
+ uefi_vars_add_policy(uv, pe);
+ g_free(pe);
+
+ mvar->status = EFI_SUCCESS;
+ return length;
+}
+
+uint32_t uefi_vars_mm_vars_proto(uefi_vars_state *uv)
+{
+ static const char *fnames[] = {
+ "zero",
+ "get-variable",
+ "get-next-variable-name",
+ "set-variable",
+ "query-variable-info",
+ "ready-to-boot",
+ "exit-boot-service",
+ "get-statistics",
+ "lock-variable",
+ "var-check-prop-set",
+ "var-check-prop-get",
+ "get-payload-size",
+ "init-runtime-cache-contect",
+ "sync-runtime-cache",
+ "get-runtime-cache-info",
+ };
+ const char *fname;
+ uint64_t length;
+
+ mm_header *mhdr = (mm_header *) uv->buffer;
+ mm_variable *mvar = (mm_variable *) (uv->buffer + sizeof(*mhdr));
+ void *func = (uv->buffer + sizeof(*mhdr) + sizeof(*mvar));
+
+ if (mhdr->length < sizeof(*mvar)) {
+ return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE;
+ }
+
+ fname = mvar->function < ARRAY_SIZE(fnames)
+ ? fnames[mvar->function]
+ : "unknown";
+ trace_uefi_vars_proto_cmd(fname);
+
+ switch (mvar->function) {
+ case SMM_VARIABLE_FUNCTION_GET_VARIABLE:
+ length = uefi_vars_mm_get_variable(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME:
+ length = uefi_vars_mm_get_next_variable(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_SET_VARIABLE:
+ length = uefi_vars_mm_set_variable(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO:
+ length = uefi_vars_mm_variable_info(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_LOCK_VARIABLE:
+ length = uefi_vars_mm_lock_variable(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE:
+ length = uefi_vars_mm_get_payload_size(uv, mhdr, mvar, func);
+ break;
+
+ case SMM_VARIABLE_FUNCTION_READY_TO_BOOT:
+ trace_uefi_event("ready-to-boot");
+ uv->ready_to_boot = true;
+ length = 0;
+ break;
+
+ case SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE:
+ trace_uefi_event("exit-boot-service");
+ uv->exit_boot_service = true;
+ length = 0;
+ break;
+
+ default:
+ length = uefi_vars_mm_error(mhdr, mvar, EFI_UNSUPPORTED);
+ break;
+ }
+
+ if (mhdr->length < length) {
+ mvar->status = EFI_BUFFER_TOO_SMALL;
+ }
+
+ uefi_trace_status(__func__, mvar->status);
+ return UEFI_VARS_STS_SUCCESS;
+}
diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c
index 81bfff9..2d8ffd7 100644
--- a/hw/ufs/lu.c
+++ b/hw/ufs/lu.c
@@ -14,7 +14,7 @@
#include "qemu/memalign.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "ufs.h"
@@ -194,7 +194,7 @@ static int ufs_emulate_wlun_inquiry(UfsRequest *req, uint8_t *outbuf,
static UfsReqResult ufs_emulate_scsi_cmd(UfsLu *lu, UfsRequest *req)
{
uint8_t lun = lu->lun;
- uint8_t outbuf[4096];
+ QEMU_UNINITIALIZED uint8_t outbuf[4096];
uint8_t sense_buf[UFS_SENSE_SIZE];
uint8_t scsi_status;
int len = 0;
@@ -274,10 +274,9 @@ static UfsReqResult ufs_process_scsi_cmd(UfsLu *lu, UfsRequest *req)
return UFS_REQUEST_NO_COMPLETE;
}
-static Property ufs_lu_props[] = {
+static const Property ufs_lu_props[] = {
DEFINE_PROP_DRIVE("drive", UfsLu, conf.blk),
DEFINE_PROP_UINT8("lun", UfsLu, lun, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp)
@@ -420,7 +419,7 @@ static void ufs_lu_unrealize(DeviceState *dev)
}
}
-static void ufs_lu_class_init(ObjectClass *oc, void *data)
+static void ufs_lu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c
index 945a0ea..0577747 100644
--- a/hw/ufs/ufs.c
+++ b/hw/ufs/ufs.c
@@ -25,6 +25,7 @@
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
+#include "hw/irq.h"
#include "trace.h"
#include "ufs.h"
@@ -34,6 +35,11 @@
#define UFS_MAX_NUTMRS 8
#define UFS_MCQ_QCFGPTR 2
+/* Each value represents the temperature in Celsius as (value - 80) */
+#define UFS_TEMPERATURE 120
+#define UFS_TOO_HIGH_TEMP_BOUNDARY 160
+#define UFS_TOO_LOW_TEMP_BOUNDARY 60
+
static void ufs_exec_req(UfsRequest *req);
static void ufs_clear_req(UfsRequest *req);
@@ -838,6 +844,42 @@ static const MemoryRegionOps ufs_mmio_ops = {
},
};
+static void ufs_update_ee_status(UfsHc *u)
+{
+ uint16_t ee_status = be16_to_cpu(u->attributes.exception_event_status);
+ uint8_t high_temp_thresh = u->attributes.device_too_high_temp_boundary;
+ uint8_t low_temp_thresh = u->attributes.device_too_low_temp_boundary;
+
+ if (u->temperature >= high_temp_thresh) {
+ ee_status |= MASK_EE_TOO_HIGH_TEMP;
+ } else {
+ ee_status &= ~MASK_EE_TOO_HIGH_TEMP;
+ }
+
+ if (u->temperature <= low_temp_thresh) {
+ ee_status |= MASK_EE_TOO_LOW_TEMP;
+ } else {
+ ee_status &= ~MASK_EE_TOO_LOW_TEMP;
+ }
+
+ u->attributes.exception_event_status = cpu_to_be16(ee_status);
+}
+
+static bool ufs_check_exception_event_alert(UfsHc *u, uint8_t trans_type)
+{
+ uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
+ uint16_t ee_status;
+
+ if (trans_type != UFS_UPIU_TRANSACTION_RESPONSE) {
+ return false;
+ }
+
+ ufs_update_ee_status(u);
+
+ ee_status = be16_to_cpu(u->attributes.exception_event_status);
+
+ return ee_control & ee_status;
+}
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
uint8_t response, uint8_t scsi_status,
@@ -848,9 +890,19 @@ void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
req->rsp_upiu.header.flags = flags;
req->rsp_upiu.header.response = response;
req->rsp_upiu.header.scsi_status = scsi_status;
+ req->rsp_upiu.header.device_inf =
+ ufs_check_exception_event_alert(req->hc, trans_type);
req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
}
+void ufs_build_query_response(UfsRequest *req)
+{
+ req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
+ req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
+ req->rsp_upiu.qr.index = req->req_upiu.qr.index;
+ req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
+}
+
static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
{
UfsHc *u = req->hc;
@@ -1034,6 +1086,25 @@ static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
return UFS_QUERY_RESULT_SUCCESS;
}
+static inline uint8_t ufs_read_device_temp(UfsHc *u)
+{
+ uint8_t feat_sup = u->device_desc.ufs_features_support;
+ bool high_temp_sup, low_temp_sup, high_temp_en, low_temp_en;
+ uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
+
+ high_temp_sup = feat_sup & UFS_DEV_HIGH_TEMP_NOTIF;
+ low_temp_sup = feat_sup & UFS_DEV_LOW_TEMP_NOTIF;
+ high_temp_en = ee_control & MASK_EE_TOO_HIGH_TEMP;
+ low_temp_en = ee_control & MASK_EE_TOO_LOW_TEMP;
+
+ if ((high_temp_sup && high_temp_en) ||
+ (low_temp_sup && low_temp_en)) {
+ return u->temperature;
+ }
+
+ return 0;
+}
+
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
switch (idn) {
@@ -1064,6 +1135,7 @@ static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
case UFS_QUERY_ATTR_IDN_EE_CONTROL:
return be16_to_cpu(u->attributes.exception_event_control);
case UFS_QUERY_ATTR_IDN_EE_STATUS:
+ ufs_update_ee_status(u);
return be16_to_cpu(u->attributes.exception_event_status);
case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
return be32_to_cpu(u->attributes.seconds_passed);
@@ -1078,7 +1150,8 @@ static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
return u->attributes.ref_clk_gating_wait_time;
case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
- return u->attributes.device_case_rough_temperaure;
+ u->attributes.device_case_rough_temperature = ufs_read_device_temp(u);
+ return u->attributes.device_case_rough_temperature;
case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
return u->attributes.device_too_high_temp_boundary;
case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
@@ -1103,10 +1176,13 @@ static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
return 0;
}
-static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
+static QueryRespCode ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
{
switch (idn) {
case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+ if (value > UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE) {
+ return UFS_QUERY_RESULT_INVALID_VALUE;
+ }
u->attributes.active_icc_level = value;
break;
case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
@@ -1134,6 +1210,7 @@ static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
u->attributes.psa_data_size = cpu_to_be32(value);
break;
}
+ return UFS_QUERY_RESULT_SUCCESS;
}
static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
@@ -1150,13 +1227,13 @@ static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
if (op == UFS_QUERY_ATTR_READ) {
value = ufs_read_attr_value(u, idn);
+ ret = UFS_QUERY_RESULT_SUCCESS;
} else {
value = be32_to_cpu(req->req_upiu.qr.value);
- ufs_write_attr_value(u, idn, value);
+ ret = ufs_write_attr_value(u, idn, value);
}
-
req->rsp_upiu.qr.value = cpu_to_be32(value);
- return UFS_QUERY_RESULT_SUCCESS;
+ return ret;
}
static const RpmbUnitDescriptor rpmb_unit_desc = {
@@ -1279,9 +1356,12 @@ static QueryRespCode ufs_read_desc(UfsRequest *req)
UfsHc *u = req->hc;
QueryRespCode status;
uint8_t idn = req->req_upiu.qr.idn;
+ uint8_t selector = req->req_upiu.qr.selector;
uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
InterconnectDescriptor desc;
-
+ if (selector != 0) {
+ return UFS_QUERY_RESULT_INVALID_SELECTOR;
+ }
switch (idn) {
case UFS_QUERY_DESC_IDN_DEVICE:
memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
@@ -1327,10 +1407,6 @@ static QueryRespCode ufs_read_desc(UfsRequest *req)
if (length > req->rsp_upiu.qr.data[0]) {
length = req->rsp_upiu.qr.data[0];
}
- req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
- req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
- req->rsp_upiu.qr.index = req->req_upiu.qr.index;
- req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
req->rsp_upiu.qr.length = cpu_to_be16(length);
return status;
@@ -1411,6 +1487,7 @@ static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
data_segment_length);
+ ufs_build_query_response(req);
if (status != UFS_QUERY_RESULT_SUCCESS) {
return UFS_REQUEST_FAIL;
@@ -1623,7 +1700,7 @@ static void ufs_init_hc(UfsHc *u)
cap = FIELD_DP32(cap, CAP, OODDS, 0);
cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
cap = FIELD_DP32(cap, CAP, CS, 0);
- cap = FIELD_DP32(cap, CAP, LSDBS, 1);
+ cap = FIELD_DP32(cap, CAP, LSDBS, 0);
cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
u->reg.cap = cap;
@@ -1665,15 +1742,19 @@ static void ufs_init_hc(UfsHc *u)
u->device_desc.ud_0_base_offset = 0x16;
u->device_desc.ud_config_p_length = 0x1A;
u->device_desc.device_rtt_cap = 0x02;
+ u->device_desc.ufs_features_support = UFS_DEV_HIGH_TEMP_NOTIF |
+ UFS_DEV_LOW_TEMP_NOTIF;
u->device_desc.queue_depth = u->params.nutrs;
u->device_desc.product_revision_level = 0x04;
+ u->device_desc.extended_ufs_features_support =
+ cpu_to_be32(UFS_DEV_HIGH_TEMP_NOTIF | UFS_DEV_LOW_TEMP_NOTIF);
memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
u->geometry_desc.length = sizeof(GeometryDescriptor);
u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
- u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
- u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
+ u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4MB: 8192 * 512B */
+ u->geometry_desc.allocation_unit_size = 0x1; /* 4MB: 1 segment */
u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
u->geometry_desc.max_in_buffer_size = 0x8;
u->geometry_desc.max_out_buffer_size = 0x8;
@@ -1690,9 +1771,17 @@ static void ufs_init_hc(UfsHc *u)
/* configure descriptor is not supported */
u->attributes.config_descr_lock = 0x01;
u->attributes.max_num_of_rtt = 0x02;
+ u->attributes.device_too_high_temp_boundary = UFS_TOO_HIGH_TEMP_BOUNDARY;
+ u->attributes.device_too_low_temp_boundary = UFS_TOO_LOW_TEMP_BOUNDARY;
memset(&u->flags, 0, sizeof(u->flags));
u->flags.permanently_disable_fw_update = 1;
+
+ /*
+ * The temperature value is fixed to UFS_TEMPERATURE and does not change
+ * dynamically.
+ */
+ u->temperature = UFS_TEMPERATURE;
}
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
@@ -1720,6 +1809,8 @@ static void ufs_exit(PCIDevice *pci_dev)
{
UfsHc *u = UFS(pci_dev);
+ qemu_free_irq(u->irq);
+
qemu_bh_delete(u->doorbell_bh);
qemu_bh_delete(u->complete_bh);
@@ -1740,13 +1831,12 @@ static void ufs_exit(PCIDevice *pci_dev)
}
}
-static Property ufs_props[] = {
+static const Property ufs_props[] = {
DEFINE_PROP_STRING("serial", UfsHc, params.serial),
DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription ufs_vmstate = {
@@ -1754,7 +1844,7 @@ static const VMStateDescription ufs_vmstate = {
.unmigratable = 1,
};
-static void ufs_class_init(ObjectClass *oc, void *data)
+static void ufs_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -1790,7 +1880,7 @@ static char *ufs_bus_get_dev_path(DeviceState *dev)
return qdev_get_dev_path(bus->parent);
}
-static void ufs_bus_class_init(ObjectClass *class, void *data)
+static void ufs_bus_class_init(ObjectClass *class, const void *data)
{
BusClass *bc = BUS_CLASS(class);
bc->get_dev_path = ufs_bus_get_dev_path;
@@ -1802,7 +1892,7 @@ static const TypeInfo ufs_info = {
.parent = TYPE_PCI_DEVICE,
.class_init = ufs_class_init,
.instance_size = sizeof(UfsHc),
- .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
+ .interfaces = (const InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
static const TypeInfo ufs_bus_info = {
diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h
index 6c9382c..3799d97 100644
--- a/hw/ufs/ufs.h
+++ b/hw/ufs/ufs.h
@@ -146,6 +146,8 @@ typedef struct UfsHc {
/* MCQ properties */
UfsSq *sq[UFS_MAX_MCQ_QNUM];
UfsCq *cq[UFS_MAX_MCQ_QNUM];
+
+ uint8_t temperature;
} UfsHc;
static inline uint32_t ufs_mcq_sq_tail(UfsHc *u, uint32_t qid)
@@ -228,6 +230,7 @@ static inline bool is_wlun(uint8_t lun)
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
uint8_t response, uint8_t scsi_status,
uint16_t data_segment_length);
+void ufs_build_query_response(UfsRequest *req);
void ufs_complete_req(UfsRequest *req, UfsReqResult req_result);
void ufs_init_wlu(UfsLu *wlu, uint8_t wlun);
#endif /* HW_UFS_UFS_H */
diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig
index 84bc7fb..69c663be 100644
--- a/hw/usb/Kconfig
+++ b/hw/usb/Kconfig
@@ -53,18 +53,10 @@ config USB_XHCI_SYSBUS
bool
select USB_XHCI
-config USB_MUSB
- bool
- select USB
-
config USB_DWC2
bool
select USB
-config TUSB6010
- bool
- select USB_MUSB
-
config USB_HUB
bool
default y
@@ -151,3 +143,7 @@ config USB_DWC3
config XLNX_USB_SUBSYS
bool
select USB_DWC3
+
+config USB_CHIPIDEA
+ bool
+ select USB_EHCI_SYSBUS
diff --git a/hw/usb/bus-stub.c b/hw/usb/bus-stub.c
index fcabe84..cd0c317 100644
--- a/hw/usb/bus-stub.c
+++ b/hw/usb/bus-stub.c
@@ -10,7 +10,7 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "monitor/monitor.h"
#include "hw/usb.h"
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index bfab280..8dd2ce4 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -6,7 +6,7 @@
#include "qapi/type-helpers.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "trace.h"
@@ -18,16 +18,15 @@ static char *usb_get_dev_path(DeviceState *dev);
static char *usb_get_fw_dev_path(DeviceState *qdev);
static void usb_qdev_unrealize(DeviceState *qdev);
-static Property usb_props[] = {
+static const Property usb_props[] = {
DEFINE_PROP_STRING("port", USBDevice, port_path),
DEFINE_PROP_STRING("serial", USBDevice, serial),
DEFINE_PROP_BIT("msos-desc", USBDevice, flags,
USB_DEV_FLAG_MSOS_DESC_ENABLE, true),
DEFINE_PROP_STRING("pcap", USBDevice, pcap_filename),
- DEFINE_PROP_END_OF_LIST()
};
-static void usb_bus_class_init(ObjectClass *klass, void *data)
+static void usb_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@@ -43,7 +42,7 @@ static const TypeInfo usb_bus_info = {
.parent = TYPE_BUS,
.instance_size = sizeof(USBBus),
.class_init = usb_bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
@@ -412,7 +411,7 @@ void usb_claim_port(USBDevice *dev, Error **errp)
} else {
if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), "usb-hub") != 0) {
/* Create a new hub and chain it on */
- hub = usb_try_new("usb-hub");
+ hub = USB_DEVICE(qdev_try_new("usb-hub"));
if (hub) {
usb_realize_and_unref(hub, bus, NULL);
}
@@ -663,7 +662,8 @@ USBDevice *usbdevice_create(const char *driver)
return NULL;
}
- dev = f->usbdevice_init ? f->usbdevice_init() : usb_new(f->name);
+ dev = f->usbdevice_init ? f->usbdevice_init()
+ : USB_DEVICE(qdev_new(f->name));
if (!dev) {
error_report("Failed to create USB device '%s'", f->name);
return NULL;
@@ -713,7 +713,7 @@ static void usb_device_instance_init(Object *obj)
}
}
-static void usb_device_class_init(ObjectClass *klass, void *data)
+static void usb_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->bus_type = TYPE_USB_BUS;
diff --git a/hw/usb/canokey.c b/hw/usb/canokey.c
index b306eeb..cbefbb5 100644
--- a/hw/usb/canokey.c
+++ b/hw/usb/canokey.c
@@ -197,8 +197,8 @@ static void canokey_handle_data(USBDevice *dev, USBPacket *p)
switch (p->pid) {
case USB_TOKEN_OUT:
trace_canokey_handle_data_out(ep_out, p->iov.size);
- usb_packet_copy(p, key->ep_out_buffer[ep_out], p->iov.size);
out_pos = 0;
+ /* segment packet into (possibly multiple) ep_out */
while (out_pos != p->iov.size) {
/*
* key->ep_out[ep_out] set by prepare_receive
@@ -207,8 +207,8 @@ static void canokey_handle_data(USBDevice *dev, USBPacket *p)
* to be the buffer length
*/
out_len = MIN(p->iov.size - out_pos, key->ep_out_size[ep_out]);
- memcpy(key->ep_out[ep_out],
- key->ep_out_buffer[ep_out] + out_pos, out_len);
+ /* usb_packet_copy advances the packet's read position internally */
+ usb_packet_copy(p, key->ep_out[ep_out], out_len);
out_pos += out_len;
/* update ep_out_size to actual len */
key->ep_out_size[ep_out] = out_len;
@@ -296,12 +296,11 @@ static void canokey_unrealize(USBDevice *base)
trace_canokey_unrealize();
}
-static Property canokey_properties[] = {
+static const Property canokey_properties[] = {
DEFINE_PROP_STRING("file", CanoKeyState, file),
- DEFINE_PROP_END_OF_LIST(),
};
-static void canokey_class_init(ObjectClass *klass, void *data)
+static void canokey_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/canokey.h b/hw/usb/canokey.h
index e528889..1b60d73 100644
--- a/hw/usb/canokey.h
+++ b/hw/usb/canokey.h
@@ -24,8 +24,6 @@
#define CANOKEY_EP_NUM 3
/* BULK/INTR IN can be up to 1352 bytes, e.g. get key info */
#define CANOKEY_EP_IN_BUFFER_SIZE 2048
-/* BULK OUT can be up to 270 bytes, e.g. PIV import cert */
-#define CANOKEY_EP_OUT_BUFFER_SIZE 512
typedef enum {
CANOKEY_EP_IN_WAIT,
@@ -59,8 +57,6 @@ typedef struct CanoKeyState {
/* OUT pointer to canokey recv buffer */
uint8_t *ep_out[CANOKEY_EP_NUM];
uint32_t ep_out_size[CANOKEY_EP_NUM];
- /* For large BULK OUT, multiple write to ep_out is needed */
- uint8_t ep_out_buffer[CANOKEY_EP_NUM][CANOKEY_EP_OUT_BUFFER_SIZE];
/* Properties */
char *file; /* canokey-file */
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index 3ee9c73..c21cefd 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -582,17 +582,16 @@ static void emulated_unrealize(CCIDCardState *base)
qemu_mutex_destroy(&card->event_list_mutex);
}
-static Property emulated_card_properties[] = {
+static const Property emulated_card_properties[] = {
DEFINE_PROP_STRING("backend", EmulatedState, backend_str),
DEFINE_PROP_STRING("cert1", EmulatedState, cert1),
DEFINE_PROP_STRING("cert2", EmulatedState, cert2),
DEFINE_PROP_STRING("cert3", EmulatedState, cert3),
DEFINE_PROP_STRING("db", EmulatedState, db),
DEFINE_PROP_UINT8("debug", EmulatedState, debug, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void emulated_class_initfn(ObjectClass *klass, void *data)
+static void emulated_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCIDCardClass *cc = CCID_CARD_CLASS(klass);
diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c
index a515703..1eea21a 100644
--- a/hw/usb/ccid-card-passthru.c
+++ b/hw/usb/ccid-card-passthru.c
@@ -388,13 +388,12 @@ static const VMStateDescription passthru_vmstate = {
}
};
-static Property passthru_card_properties[] = {
+static const Property passthru_card_properties[] = {
DEFINE_PROP_CHR("chardev", PassthruState, cs),
DEFINE_PROP_UINT8("debug", PassthruState, debug, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void passthru_class_initfn(ObjectClass *klass, void *data)
+static void passthru_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCIDCardClass *cc = CCID_CARD_CLASS(klass);
diff --git a/hw/usb/chipidea.c b/hw/usb/chipidea.c
index b1c8540..250c2b3 100644
--- a/hw/usb/chipidea.c
+++ b/hw/usb/chipidea.c
@@ -144,7 +144,7 @@ static void chipidea_init(Object *obj)
}
}
-static void chipidea_class_init(ObjectClass *klass, void *data)
+static void chipidea_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass);
diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c
index 1897fff..26af709 100644
--- a/hw/usb/dev-audio.c
+++ b/hw/usb/dev-audio.c
@@ -990,15 +990,14 @@ static const VMStateDescription vmstate_usb_audio = {
.unmigratable = 1,
};
-static Property usb_audio_properties[] = {
+static const Property usb_audio_properties[] = {
DEFINE_AUDIO_PROPERTIES(USBAudioState, card),
DEFINE_PROP_UINT32("debug", USBAudioState, debug, 0),
DEFINE_PROP_UINT32("buffer", USBAudioState, buffer_user, 0),
DEFINE_PROP_BOOL("multi", USBAudioState, multi, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_audio_class_init(ObjectClass *klass, void *data)
+static void usb_audio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *k = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c
index 9e358c9..54d064e 100644
--- a/hw/usb/dev-hid.c
+++ b/hw/usb/dev-hid.c
@@ -774,7 +774,7 @@ static const VMStateDescription vmstate_usb_kbd = {
}
};
-static void usb_hid_class_initfn(ObjectClass *klass, void *data)
+static void usb_hid_class_initfn(ObjectClass *klass, const void *data)
{
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -793,14 +793,13 @@ static const TypeInfo usb_hid_type_info = {
.class_init = usb_hid_class_initfn,
};
-static Property usb_tablet_properties[] = {
+static const Property usb_tablet_properties[] = {
DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2),
DEFINE_PROP_STRING("display", USBHIDState, display),
DEFINE_PROP_UINT32("head", USBHIDState, head, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_tablet_class_initfn(ObjectClass *klass, void *data)
+static void usb_tablet_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -818,12 +817,11 @@ static const TypeInfo usb_tablet_info = {
.class_init = usb_tablet_class_initfn,
};
-static Property usb_mouse_properties[] = {
+static const Property usb_mouse_properties[] = {
DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_mouse_class_initfn(ObjectClass *klass, void *data)
+static void usb_mouse_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -841,13 +839,12 @@ static const TypeInfo usb_mouse_info = {
.class_init = usb_mouse_class_initfn,
};
-static Property usb_keyboard_properties[] = {
+static const Property usb_keyboard_properties[] = {
DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2),
DEFINE_PROP_STRING("display", USBHIDState, display),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_keyboard_class_initfn(ObjectClass *klass, void *data)
+static void usb_keyboard_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c
index 06e9537..a19350d 100644
--- a/hw/usb/dev-hub.c
+++ b/hw/usb/dev-hub.c
@@ -479,6 +479,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p,
usb_hub_port_clear(port, PORT_STAT_SUSPEND);
port->wPortChange = 0;
}
+ break;
default:
goto fail;
}
@@ -664,13 +665,12 @@ static const VMStateDescription vmstate_usb_hub = {
}
};
-static Property usb_hub_properties[] = {
+static const Property usb_hub_properties[] = {
DEFINE_PROP_UINT32("ports", USBHubState, num_ports, 8),
DEFINE_PROP_BOOL("port-power", USBHubState, port_power, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_hub_class_initfn(ObjectClass *klass, void *data)
+static void usb_hub_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index 554b397..ce45c9c 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -1234,8 +1234,6 @@ static void usb_mtp_object_delete(MTPState *s, uint32_t handle,
default:
g_assert_not_reached();
}
-
- return;
}
static void usb_mtp_command(MTPState *s, MTPControl *c)
@@ -2078,14 +2076,13 @@ static const VMStateDescription vmstate_usb_mtp = {
}
};
-static Property mtp_properties[] = {
+static const Property mtp_properties[] = {
DEFINE_PROP_STRING("rootdir", MTPState, root),
DEFINE_PROP_STRING("desc", MTPState, desc),
DEFINE_PROP_BOOL("readonly", MTPState, readonly, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_mtp_class_initfn(ObjectClass *klass, void *data)
+static void usb_mtp_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c
index d00d68b2..81cc09d 100644
--- a/hw/usb/dev-network.c
+++ b/hw/usb/dev-network.c
@@ -33,7 +33,7 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/config-file.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
@@ -1407,12 +1407,11 @@ static const VMStateDescription vmstate_usb_net = {
.unmigratable = 1,
};
-static Property net_properties[] = {
+static const Property net_properties[] = {
DEFINE_NIC_PROPERTIES(USBNetState, conf),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_net_class_initfn(ObjectClass *klass, void *data)
+static void usb_net_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c
index 63047d7..1c116d8 100644
--- a/hw/usb/dev-serial.c
+++ b/hw/usb/dev-serial.c
@@ -472,8 +472,6 @@ static void usb_serial_token_in(USBSerialState *s, USBPacket *p)
s->recv_ptr = (s->recv_ptr + len) % RECV_BUF;
packet_len -= len + 2;
}
-
- return;
}
static void usb_serial_handle_data(USBDevice *dev, USBPacket *p)
@@ -624,7 +622,7 @@ static USBDevice *usb_braille_init(void)
return NULL;
}
- dev = usb_new("usb-braille");
+ dev = USB_DEVICE(qdev_new("usb-braille"));
qdev_prop_set_chr(&dev->qdev, "chardev", cdrv);
return dev;
}
@@ -634,13 +632,12 @@ static const VMStateDescription vmstate_usb_serial = {
.unmigratable = 1,
};
-static Property serial_properties[] = {
+static const Property serial_properties[] = {
DEFINE_PROP_CHR("chardev", USBSerialState, cs),
DEFINE_PROP_BOOL("always-plugged", USBSerialState, always_plugged, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_serial_dev_class_init(ObjectClass *klass, void *data)
+static void usb_serial_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -661,7 +658,7 @@ static const TypeInfo usb_serial_dev_type_info = {
.class_init = usb_serial_dev_class_init,
};
-static void usb_serial_class_initfn(ObjectClass *klass, void *data)
+static void usb_serial_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -677,12 +674,11 @@ static const TypeInfo serial_info = {
.class_init = usb_serial_class_initfn,
};
-static Property braille_properties[] = {
+static const Property braille_properties[] = {
DEFINE_PROP_CHR("chardev", USBSerialState, cs),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_braille_class_initfn(ObjectClass *klass, void *data)
+static void usb_braille_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c
index c0d63e0..6ce7154 100644
--- a/hw/usb/dev-smartcard-reader.c
+++ b/hw/usb/dev-smartcard-reader.c
@@ -1069,7 +1069,6 @@ static void ccid_handle_bulk_out(USBCCIDState *s, USBPacket *p)
err:
p->status = USB_RET_STALL;
s->bulk_out_pos = 0;
- return;
}
static void ccid_bulk_in_copy_to_guest(USBCCIDState *s, USBPacket *p,
@@ -1171,9 +1170,8 @@ static Answer *ccid_peek_next_answer(USBCCIDState *s)
: &s->pending_answers[s->pending_answers_start % PENDING_ANSWERS_NUM];
}
-static Property ccid_props[] = {
+static const Property ccid_props[] = {
DEFINE_PROP_UINT32("slot", struct CCIDCardState, slot, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const TypeInfo ccid_bus_info = {
@@ -1431,12 +1429,11 @@ static const VMStateDescription ccid_vmstate = {
}
};
-static Property ccid_properties[] = {
+static const Property ccid_properties[] = {
DEFINE_PROP_UINT8("debug", USBCCIDState, debug, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ccid_class_initfn(ObjectClass *klass, void *data)
+static void ccid_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
@@ -1461,13 +1458,13 @@ static const TypeInfo ccid_info = {
.parent = TYPE_USB_DEVICE,
.instance_size = sizeof(USBCCIDState),
.class_init = ccid_class_initfn,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
};
-static void ccid_card_class_init(ObjectClass *klass, void *data)
+static void ccid_card_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->bus_type = TYPE_CCID_BUS;
diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c
index 1e5c5c7..df6ab7f 100644
--- a/hw/usb/dev-storage-bot.c
+++ b/hw/usb/dev-storage-bot.c
@@ -40,7 +40,7 @@ static void usb_msd_bot_realize(USBDevice *dev, Error **errp)
usb_msd_handle_reset(dev);
}
-static void usb_msd_class_bot_initfn(ObjectClass *klass, void *data)
+static void usb_msd_class_bot_initfn(ObjectClass *klass, const void *data)
{
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c
index 6147387..dabe156 100644
--- a/hw/usb/dev-storage-classic.c
+++ b/hw/usb/dev-storage-classic.c
@@ -13,8 +13,8 @@
#include "hw/usb.h"
#include "hw/usb/desc.h"
#include "hw/usb/msd.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
+#include "system/system.h"
+#include "system/block-backend.h"
static const struct SCSIBusInfo usb_msd_scsi_info_storage = {
.tcq = false,
@@ -67,15 +67,14 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
s->scsi_dev = scsi_dev;
}
-static Property msd_properties[] = {
+static const Property msd_properties[] = {
DEFINE_BLOCK_PROPERTIES(MSDState, conf),
DEFINE_BLOCK_ERROR_PROPERTIES(MSDState, conf),
DEFINE_PROP_BOOL("removable", MSDState, removable, false),
DEFINE_PROP_BOOL("commandlog", MSDState, commandlog, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_msd_class_storage_initfn(ObjectClass *klass, void *data)
+static void usb_msd_class_storage_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c
index 341e505..b13fe34 100644
--- a/hw/usb/dev-storage.c
+++ b/hw/usb/dev-storage.c
@@ -177,7 +177,7 @@ static const USBDesc desc = {
.str = desc_strings,
};
-static void usb_msd_packet_complete(MSDState *s)
+static void usb_msd_packet_complete(MSDState *s, int status)
{
USBPacket *p = s->packet;
@@ -187,6 +187,7 @@ static void usb_msd_packet_complete(MSDState *s)
* usb_packet_complete returns.
*/
trace_usb_msd_packet_complete();
+ p->status = status;
s->packet = NULL;
usb_packet_complete(&s->dev, p);
}
@@ -196,8 +197,7 @@ static void usb_msd_fatal_error(MSDState *s)
trace_usb_msd_fatal_error();
if (s->packet) {
- s->packet->status = USB_RET_STALL;
- usb_msd_packet_complete(s);
+ usb_msd_packet_complete(s, USB_RET_STALL);
}
/*
@@ -255,8 +255,8 @@ void usb_msd_transfer_data(SCSIRequest *req, uint32_t len)
usb_msd_copy_data(s, p);
p = s->packet;
if (p && p->actual_length == p->iov.size) {
- p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */
- usb_msd_packet_complete(s);
+ /* USB_RET_SUCCESS status clears previous ASYNC status */
+ usb_msd_packet_complete(s, USB_RET_SUCCESS);
}
}
}
@@ -295,8 +295,8 @@ void usb_msd_command_complete(SCSIRequest *req, size_t resid)
s->mode = USB_MSDM_CSW;
}
}
- p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */
- usb_msd_packet_complete(s);
+ /* USB_RET_SUCCESS status clears previous ASYNC status */
+ usb_msd_packet_complete(s, USB_RET_SUCCESS);
} else if (s->data_len == 0) {
s->mode = USB_MSDM_CSW;
}
@@ -332,8 +332,7 @@ void usb_msd_handle_reset(USBDevice *dev)
assert(s->req == NULL);
if (s->packet) {
- s->packet->status = USB_RET_STALL;
- usb_msd_packet_complete(s);
+ usb_msd_packet_complete(s, USB_RET_STALL);
}
memset(&s->csw, 0, sizeof(s->csw));
@@ -586,7 +585,7 @@ static const VMStateDescription vmstate_usb_msd = {
}
};
-static void usb_msd_class_initfn_common(ObjectClass *klass, void *data)
+static void usb_msd_class_initfn_common(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
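
In dev-storage.c the completion helper now receives the packet status as an argument instead of having every caller poke p->status before the call, so the assignment happens in one place, immediately before the packet is handed back to the USB core. Condensed view of the updated helper and a typical call site (trace call omitted):

    static void usb_msd_packet_complete(MSDState *s, int status)
    {
        USBPacket *p = s->packet;

        p->status = status;     /* set once, right before completion */
        s->packet = NULL;
        usb_packet_complete(&s->dev, p);
    }

    /* callers, e.g. on a fatal error or reset: */
    usb_msd_packet_complete(s, USB_RET_STALL);
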
diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
index 1804cb6..21cc283 100644
--- a/hw/usb/dev-uas.c
+++ b/hw/usb/dev-uas.c
@@ -914,7 +914,6 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
err_stream:
error_report("%s: invalid stream %d", __func__, p->stream);
p->status = USB_RET_STALL;
- return;
}
static void usb_uas_unrealize(USBDevice *dev)
@@ -953,12 +952,11 @@ static const VMStateDescription vmstate_usb_uas = {
}
};
-static Property uas_properties[] = {
+static const Property uas_properties[] = {
DEFINE_PROP_UINT32("log-scsi-req", UASDevice, requestlog, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_uas_class_initfn(ObjectClass *klass, void *data)
+static void usb_uas_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c
index 7177c17..f4b71a2 100644
--- a/hw/usb/dev-wacom.c
+++ b/hw/usb/dev-wacom.c
@@ -420,7 +420,7 @@ static const VMStateDescription vmstate_usb_wacom = {
.unmigratable = 1,
};
-static void usb_wacom_class_init(ObjectClass *klass, void *data)
+static void usb_wacom_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c
index b4f0652..8386450 100644
--- a/hw/usb/hcd-dwc2.c
+++ b/hw/usb/hcd-dwc2.c
@@ -1448,12 +1448,11 @@ const VMStateDescription vmstate_dwc2_state = {
}
};
-static Property dwc2_usb_properties[] = {
+static const Property dwc2_usb_properties[] = {
DEFINE_PROP_UINT32("usb_version", DWC2State, usb_version, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void dwc2_class_init(ObjectClass *klass, void *data)
+static void dwc2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
DWC2Class *c = DWC2_USB_CLASS(klass);
diff --git a/hw/usb/hcd-dwc2.h b/hw/usb/hcd-dwc2.h
index 9c3d88e..2d5a569 100644
--- a/hw/usb/hcd-dwc2.h
+++ b/hw/usb/hcd-dwc2.h
@@ -23,7 +23,7 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/usb.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#define DWC2_MMIO_SIZE 0x11000
diff --git a/hw/usb/hcd-dwc3.c b/hw/usb/hcd-dwc3.c
index 09d8e25..98a342b 100644
--- a/hw/usb/hcd-dwc3.c
+++ b/hw/usb/hcd-dwc3.c
@@ -343,6 +343,8 @@ REG32(GFLADJ, 0x530)
FIELD(GFLADJ, GFLADJ_REFCLK_FLADJ, 8, 14)
FIELD(GFLADJ, GFLADJ_30MHZ_SDBND_SEL, 7, 1)
FIELD(GFLADJ, GFLADJ_30MHZ, 0, 6)
+REG32(GUSB2RHBCTL, 0x540)
+ FIELD(GUSB2RHBCTL, OVRD_L1TIMEOUT, 0, 4)
#define DWC3_GLOBAL_OFFSET 0xC100
static void reset_csr(USBDWC3 * s)
@@ -560,6 +562,9 @@ static const RegisterAccessInfo usb_dwc3_regs_info[] = {
.rsvd = 0x40,
.ro = 0x400040,
.unimp = 0xffffffff,
+ },{ .name = "GUSB2RHBCTL", .addr = A_GUSB2RHBCTL,
+ .rsvd = 0xfffffff0,
+ .unimp = 0xffffffff,
}
};
@@ -656,17 +661,16 @@ static const VMStateDescription vmstate_usb_dwc3 = {
}
};
-static Property usb_dwc3_properties[] = {
+static const Property usb_dwc3_properties[] = {
DEFINE_PROP_UINT32("DWC_USB3_USERID", USBDWC3, cfg.dwc_usb3_user,
0x12345678),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_dwc3_class_init(ObjectClass *klass, void *data)
+static void usb_dwc3_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = usb_dwc3_reset;
+ device_class_set_legacy_reset(dc, usb_dwc3_reset);
dc->realize = usb_dwc3_realize;
dc->vmsd = &vmstate_usb_dwc3;
device_class_set_props(dc, usb_dwc3_properties);
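
Besides the property-array conversion, two things change in hcd-dwc3.c: a GUSB2RHBCTL register at offset 0x540 is added to the register map as a mostly reserved/unimplemented entry, and the reset hook is registered through device_class_set_legacy_reset() rather than assigned to dc->reset directly, which, per its name, adapts the old DeviceState-style handler to the current reset machinery. Sketch of the reset-hook pattern, with demo_reset as a placeholder handler:

    static void demo_reset(DeviceState *dev)
    {
        /* device-specific register/state reset goes here */
    }

    static void demo_class_init(ObjectClass *klass, const void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_legacy_reset(dc, demo_reset);  /* replaces dc->reset = demo_reset; */
    }
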
diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c
index 3ff54ed..38ad340 100644
--- a/hw/usb/hcd-ehci-pci.c
+++ b/hw/usb/hcd-ehci-pci.c
@@ -135,9 +135,8 @@ static void usb_ehci_pci_write_config(PCIDevice *dev, uint32_t addr,
i->ehci.as = busmaster ? pci_get_address_space(dev) : &address_space_memory;
}
-static Property ehci_pci_properties[] = {
+static const Property ehci_pci_properties[] = {
DEFINE_PROP_UINT32("maxframes", EHCIPCIState, ehci.maxframes, 128),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_ehci_pci = {
@@ -151,7 +150,7 @@ static const VMStateDescription vmstate_ehci_pci = {
}
};
-static void ehci_class_init(ObjectClass *klass, void *data)
+static void ehci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -162,7 +161,7 @@ static void ehci_class_init(ObjectClass *klass, void *data)
k->config_write = usb_ehci_pci_write_config;
dc->vmsd = &vmstate_ehci_pci;
device_class_set_props(dc, ehci_pci_properties);
- dc->reset = usb_ehci_pci_reset;
+ device_class_set_legacy_reset(dc, usb_ehci_pci_reset);
}
static const TypeInfo ehci_pci_type_info = {
@@ -173,17 +172,17 @@ static const TypeInfo ehci_pci_type_info = {
.instance_finalize = usb_ehci_pci_finalize,
.abstract = true,
.class_init = ehci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void ehci_data_class_init(ObjectClass *klass, void *data)
+static void ehci_data_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- EHCIPCIInfo *i = data;
+ const EHCIPCIInfo *i = data;
k->vendor_id = i->vendor_id;
k->device_id = i->device_id;
@@ -228,7 +227,7 @@ static void ehci_pci_register_types(void)
for (i = 0; i < ARRAY_SIZE(ehci_pci_info); i++) {
ehci_type_info.name = ehci_pci_info[i].name;
ehci_type_info.class_data = ehci_pci_info + i;
- type_register(&ehci_type_info);
+ type_register_static(&ehci_type_info);
}
}
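
The registration loop above still builds one TypeInfo per EHCI PCI variant at runtime, but now registers it with type_register_static(), and the per-variant data attached via class_data arrives in class_init as the (now const) data argument. Sketch of that flow, with DemoVariantInfo as a placeholder struct:

    static void demo_variant_class_init(ObjectClass *klass, const void *data)
    {
        const DemoVariantInfo *info = data;      /* fed from TypeInfo.class_data */
        PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

        k->vendor_id = info->vendor_id;
        k->device_id = info->device_id;
    }
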
diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c
index fe1dabd..0449f5f 100644
--- a/hw/usb/hcd-ehci-sysbus.c
+++ b/hw/usb/hcd-ehci-sysbus.c
@@ -19,7 +19,6 @@
#include "hw/qdev-properties.h"
#include "hw/usb/hcd-ehci.h"
#include "migration/vmstate.h"
-#include "qemu/module.h"
static const VMStateDescription vmstate_ehci_sysbus = {
.name = "ehci-sysbus",
@@ -31,11 +30,10 @@ static const VMStateDescription vmstate_ehci_sysbus = {
}
};
-static Property ehci_sysbus_properties[] = {
+static const Property ehci_sysbus_properties[] = {
DEFINE_PROP_UINT32("maxframes", EHCISysBusState, ehci.maxframes, 128),
DEFINE_PROP_BOOL("companion-enable", EHCISysBusState, ehci.companion_enable,
false),
- DEFINE_PROP_END_OF_LIST(),
};
static void usb_ehci_sysbus_realize(DeviceState *dev, Error **errp)
@@ -82,7 +80,7 @@ static void ehci_sysbus_finalize(Object *obj)
usb_ehci_finalize(s);
}
-static void ehci_sysbus_class_init(ObjectClass *klass, void *data)
+static void ehci_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass);
@@ -93,22 +91,11 @@ static void ehci_sysbus_class_init(ObjectClass *klass, void *data)
dc->realize = usb_ehci_sysbus_realize;
dc->vmsd = &vmstate_ehci_sysbus;
device_class_set_props(dc, ehci_sysbus_properties);
- dc->reset = usb_ehci_sysbus_reset;
+ device_class_set_legacy_reset(dc, usb_ehci_sysbus_reset);
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_type_info = {
- .name = TYPE_SYS_BUS_EHCI,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(EHCISysBusState),
- .instance_init = ehci_sysbus_init,
- .instance_finalize = ehci_sysbus_finalize,
- .abstract = true,
- .class_init = ehci_sysbus_class_init,
- .class_size = sizeof(SysBusEHCIClass),
-};
-
-static void ehci_platform_class_init(ObjectClass *oc, void *data)
+static void ehci_platform_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -118,13 +105,7 @@ static void ehci_platform_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_platform_type_info = {
- .name = TYPE_PLATFORM_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_platform_class_init,
-};
-
-static void ehci_exynos4210_class_init(ObjectClass *oc, void *data)
+static void ehci_exynos4210_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -134,13 +115,7 @@ static void ehci_exynos4210_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_exynos4210_type_info = {
- .name = TYPE_EXYNOS4210_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_exynos4210_class_init,
-};
-
-static void ehci_aw_h3_class_init(ObjectClass *oc, void *data)
+static void ehci_aw_h3_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -150,13 +125,7 @@ static void ehci_aw_h3_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_aw_h3_type_info = {
- .name = TYPE_AW_H3_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_aw_h3_class_init,
-};
-
-static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data)
+static void ehci_npcm7xx_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -168,13 +137,7 @@ static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_npcm7xx_type_info = {
- .name = TYPE_NPCM7XX_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_npcm7xx_class_init,
-};
-
-static void ehci_tegra2_class_init(ObjectClass *oc, void *data)
+static void ehci_tegra2_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -184,12 +147,6 @@ static void ehci_tegra2_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_tegra2_type_info = {
- .name = TYPE_TEGRA2_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_tegra2_class_init,
-};
-
static void ehci_ppc4xx_init(Object *o)
{
EHCISysBusState *s = SYS_BUS_EHCI(o);
@@ -197,7 +154,7 @@ static void ehci_ppc4xx_init(Object *o)
s->ehci.companion_enable = true;
}
-static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data)
+static void ehci_ppc4xx_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -207,13 +164,6 @@ static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_ppc4xx_type_info = {
- .name = TYPE_PPC4xx_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .class_init = ehci_ppc4xx_class_init,
- .instance_init = ehci_ppc4xx_init,
-};
-
/*
* Faraday FUSBH200 USB 2.0 EHCI
*/
@@ -270,7 +220,7 @@ static void fusbh200_ehci_init(Object *obj)
&f->mem_vendor);
}
-static void fusbh200_ehci_class_init(ObjectClass *oc, void *data)
+static void fusbh200_ehci_class_init(ObjectClass *oc, const void *data)
{
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -282,24 +232,55 @@ static void fusbh200_ehci_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
-static const TypeInfo ehci_fusbh200_type_info = {
- .name = TYPE_FUSBH200_EHCI,
- .parent = TYPE_SYS_BUS_EHCI,
- .instance_size = sizeof(FUSBH200EHCIState),
- .instance_init = fusbh200_ehci_init,
- .class_init = fusbh200_ehci_class_init,
+static const TypeInfo ehci_sysbus_types[] = {
+ {
+ .name = TYPE_SYS_BUS_EHCI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(EHCISysBusState),
+ .instance_init = ehci_sysbus_init,
+ .instance_finalize = ehci_sysbus_finalize,
+ .abstract = true,
+ .class_init = ehci_sysbus_class_init,
+ .class_size = sizeof(SysBusEHCIClass),
+ },
+ {
+ .name = TYPE_PLATFORM_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_platform_class_init,
+ },
+ {
+ .name = TYPE_EXYNOS4210_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_exynos4210_class_init,
+ },
+ {
+ .name = TYPE_AW_H3_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_aw_h3_class_init,
+ },
+ {
+ .name = TYPE_NPCM7XX_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_npcm7xx_class_init,
+ },
+ {
+ .name = TYPE_TEGRA2_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_tegra2_class_init,
+ },
+ {
+ .name = TYPE_PPC4xx_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .class_init = ehci_ppc4xx_class_init,
+ .instance_init = ehci_ppc4xx_init,
+ },
+ {
+ .name = TYPE_FUSBH200_EHCI,
+ .parent = TYPE_SYS_BUS_EHCI,
+ .instance_size = sizeof(FUSBH200EHCIState),
+ .instance_init = fusbh200_ehci_init,
+ .class_init = fusbh200_ehci_class_init,
+ },
};
-static void ehci_sysbus_register_types(void)
-{
- type_register_static(&ehci_type_info);
- type_register_static(&ehci_platform_type_info);
- type_register_static(&ehci_exynos4210_type_info);
- type_register_static(&ehci_aw_h3_type_info);
- type_register_static(&ehci_npcm7xx_type_info);
- type_register_static(&ehci_tegra2_type_info);
- type_register_static(&ehci_ppc4xx_type_info);
- type_register_static(&ehci_fusbh200_type_info);
-}
-
-type_init(ehci_sysbus_register_types)
+DEFINE_TYPES(ehci_sysbus_types)
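
hcd-ehci-sysbus.c collapses eight separately declared TypeInfo structures and the hand-written type_init registration function into a single const TypeInfo array registered with DEFINE_TYPES(). The shape, reduced to two placeholder entries:

    static const TypeInfo demo_types[] = {
        {
            .name       = TYPE_DEMO_BASE,        /* placeholder type names */
            .parent     = TYPE_SYS_BUS_DEVICE,
            .abstract   = true,
            .class_init = demo_base_class_init,
        },
        {
            .name       = TYPE_DEMO_CHILD,
            .parent     = TYPE_DEMO_BASE,
            .class_init = demo_child_class_init,
        },
    };

    DEFINE_TYPES(demo_types)
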
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index 01864d4..b090f25 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -35,7 +35,7 @@
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#define FRAME_TIMER_FREQ 1000
#define FRAME_TIMER_NS (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ)
@@ -2287,7 +2287,8 @@ static void ehci_work_bh(void *opaque)
ehci_update_frindex(ehci, skipped_uframes);
ehci->last_run_ns += UFRAME_TIMER_NS * skipped_uframes;
uframes -= skipped_uframes;
- DPRINTF("WARNING - EHCI skipped %d uframes\n", skipped_uframes);
+ DPRINTF("WARNING - EHCI skipped %"PRIu64" uframes\n",
+ skipped_uframes);
}
for (i = 0; i < uframes; i++) {
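
The DPRINTF fix in ehci_work_bh() matters because skipped_uframes is evidently a 64-bit count (hence PRIu64): passing a 64-bit value where %d expects an int is undefined behaviour, so the format switches to the inttypes.h macro. The same pattern in isolation, assuming a uint64_t value and <inttypes.h> included:

    printf("skipped %" PRIu64 " uframes\n", skipped_uframes);
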
diff --git a/hw/usb/hcd-ehci.h b/hw/usb/hcd-ehci.h
index 56a1c09..ffd6c51 100644
--- a/hw/usb/hcd-ehci.h
+++ b/hw/usb/hcd-ehci.h
@@ -20,7 +20,7 @@
#include "qemu/timer.h"
#include "hw/usb.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/pci/pci_device.h"
#include "hw/sysbus.h"
diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c
deleted file mode 100644
index 6dca373..0000000
--- a/hw/usb/hcd-musb.c
+++ /dev/null
@@ -1,1553 +0,0 @@
-/*
- * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics,
- * USB2.0 OTG compliant core used in various chips.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Only host-mode and non-DMA accesses are currently supported.
- */
-#include "qemu/osdep.h"
-#include "qemu/timer.h"
-#include "hw/usb.h"
-#include "hw/usb/hcd-musb.h"
-#include "hw/irq.h"
-#include "hw/hw.h"
-
-/* Common USB registers */
-#define MUSB_HDRC_FADDR 0x00 /* 8-bit */
-#define MUSB_HDRC_POWER 0x01 /* 8-bit */
-
-#define MUSB_HDRC_INTRTX 0x02 /* 16-bit */
-#define MUSB_HDRC_INTRRX 0x04
-#define MUSB_HDRC_INTRTXE 0x06
-#define MUSB_HDRC_INTRRXE 0x08
-#define MUSB_HDRC_INTRUSB 0x0a /* 8 bit */
-#define MUSB_HDRC_INTRUSBE 0x0b /* 8 bit */
-#define MUSB_HDRC_FRAME 0x0c /* 16-bit */
-#define MUSB_HDRC_INDEX 0x0e /* 8 bit */
-#define MUSB_HDRC_TESTMODE 0x0f /* 8 bit */
-
-/* Per-EP registers in indexed mode */
-#define MUSB_HDRC_EP_IDX 0x10 /* 8-bit */
-
-/* EP FIFOs */
-#define MUSB_HDRC_FIFO 0x20
-
-/* Additional Control Registers */
-#define MUSB_HDRC_DEVCTL 0x60 /* 8 bit */
-
-/* These are indexed */
-#define MUSB_HDRC_TXFIFOSZ 0x62 /* 8 bit (see masks) */
-#define MUSB_HDRC_RXFIFOSZ 0x63 /* 8 bit (see masks) */
-#define MUSB_HDRC_TXFIFOADDR 0x64 /* 16 bit offset shifted right 3 */
-#define MUSB_HDRC_RXFIFOADDR 0x66 /* 16 bit offset shifted right 3 */
-
-/* Some more registers */
-#define MUSB_HDRC_VCTRL 0x68 /* 8 bit */
-#define MUSB_HDRC_HWVERS 0x6c /* 8 bit */
-
-/* Added in HDRC 1.9(?) & MHDRC 1.4 */
-/* ULPI pass-through */
-#define MUSB_HDRC_ULPI_VBUSCTL 0x70
-#define MUSB_HDRC_ULPI_REGDATA 0x74
-#define MUSB_HDRC_ULPI_REGADDR 0x75
-#define MUSB_HDRC_ULPI_REGCTL 0x76
-
-/* Extended config & PHY control */
-#define MUSB_HDRC_ENDCOUNT 0x78 /* 8 bit */
-#define MUSB_HDRC_DMARAMCFG 0x79 /* 8 bit */
-#define MUSB_HDRC_PHYWAIT 0x7a /* 8 bit */
-#define MUSB_HDRC_PHYVPLEN 0x7b /* 8 bit */
-#define MUSB_HDRC_HS_EOF1 0x7c /* 8 bit, units of 546.1 us */
-#define MUSB_HDRC_FS_EOF1 0x7d /* 8 bit, units of 533.3 ns */
-#define MUSB_HDRC_LS_EOF1 0x7e /* 8 bit, units of 1.067 us */
-
-/* Per-EP BUSCTL registers */
-#define MUSB_HDRC_BUSCTL 0x80
-
-/* Per-EP registers in flat mode */
-#define MUSB_HDRC_EP 0x100
-
-/* offsets to registers in flat model */
-#define MUSB_HDRC_TXMAXP 0x00 /* 16 bit apparently */
-#define MUSB_HDRC_TXCSR 0x02 /* 16 bit apparently */
-#define MUSB_HDRC_CSR0 MUSB_HDRC_TXCSR /* re-used for EP0 */
-#define MUSB_HDRC_RXMAXP 0x04 /* 16 bit apparently */
-#define MUSB_HDRC_RXCSR 0x06 /* 16 bit apparently */
-#define MUSB_HDRC_RXCOUNT 0x08 /* 16 bit apparently */
-#define MUSB_HDRC_COUNT0 MUSB_HDRC_RXCOUNT /* re-used for EP0 */
-#define MUSB_HDRC_TXTYPE 0x0a /* 8 bit apparently */
-#define MUSB_HDRC_TYPE0 MUSB_HDRC_TXTYPE /* re-used for EP0 */
-#define MUSB_HDRC_TXINTERVAL 0x0b /* 8 bit apparently */
-#define MUSB_HDRC_NAKLIMIT0 MUSB_HDRC_TXINTERVAL /* re-used for EP0 */
-#define MUSB_HDRC_RXTYPE 0x0c /* 8 bit apparently */
-#define MUSB_HDRC_RXINTERVAL 0x0d /* 8 bit apparently */
-#define MUSB_HDRC_FIFOSIZE 0x0f /* 8 bit apparently */
-#define MUSB_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */
-
-/* "Bus control" registers */
-#define MUSB_HDRC_TXFUNCADDR 0x00
-#define MUSB_HDRC_TXHUBADDR 0x02
-#define MUSB_HDRC_TXHUBPORT 0x03
-
-#define MUSB_HDRC_RXFUNCADDR 0x04
-#define MUSB_HDRC_RXHUBADDR 0x06
-#define MUSB_HDRC_RXHUBPORT 0x07
-
-/*
- * MUSBHDRC Register bit masks
- */
-
-/* POWER */
-#define MGC_M_POWER_ISOUPDATE 0x80
-#define MGC_M_POWER_SOFTCONN 0x40
-#define MGC_M_POWER_HSENAB 0x20
-#define MGC_M_POWER_HSMODE 0x10
-#define MGC_M_POWER_RESET 0x08
-#define MGC_M_POWER_RESUME 0x04
-#define MGC_M_POWER_SUSPENDM 0x02
-#define MGC_M_POWER_ENSUSPEND 0x01
-
-/* INTRUSB */
-#define MGC_M_INTR_SUSPEND 0x01
-#define MGC_M_INTR_RESUME 0x02
-#define MGC_M_INTR_RESET 0x04
-#define MGC_M_INTR_BABBLE 0x04
-#define MGC_M_INTR_SOF 0x08
-#define MGC_M_INTR_CONNECT 0x10
-#define MGC_M_INTR_DISCONNECT 0x20
-#define MGC_M_INTR_SESSREQ 0x40
-#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */
-#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */
-
-/* DEVCTL */
-#define MGC_M_DEVCTL_BDEVICE 0x80
-#define MGC_M_DEVCTL_FSDEV 0x40
-#define MGC_M_DEVCTL_LSDEV 0x20
-#define MGC_M_DEVCTL_VBUS 0x18
-#define MGC_S_DEVCTL_VBUS 3
-#define MGC_M_DEVCTL_HM 0x04
-#define MGC_M_DEVCTL_HR 0x02
-#define MGC_M_DEVCTL_SESSION 0x01
-
-/* TESTMODE */
-#define MGC_M_TEST_FORCE_HOST 0x80
-#define MGC_M_TEST_FIFO_ACCESS 0x40
-#define MGC_M_TEST_FORCE_FS 0x20
-#define MGC_M_TEST_FORCE_HS 0x10
-#define MGC_M_TEST_PACKET 0x08
-#define MGC_M_TEST_K 0x04
-#define MGC_M_TEST_J 0x02
-#define MGC_M_TEST_SE0_NAK 0x01
-
-/* CSR0 */
-#define MGC_M_CSR0_FLUSHFIFO 0x0100
-#define MGC_M_CSR0_TXPKTRDY 0x0002
-#define MGC_M_CSR0_RXPKTRDY 0x0001
-
-/* CSR0 in Peripheral mode */
-#define MGC_M_CSR0_P_SVDSETUPEND 0x0080
-#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040
-#define MGC_M_CSR0_P_SENDSTALL 0x0020
-#define MGC_M_CSR0_P_SETUPEND 0x0010
-#define MGC_M_CSR0_P_DATAEND 0x0008
-#define MGC_M_CSR0_P_SENTSTALL 0x0004
-
-/* CSR0 in Host mode */
-#define MGC_M_CSR0_H_NO_PING 0x0800
-#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */
-#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */
-#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080
-#define MGC_M_CSR0_H_STATUSPKT 0x0040
-#define MGC_M_CSR0_H_REQPKT 0x0020
-#define MGC_M_CSR0_H_ERROR 0x0010
-#define MGC_M_CSR0_H_SETUPPKT 0x0008
-#define MGC_M_CSR0_H_RXSTALL 0x0004
-
-/* CONFIGDATA */
-#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */
-#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */
-#define MGC_M_CONFIGDATA_BIGENDIAN 0x20
-#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
-#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
-#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */
-#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
-#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* Width, 0 => 8b, 1 => 16b */
-
-/* TXCSR in Peripheral and Host mode */
-#define MGC_M_TXCSR_AUTOSET 0x8000
-#define MGC_M_TXCSR_ISO 0x4000
-#define MGC_M_TXCSR_MODE 0x2000
-#define MGC_M_TXCSR_DMAENAB 0x1000
-#define MGC_M_TXCSR_FRCDATATOG 0x0800
-#define MGC_M_TXCSR_DMAMODE 0x0400
-#define MGC_M_TXCSR_CLRDATATOG 0x0040
-#define MGC_M_TXCSR_FLUSHFIFO 0x0008
-#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002
-#define MGC_M_TXCSR_TXPKTRDY 0x0001
-
-/* TXCSR in Peripheral mode */
-#define MGC_M_TXCSR_P_INCOMPTX 0x0080
-#define MGC_M_TXCSR_P_SENTSTALL 0x0020
-#define MGC_M_TXCSR_P_SENDSTALL 0x0010
-#define MGC_M_TXCSR_P_UNDERRUN 0x0004
-
-/* TXCSR in Host mode */
-#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200
-#define MGC_M_TXCSR_H_DATATOGGLE 0x0100
-#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080
-#define MGC_M_TXCSR_H_RXSTALL 0x0020
-#define MGC_M_TXCSR_H_ERROR 0x0004
-
-/* RXCSR in Peripheral and Host mode */
-#define MGC_M_RXCSR_AUTOCLEAR 0x8000
-#define MGC_M_RXCSR_DMAENAB 0x2000
-#define MGC_M_RXCSR_DISNYET 0x1000
-#define MGC_M_RXCSR_DMAMODE 0x0800
-#define MGC_M_RXCSR_INCOMPRX 0x0100
-#define MGC_M_RXCSR_CLRDATATOG 0x0080
-#define MGC_M_RXCSR_FLUSHFIFO 0x0010
-#define MGC_M_RXCSR_DATAERROR 0x0008
-#define MGC_M_RXCSR_FIFOFULL 0x0002
-#define MGC_M_RXCSR_RXPKTRDY 0x0001
-
-/* RXCSR in Peripheral mode */
-#define MGC_M_RXCSR_P_ISO 0x4000
-#define MGC_M_RXCSR_P_SENTSTALL 0x0040
-#define MGC_M_RXCSR_P_SENDSTALL 0x0020
-#define MGC_M_RXCSR_P_OVERRUN 0x0004
-
-/* RXCSR in Host mode */
-#define MGC_M_RXCSR_H_AUTOREQ 0x4000
-#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400
-#define MGC_M_RXCSR_H_DATATOGGLE 0x0200
-#define MGC_M_RXCSR_H_RXSTALL 0x0040
-#define MGC_M_RXCSR_H_REQPKT 0x0020
-#define MGC_M_RXCSR_H_ERROR 0x0004
-
-/* HUBADDR */
-#define MGC_M_HUBADDR_MULTI_TT 0x80
-
-/* ULPI: Added in HDRC 1.9(?) & MHDRC 1.4 */
-#define MGC_M_ULPI_VBCTL_USEEXTVBUSIND 0x02
-#define MGC_M_ULPI_VBCTL_USEEXTVBUS 0x01
-#define MGC_M_ULPI_REGCTL_INT_ENABLE 0x08
-#define MGC_M_ULPI_REGCTL_READNOTWRITE 0x04
-#define MGC_M_ULPI_REGCTL_COMPLETE 0x02
-#define MGC_M_ULPI_REGCTL_REG 0x01
-
-/* #define MUSB_DEBUG */
-
-#ifdef MUSB_DEBUG
-#define TRACE(fmt, ...) fprintf(stderr, "%s@%d: " fmt "\n", __func__, \
- __LINE__, ##__VA_ARGS__)
-#else
-#define TRACE(...)
-#endif
-
-
-static void musb_attach(USBPort *port);
-static void musb_detach(USBPort *port);
-static void musb_child_detach(USBPort *port, USBDevice *child);
-static void musb_schedule_cb(USBPort *port, USBPacket *p);
-static void musb_async_cancel_device(MUSBState *s, USBDevice *dev);
-
-static USBPortOps musb_port_ops = {
- .attach = musb_attach,
- .detach = musb_detach,
- .child_detach = musb_child_detach,
- .complete = musb_schedule_cb,
-};
-
-static USBBusOps musb_bus_ops = {
-};
-
-typedef struct MUSBPacket MUSBPacket;
-typedef struct MUSBEndPoint MUSBEndPoint;
-
-struct MUSBPacket {
- USBPacket p;
- MUSBEndPoint *ep;
- int dir;
-};
-
-struct MUSBEndPoint {
- uint16_t faddr[2];
- uint8_t haddr[2];
- uint8_t hport[2];
- uint16_t csr[2];
- uint16_t maxp[2];
- uint16_t rxcount;
- uint8_t type[2];
- uint8_t interval[2];
- uint8_t config;
- uint8_t fifosize;
- int timeout[2]; /* Always in microframes */
-
- uint8_t *buf[2];
- int fifolen[2];
- int fifostart[2];
- int fifoaddr[2];
- MUSBPacket packey[2];
- int status[2];
- int ext_size[2];
-
- /* For callbacks' use */
- int epnum;
- int interrupt[2];
- MUSBState *musb;
- USBCallback *delayed_cb[2];
- QEMUTimer *intv_timer[2];
-};
-
-struct MUSBState {
- qemu_irq irqs[musb_irq_max];
- USBBus bus;
- USBPort port;
-
- int idx;
- uint8_t devctl;
- uint8_t power;
- uint8_t faddr;
-
- uint8_t intr;
- uint8_t mask;
- uint16_t tx_intr;
- uint16_t tx_mask;
- uint16_t rx_intr;
- uint16_t rx_mask;
-
- int setup_len;
- int session;
-
- uint8_t buf[0x8000];
-
- /* Duplicating the world since 2008!... probably we should have 32
- * logical, single endpoints instead. */
- MUSBEndPoint ep[16];
-};
-
-void musb_reset(MUSBState *s)
-{
- int i;
-
- s->faddr = 0x00;
- s->devctl = 0;
- s->power = MGC_M_POWER_HSENAB;
- s->tx_intr = 0x0000;
- s->rx_intr = 0x0000;
- s->tx_mask = 0xffff;
- s->rx_mask = 0xffff;
- s->intr = 0x00;
- s->mask = 0x06;
- s->idx = 0;
-
- s->setup_len = 0;
- s->session = 0;
- memset(s->buf, 0, sizeof(s->buf));
-
- /* TODO: _DW */
- s->ep[0].config = MGC_M_CONFIGDATA_SOFTCONE | MGC_M_CONFIGDATA_DYNFIFO;
- for (i = 0; i < 16; i ++) {
- s->ep[i].fifosize = 64;
- s->ep[i].maxp[0] = 0x40;
- s->ep[i].maxp[1] = 0x40;
- s->ep[i].musb = s;
- s->ep[i].epnum = i;
- usb_packet_init(&s->ep[i].packey[0].p);
- usb_packet_init(&s->ep[i].packey[1].p);
- }
-}
-
-struct MUSBState *musb_init(DeviceState *parent_device, int gpio_base)
-{
- MUSBState *s = g_malloc0(sizeof(*s));
- int i;
-
- for (i = 0; i < musb_irq_max; i++) {
- s->irqs[i] = qdev_get_gpio_in(parent_device, gpio_base + i);
- }
-
- musb_reset(s);
-
- usb_bus_new(&s->bus, sizeof(s->bus), &musb_bus_ops, parent_device);
- usb_register_port(&s->bus, &s->port, s, 0, &musb_port_ops,
- USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
-
- return s;
-}
-
-static void musb_vbus_set(MUSBState *s, int level)
-{
- if (level)
- s->devctl |= 3 << MGC_S_DEVCTL_VBUS;
- else
- s->devctl &= ~MGC_M_DEVCTL_VBUS;
-
- qemu_set_irq(s->irqs[musb_set_vbus], level);
-}
-
-static void musb_intr_set(MUSBState *s, int line, int level)
-{
- if (!level) {
- s->intr &= ~(1 << line);
- qemu_irq_lower(s->irqs[line]);
- } else if (s->mask & (1 << line)) {
- s->intr |= 1 << line;
- qemu_irq_raise(s->irqs[line]);
- }
-}
-
-static void musb_tx_intr_set(MUSBState *s, int line, int level)
-{
- if (!level) {
- s->tx_intr &= ~(1 << line);
- if (!s->tx_intr)
- qemu_irq_lower(s->irqs[musb_irq_tx]);
- } else if (s->tx_mask & (1 << line)) {
- s->tx_intr |= 1 << line;
- qemu_irq_raise(s->irqs[musb_irq_tx]);
- }
-}
-
-static void musb_rx_intr_set(MUSBState *s, int line, int level)
-{
- if (line) {
- if (!level) {
- s->rx_intr &= ~(1 << line);
- if (!s->rx_intr)
- qemu_irq_lower(s->irqs[musb_irq_rx]);
- } else if (s->rx_mask & (1 << line)) {
- s->rx_intr |= 1 << line;
- qemu_irq_raise(s->irqs[musb_irq_rx]);
- }
- } else
- musb_tx_intr_set(s, line, level);
-}
-
-uint32_t musb_core_intr_get(MUSBState *s)
-{
- return (s->rx_intr << 15) | s->tx_intr;
-}
-
-void musb_core_intr_clear(MUSBState *s, uint32_t mask)
-{
- if (s->rx_intr) {
- s->rx_intr &= mask >> 15;
- if (!s->rx_intr)
- qemu_irq_lower(s->irqs[musb_irq_rx]);
- }
-
- if (s->tx_intr) {
- s->tx_intr &= mask & 0xffff;
- if (!s->tx_intr)
- qemu_irq_lower(s->irqs[musb_irq_tx]);
- }
-}
-
-void musb_set_size(MUSBState *s, int epnum, int size, int is_tx)
-{
- s->ep[epnum].ext_size[!is_tx] = size;
- s->ep[epnum].fifostart[0] = 0;
- s->ep[epnum].fifostart[1] = 0;
- s->ep[epnum].fifolen[0] = 0;
- s->ep[epnum].fifolen[1] = 0;
-}
-
-static void musb_session_update(MUSBState *s, int prev_dev, int prev_sess)
-{
- int detect_prev = prev_dev && prev_sess;
- int detect = !!s->port.dev && s->session;
-
- if (detect && !detect_prev) {
- /* Let's skip the ID pin sense and VBUS sense formalities and
- * and signal a successful SRP directly. This should work at least
- * for the Linux driver stack. */
- musb_intr_set(s, musb_irq_connect, 1);
-
- if (s->port.dev->speed == USB_SPEED_LOW) {
- s->devctl &= ~MGC_M_DEVCTL_FSDEV;
- s->devctl |= MGC_M_DEVCTL_LSDEV;
- } else {
- s->devctl |= MGC_M_DEVCTL_FSDEV;
- s->devctl &= ~MGC_M_DEVCTL_LSDEV;
- }
-
- /* A-mode? */
- s->devctl &= ~MGC_M_DEVCTL_BDEVICE;
-
- /* Host-mode bit? */
- s->devctl |= MGC_M_DEVCTL_HM;
-#if 1
- musb_vbus_set(s, 1);
-#endif
- } else if (!detect && detect_prev) {
-#if 1
- musb_vbus_set(s, 0);
-#endif
- }
-}
-
-/* Attach or detach a device on our only port. */
-static void musb_attach(USBPort *port)
-{
- MUSBState *s = (MUSBState *) port->opaque;
-
- musb_intr_set(s, musb_irq_vbus_request, 1);
- musb_session_update(s, 0, s->session);
-}
-
-static void musb_detach(USBPort *port)
-{
- MUSBState *s = (MUSBState *) port->opaque;
-
- musb_async_cancel_device(s, port->dev);
-
- musb_intr_set(s, musb_irq_disconnect, 1);
- musb_session_update(s, 1, s->session);
-}
-
-static void musb_child_detach(USBPort *port, USBDevice *child)
-{
- MUSBState *s = (MUSBState *) port->opaque;
-
- musb_async_cancel_device(s, child);
-}
-
-static void musb_cb_tick0(void *opaque)
-{
- MUSBEndPoint *ep = (MUSBEndPoint *) opaque;
-
- ep->delayed_cb[0](&ep->packey[0].p, opaque);
-}
-
-static void musb_cb_tick1(void *opaque)
-{
- MUSBEndPoint *ep = (MUSBEndPoint *) opaque;
-
- ep->delayed_cb[1](&ep->packey[1].p, opaque);
-}
-
-#define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0)
-
-static void musb_schedule_cb(USBPort *port, USBPacket *packey)
-{
- MUSBPacket *p = container_of(packey, MUSBPacket, p);
- MUSBEndPoint *ep = p->ep;
- int dir = p->dir;
- int timeout = 0;
-
- if (ep->status[dir] == USB_RET_NAK)
- timeout = ep->timeout[dir];
- else if (ep->interrupt[dir])
- timeout = 8;
- else {
- musb_cb_tick(ep);
- return;
- }
-
- if (!ep->intv_timer[dir])
- ep->intv_timer[dir] = timer_new_ns(QEMU_CLOCK_VIRTUAL, musb_cb_tick, ep);
-
- timer_mod(ep->intv_timer[dir], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- muldiv64(timeout, NANOSECONDS_PER_SECOND, 8000));
-}
-
-static int musb_timeout(int ttype, int speed, int val)
-{
-#if 1
- return val << 3;
-#endif
-
- switch (ttype) {
- case USB_ENDPOINT_XFER_CONTROL:
- if (val < 2)
- return 0;
- else if (speed == USB_SPEED_HIGH)
- return 1 << (val - 1);
- else
- return 8 << (val - 1);
-
- case USB_ENDPOINT_XFER_INT:
- if (speed == USB_SPEED_HIGH)
- if (val < 2)
- return 0;
- else
- return 1 << (val - 1);
- else
- return val << 3;
-
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_ISOC:
- if (val < 2)
- return 0;
- else if (speed == USB_SPEED_HIGH)
- return 1 << (val - 1);
- else
- return 8 << (val - 1);
- /* TODO: what with low-speed Bulk and Isochronous? */
- }
-
- hw_error("bad interval\n");
-}
-
-static void musb_packet(MUSBState *s, MUSBEndPoint *ep,
- int epnum, int pid, int len, USBCallback cb, int dir)
-{
- USBDevice *dev;
- USBEndpoint *uep;
- int idx = epnum && dir;
- int id;
- int ttype;
-
- /* ep->type[0,1] contains:
- * in bits 7:6 the speed (0 - invalid, 1 - high, 2 - full, 3 - slow)
- * in bits 5:4 the transfer type (BULK / INT)
- * in bits 3:0 the EP num
- */
- ttype = epnum ? (ep->type[idx] >> 4) & 3 : 0;
-
- ep->timeout[dir] = musb_timeout(ttype,
- ep->type[idx] >> 6, ep->interval[idx]);
- ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT;
- ep->delayed_cb[dir] = cb;
-
- /* A wild guess on the FADDR semantics... */
- dev = usb_find_device(&s->port, ep->faddr[idx]);
- if (dev == NULL) {
- return;
- }
- uep = usb_ep_get(dev, pid, ep->type[idx] & 0xf);
- id = pid | (dev->addr << 16) | (uep->nr << 8);
- usb_packet_setup(&ep->packey[dir].p, pid, uep, 0, id, false, true);
- usb_packet_addbuf(&ep->packey[dir].p, ep->buf[idx], len);
- ep->packey[dir].ep = ep;
- ep->packey[dir].dir = dir;
-
- usb_handle_packet(dev, &ep->packey[dir].p);
-
- if (ep->packey[dir].p.status == USB_RET_ASYNC) {
- usb_device_flush_ep_queue(dev, uep);
- ep->status[dir] = len;
- return;
- }
-
- if (ep->packey[dir].p.status == USB_RET_SUCCESS) {
- ep->status[dir] = ep->packey[dir].p.actual_length;
- } else {
- ep->status[dir] = ep->packey[dir].p.status;
- }
- musb_schedule_cb(&s->port, &ep->packey[dir].p);
-}
-
-static void musb_tx_packet_complete(USBPacket *packey, void *opaque)
-{
- /* Unfortunately we can't use packey->devep because that's the remote
- * endpoint number and may be different than our local. */
- MUSBEndPoint *ep = (MUSBEndPoint *) opaque;
- int epnum = ep->epnum;
- MUSBState *s = ep->musb;
-
- ep->fifostart[0] = 0;
- ep->fifolen[0] = 0;
-#ifdef CLEAR_NAK
- if (ep->status[0] != USB_RET_NAK) {
-#endif
- if (epnum)
- ep->csr[0] &= ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY);
- else
- ep->csr[0] &= ~MGC_M_CSR0_TXPKTRDY;
-#ifdef CLEAR_NAK
- }
-#endif
-
- /* Clear all of the error bits first */
- if (epnum)
- ep->csr[0] &= ~(MGC_M_TXCSR_H_ERROR | MGC_M_TXCSR_H_RXSTALL |
- MGC_M_TXCSR_H_NAKTIMEOUT);
- else
- ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL |
- MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING);
-
- if (ep->status[0] == USB_RET_STALL) {
- /* Command not supported by target! */
- ep->status[0] = 0;
-
- if (epnum)
- ep->csr[0] |= MGC_M_TXCSR_H_RXSTALL;
- else
- ep->csr[0] |= MGC_M_CSR0_H_RXSTALL;
- }
-
- if (ep->status[0] == USB_RET_NAK) {
- ep->status[0] = 0;
-
- /* NAK timeouts are only generated in Bulk transfers and
- * Data-errors in Isochronous. */
- if (ep->interrupt[0]) {
- return;
- }
-
- if (epnum)
- ep->csr[0] |= MGC_M_TXCSR_H_NAKTIMEOUT;
- else
- ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT;
- }
-
- if (ep->status[0] < 0) {
- if (ep->status[0] == USB_RET_BABBLE)
- musb_intr_set(s, musb_irq_rst_babble, 1);
-
- /* Pretend we've tried three times already and failed (in
- * case of USB_TOKEN_SETUP). */
- if (epnum)
- ep->csr[0] |= MGC_M_TXCSR_H_ERROR;
- else
- ep->csr[0] |= MGC_M_CSR0_H_ERROR;
-
- musb_tx_intr_set(s, epnum, 1);
- return;
- }
- /* TODO: check len for over/underruns of an OUT packet? */
-
-#ifdef SETUPLEN_HACK
- if (!epnum && ep->packey[0].pid == USB_TOKEN_SETUP)
- s->setup_len = ep->packey[0].data[6];
-#endif
-
- /* In DMA mode: if no error, assert DMA request for this EP,
- * and skip the interrupt. */
- musb_tx_intr_set(s, epnum, 1);
-}
-
-static void musb_rx_packet_complete(USBPacket *packey, void *opaque)
-{
- /* Unfortunately we can't use packey->devep because that's the remote
- * endpoint number and may be different than our local. */
- MUSBEndPoint *ep = (MUSBEndPoint *) opaque;
- int epnum = ep->epnum;
- MUSBState *s = ep->musb;
-
- ep->fifostart[1] = 0;
- ep->fifolen[1] = 0;
-
-#ifdef CLEAR_NAK
- if (ep->status[1] != USB_RET_NAK) {
-#endif
- ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT;
- if (!epnum)
- ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT;
-#ifdef CLEAR_NAK
- }
-#endif
-
- /* Clear all of the imaginable error bits first */
- ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL |
- MGC_M_RXCSR_DATAERROR);
- if (!epnum)
- ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL |
- MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING);
-
- if (ep->status[1] == USB_RET_STALL) {
- ep->status[1] = 0;
-
- ep->csr[1] |= MGC_M_RXCSR_H_RXSTALL;
- if (!epnum)
- ep->csr[0] |= MGC_M_CSR0_H_RXSTALL;
- }
-
- if (ep->status[1] == USB_RET_NAK) {
- ep->status[1] = 0;
-
- /* NAK timeouts are only generated in Bulk transfers and
- * Data-errors in Isochronous. */
- if (ep->interrupt[1]) {
- musb_packet(s, ep, epnum, USB_TOKEN_IN,
- packey->iov.size, musb_rx_packet_complete, 1);
- return;
- }
-
- ep->csr[1] |= MGC_M_RXCSR_DATAERROR;
- if (!epnum)
- ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT;
- }
-
- if (ep->status[1] < 0) {
- if (ep->status[1] == USB_RET_BABBLE) {
- musb_intr_set(s, musb_irq_rst_babble, 1);
- return;
- }
-
- /* Pretend we've tried three times already and failed (in
- * case of a control transfer). */
- ep->csr[1] |= MGC_M_RXCSR_H_ERROR;
- if (!epnum)
- ep->csr[0] |= MGC_M_CSR0_H_ERROR;
-
- musb_rx_intr_set(s, epnum, 1);
- return;
- }
- /* TODO: check len for over/underruns of an OUT packet? */
- /* TODO: perhaps make use of e->ext_size[1] here. */
-
- if (!(ep->csr[1] & (MGC_M_RXCSR_H_RXSTALL | MGC_M_RXCSR_DATAERROR))) {
- ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY;
- if (!epnum)
- ep->csr[0] |= MGC_M_CSR0_RXPKTRDY;
-
- ep->rxcount = ep->status[1]; /* XXX: MIN(packey->len, ep->maxp[1]); */
- /* In DMA mode: assert DMA request for this EP */
- }
-
- /* Only if DMA has not been asserted */
- musb_rx_intr_set(s, epnum, 1);
-}
-
-static void musb_async_cancel_device(MUSBState *s, USBDevice *dev)
-{
- int ep, dir;
-
- for (ep = 0; ep < 16; ep++) {
- for (dir = 0; dir < 2; dir++) {
- if (!usb_packet_is_inflight(&s->ep[ep].packey[dir].p) ||
- s->ep[ep].packey[dir].p.ep->dev != dev) {
- continue;
- }
- usb_cancel_packet(&s->ep[ep].packey[dir].p);
- /* status updates needed here? */
- }
- }
-}
-
-static void musb_tx_rdy(MUSBState *s, int epnum)
-{
- MUSBEndPoint *ep = s->ep + epnum;
- int pid;
- int total, valid = 0;
- TRACE("start %d, len %d", ep->fifostart[0], ep->fifolen[0] );
- ep->fifostart[0] += ep->fifolen[0];
- ep->fifolen[0] = 0;
-
- /* XXX: how's the total size of the packet retrieved exactly in
- * the generic case? */
- total = ep->maxp[0] & 0x3ff;
-
- if (ep->ext_size[0]) {
- total = ep->ext_size[0];
- ep->ext_size[0] = 0;
- valid = 1;
- }
-
- /* If the packet is not fully ready yet, wait for a next segment. */
- if (epnum && (ep->fifostart[0]) < total)
- return;
-
- if (!valid)
- total = ep->fifostart[0];
-
- pid = USB_TOKEN_OUT;
- if (!epnum && (ep->csr[0] & MGC_M_CSR0_H_SETUPPKT)) {
- pid = USB_TOKEN_SETUP;
- if (total != 8) {
- TRACE("illegal SETUPPKT length of %i bytes", total);
- }
- /* Controller should retry SETUP packets three times on errors
- * but it doesn't make sense for us to do that. */
- }
-
- musb_packet(s, ep, epnum, pid, total, musb_tx_packet_complete, 0);
-}
-
-static void musb_rx_req(MUSBState *s, int epnum)
-{
- MUSBEndPoint *ep = s->ep + epnum;
- int total;
-
- /* If we already have a packet, which didn't fit into the
- * 64 bytes of the FIFO, only move the FIFO start and return. (Obsolete) */
- if (ep->packey[1].p.pid == USB_TOKEN_IN && ep->status[1] >= 0 &&
- (ep->fifostart[1]) + ep->rxcount <
- ep->packey[1].p.iov.size) {
- TRACE("0x%08x, %d", ep->fifostart[1], ep->rxcount );
- ep->fifostart[1] += ep->rxcount;
- ep->fifolen[1] = 0;
-
- ep->rxcount = MIN(ep->packey[0].p.iov.size - (ep->fifostart[1]),
- ep->maxp[1]);
-
- ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT;
- if (!epnum)
- ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT;
-
- /* Clear all of the error bits first */
- ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL |
- MGC_M_RXCSR_DATAERROR);
- if (!epnum)
- ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL |
- MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING);
-
- ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY;
- if (!epnum)
- ep->csr[0] |= MGC_M_CSR0_RXPKTRDY;
- musb_rx_intr_set(s, epnum, 1);
- return;
- }
-
- /* The driver sets maxp[1] to 64 or less because it knows the hardware
- * FIFO is this deep. Bigger packets get split in
- * usb_generic_handle_packet but we can also do the splitting locally
- * for performance. It turns out we can also have a bigger FIFO and
- * ignore the limit set in ep->maxp[1]. The Linux MUSB driver deals
- * OK with single packets of even 32KB and we avoid splitting, however
- * usb_msd.c sometimes sends a packet bigger than what Linux expects
- * (e.g. 8192 bytes instead of 4096) and we get an OVERRUN. Splitting
- * hides this overrun from Linux. Up to 4096 everything is fine
- * though. Currently this is disabled.
- *
- * XXX: mind ep->fifosize. */
- total = MIN(ep->maxp[1] & 0x3ff, sizeof(s->buf));
-
-#ifdef SETUPLEN_HACK
- /* Why should *we* do that instead of Linux? */
- if (!epnum) {
- if (ep->packey[0].p.devaddr == 2) {
- total = MIN(s->setup_len, 8);
- } else {
- total = MIN(s->setup_len, 64);
- }
- s->setup_len -= total;
- }
-#endif
-
- musb_packet(s, ep, epnum, USB_TOKEN_IN, total, musb_rx_packet_complete, 1);
-}
-
-static uint8_t musb_read_fifo(MUSBEndPoint *ep)
-{
- uint8_t value;
- if (ep->fifolen[1] >= 64) {
- /* We have a FIFO underrun */
- TRACE("EP%d FIFO is now empty, stop reading", ep->epnum);
- return 0x00000000;
- }
- /* In DMA mode clear RXPKTRDY and set REQPKT automatically
- * (if AUTOREQ is set) */
-
- ep->csr[1] &= ~MGC_M_RXCSR_FIFOFULL;
- value=ep->buf[1][ep->fifostart[1] + ep->fifolen[1] ++];
- TRACE("EP%d 0x%02x, %d", ep->epnum, value, ep->fifolen[1] );
- return value;
-}
-
-static void musb_write_fifo(MUSBEndPoint *ep, uint8_t value)
-{
- TRACE("EP%d = %02x", ep->epnum, value);
- if (ep->fifolen[0] >= 64) {
- /* We have a FIFO overrun */
- TRACE("EP%d FIFO exceeded 64 bytes, stop feeding data", ep->epnum);
- return;
- }
-
- ep->buf[0][ep->fifostart[0] + ep->fifolen[0] ++] = value;
- ep->csr[0] |= MGC_M_TXCSR_FIFONOTEMPTY;
-}
-
-static void musb_ep_frame_cancel(MUSBEndPoint *ep, int dir)
-{
- if (ep->intv_timer[dir])
- timer_del(ep->intv_timer[dir]);
-}
-
-/* Bus control */
-static uint8_t musb_busctl_readb(void *opaque, int ep, int addr)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- /* For USB2.0 HS hubs only */
- case MUSB_HDRC_TXHUBADDR:
- return s->ep[ep].haddr[0];
- case MUSB_HDRC_TXHUBPORT:
- return s->ep[ep].hport[0];
- case MUSB_HDRC_RXHUBADDR:
- return s->ep[ep].haddr[1];
- case MUSB_HDRC_RXHUBPORT:
- return s->ep[ep].hport[1];
-
- default:
- TRACE("unknown register 0x%02x", addr);
- return 0x00;
- };
-}
-
-static void musb_busctl_writeb(void *opaque, int ep, int addr, uint8_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXFUNCADDR:
- s->ep[ep].faddr[0] = value;
- break;
- case MUSB_HDRC_RXFUNCADDR:
- s->ep[ep].faddr[1] = value;
- break;
- case MUSB_HDRC_TXHUBADDR:
- s->ep[ep].haddr[0] = value;
- break;
- case MUSB_HDRC_TXHUBPORT:
- s->ep[ep].hport[0] = value;
- break;
- case MUSB_HDRC_RXHUBADDR:
- s->ep[ep].haddr[1] = value;
- break;
- case MUSB_HDRC_RXHUBPORT:
- s->ep[ep].hport[1] = value;
- break;
-
- default:
- TRACE("unknown register 0x%02x", addr);
- break;
- };
-}
-
-static uint16_t musb_busctl_readh(void *opaque, int ep, int addr)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXFUNCADDR:
- return s->ep[ep].faddr[0];
- case MUSB_HDRC_RXFUNCADDR:
- return s->ep[ep].faddr[1];
-
- default:
- return musb_busctl_readb(s, ep, addr) |
- (musb_busctl_readb(s, ep, addr | 1) << 8);
- };
-}
-
-static void musb_busctl_writeh(void *opaque, int ep, int addr, uint16_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXFUNCADDR:
- s->ep[ep].faddr[0] = value;
- break;
- case MUSB_HDRC_RXFUNCADDR:
- s->ep[ep].faddr[1] = value;
- break;
-
- default:
- musb_busctl_writeb(s, ep, addr, value & 0xff);
- musb_busctl_writeb(s, ep, addr | 1, value >> 8);
- };
-}
-
-/* Endpoint control */
-static uint8_t musb_ep_readb(void *opaque, int ep, int addr)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXTYPE:
- return s->ep[ep].type[0];
- case MUSB_HDRC_TXINTERVAL:
- return s->ep[ep].interval[0];
- case MUSB_HDRC_RXTYPE:
- return s->ep[ep].type[1];
- case MUSB_HDRC_RXINTERVAL:
- return s->ep[ep].interval[1];
- case (MUSB_HDRC_FIFOSIZE & ~1):
- return 0x00;
- case MUSB_HDRC_FIFOSIZE:
- return ep ? s->ep[ep].fifosize : s->ep[ep].config;
- case MUSB_HDRC_RXCOUNT:
- return s->ep[ep].rxcount;
-
- default:
- TRACE("unknown register 0x%02x", addr);
- return 0x00;
- };
-}
-
-static void musb_ep_writeb(void *opaque, int ep, int addr, uint8_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXTYPE:
- s->ep[ep].type[0] = value;
- break;
- case MUSB_HDRC_TXINTERVAL:
- s->ep[ep].interval[0] = value;
- musb_ep_frame_cancel(&s->ep[ep], 0);
- break;
- case MUSB_HDRC_RXTYPE:
- s->ep[ep].type[1] = value;
- break;
- case MUSB_HDRC_RXINTERVAL:
- s->ep[ep].interval[1] = value;
- musb_ep_frame_cancel(&s->ep[ep], 1);
- break;
- case (MUSB_HDRC_FIFOSIZE & ~1):
- break;
- case MUSB_HDRC_FIFOSIZE:
- TRACE("somebody messes with fifosize (now %i bytes)", value);
- s->ep[ep].fifosize = value;
- break;
- default:
- TRACE("unknown register 0x%02x", addr);
- break;
- };
-}
-
-static uint16_t musb_ep_readh(void *opaque, int ep, int addr)
-{
- MUSBState *s = (MUSBState *) opaque;
- uint16_t ret;
-
- switch (addr) {
- case MUSB_HDRC_TXMAXP:
- return s->ep[ep].maxp[0];
- case MUSB_HDRC_TXCSR:
- return s->ep[ep].csr[0];
- case MUSB_HDRC_RXMAXP:
- return s->ep[ep].maxp[1];
- case MUSB_HDRC_RXCSR:
- ret = s->ep[ep].csr[1];
-
- /* TODO: This and other bits probably depend on
- * ep->csr[1] & MGC_M_RXCSR_AUTOCLEAR. */
- if (s->ep[ep].csr[1] & MGC_M_RXCSR_AUTOCLEAR)
- s->ep[ep].csr[1] &= ~MGC_M_RXCSR_RXPKTRDY;
-
- return ret;
- case MUSB_HDRC_RXCOUNT:
- return s->ep[ep].rxcount;
-
- default:
- return musb_ep_readb(s, ep, addr) |
- (musb_ep_readb(s, ep, addr | 1) << 8);
- };
-}
-
-static void musb_ep_writeh(void *opaque, int ep, int addr, uint16_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
-
- switch (addr) {
- case MUSB_HDRC_TXMAXP:
- s->ep[ep].maxp[0] = value;
- break;
- case MUSB_HDRC_TXCSR:
- if (ep) {
- s->ep[ep].csr[0] &= value & 0xa6;
- s->ep[ep].csr[0] |= value & 0xff59;
- } else {
- s->ep[ep].csr[0] &= value & 0x85;
- s->ep[ep].csr[0] |= value & 0xf7a;
- }
-
- musb_ep_frame_cancel(&s->ep[ep], 0);
-
- if ((ep && (value & MGC_M_TXCSR_FLUSHFIFO)) ||
- (!ep && (value & MGC_M_CSR0_FLUSHFIFO))) {
- s->ep[ep].fifolen[0] = 0;
- s->ep[ep].fifostart[0] = 0;
- if (ep)
- s->ep[ep].csr[0] &=
- ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY);
- else
- s->ep[ep].csr[0] &=
- ~(MGC_M_CSR0_TXPKTRDY | MGC_M_CSR0_RXPKTRDY);
- }
- if (
- (ep &&
-#ifdef CLEAR_NAK
- (value & MGC_M_TXCSR_TXPKTRDY) &&
- !(value & MGC_M_TXCSR_H_NAKTIMEOUT)) ||
-#else
- (value & MGC_M_TXCSR_TXPKTRDY)) ||
-#endif
- (!ep &&
-#ifdef CLEAR_NAK
- (value & MGC_M_CSR0_TXPKTRDY) &&
- !(value & MGC_M_CSR0_H_NAKTIMEOUT)))
-#else
- (value & MGC_M_CSR0_TXPKTRDY)))
-#endif
- musb_tx_rdy(s, ep);
- if (!ep &&
- (value & MGC_M_CSR0_H_REQPKT) &&
-#ifdef CLEAR_NAK
- !(value & (MGC_M_CSR0_H_NAKTIMEOUT |
- MGC_M_CSR0_RXPKTRDY)))
-#else
- !(value & MGC_M_CSR0_RXPKTRDY))
-#endif
- musb_rx_req(s, ep);
- break;
-
- case MUSB_HDRC_RXMAXP:
- s->ep[ep].maxp[1] = value;
- break;
- case MUSB_HDRC_RXCSR:
- /* (DMA mode only) */
- if (
- (value & MGC_M_RXCSR_H_AUTOREQ) &&
- !(value & MGC_M_RXCSR_RXPKTRDY) &&
- (s->ep[ep].csr[1] & MGC_M_RXCSR_RXPKTRDY))
- value |= MGC_M_RXCSR_H_REQPKT;
-
- s->ep[ep].csr[1] &= 0x102 | (value & 0x4d);
- s->ep[ep].csr[1] |= value & 0xfeb0;
-
- musb_ep_frame_cancel(&s->ep[ep], 1);
-
- if (value & MGC_M_RXCSR_FLUSHFIFO) {
- s->ep[ep].fifolen[1] = 0;
- s->ep[ep].fifostart[1] = 0;
- s->ep[ep].csr[1] &= ~(MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY);
- /* If double buffering and we have two packets ready, flush
- * only the first one and set up the fifo at the second packet. */
- }
-#ifdef CLEAR_NAK
- if ((value & MGC_M_RXCSR_H_REQPKT) && !(value & MGC_M_RXCSR_DATAERROR))
-#else
- if (value & MGC_M_RXCSR_H_REQPKT)
-#endif
- musb_rx_req(s, ep);
- break;
- case MUSB_HDRC_RXCOUNT:
- s->ep[ep].rxcount = value;
- break;
-
- default:
- musb_ep_writeb(s, ep, addr, value & 0xff);
- musb_ep_writeb(s, ep, addr | 1, value >> 8);
- };
-}
-
-/* Generic control */
-static uint32_t musb_readb(void *opaque, hwaddr addr)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep, i;
- uint8_t ret;
-
- switch (addr) {
- case MUSB_HDRC_FADDR:
- return s->faddr;
- case MUSB_HDRC_POWER:
- return s->power;
- case MUSB_HDRC_INTRUSB:
- ret = s->intr;
- for (i = 0; i < sizeof(ret) * 8; i ++)
- if (ret & (1 << i))
- musb_intr_set(s, i, 0);
- return ret;
- case MUSB_HDRC_INTRUSBE:
- return s->mask;
- case MUSB_HDRC_INDEX:
- return s->idx;
- case MUSB_HDRC_TESTMODE:
- return 0x00;
-
- case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf):
- return musb_ep_readb(s, s->idx, addr & 0xf);
-
- case MUSB_HDRC_DEVCTL:
- return s->devctl;
-
- case MUSB_HDRC_TXFIFOSZ:
- case MUSB_HDRC_RXFIFOSZ:
- case MUSB_HDRC_VCTRL:
- /* TODO */
- return 0x00;
-
- case MUSB_HDRC_HWVERS:
- return (1 << 10) | 400;
-
- case (MUSB_HDRC_VCTRL | 1):
- case (MUSB_HDRC_HWVERS | 1):
- case (MUSB_HDRC_DEVCTL | 1):
- return 0x00;
-
- case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f):
- ep = (addr >> 3) & 0xf;
- return musb_busctl_readb(s, ep, addr & 0x7);
-
- case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff):
- ep = (addr >> 4) & 0xf;
- return musb_ep_readb(s, ep, addr & 0xf);
-
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- return musb_read_fifo(s->ep + ep);
-
- default:
- TRACE("unknown register 0x%02x", (int) addr);
- return 0x00;
- };
-}
-
-static void musb_writeb(void *opaque, hwaddr addr, uint32_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep;
-
- switch (addr) {
- case MUSB_HDRC_FADDR:
- s->faddr = value & 0x7f;
- break;
- case MUSB_HDRC_POWER:
- s->power = (value & 0xef) | (s->power & 0x10);
- /* MGC_M_POWER_RESET is also read-only in Peripheral Mode */
- if ((value & MGC_M_POWER_RESET) && s->port.dev) {
- usb_device_reset(s->port.dev);
- /* Negotiate high-speed operation if MGC_M_POWER_HSENAB is set. */
- if ((value & MGC_M_POWER_HSENAB) &&
- s->port.dev->speed == USB_SPEED_HIGH)
- s->power |= MGC_M_POWER_HSMODE; /* Success */
- /* Restart frame counting. */
- }
- if (value & MGC_M_POWER_SUSPENDM) {
- /* When all transfers finish, suspend and if MGC_M_POWER_ENSUSPEND
- * is set, also go into low power mode. Frame counting stops. */
- /* XXX: Cleared when the interrupt register is read */
- }
- if (value & MGC_M_POWER_RESUME) {
- /* Wait 20ms and signal resuming on the bus. Frame counting
- * restarts. */
- }
- break;
- case MUSB_HDRC_INTRUSB:
- break;
- case MUSB_HDRC_INTRUSBE:
- s->mask = value & 0xff;
- break;
- case MUSB_HDRC_INDEX:
- s->idx = value & 0xf;
- break;
- case MUSB_HDRC_TESTMODE:
- break;
-
- case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf):
- musb_ep_writeb(s, s->idx, addr & 0xf, value);
- break;
-
- case MUSB_HDRC_DEVCTL:
- s->session = !!(value & MGC_M_DEVCTL_SESSION);
- musb_session_update(s,
- !!s->port.dev,
- !!(s->devctl & MGC_M_DEVCTL_SESSION));
-
- /* It seems this is the only R/W bit in this register? */
- s->devctl &= ~MGC_M_DEVCTL_SESSION;
- s->devctl |= value & MGC_M_DEVCTL_SESSION;
- break;
-
- case MUSB_HDRC_TXFIFOSZ:
- case MUSB_HDRC_RXFIFOSZ:
- case MUSB_HDRC_VCTRL:
- /* TODO */
- break;
-
- case (MUSB_HDRC_VCTRL | 1):
- case (MUSB_HDRC_DEVCTL | 1):
- break;
-
- case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f):
- ep = (addr >> 3) & 0xf;
- musb_busctl_writeb(s, ep, addr & 0x7, value);
- break;
-
- case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff):
- ep = (addr >> 4) & 0xf;
- musb_ep_writeb(s, ep, addr & 0xf, value);
- break;
-
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- musb_write_fifo(s->ep + ep, value & 0xff);
- break;
-
- default:
- TRACE("unknown register 0x%02x", (int) addr);
- break;
- };
-}
-
-static uint32_t musb_readh(void *opaque, hwaddr addr)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep, i;
- uint16_t ret;
-
- switch (addr) {
- case MUSB_HDRC_INTRTX:
- ret = s->tx_intr;
- /* Auto clear */
- for (i = 0; i < sizeof(ret) * 8; i ++)
- if (ret & (1 << i))
- musb_tx_intr_set(s, i, 0);
- return ret;
- case MUSB_HDRC_INTRRX:
- ret = s->rx_intr;
- /* Auto clear */
- for (i = 0; i < sizeof(ret) * 8; i ++)
- if (ret & (1 << i))
- musb_rx_intr_set(s, i, 0);
- return ret;
- case MUSB_HDRC_INTRTXE:
- return s->tx_mask;
- case MUSB_HDRC_INTRRXE:
- return s->rx_mask;
-
- case MUSB_HDRC_FRAME:
- /* TODO */
- return 0x0000;
- case MUSB_HDRC_TXFIFOADDR:
- return s->ep[s->idx].fifoaddr[0];
- case MUSB_HDRC_RXFIFOADDR:
- return s->ep[s->idx].fifoaddr[1];
-
- case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf):
- return musb_ep_readh(s, s->idx, addr & 0xf);
-
- case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f):
- ep = (addr >> 3) & 0xf;
- return musb_busctl_readh(s, ep, addr & 0x7);
-
- case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff):
- ep = (addr >> 4) & 0xf;
- return musb_ep_readh(s, ep, addr & 0xf);
-
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- return (musb_read_fifo(s->ep + ep) | musb_read_fifo(s->ep + ep) << 8);
-
- default:
- return musb_readb(s, addr) | (musb_readb(s, addr | 1) << 8);
- };
-}
-
-static void musb_writeh(void *opaque, hwaddr addr, uint32_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep;
-
- switch (addr) {
- case MUSB_HDRC_INTRTXE:
- s->tx_mask = value;
- /* XXX: the masks seem to apply on the raising edge like with
- * edge-triggered interrupts, thus no need to update. I may be
- * wrong though. */
- break;
- case MUSB_HDRC_INTRRXE:
- s->rx_mask = value;
- break;
-
- case MUSB_HDRC_FRAME:
- /* TODO */
- break;
- case MUSB_HDRC_TXFIFOADDR:
- s->ep[s->idx].fifoaddr[0] = value;
- s->ep[s->idx].buf[0] =
- s->buf + ((value << 3) & 0x7ff );
- break;
- case MUSB_HDRC_RXFIFOADDR:
- s->ep[s->idx].fifoaddr[1] = value;
- s->ep[s->idx].buf[1] =
- s->buf + ((value << 3) & 0x7ff);
- break;
-
- case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf):
- musb_ep_writeh(s, s->idx, addr & 0xf, value);
- break;
-
- case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f):
- ep = (addr >> 3) & 0xf;
- musb_busctl_writeh(s, ep, addr & 0x7, value);
- break;
-
- case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff):
- ep = (addr >> 4) & 0xf;
- musb_ep_writeh(s, ep, addr & 0xf, value);
- break;
-
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- musb_write_fifo(s->ep + ep, value & 0xff);
- musb_write_fifo(s->ep + ep, (value >> 8) & 0xff);
- break;
-
- default:
- musb_writeb(s, addr, value & 0xff);
- musb_writeb(s, addr | 1, value >> 8);
- };
-}
-
-static uint32_t musb_readw(void *opaque, hwaddr addr)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep;
-
- switch (addr) {
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- return ( musb_read_fifo(s->ep + ep) |
- musb_read_fifo(s->ep + ep) << 8 |
- musb_read_fifo(s->ep + ep) << 16 |
- musb_read_fifo(s->ep + ep) << 24 );
- default:
- TRACE("unknown register 0x%02x", (int) addr);
- return 0x00000000;
- };
-}
-
-static void musb_writew(void *opaque, hwaddr addr, uint32_t value)
-{
- MUSBState *s = (MUSBState *) opaque;
- int ep;
-
- switch (addr) {
- case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
- ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
- musb_write_fifo(s->ep + ep, value & 0xff);
- musb_write_fifo(s->ep + ep, (value >> 8 ) & 0xff);
- musb_write_fifo(s->ep + ep, (value >> 16) & 0xff);
- musb_write_fifo(s->ep + ep, (value >> 24) & 0xff);
- break;
- default:
- TRACE("unknown register 0x%02x", (int) addr);
- break;
- };
-}
-
-MUSBReadFunc * const musb_read[] = {
- musb_readb,
- musb_readh,
- musb_readw,
-};
-
-MUSBWriteFunc * const musb_write[] = {
- musb_writeb,
- musb_writeh,
- musb_writew,
-};
diff --git a/hw/usb/hcd-ohci-pci.c b/hw/usb/hcd-ohci-pci.c
index 33ed9b6..94d1077 100644
--- a/hw/usb/hcd-ohci-pci.c
+++ b/hw/usb/hcd-ohci-pci.c
@@ -109,11 +109,10 @@ static void usb_ohci_reset_pci(DeviceState *d)
ohci_hard_reset(s);
}
-static Property ohci_pci_properties[] = {
+static const Property ohci_pci_properties[] = {
DEFINE_PROP_STRING("masterbus", OHCIPCIState, masterbus),
DEFINE_PROP_UINT32("num-ports", OHCIPCIState, num_ports, 3),
DEFINE_PROP_UINT32("firstport", OHCIPCIState, firstport, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_ohci = {
@@ -127,7 +126,7 @@ static const VMStateDescription vmstate_ohci = {
}
};
-static void ohci_pci_class_init(ObjectClass *klass, void *data)
+static void ohci_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -142,7 +141,7 @@ static void ohci_pci_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, ohci_pci_properties);
dc->hotpluggable = false;
dc->vmsd = &vmstate_ohci;
- dc->reset = usb_ohci_reset_pci;
+ device_class_set_legacy_reset(dc, usb_ohci_reset_pci);
}
static const TypeInfo ohci_pci_info = {
@@ -150,7 +149,7 @@ static const TypeInfo ohci_pci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(OHCIPCIState),
.class_init = ohci_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
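
The hunks above show the recurring modernisation pattern in this series: property tables become static const and drop the DEFINE_PROP_END_OF_LIST() sentinel, which only works if the consumer learns the element count some other way, presumably via ARRAY_SIZE at the device_class_set_props() call site. A stand-alone sketch of the counted-array style, using toy types rather than QEMU's:

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    typedef struct DemoProperty {
        const char *name;
    } DemoProperty;

    /* counted-array style: the consumer is told how many entries exist,
     * so no sentinel entry is needed at the end of the table */
    static void demo_set_props(const DemoProperty *props, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            printf("register property: %s\n", props[i].name);
        }
    }

    static const DemoProperty ohci_like_props[] = {
        { "masterbus" },
        { "num-ports" },
        { "firstport" },
    };

    int main(void)
    {
        demo_set_props(ohci_like_props, ARRAY_SIZE(ohci_like_props));
        return 0;
    }
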
diff --git a/hw/usb/hcd-ohci-sysbus.c b/hw/usb/hcd-ohci-sysbus.c
index 6fba7f5..3fc6cce 100644
--- a/hw/usb/hcd-ohci-sysbus.c
+++ b/hw/usb/hcd-ohci-sysbus.c
@@ -57,15 +57,14 @@ static void ohci_sysbus_reset(DeviceState *dev)
ohci_hard_reset(ohci);
}
-static Property ohci_sysbus_properties[] = {
+static const Property ohci_sysbus_properties[] = {
DEFINE_PROP_STRING("masterbus", OHCISysBusState, masterbus),
DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3),
DEFINE_PROP_UINT32("firstport", OHCISysBusState, firstport, 0),
DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void ohci_sysbus_class_init(ObjectClass *klass, void *data)
+static void ohci_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -73,7 +72,7 @@ static void ohci_sysbus_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
dc->desc = "OHCI USB Controller";
device_class_set_props(dc, ohci_sysbus_properties);
- dc->reset = ohci_sysbus_reset;
+ device_class_set_legacy_reset(dc, ohci_sysbus_reset);
}
static const TypeInfo ohci_sysbus_types[] = {
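
Across these files the direct dc->reset assignments are replaced by device_class_set_legacy_reset(); presumably the helper stores the old-style handler and invokes it from the hold phase of the Resettable machinery, so existing DeviceState-based reset functions keep working unchanged. A plain-C analogy of that shim (not QEMU's implementation; names are illustrative):

    #include <stdio.h>

    typedef struct DemoDevice DemoDevice;
    typedef void (*DemoLegacyReset)(DemoDevice *dev);

    struct DemoDevice {
        const char *name;
        DemoLegacyReset legacy_reset;   /* stored by the class-init shim */
    };

    /* the phased reset flow calls the stored legacy handler from its hold
     * phase, so old single-function reset handlers keep working */
    static void demo_reset_hold(DemoDevice *dev)
    {
        if (dev->legacy_reset) {
            dev->legacy_reset(dev);
        }
    }

    static void demo_cold_reset(DemoDevice *dev)
    {
        /* enter phase would run here */
        demo_reset_hold(dev);
        /* exit phase would run here */
    }

    static void ohci_like_reset(DemoDevice *dev)
    {
        printf("hard reset of %s\n", dev->name);
    }

    int main(void)
    {
        DemoDevice d = { "demo-ohci", ohci_like_reset };
        demo_cold_reset(&d);
        return 0;
    }
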
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 71b5491..72a9f9f 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -577,7 +577,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
USBDevice *dev;
USBEndpoint *ep;
USBPacket *pkt;
- uint8_t buf[8192];
+ QEMU_UNINITIALIZED uint8_t buf[8192];
bool int_req;
struct ohci_iso_td iso_td;
uint32_t addr;
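
The QEMU_UNINITIALIZED annotation added to the 8 KiB ISO TD buffer above presumably marks a large, always-fully-written local so compilers that zero-initialise stack variables (-ftrivial-auto-var-init) can skip the redundant clearing. The expansion below is an assumption for illustration, not QEMU's actual definition:

    #include <string.h>

    /* assumed expansion: opt a local out of automatic zero-initialisation
     * where the attribute exists, otherwise expand to nothing */
    #if defined(__has_attribute)
    # if __has_attribute(uninitialized)
    #  define DEMO_UNINITIALIZED __attribute__((uninitialized))
    # endif
    #endif
    #ifndef DEMO_UNINITIALIZED
    # define DEMO_UNINITIALIZED
    #endif

    size_t fill_and_sum(void)
    {
        DEMO_UNINITIALIZED unsigned char buf[8192];
        size_t sum = 0;

        memset(buf, 0xab, sizeof(buf));   /* fully written before any read */
        for (size_t i = 0; i < sizeof(buf); i++) {
            sum += buf[i];
        }
        return sum;
    }
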
diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h
index e182722..3cc35a5 100644
--- a/hw/usb/hcd-ohci.h
+++ b/hw/usb/hcd-ohci.h
@@ -22,7 +22,7 @@
#define HCD_OHCI_H
#include "hw/sysbus.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/usb.h"
#include "qom/object.h"
diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c
index a03cf22..4822c70 100644
--- a/hw/usb/hcd-uhci.c
+++ b/hw/usb/hcd-uhci.c
@@ -36,7 +36,7 @@
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/iov.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "trace.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -67,7 +67,7 @@ struct UHCIPCIDeviceClass {
UHCIInfo info;
};
-/*
+/*
* Pending async transaction.
* 'packet' must be the first field because completion
* handler does "(UHCIAsync *) pkt" cast.
@@ -220,8 +220,9 @@ static void uhci_async_cancel(UHCIAsync *async)
uhci_async_unlink(async);
trace_usb_uhci_packet_cancel(async->queue->token, async->td_addr,
async->done);
- if (!async->done)
+ if (!async->done) {
usb_cancel_packet(&async->packet);
+ }
uhci_async_free(async);
}
@@ -322,7 +323,7 @@ static void uhci_reset(DeviceState *dev)
s->fl_base_addr = 0;
s->sof_timing = 64;
- for(i = 0; i < UHCI_PORTS; i++) {
+ for (i = 0; i < UHCI_PORTS; i++) {
port = &s->ports[i];
port->ctrl = 0x0080;
if (port->port.dev && port->port.dev->attached) {
@@ -387,8 +388,8 @@ static void uhci_port_write(void *opaque, hwaddr addr,
trace_usb_uhci_mmio_writew(addr, val);
- switch(addr) {
- case 0x00:
+ switch (addr) {
+ case UHCI_USBCMD:
if ((val & UHCI_CMD_RS) && !(s->cmd & UHCI_CMD_RS)) {
/* start frame processing */
trace_usb_uhci_schedule_start();
@@ -404,7 +405,7 @@ static void uhci_port_write(void *opaque, hwaddr addr,
int i;
/* send reset on the USB bus */
- for(i = 0; i < UHCI_PORTS; i++) {
+ for (i = 0; i < UHCI_PORTS; i++) {
port = &s->ports[i];
usb_device_reset(port->port.dev);
}
@@ -423,34 +424,38 @@ static void uhci_port_write(void *opaque, hwaddr addr,
}
}
break;
- case 0x02:
+ case UHCI_USBSTS:
s->status &= ~val;
- /* XXX: the chip spec is not coherent, so we add a hidden
- register to distinguish between IOC and SPD */
- if (val & UHCI_STS_USBINT)
+ /*
+ * XXX: the chip spec is not coherent, so we add a hidden
+ * register to distinguish between IOC and SPD
+ */
+ if (val & UHCI_STS_USBINT) {
s->status2 = 0;
+ }
uhci_update_irq(s);
break;
- case 0x04:
+ case UHCI_USBINTR:
s->intr = val;
uhci_update_irq(s);
break;
- case 0x06:
- if (s->status & UHCI_STS_HCHALTED)
+ case UHCI_USBFRNUM:
+ if (s->status & UHCI_STS_HCHALTED) {
s->frnum = val & 0x7ff;
+ }
break;
- case 0x08:
+ case UHCI_USBFLBASEADD:
s->fl_base_addr &= 0xffff0000;
s->fl_base_addr |= val & ~0xfff;
break;
- case 0x0a:
+ case UHCI_USBFLBASEADD + 2:
s->fl_base_addr &= 0x0000ffff;
s->fl_base_addr |= (val << 16);
break;
- case 0x0c:
+ case UHCI_USBSOF:
s->sof_timing = val & 0xff;
break;
- case 0x10 ... 0x1f:
+ case UHCI_USBPORTSC1 ... UHCI_USBPORTSC4:
{
UHCIPort *port;
USBDevice *dev;
@@ -464,8 +469,8 @@ static void uhci_port_write(void *opaque, hwaddr addr,
dev = port->port.dev;
if (dev && dev->attached) {
/* port reset */
- if ( (val & UHCI_PORT_RESET) &&
- !(port->ctrl & UHCI_PORT_RESET) ) {
+ if ((val & UHCI_PORT_RESET) &&
+ !(port->ctrl & UHCI_PORT_RESET)) {
usb_device_reset(dev);
}
}
@@ -487,29 +492,29 @@ static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size)
UHCIState *s = opaque;
uint32_t val;
- switch(addr) {
- case 0x00:
+ switch (addr) {
+ case UHCI_USBCMD:
val = s->cmd;
break;
- case 0x02:
+ case UHCI_USBSTS:
val = s->status;
break;
- case 0x04:
+ case UHCI_USBINTR:
val = s->intr;
break;
- case 0x06:
+ case UHCI_USBFRNUM:
val = s->frnum;
break;
- case 0x08:
+ case UHCI_USBFLBASEADD:
val = s->fl_base_addr & 0xffff;
break;
- case 0x0a:
+ case UHCI_USBFLBASEADD + 2:
val = (s->fl_base_addr >> 16) & 0xffff;
break;
- case 0x0c:
+ case UHCI_USBSOF:
val = s->sof_timing;
break;
- case 0x10 ... 0x1f:
+ case UHCI_USBPORTSC1 ... UHCI_USBPORTSC4:
{
UHCIPort *port;
int n;
@@ -533,12 +538,13 @@ static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size)
}
/* signal resume if controller suspended */
-static void uhci_resume (void *opaque)
+static void uhci_resume(void *opaque)
{
UHCIState *s = (UHCIState *)opaque;
- if (!s)
+ if (!s) {
return;
+ }
if (s->cmd & UHCI_CMD_EGSM) {
s->cmd |= UHCI_CMD_FGR;
@@ -674,7 +680,8 @@ static int uhci_handle_td_error(UHCIState *s, UHCI_TD *td, uint32_t td_addr,
return ret;
}
-static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_t *int_mask)
+static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async,
+ uint32_t *int_mask)
{
int len = 0, max_len;
uint8_t pid;
@@ -682,8 +689,9 @@ static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_
max_len = ((td->token >> 21) + 1) & 0x7ff;
pid = td->token & 0xff;
- if (td->ctrl & TD_CTRL_IOS)
+ if (td->ctrl & TD_CTRL_IOS) {
td->ctrl &= ~TD_CTRL_ACTIVE;
+ }
if (async->packet.status != USB_RET_SUCCESS) {
return uhci_handle_td_error(s, td, async->td_addr,
@@ -693,12 +701,15 @@ static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_
len = async->packet.actual_length;
td->ctrl = (td->ctrl & ~0x7ff) | ((len - 1) & 0x7ff);
- /* The NAK bit may have been set by a previous frame, so clear it
- here. The docs are somewhat unclear, but win2k relies on this
- behavior. */
+ /*
+ * The NAK bit may have been set by a previous frame, so clear it
+ * here. The docs are somewhat unclear, but win2k relies on this
+ * behavior.
+ */
td->ctrl &= ~(TD_CTRL_ACTIVE | TD_CTRL_NAK);
- if (td->ctrl & TD_CTRL_IOC)
+ if (td->ctrl & TD_CTRL_IOC) {
*int_mask |= 0x01;
+ }
if (pid == USB_TOKEN_IN) {
pci_dma_write(&s->dev, td->buffer, async->buf, len);
@@ -780,9 +791,11 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr,
if (async) {
if (queuing) {
- /* we are busy filling the queue, we are not prepared
- to consume completed packages then, just leave them
- in async state */
+ /*
+ * we are busy filling the queue, we are not prepared
+ * to consume completed packages then, just leave them
+ * in async state
+ */
return TD_RESULT_ASYNC_CONT;
}
if (!async->done) {
@@ -832,7 +845,7 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr,
}
usb_packet_addbuf(&async->packet, async->buf, max_len);
- switch(pid) {
+ switch (pid) {
case USB_TOKEN_OUT:
case USB_TOKEN_SETUP:
pci_dma_read(&s->dev, td->buffer, async->buf, max_len);
@@ -911,12 +924,15 @@ static void qhdb_reset(QhDb *db)
static int qhdb_insert(QhDb *db, uint32_t addr)
{
int i;
- for (i = 0; i < db->count; i++)
- if (db->addr[i] == addr)
+ for (i = 0; i < db->count; i++) {
+ if (db->addr[i] == addr) {
return 1;
+ }
+ }
- if (db->count >= UHCI_MAX_QUEUES)
+ if (db->count >= UHCI_MAX_QUEUES) {
return 1;
+ }
db->addr[db->count++] = addr;
return 0;
@@ -970,8 +986,10 @@ static void uhci_process_frame(UHCIState *s)
for (cnt = FRAME_MAX_LOOPS; is_valid(link) && cnt; cnt--) {
if (!s->completions_only && s->frame_bytes >= s->frame_bandwidth) {
- /* We've reached the usb 1.1 bandwidth, which is
- 1280 bytes/frame, stop processing */
+ /*
+ * We've reached the usb 1.1 bandwidth, which is
+ * 1280 bytes/frame, stop processing
+ */
trace_usb_uhci_frame_stop_bandwidth();
break;
}
@@ -1120,8 +1138,10 @@ static void uhci_frame_timer(void *opaque)
uhci_async_validate_begin(s);
uhci_process_frame(s);
uhci_async_validate_end(s);
- /* The spec says frnum is the frame currently being processed, and
- * the guest must look at frnum - 1 on interrupt, so inc frnum now */
+ /*
+ * The spec says frnum is the frame currently being processed, and
+ * the guest must look at frnum - 1 on interrupt, so inc frnum now
+ */
s->frnum = (s->frnum + 1) & 0x7ff;
s->expire_time += frame_t;
}
@@ -1174,7 +1194,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
if (s->masterbus) {
USBPort *ports[UHCI_PORTS];
- for(i = 0; i < UHCI_PORTS; i++) {
+ for (i = 0; i < UHCI_PORTS; i++) {
ports[i] = &s->ports[i].port;
}
usb_register_companion(s->masterbus, ports, UHCI_PORTS,
@@ -1200,8 +1220,10 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
memory_region_init_io(&s->io_bar, OBJECT(s), &uhci_ioport_ops, s,
"uhci", 0x20);
- /* Use region 4 for consistency with real hardware. BSD guests seem
- to rely on this. */
+ /*
+ * Use region 4 for consistency with real hardware. BSD guests seem
+ * to rely on this.
+ */
pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar);
}
@@ -1227,27 +1249,25 @@ static void usb_uhci_exit(PCIDevice *dev)
}
}
-static Property uhci_properties_companion[] = {
+static const Property uhci_properties_companion[] = {
DEFINE_PROP_STRING("masterbus", UHCIState, masterbus),
DEFINE_PROP_UINT32("firstport", UHCIState, firstport, 0),
DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280),
DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property uhci_properties_standalone[] = {
+static const Property uhci_properties_standalone[] = {
DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280),
DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128),
- DEFINE_PROP_END_OF_LIST(),
};
-static void uhci_class_init(ObjectClass *klass, void *data)
+static void uhci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->class_id = PCI_CLASS_SERIAL_USB;
dc->vmsd = &vmstate_uhci;
- dc->reset = uhci_reset;
+ device_class_set_legacy_reset(dc, uhci_reset);
set_bit(DEVICE_CATEGORY_USB, dc->categories);
}
@@ -1258,18 +1278,18 @@ static const TypeInfo uhci_pci_type_info = {
.class_size = sizeof(UHCIPCIDeviceClass),
.abstract = true,
.class_init = uhci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-void uhci_data_class_init(ObjectClass *klass, void *data)
+void uhci_data_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
UHCIPCIDeviceClass *u = UHCI_CLASS(klass);
- UHCIInfo *info = data;
+ const UHCIInfo *info = data;
k->realize = info->realize ? info->realize : usb_uhci_common_realize;
k->exit = info->unplug ? usb_uhci_exit : NULL;
@@ -1362,7 +1382,7 @@ static void uhci_register_types(void)
for (i = 0; i < ARRAY_SIZE(uhci_info); i++) {
uhci_type_info.name = uhci_info[i].name;
uhci_type_info.class_data = uhci_info + i;
- type_register(&uhci_type_info);
+ type_register_static(&uhci_type_info);
}
}
diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h
index 6d26b94..e0a6525 100644
--- a/hw/usb/hcd-uhci.h
+++ b/hw/usb/hcd-uhci.h
@@ -28,7 +28,7 @@
#ifndef HW_USB_HCD_UHCI_H
#define HW_USB_HCD_UHCI_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/timer.h"
#include "hw/pci/pci_device.h"
#include "hw/usb.h"
@@ -88,7 +88,7 @@ typedef struct UHCIInfo {
bool notuser; /* disallow user_creatable */
} UHCIInfo;
-void uhci_data_class_init(ObjectClass *klass, void *data);
+void uhci_data_class_init(ObjectClass *klass, const void *data);
void usb_uhci_common_realize(PCIDevice *dev, Error **errp);
#define TYPE_PIIX3_USB_UHCI "piix3-usb-uhci"
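
The register-index hunks in hcd-uhci.c above replace bare I/O offsets with named constants; those names presumably live in this header with values matching the numeric case labels they replace (the 16-bit PORTSC registers start at 0x10 and sit two bytes apart). The values below are inferred from the diff, a sketch rather than the header's actual contents:

    /* offsets inferred from the replaced numeric case labels */
    #define UHCI_USBCMD        0x00
    #define UHCI_USBSTS        0x02
    #define UHCI_USBINTR       0x04
    #define UHCI_USBFRNUM      0x06
    #define UHCI_USBFLBASEADD  0x08
    #define UHCI_USBSOF        0x0c
    #define UHCI_USBPORTSC1    0x10
    #define UHCI_USBPORTSC2    0x12
    #define UHCI_USBPORTSC3    0x14
    #define UHCI_USBPORTSC4    0x16
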
diff --git a/hw/usb/hcd-xhci-nec.c b/hw/usb/hcd-xhci-nec.c
index 0c063b3..9e0fea2 100644
--- a/hw/usb/hcd-xhci-nec.c
+++ b/hw/usb/hcd-xhci-nec.c
@@ -30,20 +30,15 @@
OBJECT_DECLARE_SIMPLE_TYPE(XHCINecState, NEC_XHCI)
struct XHCINecState {
- /*< private >*/
XHCIPciState parent_obj;
- /*< public >*/
- uint32_t flags;
+
uint32_t intrs;
uint32_t slots;
};
-static Property nec_xhci_properties[] = {
- DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO),
- DEFINE_PROP_ON_OFF_AUTO("msix", XHCIPciState, msix, ON_OFF_AUTO_AUTO),
+static const Property nec_xhci_properties[] = {
DEFINE_PROP_UINT32("intrs", XHCINecState, intrs, XHCI_MAXINTRS),
DEFINE_PROP_UINT32("slots", XHCINecState, slots, XHCI_MAXSLOTS),
- DEFINE_PROP_END_OF_LIST(),
};
static void nec_xhci_instance_init(Object *obj)
@@ -51,12 +46,11 @@ static void nec_xhci_instance_init(Object *obj)
XHCIPciState *pci = XHCI_PCI(obj);
XHCINecState *nec = NEC_XHCI(obj);
- pci->xhci.flags = nec->flags;
pci->xhci.numintrs = nec->intrs;
pci->xhci.numslots = nec->slots;
}
-static void nec_xhci_class_init(ObjectClass *klass, void *data)
+static void nec_xhci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c
index 264d7eb..b93c80b 100644
--- a/hw/usb/hcd-xhci-pci.c
+++ b/hw/usb/hcd-xhci-pci.c
@@ -74,6 +74,7 @@ static bool xhci_pci_intr_raise(XHCIState *xhci, int n, bool level)
}
if (msi_enabled(pci_dev) && level) {
+ n %= msi_nr_vectors_allocated(pci_dev);
msi_notify(pci_dev, n);
return true;
}
@@ -81,6 +82,21 @@ static bool xhci_pci_intr_raise(XHCIState *xhci, int n, bool level)
return false;
}
+static bool xhci_pci_intr_mapping_conditional(XHCIState *xhci)
+{
+ XHCIPciState *s = container_of(xhci, XHCIPciState, xhci);
+ PCIDevice *pci_dev = PCI_DEVICE(s);
+
+ /*
+ * Implementation of the "conditional-intr-mapping" property, which only
+ * enables interrupter mapping if MSI or MSI-X is available and active.
+ * Forces all events onto interrupter/event ring 0 in pin-based IRQ mode.
+ * Provides compatibility with macOS guests on machine types where MSI(-X)
+ * is not available.
+ */
+ return msix_enabled(pci_dev) || msi_enabled(pci_dev);
+}
+
static void xhci_pci_reset(DeviceState *dev)
{
XHCIPciState *s = XHCI_PCI(dev);
@@ -94,7 +110,7 @@ static int xhci_pci_vmstate_post_load(void *opaque, int version_id)
PCIDevice *pci_dev = PCI_DEVICE(s);
int intr;
- for (intr = 0; intr < s->xhci.numintrs; intr++) {
+ for (intr = 0; intr < s->xhci.numintrs; intr++) {
if (s->xhci.intr[intr].msix_used) {
msix_vector_use(pci_dev, intr);
} else {
@@ -118,6 +134,9 @@ static void usb_xhci_pci_realize(struct PCIDevice *dev, Error **errp)
object_property_set_link(OBJECT(&s->xhci), "host", OBJECT(s), NULL);
s->xhci.intr_update = xhci_pci_intr_update;
s->xhci.intr_raise = xhci_pci_intr_raise;
+ if (s->conditional_intr_mapping) {
+ s->xhci.intr_mapping_supported = xhci_pci_intr_mapping_conditional;
+ }
if (!qdev_realize(DEVICE(&s->xhci), NULL, errp)) {
return;
}
@@ -197,17 +216,29 @@ static void xhci_instance_init(Object *obj)
qdev_alias_all_properties(DEVICE(&s->xhci), obj);
}
-static void xhci_class_init(ObjectClass *klass, void *data)
+static const Property xhci_pci_properties[] = {
+ DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO),
+ DEFINE_PROP_ON_OFF_AUTO("msix", XHCIPciState, msix, ON_OFF_AUTO_AUTO),
+ DEFINE_PROP_BOOL("conditional-intr-mapping", XHCIPciState,
+ conditional_intr_mapping, false),
+};
+
+static void xhci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xhci_pci_reset;
+ device_class_set_legacy_reset(dc, xhci_pci_reset);
dc->vmsd = &vmstate_xhci_pci;
set_bit(DEVICE_CATEGORY_USB, dc->categories);
k->realize = usb_xhci_pci_realize;
k->exit = usb_xhci_pci_exit;
k->class_id = PCI_CLASS_SERIAL_USB;
+ device_class_set_props(dc, xhci_pci_properties);
+ object_class_property_set_description(klass, "conditional-intr-mapping",
+ "When true, disables interrupter mapping for pin-based IRQ mode. "
+ "Intended to be used with guest drivers with questionable behaviour, "
+ "such as macOS's.");
}
static const TypeInfo xhci_pci_info = {
@@ -217,14 +248,14 @@ static const TypeInfo xhci_pci_info = {
.class_init = xhci_class_init,
.instance_init = xhci_instance_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
},
};
-static void qemu_xhci_class_init(ObjectClass *klass, void *data)
+static void qemu_xhci_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
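
The new conditional-intr-mapping property installs the callback defined above, and the matching hcd-xhci.c hunk later in this diff makes xhci_event() fall back to interrupter/event ring 0 whenever the callback reports that neither MSI nor MSI-X is active. A stand-alone sketch of that routing rule, with toy types rather than QEMU's; on the command line the property would presumably be enabled with something like -device qemu-xhci,conditional-intr-mapping=on:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct DemoXhci {
        int numintrs;
        bool msi_enabled;
        bool msix_enabled;
        bool conditional_mapping;     /* the new property */
    } DemoXhci;

    /* mirrors the check added to xhci_event(): interrupter mapping is only
     * honoured when more than one interrupter exists and, if the conditional
     * callback is installed, when MSI or MSI-X is actually enabled */
    static int demo_route_event(const DemoXhci *x, int v)
    {
        bool mapping_ok = !x->conditional_mapping ||
                          x->msi_enabled || x->msix_enabled;

        if (x->numintrs == 1 || !mapping_ok) {
            return 0;                 /* everything lands on event ring 0 */
        }
        return v;
    }

    int main(void)
    {
        DemoXhci pin_based = { 8, false, false, true };
        DemoXhci with_msix = { 8, false, true,  true };

        printf("pin-based IRQ: event ring %d\n", demo_route_event(&pin_based, 3));
        printf("MSI-X active:  event ring %d\n", demo_route_event(&with_msix, 3));
        return 0;
    }
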
diff --git a/hw/usb/hcd-xhci-pci.h b/hw/usb/hcd-xhci-pci.h
index 08f70ce..5b61ae8 100644
--- a/hw/usb/hcd-xhci-pci.h
+++ b/hw/usb/hcd-xhci-pci.h
@@ -40,6 +40,7 @@ typedef struct XHCIPciState {
XHCIState xhci;
OnOffAuto msi;
OnOffAuto msix;
+ bool conditional_intr_mapping;
} XHCIPciState;
#endif
diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c
index d93bae3..244698e 100644
--- a/hw/usb/hcd-xhci-sysbus.c
+++ b/hw/usb/hcd-xhci-sysbus.c
@@ -82,10 +82,9 @@ void xhci_sysbus_build_aml(Aml *scope, uint32_t mmio, unsigned int irq)
aml_append(scope, dev);
}
-static Property xhci_sysbus_props[] = {
+static const Property xhci_sysbus_props[] = {
DEFINE_PROP_UINT32("intrs", XHCISysbusState, xhci.numintrs, XHCI_MAXINTRS),
DEFINE_PROP_UINT32("slots", XHCISysbusState, xhci.numslots, XHCI_MAXSLOTS),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_xhci_sysbus = {
@@ -97,11 +96,11 @@ static const VMStateDescription vmstate_xhci_sysbus = {
}
};
-static void xhci_sysbus_class_init(ObjectClass *klass, void *data)
+static void xhci_sysbus_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = xhci_sysbus_reset;
+ device_class_set_legacy_reset(dc, xhci_sysbus_reset);
dc->realize = xhci_sysbus_realize;
dc->vmsd = &vmstate_xhci_sysbus;
device_class_set_props(dc, xhci_sysbus_props);
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index b6411f0..292c378 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -644,6 +644,11 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v)
dma_addr_t erdp;
unsigned int dp_idx;
+ if (xhci->numintrs == 1 ||
+ (xhci->intr_mapping_supported && !xhci->intr_mapping_supported(xhci))) {
+ v = 0;
+ }
+
if (v >= xhci->numintrs) {
DPRINTF("intr nr out of range (%d >= %d)\n", v, xhci->numintrs);
return;
@@ -1182,6 +1187,12 @@ static void xhci_ep_free_xfer(XHCITransfer *xfer)
g_free(xfer);
}
+static void xhci_xfer_unmap(XHCITransfer *xfer)
+{
+ usb_packet_unmap(&xfer->packet, &xfer->sgl);
+ qemu_sglist_destroy(&xfer->sgl);
+}
+
static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report)
{
int killed = 0;
@@ -1193,6 +1204,7 @@ static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report)
if (t->running_async) {
usb_cancel_packet(&t->packet);
+ xhci_xfer_unmap(t);
t->running_async = 0;
killed = 1;
}
@@ -1475,12 +1487,6 @@ err:
return -1;
}
-static void xhci_xfer_unmap(XHCITransfer *xfer)
-{
- usb_packet_unmap(&xfer->packet, &xfer->sgl);
- qemu_sglist_destroy(&xfer->sgl);
-}
-
static void xhci_xfer_report(XHCITransfer *xfer)
{
uint32_t edtla = 0;
@@ -2810,9 +2816,15 @@ static uint64_t xhci_port_read(void *ptr, hwaddr reg, unsigned size)
case 0x08: /* PORTLI */
ret = 0;
break;
- case 0x0c: /* reserved */
+ case 0x0c: /* PORTHLPMC */
+ ret = 0;
+ qemu_log_mask(LOG_UNIMP, "%s: read from port register PORTHLPMC",
+ __func__);
+ break;
default:
- trace_usb_xhci_unimplemented("port read", reg);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: read from port offset 0x%" HWADDR_PRIx,
+ __func__, reg);
ret = 0;
}
@@ -2881,9 +2893,22 @@ static void xhci_port_write(void *ptr, hwaddr reg,
}
break;
case 0x04: /* PORTPMSC */
+ case 0x0c: /* PORTHLPMC */
+ qemu_log_mask(LOG_UNIMP,
+ "%s: write 0x%" PRIx64
+ " (%u bytes) to port register at offset 0x%" HWADDR_PRIx,
+ __func__, val, size, reg);
+ break;
case 0x08: /* PORTLI */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Write to read-only PORTLI register",
+ __func__);
+ break;
default:
- trace_usb_xhci_unimplemented("port write", reg);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: write 0x%" PRIx64 " (%u bytes) to unknown port "
+ "register at offset 0x%" HWADDR_PRIx,
+ __func__, val, size, reg);
+ break;
}
}
@@ -3605,23 +3630,22 @@ const VMStateDescription vmstate_xhci = {
}
};
-static Property xhci_properties[] = {
+static const Property xhci_properties[] = {
DEFINE_PROP_BIT("streams", XHCIState, flags,
XHCI_FLAG_ENABLE_STREAMS, true),
DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4),
DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4),
DEFINE_PROP_LINK("host", XHCIState, hostOpaque, TYPE_DEVICE,
DeviceState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void xhci_class_init(ObjectClass *klass, void *data)
+static void xhci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = usb_xhci_realize;
dc->unrealize = usb_xhci_unrealize;
- dc->reset = xhci_reset;
+ device_class_set_legacy_reset(dc, xhci_reset);
device_class_set_props(dc, xhci_properties);
dc->user_creatable = false;
}
diff --git a/hw/usb/hcd-xhci.h b/hw/usb/hcd-xhci.h
index fe16d7a..9c3974f 100644
--- a/hw/usb/hcd-xhci.h
+++ b/hw/usb/hcd-xhci.h
@@ -25,7 +25,7 @@
#include "hw/usb.h"
#include "hw/usb/xhci.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
OBJECT_DECLARE_SIMPLE_TYPE(XHCIState, XHCI)
@@ -193,6 +193,11 @@ typedef struct XHCIState {
uint32_t max_pstreams_mask;
void (*intr_update)(XHCIState *s, int n, bool enable);
bool (*intr_raise)(XHCIState *s, int n, bool level);
+ /*
+ * Callback for special-casing interrupter mapping support. NULL for most
+ * implementations, for defaulting to enabled mapping unless numintrs == 1.
+ */
+ bool (*intr_mapping_supported)(XHCIState *s);
DeviceState *hostOpaque;
/* Operational Registers */
diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c
index 691bc88..b74670a 100644
--- a/hw/usb/host-libusb.c
+++ b/hw/usb/host-libusb.c
@@ -51,8 +51,8 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "trace.h"
#include "hw/qdev-properties.h"
@@ -1758,7 +1758,7 @@ static const VMStateDescription vmstate_usb_host = {
}
};
-static Property usb_host_dev_properties[] = {
+static const Property usb_host_dev_properties[] = {
DEFINE_PROP_UINT32("hostbus", USBHostDevice, match.bus_num, 0),
DEFINE_PROP_UINT32("hostaddr", USBHostDevice, match.addr, 0),
DEFINE_PROP_STRING("hostport", USBHostDevice, match.port),
@@ -1779,10 +1779,9 @@ static Property usb_host_dev_properties[] = {
USB_HOST_OPT_PIPELINE, true),
DEFINE_PROP_BOOL("suppress-remote-wake", USBHostDevice,
suppress_remote_wake, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usb_host_class_initfn(ObjectClass *klass, void *data)
+static void usb_host_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c
index 18917d7..c25566d 100644
--- a/hw/usb/imx-usb-phy.c
+++ b/hw/usb/imx-usb-phy.c
@@ -214,11 +214,11 @@ static void imx_usbphy_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
}
-static void imx_usbphy_class_init(ObjectClass *klass, void *data)
+static void imx_usbphy_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->reset = imx_usbphy_reset;
+ device_class_set_legacy_reset(dc, imx_usbphy_reset);
dc->vmsd = &vmstate_imx_usbphy;
dc->desc = "i.MX USB PHY Module";
dc->realize = imx_usbphy_realize;
diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c
index f350eae..4f03ef4 100644
--- a/hw/usb/libhw.c
+++ b/hw/usb/libhw.c
@@ -21,7 +21,7 @@
*/
#include "qemu/osdep.h"
#include "hw/usb.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
{
diff --git a/hw/usb/meson.build b/hw/usb/meson.build
index d7de100..17360a5 100644
--- a/hw/usb/meson.build
+++ b/hw/usb/meson.build
@@ -23,12 +23,10 @@ system_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c'))
system_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c'))
system_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c'))
system_ss.add(when: 'CONFIG_USB_XHCI_NEC', if_true: files('hcd-xhci-nec.c'))
-system_ss.add(when: 'CONFIG_USB_MUSB', if_true: files('hcd-musb.c'))
system_ss.add(when: 'CONFIG_USB_DWC2', if_true: files('hcd-dwc2.c'))
system_ss.add(when: 'CONFIG_USB_DWC3', if_true: files('hcd-dwc3.c'))
+system_ss.add(when: 'CONFIG_USB_CHIPIDEA', if_true: files('chipidea.c'))
-system_ss.add(when: 'CONFIG_TUSB6010', if_true: files('tusb6010.c'))
-system_ss.add(when: 'CONFIG_IMX', if_true: files('chipidea.c'))
system_ss.add(when: 'CONFIG_IMX_USBPHY', if_true: files('imx-usb-phy.c'))
system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686-uhci-pci.c'))
system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-usb2-ctrl-regs.c'))
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 0f2dd2e..f516ff4 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -30,8 +30,8 @@
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
@@ -2573,17 +2573,16 @@ static const VMStateDescription usbredir_vmstate = {
}
};
-static Property usbredir_properties[] = {
+static const Property usbredir_properties[] = {
DEFINE_PROP_CHR("chardev", USBRedirDevice, cs),
DEFINE_PROP_UINT8("debug", USBRedirDevice, debug, usbredirparser_warning),
DEFINE_PROP_STRING("filter", USBRedirDevice, filter_str),
DEFINE_PROP_BOOL("streams", USBRedirDevice, enable_streams, true),
DEFINE_PROP_BOOL("suppress-remote-wake", USBRedirDevice,
suppress_remote_wake, true),
- DEFINE_PROP_END_OF_LIST(),
};
-static void usbredir_class_initfn(ObjectClass *klass, void *data)
+static void usbredir_class_initfn(ObjectClass *klass, const void *data)
{
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/usb/tusb6010.c b/hw/usb/tusb6010.c
deleted file mode 100644
index 1dd4071..0000000
--- a/hw/usb/tusb6010.c
+++ /dev/null
@@ -1,850 +0,0 @@
-/*
- * Texas Instruments TUSB6010 emulation.
- * Based on reverse-engineering of a linux driver.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/module.h"
-#include "qemu/timer.h"
-#include "hw/usb.h"
-#include "hw/usb/hcd-musb.h"
-#include "hw/arm/omap.h"
-#include "hw/hw.h"
-#include "hw/irq.h"
-#include "hw/sysbus.h"
-#include "qom/object.h"
-
-#define TYPE_TUSB6010 "tusb6010"
-OBJECT_DECLARE_SIMPLE_TYPE(TUSBState, TUSB6010)
-
-struct TUSBState {
- SysBusDevice parent_obj;
-
- MemoryRegion iomem[2];
- qemu_irq irq;
- MUSBState *musb;
- QEMUTimer *otg_timer;
- QEMUTimer *pwr_timer;
-
- int power;
- uint32_t scratch;
- uint16_t test_reset;
- uint32_t prcm_config;
- uint32_t prcm_mngmt;
- uint16_t otg_status;
- uint32_t dev_config;
- int host_mode;
- uint32_t intr;
- uint32_t intr_ok;
- uint32_t mask;
- uint32_t usbip_intr;
- uint32_t usbip_mask;
- uint32_t gpio_intr;
- uint32_t gpio_mask;
- uint32_t gpio_config;
- uint32_t dma_intr;
- uint32_t dma_mask;
- uint32_t dma_map;
- uint32_t dma_config;
- uint32_t ep0_config;
- uint32_t rx_config[15];
- uint32_t tx_config[15];
- uint32_t wkup_mask;
- uint32_t pullup[2];
- uint32_t control_config;
- uint32_t otg_timer_val;
-};
-
-#define TUSB_DEVCLOCK 60000000 /* 60 MHz */
-
-#define TUSB_VLYNQ_CTRL 0x004
-
-/* Mentor Graphics OTG core registers. */
-#define TUSB_BASE_OFFSET 0x400
-
-/* FIFO registers, 32-bit. */
-#define TUSB_FIFO_BASE 0x600
-
-/* Device System & Control registers, 32-bit. */
-#define TUSB_SYS_REG_BASE 0x800
-
-#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
-#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
-#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
-#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
-#define TUSB_DEV_CONF_ID_SEL (1 << 0)
-
-#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
-#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
-#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
-#define TUSB_PHY_OTG_CTRL_O_ID_PULLUP (1 << 23)
-#define TUSB_PHY_OTG_CTRL_O_VBUS_DET_EN (1 << 19)
-#define TUSB_PHY_OTG_CTRL_O_SESS_END_EN (1 << 18)
-#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
-#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
-#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
-#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
-#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
-#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
-#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
-#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
-#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
-#define TUSB_PHY_OTG_CTRL_PHYREF_CLK(v) (((v) & 3) << 7)
-#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
-#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
-#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
-#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
-#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
-#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
-#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
-
-/* OTG status register */
-#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
-#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
-#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
-#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
-#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
-#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
-#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
-#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
-#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
-#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
-#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
-
-#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
-#define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
-#define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
-#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
-
-/* PRCM configuration register */
-#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
-#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
-#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
-
-/* PRCM management register */
-#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
-#define TUSB_PRCM_MNGMT_SRP_FIX_TMR(v) (((v) & 0xf) << 25)
-#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
-#define TUSB_PRCM_MNGMT_VBUS_VAL_TMR(v) (((v) & 0xf) << 20)
-#define TUSB_PRCM_MNGMT_VBUS_VAL_FLT_EN (1 << 19)
-#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
-#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
-#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
-#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
-#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
-#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
-#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
-#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
-#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
-#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
-
-/* Wake-up source clear and mask registers */
-#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
-#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
-#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
-#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
-#define TUSB_PRCM_WGPIO_7 (1 << 12)
-#define TUSB_PRCM_WGPIO_6 (1 << 11)
-#define TUSB_PRCM_WGPIO_5 (1 << 10)
-#define TUSB_PRCM_WGPIO_4 (1 << 9)
-#define TUSB_PRCM_WGPIO_3 (1 << 8)
-#define TUSB_PRCM_WGPIO_2 (1 << 7)
-#define TUSB_PRCM_WGPIO_1 (1 << 6)
-#define TUSB_PRCM_WGPIO_0 (1 << 5)
-#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
-#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
-#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
-#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
-#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
-
-#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
-#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
-#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
-#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
-#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
-#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
-#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
-#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
-#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
-#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
-#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
-#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
-#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
-#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
-#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
-#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
-
-/* NOR flash interrupt source registers */
-#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
-#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
-#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
-#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
-#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
-#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
-#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
-#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
-#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
-#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
-#define TUSB_INT_SRC_DEV_READY (1 << 12)
-#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
-#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
-#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
-#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
-#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
-#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
-#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
-#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
-#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
-#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
-
-#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
-#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
-#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
-#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
-#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
-#define TUSB_EP_IN_SIZE (TUSB_SYS_REG_BASE + 0x10c)
-#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
-#define TUSB_EP_OUT_SIZE (TUSB_SYS_REG_BASE + 0x14c)
-#define TUSB_EP_MAX_PACKET_SIZE_OFFSET (TUSB_SYS_REG_BASE + 0x188)
-#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
-#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
-#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
-
-#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
-#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
-
-/* Device System & Control register bitfields */
-#define TUSB_INT_CTRL_CONF_INT_RLCYC(v) (((v) & 0x7) << 18)
-#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
-#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
-#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
-#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
-#define TUSB_DMA_REQ_CONF_DMA_RQ_EN(v) (((v) & 0x3f) << 20)
-#define TUSB_DMA_REQ_CONF_DMA_RQ_ASR(v) (((v) & 0xf) << 16)
-#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
-#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
-#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
-#define TUSB_EP_CONFIG_SW_EN (1 << 31)
-#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
-#define TUSB_PROD_TEST_RESET_VAL 0xa596
-
-static void tusb_intr_update(TUSBState *s)
-{
- if (s->control_config & TUSB_INT_CTRL_CONF_INT_POLARITY)
- qemu_set_irq(s->irq, s->intr & ~s->mask & s->intr_ok);
- else
- qemu_set_irq(s->irq, (!(s->intr & ~s->mask)) & s->intr_ok);
-}
-
-static void tusb_usbip_intr_update(TUSBState *s)
-{
- /* TX interrupt in the MUSB */
- if (s->usbip_intr & 0x0000ffff & ~s->usbip_mask)
- s->intr |= TUSB_INT_SRC_USB_IP_TX;
- else
- s->intr &= ~TUSB_INT_SRC_USB_IP_TX;
-
- /* RX interrupt in the MUSB */
- if (s->usbip_intr & 0xffff0000 & ~s->usbip_mask)
- s->intr |= TUSB_INT_SRC_USB_IP_RX;
- else
- s->intr &= ~TUSB_INT_SRC_USB_IP_RX;
-
- /* XXX: What about TUSB_INT_SRC_USB_IP_CORE? */
-
- tusb_intr_update(s);
-}
-
-static void tusb_dma_intr_update(TUSBState *s)
-{
- if (s->dma_intr & ~s->dma_mask)
- s->intr |= TUSB_INT_SRC_TXRX_DMA_DONE;
- else
- s->intr &= ~TUSB_INT_SRC_TXRX_DMA_DONE;
-
- tusb_intr_update(s);
-}
-
-static void tusb_gpio_intr_update(TUSBState *s)
-{
- /* TODO: How is this signalled? */
-}
-
-static uint32_t tusb_async_readb(void *opaque, hwaddr addr)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- switch (addr & 0xfff) {
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- return musb_read[0](s->musb, addr & 0x1ff);
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- return musb_read[0](s->musb, 0x20 + ((addr >> 3) & 0x3c));
- }
-
- printf("%s: unknown register at %03x\n",
- __func__, (int) (addr & 0xfff));
- return 0;
-}
-
-static uint32_t tusb_async_readh(void *opaque, hwaddr addr)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- switch (addr & 0xfff) {
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- return musb_read[1](s->musb, addr & 0x1ff);
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- return musb_read[1](s->musb, 0x20 + ((addr >> 3) & 0x3c));
- }
-
- printf("%s: unknown register at %03x\n",
- __func__, (int) (addr & 0xfff));
- return 0;
-}
-
-static uint32_t tusb_async_readw(void *opaque, hwaddr addr)
-{
- TUSBState *s = (TUSBState *) opaque;
- int offset = addr & 0xfff;
- int epnum;
- uint32_t ret;
-
- switch (offset) {
- case TUSB_DEV_CONF:
- return s->dev_config;
-
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- return musb_read[2](s->musb, offset & 0x1ff);
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- return musb_read[2](s->musb, 0x20 + ((addr >> 3) & 0x3c));
-
- case TUSB_PHY_OTG_CTRL_ENABLE:
- case TUSB_PHY_OTG_CTRL:
- return 0x00; /* TODO */
-
- case TUSB_DEV_OTG_STAT:
- ret = s->otg_status;
-#if 0
- if (!(s->prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN))
- ret &= ~TUSB_DEV_OTG_STAT_VBUS_VALID;
-#endif
- return ret;
- case TUSB_DEV_OTG_TIMER:
- return s->otg_timer_val;
-
- case TUSB_PRCM_REV:
- return 0x20;
- case TUSB_PRCM_CONF:
- return s->prcm_config;
- case TUSB_PRCM_MNGMT:
- return s->prcm_mngmt;
- case TUSB_PRCM_WAKEUP_SOURCE:
- case TUSB_PRCM_WAKEUP_CLEAR: /* TODO: What does this one return? */
- return 0x00000000;
- case TUSB_PRCM_WAKEUP_MASK:
- return s->wkup_mask;
-
- case TUSB_PULLUP_1_CTRL:
- return s->pullup[0];
- case TUSB_PULLUP_2_CTRL:
- return s->pullup[1];
-
- case TUSB_INT_CTRL_REV:
- return 0x20;
- case TUSB_INT_CTRL_CONF:
- return s->control_config;
-
- case TUSB_USBIP_INT_SRC:
- case TUSB_USBIP_INT_SET: /* TODO: What do these two return? */
- case TUSB_USBIP_INT_CLEAR:
- return s->usbip_intr;
- case TUSB_USBIP_INT_MASK:
- return s->usbip_mask;
-
- case TUSB_DMA_INT_SRC:
- case TUSB_DMA_INT_SET: /* TODO: What do these two return? */
- case TUSB_DMA_INT_CLEAR:
- return s->dma_intr;
- case TUSB_DMA_INT_MASK:
- return s->dma_mask;
-
- case TUSB_GPIO_INT_SRC: /* TODO: What do these two return? */
- case TUSB_GPIO_INT_SET:
- case TUSB_GPIO_INT_CLEAR:
- return s->gpio_intr;
- case TUSB_GPIO_INT_MASK:
- return s->gpio_mask;
-
- case TUSB_INT_SRC:
- case TUSB_INT_SRC_SET: /* TODO: What do these two return? */
- case TUSB_INT_SRC_CLEAR:
- return s->intr;
- case TUSB_INT_MASK:
- return s->mask;
-
- case TUSB_GPIO_REV:
- return 0x30;
- case TUSB_GPIO_CONF:
- return s->gpio_config;
-
- case TUSB_DMA_CTRL_REV:
- return 0x30;
- case TUSB_DMA_REQ_CONF:
- return s->dma_config;
- case TUSB_EP0_CONF:
- return s->ep0_config;
- case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b):
- epnum = (offset - TUSB_EP_IN_SIZE) >> 2;
- return s->tx_config[epnum];
- case TUSB_DMA_EP_MAP:
- return s->dma_map;
- case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b):
- epnum = (offset - TUSB_EP_OUT_SIZE) >> 2;
- return s->rx_config[epnum];
- case TUSB_EP_MAX_PACKET_SIZE_OFFSET ...
- (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b):
- return 0x00000000; /* TODO */
- case TUSB_WAIT_COUNT:
- return 0x00; /* TODO */
-
- case TUSB_SCRATCH_PAD:
- return s->scratch;
-
- case TUSB_PROD_TEST_RESET:
- return s->test_reset;
-
- /* DIE IDs */
- case TUSB_DIDR1_LO:
- return 0xa9453c59;
- case TUSB_DIDR1_HI:
- return 0x54059adf;
- }
-
- printf("%s: unknown register at %03x\n", __func__, offset);
- return 0;
-}
-
-static void tusb_async_writeb(void *opaque, hwaddr addr,
- uint32_t value)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- switch (addr & 0xfff) {
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- musb_write[0](s->musb, addr & 0x1ff, value);
- break;
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- musb_write[0](s->musb, 0x20 + ((addr >> 3) & 0x3c), value);
- break;
-
- default:
- printf("%s: unknown register at %03x\n",
- __func__, (int) (addr & 0xfff));
- return;
- }
-}
-
-static void tusb_async_writeh(void *opaque, hwaddr addr,
- uint32_t value)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- switch (addr & 0xfff) {
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- musb_write[1](s->musb, addr & 0x1ff, value);
- break;
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- musb_write[1](s->musb, 0x20 + ((addr >> 3) & 0x3c), value);
- break;
-
- default:
- printf("%s: unknown register at %03x\n",
- __func__, (int) (addr & 0xfff));
- return;
- }
-}
-
-static void tusb_async_writew(void *opaque, hwaddr addr,
- uint32_t value)
-{
- TUSBState *s = (TUSBState *) opaque;
- int offset = addr & 0xfff;
- int epnum;
-
- switch (offset) {
- case TUSB_VLYNQ_CTRL:
- break;
-
- case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
- musb_write[2](s->musb, offset & 0x1ff, value);
- break;
-
- case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
- musb_write[2](s->musb, 0x20 + ((addr >> 3) & 0x3c), value);
- break;
-
- case TUSB_DEV_CONF:
- s->dev_config = value;
- s->host_mode = (value & TUSB_DEV_CONF_USB_HOST_MODE);
- if (value & TUSB_DEV_CONF_PROD_TEST_MODE)
- hw_error("%s: Product Test mode not allowed\n", __func__);
- break;
-
- case TUSB_PHY_OTG_CTRL_ENABLE:
- case TUSB_PHY_OTG_CTRL:
- return; /* TODO */
- case TUSB_DEV_OTG_TIMER:
- s->otg_timer_val = value;
- if (value & TUSB_DEV_OTG_TIMER_ENABLE)
- timer_mod(s->otg_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- muldiv64(TUSB_DEV_OTG_TIMER_VAL(value),
- NANOSECONDS_PER_SECOND, TUSB_DEVCLOCK));
- else
- timer_del(s->otg_timer);
- break;
-
- case TUSB_PRCM_CONF:
- s->prcm_config = value;
- break;
- case TUSB_PRCM_MNGMT:
- s->prcm_mngmt = value;
- break;
- case TUSB_PRCM_WAKEUP_CLEAR:
- break;
- case TUSB_PRCM_WAKEUP_MASK:
- s->wkup_mask = value;
- break;
-
- case TUSB_PULLUP_1_CTRL:
- s->pullup[0] = value;
- break;
- case TUSB_PULLUP_2_CTRL:
- s->pullup[1] = value;
- break;
- case TUSB_INT_CTRL_CONF:
- s->control_config = value;
- tusb_intr_update(s);
- break;
-
- case TUSB_USBIP_INT_SET:
- s->usbip_intr |= value;
- tusb_usbip_intr_update(s);
- break;
- case TUSB_USBIP_INT_CLEAR:
- s->usbip_intr &= ~value;
- tusb_usbip_intr_update(s);
- musb_core_intr_clear(s->musb, ~value);
- break;
- case TUSB_USBIP_INT_MASK:
- s->usbip_mask = value;
- tusb_usbip_intr_update(s);
- break;
-
- case TUSB_DMA_INT_SET:
- s->dma_intr |= value;
- tusb_dma_intr_update(s);
- break;
- case TUSB_DMA_INT_CLEAR:
- s->dma_intr &= ~value;
- tusb_dma_intr_update(s);
- break;
- case TUSB_DMA_INT_MASK:
- s->dma_mask = value;
- tusb_dma_intr_update(s);
- break;
-
- case TUSB_GPIO_INT_SET:
- s->gpio_intr |= value;
- tusb_gpio_intr_update(s);
- break;
- case TUSB_GPIO_INT_CLEAR:
- s->gpio_intr &= ~value;
- tusb_gpio_intr_update(s);
- break;
- case TUSB_GPIO_INT_MASK:
- s->gpio_mask = value;
- tusb_gpio_intr_update(s);
- break;
-
- case TUSB_INT_SRC_SET:
- s->intr |= value;
- tusb_intr_update(s);
- break;
- case TUSB_INT_SRC_CLEAR:
- s->intr &= ~value;
- tusb_intr_update(s);
- break;
- case TUSB_INT_MASK:
- s->mask = value;
- tusb_intr_update(s);
- break;
-
- case TUSB_GPIO_CONF:
- s->gpio_config = value;
- break;
- case TUSB_DMA_REQ_CONF:
- s->dma_config = value;
- break;
- case TUSB_EP0_CONF:
- s->ep0_config = value & 0x1ff;
- musb_set_size(s->musb, 0, TUSB_EP0_CONFIG_XFR_SIZE(value),
- value & TUSB_EP0_CONFIG_DIR_TX);
- break;
- case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b):
- epnum = (offset - TUSB_EP_IN_SIZE) >> 2;
- s->tx_config[epnum] = value;
- musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 1);
- break;
- case TUSB_DMA_EP_MAP:
- s->dma_map = value;
- break;
- case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b):
- epnum = (offset - TUSB_EP_OUT_SIZE) >> 2;
- s->rx_config[epnum] = value;
- musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 0);
- break;
- case TUSB_EP_MAX_PACKET_SIZE_OFFSET ...
- (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b):
- return; /* TODO */
- case TUSB_WAIT_COUNT:
- return; /* TODO */
-
- case TUSB_SCRATCH_PAD:
- s->scratch = value;
- break;
-
- case TUSB_PROD_TEST_RESET:
- s->test_reset = value;
- break;
-
- default:
- printf("%s: unknown register at %03x\n", __func__, offset);
- return;
- }
-}
-
-static uint64_t tusb_async_readfn(void *opaque, hwaddr addr, unsigned size)
-{
- switch (size) {
- case 1:
- return tusb_async_readb(opaque, addr);
- case 2:
- return tusb_async_readh(opaque, addr);
- case 4:
- return tusb_async_readw(opaque, addr);
- default:
- g_assert_not_reached();
- }
-}
-
-static void tusb_async_writefn(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
-{
- switch (size) {
- case 1:
- tusb_async_writeb(opaque, addr, value);
- break;
- case 2:
- tusb_async_writeh(opaque, addr, value);
- break;
- case 4:
- tusb_async_writew(opaque, addr, value);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static const MemoryRegionOps tusb_async_ops = {
- .read = tusb_async_readfn,
- .write = tusb_async_writefn,
- .valid.min_access_size = 1,
- .valid.max_access_size = 4,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void tusb_otg_tick(void *opaque)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- s->otg_timer_val = 0;
- s->intr |= TUSB_INT_SRC_OTG_TIMEOUT;
- tusb_intr_update(s);
-}
-
-static void tusb_power_tick(void *opaque)
-{
- TUSBState *s = (TUSBState *) opaque;
-
- if (s->power) {
- s->intr_ok = ~0;
- tusb_intr_update(s);
- }
-}
-
-static void tusb_musb_core_intr(void *opaque, int source, int level)
-{
- TUSBState *s = (TUSBState *) opaque;
- uint16_t otg_status = s->otg_status;
-
- switch (source) {
- case musb_set_vbus:
- if (level)
- otg_status |= TUSB_DEV_OTG_STAT_VBUS_VALID;
- else
- otg_status &= ~TUSB_DEV_OTG_STAT_VBUS_VALID;
-
- /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN set? */
- /* XXX: only if TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN set? */
- if (s->otg_status != otg_status) {
- s->otg_status = otg_status;
- s->intr |= TUSB_INT_SRC_VBUS_SENSE_CHNG;
- tusb_intr_update(s);
- }
- break;
-
- case musb_set_session:
- /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN set? */
- /* XXX: only if TUSB_PRCM_MNGMT_OTG_SESS_END_EN set? */
- if (level) {
- s->otg_status |= TUSB_DEV_OTG_STAT_SESS_VALID;
- s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_END;
- } else {
- s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_VALID;
- s->otg_status |= TUSB_DEV_OTG_STAT_SESS_END;
- }
-
- /* XXX: some IRQ or anything? */
- break;
-
- case musb_irq_tx:
- case musb_irq_rx:
- s->usbip_intr = musb_core_intr_get(s->musb);
- /* Fall through. */
- default:
- if (level)
- s->intr |= 1 << source;
- else
- s->intr &= ~(1 << source);
- tusb_intr_update(s);
- break;
- }
-}
-
-static void tusb6010_power(TUSBState *s, int on)
-{
- if (!on) {
- s->power = 0;
- } else if (!s->power && on) {
- s->power = 1;
- /* Pull the interrupt down after TUSB6010 comes up. */
- s->intr_ok = 0;
- tusb_intr_update(s);
- timer_mod(s->pwr_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- NANOSECONDS_PER_SECOND / 2);
- }
-}
-
-static void tusb6010_irq(void *opaque, int source, int level)
-{
- if (source) {
- tusb_musb_core_intr(opaque, source - 1, level);
- } else {
- tusb6010_power(opaque, level);
- }
-}
-
-static void tusb6010_reset(DeviceState *dev)
-{
- TUSBState *s = TUSB6010(dev);
- int i;
-
- s->test_reset = TUSB_PROD_TEST_RESET_VAL;
- s->host_mode = 0;
- s->dev_config = 0;
- s->otg_status = 0; /* !TUSB_DEV_OTG_STAT_ID_STATUS means host mode */
- s->power = 0;
- s->mask = 0xffffffff;
- s->intr = 0x00000000;
- s->otg_timer_val = 0;
- s->scratch = 0;
- s->prcm_config = 0;
- s->prcm_mngmt = 0;
- s->intr_ok = 0;
- s->usbip_intr = 0;
- s->usbip_mask = 0;
- s->gpio_intr = 0;
- s->gpio_mask = 0;
- s->gpio_config = 0;
- s->dma_intr = 0;
- s->dma_mask = 0;
- s->dma_map = 0;
- s->dma_config = 0;
- s->ep0_config = 0;
- s->wkup_mask = 0;
- s->pullup[0] = s->pullup[1] = 0;
- s->control_config = 0;
- for (i = 0; i < 15; i++) {
- s->rx_config[i] = s->tx_config[i] = 0;
- }
- musb_reset(s->musb);
-}
-
-static void tusb6010_realize(DeviceState *dev, Error **errp)
-{
- TUSBState *s = TUSB6010(dev);
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
-
- s->otg_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_otg_tick, s);
- s->pwr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_power_tick, s);
- memory_region_init_io(&s->iomem[1], OBJECT(s), &tusb_async_ops, s,
- "tusb-async", UINT32_MAX);
- sysbus_init_mmio(sbd, &s->iomem[0]);
- sysbus_init_mmio(sbd, &s->iomem[1]);
- sysbus_init_irq(sbd, &s->irq);
- qdev_init_gpio_in(dev, tusb6010_irq, musb_irq_max + 1);
- s->musb = musb_init(dev, 1);
-}
-
-static void tusb6010_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->realize = tusb6010_realize;
- dc->reset = tusb6010_reset;
-}
-
-static const TypeInfo tusb6010_info = {
- .name = TYPE_TUSB6010,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(TUSBState),
- .class_init = tusb6010_class_init,
-};
-
-static void tusb6010_register_types(void)
-{
- type_register_static(&tusb6010_info);
-}
-
-type_init(tusb6010_register_types)
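
The deleted tusb_intr_update() near the top of this file computed the chip's single outgoing IRQ from the pending sources, the mask, the power-up gate and the configured polarity. A stand-alone restatement of that logic for reference, with toy types rather than the removed QEMU code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DemoTusb {
        uint32_t intr;      /* pending interrupt sources */
        uint32_t mask;      /* 1 = source masked */
        uint32_t intr_ok;   /* gate raised once the chip has powered up */
        bool active_high;   /* the INT_POLARITY configuration bit */
    } DemoTusb;

    /* level presented on the outgoing IRQ line, as in the deleted helper:
     * unmasked pending sources, gated by intr_ok, with selectable polarity */
    static bool demo_irq_level(const DemoTusb *s)
    {
        bool pending = (s->intr & ~s->mask) != 0;

        if (s->active_high) {
            return pending && s->intr_ok;
        }
        return !pending && s->intr_ok;
    }

    int main(void)
    {
        DemoTusb s = { .intr = 1u << 3, .mask = 0, .intr_ok = 1,
                       .active_high = true };
        printf("irq level: %d\n", demo_irq_level(&s));
        return 0;
    }
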
diff --git a/hw/usb/u2f-emulated.c b/hw/usb/u2f-emulated.c
index 63cceaa..ace5ece 100644
--- a/hw/usb/u2f-emulated.c
+++ b/hw/usb/u2f-emulated.c
@@ -369,16 +369,15 @@ static void u2f_emulated_unrealize(U2FKeyState *base)
}
}
-static Property u2f_emulated_properties[] = {
+static const Property u2f_emulated_properties[] = {
DEFINE_PROP_STRING("dir", U2FEmulatedState, dir),
DEFINE_PROP_STRING("cert", U2FEmulatedState, cert),
DEFINE_PROP_STRING("privkey", U2FEmulatedState, privkey),
DEFINE_PROP_STRING("entropy", U2FEmulatedState, entropy),
DEFINE_PROP_STRING("counter", U2FEmulatedState, counter),
- DEFINE_PROP_END_OF_LIST(),
};
-static void u2f_emulated_class_init(ObjectClass *klass, void *data)
+static void u2f_emulated_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
U2FKeyClass *kc = U2F_KEY_CLASS(klass);
diff --git a/hw/usb/u2f-passthru.c b/hw/usb/u2f-passthru.c
index c4a783d..fa8d9cd 100644
--- a/hw/usb/u2f-passthru.c
+++ b/hw/usb/u2f-passthru.c
@@ -516,12 +516,11 @@ static const VMStateDescription u2f_passthru_vmstate = {
}
};
-static Property u2f_passthru_properties[] = {
+static const Property u2f_passthru_properties[] = {
DEFINE_PROP_STRING("hidraw", U2FPassthruState, hidraw),
- DEFINE_PROP_END_OF_LIST(),
};
-static void u2f_passthru_class_init(ObjectClass *klass, void *data)
+static void u2f_passthru_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
U2FKeyClass *kc = U2F_KEY_CLASS(klass);
diff --git a/hw/usb/u2f.c b/hw/usb/u2f.c
index 1fb59cf..b051a99 100644
--- a/hw/usb/u2f.c
+++ b/hw/usb/u2f.c
@@ -317,7 +317,7 @@ const VMStateDescription vmstate_u2f_key = {
}
};
-static void u2f_key_class_init(ObjectClass *klass, void *data)
+static void u2f_key_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index 1390162..fa46a7d 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -30,8 +30,8 @@
#include "hw/xen/xen-legacy-backend.h"
#include "monitor/qdev.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "hw/xen/interface/io/usbif.h"
@@ -755,10 +755,10 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
qdict = qdict_new();
qdict_put_str(qdict, "driver", "usb-host");
- tmp = g_strdup_printf("%s.0", usbif->xendev.qdev.id);
+ tmp = g_strdup_printf("%s.0", DEVICE(&usbif->xendev)->id);
qdict_put_str(qdict, "bus", tmp);
g_free(tmp);
- tmp = g_strdup_printf("%s-%u", usbif->xendev.qdev.id, port);
+ tmp = g_strdup_printf("%s-%u", DEVICE(&usbif->xendev)->id, port);
qdict_put_str(qdict, "id", tmp);
g_free(tmp);
qdict_put_int(qdict, "port", port);
@@ -1022,7 +1022,7 @@ static void usbback_alloc(struct XenLegacyDevice *xendev)
usbif = container_of(xendev, struct usbback_info, xendev);
usb_bus_new(&usbif->bus, sizeof(usbif->bus), &xen_usb_bus_ops,
- DEVICE(&xendev->qdev));
+ DEVICE(xendev));
for (i = 0; i < USBBACK_MAXPORTS; i++) {
p = &(usbif->ports[i].port);
usb_register_port(&usbif->bus, p, usbif, i, &xen_usb_port_ops,
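
The hunks above drop the direct xendev.qdev.id field accesses in favour of the DEVICE() cast, which hands back the embedded parent object without naming the field. A plain-C analogy of that embedded-parent cast (not the QOM implementation; names are illustrative):

    #include <stdio.h>

    typedef struct DemoDeviceState {
        const char *id;
    } DemoDeviceState;

    typedef struct DemoXenLegacyDevice {
        DemoDeviceState qdev;    /* parent object embedded as the first member */
        int handle;
    } DemoXenLegacyDevice;

    /* stand-in for the QOM DEVICE() cast: return the embedded parent so the
     * caller never has to name the qdev field directly */
    #define DEMO_DEVICE(x) (&(x)->qdev)

    int main(void)
    {
        DemoXenLegacyDevice xendev = { { "xen-usb-0" }, 0 };
        printf("bus id: %s.0\n", DEMO_DEVICE(&xendev)->id);
        return 0;
    }
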
diff --git a/hw/usb/xlnx-usb-subsystem.c b/hw/usb/xlnx-usb-subsystem.c
index d8deeb6..98967ef 100644
--- a/hw/usb/xlnx-usb-subsystem.c
+++ b/hw/usb/xlnx-usb-subsystem.c
@@ -69,7 +69,7 @@ static void versal_usb2_init(Object *obj)
object_property_add_alias(obj, "dma", OBJECT(&s->dwc3.sysbus_xhci), "dma");
}
-static void versal_usb2_class_init(ObjectClass *klass, void *data)
+static void versal_usb2_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/usb/xlnx-versal-usb2-ctrl-regs.c b/hw/usb/xlnx-versal-usb2-ctrl-regs.c
index 66c793a..4114672 100644
--- a/hw/usb/xlnx-versal-usb2-ctrl-regs.c
+++ b/hw/usb/xlnx-versal-usb2-ctrl-regs.c
@@ -202,7 +202,7 @@ static const VMStateDescription vmstate_usb2_ctrl_regs = {
}
};
-static void usb2_ctrl_regs_class_init(ObjectClass *klass, void *data)
+static void usb2_ctrl_regs_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
diff --git a/hw/vfio-user/Kconfig b/hw/vfio-user/Kconfig
new file mode 100644
index 0000000..24bdf7a
--- /dev/null
+++ b/hw/vfio-user/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config VFIO_USER
+ bool
+ default y
+ depends on VFIO_PCI
+
diff --git a/hw/vfio-user/container.c b/hw/vfio-user/container.c
new file mode 100644
index 0000000..3133fef
--- /dev/null
+++ b/hw/vfio-user/container.c
@@ -0,0 +1,370 @@
+/*
+ * Container for vfio-user IOMMU type: rather than communicating with the kernel
+ * vfio driver, we communicate over a socket to a server using the vfio-user
+ * protocol.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+#include "qemu/osdep.h"
+
+#include "hw/vfio-user/container.h"
+#include "hw/vfio-user/device.h"
+#include "hw/vfio-user/trace.h"
+#include "hw/vfio/vfio-cpr.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-listener.h"
+#include "qapi/error.h"
+
+/*
+ * When DMA space is the physical address space, the region add/del listeners
+ * will fire during memory update transactions. These depend on BQL being held,
+ * so do any resulting map/demap ops async while keeping BQL.
+ */
+static void vfio_user_listener_begin(VFIOContainerBase *bcontainer)
+{
+ VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+ bcontainer);
+
+ container->proxy->async_ops = true;
+}
+
+static void vfio_user_listener_commit(VFIOContainerBase *bcontainer)
+{
+ VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+ bcontainer);
+
+ /* wait here for any async requests sent during the transaction */
+ container->proxy->async_ops = false;
+ vfio_user_wait_reqs(container->proxy);
+}
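Editorial aside (not part of the patch): the two listener hooks above bracket a memory transaction so that any map/unmap requests issued in between go out as nowait messages and are only waited for at commit time. A minimal sketch of that ordering, assuming the MemoryListener core drives the callbacks with the container registered by this file:

/*
 * Illustrative sketch only -- in reality the MemoryListener core invokes
 * these callbacks; this function merely shows the intended order.
 */
static void example_memory_transaction(VFIOContainerBase *bcontainer,
                                       hwaddr iova, ram_addr_t size,
                                       void *vaddr, MemoryRegion *mr)
{
    vfio_user_listener_begin(bcontainer);       /* proxy->async_ops = true  */
    vfio_user_dma_map(bcontainer, iova, size,   /* sent with send_nowait()  */
                      vaddr, false, mr);
    vfio_user_listener_commit(bcontainer);      /* async_ops = false, then  */
                                                /* vfio_user_wait_reqs()    */
}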
+
+static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb, bool unmap_all)
+{
+ VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+ bcontainer);
+ Error *local_err = NULL;
+ int ret = 0;
+
+ VFIOUserDMAUnmap *msgp = g_malloc(sizeof(*msgp));
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_UNMAP, sizeof(*msgp), 0);
+ msgp->argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+ msgp->flags = unmap_all ? VFIO_DMA_UNMAP_FLAG_ALL : 0;
+ msgp->iova = iova;
+ msgp->size = size;
+ trace_vfio_user_dma_unmap(msgp->iova, msgp->size, msgp->flags,
+ container->proxy->async_ops);
+
+ if (container->proxy->async_ops) {
+ if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, NULL,
+ 0, &local_err)) {
+ error_report_err(local_err);
+ ret = -EFAULT;
+ } else {
+ ret = 0;
+ }
+ } else {
+ if (!vfio_user_send_wait(container->proxy, &msgp->hdr, NULL,
+ 0, &local_err)) {
+ error_report_err(local_err);
+ ret = -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ ret = -msgp->hdr.error_reply;
+ }
+
+ g_free(msgp);
+ }
+
+ return ret;
+}
+
+static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly,
+ MemoryRegion *mrp)
+{
+ VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+ bcontainer);
+ int fd = memory_region_get_fd(mrp);
+ Error *local_err = NULL;
+ int ret;
+
+ VFIOUserFDs *fds = NULL;
+ VFIOUserDMAMap *msgp = g_malloc0(sizeof(*msgp));
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_MAP, sizeof(*msgp), 0);
+ msgp->argsz = sizeof(struct vfio_iommu_type1_dma_map);
+ msgp->flags = VFIO_DMA_MAP_FLAG_READ;
+ msgp->offset = 0;
+ msgp->iova = iova;
+ msgp->size = size;
+
+ /*
+ * vaddr enters as a QEMU process address; make it either a file offset
+ * for mapped areas or leave as 0.
+ */
+ if (fd != -1) {
+ msgp->offset = qemu_ram_block_host_offset(mrp->ram_block, vaddr);
+ }
+
+ if (!readonly) {
+ msgp->flags |= VFIO_DMA_MAP_FLAG_WRITE;
+ }
+
+ trace_vfio_user_dma_map(msgp->iova, msgp->size, msgp->offset, msgp->flags,
+ container->proxy->async_ops);
+
+ /*
+ * The async_ops case sends without blocking. They're later waited for in
+ * vfio_user_wait_reqs().
+ */
+ if (container->proxy->async_ops) {
+ /* can't use auto variable since we don't block */
+ if (fd != -1) {
+ fds = vfio_user_getfds(1);
+ fds->send_fds = 1;
+ fds->fds[0] = fd;
+ }
+
+ if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, fds,
+ 0, &local_err)) {
+ error_report_err(local_err);
+ ret = -EFAULT;
+ } else {
+ ret = 0;
+ }
+ } else {
+ VFIOUserFDs local_fds = { 1, 0, &fd };
+
+ fds = fd != -1 ? &local_fds : NULL;
+
+ if (!vfio_user_send_wait(container->proxy, &msgp->hdr, fds,
+ 0, &local_err)) {
+ error_report_err(local_err);
+ ret = -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ ret = -msgp->hdr.error_reply;
+ }
+
+ g_free(msgp);
+ }
+
+ return ret;
+}
+
+static int
+vfio_user_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
+ bool start, Error **errp)
+{
+ error_setg_errno(errp, ENOTSUP, "Not supported");
+ return -ENOTSUP;
+}
+
+static int vfio_user_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size, Error **errp)
+{
+ error_setg_errno(errp, ENOTSUP, "Not supported");
+ return -ENOTSUP;
+}
+
+static bool vfio_user_setup(VFIOContainerBase *bcontainer, Error **errp)
+{
+ VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+ bcontainer);
+
+ assert(container->proxy->dma_pgsizes != 0);
+ bcontainer->pgsizes = container->proxy->dma_pgsizes;
+ bcontainer->dma_max_mappings = container->proxy->max_dma;
+
+ /* No live migration support yet. */
+ bcontainer->dirty_pages_supported = false;
+ bcontainer->max_dirty_bitmap_size = container->proxy->max_bitmap;
+ bcontainer->dirty_pgsizes = container->proxy->migr_pgsize;
+
+ return true;
+}
+
+static VFIOUserContainer *vfio_user_create_container(VFIODevice *vbasedev,
+ Error **errp)
+{
+ VFIOUserContainer *container;
+
+ container = VFIO_IOMMU_USER(object_new(TYPE_VFIO_IOMMU_USER));
+ container->proxy = vbasedev->proxy;
+ return container;
+}
+
+/*
+ * Try to mirror vfio_container_connect() as much as possible.
+ */
+static VFIOUserContainer *
+vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev,
+ Error **errp)
+{
+ VFIOContainerBase *bcontainer;
+ VFIOUserContainer *container;
+ VFIOAddressSpace *space;
+ VFIOIOMMUClass *vioc;
+ int ret;
+
+ space = vfio_address_space_get(as);
+
+ container = vfio_user_create_container(vbasedev, errp);
+ if (!container) {
+ goto put_space_exit;
+ }
+
+ bcontainer = &container->bcontainer;
+
+ if (!vfio_cpr_register_container(bcontainer, errp)) {
+ goto free_container_exit;
+ }
+
+ ret = ram_block_uncoordinated_discard_disable(true);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
+ goto unregister_container_exit;
+ }
+
+ vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+ assert(vioc->setup);
+
+ if (!vioc->setup(bcontainer, errp)) {
+ goto enable_discards_exit;
+ }
+
+ vfio_address_space_insert(space, bcontainer);
+
+ if (!vfio_listener_register(bcontainer, errp)) {
+ goto listener_release_exit;
+ }
+
+ bcontainer->initialized = true;
+
+ return container;
+
+listener_release_exit:
+ vfio_listener_unregister(bcontainer);
+ if (vioc->release) {
+ vioc->release(bcontainer);
+ }
+
+enable_discards_exit:
+ ram_block_uncoordinated_discard_disable(false);
+
+unregister_container_exit:
+ vfio_cpr_unregister_container(bcontainer);
+
+free_container_exit:
+ object_unref(container);
+
+put_space_exit:
+ vfio_address_space_put(space);
+
+ return NULL;
+}
+
+static void vfio_user_container_disconnect(VFIOUserContainer *container)
+{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+ VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+ VFIOAddressSpace *space = bcontainer->space;
+
+ ram_block_uncoordinated_discard_disable(false);
+
+ vfio_listener_unregister(bcontainer);
+ if (vioc->release) {
+ vioc->release(bcontainer);
+ }
+
+ vfio_cpr_unregister_container(bcontainer);
+ object_unref(container);
+
+ vfio_address_space_put(space);
+}
+
+static bool vfio_user_device_get(VFIOUserContainer *container,
+ VFIODevice *vbasedev, Error **errp)
+{
+ struct vfio_device_info info = { .argsz = sizeof(info) };
+
+ if (!vfio_user_get_device_info(vbasedev->proxy, &info, errp)) {
+ return false;
+ }
+
+ vbasedev->fd = -1;
+
+ vfio_device_prepare(vbasedev, &container->bcontainer, &info);
+
+ return true;
+}
+
+/*
+ * vfio_user_device_attach: attach a device to a new container.
+ */
+static bool vfio_user_device_attach(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp)
+{
+ VFIOUserContainer *container;
+
+ container = vfio_user_container_connect(as, vbasedev, errp);
+ if (container == NULL) {
+ error_prepend(errp, "failed to connect proxy");
+ return false;
+ }
+
+ return vfio_user_device_get(container, vbasedev, errp);
+}
+
+static void vfio_user_device_detach(VFIODevice *vbasedev)
+{
+ VFIOUserContainer *container = container_of(vbasedev->bcontainer,
+ VFIOUserContainer, bcontainer);
+
+ vfio_device_unprepare(vbasedev);
+
+ vfio_user_container_disconnect(container);
+}
+
+static int vfio_user_pci_hot_reset(VFIODevice *vbasedev, bool single)
+{
+ /* ->needs_reset is always false for vfio-user. */
+ return 0;
+}
+
+static void vfio_iommu_user_class_init(ObjectClass *klass, const void *data)
+{
+ VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
+
+ vioc->setup = vfio_user_setup;
+ vioc->listener_begin = vfio_user_listener_begin;
+ vioc->listener_commit = vfio_user_listener_commit;
+ vioc->dma_map = vfio_user_dma_map;
+ vioc->dma_unmap = vfio_user_dma_unmap;
+ vioc->attach_device = vfio_user_device_attach;
+ vioc->detach_device = vfio_user_device_detach;
+ vioc->set_dirty_page_tracking = vfio_user_set_dirty_page_tracking;
+ vioc->query_dirty_bitmap = vfio_user_query_dirty_bitmap;
+ vioc->pci_hot_reset = vfio_user_pci_hot_reset;
+}
+
+static const TypeInfo types[] = {
+ {
+ .name = TYPE_VFIO_IOMMU_USER,
+ .parent = TYPE_VFIO_IOMMU,
+ .instance_size = sizeof(VFIOUserContainer),
+ .class_init = vfio_iommu_user_class_init,
+ },
+};
+
+DEFINE_TYPES(types)
diff --git a/hw/vfio-user/container.h b/hw/vfio-user/container.h
new file mode 100644
index 0000000..2bb1fa1
--- /dev/null
+++ b/hw/vfio-user/container.h
@@ -0,0 +1,23 @@
+/*
+ * vfio-user specific definitions.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_USER_CONTAINER_H
+#define HW_VFIO_USER_CONTAINER_H
+
+#include "qemu/osdep.h"
+
+#include "hw/vfio/vfio-container-base.h"
+#include "hw/vfio-user/proxy.h"
+
+/* MMU container sub-class for vfio-user. */
+typedef struct VFIOUserContainer {
+ VFIOContainerBase bcontainer;
+ VFIOUserProxy *proxy;
+} VFIOUserContainer;
+
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserContainer, VFIO_IOMMU_USER);
+
+#endif /* HW_VFIO_USER_CONTAINER_H */
diff --git a/hw/vfio-user/device.c b/hw/vfio-user/device.c
new file mode 100644
index 0000000..0609a7d
--- /dev/null
+++ b/hw/vfio-user/device.c
@@ -0,0 +1,441 @@
+/*
+ * vfio protocol over a UNIX socket device handling.
+ *
+ * Copyright Ā© 2018, 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/lockable.h"
+#include "qemu/thread.h"
+
+#include "hw/vfio-user/device.h"
+#include "hw/vfio-user/trace.h"
+
+/*
+ * These are to defend against a malign server trying
+ * to force us to run out of memory.
+ */
+#define VFIO_USER_MAX_REGIONS 100
+#define VFIO_USER_MAX_IRQS 50
+
+bool vfio_user_get_device_info(VFIOUserProxy *proxy,
+ struct vfio_device_info *info, Error **errp)
+{
+ VFIOUserDeviceInfo msg;
+ uint32_t argsz = sizeof(msg) - sizeof(msg.hdr);
+
+ memset(&msg, 0, sizeof(msg));
+ vfio_user_request_msg(&msg.hdr, VFIO_USER_DEVICE_GET_INFO, sizeof(msg), 0);
+ msg.argsz = argsz;
+
+ if (!vfio_user_send_wait(proxy, &msg.hdr, NULL, 0, errp)) {
+ return false;
+ }
+
+ if (msg.hdr.flags & VFIO_USER_ERROR) {
+ error_setg_errno(errp, -msg.hdr.error_reply,
+ "VFIO_USER_DEVICE_GET_INFO failed");
+ return false;
+ }
+
+ trace_vfio_user_get_info(msg.num_regions, msg.num_irqs);
+
+ memcpy(info, &msg.argsz, argsz);
+
+ /* defend against a malicious server */
+ if (info->num_regions > VFIO_USER_MAX_REGIONS ||
+ info->num_irqs > VFIO_USER_MAX_IRQS) {
+ error_setg_errno(errp, EINVAL, "invalid reply");
+ return false;
+ }
+
+ return true;
+}
+
+void vfio_user_device_reset(VFIOUserProxy *proxy)
+{
+ Error *local_err = NULL;
+ VFIOUserHdr hdr;
+
+ vfio_user_request_msg(&hdr, VFIO_USER_DEVICE_RESET, sizeof(hdr), 0);
+
+ if (!vfio_user_send_wait(proxy, &hdr, NULL, 0, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return;
+ }
+
+ if (hdr.flags & VFIO_USER_ERROR) {
+ error_printf("reset reply error %d\n", hdr.error_reply);
+ }
+}
+
+static int vfio_user_get_region_info(VFIOUserProxy *proxy,
+ struct vfio_region_info *info,
+ VFIOUserFDs *fds)
+{
+ g_autofree VFIOUserRegionInfo *msgp = NULL;
+ Error *local_err = NULL;
+ uint32_t size;
+
+ /* data returned can be larger than vfio_region_info */
+ if (info->argsz < sizeof(*info)) {
+ error_printf("vfio_user_get_region_info argsz too small\n");
+ return -E2BIG;
+ }
+ if (fds != NULL && fds->send_fds != 0) {
+ error_printf("vfio_user_get_region_info can't send FDs\n");
+ return -EINVAL;
+ }
+
+ size = info->argsz + sizeof(VFIOUserHdr);
+ msgp = g_malloc0(size);
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_GET_REGION_INFO,
+ sizeof(*msgp), 0);
+ msgp->argsz = info->argsz;
+ msgp->index = info->index;
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, fds, size, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ return -msgp->hdr.error_reply;
+ }
+ trace_vfio_user_get_region_info(msgp->index, msgp->flags, msgp->size);
+
+ memcpy(info, &msgp->argsz, info->argsz);
+
+ /*
+ * If at least one region is directly mapped into the VM, then we can no
+ * longer rely on the sequential nature of vfio-user request handling to
+ * ensure that posted writes are completed before a subsequent read. In this
+ * case, disable posted write support. This is a per-device property, not
+ * per-region.
+ */
+ if (info->flags & VFIO_REGION_INFO_FLAG_MMAP) {
+ vfio_user_disable_posted_writes(proxy);
+ }
+
+ return 0;
+}
+
+static int vfio_user_device_io_get_region_info(VFIODevice *vbasedev,
+ struct vfio_region_info *info,
+ int *fd)
+{
+ VFIOUserFDs fds = { 0, 1, fd};
+ int ret;
+
+ if (info->index > vbasedev->num_regions) {
+ return -EINVAL;
+ }
+
+ ret = vfio_user_get_region_info(vbasedev->proxy, info, &fds);
+ if (ret) {
+ return ret;
+ }
+
+ /* cap_offset in valid area */
+ if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) &&
+ (info->cap_offset < sizeof(*info) || info->cap_offset > info->argsz)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vfio_user_device_io_get_irq_info(VFIODevice *vbasedev,
+ struct vfio_irq_info *info)
+{
+ VFIOUserProxy *proxy = vbasedev->proxy;
+ Error *local_err = NULL;
+ VFIOUserIRQInfo msg;
+
+ memset(&msg, 0, sizeof(msg));
+ vfio_user_request_msg(&msg.hdr, VFIO_USER_DEVICE_GET_IRQ_INFO,
+ sizeof(msg), 0);
+ msg.argsz = info->argsz;
+ msg.index = info->index;
+
+ if (!vfio_user_send_wait(proxy, &msg.hdr, NULL, 0, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ if (msg.hdr.flags & VFIO_USER_ERROR) {
+ return -msg.hdr.error_reply;
+ }
+ trace_vfio_user_get_irq_info(msg.index, msg.flags, msg.count);
+
+ memcpy(info, &msg.argsz, sizeof(*info));
+ return 0;
+}
+
+static int irq_howmany(int *fdp, uint32_t cur, uint32_t max)
+{
+ int n = 0;
+
+ if (fdp[cur] != -1) {
+ do {
+ n++;
+ } while (n < max && fdp[cur + n] != -1);
+ } else {
+ do {
+ n++;
+ } while (n < max && fdp[cur + n] == -1);
+ }
+
+ return n;
+}
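To make the chunking below concrete, a worked trace (editorial, not in the patch), assuming proxy->max_send_fds is 4 and the caller passes six descriptors:

/*
 * Worked example (assumes proxy->max_send_fds == 4):
 *
 *   fds[] = { 3, 4, -1, -1, -1, 5 }      nfds == 6
 *
 *   irq_howmany(fds, 0, 4) -> 2   first msg: 2 valid FDs, sent with eventfds
 *   irq_howmany(fds, 2, 4) -> 3   second msg: 3 "-1" slots, sent without FDs
 *   irq_howmany(fds, 5, 1) -> 1   third msg: final valid FD
 *
 * Each chunk becomes one VFIO_USER_DEVICE_SET_IRQS message in the loop in
 * vfio_user_device_io_set_irqs() below.
 */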
+
+static int vfio_user_device_io_set_irqs(VFIODevice *vbasedev,
+ struct vfio_irq_set *irq)
+{
+ VFIOUserProxy *proxy = vbasedev->proxy;
+ g_autofree VFIOUserIRQSet *msgp = NULL;
+ uint32_t size, nfds, send_fds, sent_fds, max;
+ Error *local_err = NULL;
+
+ if (irq->argsz < sizeof(*irq)) {
+ error_printf("vfio_user_set_irqs argsz too small\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Handle simple case
+ */
+ if ((irq->flags & VFIO_IRQ_SET_DATA_EVENTFD) == 0) {
+ size = sizeof(VFIOUserHdr) + irq->argsz;
+ msgp = g_malloc0(size);
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS, size, 0);
+ msgp->argsz = irq->argsz;
+ msgp->flags = irq->flags;
+ msgp->index = irq->index;
+ msgp->start = irq->start;
+ msgp->count = irq->count;
+ trace_vfio_user_set_irqs(msgp->index, msgp->start, msgp->count,
+ msgp->flags);
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ return -msgp->hdr.error_reply;
+ }
+
+ return 0;
+ }
+
+ /*
+ * Calculate the number of FDs to send
+ * and adjust argsz
+ */
+ nfds = (irq->argsz - sizeof(*irq)) / sizeof(int);
+ irq->argsz = sizeof(*irq);
+ msgp = g_malloc0(sizeof(*msgp));
+ /*
+ * Send in chunks if over max_send_fds
+ */
+ for (sent_fds = 0; nfds > sent_fds; sent_fds += send_fds) {
+ VFIOUserFDs *arg_fds, loop_fds;
+
+ /* must send all valid FDs or all invalid FDs in single msg */
+ max = nfds - sent_fds;
+ if (max > proxy->max_send_fds) {
+ max = proxy->max_send_fds;
+ }
+ send_fds = irq_howmany((int *)irq->data, sent_fds, max);
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS,
+ sizeof(*msgp), 0);
+ msgp->argsz = irq->argsz;
+ msgp->flags = irq->flags;
+ msgp->index = irq->index;
+ msgp->start = irq->start + sent_fds;
+ msgp->count = send_fds;
+ trace_vfio_user_set_irqs(msgp->index, msgp->start, msgp->count,
+ msgp->flags);
+
+ loop_fds.send_fds = send_fds;
+ loop_fds.recv_fds = 0;
+ loop_fds.fds = (int *)irq->data + sent_fds;
+ arg_fds = loop_fds.fds[0] != -1 ? &loop_fds : NULL;
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, arg_fds, 0, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ return -msgp->hdr.error_reply;
+ }
+ }
+
+ return 0;
+}
+
+static int vfio_user_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
+ off_t off, uint32_t count,
+ void *data)
+{
+ g_autofree VFIOUserRegionRW *msgp = NULL;
+ VFIOUserProxy *proxy = vbasedev->proxy;
+ int size = sizeof(*msgp) + count;
+ Error *local_err = NULL;
+
+ if (count > proxy->max_xfer_size) {
+ return -EINVAL;
+ }
+
+ msgp = g_malloc0(size);
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_REGION_READ, sizeof(*msgp), 0);
+ msgp->offset = off;
+ msgp->region = index;
+ msgp->count = count;
+ trace_vfio_user_region_rw(msgp->region, msgp->offset, msgp->count);
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, size, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ return -msgp->hdr.error_reply;
+ } else if (msgp->count > count) {
+ return -E2BIG;
+ } else {
+ memcpy(data, &msgp->data, msgp->count);
+ }
+
+ return msgp->count;
+}
+
+/*
+ * If this is a posted write, and VFIO_PROXY_NO_POST is not set, then we are OK
+ * to send the write to the socket without waiting for the server's reply:
+ * a subsequent read (of any region) will not pass the posted write, as all
+ * messages are handled sequentially.
+ */
+static int vfio_user_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
+ off_t off, unsigned count,
+ void *data, bool post)
+{
+ VFIOUserRegionRW *msgp = NULL;
+ VFIOUserProxy *proxy = vbasedev->proxy;
+ int size = sizeof(*msgp) + count;
+ Error *local_err = NULL;
+ bool can_multi;
+ int flags = 0;
+ int ret;
+
+ if (count > proxy->max_xfer_size) {
+ return -EINVAL;
+ }
+
+ if (proxy->flags & VFIO_PROXY_NO_POST) {
+ post = false;
+ }
+
+ if (post) {
+ flags |= VFIO_USER_NO_REPLY;
+ }
+
+ /* write eligible to be in a WRITE_MULTI msg ? */
+ can_multi = (proxy->flags & VFIO_PROXY_USE_MULTI) && post &&
+ count <= VFIO_USER_MULTI_DATA;
+
+ /*
+ * This should be a rare case, so first check without the lock;
+ * if we're wrong, vfio_user_send_queued() will flush any posted writes
+ * we missed here.
+ */
+ if (proxy->wr_multi != NULL ||
+ (proxy->num_outgoing > VFIO_USER_OUT_HIGH && can_multi)) {
+
+ /*
+ * re-check with lock
+ *
+ * if already building a WRITE_MULTI msg,
+ * add this one if possible else flush pending before
+ * sending the current one
+ *
+ * else if outgoing queue is over the highwater,
+ * start a new WRITE_MULTI message
+ */
+ WITH_QEMU_LOCK_GUARD(&proxy->lock) {
+ if (proxy->wr_multi != NULL) {
+ if (can_multi) {
+ vfio_user_add_multi(proxy, index, off, count, data);
+ return count;
+ }
+ vfio_user_flush_multi(proxy);
+ } else if (proxy->num_outgoing > VFIO_USER_OUT_HIGH && can_multi) {
+ vfio_user_create_multi(proxy);
+ vfio_user_add_multi(proxy, index, off, count, data);
+ return count;
+ }
+ }
+ }
+
+ msgp = g_malloc0(size);
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_REGION_WRITE, size, flags);
+ msgp->offset = off;
+ msgp->region = index;
+ msgp->count = count;
+ memcpy(&msgp->data, data, count);
+ trace_vfio_user_region_rw(msgp->region, msgp->offset, msgp->count);
+
+ /* async send will free msg after it's sent */
+ if (post) {
+ if (!vfio_user_send_async(proxy, &msgp->hdr, NULL, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ return -EFAULT;
+ }
+
+ return count;
+ }
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, &local_err)) {
+ error_prepend(&local_err, "%s: ", __func__);
+ error_report_err(local_err);
+ g_free(msgp);
+ return -EFAULT;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ ret = -msgp->hdr.error_reply;
+ } else {
+ ret = count;
+ }
+
+ g_free(msgp);
+ return ret;
+}
+
+/*
+ * Socket-based io_ops
+ */
+VFIODeviceIOOps vfio_user_device_io_ops_sock = {
+ .get_region_info = vfio_user_device_io_get_region_info,
+ .get_irq_info = vfio_user_device_io_get_irq_info,
+ .set_irqs = vfio_user_device_io_set_irqs,
+ .region_read = vfio_user_device_io_region_read,
+ .region_write = vfio_user_device_io_region_write,
+};
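As a usage sketch (editorial; the helper name below is illustrative, not from the patch), a 4-byte BAR write issued through this ops table with post set to true is eligible for posting or WRITE_MULTI coalescing as described above:

/* Hypothetical caller-side helper, for illustration only. */
static int example_bar_write32(VFIODevice *vbasedev, uint8_t bar,
                               off_t off, uint32_t val)
{
    /* post == true: may be sent without waiting for the server's reply */
    return vbasedev->io_ops->region_write(vbasedev, bar, off,
                                          sizeof(val), &val, true);
}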
diff --git a/hw/vfio-user/device.h b/hw/vfio-user/device.h
new file mode 100644
index 0000000..d183a39
--- /dev/null
+++ b/hw/vfio-user/device.h
@@ -0,0 +1,24 @@
+#ifndef VFIO_USER_DEVICE_H
+#define VFIO_USER_DEVICE_H
+
+/*
+ * vfio protocol over a UNIX socket device handling.
+ *
+ * Copyright Ā© 2018, 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "linux/vfio.h"
+
+#include "hw/vfio-user/proxy.h"
+
+bool vfio_user_get_device_info(VFIOUserProxy *proxy,
+ struct vfio_device_info *info, Error **errp);
+
+void vfio_user_device_reset(VFIOUserProxy *proxy);
+
+extern VFIODeviceIOOps vfio_user_device_io_ops_sock;
+
+#endif /* VFIO_USER_DEVICE_H */
diff --git a/hw/vfio-user/meson.build b/hw/vfio-user/meson.build
new file mode 100644
index 0000000..2ed0ae5
--- /dev/null
+++ b/hw/vfio-user/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+vfio_user_ss = ss.source_set()
+vfio_user_ss.add(files(
+ 'container.c',
+ 'device.c',
+ 'pci.c',
+ 'proxy.c',
+))
+
+system_ss.add_all(when: 'CONFIG_VFIO_USER', if_true: vfio_user_ss)
diff --git a/hw/vfio-user/pci.c b/hw/vfio-user/pci.c
new file mode 100644
index 0000000..be71c77
--- /dev/null
+++ b/hw/vfio-user/pci.c
@@ -0,0 +1,475 @@
+/*
+ * vfio PCI device over a UNIX socket.
+ *
+ * Copyright Ā© 2018, 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <sys/ioctl.h>
+#include "qemu/osdep.h"
+#include "qapi-visit-sockets.h"
+#include "qemu/error-report.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/vfio/pci.h"
+#include "hw/vfio-user/device.h"
+#include "hw/vfio-user/proxy.h"
+
+#define TYPE_VFIO_USER_PCI "vfio-user-pci"
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserPCIDevice, VFIO_USER_PCI)
+
+struct VFIOUserPCIDevice {
+ VFIOPCIDevice device;
+ SocketAddress *socket;
+ bool send_queued; /* all sends are queued */
+ uint32_t wait_time; /* timeout for message replies */
+ bool no_post; /* all region writes are sync */
+};
+
+/*
+ * The server maintains the device's pending interrupts
+ * via its MSI-X table and PBA, so we treat these accesses
+ * like PCI config space and forward them.
+ */
+static uint64_t vfio_user_pba_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIORegion *region = &vdev->bars[vdev->msix->pba_bar].region;
+ uint64_t data;
+
+ /* server copy is what matters */
+ data = vfio_region_read(region, addr + vdev->msix->pba_offset, size);
+ return data;
+}
+
+static void vfio_user_pba_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ /* dropped */
+}
+
+static const MemoryRegionOps vfio_user_pba_ops = {
+ .read = vfio_user_pba_read,
+ .write = vfio_user_pba_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
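Editorial note: the net effect of these ops is that a guest read of the PBA page is forwarded to the server rather than served from QEMU's local MSI-X state. Roughly as follows; the lower half of the chain is an assumption about the generic VFIO region code, which for vfio-user ends up in the socket-based io_ops installed at realize time:

/*
 * guest load from the PBA page
 *   -> vfio_user_pba_read()
 *     -> vfio_region_read(region, pba_offset + addr, size)
 *       -> vbasedev->io_ops->region_read()     (vfio_user_device_io_ops_sock)
 *         -> VFIO_USER_REGION_READ message to the server
 */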
+
+static void vfio_user_msix_setup(VFIOPCIDevice *vdev)
+{
+ MemoryRegion *vfio_reg, *msix_reg, *pba_reg;
+
+ pba_reg = g_new0(MemoryRegion, 1);
+ vdev->msix->pba_region = pba_reg;
+
+ vfio_reg = vdev->bars[vdev->msix->pba_bar].mr;
+ msix_reg = &vdev->pdev.msix_pba_mmio;
+ memory_region_init_io(pba_reg, OBJECT(vdev), &vfio_user_pba_ops, vdev,
+ "VFIO MSIX PBA", int128_get64(msix_reg->size));
+ memory_region_add_subregion_overlap(vfio_reg, vdev->msix->pba_offset,
+ pba_reg, 1);
+}
+
+static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
+{
+ MemoryRegion *mr, *sub;
+
+ mr = vdev->bars[vdev->msix->pba_bar].mr;
+ sub = vdev->msix->pba_region;
+ memory_region_del_subregion(mr, sub);
+
+ g_free(vdev->msix->pba_region);
+ vdev->msix->pba_region = NULL;
+}
+
+static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+ VFIOUserDMARW *res;
+ MemTxResult r;
+ size_t size;
+
+ if (msg->hdr.size < sizeof(*msg)) {
+ vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+ return;
+ }
+ if (msg->count > proxy->max_xfer_size) {
+ vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+ return;
+ }
+
+ /* switch to our own message buffer */
+ size = msg->count + sizeof(VFIOUserDMARW);
+ res = g_malloc0(size);
+ memcpy(res, msg, sizeof(*res));
+ g_free(msg);
+
+ r = pci_dma_read(pdev, res->offset, &res->data, res->count);
+
+ switch (r) {
+ case MEMTX_OK:
+ if (res->hdr.flags & VFIO_USER_NO_REPLY) {
+ g_free(res);
+ return;
+ }
+ vfio_user_send_reply(proxy, &res->hdr, size);
+ break;
+ case MEMTX_ERROR:
+ vfio_user_send_error(proxy, &res->hdr, EFAULT);
+ break;
+ case MEMTX_DECODE_ERROR:
+ vfio_user_send_error(proxy, &res->hdr, ENODEV);
+ break;
+ case MEMTX_ACCESS_ERROR:
+ vfio_user_send_error(proxy, &res->hdr, EPERM);
+ break;
+ default:
+ error_printf("vfio_user_dma_read unknown error %d\n", r);
+ vfio_user_send_error(vdev->vbasedev.proxy, &res->hdr, EINVAL);
+ }
+}
+
+static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+ MemTxResult r;
+
+ if (msg->hdr.size < sizeof(*msg)) {
+ vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+ return;
+ }
+ /* make sure transfer count isn't larger than the message data */
+ if (msg->count > msg->hdr.size - sizeof(*msg)) {
+ vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+ return;
+ }
+
+ r = pci_dma_write(pdev, msg->offset, &msg->data, msg->count);
+
+ switch (r) {
+ case MEMTX_OK:
+ if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) {
+ vfio_user_send_reply(proxy, &msg->hdr, sizeof(msg->hdr));
+ } else {
+ g_free(msg);
+ }
+ break;
+ case MEMTX_ERROR:
+ vfio_user_send_error(proxy, &msg->hdr, EFAULT);
+ break;
+ case MEMTX_DECODE_ERROR:
+ vfio_user_send_error(proxy, &msg->hdr, ENODEV);
+ break;
+ case MEMTX_ACCESS_ERROR:
+ vfio_user_send_error(proxy, &msg->hdr, EPERM);
+ break;
+ default:
+ error_printf("vfio_user_dma_write unknown error %d\n", r);
+ vfio_user_send_error(vdev->vbasedev.proxy, &msg->hdr, EINVAL);
+ }
+}
+
+/*
+ * Incoming request message callback.
+ *
+ * Runs off main loop, so BQL held.
+ */
+static void vfio_user_pci_process_req(void *opaque, VFIOUserMsg *msg)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIOUserHdr *hdr = msg->hdr;
+
+ /* no incoming PCI requests pass FDs */
+ if (msg->fds != NULL) {
+ vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL);
+ vfio_user_putfds(msg);
+ return;
+ }
+
+ switch (hdr->command) {
+ case VFIO_USER_DMA_READ:
+ vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr);
+ break;
+ case VFIO_USER_DMA_WRITE:
+ vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr);
+ break;
+ default:
+ error_printf("vfio_user_pci_process_req unknown cmd %d\n",
+ hdr->command);
+ vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS);
+ }
+}
+
+/*
+ * Emulated devices don't use host hot reset
+ */
+static void vfio_user_compute_needs_reset(VFIODevice *vbasedev)
+{
+ vbasedev->needs_reset = false;
+}
+
+static Object *vfio_user_pci_get_object(VFIODevice *vbasedev)
+{
+ VFIOUserPCIDevice *vdev = container_of(vbasedev, VFIOUserPCIDevice,
+ device.vbasedev);
+
+ return OBJECT(vdev);
+}
+
+static VFIODeviceOps vfio_user_pci_ops = {
+ .vfio_compute_needs_reset = vfio_user_compute_needs_reset,
+ .vfio_eoi = vfio_pci_intx_eoi,
+ .vfio_get_object = vfio_user_pci_get_object,
+ /* No live migration support yet. */
+ .vfio_save_config = NULL,
+ .vfio_load_config = NULL,
+};
+
+static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp)
+{
+ ERRP_GUARD();
+ VFIOUserPCIDevice *udev = VFIO_USER_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ const char *sock_name;
+ AddressSpace *as;
+ SocketAddress addr;
+ VFIOUserProxy *proxy;
+
+ if (!udev->socket) {
+ error_setg(errp, "No socket specified");
+ error_append_hint(errp, "e.g. -device '{"
+ "\"driver\":\"vfio-user-pci\", "
+ "\"socket\": {\"path\": \"/tmp/vfio-user.sock\", "
+ "\"type\": \"unix\"}'"
+ "}'\n");
+ return;
+ }
+
+ sock_name = udev->socket->u.q_unix.path;
+
+ vbasedev->name = g_strdup_printf("vfio-user:%s", sock_name);
+
+ memset(&addr, 0, sizeof(addr));
+ addr.type = SOCKET_ADDRESS_TYPE_UNIX;
+ addr.u.q_unix.path = (char *)sock_name;
+ proxy = vfio_user_connect_dev(&addr, errp);
+ if (!proxy) {
+ return;
+ }
+ vbasedev->proxy = proxy;
+ vfio_user_set_handler(vbasedev, vfio_user_pci_process_req, vdev);
+
+ if (udev->send_queued) {
+ proxy->flags |= VFIO_PROXY_FORCE_QUEUED;
+ }
+
+ if (udev->no_post) {
+ proxy->flags |= VFIO_PROXY_NO_POST;
+ }
+
+ /* user specified or 5 sec default */
+ proxy->wait_time = udev->wait_time;
+
+ if (!vfio_user_validate_version(proxy, errp)) {
+ goto error;
+ }
+
+ /*
+ * Use socket-based device I/O instead of vfio kernel driver.
+ */
+ vbasedev->io_ops = &vfio_user_device_io_ops_sock;
+
+ /*
+ * vfio-user devices are effectively mdevs (don't use a host iommu).
+ */
+ vbasedev->mdev = true;
+
+ /*
+ * Enable per-region fds.
+ */
+ vbasedev->use_region_fds = true;
+
+ as = pci_device_iommu_address_space(pdev);
+ if (!vfio_device_attach_by_iommu_type(TYPE_VFIO_IOMMU_USER,
+ vbasedev->name, vbasedev,
+ as, errp)) {
+ goto error;
+ }
+
+ if (!vfio_pci_populate_device(vdev, errp)) {
+ goto error;
+ }
+
+ if (!vfio_pci_config_setup(vdev, errp)) {
+ goto error;
+ }
+
+ /*
+ * vfio_pci_config_setup will have registered the device's BARs
+ * and setup any MSIX BARs, so errors after it succeeds must
+ * use out_teardown
+ */
+
+ if (!vfio_pci_add_capabilities(vdev, errp)) {
+ goto out_teardown;
+ }
+
+ if (vdev->msix != NULL) {
+ vfio_user_msix_setup(vdev);
+ }
+
+ if (!vfio_pci_interrupt_setup(vdev, errp)) {
+ goto out_teardown;
+ }
+
+ vfio_pci_register_err_notifier(vdev);
+ vfio_pci_register_req_notifier(vdev);
+
+ return;
+
+out_teardown:
+ vfio_pci_teardown_msi(vdev);
+ vfio_pci_bars_exit(vdev);
+error:
+ error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ vfio_pci_put_device(vdev);
+}
+
+static void vfio_user_instance_init(Object *obj)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(obj);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+
+ device_add_bootindex_property(obj, &vdev->bootindex,
+ "bootindex", NULL,
+ &pci_dev->qdev);
+ vdev->host.domain = ~0U;
+ vdev->host.bus = ~0U;
+ vdev->host.slot = ~0U;
+ vdev->host.function = ~0U;
+
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_user_pci_ops,
+ DEVICE(vdev), false);
+
+ vdev->nv_gpudirect_clique = 0xFF;
+
+ /*
+ * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
+ * line, therefore, no need to wait to realize like other devices.
+ */
+ pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+}
+
+static void vfio_user_instance_finalize(Object *obj)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+
+ if (vdev->msix != NULL) {
+ vfio_user_msix_teardown(vdev);
+ }
+
+ vfio_pci_put_device(vdev);
+
+ if (vbasedev->proxy != NULL) {
+ vfio_user_disconnect(vbasedev->proxy);
+ }
+}
+
+static void vfio_user_pci_reset(DeviceState *dev)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+
+ vfio_pci_pre_reset(vdev);
+
+ if (vbasedev->reset_works) {
+ vfio_user_device_reset(vbasedev->proxy);
+ }
+
+ vfio_pci_post_reset(vdev);
+}
+
+static const Property vfio_user_pci_dev_properties[] = {
+ DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice,
+ vendor_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice,
+ device_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
+ sub_vendor_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
+ sub_device_id, PCI_ANY_ID),
+ DEFINE_PROP_BOOL("x-send-queued", VFIOUserPCIDevice, send_queued, false),
+ DEFINE_PROP_UINT32("x-msg-timeout", VFIOUserPCIDevice, wait_time, 5000),
+ DEFINE_PROP_BOOL("x-no-posted-writes", VFIOUserPCIDevice, no_post, false),
+};
+
+static void vfio_user_pci_set_socket(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ VFIOUserPCIDevice *udev = VFIO_USER_PCI(obj);
+ bool success;
+
+ if (udev->device.vbasedev.proxy) {
+ error_setg(errp, "Proxy is connected");
+ return;
+ }
+
+ qapi_free_SocketAddress(udev->socket);
+
+ udev->socket = NULL;
+
+ success = visit_type_SocketAddress(v, name, &udev->socket, errp);
+
+ if (!success) {
+ return;
+ }
+
+ if (udev->socket->type != SOCKET_ADDRESS_TYPE_UNIX) {
+ error_setg(errp, "Unsupported socket type %s",
+ SocketAddressType_str(udev->socket->type));
+ qapi_free_SocketAddress(udev->socket);
+ udev->socket = NULL;
+ return;
+ }
+}
+
+static void vfio_user_pci_dev_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
+
+ device_class_set_legacy_reset(dc, vfio_user_pci_reset);
+ device_class_set_props(dc, vfio_user_pci_dev_properties);
+
+ object_class_property_add(klass, "socket", "SocketAddress", NULL,
+ vfio_user_pci_set_socket, NULL, NULL);
+ object_class_property_set_description(klass, "socket",
+ "SocketAddress (UNIX sockets only)");
+
+ dc->desc = "VFIO over socket PCI device assignment";
+ pdc->realize = vfio_user_pci_realize;
+}
+
+static const TypeInfo vfio_user_pci_dev_info = {
+ .name = TYPE_VFIO_USER_PCI,
+ .parent = TYPE_VFIO_PCI_BASE,
+ .instance_size = sizeof(VFIOUserPCIDevice),
+ .class_init = vfio_user_pci_dev_class_init,
+ .instance_init = vfio_user_instance_init,
+ .instance_finalize = vfio_user_instance_finalize,
+};
+
+static void register_vfio_user_dev_type(void)
+{
+ type_register_static(&vfio_user_pci_dev_info);
+}
+
+type_init(register_vfio_user_dev_type)
diff --git a/hw/vfio-user/protocol.h b/hw/vfio-user/protocol.h
new file mode 100644
index 0000000..3249a4a
--- /dev/null
+++ b/hw/vfio-user/protocol.h
@@ -0,0 +1,242 @@
+#ifndef VFIO_USER_PROTOCOL_H
+#define VFIO_USER_PROTOCOL_H
+
+/*
+ * vfio protocol over a UNIX socket.
+ *
+ * Copyright Ā© 2018, 2021 Oracle and/or its affiliates.
+ *
+ * Each message has a standard header that describes the command
+ * being sent, which is almost always a VFIO ioctl().
+ *
+ * The header may be followed by command-specific data, such as the
+ * region and offset info for read and write commands.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+typedef struct {
+ uint16_t id;
+ uint16_t command;
+ uint32_t size;
+ uint32_t flags;
+ uint32_t error_reply;
+} VFIOUserHdr;
+
+/* VFIOUserHdr commands */
+enum vfio_user_command {
+ VFIO_USER_VERSION = 1,
+ VFIO_USER_DMA_MAP = 2,
+ VFIO_USER_DMA_UNMAP = 3,
+ VFIO_USER_DEVICE_GET_INFO = 4,
+ VFIO_USER_DEVICE_GET_REGION_INFO = 5,
+ VFIO_USER_DEVICE_GET_REGION_IO_FDS = 6,
+ VFIO_USER_DEVICE_GET_IRQ_INFO = 7,
+ VFIO_USER_DEVICE_SET_IRQS = 8,
+ VFIO_USER_REGION_READ = 9,
+ VFIO_USER_REGION_WRITE = 10,
+ VFIO_USER_DMA_READ = 11,
+ VFIO_USER_DMA_WRITE = 12,
+ VFIO_USER_DEVICE_RESET = 13,
+ VFIO_USER_DIRTY_PAGES = 14,
+ VFIO_USER_REGION_WRITE_MULTI = 15,
+ VFIO_USER_MAX,
+};
+
+/* VFIOUserHdr flags */
+#define VFIO_USER_REQUEST 0x0
+#define VFIO_USER_REPLY 0x1
+#define VFIO_USER_TYPE 0xF
+
+#define VFIO_USER_NO_REPLY 0x10
+#define VFIO_USER_ERROR 0x20
+
+
+/*
+ * VFIO_USER_VERSION
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint16_t major;
+ uint16_t minor;
+ char capabilities[];
+} VFIOUserVersion;
+
+#define VFIO_USER_MAJOR_VER 0
+#define VFIO_USER_MINOR_VER 0
+
+#define VFIO_USER_CAP "capabilities"
+
+/* "capabilities" members */
+#define VFIO_USER_CAP_MAX_FDS "max_msg_fds"
+#define VFIO_USER_CAP_MAX_XFER "max_data_xfer_size"
+#define VFIO_USER_CAP_PGSIZES "pgsizes"
+#define VFIO_USER_CAP_MAP_MAX "max_dma_maps"
+#define VFIO_USER_CAP_MIGR "migration"
+#define VFIO_USER_CAP_MULTI "write_multiple"
+
+/* "migration" members */
+#define VFIO_USER_CAP_PGSIZE "pgsize"
+#define VFIO_USER_CAP_MAX_BITMAP "max_bitmap_size"
+
+/*
+ * Max FDs mainly comes into play when a device supports multiple interrupts
+ * where each one uses an eventfd to inject it into the guest.
+ * It is clamped by the number of FDs the qio channel supports in a
+ * single message.
+ */
+#define VFIO_USER_DEF_MAX_FDS 8
+#define VFIO_USER_MAX_MAX_FDS 16
+
+/*
+ * Max transfer limits the amount of data in region and DMA messages.
+ * Region R/W will be very small (limited by how much a single instruction
+ * can process) so just use a reasonable limit here.
+ */
+#define VFIO_USER_DEF_MAX_XFER (1024 * 1024)
+#define VFIO_USER_MAX_MAX_XFER (64 * 1024 * 1024)
+
+/*
+ * Default supported page size is 4k.
+ */
+#define VFIO_USER_DEF_PGSIZE 4096
+
+/*
+ * Default max number of DMA mappings is stolen from the
+ * linux kernel "dma_entry_limit"
+ */
+#define VFIO_USER_DEF_MAP_MAX 65535
+
+/*
+ * Default max bitmap size is also taken from the Linux kernel,
+ * where usage of signed ints limits the VA range to 2^31 bytes.
+ * Dividing that by the number of bits per byte yields 256MB
+ */
+#define VFIO_USER_DEF_MAX_BITMAP (256 * 1024 * 1024)
+
+/*
+ * VFIO_USER_DMA_MAP
+ * imported from struct vfio_iommu_type1_dma_map
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint64_t offset; /* FD offset */
+ uint64_t iova;
+ uint64_t size;
+} VFIOUserDMAMap;
+
+/*
+ * VFIO_USER_DMA_UNMAP
+ * imported from struct vfio_iommu_type1_dma_unmap
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint64_t iova;
+ uint64_t size;
+} VFIOUserDMAUnmap;
+
+/*
+ * VFIO_USER_DEVICE_GET_INFO
+ * imported from struct vfio_device_info
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint32_t num_regions;
+ uint32_t num_irqs;
+} VFIOUserDeviceInfo;
+
+/*
+ * VFIO_USER_DEVICE_GET_REGION_INFO
+ * imported from struct vfio_region_info
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint32_t index;
+ uint32_t cap_offset;
+ uint64_t size;
+ uint64_t offset;
+} VFIOUserRegionInfo;
+
+/*
+ * VFIO_USER_DEVICE_GET_IRQ_INFO
+ * imported from struct vfio_irq_info
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint32_t index;
+ uint32_t count;
+} VFIOUserIRQInfo;
+
+/*
+ * VFIO_USER_DEVICE_SET_IRQS
+ * imported from struct vfio_irq_set
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint32_t argsz;
+ uint32_t flags;
+ uint32_t index;
+ uint32_t start;
+ uint32_t count;
+} VFIOUserIRQSet;
+
+/*
+ * VFIO_USER_REGION_READ
+ * VFIO_USER_REGION_WRITE
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint64_t offset;
+ uint32_t region;
+ uint32_t count;
+ char data[];
+} VFIOUserRegionRW;
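For orientation (editorial): a sketch of how a region-read request using this layout is populated before being queued. The proxy normally fills the header fields via vfio_user_request_msg(), so the direct assignments here only show which fields end up where; the helper name is illustrative.

/* Illustration only: request 4 bytes at offset 0x10 of region 0. */
static void example_fill_region_read(VFIOUserRegionRW *msg)
{
    msg->hdr.command = VFIO_USER_REGION_READ;
    msg->hdr.size    = sizeof(*msg);      /* no payload on the request */
    msg->hdr.flags   = VFIO_USER_REQUEST;
    msg->region      = 0;
    msg->offset      = 0x10;
    msg->count       = 4;
    /* the reply reuses the same layout, with data[] holding the 4 bytes */
}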
+
+/*
+ * VFIO_USER_DMA_READ
+ * VFIO_USER_DMA_WRITE
+ */
+typedef struct {
+ VFIOUserHdr hdr;
+ uint64_t offset;
+ uint32_t count;
+ char data[];
+} VFIOUserDMARW;
+
+/* imported from struct vfio_bitmap */
+typedef struct {
+ uint64_t pgsize;
+ uint64_t size;
+ char data[];
+} VFIOUserBitmap;
+
+/*
+ * VFIO_USER_REGION_WRITE_MULTI
+ */
+#define VFIO_USER_MULTI_DATA 8
+#define VFIO_USER_MULTI_MAX 200
+
+typedef struct {
+ uint64_t offset;
+ uint32_t region;
+ uint32_t count;
+ char data[VFIO_USER_MULTI_DATA];
+} VFIOUserWROne;
+
+typedef struct {
+ VFIOUserHdr hdr;
+ uint64_t wr_cnt;
+ VFIOUserWROne wrs[VFIO_USER_MULTI_MAX];
+} VFIOUserWRMulti;
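A hedged sketch of how a coalesced message is built up. vfio_user_add_multi() in proxy.c is the real producer; this only illustrates the data layout and assumes the caller has already checked that wr_cnt is below VFIO_USER_MULTI_MAX and count is at most VFIO_USER_MULTI_DATA.

/* Illustration only: append one small write to a WRITE_MULTI being built. */
static void example_append_write(VFIOUserWRMulti *wm, uint32_t region,
                                 uint64_t offset, const void *data,
                                 uint32_t count)
{
    VFIOUserWROne *w = &wm->wrs[wm->wr_cnt++];

    w->region = region;
    w->offset = offset;
    w->count  = count;
    memcpy(w->data, data, count);   /* data[] holds at most 8 bytes */
}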
+
+#endif /* VFIO_USER_PROTOCOL_H */
diff --git a/hw/vfio-user/proxy.c b/hw/vfio-user/proxy.c
new file mode 100644
index 0000000..c418954
--- /dev/null
+++ b/hw/vfio-user/proxy.c
@@ -0,0 +1,1356 @@
+/*
+ * vfio protocol over a UNIX socket.
+ *
+ * Copyright Ā© 2018, 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio-user/proxy.h"
+#include "hw/vfio-user/trace.h"
+#include "qapi/error.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qnum.h"
+#include "qemu/error-report.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/thread.h"
+#include "system/iothread.h"
+
+static IOThread *vfio_user_iothread;
+
+static void vfio_user_shutdown(VFIOUserProxy *proxy);
+static VFIOUserMsg *vfio_user_getmsg(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds);
+static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg);
+
+static void vfio_user_recv(void *opaque);
+static void vfio_user_send(void *opaque);
+static void vfio_user_cb(void *opaque);
+
+static void vfio_user_request(void *opaque);
+
+static inline void vfio_user_set_error(VFIOUserHdr *hdr, uint32_t err)
+{
+ hdr->flags |= VFIO_USER_ERROR;
+ hdr->error_reply = err;
+}
+
+/*
+ * Functions called by main, CPU, or iothread threads
+ */
+
+static void vfio_user_shutdown(VFIOUserProxy *proxy)
+{
+ qio_channel_shutdown(proxy->ioc, QIO_CHANNEL_SHUTDOWN_READ, NULL);
+ qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx, NULL,
+ proxy->ctx, NULL, NULL);
+}
+
+/*
+ * Same return values as qio_channel_writev_full():
+ *
+ * QIO_CHANNEL_ERR_BLOCK: *errp not set
+ * -1: *errp will be populated
+ * otherwise: bytes written
+ */
+static ssize_t vfio_user_send_qio(VFIOUserProxy *proxy, VFIOUserMsg *msg,
+ Error **errp)
+{
+ VFIOUserFDs *fds = msg->fds;
+ struct iovec iov = {
+ .iov_base = msg->hdr,
+ .iov_len = msg->hdr->size,
+ };
+ size_t numfds = 0;
+ int *fdp = NULL;
+ ssize_t ret;
+
+ if (fds != NULL && fds->send_fds != 0) {
+ numfds = fds->send_fds;
+ fdp = fds->fds;
+ }
+
+ ret = qio_channel_writev_full(proxy->ioc, &iov, 1, fdp, numfds, 0, errp);
+
+ if (ret == -1) {
+ vfio_user_set_error(msg->hdr, EIO);
+ vfio_user_shutdown(proxy);
+ }
+ trace_vfio_user_send_write(msg->hdr->id, ret);
+
+ return ret;
+}
+
+static VFIOUserMsg *vfio_user_getmsg(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds)
+{
+ VFIOUserMsg *msg;
+
+ msg = QTAILQ_FIRST(&proxy->free);
+ if (msg != NULL) {
+ QTAILQ_REMOVE(&proxy->free, msg, next);
+ } else {
+ msg = g_malloc0(sizeof(*msg));
+ qemu_cond_init(&msg->cv);
+ }
+
+ msg->hdr = hdr;
+ msg->fds = fds;
+ return msg;
+}
+
+/*
+ * Recycle a message list entry to the free list.
+ */
+static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg)
+{
+ if (msg->type == VFIO_MSG_NONE) {
+ error_printf("vfio_user_recycle - freeing free msg\n");
+ return;
+ }
+
+ /* free msg buffer if no one is waiting to consume the reply */
+ if (msg->type == VFIO_MSG_NOWAIT || msg->type == VFIO_MSG_ASYNC) {
+ g_free(msg->hdr);
+ if (msg->fds != NULL) {
+ g_free(msg->fds);
+ }
+ }
+
+ msg->type = VFIO_MSG_NONE;
+ msg->hdr = NULL;
+ msg->fds = NULL;
+ msg->complete = false;
+ msg->pending = false;
+ QTAILQ_INSERT_HEAD(&proxy->free, msg, next);
+}
+
+VFIOUserFDs *vfio_user_getfds(int numfds)
+{
+ VFIOUserFDs *fds = g_malloc0(sizeof(*fds) + (numfds * sizeof(int)));
+
+ fds->fds = (int *)((char *)fds + sizeof(*fds));
+
+ return fds;
+}
+
+/*
+ * Functions only called by iothread
+ */
+
+/*
+ * Process a received message.
+ */
+static void vfio_user_process(VFIOUserProxy *proxy, VFIOUserMsg *msg,
+ bool isreply)
+{
+
+ /*
+ * Replies signal a waiter; if there is none, just check for errors
+ * and free the message buffer.
+ *
+ * Requests get queued for the BH.
+ */
+ if (isreply) {
+ msg->complete = true;
+ if (msg->type == VFIO_MSG_WAIT) {
+ qemu_cond_signal(&msg->cv);
+ } else {
+ if (msg->hdr->flags & VFIO_USER_ERROR) {
+ error_printf("vfio_user_process: error reply on async ");
+ error_printf("request command %x error %s\n",
+ msg->hdr->command,
+ strerror(msg->hdr->error_reply));
+ }
+ /* youngest nowait msg has been ack'd */
+ if (proxy->last_nowait == msg) {
+ proxy->last_nowait = NULL;
+ }
+ vfio_user_recycle(proxy, msg);
+ }
+ } else {
+ QTAILQ_INSERT_TAIL(&proxy->incoming, msg, next);
+ qemu_bh_schedule(proxy->req_bh);
+ }
+}
+
+/*
+ * Complete a partial message read
+ */
+static int vfio_user_complete(VFIOUserProxy *proxy, Error **errp)
+{
+ VFIOUserMsg *msg = proxy->part_recv;
+ size_t msgleft = proxy->recv_left;
+ bool isreply;
+ char *data;
+ int ret;
+
+ data = (char *)msg->hdr + (msg->hdr->size - msgleft);
+ while (msgleft > 0) {
+ ret = qio_channel_read(proxy->ioc, data, msgleft, errp);
+
+ /* error or would block */
+ if (ret <= 0) {
+ /* try for the rest on the next iteration */
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ proxy->recv_left = msgleft;
+ }
+ return ret;
+ }
+ trace_vfio_user_recv_read(msg->hdr->id, ret);
+
+ msgleft -= ret;
+ data += ret;
+ }
+
+ /*
+ * Read complete message, process it.
+ */
+ proxy->part_recv = NULL;
+ proxy->recv_left = 0;
+ isreply = (msg->hdr->flags & VFIO_USER_TYPE) == VFIO_USER_REPLY;
+ vfio_user_process(proxy, msg, isreply);
+
+ /* return positive value */
+ return 1;
+}
+
+/*
+ * Receive and process one incoming message.
+ *
+ * For replies, find matching outgoing request and wake any waiters.
+ * For requests, queue in incoming list and run request BH.
+ */
+static int vfio_user_recv_one(VFIOUserProxy *proxy, Error **errp)
+{
+ VFIOUserMsg *msg = NULL;
+ g_autofree int *fdp = NULL;
+ VFIOUserFDs *reqfds;
+ VFIOUserHdr hdr;
+ struct iovec iov = {
+ .iov_base = &hdr,
+ .iov_len = sizeof(hdr),
+ };
+ bool isreply = false;
+ int i, ret;
+ size_t msgleft, numfds = 0;
+ char *data = NULL;
+ char *buf = NULL;
+
+ /*
+ * Complete any partial reads
+ */
+ if (proxy->part_recv != NULL) {
+ ret = vfio_user_complete(proxy, errp);
+
+ /* still not complete, try later */
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ return ret;
+ }
+
+ if (ret <= 0) {
+ goto fatal;
+ }
+ /* else fall into reading another msg */
+ }
+
+ /*
+ * Read header
+ */
+ ret = qio_channel_readv_full(proxy->ioc, &iov, 1, &fdp, &numfds, 0,
+ errp);
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ return ret;
+ }
+
+ /* read error or other side closed connection */
+ if (ret <= 0) {
+ goto fatal;
+ }
+
+ if (ret < sizeof(hdr)) {
+ error_setg(errp, "short read of header");
+ goto fatal;
+ }
+
+ /*
+ * Validate header
+ */
+ if (hdr.size < sizeof(VFIOUserHdr)) {
+ error_setg(errp, "bad header size");
+ goto fatal;
+ }
+ switch (hdr.flags & VFIO_USER_TYPE) {
+ case VFIO_USER_REQUEST:
+ isreply = false;
+ break;
+ case VFIO_USER_REPLY:
+ isreply = true;
+ break;
+ default:
+ error_setg(errp, "unknown message type");
+ goto fatal;
+ }
+ trace_vfio_user_recv_hdr(proxy->sockname, hdr.id, hdr.command, hdr.size,
+ hdr.flags);
+
+ /*
+ * For replies, find the matching pending request.
+ * For requests, reap incoming FDs.
+ */
+ if (isreply) {
+ QTAILQ_FOREACH(msg, &proxy->pending, next) {
+ if (hdr.id == msg->id) {
+ break;
+ }
+ }
+ if (msg == NULL) {
+ error_setg(errp, "unexpected reply");
+ goto err;
+ }
+ QTAILQ_REMOVE(&proxy->pending, msg, next);
+
+ /*
+ * Process any received FDs
+ */
+ if (numfds != 0) {
+ if (msg->fds == NULL || msg->fds->recv_fds < numfds) {
+ error_setg(errp, "unexpected FDs");
+ goto err;
+ }
+ msg->fds->recv_fds = numfds;
+ memcpy(msg->fds->fds, fdp, numfds * sizeof(int));
+ }
+ } else {
+ if (numfds != 0) {
+ reqfds = vfio_user_getfds(numfds);
+ memcpy(reqfds->fds, fdp, numfds * sizeof(int));
+ } else {
+ reqfds = NULL;
+ }
+ }
+
+ /*
+ * Put the whole message into a single buffer.
+ */
+ if (isreply) {
+ if (hdr.size > msg->rsize) {
+ error_setg(errp, "reply larger than recv buffer");
+ goto err;
+ }
+ *msg->hdr = hdr;
+ data = (char *)msg->hdr + sizeof(hdr);
+ } else {
+ if (hdr.size > proxy->max_xfer_size + sizeof(VFIOUserDMARW)) {
+ error_setg(errp, "vfio_user_recv request larger than max");
+ goto err;
+ }
+ buf = g_malloc0(hdr.size);
+ memcpy(buf, &hdr, sizeof(hdr));
+ data = buf + sizeof(hdr);
+ msg = vfio_user_getmsg(proxy, (VFIOUserHdr *)buf, reqfds);
+ msg->type = VFIO_MSG_REQ;
+ }
+
+ /*
+ * Read rest of message.
+ */
+ msgleft = hdr.size - sizeof(hdr);
+ while (msgleft > 0) {
+ ret = qio_channel_read(proxy->ioc, data, msgleft, errp);
+
+ /* prepare to complete the read on the next iteration */
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ proxy->part_recv = msg;
+ proxy->recv_left = msgleft;
+ return ret;
+ }
+
+ if (ret <= 0) {
+ goto fatal;
+ }
+ trace_vfio_user_recv_read(hdr.id, ret);
+
+ msgleft -= ret;
+ data += ret;
+ }
+
+ vfio_user_process(proxy, msg, isreply);
+ return 0;
+
+ /*
+ * fatal means the other side closed or we don't trust the stream
+ * err means this message is corrupt
+ */
+fatal:
+ vfio_user_shutdown(proxy);
+ proxy->state = VFIO_PROXY_ERROR;
+
+ /* set error if server side closed */
+ if (ret == 0) {
+ error_setg(errp, "server closed socket");
+ }
+
+err:
+ for (i = 0; i < numfds; i++) {
+ close(fdp[i]);
+ }
+ if (isreply && msg != NULL) {
+ /* force an error to keep sending thread from hanging */
+ vfio_user_set_error(msg->hdr, EINVAL);
+ msg->complete = true;
+ qemu_cond_signal(&msg->cv);
+ }
+ return -1;
+}
+
+static void vfio_user_recv(void *opaque)
+{
+ VFIOUserProxy *proxy = opaque;
+
+ QEMU_LOCK_GUARD(&proxy->lock);
+
+ if (proxy->state == VFIO_PROXY_CONNECTED) {
+ Error *local_err = NULL;
+
+ while (vfio_user_recv_one(proxy, &local_err) == 0) {
+ ;
+ }
+
+ if (local_err != NULL) {
+ error_report_err(local_err);
+ }
+ }
+}
+
+/*
+ * Send a single message, same return semantics as vfio_user_send_qio().
+ *
+ * Sent async messages are freed, others are moved to pending queue.
+ */
+static ssize_t vfio_user_send_one(VFIOUserProxy *proxy, Error **errp)
+{
+ VFIOUserMsg *msg;
+ ssize_t ret;
+
+ msg = QTAILQ_FIRST(&proxy->outgoing);
+ ret = vfio_user_send_qio(proxy, msg, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ QTAILQ_REMOVE(&proxy->outgoing, msg, next);
+ proxy->num_outgoing--;
+ if (msg->type == VFIO_MSG_ASYNC) {
+ vfio_user_recycle(proxy, msg);
+ } else {
+ QTAILQ_INSERT_TAIL(&proxy->pending, msg, next);
+ msg->pending = true;
+ }
+
+ return ret;
+}
+
+/*
+ * Send messages from outgoing queue when the socket buffer has space.
+ * If we deplete 'outgoing', remove ourselves from the poll list.
+ */
+static void vfio_user_send(void *opaque)
+{
+ VFIOUserProxy *proxy = opaque;
+
+ QEMU_LOCK_GUARD(&proxy->lock);
+
+ if (proxy->state == VFIO_PROXY_CONNECTED) {
+ while (!QTAILQ_EMPTY(&proxy->outgoing)) {
+ Error *local_err = NULL;
+ int ret;
+
+ ret = vfio_user_send_one(proxy, &local_err);
+
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ return;
+ } else if (ret == -1) {
+ error_report_err(local_err);
+ return;
+ }
+ }
+ qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx,
+ vfio_user_recv, NULL, NULL, proxy);
+
+ /* queue empty - send any pending multi write msgs */
+ if (proxy->wr_multi != NULL) {
+ vfio_user_flush_multi(proxy);
+ }
+ }
+}
+
+static void vfio_user_cb(void *opaque)
+{
+ VFIOUserProxy *proxy = opaque;
+
+ QEMU_LOCK_GUARD(&proxy->lock);
+
+ proxy->state = VFIO_PROXY_CLOSED;
+ qemu_cond_signal(&proxy->close_cv);
+}
+
+
+/*
+ * Functions called by main or CPU threads
+ */
+
+/*
+ * Process incoming requests.
+ *
+ * The bus-specific callback has the form:
+ * request(opaque, msg)
+ * where 'opaque' was specified in vfio_user_set_handler
+ * and 'msg' is the inbound message.
+ *
+ * The callback is responsible for disposing of the message buffer,
+ * usually by re-using it when calling vfio_user_send_reply or vfio_user_send_error,
+ * both of which free their message buffer when the reply is sent.
+ *
+ * If the callback uses a new buffer, it needs to free the old one.
+ */
+static void vfio_user_request(void *opaque)
+{
+ VFIOUserProxy *proxy = opaque;
+ VFIOUserMsgQ new, free;
+ VFIOUserMsg *msg, *m1;
+
+ /* reap all incoming */
+ QTAILQ_INIT(&new);
+ WITH_QEMU_LOCK_GUARD(&proxy->lock) {
+ QTAILQ_FOREACH_SAFE(msg, &proxy->incoming, next, m1) {
+ QTAILQ_REMOVE(&proxy->incoming, msg, next);
+ QTAILQ_INSERT_TAIL(&new, msg, next);
+ }
+ }
+
+ /* process list */
+ QTAILQ_INIT(&free);
+ QTAILQ_FOREACH_SAFE(msg, &new, next, m1) {
+ QTAILQ_REMOVE(&new, msg, next);
+ trace_vfio_user_recv_request(msg->hdr->command);
+ proxy->request(proxy->req_arg, msg);
+ QTAILQ_INSERT_HEAD(&free, msg, next);
+ }
+
+ /* free list */
+ WITH_QEMU_LOCK_GUARD(&proxy->lock) {
+ QTAILQ_FOREACH_SAFE(msg, &free, next, m1) {
+ vfio_user_recycle(proxy, msg);
+ }
+ }
+}
+
+/*
+ * Messages are queued onto the proxy's outgoing list.
+ *
+ * It handles 3 types of messages:
+ *
+ * async messages - replies and posted writes
+ *
+ * There will be no reply from the server, so message
+ * buffers are freed after they're sent.
+ *
+ * nowait messages - map/unmap during address space transactions
+ *
+ * These are also sent async, but a reply is expected so that
+ * vfio_user_wait_reqs() can wait for the youngest nowait request.
+ * They transition from the outgoing list to the pending list
+ * when sent, and are freed when the reply is received.
+ *
+ * wait messages - all other requests
+ *
+ * The reply to these messages is waited for by their caller.
+ * They also transition from outgoing to pending when sent, but
+ * the message buffer is returned to the caller with the reply
+ * contents. The caller is responsible for freeing these messages.
+ *
+ * As an optimization, if the outgoing list and the socket send
+ * buffer are empty, the message is sent inline instead of being
+ * added to the outgoing list. The rest of the transitions are
+ * unchanged.
+ */
+static bool vfio_user_send_queued(VFIOUserProxy *proxy, VFIOUserMsg *msg,
+ Error **errp)
+{
+ int ret;
+
+ /* older coalesced writes go first */
+ if (proxy->wr_multi != NULL &&
+ ((msg->hdr->flags & VFIO_USER_TYPE) == VFIO_USER_REQUEST)) {
+ vfio_user_flush_multi(proxy);
+ }
+
+ /*
+ * Unsent outgoing msgs - add to tail
+ */
+ if (!QTAILQ_EMPTY(&proxy->outgoing)) {
+ QTAILQ_INSERT_TAIL(&proxy->outgoing, msg, next);
+ proxy->num_outgoing++;
+ return true;
+ }
+
+ /*
+ * Try inline - if blocked, queue it and kick send poller
+ */
+ if (proxy->flags & VFIO_PROXY_FORCE_QUEUED) {
+ ret = QIO_CHANNEL_ERR_BLOCK;
+ } else {
+ ret = vfio_user_send_qio(proxy, msg, errp);
+ }
+
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ QTAILQ_INSERT_HEAD(&proxy->outgoing, msg, next);
+ proxy->num_outgoing = 1;
+ qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx,
+ vfio_user_recv, proxy->ctx,
+ vfio_user_send, proxy);
+ return true;
+ }
+ if (ret == -1) {
+ return false;
+ }
+
+ /*
+ * Sent - free async, add others to pending
+ */
+ if (msg->type == VFIO_MSG_ASYNC) {
+ vfio_user_recycle(proxy, msg);
+ } else {
+ QTAILQ_INSERT_TAIL(&proxy->pending, msg, next);
+ msg->pending = true;
+ }
+
+ return true;
+}
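To summarize the three message lifecycles described in the comment above (editorial reference only, not part of the patch):

/*
 *  msg->type   after send            reply expected   buffer freed by
 *  ---------   -------------------   --------------   ----------------------------------
 *  ASYNC       recycled immediately  no               vfio_user_recycle() once sent
 *  NOWAIT      moved to 'pending'    yes              reply path / vfio_user_wait_reqs()
 *  WAIT        moved to 'pending'    yes              caller, after vfio_user_send_wait()
 */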
+
+/*
+ * nowait send - vfio_user_wait_reqs() can wait for it later
+ *
+ * Returns false if the message could not be sent, in which
+ * case @errp will be populated.
+ *
+ * In either case, ownership of @hdr and @fds is taken, and the caller must
+ * *not* free them itself.
+ */
+bool vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, int rsize, Error **errp)
+{
+ VFIOUserMsg *msg;
+
+ QEMU_LOCK_GUARD(&proxy->lock);
+
+ msg = vfio_user_getmsg(proxy, hdr, fds);
+ msg->id = hdr->id;
+ msg->rsize = rsize ? rsize : hdr->size;
+ msg->type = VFIO_MSG_NOWAIT;
+
+ if (hdr->flags & VFIO_USER_NO_REPLY) {
+ error_setg_errno(errp, EINVAL, "%s on NO_REPLY message", __func__);
+ vfio_user_recycle(proxy, msg);
+ return false;
+ }
+
+ if (!vfio_user_send_queued(proxy, msg, errp)) {
+ vfio_user_recycle(proxy, msg);
+ return false;
+ }
+
+ proxy->last_nowait = msg;
+
+ return true;
+}
+
+/*
+ * Returns false if we did not successfully receive a reply message, in which
+ * case @errp will be populated.
+ *
+ * In either case, the caller must free @hdr and @fds if needed.
+ */
+bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, int rsize, Error **errp)
+{
+ VFIOUserMsg *msg;
+ bool ok = false;
+
+ if (hdr->flags & VFIO_USER_NO_REPLY) {
+ error_setg_errno(errp, EINVAL, "%s on NO_REPLY message", __func__);
+ return false;
+ }
+
+ qemu_mutex_lock(&proxy->lock);
+
+ msg = vfio_user_getmsg(proxy, hdr, fds);
+ msg->id = hdr->id;
+ msg->rsize = rsize ? rsize : hdr->size;
+ msg->type = VFIO_MSG_WAIT;
+
+ ok = vfio_user_send_queued(proxy, msg, errp);
+
+ if (ok) {
+ while (!msg->complete) {
+ if (!qemu_cond_timedwait(&msg->cv, &proxy->lock,
+ proxy->wait_time)) {
+ VFIOUserMsgQ *list;
+
+ list = msg->pending ? &proxy->pending : &proxy->outgoing;
+ QTAILQ_REMOVE(list, msg, next);
+ error_setg_errno(errp, ETIMEDOUT,
+ "timed out waiting for reply");
+ ok = false;
+ break;
+ }
+ }
+ }
+
+ vfio_user_recycle(proxy, msg);
+
+ qemu_mutex_unlock(&proxy->lock);
+
+ return ok;
+}
+
+/*
+ * async send - msg can be queued, but will be freed when sent
+ *
+ * Returns false on failure, in which case @errp will be populated.
+ *
+ * In either case, ownership of @hdr and @fds is taken, and the caller must
+ * *not* free them itself.
+ */
+bool vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, Error **errp)
+{
+ VFIOUserMsg *msg;
+
+ QEMU_LOCK_GUARD(&proxy->lock);
+
+ msg = vfio_user_getmsg(proxy, hdr, fds);
+ msg->id = hdr->id;
+ msg->rsize = 0;
+ msg->type = VFIO_MSG_ASYNC;
+
+ if (!(hdr->flags & (VFIO_USER_NO_REPLY | VFIO_USER_REPLY))) {
+ error_setg_errno(errp, EINVAL, "%s on sync message", __func__);
+ vfio_user_recycle(proxy, msg);
+ return false;
+ }
+
+ if (!vfio_user_send_queued(proxy, msg, errp)) {
+ vfio_user_recycle(proxy, msg);
+ return false;
+ }
+
+ return true;
+}
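A minimal caller-side sketch of how the three send variants above are chosen (not part of the patch; the in_transaction flag is a stand-in for whatever transaction state the caller tracks):

static bool example_send(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                         bool in_transaction, Error **errp)
{
    if (hdr->flags & VFIO_USER_NO_REPLY) {
        /* posted write: the proxy frees hdr once it is on the wire */
        return vfio_user_send_async(proxy, hdr, NULL, errp);
    }
    if (in_transaction) {
        /* reply is reaped later by vfio_user_wait_reqs() */
        return vfio_user_send_nowait(proxy, hdr, NULL, 0, errp);
    }
    /* blocks on msg->cv until the reply lands back in hdr */
    return vfio_user_send_wait(proxy, hdr, NULL, 0, errp);
}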
+
+void vfio_user_wait_reqs(VFIOUserProxy *proxy)
+{
+ VFIOUserMsg *msg;
+
+ /*
+ * Any DMA map/unmap requests sent in the middle
+ * of a memory region transaction were sent nowait.
+ * Wait for them here.
+ */
+ qemu_mutex_lock(&proxy->lock);
+ if (proxy->last_nowait != NULL) {
+ /*
+ * Change type to WAIT to wait for reply
+ */
+ msg = proxy->last_nowait;
+ msg->type = VFIO_MSG_WAIT;
+ proxy->last_nowait = NULL;
+ while (!msg->complete) {
+ if (!qemu_cond_timedwait(&msg->cv, &proxy->lock,
+ proxy->wait_time)) {
+ VFIOUserMsgQ *list;
+
+ list = msg->pending ? &proxy->pending : &proxy->outgoing;
+ QTAILQ_REMOVE(list, msg, next);
+ error_printf("vfio_wait_reqs - timed out\n");
+ break;
+ }
+ }
+
+ if (msg->hdr->flags & VFIO_USER_ERROR) {
+ error_printf("vfio_user_wait_reqs - error reply on async ");
+ error_printf("request: command %x error %s\n", msg->hdr->command,
+ strerror(msg->hdr->error_reply));
+ }
+
+ /*
+ * Change type back to NOWAIT to free
+ */
+ msg->type = VFIO_MSG_NOWAIT;
+ vfio_user_recycle(proxy, msg);
+ }
+
+ qemu_mutex_unlock(&proxy->lock);
+}
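An illustrative pairing of the nowait path with vfio_user_wait_reqs() (assumed usage, not in this file; map_req stands for a DMA map request built elsewhere):

static void example_transaction_commit(VFIOUserProxy *proxy,
                                       VFIOUserHdr *map_req, Error **errp)
{
    if (!vfio_user_send_nowait(proxy, map_req, NULL, 0, errp)) {
        return;                  /* the proxy already recycled map_req */
    }
    /* ... further nowait map/unmap requests may follow ... */
    vfio_user_wait_reqs(proxy);  /* waits for the youngest nowait request */
}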
+
+/*
+ * Reply to an incoming request.
+ */
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size)
+{
+ Error *local_err = NULL;
+
+ if (size < sizeof(VFIOUserHdr)) {
+ error_printf("%s: size too small", __func__);
+ g_free(hdr);
+ return;
+ }
+
+ /*
+ * convert header to associated reply
+ */
+ hdr->flags = VFIO_USER_REPLY;
+ hdr->size = size;
+
+ if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) {
+ error_report_err(local_err);
+ }
+}
+
+/*
+ * Send an error reply to an incoming request.
+ */
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error)
+{
+ Error *local_err = NULL;
+
+ /*
+ * convert header to associated reply
+ */
+ hdr->flags = VFIO_USER_REPLY;
+ hdr->flags |= VFIO_USER_ERROR;
+ hdr->error_reply = error;
+ hdr->size = sizeof(*hdr);
+
+ if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) {
+ error_report_err(local_err);
+ }
+}
+
+/*
+ * Close FDs erroneously received in an incoming request.
+ */
+void vfio_user_putfds(VFIOUserMsg *msg)
+{
+ VFIOUserFDs *fds = msg->fds;
+ int i;
+
+ for (i = 0; i < fds->recv_fds; i++) {
+ close(fds->fds[i]);
+ }
+ g_free(fds);
+ msg->fds = NULL;
+}
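An assumed shape for the request callback installed via vfio_user_set_handler(), showing how the reply helpers above are used (sketch only; VFIO_USER_DMA_READ is taken from the protocol header elsewhere in this series):

static void example_request(void *opaque, VFIOUserMsg *msg)
{
    VFIODevice *vbasedev = opaque;
    VFIOUserProxy *proxy = vbasedev->proxy;

    if (msg->fds != NULL) {
        vfio_user_putfds(msg);   /* no fds expected here: close and drop them */
    }

    if (msg->hdr->command == VFIO_USER_DMA_READ) {
        /* real code would append the read data and grow the size */
        vfio_user_send_reply(proxy, msg->hdr, msg->hdr->size);
    } else {
        vfio_user_send_error(proxy, msg->hdr, ENOSYS);
    }
}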
+
+void vfio_user_disable_posted_writes(VFIOUserProxy *proxy)
+{
+ WITH_QEMU_LOCK_GUARD(&proxy->lock) {
+ proxy->flags |= VFIO_PROXY_NO_POST;
+ }
+}
+
+static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets =
+ QLIST_HEAD_INITIALIZER(vfio_user_sockets);
+
+VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp)
+{
+ VFIOUserProxy *proxy;
+ QIOChannelSocket *sioc;
+ QIOChannel *ioc;
+ char *sockname;
+
+ if (addr->type != SOCKET_ADDRESS_TYPE_UNIX) {
+ error_setg(errp, "vfio_user_connect - bad address family");
+ return NULL;
+ }
+ sockname = addr->u.q_unix.path;
+
+ sioc = qio_channel_socket_new();
+ ioc = QIO_CHANNEL(sioc);
+ if (qio_channel_socket_connect_sync(sioc, addr, errp)) {
+ object_unref(OBJECT(ioc));
+ return NULL;
+ }
+ qio_channel_set_blocking(ioc, false, NULL);
+
+ proxy = g_malloc0(sizeof(VFIOUserProxy));
+ proxy->sockname = g_strdup_printf("unix:%s", sockname);
+ proxy->ioc = ioc;
+
+ /* init defaults */
+ proxy->max_xfer_size = VFIO_USER_DEF_MAX_XFER;
+ proxy->max_send_fds = VFIO_USER_DEF_MAX_FDS;
+ proxy->max_dma = VFIO_USER_DEF_MAP_MAX;
+ proxy->dma_pgsizes = VFIO_USER_DEF_PGSIZE;
+ proxy->max_bitmap = VFIO_USER_DEF_MAX_BITMAP;
+ proxy->migr_pgsize = VFIO_USER_DEF_PGSIZE;
+
+ proxy->flags = VFIO_PROXY_CLIENT;
+ proxy->state = VFIO_PROXY_CONNECTED;
+
+ qemu_mutex_init(&proxy->lock);
+ qemu_cond_init(&proxy->close_cv);
+
+ if (vfio_user_iothread == NULL) {
+ vfio_user_iothread = iothread_create("VFIO user", errp);
+ }
+
+ proxy->ctx = iothread_get_aio_context(vfio_user_iothread);
+ proxy->req_bh = qemu_bh_new(vfio_user_request, proxy);
+
+ QTAILQ_INIT(&proxy->outgoing);
+ QTAILQ_INIT(&proxy->incoming);
+ QTAILQ_INIT(&proxy->free);
+ QTAILQ_INIT(&proxy->pending);
+ QLIST_INSERT_HEAD(&vfio_user_sockets, proxy, next);
+
+ return proxy;
+}
+
+void vfio_user_set_handler(VFIODevice *vbasedev,
+ void (*handler)(void *opaque, VFIOUserMsg *msg),
+ void *req_arg)
+{
+ VFIOUserProxy *proxy = vbasedev->proxy;
+
+ proxy->request = handler;
+ proxy->req_arg = req_arg;
+ qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx,
+ vfio_user_recv, NULL, NULL, proxy);
+}
+
+void vfio_user_disconnect(VFIOUserProxy *proxy)
+{
+ VFIOUserMsg *r1, *r2;
+
+ qemu_mutex_lock(&proxy->lock);
+
+ /* our side is quitting */
+ if (proxy->state == VFIO_PROXY_CONNECTED) {
+ vfio_user_shutdown(proxy);
+ if (!QTAILQ_EMPTY(&proxy->pending)) {
+ error_printf("vfio_user_disconnect: outstanding requests\n");
+ }
+ }
+ object_unref(OBJECT(proxy->ioc));
+ proxy->ioc = NULL;
+ qemu_bh_delete(proxy->req_bh);
+ proxy->req_bh = NULL;
+
+ proxy->state = VFIO_PROXY_CLOSING;
+ QTAILQ_FOREACH_SAFE(r1, &proxy->outgoing, next, r2) {
+ qemu_cond_destroy(&r1->cv);
+ QTAILQ_REMOVE(&proxy->outgoing, r1, next);
+ g_free(r1);
+ }
+ QTAILQ_FOREACH_SAFE(r1, &proxy->incoming, next, r2) {
+ qemu_cond_destroy(&r1->cv);
+ QTAILQ_REMOVE(&proxy->incoming, r1, next);
+ g_free(r1);
+ }
+ QTAILQ_FOREACH_SAFE(r1, &proxy->pending, next, r2) {
+ qemu_cond_destroy(&r1->cv);
+ QTAILQ_REMOVE(&proxy->pending, r1, next);
+ g_free(r1);
+ }
+ QTAILQ_FOREACH_SAFE(r1, &proxy->free, next, r2) {
+ qemu_cond_destroy(&r1->cv);
+ QTAILQ_REMOVE(&proxy->free, r1, next);
+ g_free(r1);
+ }
+
+ /*
+ * Make sure the iothread isn't blocking anywhere
+ * with a ref to this proxy by waiting for a BH
+ * handler to run after the proxy fd handlers were
+ * deleted above.
+ */
+ aio_bh_schedule_oneshot(proxy->ctx, vfio_user_cb, proxy);
+ qemu_cond_wait(&proxy->close_cv, &proxy->lock);
+
+ /* we now hold the only ref to proxy */
+ qemu_mutex_unlock(&proxy->lock);
+ qemu_cond_destroy(&proxy->close_cv);
+ qemu_mutex_destroy(&proxy->lock);
+
+ QLIST_REMOVE(proxy, next);
+ if (QLIST_EMPTY(&vfio_user_sockets)) {
+ iothread_destroy(vfio_user_iothread);
+ vfio_user_iothread = NULL;
+ }
+
+ g_free(proxy->sockname);
+ g_free(proxy);
+}
+
+void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
+ uint32_t size, uint32_t flags)
+{
+ static uint16_t next_id;
+
+ hdr->id = qatomic_fetch_inc(&next_id);
+ hdr->command = cmd;
+ hdr->size = size;
+ hdr->flags = (flags & ~VFIO_USER_TYPE) | VFIO_USER_REQUEST;
+ hdr->error_reply = 0;
+}
+
+struct cap_entry {
+ const char *name;
+ bool (*check)(VFIOUserProxy *proxy, QObject *qobj, Error **errp);
+};
+
+static bool caps_parse(VFIOUserProxy *proxy, QDict *qdict,
+ struct cap_entry caps[], Error **errp)
+{
+ QObject *qobj;
+ struct cap_entry *p;
+
+ for (p = caps; p->name != NULL; p++) {
+ qobj = qdict_get(qdict, p->name);
+ if (qobj != NULL) {
+ if (!p->check(proxy, qobj, errp)) {
+ return false;
+ }
+ qdict_del(qdict, p->name);
+ }
+ }
+
+ /* warning, for now */
+ if (qdict_size(qdict) != 0) {
+ warn_report("spurious capabilities");
+ }
+ return true;
+}
+
+static bool check_migr_pgsize(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t pgsize;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &pgsize)) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_PGSIZE);
+ return false;
+ }
+
+ /* must be a multiple of the default page size */
+ if (pgsize & (VFIO_USER_DEF_PGSIZE - 1)) {
+ error_setg(errp, "pgsize 0x%"PRIx64" too small", pgsize);
+ return false;
+ }
+
+ proxy->migr_pgsize = pgsize;
+ return true;
+}
+
+static bool check_bitmap(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t bitmap_size;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &bitmap_size)) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_BITMAP);
+ return false;
+ }
+
+ /* can only lower it */
+ if (bitmap_size > VFIO_USER_DEF_MAX_BITMAP) {
+ error_setg(errp, "%s too large", VFIO_USER_CAP_MAX_BITMAP);
+ return false;
+ }
+
+ proxy->max_bitmap = bitmap_size;
+ return true;
+}
+
+static struct cap_entry caps_migr[] = {
+ { VFIO_USER_CAP_PGSIZE, check_migr_pgsize },
+ { VFIO_USER_CAP_MAX_BITMAP, check_bitmap },
+ { NULL }
+};
+
+static bool check_max_fds(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t max_send_fds;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &max_send_fds) ||
+ max_send_fds > VFIO_USER_MAX_MAX_FDS) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_FDS);
+ return false;
+ }
+ proxy->max_send_fds = max_send_fds;
+ return true;
+}
+
+static bool check_max_xfer(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t max_xfer_size;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &max_xfer_size) ||
+ max_xfer_size > VFIO_USER_MAX_MAX_XFER) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_XFER);
+ return false;
+ }
+ proxy->max_xfer_size = max_xfer_size;
+ return true;
+}
+
+static bool check_pgsizes(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t pgsizes;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &pgsizes)) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_PGSIZES);
+ return false;
+ }
+
+ /* must be a multiple of the default page size */
+ if (pgsizes & (VFIO_USER_DEF_PGSIZE - 1)) {
+ error_setg(errp, "pgsize 0x%"PRIx64" too small", pgsizes);
+ return false;
+ }
+
+ proxy->dma_pgsizes = pgsizes;
+ return true;
+}
+
+static bool check_max_dma(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QNum *qn = qobject_to(QNum, qobj);
+ uint64_t max_dma;
+
+ if (qn == NULL || !qnum_get_try_uint(qn, &max_dma)) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MAP_MAX);
+ return false;
+ }
+
+ /* can only lower it */
+ if (max_dma > VFIO_USER_DEF_MAP_MAX) {
+ error_setg(errp, "%s too large", VFIO_USER_CAP_MAP_MAX);
+ return false;
+ }
+
+ proxy->max_dma = max_dma;
+ return true;
+}
+
+static bool check_migr(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QDict *qdict = qobject_to(QDict, qobj);
+
+ if (qdict == NULL) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_FDS);
+ return true;
+ }
+ return caps_parse(proxy, qdict, caps_migr, errp);
+}
+
+static bool check_multi(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QBool *qb = qobject_to(QBool, qobj);
+
+ if (qb == NULL) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP_MULTI);
+ return false;
+ }
+ if (qbool_get_bool(qb)) {
+ proxy->flags |= VFIO_PROXY_USE_MULTI;
+ }
+ return true;
+}
+
+static struct cap_entry caps_cap[] = {
+ { VFIO_USER_CAP_MAX_FDS, check_max_fds },
+ { VFIO_USER_CAP_MAX_XFER, check_max_xfer },
+ { VFIO_USER_CAP_PGSIZES, check_pgsizes },
+ { VFIO_USER_CAP_MAP_MAX, check_max_dma },
+ { VFIO_USER_CAP_MIGR, check_migr },
+ { VFIO_USER_CAP_MULTI, check_multi },
+ { NULL }
+};
+
+static bool check_cap(VFIOUserProxy *proxy, QObject *qobj, Error **errp)
+{
+ QDict *qdict = qobject_to(QDict, qobj);
+
+ if (qdict == NULL) {
+ error_setg(errp, "malformed %s", VFIO_USER_CAP);
+ return false;
+ }
+ return caps_parse(proxy, qdict, caps_cap, errp);
+}
+
+static struct cap_entry ver_0_0[] = {
+ { VFIO_USER_CAP, check_cap },
+ { NULL }
+};
+
+static bool caps_check(VFIOUserProxy *proxy, int minor, const char *caps,
+ Error **errp)
+{
+ QObject *qobj;
+ QDict *qdict;
+ bool ret;
+
+ qobj = qobject_from_json(caps, NULL);
+ if (qobj == NULL) {
+ error_setg(errp, "malformed capabilities %s", caps);
+ return false;
+ }
+ qdict = qobject_to(QDict, qobj);
+ if (qdict == NULL) {
+ error_setg(errp, "capabilities %s not an object", caps);
+ qobject_unref(qobj);
+ return false;
+ }
+ ret = caps_parse(proxy, qdict, ver_0_0, errp);
+
+ qobject_unref(qobj);
+ return ret;
+}
+
+static GString *caps_json(void)
+{
+ QDict *dict = qdict_new();
+ QDict *capdict = qdict_new();
+ QDict *migdict = qdict_new();
+ GString *str;
+
+ qdict_put_int(migdict, VFIO_USER_CAP_PGSIZE, VFIO_USER_DEF_PGSIZE);
+ qdict_put_int(migdict, VFIO_USER_CAP_MAX_BITMAP, VFIO_USER_DEF_MAX_BITMAP);
+ qdict_put_obj(capdict, VFIO_USER_CAP_MIGR, QOBJECT(migdict));
+
+ qdict_put_int(capdict, VFIO_USER_CAP_MAX_FDS, VFIO_USER_MAX_MAX_FDS);
+ qdict_put_int(capdict, VFIO_USER_CAP_MAX_XFER, VFIO_USER_DEF_MAX_XFER);
+ qdict_put_int(capdict, VFIO_USER_CAP_PGSIZES, VFIO_USER_DEF_PGSIZE);
+ qdict_put_int(capdict, VFIO_USER_CAP_MAP_MAX, VFIO_USER_DEF_MAP_MAX);
+ qdict_put_bool(capdict, VFIO_USER_CAP_MULTI, true);
+
+ qdict_put_obj(dict, VFIO_USER_CAP, QOBJECT(capdict));
+
+ str = qobject_to_json(QOBJECT(dict));
+ qobject_unref(dict);
+ return str;
+}
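For orientation, the string built here has the following shape; the macro names stand for the string keys and default values defined in hw/vfio-user/protocol.h:

/*
 *   { VFIO_USER_CAP: {
 *         VFIO_USER_CAP_MAX_FDS:  VFIO_USER_MAX_MAX_FDS,
 *         VFIO_USER_CAP_MAX_XFER: VFIO_USER_DEF_MAX_XFER,
 *         VFIO_USER_CAP_PGSIZES:  VFIO_USER_DEF_PGSIZE,
 *         VFIO_USER_CAP_MAP_MAX:  VFIO_USER_DEF_MAP_MAX,
 *         VFIO_USER_CAP_MULTI:    true,
 *         VFIO_USER_CAP_MIGR: {
 *             VFIO_USER_CAP_PGSIZE:     VFIO_USER_DEF_PGSIZE,
 *             VFIO_USER_CAP_MAX_BITMAP: VFIO_USER_DEF_MAX_BITMAP
 *         }
 *   } }
 */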
+
+bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp)
+{
+ g_autofree VFIOUserVersion *msgp = NULL;
+ GString *caps;
+ char *reply;
+ int size, caplen;
+
+ caps = caps_json();
+ caplen = caps->len + 1;
+ size = sizeof(*msgp) + caplen;
+ msgp = g_malloc0(size);
+
+ vfio_user_request_msg(&msgp->hdr, VFIO_USER_VERSION, size, 0);
+ msgp->major = VFIO_USER_MAJOR_VER;
+ msgp->minor = VFIO_USER_MINOR_VER;
+ memcpy(&msgp->capabilities, caps->str, caplen);
+ g_string_free(caps, true);
+ trace_vfio_user_version(msgp->major, msgp->minor, msgp->capabilities);
+
+ if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, errp)) {
+ return false;
+ }
+
+ if (msgp->hdr.flags & VFIO_USER_ERROR) {
+ error_setg_errno(errp, msgp->hdr.error_reply, "version reply");
+ return false;
+ }
+
+ if (msgp->major != VFIO_USER_MAJOR_VER ||
+ msgp->minor > VFIO_USER_MINOR_VER) {
+ error_setg(errp, "incompatible server version");
+ return false;
+ }
+
+ reply = msgp->capabilities;
+ if (reply[msgp->hdr.size - sizeof(*msgp) - 1] != '\0') {
+ error_setg(errp, "corrupt version reply");
+ return false;
+ }
+
+ if (!caps_check(proxy, msgp->minor, reply, errp)) {
+ return false;
+ }
+
+ trace_vfio_user_version(msgp->major, msgp->minor, msgp->capabilities);
+ return true;
+}
+
+void vfio_user_flush_multi(VFIOUserProxy *proxy)
+{
+ VFIOUserMsg *msg;
+ VFIOUserWRMulti *wm = proxy->wr_multi;
+ Error *local_err = NULL;
+
+ proxy->wr_multi = NULL;
+
+ /* adjust size for actual # of writes */
+ wm->hdr.size -= (VFIO_USER_MULTI_MAX - wm->wr_cnt) * sizeof(VFIOUserWROne);
+
+ msg = vfio_user_getmsg(proxy, &wm->hdr, NULL);
+ msg->id = wm->hdr.id;
+ msg->rsize = 0;
+ msg->type = VFIO_MSG_ASYNC;
+ trace_vfio_user_wrmulti("flush", wm->wr_cnt);
+
+ if (!vfio_user_send_queued(proxy, msg, &local_err)) {
+ error_report_err(local_err);
+ vfio_user_recycle(proxy, msg);
+ }
+}
+
+void vfio_user_create_multi(VFIOUserProxy *proxy)
+{
+ VFIOUserWRMulti *wm;
+
+ wm = g_malloc0(sizeof(*wm));
+ vfio_user_request_msg(&wm->hdr, VFIO_USER_REGION_WRITE_MULTI,
+ sizeof(*wm), VFIO_USER_NO_REPLY);
+ proxy->wr_multi = wm;
+}
+
+void vfio_user_add_multi(VFIOUserProxy *proxy, uint8_t index,
+ off_t offset, uint32_t count, void *data)
+{
+ VFIOUserWRMulti *wm = proxy->wr_multi;
+ VFIOUserWROne *w1 = &wm->wrs[wm->wr_cnt];
+
+ w1->offset = offset;
+ w1->region = index;
+ w1->count = count;
+ memcpy(&w1->data, data, count);
+
+ wm->wr_cnt++;
+ trace_vfio_user_wrmulti("add", wm->wr_cnt);
+ if (wm->wr_cnt == VFIO_USER_MULTI_MAX ||
+ proxy->num_outgoing < VFIO_USER_OUT_LOW) {
+ vfio_user_flush_multi(proxy);
+ }
+}
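A sketch of how the write-combining helpers above fit together (assumed caller, not in this file; the real region-write path elsewhere in the series decides when coalescing applies):

static void example_posted_write(VFIOUserProxy *proxy, uint8_t index,
                                 off_t offset, uint32_t count, void *data)
{
    if (proxy->wr_multi == NULL) {
        vfio_user_create_multi(proxy);   /* start a new WRITE_MULTI batch */
    }
    /* flushes itself at VFIO_USER_MULTI_MAX or once the queue drains */
    vfio_user_add_multi(proxy, index, offset, count, data);
}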
diff --git a/hw/vfio-user/proxy.h b/hw/vfio-user/proxy.h
new file mode 100644
index 0000000..61e64a0
--- /dev/null
+++ b/hw/vfio-user/proxy.h
@@ -0,0 +1,135 @@
+#ifndef VFIO_USER_PROXY_H
+#define VFIO_USER_PROXY_H
+
+/*
+ * vfio protocol over a UNIX socket.
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "io/channel.h"
+#include "io/channel-socket.h"
+
+#include "qemu/queue.h"
+#include "qemu/sockets.h"
+#include "qemu/thread.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio-user/protocol.h"
+
+typedef struct {
+ int send_fds;
+ int recv_fds;
+ int *fds;
+} VFIOUserFDs;
+
+enum msg_type {
+ VFIO_MSG_NONE,
+ VFIO_MSG_ASYNC,
+ VFIO_MSG_WAIT,
+ VFIO_MSG_NOWAIT,
+ VFIO_MSG_REQ,
+};
+
+typedef struct VFIOUserMsg {
+ QTAILQ_ENTRY(VFIOUserMsg) next;
+ VFIOUserHdr *hdr;
+ VFIOUserFDs *fds;
+ uint32_t rsize;
+ uint32_t id;
+ QemuCond cv;
+ bool complete;
+ bool pending;
+ enum msg_type type;
+} VFIOUserMsg;
+
+
+enum proxy_state {
+ VFIO_PROXY_CONNECTED = 1,
+ VFIO_PROXY_ERROR = 2,
+ VFIO_PROXY_CLOSING = 3,
+ VFIO_PROXY_CLOSED = 4,
+};
+
+typedef QTAILQ_HEAD(VFIOUserMsgQ, VFIOUserMsg) VFIOUserMsgQ;
+
+typedef struct VFIOUserProxy {
+ QLIST_ENTRY(VFIOUserProxy) next;
+ char *sockname;
+ struct QIOChannel *ioc;
+ void (*request)(void *opaque, VFIOUserMsg *msg);
+ void *req_arg;
+ uint64_t max_xfer_size;
+ uint64_t max_send_fds;
+ uint64_t max_dma;
+ uint64_t dma_pgsizes;
+ uint64_t max_bitmap;
+ uint64_t migr_pgsize;
+ int flags;
+ uint32_t wait_time;
+ QemuCond close_cv;
+ AioContext *ctx;
+ QEMUBH *req_bh;
+ bool async_ops;
+
+ /*
+ * above only changed when BQL is held
+ * below are protected by per-proxy lock
+ */
+ QemuMutex lock;
+ VFIOUserMsgQ free;
+ VFIOUserMsgQ pending;
+ VFIOUserMsgQ incoming;
+ VFIOUserMsgQ outgoing;
+ VFIOUserMsg *last_nowait;
+ VFIOUserMsg *part_recv;
+ size_t recv_left;
+ VFIOUserWRMulti *wr_multi;
+ int num_outgoing;
+ enum proxy_state state;
+} VFIOUserProxy;
+
+/* VFIOProxy flags */
+#define VFIO_PROXY_CLIENT 0x1
+#define VFIO_PROXY_FORCE_QUEUED 0x4
+#define VFIO_PROXY_NO_POST 0x8
+#define VFIO_PROXY_USE_MULTI 0x10
+
+/* coalescing high and low water marks for VFIOProxy num_outgoing */
+#define VFIO_USER_OUT_HIGH 1024
+#define VFIO_USER_OUT_LOW 128
+
+typedef struct VFIODevice VFIODevice;
+
+VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp);
+void vfio_user_disconnect(VFIOUserProxy *proxy);
+void vfio_user_set_handler(VFIODevice *vbasedev,
+ void (*handler)(void *opaque, VFIOUserMsg *msg),
+ void *reqarg);
+bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp);
+
+VFIOUserFDs *vfio_user_getfds(int numfds);
+void vfio_user_putfds(VFIOUserMsg *msg);
+
+void vfio_user_disable_posted_writes(VFIOUserProxy *proxy);
+
+void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
+ uint32_t size, uint32_t flags);
+void vfio_user_wait_reqs(VFIOUserProxy *proxy);
+bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, int rsize, Error **errp);
+bool vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, int rsize, Error **errp);
+bool vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+ VFIOUserFDs *fds, Error **errp);
+
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size);
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error);
+
+void vfio_user_flush_multi(VFIOUserProxy *proxy);
+void vfio_user_create_multi(VFIOUserProxy *proxy);
+void vfio_user_add_multi(VFIOUserProxy *proxy, uint8_t index,
+ off_t offset, uint32_t count, void *data);
+
+#endif /* VFIO_USER_PROXY_H */
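Taken together, the declarations above give the following connection lifecycle. This is a sketch under the assumption that the caller owns a VFIODevice whose proxy field is the one read by vfio_user_set_handler(); my_request_handler is hypothetical:

static void my_request_handler(void *opaque, VFIOUserMsg *msg);

static VFIOUserProxy *example_open(VFIODevice *vbasedev, SocketAddress *addr,
                                   Error **errp)
{
    VFIOUserProxy *proxy = vfio_user_connect_dev(addr, errp);

    if (proxy == NULL) {
        return NULL;
    }
    vbasedev->proxy = proxy;
    vfio_user_set_handler(vbasedev, my_request_handler, vbasedev);
    if (!vfio_user_validate_version(proxy, errp)) {
        vfio_user_disconnect(proxy);
        return NULL;
    }
    return proxy;
}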
diff --git a/hw/vfio-user/trace-events b/hw/vfio-user/trace-events
new file mode 100644
index 0000000..abb67f4
--- /dev/null
+++ b/hw/vfio-user/trace-events
@@ -0,0 +1,20 @@
+# See docs/devel/tracing.rst for syntax documentation.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# common.c
+vfio_user_recv_hdr(const char *name, uint16_t id, uint16_t cmd, uint32_t size, uint32_t flags) " (%s) id 0x%x cmd 0x%x size 0x%x flags 0x%x"
+vfio_user_recv_read(uint16_t id, int read) " id 0x%x read 0x%x"
+vfio_user_recv_request(uint16_t cmd) " command 0x%x"
+vfio_user_send_write(uint16_t id, int wrote) " id 0x%x wrote 0x%x"
+vfio_user_version(uint16_t major, uint16_t minor, const char *caps) " major %d minor %d caps: %s"
+vfio_user_get_info(uint32_t nregions, uint32_t nirqs) " #regions %d #irqs %d"
+vfio_user_get_region_info(uint32_t index, uint32_t flags, uint64_t size) " index %d flags 0x%x size 0x%"PRIx64
+vfio_user_region_rw(uint32_t region, uint64_t off, uint32_t count) " region %d offset 0x%"PRIx64" count %d"
+vfio_user_get_irq_info(uint32_t index, uint32_t flags, uint32_t count) " index %d flags 0x%x count %d"
+vfio_user_set_irqs(uint32_t index, uint32_t start, uint32_t count, uint32_t flags) " index %d start %d count %d flags 0x%x"
+vfio_user_wrmulti(const char *s, uint64_t wr_cnt) " %s count 0x%"PRIx64
+
+# container.c
+vfio_user_dma_map(uint64_t iova, uint64_t size, uint64_t off, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" off 0x%"PRIx64" flags 0x%x async_ops %d"
+vfio_user_dma_unmap(uint64_t iova, uint64_t size, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" flags 0x%x async_ops %d"
diff --git a/hw/vfio-user/trace.h b/hw/vfio-user/trace.h
new file mode 100644
index 0000000..9cf02d9
--- /dev/null
+++ b/hw/vfio-user/trace.h
@@ -0,0 +1,4 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "trace/trace-hw_vfio_user.h"
diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig
index 7cdba05..91d9023 100644
--- a/hw/vfio/Kconfig
+++ b/hw/vfio/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
config VFIO
bool
depends on LINUX
diff --git a/hw/vfio/amd-xgbe.c b/hw/vfio/amd-xgbe.c
index 96bd608..58f590e 100644
--- a/hw/vfio/amd-xgbe.c
+++ b/hw/vfio/amd-xgbe.c
@@ -15,12 +15,14 @@
#include "hw/vfio/vfio-amd-xgbe.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
+#include "qemu/error-report.h"
static void amd_xgbe_realize(DeviceState *dev, Error **errp)
{
VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
VFIOAmdXgbeDeviceClass *k = VFIO_AMD_XGBE_DEVICE_GET_CLASS(dev);
+ warn_report("-device vfio-amd-xgbe is deprecated");
vdev->compat = g_strdup("amd,xgbe-seattle-v1a");
vdev->num_compat = 1;
@@ -32,7 +34,7 @@ static const VMStateDescription vfio_platform_amd_xgbe_vmstate = {
.unmigratable = 1,
};
-static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data)
+static void vfio_amd_xgbe_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VFIOAmdXgbeDeviceClass *vcxc =
@@ -41,8 +43,6 @@ static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data)
&vcxc->parent_realize);
dc->desc = "VFIO AMD XGBE";
dc->vmsd = &vfio_platform_amd_xgbe_vmstate;
- /* Supported by TYPE_VIRT_MACHINE */
- dc->user_creatable = true;
}
static const TypeInfo vfio_amd_xgbe_dev_info = {
diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c
index 0c4354e..1df4438 100644
--- a/hw/vfio/ap.c
+++ b/hw/vfio/ap.c
@@ -10,16 +10,19 @@
* directory.
*/
+#include <stdbool.h>
#include "qemu/osdep.h"
#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include "qapi/error.h"
-#include "hw/vfio/vfio-common.h"
-#include "sysemu/iommufd.h"
+#include "hw/vfio/vfio-device.h"
+#include "system/iommufd.h"
#include "hw/s390x/ap-device.h"
+#include "hw/s390x/css.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
+#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -28,7 +31,7 @@
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/ap-bridge.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#define TYPE_VFIO_AP_DEVICE "vfio-ap"
@@ -37,8 +40,23 @@ struct VFIOAPDevice {
APDevice apdev;
VFIODevice vdev;
EventNotifier req_notifier;
+ EventNotifier cfg_notifier;
};
+typedef struct APConfigChgEvent {
+ QTAILQ_ENTRY(APConfigChgEvent) next;
+} APConfigChgEvent;
+
+static QTAILQ_HEAD(, APConfigChgEvent) cfg_chg_events =
+ QTAILQ_HEAD_INITIALIZER(cfg_chg_events);
+
+static QemuMutex cfg_chg_events_lock;
+
+static void __attribute__((constructor)) vfio_ap_global_init(void)
+{
+ qemu_mutex_init(&cfg_chg_events_lock);
+}
+
OBJECT_DECLARE_SIMPLE_TYPE(VFIOAPDevice, VFIO_AP_DEVICE)
static void vfio_ap_compute_needs_reset(VFIODevice *vdev)
@@ -70,14 +88,65 @@ static void vfio_ap_req_notifier_handler(void *opaque)
}
}
+static void vfio_ap_cfg_chg_notifier_handler(void *opaque)
+{
+ APConfigChgEvent *cfg_chg_event;
+ VFIOAPDevice *vapdev = opaque;
+
+ if (!event_notifier_test_and_clear(&vapdev->cfg_notifier)) {
+ return;
+ }
+
+ cfg_chg_event = g_new0(APConfigChgEvent, 1);
+
+ WITH_QEMU_LOCK_GUARD(&cfg_chg_events_lock) {
+ QTAILQ_INSERT_TAIL(&cfg_chg_events, cfg_chg_event, next);
+ }
+
+ css_generate_css_crws(0);
+
+}
+
+int ap_chsc_sei_nt0_get_event(void *res)
+{
+ ChscSeiNt0Res *nt0_res = (ChscSeiNt0Res *)res;
+ APConfigChgEvent *cfg_chg_event;
+
+ WITH_QEMU_LOCK_GUARD(&cfg_chg_events_lock) {
+ if (QTAILQ_EMPTY(&cfg_chg_events)) {
+ return EVENT_INFORMATION_NOT_STORED;
+ }
+
+ cfg_chg_event = QTAILQ_FIRST(&cfg_chg_events);
+ QTAILQ_REMOVE(&cfg_chg_events, cfg_chg_event, next);
+ }
+
+ memset(nt0_res, 0, sizeof(*nt0_res));
+ g_free(cfg_chg_event);
+ nt0_res->flags |= PENDING_EVENT_INFO_BITMASK;
+ nt0_res->length = sizeof(ChscSeiNt0Res);
+ nt0_res->code = NT0_RES_RESPONSE_CODE;
+ nt0_res->nt = NT0_RES_NT_DEFAULT;
+ nt0_res->rs = NT0_RES_RS_AP_CHANGE;
+ nt0_res->cc = NT0_RES_CC_AP_CHANGE;
+
+ return EVENT_INFORMATION_STORED;
+}
+
+bool ap_chsc_sei_nt0_have_event(void)
+{
+ QEMU_LOCK_GUARD(&cfg_chg_events_lock);
+ return !QTAILQ_EMPTY(&cfg_chg_events);
+}
+
static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
unsigned int irq, Error **errp)
{
int fd;
- size_t argsz;
+ int ret;
IOHandler *fd_read;
EventNotifier *notifier;
- g_autofree struct vfio_irq_info *irq_info = NULL;
+ struct vfio_irq_info irq_info;
VFIODevice *vdev = &vapdev->vdev;
switch (irq) {
@@ -85,6 +154,10 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
notifier = &vapdev->req_notifier;
fd_read = vfio_ap_req_notifier_handler;
break;
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ notifier = &vapdev->cfg_notifier;
+ fd_read = vfio_ap_cfg_chg_notifier_handler;
+ break;
default:
error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
return false;
@@ -96,14 +169,15 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
return false;
}
- argsz = sizeof(*irq_info);
- irq_info = g_malloc0(argsz);
- irq_info->index = irq;
- irq_info->argsz = argsz;
+ ret = vfio_device_get_irq_info(vdev, irq, &irq_info);
+
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vfio: Error getting irq info");
+ return false;
+ }
- if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
- irq_info) < 0 || irq_info->count < 1) {
- error_setg_errno(errp, errno, "vfio: Error getting irq info");
+ if (irq_info.count < 1) {
+ error_setg(errp, "vfio: Error getting irq info, count=0");
return false;
}
@@ -117,8 +191,8 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev,
fd = event_notifier_get_fd(notifier);
qemu_set_fd_handler(fd, fd_read, NULL, vapdev);
- if (!vfio_set_irq_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
- errp)) {
+ if (!vfio_device_irq_set_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
+ errp)) {
qemu_set_fd_handler(fd, NULL, NULL, vapdev);
event_notifier_cleanup(notifier);
}
@@ -136,13 +210,16 @@ static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev,
case VFIO_AP_REQ_IRQ_INDEX:
notifier = &vapdev->req_notifier;
break;
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ notifier = &vapdev->cfg_notifier;
+ break;
default:
error_report("vfio: Unsupported device irq(%d)", irq);
return;
}
- if (!vfio_set_irq_signaling(&vapdev->vdev, irq, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ if (!vfio_device_irq_set_signaling(&vapdev->vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
warn_reportf_err(err, VFIO_MSG_PREFIX, vapdev->vdev.name);
}
@@ -162,7 +239,7 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp)
return;
}
- if (!vfio_attach_device(vbasedev->name, vbasedev,
+ if (!vfio_device_attach(vbasedev->name, vbasedev,
&address_space_memory, errp)) {
goto error;
}
@@ -175,6 +252,15 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp)
warn_report_err(err);
}
+ if (!vfio_ap_register_irq_notifier(vapdev, VFIO_AP_CFG_CHG_IRQ_INDEX, &err))
+ {
+ /*
+ * Report this error, but do not make it a failing condition.
+ * Lack of this IRQ in the host does not prevent normal operation.
+ */
+ warn_report_err(err);
+ }
+
return;
error:
@@ -187,17 +273,17 @@ static void vfio_ap_unrealize(DeviceState *dev)
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev);
vfio_ap_unregister_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX);
- vfio_detach_device(&vapdev->vdev);
+ vfio_ap_unregister_irq_notifier(vapdev, VFIO_AP_CFG_CHG_IRQ_INDEX);
+ vfio_device_detach(&vapdev->vdev);
g_free(vapdev->vdev.name);
}
-static Property vfio_ap_properties[] = {
+static const Property vfio_ap_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev),
#ifdef CONFIG_IOMMUFD
DEFINE_PROP_LINK("iommufd", VFIOAPDevice, vdev.iommufd,
TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
- DEFINE_PROP_END_OF_LIST(),
};
static void vfio_ap_reset(DeviceState *dev)
@@ -230,6 +316,9 @@ static void vfio_ap_instance_init(Object *obj)
*/
vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_AP, &vfio_ap_ops,
DEVICE(vapdev), true);
+
+ /* AP device is mdev type device */
+ vbasedev->mdev = true;
}
#ifdef CONFIG_IOMMUFD
@@ -239,7 +328,7 @@ static void vfio_ap_set_fd(Object *obj, const char *str, Error **errp)
}
#endif
-static void vfio_ap_class_init(ObjectClass *klass, void *data)
+static void vfio_ap_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -253,8 +342,17 @@ static void vfio_ap_class_init(ObjectClass *klass, void *data)
dc->realize = vfio_ap_realize;
dc->unrealize = vfio_ap_unrealize;
dc->hotpluggable = true;
- dc->reset = vfio_ap_reset;
+ device_class_set_legacy_reset(dc, vfio_ap_reset);
dc->bus_type = TYPE_AP_BUS;
+
+ object_class_property_set_description(klass, /* 3.1 */
+ "sysfsdev",
+ "Host sysfs path of assigned device");
+#ifdef CONFIG_IOMMUFD
+ object_class_property_set_description(klass, /* 9.0 */
+ "iommufd",
+ "Set host IOMMUFD backend device");
+#endif
}
static const TypeInfo vfio_ap_info = {
diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c
index 87c382e..03f2ff5 100644
--- a/hw/vfio/calxeda-xgmac.c
+++ b/hw/vfio/calxeda-xgmac.c
@@ -15,12 +15,14 @@
#include "hw/vfio/vfio-calxeda-xgmac.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
+#include "qemu/error-report.h"
static void calxeda_xgmac_realize(DeviceState *dev, Error **errp)
{
VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev);
+ warn_report("-device vfio-calxeda-xgmac is deprecated");
vdev->compat = g_strdup("calxeda,hb-xgmac");
vdev->num_compat = 1;
@@ -32,7 +34,7 @@ static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = {
.unmigratable = 1,
};
-static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data)
+static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VFIOCalxedaXgmacDeviceClass *vcxc =
@@ -41,8 +43,6 @@ static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data)
&vcxc->parent_realize);
dc->desc = "VFIO Calxeda XGMAC";
dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate;
- /* Supported by TYPE_VIRT_MACHINE */
- dc->user_creatable = true;
}
static const TypeInfo vfio_calxeda_xgmac_dev_info = {
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index 1f8e127..cea9d6e 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -21,13 +21,13 @@
#include <sys/ioctl.h>
#include "qapi/error.h"
-#include "hw/vfio/vfio-common.h"
-#include "sysemu/iommufd.h"
+#include "hw/vfio/vfio-device.h"
+#include "system/iommufd.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/vfio-ccw.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/ccw-device.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -51,17 +51,8 @@ struct VFIOCCWDevice {
EventNotifier crw_notifier;
EventNotifier req_notifier;
bool force_orb_pfch;
- bool warned_orb_pfch;
};
-static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
- const char *msg)
-{
- warn_report_once_cond(&vcdev->warned_orb_pfch,
- "vfio-ccw (devno %x.%x.%04x): %s",
- sch->cssid, sch->ssid, sch->devno, msg);
-}
-
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
vdev->needs_reset = false;
@@ -83,7 +74,8 @@ static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
- warn_once_pfch(vcdev, sch, "PFCH flag forced");
+ warn_report_once("vfio-ccw (devno %x.%x.%04x): PFCH flag forced",
+ sch->cssid, sch->ssid, sch->devno);
}
QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
@@ -384,8 +376,8 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
Error **errp)
{
VFIODevice *vdev = &vcdev->vdev;
- g_autofree struct vfio_irq_info *irq_info = NULL;
- size_t argsz;
+ struct vfio_irq_info irq_info;
+ int ret;
int fd;
EventNotifier *notifier;
IOHandler *fd_read;
@@ -414,13 +406,15 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
return false;
}
- argsz = sizeof(*irq_info);
- irq_info = g_malloc0(argsz);
- irq_info->index = irq;
- irq_info->argsz = argsz;
- if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
- irq_info) < 0 || irq_info->count < 1) {
- error_setg_errno(errp, errno, "vfio: Error getting irq info");
+ ret = vfio_device_get_irq_info(vdev, irq, &irq_info);
+
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vfio: Error getting irq info");
+ return false;
+ }
+
+ if (irq_info.count < 1) {
+ error_setg(errp, "vfio: Error getting irq info, count=0");
return false;
}
@@ -434,8 +428,8 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
fd = event_notifier_get_fd(notifier);
qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
- if (!vfio_set_irq_signaling(vdev, irq, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
+ if (!vfio_device_irq_set_signaling(vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
qemu_set_fd_handler(fd, NULL, NULL, vcdev);
event_notifier_cleanup(notifier);
}
@@ -464,8 +458,8 @@ static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
return;
}
- if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ if (!vfio_device_irq_set_signaling(&vcdev->vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
}
@@ -496,7 +490,7 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
return false;
}
- ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
+ ret = vfio_device_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
if (ret) {
error_setg_errno(errp, -ret, "vfio: Error getting config info");
return false;
@@ -510,11 +504,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
vcdev->io_region_offset = info->offset;
vcdev->io_region = g_malloc0(info->size);
- g_free(info);
/* check for the optional async command region */
- ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
- VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
+ ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
if (!ret) {
vcdev->async_cmd_region_size = info->size;
if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
@@ -523,11 +516,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
}
vcdev->async_cmd_region_offset = info->offset;
vcdev->async_cmd_region = g_malloc0(info->size);
- g_free(info);
}
- ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
- VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
+ ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
if (!ret) {
vcdev->schib_region_size = info->size;
if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
@@ -536,11 +528,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
}
vcdev->schib_region_offset = info->offset;
vcdev->schib_region = g_malloc(info->size);
- g_free(info);
}
- ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
- VFIO_REGION_SUBTYPE_CCW_CRW, &info);
+ ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_CRW, &info);
if (!ret) {
vcdev->crw_region_size = info->size;
@@ -550,7 +541,6 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
}
vcdev->crw_region_offset = info->offset;
vcdev->crw_region = g_malloc(info->size);
- g_free(info);
}
return true;
@@ -560,7 +550,6 @@ out_err:
g_free(vcdev->schib_region);
g_free(vcdev->async_cmd_region);
g_free(vcdev->io_region);
- g_free(info);
return false;
}
@@ -591,7 +580,7 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
goto out_unrealize;
}
- if (!vfio_attach_device(cdev->mdevid, vbasedev,
+ if (!vfio_device_attach(cdev->mdevid, vbasedev,
&address_space_memory, errp)) {
goto out_attach_dev_err;
}
@@ -628,7 +617,7 @@ out_irq_notifier_err:
out_io_notifier_err:
vfio_ccw_put_region(vcdev);
out_region_err:
- vfio_detach_device(vbasedev);
+ vfio_device_detach(vbasedev);
out_attach_dev_err:
g_free(vbasedev->name);
out_unrealize:
@@ -647,7 +636,7 @@ static void vfio_ccw_unrealize(DeviceState *dev)
vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
vfio_ccw_put_region(vcdev);
- vfio_detach_device(&vcdev->vdev);
+ vfio_device_detach(&vcdev->vdev);
g_free(vcdev->vdev.name);
if (cdc->unrealize) {
@@ -655,14 +644,14 @@ static void vfio_ccw_unrealize(DeviceState *dev)
}
}
-static Property vfio_ccw_properties[] = {
+static const Property vfio_ccw_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm),
};
static const VMStateDescription vfio_ccw_vmstate = {
@@ -675,6 +664,9 @@ static void vfio_ccw_instance_init(Object *obj)
VFIOCCWDevice *vcdev = VFIO_CCW(obj);
VFIODevice *vbasedev = &vcdev->vdev;
+ /* CCW device is mdev type device */
+ vbasedev->mdev = true;
+
/*
* All vfio-ccw devices are believed to operate in a way compatible with
* discarding of memory in RAM blocks, ie. pages pinned in the host are
@@ -694,7 +686,7 @@ static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
}
#endif
-static void vfio_ccw_class_init(ObjectClass *klass, void *data)
+static void vfio_ccw_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
@@ -708,12 +700,27 @@ static void vfio_ccw_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->realize = vfio_ccw_realize;
dc->unrealize = vfio_ccw_unrealize;
- dc->reset = vfio_ccw_reset;
+ device_class_set_legacy_reset(dc, vfio_ccw_reset);
cdc->handle_request = vfio_ccw_handle_request;
cdc->handle_halt = vfio_ccw_handle_halt;
cdc->handle_clear = vfio_ccw_handle_clear;
cdc->handle_store = vfio_ccw_handle_store;
+
+ object_class_property_set_description(klass, /* 2.10 */
+ "sysfsdev",
+ "Host sysfs path of assigned device");
+ object_class_property_set_description(klass, /* 3.0 */
+ "force-orb-pfch",
+ "Force unlimited prefetch");
+#ifdef CONFIG_IOMMUFD
+ object_class_property_set_description(klass, /* 9.0 */
+ "iommufd",
+ "Set host IOMMUFD backend device");
+#endif
+ object_class_property_set_description(klass, /* 9.2 */
+ "loadparm",
+ "Define which devices that can be used for booting");
}
static const TypeInfo vfio_ccw_info = {
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
deleted file mode 100644
index 6d15b36..0000000
--- a/hw/vfio/common.c
+++ /dev/null
@@ -1,1569 +0,0 @@
-/*
- * generic functions used by VFIO devices
- *
- * Copyright Red Hat, Inc. 2012
- *
- * Authors:
- * Alex Williamson <alex.williamson@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- * Based on qemu-kvm device-assignment:
- * Adapted for KVM by Qumranet.
- * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
- * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
- * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
- * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
- * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
- */
-
-#include "qemu/osdep.h"
-#include <sys/ioctl.h>
-#ifdef CONFIG_KVM
-#include <linux/kvm.h>
-#endif
-#include <linux/vfio.h>
-
-#include "hw/vfio/vfio-common.h"
-#include "hw/vfio/pci.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-#include "exec/ram_addr.h"
-#include "hw/hw.h"
-#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
-#include "qemu/range.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "trace.h"
-#include "qapi/error.h"
-#include "migration/misc.h"
-#include "migration/blocker.h"
-#include "migration/qemu-file.h"
-#include "sysemu/tpm.h"
-
-VFIODeviceList vfio_device_list =
- QLIST_HEAD_INITIALIZER(vfio_device_list);
-static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
- QLIST_HEAD_INITIALIZER(vfio_address_spaces);
-
-#ifdef CONFIG_KVM
-/*
- * We have a single VFIO pseudo device per KVM VM. Once created it lives
- * for the life of the VM. Closing the file descriptor only drops our
- * reference to it and the device's reference to kvm. Therefore once
- * initialized, this file descriptor is only released on QEMU exit and
- * we'll re-use it should another vfio device be attached before then.
- */
-int vfio_kvm_device_fd = -1;
-#endif
-
-/*
- * Device state interfaces
- */
-
-bool vfio_mig_active(void)
-{
- VFIODevice *vbasedev;
-
- if (QLIST_EMPTY(&vfio_device_list)) {
- return false;
- }
-
- QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
- if (vbasedev->migration_blocker) {
- return false;
- }
- }
- return true;
-}
-
-static Error *multiple_devices_migration_blocker;
-
-/*
- * Multiple devices migration is allowed only if all devices support P2P
- * migration. Single device migration is allowed regardless of P2P migration
- * support.
- */
-static bool vfio_multiple_devices_migration_is_supported(void)
-{
- VFIODevice *vbasedev;
- unsigned int device_num = 0;
- bool all_support_p2p = true;
-
- QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
- if (vbasedev->migration) {
- device_num++;
-
- if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
- all_support_p2p = false;
- }
- }
- }
-
- return all_support_p2p || device_num <= 1;
-}
-
-int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
-{
- int ret;
-
- if (vfio_multiple_devices_migration_is_supported()) {
- return 0;
- }
-
- if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
- error_setg(errp, "Multiple VFIO devices migration is supported only if "
- "all of them support P2P migration");
- return -EINVAL;
- }
-
- if (multiple_devices_migration_blocker) {
- return 0;
- }
-
- error_setg(&multiple_devices_migration_blocker,
- "Multiple VFIO devices migration is supported only if all of "
- "them support P2P migration");
- ret = migrate_add_blocker_normal(&multiple_devices_migration_blocker, errp);
-
- return ret;
-}
-
-void vfio_unblock_multiple_devices_migration(void)
-{
- if (!multiple_devices_migration_blocker ||
- !vfio_multiple_devices_migration_is_supported()) {
- return;
- }
-
- migrate_del_blocker(&multiple_devices_migration_blocker);
-}
-
-bool vfio_viommu_preset(VFIODevice *vbasedev)
-{
- return vbasedev->bcontainer->space->as != &address_space_memory;
-}
-
-static void vfio_set_migration_error(int ret)
-{
- if (migration_is_setup_or_active()) {
- migration_file_set_error(ret, NULL);
- }
-}
-
-bool vfio_device_state_is_running(VFIODevice *vbasedev)
-{
- VFIOMigration *migration = vbasedev->migration;
-
- return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
- migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
-}
-
-bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
-{
- VFIOMigration *migration = vbasedev->migration;
-
- return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
- migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
-}
-
-static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
-{
- VFIODevice *vbasedev;
-
- if (!migration_is_active() && !migration_is_device()) {
- return false;
- }
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- VFIOMigration *migration = vbasedev->migration;
-
- if (!migration) {
- return false;
- }
-
- if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
- (vfio_device_state_is_running(vbasedev) ||
- vfio_device_state_is_precopy(vbasedev))) {
- return false;
- }
- }
- return true;
-}
-
-bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer)
-{
- VFIODevice *vbasedev;
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- if (!vbasedev->dirty_pages_supported) {
- return false;
- }
- }
-
- return true;
-}
-
-/*
- * Check if all VFIO devices are running and migration is active, which is
- * essentially equivalent to the migration being in pre-copy phase.
- */
-bool
-vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer)
-{
- VFIODevice *vbasedev;
-
- if (!migration_is_active()) {
- return false;
- }
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- VFIOMigration *migration = vbasedev->migration;
-
- if (!migration) {
- return false;
- }
-
- if (vfio_device_state_is_running(vbasedev) ||
- vfio_device_state_is_precopy(vbasedev)) {
- continue;
- } else {
- return false;
- }
- }
- return true;
-}
-
-static bool vfio_listener_skipped_section(MemoryRegionSection *section)
-{
- return (!memory_region_is_ram(section->mr) &&
- !memory_region_is_iommu(section->mr)) ||
- memory_region_is_protected(section->mr) ||
- /*
- * Sizing an enabled 64-bit BAR can cause spurious mappings to
- * addresses in the upper part of the 64-bit address space. These
- * are never accessed by the CPU and beyond the address width of
- * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
- */
- section->offset_within_address_space & (1ULL << 63);
-}
-
-/* Called with rcu_read_lock held. */
-static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
- ram_addr_t *ram_addr, bool *read_only,
- Error **errp)
-{
- bool ret, mr_has_discard_manager;
-
- ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
- &mr_has_discard_manager, errp);
- if (ret && mr_has_discard_manager) {
- /*
- * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
- * pages will remain pinned inside vfio until unmapped, resulting in a
- * higher memory consumption than expected. If memory would get
- * populated again later, there would be an inconsistency between pages
- * pinned by vfio and pages seen by QEMU. This is the case until
- * unmapped from the IOMMU (e.g., during device reset).
- *
- * With malicious guests, we really only care about pinning more memory
- * than expected. RLIMIT_MEMLOCK set for the user/process can never be
- * exceeded and can be used to mitigate this problem.
- */
- warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
- " RAM (e.g., virtio-mem) works, however, malicious"
- " guests can trigger pinning of more memory than"
- " intended via an IOMMU. It's possible to mitigate "
- " by setting/adjusting RLIMIT_MEMLOCK.");
- }
- return ret;
-}
-
-static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
-{
- VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
- VFIOContainerBase *bcontainer = giommu->bcontainer;
- hwaddr iova = iotlb->iova + giommu->iommu_offset;
- void *vaddr;
- int ret;
- Error *local_err = NULL;
-
- trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
- iova, iova + iotlb->addr_mask);
-
- if (iotlb->target_as != &address_space_memory) {
- error_report("Wrong target AS \"%s\", only system memory is allowed",
- iotlb->target_as->name ? iotlb->target_as->name : "none");
- vfio_set_migration_error(-EINVAL);
- return;
- }
-
- rcu_read_lock();
-
- if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
- bool read_only;
-
- if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &local_err)) {
- error_report_err(local_err);
- goto out;
- }
- /*
- * vaddr is only valid until rcu_read_unlock(). But after
- * vfio_dma_map has set up the mapping the pages will be
- * pinned by the kernel. This makes sure that the RAM backend
- * of vaddr will always be there, even if the memory object is
- * destroyed and its backing memory munmap-ed.
- */
- ret = vfio_container_dma_map(bcontainer, iova,
- iotlb->addr_mask + 1, vaddr,
- read_only);
- if (ret) {
- error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx", %p) = %d (%s)",
- bcontainer, iova,
- iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
- }
- } else {
- ret = vfio_container_dma_unmap(bcontainer, iova,
- iotlb->addr_mask + 1, iotlb);
- if (ret) {
- error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%s)",
- bcontainer, iova,
- iotlb->addr_mask + 1, ret, strerror(-ret));
- vfio_set_migration_error(ret);
- }
- }
-out:
- rcu_read_unlock();
-}
-
-static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
- MemoryRegionSection *section)
-{
- VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
- listener);
- VFIOContainerBase *bcontainer = vrdl->bcontainer;
- const hwaddr size = int128_get64(section->size);
- const hwaddr iova = section->offset_within_address_space;
- int ret;
-
- /* Unmap with a single call. */
- ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL);
- if (ret) {
- error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
- strerror(-ret));
- }
-}
-
-static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
- MemoryRegionSection *section)
-{
- VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
- listener);
- VFIOContainerBase *bcontainer = vrdl->bcontainer;
- const hwaddr end = section->offset_within_region +
- int128_get64(section->size);
- hwaddr start, next, iova;
- void *vaddr;
- int ret;
-
- /*
- * Map in (aligned within memory region) minimum granularity, so we can
- * unmap in minimum granularity later.
- */
- for (start = section->offset_within_region; start < end; start = next) {
- next = ROUND_UP(start + 1, vrdl->granularity);
- next = MIN(next, end);
-
- iova = start - section->offset_within_region +
- section->offset_within_address_space;
- vaddr = memory_region_get_ram_ptr(section->mr) + start;
-
- ret = vfio_container_dma_map(bcontainer, iova, next - start,
- vaddr, section->readonly);
- if (ret) {
- /* Rollback */
- vfio_ram_discard_notify_discard(rdl, section);
- return ret;
- }
- }
- return 0;
-}
-
-static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section)
-{
- RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
- VFIORamDiscardListener *vrdl;
-
- /* Ignore some corner cases not relevant in practice. */
- g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
- g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
- TARGET_PAGE_SIZE));
- g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
-
- vrdl = g_new0(VFIORamDiscardListener, 1);
- vrdl->bcontainer = bcontainer;
- vrdl->mr = section->mr;
- vrdl->offset_within_address_space = section->offset_within_address_space;
- vrdl->size = int128_get64(section->size);
- vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
- section->mr);
-
- g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
- g_assert(bcontainer->pgsizes &&
- vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));
-
- ram_discard_listener_init(&vrdl->listener,
- vfio_ram_discard_notify_populate,
- vfio_ram_discard_notify_discard, true);
- ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
- QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);
-
- /*
- * Sanity-check if we have a theoretically problematic setup where we could
- * exceed the maximum number of possible DMA mappings over time. We assume
- * that each mapped section in the same address space as a RamDiscardManager
- * section consumes exactly one DMA mapping, with the exception of
- * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
- * in the same address space as RamDiscardManager sections.
- *
- * We assume that each section in the address space consumes one memslot.
- * We take the number of KVM memory slots as a best guess for the maximum
- * number of sections in the address space we could have over time,
- * also consuming DMA mappings.
- */
- if (bcontainer->dma_max_mappings) {
- unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
-
-#ifdef CONFIG_KVM
- if (kvm_enabled()) {
- max_memslots = kvm_get_max_memslots();
- }
-#endif
-
- QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
- hwaddr start, end;
-
- start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
- vrdl->granularity);
- end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
- vrdl->granularity);
- vrdl_mappings += (end - start) / vrdl->granularity;
- vrdl_count++;
- }
-
- if (vrdl_mappings + max_memslots - vrdl_count >
- bcontainer->dma_max_mappings) {
- warn_report("%s: possibly running out of DMA mappings. E.g., try"
- " increasing the 'block-size' of virtio-mem devies."
- " Maximum possible DMA mappings: %d, Maximum possible"
- " memslots: %d", __func__, bcontainer->dma_max_mappings,
- max_memslots);
- }
- }
-}
-
-static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section)
-{
- RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
- VFIORamDiscardListener *vrdl = NULL;
-
- QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
- if (vrdl->mr == section->mr &&
- vrdl->offset_within_address_space ==
- section->offset_within_address_space) {
- break;
- }
- }
-
- if (!vrdl) {
- hw_error("vfio: Trying to unregister missing RAM discard listener");
- }
-
- ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
- QLIST_REMOVE(vrdl, next);
- g_free(vrdl);
-}
-
-static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
-{
- MemoryRegion *mr = section->mr;
-
- if (!TPM_IS_CRB(mr->owner)) {
- return false;
- }
-
- /* this is a known safe misaligned region, just trace for debug purpose */
- trace_vfio_known_safe_misalignment(memory_region_name(mr),
- section->offset_within_address_space,
- section->offset_within_region,
- qemu_real_host_page_size());
- return true;
-}
-
-static bool vfio_listener_valid_section(MemoryRegionSection *section,
- const char *name)
-{
- if (vfio_listener_skipped_section(section)) {
- trace_vfio_listener_region_skip(name,
- section->offset_within_address_space,
- section->offset_within_address_space +
- int128_get64(int128_sub(section->size, int128_one())));
- return false;
- }
-
- if (unlikely((section->offset_within_address_space &
- ~qemu_real_host_page_mask()) !=
- (section->offset_within_region & ~qemu_real_host_page_mask()))) {
- if (!vfio_known_safe_misalignment(section)) {
- error_report("%s received unaligned region %s iova=0x%"PRIx64
- " offset_within_region=0x%"PRIx64
- " qemu_real_host_page_size=0x%"PRIxPTR,
- __func__, memory_region_name(section->mr),
- section->offset_within_address_space,
- section->offset_within_region,
- qemu_real_host_page_size());
- }
- return false;
- }
-
- return true;
-}
-
-static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section,
- hwaddr *out_iova, hwaddr *out_end,
- Int128 *out_llend)
-{
- Int128 llend;
- hwaddr iova;
-
- iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
- llend = int128_make64(section->offset_within_address_space);
- llend = int128_add(llend, section->size);
- llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
-
- if (int128_ge(int128_make64(iova), llend)) {
- return false;
- }
-
- *out_iova = iova;
- *out_end = int128_get64(int128_sub(llend, int128_one()));
- if (out_llend) {
- *out_llend = llend;
- }
- return true;
-}
-
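To make the clamping in vfio_get_section_iova_range() concrete, here is a standalone sketch with made-up addresses, assuming 4 KiB host pages (illustrative only, not part of the patch):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t page_size = 4096;                    /* qemu_real_host_page_size() */
    const uint64_t page_mask = ~(page_size - 1);

    uint64_t ows  = 0x10000800;                         /* offset_within_address_space */
    uint64_t size = 0x2000;

    uint64_t iova  = (ows + page_size - 1) & page_mask; /* align up:   0x10001000 */
    uint64_t llend = (ows + size) & page_mask;          /* align down: 0x10002000 */

    if (iova >= llend) {
        printf("sub-page leftover only, nothing to map\n");
    } else {
        printf("window [0x%" PRIx64 ", 0x%" PRIx64 "), end = 0x%" PRIx64 "\n",
               iova, llend, llend - 1);
    }
    return 0;
}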
-static void vfio_listener_region_add(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
- listener);
- hwaddr iova, end;
- Int128 llend, llsize;
- void *vaddr;
- int ret;
- Error *err = NULL;
-
- if (!vfio_listener_valid_section(section, "region_add")) {
- return;
- }
-
- if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
- &llend)) {
- if (memory_region_is_ram_device(section->mr)) {
- trace_vfio_listener_region_add_no_dma_map(
- memory_region_name(section->mr),
- section->offset_within_address_space,
- int128_getlo(section->size),
- qemu_real_host_page_size());
- }
- return;
- }
-
- if (!vfio_container_add_section_window(bcontainer, section, &err)) {
- goto fail;
- }
-
- memory_region_ref(section->mr);
-
- if (memory_region_is_iommu(section->mr)) {
- VFIOGuestIOMMU *giommu;
- IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
- int iommu_idx;
-
- trace_vfio_listener_region_add_iommu(iova, end);
- /*
- * FIXME: For VFIO iommu types which have KVM acceleration to
- * avoid bouncing all map/unmaps through qemu this way, this
- * would be the right place to wire that up (tell the KVM
- * device emulation the VFIO iommu handles to use).
- */
- giommu = g_malloc0(sizeof(*giommu));
- giommu->iommu_mr = iommu_mr;
- giommu->iommu_offset = section->offset_within_address_space -
- section->offset_within_region;
- giommu->bcontainer = bcontainer;
- llend = int128_add(int128_make64(section->offset_within_region),
- section->size);
- llend = int128_sub(llend, int128_one());
- iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
- MEMTXATTRS_UNSPECIFIED);
- iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
- IOMMU_NOTIFIER_IOTLB_EVENTS,
- section->offset_within_region,
- int128_get64(llend),
- iommu_idx);
-
- ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
- &err);
- if (ret) {
- g_free(giommu);
- goto fail;
- }
- QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next);
- memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
-
- return;
- }
-
- /* Here we assume that memory_region_is_ram(section->mr)==true */
-
- /*
- * For RAM memory regions with a RamDiscardManager, we only want to map the
- * actually populated parts - and update the mapping whenever we're notified
- * about changes.
- */
- if (memory_region_has_ram_discard_manager(section->mr)) {
- vfio_register_ram_discard_listener(bcontainer, section);
- return;
- }
-
- vaddr = memory_region_get_ram_ptr(section->mr) +
- section->offset_within_region +
- (iova - section->offset_within_address_space);
-
- trace_vfio_listener_region_add_ram(iova, end, vaddr);
-
- llsize = int128_sub(llend, int128_make64(iova));
-
- if (memory_region_is_ram_device(section->mr)) {
- hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
-
- if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
- trace_vfio_listener_region_add_no_dma_map(
- memory_region_name(section->mr),
- section->offset_within_address_space,
- int128_getlo(section->size),
- pgmask + 1);
- return;
- }
- }
-
- ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize),
- vaddr, section->readonly);
- if (ret) {
- error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx", %p) = %d (%s)",
- bcontainer, iova, int128_get64(llsize), vaddr, ret,
- strerror(-ret));
- if (memory_region_is_ram_device(section->mr)) {
- /* Allow unexpected mappings not to be fatal for RAM devices */
- error_report_err(err);
- return;
- }
- goto fail;
- }
-
- return;
-
-fail:
- if (memory_region_is_ram_device(section->mr)) {
- error_reportf_err(err, "PCI p2p may not work: ");
- return;
- }
- /*
- * On the initfn path, store the first error in the container so we
-     * can gracefully fail. At runtime, there's not much we can do other
-     * than throw a hardware error.
- */
- if (!bcontainer->initialized) {
- if (!bcontainer->error) {
- error_propagate_prepend(&bcontainer->error, err,
- "Region %s: ",
- memory_region_name(section->mr));
- } else {
- error_free(err);
- }
- } else {
- error_report_err(err);
- hw_error("vfio: DMA mapping failed, unable to continue");
- }
-}
-
-static void vfio_listener_region_del(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
- listener);
- hwaddr iova, end;
- Int128 llend, llsize;
- int ret;
- bool try_unmap = true;
-
- if (!vfio_listener_valid_section(section, "region_del")) {
- return;
- }
-
- if (memory_region_is_iommu(section->mr)) {
- VFIOGuestIOMMU *giommu;
-
- QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
- if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
- giommu->n.start == section->offset_within_region) {
- memory_region_unregister_iommu_notifier(section->mr,
- &giommu->n);
- QLIST_REMOVE(giommu, giommu_next);
- g_free(giommu);
- break;
- }
- }
-
- /*
- * FIXME: We assume the one big unmap below is adequate to
- * remove any individual page mappings in the IOMMU which
- * might have been copied into VFIO. This works for a page table
- * based IOMMU where a big unmap flattens a large range of IO-PTEs.
- * That may not be true for all IOMMU types.
- */
- }
-
- if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
- &llend)) {
- return;
- }
-
- llsize = int128_sub(llend, int128_make64(iova));
-
- trace_vfio_listener_region_del(iova, end);
-
- if (memory_region_is_ram_device(section->mr)) {
- hwaddr pgmask;
-
- pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
- try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
- } else if (memory_region_has_ram_discard_manager(section->mr)) {
- vfio_unregister_ram_discard_listener(bcontainer, section);
- /* Unregistering will trigger an unmap. */
- try_unmap = false;
- }
-
- if (try_unmap) {
- if (int128_eq(llsize, int128_2_64())) {
- /* The unmap ioctl doesn't accept a full 64-bit span. */
- llsize = int128_rshift(llsize, 1);
- ret = vfio_container_dma_unmap(bcontainer, iova,
- int128_get64(llsize), NULL);
- if (ret) {
- error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%s)",
- bcontainer, iova, int128_get64(llsize), ret,
- strerror(-ret));
- }
- iova += int128_get64(llsize);
- }
- ret = vfio_container_dma_unmap(bcontainer, iova,
- int128_get64(llsize), NULL);
- if (ret) {
- error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%s)",
- bcontainer, iova, int128_get64(llsize), ret,
- strerror(-ret));
- }
- }
-
- memory_region_unref(section->mr);
-
- vfio_container_del_section_window(bcontainer, section);
-}
-
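One subtlety worth spelling out (a reading note, not part of the patch): the int128_2_64() branch above exists because the unmap ioctl's size field is a 64-bit integer.

/*
 * llsize == 2^64 cannot be expressed in the ioctl's __u64 size field (it
 * would wrap to 0), so such a region is unmapped as two back-to-back halves:
 *
 *     first  call: iova = original iova,        size = 2^63
 *     second call: iova = original iova + 2^63, size = 2^63
 *
 * Any smaller region goes through the single vfio_container_dma_unmap()
 * call that follows.
 */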
-typedef struct VFIODirtyRanges {
- hwaddr min32;
- hwaddr max32;
- hwaddr min64;
- hwaddr max64;
- hwaddr minpci64;
- hwaddr maxpci64;
-} VFIODirtyRanges;
-
-typedef struct VFIODirtyRangesListener {
- VFIOContainerBase *bcontainer;
- VFIODirtyRanges ranges;
- MemoryListener listener;
-} VFIODirtyRangesListener;
-
-static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
- VFIOContainerBase *bcontainer)
-{
- VFIOPCIDevice *pcidev;
- VFIODevice *vbasedev;
- Object *owner;
-
- owner = memory_region_owner(section->mr);
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
- if (OBJECT(pcidev) == owner) {
- return true;
- }
- }
-
- return false;
-}
-
-static void vfio_dirty_tracking_update_range(VFIODirtyRanges *range,
- hwaddr iova, hwaddr end,
- bool update_pci)
-{
- hwaddr *min, *max;
-
- /*
- * The address space passed to the dirty tracker is reduced to three ranges:
- * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
- * PCI 64-bit hole.
- *
- * The underlying reports of dirty will query a sub-interval of each of
- * these ranges.
- *
-     * The purpose of the three-range handling is to cover known cases of big
- * holes in the address space, like the x86 AMD 1T hole, and firmware (like
- * OVMF) which may relocate the pci-hole64 to the end of the address space.
- * The latter would otherwise generate large ranges for tracking, stressing
- * the limits of supported hardware. The pci-hole32 will always be below 4G
- * (overlapping or not) so it doesn't need special handling and is part of
- * the 32-bit range.
- *
- * The alternative would be an IOVATree but that has a much bigger runtime
- * overhead and unnecessary complexity.
- */
- if (update_pci && iova >= UINT32_MAX) {
- min = &range->minpci64;
- max = &range->maxpci64;
- } else {
- min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
- max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
- }
- if (*min > iova) {
- *min = iova;
- }
- if (*max < end) {
- *max = end;
- }
-
- trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
-}
-
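A few illustrative sections (addresses invented for the example) show how the classification above routes them:

/*
 *   section [iova, end]                                update_pci   routed to
 *   [0x0000_0000,    0x7fff_ffff]    guest RAM         false        min32/max32
 *   [0x1_0000_0000,  0x1_7fff_ffff]  guest RAM         false        min64/max64
 *   [0x38_0000_0000, 0x38_0fff_ffff] vfio-pci BAR      true         minpci64/maxpci64
 *
 * update_pci only routes a section into the pci64 range when its iova is
 * already above the 32-bit range (iova >= UINT32_MAX); a vfio-pci BAR
 * mapped below 4G stays in the 32-bit range.
 */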
-static void vfio_dirty_tracking_update(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- VFIODirtyRangesListener *dirty =
- container_of(listener, VFIODirtyRangesListener, listener);
- hwaddr iova, end;
-
- if (!vfio_listener_valid_section(section, "tracking_update") ||
- !vfio_get_section_iova_range(dirty->bcontainer, section,
- &iova, &end, NULL)) {
- return;
- }
-
- vfio_dirty_tracking_update_range(&dirty->ranges, iova, end,
- vfio_section_is_vfio_pci(section, dirty->bcontainer));
-}
-
-static const MemoryListener vfio_dirty_tracking_listener = {
- .name = "vfio-tracking",
- .region_add = vfio_dirty_tracking_update,
-};
-
-static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer,
- VFIODirtyRanges *ranges)
-{
- VFIODirtyRangesListener dirty;
-
- memset(&dirty, 0, sizeof(dirty));
- dirty.ranges.min32 = UINT32_MAX;
- dirty.ranges.min64 = UINT64_MAX;
- dirty.ranges.minpci64 = UINT64_MAX;
- dirty.listener = vfio_dirty_tracking_listener;
- dirty.bcontainer = bcontainer;
-
- memory_listener_register(&dirty.listener,
- bcontainer->space->as);
-
- *ranges = dirty.ranges;
-
- /*
-     * The memory listener is synchronous and is only used to calculate the
-     * ranges for dirty tracking. Unregister it once we are done, as we are
-     * not interested in any follow-up updates.
- */
- memory_listener_unregister(&dirty.listener);
-}
-
-static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer)
-{
- uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
- sizeof(uint64_t))] = {};
- struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
- VFIODevice *vbasedev;
-
- feature->argsz = sizeof(buf);
- feature->flags = VFIO_DEVICE_FEATURE_SET |
- VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- if (!vbasedev->dirty_tracking) {
- continue;
- }
-
- if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
- warn_report("%s: Failed to stop DMA logging, err %d (%s)",
- vbasedev->name, -errno, strerror(errno));
- }
- vbasedev->dirty_tracking = false;
- }
-}
-
-static struct vfio_device_feature *
-vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer,
- VFIODirtyRanges *tracking)
-{
- struct vfio_device_feature *feature;
- size_t feature_size;
- struct vfio_device_feature_dma_logging_control *control;
- struct vfio_device_feature_dma_logging_range *ranges;
-
- feature_size = sizeof(struct vfio_device_feature) +
- sizeof(struct vfio_device_feature_dma_logging_control);
- feature = g_try_malloc0(feature_size);
- if (!feature) {
- errno = ENOMEM;
- return NULL;
- }
- feature->argsz = feature_size;
- feature->flags = VFIO_DEVICE_FEATURE_SET |
- VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
-
- control = (struct vfio_device_feature_dma_logging_control *)feature->data;
- control->page_size = qemu_real_host_page_size();
-
- /*
- * DMA logging uAPI guarantees to support at least a number of ranges that
- * fits into a single host kernel base page.
- */
- control->num_ranges = !!tracking->max32 + !!tracking->max64 +
- !!tracking->maxpci64;
- ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
- control->num_ranges);
- if (!ranges) {
- g_free(feature);
- errno = ENOMEM;
-
- return NULL;
- }
-
- control->ranges = (uintptr_t)ranges;
- if (tracking->max32) {
- ranges->iova = tracking->min32;
- ranges->length = (tracking->max32 - tracking->min32) + 1;
- ranges++;
- }
- if (tracking->max64) {
- ranges->iova = tracking->min64;
- ranges->length = (tracking->max64 - tracking->min64) + 1;
- ranges++;
- }
- if (tracking->maxpci64) {
- ranges->iova = tracking->minpci64;
- ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
- }
-
- trace_vfio_device_dirty_tracking_start(control->num_ranges,
- tracking->min32, tracking->max32,
- tracking->min64, tracking->max64,
- tracking->minpci64, tracking->maxpci64);
-
- return feature;
-}
-
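As a reading aid (not part of the patch), the two allocations made above end up related like this when all three ranges are in use:

/*
 *   feature = g_try_malloc0(feature_size):
 *     struct vfio_device_feature        { argsz, flags = SET | LOGGING_START }
 *     struct vfio_device_feature_dma_logging_control   (in feature->data)
 *       .page_size  = qemu_real_host_page_size()
 *       .num_ranges = 3
 *       .ranges     = (uintptr_t)ranges  ---+
 *                                           |
 *   ranges = g_try_new0(..., num_ranges) <--+
 *     ranges[0] = { .iova = min32,    .length = max32    - min32    + 1 }
 *     ranges[1] = { .iova = min64,    .length = max64    - min64    + 1 }
 *     ranges[2] = { .iova = minpci64, .length = maxpci64 - minpci64 + 1 }
 *
 * Only a user pointer to the range array is stored in the control struct,
 * which is why vfio_device_feature_dma_logging_start_destroy() must free
 * both allocations separately.
 */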
-static void vfio_device_feature_dma_logging_start_destroy(
- struct vfio_device_feature *feature)
-{
- struct vfio_device_feature_dma_logging_control *control =
- (struct vfio_device_feature_dma_logging_control *)feature->data;
- struct vfio_device_feature_dma_logging_range *ranges =
- (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;
-
- g_free(ranges);
- g_free(feature);
-}
-
-static bool vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer,
- Error **errp)
-{
- struct vfio_device_feature *feature;
- VFIODirtyRanges ranges;
- VFIODevice *vbasedev;
- int ret = 0;
-
- vfio_dirty_tracking_init(bcontainer, &ranges);
- feature = vfio_device_feature_dma_logging_start_create(bcontainer,
- &ranges);
- if (!feature) {
- error_setg_errno(errp, errno, "Failed to prepare DMA logging");
- return false;
- }
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- if (vbasedev->dirty_tracking) {
- continue;
- }
-
- ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
- if (ret) {
- ret = -errno;
- error_setg_errno(errp, errno, "%s: Failed to start DMA logging",
- vbasedev->name);
- goto out;
- }
- vbasedev->dirty_tracking = true;
- }
-
-out:
- if (ret) {
- vfio_devices_dma_logging_stop(bcontainer);
- }
-
- vfio_device_feature_dma_logging_start_destroy(feature);
-
- return ret == 0;
-}
-
-static bool vfio_listener_log_global_start(MemoryListener *listener,
- Error **errp)
-{
- ERRP_GUARD();
- VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
- listener);
- bool ret;
-
- if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
- ret = vfio_devices_dma_logging_start(bcontainer, errp);
- } else {
- ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp) == 0;
- }
-
- if (!ret) {
- error_prepend(errp, "vfio: Could not start dirty page tracking - ");
- }
- return ret;
-}
-
-static void vfio_listener_log_global_stop(MemoryListener *listener)
-{
- VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
- listener);
- Error *local_err = NULL;
- int ret = 0;
-
- if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
- vfio_devices_dma_logging_stop(bcontainer);
- } else {
- ret = vfio_container_set_dirty_page_tracking(bcontainer, false,
- &local_err);
- }
-
- if (ret) {
- error_prepend(&local_err,
- "vfio: Could not stop dirty page tracking - ");
- error_report_err(local_err);
- vfio_set_migration_error(ret);
- }
-}
-
-static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
- hwaddr size, void *bitmap)
-{
- uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
- sizeof(struct vfio_device_feature_dma_logging_report),
- sizeof(uint64_t))] = {};
- struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
- struct vfio_device_feature_dma_logging_report *report =
- (struct vfio_device_feature_dma_logging_report *)feature->data;
-
- report->iova = iova;
- report->length = size;
- report->page_size = qemu_real_host_page_size();
- report->bitmap = (uintptr_t)bitmap;
-
- feature->argsz = sizeof(buf);
- feature->flags = VFIO_DEVICE_FEATURE_GET |
- VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
-
- if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
- return -errno;
- }
-
- return 0;
-}
-
-int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
- VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
-{
- VFIODevice *vbasedev;
- int ret;
-
- QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
- ret = vfio_device_dma_logging_report(vbasedev, iova, size,
- vbmap->bitmap);
- if (ret) {
- error_setg_errno(errp, -ret,
- "%s: Failed to get DMA logging report, iova: "
- "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx,
- vbasedev->name, iova, size);
-
- return ret;
- }
- }
-
- return 0;
-}
-
-int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr, Error **errp)
-{
- bool all_device_dirty_tracking =
- vfio_devices_all_device_dirty_tracking(bcontainer);
- uint64_t dirty_pages;
- VFIOBitmap vbmap;
- int ret;
-
- if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
- cpu_physical_memory_set_dirty_range(ram_addr, size,
- tcg_enabled() ? DIRTY_CLIENTS_ALL :
- DIRTY_CLIENTS_NOCODE);
- return 0;
- }
-
- ret = vfio_bitmap_alloc(&vbmap, size);
- if (ret) {
- error_setg_errno(errp, -ret,
- "Failed to allocate dirty tracking bitmap");
- return ret;
- }
-
- if (all_device_dirty_tracking) {
- ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
- errp);
- } else {
- ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
- errp);
- }
-
- if (ret) {
- goto out;
- }
-
- dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
- vbmap.pages);
-
- trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages);
-out:
- g_free(vbmap.bitmap);
-
- return ret;
-}
-
-typedef struct {
- IOMMUNotifier n;
- VFIOGuestIOMMU *giommu;
-} vfio_giommu_dirty_notifier;
-
-static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
-{
- vfio_giommu_dirty_notifier *gdn = container_of(n,
- vfio_giommu_dirty_notifier, n);
- VFIOGuestIOMMU *giommu = gdn->giommu;
- VFIOContainerBase *bcontainer = giommu->bcontainer;
- hwaddr iova = iotlb->iova + giommu->iommu_offset;
- ram_addr_t translated_addr;
- Error *local_err = NULL;
- int ret = -EINVAL;
-
- trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
-
- if (iotlb->target_as != &address_space_memory) {
- error_report("Wrong target AS \"%s\", only system memory is allowed",
- iotlb->target_as->name ? iotlb->target_as->name : "none");
- goto out;
- }
-
- rcu_read_lock();
- if (!vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, &local_err)) {
- error_report_err(local_err);
- goto out_unlock;
- }
-
- ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
- translated_addr, &local_err);
- if (ret) {
- error_prepend(&local_err,
- "vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") failed - ", bcontainer, iova,
- iotlb->addr_mask + 1);
- error_report_err(local_err);
- }
-
-out_unlock:
- rcu_read_unlock();
-
-out:
- if (ret) {
- vfio_set_migration_error(ret);
- }
-}
-
-static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
- void *opaque)
-{
- const hwaddr size = int128_get64(section->size);
- const hwaddr iova = section->offset_within_address_space;
- const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
- section->offset_within_region;
- VFIORamDiscardListener *vrdl = opaque;
- Error *local_err = NULL;
- int ret;
-
- /*
- * Sync the whole mapped region (spanning multiple individual mappings)
- * in one go.
- */
- ret = vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
- &local_err);
- if (ret) {
- error_report_err(local_err);
- }
- return ret;
-}
-
-static int
-vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section)
-{
- RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
- VFIORamDiscardListener *vrdl = NULL;
-
- QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
- if (vrdl->mr == section->mr &&
- vrdl->offset_within_address_space ==
- section->offset_within_address_space) {
- break;
- }
- }
-
- if (!vrdl) {
- hw_error("vfio: Trying to sync missing RAM discard listener");
- }
-
- /*
- * We only want/can synchronize the bitmap for actually mapped parts -
- * which correspond to populated parts. Replay all populated parts.
- */
- return ram_discard_manager_replay_populated(rdm, section,
- vfio_ram_discard_get_dirty_bitmap,
-                                                vrdl);
-}
-
-static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section)
-{
- VFIOGuestIOMMU *giommu;
- bool found = false;
- Int128 llend;
- vfio_giommu_dirty_notifier gdn;
- int idx;
-
- QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
- if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
- giommu->n.start == section->offset_within_region) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- return 0;
- }
-
- gdn.giommu = giommu;
- idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr,
- MEMTXATTRS_UNSPECIFIED);
-
- llend = int128_add(int128_make64(section->offset_within_region),
- section->size);
- llend = int128_sub(llend, int128_one());
-
- iommu_notifier_init(&gdn.n, vfio_iommu_map_dirty_notify, IOMMU_NOTIFIER_MAP,
- section->offset_within_region, int128_get64(llend),
- idx);
- memory_region_iommu_replay(giommu->iommu_mr, &gdn.n);
-
- return 0;
-}
-
-static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
- MemoryRegionSection *section, Error **errp)
-{
- ram_addr_t ram_addr;
-
- if (memory_region_is_iommu(section->mr)) {
- return vfio_sync_iommu_dirty_bitmap(bcontainer, section);
- } else if (memory_region_has_ram_discard_manager(section->mr)) {
- int ret;
-
- ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
- if (ret) {
- error_setg(errp,
- "Failed to sync dirty bitmap with RAM discard listener");
- }
- return ret;
- }
-
- ram_addr = memory_region_get_ram_addr(section->mr) +
- section->offset_within_region;
-
- return vfio_get_dirty_bitmap(bcontainer,
- REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
- int128_get64(section->size), ram_addr, errp);
-}
-
-static void vfio_listener_log_sync(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
- listener);
- int ret;
- Error *local_err = NULL;
-
- if (vfio_listener_skipped_section(section)) {
- return;
- }
-
- if (vfio_devices_all_dirty_tracking(bcontainer)) {
- ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err);
- if (ret) {
- error_report_err(local_err);
- vfio_set_migration_error(ret);
- }
- }
-}
-
-const MemoryListener vfio_memory_listener = {
- .name = "vfio",
- .region_add = vfio_listener_region_add,
- .region_del = vfio_listener_region_del,
- .log_global_start = vfio_listener_log_global_start,
- .log_global_stop = vfio_listener_log_global_stop,
- .log_sync = vfio_listener_log_sync,
-};
-
-void vfio_reset_handler(void *opaque)
-{
- VFIODevice *vbasedev;
-
- QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
- if (vbasedev->dev->realized) {
- vbasedev->ops->vfio_compute_needs_reset(vbasedev);
- }
- }
-
- QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
- if (vbasedev->dev->realized && vbasedev->needs_reset) {
- vbasedev->ops->vfio_hot_reset_multi(vbasedev);
- }
- }
-}
-
-int vfio_kvm_device_add_fd(int fd, Error **errp)
-{
-#ifdef CONFIG_KVM
- struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_FILE,
- .attr = KVM_DEV_VFIO_FILE_ADD,
- .addr = (uint64_t)(unsigned long)&fd,
- };
-
- if (!kvm_enabled()) {
- return 0;
- }
-
- if (vfio_kvm_device_fd < 0) {
- struct kvm_create_device cd = {
- .type = KVM_DEV_TYPE_VFIO,
- };
-
- if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
- error_setg_errno(errp, errno, "Failed to create KVM VFIO device");
- return -errno;
- }
-
- vfio_kvm_device_fd = cd.fd;
- }
-
- if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device",
- fd);
- return -errno;
- }
-#endif
- return 0;
-}
-
-int vfio_kvm_device_del_fd(int fd, Error **errp)
-{
-#ifdef CONFIG_KVM
- struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_FILE,
- .attr = KVM_DEV_VFIO_FILE_DEL,
- .addr = (uint64_t)(unsigned long)&fd,
- };
-
- if (vfio_kvm_device_fd < 0) {
- error_setg(errp, "KVM VFIO device isn't created yet");
- return -EINVAL;
- }
-
- if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_setg_errno(errp, errno,
- "Failed to remove fd %d from KVM VFIO device", fd);
- return -errno;
- }
-#endif
- return 0;
-}
-
-VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
-{
- VFIOAddressSpace *space;
-
- QLIST_FOREACH(space, &vfio_address_spaces, list) {
- if (space->as == as) {
- return space;
- }
- }
-
- /* No suitable VFIOAddressSpace, create a new one */
- space = g_malloc0(sizeof(*space));
- space->as = as;
- QLIST_INIT(&space->containers);
-
- if (QLIST_EMPTY(&vfio_address_spaces)) {
- qemu_register_reset(vfio_reset_handler, NULL);
- }
-
- QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
-
- return space;
-}
-
-void vfio_put_address_space(VFIOAddressSpace *space)
-{
- if (!QLIST_EMPTY(&space->containers)) {
- return;
- }
-
- QLIST_REMOVE(space, list);
- g_free(space);
-
- if (QLIST_EMPTY(&vfio_address_spaces)) {
- qemu_unregister_reset(vfio_reset_handler, NULL);
- }
-}
-
-void vfio_address_space_insert(VFIOAddressSpace *space,
- VFIOContainerBase *bcontainer)
-{
- QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
- bcontainer->space = space;
-}
-
-struct vfio_device_info *vfio_get_device_info(int fd)
-{
- struct vfio_device_info *info;
- uint32_t argsz = sizeof(*info);
-
- info = g_malloc0(argsz);
-
-retry:
- info->argsz = argsz;
-
- if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
- g_free(info);
- return NULL;
- }
-
- if (info->argsz > argsz) {
- argsz = info->argsz;
- info = g_realloc(info, argsz);
- goto retry;
- }
-
- return info;
-}
-
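The argsz grow-and-retry dance above is the standard pattern for variable-size VFIO info structs. A generic standalone sketch of the same idea (query_fn and info_hdr are hypothetical stand-ins for the ioctl and struct vfio_device_info, not names from the patch):

#include <glib.h>
#include <stdint.h>

struct info_hdr {              /* stand-in for struct vfio_device_info */
    uint32_t argsz;            /* caller-provided size / kernel-wanted size */
};

/* query_fn fills *hdr and sets hdr->argsz to the size the kernel wants. */
static void *query_growable(int (*query_fn)(struct info_hdr *hdr))
{
    uint32_t argsz = sizeof(struct info_hdr);
    struct info_hdr *info = g_malloc0(argsz);

    for (;;) {
        info->argsz = argsz;
        if (query_fn(info)) {
            g_free(info);
            return NULL;
        }
        if (info->argsz <= argsz) {
            return info;       /* buffer was large enough */
        }
        argsz = info->argsz;   /* kernel wants more: grow and retry */
        info = g_realloc(info, argsz);
    }
}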
-bool vfio_attach_device(char *name, VFIODevice *vbasedev,
- AddressSpace *as, Error **errp)
-{
- const VFIOIOMMUClass *ops =
- VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY));
- HostIOMMUDevice *hiod;
-
- if (vbasedev->iommufd) {
- ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD));
- }
-
- assert(ops);
-
- if (!ops->attach_device(name, vbasedev, as, errp)) {
- return false;
- }
-
- hiod = HOST_IOMMU_DEVICE(object_new(ops->hiod_typename));
- if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
- object_unref(hiod);
- ops->detach_device(vbasedev);
- return false;
- }
- vbasedev->hiod = hiod;
-
- return true;
-}
-
-void vfio_detach_device(VFIODevice *vbasedev)
-{
- if (!vbasedev->bcontainer) {
- return;
- }
- object_unref(vbasedev->hiod);
- VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev);
-}
diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c
index 809b157..d834bd4 100644
--- a/hw/vfio/container-base.c
+++ b/hw/vfio/container-base.c
@@ -10,29 +10,87 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
#include "qemu/osdep.h"
+#include "system/tcg.h"
+#include "system/ram_addr.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/vfio/vfio-container-base.h"
+#include "hw/vfio/vfio-device.h" /* vfio_device_reset_handler */
+#include "system/reset.h"
+#include "vfio-helpers.h"
+
+#include "trace.h"
+
+static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
+ QLIST_HEAD_INITIALIZER(vfio_address_spaces);
+
+VFIOAddressSpace *vfio_address_space_get(AddressSpace *as)
+{
+ VFIOAddressSpace *space;
+
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ if (space->as == as) {
+ return space;
+ }
+ }
+
+ /* No suitable VFIOAddressSpace, create a new one */
+ space = g_malloc0(sizeof(*space));
+ space->as = as;
+ QLIST_INIT(&space->containers);
+
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
+ qemu_register_reset(vfio_device_reset_handler, NULL);
+ }
+
+ QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
+
+ return space;
+}
+
+void vfio_address_space_put(VFIOAddressSpace *space)
+{
+ if (!QLIST_EMPTY(&space->containers)) {
+ return;
+ }
+
+ QLIST_REMOVE(space, list);
+ g_free(space);
+
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
+ qemu_unregister_reset(vfio_device_reset_handler, NULL);
+ }
+}
+
+void vfio_address_space_insert(VFIOAddressSpace *space,
+ VFIOContainerBase *bcontainer)
+{
+ QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
+ bcontainer->space = space;
+}
int vfio_container_dma_map(VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- void *vaddr, bool readonly)
+ void *vaddr, bool readonly, MemoryRegion *mr)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
g_assert(vioc->dma_map);
- return vioc->dma_map(bcontainer, iova, size, vaddr, readonly);
+ return vioc->dma_map(bcontainer, iova, size, vaddr, readonly, mr);
}
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb)
+ IOMMUTLBEntry *iotlb, bool unmap_all)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
g_assert(vioc->dma_unmap);
- return vioc->dma_unmap(bcontainer, iova, size, iotlb);
+ return vioc->dma_unmap(bcontainer, iova, size, iotlb, unmap_all);
}
bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
@@ -64,16 +122,86 @@ int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
bool start, Error **errp)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+ int ret;
if (!bcontainer->dirty_pages_supported) {
return 0;
}
g_assert(vioc->set_dirty_page_tracking);
- return vioc->set_dirty_page_tracking(bcontainer, start, errp);
+ if (bcontainer->dirty_pages_started == start) {
+ return 0;
+ }
+
+ ret = vioc->set_dirty_page_tracking(bcontainer, start, errp);
+ if (!ret) {
+ bcontainer->dirty_pages_started = start;
+ }
+
+ return ret;
+}
+
+static bool vfio_container_devices_dirty_tracking_is_started(
+ const VFIOContainerBase *bcontainer)
+{
+ VFIODevice *vbasedev;
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ if (!vbasedev->dirty_tracking) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vfio_container_dirty_tracking_is_started(
+ const VFIOContainerBase *bcontainer)
+{
+ return vfio_container_devices_dirty_tracking_is_started(bcontainer) ||
+ bcontainer->dirty_pages_started;
+}
+
+bool vfio_container_devices_dirty_tracking_is_supported(
+ const VFIOContainerBase *bcontainer)
+{
+ VFIODevice *vbasedev;
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ if (vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) {
+ return false;
+ }
+ if (!vbasedev->dirty_pages_supported) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
+ hwaddr size, void *bitmap)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
+ sizeof(struct vfio_device_feature_dma_logging_report),
+ sizeof(uint64_t))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+ struct vfio_device_feature_dma_logging_report *report =
+ (struct vfio_device_feature_dma_logging_report *)feature->data;
+
+ report->iova = iova;
+ report->length = size;
+ report->page_size = qemu_real_host_page_size();
+ report->bitmap = (uintptr_t)bitmap;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
+
+ return vbasedev->io_ops->device_feature(vbasedev, feature);
}
-int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+static int vfio_container_iommu_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
@@ -83,6 +211,74 @@ int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
errp);
}
+static int vfio_container_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
+{
+ VFIODevice *vbasedev;
+ int ret;
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ ret = vfio_device_dma_logging_report(vbasedev, iova, size,
+ vbmap->bitmap);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "%s: Failed to get DMA logging report, iova: "
+ "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx,
+ vbasedev->name, iova, size);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr, Error **errp)
+{
+ bool all_device_dirty_tracking =
+ vfio_container_devices_dirty_tracking_is_supported(bcontainer);
+ uint64_t dirty_pages;
+ VFIOBitmap vbmap;
+ int ret;
+
+ if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
+ cpu_physical_memory_set_dirty_range(ram_addr, size,
+ tcg_enabled() ? DIRTY_CLIENTS_ALL :
+ DIRTY_CLIENTS_NOCODE);
+ return 0;
+ }
+
+ ret = vfio_bitmap_alloc(&vbmap, size);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "Failed to allocate dirty tracking bitmap");
+ return ret;
+ }
+
+ if (all_device_dirty_tracking) {
+ ret = vfio_container_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
+ errp);
+ } else {
+ ret = vfio_container_iommu_query_dirty_bitmap(bcontainer, &vbmap, iova, size,
+ errp);
+ }
+
+ if (ret) {
+ goto out;
+ }
+
+ dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+ vbmap.pages);
+
+ trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr,
+ dirty_pages);
+out:
+ g_free(vbmap.bitmap);
+
+ return ret;
+}
+
static gpointer copy_iova_range(gconstpointer src, gpointer data)
{
Range *source = (Range *)src;
@@ -103,7 +299,7 @@ static void vfio_container_instance_finalize(Object *obj)
VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
VFIOGuestIOMMU *giommu, *tmp;
- QLIST_REMOVE(bcontainer, next);
+ QLIST_SAFE_REMOVE(bcontainer, next);
QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
memory_region_unregister_iommu_notifier(
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 38a9df3..3e13fea 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -22,18 +22,26 @@
#include <sys/ioctl.h>
#include <linux/vfio.h>
-#include "hw/vfio/vfio-common.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-#include "exec/ram_addr.h"
+#include "hw/vfio/vfio-device.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+#include "system/ram_addr.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "trace.h"
#include "qapi/error.h"
+#include "migration/cpr.h"
+#include "migration/blocker.h"
#include "pci.h"
+#include "hw/vfio/vfio-container.h"
+#include "vfio-helpers.h"
+#include "vfio-listener.h"
-VFIOGroupList vfio_group_list =
+#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio"
+
+typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
+static VFIOGroupList vfio_group_list =
QLIST_HEAD_INITIALIZER(vfio_group_list);
static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
@@ -112,12 +120,9 @@ unmap_exit:
return ret;
}
-/*
- * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
- */
-static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
- hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb)
+static int vfio_legacy_dma_unmap_one(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
{
const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
bcontainer);
@@ -131,8 +136,10 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
int ret;
Error *local_err = NULL;
- if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
- if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
+ g_assert(!cpr_is_incoming());
+
+ if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
+ if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
bcontainer->dirty_pages_supported) {
return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
}
@@ -159,12 +166,11 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
continue;
}
- error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
return -errno;
}
if (need_dirty_sync) {
- ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
+ ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size,
iotlb->translated_addr, &local_err);
if (ret) {
error_report_err(local_err);
@@ -175,8 +181,37 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
return 0;
}
+/*
+ * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
+ */
+static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb, bool unmap_all)
+{
+ int ret;
+
+ if (unmap_all) {
+ /* The unmap ioctl doesn't accept a full 64-bit span. */
+ Int128 llsize = int128_rshift(int128_2_64(), 1);
+
+ ret = vfio_legacy_dma_unmap_one(bcontainer, 0, int128_get64(llsize),
+ iotlb);
+
+ if (ret == 0) {
+ ret = vfio_legacy_dma_unmap_one(bcontainer, int128_get64(llsize),
+ int128_get64(llsize), iotlb);
+ }
+
+ } else {
+ ret = vfio_legacy_dma_unmap_one(bcontainer, iova, size, iotlb);
+ }
+
+ return ret;
+}
+
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly)
+ ram_addr_t size, void *vaddr, bool readonly,
+ MemoryRegion *mr)
{
const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
bcontainer);
@@ -199,12 +234,11 @@ static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
*/
if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
(errno == EBUSY &&
- vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
+ vfio_legacy_dma_unmap(bcontainer, iova, size, NULL, false) == 0 &&
ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
return 0;
}
- error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
return -errno;
}
@@ -275,37 +309,6 @@ static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
return ret;
}
-static struct vfio_info_cap_header *
-vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
-{
- if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
- return NULL;
- }
-
- return vfio_get_cap((void *)info, info->cap_offset, id);
-}
-
-bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
- unsigned int *avail)
-{
- struct vfio_info_cap_header *hdr;
- struct vfio_iommu_type1_info_dma_avail *cap;
-
- /* If the capability cannot be found, assume no DMA limiting */
- hdr = vfio_get_iommu_type1_info_cap(info,
- VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
- if (!hdr) {
- return false;
- }
-
- if (avail != NULL) {
- cap = (void *) hdr;
- *avail = cap->avail;
- }
-
- return true;
-}
-
static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
VFIOContainerBase *bcontainer)
{
@@ -332,7 +335,7 @@ static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
return true;
}
-static void vfio_kvm_device_add_group(VFIOGroup *group)
+static void vfio_group_add_kvm_device(VFIOGroup *group)
{
Error *err = NULL;
@@ -341,7 +344,7 @@ static void vfio_kvm_device_add_group(VFIOGroup *group)
}
}
-static void vfio_kvm_device_del_group(VFIOGroup *group)
+static void vfio_group_del_kvm_device(VFIOGroup *group)
{
Error *err = NULL;
@@ -426,7 +429,12 @@ static VFIOContainer *vfio_create_container(int fd, VFIOGroup *group,
return NULL;
}
- if (!vfio_set_iommu(fd, group->fd, &iommu_type, errp)) {
+ /*
+ * During CPR, just set the container type and skip the ioctls, as the
+ * container and group are already configured in the kernel.
+ */
+ if (!cpr_is_incoming() &&
+ !vfio_set_iommu(fd, group->fd, &iommu_type, errp)) {
return NULL;
}
@@ -537,16 +545,10 @@ static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp)
return true;
}
-static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
- Error **errp)
+static bool vfio_container_attach_discard_disable(VFIOContainer *container,
+ VFIOGroup *group, Error **errp)
{
- VFIOContainer *container;
- VFIOContainerBase *bcontainer;
- int ret, fd;
- VFIOAddressSpace *space;
- VFIOIOMMUClass *vioc;
-
- space = vfio_get_address_space(as);
+ int ret;
/*
* VFIO is currently incompatible with discarding of RAM insofar as the
@@ -579,109 +581,158 @@ static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
* details once we know which type of IOMMU we are using.
*/
- QLIST_FOREACH(bcontainer, &space->containers, next) {
- container = container_of(bcontainer, VFIOContainer, bcontainer);
- if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
- ret = vfio_ram_block_discard_disable(container, true);
- if (ret) {
- error_setg_errno(errp, -ret,
- "Cannot set discarding of RAM broken");
- if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
- &container->fd)) {
- error_report("vfio: error disconnecting group %d from"
- " container", group->groupid);
- }
- return false;
- }
- group->container = container;
- QLIST_INSERT_HEAD(&container->group_list, group, container_next);
- vfio_kvm_device_add_group(group);
- return true;
+ ret = vfio_ram_block_discard_disable(container, true);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
+ if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
+ error_report("vfio: error disconnecting group %d from"
+ " container", group->groupid);
}
}
+ return !ret;
+}
- fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp);
- if (fd < 0) {
- goto put_space_exit;
+static bool vfio_container_group_add(VFIOContainer *container, VFIOGroup *group,
+ Error **errp)
+{
+ if (!vfio_container_attach_discard_disable(container, group, errp)) {
+ return false;
+ }
+ group->container = container;
+ QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+ vfio_group_add_kvm_device(group);
+ /*
+ * Remember the container fd for each group, so we can attach to the same
+ * container after CPR.
+ */
+ cpr_resave_fd("vfio_container_for_group", group->groupid, container->fd);
+ return true;
+}
+
+static void vfio_container_group_del(VFIOContainer *container, VFIOGroup *group)
+{
+ QLIST_REMOVE(group, container_next);
+ group->container = NULL;
+ vfio_group_del_kvm_device(group);
+ vfio_ram_block_discard_disable(container, false);
+ cpr_delete_fd("vfio_container_for_group", group->groupid);
+}
+
+static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as,
+ Error **errp)
+{
+ VFIOContainer *container;
+ VFIOContainerBase *bcontainer;
+ int ret, fd = -1;
+ VFIOAddressSpace *space;
+ VFIOIOMMUClass *vioc = NULL;
+ bool new_container = false;
+ bool group_was_added = false;
+
+ space = vfio_address_space_get(as);
+ fd = cpr_find_fd("vfio_container_for_group", group->groupid);
+
+ if (!cpr_is_incoming()) {
+ QLIST_FOREACH(bcontainer, &space->containers, next) {
+ container = container_of(bcontainer, VFIOContainer, bcontainer);
+ if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
+ return vfio_container_group_add(container, group, errp);
+ }
+ }
+
+ fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp);
+ if (fd < 0) {
+ goto fail;
+ }
+ } else {
+ /*
+ * For incoming CPR, the group is already attached in the kernel.
+ * If a container with matching fd is found, then update the
+ * userland group list and return. If not, then after the loop,
+ * create the container struct and group list.
+ */
+ QLIST_FOREACH(bcontainer, &space->containers, next) {
+ container = container_of(bcontainer, VFIOContainer, bcontainer);
+
+ if (vfio_cpr_container_match(container, group, fd)) {
+ return vfio_container_group_add(container, group, errp);
+ }
+ }
}
ret = ioctl(fd, VFIO_GET_API_VERSION);
if (ret != VFIO_API_VERSION) {
error_setg(errp, "supported vfio version: %d, "
"reported version: %d", VFIO_API_VERSION, ret);
- goto close_fd_exit;
+ goto fail;
}
container = vfio_create_container(fd, group, errp);
if (!container) {
- goto close_fd_exit;
+ goto fail;
}
+ new_container = true;
bcontainer = &container->bcontainer;
- if (!vfio_cpr_register_container(bcontainer, errp)) {
- goto free_container_exit;
- }
-
- ret = vfio_ram_block_discard_disable(container, true);
- if (ret) {
- error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
- goto unregister_container_exit;
+ if (!vfio_legacy_cpr_register_container(container, errp)) {
+ goto fail;
}
vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
assert(vioc->setup);
if (!vioc->setup(bcontainer, errp)) {
- goto enable_discards_exit;
+ goto fail;
}
- vfio_kvm_device_add_group(group);
-
vfio_address_space_insert(space, bcontainer);
- group->container = container;
- QLIST_INSERT_HEAD(&container->group_list, group, container_next);
-
- bcontainer->listener = vfio_memory_listener;
- memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+ if (!vfio_container_group_add(container, group, errp)) {
+ goto fail;
+ }
+ group_was_added = true;
- if (bcontainer->error) {
- error_propagate_prepend(errp, bcontainer->error,
- "memory listener initialization failed: ");
- goto listener_release_exit;
+ /*
+ * If CPR, register the listener later, after all state that may
+     * affect regions and mapping boundaries has been loaded by CPR. Later,
+ * the listener will invoke its callback on each flat section and call
+ * dma_map to supply the new vaddr, and the calls will match the mappings
+ * remembered by the kernel.
+ */
+ if (!cpr_is_incoming()) {
+ if (!vfio_listener_register(bcontainer, errp)) {
+ goto fail;
+ }
}
bcontainer->initialized = true;
return true;
-listener_release_exit:
- QLIST_REMOVE(group, container_next);
- QLIST_REMOVE(bcontainer, next);
- vfio_kvm_device_del_group(group);
- memory_listener_unregister(&bcontainer->listener);
- if (vioc->release) {
- vioc->release(bcontainer);
- }
-
-enable_discards_exit:
- vfio_ram_block_discard_disable(container, false);
-
-unregister_container_exit:
- vfio_cpr_unregister_container(bcontainer);
-free_container_exit:
- object_unref(container);
-
-close_fd_exit:
- close(fd);
+fail:
+ if (new_container) {
+ vfio_listener_unregister(bcontainer);
+ }
-put_space_exit:
- vfio_put_address_space(space);
+ if (group_was_added) {
+ vfio_container_group_del(container, group);
+ }
+ if (vioc && vioc->release) {
+ vioc->release(bcontainer);
+ }
+ if (new_container) {
+ vfio_legacy_cpr_unregister_container(container);
+ object_unref(container);
+ }
+ if (fd >= 0) {
+ close(fd);
+ }
+ vfio_address_space_put(space);
return false;
}
-static void vfio_disconnect_container(VFIOGroup *group)
+static void vfio_container_disconnect(VFIOGroup *group)
{
VFIOContainer *container = group->container;
VFIOContainerBase *bcontainer = &container->bcontainer;
@@ -689,6 +740,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
QLIST_REMOVE(group, container_next);
group->container = NULL;
+ cpr_delete_fd("vfio_container_for_group", group->groupid);
/*
* Explicitly release the listener first before unset container,
@@ -696,7 +748,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
* group.
*/
if (QLIST_EMPTY(&container->group_list)) {
- memory_listener_unregister(&bcontainer->listener);
+ vfio_listener_unregister(bcontainer);
if (vioc->release) {
vioc->release(bcontainer);
}
@@ -710,16 +762,16 @@ static void vfio_disconnect_container(VFIOGroup *group)
if (QLIST_EMPTY(&container->group_list)) {
VFIOAddressSpace *space = bcontainer->space;
- trace_vfio_disconnect_container(container->fd);
- vfio_cpr_unregister_container(bcontainer);
+ trace_vfio_container_disconnect(container->fd);
+ vfio_legacy_cpr_unregister_container(container);
close(container->fd);
object_unref(container);
- vfio_put_address_space(space);
+ vfio_address_space_put(space);
}
}
-static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
+static VFIOGroup *vfio_group_get(int groupid, AddressSpace *as, Error **errp)
{
ERRP_GUARD();
VFIOGroup *group;
@@ -742,7 +794,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
group = g_malloc0(sizeof(*group));
snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
- group->fd = qemu_open(path, O_RDWR, errp);
+ group->fd = cpr_open_fd(path, O_RDWR, "vfio_group", groupid, errp);
if (group->fd < 0) {
goto free_group_exit;
}
@@ -763,7 +815,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
group->groupid = groupid;
QLIST_INIT(&group->device_list);
- if (!vfio_connect_container(group, as, errp)) {
+ if (!vfio_container_connect(group, as, errp)) {
error_prepend(errp, "failed to setup container for group %d: ",
groupid);
goto close_fd_exit;
@@ -774,6 +826,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
return group;
close_fd_exit:
+ cpr_delete_fd("vfio_group", groupid);
close(group->fd);
free_group_exit:
@@ -782,7 +835,7 @@ free_group_exit:
return NULL;
}
-static void vfio_put_group(VFIOGroup *group)
+static void vfio_group_put(VFIOGroup *group)
{
if (!group || !QLIST_EMPTY(&group->device_list)) {
return;
@@ -791,21 +844,22 @@ static void vfio_put_group(VFIOGroup *group)
if (!group->ram_block_discard_allowed) {
vfio_ram_block_discard_disable(group->container, false);
}
- vfio_kvm_device_del_group(group);
- vfio_disconnect_container(group);
+ vfio_group_del_kvm_device(group);
+ vfio_container_disconnect(group);
QLIST_REMOVE(group, next);
- trace_vfio_put_group(group->fd);
+ trace_vfio_group_put(group->fd);
+ cpr_delete_fd("vfio_group", group->groupid);
close(group->fd);
g_free(group);
}
-static bool vfio_get_device(VFIOGroup *group, const char *name,
+static bool vfio_device_get(VFIOGroup *group, const char *name,
VFIODevice *vbasedev, Error **errp)
{
g_autofree struct vfio_device_info *info = NULL;
int fd;
- fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
+ fd = vfio_cpr_group_get_device_fd(group->fd, name);
if (fd < 0) {
error_setg_errno(errp, errno, "error getting device from group %d",
group->groupid);
@@ -818,8 +872,7 @@ static bool vfio_get_device(VFIOGroup *group, const char *name,
info = vfio_get_device_info(fd);
if (!info) {
error_setg_errno(errp, errno, "error getting device info");
- close(fd);
- return false;
+ goto fail;
}
/*
@@ -833,8 +886,7 @@ static bool vfio_get_device(VFIOGroup *group, const char *name,
if (!QLIST_EMPTY(&group->device_list)) {
error_setg(errp, "Inconsistent setting of support for discarding "
"RAM (e.g., balloon) within group");
- close(fd);
- return false;
+ goto fail;
}
if (!group->ram_block_discard_allowed) {
@@ -843,33 +895,35 @@ static bool vfio_get_device(VFIOGroup *group, const char *name,
}
}
+ vfio_device_prepare(vbasedev, &group->container->bcontainer, info);
+
vbasedev->fd = fd;
vbasedev->group = group;
QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
- vbasedev->num_irqs = info->num_irqs;
- vbasedev->num_regions = info->num_regions;
- vbasedev->flags = info->flags;
-
- trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);
-
- vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);
+ trace_vfio_device_get(name, info->flags, info->num_regions, info->num_irqs);
return true;
+
+fail:
+ close(fd);
+ cpr_delete_fd(name, 0);
+ return false;
}
-static void vfio_put_base_device(VFIODevice *vbasedev)
+static void vfio_device_put(VFIODevice *vbasedev)
{
if (!vbasedev->group) {
return;
}
QLIST_REMOVE(vbasedev, next);
vbasedev->group = NULL;
- trace_vfio_put_base_device(vbasedev->fd);
+ trace_vfio_device_put(vbasedev->fd);
+ cpr_delete_fd(vbasedev->name, 0);
close(vbasedev->fd);
}
-static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
+static int vfio_device_get_groupid(VFIODevice *vbasedev, Error **errp)
{
char *tmp, group_path[PATH_MAX];
g_autofree char *group_name = NULL;
@@ -897,25 +951,24 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
}
/*
- * vfio_attach_device: attach a device to a security context
+ * vfio_device_attach: attach a device to a security context
* @name and @vbasedev->name are likely to be different depending
* on the type of the device, hence the need for passing @name
*/
static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
AddressSpace *as, Error **errp)
{
- int groupid = vfio_device_groupid(vbasedev, errp);
+ int groupid = vfio_device_get_groupid(vbasedev, errp);
VFIODevice *vbasedev_iter;
VFIOGroup *group;
- VFIOContainerBase *bcontainer;
if (groupid < 0) {
return false;
}
- trace_vfio_attach_device(vbasedev->name, groupid);
+ trace_vfio_device_attach(vbasedev->name, groupid);
- group = vfio_get_group(groupid, as, errp);
+ group = vfio_group_get(groupid, as, errp);
if (!group) {
return false;
}
@@ -923,33 +976,51 @@ static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
error_setg(errp, "device is already attached");
- vfio_put_group(group);
- return false;
+ goto group_put_exit;
}
}
- if (!vfio_get_device(group, name, vbasedev, errp)) {
- vfio_put_group(group);
- return false;
+ if (!vfio_device_get(group, name, vbasedev, errp)) {
+ goto group_put_exit;
+ }
+
+ if (!vfio_device_hiod_create_and_realize(vbasedev,
+ TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO,
+ errp)) {
+ goto device_put_exit;
}
- bcontainer = &group->container->bcontainer;
- vbasedev->bcontainer = bcontainer;
- QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
- QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
+ if (vbasedev->mdev) {
+ error_setg(&vbasedev->cpr.mdev_blocker,
+ "CPR does not support vfio mdev %s", vbasedev->name);
+ if (migrate_add_blocker_modes(&vbasedev->cpr.mdev_blocker, errp,
+ MIG_MODE_CPR_TRANSFER, -1) < 0) {
+ goto hiod_unref_exit;
+ }
+ }
return true;
+
+hiod_unref_exit:
+ object_unref(vbasedev->hiod);
+device_put_exit:
+ vfio_device_put(vbasedev);
+group_put_exit:
+ vfio_group_put(group);
+ return false;
}
static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
VFIOGroup *group = vbasedev->group;
- QLIST_REMOVE(vbasedev, global_next);
- QLIST_REMOVE(vbasedev, container_next);
- vbasedev->bcontainer = NULL;
- trace_vfio_detach_device(vbasedev->name, group->groupid);
- vfio_put_base_device(vbasedev);
- vfio_put_group(group);
+ trace_vfio_device_detach(vbasedev->name, group->groupid);
+
+ vfio_device_unprepare(vbasedev);
+
+ migrate_del_blocker(&vbasedev->cpr.mdev_blocker);
+ object_unref(vbasedev->hiod);
+ vfio_device_put(vbasedev);
+ vfio_group_put(group);
}
static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
@@ -1120,12 +1191,10 @@ out_single:
return ret;
}
-static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data)
+static void vfio_iommu_legacy_class_init(ObjectClass *klass, const void *data)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
- vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO;
-
vioc->setup = vfio_legacy_setup;
vioc->dma_map = vfio_legacy_dma_map;
vioc->dma_unmap = vfio_legacy_dma_unmap;
@@ -1142,7 +1211,6 @@ static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
VFIODevice *vdev = opaque;
hiod->name = g_strdup(vdev->name);
- hiod->caps.aw_bits = vfio_device_get_aw_bits(vdev);
hiod->agent = opaque;
return true;
@@ -1151,11 +1219,9 @@ static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
static int hiod_legacy_vfio_get_cap(HostIOMMUDevice *hiod, int cap,
Error **errp)
{
- HostIOMMUDeviceCaps *caps = &hiod->caps;
-
switch (cap) {
case HOST_IOMMU_DEVICE_CAP_AW_BITS:
- return caps->aw_bits;
+ return vfio_device_get_aw_bits(hiod->agent);
default:
error_setg(errp, "%s: unsupported capability %x", hiod->name, cap);
return -EINVAL;
@@ -1187,7 +1253,7 @@ static void vfio_iommu_legacy_instance_init(Object *obj)
QLIST_INIT(&container->group_list);
}
-static void hiod_legacy_vfio_class_init(ObjectClass *oc, void *data)
+static void hiod_legacy_vfio_class_init(ObjectClass *oc, const void *data)
{
HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc);
diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c
new file mode 100644
index 0000000..a84c324
--- /dev/null
+++ b/hw/vfio/cpr-legacy.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2021-2025 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+#include "qemu/osdep.h"
+#include "hw/vfio/vfio-container.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-listener.h"
+#include "migration/blocker.h"
+#include "migration/cpr.h"
+#include "migration/migration.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
+{
+ struct vfio_iommu_type1_dma_unmap unmap = {
+ .argsz = sizeof(unmap),
+ .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
+ .iova = 0,
+ .size = 0,
+ };
+ if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+ error_setg_errno(errp, errno, "vfio_dma_unmap_vaddr_all");
+ return false;
+ }
+ container->cpr.vaddr_unmapped = true;
+ return true;
+}
+
+/*
+ * Set the new @vaddr for any mappings registered during cpr load.
+ * The incoming state is cleared thereafter.
+ */
+static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size, void *vaddr,
+ bool readonly, MemoryRegion *mr)
+{
+ const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_VADDR,
+ .vaddr = (__u64)(uintptr_t)vaddr,
+ .iova = iova,
+ .size = size,
+ };
+
+ g_assert(cpr_is_incoming());
+
+ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
+ return -errno;
+ }
+
+ return 0;
+}
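
The save/load pair above boils down to two type1 IOMMU ioctls: an unmap with VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL that invalidates only the user virtual addresses while leaving the IOVA mappings and pinnings in place, and a map with VFIO_DMA_MAP_FLAG_VADDR that supplies the new vaddr after the CPR exec. A minimal standalone sketch of that pairing follows; it assumes linux/vfio.h from a kernel that advertises VFIO_UPDATE_VADDR and VFIO_UNMAP_ALL, and the cpr_sketch_* helper names are illustrative, not part of this patch.

/* Sketch only: the VFIO_UPDATE_VADDR handshake used by the CPR code above. */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Invalidate the vaddr of every mapping while keeping IOVAs pinned. */
int cpr_sketch_unmap_all_vaddr(int container_fd)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
        .iova = 0,
        .size = 0,
    };

    return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) ? -errno : 0;
}

/* Re-associate one existing mapping with its new vaddr in the new process. */
int cpr_sketch_remap_vaddr(int container_fd, uint64_t iova,
                           uint64_t size, void *new_vaddr)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR,
        .vaddr = (uintptr_t)new_vaddr,
        .iova = iova,
        .size = size,
    };

    return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) ? -errno : 0;
}

DMA keeps flowing between the two calls because the IOMMU mappings themselves are never torn down; only the vaddr bookkeeping is refreshed.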
+
+static void vfio_region_remap(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ cpr.remap_listener);
+ vfio_container_region_add(&container->bcontainer, section, true);
+}
+
+static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
+{
+ if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
+ error_setg(errp, "VFIO container does not support VFIO_UPDATE_VADDR");
+ return false;
+
+ } else if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UNMAP_ALL)) {
+ error_setg(errp, "VFIO container does not support VFIO_UNMAP_ALL");
+ return false;
+
+ } else {
+ return true;
+ }
+}
+
+static int vfio_container_pre_save(void *opaque)
+{
+ VFIOContainer *container = opaque;
+ Error *local_err = NULL;
+
+ if (!vfio_dma_unmap_vaddr_all(container, &local_err)) {
+ error_report_err(local_err);
+ return -1;
+ }
+ return 0;
+}
+
+static int vfio_container_post_load(void *opaque, int version_id)
+{
+ VFIOContainer *container = opaque;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+ VFIOGroup *group;
+ Error *local_err = NULL;
+
+ if (!vfio_listener_register(bcontainer, &local_err)) {
+ error_report_err(local_err);
+ return -1;
+ }
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+
+ /* Restore original dma_map function */
+ vioc->dma_map = container->cpr.saved_dma_map;
+ }
+ return 0;
+}
+
+static const VMStateDescription vfio_container_vmstate = {
+ .name = "vfio-container",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .priority = MIG_PRI_LOW, /* Must happen after devices and groups */
+ .pre_save = vfio_container_pre_save,
+ .post_load = vfio_container_post_load,
+ .needed = cpr_incoming_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int vfio_cpr_fail_notifier(NotifierWithReturn *notifier,
+ MigrationEvent *e, Error **errp)
+{
+ VFIOContainer *container =
+ container_of(notifier, VFIOContainer, cpr.transfer_notifier);
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+
+ if (e->type != MIG_EVENT_PRECOPY_FAILED) {
+ return 0;
+ }
+
+ if (container->cpr.vaddr_unmapped) {
+ /*
+ * Force a call to vfio_region_remap for each mapped section by
+ * temporarily registering a listener, and temporarily diverting
+ * dma_map to vfio_legacy_cpr_dma_map. The latter restores vaddr.
+ */
+
+ VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+ vioc->dma_map = vfio_legacy_cpr_dma_map;
+
+ container->cpr.remap_listener = (MemoryListener) {
+ .name = "vfio cpr recover",
+ .region_add = vfio_region_remap
+ };
+ memory_listener_register(&container->cpr.remap_listener,
+ bcontainer->space->as);
+ memory_listener_unregister(&container->cpr.remap_listener);
+ container->cpr.vaddr_unmapped = false;
+ vioc->dma_map = container->cpr.saved_dma_map;
+ }
+ return 0;
+}
+
+bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp)
+{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+ Error **cpr_blocker = &container->cpr.blocker;
+
+ migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier,
+ vfio_cpr_reboot_notifier,
+ MIG_MODE_CPR_REBOOT);
+
+ if (!vfio_cpr_supported(container, cpr_blocker)) {
+ return migrate_add_blocker_modes(cpr_blocker, errp,
+ MIG_MODE_CPR_TRANSFER, -1) == 0;
+ }
+
+ vmstate_register(NULL, -1, &vfio_container_vmstate, container);
+
+ /* During incoming CPR, divert calls to dma_map. */
+ if (cpr_is_incoming()) {
+ VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+ container->cpr.saved_dma_map = vioc->dma_map;
+ vioc->dma_map = vfio_legacy_cpr_dma_map;
+ }
+
+ migration_add_notifier_mode(&container->cpr.transfer_notifier,
+ vfio_cpr_fail_notifier,
+ MIG_MODE_CPR_TRANSFER);
+ return true;
+}
+
+void vfio_legacy_cpr_unregister_container(VFIOContainer *container)
+{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+
+ migration_remove_notifier(&bcontainer->cpr_reboot_notifier);
+ migrate_del_blocker(&container->cpr.blocker);
+ vmstate_unregister(NULL, &vfio_container_vmstate, container);
+ migration_remove_notifier(&container->cpr.transfer_notifier);
+}
+
+/*
+ * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after
+ * succeeding for others, so the latter have lost their vaddr. Call this
+ * to restore vaddr for a section with a giommu.
+ *
+ * The giommu already exists. Find it and replay it, which calls
+ * vfio_legacy_cpr_dma_map further down the stack.
+ */
+void vfio_cpr_giommu_remap(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ VFIOGuestIOMMU *giommu = NULL;
+ hwaddr as_offset = section->offset_within_address_space;
+ hwaddr iommu_offset = as_offset - section->offset_within_region;
+
+ QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
+ if (giommu->iommu_mr == IOMMU_MEMORY_REGION(section->mr) &&
+ giommu->iommu_offset == iommu_offset) {
+ break;
+ }
+ }
+ g_assert(giommu);
+ memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
+}
+
+/*
+ * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after
+ * succeeding for others, so the latter have lost their vaddr. Call this
+ * to restore vaddr for a section with a RamDiscardManager.
+ *
+ * The ram discard listener already exists. Call its populate function
+ * directly, which calls vfio_legacy_cpr_dma_map.
+ */
+bool vfio_cpr_ram_discard_register_listener(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl =
+ vfio_find_ram_discard_listener(bcontainer, section);
+
+ g_assert(vrdl);
+ return vrdl->listener.notify_populate(&vrdl->listener, section) == 0;
+}
+
+int vfio_cpr_group_get_device_fd(int d, const char *name)
+{
+ const int id = 0;
+ int fd = cpr_find_fd(name, id);
+
+ if (fd < 0) {
+ fd = ioctl(d, VFIO_GROUP_GET_DEVICE_FD, name);
+ if (fd >= 0) {
+ cpr_save_fd(name, id, fd);
+ }
+ }
+ return fd;
+}
+
+static bool same_device(int fd1, int fd2)
+{
+ struct stat st1, st2;
+
+ return !fstat(fd1, &st1) && !fstat(fd2, &st2) && st1.st_dev == st2.st_dev;
+}
+
+bool vfio_cpr_container_match(VFIOContainer *container, VFIOGroup *group,
+ int fd)
+{
+ if (container->fd == fd) {
+ return true;
+ }
+ if (!same_device(container->fd, fd)) {
+ return false;
+ }
+ /*
+ * Same device, different fd. This occurs when the container fd is
+ * cpr_save'd multiple times, once for each groupid, so SCM_RIGHTS
+ * produces duplicates. De-dup it.
+ */
+ cpr_delete_fd("vfio_container_for_group", group->groupid);
+ close(fd);
+ cpr_save_fd("vfio_container_for_group", group->groupid, container->fd);
+ return true;
+}
diff --git a/hw/vfio/cpr.c b/hw/vfio/cpr.c
index 87e51fc..fdbb58e 100644
--- a/hw/vfio/cpr.c
+++ b/hw/vfio/cpr.c
@@ -6,13 +6,15 @@
*/
#include "qemu/osdep.h"
-#include "hw/vfio/vfio-common.h"
-#include "migration/misc.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-cpr.h"
+#include "hw/vfio/pci.h"
+#include "migration/cpr.h"
#include "qapi/error.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
-static int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier,
- MigrationEvent *e, Error **errp)
+int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier,
+ MigrationEvent *e, Error **errp)
{
if (e->type == MIG_EVENT_PRECOPY_SETUP &&
!runstate_check(RUN_STATE_SUSPENDED) && !vm_get_suspended()) {
@@ -37,3 +39,32 @@ void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer)
{
migration_remove_notifier(&bcontainer->cpr_reboot_notifier);
}
+
+/*
+ * The kernel may change non-emulated config bits. Exclude them from the
+ * changed-bits check in get_pci_config_device.
+ */
+static int vfio_cpr_pci_pre_load(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+ PCIDevice *pdev = &vdev->pdev;
+ int size = MIN(pci_config_size(pdev), vdev->config_size);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ pdev->cmask[i] &= vdev->emulated_config_bits[i];
+ }
+
+ return 0;
+}
+
+const VMStateDescription vfio_cpr_pci_vmstate = {
+ .name = "vfio-cpr-pci",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .pre_load = vfio_cpr_pci_pre_load,
+ .needed = cpr_incoming_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/hw/vfio/device.c b/hw/vfio/device.c
new file mode 100644
index 0000000..d91c695
--- /dev/null
+++ b/hw/vfio/device.c
@@ -0,0 +1,576 @@
+/*
+ * VFIO device
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/pci.h"
+#include "hw/hw.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/units.h"
+#include "monitor/monitor.h"
+#include "vfio-helpers.h"
+
+VFIODeviceList vfio_device_list =
+ QLIST_HEAD_INITIALIZER(vfio_device_list);
+
+/*
+ * We want to differentiate hot reset of multiple in-use devices vs
+ * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
+ * handle the case of doing hot resets when there is only a single
+ * device per bus. The in-use here refers to how many VFIODevices are
+ * affected. A hot reset that affects multiple devices, but only a
+ * single in-use device, means that we can call it from our bus
+ * ->reset() callback since the extent is effectively a single
+ * device. This allows us to make use of it in the hotplug path. When
+ * there are multiple in-use devices, we can only trigger the hot
+ * reset during a system reset and thus from our reset handler. We
+ * separate _one vs _multi here so that we don't overlap and do a
+ * double reset on the system reset path where both our reset handler
+ * and ->reset() callback are used. Calling _one() will only do a hot
+ * reset for the single in-use device case; calling _multi() will do
+ * nothing if a _one() would have been sufficient.
+ */
+void vfio_device_reset_handler(void *opaque)
+{
+ VFIODevice *vbasedev;
+
+ trace_vfio_device_reset_handler();
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
+ if (vbasedev->dev->realized) {
+ vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ }
+ }
+
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
+ if (vbasedev->dev->realized && vbasedev->needs_reset) {
+ vbasedev->ops->vfio_hot_reset_multi(vbasedev);
+ }
+ }
+}
+
+/*
+ * Common VFIO interrupt disable
+ */
+void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
+ .index = index,
+ .start = 0,
+ .count = 0,
+ };
+
+ vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
+}
+
+void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
+ .index = index,
+ .start = 0,
+ .count = 1,
+ };
+
+ vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
+}
+
+void vfio_device_irq_mask(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
+ .index = index,
+ .start = 0,
+ .count = 1,
+ };
+
+ vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
+}
+
+static inline const char *action_to_str(int action)
+{
+ switch (action) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ return "MASK";
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ return "UNMASK";
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ return "TRIGGER";
+ default:
+ return "UNKNOWN ACTION";
+ }
+}
+
+static const char *index_to_str(VFIODevice *vbasedev, int index)
+{
+ if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
+ return NULL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ return "INTX";
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ return "MSI";
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ return "MSIX";
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ return "ERR";
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ return "REQ";
+ default:
+ return NULL;
+ }
+}
+
+bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
+ int action, int fd, Error **errp)
+{
+ ERRP_GUARD();
+ g_autofree struct vfio_irq_set *irq_set = NULL;
+ int argsz;
+ const char *name;
+ int32_t *pfd;
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
+ irq_set->index = index;
+ irq_set->start = subindex;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+ *pfd = fd;
+
+ if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) {
+ return true;
+ }
+
+ error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");
+
+ name = index_to_str(vbasedev, index);
+ if (name) {
+ error_prepend(errp, "%s-%d: ", name, subindex);
+ } else {
+ error_prepend(errp, "index %d-%d: ", index, subindex);
+ }
+ error_prepend(errp,
+ "Failed to %s %s eventfd signaling for interrupt ",
+ fd < 0 ? "tear down" : "set up", action_to_str(action));
+ return false;
+}
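
vfio_device_irq_set_signaling() packs a single eventfd into the variable-length data[] tail of struct vfio_irq_set; count says how many descriptors follow and start selects the first subindex they bind to. A standalone sketch of the same layout against a raw device fd (the sketch_* name is illustrative and error handling is kept minimal):

/* Sketch: bind an eventfd to one VFIO IRQ subindex via VFIO_DEVICE_SET_IRQS. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int sketch_irq_bind_eventfd(int device_fd, uint32_t index, uint32_t subindex)
{
    size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
    struct vfio_irq_set *irq_set = calloc(1, argsz);
    int evfd;

    if (!irq_set) {
        return -ENOMEM;
    }

    evfd = eventfd(0, EFD_CLOEXEC);
    if (evfd < 0) {
        free(irq_set);
        return -errno;
    }

    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = index;     /* e.g. VFIO_PCI_MSI_IRQ_INDEX */
    irq_set->start = subindex;  /* first vector to bind */
    irq_set->count = 1;         /* one eventfd follows in data[] */
    memcpy(irq_set->data, &evfd, sizeof(int32_t));

    if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        int err = -errno;
        close(evfd);
        free(irq_set);
        return err;
    }

    free(irq_set);
    return evfd;                /* caller reads this fd when the IRQ fires */
}

Re-issuing the ioctl with count = 0 and VFIO_IRQ_SET_DATA_NONE, as vfio_device_irq_disable() does above, tears the routing down again.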
+
+int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
+ struct vfio_irq_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ info->argsz = sizeof(*info);
+ info->index = index;
+
+ return vbasedev->io_ops->get_irq_info(vbasedev, info);
+}
+
+int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
+ struct vfio_region_info **info)
+{
+ size_t argsz = sizeof(struct vfio_region_info);
+ int fd = -1;
+ int ret;
+
+ /* check cache */
+ if (vbasedev->reginfo[index] != NULL) {
+ *info = vbasedev->reginfo[index];
+ return 0;
+ }
+
+ *info = g_malloc0(argsz);
+
+ (*info)->index = index;
+retry:
+ (*info)->argsz = argsz;
+
+ ret = vbasedev->io_ops->get_region_info(vbasedev, *info, &fd);
+ if (ret != 0) {
+ g_free(*info);
+ *info = NULL;
+ return ret;
+ }
+
+ if ((*info)->argsz > argsz) {
+ argsz = (*info)->argsz;
+ *info = g_realloc(*info, argsz);
+
+ if (fd != -1) {
+ close(fd);
+ fd = -1;
+ }
+
+ goto retry;
+ }
+
+ /* fill cache */
+ vbasedev->reginfo[index] = *info;
+ if (vbasedev->region_fds != NULL) {
+ vbasedev->region_fds[index] = fd;
+ }
+
+ return 0;
+}
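
The retry loop above is the standard VFIO idiom for variable-sized info structures: issue the ioctl once, and if the kernel reports an argsz larger than what was supplied (because a capability chain did not fit), grow the buffer and call again. A stripped-down sketch of just that idiom, without the region-info cache or the region_fds bookkeeping added in this file (sketch_get_region_info is an illustrative name):

/* Sketch: grow-and-retry for VFIO_DEVICE_GET_REGION_INFO. Caller frees *info. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int sketch_get_region_info(int device_fd, uint32_t index,
                           struct vfio_region_info **info)
{
    uint32_t argsz = sizeof(struct vfio_region_info);
    struct vfio_region_info *buf = calloc(1, argsz);

    if (!buf) {
        return -ENOMEM;
    }
    buf->index = index;

    for (;;) {
        struct vfio_region_info *bigger;

        buf->argsz = argsz;
        if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, buf)) {
            int err = -errno;
            free(buf);
            return err;
        }
        if (buf->argsz <= argsz) {
            break;                  /* everything fitted, we are done */
        }
        argsz = buf->argsz;         /* capability chain needs more room */
        bigger = realloc(buf, argsz);
        if (!bigger) {
            free(buf);
            return -ENOMEM;
        }
        buf = bigger;
    }

    *info = buf;
    return 0;
}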
+
+int vfio_device_get_region_fd(VFIODevice *vbasedev, int index)
+{
+ return vbasedev->region_fds ?
+ vbasedev->region_fds[index] :
+ vbasedev->fd;
+}
+
+int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
+ uint32_t subtype, struct vfio_region_info **info)
+{
+ int i;
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_type *cap_type;
+
+ if (vfio_device_get_region_info(vbasedev, i, info)) {
+ continue;
+ }
+
+ hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
+ if (!hdr) {
+ continue;
+ }
+
+ cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
+
+ trace_vfio_device_get_region_info_type(vbasedev->name, i,
+ cap_type->type, cap_type->subtype);
+
+ if (cap_type->type == type && cap_type->subtype == subtype) {
+ return 0;
+ }
+ }
+
+ *info = NULL;
+ return -ENODEV;
+}
+
+bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
+{
+ struct vfio_region_info *info = NULL;
+ bool ret = false;
+
+ if (!vfio_device_get_region_info(vbasedev, region, &info)) {
+ if (vfio_get_region_info_cap(info, cap_type)) {
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
+{
+ ERRP_GUARD();
+ struct stat st;
+
+ if (vbasedev->fd < 0) {
+ if (stat(vbasedev->sysfsdev, &st) < 0) {
+ error_setg_errno(errp, errno, "no such host device");
+ error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
+ return false;
+ }
+ /* User may specify a name, e.g. a VFIO platform device */
+ if (!vbasedev->name) {
+ vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
+ }
+ } else {
+ if (!vbasedev->iommufd) {
+ error_setg(errp, "Use FD passing only with iommufd backend");
+ return false;
+ }
+ /*
+ * Give a name with fd so any function printing out vbasedev->name
+ * will not break.
+ */
+ if (!vbasedev->name) {
+ vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
+ }
+ }
+
+ return true;
+}
+
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
+{
+ ERRP_GUARD();
+ int fd = monitor_fd_param(monitor_cur(), str, errp);
+
+ if (fd < 0) {
+ error_prepend(errp, "Could not parse remote object fd %s:", str);
+ return;
+ }
+ vbasedev->fd = fd;
+}
+
+static VFIODeviceIOOps vfio_device_io_ops_ioctl;
+
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard)
+{
+ vbasedev->type = type;
+ vbasedev->ops = ops;
+ vbasedev->io_ops = &vfio_device_io_ops_ioctl;
+ vbasedev->dev = dev;
+ vbasedev->fd = -1;
+ vbasedev->use_region_fds = false;
+
+ vbasedev->ram_block_discard_allowed = ram_discard;
+}
+
+int vfio_device_get_aw_bits(VFIODevice *vdev)
+{
+ /*
+ * iova_ranges is a sorted list. For old kernels that support VFIO but
+ * do not support querying iova ranges, iova_ranges is NULL; in that
+ * case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64) is returned.
+ */
+ GList *l = g_list_last(vdev->bcontainer->iova_ranges);
+
+ if (l) {
+ Range *range = l->data;
+ return range_get_last_bit(range) + 1;
+ }
+
+ return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
+}
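
vfio_device_get_aw_bits() reduces to "most significant bit of the highest usable IOVA, plus one". A tiny standalone check of that arithmetic; the 40-bit example value is illustrative only:

/* Sketch: address width in bits from the top of the highest IOVA range. */
#include <stdint.h>
#include <stdio.h>

static unsigned iova_aw_bits(uint64_t last_iova)
{
    /* Index of the highest set bit, plus one; 64 - clz does the same job
     * as range_get_last_bit() + 1 in the code above. */
    return last_iova ? 64 - __builtin_clzll(last_iova) : 0;
}

int main(void)
{
    /* A 40-bit IOMMU reports its last usable IOVA as 0xffffffffff. */
    printf("aw_bits = %u\n", iova_aw_bits(0xffffffffffULL)); /* prints 40 */
    return 0;
}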
+
+bool vfio_device_is_mdev(VFIODevice *vbasedev)
+{
+ g_autofree char *subsys = NULL;
+ g_autofree char *tmp = NULL;
+
+ if (!vbasedev->sysfsdev) {
+ return false;
+ }
+
+ tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
+ subsys = realpath(tmp, NULL);
+ return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
+}
+
+bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
+ const char *typename, Error **errp)
+{
+ HostIOMMUDevice *hiod;
+
+ if (vbasedev->mdev) {
+ return true;
+ }
+
+ hiod = HOST_IOMMU_DEVICE(object_new(typename));
+
+ if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) {
+ object_unref(hiod);
+ return false;
+ }
+
+ vbasedev->hiod = hiod;
+ return true;
+}
+
+VFIODevice *vfio_get_vfio_device(Object *obj)
+{
+ if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
+ return &VFIO_PCI_BASE(obj)->vbasedev;
+ } else {
+ return NULL;
+ }
+}
+
+bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
+ VFIODevice *vbasedev, AddressSpace *as,
+ Error **errp)
+{
+ const VFIOIOMMUClass *ops =
+ VFIO_IOMMU_CLASS(object_class_by_name(iommu_type));
+
+ assert(ops);
+
+ return ops->attach_device(name, vbasedev, as, errp);
+}
+
+bool vfio_device_attach(char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp)
+{
+ const char *iommu_type = vbasedev->iommufd ?
+ TYPE_VFIO_IOMMU_IOMMUFD :
+ TYPE_VFIO_IOMMU_LEGACY;
+
+ return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev,
+ as, errp);
+}
+
+void vfio_device_detach(VFIODevice *vbasedev)
+{
+ if (!vbasedev->bcontainer) {
+ return;
+ }
+ VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev);
+}
+
+void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
+ struct vfio_device_info *info)
+{
+ vbasedev->num_irqs = info->num_irqs;
+ vbasedev->num_regions = info->num_regions;
+ vbasedev->flags = info->flags;
+ vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);
+
+ vbasedev->bcontainer = bcontainer;
+ QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
+
+ QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
+
+ vbasedev->reginfo = g_new0(struct vfio_region_info *,
+ vbasedev->num_regions);
+ if (vbasedev->use_region_fds) {
+ vbasedev->region_fds = g_new0(int, vbasedev->num_regions);
+ }
+}
+
+void vfio_device_unprepare(VFIODevice *vbasedev)
+{
+ int i;
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ g_free(vbasedev->reginfo[i]);
+ if (vbasedev->region_fds != NULL && vbasedev->region_fds[i] != -1) {
+ close(vbasedev->region_fds[i]);
+ }
+
+ }
+
+ g_clear_pointer(&vbasedev->reginfo, g_free);
+ g_clear_pointer(&vbasedev->region_fds, g_free);
+
+ QLIST_REMOVE(vbasedev, container_next);
+ QLIST_REMOVE(vbasedev, global_next);
+ vbasedev->bcontainer = NULL;
+}
+
+/*
+ * Traditional ioctl() based io
+ */
+
+static int vfio_device_io_device_feature(VFIODevice *vbasedev,
+ struct vfio_device_feature *feature)
+{
+ int ret;
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static int vfio_device_io_get_region_info(VFIODevice *vbasedev,
+ struct vfio_region_info *info,
+ int *fd)
+{
+ int ret;
+
+ *fd = -1;
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static int vfio_device_io_get_irq_info(VFIODevice *vbasedev,
+ struct vfio_irq_info *info)
+{
+ int ret;
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static int vfio_device_io_set_irqs(VFIODevice *vbasedev,
+ struct vfio_irq_set *irqs)
+{
+ int ret;
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
+ off_t off, uint32_t size, void *data)
+{
+ struct vfio_region_info *info;
+ int ret;
+
+ ret = vfio_device_get_region_info(vbasedev, index, &info);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = pread(vbasedev->fd, data, size, info->offset + off);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
+ off_t off, uint32_t size, void *data,
+ bool post)
+{
+ struct vfio_region_info *info;
+ int ret;
+
+ ret = vfio_device_get_region_info(vbasedev, index, &info);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = pwrite(vbasedev->fd, data, size, info->offset + off);
+
+ return ret < 0 ? -errno : ret;
+}
+
+static VFIODeviceIOOps vfio_device_io_ops_ioctl = {
+ .device_feature = vfio_device_io_device_feature,
+ .get_region_info = vfio_device_io_get_region_info,
+ .get_irq_info = vfio_device_io_get_irq_info,
+ .set_irqs = vfio_device_io_set_irqs,
+ .region_read = vfio_device_io_region_read,
+ .region_write = vfio_device_io_region_write,
+};
diff --git a/hw/vfio/display.c b/hw/vfio/display.c
index ea87830..9c6f5aa 100644
--- a/hw/vfio/display.c
+++ b/hw/vfio/display.c
@@ -16,9 +16,9 @@
#include "qemu/error-report.h"
#include "hw/display/edid.h"
-#include "ui/console.h"
#include "qapi/error.h"
#include "pci.h"
+#include "vfio-display.h"
#include "trace.h"
#ifndef DRM_PLANE_TYPE_PRIMARY
@@ -104,7 +104,6 @@ static void vfio_display_edid_update(VFIOPCIDevice *vdev, bool enabled,
err:
trace_vfio_display_edid_write_error();
- return;
}
static void vfio_display_edid_ui_info(void *opaque, uint32_t idx,
@@ -130,10 +129,10 @@ static bool vfio_display_edid_init(VFIOPCIDevice *vdev, Error **errp)
int fd = vdev->vbasedev.fd;
int ret;
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_GFX,
- VFIO_REGION_SUBTYPE_GFX_EDID,
- &dpy->edid_info);
+ ret = vfio_device_get_region_info_type(&vdev->vbasedev,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &dpy->edid_info);
if (ret) {
/* Failed to get GFX edid info, allow to go through without edid. */
return true;
@@ -214,6 +213,7 @@ static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev,
struct vfio_device_gfx_plane_info plane;
VFIODMABuf *dmabuf;
int fd, ret;
+ uint32_t offset = 0;
memset(&plane, 0, sizeof(plane));
plane.argsz = sizeof(plane);
@@ -246,10 +246,10 @@ static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev,
dmabuf = g_new0(VFIODMABuf, 1);
dmabuf->dmabuf_id = plane.dmabuf_id;
- dmabuf->buf = qemu_dmabuf_new(plane.width, plane.height,
- plane.stride, 0, 0, plane.width,
+ dmabuf->buf = qemu_dmabuf_new(plane.width, plane.height, &offset,
+ &plane.stride, 0, 0, plane.width,
plane.height, plane.drm_format,
- plane.drm_format_mod, fd, false, false);
+ plane.drm_format_mod, &fd, 1, false, false);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
vfio_display_update_cursor(dmabuf, &plane);
diff --git a/hw/vfio/helpers.c b/hw/vfio/helpers.c
index b14edd4..d0dbab1 100644
--- a/hw/vfio/helpers.c
+++ b/hw/vfio/helpers.c
@@ -22,240 +22,11 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
-#include "hw/vfio/vfio-common.h"
+#include "system/kvm.h"
+#include "hw/vfio/vfio-device.h"
#include "hw/hw.h"
-#include "trace.h"
#include "qapi/error.h"
-#include "qemu/error-report.h"
-#include "monitor/monitor.h"
-
-/*
- * Common VFIO interrupt disable
- */
-void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
- .index = index,
- .start = 0,
- .count = 0,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
- .index = index,
- .start = 0,
- .count = 1,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
- .index = index,
- .start = 0,
- .count = 1,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-static inline const char *action_to_str(int action)
-{
- switch (action) {
- case VFIO_IRQ_SET_ACTION_MASK:
- return "MASK";
- case VFIO_IRQ_SET_ACTION_UNMASK:
- return "UNMASK";
- case VFIO_IRQ_SET_ACTION_TRIGGER:
- return "TRIGGER";
- default:
- return "UNKNOWN ACTION";
- }
-}
-
-static const char *index_to_str(VFIODevice *vbasedev, int index)
-{
- if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
- return NULL;
- }
-
- switch (index) {
- case VFIO_PCI_INTX_IRQ_INDEX:
- return "INTX";
- case VFIO_PCI_MSI_IRQ_INDEX:
- return "MSI";
- case VFIO_PCI_MSIX_IRQ_INDEX:
- return "MSIX";
- case VFIO_PCI_ERR_IRQ_INDEX:
- return "ERR";
- case VFIO_PCI_REQ_IRQ_INDEX:
- return "REQ";
- default:
- return NULL;
- }
-}
-
-bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
- int action, int fd, Error **errp)
-{
- ERRP_GUARD();
- g_autofree struct vfio_irq_set *irq_set = NULL;
- int argsz;
- const char *name;
- int32_t *pfd;
-
- argsz = sizeof(*irq_set) + sizeof(*pfd);
-
- irq_set = g_malloc0(argsz);
- irq_set->argsz = argsz;
- irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
- irq_set->index = index;
- irq_set->start = subindex;
- irq_set->count = 1;
- pfd = (int32_t *)&irq_set->data;
- *pfd = fd;
-
- if (!ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
- return true;
- }
-
- error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");
-
- name = index_to_str(vbasedev, index);
- if (name) {
- error_prepend(errp, "%s-%d: ", name, subindex);
- } else {
- error_prepend(errp, "index %d-%d: ", index, subindex);
- }
- error_prepend(errp,
- "Failed to %s %s eventfd signaling for interrupt ",
- fd < 0 ? "tear down" : "set up", action_to_str(action));
- return false;
-}
-
-/*
- * IO Port/MMIO - Beware of the endians, VFIO is always little endian
- */
-void vfio_region_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
-{
- VFIORegion *region = opaque;
- VFIODevice *vbasedev = region->vbasedev;
- union {
- uint8_t byte;
- uint16_t word;
- uint32_t dword;
- uint64_t qword;
- } buf;
-
- switch (size) {
- case 1:
- buf.byte = data;
- break;
- case 2:
- buf.word = cpu_to_le16(data);
- break;
- case 4:
- buf.dword = cpu_to_le32(data);
- break;
- case 8:
- buf.qword = cpu_to_le64(data);
- break;
- default:
- hw_error("vfio: unsupported write size, %u bytes", size);
- break;
- }
-
- if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
- error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
- ",%d) failed: %m",
- __func__, vbasedev->name, region->nr,
- addr, data, size);
- }
-
- trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
-
- /*
- * A read or write to a BAR always signals an INTx EOI. This will
- * do nothing if not pending (including not in INTx mode). We assume
- * that a BAR access is in response to an interrupt and that BAR
- * accesses will service the interrupt. Unfortunately, we don't know
- * which access will service the interrupt, so we're potentially
- * getting quite a few host interrupts per guest interrupt.
- */
- vbasedev->ops->vfio_eoi(vbasedev);
-}
-
-uint64_t vfio_region_read(void *opaque,
- hwaddr addr, unsigned size)
-{
- VFIORegion *region = opaque;
- VFIODevice *vbasedev = region->vbasedev;
- union {
- uint8_t byte;
- uint16_t word;
- uint32_t dword;
- uint64_t qword;
- } buf;
- uint64_t data = 0;
-
- if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
- error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
- __func__, vbasedev->name, region->nr,
- addr, size);
- return (uint64_t)-1;
- }
- switch (size) {
- case 1:
- data = buf.byte;
- break;
- case 2:
- data = le16_to_cpu(buf.word);
- break;
- case 4:
- data = le32_to_cpu(buf.dword);
- break;
- case 8:
- data = le64_to_cpu(buf.qword);
- break;
- default:
- hw_error("vfio: unsupported read size, %u bytes", size);
- break;
- }
-
- trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
-
- /* Same as write above */
- vbasedev->ops->vfio_eoi(vbasedev);
-
- return data;
-}
-
-const MemoryRegionOps vfio_region_ops = {
- .read = vfio_region_read,
- .write = vfio_region_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 8,
- },
- .impl = {
- .min_access_size = 1,
- .max_access_size = 8,
- },
-};
+#include "vfio-helpers.h"
int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
{
@@ -304,374 +75,126 @@ vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
return vfio_get_cap((void *)info, info->cap_offset, id);
}
-static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
- struct vfio_region_info *info)
+struct vfio_info_cap_header *
+vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
- struct vfio_info_cap_header *hdr;
- struct vfio_region_info_cap_sparse_mmap *sparse;
- int i, j;
-
- hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
- if (!hdr) {
- return -ENODEV;
- }
-
- sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
-
- trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
- region->nr, sparse->nr_areas);
-
- region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
-
- for (i = 0, j = 0; i < sparse->nr_areas; i++) {
- if (sparse->areas[i].size) {
- trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
- sparse->areas[i].offset +
- sparse->areas[i].size - 1);
- region->mmaps[j].offset = sparse->areas[i].offset;
- region->mmaps[j].size = sparse->areas[i].size;
- j++;
- }
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+ return NULL;
}
- region->nr_mmaps = j;
- region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
-
- return 0;
+ return vfio_get_cap((void *)info, info->cap_offset, id);
}
-int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
- int index, const char *name)
+bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
+ unsigned int *avail)
{
- g_autofree struct vfio_region_info *info = NULL;
- int ret;
+ struct vfio_info_cap_header *hdr;
+ struct vfio_iommu_type1_info_dma_avail *cap;
- ret = vfio_get_region_info(vbasedev, index, &info);
- if (ret) {
- return ret;
+ /* If the capability cannot be found, assume no DMA limiting */
+ hdr = vfio_get_iommu_type1_info_cap(info,
+ VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
+ if (!hdr) {
+ return false;
}
- region->vbasedev = vbasedev;
- region->flags = info->flags;
- region->size = info->size;
- region->fd_offset = info->offset;
- region->nr = index;
-
- if (region->size) {
- region->mem = g_new0(MemoryRegion, 1);
- memory_region_init_io(region->mem, obj, &vfio_region_ops,
- region, name, region->size);
-
- if (!vbasedev->no_mmap &&
- region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
-
- ret = vfio_setup_region_sparse_mmaps(region, info);
-
- if (ret) {
- region->nr_mmaps = 1;
- region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
- region->mmaps[0].offset = 0;
- region->mmaps[0].size = region->size;
- }
- }
+ if (avail != NULL) {
+ cap = (void *) hdr;
+ *avail = cap->avail;
}
- trace_vfio_region_setup(vbasedev->name, index, name,
- region->flags, region->fd_offset, region->size);
- return 0;
+ return true;
}
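
Both helpers above rely on vfio_get_cap(), which walks the capability chain hanging off cap_offset: every vfio_info_cap_header carries an id and a next offset relative to the start of the info buffer, and next == 0 terminates the chain. A hedged sketch of that walk (sketch_find_cap is an illustrative stand-in; the real vfio_get_cap() lives elsewhere in this file and may differ in detail):

/* Sketch: walk a VFIO capability chain rooted at cap_offset. */
#include <stdint.h>
#include <stddef.h>
#include <linux/vfio.h>

struct vfio_info_cap_header *
sketch_find_cap(void *info_base, uint32_t cap_offset, uint16_t id)
{
    /* next == 0 terminates the chain; offsets are relative to info_base. */
    while (cap_offset) {
        struct vfio_info_cap_header *hdr =
            (struct vfio_info_cap_header *)((uint8_t *)info_base + cap_offset);

        if (hdr->id == id) {
            return hdr;
        }
        cap_offset = hdr->next;
    }
    return NULL;
}

Applied to a struct vfio_iommu_type1_info buffer, id would be VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL, exactly as vfio_get_info_dma_avail() does above.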
-static void vfio_subregion_unmap(VFIORegion *region, int index)
-{
- trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
- region->mmaps[index].offset,
- region->mmaps[index].offset +
- region->mmaps[index].size - 1);
- memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
- munmap(region->mmaps[index].mmap, region->mmaps[index].size);
- object_unparent(OBJECT(&region->mmaps[index].mem));
- region->mmaps[index].mmap = NULL;
-}
+#ifdef CONFIG_KVM
+/*
+ * We have a single VFIO pseudo device per KVM VM. Once created it lives
+ * for the life of the VM. Closing the file descriptor only drops our
+ * reference to it and the device's reference to kvm. Therefore once
+ * initialized, this file descriptor is only released on QEMU exit and
+ * we'll re-use it should another vfio device be attached before then.
+ */
+int vfio_kvm_device_fd = -1;
+#endif
-int vfio_region_mmap(VFIORegion *region)
+int vfio_kvm_device_add_fd(int fd, Error **errp)
{
- int i, prot = 0;
- char *name;
+#ifdef CONFIG_KVM
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_ADD,
+ .addr = (uint64_t)(unsigned long)&fd,
+ };
- if (!region->mem) {
+ if (!kvm_enabled()) {
return 0;
}
- prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
- prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
-
- for (i = 0; i < region->nr_mmaps; i++) {
- region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
- MAP_SHARED, region->vbasedev->fd,
- region->fd_offset +
- region->mmaps[i].offset);
- if (region->mmaps[i].mmap == MAP_FAILED) {
- int ret = -errno;
-
- trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
- region->fd_offset +
- region->mmaps[i].offset,
- region->fd_offset +
- region->mmaps[i].offset +
- region->mmaps[i].size - 1, ret);
-
- region->mmaps[i].mmap = NULL;
-
- for (i--; i >= 0; i--) {
- vfio_subregion_unmap(region, i);
- }
-
- return ret;
- }
-
- name = g_strdup_printf("%s mmaps[%d]",
- memory_region_name(region->mem), i);
- memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
- memory_region_owner(region->mem),
- name, region->mmaps[i].size,
- region->mmaps[i].mmap);
- g_free(name);
- memory_region_add_subregion(region->mem, region->mmaps[i].offset,
- &region->mmaps[i].mem);
-
- trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
- region->mmaps[i].offset,
- region->mmaps[i].offset +
- region->mmaps[i].size - 1);
- }
-
- return 0;
-}
-
-void vfio_region_unmap(VFIORegion *region)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
+ if (vfio_kvm_device_fd < 0) {
+ struct kvm_create_device cd = {
+ .type = KVM_DEV_TYPE_VFIO,
+ };
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- vfio_subregion_unmap(region, i);
+ if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
+ error_setg_errno(errp, errno, "Failed to create KVM VFIO device");
+ return -errno;
}
- }
-}
-
-void vfio_region_exit(VFIORegion *region)
-{
- int i;
- if (!region->mem) {
- return;
+ vfio_kvm_device_fd = cd.fd;
}
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
- }
- }
-
- trace_vfio_region_exit(region->vbasedev->name, region->nr);
-}
-
-void vfio_region_finalize(VFIORegion *region)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- munmap(region->mmaps[i].mmap, region->mmaps[i].size);
- object_unparent(OBJECT(&region->mmaps[i].mem));
- }
- }
-
- object_unparent(OBJECT(region->mem));
-
- g_free(region->mem);
- g_free(region->mmaps);
-
- trace_vfio_region_finalize(region->vbasedev->name, region->nr);
-
- region->mem = NULL;
- region->mmaps = NULL;
- region->nr_mmaps = 0;
- region->size = 0;
- region->flags = 0;
- region->nr = 0;
-}
-
-void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- memory_region_set_enabled(&region->mmaps[i].mem, enabled);
- }
- }
-
- trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
- enabled);
-}
-
-int vfio_get_region_info(VFIODevice *vbasedev, int index,
- struct vfio_region_info **info)
-{
- size_t argsz = sizeof(struct vfio_region_info);
-
- *info = g_malloc0(argsz);
-
- (*info)->index = index;
-retry:
- (*info)->argsz = argsz;
-
- if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
- g_free(*info);
- *info = NULL;
+ if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
+ error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device",
+ fd);
return -errno;
}
-
- if ((*info)->argsz > argsz) {
- argsz = (*info)->argsz;
- *info = g_realloc(*info, argsz);
-
- goto retry;
- }
-
+#endif
return 0;
}
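
The pseudo device is created once per VM with KVM_CREATE_DEVICE and afterwards only ever fed file descriptors through KVM_SET_DEVICE_ATTR. A standalone sketch of that sequence, assuming linux/kvm.h new enough to define KVM_DEV_VFIO_FILE / KVM_DEV_VFIO_FILE_ADD (older headers expose the equivalent KVM_DEV_VFIO_GROUP names); the sketch_* helpers are illustrative:

/* Sketch: create the per-VM KVM VFIO device and register a vfio fd with it. */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns the pseudo-device fd on success, negative errno on failure. */
int sketch_kvm_vfio_device_create(int vm_fd)
{
    struct kvm_create_device cd = {
        .type = KVM_DEV_TYPE_VFIO,
    };

    if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
        return -errno;
    }
    return cd.fd;
}

int sketch_kvm_vfio_device_add_fd(int kvm_vfio_fd, int vfio_fd)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_FILE,
        .attr = KVM_DEV_VFIO_FILE_ADD,
        .addr = (uint64_t)(unsigned long)&vfio_fd,
    };

    return ioctl(kvm_vfio_fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
}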
-int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
- uint32_t subtype, struct vfio_region_info **info)
+int vfio_kvm_device_del_fd(int fd, Error **errp)
{
- int i;
-
- for (i = 0; i < vbasedev->num_regions; i++) {
- struct vfio_info_cap_header *hdr;
- struct vfio_region_info_cap_type *cap_type;
-
- if (vfio_get_region_info(vbasedev, i, info)) {
- continue;
- }
-
- hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
- if (!hdr) {
- g_free(*info);
- continue;
- }
-
- cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
-
- trace_vfio_get_dev_region(vbasedev->name, i,
- cap_type->type, cap_type->subtype);
-
- if (cap_type->type == type && cap_type->subtype == subtype) {
- return 0;
- }
+#ifdef CONFIG_KVM
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_DEL,
+ .addr = (uint64_t)(unsigned long)&fd,
+ };
- g_free(*info);
+ if (vfio_kvm_device_fd < 0) {
+ error_setg(errp, "KVM VFIO device isn't created yet");
+ return -EINVAL;
}
- *info = NULL;
- return -ENODEV;
-}
-
-bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
-{
- g_autofree struct vfio_region_info *info = NULL;
- bool ret = false;
-
- if (!vfio_get_region_info(vbasedev, region, &info)) {
- if (vfio_get_region_info_cap(info, cap_type)) {
- ret = true;
- }
+ if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
+ error_setg_errno(errp, errno,
+ "Failed to remove fd %d from KVM VFIO device", fd);
+ return -errno;
}
-
- return ret;
+#endif
+ return 0;
}
-bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
+struct vfio_device_info *vfio_get_device_info(int fd)
{
- ERRP_GUARD();
- struct stat st;
-
- if (vbasedev->fd < 0) {
- if (stat(vbasedev->sysfsdev, &st) < 0) {
- error_setg_errno(errp, errno, "no such host device");
- error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
- return false;
- }
- /* User may specify a name, e.g: VFIO platform device */
- if (!vbasedev->name) {
- vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
- }
- } else {
- if (!vbasedev->iommufd) {
- error_setg(errp, "Use FD passing only with iommufd backend");
- return false;
- }
- /*
- * Give a name with fd so any function printing out vbasedev->name
- * will not break.
- */
- if (!vbasedev->name) {
- vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
- }
- }
+ struct vfio_device_info *info;
+ uint32_t argsz = sizeof(*info);
- return true;
-}
+ info = g_malloc0(argsz);
-void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
-{
- ERRP_GUARD();
- int fd = monitor_fd_param(monitor_cur(), str, errp);
+retry:
+ info->argsz = argsz;
- if (fd < 0) {
- error_prepend(errp, "Could not parse remote object fd %s:", str);
- return;
+ if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) {
+ g_free(info);
+ return NULL;
}
- vbasedev->fd = fd;
-}
-
-void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
- DeviceState *dev, bool ram_discard)
-{
- vbasedev->type = type;
- vbasedev->ops = ops;
- vbasedev->dev = dev;
- vbasedev->fd = -1;
-
- vbasedev->ram_block_discard_allowed = ram_discard;
-}
-int vfio_device_get_aw_bits(VFIODevice *vdev)
-{
- /*
- * iova_ranges is a sorted list. For old kernels that support
- * VFIO but not support query of iova ranges, iova_ranges is NULL,
- * in this case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX(64) is returned.
- */
- GList *l = g_list_last(vdev->bcontainer->iova_ranges);
-
- if (l) {
- Range *range = l->data;
- return range_get_last_bit(range) + 1;
+ if (info->argsz > argsz) {
+ argsz = info->argsz;
+ info = g_realloc(info, argsz);
+ goto retry;
}
- return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
+ return info;
}
diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c
index d320d03..e7a9d1f 100644
--- a/hw/vfio/igd.c
+++ b/hw/vfio/igd.c
@@ -14,9 +14,12 @@
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+#include "hw/boards.h"
#include "hw/hw.h"
#include "hw/nvram/fw_cfg.h"
#include "pci.h"
+#include "pci-quirks.h"
#include "trace.h"
/*
@@ -59,49 +62,151 @@
*/
static int igd_gen(VFIOPCIDevice *vdev)
{
- if ((vdev->device_id & 0xfff) == 0xa84) {
- return 8; /* Broxton */
+ /*
+ * Device IDs for Broxton/Apollo Lake are 0x0a84, 0x1a84, 0x1a85, 0x5a84
+ * and 0x5a85, so match bits 11:1 here.
+ * Prefix 0x0a is taken by Haswell, so this rule must be matched first.
+ */
+ if ((vdev->device_id & 0xffe) == 0xa84) {
+ return 9;
}
switch (vdev->device_id & 0xff00) {
- /* Old, untested, unavailable, unknown */
- case 0x0000:
- case 0x2500:
- case 0x2700:
- case 0x2900:
- case 0x2a00:
- case 0x2e00:
- case 0x3500:
- case 0xa000:
- return -1;
- /* SandyBridge, IvyBridge, ValleyView, Haswell */
- case 0x0100:
- case 0x0400:
- case 0x0a00:
- case 0x0c00:
- case 0x0d00:
- case 0x0f00:
+ case 0x0100: /* SandyBridge, IvyBridge */
return 6;
- /* BroadWell, CherryView, SkyLake, KabyLake */
- case 0x1600:
- case 0x1900:
- case 0x2200:
- case 0x5900:
+ case 0x0400: /* Haswell */
+ case 0x0a00: /* Haswell */
+ case 0x0c00: /* Haswell */
+ case 0x0d00: /* Haswell */
+ case 0x0f00: /* Valleyview/Bay Trail */
+ return 7;
+ case 0x1600: /* Broadwell */
+ case 0x2200: /* Cherryview */
return 8;
+ case 0x1900: /* Skylake */
+ case 0x3100: /* Gemini Lake */
+ case 0x5900: /* Kaby Lake */
+ case 0x3e00: /* Coffee Lake */
+ case 0x9B00: /* Comet Lake */
+ return 9;
+ case 0x8A00: /* Ice Lake */
+ case 0x4500: /* Elkhart Lake */
+ case 0x4E00: /* Jasper Lake */
+ return 11;
+ case 0x9A00: /* Tiger Lake */
+ case 0x4C00: /* Rocket Lake */
+ case 0x4600: /* Alder Lake */
+ case 0xA700: /* Raptor Lake */
+ return 12;
}
- return 8; /* Assume newer is compatible */
+ /*
+ * Unfortunately, Intel changes its specification quite often. This makes
+ * it impossible to use a suitable default value for unknown devices.
+ * Return -1 so that no generation-specific quirks are applied.
+ */
+ return -1;
}
-typedef struct VFIOIGDQuirk {
- struct VFIOPCIDevice *vdev;
- uint32_t index;
- uint32_t bdsm;
-} VFIOIGDQuirk;
-
+#define IGD_ASLS 0xfc /* ASL Storage Register */
#define IGD_GMCH 0x50 /* Graphics Control Register */
#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */
+#define IGD_BDSM_GEN11 0xc0 /* Base Data of Stolen Memory of gen 11 and later */
+
+#define IGD_GMCH_GEN6_GMS_SHIFT 3 /* SNB_GMCH in i915 */
+#define IGD_GMCH_GEN6_GMS_MASK 0x1f
+#define IGD_GMCH_GEN8_GMS_SHIFT 8 /* BDW_GMCH in i915 */
+#define IGD_GMCH_GEN8_GMS_MASK 0xff
+static uint64_t igd_stolen_memory_size(int gen, uint32_t gmch)
+{
+ uint64_t gms;
+
+ if (gen < 8) {
+ gms = (gmch >> IGD_GMCH_GEN6_GMS_SHIFT) & IGD_GMCH_GEN6_GMS_MASK;
+ } else {
+ gms = (gmch >> IGD_GMCH_GEN8_GMS_SHIFT) & IGD_GMCH_GEN8_GMS_MASK;
+ }
+
+ if (gen < 9) {
+ return gms * 32 * MiB;
+ } else {
+ if (gms < 0xf0) {
+ return gms * 32 * MiB;
+ } else {
+ return (gms - 0xf0 + 1) * 4 * MiB;
+ }
+ }
+}
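
igd_stolen_memory_size() decodes the GMS field of the GMCH register: bits 7:3 on gen < 8 and bits 15:8 on gen >= 8, counted in 32 MiB units, except that from gen 9 on the values 0xf0-0xfe switch to 4 MiB units. A small standalone check of the decode; the register value in main() is illustrative only:

/* Sketch: decode IGD stolen-memory size from a GMCH register value. */
#include <stdint.h>
#include <stdio.h>

#define MIB (1024ULL * 1024ULL)

static uint64_t sketch_igd_stolen_size(int gen, uint32_t gmch)
{
    uint64_t gms = (gen < 8) ? (gmch >> 3) & 0x1f   /* SNB_GMCH layout */
                             : (gmch >> 8) & 0xff;  /* BDW_GMCH layout */

    if (gen >= 9 && gms >= 0xf0) {
        return (gms - 0xf0 + 1) * 4 * MIB;          /* 4 MiB granularity */
    }
    return gms * 32 * MIB;                          /* 32 MiB granularity */
}

int main(void)
{
    /* gen 9, GMS field 0x05 -> 5 * 32 MiB = 160 MiB of stolen memory. */
    printf("%llu MiB\n",
           (unsigned long long)(sketch_igd_stolen_size(9, 0x05 << 8) / MIB));
    return 0;
}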
+
+/*
+ * The OpRegion includes the Video BIOS Table, which seems important for
+ * telling the driver what sort of outputs it has. Without this, the device
+ * may work in the guest, but we may not get output. This also requires BIOS
+ * support to reserve and populate a section of guest memory sufficient for
+ * the table and to write the base address of that memory to the ASLS register
+ * of the IGD device.
+ */
+static bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info,
+ Error **errp)
+{
+ int ret;
+
+ vdev->igd_opregion = g_malloc0(info->size);
+ ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
+ info->size, info->offset);
+ if (ret != info->size) {
+ error_setg(errp, "failed to read IGD OpRegion");
+ g_free(vdev->igd_opregion);
+ vdev->igd_opregion = NULL;
+ return false;
+ }
+
+ /*
+ * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
+ * allocate 32bit reserved memory for, copy these contents into, and write
+ * the reserved memory base address to the device ASLS register at 0xFC.
+ * Alignment of this reserved region seems flexible, but using a 4k page
+ * alignment seems to work well. This interface assumes a single IGD
+ * device, which may be at VM address 00:02.0 in legacy mode or another
+ * address in UPT mode.
+ *
+ * NB, there may be future use cases discovered where the VM should have
+ * direct interaction with the host OpRegion, in which case the write to
+ * the ASLS register would trigger MemoryRegion setup to enable that.
+ */
+ fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
+ vdev->igd_opregion, info->size);
+
+ trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
+
+ return true;
+}
+
+static bool vfio_pci_igd_opregion_detect(VFIOPCIDevice *vdev,
+ struct vfio_region_info **opregion)
+{
+ int ret;
+
+ ret = vfio_device_get_region_info_type(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, opregion);
+ if (ret) {
+ return false;
+ }
+
+ /* Hotplugging is not supported for opregion access */
+ if (vdev->pdev.qdev.hotplugged) {
+ warn_report("IGD device detected, but OpRegion is not supported "
+ "on hotplugged device.");
+ return false;
+ }
+
+ return true;
+}
/*
* The rather short list of registers that we copy from the host devices.
@@ -188,7 +293,8 @@ static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp)
}
}
-static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data)
+static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -204,7 +310,7 @@ static const TypeInfo vfio_pci_igd_lpc_bridge_info = {
.name = "vfio-pci-igd-lpc-bridge",
.parent = TYPE_PCI_DEVICE,
.class_init = vfio_pci_igd_lpc_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -239,374 +345,373 @@ static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev,
return ret;
}
-/*
- * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE
- * entry, older IGDs use 2MB and 32bit. Each PTE maps a 4k page. Therefore
- * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index
- * for programming the GTT.
- *
- * See linux:include/drm/i915_drm.h for shift and mask values.
- */
-static int vfio_igd_gtt_max(VFIOPCIDevice *vdev)
+static bool vfio_pci_igd_setup_lpc_bridge(VFIOPCIDevice *vdev, Error **errp)
{
- uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
- int ggms, gen = igd_gen(vdev);
+ struct vfio_region_info *host = NULL;
+ struct vfio_region_info *lpc = NULL;
+ PCIDevice *lpc_bridge;
+ int ret;
+
+ /*
+ * Copying IDs or creating new devices is not supported on hotplug
+ */
+ if (vdev->pdev.qdev.hotplugged) {
+ error_setg(errp, "IGD LPC is not supported on hotplugged device");
+ return false;
+ }
- gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
- ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
- if (gen > 6) {
- ggms = 1 << ggms;
+ /*
+ * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
+ * can stuff host values into, so if there's already one there and it's not
+ * one we can hack on, this quirk is no-go. Sorry Q35.
+ */
+ lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x1f, 0));
+ if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
+ "vfio-pci-igd-lpc-bridge")) {
+ error_setg(errp,
+ "Cannot create LPC bridge due to existing device at 1f.0");
+ return false;
}
- ggms *= MiB;
+ /*
+ * Check whether we have all the vfio device specific regions to
+ * support LPC quirk (added in Linux v4.6).
+ */
+ ret = vfio_device_get_region_info_type(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
+ if (ret) {
+ error_setg(errp, "IGD LPC bridge access is not supported by kernel");
+ return false;
+ }
- return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8);
-}
+ ret = vfio_device_get_region_info_type(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
+ if (ret) {
+ error_setg(errp, "IGD host bridge access is not supported by kernel");
+ return false;
+ }
-/*
- * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes.
- * Somehow the host stolen memory range is used for this, but how the ROM gets
- * it is a mystery, perhaps it's hardcoded into the ROM. Thankfully though, it
- * reprograms the GTT through the IOBAR where we can trap it and transpose the
- * programming to the VM allocated buffer. That buffer gets reserved by the VM
- * firmware via the fw_cfg entry added below. Here we're just monitoring the
- * IOBAR address and data registers to detect a write sequence targeting the
- * GTTADR. This code is developed by observed behavior and doesn't have a
- * direct spec reference, unfortunately.
- */
-static uint64_t vfio_igd_quirk_data_read(void *opaque,
- hwaddr addr, unsigned size)
-{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
+ /* Create/modify LPC bridge */
+ ret = vfio_pci_igd_lpc_init(vdev, lpc);
+ if (ret) {
+ error_setg(errp, "Failed to create/modify LPC bridge for IGD");
+ return false;
+ }
- igd->index = ~0;
+ /* Stuff some host values into the VM PCI host bridge */
+ ret = vfio_pci_igd_host_init(vdev, host);
+ if (ret) {
+ error_setg(errp, "Failed to modify host bridge for IGD");
+ return false;
+ }
- return vfio_region_read(&vdev->bars[4].region, addr + 4, size);
+ return true;
}
-static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
+static bool vfio_pci_igd_override_gms(int gen, uint32_t gms, uint32_t *gmch)
{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
- uint64_t val = data;
- int gen = igd_gen(vdev);
-
- /*
- * Programming the GGMS starts at index 0x1 and uses every 4th index (ie.
- * 0x1, 0x5, 0x9, 0xd,...). For pre-Gen8 each 4-byte write is a whole PTE
- * entry, with 0th bit enable set. For Gen8 and up, PTEs are 64bit, so
- * entries 0x5 & 0xd are the high dword, in our case zero. Each PTE points
- * to a 4k page, which we translate to a page from the VM allocated region,
- * pointed to by the BDSM register. If this is not set, we fail.
- *
- * We trap writes to the full configured GTT size, but we typically only
- * see the vBIOS writing up to (nearly) the 1MB barrier. In fact it often
- * seems to miss the last entry for an even 1MB GTT. Doing a gratuitous
- * write of that last entry does work, but is hopefully unnecessary since
- * we clear the previous GTT on initialization.
- */
- if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) {
- if (gen < 8 || (igd->index % 8 == 1)) {
- uint32_t base;
-
- base = pci_get_long(vdev->pdev.config + IGD_BDSM);
- if (!base) {
- hw_error("vfio-igd: Guest attempted to program IGD GTT before "
- "BIOS reserved stolen memory. Unsupported BIOS?");
- }
-
- val = data - igd->bdsm + base;
+ bool ret = false;
+
+ if (gen == -1) {
+ error_report("x-igd-gms is not supported on this device");
+ } else if (gen < 8) {
+ if (gms <= 0x10) {
+ *gmch &= ~(IGD_GMCH_GEN6_GMS_MASK << IGD_GMCH_GEN6_GMS_SHIFT);
+ *gmch |= gms << IGD_GMCH_GEN6_GMS_SHIFT;
+ ret = true;
} else {
- val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */
+ error_report(QERR_INVALID_PARAMETER_VALUE, "x-igd-gms", "0~0x10");
+ }
+ } else if (gen == 8) {
+ if (gms <= 0x40) {
+ *gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT);
+ *gmch |= gms << IGD_GMCH_GEN8_GMS_SHIFT;
+ ret = true;
+ } else {
+ error_report(QERR_INVALID_PARAMETER_VALUE, "x-igd-gms", "0~0x40");
+ }
+ } else {
+ /* 0x0 to 0x40: 32MB increments starting at 0MB */
+ /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+ if ((gms <= 0x40) || (gms >= 0xf0 && gms <= 0xfe)) {
+ *gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT);
+ *gmch |= gms << IGD_GMCH_GEN8_GMS_SHIFT;
+ ret = true;
+ } else {
+ error_report(QERR_INVALID_PARAMETER_VALUE,
+ "x-igd-gms", "0~0x40 or 0xf0~0xfe");
}
-
- trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name,
- igd->index, data, val);
}
- vfio_region_write(&vdev->bars[4].region, addr + 4, val, size);
-
- igd->index = ~0;
+ return ret;
}
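As a side note, the encodings validated above can be turned into an actual size. Below is a small standalone sketch (illustration only, not a helper QEMU uses; step sizes are taken from the comments in vfio_pci_igd_override_gms() above):

    #include <stdint.h>
    #include <stdio.h>

    #define MiB (1024ULL * 1024)

    /*
     * Decode a GMS field value into bytes of stolen memory, following the
     * per-generation encodings described above: 32MiB steps for the plain
     * values, plus 4MiB steps for 0xf0-0xfe on Gen9+. Illustration only.
     */
    static uint64_t igd_gms_to_bytes(int gen, uint32_t gms)
    {
        if (gen >= 9 && gms >= 0xf0 && gms <= 0xfe) {
            /* 4MiB increments starting at 4MiB: 0xf0 -> 4MiB, 0xfe -> 60MiB */
            return (uint64_t)(gms - 0xf0 + 1) * 4 * MiB;
        }
        /* 32MiB increments starting at 0MiB: 0x1 -> 32MiB, 0x40 -> 2GiB */
        return (uint64_t)gms * 32 * MiB;
    }

    int main(void)
    {
        /* x-igd-gms=4 asks for 4 * 32MiB = 128MiB of stolen memory */
        printf("%llu MiB\n",
               (unsigned long long)(igd_gms_to_bytes(9, 0x04) / MiB));
        /* 0xf0 selects the small 4MiB step on Gen9+ */
        printf("%llu MiB\n",
               (unsigned long long)(igd_gms_to_bytes(9, 0xf0) / MiB));
        return 0;
    }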
-static const MemoryRegionOps vfio_igd_data_quirk = {
- .read = vfio_igd_quirk_data_read,
- .write = vfio_igd_quirk_data_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
+#define IGD_GGC_MMIO_OFFSET 0x108040
+#define IGD_BDSM_MMIO_OFFSET 0x1080C0
-static uint64_t vfio_igd_quirk_index_read(void *opaque,
- hwaddr addr, unsigned size)
+void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr)
{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
+ VFIOQuirk *ggc_quirk, *bdsm_quirk;
+ VFIOConfigMirrorQuirk *ggc_mirror, *bdsm_mirror;
+ int gen;
- igd->index = ~0;
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
+ !vfio_is_vga(vdev) || nr != 0) {
+ return;
+ }
- return vfio_region_read(&vdev->bars[4].region, addr, size);
-}
+ /* Only IGD Gen6-12 devices need quirks in BAR 0 */
+ gen = igd_gen(vdev);
+ if (gen < 6) {
+ return;
+ }
-static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
-{
- VFIOIGDQuirk *igd = opaque;
- VFIOPCIDevice *vdev = igd->vdev;
+ if (vdev->igd_gms) {
+ ggc_quirk = vfio_quirk_alloc(1);
+ ggc_mirror = ggc_quirk->data = g_malloc0(sizeof(*ggc_mirror));
+ ggc_mirror->mem = ggc_quirk->mem;
+ ggc_mirror->vdev = vdev;
+ ggc_mirror->bar = nr;
+ ggc_mirror->offset = IGD_GGC_MMIO_OFFSET;
+ ggc_mirror->config_offset = IGD_GMCH;
+
+ memory_region_init_io(ggc_mirror->mem, OBJECT(vdev),
+ &vfio_generic_mirror_quirk, ggc_mirror,
+ "vfio-igd-ggc-quirk", 2);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ ggc_mirror->offset, ggc_mirror->mem,
+ 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, ggc_quirk, next);
+ }
- igd->index = data;
+ bdsm_quirk = vfio_quirk_alloc(1);
+ bdsm_mirror = bdsm_quirk->data = g_malloc0(sizeof(*bdsm_mirror));
+ bdsm_mirror->mem = bdsm_quirk->mem;
+ bdsm_mirror->vdev = vdev;
+ bdsm_mirror->bar = nr;
+ bdsm_mirror->offset = IGD_BDSM_MMIO_OFFSET;
+ bdsm_mirror->config_offset = (gen < 11) ? IGD_BDSM : IGD_BDSM_GEN11;
+
+ memory_region_init_io(bdsm_mirror->mem, OBJECT(vdev),
+ &vfio_generic_mirror_quirk, bdsm_mirror,
+ "vfio-igd-bdsm-quirk", (gen < 11) ? 4 : 8);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ bdsm_mirror->offset, bdsm_mirror->mem,
+ 1);
- vfio_region_write(&vdev->bars[4].region, addr, data, size);
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, bdsm_quirk, next);
}
-static const MemoryRegionOps vfio_igd_index_quirk = {
- .read = vfio_igd_quirk_index_read,
- .write = vfio_igd_quirk_index_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
+static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp)
{
- g_autofree struct vfio_region_info *rom = NULL;
- g_autofree struct vfio_region_info *opregion = NULL;
- g_autofree struct vfio_region_info *host = NULL;
- g_autofree struct vfio_region_info *lpc = NULL;
- VFIOQuirk *quirk;
- VFIOIGDQuirk *igd;
- PCIDevice *lpc_bridge;
- int i, ret, ggms_mb, gms_mb = 0, gen;
+ struct vfio_region_info *opregion = NULL;
+ int ret, gen;
+ uint64_t gms_size = 0;
uint64_t *bdsm_size;
uint32_t gmch;
- uint16_t cmd_orig, cmd;
+ bool legacy_mode_enabled = false;
Error *err = NULL;
- /*
- * This must be an Intel VGA device at address 00:02.0 for us to even
- * consider enabling legacy mode. The vBIOS has dependencies on the
- * PCI bus address.
- */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
- !vfio_is_vga(vdev) || nr != 4 ||
- &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
- 0, PCI_DEVFN(0x2, 0))) {
- return;
+ !vfio_is_vga(vdev)) {
+ return true;
}
- /*
- * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
- * can stuff host values into, so if there's already one there and it's not
- * one we can hack on, legacy mode is no-go. Sorry Q35.
- */
- lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
- 0, PCI_DEVFN(0x1f, 0));
- if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
- "vfio-pci-igd-lpc-bridge")) {
- error_report("IGD device %s cannot support legacy mode due to existing "
- "devices at address 1f.0", vdev->vbasedev.name);
- return;
+ /* An IGD device always comes with an OpRegion */
+ if (!vfio_pci_igd_opregion_detect(vdev, &opregion)) {
+ return true;
}
+ info_report("OpRegion detected on Intel display %x.", vdev->device_id);
- /*
- * IGD is not a standard, they like to change their specs often. We
- * only attempt to support back to SandBridge and we hope that newer
- * devices maintain compatibility with generation 8.
- */
gen = igd_gen(vdev);
- if (gen != 6 && gen != 8) {
- error_report("IGD device %s is unsupported in legacy mode, "
- "try SandyBridge or newer", vdev->vbasedev.name);
- return;
- }
+ gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
/*
- * Most of what we're doing here is to enable the ROM to run, so if
- * there's no ROM, there's no point in setting up this quirk.
- * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
+ * For backward compatibility, enable legacy mode when
+ * - Device generation is 6 to 9 (inclusive)
+ * - Machine type is i440fx (pc_piix)
+ * - IGD device is at guest BDF 00:02.0
+ * - Not manually disabled by x-igd-legacy-mode=off
*/
- ret = vfio_get_region_info(&vdev->vbasedev,
- VFIO_PCI_ROM_REGION_INDEX, &rom);
- if ((ret || !rom->size) && !vdev->pdev.romfile) {
- error_report("IGD device %s has no ROM, legacy mode disabled",
- vdev->vbasedev.name);
- return;
- }
+ if ((vdev->igd_legacy_mode != ON_OFF_AUTO_OFF) &&
+ (gen >= 6 && gen <= 9) &&
+ !strcmp(MACHINE_GET_CLASS(qdev_get_machine())->family, "pc_piix") &&
+ (&vdev->pdev == pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x2, 0)))) {
+ /*
+ * IGD legacy mode requires:
+ * - VBIOS in ROM BAR or file
+ * - VGA IO/MMIO ranges are claimed by IGD
+ * - OpRegion
+ * - Same LPC bridge and Host bridge VID/DID/SVID/SSID as host
+ */
+ struct vfio_region_info *rom = NULL;
+
+ legacy_mode_enabled = true;
+ info_report("IGD legacy mode enabled, "
+ "use x-igd-legacy-mode=off to disable it if unwanted.");
+
+ /*
+ * Most of what we're doing here is to enable the ROM to run, so if
+ * there's no ROM, there's no point in setting up this quirk.
+ * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
+ */
+ ret = vfio_device_get_region_info(&vdev->vbasedev,
+ VFIO_PCI_ROM_REGION_INDEX, &rom);
+ if ((ret || !rom->size) && !vdev->pdev.romfile) {
+ error_setg(&err, "Device has no ROM");
+ goto error;
+ }
- /*
- * Ignore the hotplug corner case, mark the ROM failed, we can't
- * create the devices we need for legacy mode in the hotplug scenario.
- */
- if (vdev->pdev.qdev.hotplugged) {
- error_report("IGD device %s hotplugged, ROM disabled, "
- "legacy mode disabled", vdev->vbasedev.name);
- vdev->rom_read_failed = true;
- return;
- }
+ /*
+ * If IGD VGA Disable is clear (expected) and VGA is not already
+ * enabled, try to enable it. Probably shouldn't be using legacy mode
+ * without VGA, but also no point in us enabling VGA if disabled in
+ * hardware.
+ */
+ if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) {
+ error_setg(&err, "Unable to enable VGA access");
+ goto error;
+ }
- /*
- * Check whether we have all the vfio device specific regions to
- * support legacy mode (added in Linux v4.6). If not, bail.
- */
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
- if (ret) {
- error_report("IGD device %s does not support OpRegion access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
+ /* Enable OpRegion and LPC bridge quirk */
+ vdev->features |= VFIO_FEATURE_ENABLE_IGD_OPREGION;
+ vdev->features |= VFIO_FEATURE_ENABLE_IGD_LPC;
+ } else if (vdev->igd_legacy_mode == ON_OFF_AUTO_ON) {
+ error_setg(&err,
+ "Machine is not i440fx, assigned BDF is not 00:02.0, "
+ "or device %04x (gen %d) doesn't support legacy mode",
+ vdev->device_id, gen);
+ goto error;
}
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
- if (ret) {
- error_report("IGD device %s does not support host bridge access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
+ /* Setup OpRegion access */
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) &&
+ !vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
+ goto error;
}
- ret = vfio_get_dev_region_info(&vdev->vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
- if (ret) {
- error_report("IGD device %s does not support LPC bridge access,"
- "legacy mode disabled", vdev->vbasedev.name);
- return;
+ /* Setup LPC bridge / Host bridge PCI IDs */
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_LPC) &&
+ !vfio_pci_igd_setup_lpc_bridge(vdev, errp)) {
+ goto error;
}
- gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
-
/*
- * If IGD VGA Disable is clear (expected) and VGA is not already enabled,
- * try to enable it. Probably shouldn't be using legacy mode without VGA,
- * but also no point in us enabling VGA if disabled in hardware.
+ * ASLS (OpRegion address) is read-only, emulated
+ * It contains an HPA; guest firmware needs to reprogram it with a GPA.
*/
- if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) {
- error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
- error_report("IGD device %s failed to enable VGA access, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
-
- /* Create our LPC/ISA bridge */
- ret = vfio_pci_igd_lpc_init(vdev, lpc);
- if (ret) {
- error_report("IGD device %s failed to create LPC bridge, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
-
- /* Stuff some host values into the VM PCI host bridge */
- ret = vfio_pci_igd_host_init(vdev, host);
- if (ret) {
- error_report("IGD device %s failed to modify host bridge, "
- "legacy mode disabled", vdev->vbasedev.name);
- return;
- }
-
- /* Setup OpRegion access */
- if (!vfio_pci_igd_opregion_init(vdev, opregion, &err)) {
- error_append_hint(&err, "IGD legacy mode disabled\n");
- error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
- return;
- }
-
- /* Setup our quirk to munge GTT addresses to the VM allocated buffer */
- quirk = vfio_quirk_alloc(2);
- igd = quirk->data = g_malloc0(sizeof(*igd));
- igd->vdev = vdev;
- igd->index = ~0;
- igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
- igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */
-
- memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
- igd, "vfio-igd-index-quirk", 4);
- memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
- 0, &quirk->mem[0], 1);
-
- memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk,
- igd, "vfio-igd-data-quirk", 4);
- memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
- 4, &quirk->mem[1], 1);
+ pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
+ pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
+ pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
- QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+ /*
+ * Allow the user to override the DSM size via the x-igd-gms option, in
+ * multiples of 32MiB. This option should only be used when the desired size
+ * cannot be set from the DVMT Pre-Allocated option in the host BIOS.
+ */
+ if (vdev->igd_gms) {
+ if (!vfio_pci_igd_override_gms(gen, vdev->igd_gms, &gmch)) {
+ return false;
+ }
- /* Determine the size of stolen memory needed for GTT */
- ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
- if (gen > 6) {
- ggms_mb = 1 << ggms_mb;
+ /* GMCH is read-only, emulated */
+ pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
+ pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
+ pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
}
- /*
- * Assume we have no GMS memory, but allow it to be overridden by device
- * option (experimental). The spec doesn't actually allow zero GMS when
- * when IVD (IGD VGA Disable) is clear, but the claim is that it's unused,
- * so let's not waste VM memory for it.
- */
- gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
+ if (gen > 0) {
+ gms_size = igd_stolen_memory_size(gen, gmch);
- if (vdev->igd_gms) {
- if (vdev->igd_gms <= 0x10) {
- gms_mb = vdev->igd_gms * 32;
- gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8);
+ /* BDSM is read-write, emulated. BIOS needs to be able to write it */
+ if (gen < 11) {
+ pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
+ pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
+ pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0);
} else {
- error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms);
- vdev->igd_gms = 0;
+ pci_set_quad(vdev->pdev.config + IGD_BDSM_GEN11, 0);
+ pci_set_quad(vdev->pdev.wmask + IGD_BDSM_GEN11, ~0);
+ pci_set_quad(vdev->emulated_config_bits + IGD_BDSM_GEN11, ~0);
}
}
/*
* Request reserved memory for stolen memory via fw_cfg. VM firmware
* must allocate a 1MB aligned reserved memory region below 4GB with
- * the requested size (in bytes) for use by the Intel PCI class VGA
- * device at VM address 00:02.0. The base address of this reserved
- * memory region must be written to the device BDSM register at PCI
- * config offset 0x5C.
+ * the requested size (in bytes) for use by the IGD device. The base
+ * address of this reserved memory region must be written to the
+ * device BDSM register.
+ * For newer devices without a BDSM register, this fw_cfg item is 0.
*/
bdsm_size = g_malloc(sizeof(*bdsm_size));
- *bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB);
+ *bdsm_size = cpu_to_le64(gms_size);
fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
bdsm_size, sizeof(*bdsm_size));
- /* GMCH is read-only, emulated */
- pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
- pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
- pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
+ trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, (gms_size / MiB));
- /* BDSM is read-write, emulated. The BIOS needs to be able to write it */
- pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
- pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
- pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0);
+ return true;
+error:
/*
- * This IOBAR gives us access to GTTADR, which allows us to write to
- * the GTT itself. So let's go ahead and write zero to all the GTT
- * entries to avoid spurious DMA faults. Be sure I/O access is enabled
- * before talking to the device.
+ * When legacy mode is implicitly enabled, continue on error
+ * to keep compatibility.
*/
- if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
- error_report("IGD device %s - failed to read PCI command register",
- vdev->vbasedev.name);
+ if (legacy_mode_enabled && (vdev->igd_legacy_mode == ON_OFF_AUTO_AUTO)) {
+ error_report_err(err);
+ error_report("IGD legacy mode disabled");
+ return true;
+ }
+
+ error_propagate(errp, err);
+ return false;
+}
+
+/*
+ * KVMGT/GVT-g vGPU exposes an emulated OpRegion. So far, users have to specify
+ * x-igd-opregion=on to enable access.
+ * TODO: Check VID/DID and enable opregion access automatically
+ */
+static bool vfio_pci_kvmgt_config_quirk(VFIOPCIDevice *vdev, Error **errp)
+{
+ struct vfio_region_info *opregion = NULL;
+ int gen;
+
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
+ !vfio_is_vga(vdev)) {
+ return true;
}
- cmd = cmd_orig | PCI_COMMAND_IO;
+ /* FIXME: Cherryview is Gen8, but doesn't support GVT-g */
+ gen = igd_gen(vdev);
+ if (gen != 8 && gen != 9) {
+ return true;
+ }
- if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) {
- error_report("IGD device %s - failed to write PCI command register",
- vdev->vbasedev.name);
+ if (!vfio_pci_igd_opregion_detect(vdev, &opregion)) {
+ /* Should never reach here, KVMGT always emulates OpRegion */
+ return false;
}
- for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) {
- vfio_region_write(&vdev->bars[4].region, 0, i, 4);
- vfio_region_write(&vdev->bars[4].region, 4, 0, 4);
+ if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) &&
+ !vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
+ return false;
}
- if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
- vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
- error_report("IGD device %s - failed to restore PCI command register",
- vdev->vbasedev.name);
+ return true;
+}
+
+bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp)
+{
+ /* KVMGT/GVT-g vGPU is exposed as mdev */
+ if (vdev->vbasedev.mdev) {
+ return vfio_pci_kvmgt_config_quirk(vdev, errp);
}
- trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
+ return vfio_pci_igd_config_quirk(vdev, errp);
}
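Before the iommufd changes, here is a hedged firmware-side sketch of how the "etc/igd-bdsm-size" item added above might be consumed. fw_cfg_read_file_u64le(), alloc_reserved_below_4g() and pci_config_write32() are made-up stand-ins for the firmware's real services, and only the pre-Gen11 32-bit BDSM register at config offset 0x5c is handled (Gen11+ would use the 64-bit IGD_BDSM_GEN11 register instead):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the firmware's real fw_cfg/PCI services. */
    static uint64_t fw_cfg_read_file_u64le(const char *name)
    {
        (void)name;
        return 64 * 1024 * 1024;          /* pretend QEMU asked for 64MiB */
    }

    static uint64_t alloc_reserved_below_4g(uint64_t size, uint64_t align)
    {
        (void)size;
        return 0x80000000 & ~(align - 1); /* pretend RAM was reserved at 2GiB */
    }

    static void pci_config_write32(int bdf, int offset, uint32_t val)
    {
        printf("cfg write bdf=%04x off=0x%02x val=0x%08x\n", bdf, offset, val);
    }

    #define IGD_BDF   0x0010   /* guest BDF 00:02.0 */
    #define IGD_BDSM  0x5c     /* pre-Gen11 BDSM config offset */

    /*
     * Consume "etc/igd-bdsm-size": reserve that many bytes of 1MiB-aligned
     * memory below 4GiB and point the device's BDSM register at it. A size
     * of zero means a newer device without a BDSM register, nothing to do.
     */
    static void igd_setup_stolen_memory(void)
    {
        uint64_t size = fw_cfg_read_file_u64le("etc/igd-bdsm-size");

        if (size) {
            uint64_t base = alloc_reserved_below_4g(size, 1024 * 1024);
            pci_config_write32(IGD_BDF, IGD_BDSM, (uint32_t)base);
        }
    }

    int main(void)
    {
        igd_setup_stolen_memory();
        return 0;
    }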
diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index 7b5f87a..d3efef7 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -15,19 +15,27 @@
#include <linux/vfio.h>
#include <linux/iommufd.h>
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-device.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "qapi/error.h"
-#include "sysemu/iommufd.h"
+#include "system/iommufd.h"
#include "hw/qdev-core.h"
-#include "sysemu/reset.h"
+#include "hw/vfio/vfio-cpr.h"
+#include "system/reset.h"
#include "qemu/cutils.h"
#include "qemu/chardev_open.h"
#include "pci.h"
+#include "vfio-iommufd.h"
+#include "vfio-helpers.h"
+#include "vfio-listener.h"
+
+#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO \
+ TYPE_HOST_IOMMU_DEVICE_IOMMUFD "-vfio"
static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly)
+ ram_addr_t size, void *vaddr, bool readonly,
+ MemoryRegion *mr)
{
const VFIOIOMMUFDContainer *container =
container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
@@ -39,11 +47,28 @@ static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb)
+ IOMMUTLBEntry *iotlb, bool unmap_all)
{
const VFIOIOMMUFDContainer *container =
container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+ /* unmap in halves */
+ if (unmap_all) {
+ Int128 llsize = int128_rshift(int128_2_64(), 1);
+ int ret;
+
+ ret = iommufd_backend_unmap_dma(container->be, container->ioas_id,
+ 0, int128_get64(llsize));
+
+ if (ret == 0) {
+ ret = iommufd_backend_unmap_dma(container->be, container->ioas_id,
+ int128_get64(llsize),
+ int128_get64(llsize));
+ }
+
+ return ret;
+ }
+
/* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
return iommufd_backend_unmap_dma(container->be,
container->ioas_id, iova, size);
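The halving above exists because a 2^64-byte length cannot be expressed in a 64-bit size argument. A minimal standalone sketch of the same idea, where unmap_dma() is just a printing stand-in and not the iommufd backend call:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for iommufd_backend_unmap_dma(): just prints the range. */
    static int unmap_dma(uint64_t iova, uint64_t size)
    {
        printf("unmap [0x%016" PRIx64 ", +0x%016" PRIx64 ")\n", iova, size);
        return 0;
    }

    /*
     * Unmapping the whole 64-bit IOVA space: a length of 2^64 does not fit
     * into a uint64_t, so issue two unmaps of 2^63 bytes each, as the
     * unmap_all branch above does.
     */
    static int unmap_everything(void)
    {
        const uint64_t half = 1ULL << 63;
        int ret = unmap_dma(0, half);

        if (ret == 0) {
            ret = unmap_dma(half, half);
        }
        return ret;
    }

    int main(void)
    {
        return unmap_everything();
    }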
@@ -110,6 +135,68 @@ static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev)
iommufd_backend_disconnect(vbasedev->iommufd);
}
+static bool iommufd_hwpt_dirty_tracking(VFIOIOASHwpt *hwpt)
+{
+ return hwpt && hwpt->hwpt_flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+}
+
+static int iommufd_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
+ bool start, Error **errp)
+{
+ const VFIOIOMMUFDContainer *container =
+ container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+ VFIOIOASHwpt *hwpt;
+
+ QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
+ if (!iommufd_hwpt_dirty_tracking(hwpt)) {
+ continue;
+ }
+
+ if (!iommufd_backend_set_dirty_tracking(container->be,
+ hwpt->hwpt_id, start, errp)) {
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
+ if (!iommufd_hwpt_dirty_tracking(hwpt)) {
+ continue;
+ }
+ iommufd_backend_set_dirty_tracking(container->be,
+ hwpt->hwpt_id, !start, NULL);
+ }
+ return -EINVAL;
+}
+
+static int iommufd_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size, Error **errp)
+{
+ VFIOIOMMUFDContainer *container = container_of(bcontainer,
+ VFIOIOMMUFDContainer,
+ bcontainer);
+ unsigned long page_size = qemu_real_host_page_size();
+ VFIOIOASHwpt *hwpt;
+
+ QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
+ if (!iommufd_hwpt_dirty_tracking(hwpt)) {
+ continue;
+ }
+
+ if (!iommufd_backend_get_dirty_bitmap(container->be, hwpt->hwpt_id,
+ iova, size, page_size,
+ (uint64_t *)vbmap->bitmap,
+ errp)) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp)
{
ERRP_GUARD();
@@ -172,7 +259,7 @@ out:
return ret;
}
-static bool iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
+static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
Error **errp)
{
int iommufd = vbasedev->iommufd->fd;
@@ -187,12 +274,12 @@ static bool iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
error_setg_errno(errp, errno,
"[iommufd=%d] error attach %s (%d) to id=%d",
iommufd, vbasedev->name, vbasedev->fd, id);
- return false;
+ return -errno;
}
trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
vbasedev->fd, id);
- return true;
+ return 0;
}
static bool iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
@@ -212,11 +299,117 @@ static bool iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
return true;
}
+static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev,
+ VFIOIOMMUFDContainer *container,
+ Error **errp)
+{
+ ERRP_GUARD();
+ IOMMUFDBackend *iommufd = vbasedev->iommufd;
+ uint32_t type, flags = 0;
+ uint64_t hw_caps;
+ VFIOIOASHwpt *hwpt;
+ uint32_t hwpt_id;
+ int ret;
+
+ /* Try to find a domain */
+ QLIST_FOREACH(hwpt, &container->hwpt_list, next) {
+ ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp);
+ if (ret) {
+ /* -EINVAL means the domain is incompatible with the device. */
+ if (ret == -EINVAL) {
+ /*
+ * It is an expected failure and it just means we will try
+ * another domain, or create one if no existing compatible
+ * domain is found. Hence the error is discarded below.
+ */
+ error_free(*errp);
+ *errp = NULL;
+ continue;
+ }
+
+ return false;
+ } else {
+ vbasedev->hwpt = hwpt;
+ QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next);
+ vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt);
+ return true;
+ }
+ }
+
+ /*
+ * This is quite early and VFIO Migration state isn't yet fully
+ * initialized, thus rely only on IOMMU hardware capabilities as to
+ * whether IOMMU dirty tracking is going to be requested. Later
+ * vfio_migration_realize() may decide to use VF dirty tracking
+ * instead.
+ */
+ if (!iommufd_backend_get_device_info(vbasedev->iommufd, vbasedev->devid,
+ &type, NULL, 0, &hw_caps, errp)) {
+ return false;
+ }
+
+ if (hw_caps & IOMMU_HW_CAP_DIRTY_TRACKING) {
+ flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ }
+
+ if (!iommufd_backend_alloc_hwpt(iommufd, vbasedev->devid,
+ container->ioas_id, flags,
+ IOMMU_HWPT_DATA_NONE, 0, NULL,
+ &hwpt_id, errp)) {
+ return false;
+ }
+
+ hwpt = g_malloc0(sizeof(*hwpt));
+ hwpt->hwpt_id = hwpt_id;
+ hwpt->hwpt_flags = flags;
+ QLIST_INIT(&hwpt->device_list);
+
+ ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp);
+ if (ret) {
+ iommufd_backend_free_id(container->be, hwpt->hwpt_id);
+ g_free(hwpt);
+ return false;
+ }
+
+ vbasedev->hwpt = hwpt;
+ vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt);
+ QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next);
+ QLIST_INSERT_HEAD(&container->hwpt_list, hwpt, next);
+ container->bcontainer.dirty_pages_supported |=
+ vbasedev->iommu_dirty_tracking;
+ if (container->bcontainer.dirty_pages_supported &&
+ !vbasedev->iommu_dirty_tracking) {
+ warn_report("IOMMU instance for device %s doesn't support dirty tracking",
+ vbasedev->name);
+ }
+ return true;
+}
+
+static void iommufd_cdev_autodomains_put(VFIODevice *vbasedev,
+ VFIOIOMMUFDContainer *container)
+{
+ VFIOIOASHwpt *hwpt = vbasedev->hwpt;
+
+ QLIST_REMOVE(vbasedev, hwpt_next);
+ vbasedev->hwpt = NULL;
+
+ if (QLIST_EMPTY(&hwpt->device_list)) {
+ QLIST_REMOVE(hwpt, next);
+ iommufd_backend_free_id(container->be, hwpt->hwpt_id);
+ g_free(hwpt);
+ }
+}
+
static bool iommufd_cdev_attach_container(VFIODevice *vbasedev,
VFIOIOMMUFDContainer *container,
Error **errp)
{
- return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
+ /* mdevs aren't physical devices and will fail with auto domains */
+ if (!vbasedev->mdev) {
+ return iommufd_cdev_autodomains_get(vbasedev, container, errp);
+ }
+
+ return !iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
}
static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
@@ -227,6 +420,11 @@ static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
if (!iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
error_report_err(err);
}
+
+ if (vbasedev->hwpt) {
+ iommufd_cdev_autodomains_put(vbasedev, container);
+ }
+
}
static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container)
@@ -236,7 +434,8 @@ static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container)
if (!QLIST_EMPTY(&bcontainer->device_list)) {
return;
}
- memory_listener_unregister(&bcontainer->listener);
+ vfio_cpr_unregister_container(bcontainer);
+ vfio_listener_unregister(bcontainer);
iommufd_backend_free_id(container->be, container->ioas_id);
object_unref(container);
}
@@ -318,7 +517,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
goto err_connect_bind;
}
- space = vfio_get_address_space(as);
+ space = vfio_address_space_get(as);
/* try to attach to an existing container in this space */
QLIST_FOREACH(bcontainer, &space->containers, next) {
@@ -336,8 +535,8 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
} else {
ret = iommufd_cdev_ram_block_discard_disable(true);
if (ret) {
- error_setg(errp,
- "Cannot set discarding of RAM broken (%d)", ret);
+ error_setg_errno(errp, -ret,
+ "Cannot set discarding of RAM broken");
goto err_discard_disable;
}
goto found_container;
@@ -354,6 +553,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
container = VFIO_IOMMU_IOMMUFD(object_new(TYPE_VFIO_IOMMU_IOMMUFD));
container->be = vbasedev->iommufd;
container->ioas_id = ioas_id;
+ QLIST_INIT(&container->hwpt_list);
bcontainer = &container->bcontainer;
vfio_address_space_insert(space, bcontainer);
@@ -364,6 +564,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
ret = iommufd_cdev_ram_block_discard_disable(true);
if (ret) {
+ error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
goto err_discard_disable;
}
@@ -375,12 +576,11 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
bcontainer->pgsizes = qemu_real_host_page_size();
}
- bcontainer->listener = vfio_memory_listener;
- memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+ if (!vfio_listener_register(bcontainer, errp)) {
+ goto err_listener_register;
+ }
- if (bcontainer->error) {
- error_propagate_prepend(errp, bcontainer->error,
- "memory listener initialization failed: ");
+ if (!vfio_cpr_register_container(bcontainer, errp)) {
goto err_listener_register;
}
@@ -393,7 +593,12 @@ found_container:
goto err_listener_register;
}
- if (!vfio_cpr_register_container(bcontainer, errp)) {
+ /*
+ * Do not move this code before attachment! The nested IOMMU support
+ * needs the device and hwpt IDs, which are generated only after attachment.
+ */
+ if (!vfio_device_hiod_create_and_realize(vbasedev,
+ TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO, errp)) {
goto err_listener_register;
}
@@ -405,14 +610,7 @@ found_container:
iommufd_cdev_ram_block_discard_disable(false);
}
- vbasedev->group = 0;
- vbasedev->num_irqs = dev_info.num_irqs;
- vbasedev->num_regions = dev_info.num_regions;
- vbasedev->flags = dev_info.flags;
- vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
- vbasedev->bcontainer = bcontainer;
- QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
- QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
+ vfio_device_prepare(vbasedev, bcontainer, &dev_info);
trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs,
vbasedev->num_regions, vbasedev->flags);
@@ -425,7 +623,7 @@ err_discard_disable:
err_attach_container:
iommufd_cdev_container_destroy(container);
err_alloc_ioas:
- vfio_put_address_space(space);
+ vfio_address_space_put(space);
iommufd_cdev_unbind_and_disconnect(vbasedev);
err_connect_bind:
close(vbasedev->fd);
@@ -439,18 +637,16 @@ static void iommufd_cdev_detach(VFIODevice *vbasedev)
VFIOIOMMUFDContainer *container = container_of(bcontainer,
VFIOIOMMUFDContainer,
bcontainer);
- QLIST_REMOVE(vbasedev, global_next);
- QLIST_REMOVE(vbasedev, container_next);
- vbasedev->bcontainer = NULL;
+ vfio_device_unprepare(vbasedev);
if (!vbasedev->ram_block_discard_allowed) {
iommufd_cdev_ram_block_discard_disable(false);
}
- vfio_cpr_unregister_container(bcontainer);
+ object_unref(vbasedev->hiod);
iommufd_cdev_detach_container(vbasedev, container);
iommufd_cdev_container_destroy(container);
- vfio_put_address_space(space);
+ vfio_address_space_put(space);
iommufd_cdev_unbind_and_disconnect(vbasedev);
close(vbasedev->fd);
@@ -606,39 +802,63 @@ out_single:
return ret;
}
-static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data)
+static void vfio_iommu_iommufd_class_init(ObjectClass *klass, const void *data)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
- vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO;
-
vioc->dma_map = iommufd_cdev_map;
vioc->dma_unmap = iommufd_cdev_unmap;
vioc->attach_device = iommufd_cdev_attach;
vioc->detach_device = iommufd_cdev_detach;
vioc->pci_hot_reset = iommufd_cdev_pci_hot_reset;
+ vioc->set_dirty_page_tracking = iommufd_set_dirty_page_tracking;
+ vioc->query_dirty_bitmap = iommufd_query_dirty_bitmap;
};
+static bool
+host_iommu_device_iommufd_vfio_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ uint32_t hwpt_id, Error **errp)
+{
+ VFIODevice *vbasedev = HOST_IOMMU_DEVICE(idev)->agent;
+
+ return !iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt_id, errp);
+}
+
+static bool
+host_iommu_device_iommufd_vfio_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ Error **errp)
+{
+ VFIODevice *vbasedev = HOST_IOMMU_DEVICE(idev)->agent;
+
+ return iommufd_cdev_detach_ioas_hwpt(vbasedev, errp);
+}
+
static bool hiod_iommufd_vfio_realize(HostIOMMUDevice *hiod, void *opaque,
Error **errp)
{
VFIODevice *vdev = opaque;
+ HostIOMMUDeviceIOMMUFD *idev;
HostIOMMUDeviceCaps *caps = &hiod->caps;
+ VendorCaps *vendor_caps = &caps->vendor_caps;
enum iommu_hw_info_type type;
- union {
- struct iommu_hw_info_vtd vtd;
- } data;
+ uint64_t hw_caps;
hiod->agent = opaque;
- if (!iommufd_backend_get_device_info(vdev->iommufd, vdev->devid,
- &type, &data, sizeof(data), errp)) {
+ if (!iommufd_backend_get_device_info(vdev->iommufd, vdev->devid, &type,
+ vendor_caps, sizeof(*vendor_caps),
+ &hw_caps, errp)) {
return false;
}
hiod->name = g_strdup(vdev->name);
caps->type = type;
- caps->aw_bits = vfio_device_get_aw_bits(vdev);
+ caps->hw_caps = hw_caps;
+
+ idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod);
+ idev->iommufd = vdev->iommufd;
+ idev->devid = vdev->devid;
+ idev->hwpt_id = vdev->hwpt->hwpt_id;
return true;
}
@@ -662,13 +882,17 @@ hiod_iommufd_vfio_get_page_size_mask(HostIOMMUDevice *hiod)
}
-static void hiod_iommufd_vfio_class_init(ObjectClass *oc, void *data)
+static void hiod_iommufd_vfio_class_init(ObjectClass *oc, const void *data)
{
HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_CLASS(oc);
+ HostIOMMUDeviceIOMMUFDClass *idevc = HOST_IOMMU_DEVICE_IOMMUFD_CLASS(oc);
hiodc->realize = hiod_iommufd_vfio_realize;
hiodc->get_iova_ranges = hiod_iommufd_vfio_get_iova_ranges;
hiodc->get_page_size_mask = hiod_iommufd_vfio_get_page_size_mask;
+
+ idevc->attach_hwpt = host_iommu_device_iommufd_vfio_attach_hwpt;
+ idevc->detach_hwpt = host_iommu_device_iommufd_vfio_detach_hwpt;
};
static const TypeInfo types[] = {
diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
new file mode 100644
index 0000000..f498e23
--- /dev/null
+++ b/hw/vfio/listener.c
@@ -0,0 +1,1253 @@
+/*
+ * generic functions used by VFIO devices
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#endif
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/pci.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+#include "system/ram_addr.h"
+#include "hw/hw.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/range.h"
+#include "system/kvm.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "migration/misc.h"
+#include "migration/qemu-file.h"
+#include "system/tcg.h"
+#include "system/tpm.h"
+#include "vfio-migration-internal.h"
+#include "vfio-helpers.h"
+#include "vfio-listener.h"
+
+/*
+ * Device state interfaces
+ */
+
+
+static bool vfio_log_sync_needed(const VFIOContainerBase *bcontainer)
+{
+ VFIODevice *vbasedev;
+
+ if (!vfio_container_dirty_tracking_is_started(bcontainer)) {
+ return false;
+ }
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ VFIOMigration *migration = vbasedev->migration;
+
+ if (!migration) {
+ return false;
+ }
+
+ if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
+ (vfio_device_state_is_running(vbasedev) ||
+ vfio_device_state_is_precopy(vbasedev))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool vfio_listener_skipped_section(MemoryRegionSection *section)
+{
+ return (!memory_region_is_ram(section->mr) &&
+ !memory_region_is_iommu(section->mr)) ||
+ memory_region_is_protected(section->mr) ||
+ /*
+ * Sizing an enabled 64-bit BAR can cause spurious mappings to
+ * addresses in the upper part of the 64-bit address space. These
+ * are never accessed by the CPU and beyond the address width of
+ * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
+ */
+ section->offset_within_address_space & (1ULL << 63);
+}
+
+/*
+ * Called with rcu_read_lock held.
+ * The returned MemoryRegion must not be accessed after calling rcu_read_unlock.
+ */
+static MemoryRegion *vfio_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
+ Error **errp)
+{
+ MemoryRegion *mr;
+
+ mr = memory_translate_iotlb(iotlb, xlat_p, errp);
+ if (mr && memory_region_has_ram_discard_manager(mr)) {
+ /*
+ * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
+ * pages will remain pinned inside vfio until unmapped, resulting in a
+ * higher memory consumption than expected. If memory would get
+ * populated again later, there would be an inconsistency between pages
+ * pinned by vfio and pages seen by QEMU. This is the case until
+ * unmapped from the IOMMU (e.g., during device reset).
+ *
+ * With malicious guests, we really only care about pinning more memory
+ * than expected. RLIMIT_MEMLOCK set for the user/process can never be
+ * exceeded and can be used to mitigate this problem.
+ */
+ warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
+ " RAM (e.g., virtio-mem) works, however, malicious"
+ " guests can trigger pinning of more memory than"
+ " intended via an IOMMU. It's possible to mitigate "
+ " by setting/adjusting RLIMIT_MEMLOCK.");
+ }
+ return mr;
+}
+
+static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
+ VFIOContainerBase *bcontainer = giommu->bcontainer;
+ hwaddr iova = iotlb->iova + giommu->iommu_offset;
+ MemoryRegion *mr;
+ hwaddr xlat;
+ void *vaddr;
+ int ret;
+ Error *local_err = NULL;
+
+ trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
+ iova, iova + iotlb->addr_mask);
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_setg(&local_err,
+ "Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ if (migration_is_running()) {
+ migration_file_set_error(-EINVAL, local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ return;
+ }
+
+ rcu_read_lock();
+
+ if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
+ bool read_only;
+
+ mr = vfio_translate_iotlb(iotlb, &xlat, &local_err);
+ if (!mr) {
+ error_report_err(local_err);
+ goto out;
+ }
+ vaddr = memory_region_get_ram_ptr(mr) + xlat;
+ read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
+
+ /*
+ * vaddr is only valid until rcu_read_unlock(). But after
+ * vfio_dma_map has set up the mapping the pages will be
+ * pinned by the kernel. This makes sure that the RAM backend
+ * of vaddr will always be there, even if the memory object is
+ * destroyed and its backing memory munmap-ed.
+ */
+ ret = vfio_container_dma_map(bcontainer, iova,
+ iotlb->addr_mask + 1, vaddr,
+ read_only, mr);
+ if (ret) {
+ error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%s)",
+ bcontainer, iova,
+ iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
+ }
+ } else {
+ ret = vfio_container_dma_unmap(bcontainer, iova,
+ iotlb->addr_mask + 1, iotlb, false);
+ if (ret) {
+ error_setg(&local_err,
+ "vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%s)",
+ bcontainer, iova,
+ iotlb->addr_mask + 1, ret, strerror(-ret));
+ if (migration_is_running()) {
+ migration_file_set_error(ret, local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ }
+ }
+out:
+ rcu_read_unlock();
+}
+
+static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ VFIOContainerBase *bcontainer = vrdl->bcontainer;
+ const hwaddr size = int128_get64(section->size);
+ const hwaddr iova = section->offset_within_address_space;
+ int ret;
+
+ /* Unmap with a single call. */
+ ret = vfio_container_dma_unmap(bcontainer, iova, size, NULL, false);
+ if (ret) {
+ error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
+ strerror(-ret));
+ }
+}
+
+static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ VFIOContainerBase *bcontainer = vrdl->bcontainer;
+ const hwaddr end = section->offset_within_region +
+ int128_get64(section->size);
+ hwaddr start, next, iova;
+ void *vaddr;
+ int ret;
+
+ /*
+ * Map in (aligned within memory region) minimum granularity, so we can
+ * unmap in minimum granularity later.
+ */
+ for (start = section->offset_within_region; start < end; start = next) {
+ next = ROUND_UP(start + 1, vrdl->granularity);
+ next = MIN(next, end);
+
+ iova = start - section->offset_within_region +
+ section->offset_within_address_space;
+ vaddr = memory_region_get_ram_ptr(section->mr) + start;
+
+ ret = vfio_container_dma_map(bcontainer, iova, next - start,
+ vaddr, section->readonly, section->mr);
+ if (ret) {
+ /* Rollback */
+ vfio_ram_discard_notify_discard(rdl, section);
+ return ret;
+ }
+ }
+ return 0;
+}
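A standalone sketch of the chunking loop above (nothing QEMU-specific, just the arithmetic), showing why a populated range is split at granularity boundaries within the region so each piece can later be unmapped on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define ROUND_UP(x, a)  (((x) + (a) - 1) / (a) * (a))
    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    static void show_chunks(uint64_t start, uint64_t end, uint64_t granularity)
    {
        uint64_t next;

        for (; start < end; start = next) {
            next = MIN(ROUND_UP(start + 1, granularity), end);
            printf("map region offset [0x%llx, 0x%llx)\n",
                   (unsigned long long)start, (unsigned long long)next);
        }
    }

    int main(void)
    {
        /*
         * A populated range at region offsets 3MiB-9MiB with 2MiB granularity
         * yields four maps: 3-4MiB, 4-6MiB, 6-8MiB, 8-9MiB.
         */
        show_chunks(3 << 20, 9 << 20, 2 << 20);
        return 0;
    }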
+
+static void vfio_ram_discard_register_listener(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ int target_page_size = qemu_target_page_size();
+ VFIORamDiscardListener *vrdl;
+
+ /* Ignore some corner cases not relevant in practice. */
+ g_assert(QEMU_IS_ALIGNED(section->offset_within_region, target_page_size));
+ g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
+ target_page_size));
+ g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), target_page_size));
+
+ vrdl = g_new0(VFIORamDiscardListener, 1);
+ vrdl->bcontainer = bcontainer;
+ vrdl->mr = section->mr;
+ vrdl->offset_within_address_space = section->offset_within_address_space;
+ vrdl->size = int128_get64(section->size);
+ vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
+ section->mr);
+
+ g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
+ g_assert(bcontainer->pgsizes &&
+ vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));
+
+ ram_discard_listener_init(&vrdl->listener,
+ vfio_ram_discard_notify_populate,
+ vfio_ram_discard_notify_discard, true);
+ ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
+ QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);
+
+ /*
+ * Sanity-check if we have a theoretically problematic setup where we could
+ * exceed the maximum number of possible DMA mappings over time. We assume
+ * that each mapped section in the same address space as a RamDiscardManager
+ * section consumes exactly one DMA mapping, with the exception of
+ * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
+ * in the same address space as RamDiscardManager sections.
+ *
+ * We assume that each section in the address space consumes one memslot.
+ * We take the number of KVM memory slots as a best guess for the maximum
+ * number of sections in the address space we could have over time,
+ * also consuming DMA mappings.
+ */
+ if (bcontainer->dma_max_mappings) {
+ unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
+
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ max_memslots = kvm_get_max_memslots();
+ }
+#endif
+
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
+ hwaddr start, end;
+
+ start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
+ vrdl->granularity);
+ end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
+ vrdl->granularity);
+ vrdl_mappings += (end - start) / vrdl->granularity;
+ vrdl_count++;
+ }
+
+ if (vrdl_mappings + max_memslots - vrdl_count >
+ bcontainer->dma_max_mappings) {
+ warn_report("%s: possibly running out of DMA mappings. E.g., try"
+ " increasing the 'block-size' of virtio-mem devies."
+ " Maximum possible DMA mappings: %d, Maximum possible"
+ " memslots: %d", __func__, bcontainer->dma_max_mappings,
+ max_memslots);
+ }
+ }
+}
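As a worked example of the sanity check above (all numbers purely illustrative): a 16 GiB virtio-mem region with a 2 MiB block size can contribute up to 16 GiB / 2 MiB = 8192 mappings from that single listener (vrdl_count = 1). Assuming the container reports dma_max_mappings = 65536 and KVM offers 512 memslots, 8192 + 512 - 1 = 8703 stays comfortably under the limit; shrinking block-size to 128 KiB would raise the first term to 131072 and trigger the warning.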
+
+static void vfio_ram_discard_unregister_listener(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ VFIORamDiscardListener *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_address_space ==
+ section->offset_within_address_space) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to unregister missing RAM discard listener");
+ }
+
+ ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
+ QLIST_REMOVE(vrdl, next);
+ g_free(vrdl);
+}
+
+static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+
+ if (!TPM_IS_CRB(mr->owner)) {
+ return false;
+ }
+
+ /* this is a known safe misaligned region, just trace for debug purpose */
+ trace_vfio_known_safe_misalignment(memory_region_name(mr),
+ section->offset_within_address_space,
+ section->offset_within_region,
+ qemu_real_host_page_size());
+ return true;
+}
+
+static bool vfio_listener_valid_section(MemoryRegionSection *section,
+ const char *name)
+{
+ if (vfio_listener_skipped_section(section)) {
+ trace_vfio_listener_region_skip(name,
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return false;
+ }
+
+ if (unlikely((section->offset_within_address_space &
+ ~qemu_real_host_page_mask()) !=
+ (section->offset_within_region & ~qemu_real_host_page_mask()))) {
+ if (!vfio_known_safe_misalignment(section)) {
+ error_report("%s received unaligned region %s iova=0x%"PRIx64
+ " offset_within_region=0x%"PRIx64
+ " qemu_real_host_page_size=0x%"PRIxPTR,
+ __func__, memory_region_name(section->mr),
+ section->offset_within_address_space,
+ section->offset_within_region,
+ qemu_real_host_page_size());
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ hwaddr *out_iova, hwaddr *out_end,
+ Int128 *out_llend)
+{
+ Int128 llend;
+ hwaddr iova;
+
+ iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
+ llend = int128_make64(section->offset_within_address_space);
+ llend = int128_add(llend, section->size);
+ llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
+
+ if (int128_ge(int128_make64(iova), llend)) {
+ return false;
+ }
+
+ *out_iova = iova;
+ *out_end = int128_get64(int128_sub(llend, int128_one()));
+ if (out_llend) {
+ *out_llend = llend;
+ }
+ return true;
+}
+
+static void vfio_listener_begin(MemoryListener *listener)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ void (*listener_begin)(VFIOContainerBase *bcontainer);
+
+ listener_begin = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_begin;
+
+ if (listener_begin) {
+ listener_begin(bcontainer);
+ }
+}
+
+static void vfio_listener_commit(MemoryListener *listener)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ void (*listener_commit)(VFIOContainerBase *bcontainer);
+
+ listener_commit = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_commit;
+
+ if (listener_commit) {
+ listener_commit(bcontainer);
+ }
+}
+
+static void vfio_device_error_append(VFIODevice *vbasedev, Error **errp)
+{
+ /*
+ * MMIO region mapping failures are not fatal but in this case PCI
+ * peer-to-peer transactions are broken.
+ */
+ if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) {
+ error_append_hint(errp, "%s: PCI peer-to-peer transactions "
+ "on BARs are not supported.\n", vbasedev->name);
+ }
+}
+
+VFIORamDiscardListener *vfio_find_ram_discard_listener(
+ VFIOContainerBase *bcontainer, MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_address_space ==
+ section->offset_within_address_space) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to sync missing RAM discard listener");
+ /* does not return */
+ }
+ return vrdl;
+}
+
+static void vfio_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ vfio_container_region_add(bcontainer, section, false);
+}
+
+void vfio_container_region_add(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ bool cpr_remap)
+{
+ hwaddr iova, end;
+ Int128 llend, llsize;
+ void *vaddr;
+ int ret;
+ Error *err = NULL;
+
+ if (!vfio_listener_valid_section(section, "region_add")) {
+ return;
+ }
+
+ if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
+ &llend)) {
+ if (memory_region_is_ram_device(section->mr)) {
+ trace_vfio_listener_region_add_no_dma_map(
+ memory_region_name(section->mr),
+ section->offset_within_address_space,
+ int128_getlo(section->size),
+ qemu_real_host_page_size());
+ }
+ return;
+ }
+
+ /* PPC64/pseries machine only */
+ if (!vfio_container_add_section_window(bcontainer, section, &err)) {
+ goto mmio_dma_error;
+ }
+
+ memory_region_ref(section->mr);
+
+ if (memory_region_is_iommu(section->mr)) {
+ VFIOGuestIOMMU *giommu;
+ IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ int iommu_idx;
+
+ trace_vfio_listener_region_add_iommu(section->mr->name, iova, end);
+
+ if (cpr_remap) {
+ vfio_cpr_giommu_remap(bcontainer, section);
+ }
+
+ /*
+ * FIXME: For VFIO iommu types which have KVM acceleration to
+ * avoid bouncing all map/unmaps through qemu this way, this
+ * would be the right place to wire that up (tell the KVM
+ * device emulation the VFIO iommu handles to use).
+ */
+ giommu = g_malloc0(sizeof(*giommu));
+ giommu->iommu_mr = iommu_mr;
+ giommu->iommu_offset = section->offset_within_address_space -
+ section->offset_within_region;
+ giommu->bcontainer = bcontainer;
+ llend = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+ llend = int128_sub(llend, int128_one());
+ iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
+ iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
+ IOMMU_NOTIFIER_IOTLB_EVENTS,
+ section->offset_within_region,
+ int128_get64(llend),
+ iommu_idx);
+
+ ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
+ &err);
+ if (ret) {
+ g_free(giommu);
+ goto fail;
+ }
+ QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next);
+ memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
+
+ return;
+ }
+
+ /* Here we assume that memory_region_is_ram(section->mr)==true */
+
+ /*
+ * For RAM memory regions with a RamDiscardManager, we only want to map the
+ * actually populated parts - and update the mapping whenever we're notified
+ * about changes.
+ */
+ if (memory_region_has_ram_discard_manager(section->mr)) {
+ if (!cpr_remap) {
+ vfio_ram_discard_register_listener(bcontainer, section);
+ } else if (!vfio_cpr_ram_discard_register_listener(bcontainer,
+ section)) {
+ goto fail;
+ }
+ return;
+ }
+
+ vaddr = memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (iova - section->offset_within_address_space);
+
+ trace_vfio_listener_region_add_ram(iova, end, vaddr);
+
+ llsize = int128_sub(llend, int128_make64(iova));
+
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
+
+ if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
+ trace_vfio_listener_region_add_no_dma_map(
+ memory_region_name(section->mr),
+ section->offset_within_address_space,
+ int128_getlo(section->size),
+ pgmask + 1);
+ return;
+ }
+ }
+
+ ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize),
+ vaddr, section->readonly, section->mr);
+ if (ret) {
+ error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%s)",
+ bcontainer, iova, int128_get64(llsize), vaddr, ret,
+ strerror(-ret));
+ mmio_dma_error:
+ if (memory_region_is_ram_device(section->mr)) {
+ /* Allow unexpected mappings not to be fatal for RAM devices */
+ VFIODevice *vbasedev =
+ vfio_get_vfio_device(memory_region_owner(section->mr));
+ vfio_device_error_append(vbasedev, &err);
+ warn_report_err_once(err);
+ return;
+ }
+ goto fail;
+ }
+
+ return;
+
+fail:
+ if (!bcontainer->initialized) {
+ /*
+ * At machine init time or when the device is attached to the
+ * VM, store the first error in the container so we can
+ * gracefully fail the device realize routine.
+ */
+ if (!bcontainer->error) {
+ error_propagate_prepend(&bcontainer->error, err,
+ "Region %s: ",
+ memory_region_name(section->mr));
+ } else {
+ error_free(err);
+ }
+ } else {
+ /*
+ * At runtime, there's not much we can do other than throw a
+ * hardware error.
+ */
+ error_report_err(err);
+ hw_error("vfio: DMA mapping failed, unable to continue");
+ }
+}
+
+static void vfio_listener_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ hwaddr iova, end;
+ Int128 llend, llsize;
+ int ret;
+ bool try_unmap = true;
+
+ if (!vfio_listener_valid_section(section, "region_del")) {
+ return;
+ }
+
+ if (memory_region_is_iommu(section->mr)) {
+ VFIOGuestIOMMU *giommu;
+
+ trace_vfio_listener_region_del_iommu(section->mr->name);
+ QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
+ if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
+ giommu->n.start == section->offset_within_region) {
+ memory_region_unregister_iommu_notifier(section->mr,
+ &giommu->n);
+ QLIST_REMOVE(giommu, giommu_next);
+ g_free(giommu);
+ break;
+ }
+ }
+
+ /*
+ * FIXME: We assume the one big unmap below is adequate to
+ * remove any individual page mappings in the IOMMU which
+ * might have been copied into VFIO. This works for a page table
+ * based IOMMU where a big unmap flattens a large range of IO-PTEs.
+ * That may not be true for all IOMMU types.
+ */
+ }
+
+ if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
+ &llend)) {
+ return;
+ }
+
+ llsize = int128_sub(llend, int128_make64(iova));
+
+ trace_vfio_listener_region_del(iova, end);
+
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask;
+
+ pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
+ try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
+ } else if (memory_region_has_ram_discard_manager(section->mr)) {
+ vfio_ram_discard_unregister_listener(bcontainer, section);
+ /* Unregistering will trigger an unmap. */
+ try_unmap = false;
+ }
+
+ if (try_unmap) {
+ bool unmap_all = false;
+
+ if (int128_eq(llsize, int128_2_64())) {
+ unmap_all = true;
+ llsize = int128_zero();
+ }
+ ret = vfio_container_dma_unmap(bcontainer, iova, int128_get64(llsize),
+ NULL, unmap_all);
+ if (ret) {
+ error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%s)",
+ bcontainer, iova, int128_get64(llsize), ret,
+ strerror(-ret));
+ }
+ }
+
+ memory_region_unref(section->mr);
+
+ /* PPC64/pseries machine only */
+ vfio_container_del_section_window(bcontainer, section);
+}
+
+typedef struct VFIODirtyRanges {
+ hwaddr min32;
+ hwaddr max32;
+ hwaddr min64;
+ hwaddr max64;
+ hwaddr minpci64;
+ hwaddr maxpci64;
+} VFIODirtyRanges;
+
+typedef struct VFIODirtyRangesListener {
+ VFIOContainerBase *bcontainer;
+ VFIODirtyRanges ranges;
+ MemoryListener listener;
+} VFIODirtyRangesListener;
+
+static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
+ VFIOContainerBase *bcontainer)
+{
+ VFIOPCIDevice *pcidev;
+ VFIODevice *vbasedev;
+ Object *owner;
+
+ owner = memory_region_owner(section->mr);
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ if (OBJECT(pcidev) == owner) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void vfio_dirty_tracking_update_range(VFIODirtyRanges *range,
+ hwaddr iova, hwaddr end,
+ bool update_pci)
+{
+ hwaddr *min, *max;
+
+ /*
+ * The address space passed to the dirty tracker is reduced to three ranges:
+ * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
+ * PCI 64-bit hole.
+ *
+ * The underlying reports of dirty will query a sub-interval of each of
+ * these ranges.
+ *
+ * The purpose of the three range handling is to handle known cases of big
+ * holes in the address space, like the x86 AMD 1T hole, and firmware (like
+ * OVMF) which may relocate the pci-hole64 to the end of the address space.
+ * The latter would otherwise generate large ranges for tracking, stressing
+ * the limits of supported hardware. The pci-hole32 will always be below 4G
+ * (overlapping or not) so it doesn't need special handling and is part of
+ * the 32-bit range.
+ *
+ * The alternative would be an IOVATree but that has a much bigger runtime
+ * overhead and unnecessary complexity.
+ */
+ if (update_pci && iova >= UINT32_MAX) {
+ min = &range->minpci64;
+ max = &range->maxpci64;
+ } else {
+ min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
+ max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
+ }
+ if (*min > iova) {
+ *min = iova;
+ }
+ if (*max < end) {
+ *max = end;
+ }
+
+ trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
+}
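To make the three buckets above concrete (addresses purely illustrative): with guest RAM at [0, 3 GiB) and [4 GiB, 36 GiB), and firmware placing a vfio-pci BAR in a 64-bit hole around 64 TiB, the dirty-tracking listener built on this helper ends up with [0, 3 GiB) in the 32-bit range, [4 GiB, 36 GiB) in the 64-bit range, and only the BAR itself in the pci64 range, rather than asking the device to track one range stretching from 0 to beyond 64 TiB.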
+
+static void vfio_dirty_tracking_update(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIODirtyRangesListener *dirty =
+ container_of(listener, VFIODirtyRangesListener, listener);
+ hwaddr iova, end;
+
+ if (!vfio_listener_valid_section(section, "tracking_update") ||
+ !vfio_get_section_iova_range(dirty->bcontainer, section,
+ &iova, &end, NULL)) {
+ return;
+ }
+
+ vfio_dirty_tracking_update_range(&dirty->ranges, iova, end,
+ vfio_section_is_vfio_pci(section, dirty->bcontainer));
+}
+
+static const MemoryListener vfio_dirty_tracking_listener = {
+ .name = "vfio-tracking",
+ .region_add = vfio_dirty_tracking_update,
+};
+
+static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer,
+ VFIODirtyRanges *ranges)
+{
+ VFIODirtyRangesListener dirty;
+
+ memset(&dirty, 0, sizeof(dirty));
+ dirty.ranges.min32 = UINT32_MAX;
+ dirty.ranges.min64 = UINT64_MAX;
+ dirty.ranges.minpci64 = UINT64_MAX;
+ dirty.listener = vfio_dirty_tracking_listener;
+ dirty.bcontainer = bcontainer;
+
+ memory_listener_register(&dirty.listener,
+ bcontainer->space->as);
+
+ *ranges = dirty.ranges;
+
+ /*
+ * The memory listener is synchronous: registering it replays all existing
+ * sections through region_add, so the ranges are fully populated by the
+ * time memory_listener_register() returns. Unregister it once we are done,
+ * as we are not interested in any follow-up updates.
+ */
+ memory_listener_unregister(&dirty.listener);
+}
+
+static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
+ sizeof(uint64_t))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+ VFIODevice *vbasedev;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ int ret;
+
+ if (!vbasedev->dirty_tracking) {
+ continue;
+ }
+
+ ret = vbasedev->io_ops->device_feature(vbasedev, feature);
+
+ if (ret != 0) {
+ warn_report("%s: Failed to stop DMA logging, err %d (%s)",
+ vbasedev->name, -ret, strerror(-ret));
+ }
+ vbasedev->dirty_tracking = false;
+ }
+}
+
+static struct vfio_device_feature *
+vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer,
+ VFIODirtyRanges *tracking)
+{
+ struct vfio_device_feature *feature;
+ size_t feature_size;
+ struct vfio_device_feature_dma_logging_control *control;
+ struct vfio_device_feature_dma_logging_range *ranges;
+
+ feature_size = sizeof(struct vfio_device_feature) +
+ sizeof(struct vfio_device_feature_dma_logging_control);
+ feature = g_try_malloc0(feature_size);
+ if (!feature) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ feature->argsz = feature_size;
+ feature->flags = VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
+
+ control = (struct vfio_device_feature_dma_logging_control *)feature->data;
+ control->page_size = qemu_real_host_page_size();
+
+ /*
+ * DMA logging uAPI guarantees to support at least a number of ranges that
+ * fits into a single host kernel base page.
+ */
+ control->num_ranges = !!tracking->max32 + !!tracking->max64 +
+ !!tracking->maxpci64;
+ ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
+ control->num_ranges);
+ if (!ranges) {
+ g_free(feature);
+ errno = ENOMEM;
+
+ return NULL;
+ }
+
+ control->ranges = (uintptr_t)ranges;
+ if (tracking->max32) {
+ ranges->iova = tracking->min32;
+ ranges->length = (tracking->max32 - tracking->min32) + 1;
+ ranges++;
+ }
+ if (tracking->max64) {
+ ranges->iova = tracking->min64;
+ ranges->length = (tracking->max64 - tracking->min64) + 1;
+ ranges++;
+ }
+ if (tracking->maxpci64) {
+ ranges->iova = tracking->minpci64;
+ ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
+ }
+
+ trace_vfio_device_dirty_tracking_start(control->num_ranges,
+ tracking->min32, tracking->max32,
+ tracking->min64, tracking->max64,
+ tracking->minpci64, tracking->maxpci64);
+
+ return feature;
+}
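+
+/*
+ * Layout of the object built above, as a reading aid: "feature" is a single
+ * allocation holding the vfio_device_feature header immediately followed by
+ * the dma_logging_control payload, while the range array is a separate
+ * allocation referenced through control->ranges as a u64-encoded pointer.
+ * Each device then receives the feature through
+ * vbasedev->io_ops->device_feature(), which for the kernel VFIO backend is
+ * expected to end up as a VFIO_DEVICE_FEATURE ioctl.
+ */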
+
+static void vfio_device_feature_dma_logging_start_destroy(
+ struct vfio_device_feature *feature)
+{
+ struct vfio_device_feature_dma_logging_control *control =
+ (struct vfio_device_feature_dma_logging_control *)feature->data;
+ struct vfio_device_feature_dma_logging_range *ranges =
+ (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;
+
+ g_free(ranges);
+ g_free(feature);
+}
+
+static bool vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer,
+ Error **errp)
+{
+ struct vfio_device_feature *feature;
+ VFIODirtyRanges ranges;
+ VFIODevice *vbasedev;
+ int ret = 0;
+
+ vfio_dirty_tracking_init(bcontainer, &ranges);
+ feature = vfio_device_feature_dma_logging_start_create(bcontainer,
+ &ranges);
+ if (!feature) {
+ error_setg_errno(errp, errno, "Failed to prepare DMA logging");
+ return false;
+ }
+
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
+ if (vbasedev->dirty_tracking) {
+ continue;
+ }
+
+ ret = vbasedev->io_ops->device_feature(vbasedev, feature);
+ if (ret) {
+ error_setg_errno(errp, -ret, "%s: Failed to start DMA logging",
+ vbasedev->name);
+ goto out;
+ }
+ vbasedev->dirty_tracking = true;
+ }
+
+out:
+ if (ret) {
+ vfio_devices_dma_logging_stop(bcontainer);
+ }
+
+ vfio_device_feature_dma_logging_start_destroy(feature);
+
+ return ret == 0;
+}
+
+static bool vfio_listener_log_global_start(MemoryListener *listener,
+ Error **errp)
+{
+ ERRP_GUARD();
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ bool ret;
+
+ if (vfio_container_devices_dirty_tracking_is_supported(bcontainer)) {
+ ret = vfio_devices_dma_logging_start(bcontainer, errp);
+ } else {
+ ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp) == 0;
+ }
+
+ if (!ret) {
+ error_prepend(errp, "vfio: Could not start dirty page tracking - ");
+ }
+ return ret;
+}
+
+static void vfio_listener_log_global_stop(MemoryListener *listener)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ Error *local_err = NULL;
+ int ret = 0;
+
+ if (vfio_container_devices_dirty_tracking_is_supported(bcontainer)) {
+ vfio_devices_dma_logging_stop(bcontainer);
+ } else {
+ ret = vfio_container_set_dirty_page_tracking(bcontainer, false,
+ &local_err);
+ }
+
+ if (ret) {
+ error_prepend(&local_err,
+ "vfio: Could not stop dirty page tracking - ");
+ if (migration_is_running()) {
+ migration_file_set_error(ret, local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ }
+}
+
+typedef struct {
+ IOMMUNotifier n;
+ VFIOGuestIOMMU *giommu;
+} vfio_giommu_dirty_notifier;
+
+static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ vfio_giommu_dirty_notifier *gdn = container_of(n,
+ vfio_giommu_dirty_notifier, n);
+ VFIOGuestIOMMU *giommu = gdn->giommu;
+ VFIOContainerBase *bcontainer = giommu->bcontainer;
+ hwaddr iova = iotlb->iova + giommu->iommu_offset;
+ ram_addr_t translated_addr;
+ Error *local_err = NULL;
+ int ret = -EINVAL;
+ MemoryRegion *mr;
+ hwaddr xlat;
+
+ trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_setg(&local_err,
+ "Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ goto out;
+ }
+
+ rcu_read_lock();
+ mr = vfio_translate_iotlb(iotlb, &xlat, &local_err);
+ if (!mr) {
+ goto out_unlock;
+ }
+ translated_addr = memory_region_get_ram_addr(mr) + xlat;
+
+ ret = vfio_container_query_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
+ translated_addr, &local_err);
+ if (ret) {
+ error_prepend(&local_err,
+ "vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") failed - ", bcontainer, iova,
+ iotlb->addr_mask + 1);
+ }
+
+out_unlock:
+ rcu_read_unlock();
+
+out:
+ if (ret) {
+ if (migration_is_running()) {
+ migration_file_set_error(ret, local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ }
+}
+
+static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
+ void *opaque)
+{
+ const hwaddr size = int128_get64(section->size);
+ const hwaddr iova = section->offset_within_address_space;
+ const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
+ section->offset_within_region;
+ VFIORamDiscardListener *vrdl = opaque;
+ Error *local_err = NULL;
+ int ret;
+
+ /*
+ * Sync the whole mapped region (spanning multiple individual mappings)
+ * in one go.
+ */
+ ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
+ &local_err);
+ if (ret) {
+ error_report_err(local_err);
+ }
+ return ret;
+}
+
+static int
+vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ VFIORamDiscardListener *vrdl =
+ vfio_find_ram_discard_listener(bcontainer, section);
+
+ /*
+ * We only want/can synchronize the bitmap for actually mapped parts -
+ * which correspond to populated parts. Replay all populated parts.
+ */
+ return ram_discard_manager_replay_populated(rdm, section,
+ vfio_ram_discard_query_dirty_bitmap,
+ vrdl);
+}
+
+static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ VFIOGuestIOMMU *giommu;
+ bool found = false;
+ Int128 llend;
+ vfio_giommu_dirty_notifier gdn;
+ int idx;
+
+ QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
+ if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
+ giommu->n.start == section->offset_within_region) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ return 0;
+ }
+
+ gdn.giommu = giommu;
+ idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
+
+ llend = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+ llend = int128_sub(llend, int128_one());
+
+ iommu_notifier_init(&gdn.n, vfio_iommu_map_dirty_notify, IOMMU_NOTIFIER_MAP,
+ section->offset_within_region, int128_get64(llend),
+ idx);
+ memory_region_iommu_replay(giommu->iommu_mr, &gdn.n);
+
+ return 0;
+}
+
+static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section, Error **errp)
+{
+ ram_addr_t ram_addr;
+
+ if (memory_region_is_iommu(section->mr)) {
+ return vfio_sync_iommu_dirty_bitmap(bcontainer, section);
+ } else if (memory_region_has_ram_discard_manager(section->mr)) {
+ int ret;
+
+ ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
+ if (ret) {
+ error_setg(errp,
+ "Failed to sync dirty bitmap with RAM discard listener");
+ }
+ return ret;
+ }
+
+ ram_addr = memory_region_get_ram_addr(section->mr) +
+ section->offset_within_region;
+
+ return vfio_container_query_dirty_bitmap(bcontainer,
+ REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
+ int128_get64(section->size), ram_addr, errp);
+}
+
+static void vfio_listener_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
+ int ret;
+ Error *local_err = NULL;
+
+ if (vfio_listener_skipped_section(section)) {
+ return;
+ }
+
+ if (vfio_log_sync_needed(bcontainer)) {
+ ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err);
+ if (ret) {
+ if (migration_is_running()) {
+ migration_file_set_error(ret, local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ }
+ }
+}
+
+static const MemoryListener vfio_memory_listener = {
+ .name = "vfio",
+ .begin = vfio_listener_begin,
+ .commit = vfio_listener_commit,
+ .region_add = vfio_listener_region_add,
+ .region_del = vfio_listener_region_del,
+ .log_global_start = vfio_listener_log_global_start,
+ .log_global_stop = vfio_listener_log_global_stop,
+ .log_sync = vfio_listener_log_sync,
+};
+
+bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp)
+{
+ bcontainer->listener = vfio_memory_listener;
+ memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+
+ if (bcontainer->error) {
+ error_propagate_prepend(errp, bcontainer->error,
+ "memory listener initialization failed: ");
+ return false;
+ }
+
+ return true;
+}
+
+void vfio_listener_unregister(VFIOContainerBase *bcontainer)
+{
+ memory_listener_unregister(&bcontainer->listener);
+}
diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build
index bba776f..63ea393 100644
--- a/hw/vfio/meson.build
+++ b/hw/vfio/meson.build
@@ -1,26 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
vfio_ss = ss.source_set()
vfio_ss.add(files(
- 'helpers.c',
- 'common.c',
+ 'listener.c',
'container-base.c',
'container.c',
- 'migration.c',
- 'cpr.c',
+ 'helpers.c',
))
vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c'))
-vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files(
- 'iommufd.c',
-))
vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
- 'display.c',
'pci-quirks.c',
'pci.c',
))
vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c'))
vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c'))
-vfio_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c'))
-vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c'))
vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c'))
vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c'))
specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss)
+
+system_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c'))
+system_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c'))
+system_ss.add(when: 'CONFIG_VFIO', if_true: files(
+ 'cpr.c',
+ 'cpr-legacy.c',
+ 'device.c',
+ 'migration.c',
+ 'migration-multifd.c',
+ 'region.c',
+))
+system_ss.add(when: ['CONFIG_VFIO', 'CONFIG_IOMMUFD'], if_true: files(
+ 'iommufd.c',
+))
+system_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
+ 'display.c',
+))
diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c
new file mode 100644
index 0000000..850a319
--- /dev/null
+++ b/hw/vfio/migration-multifd.c
@@ -0,0 +1,685 @@
+/*
+ * Multifd VFIO migration
+ *
+ * Copyright (C) 2024,2025 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/vfio/vfio-device.h"
+#include "migration/misc.h"
+#include "qapi/error.h"
+#include "qemu/bswap.h"
+#include "qemu/error-report.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/thread.h"
+#include "io/channel-buffer.h"
+#include "migration/qemu-file.h"
+#include "migration-multifd.h"
+#include "vfio-migration-internal.h"
+#include "trace.h"
+
+#define VFIO_DEVICE_STATE_CONFIG_STATE (1)
+
+#define VFIO_DEVICE_STATE_PACKET_VER_CURRENT (0)
+
+typedef struct VFIODeviceStatePacket {
+ uint32_t version;
+ uint32_t idx;
+ uint32_t flags;
+ uint8_t data[0];
+} QEMU_PACKED VFIODeviceStatePacket;
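+
+/*
+ * On-wire summary, derived from the code below rather than a separate spec:
+ * the version, idx and flags header fields travel big-endian; device state
+ * chunks are numbered with consecutive idx values starting at 0 and may
+ * arrive over the multifd channels in any order; the final packet carries
+ * VFIO_DEVICE_STATE_CONFIG_STATE together with the device config data.
+ */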
+
+/* type safety */
+typedef struct VFIOStateBuffers {
+ GArray *array;
+} VFIOStateBuffers;
+
+typedef struct VFIOStateBuffer {
+ bool is_present;
+ char *data;
+ size_t len;
+} VFIOStateBuffer;
+
+typedef struct VFIOMultifd {
+ bool load_bufs_thread_running;
+ bool load_bufs_thread_want_exit;
+
+ VFIOStateBuffers load_bufs;
+ QemuCond load_bufs_buffer_ready_cond;
+ QemuCond load_bufs_thread_finished_cond;
+ QemuMutex load_bufs_mutex; /* Lock order: this lock -> BQL */
+ uint32_t load_buf_idx;
+ uint32_t load_buf_idx_last;
+} VFIOMultifd;
+
+static void vfio_state_buffer_clear(gpointer data)
+{
+ VFIOStateBuffer *lb = data;
+
+ if (!lb->is_present) {
+ return;
+ }
+
+ g_clear_pointer(&lb->data, g_free);
+ lb->is_present = false;
+}
+
+static void vfio_state_buffers_init(VFIOStateBuffers *bufs)
+{
+ bufs->array = g_array_new(FALSE, TRUE, sizeof(VFIOStateBuffer));
+ g_array_set_clear_func(bufs->array, vfio_state_buffer_clear);
+}
+
+static void vfio_state_buffers_destroy(VFIOStateBuffers *bufs)
+{
+ g_clear_pointer(&bufs->array, g_array_unref);
+}
+
+static void vfio_state_buffers_assert_init(VFIOStateBuffers *bufs)
+{
+ assert(bufs->array);
+}
+
+static unsigned int vfio_state_buffers_size_get(VFIOStateBuffers *bufs)
+{
+ return bufs->array->len;
+}
+
+static void vfio_state_buffers_size_set(VFIOStateBuffers *bufs,
+ unsigned int size)
+{
+ g_array_set_size(bufs->array, size);
+}
+
+static VFIOStateBuffer *vfio_state_buffers_at(VFIOStateBuffers *bufs,
+ unsigned int idx)
+{
+ return &g_array_index(bufs->array, VFIOStateBuffer, idx);
+}
+
+/* called with load_bufs_mutex locked */
+static bool vfio_load_state_buffer_insert(VFIODevice *vbasedev,
+ VFIODeviceStatePacket *packet,
+ size_t packet_total_size,
+ Error **errp)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ VFIOStateBuffer *lb;
+
+ vfio_state_buffers_assert_init(&multifd->load_bufs);
+ if (packet->idx >= vfio_state_buffers_size_get(&multifd->load_bufs)) {
+ vfio_state_buffers_size_set(&multifd->load_bufs, packet->idx + 1);
+ }
+
+ lb = vfio_state_buffers_at(&multifd->load_bufs, packet->idx);
+ if (lb->is_present) {
+ error_setg(errp, "%s: state buffer %" PRIu32 " already filled",
+ vbasedev->name, packet->idx);
+ return false;
+ }
+
+ assert(packet->idx >= multifd->load_buf_idx);
+
+ lb->data = g_memdup2(&packet->data, packet_total_size - sizeof(*packet));
+ lb->len = packet_total_size - sizeof(*packet);
+ lb->is_present = true;
+
+ return true;
+}
+
+bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size,
+ Error **errp)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ VFIODeviceStatePacket *packet = (VFIODeviceStatePacket *)data;
+
+ if (!vfio_multifd_transfer_enabled(vbasedev)) {
+ error_setg(errp,
+ "%s: got device state packet but not doing multifd transfer",
+ vbasedev->name);
+ return false;
+ }
+
+ assert(multifd);
+
+ if (data_size < sizeof(*packet)) {
+ error_setg(errp, "%s: packet too short at %zu (min is %zu)",
+ vbasedev->name, data_size, sizeof(*packet));
+ return false;
+ }
+
+ packet->version = be32_to_cpu(packet->version);
+ if (packet->version != VFIO_DEVICE_STATE_PACKET_VER_CURRENT) {
+ error_setg(errp, "%s: packet has unknown version %" PRIu32,
+ vbasedev->name, packet->version);
+ return false;
+ }
+
+ packet->idx = be32_to_cpu(packet->idx);
+ packet->flags = be32_to_cpu(packet->flags);
+
+ if (packet->idx == UINT32_MAX) {
+ error_setg(errp, "%s: packet index is invalid", vbasedev->name);
+ return false;
+ }
+
+ trace_vfio_load_state_device_buffer_incoming(vbasedev->name, packet->idx);
+
+ /*
+ * Holding BQL here would violate the lock order and can cause
+ * a deadlock once we attempt to lock load_bufs_mutex below.
+ */
+ assert(!bql_locked());
+
+ WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) {
+ /* config state packet should be the last one in the stream */
+ if (packet->flags & VFIO_DEVICE_STATE_CONFIG_STATE) {
+ multifd->load_buf_idx_last = packet->idx;
+ }
+
+ if (!vfio_load_state_buffer_insert(vbasedev, packet, data_size,
+ errp)) {
+ return false;
+ }
+
+ qemu_cond_signal(&multifd->load_bufs_buffer_ready_cond);
+ }
+
+ return true;
+}
+
+static bool vfio_load_bufs_thread_load_config(VFIODevice *vbasedev,
+ Error **errp)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ VFIOStateBuffer *lb;
+ g_autoptr(QIOChannelBuffer) bioc = NULL;
+ g_autoptr(QEMUFile) f_out = NULL, f_in = NULL;
+ uint64_t mig_header;
+ int ret;
+
+ assert(multifd->load_buf_idx == multifd->load_buf_idx_last);
+ lb = vfio_state_buffers_at(&multifd->load_bufs, multifd->load_buf_idx);
+ assert(lb->is_present);
+
+ bioc = qio_channel_buffer_new(lb->len);
+ qio_channel_set_name(QIO_CHANNEL(bioc), "vfio-device-config-load");
+
+ f_out = qemu_file_new_output(QIO_CHANNEL(bioc));
+ qemu_put_buffer(f_out, (uint8_t *)lb->data, lb->len);
+
+ ret = qemu_fflush(f_out);
+ if (ret) {
+ error_setg(errp, "%s: load config state flush failed: %d",
+ vbasedev->name, ret);
+ return false;
+ }
+
+ qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL);
+ f_in = qemu_file_new_input(QIO_CHANNEL(bioc));
+
+ mig_header = qemu_get_be64(f_in);
+ if (mig_header != VFIO_MIG_FLAG_DEV_CONFIG_STATE) {
+ error_setg(errp, "%s: expected FLAG_DEV_CONFIG_STATE but got %" PRIx64,
+ vbasedev->name, mig_header);
+ return false;
+ }
+
+ bql_lock();
+ ret = vfio_load_device_config_state(f_in, vbasedev);
+ bql_unlock();
+
+ if (ret < 0) {
+ error_setg(errp, "%s: vfio_load_device_config_state() failed: %d",
+ vbasedev->name, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static VFIOStateBuffer *vfio_load_state_buffer_get(VFIOMultifd *multifd)
+{
+ VFIOStateBuffer *lb;
+ unsigned int bufs_len;
+
+ bufs_len = vfio_state_buffers_size_get(&multifd->load_bufs);
+ if (multifd->load_buf_idx >= bufs_len) {
+ assert(multifd->load_buf_idx == bufs_len);
+ return NULL;
+ }
+
+ lb = vfio_state_buffers_at(&multifd->load_bufs,
+ multifd->load_buf_idx);
+ if (!lb->is_present) {
+ return NULL;
+ }
+
+ return lb;
+}
+
+static bool vfio_load_state_buffer_write(VFIODevice *vbasedev,
+ VFIOStateBuffer *lb,
+ Error **errp)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ g_autofree char *buf = NULL;
+ char *buf_cur;
+ size_t buf_len;
+
+ if (!lb->len) {
+ return true;
+ }
+
+ trace_vfio_load_state_device_buffer_load_start(vbasedev->name,
+ multifd->load_buf_idx);
+
+ /* lb might become re-allocated when we drop the lock */
+ buf = g_steal_pointer(&lb->data);
+ buf_cur = buf;
+ buf_len = lb->len;
+ while (buf_len > 0) {
+ ssize_t wr_ret;
+ int errno_save;
+
+ /*
+ * Loading data to the device takes a while,
+ * drop the lock during this process.
+ */
+ qemu_mutex_unlock(&multifd->load_bufs_mutex);
+ wr_ret = write(migration->data_fd, buf_cur, buf_len);
+ errno_save = errno;
+ qemu_mutex_lock(&multifd->load_bufs_mutex);
+
+ if (wr_ret < 0) {
+ error_setg(errp,
+ "%s: writing state buffer %" PRIu32 " failed: %d",
+ vbasedev->name, multifd->load_buf_idx, errno_save);
+ return false;
+ }
+
+ assert(wr_ret <= buf_len);
+ buf_len -= wr_ret;
+ buf_cur += wr_ret;
+ }
+
+ trace_vfio_load_state_device_buffer_load_end(vbasedev->name,
+ multifd->load_buf_idx);
+
+ return true;
+}
+
+static bool vfio_load_bufs_thread_want_exit(VFIOMultifd *multifd,
+ bool *should_quit)
+{
+ return multifd->load_bufs_thread_want_exit || qatomic_read(should_quit);
+}
+
+/*
+ * This thread is spawned by vfio_multifd_switchover_start(), which gets
+ * called upon encountering the switchover point marker in the main
+ * migration stream.
+ *
+ * It exits after either:
+ * * completing loading the remaining device state and device config, OR:
+ * * encountering some error while doing the above, OR:
+ * * being forcefully aborted by the migration core by it setting should_quit
+ * or by vfio_load_cleanup_load_bufs_thread() setting
+ * multifd->load_bufs_thread_want_exit.
+ */
+static bool vfio_load_bufs_thread(void *opaque, bool *should_quit, Error **errp)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+ bool ret = false;
+
+ trace_vfio_load_bufs_thread_start(vbasedev->name);
+
+ assert(multifd);
+ QEMU_LOCK_GUARD(&multifd->load_bufs_mutex);
+
+ assert(multifd->load_bufs_thread_running);
+
+ while (true) {
+ VFIOStateBuffer *lb;
+
+ /*
+ * Always check for cancellation first, also right after the buffer_ready
+ * wait below, since that cond may have been signalled by
+ * vfio_load_cleanup_load_bufs_thread().
+ */
+ if (vfio_load_bufs_thread_want_exit(multifd, should_quit)) {
+ error_setg(errp, "operation cancelled");
+ goto thread_exit;
+ }
+
+ assert(multifd->load_buf_idx <= multifd->load_buf_idx_last);
+
+ lb = vfio_load_state_buffer_get(multifd);
+ if (!lb) {
+ trace_vfio_load_state_device_buffer_starved(vbasedev->name,
+ multifd->load_buf_idx);
+ qemu_cond_wait(&multifd->load_bufs_buffer_ready_cond,
+ &multifd->load_bufs_mutex);
+ continue;
+ }
+
+ if (multifd->load_buf_idx == multifd->load_buf_idx_last) {
+ break;
+ }
+
+ if (multifd->load_buf_idx == 0) {
+ trace_vfio_load_state_device_buffer_start(vbasedev->name);
+ }
+
+ if (!vfio_load_state_buffer_write(vbasedev, lb, errp)) {
+ goto thread_exit;
+ }
+
+ if (multifd->load_buf_idx == multifd->load_buf_idx_last - 1) {
+ trace_vfio_load_state_device_buffer_end(vbasedev->name);
+ }
+
+ multifd->load_buf_idx++;
+ }
+
+ if (!vfio_load_bufs_thread_load_config(vbasedev, errp)) {
+ goto thread_exit;
+ }
+
+ ret = true;
+
+thread_exit:
+ /*
+ * Notify possibly waiting vfio_load_cleanup_load_bufs_thread() that
+ * this thread is exiting.
+ */
+ multifd->load_bufs_thread_running = false;
+ qemu_cond_signal(&multifd->load_bufs_thread_finished_cond);
+
+ trace_vfio_load_bufs_thread_end(vbasedev->name);
+
+ return ret;
+}
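+
+/*
+ * Hypothetical timeline illustrating the loop above: if buffer idx 2 arrives
+ * on a multifd channel before idx 1, the thread looking for idx 1 finds no
+ * buffer present, traces a "starved" event and sleeps on
+ * load_bufs_buffer_ready_cond; once idx 1 is inserted and the cond is
+ * signalled, buffers 1 and 2 are written to the device in order, and the
+ * packet flagged as config state (idx == load_buf_idx_last) is applied last
+ * via vfio_load_bufs_thread_load_config().
+ */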
+
+static VFIOMultifd *vfio_multifd_new(void)
+{
+ VFIOMultifd *multifd = g_new(VFIOMultifd, 1);
+
+ vfio_state_buffers_init(&multifd->load_bufs);
+
+ qemu_mutex_init(&multifd->load_bufs_mutex);
+
+ multifd->load_buf_idx = 0;
+ multifd->load_buf_idx_last = UINT32_MAX;
+ qemu_cond_init(&multifd->load_bufs_buffer_ready_cond);
+
+ multifd->load_bufs_thread_running = false;
+ multifd->load_bufs_thread_want_exit = false;
+ qemu_cond_init(&multifd->load_bufs_thread_finished_cond);
+
+ return multifd;
+}
+
+/*
+ * Terminates vfio_load_bufs_thread by setting
+ * multifd->load_bufs_thread_want_exit and signalling all the conditions
+ * the thread could be blocked on.
+ *
+ * Waits for the thread to signal that it has finished.
+ */
+static void vfio_load_cleanup_load_bufs_thread(VFIOMultifd *multifd)
+{
+ /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */
+ bql_unlock();
+ WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) {
+ while (multifd->load_bufs_thread_running) {
+ multifd->load_bufs_thread_want_exit = true;
+
+ qemu_cond_signal(&multifd->load_bufs_buffer_ready_cond);
+ qemu_cond_wait(&multifd->load_bufs_thread_finished_cond,
+ &multifd->load_bufs_mutex);
+ }
+ }
+ bql_lock();
+}
+
+static void vfio_multifd_free(VFIOMultifd *multifd)
+{
+ vfio_load_cleanup_load_bufs_thread(multifd);
+
+ qemu_cond_destroy(&multifd->load_bufs_thread_finished_cond);
+ vfio_state_buffers_destroy(&multifd->load_bufs);
+ qemu_cond_destroy(&multifd->load_bufs_buffer_ready_cond);
+ qemu_mutex_destroy(&multifd->load_bufs_mutex);
+
+ g_free(multifd);
+}
+
+void vfio_multifd_cleanup(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ g_clear_pointer(&migration->multifd, vfio_multifd_free);
+}
+
+bool vfio_multifd_transfer_supported(void)
+{
+ return multifd_device_state_supported() &&
+ migrate_send_switchover_start();
+}
+
+bool vfio_multifd_transfer_enabled(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ return migration->multifd_transfer;
+}
+
+bool vfio_multifd_setup(VFIODevice *vbasedev, bool alloc_multifd, Error **errp)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ /*
+ * Make a copy of this setting at the start in case it is changed
+ * mid-migration.
+ */
+ if (vbasedev->migration_multifd_transfer == ON_OFF_AUTO_AUTO) {
+ migration->multifd_transfer = vfio_multifd_transfer_supported();
+ } else {
+ migration->multifd_transfer =
+ vbasedev->migration_multifd_transfer == ON_OFF_AUTO_ON;
+ }
+
+ if (!vfio_multifd_transfer_enabled(vbasedev)) {
+ /* Nothing further to check or do */
+ return true;
+ }
+
+ if (!vfio_multifd_transfer_supported()) {
+ error_setg(errp,
+ "%s: Multifd device transfer requested but unsupported in the current config",
+ vbasedev->name);
+ return false;
+ }
+
+ if (alloc_multifd) {
+ assert(!migration->multifd);
+ migration->multifd = vfio_multifd_new();
+ }
+
+ return true;
+}
+
+void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f)
+{
+ assert(vfio_multifd_transfer_enabled(vbasedev));
+
+ /*
+ * Emit dummy NOP data on the main migration channel since the actual
+ * device state transfer is done via multifd channels.
+ */
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+}
+
+static bool
+vfio_save_complete_precopy_thread_config_state(VFIODevice *vbasedev,
+ char *idstr,
+ uint32_t instance_id,
+ uint32_t idx,
+ Error **errp)
+{
+ g_autoptr(QIOChannelBuffer) bioc = NULL;
+ g_autoptr(QEMUFile) f = NULL;
+ int ret;
+ g_autofree VFIODeviceStatePacket *packet = NULL;
+ size_t packet_len;
+
+ bioc = qio_channel_buffer_new(0);
+ qio_channel_set_name(QIO_CHANNEL(bioc), "vfio-device-config-save");
+
+ f = qemu_file_new_output(QIO_CHANNEL(bioc));
+
+ if (vfio_save_device_config_state(f, vbasedev, errp)) {
+ return false;
+ }
+
+ ret = qemu_fflush(f);
+ if (ret) {
+ error_setg(errp, "%s: save config state flush failed: %d",
+ vbasedev->name, ret);
+ return false;
+ }
+
+ packet_len = sizeof(*packet) + bioc->usage;
+ packet = g_malloc0(packet_len);
+ packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT);
+ packet->idx = cpu_to_be32(idx);
+ packet->flags = cpu_to_be32(VFIO_DEVICE_STATE_CONFIG_STATE);
+ memcpy(&packet->data, bioc->data, bioc->usage);
+
+ if (!multifd_queue_device_state(idstr, instance_id,
+ (char *)packet, packet_len)) {
+ error_setg(errp, "%s: multifd config data queuing failed",
+ vbasedev->name);
+ return false;
+ }
+
+ vfio_migration_add_bytes_transferred(packet_len);
+
+ return true;
+}
+
+/*
+ * This thread is spawned by the migration core directly via
+ * .save_live_complete_precopy_thread SaveVMHandler.
+ *
+ * It exits after either:
+ * * completing saving the remaining device state and device config, OR:
+ * * encountering some error while doing the above, OR:
+ * * being forcefully aborted by the migration core by
+ * multifd_device_state_save_thread_should_exit() returning true.
+ */
+bool
+vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
+ Error **errp)
+{
+ VFIODevice *vbasedev = d->handler_opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ bool ret = false;
+ g_autofree VFIODeviceStatePacket *packet = NULL;
+ uint32_t idx;
+
+ if (!vfio_multifd_transfer_enabled(vbasedev)) {
+ /* Nothing to do, vfio_save_complete_precopy() does the transfer. */
+ return true;
+ }
+
+ trace_vfio_save_complete_precopy_thread_start(vbasedev->name,
+ d->idstr, d->instance_id);
+
+ /* We reach here with device state STOP or STOP_COPY only */
+ if (vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY,
+ VFIO_DEVICE_STATE_STOP, errp)) {
+ goto thread_exit;
+ }
+
+ packet = g_malloc0(sizeof(*packet) + migration->data_buffer_size);
+ packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT);
+
+ for (idx = 0; ; idx++) {
+ ssize_t data_size;
+ size_t packet_size;
+
+ if (multifd_device_state_save_thread_should_exit()) {
+ error_setg(errp, "operation cancelled");
+ goto thread_exit;
+ }
+
+ data_size = read(migration->data_fd, &packet->data,
+ migration->data_buffer_size);
+ if (data_size < 0) {
+ error_setg(errp, "%s: reading state buffer %" PRIu32 " failed: %d",
+ vbasedev->name, idx, errno);
+ goto thread_exit;
+ } else if (data_size == 0) {
+ break;
+ }
+
+ packet->idx = cpu_to_be32(idx);
+ packet_size = sizeof(*packet) + data_size;
+
+ if (!multifd_queue_device_state(d->idstr, d->instance_id,
+ (char *)packet, packet_size)) {
+ error_setg(errp, "%s: multifd data queuing failed", vbasedev->name);
+ goto thread_exit;
+ }
+
+ vfio_migration_add_bytes_transferred(packet_size);
+ }
+
+ if (!vfio_save_complete_precopy_thread_config_state(vbasedev,
+ d->idstr,
+ d->instance_id,
+ idx, errp)) {
+ goto thread_exit;
+ }
+
+ ret = true;
+
+thread_exit:
+ trace_vfio_save_complete_precopy_thread_end(vbasedev->name, ret);
+
+ return ret;
+}
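+
+/*
+ * Resulting stream shape, as a worked example rather than a specification:
+ * for a device whose STOP_COPY data fits in three reads of data_fd, the
+ * thread queues data packets with idx 0, 1 and 2, followed by a fourth
+ * packet with idx 3 and VFIO_DEVICE_STATE_CONFIG_STATE holding the config
+ * state, so the receiver's load_buf_idx_last ends up as 3.
+ */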
+
+int vfio_multifd_switchover_start(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIOMultifd *multifd = migration->multifd;
+
+ assert(multifd);
+
+ /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */
+ bql_unlock();
+ WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) {
+ assert(!multifd->load_bufs_thread_running);
+ multifd->load_bufs_thread_running = true;
+ }
+ bql_lock();
+
+ qemu_loadvm_start_load_thread(vfio_load_bufs_thread, vbasedev);
+
+ return 0;
+}
diff --git a/hw/vfio/migration-multifd.h b/hw/vfio/migration-multifd.h
new file mode 100644
index 0000000..0bab632
--- /dev/null
+++ b/hw/vfio/migration-multifd.h
@@ -0,0 +1,34 @@
+/*
+ * Multifd VFIO migration
+ *
+ * Copyright (C) 2024,2025 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_MIGRATION_MULTIFD_H
+#define HW_VFIO_MIGRATION_MULTIFD_H
+
+#include "hw/vfio/vfio-device.h"
+
+bool vfio_multifd_setup(VFIODevice *vbasedev, bool alloc_multifd, Error **errp);
+void vfio_multifd_cleanup(VFIODevice *vbasedev);
+
+bool vfio_multifd_transfer_supported(void);
+bool vfio_multifd_transfer_enabled(VFIODevice *vbasedev);
+
+bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size,
+ Error **errp);
+
+void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f);
+
+bool
+vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
+ Error **errp);
+
+int vfio_multifd_switchover_start(VFIODevice *vbasedev);
+
+#endif
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index 34d4be2..b76697bd 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -15,38 +15,23 @@
#include <linux/vfio.h>
#include <sys/ioctl.h>
-#include "sysemu/runstate.h"
-#include "hw/vfio/vfio-common.h"
+#include "system/runstate.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-migration.h"
#include "migration/misc.h"
#include "migration/savevm.h"
#include "migration/vmstate.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/blocker.h"
+#include "migration-multifd.h"
#include "qapi/error.h"
#include "qapi/qapi-events-vfio.h"
#include "exec/ramlist.h"
-#include "exec/ram_addr.h"
#include "pci.h"
#include "trace.h"
#include "hw/hw.h"
-
-/*
- * Flags to be used as unique delimiters for VFIO devices in the migration
- * stream. These flags are composed as:
- * 0xffffffff => MSB 32-bit all 1s
- * 0xef10 => Magic ID, represents emulated (virtual) function IO
- * 0x0000 => 16-bits reserved for flags
- *
- * The beginning of state information is marked by _DEV_CONFIG_STATE,
- * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a
- * certain state information is marked by _END_OF_STATE.
- */
-#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL)
-#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL)
-#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL)
-#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL)
-#define VFIO_MIG_FLAG_DEV_INIT_DATA_SENT (0xffffffffef100005ULL)
+#include "vfio-migration-internal.h"
/*
* This is an arbitrary size based on migration of mlx5 devices, where typically
@@ -55,7 +40,7 @@
*/
#define VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE (1 * MiB)
-static int64_t bytes_transferred;
+static unsigned long bytes_transferred;
static const char *mig_state_to_str(enum vfio_device_mig_state state)
{
@@ -81,7 +66,7 @@ static const char *mig_state_to_str(enum vfio_device_mig_state state)
}
}
-static VfioMigrationState
+static QapiVfioMigrationState
mig_state_to_qapi_state(enum vfio_device_mig_state state)
{
switch (state) {
@@ -136,10 +121,10 @@ static void vfio_migration_set_device_state(VFIODevice *vbasedev,
vfio_migration_send_event(vbasedev);
}
-static int vfio_migration_set_state(VFIODevice *vbasedev,
- enum vfio_device_mig_state new_state,
- enum vfio_device_mig_state recover_state,
- Error **errp)
+int vfio_migration_set_state(VFIODevice *vbasedev,
+ enum vfio_device_mig_state new_state,
+ enum vfio_device_mig_state recover_state,
+ Error **errp)
{
VFIOMigration *migration = vbasedev->migration;
uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
@@ -254,8 +239,7 @@ static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
return ret;
}
-static int vfio_save_device_config_state(QEMUFile *f, void *opaque,
- Error **errp)
+int vfio_save_device_config_state(QEMUFile *f, void *opaque, Error **errp)
{
VFIODevice *vbasedev = opaque;
int ret;
@@ -280,11 +264,13 @@ static int vfio_save_device_config_state(QEMUFile *f, void *opaque,
return ret;
}
-static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
+int vfio_load_device_config_state(QEMUFile *f, void *opaque)
{
VFIODevice *vbasedev = opaque;
uint64_t data;
+ trace_vfio_load_device_config_state_start(vbasedev->name);
+
if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
int ret;
@@ -303,7 +289,7 @@ static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
return -EINVAL;
}
- trace_vfio_load_device_config_state(vbasedev->name);
+ trace_vfio_load_device_config_state_end(vbasedev->name);
return qemu_file_get_error(f);
}
@@ -370,6 +356,10 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration)
* please refer to the Linux kernel VFIO uAPI.
*/
if (errno == ENOMSG) {
+ if (!migration->event_precopy_empty_hit) {
+ trace_vfio_save_block_precopy_empty_hit(migration->vbasedev->name);
+ migration->event_precopy_empty_hit = true;
+ }
return 0;
}
@@ -379,10 +369,13 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration)
return 0;
}
+ /* Non-empty read: re-arm the trace event */
+ migration->event_precopy_empty_hit = false;
+
qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
qemu_put_be64(f, data_size);
qemu_put_buffer(f, migration->data_buffer, data_size);
- bytes_transferred += data_size;
+ vfio_migration_add_bytes_transferred(data_size);
trace_vfio_save_block(migration->vbasedev->name, data_size);
@@ -460,6 +453,10 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp)
uint64_t stop_copy_size = VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE;
int ret;
+ if (!vfio_multifd_setup(vbasedev, false, errp)) {
+ return -EINVAL;
+ }
+
qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);
vfio_query_stop_copy_size(vbasedev, &stop_copy_size);
@@ -472,6 +469,9 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp)
return -ENOMEM;
}
+ migration->event_save_iterate_started = false;
+ migration->event_precopy_empty_hit = false;
+
if (vfio_precopy_supported(vbasedev)) {
switch (migration->device_state) {
case VFIO_DEVICE_STATE_RUNNING:
@@ -513,6 +513,9 @@ static void vfio_save_cleanup(void *opaque)
Error *local_err = NULL;
int ret;
+ /* Currently a NOP, done for symmetry with load_cleanup() */
+ vfio_multifd_cleanup(vbasedev);
+
/*
* Changing device state from STOP_COPY to STOP can take time. Do it here,
* after migration has completed, so it won't increase downtime.
@@ -576,9 +579,6 @@ static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (vfio_device_state_is_precopy(vbasedev)) {
vfio_query_precopy_size(migration);
-
- *must_precopy +=
- migration->precopy_init_size + migration->precopy_dirty_size;
}
trace_vfio_state_pending_exact(vbasedev->name, *must_precopy, *can_postcopy,
@@ -605,6 +605,11 @@ static int vfio_save_iterate(QEMUFile *f, void *opaque)
VFIOMigration *migration = vbasedev->migration;
ssize_t data_size;
+ if (!migration->event_save_iterate_started) {
+ trace_vfio_save_iterate_start(vbasedev->name);
+ migration->event_save_iterate_started = true;
+ }
+
data_size = vfio_save_block(f, migration);
if (data_size < 0) {
return data_size;
@@ -633,6 +638,13 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
int ret;
Error *local_err = NULL;
+ if (vfio_multifd_transfer_enabled(vbasedev)) {
+ vfio_multifd_emit_dummy_eos(vbasedev, f);
+ return 0;
+ }
+
+ trace_vfio_save_complete_precopy_start(vbasedev->name);
+
/* We reach here with device state STOP or STOP_COPY only */
ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY,
VFIO_DEVICE_STATE_STOP, &local_err);
@@ -662,6 +674,11 @@ static void vfio_save_state(QEMUFile *f, void *opaque)
Error *local_err = NULL;
int ret;
+ if (vfio_multifd_transfer_enabled(vbasedev)) {
+ vfio_multifd_emit_dummy_eos(vbasedev, f);
+ return;
+ }
+
ret = vfio_save_device_config_state(f, opaque, &local_err);
if (ret) {
error_prepend(&local_err,
@@ -674,15 +691,28 @@ static void vfio_save_state(QEMUFile *f, void *opaque)
static int vfio_load_setup(QEMUFile *f, void *opaque, Error **errp)
{
VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ int ret;
+
+ if (!vfio_multifd_setup(vbasedev, true, errp)) {
+ return -EINVAL;
+ }
- return vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
- vbasedev->migration->device_state, errp);
+ ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING,
+ migration->device_state, errp);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
}
static int vfio_load_cleanup(void *opaque)
{
VFIODevice *vbasedev = opaque;
+ vfio_multifd_cleanup(vbasedev);
+
vfio_migration_cleanup(vbasedev);
trace_vfio_load_cleanup(vbasedev->name);
@@ -703,6 +733,13 @@ static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
switch (data) {
case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
{
+ if (vfio_multifd_transfer_enabled(vbasedev)) {
+ error_report("%s: got DEV_CONFIG_STATE in main migration "
+ "channel but doing multifd transfer",
+ vbasedev->name);
+ return -EINVAL;
+ }
+
return vfio_load_device_config_state(f, opaque);
}
case VFIO_MIG_FLAG_DEV_SETUP_STATE:
@@ -768,6 +805,17 @@ static bool vfio_switchover_ack_needed(void *opaque)
return vfio_precopy_supported(vbasedev);
}
+static int vfio_switchover_start(void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+
+ if (vfio_multifd_transfer_enabled(vbasedev)) {
+ return vfio_multifd_switchover_start(vbasedev);
+ }
+
+ return 0;
+}
+
static const SaveVMHandlers savevm_vfio_handlers = {
.save_prepare = vfio_save_prepare,
.save_setup = vfio_save_setup,
@@ -782,6 +830,12 @@ static const SaveVMHandlers savevm_vfio_handlers = {
.load_cleanup = vfio_load_cleanup,
.load_state = vfio_load_state,
.switchover_ack_needed = vfio_switchover_ack_needed,
+ /*
+ * Multifd support
+ */
+ .load_state_buffer = vfio_multifd_load_state_buffer,
+ .switchover_start = vfio_switchover_start,
+ .save_live_complete_precopy_thread = vfio_multifd_save_complete_precopy_thread,
};
/* ---------------------------------------------------------------------- */
@@ -962,13 +1016,72 @@ static int vfio_migration_init(VFIODevice *vbasedev)
vfio_vmstate_change_prepare :
NULL;
migration->vm_state = qdev_add_vm_change_state_handler_full(
- vbasedev->dev, vfio_vmstate_change, prepare_cb, vbasedev);
+ vbasedev->dev, vfio_vmstate_change, prepare_cb, NULL, vbasedev);
migration_add_notifier(&migration->migration_state,
vfio_migration_state_notifier);
return 0;
}
+static Error *multiple_devices_migration_blocker;
+
+/*
+ * Multiple devices migration is allowed only if all devices support P2P
+ * migration. Single device migration is allowed regardless of P2P migration
+ * support.
+ */
+static bool vfio_multiple_devices_migration_is_supported(void)
+{
+ VFIODevice *vbasedev;
+ unsigned int device_num = 0;
+ bool all_support_p2p = true;
+
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
+ if (vbasedev->migration) {
+ device_num++;
+
+ if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
+ all_support_p2p = false;
+ }
+ }
+ }
+
+ return all_support_p2p || device_num <= 1;
+}
+
+static int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
+{
+ if (vfio_multiple_devices_migration_is_supported()) {
+ return 0;
+ }
+
+ if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
+ error_setg(errp, "Multiple VFIO devices migration is supported only if "
+ "all of them support P2P migration");
+ return -EINVAL;
+ }
+
+ if (multiple_devices_migration_blocker) {
+ return 0;
+ }
+
+ error_setg(&multiple_devices_migration_blocker,
+ "Multiple VFIO devices migration is supported only if all of "
+ "them support P2P migration");
+ return migrate_add_blocker_normal(&multiple_devices_migration_blocker,
+ errp);
+}
+
+static void vfio_unblock_multiple_devices_migration(void)
+{
+ if (!multiple_devices_migration_blocker ||
+ !vfio_multiple_devices_migration_is_supported()) {
+ return;
+ }
+
+ migrate_del_blocker(&multiple_devices_migration_blocker);
+}
+
static void vfio_migration_deinit(VFIODevice *vbasedev)
{
VFIOMigration *migration = vbasedev->migration;
@@ -995,14 +1108,40 @@ static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
/* ---------------------------------------------------------------------- */
-int64_t vfio_mig_bytes_transferred(void)
+int64_t vfio_migration_bytes_transferred(void)
{
- return bytes_transferred;
+ return MIN(qatomic_read(&bytes_transferred), INT64_MAX);
}
-void vfio_reset_bytes_transferred(void)
+void vfio_migration_reset_bytes_transferred(void)
{
- bytes_transferred = 0;
+ qatomic_set(&bytes_transferred, 0);
+}
+
+void vfio_migration_add_bytes_transferred(unsigned long val)
+{
+ qatomic_add(&bytes_transferred, val);
+}
+
+bool vfio_migration_active(void)
+{
+ VFIODevice *vbasedev;
+
+ if (QLIST_EMPTY(&vfio_device_list)) {
+ return false;
+ }
+
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
+ if (vbasedev->migration_blocker) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool vfio_viommu_preset(VFIODevice *vbasedev)
+{
+ return vbasedev->bcontainer->space->as != &address_space_memory;
}
/*
@@ -1036,16 +1175,18 @@ bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
return !vfio_block_migration(vbasedev, err, errp);
}
- if (!vbasedev->dirty_pages_supported) {
+ if ((!vbasedev->dirty_pages_supported ||
+ vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
+ !vbasedev->iommu_dirty_tracking) {
if (vbasedev->enable_migration == ON_OFF_AUTO_AUTO) {
error_setg(&err,
- "%s: VFIO device doesn't support device dirty tracking",
- vbasedev->name);
+ "%s: VFIO device doesn't support device and "
+ "IOMMU dirty tracking", vbasedev->name);
goto add_blocker;
}
- warn_report("%s: VFIO device doesn't support device dirty tracking",
- vbasedev->name);
+ warn_report("%s: VFIO device doesn't support device and "
+ "IOMMU dirty tracking", vbasedev->name);
}
ret = vfio_block_multiple_devices_migration(vbasedev, errp);
@@ -1079,3 +1220,19 @@ void vfio_migration_exit(VFIODevice *vbasedev)
migrate_del_blocker(&vbasedev->migration_blocker);
}
+
+bool vfio_device_state_is_running(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ return migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
+ migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P;
+}
+
+bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY ||
+ migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
+}
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 39dae72..3f00225 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -25,6 +25,7 @@
#include "hw/nvram/fw_cfg.h"
#include "hw/qdev-properties.h"
#include "pci.h"
+#include "pci-quirks.h"
#include "trace.h"
/*
@@ -66,40 +67,6 @@ bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev)
* Device specific region quirks (mostly backdoors to PCI config space)
*/
-/*
- * The generic window quirks operate on an address and data register,
- * vfio_generic_window_address_quirk handles the address register and
- * vfio_generic_window_data_quirk handles the data register. These ops
- * pass reads and writes through to hardware until a value matching the
- * stored address match/mask is written. When this occurs, the data
- * register access emulated PCI config space for the device rather than
- * passing through accesses. This enables devices where PCI config space
- * is accessible behind a window register to maintain the virtualization
- * provided through vfio.
- */
-typedef struct VFIOConfigWindowMatch {
- uint32_t match;
- uint32_t mask;
-} VFIOConfigWindowMatch;
-
-typedef struct VFIOConfigWindowQuirk {
- struct VFIOPCIDevice *vdev;
-
- uint32_t address_val;
-
- uint32_t address_offset;
- uint32_t data_offset;
-
- bool window_enabled;
- uint8_t bar;
-
- MemoryRegion *addr_mem;
- MemoryRegion *data_mem;
-
- uint32_t nr_matches;
- VFIOConfigWindowMatch matches[];
-} VFIOConfigWindowQuirk;
-
static uint64_t vfio_generic_window_quirk_address_read(void *opaque,
hwaddr addr,
unsigned size)
@@ -135,7 +102,7 @@ static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr,
}
}
-static const MemoryRegionOps vfio_generic_window_address_quirk = {
+const MemoryRegionOps vfio_generic_window_address_quirk = {
.read = vfio_generic_window_quirk_address_read,
.write = vfio_generic_window_quirk_address_write,
.endianness = DEVICE_LITTLE_ENDIAN,
@@ -178,26 +145,12 @@ static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr,
addr + window->data_offset, data, size);
}
-static const MemoryRegionOps vfio_generic_window_data_quirk = {
+const MemoryRegionOps vfio_generic_window_data_quirk = {
.read = vfio_generic_window_quirk_data_read,
.write = vfio_generic_window_quirk_data_write,
.endianness = DEVICE_LITTLE_ENDIAN,
};
-/*
- * The generic mirror quirk handles devices which expose PCI config space
- * through a region within a BAR. When enabled, reads and writes are
- * redirected through to emulated PCI config space. XXX if PCI config space
- * used memory regions, this could just be an alias.
- */
-typedef struct VFIOConfigMirrorQuirk {
- struct VFIOPCIDevice *vdev;
- uint32_t offset;
- uint8_t bar;
- MemoryRegion *mem;
- uint8_t data[];
-} VFIOConfigMirrorQuirk;
-
static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
hwaddr addr, unsigned size)
{
@@ -209,6 +162,7 @@ static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
(void)vfio_region_read(&vdev->bars[mirror->bar].region,
addr + mirror->offset, size);
+ addr += mirror->config_offset;
data = vfio_pci_read_config(&vdev->pdev, addr, size);
trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
memory_region_name(mirror->mem),
@@ -222,13 +176,14 @@ static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr,
VFIOConfigMirrorQuirk *mirror = opaque;
VFIOPCIDevice *vdev = mirror->vdev;
+ addr += mirror->config_offset;
vfio_pci_write_config(&vdev->pdev, addr, data, size);
trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
memory_region_name(mirror->mem),
addr, data);
}
-static const MemoryRegionOps vfio_generic_mirror_quirk = {
+const MemoryRegionOps vfio_generic_mirror_quirk = {
.read = vfio_generic_quirk_mirror_read,
.write = vfio_generic_quirk_mirror_write,
.endianness = DEVICE_LITTLE_ENDIAN,
@@ -448,7 +403,7 @@ static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
/* This windows doesn't seem to be used except by legacy VGA code */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
- !vdev->vga || nr != 4) {
+ !vdev->vga || nr != 4 || !vdev->bars[4].ioport) {
return;
}
@@ -1159,59 +1114,19 @@ static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
}
-#define IGD_ASLS 0xfc /* ASL Storage Register */
-
/*
- * The OpRegion includes the Video BIOS Table, which seems important for
- * telling the driver what sort of outputs it has. Without this, the device
- * may work in the guest, but we may not get output. This also requires BIOS
- * support to reserve and populate a section of guest memory sufficient for
- * the table and to write the base address of that memory to the ASLS register
- * of the IGD device.
+ * Common quirk probe entry points.
*/
-bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info, Error **errp)
+bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp)
{
- int ret;
-
- vdev->igd_opregion = g_malloc0(info->size);
- ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
- info->size, info->offset);
- if (ret != info->size) {
- error_setg(errp, "failed to read IGD OpRegion");
- g_free(vdev->igd_opregion);
- vdev->igd_opregion = NULL;
+#ifdef CONFIG_VFIO_IGD
+ if (!vfio_probe_igd_config_quirk(vdev, errp)) {
return false;
}
-
- /*
- * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
- * allocate 32bit reserved memory for, copy these contents into, and write
- * the reserved memory base address to the device ASLS register at 0xFC.
- * Alignment of this reserved region seems flexible, but using a 4k page
- * alignment seems to work well. This interface assumes a single IGD
- * device, which may be at VM address 00:02.0 in legacy mode or another
- * address in UPT mode.
- *
- * NB, there may be future use cases discovered where the VM should have
- * direct interaction with the host OpRegion, in which case the write to
- * the ASLS register would trigger MemoryRegion setup to enable that.
- */
- fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
- vdev->igd_opregion, info->size);
-
- trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
-
- pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
- pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
- pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
-
+#endif
return true;
}
-/*
- * Common quirk probe entry points.
- */
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
vfio_vga_probe_ati_3c3_quirk(vdev);
@@ -1259,7 +1174,7 @@ void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
vfio_probe_nvidia_bar0_quirk(vdev, nr);
vfio_probe_rtl8168_bar2_quirk(vdev, nr);
#ifdef CONFIG_VFIO_IGD
- vfio_probe_igd_bar4_quirk(vdev, nr);
+ vfio_probe_igd_bar0_quirk(vdev, nr);
#endif
}
@@ -1498,7 +1413,7 @@ static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint8_t *ptr = object_field_prop_ptr(obj, prop);
visit_type_uint8(v, name, ptr, errp);
@@ -1508,7 +1423,7 @@ static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- Property *prop = opaque;
+ const Property *prop = opaque;
uint8_t value, *ptr = object_field_prop_ptr(obj, prop);
if (!visit_type_uint8(v, name, &value, errp)) {
@@ -1524,7 +1439,7 @@ static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v,
}
const PropertyInfo qdev_prop_nv_gpudirect_clique = {
- .name = "uint4",
+ .type = "uint8",
.description = "NVIDIA GPUDirect Clique ID (0 - 15)",
.get = get_nv_gpudirect_clique_id,
.set = set_nv_gpudirect_clique_id,
diff --git a/hw/vfio/pci-quirks.h b/hw/vfio/pci-quirks.h
new file mode 100644
index 0000000..d1532e3
--- /dev/null
+++ b/hw/vfio/pci-quirks.h
@@ -0,0 +1,72 @@
+/*
+ * vfio generic region quirks (mostly backdoors to PCI config space)
+ *
+ * Copyright Red Hat, Inc. 2012-2015
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#ifndef HW_VFIO_VFIO_PCI_QUIRKS_H
+#define HW_VFIO_VFIO_PCI_QUIRKS_H
+
+#include "qemu/osdep.h"
+#include "exec/memop.h"
+
+/*
+ * The generic window quirks operate on an address and data register,
+ * vfio_generic_window_address_quirk handles the address register and
+ * vfio_generic_window_data_quirk handles the data register. These ops
+ * pass reads and writes through to hardware until a value matching the
+ * stored address match/mask is written. When this occurs, the data
+ * register access emulated PCI config space for the device rather than
+ * passing through accesses. This enables devices where PCI config space
+ * is accessible behind a window register to maintain the virtualization
+ * provided through vfio.
+ */
+typedef struct VFIOConfigWindowMatch {
+ uint32_t match;
+ uint32_t mask;
+} VFIOConfigWindowMatch;
+
+typedef struct VFIOConfigWindowQuirk {
+ struct VFIOPCIDevice *vdev;
+
+ uint32_t address_val;
+
+ uint32_t address_offset;
+ uint32_t data_offset;
+
+ bool window_enabled;
+ uint8_t bar;
+
+ MemoryRegion *addr_mem;
+ MemoryRegion *data_mem;
+
+ uint32_t nr_matches;
+ VFIOConfigWindowMatch matches[];
+} VFIOConfigWindowQuirk;
+
+extern const MemoryRegionOps vfio_generic_window_address_quirk;
+extern const MemoryRegionOps vfio_generic_window_data_quirk;
+
+/*
+ * The generic mirror quirk handles devices which expose PCI config space
+ * through a region within a BAR. When enabled, reads and writes are
+ * redirected through to emulated PCI config space. XXX if PCI config space
+ * used memory regions, this could just be an alias.
+ */
+typedef struct VFIOConfigMirrorQuirk {
+ struct VFIOPCIDevice *vdev;
+ uint32_t offset; /* Offset in BAR */
+ uint32_t config_offset; /* Offset in PCI config space */
+ uint8_t bar;
+ MemoryRegion *mem;
+ uint8_t data[];
+} VFIOConfigMirrorQuirk;
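+
+/*
+ * Example of the config_offset field with hypothetical numbers: a quirk
+ * that maps "mem" at BAR offset "offset" with config_offset = 0x10 makes a
+ * guest read at BAR + offset + 0x4 hit emulated config space at 0x14,
+ * matching the addr += mirror->config_offset adjustment in pci-quirks.c.
+ */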
+
+extern const MemoryRegionOps vfio_generic_mirror_quirk;
+
+#endif /* HW_VFIO_VFIO_PCI_QUIRKS_H */
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index e03d9f3..fa25bde 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -30,20 +30,23 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
-#include "qapi/qmp/qdict.h"
+#include "migration/cpr.h"
+#include "qobject/qdict.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "qemu/units.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
-#include "sysemu/iommufd.h"
+#include "system/iommufd.h"
+#include "vfio-migration-internal.h"
+#include "vfio-helpers.h"
#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
@@ -54,6 +57,23 @@ static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
static void vfio_msi_disable_common(VFIOPCIDevice *vdev);
+static bool vfio_notifier_init(VFIOPCIDevice *vdev, EventNotifier *e,
+ const char *name, int nr, Error **errp)
+{
+ int ret = event_notifier_init(e, 0);
+
+ if (ret) {
+ error_setg_errno(errp, -ret, "vfio_notifier_init %s failed", name);
+ }
+ return !ret;
+}
+
+static void vfio_notifier_cleanup(VFIOPCIDevice *vdev, EventNotifier *e,
+ const char *name, int nr)
+{
+ event_notifier_cleanup(e);
+}
+
/*
* Disabling BAR mmaping can be slow, but toggling it around INTx can
* also be a huge overhead. We try to get the best of both worlds by
@@ -101,7 +121,7 @@ static void vfio_intx_interrupt(void *opaque)
}
}
-static void vfio_intx_eoi(VFIODevice *vbasedev)
+void vfio_pci_intx_eoi(VFIODevice *vbasedev)
{
VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
@@ -109,11 +129,11 @@ static void vfio_intx_eoi(VFIODevice *vbasedev)
return;
}
- trace_vfio_intx_eoi(vbasedev->name);
+ trace_vfio_pci_intx_eoi(vbasedev->name);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
- vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
@@ -129,13 +149,12 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
/* Get to a known interrupt state */
qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
- vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
/* Get an eventfd for resample/unmask */
- if (event_notifier_init(&vdev->intx.unmask, 0)) {
- error_setg(errp, "event_notifier_init failed eoi");
+ if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) {
goto fail;
}
@@ -147,15 +166,15 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
goto fail_irqfd;
}
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
- VFIO_IRQ_SET_ACTION_UNMASK,
- event_notifier_get_fd(&vdev->intx.unmask),
- errp)) {
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_UNMASK,
+ event_notifier_get_fd(&vdev->intx.unmask),
+ errp)) {
goto fail_vfio;
}
/* Let'em rip */
- vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.kvm_accel = true;
@@ -167,10 +186,10 @@ fail_vfio:
kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
vdev->intx.route.irq);
fail_irqfd:
- event_notifier_cleanup(&vdev->intx.unmask);
+ vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0);
fail:
qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
- vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
return false;
#else
return true;
@@ -188,7 +207,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
* Get to a known state, hardware masked, QEMU ready to accept new
* interrupts, QEMU IRQ de-asserted.
*/
- vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
@@ -199,7 +218,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
}
/* We only need to close the eventfd for VFIO to cleanup the kernel side */
- event_notifier_cleanup(&vdev->intx.unmask);
+ vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0);
/* QEMU starts listening for interrupt events. */
qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
@@ -208,7 +227,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
vdev->intx.kvm_accel = false;
/* If we've missed an event, let it re-fire through QEMU */
- vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
@@ -234,12 +253,12 @@ static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
}
/* Re-enable the interrupt in case we missed an EOI */
- vfio_intx_eoi(&vdev->vbasedev);
+ vfio_pci_intx_eoi(&vdev->vbasedev);
}
static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
PCIINTxRoute route;
if (vdev->interrupt != VFIO_INT_INTx) {
@@ -266,7 +285,6 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
Error *err = NULL;
int32_t fd;
- int ret;
if (!pin) {
@@ -289,18 +307,17 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
}
#endif
- ret = event_notifier_init(&vdev->intx.interrupt, 0);
- if (ret) {
- error_setg_errno(errp, -ret, "event_notifier_init failed");
+ if (!vfio_notifier_init(vdev, &vdev->intx.interrupt, "intx-interrupt", 0,
+ errp)) {
return false;
}
fd = event_notifier_get_fd(&vdev->intx.interrupt);
qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
qemu_set_fd_handler(fd, NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->intx.interrupt);
+ vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0);
return false;
}
@@ -320,20 +337,25 @@ static void vfio_intx_disable(VFIOPCIDevice *vdev)
timer_del(vdev->intx.mmap_timer);
vfio_intx_disable_kvm(vdev);
- vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
vfio_mmap_set_enabled(vdev, true);
fd = event_notifier_get_fd(&vdev->intx.interrupt);
qemu_set_fd_handler(fd, NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->intx.interrupt);
+ vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0);
vdev->interrupt = VFIO_INT_NONE;
trace_vfio_intx_disable(vdev->vbasedev.name);
}
+bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp)
+{
+ return vfio_intx_enable(vdev, errp);
+}
+
/*
* MSI/X
*/
@@ -379,7 +401,7 @@ static void vfio_msi_interrupt(void *opaque)
static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev)
{
g_autofree struct vfio_irq_set *irq_set = NULL;
- int ret = 0, argsz;
+ int argsz;
int32_t *fd;
argsz = sizeof(*irq_set) + sizeof(*fd);
@@ -394,9 +416,7 @@ static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev)
fd = (int32_t *)&irq_set->data;
*fd = -1;
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
- return ret;
+ return vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
}
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
@@ -453,15 +473,15 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
fds[i] = fd;
}
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ ret = vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
g_free(irq_set);
return ret;
}
-static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
- int vector_n, bool msix)
+void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
+ int vector_n, bool msix)
{
if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
return;
@@ -471,13 +491,16 @@ static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
vector_n, &vdev->pdev);
}
-static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector)
+static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector, int nr)
{
+ const char *name = "kvm_interrupt";
+
if (vector->virq < 0) {
return;
}
- if (event_notifier_init(&vector->kvm_interrupt, 0)) {
+ if (!vfio_notifier_init(vector->vdev, &vector->kvm_interrupt, name, nr,
+ NULL)) {
goto fail_notifier;
}
@@ -489,19 +512,20 @@ static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector)
return;
fail_kvm:
- event_notifier_cleanup(&vector->kvm_interrupt);
+ vfio_notifier_cleanup(vector->vdev, &vector->kvm_interrupt, name, nr);
fail_notifier:
kvm_irqchip_release_virq(kvm_state, vector->virq);
vector->virq = -1;
}
-static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
+static void vfio_remove_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
+ int nr)
{
kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
vector->virq);
kvm_irqchip_release_virq(kvm_state, vector->virq);
vector->virq = -1;
- event_notifier_cleanup(&vector->kvm_interrupt);
+ vfio_notifier_cleanup(vdev, &vector->kvm_interrupt, "kvm_interrupt", nr);
}
static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
@@ -511,10 +535,47 @@ static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
kvm_irqchip_commit_routes(kvm_state);
}
+static void set_irq_signalling(VFIODevice *vbasedev, VFIOMSIVector *vector,
+ unsigned int nr)
+{
+ Error *err = NULL;
+ int32_t fd;
+
+ if (vector->virq >= 0) {
+ fd = event_notifier_get_fd(&vector->kvm_interrupt);
+ } else {
+ fd = event_notifier_get_fd(&vector->interrupt);
+ }
+
+ if (!vfio_device_irq_set_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
+ VFIO_IRQ_SET_ACTION_TRIGGER,
+ fd, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
+ }
+}
+
+void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOMSIVector *vector = &vdev->msi_vectors[nr];
+ PCIDevice *pdev = &vdev->pdev;
+ Error *local_err = NULL;
+
+ vector->vdev = vdev;
+ vector->virq = -1;
+ if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", nr,
+ &local_err)) {
+ error_report_err(local_err);
+ }
+ vector->use = true;
+ if (vdev->interrupt == VFIO_INT_MSIX) {
+ msix_vector_use(pdev, nr);
+ }
+}
+
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
MSIMessage *msg, IOHandler *handler)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
VFIOMSIVector *vector;
int ret;
bool resizing = !!(vdev->nr_vectors < nr + 1);
@@ -524,13 +585,7 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
vector = &vdev->msi_vectors[nr];
if (!vector->use) {
- vector->vdev = vdev;
- vector->virq = -1;
- if (event_notifier_init(&vector->interrupt, 0)) {
- error_report("vfio: Error: event_notifier_init failed");
- }
- vector->use = true;
- msix_vector_use(pdev, nr);
+ vfio_pci_vector_init(vdev, nr);
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
@@ -542,19 +597,19 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
*/
if (vector->virq >= 0) {
if (!msg) {
- vfio_remove_kvm_msi_virq(vector);
+ vfio_remove_kvm_msi_virq(vdev, vector, nr);
} else {
vfio_update_kvm_msi_virq(vector, *msg, pdev);
}
} else {
if (msg) {
if (vdev->defer_kvm_irq_routing) {
- vfio_add_kvm_msi_virq(vdev, vector, nr, true);
+ vfio_pci_add_kvm_msi_virq(vdev, vector, nr, true);
} else {
vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
- vfio_add_kvm_msi_virq(vdev, vector, nr, true);
+ vfio_pci_add_kvm_msi_virq(vdev, vector, nr, true);
kvm_irqchip_commit_route_changes(&vfio_route_change);
- vfio_connect_kvm_msi_virq(vector);
+ vfio_connect_kvm_msi_virq(vector, nr);
}
}
}
@@ -576,27 +631,14 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
if (!vdev->defer_kvm_irq_routing) {
if (vdev->msix->noresize && resizing) {
- vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
+ vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
ret = vfio_enable_vectors(vdev, true);
if (ret) {
- error_report("vfio: failed to enable vectors, %d", ret);
+ error_report("vfio: failed to enable vectors, %s",
+ strerror(-ret));
}
} else {
- Error *err = NULL;
- int32_t fd;
-
- if (vector->virq >= 0) {
- fd = event_notifier_get_fd(&vector->kvm_interrupt);
- } else {
- fd = event_notifier_get_fd(&vector->interrupt);
- }
-
- if (!vfio_set_irq_signaling(&vdev->vbasedev,
- VFIO_PCI_MSIX_IRQ_INDEX, nr,
- VFIO_IRQ_SET_ACTION_TRIGGER, fd,
- &err)) {
- error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
- }
+ set_irq_signalling(&vdev->vbasedev, vector, nr);
}
}
@@ -619,7 +661,7 @@ static int vfio_msix_vector_use(PCIDevice *pdev,
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
VFIOMSIVector *vector = &vdev->msi_vectors[nr];
trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
@@ -636,7 +678,7 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
int32_t fd = event_notifier_get_fd(&vector->interrupt);
Error *err = NULL;
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
&err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
@@ -644,14 +686,14 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
}
}
-static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
+void vfio_pci_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
{
assert(!vdev->defer_kvm_irq_routing);
vdev->defer_kvm_irq_routing = true;
vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
}
-static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
+void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
{
int i;
@@ -661,7 +703,7 @@ static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
kvm_irqchip_commit_route_changes(&vfio_route_change);
for (i = 0; i < vdev->nr_vectors; i++) {
- vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]);
+ vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i], i);
}
}
@@ -681,19 +723,20 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev)
* routes once rather than per vector provides a substantial
* performance improvement.
*/
- vfio_prepare_kvm_msi_virq_batch(vdev);
+ vfio_pci_prepare_kvm_msi_virq_batch(vdev);
if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
vfio_msix_vector_release, NULL)) {
error_report("vfio: msix_set_vector_notifiers failed");
}
- vfio_commit_kvm_msi_virq_batch(vdev);
+ vfio_pci_commit_kvm_msi_virq_batch(vdev);
if (vdev->nr_vectors) {
ret = vfio_enable_vectors(vdev, true);
if (ret) {
- error_report("vfio: failed to enable vectors, %d", ret);
+ error_report("vfio: failed to enable vectors, %s",
+ strerror(-ret));
}
} else {
/*
@@ -710,7 +753,8 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev)
*/
ret = vfio_enable_msix_no_vec(vdev);
if (ret) {
- error_report("vfio: failed to enable MSI-X, %d", ret);
+ error_report("vfio: failed to enable MSI-X, %s",
+ strerror(-ret));
}
}
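The batching helpers renamed in the hunks above implement the pattern the comment describes: route changes for every vector are queued while defer_kvm_irq_routing is set and committed in one go. A condensed sketch of the calling pattern follows, using only functions visible in this patch; the wrapper name is invented for illustration, and notifier setup, error handling and the userspace fallback are omitted.

/*
 * Illustrative only: batched KVM MSI route setup as performed by
 * vfio_msix_enable()/vfio_msi_enable() in this patch.
 */
static void example_enable_vectors_batched(VFIOPCIDevice *vdev, bool msix)
{
    int i;

    /* Sets defer_kvm_irq_routing and begins a KVM route transaction. */
    vfio_pci_prepare_kvm_msi_virq_batch(vdev);

    for (i = 0; i < vdev->nr_vectors; i++) {
        /* Queues a route for the vector; the irqfd is not connected yet
         * because routing is deferred. */
        vfio_pci_add_kvm_msi_virq(vdev, &vdev->msi_vectors[i], i, msix);
    }

    /* Commits all queued routes once, then connects each vector's irqfd
     * via vfio_connect_kvm_msi_virq(). */
    vfio_pci_commit_kvm_msi_virq_batch(vdev);
}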
@@ -730,19 +774,21 @@ retry:
* Deferring to commit the KVM routes once rather than per vector
* provides a substantial performance improvement.
*/
- vfio_prepare_kvm_msi_virq_batch(vdev);
+ vfio_pci_prepare_kvm_msi_virq_batch(vdev);
vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
for (i = 0; i < vdev->nr_vectors; i++) {
VFIOMSIVector *vector = &vdev->msi_vectors[i];
+ Error *local_err = NULL;
vector->vdev = vdev;
vector->virq = -1;
vector->use = true;
- if (event_notifier_init(&vector->interrupt, 0)) {
- error_report("vfio: Error: event_notifier_init failed");
+ if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", i,
+ &local_err)) {
+ error_report_err(local_err);
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
@@ -752,10 +798,10 @@ retry:
* Attempt to enable route through KVM irqchip,
* default to userspace handling if unavailable.
*/
- vfio_add_kvm_msi_virq(vdev, vector, i, false);
+ vfio_pci_add_kvm_msi_virq(vdev, vector, i, false);
}
- vfio_commit_kvm_msi_virq_batch(vdev);
+ vfio_pci_commit_kvm_msi_virq_batch(vdev);
/* Set interrupt type prior to possible interrupts */
vdev->interrupt = VFIO_INT_MSI;
@@ -763,7 +809,8 @@ retry:
ret = vfio_enable_vectors(vdev, false);
if (ret) {
if (ret < 0) {
- error_report("vfio: Error: Failed to setup MSI fds: %m");
+ error_report("vfio: Error: Failed to setup MSI fds: %s",
+ strerror(-ret));
} else {
error_report("vfio: Error: Failed to enable %d "
"MSI vectors, retry with %d", vdev->nr_vectors, ret);
@@ -797,11 +844,11 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
VFIOMSIVector *vector = &vdev->msi_vectors[i];
if (vdev->msi_vectors[i].use) {
if (vector->virq >= 0) {
- vfio_remove_kvm_msi_virq(vector);
+ vfio_remove_kvm_msi_virq(vdev, vector, i);
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
NULL, NULL, NULL);
- event_notifier_cleanup(&vector->interrupt);
+ vfio_notifier_cleanup(vdev, &vector->interrupt, "interrupt", i);
}
}
@@ -833,7 +880,7 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev)
* Always clear MSI-X IRQ index. A PF device could have enabled
* MSI-X with no vectors. See vfio_msix_enable().
*/
- vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
+ vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
vfio_msi_disable_common(vdev);
if (!vfio_intx_enable(vdev, &err)) {
@@ -850,7 +897,7 @@ static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
Error *err = NULL;
- vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
+ vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
vfio_msi_disable_common(vdev);
vfio_intx_enable(vdev, &err);
if (err) {
@@ -879,18 +926,22 @@ static void vfio_update_msi(VFIOPCIDevice *vdev)
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
- g_autofree struct vfio_region_info *reg_info = NULL;
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ struct vfio_region_info *reg_info = NULL;
uint64_t size;
off_t off = 0;
ssize_t bytes;
+ int ret;
+
+ ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_ROM_REGION_INDEX,
+ &reg_info);
- if (vfio_get_region_info(&vdev->vbasedev,
- VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
- error_report("vfio: Error getting ROM info: %m");
+ if (ret != 0) {
+ error_report("vfio: Error getting ROM info: %s", strerror(-ret));
return;
}
- trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
+ trace_vfio_pci_load_rom(vbasedev->name, (unsigned long)reg_info->size,
(unsigned long)reg_info->offset,
(unsigned long)reg_info->flags);
@@ -899,8 +950,7 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
if (!vdev->rom_size) {
vdev->rom_read_failed = true;
- error_report("vfio-pci: Cannot read device rom at "
- "%s", vdev->vbasedev.name);
+ error_report("vfio-pci: Cannot read device rom at %s", vbasedev->name);
error_printf("Device option ROM contents are probably invalid "
"(check dmesg).\nSkip option ROM probe with rombar=0, "
"or load from file with romfile=\n");
@@ -911,18 +961,22 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
memset(vdev->rom, 0xff, size);
while (size) {
- bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
- size, vdev->rom_offset + off);
+ bytes = vbasedev->io_ops->region_read(vbasedev,
+ VFIO_PCI_ROM_REGION_INDEX,
+ off, size, vdev->rom + off);
+
if (bytes == 0) {
break;
} else if (bytes > 0) {
off += bytes;
size -= bytes;
} else {
- if (errno == EINTR || errno == EAGAIN) {
+ if (bytes == -EINTR || bytes == -EAGAIN) {
continue;
}
- error_report("vfio: Error reading device ROM: %m");
+ error_report("vfio: Error reading device ROM: %s",
+ strreaderror(bytes));
+
break;
}
}
@@ -958,6 +1012,24 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
}
}
+/* "Raw" read of underlying config space. */
+static int vfio_pci_config_space_read(VFIOPCIDevice *vdev, off_t offset,
+ uint32_t size, void *data)
+{
+ return vdev->vbasedev.io_ops->region_read(&vdev->vbasedev,
+ VFIO_PCI_CONFIG_REGION_INDEX,
+ offset, size, data);
+}
+
+/* "Raw" write of underlying config space. */
+static int vfio_pci_config_space_write(VFIOPCIDevice *vdev, off_t offset,
+ uint32_t size, void *data)
+{
+ return vdev->vbasedev.io_ops->region_write(&vdev->vbasedev,
+ VFIO_PCI_CONFIG_REGION_INDEX,
+ offset, size, data, false);
+}
+
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
VFIOPCIDevice *vdev = opaque;
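The vfio_pci_config_space_read()/write() helpers added in this hunk funnel raw config accesses through vbasedev->io_ops instead of pread()/pwrite() on a file descriptor, which is why later hunks can drop vdev->config_offset from their callers. A minimal, hypothetical usage sketch following the same pattern that vfio_msi_setup() uses further down (read_vendor_id() is not part of the patch):

/*
 * Hypothetical helper: read the 16-bit vendor ID through the raw config
 * space accessor and convert from PCI little-endian byte order. The
 * accessors return the byte count on success or a negative errno.
 */
static int read_vendor_id(VFIOPCIDevice *vdev, uint16_t *vendor)
{
    uint16_t val;
    int ret;

    ret = vfio_pci_config_space_read(vdev, PCI_VENDOR_ID, sizeof(val), &val);
    if (ret != sizeof(val)) {
        return ret < 0 ? ret : -EFAULT;
    }

    *vendor = le16_to_cpu(val);
    return 0;
}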
@@ -1010,11 +1082,9 @@ static const MemoryRegionOps vfio_rom_ops = {
static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
+ VFIODevice *vbasedev = &vdev->vbasedev;
uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
- off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
- DeviceState *dev = DEVICE(vdev);
char *name;
- int fd = vdev->vbasedev.fd;
if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
/* Since pci handles romfile, just print a message and return */
@@ -1031,11 +1101,12 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
* Use the same size ROM BAR as the physical device. The contents
* will get filled in later when the guest tries to read it.
*/
- if (pread(fd, &orig, 4, offset) != 4 ||
- pwrite(fd, &size, 4, offset) != 4 ||
- pread(fd, &size, 4, offset) != 4 ||
- pwrite(fd, &orig, 4, offset) != 4) {
- error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
+ if (vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4 ||
+ vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
+ vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
+ vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4) {
+
+ error_report("%s(%s) ROM access failed", __func__, vbasedev->name);
return;
}
@@ -1046,12 +1117,12 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
}
if (vfio_opt_rom_in_denylist(vdev)) {
- if (dev->opts && qdict_haskey(dev->opts, "rombar")) {
+ if (vdev->pdev.rom_bar > 0) {
warn_report("Device at %s is known to cause system instability"
" issues during option rom execution",
vdev->vbasedev.name);
error_printf("Proceeding anyway since user specified"
- " non zero value for rombar\n");
+ " positive value for rombar\n");
} else {
warn_report("Rom loading for device at %s has been disabled"
" due to system instability issues",
@@ -1168,7 +1239,7 @@ static const MemoryRegionOps vfio_vga_ops = {
*/
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
VFIORegion *region = &vdev->bars[bar].region;
MemoryRegion *mmap_mr, *region_mr, *base_mr;
PCIIORegion *r;
@@ -1214,7 +1285,8 @@ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
*/
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
@@ -1227,12 +1299,12 @@ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
ssize_t ret;
- ret = pread(vdev->vbasedev.fd, &phys_val, len,
- vdev->config_offset + addr);
+ ret = vfio_pci_config_space_read(vdev, addr, len, &phys_val);
if (ret != len) {
- error_report("%s(%s, 0x%x, 0x%x) failed: %m",
- __func__, vdev->vbasedev.name, addr, len);
- return -errno;
+ error_report("%s(%s, 0x%x, 0x%x) failed: %s",
+ __func__, vbasedev->name, addr, len,
+ strreaderror(ret));
+ return -1;
}
phys_val = le32_to_cpu(phys_val);
}
@@ -1247,16 +1319,19 @@ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
void vfio_pci_write_config(PCIDevice *pdev,
uint32_t addr, uint32_t val, int len)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
uint32_t val_le = cpu_to_le32(val);
+ int ret;
trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
/* Write everything to VFIO, let it filter out what we can't write */
- if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
- != len) {
- error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
- __func__, vdev->vbasedev.name, addr, val, len);
+ ret = vfio_pci_config_space_write(vdev, addr, len, &val_le);
+ if (ret != len) {
+ error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %s",
+ __func__, vbasedev->name, addr, val, len,
+ strwriteerror(ret));
}
/* MSI/MSI-X Enabling/Disabling */
@@ -1344,9 +1419,11 @@ static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
int ret, entries;
Error *err = NULL;
- if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
- vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
- error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
+ ret = vfio_pci_config_space_read(vdev, pos + PCI_CAP_FLAGS,
+ sizeof(ctrl), &ctrl);
+ if (ret != sizeof(ctrl)) {
+ error_setg(errp, "failed reading MSI PCI_CAP_FLAGS: %s",
+ strreaderror(ret));
return false;
}
ctrl = le16_to_cpu(ctrl);
@@ -1379,8 +1456,8 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
* If the host driver allows mapping of a MSIX data, we are going to
* do map the entire BAR and emulate MSIX table on top of that.
*/
- if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
- VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
+ if (vfio_device_has_region_cap(&vdev->vbasedev, region->nr,
+ VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
return;
}
@@ -1452,7 +1529,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
int target_bar = -1;
size_t msix_sz;
- if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
+ if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
return true;
}
@@ -1464,7 +1541,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
/* PCI BARs must be a power of 2 */
msix_sz = pow2ceil(msix_sz);
- if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
+ if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) {
/*
* TODO: Lookup table for known devices.
*
@@ -1479,7 +1556,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
return false;
}
} else {
- target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
+ target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0);
}
/* I/O port BARs cannot host MSI-X structures */
@@ -1553,31 +1630,35 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
uint8_t pos;
uint16_t ctrl;
uint32_t table, pba;
- int ret, fd = vdev->vbasedev.fd;
- struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
- .index = VFIO_PCI_MSIX_IRQ_INDEX };
+ struct vfio_irq_info irq_info;
VFIOMSIXInfo *msix;
+ int ret;
pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
if (!pos) {
return true;
}
- if (pread(fd, &ctrl, sizeof(ctrl),
- vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
- error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
+ ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_FLAGS,
+ sizeof(ctrl), &ctrl);
+ if (ret != sizeof(ctrl)) {
+ error_setg(errp, "failed to read PCI MSIX FLAGS: %s",
+ strreaderror(ret));
return false;
}
- if (pread(fd, &table, sizeof(table),
- vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
- error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
+ ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_TABLE,
+ sizeof(table), &table);
+ if (ret != sizeof(table)) {
+ error_setg(errp, "failed to read PCI MSIX TABLE: %s",
+ strreaderror(ret));
return false;
}
- if (pread(fd, &pba, sizeof(pba),
- vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
- error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
+ ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_PBA,
+ sizeof(pba), &pba);
+ if (ret != sizeof(pba)) {
+ error_setg(errp, "failed to read PCI MSIX PBA: %s", strreaderror(ret));
return false;
}
@@ -1592,7 +1673,8 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
+ &irq_info);
if (ret < 0) {
error_setg_errno(errp, -ret, "failed to get MSI-X irq info");
g_free(msix);
@@ -1624,7 +1706,7 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
} else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
PCI_DEVICE_ID_KUNLUN_VF)) {
msix->pba_offset = 0xb400;
- } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
+ } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
error_setg(errp, "hardware reports invalid configuration, "
"MSIX PBA outside of specified BAR");
g_free(msix);
@@ -1699,7 +1781,7 @@ static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
return true;
}
-static void vfio_teardown_msi(VFIOPCIDevice *vdev)
+void vfio_pci_teardown_msi(VFIOPCIDevice *vdev)
{
msi_uninit(&vdev->pdev);
@@ -1736,10 +1818,10 @@ static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
}
/* Determine what type of BAR this is for registration */
- ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
- vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
+ ret = vfio_pci_config_space_read(vdev, PCI_BASE_ADDRESS_0 + (4 * nr),
+ sizeof(pci_bar), &pci_bar);
if (ret != sizeof(pci_bar)) {
- error_report("vfio: Failed to read BAR %d (%m)", nr);
+ error_report("vfio: Failed to read BAR %d: %s", nr, strreaderror(ret));
return;
}
@@ -1749,6 +1831,9 @@ static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
~PCI_BASE_ADDRESS_MEM_MASK);
bar->size = bar->region.size;
+
+ /* IO regions are sync, memory can be async */
+ bar->region.post_wr = (bar->ioport == 0);
}
static void vfio_bars_prepare(VFIOPCIDevice *vdev)
@@ -1795,7 +1880,7 @@ static void vfio_bars_register(VFIOPCIDevice *vdev)
}
}
-static void vfio_bars_exit(VFIOPCIDevice *vdev)
+void vfio_pci_bars_exit(VFIOPCIDevice *vdev)
{
int i;
@@ -2216,8 +2301,12 @@ static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
break;
case PCI_CAP_ID_PM:
vfio_check_pm_reset(vdev, pos);
- vdev->pm_cap = pos;
- ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
+ ret = pci_pm_init(pdev, pos, errp) >= 0;
+ /*
+ * PCI-core config space emulation needs write access to the power
+ * state enabled for tracking BAR mapping relative to PM state.
+ */
+ pci_set_word(pdev->wmask + pos + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK);
break;
case PCI_CAP_ID_AF:
vfio_check_af_flr(vdev, pos);
@@ -2380,10 +2469,9 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
}
g_free(config);
- return;
}
-static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
+bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
PCIDevice *pdev = &vdev->pdev;
@@ -2407,18 +2495,27 @@ void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
vfio_disable_interrupts(vdev);
+ /*
+ * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
+ * Also put INTx Disable in known state.
+ */
+ cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
+
/* Make sure the device is in D0 */
- if (vdev->pm_cap) {
+ if (pdev->pm_cap) {
uint16_t pmcsr;
uint8_t state;
- pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
state = pmcsr & PCI_PM_CTRL_STATE_MASK;
if (state) {
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
+ vfio_pci_write_config(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
/* vfio handles the necessary delay here */
- pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
state = pmcsr & PCI_PM_CTRL_STATE_MASK;
if (state) {
error_report("vfio: Unable to power on device, stuck in D%d",
@@ -2426,34 +2523,27 @@ void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
}
}
}
-
- /*
- * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
- * Also put INTx Disable in known state.
- */
- cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
- cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_INTX_DISABLE);
- vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
+ VFIODevice *vbasedev = &vdev->vbasedev;
Error *err = NULL;
- int nr;
+ int ret, nr;
if (!vfio_intx_enable(vdev, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}
for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
- off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
+ off_t addr = PCI_BASE_ADDRESS_0 + (4 * nr);
uint32_t val = 0;
uint32_t len = sizeof(val);
- if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
- error_report("%s(%s) reset bar %d failed: %m", __func__,
- vdev->vbasedev.name, nr);
+ ret = vfio_pci_config_space_write(vdev, addr, len, &val);
+ if (ret != len) {
+ error_report("%s(%s) reset bar %d failed: %s", __func__,
+ vbasedev->name, nr, strwriteerror(ret));
}
}
@@ -2657,7 +2747,7 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
static VFIODeviceOps vfio_pci_ops = {
.vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
.vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
- .vfio_eoi = vfio_intx_eoi,
+ .vfio_eoi = vfio_pci_intx_eoi,
.vfio_get_object = vfio_pci_get_object,
.vfio_save_config = vfio_pci_save_config,
.vfio_load_config = vfio_pci_load_config,
@@ -2666,10 +2756,10 @@ static VFIODeviceOps vfio_pci_ops = {
bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
VFIODevice *vbasedev = &vdev->vbasedev;
- g_autofree struct vfio_region_info *reg_info = NULL;
+ struct vfio_region_info *reg_info = NULL;
int ret;
- ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
+ ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
if (ret) {
error_setg_errno(errp, -ret,
"failed getting region info for VGA region index %d",
@@ -2728,11 +2818,11 @@ bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
return true;
}
-static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
+bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
VFIODevice *vbasedev = &vdev->vbasedev;
- g_autofree struct vfio_region_info *reg_info = NULL;
- struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
+ struct vfio_region_info *reg_info = NULL;
+ struct vfio_irq_info irq_info;
int i, ret = -1;
/* Sanity check device */
@@ -2767,14 +2857,14 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
QLIST_INIT(&vdev->bars[i].quirks);
}
- ret = vfio_get_region_info(vbasedev,
- VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
+ ret = vfio_device_get_region_info(vbasedev,
+ VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
if (ret) {
error_setg_errno(errp, -ret, "failed to get config info");
return false;
}
- trace_vfio_populate_device_config(vdev->vbasedev.name,
+ trace_vfio_pci_populate_device_config(vdev->vbasedev.name,
(unsigned long)reg_info->size,
(unsigned long)reg_info->offset,
(unsigned long)reg_info->flags);
@@ -2793,12 +2883,10 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
}
}
- irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
-
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ ret = vfio_device_get_irq_info(vbasedev, VFIO_PCI_ERR_IRQ_INDEX, &irq_info);
if (ret) {
/* This can fail for an old kernel or legacy PCI dev */
- trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
+ trace_vfio_pci_populate_device_get_irq_info_failure(strerror(-ret));
} else if (irq_info.count == 1) {
vdev->pci_aer = true;
} else {
@@ -2810,9 +2898,21 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
return true;
}
-static void vfio_pci_put_device(VFIOPCIDevice *vdev)
+void vfio_pci_put_device(VFIOPCIDevice *vdev)
{
- vfio_detach_device(&vdev->vbasedev);
+ vfio_display_finalize(vdev);
+ vfio_bars_finalize(vdev);
+ g_free(vdev->emulated_config_bits);
+ g_free(vdev->rom);
+ /*
+ * XXX Leaking igd_opregion is not an oversight, we can't remove the
+ * fw_cfg entry therefore leaking this allocation seems like the safest
+ * option.
+ *
+ * g_free(vdev->igd_opregion);
+ */
+
+ vfio_device_detach(&vdev->vbasedev);
g_free(vdev->vbasedev.name);
g_free(vdev->msix);
@@ -2846,7 +2946,7 @@ static void vfio_err_notifier_handler(void *opaque)
* and continue after disabling error recovery support for the
* device.
*/
-static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
+void vfio_pci_register_err_notifier(VFIOPCIDevice *vdev)
{
Error *err = NULL;
int32_t fd;
@@ -2855,8 +2955,9 @@ static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
return;
}
- if (event_notifier_init(&vdev->err_notifier, 0)) {
- error_report("vfio: Unable to init event notifier for error detection");
+ if (!vfio_notifier_init(vdev, &vdev->err_notifier, "err_notifier", 0,
+ &err)) {
+ error_report_err(err);
vdev->pci_aer = false;
return;
}
@@ -2864,11 +2965,11 @@ static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
fd = event_notifier_get_fd(&vdev->err_notifier);
qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
qemu_set_fd_handler(fd, NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->err_notifier);
+ vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0);
vdev->pci_aer = false;
}
}
@@ -2881,13 +2982,13 @@ static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
return;
}
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}
qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->err_notifier);
+ vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0);
}
static void vfio_req_notifier_handler(void *opaque)
@@ -2905,35 +3006,37 @@ static void vfio_req_notifier_handler(void *opaque)
}
}
-static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
+void vfio_pci_register_req_notifier(VFIOPCIDevice *vdev)
{
- struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
- .index = VFIO_PCI_REQ_IRQ_INDEX };
+ struct vfio_irq_info irq_info;
Error *err = NULL;
int32_t fd;
+ int ret;
if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
return;
}
- if (ioctl(vdev->vbasedev.fd,
- VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
+ ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX,
+ &irq_info);
+ if (ret < 0 || irq_info.count < 1) {
return;
}
- if (event_notifier_init(&vdev->req_notifier, 0)) {
- error_report("vfio: Unable to init event notifier for device request");
+ if (!vfio_notifier_init(vdev, &vdev->req_notifier, "req_notifier", 0,
+ &err)) {
+ error_report_err(err);
return;
}
fd = event_notifier_get_fd(&vdev->req_notifier);
qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
qemu_set_fd_handler(fd, NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->req_notifier);
+ vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0);
} else {
vdev->req_enabled = true;
}
@@ -2947,93 +3050,33 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
return;
}
- if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
}
qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
NULL, NULL, vdev);
- event_notifier_cleanup(&vdev->req_notifier);
+ vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0);
vdev->req_enabled = false;
}
-static void vfio_realize(PCIDevice *pdev, Error **errp)
+bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp)
{
- ERRP_GUARD();
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ PCIDevice *pdev = &vdev->pdev;
VFIODevice *vbasedev = &vdev->vbasedev;
- char *subsys;
- int i, ret;
- bool is_mdev;
- char uuid[UUID_STR_LEN];
- g_autofree char *name = NULL;
- g_autofree char *tmp = NULL;
-
- if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
- if (!(~vdev->host.domain || ~vdev->host.bus ||
- ~vdev->host.slot || ~vdev->host.function)) {
- error_setg(errp, "No provided host device");
- error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
-#ifdef CONFIG_IOMMUFD
- "or -device vfio-pci,fd=DEVICE_FD "
-#endif
- "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
- return;
- }
- vbasedev->sysfsdev =
- g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
- vdev->host.domain, vdev->host.bus,
- vdev->host.slot, vdev->host.function);
- }
-
- if (!vfio_device_get_name(vbasedev, errp)) {
- return;
- }
-
- /*
- * Mediated devices *might* operate compatibly with discarding of RAM, but
- * we cannot know for certain, it depends on whether the mdev vendor driver
- * stays in sync with the active working set of the guest driver. Prevent
- * the x-balloon-allowed option unless this is minimally an mdev device.
- */
- tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
- subsys = realpath(tmp, NULL);
- is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
- free(subsys);
-
- trace_vfio_mdev(vbasedev->name, is_mdev);
-
- if (vbasedev->ram_block_discard_allowed && !is_mdev) {
- error_setg(errp, "x-balloon-allowed only potentially compatible "
- "with mdev devices");
- goto error;
- }
-
- if (!qemu_uuid_is_null(&vdev->vf_token)) {
- qemu_uuid_unparse(&vdev->vf_token, uuid);
- name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
- } else {
- name = g_strdup(vbasedev->name);
- }
-
- if (!vfio_attach_device(name, vbasedev,
- pci_device_iommu_address_space(pdev), errp)) {
- goto error;
- }
+ uint32_t config_space_size;
+ int ret;
- if (!vfio_populate_device(vdev, errp)) {
- goto error;
- }
+ config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size);
/* Get a copy of config space */
- ret = pread(vbasedev->fd, vdev->pdev.config,
- MIN(pci_config_size(&vdev->pdev), vdev->config_size),
- vdev->config_offset);
- if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
- ret = ret < 0 ? -errno : -EFAULT;
- error_setg_errno(errp, -ret, "failed to read device config space");
- goto error;
+ ret = vfio_pci_config_space_read(vdev, 0, config_space_size,
+ vdev->pdev.config);
+ if (ret < (int)config_space_size) {
+ ret = ret < 0 ? -ret : EFAULT;
+ error_setg_errno(errp, ret, "failed to read device config space");
+ return false;
}
/* vfio emulates a lot for us, but some bits need extra love */
@@ -3052,7 +3095,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vdev->vendor_id != PCI_ANY_ID) {
if (vdev->vendor_id >= 0xffff) {
error_setg(errp, "invalid PCI vendor ID provided");
- goto error;
+ return false;
}
vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id);
@@ -3063,7 +3106,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vdev->device_id != PCI_ANY_ID) {
if (vdev->device_id > 0xffff) {
error_setg(errp, "invalid PCI device ID provided");
- goto error;
+ return false;
}
vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id);
@@ -3074,7 +3117,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vdev->sub_vendor_id != PCI_ANY_ID) {
if (vdev->sub_vendor_id > 0xffff) {
error_setg(errp, "invalid PCI subsystem vendor ID provided");
- goto error;
+ return false;
}
vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
vdev->sub_vendor_id, ~0);
@@ -3085,7 +3128,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vdev->sub_device_id != PCI_ANY_ID) {
if (vdev->sub_device_id > 0xffff) {
error_setg(errp, "invalid PCI subsystem device ID provided");
- goto error;
+ return false;
}
vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
trace_vfio_pci_emulated_sub_device_id(vbasedev->name,
@@ -3116,52 +3159,17 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
vfio_bars_prepare(vdev);
if (!vfio_msix_early_setup(vdev, errp)) {
- goto error;
+ return false;
}
vfio_bars_register(vdev);
- if (!pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
- error_prepend(errp, "Failed to set iommu_device: ");
- goto out_teardown;
- }
-
- if (!vfio_add_capabilities(vdev, errp)) {
- goto out_unset_idev;
- }
-
- if (vdev->vga) {
- vfio_vga_quirk_setup(vdev);
- }
-
- for (i = 0; i < PCI_ROM_SLOT; i++) {
- vfio_bar_quirk_setup(vdev, i);
- }
-
- if (!vdev->igd_opregion &&
- vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
- g_autofree struct vfio_region_info *opregion = NULL;
-
- if (vdev->pdev.qdev.hotplugged) {
- error_setg(errp,
- "cannot support IGD OpRegion feature on hotplugged "
- "device");
- goto out_unset_idev;
- }
-
- ret = vfio_get_dev_region_info(vbasedev,
- VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
- VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
- if (ret) {
- error_setg_errno(errp, -ret,
- "does not support requested IGD OpRegion feature");
- goto out_unset_idev;
- }
+ return true;
+}
- if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
- goto out_unset_idev;
- }
- }
+bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp)
+{
+ PCIDevice *pdev = &vdev->pdev;
/* QEMU emulates all of MSI & MSIX */
if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
@@ -3176,14 +3184,111 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
- vfio_intx_mmap_enable, vdev);
+ vfio_intx_mmap_enable, vdev);
pci_device_set_intx_routing_notifier(&vdev->pdev,
vfio_intx_routing_notifier);
vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
if (!vfio_intx_enable(vdev, errp)) {
- goto out_deregister;
+ timer_free(vdev->intx.mmap_timer);
+ pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void vfio_pci_realize(PCIDevice *pdev, Error **errp)
+{
+ ERRP_GUARD();
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ int i;
+ char uuid[UUID_STR_LEN];
+ g_autofree char *name = NULL;
+
+ if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
+ if (!(~vdev->host.domain || ~vdev->host.bus ||
+ ~vdev->host.slot || ~vdev->host.function)) {
+ error_setg(errp, "No provided host device");
+ error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
+#ifdef CONFIG_IOMMUFD
+ "or -device vfio-pci,fd=DEVICE_FD "
+#endif
+ "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
+ return;
}
+ vbasedev->sysfsdev =
+ g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
+ vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+ }
+
+ if (!vfio_device_get_name(vbasedev, errp)) {
+ return;
+ }
+
+ /*
+ * Mediated devices *might* operate compatibly with discarding of RAM, but
+ * we cannot know for certain, it depends on whether the mdev vendor driver
+ * stays in sync with the active working set of the guest driver. Prevent
+ * the x-balloon-allowed option unless this is minimally an mdev device.
+ */
+ vbasedev->mdev = vfio_device_is_mdev(vbasedev);
+
+ trace_vfio_mdev(vbasedev->name, vbasedev->mdev);
+
+ if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) {
+ error_setg(errp, "x-balloon-allowed only potentially compatible "
+ "with mdev devices");
+ goto error;
+ }
+
+ if (!qemu_uuid_is_null(&vdev->vf_token)) {
+ qemu_uuid_unparse(&vdev->vf_token, uuid);
+ name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
+ } else {
+ name = g_strdup(vbasedev->name);
+ }
+
+ if (!vfio_device_attach(name, vbasedev,
+ pci_device_iommu_address_space(pdev), errp)) {
+ goto error;
+ }
+
+ if (!vfio_pci_populate_device(vdev, errp)) {
+ goto error;
+ }
+
+ if (!vfio_pci_config_setup(vdev, errp)) {
+ goto error;
+ }
+
+ if (!vbasedev->mdev &&
+ !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
+ error_prepend(errp, "Failed to set vIOMMU: ");
+ goto out_teardown;
+ }
+
+ if (!vfio_pci_add_capabilities(vdev, errp)) {
+ goto out_unset_idev;
+ }
+
+ if (!vfio_config_quirk_setup(vdev, errp)) {
+ goto out_unset_idev;
+ }
+
+ if (vdev->vga) {
+ vfio_vga_quirk_setup(vdev);
+ }
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_bar_quirk_setup(vdev, i);
+ }
+
+ if (!vfio_pci_interrupt_setup(vdev, errp)) {
+ goto out_unset_idev;
}
if (vdev->display != ON_OFF_AUTO_OFF) {
@@ -3226,8 +3331,8 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
}
}
- vfio_register_err_notifier(vdev);
- vfio_register_req_notifier(vdev);
+ vfio_pci_register_err_notifier(vdev);
+ vfio_pci_register_req_notifier(vdev);
vfio_setup_resetfn_quirk(vdev);
return;
@@ -3244,35 +3349,26 @@ out_deregister:
timer_free(vdev->intx.mmap_timer);
}
out_unset_idev:
- pci_device_unset_iommu_device(pdev);
+ if (!vbasedev->mdev) {
+ pci_device_unset_iommu_device(pdev);
+ }
out_teardown:
- vfio_teardown_msi(vdev);
- vfio_bars_exit(vdev);
+ vfio_pci_teardown_msi(vdev);
+ vfio_pci_bars_exit(vdev);
error:
error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
}
static void vfio_instance_finalize(Object *obj)
{
- VFIOPCIDevice *vdev = VFIO_PCI(obj);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
- vfio_display_finalize(vdev);
- vfio_bars_finalize(vdev);
- g_free(vdev->emulated_config_bits);
- g_free(vdev->rom);
- /*
- * XXX Leaking igd_opregion is not an oversight, we can't remove the
- * fw_cfg entry therefore leaking this allocation seems like the safest
- * option.
- *
- * g_free(vdev->igd_opregion);
- */
vfio_pci_put_device(vdev);
}
static void vfio_exitfn(PCIDevice *pdev)
{
- VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
VFIODevice *vbasedev = &vdev->vbasedev;
vfio_unregister_req_notifier(vdev);
@@ -3285,16 +3381,23 @@ static void vfio_exitfn(PCIDevice *pdev)
if (vdev->intx.mmap_timer) {
timer_free(vdev->intx.mmap_timer);
}
- vfio_teardown_msi(vdev);
+ vfio_pci_teardown_msi(vdev);
vfio_pci_disable_rp_atomics(vdev);
- vfio_bars_exit(vdev);
+ vfio_pci_bars_exit(vdev);
vfio_migration_exit(vbasedev);
- pci_device_unset_iommu_device(pdev);
+ if (!vbasedev->mdev) {
+ pci_device_unset_iommu_device(pdev);
+ }
}
static void vfio_pci_reset(DeviceState *dev)
{
- VFIOPCIDevice *vdev = VFIO_PCI(dev);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev);
+
+ /* Do not reset the device during qemu_system_reset prior to cpr load */
+ if (cpr_is_incoming()) {
+ return;
+ }
trace_vfio_pci_reset(vdev->vbasedev.name);
@@ -3334,7 +3437,7 @@ post_reset:
static void vfio_instance_init(Object *obj)
{
PCIDevice *pci_dev = PCI_DEVICE(obj);
- VFIOPCIDevice *vdev = VFIO_PCI(obj);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
VFIODevice *vbasedev = &vdev->vbasedev;
device_add_bootindex_property(obj, &vdev->bootindex,
@@ -3353,15 +3456,52 @@ static void vfio_instance_init(Object *obj)
/* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
* line, therefore, no need to wait to realize like other devices */
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+
+ /*
+ * A device that is resuming for cpr is already configured, so do not
+ * reset it during qemu_system_reset prior to cpr load, else interrupts
+ * may be lost.
+ */
+ pci_dev->cap_present |= QEMU_PCI_SKIP_RESET_ON_CPR;
}
-static Property vfio_pci_dev_properties[] = {
+static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
+
+ dc->desc = "VFIO PCI base device";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ pdc->exit = vfio_exitfn;
+ pdc->config_read = vfio_pci_read_config;
+ pdc->config_write = vfio_pci_write_config;
+}
+
+static const TypeInfo vfio_pci_base_dev_info = {
+ .name = TYPE_VFIO_PCI_BASE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(VFIOPCIDevice),
+ .abstract = true,
+ .class_init = vfio_pci_base_dev_class_init,
+ .interfaces = (const InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { }
+ },
+};
+
+static PropertyInfo vfio_pci_migration_multifd_transfer_prop;
+
+static const Property vfio_pci_dev_properties[] = {
DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token),
DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
vbasedev.pre_copy_dirty_page_tracking,
ON_OFF_AUTO_ON),
+ DEFINE_PROP_ON_OFF_AUTO("x-device-dirty-page-tracking", VFIOPCIDevice,
+ vbasedev.device_dirty_page_tracking,
+ ON_OFF_AUTO_ON),
DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
display, ON_OFF_AUTO_OFF),
DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
@@ -3373,9 +3513,17 @@ static Property vfio_pci_dev_properties[] = {
DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
VFIO_FEATURE_ENABLE_REQ_BIT, true),
DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
- VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
+ VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, true),
+ DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features,
+ VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false),
+ DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice,
+ igd_legacy_mode, ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
+ DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice,
+ vbasedev.migration_multifd_transfer,
+ vfio_pci_migration_multifd_transfer_prop, OnOffAuto,
+ .set_default = true, .defval.i = ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
vbasedev.migration_events, false),
DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
@@ -3401,67 +3549,187 @@ static Property vfio_pci_dev_properties[] = {
nv_gpudirect_clique,
qdev_prop_nv_gpudirect_clique, uint8_t),
DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
- OFF_AUTOPCIBAR_OFF),
+ OFF_AUTO_PCIBAR_OFF),
#ifdef CONFIG_IOMMUFD
DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
- DEFINE_PROP_END_OF_LIST(),
};
#ifdef CONFIG_IOMMUFD
static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp)
{
- vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp);
+ VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
+ vfio_device_set_fd(&vdev->vbasedev, str, errp);
}
#endif
-static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
+static void vfio_pci_dev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
- dc->reset = vfio_pci_reset;
+ device_class_set_legacy_reset(dc, vfio_pci_reset);
device_class_set_props(dc, vfio_pci_dev_properties);
#ifdef CONFIG_IOMMUFD
object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd);
#endif
+ dc->vmsd = &vfio_cpr_pci_vmstate;
dc->desc = "VFIO-based PCI device assignment";
- set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- pdc->realize = vfio_realize;
- pdc->exit = vfio_exitfn;
- pdc->config_read = vfio_pci_read_config;
- pdc->config_write = vfio_pci_write_config;
+ pdc->realize = vfio_pci_realize;
+
+ object_class_property_set_description(klass, /* 1.3 */
+ "host",
+ "Host PCI address [domain:]<bus:slot.function> of assigned device");
+ object_class_property_set_description(klass, /* 1.3 */
+ "x-intx-mmap-timeout-ms",
+ "When EOI is not provided by KVM/QEMU, wait time "
+ "(milliseconds) to re-enable device direct access "
+ "after INTx (DEBUG)");
+ object_class_property_set_description(klass, /* 1.5 */
+ "x-vga",
+ "Expose VGA address spaces for device");
+ object_class_property_set_description(klass, /* 2.3 */
+ "x-req",
+ "Disable device request notification support (DEBUG)");
+ object_class_property_set_description(klass, /* 2.4 and 2.5 */
+ "x-no-mmap",
+ "Disable MMAP for device. Allows to trace MMIO "
+ "accesses (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-no-kvm-intx",
+ "Disable direct VFIO->KVM INTx injection. Allows to "
+ "trace INTx interrupts (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-no-kvm-msi",
+ "Disable direct VFIO->KVM MSI injection. Allows to "
+ "trace MSI interrupts (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-no-kvm-msix",
+ "Disable direct VFIO->KVM MSIx injection. Allows to "
+ "trace MSIx interrupts (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-pci-vendor-id",
+ "Override PCI Vendor ID with provided value (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-pci-device-id",
+ "Override PCI device ID with provided value (DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-pci-sub-vendor-id",
+ "Override PCI Subsystem Vendor ID with provided value "
+ "(DEBUG)");
+ object_class_property_set_description(klass, /* 2.5 */
+ "x-pci-sub-device-id",
+ "Override PCI Subsystem Device ID with provided value "
+ "(DEBUG)");
+ object_class_property_set_description(klass, /* 2.6 */
+ "sysfsdev",
+ "Host sysfs path of assigned device");
+ object_class_property_set_description(klass, /* 2.7 */
+ "x-igd-opregion",
+ "Expose host IGD OpRegion to guest");
+ object_class_property_set_description(klass, /* 2.7 (See c4c45e943e51) */
+ "x-igd-gms",
+ "Override IGD data stolen memory size (32MiB units)");
+ object_class_property_set_description(klass, /* 2.11 */
+ "x-nv-gpudirect-clique",
+ "Add NVIDIA GPUDirect capability indicating P2P DMA "
+ "clique for device [0-15]");
+ object_class_property_set_description(klass, /* 2.12 */
+ "x-no-geforce-quirks",
+ "Disable GeForce quirks (for NVIDIA Quadro/GRID/Tesla). "
+ "Improves performance");
+ object_class_property_set_description(klass, /* 2.12 */
+ "display",
+ "Enable display support for device, ex. vGPU");
+ object_class_property_set_description(klass, /* 2.12 */
+ "x-msix-relocation",
+ "Specify MSI-X MMIO relocation to the end of specified "
+ "existing BAR or new BAR to avoid virtualization overhead "
+ "due to adjacent device registers");
+ object_class_property_set_description(klass, /* 3.0 */
+ "x-no-kvm-ioeventfd",
+ "Disable registration of ioeventfds with KVM (DEBUG)");
+ object_class_property_set_description(klass, /* 3.0 */
+ "x-no-vfio-ioeventfd",
+ "Disable linking of KVM ioeventfds to VFIO ioeventfds "
+ "(DEBUG)");
+ object_class_property_set_description(klass, /* 3.1 */
+ "x-balloon-allowed",
+ "Override allowing ballooning with device (DEBUG, DANGER)");
+ object_class_property_set_description(klass, /* 3.2 */
+ "xres",
+ "Set X display resolution the vGPU should use");
+ object_class_property_set_description(klass, /* 3.2 */
+ "yres",
+ "Set Y display resolution the vGPU should use");
+ object_class_property_set_description(klass, /* 5.2 */
+ "x-pre-copy-dirty-page-tracking",
+ "Disable dirty pages tracking during iterative phase "
+ "(DEBUG)");
+ object_class_property_set_description(klass, /* 5.2, 8.0 non-experimental */
+ "enable-migration",
+ "Enale device migration. Also requires a host VFIO PCI "
+ "variant or mdev driver with migration support enabled");
+ object_class_property_set_description(klass, /* 8.1 */
+ "vf-token",
+ "Specify UUID VF token. Required for VF when PF is owned "
+ "by another VFIO driver");
+#ifdef CONFIG_IOMMUFD
+ object_class_property_set_description(klass, /* 9.0 */
+ "iommufd",
+ "Set host IOMMUFD backend device");
+#endif
+ object_class_property_set_description(klass, /* 9.1 */
+ "x-device-dirty-page-tracking",
+ "Disable device dirty page tracking and use "
+ "container-based dirty page tracking");
+ object_class_property_set_description(klass, /* 9.1 */
+ "migration-events",
+ "Emit VFIO migration QAPI event when a VFIO device "
+ "changes its migration state. For management applications");
+ object_class_property_set_description(klass, /* 9.1 */
+ "skip-vsc-check",
+ "Skip config space check for Vendor Specific Capability. "
+ "Setting to false will enforce strict checking of VSC content "
+ "(DEBUG)");
+ object_class_property_set_description(klass, /* 10.0 */
+ "x-migration-multifd-transfer",
+ "Transfer this device state via "
+ "multifd channels when live migrating it");
}
static const TypeInfo vfio_pci_dev_info = {
.name = TYPE_VFIO_PCI,
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(VFIOPCIDevice),
+ .parent = TYPE_VFIO_PCI_BASE,
.class_init = vfio_pci_dev_class_init,
.instance_init = vfio_instance_init,
.instance_finalize = vfio_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_PCIE_DEVICE },
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { }
- },
};
-static Property vfio_pci_dev_nohotplug_properties[] = {
+static const Property vfio_pci_dev_nohotplug_properties[] = {
DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate,
ON_OFF_AUTO_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
+static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
dc->hotpluggable = false;
+
+ object_class_property_set_description(klass, /* 3.1 */
+ "ramfb",
+ "Enable ramfb to provide pre-boot graphics for devices "
+ "enabling display option");
+ object_class_property_set_description(klass, /* 8.2 */
+ "x-ramfb-migrate",
+ "Override default migration support for ramfb support "
+ "(DEBUG)");
}
static const TypeInfo vfio_pci_nohotplug_dev_info = {
@@ -3473,6 +3741,18 @@ static const TypeInfo vfio_pci_nohotplug_dev_info = {
static void register_vfio_pci_dev_type(void)
{
+ /*
+ * An ordinary ON_OFF_AUTO property isn't runtime-mutable, but the source VM
+ * can run for a long time before being migrated, so it is desirable to have
+ * a fallback mechanism to the old way of transferring VFIO device state if
+ * it turns out to be necessary.
+ * The following makes this type of property have the same mutability level
+ * as ordinary migration parameters.
+ */
+ vfio_pci_migration_multifd_transfer_prop = qdev_prop_on_off_auto;
+ vfio_pci_migration_multifd_transfer_prop.realized_set_allowed = true;
+
+ type_register_static(&vfio_pci_base_dev_info);
type_register_static(&vfio_pci_dev_info);
type_register_static(&vfio_pci_nohotplug_dev_info);
}
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index bf67df2..5ba7330 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -12,14 +12,16 @@
#ifndef HW_VFIO_VFIO_PCI_H
#define HW_VFIO_VFIO_PCI_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/pci/pci_device.h"
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-region.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "qom/object.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
+#include "vfio-display.h"
#define PCI_ANY_ID (~0)
@@ -114,10 +116,19 @@ typedef struct VFIOMSIXInfo {
uint32_t pba_offset;
unsigned long *pending;
bool noresize;
+ MemoryRegion *pba_region;
} VFIOMSIXInfo;
+/*
+ * TYPE_VFIO_PCI_BASE is an abstract type used to share code
+ * between VFIO implementations that use a kernel driver
+ * with those that use user sockets.
+ */
+#define TYPE_VFIO_PCI_BASE "vfio-pci-base"
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI_BASE)
+
#define TYPE_VFIO_PCI "vfio-pci"
-OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI)
+/* TYPE_VFIO_PCI shares struct VFIOPCIDevice. */
struct VFIOPCIDevice {
PCIDevice pdev;
@@ -154,13 +165,16 @@ struct VFIOPCIDevice {
#define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2
#define VFIO_FEATURE_ENABLE_IGD_OPREGION \
(1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT)
+#define VFIO_FEATURE_ENABLE_IGD_LPC_BIT 3
+#define VFIO_FEATURE_ENABLE_IGD_LPC \
+ (1 << VFIO_FEATURE_ENABLE_IGD_LPC_BIT)
OnOffAuto display;
uint32_t display_xres;
uint32_t display_yres;
int32_t bootindex;
+ OnOffAuto igd_legacy_mode;
uint32_t igd_gms;
OffAutoPCIBAR msix_relo;
- uint8_t pm_cap;
uint8_t nv_gpudirect_clique;
bool pci_aer;
bool req_enabled;
@@ -197,6 +211,14 @@ static inline bool vfio_is_vga(VFIOPCIDevice *vdev)
return class == PCI_CLASS_DISPLAY_VGA;
}
+/* MSI/MSI-X/INTx */
+void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr);
+void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
+ int vector_n, bool msix);
+void vfio_pci_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev);
+void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev);
+bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp);
+
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
void vfio_pci_write_config(PCIDevice *pdev,
uint32_t addr, uint32_t val, int len);
@@ -205,6 +227,7 @@ uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size);
void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size);
bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev);
+bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp);
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev);
void vfio_vga_quirk_exit(VFIOPCIDevice *vdev);
void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev);
@@ -215,7 +238,8 @@ void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev);
bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
void vfio_quirk_reset(VFIOPCIDevice *vdev);
VFIOQuirk *vfio_quirk_alloc(int nr_mem);
-void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
+void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr);
+bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp);
extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
@@ -227,14 +251,21 @@ int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
-bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info,
- Error **errp);
-
void vfio_display_reset(VFIOPCIDevice *vdev);
bool vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
void vfio_display_finalize(VFIOPCIDevice *vdev);
extern const VMStateDescription vfio_display_vmstate;
+void vfio_pci_bars_exit(VFIOPCIDevice *vdev);
+bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp);
+bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp);
+bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp);
+void vfio_pci_intx_eoi(VFIODevice *vbasedev);
+void vfio_pci_put_device(VFIOPCIDevice *vdev);
+bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp);
+void vfio_pci_register_err_notifier(VFIOPCIDevice *vdev);
+void vfio_pci_register_req_notifier(VFIOPCIDevice *vdev);
+void vfio_pci_teardown_msi(VFIOPCIDevice *vdev);
+
#endif /* HW_VFIO_VFIO_PCI_H */
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index a85c199..9a21f2e 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -21,22 +21,23 @@
#include <linux/vfio.h>
#include "hw/vfio/vfio-platform.h"
-#include "sysemu/iommufd.h"
+#include "system/iommufd.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/irq.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
+#include "hw/vfio/vfio-region.h"
/*
* Functions used whatever the injection method
@@ -118,8 +119,8 @@ static int vfio_set_trigger_eventfd(VFIOINTp *intp,
qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);
- if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0,
- VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
qemu_set_fd_handler(fd, NULL, NULL, NULL);
return -EINVAL;
@@ -300,7 +301,7 @@ static void vfio_platform_eoi(VFIODevice *vbasedev)
if (vfio_irq_is_automasked(intp)) {
/* unmasks the physical level-sensitive IRQ */
- vfio_unmask_single_irqindex(vbasedev, intp->pin);
+ vfio_device_irq_unmask(vbasedev, intp->pin);
}
/* a single IRQ can be active at a time */
@@ -356,8 +357,8 @@ static int vfio_set_resample_eventfd(VFIOINTp *intp)
Error *err = NULL;
qemu_set_fd_handler(fd, NULL, NULL, NULL);
- if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0,
- VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) {
+ if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0,
+ VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) {
error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
return -EINVAL;
}
@@ -418,7 +419,6 @@ fail_vfio:
abort();
fail_irqfd:
vfio_start_eventfd_injection(sbdev, irq);
- return;
}
/* VFIO skeleton */
@@ -474,10 +474,10 @@ static bool vfio_populate_device(VFIODevice *vbasedev, Error **errp)
QSIMPLEQ_INIT(&vdev->pending_intp_queue);
for (i = 0; i < vbasedev->num_irqs; i++) {
- struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ struct vfio_irq_info irq;
+
+ ret = vfio_device_get_irq_info(vbasedev, i, &irq);
- irq.index = i;
- ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
if (ret) {
error_setg_errno(errp, -ret, "failed to get device irq info");
goto irq_err;
@@ -546,7 +546,7 @@ static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
return false;
}
- if (!vfio_attach_device(vbasedev->name, vbasedev,
+ if (!vfio_device_attach(vbasedev->name, vbasedev,
&address_space_memory, errp)) {
return false;
}
@@ -555,7 +555,7 @@ static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
return true;
}
- vfio_detach_device(vbasedev);
+ vfio_device_detach(vbasedev);
return false;
}
@@ -575,6 +575,7 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
VFIODevice *vbasedev = &vdev->vbasedev;
int i;
+ warn_report("-device vfio-platform is deprecated");
qemu_mutex_init(&vdev->intp_mutex);
trace_vfio_platform_realize(vbasedev->sysfsdev ?
@@ -629,7 +630,7 @@ static const VMStateDescription vfio_platform_vmstate = {
.unmigratable = 1,
};
-static Property vfio_platform_dev_properties[] = {
+static const Property vfio_platform_dev_properties[] = {
DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
@@ -640,7 +641,6 @@ static Property vfio_platform_dev_properties[] = {
DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd,
TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
- DEFINE_PROP_END_OF_LIST(),
};
static void vfio_platform_instance_init(Object *obj)
@@ -659,7 +659,7 @@ static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp)
}
#endif
-static void vfio_platform_class_init(ObjectClass *klass, void *data)
+static void vfio_platform_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
@@ -673,13 +673,35 @@ static void vfio_platform_class_init(ObjectClass *klass, void *data)
dc->desc = "VFIO-based platform device assignment";
sbc->connect_irq_notifier = vfio_start_irqfd_injection;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- /* Supported by TYPE_VIRT_MACHINE */
- dc->user_creatable = true;
+
+ object_class_property_set_description(klass, /* 2.4 */
+ "host",
+ "Host device name of assigned device");
+ object_class_property_set_description(klass, /* 2.4 and 2.5 */
+ "x-no-mmap",
+ "Disable MMAP for device. Allows to trace MMIO "
+ "accesses (DEBUG)");
+ object_class_property_set_description(klass, /* 2.4 */
+ "mmap-timeout-ms",
+ "When EOI is not provided by KVM/QEMU, wait time "
+ "(milliseconds) to re-enable device direct access "
+ "after level interrupt (DEBUG)");
+ object_class_property_set_description(klass, /* 2.4 */
+ "x-irqfd",
+ "Allow disabling irqfd support (DEBUG)");
+ object_class_property_set_description(klass, /* 2.6 */
+ "sysfsdev",
+ "Host sysfs path of assigned device");
+#ifdef CONFIG_IOMMUFD
+ object_class_property_set_description(klass, /* 9.0 */
+ "iommufd",
+ "Set host IOMMUFD backend device");
+#endif
}
static const TypeInfo vfio_platform_dev_info = {
.name = TYPE_VFIO_PLATFORM,
- .parent = TYPE_SYS_BUS_DEVICE,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
.instance_size = sizeof(VFIOPlatformDevice),
.instance_init = vfio_platform_instance_init,
.class_init = vfio_platform_class_init,
diff --git a/hw/vfio/region.c b/hw/vfio/region.c
new file mode 100644
index 0000000..d04c57d
--- /dev/null
+++ b/hw/vfio/region.c
@@ -0,0 +1,403 @@
+/*
+ * VFIO regions
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include "hw/vfio/vfio-region.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/hw.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/units.h"
+#include "monitor/monitor.h"
+#include "vfio-helpers.h"
+
+/*
+ * IO Port/MMIO - Beware of the endians, VFIO is always little endian
+ */
+void vfio_region_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIORegion *region = opaque;
+ VFIODevice *vbasedev = region->vbasedev;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ int ret;
+
+ switch (size) {
+ case 1:
+ buf.byte = data;
+ break;
+ case 2:
+ buf.word = cpu_to_le16(data);
+ break;
+ case 4:
+ buf.dword = cpu_to_le32(data);
+ break;
+ case 8:
+ buf.qword = cpu_to_le64(data);
+ break;
+ default:
+ hw_error("vfio: unsupported write size, %u bytes", size);
+ break;
+ }
+
+ ret = vbasedev->io_ops->region_write(vbasedev, region->nr,
+ addr, size, &buf, region->post_wr);
+ if (ret != size) {
+ error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
+ ",%d) failed: %s",
+ __func__, vbasedev->name, region->nr,
+ addr, data, size, strwriteerror(ret));
+ }
+
+ trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
+
+ /*
+ * A read or write to a BAR always signals an INTx EOI. This will
+ * do nothing if not pending (including not in INTx mode). We assume
+ * that a BAR access is in response to an interrupt and that BAR
+ * accesses will service the interrupt. Unfortunately, we don't know
+ * which access will service the interrupt, so we're potentially
+ * getting quite a few host interrupts per guest interrupt.
+ */
+ vbasedev->ops->vfio_eoi(vbasedev);
+}
+
+uint64_t vfio_region_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIORegion *region = opaque;
+ VFIODevice *vbasedev = region->vbasedev;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ uint64_t data = 0;
+ int ret;
+
+ ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf);
+ if (ret != size) {
+ error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %s",
+ __func__, vbasedev->name, region->nr,
+ addr, size, strreaderror(ret));
+ return (uint64_t)-1;
+ }
+ switch (size) {
+ case 1:
+ data = buf.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(buf.word);
+ break;
+ case 4:
+ data = le32_to_cpu(buf.dword);
+ break;
+ case 8:
+ data = le64_to_cpu(buf.qword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %u bytes", size);
+ break;
+ }
+
+ trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
+
+ /* Same as write above */
+ vbasedev->ops->vfio_eoi(vbasedev);
+
+ return data;
+}
+
+static const MemoryRegionOps vfio_region_ops = {
+ .read = vfio_region_read,
+ .write = vfio_region_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
+ struct vfio_region_info *info)
+{
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_sparse_mmap *sparse;
+ int i, j;
+
+ hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
+ if (!hdr) {
+ return -ENODEV;
+ }
+
+ sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
+
+ trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
+ region->nr, sparse->nr_areas);
+
+ region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
+
+ for (i = 0, j = 0; i < sparse->nr_areas; i++) {
+ if (sparse->areas[i].size) {
+ trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
+ sparse->areas[i].offset +
+ sparse->areas[i].size - 1);
+ region->mmaps[j].offset = sparse->areas[i].offset;
+ region->mmaps[j].size = sparse->areas[i].size;
+ j++;
+ }
+ }
+
+ region->nr_mmaps = j;
+ region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
+
+ return 0;
+}
+
+int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
+ int index, const char *name)
+{
+ struct vfio_region_info *info = NULL;
+ int ret;
+
+ ret = vfio_device_get_region_info(vbasedev, index, &info);
+ if (ret) {
+ return ret;
+ }
+
+ region->vbasedev = vbasedev;
+ region->flags = info->flags;
+ region->size = info->size;
+ region->fd_offset = info->offset;
+ region->nr = index;
+ region->post_wr = false;
+
+ if (region->size) {
+ region->mem = g_new0(MemoryRegion, 1);
+ memory_region_init_io(region->mem, obj, &vfio_region_ops,
+ region, name, region->size);
+
+ if (!vbasedev->no_mmap &&
+ region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
+
+ ret = vfio_setup_region_sparse_mmaps(region, info);
+
+ if (ret) {
+ region->nr_mmaps = 1;
+ region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
+ region->mmaps[0].offset = 0;
+ region->mmaps[0].size = region->size;
+ }
+ }
+ }
+
+ trace_vfio_region_setup(vbasedev->name, index, name,
+ region->flags, region->fd_offset, region->size);
+ return 0;
+}
+
+static void vfio_subregion_unmap(VFIORegion *region, int index)
+{
+ trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
+ region->mmaps[index].offset,
+ region->mmaps[index].offset +
+ region->mmaps[index].size - 1);
+ memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
+ munmap(region->mmaps[index].mmap, region->mmaps[index].size);
+ object_unparent(OBJECT(&region->mmaps[index].mem));
+ region->mmaps[index].mmap = NULL;
+}
+
+int vfio_region_mmap(VFIORegion *region)
+{
+ int i, ret, prot = 0;
+ char *name;
+ int fd;
+
+ if (!region->mem) {
+ return 0;
+ }
+
+ prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
+ prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ size_t align = MIN(1ULL << ctz64(region->mmaps[i].size), 1 * GiB);
+ void *map_base, *map_align;
+
+ /*
+ * Align the mmap for more efficient mapping in the kernel. Ideally
+ * we'd know the PMD and PUD mapping sizes to use as discrete alignment
+ * intervals, but we don't. As of Linux v6.12, the largest PUD size
+ * supporting huge pfnmap is 1GiB (ARCH_SUPPORTS_PUD_PFNMAP is only set
+ * on x86_64). Align by power-of-two size, capped at 1GiB.
+ *
+ * NB. qemu_memalign() and friends actually allocate memory, whereas
+ * the region size here can exceed host memory, therefore we manually
+ * create an oversized anonymous mapping and clean it up for alignment.
+ */
+ map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (map_base == MAP_FAILED) {
+ ret = -errno;
+ goto no_mmap;
+ }
+
+ fd = vfio_device_get_region_fd(region->vbasedev, region->nr);
+
+ map_align = (void *)ROUND_UP((uintptr_t)map_base, (uintptr_t)align);
+ munmap(map_base, map_align - map_base);
+ munmap(map_align + region->mmaps[i].size,
+ align - (map_align - map_base));
+
+ region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot,
+ MAP_SHARED | MAP_FIXED, fd,
+ region->fd_offset +
+ region->mmaps[i].offset);
+ if (region->mmaps[i].mmap == MAP_FAILED) {
+ ret = -errno;
+ goto no_mmap;
+ }
+
+ name = g_strdup_printf("%s mmaps[%d]",
+ memory_region_name(region->mem), i);
+ memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
+ memory_region_owner(region->mem),
+ name, region->mmaps[i].size,
+ region->mmaps[i].mmap);
+ g_free(name);
+ memory_region_add_subregion(region->mem, region->mmaps[i].offset,
+ &region->mmaps[i].mem);
+
+ trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
+ region->mmaps[i].offset,
+ region->mmaps[i].offset +
+ region->mmaps[i].size - 1);
+ }
+
+ return 0;
+
+no_mmap:
+ trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
+ region->fd_offset + region->mmaps[i].offset,
+ region->fd_offset + region->mmaps[i].offset +
+ region->mmaps[i].size - 1, ret);
+
+ region->mmaps[i].mmap = NULL;
+
+ for (i--; i >= 0; i--) {
+ vfio_subregion_unmap(region, i);
+ }
+
+ return ret;
+}
+
+void vfio_region_unmap(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ vfio_subregion_unmap(region, i);
+ }
+ }
+}
+
+void vfio_region_exit(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
+ }
+ }
+
+ trace_vfio_region_exit(region->vbasedev->name, region->nr);
+}
+
+void vfio_region_finalize(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ munmap(region->mmaps[i].mmap, region->mmaps[i].size);
+ object_unparent(OBJECT(&region->mmaps[i].mem));
+ }
+ }
+
+ object_unparent(OBJECT(region->mem));
+
+ g_free(region->mem);
+ g_free(region->mmaps);
+
+ trace_vfio_region_finalize(region->vbasedev->name, region->nr);
+
+ region->mem = NULL;
+ region->mmaps = NULL;
+ region->nr_mmaps = 0;
+ region->size = 0;
+ region->flags = 0;
+ region->nr = 0;
+}
+
+void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ memory_region_set_enabled(&region->mmaps[i].mem, enabled);
+ }
+ }
+
+ trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
+ enabled);
+}
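For readers of the new region API, here is a minimal usage sketch (not part of the patch): a hypothetical caller mapping one region of an already-opened VFIO device. The caller name, region name string and error messages are invented; the vfio_region_* calls and headers match what hw/vfio/region.c above introduces.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/vfio/vfio-device.h"
#include "hw/vfio/vfio-region.h"

/* Hypothetical caller: set up and map one region of a VFIO device. */
static bool example_setup_region(Object *owner, VFIODevice *vbasedev,
                                 VFIORegion *region, int index, Error **errp)
{
    /* Query region info, record flags/size and create region->mem. */
    if (vfio_region_setup(owner, vbasedev, region, index, "example-region")) {
        error_setg(errp, "failed to set up region %d", index);
        return false;
    }

    /* mmap the sparse (or whole) ranges as RAM-device subregions. */
    if (vfio_region_mmap(region)) {
        error_setg(errp, "failed to mmap region %d", index);
        vfio_region_exit(region);
        vfio_region_finalize(region);
        return false;
    }

    /* Mappings can later be toggled, e.g. while a quirk traps accesses. */
    vfio_region_mmaps_set_enabled(region, true);
    return true;
}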
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 018bd20..564b70e 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -11,23 +11,30 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
-#ifdef CONFIG_KVM
-#include <linux/kvm.h>
-#endif
-#include "sysemu/kvm.h"
-#include "exec/address-spaces.h"
+#include "system/kvm.h"
+#include "system/hostmem.h"
+#include "system/address-spaces.h"
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-container.h"
#include "hw/hw.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "trace.h"
+#include "vfio-helpers.h"
+
+typedef struct VFIOHostDMAWindow {
+ hwaddr min_iova;
+ hwaddr max_iova;
+ uint64_t iova_pgsizes;
+ QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
+} VFIOHostDMAWindow;
typedef struct VFIOSpaprContainer {
VFIOContainer container;
MemoryListener prereg_listener;
QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
+ unsigned int levels;
} VFIOSpaprContainer;
OBJECT_DECLARE_SIMPLE_TYPE(VFIOSpaprContainer, VFIO_IOMMU_SPAPR);
@@ -232,15 +239,17 @@ static int vfio_spapr_remove_window(VFIOContainer *container,
return 0;
}
-static int vfio_spapr_create_window(VFIOContainer *container,
+static bool vfio_spapr_create_window(VFIOContainer *container,
MemoryRegionSection *section,
- hwaddr *pgsize)
+ hwaddr *pgsize, Error **errp)
{
int ret = 0;
VFIOContainerBase *bcontainer = &container->bcontainer;
+ VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
+ container);
IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
- unsigned entries, bits_total, bits_per_level, max_levels;
+ unsigned entries, bits_total, bits_per_level, max_levels, ddw_levels;
struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
long rampagesize = qemu_minrampagesize();
@@ -254,11 +263,11 @@ static int vfio_spapr_create_window(VFIOContainer *container,
pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1));
pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
if (!pagesize) {
- error_report("Host doesn't support page size 0x%"PRIx64
- ", the supported mask is 0x%lx",
- memory_region_iommu_get_min_page_size(iommu_mr),
- bcontainer->pgsizes);
- return -EINVAL;
+ error_setg_errno(errp, EINVAL, "Host doesn't support page size 0x%"PRIx64
+ ", the supported mask is 0x%lx",
+ memory_region_iommu_get_min_page_size(iommu_mr),
+ bcontainer->pgsizes);
+ return false;
}
/*
@@ -293,28 +302,41 @@ static int vfio_spapr_create_window(VFIOContainer *container,
*/
bits_per_level = ctz64(qemu_real_host_page_size()) + 8;
create.levels = bits_total / bits_per_level;
- if (bits_total % bits_per_level) {
- ++create.levels;
- }
- max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size());
- for ( ; create.levels <= max_levels; ++create.levels) {
- ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
- if (!ret) {
- break;
+
+ ddw_levels = scontainer->levels;
+ if (ddw_levels > 1) {
+ if (bits_total % bits_per_level) {
+ ++create.levels;
+ }
+ max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size());
+ for ( ; create.levels <= max_levels; ++create.levels) {
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+ if (!ret) {
+ break;
+ }
}
+ } else { /* ddw_levels == 1 */
+ if (create.levels > ddw_levels) {
+ error_setg_errno(errp, EINVAL, "Host doesn't support multi-level TCE tables"
+ ". Use larger IO page size. Supported mask is 0x%lx",
+ bcontainer->pgsizes);
+ return false;
+ }
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
}
+
if (ret) {
- error_report("Failed to create a window, ret = %d (%m)", ret);
- return -errno;
+ error_setg_errno(errp, errno, "Failed to create a window, ret = %d", ret);
+ return false;
}
if (create.start_addr != section->offset_within_address_space) {
vfio_spapr_remove_window(container, create.start_addr);
- error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64,
- section->offset_within_address_space,
- (uint64_t)create.start_addr);
- return -EINVAL;
+ error_setg_errno(errp, EINVAL, "Host doesn't support DMA window at %"HWADDR_PRIx
+ ", must be %"PRIx64, section->offset_within_address_space,
+ (uint64_t)create.start_addr);
+ return false;
}
trace_vfio_spapr_create_window(create.page_shift,
create.levels,
@@ -322,7 +344,7 @@ static int vfio_spapr_create_window(VFIOContainer *container,
create.start_addr);
*pgsize = pagesize;
- return 0;
+ return true;
}
static bool
@@ -379,9 +401,8 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
}
}
- ret = vfio_spapr_create_window(container, section, &pgsize);
- if (ret) {
- error_setg_errno(errp, -ret, "Failed to create SPAPR window");
+ ret = vfio_spapr_create_window(container, section, &pgsize, errp);
+ if (!ret) {
return false;
}
@@ -504,6 +525,8 @@ static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer,
goto listener_unregister_exit;
}
+ scontainer->levels = info.ddw.levels;
+
if (v2) {
bcontainer->pgsizes = info.ddw.pgsizes;
/*
@@ -536,7 +559,7 @@ listener_unregister_exit:
return false;
}
-static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data)
+static void vfio_iommu_spapr_class_init(ObjectClass *klass, const void *data)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index e16179b..e1728c4 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -1,8 +1,10 @@
# See docs/devel/tracing.rst for syntax documentation.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
# pci.c
vfio_intx_interrupt(const char *name, char line) " (%s) Pin %c"
-vfio_intx_eoi(const char *name) " (%s) EOI"
+vfio_pci_intx_eoi(const char *name) " (%s) EOI"
vfio_intx_enable_kvm(const char *name) " (%s) KVM INTx accel enabled"
vfio_intx_disable_kvm(const char *name) " (%s) KVM INTx accel disabled"
vfio_intx_update(const char *name, int new_irq, int target_irq) " (%s) IRQ moved %d -> %d"
@@ -27,7 +29,7 @@ vfio_vga_read(uint64_t addr, int size, uint64_t data) " (0x%"PRIx64", %d) = 0x%"
vfio_pci_read_config(const char *name, int addr, int len, int val) " (%s, @0x%x, len=0x%x) 0x%x"
vfio_pci_write_config(const char *name, int addr, int val, int len) " (%s, @0x%x, 0x%x, len=0x%x)"
vfio_msi_setup(const char *name, int pos) "%s PCI MSI CAP @0x%x"
-vfio_msix_early_setup(const char *name, int pos, int table_bar, int offset, int entries, bool noresize) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d, noresize %d"
+vfio_msix_early_setup(const char *name, int pos, int table_bar, uint64_t offset, int entries, bool noresize) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%"PRIx64", entries %d, noresize %d"
vfio_check_pcie_flr(const char *name) "%s Supports FLR via PCIe cap"
vfio_check_pm_reset(const char *name) "%s Supports PM reset"
vfio_check_af_flr(const char *name) "%s Supports FLR via AF cap"
@@ -35,10 +37,8 @@ vfio_pci_hot_reset(const char *name, const char *type) " (%s) %s"
vfio_pci_hot_reset_has_dep_devices(const char *name) "%s: hot reset dependent devices:"
vfio_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int group_id) "\t%04x:%02x:%02x.%x group %d"
vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %s"
-vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device '%s' config: size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
-vfio_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s"
-vfio_attach_device(const char *name, int group_id) " (%s) group %d"
-vfio_detach_device(const char *name, int group_id) " (%s) group %d"
+vfio_pci_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device '%s' config: size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
+vfio_pci_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s"
vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d"
vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x"
vfio_pci_reset(const char *name) " (%s)"
@@ -89,23 +89,33 @@ vfio_pci_igd_bdsm_enabled(const char *name, int size) "%s %dMB"
vfio_pci_igd_host_bridge_enabled(const char *name) "%s"
vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
-# common.c
-vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
-vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
+# listener.c
vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_skip(const char *name, uint64_t start, uint64_t end) "SKIPPING %s 0x%"PRIx64" - 0x%"PRIx64
vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d"
-vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_add_iommu(const char* name, uint64_t start, uint64_t end) "region_add [iommu] %s 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_del_iommu(const char *name) "region_del [iommu] %s"
vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]"
vfio_known_safe_misalignment(const char *name, uint64_t iova, uint64_t offset_within_region, uintptr_t page_size) "Region \"%s\" iova=0x%"PRIx64" offset_within_region=0x%"PRIx64" qemu_real_host_page_size=0x%"PRIxPTR
vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA"
vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64
vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, uint64_t max) "section 0x%"PRIx64" - 0x%"PRIx64" -> update [0x%"PRIx64" - 0x%"PRIx64"]"
vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64, uint64_t minpci, uint64_t maxpci) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"], pci64:[0x%"PRIx64" - 0x%"PRIx64"]"
-vfio_disconnect_container(int fd) "close container->fd=%d"
-vfio_put_group(int fd) "close group->fd=%d"
-vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
-vfio_put_base_device(int fd) "close vdev->fd=%d"
+vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
+
+# container-base.c
+vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
+
+# container.c
+vfio_container_disconnect(int fd) "close container->fd=%d"
+vfio_group_put(int fd) "close group->fd=%d"
+vfio_device_get(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
+vfio_device_put(int fd) "close vdev->fd=%d"
+vfio_legacy_dma_unmap_overflow_workaround(void) ""
+
+# region.c
+vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
+vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
vfio_region_setup(const char *dev, int index, const char *name, unsigned long flags, unsigned long offset, unsigned long size) "Device %s, region %d \"%s\", flags: 0x%lx, offset: 0x%lx, size: 0x%lx"
vfio_region_mmap_fault(const char *name, int index, unsigned long offset, unsigned long size, int fault) "Region %s mmaps[%d], [0x%lx - 0x%lx], fault: %d"
vfio_region_mmap(const char *name, unsigned long offset, unsigned long end) "Region %s [0x%lx - 0x%lx]"
@@ -115,10 +125,6 @@ vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps e
vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]"
vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
-vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
-vfio_legacy_dma_unmap_overflow_workaround(void) ""
-vfio_get_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
-vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
# platform.c
vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s"
@@ -147,22 +153,36 @@ vfio_display_edid_update(uint32_t prefx, uint32_t prefy) "%ux%u"
vfio_display_edid_write_error(void) ""
# migration.c
+vfio_load_bufs_thread_start(const char *name) " (%s)"
+vfio_load_bufs_thread_end(const char *name) " (%s)"
vfio_load_cleanup(const char *name) " (%s)"
-vfio_load_device_config_state(const char *name) " (%s)"
+vfio_load_device_config_state_start(const char *name) " (%s)"
+vfio_load_device_config_state_end(const char *name) " (%s)"
vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
-vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d"
+vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size %"PRIu64" ret %d"
+vfio_load_state_device_buffer_incoming(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_start(const char *name) " (%s)"
+vfio_load_state_device_buffer_starved(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_load_start(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_load_end(const char *name, uint32_t idx) " (%s) idx %"PRIu32
+vfio_load_state_device_buffer_end(const char *name) " (%s)"
vfio_migration_realize(const char *name) " (%s)"
vfio_migration_set_device_state(const char *name, const char *state) " (%s) state %s"
vfio_migration_set_state(const char *name, const char *new_state, const char *recover_state) " (%s) new state %s, recover state %s"
vfio_migration_state_notifier(const char *name, int state) " (%s) state %d"
vfio_save_block(const char *name, int data_size) " (%s) data_size %d"
+vfio_save_block_precopy_empty_hit(const char *name) " (%s)"
vfio_save_cleanup(const char *name) " (%s)"
vfio_save_complete_precopy(const char *name, int ret) " (%s) ret %d"
+vfio_save_complete_precopy_start(const char *name) " (%s)"
+vfio_save_complete_precopy_thread_start(const char *name, const char *idstr, uint32_t instance_id) " (%s) idstr %s instance %"PRIu32
+vfio_save_complete_precopy_thread_end(const char *name, int ret) " (%s) ret %d"
vfio_save_device_config_state(const char *name) " (%s)"
-vfio_save_iterate(const char *name, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64
-vfio_save_setup(const char *name, uint64_t data_buffer_size) " (%s) data buffer size 0x%"PRIx64
-vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64
-vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" stopcopy size 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64
+vfio_save_iterate(const char *name, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy initial size %"PRIu64" precopy dirty size %"PRIu64
+vfio_save_iterate_start(const char *name) " (%s)"
+vfio_save_setup(const char *name, uint64_t data_buffer_size) " (%s) data buffer size %"PRIu64
+vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64
+vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" stopcopy size %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64
vfio_vmstate_change(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s"
vfio_vmstate_change_prepare(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s"
@@ -176,3 +196,9 @@ iommufd_cdev_fail_attach_existing_container(const char *msg) " %s"
iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d"
iommufd_cdev_device_info(char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d"
iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d"
+
+# device.c
+vfio_device_get_region_info_type(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
+vfio_device_reset_handler(void) ""
+vfio_device_attach(const char *name, int group_id) " (%s) group %d"
+vfio_device_detach(const char *name, int group_id) " (%s) group %d"
diff --git a/hw/vfio/trace.h b/hw/vfio/trace.h
index 5a343aa..b34b61d 100644
--- a/hw/vfio/trace.h
+++ b/hw/vfio/trace.h
@@ -1 +1,4 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
#include "trace/trace-hw_vfio.h"
diff --git a/hw/vfio/vfio-display.h b/hw/vfio/vfio-display.h
new file mode 100644
index 0000000..2606c34
--- /dev/null
+++ b/hw/vfio/vfio-display.h
@@ -0,0 +1,42 @@
+/*
+ * VFIO display
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_DISPLAY_H
+#define HW_VFIO_VFIO_DISPLAY_H
+
+#include "ui/console.h"
+#include "hw/display/ramfb.h"
+#include "hw/vfio/vfio-region.h"
+
+typedef struct VFIODMABuf {
+ QemuDmaBuf *buf;
+ uint32_t pos_x, pos_y, pos_updates;
+ uint32_t hot_x, hot_y, hot_updates;
+ int dmabuf_id;
+ QTAILQ_ENTRY(VFIODMABuf) next;
+} VFIODMABuf;
+
+typedef struct VFIODisplay {
+ QemuConsole *con;
+ RAMFBState *ramfb;
+ struct vfio_region_info *edid_info;
+ struct vfio_region_gfx_edid *edid_regs;
+ uint8_t *edid_blob;
+ QEMUTimer *edid_link_timer;
+ struct {
+ VFIORegion buffer;
+ DisplaySurface *surface;
+ } region;
+ struct {
+ QTAILQ_HEAD(, VFIODMABuf) bufs;
+ VFIODMABuf *primary;
+ VFIODMABuf *cursor;
+ } dmabuf;
+} VFIODisplay;
+
+#endif /* HW_VFIO_VFIO_DISPLAY_H */
diff --git a/hw/vfio/vfio-helpers.h b/hw/vfio/vfio-helpers.h
new file mode 100644
index 0000000..54a327f
--- /dev/null
+++ b/hw/vfio/vfio-helpers.h
@@ -0,0 +1,35 @@
+/*
+ * VFIO helpers
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_HELPERS_H
+#define HW_VFIO_VFIO_HELPERS_H
+
+#ifdef CONFIG_LINUX
+#include <linux/vfio.h>
+
+extern int vfio_kvm_device_fd;
+
+struct vfio_info_cap_header *
+vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id);
+struct vfio_info_cap_header *
+vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id);
+struct vfio_info_cap_header *
+vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id);
+struct vfio_info_cap_header *
+vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id);
+bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
+ unsigned int *avail);
+#endif
+
+int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
+struct vfio_device_info *vfio_get_device_info(int fd);
+
+int vfio_kvm_device_add_fd(int fd, Error **errp);
+int vfio_kvm_device_del_fd(int fd, Error **errp);
+
+#endif /* HW_VFIO_VFIO_HELPERS_H */
diff --git a/hw/vfio/vfio-iommufd.h b/hw/vfio/vfio-iommufd.h
new file mode 100644
index 0000000..07ea0f4
--- /dev/null
+++ b/hw/vfio/vfio-iommufd.h
@@ -0,0 +1,34 @@
+/*
+ * VFIO iommufd
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_IOMMUFD_H
+#define HW_VFIO_VFIO_IOMMUFD_H
+
+#include "hw/vfio/vfio-container-base.h"
+
+typedef struct VFIODevice VFIODevice;
+
+typedef struct VFIOIOASHwpt {
+ uint32_t hwpt_id;
+ uint32_t hwpt_flags;
+ QLIST_HEAD(, VFIODevice) device_list;
+ QLIST_ENTRY(VFIOIOASHwpt) next;
+} VFIOIOASHwpt;
+
+typedef struct IOMMUFDBackend IOMMUFDBackend;
+
+typedef struct VFIOIOMMUFDContainer {
+ VFIOContainerBase bcontainer;
+ IOMMUFDBackend *be;
+ uint32_t ioas_id;
+ QLIST_HEAD(, VFIOIOASHwpt) hwpt_list;
+} VFIOIOMMUFDContainer;
+
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOIOMMUFDContainer, VFIO_IOMMU_IOMMUFD);
+
+#endif /* HW_VFIO_VFIO_IOMMUFD_H */
diff --git a/hw/vfio/vfio-listener.h b/hw/vfio/vfio-listener.h
new file mode 100644
index 0000000..eb69ddd
--- /dev/null
+++ b/hw/vfio/vfio-listener.h
@@ -0,0 +1,15 @@
+/*
+ * VFIO MemoryListener services
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_LISTENER_H
+#define HW_VFIO_VFIO_LISTENER_H
+
+bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp);
+void vfio_listener_unregister(VFIOContainerBase *bcontainer);
+
+#endif /* HW_VFIO_VFIO_LISTENER_H */
diff --git a/hw/vfio/vfio-migration-internal.h b/hw/vfio/vfio-migration-internal.h
new file mode 100644
index 0000000..a8b456b
--- /dev/null
+++ b/hw/vfio/vfio-migration-internal.h
@@ -0,0 +1,74 @@
+/*
+ * VFIO migration
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_MIGRATION_INTERNAL_H
+#define HW_VFIO_VFIO_MIGRATION_INTERNAL_H
+
+#ifdef CONFIG_LINUX
+#include <linux/vfio.h>
+#endif
+
+#include "qemu/typedefs.h"
+#include "qemu/notify.h"
+
+/*
+ * Flags to be used as unique delimiters for VFIO devices in the migration
+ * stream. These flags are composed as:
+ * 0xffffffff => MSB 32-bit all 1s
+ * 0xef10 => Magic ID, represents emulated (virtual) function IO
+ * 0x0000 => 16-bits reserved for flags
+ *
+ * The beginning of state information is marked by _DEV_CONFIG_STATE,
+ * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a
+ * certain state information is marked by _END_OF_STATE.
+ */
+#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL)
+#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL)
+#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL)
+#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL)
+#define VFIO_MIG_FLAG_DEV_INIT_DATA_SENT (0xffffffffef100005ULL)
+
+typedef struct VFIODevice VFIODevice;
+typedef struct VFIOMultifd VFIOMultifd;
+
+typedef struct VFIOMigration {
+ struct VFIODevice *vbasedev;
+ VMChangeStateEntry *vm_state;
+ NotifierWithReturn migration_state;
+ uint32_t device_state;
+ int data_fd;
+ void *data_buffer;
+ size_t data_buffer_size;
+ uint64_t mig_flags;
+ uint64_t precopy_init_size;
+ uint64_t precopy_dirty_size;
+ bool multifd_transfer;
+ VFIOMultifd *multifd;
+ bool initial_data_sent;
+
+ bool event_save_iterate_started;
+ bool event_precopy_empty_hit;
+} VFIOMigration;
+
+bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
+void vfio_migration_exit(VFIODevice *vbasedev);
+bool vfio_device_state_is_running(VFIODevice *vbasedev);
+bool vfio_device_state_is_precopy(VFIODevice *vbasedev);
+int vfio_save_device_config_state(QEMUFile *f, void *opaque, Error **errp);
+int vfio_load_device_config_state(QEMUFile *f, void *opaque);
+
+#ifdef CONFIG_LINUX
+int vfio_migration_set_state(VFIODevice *vbasedev,
+ enum vfio_device_mig_state new_state,
+ enum vfio_device_mig_state recover_state,
+ Error **errp);
+#endif
+
+void vfio_migration_add_bytes_transferred(unsigned long val);
+
+#endif /* HW_VFIO_VFIO_MIGRATION_INTERNAL_H */
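To illustrate how the delimiter flags above frame sections of the VFIO migration stream, a minimal sketch follows (not part of the patch). It assumes the standard QEMUFile helper qemu_put_be64(); the writer function itself and the omitted payload are hypothetical.

#include "qemu/osdep.h"
#include "migration/qemu-file.h"
#include "vfio-migration-internal.h"

/* Hypothetical writer: frame a device-config section with the delimiters. */
static void example_put_config_section(QEMUFile *f)
{
    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);   /* section start */
    /* ... emulated device/config state would be written here ... */
    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);       /* section end */
}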
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig
index aa63ff7..7648a2d 100644
--- a/hw/virtio/Kconfig
+++ b/hw/virtio/Kconfig
@@ -6,6 +6,10 @@ config VIRTIO_RNG
default y
depends on VIRTIO
+config VIRTIO_NSM
+ bool
+ depends on LIBCBOR && VIRTIO
+
config VIRTIO_IOMMU
bool
default y
@@ -16,6 +20,7 @@ config VIRTIO_PCI
default y if PCI_DEVICES
depends on PCI
select VIRTIO
+ select VIRTIO_MD_SUPPORTED
config VIRTIO_MMIO
bool
@@ -24,6 +29,7 @@ config VIRTIO_MMIO
config VIRTIO_CCW
bool
select VIRTIO
+ select VIRTIO_MD_SUPPORTED
config VIRTIO_BALLOON
bool
@@ -35,10 +41,17 @@ config VIRTIO_CRYPTO
default y
depends on VIRTIO
+# not all virtio transports support memory devices; if none does,
+# no need to include the code
+config VIRTIO_MD_SUPPORTED
+ bool
+
config VIRTIO_MD
bool
+ depends on VIRTIO_MD_SUPPORTED
select MEM_DEVICE
+# selected by the board if it has the required support code
config VIRTIO_PMEM_SUPPORTED
bool
@@ -46,9 +59,11 @@ config VIRTIO_PMEM
bool
default y
depends on VIRTIO
+ depends on VIRTIO_MD_SUPPORTED
depends on VIRTIO_PMEM_SUPPORTED
select VIRTIO_MD
+# selected by the board if it has the required support code
config VIRTIO_MEM_SUPPORTED
bool
@@ -57,6 +72,7 @@ config VIRTIO_MEM
default y
depends on VIRTIO
depends on LINUX
+ depends on VIRTIO_MD_SUPPORTED
depends on VIRTIO_MEM_SUPPORTED
select VIRTIO_MD
@@ -109,4 +125,4 @@ config VHOST_USER_SND
config VHOST_USER_SCMI
bool
default y
- depends on VIRTIO && VHOST_USER
+ depends on VIRTIO && VHOST_USER && ARM
diff --git a/hw/virtio/cbor-helpers.c b/hw/virtio/cbor-helpers.c
new file mode 100644
index 0000000..49f55df
--- /dev/null
+++ b/hw/virtio/cbor-helpers.c
@@ -0,0 +1,321 @@
+/*
+ * QEMU CBOR helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "hw/virtio/cbor-helpers.h"
+
+bool qemu_cbor_map_add(cbor_item_t *map, cbor_item_t *key, cbor_item_t *value)
+{
+ bool success = false;
+ struct cbor_pair pair = (struct cbor_pair) {
+ .key = cbor_move(key),
+ .value = cbor_move(value)
+ };
+
+ success = cbor_map_add(map, pair);
+ if (!success) {
+ cbor_incref(pair.key);
+ cbor_incref(pair.value);
+ }
+
+ return success;
+}
+
+bool qemu_cbor_array_push(cbor_item_t *array, cbor_item_t *value)
+{
+ bool success = false;
+
+ success = cbor_array_push(array, cbor_move(value));
+ if (!success) {
+ cbor_incref(value);
+ }
+
+ return success;
+}
+
+bool qemu_cbor_add_bool_to_map(cbor_item_t *map, const char *key, bool value)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_bool(value);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_uint8_to_map(cbor_item_t *map, const char *key,
+ uint8_t value)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_uint8(value);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_map_to_map(cbor_item_t *map, const char *key,
+ size_t nested_map_size,
+ cbor_item_t **nested_map)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_new_definite_map(nested_map_size);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+ *nested_map = value_cbor;
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_bytestring_to_map(cbor_item_t *map, const char *key,
+ uint8_t *arr, size_t len)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_bytestring(arr, len);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_null_to_map(cbor_item_t *map, const char *key)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_new_null();
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_string_to_map(cbor_item_t *map, const char *key,
+ const char *value)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_string(value);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_uint8_array_to_map(cbor_item_t *map, const char *key,
+ uint8_t *arr, size_t len)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_new_definite_array(len);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+
+ for (int i = 0; i < len; ++i) {
+ cbor_item_t *tmp = cbor_build_uint8(arr[i]);
+ if (!tmp) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_array_push(value_cbor, tmp)) {
+ cbor_decref(&tmp);
+ goto cleanup;
+ }
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_uint8_key_bytestring_to_map(cbor_item_t *map, uint8_t key,
+ uint8_t *buf, size_t len)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_uint8(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_bytestring(buf, len);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+bool qemu_cbor_add_uint64_to_map(cbor_item_t *map, const char *key,
+ uint64_t value)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+
+ key_cbor = cbor_build_string(key);
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_build_uint64(value);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
diff --git a/hw/virtio/iothread-vq-mapping.c b/hw/virtio/iothread-vq-mapping.c
new file mode 100644
index 0000000..15909eb
--- /dev/null
+++ b/hw/virtio/iothread-vq-mapping.c
@@ -0,0 +1,131 @@
+/*
+ * IOThread Virtqueue Mapping
+ *
+ * Copyright Red Hat, Inc
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ */
+
+#include "qemu/osdep.h"
+#include "system/iothread.h"
+#include "hw/virtio/iothread-vq-mapping.h"
+
+static bool
+iothread_vq_mapping_validate(IOThreadVirtQueueMappingList *list, uint16_t
+ num_queues, Error **errp)
+{
+ g_autofree unsigned long *vqs = bitmap_new(num_queues);
+ g_autoptr(GHashTable) iothreads =
+ g_hash_table_new(g_str_hash, g_str_equal);
+
+ for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
+ const char *name = node->value->iothread;
+ uint16List *vq;
+
+ if (!iothread_by_id(name)) {
+ error_setg(errp, "IOThread \"%s\" object does not exist", name);
+ return false;
+ }
+
+ if (!g_hash_table_add(iothreads, (gpointer)name)) {
+ error_setg(errp,
+ "duplicate IOThread name \"%s\" in iothread-vq-mapping",
+ name);
+ return false;
+ }
+
+ if (node != list) {
+ if (!!node->value->vqs != !!list->value->vqs) {
+ error_setg(errp, "either all items in iothread-vq-mapping "
+ "must have vqs or none of them must have it");
+ return false;
+ }
+ }
+
+ for (vq = node->value->vqs; vq; vq = vq->next) {
+ if (vq->value >= num_queues) {
+ error_setg(errp, "vq index %u for IOThread \"%s\" must be "
+ "less than num_queues %u in iothread-vq-mapping",
+ vq->value, name, num_queues);
+ return false;
+ }
+
+ if (test_and_set_bit(vq->value, vqs)) {
+ error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
+ "because it is already assigned", vq->value, name);
+ return false;
+ }
+ }
+ }
+
+ if (list->value->vqs) {
+ for (uint16_t i = 0; i < num_queues; i++) {
+ if (!test_bit(i, vqs)) {
+ error_setg(errp,
+ "missing vq %u IOThread assignment in iothread-vq-mapping",
+ i);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool iothread_vq_mapping_apply(
+ IOThreadVirtQueueMappingList *list,
+ AioContext **vq_aio_context,
+ uint16_t num_queues,
+ Error **errp)
+{
+ IOThreadVirtQueueMappingList *node;
+ size_t num_iothreads = 0;
+ size_t cur_iothread = 0;
+
+ if (!iothread_vq_mapping_validate(list, num_queues, errp)) {
+ return false;
+ }
+
+ for (node = list; node; node = node->next) {
+ num_iothreads++;
+ }
+
+ for (node = list; node; node = node->next) {
+ IOThread *iothread = iothread_by_id(node->value->iothread);
+ AioContext *ctx = iothread_get_aio_context(iothread);
+
+ /* Released in iothread_vq_mapping_cleanup() */
+ object_ref(OBJECT(iothread));
+
+ if (node->value->vqs) {
+ uint16List *vq;
+
+ /* Explicit vq:IOThread assignment */
+ for (vq = node->value->vqs; vq; vq = vq->next) {
+ assert(vq->value < num_queues);
+ vq_aio_context[vq->value] = ctx;
+ }
+ } else {
+ /* Round-robin vq:IOThread assignment */
+ for (unsigned i = cur_iothread; i < num_queues;
+ i += num_iothreads) {
+ vq_aio_context[i] = ctx;
+ }
+ }
+
+ cur_iothread++;
+ }
+
+ return true;
+}
+
+void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list)
+{
+ IOThreadVirtQueueMappingList *node;
+
+ for (node = list; node; node = node->next) {
+ IOThread *iothread = iothread_by_id(node->value->iothread);
+ object_unref(OBJECT(iothread));
+ }
+}
+
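
iothread_vq_mapping_apply() supports two modes: when each list entry carries an explicit vqs list those indices are used verbatim, otherwise queues are spread round-robin, so with two IOThreads and five queues the first IOThread gets vqs 0, 2 and 4 and the second gets vqs 1 and 3. A hedged caller sketch, not part of the patch (vq_aio_context, num_queues and errp are assumed caller-provided names):

    AioContext **vq_aio_context = g_new0(AioContext *, num_queues);

    if (!iothread_vq_mapping_apply(list, vq_aio_context, num_queues, errp)) {
        g_free(vq_aio_context);           /* errp already set by validation */
        return false;
    }

    /* ... attach each virtqueue's handlers in vq_aio_context[i] ... */

    iothread_vq_mapping_cleanup(list);    /* drops the IOThread references */
    g_free(vq_aio_context);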
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index 621fc65..164f6fd 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -1,5 +1,6 @@
system_virtio_ss = ss.source_set()
system_virtio_ss.add(files('virtio-bus.c'))
+system_virtio_ss.add(files('iothread-vq-mapping.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
@@ -54,6 +55,7 @@ specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c
specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
+specific_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('virtio-nsm.c', 'cbor-helpers.c'), libcbor])
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c'))
@@ -70,6 +72,7 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto-pc
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng-pci.c'))
+virtio_pci_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('virtio-nsm-pci.c', 'cbor-helpers.c'), libcbor])
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-scsi-pci.c'))
@@ -87,7 +90,8 @@ specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: system_virtio_ss)
system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c'))
system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c'))
-system_ss.add(when: 'CONFIG_VIRTIO_MD', if_false: files('virtio-md-stubs.c'))
+system_ss.add(when: ['CONFIG_VIRTIO_MD', 'CONFIG_VIRTIO_PCI'],
+ if_false: files('virtio-md-stubs.c'))
system_ss.add(files('virtio-hmp-cmds.c'))
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index b7c04f0..76f0d45 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -108,7 +108,7 @@ virtio_pci_notify_write(uint64_t addr, uint64_t val, unsigned int size) "0x%" PR
virtio_pci_notify_write_pio(uint64_t addr, uint64_t val, unsigned int size) "0x%" PRIx64" = 0x%" PRIx64 " (%d)"
# hw/virtio/virtio-iommu.c
-virtio_iommu_device_reset(void) "reset!"
+virtio_iommu_device_reset_exit(void) "reset!"
virtio_iommu_system_reset(void) "system reset!"
virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64
virtio_iommu_device_status(uint8_t status) "driver status = %d"
@@ -116,6 +116,7 @@ virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, u
virtio_iommu_set_config(uint8_t bypass) "bypass=0x%x"
virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
+virtio_iommu_detach_endpoint_from_domain(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d"
virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
virtio_iommu_unmap_done(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
diff --git a/hw/virtio/vdpa-dev-pci.c b/hw/virtio/vdpa-dev-pci.c
index 5446e6b..3068112 100644
--- a/hw/virtio/vdpa-dev-pci.c
+++ b/hw/virtio/vdpa-dev-pci.c
@@ -48,10 +48,6 @@ static void vhost_vdpa_device_pci_instance_init(Object *obj)
"bootindex");
}
-static Property vhost_vdpa_device_pci_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
static int vhost_vdpa_device_pci_post_init(VhostVdpaDevice *v, Error **errp)
{
VhostVdpaDevicePCI *dev = container_of(v, VhostVdpaDevicePCI, vdev);
@@ -74,13 +70,13 @@ vhost_vdpa_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(DEVICE(&dev->vdev), BUS(&vpci_dev->bus), errp);
}
-static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_vdpa_device_pci_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- device_class_set_props(dc, vhost_vdpa_device_pci_properties);
k->realize = vhost_vdpa_device_pci_realize;
}
diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c
index 64b96b226c..d1da40a 100644
--- a/hw/virtio/vdpa-dev.c
+++ b/hw/virtio/vdpa-dev.c
@@ -26,8 +26,8 @@
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vdpa-dev.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
static void
vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -312,7 +312,7 @@ static void vhost_vdpa_device_stop(VirtIODevice *vdev)
vhost_dev_disable_notifiers(&s->dev, vdev);
}
-static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
+static int vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
{
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
bool should_start = virtio_device_started(vdev, status);
@@ -324,7 +324,7 @@ static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
}
if (s->started == should_start) {
- return;
+ return 0;
}
if (should_start) {
@@ -335,12 +335,12 @@ static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
} else {
vhost_vdpa_device_stop(vdev);
}
+ return 0;
}
-static Property vhost_vdpa_device_properties[] = {
+static const Property vhost_vdpa_device_properties[] = {
DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev),
DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_vhost_vdpa_device = {
@@ -354,7 +354,7 @@ static const VMStateDescription vmstate_vhost_vdpa_device = {
},
};
-static void vhost_vdpa_device_class_init(ObjectClass *klass, void *data)
+static void vhost_vdpa_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c
index 3d03395..fa4147b 100644
--- a/hw/virtio/vhost-iova-tree.c
+++ b/hw/virtio/vhost-iova-tree.c
@@ -28,12 +28,18 @@ struct VhostIOVATree {
/* IOVA address to qemu memory maps. */
IOVATree *iova_taddr_map;
+
+ /* Allocated IOVA addresses */
+ IOVATree *iova_map;
+
+ /* GPA->IOVA address memory maps */
+ IOVATree *gpa_iova_map;
};
/**
- * Create a new IOVA tree
+ * Create a new VhostIOVATree
*
- * Returns the new IOVA tree
+ * Returns the new VhostIOVATree.
*/
VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last)
{
@@ -44,25 +50,29 @@ VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last)
tree->iova_last = iova_last;
tree->iova_taddr_map = iova_tree_new();
+ tree->iova_map = iova_tree_new();
+ tree->gpa_iova_map = gpa_tree_new();
return tree;
}
/**
- * Delete an iova tree
+ * Delete a VhostIOVATree
*/
void vhost_iova_tree_delete(VhostIOVATree *iova_tree)
{
iova_tree_destroy(iova_tree->iova_taddr_map);
+ iova_tree_destroy(iova_tree->iova_map);
+ iova_tree_destroy(iova_tree->gpa_iova_map);
g_free(iova_tree);
}
/**
* Find the IOVA address stored from a memory address
*
- * @tree: The iova tree
+ * @tree: The VhostIOVATree
* @map: The map with the memory address
*
- * Return the stored mapping, or NULL if not found.
+ * Returns the stored IOVA->HVA mapping, or NULL if not found.
*/
const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree,
const DMAMap *map)
@@ -71,40 +81,111 @@ const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree,
}
/**
- * Allocate a new mapping
+ * Allocate a new IOVA range and add the mapping to the IOVA->HVA tree
*
- * @tree: The iova tree
- * @map: The iova map
+ * @tree: The VhostIOVATree
+ * @map: The IOVA mapping
+ * @taddr: The translated address (HVA)
*
* Returns:
* - IOVA_OK if the map fits in the container
* - IOVA_ERR_INVALID if the map does not make sense (like size overflow)
* - IOVA_ERR_NOMEM if tree cannot allocate more space.
*
- * It returns assignated iova in map->iova if return value is VHOST_DMA_MAP_OK.
+ * It returns an assigned IOVA in map->iova if the return value is IOVA_OK.
*/
-int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map)
+int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map, hwaddr taddr)
{
+ int ret;
+
/* Some vhost devices do not like addr 0. Skip first page */
hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size();
- if (map->translated_addr + map->size < map->translated_addr ||
- map->perm == IOMMU_NONE) {
+ if (taddr + map->size < taddr || map->perm == IOMMU_NONE) {
return IOVA_ERR_INVALID;
}
- /* Allocate a node in IOVA address */
- return iova_tree_alloc_map(tree->iova_taddr_map, map, iova_first,
- tree->iova_last);
+ /* Allocate a node in the IOVA-only tree */
+ ret = iova_tree_alloc_map(tree->iova_map, map, iova_first, tree->iova_last);
+ if (unlikely(ret != IOVA_OK)) {
+ return ret;
+ }
+
+ /* Insert a node in the IOVA->HVA tree */
+ map->translated_addr = taddr;
+ return iova_tree_insert(tree->iova_taddr_map, map);
}
/**
- * Remove existing mappings from iova tree
+ * Remove existing mappings from the IOVA-only and IOVA->HVA trees
*
- * @iova_tree: The vhost iova tree
+ * @iova_tree: The VhostIOVATree
* @map: The map to remove
*/
void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map)
{
iova_tree_remove(iova_tree->iova_taddr_map, map);
+ iova_tree_remove(iova_tree->iova_map, map);
+}
+
+/**
+ * Find the IOVA address stored from a guest memory address (GPA)
+ *
+ * @tree: The VhostIOVATree
+ * @map: The map with the guest memory address
+ *
+ * Returns the stored GPA->IOVA mapping, or NULL if not found.
+ */
+const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *tree,
+ const DMAMap *map)
+{
+ return iova_tree_find_iova(tree->gpa_iova_map, map);
+}
+
+/**
+ * Allocate a new IOVA range and add the mapping to the GPA->IOVA tree
+ *
+ * @tree: The VhostIOVATree
+ * @map: The IOVA mapping
+ * @taddr: The translated address (GPA)
+ *
+ * Returns:
+ * - IOVA_OK if the map fits both containers
+ * - IOVA_ERR_INVALID if the map does not make sense (like size overflow)
+ * - IOVA_ERR_NOMEM if the IOVA-only tree cannot allocate more space
+ *
+ * It returns an assigned IOVA in map->iova if the return value is IOVA_OK.
+ */
+int vhost_iova_tree_map_alloc_gpa(VhostIOVATree *tree, DMAMap *map, hwaddr taddr)
+{
+ int ret;
+
+ /* Some vhost devices don't like addr 0. Skip first page */
+ hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size();
+
+ if (taddr + map->size < taddr || map->perm == IOMMU_NONE) {
+ return IOVA_ERR_INVALID;
+ }
+
+ /* Allocate a node in the IOVA-only tree */
+ ret = iova_tree_alloc_map(tree->iova_map, map, iova_first, tree->iova_last);
+ if (unlikely(ret != IOVA_OK)) {
+ return ret;
+ }
+
+ /* Insert a node in the GPA->IOVA tree */
+ map->translated_addr = taddr;
+ return gpa_tree_insert(tree->gpa_iova_map, map);
+}
+
+/**
+ * Remove existing mappings from the IOVA-only and GPA->IOVA trees
+ *
+ * @tree: The VhostIOVATree
+ * @map: The map to remove
+ */
+void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map)
+{
+ iova_tree_remove(iova_tree->gpa_iova_map, map);
+ iova_tree_remove(iova_tree->iova_map, map);
}
diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h
index 4adfd79..08f63b6 100644
--- a/hw/virtio/vhost-iova-tree.h
+++ b/hw/virtio/vhost-iova-tree.h
@@ -11,7 +11,7 @@
#define HW_VIRTIO_VHOST_IOVA_TREE_H
#include "qemu/iova-tree.h"
-#include "exec/memory.h"
+#include "system/memory.h"
typedef struct VhostIOVATree VhostIOVATree;
@@ -21,7 +21,13 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete);
const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree,
const DMAMap *map);
-int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map);
+int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map,
+ hwaddr taddr);
void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map);
+const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *iova_tree,
+ const DMAMap *map);
+int vhost_iova_tree_map_alloc_gpa(VhostIOVATree *iova_tree, DMAMap *map,
+ hwaddr taddr);
+void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map);
#endif
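
With these changes VhostIOVATree keeps three trees: an IOVA-only allocation tree plus separate IOVA->HVA and GPA->IOVA lookup trees. A hedged sketch of the new guest-memory path (tree, gpa, size and readonly are assumed to come from the caller; they are not defined by this patch):

    DMAMap map = {
        .size = size - 1,                            /* DMAMap sizes are inclusive */
        .perm = IOMMU_ACCESS_FLAG(true, !readonly),
    };

    if (vhost_iova_tree_map_alloc_gpa(tree, &map, gpa) == IOVA_OK) {
        /* map.iova now holds the allocated IOVA; map.translated_addr == gpa */
        /* ... program the device, and on unmap: */
        vhost_iova_tree_remove_gpa(tree, map);       /* removes from both trees */
    }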
diff --git a/hw/virtio/vhost-scsi-pci.c b/hw/virtio/vhost-scsi-pci.c
index 08980bc..7399ace 100644
--- a/hw/virtio/vhost-scsi-pci.c
+++ b/hw/virtio/vhost-scsi-pci.c
@@ -38,10 +38,9 @@ struct VHostSCSIPCI {
VHostSCSI vdev;
};
-static Property vhost_scsi_pci_properties[] = {
+static const Property vhost_scsi_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -62,7 +61,7 @@ static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_scsi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index fc5f408..2481d49 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -78,24 +78,39 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
* @vaddr: Translated IOVA addresses
* @iovec: Source qemu's VA addresses
* @num: Length of iovec and minimum length of vaddr
+ * @gpas: Descriptors' GPAs, if backed by guest memory
*/
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
hwaddr *addrs, const struct iovec *iovec,
- size_t num)
+ size_t num, const hwaddr *gpas)
{
if (num == 0) {
return true;
}
for (size_t i = 0; i < num; ++i) {
- DMAMap needle = {
- .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
- .size = iovec[i].iov_len,
- };
Int128 needle_last, map_last;
size_t off;
+ const DMAMap *map;
+ DMAMap needle;
+
+ /* Check if the descriptor is backed by guest memory */
+ if (gpas) {
+ /* Search the GPA->IOVA tree */
+ needle = (DMAMap) {
+ .translated_addr = gpas[i],
+ .size = iovec[i].iov_len,
+ };
+ map = vhost_iova_tree_find_gpa(svq->iova_tree, &needle);
+ } else {
+ /* Search the IOVA->HVA tree */
+ needle = (DMAMap) {
+ .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
+ .size = iovec[i].iov_len,
+ };
+ map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
+ }
- const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
/*
* Map cannot be NULL since iova map contains all guest space and
* qemu already has a physical address mapped
@@ -130,6 +145,7 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
* @sg: Cache for hwaddr
* @iovec: The iovec from the guest
* @num: iovec length
+ * @addr: Descriptors' GPAs, if backed by guest memory
* @more_descs: True if more descriptors come in the chain
* @write: True if they are writeable descriptors
*
@@ -137,7 +153,8 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
*/
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
const struct iovec *iovec, size_t num,
- bool more_descs, bool write)
+ const hwaddr *addr, bool more_descs,
+ bool write)
{
uint16_t i = svq->free_head, last = svq->free_head;
unsigned n;
@@ -149,7 +166,7 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
return true;
}
- ok = vhost_svq_translate_addr(svq, sg, iovec, num);
+ ok = vhost_svq_translate_addr(svq, sg, iovec, num, addr);
if (unlikely(!ok)) {
return false;
}
@@ -165,17 +182,18 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
descs[i].len = cpu_to_le32(iovec[n].iov_len);
last = i;
- i = cpu_to_le16(svq->desc_next[i]);
+ i = svq->desc_next[i];
}
- svq->free_head = le16_to_cpu(svq->desc_next[last]);
+ svq->free_head = svq->desc_next[last];
return true;
}
static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
const struct iovec *out_sg, size_t out_num,
+ const hwaddr *out_addr,
const struct iovec *in_sg, size_t in_num,
- unsigned *head)
+ const hwaddr *in_addr, unsigned *head)
{
unsigned avail_idx;
vring_avail_t *avail = svq->vring.avail;
@@ -191,13 +209,14 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
return false;
}
- ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
- false);
+ ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, out_addr,
+ in_num > 0, false);
if (unlikely(!ok)) {
return false;
}
- ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
+ ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, in_addr, false,
+ true);
if (unlikely(!ok)) {
return false;
}
@@ -228,10 +247,12 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
smp_mb();
if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
- uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]);
+ uint16_t avail_event = le16_to_cpu(
+ *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1);
} else {
- needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+ needs_kick =
+ !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
}
if (!needs_kick) {
@@ -247,8 +268,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
* Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
*/
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
- size_t out_num, const struct iovec *in_sg, size_t in_num,
- VirtQueueElement *elem)
+ size_t out_num, const hwaddr *out_addr,
+ const struct iovec *in_sg, size_t in_num,
+ const hwaddr *in_addr, VirtQueueElement *elem)
{
unsigned qemu_head;
unsigned ndescs = in_num + out_num;
@@ -258,7 +280,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -ENOSPC;
}
- ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+ ok = vhost_svq_add_split(svq, out_sg, out_num, out_addr, in_sg, in_num,
+ in_addr, &qemu_head);
if (unlikely(!ok)) {
return -EINVAL;
}
@@ -274,8 +297,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
VirtQueueElement *elem)
{
- return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
- elem->in_num, elem);
+ return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->out_addr,
+ elem->in_sg, elem->in_num, elem->in_addr, elem);
}
/**
@@ -365,7 +388,7 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
return true;
}
- svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx);
+ svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx);
return svq->last_used_idx != svq->shadow_used_idx;
}
@@ -383,7 +406,7 @@ static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
{
if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num];
- *used_event = svq->shadow_used_idx;
+ *used_event = cpu_to_le16(svq->shadow_used_idx);
} else {
svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
}
@@ -408,12 +431,13 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
uint16_t num, uint16_t i)
{
for (uint16_t j = 0; j < (num - 1); ++j) {
- i = le16_to_cpu(svq->desc_next[i]);
+ i = svq->desc_next[i];
}
return i;
}
+G_GNUC_WARN_UNUSED_RESULT
static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
uint32_t *len)
{
@@ -526,10 +550,11 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
{
size_t len = 0;
- uint32_t r;
while (num--) {
+ g_autofree VirtQueueElement *elem = NULL;
int64_t start_us = g_get_monotonic_time();
+ uint32_t r = 0;
do {
if (vhost_svq_more_used(svq)) {
@@ -541,7 +566,7 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
}
} while (true);
- vhost_svq_get_buf(svq, &r);
+ elem = vhost_svq_get_buf(svq, &r);
len += r;
}
@@ -681,7 +706,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->desc_state = g_new0(SVQDescState, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
- svq->desc_next[i] = cpu_to_le16(i + 1);
+ svq->desc_next[i] = i + 1;
}
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 19c842a..9c27373 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -118,8 +118,9 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
- size_t out_num, const struct iovec *in_sg, size_t in_num,
- VirtQueueElement *elem);
+ size_t out_num, const hwaddr *out_addr,
+ const struct iovec *in_sg, size_t in_num,
+ const hwaddr *in_addr, VirtQueueElement *elem);
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
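
With the widened vhost_svq_add() prototype, callers pass the descriptors' GPAs alongside the iovecs whenever the buffers are backed by guest memory, so translation goes through the GPA->IOVA tree instead of matching host virtual addresses. A hedged sketch (out_gpas is a hypothetical caller-owned array; NULL is passed for QEMU-internal buffers):

    int r = vhost_svq_add(svq,
                          out_sg, out_num, out_gpas,   /* guest-backed buffers */
                          in_sg,  in_num,  NULL,       /* host-only buffers */
                          elem);
    if (unlikely(r != 0)) {
        /* -ENOSPC: no descriptors free; -EINVAL: translation failed */
    }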
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
index 2bc3423..ff67a02 100644
--- a/hw/virtio/vhost-user-base.c
+++ b/hw/virtio/vhost-user-base.c
@@ -66,7 +66,7 @@ err_host_notifiers:
vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
}
-static void vub_stop(VirtIODevice *vdev)
+static int vub_stop(VirtIODevice *vdev)
{
VHostUserBase *vub = VHOST_USER_BASE(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
@@ -74,34 +74,39 @@ static void vub_stop(VirtIODevice *vdev)
int ret;
if (!k->set_guest_notifiers) {
- return;
+ return 0;
}
- vhost_dev_stop(&vub->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&vub->vhost_dev, vdev, true);
- ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false);
- if (ret < 0) {
+ if (k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
+ return -1;
}
vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
+ return ret;
}
-static void vub_set_status(VirtIODevice *vdev, uint8_t status)
+static int vub_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserBase *vub = VHOST_USER_BASE(vdev);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&vub->vhost_dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
vub_start(vdev);
} else {
- vub_stop(vdev);
+ int ret;
+ ret = vub_stop(vdev);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
/*
@@ -348,7 +353,7 @@ static void vub_device_unrealize(DeviceState *dev)
do_vhost_user_cleanup(vdev, vub);
}
-static void vub_class_init(ObjectClass *klass, void *data)
+static void vub_class_init(ObjectClass *klass, const void *data)
{
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-blk-pci.c b/hw/virtio/vhost-user-blk-pci.c
index eef8641..904369f 100644
--- a/hw/virtio/vhost-user-blk-pci.c
+++ b/hw/virtio/vhost-user-blk-pci.c
@@ -43,11 +43,10 @@ struct VHostUserBlkPCI {
VHostUserBlk vdev;
};
-static Property vhost_user_blk_pci_properties[] = {
+static const Property vhost_user_blk_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -66,7 +65,7 @@ static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_blk_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c
index efaf55d..f10bac8 100644
--- a/hw/virtio/vhost-user-device-pci.c
+++ b/hw/virtio/vhost-user-device-pci.c
@@ -31,7 +31,8 @@ static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_device_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_device_pci_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-device.c b/hw/virtio/vhost-user-device.c
index 67aa934..3939bdf 100644
--- a/hw/virtio/vhost-user-device.c
+++ b/hw/virtio/vhost-user-device.c
@@ -29,16 +29,15 @@ static const VMStateDescription vud_vmstate = {
.unmigratable = 1,
};
-static Property vud_properties[] = {
+static const Property vud_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
DEFINE_PROP_UINT16("virtio-id", VHostUserBase, virtio_id, 0),
DEFINE_PROP_UINT32("vq_size", VHostUserBase, vq_size, 64),
DEFINE_PROP_UINT32("num_vqs", VHostUserBase, num_vqs, 1),
DEFINE_PROP_UINT32("config_size", VHostUserBase, config_size, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vud_class_init(ObjectClass *klass, void *data)
+static void vud_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-fs-pci.c b/hw/virtio/vhost-user-fs-pci.c
index 6829b8b..1490c11 100644
--- a/hw/virtio/vhost-user-fs-pci.c
+++ b/hw/virtio/vhost-user-fs-pci.c
@@ -29,10 +29,9 @@ typedef struct VHostUserFSPCI VHostUserFSPCI;
DECLARE_INSTANCE_CHECKER(VHostUserFSPCI, VHOST_USER_FS_PCI,
TYPE_VHOST_USER_FS_PCI)
-static Property vhost_user_fs_pci_properties[] = {
+static const Property vhost_user_fs_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_fs_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -48,7 +47,7 @@ static void vhost_user_fs_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_fs_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_fs_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index ae48cc1..e77c69e 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -23,7 +23,7 @@
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-fs.h"
#include "monitor/monitor.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static const int user_feature_bits[] = {
VIRTIO_F_VERSION_1,
@@ -33,6 +33,7 @@ static const int user_feature_bits[] = {
VIRTIO_F_RING_PACKED,
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -99,7 +100,7 @@ err_host_notifiers:
vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}
-static void vuf_stop(VirtIODevice *vdev)
+static int vuf_stop(VirtIODevice *vdev)
{
VHostUserFS *fs = VHOST_USER_FS(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
@@ -107,34 +108,39 @@ static void vuf_stop(VirtIODevice *vdev)
int ret;
if (!k->set_guest_notifiers) {
- return;
+ return 0;
}
- vhost_dev_stop(&fs->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&fs->vhost_dev, vdev, true);
- ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
- if (ret < 0) {
+ if (k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
+ return -1;
}
vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
+ return ret;
}
-static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
+static int vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserFS *fs = VHOST_USER_FS(vdev);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
vuf_start(vdev);
} else {
- vuf_stop(vdev);
+ int ret;
+ ret = vuf_stop(vdev);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
static uint64_t vuf_get_features(VirtIODevice *vdev,
@@ -266,7 +272,6 @@ err_virtio:
g_free(fs->req_vqs);
virtio_cleanup(vdev);
g_free(fs->vhost_dev.vqs);
- return;
}
static void vuf_device_unrealize(DeviceState *dev)
@@ -402,13 +407,12 @@ static const VMStateDescription vuf_backend_vmstate = {
},
};
-static Property vuf_properties[] = {
+static const Property vuf_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev),
DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag),
DEFINE_PROP_UINT16("num-request-queues", VHostUserFS,
conf.num_request_queues, 1),
DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128),
- DEFINE_PROP_END_OF_LIST(),
};
static void vuf_instance_init(Object *obj)
@@ -419,7 +423,7 @@ static void vuf_instance_init(Object *obj)
"/filesystem@0", DEVICE(obj));
}
-static void vuf_class_init(ObjectClass *klass, void *data)
+static void vuf_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-gpio-pci.c b/hw/virtio/vhost-user-gpio-pci.c
index b3028a2..9b165b5 100644
--- a/hw/virtio/vhost-user-gpio-pci.c
+++ b/hw/virtio/vhost-user-gpio-pci.c
@@ -32,7 +32,7 @@ static void vhost_user_gpio_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_gpio_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_gpio_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
index 9f37c25..a7fd49b 100644
--- a/hw/virtio/vhost-user-gpio.c
+++ b/hw/virtio/vhost-user-gpio.c
@@ -14,9 +14,8 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_gpio.h"
-static Property vgpio_properties[] = {
+static const Property vgpio_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
static void vgpio_realize(DeviceState *dev, Error **errp)
@@ -37,7 +36,7 @@ static const VMStateDescription vu_gpio_vmstate = {
.unmigratable = 1,
};
-static void vu_gpio_class_init(ObjectClass *klass, void *data)
+static void vu_gpio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-i2c-pci.c b/hw/virtio/vhost-user-i2c-pci.c
index 00ac109..692cd66 100644
--- a/hw/virtio/vhost-user-i2c-pci.c
+++ b/hw/virtio/vhost-user-i2c-pci.c
@@ -32,7 +32,7 @@ static void vhost_user_i2c_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_i2c_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_i2c_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index a464f5e..ae007fe 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -14,9 +14,8 @@
#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
-static Property vi2c_properties[] = {
+static const Property vi2c_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
static void vi2c_realize(DeviceState *dev, Error **errp)
@@ -37,7 +36,7 @@ static const VMStateDescription vu_i2c_vmstate = {
.unmigratable = 1,
};
-static void vu_i2c_class_init(ObjectClass *klass, void *data)
+static void vu_i2c_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-input.c b/hw/virtio/vhost-user-input.c
index bedec04..5cfc5bb 100644
--- a/hw/virtio/vhost-user-input.c
+++ b/hw/virtio/vhost-user-input.c
@@ -7,9 +7,8 @@
#include "qemu/osdep.h"
#include "hw/virtio/virtio-input.h"
-static Property vinput_properties[] = {
+static const Property vinput_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
static void vinput_realize(DeviceState *dev, Error **errp)
@@ -31,7 +30,7 @@ static const VMStateDescription vmstate_vhost_input = {
.unmigratable = 1,
};
-static void vhost_input_class_init(ObjectClass *klass, void *data)
+static void vhost_input_class_init(ObjectClass *klass, const void *data)
{
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-rng-pci.c b/hw/virtio/vhost-user-rng-pci.c
index f649354..9f45fc6 100644
--- a/hw/virtio/vhost-user-rng-pci.c
+++ b/hw/virtio/vhost-user-rng-pci.c
@@ -23,10 +23,9 @@ typedef struct VHostUserRNGPCI VHostUserRNGPCI;
DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI,
TYPE_VHOST_USER_RNG_PCI)
-static Property vhost_user_rng_pci_properties[] = {
+static const Property vhost_user_rng_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -41,7 +40,7 @@ static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_rng_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c
index 01879c8..61dadcd 100644
--- a/hw/virtio/vhost-user-rng.c
+++ b/hw/virtio/vhost-user-rng.c
@@ -20,9 +20,8 @@ static const VMStateDescription vu_rng_vmstate = {
.unmigratable = 1,
};
-static Property vrng_properties[] = {
+static const Property vrng_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
static void vu_rng_base_realize(DeviceState *dev, Error **errp)
@@ -38,7 +37,7 @@ static void vu_rng_base_realize(DeviceState *dev, Error **errp)
vubs->parent_realize(dev, errp);
}
-static void vu_rng_class_init(ObjectClass *klass, void *data)
+static void vu_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-scmi-pci.c b/hw/virtio/vhost-user-scmi-pci.c
index 7f53af7..0ab56a5 100644
--- a/hw/virtio/vhost-user-scmi-pci.c
+++ b/hw/virtio/vhost-user-scmi-pci.c
@@ -31,7 +31,7 @@ static void vhost_user_scmi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_scmi_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_scmi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c
index 300847e..f9264c4 100644
--- a/hw/virtio/vhost-user-scmi.c
+++ b/hw/virtio/vhost-user-scmi.c
@@ -83,7 +83,7 @@ err_host_notifiers:
return ret;
}
-static void vu_scmi_stop(VirtIODevice *vdev)
+static int vu_scmi_stop(VirtIODevice *vdev)
{
VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
@@ -93,41 +93,46 @@ static void vu_scmi_stop(VirtIODevice *vdev)
/* vhost_dev_is_started() check in the callers is not fully reliable. */
if (!scmi->started_vu) {
- return;
+ return 0;
}
scmi->started_vu = false;
if (!k->set_guest_notifiers) {
- return;
+ return 0;
}
- vhost_dev_stop(vhost_dev, vdev, true);
+ ret = vhost_dev_stop(vhost_dev, vdev, true);
- ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
- if (ret < 0) {
+ if (k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
+ return -1;
}
vhost_dev_disable_notifiers(vhost_dev, vdev);
+ return ret;
}
-static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
+static int vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
bool should_start = virtio_device_should_start(vdev, status);
if (!scmi->connected) {
- return;
+ return -1;
}
if (vhost_dev_is_started(&scmi->vhost_dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
vu_scmi_start(vdev);
} else {
- vu_scmi_stop(vdev);
+ int ret;
+ ret = vu_scmi_stop(vdev);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
static uint64_t vu_scmi_get_features(VirtIODevice *vdev, uint64_t features,
@@ -258,8 +263,6 @@ static void vu_scmi_device_realize(DeviceState *dev, Error **errp)
qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL,
dev, NULL, true);
-
- return;
}
static void vu_scmi_device_unrealize(DeviceState *dev)
@@ -277,12 +280,11 @@ static const VMStateDescription vu_scmi_vmstate = {
.unmigratable = 1,
};
-static Property vu_scmi_properties[] = {
+static const Property vu_scmi_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vu_scmi_class_init(ObjectClass *klass, void *data)
+static void vu_scmi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-scsi-pci.c b/hw/virtio/vhost-user-scsi-pci.c
index 75882e3..994e51a 100644
--- a/hw/virtio/vhost-user-scsi-pci.c
+++ b/hw/virtio/vhost-user-scsi-pci.c
@@ -29,7 +29,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qom/object.h"
@@ -44,10 +44,9 @@ struct VHostUserSCSIPCI {
VHostUserSCSI vdev;
};
-static Property vhost_user_scsi_pci_properties[] = {
+static const Property vhost_user_scsi_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -68,7 +67,7 @@ static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_scsi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-snd-pci.c b/hw/virtio/vhost-user-snd-pci.c
index d61cfda..f5015fb 100644
--- a/hw/virtio/vhost-user-snd-pci.c
+++ b/hw/virtio/vhost-user-snd-pci.c
@@ -23,10 +23,6 @@ typedef struct VHostUserSoundPCI VHostUserSoundPCI;
DECLARE_INSTANCE_CHECKER(VHostUserSoundPCI, VHOST_USER_SND_PCI,
TYPE_VHOST_USER_SND_PCI)
-static Property vhost_user_snd_pci_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(vpci_dev);
@@ -37,14 +33,13 @@ static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_snd_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_snd_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
k->realize = vhost_user_snd_pci_realize;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
- device_class_set_props(dc, vhost_user_snd_pci_properties);
pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
pcidev_k->revision = 0x00;
diff --git a/hw/virtio/vhost-user-snd.c b/hw/virtio/vhost-user-snd.c
index 9a21754..732411c 100644
--- a/hw/virtio/vhost-user-snd.c
+++ b/hw/virtio/vhost-user-snd.c
@@ -16,30 +16,45 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_snd.h"
+static const VirtIOFeature feature_sizes[] = {
+ {.flags = 1ULL << VIRTIO_SND_F_CTLS,
+ .end = endof(struct virtio_snd_config, controls)},
+ {}
+};
+
+static const VirtIOConfigSizeParams cfg_size_params = {
+ .min_size = endof(struct virtio_snd_config, chmaps),
+ .max_size = sizeof(struct virtio_snd_config),
+ .feature_sizes = feature_sizes
+};
+
static const VMStateDescription vu_snd_vmstate = {
.name = "vhost-user-snd",
.unmigratable = 1,
};
-static Property vsnd_properties[] = {
+static const Property vsnd_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_BIT64("controls", VHostUserBase,
+ parent_obj.host_features, VIRTIO_SND_F_CTLS, false),
};
static void vu_snd_base_realize(DeviceState *dev, Error **errp)
{
VHostUserBase *vub = VHOST_USER_BASE(dev);
VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev);
+ VirtIODevice *vdev = &vub->parent_obj;
vub->virtio_id = VIRTIO_ID_SOUND;
vub->num_vqs = 4;
- vub->config_size = sizeof(struct virtio_snd_config);
+ vub->config_size = virtio_get_config_size(&cfg_size_params,
+ vdev->host_features);
vub->vq_size = 64;
vubs->parent_realize(dev, errp);
}
-static void vu_snd_class_init(ObjectClass *klass, void *data)
+static void vu_snd_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
diff --git a/hw/virtio/vhost-user-vsock-pci.c b/hw/virtio/vhost-user-vsock-pci.c
index e5a86e8..adb877b 100644
--- a/hw/virtio/vhost-user-vsock-pci.c
+++ b/hw/virtio/vhost-user-vsock-pci.c
@@ -31,9 +31,8 @@ struct VHostUserVSockPCI {
/* vhost-user-vsock-pci */
-static Property vhost_user_vsock_pci_properties[] = {
+static const Property vhost_user_vsock_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -47,7 +46,8 @@ static void vhost_user_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_user_vsock_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_user_vsock_pci_class_init(ObjectClass *klass,
+ const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
index 802b44a..993c287 100644
--- a/hw/virtio/vhost-user-vsock.c
+++ b/hw/virtio/vhost-user-vsock.c
@@ -21,6 +21,7 @@ static const int user_feature_bits[] = {
VIRTIO_RING_F_INDIRECT_DESC,
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -53,23 +54,28 @@ const VhostDevConfigOps vsock_ops = {
.vhost_dev_config_notifier = vuv_handle_config_change,
};
-static void vuv_set_status(VirtIODevice *vdev, uint8_t status)
+static int vuv_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
bool should_start = virtio_device_should_start(vdev, status);
+ int ret;
if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
- int ret = vhost_vsock_common_start(vdev);
+ ret = vhost_vsock_common_start(vdev);
if (ret < 0) {
- return;
+ return ret;
}
} else {
- vhost_vsock_common_stop(vdev);
+ ret = vhost_vsock_common_stop(vdev);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
static uint64_t vuv_get_features(VirtIODevice *vdev,
@@ -127,7 +133,6 @@ err_vhost_dev:
err_virtio:
vhost_vsock_common_unrealize(vdev);
vhost_user_cleanup(&vsock->vhost_user);
- return;
}
static void vuv_device_unrealize(DeviceState *dev)
@@ -147,12 +152,11 @@ static void vuv_device_unrealize(DeviceState *dev)
}
-static Property vuv_properties[] = {
+static const Property vuv_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserVSock, conf.chardev),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vuv_class_init(ObjectClass *klass, void *data)
+static void vuv_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 00561da..1e1d6b0 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -19,16 +19,16 @@
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/uuid.h"
#include "qemu/sockets.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cryptodev.h"
+#include "system/runstate.h"
+#include "system/cryptodev.h"
#include "migration/postcopy-ram.h"
#include "trace.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include <sys/ioctl.h>
#include <sys/socket.h>
@@ -654,8 +654,6 @@ static void scrub_shadow_regions(struct vhost_dev *dev,
}
*nr_rem_reg = rm_idx;
*nr_add_reg = add_idx;
-
- return;
}
static int send_remove_regions(struct vhost_dev *dev,
@@ -1185,9 +1183,16 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
- assert(n && n->unmap_addr);
- munmap(n->unmap_addr, qemu_real_host_page_size());
- n->unmap_addr = NULL;
+ if (n->unmap_addr) {
+ munmap(n->unmap_addr, qemu_real_host_page_size());
+ n->unmap_addr = NULL;
+ }
+ if (n->destroy) {
+ memory_region_transaction_begin();
+ object_unparent(OBJECT(&n->mr));
+ memory_region_transaction_commit();
+ g_free(n);
+ }
}
/*
@@ -1195,17 +1200,28 @@ static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
* under rcu.
*/
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
- VirtIODevice *vdev)
+ VirtIODevice *vdev, bool destroy)
{
+ /*
+ * If destroy == false and n->addr == NULL, there is nothing to do,
+ * so just return.
+ */
+ if (!n || (!destroy && !n->addr)) {
+ return;
+ }
+
if (n->addr) {
if (vdev) {
+ memory_region_transaction_begin();
virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
+ memory_region_transaction_commit();
}
assert(!n->unmap_addr);
n->unmap_addr = n->addr;
n->addr = NULL;
- call_rcu(n, vhost_user_host_notifier_free, rcu);
}
+ n->destroy = destroy;
+ call_rcu(n, vhost_user_host_notifier_free, rcu);
}
static int vhost_user_set_vring_base(struct vhost_dev *dev,
@@ -1279,9 +1295,7 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
struct vhost_user *u = dev->opaque;
VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
- if (n) {
- vhost_user_host_notifier_remove(n, dev->vdev);
- }
+ vhost_user_host_notifier_remove(n, dev->vdev, false);
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
@@ -1562,7 +1576,7 @@ static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
* new mapped address.
*/
n = fetch_or_create_notifier(user, queue_idx);
- vhost_user_host_notifier_remove(n, vdev);
+ vhost_user_host_notifier_remove(n, vdev, false);
if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
return 0;
@@ -1607,9 +1621,14 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev,
QemuUUID uuid;
memcpy(uuid.data, object->uuid, sizeof(object->uuid));
- return virtio_add_vhost_device(&uuid, dev);
+ return !virtio_add_vhost_device(&uuid, dev);
}
+/*
+ * Handle VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE backend requests.
+ *
+ * Return: 0 on success, 1 on error.
+ */
static int
vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev,
VhostUserShared *object)
@@ -1623,16 +1642,16 @@ vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev,
struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid);
if (dev != owner) {
/* Not allowed to remove non-owned entries */
- return 0;
+ return 1;
}
break;
}
default:
/* Not allowed to remove non-owned entries */
- return 0;
+ return 1;
}
- return virtio_remove_resource(&uuid);
+ return !virtio_remove_resource(&uuid);
}
static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr,
@@ -2736,15 +2755,7 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
static void vhost_user_state_destroy(gpointer data)
{
VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
- if (n) {
- vhost_user_host_notifier_remove(n, NULL);
- object_unparent(OBJECT(&n->mr));
- /*
- * We can't free until vhost_user_host_notifier_remove has
- * done it's thing so schedule the free with RCU.
- */
- g_free_rcu(n, rcu);
- }
+ vhost_user_host_notifier_remove(n, NULL, true);
}
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
@@ -2765,9 +2776,7 @@ void vhost_user_cleanup(VhostUserState *user)
if (!user->chr) {
return;
}
- memory_region_transaction_begin();
user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
- memory_region_transaction_commit();
user->chr = NULL;
}
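
The host-notifier changes above split unmapping from destruction: ordinary vring teardown keeps the VhostUserHostNotifier around for reuse, while the GPtrArray destructor requests full destruction, and the munmap/unparent/g_free all happen in vhost_user_host_notifier_free() after the RCU grace period. A recap of the two call sites, drawn from the hunks above rather than new API:

    /* vring stopped or notifier re-announced: unmap only, keep n alive */
    vhost_user_host_notifier_remove(n, dev->vdev, false);

    /* VhostUserState teardown (GPtrArray element destructor): free n too */
    vhost_user_host_notifier_remove(n, NULL, true);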
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3cdaa12..7061b6e 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -20,7 +20,7 @@
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
@@ -209,6 +209,8 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
int ret;
Int128 llend;
Error *local_err = NULL;
+ MemoryRegion *mr;
+ hwaddr xlat;
if (iotlb->target_as != &address_space_memory) {
error_report("Wrong target AS \"%s\", only system memory is allowed",
@@ -228,11 +230,14 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
bool read_only;
- if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
- &local_err)) {
+ mr = memory_translate_iotlb(iotlb, &xlat, &local_err);
+ if (!mr) {
error_report_err(local_err);
return;
}
+ vaddr = memory_region_get_ram_ptr(mr) + xlat;
+ read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
+
ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
iotlb->addr_mask + 1, vaddr, read_only);
if (ret) {
@@ -288,8 +293,6 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
-
- return;
}
static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
@@ -360,14 +363,20 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
if (s->shadow_data) {
int r;
+ hwaddr gpa = section->offset_within_address_space;
- mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
mem_region.size = int128_get64(llsize) - 1,
mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),
- r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region);
+ r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa);
if (unlikely(r != IOVA_OK)) {
error_report("Can't allocate a mapping (%d)", r);
+
+ if (mem_region.translated_addr == gpa) {
+ error_report("Insertion to GPA->IOVA tree failed");
+ /* Remove the mapping from the IOVA-only tree */
+ goto fail_map;
+ }
goto fail;
}
@@ -386,7 +395,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
fail_map:
if (s->shadow_data) {
- vhost_iova_tree_remove(s->iova_tree, mem_region);
+ vhost_iova_tree_remove_gpa(s->iova_tree, mem_region);
}
fail:
@@ -440,21 +449,18 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
if (s->shadow_data) {
const DMAMap *result;
- const void *vaddr = memory_region_get_ram_ptr(section->mr) +
- section->offset_within_region +
- (iova - section->offset_within_address_space);
DMAMap mem_region = {
- .translated_addr = (hwaddr)(uintptr_t)vaddr,
+ .translated_addr = section->offset_within_address_space,
.size = int128_get64(llsize) - 1,
};
- result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
+ result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region);
if (!result) {
/* The memory listener map wasn't mapped */
return;
}
iova = result->iova;
- vhost_iova_tree_remove(s->iova_tree, *result);
+ vhost_iova_tree_remove_gpa(s->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(s);
/*
@@ -593,6 +599,36 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}
+static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+
+ uint64_t features;
+ uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
+ 0x1ULL << VHOST_BACKEND_F_SUSPEND;
+ int r;
+
+ if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
+ return -EFAULT;
+ }
+
+ features &= f;
+
+ if (vhost_vdpa_first_dev(dev)) {
+ r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
+ if (r) {
+ return -EFAULT;
+ }
+ }
+
+ dev->backend_cap = features;
+ v->shared->backend_cap = features;
+
+ return 0;
+}
+
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
struct vhost_vdpa *v = opaque;
@@ -602,7 +638,12 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
v->dev = dev;
dev->opaque = opaque ;
- v->shared->listener = vhost_vdpa_memory_listener;
+
+ ret = vhost_vdpa_set_backend_cap(dev);
+ if (unlikely(ret != 0)) {
+ return ret;
+ }
+
vhost_vdpa_init_svq(dev, v);
error_propagate(&dev->migration_blocker, v->migration_blocker);
@@ -638,6 +679,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
+ v->shared->listener = vhost_vdpa_memory_listener;
return 0;
}
@@ -840,36 +882,6 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
-static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
-{
- struct vhost_vdpa *v = dev->opaque;
-
- uint64_t features;
- uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
- 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
- 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
- 0x1ULL << VHOST_BACKEND_F_SUSPEND;
- int r;
-
- if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
- return -EFAULT;
- }
-
- features &= f;
-
- if (vhost_vdpa_first_dev(dev)) {
- r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
- if (r) {
- return -EFAULT;
- }
- }
-
- dev->backend_cap = features;
- v->shared->backend_cap = features;
-
- return 0;
-}
-
static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
uint32_t *device_id)
{
@@ -887,8 +899,14 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev)
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev);
+ if (ret) {
+ return ret;
+ }
+
+ memory_listener_unregister(&v->shared->listener);
+ v->shared->listener_registered = false;
v->suspended = false;
- return ret;
+ return 0;
}
static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
@@ -1142,16 +1160,23 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
*
* @v: Vhost-vdpa device
* @needle: The area to search iova
+ * @taddr: The translated address (HVA)
* @errorp: Error pointer
*/
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
- Error **errp)
+ hwaddr taddr, Error **errp)
{
int r;
- r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr);
if (unlikely(r != IOVA_OK)) {
error_setg(errp, "Cannot allocate iova (%d)", r);
+
+ if (needle->translated_addr == taddr) {
+ error_append_hint(errp, "Insertion to IOVA->HVA tree failed");
+ /* Remove the mapping from the IOVA-only tree */
+ vhost_iova_tree_remove(v->shared->iova_tree, *needle);
+ }
return false;
}
@@ -1192,11 +1217,11 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
vhost_svq_get_vring_addr(svq, &svq_addr);
driver_region = (DMAMap) {
- .translated_addr = svq_addr.desc_user_addr,
.size = driver_size - 1,
.perm = IOMMU_RO,
};
- ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
+ ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr,
+ errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq driver region: ");
return false;
@@ -1206,11 +1231,11 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
addr->avail_user_addr = driver_region.iova + avail_offset;
device_region = (DMAMap) {
- .translated_addr = svq_addr.used_user_addr,
.size = device_size - 1,
.perm = IOMMU_RW,
};
- ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
+ ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr,
+ errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq device region: ");
vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
@@ -1365,7 +1390,15 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
"IOMMU and try again");
return -1;
}
- memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
+ if (v->shared->listener_registered &&
+ dev->vdev->dma_as != v->shared->listener.address_space) {
+ memory_listener_unregister(&v->shared->listener);
+ v->shared->listener_registered = false;
+ }
+ if (!v->shared->listener_registered) {
+ memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
+ v->shared->listener_registered = true;
+ }
return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
}
@@ -1375,8 +1408,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
- struct vhost_vdpa *v = dev->opaque;
-
if (!vhost_vdpa_last_dev(dev)) {
return;
}
@@ -1384,7 +1415,6 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev)
vhost_vdpa_reset_device(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
- memory_listener_unregister(&v->shared->listener);
}
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
@@ -1518,12 +1548,27 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
+ int r;
+ struct vhost_vdpa *v;
+
if (!vhost_vdpa_first_dev(dev)) {
return 0;
}
trace_vhost_vdpa_set_owner(dev);
- return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
+ r = vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ /*
+ * Be optimistic and register the listener on the memory address space.
+ * If the device uses a vIOMMU, it is switched at vhost_vdpa_dev_start.
+ */
+ v = dev->opaque;
+ memory_listener_register(&v->shared->listener, &address_space_memory);
+ v->shared->listener_registered = true;
+ return 0;
}
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
@@ -1555,7 +1600,6 @@ const VhostOps vdpa_ops = {
.vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
.vhost_set_vring_call = vhost_vdpa_set_vring_call,
.vhost_get_features = vhost_vdpa_get_features,
- .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
.vhost_set_owner = vhost_vdpa_set_owner,
.vhost_set_vring_endian = NULL,
.vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
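
The vhost-vdpa hunk above registers the memory listener optimistically at VHOST_SET_OWNER time and, at device start, only re-registers it if the device's DMA address space turns out to differ. A minimal sketch of that idempotent attach logic follows, with a hypothetical Listener type standing in for the MemoryListener bookkeeping in the patch.

#include <stdbool.h>
#include <stdio.h>

typedef struct Listener {
    const void *as;    /* address space currently listened to */
    bool registered;
} Listener;

/* Attach to 'as', re-registering only if we were bound to a different one. */
static void listener_attach(Listener *l, const void *as)
{
    if (l->registered && l->as != as) {
        /* unregister from the old address space first */
        l->registered = false;
    }
    if (!l->registered) {
        l->as = as;
        l->registered = true;
    }
}

int main(void)
{
    Listener l = { 0 };
    int default_as, iommu_as;

    listener_attach(&l, &default_as);  /* optimistic, at set-owner time */
    listener_attach(&l, &default_as);  /* no-op: already on this space */
    listener_attach(&l, &iommu_as);    /* vIOMMU case: swap at dev start */
    printf("registered=%d\n", l.registered);
    return 0;
}
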
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index fd88df2..c6c44d8 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -95,7 +95,7 @@ err_host_notifiers:
return ret;
}
-void vhost_vsock_common_stop(VirtIODevice *vdev)
+int vhost_vsock_common_stop(VirtIODevice *vdev)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
@@ -103,18 +103,18 @@ void vhost_vsock_common_stop(VirtIODevice *vdev)
int ret;
if (!k->set_guest_notifiers) {
- return;
+ return 0;
}
- vhost_dev_stop(&vvc->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&vvc->vhost_dev, vdev, true);
- ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false);
- if (ret < 0) {
+ if (k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
+ return -1;
}
vhost_dev_disable_notifiers(&vvc->vhost_dev, vdev);
+ return ret;
}
@@ -285,13 +285,12 @@ static struct vhost_dev *vhost_vsock_common_get_vhost(VirtIODevice *vdev)
return &vvc->vhost_dev;
}
-static Property vhost_vsock_common_properties[] = {
+static const Property vhost_vsock_common_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket,
ON_OFF_AUTO_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_vsock_common_class_init(ObjectClass *klass, void *data)
+static void vhost_vsock_common_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost-vsock-pci.c b/hw/virtio/vhost-vsock-pci.c
index 9f34414..0022a71 100644
--- a/hw/virtio/vhost-vsock-pci.c
+++ b/hw/virtio/vhost-vsock-pci.c
@@ -35,9 +35,8 @@ struct VHostVSockPCI {
/* vhost-vsock-pci */
-static Property vhost_vsock_pci_properties[] = {
+static const Property vhost_vsock_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -57,7 +56,7 @@ static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
+static void vhost_vsock_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 3d4a5a9..6e40888 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -67,37 +67,38 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start)
}
-static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
+static int vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
bool should_start = virtio_device_should_start(vdev, status);
int ret;
if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
ret = vhost_vsock_common_start(vdev);
if (ret < 0) {
- return;
+ return 0;
}
ret = vhost_vsock_set_running(vdev, 1);
if (ret < 0) {
vhost_vsock_common_stop(vdev);
error_report("Error starting vhost vsock: %d", -ret);
- return;
+ return 0;
}
} else {
ret = vhost_vsock_set_running(vdev, 0);
if (ret < 0) {
error_report("vhost vsock set running failed: %d", ret);
- return;
+ return 0;
}
vhost_vsock_common_stop(vdev);
}
+ return 0;
}
static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
@@ -205,13 +206,12 @@ static void vhost_vsock_device_unrealize(DeviceState *dev)
vhost_vsock_common_unrealize(vdev);
}
-static Property vhost_vsock_properties[] = {
+static const Property vhost_vsock_properties[] = {
DEFINE_PROP_UINT64("guest-cid", VHostVSock, conf.guest_cid, 0),
DEFINE_PROP_STRING("vhostfd", VHostVSock, conf.vhostfd),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_vsock_class_init(ObjectClass *klass, void *data)
+static void vhost_vsock_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 06fc717..fc43853 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -26,7 +26,7 @@
#include "hw/mem/memory-device.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "trace.h"
/* enabled until disconnected backend stabilizes */
@@ -732,7 +732,6 @@ out:
memory_region_unref(old_sections[n_old_sections].mr);
}
g_free(old_sections);
- return;
}
/* Adds the section data to the tmp_section structure.
@@ -1368,10 +1367,10 @@ fail_alloc_desc:
return r;
}
-void vhost_virtqueue_stop(struct vhost_dev *dev,
- struct VirtIODevice *vdev,
- struct vhost_virtqueue *vq,
- unsigned idx)
+int vhost_virtqueue_stop(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
{
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {
@@ -1381,7 +1380,7 @@ void vhost_virtqueue_stop(struct vhost_dev *dev,
if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
/* Don't stop the virtqueue which might have not been started */
- return;
+ return 0;
}
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
@@ -1412,6 +1411,7 @@ void vhost_virtqueue_stop(struct vhost_dev *dev,
0, virtio_queue_get_avail_size(vdev, idx));
vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
0, virtio_queue_get_desc_size(vdev, idx));
+ return r;
}
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
@@ -1682,9 +1682,9 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
memset(hdev, 0, sizeof(struct vhost_dev));
}
-static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
- VirtIODevice *vdev,
- unsigned int nvqs)
+void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
+ VirtIODevice *vdev,
+ unsigned int nvqs)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
int i, r;
@@ -1930,62 +1930,6 @@ void vhost_dev_free_inflight(struct vhost_inflight *inflight)
}
}
-static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
- uint64_t new_size)
-{
- Error *err = NULL;
- int fd = -1;
- void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
- F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
- &fd, &err);
-
- if (err) {
- error_report_err(err);
- return -ENOMEM;
- }
-
- vhost_dev_free_inflight(inflight);
- inflight->offset = 0;
- inflight->addr = addr;
- inflight->fd = fd;
- inflight->size = new_size;
-
- return 0;
-}
-
-void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
-{
- if (inflight->addr) {
- qemu_put_be64(f, inflight->size);
- qemu_put_be16(f, inflight->queue_size);
- qemu_put_buffer(f, inflight->addr, inflight->size);
- } else {
- qemu_put_be64(f, 0);
- }
-}
-
-int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
-{
- uint64_t size;
-
- size = qemu_get_be64(f);
- if (!size) {
- return 0;
- }
-
- if (inflight->size != size) {
- int ret = vhost_dev_resize_inflight(inflight, size);
- if (ret < 0) {
- return ret;
- }
- }
- inflight->queue_size = qemu_get_be16(f);
-
- qemu_get_buffer(f, inflight->addr, size);
-
- return 0;
-}
-
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int r;
@@ -2151,11 +2095,22 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
* vhost-kernel code requires for this.*/
for (i = 0; i < hdev->nvqs; ++i) {
struct vhost_virtqueue *vq = hdev->vqs + i;
- vhost_device_iotlb_miss(hdev, vq->used_phys, true);
+ r = vhost_device_iotlb_miss(hdev, vq->used_phys, true);
+ if (r) {
+ goto fail_iotlb;
+ }
}
}
vhost_start_config_intr(hdev);
return 0;
+fail_iotlb:
+ if (vhost_dev_has_iommu(hdev) &&
+ hdev->vhost_ops->vhost_set_iotlb_callback) {
+ hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
+ }
+ if (hdev->vhost_ops->vhost_dev_start) {
+ hdev->vhost_ops->vhost_dev_start(hdev, false);
+ }
fail_start:
if (vrings) {
vhost_dev_set_vring_enable(hdev, false);
@@ -2181,9 +2136,10 @@ fail_features:
}
/* Host notifiers must be enabled at this point. */
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
+int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i;
+ int rc = 0;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
@@ -2202,10 +2158,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
vhost_dev_set_vring_enable(hdev, false);
}
for (i = 0; i < hdev->nvqs; ++i) {
- vhost_virtqueue_stop(hdev,
- vdev,
- hdev->vqs + i,
- hdev->vq_index + i);
+ rc |= vhost_virtqueue_stop(hdev,
+ vdev,
+ hdev->vqs + i,
+ hdev->vq_index + i);
}
if (hdev->vhost_ops->vhost_reset_status) {
hdev->vhost_ops->vhost_reset_status(hdev);
@@ -2222,6 +2178,7 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
hdev->started = false;
vdev->vhost_started = false;
hdev->vdev = NULL;
+ return rc;
}
int vhost_net_set_backend(struct vhost_dev *hdev,
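
vhost_dev_stop() above now keeps stopping every virtqueue even when one of them fails, folding the per-queue results into a single return code. A small sketch of that aggregate-and-continue pattern, with stop_one_queue() as a hypothetical placeholder:

#include <stdio.h>

/* Pretend queue 1 fails to stop; everything else succeeds. */
static int stop_one_queue(int idx)
{
    return idx == 1 ? -1 : 0;
}

/* Stop all queues; never short-circuit, but remember any failure. */
static int stop_all_queues(int nvqs)
{
    int rc = 0;

    for (int i = 0; i < nvqs; i++) {
        rc |= stop_one_queue(i);
    }
    return rc;  /* 0 only if every queue stopped cleanly */
}

int main(void)
{
    printf("stop_all_queues(3) = %d\n", stop_all_queues(3));
    return 0;
}
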
diff --git a/hw/virtio/virtio-9p-pci.c b/hw/virtio/virtio-9p-pci.c
index 94c14f0..594742f 100644
--- a/hw/virtio/virtio-9p-pci.c
+++ b/hw/virtio/virtio-9p-pci.c
@@ -43,14 +43,13 @@ static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static Property virtio_9p_pci_properties[] = {
+static const Property virtio_9p_pci_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_9p_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/virtio/virtio-acpi.c b/hw/virtio/virtio-acpi.c
index 230a669..85becef 100644
--- a/hw/virtio/virtio-acpi.c
+++ b/hw/virtio/virtio-acpi.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* virtio ACPI Support
*
diff --git a/hw/virtio/virtio-balloon-pci.c b/hw/virtio/virtio-balloon-pci.c
index ce2645b..96e88b6 100644
--- a/hw/virtio/virtio-balloon-pci.c
+++ b/hw/virtio/virtio-balloon-pci.c
@@ -35,16 +35,27 @@ struct VirtIOBalloonPCI {
VirtIOBalloon vdev;
};
+static const Property virtio_balloon_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
+ DEV_NVECTORS_UNSPECIFIED),
+};
+
static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = 2;
+ }
+
vpci_dev->class_code = PCI_CLASS_OTHERS;
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_balloon_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
@@ -55,6 +66,7 @@ static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
pcidev_k->class_id = PCI_CLASS_OTHERS;
+ device_class_set_props(dc, virtio_balloon_properties);
}
static void virtio_balloon_pci_instance_init(Object *obj)
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 609e39a..db787d0 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -22,16 +22,16 @@
#include "hw/mem/pc-dimm.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
-#include "sysemu/balloon.h"
+#include "system/balloon.h"
#include "hw/virtio/virtio-balloon.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qapi/error.h"
#include "qapi/qapi-events-machine.h"
#include "qapi/visitor.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
-
+#include "system/reset.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
@@ -167,19 +167,33 @@ static void balloon_deflate_page(VirtIOBalloon *balloon,
}
}
+/*
+ * All stats up to VIRTIO_BALLOON_S_NR /must/ have a
+ * non-NULL name declared here, since these are used
+ * as keys for populating the QDict with stats
+ */
static const char *balloon_stat_names[] = {
[VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in",
[VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out",
[VIRTIO_BALLOON_S_MAJFLT] = "stat-major-faults",
[VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults",
[VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
+
[VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
[VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory",
[VIRTIO_BALLOON_S_CACHES] = "stat-disk-caches",
[VIRTIO_BALLOON_S_HTLB_PGALLOC] = "stat-htlb-pgalloc",
[VIRTIO_BALLOON_S_HTLB_PGFAIL] = "stat-htlb-pgfail",
- [VIRTIO_BALLOON_S_NR] = NULL
+
+ [VIRTIO_BALLOON_S_OOM_KILL] = "stat-oom-kills",
+ [VIRTIO_BALLOON_S_ALLOC_STALL] = "stat-alloc-stalls",
+ [VIRTIO_BALLOON_S_ASYNC_SCAN] = "stat-async-scans",
+ [VIRTIO_BALLOON_S_DIRECT_SCAN] = "stat-direct-scans",
+ [VIRTIO_BALLOON_S_ASYNC_RECLAIM] = "stat-async-reclaims",
+
+ [VIRTIO_BALLOON_S_DIRECT_RECLAIM] = "stat-direct-reclaims",
};
+G_STATIC_ASSERT(G_N_ELEMENTS(balloon_stat_names) == VIRTIO_BALLOON_S_NR);
/*
* reset_stats - Mark all items in the stats array as unset
@@ -896,6 +910,8 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
}
reset_stats(s);
+ s->stats_last_update = 0;
+ qemu_register_resettable(OBJECT(dev));
}
static void virtio_balloon_device_unrealize(DeviceState *dev)
@@ -903,6 +919,7 @@ static void virtio_balloon_device_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOBalloon *s = VIRTIO_BALLOON(dev);
+ qemu_unregister_resettable(OBJECT(dev));
if (s->free_page_bh) {
qemu_bh_delete(s->free_page_bh);
object_unref(OBJECT(s->iothread));
@@ -941,7 +958,7 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev)
s->poison_val = 0;
}
-static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
+static int virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
@@ -971,6 +988,28 @@ static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
qemu_mutex_unlock(&s->free_page_lock);
}
}
+ return 0;
+}
+
+static ResettableState *virtio_balloon_get_reset_state(Object *obj)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(obj);
+ return &s->reset_state;
+}
+
+static void virtio_balloon_reset_enter(Object *obj, ResetType type)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(obj);
+
+ /*
+ * When waking up from standby/suspend-to-ram, do not reset stats.
+ */
+ if (type == RESET_TYPE_WAKEUP) {
+ return;
+ }
+
+ reset_stats(s);
+ s->stats_last_update = 0;
}
static void virtio_balloon_instance_init(Object *obj)
@@ -1001,7 +1040,7 @@ static const VMStateDescription vmstate_virtio_balloon = {
},
};
-static Property virtio_balloon_properties[] = {
+static const Property virtio_balloon_properties[] = {
DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM, false),
DEFINE_PROP_BIT("free-page-hint", VirtIOBalloon, host_features,
@@ -1018,13 +1057,13 @@ static Property virtio_balloon_properties[] = {
qemu_4_0_config_size, false),
DEFINE_PROP_LINK("iothread", VirtIOBalloon, iothread, TYPE_IOTHREAD,
IOThread *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_balloon_class_init(ObjectClass *klass, void *data)
+static void virtio_balloon_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
device_class_set_props(dc, virtio_balloon_properties);
dc->vmsd = &vmstate_virtio_balloon;
@@ -1037,6 +1076,9 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
vdc->get_features = virtio_balloon_get_features;
vdc->set_status = virtio_balloon_set_status;
vdc->vmsd = &vmstate_virtio_balloon_device;
+
+ rc->get_state = virtio_balloon_get_reset_state;
+ rc->phases.enter = virtio_balloon_reset_enter;
}
static const TypeInfo virtio_balloon_info = {
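
The balloon hunk above pairs the extended stat-name table with a G_STATIC_ASSERT so the build breaks whenever VIRTIO_BALLOON_S_NR grows without a matching name. The same guard can be sketched with plain C11 _Static_assert and a reduced, illustrative set of stat names:

#include <stdio.h>

enum { STAT_SWAP_IN, STAT_SWAP_OUT, STAT_OOM_KILLS, STAT_NR };

static const char *const stat_names[] = {
    [STAT_SWAP_IN]   = "stat-swap-in",
    [STAT_SWAP_OUT]  = "stat-swap-out",
    [STAT_OOM_KILLS] = "stat-oom-kills",
};

/* Compile-time check: every enumerator below STAT_NR must have a name. */
_Static_assert(sizeof(stat_names) / sizeof(stat_names[0]) == STAT_NR,
               "stat_names[] is out of sync with the stat enum");

int main(void)
{
    for (int i = 0; i < STAT_NR; i++) {
        printf("%s\n", stat_names[i]);
    }
    return 0;
}
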
diff --git a/hw/virtio/virtio-blk-pci.c b/hw/virtio/virtio-blk-pci.c
index 9743bee..fd33bbd 100644
--- a/hw/virtio/virtio-blk-pci.c
+++ b/hw/virtio/virtio-blk-pci.c
@@ -38,13 +38,12 @@ struct VirtIOBlkPCI {
VirtIOBlock vdev;
};
-static Property virtio_blk_pci_properties[] = {
+static const Property virtio_blk_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -64,7 +63,7 @@ static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_blk_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 896feb3..11adfbf 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -28,7 +28,7 @@
#include "qapi/error.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
/* #define DEBUG_VIRTIO_BUS */
@@ -348,7 +348,7 @@ bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev)
return klass->iommu_enabled(qbus->parent);
}
-static void virtio_bus_class_init(ObjectClass *klass, void *data)
+static void virtio_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *bus_class = BUS_CLASS(klass);
bus_class->get_dev_path = virtio_bus_get_dev_path;
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
index 0783dc2..868abc0 100644
--- a/hw/virtio/virtio-crypto-pci.c
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -37,11 +37,10 @@ struct VirtIOCryptoPCI {
VirtIOCrypto vdev;
};
-static Property virtio_crypto_pci_properties[] = {
+static const Property virtio_crypto_pci_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -60,7 +59,7 @@ static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_crypto_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index bbe8aa4..517f208 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -22,7 +22,7 @@
#include "hw/virtio/virtio-crypto.h"
#include "hw/qdev-properties.h"
#include "standard-headers/linux/virtio_ids.h"
-#include "sysemu/cryptodev-vhost.h"
+#include "system/cryptodev-vhost.h"
#define VIRTIO_CRYPTO_VM_VERSION 1
@@ -205,6 +205,7 @@ virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
int queue_index;
uint32_t algo, keytype, keylen;
+ sreq->info.op_code = opcode;
algo = ldl_le_p(&sess_req->para.algo);
keytype = ldl_le_p(&sess_req->para.keytype);
keylen = ldl_le_p(&sess_req->para.keylen);
@@ -224,7 +225,6 @@ virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
iov_discard_front(&iov, &out_num, keylen);
}
- sreq->info.op_code = opcode;
asym_info = &sreq->info.u.asym_sess_info;
asym_info->algo = algo;
asym_info->keytype = keytype;
@@ -461,7 +461,7 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
req->in_iov = NULL;
req->in_num = 0;
req->in_len = 0;
- req->flags = QCRYPTODEV_BACKEND_ALG__MAX;
+ req->flags = QCRYPTODEV_BACKEND_ALGO_TYPE__MAX;
memset(&req->op_info, 0x00, sizeof(req->op_info));
}
@@ -471,7 +471,7 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
return;
}
- if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) {
+ if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) {
size_t max_len;
CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;
@@ -486,7 +486,7 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
}
- } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) {
CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
if (op_info) {
g_free(op_info->src);
@@ -571,10 +571,10 @@ static void virtio_crypto_req_complete(void *opaque, int ret)
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
uint8_t status = -ret;
- if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) {
+ if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) {
virtio_crypto_sym_input_data_helper(vdev, req, status,
req->op_info.u.sym_op_info);
- } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) {
virtio_crypto_akcipher_input_data_helper(vdev, req, status,
req->op_info.u.asym_op_info);
}
@@ -884,7 +884,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
switch (opcode) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
- op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_SYM;
+ op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALGO_TYPE_SYM;
ret = virtio_crypto_handle_sym_req(vcrypto,
&req.u.sym_req, op_info,
out_iov, out_num);
@@ -894,7 +894,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
- op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_ASYM;
+ op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM;
ret = virtio_crypto_handle_asym_req(vcrypto,
&req.u.akcipher_req, op_info,
out_iov, out_num);
@@ -1008,19 +1008,19 @@ static uint32_t virtio_crypto_init_services(uint32_t qservices)
{
uint32_t vservices = 0;
- if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER)) {
vservices |= (1 << VIRTIO_CRYPTO_SERVICE_CIPHER);
}
- if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_HASH)) {
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH)) {
vservices |= (1 << VIRTIO_CRYPTO_SERVICE_HASH);
}
- if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_MAC)) {
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC)) {
vservices |= (1 << VIRTIO_CRYPTO_SERVICE_MAC);
}
- if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AEAD)) {
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AEAD)) {
vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AEAD);
}
- if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER)) {
vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER);
}
@@ -1128,10 +1128,9 @@ static const VMStateDescription vmstate_virtio_crypto = {
},
};
-static Property virtio_crypto_properties[] = {
+static const Property virtio_crypto_properties[] = {
DEFINE_PROP_LINK("cryptodev", VirtIOCrypto, conf.cryptodev,
TYPE_CRYPTODEV_BACKEND, CryptoDevBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
@@ -1198,11 +1197,12 @@ static void virtio_crypto_vhost_status(VirtIOCrypto *c, uint8_t status)
}
}
-static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
+static int virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
virtio_crypto_vhost_status(vcrypto, status);
+ return 0;
}
static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
@@ -1247,13 +1247,25 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
static struct vhost_dev *virtio_crypto_get_vhost(VirtIODevice *vdev)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
- CryptoDevBackend *b = vcrypto->cryptodev;
- CryptoDevBackendClient *cc = b->conf.peers.ccs[0];
- CryptoDevBackendVhost *vhost_crypto = cryptodev_get_vhost(cc, b, 0);
+ CryptoDevBackend *b;
+ CryptoDevBackendClient *cc;
+ CryptoDevBackendVhost *vhost_crypto;
+
+ b = vcrypto->cryptodev;
+ if (!b) {
+ return NULL;
+ }
+
+ cc = b->conf.peers.ccs[0];
+ vhost_crypto = cryptodev_get_vhost(cc, b, 0);
+ if (!vhost_crypto) {
+ return NULL;
+ }
+
return &vhost_crypto->dev;
}
-static void virtio_crypto_class_init(ObjectClass *klass, void *data)
+static void virtio_crypto_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/virtio-hmp-cmds.c b/hw/virtio/virtio-hmp-cmds.c
index 477c97d..7d8677b 100644
--- a/hw/virtio/virtio-hmp-cmds.c
+++ b/hw/virtio/virtio-hmp-cmds.c
@@ -9,7 +9,7 @@
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qapi-commands-virtio.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
static void hmp_virtio_dump_protocols(Monitor *mon,
diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c
index a53edf4..3be5358 100644
--- a/hw/virtio/virtio-input-pci.c
+++ b/hw/virtio/virtio-input-pci.c
@@ -37,9 +37,8 @@ struct VirtIOInputHIDPCI {
VirtIOInputHID vdev;
};
-static Property virtio_input_pci_properties[] = {
+static const Property virtio_input_pci_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -51,7 +50,7 @@ static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_input_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
@@ -64,7 +63,8 @@ static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}
-static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass,
+ const void *data)
{
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
@@ -72,7 +72,7 @@ static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
}
static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c
index cbdfe4c..8123c6f 100644
--- a/hw/virtio/virtio-iommu-pci.c
+++ b/hw/virtio/virtio-iommu-pci.c
@@ -34,12 +34,11 @@ struct VirtIOIOMMUPCI {
VirtIOIOMMU vdev;
};
-static Property virtio_iommu_pci_properties[] = {
+static const Property virtio_iommu_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_ARRAY("reserved-regions", VirtIOIOMMUPCI,
vdev.nr_prop_resv_regions, vdev.prop_resv_regions,
qdev_prop_reserved_region, ReservedRegion),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -74,7 +73,7 @@ static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_iommu_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_iommu_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 33ae61c..3500f1b 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -25,9 +25,9 @@
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
+#include "system/kvm.h"
+#include "system/reset.h"
+#include "system/system.h"
#include "qemu/reserved-region.h"
#include "qemu/units.h"
#include "qapi/error.h"
@@ -308,6 +308,7 @@ static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
if (!ep->domain) {
return;
}
+ trace_virtio_iommu_detach_endpoint_from_domain(domain->id, ep->id);
g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
ep->iommu_mr);
QLIST_REMOVE(ep, next);
@@ -467,26 +468,6 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
return &sdev->as;
}
-static void virtio_iommu_device_clear(VirtIOIOMMU *s, PCIBus *bus, int devfn)
-{
- IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
- IOMMUDevice *sdev;
-
- if (!sbus) {
- return;
- }
-
- sdev = sbus->pbdev[devfn];
- if (!sdev) {
- return;
- }
-
- g_list_free_full(sdev->resv_regions, g_free);
- sdev->resv_regions = NULL;
- g_free(sdev);
- sbus->pbdev[devfn] = NULL;
-}
-
static gboolean hiod_equal(gconstpointer v1, gconstpointer v2)
{
const struct hiod_key *key1 = v1;
@@ -558,8 +539,6 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
{
IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
IOMMUDevice *sdev;
- GList *current_ranges;
- GList *l, *tmp, *new_ranges = NULL;
int ret = -EINVAL;
if (!sbus) {
@@ -573,35 +552,10 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
return ret;
}
- current_ranges = sdev->host_resv_ranges;
-
- g_assert(!sdev->probe_done);
-
- /* check that each new resv region is included in an existing one */
if (sdev->host_resv_ranges) {
- range_inverse_array(iova_ranges,
- &new_ranges,
- 0, UINT64_MAX);
-
- for (tmp = new_ranges; tmp; tmp = tmp->next) {
- Range *newr = (Range *)tmp->data;
- bool included = false;
-
- for (l = current_ranges; l; l = l->next) {
- Range * r = (Range *)l->data;
-
- if (range_contains_range(r, newr)) {
- included = true;
- break;
- }
- }
- if (!included) {
- goto error;
- }
- }
- /* all new reserved ranges are included in existing ones */
- ret = 0;
- goto out;
+ error_setg(errp, "%s virtio-iommu does not support aliased BDF",
+ __func__);
+ return ret;
}
range_inverse_array(iova_ranges,
@@ -610,14 +564,31 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
rebuild_resv_regions(sdev);
return 0;
-error:
- error_setg(errp, "%s Conflicting host reserved ranges set!",
- __func__);
-out:
- g_list_free_full(new_ranges, g_free);
- return ret;
}
+static void virtio_iommu_unset_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
+ int devfn)
+{
+ IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
+ IOMMUDevice *sdev;
+
+ if (!sbus) {
+ return;
+ }
+
+ sdev = sbus->pbdev[devfn];
+ if (!sdev) {
+ return;
+ }
+
+ g_list_free_full(g_steal_pointer(&sdev->host_resv_ranges), g_free);
+ g_list_free_full(sdev->resv_regions, g_free);
+ sdev->host_resv_ranges = NULL;
+ sdev->resv_regions = NULL;
+ add_prop_resv_regions(sdev);
+}
+
+
static bool check_page_size_mask(VirtIOIOMMU *viommu, uint64_t new_mask,
Error **errp)
{
@@ -726,9 +697,10 @@ virtio_iommu_unset_iommu_device(PCIBus *bus, void *opaque, int devfn)
if (!hiod) {
return;
}
+ virtio_iommu_unset_host_iova_ranges(viommu, hiod->aliased_bus,
+ hiod->aliased_devfn);
g_hash_table_remove(viommu->host_iommu_devices, &key);
- virtio_iommu_device_clear(viommu, bus, devfn);
}
static const PCIIOMMUOps virtio_iommu_ops = {
@@ -815,6 +787,7 @@ static int virtio_iommu_detach(VirtIOIOMMU *s,
if (QLIST_EMPTY(&domain->endpoint_list)) {
g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
}
+ g_tree_remove(s->endpoints, GUINT_TO_POINTER(ep_id));
return VIRTIO_IOMMU_S_OK;
}
@@ -977,7 +950,6 @@ static int virtio_iommu_probe(VirtIOIOMMU *s,
}
buf += count;
free -= count;
- sdev->probe_done = true;
return VIRTIO_IOMMU_S_OK;
}
@@ -1532,11 +1504,11 @@ static void virtio_iommu_device_unrealize(DeviceState *dev)
virtio_cleanup(vdev);
}
-static void virtio_iommu_device_reset(VirtIODevice *vdev)
+static void virtio_iommu_device_reset_exit(Object *obj, ResetType type)
{
- VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
+ VirtIOIOMMU *s = VIRTIO_IOMMU(obj);
- trace_virtio_iommu_device_reset();
+ trace_virtio_iommu_device_reset_exit();
if (s->domains) {
g_tree_destroy(s->domains);
@@ -1550,9 +1522,10 @@ static void virtio_iommu_device_reset(VirtIODevice *vdev)
NULL, NULL, virtio_iommu_put_endpoint);
}
-static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
+static int virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
trace_virtio_iommu_device_status(status);
+ return 0;
}
static void virtio_iommu_instance_init(Object *obj)
@@ -1683,20 +1656,20 @@ static const VMStateDescription vmstate_virtio_iommu = {
},
};
-static Property virtio_iommu_properties[] = {
+static const Property virtio_iommu_properties[] = {
DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus,
TYPE_PCI_BUS, PCIBus *),
DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode,
GRANULE_MODE_HOST),
DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_iommu_class_init(ObjectClass *klass, void *data)
+static void virtio_iommu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
device_class_set_props(dc, virtio_iommu_properties);
dc->vmsd = &vmstate_virtio_iommu;
@@ -1704,7 +1677,12 @@ static void virtio_iommu_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
vdc->realize = virtio_iommu_device_realize;
vdc->unrealize = virtio_iommu_device_unrealize;
- vdc->reset = virtio_iommu_device_reset;
+
+ /*
+ * Use the 'exit' reset phase to make sure all DMA requests
+ * have been quiesced during the 'enter' or 'hold' phases
+ */
+ rc->phases.exit = virtio_iommu_device_reset_exit;
vdc->get_config = virtio_iommu_get_config;
vdc->set_config = virtio_iommu_set_config;
vdc->get_features = virtio_iommu_get_features;
@@ -1713,7 +1691,7 @@ static void virtio_iommu_class_init(ObjectClass *klass, void *data)
}
static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
- void *data)
+ const void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
diff --git a/hw/virtio/virtio-md-pci.c b/hw/virtio/virtio-md-pci.c
index 9ec5067..9278b32 100644
--- a/hw/virtio/virtio-md-pci.c
+++ b/hw/virtio/virtio-md-pci.c
@@ -138,7 +138,7 @@ static const TypeInfo virtio_md_pci_info = {
.instance_size = sizeof(VirtIOMDPCI),
.class_size = sizeof(VirtIOMDPCIClass),
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_MEMORY_DEVICE },
{ }
},
diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c
index 1b4e9a3..f592eb1 100644
--- a/hw/virtio/virtio-mem-pci.c
+++ b/hw/virtio/virtio-mem-pci.c
@@ -22,6 +22,10 @@ static void virtio_mem_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
VirtIOMEMPCI *mem_pci = VIRTIO_MEM_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&mem_pci->vdev);
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = 2;
+ }
+
virtio_pci_force_virtio_1(vpci_dev);
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
@@ -152,7 +156,14 @@ static void virtio_mem_pci_set_requested_size(Object *obj, Visitor *v,
object_property_set(OBJECT(&pci_mem->vdev), name, v, errp);
}
-static void virtio_mem_pci_class_init(ObjectClass *klass, void *data)
+static const Property virtio_mem_pci_class_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
+ DEV_NVECTORS_UNSPECIFIED),
+};
+
+static void virtio_mem_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
@@ -164,6 +175,7 @@ static void virtio_mem_pci_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
pcidev_k->class_id = PCI_CLASS_OTHERS;
+ device_class_set_props(dc, virtio_mem_pci_class_properties);
mdc->get_addr = virtio_mem_pci_get_addr;
mdc->set_addr = virtio_mem_pci_set_addr;
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index ef64bf1..c46f6f9 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -15,20 +15,20 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
-#include "sysemu/numa.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/numa.h"
+#include "system/system.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-mem.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "migration/misc.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
-#include CONFIG_DEVICES
+#include "hw/acpi/acpi.h"
#include "trace.h"
static const VMStateDescription vmstate_virtio_mem_device_early;
@@ -61,6 +61,8 @@ static uint32_t virtio_mem_default_thp_size(void)
} else if (qemu_real_host_page_size() == 64 * KiB) {
default_thp_size = 512 * MiB;
}
+#elif defined(__s390x__)
+ default_thp_size = 1 * MiB;
#endif
return default_thp_size;
@@ -88,6 +90,7 @@ static uint32_t virtio_mem_default_thp_size(void)
static uint32_t thp_size;
#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
+#define HPAGE_PATH "/sys/kernel/mm/transparent_hugepage/"
static uint32_t virtio_mem_thp_size(void)
{
gchar *content = NULL;
@@ -98,6 +101,12 @@ static uint32_t virtio_mem_thp_size(void)
return thp_size;
}
+ /* No THP -> no restrictions. */
+ if (!g_file_test(HPAGE_PATH, G_FILE_TEST_EXISTS)) {
+ thp_size = VIRTIO_MEM_MIN_BLOCK_SIZE;
+ return thp_size;
+ }
+
/*
* Try to probe the actual THP size, fallback to (sane but eventually
* incorrect) default sizes.
@@ -161,7 +170,7 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
* necessary (as the section size can change). But it's more likely that the
* section size will rather get smaller and not bigger over time.
*/
-#if defined(TARGET_X86_64) || defined(TARGET_I386)
+#if defined(TARGET_X86_64) || defined(TARGET_I386) || defined(TARGET_S390X)
#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB))
#elif defined(TARGET_ARM)
#define VIRTIO_MEM_USABLE_EXTENT (2 * (512 * MiB))
@@ -181,7 +190,7 @@ static bool virtio_mem_is_busy(void)
* after plugging them) until we're running on the destination (as we didn't
* migrate these blocks when they were unplugged).
*/
- return migration_in_incoming_postcopy() || !migration_is_idle();
+ return migration_in_incoming_postcopy() || migration_is_running();
}
typedef int (*virtio_mem_range_cb)(VirtIOMEM *vmem, void *arg,
@@ -235,28 +244,6 @@ static int virtio_mem_for_each_plugged_range(VirtIOMEM *vmem, void *arg,
return ret;
}
-/*
- * Adjust the memory section to cover the intersection with the given range.
- *
- * Returns false if the intersection is empty, otherwise returns true.
- */
-static bool virtio_mem_intersect_memory_section(MemoryRegionSection *s,
- uint64_t offset, uint64_t size)
-{
- uint64_t start = MAX(s->offset_within_region, offset);
- uint64_t end = MIN(s->offset_within_region + int128_get64(s->size),
- offset + size);
-
- if (end <= start) {
- return false;
- }
-
- s->offset_within_address_space += start - s->offset_within_region;
- s->offset_within_region = start;
- s->size = int128_make64(end - start);
- return true;
-}
-
typedef int (*virtio_mem_section_cb)(MemoryRegionSection *s, void *arg);
static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
@@ -278,7 +265,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
first_bit + 1) - 1;
size = (last_bit - first_bit + 1) * vmem->block_size;
- if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
break;
}
ret = cb(&tmp, arg);
@@ -310,7 +297,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem,
first_bit + 1) - 1;
size = (last_bit - first_bit + 1) * vmem->block_size;
- if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
break;
}
ret = cb(&tmp, arg);
@@ -346,7 +333,7 @@ static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset,
QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
MemoryRegionSection tmp = *rdl->section;
- if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
continue;
}
rdl->notify_discard(rdl, &tmp);
@@ -362,7 +349,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
MemoryRegionSection tmp = *rdl->section;
- if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
continue;
}
ret = rdl->notify_populate(rdl, &tmp);
@@ -379,7 +366,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
if (rdl2 == rdl) {
break;
}
- if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
continue;
}
rdl2->notify_discard(rdl2, &tmp);
@@ -874,15 +861,16 @@ static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features,
MachineState *ms = MACHINE(qdev_get_machine());
VirtIOMEM *vmem = VIRTIO_MEM(vdev);
- if (ms->numa_state) {
-#if defined(CONFIG_ACPI)
+ if (ms->numa_state && acpi_builtin()) {
virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM);
-#endif
}
assert(vmem->unplugged_inaccessible != ON_OFF_AUTO_AUTO);
if (vmem->unplugged_inaccessible == ON_OFF_AUTO_ON) {
virtio_add_feature(&features, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
}
+ if (qemu_wakeup_suspend_enabled()) {
+ virtio_add_feature(&features, VIRTIO_MEM_F_PERSISTENT_SUSPEND);
+ }
return features;
}
@@ -895,18 +883,6 @@ static int virtio_mem_validate_features(VirtIODevice *vdev)
return 0;
}
-static void virtio_mem_system_reset(void *opaque)
-{
- VirtIOMEM *vmem = VIRTIO_MEM(opaque);
-
- /*
- * During usual resets, we will unplug all memory and shrink the usable
- * region size. This is, however, not possible in all scenarios. Then,
- * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL).
- */
- virtio_mem_unplug_all(vmem);
-}
-
static void virtio_mem_prepare_mr(VirtIOMEM *vmem)
{
const uint64_t region_size = memory_region_size(&vmem->memdev->mr);
@@ -958,6 +934,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
VirtIOMEM *vmem = VIRTIO_MEM(dev);
uint64_t page_size;
RAMBlock *rb;
+ Object *obj;
int ret;
if (!vmem->memdev) {
@@ -990,7 +967,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
return;
}
- if (enable_mlock) {
+ if (should_mlock(mlock_state)) {
error_setg(errp, "Incompatible with mlock");
return;
}
@@ -1071,6 +1048,17 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
}
/*
+ * Set ourselves as RamDiscardManager before the plug handler maps the
+ * memory region and exposes it via an address space.
+ */
+ if (memory_region_set_ram_discard_manager(&vmem->memdev->mr,
+ RAM_DISCARD_MANAGER(vmem))) {
+ error_setg(errp, "Failed to set RamDiscardManager");
+ ram_block_coordinated_discard_require(false);
+ return;
+ }
+
+ /*
* We don't know at this point whether shared RAM is migrated using
* QEMU or migrated using the file content. "x-ignore-shared" will be
* configured after realizing the device. So in case we have an
@@ -1084,6 +1072,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));
if (ret) {
error_setg_errno(errp, -ret, "Unexpected error discarding RAM");
+ memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL);
ram_block_coordinated_discard_require(false);
return;
}
@@ -1123,14 +1112,28 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
vmstate_register_any(VMSTATE_IF(vmem),
&vmstate_virtio_mem_device_early, vmem);
}
- qemu_register_reset(virtio_mem_system_reset, vmem);
/*
- * Set ourselves as RamDiscardManager before the plug handler maps the
- * memory region and exposes it via an address space.
+ * We only want to unplug all memory to start with a clean slate when
+ * it is safe for the guest -- during system resets that call
+ * qemu_devices_reset().
+ *
+ * We'll filter out selected qemu_devices_reset() calls used for other
+ * purposes, like resetting all devices during wakeup from suspend on
+ * x86 based on the reset type passed to qemu_devices_reset().
+ *
+ * Unplugging all memory during simple device resets can result in the VM
+ * unexpectedly losing RAM, corrupting VM state.
+ *
+ * Simple device resets (or resets triggered by getting a parent device
+ * reset) must not change the state of plugged memory blocks. Therefore,
+ * we need a dedicated reset object that only gets called during
+ * qemu_devices_reset().
*/
- memory_region_set_ram_discard_manager(&vmem->memdev->mr,
- RAM_DISCARD_MANAGER(vmem));
+ obj = object_new(TYPE_VIRTIO_MEM_SYSTEM_RESET);
+ vmem->system_reset = VIRTIO_MEM_SYSTEM_RESET(obj);
+ vmem->system_reset->vmem = vmem;
+ qemu_register_resettable(obj);
}
static void virtio_mem_device_unrealize(DeviceState *dev)
@@ -1138,12 +1141,9 @@ static void virtio_mem_device_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOMEM *vmem = VIRTIO_MEM(dev);
- /*
- * The unplug handler unmapped the memory region, it cannot be
- * found via an address space anymore. Unset ourselves.
- */
- memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL);
- qemu_unregister_reset(virtio_mem_system_reset, vmem);
+ qemu_unregister_resettable(OBJECT(vmem->system_reset));
+ object_unref(OBJECT(vmem->system_reset));
+
if (vmem->early_migration) {
vmstate_unregister(VMSTATE_IF(vmem), &vmstate_virtio_mem_device_early,
vmem);
@@ -1153,6 +1153,11 @@ static void virtio_mem_device_unrealize(DeviceState *dev)
virtio_del_queue(vdev, 0);
virtio_cleanup(vdev);
g_free(vmem->bitmap);
+ /*
+ * The unplug handler unmapped the memory region, it cannot be
+ * found via an address space anymore. Unset ourselves.
+ */
+ memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL);
ram_block_coordinated_discard_require(false);
}
@@ -1682,7 +1687,7 @@ static void virtio_mem_instance_finalize(Object *obj)
vmem->mr = NULL;
}
-static Property virtio_mem_properties[] = {
+static const Property virtio_mem_properties[] = {
DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0),
DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0),
DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false),
@@ -1696,7 +1701,6 @@ static Property virtio_mem_properties[] = {
early_migration, true),
DEFINE_PROP_BOOL(VIRTIO_MEM_DYNAMIC_MEMSLOTS_PROP, VirtIOMEM,
dynamic_memslots, false),
- DEFINE_PROP_END_OF_LIST(),
};
static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm,
@@ -1728,7 +1732,7 @@ static bool virtio_mem_rdm_is_populated(const RamDiscardManager *rdm,
}
struct VirtIOMEMReplayData {
- void *fn;
+ ReplayRamDiscardState fn;
void *opaque;
};
@@ -1736,12 +1740,12 @@ static int virtio_mem_rdm_replay_populated_cb(MemoryRegionSection *s, void *arg)
{
struct VirtIOMEMReplayData *data = arg;
- return ((ReplayRamPopulate)data->fn)(s, data->opaque);
+ return data->fn(s, data->opaque);
}
static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm,
MemoryRegionSection *s,
- ReplayRamPopulate replay_fn,
+ ReplayRamDiscardState replay_fn,
void *opaque)
{
const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
@@ -1760,14 +1764,13 @@ static int virtio_mem_rdm_replay_discarded_cb(MemoryRegionSection *s,
{
struct VirtIOMEMReplayData *data = arg;
- ((ReplayRamDiscard)data->fn)(s, data->opaque);
- return 0;
+ return data->fn(s, data->opaque);
}
-static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm,
- MemoryRegionSection *s,
- ReplayRamDiscard replay_fn,
- void *opaque)
+static int virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *s,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
{
const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
struct VirtIOMEMReplayData data = {
@@ -1776,8 +1779,8 @@ static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm,
};
g_assert(s->mr == &vmem->memdev->mr);
- virtio_mem_for_each_unplugged_section(vmem, s, &data,
- virtio_mem_rdm_replay_discarded_cb);
+ return virtio_mem_for_each_unplugged_section(vmem, s, &data,
+ virtio_mem_rdm_replay_discarded_cb);
}
static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm,
@@ -1843,7 +1846,7 @@ static void virtio_mem_unplug_request_check(VirtIOMEM *vmem, Error **errp)
}
}
-static void virtio_mem_class_init(ObjectClass *klass, void *data)
+static void virtio_mem_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -1885,7 +1888,7 @@ static const TypeInfo virtio_mem_info = {
.instance_finalize = virtio_mem_instance_finalize,
.class_init = virtio_mem_class_init,
.class_size = sizeof(VirtIOMEMClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_RAM_DISCARD_MANAGER },
{ }
},
@@ -1897,3 +1900,49 @@ static void virtio_register_types(void)
}
type_init(virtio_register_types)
+
+OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(VirtioMemSystemReset, virtio_mem_system_reset, VIRTIO_MEM_SYSTEM_RESET, OBJECT, { TYPE_RESETTABLE_INTERFACE }, { })
+
+static void virtio_mem_system_reset_init(Object *obj)
+{
+}
+
+static void virtio_mem_system_reset_finalize(Object *obj)
+{
+}
+
+static ResettableState *virtio_mem_system_reset_get_state(Object *obj)
+{
+ VirtioMemSystemReset *vmem_reset = VIRTIO_MEM_SYSTEM_RESET(obj);
+
+ return &vmem_reset->reset_state;
+}
+
+static void virtio_mem_system_reset_hold(Object *obj, ResetType type)
+{
+ VirtioMemSystemReset *vmem_reset = VIRTIO_MEM_SYSTEM_RESET(obj);
+ VirtIOMEM *vmem = vmem_reset->vmem;
+
+ /*
+ * When waking up from standby/suspend-to-ram, do not unplug any memory.
+ */
+ if (type == RESET_TYPE_WAKEUP) {
+ return;
+ }
+
+ /*
+ * During usual resets, we will unplug all memory and shrink the usable
+ * region size. This is, however, not possible in all scenarios. Then,
+ * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL).
+ */
+ virtio_mem_unplug_all(vmem);
+}
+
+static void virtio_mem_system_reset_class_init(ObjectClass *klass,
+ const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->get_state = virtio_mem_system_reset_get_state;
+ rc->phases.hold = virtio_mem_system_reset_hold;
+}
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 320428a..532c671 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -28,8 +28,8 @@
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/replay.h"
+#include "system/kvm.h"
+#include "system/replay.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
@@ -751,13 +751,12 @@ static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
/* virtio-mmio device */
-static Property virtio_mmio_properties[] = {
+static const Property virtio_mmio_properties[] = {
DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
format_transport_address, true),
DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
@@ -785,12 +784,12 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
sysbus_init_mmio(sbd, &proxy->iomem);
}
-static void virtio_mmio_class_init(ObjectClass *klass, void *data)
+static void virtio_mmio_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = virtio_mmio_realizefn;
- dc->reset = virtio_mmio_reset;
+ device_class_set_legacy_reset(dc, virtio_mmio_reset);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
device_class_set_props(dc, virtio_mmio_properties);
}
@@ -856,7 +855,7 @@ static void virtio_mmio_vmstate_change(DeviceState *d, bool running)
}
}
-static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
+static void virtio_mmio_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *bus_class = BUS_CLASS(klass);
VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
diff --git a/hw/virtio/virtio-net-pci.c b/hw/virtio/virtio-net-pci.c
index e03543a..f857a84 100644
--- a/hw/virtio/virtio-net-pci.c
+++ b/hw/virtio/virtio-net-pci.c
@@ -38,12 +38,11 @@ struct VirtIONetPCI {
VirtIONet vdev;
};
-static Property virtio_net_properties[] = {
+static const Property virtio_net_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -64,7 +63,7 @@ static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_net_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -75,6 +74,7 @@ static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
k->revision = VIRTIO_PCI_ABI_VERSION;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ k->sriov_vf_user_creatable = true;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
device_class_set_props(dc, virtio_net_properties);
vpciklass->realize = virtio_net_pci_realize;
diff --git a/hw/virtio/virtio-nsm-pci.c b/hw/virtio/virtio-nsm-pci.c
new file mode 100644
index 0000000..ec24396
--- /dev/null
+++ b/hw/virtio/virtio-nsm-pci.c
@@ -0,0 +1,73 @@
+/*
+ * AWS Nitro Secure Module (NSM) device
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-nsm.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+typedef struct VirtIONsmPCI VirtIONsmPCI;
+
+#define TYPE_VIRTIO_NSM_PCI "virtio-nsm-pci-base"
+DECLARE_INSTANCE_CHECKER(VirtIONsmPCI, VIRTIO_NSM_PCI,
+ TYPE_VIRTIO_NSM_PCI)
+
+struct VirtIONsmPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIONSM vdev;
+};
+
+static void virtio_nsm_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIONsmPCI *vnsm = VIRTIO_NSM_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vnsm->vdev);
+
+ virtio_pci_force_virtio_1(vpci_dev);
+
+ if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) {
+ return;
+ }
+}
+
+static void virtio_nsm_pci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+
+ k->realize = virtio_nsm_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static void virtio_nsm_initfn(Object *obj)
+{
+ VirtIONsmPCI *dev = VIRTIO_NSM_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_NSM);
+}
+
+static const VirtioPCIDeviceTypeInfo virtio_nsm_pci_info = {
+ .base_name = TYPE_VIRTIO_NSM_PCI,
+ .generic_name = "virtio-nsm-pci",
+ .instance_size = sizeof(VirtIONsmPCI),
+ .instance_init = virtio_nsm_initfn,
+ .class_init = virtio_nsm_pci_class_init,
+};
+
+static void virtio_nsm_pci_register(void)
+{
+ virtio_pci_types_register(&virtio_nsm_pci_info);
+}
+
+type_init(virtio_nsm_pci_register)
diff --git a/hw/virtio/virtio-nsm.c b/hw/virtio/virtio-nsm.c
new file mode 100644
index 0000000..3bf5e70
--- /dev/null
+++ b/hw/virtio/virtio-nsm.c
@@ -0,0 +1,1737 @@
+/*
+ * AWS Nitro Secure Module (NSM) device
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/iov.h"
+#include "qemu/guest-random.h"
+#include "qapi/error.h"
+
+#include "crypto/hash.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-nsm.h"
+#include "hw/virtio/cbor-helpers.h"
+#include "standard-headers/linux/virtio_ids.h"
+
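+/* Requests are capped at 4 KiB; responses use a fixed 12 KiB (0x3000) buffer. */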
+#define NSM_REQUEST_MAX_SIZE 0x1000
+#define NSM_RESPONSE_BUF_SIZE 0x3000
+#define NSM_RND_BUF_SIZE 256
+
+enum NSMResponseTypes {
+ NSM_SUCCESS = 0,
+ NSM_INVALID_ARGUMENT = 1,
+ NSM_INVALID_INDEX = 2,
+ NSM_READONLY_INDEX = 3,
+ NSM_INVALID_OPERATION = 4,
+ NSM_BUFFER_TOO_SMALL = 5,
+ NSM_INPUT_TOO_LARGE = 6,
+ NSM_INTERNAL_ERROR = 7,
+};
+
+static const char *error_string(enum NSMResponseTypes type)
+{
+ const char *str;
+ switch (type) {
+ case NSM_INVALID_ARGUMENT:
+ str = "InvalidArgument";
+ break;
+ case NSM_INVALID_INDEX:
+ str = "InvalidIndex";
+ break;
+ case NSM_READONLY_INDEX:
+ str = "ReadOnlyIndex";
+ break;
+ case NSM_INVALID_OPERATION:
+ str = "InvalidOperation";
+ break;
+ case NSM_BUFFER_TOO_SMALL:
+ str = "BufferTooSmall";
+ break;
+ case NSM_INPUT_TOO_LARGE:
+ str = "InputTooLarge";
+ break;
+ default:
+ str = "InternalError";
+ break;
+ }
+
+ return str;
+}
+
+/*
+ * Error response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("Error"),
+ * value = String(error_name)
+ * }
+ * }
+ *
+ * where error_name can be one of the following:
+ * InvalidArgument
+ * InvalidIndex
+ * InvalidResponse
+ * ReadOnlyIndex
+ * InvalidOperation
+ * BufferTooSmall
+ * InputTooLarge
+ * InternalError
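+ *
+ * Example (CBOR diagnostic notation): {"Error": "InvalidIndex"}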
+ */
+
+static bool error_response(struct iovec *response, enum NSMResponseTypes error,
+ Error **errp)
+{
+ cbor_item_t *root;
+ size_t len;
+ bool r = false;
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_string_to_map(root, "Error", error_string(error))) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ error_setg(errp, "Response buffer is small for %s response",
+ error_string(error));
+ goto out;
+ }
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize %s response", error_string(error));
+ goto out;
+}
+
+/*
+ * GetRandom response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("GetRandom"),
+ * value = Map(1) {
+ * key = String("random"),
+ * value = Byte_String()
+ * }
+ * }
+ * }
+ */
+static bool handle_get_random(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root, *nested_map;
+ size_t len;
+ uint8_t rnd[NSM_RND_BUF_SIZE];
+ bool r = false;
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_map_to_map(root, "GetRandom", 1, &nested_map)) {
+ goto err;
+ }
+
+ qemu_guest_getrandom_nofail(rnd, NSM_RND_BUF_SIZE);
+
+ if (!qemu_cbor_add_bytestring_to_map(nested_map, "random", rnd,
+ NSM_RND_BUF_SIZE)) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize GetRandom response");
+ goto out;
+}
+
+/*
+ * DescribeNSM response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("DescribeNSM"),
+ * value = Map(7) {
+ * key = String("digest"),
+ * value = String("SHA384"),
+ * key = String("max_pcrs"),
+ * value = Uint8(32),
+ * key = String("module_id"),
+ * value = String("i-1234-enc5678"),
+ * key = String("locked_pcrs"),
+ * value = Array<Uint8>(),
+ * key = String("version_major"),
+ * value = Uint8(1),
+ * key = String("version_minor"),
+ * value = Uint8(0),
+ * key = String("version_patch"),
+ * value = Uint8(0)
+ * }
+ * }
+ * }
+ */
+static bool handle_describe_nsm(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root, *nested_map;
+ uint16_t locked_pcrs_cnt = 0;
+ uint8_t locked_pcrs_ind[NSM_MAX_PCRS];
+ size_t len;
+ bool r = false;
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_map_to_map(root, "DescribeNSM", 7, &nested_map)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_string_to_map(nested_map, "digest", vnsm->digest)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_uint8_to_map(nested_map, "max_pcrs", vnsm->max_pcrs)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_string_to_map(nested_map, "module_id",
+ vnsm->module_id)) {
+ goto err;
+ }
+
+ for (uint8_t i = 0; i < NSM_MAX_PCRS; ++i) {
+ if (vnsm->pcrs[i].locked) {
+ locked_pcrs_ind[locked_pcrs_cnt++] = i;
+ }
+ }
+ if (!qemu_cbor_add_uint8_array_to_map(nested_map, "locked_pcrs",
+ locked_pcrs_ind, locked_pcrs_cnt)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_uint8_to_map(nested_map, "version_major",
+ vnsm->version_major)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_uint8_to_map(nested_map, "version_minor",
+ vnsm->version_minor)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_uint8_to_map(nested_map, "version_patch",
+ vnsm->version_patch)) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize DescribeNSM response");
+ goto out;
+}
+
+/*
+ * DescribePCR request structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("DescribePCR"),
+ * value = Map(1) {
+ * key = String("index"),
+ * value = Uint8(pcr)
+ * }
+ * }
+ * }
+ */
+typedef struct NSMDescribePCRReq {
+ uint8_t index;
+} NSMDescribePCRReq;
+
+static enum NSMResponseTypes get_nsm_describe_pcr_req(
+ uint8_t *req, size_t len,
+ NSMDescribePCRReq *nsm_req)
+{
+ size_t size;
+ uint8_t *str;
+ struct cbor_pair *pair;
+ cbor_item_t *item = NULL;
+ struct cbor_load_result result;
+ enum NSMResponseTypes r = NSM_INVALID_OPERATION;
+
+ item = cbor_load(req, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(item);
+ if (!cbor_isa_map(pair->value)) {
+ goto cleanup;
+ }
+ size = cbor_map_size(pair->value);
+ if (size < 1) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(pair->value);
+ for (int i = 0; i < size; ++i) {
+ if (!cbor_isa_string(pair[i].key)) {
+ continue;
+ }
+
+ str = cbor_string_handle(pair[i].key);
+ if (str && cbor_string_length(pair[i].key) == 5 &&
+ memcmp(str, "index", 5) == 0) {
+ if (!cbor_isa_uint(pair[i].value) ||
+ cbor_int_get_width(pair[i].value) != CBOR_INT_8) {
+ break;
+ }
+
+ nsm_req->index = cbor_get_uint8(pair[i].value);
+ r = NSM_SUCCESS;
+ break;
+ }
+ }
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return r;
+}
+
+/*
+ * DescribePCR response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("DescribePCR"),
+ * value = Map(2) {
+ * key = String("data"),
+ * value = Byte_String(),
+ * key = String("lock"),
+ * value = Bool()
+ * }
+ * }
+ * }
+ */
+static bool handle_describe_pcr(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root = NULL;
+ cbor_item_t *nested_map;
+ size_t len;
+ NSMDescribePCRReq nsm_req;
+ enum NSMResponseTypes type;
+ struct PCRInfo *pcr;
+ bool r = false;
+
+ type = get_nsm_describe_pcr_req(request->iov_base, request->iov_len,
+ &nsm_req);
+ if (type != NSM_SUCCESS) {
+ if (error_response(response, type, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+ if (nsm_req.index >= vnsm->max_pcrs) {
+ if (error_response(response, NSM_INVALID_INDEX, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+ pcr = &(vnsm->pcrs[nsm_req.index]);
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_map_to_map(root, "DescribePCR", 2, &nested_map)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_bytestring_to_map(nested_map, "data", pcr->data,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_bool_to_map(nested_map, "lock", pcr->locked)) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize DescribePCR response");
+ goto out;
+}
+
+/*
+ * ExtendPCR request structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("ExtendPCR"),
+ * value = Map(2) {
+ * key = String("index"),
+ * value = Uint8(pcr),
+ * key = String("data"),
+ * value = Byte_String(data) || String(data),
+ * }
+ * }
+ * }
+ */
+typedef struct NSMExtendPCRReq {
+ uint8_t index;
+ uint16_t data_len;
+ uint8_t data[NSM_REQUEST_MAX_SIZE];
+} NSMExtendPCRReq;
+
+static enum NSMResponseTypes get_nsm_extend_pcr_req(uint8_t *req, size_t len,
+ NSMExtendPCRReq *nsm_req)
+{
+ cbor_item_t *item = NULL;
+ size_t size;
+ uint8_t *str;
+ bool index_found = false;
+ bool data_found = false;
+ struct cbor_pair *pair;
+ struct cbor_load_result result;
+ enum NSMResponseTypes r = NSM_INVALID_OPERATION;
+
+ item = cbor_load(req, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(item);
+ if (!cbor_isa_map(pair->value)) {
+ goto cleanup;
+ }
+ size = cbor_map_size(pair->value);
+ if (size < 2) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(pair->value);
+ for (int i = 0; i < size; ++i) {
+ if (!cbor_isa_string(pair[i].key)) {
+ continue;
+ }
+ str = cbor_string_handle(pair[i].key);
+ if (!str) {
+ continue;
+ }
+
+ if (cbor_string_length(pair[i].key) == 5 &&
+ memcmp(str, "index", 5) == 0) {
+ if (!cbor_isa_uint(pair[i].value) ||
+ cbor_int_get_width(pair[i].value) != CBOR_INT_8) {
+ goto cleanup;
+ }
+ nsm_req->index = cbor_get_uint8(pair[i].value);
+ index_found = true;
+ continue;
+ }
+
+ if (cbor_string_length(pair[i].key) == 4 &&
+ memcmp(str, "data", 4) == 0) {
+ if (cbor_isa_bytestring(pair[i].value)) {
+ str = cbor_bytestring_handle(pair[i].value);
+ if (!str) {
+ goto cleanup;
+ }
+ nsm_req->data_len = cbor_bytestring_length(pair[i].value);
+ } else if (cbor_isa_string(pair[i].value)) {
+ str = cbor_string_handle(pair[i].value);
+ if (!str) {
+ goto cleanup;
+ }
+ nsm_req->data_len = cbor_string_length(pair[i].value);
+ } else {
+ goto cleanup;
+ }
+ /*
+ * nsm_req->data_len will be smaller than NSM_REQUEST_MAX_SIZE as
+ * we already check for the max request size before processing
+ * any request. So it's safe to copy.
+ */
+ memcpy(nsm_req->data, str, nsm_req->data_len);
+ data_found = true;
+ continue;
+ }
+ }
+
+ if (index_found && data_found) {
+ r = NSM_SUCCESS;
+ }
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return r;
+}
+
+/*
+ * ExtendPCR response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("ExtendPCR"),
+ * value = Map(1) {
+ * key = String("data"),
+ * value = Byte_String()
+ * }
+ * }
+ * }
+ */
+static bool handle_extend_pcr(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root = NULL;
+ cbor_item_t *nested_map;
+ size_t len;
+ struct PCRInfo *pcr;
+ enum NSMResponseTypes type;
+ bool r = false;
+ g_autofree NSMExtendPCRReq *nsm_req = g_malloc(sizeof(NSMExtendPCRReq));
+
+ type = get_nsm_extend_pcr_req(request->iov_base, request->iov_len,
+ nsm_req);
+ if (type != NSM_SUCCESS) {
+ if (error_response(response, type, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+ if (nsm_req->index >= vnsm->max_pcrs) {
+ if (error_response(response, NSM_INVALID_INDEX, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ pcr = &(vnsm->pcrs[nsm_req->index]);
+
+ if (pcr->locked) {
+ if (error_response(response, NSM_READONLY_INDEX, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ if (!vnsm->extend_pcr(vnsm, nsm_req->index, nsm_req->data,
+ nsm_req->data_len)) {
+ if (error_response(response, NSM_INTERNAL_ERROR, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_map_to_map(root, "ExtendPCR", 1, &nested_map)) {
+ goto err;
+ }
+
+ if (!qemu_cbor_add_bytestring_to_map(nested_map, "data", pcr->data,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384)) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize DescribePCR response");
+ goto out;
+}
+
+/*
+ * LockPCR request structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("LockPCR"),
+ * value = Map(1) {
+ * key = String("index"),
+ * value = Uint8(pcr)
+ * }
+ * }
+ * }
+ */
+typedef struct NSMLockPCRReq {
+ uint8_t index;
+} NSMLockPCRReq;
+
+static enum NSMResponseTypes get_nsm_lock_pcr_req(uint8_t *req, size_t len,
+ NSMLockPCRReq *nsm_req)
+{
+ cbor_item_t *item = NULL;
+ size_t size;
+ uint8_t *str;
+ struct cbor_pair *pair;
+ struct cbor_load_result result;
+ enum NSMResponseTypes r = NSM_INVALID_OPERATION;
+
+ item = cbor_load(req, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(item);
+ if (!cbor_isa_map(pair->value)) {
+ goto cleanup;
+ }
+ size = cbor_map_size(pair->value);
+ if (size < 1) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(pair->value);
+ for (int i = 0; i < size; ++i) {
+ if (!cbor_isa_string(pair[i].key)) {
+ continue;
+ }
+ str = cbor_string_handle(pair[i].key);
+ if (str && cbor_string_length(pair[i].key) == 5 &&
+ memcmp(str, "index", 5) == 0) {
+ if (!cbor_isa_uint(pair[i].value) ||
+ cbor_int_get_width(pair[i].value) != CBOR_INT_8) {
+ break;
+ }
+
+ nsm_req->index = cbor_get_uint8(pair[i].value);
+ r = NSM_SUCCESS;
+ break;
+ }
+ }
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return r;
+}
+
+/*
+ * LockPCR success response structure:
+ * {
+ * String("LockPCR")
+ * }
+ */
+static bool handle_lock_pcr(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root = NULL;
+ size_t len;
+ NSMLockPCRReq nsm_req;
+ enum NSMResponseTypes type;
+ struct PCRInfo *pcr;
+ bool r = false;
+
+ type = get_nsm_lock_pcr_req(request->iov_base, request->iov_len, &nsm_req);
+ if (type != NSM_SUCCESS) {
+ if (error_response(response, type, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+ if (nsm_req.index >= vnsm->max_pcrs) {
+ if (error_response(response, NSM_INVALID_INDEX, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+
+ pcr = &(vnsm->pcrs[nsm_req.index]);
+
+ if (pcr->locked) {
+ if (error_response(response, NSM_READONLY_INDEX, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+
+ pcr->locked = true;
+
+ root = cbor_build_string("LockPCR");
+ if (!root) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+
+ response->iov_len = len;
+ r = true;
+ goto cleanup;
+
+ err:
+ error_setg(errp, "Failed to initialize LockPCR response");
+
+ cleanup:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+}
+
+/*
+ * LockPCRs request structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("LockPCRs"),
+ * value = Map(1) {
+ * key = String("range"),
+ * value = Uint8(range)
+ * }
+ * }
+ * }
+ */
+typedef struct NSMLockPCRsReq {
+ uint16_t range;
+} NSMLockPCRsReq;
+
+static enum NSMResponseTypes get_nsm_lock_pcrs_req(uint8_t *req, size_t len,
+ NSMLockPCRsReq *nsm_req)
+{
+ cbor_item_t *item = NULL;
+ size_t size;
+ uint8_t *str;
+ struct cbor_pair *pair;
+ struct cbor_load_result result;
+ enum NSMResponseTypes r = NSM_INVALID_OPERATION;
+
+ item = cbor_load(req, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(item);
+ if (!cbor_isa_map(pair->value)) {
+ goto cleanup;
+ }
+ size = cbor_map_size(pair->value);
+ if (size < 1) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(pair->value);
+ for (int i = 0; i < size; ++i) {
+ if (!cbor_isa_string(pair[i].key)) {
+ continue;
+ }
+ str = cbor_string_handle(pair[i].key);
+ if (str && cbor_string_length(pair[i].key) == 5 &&
+ memcmp(str, "range", 5) == 0) {
+ if (!cbor_isa_uint(pair[i].value) ||
+ cbor_int_get_width(pair[i].value) != CBOR_INT_8) {
+ break;
+ }
+
+ nsm_req->range = cbor_get_uint8(pair[i].value);
+ r = NSM_SUCCESS;
+ break;
+ }
+ }
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return r;
+}
+
+/*
+ * LockPCRs success response structure:
+ * {
+ * String("LockPCRs")
+ * }
+ */
+static bool handle_lock_pcrs(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root = NULL;
+ size_t len;
+ NSMLockPCRsReq nsm_req;
+ enum NSMResponseTypes type;
+ bool r = false;
+
+ type = get_nsm_lock_pcrs_req(request->iov_base, request->iov_len, &nsm_req);
+ if (type != NSM_SUCCESS) {
+ if (error_response(response, type, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+ if (nsm_req.range > vnsm->max_pcrs) {
+ if (error_response(response, NSM_INVALID_INDEX, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+
+ for (int i = 0; i < nsm_req.range; ++i) {
+ vnsm->pcrs[i].locked = true;
+ }
+
+ root = cbor_build_string("LockPCRs");
+ if (!root) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) {
+ r = true;
+ }
+ goto cleanup;
+ }
+
+ response->iov_len = len;
+ r = true;
+ goto cleanup;
+
+ err:
+ error_setg(errp, "Failed to initialize response");
+
+ cleanup:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+}
+
+/*
+ * Attestation request structure:
+ *
+ *
+ * {
+ * Map(1) {
+ * value = Map(3) {
+ * key = String("user_data"),
+ * value = Byte_String() || null, // Optional
+ * key = String("nonce"),
+ * value = Byte_String() || null, // Optional
+ * key = String("public_key"),
+ * value = Byte_String() || null, // Optional
+ * }
+ * }
+ * }
+ */
+
+struct AttestationProperty {
+ bool is_null; /* True if property is not present in map or is null */
+ uint16_t len;
+ uint8_t buf[NSM_REQUEST_MAX_SIZE];
+};
+
+typedef struct NSMAttestationReq {
+ struct AttestationProperty public_key;
+ struct AttestationProperty user_data;
+ struct AttestationProperty nonce;
+} NSMAttestationReq;
+
+static bool fill_attestation_property(struct AttestationProperty *prop,
+ cbor_item_t *value)
+{
+ uint8_t *str;
+ bool ret = false;
+
+ if (cbor_is_null(value)) {
+ prop->is_null = true;
+ ret = true;
+ goto out;
+ } else if (cbor_isa_bytestring(value)) {
+ str = cbor_bytestring_handle(value);
+ if (!str) {
+ goto out;
+ }
+ prop->len = cbor_bytestring_length(value);
+ } else if (cbor_isa_string(value)) {
+ str = cbor_string_handle(value);
+ if (!str) {
+ goto out;
+ }
+ prop->len = cbor_string_length(value);
+ } else {
+ goto out;
+ }
+
+ /*
+ * prop->len will be smaller than NSM_REQUEST_MAX_SIZE as we
+ * already check for the max request size before processing
+ * any request. So it's safe to copy.
+ */
+ memcpy(prop->buf, str, prop->len);
+ prop->is_null = false;
+ ret = true;
+
+ out:
+ return ret;
+}
+
+static enum NSMResponseTypes get_nsm_attestation_req(uint8_t *req, size_t len,
+ NSMAttestationReq *nsm_req)
+{
+ cbor_item_t *item = NULL;
+ size_t size;
+ uint8_t *str;
+ struct cbor_pair *pair;
+ struct cbor_load_result result;
+ enum NSMResponseTypes r = NSM_INVALID_OPERATION;
+
+ nsm_req->public_key.is_null = true;
+ nsm_req->user_data.is_null = true;
+ nsm_req->nonce.is_null = true;
+
+ item = cbor_load(req, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(item);
+ if (!cbor_isa_map(pair->value)) {
+ goto cleanup;
+ }
+ size = cbor_map_size(pair->value);
+ if (size == 0) {
+ r = NSM_SUCCESS;
+ goto cleanup;
+ }
+
+ pair = cbor_map_handle(pair->value);
+ for (int i = 0; i < size; ++i) {
+ if (!cbor_isa_string(pair[i].key)) {
+ continue;
+ }
+
+ str = cbor_string_handle(pair[i].key);
+ if (!str) {
+ continue;
+ }
+
+ if (cbor_string_length(pair[i].key) == 10 &&
+ memcmp(str, "public_key", 10) == 0) {
+ if (!fill_attestation_property(&(nsm_req->public_key),
+ pair[i].value)) {
+ goto cleanup;
+ }
+ continue;
+ }
+
+ if (cbor_string_length(pair[i].key) == 9 &&
+ memcmp(str, "user_data", 9) == 0) {
+ if (!fill_attestation_property(&(nsm_req->user_data),
+ pair[i].value)) {
+ goto cleanup;
+ }
+ continue;
+ }
+
+ if (cbor_string_length(pair[i].key) == 5 &&
+ memcmp(str, "nonce", 5) == 0) {
+ if (!fill_attestation_property(&(nsm_req->nonce), pair[i].value)) {
+ goto cleanup;
+ }
+ continue;
+ }
+ }
+
+ r = NSM_SUCCESS;
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return r;
+}
+
+static bool add_protected_header_to_cose(cbor_item_t *cose)
+{
+ cbor_item_t *map = NULL;
+ cbor_item_t *key = NULL;
+ cbor_item_t *value = NULL;
+ cbor_item_t *bs = NULL;
+ size_t len;
+ bool r = false;
+ size_t buf_len = 4096;
+ g_autofree uint8_t *buf = g_malloc(buf_len);
+
+ map = cbor_new_definite_map(1);
+ if (!map) {
+ goto cleanup;
+ }
+ key = cbor_build_uint8(1);
+ if (!key) {
+ goto cleanup;
+ }
+ value = cbor_new_int8();
+ if (!value) {
+ goto cleanup;
+ }
+ cbor_mark_negint(value);
+ /* we don't actually sign the data, so we use -1 as the 'alg' value */
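+ /* (a CBOR negative integer stores n and encodes -(n + 1), so 0 means -1) */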
+ cbor_set_uint8(value, 0);
+
+ if (!qemu_cbor_map_add(map, key, value)) {
+ goto cleanup;
+ }
+
+ len = cbor_serialize(map, buf, buf_len);
+ if (len == 0) {
+ goto cleanup_map;
+ }
+
+ bs = cbor_build_bytestring(buf, len);
+ if (!bs) {
+ goto cleanup_map;
+ }
+ if (!qemu_cbor_array_push(cose, bs)) {
+ cbor_decref(&bs);
+ goto cleanup_map;
+ }
+ r = true;
+ goto cleanup_map;
+
+ cleanup:
+ if (key) {
+ cbor_decref(&key);
+ }
+ if (value) {
+ cbor_decref(&value);
+ }
+
+ cleanup_map:
+ if (map) {
+ cbor_decref(&map);
+ }
+ return r;
+}
+
+static bool add_unprotected_header_to_cose(cbor_item_t *cose)
+{
+ cbor_item_t *map = cbor_new_definite_map(0);
+ if (!map) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_array_push(cose, map)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (map) {
+ cbor_decref(&map);
+ }
+ return false;
+}
+
+static bool add_ca_bundle_to_payload(cbor_item_t *map)
+{
+ cbor_item_t *key_cbor = NULL;
+ cbor_item_t *value_cbor = NULL;
+ cbor_item_t *bs = NULL;
+ uint8_t zero[64] = {0};
+
+ key_cbor = cbor_build_string("cabundle");
+ if (!key_cbor) {
+ goto cleanup;
+ }
+ value_cbor = cbor_new_definite_array(1);
+ if (!value_cbor) {
+ goto cleanup;
+ }
+ bs = cbor_build_bytestring(zero, 64);
+ if (!bs) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_array_push(value_cbor, bs)) {
+ cbor_decref(&bs);
+ goto cleanup;
+ }
+ if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (key_cbor) {
+ cbor_decref(&key_cbor);
+ }
+ if (value_cbor) {
+ cbor_decref(&value_cbor);
+ }
+ return false;
+}
+
+static bool add_payload_to_cose(cbor_item_t *cose, VirtIONSM *vnsm,
+ NSMAttestationReq *req)
+{
+ cbor_item_t *root = NULL;
+ cbor_item_t *nested_map;
+ cbor_item_t *bs = NULL;
+ size_t locked_cnt;
+ uint8_t ind[NSM_MAX_PCRS];
+ size_t payload_map_size = 9;
+ size_t len;
+ struct PCRInfo *pcr;
+ uint8_t zero[64] = {0};
+ bool r = false;
+ size_t buf_len = 16384;
+ g_autofree uint8_t *buf = g_malloc(buf_len);
+
+ root = cbor_new_definite_map(payload_map_size);
+ if (!root) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_add_string_to_map(root, "module_id", vnsm->module_id)) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_add_string_to_map(root, "digest", vnsm->digest)) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_add_uint64_to_map(root, "timestamp",
+ (uint64_t) time(NULL) * 1000)) {
+ goto cleanup;
+ }
+
+ locked_cnt = 0;
+ for (uint8_t i = 0; i < NSM_MAX_PCRS; ++i) {
+ if (vnsm->pcrs[i].locked) {
+ ind[locked_cnt++] = i;
+ }
+ }
+ if (!qemu_cbor_add_map_to_map(root, "pcrs", locked_cnt, &nested_map)) {
+ goto cleanup;
+ }
+ for (uint8_t i = 0; i < locked_cnt; ++i) {
+ pcr = &(vnsm->pcrs[ind[i]]);
+ if (!qemu_cbor_add_uint8_key_bytestring_to_map(
+ nested_map, ind[i],
+ pcr->data,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384)) {
+ goto cleanup;
+ }
+ }
+ if (!qemu_cbor_add_bytestring_to_map(root, "certificate", zero, 64)) {
+ goto cleanup;
+ }
+ if (!add_ca_bundle_to_payload(root)) {
+ goto cleanup;
+ }
+
+ if (req->public_key.is_null) {
+ if (!qemu_cbor_add_null_to_map(root, "public_key")) {
+ goto cleanup;
+ }
+ } else if (!qemu_cbor_add_bytestring_to_map(root, "public_key",
+ req->public_key.buf,
+ req->public_key.len)) {
+ goto cleanup;
+ }
+
+ if (req->user_data.is_null) {
+ if (!qemu_cbor_add_null_to_map(root, "user_data")) {
+ goto cleanup;
+ }
+ } else if (!qemu_cbor_add_bytestring_to_map(root, "user_data",
+ req->user_data.buf,
+ req->user_data.len)) {
+ goto cleanup;
+ }
+
+ if (req->nonce.is_null) {
+ if (!qemu_cbor_add_null_to_map(root, "nonce")) {
+ goto cleanup;
+ }
+ } else if (!qemu_cbor_add_bytestring_to_map(root, "nonce",
+ req->nonce.buf,
+ req->nonce.len)) {
+ goto cleanup;
+ }
+
+ len = cbor_serialize(root, buf, buf_len);
+ if (len == 0) {
+ goto cleanup;
+ }
+
+ bs = cbor_build_bytestring(buf, len);
+ if (!bs) {
+ goto cleanup;
+ }
+ if (!qemu_cbor_array_push(cose, bs)) {
+ cbor_decref(&bs);
+ goto cleanup;
+ }
+
+ r = true;
+
+ cleanup:
+ if (root) {
+ cbor_decref(&root);
+ }
+ return r;
+}
+
+static bool add_signature_to_cose(cbor_item_t *cose)
+{
+ cbor_item_t *bs = NULL;
+ uint8_t zero[64] = {0};
+
+ /* we don't actually sign the data, so we just put 64 zero bytes */
+ bs = cbor_build_bytestring(zero, 64);
+ if (!bs) {
+ goto cleanup;
+ }
+
+ if (!qemu_cbor_array_push(cose, bs)) {
+ goto cleanup;
+ }
+
+ return true;
+
+ cleanup:
+ if (bs) {
+ cbor_decref(&bs);
+ }
+ return false;
+}
+
+/*
+ * Attestation response structure:
+ *
+ * {
+ * Map(1) {
+ * key = String("Attestation"),
+ * value = Map(1) {
+ * key = String("document"),
+ * value = Byte_String()
+ * }
+ * }
+ * }
+ *
+ * The document is a serialized COSE sign1 blob of the structure:
+ * {
+ * Array(4) {
+ * [0] { ByteString() }, // serialized protected header
+ * [1] { Map(0) }, // 0 length map
+ * [2] { ByteString() }, // serialized payload
+ * [3] { ByteString() }, // signature
+ * }
+ * }
+ *
+ * where [0] protected header is a serialized CBOR blob of the structure:
+ * {
+ * Map(1) {
+ * key = Uint8(1) // alg
+ * value = NegativeInt8() // Signing algorithm
+ * }
+ * }
+ *
+ * [2] payload is serialized CBOR blob of the structure:
+ * {
+ * Map(9) {
+ * [0] { key = String("module_id"), value = String(module_id) },
+ * [1] { key = String("digest"), value = String("SHA384") },
+ * [2] {
+ * key = String("timestamp"),
+ * value = Uint64(unix epoch of when document was created)
+ * },
+ * [3] {
+ * key = String("pcrs"),
+ * value = Map(locked_pcr_cnt) {
+ * key = Uint8(pcr_index),
+ * value = ByteString(pcr_data)
+ * },
+ * },
+ * [4] {
+ * key = String("certificate"),
+ * value = ByteString(Signing certificate)
+ * },
+ * [5] { key = String("cabundle"), value = Array(N) { ByteString()... } },
+ * [6] { key = String("public_key"), value = ByteString() || null },
+ * [7] { key = String("user_data"), value = ByteString() || null},
+ * [8] { key = String("nonce"), value = ByteString() || null},
+ * }
+ * }
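+ *
+ * This is the COSE_Sign1 layout (RFC 9052): [protected header, unprotected
+ * header, payload, signature]. The signature is a zero-filled placeholder
+ * (see add_signature_to_cose()), and the certificate/cabundle entries in
+ * the payload are likewise zeros; nothing is actually signed.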
+ */
+static bool handle_attestation(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp)
+{
+ cbor_item_t *root = NULL;
+ cbor_item_t *cose = NULL;
+ cbor_item_t *nested_map;
+ size_t len;
+ enum NSMResponseTypes type;
+ bool r = false;
+ size_t buf_len = 16384;
+ g_autofree uint8_t *buf = g_malloc(buf_len);
+ g_autofree NSMAttestationReq *nsm_req = g_malloc(sizeof(NSMAttestationReq));
+
+ nsm_req->public_key.is_null = true;
+ nsm_req->user_data.is_null = true;
+ nsm_req->nonce.is_null = true;
+
+ type = get_nsm_attestation_req(request->iov_base, request->iov_len,
+ nsm_req);
+ if (type != NSM_SUCCESS) {
+ if (error_response(response, type, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ cose = cbor_new_definite_array(4);
+ if (!cose) {
+ goto err;
+ }
+ if (!add_protected_header_to_cose(cose)) {
+ goto err;
+ }
+ if (!add_unprotected_header_to_cose(cose)) {
+ goto err;
+ }
+ if (!add_payload_to_cose(cose, vnsm, nsm_req)) {
+ goto err;
+ }
+ if (!add_signature_to_cose(cose)) {
+ goto err;
+ }
+
+ len = cbor_serialize(cose, buf, buf_len);
+ if (len == 0) {
+ goto err;
+ }
+
+ root = cbor_new_definite_map(1);
+ if (!root) {
+ goto err;
+ }
+ if (!qemu_cbor_add_map_to_map(root, "Attestation", 1, &nested_map)) {
+ goto err;
+ }
+ if (!qemu_cbor_add_bytestring_to_map(nested_map, "document", buf, len)) {
+ goto err;
+ }
+
+ len = cbor_serialize(root, response->iov_base, response->iov_len);
+ if (len == 0) {
+ if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) {
+ r = true;
+ }
+ goto out;
+ }
+
+ response->iov_len = len;
+ r = true;
+
+ out:
+ if (root) {
+ cbor_decref(&root);
+ }
+ if (cose) {
+ cbor_decref(&cose);
+ }
+ return r;
+
+ err:
+ error_setg(errp, "Failed to initialize Attestation response");
+ goto out;
+}
+
+enum CBOR_ROOT_TYPE {
+ CBOR_ROOT_TYPE_STRING = 0,
+ CBOR_ROOT_TYPE_MAP = 1,
+};
+
+struct nsm_cmd {
+ char name[16];
+ /*
+ * There are 2 types of request
+ * 1) String(); "GetRandom", "DescribeNSM"
+ * 2) Map(1) { key: String(), value: ... }
+ */
+ enum CBOR_ROOT_TYPE root_type;
+ bool (*response_fn)(VirtIONSM *vnsm, struct iovec *request,
+ struct iovec *response, Error **errp);
+};
+
+const struct nsm_cmd nsm_cmds[] = {
+ { "GetRandom", CBOR_ROOT_TYPE_STRING, handle_get_random },
+ { "DescribeNSM", CBOR_ROOT_TYPE_STRING, handle_describe_nsm },
+ { "DescribePCR", CBOR_ROOT_TYPE_MAP, handle_describe_pcr },
+ { "ExtendPCR", CBOR_ROOT_TYPE_MAP, handle_extend_pcr },
+ { "LockPCR", CBOR_ROOT_TYPE_MAP, handle_lock_pcr },
+ { "LockPCRs", CBOR_ROOT_TYPE_MAP, handle_lock_pcrs },
+ { "Attestation", CBOR_ROOT_TYPE_MAP, handle_attestation },
+};
+
+static const struct nsm_cmd *get_nsm_request_cmd(uint8_t *buf, size_t len)
+{
+ size_t size;
+ uint8_t *req;
+ enum CBOR_ROOT_TYPE root_type;
+ struct cbor_load_result result;
+ cbor_item_t *item = cbor_load(buf, len, &result);
+ if (!item || result.error.code != CBOR_ERR_NONE) {
+ goto cleanup;
+ }
+
+ if (cbor_isa_string(item)) {
+ size = cbor_string_length(item);
+ req = cbor_string_handle(item);
+ root_type = CBOR_ROOT_TYPE_STRING;
+ } else if (cbor_isa_map(item) && cbor_map_size(item) == 1) {
+ struct cbor_pair *handle = cbor_map_handle(item);
+ if (cbor_isa_string(handle->key)) {
+ size = cbor_string_length(handle->key);
+ req = cbor_string_handle(handle->key);
+ root_type = CBOR_ROOT_TYPE_MAP;
+ } else {
+ goto cleanup;
+ }
+ } else {
+ goto cleanup;
+ }
+
+ if (size == 0 || req == NULL) {
+ goto cleanup;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(nsm_cmds); ++i) {
+ if (nsm_cmds[i].root_type == root_type &&
+ strlen(nsm_cmds[i].name) == size &&
+ memcmp(nsm_cmds[i].name, req, size) == 0) {
+ cbor_decref(&item);
+ return &nsm_cmds[i];
+ }
+ }
+
+ cleanup:
+ if (item) {
+ cbor_decref(&item);
+ }
+ return NULL;
+}
+
+static bool get_nsm_request_response(VirtIONSM *vnsm, struct iovec *req,
+ struct iovec *resp, Error **errp)
+{
+ const struct nsm_cmd *cmd;
+
+ if (req->iov_len > NSM_REQUEST_MAX_SIZE) {
+ if (error_response(resp, NSM_INPUT_TOO_LARGE, errp)) {
+ return true;
+ }
+ error_setg(errp, "Failed to initialize InputTooLarge response");
+ return false;
+ }
+
+ cmd = get_nsm_request_cmd(req->iov_base, req->iov_len);
+
+ if (cmd == NULL) {
+ if (error_response(resp, NSM_INVALID_OPERATION, errp)) {
+ return true;
+ }
+ error_setg(errp, "Failed to initialize InvalidOperation response");
+ return false;
+ }
+
+ return cmd->response_fn(vnsm, req, resp, errp);
+}
+
+static void handle_input(VirtIODevice *vdev, VirtQueue *vq)
+{
+ g_autofree VirtQueueElement *out_elem = NULL;
+ g_autofree VirtQueueElement *in_elem = NULL;
+ VirtIONSM *vnsm = VIRTIO_NSM(vdev);
+ Error *err = NULL;
+ size_t sz;
+ struct iovec req = {.iov_base = NULL, .iov_len = 0};
+ struct iovec res = {.iov_base = NULL, .iov_len = 0};
+
+ out_elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!out_elem) {
+ /* nothing in virtqueue */
+ return;
+ }
+
+ sz = iov_size(out_elem->out_sg, out_elem->out_num);
+ if (sz == 0) {
+ virtio_error(vdev, "Expected non-zero sized request buffer in "
+ "virtqueue");
+ goto cleanup;
+ }
+
+ in_elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!in_elem) {
+ virtio_error(vdev, "Expected response buffer after request buffer "
+ "in virtqueue");
+ goto cleanup;
+ }
+ if (iov_size(in_elem->in_sg, in_elem->in_num) != NSM_RESPONSE_BUF_SIZE) {
+ virtio_error(vdev, "Expected response buffer of length 0x3000");
+ goto cleanup;
+ }
+
+ req.iov_base = g_malloc(sz);
+ req.iov_len = iov_to_buf(out_elem->out_sg, out_elem->out_num, 0,
+ req.iov_base, sz);
+ if (req.iov_len != sz) {
+ virtio_error(vdev, "Failed to copy request buffer");
+ goto cleanup;
+ }
+
+ res.iov_base = g_malloc(NSM_RESPONSE_BUF_SIZE);
+ res.iov_len = NSM_RESPONSE_BUF_SIZE;
+
+ if (!get_nsm_request_response(vnsm, &req, &res, &err)) {
+ error_report_err(err);
+ virtio_error(vdev, "Failed to get NSM request response");
+ goto cleanup;
+ }
+
+ sz = iov_from_buf(in_elem->in_sg, in_elem->in_num, 0, res.iov_base,
+ res.iov_len);
+ if (sz != res.iov_len) {
+ virtio_error(vdev, "Failed to copy response buffer");
+ goto cleanup;
+ }
+
+ g_free(req.iov_base);
+ g_free(res.iov_base);
+ virtqueue_push(vq, out_elem, 0);
+ virtqueue_push(vq, in_elem, sz);
+ virtio_notify(vdev, vq);
+ return;
+
+ cleanup:
+ g_free(req.iov_base);
+ g_free(res.iov_base);
+ if (out_elem) {
+ virtqueue_detach_element(vq, out_elem, 0);
+ }
+ if (in_elem) {
+ virtqueue_detach_element(vq, in_elem, 0);
+ }
+}
+
+static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
+{
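+ /* Pass transport features through unchanged; no device-specific bits. */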
+ return f;
+}
+
+static bool extend_pcr(VirtIONSM *vnsm, int ind, uint8_t *data, uint16_t len)
+{
+ Error *err = NULL;
+ struct PCRInfo *pcr = &(vnsm->pcrs[ind]);
+ size_t digest_len = QCRYPTO_HASH_DIGEST_LEN_SHA384;
+ uint8_t result[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+ uint8_t *ptr = result;
+ struct iovec iov[2] = {
+ { .iov_base = pcr->data, .iov_len = QCRYPTO_HASH_DIGEST_LEN_SHA384 },
+ { .iov_base = data, .iov_len = len },
+ };
+
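+ /* Extend: new PCR value = SHA-384(current PCR value || data). */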
+ if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA384, iov, 2, &ptr, &digest_len,
+ &err) < 0) {
+ return false;
+ }
+
+ memcpy(pcr->data, result, QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return true;
+}
+
+static void lock_pcr(VirtIONSM *vnsm, int ind)
+{
+ vnsm->pcrs[ind].locked = true;
+}
+
+static void virtio_nsm_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIONSM *vnsm = VIRTIO_NSM(dev);
+
+ vnsm->max_pcrs = NSM_MAX_PCRS;
+ vnsm->digest = (char *) "SHA384";
+ if (vnsm->module_id == NULL) {
+ vnsm->module_id = (char *) "i-234-enc5678";
+ }
+ vnsm->version_major = 1;
+ vnsm->version_minor = 0;
+ vnsm->version_patch = 0;
+ vnsm->extend_pcr = extend_pcr;
+ vnsm->lock_pcr = lock_pcr;
+
+ virtio_init(vdev, VIRTIO_ID_NITRO_SEC_MOD, 0);
+
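+ /*
+ * Single request/response virtqueue; handle_input() consumes a request
+ * buffer followed by a response buffer per transaction.
+ */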
+ vnsm->vq = virtio_add_queue(vdev, 2, handle_input);
+}
+
+static void virtio_nsm_device_unrealize(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+
+ virtio_del_queue(vdev, 0);
+ virtio_cleanup(vdev);
+}
+
+static const VMStateDescription vmstate_pcr_info_entry = {
+ .name = "pcr_info_entry",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(locked, struct PCRInfo),
+ VMSTATE_UINT8_ARRAY(data, struct PCRInfo,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_virtio_nsm_device = {
+ .name = "virtio-nsm-device",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(pcrs, VirtIONSM, NSM_MAX_PCRS, 1,
+ vmstate_pcr_info_entry, struct PCRInfo),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_virtio_nsm = {
+ .name = "virtio-nsm",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const Property virtio_nsm_properties[] = {
+ DEFINE_PROP_STRING("module-id", VirtIONSM, module_id),
+};
+
+static void virtio_nsm_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, virtio_nsm_properties);
+ dc->vmsd = &vmstate_virtio_nsm;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_nsm_device_realize;
+ vdc->unrealize = virtio_nsm_device_unrealize;
+ vdc->get_features = get_features;
+ vdc->vmsd = &vmstate_virtio_nsm_device;
+}
+
+static const TypeInfo virtio_nsm_info = {
+ .name = TYPE_VIRTIO_NSM,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIONSM),
+ .class_init = virtio_nsm_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_nsm_info);
+}
+
+type_init(virtio_register_types)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 9534730..fba2372 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -33,12 +33,12 @@
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "trace.h"
#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
@@ -146,9 +146,7 @@ static const VMStateDescription vmstate_virtio_pci = {
static bool virtio_pci_has_extra_state(DeviceState *d)
{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
+ return true;
}
static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
@@ -615,8 +613,12 @@ static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
reg = &proxy->regs[i];
if (*off >= reg->offset &&
*off + len <= reg->offset + reg->size) {
- *off -= reg->offset;
- return &reg->mr;
+ MemoryRegionSection mrs = memory_region_find(&reg->mr,
+ *off - reg->offset, len);
+ assert(mrs.mr);
+ *off = mrs.offset_within_region;
+ memory_region_unref(mrs.mr);
+ return mrs.mr;
}
}
@@ -866,6 +868,9 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq;
+ if (!proxy->vector_irqfd && vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ return -1;
+ }
+
if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
*n = virtio_config_get_guest_notifier(vdev);
*vector = vdev->config_vector;
@@ -1208,7 +1213,12 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
- return msix_enabled(&proxy->pci_dev);
+
+ if (msix_enabled(&proxy->pci_dev)) {
+ return true;
+ } else {
+ return pci_irq_disabled(&proxy->pci_dev);
+ }
}
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
@@ -1955,6 +1965,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
uint8_t *config;
uint32_t size;
VirtIODevice *vdev = virtio_bus_get_device(bus);
+ int16_t res;
/*
* Virtio capabilities present without
@@ -2050,6 +2061,8 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
if (modern_pio) {
memory_region_init(&proxy->io_bar, OBJECT(proxy),
"virtio-pci-io", 0x4);
+ address_space_init(&proxy->modern_cfg_io_as, &proxy->io_bar,
+ "virtio-pci-cfg-io-as");
pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
@@ -2100,6 +2113,18 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
}
+
+ if (pci_is_vf(&proxy->pci_dev)) {
+ pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset);
+ proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF;
+ } else {
+ res = pcie_sriov_pf_init_from_user_created_vfs(
+ &proxy->pci_dev, proxy->last_pcie_cap_offset, errp);
+ if (res > 0) {
+ proxy->last_pcie_cap_offset += res;
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
+ }
+ }
}
static void virtio_pci_device_unplugged(DeviceState *d)
@@ -2173,6 +2198,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
/* PCI BAR regions must be powers of 2 */
pow2ceil(proxy->notify.offset + proxy->notify.size));
+ address_space_init(&proxy->modern_cfg_mem_as, &proxy->modern_bar,
+ "virtio-pci-cfg-mem-as");
+
if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
@@ -2187,19 +2215,16 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
if (pcie_port && pci_is_express(pci_dev)) {
int pos;
- uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;
+ proxy->last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;
pos = pcie_endpoint_cap_init(pci_dev, 0);
assert(pos > 0);
- pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
- PCI_PM_SIZEOF, errp);
+ pos = pci_pm_init(pci_dev, 0, errp);
if (pos < 0) {
return;
}
- pci_dev->exp.pm_cap = pos;
-
/*
* Indicates that this function complies with revision 1.2 of the
* PCI Power Management Interface Specification.
@@ -2207,9 +2232,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
- pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
+ pcie_aer_init(pci_dev, PCI_ERR_VER, proxy->last_pcie_cap_offset,
PCI_ERR_SIZEOF, NULL);
- last_pcie_cap_offset += PCI_ERR_SIZEOF;
+ proxy->last_pcie_cap_offset += PCI_ERR_SIZEOF;
}
if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
@@ -2234,9 +2259,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
}
if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
- pcie_ats_init(pci_dev, last_pcie_cap_offset,
+ pcie_ats_init(pci_dev, proxy->last_pcie_cap_offset,
proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
- last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
+ proxy->last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
}
if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
@@ -2262,12 +2287,18 @@ static void virtio_pci_exit(PCIDevice *pci_dev)
VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
!pci_bus_is_root(pci_get_bus(pci_dev));
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
+ pcie_sriov_pf_exit(&proxy->pci_dev);
msix_uninit_exclusive_bar(pci_dev);
if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
pci_is_express(pci_dev)) {
pcie_aer_exit(pci_dev);
}
+ address_space_destroy(&proxy->modern_cfg_mem_as);
+ if (modern_pio) {
+ address_space_destroy(&proxy->modern_cfg_io_as);
+ }
}
static void virtio_pci_reset(DeviceState *qdev)
@@ -2293,11 +2324,11 @@ static bool virtio_pci_no_soft_reset(PCIDevice *dev)
{
uint16_t pmcsr;
- if (!pci_is_express(dev) || !dev->exp.pm_cap) {
+ if (!pci_is_express(dev) || !(dev->cap_present & QEMU_PCI_CAP_PM)) {
return false;
}
- pmcsr = pci_get_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL);
+ pmcsr = pci_get_word(dev->config + dev->pm_cap + PCI_PM_CTRL);
/*
* When No_Soft_Reset bit is set and the device
@@ -2326,21 +2357,17 @@ static void virtio_pci_bus_reset_hold(Object *obj, ResetType type)
if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
pci_word_test_and_clear_mask(
- dev->config + dev->exp.pm_cap + PCI_PM_CTRL,
+ dev->config + dev->pm_cap + PCI_PM_CTRL,
PCI_PM_CTRL_STATE_MASK);
}
}
}
-static Property virtio_pci_properties[] = {
+static const Property virtio_pci_properties[] = {
DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
- DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
- VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
- DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
- VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
@@ -2361,7 +2388,6 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_AER_BIT, false),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
@@ -2370,15 +2396,22 @@ static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
PCIDevice *pci_dev = &proxy->pci_dev;
- if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
- virtio_pci_modern(proxy)) {
+ if (virtio_pci_modern(proxy)) {
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
vpciklass->parent_dc_realize(qdev, errp);
}
-static void virtio_pci_class_init(ObjectClass *klass, void *data)
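+/* Forward sync_config from the PCI proxy to the wrapped virtio device. */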
+static int virtio_pci_sync_config(DeviceState *dev, Error **errp)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ return qdev_sync_config(DEVICE(vdev), errp);
+}
+
+static void virtio_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2394,6 +2427,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data)
device_class_set_parent_realize(dc, virtio_pci_dc_realize,
&vpciklass->parent_dc_realize);
rc->phases.hold = virtio_pci_bus_reset_hold;
+ dc->sync_config = virtio_pci_sync_config;
}
static const TypeInfo virtio_pci_info = {
@@ -2405,14 +2439,13 @@ static const TypeInfo virtio_pci_info = {
.abstract = true,
};
-static Property virtio_pci_generic_properties[] = {
+static const Property virtio_pci_generic_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
+static void virtio_pci_base_class_init(ObjectClass *klass, const void *data)
{
const VirtioPCIDeviceTypeInfo *t = data;
if (t->class_init) {
@@ -2420,7 +2453,7 @@ static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
}
}
-static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
+static void virtio_pci_generic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -2460,7 +2493,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
.name = t->generic_name,
.parent = base_type_info.name,
.class_init = virtio_pci_generic_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
@@ -2476,18 +2509,18 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
generic_type_info.parent = base_name;
generic_type_info.class_init = virtio_pci_base_class_init;
- generic_type_info.class_data = (void *)t;
+ generic_type_info.class_data = t;
assert(!t->non_transitional_name);
assert(!t->transitional_name);
} else {
base_type_info.class_init = virtio_pci_base_class_init;
- base_type_info.class_data = (void *)t;
+ base_type_info.class_data = t;
}
- type_register(&base_type_info);
+ type_register_static(&base_type_info);
if (generic_type_info.name) {
- type_register(&generic_type_info);
+ type_register_static(&generic_type_info);
}
if (t->non_transitional_name) {
@@ -2495,13 +2528,13 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
.name = t->non_transitional_name,
.parent = base_type_info.name,
.instance_init = virtio_pci_non_transitional_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ }
},
};
- type_register(&non_transitional_type_info);
+ type_register_static(&non_transitional_type_info);
}
if (t->transitional_name) {
@@ -2509,7 +2542,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
.name = t->transitional_name,
.parent = base_type_info.name,
.instance_init = virtio_pci_transitional_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
/*
* Transitional virtio devices work only as Conventional PCI
* devices because they require PIO ports.
@@ -2518,7 +2551,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{ }
},
};
- type_register(&transitional_type_info);
+ type_register_static(&transitional_type_info);
}
g_free(base_name);
}
@@ -2565,7 +2598,7 @@ static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}
-static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
+static void virtio_pci_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *bus_class = BUS_CLASS(klass);
VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c
index cfe7f3b..babd91c 100644
--- a/hw/virtio/virtio-pmem-pci.c
+++ b/hw/virtio/virtio-pmem-pci.c
@@ -80,7 +80,7 @@ static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md,
info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM;
}
-static void virtio_pmem_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_pmem_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c
index c3512c2..3416ea1 100644
--- a/hw/virtio/virtio-pmem.c
+++ b/hw/virtio/virtio-pmem.c
@@ -21,7 +21,7 @@
#include "hw/virtio/virtio-access.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pmem.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "trace.h"
@@ -155,14 +155,13 @@ static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem,
return &pmem->memdev->mr;
}
-static Property virtio_pmem_properties[] = {
+static const Property virtio_pmem_properties[] = {
DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 0),
DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_pmem_class_init(ObjectClass *klass, void *data)
+static void virtio_pmem_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c
index 1dd96ed..3b6377c 100644
--- a/hw/virtio/virtio-qmp.c
+++ b/hw/virtio/virtio-qmp.c
@@ -15,8 +15,8 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "qapi/qapi-commands-qom.h"
-#include "qapi/qmp/qobject.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qobject.h"
+#include "qobject/qjson.h"
#include "hw/virtio/vhost-user.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -121,6 +121,12 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_STATUS, \
"VHOST_USER_PROTOCOL_F_STATUS: Querying and notifying back-end "
"device status supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SHARED_OBJECT, \
+ "VHOST_USER_PROTOCOL_F_SHARED_OBJECT: Backend shared object "
+ "supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_DEVICE_STATE, \
+ "VHOST_USER_PROTOCOL_F_DEVICE_STATE: Backend device state transfer "
+ "supported"),
{ -1, "" }
};
@@ -450,6 +456,9 @@ static const qmp_virtio_feature_map_t virtio_mem_feature_map[] = {
FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \
"VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be "
"accessed"),
+ FEATURE_ENTRY(VIRTIO_MEM_F_PERSISTENT_SUSPEND, \
+ "VIRTIO_MEM_F_PERSISTENT_SUSPND: Plugged memory will remain "
+ "plugged when suspending+resuming"),
{ -1, "" }
};
#endif
diff --git a/hw/virtio/virtio-rng-pci.c b/hw/virtio/virtio-rng-pci.c
index 6e76f8b..39b6003 100644
--- a/hw/virtio/virtio-rng-pci.c
+++ b/hw/virtio/virtio-rng-pci.c
@@ -32,12 +32,11 @@ struct VirtIORngPCI {
VirtIORNG vdev;
};
-static Property virtio_rng_properties[] = {
+static const Property virtio_rng_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -54,7 +53,7 @@ static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_rng_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index f74efff..3df5d25 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -17,8 +17,8 @@
#include "hw/virtio/virtio.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-rng.h"
-#include "sysemu/rng.h"
-#include "sysemu/runstate.h"
+#include "system/rng.h"
+#include "system/runstate.h"
#include "qom/object_interfaces.h"
#include "trace.h"
@@ -159,17 +159,18 @@ static void check_rate_limit(void *opaque)
vrng->activate_timer = true;
}
-static void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status)
+static int virtio_rng_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIORNG *vrng = VIRTIO_RNG(vdev);
if (!vdev->vm_running) {
- return;
+ return 0;
}
vdev->status = status;
/* Something changed, try to process buffers */
virtio_rng_process(vrng);
+ return 0;
}
static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
@@ -184,8 +185,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
/* Workaround: Property parsing does not enforce unsigned integers,
* so this is a hack to reject such numbers. */
- if (vrng->conf.max_bytes > INT64_MAX) {
- error_setg(errp, "'max-bytes' parameter must be non-negative, "
+ if (vrng->conf.max_bytes == 0 ||
+ vrng->conf.max_bytes > INT64_MAX) {
+ error_setg(errp, "'max-bytes' parameter must be positive, "
"and less than 2^63");
return;
}
@@ -248,7 +250,7 @@ static const VMStateDescription vmstate_virtio_rng = {
},
};
-static Property virtio_rng_properties[] = {
+static const Property virtio_rng_properties[] = {
/* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
* you have an entropy source capable of generating more entropy than this
* and you can pass it through via virtio-rng, then hats off to you. Until
@@ -257,10 +259,9 @@ static Property virtio_rng_properties[] = {
DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX),
DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16),
DEFINE_PROP_LINK("rng", VirtIORNG, conf.rng, TYPE_RNG_BACKEND, RngBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_rng_class_init(ObjectClass *klass, void *data)
+static void virtio_rng_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/virtio/virtio-scsi-pci.c b/hw/virtio/virtio-scsi-pci.c
index e8e3442..af87759 100644
--- a/hw/virtio/virtio-scsi-pci.c
+++ b/hw/virtio/virtio-scsi-pci.c
@@ -35,12 +35,11 @@ struct VirtIOSCSIPCI {
VirtIOSCSI vdev;
};
-static Property virtio_scsi_pci_properties[] = {
+static const Property virtio_scsi_pci_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
- DEFINE_PROP_END_OF_LIST(),
};
static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
@@ -73,7 +72,7 @@ static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_scsi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio-serial-pci.c b/hw/virtio/virtio-serial-pci.c
index cea31ad..3f212ff 100644
--- a/hw/virtio/virtio-serial-pci.c
+++ b/hw/virtio/virtio-serial-pci.c
@@ -69,15 +69,14 @@ static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
-static Property virtio_serial_pci_properties[] = {
+static const Property virtio_serial_pci_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_serial_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 583a224..82a285a 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -30,8 +30,8 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
-#include "sysemu/dma.h"
-#include "sysemu/runstate.h"
+#include "system/dma.h"
+#include "system/runstate.h"
#include "virtio-qmp.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -205,6 +205,15 @@ static const char *virtio_id_to_name(uint16_t device_id)
return name;
}
+static void virtio_check_indirect_feature(VirtIODevice *vdev)
+{
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Device %s: indirect_desc was not negotiated!\n",
+ vdev->name);
+ }
+}
+
/* Called within call_rcu(). */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
@@ -744,6 +753,60 @@ int virtio_queue_empty(VirtQueue *vq)
}
}
+static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ if (unlikely(!vq->vring.avail)) {
+ return false;
+ }
+
+ return (uint16_t)shadow_idx != vring_avail_idx(vq);
+}
+
+static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ VRingPackedDesc desc;
+ VRingMemoryRegionCaches *caches;
+
+ if (unlikely(!vq->vring.desc)) {
+ return false;
+ }
+
+ caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return false;
+ }
+
+ vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
+ shadow_idx, true);
+
+ return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
+}
+
+static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ if (virtio_device_disabled(vq->vdev)) {
+ return false;
+ }
+
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ return virtio_queue_packed_poll(vq, shadow_idx);
+ } else {
+ return virtio_queue_split_poll(vq, shadow_idx);
+ }
+}
+
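+/*
+ * Re-enable guest notifications and check whether new buffers became
+ * available while they were disabled. @opaque is the shadow avail index
+ * previously returned by virtqueue_get_avail_bytes(); a negative value
+ * skips the check and only re-enables notifications.
+ */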
+bool virtio_queue_enable_notification_and_check(VirtQueue *vq,
+ int opaque)
+{
+ virtio_queue_set_notification(vq, 1);
+
+ if (opaque >= 0) {
+ return virtio_queue_poll(vq, (unsigned)opaque);
+ } else {
+ return false;
+ }
+}
+
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
@@ -872,6 +935,46 @@ static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
vq->used_elems[idx].ndescs = elem->ndescs;
}
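+/*
+ * With VIRTIO_F_IN_ORDER the device may complete elements out of order:
+ * record the written length and mark the element as filled in used_elems,
+ * deferring publication to the guest until virtqueue_ordered_flush()
+ * reaches it in ring order.
+ */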
+static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ unsigned int i, steps, max_steps;
+
+ i = vq->used_idx % vq->vring.num;
+ steps = 0;
+ /*
+ * We shouldn't need to increase 'i' by more than the distance
+ * between used_idx and last_avail_idx.
+ */
+ max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
+
+ /* Search for element in vq->used_elems */
+ while (steps <= max_steps) {
+ /* Found element, set length and mark as filled */
+ if (vq->used_elems[i].index == elem->index) {
+ vq->used_elems[i].len = len;
+ vq->used_elems[i].in_order_filled = true;
+ break;
+ }
+
+ steps += vq->used_elems[i].ndescs;
+ i += vq->used_elems[i].ndescs;
+
+ if (i >= vq->vring.num) {
+ i -= vq->vring.num;
+ }
+ }
+
+ /*
+ * We should be able to find a matching VirtQueueElement in
+ * used_elems. If we don't, this is an error.
+ */
+ if (steps >= max_steps) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: %s cannot fill buffer id %u\n",
+ __func__, vq->vdev->name, elem->index);
+ }
+}
+
static void virtqueue_packed_fill_desc(VirtQueue *vq,
const VirtQueueElement *elem,
unsigned int idx,
@@ -922,7 +1025,9 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
return;
}
- if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
+ virtqueue_ordered_fill(vq, elem, len);
+ } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_fill(vq, elem, len, idx);
} else {
virtqueue_split_fill(vq, elem, len, idx);
@@ -981,6 +1086,73 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
}
}
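+/*
+ * Publish, in ring order, every element that virtqueue_ordered_fill() has
+ * already marked as filled, starting at used_idx, and advance the used
+ * index (or packed descriptors) in a single batch.
+ */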
+static void virtqueue_ordered_flush(VirtQueue *vq)
+{
+ unsigned int i = vq->used_idx % vq->vring.num;
+ unsigned int ndescs = 0;
+ uint16_t old = vq->used_idx;
+ uint16_t new;
+ bool packed;
+ VRingUsedElem uelem;
+
+ packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
+
+ if (packed) {
+ if (unlikely(!vq->vring.desc)) {
+ return;
+ }
+ } else if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
+ /* First expected in-order element isn't ready, nothing to do */
+ if (!vq->used_elems[i].in_order_filled) {
+ return;
+ }
+
+ /* Search for filled elements in-order */
+ while (vq->used_elems[i].in_order_filled) {
+ /*
+ * First entry for packed VQs is written last so the guest
+ * doesn't see invalid descriptors.
+ */
+ if (packed && i != vq->used_idx) {
+ virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
+ } else if (!packed) {
+ uelem.id = vq->used_elems[i].index;
+ uelem.len = vq->used_elems[i].len;
+ vring_used_write(vq, &uelem, i);
+ }
+
+ vq->used_elems[i].in_order_filled = false;
+ ndescs += vq->used_elems[i].ndescs;
+ i += vq->used_elems[i].ndescs;
+ if (i >= vq->vring.num) {
+ i -= vq->vring.num;
+ }
+ }
+
+ if (packed) {
+ virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
+ vq->used_idx += ndescs;
+ if (vq->used_idx >= vq->vring.num) {
+ vq->used_idx -= vq->vring.num;
+ vq->used_wrap_counter ^= 1;
+ vq->signalled_used_valid = false;
+ }
+ } else {
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ new = old + ndescs;
+ vring_used_idx_set(vq, new);
+ if (unlikely((int16_t)(new - vq->signalled_used) <
+ (uint16_t)(new - old))) {
+ vq->signalled_used_valid = false;
+ }
+ }
+ vq->inuse -= ndescs;
+}
+
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
if (virtio_device_disabled(vq->vdev)) {
@@ -988,7 +1160,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
return;
}
- if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
+ virtqueue_ordered_flush(vq);
+ } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_flush(vq, count);
} else {
virtqueue_split_flush(vq, count);
@@ -1331,9 +1505,9 @@ err:
goto done;
}
-void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
- unsigned int *out_bytes,
- unsigned max_in_bytes, unsigned max_out_bytes)
+int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes, unsigned max_in_bytes,
+ unsigned max_out_bytes)
{
uint16_t desc_size;
VRingMemoryRegionCaches *caches;
@@ -1366,7 +1540,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
caches);
}
- return;
+ return (int)vq->shadow_avail_idx;
err:
if (in_bytes) {
*in_bytes = 0;
@@ -1374,6 +1548,8 @@ err:
if (out_bytes) {
*out_bytes = 0;
}
+
+ return -1;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
@@ -1505,7 +1681,7 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
- unsigned int i, head, max;
+ unsigned int i, head, max, idx;
VRingMemoryRegionCaches *caches;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
@@ -1513,8 +1689,8 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
- hwaddr addr[VIRTQUEUE_MAX_SIZE];
- struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE];
+ struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
int rc;
@@ -1566,6 +1742,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
+ virtio_check_indirect_feature(vdev);
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
@@ -1629,6 +1806,13 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
elem->in_sg[i] = iov[out_num + i];
}
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
+ idx = (vq->last_avail_idx - 1) % vq->vring.num;
+ vq->used_elems[idx].index = elem->index;
+ vq->used_elems[idx].len = elem->len;
+ vq->used_elems[idx].ndescs = elem->ndescs;
+ }
+
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
@@ -1652,8 +1836,8 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
- hwaddr addr[VIRTQUEUE_MAX_SIZE];
- struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE];
+ struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE];
VRingPackedDesc desc;
uint16_t id;
int rc;
@@ -1696,6 +1880,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
+ virtio_check_indirect_feature(vdev);
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
@@ -1762,6 +1947,13 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
elem->index = id;
elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
+ vq->used_elems[vq->last_avail_idx].index = elem->index;
+ vq->used_elems[vq->last_avail_idx].len = elem->len;
+ vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
+ }
+
vq->last_avail_idx += elem->ndescs;
vq->inuse += elem->ndescs;
@@ -2040,12 +2232,12 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
+ int ret = 0;
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
val & VIRTIO_CONFIG_S_FEATURES_OK) {
- int ret = virtio_validate_features(vdev);
-
+ ret = virtio_validate_features(vdev);
if (ret) {
return ret;
}
@@ -2058,16 +2250,20 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
}
if (k->set_status) {
- k->set_status(vdev, val);
+ ret = k->set_status(vdev, val);
+ if (ret) {
+ qemu_log("set %s status to %d failed, old status: %d\n",
+ vdev->name, val, vdev->status);
+ }
}
vdev->status = val;
- return 0;
+ return ret;
}
static enum virtio_device_endian virtio_default_endian(void)
{
- if (target_words_bigendian()) {
+ if (target_big_endian()) {
return VIRTIO_DEVICE_ENDIAN_BIG;
} else {
return VIRTIO_DEVICE_ENDIAN_LITTLE;
@@ -2135,45 +2331,6 @@ void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
}
}
-void virtio_reset(void *opaque)
-{
- VirtIODevice *vdev = opaque;
- VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
- int i;
-
- virtio_set_status(vdev, 0);
- if (current_cpu) {
- /* Guest initiated reset */
- vdev->device_endian = virtio_current_cpu_endian();
- } else {
- /* System reset */
- vdev->device_endian = virtio_default_endian();
- }
-
- if (vdev->vhost_started && k->get_vhost) {
- vhost_reset_device(k->get_vhost(vdev));
- }
-
- if (k->reset) {
- k->reset(vdev);
- }
-
- vdev->start_on_kick = false;
- vdev->started = false;
- vdev->broken = false;
- vdev->guest_features = 0;
- vdev->queue_sel = 0;
- vdev->status = 0;
- vdev->disabled = false;
- qatomic_set(&vdev->isr, 0);
- vdev->config_vector = VIRTIO_NO_VECTOR;
- virtio_notify_vector(vdev, vdev->config_vector);
-
- for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- __virtio_queue_reset(vdev, i);
- }
-}
-
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
if (!vdev->vq[n].vring.num) {
@@ -2984,6 +3141,49 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return ret;
}
+void virtio_reset(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int i;
+
+ virtio_set_status(vdev, 0);
+ if (current_cpu) {
+ /* Guest initiated reset */
+ vdev->device_endian = virtio_current_cpu_endian();
+ } else {
+ /* System reset */
+ vdev->device_endian = virtio_default_endian();
+ }
+
+ if (k->get_vhost) {
+ struct vhost_dev *hdev = k->get_vhost(vdev);
+ /* Only reset when vhost back-end is connected */
+ if (hdev && hdev->vhost_ops) {
+ vhost_reset_device(hdev);
+ }
+ }
+
+ if (k->reset) {
+ k->reset(vdev);
+ }
+
+ vdev->start_on_kick = false;
+ vdev->started = false;
+ vdev->broken = false;
+ virtio_set_features_nocheck(vdev, 0);
+ vdev->queue_sel = 0;
+ vdev->status = 0;
+ vdev->disabled = false;
+ qatomic_set(&vdev->isr, 0);
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ virtio_notify_vector(vdev, vdev->config_vector);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ __virtio_queue_reset(vdev, i);
+ }
+}
+
static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
Error **errp)
{
@@ -3070,6 +3270,13 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
config_len--;
}
+ if (vdc->pre_load_queues) {
+ ret = vdc->pre_load_queues(vdev);
+ if (ret) {
+ return ret;
+ }
+ }
+
num = qemu_get_be32(f);
if (num > VIRTIO_QUEUE_MAX) {
@@ -3227,7 +3434,7 @@ void virtio_cleanup(VirtIODevice *vdev)
qemu_del_vm_change_state_handler(vdev->vmstate);
}
-static void virtio_vmstate_change(void *opaque, bool running, RunState state)
+static int virtio_vmstate_change(void *opaque, bool running, RunState state)
{
VirtIODevice *vdev = opaque;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@@ -3244,8 +3451,12 @@ static void virtio_vmstate_change(void *opaque, bool running, RunState state)
}
if (!backend_run) {
- virtio_set_status(vdev, vdev->status);
+ int ret = virtio_set_status(vdev, vdev->status);
+ if (ret) {
+ return ret;
+ }
}
+ return 0;
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
@@ -3297,7 +3508,7 @@ void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
vdev->config = NULL;
}
vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
- virtio_vmstate_change, vdev);
+ NULL, virtio_vmstate_change, vdev);
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
}
@@ -3456,7 +3667,6 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
int n)
{
/* We don't have a reference like avail idx in shared memory */
- return;
}
static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
@@ -3481,10 +3691,9 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
/* used idx was updated through set_last_avail_idx() */
- return;
}
-static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
+static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
{
RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
@@ -3497,7 +3706,7 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return virtio_queue_packed_update_used_idx(vdev, n);
} else {
- return virtio_split_packed_update_used_idx(vdev, n);
+ return virtio_queue_split_update_used_idx(vdev, n);
}
}
@@ -3820,13 +4029,12 @@ static void virtio_device_instance_finalize(Object *obj)
g_free(vdev->vector_queues);
}
-static Property virtio_properties[] = {
+static const Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
disable_legacy_check, false),
- DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
@@ -3949,7 +4157,7 @@ void virtio_device_release_ioeventfd(VirtIODevice *vdev)
virtio_bus_release_ioeventfd(vbus);
}
-static void virtio_device_class_init(ObjectClass *klass, void *data)
+static void virtio_device_class_init(ObjectClass *klass, const void *data)
{
/* Set the default value here. */
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig
new file mode 100644
index 0000000..2382b29
--- /dev/null
+++ b/hw/vmapple/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config VMAPPLE_AES
+ bool
+
+config VMAPPLE_BDIF
+ bool
+
+config VMAPPLE_CFG
+ bool
+
+config VMAPPLE_VIRTIO_BLK
+ bool
+
+config VMAPPLE
+ bool
+ depends on ARM
+ depends on HVF
+ default y if ARM
+ imply PCI_DEVICES
+ select ARM_GICV3
+ select PLATFORM_BUS
+ select PCI_EXPRESS
+ select PCI_EXPRESS_GENERIC_BRIDGE
+ select PL011 # UART
+ select PL031 # RTC
+ select PL061 # GPIO
+ select GPIO_PWR
+ select PVPANIC_MMIO
+ select VMAPPLE_AES
+ select VMAPPLE_BDIF
+ select VMAPPLE_CFG
+ select MAC_PVG_MMIO
+ select VMAPPLE_VIRTIO_BLK
diff --git a/hw/vmapple/aes.c b/hw/vmapple/aes.c
new file mode 100644
index 0000000..a4853a9
--- /dev/null
+++ b/hw/vmapple/aes.c
@@ -0,0 +1,581 @@
+/*
+ * QEMU Apple AES device emulation
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "trace.h"
+#include "crypto/hash.h"
+#include "crypto/aes.h"
+#include "crypto/cipher.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/vmapple/vmapple.h"
+#include "migration/vmstate.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "system/dma.h"
+
+OBJECT_DECLARE_SIMPLE_TYPE(AESState, APPLE_AES)
+
+#define MAX_FIFO_SIZE 9
+
+#define CMD_KEY 0x1
+#define CMD_KEY_CONTEXT_SHIFT 27
+#define CMD_KEY_CONTEXT_MASK (0x1 << CMD_KEY_CONTEXT_SHIFT)
+#define CMD_KEY_SELECT_MAX_IDX 0x7
+#define CMD_KEY_SELECT_SHIFT 24
+#define CMD_KEY_SELECT_MASK (CMD_KEY_SELECT_MAX_IDX << CMD_KEY_SELECT_SHIFT)
+#define CMD_KEY_KEY_LEN_NUM 4u
+#define CMD_KEY_KEY_LEN_SHIFT 22
+#define CMD_KEY_KEY_LEN_MASK ((CMD_KEY_KEY_LEN_NUM - 1u) << CMD_KEY_KEY_LEN_SHIFT)
+#define CMD_KEY_ENCRYPT_SHIFT 20
+#define CMD_KEY_ENCRYPT_MASK (0x1 << CMD_KEY_ENCRYPT_SHIFT)
+#define CMD_KEY_BLOCK_MODE_SHIFT 16
+#define CMD_KEY_BLOCK_MODE_MASK (0x3 << CMD_KEY_BLOCK_MODE_SHIFT)
+#define CMD_IV 0x2
+#define CMD_IV_CONTEXT_SHIFT 26
+#define CMD_IV_CONTEXT_MASK (0x3 << CMD_KEY_CONTEXT_SHIFT)
+#define CMD_DSB 0x3
+#define CMD_SKG 0x4
+#define CMD_DATA 0x5
+#define CMD_DATA_KEY_CTX_SHIFT 27
+#define CMD_DATA_KEY_CTX_MASK (0x1 << CMD_DATA_KEY_CTX_SHIFT)
+#define CMD_DATA_IV_CTX_SHIFT 25
+#define CMD_DATA_IV_CTX_MASK (0x3 << CMD_DATA_IV_CTX_SHIFT)
+#define CMD_DATA_LEN_MASK 0xffffff
+#define CMD_STORE_IV 0x6
+#define CMD_STORE_IV_ADDR_MASK 0xffffff
+#define CMD_WRITE_REG 0x7
+#define CMD_FLAG 0x8
+#define CMD_FLAG_STOP_MASK BIT(26)
+#define CMD_FLAG_RAISE_IRQ_MASK BIT(27)
+#define CMD_FLAG_INFO_MASK 0xff
+#define CMD_MAX 0x10
+
+#define CMD_SHIFT 28
+
+#define REG_STATUS 0xc
+#define REG_STATUS_DMA_READ_RUNNING BIT(0)
+#define REG_STATUS_DMA_READ_PENDING BIT(1)
+#define REG_STATUS_DMA_WRITE_RUNNING BIT(2)
+#define REG_STATUS_DMA_WRITE_PENDING BIT(3)
+#define REG_STATUS_BUSY BIT(4)
+#define REG_STATUS_EXECUTING BIT(5)
+#define REG_STATUS_READY BIT(6)
+#define REG_STATUS_TEXT_DPA_SEEDED BIT(7)
+#define REG_STATUS_UNWRAP_DPA_SEEDED BIT(8)
+
+#define REG_IRQ_STATUS 0x18
+#define REG_IRQ_STATUS_INVALID_CMD BIT(2)
+#define REG_IRQ_STATUS_FLAG BIT(5)
+#define REG_IRQ_ENABLE 0x1c
+#define REG_WATERMARK 0x20
+#define REG_Q_STATUS 0x24
+#define REG_FLAG_INFO 0x30
+#define REG_FIFO 0x200
+
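+/*
+ * Commands are streamed into REG_FIFO one 32-bit word at a time: the top
+ * nibble of the first word selects the command and the following words
+ * carry its payload. fifo_process() executes a command as soon as enough
+ * words have arrived (see has_payload()).
+ */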
+static const uint32_t key_lens[CMD_KEY_KEY_LEN_NUM] = {
+ [0] = 16,
+ [1] = 24,
+ [2] = 32,
+ [3] = 64,
+};
+
+typedef struct Key {
+ uint32_t key_len;
+ uint8_t key[32];
+} Key;
+
+typedef struct IV {
+ uint32_t iv[4];
+} IV;
+
+static Key builtin_keys[CMD_KEY_SELECT_MAX_IDX + 1] = {
+ [1] = {
+ .key_len = 32,
+ .key = { 0x1 },
+ },
+ [2] = {
+ .key_len = 32,
+ .key = { 0x2 },
+ },
+ [3] = {
+ .key_len = 32,
+ .key = { 0x3 },
+ }
+};
+
+struct AESState {
+ SysBusDevice parent_obj;
+
+ qemu_irq irq;
+ MemoryRegion iomem1;
+ MemoryRegion iomem2;
+ AddressSpace *as;
+
+ uint32_t status;
+ uint32_t q_status;
+ uint32_t irq_status;
+ uint32_t irq_enable;
+ uint32_t watermark;
+ uint32_t flag_info;
+ uint32_t fifo[MAX_FIFO_SIZE];
+ uint32_t fifo_idx;
+ Key key[2];
+ IV iv[4];
+ bool is_encrypt;
+ QCryptoCipherMode block_mode;
+};
+
+static void aes_update_irq(AESState *s)
+{
+ qemu_set_irq(s->irq, !!(s->irq_status & s->irq_enable));
+}
+
+static uint64_t aes1_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AESState *s = opaque;
+ uint64_t res = 0;
+
+ switch (offset) {
+ case REG_STATUS:
+ res = s->status;
+ break;
+ case REG_IRQ_STATUS:
+ res = s->irq_status;
+ break;
+ case REG_IRQ_ENABLE:
+ res = s->irq_enable;
+ break;
+ case REG_WATERMARK:
+ res = s->watermark;
+ break;
+ case REG_Q_STATUS:
+ res = s->q_status;
+ break;
+ case REG_FLAG_INFO:
+ res = s->flag_info;
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: Unknown AES MMIO offset %" PRIx64 "\n",
+ __func__, offset);
+ break;
+ }
+
+ trace_aes_read(offset, res);
+
+ return res;
+}
+
+static void fifo_append(AESState *s, uint64_t val)
+{
+ if (s->fifo_idx == MAX_FIFO_SIZE) {
+ /* Exceeded the FIFO. Bail out */
+ return;
+ }
+
+ s->fifo[s->fifo_idx++] = val;
+}
+
+static bool has_payload(AESState *s, uint32_t elems)
+{
+ return s->fifo_idx >= elems + 1;
+}
+
+static bool cmd_key(AESState *s)
+{
+ uint32_t cmd = s->fifo[0];
+ uint32_t key_select = (cmd & CMD_KEY_SELECT_MASK) >> CMD_KEY_SELECT_SHIFT;
+ uint32_t ctxt = (cmd & CMD_KEY_CONTEXT_MASK) >> CMD_KEY_CONTEXT_SHIFT;
+ uint32_t key_len;
+
+ switch ((cmd & CMD_KEY_BLOCK_MODE_MASK) >> CMD_KEY_BLOCK_MODE_SHIFT) {
+ case 0:
+ s->block_mode = QCRYPTO_CIPHER_MODE_ECB;
+ break;
+ case 1:
+ s->block_mode = QCRYPTO_CIPHER_MODE_CBC;
+ break;
+ default:
+ return false;
+ }
+
+ s->is_encrypt = cmd & CMD_KEY_ENCRYPT_MASK;
+ key_len = key_lens[(cmd & CMD_KEY_KEY_LEN_MASK) >> CMD_KEY_KEY_LEN_SHIFT];
+
+ if (key_select) {
+ trace_aes_cmd_key_select_builtin(ctxt, key_select,
+ s->is_encrypt ? "en" : "de",
+ QCryptoCipherMode_str(s->block_mode));
+ s->key[ctxt] = builtin_keys[key_select];
+ } else {
+ trace_aes_cmd_key_select_new(ctxt, key_len,
+ s->is_encrypt ? "en" : "de",
+ QCryptoCipherMode_str(s->block_mode));
+ if (key_len > sizeof(s->key[ctxt].key)) {
+ return false;
+ }
+ if (!has_payload(s, key_len / sizeof(uint32_t))) {
+ /* wait for payload */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__);
+ return false;
+ }
+ memcpy(&s->key[ctxt].key, &s->fifo[1], key_len);
+ s->key[ctxt].key_len = key_len;
+ }
+
+ return true;
+}
+
+static bool cmd_iv(AESState *s)
+{
+ uint32_t cmd = s->fifo[0];
+ uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT;
+
+ if (!has_payload(s, 4)) {
+ /* wait for payload */
+ return false;
+ }
+ memcpy(&s->iv[ctxt].iv, &s->fifo[1], sizeof(s->iv[ctxt].iv));
+ trace_aes_cmd_iv(ctxt, s->fifo[1], s->fifo[2], s->fifo[3], s->fifo[4]);
+
+ return true;
+}
+
+static void dump_data(const char *desc, const void *p, size_t len)
+{
+ static const size_t MAX_LEN = 0x1000;
+ char hex[MAX_LEN * 2 + 1] = "";
+
+ if (len > MAX_LEN) {
+ return;
+ }
+
+ qemu_hexdump_to_buffer(hex, sizeof(hex), p, len);
+ trace_aes_dump_data(desc, hex);
+}
+
+static bool cmd_data(AESState *s)
+{
+ uint32_t cmd = s->fifo[0];
+ uint32_t ctxt_iv = 0;
+ uint32_t ctxt_key = (cmd & CMD_DATA_KEY_CTX_MASK) >> CMD_DATA_KEY_CTX_SHIFT;
+ uint32_t len = cmd & CMD_DATA_LEN_MASK;
+ uint64_t src_addr = s->fifo[2];
+ uint64_t dst_addr = s->fifo[3];
+ QCryptoCipherAlgo alg;
+ g_autoptr(QCryptoCipher) cipher = NULL;
+ g_autoptr(GByteArray) src = NULL;
+ g_autoptr(GByteArray) dst = NULL;
+ MemTxResult r;
+
+ src_addr |= ((uint64_t)s->fifo[1] << 16) & 0xffff00000000ULL;
+ dst_addr |= ((uint64_t)s->fifo[1] << 32) & 0xffff00000000ULL;
+
+ trace_aes_cmd_data(ctxt_key, ctxt_iv, src_addr, dst_addr, len);
+
+ if (!has_payload(s, 3)) {
+ /* wait for payload */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__);
+ return false;
+ }
+
+ if (ctxt_key >= ARRAY_SIZE(s->key) ||
+ ctxt_iv >= ARRAY_SIZE(s->iv)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key or iv\n", __func__);
+ return false;
+ }
+
+ src = g_byte_array_sized_new(len);
+ g_byte_array_set_size(src, len);
+ dst = g_byte_array_sized_new(len);
+ g_byte_array_set_size(dst, len);
+
+ r = dma_memory_read(s->as, src_addr, src->data, len, MEMTXATTRS_UNSPECIFIED);
+ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA read of %"PRIu32" bytes "
+ "from 0x%"PRIx64" failed. (r=%d)\n",
+ __func__, len, src_addr, r);
+ return false;
+ }
+
+ dump_data("cmd_data(): src_data=", src->data, len);
+
+ switch (s->key[ctxt_key].key_len) {
+ case 128 / 8:
+ alg = QCRYPTO_CIPHER_ALGO_AES_128;
+ break;
+ case 192 / 8:
+ alg = QCRYPTO_CIPHER_ALGO_AES_192;
+ break;
+ case 256 / 8:
+ alg = QCRYPTO_CIPHER_ALGO_AES_256;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key length\n", __func__);
+ return false;
+ }
+ cipher = qcrypto_cipher_new(alg, s->block_mode,
+ s->key[ctxt_key].key,
+ s->key[ctxt_key].key_len, NULL);
+ if (!cipher) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to create cipher object\n",
+ __func__);
+ return false;
+ }
+ if (s->block_mode != QCRYPTO_CIPHER_MODE_ECB) {
+ if (qcrypto_cipher_setiv(cipher, (void *)s->iv[ctxt_iv].iv,
+ sizeof(s->iv[ctxt_iv].iv), NULL) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to set IV\n", __func__);
+ return false;
+ }
+ }
+ if (s->is_encrypt) {
+ if (qcrypto_cipher_encrypt(cipher, src->data, dst->data, len, NULL) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Encryption failed\n", __func__);
+ return false;
+ }
+ } else {
+ if (qcrypto_cipher_decrypt(cipher, src->data, dst->data, len, NULL) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Decryption failed\n", __func__);
+ return false;
+ }
+ }
+
+ dump_data("cmd_data(): dst_data=", dst->data, len);
+ r = dma_memory_write(s->as, dst_addr, dst->data, len, MEMTXATTRS_UNSPECIFIED);
+ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA write of %"PRIu32" bytes "
+ "to 0x%"PRIx64" failed. (r=%d)\n",
+ __func__, len, dst_addr, r);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cmd_store_iv(AESState *s)
+{
+ uint32_t cmd = s->fifo[0];
+ uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT;
+ uint64_t addr = s->fifo[1];
+ MemTxResult dma_result;
+
+ if (!has_payload(s, 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__);
+ return false;
+ }
+
+ if (ctxt >= ARRAY_SIZE(s->iv)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid context. ctxt = %u, allowed: 0..%zu\n",
+ __func__, ctxt, ARRAY_SIZE(s->iv) - 1);
+ return false;
+ }
+
+ addr |= ((uint64_t)cmd << 32) & 0xff00000000ULL;
+ dma_result = dma_memory_write(&address_space_memory, addr,
+ &s->iv[ctxt].iv, sizeof(s->iv[ctxt].iv),
+ MEMTXATTRS_UNSPECIFIED);
+
+ trace_aes_cmd_store_iv(ctxt, addr, s->iv[ctxt].iv[0], s->iv[ctxt].iv[1],
+ s->iv[ctxt].iv[2], s->iv[ctxt].iv[3]);
+
+ return dma_result == MEMTX_OK;
+}
+
+static bool cmd_flag(AESState *s)
+{
+ uint32_t cmd = s->fifo[0];
+ uint32_t raise_irq = cmd & CMD_FLAG_RAISE_IRQ_MASK;
+
+ /* We always process data when it's coming in, so fire an IRQ immediately */
+ if (raise_irq) {
+ s->irq_status |= REG_IRQ_STATUS_FLAG;
+ }
+
+ s->flag_info = cmd & CMD_FLAG_INFO_MASK;
+
+ trace_aes_cmd_flag(!!raise_irq, s->flag_info);
+
+ return true;
+}
+
+static void fifo_process(AESState *s)
+{
+ uint32_t cmd = s->fifo[0] >> CMD_SHIFT;
+ bool success = false;
+
+ if (!s->fifo_idx) {
+ return;
+ }
+
+ switch (cmd) {
+ case CMD_KEY:
+ success = cmd_key(s);
+ break;
+ case CMD_IV:
+ success = cmd_iv(s);
+ break;
+ case CMD_DATA:
+ success = cmd_data(s);
+ break;
+ case CMD_STORE_IV:
+ success = cmd_store_iv(s);
+ break;
+ case CMD_FLAG:
+ success = cmd_flag(s);
+ break;
+ default:
+ s->irq_status |= REG_IRQ_STATUS_INVALID_CMD;
+ break;
+ }
+
+ if (success) {
+ s->fifo_idx = 0;
+ }
+
+ trace_aes_fifo_process(cmd, success);
+}
+
+static void aes1_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
+{
+ AESState *s = opaque;
+
+ trace_aes_write(offset, val);
+
+ switch (offset) {
+ case REG_IRQ_STATUS:
+ s->irq_status &= ~val;
+ break;
+ case REG_IRQ_ENABLE:
+ s->irq_enable = val;
+ break;
+ case REG_FIFO:
+ fifo_append(s, val);
+ fifo_process(s);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Unknown AES MMIO offset %"PRIx64", data %"PRIx64"\n",
+ __func__, offset, val);
+ return;
+ }
+
+ aes_update_irq(s);
+}
+
+static const MemoryRegionOps aes1_ops = {
+ .read = aes1_read,
+ .write = aes1_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t aes2_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint64_t res = 0;
+
+ switch (offset) {
+ case 0:
+ res = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Unknown AES MMIO 2 offset %"PRIx64"\n",
+ __func__, offset);
+ break;
+ }
+
+ trace_aes_2_read(offset, res);
+
+ return res;
+}
+
+static void aes2_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
+{
+ trace_aes_2_write(offset, val);
+
+ switch (offset) {
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Unknown AES MMIO 2 offset %"PRIx64", data %"PRIx64"\n",
+ __func__, offset, val);
+ return;
+ }
+}
+
+static const MemoryRegionOps aes2_ops = {
+ .read = aes2_read,
+ .write = aes2_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void aes_reset(Object *obj, ResetType type)
+{
+ AESState *s = APPLE_AES(obj);
+
+ s->status = 0x3f80;
+ s->q_status = 2;
+ s->irq_status = 0;
+ s->irq_enable = 0;
+ s->watermark = 0;
+}
+
+static void aes_init(Object *obj)
+{
+ AESState *s = APPLE_AES(obj);
+
+ memory_region_init_io(&s->iomem1, obj, &aes1_ops, s, TYPE_APPLE_AES, 0x4000);
+ memory_region_init_io(&s->iomem2, obj, &aes2_ops, s, TYPE_APPLE_AES, 0x4000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem1);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem2);
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq);
+ s->as = &address_space_memory;
+}
+
+static void aes_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = aes_reset;
+}
+
+static const TypeInfo aes_info = {
+ .name = TYPE_APPLE_AES,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AESState),
+ .class_init = aes_class_init,
+ .instance_init = aes_init,
+};
+
+static void aes_register_types(void)
+{
+ type_register_static(&aes_info);
+}
+
+type_init(aes_register_types)
diff --git a/hw/vmapple/bdif.c b/hw/vmapple/bdif.c
new file mode 100644
index 0000000..5ccd374
--- /dev/null
+++ b/hw/vmapple/bdif.c
@@ -0,0 +1,274 @@
+/*
+ * VMApple Backdoor Interface
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "hw/vmapple/vmapple.h"
+#include "hw/sysbus.h"
+#include "hw/block/block.h"
+#include "qapi/error.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VMAppleBdifState, VMAPPLE_BDIF)
+
+struct VMAppleBdifState {
+ SysBusDevice parent_obj;
+
+ BlockBackend *aux;
+ BlockBackend *root;
+ MemoryRegion mmio;
+};
+
+#define VMAPPLE_BDIF_SIZE 0x00200000
+
+#define REG_DEVID_MASK 0xffff0000
+#define DEVID_ROOT 0x00000000
+#define DEVID_AUX 0x00010000
+#define DEVID_USB 0x00100000
+
+#define REG_STATUS 0x0
+#define REG_STATUS_ACTIVE BIT(0)
+#define REG_CFG 0x4
+#define REG_CFG_ACTIVE BIT(1)
+#define REG_UNK1 0x8
+#define REG_BUSY 0x10
+#define REG_BUSY_READY BIT(0)
+#define REG_UNK2 0x400
+#define REG_CMD 0x408
+#define REG_NEXT_DEVICE 0x420
+#define REG_UNK3 0x434
+
+typedef struct VblkSector {
+ uint32_t pad;
+ uint32_t pad2;
+ uint32_t sector;
+ uint32_t pad3;
+} VblkSector;
+
+typedef struct VblkReqCmd {
+ uint64_t addr;
+ uint32_t len;
+ uint32_t flags;
+} VblkReqCmd;
+
+typedef struct VblkReq {
+ VblkReqCmd sector;
+ VblkReqCmd data;
+ VblkReqCmd retval;
+} VblkReq;
+
+#define VBLK_DATA_FLAGS_READ 0x00030001
+#define VBLK_DATA_FLAGS_WRITE 0x00010001
+
+#define VBLK_RET_SUCCESS 0
+#define VBLK_RET_FAILED 1
+
+static uint64_t bdif_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint64_t ret = -1;
+ uint64_t devid = offset & REG_DEVID_MASK;
+
+ switch (offset & ~REG_DEVID_MASK) {
+ case REG_STATUS:
+ ret = REG_STATUS_ACTIVE;
+ break;
+ case REG_CFG:
+ ret = REG_CFG_ACTIVE;
+ break;
+ case REG_UNK1:
+ ret = 0x420;
+ break;
+ case REG_BUSY:
+ ret = REG_BUSY_READY;
+ break;
+ case REG_UNK2:
+ ret = 0x1;
+ break;
+ case REG_UNK3:
+ ret = 0x0;
+ break;
+ case REG_NEXT_DEVICE:
+ switch (devid) {
+ case DEVID_ROOT:
+ ret = 0x8000000;
+ break;
+ case DEVID_AUX:
+ ret = 0x10000;
+ break;
+ }
+ break;
+ }
+
+ trace_bdif_read(offset, size, ret);
+ return ret;
+}
+
+static void le2cpu_sector(VblkSector *sector)
+{
+ sector->sector = le32_to_cpu(sector->sector);
+}
+
+static void le2cpu_reqcmd(VblkReqCmd *cmd)
+{
+ cmd->addr = le64_to_cpu(cmd->addr);
+ cmd->len = le32_to_cpu(cmd->len);
+ cmd->flags = le32_to_cpu(cmd->flags);
+}
+
+static void le2cpu_req(VblkReq *req)
+{
+ le2cpu_reqcmd(&req->sector);
+ le2cpu_reqcmd(&req->data);
+ le2cpu_reqcmd(&req->retval);
+}
+
+static void vblk_cmd(uint64_t devid, BlockBackend *blk, uint64_t gp_addr,
+ uint64_t static_off)
+{
+ VblkReq req;
+ VblkSector sector;
+ uint64_t off = 0;
+ g_autofree char *buf = NULL;
+ uint8_t ret = VBLK_RET_FAILED;
+ int r;
+ MemTxResult dma_result;
+
+ dma_result = dma_memory_read(&address_space_memory, gp_addr,
+ &req, sizeof(req), MEMTXATTRS_UNSPECIFIED);
+ if (dma_result != MEMTX_OK) {
+ goto out;
+ }
+
+ le2cpu_req(&req);
+
+ if (req.sector.len != sizeof(sector)) {
+ goto out;
+ }
+
+ /* Read the vblk command */
+ dma_result = dma_memory_read(&address_space_memory, req.sector.addr,
+ &sector, sizeof(sector),
+ MEMTXATTRS_UNSPECIFIED);
+ if (dma_result != MEMTX_OK) {
+ goto out;
+ }
+ le2cpu_sector(&sector);
+
+ off = sector.sector * 512ULL + static_off;
+
+ /* Sanity check that we're not allocating bogus sizes */
+ if (req.data.len > 128 * MiB) {
+ goto out;
+ }
+
+ buf = g_malloc0(req.data.len);
+ switch (req.data.flags) {
+ case VBLK_DATA_FLAGS_READ:
+ r = blk_pread(blk, off, req.data.len, buf, 0);
+ trace_bdif_vblk_read(devid == DEVID_AUX ? "aux" : "root",
+ req.data.addr, off, req.data.len, r);
+ if (r < 0) {
+ goto out;
+ }
+ dma_result = dma_memory_write(&address_space_memory, req.data.addr, buf,
+ req.data.len, MEMTXATTRS_UNSPECIFIED);
+ if (dma_result == MEMTX_OK) {
+ ret = VBLK_RET_SUCCESS;
+ }
+ break;
+ case VBLK_DATA_FLAGS_WRITE:
+ /* Not needed, iBoot only reads */
+ break;
+ default:
+ break;
+ }
+
+out:
+ dma_memory_write(&address_space_memory, req.retval.addr, &ret, 1,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+static void bdif_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ VMAppleBdifState *s = opaque;
+ uint64_t devid = (offset & REG_DEVID_MASK);
+
+ trace_bdif_write(offset, size, value);
+
+ switch (offset & ~REG_DEVID_MASK) {
+ case REG_CMD:
+ switch (devid) {
+ case DEVID_ROOT:
+ vblk_cmd(devid, s->root, value, 0x0);
+ break;
+ case DEVID_AUX:
+ vblk_cmd(devid, s->aux, value, 0x0);
+ break;
+ }
+ break;
+ }
+}
+
+static const MemoryRegionOps bdif_ops = {
+ .read = bdif_read,
+ .write = bdif_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static void bdif_init(Object *obj)
+{
+ VMAppleBdifState *s = VMAPPLE_BDIF(obj);
+
+ memory_region_init_io(&s->mmio, obj, &bdif_ops, obj,
+ "VMApple Backdoor Interface", VMAPPLE_BDIF_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static const Property bdif_properties[] = {
+ DEFINE_PROP_DRIVE("aux", VMAppleBdifState, aux),
+ DEFINE_PROP_DRIVE("root", VMAppleBdifState, root),
+};
+
+static void bdif_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "VMApple Backdoor Interface";
+ device_class_set_props(dc, bdif_properties);
+}
+
+static const TypeInfo bdif_info = {
+ .name = TYPE_VMAPPLE_BDIF,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VMAppleBdifState),
+ .instance_init = bdif_init,
+ .class_init = bdif_class_init,
+};
+
+static void bdif_register_types(void)
+{
+ type_register_static(&bdif_info);
+}
+
+type_init(bdif_register_types)
diff --git a/hw/vmapple/cfg.c b/hw/vmapple/cfg.c
new file mode 100644
index 0000000..3d58a29
--- /dev/null
+++ b/hw/vmapple/cfg.c
@@ -0,0 +1,195 @@
+/*
+ * VMApple Configuration Region
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/vmapple/vmapple.h"
+#include "hw/sysbus.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "net/net.h"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VMAppleCfgState, VMAPPLE_CFG)
+
+#define VMAPPLE_CFG_SIZE 0x00010000
+
+typedef struct VMAppleCfg {
+ uint32_t version; /* 0x000 */
+ uint32_t nr_cpus; /* 0x004 */
+ uint32_t unk1; /* 0x008 */
+ uint32_t unk2; /* 0x00c */
+ uint32_t unk3; /* 0x010 */
+ uint32_t unk4; /* 0x014 */
+ uint64_t ecid; /* 0x018 */
+ uint64_t ram_size; /* 0x020 */
+ uint32_t run_installer1; /* 0x028 */
+ uint32_t unk5; /* 0x02c */
+ uint32_t unk6; /* 0x030 */
+ uint32_t run_installer2; /* 0x034 */
+ uint32_t rnd; /* 0x038 */
+ uint32_t unk7; /* 0x03c */
+ MACAddr mac_en0; /* 0x040 */
+ uint8_t pad1[2];
+ MACAddr mac_en1; /* 0x048 */
+ uint8_t pad2[2];
+ MACAddr mac_wifi0; /* 0x050 */
+ uint8_t pad3[2];
+ MACAddr mac_bt0; /* 0x058 */
+ uint8_t pad4[2];
+ uint8_t reserved[0xa0]; /* 0x060 */
+ uint32_t cpu_ids[0x80]; /* 0x100 */
+ uint8_t scratch[0x200]; /* 0x180 */
+ char serial[32]; /* 0x380 */
+ char unk8[32]; /* 0x3a0 */
+ char model[32]; /* 0x3c0 */
+ uint8_t unk9[32]; /* 0x3e0 */
+ uint32_t unk10; /* 0x400 */
+ char soc_name[32]; /* 0x404 */
+} VMAppleCfg;
+
+struct VMAppleCfgState {
+ SysBusDevice parent_obj;
+ VMAppleCfg cfg;
+
+ MemoryRegion mem;
+ char *serial;
+ char *model;
+ char *soc_name;
+};
+
+static void vmapple_cfg_reset(Object *obj, ResetType type)
+{
+ VMAppleCfgState *s = VMAPPLE_CFG(obj);
+ VMAppleCfg *cfg;
+
+ cfg = memory_region_get_ram_ptr(&s->mem);
+ memset(cfg, 0, VMAPPLE_CFG_SIZE);
+ *cfg = s->cfg;
+}
+
+static bool set_fixlen_property_or_error(char *restrict dst,
+ const char *restrict src,
+ size_t dst_size, Error **errp,
+ const char *property_name)
+{
+ ERRP_GUARD();
+ size_t len;
+
+ len = g_strlcpy(dst, src, dst_size);
+ if (len < dst_size) { /* len does not count nul terminator */
+ return true;
+ }
+
+ error_setg(errp, "Provided value too long for property '%s'", property_name);
+ error_append_hint(errp, "length (%zu) exceeds maximum of %zu\n",
+ len, dst_size - 1);
+ return false;
+}
+
+#define set_fixlen_property_or_return(dst_array, src, errp, property_name) \
+ do { \
+ if (!set_fixlen_property_or_error((dst_array), (src), \
+ ARRAY_SIZE(dst_array), \
+ (errp), (property_name))) { \
+ return; \
+ } \
+ } while (0)
+
+static void vmapple_cfg_realize(DeviceState *dev, Error **errp)
+{
+ VMAppleCfgState *s = VMAPPLE_CFG(dev);
+ uint32_t i;
+
+ if (!s->serial) {
+ s->serial = g_strdup("1234");
+ }
+ if (!s->model) {
+ s->model = g_strdup("VM0001");
+ }
+ if (!s->soc_name) {
+ s->soc_name = g_strdup("Apple M1 (Virtual)");
+ }
+
+ set_fixlen_property_or_return(s->cfg.serial, s->serial, errp, "serial");
+ set_fixlen_property_or_return(s->cfg.model, s->model, errp, "model");
+ set_fixlen_property_or_return(s->cfg.soc_name, s->soc_name, errp, "soc_name");
+ set_fixlen_property_or_return(s->cfg.unk8, "D/A", errp, "unk8");
+ s->cfg.version = 2;
+ s->cfg.unk1 = 1;
+ s->cfg.unk2 = 1;
+ s->cfg.unk3 = 0x20;
+ s->cfg.unk4 = 0;
+ s->cfg.unk5 = 1;
+ s->cfg.unk6 = 1;
+ s->cfg.unk7 = 0;
+ s->cfg.unk10 = 1;
+
+ if (s->cfg.nr_cpus > ARRAY_SIZE(s->cfg.cpu_ids)) {
+ error_setg(errp,
+ "Failed to create %u CPUs, vmapple machine supports %zu max",
+ s->cfg.nr_cpus, ARRAY_SIZE(s->cfg.cpu_ids));
+ return;
+ }
+ for (i = 0; i < s->cfg.nr_cpus; i++) {
+ s->cfg.cpu_ids[i] = i;
+ }
+}
+
+static void vmapple_cfg_init(Object *obj)
+{
+ VMAppleCfgState *s = VMAPPLE_CFG(obj);
+
+ memory_region_init_ram(&s->mem, obj, "VMApple Config", VMAPPLE_CFG_SIZE,
+ &error_fatal);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mem);
+}
+
+static const Property vmapple_cfg_properties[] = {
+ DEFINE_PROP_UINT32("nr-cpus", VMAppleCfgState, cfg.nr_cpus, 1),
+ DEFINE_PROP_UINT64("ecid", VMAppleCfgState, cfg.ecid, 0),
+ DEFINE_PROP_UINT64("ram-size", VMAppleCfgState, cfg.ram_size, 0),
+ DEFINE_PROP_UINT32("run_installer1", VMAppleCfgState, cfg.run_installer1, 0),
+ DEFINE_PROP_UINT32("run_installer2", VMAppleCfgState, cfg.run_installer2, 0),
+ DEFINE_PROP_UINT32("rnd", VMAppleCfgState, cfg.rnd, 0),
+ DEFINE_PROP_MACADDR("mac-en0", VMAppleCfgState, cfg.mac_en0),
+ DEFINE_PROP_MACADDR("mac-en1", VMAppleCfgState, cfg.mac_en1),
+ DEFINE_PROP_MACADDR("mac-wifi0", VMAppleCfgState, cfg.mac_wifi0),
+ DEFINE_PROP_MACADDR("mac-bt0", VMAppleCfgState, cfg.mac_bt0),
+ DEFINE_PROP_STRING("serial", VMAppleCfgState, serial),
+ DEFINE_PROP_STRING("model", VMAppleCfgState, model),
+ DEFINE_PROP_STRING("soc_name", VMAppleCfgState, soc_name),
+};
+
+static void vmapple_cfg_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->realize = vmapple_cfg_realize;
+ dc->desc = "VMApple Configuration Region";
+ device_class_set_props(dc, vmapple_cfg_properties);
+ rc->phases.hold = vmapple_cfg_reset;
+}
+
+static const TypeInfo vmapple_cfg_info = {
+ .name = TYPE_VMAPPLE_CFG,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VMAppleCfgState),
+ .instance_init = vmapple_cfg_init,
+ .class_init = vmapple_cfg_class_init,
+};
+
+static void vmapple_cfg_register_types(void)
+{
+ type_register_static(&vmapple_cfg_info);
+}
+
+type_init(vmapple_cfg_register_types)
diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build
new file mode 100644
index 0000000..23bc4c9
--- /dev/null
+++ b/hw/vmapple/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c'))
+system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c'))
+system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c'))
+system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c'))
+specific_ss.add(when: 'CONFIG_VMAPPLE', if_true: files('vmapple.c'))
diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events
new file mode 100644
index 0000000..93380ed
--- /dev/null
+++ b/hw/vmapple/trace-events
@@ -0,0 +1,21 @@
+# See docs/devel/tracing.rst for syntax documentation.
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# aes.c
+aes_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64
+aes_cmd_key_select_builtin(uint32_t ctx, uint32_t key_id, const char *direction, const char *cipher) "[%d] Selecting builtin key %d to %scrypt with %s"
+aes_cmd_key_select_new(uint32_t ctx, uint32_t key_len, const char *direction, const char *cipher) "[%d] Selecting new key size=%d to %scrypt with %s"
+aes_cmd_iv(uint32_t ctx, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] 0x%08x 0x%08x 0x%08x 0x%08x"
+aes_cmd_data(uint32_t key, uint32_t iv, uint64_t src, uint64_t dst, uint32_t len) "[key=%d iv=%d] src=0x%"PRIx64" dst=0x%"PRIx64" len=0x%x"
+aes_cmd_store_iv(uint32_t ctx, uint64_t addr, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] addr=0x%"PRIx64" -> 0x%08x 0x%08x 0x%08x 0x%08x"
+aes_cmd_flag(uint32_t raise, uint32_t flag_info) "raise=%d flag_info=0x%x"
+aes_fifo_process(uint32_t cmd, bool success) "cmd=%d success=%d"
+aes_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64
+aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64
+aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64
+aes_dump_data(const char *desc, const char *hex) "%s%s"
+
+# bdif.c
+bdif_read(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64
+bdif_write(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64
+bdif_vblk_read(const char *dev, uint64_t addr, uint64_t offset, uint32_t len, int r) "dev=%s addr=0x%"PRIx64" off=0x%"PRIx64" size=0x%x r=%d"
diff --git a/hw/vmapple/trace.h b/hw/vmapple/trace.h
new file mode 100644
index 0000000..d099d5e
--- /dev/null
+++ b/hw/vmapple/trace.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include "trace/trace-hw_vmapple.h"
diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c
new file mode 100644
index 0000000..532b564
--- /dev/null
+++ b/hw/vmapple/virtio-blk.c
@@ -0,0 +1,205 @@
+/*
+ * VMApple specific VirtIO Block implementation
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * VMApple uses almost standard VirtIO Block, but with a few key differences:
+ *
+ * - Different PCI device/vendor ID
+ * - An additional "type" identifier to differentiate AUX and Root volumes
+ * - An additional BARRIER command
+ */
+
+#include "qemu/osdep.h"
+#include "hw/vmapple/vmapple.h"
+#include "hw/virtio/virtio-blk.h"
+#include "hw/virtio/virtio-pci.h"
+#include "qemu/bswap.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+
+#define TYPE_VMAPPLE_VIRTIO_BLK "vmapple-virtio-blk"
+OBJECT_DECLARE_TYPE(VMAppleVirtIOBlk, VMAppleVirtIOBlkClass, VMAPPLE_VIRTIO_BLK)
+
+typedef struct VMAppleVirtIOBlkClass {
+ VirtIOBlkClass parent;
+
+ void (*get_config)(VirtIODevice *vdev, uint8_t *config);
+} VMAppleVirtIOBlkClass;
+
+typedef struct VMAppleVirtIOBlk {
+ VirtIOBlock parent_obj;
+
+ uint32_t apple_type;
+} VMAppleVirtIOBlk;
+
+/*
+ * vmapple-virtio-blk-pci: This extends VirtioPCIProxy.
+ */
+OBJECT_DECLARE_SIMPLE_TYPE(VMAppleVirtIOBlkPCI, VMAPPLE_VIRTIO_BLK_PCI)
+
+#define VIRTIO_BLK_T_APPLE_BARRIER 0x10000
+
+static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req,
+ MultiReqBuffer *mrb,
+ uint32_t type)
+{
+ switch (type) {
+ case VIRTIO_BLK_T_APPLE_BARRIER:
+ qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n",
+ __func__);
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ g_free(req);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * VMApple virtio-blk uses the same config format as normal virtio, with one
+ * exception: It adds an "apple type" specifier at the same location that
+ * the spec reserves for max_secure_erase_sectors. Let's hook into the
+ * get_config code path here, run it as usual and then patch in the apple type.
+ */
+static void vmapple_virtio_blk_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VMAppleVirtIOBlk *dev = VMAPPLE_VIRTIO_BLK(vdev);
+ VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_GET_CLASS(dev);
+ struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config;
+
+ vvbk->get_config(vdev, config);
+
+ g_assert(dev->parent_obj.config_size >= endof(struct virtio_blk_config, zoned));
+
+ /* Apple abuses the field for max_secure_erase_sectors as type id */
+ stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type);
+}
+
+static void vmapple_virtio_blk_class_init(ObjectClass *klass, const void *data)
+{
+ VirtIOBlkClass *vbk = VIRTIO_BLK_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_CLASS(klass);
+
+ vbk->handle_unknown_request = vmapple_virtio_blk_handle_unknown_request;
+ vvbk->get_config = vdc->get_config;
+ vdc->get_config = vmapple_virtio_blk_get_config;
+}
+
+static const TypeInfo vmapple_virtio_blk_info = {
+ .name = TYPE_VMAPPLE_VIRTIO_BLK,
+ .parent = TYPE_VIRTIO_BLK,
+ .instance_size = sizeof(VMAppleVirtIOBlk),
+ .class_size = sizeof(VMAppleVirtIOBlkClass),
+ .class_init = vmapple_virtio_blk_class_init,
+};
+
+/* PCI Devices */
+
+struct VMAppleVirtIOBlkPCI {
+ VirtIOPCIProxy parent_obj;
+
+ VMAppleVirtIOBlk vdev;
+ VMAppleVirtioBlkVariant variant;
+};
+
+static const Property vmapple_virtio_blk_pci_properties[] = {
+ DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
+ DEV_NVECTORS_UNSPECIFIED),
+ DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT("variant", VMAppleVirtIOBlkPCI, variant,
+ VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED),
+};
+
+static void vmapple_virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ ERRP_GUARD();
+ VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ VirtIOBlkConf *conf = &dev->vdev.parent_obj.conf;
+
+ if (dev->variant == VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED) {
+ error_setg(errp, "vmapple virtio block device variant unspecified");
+ error_append_hint(errp,
+ "Variant property must be set to 'aux' or 'root'.\n"
+ "Use a regular virtio-blk-pci device instead when "
+                          "neither is applicable.\n");
+ return;
+ }
+
+ if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
+ conf->num_queues = virtio_pci_optimal_num_queues(0);
+ }
+
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = conf->num_queues + 1;
+ }
+
+ /*
+ * We don't support zones, but we need the additional config space size.
+ * Let's just expose the feature so the rest of the virtio-blk logic
+ * allocates enough space for us. The guest will ignore zones anyway.
+ */
+ virtio_add_feature(&dev->vdev.parent_obj.host_features, VIRTIO_BLK_F_ZONED);
+ /* Propagate the apple type down to the virtio-blk device */
+ dev->vdev.apple_type = dev->variant;
+ /* and spawn the virtio-blk device */
+ qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+
+ /*
+ * The virtio-pci machinery adjusts its vendor/device ID based on whether
+ * we support modern or legacy virtio. Let's patch it back to the Apple
+ * identifiers here.
+ */
+ pci_config_set_vendor_id(vpci_dev->pci_dev.config, PCI_VENDOR_ID_APPLE);
+ pci_config_set_device_id(vpci_dev->pci_dev.config,
+ PCI_DEVICE_ID_APPLE_VIRTIO_BLK);
+}
+
+static void vmapple_virtio_blk_pci_class_init(ObjectClass *klass,
+ const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ device_class_set_props(dc, vmapple_virtio_blk_pci_properties);
+ k->realize = vmapple_virtio_blk_pci_realize;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_APPLE;
+ pcidev_k->device_id = PCI_DEVICE_ID_APPLE_VIRTIO_BLK;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
+}
+
+static void vmapple_virtio_blk_pci_instance_init(Object *obj)
+{
+ VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VMAPPLE_VIRTIO_BLK);
+}
+
+static const VirtioPCIDeviceTypeInfo vmapple_virtio_blk_pci_info = {
+ .generic_name = TYPE_VMAPPLE_VIRTIO_BLK_PCI,
+ .instance_size = sizeof(VMAppleVirtIOBlkPCI),
+ .instance_init = vmapple_virtio_blk_pci_instance_init,
+ .class_init = vmapple_virtio_blk_pci_class_init,
+};
+
+static void vmapple_virtio_blk_register_types(void)
+{
+ type_register_static(&vmapple_virtio_blk_info);
+ virtio_pci_types_register(&vmapple_virtio_blk_pci_info);
+}
+
+type_init(vmapple_virtio_blk_register_types)
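
For clarity, here is a minimal guest-side sketch (illustration only, not part of the patch) of how the reused config field could be consumed. It assumes the guest builds against a virtio header that already defines max_secure_erase_sectors in struct virtio_blk_config (e.g. a recent linux/virtio_blk.h) and that guest and host are little-endian, as on the Apple Silicon hosts this machine targets:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/virtio_blk.h>   /* assumed to provide struct virtio_blk_config */

/*
 * Read the VMApple volume type from a snapshot of the device's virtio-blk
 * config space. The device patches it over max_secure_erase_sectors in its
 * get_config hook, so it sits at that field's offset in the standard layout.
 */
static uint32_t vmapple_blk_read_type(const uint8_t *cfg, size_t cfg_len)
{
    size_t off = offsetof(struct virtio_blk_config, max_secure_erase_sectors);
    uint32_t v = 0;

    if (cfg_len < off + sizeof(v)) {
        return 0; /* config space too small to carry the type field */
    }
    memcpy(&v, cfg + off, sizeof(v));
    return v; /* little-endian equals host order on the targeted guests */
}
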
diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c
new file mode 100644
index 0000000..16e6110
--- /dev/null
+++ b/hw/vmapple/vmapple.c
@@ -0,0 +1,618 @@
+/*
+ * VMApple machine emulation
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * VMApple is the device model that the macOS built-in hypervisor called
+ * "Virtualization.framework" exposes to Apple Silicon macOS guests. The
+ * machine model in this file implements the same device model in QEMU, but
+ * does not use any code from Virtualization.framework.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/datadir.h"
+#include "qemu/error-report.h"
+#include "qemu/guest-random.h"
+#include "qemu/help-texts.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/units.h"
+#include "monitor/qdev.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "hw/loader.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "hw/usb.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/primecell.h"
+#include "hw/char/pl011.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/intc/arm_gicv3_common.h"
+#include "hw/misc/pvpanic.h"
+#include "hw/pci-host/gpex.h"
+#include "hw/usb/hcd-xhci-pci.h"
+#include "hw/virtio/virtio-pci.h"
+#include "hw/vmapple/vmapple.h"
+#include "net/net.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi/qapi-visit-common.h"
+#include "qobject/qlist.h"
+#include "standard-headers/linux/input.h"
+#include "system/hvf.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/system.h"
+
+struct VMAppleMachineState {
+ MachineState parent;
+
+ Notifier machine_done;
+ struct arm_boot_info bootinfo;
+ const MemMapEntry *memmap;
+ const int *irqmap;
+ DeviceState *gic;
+ DeviceState *cfg;
+ DeviceState *pvpanic;
+ Notifier powerdown_notifier;
+ PCIBus *bus;
+ MemoryRegion fw_mr;
+ MemoryRegion ecam_alias;
+ uint64_t uuid;
+};
+
+#define TYPE_VMAPPLE_MACHINE MACHINE_TYPE_NAME("vmapple")
+OBJECT_DECLARE_SIMPLE_TYPE(VMAppleMachineState, VMAPPLE_MACHINE)
+
+/* Number of external interrupt lines to configure the GIC with */
+#define NUM_IRQS 256
+
+enum {
+ VMAPPLE_FIRMWARE,
+ VMAPPLE_CONFIG,
+ VMAPPLE_MEM,
+ VMAPPLE_GIC_DIST,
+ VMAPPLE_GIC_REDIST,
+ VMAPPLE_UART,
+ VMAPPLE_RTC,
+ VMAPPLE_PCIE,
+ VMAPPLE_PCIE_MMIO,
+ VMAPPLE_PCIE_ECAM,
+ VMAPPLE_GPIO,
+ VMAPPLE_PVPANIC,
+ VMAPPLE_APV_GFX,
+ VMAPPLE_APV_IOSFC,
+ VMAPPLE_AES_1,
+ VMAPPLE_AES_2,
+ VMAPPLE_BDOOR,
+ VMAPPLE_MEMMAP_LAST,
+};
+
+static const MemMapEntry memmap[] = {
+ [VMAPPLE_FIRMWARE] = { 0x00100000, 0x00100000 },
+ [VMAPPLE_CONFIG] = { 0x00400000, 0x00010000 },
+
+ [VMAPPLE_GIC_DIST] = { 0x10000000, 0x00010000 },
+ [VMAPPLE_GIC_REDIST] = { 0x10010000, 0x00400000 },
+
+ [VMAPPLE_UART] = { 0x20010000, 0x00010000 },
+ [VMAPPLE_RTC] = { 0x20050000, 0x00001000 },
+ [VMAPPLE_GPIO] = { 0x20060000, 0x00001000 },
+ [VMAPPLE_PVPANIC] = { 0x20070000, 0x00000002 },
+ [VMAPPLE_BDOOR] = { 0x30000000, 0x00200000 },
+ [VMAPPLE_APV_GFX] = { 0x30200000, 0x00010000 },
+ [VMAPPLE_APV_IOSFC] = { 0x30210000, 0x00010000 },
+ [VMAPPLE_AES_1] = { 0x30220000, 0x00004000 },
+ [VMAPPLE_AES_2] = { 0x30230000, 0x00004000 },
+ [VMAPPLE_PCIE_ECAM] = { 0x40000000, 0x10000000 },
+ [VMAPPLE_PCIE_MMIO] = { 0x50000000, 0x1fff0000 },
+
+ /* Actual RAM size depends on configuration */
+ [VMAPPLE_MEM] = { 0x70000000ULL, GiB},
+};
+
+static const int irqmap[] = {
+ [VMAPPLE_UART] = 1,
+ [VMAPPLE_RTC] = 2,
+ [VMAPPLE_GPIO] = 0x5,
+ [VMAPPLE_APV_IOSFC] = 0x10,
+ [VMAPPLE_APV_GFX] = 0x11,
+ [VMAPPLE_AES_1] = 0x12,
+ [VMAPPLE_PCIE] = 0x20,
+};
+
+#define GPEX_NUM_IRQS 16
+
+static void create_bdif(VMAppleMachineState *vms, MemoryRegion *mem)
+{
+ DeviceState *bdif;
+ SysBusDevice *bdif_sb;
+ DriveInfo *di_aux = drive_get(IF_PFLASH, 0, 0);
+ DriveInfo *di_root = drive_get(IF_PFLASH, 0, 1);
+
+ if (!di_aux) {
+        error_report("No AUX device. Please specify one as a pflash drive.");
+ exit(1);
+ }
+
+ if (!di_root) {
+ /* Fall back to the first IF_VIRTIO device as root device */
+ di_root = drive_get(IF_VIRTIO, 0, 0);
+ }
+
+ if (!di_root) {
+        error_report("No root device. Please specify one as a virtio drive.");
+ exit(1);
+ }
+
+ /* PV backdoor device */
+ bdif = qdev_new(TYPE_VMAPPLE_BDIF);
+ bdif_sb = SYS_BUS_DEVICE(bdif);
+ sysbus_mmio_map(bdif_sb, 0, vms->memmap[VMAPPLE_BDOOR].base);
+
+ qdev_prop_set_drive(DEVICE(bdif), "aux", blk_by_legacy_dinfo(di_aux));
+ qdev_prop_set_drive(DEVICE(bdif), "root", blk_by_legacy_dinfo(di_root));
+
+ sysbus_realize_and_unref(bdif_sb, &error_fatal);
+}
+
+static void create_pvpanic(VMAppleMachineState *vms, MemoryRegion *mem)
+{
+ SysBusDevice *pvpanic;
+
+ vms->pvpanic = qdev_new(TYPE_PVPANIC_MMIO_DEVICE);
+ pvpanic = SYS_BUS_DEVICE(vms->pvpanic);
+ sysbus_mmio_map(pvpanic, 0, vms->memmap[VMAPPLE_PVPANIC].base);
+
+ sysbus_realize_and_unref(pvpanic, &error_fatal);
+}
+
+static bool create_cfg(VMAppleMachineState *vms, MemoryRegion *mem,
+ Error **errp)
+{
+ ERRP_GUARD();
+ SysBusDevice *cfg;
+ MachineState *machine = MACHINE(vms);
+ uint32_t rnd = 1;
+
+ vms->cfg = qdev_new(TYPE_VMAPPLE_CFG);
+ cfg = SYS_BUS_DEVICE(vms->cfg);
+ sysbus_mmio_map(cfg, 0, vms->memmap[VMAPPLE_CONFIG].base);
+
+ qemu_guest_getrandom_nofail(&rnd, sizeof(rnd));
+
+ qdev_prop_set_uint32(vms->cfg, "nr-cpus", machine->smp.cpus);
+ qdev_prop_set_uint64(vms->cfg, "ecid", vms->uuid);
+ qdev_prop_set_uint64(vms->cfg, "ram-size", machine->ram_size);
+ qdev_prop_set_uint32(vms->cfg, "rnd", rnd);
+
+ if (!sysbus_realize_and_unref(cfg, errp)) {
+ error_prepend(errp, "Error creating vmapple cfg device: ");
+ return false;
+ }
+
+ return true;
+}
+
+static void create_gfx(VMAppleMachineState *vms, MemoryRegion *mem)
+{
+ int irq_gfx = vms->irqmap[VMAPPLE_APV_GFX];
+ int irq_iosfc = vms->irqmap[VMAPPLE_APV_IOSFC];
+ SysBusDevice *gfx;
+
+ gfx = SYS_BUS_DEVICE(qdev_new("apple-gfx-mmio"));
+ sysbus_mmio_map(gfx, 0, vms->memmap[VMAPPLE_APV_GFX].base);
+ sysbus_mmio_map(gfx, 1, vms->memmap[VMAPPLE_APV_IOSFC].base);
+ sysbus_connect_irq(gfx, 0, qdev_get_gpio_in(vms->gic, irq_gfx));
+ sysbus_connect_irq(gfx, 1, qdev_get_gpio_in(vms->gic, irq_iosfc));
+ sysbus_realize_and_unref(gfx, &error_fatal);
+}
+
+static void create_aes(VMAppleMachineState *vms, MemoryRegion *mem)
+{
+ int irq = vms->irqmap[VMAPPLE_AES_1];
+ SysBusDevice *aes;
+
+ aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES));
+ sysbus_mmio_map(aes, 0, vms->memmap[VMAPPLE_AES_1].base);
+ sysbus_mmio_map(aes, 1, vms->memmap[VMAPPLE_AES_2].base);
+ sysbus_connect_irq(aes, 0, qdev_get_gpio_in(vms->gic, irq));
+ sysbus_realize_and_unref(aes, &error_fatal);
+}
+
+static int arm_gic_ppi_index(int cpu_nr, int ppi_index)
+{
+ return NUM_IRQS + cpu_nr * GIC_INTERNAL + ppi_index;
+}
+
+static void create_gic(VMAppleMachineState *vms, MemoryRegion *mem)
+{
+ MachineState *ms = MACHINE(vms);
+ /* We create a standalone GIC */
+ SysBusDevice *gicbusdev;
+ QList *redist_region_count;
+ int i;
+ unsigned int smp_cpus = ms->smp.cpus;
+
+ vms->gic = qdev_new(gicv3_class_name());
+ qdev_prop_set_uint32(vms->gic, "revision", 3);
+ qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
+ /*
+ * Note that the num-irq property counts both internal and external
+ * interrupts; there are always 32 of the former (mandated by GIC spec).
+ */
+ qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32);
+
+ uint32_t redist0_capacity =
+ vms->memmap[VMAPPLE_GIC_REDIST].size / GICV3_REDIST_SIZE;
+ uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);
+
+ redist_region_count = qlist_new();
+ qlist_append_int(redist_region_count, redist0_count);
+ qdev_prop_set_array(vms->gic, "redist-region-count", redist_region_count);
+
+ gicbusdev = SYS_BUS_DEVICE(vms->gic);
+ sysbus_realize_and_unref(gicbusdev, &error_fatal);
+ sysbus_mmio_map(gicbusdev, 0, vms->memmap[VMAPPLE_GIC_DIST].base);
+ sysbus_mmio_map(gicbusdev, 1, vms->memmap[VMAPPLE_GIC_REDIST].base);
+
+ /*
+     * Wire the output of each CPU's virtual timer to the appropriate GIC
+     * PPI input, and the GIC's IRQ and FIQ interrupt outputs to the CPU's
+     * inputs.
+ */
+ for (i = 0; i < smp_cpus; i++) {
+ DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
+
+ /* Map the virt timer to PPI 27 */
+ qdev_connect_gpio_out(cpudev, GTIMER_VIRT,
+ qdev_get_gpio_in(vms->gic,
+ arm_gic_ppi_index(i, 27)));
+
+ /* Map the GIC IRQ and FIQ lines to CPU */
+ sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+ sysbus_connect_irq(gicbusdev, i + smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ }
+}
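
A quick worked example of the interrupt wiring above (illustration only, assuming QEMU's usual GIC_INTERNAL == 32 and GICV3_REDIST_SIZE == 0x20000, i.e. two 64 KiB redistributor frames per CPU):

/*
 * Redistributor capacity: the 0x00400000-byte VMAPPLE_GIC_REDIST region
 * holds 0x00400000 / 0x20000 = 32 redistributors, matching the machine's
 * max_cpus of 32 set in vmapple_machine_class_init() further down.
 *
 * PPI routing: arm_gic_ppi_index(1, 27) = 256 + 1 * 32 + 27 = 315, so the
 * virtual timer PPI of CPU 1 is wired to GIC input line 315.
 */
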
+
+static void create_uart(const VMAppleMachineState *vms, int uart,
+ MemoryRegion *mem, Chardev *chr)
+{
+ hwaddr base = vms->memmap[uart].base;
+ int irq = vms->irqmap[uart];
+ DeviceState *dev = qdev_new(TYPE_PL011);
+ SysBusDevice *s = SYS_BUS_DEVICE(dev);
+
+ qdev_prop_set_chr(dev, "chardev", chr);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ memory_region_add_subregion(mem, base,
+ sysbus_mmio_get_region(s, 0));
+ sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));
+}
+
+static void create_rtc(const VMAppleMachineState *vms)
+{
+ hwaddr base = vms->memmap[VMAPPLE_RTC].base;
+ int irq = vms->irqmap[VMAPPLE_RTC];
+
+ sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq));
+}
+
+static DeviceState *gpio_key_dev;
+static void vmapple_powerdown_req(Notifier *n, void *opaque)
+{
+ /* use gpio Pin 3 for power button event */
+ qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1);
+}
+
+static void create_gpio_devices(const VMAppleMachineState *vms, int gpio,
+ MemoryRegion *mem)
+{
+ DeviceState *pl061_dev;
+ hwaddr base = vms->memmap[gpio].base;
+ int irq = vms->irqmap[gpio];
+ SysBusDevice *s;
+
+ pl061_dev = qdev_new("pl061");
+ /* Pull lines down to 0 if not driven by the PL061 */
+ qdev_prop_set_uint32(pl061_dev, "pullups", 0);
+ qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff);
+ s = SYS_BUS_DEVICE(pl061_dev);
+ sysbus_realize_and_unref(s, &error_fatal);
+ memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0));
+ sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));
+ gpio_key_dev = sysbus_create_simple("gpio-key", -1,
+ qdev_get_gpio_in(pl061_dev, 3));
+}
+
+static void vmapple_firmware_init(VMAppleMachineState *vms,
+ MemoryRegion *sysmem)
+{
+ hwaddr size = vms->memmap[VMAPPLE_FIRMWARE].size;
+ hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base;
+ const char *bios_name;
+ int image_size;
+ char *fname;
+
+ bios_name = MACHINE(vms)->firmware;
+ if (!bios_name) {
+ error_report("No firmware specified");
+ exit(1);
+ }
+
+ fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!fname) {
+ error_report("Could not find ROM image '%s'", bios_name);
+ exit(1);
+ }
+
+ memory_region_init_ram(&vms->fw_mr, NULL, "firmware", size, &error_fatal);
+ image_size = load_image_mr(fname, &vms->fw_mr);
+
+ g_free(fname);
+ if (image_size < 0) {
+ error_report("Could not load ROM image '%s'", bios_name);
+ exit(1);
+ }
+
+ memory_region_add_subregion(get_system_memory(), base, &vms->fw_mr);
+}
+
+static void create_pcie(VMAppleMachineState *vms)
+{
+ hwaddr base_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].base;
+ hwaddr size_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].size;
+ hwaddr base_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].base;
+ hwaddr size_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].size;
+ int irq = vms->irqmap[VMAPPLE_PCIE];
+ MemoryRegion *mmio_alias;
+ MemoryRegion *mmio_reg;
+ MemoryRegion *ecam_reg;
+ DeviceState *dev;
+ int i;
+ PCIHostState *pci;
+ DeviceState *usb_controller;
+ USBBus *usb_bus;
+
+ dev = qdev_new(TYPE_GPEX_HOST);
+ qdev_prop_set_uint32(dev, "num-irqs", GPEX_NUM_IRQS);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ /* Map only the first size_ecam bytes of ECAM space */
+ ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+ memory_region_init_alias(&vms->ecam_alias, OBJECT(dev), "pcie-ecam",
+ ecam_reg, 0, size_ecam);
+ memory_region_add_subregion(get_system_memory(), base_ecam,
+ &vms->ecam_alias);
+
+ /*
+     * Map the MMIO window [0x50000000, 0x6fff0000) in PCI space into
+     * system address space at the same addresses.
+ */
+ mmio_alias = g_new0(MemoryRegion, 1);
+ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
+ memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
+ mmio_reg, base_mmio, size_mmio);
+ memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
+
+ for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
+ qdev_get_gpio_in(vms->gic, irq + i));
+ gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
+ }
+
+ pci = PCI_HOST_BRIDGE(dev);
+ vms->bus = pci->bus;
+ g_assert(vms->bus);
+
+ while ((dev = qemu_create_nic_device("virtio-net-pci", true, NULL))) {
+ qdev_realize_and_unref(dev, BUS(vms->bus), &error_fatal);
+ }
+
+ if (defaults_enabled()) {
+ usb_controller = qdev_new(TYPE_QEMU_XHCI);
+ qdev_realize_and_unref(usb_controller, BUS(pci->bus), &error_fatal);
+
+ usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
+ &error_fatal));
+ usb_create_simple(usb_bus, "usb-kbd");
+ usb_create_simple(usb_bus, "usb-tablet");
+ }
+}
+
+static void vmapple_reset(void *opaque)
+{
+ VMAppleMachineState *vms = opaque;
+ hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base;
+
+ cpu_set_pc(first_cpu, base);
+}
+
+static void mach_vmapple_init(MachineState *machine)
+{
+ VMAppleMachineState *vms = VMAPPLE_MACHINE(machine);
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ const CPUArchIdList *possible_cpus;
+ MemoryRegion *sysmem = get_system_memory();
+ int n;
+ unsigned int smp_cpus = machine->smp.cpus;
+ unsigned int max_cpus = machine->smp.max_cpus;
+
+ vms->memmap = memmap;
+ machine->usb = true;
+
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ assert(possible_cpus->len == max_cpus);
+ for (n = 0; n < possible_cpus->len; n++) {
+ Object *cpu;
+ CPUState *cs;
+
+ if (n >= smp_cpus) {
+ break;
+ }
+
+ cpu = object_new(possible_cpus->cpus[n].type);
+ object_property_set_int(cpu, "mp-affinity",
+ possible_cpus->cpus[n].arch_id, &error_fatal);
+
+ cs = CPU(cpu);
+ cs->cpu_index = n;
+
+ numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpu),
+ &error_fatal);
+
+ if (object_property_find(cpu, "has_el3")) {
+ object_property_set_bool(cpu, "has_el3", false, &error_fatal);
+ }
+ if (object_property_find(cpu, "has_el2")) {
+ object_property_set_bool(cpu, "has_el2", false, &error_fatal);
+ }
+ object_property_set_int(cpu, "psci-conduit", QEMU_PSCI_CONDUIT_HVC,
+ &error_fatal);
+
+ /* Secondary CPUs start in PSCI powered-down state */
+ if (n > 0) {
+ object_property_set_bool(cpu, "start-powered-off", true,
+ &error_fatal);
+ }
+
+ object_property_set_link(cpu, "memory", OBJECT(sysmem), &error_abort);
+ qdev_realize(DEVICE(cpu), NULL, &error_fatal);
+ object_unref(cpu);
+ }
+
+ memory_region_add_subregion(sysmem, vms->memmap[VMAPPLE_MEM].base,
+ machine->ram);
+
+ create_gic(vms, sysmem);
+ create_bdif(vms, sysmem);
+ create_pvpanic(vms, sysmem);
+ create_aes(vms, sysmem);
+ create_gfx(vms, sysmem);
+ create_uart(vms, VMAPPLE_UART, sysmem, serial_hd(0));
+ create_rtc(vms);
+ create_pcie(vms);
+
+ create_gpio_devices(vms, VMAPPLE_GPIO, sysmem);
+
+ vmapple_firmware_init(vms, sysmem);
+ create_cfg(vms, sysmem, &error_fatal);
+
+ /* connect powerdown request */
+ vms->powerdown_notifier.notify = vmapple_powerdown_req;
+ qemu_register_powerdown_notifier(&vms->powerdown_notifier);
+
+ vms->bootinfo.ram_size = machine->ram_size;
+ vms->bootinfo.board_id = -1;
+ vms->bootinfo.loader_start = vms->memmap[VMAPPLE_MEM].base;
+ vms->bootinfo.skip_dtb_autoload = true;
+ vms->bootinfo.firmware_loaded = true;
+ arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo);
+
+ qemu_register_reset(vmapple_reset, vms);
+}
+
+static CpuInstanceProperties
+vmapple_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+ assert(cpu_index < possible_cpus->len);
+ return possible_cpus->cpus[cpu_index].props;
+}
+
+
+static int64_t vmapple_get_default_cpu_node_id(const MachineState *ms, int idx)
+{
+ return idx % ms->numa_state->num_nodes;
+}
+
+static const CPUArchIdList *vmapple_possible_cpu_arch_ids(MachineState *ms)
+{
+ int n;
+ unsigned int max_cpus = ms->smp.max_cpus;
+
+ if (ms->possible_cpus) {
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
+
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (n = 0; n < ms->possible_cpus->len; n++) {
+ ms->possible_cpus->cpus[n].type = ms->cpu_type;
+ ms->possible_cpus->cpus[n].arch_id =
+ arm_build_mp_affinity(n, GICV3_TARGETLIST_BITS);
+ ms->possible_cpus->cpus[n].props.has_thread_id = true;
+ ms->possible_cpus->cpus[n].props.thread_id = n;
+ }
+ return ms->possible_cpus;
+}
+
+static GlobalProperty vmapple_compat_defaults[] = {
+ { TYPE_VIRTIO_PCI, "disable-legacy", "on" },
+ /*
+     * macOS XHCI driver attempts to schedule events onto event rings 1 & 2
+ * even when (as here) there is no MSI(-X) support. Disabling interrupter
+ * mapping in the XHCI controller works around the problem.
+ */
+ { TYPE_XHCI_PCI, "conditional-intr-mapping", "on" },
+};
+
+static void vmapple_machine_class_init(ObjectClass *oc, const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->init = mach_vmapple_init;
+ mc->max_cpus = 32;
+ mc->block_default_type = IF_VIRTIO;
+ mc->no_cdrom = 1;
+ mc->pci_allow_0_address = true;
+ mc->minimum_page_bits = 12;
+ mc->possible_cpu_arch_ids = vmapple_possible_cpu_arch_ids;
+ mc->cpu_index_to_instance_props = vmapple_cpu_index_to_props;
+ mc->default_cpu_type = ARM_CPU_TYPE_NAME("host");
+ mc->get_default_cpu_node_id = vmapple_get_default_cpu_node_id;
+ mc->default_ram_id = "mach-vmapple.ram";
+ mc->desc = "Apple aarch64 Virtual Machine";
+
+ compat_props_add(mc->compat_props, vmapple_compat_defaults,
+ G_N_ELEMENTS(vmapple_compat_defaults));
+}
+
+static void vmapple_instance_init(Object *obj)
+{
+ VMAppleMachineState *vms = VMAPPLE_MACHINE(obj);
+
+ vms->irqmap = irqmap;
+
+ object_property_add_uint64_ptr(obj, "uuid", &vms->uuid,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_set_description(obj, "uuid", "Machine UUID (SDOM)");
+}
+
+static const TypeInfo vmapple_machine_info = {
+ .name = TYPE_VMAPPLE_MACHINE,
+ .parent = TYPE_MACHINE,
+ .instance_size = sizeof(VMAppleMachineState),
+ .class_init = vmapple_machine_class_init,
+ .instance_init = vmapple_instance_init,
+};
+
+static void machvmapple_machine_init(void)
+{
+ type_register_static(&vmapple_machine_info);
+}
+type_init(machvmapple_machine_init);
+
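
Pulling the board code together, a minimal invocation sketch (illustration only, not taken from the patch or its documentation; file names are placeholders): the machine requires firmware via -bios, an AUX image as the first pflash drive, a root image as the second pflash drive (or as the first virtio drive), and it defaults to the "host" CPU type, so it is intended to run under hvf:

qemu-system-aarch64 -machine vmapple -accel hvf -m 4G -smp 4 \
    -bios vmapple-firmware.bin \
    -drive if=pflash,unit=0,format=raw,file=aux.img \
    -drive if=pflash,unit=1,format=raw,file=root.img \
    -serial stdio

A bootable guest will likely need more than this, e.g. the vmapple-virtio-blk-pci devices introduced earlier and a display for the apple-gfx-mmio device; the machine also exposes a "uuid" property for the ECID reported through the cfg device.
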
diff --git a/hw/watchdog/allwinner-wdt.c b/hw/watchdog/allwinner-wdt.c
index d35711c..8fcd776 100644
--- a/hw/watchdog/allwinner-wdt.c
+++ b/hw/watchdog/allwinner-wdt.c
@@ -28,7 +28,7 @@
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/watchdog/allwinner-wdt.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "migration/vmstate.h"
/* WDT registers */
@@ -275,7 +275,7 @@ static void allwinner_wdt_write(void *opaque, hwaddr offset,
static const MemoryRegionOps allwinner_wdt_ops = {
.read = allwinner_wdt_read,
.write = allwinner_wdt_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
@@ -348,7 +348,7 @@ static void allwinner_wdt_realize(DeviceState *dev, Error **errp)
ptimer_transaction_commit(s->timer);
}
-static void allwinner_wdt_class_init(ObjectClass *klass, void *data)
+static void allwinner_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -358,7 +358,7 @@ static void allwinner_wdt_class_init(ObjectClass *klass, void *data)
dc->vmsd = &allwinner_wdt_vmstate;
}
-static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, void *data)
+static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, const void *data)
{
AwWdtClass *awc = AW_WDT_CLASS(klass);
@@ -371,7 +371,7 @@ static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, void *data)
awc->get_intv_value = allwinner_wdt_sun4i_get_intv_value;
}
-static void allwinner_wdt_sun6i_class_init(ObjectClass *klass, void *data)
+static void allwinner_wdt_sun6i_class_init(ObjectClass *klass, const void *data)
{
AwWdtClass *awc = AW_WDT_CLASS(klass);
diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c
index 3091e5c..6a8d07c 100644
--- a/hw/watchdog/cmsdk-apb-watchdog.c
+++ b/hw/watchdog/cmsdk-apb-watchdog.c
@@ -12,8 +12,8 @@
/*
* This is a model of the "APB watchdog" which is part of the Cortex-M
* System Design Kit (CMSDK) and documented in the Cortex-M System
- * Design Kit Technical Reference Manual (ARM DDI0479C):
- * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
+ * Design Kit Technical Reference Manual (ARM DDI0479):
+ * https://developer.arm.com/documentation/ddi0479/
*
* We also support the variant of this device found in the TI
* Stellaris/Luminary boards and documented in:
@@ -25,7 +25,7 @@
#include "trace.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
@@ -196,16 +196,13 @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset,
switch (offset) {
case A_WDOGLOAD:
- /*
- * Reset the load value and the current count, and make sure
- * we're counting.
- */
+ /* Reset the load value and the current count. */
ptimer_transaction_begin(s->timer);
ptimer_set_limit(s->timer, value, 1);
- ptimer_run(s->timer, 0);
ptimer_transaction_commit(s->timer);
break;
- case A_WDOGCONTROL:
+ case A_WDOGCONTROL: {
+ uint32_t prev_control = s->control;
if (s->is_luminary && 0 != (R_WDOGCONTROL_INTEN_MASK & s->control)) {
/*
* The Luminary version of this device ignores writes to
@@ -215,8 +212,25 @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset,
break;
}
s->control = value & R_WDOGCONTROL_VALID_MASK;
+ if (R_WDOGCONTROL_INTEN_MASK & (s->control ^ prev_control)) {
+ ptimer_transaction_begin(s->timer);
+ if (R_WDOGCONTROL_INTEN_MASK & s->control) {
+ /*
+ * Set HIGH to enable the counter and the interrupt. Reloads
+ * the counter from the value in WDOGLOAD when the interrupt
+ * is enabled, after previously being disabled.
+ */
+ ptimer_set_count(s->timer, ptimer_get_limit(s->timer));
+ ptimer_run(s->timer, 0);
+ } else {
+ /* Or LOW to disable the counter and interrupt. */
+ ptimer_stop(s->timer);
+ }
+ ptimer_transaction_commit(s->timer);
+ }
cmsdk_apb_watchdog_update(s);
break;
+ }
case A_WDOGINTCLR:
s->intstatus = 0;
ptimer_transaction_begin(s->timer);
@@ -305,8 +319,14 @@ static void cmsdk_apb_watchdog_reset(DeviceState *dev)
s->resetstatus = 0;
/* Set the limit and the count */
ptimer_transaction_begin(s->timer);
+ /*
+     * We need to stop the ptimer before setting its limit to the reset
+     * value: if we stopped it only after setting a new limit, the stop would
+     * recalculate the count based on the current time (if the timer was
+     * running) and we would not end up with the proper reset value.
+ */
+ ptimer_stop(s->timer);
ptimer_set_limit(s->timer, 0xffffffff, 1);
- ptimer_run(s->timer, 0);
ptimer_transaction_commit(s->timer);
}
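
To make the behavioural change above concrete, a sketch of the guest-visible programming model it implements (illustration only, not part of the patch; it assumes the usual CMSDK APB watchdog register map with WDOGLOAD at offset 0x0 and WDOGCONTROL at offset 0x8, INTEN as bit 0, and WDOGLOCK already unlocked):

#include <stdint.h>

static void wdog_start(volatile uint32_t *wdog, uint32_t reload)
{
    wdog[0x0 / 4] = reload;   /* latch the reload value; counter stays idle */
    wdog[0x8 / 4] |= 0x1;     /* INTEN 0 -> 1: reload from WDOGLOAD and run */
}

static void wdog_stop(volatile uint32_t *wdog)
{
    wdog[0x8 / 4] &= ~0x1u;   /* INTEN 1 -> 0: stop the counter and interrupt */
}

(On the Luminary variant INTEN cannot be cleared this way, as the existing code above already notes.)
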
@@ -374,13 +394,13 @@ static const VMStateDescription cmsdk_apb_watchdog_vmstate = {
}
};
-static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, void *data)
+static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = cmsdk_apb_watchdog_realize;
dc->vmsd = &cmsdk_apb_watchdog_vmstate;
- dc->reset = cmsdk_apb_watchdog_reset;
+ device_class_set_legacy_reset(dc, cmsdk_apb_watchdog_reset);
}
static const TypeInfo cmsdk_apb_watchdog_info = {
diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c
index d437535..ce84849 100644
--- a/hw/watchdog/sbsa_gwdt.c
+++ b/hw/watchdog/sbsa_gwdt.c
@@ -16,8 +16,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/reset.h"
-#include "sysemu/watchdog.h"
+#include "system/reset.h"
+#include "system/watchdog.h"
#include "hw/qdev-properties.h"
#include "hw/watchdog/sbsa_gwdt.h"
#include "qemu/timer.h"
@@ -174,7 +174,6 @@ static void sbsa_gwdt_write(void *opaque, hwaddr offset, uint64_t data,
qemu_log_mask(LOG_GUEST_ERROR, "bad address in control frame write :"
" 0x%x\n", (int)offset);
}
- return;
}
static void wdt_sbsa_gwdt_reset(DeviceState *dev)
@@ -262,7 +261,7 @@ static void wdt_sbsa_gwdt_realize(DeviceState *dev, Error **errp)
dev);
}
-static Property wdt_sbsa_gwdt_props[] = {
+static const Property wdt_sbsa_gwdt_props[] = {
/*
* Timer frequency in Hz. This must match the frequency used by
* the CPU's generic timer. Default 62.5Hz matches QEMU's legacy
@@ -270,15 +269,14 @@ static Property wdt_sbsa_gwdt_props[] = {
*/
DEFINE_PROP_UINT64("clock-frequency", struct SBSA_GWDTState, freq,
62500000),
- DEFINE_PROP_END_OF_LIST(),
};
-static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, void *data)
+static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = wdt_sbsa_gwdt_realize;
- dc->reset = wdt_sbsa_gwdt_reset;
+ device_class_set_legacy_reset(dc, wdt_sbsa_gwdt_reset);
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
dc->vmsd = &vmstate_sbsa_gwdt;
diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c
index 2bb1d3c..5b3f50d 100644
--- a/hw/watchdog/spapr_watchdog.c
+++ b/hw/watchdog/spapr_watchdog.c
@@ -249,7 +249,7 @@ static void spapr_wdt_realize(DeviceState *dev, Error **errp)
&w->leave_others, OBJ_PROP_FLAG_READ);
}
-static void spapr_wdt_class_init(ObjectClass *oc, void *data)
+static void spapr_wdt_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c
index 9550461..0721373 100644
--- a/hw/watchdog/watchdog.c
+++ b/hw/watchdog/watchdog.c
@@ -26,8 +26,8 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-run-state.h"
#include "qapi/qapi-events-run-state.h"
-#include "sysemu/runstate.h"
-#include "sysemu/watchdog.h"
+#include "system/runstate.h"
+#include "system/watchdog.h"
#include "hw/nmi.h"
#include "qemu/help_option.h"
#include "trace.h"
@@ -85,7 +85,7 @@ void watchdog_perform_action(void)
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c
index 75685c5..3022643 100644
--- a/hw/watchdog/wdt_aspeed.c
+++ b/hw/watchdog/wdt_aspeed.c
@@ -13,7 +13,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/timer.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/watchdog/wdt_aspeed.h"
@@ -51,11 +51,20 @@
#define WDT_TIMEOUT_CLEAR (0x14 / 4)
#define WDT_RESTART_MAGIC 0x4755
+#define WDT_SW_RESET_ENABLE 0xAEEDF123
#define AST2600_SCU_RESET_CONTROL1 (0x40 / 4)
#define SCU_RESET_CONTROL1 (0x04 / 4)
#define SCU_RESET_SDRAM BIT(0)
+static bool aspeed_wdt_is_soc_reset_mode(const AspeedWDTState *s)
+{
+ uint32_t mode;
+
+ mode = extract32(s->regs[WDT_CTRL], 5, 2);
+ return (mode == WDT_CTRL_RESET_MODE_SOC);
+}
+
static bool aspeed_wdt_is_enabled(const AspeedWDTState *s)
{
return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE;
@@ -199,19 +208,23 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data,
case WDT_TIMEOUT_STATUS:
case WDT_TIMEOUT_CLEAR:
case WDT_RESET_MASK2:
- case WDT_SW_RESET_CTRL:
case WDT_SW_RESET_MASK1:
case WDT_SW_RESET_MASK2:
qemu_log_mask(LOG_UNIMP,
"%s: uninmplemented write at offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
break;
+ case WDT_SW_RESET_CTRL:
+ if (aspeed_wdt_is_soc_reset_mode(s) &&
+ (data == WDT_SW_RESET_ENABLE)) {
+ watchdog_perform_action();
+ }
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
__func__, offset);
}
- return;
}
static const VMStateDescription vmstate_aspeed_wdt = {
@@ -278,7 +291,8 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, aspeed_wdt_timer_expired, dev);
- /* FIXME: This setting should be derived from the SCU hw strapping
+ /*
+ * FIXME: This setting should be derived from the SCU hw strapping
* register SCU70
*/
s->pclk_freq = PCLK_HZ;
@@ -288,19 +302,18 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
}
-static Property aspeed_wdt_properties[] = {
+static const Property aspeed_wdt_properties[] = {
DEFINE_PROP_LINK("scu", AspeedWDTState, scu, TYPE_ASPEED_SCU,
AspeedSCUState *),
- DEFINE_PROP_END_OF_LIST(),
};
-static void aspeed_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "ASPEED Watchdog Controller";
dc->realize = aspeed_wdt_realize;
- dc->reset = aspeed_wdt_reset;
+ device_class_set_legacy_reset(dc, aspeed_wdt_reset);
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
dc->vmsd = &vmstate_aspeed_wdt;
device_class_set_props(dc, aspeed_wdt_properties);
@@ -316,7 +329,7 @@ static const TypeInfo aspeed_wdt_info = {
.abstract = true,
};
-static void aspeed_2400_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_2400_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);
@@ -353,7 +366,7 @@ static void aspeed_2500_wdt_reset_pulse(AspeedWDTState *s, uint32_t property)
}
}
-static void aspeed_2500_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_2500_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);
@@ -376,7 +389,7 @@ static const TypeInfo aspeed_2500_wdt_info = {
.class_init = aspeed_2500_wdt_class_init,
};
-static void aspeed_2600_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_2600_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);
@@ -399,7 +412,7 @@ static const TypeInfo aspeed_2600_wdt_info = {
.class_init = aspeed_2600_wdt_class_init,
};
-static void aspeed_1030_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_1030_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);
@@ -422,7 +435,7 @@ static const TypeInfo aspeed_1030_wdt_info = {
.class_init = aspeed_1030_wdt_class_init,
};
-static void aspeed_2700_wdt_class_init(ObjectClass *klass, void *data)
+static void aspeed_2700_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);
diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c
index 1b73b16..1275353 100644
--- a/hw/watchdog/wdt_diag288.c
+++ b/hw/watchdog/wdt_diag288.c
@@ -12,8 +12,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/reset.h"
-#include "sysemu/watchdog.h"
+#include "system/reset.h"
+#include "system/watchdog.h"
#include "qemu/timer.h"
#include "hw/watchdog/wdt_diag288.h"
#include "migration/vmstate.h"
@@ -108,14 +108,14 @@ static void wdt_diag288_unrealize(DeviceState *dev)
timer_free(diag288->timer);
}
-static void wdt_diag288_class_init(ObjectClass *klass, void *data)
+static void wdt_diag288_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
DIAG288Class *diag288 = DIAG288_CLASS(klass);
dc->realize = wdt_diag288_realize;
dc->unrealize = wdt_diag288_unrealize;
- dc->reset = wdt_diag288_reset;
+ device_class_set_legacy_reset(dc, wdt_diag288_reset);
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
dc->vmsd = &vmstate_diag288;
diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c
index 8bce050..bb8a276 100644
--- a/hw/watchdog/wdt_i6300esb.c
+++ b/hw/watchdog/wdt_i6300esb.c
@@ -23,7 +23,7 @@
#include "qemu/module.h"
#include "qemu/timer.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/pci/pci_device.h"
#include "migration/vmstate.h"
#include "qom/object.h"
@@ -457,7 +457,7 @@ static void i6300esb_exit(PCIDevice *dev)
timer_free(d->timer);
}
-static void i6300esb_class_init(ObjectClass *klass, void *data)
+static void i6300esb_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -469,7 +469,7 @@ static void i6300esb_class_init(ObjectClass *klass, void *data)
k->vendor_id = PCI_VENDOR_ID_INTEL;
k->device_id = PCI_DEVICE_ID_INTEL_ESB_9;
k->class_id = PCI_CLASS_SYSTEM_OTHER;
- dc->reset = i6300esb_reset;
+ device_class_set_legacy_reset(dc, i6300esb_reset);
dc->vmsd = &vmstate_i6300esb;
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
dc->desc = "Intel 6300ESB";
@@ -480,7 +480,7 @@ static const TypeInfo i6300esb_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(I6300State),
.class_init = i6300esb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c
index eea8da6..51a26a4 100644
--- a/hw/watchdog/wdt_ib700.c
+++ b/hw/watchdog/wdt_ib700.c
@@ -22,7 +22,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/timer.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "hw/isa/isa.h"
#include "migration/vmstate.h"
#include "qom/object.h"
@@ -128,12 +128,12 @@ static void wdt_ib700_reset(DeviceState *dev)
timer_del(s->timer);
}
-static void wdt_ib700_class_init(ObjectClass *klass, void *data)
+static void wdt_ib700_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = wdt_ib700_realize;
- dc->reset = wdt_ib700_reset;
+ device_class_set_legacy_reset(dc, wdt_ib700_reset);
dc->vmsd = &vmstate_ib700;
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
dc->desc = "iBASE 700";
diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c
index 6452fc4..10151a1 100644
--- a/hw/watchdog/wdt_imx2.c
+++ b/hw/watchdog/wdt_imx2.c
@@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/module.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
@@ -39,7 +39,6 @@ static void imx2_wdt_expired(void *opaque)
/* Perform watchdog action if watchdog is enabled */
if (s->wcr & IMX2_WDT_WCR_WDE) {
- s->wrsr = IMX2_WDT_WRSR_TOUT;
watchdog_perform_action();
}
}
@@ -282,19 +281,18 @@ static void imx2_wdt_realize(DeviceState *dev, Error **errp)
}
}
-static Property imx2_wdt_properties[] = {
+static const Property imx2_wdt_properties[] = {
DEFINE_PROP_BOOL("pretimeout-support", IMX2WdtState, pretimeout_support,
false),
- DEFINE_PROP_END_OF_LIST()
};
-static void imx2_wdt_class_init(ObjectClass *klass, void *data)
+static void imx2_wdt_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, imx2_wdt_properties);
dc->realize = imx2_wdt_realize;
- dc->reset = imx2_wdt_reset;
+ device_class_set_legacy_reset(dc, imx2_wdt_reset);
dc->vmsd = &vmstate_imx2_wdt;
dc->desc = "i.MX2 watchdog timer";
set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
diff --git a/hw/xen/meson.build b/hw/xen/meson.build
index d887fa9..a1850e7 100644
--- a/hw/xen/meson.build
+++ b/hw/xen/meson.build
@@ -9,12 +9,16 @@ system_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files(
system_ss.add(when: ['CONFIG_XEN', xen], if_true: files(
'xen-operations.c',
+),
+if_false: files(
+ 'xen_stubs.c',
))
xen_specific_ss = ss.source_set()
xen_specific_ss.add(files(
'xen-mapcache.c',
'xen-hvm-common.c',
+ 'xen-pvh-common.c',
))
if have_xen_pci_passthrough
xen_specific_ss.add(files(
diff --git a/hw/xen/trace-events b/hw/xen/trace-events
index d1b27f6..b67942d 100644
--- a/hw/xen/trace-events
+++ b/hw/xen/trace-events
@@ -38,7 +38,7 @@ xen_device_remove_watch(const char *type, char *name, const char *node, const ch
xs_node_create(const char *node) "%s"
xs_node_destroy(const char *node) "%s"
xs_node_vprintf(char *path, char *value) "%s %s"
-xs_node_vscanf(char *path, char *value) "%s %s"
+xs_node_read(const char *path, const char *value) "%s %s"
xs_node_watch(char *path) "%s"
xs_node_unwatch(char *path) "%s"
@@ -64,6 +64,10 @@ destroy_hvm_domain_cannot_acquire_handle(void) "Cannot acquire xenctrl handle"
destroy_hvm_domain_failed_action(const char *action, int sts, char *errno_s) "xc_domain_shutdown failed to issue %s, sts %d, %s"
destroy_hvm_domain_action(int xen_domid, const char *action) "Issued domain %d %s"
+# xen-pvh-common.c
+xen_create_virtio_mmio_devices(int i, int irq, uint64_t base) "Created virtio-mmio device %d: irq %d base 0x%"PRIx64
+xen_enable_tpm(uint64_t addr) "Connected tpmdev at address 0x%"PRIx64
+
# xen-mapcache.c
xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
diff --git a/hw/xen/xen-bus-helper.c b/hw/xen/xen-bus-helper.c
index b2b2cc9..288fad4 100644
--- a/hw/xen/xen-bus-helper.c
+++ b/hw/xen/xen-bus-helper.c
@@ -105,25 +105,22 @@ int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
{
- char *path, *value;
+ char *value;
int rc;
- path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
- g_strdup(key);
- value = qemu_xen_xs_read(h, tid, path, NULL);
-
- trace_xs_node_vscanf(path, value);
+ if (node && strlen(node) != 0) {
+ value = xs_node_read(h, tid, NULL, errp, "%s/%s", node, key);
+ } else {
+ value = xs_node_read(h, tid, NULL, errp, "%s", key);
+ }
if (value) {
rc = vsscanf(value, fmt, ap);
} else {
- error_setg_errno(errp, errno, "failed to read from '%s'",
- path);
rc = EOF;
}
free(value);
- g_free(path);
return rc;
}
@@ -142,6 +139,28 @@ int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
return rc;
}
+char *xs_node_read(struct qemu_xs_handle *h, xs_transaction_t tid,
+ unsigned int *len, Error **errp,
+ const char *path_fmt, ...)
+{
+ char *path, *value;
+ va_list ap;
+
+ va_start(ap, path_fmt);
+ path = g_strdup_vprintf(path_fmt, ap);
+ va_end(ap);
+
+ value = qemu_xen_xs_read(h, tid, path, len);
+ trace_xs_node_read(path, value);
+ if (!value) {
+ error_setg_errno(errp, errno, "failed to read from '%s'", path);
+ }
+
+ g_free(path);
+
+ return value;
+}
+
struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
const char *key, xs_watch_fn fn,
void *opaque, Error **errp)
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index 95b207a..6bd2e54 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -18,8 +18,8 @@
#include "hw/xen/xen-bus-helper.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "sysemu/sysemu.h"
+#include "qobject/qdict.h"
+#include "system/system.h"
#include "net/net.h"
#include "trace.h"
@@ -156,8 +156,8 @@ again:
!strcmp(key[i], "hotplug-status"))
continue;
- if (xs_node_scanf(xenbus->xsh, tid, path, key[i], NULL, "%ms",
- &val) == 1) {
+ val = xs_node_read(xenbus->xsh, tid, NULL, NULL, "%s/%s", path, key[i]);
+ if (val) {
qdict_put_str(opts, key[i], val);
free(val);
}
@@ -353,10 +353,10 @@ static void xen_bus_realize(BusState *bus, Error **errp)
xs_node_watch(xenbus->xsh, node, key, xen_bus_backend_changed,
xenbus, &local_err);
if (local_err) {
- /* This need not be treated as a hard error so don't propagate */
- error_reportf_err(local_err,
- "failed to set up '%s' enumeration watch: ",
- type[i]);
+ warn_reportf_err(local_err,
+ "failed to set up '%s' enumeration watch: ",
+ type[i]);
+ local_err = NULL;
}
g_free(node);
@@ -380,7 +380,7 @@ static void xen_bus_unplug_request(HotplugHandler *hotplug,
xen_device_unplug(xendev, errp);
}
-static void xen_bus_class_init(ObjectClass *class, void *data)
+static void xen_bus_class_init(ObjectClass *class, const void *data)
{
BusClass *bus_class = BUS_CLASS(class);
HotplugHandlerClass *hotplug_class = HOTPLUG_HANDLER_CLASS(class);
@@ -399,7 +399,7 @@ static const TypeInfo xen_bus_type_info = {
.instance_size = sizeof(XenBus),
.class_size = sizeof(XenBusClass),
.class_init = xen_bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
@@ -650,6 +650,16 @@ int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
return rc;
}
+char *xen_device_frontend_read(XenDevice *xendev, const char *key)
+{
+ XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
+
+ g_assert(xenbus->xsh);
+
+ return xs_node_read(xenbus->xsh, XBT_NULL, NULL, NULL, "%s/%s",
+ xendev->frontend_path, key);
+}
+
static void xen_device_frontend_set_state(XenDevice *xendev,
enum xenbus_state state,
bool publish)
@@ -1092,13 +1102,12 @@ unrealize:
xen_device_unrealize(dev);
}
-static Property xen_device_props[] = {
+static const Property xen_device_props[] = {
DEFINE_PROP_UINT16("frontend-id", XenDevice, frontend_id,
DOMID_INVALID),
- DEFINE_PROP_END_OF_LIST()
};
-static void xen_device_class_init(ObjectClass *class, void *data)
+static void xen_device_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dev_class = DEVICE_CLASS(class);
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index 3a9d6f9..78e0bc8 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -1,14 +1,21 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
+#include "exec/target_long.h"
#include "exec/target_page.h"
#include "trace.h"
+#include "hw/hw.h"
#include "hw/pci/pci_host.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen-bus.h"
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/xen.h"
+#include "system/xen-mapcache.h"
MemoryRegion xen_memory, xen_grants;
@@ -667,6 +674,8 @@ static int xen_map_ioreq_server(XenIOState *state)
xen_pfn_t ioreq_pfn;
xen_pfn_t bufioreq_pfn;
evtchn_port_t bufioreq_evtchn;
+ unsigned long num_frames = 1;
+ unsigned long frame = 1;
int rc;
/*
@@ -675,65 +684,85 @@ static int xen_map_ioreq_server(XenIOState *state)
*/
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
+
+ if (state->has_bufioreq) {
+ frame = 0;
+ num_frames = 2;
+ }
state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
XENMEM_resource_ioreq_server,
- state->ioservid, 0, 2,
+ state->ioservid,
+ frame, num_frames,
&addr,
PROT_READ | PROT_WRITE, 0);
if (state->fres != NULL) {
trace_xen_map_resource_ioreq(state->ioservid, addr);
- state->buffered_io_page = addr;
- state->shared_page = addr + XC_PAGE_SIZE;
+ state->shared_page = addr;
+ if (state->has_bufioreq) {
+ state->buffered_io_page = addr;
+ state->shared_page = addr + XC_PAGE_SIZE;
+ }
} else if (errno != EOPNOTSUPP) {
error_report("failed to map ioreq server resources: error %d handle=%p",
errno, xen_xc);
return -1;
}
- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
- (state->shared_page == NULL) ?
- &ioreq_pfn : NULL,
- (state->buffered_io_page == NULL) ?
- &bufioreq_pfn : NULL,
- &bufioreq_evtchn);
- if (rc < 0) {
- error_report("failed to get ioreq server info: error %d handle=%p",
- errno, xen_xc);
- return rc;
- }
+ /*
+ * If we fail to map the shared page with xenforeignmemory_map_resource()
+ * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info()
+ * to provide the addresses to map the shared page and/or to get the
+ * event-channel port for buffered ioreqs.
+ */
+ if (state->shared_page == NULL || state->has_bufioreq) {
+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
+ (state->shared_page == NULL) ?
+ &ioreq_pfn : NULL,
+ (state->has_bufioreq &&
+ state->buffered_io_page == NULL) ?
+ &bufioreq_pfn : NULL,
+ &bufioreq_evtchn);
+ if (rc < 0) {
+ error_report("failed to get ioreq server info: error %d handle=%p",
+ errno, xen_xc);
+ return rc;
+ }
- if (state->shared_page == NULL) {
- trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
+ if (state->shared_page == NULL) {
+ trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &ioreq_pfn, NULL);
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &ioreq_pfn, NULL);
+ }
if (state->shared_page == NULL) {
error_report("map shared IO page returned error %d handle=%p",
errno, xen_xc);
}
- }
- if (state->buffered_io_page == NULL) {
- trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
+ if (state->has_bufioreq && state->buffered_io_page == NULL) {
+ trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &bufioreq_pfn,
- NULL);
- if (state->buffered_io_page == NULL) {
- error_report("map buffered IO page returned error %d", errno);
- return -1;
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &bufioreq_pfn,
+ NULL);
+ if (state->buffered_io_page == NULL) {
+ error_report("map buffered IO page returned error %d", errno);
+ return -1;
+ }
}
}
- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
+ if (state->shared_page == NULL ||
+ (state->has_bufioreq && state->buffered_io_page == NULL)) {
return -1;
}
- trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);
-
- state->bufioreq_remote_port = bufioreq_evtchn;
+ if (state->has_bufioreq) {
+ trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);
+ state->bufioreq_remote_port = bufioreq_evtchn;
+ }
return 0;
}
@@ -830,14 +859,15 @@ static void xen_do_ioreq_register(XenIOState *state,
state->ioreq_local_port[i] = rc;
}
- rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
- state->bufioreq_remote_port);
- if (rc == -1) {
- error_report("buffered evtchn bind error %d", errno);
- goto err;
+ if (state->has_bufioreq) {
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ state->bufioreq_remote_port);
+ if (rc == -1) {
+ error_report("buffered evtchn bind error %d", errno);
+ goto err;
+ }
+ state->bufioreq_local_port = rc;
}
- state->bufioreq_local_port = rc;
-
/* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
@@ -865,6 +895,7 @@ err:
}
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ uint8_t handle_bufioreq,
const MemoryListener *xen_memory_listener)
{
int rc;
@@ -883,7 +914,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
goto err;
}
- rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
+ state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF;
+ rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid);
if (!rc) {
xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
} else {
diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c
index 5514184..5ed53f8 100644
--- a/hw/xen/xen-legacy-backend.c
+++ b/hw/xen/xen-legacy-backend.c
@@ -147,24 +147,6 @@ void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
}
}
-int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
- bool to_domain,
- XenGrantCopySegment segs[],
- unsigned int nr_segs)
-{
- int rc;
-
- assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
-
- rc = qemu_xen_gnttab_grant_copy(xendev->gnttabdev, to_domain, xen_domid,
- segs, nr_segs, NULL);
- if (rc) {
- xen_pv_printf(xendev, 0, "xengnttab_grant_copy failed: %s\n",
- strerror(-rc));
- }
- return rc;
-}
-
/*
* get xen backend device, allocate a new one if it doesn't exist.
*/
@@ -181,7 +163,7 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom,
/* init new xendev */
xendev = g_malloc0(ops->size);
- object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND);
+ object_initialize(xendev, ops->size, TYPE_XENBACKEND);
OBJECT(xendev)->free = g_free;
qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev),
&error_fatal);
@@ -653,29 +635,22 @@ int xen_be_bind_evtchn(struct XenLegacyDevice *xendev)
}
-static Property xendev_properties[] = {
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void xendev_class_init(ObjectClass *klass, void *data)
+static void xendev_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- device_class_set_props(dc, xendev_properties);
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- /* xen-backend devices can be plugged/unplugged dynamically */
- dc->user_creatable = true;
dc->bus_type = TYPE_XENSYSBUS;
}
static const TypeInfo xendev_type_info = {
.name = TYPE_XENBACKEND,
- .parent = TYPE_DEVICE,
+ .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
.class_init = xendev_class_init,
- .instance_size = sizeof(struct XenLegacyDevice),
+ .instance_size = sizeof(XenLegacyDevice),
};
-static void xen_sysbus_class_init(ObjectClass *klass, void *data)
+static void xen_sysbus_class_init(ObjectClass *klass, const void *data)
{
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@@ -686,28 +661,15 @@ static const TypeInfo xensysbus_info = {
.name = TYPE_XENSYSBUS,
.parent = TYPE_BUS,
.class_init = xen_sysbus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
};
-static Property xen_sysdev_properties[] = {
- {/* end of property list */},
-};
-
-static void xen_sysdev_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- device_class_set_props(dc, xen_sysdev_properties);
-}
-
static const TypeInfo xensysdev_info = {
.name = TYPE_XENSYSDEV,
.parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(SysBusDevice),
- .class_init = xen_sysdev_class_init,
};
static void xenbe_register_types(void)
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index 18ba7b1..e31d379 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -18,8 +18,8 @@
#include "hw/xen/xen_native.h"
#include "qemu/bitmap.h"
-#include "sysemu/runstate.h"
-#include "sysemu/xen-mapcache.h"
+#include "system/runstate.h"
+#include "system/xen-mapcache.h"
#include "trace.h"
#include <xenevtchn.h>
@@ -75,7 +75,8 @@ typedef struct MapCache {
} MapCache;
static MapCache *mapcache;
-static MapCache *mapcache_grants;
+static MapCache *mapcache_grants_ro;
+static MapCache *mapcache_grants_rw;
static xengnttab_handle *xen_region_gnttabdev;
static inline void mapcache_lock(MapCache *mc)
@@ -176,9 +177,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
* Grant mappings must use XC_PAGE_SIZE granularity since we can't
* map anything beyond the number of pages granted to us.
*/
- mapcache_grants = xen_map_cache_init_single(f, opaque,
- XC_PAGE_SHIFT,
- max_mcache_size);
+ mapcache_grants_ro = xen_map_cache_init_single(f, opaque,
+ XC_PAGE_SHIFT,
+ max_mcache_size);
+ mapcache_grants_rw = xen_map_cache_init_single(f, opaque,
+ XC_PAGE_SHIFT,
+ max_mcache_size);
setrlimit(RLIMIT_AS, &rlimit_as);
}
@@ -376,12 +380,12 @@ tryagain:
entry = &mc->entry[address_index % mc->nr_buckets];
- while (entry && (lock || entry->lock) && entry->vaddr_base &&
- (entry->paddr_index != address_index || entry->size != cache_size ||
+ while (entry && (!entry->vaddr_base ||
+ entry->paddr_index != address_index || entry->size != cache_size ||
!test_bits(address_offset >> XC_PAGE_SHIFT,
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping))) {
- if (!free_entry && !entry->lock) {
+ if (!free_entry && (!entry->lock || !entry->vaddr_base)) {
free_entry = entry;
free_pentry = pentry;
}
@@ -456,9 +460,13 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
bool is_write)
{
bool grant = xen_mr_is_grants(mr);
- MapCache *mc = grant ? mapcache_grants : mapcache;
+ MapCache *mc = mapcache;
uint8_t *p;
+ if (grant) {
+ mc = is_write ? mapcache_grants_rw : mapcache_grants_ro;
+ }
+
if (grant && !lock) {
/*
* Grants are only supported via address_space_map(). Anything
@@ -523,7 +531,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
if (addr == RAM_ADDR_INVALID) {
- addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
+ addr = xen_ram_addr_from_mapcache_single(mapcache_grants_ro, ptr);
+ }
+ if (addr == RAM_ADDR_INVALID) {
+ addr = xen_ram_addr_from_mapcache_single(mapcache_grants_rw, ptr);
}
return addr;
@@ -626,7 +637,8 @@ static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
{
xen_invalidate_map_cache_entry_single(mapcache, buffer);
- xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
+ xen_invalidate_map_cache_entry_single(mapcache_grants_ro, buffer);
+ xen_invalidate_map_cache_entry_single(mapcache_grants_rw, buffer);
}
static void xen_invalidate_map_cache_entry_bh(void *opaque)
@@ -700,7 +712,6 @@ void xen_invalidate_map_cache(void)
bdrv_drain_all();
xen_invalidate_map_cache_single(mapcache);
- xen_invalidate_map_cache_single(mapcache_grants);
}
static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c
new file mode 100644
index 0000000..b93ff80
--- /dev/null
+++ b/hw/xen/xen-pvh-common.c
@@ -0,0 +1,399 @@
+/*
+ * QEMU Xen PVH machine - common code.
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/units.h"
+#include "qapi/visitor.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "system/tpm.h"
+#include "system/tpm_backend.h"
+#include "system/runstate.h"
+#include "hw/xen/xen-pvh-common.h"
+#include "trace.h"
+
+static const MemoryListener xen_memory_listener = {
+ .region_add = xen_region_add,
+ .region_del = xen_region_del,
+ .log_start = NULL,
+ .log_stop = NULL,
+ .log_sync = NULL,
+ .log_global_start = NULL,
+ .log_global_stop = NULL,
+ .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
+};
+
+static void xen_pvh_init_ram(XenPVHMachineState *s,
+ MemoryRegion *sysmem)
+{
+ MachineState *ms = MACHINE(s);
+ ram_addr_t block_len, ram_size[2];
+
+ if (ms->ram_size <= s->cfg.ram_low.size) {
+ ram_size[0] = ms->ram_size;
+ ram_size[1] = 0;
+ block_len = s->cfg.ram_low.base + ram_size[0];
+ } else {
+ ram_size[0] = s->cfg.ram_low.size;
+ ram_size[1] = ms->ram_size - s->cfg.ram_low.size;
+ block_len = s->cfg.ram_high.base + ram_size[1];
+ }
+
+ memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len,
+ &error_fatal);
+
+ memory_region_init_alias(&s->ram.low, NULL, "xen.ram.lo", &xen_memory,
+ s->cfg.ram_low.base, ram_size[0]);
+ memory_region_add_subregion(sysmem, s->cfg.ram_low.base, &s->ram.low);
+ if (ram_size[1] > 0) {
+ memory_region_init_alias(&s->ram.high, NULL, "xen.ram.hi", &xen_memory,
+ s->cfg.ram_high.base, ram_size[1]);
+ memory_region_add_subregion(sysmem, s->cfg.ram_high.base, &s->ram.high);
+ }
+
+ /* Setup support for grants. */
+ memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len,
+ &error_fatal);
+ memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants);
+}
+
+static void xen_set_irq(void *opaque, int irq, int level)
+{
+ if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) {
+ error_report("xendevicemodel_set_irq_level failed");
+ }
+}
+
+static void xen_create_virtio_mmio_devices(XenPVHMachineState *s)
+{
+ int i;
+
+ /*
+ * We create the transports in reverse order. Since qbus_realize()
+ * prepends (not appends) new child buses, the decrementing loop below will
+ * create a list of virtio-mmio buses with increasing base addresses.
+ *
+ * When a -device option is processed from the command line,
+ * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
+ * order.
+ *
+ * This is what the Xen tools expect.
+ */
+ for (i = s->cfg.virtio_mmio_num - 1; i >= 0; i--) {
+ hwaddr base = s->cfg.virtio_mmio.base + i * s->cfg.virtio_mmio.size;
+ qemu_irq irq = qemu_allocate_irq(xen_set_irq, NULL,
+ s->cfg.virtio_mmio_irq_base + i);
+
+ sysbus_create_simple("virtio-mmio", base, irq);
+
+ trace_xen_create_virtio_mmio_devices(i,
+ s->cfg.virtio_mmio_irq_base + i,
+ base);
+ }
+}
+
+#ifdef CONFIG_TPM
+static void xen_enable_tpm(XenPVHMachineState *s)
+{
+ Error *errp = NULL;
+ DeviceState *dev;
+ SysBusDevice *busdev;
+
+ TPMBackend *be = qemu_find_tpm_be("tpm0");
+ if (be == NULL) {
+ error_report("Couldn't find tmp0 backend");
+ return;
+ }
+ dev = qdev_new(TYPE_TPM_TIS_SYSBUS);
+ object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp);
+ object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp);
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_realize_and_unref(busdev, &error_fatal);
+ sysbus_mmio_map(busdev, 0, s->cfg.tpm.base);
+
+ trace_xen_enable_tpm(s->cfg.tpm.base);
+}
+#endif
+
+/*
+ * We use the GPEX PCIe controller with its internal INTX PCI interrupt
+ * swizzling. This swizzling is emulated in QEMU and routes all INTX
+ * interrupts from endpoints down to only 4 INTX interrupts.
+ * See include/hw/pci/pci.h : pci_swizzle()
+ */
+static inline void xenpvh_gpex_init(XenPVHMachineState *s,
+ XenPVHMachineClass *xpc,
+ MemoryRegion *sysmem)
+{
+ MemoryRegion *ecam_reg;
+ MemoryRegion *mmio_reg;
+ DeviceState *dev;
+ int i;
+
+ object_initialize_child(OBJECT(s), "gpex", &s->pci.gpex,
+ TYPE_GPEX_HOST);
+ dev = DEVICE(&s->pci.gpex);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+ memory_region_add_subregion(sysmem, s->cfg.pci_ecam.base, ecam_reg);
+
+ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
+
+ if (s->cfg.pci_mmio.size) {
+ memory_region_init_alias(&s->pci.mmio_alias, OBJECT(dev), "pcie-mmio",
+ mmio_reg,
+ s->cfg.pci_mmio.base, s->cfg.pci_mmio.size);
+ memory_region_add_subregion(sysmem, s->cfg.pci_mmio.base,
+ &s->pci.mmio_alias);
+ }
+
+ if (s->cfg.pci_mmio_high.size) {
+ memory_region_init_alias(&s->pci.mmio_high_alias, OBJECT(dev),
+ "pcie-mmio-high",
+ mmio_reg, s->cfg.pci_mmio_high.base, s->cfg.pci_mmio_high.size);
+ memory_region_add_subregion(sysmem, s->cfg.pci_mmio_high.base,
+ &s->pci.mmio_high_alias);
+ }
+
+ /*
+ * PVH implementations with PCI enabled must provide set_pci_intx_irq()
+ * and optionally an implementation of set_pci_link_route().
+ */
+ assert(xpc->set_pci_intx_irq);
+
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ qemu_irq irq = qemu_allocate_irq(xpc->set_pci_intx_irq, s, i);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
+ gpex_set_irq_num(GPEX_HOST(dev), i, s->cfg.pci_intx_irq_base + i);
+ if (xpc->set_pci_link_route) {
+ xpc->set_pci_link_route(i, s->cfg.pci_intx_irq_base + i);
+ }
+ }
+}
+
+static void xen_pvh_init(MachineState *ms)
+{
+ XenPVHMachineState *s = XEN_PVH_MACHINE(ms);
+ XenPVHMachineClass *xpc = XEN_PVH_MACHINE_GET_CLASS(s);
+ MemoryRegion *sysmem = get_system_memory();
+
+ if (ms->ram_size == 0) {
+ warn_report("%s: ram size not specified. QEMU machine started"
+ " without IOREQ (no emulated devices including virtio)",
+ MACHINE_CLASS(object_get_class(OBJECT(ms)))->desc);
+ return;
+ }
+
+ xen_pvh_init_ram(s, sysmem);
+ xen_register_ioreq(&s->ioreq, ms->smp.max_cpus,
+ xpc->handle_bufioreq,
+ &xen_memory_listener);
+
+ if (s->cfg.virtio_mmio_num) {
+ xen_create_virtio_mmio_devices(s);
+ }
+
+#ifdef CONFIG_TPM
+ if (xpc->has_tpm) {
+ if (s->cfg.tpm.base) {
+ xen_enable_tpm(s);
+ } else {
+ warn_report("tpm-base-addr is not set. TPM will not be enabled");
+ }
+ }
+#endif
+
+ /* Non-zero pci-ecam-size enables PCI. */
+ if (s->cfg.pci_ecam.size) {
+ if (s->cfg.pci_ecam.size != 256 * MiB) {
+ error_report("pci-ecam-size only supports values 0 or 0x10000000");
+ exit(EXIT_FAILURE);
+ }
+ if (!s->cfg.pci_intx_irq_base) {
+ error_report("PCI enabled but pci-intx-irq-base not set");
+ exit(EXIT_FAILURE);
+ }
+
+ xenpvh_gpex_init(s, xpc, sysmem);
+ }
+
+ /* Call the implementation specific init. */
+ if (xpc->init) {
+ xpc->init(ms);
+ }
+}
+
+#define XEN_PVH_PROP_MEMMAP_SETTER(n, f) \
+static void xen_pvh_set_ ## n ## _ ## f(Object *obj, Visitor *v, \
+ const char *name, void *opaque, \
+ Error **errp) \
+{ \
+ XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); \
+ uint64_t value; \
+ \
+ if (!visit_type_size(v, name, &value, errp)) { \
+ return; \
+ } \
+ xp->cfg.n.f = value; \
+}
+
+#define XEN_PVH_PROP_MEMMAP_GETTER(n, f) \
+static void xen_pvh_get_ ## n ## _ ## f(Object *obj, Visitor *v, \
+ const char *name, void *opaque, \
+ Error **errp) \
+{ \
+ XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); \
+ uint64_t value = xp->cfg.n.f; \
+ \
+ visit_type_uint64(v, name, &value, errp); \
+}
+
+#define XEN_PVH_PROP_MEMMAP_BASE(n) \
+ XEN_PVH_PROP_MEMMAP_SETTER(n, base) \
+ XEN_PVH_PROP_MEMMAP_GETTER(n, base) \
+
+#define XEN_PVH_PROP_MEMMAP_SIZE(n) \
+ XEN_PVH_PROP_MEMMAP_SETTER(n, size) \
+ XEN_PVH_PROP_MEMMAP_GETTER(n, size)
+
+#define XEN_PVH_PROP_MEMMAP(n) \
+ XEN_PVH_PROP_MEMMAP_BASE(n) \
+ XEN_PVH_PROP_MEMMAP_SIZE(n)
+
+XEN_PVH_PROP_MEMMAP(ram_low)
+XEN_PVH_PROP_MEMMAP(ram_high)
+/* TPM only has a base-addr option. */
+XEN_PVH_PROP_MEMMAP_BASE(tpm)
+XEN_PVH_PROP_MEMMAP(virtio_mmio)
+XEN_PVH_PROP_MEMMAP(pci_ecam)
+XEN_PVH_PROP_MEMMAP(pci_mmio)
+XEN_PVH_PROP_MEMMAP(pci_mmio_high)
+
+static void xen_pvh_set_pci_intx_irq_base(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ XenPVHMachineState *xp = XEN_PVH_MACHINE(obj);
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ xp->cfg.pci_intx_irq_base = value;
+}
+
+static void xen_pvh_get_pci_intx_irq_base(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ XenPVHMachineState *xp = XEN_PVH_MACHINE(obj);
+ uint32_t value = xp->cfg.pci_intx_irq_base;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+void xen_pvh_class_setup_common_props(XenPVHMachineClass *xpc)
+{
+ ObjectClass *oc = OBJECT_CLASS(xpc);
+ MachineClass *mc = MACHINE_CLASS(xpc);
+
+#define OC_MEMMAP_PROP_BASE(c, prop_name, name) \
+do { \
+ object_class_property_add(c, prop_name "-base", "uint64_t", \
+ xen_pvh_get_ ## name ## _base, \
+ xen_pvh_set_ ## name ## _base, NULL, NULL); \
+ object_class_property_set_description(oc, prop_name "-base", \
+ "Set base address for " prop_name); \
+} while (0)
+
+#define OC_MEMMAP_PROP_SIZE(c, prop_name, name) \
+do { \
+ object_class_property_add(c, prop_name "-size", "uint64_t", \
+ xen_pvh_get_ ## name ## _size, \
+ xen_pvh_set_ ## name ## _size, NULL, NULL); \
+ object_class_property_set_description(oc, prop_name "-size", \
+ "Set memory range size for " prop_name); \
+} while (0)
+
+#define OC_MEMMAP_PROP(c, prop_name, name) \
+do { \
+ OC_MEMMAP_PROP_BASE(c, prop_name, name); \
+ OC_MEMMAP_PROP_SIZE(c, prop_name, name); \
+} while (0)
+
+ /*
+ * We provide memmap properties to allow Xen to move things to other
+ * addresses, for example when users need to accommodate the memory-map
+ * for 1:1 mapped devices/memory.
+ */
+ OC_MEMMAP_PROP(oc, "ram-low", ram_low);
+ OC_MEMMAP_PROP(oc, "ram-high", ram_high);
+
+ if (xpc->has_virtio_mmio) {
+ OC_MEMMAP_PROP(oc, "virtio-mmio", virtio_mmio);
+ }
+
+ if (xpc->has_pci) {
+ OC_MEMMAP_PROP(oc, "pci-ecam", pci_ecam);
+ OC_MEMMAP_PROP(oc, "pci-mmio", pci_mmio);
+ OC_MEMMAP_PROP(oc, "pci-mmio-high", pci_mmio_high);
+
+ object_class_property_add(oc, "pci-intx-irq-base", "uint32_t",
+ xen_pvh_get_pci_intx_irq_base,
+ xen_pvh_set_pci_intx_irq_base,
+ NULL, NULL);
+ object_class_property_set_description(oc, "pci-intx-irq-base",
+ "Set PCI INTX interrupt base line.");
+ }
+
+#ifdef CONFIG_TPM
+ if (xpc->has_tpm) {
+ object_class_property_add(oc, "tpm-base-addr", "uint64_t",
+ xen_pvh_get_tpm_base,
+ xen_pvh_set_tpm_base,
+ NULL, NULL);
+ object_class_property_set_description(oc, "tpm-base-addr",
+ "Set Base address for TPM device.");
+
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
+ }
+#endif
+}
+
+static void xen_pvh_class_init(ObjectClass *oc, const void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->init = xen_pvh_init;
+
+ mc->desc = "Xen PVH machine";
+ mc->max_cpus = 1;
+ mc->default_machine_opts = "accel=xen";
+ /* Set to zero to make sure that the real ram size is passed. */
+ mc->default_ram_size = 0;
+}
+
+static const TypeInfo xen_pvh_info = {
+ .name = TYPE_XEN_PVH_MACHINE,
+ .parent = TYPE_MACHINE,
+ .abstract = true,
+ .instance_size = sizeof(XenPVHMachineState),
+ .class_size = sizeof(XenPVHMachineClass),
+ .class_init = xen_pvh_class_init,
+};
+
+static void xen_pvh_register_types(void)
+{
+ type_register_static(&xen_pvh_info);
+}
+
+type_init(xen_pvh_register_types);
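
For reference, the XEN_PVH_PROP_MEMMAP_* and OC_MEMMAP_PROP_* macros above only stamp out per-field visitor accessors and register them as class properties. A sketch of what XEN_PVH_PROP_MEMMAP_SETTER(ram_low, base) together with OC_MEMMAP_PROP_BASE(oc, "ram-low", ram_low) expands to, derived from the macro bodies above and shown purely as a reading aid:

    static void xen_pvh_set_ram_low_base(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
    {
        XenPVHMachineState *xp = XEN_PVH_MACHINE(obj);
        uint64_t value;

        if (!visit_type_size(v, name, &value, errp)) {
            return;
        }
        xp->cfg.ram_low.base = value;
    }

    /* OC_MEMMAP_PROP_BASE(oc, "ram-low", ram_low) then registers the pair: */
    object_class_property_add(oc, "ram-low-base", "uint64_t",
                              xen_pvh_get_ram_low_base,
                              xen_pvh_set_ram_low_base, NULL, NULL);
    object_class_property_set_description(oc, "ram-low-base",
                                          "Set base address for ram-low");

A concrete PVH machine that calls xen_pvh_class_setup_common_props() therefore ends up exposing -machine properties such as ram-low-base, ram-low-size and friends.
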
diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c
index 2150869..5698cc7 100644
--- a/hw/xen/xen_devconfig.c
+++ b/hw/xen/xen_devconfig.c
@@ -1,8 +1,8 @@
#include "qemu/osdep.h"
#include "hw/xen/xen-legacy-backend.h"
#include "qemu/option.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
+#include "system/blockdev.h"
+#include "system/system.h"
/* ------------------------------------------------------------- */
@@ -66,11 +66,3 @@ int xen_config_dev_vkbd(int vdev)
xen_config_dev_dirs("vkbd", "vkbd", vdev, fe, be, sizeof(fe));
return xen_config_dev_all(fe, be);
}
-
-int xen_config_dev_console(int vdev)
-{
- char fe[256], be[256];
-
- xen_config_dev_dirs("console", "console", vdev, fe, be, sizeof(fe));
- return xen_config_dev_all(fe, be);
-}
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 3635d1b..9d16644 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -766,6 +766,57 @@ static void xen_pt_destroy(PCIDevice *d) {
}
/* init */
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 42000
+static bool xen_pt_need_gsi(void)
+{
+ FILE *fp;
+ int len;
+ /*
+ * The longest guest_type value is "PVH" + '\n' + '\0', i.e. 5 bytes,
+ * so size the buffer at twice that length.
+ */
+ char type[10];
+ const char *guest_type = "/sys/hypervisor/guest_type";
+
+ fp = fopen(guest_type, "r");
+ if (!fp) {
+ error_report("Cannot open %s: %s", guest_type, strerror(errno));
+ return false;
+ }
+
+ if (fgets(type, sizeof(type), fp)) {
+ len = strlen(type);
+ if (len) {
+ type[len - 1] = '\0';
+ if (!strcmp(type, "PVH")) {
+ fclose(fp);
+ return true;
+ }
+ }
+ }
+
+ fclose(fp);
+ return false;
+}
+
+static int xen_pt_map_pirq_for_gsi(PCIDevice *d, int *pirq)
+{
+ int gsi;
+ XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
+
+ gsi = xc_pcidev_get_gsi(xen_xc,
+ PCI_SBDF(s->real_device.domain,
+ s->real_device.bus,
+ s->real_device.dev,
+ s->real_device.func));
+ if (gsi >= 0) {
+ return xc_physdev_map_pirq_gsi(xen_xc, xen_domid, gsi, pirq);
+ }
+
+ return gsi;
+}
+#endif
+
static void xen_pt_realize(PCIDevice *d, Error **errp)
{
ERRP_GUARD();
@@ -847,7 +898,16 @@ static void xen_pt_realize(PCIDevice *d, Error **errp)
goto out;
}
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 42000
+ if (xen_pt_need_gsi()) {
+ rc = xen_pt_map_pirq_for_gsi(d, &pirq);
+ } else {
+ rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
+ }
+#else
rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
+#endif
+
if (rc < 0) {
XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n",
machine_irq, pirq, errno);
@@ -931,10 +991,9 @@ static void xen_pt_unregister_device(PCIDevice *d)
xen_pt_destroy(d);
}
-static Property xen_pci_passthrough_properties[] = {
+static const Property xen_pci_passthrough_properties[] = {
DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
- DEFINE_PROP_END_OF_LIST(),
};
static void xen_pci_passthrough_instance_init(Object *obj)
@@ -988,7 +1047,7 @@ static void xen_igd_clear_slot(DeviceState *qdev, Error **errp)
xpdc->pci_qdev_realize(qdev, errp);
}
-static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
+static void xen_pci_passthrough_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -1020,7 +1079,7 @@ static const TypeInfo xen_pci_passthrough_info = {
.class_init = xen_pci_passthrough_class_init,
.class_size = sizeof(XenPTDeviceClass),
.instance_init = xen_pci_passthrough_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ INTERFACE_PCIE_DEVICE },
{ },
diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c
index 6c2e3f4..2c0cec9 100644
--- a/hw/xen/xen_pt_graphics.c
+++ b/hw/xen/xen_pt_graphics.c
@@ -347,7 +347,7 @@ static const IGDDeviceIDInfo igd_combo_id_infos[] = {
{0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */
};
-static void isa_bridge_class_init(ObjectClass *klass, void *data)
+static void isa_bridge_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -363,7 +363,7 @@ static const TypeInfo isa_bridge_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PCIDevice),
.class_init = isa_bridge_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c
index c5ad71e..fe95b62 100644
--- a/hw/xen/xen_pvdev.c
+++ b/hw/xen/xen_pvdev.c
@@ -22,6 +22,7 @@
#include "qemu/main-loop.h"
#include "hw/qdev-core.h"
#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen-bus-helper.h"
#include "hw/xen/xen_pvdev.h"
/* private */
@@ -81,12 +82,9 @@ int xenstore_write_str(const char *base, const char *node, const char *val)
char *xenstore_read_str(const char *base, const char *node)
{
- char abspath[XEN_BUFSIZE];
- unsigned int len;
char *str, *ret = NULL;
- snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
- str = qemu_xen_xs_read(xenstore, 0, abspath, &len);
+ str = xs_node_read(xenstore, 0, NULL, NULL, "%s/%s", base, node);
if (str != NULL) {
/* move to qemu-allocated memory to make sure
* callers can safely g_free() stuff. */
@@ -275,7 +273,7 @@ void xen_pv_del_xendev(struct XenLegacyDevice *xendev)
QTAILQ_REMOVE(&xendevs, xendev, next);
- qdev_unplug(&xendev->qdev, NULL);
+ qdev_unplug(DEVICE(xendev), NULL);
}
void xen_pv_insert_xendev(struct XenLegacyDevice *xendev)
diff --git a/hw/xen/xen_stubs.c b/hw/xen/xen_stubs.c
new file mode 100644
index 0000000..5e565df
--- /dev/null
+++ b/hw/xen/xen_stubs.c
@@ -0,0 +1,51 @@
+/*
+ * Various stubs for xen functions
+ *
+ * These functions are only reachable when xen_enabled() is true. This file is
+ * linked only when CONFIG_XEN is not set, so they should never be called.
+ *
+ * Copyright (c) 2025 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/xen.h"
+#include "system/xen-mapcache.h"
+
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
+{
+ g_assert_not_reached();
+}
+
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ struct MemoryRegion *mr, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+bool xen_mr_is_memory(MemoryRegion *mr)
+{
+ g_assert_not_reached();
+}
+
+void xen_invalidate_map_cache_entry(uint8_t *buffer)
+{
+ g_assert_not_reached();
+}
+
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
+{
+ g_assert_not_reached();
+}
+
+uint8_t *xen_map_cache(MemoryRegion *mr,
+ hwaddr phys_addr,
+ hwaddr size,
+ ram_addr_t ram_addr_offset,
+ uint8_t lock,
+ bool dma,
+ bool is_write)
+{
+ g_assert_not_reached();
+}
diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c
index 24395f4..99c0249 100644
--- a/hw/xenpv/xen_machine_pv.c
+++ b/hw/xenpv/xen_machine_pv.c
@@ -27,8 +27,8 @@
#include "hw/boards.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/sysemu.h"
+#include "system/block-backend.h"
+#include "system/system.h"
static void xen_init_pv(MachineState *machine)
{
diff --git a/hw/xtensa/Kconfig b/hw/xtensa/Kconfig
index fc5c785..1f0492d 100644
--- a/hw/xtensa/Kconfig
+++ b/hw/xtensa/Kconfig
@@ -18,4 +18,4 @@ config XTENSA_XTFPGA
select DEVICE_TREE
select OPENCORES_ETH
select PFLASH_CFI01
- select SERIAL
+ select SERIAL_MM
diff --git a/hw/xtensa/bootparam.h b/hw/xtensa/bootparam.h
index f57ff85..4418c78 100644
--- a/hw/xtensa/bootparam.h
+++ b/hw/xtensa/bootparam.h
@@ -2,6 +2,7 @@
#define HW_XTENSA_BOOTPARAM_H
#include "exec/cpu-common.h"
+#include "exec/tswap.h"
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c
index 8cef88c..e388531 100644
--- a/hw/xtensa/pic_cpu.c
+++ b/hw/xtensa/pic_cpu.c
@@ -27,6 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/cpu-interrupt.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "qemu/timer.h"
diff --git a/hw/xtensa/sim.c b/hw/xtensa/sim.c
index 2160e61..49d17e7 100644
--- a/hw/xtensa/sim.c
+++ b/hw/xtensa/sim.c
@@ -27,12 +27,12 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/reset.h"
-#include "sysemu/sysemu.h"
+#include "system/reset.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "elf.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/error-report.h"
#include "xtensa_memory.h"
#include "xtensa_sim.h"
@@ -100,7 +100,8 @@ void xtensa_sim_load_kernel(XtensaCPU *cpu, MachineState *machine)
if (kernel_filename) {
uint64_t elf_entry;
int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu,
- &elf_entry, NULL, NULL, NULL, TARGET_BIG_ENDIAN,
+ &elf_entry, NULL, NULL, NULL,
+ TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB,
EM_XTENSA, 0, 0);
if (success > 0) {
diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c
index 5310a88..b10866c 100644
--- a/hw/xtensa/virt.c
+++ b/hw/xtensa/virt.c
@@ -27,13 +27,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/pci-host/gpex.h"
#include "net/net.h"
#include "elf.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/error-report.h"
#include "xtensa_memory.h"
#include "xtensa_sim.h"
@@ -93,7 +93,7 @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base,
/* Connect IRQ lines. */
extints = xtensa_get_extints(env);
- for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ for (i = 0; i < PCI_NUM_PINS; i++) {
void *q = extints[irq_base + i];
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q);
diff --git a/hw/xtensa/xtensa_memory.c b/hw/xtensa/xtensa_memory.c
index 2c1095f..13a6077 100644
--- a/hw/xtensa/xtensa_memory.c
+++ b/hw/xtensa/xtensa_memory.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/error-report.h"
#include "xtensa_memory.h"
diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c
index 955e886..6efffae 100644
--- a/hw/xtensa/xtfpga.c
+++ b/hw/xtensa/xtfpga.c
@@ -29,20 +29,21 @@
#include "qemu/units.h"
#include "qapi/error.h"
#include "cpu.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/qdev-properties.h"
#include "elf.h"
-#include "exec/memory.h"
-#include "hw/char/serial.h"
+#include "system/memory.h"
+#include "exec/tswap.h"
+#include "hw/char/serial-mm.h"
#include "net/net.h"
#include "hw/sysbus.h"
#include "hw/block/flash.h"
#include "chardev/char.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
+#include "system/device_tree.h"
+#include "system/reset.h"
+#include "system/runstate.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "bootparam.h"
@@ -397,7 +398,8 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
uint64_t elf_entry;
int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu,
- &elf_entry, NULL, NULL, NULL, TARGET_BIG_ENDIAN,
+ &elf_entry, NULL, NULL, NULL,
+ TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB,
EM_XTENSA, 0, 0);
if (success > 0) {
entry_point = elf_entry;
@@ -415,8 +417,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
}
}
if (entry_point != env->pc) {
- uint8_t boot[] = {
-#if TARGET_BIG_ENDIAN
+ uint8_t boot_be[] = {
0x60, 0x00, 0x08, /* j 1f */
0x00, /* .literal_position */
0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */
@@ -425,7 +426,8 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
0x10, 0xff, 0xfe, /* l32r a0, entry_pc */
0x12, 0xff, 0xfe, /* l32r a2, entry_a2 */
0x0a, 0x00, 0x00, /* jx a0 */
-#else
+ };
+ uint8_t boot_le[] = {
0x06, 0x02, 0x00, /* j 1f */
0x00, /* .literal_position */
0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */
@@ -434,14 +436,16 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine)
0x01, 0xfe, 0xff, /* l32r a0, entry_pc */
0x21, 0xfe, 0xff, /* l32r a2, entry_a2 */
0xa0, 0x00, 0x00, /* jx a0 */
-#endif
};
+ const size_t boot_sz = TARGET_BIG_ENDIAN ? sizeof(boot_be)
+ : sizeof(boot_le);
+ uint8_t *boot = TARGET_BIG_ENDIAN ? boot_be : boot_le;
uint32_t entry_pc = tswap32(entry_point);
uint32_t entry_a2 = tswap32(tagptr);
memcpy(boot + 4, &entry_pc, sizeof(entry_pc));
memcpy(boot + 8, &entry_a2, sizeof(entry_a2));
- cpu_physical_memory_write(env->pc, boot, sizeof(boot));
+ cpu_physical_memory_write(env->pc, boot, boot_sz);
}
} else {
if (flash) {
@@ -581,7 +585,7 @@ static void xtfpga_kc705_nommu_init(MachineState *machine)
xtfpga_init(&kc705_board, machine);
}
-static void xtfpga_lx60_class_init(ObjectClass *oc, void *data)
+static void xtfpga_lx60_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -598,7 +602,7 @@ static const TypeInfo xtfpga_lx60_type = {
.class_init = xtfpga_lx60_class_init,
};
-static void xtfpga_lx60_nommu_class_init(ObjectClass *oc, void *data)
+static void xtfpga_lx60_nommu_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -615,7 +619,7 @@ static const TypeInfo xtfpga_lx60_nommu_type = {
.class_init = xtfpga_lx60_nommu_class_init,
};
-static void xtfpga_lx200_class_init(ObjectClass *oc, void *data)
+static void xtfpga_lx200_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -632,7 +636,7 @@ static const TypeInfo xtfpga_lx200_type = {
.class_init = xtfpga_lx200_class_init,
};
-static void xtfpga_lx200_nommu_class_init(ObjectClass *oc, void *data)
+static void xtfpga_lx200_nommu_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -649,7 +653,7 @@ static const TypeInfo xtfpga_lx200_nommu_type = {
.class_init = xtfpga_lx200_nommu_class_init,
};
-static void xtfpga_ml605_class_init(ObjectClass *oc, void *data)
+static void xtfpga_ml605_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -666,7 +670,7 @@ static const TypeInfo xtfpga_ml605_type = {
.class_init = xtfpga_ml605_class_init,
};
-static void xtfpga_ml605_nommu_class_init(ObjectClass *oc, void *data)
+static void xtfpga_ml605_nommu_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -683,7 +687,7 @@ static const TypeInfo xtfpga_ml605_nommu_type = {
.class_init = xtfpga_ml605_nommu_class_init,
};
-static void xtfpga_kc705_class_init(ObjectClass *oc, void *data)
+static void xtfpga_kc705_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -700,7 +704,7 @@ static const TypeInfo xtfpga_kc705_type = {
.class_init = xtfpga_kc705_class_init,
};
-static void xtfpga_kc705_nommu_class_init(ObjectClass *oc, void *data)
+static void xtfpga_kc705_nommu_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/include/accel/accel-cpu-target.h b/include/accel/accel-cpu-target.h
new file mode 100644
index 0000000..6feb344
--- /dev/null
+++ b/include/accel/accel-cpu-target.h
@@ -0,0 +1,31 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ * This header is used only by target-specific code.
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_CPU_TARGET_H
+#define ACCEL_CPU_TARGET_H
+
+/*
+ * This header is used to define new accelerator-specific, target-specific
+ * CPU subclasses.
+ * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
+ *
+ * Do not use it for anything other than implementing new subclasses in
+ * target/, or the accel implementation itself in accel/.
+ */
+
+#include "qom/object.h"
+#include "accel/accel-cpu.h"
+#include "cpu.h"
+
+#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
+#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
+DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
+
+#endif /* ACCEL_CPU_TARGET_H */
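
To make the two name macros concrete: with an assumed CPU_RESOLVING_TYPE of "x86_64-cpu", TYPE_ACCEL_CPU resolves to "accel-x86_64-cpu" and ACCEL_CPU_NAME("kvm") to "kvm-accel-x86_64-cpu". A hedged sketch of how an accelerator might register its CPU specialization under such a name; the example_* functions are hypothetical, only the macros and checkers come from this header:

    static void example_kvm_cpu_accel_class_init(ObjectClass *oc,
                                                 const void *data)
    {
        AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

        /* Hook up the accelerator's per-CPU callbacks (hypothetical). */
        acc->cpu_instance_init = example_kvm_cpu_instance_init;
    }

    static const TypeInfo example_kvm_cpu_accel_type = {
        .name = ACCEL_CPU_NAME("kvm"),   /* e.g. "kvm-accel-x86_64-cpu" */
        .parent = TYPE_ACCEL_CPU,        /* "accel-" CPU_RESOLVING_TYPE */
        .abstract = true,
        .class_size = sizeof(AccelCPUClass),
        .class_init = example_kvm_cpu_accel_class_init,
    };
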
diff --git a/include/accel/accel-cpu.h b/include/accel/accel-cpu.h
new file mode 100644
index 0000000..9e7eede
--- /dev/null
+++ b/include/accel/accel-cpu.h
@@ -0,0 +1,23 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_CPU_H
+#define ACCEL_CPU_H
+
+#include "qom/object.h"
+#include "hw/core/cpu.h"
+
+typedef struct AccelCPUClass {
+ ObjectClass parent_class;
+
+ void (*cpu_class_init)(CPUClass *cc);
+ void (*cpu_instance_init)(CPUState *cpu);
+ bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
+} AccelCPUClass;
+
+#endif /* ACCEL_CPU_H */
diff --git a/include/accel/tcg/cpu-ldst-common.h b/include/accel/tcg/cpu-ldst-common.h
new file mode 100644
index 0000000..8bf17c2
--- /dev/null
+++ b/include/accel/tcg/cpu-ldst-common.h
@@ -0,0 +1,122 @@
+/*
+ * Software MMU support
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_CPU_LDST_COMMON_H
+#define ACCEL_TCG_CPU_LDST_COMMON_H
+
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
+#endif
+
+#include "exec/memopidx.h"
+#include "exec/vaddr.h"
+#include "exec/mmu-access-type.h"
+#include "qemu/int128.h"
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra);
+Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, vaddr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_mmu(CPUArchState *env, vaddr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_mmu(CPUArchState *env, vaddr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_mmu(CPUArchState *env, vaddr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, vaddr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, vaddr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, vaddr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, vaddr addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, vaddr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, vaddr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
+ MemOpIdx oi, uintptr_t ra);
+
+#endif /* ACCEL_TCG_CPU_LDST_COMMON_H */
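
To unpack the declaration macros above, one instantiation expands as follows (a mechanical expansion of GEN_ATOMIC_HELPER, shown only as a reading aid):

    /* GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) declares: */
    uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env, vaddr addr,
                                          uint32_t val,
                                          MemOpIdx oi, uintptr_t retaddr);

    /*
     * GEN_ATOMIC_HELPER_ALL(fetch_add) repeats this for the b, w_le, w_be,
     * l_le and l_be suffixes, plus q_le/q_be when CONFIG_ATOMIC64 is set.
     */
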
diff --git a/include/accel/tcg/cpu-ldst.h b/include/accel/tcg/cpu-ldst.h
new file mode 100644
index 0000000..0de7f5e
--- /dev/null
+++ b/include/accel/tcg/cpu-ldst.h
@@ -0,0 +1,505 @@
+/*
+ * Software MMU support (per-target)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * The syntax for the accessors is:
+ *
+ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
+ * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
+ * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
+ *
+ * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
+ * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
+ * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
+ *
+ * sign is:
+ * (empty): for 32 and 64 bit sizes
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * end is:
+ * (empty): for target native endian, or for 8 bit access
+ * _be: for forced big endian
+ * _le: for forced little endian
+ *
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
+ * the index to use; the "data" and "code" suffixes take the index from
+ * cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
+ */
+#ifndef ACCEL_TCG_CPU_LDST_H
+#define ACCEL_TCG_CPU_LDST_H
+
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
+#endif
+
+#include "exec/cpu-common.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/abi_ptr.h"
+
+#if defined(CONFIG_USER_ONLY)
+#include "user/guest-host.h"
+#endif /* CONFIG_USER_ONLY */
+
+static inline uint32_t
+cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ return cpu_ldb_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
+{
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
+ return cpu_ldw_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
+ return cpu_ldl_mmu(env, addr, oi, ra);
+}
+
+static inline uint64_t
+cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
+ return cpu_ldq_mmu(env, addr, oi, ra);
+}
+
+static inline uint32_t
+cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
+ return cpu_ldw_mmu(env, addr, oi, ra);
+}
+
+static inline int
+cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
+}
+
+static inline uint32_t
+cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
+ return cpu_ldl_mmu(env, addr, oi, ra);
+}
+
+static inline uint64_t
+cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
+ return cpu_ldq_mmu(env, addr, oi, ra);
+}
+
+static inline void
+cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ cpu_stb_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
+ cpu_stw_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
+ cpu_stl_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
+ cpu_stq_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
+ cpu_stw_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
+ cpu_stl_mmu(env, addr, val, oi, ra);
+}
+
+static inline void
+cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
+ int mmu_idx, uintptr_t ra)
+{
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
+ cpu_stq_mmu(env, addr, val, oi, ra);
+}
+
+/*--------------------------*/
+
+static inline uint32_t
+cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int8_t)cpu_ldub_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint64_t
+cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint32_t
+cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline int
+cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
+}
+
+static inline uint32_t
+cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline uint64_t
+cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
+}
+
+static inline void
+cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+static inline void
+cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra)
+{
+ int mmu_index = cpu_mmu_index(env_cpu(env), false);
+ cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
+}
+
+/*--------------------------*/
+
+static inline uint32_t
+cpu_ldub_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldub_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int8_t)cpu_ldub_data(env, addr);
+}
+
+static inline uint32_t
+cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_lduw_be_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_be_data(env, addr);
+}
+
+static inline uint32_t
+cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldl_be_data_ra(env, addr, 0);
+}
+
+static inline uint64_t
+cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldq_be_data_ra(env, addr, 0);
+}
+
+static inline uint32_t
+cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_lduw_le_data_ra(env, addr, 0);
+}
+
+static inline int
+cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_le_data(env, addr);
+}
+
+static inline uint32_t
+cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldl_le_data_ra(env, addr, 0);
+}
+
+static inline uint64_t
+cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
+{
+ return cpu_ldq_le_data_ra(env, addr, 0);
+}
+
+static inline void
+cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stb_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stw_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stl_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
+{
+ cpu_stq_be_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stw_le_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
+{
+ cpu_stl_le_data_ra(env, addr, val, 0);
+}
+
+static inline void
+cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
+{
+ cpu_stq_le_data_ra(env, addr, val, 0);
+}
+
+#if TARGET_BIG_ENDIAN
+# define cpu_lduw_data cpu_lduw_be_data
+# define cpu_ldsw_data cpu_ldsw_be_data
+# define cpu_ldl_data cpu_ldl_be_data
+# define cpu_ldq_data cpu_ldq_be_data
+# define cpu_lduw_data_ra cpu_lduw_be_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra
+# define cpu_ldl_data_ra cpu_ldl_be_data_ra
+# define cpu_ldq_data_ra cpu_ldq_be_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
+# define cpu_stw_data cpu_stw_be_data
+# define cpu_stl_data cpu_stl_be_data
+# define cpu_stq_data cpu_stq_be_data
+# define cpu_stw_data_ra cpu_stw_be_data_ra
+# define cpu_stl_data_ra cpu_stl_be_data_ra
+# define cpu_stq_data_ra cpu_stq_be_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
+#else
+# define cpu_lduw_data cpu_lduw_le_data
+# define cpu_ldsw_data cpu_ldsw_le_data
+# define cpu_ldl_data cpu_ldl_le_data
+# define cpu_ldq_data cpu_ldq_le_data
+# define cpu_lduw_data_ra cpu_lduw_le_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra
+# define cpu_ldl_data_ra cpu_ldl_le_data_ra
+# define cpu_ldq_data_ra cpu_ldq_le_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
+# define cpu_stw_data cpu_stw_le_data
+# define cpu_stl_data cpu_stl_le_data
+# define cpu_stq_data cpu_stq_le_data
+# define cpu_stw_data_ra cpu_stw_le_data_ra
+# define cpu_stl_data_ra cpu_stl_le_data_ra
+# define cpu_stq_data_ra cpu_stq_le_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
+#endif
+
+static inline uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
+ return cpu_ldb_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
+ return cpu_ldw_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
+ return cpu_ldl_code_mmu(env, addr, oi, 0);
+}
+
+static inline uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
+{
+ CPUState *cs = env_cpu(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
+ return cpu_ldq_code_mmu(env, addr, oi, 0);
+}
+
+#endif /* ACCEL_TCG_CPU_LDST_H */
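
A short usage sketch of the accessor naming scheme documented at the top of this header; the helper itself is hypothetical, but the cpu_ld*/cpu_st* accessors are the ones declared above and GETPC() is the usual way a TCG helper obtains its return address for fault unwinding:

    /*
     * Hypothetical target op helper: load a 32-bit little-endian word and
     * store it back big-endian, attributing any fault to this helper's
     * return address.
     */
    static void helper_swap32_example(CPUArchState *env, abi_ptr addr)
    {
        uintptr_t ra = GETPC();                           /* unwind on fault */
        uint32_t val = cpu_ldl_le_data_ra(env, addr, ra); /* ld, 32-bit, LE */

        cpu_stl_be_data_ra(env, addr, val, ra);           /* st, 32-bit, BE */
    }
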
diff --git a/include/accel/tcg/cpu-mmu-index.h b/include/accel/tcg/cpu-mmu-index.h
new file mode 100644
index 0000000..e681a90
--- /dev/null
+++ b/include/accel/tcg/cpu-mmu-index.h
@@ -0,0 +1,42 @@
+/*
+ * cpu_mmu_index()
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_CPU_MMU_INDEX_H
+#define ACCEL_TCG_CPU_MMU_INDEX_H
+
+#include "hw/core/cpu.h"
+#include "accel/tcg/cpu-ops.h"
+#include "tcg/debug-assert.h"
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_USER_ONLY
+# include "cpu.h"
+# endif
+#endif
+
+/**
+ * cpu_mmu_index:
+ * @cs: The CPU state
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ */
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+# endif
+#endif
+
+ int ret = cs->cc->tcg_ops->mmu_index(cs, ifetch);
+ tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
+ return ret;
+}
+
+#endif /* ACCEL_TCG_CPU_MMU_INDEX_H */
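
For context, the usual consumer pattern (as in the cpu_ld*_code helpers in cpu-ldst.h above) combines this index with a MemOp to form the MemOpIdx used by the _mmu accessors; a minimal sketch:

    /*
     * Build a MemOpIdx for an aligned 32-bit data access in the current
     * translation regime (data access, hence ifetch == false).
     */
    int mmu_idx = cpu_mmu_index(cs, false);
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx);
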
diff --git a/include/accel/tcg/cpu-ops.h b/include/accel/tcg/cpu-ops.h
new file mode 100644
index 0000000..dd8ea30
--- /dev/null
+++ b/include/accel/tcg/cpu-ops.h
@@ -0,0 +1,333 @@
+/*
+ * TCG CPU-specific operations
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_CPU_OPS_H
+#define TCG_CPU_OPS_H
+
+#include "exec/breakpoint.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/memop.h"
+#include "exec/mmu-access-type.h"
+#include "exec/vaddr.h"
+#include "accel/tcg/tb-cpu-state.h"
+#include "tcg/tcg-mo.h"
+
+struct TCGCPUOps {
+ /**
+ * mttcg_supported: multi-threaded TCG is supported
+ *
+ * Target (TCG frontend) supports:
+ * - atomic instructions
+ * - memory ordering primitives (barriers)
+ */
+ bool mttcg_supported;
+
+ /**
+ * @precise_smc: Stores which modify code within the current TB force
+ * the TB to exit; the next executed instruction will see
+ * the result of the store.
+ */
+ bool precise_smc;
+
+ /**
+ * @guest_default_memory_order: default barrier that is required
+ * for the guest memory ordering.
+ */
+ TCGBar guest_default_memory_order;
+
+ /**
+ * @initialize: Initialize TCG state
+ *
+ * Called when the first CPU is realized.
+ */
+ void (*initialize)(void);
+ /**
+ * @translate_code: Translate guest instructions to TCGOps
+ * @cpu: cpu context
+ * @tb: translation block
+ * @max_insns: max number of instructions to translate
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ *
+ * This function must be provided by the target, which should create
+ * the target-specific DisasContext, and then invoke translator_loop.
+ */
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+ /**
+ * @get_tb_cpu_state: Extract CPU state for a TCG #TranslationBlock
+ *
+ * Fill in all data required to select or compile a TranslationBlock.
+ */
+ TCGTBCPUState (*get_tb_cpu_state)(CPUState *cs);
+ /**
+ * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
+ *
+ * This is called when we abandon execution of a TB before starting it,
+ * and must set all parts of the CPU state which the previous TB in the
+ * chain may not have updated.
+ * By default, when this is NULL, a call is made to @set_pc(tb->pc).
+ *
+ * If more state needs to be restored, the target must implement a
+ * function to restore all the state, and register it here.
+ */
+ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+ /**
+ * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+ *
+ * This is called when we unwind state in the middle of a TB,
+ * usually before raising an exception. Set all parts of the CPU
+ * state which are tracked insn-by-insn in the target-specific
+ * arguments to start_insn, passed as @data.
+ */
+ void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+ const uint64_t *data);
+
+ /** @cpu_exec_enter: Callback for cpu_exec preparation */
+ void (*cpu_exec_enter)(CPUState *cpu);
+ /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+ void (*cpu_exec_exit)(CPUState *cpu);
+ /** @debug_excp_handler: Callback for handling debug exceptions */
+ void (*debug_excp_handler)(CPUState *cpu);
+
+ /** @mmu_index: Callback for choosing softmmu mmu index */
+ int (*mmu_index)(CPUState *cpu, bool ifetch);
+
+#ifdef CONFIG_USER_ONLY
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+
+ /**
+ * record_sigsegv:
+ * @cpu: cpu context
+ * @addr: faulting guest address
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGSEGV with si_code set for @maperr,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers, and
+ * the siginfo_t, then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
+ * so that a "normal" cpu exception can be raised. In this case,
+ * the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigsegv)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+ /**
+ * record_sigbus:
+ * @cpu: cpu context
+ * @addr: misaligned guest address
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGBUS with si_code BUS_ADRALN,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers, and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu do_unaligned_access code,
+ * @ra is provided so that a "normal" cpu exception can be raised.
+ * In this case, the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigbus)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+
+ /**
+ * untagged_addr: Remove an ignored tag from an address
+ * @cs: cpu context
+ * @addr: tagged guest address
+ */
+ vaddr (*untagged_addr)(CPUState *cs, vaddr addr);
+#else
+ /** @do_interrupt: Callback for interrupt handling. */
+ void (*do_interrupt)(CPUState *cpu);
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ /** @cpu_exec_reset: Callback for reset in cpu_exec. */
+ void (*cpu_exec_reset)(CPUState *cpu);
+ /**
+ * @cpu_exec_halt: Callback for handling halt in cpu_exec.
+ *
+ * The target CPU should do any special processing here that it needs
+ * to do when the CPU is in the halted state.
+ *
+ * Return true to indicate that the CPU should now leave halt, false
+ * if it should remain in the halted state. (This should generally
+ * be the same value that cpu_has_work() would return.)
+ *
+ * This method must be provided. If the target does not need to
+ * do anything special for halt, the same function used for its
+ * SysemuCPUOps::has_work method can be used here, as they have the
+ * same function signature.
+ */
+ bool (*cpu_exec_halt)(CPUState *cpu);
+ /**
+ * @tlb_fill_align: Handle a softmmu tlb miss
+ * @cpu: cpu context
+ * @out: output page properties
+ * @addr: virtual address
+ * @access_type: read, write or execute
+ * @mmu_idx: mmu context
+ * @memop: memory operation for the access
+ * @size: memory access size, or 0 for whole page
+ * @probe: test only, no fault
+ * @ra: host return address for exception unwind
+ *
+ * If the access is valid, fill in @out and return true.
+ * Otherwise if probe is true, return false.
+ * Otherwise raise an exception and do not return.
+ *
+ * The alignment check for the access is deferred to this hook,
+ * so that the target can determine the priority of any alignment
+ * fault with respect to other potential faults from paging.
+ * Zero may be passed for @memop to skip any alignment check
+ * for non-memory-access operations such as probing.
+ */
+ bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
+ /**
+ * @tlb_fill: Handle a softmmu tlb miss
+ *
+ * If the access is valid, call tlb_set_page and return true;
+ * if the access is invalid and probe is true, return false;
+ * otherwise raise an exception and do not return.
+ */
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+ /**
+ * @pointer_wrap:
+ *
+ * We have incremented @base to @result, resulting in a page change.
+ * For the current cpu state, adjust @result for possible overflow.
+ */
+ vaddr (*pointer_wrap)(CPUState *cpu, int mmu_idx, vaddr result, vaddr base);
+ /**
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (i.e. bus faults or external aborts; not MMU faults)
+ */
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+ /**
+ * @do_unaligned_access: Callback for unaligned access handling
+ * The callback must exit via raising an exception.
+ */
+ G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+ /**
+ * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
+ */
+ vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+
+ /**
+ * @debug_check_watchpoint: return true if the architectural
+ * watchpoint whose address has matched should really fire, used by ARM
+ * and RISC-V
+ */
+ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
+
+ /**
+ * @debug_check_breakpoint: return true if the architectural
+ * breakpoint whose PC has matched should really fire.
+ */
+ bool (*debug_check_breakpoint)(CPUState *cpu);
+
+ /**
+ * @io_recompile_replay_branch: Callback for cpu_io_recompile.
+ *
+ * The cpu has been stopped, and cpu_restore_state_from_tb has been
+ * called. If the faulting instruction is in a delay slot, and the
+ * target architecture requires re-execution of the branch, then
+ * adjust the cpu state as required and return true.
+ */
+ bool (*io_recompile_replay_branch)(CPUState *cpu,
+ const TranslationBlock *tb);
+ /**
+ * @need_replay_interrupt: Return %true if @interrupt_request
+ * needs to be recorded for replay purposes.
+ */
+ bool (*need_replay_interrupt)(int interrupt_request);
+#endif /* !CONFIG_USER_ONLY */
+};
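As an illustrative sketch only (not part of this patch), a system-mode target typically wires these hooks into a static ops table. Every name prefixed with xyz_ below is a hypothetical placeholder; cpu_reset and cpu_pointer_wrap_notreached are the generic helpers referenced elsewhere in this header, and the TCGCPUOps typedef is assumed from earlier in the file.

    static const TCGCPUOps xyz_tcg_ops = {
        .initialize           = xyz_tcg_init,           /* build TCG globals once */
        .translate_code       = xyz_translate_code,     /* creates DisasContext, calls translator_loop */
        .get_tb_cpu_state     = xyz_get_tb_cpu_state,
        .restore_state_to_opc = xyz_restore_state_to_opc,
        .mmu_index            = xyz_mmu_index,
        .cpu_exec_interrupt   = xyz_cpu_exec_interrupt,
        .cpu_exec_halt        = xyz_cpu_has_work,       /* same signature as has_work */
        .cpu_exec_reset       = cpu_reset,
        .tlb_fill_align       = xyz_tlb_fill_align,
        .pointer_wrap         = cpu_pointer_wrap_notreached,
        .do_interrupt         = xyz_do_interrupt,
    };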
+
+#if defined(CONFIG_USER_ONLY)
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+ vaddr addr, vaddr len)
+{
+ return 0;
+}
+
+#else
+
+/**
+ * cpu_check_watchpoint:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ * @attrs: memory access attributes
+ * @flags: watchpoint access type
+ * @ra: unwind return address
+ *
+ * Check for a watchpoint hit in [addr, addr+len) of the type
+ * specified by @flags. On a hit, exit via exception.
+ */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs attrs, int flags, uintptr_t ra);
+
+/**
+ * cpu_watchpoint_address_matches:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ *
+ * Return the watchpoint flags that apply to [addr, addr+len).
+ * If no watchpoint is registered for the range, the result is 0.
+ */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
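A brief usage sketch (illustrative only; the enclosing helper, cs, addr and len are hypothetical): a target can pre-check an access range and only take the exception path when a watchpoint of the matching type applies.

    int wpflags = cpu_watchpoint_address_matches(cs, addr, len);

    if (wpflags & BP_MEM_WRITE) {
        /* Exits via exception and does not return if the watchpoint fires. */
        cpu_check_watchpoint(cs, addr, len, MEMTXATTRS_UNSPECIFIED,
                             BP_MEM_WRITE, GETPC());
    }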
+
+/*
+ * Common pointer_wrap implementations.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *, int, vaddr, vaddr);
+vaddr cpu_pointer_wrap_uint32(CPUState *, int, vaddr, vaddr);
+
+#endif
+
+#endif /* TCG_CPU_OPS_H */
diff --git a/include/accel/tcg/getpc.h b/include/accel/tcg/getpc.h
new file mode 100644
index 0000000..0fc08ad
--- /dev/null
+++ b/include/accel/tcg/getpc.h
@@ -0,0 +1,20 @@
+/*
+ * Get host pc for helper unwinding.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_GETPC_H
+#define ACCEL_TCG_GETPC_H
+
+/* GETPC is the true target of the return instruction that we'll execute. */
+#ifdef CONFIG_TCG_INTERPRETER
+extern __thread uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+#else
+# define GETPC() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+#endif /* ACCEL_TCG_GETPC_H */
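A usage sketch (illustrative; the helper name is hypothetical): GETPC() must be captured in the outermost helper called from generated code, then passed down so a fault can unwind back to the guest instruction.

    uint64_t helper_xyz_load64(CPUArchState *env, uint64_t addr)
    {
        uintptr_t ra = GETPC();   /* host return address into the TB */

        /* cpu_ldq_data_ra() uses @ra to restore guest state on a fault. */
        return cpu_ldq_data_ra(env, addr, ra);
    }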
diff --git a/include/accel/tcg/helper-retaddr.h b/include/accel/tcg/helper-retaddr.h
new file mode 100644
index 0000000..037fda2
--- /dev/null
+++ b/include/accel/tcg/helper-retaddr.h
@@ -0,0 +1,43 @@
+/*
+ * Get user helper pc for memory unwinding.
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_HELPER_RETADDR_H
+#define ACCEL_TCG_HELPER_RETADDR_H
+
+/*
+ * For user-only, helpers that use guest to host address translation
+ * must protect the actual host memory access by recording 'retaddr'
+ * for the signal handler. This is required for a race condition in
+ * which another thread unmaps the page between a probe and the
+ * actual access.
+ */
+#ifdef CONFIG_USER_ONLY
+extern __thread uintptr_t helper_retaddr;
+
+static inline void set_helper_retaddr(uintptr_t ra)
+{
+ helper_retaddr = ra;
+ /*
+ * Ensure that this write is visible to the SIGSEGV handler that
+ * may be invoked due to a subsequent invalid memory operation.
+ */
+ signal_barrier();
+}
+
+static inline void clear_helper_retaddr(void)
+{
+ /*
+ * Ensure that previous memory operations have succeeded before
+ * removing the data visible to the signal handler.
+ */
+ signal_barrier();
+ helper_retaddr = 0;
+}
+#else
+#define set_helper_retaddr(ra) do { } while (0)
+#define clear_helper_retaddr() do { } while (0)
+#endif
+
+#endif /* ACCEL_TCG_HELPER_RETADDR_H */
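The intended pattern, shown as a sketch (the helper fragment, ra, haddr, scratch and size are hypothetical): bracket the raw host access so the user-only SIGSEGV handler can attribute a fault to the helper and unwind via @ra.

    set_helper_retaddr(ra);
    /* Raw host access to guest memory previously translated to @haddr;
     * may fault if another thread unmaps the page concurrently. */
    memcpy(scratch, haddr, size);
    clear_helper_retaddr();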
diff --git a/include/accel/tcg/iommu.h b/include/accel/tcg/iommu.h
new file mode 100644
index 0000000..90cfd6c
--- /dev/null
+++ b/include/accel/tcg/iommu.h
@@ -0,0 +1,41 @@
+/*
+ * TCG IOMMU translations.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef ACCEL_TCG_IOMMU_H
+#define ACCEL_TCG_IOMMU_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include accel/tcg/iommu.h from user emulation
+#endif
+
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ * @attrs: memory transaction attributes
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs);
+
+MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu,
+ int asidx,
+ hwaddr addr,
+ hwaddr *xlat,
+ hwaddr *plen,
+ MemTxAttrs attrs,
+ int *prot);
+
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section);
+
+#endif
+
diff --git a/include/accel/tcg/probe.h b/include/accel/tcg/probe.h
new file mode 100644
index 0000000..dd9ecbb
--- /dev/null
+++ b/include/accel/tcg/probe.h
@@ -0,0 +1,122 @@
+/*
+ * Probe guest virtual addresses for access permissions.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef ACCEL_TCG_PROBE_H
+#define ACCEL_TCG_PROBE_H
+
+#include "exec/mmu-access-type.h"
+#include "exec/vaddr.h"
+
+/**
+ * probe_access:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @retaddr: return address for unwinding
+ *
+ * Look up the guest virtual address @addr. Raise an exception if the
+ * page does not satisfy @access_type. Raise an exception if the
+ * access (@addr, @size) hits a watchpoint. For writes, mark a clean
+ * page as dirty.
+ *
+ * Finally, return the host address for a page that is backed by RAM,
+ * or NULL if the page requires I/O.
+ */
+void *probe_access(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
+}
+
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+}
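For example (a sketch; the 16-byte zeroing helper is hypothetical), a helper that modifies several bytes non-atomically can validate the whole range up front so that any fault is raised before a partial update.

    void *host = probe_write(env, addr, 16, mmu_idx, GETPC());

    if (host) {
        memset(host, 0, 16);    /* backed by RAM: direct host access */
    } else {
        /* NULL means the page is not plain RAM (e.g. MMIO): use a
         * byte-wise slow path with the normal store helpers instead. */
    }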
+
+/**
+ * probe_access_flags:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @nonfault: suppress the fault
+ * @phost: return value for host address
+ * @retaddr: return address for unwinding
+ *
+ * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
+ * the page, and storing the host address for RAM in @phost.
+ *
+ * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
+ * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
+ * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
+ * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
+ */
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr);
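A non-faulting probe sketch (illustrative; the fallback policy and surrounding helper are hypothetical):

    void *host;
    int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD, mmu_idx,
                                   true /* nonfault */, &host, GETPC());

    if (flags & TLB_INVALID_MASK) {
        /* Page unmapped or not readable: handle without raising a fault. */
    } else if (!(flags & TLB_MMIO) && host) {
        /* Plain RAM: @host may be dereferenced directly. */
    }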
+
+#ifndef CONFIG_USER_ONLY
+
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * This function will not fault if @nonfault is set, but will
+ * return TLB_INVALID_MASK if the page is not mapped, or is not
+ * accessible with @access_type.
+ *
+ * This function will return TLB_MMIO in order to force the access
+ * to be handled out-of-line if plugins wish to instrument the access.
+ */
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
+
+/**
+ * probe_access_full_mmu:
+ * Like probe_access_full, except:
+ *
+ * This function is intended to be used for page table accesses by
+ * the target mmu itself. Since such page walking happens while
+ * handling another potential mmu fault, this function never raises
+ * exceptions (akin to @nonfault true for probe_access_full).
+ * Likewise this function does not trigger plugin instrumentation.
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
+
+#endif /* !CONFIG_USER_ONLY */
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual address in the TCG softmmu TLB.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
+ */
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx);
+
+#endif
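tlb_vaddr_to_host() never faults, so a common pattern (sketch only; the zeroing helper and its arguments are hypothetical) is a direct-RAM fast path with a per-byte fallback:

    static void xyz_zero_bytes(CPUArchState *env, vaddr addr, int len,
                               int mmu_idx, uintptr_t ra)
    {
        char *host = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);

        if (host) {
            memset(host, 0, len);    /* direct RAM fast path */
        } else {
            for (int i = 0; i < len; i++) {
                cpu_stb_mmuidx_ra(env, addr + i, 0, mmu_idx, ra);
            }
        }
    }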
diff --git a/include/accel/tcg/tb-cpu-state.h b/include/accel/tcg/tb-cpu-state.h
new file mode 100644
index 0000000..8f91290
--- /dev/null
+++ b/include/accel/tcg/tb-cpu-state.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TCGTBCPUState.
+ */
+
+#ifndef EXEC_TB_CPU_STATE_H
+#define EXEC_TB_CPU_STATE_H
+
+#include "exec/vaddr.h"
+
+typedef struct TCGTBCPUState {
+ vaddr pc;
+ uint32_t flags;
+ uint32_t cflags;
+ uint64_t cs_base;
+} TCGTBCPUState;
+
+#endif
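A target's get_tb_cpu_state hook fills this structure; as a sketch (the target name and env fields are hypothetical placeholders):

    static TCGTBCPUState xyz_get_tb_cpu_state(CPUState *cs)
    {
        CPUXYZState *env = cpu_env(cs);

        return (TCGTBCPUState){
            .pc      = env->pc,
            .flags   = env->hflags,
            .cs_base = 0,
        };
    }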
diff --git a/include/block/aio.h b/include/block/aio.h
index 4ee8193..99ff484 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -20,6 +20,7 @@
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
+#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
@@ -53,7 +54,7 @@ typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
-struct ThreadPool;
+struct ThreadPoolAio;
struct LinuxAioState;
typedef struct LuringState LuringState;
@@ -122,6 +123,10 @@ struct BHListSlice {
typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
+typedef struct AioPolledEvent {
+ int64_t ns; /* current polling time in nanoseconds */
+} AioPolledEvent;
+
struct AioContext {
GSource source;
@@ -206,7 +211,7 @@ struct AioContext {
/* Thread pool for performing work and receiving completion callbacks.
* Has its own locking.
*/
- struct ThreadPool *thread_pool;
+ struct ThreadPoolAio *thread_pool;
#ifdef CONFIG_LINUX_AIO
struct LinuxAioState *linux_aio;
@@ -228,7 +233,6 @@ struct AioContext {
int poll_disable_cnt;
/* Polling mode parameters */
- int64_t poll_ns; /* current polling time in nanoseconds */
int64_t poll_max_ns; /* maximum polling time in nanoseconds */
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
@@ -499,8 +503,8 @@ void aio_set_event_notifier_poll(AioContext *ctx,
*/
GSource *aio_get_g_source(AioContext *ctx);
-/* Return the ThreadPool bound to this AioContext */
-struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
+/* Return the ThreadPoolAio bound to this AioContext */
+struct ThreadPoolAio *aio_get_thread_pool(AioContext *ctx);
/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);
diff --git a/include/block/aio_task.h b/include/block/aio_task.h
index 18a9c41..c81d637 100644
--- a/include/block/aio_task.h
+++ b/include/block/aio_task.h
@@ -40,8 +40,6 @@ void aio_task_pool_free(AioTaskPool *);
/* error code of failed task or 0 if all is OK */
int aio_task_pool_status(AioTaskPool *pool);
-bool aio_task_pool_empty(AioTaskPool *pool);
-
/* User provides filled @task, however task->pool will be set automatically */
void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task);
diff --git a/include/block/block-common.h b/include/block/block-common.h
index 338fe5f..c8c626d 100644
--- a/include/block/block-common.h
+++ b/include/block/block-common.h
@@ -257,6 +257,7 @@ typedef enum {
#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD "discard"
#define BDRV_OPT_FORCE_SHARE "force-share"
+#define BDRV_OPT_ACTIVE "active"
#define BDRV_SECTOR_BITS 9
@@ -332,6 +333,17 @@ typedef enum {
#define BDRV_BLOCK_RECURSE 0x40
#define BDRV_BLOCK_COMPRESSED 0x80
+/*
+ * Block status hints: the bitwise-or of these flags emphasize what
+ * the caller hopes to learn, and some drivers may be able to give
+ * faster answers by doing less work when the hint permits.
+ */
+#define BDRV_WANT_ZERO BDRV_BLOCK_ZERO
+#define BDRV_WANT_OFFSET_VALID BDRV_BLOCK_OFFSET_VALID
+#define BDRV_WANT_ALLOCATED BDRV_BLOCK_ALLOCATED
+#define BDRV_WANT_PRECISE (BDRV_WANT_ZERO | BDRV_WANT_OFFSET_VALID)
+
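As a sketch of how a driver might honour the hint (the driver and its metadata policy are hypothetical):

    static int coroutine_fn xyz_co_block_status(BlockDriverState *bs,
                                                unsigned int mode,
                                                int64_t offset, int64_t bytes,
                                                int64_t *pnum, int64_t *map,
                                                BlockDriverState **file)
    {
        if (!(mode & BDRV_WANT_ZERO)) {
            /* Caller only wants coarse allocation info: skip zero detection. */
            *pnum = bytes;
            return BDRV_BLOCK_DATA;
        }
        /* Precise path: assume metadata reports the range as reading zero. */
        *pnum = bytes;
        return BDRV_BLOCK_ZERO;
    }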
typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
typedef struct BDRVReopenState {
@@ -355,7 +367,6 @@ typedef enum BlockOpType {
BLOCK_OP_TYPE_CHANGE,
BLOCK_OP_TYPE_COMMIT_SOURCE,
BLOCK_OP_TYPE_COMMIT_TARGET,
- BLOCK_OP_TYPE_DATAPLANE,
BLOCK_OP_TYPE_DRIVE_DEL,
BLOCK_OP_TYPE_EJECT,
BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index bdc703b..dd5cc82 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -28,6 +28,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
BlockDriverState *copy_bitmap_bs,
const BdrvDirtyBitmap *bitmap,
bool discard_source,
+ uint64_t min_cluster_size,
Error **errp);
/* Function should be called prior any actual copy request */
diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h
index bd7cecd..84a2a4e 100644
--- a/include/block/block-global-state.h
+++ b/include/block/block-global-state.h
@@ -175,21 +175,27 @@ BlockDriverState * GRAPH_RDLOCK
check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
Error **errp);
+
+bool GRAPH_RDLOCK bdrv_is_inactive(BlockDriverState *bs);
+
int no_coroutine_fn GRAPH_RDLOCK
bdrv_activate(BlockDriverState *bs, Error **errp);
int coroutine_fn no_co_wrapper_bdrv_rdlock
bdrv_co_activate(BlockDriverState *bs, Error **errp);
+int no_coroutine_fn
+bdrv_inactivate(BlockDriverState *bs, Error **errp);
+
void bdrv_activate_all(Error **errp);
int bdrv_inactivate_all(void);
int bdrv_flush_all(void);
void bdrv_close_all(void);
-void bdrv_drain_all_begin(void);
+void GRAPH_UNLOCKED bdrv_drain_all_begin(void);
void bdrv_drain_all_begin_nopoll(void);
void bdrv_drain_all_end(void);
-void bdrv_drain_all(void);
+void GRAPH_UNLOCKED bdrv_drain_all(void);
void bdrv_aio_cancel(BlockAIOCB *acb);
@@ -268,11 +274,16 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
-bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp);
-int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
- BdrvChild *ignore_child, Error **errp);
+bool GRAPH_RDLOCK
+bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp);
+int GRAPH_UNLOCKED
+bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp);
+int GRAPH_RDLOCK
+bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp);
int GRAPH_RDLOCK bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
diff --git a/include/block/block-io.h b/include/block/block-io.h
index b49e053..4cf83fb 100644
--- a/include/block/block-io.h
+++ b/include/block/block-io.h
@@ -161,6 +161,8 @@ bdrv_is_allocated_above(BlockDriverState *bs, BlockDriverState *base,
int coroutine_fn GRAPH_RDLOCK
bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_all_zeroes(BlockDriverState *bs);
int GRAPH_RDLOCK
bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
@@ -429,7 +431,7 @@ bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
*
* This function can be recursive.
*/
-void bdrv_drained_begin(BlockDriverState *bs);
+void GRAPH_UNLOCKED bdrv_drained_begin(BlockDriverState *bs);
/**
* bdrv_do_drained_begin_quiesce:
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index ebb4e56..925a3e7 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -396,9 +396,23 @@ struct BlockDriver {
int GRAPH_RDLOCK_PTR (*bdrv_probe_geometry)(
BlockDriverState *bs, HDGeometry *geo);
+ /**
+ * Hot add a BDS's child. Used in combination with bdrv_del_child, so the
+ * user can take a child offline when it is broken and take a new child
+ * online.
+ *
+ * All block nodes must be drained.
+ */
void GRAPH_WRLOCK_PTR (*bdrv_add_child)(
BlockDriverState *parent, BlockDriverState *child, Error **errp);
+ /**
+ * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the
+ * user can take a child offline when it is broken and take a new child
+ * online.
+ *
+ * All block nodes must be drained.
+ */
void GRAPH_WRLOCK_PTR (*bdrv_del_child)(
BlockDriverState *parent, BdrvChild *child, Error **errp);
@@ -506,10 +520,6 @@ struct BlockDriver {
BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_flush)(
BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pdiscard)(
- BlockDriverState *bs, int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque);
-
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_readv)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
@@ -608,15 +618,16 @@ struct BlockDriver {
* according to the current layer, and should only need to set
* BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
* and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
- * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
- * block.h for the overall meaning of the bits. As a hint, the
- * flag want_zero is true if the caller cares more about precise
- * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
- * overall allocation (favor larger *pnum, perhaps by reporting
- * _DATA instead of _ZERO). The block layer guarantees input
- * clamped to bdrv_getlength() and aligned to request_alignment,
- * as well as non-NULL pnum, map, and file; in turn, the driver
- * must return an error or set pnum to an aligned non-zero value.
+ * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). The
+ * caller will synthesize BDRV_BLOCK_ALLOCATED based on the
+ * non-zero results. See block.h for the overall meaning of the
+ * bits. As a hint, the flags in @mode may include a bitwise-or
+ * of BDRV_WANT_ALLOCATED, BDRV_WANT_OFFSET_VALID, or
+ * BDRV_WANT_ZERO based on what the caller is looking for in the
+ * results. The block layer guarantees input clamped to
+ * bdrv_getlength() and aligned to request_alignment, as well as
+ * non-NULL pnum, map, and file; in turn, the driver must return
+ * an error or set pnum to an aligned non-zero value.
*
* Note that @bytes is just a hint on how big of a region the
* caller wants to inspect. It is not a limit on *pnum.
@@ -628,8 +639,8 @@ struct BlockDriver {
* to clamping *pnum for return to its caller.
*/
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_block_status)(
- BlockDriverState *bs,
- bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
+ BlockDriverState *bs, unsigned int mode,
+ int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file);
/*
@@ -653,8 +664,8 @@ struct BlockDriver {
QEMUIOVector *qiov, size_t qiov_offset);
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_snapshot_block_status)(
- BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
- int64_t *pnum, int64_t *map, BlockDriverState **file);
+ BlockDriverState *bs, unsigned int mode, int64_t offset,
+ int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file);
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard_snapshot)(
BlockDriverState *bs, int64_t offset, int64_t bytes);
@@ -986,9 +997,21 @@ struct BdrvChildClass {
bool backing_mask_protocol,
Error **errp);
- bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx,
- GHashTable *visited, Transaction *tran,
- Error **errp);
+ /*
+ * Notifies the parent that the child is trying to change its AioContext.
+ * The parent may in turn change the AioContext of other nodes in the same
+ * transaction. Returns true if the change is possible and the transaction
+ * can be continued. Returns false and sets @errp if not, in which case
+ * the transaction must be aborted.
+ *
+ * @visited will accumulate all visited BdrvChild objects. The caller is
+ * responsible for freeing the list afterwards.
+ *
+ * Must be called with the affected block nodes drained.
+ */
+ bool GRAPH_RDLOCK_PTR (*change_aio_ctx)(BdrvChild *child, AioContext *ctx,
+ GHashTable *visited,
+ Transaction *tran, Error **errp);
/*
* I/O API functions. These functions are thread-safe.
diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h
index eb2d92a..e7c8f1a 100644
--- a/include/block/block_int-global-state.h
+++ b/include/block/block_int-global-state.h
@@ -139,7 +139,7 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
* @buf_size: The amount of data that can be in flight at one time.
* @mode: Whether to collapse all images in the chain to the target.
* @backing_mode: How to establish the target's backing chain after completion.
- * @zero_target: Whether the target should be explicitly zero-initialized
+ * @target_is_zero: Whether the target already is zero-initialized.
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
* @unmap: Whether to unmap target where source sectors only contain zeroes.
@@ -159,7 +159,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
int creation_flags, int64_t speed,
uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
- bool zero_target,
+ bool target_is_zero,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap, const char *filter_node_name,
@@ -179,6 +179,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
* all ".has_*" fields are ignored.
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
+ * @on_cbw_error: The action to take upon error in copy-before-write operations.
* @creation_flags: Flags that control the behavior of the Job lifetime.
* See @BlockJobCreateFlags
* @cb: Completion function for the job.
@@ -198,6 +199,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BackupPerf *perf,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ OnCbwError on_cbw_error,
int creation_flags,
BlockCompletionFunc *cb, void *opaque,
JobTxn *txn, Error **errp);
diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h
index 4a7cf2b..4f94eb3 100644
--- a/include/block/block_int-io.h
+++ b/include/block/block_int-io.h
@@ -38,8 +38,8 @@
int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_snapshot(BdrvChild *child,
int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
int coroutine_fn GRAPH_RDLOCK bdrv_co_snapshot_block_status(
- BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
- int64_t *pnum, int64_t *map, BlockDriverState **file);
+ BlockDriverState *bs, unsigned int mode, int64_t offset,
+ int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file);
int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard_snapshot(BlockDriverState *bs,
int64_t offset, int64_t bytes);
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 7061ab7..990f3e1 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -137,6 +137,8 @@ BlockJob *block_job_get_locked(const char *id);
* Add @bs to the list of BlockDriverState that are involved in
* @job. This means that all operations will be blocked on @bs while
* @job exists.
+ *
+ * All block nodes must be drained.
*/
int GRAPH_WRLOCK
block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
diff --git a/include/block/export.h b/include/block/export.h
index f2fe0f8..4bd9531 100644
--- a/include/block/export.h
+++ b/include/block/export.h
@@ -29,6 +29,9 @@ typedef struct BlockExportDriver {
*/
size_t instance_size;
+ /* True if the export type supports running on an inactive node */
+ bool supports_inactive;
+
/* Creates and starts a new block export */
int (*create)(BlockExport *, BlockExportOptions *, Error **);
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
index d7545e8..2c26c72 100644
--- a/include/block/graph-lock.h
+++ b/include/block/graph-lock.h
@@ -20,8 +20,6 @@
#ifndef GRAPH_LOCK_H
#define GRAPH_LOCK_H
-#include "qemu/clang-tsa.h"
-
/**
* Graph Lock API
* This API provides a rwlock used to protect block layer
@@ -209,31 +207,38 @@ typedef struct GraphLockable { } GraphLockable;
* unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
* we hold the lock while unlocking is left unchecked.
*/
-static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn
+static inline GraphLockable * TSA_ACQUIRE_SHARED(graph_lock) coroutine_fn
graph_lockable_auto_lock(GraphLockable *x)
{
bdrv_graph_co_rdlock();
return x;
}
-static inline void TSA_NO_TSA coroutine_fn
-graph_lockable_auto_unlock(GraphLockable *x)
+static inline void TSA_RELEASE_SHARED(graph_lock) coroutine_fn
+graph_lockable_auto_unlock(GraphLockable **x)
{
bdrv_graph_co_rdunlock();
}
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock)
+#define GRAPH_AUTO_UNLOCK __attribute__((cleanup(graph_lockable_auto_unlock)))
+/*
+ * @var is only used to break the loop after the first iteration.
+ * @unlock_var can't be unlocked and then set to NULL because TSA wants the lock
+ * to be held at the start of every iteration of the loop.
+ */
#define WITH_GRAPH_RDLOCK_GUARD_(var) \
- for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \
+ for (GraphLockable *unlock_var GRAPH_AUTO_UNLOCK = \
+ graph_lockable_auto_lock(GML_OBJ_()), \
+ *var = unlock_var; \
var; \
- graph_lockable_auto_unlock(var), var = NULL)
+ var = NULL)
#define WITH_GRAPH_RDLOCK_GUARD() \
WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))
#define GRAPH_RDLOCK_GUARD(x) \
- g_autoptr(GraphLockable) \
+ GraphLockable * GRAPH_AUTO_UNLOCK \
glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
graph_lockable_auto_lock(GML_OBJ_())
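Usage of the guards is unchanged by this conversion; as a reminder sketch (the surrounding coroutine is hypothetical), the scoped form drops the read lock when the block is left on any path:

    int coroutine_fn xyz_co_do_something(BlockDriverState *bs)
    {
        int ret;

        WITH_GRAPH_RDLOCK_GUARD() {
            /* Graph read lock held for the body, released on scope exit. */
            ret = bdrv_co_flush(bs);
        }
        return ret;
    }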
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 4e7bd63..92987c7 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -33,6 +33,19 @@ typedef struct NBDMetaContexts NBDMetaContexts;
extern const BlockExportDriver blk_exp_nbd;
+/*
+ * NBD_DEFAULT_HANDSHAKE_MAX_SECS: Number of seconds in which a client must
+ * succeed at NBD_OPT_GO before being forcefully dropped as too slow.
+ */
+#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10
+
+/*
+ * NBD_DEFAULT_MAX_CONNECTIONS: Number of client sockets to allow at
+ * once; must be large enough to allow a MULTI_CONN-aware client like
+ * nbdcopy to create its typical number of 8-16 sockets.
+ */
+#define NBD_DEFAULT_MAX_CONNECTIONS 100
+
/* Handshake phase structs - this struct is passed on the wire */
typedef struct NBDOption {
@@ -403,18 +416,21 @@ AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);
void nbd_client_new(QIOChannelSocket *sioc,
+ uint32_t handshake_max_secs,
QCryptoTLSCreds *tlscreds,
const char *tlsauthz,
- void (*close_fn)(NBDClient *, bool));
+ void (*close_fn)(NBDClient *, bool),
+ void *owner);
+void *nbd_client_owner(NBDClient *client);
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
void nbd_server_is_qemu_nbd(int max_connections);
bool nbd_server_is_running(void);
int nbd_server_max_connections(void);
-void nbd_server_start(SocketAddress *addr, const char *tls_creds,
- const char *tls_authz, uint32_t max_connections,
- Error **errp);
+void nbd_server_start(SocketAddress *addr, uint32_t handshake_max_secs,
+ const char *tls_creds, const char *tls_authz,
+ uint32_t max_connections, Error **errp);
void nbd_server_start_options(NbdServerOptions *arg, Error **errp);
/* nbd_read
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 7c77d38..358e516 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -142,9 +142,9 @@ enum NvmeCapMask {
((cap) |= (uint64_t)((val) & CAP_CMBS_MASK) << CAP_CMBS_SHIFT)
enum NvmeCapCss {
- NVME_CAP_CSS_NVM = 1 << 0,
- NVME_CAP_CSS_CSI_SUPP = 1 << 6,
- NVME_CAP_CSS_ADMIN_ONLY = 1 << 7,
+ NVME_CAP_CSS_NCSS = 1 << 0,
+ NVME_CAP_CSS_IOCSS = 1 << 6,
+ NVME_CAP_CSS_NOIOCSS = 1 << 7,
};
enum NvmeCcShift {
@@ -177,7 +177,7 @@ enum NvmeCcMask {
enum NvmeCcCss {
NVME_CC_CSS_NVM = 0x0,
- NVME_CC_CSS_CSI = 0x6,
+ NVME_CC_CSS_ALL = 0x6,
NVME_CC_CSS_ADMIN_ONLY = 0x7,
};
@@ -799,6 +799,8 @@ typedef struct QEMU_PACKED NvmeDsmRange {
enum {
NVME_COPY_FORMAT_0 = 0x0,
NVME_COPY_FORMAT_1 = 0x1,
+ NVME_COPY_FORMAT_2 = 0x2,
+ NVME_COPY_FORMAT_3 = 0x3,
};
typedef struct QEMU_PACKED NvmeCopyCmd {
@@ -820,25 +822,30 @@ typedef struct QEMU_PACKED NvmeCopyCmd {
uint16_t appmask;
} NvmeCopyCmd;
-typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0 {
- uint8_t rsvd0[8];
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0_2 {
+ uint32_t sparams;
+ uint8_t rsvd4[4];
uint64_t slba;
uint16_t nlb;
- uint8_t rsvd18[6];
+ uint8_t rsvd18[4];
+ uint16_t sopt;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
-} NvmeCopySourceRangeFormat0;
+} NvmeCopySourceRangeFormat0_2;
-typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1 {
- uint8_t rsvd0[8];
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1_3 {
+ uint32_t sparams;
+ uint8_t rsvd4[4];
uint64_t slba;
uint16_t nlb;
- uint8_t rsvd18[8];
+ uint8_t rsvd18[4];
+ uint16_t sopt;
+ uint8_t rsvd24[2];
uint8_t sr[10];
uint16_t apptag;
uint16_t appmask;
-} NvmeCopySourceRangeFormat1;
+} NvmeCopySourceRangeFormat1_3;
enum NvmeAsyncEventRequest {
NVME_AER_TYPE_ERROR = 0,
@@ -899,8 +906,7 @@ enum NvmeStatusCodes {
NVME_SGL_DESCR_TYPE_INVALID = 0x0011,
NVME_INVALID_USE_OF_CMB = 0x0012,
NVME_INVALID_PRP_OFFSET = 0x0013,
- NVME_CMD_SET_CMB_REJECTED = 0x002b,
- NVME_INVALID_CMD_SET = 0x002c,
+ NVME_COMMAND_INTERRUPTED = 0x0021,
NVME_FDP_DISABLED = 0x0029,
NVME_INVALID_PHID_LIST = 0x002a,
NVME_LBA_RANGE = 0x0080,
@@ -933,10 +939,16 @@ enum NvmeStatusCodes {
NVME_INVALID_SEC_CTRL_STATE = 0x0120,
NVME_INVALID_NUM_RESOURCES = 0x0121,
NVME_INVALID_RESOURCE_ID = 0x0122,
+ NVME_IOCS_NOT_SUPPORTED = 0x0129,
+ NVME_IOCS_NOT_ENABLED = 0x012a,
+ NVME_IOCS_COMBINATION_REJECTED = 0x012b,
+ NVME_INVALID_IOCS = 0x012c,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
NVME_CMD_SIZE_LIMIT = 0x0183,
+ NVME_CMD_INCOMP_NS_OR_FMT = 0x0185,
+ NVME_CMD_OVERLAP_IO_RANGE = 0x0187,
NVME_INVALID_ZONE_OP = 0x01b6,
NVME_NOZRWA = 0x01b7,
NVME_ZONE_BOUNDARY_ERROR = 0x01b8,
@@ -1006,6 +1018,40 @@ typedef struct QEMU_PACKED NvmeSmartLog {
uint8_t reserved2[320];
} NvmeSmartLog;
+typedef struct QEMU_PACKED NvmeSmartLogExtended {
+ uint64_t physical_media_units_written[2];
+ uint64_t physical_media_units_read[2];
+ uint64_t bad_user_blocks;
+ uint64_t bad_system_nand_blocks;
+ uint64_t xor_recovery_count;
+ uint64_t uncorrectable_read_error_count;
+ uint64_t soft_ecc_error_count;
+ uint64_t end2end_correction_counts;
+ uint8_t system_data_percent_used;
+ uint8_t refresh_counts[7];
+ uint64_t user_data_erase_counts;
+ uint16_t thermal_throttling_stat_and_count;
+ uint16_t dssd_spec_version[3];
+ uint64_t pcie_correctable_error_count;
+ uint32_t incomplete_shutdowns;
+ uint32_t rsvd116;
+ uint8_t percent_free_blocks;
+ uint8_t rsvd121[7];
+ uint16_t capacity_health;
+ uint8_t nvme_errata_ver;
+ uint8_t rsvd131[5];
+ uint64_t unaligned_io;
+ uint64_t security_ver_num;
+ uint64_t total_nuse;
+ uint64_t plp_start_count[2];
+ uint64_t endurance_estimate[2];
+ uint64_t pcie_retraining_count;
+ uint64_t power_state_change_count;
+ uint8_t rsvd208[286];
+ uint16_t log_page_version;
+ uint64_t log_page_guid[2];
+} NvmeSmartLogExtended;
+
#define NVME_SMART_WARN_MAX 6
enum NvmeSmartWarn {
NVME_SMART_SPARE = 1 << 0,
@@ -1043,6 +1089,12 @@ enum NvmeLogIdentifier {
NVME_LOG_FDP_RUH_USAGE = 0x21,
NVME_LOG_FDP_STATS = 0x22,
NVME_LOG_FDP_EVENTS = 0x23,
+ NVME_LOG_VENDOR_START = 0xc0,
+ NVME_LOG_VENDOR_END = 0xff,
+};
+
+enum NvmeOcpLogIdentifier {
+ NVME_OCP_EXTENDED_SMART_INFO = 0xc0,
};
typedef struct QEMU_PACKED NvmePSD {
@@ -1068,6 +1120,7 @@ enum NvmeIdCns {
NVME_ID_CNS_CS_NS = 0x05,
NVME_ID_CNS_CS_CTRL = 0x06,
NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
+ NVME_ID_CNS_CS_IND_NS = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12,
@@ -1078,6 +1131,7 @@ enum NvmeIdCns {
NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
NVME_ID_CNS_CS_NS_PRESENT = 0x1b,
NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
+ NVME_ID_CNS_CS_IND_NS_ALLOCATED = 0x1f,
};
typedef struct QEMU_PACKED NvmeIdCtrl {
@@ -1156,6 +1210,8 @@ typedef struct NvmeIdCtrlZoned {
uint8_t rsvd1[4095];
} NvmeIdCtrlZoned;
+#define NVME_ID_CTRL_NVM_DMRL_MAX 255
+
typedef struct NvmeIdCtrlNvm {
uint8_t vsl;
uint8_t wzsl;
@@ -1173,6 +1229,7 @@ enum NvmeIdCtrlOaes {
enum NvmeIdCtrlCtratt {
NVME_CTRATT_ENDGRPS = 1 << 4,
NVME_CTRATT_ELBAS = 1 << 15,
+ NVME_CTRATT_MEM = 1 << 16,
NVME_CTRATT_FDPS = 1 << 19,
};
@@ -1180,9 +1237,10 @@ enum NvmeIdCtrlOacs {
NVME_OACS_SECURITY = 1 << 0,
NVME_OACS_FORMAT = 1 << 1,
NVME_OACS_FW = 1 << 2,
- NVME_OACS_NS_MGMT = 1 << 3,
+ NVME_OACS_NMS = 1 << 3,
NVME_OACS_DIRECTIVES = 1 << 5,
- NVME_OACS_DBBUF = 1 << 8,
+ NVME_OACS_VMS = 1 << 7,
+ NVME_OACS_DBCS = 1 << 8,
};
enum NvmeIdCtrlOncs {
@@ -1195,11 +1253,15 @@ enum NvmeIdCtrlOncs {
NVME_ONCS_TIMESTAMP = 1 << 6,
NVME_ONCS_VERIFY = 1 << 7,
NVME_ONCS_COPY = 1 << 8,
+ NVME_ONCS_NVMCSA = 1 << 9,
+ NVME_ONCS_NVMAFC = 1 << 10,
};
enum NvmeIdCtrlOcfs {
NVME_OCFS_COPY_FORMAT_0 = 1 << NVME_COPY_FORMAT_0,
NVME_OCFS_COPY_FORMAT_1 = 1 << NVME_COPY_FORMAT_1,
+ NVME_OCFS_COPY_FORMAT_2 = 1 << NVME_COPY_FORMAT_2,
+ NVME_OCFS_COPY_FORMAT_3 = 1 << NVME_COPY_FORMAT_3,
};
enum NvmeIdctrlVwc {
@@ -1272,6 +1334,8 @@ enum NvmeNsAttachmentOperation {
#define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff)
#define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
+#define NVME_ID_CTRL_CTRATT_MEM(ctratt) (ctratt & NVME_CTRATT_MEM)
+
enum NvmeFeatureIds {
NVME_ARBITRATION = 0x1,
NVME_POWER_MANAGEMENT = 0x2,
@@ -1333,7 +1397,9 @@ typedef struct NvmeHostBehaviorSupport {
uint8_t acre;
uint8_t etdas;
uint8_t lbafee;
- uint8_t rsvd3[509];
+ uint8_t rsvd3;
+ uint16_t cdfe;
+ uint8_t rsvd6[506];
} NvmeHostBehaviorSupport;
typedef struct QEMU_PACKED NvmeLBAF {
@@ -1398,9 +1464,28 @@ typedef struct QEMU_PACKED NvmeIdNsNvm {
uint8_t pic;
uint8_t rsvd9[3];
uint32_t elbaf[NVME_MAX_NLBAF];
- uint8_t rsvd268[3828];
+ uint32_t npdgl;
+ uint32_t nprg;
+ uint32_t npra;
+ uint32_t nors;
+ uint32_t npdal;
+ uint8_t rsvd288[3808];
} NvmeIdNsNvm;
+typedef struct QEMU_PACKED NvmeIdNsInd {
+ uint8_t nsfeat;
+ uint8_t nmic;
+ uint8_t rescap;
+ uint8_t fpi;
+ uint32_t anagrpid;
+ uint8_t nsattr;
+ uint8_t rsvd9;
+ uint16_t nvmsetid;
+ uint16_t endgrpid;
+ uint8_t nstat;
+ uint8_t rsvd15[4081];
+} NvmeIdNsInd;
+
typedef struct QEMU_PACKED NvmeIdNsDescr {
uint8_t nidt;
uint8_t nidl;
@@ -1421,8 +1506,10 @@ enum NvmeNsIdentifierType {
NVME_NIDT_CSI = 0x04,
};
-enum NvmeIdNsNmic {
- NVME_NMIC_NS_SHARED = 1 << 0,
+enum NvmeIdNsIndependent {
+ NVME_ID_NS_IND_NMIC_SHRNS = 1 << 0,
+ NVME_ID_NS_IND_NMIC_DISNS = 1 << 1,
+ NVME_ID_NS_IND_NSTAT_NRDY = 1 << 0,
};
enum NvmeCsi {
@@ -1500,6 +1587,16 @@ enum NvmeIdNsMc {
NVME_ID_NS_MC_SEPARATE = 1 << 1,
};
+enum NvmeIdNsNsfeat {
+ NVME_ID_NS_NSFEAT_THINP = 1 << 0,
+ NVME_ID_NS_NSFEAT_NSABPNS = 1 << 1,
+ NVME_ID_NS_NSFEAT_DAE = 1 << 2,
+ NVME_ID_NS_NSFEAT_UIDREUSE = 1 << 3,
+ NVME_ID_NS_NSFEAT_OPTPERF_ALL = 3 << 4,
+ NVME_ID_NS_NSFEAT_MAM = 1 << 6,
+ NVME_ID_NS_NSFEAT_OPTRPERF = 1 << 7,
+};
+
#define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK)
enum NvmePIFormat {
@@ -1833,8 +1930,8 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8);
QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
- QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0) != 32);
- QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1) != 40);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0_2) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1_3) != 40);
QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
@@ -1848,6 +1945,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLogExtended) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
@@ -1855,6 +1953,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsInd) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16);
diff --git a/include/block/qdict.h b/include/block/qdict.h
index b4c28d9..53c4df4 100644
--- a/include/block/qdict.h
+++ b/include/block/qdict.h
@@ -10,7 +10,7 @@
#ifndef BLOCK_QDICT_H
#define BLOCK_QDICT_H
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
QObject *qdict_crumple(const QDict *src, Error **errp);
void qdict_flatten(QDict *qdict);
diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
index 6267068..6570244 100644
--- a/include/block/raw-aio.h
+++ b/include/block/raw-aio.h
@@ -17,6 +17,7 @@
#define QEMU_RAW_AIO_H
#include "block/aio.h"
+#include "block/block-common.h"
#include "qemu/iov.h"
/* AIO request types */
@@ -58,11 +59,18 @@ void laio_cleanup(LinuxAioState *s);
/* laio_co_submit: submit I/O requests in the thread's current AioContext. */
int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
- int type, uint64_t dev_max_batch);
+ int type, BdrvRequestFlags flags,
+ uint64_t dev_max_batch);
bool laio_has_fdsync(int);
+bool laio_has_fua(void);
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
+#else
+static inline bool laio_has_fua(void)
+{
+ return false;
+}
#endif
/* io_uring.c - Linux io_uring implementation */
#ifdef CONFIG_LINUX_IO_URING
@@ -71,9 +79,16 @@ void luring_cleanup(LuringState *s);
/* luring_co_submit: submit I/O requests in the thread's current AioContext. */
int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
- QEMUIOVector *qiov, int type);
+ QEMUIOVector *qiov, int type,
+ BdrvRequestFlags flags);
void luring_detach_aio_context(LuringState *s, AioContext *old_context);
void luring_attach_aio_context(LuringState *s, AioContext *new_context);
+bool luring_has_fua(void);
+#else
+static inline bool luring_has_fua(void)
+{
+ return false;
+}
#endif
#ifdef _WIN32
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 948ff5f..dd48cf0 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -24,20 +24,70 @@
typedef int ThreadPoolFunc(void *opaque);
-typedef struct ThreadPool ThreadPool;
+typedef struct ThreadPoolAio ThreadPoolAio;
-ThreadPool *thread_pool_new(struct AioContext *ctx);
-void thread_pool_free(ThreadPool *pool);
+ThreadPoolAio *thread_pool_new_aio(struct AioContext *ctx);
+void thread_pool_free_aio(ThreadPoolAio *pool);
/*
- * thread_pool_submit* API: submit I/O requests in the thread's
+ * thread_pool_submit_{aio,co} API: submit I/O requests in the thread's
* current AioContext.
*/
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque);
int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
-void thread_pool_submit(ThreadPoolFunc *func, void *arg);
+void thread_pool_update_params(ThreadPoolAio *pool, struct AioContext *ctx);
+
+/* ------------------------------------------- */
+/* Generic thread pool types and methods below */
+typedef struct ThreadPool ThreadPool;
+
+/* Create a new thread pool. Never returns NULL. */
+ThreadPool *thread_pool_new(void);
+
+/*
+ * Free the thread pool.
+ * Waits for all the previously submitted work to complete before performing
+ * the actual freeing operation.
+ */
+void thread_pool_free(ThreadPool *pool);
+
+/*
+ * Submit a new work item (task) to the pool.
+ *
+ * @opaque_destroy is an optional GDestroyNotify for the @opaque argument
+ * to the work function at @func.
+ */
+void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func,
+ void *opaque, GDestroyNotify opaque_destroy);
+
+/*
+ * Submit a new work item (task) to the pool, making sure it starts getting
+ * processed immediately, launching a new thread for it if necessary.
+ *
+ * @opaque_destroy is an optional GDestroyNotify for the @opaque argument
+ * to the work function at @func.
+ */
+void thread_pool_submit_immediate(ThreadPool *pool, ThreadPoolFunc *func,
+ void *opaque, GDestroyNotify opaque_destroy);
+
+/*
+ * Wait for all previously submitted work to complete before returning.
+ *
+ * Can be used as a barrier between two sets of tasks executed on a thread
+ * pool without destroying it, or in a performance-sensitive path where the
+ * caller just wants to wait for all tasks to complete while deferring the
+ * pool free operation to a later, less performance-sensitive time.
+ */
+void thread_pool_wait(ThreadPool *pool);
-void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
+/* Set the maximum number of threads in the pool. */
+bool thread_pool_set_max_threads(ThreadPool *pool, int max_threads);
+
+/*
+ * Adjust the maximum number of threads in the pool to give each task its
+ * own thread (exactly one thread per task).
+ */
+bool thread_pool_adjust_max_threads_to_work(ThreadPool *pool);
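Putting the new generic API together (sketch; the task function and its payload are hypothetical):

    static int xyz_do_task(void *opaque)
    {
        /* ... perform one unit of work on @opaque ... */
        return 0;
    }

    static void xyz_run_batch(void *items[], size_t n)
    {
        ThreadPool *pool = thread_pool_new();

        for (size_t i = 0; i < n; i++) {
            thread_pool_submit(pool, xyz_do_task, items[i], NULL);
        }
        thread_pool_wait(pool);    /* barrier: every submitted task finished */
        thread_pool_free(pool);
    }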
#endif
diff --git a/include/block/ufs.h b/include/block/ufs.h
index 92da7a8..a3ee62b 100644
--- a/include/block/ufs.h
+++ b/include/block/ufs.h
@@ -461,7 +461,7 @@ typedef struct Attributes {
uint8_t psa_state;
uint32_t psa_data_size;
uint8_t ref_clk_gating_wait_time;
- uint8_t device_case_rough_temperaure;
+ uint8_t device_case_rough_temperature;
uint8_t device_too_high_temp_boundary;
uint8_t device_too_low_temp_boundary;
uint8_t throttling_status;
@@ -764,6 +764,12 @@ typedef struct QEMU_PACKED UtpTaskReqDesc {
#define UFS_WB_EXCEED_LIFETIME 0x0B
/*
+ * The range of valid values of the Active ICC attribute
+ * is from 0x00 to 0x0F.
+ */
+#define UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE 0x0F
+
+/*
* In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU
* request/response packet
*/
@@ -1067,6 +1073,11 @@ enum health_desc_param {
UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+enum {
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(4),
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(5),
+};
+
/* WriteBooster buffer mode */
enum {
UFS_WB_BUF_MODE_LU_DEDICATED = 0x0,
@@ -1085,6 +1096,12 @@ enum ufs_lu_wp_type {
UFS_LU_PERM_WP = 0x02,
};
+/* Exception event mask values */
+enum {
+ MASK_EE_TOO_HIGH_TEMP = BIT(3),
+ MASK_EE_TOO_LOW_TEMP = BIT(4),
+};
+
/* UTP QUERY Transaction Specific Fields OpCode */
enum query_opcode {
UFS_UPIU_QUERY_OPCODE_NOP = 0x0,
diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h
index ecef182..8ef05b3 100644
--- a/include/chardev/char-fe.h
+++ b/include/chardev/char-fe.h
@@ -20,7 +20,7 @@ struct CharBackend {
IOReadHandler *chr_read;
BackendChangeHandler *chr_be_change;
void *opaque;
- int tag;
+ unsigned int tag;
bool fe_is_open;
};
@@ -228,6 +228,7 @@ guint qemu_chr_fe_add_watch(CharBackend *be, GIOCondition cond,
* is thread-safe.
*
* Returns: the number of bytes consumed (0 if no associated Chardev)
+ * or -1 on error.
*/
int qemu_chr_fe_write(CharBackend *be, const uint8_t *buf, int len);
@@ -242,6 +243,7 @@ int qemu_chr_fe_write(CharBackend *be, const uint8_t *buf, int len);
* attempted to be written. This function is thread-safe.
*
* Returns: the number of bytes consumed (0 if no associated Chardev)
+ * or -1 on error.
*/
int qemu_chr_fe_write_all(CharBackend *be, const uint8_t *buf, int len);
@@ -253,6 +255,7 @@ int qemu_chr_fe_write_all(CharBackend *be, const uint8_t *buf, int len);
* Read data to a buffer from the back end.
*
* Returns: the number of bytes read (0 if no associated Chardev)
+ * or -1 on error.
*/
int qemu_chr_fe_read_all(CharBackend *be, uint8_t *buf, int len);
diff --git a/include/chardev/char-socket.h b/include/chardev/char-socket.h
index 0708ca6..d6d13ad 100644
--- a/include/chardev/char-socket.h
+++ b/include/chardev/char-socket.h
@@ -74,7 +74,7 @@ struct SocketChardev {
bool is_websock;
GSource *reconnect_timer;
- int64_t reconnect_time;
+ int64_t reconnect_time_ms;
bool connect_err_reported;
QIOTask *connect_task;
diff --git a/include/chardev/char.h b/include/chardev/char.h
index 01df55f..429852f 100644
--- a/include/chardev/char.h
+++ b/include/chardev/char.h
@@ -232,6 +232,7 @@ OBJECT_DECLARE_TYPE(Chardev, ChardevClass, CHARDEV)
#define TYPE_CHARDEV_NULL "chardev-null"
#define TYPE_CHARDEV_MUX "chardev-mux"
+#define TYPE_CHARDEV_HUB "chardev-hub"
#define TYPE_CHARDEV_RINGBUF "chardev-ringbuf"
#define TYPE_CHARDEV_PTY "chardev-pty"
#define TYPE_CHARDEV_CONSOLE "chardev-console"
diff --git a/include/crypto/afsplit.h b/include/crypto/afsplit.h
index 4894d64..06f28fe 100644
--- a/include/crypto/afsplit.h
+++ b/include/crypto/afsplit.h
@@ -46,7 +46,7 @@
*
* splitkey = g_new0(uint8_t, nkey * stripes);
*
- * if (qcrypto_afsplit_encode(QCRYPTO_HASH_ALG_SHA256,
+ * if (qcrypto_afsplit_encode(QCRYPTO_HASH_ALGO_SHA256,
* nkey, stripes,
* masterkey, splitkey, errp) < 0) {
* g_free(splitkey);
@@ -71,7 +71,7 @@
*
* masterkey = g_new0(uint8_t, nkey);
*
- * if (qcrypto_afsplit_decode(QCRYPTO_HASH_ALG_SHA256,
+ * if (qcrypto_afsplit_decode(QCRYPTO_HASH_ALGO_SHA256,
* nkey, stripes,
* splitkey, masterkey, errp) < 0) {
* g_free(splitkey);
@@ -102,7 +102,7 @@
*
* Returns: 0 on success, -1 on error;
*/
-int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash,
+int qcrypto_afsplit_encode(QCryptoHashAlgo hash,
size_t blocklen,
uint32_t stripes,
const uint8_t *in,
@@ -124,7 +124,7 @@ int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash,
*
* Returns: 0 on success, -1 on error;
*/
-int qcrypto_afsplit_decode(QCryptoHashAlgorithm hash,
+int qcrypto_afsplit_decode(QCryptoHashAlgo hash,
size_t blocklen,
uint32_t stripes,
const uint8_t *in,
diff --git a/include/crypto/block.h b/include/crypto/block.h
index 5b5d039..b013d27 100644
--- a/include/crypto/block.h
+++ b/include/crypto/block.h
@@ -287,7 +287,7 @@ QCryptoIVGen *qcrypto_block_get_ivgen(QCryptoBlock *block);
*
* Returns: the hash algorithm
*/
-QCryptoHashAlgorithm qcrypto_block_get_kdf_hash(QCryptoBlock *block);
+QCryptoHashAlgo qcrypto_block_get_kdf_hash(QCryptoBlock *block);
/**
* qcrypto_block_get_payload_offset:
diff --git a/include/crypto/cipher.h b/include/crypto/cipher.h
index 083e12a..9293931 100644
--- a/include/crypto/cipher.h
+++ b/include/crypto/cipher.h
@@ -26,7 +26,7 @@
typedef struct QCryptoCipher QCryptoCipher;
typedef struct QCryptoCipherDriver QCryptoCipherDriver;
-/* See also "QCryptoCipherAlgorithm" and "QCryptoCipherMode"
+/* See also "QCryptoCipherAlgo" and "QCryptoCipherMode"
* enums defined in qapi/crypto.json */
/**
@@ -50,12 +50,12 @@ typedef struct QCryptoCipherDriver QCryptoCipherDriver;
* size_t keylen = 16;
* uint8_t iv = ....;
*
- * if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALG_AES_128)) {
+ * if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128)) {
* error_report(errp, "Feature <blah> requires AES cipher support");
* return -1;
* }
*
- * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128,
+ * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128,
* QCRYPTO_CIPHER_MODE_CBC,
* key, keylen,
* errp);
@@ -78,7 +78,7 @@ typedef struct QCryptoCipherDriver QCryptoCipherDriver;
*/
struct QCryptoCipher {
- QCryptoCipherAlgorithm alg;
+ QCryptoCipherAlgo alg;
QCryptoCipherMode mode;
const QCryptoCipherDriver *driver;
};
@@ -93,7 +93,7 @@ struct QCryptoCipher {
*
* Returns: true if the algorithm is supported, false otherwise
*/
-bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
+bool qcrypto_cipher_supports(QCryptoCipherAlgo alg,
QCryptoCipherMode mode);
/**
@@ -106,7 +106,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
*
* Returns: the block size in bytes
*/
-size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg);
+size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgo alg);
/**
@@ -117,7 +117,7 @@ size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg);
*
* Returns: the key size in bytes
*/
-size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg);
+size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgo alg);
/**
@@ -130,7 +130,7 @@ size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg);
*
* Returns: the IV size in bytes, or 0 if no IV is permitted
*/
-size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg,
+size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgo alg,
QCryptoCipherMode mode);
@@ -156,7 +156,7 @@ size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg,
*
* Returns: a new cipher object, or NULL on error
*/
-QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
+QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgo alg,
QCryptoCipherMode mode,
const uint8_t *key, size_t nkey,
Error **errp);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 54d87aa..1868d4a 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2015 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -23,7 +24,23 @@
#include "qapi/qapi-types-crypto.h"
-/* See also "QCryptoHashAlgorithm" defined in qapi/crypto.json */
+#define QCRYPTO_HASH_DIGEST_LEN_MD5 16
+#define QCRYPTO_HASH_DIGEST_LEN_SHA1 20
+#define QCRYPTO_HASH_DIGEST_LEN_SHA224 28
+#define QCRYPTO_HASH_DIGEST_LEN_SHA256 32
+#define QCRYPTO_HASH_DIGEST_LEN_SHA384 48
+#define QCRYPTO_HASH_DIGEST_LEN_SHA512 64
+#define QCRYPTO_HASH_DIGEST_LEN_RIPEMD160 20
+#define QCRYPTO_HASH_DIGEST_LEN_SM3 32
+
+/* See also "QCryptoHashAlgo" defined in qapi/crypto.json */
+
+typedef struct QCryptoHash QCryptoHash;
+struct QCryptoHash {
+ QCryptoHashAlgo alg;
+ void *opaque;
+ void *driver;
+};
/**
* qcrypto_hash_supports:
@@ -34,7 +51,7 @@
*
* Returns: true if the algorithm is supported, false otherwise
*/
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg);
+gboolean qcrypto_hash_supports(QCryptoHashAlgo alg);
/**
@@ -45,7 +62,7 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg);
*
* Returns: the digest length in bytes
*/
-size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg);
+size_t qcrypto_hash_digest_len(QCryptoHashAlgo alg);
/**
* qcrypto_hash_bytesv:
@@ -57,15 +74,22 @@ size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg);
* @errp: pointer to a NULL-initialized error object
*
* Computes the hash across all the memory regions
- * present in @iov. The @result pointer will be
- * filled with raw bytes representing the computed
- * hash, which will have length @resultlen. The
- * memory pointer in @result must be released
- * with a call to g_free() when no longer required.
+ * present in @iov.
+ *
+ * If @result_len is set to a non-zero value by the caller, then
+ * @result must point to a buffer that is @result_len bytes in size,
+ * and @result_len must match the size of the hash output. The
+ * digest will be written into @result.
+ *
+ * If @result_len is set to zero, then this function will allocate
+ * a buffer to hold the hash output digest, storing a pointer to
+ * the buffer in @result, and setting @result_len to its size.
+ * The memory referenced in @result must be released with a call
+ * to g_free() when no longer required by the caller.
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
+int qcrypto_hash_bytesv(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
uint8_t **result,
@@ -82,15 +106,22 @@ int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
* @errp: pointer to a NULL-initialized error object
*
* Computes the hash across all the memory region
- * @buf of length @len. The @result pointer will be
- * filled with raw bytes representing the computed
- * hash, which will have length @resultlen. The
- * memory pointer in @result must be released
- * with a call to g_free() when no longer required.
+ * @buf of length @len.
+ *
+ * If @result_len is set to a non-zero value by the caller, then
+ * @result must point to a buffer that is @result_len bytes in size,
+ * and @result_len must match the size of the hash output. The
+ * digest will be written into @result.
+ *
+ * If @result_len is set to zero, then this function will allocate
+ * a buffer to hold the hash output digest, storing a pointer to
+ * the buffer in @result, and setting @result_len to its size.
+ * The memory referenced in @result must be released with a call
+ * to g_free() when no longer required by the caller.
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_bytes(QCryptoHashAlgorithm alg,
+int qcrypto_hash_bytes(QCryptoHashAlgo alg,
const char *buf,
size_t len,
uint8_t **result,
@@ -114,13 +145,133 @@ int qcrypto_hash_bytes(QCryptoHashAlgorithm alg,
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_digestv(QCryptoHashAlgorithm alg,
+int qcrypto_hash_digestv(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
char **digest,
Error **errp);
/**
+ * qcrypto_hash_updatev:
+ * @hash: hash object from qcrypto_hash_new
+ * @iov: the array of memory regions to hash
+ * @niov: the length of @iov
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Updates the given hash object with all the memory regions
+ * present in @iov.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int qcrypto_hash_updatev(QCryptoHash *hash,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
+/**
+ * qcrypto_hash_update:
+ * @hash: hash object from qcrypto_hash_new
+ * @buf: the memory region to hash
+ * @len: the length of @buf
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Updates the given hash object with the data from
+ * the given buffer.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int qcrypto_hash_update(QCryptoHash *hash,
+ const char *buf,
+ size_t len,
+ Error **errp);
+
+/**
+ * qcrypto_hash_finalize_digest:
+ * @hash: the hash object to finalize
+ * @digest: pointer to hold output hash
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Computes the hash from the given hash object. Hash object
+ * is expected to have its data updated from the qcrypto_hash_update function.
+ * The @digest pointer will be filled with the printable hex digest of the
+ * computed hash, which will be terminated by '\0'. The memory pointer
+ * in @digest must be released with a call to g_free() when
+ * no longer required.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int qcrypto_hash_finalize_digest(QCryptoHash *hash,
+ char **digest,
+ Error **errp);
+
+/**
+ * qcrypto_hash_finalize_base64:
+ * @hash: hash object to finalize
+ * @base64: pointer to store the hash result in
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Computes the hash from the given hash object. Hash object
+ * is expected to have its data updated from the qcrypto_hash_update function.
+ * The @base64 pointer will be filled with the base64 encoding of the computed
+ * hash, which will be terminated by '\0'. The memory pointer in @base64
+ * must be released with a call to g_free() when no longer required.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int qcrypto_hash_finalize_base64(QCryptoHash *hash,
+ char **base64,
+ Error **errp);
+
+/**
+ * qcrypto_hash_finalize_bytes:
+ * @hash: hash object to finalize
+ * @result: pointer to store the hash result in
+ * @result_len: pointer to store the length of the result in
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Computes the hash from the given hash object. Hash object
+ * is expected to have its data updated from the qcrypto_hash_update function.
+ *
+ * If @result_len is set to a non-zero value by the caller, then
+ * @result must point to a buffer that is @result_len bytes in size,
+ * and @result_len must match the size of the hash output. The
+ * digest will be written into @result.
+ *
+ * If @result_len is set to zero, then this function will allocate
+ * a buffer to hold the hash output digest, storing a pointer to
+ * the buffer in @result, and setting @result_len to its size.
+ * The memory referenced in @result must be released with a call
+ * to g_free() when no longer required by the caller.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int qcrypto_hash_finalize_bytes(QCryptoHash *hash,
+ uint8_t **result,
+ size_t *result_len,
+ Error **errp);
+
+/**
+ * qcrypto_hash_new:
+ * @alg: the hash algorithm
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Creates a new hashing context for the chosen algorithm for
+ * usage with qcrypto_hash_update.
+ *
+ * Returns: New hash object with the given algorithm, or NULL on error.
+ */
+QCryptoHash *qcrypto_hash_new(QCryptoHashAlgo alg, Error **errp);
+
+/**
+ * qcrypto_hash_free:
+ * @hash: hash object to free
+ *
+ * Frees a hashing context for the chosen algorithm.
+ */
+void qcrypto_hash_free(QCryptoHash *hash);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHash, qcrypto_hash_free)
+
+/**
* qcrypto_hash_digest:
* @alg: the hash algorithm
* @buf: the memory region to hash
@@ -137,7 +288,7 @@ int qcrypto_hash_digestv(QCryptoHashAlgorithm alg,
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_digest(QCryptoHashAlgorithm alg,
+int qcrypto_hash_digest(QCryptoHashAlgo alg,
const char *buf,
size_t len,
char **digest,
@@ -160,7 +311,7 @@ int qcrypto_hash_digest(QCryptoHashAlgorithm alg,
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_base64v(QCryptoHashAlgorithm alg,
+int qcrypto_hash_base64v(QCryptoHashAlgo alg,
const struct iovec *iov,
size_t niov,
char **base64,
@@ -183,7 +334,7 @@ int qcrypto_hash_base64v(QCryptoHashAlgorithm alg,
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_hash_base64(QCryptoHashAlgorithm alg,
+int qcrypto_hash_base64(QCryptoHashAlgo alg,
const char *buf,
size_t len,
char **base64,
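
For illustration, a minimal sketch of the new incremental hashing API introduced above (qcrypto_hash_new / qcrypto_hash_update / qcrypto_hash_finalize_bytes); the two input buffers and the choice of SHA-256 are arbitrary. Passing *digest_len == 0 asks the function to allocate the output buffer, which the caller then releases with g_free().

/* Sketch: incremental SHA-256 over two buffers.
 * Assumes "qemu/osdep.h" and "crypto/hash.h" are included. */
static int example_sha256(const char *part1, const char *part2,
                          uint8_t **digest, size_t *digest_len,
                          Error **errp)
{
    g_autoptr(QCryptoHash) hash =
        qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA256, errp);

    if (!hash) {
        return -1;
    }

    if (qcrypto_hash_update(hash, part1, strlen(part1), errp) < 0 ||
        qcrypto_hash_update(hash, part2, strlen(part2), errp) < 0) {
        return -1;
    }

    /* With *digest_len == 0 the buffer is allocated and must be g_free()d. */
    return qcrypto_hash_finalize_bytes(hash, digest, digest_len, errp);
}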
diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h
index ad4d778..da8a1e3 100644
--- a/include/crypto/hmac.h
+++ b/include/crypto/hmac.h
@@ -16,7 +16,7 @@
typedef struct QCryptoHmac QCryptoHmac;
struct QCryptoHmac {
- QCryptoHashAlgorithm alg;
+ QCryptoHashAlgo alg;
void *opaque;
void *driver;
};
@@ -31,7 +31,7 @@ struct QCryptoHmac {
* Returns:
* true if the algorithm is supported, false otherwise
*/
-bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg);
+bool qcrypto_hmac_supports(QCryptoHashAlgo alg);
/**
* qcrypto_hmac_new:
@@ -52,7 +52,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg);
* Returns:
* a new hmac object, or NULL on error
*/
-QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
+QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgo alg,
const uint8_t *key, size_t nkey,
Error **errp);
@@ -77,11 +77,18 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHmac, qcrypto_hmac_free)
* @errp: pointer to a NULL-initialized error object
*
* Computes the hmac across all the memory regions
- * present in @iov. The @result pointer will be
- * filled with raw bytes representing the computed
- * hmac, which will have length @resultlen. The
- * memory pointer in @result must be released
- * with a call to g_free() when no longer required.
+ * present in @iov.
+ *
+ * If @result_len is set to a non-zero value by the caller, then
+ * @result must point to a buffer that is @result_len bytes in size,
+ * and @result_len must match the size of the hash output. The
+ * digest will be written into @result.
+ *
+ * If @result_len is set to zero, then this function will allocate
+ * a buffer to hold the hash output digest, storing a pointer to
+ * the buffer in @result, and setting @result_len to its size.
+ * The memory referenced in @result must be released with a call
+ * to g_free() when no longer required by the caller.
*
* Returns:
* 0 on success, -1 on error
@@ -103,11 +110,18 @@ int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
* @errp: pointer to a NULL-initialized error object
*
* Computes the hmac across all the memory region
- * @buf of length @len. The @result pointer will be
- * filled with raw bytes representing the computed
- * hmac, which will have length @resultlen. The
- * memory pointer in @result must be released
- * with a call to g_free() when no longer required.
+ * @buf of length @len.
+ *
+ * If @result_len is set to a non-zero value by the caller, then
+ * @result must point to a buffer that is @result_len bytes in size,
+ * and @result_len must match the size of the hash output. The
+ * digest will be written into @result.
+ *
+ * If @result_len is set to zero, then this function will allocate
+ * a buffer to hold the hash output digest, storing a pointer to
+ * the buffer in @result, and setting @result_len to its size.
+ * The memory referenced in @result must be released with a call
+ * to g_free() when no longer required by the caller.
*
* Returns:
* 0 on success, -1 on error
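
For illustration, a minimal sketch of the caller-supplied-buffer path described above; the qcrypto_hmac_bytes() prototype itself is not in this hunk, so its result/result_len in-out parameters are an assumption of the sketch.

/* Sketch: HMAC-SHA-256 into a fixed-size, caller-supplied buffer.
 * Assumes "qemu/osdep.h", "crypto/hash.h" and "crypto/hmac.h" are included. */
static int example_hmac_sha256(const uint8_t *key, size_t nkey,
                               const char *buf, size_t len,
                               uint8_t *out, /* QCRYPTO_HASH_DIGEST_LEN_SHA256 bytes */
                               Error **errp)
{
    size_t outlen = QCRYPTO_HASH_DIGEST_LEN_SHA256;
    g_autoptr(QCryptoHmac) hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALGO_SHA256,
                                                   key, nkey, errp);
    if (!hmac) {
        return -1;
    }

    /* Non-zero outlen: the digest is written into 'out', nothing is allocated. */
    return qcrypto_hmac_bytes(hmac, buf, len, &out, &outlen, errp);
}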
diff --git a/include/crypto/ivgen.h b/include/crypto/ivgen.h
index a09d573..bfa5d28 100644
--- a/include/crypto/ivgen.h
+++ b/include/crypto/ivgen.h
@@ -44,22 +44,22 @@
*
* g_assert((ndata % 512) == 0);
*
- * QCryptoIVGen *ivgen = qcrypto_ivgen_new(QCRYPTO_IVGEN_ALG_ESSIV,
- * QCRYPTO_CIPHER_ALG_AES_128,
- * QCRYPTO_HASH_ALG_SHA256,
+ * QCryptoIVGen *ivgen = qcrypto_ivgen_new(QCRYPTO_IV_GEN_ALGO_ESSIV,
+ * QCRYPTO_CIPHER_ALGO_AES_128,
+ * QCRYPTO_HASH_ALGO_SHA256,
* key, nkey, errp);
* if (!ivgen) {
* return -1;
* }
*
- * QCryptoCipher *cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128,
+ * QCryptoCipher *cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128,
* QCRYPTO_CIPHER_MODE_CBC,
* key, nkey, errp);
* if (!cipher) {
* goto error;
* }
*
- * niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128,
+ * niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALGO_AES_128,
* QCRYPTO_CIPHER_MODE_CBC);
* iv = g_new0(uint8_t, niv);
*
@@ -97,7 +97,7 @@
typedef struct QCryptoIVGen QCryptoIVGen;
-/* See also QCryptoIVGenAlgorithm enum in qapi/crypto.json */
+/* See also QCryptoIVGenAlgo enum in qapi/crypto.json */
/**
@@ -113,19 +113,19 @@ typedef struct QCryptoIVGen QCryptoIVGen;
* are required or not depends on the choice of @alg
* requested.
*
- * - QCRYPTO_IVGEN_ALG_PLAIN
+ * - QCRYPTO_IV_GEN_ALGO_PLAIN
*
* The IVs are generated by the 32-bit truncated sector
* number. This should never be used for block devices
* that are larger than 2^32 sectors in size.
* All the other parameters are unused.
*
- * - QCRYPTO_IVGEN_ALG_PLAIN64
+ * - QCRYPTO_IV_GEN_ALGO_PLAIN64
*
* The IVs are generated by the 64-bit sector number.
* All the other parameters are unused.
*
- * - QCRYPTO_IVGEN_ALG_ESSIV:
+ * - QCRYPTO_IV_GEN_ALGO_ESSIV:
*
* The IVs are generated by encrypting the 64-bit sector
* number with a hash of an encryption key. The @cipheralg,
@@ -133,9 +133,9 @@ typedef struct QCryptoIVGen QCryptoIVGen;
*
* Returns: a new IV generator, or NULL on error
*/
-QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg,
- QCryptoCipherAlgorithm cipheralg,
- QCryptoHashAlgorithm hash,
+QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgo alg,
+ QCryptoCipherAlgo cipheralg,
+ QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
Error **errp);
@@ -167,7 +167,7 @@ int qcrypto_ivgen_calculate(QCryptoIVGen *ivgen,
*
* Returns: the IV generator algorithm
*/
-QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen);
+QCryptoIVGenAlgo qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen);
/**
@@ -179,7 +179,7 @@ QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen);
*
* Returns: the cipher algorithm
*/
-QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen);
+QCryptoCipherAlgo qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen);
/**
@@ -191,7 +191,7 @@ QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen);
*
* Returns: the hash algorithm
*/
-QCryptoHashAlgorithm qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen);
+QCryptoHashAlgo qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen);
/**
diff --git a/include/crypto/pbkdf.h b/include/crypto/pbkdf.h
index 2c31a44..cf59fce 100644
--- a/include/crypto/pbkdf.h
+++ b/include/crypto/pbkdf.h
@@ -38,7 +38,7 @@
* ....
*
* char *password = "a-typical-awful-user-password";
- * size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALG_AES_128);
+ * size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALGO_AES_128);
* uint8_t *salt = g_new0(uint8_t, nkey);
* uint8_t *key = g_new0(uint8_t, nkey);
* int iterations;
@@ -50,7 +50,7 @@
* return -1;
* }
*
- * iterations = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALG_SHA256,
+ * iterations = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALGO_SHA256,
* (const uint8_t *)password,
* strlen(password),
* salt, nkey, errp);
@@ -60,7 +60,7 @@
* return -1;
* }
*
- * if (qcrypto_pbkdf2(QCRYPTO_HASH_ALG_SHA256,
+ * if (qcrypto_pbkdf2(QCRYPTO_HASH_ALGO_SHA256,
* (const uint8_t *)password, strlen(password),
* salt, nkey, iterations, key, nkey, errp) < 0) {
* g_free(key);
@@ -70,7 +70,7 @@
*
* g_free(salt);
*
- * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128,
+ * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128,
* QCRYPTO_CIPHER_MODE_ECB,
* key, nkey, errp);
* g_free(key);
@@ -92,7 +92,7 @@
*
* Returns true if supported, false otherwise
*/
-bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash);
+bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash);
/**
@@ -119,7 +119,7 @@ bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash);
*
* Returns: 0 on success, -1 on error
*/
-int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
+int qcrypto_pbkdf2(QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
const uint8_t *salt, size_t nsalt,
uint64_t iterations,
@@ -147,7 +147,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash,
*
* Returns: number of iterations in 1 second, -1 on error
*/
-uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash,
+uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgo hash,
const uint8_t *key, size_t nkey,
const uint8_t *salt, size_t nsalt,
size_t nout,
diff --git a/include/crypto/tlssession.h b/include/crypto/tlssession.h
index 571049b..d77ae0d 100644
--- a/include/crypto/tlssession.h
+++ b/include/crypto/tlssession.h
@@ -75,12 +75,14 @@
* GINT_TO_POINTER(fd));
*
* while (1) {
- * if (qcrypto_tls_session_handshake(sess, errp) < 0) {
+ * int ret = qcrypto_tls_session_handshake(sess, errp);
+ *
+ * if (ret < 0) {
* qcrypto_tls_session_free(sess);
* return -1;
* }
*
- * switch(qcrypto_tls_session_get_handshake_status(sess)) {
+ * switch(ret) {
* case QCRYPTO_TLS_HANDSHAKE_COMPLETE:
* if (qcrypto_tls_session_check_credentials(sess, errp) < 0) {
* qcrypto_tls_session_free(sess);
@@ -107,6 +109,7 @@
typedef struct QCryptoTLSSession QCryptoTLSSession;
+#define QCRYPTO_TLS_SESSION_ERR_BLOCK -2
/**
* qcrypto_tls_session_new:
@@ -169,7 +172,7 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free)
*
* Validate the peer's credentials after a successful
* TLS handshake. It is an error to call this before
- * qcrypto_tls_session_get_handshake_status() returns
+ * qcrypto_tls_session_handshake() returns
* QCRYPTO_TLS_HANDSHAKE_COMPLETE
*
* Returns 0 if the credentials validated, -1 on error
@@ -177,12 +180,18 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free)
int qcrypto_tls_session_check_credentials(QCryptoTLSSession *sess,
Error **errp);
+/*
+ * These must return QCRYPTO_TLS_SESSION_ERR_BLOCK if the I/O
+ * would block, but on other errors, must fill 'errp'
+ */
typedef ssize_t (*QCryptoTLSSessionWriteFunc)(const char *buf,
size_t len,
- void *opaque);
+ void *opaque,
+ Error **errp);
typedef ssize_t (*QCryptoTLSSessionReadFunc)(char *buf,
size_t len,
- void *opaque);
+ void *opaque,
+ Error **errp);
/**
* qcrypto_tls_session_set_callbacks:
@@ -212,41 +221,55 @@ void qcrypto_tls_session_set_callbacks(QCryptoTLSSession *sess,
* @sess: the TLS session object
* @buf: the plain text to send
* @len: the length of @buf
+ * @errp: pointer to hold returned error object
*
* Encrypt @len bytes of the data in @buf and send
* it to the remote peer using the callback previously
* registered with qcrypto_tls_session_set_callbacks()
*
* It is an error to call this before
- * qcrypto_tls_session_get_handshake_status() returns
+ * qcrypto_tls_session_handshake() returns
* QCRYPTO_TLS_HANDSHAKE_COMPLETE
*
- * Returns: the number of bytes sent, or -1 on error
+ * Returns: the number of bytes sent,
+ * or QCRYPTO_TLS_SESSION_ERR_BLOCK if the write would block,
+ * or -1 on error.
*/
ssize_t qcrypto_tls_session_write(QCryptoTLSSession *sess,
const char *buf,
- size_t len);
+ size_t len,
+ Error **errp);
/**
* qcrypto_tls_session_read:
* @sess: the TLS session object
* @buf: to fill with plain text received
* @len: the length of @buf
+ * @gracefulTermination: treat premature termination as graceful EOF
+ * @errp: pointer to hold returned error object
*
* Receive up to @len bytes of data from the remote peer
* using the callback previously registered with
* qcrypto_tls_session_set_callbacks(), decrypt it and
* store it in @buf.
*
+ * If @gracefulTermination is true, then a premature termination
+ * of the TLS session will be treated as indicating EOF, as
+ * opposed to an error.
+ *
* It is an error to call this before
- * qcrypto_tls_session_get_handshake_status() returns
+ * qcrypto_tls_session_handshake() returns
* QCRYPTO_TLS_HANDSHAKE_COMPLETE
*
- * Returns: the number of bytes received, or -1 on error
+ * Returns: the number of bytes received,
+ * or QCRYPTO_TLS_SESSION_ERR_BLOCK if the receive would block,
+ * or -1 on error.
*/
ssize_t qcrypto_tls_session_read(QCryptoTLSSession *sess,
char *buf,
- size_t len);
+ size_t len,
+ bool gracefulTermination,
+ Error **errp);
/**
* qcrypto_tls_session_check_pending:
@@ -268,8 +291,7 @@ size_t qcrypto_tls_session_check_pending(QCryptoTLSSession *sess);
* the underlying data channel is non-blocking, then
* this method may return control before the handshake
* is complete. On non-blocking channels the
- * qcrypto_tls_session_get_handshake_status() method
- * should be used to determine whether the handshake
+ * return value determines whether the handshake
* has completed, or is waiting to send or receive
* data. In the latter cases, the caller should setup
* an event loop watch and call this method again
@@ -285,22 +307,27 @@ typedef enum {
QCRYPTO_TLS_HANDSHAKE_RECVING,
} QCryptoTLSSessionHandshakeStatus;
+typedef enum {
+ QCRYPTO_TLS_BYE_COMPLETE,
+ QCRYPTO_TLS_BYE_SENDING,
+ QCRYPTO_TLS_BYE_RECVING,
+} QCryptoTLSSessionByeStatus;
+
/**
- * qcrypto_tls_session_get_handshake_status:
- * @sess: the TLS session object
- *
- * Check the status of the TLS handshake. This
- * is used with non-blocking data channels to
- * determine whether the handshake is waiting
- * to send or receive further data to/from the
- * remote peer.
+ * qcrypto_tls_session_bye:
+ * @session: the TLS session object
+ * @errp: pointer to a NULL-initialized error object
*
- * Once this returns QCRYPTO_TLS_HANDSHAKE_COMPLETE
- * it is permitted to send/receive payload data on
- * the channel
+ * Start, or continue, a TLS termination sequence. If the underlying
+ * data channel is non-blocking, then this method may return control
+ * before the termination is complete. The return value will indicate
+ * whether the termination has completed, or is waiting to send or
+ * receive data. In the latter cases, the caller should setup an event
+ * loop watch and call this method again once the underlying data
+ * channel is ready to read or write again.
*/
-QCryptoTLSSessionHandshakeStatus
-qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *sess);
+int
+qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp);
/**
* qcrypto_tls_session_get_key_size:
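
For illustration, a minimal sketch of a non-blocking handshake loop under the new contract, where the status comes straight from the qcrypto_tls_session_handshake() return value and the registered I/O callbacks return QCRYPTO_TLS_SESSION_ERR_BLOCK when they would block; example_wait_for_io() is a hypothetical stand-in for an event-loop watch.

/* Sketch: drive the handshake purely from the return value.
 * Assumes "qemu/osdep.h" and "crypto/tlssession.h" are included. */
static int example_handshake(QCryptoTLSSession *sess, Error **errp)
{
    for (;;) {
        int ret = qcrypto_tls_session_handshake(sess, errp);

        if (ret < 0) {
            return -1;
        }
        if (ret == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
            return qcrypto_tls_session_check_credentials(sess, errp);
        }

        /*
         * QCRYPTO_TLS_HANDSHAKE_SENDING / _RECVING: wait until the
         * underlying channel is writable/readable, then loop and call
         * the handshake again.
         */
        example_wait_for_io(sess, ret);   /* hypothetical helper */
    }
}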
diff --git a/include/crypto/x509-utils.h b/include/crypto/x509-utils.h
new file mode 100644
index 0000000..1e99661
--- /dev/null
+++ b/include/crypto/x509-utils.h
@@ -0,0 +1,22 @@
+/*
+ * X.509 certificate related helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QCRYPTO_X509_UTILS_H
+#define QCRYPTO_X509_UTILS_H
+
+#include "crypto/hash.h"
+
+int qcrypto_get_x509_cert_fingerprint(uint8_t *cert, size_t size,
+ QCryptoHashAlgo hash,
+ uint8_t *result,
+ size_t *resultlen,
+ Error **errp);
+
+#endif
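
For illustration, a minimal sketch of the new helper with a caller-supplied SHA-256 buffer; whether @cert is expected in PEM or DER form is not specified by this header, and presetting *resultlen to the buffer size is an assumption of the sketch.

/* Sketch: SHA-256 fingerprint of a certificate blob.
 * Assumes "qemu/osdep.h" and "crypto/x509-utils.h" are included. */
static int example_fingerprint(uint8_t *cert, size_t size, Error **errp)
{
    uint8_t result[QCRYPTO_HASH_DIGEST_LEN_SHA256];
    size_t resultlen = sizeof(result);

    return qcrypto_get_x509_cert_fingerprint(cert, size,
                                             QCRYPTO_HASH_ALGO_SHA256,
                                             result, &resultlen, errp);
}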
diff --git a/include/disas/capstone.h b/include/disas/capstone.h
index a119851..c43033f 100644
--- a/include/disas/capstone.h
+++ b/include/disas/capstone.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_CAPSTONE
#define CAPSTONE_AARCH64_COMPAT_HEADER
+#define CAPSTONE_SYSTEMZ_COMPAT_HEADER
#include <capstone.h>
#else
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index a1d26ce..3b50ecf 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -232,10 +232,6 @@ enum bfd_architecture
#define bfd_mach_avrxmega5 105
#define bfd_mach_avrxmega6 106
#define bfd_mach_avrxmega7 107
- bfd_arch_cris, /* Axis CRIS */
-#define bfd_mach_cris_v0_v10 255
-#define bfd_mach_cris_v32 32
-#define bfd_mach_cris_v10_v32 1032
bfd_arch_microblaze, /* Xilinx MicroBlaze. */
bfd_arch_moxie, /* The Moxie core. */
bfd_arch_ia64, /* HP/Intel ia64 */
@@ -448,8 +444,6 @@ int print_insn_w65 (bfd_vma, disassemble_info*);
int print_insn_d10v (bfd_vma, disassemble_info*);
int print_insn_v850 (bfd_vma, disassemble_info*);
int print_insn_tic30 (bfd_vma, disassemble_info*);
-int print_insn_crisv32 (bfd_vma, disassemble_info*);
-int print_insn_crisv10 (bfd_vma, disassemble_info*);
int print_insn_microblaze (bfd_vma, disassemble_info*);
int print_insn_ia64 (bfd_vma, disassemble_info*);
int print_insn_xtensa (bfd_vma, disassemble_info*);
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
deleted file mode 100644
index 0d0aa61..0000000
--- a/include/exec/address-spaces.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Internal memory management interfaces
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef EXEC_ADDRESS_SPACES_H
-#define EXEC_ADDRESS_SPACES_H
-
-/*
- * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
- * you're one of them.
- */
-
-#ifndef CONFIG_USER_ONLY
-
-/* Get the root memory region. This interface should only be used temporarily
- * until a proper bus interface is available.
- */
-MemoryRegion *get_system_memory(void);
-
-/* Get the root I/O port region. This interface should only be used
- * temporarily until a proper bus interface is available.
- */
-MemoryRegion *get_system_io(void);
-
-extern AddressSpace address_space_memory;
-extern AddressSpace address_space_io;
-
-#endif
-
-#endif
diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h
deleted file mode 100644
index 02dc4e5..0000000
--- a/include/exec/confidential-guest-support.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * QEMU Confidential Guest support
- * This interface describes the common pieces between various
- * schemes for protecting guest memory or other state against a
- * compromised hypervisor. This includes memory encryption (AMD's
- * SEV and Intel's MKTME) or special protection modes (PEF on POWER,
- * or PV on s390x).
- *
- * Copyright Red Hat.
- *
- * Authors:
- * David Gibson <david@gibson.dropbear.id.au>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
-#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
-
-#ifndef CONFIG_USER_ONLY
-
-#include "qom/object.h"
-
-#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
-OBJECT_DECLARE_TYPE(ConfidentialGuestSupport,
- ConfidentialGuestSupportClass,
- CONFIDENTIAL_GUEST_SUPPORT)
-
-
-struct ConfidentialGuestSupport {
- Object parent;
-
- /*
- * True if the machine should use guest_memfd for RAM.
- */
- bool require_guest_memfd;
-
- /*
- * ready: flag set by CGS initialization code once it's ready to
- * start executing instructions in a potentially-secure
- * guest
- *
- * The definition here is a bit fuzzy, because this is essentially
- * part of a self-sanity-check, rather than a strict mechanism.
- *
- * It's not feasible to have a single point in the common machine
- * init path to configure confidential guest support, because
- * different mechanisms have different interdependencies requiring
- * initialization in different places, often in arch or machine
- * type specific code. It's also usually not possible to check
- * for invalid configurations until that initialization code.
- * That means it would be very easy to have a bug allowing CGS
- * init to be bypassed entirely in certain configurations.
- *
- * Silently ignoring a requested security feature would be bad, so
- * to avoid that we check late in init that this 'ready' flag is
- * set if CGS was requested. If the CGS init hasn't happened, and
- * so 'ready' is not set, we'll abort.
- */
- bool ready;
-};
-
-typedef struct ConfidentialGuestSupportClass {
- ObjectClass parent;
-
- int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
- int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp);
-} ConfidentialGuestSupportClass;
-
-static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs,
- Error **errp)
-{
- ConfidentialGuestSupportClass *klass;
-
- klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
- if (klass->kvm_init) {
- return klass->kvm_init(cgs, errp);
- }
-
- return 0;
-}
-
-static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs,
- Error **errp)
-{
- ConfidentialGuestSupportClass *klass;
-
- klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
- if (klass->kvm_reset) {
- return klass->kvm_reset(cgs, errp);
- }
-
- return 0;
-}
-
-#endif /* !CONFIG_USER_ONLY */
-
-#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
deleted file mode 100644
index 6f09b86..0000000
--- a/include/exec/cpu-all.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * defines common to all virtual CPUs
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CPU_ALL_H
-#define CPU_ALL_H
-
-#include "exec/page-protection.h"
-#include "exec/cpu-common.h"
-#include "exec/memory.h"
-#include "exec/tswap.h"
-#include "hw/core/cpu.h"
-
-/* some important defines:
- *
- * HOST_BIG_ENDIAN : whether the host cpu is big endian and
- * otherwise little endian.
- *
- * TARGET_BIG_ENDIAN : same for the target cpu
- */
-
-#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
-#define BSWAP_NEEDED
-#endif
-
-/* Target-endianness CPU memory access functions. These fit into the
- * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
- */
-#if TARGET_BIG_ENDIAN
-#define lduw_p(p) lduw_be_p(p)
-#define ldsw_p(p) ldsw_be_p(p)
-#define ldl_p(p) ldl_be_p(p)
-#define ldq_p(p) ldq_be_p(p)
-#define stw_p(p, v) stw_be_p(p, v)
-#define stl_p(p, v) stl_be_p(p, v)
-#define stq_p(p, v) stq_be_p(p, v)
-#define ldn_p(p, sz) ldn_be_p(p, sz)
-#define stn_p(p, sz, v) stn_be_p(p, sz, v)
-#else
-#define lduw_p(p) lduw_le_p(p)
-#define ldsw_p(p) ldsw_le_p(p)
-#define ldl_p(p) ldl_le_p(p)
-#define ldq_p(p) ldq_le_p(p)
-#define stw_p(p, v) stw_le_p(p, v)
-#define stl_p(p, v) stl_le_p(p, v)
-#define stq_p(p, v) stq_le_p(p, v)
-#define ldn_p(p, sz) ldn_le_p(p, sz)
-#define stn_p(p, sz, v) stn_le_p(p, sz, v)
-#endif
-
-/* MMU memory access macros */
-
-#if defined(CONFIG_USER_ONLY)
-#include "user/abitypes.h"
-
-/*
- * If non-zero, the guest virtual address space is a contiguous subset
- * of the host virtual address space, i.e. '-R reserved_va' is in effect
- * either from the command-line or by default. The value is the last
- * byte of the guest address space e.g. UINT32_MAX.
- *
- * If zero, the host and guest virtual address spaces are intermingled.
- */
-extern unsigned long reserved_va;
-
-/*
- * Limit the guest addresses as best we can.
- *
- * When not using -R reserved_va, we cannot really limit the guest
- * to less address space than the host. For 32-bit guests, this
- * acts as a sanity check that we're not giving the guest an address
- * that it cannot even represent. For 64-bit guests... the address
- * might not be what the real kernel would give, but it is at least
- * representable in the guest.
- *
- * TODO: Improve address allocation to avoid this problem, and to
- * avoid setting bits at the top of guest addresses that might need
- * to be used for tags.
- */
-#define GUEST_ADDR_MAX_ \
- ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
- UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
-
-#else
-
-#include "exec/hwaddr.h"
-
-#define SUFFIX
-#define ARG1 as
-#define ARG1_DECL AddressSpace *as
-#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.h.inc"
-
-#define SUFFIX _cached_slow
-#define ARG1 cache
-#define ARG1_DECL MemoryRegionCache *cache
-#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.h.inc"
-
-static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stl_notdirty(as, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-#define SUFFIX
-#define ARG1 as
-#define ARG1_DECL AddressSpace *as
-#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.h.inc"
-
-/* Inline fast path for direct RAM access. */
-#define ENDIANNESS
-#include "exec/memory_ldst_cached.h.inc"
-
-#define SUFFIX _cached
-#define ARG1 cache
-#define ARG1_DECL MemoryRegionCache *cache
-#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.h.inc"
-#endif
-
-/* page related stuff */
-
-#ifdef TARGET_PAGE_BITS_VARY
-# include "exec/page-vary.h"
-extern const TargetPageBits target_page;
-# ifdef CONFIG_DEBUG_TCG
-# define TARGET_PAGE_BITS ({ assert(target_page.decided); \
- target_page.bits; })
-# define TARGET_PAGE_MASK ({ assert(target_page.decided); \
- (target_long)target_page.mask; })
-# else
-# define TARGET_PAGE_BITS target_page.bits
-# define TARGET_PAGE_MASK ((target_long)target_page.mask)
-# endif
-# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
-#else
-# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
-# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-# define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
-#endif
-
-#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
-
-#if defined(CONFIG_USER_ONLY)
-void page_dump(FILE *f);
-
-typedef int (*walk_memory_regions_fn)(void *, target_ulong,
- target_ulong, unsigned long);
-int walk_memory_regions(void *, walk_memory_regions_fn);
-
-int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong last, int flags);
-void page_reset_target_data(target_ulong start, target_ulong last);
-
-/**
- * page_check_range
- * @start: first byte of range
- * @len: length of range
- * @flags: flags required for each page
- *
- * Return true if every page in [@start, @start+@len) has @flags set.
- * Return false if any page is unmapped. Thus testing flags == 0 is
- * equivalent to testing for flags == PAGE_VALID.
- */
-bool page_check_range(target_ulong start, target_ulong last, int flags);
-
-/**
- * page_check_range_empty:
- * @start: first byte of range
- * @last: last byte of range
- * Context: holding mmap lock
- *
- * Return true if the entire range [@start, @last] is unmapped.
- * The memory lock must be held so that the caller will can ensure
- * the result stays true until a new mapping can be installed.
- */
-bool page_check_range_empty(target_ulong start, target_ulong last);
-
-/**
- * page_find_range_empty
- * @min: first byte of search range
- * @max: last byte of search range
- * @len: size of the hole required
- * @align: alignment of the hole required (power of 2)
- *
- * If there is a range [x, x+@len) within [@min, @max] such that
- * x % @align == 0, then return x. Otherwise return -1.
- * The memory lock must be held, as the caller will want to ensure
- * the returned range stays empty until a new mapping can be installed.
- */
-target_ulong page_find_range_empty(target_ulong min, target_ulong max,
- target_ulong len, target_ulong align);
-
-/**
- * page_get_target_data(address)
- * @address: guest virtual address
- *
- * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
- * with the guest page at @address, allocating it if necessary. The
- * caller should already have verified that the address is valid.
- *
- * The memory will be freed when the guest page is deallocated,
- * e.g. with the munmap system call.
- */
-void *page_get_target_data(target_ulong address)
- __attribute__((returns_nonnull));
-#endif
-
-CPUArchState *cpu_copy(CPUArchState *env);
-
-/* Flags for use in ENV->INTERRUPT_PENDING.
-
- The numbers assigned here are non-sequential in order to preserve
- binary compatibility with the vmstate dump. Bit 0 (0x0001) was
- previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
- the vmstate dump. */
-
-/* External hardware interrupt pending. This is typically used for
- interrupts from devices. */
-#define CPU_INTERRUPT_HARD 0x0002
-
-/* Exit the current TB. This is typically used when some system-level device
- makes some change to the memory mapping. E.g. the a20 line change. */
-#define CPU_INTERRUPT_EXITTB 0x0004
-
-/* Halt the CPU. */
-#define CPU_INTERRUPT_HALT 0x0020
-
-/* Debug event pending. */
-#define CPU_INTERRUPT_DEBUG 0x0080
-
-/* Reset signal. */
-#define CPU_INTERRUPT_RESET 0x0400
-
-/* Several target-specific external hardware interrupts. Each target/cpu.h
- should define proper names based on these defines. */
-#define CPU_INTERRUPT_TGT_EXT_0 0x0008
-#define CPU_INTERRUPT_TGT_EXT_1 0x0010
-#define CPU_INTERRUPT_TGT_EXT_2 0x0040
-#define CPU_INTERRUPT_TGT_EXT_3 0x0200
-#define CPU_INTERRUPT_TGT_EXT_4 0x1000
-
-/* Several target-specific internal interrupts. These differ from the
- preceding target-specific interrupts in that they are intended to
- originate from within the cpu itself, typically in response to some
- instruction being executed. These, therefore, are not masked while
- single-stepping within the debugger. */
-#define CPU_INTERRUPT_TGT_INT_0 0x0100
-#define CPU_INTERRUPT_TGT_INT_1 0x0800
-#define CPU_INTERRUPT_TGT_INT_2 0x2000
-
-/* First unused bit: 0x4000. */
-
-/* The set of all bits that should be masked when single-stepping. */
-#define CPU_INTERRUPT_SSTEP_MASK \
- (CPU_INTERRUPT_HARD \
- | CPU_INTERRUPT_TGT_EXT_0 \
- | CPU_INTERRUPT_TGT_EXT_1 \
- | CPU_INTERRUPT_TGT_EXT_2 \
- | CPU_INTERRUPT_TGT_EXT_3 \
- | CPU_INTERRUPT_TGT_EXT_4)
-
-#ifdef CONFIG_USER_ONLY
-
-/*
- * Allow some level of source compatibility with softmmu. We do not
- * support any of the more exotic features, so only invalid pages may
- * be signaled by probe_access_flags().
- */
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
-#define TLB_WATCHPOINT 0
-
-static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return MMU_USER_IDX;
-}
-#else
-
-/*
- * Flags stored in the low bits of the TLB virtual address.
- * These are defined so that fast path ram access is all zeros.
- * The flags all must be between TARGET_PAGE_BITS and
- * maximum address alignment bit.
- *
- * Use TARGET_PAGE_BITS_MIN so that these bits are constant
- * when TARGET_PAGE_BITS_VARY is in effect.
- *
- * The count, if not the placement of these bits is known
- * to tcg/tcg-op-ldst.c, check_max_alignment().
- */
-/* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-/* Set if TLB entry references a clean RAM page. The iotlb entry will
- contain the page physical address. */
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
-/* Set if TLB entry is an IO callback. */
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
-#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-
-/*
- * Use this mask to check interception with an alignment mask
- * in a TCG backend.
- */
-#define TLB_FLAGS_MASK \
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
-
-/*
- * Flags stored in CPUTLBEntryFull.slow_flags[x].
- * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
- */
-/* Set if TLB entry requires byte swap. */
-#define TLB_BSWAP (1 << 0)
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << 1)
-/* Set if TLB entry requires aligned accesses. */
-#define TLB_CHECK_ALIGNED (1 << 2)
-
-#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
-
-/* The two sets of flags must not overlap. */
-QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
-
-/**
- * tlb_hit_page: return true if page aligned @addr is a hit against the
- * TLB entry @tlb_addr
- *
- * @addr: virtual address to test (must be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
-{
- return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
-}
-
-/**
- * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
- *
- * @addr: virtual address to test (need not be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
-{
- return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
-}
-
-#endif /* !CONFIG_USER_ONLY */
-
-/* Validate correct placement of CPUArchState. */
-#include "cpu.h"
-QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
-QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
-
-#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 815342d..a684855 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -9,9 +9,7 @@
#define CPU_COMMON_H
#include "exec/vaddr.h"
-#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
-#endif
#include "hw/core/cpu.h"
#include "tcg/debug-assert.h"
#include "exec/page-protection.h"
@@ -35,23 +33,17 @@ void cpu_list_lock(void);
void cpu_list_unlock(void);
unsigned int cpu_list_generation_id_get(void);
+int cpu_get_free_index(void);
+
void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);
-#if !defined(CONFIG_USER_ONLY)
-
enum device_endian {
DEVICE_NATIVE_ENDIAN,
DEVICE_BIG_ENDIAN,
DEVICE_LITTLE_ENDIAN,
};
-#if HOST_BIG_ENDIAN
-#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
-#else
-#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
-#endif
-
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
@@ -65,7 +57,7 @@ typedef uintptr_t ram_addr_t;
/* memory API */
-void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+void qemu_ram_remap(ram_addr_t addr);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
@@ -129,6 +121,14 @@ size_t qemu_ram_pagesize_largest(void);
*/
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr);
+/**
+ * cpu_address_space_destroy:
+ * @cpu: CPU for which address space needs to be destroyed
+ * @asidx: integer index of this address space
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_destroy(CPUState *cpu, int asidx);
void cpu_physical_memory_rw(hwaddr addr, void *buf,
hwaddr len, bool is_write);
@@ -166,8 +166,6 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
size_t length);
-#endif
-
/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
void *ptr, size_t len, bool is_write);
@@ -176,12 +174,7 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
void list_cpus(void);
#ifdef CONFIG_TCG
-
-bool tcg_cflags_has(CPUState *cpu, uint32_t flags);
-void tcg_cflags_set(CPUState *cpu, uint32_t flags);
-
-/* current cflags for hashing/comparison */
-uint32_t curr_cflags(CPUState *cpu);
+#include "qemu/atomic.h"
/**
* cpu_unwind_state_data:
@@ -189,7 +182,7 @@ uint32_t curr_cflags(CPUState *cpu);
* @host_pc: the host pc within the translation
* @data: output data
*
- * Attempt to load the the unwind state for a host pc occurring in
+ * Attempt to load the unwind state for a host pc occurring in
* translated code. If @host_pc is not in translated code, the
* function returns false; otherwise @data is loaded.
* This is the same unwind info as given to restore_state_to_opc.
@@ -208,6 +201,23 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
*/
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+/**
+ * cpu_loop_exit_requested:
+ * @cpu: The CPU state to be tested
+ *
+ * Indicate if somebody asked for a return of the CPU to the main loop
+ * (e.g., via cpu_exit() or cpu_interrupt()).
+ *
+ * This is helpful for architectures that support interruptible
+ * instructions. After writing back all state to registers/memory, this
+ * call can be used to check if it makes sense to return to the main loop
+ * or to continue executing the interruptible instruction.
+ */
+static inline bool cpu_loop_exit_requested(CPUState *cpu)
+{
+ return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
+}
+
G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
#endif /* CONFIG_TCG */
@@ -229,34 +239,25 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
}
/**
- * env_cpu(env)
+ * env_cpu_const(env)
* @env: The architecture environment
*
* Return the CPUState associated with the environment.
*/
-static inline CPUState *env_cpu(CPUArchState *env)
+static inline const CPUState *env_cpu_const(const CPUArchState *env)
{
return (void *)env - sizeof(CPUState);
}
-#ifndef CONFIG_USER_ONLY
/**
- * cpu_mmu_index:
- * @env: The cpu environment
- * @ifetch: True for code access, false for data access.
- *
- * Return the core mmu index for the current translation regime.
- * This function is used by generic TCG code paths.
+ * env_cpu(env)
+ * @env: The architecture environment
*
- * The user-only version of this function is inline in cpu-all.h,
- * where it always returns MMU_USER_IDX.
+ * Return the CPUState associated with the environment.
*/
-static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+static inline CPUState *env_cpu(CPUArchState *env)
{
- int ret = cs->cc->mmu_index(cs, ifetch);
- tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
- return ret;
+ return (CPUState *)env_cpu_const(env);
}
-#endif /* !CONFIG_USER_ONLY */
#endif /* CPU_COMMON_H */
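
For illustration, a minimal sketch of the pattern the cpu_loop_exit_requested() comment describes; example_more_work_left() and example_do_one_unit() are hypothetical placeholders for an interruptible instruction's work loop.

/* Sketch: re-check for a pending exit after each unit of work.
 * Assumes "qemu/osdep.h" and "exec/cpu-common.h" are included. */
static void example_interruptible_op(CPUState *cs)
{
    while (example_more_work_left(cs)) {    /* hypothetical */
        example_do_one_unit(cs);            /* hypothetical */

        /*
         * All architectural state for the completed units has been
         * written back, so returning to the main loop here is safe;
         * the instruction can be continued or restarted later.
         */
        if (cpu_loop_exit_requested(cs)) {
            break;
        }
    }
}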
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 0dbef30..e01acb7 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -23,14 +23,6 @@
#error cpu.h included from common code
#endif
-#include "qemu/host-utils.h"
-#include "qemu/thread.h"
-#ifndef CONFIG_USER_ONLY
-#include "exec/hwaddr.h"
-#endif
-#include "exec/memattrs.h"
-#include "hw/core/cpu.h"
-
#include "cpu-param.h"
#ifndef TARGET_LONG_BITS
@@ -42,42 +34,10 @@
#ifndef TARGET_VIRT_ADDR_SPACE_BITS
# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
-#ifndef TARGET_PAGE_BITS
-# ifdef TARGET_PAGE_BITS_VARY
-# ifndef TARGET_PAGE_BITS_MIN
-# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
-# endif
-# else
-# error TARGET_PAGE_BITS must be defined in cpu-param.h
-# endif
+#if !defined(TARGET_PAGE_BITS) && !defined(TARGET_PAGE_BITS_VARY)
+# error TARGET_PAGE_BITS must be defined in cpu-param.h
#endif
#include "exec/target_long.h"
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-#define CPU_TLB_DYN_MIN_BITS 6
-#define CPU_TLB_DYN_DEFAULT_BITS 8
-
-# if HOST_LONG_BITS == 32
-/* Make sure we do not require a double-word shift for the TLB load */
-# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
-# else /* HOST_LONG_BITS == 64 */
-/*
- * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
- * 2**34 == 16G of address space. This is roughly what one would expect a
- * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
- * Skylake's Level-2 STLB has 16 1G entries.
- * Also, make sure we do not size the TLB past the guest's address space.
- */
-# ifdef TARGET_PAGE_BITS_VARY
-# define CPU_TLB_DYN_MAX_BITS \
- MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# else
-# define CPU_TLB_DYN_MAX_BITS \
- MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# endif
-# endif
-
-#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-
#endif
diff --git a/include/exec/cpu-interrupt.h b/include/exec/cpu-interrupt.h
new file mode 100644
index 0000000..4071519
--- /dev/null
+++ b/include/exec/cpu-interrupt.h
@@ -0,0 +1,70 @@
+/*
+ * Flags for use with cpu_interrupt()
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef CPU_INTERRUPT_H
+#define CPU_INTERRUPT_H
+
+/*
+ * The numbers assigned here are non-sequential in order to preserve binary
+ * compatibility with the vmstate dump. Bit 0 (0x0001) was previously used
+ * for CPU_INTERRUPT_EXIT, and is cleared when loading the vmstate dump.
+ */
+
+/*
+ * External hardware interrupt pending.
+ * This is typically used for interrupts from devices.
+ */
+#define CPU_INTERRUPT_HARD 0x0002
+
+/*
+ * Exit the current TB. This is typically used when some system-level device
+ * makes some change to the memory mapping. E.g. the a20 line change.
+ */
+#define CPU_INTERRUPT_EXITTB 0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT 0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG 0x0080
+
+/* Reset signal. */
+#define CPU_INTERRUPT_RESET 0x0400
+
+/*
+ * Several target-specific external hardware interrupts. Each target/cpu.h
+ * should define proper names based on these defines.
+ */
+#define CPU_INTERRUPT_TGT_EXT_0 0x0008
+#define CPU_INTERRUPT_TGT_EXT_1 0x0010
+#define CPU_INTERRUPT_TGT_EXT_2 0x0040
+#define CPU_INTERRUPT_TGT_EXT_3 0x0200
+#define CPU_INTERRUPT_TGT_EXT_4 0x1000
+
+/*
+ * Several target-specific internal interrupts. These differ from the
+ * preceding target-specific interrupts in that they are intended to
+ * originate from within the cpu itself, typically in response to some
+ * instruction being executed. These, therefore, are not masked while
+ * single-stepping within the debugger.
+ */
+#define CPU_INTERRUPT_TGT_INT_0 0x0100
+#define CPU_INTERRUPT_TGT_INT_1 0x0800
+#define CPU_INTERRUPT_TGT_INT_2 0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+ (CPU_INTERRUPT_HARD \
+ | CPU_INTERRUPT_TGT_EXT_0 \
+ | CPU_INTERRUPT_TGT_EXT_1 \
+ | CPU_INTERRUPT_TGT_EXT_2 \
+ | CPU_INTERRUPT_TGT_EXT_3 \
+ | CPU_INTERRUPT_TGT_EXT_4)
+
+#endif /* CPU_INTERRUPT_H */
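
For illustration, a short sketch of the convention mentioned in the comment above: a hypothetical target's cpu.h gives the generic TGT_EXT bits architecture-specific names.

/* Sketch, in a hypothetical target/example/cpu.h: */
#define CPU_INTERRUPT_EXAMPLE_TIMER   CPU_INTERRUPT_TGT_EXT_0
#define CPU_INTERRUPT_EXAMPLE_EXTIRQ  CPU_INTERRUPT_TGT_EXT_1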
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
deleted file mode 100644
index 71009f8..0000000
--- a/include/exec/cpu_ldst.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Software MMU support (per-target)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-/*
- * Generate inline load/store functions for all MMU modes (typically
- * at least _user and _kernel) as well as _data versions, for all data
- * sizes.
- *
- * Used by target op helpers.
- *
- * The syntax for the accessors is:
- *
- * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
- * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
- * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
- * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
- *
- * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
- * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
- * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
- * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
- *
- * sign is:
- * (empty): for 32 and 64 bit sizes
- * u : unsigned
- * s : signed
- *
- * size is:
- * b: 8 bits
- * w: 16 bits
- * l: 32 bits
- * q: 64 bits
- *
- * end is:
- * (empty): for target native endian, or for 8 bit access
- * _be: for forced big endian
- * _le: for forced little endian
- *
- * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
- * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
- * the index to use; the "data" and "code" suffixes take the index from
- * cpu_mmu_index().
- *
- * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
- * MemOp including alignment requirements. The alignment will be enforced.
- */
-#ifndef CPU_LDST_H
-#define CPU_LDST_H
-
-#ifndef CONFIG_TCG
-#error Can only include this header with TCG
-#endif
-
-#include "exec/memopidx.h"
-#include "exec/abi_ptr.h"
-#include "exec/mmu-access-type.h"
-#include "qemu/int128.h"
-
-#if defined(CONFIG_USER_ONLY)
-
-#include "user/guest-base.h"
-
-#ifndef TARGET_TAGGED_ADDRESSES
-static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
-{
- return x;
-}
-#endif
-
-/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-static inline void *g2h_untagged(abi_ptr x)
-{
- return (void *)((uintptr_t)(x) + guest_base);
-}
-
-static inline void *g2h(CPUState *cs, abi_ptr x)
-{
- return g2h_untagged(cpu_untagged_addr(cs, x));
-}
-
-static inline bool guest_addr_valid_untagged(abi_ulong x)
-{
- return x <= GUEST_ADDR_MAX;
-}
-
-static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
-{
- return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
-}
-
-#define h2g_valid(x) \
- (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
- (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
-
-#define h2g_nocheck(x) ({ \
- uintptr_t __ret = (uintptr_t)(x) - guest_base; \
- (abi_ptr)__ret; \
-})
-
-#define h2g(x) ({ \
- /* Check if given address fits target address space */ \
- assert(h2g_valid(x)); \
- h2g_nocheck(x); \
-})
-
-#endif /* CONFIG_USER_ONLY */
-
-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
-int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
-void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
-void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
-
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
- uint32_t val, uintptr_t ra);
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
- uint32_t val, uintptr_t ra);
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
- uint32_t val, uintptr_t ra);
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
- uint64_t val, uintptr_t ra);
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
- uint32_t val, uintptr_t ra);
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
- uint32_t val, uintptr_t ra);
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
- uint64_t val, uintptr_t ra);
-
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
- int mmu_idx, uintptr_t ra);
-
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
- int mmu_idx, uintptr_t ra);
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
- int mmu_idx, uintptr_t ra);
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
- MemOpIdx oi, uintptr_t ra);
-void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
- MemOpIdx oi, uintptr_t ra);
-void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
- MemOpIdx oi, uintptr_t ra);
-void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
- MemOpIdx oi, uintptr_t ra);
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t ra);
-
-uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
- uint32_t cmpv, uint32_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
- uint32_t cmpv, uint32_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
- uint32_t cmpv, uint32_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
- uint64_t cmpv, uint64_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
- uint32_t cmpv, uint32_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
- uint32_t cmpv, uint32_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
- uint64_t cmpv, uint64_t newv,
- MemOpIdx oi, uintptr_t retaddr);
-
-#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
-TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
- (CPUArchState *env, abi_ptr addr, TYPE val, \
- MemOpIdx oi, uintptr_t retaddr);
-
-#ifdef CONFIG_ATOMIC64
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
-#else
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
-#endif
-
-GEN_ATOMIC_HELPER_ALL(fetch_add)
-GEN_ATOMIC_HELPER_ALL(fetch_sub)
-GEN_ATOMIC_HELPER_ALL(fetch_and)
-GEN_ATOMIC_HELPER_ALL(fetch_or)
-GEN_ATOMIC_HELPER_ALL(fetch_xor)
-GEN_ATOMIC_HELPER_ALL(fetch_smin)
-GEN_ATOMIC_HELPER_ALL(fetch_umin)
-GEN_ATOMIC_HELPER_ALL(fetch_smax)
-GEN_ATOMIC_HELPER_ALL(fetch_umax)
-
-GEN_ATOMIC_HELPER_ALL(add_fetch)
-GEN_ATOMIC_HELPER_ALL(sub_fetch)
-GEN_ATOMIC_HELPER_ALL(and_fetch)
-GEN_ATOMIC_HELPER_ALL(or_fetch)
-GEN_ATOMIC_HELPER_ALL(xor_fetch)
-GEN_ATOMIC_HELPER_ALL(smin_fetch)
-GEN_ATOMIC_HELPER_ALL(umin_fetch)
-GEN_ATOMIC_HELPER_ALL(smax_fetch)
-GEN_ATOMIC_HELPER_ALL(umax_fetch)
-
-GEN_ATOMIC_HELPER_ALL(xchg)
-
-#undef GEN_ATOMIC_HELPER_ALL
-#undef GEN_ATOMIC_HELPER
-
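To make the macro above concrete, GEN_ATOMIC_HELPER_ALL(fetch_add) expands (in the CONFIG_ATOMIC64 case) to prototypes of the following shape; the w_be, l_le and l_be variants follow the same pattern:

    uint32_t cpu_atomic_fetch_addb_mmu(CPUArchState *env, abi_ptr addr,
                                       uint32_t val, MemOpIdx oi, uintptr_t retaddr);
    uint32_t cpu_atomic_fetch_addw_le_mmu(CPUArchState *env, abi_ptr addr,
                                          uint32_t val, MemOpIdx oi, uintptr_t retaddr);
    uint64_t cpu_atomic_fetch_addq_be_mmu(CPUArchState *env, abi_ptr addr,
                                          uint64_t val, MemOpIdx oi, uintptr_t retaddr);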
-Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
- Int128 cmpv, Int128 newv,
- MemOpIdx oi, uintptr_t retaddr);
-Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
- Int128 cmpv, Int128 newv,
- MemOpIdx oi, uintptr_t retaddr);
-
-#if TARGET_BIG_ENDIAN
-# define cpu_lduw_data cpu_lduw_be_data
-# define cpu_ldsw_data cpu_ldsw_be_data
-# define cpu_ldl_data cpu_ldl_be_data
-# define cpu_ldq_data cpu_ldq_be_data
-# define cpu_lduw_data_ra cpu_lduw_be_data_ra
-# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra
-# define cpu_ldl_data_ra cpu_ldl_be_data_ra
-# define cpu_ldq_data_ra cpu_ldq_be_data_ra
-# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra
-# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
-# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
-# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
-# define cpu_stw_data cpu_stw_be_data
-# define cpu_stl_data cpu_stl_be_data
-# define cpu_stq_data cpu_stq_be_data
-# define cpu_stw_data_ra cpu_stw_be_data_ra
-# define cpu_stl_data_ra cpu_stl_be_data_ra
-# define cpu_stq_data_ra cpu_stq_be_data_ra
-# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
-# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
-# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
-#else
-# define cpu_lduw_data cpu_lduw_le_data
-# define cpu_ldsw_data cpu_ldsw_le_data
-# define cpu_ldl_data cpu_ldl_le_data
-# define cpu_ldq_data cpu_ldq_le_data
-# define cpu_lduw_data_ra cpu_lduw_le_data_ra
-# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra
-# define cpu_ldl_data_ra cpu_ldl_le_data_ra
-# define cpu_ldq_data_ra cpu_ldq_le_data_ra
-# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra
-# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
-# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
-# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
-# define cpu_stw_data cpu_stw_le_data
-# define cpu_stl_data cpu_stl_le_data
-# define cpu_stq_data cpu_stq_le_data
-# define cpu_stw_data_ra cpu_stw_le_data_ra
-# define cpu_stl_data_ra cpu_stl_le_data_ra
-# define cpu_stq_data_ra cpu_stq_le_data_ra
-# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
-# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
-# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
-#endif
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra);
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra);
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra);
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra);
-
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
-
-/**
- * tlb_vaddr_to_host:
- * @env: CPUArchState
- * @addr: guest virtual address to look up
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index to use for lookup
- *
- * Look up the specified guest virtual index in the TCG softmmu TLB.
- * If we can translate a host virtual address suitable for direct RAM
- * access, without causing a guest exception, then return it.
- * Otherwise (TLB entry is for an I/O access, guest software
- * TLB fill required, etc) return NULL.
- */
-#ifdef CONFIG_USER_ONLY
-static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- MMUAccessType access_type, int mmu_idx)
-{
- return g2h(env_cpu(env), addr);
-}
-#else
-void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- MMUAccessType access_type, int mmu_idx);
-#endif
-
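A common pattern for tlb_vaddr_to_host() (illustrative sketch; TARGET_PAGE_SIZE, MMU_DATA_STORE, a page-aligned addr and the unwind address ra are assumed from the caller): take the direct-RAM fast path when possible, otherwise fall back to the accessors that can fault or perform MMIO.

    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (host) {
        memset(host, 0, TARGET_PAGE_SIZE);          /* plain RAM: write directly */
    } else {
        for (int i = 0; i < TARGET_PAGE_SIZE; i += 8) {
            cpu_stq_data_ra(env, addr + i, 0, ra);  /* may fault or hit MMIO */
        }
    }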
-#endif /* CPU_LDST_H */
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index ef18642..03ed7e2 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -21,15 +21,266 @@
#define CPUTLB_H
#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/vaddr.h"
-#ifdef CONFIG_TCG
-
-#if !defined(CONFIG_USER_ONLY)
-/* cputlb.c */
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
#endif
-#endif /* CONFIG_TCG */
-
+#ifndef CONFIG_USER_ONLY
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length);
+void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
#endif
+
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @addr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+ CPUTLBEntryFull *full);
+
+/**
+ * tlb_set_page_with_attrs:
+ * @cpu: CPU to add this TLB entry for
+ * @addr: virtual address of page to add entry for
+ * @paddr: physical address of the page
+ * @attrs: memory transaction attributes
+ * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @mmu_idx: MMU index to insert TLB entry for
+ * @size: size of the page in bytes
+ *
+ * Add an entry to this CPU's TLB (a mapping from virtual address
+ * @addr to physical address @paddr) with the specified memory
+ * transaction attributes. This is generally called by the target CPU
+ * specific code after it has been called through the tlb_fill()
+ * entry point and performed a successful page table walk to find
+ * the physical address and attributes for the virtual address
+ * which provoked the TLB miss.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
+ hwaddr paddr, MemTxAttrs attrs,
+ int prot, int mmu_idx, vaddr size);
+
+/**
+ * tlb_set_page:
+ *
+ * This function is equivalent to calling tlb_set_page_with_attrs()
+ * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
+ * as a convenience for CPUs which don't use memory transaction attributes.
+ */
+void tlb_set_page(CPUState *cpu, vaddr addr,
+ hwaddr paddr, int prot,
+ int mmu_idx, vaddr size);
+
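As an illustration of the contract described above, a target's tlb_fill-style hook usually ends a successful page-table walk along these lines (a sketch, not part of the header; cs, phys_addr, prot and mmu_idx come from the hypothetical walk):

    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            phys_addr & TARGET_PAGE_MASK,
                            MEMTXATTRS_UNSPECIFIED, prot,
                            mmu_idx, TARGET_PAGE_SIZE);
    return true;    /* translation installed, no guest fault raised */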
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
+void tlb_flush_page(CPUState *cpu, vaddr addr);
+
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
+
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ *
+ * Flush the entire TLB for the specified CPU. Most CPU architectures
+ * allow the implementation to drop entries from the TLB at any time
+ * so this is generally safe. If more selective flushing is required
+ * use one of the other functions for efficiency.
+ */
+void tlb_flush(CPUState *cpu);
+
+/**
+ * tlb_flush_all_cpus_synced:
+ * @src_cpu: source CPU of the flush
+ *
+ * Flush the entire TLB for all CPUs, for all MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_all_cpus_synced(CPUState *src_cpu);
+
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap);
+
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Similar to tlb_flush_page_by_mmuidx, but the lookup compares only
+ * the low @bits of each virtual address.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap,
+ unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits);
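For example (sketch; MMU_USER_IDX and MMU_KERNEL_IDX are illustrative target-defined indexes), invalidating one guest page for two MMU indexes:

    uint16_t idxmap = (1u << MMU_USER_IDX) | (1u << MMU_KERNEL_IDX);

    tlb_flush_page_by_mmuidx(cs, page, idxmap);               /* this vCPU only */
    /* or, when the other vCPUs must also drop the translation: */
    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, page, idxmap);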
+#else
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
+{
+}
+static inline void tlb_flush(CPUState *cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ vaddr addr, uint16_t idxmap)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
+#endif /* CPUTLB_H */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
deleted file mode 100644
index b6b46ad..0000000
--- a/include/exec/exec-all.h
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * internal execution defines for qemu
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef EXEC_ALL_H
-#define EXEC_ALL_H
-
-#include "cpu.h"
-#if defined(CONFIG_USER_ONLY)
-#include "exec/abi_ptr.h"
-#include "exec/cpu_ldst.h"
-#endif
-#include "exec/mmu-access-type.h"
-#include "exec/translation-block.h"
-#include "qemu/clang-tsa.h"
-
-/**
- * cpu_loop_exit_requested:
- * @cpu: The CPU state to be tested
- *
- * Indicate if somebody asked for a return of the CPU to the main loop
- * (e.g., via cpu_exit() or cpu_interrupt()).
- *
- * This is helpful for architectures that support interruptible
- * instructions. After writing back all state to registers/memory, this
- * call can be used to check if it makes sense to return to the main loop
- * or to continue executing the interruptible instruction.
- */
-static inline bool cpu_loop_exit_requested(CPUState *cpu)
-{
- return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
-}
-
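A hedged sketch of the pattern described above: an interruptible, iterative helper writes its progress back to env each step and leaves the loop once an exit has been requested (demo_count and do_one_element() are illustrative names).

    while (env->demo_count) {
        do_one_element(env);                      /* progress saved in env */
        env->demo_count--;
        if (cpu_loop_exit_requested(env_cpu(env))) {
            break;       /* return to the main loop; resume on next entry */
        }
    }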
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-/* cputlb.c */
-/**
- * tlb_init - initialize a CPU's TLB
- * @cpu: CPU whose TLB should be initialized
- */
-void tlb_init(CPUState *cpu);
-/**
- * tlb_destroy - destroy a CPU's TLB
- * @cpu: CPU whose TLB should be destroyed
- */
-void tlb_destroy(CPUState *cpu);
-/**
- * tlb_flush_page:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of the specified CPU, for all
- * MMU indexes.
- */
-void tlb_flush_page(CPUState *cpu, vaddr addr);
-/**
- * tlb_flush_page_all_cpus_synced:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of all CPUs, for all
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
-/**
- * tlb_flush:
- * @cpu: CPU whose TLB should be flushed
- *
- * Flush the entire TLB for the specified CPU. Most CPU architectures
- * allow the implementation to drop entries from the TLB at any time
- * so this is generally safe. If more selective flushing is required
- * use one of the other functions for efficiency.
- */
-void tlb_flush(CPUState *cpu);
-/**
- * tlb_flush_all_cpus_synced:
- * @cpu: src CPU of the flush
- *
- * Flush the entire TLB for all CPUs, for all MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_all_cpus_synced(CPUState *src_cpu);
-/**
- * tlb_flush_page_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
- uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
- uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @wait: If true ensure synchronisation by exiting the cpu_loop
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
-
-/**
- * tlb_flush_page_bits_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
- */
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
- (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
-
-/**
- * tlb_flush_range_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of the start of the range to be flushed
- * @len: length of range to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
- * comparing only the low @bits worth of each virtual page.
- */
-void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
- unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- vaddr addr,
- vaddr len,
- uint16_t idxmap,
- unsigned bits);
-
-/**
- * tlb_set_page_full:
- * @cpu: CPU context
- * @mmu_idx: mmu index of the tlb to modify
- * @addr: virtual address of the entry to add
- * @full: the details of the tlb entry
- *
- * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
- * @full must be filled, except for xlat_section, and constitute
- * the complete description of the translated page.
- *
- * This is generally called by the target tlb_fill function after
- * having performed a successful page table walk to find the physical
- * address and attributes for the translation.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
- CPUTLBEntryFull *full);
-
-/**
- * tlb_set_page_with_attrs:
- * @cpu: CPU to add this TLB entry for
- * @addr: virtual address of page to add entry for
- * @paddr: physical address of the page
- * @attrs: memory transaction attributes
- * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
- * @mmu_idx: MMU index to insert TLB entry for
- * @size: size of the page in bytes
- *
- * Add an entry to this CPU's TLB (a mapping from virtual address
- * @addr to physical address @paddr) with the specified memory
- * transaction attributes. This is generally called by the target CPU
- * specific code after it has been called through the tlb_fill()
- * entry point and performed a successful page table walk to find
- * the physical address and attributes for the virtual address
- * which provoked the TLB miss.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
- hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, vaddr size);
-/* tlb_set_page:
- *
- * This function is equivalent to calling tlb_set_page_with_attrs()
- * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
- * as a convenience for CPUs which don't use memory transaction attributes.
- */
-void tlb_set_page(CPUState *cpu, vaddr addr,
- hwaddr paddr, int prot,
- int mmu_idx, vaddr size);
-#else
-static inline void tlb_init(CPUState *cpu)
-{
-}
-static inline void tlb_destroy(CPUState *cpu)
-{
-}
-static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
-{
-}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
-{
-}
-static inline void tlb_flush(CPUState *cpu)
-{
-}
-static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
-{
-}
-static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- vaddr addr, uint16_t idxmap)
-{
-}
-
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
- vaddr addr,
- uint16_t idxmap)
-{
-}
-static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
- uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
- vaddr addr,
- uint16_t idxmap,
- unsigned bits)
-{
-}
-static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
- unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
- vaddr addr,
- vaddr len,
- uint16_t idxmap,
- unsigned bits)
-{
-}
-#endif
-/**
- * probe_access:
- * @env: CPUArchState
- * @addr: guest virtual address to look up
- * @size: size of the access
- * @access_type: read, write or execute permission
- * @mmu_idx: MMU index to use for lookup
- * @retaddr: return address for unwinding
- *
- * Look up the guest virtual address @addr. Raise an exception if the
- * page does not satisfy @access_type. Raise an exception if the
- * access (@addr, @size) hits a watchpoint. For writes, mark a clean
- * page as dirty.
- *
- * Finally, return the host address for a page that is backed by RAM,
- * or NULL if the page requires I/O.
- */
-void *probe_access(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-
-static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
- int mmu_idx, uintptr_t retaddr)
-{
- return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
-}
-
-static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
- int mmu_idx, uintptr_t retaddr)
-{
- return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
-}
-
-/**
- * probe_access_flags:
- * @env: CPUArchState
- * @addr: guest virtual address to look up
- * @size: size of the access
- * @access_type: read, write or execute permission
- * @mmu_idx: MMU index to use for lookup
- * @nonfault: suppress the fault
- * @phost: return value for host address
- * @retaddr: return address for unwinding
- *
- * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
- * the page, and storing the host address for RAM in @phost.
- *
- * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
- * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
- * Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags.
- * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
- */
-int probe_access_flags(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t retaddr);
-
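As an illustration of the @nonfault case: a first-fault style access probes the page without raising an exception and inspects the returned flags (TLB_INVALID_MASK is the flag named above; mmu_idx and ra are assumed from the caller).

    void *host;
    int flags = probe_access_flags(env, addr, 8, MMU_DATA_LOAD,
                                   mmu_idx, true, &host, ra);
    if (flags & TLB_INVALID_MASK) {
        /* access would fault: record that and stop instead of trapping */
    }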
-#ifndef CONFIG_USER_ONLY
-/**
- * probe_access_full:
- * Like probe_access_flags, except also return into @pfull.
- *
- * The CPUTLBEntryFull structure returned via @pfull is transient
- * and must be consumed or copied immediately, before any further
- * access or changes to TLB @mmu_idx.
- */
-int probe_access_full(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost,
- CPUTLBEntryFull **pfull, uintptr_t retaddr);
-
-/**
- * probe_access_mmu() - Like probe_access_full except cannot fault and
- * doesn't trigger instrumentation.
- *
- * @env: CPUArchState
- * @vaddr: virtual address to probe
- * @size: size of the probe
- * @access_type: read, write or execute permission
- * @mmu_idx: softmmu index
- * @phost: ptr to return value host address or NULL
- * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
- *
- * The CPUTLBEntryFull structure returned via @pfull is transient
- * and must be consumed or copied immediately, before any further
- * access or changes to TLB @mmu_idx.
- *
- * Returns: TLB flags as per probe_access_flags()
- */
-int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- void **phost, CPUTLBEntryFull **pfull);
-
-#endif
-
-static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
-{
-#ifdef CONFIG_USER_ONLY
- return tb->itree.start;
-#else
- return tb->page_addr[0];
-#endif
-}
-
-static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
-{
-#ifdef CONFIG_USER_ONLY
- tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
- return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
-#else
- return tb->page_addr[1];
-#endif
-}
-
-static inline void tb_set_page_addr0(TranslationBlock *tb,
- tb_page_addr_t addr)
-{
-#ifdef CONFIG_USER_ONLY
- tb->itree.start = addr;
- /*
- * To begin, we record an interval of one byte. When the translation
- * loop encounters a second page, the interval will be extended to
- * include the first byte of the second page, which is sufficient to
- * allow tb_page_addr1() above to work properly. The final corrected
- * interval will be set by tb_page_add() from tb->size before the
- * node is added to the interval tree.
- */
- tb->itree.last = addr;
-#else
- tb->page_addr[0] = addr;
-#endif
-}
-
-static inline void tb_set_page_addr1(TranslationBlock *tb,
- tb_page_addr_t addr)
-{
-#ifdef CONFIG_USER_ONLY
- /* Extend the interval to the first byte of the second page. See above. */
- tb->itree.last = addr;
-#else
- tb->page_addr[1] = addr;
-#endif
-}
-
-/* TranslationBlock invalidate API */
-void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
-void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
-
-/* GETPC is the true target of the return instruction that we'll execute. */
-#if defined(CONFIG_TCG_INTERPRETER)
-extern __thread uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-#else
-# define GETPC() \
- ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
-#endif
-
-/* The true return address will often point to a host insn that is part of
- the next translated guest insn. Adjust the address backward to point to
- the middle of the call insn. Subtracting one would do the job except for
- several compressed mode architectures (arm, mips) which set the low bit
- to indicate the compressed mode; subtracting two works around that. It
- is also the case that there are no host isas that contain a call insn
- smaller than 4 bytes, so we don't worry about special-casing this. */
-#define GETPC_ADJ 2
-
-#if !defined(CONFIG_USER_ONLY)
-
-/**
- * iotlb_to_section:
- * @cpu: CPU performing the access
- * @index: TCG CPU IOTLB entry
- *
- * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
- * it refers to. @index will have been initially created and returned
- * by memory_region_section_get_iotlb().
- */
-struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
- hwaddr index, MemTxAttrs attrs);
-#endif
-
-/**
- * get_page_addr_code_hostp()
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
- *
- * See get_page_addr_code() (full-system version) for documentation on the
- * return value.
- *
- * Sets *@hostp (when @hostp is non-NULL) as follows.
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
- * to the host address where @addr's content is kept.
- *
- * Note: this function can trigger an exception.
- */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
- void **hostp);
-
-/**
- * get_page_addr_code()
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
- *
- * If we cannot translate and execute from the entire RAM page, or if
- * the region is not backed by RAM, returns -1. Otherwise, returns the
- * ram_addr_t corresponding to the guest code at @addr.
- *
- * Note: this function can trigger an exception.
- */
-static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
- vaddr addr)
-{
- return get_page_addr_code_hostp(env, addr, NULL);
-}
-
-#if defined(CONFIG_USER_ONLY)
-void TSA_NO_TSA mmap_lock(void);
-void TSA_NO_TSA mmap_unlock(void);
-bool have_mmap_lock(void);
-
-static inline void mmap_unlock_guard(void *unused)
-{
- mmap_unlock();
-}
-
-#define WITH_MMAP_LOCK_GUARD() \
- for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
- = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
-
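Usage sketch for the guard above: the lock is held for exactly the attached block and released on every exit path (walk_guest_mappings() is an illustrative callee).

    WITH_MMAP_LOCK_GUARD() {
        walk_guest_mappings(cpu);    /* mmap_lock held throughout this block */
    }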
-/**
- * adjust_signal_pc:
- * @pc: raw pc from the host signal ucontext_t.
- * @is_write: host memory operation was write, or read-modify-write.
- *
- * Alter @pc as required for unwinding. Return the type of the
- * guest memory access -- host reads may be for guest execution.
- */
-MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
-
-/**
- * handle_sigsegv_accerr_write:
- * @cpu: the cpu context
- * @old_set: the sigset_t from the signal ucontext_t
- * @host_pc: the host pc, adjusted for the signal
- * @host_addr: the host address of the fault
- *
- * Return true if the write fault has been handled, and should be re-tried.
- */
-bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
- uintptr_t host_pc, abi_ptr guest_addr);
-
-/**
- * cpu_loop_exit_sigsegv:
- * @cpu: the cpu context
- * @addr: the guest address of the fault
- * @access_type: access was read/write/execute
- * @maperr: true for invalid page, false for permission fault
- * @ra: host pc for unwinding
- *
- * Use the TCGCPUOps hook to record cpu state, do guest operating system
- * specific things to raise SIGSEGV, and jump to the main cpu loop.
- */
-G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
- MMUAccessType access_type,
- bool maperr, uintptr_t ra);
-
-/**
- * cpu_loop_exit_sigbus:
- * @cpu: the cpu context
- * @addr: the guest address of the alignment fault
- * @access_type: access was read/write/execute
- * @ra: host pc for unwinding
- *
- * Use the TCGCPUOps hook to record cpu state, do guest operating system
- * specific things to raise SIGBUS, and jump to the main cpu loop.
- */
-G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
- MMUAccessType access_type,
- uintptr_t ra);
-
-#else
-static inline void mmap_lock(void) {}
-static inline void mmap_unlock(void) {}
-#define WITH_MMAP_LOCK_GUARD()
-
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
-
-MemoryRegionSection *
-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
- hwaddr *xlat, hwaddr *plen,
- MemTxAttrs attrs, int *prot);
-hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section);
-#endif
-
-#endif
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index 1bd2c4e..0675b0b 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -41,14 +41,26 @@ void gdb_register_coprocessor(CPUState *cpu,
const GDBFeature *feature, int g_pos);
/**
+ * gdb_unregister_coprocessor_all() - unregister all supplemental register sets
+ * @cpu: the CPU whose supplemental register sets are removed
+ */
+void gdb_unregister_coprocessor_all(CPUState *cpu);
+
+/**
* gdbserver_start: start the gdb server
* @port_or_device: connection spec for gdb
+ * @errp: error handle
*
* For CONFIG_USER this is either a tcp port or a path to a fifo. For
* system emulation you can use a full chardev spec for your gdbserver
* port.
+ *
+ * The error handle should be either &error_fatal (for start-up) or
+ * &error_warn (for QMP/HMP initiated sessions).
+ *
+ * Returns true when the server was started successfully.
*/
-int gdbserver_start(const char *port_or_device);
+bool gdbserver_start(const char *port_or_device, Error **errp);
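A minimal sketch of callers updated for the new signature, using the error handles suggested above ("tcp::1234" and device are placeholder values):

    /* at start-up: exit on failure */
    gdbserver_start("tcp::1234", &error_fatal);

    /* from a QMP/HMP handler: warn, then report failure to the caller */
    if (!gdbserver_start(device, &error_warn)) {
        return false;
    }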
/**
* gdb_feature_builder_init() - Initialize GDBFeatureBuilder.
diff --git a/include/exec/helper-head.h.inc b/include/exec/helper-head.h.inc
index 5ef467a..5b248fd 100644
--- a/include/exec/helper-head.h.inc
+++ b/include/exec/helper-head.h.inc
@@ -23,6 +23,7 @@
#define dh_alias_ptr ptr
#define dh_alias_cptr ptr
#define dh_alias_env ptr
+#define dh_alias_fpst ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
@@ -39,6 +40,7 @@
#define dh_ctype_ptr void *
#define dh_ctype_cptr const void *
#define dh_ctype_env CPUArchState *
+#define dh_ctype_fpst float_status *
#define dh_ctype_void void
#define dh_ctype_noreturn G_NORETURN void
#define dh_ctype(t) dh_ctype_##t
@@ -56,6 +58,17 @@
# define dh_ctype_tl target_ulong
#endif /* COMPILING_PER_TARGET */
+#if __SIZEOF_POINTER__ == 4
+# define dh_alias_vaddr i32
+# define dh_typecode_vaddr dh_typecode_i32
+#elif __SIZEOF_POINTER__ == 8
+# define dh_alias_vaddr i64
+# define dh_typecode_vaddr dh_typecode_i64
+#else
+# error "sizeof pointer is different from {4,8}"
+#endif /* __SIZEOF_POINTER__ */
+# define dh_ctype_vaddr uintptr_t
+
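Putting the new entries together, with dh_alias(t)/dh_ctype(t) expanding to dh_alias_##t and dh_ctype_##t as defined above:

    /*   dh_ctype(fpst)  -> float_status *
     *   dh_ctype(vaddr) -> uintptr_t
     *   dh_alias(fpst)  -> ptr
     *   dh_alias(vaddr) -> i32 on 32-bit hosts, i64 on 64-bit hosts
     */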
/* We can't use glue() here because it falls foul of C preprocessor
recursive expansion rules. */
#define dh_retvar_decl0_void void
@@ -96,6 +109,7 @@
#define dh_typecode_f64 dh_typecode_i64
#define dh_typecode_cptr dh_typecode_ptr
#define dh_typecode_env dh_typecode_ptr
+#define dh_typecode_fpst dh_typecode_ptr
#define dh_typecode(t) dh_typecode_##t
#define dh_callflag_i32 0
diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h
index 16782ef..76e6c25 100644
--- a/include/exec/helper-proto-common.h
+++ b/include/exec/helper-proto-common.h
@@ -13,4 +13,6 @@
#include "exec/helper-proto.h.inc"
#undef HELPER_H
+#include "accel/tcg/getpc.h"
+
#endif /* HELPER_PROTO_COMMON_H */
diff --git a/include/exec/icount.h b/include/exec/icount.h
new file mode 100644
index 0000000..7a26b40
--- /dev/null
+++ b/include/exec/icount.h
@@ -0,0 +1,76 @@
+/*
+ * icount - Instruction Counter API
+ * CPU timers state API
+ *
+ * Copyright 2020 SUSE LLC
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef EXEC_ICOUNT_H
+#define EXEC_ICOUNT_H
+
+/**
+ * ICountMode: icount enablement state:
+ *
+ * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
+ * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
+ * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
+ */
+typedef enum {
+ ICOUNT_DISABLED = 0,
+ ICOUNT_PRECISE,
+ ICOUNT_ADAPTATIVE,
+} ICountMode;
+
+#ifdef CONFIG_TCG
+extern ICountMode use_icount;
+#define icount_enabled() (use_icount)
+#else
+#define icount_enabled() ICOUNT_DISABLED
+#endif
+
+/* Protect the CONFIG_USER_ONLY test vs poisoning. */
+#if defined(COMPILING_PER_TARGET) || defined(COMPILING_SYSTEM_VS_USER)
+# ifdef CONFIG_USER_ONLY
+# undef icount_enabled
+# define icount_enabled() ICOUNT_DISABLED
+# endif
+#endif
+
+/*
+ * Update the icount with the executed instructions. Called by
+ * cpus-tcg vCPU thread so the main-loop can see time has moved forward.
+ */
+void icount_update(CPUState *cpu);
+
+/* get raw icount value */
+int64_t icount_get_raw(void);
+
+/* return the virtual CPU time in ns, based on the instruction counter. */
+int64_t icount_get(void);
+/*
+ * convert an instruction counter value to ns, based on the icount shift.
+ * This shift is set as a fixed value with the icount "shift" option
+ * (precise mode), or it is constantly approximated and corrected at
+ * runtime in adaptive mode.
+ */
+int64_t icount_to_ns(int64_t icount);
+
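A minimal sketch of a consumer of this API; the non-icount fallback assumes qemu_clock_get_ns() from qemu/timer.h:

    static int64_t demo_virtual_time_ns(void)
    {
        if (icount_enabled()) {
            return icount_get();   /* ns derived from executed instructions */
        }
        return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }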
+/**
+ * icount_configure: configure the icount options, including "shift"
+ * @opts: Options to parse
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool icount_configure(QemuOpts *opts, Error **errp);
+
+/* used by the TCG vCPU thread to calculate the icount budget */
+int64_t icount_round(int64_t count);
+
+/* if the CPUs are idle, start accounting real time to virtual clock. */
+void icount_start_warp_timer(void);
+void icount_account_warp_timer(void);
+void icount_notify_exit(void);
+
+#endif /* EXEC_ICOUNT_H */
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
deleted file mode 100644
index 4397f12..0000000
--- a/include/exec/ioport.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * defines ioport related functions
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************************
- * IO ports API
- */
-
-#ifndef IOPORT_H
-#define IOPORT_H
-
-#include "exec/memory.h"
-
-#define MAX_IOPORTS (64 * 1024)
-#define IOPORTS_MASK (MAX_IOPORTS - 1)
-
-typedef struct MemoryRegionPortio {
- uint32_t offset;
- uint32_t len;
- unsigned size;
- uint32_t (*read)(void *opaque, uint32_t address);
- void (*write)(void *opaque, uint32_t address, uint32_t data);
-} MemoryRegionPortio;
-
-#define PORTIO_END_OF_LIST() { }
-
-#ifndef CONFIG_USER_ONLY
-extern const MemoryRegionOps unassigned_io_ops;
-#endif
-
-void cpu_outb(uint32_t addr, uint8_t val);
-void cpu_outw(uint32_t addr, uint16_t val);
-void cpu_outl(uint32_t addr, uint32_t val);
-uint8_t cpu_inb(uint32_t addr);
-uint16_t cpu_inw(uint32_t addr);
-uint32_t cpu_inl(uint32_t addr);
-
-typedef struct PortioList {
- const struct MemoryRegionPortio *ports;
- Object *owner;
- struct MemoryRegion *address_space;
- uint32_t addr;
- unsigned nr;
- struct MemoryRegion **regions;
- void *opaque;
- const char *name;
- bool flush_coalesced_mmio;
-} PortioList;
-
-void portio_list_init(PortioList *piolist, Object *owner,
- const struct MemoryRegionPortio *callbacks,
- void *opaque, const char *name);
-void portio_list_set_flush_coalesced(PortioList *piolist);
-void portio_list_destroy(PortioList *piolist);
-void portio_list_add(PortioList *piolist,
- struct MemoryRegion *address_space,
- uint32_t addr);
-void portio_list_del(PortioList *piolist);
-void portio_list_set_enabled(PortioList *piolist, bool enabled);
-void portio_list_set_address(PortioList *piolist, uint32_t addr);
-
-#endif /* IOPORT_H */
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index 14cdd8d..8db1d30 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -23,12 +23,6 @@
* different semantics.
*/
typedef struct MemTxAttrs {
- /* Bus masters which don't specify any attributes will get this
- * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
- * distinguish "all attributes deliberately clear" from
- * "didn't specify" if necessary.
- */
- unsigned int unspecified:1;
/*
* ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
@@ -50,16 +44,37 @@ typedef struct MemTxAttrs {
* (see MEMTX_ACCESS_ERROR).
*/
unsigned int memory:1;
+ /* Debug access that can even write to ROM. */
+ unsigned int debug:1;
/* Requester ID (for MSI for example) */
unsigned int requester_id:16;
+
+ /*
+ * PID (PCI PASID) support: limited to an 8-bit process identifier.
+ */
+ unsigned int pid:8;
+
+ /*
+ * Bus masters which don't specify any attributes will get this
+ * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
+ * distinguish "all attributes deliberately clear" from
+ * "didn't specify" if necessary. "debug" can be set alongside
+ * "unspecified".
+ */
+ bool unspecified;
+
+ uint8_t _reserved1;
+ uint16_t _reserved2;
} MemTxAttrs;
+QEMU_BUILD_BUG_ON(sizeof(MemTxAttrs) > 8);
+
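For illustration, a bus master that does specify attributes fills the fields directly instead of using MEMTXATTRS_UNSPECIFIED (the pci_requester_id() helper and the pasid value are assumptions about the caller):

    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(pci_dev),
        .pid = pasid & 0xff,        /* PCI PASID, limited to 8 bits here */
    };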
/* Bus masters which don't specify any attributes will get this,
* which has all attribute bits clear except the topmost one
* (so that we can distinguish "all attributes deliberately clear"
* from "didn't specify" if necessary).
*/
-#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
+#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = true })
/* New-style MMIO accessors can indicate that the transaction failed.
* A zero (MEMTX_OK) response means success; anything else is a failure
diff --git a/include/exec/memop.h b/include/exec/memop.h
index f881fe7..cf7da33 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -91,8 +91,12 @@ typedef enum MemOp {
* Depending on alignment, one or both will be single-copy atomic.
* This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
* MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts
- * by the alignment. E.g. if the address is 0 mod 4, then each
- * 4-byte subobject is single-copy atomic.
+ * by the alignment. E.g. if an 8-byte value is accessed at an
+ * address which is 0 mod 8, then the whole 8-byte access is
+ * single-copy atomic; otherwise, if it is accessed at 0 mod 4
+ * then each 4-byte subobject is single-copy atomic; otherwise
+ * if it is accessed at 0 mod 2 then the four 2-byte subobjects
+ * are single-copy atomic.
* This is the atomicity e.g. of IBM Power.
* MO_ATOM_NONE: the operation has no atomicity requirements.
*
@@ -158,16 +162,57 @@ static inline unsigned memop_size(MemOp op)
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
- /* Power of 2 up to 8. */
- assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
+ /* Power of 2 up to 1024 */
+ assert(is_power_of_2(size) && size >= 1 && size <= (1 << MO_SIZE));
#endif
return (MemOp)ctz32(size);
}
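For example, assuming the usual MO_* size encoding, size_memop(1) == MO_8, size_memop(4) == MO_32 and size_memop(128) == MO_1024, since the function simply returns log2 of the byte count.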
-/* Big endianness from MemOp. */
-static inline bool memop_big_endian(MemOp op)
+/**
+ * memop_alignment_bits:
+ * @memop: MemOp value
+ *
+ * Extract the alignment size from the memop.
+ */
+static inline unsigned memop_alignment_bits(MemOp memop)
+{
+ unsigned a = memop & MO_AMASK;
+
+ if (a == MO_UNALN) {
+ /* No alignment required. */
+ a = 0;
+ } else if (a == MO_ALIGN) {
+ /* A natural alignment requirement. */
+ a = memop & MO_SIZE;
+ } else {
+ /* A specific alignment requirement. */
+ a = a >> MO_ASHIFT;
+ }
+ return a;
+}
+
+/*
+ * memop_atomicity_bits:
+ * @memop: MemOp value
+ *
+ * Extract the atomicity size from the memop.
+ */
+static inline unsigned memop_atomicity_bits(MemOp memop)
{
- return (op & MO_BSWAP) == MO_BE;
+ unsigned size = memop & MO_SIZE;
+
+ switch (memop & MO_ATOM_MASK) {
+ case MO_ATOM_NONE:
+ size = MO_8;
+ break;
+ case MO_ATOM_IFALIGN_PAIR:
+ case MO_ATOM_WITHIN16_PAIR:
+ size = size ? size - 1 : 0;
+ break;
+ default:
+ break;
+ }
+ return size;
}
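A worked example for the two helpers above (assuming the usual encoding where MO_ALIGN_4 is 2 << MO_ASHIFT):

    MemOp op = MO_64 | MO_ALIGN_4 | MO_ATOM_IFALIGN_PAIR;
    /* memop_alignment_bits(op) == 2     -> the access must be 4-byte aligned  */
    /* memop_atomicity_bits(op) == MO_32 -> each aligned 4-byte half is atomic */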
#endif
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
deleted file mode 100644
index 100c123..0000000
--- a/include/exec/memory-internal.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Declarations for functions which are internal to the memory subsystem.
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-
-/*
- * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY,
- * for declarations which are shared between the memory subsystem's
- * internals and the TCG TLB code. Do not include it from elsewhere.
- */
-
-#ifndef MEMORY_INTERNAL_H
-#define MEMORY_INTERNAL_H
-
-#include "cpu.h"
-
-#ifndef CONFIG_USER_ONLY
-static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
-{
- return fv->dispatch;
-}
-
-static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
-{
- return flatview_to_dispatch(address_space_to_flatview(as));
-}
-
-FlatView *address_space_get_flatview(AddressSpace *as);
-void flatview_unref(FlatView *view);
-
-extern const MemoryRegionOps unassigned_mem_ops;
-
-void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
-AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
-void address_space_dispatch_compact(AddressSpaceDispatch *d);
-void address_space_dispatch_free(AddressSpaceDispatch *d);
-
-void mtree_print_dispatch(struct AddressSpaceDispatch *d,
- MemoryRegion *root);
-#endif
-#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
deleted file mode 100644
index 02f7528..0000000
--- a/include/exec/memory.h
+++ /dev/null
@@ -1,3177 +0,0 @@
-/*
- * Physical memory management API
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef MEMORY_H
-#define MEMORY_H
-
-#ifndef CONFIG_USER_ONLY
-
-#include "exec/cpu-common.h"
-#include "exec/hwaddr.h"
-#include "exec/memattrs.h"
-#include "exec/memop.h"
-#include "exec/ramlist.h"
-#include "qemu/bswap.h"
-#include "qemu/queue.h"
-#include "qemu/int128.h"
-#include "qemu/range.h"
-#include "qemu/notify.h"
-#include "qom/object.h"
-#include "qemu/rcu.h"
-
-#define RAM_ADDR_INVALID (~(ram_addr_t)0)
-
-#define MAX_PHYS_ADDR_SPACE_BITS 62
-#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
-
-#define TYPE_MEMORY_REGION "memory-region"
-DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
- TYPE_MEMORY_REGION)
-
-#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
-typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
-DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
- IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
-
-#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
-typedef struct RamDiscardManagerClass RamDiscardManagerClass;
-typedef struct RamDiscardManager RamDiscardManager;
-DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
- RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
-
-#ifdef CONFIG_FUZZ
-void fuzz_dma_read_cb(size_t addr,
- size_t len,
- MemoryRegion *mr);
-#else
-static inline void fuzz_dma_read_cb(size_t addr,
- size_t len,
- MemoryRegion *mr)
-{
- /* Do Nothing */
-}
-#endif
-
-/* Possible bits for global_dirty_log_{start|stop} */
-
-/* Dirty tracking enabled because migration is running */
-#define GLOBAL_DIRTY_MIGRATION (1U << 0)
-
-/* Dirty tracking enabled because measuring dirty rate */
-#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
-
-/* Dirty tracking enabled because dirty limit */
-#define GLOBAL_DIRTY_LIMIT (1U << 2)
-
-#define GLOBAL_DIRTY_MASK (0x7)
-
-extern unsigned int global_dirty_tracking;
-
-typedef struct MemoryRegionOps MemoryRegionOps;
-
-struct ReservedRegion {
- Range range;
- unsigned type;
-};
-
-/**
- * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
- *
- * @mr: the region, or %NULL if empty
- * @fv: the flat view of the address space the region is mapped in
- * @offset_within_region: the beginning of the section, relative to @mr's start
- * @size: the size of the section; will not exceed @mr's boundaries
- * @offset_within_address_space: the address of the first byte of the section
- * relative to the region's address space
- * @readonly: writes to this section are ignored
- * @nonvolatile: this section is non-volatile
- * @unmergeable: this section should not get merged with adjacent sections
- */
-struct MemoryRegionSection {
- Int128 size;
- MemoryRegion *mr;
- FlatView *fv;
- hwaddr offset_within_region;
- hwaddr offset_within_address_space;
- bool readonly;
- bool nonvolatile;
- bool unmergeable;
-};
-
-typedef struct IOMMUTLBEntry IOMMUTLBEntry;
-
-/* See address_space_translate: bit 0 is read, bit 1 is write. */
-typedef enum {
- IOMMU_NONE = 0,
- IOMMU_RO = 1,
- IOMMU_WO = 2,
- IOMMU_RW = 3,
-} IOMMUAccessFlags;
-
-#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
-
-struct IOMMUTLBEntry {
- AddressSpace *target_as;
- hwaddr iova;
- hwaddr translated_addr;
- hwaddr addr_mask; /* 0xfff = 4k translation */
- IOMMUAccessFlags perm;
-};
-
-/*
- * Bitmap for different IOMMUNotifier capabilities. Each notifier can
- * register with one or multiple IOMMU Notifier capability bit(s).
- *
- * Normally there're two use cases for the notifiers:
- *
- * (1) When the device needs accurate synchronizations of the vIOMMU page
- * tables, it needs to register with both MAP|UNMAP notifies (which
- * is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
- *
- * Regarding to accurate synchronization, it's when the notified
- * device maintains a shadow page table and must be notified on each
- * guest MAP (page table entry creation) and UNMAP (invalidation)
- * events (e.g. VFIO). Both notifications must be accurate so that
- * the shadow page table is fully in sync with the guest view.
- *
- * (2) When the device doesn't need accurate synchronizations of the
- * vIOMMU page tables, it needs to register only with UNMAP or
- * DEVIOTLB_UNMAP notifies.
- *
- * It's when the device maintains a cache of IOMMU translations
- * (IOTLB) and is able to fill that cache by requesting translations
- * from the vIOMMU through a protocol similar to ATS (Address
- * Translation Service).
- *
- * Note that in this mode the vIOMMU will not maintain a shadowed
- * page table for the address space, and the UNMAP messages can cover
- * more than the pages that used to get mapped. The IOMMU notifiee
- * should be able to take care of over-sized invalidations.
- */
-typedef enum {
- IOMMU_NOTIFIER_NONE = 0,
- /* Notify cache invalidations */
- IOMMU_NOTIFIER_UNMAP = 0x1,
- /* Notify entry changes (newly created entries) */
- IOMMU_NOTIFIER_MAP = 0x2,
- /* Notify changes on device IOTLB entries */
- IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
-} IOMMUNotifierFlag;
-
-#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
-#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
-#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
- IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
-
-struct IOMMUNotifier;
-typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
- IOMMUTLBEntry *data);
-
-struct IOMMUNotifier {
- IOMMUNotify notify;
- IOMMUNotifierFlag notifier_flags;
- /* Notify for address space range start <= addr <= end */
- hwaddr start;
- hwaddr end;
- int iommu_idx;
- QLIST_ENTRY(IOMMUNotifier) node;
-};
-typedef struct IOMMUNotifier IOMMUNotifier;
-
-typedef struct IOMMUTLBEvent {
- IOMMUNotifierFlag type;
- IOMMUTLBEntry entry;
-} IOMMUTLBEvent;
-
-/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
-#define RAM_PREALLOC (1 << 0)
-
-/* RAM is mmap-ed with MAP_SHARED */
-#define RAM_SHARED (1 << 1)
-
-/* Only a portion of RAM (used_length) is actually used, and migrated.
- * Resizing RAM while migrating can result in the migration being canceled.
- */
-#define RAM_RESIZEABLE (1 << 2)
-
-/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
- * zero the page and wake waiting processes.
- * (Set during postcopy)
- */
-#define RAM_UF_ZEROPAGE (1 << 3)
-
-/* RAM can be migrated */
-#define RAM_MIGRATABLE (1 << 4)
-
-/* RAM is a persistent kind memory */
-#define RAM_PMEM (1 << 5)
-
-
-/*
- * UFFDIO_WRITEPROTECT is used on this RAMBlock to
- * support 'write-tracking' migration type.
- * Implies ram_state->ram_wt_enabled.
- */
-#define RAM_UF_WRITEPROTECT (1 << 6)
-
-/*
- * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
- * pages if applicable) is skipped: will bail out if not supported. When not
- * set, the OS will do the reservation, if supported for the memory type.
- */
-#define RAM_NORESERVE (1 << 7)
-
-/* RAM that isn't accessible through normal means. */
-#define RAM_PROTECTED (1 << 8)
-
-/* RAM is an mmap-ed named file */
-#define RAM_NAMED_FILE (1 << 9)
-
-/* RAM is mmap-ed read-only */
-#define RAM_READONLY (1 << 10)
-
-/* RAM FD is opened read-only */
-#define RAM_READONLY_FD (1 << 11)
-
-/* RAM can be private, backed by a KVM guest memfd */
-#define RAM_GUEST_MEMFD (1 << 12)
-
-static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
- IOMMUNotifierFlag flags,
- hwaddr start, hwaddr end,
- int iommu_idx)
-{
- n->notify = fn;
- n->notifier_flags = flags;
- n->start = start;
- n->end = end;
- n->iommu_idx = iommu_idx;
-}
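/*
 * Illustrative sketch (hypothetical my_dev_* names): a device that only
 * caches translations, per use case (2) above, might register an
 * UNMAP-only notifier like this, using iommu_notifier_init() and
 * memory_region_register_iommu_notifier() from this API.
 */
static void my_dev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* Drop any cached translations overlapping the invalidated range. */
}

static int my_dev_attach_notifier(MemoryRegion *iommu_mr, IOMMUNotifier *n,
                                  Error **errp)
{
    /* Watch the whole I/O address space, IOMMU index 0. */
    iommu_notifier_init(n, my_dev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX, 0);
    return memory_region_register_iommu_notifier(iommu_mr, n, errp);
}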
-
-/*
- * Memory region callbacks
- */
-struct MemoryRegionOps {
- /* Read from the memory region. @addr is relative to @mr; @size is
- * in bytes. */
- uint64_t (*read)(void *opaque,
- hwaddr addr,
- unsigned size);
- /* Write to the memory region. @addr is relative to @mr; @size is
- * in bytes. */
- void (*write)(void *opaque,
- hwaddr addr,
- uint64_t data,
- unsigned size);
-
- MemTxResult (*read_with_attrs)(void *opaque,
- hwaddr addr,
- uint64_t *data,
- unsigned size,
- MemTxAttrs attrs);
- MemTxResult (*write_with_attrs)(void *opaque,
- hwaddr addr,
- uint64_t data,
- unsigned size,
- MemTxAttrs attrs);
-
- enum device_endian endianness;
- /* Guest-visible constraints: */
- struct {
- /* If nonzero, specify bounds on access sizes beyond which a machine
- * check is thrown.
- */
- unsigned min_access_size;
- unsigned max_access_size;
- /* If true, unaligned accesses are supported. Otherwise unaligned
- * accesses throw machine checks.
- */
- bool unaligned;
- /*
- * If present, and returns #false, the transaction is not accepted
- * by the device (and results in machine dependent behaviour such
- * as a machine check exception).
- */
- bool (*accepts)(void *opaque, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs);
- } valid;
- /* Internal implementation constraints: */
- struct {
- /* If nonzero, specifies the minimum size implemented. Smaller sizes
- * will be rounded upwards and a partial result will be returned.
- */
- unsigned min_access_size;
- /* If nonzero, specifies the maximum size implemented. Larger sizes
- * will be done as a series of accesses with smaller sizes.
- */
- unsigned max_access_size;
- /* If true, unaligned accesses are supported. Otherwise all accesses
- * are converted to (possibly multiple) naturally aligned accesses.
- */
- bool unaligned;
- } impl;
-};
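/*
 * Illustrative sketch (hypothetical device, not a real QEMU model): a
 * minimal MemoryRegionOps for an MMIO block of 32-bit registers, showing
 * how the @valid and @impl constraints above are typically filled in.
 */
static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    /* Return the register value at byte offset @addr. */
    return 0;
}

static void my_dev_write(void *opaque, hwaddr addr, uint64_t data,
                         unsigned size)
{
    /* Update the register at byte offset @addr. */
}

static const MemoryRegionOps my_dev_ops = {
    .read = my_dev_read,
    .write = my_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,   /* guest must use 32-bit accesses */
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,   /* callbacks always see 32-bit accesses */
        .max_access_size = 4,
    },
};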
-
-typedef struct MemoryRegionClass {
- /* private */
- ObjectClass parent_class;
-} MemoryRegionClass;
-
-
-enum IOMMUMemoryRegionAttr {
- IOMMU_ATTR_SPAPR_TCE_FD
-};
-
-/*
- * IOMMUMemoryRegionClass:
- *
- * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
- * and provide an implementation of at least the @translate method here
- * to handle requests to the memory region. Other methods are optional.
- *
- * The IOMMU implementation must use the IOMMU notifier infrastructure
- * to report whenever mappings are changed, by calling
- * memory_region_notify_iommu() (or, if necessary, by calling
- * memory_region_notify_iommu_one() for each registered notifier).
- *
- * Conceptually an IOMMU provides a mapping from input address
- * to an output TLB entry. If the IOMMU is aware of memory transaction
- * attributes and the output TLB entry depends on the transaction
- * attributes, we represent this using IOMMU indexes. Each index
- * selects a particular translation table that the IOMMU has:
- *
- * @attrs_to_index returns the IOMMU index for a set of transaction attributes
- *
- * @translate takes an input address and an IOMMU index
- *
- * and the mapping returned can only depend on the input address and the
- * IOMMU index.
- *
- * Most IOMMUs don't care about the transaction attributes and support
- * only a single IOMMU index. A more complex IOMMU might have one index
- * for secure transactions and one for non-secure transactions.
- */
-struct IOMMUMemoryRegionClass {
- /* private: */
- MemoryRegionClass parent_class;
-
- /* public: */
- /**
- * @translate:
- *
- * Return a TLB entry that contains a given address.
- *
- * The IOMMUAccessFlags indicated via @flag are optional and may
- * be specified as IOMMU_NONE to indicate that the caller needs
- * the full translation information for both reads and writes. If
- * the access flags are specified then the IOMMU implementation
- * may use this as an optimization, to stop doing a page table
- * walk as soon as it knows that the requested permissions are not
- * allowed. If IOMMU_NONE is passed then the IOMMU must do the
- * full page table walk and report the permissions in the returned
- * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
- * return different mappings for reads and writes.)
- *
- * The returned information remains valid while the caller is
- * holding the big QEMU lock or is inside an RCU critical section;
- * if the caller wishes to cache the mapping beyond that it must
- * register an IOMMU notifier so it can invalidate its cached
- * information when the IOMMU mapping changes.
- *
- * @iommu: the IOMMUMemoryRegion
- *
- * @addr: address to be translated within the memory region
- *
- * @flag: requested access permission
- *
- * @iommu_idx: IOMMU index for the translation
- */
- IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
- IOMMUAccessFlags flag, int iommu_idx);
- /**
- * @get_min_page_size:
- *
- * Returns minimum supported page size in bytes.
- *
- * If this method is not provided then the minimum is assumed to
- * be TARGET_PAGE_SIZE.
- *
- * @iommu: the IOMMUMemoryRegion
- */
- uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
- /**
- * @notify_flag_changed:
- *
- * Called when IOMMU Notifier flag changes (i.e. when the set of
- * events which IOMMU users are requesting notification for changes).
- * Optional method -- need not be provided if the IOMMU does not
- * need to know exactly which events must be notified.
- *
- * @iommu: the IOMMUMemoryRegion
- *
- * @old_flags: events which previously needed to be notified
- *
- * @new_flags: events which now need to be notified
- *
- * Returns 0 on success, or a negative errno; in particular
- * returns -EINVAL if the new flag bitmap is not supported by the
- * IOMMU memory region. In case of failure, an error must be
- * set in @errp.
- */
- int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
- IOMMUNotifierFlag old_flags,
- IOMMUNotifierFlag new_flags,
- Error **errp);
- /**
- * @replay:
- *
- * Called to handle memory_region_iommu_replay().
- *
- * The default implementation of memory_region_iommu_replay() is to
- * call the IOMMU translate method for every page in the address space
- * with flag == IOMMU_NONE and then call the notifier if translate
- * returns a valid mapping. If this method is implemented then it
- * overrides the default behaviour, and must provide the full semantics
- * of memory_region_iommu_replay(), by calling @notifier for every
- * translation present in the IOMMU.
- *
- * Optional method -- an IOMMU only needs to provide this method
- * if the default is inefficient or produces undesirable side effects.
- *
- * Note: this is not related to record-and-replay functionality.
- */
- void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
-
- /**
- * @get_attr:
- *
- * Get IOMMU misc attributes. This is an optional method that
- * can be used to allow users of the IOMMU to get implementation-specific
- * information. The IOMMU implements this method to handle calls
- * by IOMMU users to memory_region_iommu_get_attr() by filling in
- * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
- * the IOMMU supports. If the method is unimplemented then
- * memory_region_iommu_get_attr() will always return -EINVAL.
- *
- * @iommu: the IOMMUMemoryRegion
- *
- * @attr: attribute being queried
- *
- * @data: memory to fill in with the attribute data
- *
- * Returns 0 on success, or a negative errno; in particular
- * returns -EINVAL for unrecognized or unimplemented attribute types.
- */
- int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
- void *data);
-
- /**
- * @attrs_to_index:
- *
- * Return the IOMMU index to use for a given set of transaction attributes.
- *
- * Optional method: if an IOMMU only supports a single IOMMU index then
- * the default implementation of memory_region_iommu_attrs_to_index()
- * will return 0.
- *
- * The indexes supported by an IOMMU must be contiguous, starting at 0.
- *
- * @iommu: the IOMMUMemoryRegion
- * @attrs: memory transaction attributes
- */
- int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
-
- /**
- * @num_indexes:
- *
- * Return the number of IOMMU indexes this IOMMU supports.
- *
- * Optional method: if this method is not provided, then
- * memory_region_iommu_num_indexes() will return 1, indicating that
- * only a single IOMMU index is supported.
- *
- * @iommu: the IOMMUMemoryRegion
- */
- int (*num_indexes)(IOMMUMemoryRegion *iommu);
-};
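/*
 * Illustrative sketch (hypothetical IOMMU model): the minimum an IOMMU
 * implementation provides is @translate; it would be installed as
 * imrc->translate in the class_init of a TYPE_IOMMU_MEMORY_REGION
 * subclass.  A 1:1 pass-through mapping with 4 KiB pages is assumed here
 * purely as a placeholder.
 */
static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                        IOMMUAccessFlags flag, int iommu_idx)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,
        .addr_mask = 0xfff,             /* one 4 KiB page */
        .perm = IOMMU_RW,
    };
    return entry;
}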
-
-typedef struct RamDiscardListener RamDiscardListener;
-typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
- MemoryRegionSection *section);
-typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
- MemoryRegionSection *section);
-
-struct RamDiscardListener {
- /*
- * @notify_populate:
- *
- * Notification that previously discarded memory is about to get populated.
- * Listeners are able to object. If any listener objects, already
- * successfully notified listeners are notified about a discard again.
- *
- * @rdl: the #RamDiscardListener getting notified
- * @section: the #MemoryRegionSection to get populated. The section
- * is aligned within the memory region to the minimum granularity
- * unless it would exceed the registered section.
- *
- * Returns 0 on success. If the notification is rejected by the listener,
- * an error is returned.
- */
- NotifyRamPopulate notify_populate;
-
- /*
- * @notify_discard:
- *
- * Notification that previously populated memory was discarded successfully
- * and listeners should drop all references to such memory and prevent
- * new population (e.g., unmap).
- *
- * @rdl: the #RamDiscardListener getting notified
- * @section: the #MemoryRegionSection to get discarded. The section
- * is aligned within the memory region to the minimum granularity
- * unless it would exceed the registered section.
- */
- NotifyRamDiscard notify_discard;
-
- /*
- * @double_discard_supported:
- *
- * The listener supports getting @notify_discard notifications that span
- * already discarded parts.
- */
- bool double_discard_supported;
-
- MemoryRegionSection *section;
- QLIST_ENTRY(RamDiscardListener) next;
-};
-
-static inline void ram_discard_listener_init(RamDiscardListener *rdl,
- NotifyRamPopulate populate_fn,
- NotifyRamDiscard discard_fn,
- bool double_discard_supported)
-{
- rdl->notify_populate = populate_fn;
- rdl->notify_discard = discard_fn;
- rdl->double_discard_supported = double_discard_supported;
-}
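/*
 * Illustrative sketch (hypothetical my_*() callbacks): wiring up a
 * RamDiscardListener for a section of a RAM region that has a
 * RamDiscardManager.  memory_region_get_ram_discard_manager() is assumed
 * to be the usual accessor for the region's manager.
 */
static int my_notify_populate(RamDiscardListener *rdl,
                              MemoryRegionSection *section)
{
    /* e.g. map/pin the now-populated range; a negative errno objects */
    return 0;
}

static void my_notify_discard(RamDiscardListener *rdl,
                              MemoryRegionSection *section)
{
    /* e.g. unmap/unpin the discarded range */
}

static void my_listen(RamDiscardListener *rdl, MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);

    ram_discard_listener_init(rdl, my_notify_populate, my_notify_discard,
                              false /* double_discard_supported */);
    ram_discard_manager_register_listener(rdm, rdl, section);
}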
-
-typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
-typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
-
-/*
- * RamDiscardManagerClass:
- *
- * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
- * regions are currently populated to be used/accessed by the VM, notifying
- * after parts were discarded (freeing up memory) and before parts will be
- * populated (consuming memory), to be used/accessed by the VM.
- *
- * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
- * #MemoryRegion isn't mapped into an address space yet (either directly
- * or via an alias); it cannot change while the #MemoryRegion is
- * mapped into an address space.
- *
- * The #RamDiscardManager is intended to be used by technologies that are
- * incompatible with discarding of RAM (e.g., VFIO, which may pin all
- * memory inside a #MemoryRegion), and require proper coordination to only
- * map the currently populated parts, to hinder parts that are expected to
- * remain discarded from silently getting populated and consuming memory.
- * Technologies that support discarding of RAM don't have to bother and can
- * simply map the whole #MemoryRegion.
- *
- * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
- * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
- * Logically unplugging memory consists of discarding RAM. The VM agreed to not
- * access unplugged (discarded) memory - especially via DMA. virtio-mem will
- * properly coordinate with listeners before memory is plugged (populated),
- * and after memory is unplugged (discarded).
- *
- * Listeners are called in multiples of the minimum granularity (unless it
- * would exceed the registered range) and changes are aligned to the minimum
- * granularity within the #MemoryRegion. Listeners have to prepare for memory
- * becoming discarded in a different granularity than it was populated and the
- * other way around.
- */
-struct RamDiscardManagerClass {
- /* private */
- InterfaceClass parent_class;
-
- /* public */
-
- /**
- * @get_min_granularity:
- *
- * Get the minimum granularity in which listeners will get notified
- * about changes within the #MemoryRegion via the #RamDiscardManager.
- *
- * @rdm: the #RamDiscardManager
- * @mr: the #MemoryRegion
- *
- * Returns the minimum granularity.
- */
- uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
- const MemoryRegion *mr);
-
- /**
- * @is_populated:
- *
- * Check whether the given #MemoryRegionSection is completely populated
- * (i.e., no parts are currently discarded) via the #RamDiscardManager.
- * There are no alignment requirements.
- *
- * @rdm: the #RamDiscardManager
- * @section: the #MemoryRegionSection
- *
- * Returns whether the given range is completely populated.
- */
- bool (*is_populated)(const RamDiscardManager *rdm,
- const MemoryRegionSection *section);
-
- /**
- * @replay_populated:
- *
- * Call the #ReplayRamPopulate callback for all populated parts within the
- * #MemoryRegionSection via the #RamDiscardManager.
- *
- * In case any call fails, no further calls are made.
- *
- * @rdm: the #RamDiscardManager
- * @section: the #MemoryRegionSection
- * @replay_fn: the #ReplayRamPopulate callback
- * @opaque: pointer to forward to the callback
- *
- * Returns 0 on success, or a negative error if any notification failed.
- */
- int (*replay_populated)(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamPopulate replay_fn, void *opaque);
-
- /**
- * @replay_discarded:
- *
- * Call the #ReplayRamDiscard callback for all discarded parts within the
- * #MemoryRegionSection via the #RamDiscardManager.
- *
- * @rdm: the #RamDiscardManager
- * @section: the #MemoryRegionSection
- * @replay_fn: the #ReplayRamDiscard callback
- * @opaque: pointer to forward to the callback
- */
- void (*replay_discarded)(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamDiscard replay_fn, void *opaque);
-
- /**
- * @register_listener:
- *
- * Register a #RamDiscardListener for the given #MemoryRegionSection and
- * immediately notify the #RamDiscardListener about all populated parts
- * within the #MemoryRegionSection via the #RamDiscardManager.
- *
- * In case any notification fails, no further notifications are triggered
- * and an error is logged.
- *
- * @rdm: the #RamDiscardManager
- * @rdl: the #RamDiscardListener
- * @section: the #MemoryRegionSection
- */
- void (*register_listener)(RamDiscardManager *rdm,
- RamDiscardListener *rdl,
- MemoryRegionSection *section);
-
- /**
- * @unregister_listener:
- *
- * Unregister a previously registered #RamDiscardListener via the
- * #RamDiscardManager after notifying the #RamDiscardListener about all
- * populated parts becoming unpopulated within the registered
- * #MemoryRegionSection.
- *
- * @rdm: the #RamDiscardManager
- * @rdl: the #RamDiscardListener
- */
- void (*unregister_listener)(RamDiscardManager *rdm,
- RamDiscardListener *rdl);
-};
-
-uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
- const MemoryRegion *mr);
-
-bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
- const MemoryRegionSection *section);
-
-int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamPopulate replay_fn,
- void *opaque);
-
-void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamDiscard replay_fn,
- void *opaque);
-
-void ram_discard_manager_register_listener(RamDiscardManager *rdm,
- RamDiscardListener *rdl,
- MemoryRegionSection *section);
-
-void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
- RamDiscardListener *rdl);
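/*
 * Illustrative sketch (hypothetical helper names): counting the currently
 * populated bytes of a section via ram_discard_manager_replay_populated().
 */
static int my_count_populated(MemoryRegionSection *section, void *opaque)
{
    uint64_t *total = opaque;

    *total += int128_get64(section->size);
    return 0;                           /* non-zero would abort the replay */
}

static uint64_t my_populated_bytes(const RamDiscardManager *rdm,
                                   MemoryRegionSection *section)
{
    uint64_t total = 0;

    ram_discard_manager_replay_populated(rdm, section, my_count_populated,
                                         &total);
    return total;
}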
-
-/**
- * memory_get_xlat_addr: Extract addresses from a TLB entry
- *
- * @iotlb: pointer to an #IOMMUTLBEntry
- * @vaddr: virtual address
- * @ram_addr: RAM address
- * @read_only: indicates if writes are allowed
- * @mr_has_discard_manager: indicates memory is controlled by a
- * RamDiscardManager
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
- ram_addr_t *ram_addr, bool *read_only,
- bool *mr_has_discard_manager, Error **errp);
-
-typedef struct CoalescedMemoryRange CoalescedMemoryRange;
-typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
-
-/** MemoryRegion:
- *
- * A struct representing a memory region.
- */
-struct MemoryRegion {
- Object parent_obj;
-
- /* private: */
-
- /* The following fields should fit in a cache line */
- bool romd_mode;
- bool ram;
- bool subpage;
- bool readonly; /* For RAM regions */
- bool nonvolatile;
- bool rom_device;
- bool flush_coalesced_mmio;
- bool unmergeable;
- uint8_t dirty_log_mask;
- bool is_iommu;
- RAMBlock *ram_block;
- Object *owner;
- /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
- DeviceState *dev;
-
- const MemoryRegionOps *ops;
- void *opaque;
- MemoryRegion *container;
- int mapped_via_alias; /* Mapped via an alias, container might be NULL */
- Int128 size;
- hwaddr addr;
- void (*destructor)(MemoryRegion *mr);
- uint64_t align;
- bool terminates;
- bool ram_device;
- bool enabled;
- bool warning_printed; /* For reservations */
- uint8_t vga_logging_count;
- MemoryRegion *alias;
- hwaddr alias_offset;
- int32_t priority;
- QTAILQ_HEAD(, MemoryRegion) subregions;
- QTAILQ_ENTRY(MemoryRegion) subregions_link;
- QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
- const char *name;
- unsigned ioeventfd_nb;
- MemoryRegionIoeventfd *ioeventfds;
- RamDiscardManager *rdm; /* Only for RAM */
-
- /* For devices designed to perform re-entrant IO into their own IO MRs */
- bool disable_reentrancy_guard;
-};
-
-struct IOMMUMemoryRegion {
- MemoryRegion parent_obj;
-
- QLIST_HEAD(, IOMMUNotifier) iommu_notify;
- IOMMUNotifierFlag iommu_notify_flags;
-};
-
-#define IOMMU_NOTIFIER_FOREACH(n, mr) \
- QLIST_FOREACH((n), &(mr)->iommu_notify, node)
-
-#define MEMORY_LISTENER_PRIORITY_MIN 0
-#define MEMORY_LISTENER_PRIORITY_ACCEL 10
-#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
-
-/**
- * struct MemoryListener: callbacks structure for updates to the physical memory map
- *
- * Allows a component to adjust to changes in the guest-visible memory map.
- * Use with memory_listener_register() and memory_listener_unregister().
- */
-struct MemoryListener {
- /**
- * @begin:
- *
- * Called at the beginning of an address space update transaction.
- * Followed by calls to #MemoryListener.region_add(),
- * #MemoryListener.region_del(), #MemoryListener.region_nop(),
- * #MemoryListener.log_start() and #MemoryListener.log_stop() in
- * increasing address order.
- *
- * @listener: The #MemoryListener.
- */
- void (*begin)(MemoryListener *listener);
-
- /**
- * @commit:
- *
- * Called at the end of an address space update transaction,
- * after the last call to #MemoryListener.region_add(),
- * #MemoryListener.region_del() or #MemoryListener.region_nop(),
- * #MemoryListener.log_start() and #MemoryListener.log_stop().
- *
- * @listener: The #MemoryListener.
- */
- void (*commit)(MemoryListener *listener);
-
- /**
- * @region_add:
- *
- * Called during an address space update transaction,
- * for a section of the address space that is new in this address
- * space since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The new #MemoryRegionSection.
- */
- void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
-
- /**
- * @region_del:
- *
- * Called during an address space update transaction,
- * for a section of the address space that has disappeared in the address
- * space since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The old #MemoryRegionSection.
- */
- void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
-
- /**
- * @region_nop:
- *
- * Called during an address space update transaction,
- * for a section of the address space that is in the same place in the address
- * space as in the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The #MemoryRegionSection.
- */
- void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
-
- /**
- * @log_start:
- *
- * Called during an address space update transaction, after
- * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
- * #MemoryListener.region_nop(), if dirty memory logging clients have
- * become active since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The #MemoryRegionSection.
- * @old: A bitmap of dirty memory logging clients that were active in
- * the previous transaction.
- * @new: A bitmap of dirty memory logging clients that are active in
- * the current transaction.
- */
- void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
- int old_val, int new_val);
-
- /**
- * @log_stop:
- *
- * Called during an address space update transaction, after
- * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
- * #MemoryListener.region_nop() and possibly after
- * #MemoryListener.log_start(), if dirty memory logging clients have
- * become inactive since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The #MemoryRegionSection.
- * @old: A bitmap of dirty memory logging clients that were active in
- * the previous transaction.
- * @new: A bitmap of dirty memory logging clients that are active in
- * the current transaction.
- */
- void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
- int old_val, int new_val);
-
- /**
- * @log_sync:
- *
- * Called by memory_region_snapshot_and_clear_dirty() and
- * memory_global_dirty_log_sync(), before accessing QEMU's "official"
- * copy of the dirty memory bitmap for a #MemoryRegionSection.
- *
- * @listener: The #MemoryListener.
- * @section: The #MemoryRegionSection.
- */
- void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
-
- /**
- * @log_sync_global:
- *
- * This is the global version of @log_sync when the listener does
- * not have a way to synchronize the log with finer granularity.
- * When the listener registers with @log_sync_global defined, then
- * its @log_sync must be NULL. Vice versa.
- *
- * @listener: The #MemoryListener.
- * @last_stage: The last stage to synchronize the log during migration.
- * The caller should guarantee that the synchronization with @last_stage
- * set to true is triggered only once, after all VCPUs have been stopped.
- */
- void (*log_sync_global)(MemoryListener *listener, bool last_stage);
-
- /**
- * @log_clear:
- *
- * Called before reading the dirty memory bitmap for a
- * #MemoryRegionSection.
- *
- * @listener: The #MemoryListener.
- * @section: The #MemoryRegionSection.
- */
- void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
-
- /**
- * @log_global_start:
- *
- * Called by memory_global_dirty_log_start(), which
- * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
- * the address space. #MemoryListener.log_global_start() is also
- * called when a #MemoryListener is added, if global dirty logging is
- * active at that time.
- *
- * @listener: The #MemoryListener.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
- bool (*log_global_start)(MemoryListener *listener, Error **errp);
-
- /**
- * @log_global_stop:
- *
- * Called by memory_global_dirty_log_stop(), which
- * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
- * the address space.
- *
- * @listener: The #MemoryListener.
- */
- void (*log_global_stop)(MemoryListener *listener);
-
- /**
- * @log_global_after_sync:
- *
- * Called after reading the dirty memory bitmap
- * for any #MemoryRegionSection.
- *
- * @listener: The #MemoryListener.
- */
- void (*log_global_after_sync)(MemoryListener *listener);
-
- /**
- * @eventfd_add:
- *
- * Called during an address space update transaction,
- * for a section of the address space that has had a new ioeventfd
- * registration since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The new #MemoryRegionSection.
- * @match_data: The @match_data parameter for the new ioeventfd.
- * @data: The @data parameter for the new ioeventfd.
- * @e: The #EventNotifier parameter for the new ioeventfd.
- */
- void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
-
- /**
- * @eventfd_del:
- *
- * Called during an address space update transaction,
- * for a section of the address space that has dropped an ioeventfd
- * registration since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The new #MemoryRegionSection.
- * @match_data: The @match_data parameter for the dropped ioeventfd.
- * @data: The @data parameter for the dropped ioeventfd.
- * @e: The #EventNotifier parameter for the dropped ioeventfd.
- */
- void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
-
- /**
- * @coalesced_io_add:
- *
- * Called during an address space update transaction,
- * for a section of the address space that has had a new coalesced
- * MMIO range registration since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The new #MemoryRegionSection.
- * @addr: The starting address for the coalesced MMIO range.
- * @len: The length of the coalesced MMIO range.
- */
- void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
-
- /**
- * @coalesced_io_del:
- *
- * Called during an address space update transaction,
- * for a section of the address space that has dropped a coalesced
- * MMIO range since the last transaction.
- *
- * @listener: The #MemoryListener.
- * @section: The new #MemoryRegionSection.
- * @addr: The starting address for the coalesced MMIO range.
- * @len: The length of the coalesced MMIO range.
- */
- void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
- /**
- * @priority:
- *
- * Govern the order in which memory listeners are invoked. Lower priorities
- * are invoked earlier for "add" or "start" callbacks, and later for "delete"
- * or "stop" callbacks.
- */
- unsigned priority;
-
- /**
- * @name:
- *
- * Name of the listener. It can be used in contexts where we'd like to
- * distinguish one memory listener from the rest.
- */
- const char *name;
-
- /* private: */
- AddressSpace *address_space;
- QTAILQ_ENTRY(MemoryListener) link;
- QTAILQ_ENTRY(MemoryListener) link_as;
-};
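/*
 * Illustrative sketch (hypothetical my_* names): a listener that tracks
 * guest-visible sections of address_space_memory.  Only the callbacks of
 * interest need to be filled in; the rest may stay NULL.
 */
static void my_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* e.g. set up a mapping for memory_region_is_ram(section->mr) sections */
}

static void my_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* tear the mapping down again */
}

static MemoryListener my_listener = {
    .name = "my-listener",
    .region_add = my_region_add,
    .region_del = my_region_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};

static void my_listener_setup(void)
{
    memory_listener_register(&my_listener, &address_space_memory);
}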
-
-typedef struct AddressSpaceMapClient {
- QEMUBH *bh;
- QLIST_ENTRY(AddressSpaceMapClient) link;
-} AddressSpaceMapClient;
-
-typedef struct {
- MemoryRegion *mr;
- void *buffer;
- hwaddr addr;
- hwaddr len;
- bool in_use;
-} BounceBuffer;
-
-/**
- * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
- */
-struct AddressSpace {
- /* private: */
- struct rcu_head rcu;
- char *name;
- MemoryRegion *root;
-
- /* Accessed via RCU. */
- struct FlatView *current_map;
-
- int ioeventfd_nb;
- int ioeventfd_notifiers;
- struct MemoryRegionIoeventfd *ioeventfds;
- QTAILQ_HEAD(, MemoryListener) listeners;
- QTAILQ_ENTRY(AddressSpace) address_spaces_link;
-
- /* Bounce buffer to use for this address space. */
- BounceBuffer bounce;
- /* List of callbacks to invoke when buffers free up */
- QemuMutex map_client_list_lock;
- QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
-};
-
-typedef struct AddressSpaceDispatch AddressSpaceDispatch;
-typedef struct FlatRange FlatRange;
-
-/* Flattened global view of current active memory hierarchy. Kept in sorted
- * order.
- */
-struct FlatView {
- struct rcu_head rcu;
- unsigned ref;
- FlatRange *ranges;
- unsigned nr;
- unsigned nr_allocated;
- struct AddressSpaceDispatch *dispatch;
- MemoryRegion *root;
-};
-
-static inline FlatView *address_space_to_flatview(AddressSpace *as)
-{
- return qatomic_rcu_read(&as->current_map);
-}
-
-/**
- * typedef flatview_cb: callback for flatview_for_each_range()
- *
- * @start: start address of the range within the FlatView
- * @len: length of the range in bytes
- * @mr: MemoryRegion covering this range
- * @offset_in_region: offset of the first byte of the range within @mr
- * @opaque: data pointer passed to flatview_for_each_range()
- *
- * Returns: true to stop the iteration, false to keep going.
- */
-typedef bool (*flatview_cb)(Int128 start,
- Int128 len,
- const MemoryRegion *mr,
- hwaddr offset_in_region,
- void *opaque);
-
-/**
- * flatview_for_each_range: Iterate through a FlatView
- * @fv: the FlatView to iterate through
- * @cb: function to call for each range
- * @opaque: opaque data pointer to pass to @cb
- *
- * A FlatView is made up of a list of non-overlapping ranges, each of
- * which is a slice of a MemoryRegion. This function iterates through
- * each range in @fv, calling @cb. The callback function can terminate
- * iteration early by returning 'true'.
- */
-void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
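/*
 * Illustrative sketch (hypothetical my_* names, assuming the usual
 * qemu/osdep.h, qemu/log.h and qemu/rcu.h includes): dumping every range
 * of the current flat view of an address space.  The FlatView obtained
 * from address_space_to_flatview() is only stable inside an RCU critical
 * section (or under the BQL).
 */
static bool my_dump_range(Int128 start, Int128 len, const MemoryRegion *mr,
                          hwaddr offset_in_region, void *opaque)
{
    qemu_log("0x%" PRIx64 "+0x%" PRIx64 " -> %s\n",
             int128_get64(start), int128_get64(len), memory_region_name(mr));
    return false;                       /* keep iterating */
}

static void my_dump_flatview(AddressSpace *as)
{
    RCU_READ_LOCK_GUARD();
    flatview_for_each_range(address_space_to_flatview(as), my_dump_range, NULL);
}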
-
-static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
- MemoryRegionSection *b)
-{
- return a->mr == b->mr &&
- a->fv == b->fv &&
- a->offset_within_region == b->offset_within_region &&
- a->offset_within_address_space == b->offset_within_address_space &&
- int128_eq(a->size, b->size) &&
- a->readonly == b->readonly &&
- a->nonvolatile == b->nonvolatile;
-}
-
-/**
- * memory_region_section_new_copy: Copy a memory region section
- *
- * Allocate memory for a new copy, copy the memory region section, and
- * properly take a reference on all relevant members.
- *
- * @s: the #MemoryRegionSection to copy
- */
-MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
-
-/**
- * memory_region_section_free_copy: Free a copied memory region section
- *
- * Free a copy of a memory section created via memory_region_section_new_copy(),
- * properly dropping references on all relevant members.
- *
- * @s: the #MemoryRegionSection to free
- */
-void memory_region_section_free_copy(MemoryRegionSection *s);
-
-/**
- * memory_region_init: Initialize a memory region
- *
- * The region typically acts as a container for other memory regions. Use
- * memory_region_add_subregion() to add subregions.
- *
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region; any subregions beyond this size will be clipped
- */
-void memory_region_init(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size);
-
-/**
- * memory_region_ref: Add 1 to a memory region's reference count
- *
- * Whenever memory regions are accessed outside the BQL, they need to be
- * preserved against hot-unplug. MemoryRegions actually do not have their
- * own reference count; they piggyback on a QOM object, their "owner".
- * This function adds a reference to the owner.
- *
- * All MemoryRegions must have an owner if they can disappear, even if the
- * device they belong to operates exclusively under the BQL. This is because
- * the region could be returned at any time by memory_region_find, and this
- * is usually under guest control.
- *
- * @mr: the #MemoryRegion
- */
-void memory_region_ref(MemoryRegion *mr);
-
-/**
- * memory_region_unref: Remove 1 from a memory region's reference count
- *
- * Whenever memory regions are accessed outside the BQL, they need to be
- * preserved against hot-unplug. MemoryRegions actually do not have their
- * own reference count; they piggyback on a QOM object, their "owner".
- * This function removes a reference to the owner and possibly destroys it.
- *
- * @mr: the #MemoryRegion
- */
-void memory_region_unref(MemoryRegion *mr);
-
-/**
- * memory_region_init_io: Initialize an I/O memory region.
- *
- * Accesses into the region will cause the callbacks in @ops to be called.
- * If @size is nonzero, subregions will be clipped to @size.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @ops: a structure containing read and write callbacks to be used when
- * I/O is performed on the region.
- * @opaque: passed to the read and write callbacks of the @ops structure.
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-void memory_region_init_io(MemoryRegion *mr,
- Object *owner,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size);
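/*
 * Illustrative sketch (hypothetical MyDevState; my_dev_ops is assumed to
 * be a MemoryRegionOps like the one sketched further up): a typical
 * realize path creates a container with memory_region_init(), places an
 * MMIO region created with memory_region_init_io() inside it, and lets
 * the board or bus map the container.
 */
typedef struct MyDevState {
    MemoryRegion container;
    MemoryRegion mmio;
    /* ... device registers ... */
} MyDevState;

static void my_dev_init_mmio(MyDevState *s, Object *owner)
{
    memory_region_init(&s->container, owner, "my-dev", 0x1000);
    memory_region_init_io(&s->mmio, owner, &my_dev_ops, s,
                          "my-dev-regs", 0x100);
    memory_region_add_subregion(&s->container, 0, &s->mmio);
}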
-
-/**
- * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
- * into the region will modify memory
- * directly.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp);
-
-/**
- * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
- * Accesses into the region will
- * modify memory directly.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
- * RAM_GUEST_MEMFD.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- uint32_t ram_flags,
- Error **errp);
-
-/**
- * memory_region_init_resizeable_ram: Initialize memory region with resizable
- * RAM. Accesses into the region will
- * modify memory directly. Only an initial
- * portion of this RAM is actually used.
- * Changing the size while migrating
- * can result in the migration being
- * canceled.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: used size of the region.
- * @max_size: max size of the region.
- * @resized: callback to notify owner about used size change.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_resizeable_ram(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- uint64_t max_size,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
- Error **errp);
-#ifdef CONFIG_POSIX
-
-/**
- * memory_region_init_ram_from_file: Initialize RAM memory region with a
- * mmap-ed backend.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @align: alignment of the region base address; if 0, the default alignment
- * (getpagesize()) will be used.
- * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
- * RAM_READONLY_FD, RAM_GUEST_MEMFD
- * @path: the path in which to allocate the RAM.
- * @offset: offset within the file referenced by path
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_ram_from_file(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- uint64_t align,
- uint32_t ram_flags,
- const char *path,
- ram_addr_t offset,
- Error **errp);
-
-/**
- * memory_region_init_ram_from_fd: Initialize RAM memory region with a
- * mmap-ed backend.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: the name of the region.
- * @size: size of the region.
- * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
- * RAM_READONLY_FD, RAM_GUEST_MEMFD
- * @fd: the fd to mmap.
- * @offset: offset within the file referenced by fd
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_ram_from_fd(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- uint32_t ram_flags,
- int fd,
- ram_addr_t offset,
- Error **errp);
-#endif
-
-/**
- * memory_region_init_ram_ptr: Initialize RAM memory region from a
- * user-provided pointer. Accesses into the
- * region will modify memory directly.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @ptr: memory to be mapped; must contain at least @size bytes.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- */
-void memory_region_init_ram_ptr(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- void *ptr);
-
-/**
- * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
- * a user-provided pointer.
- *
- * A RAM device represents a mapping to a physical device, such as to a PCI
- * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
- * into the VM address space and access to the region will modify memory
- * directly. However, the memory region should not be included in a memory
- * dump (device may not be enabled/mapped at the time of the dump), and
- * operations incompatible with manipulating MMIO should be avoided. Replaces
- * skip_dump flag.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: the name of the region.
- * @size: size of the region.
- * @ptr: memory to be mapped; must contain at least @size bytes.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM memory region to be migrated; that is the responsibility of the caller.
- * (For RAM device memory regions, migrating the contents rarely makes sense.)
- */
-void memory_region_init_ram_device_ptr(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- void *ptr);
-
-/**
- * memory_region_init_alias: Initialize a memory region that aliases all or a
- * part of another memory region.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @orig: the region to be referenced; @mr will be equivalent to
- * @orig between @offset and @offset + @size - 1.
- * @offset: start of the section in @orig to be referenced.
- * @size: size of the region.
- */
-void memory_region_init_alias(MemoryRegion *mr,
- Object *owner,
- const char *name,
- MemoryRegion *orig,
- hwaddr offset,
- uint64_t size);
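/*
 * Illustrative sketch (made-up names and addresses; MiB is assumed from
 * qemu/units.h): exposing the low 1 MiB of a RAM region a second time at
 * a high physical address, the way boards commonly mirror RAM or ROM.
 */
static void my_map_mirror(MemoryRegion *sysmem, MemoryRegion *ram,
                          MemoryRegion *mirror, Object *owner)
{
    memory_region_init_alias(mirror, owner, "ram-mirror", ram, 0, 1 * MiB);
    memory_region_add_subregion(sysmem, 0xfff00000, mirror);
}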
-
-/**
- * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
- *
- * This has the same effect as calling memory_region_init_ram_nomigrate()
- * and then marking the resulting region read-only with
- * memory_region_set_readonly().
- *
- * Note that this function does not do anything to cause the data in the
- * RAM side of the memory region to be migrated; that is the responsibility
- * of the caller.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp);
-
-/**
- * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
- * Writes are handled via callbacks.
- *
- * Note that this function does not do anything to cause the data in the
- * RAM side of the memory region to be migrated; that is the responsibility
- * of the caller.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @ops: callbacks for write access handling (must not be NULL).
- * @opaque: passed to the read and write callbacks of the @ops structure.
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
- Object *owner,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size,
- Error **errp);
-
-/**
- * memory_region_init_iommu: Initialize a memory region of a custom type
- * that translates addresses
- *
- * An IOMMU region translates addresses and forwards accesses to a target
- * memory region.
- *
- * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
- * @_iommu_mr should be a pointer to enough memory for an instance of
- * that subclass, @instance_size is the size of that subclass, and
- * @mrtypename is its name. This function will initialize @_iommu_mr as an
- * instance of the subclass, and its methods will then be called to handle
- * accesses to the memory region. See the documentation of
- * #IOMMUMemoryRegionClass for further details.
- *
- * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
- * @instance_size: the IOMMUMemoryRegion subclass instance size
- * @mrtypename: the type name of the #IOMMUMemoryRegion
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-void memory_region_init_iommu(void *_iommu_mr,
- size_t instance_size,
- const char *mrtypename,
- Object *owner,
- const char *name,
- uint64_t size);
-
-/**
- * memory_region_init_ram - Initialize RAM memory region. Accesses into the
- * region will modify memory directly.
- *
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count (must be
- * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
- * @name: name of the memory region
- * @size: size of the region in bytes
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * This function allocates RAM for a board model or device, and
- * arranges for it to be migrated (by calling vmstate_register_ram()
- * if @owner is a DeviceState, or vmstate_register_ram_global() if
- * @owner is NULL).
- *
- * TODO: Currently we restrict @owner to being either NULL (for
- * global RAM regions with no owner) or devices, so that we can
- * give the RAM block a unique name for migration purposes.
- * We should lift this restriction and allow arbitrary Objects.
- * If you pass a non-NULL non-device @owner then we will assert.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_ram(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp);
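/*
 * Illustrative sketch (hypothetical board name; get_system_memory() is
 * assumed to be the usual accessor for the system MemoryRegion): the
 * common board-code pattern for migratable RAM is a single call plus
 * mapping the region into the system address space.
 */
static void my_board_init_ram(MemoryRegion *ram, uint64_t ram_size)
{
    memory_region_init_ram(ram, NULL, "my-board.ram", ram_size, &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}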
-
-bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp);
-
-/**
- * memory_region_init_rom: Initialize a ROM memory region.
- *
- * This has the same effect as calling memory_region_init_ram()
- * and then marking the resulting region read-only with
- * memory_region_set_readonly(). This includes arranging for the
- * contents to be migrated.
- *
- * TODO: Currently we restrict @owner to being either NULL (for
- * global RAM regions with no owner) or devices, so that we can
- * give the RAM block a unique name for migration purposes.
- * We should lift this restriction and allow arbitrary Objects.
- * If you pass a non-NULL non-device @owner then we will assert.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_rom(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp);
-
-/**
- * memory_region_init_rom_device: Initialize a ROM memory region.
- * Writes are handled via callbacks.
- *
- * This function initializes a memory region backed by RAM for reads
- * and callbacks for writes, and arranges for the RAM backing to
- * be migrated (by calling vmstate_register_ram()
- * if @owner is a DeviceState, or vmstate_register_ram_global() if
- * @owner is NULL).
- *
- * TODO: Currently we restrict @owner to being either NULL (for
- * global RAM regions with no owner) or devices, so that we can
- * give the RAM block a unique name for migration purposes.
- * We should lift this restriction and allow arbitrary Objects.
- * If you pass a non-NULL non-device @owner then we will assert.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @owner: the object that tracks the region's reference count
- * @ops: callbacks for write access handling (must not be NULL).
- * @opaque: passed to the read and write callbacks of the @ops structure.
- * @name: Region name, becomes part of RAMBlock name used in migration stream
- * must be unique within any device
- * @size: size of the region.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
-bool memory_region_init_rom_device(MemoryRegion *mr,
- Object *owner,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size,
- Error **errp);
-
-
-/**
- * memory_region_owner: get a memory region's owner.
- *
- * @mr: the memory region being queried.
- */
-Object *memory_region_owner(MemoryRegion *mr);
-
-/**
- * memory_region_size: get a memory region's size.
- *
- * @mr: the memory region being queried.
- */
-uint64_t memory_region_size(MemoryRegion *mr);
-
-/**
- * memory_region_is_ram: check whether a memory region is random access
- *
- * Returns %true if a memory region is random access.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_ram(MemoryRegion *mr)
-{
- return mr->ram;
-}
-
-/**
- * memory_region_is_ram_device: check whether a memory region is a ram device
- *
- * Returns %true if a memory region is a device-backed RAM region
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_ram_device(MemoryRegion *mr);
-
-/**
- * memory_region_is_romd: check whether a memory region is in ROMD mode
- *
- * Returns %true if a memory region is a ROM device and currently set to allow
- * direct reads.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_romd(MemoryRegion *mr)
-{
- return mr->rom_device && mr->romd_mode;
-}
-
-/**
- * memory_region_is_protected: check whether a memory region is protected
- *
- * Returns %true if a memory region is protected RAM and cannot be accessed
- * via standard mechanisms, e.g. DMA.
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_protected(MemoryRegion *mr);
-
-/**
- * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
- * associated
- *
- * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
- *
- * @mr: the memory region being queried
- */
-bool memory_region_has_guest_memfd(MemoryRegion *mr);
-
-/**
- * memory_region_get_iommu: check whether a memory region is an iommu
- *
- * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
- * otherwise NULL.
- *
- * @mr: the memory region being queried
- */
-static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
-{
- if (mr->alias) {
- return memory_region_get_iommu(mr->alias);
- }
- if (mr->is_iommu) {
- return (IOMMUMemoryRegion *) mr;
- }
- return NULL;
-}
-
-/**
- * memory_region_get_iommu_class_nocheck: returns iommu memory region class
- * if an iommu or NULL if not
- *
- * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
- * otherwise NULL. This is a fast path avoiding QOM checking; use with caution.
- *
- * @iommu_mr: the memory region being queried
- */
-static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
- IOMMUMemoryRegion *iommu_mr)
-{
- return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
-}
-
-#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
-
-/**
- * memory_region_iommu_get_min_page_size: get minimum supported page size
- * for an iommu
- *
- * Returns minimum supported page size for an iommu.
- *
- * @iommu_mr: the memory region being queried
- */
-uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
-
-/**
- * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
- *
- * Note: for any IOMMU implementation, an in-place mapping change
- * should be notified with an UNMAP followed by a MAP.
- *
- * @iommu_mr: the memory region that was changed
- * @iommu_idx: the IOMMU index for the translation table which has changed
- * @event: TLB event with the new entry in the IOMMU translation table.
- * The entry replaces all old entries for the same virtual I/O address
- * range.
- */
-void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
- int iommu_idx,
- const IOMMUTLBEvent event);
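/*
 * Illustrative sketch (hypothetical helper; field values are placeholders):
 * how an IOMMU model might report that a guest mapping was torn down.  Per
 * the note above, an in-place change would be an UNMAP event like this
 * followed by a MAP event carrying the new translation.
 */
static void my_iommu_invalidate(IOMMUMemoryRegion *iommu_mr,
                                hwaddr iova, hwaddr mask)
{
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = iova,
            .addr_mask = mask,          /* e.g. 0xfff for one 4 KiB page */
            .perm = IOMMU_NONE,         /* range is no longer accessible */
        },
    };

    memory_region_notify_iommu(iommu_mr, 0 /* iommu_idx */, event);
}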
-
-/**
- * memory_region_notify_iommu_one: notify a change in an IOMMU translation
- * entry to a single notifier
- *
- * This works just like memory_region_notify_iommu(), but it only
- * notifies a specific notifier, not all of them.
- *
- * @notifier: the notifier to be notified
- * @event: TLB event with the new entry in the IOMMU translation table.
- * The entry replaces all old entries for the same virtual I/O address
- * range.
- */
-void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
- const IOMMUTLBEvent *event);
-
-/**
- * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
- * translation that covers the
- * range of a notifier
- *
- * @notifier: the notifier to be notified
- */
-void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
-
-
-/**
- * memory_region_register_iommu_notifier: register a notifier for changes to
- * IOMMU translation entries.
- *
- * Returns 0 on success, or a negative errno otherwise. In particular,
- * -EINVAL indicates that at least one of the attributes of the notifier
- * is not supported (flag/range) by the IOMMU memory region. In case of
- * error, an error must be set in @errp.
- *
- * @mr: the memory region to observe
- * @n: the IOMMUNotifier to be added; the notify callback receives a
- * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
- * ceases to be valid on exit from the notifier.
- * @errp: pointer to Error*, to store an error if it happens.
- */
-int memory_region_register_iommu_notifier(MemoryRegion *mr,
- IOMMUNotifier *n, Error **errp);
-
-/**
- * memory_region_iommu_replay: replay existing IOMMU translations to
- * a notifier with the minimum page granularity returned by the
- * IOMMU's @get_min_page_size method.
- *
- * Note: this is not related to record-and-replay functionality.
- *
- * @iommu_mr: the memory region to observe
- * @n: the notifier to which to replay iommu mappings
- */
-void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
-
-/**
- * memory_region_unregister_iommu_notifier: unregister a notifier for
- * changes to IOMMU translation entries.
- *
- * @mr: the memory region which was observed and for which notify_stopped()
- * needs to be called
- * @n: the notifier to be removed.
- */
-void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
- IOMMUNotifier *n);
-
-/**
- * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
- * defined on the IOMMU.
- *
- * Returns 0 on success, or a negative errno otherwise. In particular,
- * -EINVAL indicates that the IOMMU does not support the requested
- * attribute.
- *
- * @iommu_mr: the memory region
- * @attr: the requested attribute
- * @data: a pointer to the requested attribute data
- */
-int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
- enum IOMMUMemoryRegionAttr attr,
- void *data);
-
-/**
- * memory_region_iommu_attrs_to_index: return the IOMMU index to
- * use for translations with the given memory transaction attributes.
- *
- * @iommu_mr: the memory region
- * @attrs: the memory transaction attributes
- */
-int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
- MemTxAttrs attrs);
-
-/**
- * memory_region_iommu_num_indexes: return the total number of IOMMU
- * indexes that this IOMMU supports.
- *
- * @iommu_mr: the memory region
- */
-int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
-
-/**
- * memory_region_name: get a memory region's name
- *
- * Returns the string that was used to initialize the memory region.
- *
- * @mr: the memory region being queried
- */
-const char *memory_region_name(const MemoryRegion *mr);
-
-/**
- * memory_region_is_logging: return whether a memory region is logging writes
- *
- * Returns %true if the memory region is logging writes for the given client
- *
- * @mr: the memory region being queried
- * @client: the client being queried
- */
-bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
-
-/**
- * memory_region_get_dirty_log_mask: return the clients for which a
- * memory region is logging writes.
- *
- * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
- * are the bit indices.
- *
- * @mr: the memory region being queried
- */
-uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
-
-/**
- * memory_region_is_rom: check whether a memory region is ROM
- *
- * Returns %true if a memory region is read-only memory.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_rom(MemoryRegion *mr)
-{
- return mr->ram && mr->readonly;
-}
-
-/**
- * memory_region_is_nonvolatile: check whether a memory region is non-volatile
- *
- * Returns %true if a memory region is non-volatile memory.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
-{
- return mr->nonvolatile;
-}
-
-/**
- * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
- *
- * Returns a file descriptor backing a file-based RAM memory region,
- * or -1 if the region is not a file-based RAM memory region.
- *
- * @mr: the RAM or alias memory region being queried.
- */
-int memory_region_get_fd(MemoryRegion *mr);
-
-/**
- * memory_region_from_host: Convert a pointer into a RAM memory region
- * and an offset within it.
- *
- * Given a host pointer inside a RAM memory region (created with
- * memory_region_init_ram() or memory_region_init_ram_ptr()), return
- * the MemoryRegion and the offset within it.
- *
- * Use with care; by the time this function returns, the returned pointer is
- * not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the BQL, it must have other means of
- * protecting the pointer, such as a reference to the region that includes
- * the incoming ram_addr_t.
- *
- * @ptr: the host pointer to be converted
- * @offset: the offset within memory region
- */
-MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
-
-/**
- * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
- *
- * Returns a host pointer to a RAM memory region (created with
- * memory_region_init_ram() or memory_region_init_ram_ptr()).
- *
- * Use with care; by the time this function returns, the returned pointer is
- * not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the BQL, it must have other means of
- * protecting the pointer, such as a reference to the region that includes
- * the incoming ram_addr_t.
- *
- * @mr: the memory region being queried.
- */
-void *memory_region_get_ram_ptr(MemoryRegion *mr);
-
-/* memory_region_ram_resize: Resize a RAM region.
- *
- * Resizing RAM while migrating can result in the migration being canceled.
- * Care has to be taken if the guest might have already detected the memory.
- *
- * @mr: a memory region created with @memory_region_init_resizeable_ram.
- * @newsize: the new size of the region
- * @errp: pointer to Error*, to store an error if it happens.
- */
-void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
- Error **errp);
-
-/**
- * memory_region_msync: Synchronize selected address range of
- * a memory mapped region
- *
- * @mr: the memory region to be synced
- * @addr: the initial address of the range to be synced
- * @size: the size of the range to be synced
- */
-void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
-
-/**
- * memory_region_writeback: Trigger cache writeback for
- * selected address range
- *
- * @mr: the memory region to be updated
- * @addr: the initial address of the range to be written back
- * @size: the size of the range to be written back
- */
-void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
-
-/**
- * memory_region_set_log: Turn dirty logging on or off for a region.
- *
- * Turns dirty logging on or off for a specified client (display, migration).
- * Only meaningful for RAM regions.
- *
- * @mr: the memory region being updated.
- * @log: whether dirty logging is to be enabled or disabled.
- * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
- */
-void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
-
-/**
- * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
- *
- * Marks a range of bytes as dirty, after it has been dirtied outside
- * guest code.
- *
- * @mr: the memory region being dirtied.
- * @addr: the address (relative to the start of the region) being dirtied.
- * @size: size of the range being dirtied.
- */
-void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size);
-
-/**
- * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
- *
- * This function is called when the caller wants to clear the remote
- * dirty bitmap of a memory range within the memory region. This can
- * be used by e.g. KVM to manually clear the dirty log when
- * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
- * kernel.
- *
- * @mr: the memory region to clear the dirty log upon
- * @start: start address offset within the memory region
- * @len: length of the memory region to clear dirty bitmap
- */
-void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
- hwaddr len);
-
-/**
- * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
- * bitmap and clear it.
- *
- * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
- * returns the snapshot. The snapshot can then be used to query dirty
- * status, using memory_region_snapshot_get_dirty. Snapshotting allows
- * querying the same page multiple times, which is especially useful for
- * display updates where the scanlines often are not page aligned.
- *
- * The dirty bitmap region which gets copied into the snapshot (and
- * cleared afterwards) can be larger than requested. The boundaries
- * are rounded up/down so complete bitmap longs (covering 64 pages on
- * 64-bit hosts) can be copied over into the bitmap snapshot. This
- * isn't a problem for display updates as the extra pages are outside
- * the visible area, and in case the visible area changes a full
- * display redraw is due anyway. Should other use cases for this
- * function emerge we might have to revisit this implementation
- * detail.
- *
- * Use g_free to release DirtyBitmapSnapshot.
- *
- * @mr: the memory region being queried.
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
- */
-DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
- hwaddr addr,
- hwaddr size,
- unsigned client);
-
-/**
- * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
- * in the specified dirty bitmap snapshot.
- *
- * @mr: the memory region being queried.
- * @snap: the dirty bitmap snapshot
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- */
-bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
- DirtyBitmapSnapshot *snap,
- hwaddr addr, hwaddr size);
-
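
A minimal usage sketch (not part of this patch; the scanline redraw is left as a comment) showing the snapshot-then-query pattern described above:

static void refresh_display(MemoryRegion *vram, hwaddr stride, int height)
{
    DirtyBitmapSnapshot *snap =
        memory_region_snapshot_and_clear_dirty(vram, 0,
                                               (hwaddr)stride * height,
                                               DIRTY_MEMORY_VGA);
    for (int y = 0; y < height; y++) {
        if (memory_region_snapshot_get_dirty(vram, snap,
                                             (hwaddr)y * stride, stride)) {
            /* redraw scanline y ... */
        }
    }
    g_free(snap);
}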
-/**
- * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
- * client.
- *
- * Marks a range of pages as no longer dirty.
- *
- * @mr: the region being updated.
- * @addr: the start of the subrange being cleaned.
- * @size: the size of the subrange being cleaned.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
-/**
- * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
- * TBs (for self-modifying code).
- *
- * The MemoryRegionOps->write() callback of a ROM device must use this function
- * to mark byte ranges that have been modified internally, such as by directly
- * accessing the memory returned by memory_region_get_ram_ptr().
- *
- * This function marks the range dirty and invalidates TBs so that TCG can
- * detect self-modifying code.
- *
- * @mr: the region being flushed.
- * @addr: the start, relative to the start of the region, of the range being
- * flushed.
- * @size: the size, in bytes, of the range being flushed.
- */
-void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
-
-/**
- * memory_region_set_readonly: Turn a memory region read-only (or read-write)
- *
- * Allows a memory region to be marked as read-only (turning it into a ROM).
- * Only useful on RAM regions.
- *
- * @mr: the region being updated.
- * @readonly: whether the region is to be ROM or RAM.
- */
-void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
-
-/**
- * memory_region_set_nonvolatile: Turn a memory region non-volatile
- *
- * Allows a memory region to be marked as non-volatile.
- * Only useful on RAM regions.
- *
- * @mr: the region being updated.
- * @nonvolatile: whether the region is to be non-volatile.
- */
-void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
-
-/**
- * memory_region_rom_device_set_romd: enable/disable ROMD mode
- *
- * Allows a ROM device (initialized with memory_region_init_rom_device()) to
- * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
- * device is mapped to guest memory and satisfies read access directly.
- * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
- * Writes are always handled by the #MemoryRegion.write function.
- *
- * @mr: the memory region to be updated
- * @romd_mode: %true to put the region into ROMD mode
- */
-void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
-
-/**
- * memory_region_set_coalescing: Enable memory coalescing for the region.
- *
- * Enables writes to a region to be queued for later processing. MMIO ->write
- * callbacks may be delayed until a non-coalesced MMIO is issued.
- * Only useful for IO regions. Roughly similar to write-combining hardware.
- *
- * @mr: the memory region to be write coalesced
- */
-void memory_region_set_coalescing(MemoryRegion *mr);
-
-/**
- * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
- * a region.
- *
- * Like memory_region_set_coalescing(), but works on a sub-range of a region.
- * Multiple calls can be issued to coalesce disjoint ranges.
- *
- * @mr: the memory region to be updated.
- * @offset: the start of the range within the region to be coalesced.
- * @size: the size of the subrange to be coalesced.
- */
-void memory_region_add_coalescing(MemoryRegion *mr,
- hwaddr offset,
- uint64_t size);
-
-/**
- * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
- *
- * Disables any coalescing caused by memory_region_set_coalescing() or
- * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
- * in hardware.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_coalescing(MemoryRegion *mr);
-
-/**
- * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
- * accesses.
- *
- * Ensure that pending coalesced MMIO requests are flushed before the memory
- * region is accessed. This property is automatically enabled for all regions
- * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_set_flush_coalesced(MemoryRegion *mr);
-
-/**
- * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
- * accesses.
- *
- * Clear the automatic coalesced MMIO flushing enabled via
- * memory_region_set_flush_coalesced. Note that this service has no effect on
- * memory regions that have MMIO coalescing enabled for themselves. For them,
- * automatic flushing will stop once coalescing is disabled.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_flush_coalesced(MemoryRegion *mr);
-
-/**
- * memory_region_add_eventfd: Request an eventfd to be triggered when a word
- * is written to a location.
- *
- * Marks a word in an IO region (initialized with memory_region_init_io())
- * as a trigger for an eventfd event. The I/O callback will not be called.
- * The caller must be prepared to handle failure (that is, take the required
- * action if the callback _is_ called).
- *
- * @mr: the memory region being updated.
- * @addr: the address within @mr that is to be monitored
- * @size: the size of the access to trigger the eventfd
- * @match_data: whether to match against @data, instead of just @addr
- * @data: the data to match against the guest write
- * @e: event notifier to be triggered when @addr, @size, and @data all match.
- */
-void memory_region_add_eventfd(MemoryRegion *mr,
- hwaddr addr,
- unsigned size,
- bool match_data,
- uint64_t data,
- EventNotifier *e);
-
-/**
- * memory_region_del_eventfd: Cancel an eventfd.
- *
- * Cancels an eventfd trigger requested by a previous
- * memory_region_add_eventfd() call.
- *
- * @mr: the memory region being updated.
- * @addr: the address within @mr that is to be monitored
- * @size: the size of the access to trigger the eventfd
- * @match_data: whether to match against @data, instead of just @addr
- * @data: the data to match against the guest write
- * @e: event notifier to be triggered when @addr, @size, and @data all match.
- */
-void memory_region_del_eventfd(MemoryRegion *mr,
- hwaddr addr,
- unsigned size,
- bool match_data,
- uint64_t data,
- EventNotifier *e);
-
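
An illustrative sketch (not part of this patch) of wiring a doorbell register to an eventfd; event_notifier_init() is assumed from "qemu/event_notifier.h", and the offset/value pair is made up:

static EventNotifier doorbell;

static void my_device_add_doorbell(MemoryRegion *mmio)
{
    /* A 4-byte guest write of 0x1 at offset 0x40 signals the notifier
     * instead of invoking the region's ->write callback. */
    event_notifier_init(&doorbell, 0);
    memory_region_add_eventfd(mmio, 0x40, 4, true, 0x1, &doorbell);
}

static void my_device_del_doorbell(MemoryRegion *mmio)
{
    memory_region_del_eventfd(mmio, 0x40, 4, true, 0x1, &doorbell);
}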
-/**
- * memory_region_add_subregion: Add a subregion to a container.
- *
- * Adds a subregion at @offset. The subregion may not overlap with other
- * subregions (except for those explicitly marked as overlapping). A region
- * may only be added once as a subregion (unless removed with
- * memory_region_del_subregion()); use memory_region_init_alias() if you
- * want a region to be a subregion in multiple locations.
- *
- * @mr: the region to contain the new subregion; must be a container
- * initialized with memory_region_init().
- * @offset: the offset relative to @mr where @subregion is added.
- * @subregion: the subregion to be added.
- */
-void memory_region_add_subregion(MemoryRegion *mr,
- hwaddr offset,
- MemoryRegion *subregion);
-/**
- * memory_region_add_subregion_overlap: Add a subregion to a container
- * with overlap.
- *
- * Adds a subregion at @offset. The subregion may overlap with other
- * subregions. Conflicts are resolved by having a higher @priority hide a
- * lower @priority. Subregions without priority are taken as @priority 0.
- * A region may only be added once as a subregion (unless removed with
- * memory_region_del_subregion()); use memory_region_init_alias() if you
- * want a region to be a subregion in multiple locations.
- *
- * @mr: the region to contain the new subregion; must be a container
- * initialized with memory_region_init().
- * @offset: the offset relative to @mr where @subregion is added.
- * @subregion: the subregion to be added.
- * @priority: used for resolving overlaps; highest priority wins.
- */
-void memory_region_add_subregion_overlap(MemoryRegion *mr,
- hwaddr offset,
- MemoryRegion *subregion,
- int priority);
-
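
A small sketch (not part of this patch; region names are illustrative) of the container pattern these two functions describe:

static void map_board_memory(MemoryRegion *sysmem, MemoryRegion *ram,
                             MemoryRegion *boot_rom_alias)
{
    /* RAM covers the container at priority 0 ... */
    memory_region_add_subregion(sysmem, 0, ram);
    /* ... and a higher-priority boot ROM alias shadows its first pages. */
    memory_region_add_subregion_overlap(sysmem, 0, boot_rom_alias, 1);
}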
-/**
- * memory_region_get_ram_addr: Get the ram address associated with a memory
- * region
- *
- * @mr: the region to be queried
- */
-ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
-
-uint64_t memory_region_get_alignment(const MemoryRegion *mr);
-/**
- * memory_region_del_subregion: Remove a subregion.
- *
- * Removes a subregion from its container.
- *
- * @mr: the container to be updated.
- * @subregion: the region being removed; must be a current subregion of @mr.
- */
-void memory_region_del_subregion(MemoryRegion *mr,
- MemoryRegion *subregion);
-
-/*
- * memory_region_set_enabled: dynamically enable or disable a region
- *
- * Enables or disables a memory region. A disabled memory region
- * ignores all accesses to itself and its subregions. It does not
- * obscure sibling subregions with lower priority - it simply behaves as
- * if it was removed from the hierarchy.
- *
- * Regions default to being enabled.
- *
- * @mr: the region to be updated
- * @enabled: whether to enable or disable the region
- */
-void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
-
-/*
- * memory_region_set_address: dynamically update the address of a region
- *
- * Dynamically updates the address of a region, relative to its container.
- * May be used on regions that are currently part of a memory hierarchy.
- *
- * @mr: the region to be updated
- * @addr: new address, relative to container region
- */
-void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
-
-/*
- * memory_region_set_size: dynamically update the size of a region.
- *
- * Dynamically updates the size of a region.
- *
- * @mr: the region to be updated
- * @size: the new size of the region.
- */
-void memory_region_set_size(MemoryRegion *mr, uint64_t size);
-
-/*
- * memory_region_set_alias_offset: dynamically update a memory alias's offset
- *
- * Dynamically updates the offset into the target region that an alias points
- * to, as if the fourth argument to memory_region_init_alias() has changed.
- *
- * @mr: the #MemoryRegion to be updated; should be an alias.
- * @offset: the new offset into the target memory region
- */
-void memory_region_set_alias_offset(MemoryRegion *mr,
- hwaddr offset);
-
-/*
- * memory_region_set_unmergeable: Set a memory region unmergeable
- *
- * Mark a memory region unmergeable, resulting in the memory region (or
- * everything contained in a memory region container) not getting merged when
- * simplifying the address space and notifying memory listeners. Consequently,
- * memory listeners will never get notified about ranges that are larger than
- * the original memory regions.
- *
- * This is primarily useful when multiple aliases to a RAM memory region are
- * mapped into a memory region container, and updates (e.g., enable/disable or
- * map/unmap) of individual memory region aliases are not supposed to affect
- * other memory regions in the same container.
- *
- * @mr: the #MemoryRegion to be updated
- * @unmergeable: whether to mark the #MemoryRegion unmergeable
- */
-void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
-
-/**
- * memory_region_present: checks if an address relative to a @container
- * translates into a #MemoryRegion within @container
- *
- * Answer whether a #MemoryRegion within @container covers the address
- * @addr.
- *
- * @container: a #MemoryRegion within which @addr is a relative address
- * @addr: the area within @container to be searched
- */
-bool memory_region_present(MemoryRegion *container, hwaddr addr);
-
-/**
- * memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into another memory region, which does not necessarily imply that it is
- * mapped into an address space.
- *
- * @mr: a #MemoryRegion which should be checked if it's mapped
- */
-bool memory_region_is_mapped(MemoryRegion *mr);
-
-/**
- * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
- * #MemoryRegion
- *
- * The #RamDiscardManager cannot change while a memory region is mapped.
- *
- * @mr: the #MemoryRegion
- */
-RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
-
-/**
- * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
- * #RamDiscardManager assigned
- *
- * @mr: the #MemoryRegion
- */
-static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
-{
- return !!memory_region_get_ram_discard_manager(mr);
-}
-
-/**
- * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
- * #MemoryRegion
- *
- * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
- * that does not cover RAM, or a #MemoryRegion that already has a
- * #RamDiscardManager assigned.
- *
- * @mr: the #MemoryRegion
- * @rdm: #RamDiscardManager to set
- */
-void memory_region_set_ram_discard_manager(MemoryRegion *mr,
- RamDiscardManager *rdm);
-
-/**
- * memory_region_find: translate an address/size relative to a
- * MemoryRegion into a #MemoryRegionSection.
- *
- * Locates the first #MemoryRegion within @mr that overlaps the range
- * given by @addr and @size.
- *
- * Returns a #MemoryRegionSection that describes a contiguous overlap.
- * It will have the following characteristics:
- * - @size = 0 iff no overlap was found
- * - @mr is non-%NULL iff an overlap was found
- *
- * Remember that in the return value the @offset_within_region is
- * relative to the returned region (in the .@mr field), not to the
- * @mr argument.
- *
- * Similarly, the .@offset_within_address_space is relative to the
- * address space that contains both regions, the passed and the
- * returned one. However, in the special case where the @mr argument
- * has no container (and thus is the root of the address space), the
- * following will hold:
- * - @offset_within_address_space >= @addr
- * - @offset_within_address_space + .@size <= @addr + @size
- *
- * @mr: a MemoryRegion within which @addr is a relative address
- * @addr: start of the area within @mr to be searched
- * @size: size of the area to be searched
- */
-MemoryRegionSection memory_region_find(MemoryRegion *mr,
- hwaddr addr, uint64_t size);
-
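
An illustrative sketch (not part of this patch); dropping the reference on the returned .mr with memory_region_unref() is an assumption not spelled out in the comment above:

static bool range_is_ram(MemoryRegion *root, hwaddr addr, uint64_t size)
{
    MemoryRegionSection sec = memory_region_find(root, addr, size);
    bool ret = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return ret;
}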
-/**
- * memory_global_dirty_log_sync: synchronize the dirty log for all memory
- *
- * Synchronizes the dirty page log for all address spaces.
- *
- * @last_stage: whether this is the last stage of live migration
- */
-void memory_global_dirty_log_sync(bool last_stage);
-
-/**
- * memory_global_after_dirty_log_sync: synchronize vCPUs after a dirty log sync
- *
- * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
- * This function must be called after the dirty log bitmap is cleared, and
- * before dirty guest memory pages are read. If you are using
- * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
- * care of doing this.
- */
-void memory_global_after_dirty_log_sync(void);
-
-/**
- * memory_region_transaction_begin: Start a transaction.
- *
- * During a transaction, changes will be accumulated and made visible
- * only when the transaction ends (is committed).
- */
-void memory_region_transaction_begin(void);
-
-/**
- * memory_region_transaction_commit: Commit a transaction and make changes
- * visible to the guest.
- */
-void memory_region_transaction_commit(void);
-
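
For example (an illustrative sketch, not part of this patch), batching a BAR move so that listeners see a single update:

static void remap_bar(MemoryRegion *bar, hwaddr new_base, bool enable)
{
    memory_region_transaction_begin();
    memory_region_set_address(bar, new_base);
    memory_region_set_enabled(bar, enable);
    memory_region_transaction_commit();   /* changes become visible here */
}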
-/**
- * memory_listener_register: register callbacks to be called when memory
- * sections are mapped or unmapped into an address
- * space
- *
- * @listener: an object containing the callbacks to be called
- * @filter: if non-%NULL, only regions in this address space will be observed
- */
-void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
-
-/**
- * memory_listener_unregister: undo the effect of memory_listener_register()
- *
- * @listener: an object containing the callbacks to be removed
- */
-void memory_listener_unregister(MemoryListener *listener);
-
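
A minimal listener sketch (not part of this patch); only .region_add is filled in, the .name field is assumed, and &address_space_memory is assumed to be the usual global system-memory address space:

static void my_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* Called once per section mapped into the filtered address space. */
}

static MemoryListener my_listener = {
    .name = "example-listener",
    .region_add = my_region_add,
};

static void start_listening(void)
{
    memory_listener_register(&my_listener, &address_space_memory);
}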
-/**
- * memory_global_dirty_log_start: begin dirty logging for all regions
- *
- * @flags: purpose of starting dirty log, migration or dirty rate
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success; false on failure, with @errp set.
- */
-bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
-
-/**
- * memory_global_dirty_log_stop: end dirty logging for all regions
- *
- * @flags: purpose of stopping dirty log, migration or dirty rate
- */
-void memory_global_dirty_log_stop(unsigned int flags);
-
-void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
-
-bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs);
-
-/**
- * memory_region_dispatch_read: perform a read directly to the specified
- * MemoryRegion.
- *
- * @mr: #MemoryRegion to access
- * @addr: address within that region
- * @pval: pointer to a uint64_t to which the data is written
- * @op: size, sign, and endianness of the memory operation
- * @attrs: memory transaction attributes to use for the access
- */
-MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *pval,
- MemOp op,
- MemTxAttrs attrs);
-/**
- * memory_region_dispatch_write: perform a write directly to the specified
- * MemoryRegion.
- *
- * @mr: #MemoryRegion to access
- * @addr: address within that region
- * @data: data to write
- * @op: size, sign, and endianness of the memory operation
- * @attrs: memory transaction attributes to use for the access
- */
-MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
- hwaddr addr,
- uint64_t data,
- MemOp op,
- MemTxAttrs attrs);
-
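
An illustrative sketch (not part of this patch) of a direct 32-bit little-endian read, assuming MO_32/MO_LE and MEMTX_OK from the usual MemOp/MemTxResult definitions:

static uint64_t mmio_peek32(MemoryRegion *mr, hwaddr offset)
{
    uint64_t val = 0;

    if (memory_region_dispatch_read(mr, offset, &val, MO_32 | MO_LE,
                                    MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
        val = UINT64_MAX;   /* treat a failed transaction as all-ones */
    }
    return val;
}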
-/**
- * address_space_init: initializes an address space
- *
- * @as: an uninitialized #AddressSpace
- * @root: a #MemoryRegion that routes addresses for the address space
- * @name: an address space name. The name is only used for debugging
- * output.
- */
-void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
-
-/**
- * address_space_destroy: destroy an address space
- *
- * Releases all resources associated with an address space. After an address space
- * is destroyed, its root memory region (given by address_space_init()) may be destroyed
- * as well.
- *
- * @as: address space to be destroyed
- */
-void address_space_destroy(AddressSpace *as);
-
-/**
- * address_space_remove_listeners: unregister all listeners of an address space
- *
- * Removes all callbacks previously registered with memory_listener_register()
- * for @as.
- *
- * @as: an initialized #AddressSpace
- */
-void address_space_remove_listeners(AddressSpace *as);
-
-/**
- * address_space_rw: read from or write to an address space.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @attrs: memory transaction attributes
- * @buf: buffer with the data transferred
- * @len: the number of bytes to read or write
- * @is_write: indicates the transfer direction
- */
-MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, void *buf,
- hwaddr len, bool is_write);
-
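
Putting the calls above together, a rough sketch (not part of this patch; the root region, the address and the value are illustrative):

static void dma_roundtrip(MemoryRegion *dma_root)
{
    AddressSpace as;
    uint32_t val = 0x12345678;

    address_space_init(&as, dma_root, "example-dma");
    if (address_space_rw(&as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                         &val, sizeof(val), true) != MEMTX_OK) {
        /* unassigned memory, device error or IOMMU fault */
    }
    address_space_rw(&as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                     &val, sizeof(val), false);
    address_space_destroy(&as);
}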
-/**
- * address_space_write: write to address space.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @attrs: memory transaction attributes
- * @buf: buffer with the data transferred
- * @len: the number of bytes to write
- */
-MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- const void *buf, hwaddr len);
-
-/**
- * address_space_write_rom: write to address space, including ROM.
- *
- * This function writes to the specified address space, but will
- * write data to both ROM and RAM. This is used for non-guest
- * writes like writes from the gdb debug stub or initial loading
- * of ROM contents.
- *
- * Note that portions of the write which attempt to write data to
- * a device will be silently ignored -- only real RAM and ROM will
- * be written to.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @attrs: memory transaction attributes
- * @buf: buffer with the data transferred
- * @len: the number of bytes to write
- */
-MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- const void *buf, hwaddr len);
-
-/* address_space_ld*: load from an address space
- * address_space_st*: store to an address space
- *
- * These functions perform a load or store of the byte, word,
- * longword or quad to the specified address within the AddressSpace.
- * The _le suffixed functions treat the data as little endian;
- * _be indicates big endian; no suffix indicates "same endianness
- * as guest CPU".
- *
- * The "guest CPU endianness" accessors are deprecated for use outside
- * target-* code; devices should be CPU-agnostic and use either the LE
- * or the BE accessors.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @val: data value, for stores
- * @attrs: memory transaction attributes
- * @result: location to write the success/failure of the transaction;
- * if NULL, this information is discarded
- */
-
-#define SUFFIX
-#define ARG1 as
-#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst.h.inc"
-
-#define SUFFIX
-#define ARG1 as
-#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst_phys.h.inc"
-
-struct MemoryRegionCache {
- uint8_t *ptr;
- hwaddr xlat;
- hwaddr len;
- FlatView *fv;
- MemoryRegionSection mrs;
- bool is_write;
-};
-
-/* address_space_ld*_cached: load from a cached #MemoryRegion
- * address_space_st*_cached: store into a cached #MemoryRegion
- *
- * These functions perform a load or store of the byte, word,
- * longword or quad to the specified address. The address is
- * a physical address in the AddressSpace, but it must lie within
- * a #MemoryRegion that was mapped with address_space_cache_init.
- *
- * The _le suffixed functions treat the data as little endian;
- * _be indicates big endian; no suffix indicates "same endianness
- * as guest CPU".
- *
- * The "guest CPU endianness" accessors are deprecated for use outside
- * target-* code; devices should be CPU-agnostic and use either the LE
- * or the BE accessors.
- *
- * @cache: previously initialized #MemoryRegionCache to be accessed
- * @addr: address within the address space
- * @val: data value, for stores
- * @attrs: memory transaction attributes
- * @result: location to write the success/failure of the transaction;
- * if NULL, this information is discarded
- */
-
-#define SUFFIX _cached_slow
-#define ARG1 cache
-#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst.h.inc"
-
-/* Inline fast path for direct RAM access. */
-static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
- hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len);
- if (likely(cache->ptr)) {
- return ldub_p(cache->ptr + addr);
- } else {
- return address_space_ldub_cached_slow(cache, addr, attrs, result);
- }
-}
-
-static inline void address_space_stb_cached(MemoryRegionCache *cache,
- hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len);
- if (likely(cache->ptr)) {
- stb_p(cache->ptr + addr, val);
- } else {
- address_space_stb_cached_slow(cache, addr, val, attrs, result);
- }
-}
-
-#define ENDIANNESS _le
-#include "exec/memory_ldst_cached.h.inc"
-
-#define ENDIANNESS _be
-#include "exec/memory_ldst_cached.h.inc"
-
-#define SUFFIX _cached
-#define ARG1 cache
-#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst_phys.h.inc"
-
-/* address_space_cache_init: prepare for repeated access to a physical
- * memory region
- *
- * @cache: #MemoryRegionCache to be filled
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @len: length of buffer
- * @is_write: indicates the transfer direction
- *
- * Will only work with RAM, and may map a subset of the requested range by
- * returning a value that is less than @len. On failure, return a negative
- * errno value.
- *
- * Because it only works with RAM, this function can be used for
- * read-modify-write operations. In this case, is_write should be %true.
- *
- * Note that addresses passed to the address_space_*_cached functions
- * are relative to @addr.
- */
-int64_t address_space_cache_init(MemoryRegionCache *cache,
- AddressSpace *as,
- hwaddr addr,
- hwaddr len,
- bool is_write);
-
-/**
- * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
- *
- * @cache: The #MemoryRegionCache to operate on.
- *
- * Initializes #MemoryRegionCache structure without memory region attached.
- * A cache initialized this way can only be safely destroyed; it cannot be used.
- */
-static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
-{
- cache->mrs.mr = NULL;
- /* There is no real need to initialize fv, but it makes Coverity happy. */
- cache->fv = NULL;
-}
-
-/**
- * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
- *
- * @cache: The #MemoryRegionCache to operate on.
- * @addr: The first physical address that was written, relative to the
- * address that was passed to @address_space_cache_init.
- * @access_len: The number of bytes that were written starting at @addr.
- */
-void address_space_cache_invalidate(MemoryRegionCache *cache,
- hwaddr addr,
- hwaddr access_len);
-
-/**
- * address_space_cache_destroy: free a #MemoryRegionCache
- *
- * @cache: The #MemoryRegionCache whose memory should be released.
- */
-void address_space_cache_destroy(MemoryRegionCache *cache);
-
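
A rough sketch (not part of this patch) of the virtio-style pattern; address_space_lduw_le_cached() is one of the accessors generated above, and initializing the cache empty first is a defensive assumption so the final destroy is always safe:

static uint16_t read_avail_idx(AddressSpace *as, hwaddr ring_pa)
{
    MemoryRegionCache cache;
    uint16_t idx = 0;

    address_space_cache_init_empty(&cache);
    if (address_space_cache_init(&cache, as, ring_pa, 4, false) >= 4) {
        /* offsets given to the *_cached accessors are relative to ring_pa */
        idx = address_space_lduw_le_cached(&cache, 2,
                                           MEMTXATTRS_UNSPECIFIED, NULL);
    }
    address_space_cache_destroy(&cache);
    return idx;
}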
-/* address_space_get_iotlb_entry: translate an address into an IOTLB
- * entry. Should be called from an RCU critical section.
- */
-IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
- bool is_write, MemTxAttrs attrs);
-
-/* address_space_translate: translate an address range within an address space
- * into a MemoryRegion and an address range within that region. Should be
- * called from an RCU critical section, to avoid that the last reference
- * to the returned region disappears after address_space_translate returns.
- *
- * @fv: #FlatView to be accessed
- * @addr: address within that address space
- * @xlat: pointer to address within the returned memory region section's
- * #MemoryRegion.
- * @len: pointer to length
- * @is_write: indicates the transfer direction
- * @attrs: memory attributes
- */
-MemoryRegion *flatview_translate(FlatView *fv,
- hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write,
- MemTxAttrs attrs);
-
-static inline MemoryRegion *address_space_translate(AddressSpace *as,
- hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write,
- MemTxAttrs attrs)
-{
- return flatview_translate(address_space_to_flatview(as),
- addr, xlat, len, is_write, attrs);
-}
-
-/* address_space_access_valid: check for validity of accessing an address
- * space range
- *
- * Check whether memory is assigned to the given address space range, and
- * access is permitted by any IOMMU regions that are active for the address
- * space.
- *
- * For now, addr and len should be aligned to a page size. This limitation
- * will be lifted in the future.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @len: length of the area to be checked
- * @is_write: indicates the transfer direction
- * @attrs: memory attributes
- */
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
- bool is_write, MemTxAttrs attrs);
-
-/* address_space_map: map a physical memory region into a host virtual address
- *
- * May map a subset of the requested range, given by and returned in @plen.
- * May return %NULL and set *@plen to zero (0) if resources needed to perform
- * the mapping are exhausted.
- * Use only for reads OR writes - not for read-modify-write operations.
- * Use address_space_register_map_client() to know when retrying the map
- * operation is likely to succeed.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @plen: pointer to length of buffer; updated on return
- * @is_write: indicates the transfer direction
- * @attrs: memory attributes
- */
-void *address_space_map(AddressSpace *as, hwaddr addr,
- hwaddr *plen, bool is_write, MemTxAttrs attrs);
-
-/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
- *
- * Will also mark the memory as dirty if @is_write == %true. @access_len gives
- * the amount of memory that was actually read or written by the caller.
- *
- * @as: #AddressSpace used
- * @buffer: host pointer as returned by address_space_map()
- * @len: buffer length as returned by address_space_map()
- * @access_len: amount of data actually transferred
- * @is_write: indicates the transfer direction
- */
-void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
- bool is_write, hwaddr access_len);
-
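
An illustrative sketch (not part of this patch) of the map/unmap pairing; note the mapped length may come back smaller than requested:

static void dma_fill(AddressSpace *as, hwaddr addr, hwaddr size, uint8_t byte)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true,
                                   MEMTXATTRS_UNSPECIFIED);

    if (!host) {
        return;   /* resources exhausted; retry via a registered map client */
    }
    memset(host, byte, plen);                 /* plen may be < size */
    address_space_unmap(as, host, plen, true, plen);
}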
-/*
- * address_space_register_map_client: Register a callback to invoke when
- * resources for address_space_map() are available again.
- *
- * address_space_map may fail when there are not enough resources available,
- * such as when bounce buffer memory would exceed the limit. The callback can
- * be used to retry the address_space_map operation. Note that the callback
- * gets automatically removed after firing.
- *
- * @as: #AddressSpace to be accessed
- * @bh: callback to invoke when address_space_map() retry is appropriate
- */
-void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
-
-/*
- * address_space_unregister_map_client: Unregister a callback that has
- * previously been registered and not fired yet.
- *
- * @as: #AddressSpace to be accessed
- * @bh: callback to unregister
- */
-void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
-
-/* Internal functions, part of the implementation of address_space_read. */
-MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, void *buf, hwaddr len);
-MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, void *buf,
- hwaddr len, hwaddr addr1, hwaddr l,
- MemoryRegion *mr);
-void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
-
-/* Internal functions, part of the implementation of address_space_read_cached
- * and address_space_write_cached. */
-MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, void *buf, hwaddr len);
-MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, const void *buf,
- hwaddr len);
-
-int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
-bool prepare_mmio_access(MemoryRegion *mr);
-
-static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
-{
- if (is_write) {
- return memory_region_is_ram(mr) && !mr->readonly &&
- !mr->rom_device && !memory_region_is_ram_device(mr);
- } else {
- return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
- memory_region_is_romd(mr);
- }
-}
-
-/**
- * address_space_read: read from an address space.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault). Called within RCU critical section.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @attrs: memory transaction attributes
- * @buf: buffer with the data transferred
- * @len: length of the data transferred
- */
-static inline __attribute__((__always_inline__))
-MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, void *buf,
- hwaddr len)
-{
- MemTxResult result = MEMTX_OK;
- hwaddr l, addr1;
- void *ptr;
- MemoryRegion *mr;
- FlatView *fv;
-
- if (__builtin_constant_p(len)) {
- if (len) {
- RCU_READ_LOCK_GUARD();
- fv = address_space_to_flatview(as);
- l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
- if (len == l && memory_access_is_direct(mr, false)) {
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- memcpy(buf, ptr, len);
- } else {
- result = flatview_read_continue(fv, addr, attrs, buf, len,
- addr1, l, mr);
- }
- }
- } else {
- result = address_space_read_full(as, addr, attrs, buf, len);
- }
- return result;
-}
-
-/**
- * address_space_read_cached: read from a cached RAM region
- *
- * @cache: Cached region to be addressed
- * @addr: address relative to the base of the RAM region
- * @buf: buffer with the data transferred
- * @len: length of the data transferred
- */
-static inline MemTxResult
-address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
- void *buf, hwaddr len)
-{
- assert(addr < cache->len && len <= cache->len - addr);
- fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
- if (likely(cache->ptr)) {
- memcpy(buf, cache->ptr + addr, len);
- return MEMTX_OK;
- } else {
- return address_space_read_cached_slow(cache, addr, buf, len);
- }
-}
-
-/**
- * address_space_write_cached: write to a cached RAM region
- *
- * @cache: Cached region to be addressed
- * @addr: address relative to the base of the RAM region
- * @buf: buffer with the data transferred
- * @len: length of the data transferred
- */
-static inline MemTxResult
-address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
- const void *buf, hwaddr len)
-{
- assert(addr < cache->len && len <= cache->len - addr);
- if (likely(cache->ptr)) {
- memcpy(cache->ptr + addr, buf, len);
- return MEMTX_OK;
- } else {
- return address_space_write_cached_slow(cache, addr, buf, len);
- }
-}
-
-/**
- * address_space_set: Fill address space with a constant byte.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @c: constant byte to fill the memory
- * @len: the number of bytes to fill with the constant byte
- * @attrs: memory transaction attributes
- */
-MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
- uint8_t c, hwaddr len, MemTxAttrs attrs);
-
-#ifdef COMPILING_PER_TARGET
-/* enum device_endian to MemOp. */
-static inline MemOp devend_memop(enum device_endian end)
-{
- QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
- DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
-
-#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
- /* Swap if non-host endianness or native (target) endianness */
- return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
-#else
- const int non_host_endianness =
- DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
-
- /* In this case, native (target) endianness needs no swap. */
- return (end == non_host_endianness) ? MO_BSWAP : 0;
-#endif
-}
-#endif /* COMPILING_PER_TARGET */
-
-/*
- * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
- * to manage the actual amount of memory consumed by the VM (then, the memory
- * provided by RAM blocks might be bigger than the desired memory consumption).
- * This *must* be set if:
- * - Discarding parts of a RAM block does not result in the change being
- * reflected in the VM and the pages getting freed.
- * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
- * discards blindly.
- * - Discarding parts of a RAM block will result in integrity issues (e.g.,
- * encrypted VMs).
- * Technologies that only temporarily pin the current working set of a
- * driver are fine, because we don't expect such pages to be discarded
- * (esp. based on guest action like balloon inflation).
- *
- * This is *not* to be used to protect from concurrent discards (esp.,
- * postcopy).
- *
- * Returns 0 if successful. Returns -EBUSY if a technology that relies on
- * discards to work reliably is active.
- */
-int ram_block_discard_disable(bool state);
-
-/*
- * See ram_block_discard_disable(): only disable uncoordinated discards,
- * keeping coordinated discards (via the RamDiscardManager) enabled.
- */
-int ram_block_uncoordinated_discard_disable(bool state);
-
-/*
- * Inhibit technologies that disable discarding of pages in RAM blocks.
- *
- * Returns 0 if successful. Returns -EBUSY if discarding of pages has already
- * been disabled.
- */
-int ram_block_discard_require(bool state);
-
-/*
- * See ram_block_discard_require(): only inhibit technologies that disable
- * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
- * technologies that only inhibit uncoordinated discards (via the
- * RamDiscardManager).
- */
-int ram_block_coordinated_discard_require(bool state);
-
-/*
- * Test if any discarding of memory in ram blocks is disabled.
- */
-bool ram_block_discard_is_disabled(void);
-
-/*
- * Test if any discarding of memory in ram blocks is required to work reliably.
- */
-bool ram_block_discard_is_required(void);
-
-#endif
-
-#endif
diff --git a/include/exec/memory_ldst.h.inc b/include/exec/memory_ldst.h.inc
index 92ad74e..7270235 100644
--- a/include/exec/memory_ldst.h.inc
+++ b/include/exec/memory_ldst.h.inc
@@ -19,7 +19,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifdef TARGET_ENDIANNESS
uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
@@ -34,7 +33,6 @@ void glue(address_space_stl, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
void glue(address_space_stq, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
-#else
uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
@@ -63,9 +61,7 @@ void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
-#endif
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
-#undef TARGET_ENDIANNESS
diff --git a/include/exec/memory_ldst_phys.h.inc b/include/exec/memory_ldst_phys.h.inc
index ecd6786..db67de7 100644
--- a/include/exec/memory_ldst_phys.h.inc
+++ b/include/exec/memory_ldst_phys.h.inc
@@ -19,7 +19,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifdef TARGET_ENDIANNESS
static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_lduw, SUFFIX)(ARG1, addr,
@@ -55,7 +54,7 @@ static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
glue(address_space_stq, SUFFIX)(ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
-#else
+
static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldub, SUFFIX)(ARG1, addr,
@@ -139,9 +138,7 @@ static inline void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t va
glue(address_space_stq_be, SUFFIX)(ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
-#endif
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
-#undef TARGET_ENDIANNESS
diff --git a/include/exec/mmap-lock.h b/include/exec/mmap-lock.h
new file mode 100644
index 0000000..50ffdab
--- /dev/null
+++ b/include/exec/mmap-lock.h
@@ -0,0 +1,33 @@
+/*
+ * QEMU user-only mmap lock, with stubs for system mode
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef EXEC_MMAP_LOCK_H
+#define EXEC_MMAP_LOCK_H
+
+#ifdef CONFIG_USER_ONLY
+
+void TSA_NO_TSA mmap_lock(void);
+void TSA_NO_TSA mmap_unlock(void);
+bool have_mmap_lock(void);
+
+static inline void mmap_unlock_guard(void *unused)
+{
+ mmap_unlock();
+}
+
+#define WITH_MMAP_LOCK_GUARD() \
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
+
+#else
+
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
+#define WITH_MMAP_LOCK_GUARD()
+
+#endif /* CONFIG_USER_ONLY */
+#endif /* EXEC_MMAP_LOCK_H */
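
As an aside (not part of this patch), the guard macro introduced above is meant to be used like this; the body is illustrative:

static void walk_guest_mappings(void)
{
    WITH_MMAP_LOCK_GUARD() {
        /* inspect/modify guest mappings; the lock is dropped on scope exit */
    }
}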
diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h
index 54ddde3..101c259 100644
--- a/include/exec/page-vary.h
+++ b/include/exec/page-vary.h
@@ -49,4 +49,13 @@ bool set_preferred_target_page_bits(int bits);
*/
void finalize_target_page_bits(void);
+/**
+ * migration_legacy_page_bits
+ *
+ * For migration compatibility with qemu v2.9, prior to the introduction
+ * of the configuration/target-page-bits section, return the value of
+ * TARGET_PAGE_BITS that the target had then.
+ */
+int migration_legacy_page_bits(void);
+
#endif /* EXEC_PAGE_VARY_H */
diff --git a/include/exec/poison.h b/include/exec/poison.h
index 792a83f..a779adb 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -11,7 +11,6 @@
#pragma GCC poison TARGET_AARCH64
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
-#pragma GCC poison TARGET_CRIS
#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
#pragma GCC poison TARGET_LOONGARCH64
@@ -36,35 +35,17 @@
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
-#pragma GCC poison TARGET_SUPPORTS_MTTCG
#pragma GCC poison TARGET_BIG_ENDIAN
-#pragma GCC poison BSWAP_NEEDED
+#pragma GCC poison TCG_GUEST_DEFAULT_MO
#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx
#pragma GCC poison TARGET_FMT_ld
#pragma GCC poison TARGET_FMT_lu
-#pragma GCC poison TARGET_PAGE_SIZE
-#pragma GCC poison TARGET_PAGE_MASK
-#pragma GCC poison TARGET_PAGE_BITS
-#pragma GCC poison TARGET_PAGE_ALIGN
-
-#pragma GCC poison CPU_INTERRUPT_HARD
-#pragma GCC poison CPU_INTERRUPT_EXITTB
-#pragma GCC poison CPU_INTERRUPT_HALT
-#pragma GCC poison CPU_INTERRUPT_DEBUG
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+#pragma GCC poison TARGET_PHYS_ADDR_SPACE_BITS
#pragma GCC poison CONFIG_ALPHA_DIS
-#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
#pragma GCC poison CONFIG_HEXAGON_DIS
@@ -85,4 +66,11 @@
#pragma GCC poison CONFIG_WHPX
#pragma GCC poison CONFIG_XEN
+#ifndef COMPILING_SYSTEM_VS_USER
+#pragma GCC poison CONFIG_USER_ONLY
+#pragma GCC poison CONFIG_SOFTMMU
+#endif
+
+#pragma GCC poison KVM_HAVE_MCE_INJECTION
+
#endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
deleted file mode 100644
index 891c44c..0000000
--- a/include/exec/ram_addr.h
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * Declarations for cpu physical memory functions
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-
-/*
- * This header is for use by exec.c and memory.c ONLY. Do not include it.
- * The functions declared here will be removed soon.
- */
-
-#ifndef RAM_ADDR_H
-#define RAM_ADDR_H
-
-#ifndef CONFIG_USER_ONLY
-#include "cpu.h"
-#include "sysemu/xen.h"
-#include "sysemu/tcg.h"
-#include "exec/ramlist.h"
-#include "exec/ramblock.h"
-#include "exec/exec-all.h"
-#include "qemu/rcu.h"
-
-extern uint64_t total_dirty_pages;
-
-/**
- * clear_bmap_size: calculate clear bitmap size
- *
- * @pages: number of guest pages
- * @shift: guest page number shift
- *
- * Returns: number of bits for the clear bitmap
- */
-static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
-{
- return DIV_ROUND_UP(pages, 1UL << shift);
-}
-
-/**
- * clear_bmap_set: set clear bitmap for the page range. Must be with
- * bitmap_mutex held.
- *
- * @rb: the ramblock to operate on
- * @start: the start page number
- * @npages: number of pages to set in the bitmap
- *
- * Returns: None
- */
-static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
- uint64_t npages)
-{
- uint8_t shift = rb->clear_bmap_shift;
-
- bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
-}
-
-/**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
- * Must be with bitmap_mutex held.
- *
- * @rb: the ramblock to operate on
- * @page: the page number to check
- *
- * Returns: true if the bit was set, false otherwise
- */
-static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
-{
- uint8_t shift = rb->clear_bmap_shift;
-
- return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
-}
-
-static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
-{
- return (b && b->host && offset < b->used_length) ? true : false;
-}
-
-static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
-{
- assert(offset_in_ramblock(block, offset));
- return (char *)block->host + offset;
-}
-
-static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
- RAMBlock *rb)
-{
- uint64_t host_addr_offset =
- (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
- return host_addr_offset >> TARGET_PAGE_BITS;
-}
-
-bool ramblock_is_pmem(RAMBlock *rb);
-
-long qemu_minrampagesize(void);
-long qemu_maxrampagesize(void);
-
-/**
- * qemu_ram_alloc_from_file,
- * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
- * file or device
- *
- * Parameters:
- * @size: the size in bytes of the ram block
- * @mr: the memory region where the ram block is
- * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
- * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
- * RAM_READONLY_FD, RAM_GUEST_MEMFD
- * @mem_path or @fd: specify the backing file or device
- * @offset: Offset into target file
- * @errp: pointer to Error*, to store an error if it happens
- *
- * Return:
- * On success, return a pointer to the ram block.
- * On failure, return NULL.
- */
-RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- uint32_t ram_flags, const char *mem_path,
- off_t offset, Error **errp);
-RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
- uint32_t ram_flags, int fd, off_t offset,
- Error **errp);
-
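
An illustrative sketch (not part of this patch) of the fd variant; the flag choice is an assumption based on the list documented above:

static RAMBlock *back_region_with_fd(MemoryRegion *mr, int fd,
                                     ram_addr_t size, Error **errp)
{
    /* Shared, file-backed RAM block starting at offset 0 of the fd. */
    return qemu_ram_alloc_from_fd(size, mr, RAM_SHARED, fd, 0, errp);
}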
-RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
- MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
- Error **errp);
-RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
- MemoryRegion *mr, Error **errp);
-void qemu_ram_free(RAMBlock *block);
-
-int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
-
-void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
-
-/* Write back the whole block of memory to its backing store */
-static inline void qemu_ram_block_writeback(RAMBlock *block)
-{
- qemu_ram_msync(block, 0, block->used_length);
-}
-
-#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
-#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
-
-static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
- ram_addr_t length,
- unsigned client)
-{
- DirtyMemoryBlocks *blocks;
- unsigned long end, page;
- unsigned long idx, offset, base;
- bool dirty = false;
-
- assert(client < DIRTY_MEMORY_NUM);
-
- end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
- page = start >> TARGET_PAGE_BITS;
-
- WITH_RCU_READ_LOCK_GUARD() {
- blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
-
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long num = next - base;
- unsigned long found = find_next_bit(blocks->blocks[idx],
- num, offset);
- if (found < num) {
- dirty = true;
- break;
- }
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
- }
- }
-
- return dirty;
-}
-
-static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
- ram_addr_t length,
- unsigned client)
-{
- DirtyMemoryBlocks *blocks;
- unsigned long end, page;
- unsigned long idx, offset, base;
- bool dirty = true;
-
- assert(client < DIRTY_MEMORY_NUM);
-
- end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
- page = start >> TARGET_PAGE_BITS;
-
- RCU_READ_LOCK_GUARD();
-
- blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
-
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long num = next - base;
- unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
- if (found < num) {
- dirty = false;
- break;
- }
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
- }
-
- return dirty;
-}
-
-static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
- unsigned client)
-{
- return cpu_physical_memory_get_dirty(addr, 1, client);
-}
-
-static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
-{
- bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
- bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
- bool migration =
- cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
- return !(vga && code && migration);
-}
-
-static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
- ram_addr_t length,
- uint8_t mask)
-{
- uint8_t ret = 0;
-
- if (mask & (1 << DIRTY_MEMORY_VGA) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
- ret |= (1 << DIRTY_MEMORY_VGA);
- }
- if (mask & (1 << DIRTY_MEMORY_CODE) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
- ret |= (1 << DIRTY_MEMORY_CODE);
- }
- if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
- !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
- ret |= (1 << DIRTY_MEMORY_MIGRATION);
- }
- return ret;
-}
-
-static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
- unsigned client)
-{
- unsigned long page, idx, offset;
- DirtyMemoryBlocks *blocks;
-
- assert(client < DIRTY_MEMORY_NUM);
-
- page = addr >> TARGET_PAGE_BITS;
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-
- RCU_READ_LOCK_GUARD();
-
- blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
-
- set_bit_atomic(offset, blocks->blocks[idx]);
-}
-
-static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
- ram_addr_t length,
- uint8_t mask)
-{
- DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
- unsigned long end, page;
- unsigned long idx, offset, base;
- int i;
-
- if (!mask && !xen_enabled()) {
- return;
- }
-
- end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
- page = start >> TARGET_PAGE_BITS;
-
- WITH_RCU_READ_LOCK_GUARD() {
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
- }
-
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
- offset, next - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
- offset, next - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
- offset, next - page);
- }
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
- }
- }
-
- xen_hvm_modified_memory(start, length);
-}
-
-#if !defined(_WIN32)
-
-/*
- * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
- * the number of dirty pages in @bitmap passed as argument. On the other hand,
- * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
- * weren't set in the global migration bitmap.
- */
-static inline
-uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
- ram_addr_t start,
- ram_addr_t pages)
-{
- unsigned long i, j;
- unsigned long page_number, c, nbits;
- hwaddr addr;
- ram_addr_t ram_addr;
- uint64_t num_dirty = 0;
- unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
- unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
-
- /* start address is aligned at the start of a word? */
- if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
- (hpratio == 1)) {
- unsigned long **blocks[DIRTY_MEMORY_NUM];
- unsigned long idx;
- unsigned long offset;
- long k;
- long nr = BITS_TO_LONGS(pages);
-
- idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
- offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
- DIRTY_MEMORY_BLOCK_SIZE);
-
- WITH_RCU_READ_LOCK_GUARD() {
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] =
- qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
- }
-
- for (k = 0; k < nr; k++) {
- if (bitmap[k]) {
- unsigned long temp = leul_to_cpu(bitmap[k]);
-
- nbits = ctpopl(temp);
- qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
-
- if (global_dirty_tracking) {
- qatomic_or(
- &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
- temp);
- if (unlikely(
- global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
- total_dirty_pages += nbits;
- }
- }
-
- num_dirty += nbits;
-
- if (tcg_enabled()) {
- qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
- temp);
- }
- }
-
- if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
- offset = 0;
- idx++;
- }
- }
- }
-
- xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
- } else {
- uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
-
- if (!global_dirty_tracking) {
- clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
- }
-
- /*
- * bitmap-traveling is faster than memory-traveling (for addr...)
- * especially when most of the memory is not dirty.
- */
- for (i = 0; i < len; i++) {
- if (bitmap[i] != 0) {
- c = leul_to_cpu(bitmap[i]);
- nbits = ctpopl(c);
- if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
- total_dirty_pages += nbits;
- }
- num_dirty += nbits;
- do {
- j = ctzl(c);
- c &= ~(1ul << j);
- page_number = (i * HOST_LONG_BITS + j) * hpratio;
- addr = page_number * TARGET_PAGE_SIZE;
- ram_addr = start + addr;
- cpu_physical_memory_set_dirty_range(ram_addr,
- TARGET_PAGE_SIZE * hpratio, clients);
- } while (c != 0);
- }
- }
- }
-
- return num_dirty;
-}
-#endif /* not _WIN32 */
-
-static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
- ram_addr_t length)
-{
- if (tcg_enabled()) {
- tlb_reset_dirty_range_all(start, length);
- }
-
-}
-bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
- ram_addr_t length,
- unsigned client);
-
-DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
- (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
-
-bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
- ram_addr_t start,
- ram_addr_t length);
-
-static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
- ram_addr_t length)
-{
- cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
- cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
- cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
-}
-
-
-/* Called with RCU critical section */
-static inline
-uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
- ram_addr_t start,
- ram_addr_t length)
-{
- ram_addr_t addr;
- unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
- uint64_t num_dirty = 0;
- unsigned long *dest = rb->bmap;
-
- /* start address and length is aligned at the start of a word? */
- if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
- (start + rb->offset) &&
- !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
- int k;
- int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
- unsigned long * const *src;
- unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
- DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
-
- src = qatomic_rcu_read(
- &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
-
- for (k = page; k < page + nr; k++) {
- if (src[idx][offset]) {
- unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
- unsigned long new_dirty;
- new_dirty = ~dest[k];
- dest[k] |= bits;
- new_dirty &= bits;
- num_dirty += ctpopl(new_dirty);
- }
-
- if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
- offset = 0;
- idx++;
- }
- }
- if (num_dirty) {
- cpu_physical_memory_dirty_bits_cleared(start, length);
- }
-
- if (rb->clear_bmap) {
- /*
- * Postpone the dirty bitmap clear to the point before we
- * really send the pages, also we will split the clear
- * dirty procedure into smaller chunks.
- */
- clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
- length >> TARGET_PAGE_BITS);
- } else {
- /* Slow path - still do that in a huge chunk */
- memory_region_clear_dirty_bitmap(rb->mr, start, length);
- }
- } else {
- ram_addr_t offset = rb->offset;
-
- for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
- if (cpu_physical_memory_test_and_clear_dirty(
- start + addr + offset,
- TARGET_PAGE_SIZE,
- DIRTY_MEMORY_MIGRATION)) {
- long k = (start + addr) >> TARGET_PAGE_BITS;
- if (!test_and_set_bit(k, dest)) {
- num_dirty++;
- }
- }
- }
- }
-
- return num_dirty;
-}
-#endif
-#endif
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
deleted file mode 100644
index 0babd10..0000000
--- a/include/exec/ramblock.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Declarations for cpu physical memory functions
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-
-/*
- * This header is for use by exec.c and memory.c ONLY. Do not include it.
- * The functions declared here will be removed soon.
- */
-
-#ifndef QEMU_EXEC_RAMBLOCK_H
-#define QEMU_EXEC_RAMBLOCK_H
-
-#ifndef CONFIG_USER_ONLY
-#include "cpu-common.h"
-#include "qemu/rcu.h"
-#include "exec/ramlist.h"
-
-struct RAMBlock {
- struct rcu_head rcu;
- struct MemoryRegion *mr;
- uint8_t *host;
- uint8_t *colo_cache; /* For colo, VM's ram cache */
- ram_addr_t offset;
- ram_addr_t used_length;
- ram_addr_t max_length;
- void (*resized)(const char*, uint64_t length, void *host);
- uint32_t flags;
- /* Protected by the BQL. */
- char idstr[256];
- /* RCU-enabled, writes protected by the ramlist lock */
- QLIST_ENTRY(RAMBlock) next;
- QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
- int fd;
- uint64_t fd_offset;
- int guest_memfd;
- size_t page_size;
- /* dirty bitmap used during migration */
- unsigned long *bmap;
-
- /*
- * Below fields are only used by mapped-ram migration
- */
- /* bitmap of pages present in the migration file */
- unsigned long *file_bmap;
- /*
- * offset in the file pages belonging to this ramblock are saved,
- * used only during migration to a file.
- */
- off_t bitmap_offset;
- uint64_t pages_offset;
-
- /* Bitmap of already received pages. Only used on destination side. */
- unsigned long *receivedmap;
-
- /*
- * bitmap to track already cleared dirty bitmap. When the bit is
- * set, it means the corresponding memory chunk needs a log-clear.
- * Set this up to non-NULL to enable the capability to postpone
- * and split clearing of dirty bitmap on the remote node (e.g.,
- * KVM). The bitmap will be set only when doing global sync.
- *
- * It is only used during src side of ram migration, and it is
- * protected by the global ram_state.bitmap_mutex.
- *
- * NOTE: this bitmap is different comparing to the other bitmaps
- * in that one bit can represent multiple guest pages (which is
- * decided by the `clear_bmap_shift' variable below). On
- * destination side, this should always be NULL, and the variable
- * `clear_bmap_shift' is meaningless.
- */
- unsigned long *clear_bmap;
- uint8_t clear_bmap_shift;
-
- /*
- * RAM block length that corresponds to the used_length on the migration
- * source (after RAM block sizes were synchronized). Especially, after
- * starting to run the guest, used_length and postcopy_length can differ.
- * Used to register/unregister uffd handlers and as the size of the received
- * bitmap. Receiving any page beyond this length will bail out, as it
- * could not have been valid on the source.
- */
- ram_addr_t postcopy_length;
-};
-#endif
-#endif
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index 2ad2a81..d9cfe53 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -50,6 +50,7 @@ typedef struct RAMList {
/* RCU-enabled, writes protected by the ramlist lock. */
QLIST_HEAD(, RAMBlock) blocks;
DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
+ unsigned int num_dirty_blocks;
uint32_t version;
QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
} RAMList;
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 98ffbb5..ca0ebbc 100644
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -14,10 +14,54 @@
#ifndef EXEC_TARGET_PAGE_H
#define EXEC_TARGET_PAGE_H
-size_t qemu_target_page_size(void);
-int qemu_target_page_mask(void);
-int qemu_target_page_bits(void);
-int qemu_target_page_bits_min(void);
+/*
+ * If compiling per-target, get the real values.
+ * For generic code, reuse the mechanism for variable page size.
+ */
+#ifdef COMPILING_PER_TARGET
+#include "cpu-param.h"
+#include "exec/target_long.h"
+#define TARGET_PAGE_TYPE target_long
+#else
+#define TARGET_PAGE_BITS_VARY
+#define TARGET_PAGE_TYPE int
+#endif
+
+#ifdef TARGET_PAGE_BITS_VARY
+# include "exec/page-vary.h"
+extern const TargetPageBits target_page;
+# ifdef CONFIG_DEBUG_TCG
+# define TARGET_PAGE_BITS ({ assert(target_page.decided); \
+ target_page.bits; })
+# define TARGET_PAGE_MASK ({ assert(target_page.decided); \
+ (TARGET_PAGE_TYPE)target_page.mask; })
+# else
+# define TARGET_PAGE_BITS target_page.bits
+# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)target_page.mask)
+# endif
+# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
+#else
+# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)-1 << TARGET_PAGE_BITS)
+#endif
+
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
+
+static inline size_t qemu_target_page_size(void)
+{
+ return TARGET_PAGE_SIZE;
+}
+
+static inline int qemu_target_page_mask(void)
+{
+ return TARGET_PAGE_MASK;
+}
+
+static inline int qemu_target_page_bits(void)
+{
+ return TARGET_PAGE_BITS;
+}
size_t qemu_target_pages_to_MiB(size_t pages);
+
#endif
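
A quick standalone check of the arithmetic the new target_page.h relies on: TARGET_PAGE_MASK is built as ((TARGET_PAGE_TYPE)-1 << TARGET_PAGE_BITS), and in the variable-page-size case TARGET_PAGE_SIZE is recovered as -(int)TARGET_PAGE_MASK. The snippet below is only an illustration of that two's-complement identity with an assumed 12-bit (4 KiB) page; it is not QEMU code.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const int bits = 12;                      /* illustrative: 4 KiB pages */
    /* Same value as ((TYPE)-1 << bits), computed via unsigned shift. */
    int32_t mask = (int32_t)(~(uint32_t)0 << bits);   /* 0xfffff000 == -4096 */
    int size = -(int)mask;                    /* two's complement: 1 << bits */

    assert(size == (1 << bits));              /* 4096 */
    assert((0x12345 & mask) == 0x12000);      /* mask aligns an address down */
    return 0;
}
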
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index dc5a5fa..03b5a8f 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -19,14 +19,14 @@
#ifndef EXEC_TLB_COMMON_H
#define EXEC_TLB_COMMON_H 1
-#define CPU_TLB_ENTRY_BITS 5
+#define CPU_TLB_ENTRY_BITS (HOST_LONG_BITS == 32 ? 4 : 5)
/* Minimalized TLB entry for use by TCG fast path. */
typedef union CPUTLBEntry {
struct {
- uint64_t addr_read;
- uint64_t addr_write;
- uint64_t addr_code;
+ uintptr_t addr_read;
+ uintptr_t addr_write;
+ uintptr_t addr_code;
/*
* Addend to virtual address to get host address. IO accesses
* use the corresponding iotlb value.
@@ -37,7 +37,7 @@ typedef union CPUTLBEntry {
* Padding to get a power of two size, as well as index
* access to addr_{read,write,code}.
*/
- uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+ uintptr_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uintptr_t)];
} CPUTLBEntry;
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
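
The switch from fixed uint64_t comparators to uintptr_t is what makes CPU_TLB_ENTRY_BITS host-dependent: three comparator addresses plus the addend are four host words, so the entry is 32 bytes (1 << 5) on a 64-bit host and 16 bytes (1 << 4) on a 32-bit host. A minimal standalone mock of that size arithmetic, assuming nothing beyond the C standard library:

#include <stdint.h>

/* Mirror of the four-word entry layout, for size arithmetic only. */
typedef struct {
    uintptr_t addr_read;
    uintptr_t addr_write;
    uintptr_t addr_code;
    uintptr_t addend;
} MockTLBEntry;

#define MOCK_ENTRY_BITS (sizeof(uintptr_t) == 4 ? 4 : 5)

/* 4 host words: 16 bytes (1 << 4) on 32-bit hosts, 32 bytes (1 << 5) on 64-bit. */
_Static_assert(sizeof(MockTLBEntry) == (1u << MOCK_ENTRY_BITS),
               "TLB entry size must stay a power of two");
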
diff --git a/include/exec/tlb-flags.h b/include/exec/tlb-flags.h
new file mode 100644
index 0000000..357e790
--- /dev/null
+++ b/include/exec/tlb-flags.h
@@ -0,0 +1,86 @@
+/*
+ * TLB flags definition
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TLB_FLAGS_H
+#define TLB_FLAGS_H
+
+/*
+ * Flags returned for lookup of a TLB virtual address.
+ */
+
+#ifdef CONFIG_USER_ONLY
+
+/*
+ * Allow some level of source compatibility with softmmu.
+ * Invalid is set when the page does not have requested permissions.
+ * MMIO is set when we want the target helper to use the functional
+ * interface for load/store so that plugins see the access.
+ */
+#define TLB_INVALID_MASK (1 << 0)
+#define TLB_MMIO (1 << 1)
+#define TLB_WATCHPOINT 0
+
+#else
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
+/* Set if TLB entry writes are ignored. */
+#define TLB_DISCARD_WRITE (1 << 3)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 4)
+
+#define TLB_SLOW_FLAGS_MASK \
+ (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED | \
+ TLB_DISCARD_WRITE | TLB_MMIO)
+
+/*
+ * Flags stored in CPUTLBEntry.addr_idx[x].
+ * These must be above the largest alignment (64 bytes),
+ * and below the smallest page size (1024 bytes).
+ * This leaves bits [9:6] available for use.
+ */
+
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 6)
+/* Set if TLB entry references a clean RAM page. */
+#define TLB_NOTDIRTY (1 << 7)
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << 8)
+
+/*
+ * Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW)
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* TLB_FLAGS_H */
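
These flags only work because a TLB comparator holds a page-aligned virtual address, so the bits below the smallest page size are free for metadata and a single masked compare both checks the page and rejects entries that need special handling. The sketch below is a simplified standalone model of that pattern (the real TCG fast path also folds in the access alignment mask); the flag values are mirrored from the header above, and 4 KiB pages are assumed for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fast-path flag values mirrored from include/exec/tlb-flags.h. */
#define TLB_INVALID_MASK  (1 << 6)
#define TLB_NOTDIRTY      (1 << 7)
#define TLB_FORCE_SLOW    (1 << 8)

#define PAGE_MASK  (~(uintptr_t)0xfff)    /* assume 4 KiB pages for the demo */

/*
 * The stored comparator is the page-aligned address plus flag bits, so this
 * one compare checks the page and rejects any entry whose flags demand the
 * slow path (where TLB_NOTDIRTY, TLB_FORCE_SLOW, etc. are examined).
 */
static bool tlb_hit_fast(uintptr_t tlb_addr, uintptr_t vaddr)
{
    return (vaddr & PAGE_MASK) == tlb_addr;
}

int main(void)
{
    uintptr_t clean    = 0x12345000u;
    uintptr_t notdirty = 0x12345000u | TLB_NOTDIRTY;

    printf("clean:    %d\n", tlb_hit_fast(clean, 0x12345abcu));    /* 1 */
    printf("notdirty: %d\n", tlb_hit_fast(notdirty, 0x12345abcu)); /* 0 */
    return 0;
}
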
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
deleted file mode 100644
index 85c9460..0000000
--- a/include/exec/translate-all.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Translated block handling
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef TRANSLATE_ALL_H
-#define TRANSLATE_ALL_H
-
-#include "exec/exec-all.h"
-
-
-/* translate-all.c */
-void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
-
-#ifdef CONFIG_USER_ONLY
-void page_protect(tb_page_addr_t page_addr);
-int page_unprotect(target_ulong address, uintptr_t pc);
-#endif
-
-#endif /* TRANSLATE_ALL_H */
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
index a6d1af6..cdce399 100644
--- a/include/exec/translation-block.h
+++ b/include/exec/translation-block.h
@@ -7,10 +7,13 @@
#ifndef EXEC_TRANSLATION_BLOCK_H
#define EXEC_TRANSLATION_BLOCK_H
+#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "exec/cpu-common.h"
+#include "exec/vaddr.h"
#ifdef CONFIG_USER_ONLY
#include "qemu/interval-tree.h"
+#include "exec/target_page.h"
#endif
/*
@@ -152,4 +155,60 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
return qatomic_read(&tb->cflags);
}
+bool tcg_cflags_has(CPUState *cpu, uint32_t flags);
+void tcg_cflags_set(CPUState *cpu, uint32_t flags);
+
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ return tb->itree.start;
+#else
+ return tb->page_addr[0];
+#endif
+}
+
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+ return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
+ return tb->page_addr[1];
+#endif
+}
+
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+ tb_page_addr_t addr)
+{
+#ifdef CONFIG_USER_ONLY
+ tb->itree.start = addr;
+ /*
+ * To begin, we record an interval of one byte. When the translation
+ * loop encounters a second page, the interval will be extended to
+ * include the first byte of the second page, which is sufficient to
+ * allow tb_page_addr1() above to work properly. The final corrected
+ * interval will be set by tb_page_add() from tb->size before the
+ * node is added to the interval tree.
+ */
+ tb->itree.last = addr;
+#else
+ tb->page_addr[0] = addr;
+#endif
+}
+
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+ tb_page_addr_t addr)
+{
+#ifdef CONFIG_USER_ONLY
+ /* Extend the interval to the first byte of the second page. See above. */
+ tb->itree.last = addr;
+#else
+ tb->page_addr[1] = addr;
+#endif
+}
+
+/* TranslationBlock invalidate API */
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last);
+
#endif /* EXEC_TRANSLATION_BLOCK_H */
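
In the user-only build the two page addresses are no longer stored explicitly; they are recovered from the interval [itree.start, itree.last], with addr0 being the start and addr1 derived from the page containing the last byte. A standalone sketch of that derivation, using a plain struct in place of the interval-tree node and assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~(uint64_t)0xfff)        /* assume 4 KiB guest pages */

struct mock_tb { uint64_t start, last; };   /* stands in for tb->itree */

/* Mirrors tb_page_addr1(): -1 means the TB stays within a single page. */
static uint64_t page_addr1(const struct mock_tb *tb)
{
    uint64_t next = tb->last & PAGE_MASK;
    return next == (tb->start & PAGE_MASK) ? (uint64_t)-1 : next;
}

int main(void)
{
    struct mock_tb one_page  = { 0x400010, 0x400030 };
    struct mock_tb two_pages = { 0x400ff0, 0x401003 };  /* spills into 0x401000 */

    printf("%#llx\n", (unsigned long long)page_addr1(&one_page));   /* all-ones */
    printf("%#llx\n", (unsigned long long)page_addr1(&two_pages));  /* 0x401000 */
    return 0;
}
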
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 25004df..3c32655 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -18,24 +18,10 @@
* member in your target-specific DisasContext.
*/
-#include "qemu/bswap.h"
+#include "exec/memop.h"
#include "exec/vaddr.h"
/**
- * gen_intermediate_code
- * @cpu: cpu context
- * @tb: translation block
- * @max_insns: max number of instructions to translate
- * @pc: guest virtual program counter address
- * @host_pc: host physical program counter address
- *
- * This function must be provided by the target, which should create
- * the target-specific DisasContext, and then invoke translator_loop.
- */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc);
-
-/**
* DisasJumpType:
* @DISAS_NEXT: Next instruction in program order.
* @DISAS_TOO_MANY: Too many instructions translated.
@@ -71,7 +57,6 @@ typedef enum DisasJumpType {
* @is_jmp: What instruction to disassemble next.
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
- * @singlestep_enabled: "Hardware" single stepping enabled.
* @plugin_enabled: TCG plugin enabled in this TB.
* @fake_insn: True if translator_fake_ldb used.
* @insn_start: The last op emitted by the insn_start hook,
@@ -86,9 +71,9 @@ struct DisasContextBase {
DisasJumpType is_jmp;
int num_insns;
int max_insns;
- bool singlestep_enabled;
bool plugin_enabled;
bool fake_insn;
+ uint8_t code_mmuidx;
struct TCGOp *insn_start;
void *host_addr[2];
@@ -196,42 +181,53 @@ bool translator_io_start(DisasContextBase *db);
*/
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc);
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc);
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc);
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc);
+uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian);
+uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian);
+uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian);
+
+#ifdef COMPILING_PER_TARGET
+static inline uint16_t
+translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
+{
+ return translator_lduw_end(env, db, pc, MO_TE);
+}
+
+static inline uint32_t
+translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
+{
+ return translator_ldl_end(env, db, pc, MO_TE);
+}
+
+static inline uint64_t
+translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
+{
+ return translator_ldq_end(env, db, pc, MO_TE);
+}
static inline uint16_t
translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
vaddr pc, bool do_swap)
{
- uint16_t ret = translator_lduw(env, db, pc);
- if (do_swap) {
- ret = bswap16(ret);
- }
- return ret;
+ return translator_lduw_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP));
}
static inline uint32_t
translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
vaddr pc, bool do_swap)
{
- uint32_t ret = translator_ldl(env, db, pc);
- if (do_swap) {
- ret = bswap32(ret);
- }
- return ret;
+ return translator_ldl_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP));
}
static inline uint64_t
translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
vaddr pc, bool do_swap)
{
- uint64_t ret = translator_ldq(env, db, pc);
- if (do_swap) {
- ret = bswap64(ret);
- }
- return ret;
+ return translator_ldq_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP));
}
+#endif /* COMPILING_PER_TARGET */
/**
* translator_fake_ld - fake instruction load
@@ -269,16 +265,15 @@ bool translator_st(const DisasContextBase *db, void *dest,
*/
size_t translator_st_len(const DisasContextBase *db);
-#ifdef COMPILING_PER_TARGET
-/*
- * Return whether addr is on the same page as where disassembly started.
+/**
+ * translator_is_same_page
+ * @db: disassembly context
+ * @addr: virtual address within TB
+ *
+ * Return whether @addr is on the same page as where disassembly started.
* Translators can use this to enforce the rule that only single-insn
* translation blocks are allowed to cross page boundaries.
*/
-static inline bool is_same_page(const DisasContextBase *db, vaddr addr)
-{
- return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
-}
-#endif
+bool translator_is_same_page(const DisasContextBase *db, vaddr addr);
#endif /* EXEC__TRANSLATOR_H */
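
The rewritten *_swap helpers no longer load and then conditionally bswap; they fold the decision into the MemOp by XOR-ing MO_BSWAP when do_swap is true, which flips MO_TE between the little- and big-endian variants. A tiny standalone model of that trick; the bit values below are purely illustrative stand-ins, the real MemOp flags live in exec/memop.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; not the real MemOp encoding. */
enum { MO_LE = 0, MO_BE = 1, MO_BSWAP = 1 };
#define MO_TE MO_LE                      /* pretend the target is little-endian */

static int effective_endian(bool do_swap)
{
    /* do_swap == false keeps MO_TE; do_swap == true flips it to the other order. */
    return MO_TE ^ (do_swap * MO_BSWAP);
}

int main(void)
{
    printf("no swap -> %s\n", effective_endian(false) == MO_BE ? "BE" : "LE");
    printf("swap    -> %s\n", effective_endian(true)  == MO_BE ? "BE" : "LE");
    return 0;
}
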
diff --git a/include/exec/tswap.h b/include/exec/tswap.h
index b7a4191..49511f2 100644
--- a/include/exec/tswap.h
+++ b/include/exec/tswap.h
@@ -11,15 +11,16 @@
#include "qemu/bswap.h"
/**
- * target_words_bigendian:
+ * target_big_endian:
* Returns true if the (default) endianness of the target is big endian,
- * false otherwise. Note that in target-specific code, you can use
- * TARGET_BIG_ENDIAN directly instead. On the other hand, common
- * code should normally never need to know about the endianness of the
- * target, so please do *not* use this function unless you know very well
- * what you are doing!
+ * false otherwise. Common code should normally never need to know about the
+ * endianness of the target, so please do *not* use this function unless you
+ * know very well what you are doing!
*/
-bool target_words_bigendian(void);
+bool target_big_endian(void);
+#ifdef COMPILING_PER_TARGET
+#define target_big_endian() TARGET_BIG_ENDIAN
+#endif
/*
* If we're in target-specific code, we can hard-code the swapping
@@ -28,7 +29,7 @@ bool target_words_bigendian(void);
#ifdef COMPILING_PER_TARGET
#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
#else
-#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
+#define target_needs_bswap() (HOST_BIG_ENDIAN != target_big_endian())
#endif /* COMPILING_PER_TARGET */
static inline uint16_t tswap16(uint16_t s)
@@ -79,4 +80,74 @@ static inline void tswap64s(uint64_t *s)
}
}
+/* Return ld{word}_{le,be}_p following target endianness. */
+#define LOAD_IMPL(word, args...) \
+do { \
+ if (target_big_endian()) { \
+ return glue(glue(ld, word), _be_p)(args); \
+ } else { \
+ return glue(glue(ld, word), _le_p)(args); \
+ } \
+} while (0)
+
+static inline int lduw_p(const void *ptr)
+{
+ LOAD_IMPL(uw, ptr);
+}
+
+static inline int ldsw_p(const void *ptr)
+{
+ LOAD_IMPL(sw, ptr);
+}
+
+static inline int ldl_p(const void *ptr)
+{
+ LOAD_IMPL(l, ptr);
+}
+
+static inline uint64_t ldq_p(const void *ptr)
+{
+ LOAD_IMPL(q, ptr);
+}
+
+static inline uint64_t ldn_p(const void *ptr, int sz)
+{
+ LOAD_IMPL(n, ptr, sz);
+}
+
+#undef LOAD_IMPL
+
+/* Call st{word}_{le,be}_p following target endianness. */
+#define STORE_IMPL(word, args...) \
+do { \
+ if (target_big_endian()) { \
+ glue(glue(st, word), _be_p)(args); \
+ } else { \
+ glue(glue(st, word), _le_p)(args); \
+ } \
+} while (0)
+
+
+static inline void stw_p(void *ptr, uint16_t v)
+{
+ STORE_IMPL(w, ptr, v);
+}
+
+static inline void stl_p(void *ptr, uint32_t v)
+{
+ STORE_IMPL(l, ptr, v);
+}
+
+static inline void stq_p(void *ptr, uint64_t v)
+{
+ STORE_IMPL(q, ptr, v);
+}
+
+static inline void stn_p(void *ptr, int sz, uint64_t v)
+{
+ STORE_IMPL(n, ptr, sz, v);
+}
+
+#undef STORE_IMPL
+
#endif /* TSWAP_H */
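
The new ld*_p/st*_p wrappers simply dispatch to the _le_p or _be_p variant according to target_big_endian(); under COMPILING_PER_TARGET the call constant-folds to TARGET_BIG_ENDIAN and the branch disappears. A self-contained sketch of the same dispatch for a 32-bit store, using plain byte writes instead of the QEMU primitives:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool target_is_big_endian = false;   /* stands in for target_big_endian() */

static void stl_le(void *p, uint32_t v)
{
    uint8_t *b = p;
    b[0] = v; b[1] = v >> 8; b[2] = v >> 16; b[3] = v >> 24;
}

static void stl_be(void *p, uint32_t v)
{
    uint8_t *b = p;
    b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;
}

/* Equivalent in spirit to stl_p(): store in the guest's byte order. */
static void stl_guest(void *p, uint32_t v)
{
    if (target_is_big_endian) {
        stl_be(p, v);
    } else {
        stl_le(p, v);
    }
}

int main(void)
{
    uint8_t buf[4];
    stl_guest(buf, 0x11223344);
    printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); /* 44 33 22 11 */
    return 0;
}
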
diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
index b9844af..28bec63 100644
--- a/include/exec/vaddr.h
+++ b/include/exec/vaddr.h
@@ -6,13 +6,15 @@
/**
* vaddr:
* Type wide enough to contain any #target_ulong virtual address.
+ * We do not support a 64-bit guest on a 32-bit host (detected at configure
+ * time), so a host pointer is always wide enough to hold a guest address.
*/
-typedef uint64_t vaddr;
-#define VADDR_PRId PRId64
-#define VADDR_PRIu PRIu64
-#define VADDR_PRIo PRIo64
-#define VADDR_PRIx PRIx64
-#define VADDR_PRIX PRIX64
-#define VADDR_MAX UINT64_MAX
+typedef uintptr_t vaddr;
+#define VADDR_PRId PRIdPTR
+#define VADDR_PRIu PRIuPTR
+#define VADDR_PRIo PRIoPTR
+#define VADDR_PRIx PRIxPTR
+#define VADDR_PRIX PRIXPTR
+#define VADDR_MAX UINTPTR_MAX
#endif
diff --git a/include/exec/watchpoint.h b/include/exec/watchpoint.h
new file mode 100644
index 0000000..4b66688
--- /dev/null
+++ b/include/exec/watchpoint.h
@@ -0,0 +1,41 @@
+/*
+ * CPU watchpoints
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef EXEC_WATCHPOINT_H
+#define EXEC_WATCHPOINT_H
+
+#if defined(CONFIG_USER_ONLY)
+static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, CPUWatchpoint **watchpoint)
+{
+ return -ENOSYS;
+}
+
+static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
+ vaddr len, int flags)
+{
+ return -ENOSYS;
+}
+
+static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
+ CPUWatchpoint *wp)
+{
+}
+
+static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
+{
+}
+#else
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
+ vaddr len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
+#endif
+
+#endif /* EXEC_WATCHPOINT_H */
diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
index 94cbe07..90862f5 100644
--- a/include/fpu/softfloat-helpers.h
+++ b/include/fpu/softfloat-helpers.h
@@ -75,6 +75,36 @@ static inline void set_floatx80_rounding_precision(FloatX80RoundPrec val,
status->floatx80_rounding_precision = val;
}
+static inline void set_floatx80_behaviour(FloatX80Behaviour b,
+ float_status *status)
+{
+ status->floatx80_behaviour = b;
+}
+
+static inline void set_float_2nan_prop_rule(Float2NaNPropRule rule,
+ float_status *status)
+{
+ status->float_2nan_prop_rule = rule;
+}
+
+static inline void set_float_3nan_prop_rule(Float3NaNPropRule rule,
+ float_status *status)
+{
+ status->float_3nan_prop_rule = rule;
+}
+
+static inline void set_float_infzeronan_rule(FloatInfZeroNaNRule rule,
+ float_status *status)
+{
+ status->float_infzeronan_rule = rule;
+}
+
+static inline void set_float_default_nan_pattern(uint8_t dnan_pattern,
+ float_status *status)
+{
+ status->default_nan_pattern = dnan_pattern;
+}
+
static inline void set_flush_to_zero(bool val, float_status *status)
{
status->flush_to_zero = val;
@@ -85,6 +115,12 @@ static inline void set_flush_inputs_to_zero(bool val, float_status *status)
status->flush_inputs_to_zero = val;
}
+static inline void set_float_ftz_detection(FloatFTZDetection d,
+ float_status *status)
+{
+ status->ftz_detection = d;
+}
+
static inline void set_default_nan_mode(bool val, float_status *status)
{
status->default_nan_mode = val;
@@ -95,50 +131,79 @@ static inline void set_snan_bit_is_one(bool val, float_status *status)
status->snan_bit_is_one = val;
}
-static inline void set_use_first_nan(bool val, float_status *status)
-{
- status->use_first_nan = val;
-}
-
static inline void set_no_signaling_nans(bool val, float_status *status)
{
status->no_signaling_nans = val;
}
-static inline bool get_float_detect_tininess(float_status *status)
+static inline bool get_float_detect_tininess(const float_status *status)
{
return status->tininess_before_rounding;
}
-static inline FloatRoundMode get_float_rounding_mode(float_status *status)
+static inline FloatRoundMode get_float_rounding_mode(const float_status *status)
{
return status->float_rounding_mode;
}
-static inline int get_float_exception_flags(float_status *status)
+static inline int get_float_exception_flags(const float_status *status)
{
return status->float_exception_flags;
}
static inline FloatX80RoundPrec
-get_floatx80_rounding_precision(float_status *status)
+get_floatx80_rounding_precision(const float_status *status)
{
return status->floatx80_rounding_precision;
}
-static inline bool get_flush_to_zero(float_status *status)
+static inline FloatX80Behaviour
+get_floatx80_behaviour(const float_status *status)
+{
+ return status->floatx80_behaviour;
+}
+
+static inline Float2NaNPropRule
+get_float_2nan_prop_rule(const float_status *status)
+{
+ return status->float_2nan_prop_rule;
+}
+
+static inline Float3NaNPropRule
+get_float_3nan_prop_rule(const float_status *status)
+{
+ return status->float_3nan_prop_rule;
+}
+
+static inline FloatInfZeroNaNRule
+get_float_infzeronan_rule(const float_status *status)
+{
+ return status->float_infzeronan_rule;
+}
+
+static inline uint8_t get_float_default_nan_pattern(const float_status *status)
+{
+ return status->default_nan_pattern;
+}
+
+static inline bool get_flush_to_zero(const float_status *status)
{
return status->flush_to_zero;
}
-static inline bool get_flush_inputs_to_zero(float_status *status)
+static inline bool get_flush_inputs_to_zero(const float_status *status)
{
return status->flush_inputs_to_zero;
}
-static inline bool get_default_nan_mode(float_status *status)
+static inline bool get_default_nan_mode(const float_status *status)
{
return status->default_nan_mode;
}
+static inline FloatFTZDetection get_float_ftz_detection(const float_status *status)
+{
+ return status->ftz_detection;
+}
+
#endif /* SOFTFLOAT_HELPERS_H */
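
With the per-target ifdefs gone from the core softfloat code, each target is expected to describe its FP quirks up front through these setters. A hedged sketch of what such an initialisation might look like in a target's CPU reset path: the setter and enum names come from the diff above, but the particular rule choices, the 0x40 default-NaN pattern, and the function name are illustrative, not any real target's configuration, and the snippet only builds inside the QEMU tree.

#include "fpu/softfloat-helpers.h"

static void demo_fp_status_init(float_status *s)
{
    /* Which input NaN wins when two or three NaNs collide. */
    set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
    set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);

    /* 0 * Inf + NaN: propagate the input NaN rather than the default NaN. */
    set_float_infzeronan_rule(float_infzeronan_dnan_never, s);

    /* Default NaN: sign 0, fraction MSB set (see the default_nan_pattern docs). */
    set_float_default_nan_pattern(0x40, s);

    /* Detect flush-to-zero before rounding (MIPS-MSA-style choice). */
    set_float_ftz_detection(float_ftz_before_rounding, s);
}
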
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index 0884ec4..1af2a0c 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -80,6 +80,8 @@ this code that are retained.
#ifndef SOFTFLOAT_TYPES_H
#define SOFTFLOAT_TYPES_H
+#include "hw/registerfields.h"
+
/*
* Software IEC/IEEE floating-point types.
*/
@@ -138,6 +140,8 @@ typedef enum __attribute__((__packed__)) {
float_round_to_odd = 5,
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
float_round_to_odd_inf = 6,
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
+ float_round_nearest_even_max = 7,
} FloatRoundMode;
/*
@@ -150,8 +154,10 @@ enum {
float_flag_overflow = 0x0004,
float_flag_underflow = 0x0008,
float_flag_inexact = 0x0010,
- float_flag_input_denormal = 0x0020,
- float_flag_output_denormal = 0x0040,
+ /* We flushed an input denormal to 0 (because of flush_inputs_to_zero) */
+ float_flag_input_denormal_flushed = 0x0020,
+ /* We flushed an output denormal to 0 (because of flush_to_zero) */
+ float_flag_output_denormal_flushed = 0x0040,
float_flag_invalid_isi = 0x0080, /* inf - inf */
float_flag_invalid_imz = 0x0100, /* inf * 0 */
float_flag_invalid_idi = 0x0200, /* inf / inf */
@@ -159,6 +165,13 @@ enum {
float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */
float_flag_invalid_cvti = 0x1000, /* non-nan to integer */
float_flag_invalid_snan = 0x2000, /* any operand was snan */
+ /*
+ * An input was denormal and we used it (without flushing it to zero).
+ * Not set if we do not actually use the denormal input (e.g.
+ * because some other input was a NaN, or because the operation
+ * wasn't actually carried out (divide-by-zero; invalid))
+ */
+ float_flag_input_denormal_used = 0x4000,
};
/*
@@ -171,6 +184,193 @@ typedef enum __attribute__((__packed__)) {
} FloatX80RoundPrec;
/*
+ * 2-input NaN propagation rule. Individual architectures have
+ * different rules for which input NaN is propagated to the output
+ * when there is more than one NaN on the input.
+ *
+ * If default_nan_mode is enabled then it is valid not to set a
+ * NaN propagation rule, because the softfloat code guarantees
+ * not to try to pick a NaN to propagate in default NaN mode.
+ * When not in default-NaN mode, it is an error for the target
+ * not to set the rule in float_status, and we will assert if
+ * we need to handle an input NaN and no rule was selected.
+ */
+typedef enum __attribute__((__packed__)) {
+ /* No propagation rule specified */
+ float_2nan_prop_none = 0,
+ /* Prefer SNaN over QNaN, then operand A over B */
+ float_2nan_prop_s_ab,
+ /* Prefer SNaN over QNaN, then operand B over A */
+ float_2nan_prop_s_ba,
+ /* Prefer A over B regardless of SNaN vs QNaN */
+ float_2nan_prop_ab,
+ /* Prefer B over A regardless of SNaN vs QNaN */
+ float_2nan_prop_ba,
+ /*
+ * This implements x87 NaN propagation rules:
+ * SNaN + QNaN => return the QNaN
+ * two SNaNs => return the one with the larger significand, silenced
+ * two QNaNs => return the one with the larger significand
+ * SNaN and a non-NaN => return the SNaN, silenced
+ * QNaN and a non-NaN => return the QNaN
+ *
+ * If we get down to comparing significands and they are the same,
+ * return the NaN with the positive sign bit (if any).
+ */
+ float_2nan_prop_x87,
+} Float2NaNPropRule;
+
+/*
+ * 3-input NaN propagation rule, for fused multiply-add. Individual
+ * architectures have different rules for which input NaN is
+ * propagated to the output when there is more than one NaN on the
+ * input.
+ *
+ * If default_nan_mode is enabled then it is valid not to set a NaN
+ * propagation rule, because the softfloat code guarantees not to try
+ * to pick a NaN to propagate in default NaN mode. When not in
+ * default-NaN mode, it is an error for the target not to set the rule
+ * in float_status if it uses a muladd, and we will assert if we need
+ * to handle an input NaN and no rule was selected.
+ *
+ * The naming scheme for Float3NaNPropRule values is:
+ * float_3nan_prop_s_abc:
+ * = "Prefer SNaN over QNaN, then operand A over B over C"
+ * float_3nan_prop_abc:
+ * = "Prefer A over B over C regardless of SNaN vs QNAN"
+ *
+ * For QEMU, the multiply-add operation is A * B + C.
+ */
+
+/*
+ * We set the Float3NaNPropRule enum values up so we can select the
+ * right value in pickNaNMulAdd in a data driven way.
+ */
+FIELD(3NAN, 1ST, 0, 2) /* which operand is most preferred ? */
+FIELD(3NAN, 2ND, 2, 2) /* which operand is next most preferred ? */
+FIELD(3NAN, 3RD, 4, 2) /* which operand is least preferred ? */
+FIELD(3NAN, SNAN, 6, 1) /* do we prefer SNaN over QNaN ? */
+
+#define PROPRULE(X, Y, Z) \
+ ((X << R_3NAN_1ST_SHIFT) | (Y << R_3NAN_2ND_SHIFT) | (Z << R_3NAN_3RD_SHIFT))
+
+typedef enum __attribute__((__packed__)) {
+ float_3nan_prop_none = 0, /* No propagation rule specified */
+ float_3nan_prop_abc = PROPRULE(0, 1, 2),
+ float_3nan_prop_acb = PROPRULE(0, 2, 1),
+ float_3nan_prop_bac = PROPRULE(1, 0, 2),
+ float_3nan_prop_bca = PROPRULE(1, 2, 0),
+ float_3nan_prop_cab = PROPRULE(2, 0, 1),
+ float_3nan_prop_cba = PROPRULE(2, 1, 0),
+ float_3nan_prop_s_abc = float_3nan_prop_abc | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_acb = float_3nan_prop_acb | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_bac = float_3nan_prop_bac | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_bca = float_3nan_prop_bca | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_cab = float_3nan_prop_cab | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_cba = float_3nan_prop_cba | R_3NAN_SNAN_MASK,
+} Float3NaNPropRule;
+
+#undef PROPRULE
+
+/*
+ * Rule for result of fused multiply-add 0 * Inf + NaN.
+ * This must be a NaN, but implementations differ on whether this
+ * is the input NaN or the default NaN.
+ *
+ * You don't need to set this if default_nan_mode is enabled.
+ * When not in default-NaN mode, it is an error for the target
+ * not to set the rule in float_status if it uses muladd, and we
+ * will assert if we need to handle an input NaN and no rule was
+ * selected.
+ */
+typedef enum __attribute__((__packed__)) {
+ /* No propagation rule specified */
+ float_infzeronan_none = 0,
+ /* Result is never the default NaN (so always the input NaN) */
+ float_infzeronan_dnan_never = 1,
+ /* Result is always the default NaN */
+ float_infzeronan_dnan_always = 2,
+ /* Result is the default NaN if the input NaN is quiet */
+ float_infzeronan_dnan_if_qnan = 3,
+ /*
+ * Don't raise Invalid for 0 * Inf + NaN. Default is to raise.
+ * IEEE 754-2008 section 7.2 makes it implementation defined whether
+ * 0 * Inf + QNaN raises Invalid or not. Note that 0 * Inf + SNaN will
+ * raise the Invalid flag for the SNaN anyway.
+ *
+ * This is a flag which can be ORed in with any of the above
+ * DNaN behaviour options.
+ */
+ float_infzeronan_suppress_invalid = (1 << 7),
+} FloatInfZeroNaNRule;
+
+/*
+ * When flush_to_zero is set, should we detect denormal results to
+ * be flushed before or after rounding? For most architectures this
+ * should be set to match the tininess_before_rounding setting,
+ * but a few architectures, e.g. MIPS MSA, detect FTZ before
+ * rounding but tininess after rounding.
+ *
+ * This enum is arranged so that the default if the target doesn't
+ * configure it matches the default for tininess_before_rounding
+ * (i.e. "after rounding").
+ */
+typedef enum __attribute__((__packed__)) {
+ float_ftz_after_rounding = 0,
+ float_ftz_before_rounding = 1,
+} FloatFTZDetection;
+
+/*
+ * floatx80 is primarily used by x86 and m68k, and there are
+ * differences in the handling, largely related to the explicit
+ * Integer bit which floatx80 has and the other float formats do not.
+ * These flag values allow specification of the target's requirements
+ * and can be ORed together to set floatx80_behaviour.
+ */
+typedef enum __attribute__((__packed__)) {
+ /* In the default Infinity value, is the Integer bit 0 ? */
+ floatx80_default_inf_int_bit_is_zero = 1,
+ /*
+ * Are Pseudo-infinities (Inf with the Integer bit zero) valid?
+ * If so, floatx80_is_infinity() will return true for them.
+ * If not, floatx80_invalid_encoding will return false for them,
+ * and using them as inputs to a float op will raise Invalid.
+ */
+ floatx80_pseudo_inf_valid = 2,
+ /*
+ * Are Pseudo-NaNs (NaNs where the Integer bit is zero) valid?
+ * If not, floatx80_invalid_encoding() will return false for them,
+ * and using them as inputs to a float op will raise Invalid.
+ */
+ floatx80_pseudo_nan_valid = 4,
+ /*
+ * Are Unnormals (0 < exp < 0x7fff, Integer bit zero) valid?
+ * If not, floatx80_invalid_encoding() will return false for them,
+ * and using them as inputs to a float op will raise Invalid.
+ */
+ floatx80_unnormal_valid = 8,
+
+ /*
+ * If the exponent is 0 and the Integer bit is set, Intel call
+ * this a "pseudo-denormal"; x86 supports that only on input
+ * (treating them as denormals by ignoring the Integer bit).
+ * For m68k, the integer bit is considered validly part of the
+ * input value when the exponent is 0, and may be 0 or 1,
+ * giving extra range. They may also be generated as outputs.
+ * (The m68k manual actually calls these values part of the
+ * normalized number range, not the denormalized number range.)
+ *
+ * By default you get the Intel behaviour where the Integer
+ * bit is ignored; if this is set then the Integer bit value
+ * is honoured, m68k-style.
+ *
+ * Either way, floatx80_invalid_encoding() will always accept
+ * pseudo-denormals.
+ */
+ floatx80_pseudo_denormal_valid = 16,
+} FloatX80Behaviour;
+
+/*
* Floating Point Status. Individual architectures may maintain
* several versions of float_status for different functions. The
* correct status for the operation is then passed by reference to
@@ -181,19 +381,34 @@ typedef struct float_status {
uint16_t float_exception_flags;
FloatRoundMode float_rounding_mode;
FloatX80RoundPrec floatx80_rounding_precision;
+ FloatX80Behaviour floatx80_behaviour;
+ Float2NaNPropRule float_2nan_prop_rule;
+ Float3NaNPropRule float_3nan_prop_rule;
+ FloatInfZeroNaNRule float_infzeronan_rule;
bool tininess_before_rounding;
- /* should denormalised results go to zero and set the inexact flag? */
+ /* should denormalised results go to zero and set output_denormal_flushed? */
bool flush_to_zero;
- /* should denormalised inputs go to zero and set the input_denormal flag? */
+ /* do we detect and flush denormal results before or after rounding? */
+ FloatFTZDetection ftz_detection;
+ /* should denormalised inputs go to zero and set input_denormal_flushed? */
bool flush_inputs_to_zero;
bool default_nan_mode;
/*
+ * The pattern to use for the default NaN. Here the high bit specifies
+ * the default NaN's sign bit, and bits 6..0 specify the high bits of the
+ * fractional part. The low bits of the fractional part are copies of bit 0.
+ * The exponent of the default NaN is (as for any NaN) always all 1s.
+ * Note that a value of 0 here is not a valid NaN. The target must set
+ * this to the correct non-zero value, or we will assert when trying to
+ * create a default NaN.
+ */
+ uint8_t default_nan_pattern;
+ /*
* The flags below are not used on all specializations and may
* constant fold away (see snan_bit_is_one()/no_signalling_nans() in
* softfloat-specialize.inc.c)
*/
bool snan_bit_is_one;
- bool use_first_nan;
bool no_signaling_nans;
/* should overflowed results subtract re_bias to its exponent? */
bool rebias_overflow;
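
The Float3NaNPropRule values are deliberately an encoding rather than an opaque enum: two bits each for the first, second and third operand preference, plus one bit for "prefer SNaN", so pickNaNMulAdd can walk the rule data-driven. A standalone decoder for that layout; the field positions are copied from the FIELD() declarations above, and operands are numbered A=0, B=1, C=2.

#include <stdio.h>

/* Layout from softfloat-types.h: 1ST=[1:0], 2ND=[3:2], 3RD=[5:4], SNAN=bit 6. */
#define R_1ST(r)   (((r) >> 0) & 3)
#define R_2ND(r)   (((r) >> 2) & 3)
#define R_3RD(r)   (((r) >> 4) & 3)
#define R_SNAN(r)  (((r) >> 6) & 1)

#define PROPRULE(x, y, z)  (((x) << 0) | ((y) << 2) | ((z) << 4))

int main(void)
{
    /* float_3nan_prop_s_cab: prefer SNaN, then C over A over B. */
    int rule = PROPRULE(2, 0, 1) | (1 << 6);
    const char *op = "ABC";

    printf("prefer SNaN: %d, order: %c > %c > %c\n",
           R_SNAN(rule), op[R_1ST(rule)], op[R_2ND(rule)], op[R_3RD(rule)]);
    return 0;
}
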
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index eb64075..c18ab2c 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -120,14 +120,16 @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
| Using these differs from negating an input or output before calling
| the muladd function in that this means that a NaN doesn't have its
| sign bit inverted before it is propagated.
-| We also support halving the result before rounding, as a special
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
+|
+| With float_muladd_suppress_add_product_zero, if A or B is zero
+| such that the product is a true zero, then return C without addition.
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
*----------------------------------------------------------------------------*/
enum {
float_muladd_negate_c = 1,
float_muladd_negate_product = 2,
float_muladd_negate_result = 4,
- float_muladd_halve_result = 8,
+ float_muladd_suppress_add_product_zero = 8,
};
/*----------------------------------------------------------------------------
@@ -238,6 +240,8 @@ float16 float16_add(float16, float16, float_status *status);
float16 float16_sub(float16, float16, float_status *status);
float16 float16_mul(float16, float16, float_status *status);
float16 float16_muladd(float16, float16, float16, int, float_status *status);
+float16 float16_muladd_scalbn(float16, float16, float16,
+ int, int, float_status *status);
float16 float16_div(float16, float16, float_status *status);
float16 float16_scalbn(float16, int, float_status *status);
float16 float16_min(float16, float16, float_status *status);
@@ -597,6 +601,8 @@ float32 float32_mul(float32, float32, float_status *status);
float32 float32_div(float32, float32, float_status *status);
float32 float32_rem(float32, float32, float_status *status);
float32 float32_muladd(float32, float32, float32, int, float_status *status);
+float32 float32_muladd_scalbn(float32, float32, float32,
+ int, int, float_status *status);
float32 float32_sqrt(float32, float_status *status);
float32 float32_exp2(float32, float_status *status);
float32 float32_log2(float32, float_status *status);
@@ -792,6 +798,8 @@ float64 float64_mul(float64, float64, float_status *status);
float64 float64_div(float64, float64, float_status *status);
float64 float64_rem(float64, float64, float_status *status);
float64 float64_muladd(float64, float64, float64, int, float_status *status);
+float64 float64_muladd_scalbn(float64, float64, float64,
+ int, int, float_status *status);
float64 float64_sqrt(float64, float_status *status);
float64 float64_log2(float64, float_status *status);
FloatRelation float64_compare(float64, float64, float_status *status);
@@ -952,7 +960,7 @@ float128 floatx80_to_float128(floatx80, float_status *status);
/*----------------------------------------------------------------------------
| The pattern for an extended double-precision inf.
*----------------------------------------------------------------------------*/
-extern const floatx80 floatx80_infinity;
+floatx80 floatx80_default_inf(bool zSign, float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision operations.
@@ -987,14 +995,19 @@ static inline floatx80 floatx80_chs(floatx80 a)
return a;
}
-static inline bool floatx80_is_infinity(floatx80 a)
+static inline bool floatx80_is_infinity(floatx80 a, float_status *status)
{
-#if defined(TARGET_M68K)
- return (a.high & 0x7fff) == floatx80_infinity.high && !(a.low << 1);
-#else
- return (a.high & 0x7fff) == floatx80_infinity.high &&
- a.low == floatx80_infinity.low;
-#endif
+ /*
+ * It's target-specific whether the Integer bit is permitted
+ * to be 0 in a valid Infinity value. (x86 says no, m68k says yes).
+ */
+ bool intbit = a.low >> 63;
+
+ if (!intbit &&
+ !(status->floatx80_behaviour & floatx80_pseudo_inf_valid)) {
+ return false;
+ }
+ return (a.high & 0x7fff) == 0x7fff && !(a.low << 1);
}
static inline bool floatx80_is_neg(floatx80 a)
@@ -1060,41 +1073,45 @@ static inline bool floatx80_unordered_quiet(floatx80 a, floatx80 b,
/*----------------------------------------------------------------------------
| Return whether the given value is an invalid floatx80 encoding.
-| Invalid floatx80 encodings arise when the integer bit is not set, but
-| the exponent is not zero. The only times the integer bit is permitted to
-| be zero is in subnormal numbers and the value zero.
-| This includes what the Intel software developer's manual calls pseudo-NaNs,
-| pseudo-infinities and un-normal numbers. It does not include
-| pseudo-denormals, which must still be correctly handled as inputs even
-| if they are never generated as outputs.
+| Invalid floatx80 encodings may arise when the integer bit is not set
+| correctly; this is target-specific. In Intel terminology the
+| categories are:
+| exp == 0, int = 0, mantissa == 0 : zeroes
+| exp == 0, int = 0, mantissa != 0 : denormals
+| exp == 0, int = 1 : pseudo-denormals
+| 0 < exp < 0x7fff, int = 0 : unnormals
+| 0 < exp < 0x7fff, int = 1 : normals
+| exp == 0x7fff, int = 0, mantissa == 0 : pseudo-infinities
+| exp == 0x7fff, int = 1, mantissa == 0 : infinities
+| exp == 0x7fff, int = 0, mantissa != 0 : pseudo-NaNs
+| exp == 0x7fff, int = 1, mantissa != 0 : NaNs
+|
+| The usual IEEE cases of zero, denormal, normal, inf and NaN are always valid.
+| x87 permits as input also pseudo-denormals.
+| m68k permits all those and also pseudo-infinities, pseudo-NaNs and unnormals.
+|
+| Since we don't have a target that handles floatx80 but prohibits
+| pseudo-denormals in input, we don't currently have a floatx80_behaviour
+| flag for that case, but instead always accept it. Conveniently this
+| means that all cases with either exponent 0 or the integer bit set are
+| valid for all targets.
*----------------------------------------------------------------------------*/
-static inline bool floatx80_invalid_encoding(floatx80 a)
-{
-#if defined(TARGET_M68K)
- /*-------------------------------------------------------------------------
- | With m68k, the explicit integer bit can be zero in the case of:
- | - zeros (exp == 0, mantissa == 0)
- | - denormalized numbers (exp == 0, mantissa != 0)
- | - unnormalized numbers (exp != 0, exp < 0x7FFF)
- | - infinities (exp == 0x7FFF, mantissa == 0)
- | - not-a-numbers (exp == 0x7FFF, mantissa != 0)
- |
- | For infinities and NaNs, the explicit integer bit can be either one or
- | zero.
- |
- | The IEEE 754 standard does not define a zero integer bit. Such a number
- | is an unnormalized number. Hardware does not directly support
- | denormalized and unnormalized numbers, but implicitly supports them by
- | trapping them as unimplemented data types, allowing efficient conversion
- | in software.
- |
- | See "M68000 FAMILY PROGRAMMER’S REFERENCE MANUAL",
- | "1.6 FLOATING-POINT DATA TYPES"
- *------------------------------------------------------------------------*/
- return false;
-#else
- return (a.low & (1ULL << 63)) == 0 && (a.high & 0x7FFF) != 0;
-#endif
+static inline bool floatx80_invalid_encoding(floatx80 a, float_status *s)
+{
+ if ((a.low >> 63) || (a.high & 0x7fff) == 0) {
+ /* Anything with the Integer bit set or the exponent 0 is valid */
+ return false;
+ }
+
+ if ((a.high & 0x7fff) == 0x7fff) {
+ if (a.low) {
+ return !(s->floatx80_behaviour & floatx80_pseudo_nan_valid);
+ } else {
+ return !(s->floatx80_behaviour & floatx80_pseudo_inf_valid);
+ }
+ } else {
+ return !(s->floatx80_behaviour & floatx80_unnormal_valid);
+ }
}
#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL)
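
The comment block above enumerates the floatx80 encoding categories by (exponent, Integer bit, mantissa); whether the pseudo-* and unnormal classes are valid is now a per-target floatx80_behaviour question instead of a TARGET_M68K ifdef. A standalone classifier for the categories in that table, written as an illustration only: the 64-bit low word holds the Integer bit in bit 63 and the fraction below it.

#include <stdint.h>
#include <stdio.h>

static const char *fx80_category(uint16_t exp_and_sign, uint64_t low)
{
    int exp = exp_and_sign & 0x7fff;
    int intbit = low >> 63;
    uint64_t frac = low << 1;          /* drop the Integer bit */

    if (exp == 0) {
        if (intbit) return "pseudo-denormal";
        return frac ? "denormal" : "zero";
    }
    if (exp == 0x7fff) {
        if (intbit) return frac ? "NaN" : "infinity";
        return frac ? "pseudo-NaN" : "pseudo-infinity";
    }
    return intbit ? "normal" : "unnormal";
}

int main(void)
{
    printf("%s\n", fx80_category(0x7fff, 0x8000000000000000ULL)); /* infinity */
    printf("%s\n", fx80_category(0x7fff, 0x0000000000000001ULL)); /* pseudo-NaN */
    printf("%s\n", fx80_category(0x4000, 0x0123456789abcdefULL)); /* unnormal */
    return 0;
}
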
diff --git a/include/gdbstub/commands.h b/include/gdbstub/commands.h
index f3058f9..bff3674 100644
--- a/include/gdbstub/commands.h
+++ b/include/gdbstub/commands.h
@@ -1,5 +1,5 @@
#ifndef GDBSTUB_COMMANDS_H
-#define GDBSTUB
+#define GDBSTUB_COMMANDS_H
typedef void (*GdbCmdHandler)(GArray *params, void *user_ctx);
@@ -74,23 +74,28 @@ int gdb_put_packet(const char *buf);
/**
* gdb_extend_query_table() - Extend query table.
- * @table: The table with the additional query packet handlers.
- * @size: The number of handlers to be added.
+ * @table: GPtrArray of GdbCmdParseEntry entries.
+ *
+ * The caller should free @table afterwards
*/
-void gdb_extend_query_table(GdbCmdParseEntry *table, int size);
+void gdb_extend_query_table(GPtrArray *table);
/**
* gdb_extend_set_table() - Extend set table.
- * @table: The table with the additional set packet handlers.
- * @size: The number of handlers to be added.
+ * @table: GPtrArray of GdbCmdParseEntry entries.
+ *
+ * The caller should free @table afterwards
*/
-void gdb_extend_set_table(GdbCmdParseEntry *table, int size);
+void gdb_extend_set_table(GPtrArray *table);
/**
* gdb_extend_qsupported_features() - Extend the qSupported features string.
* @qsupported_features: The additional qSupported feature(s) string. The string
* should start with a semicolon and, if there are more than one feature, the
- * features should be separate by a semiocolon.
+ * features should be separated by a semicolon.
+ *
+ * The caller should free @qsupported_features afterwards if
+ * dynamically allocated.
*/
void gdb_extend_qsupported_features(char *qsupported_features);
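
The extension hooks now take a GPtrArray of entries instead of a C array plus length, and per the doc comments the container remains owned by the caller. A hedged sketch of that calling convention: the entry is assumed to be a statically allocated, already-filled GdbCmdParseEntry defined elsewhere in the target code; only the array handling is the point here.

#include <glib.h>
#include "gdbstub/commands.h"

/* Assumed to be defined and populated elsewhere (hypothetical name). */
extern GdbCmdParseEntry my_query_entry;

static void register_extra_gdb_queries(void)
{
    GPtrArray *table = g_ptr_array_new();

    g_ptr_array_add(table, &my_query_entry);
    gdb_extend_query_table(table);

    /* Per the doc comment, the caller frees the container afterwards;
     * the entry itself is static and is not freed here. */
    g_ptr_array_free(table, TRUE);
}
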
diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h
index 26140ef..6f7cc48 100644
--- a/include/gdbstub/helpers.h
+++ b/include/gdbstub/helpers.h
@@ -95,9 +95,13 @@ static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len)
#if TARGET_LONG_BITS == 64
#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
#define ldtul_p(addr) ldq_p(addr)
+#define ldtul_le_p(addr) ldq_le_p(addr)
+#define ldtul_be_p(addr) ldq_be_p(addr)
#else
#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
#define ldtul_p(addr) ldl_p(addr)
+#define ldtul_le_p(addr) ldl_le_p(addr)
+#define ldtul_be_p(addr) ldl_be_p(addr)
#endif
#endif /* _GDBSTUB_HELPERS_H_ */
diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h
index 54ff724..d63228e 100644
--- a/include/gdbstub/syscalls.h
+++ b/include/gdbstub/syscalls.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2023 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef _SYSCALLS_H_
diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h
index 3b8358e..654986d 100644
--- a/include/gdbstub/user.h
+++ b/include/gdbstub/user.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef GDBSTUB_USER_H
diff --git a/include/glib-compat.h b/include/glib-compat.h
index 86be439..2e32b90 100644
--- a/include/glib-compat.h
+++ b/include/glib-compat.h
@@ -37,6 +37,13 @@
#endif
/*
+ * These functions perform function pointer casts which can cause function call
+ * failure on Emscripten. Use g_slist_sort_with_data and g_list_sort_with_data
+ * instead of these functions.
+ */
+#pragma GCC poison g_slist_sort g_list_sort
+
+/*
* Note that because of the GLIB_VERSION_MAX_ALLOWED constant above, allowing
* use of functions from newer GLib via this compat header needs a little
* trickery to prevent warnings being emitted.
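
Because the poisoned functions take a GCompareFunc that GLib casts internally, the replacement is the _with_data variant with the same comparator extended by a (possibly unused) user_data parameter. A minimal self-contained example of the recommended call:

#include <glib.h>

/* GCompareDataFunc: like GCompareFunc, but with a trailing user_data argument. */
static gint cmp_ints(gconstpointer a, gconstpointer b, gpointer user_data)
{
    (void)user_data;
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

int main(void)
{
    GList *l = NULL;

    l = g_list_prepend(l, GINT_TO_POINTER(3));
    l = g_list_prepend(l, GINT_TO_POINTER(1));
    l = g_list_prepend(l, GINT_TO_POINTER(2));

    /* g_list_sort(l, cmp) is poisoned; pass the comparator this way instead. */
    l = g_list_sort_with_data(l, cmp_ints, NULL);

    for (GList *it = l; it; it = it->next) {
        g_print("%d ", GPOINTER_TO_INT(it->data));   /* 1 2 3 */
    }
    g_print("\n");
    g_list_free(l);
    return 0;
}
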
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 0e6e82b..2e6e341 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -112,7 +112,6 @@ typedef struct AcpiSpcrData {
uint8_t flow_control;
uint8_t terminal_type;
uint8_t language;
- uint8_t reserved1;
uint16_t pci_device_id; /* Must be 0xffff if not PCI device */
uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */
uint8_t pci_bus;
@@ -120,7 +119,11 @@ typedef struct AcpiSpcrData {
uint8_t pci_function;
uint32_t pci_flags;
uint8_t pci_segment;
- uint32_t reserved2;
+ uint32_t uart_clk_freq;
+ uint32_t precise_baudrate;
+ uint32_t namespace_string_length;
+ uint32_t namespace_string_offset;
+ char namespace_string[];
} AcpiSpcrData;
#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index e0e51e8..4b8ee09 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -21,7 +21,7 @@
*/
#include "qemu/notify.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/acpi/acpi_dev_interface.h"
/*
@@ -150,6 +150,9 @@ struct ACPIREGS {
Notifier wakeup;
};
+/* Return whether ACPI subsystem is built in */
+bool acpi_builtin(void);
+
/* PM_TMR */
void acpi_pm_tmr_update(ACPIREGS *ar, bool enable);
void acpi_pm_tmr_calc_overflow_time(ACPIREGS *ar);
diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h
deleted file mode 100644
index a304bad..0000000
--- a/include/hw/acpi/acpi_generic_initiator.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
- */
-
-#ifndef ACPI_GENERIC_INITIATOR_H
-#define ACPI_GENERIC_INITIATOR_H
-
-#include "qom/object_interfaces.h"
-
-#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator"
-
-typedef struct AcpiGenericInitiator {
- /* private */
- Object parent;
-
- /* public */
- char *pci_dev;
- uint16_t node;
-} AcpiGenericInitiator;
-
-/*
- * ACPI 6.3:
- * Table 5-81 Flags – Generic Initiator Affinity Structure
- */
-typedef enum {
- /*
- * If clear, the OSPM ignores the contents of the Generic
- * Initiator/Port Affinity Structure. This allows system firmware
- * to populate the SRAT with a static number of structures, but only
- * enable them as necessary.
- */
- GEN_AFFINITY_ENABLED = (1 << 0),
-} GenericAffinityFlags;
-
-/*
- * ACPI 6.3:
- * Table 5-80 Device Handle - PCI
- */
-typedef struct PCIDeviceHandle {
- uint16_t segment;
- uint16_t bdf;
-} PCIDeviceHandle;
-
-void build_srat_generic_pci_initiator(GArray *table_data);
-
-#endif
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index a378415..c18f681 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -486,6 +486,13 @@ Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset,
void build_srat_memory(GArray *table_data, uint64_t base,
uint64_t len, int node, MemoryAffinityFlags flags);
+void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node,
+ uint16_t segment, uint8_t bus,
+ uint8_t devfn);
+
+void build_srat_acpi_generic_port(GArray *table_data, uint32_t node,
+ const char *hid, uint32_t uid);
+
void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
const char *oem_id, const char *oem_table_id);
@@ -500,5 +507,5 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
void build_spcr(GArray *table_data, BIOSLinker *linker,
const AcpiSpcrData *f, const uint8_t rev,
- const char *oem_id, const char *oem_table_id);
+ const char *oem_id, const char *oem_table_id, const char *name);
#endif
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index e6e1a9e..32654dc 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -19,6 +19,8 @@
#include "hw/boards.h"
#include "hw/hotplug.h"
+#define ACPI_CPU_HOTPLUG_REG_LEN 12
+
typedef struct AcpiCpuStatus {
CPUState *cpu;
uint64_t arch_id;
@@ -61,9 +63,10 @@ typedef void (*build_madt_cpu_fn)(int uid, const CPUArchIdList *apic_ids,
GArray *entry, bool force_enabled);
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
- build_madt_cpu_fn build_madt_cpu, hwaddr io_base,
+ build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
const char *res_root,
- const char *event_handler_method);
+ const char *event_handler_method,
+ AmlRegionSpace rs);
void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list);
diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h
index ba84ce0..d2dac87 100644
--- a/include/hw/acpi/generic_event_device.h
+++ b/include/hw/acpi/generic_event_device.h
@@ -62,6 +62,7 @@
#include "hw/sysbus.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/ghes.h"
+#include "hw/acpi/cpu.h"
#include "qom/object.h"
#define ACPI_POWER_BUTTON_DEVICE "PWRB"
@@ -80,12 +81,16 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
/* ACPI_GED_REG_RESET value for reset*/
#define ACPI_GED_RESET_VALUE 0x42
-/* ACPI_GED_REG_SLEEP_CTL.SLP_TYP value for S5 (aka poweroff) */
-#define ACPI_GED_SLP_TYP_S5 0x05
+/* [ACPI 5.0 Chapter 4.8.3.7] Sleep Control and Status Register */
+#define ACPI_GED_SLP_TYP_POS 0x2 /* SLP_TYPx Bit Offset */
+#define ACPI_GED_SLP_TYP_MASK 0x07 /* SLP_TYPx 3-bit mask */
+#define ACPI_GED_SLP_TYP_S5 0x05 /* System _S5 State (Soft Off) */
+#define ACPI_GED_SLP_EN 0x20 /* SLP_EN write-only bit */
#define GED_DEVICE "GED"
#define AML_GED_EVT_REG "EREG"
#define AML_GED_EVT_SEL "ESEL"
+#define AML_GED_EVT_CPU_SCAN_METHOD "\\_SB.GED.CSCN"
/*
* Platforms need to specify the GED event bitmap
@@ -95,6 +100,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
#define ACPI_GED_MEM_HOTPLUG_EVT 0x1
#define ACPI_GED_PWR_DOWN_EVT 0x2
#define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4
+#define ACPI_GED_CPU_HOTPLUG_EVT 0x8
typedef struct GEDState {
MemoryRegion evt;
@@ -106,6 +112,8 @@ struct AcpiGedState {
SysBusDevice parent_obj;
MemHotplugState memhp_state;
MemoryRegion container_memhp;
+ CPUHotplugState cpuhp_state;
+ MemoryRegion container_cpuhp;
GEDState ged_state;
uint32_t ged_event_bitmap;
qemu_irq irq;
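The new SLP_* definitions above describe the ACPI sleep control register layout (SLP_TYPx in bits 2..4, SLP_EN in bit 5). A hypothetical write handler, not the code from this patch, would recognise an S5 (soft-off) request roughly like this:

#include "hw/acpi/generic_event_device.h"
#include "sysemu/runstate.h"   /* may be system/runstate.h in newer trees */

static void ged_sleep_ctl_write(uint64_t val)
{
    uint8_t slp_typ = (val >> ACPI_GED_SLP_TYP_POS) & ACPI_GED_SLP_TYP_MASK;
    bool slp_en = val & ACPI_GED_SLP_EN;

    if (slp_en && slp_typ == ACPI_GED_SLP_TYP_S5) {
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
    }
}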
diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
index 674f695..578a582 100644
--- a/include/hw/acpi/ghes.h
+++ b/include/hw/acpi/ghes.h
@@ -23,6 +23,7 @@
#define ACPI_GHES_H
#include "hw/acpi/bios-linker-loader.h"
+#include "qapi/error.h"
/*
* Values for Hardware Error Notification Type field
@@ -59,26 +60,27 @@ enum AcpiGhesNotifyType {
enum {
ACPI_HEST_SRC_ID_SEA = 0,
/* future ids go here */
- ACPI_HEST_SRC_ID_RESERVED,
+
+ ACPI_GHES_ERROR_SOURCE_COUNT
};
typedef struct AcpiGhesState {
- uint64_t ghes_addr_le;
+ uint64_t hw_error_le;
bool present; /* True if GHES is present at all on this board */
} AcpiGhesState;
-void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker);
-void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
+void acpi_build_hest(GArray *table_data, GArray *hardware_errors,
+ BIOSLinker *linker,
const char *oem_id, const char *oem_table_id);
void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
GArray *hardware_errors);
-int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr);
+int acpi_ghes_memory_errors(uint16_t source_id, uint64_t error_physical_addr);
/**
* acpi_ghes_present: Report whether ACPI GHES table is present
*
* Returns: true if the system has an ACPI GHES table and it is
- * safe to call acpi_ghes_record_errors() to record a memory error.
+ * safe to call acpi_ghes_memory_errors() to record a memory error.
*/
bool acpi_ghes_present(void);
#endif
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index 2faf7f0..245fe08 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -46,6 +46,7 @@ typedef struct ICH9LPCPMRegs {
uint32_t smi_en;
uint32_t smi_en_wmask;
uint32_t smi_sts;
+ uint32_t smi_sts_wmask;
qemu_irq irq; /* SCI */
@@ -68,6 +69,11 @@ typedef struct ICH9LPCPMRegs {
bool smm_compat;
bool enable_tco;
TCOIORegs tco_regs;
+
+ bool swsmi_timer_enabled;
+ bool periodic_timer_enabled;
+ QEMUTimer *swsmi_timer;
+ QEMUTimer *periodic_timer;
} ICH9LPCPMRegs;
#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
diff --git a/include/hw/acpi/ich9_tco.h b/include/hw/acpi/ich9_tco.h
index 2562a7c..b3c3f69 100644
--- a/include/hw/acpi/ich9_tco.h
+++ b/include/hw/acpi/ich9_tco.h
@@ -10,7 +10,7 @@
#ifndef HW_ACPI_TCO_H
#define HW_ACPI_TCO_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "migration/vmstate.h"
/* As per ICH9 spec, the internal timer has an error of ~0.6s on every tick */
diff --git a/include/hw/acpi/ich9_timer.h b/include/hw/acpi/ich9_timer.h
new file mode 100644
index 0000000..5112df4
--- /dev/null
+++ b/include/hw/acpi/ich9_timer.h
@@ -0,0 +1,23 @@
+/*
+ * QEMU ICH9 Timer emulation
+ *
+ * Copyright (c) 2024 Dominic Prinz <git@dprinz.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_ACPI_ICH9_TIMER_H
+#define HW_ACPI_ICH9_TIMER_H
+
+#include "hw/acpi/ich9.h"
+
+void ich9_pm_update_swsmi_timer(ICH9LPCPMRegs *pm, bool enable);
+
+void ich9_pm_swsmi_timer_init(ICH9LPCPMRegs *pm);
+
+void ich9_pm_update_periodic_timer(ICH9LPCPMRegs *pm, bool enable);
+
+void ich9_pm_periodic_timer_init(ICH9LPCPMRegs *pm);
+
+#endif
diff --git a/include/hw/acpi/pci.h b/include/hw/acpi/pci.h
index 467a994..6359d57 100644
--- a/include/hw/acpi/pci.h
+++ b/include/hw/acpi/pci.h
@@ -40,4 +40,7 @@ Aml *aml_pci_device_dsm(void);
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus);
void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope);
+
+void build_srat_generic_affinity_structures(GArray *table_data);
+
#endif
diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h
index ac21a95..a97904b 100644
--- a/include/hw/acpi/pcihp.h
+++ b/include/hw/acpi/pcihp.h
@@ -58,7 +58,7 @@ typedef struct AcpiPciHpState {
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
MemoryRegion *io, uint16_t io_base);
-bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus);
+bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus);
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h
index 579c45f..9d0fe6f 100644
--- a/include/hw/acpi/tpm.h
+++ b/include/hw/acpi/tpm.h
@@ -19,7 +19,7 @@
#include "qemu/units.h"
#include "hw/registerfields.h"
#include "hw/acpi/aml-build.h"
-#include "sysemu/tpm.h"
+#include "system/tpm.h"
#ifdef CONFIG_TPM
diff --git a/include/hw/acpi/vmclock.h b/include/hw/acpi/vmclock.h
new file mode 100644
index 0000000..5605605
--- /dev/null
+++ b/include/hw/acpi/vmclock.h
@@ -0,0 +1,34 @@
+#ifndef ACPI_VMCLOCK_H
+#define ACPI_VMCLOCK_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/qdev-core.h"
+#include "qemu/uuid.h"
+#include "qom/object.h"
+
+#define TYPE_VMCLOCK "vmclock"
+
+#define VMCLOCK_ADDR 0xfeffb000
+#define VMCLOCK_SIZE 0x1000
+
+OBJECT_DECLARE_SIMPLE_TYPE(VmclockState, VMCLOCK)
+
+struct vmclock_abi;
+
+struct VmclockState {
+ DeviceState parent_obj;
+ MemoryRegion clk_page;
+ uint64_t physaddr;
+ struct vmclock_abi *clk;
+};
+
+/* returns NULL unless there is exactly one device */
+static inline Object *find_vmclock_dev(void)
+{
+ return object_resolve_path_type("", TYPE_VMCLOCK, NULL);
+}
+
+void vmclock_build_acpi(VmclockState *vms, GArray *table_data,
+ BIOSLinker *linker, const char *oem_id);
+
+#endif
diff --git a/include/hw/adc/aspeed_adc.h b/include/hw/adc/aspeed_adc.h
index ff1d06e..f502f19 100644
--- a/include/hw/adc/aspeed_adc.h
+++ b/include/hw/adc/aspeed_adc.h
@@ -18,6 +18,7 @@
#define TYPE_ASPEED_2500_ADC TYPE_ASPEED_ADC "-ast2500"
#define TYPE_ASPEED_2600_ADC TYPE_ASPEED_ADC "-ast2600"
#define TYPE_ASPEED_1030_ADC TYPE_ASPEED_ADC "-ast1030"
+#define TYPE_ASPEED_2700_ADC TYPE_ASPEED_ADC "-ast2700"
OBJECT_DECLARE_TYPE(AspeedADCState, AspeedADCClass, ASPEED_ADC)
#define TYPE_ASPEED_ADC_ENGINE "aspeed.adc.engine"
diff --git a/include/hw/adc/max111x.h b/include/hw/adc/max111x.h
deleted file mode 100644
index beff59c..0000000
--- a/include/hw/adc/max111x.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Maxim MAX1110/1111 ADC chip emulation.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPLv2.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#ifndef HW_MISC_MAX111X_H
-#define HW_MISC_MAX111X_H
-
-#include "hw/ssi/ssi.h"
-#include "qom/object.h"
-
-/*
- * This is a model of the Maxim MAX1110/1111 ADC chip, which for QEMU
- * is an SSI slave device. It has either 4 (max1110) or 8 (max1111)
- * 8-bit ADC channels.
- *
- * QEMU interface:
- * + GPIO inputs 0..3 (for max1110) or 0..7 (for max1111): set the value
- * of each ADC input, as an unsigned 8-bit value
- * + GPIO output 0: interrupt line
- * + Properties "input0" to "input3" (max1110) or "input0" to "input7"
- * (max1111): initial reset values for ADC inputs.
- *
- * Known bugs:
- * + the interrupt line is not correctly implemented, and will never
- * be lowered once it has been asserted.
- */
-struct MAX111xState {
- SSIPeripheral parent_obj;
-
- qemu_irq interrupt;
- /* Values of inputs at system reset (settable by QOM property) */
- uint8_t reset_input[8];
-
- uint8_t tb1, rb2, rb3;
- int cycle;
-
- uint8_t input[8];
- int inputs, com;
-};
-
-#define TYPE_MAX_111X "max111x"
-
-OBJECT_DECLARE_SIMPLE_TYPE(MAX111xState, MAX_111X)
-
-#define TYPE_MAX_1110 "max1110"
-#define TYPE_MAX_1111 "max1111"
-
-#endif
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
index 67a9a17..445ba1b 100644
--- a/include/hw/arm/allwinner-a10.h
+++ b/include/hw/arm/allwinner-a10.h
@@ -12,8 +12,9 @@
#include "hw/misc/allwinner-a10-ccm.h"
#include "hw/misc/allwinner-a10-dramc.h"
#include "hw/i2c/allwinner-i2c.h"
+#include "hw/ssi/allwinner-a10-spi.h"
#include "hw/watchdog/allwinner-wdt.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "target/arm/cpu.h"
#include "qom/object.h"
@@ -40,6 +41,7 @@ struct AwA10State {
AllwinnerAHCIState sata;
AwSdHostState mmc0;
AWI2CState i2c0;
+ AWA10SPIState spi0;
AwRtcState rtc;
AwWdtState wdt;
MemoryRegion sram_a;
diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h
index 24ba4e1..db897c8 100644
--- a/include/hw/arm/allwinner-h3.h
+++ b/include/hw/arm/allwinner-h3.h
@@ -49,7 +49,7 @@
#include "hw/i2c/allwinner-i2c.h"
#include "hw/watchdog/allwinner-wdt.h"
#include "target/arm/cpu.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
/**
* Allwinner H3 device list
diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h
index 614e74b..f8a0e94 100644
--- a/include/hw/arm/allwinner-r40.h
+++ b/include/hw/arm/allwinner-r40.h
@@ -35,7 +35,7 @@
#include "hw/usb/hcd-ehci.h"
#include "hw/watchdog/allwinner-wdt.h"
#include "target/arm/cpu.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
enum {
AW_R40_DEV_SRAM_A1,
diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h
index cbeacb2..973277b 100644
--- a/include/hw/arm/aspeed.h
+++ b/include/hw/arm/aspeed.h
@@ -39,6 +39,8 @@ struct AspeedMachineClass {
uint32_t macs_mask;
void (*i2c_init)(AspeedMachineState *bmc);
uint32_t uart_default;
+ bool sdhci_wp_inverted;
+ bool vbootrom;
};
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 849ba37..217ef0e 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -39,11 +39,11 @@
#include "hw/misc/unimp.h"
#include "hw/misc/aspeed_peci.h"
#include "hw/fsi/aspeed_apb2opb.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#include "hw/intc/arm_gicv3.h"
-#define ASPEED_SPIS_NUM 2
-#define ASPEED_EHCIS_NUM 2
+#define ASPEED_SPIS_NUM 3
+#define ASPEED_EHCIS_NUM 4
#define ASPEED_WDTS_NUM 8
#define ASPEED_CPUS_NUM 4
#define ASPEED_MACS_NUM 4
@@ -59,6 +59,7 @@ struct AspeedSoCState {
MemoryRegion sram;
MemoryRegion spi_boot_container;
MemoryRegion spi_boot;
+ MemoryRegion vbootrom;
AddressSpace dram_as;
AspeedRtcState rtc;
AspeedTimerCtrlState timerctrl;
@@ -90,6 +91,8 @@ struct AspeedSoCState {
SerialMM uart[ASPEED_UARTS_NUM];
Clock *sysclk;
UnimplementedDeviceState iomem;
+ UnimplementedDeviceState iomem0;
+ UnimplementedDeviceState iomem1;
UnimplementedDeviceState video;
UnimplementedDeviceState emmc_boot_controller;
UnimplementedDeviceState dpmcu;
@@ -97,6 +100,7 @@ struct AspeedSoCState {
UnimplementedDeviceState espi;
UnimplementedDeviceState udc;
UnimplementedDeviceState sgpiom;
+ UnimplementedDeviceState ltpi;
UnimplementedDeviceState jtag[ASPEED_JTAG_NUM];
AspeedAPB2OPBState fsi[2];
};
@@ -128,7 +132,7 @@ struct Aspeed27x0SoCState {
AspeedSoCState parent;
ARMCPU cpu[ASPEED_CPUS_NUM];
- AspeedINTCState intc;
+ AspeedINTCState intc[2];
GICv3State gic;
MemoryRegion dram_empty;
};
@@ -142,13 +146,36 @@ struct Aspeed10x0SoCState {
ARMv7MState armv7m;
};
+struct Aspeed27x0SSPSoCState {
+ AspeedSoCState parent;
+ AspeedINTCState intc[2];
+ UnimplementedDeviceState ipc[2];
+ UnimplementedDeviceState scuio;
+
+ ARMv7MState armv7m;
+};
+
+#define TYPE_ASPEED27X0SSP_SOC "aspeed27x0ssp-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0SSPSoCState, ASPEED27X0SSP_SOC)
+
+struct Aspeed27x0TSPSoCState {
+ AspeedSoCState parent;
+ AspeedINTCState intc[2];
+ UnimplementedDeviceState ipc[2];
+ UnimplementedDeviceState scuio;
+
+ ARMv7MState armv7m;
+};
+
+#define TYPE_ASPEED27X0TSP_SOC "aspeed27x0tsp-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0TSPSoCState, ASPEED27X0TSP_SOC)
+
#define TYPE_ASPEED10X0_SOC "aspeed10x0-soc"
OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC)
struct AspeedSoCClass {
DeviceClass parent_class;
- const char *name;
/** valid_cpu_types: NULL terminated array of a single CPU type. */
const char * const *valid_cpu_types;
uint32_t silicon_rev;
@@ -164,13 +191,18 @@ struct AspeedSoCClass {
const hwaddr *memmap;
uint32_t num_cpus;
qemu_irq (*get_irq)(AspeedSoCState *s, int dev);
+ bool (*boot_from_emmc)(AspeedSoCState *s);
};
const char *aspeed_soc_cpu_type(AspeedSoCClass *sc);
enum {
+ ASPEED_DEV_VBOOTROM,
ASPEED_DEV_SPI_BOOT,
ASPEED_DEV_IOMEM,
+ ASPEED_DEV_IOMEM0,
+ ASPEED_DEV_IOMEM1,
+ ASPEED_DEV_LTPI,
ASPEED_DEV_UART0,
ASPEED_DEV_UART1,
ASPEED_DEV_UART2,
@@ -192,8 +224,11 @@ enum {
ASPEED_DEV_SPI2,
ASPEED_DEV_EHCI1,
ASPEED_DEV_EHCI2,
+ ASPEED_DEV_EHCI3,
+ ASPEED_DEV_EHCI4,
ASPEED_DEV_VIC,
ASPEED_DEV_INTC,
+ ASPEED_DEV_INTCIO,
ASPEED_DEV_SDMC,
ASPEED_DEV_SCU,
ASPEED_DEV_ADC,
@@ -248,6 +283,8 @@ enum {
ASPEED_DEV_SLIIO,
ASPEED_GIC_DIST,
ASPEED_GIC_REDIST,
+ ASPEED_DEV_IPC0,
+ ASPEED_DEV_IPC1,
};
qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev);
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
index 80c492d..a2e22bd 100644
--- a/include/hw/arm/boot.h
+++ b/include/hw/arm/boot.h
@@ -132,6 +132,9 @@ struct arm_boot_info {
bool secure_board_setup;
arm_endianness endianness;
+
+ /* CPU that loaded the kernel and that should be the first to boot. */
+ ARMCPU *primary_cpu;
};
/**
@@ -160,6 +163,7 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu,
* @binfo: struct describing the boot environment
* @addr_limit: upper limit of the available memory area at @addr
* @as: address space to load image to
+ * @cpu: ARM CPU object
*
* Load a device tree supplied by the machine or by the user with the
* '-dtb' command line option, and put it at offset @addr in target
@@ -176,7 +180,8 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu,
* Note: Must not be called unless have_dtb(binfo) is true.
*/
int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
- hwaddr addr_limit, AddressSpace *as, MachineState *ms);
+ hwaddr addr_limit, AddressSpace *as, MachineState *ms,
+ ARMCPU *cpu);
/* Write a secure board setup routine with a dummy handler for SMCs */
void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
diff --git a/include/hw/arm/bsa.h b/include/hw/arm/bsa.h
index 8eaab60..13ed2d2 100644
--- a/include/hw/arm/bsa.h
+++ b/include/hw/arm/bsa.h
@@ -22,6 +22,8 @@
#define QEMU_ARM_BSA_H
/* These are architectural INTID values */
+#define ARCH_TIMER_S_EL2_VIRT_IRQ 19
+#define ARCH_TIMER_S_EL2_IRQ 20
#define VIRTUAL_PMU_IRQ 23
#define ARCH_GIC_MAINT_IRQ 25
#define ARCH_TIMER_NS_EL2_IRQ 26
diff --git a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h
index df2f839..b68d433 100644
--- a/include/hw/arm/fsl-imx25.h
+++ b/include/hw/arm/fsl-imx25.h
@@ -29,7 +29,7 @@
#include "hw/sd/sdhci.h"
#include "hw/usb/chipidea.h"
#include "hw/watchdog/wdt_imx2.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "target/arm/cpu.h"
#include "qom/object.h"
diff --git a/include/hw/arm/fsl-imx31.h b/include/hw/arm/fsl-imx31.h
index 40c593a..41232a2 100644
--- a/include/hw/arm/fsl-imx31.h
+++ b/include/hw/arm/fsl-imx31.h
@@ -25,7 +25,7 @@
#include "hw/i2c/imx_i2c.h"
#include "hw/gpio/imx_gpio.h"
#include "hw/watchdog/wdt_imx2.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "target/arm/cpu.h"
#include "qom/object.h"
diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h
index 61c593f..124bbd4 100644
--- a/include/hw/arm/fsl-imx6.h
+++ b/include/hw/arm/fsl-imx6.h
@@ -33,7 +33,8 @@
#include "hw/usb/chipidea.h"
#include "hw/usb/imx-usb-phy.h"
#include "hw/pci-host/designware.h"
-#include "exec/memory.h"
+#include "hw/or-irq.h"
+#include "system/memory.h"
#include "cpu.h"
#include "qom/object.h"
@@ -73,6 +74,7 @@ struct FslIMX6State {
ChipideaState usb[FSL_IMX6_NUM_USBS];
IMXFECState eth;
DesignwarePCIEHost pcie;
+ OrIRQState pcie4_msi_irq;
MemoryRegion rom;
MemoryRegion caam;
MemoryRegion ocram;
@@ -457,7 +459,7 @@ struct FslIMX6State {
#define FSL_IMX6_PCIE1_IRQ 120
#define FSL_IMX6_PCIE2_IRQ 121
#define FSL_IMX6_PCIE3_IRQ 122
-#define FSL_IMX6_PCIE4_IRQ 123
+#define FSL_IMX6_PCIE4_MSI_IRQ 123
#define FSL_IMX6_DCIC1_IRQ 124
#define FSL_IMX6_DCIC2_IRQ 125
#define FSL_IMX6_MLB150_HIGH_IRQ 126
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
index 8277b0e..4e3209b 100644
--- a/include/hw/arm/fsl-imx6ul.h
+++ b/include/hw/arm/fsl-imx6ul.h
@@ -33,7 +33,7 @@
#include "hw/net/imx_fec.h"
#include "hw/usb/chipidea.h"
#include "hw/usb/imx-usb-phy.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "cpu.h"
#include "qom/object.h"
#include "qemu/units.h"
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
index 411fa1c..aa7818c 100644
--- a/include/hw/arm/fsl-imx7.h
+++ b/include/hw/arm/fsl-imx7.h
@@ -36,6 +36,7 @@
#include "hw/net/imx_fec.h"
#include "hw/pci-host/designware.h"
#include "hw/usb/chipidea.h"
+#include "hw/or-irq.h"
#include "cpu.h"
#include "qom/object.h"
#include "qemu/units.h"
@@ -85,6 +86,7 @@ struct FslIMX7State {
IMX7GPRState gpr;
ChipideaState usb[FSL_IMX7_NUM_USBS];
DesignwarePCIEHost pcie;
+ OrIRQState pcie4_msi_irq;
MemoryRegion rom;
MemoryRegion caam;
MemoryRegion ocram;
@@ -428,7 +430,7 @@ enum FslIMX7IRQs {
FSL_IMX7_PCI_INTA_IRQ = 125,
FSL_IMX7_PCI_INTB_IRQ = 124,
FSL_IMX7_PCI_INTC_IRQ = 123,
- FSL_IMX7_PCI_INTD_IRQ = 122,
+ FSL_IMX7_PCI_INTD_MSI_IRQ = 122,
FSL_IMX7_UART7_IRQ = 126,
diff --git a/include/hw/arm/fsl-imx8mp.h b/include/hw/arm/fsl-imx8mp.h
new file mode 100644
index 0000000..d016f7d
--- /dev/null
+++ b/include/hw/arm/fsl-imx8mp.h
@@ -0,0 +1,284 @@
+/*
+ * i.MX 8M Plus SoC Definitions
+ *
+ * Copyright (c) 2024, Bernhard Beschow <shentey@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef FSL_IMX8MP_H
+#define FSL_IMX8MP_H
+
+#include "cpu.h"
+#include "hw/char/imx_serial.h"
+#include "hw/gpio/imx_gpio.h"
+#include "hw/i2c/imx_i2c.h"
+#include "hw/intc/arm_gicv3_common.h"
+#include "hw/misc/imx7_snvs.h"
+#include "hw/misc/imx8mp_analog.h"
+#include "hw/misc/imx8mp_ccm.h"
+#include "hw/net/imx_fec.h"
+#include "hw/or-irq.h"
+#include "hw/pci-host/designware.h"
+#include "hw/pci-host/fsl_imx8m_phy.h"
+#include "hw/sd/sdhci.h"
+#include "hw/ssi/imx_spi.h"
+#include "hw/timer/imx_gpt.h"
+#include "hw/usb/hcd-dwc3.h"
+#include "hw/watchdog/wdt_imx2.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "qemu/units.h"
+
+#define TYPE_FSL_IMX8MP "fsl-imx8mp"
+OBJECT_DECLARE_SIMPLE_TYPE(FslImx8mpState, FSL_IMX8MP)
+
+#define FSL_IMX8MP_RAM_START 0x40000000
+#define FSL_IMX8MP_RAM_SIZE_MAX (8 * GiB)
+
+enum FslImx8mpConfiguration {
+ FSL_IMX8MP_NUM_CPUS = 4,
+ FSL_IMX8MP_NUM_ECSPIS = 3,
+ FSL_IMX8MP_NUM_GPIOS = 5,
+ FSL_IMX8MP_NUM_GPTS = 6,
+ FSL_IMX8MP_NUM_I2CS = 6,
+ FSL_IMX8MP_NUM_IRQS = 160,
+ FSL_IMX8MP_NUM_UARTS = 4,
+ FSL_IMX8MP_NUM_USBS = 2,
+ FSL_IMX8MP_NUM_USDHCS = 3,
+ FSL_IMX8MP_NUM_WDTS = 3,
+};
+
+struct FslImx8mpState {
+ SysBusDevice parent_obj;
+
+ ARMCPU cpu[FSL_IMX8MP_NUM_CPUS];
+ GICv3State gic;
+ IMXGPTState gpt[FSL_IMX8MP_NUM_GPTS];
+ IMXGPIOState gpio[FSL_IMX8MP_NUM_GPIOS];
+ IMX8MPCCMState ccm;
+ IMX8MPAnalogState analog;
+ IMX7SNVSState snvs;
+ IMXSPIState spi[FSL_IMX8MP_NUM_ECSPIS];
+ IMXI2CState i2c[FSL_IMX8MP_NUM_I2CS];
+ IMXSerialState uart[FSL_IMX8MP_NUM_UARTS];
+ IMXFECState enet;
+ SDHCIState usdhc[FSL_IMX8MP_NUM_USDHCS];
+ IMX2WdtState wdt[FSL_IMX8MP_NUM_WDTS];
+ USBDWC3 usb[FSL_IMX8MP_NUM_USBS];
+ DesignwarePCIEHost pcie;
+ FslImx8mPciePhyState pcie_phy;
+ OrIRQState gpt5_gpt6_irq;
+ MemoryRegion ocram;
+
+ uint32_t phy_num;
+ bool phy_connected;
+};
+
+enum FslImx8mpMemoryRegions {
+ FSL_IMX8MP_A53_DAP,
+ FSL_IMX8MP_AIPS1_CONFIGURATION,
+ FSL_IMX8MP_AIPS2_CONFIGURATION,
+ FSL_IMX8MP_AIPS3_CONFIGURATION,
+ FSL_IMX8MP_AIPS4_CONFIGURATION,
+ FSL_IMX8MP_AIPS5_CONFIGURATION,
+ FSL_IMX8MP_ANA_OSC,
+ FSL_IMX8MP_ANA_PLL,
+ FSL_IMX8MP_ANA_TSENSOR,
+ FSL_IMX8MP_APBH_DMA,
+ FSL_IMX8MP_ASRC,
+ FSL_IMX8MP_AUDIO_BLK_CTRL,
+ FSL_IMX8MP_AUDIO_DSP,
+ FSL_IMX8MP_AUDIO_XCVR_RX,
+ FSL_IMX8MP_AUD_IRQ_STEER,
+ FSL_IMX8MP_BOOT_ROM,
+ FSL_IMX8MP_BOOT_ROM_PROTECTED,
+ FSL_IMX8MP_CAAM,
+ FSL_IMX8MP_CAAM_MEM,
+ FSL_IMX8MP_CCM,
+ FSL_IMX8MP_CSU,
+ FSL_IMX8MP_DDR_BLK_CTRL,
+ FSL_IMX8MP_DDR_CTL,
+ FSL_IMX8MP_DDR_PERF_MON,
+ FSL_IMX8MP_DDR_PHY,
+ FSL_IMX8MP_DDR_PHY_BROADCAST,
+ FSL_IMX8MP_ECSPI1,
+ FSL_IMX8MP_ECSPI2,
+ FSL_IMX8MP_ECSPI3,
+ FSL_IMX8MP_EDMA_CHANNELS,
+ FSL_IMX8MP_EDMA_MANAGEMENT_PAGE,
+ FSL_IMX8MP_ENET1,
+ FSL_IMX8MP_ENET2_TSN,
+ FSL_IMX8MP_FLEXCAN1,
+ FSL_IMX8MP_FLEXCAN2,
+ FSL_IMX8MP_GIC_DIST,
+ FSL_IMX8MP_GIC_REDIST,
+ FSL_IMX8MP_GPC,
+ FSL_IMX8MP_GPIO1,
+ FSL_IMX8MP_GPIO2,
+ FSL_IMX8MP_GPIO3,
+ FSL_IMX8MP_GPIO4,
+ FSL_IMX8MP_GPIO5,
+ FSL_IMX8MP_GPT1,
+ FSL_IMX8MP_GPT2,
+ FSL_IMX8MP_GPT3,
+ FSL_IMX8MP_GPT4,
+ FSL_IMX8MP_GPT5,
+ FSL_IMX8MP_GPT6,
+ FSL_IMX8MP_GPU2D,
+ FSL_IMX8MP_GPU3D,
+ FSL_IMX8MP_HDMI_TX,
+ FSL_IMX8MP_HDMI_TX_AUDLNK_MSTR,
+ FSL_IMX8MP_HSIO_BLK_CTL,
+ FSL_IMX8MP_I2C1,
+ FSL_IMX8MP_I2C2,
+ FSL_IMX8MP_I2C3,
+ FSL_IMX8MP_I2C4,
+ FSL_IMX8MP_I2C5,
+ FSL_IMX8MP_I2C6,
+ FSL_IMX8MP_INTERCONNECT,
+ FSL_IMX8MP_IOMUXC,
+ FSL_IMX8MP_IOMUXC_GPR,
+ FSL_IMX8MP_IPS_DEWARP,
+ FSL_IMX8MP_ISI,
+ FSL_IMX8MP_ISP1,
+ FSL_IMX8MP_ISP2,
+ FSL_IMX8MP_LCDIF1,
+ FSL_IMX8MP_LCDIF2,
+ FSL_IMX8MP_MEDIA_BLK_CTL,
+ FSL_IMX8MP_MIPI_CSI1,
+ FSL_IMX8MP_MIPI_CSI2,
+ FSL_IMX8MP_MIPI_DSI1,
+ FSL_IMX8MP_MU_1_A,
+ FSL_IMX8MP_MU_1_B,
+ FSL_IMX8MP_MU_2_A,
+ FSL_IMX8MP_MU_2_B,
+ FSL_IMX8MP_MU_3_A,
+ FSL_IMX8MP_MU_3_B,
+ FSL_IMX8MP_NPU,
+ FSL_IMX8MP_OCOTP_CTRL,
+ FSL_IMX8MP_OCRAM,
+ FSL_IMX8MP_OCRAM_S,
+ FSL_IMX8MP_PCIE1,
+ FSL_IMX8MP_PCIE1_MEM,
+ FSL_IMX8MP_PCIE_PHY1,
+ FSL_IMX8MP_PDM,
+ FSL_IMX8MP_PERFMON1,
+ FSL_IMX8MP_PERFMON2,
+ FSL_IMX8MP_PWM1,
+ FSL_IMX8MP_PWM2,
+ FSL_IMX8MP_PWM3,
+ FSL_IMX8MP_PWM4,
+ FSL_IMX8MP_QOSC,
+ FSL_IMX8MP_QSPI,
+ FSL_IMX8MP_QSPI1_RX_BUFFER,
+ FSL_IMX8MP_QSPI1_TX_BUFFER,
+ FSL_IMX8MP_QSPI_MEM,
+ FSL_IMX8MP_RAM,
+ FSL_IMX8MP_RDC,
+ FSL_IMX8MP_SAI1,
+ FSL_IMX8MP_SAI2,
+ FSL_IMX8MP_SAI3,
+ FSL_IMX8MP_SAI5,
+ FSL_IMX8MP_SAI6,
+ FSL_IMX8MP_SAI7,
+ FSL_IMX8MP_SDMA1,
+ FSL_IMX8MP_SDMA2,
+ FSL_IMX8MP_SDMA3,
+ FSL_IMX8MP_SEMAPHORE1,
+ FSL_IMX8MP_SEMAPHORE2,
+ FSL_IMX8MP_SEMAPHORE_HS,
+ FSL_IMX8MP_SNVS_HP,
+ FSL_IMX8MP_SPBA1,
+ FSL_IMX8MP_SPBA2,
+ FSL_IMX8MP_SRC,
+ FSL_IMX8MP_SYSCNT_CMP,
+ FSL_IMX8MP_SYSCNT_CTRL,
+ FSL_IMX8MP_SYSCNT_RD,
+ FSL_IMX8MP_TCM_DTCM,
+ FSL_IMX8MP_TCM_ITCM,
+ FSL_IMX8MP_TZASC,
+ FSL_IMX8MP_UART1,
+ FSL_IMX8MP_UART2,
+ FSL_IMX8MP_UART3,
+ FSL_IMX8MP_UART4,
+ FSL_IMX8MP_USB1,
+ FSL_IMX8MP_USB2,
+ FSL_IMX8MP_USB1_DEV,
+ FSL_IMX8MP_USB2_DEV,
+ FSL_IMX8MP_USB1_OTG,
+ FSL_IMX8MP_USB2_OTG,
+ FSL_IMX8MP_USB1_GLUE,
+ FSL_IMX8MP_USB2_GLUE,
+ FSL_IMX8MP_USDHC1,
+ FSL_IMX8MP_USDHC2,
+ FSL_IMX8MP_USDHC3,
+ FSL_IMX8MP_VPU,
+ FSL_IMX8MP_VPU_BLK_CTRL,
+ FSL_IMX8MP_VPU_G1_DECODER,
+ FSL_IMX8MP_VPU_G2_DECODER,
+ FSL_IMX8MP_VPU_VC8000E_ENCODER,
+ FSL_IMX8MP_WDOG1,
+ FSL_IMX8MP_WDOG2,
+ FSL_IMX8MP_WDOG3,
+};
+
+enum FslImx8mpIrqs {
+ FSL_IMX8MP_USDHC1_IRQ = 22,
+ FSL_IMX8MP_USDHC2_IRQ = 23,
+ FSL_IMX8MP_USDHC3_IRQ = 24,
+
+ FSL_IMX8MP_UART1_IRQ = 26,
+ FSL_IMX8MP_UART2_IRQ = 27,
+ FSL_IMX8MP_UART3_IRQ = 28,
+ FSL_IMX8MP_UART4_IRQ = 29,
+ FSL_IMX8MP_UART5_IRQ = 30,
+ FSL_IMX8MP_UART6_IRQ = 16,
+
+ FSL_IMX8MP_ECSPI1_IRQ = 31,
+ FSL_IMX8MP_ECSPI2_IRQ = 32,
+ FSL_IMX8MP_ECSPI3_IRQ = 33,
+
+ FSL_IMX8MP_I2C1_IRQ = 35,
+ FSL_IMX8MP_I2C2_IRQ = 36,
+ FSL_IMX8MP_I2C3_IRQ = 37,
+ FSL_IMX8MP_I2C4_IRQ = 38,
+
+ FSL_IMX8MP_USB1_IRQ = 40,
+ FSL_IMX8MP_USB2_IRQ = 41,
+
+ FSL_IMX8MP_GPT1_IRQ = 55,
+ FSL_IMX8MP_GPT2_IRQ = 54,
+ FSL_IMX8MP_GPT3_IRQ = 53,
+ FSL_IMX8MP_GPT4_IRQ = 52,
+ FSL_IMX8MP_GPT5_GPT6_IRQ = 51,
+
+ FSL_IMX8MP_GPIO1_LOW_IRQ = 64,
+ FSL_IMX8MP_GPIO1_HIGH_IRQ = 65,
+ FSL_IMX8MP_GPIO2_LOW_IRQ = 66,
+ FSL_IMX8MP_GPIO2_HIGH_IRQ = 67,
+ FSL_IMX8MP_GPIO3_LOW_IRQ = 68,
+ FSL_IMX8MP_GPIO3_HIGH_IRQ = 69,
+ FSL_IMX8MP_GPIO4_LOW_IRQ = 70,
+ FSL_IMX8MP_GPIO4_HIGH_IRQ = 71,
+ FSL_IMX8MP_GPIO5_LOW_IRQ = 72,
+ FSL_IMX8MP_GPIO5_HIGH_IRQ = 73,
+
+ FSL_IMX8MP_I2C5_IRQ = 76,
+ FSL_IMX8MP_I2C6_IRQ = 77,
+
+ FSL_IMX8MP_WDOG1_IRQ = 78,
+ FSL_IMX8MP_WDOG2_IRQ = 79,
+ FSL_IMX8MP_WDOG3_IRQ = 10,
+
+ FSL_IMX8MP_ENET1_MAC_IRQ = 118,
+ FSL_IMX6_ENET1_MAC_1588_IRQ = 121,
+
+ FSL_IMX8MP_PCI_INTA_IRQ = 126,
+ FSL_IMX8MP_PCI_INTB_IRQ = 125,
+ FSL_IMX8MP_PCI_INTC_IRQ = 124,
+ FSL_IMX8MP_PCI_INTD_IRQ = 123,
+ FSL_IMX8MP_PCI_MSI_IRQ = 140,
+};
+
+#endif /* FSL_IMX8MP_H */
diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h
index 4e0d210..5653656 100644
--- a/include/hw/arm/npcm7xx.h
+++ b/include/hw/arm/npcm7xx.h
@@ -23,8 +23,8 @@
#include "hw/gpio/npcm7xx_gpio.h"
#include "hw/i2c/npcm7xx_smbus.h"
#include "hw/mem/npcm7xx_mc.h"
-#include "hw/misc/npcm7xx_clk.h"
-#include "hw/misc/npcm7xx_gcr.h"
+#include "hw/misc/npcm_clk.h"
+#include "hw/misc/npcm_gcr.h"
#include "hw/misc/npcm7xx_mft.h"
#include "hw/misc/npcm7xx_pwm.h"
#include "hw/misc/npcm7xx_rng.h"
@@ -89,8 +89,8 @@ struct NPCM7xxState {
MemoryRegion ram3;
MemoryRegion *dram;
- NPCM7xxGCRState gcr;
- NPCM7xxCLKState clk;
+ NPCMGCRState gcr;
+ NPCMCLKState clk;
NPCM7xxTimerCtrlState tim[3];
NPCM7xxADCState adc;
NPCM7xxPWMState pwm[NPCM7XX_NR_PWM_MODULES];
diff --git a/include/hw/arm/npcm8xx.h b/include/hw/arm/npcm8xx.h
new file mode 100644
index 0000000..a8377db
--- /dev/null
+++ b/include/hw/arm/npcm8xx.h
@@ -0,0 +1,132 @@
+/*
+ * Nuvoton NPCM8xx SoC family.
+ *
+ * Copyright 2022 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM8XX_H
+#define NPCM8XX_H
+
+#include "hw/adc/npcm7xx_adc.h"
+#include "hw/core/split-irq.h"
+#include "hw/cpu/cluster.h"
+#include "hw/gpio/npcm7xx_gpio.h"
+#include "hw/i2c/npcm7xx_smbus.h"
+#include "hw/intc/arm_gic_common.h"
+#include "hw/mem/npcm7xx_mc.h"
+#include "hw/misc/npcm_clk.h"
+#include "hw/misc/npcm_gcr.h"
+#include "hw/misc/npcm7xx_mft.h"
+#include "hw/misc/npcm7xx_pwm.h"
+#include "hw/misc/npcm7xx_rng.h"
+#include "hw/net/npcm_gmac.h"
+#include "hw/net/npcm_pcs.h"
+#include "hw/nvram/npcm7xx_otp.h"
+#include "hw/sd/npcm7xx_sdhci.h"
+#include "hw/timer/npcm7xx_timer.h"
+#include "hw/ssi/npcm7xx_fiu.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/usb/hcd-ohci.h"
+#include "target/arm/cpu.h"
+#include "hw/ssi/npcm_pspi.h"
+
+#define NPCM8XX_MAX_NUM_CPUS (4)
+
+/* The first half of the address space is reserved for DDR4 DRAM. */
+#define NPCM8XX_DRAM_BA (0x00000000)
+#define NPCM8XX_DRAM_SZ (2 * GiB)
+
+/* Magic addresses for setting up direct kernel booting and SMP boot stubs. */
+#define NPCM8XX_LOADER_START (0x00000000) /* Start of SDRAM */
+#define NPCM8XX_SMP_LOADER_START (0xffff0000) /* Boot ROM */
+#define NPCM8XX_SMP_BOOTREG_ADDR (0xf080013c) /* GCR.SCRPAD */
+#define NPCM8XX_BOARD_SETUP_ADDR (0xffff1000) /* Boot ROM */
+
+#define NPCM8XX_NR_PWM_MODULES 3
+
+struct NPCM8xxMachine {
+ MachineState parent_obj;
+
+ /*
+ * PWM fan splitter. Each splitter connects to one PWM output and
+ * multiple MFT inputs.
+ */
+ SplitIRQ fan_splitter[NPCM8XX_NR_PWM_MODULES *
+ NPCM7XX_PWM_PER_MODULE];
+};
+
+
+struct NPCM8xxMachineClass {
+ MachineClass parent_class;
+
+ const char *soc_type;
+};
+
+#define TYPE_NPCM8XX_MACHINE MACHINE_TYPE_NAME("npcm8xx")
+OBJECT_DECLARE_TYPE(NPCM8xxMachine, NPCM8xxMachineClass, NPCM8XX_MACHINE)
+
+struct NPCM8xxState {
+ DeviceState parent_obj;
+
+ ARMCPU cpu[NPCM8XX_MAX_NUM_CPUS];
+ CPUClusterState cpu_cluster;
+ GICState gic;
+
+ MemoryRegion sram;
+ MemoryRegion irom;
+ MemoryRegion ram3;
+ MemoryRegion *dram;
+
+ NPCMGCRState gcr;
+ NPCMCLKState clk;
+ NPCM7xxTimerCtrlState tim[3];
+ NPCM7xxADCState adc;
+ NPCM7xxPWMState pwm[NPCM8XX_NR_PWM_MODULES];
+ NPCM7xxMFTState mft[8];
+ NPCM7xxOTPState fuse_array;
+ NPCM7xxMCState mc;
+ NPCM7xxRNGState rng;
+ NPCM7xxGPIOState gpio[8];
+ NPCM7xxSMBusState smbus[27];
+ EHCISysBusState ehci[2];
+ OHCISysBusState ohci[2];
+ NPCM7xxFIUState fiu[3];
+ NPCMGMACState gmac[4];
+ NPCMPCSState pcs;
+ NPCM7xxSDHCIState mmc;
+ NPCMPSPIState pspi;
+};
+
+struct NPCM8xxClass {
+ DeviceClass parent_class;
+
+ /* Bitmask of modules that are permanently disabled on this chip. */
+ uint32_t disabled_modules;
+ /* Number of CPU cores enabled in this SoC class. */
+ uint32_t num_cpus;
+};
+
+#define TYPE_NPCM8XX "npcm8xx"
+OBJECT_DECLARE_TYPE(NPCM8xxState, NPCM8xxClass, NPCM8XX)
+
+/**
+ * npcm8xx_load_kernel - Loads memory with everything needed to boot
+ * @machine - The machine containing the SoC to be booted.
+ * @soc - The SoC containing the CPU to be booted.
+ *
+ * This will set up the ARM boot info structure for the specific NPCM8xx
+ * derivative and call arm_load_kernel() to set up loading of the kernel, etc.
+ * into memory, if requested by the user.
+ */
+void npcm8xx_load_kernel(MachineState *machine, NPCM8xxState *soc);
+
+#endif /* NPCM8XX_H */
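As the comment on npcm8xx_load_kernel() says, a board model calls it once the SoC has been created and realized. A heavily abbreviated sketch, in which the board function name is invented and SoC property wiring (DRAM, etc.) is omitted:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/arm/npcm8xx.h"

static void npcm8xx_example_board_init(MachineState *machine)
{
    NPCM8xxState *soc = NPCM8XX(object_new(TYPE_NPCM8XX));

    object_property_add_child(OBJECT(machine), "soc", OBJECT(soc));
    /* ... set soc->dram and other properties before realizing ... */
    qdev_realize(DEVICE(soc), NULL, &error_fatal);

    npcm8xx_load_kernel(machine, soc);
}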
diff --git a/include/hw/arm/nrf51_soc.h b/include/hw/arm/nrf51_soc.h
index e52a56e..f88ab1b 100644
--- a/include/hw/arm/nrf51_soc.h
+++ b/include/hw/arm/nrf51_soc.h
@@ -30,7 +30,7 @@ struct NRF51State {
SysBusDevice parent_obj;
/*< public >*/
- ARMv7MState cpu;
+ ARMv7MState armv7m;
NRF51UARTState uart;
NRF51RNGState rng;
diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h
index 40ee8ea..bdb2e88 100644
--- a/include/hw/arm/omap.h
+++ b/include/hw/arm/omap.h
@@ -20,39 +20,29 @@
#ifndef HW_ARM_OMAP_H
#define HW_ARM_OMAP_H
-#include "exec/memory.h"
-#include "hw/input/tsc2xxx.h"
+#include "system/memory.h"
#include "target/arm/cpu-qom.h"
#include "qemu/log.h"
#include "qom/object.h"
-# define OMAP_EMIFS_BASE 0x00000000
-# define OMAP2_Q0_BASE 0x00000000
-# define OMAP_CS0_BASE 0x00000000
-# define OMAP_CS1_BASE 0x04000000
-# define OMAP_CS2_BASE 0x08000000
-# define OMAP_CS3_BASE 0x0c000000
-# define OMAP_EMIFF_BASE 0x10000000
-# define OMAP_IMIF_BASE 0x20000000
-# define OMAP_LOCALBUS_BASE 0x30000000
-# define OMAP2_Q1_BASE 0x40000000
-# define OMAP2_L4_BASE 0x48000000
-# define OMAP2_SRAM_BASE 0x40200000
-# define OMAP2_L3_BASE 0x68000000
-# define OMAP2_Q2_BASE 0x80000000
-# define OMAP2_Q3_BASE 0xc0000000
-# define OMAP_MPUI_BASE 0xe1000000
-
-# define OMAP730_SRAM_SIZE 0x00032000
-# define OMAP15XX_SRAM_SIZE 0x00030000
-# define OMAP16XX_SRAM_SIZE 0x00004000
-# define OMAP1611_SRAM_SIZE 0x0003e800
-# define OMAP242X_SRAM_SIZE 0x000a0000
-# define OMAP243X_SRAM_SIZE 0x00010000
-# define OMAP_CS0_SIZE 0x04000000
-# define OMAP_CS1_SIZE 0x04000000
-# define OMAP_CS2_SIZE 0x04000000
-# define OMAP_CS3_SIZE 0x04000000
+#define OMAP_EMIFS_BASE 0x00000000
+#define OMAP_CS0_BASE 0x00000000
+#define OMAP_CS1_BASE 0x04000000
+#define OMAP_CS2_BASE 0x08000000
+#define OMAP_CS3_BASE 0x0c000000
+#define OMAP_EMIFF_BASE 0x10000000
+#define OMAP_IMIF_BASE 0x20000000
+#define OMAP_LOCALBUS_BASE 0x30000000
+#define OMAP_MPUI_BASE 0xe1000000
+
+#define OMAP730_SRAM_SIZE 0x00032000
+#define OMAP15XX_SRAM_SIZE 0x00030000
+#define OMAP16XX_SRAM_SIZE 0x00004000
+#define OMAP1611_SRAM_SIZE 0x0003e800
+#define OMAP_CS0_SIZE 0x04000000
+#define OMAP_CS1_SIZE 0x04000000
+#define OMAP_CS2_SIZE 0x04000000
+#define OMAP_CS3_SIZE 0x04000000
/* omap_clk.c */
struct omap_mpu_state_s;
@@ -69,7 +59,7 @@ int64_t omap_clk_getrate(omap_clk clk);
void omap_clk_reparent(omap_clk clk, omap_clk parent);
/* omap_intc.c */
-#define TYPE_OMAP_INTC "common-omap-intc"
+#define TYPE_OMAP_INTC "omap-intc"
typedef struct OMAPIntcState OMAPIntcState;
DECLARE_INSTANCE_CHECKER(OMAPIntcState, OMAP_INTC, TYPE_OMAP_INTC)
@@ -106,385 +96,241 @@ typedef struct Omap1GpioState Omap1GpioState;
DECLARE_INSTANCE_CHECKER(Omap1GpioState, OMAP1_GPIO,
TYPE_OMAP1_GPIO)
-#define TYPE_OMAP2_GPIO "omap2-gpio"
-typedef struct Omap2GpioState Omap2GpioState;
-DECLARE_INSTANCE_CHECKER(Omap2GpioState, OMAP2_GPIO,
- TYPE_OMAP2_GPIO)
-
/* TODO: clock framework (see above) */
void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk);
-void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk);
-void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk);
-
-/* OMAP2 l4 Interconnect */
-struct omap_l4_s;
-struct omap_l4_region_s {
- hwaddr offset;
- size_t size;
- int access;
-};
-struct omap_l4_agent_info_s {
- int ta;
- int region;
- int regions;
- int ta_region;
-};
-struct omap_target_agent_s {
- MemoryRegion iomem;
- struct omap_l4_s *bus;
- int regions;
- const struct omap_l4_region_s *start;
- hwaddr base;
- uint32_t component;
- uint32_t control;
- uint32_t status;
-};
-struct omap_l4_s *omap_l4_init(MemoryRegion *address_space,
- hwaddr base, int ta_num);
-
-struct omap_target_agent_s;
-struct omap_target_agent_s *omap_l4ta_get(
- struct omap_l4_s *bus,
- const struct omap_l4_region_s *regions,
- const struct omap_l4_agent_info_s *agents,
- int cs);
-hwaddr omap_l4_attach(struct omap_target_agent_s *ta,
- int region, MemoryRegion *mr);
-hwaddr omap_l4_region_base(struct omap_target_agent_s *ta,
- int region);
-hwaddr omap_l4_region_size(struct omap_target_agent_s *ta,
- int region);
-
-/* OMAP2 SDRAM controller */
-struct omap_sdrc_s;
-struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem,
- hwaddr base);
-void omap_sdrc_reset(struct omap_sdrc_s *s);
-
-/* OMAP2 general purpose memory controller */
-struct omap_gpmc_s;
-struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
- hwaddr base,
- qemu_irq irq, qemu_irq drq);
-void omap_gpmc_reset(struct omap_gpmc_s *s);
-void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem);
-void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand);
-
/*
* Common IRQ numbers for level 1 interrupt handler
* See /usr/include/asm-arm/arch-omap/irqs.h in Linux.
*/
-# define OMAP_INT_CAMERA 1
-# define OMAP_INT_FIQ 3
-# define OMAP_INT_RTDX 6
-# define OMAP_INT_DSP_MMU_ABORT 7
-# define OMAP_INT_HOST 8
-# define OMAP_INT_ABORT 9
-# define OMAP_INT_BRIDGE_PRIV 13
-# define OMAP_INT_GPIO_BANK1 14
-# define OMAP_INT_UART3 15
-# define OMAP_INT_TIMER3 16
-# define OMAP_INT_DMA_CH0_6 19
-# define OMAP_INT_DMA_CH1_7 20
-# define OMAP_INT_DMA_CH2_8 21
-# define OMAP_INT_DMA_CH3 22
-# define OMAP_INT_DMA_CH4 23
-# define OMAP_INT_DMA_CH5 24
-# define OMAP_INT_DMA_LCD 25
-# define OMAP_INT_TIMER1 26
-# define OMAP_INT_WD_TIMER 27
-# define OMAP_INT_BRIDGE_PUB 28
-# define OMAP_INT_TIMER2 30
-# define OMAP_INT_LCD_CTRL 31
+#define OMAP_INT_CAMERA 1
+#define OMAP_INT_FIQ 3
+#define OMAP_INT_RTDX 6
+#define OMAP_INT_DSP_MMU_ABORT 7
+#define OMAP_INT_HOST 8
+#define OMAP_INT_ABORT 9
+#define OMAP_INT_BRIDGE_PRIV 13
+#define OMAP_INT_GPIO_BANK1 14
+#define OMAP_INT_UART3 15
+#define OMAP_INT_TIMER3 16
+#define OMAP_INT_DMA_CH0_6 19
+#define OMAP_INT_DMA_CH1_7 20
+#define OMAP_INT_DMA_CH2_8 21
+#define OMAP_INT_DMA_CH3 22
+#define OMAP_INT_DMA_CH4 23
+#define OMAP_INT_DMA_CH5 24
+#define OMAP_INT_DMA_LCD 25
+#define OMAP_INT_TIMER1 26
+#define OMAP_INT_WD_TIMER 27
+#define OMAP_INT_BRIDGE_PUB 28
+#define OMAP_INT_TIMER2 30
+#define OMAP_INT_LCD_CTRL 31
/*
* Common OMAP-15xx IRQ numbers for level 1 interrupt handler
*/
-# define OMAP_INT_15XX_IH2_IRQ 0
-# define OMAP_INT_15XX_LB_MMU 17
-# define OMAP_INT_15XX_LOCAL_BUS 29
+#define OMAP_INT_15XX_IH2_IRQ 0
+#define OMAP_INT_15XX_LB_MMU 17
+#define OMAP_INT_15XX_LOCAL_BUS 29
/*
* OMAP-1510 specific IRQ numbers for level 1 interrupt handler
*/
-# define OMAP_INT_1510_SPI_TX 4
-# define OMAP_INT_1510_SPI_RX 5
-# define OMAP_INT_1510_DSP_MAILBOX1 10
-# define OMAP_INT_1510_DSP_MAILBOX2 11
+#define OMAP_INT_1510_SPI_TX 4
+#define OMAP_INT_1510_SPI_RX 5
+#define OMAP_INT_1510_DSP_MAILBOX1 10
+#define OMAP_INT_1510_DSP_MAILBOX2 11
/*
* OMAP-310 specific IRQ numbers for level 1 interrupt handler
*/
-# define OMAP_INT_310_McBSP2_TX 4
-# define OMAP_INT_310_McBSP2_RX 5
-# define OMAP_INT_310_HSB_MAILBOX1 12
-# define OMAP_INT_310_HSAB_MMU 18
+#define OMAP_INT_310_McBSP2_TX 4
+#define OMAP_INT_310_McBSP2_RX 5
+#define OMAP_INT_310_HSB_MAILBOX1 12
+#define OMAP_INT_310_HSAB_MMU 18
/*
* OMAP-1610 specific IRQ numbers for level 1 interrupt handler
*/
-# define OMAP_INT_1610_IH2_IRQ 0
-# define OMAP_INT_1610_IH2_FIQ 2
-# define OMAP_INT_1610_McBSP2_TX 4
-# define OMAP_INT_1610_McBSP2_RX 5
-# define OMAP_INT_1610_DSP_MAILBOX1 10
-# define OMAP_INT_1610_DSP_MAILBOX2 11
-# define OMAP_INT_1610_LCD_LINE 12
-# define OMAP_INT_1610_GPTIMER1 17
-# define OMAP_INT_1610_GPTIMER2 18
-# define OMAP_INT_1610_SSR_FIFO_0 29
+#define OMAP_INT_1610_IH2_IRQ 0
+#define OMAP_INT_1610_IH2_FIQ 2
+#define OMAP_INT_1610_McBSP2_TX 4
+#define OMAP_INT_1610_McBSP2_RX 5
+#define OMAP_INT_1610_DSP_MAILBOX1 10
+#define OMAP_INT_1610_DSP_MAILBOX2 11
+#define OMAP_INT_1610_LCD_LINE 12
+#define OMAP_INT_1610_GPTIMER1 17
+#define OMAP_INT_1610_GPTIMER2 18
+#define OMAP_INT_1610_SSR_FIFO_0 29
/*
* OMAP-730 specific IRQ numbers for level 1 interrupt handler
*/
-# define OMAP_INT_730_IH2_FIQ 0
-# define OMAP_INT_730_IH2_IRQ 1
-# define OMAP_INT_730_USB_NON_ISO 2
-# define OMAP_INT_730_USB_ISO 3
-# define OMAP_INT_730_ICR 4
-# define OMAP_INT_730_EAC 5
-# define OMAP_INT_730_GPIO_BANK1 6
-# define OMAP_INT_730_GPIO_BANK2 7
-# define OMAP_INT_730_GPIO_BANK3 8
-# define OMAP_INT_730_McBSP2TX 10
-# define OMAP_INT_730_McBSP2RX 11
-# define OMAP_INT_730_McBSP2RX_OVF 12
-# define OMAP_INT_730_LCD_LINE 14
-# define OMAP_INT_730_GSM_PROTECT 15
-# define OMAP_INT_730_TIMER3 16
-# define OMAP_INT_730_GPIO_BANK5 17
-# define OMAP_INT_730_GPIO_BANK6 18
-# define OMAP_INT_730_SPGIO_WR 29
+#define OMAP_INT_730_IH2_FIQ 0
+#define OMAP_INT_730_IH2_IRQ 1
+#define OMAP_INT_730_USB_NON_ISO 2
+#define OMAP_INT_730_USB_ISO 3
+#define OMAP_INT_730_ICR 4
+#define OMAP_INT_730_EAC 5
+#define OMAP_INT_730_GPIO_BANK1 6
+#define OMAP_INT_730_GPIO_BANK2 7
+#define OMAP_INT_730_GPIO_BANK3 8
+#define OMAP_INT_730_McBSP2TX 10
+#define OMAP_INT_730_McBSP2RX 11
+#define OMAP_INT_730_McBSP2RX_OVF 12
+#define OMAP_INT_730_LCD_LINE 14
+#define OMAP_INT_730_GSM_PROTECT 15
+#define OMAP_INT_730_TIMER3 16
+#define OMAP_INT_730_GPIO_BANK5 17
+#define OMAP_INT_730_GPIO_BANK6 18
+#define OMAP_INT_730_SPGIO_WR 29
/*
* Common IRQ numbers for level 2 interrupt handler
*/
-# define OMAP_INT_KEYBOARD 1
-# define OMAP_INT_uWireTX 2
-# define OMAP_INT_uWireRX 3
-# define OMAP_INT_I2C 4
-# define OMAP_INT_MPUIO 5
-# define OMAP_INT_USB_HHC_1 6
-# define OMAP_INT_McBSP3TX 10
-# define OMAP_INT_McBSP3RX 11
-# define OMAP_INT_McBSP1TX 12
-# define OMAP_INT_McBSP1RX 13
-# define OMAP_INT_UART1 14
-# define OMAP_INT_UART2 15
-# define OMAP_INT_USB_W2FC 20
-# define OMAP_INT_1WIRE 21
-# define OMAP_INT_OS_TIMER 22
-# define OMAP_INT_OQN 23
-# define OMAP_INT_GAUGE_32K 24
-# define OMAP_INT_RTC_TIMER 25
-# define OMAP_INT_RTC_ALARM 26
-# define OMAP_INT_DSP_MMU 28
+#define OMAP_INT_KEYBOARD 1
+#define OMAP_INT_uWireTX 2
+#define OMAP_INT_uWireRX 3
+#define OMAP_INT_I2C 4
+#define OMAP_INT_MPUIO 5
+#define OMAP_INT_USB_HHC_1 6
+#define OMAP_INT_McBSP3TX 10
+#define OMAP_INT_McBSP3RX 11
+#define OMAP_INT_McBSP1TX 12
+#define OMAP_INT_McBSP1RX 13
+#define OMAP_INT_UART1 14
+#define OMAP_INT_UART2 15
+#define OMAP_INT_USB_W2FC 20
+#define OMAP_INT_1WIRE 21
+#define OMAP_INT_OS_TIMER 22
+#define OMAP_INT_OQN 23
+#define OMAP_INT_GAUGE_32K 24
+#define OMAP_INT_RTC_TIMER 25
+#define OMAP_INT_RTC_ALARM 26
+#define OMAP_INT_DSP_MMU 28
/*
* OMAP-1510 specific IRQ numbers for level 2 interrupt handler
*/
-# define OMAP_INT_1510_BT_MCSI1TX 16
-# define OMAP_INT_1510_BT_MCSI1RX 17
-# define OMAP_INT_1510_SoSSI_MATCH 19
-# define OMAP_INT_1510_MEM_STICK 27
-# define OMAP_INT_1510_COM_SPI_RO 31
+#define OMAP_INT_1510_BT_MCSI1TX 16
+#define OMAP_INT_1510_BT_MCSI1RX 17
+#define OMAP_INT_1510_SoSSI_MATCH 19
+#define OMAP_INT_1510_MEM_STICK 27
+#define OMAP_INT_1510_COM_SPI_RO 31
/*
* OMAP-310 specific IRQ numbers for level 2 interrupt handler
*/
-# define OMAP_INT_310_FAC 0
-# define OMAP_INT_310_USB_HHC_2 7
-# define OMAP_INT_310_MCSI1_FE 16
-# define OMAP_INT_310_MCSI2_FE 17
-# define OMAP_INT_310_USB_W2FC_ISO 29
-# define OMAP_INT_310_USB_W2FC_NON_ISO 30
-# define OMAP_INT_310_McBSP2RX_OF 31
+#define OMAP_INT_310_FAC 0
+#define OMAP_INT_310_USB_HHC_2 7
+#define OMAP_INT_310_MCSI1_FE 16
+#define OMAP_INT_310_MCSI2_FE 17
+#define OMAP_INT_310_USB_W2FC_ISO 29
+#define OMAP_INT_310_USB_W2FC_NON_ISO 30
+#define OMAP_INT_310_McBSP2RX_OF 31
/*
* OMAP-1610 specific IRQ numbers for level 2 interrupt handler
*/
-# define OMAP_INT_1610_FAC 0
-# define OMAP_INT_1610_USB_HHC_2 7
-# define OMAP_INT_1610_USB_OTG 8
-# define OMAP_INT_1610_SoSSI 9
-# define OMAP_INT_1610_BT_MCSI1TX 16
-# define OMAP_INT_1610_BT_MCSI1RX 17
-# define OMAP_INT_1610_SoSSI_MATCH 19
-# define OMAP_INT_1610_MEM_STICK 27
-# define OMAP_INT_1610_McBSP2RX_OF 31
-# define OMAP_INT_1610_STI 32
-# define OMAP_INT_1610_STI_WAKEUP 33
-# define OMAP_INT_1610_GPTIMER3 34
-# define OMAP_INT_1610_GPTIMER4 35
-# define OMAP_INT_1610_GPTIMER5 36
-# define OMAP_INT_1610_GPTIMER6 37
-# define OMAP_INT_1610_GPTIMER7 38
-# define OMAP_INT_1610_GPTIMER8 39
-# define OMAP_INT_1610_GPIO_BANK2 40
-# define OMAP_INT_1610_GPIO_BANK3 41
-# define OMAP_INT_1610_MMC2 42
-# define OMAP_INT_1610_CF 43
-# define OMAP_INT_1610_WAKE_UP_REQ 46
-# define OMAP_INT_1610_GPIO_BANK4 48
-# define OMAP_INT_1610_SPI 49
-# define OMAP_INT_1610_DMA_CH6 53
-# define OMAP_INT_1610_DMA_CH7 54
-# define OMAP_INT_1610_DMA_CH8 55
-# define OMAP_INT_1610_DMA_CH9 56
-# define OMAP_INT_1610_DMA_CH10 57
-# define OMAP_INT_1610_DMA_CH11 58
-# define OMAP_INT_1610_DMA_CH12 59
-# define OMAP_INT_1610_DMA_CH13 60
-# define OMAP_INT_1610_DMA_CH14 61
-# define OMAP_INT_1610_DMA_CH15 62
-# define OMAP_INT_1610_NAND 63
+#define OMAP_INT_1610_FAC 0
+#define OMAP_INT_1610_USB_HHC_2 7
+#define OMAP_INT_1610_USB_OTG 8
+#define OMAP_INT_1610_SoSSI 9
+#define OMAP_INT_1610_BT_MCSI1TX 16
+#define OMAP_INT_1610_BT_MCSI1RX 17
+#define OMAP_INT_1610_SoSSI_MATCH 19
+#define OMAP_INT_1610_MEM_STICK 27
+#define OMAP_INT_1610_McBSP2RX_OF 31
+#define OMAP_INT_1610_STI 32
+#define OMAP_INT_1610_STI_WAKEUP 33
+#define OMAP_INT_1610_GPTIMER3 34
+#define OMAP_INT_1610_GPTIMER4 35
+#define OMAP_INT_1610_GPTIMER5 36
+#define OMAP_INT_1610_GPTIMER6 37
+#define OMAP_INT_1610_GPTIMER7 38
+#define OMAP_INT_1610_GPTIMER8 39
+#define OMAP_INT_1610_GPIO_BANK2 40
+#define OMAP_INT_1610_GPIO_BANK3 41
+#define OMAP_INT_1610_MMC2 42
+#define OMAP_INT_1610_CF 43
+#define OMAP_INT_1610_WAKE_UP_REQ 46
+#define OMAP_INT_1610_GPIO_BANK4 48
+#define OMAP_INT_1610_SPI 49
+#define OMAP_INT_1610_DMA_CH6 53
+#define OMAP_INT_1610_DMA_CH7 54
+#define OMAP_INT_1610_DMA_CH8 55
+#define OMAP_INT_1610_DMA_CH9 56
+#define OMAP_INT_1610_DMA_CH10 57
+#define OMAP_INT_1610_DMA_CH11 58
+#define OMAP_INT_1610_DMA_CH12 59
+#define OMAP_INT_1610_DMA_CH13 60
+#define OMAP_INT_1610_DMA_CH14 61
+#define OMAP_INT_1610_DMA_CH15 62
+#define OMAP_INT_1610_NAND 63
/*
* OMAP-730 specific IRQ numbers for level 2 interrupt handler
*/
-# define OMAP_INT_730_HW_ERRORS 0
-# define OMAP_INT_730_NFIQ_PWR_FAIL 1
-# define OMAP_INT_730_CFCD 2
-# define OMAP_INT_730_CFIREQ 3
-# define OMAP_INT_730_I2C 4
-# define OMAP_INT_730_PCC 5
-# define OMAP_INT_730_MPU_EXT_NIRQ 6
-# define OMAP_INT_730_SPI_100K_1 7
-# define OMAP_INT_730_SYREN_SPI 8
-# define OMAP_INT_730_VLYNQ 9
-# define OMAP_INT_730_GPIO_BANK4 10
-# define OMAP_INT_730_McBSP1TX 11
-# define OMAP_INT_730_McBSP1RX 12
-# define OMAP_INT_730_McBSP1RX_OF 13
-# define OMAP_INT_730_UART_MODEM_IRDA_2 14
-# define OMAP_INT_730_UART_MODEM_1 15
-# define OMAP_INT_730_MCSI 16
-# define OMAP_INT_730_uWireTX 17
-# define OMAP_INT_730_uWireRX 18
-# define OMAP_INT_730_SMC_CD 19
-# define OMAP_INT_730_SMC_IREQ 20
-# define OMAP_INT_730_HDQ_1WIRE 21
-# define OMAP_INT_730_TIMER32K 22
-# define OMAP_INT_730_MMC_SDIO 23
-# define OMAP_INT_730_UPLD 24
-# define OMAP_INT_730_USB_HHC_1 27
-# define OMAP_INT_730_USB_HHC_2 28
-# define OMAP_INT_730_USB_GENI 29
-# define OMAP_INT_730_USB_OTG 30
-# define OMAP_INT_730_CAMERA_IF 31
-# define OMAP_INT_730_RNG 32
-# define OMAP_INT_730_DUAL_MODE_TIMER 33
-# define OMAP_INT_730_DBB_RF_EN 34
-# define OMAP_INT_730_MPUIO_KEYPAD 35
-# define OMAP_INT_730_SHA1_MD5 36
-# define OMAP_INT_730_SPI_100K_2 37
-# define OMAP_INT_730_RNG_IDLE 38
-# define OMAP_INT_730_MPUIO 39
-# define OMAP_INT_730_LLPC_LCD_CTRL_OFF 40
-# define OMAP_INT_730_LLPC_OE_FALLING 41
-# define OMAP_INT_730_LLPC_OE_RISING 42
-# define OMAP_INT_730_LLPC_VSYNC 43
-# define OMAP_INT_730_WAKE_UP_REQ 46
-# define OMAP_INT_730_DMA_CH6 53
-# define OMAP_INT_730_DMA_CH7 54
-# define OMAP_INT_730_DMA_CH8 55
-# define OMAP_INT_730_DMA_CH9 56
-# define OMAP_INT_730_DMA_CH10 57
-# define OMAP_INT_730_DMA_CH11 58
-# define OMAP_INT_730_DMA_CH12 59
-# define OMAP_INT_730_DMA_CH13 60
-# define OMAP_INT_730_DMA_CH14 61
-# define OMAP_INT_730_DMA_CH15 62
-# define OMAP_INT_730_NAND 63
-
-/*
- * OMAP-24xx common IRQ numbers
- */
-# define OMAP_INT_24XX_STI 4
-# define OMAP_INT_24XX_SYS_NIRQ 7
-# define OMAP_INT_24XX_L3_IRQ 10
-# define OMAP_INT_24XX_PRCM_MPU_IRQ 11
-# define OMAP_INT_24XX_SDMA_IRQ0 12
-# define OMAP_INT_24XX_SDMA_IRQ1 13
-# define OMAP_INT_24XX_SDMA_IRQ2 14
-# define OMAP_INT_24XX_SDMA_IRQ3 15
-# define OMAP_INT_243X_MCBSP2_IRQ 16
-# define OMAP_INT_243X_MCBSP3_IRQ 17
-# define OMAP_INT_243X_MCBSP4_IRQ 18
-# define OMAP_INT_243X_MCBSP5_IRQ 19
-# define OMAP_INT_24XX_GPMC_IRQ 20
-# define OMAP_INT_24XX_GUFFAW_IRQ 21
-# define OMAP_INT_24XX_IVA_IRQ 22
-# define OMAP_INT_24XX_EAC_IRQ 23
-# define OMAP_INT_24XX_CAM_IRQ 24
-# define OMAP_INT_24XX_DSS_IRQ 25
-# define OMAP_INT_24XX_MAIL_U0_MPU 26
-# define OMAP_INT_24XX_DSP_UMA 27
-# define OMAP_INT_24XX_DSP_MMU 28
-# define OMAP_INT_24XX_GPIO_BANK1 29
-# define OMAP_INT_24XX_GPIO_BANK2 30
-# define OMAP_INT_24XX_GPIO_BANK3 31
-# define OMAP_INT_24XX_GPIO_BANK4 32
-# define OMAP_INT_243X_GPIO_BANK5 33
-# define OMAP_INT_24XX_MAIL_U3_MPU 34
-# define OMAP_INT_24XX_WDT3 35
-# define OMAP_INT_24XX_WDT4 36
-# define OMAP_INT_24XX_GPTIMER1 37
-# define OMAP_INT_24XX_GPTIMER2 38
-# define OMAP_INT_24XX_GPTIMER3 39
-# define OMAP_INT_24XX_GPTIMER4 40
-# define OMAP_INT_24XX_GPTIMER5 41
-# define OMAP_INT_24XX_GPTIMER6 42
-# define OMAP_INT_24XX_GPTIMER7 43
-# define OMAP_INT_24XX_GPTIMER8 44
-# define OMAP_INT_24XX_GPTIMER9 45
-# define OMAP_INT_24XX_GPTIMER10 46
-# define OMAP_INT_24XX_GPTIMER11 47
-# define OMAP_INT_24XX_GPTIMER12 48
-# define OMAP_INT_24XX_PKA_IRQ 50
-# define OMAP_INT_24XX_SHA1MD5_IRQ 51
-# define OMAP_INT_24XX_RNG_IRQ 52
-# define OMAP_INT_24XX_MG_IRQ 53
-# define OMAP_INT_24XX_I2C1_IRQ 56
-# define OMAP_INT_24XX_I2C2_IRQ 57
-# define OMAP_INT_24XX_MCBSP1_IRQ_TX 59
-# define OMAP_INT_24XX_MCBSP1_IRQ_RX 60
-# define OMAP_INT_24XX_MCBSP2_IRQ_TX 62
-# define OMAP_INT_24XX_MCBSP2_IRQ_RX 63
-# define OMAP_INT_243X_MCBSP1_IRQ 64
-# define OMAP_INT_24XX_MCSPI1_IRQ 65
-# define OMAP_INT_24XX_MCSPI2_IRQ 66
-# define OMAP_INT_24XX_SSI1_IRQ0 67
-# define OMAP_INT_24XX_SSI1_IRQ1 68
-# define OMAP_INT_24XX_SSI2_IRQ0 69
-# define OMAP_INT_24XX_SSI2_IRQ1 70
-# define OMAP_INT_24XX_SSI_GDD_IRQ 71
-# define OMAP_INT_24XX_UART1_IRQ 72
-# define OMAP_INT_24XX_UART2_IRQ 73
-# define OMAP_INT_24XX_UART3_IRQ 74
-# define OMAP_INT_24XX_USB_IRQ_GEN 75
-# define OMAP_INT_24XX_USB_IRQ_NISO 76
-# define OMAP_INT_24XX_USB_IRQ_ISO 77
-# define OMAP_INT_24XX_USB_IRQ_HGEN 78
-# define OMAP_INT_24XX_USB_IRQ_HSOF 79
-# define OMAP_INT_24XX_USB_IRQ_OTG 80
-# define OMAP_INT_24XX_VLYNQ_IRQ 81
-# define OMAP_INT_24XX_MMC_IRQ 83
-# define OMAP_INT_24XX_MS_IRQ 84
-# define OMAP_INT_24XX_FAC_IRQ 85
-# define OMAP_INT_24XX_MCSPI3_IRQ 91
-# define OMAP_INT_243X_HS_USB_MC 92
-# define OMAP_INT_243X_HS_USB_DMA 93
-# define OMAP_INT_243X_CARKIT 94
-# define OMAP_INT_34XX_GPTIMER12 95
+#define OMAP_INT_730_HW_ERRORS 0
+#define OMAP_INT_730_NFIQ_PWR_FAIL 1
+#define OMAP_INT_730_CFCD 2
+#define OMAP_INT_730_CFIREQ 3
+#define OMAP_INT_730_I2C 4
+#define OMAP_INT_730_PCC 5
+#define OMAP_INT_730_MPU_EXT_NIRQ 6
+#define OMAP_INT_730_SPI_100K_1 7
+#define OMAP_INT_730_SYREN_SPI 8
+#define OMAP_INT_730_VLYNQ 9
+#define OMAP_INT_730_GPIO_BANK4 10
+#define OMAP_INT_730_McBSP1TX 11
+#define OMAP_INT_730_McBSP1RX 12
+#define OMAP_INT_730_McBSP1RX_OF 13
+#define OMAP_INT_730_UART_MODEM_IRDA_2 14
+#define OMAP_INT_730_UART_MODEM_1 15
+#define OMAP_INT_730_MCSI 16
+#define OMAP_INT_730_uWireTX 17
+#define OMAP_INT_730_uWireRX 18
+#define OMAP_INT_730_SMC_CD 19
+#define OMAP_INT_730_SMC_IREQ 20
+#define OMAP_INT_730_HDQ_1WIRE 21
+#define OMAP_INT_730_TIMER32K 22
+#define OMAP_INT_730_MMC_SDIO 23
+#define OMAP_INT_730_UPLD 24
+#define OMAP_INT_730_USB_HHC_1 27
+#define OMAP_INT_730_USB_HHC_2 28
+#define OMAP_INT_730_USB_GENI 29
+#define OMAP_INT_730_USB_OTG 30
+#define OMAP_INT_730_CAMERA_IF 31
+#define OMAP_INT_730_RNG 32
+#define OMAP_INT_730_DUAL_MODE_TIMER 33
+#define OMAP_INT_730_DBB_RF_EN 34
+#define OMAP_INT_730_MPUIO_KEYPAD 35
+#define OMAP_INT_730_SHA1_MD5 36
+#define OMAP_INT_730_SPI_100K_2 37
+#define OMAP_INT_730_RNG_IDLE 38
+#define OMAP_INT_730_MPUIO 39
+#define OMAP_INT_730_LLPC_LCD_CTRL_OFF 40
+#define OMAP_INT_730_LLPC_OE_FALLING 41
+#define OMAP_INT_730_LLPC_OE_RISING 42
+#define OMAP_INT_730_LLPC_VSYNC 43
+#define OMAP_INT_730_WAKE_UP_REQ 46
+#define OMAP_INT_730_DMA_CH6 53
+#define OMAP_INT_730_DMA_CH7 54
+#define OMAP_INT_730_DMA_CH8 55
+#define OMAP_INT_730_DMA_CH9 56
+#define OMAP_INT_730_DMA_CH10 57
+#define OMAP_INT_730_DMA_CH11 58
+#define OMAP_INT_730_DMA_CH12 59
+#define OMAP_INT_730_DMA_CH13 60
+#define OMAP_INT_730_DMA_CH14 61
+#define OMAP_INT_730_DMA_CH15 62
+#define OMAP_INT_730_NAND 63
/* omap_dma.c */
enum omap_dma_model {
omap_dma_3_0,
omap_dma_3_1,
omap_dma_3_2,
- omap_dma_4,
};
struct soc_dma_s;
@@ -507,9 +353,9 @@ struct dma_irq_map {
enum omap_dma_port {
emiff = 0,
emifs,
- imif, /* omap16xx: ocp_t1 */
+ imif, /* omap16xx: ocp_t1 */
tipb,
- local, /* omap16xx: ocp_t2 */
+ local, /* omap16xx: ocp_t2 */
tipb_mpui,
__omap_dma_port_last,
};
@@ -572,157 +418,71 @@ struct omap_dma_lcd_channel_s {
* DMA request numbers for OMAP1
* See /usr/include/asm-arm/arch-omap/dma.h in Linux.
*/
-# define OMAP_DMA_NO_DEVICE 0
-# define OMAP_DMA_MCSI1_TX 1
-# define OMAP_DMA_MCSI1_RX 2
-# define OMAP_DMA_I2C_RX 3
-# define OMAP_DMA_I2C_TX 4
-# define OMAP_DMA_EXT_NDMA_REQ0 5
-# define OMAP_DMA_EXT_NDMA_REQ1 6
-# define OMAP_DMA_UWIRE_TX 7
-# define OMAP_DMA_MCBSP1_TX 8
-# define OMAP_DMA_MCBSP1_RX 9
-# define OMAP_DMA_MCBSP3_TX 10
-# define OMAP_DMA_MCBSP3_RX 11
-# define OMAP_DMA_UART1_TX 12
-# define OMAP_DMA_UART1_RX 13
-# define OMAP_DMA_UART2_TX 14
-# define OMAP_DMA_UART2_RX 15
-# define OMAP_DMA_MCBSP2_TX 16
-# define OMAP_DMA_MCBSP2_RX 17
-# define OMAP_DMA_UART3_TX 18
-# define OMAP_DMA_UART3_RX 19
-# define OMAP_DMA_CAMERA_IF_RX 20
-# define OMAP_DMA_MMC_TX 21
-# define OMAP_DMA_MMC_RX 22
-# define OMAP_DMA_NAND 23 /* Not in OMAP310 */
-# define OMAP_DMA_IRQ_LCD_LINE 24 /* Not in OMAP310 */
-# define OMAP_DMA_MEMORY_STICK 25 /* Not in OMAP310 */
-# define OMAP_DMA_USB_W2FC_RX0 26
-# define OMAP_DMA_USB_W2FC_RX1 27
-# define OMAP_DMA_USB_W2FC_RX2 28
-# define OMAP_DMA_USB_W2FC_TX0 29
-# define OMAP_DMA_USB_W2FC_TX1 30
-# define OMAP_DMA_USB_W2FC_TX2 31
+#define OMAP_DMA_NO_DEVICE 0
+#define OMAP_DMA_MCSI1_TX 1
+#define OMAP_DMA_MCSI1_RX 2
+#define OMAP_DMA_I2C_RX 3
+#define OMAP_DMA_I2C_TX 4
+#define OMAP_DMA_EXT_NDMA_REQ0 5
+#define OMAP_DMA_EXT_NDMA_REQ1 6
+#define OMAP_DMA_UWIRE_TX 7
+#define OMAP_DMA_MCBSP1_TX 8
+#define OMAP_DMA_MCBSP1_RX 9
+#define OMAP_DMA_MCBSP3_TX 10
+#define OMAP_DMA_MCBSP3_RX 11
+#define OMAP_DMA_UART1_TX 12
+#define OMAP_DMA_UART1_RX 13
+#define OMAP_DMA_UART2_TX 14
+#define OMAP_DMA_UART2_RX 15
+#define OMAP_DMA_MCBSP2_TX 16
+#define OMAP_DMA_MCBSP2_RX 17
+#define OMAP_DMA_UART3_TX 18
+#define OMAP_DMA_UART3_RX 19
+#define OMAP_DMA_CAMERA_IF_RX 20
+#define OMAP_DMA_MMC_TX 21
+#define OMAP_DMA_MMC_RX 22
+#define OMAP_DMA_NAND 23 /* Not in OMAP310 */
+#define OMAP_DMA_IRQ_LCD_LINE 24 /* Not in OMAP310 */
+#define OMAP_DMA_MEMORY_STICK 25 /* Not in OMAP310 */
+#define OMAP_DMA_USB_W2FC_RX0 26
+#define OMAP_DMA_USB_W2FC_RX1 27
+#define OMAP_DMA_USB_W2FC_RX2 28
+#define OMAP_DMA_USB_W2FC_TX0 29
+#define OMAP_DMA_USB_W2FC_TX1 30
+#define OMAP_DMA_USB_W2FC_TX2 31
/* These are only for 1610 */
-# define OMAP_DMA_CRYPTO_DES_IN 32
-# define OMAP_DMA_SPI_TX 33
-# define OMAP_DMA_SPI_RX 34
-# define OMAP_DMA_CRYPTO_HASH 35
-# define OMAP_DMA_CCP_ATTN 36
-# define OMAP_DMA_CCP_FIFO_NOT_EMPTY 37
-# define OMAP_DMA_CMT_APE_TX_CHAN_0 38
-# define OMAP_DMA_CMT_APE_RV_CHAN_0 39
-# define OMAP_DMA_CMT_APE_TX_CHAN_1 40
-# define OMAP_DMA_CMT_APE_RV_CHAN_1 41
-# define OMAP_DMA_CMT_APE_TX_CHAN_2 42
-# define OMAP_DMA_CMT_APE_RV_CHAN_2 43
-# define OMAP_DMA_CMT_APE_TX_CHAN_3 44
-# define OMAP_DMA_CMT_APE_RV_CHAN_3 45
-# define OMAP_DMA_CMT_APE_TX_CHAN_4 46
-# define OMAP_DMA_CMT_APE_RV_CHAN_4 47
-# define OMAP_DMA_CMT_APE_TX_CHAN_5 48
-# define OMAP_DMA_CMT_APE_RV_CHAN_5 49
-# define OMAP_DMA_CMT_APE_TX_CHAN_6 50
-# define OMAP_DMA_CMT_APE_RV_CHAN_6 51
-# define OMAP_DMA_CMT_APE_TX_CHAN_7 52
-# define OMAP_DMA_CMT_APE_RV_CHAN_7 53
-# define OMAP_DMA_MMC2_TX 54
-# define OMAP_DMA_MMC2_RX 55
-# define OMAP_DMA_CRYPTO_DES_OUT 56
-
-/*
- * DMA request numbers for the OMAP2
- */
-# define OMAP24XX_DMA_NO_DEVICE 0
-# define OMAP24XX_DMA_XTI_DMA 1 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_EXT_DMAREQ0 2
-# define OMAP24XX_DMA_EXT_DMAREQ1 3
-# define OMAP24XX_DMA_GPMC 4
-# define OMAP24XX_DMA_GFX 5 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_DSS 6
-# define OMAP24XX_DMA_VLYNQ_TX 7 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_CWT 8 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_AES_TX 9 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_AES_RX 10 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_DES_TX 11 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_DES_RX 12 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_SHA1MD5_RX 13 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_EXT_DMAREQ2 14
-# define OMAP24XX_DMA_EXT_DMAREQ3 15
-# define OMAP24XX_DMA_EXT_DMAREQ4 16
-# define OMAP24XX_DMA_EAC_AC_RD 17
-# define OMAP24XX_DMA_EAC_AC_WR 18
-# define OMAP24XX_DMA_EAC_MD_UL_RD 19
-# define OMAP24XX_DMA_EAC_MD_UL_WR 20
-# define OMAP24XX_DMA_EAC_MD_DL_RD 21
-# define OMAP24XX_DMA_EAC_MD_DL_WR 22
-# define OMAP24XX_DMA_EAC_BT_UL_RD 23
-# define OMAP24XX_DMA_EAC_BT_UL_WR 24
-# define OMAP24XX_DMA_EAC_BT_DL_RD 25
-# define OMAP24XX_DMA_EAC_BT_DL_WR 26
-# define OMAP24XX_DMA_I2C1_TX 27
-# define OMAP24XX_DMA_I2C1_RX 28
-# define OMAP24XX_DMA_I2C2_TX 29
-# define OMAP24XX_DMA_I2C2_RX 30
-# define OMAP24XX_DMA_MCBSP1_TX 31
-# define OMAP24XX_DMA_MCBSP1_RX 32
-# define OMAP24XX_DMA_MCBSP2_TX 33
-# define OMAP24XX_DMA_MCBSP2_RX 34
-# define OMAP24XX_DMA_SPI1_TX0 35
-# define OMAP24XX_DMA_SPI1_RX0 36
-# define OMAP24XX_DMA_SPI1_TX1 37
-# define OMAP24XX_DMA_SPI1_RX1 38
-# define OMAP24XX_DMA_SPI1_TX2 39
-# define OMAP24XX_DMA_SPI1_RX2 40
-# define OMAP24XX_DMA_SPI1_TX3 41
-# define OMAP24XX_DMA_SPI1_RX3 42
-# define OMAP24XX_DMA_SPI2_TX0 43
-# define OMAP24XX_DMA_SPI2_RX0 44
-# define OMAP24XX_DMA_SPI2_TX1 45
-# define OMAP24XX_DMA_SPI2_RX1 46
-
-# define OMAP24XX_DMA_UART1_TX 49
-# define OMAP24XX_DMA_UART1_RX 50
-# define OMAP24XX_DMA_UART2_TX 51
-# define OMAP24XX_DMA_UART2_RX 52
-# define OMAP24XX_DMA_UART3_TX 53
-# define OMAP24XX_DMA_UART3_RX 54
-# define OMAP24XX_DMA_USB_W2FC_TX0 55
-# define OMAP24XX_DMA_USB_W2FC_RX0 56
-# define OMAP24XX_DMA_USB_W2FC_TX1 57
-# define OMAP24XX_DMA_USB_W2FC_RX1 58
-# define OMAP24XX_DMA_USB_W2FC_TX2 59
-# define OMAP24XX_DMA_USB_W2FC_RX2 60
-# define OMAP24XX_DMA_MMC1_TX 61
-# define OMAP24XX_DMA_MMC1_RX 62
-# define OMAP24XX_DMA_MS 63 /* Not in OMAP2420 */
-# define OMAP24XX_DMA_EXT_DMAREQ5 64
-
-/* omap[123].c */
-/* OMAP2 gp timer */
-struct omap_gp_timer_s;
-struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta,
- qemu_irq irq, omap_clk fclk, omap_clk iclk);
-void omap_gp_timer_reset(struct omap_gp_timer_s *s);
-
-/* OMAP2 sysctimer */
-struct omap_synctimer_s;
-struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta,
- struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk);
-void omap_synctimer_reset(struct omap_synctimer_s *s);
+#define OMAP_DMA_CRYPTO_DES_IN 32
+#define OMAP_DMA_SPI_TX 33
+#define OMAP_DMA_SPI_RX 34
+#define OMAP_DMA_CRYPTO_HASH 35
+#define OMAP_DMA_CCP_ATTN 36
+#define OMAP_DMA_CCP_FIFO_NOT_EMPTY 37
+#define OMAP_DMA_CMT_APE_TX_CHAN_0 38
+#define OMAP_DMA_CMT_APE_RV_CHAN_0 39
+#define OMAP_DMA_CMT_APE_TX_CHAN_1 40
+#define OMAP_DMA_CMT_APE_RV_CHAN_1 41
+#define OMAP_DMA_CMT_APE_TX_CHAN_2 42
+#define OMAP_DMA_CMT_APE_RV_CHAN_2 43
+#define OMAP_DMA_CMT_APE_TX_CHAN_3 44
+#define OMAP_DMA_CMT_APE_RV_CHAN_3 45
+#define OMAP_DMA_CMT_APE_TX_CHAN_4 46
+#define OMAP_DMA_CMT_APE_RV_CHAN_4 47
+#define OMAP_DMA_CMT_APE_TX_CHAN_5 48
+#define OMAP_DMA_CMT_APE_RV_CHAN_5 49
+#define OMAP_DMA_CMT_APE_TX_CHAN_6 50
+#define OMAP_DMA_CMT_APE_RV_CHAN_6 51
+#define OMAP_DMA_CMT_APE_TX_CHAN_7 52
+#define OMAP_DMA_CMT_APE_RV_CHAN_7 53
+#define OMAP_DMA_MMC2_TX 54
+#define OMAP_DMA_MMC2_RX 55
+#define OMAP_DMA_CRYPTO_DES_OUT 56
struct omap_uart_s;
struct omap_uart_s *omap_uart_init(hwaddr base,
qemu_irq irq, omap_clk fclk, omap_clk iclk,
qemu_irq txdma, qemu_irq rxdma,
const char *label, Chardev *chr);
-struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem,
- struct omap_target_agent_s *ta,
- qemu_irq irq, omap_clk fclk, omap_clk iclk,
- qemu_irq txdma, qemu_irq rxdma,
- const char *label, Chardev *chr);
void omap_uart_reset(struct omap_uart_s *s);
struct omap_mpuio_s;
@@ -731,17 +491,6 @@ void omap_mpuio_out_set(struct omap_mpuio_s *s, int line, qemu_irq handler);
void omap_mpuio_key(struct omap_mpuio_s *s, int row, int col, int down);
struct omap_uwire_s;
-void omap_uwire_attach(struct omap_uwire_s *s,
- uWireSlave *slave, int chipselect);
-
-/* OMAP2 spi */
-struct omap_mcspi_s;
-struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum,
- qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk);
-void omap_mcspi_attach(struct omap_mcspi_s *s,
- uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque,
- int chipselect);
-void omap_mcspi_reset(struct omap_mcspi_s *s);
struct I2SCodec {
void *opaque;
@@ -770,9 +519,6 @@ struct I2SCodec {
struct omap_mcbsp_s;
void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave);
-void omap_tap_init(struct omap_target_agent_s *ta,
- struct omap_mpu_state_s *mpu);
-
/* omap_lcdc.c */
struct omap_lcd_panel_s;
void omap_lcdc_reset(struct omap_lcd_panel_s *s);
@@ -782,61 +528,29 @@ struct omap_lcd_panel_s *omap_lcdc_init(MemoryRegion *sysmem,
struct omap_dma_lcd_channel_s *dma,
omap_clk clk);
-/* omap_dss.c */
-struct rfbi_chip_s {
- void *opaque;
- void (*write)(void *opaque, int dc, uint16_t value);
- void (*block)(void *opaque, int dc, void *buf, size_t len, int pitch);
- uint16_t (*read)(void *opaque, int dc);
-};
-struct omap_dss_s;
-void omap_dss_reset(struct omap_dss_s *s);
-struct omap_dss_s *omap_dss_init(struct omap_target_agent_s *ta,
- MemoryRegion *sysmem,
- hwaddr l3_base,
- qemu_irq irq, qemu_irq drq,
- omap_clk fck1, omap_clk fck2, omap_clk ck54m,
- omap_clk ick1, omap_clk ick2);
-void omap_rfbi_attach(struct omap_dss_s *s, int cs, struct rfbi_chip_s *chip);
-
/* omap_mmc.c */
-struct omap_mmc_s;
-struct omap_mmc_s *omap_mmc_init(hwaddr base,
- MemoryRegion *sysmem,
- BlockBackend *blk,
- qemu_irq irq, qemu_irq dma[], omap_clk clk);
-struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta,
- BlockBackend *blk, qemu_irq irq, qemu_irq dma[],
- omap_clk fclk, omap_clk iclk);
-void omap_mmc_reset(struct omap_mmc_s *s);
-void omap_mmc_handlers(struct omap_mmc_s *s, qemu_irq ro, qemu_irq cover);
-void omap_mmc_enable(struct omap_mmc_s *s, int enable);
+#define TYPE_OMAP_MMC "omap-mmc"
+OBJECT_DECLARE_SIMPLE_TYPE(OMAPMMCState, OMAP_MMC)
+
+DeviceState *omap_mmc_init(hwaddr base,
+ MemoryRegion *sysmem,
+ qemu_irq irq, qemu_irq dma[], omap_clk clk);
+/* TODO: clock framework (see above) */
+void omap_mmc_set_clk(DeviceState *dev, omap_clk clk);
+
/* omap_i2c.c */
I2CBus *omap_i2c_bus(DeviceState *omap_i2c);
-# define cpu_is_omap310(cpu) (cpu->mpu_model == omap310)
-# define cpu_is_omap1510(cpu) (cpu->mpu_model == omap1510)
-# define cpu_is_omap1610(cpu) (cpu->mpu_model == omap1610)
-# define cpu_is_omap1710(cpu) (cpu->mpu_model == omap1710)
-# define cpu_is_omap2410(cpu) (cpu->mpu_model == omap2410)
-# define cpu_is_omap2420(cpu) (cpu->mpu_model == omap2420)
-# define cpu_is_omap2430(cpu) (cpu->mpu_model == omap2430)
-# define cpu_is_omap3430(cpu) (cpu->mpu_model == omap3430)
-# define cpu_is_omap3630(cpu) (cpu->mpu_model == omap3630)
-
-# define cpu_is_omap15xx(cpu) \
+#define cpu_is_omap310(cpu) (cpu->mpu_model == omap310)
+#define cpu_is_omap1510(cpu) (cpu->mpu_model == omap1510)
+#define cpu_is_omap1610(cpu) (cpu->mpu_model == omap1610)
+#define cpu_is_omap1710(cpu) (cpu->mpu_model == omap1710)
+
+#define cpu_is_omap15xx(cpu) \
(cpu_is_omap310(cpu) || cpu_is_omap1510(cpu))
-# define cpu_is_omap16xx(cpu) \
+#define cpu_is_omap16xx(cpu) \
(cpu_is_omap1610(cpu) || cpu_is_omap1710(cpu))
-# define cpu_is_omap24xx(cpu) \
- (cpu_is_omap2410(cpu) || cpu_is_omap2420(cpu) || cpu_is_omap2430(cpu))
-
-# define cpu_class_omap1(cpu) \
- (cpu_is_omap15xx(cpu) || cpu_is_omap16xx(cpu))
-# define cpu_class_omap2(cpu) cpu_is_omap24xx(cpu)
-# define cpu_class_omap3(cpu) \
- (cpu_is_omap3430(cpu) || cpu_is_omap3630(cpu))
struct omap_mpu_state_s {
enum omap_mpu_model {
@@ -844,13 +558,6 @@ struct omap_mpu_state_s {
omap1510,
omap1610,
omap1710,
- omap2410,
- omap2420,
- omap2422,
- omap2423,
- omap2430,
- omap3430,
- omap3630,
} mpu_model;
ARMCPU *cpu;
@@ -897,7 +604,7 @@ struct omap_mpu_state_s {
/* MPU public TIPB peripherals */
struct omap_32khz_timer_s *os_timer;
- struct omap_mmc_s *mmc;
+ DeviceState *mmc;
struct omap_mpuio_s *mpuio;
@@ -960,33 +667,12 @@ struct omap_mpu_state_s {
uint16_t dsp_idlect2;
uint16_t dsp_rstct2;
} clkm;
-
- /* OMAP2-only peripherals */
- struct omap_l4_s *l4;
-
- struct omap_gp_timer_s *gptimer[12];
- struct omap_synctimer_s *synctimer;
-
- struct omap_prcm_s *prcm;
- struct omap_sdrc_s *sdrc;
- struct omap_gpmc_s *gpmc;
- struct omap_sysctl_s *sysc;
-
- struct omap_mcspi_s *mcspi[2];
-
- struct omap_dss_s *dss;
-
- struct omap_eac_s *eac;
};
/* omap1.c */
struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *sdram,
const char *core);
-/* omap2.c */
-struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram,
- const char *core);
-
uint32_t omap_badwidth_read8(void *opaque, hwaddr addr);
void omap_badwidth_write8(void *opaque, hwaddr addr,
uint32_t value);
@@ -999,43 +685,14 @@ void omap_badwidth_write32(void *opaque, hwaddr addr,
void omap_mpu_wakeup(void *opaque, int irq, int req);
-# define OMAP_BAD_REG(paddr) \
+#define OMAP_BAD_REG(paddr) \
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad register %#08"HWADDR_PRIx"\n", \
__func__, paddr)
-# define OMAP_RO_REG(paddr) \
+#define OMAP_RO_REG(paddr) \
qemu_log_mask(LOG_GUEST_ERROR, "%s: Read-only register %#08" \
HWADDR_PRIx "\n", \
__func__, paddr)
-/* OMAP-specific Linux bootloader tags for the ATAG_BOARD area
- * (Board-specific tags are not here)
- */
-#define OMAP_TAG_CLOCK 0x4f01
-#define OMAP_TAG_MMC 0x4f02
-#define OMAP_TAG_SERIAL_CONSOLE 0x4f03
-#define OMAP_TAG_USB 0x4f04
-#define OMAP_TAG_LCD 0x4f05
-#define OMAP_TAG_GPIO_SWITCH 0x4f06
-#define OMAP_TAG_UART 0x4f07
-#define OMAP_TAG_FBMEM 0x4f08
-#define OMAP_TAG_STI_CONSOLE 0x4f09
-#define OMAP_TAG_CAMERA_SENSOR 0x4f0a
-#define OMAP_TAG_PARTITION 0x4f0b
-#define OMAP_TAG_TEA5761 0x4f10
-#define OMAP_TAG_TMP105 0x4f11
-#define OMAP_TAG_BOOT_REASON 0x4f80
-#define OMAP_TAG_FLASH_PART_STR 0x4f81
-#define OMAP_TAG_VERSION_STR 0x4f82
-
-enum {
- OMAP_GPIOSW_TYPE_COVER = 0 << 4,
- OMAP_GPIOSW_TYPE_CONNECTION = 1 << 4,
- OMAP_GPIOSW_TYPE_ACTIVITY = 2 << 4,
-};
-
-#define OMAP_GPIOSW_INVERTED 0x0001
-#define OMAP_GPIOSW_OUTPUT 0x0002
-
-# define OMAP_MPUI_REG_MASK 0x000007ff
+#define OMAP_MPUI_REG_MASK 0x000007ff
#endif
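
The omap.h changes above convert the MMC controller to a QOM device: callers now get a plain DeviceState * from omap_mmc_init() and hand the clock over separately via omap_mmc_set_clk(). A minimal sketch of how a board model might wire it up under the new API; the base address, IRQ, DMA and clock handles are placeholders, not values taken from this patch:

    /* Hypothetical wiring for the QOM-ified OMAP MMC controller. */
    #include "hw/arm/omap.h"

    static void example_wire_mmc(struct omap_mpu_state_s *mpu,
                                 MemoryRegion *sysmem,
                                 qemu_irq irq, qemu_irq dma[], omap_clk clk)
    {
        /* omap_mmc_init() now returns a DeviceState *, matching the
         * retyped mpu->mmc field in struct omap_mpu_state_s. */
        mpu->mmc = omap_mmc_init(0xfffb7800 /* placeholder base */,
                                 sysmem, irq, dma, clk);
        /* The clock is passed separately until the clock-framework
         * TODO noted in the header is resolved. */
        omap_mmc_set_clk(mpu->mmc, clk);
    }
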
diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h
deleted file mode 100644
index 4c6caee..0000000
--- a/include/hw/arm/pxa.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Intel XScale PXA255/270 processor support.
- *
- * Copyright (c) 2006 Openedhand Ltd.
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * This code is licensed under the GNU GPL v2.
- */
-
-#ifndef PXA_H
-#define PXA_H
-
-#include "exec/memory.h"
-#include "target/arm/cpu-qom.h"
-#include "hw/pcmcia.h"
-#include "qom/object.h"
-
-/* Interrupt numbers */
-# define PXA2XX_PIC_SSP3 0
-# define PXA2XX_PIC_USBH2 2
-# define PXA2XX_PIC_USBH1 3
-# define PXA2XX_PIC_KEYPAD 4
-# define PXA2XX_PIC_PWRI2C 6
-# define PXA25X_PIC_HWUART 7
-# define PXA27X_PIC_OST_4_11 7
-# define PXA2XX_PIC_GPIO_0 8
-# define PXA2XX_PIC_GPIO_1 9
-# define PXA2XX_PIC_GPIO_X 10
-# define PXA2XX_PIC_I2S 13
-# define PXA26X_PIC_ASSP 15
-# define PXA25X_PIC_NSSP 16
-# define PXA27X_PIC_SSP2 16
-# define PXA2XX_PIC_LCD 17
-# define PXA2XX_PIC_I2C 18
-# define PXA2XX_PIC_ICP 19
-# define PXA2XX_PIC_STUART 20
-# define PXA2XX_PIC_BTUART 21
-# define PXA2XX_PIC_FFUART 22
-# define PXA2XX_PIC_MMC 23
-# define PXA2XX_PIC_SSP 24
-# define PXA2XX_PIC_DMA 25
-# define PXA2XX_PIC_OST_0 26
-# define PXA2XX_PIC_RTC1HZ 30
-# define PXA2XX_PIC_RTCALARM 31
-
-/* DMA requests */
-# define PXA2XX_RX_RQ_I2S 2
-# define PXA2XX_TX_RQ_I2S 3
-# define PXA2XX_RX_RQ_BTUART 4
-# define PXA2XX_TX_RQ_BTUART 5
-# define PXA2XX_RX_RQ_FFUART 6
-# define PXA2XX_TX_RQ_FFUART 7
-# define PXA2XX_RX_RQ_SSP1 13
-# define PXA2XX_TX_RQ_SSP1 14
-# define PXA2XX_RX_RQ_SSP2 15
-# define PXA2XX_TX_RQ_SSP2 16
-# define PXA2XX_RX_RQ_ICP 17
-# define PXA2XX_TX_RQ_ICP 18
-# define PXA2XX_RX_RQ_STUART 19
-# define PXA2XX_TX_RQ_STUART 20
-# define PXA2XX_RX_RQ_MMCI 21
-# define PXA2XX_TX_RQ_MMCI 22
-# define PXA2XX_USB_RQ(x) ((x) + 24)
-# define PXA2XX_RX_RQ_SSP3 66
-# define PXA2XX_TX_RQ_SSP3 67
-
-# define PXA2XX_SDRAM_BASE 0xa0000000
-# define PXA2XX_INTERNAL_BASE 0x5c000000
-# define PXA2XX_INTERNAL_SIZE 0x40000
-
-/* pxa2xx_pic.c */
-DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu);
-
-/* pxa2xx_gpio.c */
-DeviceState *pxa2xx_gpio_init(hwaddr base,
- ARMCPU *cpu, DeviceState *pic, int lines);
-void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler);
-
-/* pxa2xx_dma.c */
-DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq);
-DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq);
-
-/* pxa2xx_lcd.c */
-typedef struct PXA2xxLCDState PXA2xxLCDState;
-PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem,
- hwaddr base, qemu_irq irq);
-void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler);
-
-/* pxa2xx_mmci.c */
-#define TYPE_PXA2XX_MMCI "pxa2xx-mmci"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxMMCIState, PXA2XX_MMCI)
-
-PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma);
-void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly,
- qemu_irq coverswitch);
-
-/* pxa2xx_pcmcia.c */
-#define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPCMCIAState, PXA2XX_PCMCIA)
-
-int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card);
-int pxa2xx_pcmcia_detach(void *opaque);
-void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq);
-
-/* pxa2xx_keypad.c */
-struct keymap {
- int8_t column;
- int8_t row;
-};
-typedef struct PXA2xxKeyPadState PXA2xxKeyPadState;
-PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq);
-void pxa27x_register_keypad(PXA2xxKeyPadState *kp,
- const struct keymap *map, int size);
-
-/* pxa2xx.c */
-#define TYPE_PXA2XX_I2C "pxa2xx_i2c"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CState, PXA2XX_I2C)
-
-PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
- qemu_irq irq, uint32_t page_size);
-I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s);
-
-typedef struct PXA2xxI2SState PXA2xxI2SState;
-
-#define TYPE_PXA2XX_FIR "pxa2xx-fir"
-OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxFIrState, PXA2XX_FIR)
-
-typedef struct {
- ARMCPU *cpu;
- DeviceState *pic;
- qemu_irq reset;
- MemoryRegion sdram;
- MemoryRegion internal;
- MemoryRegion cm_iomem;
- MemoryRegion mm_iomem;
- MemoryRegion pm_iomem;
- DeviceState *dma;
- DeviceState *gpio;
- PXA2xxLCDState *lcd;
- SSIBus **ssp;
- PXA2xxI2CState *i2c[2];
- PXA2xxMMCIState *mmc;
- PXA2xxPCMCIAState *pcmcia[2];
- PXA2xxI2SState *i2s;
- PXA2xxFIrState *fir;
- PXA2xxKeyPadState *kp;
-
- /* Power management */
- hwaddr pm_base;
- uint32_t pm_regs[0x40];
-
- /* Clock management */
- hwaddr cm_base;
- uint32_t cm_regs[4];
- uint32_t clkcfg;
-
- /* Memory management */
- hwaddr mm_base;
- uint32_t mm_regs[0x1a];
-
- /* Performance monitoring */
- uint32_t pmnc;
-} PXA2xxState;
-
-struct PXA2xxI2SState {
- MemoryRegion iomem;
- qemu_irq irq;
- qemu_irq rx_dma;
- qemu_irq tx_dma;
- void (*data_req)(void *, int, int);
-
- uint32_t control[2];
- uint32_t status;
- uint32_t mask;
- uint32_t clk;
-
- int enable;
- int rx_len;
- int tx_len;
- void (*codec_out)(void *, uint32_t);
- uint32_t (*codec_in)(void *);
- void *opaque;
-
- int fifo_len;
- uint32_t fifo[16];
-};
-
-# define PA_FMT "0x%08lx"
-
-PXA2xxState *pxa270_init(unsigned int sdram_size, const char *revision);
-PXA2xxState *pxa255_init(unsigned int sdram_size);
-
-#endif /* PXA_H */
diff --git a/include/hw/arm/sharpsl.h b/include/hw/arm/sharpsl.h
index e986b28..1e3992f 100644
--- a/include/hw/arm/sharpsl.h
+++ b/include/hw/arm/sharpsl.h
@@ -11,7 +11,7 @@
/* zaurus.c */
-#define SL_PXA_PARAM_BASE 0xa0000a00
+#define SL_PXA_PARAM_BASE 0xa0000a00
void sl_bootparam_write(hwaddr ptr);
#endif
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index d1a4a64..e5e2d09 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -110,7 +110,6 @@ typedef struct SMMUTransCfg {
/* Used by stage-1 only. */
bool aa64; /* aarch64 or aarch32 translation table */
bool record_faults; /* record fault events */
- uint64_t ttb; /* TT base address */
uint8_t oas; /* output address width */
uint8_t tbi; /* Top Byte Ignore */
int asid;
@@ -143,6 +142,11 @@ typedef struct SMMUIOTLBKey {
uint8_t level;
} SMMUIOTLBKey;
+typedef struct SMMUSIDRange {
+ uint32_t start;
+ uint32_t end;
+} SMMUSIDRange;
+
struct SMMUState {
/* <private> */
SysBusDevice dev;
@@ -220,6 +224,7 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
uint8_t tg, uint64_t num_pages, uint8_t ttl);
void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
uint64_t num_pages, uint8_t ttl);
+void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range);
/* Unmap the range of all the notifiers registered to any IOMMU mr */
void smmu_inv_notifiers_all(SMMUState *s);
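
The smmu-common.h hunk adds an SMMUSIDRange pair and a smmu_configs_inv_sid_range() declaration for invalidating cached configurations by StreamID range. A self-contained sketch of the data structure; the struct is copied here so the snippet compiles on its own, and the containment helper (plus the assumption that the range is inclusive) is purely illustrative rather than part of the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the SMMUSIDRange added above: a [start, end] span of
     * StreamIDs, treated here as inclusive for illustration. */
    typedef struct SMMUSIDRange {
        uint32_t start;
        uint32_t end;
    } SMMUSIDRange;

    /* Illustrative helper: does a StreamID fall inside the range? */
    static bool sid_in_range(SMMUSIDRange range, uint32_t sid)
    {
        return sid >= range.start && sid <= range.end;
    }

    int main(void)
    {
        SMMUSIDRange range = { .start = 0x100, .end = 0x1ff };
        printf("0x150 in range: %d\n", sid_in_range(range, 0x150)); /* 1 */
        printf("0x200 in range: %d\n", sid_in_range(range, 0x200)); /* 0 */
        return 0;
    }
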
diff --git a/include/hw/arm/soc_dma.h b/include/hw/arm/soc_dma.h
index e93a749..bcdb914 100644
--- a/include/hw/arm/soc_dma.h
+++ b/include/hw/arm/soc_dma.h
@@ -54,7 +54,7 @@ struct soc_dma_ch_s {
int bytes;
/* Initialised by the DMA module, call soc_dma_ch_update after writing. */
enum soc_dma_access_type type[2];
- hwaddr vaddr[2]; /* Updated by .transfer_fn(). */
+ hwaddr vaddr[2]; /* Updated by .transfer_fn(). */
/* Private */
void *paddr[2];
soc_dma_io_t io_fn[2];
@@ -70,7 +70,7 @@ struct soc_dma_ch_s {
struct soc_dma_s {
/* Following fields are set by the SoC DMA module and can be used
* by anybody. */
- uint64_t drqbmp; /* Is zeroed by soc_dma_reset() */
+ uint64_t drqbmp; /* Is zeroed by soc_dma_reset() */
qemu_irq *drq;
void *opaque;
int64_t freq;
diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h
index d15c03c..2eeada6 100644
--- a/include/hw/arm/stm32f405_soc.h
+++ b/include/hw/arm/stm32f405_soc.h
@@ -25,6 +25,7 @@
#ifndef HW_ARM_STM32F405_SOC_H
#define HW_ARM_STM32F405_SOC_H
+#include "hw/misc/stm32_rcc.h"
#include "hw/misc/stm32f4xx_syscfg.h"
#include "hw/timer/stm32f2xx_timer.h"
#include "hw/char/stm32f2xx_usart.h"
@@ -55,6 +56,7 @@ struct STM32F405State {
ARMv7MState armv7m;
+ STM32RccState rcc;
STM32F4xxSyscfgState syscfg;
STM32F4xxExtiState exti;
STM32F2XXUsartState usart[STM_NUM_USARTS];
diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h
index c243fb0..c2fae6e 100644
--- a/include/hw/arm/stm32l4x5_soc.h
+++ b/include/hw/arm/stm32l4x5_soc.h
@@ -24,7 +24,7 @@
#ifndef HW_ARM_STM32L4x5_SOC_H
#define HW_ARM_STM32L4x5_SOC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/arm/armv7m.h"
#include "hw/or-irq.h"
#include "hw/misc/stm32l4x5_syscfg.h"
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index ab961bb..9a1b0f5 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -36,7 +36,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/bsa.h"
#include "hw/block/flash.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
#include "qom/object.h"
@@ -47,6 +47,9 @@
/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
#define PVTIME_SIZE_PER_CPU 64
+/* GPIO pins */
+#define GPIO_PIN_POWER_BUTTON 3
+
enum {
VIRT_FLASH,
VIRT_MEM,
@@ -114,14 +117,8 @@ typedef enum VirtGICType {
struct VirtMachineClass {
MachineClass parent;
- bool disallow_affinity_adjustment;
- bool no_its;
bool no_tcg_its;
- bool no_pmu;
- bool claim_edge_triggered_timers;
- bool smbios_old_sys_ver;
bool no_highmem_compact;
- bool no_highmem_ecam;
bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */
bool kvm_no_adjvtime;
bool no_kvm_steal_time;
@@ -131,6 +128,7 @@ struct VirtMachineClass {
bool no_cpu_topology;
bool no_tcg_lpa2;
bool no_ns_el2_virt_timer_irq;
+ bool no_nested_smmu;
};
struct VirtMachineState {
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index 025beb5..05ed641 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -78,6 +78,7 @@ struct Versal {
struct {
PL011State uart[XLNX_VERSAL_NR_UARTS];
CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
+ OrIRQState gem_irq_orgate[XLNX_VERSAL_NR_GEMS];
XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
VersalUsb2 usb;
CanBusState *canbus[XLNX_VERSAL_NR_CANFD];
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 48f7948..c137ac5 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -116,6 +116,7 @@ struct XlnxZynqMPState {
MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS];
CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
+ OrIRQState gem_irq_orgate[XLNX_ZYNQMP_NUM_GEMS];
CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN];
SysbusAHCIState sata;
diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h
index 2b5ccd9..3671f01 100644
--- a/include/hw/block/flash.h
+++ b/include/hw/block/flash.h
@@ -44,38 +44,6 @@ PFlashCFI02 *pflash_cfi02_register(hwaddr base,
uint16_t unlock_addr1,
int be);
-/* nand.c */
-DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id);
-void nand_setpins(DeviceState *dev, uint8_t cle, uint8_t ale,
- uint8_t ce, uint8_t wp, uint8_t gnd);
-void nand_getpins(DeviceState *dev, int *rb);
-void nand_setio(DeviceState *dev, uint32_t value);
-uint32_t nand_getio(DeviceState *dev);
-uint32_t nand_getbuswidth(DeviceState *dev);
-
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-
-/* onenand.c */
-void *onenand_raw_otp(DeviceState *onenand_device);
-
-/* ecc.c */
-typedef struct {
- uint8_t cp; /* Column parity */
- uint16_t lp[2]; /* Line parity */
- uint16_t count;
-} ECCState;
-
-uint8_t ecc_digest(ECCState *s, uint8_t sample);
-void ecc_reset(ECCState *s);
-extern const VMStateDescription vmstate_ecc_state;
-
/* m25p80.c */
#define TYPE_M25P80 "m25p80-generic"
diff --git a/include/hw/boards.h b/include/hw/boards.h
index ef6f18f..f424b2b 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -3,13 +3,14 @@
#ifndef HW_BOARDS_H
#define HW_BOARDS_H
-#include "exec/memory.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/blockdev.h"
+#include "system/memory.h"
+#include "system/hostmem.h"
+#include "system/blockdev.h"
#include "qapi/qapi-types-machine.h"
#include "qemu/module.h"
#include "qom/object.h"
#include "hw/core/cpu.h"
+#include "hw/resettable.h"
#define TYPE_MACHINE_SUFFIX "-machine"
@@ -43,8 +44,16 @@ void machine_set_cpu_numa_node(MachineState *machine,
Error **errp);
void machine_parse_smp_config(MachineState *ms,
const SMPConfiguration *config, Error **errp);
+bool machine_parse_smp_cache(MachineState *ms,
+ const SmpCachePropertiesList *caches,
+ Error **errp);
unsigned int machine_topo_get_cores_per_socket(const MachineState *ms);
unsigned int machine_topo_get_threads_per_socket(const MachineState *ms);
+CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms,
+ CacheLevelAndType cache);
+void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache,
+ CpuTopologyLevel level);
+bool machine_check_smp_cache(const MachineState *ms, Error **errp);
void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size);
/**
@@ -145,6 +154,10 @@ typedef struct {
* @books_supported - whether books are supported by the machine
* @drawers_supported - whether drawers are supported by the machine
* @modules_supported - whether modules are supported by the machine
+ * @cache_supported - whether cache (l1d, l1i, l2 and l3) configurations are
+ * supported by the machine
+ * @has_caches - whether cache properties are explicitly specified in the
+ * user-provided smp-cache configuration
*/
typedef struct {
bool prefer_sockets;
@@ -154,6 +167,8 @@ typedef struct {
bool books_supported;
bool drawers_supported;
bool modules_supported;
+ bool cache_supported[CACHE_LEVEL_AND_TYPE__MAX];
+ bool has_caches;
} SMPCompatProps;
/**
@@ -215,6 +230,10 @@ typedef struct {
* Return the type of KVM corresponding to the kvm-type string option or
* computed based on other criteria such as the host kernel capabilities.
* kvm-type may be NULL if it is not needed.
+ * @hvf_get_physical_address_range:
+ * Returns the physical address range in bits to use for the HVF virtual
+ * machine based on the current board's memory map. This may be NULL if it
+ * is not needed.
* @numa_mem_supported:
* true if '--numa node.mem' option is supported and false otherwise
* @hotplug_allowed:
@@ -237,6 +256,9 @@ typedef struct {
* purposes only.
* Applies only to default memory backend, i.e., explicit memory backend
* wasn't used.
+ * @smbios_memory_device_size:
+ * Default size of a memory device; see SMBIOS 3.1.0,
+ * "7.18 Memory Device (Type 17)".
*/
struct MachineClass {
/*< private >*/
@@ -250,9 +272,10 @@ struct MachineClass {
const char *deprecation_reason;
void (*init)(MachineState *state);
- void (*reset)(MachineState *state, ShutdownCause reason);
+ void (*reset)(MachineState *state, ResetType type);
void (*wakeup)(MachineState *state);
int (*kvm_type)(MachineState *machine, const char *arg);
+ int (*hvf_get_physical_address_range)(MachineState *machine);
BlockInterfaceType block_default_type;
int units_per_default_bus;
@@ -263,9 +286,8 @@ struct MachineClass {
no_parallel:1,
no_floppy:1,
no_cdrom:1,
- no_sdcard:1,
- pci_allow_0_address:1,
- legacy_fw_cfg_order:1;
+ pci_allow_0_address:1;
+ bool auto_create_sdcard;
bool is_default;
const char *default_machine_opts;
const char *default_boot_order;
@@ -304,6 +326,9 @@ struct MachineClass {
const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
int64_t (*get_default_cpu_node_id)(const MachineState *ms, int idx);
ram_addr_t (*fixup_ram_size)(ram_addr_t size);
+ uint64_t smbios_memory_device_size;
+ bool (*create_default_memdev)(MachineState *ms, const char *path,
+ Error **errp);
};
/**
@@ -359,6 +384,10 @@ typedef struct CpuTopology {
unsigned int max_cpus;
} CpuTopology;
+typedef struct SmpCache {
+ SmpCacheProperties props[CACHE_LEVEL_AND_TYPE__MAX];
+} SmpCache;
+
/**
* MachineState:
*/
@@ -383,6 +412,7 @@ struct MachineState {
bool enable_graphics;
ConfidentialGuestSupport *cgs;
HostMemoryBackend *memdev;
+ bool aux_ram_share;
/*
* convenience alias to ram_memdev_id backend memory region
* or to numa container memory region
@@ -404,11 +434,13 @@ struct MachineState {
BootConfiguration boot_config;
char *kernel_filename;
char *kernel_cmdline;
+ char *shim_filename;
char *initrd_filename;
const char *cpu_type;
AccelState *accelerator;
CPUArchIdList *possible_cpus;
CpuTopology smp;
+ SmpCache smp_cache;
struct NVDIMMState *nvdimms_state;
struct NumaState *numa_state;
};
@@ -603,7 +635,11 @@ struct MachineState {
/*
* How many years/major releases for each phase
* of the life cycle. Assumes use of versioning
- * scheme where major is bumped each year
+ * scheme where major is bumped each year.
+ *
+ * These values must match the ver_machine_deprecation_version
+ * and ver_machine_deletion_version logic in docs/conf.py and
+ * the text in docs/about/deprecated.rst
*/
#define MACHINE_VER_DELETION_MAJOR 6
#define MACHINE_VER_DEPRECATION_MAJOR 3
@@ -617,11 +653,42 @@ struct MachineState {
" years old are subject to deletion after " \
stringify(MACHINE_VER_DELETION_MAJOR) " years"
-#define _MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor) \
+#define _MACHINE_VER_IS_CURRENT_EXPIRED(cutoff, major, minor) \
(((QEMU_VERSION_MAJOR - major) > cutoff) || \
(((QEMU_VERSION_MAJOR - major) == cutoff) && \
(QEMU_VERSION_MINOR - minor) >= 0))
+#define _MACHINE_VER_IS_NEXT_MINOR_EXPIRED(cutoff, major, minor) \
+ (((QEMU_VERSION_MAJOR - major) > cutoff) || \
+ (((QEMU_VERSION_MAJOR - major) == cutoff) && \
+ ((QEMU_VERSION_MINOR + 1) - minor) >= 0))
+
+#define _MACHINE_VER_IS_NEXT_MAJOR_EXPIRED(cutoff, major, minor) \
+ ((((QEMU_VERSION_MAJOR + 1) - major) > cutoff) || \
+ ((((QEMU_VERSION_MAJOR + 1) - major) == cutoff) && \
+ (0 - minor) >= 0))
+
+/*
+ * - The first check applies to formal releases
+ * - The second check applies to dev snapshots / release candidates
+ * where the next major version is the same.
+ * e.g. 9.0.50, 9.1.50, 9.0.90, 9.1.90
+ * - The third check applies to dev snapshots / release candidates
+ * where the next major version will change.
+ * e.g. 9.2.50, 9.2.90
+ *
+ * NB: this assumes we do 3 minor releases per year, before bumping major,
+ * and dev snapshots / release candidates are numbered with micro >= 50.
+ * If this ever changes, the logic below will need modifying.
+ */
+#define _MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor) \
+ ((QEMU_VERSION_MICRO < 50 && \
+ _MACHINE_VER_IS_CURRENT_EXPIRED(cutoff, major, minor)) || \
+ (QEMU_VERSION_MICRO >= 50 && QEMU_VERSION_MINOR < 2 && \
+ _MACHINE_VER_IS_NEXT_MINOR_EXPIRED(cutoff, major, minor)) || \
+ (QEMU_VERSION_MICRO >= 50 && QEMU_VERSION_MINOR == 2 && \
+ _MACHINE_VER_IS_NEXT_MAJOR_EXPIRED(cutoff, major, minor)))
+
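
The comment above spells out how _MACHINE_VER_IS_EXPIRED_IMPL treats formal releases and development snapshots. A standalone worked example of the same arithmetic; the hard-coded 9.2.50 version, the 7.0 machine type and the cutoff of 3 (the MACHINE_VER_DEPRECATION_MAJOR value defined earlier in this header) are illustrative inputs, not values taken from the patch:

    #include <stdio.h>

    /* Placeholder QEMU version: a 9.2.50 development snapshot,
     * i.e. micro >= 50 and minor == 2, heading for 10.0. */
    #define QEMU_VERSION_MAJOR 9
    #define QEMU_VERSION_MINOR 2
    #define QEMU_VERSION_MICRO 50

    #define IS_CURRENT_EXPIRED(cutoff, major, minor) \
        (((QEMU_VERSION_MAJOR - major) > cutoff) || \
         (((QEMU_VERSION_MAJOR - major) == cutoff) && \
          (QEMU_VERSION_MINOR - minor) >= 0))

    #define IS_NEXT_MAJOR_EXPIRED(cutoff, major, minor) \
        ((((QEMU_VERSION_MAJOR + 1) - major) > cutoff) || \
         ((((QEMU_VERSION_MAJOR + 1) - major) == cutoff) && \
          (0 - minor) >= 0))

    int main(void)
    {
        /* 9.2.50 takes the "next major will change" branch, so a 7.0
         * machine with cutoff 3 is (10 - 7) == 3 and (0 - 0) >= 0,
         * i.e. already due for deprecation. */
        int expired = (QEMU_VERSION_MICRO >= 50 && QEMU_VERSION_MINOR == 2)
                      ? IS_NEXT_MAJOR_EXPIRED(3, 7, 0)
                      : IS_CURRENT_EXPIRED(3, 7, 0);
        printf("7.0 machine expired at 9.2.50 (cutoff 3): %d\n", expired);
        return 0;
    }
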
#define _MACHINE_VER_IS_EXPIRED2(cutoff, major, minor) \
_MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor)
#define _MACHINE_VER_IS_EXPIRED3(cutoff, major, minor, micro) \
@@ -686,33 +753,16 @@ struct MachineState {
* suitable period of time has passed, it will cause
* execution of the method to return, avoiding registration
* of the machine
- *
- * The new deprecation and deletion policy for versioned
- * machine types was introduced in QEMU 9.1.0.
- *
- * Under the new policy a number of old machine types (any
- * prior to 2.12) would be liable for immediate deletion
- * which would be a violation of our historical deprecation
- * and removal policy
- *
- * Thus deletions are temporarily gated on existance of
- * the env variable "QEMU_DELETE_MACHINES" / QEMU version
- * number >= 10.1.0. This gate can be deleted in the 10.1.0
- * dev cycle
*/
#define MACHINE_VER_DELETION(...) \
do { \
if (MACHINE_VER_SHOULD_DELETE(__VA_ARGS__)) { \
- if (getenv("QEMU_DELETE_MACHINES") || \
- QEMU_VERSION_MAJOR > 10 || (QEMU_VERSION_MAJOR == 10 && \
- QEMU_VERSION_MINOR >= 1)) { \
- return; \
- } \
+ return; \
} \
} while (0)
#define DEFINE_MACHINE(namestr, machine_initfn) \
- static void machine_initfn##_class_init(ObjectClass *oc, void *data) \
+ static void machine_initfn##_class_init(ObjectClass *oc, const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
machine_initfn(mc); \
@@ -728,6 +778,15 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)
+extern GlobalProperty hw_compat_10_0[];
+extern const size_t hw_compat_10_0_len;
+
+extern GlobalProperty hw_compat_9_2[];
+extern const size_t hw_compat_9_2_len;
+
+extern GlobalProperty hw_compat_9_1[];
+extern const size_t hw_compat_9_1_len;
+
extern GlobalProperty hw_compat_9_0[];
extern const size_t hw_compat_9_0_len;
@@ -803,19 +862,4 @@ extern const size_t hw_compat_2_7_len;
extern GlobalProperty hw_compat_2_6[];
extern const size_t hw_compat_2_6_len;
-extern GlobalProperty hw_compat_2_5[];
-extern const size_t hw_compat_2_5_len;
-
-extern GlobalProperty hw_compat_2_4[];
-extern const size_t hw_compat_2_4_len;
-
-extern GlobalProperty hw_compat_2_3[];
-extern const size_t hw_compat_2_3_len;
-
-extern GlobalProperty hw_compat_2_2[];
-extern const size_t hw_compat_2_2_len;
-
-extern GlobalProperty hw_compat_2_1[];
-extern const size_t hw_compat_2_1_len;
-
#endif
diff --git a/include/hw/char/escc.h b/include/hw/char/escc.h
index 5669a5b..8c4c6a7 100644
--- a/include/hw/char/escc.h
+++ b/include/hw/char/escc.h
@@ -46,6 +46,9 @@ typedef struct ESCCChannelState {
uint8_t rx, tx;
QemuInputHandlerState *hs;
char *sunkbd_layout;
+ int sunmouse_dx;
+ int sunmouse_dy;
+ int sunmouse_buttons;
} ESCCChannelState;
struct ESCCState {
diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h
index 65f0e97..90ba3ff 100644
--- a/include/hw/char/imx_serial.h
+++ b/include/hw/char/imx_serial.h
@@ -109,13 +109,13 @@ struct IMXSerialState {
uint32_t ucr1;
uint32_t ucr2;
uint32_t uts1;
+ uint32_t ufcr;
/*
* The registers below are implemented just so that the
* guest OS sees what it has written
*/
uint32_t onems;
- uint32_t ufcr;
uint32_t ubmr;
uint32_t ubrc;
uint32_t ucr3;
diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h
index b0e14ca..a7b8b1b 100644
--- a/include/hw/char/mchp_pfsoc_mmuart.h
+++ b/include/hw/char/mchp_pfsoc_mmuart.h
@@ -29,7 +29,7 @@
#define HW_MCHP_PFSOC_MMUART_H
#include "hw/sysbus.h"
-#include "hw/char/serial.h"
+#include "hw/char/serial-mm.h"
#define MCHP_PFSOC_MMUART_REG_COUNT 13
diff --git a/include/hw/char/parallel-isa.h b/include/hw/char/parallel-isa.h
index 5284b2f..3edaf9d 100644
--- a/include/hw/char/parallel-isa.h
+++ b/include/hw/char/parallel-isa.h
@@ -12,7 +12,7 @@
#include "parallel.h"
-#include "exec/ioport.h"
+#include "system/ioport.h"
#include "hw/isa/isa.h"
#include "qom/object.h"
diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h
index cfb97cc..7b04478 100644
--- a/include/hw/char/parallel.h
+++ b/include/hw/char/parallel.h
@@ -1,7 +1,7 @@
#ifndef HW_PARALLEL_H
#define HW_PARALLEL_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/isa/isa.h"
#include "hw/irq.h"
#include "chardev/char-fe.h"
diff --git a/include/hw/char/pl011.h b/include/hw/char/pl011.h
index d853802..299ca9b 100644
--- a/include/hw/char/pl011.h
+++ b/include/hw/char/pl011.h
@@ -32,7 +32,6 @@ struct PL011State {
SysBusDevice parent_obj;
MemoryRegion iomem;
- uint32_t readbuff;
uint32_t flags;
uint32_t lcr;
uint32_t rsr;
@@ -53,6 +52,11 @@ struct PL011State {
Clock *clk;
bool migrate_clk;
const unsigned char *id;
+ /*
+ * Since some users embed this struct directly, we must
+ * ensure that the C struct is at least as big as the Rust one.
+ */
+ uint8_t padding_for_rust[16];
};
DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr);
diff --git a/include/hw/char/riscv_htif.h b/include/hw/char/riscv_htif.h
index df493fd..ee0ca29 100644
--- a/include/hw/char/riscv_htif.h
+++ b/include/hw/char/riscv_htif.h
@@ -22,7 +22,7 @@
#include "chardev/char.h"
#include "chardev/char-fe.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#define TYPE_HTIF_UART "riscv.htif.uart"
diff --git a/include/hw/char/serial-isa.h b/include/hw/char/serial-isa.h
new file mode 100644
index 0000000..8517afa
--- /dev/null
+++ b/include/hw/char/serial-isa.h
@@ -0,0 +1,38 @@
+/*
+ * QEMU ISA 16550A UART emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2008 Citrix Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SERIAL_ISA_H
+#define HW_SERIAL_ISA_H
+
+#include "hw/isa/isa.h"
+
+#define MAX_ISA_SERIAL_PORTS 4
+
+#define TYPE_ISA_SERIAL "isa-serial"
+void serial_hds_isa_init(ISABus *bus, int from, int to);
+void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase);
+void isa_serial_set_enabled(ISADevice *serial, bool enabled);
+
+#endif
diff --git a/include/hw/char/serial-mm.h b/include/hw/char/serial-mm.h
new file mode 100644
index 0000000..77abd09
--- /dev/null
+++ b/include/hw/char/serial-mm.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU 16550A UART emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2008 Citrix Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SERIAL_MM_H
+#define HW_SERIAL_MM_H
+
+#include "hw/char/serial.h"
+#include "system/memory.h"
+#include "chardev/char.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_SERIAL_MM "serial-mm"
+OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM)
+
+struct SerialMM {
+ SysBusDevice parent;
+
+ SerialState serial;
+
+ uint8_t regshift;
+ uint8_t endianness;
+};
+
+SerialMM *serial_mm_init(MemoryRegion *address_space,
+ hwaddr base, int regshift,
+ qemu_irq irq, int baudbase,
+ Chardev *chr, enum device_endian end);
+
+#endif
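
The new serial-mm.h carries the memory-mapped 16550 wrapper that the serial.h hunk below removes. A sketch of a typical board-side call using the serial_mm_init() signature declared above; the base address, regshift, IRQ, baud rate and endianness are placeholder choices rather than values from this patch:

    /* Hypothetical board glue: map a 16550 UART into system memory. */
    #include "hw/char/serial-mm.h"

    static SerialMM *example_attach_uart(MemoryRegion *sysmem,
                                         qemu_irq irq, Chardev *chr)
    {
        return serial_mm_init(sysmem,
                              0x09000000,  /* placeholder base address */
                              2,           /* regshift: registers 4 bytes apart */
                              irq,
                              115200,      /* baudbase */
                              chr,
                              DEVICE_LITTLE_ENDIAN);
    }
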
diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h
index 6e14099..4bf90a4 100644
--- a/include/hw/char/serial.h
+++ b/include/hw/char/serial.h
@@ -27,10 +27,8 @@
#define HW_SERIAL_H
#include "chardev/char-fe.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/fifo8.h"
-#include "chardev/char.h"
-#include "hw/sysbus.h"
#include "qom/object.h"
#define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */
@@ -81,38 +79,10 @@ struct SerialState {
};
typedef struct SerialState SerialState;
-struct SerialMM {
- SysBusDevice parent;
-
- SerialState serial;
-
- uint8_t regshift;
- uint8_t endianness;
-};
-
extern const VMStateDescription vmstate_serial;
extern const MemoryRegionOps serial_io_ops;
-void serial_set_frequency(SerialState *s, uint32_t frequency);
-
#define TYPE_SERIAL "serial"
OBJECT_DECLARE_SIMPLE_TYPE(SerialState, SERIAL)
-#define TYPE_SERIAL_MM "serial-mm"
-OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM)
-
-SerialMM *serial_mm_init(MemoryRegion *address_space,
- hwaddr base, int regshift,
- qemu_irq irq, int baudbase,
- Chardev *chr, enum device_endian end);
-
-/* serial-isa.c */
-
-#define MAX_ISA_SERIAL_PORTS 4
-
-#define TYPE_ISA_SERIAL "isa-serial"
-void serial_hds_isa_init(ISABus *bus, int from, int to);
-void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase);
-void isa_serial_set_enabled(ISADevice *serial, bool enabled);
-
#endif
diff --git a/include/hw/char/sifive_uart.h b/include/hw/char/sifive_uart.h
index 7f6c79f..0846cf6 100644
--- a/include/hw/char/sifive_uart.h
+++ b/include/hw/char/sifive_uart.h
@@ -24,6 +24,7 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qom/object.h"
+#include "qemu/fifo8.h"
enum {
SIFIVE_UART_TXFIFO = 0,
@@ -48,9 +49,13 @@ enum {
SIFIVE_UART_IP_RXWM = 2 /* Receive watermark interrupt pending */
};
+#define SIFIVE_UART_TXFIFO_FULL 0x80000000
+
#define SIFIVE_UART_GET_TXCNT(txctrl) ((txctrl >> 16) & 0x7)
#define SIFIVE_UART_GET_RXCNT(rxctrl) ((rxctrl >> 16) & 0x7)
+
#define SIFIVE_UART_RX_FIFO_SIZE 8
+#define SIFIVE_UART_TX_FIFO_SIZE 8
#define TYPE_SIFIVE_UART "riscv.sifive.uart"
OBJECT_DECLARE_SIMPLE_TYPE(SiFiveUARTState, SIFIVE_UART)
@@ -63,13 +68,20 @@ struct SiFiveUARTState {
qemu_irq irq;
MemoryRegion mmio;
CharBackend chr;
- uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE];
- uint8_t rx_fifo_len;
+
+ uint32_t txfifo;
uint32_t ie;
uint32_t ip;
uint32_t txctrl;
uint32_t rxctrl;
uint32_t div;
+
+ uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE];
+ uint8_t rx_fifo_len;
+
+ Fifo8 tx_fifo;
+
+ QEMUTimer *fifo_trigger_handle;
};
SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base,
diff --git a/include/hw/clock.h b/include/hw/clock.h
index eb58599..a279bd4 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -142,14 +142,6 @@ void clock_set_callback(Clock *clk, ClockCallback *cb,
void *opaque, unsigned int events);
/**
- * clock_clear_callback:
- * @clk: the clock to delete the callback from
- *
- * Unregister the callback registered with clock_set_callback.
- */
-void clock_clear_callback(Clock *clk);
-
-/**
* clock_set_source:
* @clk: the clock.
* @src: the source clock
diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h
deleted file mode 100644
index 24dad45..0000000
--- a/include/hw/core/accel-cpu.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Accelerator interface, specializes CPUClass
- * This header is used only by target-specific code.
- *
- * Copyright 2021 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef ACCEL_CPU_H
-#define ACCEL_CPU_H
-
-/*
- * This header is used to define new accelerator-specific target-specific
- * accelerator cpu subclasses.
- * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
- *
- * Do not try to use for any other purpose than the implementation of new
- * subclasses in target/, or the accel implementation itself in accel/
- */
-
-#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
-#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
-typedef struct AccelCPUClass AccelCPUClass;
-DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
-
-typedef struct AccelCPUClass {
- /*< private >*/
- ObjectClass parent_class;
- /*< public >*/
-
- void (*cpu_class_init)(CPUClass *cc);
- void (*cpu_instance_init)(CPUState *cpu);
- bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
-} AccelCPUClass;
-
-#endif /* ACCEL_CPU_H */
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index d946161..33296a1 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -33,6 +33,7 @@
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
+#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qom/object.h"
@@ -101,11 +102,9 @@ struct SysemuCPUOps;
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
* instantiatable CPU type.
+ * @list_cpus: list available CPU models and flags.
* @parse_features: Callback to parse command line arguments.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
- * @has_work: Callback for checking if there is work to do.
- * @mmu_index: Callback for choosing softmmu mmu index;
- * may be used internally by memory_rw_debug without TCG.
* @memory_rw_debug: Callback for GDB memory access.
* @dump_state: Callback for dumping state.
* @query_cpu_fast:
@@ -123,17 +122,24 @@ struct SysemuCPUOps;
* @get_pc: Callback for getting the Program Counter register.
* As above, with the semantics of the target architecture.
* @gdb_read_register: Callback for letting GDB read a register.
+ * No more than @gdb_num_core_regs registers can be read.
* @gdb_write_register: Callback for letting GDB write a register.
+ * No more than @gdb_num_core_regs registers can be written.
* @gdb_adjust_breakpoint: Callback for adjusting the address of a
* breakpoint. Used by AVR to handle a gdb mis-feature with
* its Harvard architecture split code and data.
* @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
* from @gdb_core_xml_file.
* @gdb_core_xml_file: File name for core registers GDB XML description.
+ * @gdb_get_core_xml_file: Optional callback that returns the file name for
+ * the core registers GDB XML description. The returned value is expected to
+ * be a simple constant string: the caller will not g_free() it. If this
+ * is NULL then @gdb_core_xml_file will be used instead.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
* before the insn which triggers a watchpoint rather than after it.
* @gdb_arch_name: Optional callback that returns the architecture name known
- * to GDB. The caller must free the returned string with g_free.
+ * to GDB. The returned value is expected to be a simple constant string:
+ * the caller will not g_free() it.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
@@ -148,12 +154,11 @@ struct CPUClass {
/*< public >*/
ObjectClass *(*class_by_name)(const char *cpu_model);
+ void (*list_cpus)(void);
void (*parse_features)(const char *typename, char *str, Error **errp);
- bool (*has_work)(CPUState *cpu);
- int (*mmu_index)(CPUState *cpu, bool ifetch);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
- uint8_t *buf, int len, bool is_write);
+ uint8_t *buf, size_t len, bool is_write);
void (*dump_state)(CPUState *cpu, FILE *, int flags);
void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
int64_t (*get_arch_id)(CPUState *cpu);
@@ -165,6 +170,7 @@ struct CPUClass {
const char *gdb_core_xml_file;
const gchar * (*gdb_arch_name)(CPUState *cpu);
+ const char * (*gdb_get_core_xml_file)(CPUState *cpu);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
@@ -205,7 +211,7 @@ struct CPUClass {
* so the layout is not as critical as that of CPUTLBEntry. This is
* also why we don't want to combine the two structs.
*/
-typedef struct CPUTLBEntryFull {
+struct CPUTLBEntryFull {
/*
* @xlat_section contains:
* - in the lower TARGET_PAGE_BITS, a physical section number
@@ -261,7 +267,7 @@ typedef struct CPUTLBEntryFull {
bool guarded;
} arm;
} extra;
-} CPUTLBEntryFull;
+};
/*
* Data elements that are per MMU mode, minus the bits accessed by
@@ -350,6 +356,8 @@ typedef union IcountDecr {
* from CPUArchState, via small negative offsets.
* @can_do_io: True if memory-mapped IO is allowed.
* @plugin_mem_cbs: active plugin memory callbacks
+ * @plugin_mem_value_low: lower 64 bits of the latest accessed mem value.
+ * @plugin_mem_value_high: upper 64 bits of the latest accessed mem value.
*/
typedef struct CPUNegativeOffsetState {
CPUTLB tlb;
@@ -358,6 +366,8 @@ typedef struct CPUNegativeOffsetState {
* The callback pointer are accessed via TCG (see gen_empty_mem_helper).
*/
GArray *plugin_mem_cbs;
+ uint64_t plugin_mem_value_low;
+ uint64_t plugin_mem_value_high;
#endif
IcountDecr icount_decr;
bool can_do_io;
@@ -402,7 +412,6 @@ struct qemu_work_item;
* Under TCG this value is propagated to @tcg_cflags.
* See TranslationBlock::TCG CF_CLUSTER_MASK.
* @tcg_cflags: Pre-computed cflags for this cpu.
- * @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU core.
* @thread: Host thread details, only live once @created is #true
* @sem: WIN32 only semaphore used only for qtest
@@ -461,7 +470,6 @@ struct CPUState {
CPUClass *cc;
/*< public >*/
- int nr_cores;
int nr_threads;
struct QemuThread *thread;
@@ -496,6 +504,7 @@ struct CPUState {
QSIMPLEQ_HEAD(, qemu_work_item) work_list;
struct CPUAddressSpace *cpu_ases;
+ int cpu_ases_count;
int num_ases;
AddressSpace *as;
MemoryRegion *memory;
@@ -578,7 +587,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
static inline CPUArchState *cpu_env(CPUState *cpu)
{
- /* We validate that CPUArchState follows CPUState in cpu-all.h. */
+ /* We validate that CPUArchState follows CPUState in cpu-target.c */
return (CPUArchState *)(cpu + 1);
}
@@ -594,15 +603,6 @@ extern CPUTailQ cpus_queue;
extern __thread CPUState *current_cpu;
/**
- * qemu_tcg_mttcg_enabled:
- * Check whether we are running MultiThread TCG or not.
- *
- * Returns: %true if we are in MTTCG mode %false otherwise.
- */
-extern bool mttcg_enabled;
-#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
-
-/**
* cpu_paging_enabled:
* @cpu: The CPU whose state is to be inspected.
*
@@ -621,8 +621,6 @@ bool cpu_paging_enabled(const CPUState *cpu);
bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
-#if !defined(CONFIG_USER_ONLY)
-
/**
* cpu_write_elf64_note:
* @f: pointer to a function that writes memory to a file
@@ -672,8 +670,6 @@ int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
*/
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
-#endif /* !CONFIG_USER_ONLY */
-
/**
* CPUDumpFlags:
* @CPU_DUMP_CODE:
@@ -697,7 +693,6 @@ enum CPUDumpFlags {
*/
void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
-#ifndef CONFIG_USER_ONLY
/**
* cpu_get_phys_page_attrs_debug:
* @cpu: The CPU to obtain the physical page address for.
@@ -744,7 +739,15 @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
*/
bool cpu_virtio_is_big_endian(CPUState *cpu);
-#endif /* CONFIG_USER_ONLY */
+/**
+ * cpu_has_work:
+ * @cpu: The vCPU to check.
+ *
+ * Checks whether the CPU has work to do.
+ *
+ * Returns: %true if the CPU has work, %false otherwise.
+ */
+bool cpu_has_work(CPUState *cpu);
/**
* cpu_list_add:
@@ -811,22 +814,6 @@ CPUState *cpu_create(const char *typename);
const char *parse_cpu_option(const char *cpu_option);
/**
- * cpu_has_work:
- * @cpu: The vCPU to check.
- *
- * Checks whether the CPU has work to do.
- *
- * Returns: %true if the CPU has work, %false otherwise.
- */
-static inline bool cpu_has_work(CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- g_assert(cc->has_work);
- return cc->has_work(cpu);
-}
-
-/**
* qemu_cpu_is_self:
* @cpu: The vCPU to check against.
*
@@ -962,9 +949,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
*/
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- cc->set_pc(cpu, addr);
+ cpu->cc->set_pc(cpu, addr);
}
/**
@@ -1113,36 +1098,6 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
-#if defined(CONFIG_USER_ONLY)
-static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
- int flags, CPUWatchpoint **watchpoint)
-{
- return -ENOSYS;
-}
-
-static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
- vaddr len, int flags)
-{
- return -ENOSYS;
-}
-
-static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
- CPUWatchpoint *wp)
-{
-}
-
-static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
-{
-}
-#else
-int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
- int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
- vaddr len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
-#endif
-
/**
* cpu_get_address_space:
* @cpu: CPU to get address space from
@@ -1158,31 +1113,23 @@ G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
/* $(top_srcdir)/cpu.c */
void cpu_class_init_props(DeviceClass *dc);
+void cpu_exec_class_post_init(CPUClass *cc);
void cpu_exec_initfn(CPUState *cpu);
+void cpu_vmstate_register(CPUState *cpu);
+void cpu_vmstate_unregister(CPUState *cpu);
bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
void cpu_exec_reset_hold(CPUState *cpu);
-const char *target_name(void);
-
-#ifdef COMPILING_PER_TARGET
-
-#ifndef CONFIG_USER_ONLY
-
extern const VMStateDescription vmstate_cpu_common;
-#define VMSTATE_CPU() { \
- .name = "parent_obj", \
- .size = sizeof(CPUState), \
- .vmsd = &vmstate_cpu_common, \
- .flags = VMS_STRUCT, \
- .offset = 0, \
-}
-#endif /* !CONFIG_USER_ONLY */
-
-#endif /* COMPILING_PER_TARGET */
-
#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1
+enum CacheType {
+ DATA_CACHE,
+ INSTRUCTION_CACHE,
+ UNIFIED_CACHE
+};
+
#endif
diff --git a/include/hw/core/resetcontainer.h b/include/hw/core/resetcontainer.h
index 23db0c7..daeb18c 100644
--- a/include/hw/core/resetcontainer.h
+++ b/include/hw/core/resetcontainer.h
@@ -20,7 +20,7 @@
#include "qom/object.h"
#define TYPE_RESETTABLE_CONTAINER "resettable-container"
-OBJECT_DECLARE_TYPE(ResettableContainer, ResettableContainerClass, RESETTABLE_CONTAINER)
+OBJECT_DECLARE_SIMPLE_TYPE(ResettableContainer, RESETTABLE_CONTAINER)
/**
* resettable_container_add: Add a resettable object to the container
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index 24d003f..8778923 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -7,8 +7,8 @@
* See the COPYING file in the top-level directory.
*/
-#ifndef SYSEMU_CPU_OPS_H
-#define SYSEMU_CPU_OPS_H
+#ifndef SYSTEM_CPU_OPS_H
+#define SYSTEM_CPU_OPS_H
#include "hw/core/cpu.h"
@@ -17,6 +17,10 @@
*/
typedef struct SysemuCPUOps {
/**
+ * @has_work: Callback for checking if there is work to do.
+ */
+ bool (*has_work)(CPUState *cpu); /* MANDATORY NON-NULL */
+ /**
* @get_memory_mapping: Callback for obtaining the memory mappings.
*/
bool (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
@@ -89,4 +93,4 @@ typedef struct SysemuCPUOps {
} SysemuCPUOps;
-#endif /* SYSEMU_CPU_OPS_H */
+#endif /* SYSTEM_CPU_OPS_H */
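
The cpu.h and sysemu-cpu-ops.h hunks move the has_work hook from CPUClass into SysemuCPUOps and turn cpu_has_work() into an out-of-line function. One plausible shape for that function, assuming it dispatches through the cached cpu->cc pointer the way the reworked cpu_set_pc() above does; this is a sketch of the idea, not the body actually added by the patch:

    #include "hw/core/cpu.h"
    #include "hw/core/sysemu-cpu-ops.h"

    /* Sketch: dispatch through the new mandatory SysemuCPUOps hook. */
    bool cpu_has_work(CPUState *cpu)
    {
        /* The header marks @has_work as MANDATORY NON-NULL. */
        g_assert(cpu->cc->sysemu_ops->has_work);
        return cpu->cc->sysemu_ops->has_work(cpu);
    }
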
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
deleted file mode 100644
index 34318cf..0000000
--- a/include/hw/core/tcg-cpu-ops.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * TCG CPU-specific operations
- *
- * Copyright 2021 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef TCG_CPU_OPS_H
-#define TCG_CPU_OPS_H
-
-#include "exec/breakpoint.h"
-#include "exec/hwaddr.h"
-#include "exec/memattrs.h"
-#include "exec/mmu-access-type.h"
-#include "exec/vaddr.h"
-
-struct TCGCPUOps {
- /**
- * @initialize: Initialize TCG state
- *
- * Called when the first CPU is realized.
- */
- void (*initialize)(void);
- /**
- * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
- *
- * This is called when we abandon execution of a TB before starting it,
- * and must set all parts of the CPU state which the previous TB in the
- * chain may not have updated.
- * By default, when this is NULL, a call is made to @set_pc(tb->pc).
- *
- * If more state needs to be restored, the target must implement a
- * function to restore all the state, and register it here.
- */
- void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
- /**
- * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
- *
- * This is called when we unwind state in the middle of a TB,
- * usually before raising an exception. Set all parts of the CPU
- * state which are tracked insn-by-insn in the target-specific
- * arguments to start_insn, passed as @data.
- */
- void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
- const uint64_t *data);
-
- /** @cpu_exec_enter: Callback for cpu_exec preparation */
- void (*cpu_exec_enter)(CPUState *cpu);
- /** @cpu_exec_exit: Callback for cpu_exec cleanup */
- void (*cpu_exec_exit)(CPUState *cpu);
- /** @debug_excp_handler: Callback for handling debug exceptions */
- void (*debug_excp_handler)(CPUState *cpu);
-
-#ifdef CONFIG_USER_ONLY
- /**
- * @fake_user_interrupt: Callback for 'fake exception' handling.
- *
- * Simulate 'fake exception' which will be handled outside the
- * cpu execution loop (hack for x86 user mode).
- */
- void (*fake_user_interrupt)(CPUState *cpu);
-
- /**
- * record_sigsegv:
- * @cpu: cpu context
- * @addr: faulting guest address
- * @access_type: access was read/write/execute
- * @maperr: true for invalid page, false for permission fault
- * @ra: host pc for unwinding
- *
- * We are about to raise SIGSEGV with si_code set for @maperr,
- * and si_addr set for @addr. Record anything further needed
- * for the signal ucontext_t.
- *
- * If the emulated kernel does not provide the signal handler with
- * anything besides the user context registers, and the siginfo_t,
- * then this hook need do nothing and may be omitted.
- * Otherwise, record the data and return; the caller will raise
- * the signal, unwind the cpu state, and return to the main loop.
- *
- * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
- * so that a "normal" cpu exception can be raised. In this case,
- * the signal must be raised by the architecture cpu_loop.
- */
- void (*record_sigsegv)(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- bool maperr, uintptr_t ra);
- /**
- * record_sigbus:
- * @cpu: cpu context
- * @addr: misaligned guest address
- * @access_type: access was read/write/execute
- * @ra: host pc for unwinding
- *
- * We are about to raise SIGBUS with si_code BUS_ADRALN,
- * and si_addr set for @addr. Record anything further needed
- * for the signal ucontext_t.
- *
- * If the emulated kernel does not provide the signal handler with
- * anything besides the user context registers, and the siginfo_t,
- * then this hook need do nothing and may be omitted.
- * Otherwise, record the data and return; the caller will raise
- * the signal, unwind the cpu state, and return to the main loop.
- *
- * If it is simpler to re-use the sysemu do_unaligned_access code,
- * @ra is provided so that a "normal" cpu exception can be raised.
- * In this case, the signal must be raised by the architecture cpu_loop.
- */
- void (*record_sigbus)(CPUState *cpu, vaddr addr,
- MMUAccessType access_type, uintptr_t ra);
-#else
- /** @do_interrupt: Callback for interrupt handling. */
- void (*do_interrupt)(CPUState *cpu);
- /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
- /**
- * @cpu_exec_halt: Callback for handling halt in cpu_exec.
- *
- * The target CPU should do any special processing here that it needs
- * to do when the CPU is in the halted state.
- *
- * Return true to indicate that the CPU should now leave halt, false
- * if it should remain in the halted state. (This should generally
- * be the same value that cpu_has_work() would return.)
- *
- * This method must be provided. If the target does not need to
- * do anything special for halt, the same function used for its
- * CPUClass::has_work method can be used here, as they have the
- * same function signature.
- */
- bool (*cpu_exec_halt)(CPUState *cpu);
- /**
- * @tlb_fill: Handle a softmmu tlb miss
- *
- * If the access is valid, call tlb_set_page and return true;
- * if the access is invalid and probe is true, return false;
- * otherwise raise an exception and do not return.
- */
- bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
- /**
- * @do_transaction_failed: Callback for handling failed memory transactions
- * (ie bus faults or external aborts; not MMU faults)
- */
- void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
- unsigned size, MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr);
- /**
- * @do_unaligned_access: Callback for unaligned access handling
- * The callback must exit via raising an exception.
- */
- G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr);
-
- /**
- * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
- */
- vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
-
- /**
- * @debug_check_watchpoint: return true if the architectural
- * watchpoint whose address has matched should really fire, used by ARM
- * and RISC-V
- */
- bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
-
- /**
- * @debug_check_breakpoint: return true if the architectural
- * breakpoint whose PC has matched should really fire.
- */
- bool (*debug_check_breakpoint)(CPUState *cpu);
-
- /**
- * @io_recompile_replay_branch: Callback for cpu_io_recompile.
- *
- * The cpu has been stopped, and cpu_restore_state_from_tb has been
- * called. If the faulting instruction is in a delay slot, and the
- * target architecture requires re-execution of the branch, then
- * adjust the cpu state as required and return true.
- */
- bool (*io_recompile_replay_branch)(CPUState *cpu,
- const TranslationBlock *tb);
- /**
- * @need_replay_interrupt: Return %true if @interrupt_request
- * needs to be recorded for replay purposes.
- */
- bool (*need_replay_interrupt)(int interrupt_request);
-#endif /* !CONFIG_USER_ONLY */
-};
-
-#if defined(CONFIG_USER_ONLY)
-
-static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
- MemTxAttrs atr, int fl, uintptr_t ra)
-{
-}
-
-static inline int cpu_watchpoint_address_matches(CPUState *cpu,
- vaddr addr, vaddr len)
-{
- return 0;
-}
-
-#else
-
-/**
- * cpu_check_watchpoint:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
- * @attrs: memory access attributes
- * @flags: watchpoint access type
- * @ra: unwind return address
- *
- * Check for a watchpoint hit in [addr, addr+len) of the type
- * specified by @flags. Exit via exception with a hit.
- */
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
- MemTxAttrs attrs, int flags, uintptr_t ra);
-
-/**
- * cpu_watchpoint_address_matches:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
- *
- * Return the watchpoint flags that apply to [addr, addr+len).
- * If no watchpoint is registered for the range, the result is 0.
- */
-int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
-
-#endif
-
-#endif /* TCG_CPU_OPS_H */
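The deleted tcg-cpu-ops.h above documents that cpu_exec_halt is mandatory and that a target with no special halt handling can point it at the same predicate it uses for has_work, since the signatures match. A minimal sketch of that reuse, borrowing the placeholder foo_cpu_has_work from the earlier snippet (illustrative only, not code from this patch):

    /* Reuse the has_work predicate for halt, as the deleted doc comment allows. */
    static const struct TCGCPUOps foo_tcg_ops = {
        .cpu_exec_halt = foo_cpu_has_work,   /* same signature as has_work */
    };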
diff --git a/include/hw/cris/etraxfs.h b/include/hw/cris/etraxfs.h
deleted file mode 100644
index 012c4e9..0000000
--- a/include/hw/cris/etraxfs.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * QEMU ETRAX System Emulator
- *
- * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef HW_ETRAXFS_H
-#define HW_ETRAXFS_H
-
-#include "net/net.h"
-#include "hw/cris/etraxfs_dma.h"
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
-#include "qapi/error.h"
-
-DeviceState *etraxfs_eth_init(hwaddr base, int phyaddr,
- struct etraxfs_dma_client *dma_out,
- struct etraxfs_dma_client *dma_in);
-
-static inline DeviceState *etraxfs_ser_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("etraxfs-serial");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
- return dev;
-}
-
-#endif
diff --git a/include/hw/cris/etraxfs_dma.h b/include/hw/cris/etraxfs_dma.h
deleted file mode 100644
index 095d76b..0000000
--- a/include/hw/cris/etraxfs_dma.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef HW_ETRAXFS_DMA_H
-#define HW_ETRAXFS_DMA_H
-
-#include "exec/hwaddr.h"
-
-struct dma_context_metadata {
- /* data descriptor md */
- uint16_t metadata;
-};
-
-struct etraxfs_dma_client
-{
- /* DMA controller. */
- int channel;
- void *ctrl;
-
- /* client. */
- struct {
- int (*push)(void *opaque, unsigned char *buf,
- int len, bool eop);
- void (*pull)(void *opaque);
- void (*metadata_push)(void *opaque,
- const struct dma_context_metadata *md);
- void *opaque;
- } client;
-};
-
-void *etraxfs_dmac_init(hwaddr base, int nr_channels);
-void etraxfs_dmac_connect(void *opaque, int channel, qemu_irq *line,
- int input);
-void etraxfs_dmac_connect_client(void *opaque, int c,
- struct etraxfs_dma_client *cl);
-int etraxfs_dmac_input(struct etraxfs_dma_client *client,
- void *buf, int len, int eop);
-
-#endif
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
index 0a4fcb2..ed6cd50 100644
--- a/include/hw/cxl/cxl_device.h
+++ b/include/hw/cxl/cxl_device.h
@@ -176,16 +176,34 @@ typedef struct CXLCCI {
uint16_t opcode;
uint16_t complete_pct;
uint16_t ret_code; /* Current value of retcode */
+ bool aborted;
uint64_t starttime;
/* set by each bg cmd, cleared by the bg_timer when complete */
uint64_t runtime;
QEMUTimer *timer;
+ QemuMutex lock; /* serializes mbox abort vs timer cb */
} bg;
+
+ /* firmware update */
+ struct {
+ uint8_t active_slot;
+ uint8_t staged_slot;
+ bool slot[4];
+ uint8_t curr_action;
+ uint8_t curr_slot;
+ /* handle partial transfers */
+ bool transferring;
+ size_t prev_offset;
+ size_t prev_len;
+ time_t last_partxfer;
+ } fw;
+
size_t payload_max;
/* Pointer to device hosting the CCI */
DeviceState *d;
/* Pointer to the device hosting the protocol conversion */
DeviceState *intf;
+ bool initialized;
} CXLCCI;
typedef struct cxl_device_state {
@@ -249,8 +267,8 @@ void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev,
typedef struct CXLType3Dev CXLType3Dev;
typedef struct CSWMBCCIDev CSWMBCCIDev;
/* Set up default values for the register block */
-void cxl_device_register_init_t3(CXLType3Dev *ct3d);
-void cxl_device_register_init_swcci(CSWMBCCIDev *sw);
+void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n);
+void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n);
/*
* CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register
@@ -301,6 +319,7 @@ void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max);
void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
DeviceState *d, size_t payload_max);
void cxl_init_cci(CXLCCI *cci, size_t payload_max);
+void cxl_destroy_cci(CXLCCI *cci);
void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
size_t payload_max);
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
@@ -397,9 +416,14 @@ static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val)
#define cxl_dev_enable_media(cxlds) \
do { __toggle_media((cxlds), 0x1); } while (0)
-static inline bool sanitize_running(CXLCCI *cci)
+static inline bool cxl_dev_media_disabled(CXLDeviceState *cxl_dstate)
{
- return !!cci->bg.runtime && cci->bg.opcode == 0x4400;
+ uint64_t dev_status_reg = cxl_dstate->mbox_reg_state64[R_CXL_MEM_DEV_STS];
+ return FIELD_EX64(dev_status_reg, CXL_MEM_DEV_STS, MEDIA_STATUS) == 0x3;
+}
+static inline bool scan_media_running(CXLCCI *cci)
+{
+ return !!cci->bg.runtime && cci->bg.opcode == 0x4304;
}
typedef struct CXLError {
@@ -422,6 +446,55 @@ typedef struct CXLPoison {
typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
#define CXL_POISON_LIST_LIMIT 256
+/* CXL memory device patrol scrub control attributes */
+typedef struct CXLMemPatrolScrubReadAttrs {
+ uint8_t scrub_cycle_cap;
+ uint16_t scrub_cycle;
+ uint8_t scrub_flags;
+} QEMU_PACKED CXLMemPatrolScrubReadAttrs;
+
+typedef struct CXLMemPatrolScrubWriteAttrs {
+ uint8_t scrub_cycle_hr;
+ uint8_t scrub_flags;
+} QEMU_PACKED CXLMemPatrolScrubWriteAttrs;
+
+#define CXL_MEMDEV_PS_GET_FEATURE_VERSION 0x01
+#define CXL_MEMDEV_PS_SET_FEATURE_VERSION 0x01
+#define CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT BIT(0)
+#define CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT BIT(1)
+#define CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT 12
+#define CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT 1
+#define CXL_MEMDEV_PS_ENABLE_DEFAULT 0
+
+/* CXL memory device DDR5 ECS control attributes */
+#define CXL_ECS_GET_FEATURE_VERSION 0x01
+#define CXL_ECS_SET_FEATURE_VERSION 0x01
+#define CXL_ECS_LOG_ENTRY_TYPE_DEFAULT 0x01
+#define CXL_ECS_REALTIME_REPORT_CAP_DEFAULT 1
+#define CXL_ECS_THRESHOLD_COUNT_DEFAULT 3 /* 3: 256, 4: 1024, 5: 4096 */
+#define CXL_ECS_MODE_DEFAULT 0
+#define CXL_ECS_NUM_MEDIA_FRUS 3 /* Default */
+
+typedef struct CXLMemECSFRUReadAttrs {
+ uint8_t ecs_cap;
+ uint16_t ecs_config;
+ uint8_t ecs_flags;
+} QEMU_PACKED CXLMemECSFRUReadAttrs;
+
+typedef struct CXLMemECSReadAttrs {
+ uint8_t ecs_log_cap;
+ CXLMemECSFRUReadAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS];
+} QEMU_PACKED CXLMemECSReadAttrs;
+
+typedef struct CXLMemECSFRUWriteAttrs {
+ uint16_t ecs_config;
+} QEMU_PACKED CXLMemECSFRUWriteAttrs;
+
+typedef struct CXLMemECSWriteAttrs {
+ uint8_t ecs_log_cap;
+ CXLMemECSFRUWriteAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS];
+} QEMU_PACKED CXLMemECSWriteAttrs;
+
#define DCD_MAX_NUM_REGION 8
typedef struct CXLDCExtentRaw {
@@ -459,6 +532,29 @@ typedef struct CXLDCRegion {
unsigned long *blk_bitmap;
} CXLDCRegion;
+typedef struct CXLSetFeatureInfo {
+ QemuUUID uuid;
+ uint8_t data_transfer_flag;
+ bool data_saved_across_reset;
+ uint16_t data_offset;
+ size_t data_size;
+} CXLSetFeatureInfo;
+
+struct CXLSanitizeInfo;
+
+typedef struct CXLAlertConfig {
+ uint8_t valid_alerts;
+ uint8_t enable_alerts;
+ uint8_t life_used_crit_alert_thresh;
+ uint8_t life_used_warn_thresh;
+ uint16_t over_temp_crit_alert_thresh;
+ uint16_t under_temp_crit_alert_thresh;
+ uint16_t over_temp_warn_thresh;
+ uint16_t under_temp_warn_thresh;
+ uint16_t cor_vmem_err_warn_thresh;
+ uint16_t cor_pmem_err_warn_thresh;
+} QEMU_PACKED CXLAlertConfig;
+
struct CXLType3Dev {
/* Private */
PCIDevice parent_obj;
@@ -480,6 +576,12 @@ struct CXLType3Dev {
CXLCCI vdm_fm_owned_ld_mctp_cci;
CXLCCI ld0_cci;
+ CXLAlertConfig alert_config;
+
+ /* PCIe link characteristics */
+ PCIExpLinkSpeed speed;
+ PCIExpLinkWidth width;
+
/* DOE */
DOECap doe_cdat;
@@ -491,6 +593,19 @@ struct CXLType3Dev {
unsigned int poison_list_cnt;
bool poison_list_overflowed;
uint64_t poison_list_overflow_ts;
+ /* Poison Injection - backup */
+ CXLPoisonList poison_list_bkp;
+ CXLPoisonList scan_media_results;
+ bool scan_media_hasrun;
+
+ CXLSetFeatureInfo set_feat_info;
+
+ /* Patrol scrub control attributes */
+ CXLMemPatrolScrubReadAttrs patrol_scrub_attrs;
+ CXLMemPatrolScrubWriteAttrs patrol_scrub_wr_attrs;
+ /* ECS control attributes */
+ CXLMemECSReadAttrs ecs_attrs;
+ CXLMemECSWriteAttrs ecs_wr_attrs;
struct dynamic_capacity {
HostMemoryBackend *host_dc;
@@ -508,6 +623,8 @@ struct CXLType3Dev {
uint8_t num_regions; /* 0-8 regions */
CXLDCRegion regions[DCD_MAX_NUM_REGION];
} dc;
+
+ struct CXLSanitizeInfo *media_op_sanitize;
};
#define TYPE_CXL_TYPE3 "cxl-type3"
@@ -554,10 +671,12 @@ CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
size_t *len);
CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
CXLClearEventPayload *pl);
+void cxl_discard_all_event_records(CXLDeviceState *cxlds);
void cxl_event_irq_assert(CXLType3Dev *ct3d);
void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);
+void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d);
CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len);
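The cxl_device.h changes above rename sanitize_running() to scan_media_running() and add cxl_dev_media_disabled(). A hedged sketch of how a command handler might combine the two checks; the wrapper name is illustrative and not from this patch:

    /* Illustrative guard before a media-touching mailbox command. */
    static bool foo_media_op_blocked(CXLDeviceState *cxlds, CXLCCI *cci)
    {
        return cxl_dev_media_disabled(cxlds) || scan_media_running(cci);
    }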
diff --git a/include/hw/cxl/cxl_mailbox.h b/include/hw/cxl/cxl_mailbox.h
new file mode 100644
index 0000000..9008402
--- /dev/null
+++ b/include/hw/cxl/cxl_mailbox.h
@@ -0,0 +1,19 @@
+/*
+ * QEMU CXL Mailbox
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_MAILBOX_H
+#define CXL_MAILBOX_H
+
+#define CXL_MBOX_IMMEDIATE_CONFIG_CHANGE (1 << 1)
+#define CXL_MBOX_IMMEDIATE_DATA_CHANGE (1 << 2)
+#define CXL_MBOX_IMMEDIATE_POLICY_CHANGE (1 << 3)
+#define CXL_MBOX_IMMEDIATE_LOG_CHANGE (1 << 4)
+#define CXL_MBOX_SECURITY_STATE_CHANGE (1 << 5)
+#define CXL_MBOX_BACKGROUND_OPERATION (1 << 6)
+#define CXL_MBOX_BACKGROUND_OPERATION_ABORT (1 << 7)
+
+#endif
diff --git a/include/hw/display/blizzard.h b/include/hw/display/blizzard.h
deleted file mode 100644
index 5b33018..0000000
--- a/include/hw/display/blizzard.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_DISPLAY_BLIZZARD_H
-#define HW_DISPLAY_BLIZZARD_H
-
-
-void *s1d13745_init(qemu_irq gpio_int);
-void s1d13745_write(void *opaque, int dc, uint16_t value);
-void s1d13745_write_block(void *opaque, int dc,
- void *buf, size_t len, int pitch);
-uint16_t s1d13745_read(void *opaque, int dc);
-
-#endif
diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h
index 27cebef..0fae1f3 100644
--- a/include/hw/display/macfb.h
+++ b/include/hw/display/macfb.h
@@ -13,7 +13,7 @@
#ifndef MACFB_H
#define MACFB_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/irq.h"
#include "hw/nubus/nubus.h"
#include "hw/sysbus.h"
diff --git a/include/hw/display/tc6393xb.h b/include/hw/display/tc6393xb.h
deleted file mode 100644
index f9263bf..0000000
--- a/include/hw/display/tc6393xb.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Toshiba TC6393XB I/O Controller.
- * Found in Sharp Zaurus SL-6000 (tosa) or some
- * Toshiba e-Series PDAs.
- *
- * Copyright (c) 2007 Hervé Poussineau
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_DISPLAY_TC6393XB_H
-#define HW_DISPLAY_TC6393XB_H
-
-typedef struct TC6393xbState TC6393xbState;
-
-TC6393xbState *tc6393xb_init(struct MemoryRegion *sysmem,
- uint32_t base, qemu_irq irq);
-qemu_irq tc6393xb_l3v_get(TC6393xbState *s);
-
-#endif
diff --git a/include/hw/dma/i8257.h b/include/hw/dma/i8257.h
index 4342e4a..33b6286 100644
--- a/include/hw/dma/i8257.h
+++ b/include/hw/dma/i8257.h
@@ -2,7 +2,7 @@
#define HW_I8257_H
#include "hw/isa/isa.h"
-#include "exec/ioport.h"
+#include "system/ioport.h"
#include "qom/object.h"
#define TYPE_I8257 "i8257"
diff --git a/include/hw/dma/xlnx-zdma.h b/include/hw/dma/xlnx-zdma.h
index efc7521..9c57c49 100644
--- a/include/hw/dma/xlnx-zdma.h
+++ b/include/hw/dma/xlnx-zdma.h
@@ -31,7 +31,7 @@
#include "hw/sysbus.h"
#include "hw/register.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#define ZDMA_R_MAX (0x204 / 4)
diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h
index 40537a8..484b2e3 100644
--- a/include/hw/dma/xlnx_dpdma.h
+++ b/include/hw/dma/xlnx_dpdma.h
@@ -26,8 +26,7 @@
#define XLNX_DPDMA_H
#include "hw/sysbus.h"
-#include "ui/console.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#define XLNX_DPDMA_REG_ARRAY_SIZE (0x1000 >> 2)
diff --git a/include/hw/fsi/aspeed_apb2opb.h b/include/hw/fsi/aspeed_apb2opb.h
index f6a2387..878619e 100644
--- a/include/hw/fsi/aspeed_apb2opb.h
+++ b/include/hw/fsi/aspeed_apb2opb.h
@@ -8,7 +8,7 @@
#ifndef FSI_ASPEED_APB2OPB_H
#define FSI_ASPEED_APB2OPB_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/fsi/fsi-master.h"
#include "hw/sysbus.h"
diff --git a/include/hw/fsi/cfam.h b/include/hw/fsi/cfam.h
index 7abc3b2..cceb4bd 100644
--- a/include/hw/fsi/cfam.h
+++ b/include/hw/fsi/cfam.h
@@ -7,7 +7,7 @@
#ifndef FSI_CFAM_H
#define FSI_CFAM_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/fsi/fsi.h"
#include "hw/fsi/lbus.h"
diff --git a/include/hw/fsi/fsi-master.h b/include/hw/fsi/fsi-master.h
index 68e5f56..b634ecd 100644
--- a/include/hw/fsi/fsi-master.h
+++ b/include/hw/fsi/fsi-master.h
@@ -7,7 +7,7 @@
#ifndef FSI_FSI_MASTER_H
#define FSI_FSI_MASTER_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "hw/fsi/fsi.h"
#include "hw/fsi/cfam.h"
diff --git a/include/hw/fsi/fsi.h b/include/hw/fsi/fsi.h
index e00f6ef..f34765e 100644
--- a/include/hw/fsi/fsi.h
+++ b/include/hw/fsi/fsi.h
@@ -7,7 +7,7 @@
#ifndef FSI_FSI_H
#define FSI_FSI_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "hw/fsi/lbus.h"
#include "qemu/bitops.h"
diff --git a/include/hw/fsi/lbus.h b/include/hw/fsi/lbus.h
index 558268c..1251907 100644
--- a/include/hw/fsi/lbus.h
+++ b/include/hw/fsi/lbus.h
@@ -9,7 +9,7 @@
#include "hw/qdev-core.h"
#include "qemu/units.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#define TYPE_FSI_LBUS_DEVICE "fsi.lbus.device"
OBJECT_DECLARE_SIMPLE_TYPE(FSILBusDevice, FSI_LBUS_DEVICE)
diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h
index 90a12ae..e6b2fe7 100644
--- a/include/hw/gpio/aspeed_gpio.h
+++ b/include/hw/gpio/aspeed_gpio.h
@@ -70,12 +70,14 @@ typedef struct AspeedGPIOReg {
} AspeedGPIOReg;
struct AspeedGPIOClass {
- SysBusDevice parent_obj;
+ SysBusDeviceClass parent_class;
const GPIOSetProperties *props;
uint32_t nr_gpio_pins;
uint32_t nr_gpio_sets;
const AspeedGPIOReg *reg_table;
unsigned reg_table_count;
+ uint64_t mem_size;
+ const MemoryRegionOps *reg_ops;
};
struct AspeedGPIOState {
@@ -88,7 +90,7 @@ struct AspeedGPIOState {
qemu_irq irq;
qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET];
-/* Parallel GPIO Registers */
+ /* Parallel GPIO Registers */
uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS];
struct GPIOSets {
uint32_t data_value; /* Reflects pin values */
diff --git a/include/hw/gpio/npcm7xx_gpio.h b/include/hw/gpio/npcm7xx_gpio.h
index b1d771b..7c0bf61 100644
--- a/include/hw/gpio/npcm7xx_gpio.h
+++ b/include/hw/gpio/npcm7xx_gpio.h
@@ -15,7 +15,7 @@
#ifndef NPCM7XX_GPIO_H
#define NPCM7XX_GPIO_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
/* Number of pins managed by each controller. */
diff --git a/include/hw/hw.h b/include/hw/hw.h
index 045c1c8..1b33d12 100644
--- a/include/hw/hw.h
+++ b/include/hw/hw.h
@@ -1,10 +1,6 @@
#ifndef QEMU_HW_H
#define QEMU_HW_H
-#ifdef CONFIG_USER_ONLY
-#error Cannot include hw/hw.h from user emulation
-#endif
-
G_NORETURN void hw_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#endif
diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h
index 4a22973..fffc5ce 100644
--- a/include/hw/hyperv/hyperv-proto.h
+++ b/include/hw/hyperv/hyperv-proto.h
@@ -62,6 +62,18 @@
#define HV_MESSAGE_X64_LEGACY_FP_ERROR 0x80010005
/*
+ * Hyper-V Synthetic debug options MSR
+ */
+#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1
+#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2
+#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3
+#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4
+#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5
+#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF
+
+#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2)
+
+/*
* Message flags
*/
#define HV_MESSAGE_FLAG_PENDING 0x1
diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h
index d717b4e..63a8b65 100644
--- a/include/hw/hyperv/hyperv.h
+++ b/include/hw/hyperv/hyperv.h
@@ -10,7 +10,8 @@
#ifndef HW_HYPERV_HYPERV_H
#define HW_HYPERV_HYPERV_H
-#include "cpu-qom.h"
+#include "exec/hwaddr.h"
+#include "hw/core/cpu.h"
#include "hw/hyperv/hyperv-proto.h"
typedef struct HvSintRoute HvSintRoute;
diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h
index 5c50585..06b948b 100644
--- a/include/hw/hyperv/vmbus.h
+++ b/include/hw/hyperv/vmbus.h
@@ -10,8 +10,8 @@
#ifndef HW_HYPERV_VMBUS_H
#define HW_HYPERV_VMBUS_H
-#include "sysemu/sysemu.h"
-#include "sysemu/dma.h"
+#include "system/system.h"
+#include "system/dma.h"
#include "hw/qdev-core.h"
#include "migration/vmstate.h"
#include "hw/hyperv/vmbus-proto.h"
diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h
index a064479..2daacc1 100644
--- a/include/hw/i2c/aspeed_i2c.h
+++ b/include/hw/i2c/aspeed_i2c.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
*/
#ifndef ASPEED_I2C_H
@@ -31,12 +30,14 @@
#define TYPE_ASPEED_2500_I2C TYPE_ASPEED_I2C "-ast2500"
#define TYPE_ASPEED_2600_I2C TYPE_ASPEED_I2C "-ast2600"
#define TYPE_ASPEED_1030_I2C TYPE_ASPEED_I2C "-ast1030"
+#define TYPE_ASPEED_2700_I2C TYPE_ASPEED_I2C "-ast2700"
OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C)
#define ASPEED_I2C_NR_BUSSES 16
-#define ASPEED_I2C_MAX_POOL_SIZE 0x800
+#define ASPEED_I2C_SHARE_POOL_SIZE 0x800
+#define ASPEED_I2C_BUS_POOL_SIZE 0x20
#define ASPEED_I2C_OLD_NUM_REG 11
-#define ASPEED_I2C_NEW_NUM_REG 22
+#define ASPEED_I2C_NEW_NUM_REG 28
#define A_I2CD_M_STOP_CMD BIT(5)
#define A_I2CD_M_RX_CMD BIT(3)
@@ -225,6 +226,15 @@ REG32(I2CS_DMA_LEN_STS, 0x4c)
FIELD(I2CS_DMA_LEN_STS, TX_LEN, 0, 13)
REG32(I2CC_DMA_ADDR, 0x50)
REG32(I2CC_DMA_LEN, 0x54)
+/* DMA 64bits */
+REG32(I2CM_DMA_TX_ADDR_HI, 0x60)
+ FIELD(I2CM_DMA_TX_ADDR_HI, ADDR_HI, 0, 7)
+REG32(I2CM_DMA_RX_ADDR_HI, 0x64)
+ FIELD(I2CM_DMA_RX_ADDR_HI, ADDR_HI, 0, 7)
+REG32(I2CS_DMA_TX_ADDR_HI, 0x68)
+ FIELD(I2CS_DMA_TX_ADDR_HI, ADDR_HI, 0, 7)
+REG32(I2CS_DMA_RX_ADDR_HI, 0x6c)
+ FIELD(I2CS_DMA_RX_ADDR_HI, ADDR_HI, 0, 7)
struct AspeedI2CState;
@@ -239,12 +249,15 @@ struct AspeedI2CBus {
I2CSlave *slave;
MemoryRegion mr;
+ MemoryRegion mr_pool;
I2CBus *bus;
uint8_t id;
qemu_irq irq;
uint32_t regs[ASPEED_I2C_NEW_NUM_REG];
+ uint8_t pool[ASPEED_I2C_BUS_POOL_SIZE];
+ uint64_t dma_dram_offset;
};
struct AspeedI2CState {
@@ -257,7 +270,7 @@ struct AspeedI2CState {
uint32_t ctrl_global;
uint32_t new_clk_divider;
MemoryRegion pool_iomem;
- uint8_t pool[ASPEED_I2C_MAX_POOL_SIZE];
+ uint8_t share_pool[ASPEED_I2C_SHARE_POOL_SIZE];
AspeedI2CBus busses[ASPEED_I2C_NR_BUSSES];
MemoryRegion *dram_mr;
@@ -275,15 +288,19 @@ struct AspeedI2CClass {
uint8_t num_busses;
uint8_t reg_size;
+ uint32_t reg_gap_size;
uint8_t gap;
qemu_irq (*bus_get_irq)(AspeedI2CBus *);
uint64_t pool_size;
hwaddr pool_base;
+ uint32_t pool_gap_size;
uint8_t *(*bus_pool_base)(AspeedI2CBus *);
bool check_sram;
bool has_dma;
-
+ bool has_share_pool;
+ uint64_t mem_size;
+ bool has_dma64;
};
static inline bool aspeed_i2c_is_new_mode(AspeedI2CState *s)
@@ -363,14 +380,6 @@ static inline uint32_t aspeed_i2c_bus_dma_len_offset(AspeedI2CBus *bus)
return R_I2CD_DMA_LEN;
}
-static inline uint32_t aspeed_i2c_bus_dma_addr_offset(AspeedI2CBus *bus)
-{
- if (aspeed_i2c_is_new_mode(bus->controller)) {
- return R_I2CC_DMA_ADDR;
- }
- return R_I2CD_DMA_ADDR;
-}
-
static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus)
{
return SHARED_ARRAY_FIELD_EX32(bus->regs, aspeed_i2c_bus_ctrl_offset(bus),
diff --git a/include/hw/i2c/npcm7xx_smbus.h b/include/hw/i2c/npcm7xx_smbus.h
index dc45963..9c544c5 100644
--- a/include/hw/i2c/npcm7xx_smbus.h
+++ b/include/hw/i2c/npcm7xx_smbus.h
@@ -16,7 +16,7 @@
#ifndef NPCM7XX_SMBUS_H
#define NPCM7XX_SMBUS_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/i2c/i2c.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
diff --git a/include/hw/i2c/pm_smbus.h b/include/hw/i2c/pm_smbus.h
index 0d74207..dafe0df 100644
--- a/include/hw/i2c/pm_smbus.h
+++ b/include/hw/i2c/pm_smbus.h
@@ -1,7 +1,7 @@
#ifndef PM_SMBUS_H
#define PM_SMBUS_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/i2c/smbus_master.h"
#define PM_SMBUS_MAX_MSG_SIZE 32
diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h
index d6e8583..429278d 100644
--- a/include/hw/i386/apic_internal.h
+++ b/include/hw/i386/apic_internal.h
@@ -22,7 +22,7 @@
#define QEMU_APIC_INTERNAL_H
#include "cpu.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/timer.h"
#include "target/i386/cpu-qom.h"
#include "qom/object.h"
diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h
index 846c726..3988dec 100644
--- a/include/hw/i386/hostmem-epc.h
+++ b/include/hw/i386/hostmem-epc.h
@@ -12,7 +12,7 @@
#ifndef QEMU_HOSTMEM_EPC_H
#define QEMU_HOSTMEM_EPC_H
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc"
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index 1eb05c2..e95477e 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -45,8 +45,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE)
#define DMAR_REG_SIZE 0x230
#define VTD_HOST_AW_39BIT 39
#define VTD_HOST_AW_48BIT 48
-#define VTD_HOST_ADDRESS_WIDTH VTD_HOST_AW_39BIT
+#define VTD_HOST_ADDRESS_WIDTH VTD_HOST_AW_48BIT
#define VTD_HAW_MASK(aw) ((1ULL << (aw)) - 1)
+#define VTD_MGAW_FROM_CAP(cap) ((cap >> 16) & 0x3fULL)
#define DMAR_REPORT_F_INTR (1)
@@ -152,9 +153,10 @@ struct VTDIOTLBEntry {
uint64_t gfn;
uint16_t domain_id;
uint32_t pasid;
- uint64_t slpte;
+ uint64_t pte;
uint64_t mask;
uint8_t access_flags;
+ uint8_t pgtt;
};
/* VT-d Source-ID Qualifier types */
@@ -262,6 +264,7 @@ struct IntelIOMMUState {
bool caching_mode; /* RO - is cap CM enabled? */
bool scalable_mode; /* RO - is Scalable Mode supported? */
+ bool flts; /* RO - is stage-1 translation supported? */
bool snoop_control; /* RO - is SNP filed supported? */
dma_addr_t root; /* Current root table pointer */
@@ -305,6 +308,10 @@ struct IntelIOMMUState {
bool dma_drain; /* Whether DMA r/w draining enabled */
bool dma_translation; /* Whether DMA translation supported */
bool pasid; /* Whether to support PASID */
+ bool fs1gp; /* First Stage 1-GByte Page Support */
+
+ /* Transient Mapping, Reserved(0) since VTD spec revision 3.2 */
+ bool stale_tm;
/*
* Protects IOMMU states in general. Currently it protects the
diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h
index fad97a8..b9ac34a 100644
--- a/include/hw/i386/microvm.h
+++ b/include/hw/i386/microvm.h
@@ -78,6 +78,8 @@ struct MicrovmMachineClass {
X86MachineClass parent;
HotplugHandler *(*orig_hotplug_handler)(MachineState *machine,
DeviceState *dev);
+ void (*x86_load_linux)(X86MachineState *x86ms, FWCfgState *fw_cfg,
+ int acpi_data_size, bool pvh_enabled);
};
struct MicrovmMachineState {
diff --git a/include/hw/i386/nitro_enclave.h b/include/hw/i386/nitro_enclave.h
new file mode 100644
index 0000000..885163f
--- /dev/null
+++ b/include/hw/i386/nitro_enclave.h
@@ -0,0 +1,62 @@
+/*
+ * AWS nitro-enclave machine
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef HW_I386_NITRO_ENCLAVE_H
+#define HW_I386_NITRO_ENCLAVE_H
+
+#include "crypto/hash.h"
+#include "hw/i386/microvm.h"
+#include "qom/object.h"
+#include "hw/virtio/virtio-nsm.h"
+
+/* Machine type options */
+#define NITRO_ENCLAVE_VSOCK_CHARDEV_ID "vsock"
+#define NITRO_ENCLAVE_ID "id"
+#define NITRO_ENCLAVE_PARENT_ROLE "parent-role"
+#define NITRO_ENCLAVE_PARENT_ID "parent-id"
+
+struct NitroEnclaveMachineClass {
+ MicrovmMachineClass parent;
+
+ void (*parent_init)(MachineState *state);
+ void (*parent_reset)(MachineState *machine, ResetType type);
+};
+
+struct NitroEnclaveMachineState {
+ MicrovmMachineState parent;
+
+ /* Machine type options */
+ char *vsock;
+ /* Enclave identifier */
+ char *id;
+ /* Parent instance IAM role ARN */
+ char *parent_role;
+ /* Parent instance identifier */
+ char *parent_id;
+
+ /* Machine state */
+ VirtIONSM *vnsm;
+
+ /* kernel + ramdisks + cmdline SHA384 hash */
+ uint8_t image_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+ /* kernel + boot ramdisk + cmdline SHA384 hash */
+ uint8_t bootstrap_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+ /* application ramdisk(s) SHA384 hash */
+ uint8_t app_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+ /* certificate fingerprint SHA384 hash */
+ uint8_t fingerprint_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+ bool signature_found;
+};
+
+#define TYPE_NITRO_ENCLAVE_MACHINE MACHINE_TYPE_NAME("nitro-enclave")
+OBJECT_DECLARE_TYPE(NitroEnclaveMachineState, NitroEnclaveMachineClass,
+ NITRO_ENCLAVE_MACHINE)
+
+#endif
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 4e55d7e..79b72c5 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -107,7 +107,6 @@ struct PCMachineClass {
/* RAM / address space compat: */
bool gigabyte_align;
bool has_reserved_memory;
- bool broken_reserved_end;
bool enforce_amd_1tb_hole;
bool isa_bios_alias;
@@ -215,6 +214,15 @@ void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size);
/* sgx.c */
void pc_machine_init_sgx_epc(PCMachineState *pcms);
+extern GlobalProperty pc_compat_10_0[];
+extern const size_t pc_compat_10_0_len;
+
+extern GlobalProperty pc_compat_9_2[];
+extern const size_t pc_compat_9_2_len;
+
+extern GlobalProperty pc_compat_9_1[];
+extern const size_t pc_compat_9_1_len;
+
extern GlobalProperty pc_compat_9_0[];
extern const size_t pc_compat_9_0_len;
@@ -290,17 +298,9 @@ extern const size_t pc_compat_2_7_len;
extern GlobalProperty pc_compat_2_6[];
extern const size_t pc_compat_2_6_len;
-extern GlobalProperty pc_compat_2_5[];
-extern const size_t pc_compat_2_5_len;
-
-extern GlobalProperty pc_compat_2_4[];
-extern const size_t pc_compat_2_4_len;
-
-extern GlobalProperty pc_compat_2_3[];
-extern const size_t pc_compat_2_3_len;
-
#define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \
- static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \
+ static void pc_machine_##suffix##_class_init(ObjectClass *oc, \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
optsfn(mc); \
@@ -313,11 +313,11 @@ extern const size_t pc_compat_2_3_len;
}; \
static void pc_machine_init_##suffix(void) \
{ \
- type_register(&pc_machine_type_##suffix); \
+ type_register_static(&pc_machine_type_##suffix); \
} \
type_init(pc_machine_init_##suffix)
-#define DEFINE_PC_VER_MACHINE(namesym, namestr, initfn, ...) \
+#define DEFINE_PC_VER_MACHINE(namesym, namestr, initfn, isdefault, malias, ...) \
static void MACHINE_VER_SYM(init, namesym, __VA_ARGS__)( \
MachineState *machine) \
{ \
@@ -325,12 +325,14 @@ extern const size_t pc_compat_2_3_len;
} \
static void MACHINE_VER_SYM(class_init, namesym, __VA_ARGS__)( \
ObjectClass *oc, \
- void *data) \
+ const void *data) \
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
MACHINE_VER_SYM(options, namesym, __VA_ARGS__)(mc); \
mc->init = MACHINE_VER_SYM(init, namesym, __VA_ARGS__); \
MACHINE_VER_DEPRECATION(__VA_ARGS__); \
+ mc->is_default = isdefault; \
+ mc->alias = malias; \
} \
static const TypeInfo MACHINE_VER_SYM(info, namesym, __VA_ARGS__) = \
{ \
@@ -341,7 +343,7 @@ extern const size_t pc_compat_2_3_len;
static void MACHINE_VER_SYM(register, namesym, __VA_ARGS__)(void) \
{ \
MACHINE_VER_DELETION(__VA_ARGS__); \
- type_register(&MACHINE_VER_SYM(info, namesym, __VA_ARGS__)); \
+ type_register_static(&MACHINE_VER_SYM(info, namesym, __VA_ARGS__)); \
} \
type_init(MACHINE_VER_SYM(register, namesym, __VA_ARGS__));
diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h
index 3e00efd..41d55da 100644
--- a/include/hw/i386/sgx-epc.h
+++ b/include/hw/i386/sgx-epc.h
@@ -58,6 +58,7 @@ typedef struct SGXEPCState {
int nr_sections;
} SGXEPCState;
+bool check_sgx_support(void);
bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size);
void sgx_epc_build_srat(GArray *table_data);
diff --git a/include/hw/i386/tdvf.h b/include/hw/i386/tdvf.h
new file mode 100644
index 0000000..e75c8d1
--- /dev/null
+++ b/include/hw/i386/tdvf.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2025 Intel Corporation
+ * Author: Isaku Yamahata <isaku.yamahata at gmail.com>
+ * <isaku.yamahata at intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_I386_TDVF_H
+#define HW_I386_TDVF_H
+
+#include "qemu/osdep.h"
+
+#define TDVF_SECTION_TYPE_BFV 0
+#define TDVF_SECTION_TYPE_CFV 1
+#define TDVF_SECTION_TYPE_TD_HOB 2
+#define TDVF_SECTION_TYPE_TEMP_MEM 3
+
+#define TDVF_SECTION_ATTRIBUTES_MR_EXTEND (1U << 0)
+#define TDVF_SECTION_ATTRIBUTES_PAGE_AUG (1U << 1)
+
+typedef struct TdxFirmwareEntry {
+ uint32_t data_offset;
+ uint32_t data_len;
+ uint64_t address;
+ uint64_t size;
+ uint32_t type;
+ uint32_t attributes;
+
+ void *mem_ptr;
+} TdxFirmwareEntry;
+
+typedef struct TdxFirmware {
+ void *mem_ptr;
+
+ uint32_t nr_entries;
+ TdxFirmwareEntry *entries;
+} TdxFirmware;
+
+#define for_each_tdx_fw_entry(fw, e) \
+ for (e = (fw)->entries; e != (fw)->entries + (fw)->nr_entries; e++)
+
+int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size);
+
+#endif /* HW_I386_TDVF_H */
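The new tdvf.h above exposes tdvf_parse_metadata() and a for_each_tdx_fw_entry() iterator. A hypothetical caller, with the error convention and the section handling only assumed for illustration:

    static void foo_scan_tdvf(void *flash_ptr, int flash_size)
    {
        TdxFirmware fw = { 0 };
        TdxFirmwareEntry *entry;

        /* Assumed: non-zero return means the metadata was not found or invalid. */
        if (tdvf_parse_metadata(&fw, flash_ptr, flash_size)) {
            return;
        }

        for_each_tdx_fw_entry(&fw, entry) {
            if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
                /* e.g. place the TD HOB at entry->address (entry->size bytes) */
            }
        }
    }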
diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h
index dff49fc..f6380f1 100644
--- a/include/hw/i386/topology.h
+++ b/include/hw/i386/topology.h
@@ -39,7 +39,7 @@
* CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width().
*/
-
+#include "qapi/qapi-types-machine-common.h"
#include "qemu/bitops.h"
/*
@@ -62,21 +62,7 @@ typedef struct X86CPUTopoInfo {
unsigned threads_per_core;
} X86CPUTopoInfo;
-/*
- * CPUTopoLevel is the general i386 topology hierarchical representation,
- * ordered by increasing hierarchical relationship.
- * Its enumeration value is not bound to the type value of Intel (CPUID[0x1F])
- * or AMD (CPUID[0x80000026]).
- */
-enum CPUTopoLevel {
- CPU_TOPO_LEVEL_INVALID,
- CPU_TOPO_LEVEL_SMT,
- CPU_TOPO_LEVEL_CORE,
- CPU_TOPO_LEVEL_MODULE,
- CPU_TOPO_LEVEL_DIE,
- CPU_TOPO_LEVEL_PACKAGE,
- CPU_TOPO_LEVEL_MAX,
-};
+#define CPU_TOPOLOGY_LEVEL_INVALID CPU_TOPOLOGY_LEVEL__MAX
/* Return the bit width needed for 'count' IDs */
static unsigned apicid_bitwidth_for_count(unsigned count)
@@ -135,9 +121,10 @@ static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
}
/*
- * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
+ * Make APIC ID for the CPU based on topology and IDs of each topology level.
*
- * The caller must make sure core_id < nr_cores and smt_id < nr_threads.
+ * The caller must make sure the ID of each level doesn't exceed the width of
+ * the level.
*/
static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info,
const X86CPUTopoIDs *topo_ids)
@@ -212,8 +199,33 @@ static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info,
*/
static inline bool x86_has_extended_topo(unsigned long *topo_bitmap)
{
- return test_bit(CPU_TOPO_LEVEL_MODULE, topo_bitmap) ||
- test_bit(CPU_TOPO_LEVEL_DIE, topo_bitmap);
+ return test_bit(CPU_TOPOLOGY_LEVEL_MODULE, topo_bitmap) ||
+ test_bit(CPU_TOPOLOGY_LEVEL_DIE, topo_bitmap);
+}
+
+static inline unsigned x86_module_per_pkg(X86CPUTopoInfo *topo_info)
+{
+ return topo_info->modules_per_die * topo_info->dies_per_pkg;
+}
+
+static inline unsigned x86_cores_per_pkg(X86CPUTopoInfo *topo_info)
+{
+ return topo_info->cores_per_module * x86_module_per_pkg(topo_info);
+}
+
+static inline unsigned x86_threads_per_pkg(X86CPUTopoInfo *topo_info)
+{
+ return topo_info->threads_per_core * x86_cores_per_pkg(topo_info);
+}
+
+static inline unsigned x86_threads_per_module(X86CPUTopoInfo *topo_info)
+{
+ return topo_info->threads_per_core * topo_info->cores_per_module;
+}
+
+static inline unsigned x86_threads_per_die(X86CPUTopoInfo *topo_info)
+{
+ return x86_threads_per_module(topo_info) * topo_info->modules_per_die;
}
#endif /* HW_I386_TOPOLOGY_H */
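The helpers added to topology.h above are plain products down the topology tree. For a concrete reading, with illustrative counts that are not values from this patch:

    static const X86CPUTopoInfo topo = {
        .dies_per_pkg     = 2,
        .modules_per_die  = 2,
        .cores_per_module = 4,
        .threads_per_core = 2,
    };
    /* x86_module_per_pkg(&topo)     == 2 * 2  == 4  */
    /* x86_cores_per_pkg(&topo)      == 4 * 4  == 16 */
    /* x86_threads_per_module(&topo) == 2 * 4  == 8  */
    /* x86_threads_per_die(&topo)    == 8 * 2  == 16 */
    /* x86_threads_per_pkg(&topo)    == 2 * 16 == 32 */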
diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h
index d43cb39..fc460b8 100644
--- a/include/hw/i386/x86.h
+++ b/include/hw/i386/x86.h
@@ -18,7 +18,7 @@
#define HW_I386_X86_H
#include "exec/hwaddr.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/boards.h"
#include "hw/i386/topology.h"
@@ -27,13 +27,8 @@
#include "qom/object.h"
struct X86MachineClass {
- /*< private >*/
MachineClass parent;
- /*< public >*/
-
- /* TSC rate migration: */
- bool save_tsc_khz;
/* use DMA capable linuxboot option rom */
bool fwcfg_dma_enabled;
/* CPU and apic information: */
diff --git a/include/hw/ide/ahci-pci.h b/include/hw/ide/ahci-pci.h
index c2ee616..face1a9 100644
--- a/include/hw/ide/ahci-pci.h
+++ b/include/hw/ide/ahci-pci.h
@@ -9,6 +9,7 @@
#include "qom/object.h"
#include "hw/ide/ahci.h"
#include "hw/pci/pci_device.h"
+#include "hw/irq.h"
#define TYPE_ICH9_AHCI "ich9-ahci"
OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI)
@@ -17,6 +18,7 @@ struct AHCIPCIState {
PCIDevice parent_obj;
AHCIState ahci;
+ IRQState irq;
};
#endif
diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h
index ba31e75..cd07b87 100644
--- a/include/hw/ide/ahci.h
+++ b/include/hw/ide/ahci.h
@@ -24,7 +24,7 @@
#ifndef HW_IDE_AHCI_H
#define HW_IDE_AHCI_H
-#include "exec/memory.h"
+#include "system/memory.h"
typedef struct AHCIDevice AHCIDevice;
@@ -37,8 +37,6 @@ typedef struct AHCIControlRegs {
} AHCIControlRegs;
typedef struct AHCIState {
- DeviceState *container;
-
AHCIDevice *dev;
AHCIControlRegs control_regs;
MemoryRegion mem;
diff --git a/include/hw/ide/ide-bus.h b/include/hw/ide/ide-bus.h
index 4841a7d..121b455 100644
--- a/include/hw/ide/ide-bus.h
+++ b/include/hw/ide/ide-bus.h
@@ -1,7 +1,7 @@
#ifndef HW_IDE_BUS_H
#define HW_IDE_BUS_H
-#include "exec/ioport.h"
+#include "system/ioport.h"
#include "hw/ide/ide-dev.h"
#include "hw/ide/ide-dma.h"
diff --git a/include/hw/ide/ide-dev.h b/include/hw/ide/ide-dev.h
index 9a0d71d..92e8868 100644
--- a/include/hw/ide/ide-dev.h
+++ b/include/hw/ide/ide-dev.h
@@ -20,7 +20,7 @@
#ifndef IDE_DEV_H
#define IDE_DEV_H
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/block/block.h"
diff --git a/include/hw/input/lm832x.h b/include/hw/input/lm832x.h
deleted file mode 100644
index e0e5d5e..0000000
--- a/include/hw/input/lm832x.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HW_INPUT_LM832X_H
-#define HW_INPUT_LM832X_H
-
-#define TYPE_LM8323 "lm8323"
-
-void lm832x_key_event(DeviceState *dev, int key, int state);
-
-#endif
diff --git a/include/hw/input/tsc2xxx.h b/include/hw/input/tsc2xxx.h
deleted file mode 100644
index 00eca17..0000000
--- a/include/hw/input/tsc2xxx.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * TI touchscreen controller
- *
- * Copyright (c) 2006 Andrzej Zaborowski
- * Copyright (C) 2008 Nokia Corporation
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_INPUT_TSC2XXX_H
-#define HW_INPUT_TSC2XXX_H
-
-typedef struct MouseTransformInfo {
- /* Touchscreen resolution */
- int x;
- int y;
- /* Calibration values as used/generated by tslib */
- int a[7];
-} MouseTransformInfo;
-
-typedef struct uWireSlave {
- uint16_t (*receive)(void *opaque);
- void (*send)(void *opaque, uint16_t data);
- void *opaque;
-} uWireSlave;
-
-/* tsc210x.c */
-uWireSlave *tsc2102_init(qemu_irq pint);
-uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav);
-I2SCodec *tsc210x_codec(uWireSlave *chip);
-uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len);
-void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info);
-void tsc210x_key_event(uWireSlave *chip, int key, int down);
-
-/* tsc2005.c */
-void *tsc2005_init(qemu_irq pintdav);
-uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len);
-void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info);
-
-#endif
diff --git a/include/hw/intc/arm_gic.h b/include/hw/intc/arm_gic.h
index 48f6a51..be923f7 100644
--- a/include/hw/intc/arm_gic.h
+++ b/include/hw/intc/arm_gic.h
@@ -27,6 +27,9 @@
* implement the security extensions
* + QOM property "has-virtualization-extensions": set true if the GIC should
* implement the virtualization extensions
+ * + QOM property "first-cpu-index": index of the first cpu attached to the
+ * GIC (default 0). The CPUs connected to the GIC are assumed to be
+ * first-cpu-index, first-cpu-index + 1, ... first-cpu-index + num-cpu - 1.
* + unnamed GPIO inputs: (where P is number of SPIs, i.e. num-irq - 32)
* [0..P-1] SPIs
* [P..P+31] PPIs for CPU 0
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index 97fea41..93a3cc2 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -129,6 +129,8 @@ struct GICState {
uint32_t num_lrs;
uint32_t num_cpu;
+ /* cpu_index of the first CPU attached to this GIC. */
+ uint32_t first_cpu_index;
MemoryRegion iomem; /* Distributor */
/* This is just so we can have an opaque pointer which identifies
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index cd09bee..a3d6a0e 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -51,13 +51,13 @@
/* Maximum number of list registers (architectural limit) */
#define GICV3_LR_MAX 16
-/* For some distributor fields we want to model the array of 32-bit
+/*
+ * For some distributor fields we want to model the array of 32-bit
* register values which hold various bitmaps corresponding to enabled,
- * pending, etc bits. These macros and functions facilitate that; the
- * APIs are generally modelled on the generic bitmap.h functions
- * (which are unsuitable here because they use 'unsigned long' as the
- * underlying storage type, which is very awkward when you need to
- * access the data as 32-bit values.)
+ * pending, etc bits. We use the set_bit32() etc family of functions
+ * from bitops.h for this. For a few cases we need to implement some
+ * extra operations.
+ *
* Each bitmap contains a bit for each interrupt. Although there is
* space for the PPIs and SGIs, those bits (the first 32) are never
* used as that state lives in the redistributor. The unused bits are
@@ -65,39 +65,13 @@
* avoids bugs where we forget to subtract GIC_INTERNAL from an
* interrupt number.
*/
-#define GICV3_BMP_SIZE DIV_ROUND_UP(GICV3_MAXIRQ, 32)
-
-#define GIC_DECLARE_BITMAP(name) \
- uint32_t name[GICV3_BMP_SIZE]
-
-#define GIC_BIT_MASK(nr) (1U << ((nr) % 32))
-#define GIC_BIT_WORD(nr) ((nr) / 32)
-
-static inline void gic_bmp_set_bit(int nr, uint32_t *addr)
-{
- uint32_t mask = GIC_BIT_MASK(nr);
- uint32_t *p = addr + GIC_BIT_WORD(nr);
-
- *p |= mask;
-}
-
-static inline void gic_bmp_clear_bit(int nr, uint32_t *addr)
-{
- uint32_t mask = GIC_BIT_MASK(nr);
- uint32_t *p = addr + GIC_BIT_WORD(nr);
-
- *p &= ~mask;
-}
-
-static inline int gic_bmp_test_bit(int nr, const uint32_t *addr)
-{
- return 1U & (addr[GIC_BIT_WORD(nr)] >> (nr & 31));
-}
+#define GIC_DECLARE_BITMAP(name) DECLARE_BITMAP32(name, GICV3_MAXIRQ)
+#define GICV3_BMP_SIZE BITS_TO_U32S(GICV3_MAXIRQ)
static inline void gic_bmp_replace_bit(int nr, uint32_t *addr, int val)
{
- uint32_t mask = GIC_BIT_MASK(nr);
- uint32_t *p = addr + GIC_BIT_WORD(nr);
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
*p &= ~mask;
*p |= (val & 1U) << (nr % 32);
@@ -106,7 +80,7 @@ static inline void gic_bmp_replace_bit(int nr, uint32_t *addr, int val)
/* Return a pointer to the 32-bit word containing the specified bit. */
static inline uint32_t *gic_bmp_ptr32(uint32_t *addr, int nr)
{
- return addr + GIC_BIT_WORD(nr);
+ return addr + BIT32_WORD(nr);
}
typedef struct GICv3State GICv3State;
@@ -301,15 +275,15 @@ struct GICv3State {
#define GICV3_BITMAP_ACCESSORS(BMP) \
static inline void gicv3_gicd_##BMP##_set(GICv3State *s, int irq) \
{ \
- gic_bmp_set_bit(irq, s->BMP); \
+ set_bit32(irq, s->BMP); \
} \
static inline int gicv3_gicd_##BMP##_test(GICv3State *s, int irq) \
{ \
- return gic_bmp_test_bit(irq, s->BMP); \
+ return test_bit32(irq, s->BMP); \
} \
static inline void gicv3_gicd_##BMP##_clear(GICv3State *s, int irq) \
{ \
- gic_bmp_clear_bit(irq, s->BMP); \
+ clear_bit32(irq, s->BMP); \
} \
static inline void gicv3_gicd_##BMP##_replace(GICv3State *s, \
int irq, int value) \
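The arm_gicv3_common.h hunk above drops the local gic_bmp_* helpers in favour of the 32-bit bitmap family from qemu/bitops.h named in the new comment. A standalone sketch of the pattern the GICD accessors now expand to; the function, bit number, and zero-initialisation of the declared array are placeholders/assumptions:

    static void foo_bitmap_demo(void)
    {
        /* Assumed: DECLARE_BITMAP32 declares uint32_t enabled[BITS_TO_U32S(GICV3_MAXIRQ)] */
        DECLARE_BITMAP32(enabled, GICV3_MAXIRQ) = { 0 };

        set_bit32(45, enabled);
        if (test_bit32(45, enabled)) {
            clear_bit32(45, enabled);
        }
    }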
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index 89fe8ae..7b9964f 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -189,21 +189,7 @@ int armv7m_nvic_raw_execution_priority(NVICState *s);
* @secure: the security state to test
* This corresponds to the pseudocode IsReqExecPriNeg().
*/
-#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure);
-#else
-static inline bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure)
-{
- return false;
-}
-#endif
-#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_can_take_pending_exception(NVICState *s);
-#else
-static inline bool armv7m_nvic_can_take_pending_exception(NVICState *s)
-{
- return true;
-}
-#endif
#endif
diff --git a/include/hw/intc/aspeed_intc.h b/include/hw/intc/aspeed_intc.h
index 18cb434..5128838 100644
--- a/include/hw/intc/aspeed_intc.h
+++ b/include/hw/intc/aspeed_intc.h
@@ -14,10 +14,24 @@
#define TYPE_ASPEED_INTC "aspeed.intc"
#define TYPE_ASPEED_2700_INTC TYPE_ASPEED_INTC "-ast2700"
+#define TYPE_ASPEED_2700_INTCIO TYPE_ASPEED_INTC "io-ast2700"
+#define TYPE_ASPEED_2700SSP_INTC TYPE_ASPEED_INTC "-ast2700ssp"
+#define TYPE_ASPEED_2700SSP_INTCIO TYPE_ASPEED_INTC "io-ast2700ssp"
+#define TYPE_ASPEED_2700TSP_INTC TYPE_ASPEED_INTC "-ast2700tsp"
+#define TYPE_ASPEED_2700TSP_INTCIO TYPE_ASPEED_INTC "io-ast2700tsp"
+
OBJECT_DECLARE_TYPE(AspeedINTCState, AspeedINTCClass, ASPEED_INTC)
-#define ASPEED_INTC_NR_REGS (0x2000 >> 2)
-#define ASPEED_INTC_NR_INTS 9
+#define ASPEED_INTC_MAX_INPINS 10
+#define ASPEED_INTC_MAX_OUTPINS 19
+
+typedef struct AspeedINTCIRQ {
+ int inpin_idx;
+ int outpin_idx;
+ int num_outpins;
+ uint32_t enable_reg;
+ uint32_t status_reg;
+} AspeedINTCIRQ;
struct AspeedINTCState {
/*< private >*/
@@ -25,20 +39,29 @@ struct AspeedINTCState {
/*< public >*/
MemoryRegion iomem;
- uint32_t regs[ASPEED_INTC_NR_REGS];
- OrIRQState orgates[ASPEED_INTC_NR_INTS];
- qemu_irq output_pins[ASPEED_INTC_NR_INTS];
+ MemoryRegion iomem_container;
+
+ uint32_t *regs;
+ OrIRQState orgates[ASPEED_INTC_MAX_INPINS];
+ qemu_irq output_pins[ASPEED_INTC_MAX_OUTPINS];
- uint32_t enable[ASPEED_INTC_NR_INTS];
- uint32_t mask[ASPEED_INTC_NR_INTS];
- uint32_t pending[ASPEED_INTC_NR_INTS];
+ uint32_t enable[ASPEED_INTC_MAX_INPINS];
+ uint32_t mask[ASPEED_INTC_MAX_INPINS];
+ uint32_t pending[ASPEED_INTC_MAX_INPINS];
};
struct AspeedINTCClass {
SysBusDeviceClass parent_class;
uint32_t num_lines;
- uint32_t num_ints;
+ uint32_t num_inpins;
+ uint32_t num_outpins;
+ uint64_t mem_size;
+ uint64_t nr_regs;
+ uint64_t reg_offset;
+ const MemoryRegionOps *reg_ops;
+ const AspeedINTCIRQ *irq_table;
+ int irq_table_count;
};
#endif /* ASPEED_INTC_H */
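The aspeed_intc.h rework above parameterises the controller with an AspeedINTCIRQ routing table. An entirely made-up entry, just to show how the new fields relate to one another:

    /* Placeholder values: input pin 0 fans out to one output pin and is
     * controlled by an enable/status register pair at these offsets. */
    static const AspeedINTCIRQ foo_intc_irqs[] = {
        { .inpin_idx = 0, .outpin_idx = 0, .num_outpins = 1,
          .enable_reg = 0x00, .status_reg = 0x04 },
    };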
diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h
index 626a37d..9be1d73 100644
--- a/include/hw/intc/loongarch_extioi.h
+++ b/include/hw/intc/loongarch_extioi.h
@@ -5,85 +5,29 @@
* Copyright (C) 2021 Loongson Technology Corporation Limited
*/
-#include "hw/sysbus.h"
-#include "hw/loongarch/virt.h"
-
#ifndef LOONGARCH_EXTIOI_H
#define LOONGARCH_EXTIOI_H
-#define LS3A_INTC_IP 8
-#define EXTIOI_IRQS (256)
-#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
-/* irq from EXTIOI is routed to no more than 4 cpus */
-#define EXTIOI_CPUS (4)
-/* map to ipnum per 32 irqs */
-#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
-#define EXTIOI_IRQS_COREMAP_SIZE 256
-#define EXTIOI_IRQS_NODETYPE_COUNT 16
-#define EXTIOI_IRQS_GROUP_COUNT 8
-
-#define APIC_OFFSET 0x400
-#define APIC_BASE (0x1000ULL + APIC_OFFSET)
+#include "hw/intc/loongarch_extioi_common.h"
-#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET)
-#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET)
-#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET)
-#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET)
-#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET)
-#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET)
-#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET)
-#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET)
-#define EXTIOI_ISR_START (0x700 - APIC_OFFSET)
-#define EXTIOI_ISR_END (0x720 - APIC_OFFSET)
-#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET)
-#define EXTIOI_COREISR_END (0xB20 - APIC_OFFSET)
-#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET)
-#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET)
-#define EXTIOI_SIZE 0x800
+#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi"
+OBJECT_DECLARE_TYPE(LoongArchExtIOIState, LoongArchExtIOIClass, LOONGARCH_EXTIOI)
-#define EXTIOI_VIRT_BASE (0x40000000)
-#define EXTIOI_VIRT_SIZE (0x1000)
-#define EXTIOI_VIRT_FEATURES (0x0)
-#define EXTIOI_HAS_VIRT_EXTENSION (0)
-#define EXTIOI_HAS_ENABLE_OPTION (1)
-#define EXTIOI_HAS_INT_ENCODE (2)
-#define EXTIOI_HAS_CPU_ENCODE (3)
-#define EXTIOI_VIRT_HAS_FEATURES (BIT(EXTIOI_HAS_VIRT_EXTENSION) \
- | BIT(EXTIOI_HAS_ENABLE_OPTION) \
- | BIT(EXTIOI_HAS_CPU_ENCODE))
-#define EXTIOI_VIRT_CONFIG (0x4)
-#define EXTIOI_ENABLE (1)
-#define EXTIOI_ENABLE_INT_ENCODE (2)
-#define EXTIOI_ENABLE_CPU_ENCODE (3)
-#define EXTIOI_VIRT_COREMAP_START (0x40)
-#define EXTIOI_VIRT_COREMAP_END (0x240)
+struct LoongArchExtIOIState {
+ LoongArchExtIOICommonState parent_obj;
+ int dev_fd;
+};
-typedef struct ExtIOICore {
- uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT];
- DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS);
- qemu_irq parent_irq[LS3A_INTC_IP];
-} ExtIOICore;
+struct LoongArchExtIOIClass {
+ LoongArchExtIOICommonClass parent_class;
-#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi"
-OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI)
-struct LoongArchExtIOI {
- SysBusDevice parent_obj;
- uint32_t num_cpu;
- uint32_t features;
- uint32_t status;
- /* hardware state */
- uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2];
- uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT];
- uint32_t isr[EXTIOI_IRQS / 32];
- uint32_t enable[EXTIOI_IRQS / 32];
- uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4];
- uint32_t coremap[EXTIOI_IRQS / 4];
- uint32_t sw_pending[EXTIOI_IRQS / 32];
- uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
- uint8_t sw_coremap[EXTIOI_IRQS];
- qemu_irq irq[EXTIOI_IRQS];
- ExtIOICore *cpu;
- MemoryRegion extioi_system_mem;
- MemoryRegion virt_extend;
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ ResettablePhases parent_phases;
};
+
+void kvm_extioi_realize(DeviceState *dev, Error **errp);
+int kvm_extioi_get(void *opaque);
+int kvm_extioi_put(void *opaque, int version_id);
+
#endif /* LOONGARCH_EXTIOI_H */
diff --git a/include/hw/intc/loongarch_extioi_common.h b/include/hw/intc/loongarch_extioi_common.h
new file mode 100644
index 0000000..dca25ff
--- /dev/null
+++ b/include/hw/intc/loongarch_extioi_common.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 3A5000 ext interrupt controller definitions
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef LOONGARCH_EXTIOI_COMMON_H
+#define LOONGARCH_EXTIOI_COMMON_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/loongarch/virt.h"
+
+#define LS3A_INTC_IP 8
+#define EXTIOI_IRQS (256)
+#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
+/* irq from EXTIOI is routed to no more than 4 cpus */
+#define EXTIOI_CPUS (4)
+/* map to ipnum per 32 irqs */
+#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
+#define EXTIOI_IRQS_COREMAP_SIZE 256
+#define EXTIOI_IRQS_NODETYPE_COUNT 16
+#define EXTIOI_IRQS_GROUP_COUNT 8
+
+#define APIC_OFFSET 0x400
+#define APIC_BASE (0x1000ULL + APIC_OFFSET)
+#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET)
+#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET)
+#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET)
+#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET)
+#define EXTIOI_ISR_START (0x700 - APIC_OFFSET)
+#define EXTIOI_ISR_END (0x720 - APIC_OFFSET)
+#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET)
+#define EXTIOI_COREISR_END (0x820 - APIC_OFFSET)
+#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET)
+#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET)
+#define EXTIOI_SIZE 0x800
+
+#define EXTIOI_VIRT_BASE (0x40000000)
+#define EXTIOI_VIRT_SIZE (0x1000)
+#define EXTIOI_VIRT_FEATURES (0x0)
+#define EXTIOI_HAS_VIRT_EXTENSION (0)
+#define EXTIOI_HAS_ENABLE_OPTION (1)
+#define EXTIOI_HAS_INT_ENCODE (2)
+#define EXTIOI_HAS_CPU_ENCODE (3)
+#define EXTIOI_VIRT_HAS_FEATURES (BIT(EXTIOI_HAS_VIRT_EXTENSION) \
+ | BIT(EXTIOI_HAS_ENABLE_OPTION) \
+ | BIT(EXTIOI_HAS_CPU_ENCODE))
+#define EXTIOI_VIRT_CONFIG (0x4)
+#define EXTIOI_ENABLE (1)
+#define EXTIOI_ENABLE_INT_ENCODE (2)
+#define EXTIOI_ENABLE_CPU_ENCODE (3)
+#define EXTIOI_VIRT_COREMAP_START (0x40)
+#define EXTIOI_VIRT_COREMAP_END (0x240)
+
+#define TYPE_LOONGARCH_EXTIOI_COMMON "loongarch_extioi_common"
+OBJECT_DECLARE_TYPE(LoongArchExtIOICommonState,
+ LoongArchExtIOICommonClass, LOONGARCH_EXTIOI_COMMON)
+
+typedef struct ExtIOICore {
+ uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT];
+ DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS);
+ qemu_irq parent_irq[LS3A_INTC_IP];
+ uint64_t arch_id;
+ CPUState *cpu;
+} ExtIOICore;
+
+struct LoongArchExtIOICommonState {
+ SysBusDevice parent_obj;
+ uint32_t num_cpu;
+ uint32_t features;
+ uint32_t status;
+ /* hardware state */
+ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2];
+ uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT];
+ uint32_t isr[EXTIOI_IRQS / 32];
+ uint32_t enable[EXTIOI_IRQS / 32];
+ uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4];
+ uint32_t coremap[EXTIOI_IRQS / 4];
+ uint32_t sw_pending[EXTIOI_IRQS / 32];
+ uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
+ uint8_t sw_coremap[EXTIOI_IRQS];
+ qemu_irq irq[EXTIOI_IRQS];
+ ExtIOICore *cpu;
+ MemoryRegion extioi_system_mem;
+ MemoryRegion virt_extend;
+};
+
+struct LoongArchExtIOICommonClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+ ResettablePhases parent_phases;
+ int (*pre_save)(void *s);
+ int (*post_load)(void *s, int version_id);
+};
+#endif /* LOONGARCH_EXTIOI_H */
diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h
new file mode 100644
index 0000000..5175a6b
--- /dev/null
+++ b/include/hw/intc/loongarch_ipi.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch IPI interrupt header file
+ *
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_IPI_H
+#define HW_LOONGARCH_IPI_H
+
+#include "qom/object.h"
+#include "hw/intc/loongson_ipi_common.h"
+
+#define TYPE_LOONGARCH_IPI "loongarch_ipi"
+OBJECT_DECLARE_TYPE(LoongarchIPIState, LoongarchIPIClass, LOONGARCH_IPI)
+
+struct LoongarchIPIState {
+ LoongsonIPICommonState parent_obj;
+ int dev_fd;
+};
+
+struct LoongarchIPIClass {
+ LoongsonIPICommonClass parent_class;
+ DeviceRealize parent_realize;
+ ResettablePhases parent_phases;
+};
+
+void kvm_ipi_realize(DeviceState *dev, Error **errp);
+int kvm_ipi_get(void *opaque);
+int kvm_ipi_put(void *opaque, int version_id);
+
+#endif
diff --git a/include/hw/intc/loongarch_pch_pic.h b/include/hw/intc/loongarch_pch_pic.h
index d5437e8..a46b6f8 100644
--- a/include/hw/intc/loongarch_pch_pic.h
+++ b/include/hw/intc/loongarch_pch_pic.h
@@ -5,65 +5,29 @@
* Copyright (c) 2021 Loongson Technology Corporation Limited
*/
-#include "hw/sysbus.h"
+#ifndef HW_LOONGARCH_PCH_PIC_H
+#define HW_LOONGARCH_PCH_PIC_H
-#define TYPE_LOONGARCH_PCH_PIC "loongarch_pch_pic"
-#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PCH_PIC#name
-OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHPIC, LOONGARCH_PCH_PIC)
+#include "hw/intc/loongarch_pic_common.h"
-#define PCH_PIC_INT_ID_VAL 0x7000000UL
-#define PCH_PIC_INT_ID_VER 0x1UL
+#define TYPE_LOONGARCH_PIC "loongarch_pic"
+#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PIC#name
+OBJECT_DECLARE_TYPE(LoongarchPICState, LoongarchPICClass, LOONGARCH_PIC)
-#define PCH_PIC_INT_ID_LO 0x00
-#define PCH_PIC_INT_ID_HI 0x04
-#define PCH_PIC_INT_MASK_LO 0x20
-#define PCH_PIC_INT_MASK_HI 0x24
-#define PCH_PIC_HTMSI_EN_LO 0x40
-#define PCH_PIC_HTMSI_EN_HI 0x44
-#define PCH_PIC_INT_EDGE_LO 0x60
-#define PCH_PIC_INT_EDGE_HI 0x64
-#define PCH_PIC_INT_CLEAR_LO 0x80
-#define PCH_PIC_INT_CLEAR_HI 0x84
-#define PCH_PIC_AUTO_CTRL0_LO 0xc0
-#define PCH_PIC_AUTO_CTRL0_HI 0xc4
-#define PCH_PIC_AUTO_CTRL1_LO 0xe0
-#define PCH_PIC_AUTO_CTRL1_HI 0xe4
-#define PCH_PIC_ROUTE_ENTRY_OFFSET 0x100
-#define PCH_PIC_ROUTE_ENTRY_END 0x13f
-#define PCH_PIC_HTMSI_VEC_OFFSET 0x200
-#define PCH_PIC_HTMSI_VEC_END 0x23f
-#define PCH_PIC_INT_STATUS_LO 0x3a0
-#define PCH_PIC_INT_STATUS_HI 0x3a4
-#define PCH_PIC_INT_POL_LO 0x3e0
-#define PCH_PIC_INT_POL_HI 0x3e4
-
-#define STATUS_LO_START 0
-#define STATUS_HI_START 0x4
-#define POL_LO_START 0x40
-#define POL_HI_START 0x44
-struct LoongArchPCHPIC {
- SysBusDevice parent_obj;
- qemu_irq parent_irq[64];
- uint64_t int_mask; /*0x020 interrupt mask register*/
- uint64_t htmsi_en; /*0x040 1=msi*/
- uint64_t intedge; /*0x060 edge=1 level =0*/
- uint64_t intclr; /*0x080 for clean edge int,set 1 clean,set 0 is noused*/
- uint64_t auto_crtl0; /*0x0c0*/
- uint64_t auto_crtl1; /*0x0e0*/
- uint64_t last_intirr; /* edge detection */
- uint64_t intirr; /* 0x380 interrupt request register */
- uint64_t intisr; /* 0x3a0 interrupt service register */
- /*
- * 0x3e0 interrupt level polarity selection
- * register 0 for high level trigger
- */
- uint64_t int_polarity;
+struct LoongarchPICState {
+ LoongArchPICCommonState parent_obj;
+ int dev_fd;
+};
- uint8_t route_entry[64]; /*0x100 - 0x138*/
- uint8_t htmsi_vector[64]; /*0x200 - 0x238*/
+struct LoongarchPICClass {
+ LoongArchPICCommonClass parent_class;
- MemoryRegion iomem32_low;
- MemoryRegion iomem32_high;
- MemoryRegion iomem8;
- unsigned int irq_num;
+ DeviceRealize parent_realize;
+ ResettablePhases parent_phases;
};
+
+void kvm_pic_realize(DeviceState *dev, Error **errp);
+int kvm_pic_get(void *opaque);
+int kvm_pic_put(void *opaque, int version_id);
+
+#endif /* HW_LOONGARCH_PCH_PIC_H */
diff --git a/include/hw/intc/loongarch_pic_common.h b/include/hw/intc/loongarch_pic_common.h
new file mode 100644
index 0000000..f774c97
--- /dev/null
+++ b/include/hw/intc/loongarch_pic_common.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 7A1000 I/O interrupt controller definitions
+ * Copyright (c) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_PIC_COMMON_H
+#define HW_LOONGARCH_PIC_COMMON_H
+
+#include "hw/pci-host/ls7a.h"
+#include "hw/sysbus.h"
+
+#define PCH_PIC_INT_ID 0x00
+#define PCH_PIC_INT_ID_VAL 0x7
+#define PCH_PIC_INT_ID_VER 0x1
+#define PCH_PIC_INT_MASK 0x20
+#define PCH_PIC_HTMSI_EN 0x40
+#define PCH_PIC_INT_EDGE 0x60
+#define PCH_PIC_INT_CLEAR 0x80
+#define PCH_PIC_AUTO_CTRL0 0xc0
+#define PCH_PIC_AUTO_CTRL1 0xe0
+#define PCH_PIC_ROUTE_ENTRY 0x100
+#define PCH_PIC_ROUTE_ENTRY_END 0x13f
+#define PCH_PIC_HTMSI_VEC 0x200
+#define PCH_PIC_HTMSI_VEC_END 0x23f
+#define PCH_PIC_INT_REQUEST 0x380
+#define PCH_PIC_INT_STATUS 0x3a0
+#define PCH_PIC_INT_POL 0x3e0
+
+#define TYPE_LOONGARCH_PIC_COMMON "loongarch_pic_common"
+OBJECT_DECLARE_TYPE(LoongArchPICCommonState,
+ LoongArchPICCommonClass, LOONGARCH_PIC_COMMON)
+
+union LoongArchPIC_ID {
+ struct {
+ uint8_t _reserved_0[3];
+ uint8_t id;
+ uint8_t version;
+ uint8_t _reserved_1;
+ uint8_t irq_num;
+ uint8_t _reserved_2;
+ } QEMU_PACKED desc;
+ uint64_t data;
+};
+
+struct LoongArchPICCommonState {
+ SysBusDevice parent_obj;
+
+ qemu_irq parent_irq[64];
+ union LoongArchPIC_ID id; /* 0x00 interrupt ID register */
+ uint64_t int_mask; /* 0x020 interrupt mask register */
+ uint64_t htmsi_en; /* 0x040 1=msi */
+ uint64_t intedge; /* 0x060 edge=1 level=0 */
+ uint64_t intclr; /* 0x080 write 1 to clear an edge interrupt, 0 is ignored */
+ uint64_t auto_crtl0; /* 0x0c0 */
+ uint64_t auto_crtl1; /* 0x0e0 */
+ uint64_t last_intirr; /* edge detection */
+ uint64_t intirr; /* 0x380 interrupt request register */
+ uint64_t intisr; /* 0x3a0 interrupt service register */
+ /*
+ * 0x3e0 interrupt level polarity selection register,
+ * 0 for high level trigger
+ */
+ uint64_t int_polarity;
+
+ uint8_t route_entry[64]; /* 0x100 - 0x138 */
+ uint8_t htmsi_vector[64]; /* 0x200 - 0x238 */
+
+ MemoryRegion iomem;
+ unsigned int irq_num;
+};
+
+struct LoongArchPICCommonClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+ ResettablePhases parent_phases;
+ int (*pre_save)(LoongArchPICCommonState *s);
+ int (*post_load)(LoongArchPICCommonState *s, int version_id);
+};
+#endif /* HW_LOONGARCH_PIC_COMMON_H */
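Editor's note: the packed LoongArchPIC_ID union above lets the model compose the 64-bit value returned at offset 0x00 field by field instead of open-coding shifts. A hedged sketch of how reset code might populate it follows; the function name is illustrative and the "minus one" encoding of irq_num is an assumption, not something stated in this header.

#include "qemu/osdep.h"
#include "hw/intc/loongarch_pic_common.h"

static void pic_reset_id_sketch(LoongArchPICCommonState *s)
{
    s->id.data = 0;
    s->id.desc.id = PCH_PIC_INT_ID_VAL;      /* 0x7 */
    s->id.desc.version = PCH_PIC_INT_ID_VER; /* 0x1 */
    /* assumption: hardware reports the supported IRQ count minus one */
    s->id.desc.irq_num = s->irq_num - 1;
}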
diff --git a/include/hw/intc/loongson_ipi.h b/include/hw/intc/loongson_ipi.h
index 3f795ed..4e517cc 100644
--- a/include/hw/intc/loongson_ipi.h
+++ b/include/hw/intc/loongson_ipi.h
@@ -8,49 +8,24 @@
#ifndef HW_LOONGSON_IPI_H
#define HW_LOONGSON_IPI_H
+#include "qom/object.h"
+#include "hw/intc/loongson_ipi_common.h"
#include "hw/sysbus.h"
-/* Mainy used by iocsr read and write */
-#define SMP_IPI_MAILBOX 0x1000ULL
-#define CORE_STATUS_OFF 0x0
-#define CORE_EN_OFF 0x4
-#define CORE_SET_OFF 0x8
-#define CORE_CLEAR_OFF 0xc
-#define CORE_BUF_20 0x20
-#define CORE_BUF_28 0x28
-#define CORE_BUF_30 0x30
-#define CORE_BUF_38 0x38
-#define IOCSR_IPI_SEND 0x40
-#define IOCSR_MAIL_SEND 0x48
-#define IOCSR_ANY_SEND 0x158
-
-#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND)
-#define MAIL_SEND_OFFSET 0
-#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
-
-#define IPI_MBX_NUM 4
-
#define TYPE_LOONGSON_IPI "loongson_ipi"
-OBJECT_DECLARE_SIMPLE_TYPE(LoongsonIPI, LOONGSON_IPI)
+OBJECT_DECLARE_TYPE(LoongsonIPIState, LoongsonIPIClass, LOONGSON_IPI)
+
+struct LoongsonIPIClass {
+ LoongsonIPICommonClass parent_class;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+};
+
+struct LoongsonIPIState {
+ LoongsonIPICommonState parent_obj;
-typedef struct IPICore {
- LoongsonIPI *ipi;
MemoryRegion *ipi_mmio_mem;
- uint32_t status;
- uint32_t en;
- uint32_t set;
- uint32_t clear;
- /* 64bit buf divide into 2 32bit buf */
- uint32_t buf[IPI_MBX_NUM * 2];
- qemu_irq irq;
-} IPICore;
-
-struct LoongsonIPI {
- SysBusDevice parent_obj;
- MemoryRegion ipi_iocsr_mem;
- MemoryRegion ipi64_iocsr_mem;
- uint32_t num_cpu;
- IPICore *cpu;
};
#endif
diff --git a/include/hw/intc/loongson_ipi_common.h b/include/hw/intc/loongson_ipi_common.h
new file mode 100644
index 0000000..e58ce2a
--- /dev/null
+++ b/include/hw/intc/loongson_ipi_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Loongson IPI interrupt header file
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGSON_IPI_COMMON_H
+#define HW_LOONGSON_IPI_COMMON_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "exec/memattrs.h"
+
+#define IPI_MBX_NUM 4
+
+#define TYPE_LOONGSON_IPI_COMMON "loongson_ipi_common"
+OBJECT_DECLARE_TYPE(LoongsonIPICommonState,
+ LoongsonIPICommonClass, LOONGSON_IPI_COMMON)
+
+typedef struct IPICore {
+ LoongsonIPICommonState *ipi;
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ /* 64-bit buf divided into two 32-bit bufs */
+ uint32_t buf[IPI_MBX_NUM * 2];
+ qemu_irq irq;
+ uint64_t arch_id;
+ CPUState *cpu;
+} IPICore;
+
+struct LoongsonIPICommonState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion ipi_iocsr_mem;
+ MemoryRegion ipi64_iocsr_mem;
+ uint32_t num_cpu;
+ IPICore *cpu;
+};
+
+struct LoongsonIPICommonClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ AddressSpace *(*get_iocsr_as)(CPUState *cpu);
+ int (*cpu_by_arch_id)(LoongsonIPICommonState *lics, int64_t id,
+ int *index, CPUState **pcs);
+ int (*pre_save)(void *opaque);
+ int (*post_load)(void *opaque, int version_id);
+};
+
+MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size, MemTxAttrs attrs);
+
+/* Mainly used by iocsr read and write */
+#define SMP_IPI_MAILBOX 0x1000ULL
+
+#define CORE_STATUS_OFF 0x0
+#define CORE_EN_OFF 0x4
+#define CORE_SET_OFF 0x8
+#define CORE_CLEAR_OFF 0xc
+#define CORE_BUF_20 0x20
+#define CORE_BUF_28 0x28
+#define CORE_BUF_30 0x30
+#define CORE_BUF_38 0x38
+#define IOCSR_IPI_SEND 0x40
+#define IOCSR_MAIL_SEND 0x48
+#define IOCSR_ANY_SEND 0x158
+
+#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND)
+#define MAIL_SEND_OFFSET 0
+#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
+
+#endif
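Editor's note: LoongsonIPICommonClass parameterises the shared IPI model with per-implementation hooks (realize chaining, IOCSR address space lookup, arch_id resolution). A hedged sketch of how a derived class_init could wire these up; MyIPIClass and all my_* names are hypothetical, while device_class_set_parent_realize() and the macros generated by OBJECT_DECLARE_TYPE() are existing QEMU API.

#include "qemu/osdep.h"
#include "hw/intc/loongson_ipi_common.h"

/* hypothetical concrete class mirroring the pattern used in this series */
typedef struct MyIPIClass {
    LoongsonIPICommonClass parent_class;
    DeviceRealize parent_realize;
} MyIPIClass;

#define MY_IPI_CLASS(k) OBJECT_CLASS_CHECK(MyIPIClass, (k), "my-ipi")

static AddressSpace *my_get_iocsr_as(CPUState *cpu);
static int my_cpu_by_arch_id(LoongsonIPICommonState *lics, int64_t id,
                             int *index, CPUState **pcs);
static void my_ipi_realize(DeviceState *dev, Error **errp);

static void my_ipi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass);
    MyIPIClass *mic = MY_IPI_CLASS(klass);

    /* keep the common realize so my_ipi_realize() can call it first */
    device_class_set_parent_realize(dc, my_ipi_realize, &mic->parent_realize);

    licc->get_iocsr_as = my_get_iocsr_as;     /* map a CPU to its IOCSR space */
    licc->cpu_by_arch_id = my_cpu_by_arch_id; /* resolve arch_id to a CPUState */
}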
diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h
index de8532f..489b913 100644
--- a/include/hw/intc/riscv_aplic.h
+++ b/include/hw/intc/riscv_aplic.h
@@ -68,9 +68,17 @@ struct RISCVAPLICState {
uint32_t num_irqs;
bool msimode;
bool mmode;
+
+ /* To support KVM aia=aplic-imsic with irqchip split mode */
+ bool kvm_splitmode;
+ uint32_t kvm_msicfgaddr;
+ uint32_t kvm_msicfgaddrH;
};
void riscv_aplic_add_child(DeviceState *parent, DeviceState *child);
+bool riscv_is_kvm_aia_aplic_imsic(bool msimode);
+bool riscv_use_emulated_aplic(bool msimode);
+void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr);
DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources,
diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h
index cbcdda5..00f397f 100644
--- a/include/hw/ipack/ipack.h
+++ b/include/hw/ipack/ipack.h
@@ -12,6 +12,7 @@
#define QEMU_IPACK_H
#include "hw/qdev-core.h"
+#include "hw/irq.h"
#include "qom/object.h"
@@ -19,10 +20,8 @@
OBJECT_DECLARE_SIMPLE_TYPE(IPackBus, IPACK_BUS)
struct IPackBus {
- /*< private >*/
BusState parent_obj;
- /* All fields are private */
uint8_t n_slots;
uint8_t free_slot;
qemu_irq_handler set_irq;
@@ -58,13 +57,11 @@ struct IPackDeviceClass {
};
struct IPackDevice {
- /*< private >*/
DeviceState parent_obj;
- /*< public >*/
int32_t slot;
/* IRQ objects for the IndustryPack INT0# and INT1# */
- qemu_irq *irq;
+ IRQState irq[2];
};
extern const VMStateDescription vmstate_ipack_device;
diff --git a/include/hw/ipmi/ipmi.h b/include/hw/ipmi/ipmi.h
index 77a7213..cd581aa 100644
--- a/include/hw/ipmi/ipmi.h
+++ b/include/hw/ipmi/ipmi.h
@@ -25,7 +25,7 @@
#ifndef HW_IPMI_H
#define HW_IPMI_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "qom/object.h"
@@ -41,6 +41,15 @@ enum ipmi_op {
IPMI_SEND_NMI
};
+/* Channel properties */
+#define IPMI_CHANNEL_IPMB 0x00
+#define IPMI_CHANNEL_SYSTEM 0x0f
+#define IPMI_CHANNEL_MEDIUM_IPMB 0x01
+#define IPMI_CHANNEL_MEDIUM_SYSTEM 0x0c
+#define IPMI_CHANNEL_PROTOCOL_IPMB 0x01
+#define IPMI_CHANNEL_PROTOCOL_KCS 0x05
+#define IPMI_CHANNEL_PROTOCOL_BT_15 0x08
+
#define IPMI_CC_INVALID_CMD 0xc1
#define IPMI_CC_COMMAND_INVALID_FOR_LUN 0xc2
#define IPMI_CC_TIMEOUT 0xc3
@@ -76,6 +85,7 @@ typedef struct IPMIFwInfo {
int interface_type;
uint8_t ipmi_spec_major_revision;
uint8_t ipmi_spec_minor_revision;
+ uint8_t ipmi_channel_protocol;
uint8_t i2c_slave_address;
uint32_t uuid;
@@ -91,6 +101,11 @@ typedef struct IPMIFwInfo {
int interrupt_number;
enum {
+ IPMI_NO_IRQ = 0,
+ IPMI_ISA_IRQ,
+ IPMI_PCI_IRQ,
+ } irq_source;
+ enum {
IPMI_LEVEL_IRQ,
IPMI_EDGE_IRQ
} irq_type;
diff --git a/include/hw/irq.h b/include/hw/irq.h
index 645b73d..b301223 100644
--- a/include/hw/irq.h
+++ b/include/hw/irq.h
@@ -1,9 +1,20 @@
#ifndef QEMU_IRQ_H
#define QEMU_IRQ_H
+#include "qom/object.h"
+
/* Generic IRQ/GPIO pin infrastructure. */
#define TYPE_IRQ "irq"
+OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ)
+
+struct IRQState {
+ Object parent_obj;
+
+ qemu_irq_handler handler;
+ void *opaque;
+ int n;
+};
void qemu_set_irq(qemu_irq irq, int level);
@@ -23,6 +34,24 @@ static inline void qemu_irq_pulse(qemu_irq irq)
qemu_set_irq(irq, 0);
}
+/*
+ * Init a single IRQ. The IRQ is assigned a handler, an opaque pointer
+ * and the interrupt number.
+ */
+void qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque,
+ int n);
+
+/**
+ * qemu_init_irqs: Initialize an array of IRQs.
+ *
+ * @irq: Array of IRQs to initialize
+ * @count: number of IRQs to initialize
+ * @handler: handler to assign to each IRQ
+ * @opaque: opaque data to pass to @handler
+ */
+void qemu_init_irqs(IRQState irq[], size_t count,
+ qemu_irq_handler handler, void *opaque);
+
/* Returns an array of N IRQs. Each IRQ is assigned the argument handler and
* opaque data.
*/
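Editor's note: the new qemu_init_irq()/qemu_init_irqs() helpers let a device embed IRQState objects directly (as the IPackDevice change above does with irq[2]) instead of heap-allocating them with qemu_allocate_irqs(). A minimal sketch of that pattern, assuming the hypothetical MyDeviceState, my_irq_handler and my_device_init names:

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/qdev-core.h"

typedef struct MyDeviceState {
    DeviceState parent_obj;
    IRQState irq[2];                  /* embedded, no separate allocation */
} MyDeviceState;

static void my_irq_handler(void *opaque, int n, int level)
{
    /* react to input pin n changing to 'level' */
}

static void my_device_init(MyDeviceState *s)
{
    /* every IRQ gets the same handler/opaque; n is the array index */
    qemu_init_irqs(s->irq, ARRAY_SIZE(s->irq), my_irq_handler, s);
}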
diff --git a/include/hw/isa/apm.h b/include/hw/isa/apm.h
index b6e070c..0834539 100644
--- a/include/hw/isa/apm.h
+++ b/include/hw/isa/apm.h
@@ -1,7 +1,7 @@
#ifndef APM_H
#define APM_H
-#include "exec/memory.h"
+#include "system/memory.h"
#define APM_CNT_IOPORT 0xb2
#define ACPI_PORT_SMI_CMD APM_CNT_IOPORT
diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h
index 40d6224..a82c5f1 100644
--- a/include/hw/isa/isa.h
+++ b/include/hw/isa/isa.h
@@ -3,8 +3,8 @@
/* ISA bus */
-#include "exec/memory.h"
-#include "exec/ioport.h"
+#include "system/memory.h"
+#include "system/ioport.h"
#include "hw/qdev-core.h"
#include "qom/object.h"
diff --git a/include/hw/isa/superio.h b/include/hw/isa/superio.h
index 0dc4510..14d0513 100644
--- a/include/hw/isa/superio.h
+++ b/include/hw/isa/superio.h
@@ -10,7 +10,7 @@
#ifndef HW_ISA_SUPERIO_H
#define HW_ISA_SUPERIO_H
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/isa/isa.h"
#include "qom/object.h"
diff --git a/include/hw/loader-fit.h b/include/hw/loader-fit.h
index 0832e37..9a43490 100644
--- a/include/hw/loader-fit.h
+++ b/include/hw/loader-fit.h
@@ -30,12 +30,27 @@ struct fit_loader_match {
struct fit_loader {
const struct fit_loader_match *matches;
hwaddr (*addr_to_phys)(void *opaque, uint64_t addr);
- const void *(*fdt_filter)(void *opaque, const void *fdt,
- const void *match_data, hwaddr *load_addr);
+ void *(*fdt_filter)(void *opaque, const void *fdt,
+ const void *match_data, hwaddr *load_addr);
const void *(*kernel_filter)(void *opaque, const void *kernel,
hwaddr *load_addr, hwaddr *entry_addr);
};
-int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque);
+/**
+ * load_fit: load a FIT format image
+ * @ldr: structure defining board specific properties and hooks
+ * @filename: image to load
+ * @pfdt: pointer to update with address of FDT blob
+ * @opaque: opaque value passed back to the hook functions in @ldr
+ * Returns: 0 on success, or a negative errno on failure
+ *
+ * @pfdt is used to tell the caller about the FDT blob. On return, it
+ * has been set to point to the FDT blob, and it is now the caller's
+ * responsibility to free that memory with g_free(). Usually the caller
+ * will want to pass in &machine->fdt here, to record the FDT blob for
+ * the dumpdtb option and QMP/HMP commands.
+ */
+int load_fit(const struct fit_loader *ldr, const char *filename, void **pfdt,
+ void *opaque);
#endif /* HW_LOADER_FIT_H */
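Editor's note: the new @pfdt parameter changes every load_fit() caller, and the comment above spells out the ownership rules. A hedged sketch of a board-side call, assuming a board-defined my_fit_loader table and passing &machine->fdt as the documentation suggests:

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
#include "hw/loader-fit.h"

/* my_fit_loader is a hypothetical board-specific fit_loader table */
static const struct fit_loader my_fit_loader;

static void my_board_load_fit(MachineState *machine, const char *filename)
{
    /* machine->fdt receives the FDT blob and is later freed with g_free() */
    if (load_fit(&my_fit_loader, filename, &machine->fdt, machine) < 0) {
        error_report("could not load FIT image '%s'", filename);
        exit(1);
    }
}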
diff --git a/include/hw/loader.h b/include/hw/loader.h
index 7f6d06b..c96b5e1 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -101,7 +101,7 @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
* Returns the size of the decompressed payload if decompression was performed
* successfully.
*/
-ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, ssize_t *size);
#define ELF_LOAD_FAILED -1
#define ELF_LOAD_NOT_ELF -2
@@ -120,7 +120,7 @@ const char *load_elf_strerror(ssize_t error);
* @lowaddr: Populated with lowest loaded address. Ignored if NULL.
* @highaddr: Populated with highest loaded address. Ignored if NULL.
* @pflags: Populated with ELF processor-specific flags. Ignore if NULL.
- * @bigendian: Expected ELF endianness. 0 for LE otherwise BE
+ * @elf_data_order: Expected ELF endianness (ELFDATA2LSB or ELFDATA2MSB).
* @elf_machine: Expected ELF machine type
* @clear_lsb: Set to mask off LSB of addresses (Some architectures use
* this for non-address data)
@@ -151,30 +151,18 @@ ssize_t load_elf_ram_sym(const char *filename,
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry,
uint64_t *lowaddr, uint64_t *highaddr,
- uint32_t *pflags, int big_endian, int elf_machine,
+ uint32_t *pflags, int elf_data_order, int elf_machine,
int clear_lsb, int data_swab,
AddressSpace *as, bool load_rom, symbol_fn_t sym_cb);
-/** load_elf_ram:
- * Same as load_elf_ram_sym(), but doesn't allow the caller to specify a
- * symbol callback function
- */
-ssize_t load_elf_ram(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags,
- int big_endian, int elf_machine, int clear_lsb,
- int data_swab, AddressSpace *as, bool load_rom);
-
/** load_elf_as:
- * Same as load_elf_ram(), but always loads the elf as ROM
+ * Same as load_elf_ram_sym(), but always loads the elf as ROM
*/
ssize_t load_elf_as(const char *filename,
uint64_t (*elf_note_fn)(void *, void *, bool),
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ uint64_t *highaddr, uint32_t *pflags, int elf_data_order,
int elf_machine, int clear_lsb, int data_swab,
AddressSpace *as);
@@ -186,7 +174,7 @@ ssize_t load_elf(const char *filename,
uint64_t (*elf_note_fn)(void *, void *, bool),
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ uint64_t *highaddr, uint32_t *pflags, int elf_data_order,
int elf_machine, int clear_lsb, int data_swab);
/** load_elf_hdr:
@@ -202,7 +190,7 @@ ssize_t load_elf(const char *filename,
void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp);
ssize_t load_aout(const char *filename, hwaddr addr, int max_sz,
- int bswap_needed, hwaddr target_page_size);
+ bool big_endian, hwaddr target_page_size);
#define LOAD_UIMAGE_LOADADDR_INVALID (-1)
@@ -282,8 +270,6 @@ int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data,
AddressSpace *as);
int rom_check_and_register_reset(void);
void rom_set_fw(FWCfgState *f);
-void rom_set_order_override(int order);
-void rom_reset_order_override(void);
/**
* rom_transaction_begin:
diff --git a/include/hw/loongarch/boot.h b/include/hw/loongarch/boot.h
index b3b870d..9819f7f 100644
--- a/include/hw/loongarch/boot.h
+++ b/include/hw/loongarch/boot.h
@@ -102,11 +102,10 @@ struct loongarch_boot_info {
const char *kernel_cmdline;
const char *initrd_filename;
uint64_t a0, a1, a2;
+ uint64_t initrd_addr;
+ uint64_t initrd_size;
};
-extern struct memmap_entry *memmap_table;
-extern unsigned memmap_entries;
-
struct memmap_entry {
uint64_t address;
uint64_t length;
diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h
index 603c1ce..602feab 100644
--- a/include/hw/loongarch/virt.h
+++ b/include/hw/loongarch/virt.h
@@ -8,10 +8,8 @@
#ifndef HW_LOONGARCH_H
#define HW_LOONGARCH_H
-#include "target/loongarch/cpu.h"
#include "hw/boards.h"
#include "qemu/queue.h"
-#include "hw/intc/loongson_ipi.h"
#include "hw/block/flash.h"
#include "hw/loongarch/boot.h"
@@ -32,6 +30,7 @@
#define VIRT_GED_EVT_ADDR 0x100e0000
#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN)
#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN)
+#define VIRT_GED_CPUHP_ADDR (VIRT_GED_REG_ADDR + ACPI_GED_REG_COUNT)
#define COMMAND_LINE_SIZE 512
@@ -62,9 +61,23 @@ struct LoongArchVirtMachineState {
MemoryRegion iocsr_mem;
AddressSpace as_iocsr;
struct loongarch_boot_info bootinfo;
+ DeviceState *ipi;
+ DeviceState *extioi;
+ struct memmap_entry *memmap_table;
+ unsigned int memmap_entries;
};
#define TYPE_LOONGARCH_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
OBJECT_DECLARE_SIMPLE_TYPE(LoongArchVirtMachineState, LOONGARCH_VIRT_MACHINE)
-void loongarch_acpi_setup(LoongArchVirtMachineState *lvms);
+void virt_acpi_setup(LoongArchVirtMachineState *lvms);
+void virt_fdt_setup(LoongArchVirtMachineState *lvms);
+
+static inline bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms)
+{
+ if (lvms->veiointc == ON_OFF_AUTO_OFF) {
+ return false;
+ }
+ return true;
+}
+
#endif
diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h
index 34365c9..9caaed9 100644
--- a/include/hw/m68k/q800.h
+++ b/include/hw/m68k/q800.h
@@ -26,7 +26,7 @@
#include "hw/boards.h"
#include "qom/object.h"
#include "target/m68k/cpu-qom.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/m68k/q800-glue.h"
#include "hw/misc/mac_via.h"
#include "hw/net/dp8393x.h"
diff --git a/include/hw/mem/npcm7xx_mc.h b/include/hw/mem/npcm7xx_mc.h
index 7ed38be..568cc35 100644
--- a/include/hw/mem/npcm7xx_mc.h
+++ b/include/hw/mem/npcm7xx_mc.h
@@ -16,7 +16,7 @@
#ifndef NPCM7XX_MC_H
#define NPCM7XX_MC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
/**
diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
index fe0f3ea..e0dbdd4 100644
--- a/include/hw/mem/pc-dimm.h
+++ b/include/hw/mem/pc-dimm.h
@@ -16,7 +16,7 @@
#ifndef QEMU_PC_DIMM_H
#define QEMU_PC_DIMM_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "qom/object.h"
diff --git a/include/hw/mips/cps.h b/include/hw/mips/cps.h
index 04d6362..05ef9f7 100644
--- a/include/hw/mips/cps.h
+++ b/include/hw/mips/cps.h
@@ -38,6 +38,7 @@ struct MIPSCPSState {
uint32_t num_vp;
uint32_t num_irq;
char *cpu_type;
+ bool cpu_is_bigendian;
MemoryRegion container;
MIPSGCRState gcr;
diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h
index 101799f..1f3672b 100644
--- a/include/hw/mips/mips.h
+++ b/include/hw/mips/mips.h
@@ -7,7 +7,7 @@
/* Kernels can be configured with 64KB pages */
#define INITRD_PAGE_SIZE (64 * KiB)
-#include "exec/memory.h"
+#include "system/memory.h"
/* bonito.c */
PCIBus *bonito_init(qemu_irq *pic);
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
index ecb1b67..d5d07c6 100644
--- a/include/hw/misc/aspeed_hace.h
+++ b/include/hw/misc/aspeed_hace.h
@@ -1,6 +1,7 @@
/*
* ASPEED Hash and Crypto Engine
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (C) 2021 IBM Corp.
*
* SPDX-License-Identifier: GPL-2.0-or-later
@@ -10,16 +11,17 @@
#define ASPEED_HACE_H
#include "hw/sysbus.h"
+#include "crypto/hash.h"
#define TYPE_ASPEED_HACE "aspeed.hace"
#define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400"
#define TYPE_ASPEED_AST2500_HACE TYPE_ASPEED_HACE "-ast2500"
#define TYPE_ASPEED_AST2600_HACE TYPE_ASPEED_HACE "-ast2600"
#define TYPE_ASPEED_AST1030_HACE TYPE_ASPEED_HACE "-ast1030"
+#define TYPE_ASPEED_AST2700_HACE TYPE_ASPEED_HACE "-ast2700"
OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE)
-#define ASPEED_HACE_NR_REGS (0x64 >> 2)
#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
struct AspeedHACEState {
@@ -28,23 +30,30 @@ struct AspeedHACEState {
MemoryRegion iomem;
qemu_irq irq;
- struct iovec iov_cache[ASPEED_HACE_MAX_SG];
- uint32_t regs[ASPEED_HACE_NR_REGS];
+ uint32_t *regs;
uint32_t total_req_len;
- uint32_t iov_count;
MemoryRegion *dram_mr;
AddressSpace dram_as;
+
+ QCryptoHash *hash_ctx;
};
struct AspeedHACEClass {
SysBusDeviceClass parent_class;
+ const MemoryRegionOps *reg_ops;
uint32_t src_mask;
uint32_t dest_mask;
uint32_t key_mask;
uint32_t hash_mask;
+ uint64_t nr_regs;
+ bool raise_crypt_interrupt_workaround;
+ uint32_t src_hi_mask;
+ uint32_t dest_hi_mask;
+ uint32_t key_hi_mask;
+ bool has_dma64;
};
#endif /* ASPEED_HACE_H */
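Editor's note: with the fixed-size regs[] array replaced by a pointer plus a per-class nr_regs, the register file is presumably sized at realize time. A hedged sketch of that allocation (the function name is illustrative, not taken from the patch):

#include "qemu/osdep.h"
#include "hw/misc/aspeed_hace.h"

static void aspeed_hace_realize_sketch(AspeedHACEState *s)
{
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    /* one uint32_t slot per register defined by the SoC variant */
    s->regs = g_new0(uint32_t, ahc->nr_regs);
}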
diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h
index 58db28d..684b48b 100644
--- a/include/hw/misc/aspeed_scu.h
+++ b/include/hw/misc/aspeed_scu.h
@@ -54,6 +54,8 @@ struct AspeedSCUState {
#define AST2700_A0_SILICON_REV 0x06000103U
#define AST2720_A0_SILICON_REV 0x06000203U
#define AST2750_A0_SILICON_REV 0x06000003U
+#define AST2700_A1_SILICON_REV 0x06010103U
+#define AST2750_A1_SILICON_REV 0x06010003U
#define ASPEED_IS_AST2500(si_rev) ((((si_rev) >> 24) & 0xff) == 0x04)
@@ -349,6 +351,10 @@ uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s);
#define SCU_AST2600_H_PLL_BYPASS_EN (0x1 << 24)
#define SCU_AST2600_H_PLL_OFF (0x1 << 23)
+/* STRAP1 SCU500 */
+#define SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC (0x1 << 2)
+#define SCU_AST2600_HW_STRAP_BOOT_SRC_SPI (0x0 << 2)
+
/*
* SCU310 Clock Selection Register Set 4 (for Aspeed AST1030 SOC)
*
diff --git a/include/hw/misc/auxbus.h b/include/hw/misc/auxbus.h
index 03cacde..ccd18ce 100644
--- a/include/hw/misc/auxbus.h
+++ b/include/hw/misc/auxbus.h
@@ -25,7 +25,7 @@
#ifndef HW_MISC_AUXBUS_H
#define HW_MISC_AUXBUS_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "qom/object.h"
diff --git a/include/hw/misc/cbus.h b/include/hw/misc/cbus.h
deleted file mode 100644
index 5334984..0000000
--- a/include/hw/misc/cbus.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma /
- * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms.
- * Based on reverse-engineering of a linux driver.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_MISC_CBUS_H
-#define HW_MISC_CBUS_H
-
-
-typedef struct {
- qemu_irq clk;
- qemu_irq dat;
- qemu_irq sel;
-} CBus;
-
-CBus *cbus_init(qemu_irq dat_out);
-void cbus_attach(CBus *bus, void *slave_opaque);
-
-void *retu_init(qemu_irq irq, int vilma);
-void *tahvo_init(qemu_irq irq, int betty);
-
-void retu_key_event(void *retu, int state);
-
-#endif
diff --git a/include/hw/misc/imx8mp_analog.h b/include/hw/misc/imx8mp_analog.h
new file mode 100644
index 0000000..955f032
--- /dev/null
+++ b/include/hw/misc/imx8mp_analog.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * i.MX8MP ANALOG IP block emulation code
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef IMX8MP_ANALOG_H
+#define IMX8MP_ANALOG_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+enum IMX8MPAnalogRegisters {
+ ANALOG_AUDIO_PLL1_GEN_CTRL = 0x000 / 4,
+ ANALOG_AUDIO_PLL1_FDIV_CTL0 = 0x004 / 4,
+ ANALOG_AUDIO_PLL1_FDIV_CTL1 = 0x008 / 4,
+ ANALOG_AUDIO_PLL1_SSCG_CTRL = 0x00c / 4,
+ ANALOG_AUDIO_PLL1_MNIT_CTRL = 0x010 / 4,
+ ANALOG_AUDIO_PLL2_GEN_CTRL = 0x014 / 4,
+ ANALOG_AUDIO_PLL2_FDIV_CTL0 = 0x018 / 4,
+ ANALOG_AUDIO_PLL2_FDIV_CTL1 = 0x01c / 4,
+ ANALOG_AUDIO_PLL2_SSCG_CTRL = 0x020 / 4,
+ ANALOG_AUDIO_PLL2_MNIT_CTRL = 0x024 / 4,
+ ANALOG_VIDEO_PLL1_GEN_CTRL = 0x028 / 4,
+ ANALOG_VIDEO_PLL1_FDIV_CTL0 = 0x02c / 4,
+ ANALOG_VIDEO_PLL1_FDIV_CTL1 = 0x030 / 4,
+ ANALOG_VIDEO_PLL1_SSCG_CTRL = 0x034 / 4,
+ ANALOG_VIDEO_PLL1_MNIT_CTRL = 0x038 / 4,
+ ANALOG_DRAM_PLL_GEN_CTRL = 0x050 / 4,
+ ANALOG_DRAM_PLL_FDIV_CTL0 = 0x054 / 4,
+ ANALOG_DRAM_PLL_FDIV_CTL1 = 0x058 / 4,
+ ANALOG_DRAM_PLL_SSCG_CTRL = 0x05c / 4,
+ ANALOG_DRAM_PLL_MNIT_CTRL = 0x060 / 4,
+ ANALOG_GPU_PLL_GEN_CTRL = 0x064 / 4,
+ ANALOG_GPU_PLL_FDIV_CTL0 = 0x068 / 4,
+ ANALOG_GPU_PLL_LOCKD_CTRL = 0x06c / 4,
+ ANALOG_GPU_PLL_MNIT_CTRL = 0x070 / 4,
+ ANALOG_VPU_PLL_GEN_CTRL = 0x074 / 4,
+ ANALOG_VPU_PLL_FDIV_CTL0 = 0x078 / 4,
+ ANALOG_VPU_PLL_LOCKD_CTRL = 0x07c / 4,
+ ANALOG_VPU_PLL_MNIT_CTRL = 0x080 / 4,
+ ANALOG_ARM_PLL_GEN_CTRL = 0x084 / 4,
+ ANALOG_ARM_PLL_FDIV_CTL0 = 0x088 / 4,
+ ANALOG_ARM_PLL_LOCKD_CTRL = 0x08c / 4,
+ ANALOG_ARM_PLL_MNIT_CTRL = 0x090 / 4,
+ ANALOG_SYS_PLL1_GEN_CTRL = 0x094 / 4,
+ ANALOG_SYS_PLL1_FDIV_CTL0 = 0x098 / 4,
+ ANALOG_SYS_PLL1_LOCKD_CTRL = 0x09c / 4,
+ ANALOG_SYS_PLL1_MNIT_CTRL = 0x100 / 4,
+ ANALOG_SYS_PLL2_GEN_CTRL = 0x104 / 4,
+ ANALOG_SYS_PLL2_FDIV_CTL0 = 0x108 / 4,
+ ANALOG_SYS_PLL2_LOCKD_CTRL = 0x10c / 4,
+ ANALOG_SYS_PLL2_MNIT_CTRL = 0x110 / 4,
+ ANALOG_SYS_PLL3_GEN_CTRL = 0x114 / 4,
+ ANALOG_SYS_PLL3_FDIV_CTL0 = 0x118 / 4,
+ ANALOG_SYS_PLL3_LOCKD_CTRL = 0x11c / 4,
+ ANALOG_SYS_PLL3_MNIT_CTRL = 0x120 / 4,
+ ANALOG_OSC_MISC_CFG = 0x124 / 4,
+ ANALOG_ANAMIX_PLL_MNIT_CTL = 0x128 / 4,
+
+ ANALOG_DIGPROG = 0x800 / 4,
+ ANALOG_MAX,
+};
+
+#define TYPE_IMX8MP_ANALOG "imx8mp.analog"
+OBJECT_DECLARE_SIMPLE_TYPE(IMX8MPAnalogState, IMX8MP_ANALOG)
+
+struct IMX8MPAnalogState {
+ SysBusDevice parent_obj;
+
+ struct {
+ MemoryRegion container;
+ MemoryRegion analog;
+ } mmio;
+
+ uint32_t analog[ANALOG_MAX];
+};
+
+#endif /* IMX8MP_ANALOG_H */
diff --git a/include/hw/misc/imx8mp_ccm.h b/include/hw/misc/imx8mp_ccm.h
new file mode 100644
index 0000000..685c858
--- /dev/null
+++ b/include/hw/misc/imx8mp_ccm.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * i.MX 8M Plus CCM IP block emulation code
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef IMX8MP_CCM_H
+#define IMX8MP_CCM_H
+
+#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
+
+enum IMX8MPCCMRegisters {
+ CCM_MAX = 0xc6fc / sizeof(uint32_t) + 1,
+};
+
+#define TYPE_IMX8MP_CCM "imx8mp.ccm"
+OBJECT_DECLARE_SIMPLE_TYPE(IMX8MPCCMState, IMX8MP_CCM)
+
+struct IMX8MPCCMState {
+ IMXCCMState parent_obj;
+
+ MemoryRegion iomem;
+
+ uint32_t ccm[CCM_MAX];
+};
+
+#endif /* IMX8MP_CCM_H */
diff --git a/include/hw/misc/ivshmem-flat.h b/include/hw/misc/ivshmem-flat.h
new file mode 100644
index 0000000..09bc3ab
--- /dev/null
+++ b/include/hw/misc/ivshmem-flat.h
@@ -0,0 +1,86 @@
+/*
+ * Inter-VM Shared Memory Flat Device
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (c) 2023 Linaro Ltd.
+ * Authors:
+ * Gustavo Romero
+ *
+ */
+
+#ifndef IVSHMEM_FLAT_H
+#define IVSHMEM_FLAT_H
+
+#include "qemu/queue.h"
+#include "qemu/event_notifier.h"
+#include "chardev/char-fe.h"
+#include "system/memory.h"
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+#define IVSHMEM_MAX_VECTOR_NUM 64
+
+/*
+ * QEMU interface:
+ * + QOM property "chardev" is the character device id of the ivshmem server
+ * socket
+ * + QOM property "shmem-size" sets the size of the RAM region shared between
+ * the device and the ivshmem server
+ * + sysbus MMIO region 0: device I/O mapped registers
+ * + sysbus MMIO region 1: shared memory with ivshmem server
+ * + sysbus IRQ 0: single output interrupt
+ */
+
+#define TYPE_IVSHMEM_FLAT "ivshmem-flat"
+typedef struct IvshmemFTState IvshmemFTState;
+
+DECLARE_INSTANCE_CHECKER(IvshmemFTState, IVSHMEM_FLAT, TYPE_IVSHMEM_FLAT)
+
+/* Ivshmem registers. See ./docs/specs/ivshmem-spec.txt for details. */
+enum ivshmem_registers {
+ INTMASK = 0,
+ INTSTATUS = 4,
+ IVPOSITION = 8,
+ DOORBELL = 12,
+};
+
+typedef struct VectorInfo {
+ EventNotifier event_notifier;
+ uint16_t id;
+} VectorInfo;
+
+typedef struct IvshmemPeer {
+ QTAILQ_ENTRY(IvshmemPeer) next;
+ VectorInfo vector[IVSHMEM_MAX_VECTOR_NUM];
+ int vector_counter;
+ uint16_t id;
+} IvshmemPeer;
+
+struct IvshmemFTState {
+ SysBusDevice parent_obj;
+
+ uint64_t msg_buf;
+ int msg_buffered_bytes;
+
+ QTAILQ_HEAD(, IvshmemPeer) peer;
+ IvshmemPeer own;
+
+ CharBackend server_chr;
+
+ /* IRQ */
+ qemu_irq irq;
+
+ /* I/O registers */
+ MemoryRegion iomem;
+ uint32_t intmask;
+ uint32_t intstatus;
+ uint32_t ivposition;
+ uint32_t doorbell;
+
+ /* Shared memory */
+ MemoryRegion shmem;
+ int shmem_fd;
+ uint32_t shmem_size;
+};
+
+#endif /* IVSHMEM_FLAT_H */
diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h
index f01c0f6..0bdfb11 100644
--- a/include/hw/misc/lasi.h
+++ b/include/hw/misc/lasi.h
@@ -12,7 +12,7 @@
#ifndef LASI_H
#define LASI_H
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/pci/pci_host.h"
#include "hw/boards.h"
diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h
index 63cdcf7..6a15228 100644
--- a/include/hw/misc/mac_via.h
+++ b/include/hw/misc/mac_via.h
@@ -9,7 +9,7 @@
#ifndef HW_MISC_MAC_VIA_H
#define HW_MISC_MAC_VIA_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
#include "hw/misc/mos6522.h"
#include "hw/input/adb.h"
diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h
index fba4566..920871a 100644
--- a/include/hw/misc/mos6522.h
+++ b/include/hw/misc/mos6522.h
@@ -154,7 +154,7 @@ struct MOS6522State {
OBJECT_DECLARE_TYPE(MOS6522State, MOS6522DeviceClass, MOS6522)
struct MOS6522DeviceClass {
- DeviceClass parent_class;
+ SysBusDeviceClass parent_class;
ResettablePhases parent_phases;
void (*portB_write)(MOS6522State *dev);
diff --git a/include/hw/misc/npcm7xx_clk.h b/include/hw/misc/npcm7xx_clk.h
deleted file mode 100644
index 5ed4a46..0000000
--- a/include/hw/misc/npcm7xx_clk.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Nuvoton NPCM7xx Clock Control Registers.
- *
- * Copyright 2020 Google LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-#ifndef NPCM7XX_CLK_H
-#define NPCM7XX_CLK_H
-
-#include "exec/memory.h"
-#include "hw/clock.h"
-#include "hw/sysbus.h"
-
-/*
- * Number of registers in our device state structure. Don't change this without
- * incrementing the version_id in the vmstate.
- */
-#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t))
-
-#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in"
-
-/* Maximum amount of clock inputs in a SEL module. */
-#define NPCM7XX_CLK_SEL_MAX_INPUT 5
-
-/* PLLs in CLK module. */
-typedef enum NPCM7xxClockPLL {
- NPCM7XX_CLOCK_PLL0,
- NPCM7XX_CLOCK_PLL1,
- NPCM7XX_CLOCK_PLL2,
- NPCM7XX_CLOCK_PLLG,
- NPCM7XX_CLOCK_NR_PLLS,
-} NPCM7xxClockPLL;
-
-/* SEL/MUX in CLK module. */
-typedef enum NPCM7xxClockSEL {
- NPCM7XX_CLOCK_PIXCKSEL,
- NPCM7XX_CLOCK_MCCKSEL,
- NPCM7XX_CLOCK_CPUCKSEL,
- NPCM7XX_CLOCK_CLKOUTSEL,
- NPCM7XX_CLOCK_UARTCKSEL,
- NPCM7XX_CLOCK_TIMCKSEL,
- NPCM7XX_CLOCK_SDCKSEL,
- NPCM7XX_CLOCK_GFXMSEL,
- NPCM7XX_CLOCK_SUCKSEL,
- NPCM7XX_CLOCK_NR_SELS,
-} NPCM7xxClockSEL;
-
-/* Dividers in CLK module. */
-typedef enum NPCM7xxClockDivider {
- NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */
- NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */
- NPCM7XX_CLOCK_MC_DIVIDER,
- NPCM7XX_CLOCK_AXI_DIVIDER,
- NPCM7XX_CLOCK_AHB_DIVIDER,
- NPCM7XX_CLOCK_AHB3_DIVIDER,
- NPCM7XX_CLOCK_SPI0_DIVIDER,
- NPCM7XX_CLOCK_SPIX_DIVIDER,
- NPCM7XX_CLOCK_APB1_DIVIDER,
- NPCM7XX_CLOCK_APB2_DIVIDER,
- NPCM7XX_CLOCK_APB3_DIVIDER,
- NPCM7XX_CLOCK_APB4_DIVIDER,
- NPCM7XX_CLOCK_APB5_DIVIDER,
- NPCM7XX_CLOCK_CLKOUT_DIVIDER,
- NPCM7XX_CLOCK_UART_DIVIDER,
- NPCM7XX_CLOCK_TIMER_DIVIDER,
- NPCM7XX_CLOCK_ADC_DIVIDER,
- NPCM7XX_CLOCK_MMC_DIVIDER,
- NPCM7XX_CLOCK_SDHC_DIVIDER,
- NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */
- NPCM7XX_CLOCK_UTMI_DIVIDER,
- NPCM7XX_CLOCK_NR_DIVIDERS,
-} NPCM7xxClockConverter;
-
-typedef struct NPCM7xxCLKState NPCM7xxCLKState;
-
-/**
- * struct NPCM7xxClockPLLState - A PLL module in CLK module.
- * @name: The name of the module.
- * @clk: The CLK module that owns this module.
- * @clock_in: The input clock of this module.
- * @clock_out: The output clock of this module.
- * @reg: The control registers for this PLL module.
- */
-typedef struct NPCM7xxClockPLLState {
- DeviceState parent;
-
- const char *name;
- NPCM7xxCLKState *clk;
- Clock *clock_in;
- Clock *clock_out;
-
- int reg;
-} NPCM7xxClockPLLState;
-
-/**
- * struct NPCM7xxClockSELState - A SEL module in CLK module.
- * @name: The name of the module.
- * @clk: The CLK module that owns this module.
- * @input_size: The size of inputs of this module.
- * @clock_in: The input clocks of this module.
- * @clock_out: The output clocks of this module.
- * @offset: The offset of this module in the control register.
- * @len: The length of this module in the control register.
- */
-typedef struct NPCM7xxClockSELState {
- DeviceState parent;
-
- const char *name;
- NPCM7xxCLKState *clk;
- uint8_t input_size;
- Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT];
- Clock *clock_out;
-
- int offset;
- int len;
-} NPCM7xxClockSELState;
-
-/**
- * struct NPCM7xxClockDividerState - A Divider module in CLK module.
- * @name: The name of the module.
- * @clk: The CLK module that owns this module.
- * @clock_in: The input clock of this module.
- * @clock_out: The output clock of this module.
- * @divide: The function the divider uses to divide the input.
- * @reg: The index of the control register that contains the divisor.
- * @offset: The offset of the divisor in the control register.
- * @len: The length of the divisor in the control register.
- * @divisor: The divisor for a constant divisor
- */
-typedef struct NPCM7xxClockDividerState {
- DeviceState parent;
-
- const char *name;
- NPCM7xxCLKState *clk;
- Clock *clock_in;
- Clock *clock_out;
-
- uint32_t (*divide)(struct NPCM7xxClockDividerState *s);
- union {
- struct {
- int reg;
- int offset;
- int len;
- };
- int divisor;
- };
-} NPCM7xxClockDividerState;
-
-struct NPCM7xxCLKState {
- SysBusDevice parent;
-
- MemoryRegion iomem;
-
- /* Clock converters */
- NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS];
- NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS];
- NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS];
-
- uint32_t regs[NPCM7XX_CLK_NR_REGS];
-
- /* Time reference for SECCNT and CNTR25M, initialized by power on reset */
- int64_t ref_ns;
-
- /* The incoming reference clock. */
- Clock *clkref;
-};
-
-#define TYPE_NPCM7XX_CLK "npcm7xx-clk"
-OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxCLKState, NPCM7XX_CLK)
-
-#endif /* NPCM7XX_CLK_H */
diff --git a/include/hw/misc/npcm7xx_gcr.h b/include/hw/misc/npcm7xx_gcr.h
deleted file mode 100644
index c0bbdda..0000000
--- a/include/hw/misc/npcm7xx_gcr.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Nuvoton NPCM7xx System Global Control Registers.
- *
- * Copyright 2020 Google LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-#ifndef NPCM7XX_GCR_H
-#define NPCM7XX_GCR_H
-
-#include "exec/memory.h"
-#include "hw/sysbus.h"
-
-/*
- * NPCM7XX PWRON STRAP bit fields
- * 12: SPI0 powered by VSBV3 at 1.8V
- * 11: System flash attached to BMC
- * 10: BSP alternative pins.
- * 9:8: Flash UART command route enabled.
- * 7: Security enabled.
- * 6: HI-Z state control.
- * 5: ECC disabled.
- * 4: Reserved
- * 3: JTAG2 enabled.
- * 2:0: CPU and DRAM clock frequency.
- */
-#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12)
-#define NPCM7XX_PWRON_STRAP_SFAB BIT(11)
-#define NPCM7XX_PWRON_STRAP_BSPA BIT(10)
-#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8)
-#define FUP_NORM_UART2 3
-#define FUP_PROG_UART3 2
-#define FUP_PROG_UART2 1
-#define FUP_NORM_UART3 0
-#define NPCM7XX_PWRON_STRAP_SECEN BIT(7)
-#define NPCM7XX_PWRON_STRAP_HIZ BIT(6)
-#define NPCM7XX_PWRON_STRAP_ECC BIT(5)
-#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4)
-#define NPCM7XX_PWRON_STRAP_J2EN BIT(3)
-#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x)
-#define CKFRQ_SKIPINIT 0x000
-#define CKFRQ_DEFAULT 0x111
-
-/*
- * Number of registers in our device state structure. Don't change this without
- * incrementing the version_id in the vmstate.
- */
-#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t))
-
-struct NPCM7xxGCRState {
- SysBusDevice parent;
-
- MemoryRegion iomem;
-
- uint32_t regs[NPCM7XX_GCR_NR_REGS];
-
- uint32_t reset_pwron;
- uint32_t reset_mdlr;
- uint32_t reset_intcr3;
-};
-
-#define TYPE_NPCM7XX_GCR "npcm7xx-gcr"
-OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxGCRState, NPCM7XX_GCR)
-
-#endif /* NPCM7XX_GCR_H */
diff --git a/include/hw/misc/npcm7xx_mft.h b/include/hw/misc/npcm7xx_mft.h
index d638438..e4b997a 100644
--- a/include/hw/misc/npcm7xx_mft.h
+++ b/include/hw/misc/npcm7xx_mft.h
@@ -16,7 +16,7 @@
#ifndef NPCM7XX_MFT_H
#define NPCM7XX_MFT_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/clock.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
diff --git a/include/hw/misc/npcm_clk.h b/include/hw/misc/npcm_clk.h
new file mode 100644
index 0000000..52e972f
--- /dev/null
+++ b/include/hw/misc/npcm_clk.h
@@ -0,0 +1,195 @@
+/*
+ * Nuvoton NPCM7xx/8xx Clock Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM_CLK_H
+#define NPCM_CLK_H
+
+#include "system/memory.h"
+#include "hw/clock.h"
+#include "hw/sysbus.h"
+
+#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t))
+#define NPCM8XX_CLK_NR_REGS (0xc4 / sizeof(uint32_t))
+/*
+ * Number of maximum registers in NPCM device state structure. Don't change
+ * this without incrementing the version_id in the vmstate.
+ */
+#define NPCM_CLK_MAX_NR_REGS NPCM8XX_CLK_NR_REGS
+
+#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in"
+
+/* Maximum amount of clock inputs in a SEL module. */
+#define NPCM7XX_CLK_SEL_MAX_INPUT 5
+
+/* PLLs in CLK module. */
+typedef enum NPCM7xxClockPLL {
+ NPCM7XX_CLOCK_PLL0,
+ NPCM7XX_CLOCK_PLL1,
+ NPCM7XX_CLOCK_PLL2,
+ NPCM7XX_CLOCK_PLLG,
+ NPCM7XX_CLOCK_NR_PLLS,
+} NPCM7xxClockPLL;
+
+/* SEL/MUX in CLK module. */
+typedef enum NPCM7xxClockSEL {
+ NPCM7XX_CLOCK_PIXCKSEL,
+ NPCM7XX_CLOCK_MCCKSEL,
+ NPCM7XX_CLOCK_CPUCKSEL,
+ NPCM7XX_CLOCK_CLKOUTSEL,
+ NPCM7XX_CLOCK_UARTCKSEL,
+ NPCM7XX_CLOCK_TIMCKSEL,
+ NPCM7XX_CLOCK_SDCKSEL,
+ NPCM7XX_CLOCK_GFXMSEL,
+ NPCM7XX_CLOCK_SUCKSEL,
+ NPCM7XX_CLOCK_NR_SELS,
+} NPCM7xxClockSEL;
+
+/* Dividers in CLK module. */
+typedef enum NPCM7xxClockDivider {
+ NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */
+ NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */
+ NPCM7XX_CLOCK_MC_DIVIDER,
+ NPCM7XX_CLOCK_AXI_DIVIDER,
+ NPCM7XX_CLOCK_AHB_DIVIDER,
+ NPCM7XX_CLOCK_AHB3_DIVIDER,
+ NPCM7XX_CLOCK_SPI0_DIVIDER,
+ NPCM7XX_CLOCK_SPIX_DIVIDER,
+ NPCM7XX_CLOCK_APB1_DIVIDER,
+ NPCM7XX_CLOCK_APB2_DIVIDER,
+ NPCM7XX_CLOCK_APB3_DIVIDER,
+ NPCM7XX_CLOCK_APB4_DIVIDER,
+ NPCM7XX_CLOCK_APB5_DIVIDER,
+ NPCM7XX_CLOCK_CLKOUT_DIVIDER,
+ NPCM7XX_CLOCK_UART_DIVIDER,
+ NPCM7XX_CLOCK_TIMER_DIVIDER,
+ NPCM7XX_CLOCK_ADC_DIVIDER,
+ NPCM7XX_CLOCK_MMC_DIVIDER,
+ NPCM7XX_CLOCK_SDHC_DIVIDER,
+ NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */
+ NPCM7XX_CLOCK_UTMI_DIVIDER,
+ NPCM7XX_CLOCK_NR_DIVIDERS,
+} NPCM7xxClockConverter;
+
+typedef struct NPCMCLKState NPCMCLKState;
+
+/**
+ * struct NPCM7xxClockPLLState - A PLL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @reg: The control registers for this PLL module.
+ */
+typedef struct NPCM7xxClockPLLState {
+ DeviceState parent;
+
+ const char *name;
+ NPCMCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ int reg;
+} NPCM7xxClockPLLState;
+
+/**
+ * struct NPCM7xxClockSELState - A SEL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @input_size: The size of inputs of this module.
+ * @clock_in: The input clocks of this module.
+ * @clock_out: The output clocks of this module.
+ * @offset: The offset of this module in the control register.
+ * @len: The length of this module in the control register.
+ */
+typedef struct NPCM7xxClockSELState {
+ DeviceState parent;
+
+ const char *name;
+ NPCMCLKState *clk;
+ uint8_t input_size;
+ Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT];
+ Clock *clock_out;
+
+ int offset;
+ int len;
+} NPCM7xxClockSELState;
+
+/**
+ * struct NPCM7xxClockDividerState - A Divider module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @divide: The function the divider uses to divide the input.
+ * @reg: The index of the control register that contains the divisor.
+ * @offset: The offset of the divisor in the control register.
+ * @len: The length of the divisor in the control register.
+ * @divisor: The divisor for a constant divisor
+ */
+typedef struct NPCM7xxClockDividerState {
+ DeviceState parent;
+
+ const char *name;
+ NPCMCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ uint32_t (*divide)(struct NPCM7xxClockDividerState *s);
+ union {
+ struct {
+ int reg;
+ int offset;
+ int len;
+ };
+ int divisor;
+ };
+} NPCM7xxClockDividerState;
+
+struct NPCMCLKState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ /* Clock converters */
+ /*
+ * TODO: Implement unique clock converters for NPCM8xx.
+ * NPCM8xx adds a few more clock outputs.
+ */
+ NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS];
+ NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS];
+ NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS];
+
+ uint32_t regs[NPCM_CLK_MAX_NR_REGS];
+
+ /* Time reference for SECCNT and CNTR25M, initialized by power on reset */
+ int64_t ref_ns;
+
+ /* The incoming reference clock. */
+ Clock *clkref;
+};
+
+typedef struct NPCMCLKClass {
+ SysBusDeviceClass parent;
+
+ size_t nr_regs;
+ const uint32_t *cold_reset_values;
+} NPCMCLKClass;
+
+#define TYPE_NPCM_CLK "npcm-clk"
+OBJECT_DECLARE_TYPE(NPCMCLKState, NPCMCLKClass, NPCM_CLK)
+#define TYPE_NPCM7XX_CLK "npcm7xx-clk"
+#define TYPE_NPCM8XX_CLK "npcm8xx-clk"
+
+#endif /* NPCM_CLK_H */
diff --git a/include/hw/misc/npcm_gcr.h b/include/hw/misc/npcm_gcr.h
new file mode 100644
index 0000000..702e7fd
--- /dev/null
+++ b/include/hw/misc/npcm_gcr.h
@@ -0,0 +1,86 @@
+/*
+ * Nuvoton NPCM7xx/8xx System Global Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM_GCR_H
+#define NPCM_GCR_H
+
+#include "system/memory.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+/*
+ * NPCM7XX PWRON STRAP bit fields
+ * 12: SPI0 powered by VSBV3 at 1.8V
+ * 11: System flash attached to BMC
+ * 10: BSP alternative pins.
+ * 9:8: Flash UART command route enabled.
+ * 7: Security enabled.
+ * 6: HI-Z state control.
+ * 5: ECC disabled.
+ * 4: Reserved
+ * 3: JTAG2 enabled.
+ * 2:0: CPU and DRAM clock frequency.
+ */
+#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12)
+#define NPCM7XX_PWRON_STRAP_SFAB BIT(11)
+#define NPCM7XX_PWRON_STRAP_BSPA BIT(10)
+#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8)
+#define FUP_NORM_UART2 3
+#define FUP_PROG_UART3 2
+#define FUP_PROG_UART2 1
+#define FUP_NORM_UART3 0
+#define NPCM7XX_PWRON_STRAP_SECEN BIT(7)
+#define NPCM7XX_PWRON_STRAP_HIZ BIT(6)
+#define NPCM7XX_PWRON_STRAP_ECC BIT(5)
+#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4)
+#define NPCM7XX_PWRON_STRAP_J2EN BIT(3)
+#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x)
+#define CKFRQ_SKIPINIT 0x000
+#define CKFRQ_DEFAULT 0x111
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM_GCR_MAX_NR_REGS NPCM8XX_GCR_NR_REGS
+#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t))
+#define NPCM8XX_GCR_NR_REGS (0xf80 / sizeof(uint32_t))
+
+typedef struct NPCMGCRState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[NPCM_GCR_MAX_NR_REGS];
+
+ uint32_t reset_pwron;
+ uint32_t reset_mdlr;
+ uint32_t reset_intcr3;
+ uint32_t reset_scrpad_b;
+} NPCMGCRState;
+
+typedef struct NPCMGCRClass {
+ SysBusDeviceClass parent;
+
+ size_t nr_regs;
+ const uint32_t *cold_reset_values;
+} NPCMGCRClass;
+
+#define TYPE_NPCM_GCR "npcm-gcr"
+#define TYPE_NPCM7XX_GCR "npcm7xx-gcr"
+#define TYPE_NPCM8XX_GCR "npcm8xx-gcr"
+OBJECT_DECLARE_TYPE(NPCMGCRState, NPCMGCRClass, NPCM_GCR)
+
+#endif /* NPCM_GCR_H */
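Editor's note: both the clock and GCR headers above move the register count and cold-reset values into class data so NPCM7xx and NPCM8xx can share one device model. A hedged sketch of a per-SoC class_init filling in those fields; the reset-value array and function names are illustrative only.

#include "qemu/osdep.h"
#include "hw/misc/npcm_gcr.h"

/* hypothetical per-register power-on values for the 7xx variant */
static const uint32_t npcm7xx_gcr_cold_reset_values[NPCM7XX_GCR_NR_REGS];

static void npcm7xx_gcr_class_init_sketch(ObjectClass *klass, void *data)
{
    NPCMGCRClass *c = NPCM_GCR_CLASS(klass);

    c->nr_regs = NPCM7XX_GCR_NR_REGS;
    c->cold_reset_values = npcm7xx_gcr_cold_reset_values;
}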
diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h
index 9a71a5a..5098693 100644
--- a/include/hw/misc/pvpanic.h
+++ b/include/hw/misc/pvpanic.h
@@ -15,7 +15,7 @@
#ifndef HW_MISC_PVPANIC_H
#define HW_MISC_PVPANIC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#include "standard-headers/misc/pvpanic.h"
@@ -26,6 +26,7 @@
#define TYPE_PVPANIC_ISA_DEVICE "pvpanic"
#define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci"
+#define TYPE_PVPANIC_MMIO_DEVICE "pvpanic-mmio"
#define PVPANIC_IOPORT_PROP "ioport"
diff --git a/include/hw/misc/stm32_rcc.h b/include/hw/misc/stm32_rcc.h
new file mode 100644
index 0000000..ffbdf20
--- /dev/null
+++ b/include/hw/misc/stm32_rcc.h
@@ -0,0 +1,91 @@
+/*
+ * STM32 RCC (only reset and enable registers are implemented)
+ *
+ * Copyright (c) 2024 Román Cárdenas <rcardenas.rod@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_STM32_RCC_H
+#define HW_STM32_RCC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define STM32_RCC_CR 0x00
+#define STM32_RCC_PLL_CFGR 0x04
+#define STM32_RCC_CFGR 0x08
+#define STM32_RCC_CIR 0x0C
+#define STM32_RCC_AHB1_RSTR 0x10
+#define STM32_RCC_AHB2_RSTR 0x14
+#define STM32_RCC_AHB3_RSTR 0x18
+
+#define STM32_RCC_APB1_RSTR 0x20
+#define STM32_RCC_APB2_RSTR 0x24
+
+#define STM32_RCC_AHB1_ENR 0x30
+#define STM32_RCC_AHB2_ENR 0x34
+#define STM32_RCC_AHB3_ENR 0x38
+
+#define STM32_RCC_APB1_ENR 0x40
+#define STM32_RCC_APB2_ENR 0x44
+
+#define STM32_RCC_AHB1_LPENR 0x50
+#define STM32_RCC_AHB2_LPENR 0x54
+#define STM32_RCC_AHB3_LPENR 0x58
+
+#define STM32_RCC_APB1_LPENR 0x60
+#define STM32_RCC_APB2_LPENR 0x64
+
+#define STM32_RCC_BDCR 0x70
+#define STM32_RCC_CSR 0x74
+
+#define STM32_RCC_SSCGR 0x80
+#define STM32_RCC_PLLI2SCFGR 0x84
+#define STM32_RCC_PLLSAI_CFGR 0x88
+#define STM32_RCC_DCKCFGR 0x8C
+#define STM32_RCC_CKGATENR 0x90
+#define STM32_RCC_DCKCFGR2 0x94
+
+#define STM32_RCC_NREGS ((STM32_RCC_DCKCFGR2 >> 2) + 1)
+#define STM32_RCC_PERIPHERAL_SIZE 0x400
+#define STM32_RCC_NIRQS (32 * 5) /* 32 bits per reg, 5 en/rst regs */
+
+#define STM32_RCC_GPIO_IRQ_OFFSET 0
+
+#define TYPE_STM32_RCC "stm32.rcc"
+
+typedef struct STM32RccState STM32RccState;
+
+DECLARE_INSTANCE_CHECKER(STM32RccState, STM32_RCC, TYPE_STM32_RCC)
+
+#define NUM_GPIO_EVENT_IN_LINES 16
+
+struct STM32RccState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t regs[STM32_RCC_NREGS];
+
+ qemu_irq enable_irq[STM32_RCC_NIRQS];
+ qemu_irq reset_irq[STM32_RCC_NIRQS];
+};
+
+#endif /* HW_STM32_RCC_H */
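A short sketch of the offset-to-index relationship that the STM32_RCC_NREGS definition above relies on (one 32-bit register per four bytes of the peripheral window); the read helper is hypothetical.

#include "qemu/osdep.h"
#include "hw/misc/stm32_rcc.h"

/* Hypothetical helper: fetch a register value by its byte offset. */
static uint32_t example_stm32_rcc_read(STM32RccState *s, hwaddr offset)
{
    if (offset > STM32_RCC_DCKCFGR2 || (offset & 3)) {
        return 0;   /* outside the implemented, word-aligned register range */
    }
    return s->regs[offset >> 2];
}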
diff --git a/include/hw/misc/stm32l4x5_syscfg.h b/include/hw/misc/stm32l4x5_syscfg.h
index 23bb564..c450df2 100644
--- a/include/hw/misc/stm32l4x5_syscfg.h
+++ b/include/hw/misc/stm32l4x5_syscfg.h
@@ -48,6 +48,7 @@ struct Stm32l4x5SyscfgState {
uint32_t swpr2;
qemu_irq gpio_out[GPIO_NUM_PINS];
+ Clock *clk;
};
#endif
diff --git a/include/hw/misc/vmcoreinfo.h b/include/hw/misc/vmcoreinfo.h
index 0b7b55d..1aa4477 100644
--- a/include/hw/misc/vmcoreinfo.h
+++ b/include/hw/misc/vmcoreinfo.h
@@ -16,10 +16,9 @@
#include "standard-headers/linux/qemu_fw_cfg.h"
#include "qom/object.h"
-#define VMCOREINFO_DEVICE "vmcoreinfo"
+#define TYPE_VMCOREINFO "vmcoreinfo"
typedef struct VMCoreInfoState VMCoreInfoState;
-DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO,
- VMCOREINFO_DEVICE)
+DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO, TYPE_VMCOREINFO)
typedef struct fw_cfg_vmcoreinfo FWCfgVMCoreInfo;
@@ -33,7 +32,7 @@ struct VMCoreInfoState {
/* returns NULL unless there is exactly one device */
static inline VMCoreInfoState *vmcoreinfo_find(void)
{
- Object *o = object_resolve_path_type("", VMCOREINFO_DEVICE, NULL);
+ Object *o = object_resolve_path_type("", TYPE_VMCOREINFO, NULL);
return o ? VMCOREINFO(o) : NULL;
}
diff --git a/include/hw/misc/xlnx-versal-trng.h b/include/hw/misc/xlnx-versal-trng.h
index 0bcef8a..d96f8f9 100644
--- a/include/hw/misc/xlnx-versal-trng.h
+++ b/include/hw/misc/xlnx-versal-trng.h
@@ -50,6 +50,7 @@ typedef struct XlnxVersalTRng {
uint64_t forced_prng_count;
uint64_t tst_seed[2];
+ RegisterInfoArray *reg_array;
uint32_t regs[RMAX_XLNX_VERSAL_TRNG];
RegisterInfo regs_info[RMAX_XLNX_VERSAL_TRNG];
} XlnxVersalTRng;
diff --git a/include/hw/net/dp8393x.h b/include/hw/net/dp8393x.h
index 4a3f747..24273dc 100644
--- a/include/hw/net/dp8393x.h
+++ b/include/hw/net/dp8393x.h
@@ -22,7 +22,7 @@
#include "hw/sysbus.h"
#include "net/net.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#define SONIC_REG_COUNT 0x40
diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h
index 2d13290..83b2163 100644
--- a/include/hw/net/imx_fec.h
+++ b/include/hw/net/imx_fec.h
@@ -31,6 +31,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXFECState, IMX_FEC)
#define TYPE_IMX_ENET "imx.enet"
#include "hw/sysbus.h"
+#include "hw/net/lan9118_phy.h"
+#include "hw/irq.h"
#include "net/net.h"
#define ENET_EIR 1
@@ -264,11 +266,8 @@ struct IMXFECState {
uint32_t tx_descriptor[ENET_TX_RING_NUM];
uint32_t tx_ring_num;
- uint32_t phy_status;
- uint32_t phy_control;
- uint32_t phy_advertise;
- uint32_t phy_int;
- uint32_t phy_int_mask;
+ Lan9118PhyState mii;
+ IRQState mii_irq;
uint32_t phy_num;
bool phy_connected;
struct IMXFECState *phy_consumer;
diff --git a/include/hw/net/lan9118_phy.h b/include/hw/net/lan9118_phy.h
new file mode 100644
index 0000000..af12fc3
--- /dev/null
+++ b/include/hw/net/lan9118_phy.h
@@ -0,0 +1,37 @@
+/*
+ * SMSC LAN9118 PHY emulation
+ *
+ * Copyright (c) 2009 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_NET_LAN9118_PHY_H
+#define HW_NET_LAN9118_PHY_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+#define TYPE_LAN9118_PHY "lan9118-phy"
+OBJECT_DECLARE_SIMPLE_TYPE(Lan9118PhyState, LAN9118_PHY)
+
+typedef struct Lan9118PhyState {
+ SysBusDevice parent_obj;
+
+ uint16_t status;
+ uint16_t control;
+ uint16_t advertise;
+ uint16_t ints;
+ uint16_t int_mask;
+ qemu_irq irq;
+ bool link_down;
+} Lan9118PhyState;
+
+void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down);
+void lan9118_phy_reset(Lan9118PhyState *s);
+uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg);
+void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val);
+
+#endif
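A minimal sketch of a MAC model driving the shared PHY helpers declared above, mirroring how the imx_fec change earlier in this patch embeds Lan9118PhyState; the helper name and the use of MII_BMSR/MII_BMSR_LINK_ST from "hw/net/mii.h" are assumptions made for illustration.

#include "qemu/osdep.h"
#include "hw/net/lan9118_phy.h"
#include "hw/net/mii.h"

/* Reset the embedded PHY, propagate the current link state, then report
 * whether the PHY now sees the link as up. */
static bool example_phy_link_up(Lan9118PhyState *phy, bool link_down)
{
    lan9118_phy_reset(phy);
    lan9118_phy_update_link(phy, link_down);
    return lan9118_phy_read(phy, MII_BMSR) & MII_BMSR_LINK_ST;
}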
diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h
index f7feddac..55bf7c9 100644
--- a/include/hw/net/mii.h
+++ b/include/hw/net/mii.h
@@ -71,6 +71,7 @@
#define MII_BMSR_JABBER (1 << 1) /* Jabber detected */
#define MII_BMSR_EXTCAP (1 << 0) /* Ext-reg capability */
+#define MII_ANAR_RFAULT (1 << 13) /* Say we can detect faults */
#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymmetric pause */
#define MII_ANAR_PAUSE (1 << 10) /* Try for pause */
#define MII_ANAR_TXFD (1 << 8)
@@ -78,6 +79,7 @@
#define MII_ANAR_10FD (1 << 6)
#define MII_ANAR_10 (1 << 5)
#define MII_ANAR_CSMACD (1 << 0)
+#define MII_ANAR_SELECT (0x001f) /* Selector bits */
#define MII_ANLPAR_ACK (1 << 14)
#define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */
@@ -112,6 +114,10 @@
#define RTL8201CP_PHYID1 0x0000
#define RTL8201CP_PHYID2 0x8201
+/* SMSC LAN9118 */
+#define SMSCLAN9118_PHYID1 0x0007
+#define SMSCLAN9118_PHYID2 0xc0d1
+
/* RealTek 8211E */
#define RTL8211E_PHYID1 0x001c
#define RTL8211E_PHYID2 0xc915
diff --git a/include/hw/net/msf2-emac.h b/include/hw/net/msf2-emac.h
index 846ba6e..b5d9127 100644
--- a/include/hw/net/msf2-emac.h
+++ b/include/hw/net/msf2-emac.h
@@ -23,7 +23,7 @@
*/
#include "hw/sysbus.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "net/net.h"
#include "net/eth.h"
#include "qom/object.h"
diff --git a/include/hw/net/npcm_pcs.h b/include/hw/net/npcm_pcs.h
new file mode 100644
index 0000000..d5c481a
--- /dev/null
+++ b/include/hw/net/npcm_pcs.h
@@ -0,0 +1,42 @@
+/*
+ * Nuvoton NPCM8xx PCS Module
+ *
+ * Copyright 2022 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM_PCS_H
+#define NPCM_PCS_H
+
+#include "hw/sysbus.h"
+
+#define NPCM_PCS_NR_SR_CTLS (0x12 / sizeof(uint16_t))
+#define NPCM_PCS_NR_SR_MIIS (0x20 / sizeof(uint16_t))
+#define NPCM_PCS_NR_SR_TIMS (0x22 / sizeof(uint16_t))
+#define NPCM_PCS_NR_VR_MIIS (0x1c6 / sizeof(uint16_t))
+
+struct NPCMPCSState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint16_t indirect_access_base;
+ uint16_t sr_ctl[NPCM_PCS_NR_SR_CTLS];
+ uint16_t sr_mii[NPCM_PCS_NR_SR_MIIS];
+ uint16_t sr_tim[NPCM_PCS_NR_SR_TIMS];
+ uint16_t vr_mii[NPCM_PCS_NR_VR_MIIS];
+};
+
+#define TYPE_NPCM_PCS "npcm-pcs"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCMPCSState, NPCM_PCS)
+
+#endif /* NPCM_PCS_H */
diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h
index fee79b7..7825840 100644
--- a/include/hw/nubus/nubus.h
+++ b/include/hw/nubus/nubus.h
@@ -11,7 +11,7 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#include "qemu/units.h"
diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h
index d173998..d41b932 100644
--- a/include/hw/nvram/fw_cfg.h
+++ b/include/hw/nvram/fw_cfg.h
@@ -4,7 +4,7 @@
#include "exec/hwaddr.h"
#include "standard-headers/linux/qemu_fw_cfg.h"
#include "hw/sysbus.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qom/object.h"
#define TYPE_FW_CFG "fw_cfg"
@@ -30,8 +30,9 @@ struct FWCfgDataGeneratorClass {
* @obj: the object implementing this interface
* @errp: pointer to a NULL-initialized error object
*
- * Returns: reference to a byte array containing the data on success,
- * or NULL on error.
+ * Returns: A byte array containing data to add, or NULL without
+ * @errp set if no data is required, or NULL with @errp
+ * set on failure.
*
* The caller should release the reference when no longer
* required.
@@ -41,14 +42,6 @@ struct FWCfgDataGeneratorClass {
typedef struct fw_cfg_file FWCfgFile;
-#define FW_CFG_ORDER_OVERRIDE_VGA 70
-#define FW_CFG_ORDER_OVERRIDE_NIC 80
-#define FW_CFG_ORDER_OVERRIDE_USER 100
-#define FW_CFG_ORDER_OVERRIDE_DEVICE 110
-
-void fw_cfg_set_order_override(FWCfgState *fw_cfg, int order);
-void fw_cfg_reset_order_override(FWCfgState *fw_cfg);
-
typedef struct FWCfgFiles {
uint32_t count;
FWCfgFile f[];
@@ -74,8 +67,6 @@ struct FWCfgState {
uint32_t cur_offset;
Notifier machine_ready;
- int fw_cfg_order_override;
-
bool dma_enabled;
dma_addr_t dma_addr;
AddressSpace *dma_as;
@@ -291,37 +282,31 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename, void *data,
size_t len);
/**
- * fw_cfg_add_from_generator:
+ * fw_cfg_add_file_from_generator:
* @s: fw_cfg device being modified
* @filename: name of new fw_cfg file item
- * @gen_id: name of object implementing FW_CFG_DATA_GENERATOR interface
+ * @part: name of object implementing FW_CFG_DATA_GENERATOR interface
+ * @parent: the object in which to resolve the @part
* @errp: pointer to a NULL initialized error object
*
- * Add a new NAMED fw_cfg item with the content generated from the
- * @gen_id object. The data generated by the @gen_id object is copied
- * into the data structure of the fw_cfg device.
+ * If the @part object generates content, add a new NAMED fw_cfg item with it.
+ * The data generated by the @part object is copied into the data structure of
+ * the fw_cfg device.
* The next available (unused) selector key starting at FW_CFG_FILE_FIRST
* will be used; also, a new entry will be added to the file directory
* structure residing at key value FW_CFG_FILE_DIR, containing the item name,
* data size, and assigned selector key value.
*
- * Returns: %true on success, %false on error.
- */
-bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
- const char *gen_id, Error **errp);
-
-/**
- * fw_cfg_add_extra_pci_roots:
- * @bus: main pci root bus to be scanned from
- * @s: fw_cfg device being modified
+ * If the @part object does not generate content, no fw_cfg item is added.
*
- * Add a new fw_cfg item...
+ * Returns: %true on success, %false on error.
*/
-void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s);
+bool fw_cfg_add_file_from_generator(FWCfgState *s,
+ Object *parent, const char *part,
+ const char *filename, Error **errp);
FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
AddressSpace *dma_as);
-FWCfgState *fw_cfg_init_io(uint32_t iobase);
FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr);
FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
hwaddr data_addr, uint32_t data_width,
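A hedged sketch of the renamed generator hook documented above; the parent object, part name and fw_cfg file name are illustrative only. Returning true without adding an item is the documented "generator produced no data" case.

#include "qemu/osdep.h"
#include "hw/nvram/fw_cfg.h"
#include "qapi/error.h"

/* Resolve the hypothetical child "tls-creds0" under @parent and, if it
 * generates data, expose that data as the fw_cfg file "etc/example". */
static bool example_add_generated_file(FWCfgState *s, Object *parent,
                                       Error **errp)
{
    return fw_cfg_add_file_from_generator(s, parent, "tls-creds0",
                                          "etc/example", errp);
}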
diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h
index b39eb04..dfd2a44 100644
--- a/include/hw/nvram/fw_cfg_acpi.h
+++ b/include/hw/nvram/fw_cfg_acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ACPI support for fw_cfg
*
diff --git a/include/hw/nvram/mac_nvram.h b/include/hw/nvram/mac_nvram.h
index 0c4dfae..e9d8398 100644
--- a/include/hw/nvram/mac_nvram.h
+++ b/include/hw/nvram/mac_nvram.h
@@ -26,7 +26,7 @@
#ifndef MAC_NVRAM_H
#define MAC_NVRAM_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
#define MACIO_NVRAM_SIZE 0x2000
diff --git a/include/hw/nvram/npcm7xx_otp.h b/include/hw/nvram/npcm7xx_otp.h
index ea4b5d0..77b05f8 100644
--- a/include/hw/nvram/npcm7xx_otp.h
+++ b/include/hw/nvram/npcm7xx_otp.h
@@ -16,7 +16,7 @@
#ifndef NPCM7XX_OTP_H
#define NPCM7XX_OTP_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
/* Each OTP module holds 8192 bits of one-time programmable storage */
diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h
index 6fc13f8..58acbe9 100644
--- a/include/hw/nvram/xlnx-bbram.h
+++ b/include/hw/nvram/xlnx-bbram.h
@@ -26,7 +26,7 @@
#ifndef XLNX_BBRAM_H
#define XLNX_BBRAM_H
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/qdev-core.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
@@ -47,6 +47,7 @@ struct XlnxBBRam {
bool bbram8_wo;
bool blk_ro;
+ RegisterInfoArray *reg_array;
uint32_t regs[RMAX_XLNX_BBRAM];
RegisterInfo regs_info[RMAX_XLNX_BBRAM];
};
diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h
index cff7924..ef14fb0 100644
--- a/include/hw/nvram/xlnx-efuse.h
+++ b/include/hw/nvram/xlnx-efuse.h
@@ -27,7 +27,7 @@
#ifndef XLNX_EFUSE_H
#define XLNX_EFUSE_H
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/qdev-core.h"
#define TYPE_XLNX_EFUSE "xlnx-efuse"
diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h
index 86e2261..afa4f4f 100644
--- a/include/hw/nvram/xlnx-versal-efuse.h
+++ b/include/hw/nvram/xlnx-versal-efuse.h
@@ -44,6 +44,7 @@ struct XlnxVersalEFuseCtrl {
void *extra_pg0_lock_spec; /* Opaque property */
uint32_t extra_pg0_lock_n16;
+ RegisterInfoArray *reg_array;
uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
};
diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h
index f5beacc..7fb12df 100644
--- a/include/hw/nvram/xlnx-zynqmp-efuse.h
+++ b/include/hw/nvram/xlnx-zynqmp-efuse.h
@@ -37,6 +37,7 @@ struct XlnxZynqMPEFuse {
qemu_irq irq;
XlnxEFuse *efuse;
+ RegisterInfoArray *reg_array;
uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX];
RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX];
};
diff --git a/include/hw/openrisc/boot.h b/include/hw/openrisc/boot.h
index 25a313d..9b4d880 100644
--- a/include/hw/openrisc/boot.h
+++ b/include/hw/openrisc/boot.h
@@ -20,6 +20,7 @@
#define OPENRISC_BOOT_H
#include "exec/cpu-defs.h"
+#include "hw/boards.h"
hwaddr openrisc_load_kernel(ram_addr_t ram_size,
const char *kernel_filename,
@@ -28,7 +29,7 @@ hwaddr openrisc_load_kernel(ram_addr_t ram_size,
hwaddr openrisc_load_initrd(void *fdt, const char *filename,
hwaddr load_start, uint64_t mem_size);
-uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start,
+uint32_t openrisc_load_fdt(MachineState *ms, void *fdt, hwaddr load_start,
uint64_t mem_size);
#endif /* OPENRISC_BOOT_H */
diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h
index 1263513..f208397 100644
--- a/include/hw/pci-bridge/cxl_upstream_port.h
+++ b/include/hw/pci-bridge/cxl_upstream_port.h
@@ -12,6 +12,10 @@ typedef struct CXLUpstreamPort {
/*< public >*/
CXLComponentState cxl_cstate;
CXLCCI swcci;
+
+ PCIExpLinkSpeed speed;
+ PCIExpLinkWidth width;
+
DOECap doe_cdat;
uint64_t sn;
} CXLUpstreamPort;
diff --git a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h
index e296691..832125a 100644
--- a/include/hw/pci-host/astro.h
+++ b/include/hw/pci-host/astro.h
@@ -24,6 +24,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE)
#define LMMIO_DIST_BASE_ADDR 0xf4000000ULL
#define LMMIO_DIST_BASE_SIZE 0x4000000ULL
+#define LMMIO_DIRECT_RANGES 4
+
#define IOS_DIST_BASE_ADDR 0xfffee00000ULL
#define IOS_DIST_BASE_SIZE 0x10000ULL
@@ -83,9 +85,7 @@ struct AstroState {
struct ElroyState *elroy[ELROY_NUM];
MemoryRegion this_mem;
-
- MemoryRegion pci_mmio;
- MemoryRegion pci_io;
+ MemoryRegion lmmio_direct[LMMIO_DIRECT_RANGES];
IOMMUMemoryRegion iommu;
AddressSpace iommu_as;
diff --git a/include/hw/pci-host/designware.h b/include/hw/pci-host/designware.h
index 908f3d9..a35a3bd 100644
--- a/include/hw/pci-host/designware.h
+++ b/include/hw/pci-host/designware.h
@@ -25,13 +25,18 @@
#include "hw/pci/pci_bridge.h"
#include "qom/object.h"
+#define TYPE_DESIGNWARE_PCIE_ROOT_BUS "designware-pcie-root-BUS"
+OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERootBus, DESIGNWARE_PCIE_ROOT_BUS)
+
#define TYPE_DESIGNWARE_PCIE_HOST "designware-pcie-host"
OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIEHost, DESIGNWARE_PCIE_HOST)
#define TYPE_DESIGNWARE_PCIE_ROOT "designware-pcie-root"
OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERoot, DESIGNWARE_PCIE_ROOT)
-struct DesignwarePCIERoot;
+struct DesignwarePCIERootBus {
+ PCIBus parent;
+};
typedef struct DesignwarePCIEViewport {
DesignwarePCIERoot *root;
@@ -88,6 +93,7 @@ struct DesignwarePCIEHost {
MemoryRegion io;
qemu_irq irqs[4];
+ qemu_irq msi;
} pci;
MemoryRegion mmio;
diff --git a/include/hw/pci-host/dino.h b/include/hw/pci-host/dino.h
index fd7975c..5dc8cdf 100644
--- a/include/hw/pci-host/dino.h
+++ b/include/hw/pci-host/dino.h
@@ -109,10 +109,6 @@ static const uint32_t reg800_keep_bits[DINO800_REGS] = {
struct DinoState {
PCIHostState parent_obj;
- /*
- * PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops,
- * so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops.
- */
uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */
uint32_t iar0;
diff --git a/include/hw/pci-host/fsl_imx8m_phy.h b/include/hw/pci-host/fsl_imx8m_phy.h
new file mode 100644
index 0000000..5f1b212
--- /dev/null
+++ b/include/hw/pci-host/fsl_imx8m_phy.h
@@ -0,0 +1,28 @@
+/*
+ * i.MX8 PCIe PHY emulation
+ *
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCIHOST_FSLIMX8MPCIEPHY_H
+#define HW_PCIHOST_FSLIMX8MPCIEPHY_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "system/memory.h"
+
+#define TYPE_FSL_IMX8M_PCIE_PHY "fsl-imx8m-pcie-phy"
+OBJECT_DECLARE_SIMPLE_TYPE(FslImx8mPciePhyState, FSL_IMX8M_PCIE_PHY)
+
+#define FSL_IMX8M_PCIE_PHY_DATA_SIZE 0x800
+
+struct FslImx8mPciePhyState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint8_t data[FSL_IMX8M_PCIE_PHY_DATA_SIZE];
+};
+
+#endif
diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index dce8835..8447153 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -32,8 +32,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST)
#define TYPE_GPEX_ROOT_DEVICE "gpex-root"
OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE)
-#define GPEX_NUM_IRQS 4
-
struct GPEXRootState {
/*< private >*/
PCIDevice parent_obj;
@@ -49,6 +47,7 @@ struct GPEXConfig {
PCIBus *bus;
};
+typedef struct GPEXIrq GPEXIrq;
struct GPEXHost {
/*< private >*/
PCIExpressHost parent_obj;
@@ -60,8 +59,8 @@ struct GPEXHost {
MemoryRegion io_mmio;
MemoryRegion io_ioport_window;
MemoryRegion io_mmio_window;
- qemu_irq irq[GPEX_NUM_IRQS];
- int irq_num[GPEX_NUM_IRQS];
+ GPEXIrq *irq;
+ uint8_t num_irqs;
bool allow_unmapped_accesses;
diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h
index cd7c9ec..79d4ea8 100644
--- a/include/hw/pci-host/ls7a.h
+++ b/include/hw/pci-host/ls7a.h
@@ -36,17 +36,18 @@
#define VIRT_PCH_PIC_IRQ_NUM 32
#define VIRT_GSI_BASE 64
#define VIRT_DEVICE_IRQS 16
+#define VIRT_UART_COUNT 4
#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2)
#define VIRT_UART_BASE 0x1fe001e0
-#define VIRT_UART_SIZE 0X100
-#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3)
+#define VIRT_UART_SIZE 0x100
+#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6)
#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000)
#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100)
#define VIRT_RTC_LEN 0x100
-#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4)
+#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7)
#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000
#define VIRT_PLATFORM_BUS_SIZE 0x2000000
#define VIRT_PLATFORM_BUS_NUM_IRQS 2
-#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5)
+#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8)
#endif
diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h
index 005916f..44f3908 100644
--- a/include/hw/pci-host/pam.h
+++ b/include/hw/pci-host/pam.h
@@ -50,7 +50,7 @@
* 0xf0000 - 0xfffff System BIOS Area Memory Segments
*/
-#include "exec/memory.h"
+#include "system/memory.h"
#define SMRAM_C_BASE 0xa0000
#define SMRAM_C_END 0xc0000
diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h
index 8abee78..8a80c0c 100644
--- a/include/hw/pci-host/pnv_phb4.h
+++ b/include/hw/pci-host/pnv_phb4.h
@@ -13,6 +13,7 @@
#include "hw/pci-host/pnv_phb.h"
#include "hw/pci/pci_bus.h"
#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_nest_pervasive.h"
#include "hw/ppc/xive.h"
#include "qom/object.h"
@@ -174,6 +175,9 @@ struct PnvPhb4PecState {
uint32_t index;
uint32_t chip_id;
+ /* Pervasive chiplet control */
+ PnvNestChipletPervasive nest_pervasive;
+
 /* Nest registers, excluding per-stack */
#define PHB4_PEC_NEST_REGS_COUNT 0xf
uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT];
@@ -196,6 +200,7 @@ struct PnvPhb4PecState {
struct PnvPhb4PecClass {
DeviceClass parent_class;
+ uint32_t (*xscom_cplt_base)(PnvPhb4PecState *pec);
uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec);
uint32_t xscom_nest_size;
uint32_t (*xscom_pci_base)(PnvPhb4PecState *pec);
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index 22fadfa..ddafc3f 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -181,8 +181,6 @@ struct Q35PCIHost {
#define MCH_PCIE_DEV 1
#define MCH_PCIE_FUNC 0
-uint64_t mch_mcfg_base(void);
-
/*
* Arbitrary but unique BNF number for IOAPIC device.
*
diff --git a/include/hw/pci-host/remote.h b/include/hw/pci-host/remote.h
index 690a01f..5264c35 100644
--- a/include/hw/pci-host/remote.h
+++ b/include/hw/pci-host/remote.h
@@ -11,7 +11,7 @@
#ifndef PCI_HOST_REMOTE_H
#define PCI_HOST_REMOTE_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/pci/pcie_host.h"
#define TYPE_REMOTE_PCIHOST "remote-pcihost"
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 3778aac..0db87f1 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -53,7 +53,6 @@ struct SpaprPhbState {
uint32_t index;
uint64_t buid;
char *dtbusname;
- bool dr_enabled;
MemoryRegion memspace, iospace;
hwaddr mem_win_addr, mem_win_size, mem64_win_addr, mem64_win_size;
@@ -84,10 +83,6 @@ struct SpaprPhbState {
bool pcie_ecs; /* Allow access to PCIe extended config space? */
/* Fields for migration compatibility hacks */
- bool pre_2_8_migration;
- uint32_t mig_liobn;
- hwaddr mig_mem_win_addr, mig_mem_win_size;
- hwaddr mig_io_win_addr, mig_io_win_size;
bool pre_5_1_assoc;
};
diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
index 0e6f257..11ef945 100644
--- a/include/hw/pci/msix.h
+++ b/include/hw/pci/msix.h
@@ -32,6 +32,7 @@ int msix_present(PCIDevice *dev);
bool msix_is_masked(PCIDevice *dev, unsigned vector);
void msix_set_pending(PCIDevice *dev, unsigned vector);
void msix_clr_pending(PCIDevice *dev, int vector);
+int msix_is_pending(PCIDevice *dev, unsigned vector);
void msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_unuse(PCIDevice *dev, unsigned vector);
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 14a869e..df3cc7b 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -1,9 +1,9 @@
#ifndef QEMU_PCI_H
#define QEMU_PCI_H
-#include "exec/memory.h"
-#include "sysemu/dma.h"
-#include "sysemu/host_iommu_device.h"
+#include "system/memory.h"
+#include "system/dma.h"
+#include "system/host_iommu_device.h"
/* PCI includes legacy ISA access. */
#include "hw/isa/isa.h"
@@ -16,13 +16,17 @@ extern bool pci_available;
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
-#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn))
+#define PCI_BUILD_BDF(bus, devfn) (((bus) << 8) | (devfn))
#define PCI_BDF_TO_DEVFN(x) ((x) & 0xff)
#define PCI_BUS_MAX 256
#define PCI_DEVFN_MAX 256
#define PCI_SLOT_MAX 32
#define PCI_FUNC_MAX 8
+#define PCI_SBDF(seg, bus, dev, func) \
+ ((((uint32_t)(seg)) << 16) | \
+ (PCI_BUILD_BDF(bus, PCI_DEVFN(dev, func))))
+
/* Class, Vendor and Device IDs from Linux's pci_ids.h */
#include "hw/pci/pci_ids.h"
@@ -116,6 +120,7 @@ extern bool pci_available;
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
#define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012
#define PCI_DEVICE_ID_REDHAT_UFS 0x0013
+#define PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014
#define PCI_DEVICE_ID_REDHAT_QXL 0x0100
#define FMT_PCIBUS PRIx64
@@ -213,6 +218,12 @@ enum {
QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
#define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12
QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR),
+#define QEMU_PCIE_EXT_TAG_BITNR 13
+ QEMU_PCIE_EXT_TAG = (1 << QEMU_PCIE_EXT_TAG_BITNR),
+#define QEMU_PCI_CAP_PM_BITNR 14
+ QEMU_PCI_CAP_PM = (1 << QEMU_PCI_CAP_PM_BITNR),
+#define QEMU_PCI_SKIP_RESET_ON_CPR_BITNR 15
+ QEMU_PCI_SKIP_RESET_ON_CPR = (1 << QEMU_PCI_SKIP_RESET_ON_CPR_BITNR),
};
typedef struct PCIINTxRoute {
@@ -294,6 +305,9 @@ int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus);
void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask);
void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask);
+bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
+ PCIBus *bus,
+ Error **errp);
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
static inline int pci_swizzle(int slot, int pin)
{
@@ -363,6 +377,28 @@ void pci_bus_get_w64_range(PCIBus *bus, Range *range);
void pci_device_deassert_intx(PCIDevice *dev);
+/* Page Request Interface */
+typedef enum {
+ IOMMU_PRI_RESP_SUCCESS,
+ IOMMU_PRI_RESP_INVALID_REQUEST,
+ IOMMU_PRI_RESP_FAILURE,
+} IOMMUPRIResponseCode;
+
+typedef struct IOMMUPRIResponse {
+ IOMMUPRIResponseCode response_code;
+ uint16_t prgi;
+} IOMMUPRIResponse;
+
+struct IOMMUPRINotifier;
+
+typedef void (*IOMMUPRINotify)(struct IOMMUPRINotifier *notifier,
+ IOMMUPRIResponse *response);
+
+typedef struct IOMMUPRINotifier {
+ IOMMUPRINotify notify;
+} IOMMUPRINotifier;
+
+#define PCI_PRI_PRGI_MASK 0x1ffU
/**
* struct PCIIOMMUOps: callbacks structure for specific IOMMU handlers
@@ -417,6 +453,179 @@ typedef struct PCIIOMMUOps {
* @devfn: device and function number of the PCI device.
*/
void (*unset_iommu_device)(PCIBus *bus, void *opaque, int devfn);
+ /**
+ * @get_iotlb_info: get properties required to initialize a device IOTLB.
+ *
+ * Callback required if devices are allowed to cache translations.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @addr_width: the address width of the IOMMU (output parameter).
+ *
+ * @min_page_size: the page size of the IOMMU (output parameter).
+ */
+ void (*get_iotlb_info)(void *opaque, uint8_t *addr_width,
+ uint32_t *min_page_size);
+ /**
+ * @init_iotlb_notifier: initialize an IOMMU notifier.
+ *
+ * Optional callback.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @n: the notifier to be initialized.
+ *
+ * @fn: the callback to be installed.
+ *
+ * @user_opaque: a user pointer that can be used to track a state.
+ */
+ void (*init_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn,
+ IOMMUNotifier *n, IOMMUNotify fn,
+ void *user_opaque);
+ /**
+ * @register_iotlb_notifier: setup an IOTLB invalidation notifier.
+ *
+ * Callback required if devices are allowed to cache translations.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to watch.
+ *
+ * @n: the notifier to register.
+ */
+ void (*register_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid, IOMMUNotifier *n);
+ /**
+ * @unregister_iotlb_notifier: remove an IOTLB invalidation notifier.
+ *
+ * Callback required if devices are allowed to cache translations.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to stop watching.
+ *
+ * @n: the notifier to unregister.
+ */
+ void (*unregister_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid, IOMMUNotifier *n);
+ /**
+ * @ats_request_translation: issue an ATS request.
+ *
+ * Callback required if devices are allowed to use the address
+ * translation service.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to use for the request.
+ *
+ * @priv_req: privileged mode bit (PASID TLP).
+ *
+ * @exec_req: execute request bit (PASID TLP).
+ *
+ * @addr: start address of the memory range to be translated.
+ *
+ * @length: length of the memory range in bytes.
+ *
+ * @no_write: request a read-only translation (if supported).
+ *
+ * @result: buffer in which the TLB entries will be stored.
+ *
+ * @result_length: result buffer length.
+ *
+ * @err_count: number of untranslated subregions.
+ *
+ * Returns: the number of translations stored in the result buffer, or
+ * -ENOMEM if the buffer is not large enough.
+ */
+ ssize_t (*ats_request_translation)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid, bool priv_req,
+ bool exec_req, hwaddr addr,
+ size_t length, bool no_write,
+ IOMMUTLBEntry *result,
+ size_t result_length,
+ uint32_t *err_count);
+ /**
+ * @pri_register_notifier: setup the PRI completion callback.
+ *
+ * Callback required if devices are allowed to use the page request
+ * interface.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to track.
+ *
+ * @notifier: the notifier to register.
+ */
+ void (*pri_register_notifier)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid, IOMMUPRINotifier *notifier);
+ /**
+ * @pri_unregister_notifier: remove the PRI completion callback.
+ *
+ * Callback required if devices are allowed to use the page request
+ * interface.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to stop tracking.
+ */
+ void (*pri_unregister_notifier)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid);
+ /**
+ * @pri_request_page: issue a PRI request.
+ *
+ * Callback required if devices are allowed to use the page request
+ * interface.
+ *
+ * @bus: the #PCIBus of the PCI device.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number of the PCI device.
+ *
+ * @pasid: the pasid of the address space to use for the request.
+ *
+ * @priv_req: privileged mode bit (PASID TLP).
+ *
+ * @exec_req: execute request bit (PASID TLP).
+ *
+ * @addr: untranslated address of the requested page.
+ *
+ * @lpig: last page in group.
+ *
+ * @prgi: page request group index.
+ *
+ * @is_read: request read access.
+ *
+ * @is_write: request write access.
+ */
+ int (*pri_request_page)(PCIBus *bus, void *opaque, int devfn,
+ uint32_t pasid, bool priv_req, bool exec_req,
+ hwaddr addr, bool lpig, uint16_t prgi, bool is_read,
+ bool is_write);
} PCIIOMMUOps;
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev);
@@ -425,6 +634,126 @@ bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
void pci_device_unset_iommu_device(PCIDevice *dev);
/**
+ * pci_iommu_get_iotlb_info: get properties required to initialize a
+ * device IOTLB.
+ *
+ * Returns 0 on success, or a negative errno otherwise.
+ *
+ * @dev: the device that wants to get the information.
+ * @addr_width: the address width of the IOMMU (output parameter).
+ * @min_page_size: the page size of the IOMMU (output parameter).
+ */
+int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width,
+ uint32_t *min_page_size);
+
+/**
+ * pci_iommu_init_iotlb_notifier: initialize an IOMMU notifier.
+ *
+ * This function is used by devices before registering an IOTLB notifier.
+ *
+ * @dev: the device.
+ * @n: the notifier to be initialized.
+ * @fn: the callback to be installed.
+ * @opaque: a user pointer that can be used to track a state.
+ */
+int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n,
+ IOMMUNotify fn, void *opaque);
+
+/**
+ * pci_ats_request_translation: perform an ATS request.
+ *
+ * Returns the number of translations stored in @result in case of success,
+ * a negative error code otherwise.
+ * -ENOMEM is returned when the result buffer is not large enough to store
+ * all the translations.
+ *
+ * @dev: the ATS-capable PCI device.
+ * @pasid: the pasid of the address space in which the translation will be done.
+ * @priv_req: privileged mode bit (PASID TLP).
+ * @exec_req: execute request bit (PASID TLP).
+ * @addr: start address of the memory range to be translated.
+ * @length: length of the memory range in bytes.
+ * @no_write: request a read-only translation (if supported).
+ * @result: buffer in which the TLB entries will be stored.
+ * @result_length: result buffer length.
+ * @err_count: number of untranslated subregions.
+ */
+ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
+ bool priv_req, bool exec_req,
+ hwaddr addr, size_t length,
+ bool no_write, IOMMUTLBEntry *result,
+ size_t result_length,
+ uint32_t *err_count);
+
+/**
+ * pci_pri_request_page: perform a PRI request.
+ *
+ * Returns 0 if the PRI request has been sent to the guest OS,
+ * an error code otherwise.
+ *
+ * @dev: the PRI-capable PCI device.
+ * @pasid: the pasid of the address space in which the translation will be done.
+ * @priv_req: privileged mode bit (PASID TLP).
+ * @exec_req: execute request bit (PASID TLP).
+ * @addr: untranslated address of the requested page.
+ * @lpig: last page in group.
+ * @prgi: page request group index.
+ * @is_read: request read access.
+ * @is_write: request write access.
+ */
+int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req,
+ bool exec_req, hwaddr addr, bool lpig,
+ uint16_t prgi, bool is_read, bool is_write);
+
+/**
+ * pci_pri_register_notifier: register the PRI callback for a given address
+ * space.
+ *
+ * Returns 0 on success, an error code otherwise.
+ *
+ * @dev: the PRI-capable PCI device.
+ * @pasid: the pasid of the address space to track.
+ * @notifier: the notifier to register.
+ */
+int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUPRINotifier *notifier);
+
+/**
+ * pci_pri_unregister_notifier: remove the PRI callback from a given address
+ * space.
+ *
+ * @dev: the PRI-capable PCI device.
+ * @pasid: the pasid of the address space to stop tracking.
+ */
+void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid);
+
+/**
+ * pci_iommu_register_iotlb_notifier: register a notifier for changes to
+ * IOMMU translation entries in a specific address space.
+ *
+ * Returns 0 on success, or a negative errno otherwise.
+ *
+ * @dev: the device that wants to get notified.
+ * @pasid: the pasid of the address space to track.
+ * @n: the notifier to register.
+ */
+int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUNotifier *n);
+
+/**
+ * pci_iommu_unregister_iotlb_notifier: unregister a notifier that has been
+ * registered with pci_iommu_register_iotlb_notifier.
+ *
+ * Returns 0 on success, or a negative errno otherwise.
+ *
+ * @dev: the device that wants to stop notifications.
+ * @pasid: the pasid of the address space to stop tracking.
+ * @n: the notifier to unregister.
+ */
+int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
+ IOMMUNotifier *n);
+
+/**
* pci_setup_iommu: Initialize specific IOMMU handlers for a PCIBus
*
* Let PCI host bridges define specific operations.
@@ -656,6 +985,7 @@ void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev);
qemu_irq pci_allocate_irq(PCIDevice *pci_dev);
void pci_set_irq(PCIDevice *pci_dev, int level);
+int pci_irq_disabled(PCIDevice *d);
static inline void pci_irq_assert(PCIDevice *pci_dev)
{
@@ -667,17 +997,9 @@ static inline void pci_irq_deassert(PCIDevice *pci_dev)
pci_set_irq(pci_dev, 0);
}
-/*
- * FIXME: PCI does not work this way.
- * All the callers to this method should be fixed.
- */
-static inline void pci_irq_pulse(PCIDevice *pci_dev)
-{
- pci_irq_assert(pci_dev);
- pci_irq_deassert(pci_dev);
-}
-
MSIMessage pci_get_msi_message(PCIDevice *dev, int vector);
void pci_set_enabled(PCIDevice *pci_dev, bool state);
+void pci_set_power(PCIDevice *pci_dev, bool state);
+int pci_pm_init(PCIDevice *pci_dev, uint8_t offset, Error **errp);
#endif
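A hedged sketch tying the new ATS and PRI helpers together: translate one range and, if nothing usable comes back, fall back to a page request. The PASID, group index and flag choices are illustrative, and the PRI completion arrives through a previously registered IOMMUPRINotifier rather than through this call's return value.

#include "qemu/osdep.h"
#include "hw/pci/pci.h"

static ssize_t example_translate_or_fault(PCIDevice *dev, uint32_t pasid,
                                          hwaddr addr, size_t len)
{
    IOMMUTLBEntry entry;
    uint32_t err_count = 0;
    ssize_t n;

    n = pci_ats_request_translation(dev, pasid, false, false, addr, len,
                                    false, &entry, 1, &err_count);
    if (n > 0 && err_count == 0) {
        return n;                   /* translation stored in @entry */
    }

    /* Ask the IOMMU to make the page present; illustrative prgi of 0. */
    return pci_pri_request_page(dev, pasid, false, false, addr,
                                true /* lpig */, 0 /* prgi */,
                                true /* read */, true /* write */);
}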
diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h
index 5cd4521..8cdacbc 100644
--- a/include/hw/pci/pci_bridge.h
+++ b/include/hw/pci/pci_bridge.h
@@ -14,8 +14,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* split out pci bus specific stuff from pci.[hc] to pci_bridge.[hc]
* Copyright (c) 2009 Isaku Yamahata <yamahata at valinux co jp>
@@ -72,6 +72,8 @@ struct PCIBridge {
*/
MemoryRegion address_space_mem;
MemoryRegion address_space_io;
+ AddressSpace as_mem;
+ AddressSpace as_io;
PCIBridgeWindows windows;
@@ -102,6 +104,7 @@ typedef struct PXBPCIEDev {
PXBDev parent_obj;
} PXBPCIEDev;
+#define TYPE_PXB_CXL_BUS "pxb-cxl-bus"
#define TYPE_PXB_DEV "pxb"
OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV)
diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
index ca15132..eee0338 100644
--- a/include/hw/pci/pci_device.h
+++ b/include/hw/pci/pci_device.h
@@ -3,6 +3,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_doe.h"
#define TYPE_PCI_DEVICE "pci-device"
typedef struct PCIDeviceClass PCIDeviceClass;
@@ -37,6 +38,8 @@ struct PCIDeviceClass {
uint16_t subsystem_id; /* only for header type = 0 */
const char *romfile; /* rom bar */
+
+ bool sriov_vf_user_creatable;
};
enum PCIReqIDType {
@@ -87,6 +90,7 @@ struct PCIDevice {
char name[64];
PCIIORegion io_regions[PCI_NUM_REGIONS];
AddressSpace bus_master_as;
+ bool is_master;
MemoryRegion bus_master_container_region;
MemoryRegion bus_master_enable_region;
@@ -104,6 +108,9 @@ struct PCIDevice {
/* Capability bits */
uint32_t cap_present;
+ /* Offset of PM capability in config space */
+ uint8_t pm_cap;
+
/* Offset of MSI-X capability in config space */
uint8_t msix_cap;
@@ -147,7 +154,7 @@ struct PCIDevice {
uint32_t romsize;
bool has_rom;
MemoryRegion rom;
- uint32_t rom_bar;
+ int32_t rom_bar;
/* INTx routing notifier */
PCIINTxRoutingNotifier intx_routing_notifier;
@@ -157,9 +164,24 @@ struct PCIDevice {
MSIVectorReleaseNotifier msix_vector_release_notifier;
MSIVectorPollNotifier msix_vector_poll_notifier;
+ /* SPDM */
+ uint16_t spdm_port;
+
+ /* DOE */
+ DOECap doe_spdm;
+
/* ID of standby device in net_failover pair */
char *failover_pair_id;
uint32_t acpi_index;
+
+ /*
+ * Indirect DMA region bounce buffer size as configured for the device. This
+ * is a configuration parameter that is reflected into bus_master_as when
+ * realizing the device.
+ */
+ uint32_t max_bounce_buffer_size;
+
+ char *sriov_pf;
};
static inline int pci_intx(PCIDevice *pci_dev)
@@ -192,7 +214,7 @@ static inline int pci_is_express_downstream_port(const PCIDevice *d)
static inline int pci_is_vf(const PCIDevice *d)
{
- return d->exp.sriov_vf.pf != NULL;
+ return d->sriov_pf || d->exp.sriov_vf.pf != NULL;
}
static inline uint32_t pci_config_size(const PCIDevice *d)
@@ -205,21 +227,6 @@ static inline uint16_t pci_get_bdf(PCIDevice *dev)
return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
}
-static inline void pci_set_power(PCIDevice *pci_dev, bool state)
-{
- /*
- * Don't change the enabled state of VFs when powering on/off the device.
- *
- * When powering on, VFs must not be enabled immediately but they must
- * wait until the guest configures SR-IOV.
- * When powering off, their corresponding PFs will be reset and disable
- * VFs.
- */
- if (!pci_is_vf(pci_dev)) {
- pci_set_enabled(pci_dev, state);
- }
-}
-
uint16_t pci_requester_id(PCIDevice *dev);
/* DMA access functions */
diff --git a/include/hw/pci/pci_host.h b/include/hw/pci/pci_host.h
index e52d8ec..954dd44 100644
--- a/include/hw/pci/pci_host.h
+++ b/include/hw/pci/pci_host.h
@@ -68,6 +68,5 @@ uint32_t pci_data_read(PCIBus *s, uint32_t addr, unsigned len);
extern const MemoryRegionOps pci_host_conf_le_ops;
extern const MemoryRegionOps pci_host_conf_be_ops;
extern const MemoryRegionOps pci_host_data_le_ops;
-extern const MemoryRegionOps pci_host_data_be_ops;
#endif /* PCI_HOST_H */
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index f1a53fe..33e2898 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -191,6 +191,7 @@
#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020
#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b
#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021
+#define PCI_DEVICE_ID_APPLE_VIRTIO_BLK 0x1a00
#define PCI_VENDOR_ID_SUN 0x108e
#define PCI_DEVICE_ID_SUN_EBUS 0x1000
diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h
index 5eddb90..ff6ce08 100644
--- a/include/hw/pci/pcie.h
+++ b/include/hw/pci/pcie.h
@@ -58,8 +58,6 @@ typedef enum {
struct PCIExpressDevice {
/* Offset of express capability in config space */
uint8_t exp_cap;
- /* Offset of Power Management capability in config space */
- uint8_t pm_cap;
/* SLOT */
bool hpev_notified; /* Logical AND of conditions for hot plug event.
@@ -72,8 +70,10 @@ struct PCIExpressDevice {
uint16_t aer_cap;
PCIEAERLog aer_log;
- /* Offset of ATS capability in config space */
+ /* Offset of ATS, PRI and PASID capabilities in config space */
uint16_t ats_cap;
+ uint16_t pasid_cap;
+ uint16_t pri_cap;
/* ACS */
uint16_t acs_cap;
@@ -141,6 +141,8 @@ void pcie_acs_reset(PCIDevice *dev);
void pcie_ari_init(PCIDevice *dev, uint16_t offset);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned);
+void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width,
+ PCIExpLinkSpeed speed);
void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
@@ -150,4 +152,13 @@ void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
+
+void pcie_pasid_init(PCIDevice *dev, uint16_t offset, uint8_t pasid_width,
+ bool exec_perm, bool priv_mod);
+void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
+ bool prg_response_pasid_req);
+
+bool pcie_pri_enabled(const PCIDevice *dev);
+bool pcie_pasid_enabled(const PCIDevice *dev);
+bool pcie_ats_enabled(const PCIDevice *dev);
#endif /* QEMU_PCIE_H */
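A minimal sketch, assuming illustrative extended-capability offsets, of an endpoint advertising the three capabilities whose init helpers are declared above; a real device must place the capabilities so they do not overlap in extended config space.

#include "qemu/osdep.h"
#include "hw/pci/pcie.h"

/* Hypothetical realize-time helper; 0x100/0x108/0x118 are example offsets. */
static void example_add_translation_caps(PCIDevice *dev)
{
    pcie_ats_init(dev, 0x100, true);               /* aligned requests */
    pcie_pri_init(dev, 0x108, 32, false);          /* 32 outstanding requests */
    pcie_pasid_init(dev, 0x118, 16, false, false); /* 16-bit PASIDs */
}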
diff --git a/include/hw/pci/pcie_doe.h b/include/hw/pci/pcie_doe.h
index 87dc17d..9e1275d 100644
--- a/include/hw/pci/pcie_doe.h
+++ b/include/hw/pci/pcie_doe.h
@@ -46,6 +46,8 @@ REG32(PCI_DOE_CAP_STATUS, 0)
/* PCI-SIG defined Data Object Types - r6.0 Table 6-32 */
#define PCI_SIG_DOE_DISCOVERY 0x00
+#define PCI_SIG_DOE_CMA 0x01
+#define PCI_SIG_DOE_SECURED_CMA 0x02
#define PCI_DOE_DW_SIZE_MAX (1 << 18)
#define PCI_DOE_PROTOCOL_NUM_MAX 256
@@ -106,6 +108,9 @@ struct DOECap {
/* Protocols and its callback response */
DOEProtocol *protocols;
uint16_t protocol_num;
+
+ /* Used for spdm-socket */
+ int spdm_socket;
};
void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,
diff --git a/include/hw/pci/pcie_host.h b/include/hw/pci/pcie_host.h
index 82d9217..f09de76 100644
--- a/include/hw/pci/pcie_host.h
+++ b/include/hw/pci/pcie_host.h
@@ -22,7 +22,7 @@
#define PCIE_HOST_H
#include "hw/pci/pci_host.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge"
diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h
index 90e6cf4..7cd7af8 100644
--- a/include/hw/pci/pcie_port.h
+++ b/include/hw/pci/pcie_port.h
@@ -72,7 +72,6 @@ struct PCIESlot {
};
void pcie_chassis_create(uint8_t chassis_number);
-PCIESlot *pcie_chassis_find_slot(uint8_t chassis, uint16_t slot);
int pcie_chassis_add_slot(struct PCIESlot *slot);
void pcie_chassis_del_slot(PCIESlot *s);
diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h
index 9d3b686..33a2222 100644
--- a/include/hw/pci/pcie_regs.h
+++ b/include/hw/pci/pcie_regs.h
@@ -86,6 +86,14 @@ typedef enum PCIExpLinkWidth {
#define PCI_ARI_VER 1
#define PCI_ARI_SIZEOF 8
+/* PASID */
+#define PCI_PASID_VER 1
+#define PCI_EXT_CAP_PASID_MAX_WIDTH 20
+#define PCI_PASID_CAP_WIDTH_SHIFT 8
+
+/* PRI */
+#define PCI_PRI_VER 1
+
/* AER */
#define PCI_ERR_VER 2
#define PCI_ERR_SIZEOF 0x48
diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h
index c5d2d31..aeaa38c 100644
--- a/include/hw/pci/pcie_sriov.h
+++ b/include/hw/pci/pcie_sriov.h
@@ -18,6 +18,7 @@
typedef struct PCIESriovPF {
uint8_t vf_bar_type[PCI_NUM_REGIONS]; /* Store type for each VF bar */
PCIDevice **vf; /* Pointer to an array of num_vfs VF devices */
+ bool vf_user_created; /* If VFs are created by user */
} PCIESriovPF;
typedef struct PCIESriovVF {
@@ -40,6 +41,26 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
MemoryRegion *memory);
+/**
+ * pcie_sriov_pf_init_from_user_created_vfs() - Initialize PF with user-created
+ * VFs, adding ARI to PF
+ * @dev: A PCIe device being realized.
+ * @offset: The offset of the SR-IOV capability.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Initializes a PF with user-created VFs, adding the ARI extended capability to
+ * the PF. The VFs should call pcie_ari_init() to form an ARI device.
+ *
+ * Return: The size of the added capabilities, 0 if the user did not create
+ *         VFs, or -1 on failure.
+ */
+int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev,
+ uint16_t offset,
+ Error **errp);
+
+bool pcie_sriov_register_device(PCIDevice *dev, Error **errp);
+void pcie_sriov_unregister_device(PCIDevice *dev);
+
/*
* Default (minimal) page size support values
* as required by the SR/IOV standard:
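A hedged sketch of a PF realize path consuming user-created VFs via the helper documented above; the wrapper name is hypothetical and the capability offset is supplied by the caller.

#include "qemu/osdep.h"
#include "hw/pci/pcie_sriov.h"
#include "qapi/error.h"

/* Returns true when the capability was added or when the user created no
 * VFs (the helper returns 0); false only on failure, with @errp set. */
static bool example_pf_add_user_created_vfs(PCIDevice *dev, uint16_t offset,
                                            Error **errp)
{
    return pcie_sriov_pf_init_from_user_created_vfs(dev, offset, errp) >= 0;
}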
diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h
index a0789df..ad10895 100644
--- a/include/hw/pci/shpc.h
+++ b/include/hw/pci/shpc.h
@@ -1,7 +1,7 @@
#ifndef SHPC_H
#define SHPC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/hotplug.h"
#include "hw/pci/pci_device.h"
#include "migration/vmstate.h"
diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h
deleted file mode 100644
index ab26802..0000000
--- a/include/hw/pcmcia.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef HW_PCMCIA_H
-#define HW_PCMCIA_H
-
-/* PCMCIA/Cardbus */
-
-#include "hw/qdev-core.h"
-#include "qom/object.h"
-
-typedef struct PCMCIASocket {
- qemu_irq irq;
- bool attached;
-} PCMCIASocket;
-
-#define TYPE_PCMCIA_CARD "pcmcia-card"
-OBJECT_DECLARE_TYPE(PCMCIACardState, PCMCIACardClass, PCMCIA_CARD)
-
-struct PCMCIACardState {
- /*< private >*/
- DeviceState parent_obj;
- /*< public >*/
-
- PCMCIASocket *slot;
-};
-
-struct PCMCIACardClass {
- /*< private >*/
- DeviceClass parent_class;
- /*< public >*/
-
- int (*attach)(PCMCIACardState *state);
- int (*detach)(PCMCIACardState *state);
-
- const uint8_t *cis;
- int cis_len;
-
- /* Only valid if attached */
- uint8_t (*attr_read)(PCMCIACardState *card, uint32_t address);
- void (*attr_write)(PCMCIACardState *card, uint32_t address, uint8_t value);
- uint16_t (*common_read)(PCMCIACardState *card, uint32_t address);
- void (*common_write)(PCMCIACardState *card,
- uint32_t address, uint16_t value);
- uint16_t (*io_read)(PCMCIACardState *card, uint32_t address);
- void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value);
-};
-
-#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
-#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
-#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
-#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
-#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
-#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
-#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
-#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
-#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
-#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
-#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
-#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
-#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
-#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
-#define CISTPL_END 0xff /* Tuple End */
-#define CISTPL_ENDMARK 0xff
-
-/* dscm1xxxx.c */
-PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv);
-
-#endif
diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h
index 4a3f644..896ee4a 100644
--- a/include/hw/ppc/mac_dbdma.h
+++ b/include/hw/ppc/mac_dbdma.h
@@ -23,9 +23,9 @@
#ifndef HW_MAC_DBDMA_H
#define HW_MAC_DBDMA_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/iov.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/sysbus.h"
#include "qom/object.h"
@@ -44,10 +44,6 @@ struct DBDMA_io {
DBDMA_end dma_end;
/* DMA is in progress, don't start another one */
bool processing;
- /* DMA request */
- void *dma_mem;
- dma_addr_t dma_len;
- DMADirection dir;
};
/*
diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h
index 476b136..d8fca07 100644
--- a/include/hw/ppc/pnv.h
+++ b/include/hw/ppc/pnv.h
@@ -76,6 +76,9 @@ struct PnvMachineClass {
/*< public >*/
const char *compat;
int compat_size;
+ int max_smt_threads;
+ bool has_lpar_per_thread;
+ bool quirk_tb_big_core;
void (*dt_power_mgt)(PnvMachineState *pnv, void *fdt);
void (*i2c_init)(PnvMachineState *pnv);
@@ -100,6 +103,9 @@ struct PnvMachineState {
PnvPnor *pnor;
hwaddr fw_load_addr;
+
+ bool big_core;
+ bool lpar_per_core;
};
PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id);
@@ -108,6 +114,8 @@ PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb);
#define PNV_FDT_ADDR 0x01000000
#define PNV_TIMEBASE_FREQ 512000000ULL
+void pnv_cpu_do_nmi_resume(CPUState *cs);
+
/*
* BMC helpers
*/
@@ -197,9 +205,8 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \
PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
-#define PNV9_HOMER_SIZE 0x0000000000400000ull
#define PNV9_HOMER_BASE(chip) \
- (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE)
+ (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
/*
* POWER10 MMIO base addresses - 16TB stride per chip
@@ -242,8 +249,7 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \
PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
-#define PNV10_HOMER_SIZE 0x0000000000400000ull
#define PNV10_HOMER_BASE(chip) \
- (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE)
+ (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
#endif /* PPC_PNV_H */
diff --git a/include/hw/ppc/pnv_adu.h b/include/hw/ppc/pnv_adu.h
new file mode 100644
index 0000000..f9dbd8c
--- /dev/null
+++ b/include/hw/ppc/pnv_adu.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of some ADU behaviour
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_ADU_H
+#define PPC_PNV_ADU_H
+
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_PNV_ADU "pnv-adu"
+
+OBJECT_DECLARE_TYPE(PnvADU, PnvADUClass, PNV_ADU)
+
+struct PnvADU {
+ DeviceState xd;
+
+ /* LPCMC (LPC Master Controller) access engine */
+ PnvLpcController *lpc;
+ uint64_t lpc_base_reg;
+ uint64_t lpc_cmd_reg;
+ uint64_t lpc_data_reg;
+
+ MemoryRegion xscom_regs;
+};
+
+#endif /* PPC_PNV_ADU_H */
diff --git a/include/hw/ppc/pnv_chip.h b/include/hw/ppc/pnv_chip.h
index a4ed17a..24ce37a 100644
--- a/include/hw/ppc/pnv_chip.h
+++ b/include/hw/ppc/pnv_chip.h
@@ -2,10 +2,12 @@
#define PPC_PNV_CHIP_H
#include "hw/pci-host/pnv_phb4.h"
+#include "hw/ppc/pnv_adu.h"
#include "hw/ppc/pnv_chiptod.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_homer.h"
#include "hw/ppc/pnv_n1_chiplet.h"
+#include "hw/ssi/pnv_spi.h"
#include "hw/ppc/pnv_lpc.h"
#include "hw/ppc/pnv_occ.h"
#include "hw/ppc/pnv_psi.h"
@@ -26,6 +28,8 @@ struct PnvChip {
uint64_t ram_start;
uint64_t ram_size;
+ bool big_core;
+ bool lpar_per_core;
uint32_t nr_cores;
uint32_t nr_threads;
uint64_t cores_mask;
@@ -77,6 +81,7 @@ struct Pnv9Chip {
PnvChip parent_obj;
/*< public >*/
+ PnvADU adu;
PnvXive xive;
Pnv9Psi psi;
PnvLpcController lpc;
@@ -110,6 +115,7 @@ struct Pnv10Chip {
PnvChip parent_obj;
/*< public >*/
+ PnvADU adu;
PnvXive2 xive;
Pnv9Psi psi;
PnvLpcController lpc;
@@ -118,6 +124,8 @@ struct Pnv10Chip {
PnvSBE sbe;
PnvHomer homer;
PnvN1Chiplet n1_chiplet;
+#define PNV10_CHIP_MAX_PIB_SPIC 6
+ PnvSpi pib_spic[PNV10_CHIP_MAX_PIB_SPIC];
uint32_t nr_quads;
PnvQuad *quads;
@@ -131,6 +139,7 @@ struct Pnv10Chip {
#define PNV10_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
#define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
+#define PNV10_PIR2THREAD(pir) (((pir) & 0x7f))
struct PnvChipClass {
/*< private >*/
@@ -147,7 +156,9 @@ struct PnvChipClass {
DeviceRealize parent_realize;
- uint32_t (*chip_pir)(PnvChip *chip, uint32_t core_id, uint32_t thread_id);
+ /* Get PIR and TIR values for a CPU thread identified by core/thread id */
+ void (*get_pir_tir)(PnvChip *chip, uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir);
void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu);
void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu);
diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h
index c6d62fd..d8afb4f 100644
--- a/include/hw/ppc/pnv_core.h
+++ b/include/hw/ppc/pnv_core.h
@@ -25,6 +25,27 @@
#include "hw/ppc/pnv.h"
#include "qom/object.h"
+/* Per-core ChipTOD / TimeBase state */
+typedef struct PnvCoreTODState {
+ /*
+ * POWER10 DD2.0 - big core TFMR drives the state machine on the even
+ * small core. Skiboot has a workaround that targets the even small core
+ * for CHIPTOD_TO_TB ops.
+ */
+ bool big_core_quirk;
+
+ int tb_ready_for_tod; /* core TB ready to receive TOD from chiptod */
+ int tod_sent_to_tb; /* chiptod sent TOD to the core TB */
+
+ /*
+ * "Timers" for async TBST events are simulated by mfTFAC because TFAC
+ * is polled for such events. These are just used to ensure firmware
+ * performs the polling at least a few times.
+ */
+ int tb_state_timer;
+ int tb_sync_pulse_timer;
+} PnvCoreTODState;
+
#define TYPE_PNV_CORE "powernv-cpu-core"
OBJECT_DECLARE_TYPE(PnvCore, PnvCoreClass,
PNV_CORE)
@@ -35,9 +56,15 @@ struct PnvCore {
/*< public >*/
PowerPCCPU **threads;
+ bool big_core;
+ bool lpar_per_core;
uint32_t pir;
uint32_t hwid;
uint64_t hrmor;
+
+ target_ulong scratch[8]; /* SPRC/SPRD indirect SCRATCH registers */
+ PnvCoreTODState tod_state;
+
PnvChip *chip;
MemoryRegion xscom_regs;
@@ -54,6 +81,7 @@ struct PnvCoreClass {
#define PNV_CORE_TYPE_NAME(cpu_model) cpu_model PNV_CORE_TYPE_SUFFIX
typedef struct PnvCPUState {
+ PnvCore *pnv_core;
Object *intc;
} PnvCPUState;
@@ -82,6 +110,9 @@ OBJECT_DECLARE_TYPE(PnvQuad, PnvQuadClass, PNV_QUAD)
struct PnvQuad {
DeviceState parent_obj;
+ bool special_wakeup_done;
+ bool special_wakeup[4];
+
uint32_t quad_id;
MemoryRegion xscom_regs;
MemoryRegion xscom_qme_regs;
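
PnvCPUState now carries a back-pointer to its owning PnvCore, so per-thread code can reach core-level state (ChipTOD state machine, SPRC/SPRD scratch registers, big-core flags) without walking the chip. A hedged sketch, assuming the existing pnv_cpu_state() helper:

    static void example_core_backlink(PowerPCCPU *cpu)
    {
        PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);   /* existing helper, assumed */
        PnvCore *core = pnv_cpu->pnv_core;
        PnvCoreTODState *tod = &core->tod_state;

        /* Per-thread code can now reach core-level state directly, e.g. the
         * ChipTOD/TimeBase handshake or the indirect SCRATCH registers. */
        tod->tb_ready_for_tod = 1;                   /* illustrative only */
        core->scratch[0] = 0;                        /* illustrative only */
    }
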
diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h
index b1c5d49..a6f2710 100644
--- a/include/hw/ppc/pnv_homer.h
+++ b/include/hw/ppc/pnv_homer.h
@@ -41,19 +41,21 @@ struct PnvHomer {
PnvChip *chip;
MemoryRegion pba_regs;
- MemoryRegion regs;
+ MemoryRegion mem;
+ hwaddr base;
};
struct PnvHomerClass {
DeviceClass parent_class;
+ /* Get base address of HOMER memory */
+ hwaddr (*get_base)(PnvChip *chip);
+ /* Size of HOMER memory */
+ int size;
+
int pba_size;
const MemoryRegionOps *pba_ops;
- int homer_size;
- const MemoryRegionOps *homer_ops;
-
- hwaddr core_max_base;
};
#endif /* PPC_PNV_HOMER_H */
diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h
index 5d22c45..266d562 100644
--- a/include/hw/ppc/pnv_lpc.h
+++ b/include/hw/ppc/pnv_lpc.h
@@ -20,9 +20,10 @@
#ifndef PPC_PNV_LPC_H
#define PPC_PNV_LPC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/ppc/pnv.h"
#include "hw/qdev-core.h"
+#include "hw/isa/isa.h" /* For ISA_NUM_IRQS */
#define TYPE_PNV_LPC "pnv-lpc"
typedef struct PnvLpcClass PnvLpcClass;
@@ -73,6 +74,9 @@ struct PnvLpcController {
uint32_t opb_irq_pol;
uint32_t opb_irq_input;
+ /* LPC device IRQ state */
+ uint32_t lpc_hc_irq_inputs;
+
/* LPC HC registers */
uint32_t lpc_hc_fw_seg_idsel;
uint32_t lpc_hc_fw_rd_acc_size;
@@ -84,8 +88,19 @@ struct PnvLpcController {
/* XSCOM registers */
MemoryRegion xscom_regs;
+ /*
+ * In P8, ISA irqs are combined with internal sources to drive the
+ * LPCHC interrupt output. P9 ISA irqs raise one of 4 lines that
+ * drive PSI SERIRQ irqs, routing according to OPB routing registers.
+ */
+ bool psi_has_serirq;
+
/* PSI to generate interrupts */
- qemu_irq psi_irq;
+ qemu_irq psi_irq_lpchc;
+
+ /* P9 serirq lines and irq routing table */
+ qemu_irq psi_irq_serirq[4];
+ int irq_to_serirq_route[ISA_NUM_IRQS];
};
struct PnvLpcClass {
@@ -94,6 +109,11 @@ struct PnvLpcClass {
DeviceRealize parent_realize;
};
+bool pnv_lpc_opb_read(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz);
+bool pnv_lpc_opb_write(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz);
+
ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp);
int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset,
uint64_t lpcm_addr, uint64_t lpcm_size);
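
pnv_lpc_opb_read()/pnv_lpc_opb_write() expose raw OPB accesses so other models can reach the LPC space without going through the XSCOM path. A hedged usage sketch; the OPB address is hypothetical and the bool return is assumed to indicate success:

    static bool example_opb_peek(PnvLpcController *lpc)
    {
        uint8_t buf[4];
        const uint32_t opb_addr = 0xc0012000;   /* hypothetical OPB address */

        /* Assumed contract: the return value signals whether the access worked. */
        if (!pnv_lpc_opb_read(lpc, opb_addr, buf, sizeof(buf))) {
            return false;
        }
        return pnv_lpc_opb_write(lpc, opb_addr, buf, sizeof(buf));
    }
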
diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h
index df32124..013ea2e 100644
--- a/include/hw/ppc/pnv_occ.h
+++ b/include/hw/ppc/pnv_occ.h
@@ -20,7 +20,7 @@
#ifndef PPC_PNV_OCC_H
#define PPC_PNV_OCC_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#define TYPE_PNV_OCC "pnv-occ"
@@ -41,11 +41,17 @@ DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC)
struct PnvOCC {
DeviceState xd;
+ /* OCC dynamic model is driven by this timer. */
+ QEMUTimer state_machine_timer;
+
/* OCC Misc interrupt */
uint64_t occmisc;
qemu_irq psi_irq;
+ /* OCCs operate on regions of HOMER memory */
+ PnvHomer *homer;
+
MemoryRegion xscom_regs;
MemoryRegion sram_regs;
};
@@ -53,6 +59,9 @@ struct PnvOCC {
struct PnvOCCClass {
DeviceClass parent_class;
+ hwaddr opal_shared_memory_offset; /* offset in HOMER */
+ uint8_t opal_shared_memory_version;
+
int xscom_size;
const MemoryRegionOps *xscom_ops;
};
diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h
index 2e37ac8..b44cafe 100644
--- a/include/hw/ppc/pnv_pnor.h
+++ b/include/hw/ppc/pnv_pnor.h
@@ -13,9 +13,11 @@
#include "hw/sysbus.h"
/*
- * PNOR offset on the LPC FW address space
+ * PNOR offset on the LPC FW address space. For now this should be 0 because
+ * skiboot 7.1 has a bug where IDSEL > 0 (LPC FW address > 256MB) access is
+ * not performed correctly.
*/
-#define PNOR_SPI_OFFSET 0x0c000000UL
+#define PNOR_SPI_OFFSET 0x00000000UL
#define TYPE_PNV_PNOR "pnv-pnor"
OBJECT_DECLARE_SIMPLE_TYPE(PnvPnor, PNV_PNOR)
@@ -26,6 +28,7 @@ struct PnvPnor {
BlockBackend *blk;
uint8_t *storage;
+ uint32_t lpc_address; /* Offset within LPC FW space */
int64_t size;
MemoryRegion mmio;
};
diff --git a/include/hw/ppc/pnv_sbe.h b/include/hw/ppc/pnv_sbe.h
index b6b378a..48a8b86 100644
--- a/include/hw/ppc/pnv_sbe.h
+++ b/include/hw/ppc/pnv_sbe.h
@@ -20,7 +20,7 @@
#ifndef PPC_PNV_SBE_H
#define PPC_PNV_SBE_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#define TYPE_PNV_SBE "pnv-sbe"
diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h
index 6209e18..b14549d 100644
--- a/include/hw/ppc/pnv_xscom.h
+++ b/include/hw/ppc/pnv_xscom.h
@@ -20,10 +20,10 @@
#ifndef PPC_PNV_XSCOM_H
#define PPC_PNV_XSCOM_H
-#include "exec/memory.h"
-#include "hw/ppc/pnv.h"
+#include "system/memory.h"
typedef struct PnvXScomInterface PnvXScomInterface;
+typedef struct PnvChip PnvChip;
#define TYPE_PNV_XSCOM_INTERFACE "pnv-xscom-interface"
#define PNV_XSCOM_INTERFACE(obj) \
@@ -82,6 +82,9 @@ struct PnvXScomInterfaceClass {
#define PNV_XSCOM_PBCQ_SPCI_BASE 0x9013c00
#define PNV_XSCOM_PBCQ_SPCI_SIZE 0x5
+#define PNV9_XSCOM_ADU_BASE 0x0090000
+#define PNV9_XSCOM_ADU_SIZE 0x55
+
/*
* Layout of the XSCOM PCB addresses (POWER 9)
*/
@@ -123,11 +126,16 @@ struct PnvXScomInterfaceClass {
#define PNV9_XSCOM_PEC_PCI_BASE 0xd010800
#define PNV9_XSCOM_PEC_PCI_SIZE 0x200
+#define PNV9_XSCOM_PEC_NEST_CPLT_BASE 0x0d000000
+
/* XSCOM PCI "pass-through" window to PHB SCOM */
#define PNV9_XSCOM_PEC_PCI_STK0 0x100
#define PNV9_XSCOM_PEC_PCI_STK1 0x140
#define PNV9_XSCOM_PEC_PCI_STK2 0x180
+#define PNV10_XSCOM_ADU_BASE 0x0090000
+#define PNV10_XSCOM_ADU_SIZE 0x55
+
/*
* Layout of the XSCOM PCB addresses (POWER 10)
*/
@@ -191,9 +199,14 @@ struct PnvXScomInterfaceClass {
#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */
#define PNV10_XSCOM_PEC_NEST_SIZE 0x100
+#define PNV10_XSCOM_PEC_NEST_CPLT_BASE 0x08000000
+
#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
+#define PNV10_XSCOM_PIB_SPIC_BASE 0xc0000
+#define PNV10_XSCOM_PIB_SPIC_SIZE 0x20
+
void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr);
int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
uint64_t xscom_base, uint64_t xscom_size,
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index d5d119e..8a14d62 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -116,6 +116,13 @@ enum {
#define PPC_SERIAL_MM_BAUDBASE 399193
+#ifndef CONFIG_USER_ONLY
+void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa,
+ hwaddr len);
+void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa,
+ target_ulong size);
+#endif
+
/* ppc_booke.c */
void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags);
#endif
diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h
index 1bd9b88..2e94b00 100644
--- a/include/hw/ppc/ppc4xx.h
+++ b/include/hw/ppc/ppc4xx.h
@@ -26,7 +26,7 @@
#define PPC4XX_H
#include "hw/ppc/ppc.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
/*
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 4aaf23d..39bd5bd 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -2,7 +2,7 @@
#define HW_SPAPR_H
#include "qemu/units.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/boards.h"
#include "hw/ppc/spapr_drc.h"
#include "hw/mem/pc-dimm.h"
@@ -83,8 +83,10 @@ typedef enum {
#define SPAPR_CAP_AIL_MODE_3 0x0C
/* Nested PAPR */
#define SPAPR_CAP_NESTED_PAPR 0x0D
+/* DAWR1 */
+#define SPAPR_CAP_DAWR1 0x0E
/* Num Caps */
-#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1)
+#define SPAPR_CAP_NUM (SPAPR_CAP_DAWR1 + 1)
/*
* Capability Values
@@ -141,11 +143,8 @@ struct SpaprMachineClass {
MachineClass parent_class;
/*< public >*/
- bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */
bool dr_phb_enabled; /* enable dynamic-reconfig/hotplug of PHBs */
bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */
- bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
- bool pre_2_10_has_unused_icps;
bool legacy_irq_allocation;
uint32_t nr_xirqs;
bool broken_host_serial_model; /* present real host info to the guest */
@@ -204,6 +203,7 @@ struct SpaprMachineState {
uint32_t fdt_initial_size;
void *fdt_blob;
uint8_t fdt_rng_seed[32];
+ uint64_t hashpkey_val;
long kernel_size;
bool kernel_le;
uint64_t kernel_addr;
@@ -409,6 +409,7 @@ struct SpaprMachineState {
#define H_SET_MODE_RESOURCE_SET_DAWR0 2
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
+#define H_SET_MODE_RESOURCE_SET_DAWR1 5
/* Flags for H_SET_MODE_RESOURCE_LE */
#define H_SET_MODE_ENDIAN_BIG 0
@@ -1004,7 +1005,9 @@ extern const VMStateDescription vmstate_spapr_cap_large_decr;
extern const VMStateDescription vmstate_spapr_cap_ccf_assist;
extern const VMStateDescription vmstate_spapr_cap_fwnmi;
extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate;
+extern const VMStateDescription vmstate_spapr_cap_ail_mode_3;
extern const VMStateDescription vmstate_spapr_wdt;
+extern const VMStateDescription vmstate_spapr_cap_dawr1;
static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap)
{
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index 69a52e3..68f7083 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -28,7 +28,6 @@ struct SpaprCpuCore {
/*< public >*/
PowerPCCPU **threads;
int node_id;
- bool pre_3_0_migration; /* older machine don't know about SpaprCpuState */
};
struct SpaprCpuCoreClass {
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h
index 02a63b3..9ff4290 100644
--- a/include/hw/ppc/spapr_drc.h
+++ b/include/hw/ppc/spapr_drc.h
@@ -15,7 +15,7 @@
#include <libfdt.h>
#include "qom/object.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/qdev-core.h"
#include "qapi/error.h"
diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h
index 93ef14a..f7be0d5 100644
--- a/include/hw/ppc/spapr_nested.h
+++ b/include/hw/ppc/spapr_nested.h
@@ -11,7 +11,13 @@
#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */
#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
#define GSB_PROCESS_TBL 0x0006 /* Process Table */
- /* RESERVED 0x0007 - 0x0BFF */
+ /* RESERVED 0x0007 - 0x07FF */
+#define GSB_L0_GUEST_HEAP_INUSE 0x0800 /* Guest Management Heap Size */
+#define GSB_L0_GUEST_HEAP_MAX 0x0801 /* Guest Management Heap Max Size */
+#define GSB_L0_GUEST_PGTABLE_SIZE_INUSE 0x0802 /* Guest Pagetable Size */
+#define GSB_L0_GUEST_PGTABLE_SIZE_MAX 0x0803 /* Guest Pagetable Max Size */
+#define GSB_L0_GUEST_PGTABLE_RECLAIMED 0x0804 /* Pagetable Reclaim in bytes */
+ /* RESERVED 0x0805 - 0xBFF */
#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */
#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */
#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */
@@ -99,7 +105,8 @@
#define GSB_VCPU_SPR_HASHKEYR 0x1050
#define GSB_VCPU_SPR_HASHPKEYR 0x1051
#define GSB_VCPU_SPR_CTRL 0x1052
- /* RESERVED 0x1053 - 0x1FFF */
+#define GSB_VCPU_SPR_DPDES 0x1053
+ /* RESERVED 0x1054 - 0x1FFF */
#define GSB_VCPU_SPR_CR 0x2000
#define GSB_VCPU_SPR_PIDR 0x2001
#define GSB_VCPU_SPR_DSISR 0x2002
@@ -195,6 +202,38 @@ typedef struct SpaprMachineStateNested {
#define NESTED_API_PAPR 2
bool capabilities_set;
uint32_t pvr_base;
+
+ /**
+ * l0_guest_heap_inuse: The currently used bytes in the Hypervisor's Guest
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_heap_inuse;
+
+ /**
+ * l0_guest_heap_max: The maximum bytes available in the Hypervisor's Guest
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_heap_max;
+
+ /**
+ * l0_guest_pgtable_size_inuse: The currently used bytes in the Hypervisor's Guest
+ * Page Table Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_size_inuse;
+
+ /**
+ * l0_guest_pgtable_size_max: The maximum bytes available in the Hypervisor's Guest
+ * Page Table Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_size_max;
+
+ /**
+ * l0_guest_pgtable_reclaimed: The amount of space in bytes that has been
+ * reclaimed due to overcommit in the Hypervisor's Guest Page Table
+ * Management Space associated with the Host Partition.
+ **/
+ uint64_t l0_guest_pgtable_reclaimed;
+
GHashTable *guests;
} SpaprMachineStateNested;
@@ -210,11 +249,14 @@ typedef struct SpaprMachineStateNestedGuest {
#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000
#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
-#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \
+#define H_GUEST_CAPABILITIES_P11_MODE 0x1000000000000000
+#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P11_MODE | \
+ H_GUEST_CAPABILITIES_P10_MODE | \
H_GUEST_CAPABILITIES_P9_MODE)
#define H_GUEST_CAP_COPY_MEM_BMAP 0
#define H_GUEST_CAP_P9_MODE_BMAP 1
#define H_GUEST_CAP_P10_MODE_BMAP 2
+#define H_GUEST_CAP_P11_MODE_BMAP 3
#define PAPR_NESTED_GUEST_MAX 4096
#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL
#define PAPR_NESTED_GUEST_VCPU_MAX 2048
@@ -225,9 +267,15 @@ typedef struct SpaprMachineStateNestedGuest {
#define HVMASK_HDEXCR 0x00000000FFFFFFFF
#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF
#define GSB_MAX_BUF_SIZE (1024 * 1024)
-#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000
-#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
-#define GUEST_STATE_REQUEST_SET 0x2
+#define H_GUEST_GET_STATE_FLAGS_MASK 0xC000000000000000ULL
+#define H_GUEST_SET_STATE_FLAGS_MASK 0x8000000000000000ULL
+#define H_GUEST_SET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL
+#define H_GUEST_GET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL
+#define H_GUEST_GET_STATE_FLAGS_HOST_WIDE 0x4000000000000000ULL
+
+#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
+#define GUEST_STATE_REQUEST_HOST_WIDE 0x2
+#define GUEST_STATE_REQUEST_SET 0x4
/*
* As per ISA v3.1B, following bits are reserved:
@@ -247,6 +295,15 @@ typedef struct SpaprMachineStateNestedGuest {
.copy = (c) \
}
+#define GSBE_NESTED_MACHINE_DW(i, f) { \
+ .id = (i), \
+ .size = 8, \
+ .location = get_machine_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNested, f), \
+ .copy = copy_state_8to8, \
+ .mask = HVMASK_DEFAULT \
+}
+
#define GSBE_NESTED(i, sz, f, c) { \
.id = (i), \
.size = (sz), \
@@ -505,9 +562,11 @@ struct guest_state_element_type {
uint16_t id;
int size;
#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
-#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE 0x2
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x4
uint16_t flags;
- void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
+ void *(*location)(struct SpaprMachineState *, SpaprMachineStateNestedGuest *,
+ target_ulong);
size_t offset;
void (*copy)(void *, void *, bool);
uint64_t mask;
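
The new GSBE_NESTED_MACHINE_DW() helper describes host-wide (machine-level) guest-state elements, such as the L0 heap and page-table accounting counters retrieved with the new GUEST_STATE_REQUEST_HOST_WIDE flag. A sketch of how table entries for the new IDs might look; the array name is hypothetical, and get_machine_ptr/copy_state_8to8 are the helpers the macro itself references:

    /* Sketch: wiring two of the new host-wide IDs into an element table. */
    static const struct guest_state_element_type example_elements[] = {
        GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_INUSE, l0_guest_heap_inuse),
        GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_MAX,   l0_guest_heap_max),
    };
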
diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
index 7eae1a4..b8de4b0 100644
--- a/include/hw/ppc/spapr_vio.h
+++ b/include/hw/ppc/spapr_vio.h
@@ -23,7 +23,7 @@
*/
#include "hw/ppc/spapr.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/irq.h"
#include "qom/object.h"
diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h
index d3f293d..3a0fbff 100644
--- a/include/hw/ppc/vof.h
+++ b/include/hw/ppc/vof.h
@@ -7,8 +7,8 @@
#define HW_VOF_H
#include "qom/object.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "exec/cpu-defs.h"
typedef struct Vof {
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index e94d534..097fcdf 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -28,7 +28,7 @@
#ifndef XICS_H
#define XICS_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "qom/object.h"
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 28c181f..538f438 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -130,17 +130,15 @@
* TCTX Thread interrupt Context
*
*
- * Copyright (c) 2017-2018, IBM Corporation.
- *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * Copyright (c) 2017-2024, IBM Corporation.
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE_H
#define PPC_XIVE_H
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/sysbus.h"
#include "hw/ppc/xive_regs.h"
#include "qom/object.h"
@@ -218,7 +216,7 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc)
xsrc->esb_shift == XIVE_ESB_4K_2PAGE;
}
-static inline size_t xive_source_esb_len(XiveSource *xsrc)
+static inline uint64_t xive_source_esb_len(XiveSource *xsrc)
{
return (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
}
@@ -424,6 +422,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
typedef struct XiveTCTXMatch {
XiveTCTX *tctx;
uint8_t ring;
+ bool precluded;
} XiveTCTXMatch;
#define TYPE_XIVE_PRESENTER "xive-presenter"
@@ -439,10 +438,13 @@ struct XivePresenterClass {
InterfaceClass parent;
int (*match_nvt)(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
bool (*in_kernel)(const XivePresenter *xptr);
uint32_t (*get_config)(XivePresenter *xptr);
+ int (*broadcast)(XivePresenter *xptr,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore, uint8_t priority);
};
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
@@ -451,8 +453,10 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
bool cam_ignore, uint32_t logic_serv);
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
- uint32_t logic_serv);
+ bool crowd, bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, bool *precluded);
+
+uint32_t xive_get_vpgroup_size(uint32_t nvp_index);
/*
* XIVE Fabric (Interface between Interrupt Controller and Machine)
@@ -469,8 +473,10 @@ struct XiveFabricClass {
InterfaceClass parent;
int (*match_nvt)(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint8_t priority,
+ bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
+ int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx,
+ bool crowd, bool cam_ignore, uint8_t priority);
};
/*
@@ -510,6 +516,21 @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority)
0 : 1 << (XIVE_PRIORITY_MAX - priority);
}
+static inline uint8_t xive_priority_to_pipr(uint8_t priority)
+{
+ return priority > XIVE_PRIORITY_MAX ? 0xFF : priority;
+}
+
+/*
+ * Convert an Interrupt Pending Buffer (IPB) register to a Pending
+ * Interrupt Priority Register (PIPR), which contains the priority of
+ * the most favored pending notification.
+ */
+static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
+{
+ return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
+}
+
/*
* XIVE Thread Interrupt Management Aera (TIMA)
*
@@ -532,8 +553,10 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf);
Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
-void xive_tctx_reset_os_signal(XiveTCTX *tctx);
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+ uint8_t group_level);
+void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level);
/*
* KVM XIVE device helpers
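
With the move from IPB-based to PIPR/priority-based context updates, the IPB to PIPR conversion becomes an explicit helper. A small worked example of the conversion, using only the inline helpers above (it relies on XIVE_PRIORITY_MAX being 7):

    static void xive_ipb_pipr_example(void)
    {
        uint8_t ipb = xive_priority_to_ipb(4);   /* 1 << (7 - 4) == 0x08 */
        uint8_t pipr = xive_ipb_to_pipr(ipb);    /* clz32(0x08000000) == 4 */

        g_assert(pipr == 4);
        g_assert(xive_ipb_to_pipr(0) == 0xff);   /* nothing pending: least favoured */
    }
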
diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
index ab68f8d..8cdf819 100644
--- a/include/hw/ppc/xive2.h
+++ b/include/hw/ppc/xive2.h
@@ -1,11 +1,9 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
- *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE2_H
@@ -53,6 +51,12 @@ typedef struct Xive2RouterClass {
Xive2Nvp *nvp);
int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
+ int (*get_nvgc)(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc);
+ int (*write_nvgc)(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc);
uint8_t (*get_block_id)(Xive2Router *xrtr);
uint32_t (*get_config)(Xive2Router *xrtr);
} Xive2RouterClass;
@@ -67,6 +71,12 @@ int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp);
int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
+int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc);
+int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
+ uint8_t nvgc_blk, uint32_t nvgc_idx,
+ Xive2Nvgc *nvgc);
uint32_t xive2_router_get_config(Xive2Router *xrtr);
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
@@ -78,7 +88,17 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
- bool cam_ignore, uint32_t logic_serv);
+ bool crowd, bool cam_ignore,
+ uint32_t logic_serv);
+
+uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset);
+
+uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
+ bool crowd,
+ uint8_t blk, uint32_t idx,
+ uint16_t offset, uint16_t val);
/*
* XIVE2 END ESBs (POWER10)
@@ -103,9 +123,21 @@ typedef struct Xive2EndSource {
* XIVE2 Thread Interrupt Management Area (POWER10)
*/
+void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
+void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
+void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority);
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority);
+void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
+void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size);
#endif /* PPC_XIVE2_H */
diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h
index 4e5e17c..b11395c 100644
--- a/include/hw/ppc/xive2_regs.h
+++ b/include/hw/ppc/xive2_regs.h
@@ -1,10 +1,9 @@
/*
* QEMU PowerPC XIVE2 internal structure definitions (POWER10)
*
- * Copyright (c) 2019-2022, IBM Corporation.
+ * Copyright (c) 2019-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE2_REGS_H
@@ -19,16 +18,18 @@
* mode (P10), the CAM line is slightly different as the VP space was
* increased.
*/
-#define TM2_QW0W2_VU PPC_BIT32(0)
+#define TM2_W2_VALID PPC_BIT32(0)
+#define TM2_W2_HW PPC_BIT32(1)
+#define TM2_QW0W2_VU TM2_W2_VALID
#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
-#define TM2_QW1W2_VO PPC_BIT32(0)
-#define TM2_QW1W2_HO PPC_BIT32(1)
+#define TM2_QW1W2_VO TM2_W2_VALID
+#define TM2_QW1W2_HO TM2_W2_HW
#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
-#define TM2_QW2W2_VP PPC_BIT32(0)
-#define TM2_QW2W2_HP PPC_BIT32(1)
+#define TM2_QW2W2_VP TM2_W2_VALID
+#define TM2_QW2W2_HP TM2_W2_HW
#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
-#define TM2_QW3W2_VT PPC_BIT32(0)
-#define TM2_QW3W2_HT PPC_BIT32(1)
+#define TM2_QW3W2_VT TM2_W2_VALID
+#define TM2_QW3W2_HT TM2_W2_HW
#define TM2_QW3W2_LP PPC_BIT32(6)
#define TM2_QW3W2_LE PPC_BIT32(7)
@@ -97,6 +98,7 @@ typedef struct Xive2End {
uint32_t w6;
#define END2_W6_FORMAT_BIT PPC_BIT32(0)
#define END2_W6_IGNORE PPC_BIT32(1)
+#define END2_W6_CROWD PPC_BIT32(2)
#define END2_W6_VP_BLOCK PPC_BITMASK32(4, 7)
#define END2_W6_VP_OFFSET PPC_BITMASK32(8, 31)
#define END2_W6_VP_OFFSET_GEN1 PPC_BITMASK32(13, 31)
@@ -111,6 +113,8 @@ typedef struct Xive2End {
#define xive2_end_is_notify(end) \
(be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY)
#define xive2_end_is_backlog(end) (be32_to_cpu((end)->w0) & END2_W0_BACKLOG)
+#define xive2_end_is_precluded_escalation(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_PRECL_ESC_CTL)
#define xive2_end_is_escalate(end) \
(be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL)
#define xive2_end_is_uncond_escalation(end) \
@@ -123,6 +127,10 @@ typedef struct Xive2End {
(be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1)
#define xive2_end_is_firmware2(end) \
(be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2)
+#define xive2_end_is_ignore(end) \
+ (be32_to_cpu((end)->w6) & END2_W6_IGNORE)
+#define xive2_end_is_crowd(end) \
+ (be32_to_cpu((end)->w6) & END2_W6_CROWD)
static inline uint64_t xive2_end_qaddr(Xive2End *end)
{
@@ -143,7 +151,11 @@ typedef struct Xive2Nvp {
uint32_t w0;
#define NVP2_W0_VALID PPC_BIT32(0)
#define NVP2_W0_HW PPC_BIT32(7)
+#define NVP2_W0_L PPC_BIT32(8)
+#define NVP2_W0_G PPC_BIT32(9)
+#define NVP2_W0_T PPC_BIT32(10)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
+#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31)
uint32_t w1;
#define NVP2_W1_CO PPC_BIT32(13)
#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
@@ -153,6 +165,8 @@ typedef struct Xive2Nvp {
#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
+#define NVP2_W2_T PPC_BIT32(27)
+#define NVP2_W2_LGS PPC_BITMASK32(28, 31)
uint32_t w3;
uint32_t w4;
#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
@@ -164,7 +178,9 @@ typedef struct Xive2Nvp {
#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7)
#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31)
uint32_t w6;
+#define NVP2_W6_REPORTING_LINE PPC_BITMASK32(4, 31)
uint32_t w7;
+#define NVP2_W7_REPORTING_LINE PPC_BITMASK32(0, 23)
} Xive2Nvp;
#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
@@ -194,12 +210,15 @@ static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
}
+void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf);
+
/*
* Notification Virtual Group or Crowd (NVG/NVC)
*/
typedef struct Xive2Nvgc {
uint32_t w0;
#define NVGC2_W0_VALID PPC_BIT32(0)
+#define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31)
uint32_t w1;
uint32_t w2;
uint32_t w3;
@@ -209,4 +228,16 @@ typedef struct Xive2Nvgc {
uint32_t w7;
} Xive2Nvgc;
+#define xive2_nvgc_is_valid(nvgc) (be32_to_cpu((nvgc)->w0) & NVGC2_W0_VALID)
+
+void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx,
+ GString *buf);
+
+#define NVx_BACKLOG_OP PPC_BITMASK(52, 53)
+#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59)
+
+/* split the 6-bit crowd/group level */
+#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11)
+#define NVx_GROUP_LVL(level) (level & 0b1111)
+
#endif /* PPC_XIVE2_REGS_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index b9db7ab..54bc6c5 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -7,10 +7,9 @@
* access to the different fields.
*
*
- * Copyright (c) 2016-2018, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
*
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef PPC_XIVE_REGS_H
@@ -77,8 +76,11 @@
#define TM_LSMFB 0x3 /* - + + + */
#define TM_ACK_CNT 0x4 /* - + - - */
#define TM_INC 0x5 /* - + - + */
+#define TM_LGS 0x5 /* + + + + */ /* Rename P10 */
#define TM_AGE 0x6 /* - + - + */
+#define TM_T 0x6 /* - + - + */ /* Rename P10 */
#define TM_PIPR 0x7 /* - + - + */
+#define TM_OGEN 0xF /* - + - - */ /* P10 only */
#define TM_WORD0 0x0
#define TM_WORD1 0x4
@@ -98,6 +100,7 @@
#define TM_QW3W2_LP PPC_BIT32(6)
#define TM_QW3W2_LE PPC_BIT32(7)
#define TM_QW3W2_T PPC_BIT32(31)
+#define TM_QW3B8_VT PPC_BIT8(0)
/*
* In addition to normal loads to "peek" and writes (only when invalid)
@@ -114,26 +117,42 @@
* Then we have all these "special" CI ops at these offset that trigger
* all sorts of side effects:
*/
-#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/
-#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
+#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg */
+#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
#define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */
-#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user
- * context */
-#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
-#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS
- * context to reg */
-#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool
- * context to reg*/
-#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
-#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd
- * line */
-#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
-#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even
- * line */
-#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
+#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user */
+ /* context */
+#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX_G2 0x810 /* Load32/Load64 Pull/Invalidate OS */
+ /* context to reg */
+#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS */
+ /* context to reg */
+#define TM_SPC_PULL_POOL_CTX_G2 0x820 /* Load32/Load64 Pull/Invalidate Pool */
+ /* context to reg */
+#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool */
+ /* context to reg */
+#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_PHYS_CTX_G2 0x830 /* Load32 Pull phys ctx to reg */
+#define TM_SPC_PULL_PHYS_CTX 0x838 /* Load8 Pull phys ctx to reg */
+#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd */
+ /* line */
+#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
+#define TM_SPC_PULL_OS_CTX_OL 0xc18 /* Pull/Invalidate OS context to */
+ /* odd Thread reporting line */
+#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even */
+ /* line */
+#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
+#define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */
/* XXX more... */
-/* NSR fields for the various QW ack types */
+/*
+ * NSR fields for the various QW ack types
+ *
+ * P10 has an extra bit in QW3 for the group level instead of the
+ * reserved 'i' bit. Since it is not used and we don't support group
+ * interrupts on P9, we use the P10 definition for the group level so
+ * that we can have common macros for the NSR
+ */
#define TM_QW0_NSR_EB PPC_BIT8(0)
#define TM_QW1_NSR_EO PPC_BIT8(0)
#define TM_QW3_NSR_HE PPC_BITMASK8(0, 1)
@@ -141,8 +160,15 @@
#define TM_QW3_NSR_HE_POOL 1
#define TM_QW3_NSR_HE_PHYS 2
#define TM_QW3_NSR_HE_LSI 3
-#define TM_QW3_NSR_I PPC_BIT8(2)
-#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7)
+#define TM_NSR_GRP_LVL PPC_BITMASK8(2, 7)
+/*
+ * On P10, the format of the 6-bit group level is: 2 bits for the
+ * crowd size and 4 bits for the group size. Since group/crowd size is
+ * always a power of 2, we encode the log. For example, group_level=4
+ * means crowd size = 0 and group size = 16 (2^4)
+ * Same encoding is used in the NVP and NVGC structures for
+ * PGoFirst and PGoNext fields
+ */
/*
* EAS (Event Assignment Structure)
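
A short example of the 6-bit level encoding described above, using the NVx_CROWD_LVL()/NVx_GROUP_LVL() helpers added in xive2_regs.h earlier in this series; the level values are illustrative:

    static unsigned example_group_size(uint8_t level)
    {
        /* The lower 4 bits (NVx_GROUP_LVL) encode log2 of the group size, the
         * upper 2 bits (NVx_CROWD_LVL) log2 of the crowd size.  Level 0x04 is
         * the group_level=4 example above: no crowd, group of 2^4 = 16 VPs;
         * level 0x24 would add a crowd of 2^2 = 4 blocks. */
        return 1u << NVx_GROUP_LVL(level);
    }
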
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index 77bfcbd..530f3da 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -95,6 +95,7 @@ typedef void (*DeviceUnrealize)(DeviceState *dev);
typedef void (*DeviceReset)(DeviceState *dev);
typedef void (*BusRealize)(BusState *bus, Error **errp);
typedef void (*BusUnrealize)(BusState *bus);
+typedef int (*DeviceSyncConfig)(DeviceState *dev, Error **errp);
/**
* struct DeviceClass - The base class for all devices.
@@ -103,6 +104,9 @@ typedef void (*BusUnrealize)(BusState *bus);
* property is changed to %true.
* @unrealize: Callback function invoked when the #DeviceState:realized
* property is changed to %false.
+ * @sync_config: Callback function invoked when QMP command device-sync-config
+ * is called. Should synchronize device configuration from host to guest part
+ * and notify the guest about the change.
* @hotpluggable: indicates if #DeviceClass is hotpluggable, available
* as readonly "hotpluggable" property of #DeviceState instance
*
@@ -132,7 +136,13 @@ struct DeviceClass {
* ensures a compile-time error if someone attempts to assign
* dc->props directly.
*/
- Property *props_;
+ const Property *props_;
+
+ /**
+ * @props_count_: number of elements in @props_; should only be
+ * assigned by using device_class_set_props().
+ */
+ uint16_t props_count_;
/**
* @user_creatable: Can user instantiate with -device / device_add?
@@ -152,16 +162,17 @@ struct DeviceClass {
/* callbacks */
/**
- * @reset: deprecated device reset method pointer
+ * @legacy_reset: deprecated device reset method pointer
*
* Modern code should use the ResettableClass interface to
* implement a multi-phase reset.
*
* TODO: remove once every reset callback is unused
*/
- DeviceReset reset;
+ DeviceReset legacy_reset;
DeviceRealize realize;
DeviceUnrealize unrealize;
+ DeviceSyncConfig sync_config;
/**
* @vmsd: device state serialisation description for
@@ -238,10 +249,6 @@ struct DeviceState {
*/
int64_t pending_deleted_expires_ms;
/**
- * @opts: QDict of options for the device
- */
- QDict *opts;
- /**
* @hotplugged: was device added after PHASE_MACHINE_READY?
*/
int hotplugged;
@@ -533,7 +540,8 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id,
int required_for_version);
HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev);
HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev);
-bool qdev_hotplug_allowed(DeviceState *dev, Error **errp);
+bool qdev_hotplug_allowed(DeviceState *dev, BusState *bus, Error **errp);
+bool qdev_hotunplug_allowed(DeviceState *dev, Error **errp);
/**
* qdev_get_hotplug_handler() - Get handler responsible for device wiring
@@ -547,6 +555,7 @@ bool qdev_hotplug_allowed(DeviceState *dev, Error **errp);
*/
HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev);
void qdev_unplug(DeviceState *dev, Error **errp);
+int qdev_sync_config(DeviceState *dev, Error **errp);
void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void qdev_machine_creation_done(void);
@@ -929,29 +938,38 @@ char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev);
/**
* device_class_set_props(): add a set of properties to a device
* @dc: the parent DeviceClass all devices inherit
- * @props: an array of properties, terminate by DEFINE_PROP_END_OF_LIST()
+ * @props: an array of properties
*
* This will add a set of properties to the object. It will fault if
* you attempt to add an existing property defined by a parent class.
* To modify an inherited property you need to use????
+ *
+ * Validate that @props has at least one Property.
+ * Validate that @props is an array, not a pointer, via ARRAY_SIZE.
+ * Validate that the array does not have a legacy terminator at compile-time;
+ * requires -O2 and the array to be const.
*/
-void device_class_set_props(DeviceClass *dc, Property *props);
+#define device_class_set_props(dc, props) \
+ do { \
+ QEMU_BUILD_BUG_ON(sizeof(props) == 0); \
+ size_t props_count_ = ARRAY_SIZE(props); \
+ if ((props)[props_count_ - 1].name == NULL) { \
+ qemu_build_not_reached(); \
+ } \
+ device_class_set_props_n((dc), (props), props_count_); \
+ } while (0)
/**
- * device_class_set_parent_reset() - legacy set device reset handlers
- * @dc: device class
- * @dev_reset: function pointer to reset handler
- * @parent_reset: function pointer to parents reset handler
- *
- * Modern code should use the ResettableClass interface to
- * implement a multi-phase reset instead.
+ * device_class_set_props_n(): add a set of properties to a device
+ * @dc: the parent DeviceClass all devices inherit
+ * @props: an array of properties
+ * @n: ARRAY_SIZE(@props)
*
- * TODO: remove the function when DeviceClass's reset method
- * is not used anymore.
+ * This will add a set of properties to the object. It will fault if
+ * you attempt to add an existing property defined by a parent class.
+ * To modify an inherited property you need to use????
*/
-void device_class_set_parent_reset(DeviceClass *dc,
- DeviceReset dev_reset,
- DeviceReset *parent_reset);
+void device_class_set_props_n(DeviceClass *dc, const Property *props, size_t n);
/**
* device_class_set_parent_realize() - set up for chaining realize fns
@@ -969,6 +987,19 @@ void device_class_set_parent_realize(DeviceClass *dc,
DeviceRealize dev_realize,
DeviceRealize *parent_realize);
+/**
+ * device_class_set_legacy_reset(): set the DeviceClass::reset method
+ * @dc: The device class
+ * @dev_reset: the reset function
+ *
+ * This function sets the DeviceClass::reset method. This is widely
+ * used in existing code, but new code should prefer to use the
+ * Resettable API as documented in docs/devel/reset.rst.
+ * In addition, devices which need to chain to their parent class's
+ * reset methods or which need to be subclassed must use Resettable.
+ */
+void device_class_set_legacy_reset(DeviceClass *dc,
+ DeviceReset dev_reset);
/**
* device_class_set_parent_unrealize() - set up for chaining unrealize fns
@@ -994,6 +1025,26 @@ void qdev_assert_realized_properly(void);
Object *qdev_get_machine(void);
/**
+ * qdev_create_fake_machine(): Create a fake machine container.
+ *
+ * .. note::
+ * This function is a kludge for user emulation (USER_ONLY)
+ * because when threads (TYPE_CPU) are realized, qdev_realize()
+ * accesses a machine container.
+ */
+void qdev_create_fake_machine(void);
+
+/**
+ * machine_get_container:
+ * @name: The name of container to lookup
+ *
+ * Get a container of the machine (QOM path "/machine/NAME").
+ *
+ * Returns: the machine container object.
+ */
+Object *machine_get_container(const char *name);
+
+/**
* qdev_get_human_name() - Return a human-readable name for a device
* @dev: The device. Must be a valid and non-NULL pointer.
*
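
device_class_set_props() is now a macro that derives the element count at compile time, so property arrays are declared const and no longer carry the DEFINE_PROP_END_OF_LIST() terminator, and legacy reset handlers go through device_class_set_legacy_reset() instead of assigning dc->reset. A hedged sketch of a class_init under the new scheme; FooState, buf_len and foo_reset are hypothetical:

    static const Property foo_properties[] = {
        DEFINE_PROP_UINT32("buf-len", FooState, buf_len, 256),
        /* no DEFINE_PROP_END_OF_LIST() terminator any more */
    };

    static void foo_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        device_class_set_props(dc, foo_properties);
        device_class_set_legacy_reset(dc, foo_reset);   /* instead of dc->reset = ... */
    }
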
diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h
index 438f653..b921392 100644
--- a/include/hw/qdev-properties-system.h
+++ b/include/hw/qdev-properties-system.h
@@ -3,6 +3,9 @@
#include "hw/qdev-properties.h"
+bool qdev_prop_sanitize_s390x_loadparm(uint8_t *loadparm, const char *str,
+ Error **errp);
+
extern const PropertyInfo qdev_prop_chr;
extern const PropertyInfo qdev_prop_macaddr;
extern const PropertyInfo qdev_prop_reserved_region;
@@ -27,6 +30,8 @@ extern const PropertyInfo qdev_prop_pcie_link_speed;
extern const PropertyInfo qdev_prop_pcie_link_width;
extern const PropertyInfo qdev_prop_cpus390entitlement;
extern const PropertyInfo qdev_prop_iothread_vq_mapping_list;
+extern const PropertyInfo qdev_prop_endian_mode;
+extern const PropertyInfo qdev_prop_vmapple_virtio_blk_variant;
#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
@@ -88,10 +93,21 @@ extern const PropertyInfo qdev_prop_iothread_vq_mapping_list;
#define DEFINE_PROP_CPUS390ENTITLEMENT(_n, _s, _f, _d) \
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \
- CpuS390Entitlement)
+ S390CpuEntitlement)
#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \
DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \
IOThreadVirtQueueMappingList *)
+#define DEFINE_PROP_ENDIAN(_name, _state, _field, _default) \
+ DEFINE_PROP_UNSIGNED(_name, _state, _field, _default, \
+ qdev_prop_endian_mode, EndianMode)
+#define DEFINE_PROP_ENDIAN_NODEFAULT(_name, _state, _field) \
+ DEFINE_PROP_ENDIAN(_name, _state, _field, ENDIAN_MODE_UNSPECIFIED)
+
+#define DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT(_name, _state, _fld, _default) \
+ DEFINE_PROP_UNSIGNED(_name, _state, _fld, _default, \
+ qdev_prop_vmapple_virtio_blk_variant, \
+ VMAppleVirtioBlkVariant)
+
#endif
diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h
index 09aa04c..2c99856 100644
--- a/include/hw/qdev-properties.h
+++ b/include/hw/qdev-properties.h
@@ -16,28 +16,28 @@ struct Property {
const char *name;
const PropertyInfo *info;
ptrdiff_t offset;
- uint8_t bitnr;
+ const char *link_type;
uint64_t bitmask;
- bool set_default;
union {
int64_t i;
uint64_t u;
} defval;
- int arrayoffset;
const PropertyInfo *arrayinfo;
+ int arrayoffset;
int arrayfieldsize;
- const char *link_type;
+ uint8_t bitnr;
+ bool set_default;
};
struct PropertyInfo {
- const char *name;
+ const char *type;
const char *description;
const QEnumLookup *enum_table;
bool realized_set_allowed; /* allow setting property on realized device */
- int (*print)(Object *obj, Property *prop, char *dest, size_t len);
+ int (*print)(Object *obj, const Property *prop, char *dest, size_t len);
void (*set_default_value)(ObjectProperty *op, const Property *prop);
ObjectProperty *(*create)(ObjectClass *oc, const char *name,
- Property *prop);
+ const Property *prop);
ObjectPropertyAccessor *get;
ObjectPropertyAccessor *set;
ObjectPropertyRelease *release;
@@ -49,10 +49,10 @@ struct PropertyInfo {
extern const PropertyInfo qdev_prop_bit;
extern const PropertyInfo qdev_prop_bit64;
extern const PropertyInfo qdev_prop_bool;
-extern const PropertyInfo qdev_prop_enum;
extern const PropertyInfo qdev_prop_uint8;
extern const PropertyInfo qdev_prop_uint16;
extern const PropertyInfo qdev_prop_uint32;
+extern const PropertyInfo qdev_prop_usize;
extern const PropertyInfo qdev_prop_int32;
extern const PropertyInfo qdev_prop_uint64;
extern const PropertyInfo qdev_prop_uint64_checkmask;
@@ -171,9 +171,6 @@ extern const PropertyInfo qdev_prop_link;
#define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t)
-#define DEFINE_PROP_END_OF_LIST() \
- {}
-
/*
* Set properties between creation and realization.
*
@@ -204,7 +201,7 @@ void qdev_prop_set_enum(DeviceState *dev, const char *name, int value);
/* Takes ownership of @values */
void qdev_prop_set_array(DeviceState *dev, const char *name, QList *values);
-void *object_field_prop_ptr(Object *obj, Property *prop);
+void *object_field_prop_ptr(Object *obj, const Property *prop);
void qdev_prop_register_global(GlobalProperty *prop);
const GlobalProperty *qdev_find_global_prop(Object *obj,
@@ -223,7 +220,7 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, Object *obj,
* On error, store error in @errp. Static properties access data in a struct.
* The type of the QOM property is derived from prop->info.
*/
-void qdev_property_add_static(DeviceState *dev, Property *prop);
+void qdev_property_add_static(DeviceState *dev, const Property *prop);
/**
* qdev_alias_all_properties: Create aliases on source for all target properties
diff --git a/include/hw/register.h b/include/hw/register.h
index 6a076cf..a913c52 100644
--- a/include/hw/register.h
+++ b/include/hw/register.h
@@ -12,7 +12,7 @@
#define REGISTER_H
#include "hw/qdev-core.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/registerfields.h"
#include "qom/object.h"
diff --git a/include/hw/remote/iohub.h b/include/hw/remote/iohub.h
index 6a8444f..09ee6c7 100644
--- a/include/hw/remote/iohub.h
+++ b/include/hw/remote/iohub.h
@@ -37,6 +37,5 @@ void remote_iohub_set_irq(void *opaque, int pirq, int level);
void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg);
void remote_iohub_init(RemoteIOHubState *iohub);
-void remote_iohub_finalize(RemoteIOHubState *iohub);
#endif
diff --git a/include/hw/remote/proxy-memory-listener.h b/include/hw/remote/proxy-memory-listener.h
index c4f3efb..ec516d8 100644
--- a/include/hw/remote/proxy-memory-listener.h
+++ b/include/hw/remote/proxy-memory-listener.h
@@ -9,7 +9,7 @@
#ifndef PROXY_MEMORY_LISTENER_H
#define PROXY_MEMORY_LISTENER_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "io/channel.h"
typedef struct ProxyMemoryListener {
diff --git a/include/hw/resettable.h b/include/hw/resettable.h
index 7e249de..fd862f1 100644
--- a/include/hw/resettable.h
+++ b/include/hw/resettable.h
@@ -29,6 +29,7 @@ typedef struct ResettableState ResettableState;
* Types of reset.
*
* + Cold: reset resulting from a power cycle of the object.
+ * + Wakeup: reset resulting from a wake-up from a suspended state.
*
* TODO: Support has to be added to handle more types. In particular,
* ResettableState structure needs to be expanded.
@@ -36,6 +37,9 @@ typedef struct ResettableState ResettableState;
typedef enum ResetType {
RESET_TYPE_COLD,
RESET_TYPE_SNAPSHOT_LOAD,
+ RESET_TYPE_WAKEUP,
+ RESET_TYPE_S390_CPU_INITIAL,
+ RESET_TYPE_S390_CPU_NORMAL,
} ResetType;
/*
@@ -88,14 +92,6 @@ typedef enum ResetType {
* @get_state: Mandatory method which must return a pointer to a
* ResettableState.
*
- * @get_transitional_function: transitional method to handle Resettable objects
- * not yet fully moved to this interface. It will be removed as soon as it is
- * not needed anymore. This method is optional and may return a pointer to a
- * function to be used instead of the phases. If the method exists and returns
- * a non-NULL function pointer then that function is executed as a replacement
- * of the 'hold' phase method taking the object as argument. The two other phase
- * methods are not executed.
- *
* @child_foreach: Executes a given callback on every Resettable child. Child
* in this context means a child in the qbus tree, so the children of a qbus
* are the devices on it, and the children of a device are all the buses it
@@ -107,8 +103,6 @@ typedef void (*ResettableEnterPhase)(Object *obj, ResetType type);
typedef void (*ResettableHoldPhase)(Object *obj, ResetType type);
typedef void (*ResettableExitPhase)(Object *obj, ResetType type);
typedef ResettableState * (*ResettableGetState)(Object *obj);
-typedef void (*ResettableTrFunction)(Object *obj);
-typedef ResettableTrFunction (*ResettableGetTrFunction)(Object *obj);
typedef void (*ResettableChildCallback)(Object *, void *opaque,
ResetType type);
typedef void (*ResettableChildForeach)(Object *obj,
@@ -128,9 +122,6 @@ struct ResettableClass {
/* State access method */
ResettableGetState get_state;
- /* Transitional method for legacy reset compatibility */
- ResettableGetTrFunction get_transitional_function;
-
/* Hierarchy handling method */
ResettableChildForeach child_foreach;
};
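
With get_transitional_function removed and RESET_TYPE_WAKEUP added, devices implement the three-phase Resettable interface directly and may key behaviour off the reset type. A hedged sketch of a hold-phase handler; FooState, the FOO() cast and the two helpers are hypothetical:

    static void foo_reset_hold(Object *obj, ResetType type)
    {
        FooState *s = FOO(obj);

        if (type == RESET_TYPE_WAKEUP) {
            /* Coming back from suspend: keep wake-up configuration intact. */
            foo_reset_volatile_state(s);
            return;
        }
        foo_reset_everything(s);
    }
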
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index a2e4ae9..7d59b2e 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -27,36 +27,50 @@
#define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin"
#define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin"
+typedef struct RISCVBootInfo {
+ ssize_t kernel_size;
+ hwaddr image_low_addr;
+ hwaddr image_high_addr;
+
+ hwaddr initrd_start;
+ ssize_t initrd_size;
+
+ bool is_32bit;
+} RISCVBootInfo;
+
bool riscv_is_32bit(RISCVHartArrayState *harts);
char *riscv_plic_hart_config_string(int hart_count);
-target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts,
+void riscv_boot_info_init(RISCVBootInfo *info, RISCVHartArrayState *harts);
+target_ulong riscv_calc_kernel_start_addr(RISCVBootInfo *info,
target_ulong firmware_end_addr);
target_ulong riscv_find_and_load_firmware(MachineState *machine,
const char *default_machine_firmware,
- hwaddr firmware_load_addr,
+ hwaddr *firmware_load_addr,
symbol_fn_t sym_cb);
const char *riscv_default_firmware_name(RISCVHartArrayState *harts);
char *riscv_find_firmware(const char *firmware_filename,
const char *default_machine_firmware);
target_ulong riscv_load_firmware(const char *firmware_filename,
- hwaddr firmware_load_addr,
+ hwaddr *firmware_load_addr,
symbol_fn_t sym_cb);
-target_ulong riscv_load_kernel(MachineState *machine,
- RISCVHartArrayState *harts,
- target_ulong firmware_end_addr,
- bool load_initrd,
- symbol_fn_t sym_cb);
-uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
- MachineState *ms);
+void riscv_load_kernel(MachineState *machine,
+ RISCVBootInfo *info,
+ target_ulong kernel_start_addr,
+ bool load_initrd,
+ symbol_fn_t sym_cb);
+uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size,
+ MachineState *ms, RISCVBootInfo *info);
void riscv_load_fdt(hwaddr fdt_addr, void *fdt);
void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts,
hwaddr saddr,
hwaddr rom_base, hwaddr rom_size,
uint64_t kernel_entry,
uint64_t fdt_load_addr);
-void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
+void riscv_rom_copy_firmware_info(MachineState *machine,
+ RISCVHartArrayState *harts,
+ hwaddr rom_base,
hwaddr rom_size,
uint32_t reset_vec_size,
uint64_t kernel_entry);
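
The boot helpers now pass their results around through a RISCVBootInfo instead of return values, and riscv_find_and_load_firmware()/riscv_load_firmware() fill in the firmware load address through a pointer. A hedged sketch of the resulting machine boot flow; the ordering, the s->soc[0]/memmap accesses and the use of VIRT_DRAM are assumptions based only on the signatures above:

    static void example_boot_flow(MachineState *machine, RISCVVirtState *s)
    {
        RISCVBootInfo boot_info;
        hwaddr fw_load_addr = s->memmap[VIRT_DRAM].base;    /* assumed layout */
        target_ulong fw_end, kernel_start;
        uint64_t fdt_addr;

        riscv_boot_info_init(&boot_info, &s->soc[0]);        /* harts array assumed */
        fw_end = riscv_find_and_load_firmware(machine, RISCV64_BIOS_BIN,
                                              &fw_load_addr, NULL);
        kernel_start = riscv_calc_kernel_start_addr(&boot_info, fw_end);
        riscv_load_kernel(machine, &boot_info, kernel_start, true, NULL);

        fdt_addr = riscv_compute_fdt_addr(s->memmap[VIRT_DRAM].base,
                                          machine->ram_size, machine, &boot_info);
        riscv_load_fdt(fdt_addr, machine->fdt);
    }
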
diff --git a/include/hw/riscv/boot_opensbi.h b/include/hw/riscv/boot_opensbi.h
index 1b74966..18664a1 100644
--- a/include/hw/riscv/boot_opensbi.h
+++ b/include/hw/riscv/boot_opensbi.h
@@ -58,4 +58,33 @@ struct fw_dynamic_info {
target_long boot_hart;
};
+/** Representation of dynamic info passed by the previous booting stage */
+struct fw_dynamic_info32 {
+ /** Info magic */
+ int32_t magic;
+ /** Info version */
+ int32_t version;
+ /** Next booting stage address */
+ int32_t next_addr;
+ /** Next booting stage mode */
+ int32_t next_mode;
+ /** Options for OpenSBI library */
+ int32_t options;
+ /**
+ * Preferred boot HART id
+ *
+ * It is possible that the previous booting stage uses same link
+ * address as the FW_DYNAMIC firmware. In this case, the relocation
+ * lottery mechanism can potentially overwrite the previous booting
+ * stage while other HARTs are still running in the previous booting
+ * stage leading to boot-time crash. To avoid this boot-time crash,
+ * the previous booting stage can specify last HART that will jump
+ * to the FW_DYNAMIC firmware as the preferred boot HART.
+ *
+ * To avoid specifying a preferred boot HART, the previous booting
+ * stage can set it to -1UL which will force the FW_DYNAMIC firmware
+ * to use the relocation lottery mechanism.
+ */
+ int32_t boot_hart;
+};
#endif
diff --git a/include/hw/riscv/iommu.h b/include/hw/riscv/iommu.h
new file mode 100644
index 0000000..8a8acfc
--- /dev/null
+++ b/include/hw/riscv/iommu.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_IOMMU_H
+#define HW_RISCV_IOMMU_H
+
+#include "qemu/osdep.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_IOMMU "riscv-iommu"
+OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUState, RISCV_IOMMU)
+typedef struct RISCVIOMMUState RISCVIOMMUState;
+
+#define TYPE_RISCV_IOMMU_MEMORY_REGION "riscv-iommu-mr"
+typedef struct RISCVIOMMUSpace RISCVIOMMUSpace;
+
+#define TYPE_RISCV_IOMMU_PCI "riscv-iommu-pci"
+OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUStatePci, RISCV_IOMMU_PCI)
+typedef struct RISCVIOMMUStatePci RISCVIOMMUStatePci;
+
+#define TYPE_RISCV_IOMMU_SYS "riscv-iommu-device"
+OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUStateSys, RISCV_IOMMU_SYS)
+typedef struct RISCVIOMMUStateSys RISCVIOMMUStateSys;
+
+#define FDT_IRQ_TYPE_EDGE_LOW 1
+
+#endif
diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h
index daef086..7ca9b97 100644
--- a/include/hw/riscv/microchip_pfsoc.h
+++ b/include/hw/riscv/microchip_pfsoc.h
@@ -67,6 +67,7 @@ typedef struct MicrochipIcicleKitState {
MachineState parent_obj;
/*< public >*/
+ uint32_t clint_timebase_freq;
MicrochipPFSoCState soc;
} MicrochipIcicleKitState;
diff --git a/include/hw/riscv/numa.h b/include/hw/riscv/numa.h
index 8f52802..147f016 100644
--- a/include/hw/riscv/numa.h
+++ b/include/hw/riscv/numa.h
@@ -21,7 +21,7 @@
#include "hw/boards.h"
#include "hw/sysbus.h"
-#include "sysemu/numa.h"
+#include "system/numa.h"
/**
* riscv_socket_count:
diff --git a/include/hw/riscv/riscv_hart.h b/include/hw/riscv/riscv_hart.h
index 912b4a2..a6ed73a 100644
--- a/include/hw/riscv/riscv_hart.h
+++ b/include/hw/riscv/riscv_hart.h
@@ -38,6 +38,10 @@ struct RISCVHartArrayState {
uint32_t hartid_base;
char *cpu_type;
uint64_t resetvec;
+ uint32_t num_rnmi_irqvec;
+ uint64_t *rnmi_irqvec;
+ uint32_t num_rnmi_excpvec;
+ uint64_t *rnmi_excpvec;
RISCVCPU *harts;
};
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index c0dc41f..7b4c2c8 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -62,6 +62,8 @@ struct RISCVVirtState {
OnOffAuto acpi;
const MemMapEntry *memmap;
struct GPEXHost *gpex_host;
+ OnOffAuto iommu_sys;
+ uint16_t pci_iommu_bdf;
};
enum {
@@ -84,7 +86,8 @@ enum {
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
VIRT_PLATFORM_BUS,
- VIRT_PCIE_ECAM
+ VIRT_PCIE_ECAM,
+ VIRT_IOMMU_SYS,
};
enum {
@@ -93,6 +96,7 @@ enum {
VIRTIO_IRQ = 1, /* 1 to 8 */
VIRTIO_COUNT = 8,
PCIE_IRQ = 0x20, /* 32 to 35 */
+ IOMMU_SYS_IRQ = 0x24, /* 36-39 */
VIRT_PLATFORM_BUS_IRQ = 64, /* 64 to 95 */
};
@@ -129,6 +133,7 @@ enum {
1 + FDT_APLIC_INT_CELLS)
bool virt_is_acpi_enabled(RISCVVirtState *s);
+bool virt_is_iommu_sys_enabled(RISCVVirtState *s);
void virt_acpi_setup(RISCVVirtState *vms);
uint32_t imsic_num_bits(uint32_t count);
diff --git a/include/hw/s390x/ap-bridge.h b/include/hw/s390x/ap-bridge.h
index 470e439..7efc529 100644
--- a/include/hw/s390x/ap-bridge.h
+++ b/include/hw/s390x/ap-bridge.h
@@ -16,4 +16,43 @@
void s390_init_ap(void);
+typedef struct ChscSeiNt0Res {
+ uint16_t length;
+ uint16_t code;
+ uint8_t reserved1;
+ uint16_t reserved2;
+ uint8_t nt;
+#define PENDING_EVENT_INFO_BITMASK 0x80;
+ uint8_t flags;
+ uint8_t reserved3;
+ uint8_t rs;
+ uint8_t cc;
+} QEMU_PACKED ChscSeiNt0Res;
+
+#define NT0_RES_RESPONSE_CODE 1
+#define NT0_RES_NT_DEFAULT 0
+#define NT0_RES_RS_AP_CHANGE 5
+#define NT0_RES_CC_AP_CHANGE 3
+
+#define EVENT_INFORMATION_NOT_STORED 1
+#define EVENT_INFORMATION_STORED 0
+
+/**
+ * ap_chsc_sei_nt0_get_event - Retrieve the next pending AP config
+ * change event
+ * @res: Pointer to a ChscSeiNt0Res struct to be filled with event
+ * data
+ *
+ * This function checks for any pending AP config change events and,
+ * if present, populates the provided response structure with the
+ * appropriate SEI NT0 fields.
+ *
+ * Return:
+ * EVENT_INFORMATION_STORED - An event was available and written to @res
+ * EVENT_INFORMATION_NOT_STORED - No event was available
+ */
+int ap_chsc_sei_nt0_get_event(void *res);
+
+bool ap_chsc_sei_nt0_have_event(void);
+
#endif
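
The new helpers let the CHSC store-event-information path drain pending AP configuration-change events. A short consumer sketch following the contract documented above:

    static void example_ap_event_poll(void)
    {
        ChscSeiNt0Res res;

        if (ap_chsc_sei_nt0_have_event() &&
            ap_chsc_sei_nt0_get_event(&res) == EVENT_INFORMATION_STORED) {
            /* res.rs / res.cc now describe the AP configuration change */
        }
    }
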
diff --git a/include/hw/s390x/cpu-topology.h b/include/hw/s390x/cpu-topology.h
index c064f42..d5e9aa4 100644
--- a/include/hw/s390x/cpu-topology.h
+++ b/include/hw/s390x/cpu-topology.h
@@ -13,7 +13,7 @@
#include "qemu/queue.h"
#include "hw/boards.h"
-#include "qapi/qapi-types-machine-target.h"
+#include "qapi/qapi-types-machine-s390x.h"
#define S390_TOPOLOGY_CPU_IFL 0x03
@@ -37,7 +37,7 @@ typedef struct S390TopologyEntry {
typedef struct S390Topology {
uint8_t *cores_per_socket;
- CpuS390Polarization polarization;
+ S390CpuPolarization polarization;
} S390Topology;
typedef QTAILQ_HEAD(, S390TopologyEntry) S390TopologyList;
@@ -57,7 +57,7 @@ static inline void s390_topology_setup_cpu(MachineState *ms,
static inline void s390_topology_reset(void)
{
/* Unreachable, CPU topology not implemented for TCG */
- assert(false);
+ g_assert_not_reached();
}
#endif
diff --git a/include/hw/s390x/css-bridge.h b/include/hw/s390x/css-bridge.h
index deb606d..4f874ed 100644
--- a/include/hw/s390x/css-bridge.h
+++ b/include/hw/s390x/css-bridge.h
@@ -19,7 +19,6 @@
/* virtual css bridge */
struct VirtualCssBridge {
SysBusDevice sysbus_dev;
- bool css_dev_path;
};
#define TYPE_VIRTUAL_CSS_BRIDGE "virtual-css-bridge"
diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h
index 8289e45..0b0400a 100644
--- a/include/hw/s390x/css.h
+++ b/include/hw/s390x/css.h
@@ -15,7 +15,7 @@
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "target/s390x/cpu-qom.h"
/* Channel subsystem constants. */
@@ -238,7 +238,6 @@ uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc);
void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
uint8_t flags, Error **errp);
-#ifndef CONFIG_USER_ONLY
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
uint16_t schid);
bool css_subch_visible(SubchDev *sch);
@@ -262,7 +261,6 @@ int css_enable_mss(void);
IOInstEnding css_do_rsch(SubchDev *sch);
int css_do_rchp(uint8_t cssid, uint8_t chpid);
bool css_present(uint8_t cssid);
-#endif
extern const PropertyInfo css_devid_ro_propinfo;
@@ -333,10 +331,4 @@ static inline int ccw_dstream_read_buf(CcwDataStream *cds, void *buff, int len)
#define ccw_dstream_read(cds, v) ccw_dstream_read_buf((cds), &(v), sizeof(v))
#define ccw_dstream_write(cds, v) ccw_dstream_write_buf((cds), &(v), sizeof(v))
-/**
- * true if (vmstate based) migration of the channel subsystem
- * is enabled, false if it is disabled.
- */
-extern bool css_migration_enabled;
-
#endif
diff --git a/include/hw/s390x/event-facility.h b/include/hw/s390x/event-facility.h
index ff874e7..eac7a51 100644
--- a/include/hw/s390x/event-facility.h
+++ b/include/hw/s390x/event-facility.h
@@ -25,6 +25,7 @@
#define SCLP_EVENT_MESSAGE 0x02
#define SCLP_EVENT_CONFIG_MGT_DATA 0x04
#define SCLP_EVENT_PMSGCMD 0x09
+#define SCLP_EVENT_CTRL_PGM_ID 0x0b
#define SCLP_EVENT_ASCII_CONSOLE_DATA 0x1a
#define SCLP_EVENT_SIGNAL_QUIESCE 0x1d
@@ -35,6 +36,7 @@
#define SCLP_EVENT_MASK_MSG SCLP_EVMASK(SCLP_EVENT_MESSAGE)
#define SCLP_EVENT_MASK_CONFIG_MGT_DATA SCLP_EVMASK(SCLP_EVENT_CONFIG_MGT_DATA)
#define SCLP_EVENT_MASK_PMSGCMD SCLP_EVMASK(SCLP_EVENT_PMSGCMD)
+#define SCLP_EVENT_MASK_CTRL_PGM_ID SCLP_EVMASK(SCLP_EVENT_CTRL_PGM_ID)
#define SCLP_EVENT_MASK_MSG_ASCII SCLP_EVMASK(SCLP_EVENT_ASCII_CONSOLE_DATA)
#define SCLP_EVENT_MASK_SIGNAL_QUIESCE SCLP_EVMASK(SCLP_EVENT_SIGNAL_QUIESCE)
@@ -191,6 +193,21 @@ struct SCLPEventClass {
bool (*can_handle_event)(uint8_t type);
};
+#define TYPE_SCLP_EVENT_CPI "sclpcpi"
+typedef struct SCLPEventCPIClass SCLPEventCPIClass;
+typedef struct SCLPEventCPI SCLPEventCPI;
+OBJECT_DECLARE_TYPE(SCLPEventCPI, SCLPEventCPIClass,
+ SCLP_EVENT_CPI)
+
+struct SCLPEventCPI {
+ SCLPEvent event;
+ uint8_t system_type[8];
+ uint8_t system_name[8];
+ uint64_t system_level;
+ uint8_t sysplex_name[8];
+ uint64_t timestamp;
+};
+
#define TYPE_SCLP_EVENT_FACILITY "s390-sclp-event-facility"
typedef struct SCLPEventFacility SCLPEventFacility;
typedef struct SCLPEventFacilityClass SCLPEventFacilityClass;
diff --git a/include/hw/s390x/ipl/qipl.h b/include/hw/s390x/ipl/qipl.h
new file mode 100644
index 0000000..6824391
--- /dev/null
+++ b/include/hw/s390x/ipl/qipl.h
@@ -0,0 +1,127 @@
+/*
+ * S/390 boot structures
+ *
+ * Copyright 2024 IBM Corp.
+ * Author(s): Jared Rossi <jrossi@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef S390X_QIPL_H
+#define S390X_QIPL_H
+
+/* Boot Menu flags */
+#define QIPL_FLAG_BM_OPTS_CMD 0x80
+#define QIPL_FLAG_BM_OPTS_ZIPL 0x40
+
+#define QIPL_ADDRESS 0xcc
+#define LOADPARM_LEN 8
+#define NO_LOADPARM "\0\0\0\0\0\0\0\0"
+
+/*
+ * The QEMU IPL Parameters will be stored at absolute address
+ * 204 (0xcc) which means it is 32-bit word aligned but not
+ * double-word aligned. Placement of 64-bit data fields in this
+ * area must account for their alignment needs.
+ * The total size of the struct must never exceed 28 bytes.
+ */
+struct QemuIplParameters {
+ uint8_t qipl_flags;
+ uint8_t index;
+ uint8_t reserved1[2];
+ uint64_t reserved2;
+ uint32_t boot_menu_timeout;
+ uint8_t reserved3[2];
+ uint16_t chain_len;
+ uint64_t next_iplb;
+} QEMU_PACKED;
+typedef struct QemuIplParameters QemuIplParameters;
+
+struct IPLBlockPVComp {
+ uint64_t tweak_pref;
+ uint64_t addr;
+ uint64_t size;
+} QEMU_PACKED;
+typedef struct IPLBlockPVComp IPLBlockPVComp;
+
+struct IPLBlockPV {
+ uint8_t reserved18[87]; /* 0x18 */
+ uint8_t version; /* 0x6f */
+ uint32_t reserved70; /* 0x70 */
+ uint32_t num_comp; /* 0x74 */
+ uint64_t pv_header_addr; /* 0x78 */
+ uint64_t pv_header_len; /* 0x80 */
+ struct IPLBlockPVComp components[0];
+} QEMU_PACKED;
+typedef struct IPLBlockPV IPLBlockPV;
+
+struct IplBlockCcw {
+ uint8_t reserved0[85];
+ uint8_t ssid;
+ uint16_t devno;
+ uint8_t vm_flags;
+ uint8_t reserved3[3];
+ uint32_t vm_parm_len;
+ uint8_t nss_name[8];
+ uint8_t vm_parm[64];
+ uint8_t reserved4[8];
+} QEMU_PACKED;
+typedef struct IplBlockCcw IplBlockCcw;
+
+struct IplBlockFcp {
+ uint8_t reserved1[305 - 1];
+ uint8_t opt;
+ uint8_t reserved2[3];
+ uint16_t reserved3;
+ uint16_t devno;
+ uint8_t reserved4[4];
+ uint64_t wwpn;
+ uint64_t lun;
+ uint32_t bootprog;
+ uint8_t reserved5[12];
+ uint64_t br_lba;
+ uint32_t scp_data_len;
+ uint8_t reserved6[260];
+ uint8_t scp_data[0];
+} QEMU_PACKED;
+typedef struct IplBlockFcp IplBlockFcp;
+
+struct IplBlockQemuScsi {
+ uint32_t lun;
+ uint16_t target;
+ uint16_t channel;
+ uint8_t reserved0[77];
+ uint8_t ssid;
+ uint16_t devno;
+} QEMU_PACKED;
+typedef struct IplBlockQemuScsi IplBlockQemuScsi;
+
+union IplParameterBlock {
+ struct {
+ uint32_t len;
+ uint8_t reserved0[3];
+ uint8_t version;
+ uint32_t blk0_len;
+ uint8_t pbt;
+ uint8_t flags;
+ uint16_t reserved01;
+ uint8_t loadparm[LOADPARM_LEN];
+ union {
+ IplBlockCcw ccw;
+ IplBlockFcp fcp;
+ IPLBlockPV pv;
+ IplBlockQemuScsi scsi;
+ };
+ } QEMU_PACKED;
+ struct {
+ uint8_t reserved1[110];
+ uint16_t devno;
+ uint8_t reserved2[88];
+ uint8_t reserved_ext[4096 - 200];
+ } QEMU_PACKED;
+} QEMU_PACKED;
+typedef union IplParameterBlock IplParameterBlock;
+
+#endif
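
The layout comment above fixes the structure at absolute address 0xcc with a
hard 28-byte cap; as a sketch, a consumer could encode both constraints as
compile-time checks with QEMU's existing QEMU_BUILD_BUG_ON() (not part of the
patch):

    /* Sketch: compile-time guards for the documented layout constraints. */
    QEMU_BUILD_BUG_ON(sizeof(QemuIplParameters) > 28);
    QEMU_BUILD_BUG_ON(QIPL_ADDRESS % 4 != 0);   /* 32-bit word aligned */
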
diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h
index 2c43ea1..04944d4 100644
--- a/include/hw/s390x/s390-pci-bus.h
+++ b/include/hw/s390x/s390-pci-bus.h
@@ -277,6 +277,7 @@ struct S390PCIIOMMU {
AddressSpace as;
MemoryRegion mr;
IOMMUMemoryRegion iommu_mr;
+ MemoryRegion *dm_mr;
bool enabled;
uint64_t g_iota;
uint64_t pba;
@@ -362,6 +363,7 @@ struct S390PCIBusDevice {
bool interp;
bool forwarding_assist;
bool aif;
+ bool rtr_avail;
QTAILQ_ENTRY(S390PCIBusDevice) link;
};
@@ -389,6 +391,7 @@ int pci_chsc_sei_nt2_have_event(void);
void s390_pci_sclp_configure(SCCB *sccb);
void s390_pci_sclp_deconfigure(SCCB *sccb);
void s390_pci_iommu_enable(S390PCIIOMMU *iommu);
+void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu);
void s390_pci_iommu_disable(S390PCIIOMMU *iommu);
void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
uint64_t faddr, uint32_t e);
diff --git a/include/hw/s390x/s390-pci-clp.h b/include/hw/s390x/s390-pci-clp.h
index 03b7f9b..6a635d6 100644
--- a/include/hw/s390x/s390-pci-clp.h
+++ b/include/hw/s390x/s390-pci-clp.h
@@ -158,6 +158,7 @@ typedef struct ClpRspQueryPciGrp {
#define CLP_RSP_QPCIG_MASK_NOI 0xfff
uint16_t i;
uint8_t version;
+#define CLP_RSP_QPCIG_MASK_RTR 0x20
#define CLP_RSP_QPCIG_MASK_FRAME 0x2
#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
uint8_t fr;
diff --git a/include/hw/s390x/s390-pci-inst.h b/include/hw/s390x/s390-pci-inst.h
index a55c448..5cb8da5 100644
--- a/include/hw/s390x/s390-pci-inst.h
+++ b/include/hw/s390x/s390-pci-inst.h
@@ -15,7 +15,7 @@
#define HW_S390_PCI_INST_H
#include "s390-pci-bus.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h
index 996864a..526078a 100644
--- a/include/hw/s390x/s390-virtio-ccw.h
+++ b/include/hw/s390x/s390-virtio-ccw.h
@@ -29,10 +29,19 @@ struct S390CcwMachineState {
bool dea_key_wrap;
bool pv;
uint8_t loadparm[8];
+ uint64_t memory_limit;
+ uint64_t max_pagesize;
SCLPDevice *sclp;
};
+static inline uint64_t s390_get_memory_limit(S390CcwMachineState *s390ms)
+{
+ /* We expect to be called only after the limit was set. */
+ assert(s390ms->memory_limit);
+ return s390ms->memory_limit;
+}
+
#define S390_PTF_REASON_NONE (0x00 << 8)
#define S390_PTF_REASON_DONE (0x01 << 8)
#define S390_PTF_REASON_BUSY (0x02 << 8)
@@ -44,17 +53,8 @@ struct S390CcwMachineClass {
MachineClass parent_class;
/*< public >*/
- bool ri_allowed;
- bool cpu_model_allowed;
- bool hpage_1m_allowed;
int max_threads;
+ bool use_cpi;
};
-/* runtime-instrumentation allowed by the machine */
-bool ri_allowed(void);
-/* cpu model allowed by the machine */
-bool cpu_model_allowed(void);
-/* 1M huge page mappings allowed by the machine */
-bool hpage_1m_allowed(void);
-
#endif
diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h
index 4d66c5e..91edaac 100644
--- a/include/hw/s390x/s390_flic.h
+++ b/include/hw/s390x/s390_flic.h
@@ -41,10 +41,7 @@ OBJECT_DECLARE_TYPE(S390FLICState, S390FLICStateClass,
struct S390FLICState {
SysBusDevice parent_obj;
- /* to limit AdapterRoutes.num_routes for compat */
- uint32_t adapter_routes_max_batch;
bool ais_supported;
- bool migration_enabled;
};
diff --git a/include/hw/s390x/storage-attributes.h b/include/hw/s390x/storage-attributes.h
index 8921a04..b5c6d8f 100644
--- a/include/hw/s390x/storage-attributes.h
+++ b/include/hw/s390x/storage-attributes.h
@@ -25,7 +25,6 @@ OBJECT_DECLARE_TYPE(S390StAttribState, S390StAttribClass, S390_STATTRIB)
struct S390StAttribState {
DeviceState parent_obj;
uint64_t migration_cur_gfn;
- bool migration_enabled;
};
diff --git a/include/hw/s390x/storage-keys.h b/include/hw/s390x/storage-keys.h
index 976ffb2..ac30300 100644
--- a/include/hw/s390x/storage-keys.h
+++ b/include/hw/s390x/storage-keys.h
@@ -21,8 +21,6 @@ OBJECT_DECLARE_TYPE(S390SKeysState, S390SKeysClass, S390_SKEYS)
struct S390SKeysState {
DeviceState parent_obj;
- bool migration_enabled;
-
};
@@ -124,7 +122,23 @@ int s390_skeys_set(S390SKeysState *ks, uint64_t start_gfn,
S390SKeysState *s390_get_skeys_device(void);
+void s390_qmp_dump_skeys(const char *filename, Error **errp);
void hmp_dump_skeys(Monitor *mon, const QDict *qdict);
void hmp_info_skeys(Monitor *mon, const QDict *qdict);
+#define TYPE_DUMP_SKEYS_INTERFACE "dump-skeys-interface"
+
+typedef struct DumpSKeysInterface DumpSKeysInterface;
+DECLARE_CLASS_CHECKERS(DumpSKeysInterface, DUMP_SKEYS_INTERFACE,
+ TYPE_DUMP_SKEYS_INTERFACE)
+
+struct DumpSKeysInterface {
+ InterfaceClass parent_class;
+
+ /**
+ * @qmp_dump_skeys: Callback to dump guest's storage keys to @filename.
+ */
+ void (*qmp_dump_skeys)(const char *filename, Error **errp);
+};
+
#endif /* S390_STORAGE_KEYS_H */
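
As a sketch of how the new interface is meant to be consumed, an skeys
implementation would list TYPE_DUMP_SKEYS_INTERFACE in its TypeInfo
.interfaces array and fill in the callback from class_init; the type and
handler names below are hypothetical:

    /* Sketch; my_qmp_dump_skeys() and the implementing type are hypothetical. */
    static void my_skeys_class_init(ObjectClass *oc, void *data)
    {
        DumpSKeysInterface *dsi = DUMP_SKEYS_INTERFACE_CLASS(oc);

        /* Called by the QMP/HMP dump-skeys commands. */
        dsi->qmp_dump_skeys = my_qmp_dump_skeys;
    }
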
diff --git a/include/hw/s390x/vfio-ccw.h b/include/hw/s390x/vfio-ccw.h
index 4209d27..1e0922d 100644
--- a/include/hw/s390x/vfio-ccw.h
+++ b/include/hw/s390x/vfio-ccw.h
@@ -14,7 +14,7 @@
#ifndef HW_VFIO_CCW_H
#define HW_VFIO_CCW_H
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-device.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/ccw-device.h"
#include "qom/object.h"
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index c3d5e17..90ee192 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -24,6 +24,7 @@ struct SCSIRequest {
SCSIBus *bus;
SCSIDevice *dev;
const SCSIReqOps *ops;
+ AioContext *ctx;
uint32_t refcount;
uint32_t tag;
uint32_t lun;
@@ -48,6 +49,8 @@ struct SCSIRequest {
bool dma_started;
BlockAIOCB *aiocb;
QEMUSGList *sg;
+
+ /* Protected by SCSIDevice->requests_lock */
QTAILQ_ENTRY(SCSIRequest) next;
};
@@ -76,10 +79,7 @@ struct SCSIDevice
uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;
- /*
- * The requests list is only accessed from the AioContext that executes
- * requests or from the main loop when IOThread processing is stopped.
- */
+ QemuMutex requests_lock; /* protects the requests list */
QTAILQ_HEAD(, SCSIRequest) requests;
uint32_t channel;
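
The new requests_lock replaces the old AioContext-based access rule, so any
walk of the per-device request list is expected to hold the mutex; a minimal
sketch (not the actual hw/scsi code) using QEMU's lock guard helpers from
"qemu/lockable.h":

    /* Sketch: iterate a device's request list under the new lock. */
    static int count_pending_requests_sketch(SCSIDevice *sdev)
    {
        SCSIRequest *req;
        int count = 0;

        WITH_QEMU_LOCK_GUARD(&sdev->requests_lock) {
            QTAILQ_FOREACH(req, &sdev->requests, next) {
                count++;
            }
        }
        return count;
    }
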
diff --git a/include/hw/sd/aspeed_sdhci.h b/include/hw/sd/aspeed_sdhci.h
index 057bc5f..4ef1770 100644
--- a/include/hw/sd/aspeed_sdhci.h
+++ b/include/hw/sd/aspeed_sdhci.h
@@ -13,9 +13,12 @@
#include "qom/object.h"
#define TYPE_ASPEED_SDHCI "aspeed.sdhci"
-OBJECT_DECLARE_SIMPLE_TYPE(AspeedSDHCIState, ASPEED_SDHCI)
+#define TYPE_ASPEED_2400_SDHCI TYPE_ASPEED_SDHCI "-ast2400"
+#define TYPE_ASPEED_2500_SDHCI TYPE_ASPEED_SDHCI "-ast2500"
+#define TYPE_ASPEED_2600_SDHCI TYPE_ASPEED_SDHCI "-ast2600"
+#define TYPE_ASPEED_2700_SDHCI TYPE_ASPEED_SDHCI "-ast2700"
+OBJECT_DECLARE_TYPE(AspeedSDHCIState, AspeedSDHCIClass, ASPEED_SDHCI)
-#define ASPEED_SDHCI_CAPABILITIES 0x01E80080
#define ASPEED_SDHCI_NUM_SLOTS 2
#define ASPEED_SDHCI_NUM_REGS (ASPEED_SDHCI_REG_SIZE / sizeof(uint32_t))
#define ASPEED_SDHCI_REG_SIZE 0x100
@@ -32,4 +35,10 @@ struct AspeedSDHCIState {
uint32_t regs[ASPEED_SDHCI_NUM_REGS];
};
+struct AspeedSDHCIClass {
+ SysBusDeviceClass parent_class;
+
+ uint64_t capareg;
+};
+
#endif /* ASPEED_SDHCI_H */
diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h
index d35a839..d6bad17 100644
--- a/include/hw/sd/sd.h
+++ b/include/hw/sd/sd.h
@@ -75,14 +75,6 @@ typedef enum {
UHS_III = 3, /* currently not supported */
} sd_uhs_mode_t;
-typedef enum {
- sd_spi,
- sd_bc, /* broadcast -- no response */
- sd_bcr, /* broadcast with response */
- sd_ac, /* addressed -- no data transfer */
- sd_adtc, /* addressed with data transfer */
-} sd_cmd_type_t;
-
typedef struct {
uint8_t cmd;
uint32_t arg;
@@ -127,7 +119,6 @@ struct SDCardClass {
void (*set_voltage)(SDState *sd, uint16_t millivolts);
uint8_t (*get_dat_lines)(SDState *sd);
bool (*get_cmd_line)(SDState *sd);
- void (*enable)(SDState *sd, bool enable);
bool (*get_inserted)(SDState *sd);
bool (*get_readonly)(SDState *sd);
void (*set_cid)(SDState *sd);
diff --git a/include/hw/sd/sdcard_legacy.h b/include/hw/sd/sdcard_legacy.h
deleted file mode 100644
index 0dc3889..0000000
--- a/include/hw/sd/sdcard_legacy.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * SD Memory Card emulation (deprecated legacy API)
- *
- * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef HW_SDCARD_LEGACY_H
-#define HW_SDCARD_LEGACY_H
-
-#include "hw/sd/sd.h"
-
-/* Legacy functions to be used only by non-qdevified callers */
-SDState *sd_init(BlockBackend *blk, bool is_spi);
-int sd_do_command(SDState *card, SDRequest *request, uint8_t *response);
-void sd_write_byte(SDState *card, uint8_t value);
-uint8_t sd_read_byte(SDState *card);
-void sd_set_cb(SDState *card, qemu_irq readonly, qemu_irq insert);
-
-/* sd_enable should not be used -- it is only used on the nseries boards,
- * where it is part of a broken implementation of the MMC card slot switch
- * (there should be two card slots which are multiplexed to a single MMC
- * controller, but instead we model it with one card and controller and
- * disable the card when the second slot is selected, so it looks like the
- * second slot is always empty).
- */
-void sd_enable(SDState *card, bool enable);
-
-#endif /* HW_SDCARD_LEGACY_H */
diff --git a/include/hw/sd/sdhci.h b/include/hw/sd/sdhci.h
index 6cd2822..51fb30e 100644
--- a/include/hw/sd/sdhci.h
+++ b/include/hw/sd/sdhci.h
@@ -100,11 +100,16 @@ struct SDHCIState {
uint8_t sd_spec_version;
uint8_t uhs_mode;
uint8_t vendor; /* For vendor specific functionality */
+ /*
+ * The write-protect pin is active low by default when detecting whether
+ * the SD card is protected. Set wp_inverted to invert the signal.
+ */
+ bool wp_inverted;
};
typedef struct SDHCIState SDHCIState;
#define SDHCI_VENDOR_NONE 0
-#define SDHCI_VENDOR_IMX 1
+#define SDHCI_VENDOR_FSL 2
/*
* Controller does not provide transfer-complete interrupt when not
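
A minimal sketch of how the new flag would typically be consumed, assuming
the usual SDBus helper for the raw write-protect state (this is not the
actual hw/sd/sdhci.c change):

    /* Sketch: report the write-protect state, honouring wp_inverted. */
    static bool sdhci_card_is_protected_sketch(SDHCIState *s)
    {
        bool wp = sdbus_get_readonly(&s->sdbus);

        return s->wp_inverted ? !wp : wp;
    }
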
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index ec716cd..c82feef 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -38,29 +38,10 @@ struct SH7750State;
struct SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem);
-typedef struct {
- /* The callback will be triggered if any of the designated lines change */
- uint16_t portamask_trigger;
- uint16_t portbmask_trigger;
- /* Return 0 if no action was taken */
- int (*port_change_cb) (uint16_t porta, uint16_t portb,
- uint16_t *periph_pdtra,
- uint16_t *periph_portdira,
- uint16_t *periph_pdtrb,
- uint16_t *periph_portdirb);
-} sh7750_io_device;
-
-int sh7750_register_io_device(struct SH7750State *s,
- sh7750_io_device *device);
-
-/* sh_serial.c */
#define TYPE_SH_SERIAL "sh-serial"
#define SH_SERIAL_FEAT_SCIF (1 << 0)
/* sh7750.c */
qemu_irq sh7750_irl(struct SH7750State *s);
-/* tc58128.c */
-int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2);
-
#endif
diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h
index f62d5c5..94f1831 100644
--- a/include/hw/sh4/sh_intc.h
+++ b/include/hw/sh4/sh_intc.h
@@ -1,7 +1,7 @@
#ifndef SH_INTC_H
#define SH_INTC_H
-#include "exec/memory.h"
+#include "system/memory.h"
typedef unsigned char intc_enum;
diff --git a/include/hw/southbridge/ich9.h b/include/hw/southbridge/ich9.h
index fd01649..1e231e8 100644
--- a/include/hw/southbridge/ich9.h
+++ b/include/hw/southbridge/ich9.h
@@ -7,7 +7,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_device.h"
#include "hw/rtc/mc146818rtc.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/notify.h"
#include "qom/object.h"
@@ -196,8 +196,12 @@ struct ICH9LPCState {
#define ICH9_PMIO_GPE0_LEN 16
#define ICH9_PMIO_SMI_EN 0x30
#define ICH9_PMIO_SMI_EN_APMC_EN (1 << 5)
+#define ICH9_PMIO_SMI_EN_SWSMI_EN (1 << 6)
#define ICH9_PMIO_SMI_EN_TCO_EN (1 << 13)
+#define ICH9_PMIO_SMI_EN_PERIODIC_EN (1 << 14)
#define ICH9_PMIO_SMI_STS 0x34
+#define ICH9_PMIO_SMI_STS_SWSMI_STS (1 << 6)
+#define ICH9_PMIO_SMI_STS_PERIODIC_STS (1 << 14)
#define ICH9_PMIO_TCO_RLD 0x60
#define ICH9_PMIO_TCO_LEN 32
diff --git a/include/hw/ssi/allwinner-a10-spi.h b/include/hw/ssi/allwinner-a10-spi.h
new file mode 100644
index 0000000..da46e29
--- /dev/null
+++ b/include/hw/ssi/allwinner-a10-spi.h
@@ -0,0 +1,57 @@
+/*
+ * Allwinner SPI Bus Serial Interface registers definition
+ *
+ * Copyright (C) 2024 Strahinja Jankovic. <strahinja.p.jankovic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ALLWINNER_A10_SPI_H
+#define ALLWINNER_A10_SPI_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
+
+/** Size of register I/O address space used by SPI device */
+#define AW_A10_SPI_IOSIZE (0x1000)
+
+/** Total number of known registers */
+#define AW_A10_SPI_REGS_NUM (AW_A10_SPI_IOSIZE / sizeof(uint32_t))
+#define AW_A10_SPI_FIFO_SIZE (64)
+#define AW_A10_SPI_CS_LINES_NR (4)
+
+#define TYPE_AW_A10_SPI "allwinner.spi"
+OBJECT_DECLARE_SIMPLE_TYPE(AWA10SPIState, AW_A10_SPI)
+
+struct AWA10SPIState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ SSIBus *bus;
+ qemu_irq irq;
+ qemu_irq cs_lines[AW_A10_SPI_CS_LINES_NR];
+
+ uint32_t regs[AW_A10_SPI_REGS_NUM];
+
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+};
+
+#endif /* ALLWINNER_A10_SPI_H */
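
The definitions above imply a register file of 0x1000 bytes of 32-bit
registers (1024 words); a sketch of the offset-to-index mapping an MMIO
handler would use (not the actual device code; hwaddr comes from
"exec/hwaddr.h"):

    /* Sketch: map an MMIO offset onto the regs[] word array. */
    static uint32_t aw_a10_spi_reg_read_sketch(AWA10SPIState *s, hwaddr offset)
    {
        g_assert(offset < AW_A10_SPI_IOSIZE);
        return s->regs[offset / sizeof(uint32_t)];
    }
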
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
index 234dca3..25b95e7 100644
--- a/include/hw/ssi/aspeed_smc.h
+++ b/include/hw/ssi/aspeed_smc.h
@@ -82,6 +82,7 @@ struct AspeedSMCState {
uint8_t snoop_index;
uint8_t snoop_dummies;
+ bool unselect;
};
typedef struct AspeedSegments {
diff --git a/include/hw/ssi/npcm7xx_fiu.h b/include/hw/ssi/npcm7xx_fiu.h
index a3a1704..7ebd422 100644
--- a/include/hw/ssi/npcm7xx_fiu.h
+++ b/include/hw/ssi/npcm7xx_fiu.h
@@ -60,6 +60,7 @@ struct NPCM7xxFIUState {
int32_t cs_count;
int32_t active_cs;
qemu_irq *cs_lines;
+ uint64_t flash_size;
NPCM7xxFIUFlash *flash;
SSIBus *spi;
diff --git a/include/hw/ssi/pnv_spi.h b/include/hw/ssi/pnv_spi.h
new file mode 100644
index 0000000..c591a06
--- /dev/null
+++ b/include/hw/ssi/pnv_spi.h
@@ -0,0 +1,72 @@
+/*
+ * QEMU PowerPC SPI model
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This model supports a connection to a single SPI responder.
+ * Introduced for P10 to provide access to SPI seeproms, TPM, flash device
+ * and an ADC controller.
+ *
+ * All SPI function control is mapped into the SPI register space to enable
+ * full control by firmware.
+ *
+ * The SPI controller has a sequencer and a shift engine. The shift engine
+ * performs serialization and de-serialization under the control of the
+ * sequencer and according to the setup defined in the configuration
+ * registers; the sequencer implements the main control logic.
+ */
+
+#ifndef PPC_PNV_SPI_H
+#define PPC_PNV_SPI_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_PNV_SPI "pnv-spi"
+OBJECT_DECLARE_SIMPLE_TYPE(PnvSpi, PNV_SPI)
+
+#define PNV_SPI_REG_SIZE 8
+#define PNV_SPI_REGS 7
+
+#define TYPE_PNV_SPI_BUS "spi"
+typedef struct PnvSpi {
+ SysBusDevice parent_obj;
+
+ SSIBus *ssi_bus;
+ qemu_irq *cs_line;
+ MemoryRegion xscom_spic_regs;
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+ uint8_t fail_count; /* RDR Match failure counter */
+ /* SPI object number */
+ uint32_t spic_num;
+ uint32_t chip_id;
+ uint8_t transfer_len;
+ uint8_t responder_select;
+ /* To verify if shift_n1 happens prior to shift_n2 */
+ bool shift_n1_done;
+ /* Loop counter for branch operation opcode Ex/Fx */
+ uint8_t loop_counter_1;
+ uint8_t loop_counter_2;
+ /* N1/N2_bits specifies the size of the N1/N2 segment of a frame in bits.*/
+ uint8_t N1_bits;
+ uint8_t N2_bits;
+ /* Number of bytes in a payload for the N1/N2 frame segment.*/
+ uint8_t N1_bytes;
+ uint8_t N2_bytes;
+ /* Number of N1/N2 bytes marked for transmit */
+ uint8_t N1_tx;
+ uint8_t N2_tx;
+ /* Number of N1/N2 bytes marked for receive */
+ uint8_t N1_rx;
+ uint8_t N2_rx;
+
+ /* SPI registers */
+ uint64_t regs[PNV_SPI_REGS];
+ uint8_t seq_op[PNV_SPI_REG_SIZE];
+ uint64_t status;
+} PnvSpi;
+#endif /* PPC_PNV_SPI_H */
diff --git a/include/hw/ssi/pnv_spi_regs.h b/include/hw/ssi/pnv_spi_regs.h
new file mode 100644
index 0000000..596e2c1
--- /dev/null
+++ b/include/hw/ssi/pnv_spi_regs.h
@@ -0,0 +1,133 @@
+/*
+ * QEMU PowerPC SPI model
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PNV_SPI_CONTROLLER_REGS_H
+#define PNV_SPI_CONTROLLER_REGS_H
+
+/*
+ * Macros copied from target/ppc/cpu.h, since that target-specific
+ * header cannot be included here.
+ */
+#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
+#define PPC_BIT8(bit) (0x80 >> (bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
+#define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1)
+#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m))
+#define SETFIELD(m, v, val) \
+ (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
+
+/* Error Register */
+#define ERROR_REG 0x00
+
+/* counter_config_reg */
+#define SPI_CTR_CFG_REG 0x01
+#define SPI_CTR_CFG_N1 PPC_BITMASK(0, 7)
+#define SPI_CTR_CFG_N2 PPC_BITMASK(8, 15)
+#define SPI_CTR_CFG_CMP1 PPC_BITMASK(24, 31)
+#define SPI_CTR_CFG_CMP2 PPC_BITMASK(32, 39)
+#define SPI_CTR_CFG_N1_CTRL_B1 PPC_BIT(49)
+#define SPI_CTR_CFG_N1_CTRL_B2 PPC_BIT(50)
+#define SPI_CTR_CFG_N1_CTRL_B3 PPC_BIT(51)
+#define SPI_CTR_CFG_N2_CTRL_B0 PPC_BIT(52)
+#define SPI_CTR_CFG_N2_CTRL_B1 PPC_BIT(53)
+#define SPI_CTR_CFG_N2_CTRL_B2 PPC_BIT(54)
+#define SPI_CTR_CFG_N2_CTRL_B3 PPC_BIT(55)
+
+/* config_reg */
+#define CONFIG_REG1 0x02
+
+/* clock_config_reset_control_ecc_enable_reg */
+#define SPI_CLK_CFG_REG 0x03
+#define SPI_CLK_CFG_HARD_RST 0x0084000000000000
+#define SPI_CLK_CFG_RST_CTRL PPC_BITMASK(24, 27)
+#define SPI_CLK_CFG_ECC_EN PPC_BIT(28)
+#define SPI_CLK_CFG_ECC_CTRL PPC_BITMASK(29, 30)
+
+/* memory_mapping_reg */
+#define SPI_MM_REG 0x04
+#define SPI_MM_RDR_MATCH_VAL PPC_BITMASK(32, 47)
+#define SPI_MM_RDR_MATCH_MASK PPC_BITMASK(48, 63)
+
+/* transmit_data_reg */
+#define SPI_XMIT_DATA_REG 0x05
+
+/* receive_data_reg */
+#define SPI_RCV_DATA_REG 0x06
+
+/* sequencer_operation_reg */
+#define SPI_SEQ_OP_REG 0x07
+
+/* status_reg */
+#define SPI_STS_REG 0x08
+#define SPI_STS_RDR_FULL PPC_BIT(0)
+#define SPI_STS_RDR_OVERRUN PPC_BIT(1)
+#define SPI_STS_RDR_UNDERRUN PPC_BIT(2)
+#define SPI_STS_TDR_FULL PPC_BIT(4)
+#define SPI_STS_TDR_OVERRUN PPC_BIT(5)
+#define SPI_STS_TDR_UNDERRUN PPC_BIT(6)
+#define SPI_STS_SEQ_FSM PPC_BITMASK(8, 15)
+#define SPI_STS_SHIFTER_FSM PPC_BITMASK(16, 27)
+#define SPI_STS_SEQ_INDEX PPC_BITMASK(28, 31)
+#define SPI_STS_GEN_STATUS_B3 PPC_BIT(35)
+#define SPI_STS_RDR PPC_BITMASK(1, 3)
+#define SPI_STS_TDR PPC_BITMASK(5, 7)
+
+/*
+ * Shifter states
+ *
+ * These are the same values defined for the Shifter FSM field of the
+ * status register. It's a 12 bit field so we will represent it as three
+ * nibbles in the constants.
+ *
+ * These are shifter_fsm values
+ *
+ * Status reg bits 16-27 -> field bits 0-11
+ * bits 0,1,2,5 unused/reserved
+ * bit 4 crc shift in (unused)
+ * bit 8 crc shift out (unused)
+ */
+
+#define FSM_DONE 0x100 /* bit 3 */
+#define FSM_SHIFT_N2 0x020 /* bit 6 */
+#define FSM_WAIT 0x010 /* bit 7 */
+#define FSM_SHIFT_N1 0x004 /* bit 9 */
+#define FSM_START 0x002 /* bit 10 */
+#define FSM_IDLE 0x001 /* bit 11 */
+
+/*
+ * Sequencer states
+ *
+ * These are sequencer_fsm values
+ *
+ * Status reg bits 8-15 -> field bits 0-7
+ * bits 0-3 unused/reserved
+ *
+ */
+#define SEQ_STATE_INDEX_INCREMENT 0x08 /* bit 4 */
+#define SEQ_STATE_EXECUTE 0x04 /* bit 5 */
+#define SEQ_STATE_DECODE 0x02 /* bit 6 */
+#define SEQ_STATE_IDLE 0x01 /* bit 7 */
+
+/*
+ * These are the supported sequencer operations.
+ * Only the upper nibble is significant because for many operations
+ * the lower nibble is a variable specific to the operation.
+ */
+#define SEQ_OP_STOP 0x00
+#define SEQ_OP_SELECT_SLAVE 0x10
+#define SEQ_OP_SHIFT_N1 0x30
+#define SEQ_OP_SHIFT_N2 0x40
+#define SEQ_OP_BRANCH_IFNEQ_RDR 0x60
+#define SEQ_OP_TRANSFER_TDR 0xC0
+#define SEQ_OP_BRANCH_IFNEQ_INC_1 0xE0
+#define SEQ_OP_BRANCH_IFNEQ_INC_2 0xF0
+#define NUM_SEQ_OPS 8
+
+#endif
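
The copied macros use IBM bit numbering (bit 0 is the MSB), so GETFIELD()
and SETFIELD() shift relative to the least significant bit of the mask, and
sequencer opcodes are matched on their upper nibble only. A small sketch,
not part of the patch:

    /* Sketch: decode a status value and a sequencer opcode. */
    static bool pnv_spi_in_shift_n1_sketch(uint64_t status, uint8_t op)
    {
        uint16_t shifter = GETFIELD(SPI_STS_SHIFTER_FSM, status); /* bits 16-27 */
        uint8_t seq_index = GETFIELD(SPI_STS_SEQ_INDEX, status);  /* bits 28-31 */

        /*
         * Only the upper nibble selects the operation; the lower nibble
         * is an operation-specific argument.
         */
        return shifter == FSM_SHIFT_N1 &&
               (op & 0xF0) == SEQ_OP_SHIFT_N1 &&
               seq_index < PNV_SPI_REG_SIZE;
    }
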
diff --git a/include/hw/sysbus.h b/include/hw/sysbus.h
index 3cb29a4..7dc88aa 100644
--- a/include/hw/sysbus.h
+++ b/include/hw/sysbus.h
@@ -4,7 +4,7 @@
/* Devices attached directly to the main system bus. */
#include "hw/qdev-core.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
#define QDEV_MAX_MMIO 32
@@ -19,6 +19,8 @@ DECLARE_INSTANCE_CHECKER(BusState, SYSTEM_BUS,
OBJECT_DECLARE_TYPE(SysBusDevice, SysBusDeviceClass,
SYS_BUS_DEVICE)
+#define TYPE_DYNAMIC_SYS_BUS_DEVICE "dynamic-sysbus-device"
+
/**
* SysBusDeviceClass:
*
@@ -82,7 +84,6 @@ qemu_irq sysbus_get_connected_irq(SysBusDevice *dev, int n);
void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr);
void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr,
int priority);
-void sysbus_mmio_unmap(SysBusDevice *dev, int n);
bool sysbus_realize(SysBusDevice *dev, Error **errp);
bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp);
diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h
index 07dc6b6..a850625 100644
--- a/include/hw/timer/aspeed_timer.h
+++ b/include/hw/timer/aspeed_timer.h
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
*/
#ifndef ASPEED_TIMER_H
#define ASPEED_TIMER_H
@@ -32,6 +31,7 @@ OBJECT_DECLARE_TYPE(AspeedTimerCtrlState, AspeedTimerClass, ASPEED_TIMER)
#define TYPE_ASPEED_2500_TIMER TYPE_ASPEED_TIMER "-ast2500"
#define TYPE_ASPEED_2600_TIMER TYPE_ASPEED_TIMER "-ast2600"
#define TYPE_ASPEED_1030_TIMER TYPE_ASPEED_TIMER "-ast1030"
+#define TYPE_ASPEED_2700_TIMER TYPE_ASPEED_TIMER "-ast2700"
#define ASPEED_TIMER_NR_TIMERS 8
diff --git a/include/hw/timer/hpet.h b/include/hw/timer/hpet.h
index d17a8d4..c2656f7 100644
--- a/include/hw/timer/hpet.h
+++ b/include/hw/timer/hpet.h
@@ -58,7 +58,6 @@
#define HPET_TN_CFG_WRITE_MASK 0x7f4e
#define HPET_TN_INT_ROUTE_SHIFT 9
#define HPET_TN_INT_ROUTE_CAP_SHIFT 32
-#define HPET_TN_CFG_BITS_READONLY_OR_RESERVED 0xffff80b1U
struct hpet_fw_entry
{
@@ -74,7 +73,7 @@ struct hpet_fw_config
struct hpet_fw_entry hpet[8];
} QEMU_PACKED;
-extern struct hpet_fw_config hpet_cfg;
+extern struct hpet_fw_config hpet_fw_cfg;
#define TYPE_HPET "hpet"
diff --git a/include/hw/timer/imx_gpt.h b/include/hw/timer/imx_gpt.h
index 5a1230d..5488f7e 100644
--- a/include/hw/timer/imx_gpt.h
+++ b/include/hw/timer/imx_gpt.h
@@ -80,6 +80,7 @@
#define TYPE_IMX6_GPT "imx6.gpt"
#define TYPE_IMX6UL_GPT "imx6ul.gpt"
#define TYPE_IMX7_GPT "imx7.gpt"
+#define TYPE_IMX8MP_GPT "imx8mp.gpt"
#define TYPE_IMX_GPT TYPE_IMX25_GPT
diff --git a/include/hw/timer/npcm7xx_timer.h b/include/hw/timer/npcm7xx_timer.h
index d45c051..e287375 100644
--- a/include/hw/timer/npcm7xx_timer.h
+++ b/include/hw/timer/npcm7xx_timer.h
@@ -16,7 +16,7 @@
#ifndef NPCM7XX_TIMER_H
#define NPCM7XX_TIMER_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/sysbus.h"
#include "qemu/timer.h"
diff --git a/include/hw/tricore/triboard.h b/include/hw/tricore/triboard.h
index 4fdd2d7..ca49a0c 100644
--- a/include/hw/tricore/triboard.h
+++ b/include/hw/tricore/triboard.h
@@ -20,8 +20,8 @@
#include "qapi/error.h"
#include "hw/boards.h"
-#include "sysemu/sysemu.h"
-#include "exec/address-spaces.h"
+#include "system/system.h"
+#include "system/address-spaces.h"
#include "qom/object.h"
#include "hw/tricore/tc27x_soc.h"
diff --git a/include/hw/tricore/tricore.h b/include/hw/tricore/tricore.h
index c19ed3f..4ffc0fe 100644
--- a/include/hw/tricore/tricore.h
+++ b/include/hw/tricore/tricore.h
@@ -1,7 +1,7 @@
#ifndef HW_TRICORE_H
#define HW_TRICORE_H
-#include "exec/memory.h"
+#include "system/memory.h"
struct tricore_boot_info {
uint64_t ram_size;
diff --git a/include/hw/uefi/hardware-info.h b/include/hw/uefi/hardware-info.h
new file mode 100644
index 0000000..94c38cf
--- /dev/null
+++ b/include/hw/uefi/hardware-info.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * pass hardware information to uefi
+ *
+ * see OvmfPkg/Library/HardwareInfoLib/ in edk2
+ */
+#ifndef QEMU_UEFI_HARDWARE_INFO_H
+#define QEMU_UEFI_HARDWARE_INFO_H
+
+/* data structures */
+
+typedef enum {
+ HardwareInfoTypeUndefined = 0,
+ HardwareInfoTypeHostBridge = 1,
+ HardwareInfoQemuUefiVars = 2,
+} HARDWARE_INFO_TYPE;
+
+typedef struct {
+ union {
+ uint64_t uint64;
+ HARDWARE_INFO_TYPE value;
+ } type;
+ uint64_t size;
+} HARDWARE_INFO_HEADER;
+
+typedef struct {
+ uint64_t mmio_address;
+} HARDWARE_INFO_SIMPLE_DEVICE;
+
+/* qemu functions */
+
+void hardware_info_register(HARDWARE_INFO_TYPE type, void *info, uint64_t size);
+
+#endif /* QEMU_UEFI_HARDWARE_INFO_H */
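
A sketch of how a device model might publish information through this
header; the address value and helper name are hypothetical:

    /* Sketch: publish an MMIO base address for the firmware to pick up. */
    static void publish_uefi_vars_mmio_sketch(uint64_t mmio_base)
    {
        HARDWARE_INFO_SIMPLE_DEVICE hi = {
            .mmio_address = mmio_base,
        };

        hardware_info_register(HardwareInfoQemuUefiVars, &hi, sizeof(hi));
    }
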
diff --git a/include/hw/uefi/var-service-api.h b/include/hw/uefi/var-service-api.h
new file mode 100644
index 0000000..0d71638
--- /dev/null
+++ b/include/hw/uefi/var-service-api.h
@@ -0,0 +1,48 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi-vars device - API of the virtual device for guest/host communication.
+ */
+#ifndef QEMU_UEFI_VAR_SERVICE_API_H
+#define QEMU_UEFI_VAR_SERVICE_API_H
+
+/* qom: device names */
+#define TYPE_UEFI_VARS_X64 "uefi-vars-x64"
+#define TYPE_UEFI_VARS_SYSBUS "uefi-vars-sysbus"
+
+/* sysbus: fdt node path */
+#define UEFI_VARS_FDT_NODE "qemu-uefi-vars"
+#define UEFI_VARS_FDT_COMPAT "qemu,uefi-vars"
+
+/* registers */
+#define UEFI_VARS_REG_MAGIC 0x00 /* 16 bit */
+#define UEFI_VARS_REG_CMD_STS 0x02 /* 16 bit */
+#define UEFI_VARS_REG_BUFFER_SIZE 0x04 /* 32 bit */
+#define UEFI_VARS_REG_DMA_BUFFER_ADDR_LO 0x08 /* 32 bit */
+#define UEFI_VARS_REG_DMA_BUFFER_ADDR_HI 0x0c /* 32 bit */
+#define UEFI_VARS_REG_PIO_BUFFER_TRANSFER 0x10 /* 8-64 bit */
+#define UEFI_VARS_REG_PIO_BUFFER_CRC32C 0x18 /* 32 bit (read-only) */
+#define UEFI_VARS_REG_FLAGS 0x1c /* 32 bit */
+#define UEFI_VARS_REGS_SIZE 0x20
+
+/* flags register */
+#define UEFI_VARS_FLAG_USE_PIO (1 << 0)
+
+/* magic value */
+#define UEFI_VARS_MAGIC_VALUE 0xef1
+
+/* command values */
+#define UEFI_VARS_CMD_RESET 0x01
+#define UEFI_VARS_CMD_DMA_MM 0x02
+#define UEFI_VARS_CMD_PIO_MM 0x03
+#define UEFI_VARS_CMD_PIO_ZERO_OFFSET 0x04
+
+/* status values */
+#define UEFI_VARS_STS_SUCCESS 0x00
+#define UEFI_VARS_STS_BUSY 0x01
+#define UEFI_VARS_STS_ERR_UNKNOWN 0x10
+#define UEFI_VARS_STS_ERR_NOT_SUPPORTED 0x11
+#define UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE 0x12
+
+
+#endif /* QEMU_UEFI_VAR_SERVICE_API_H */
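
Purely as an illustration of the register map, a guest-side DMA call could
look roughly like this; the mmio_* accessors are placeholders rather than a
real API, and the exact handshake is defined by the device implementation,
not by this header:

    /* Illustrative guest-side sketch; mmio_write*/mmio_read* are placeholders. */
    static void uefi_vars_dma_call_sketch(uint64_t base, uint64_t buf_addr)
    {
        mmio_write32(base + UEFI_VARS_REG_DMA_BUFFER_ADDR_LO,
                     (uint32_t)buf_addr);
        mmio_write32(base + UEFI_VARS_REG_DMA_BUFFER_ADDR_HI,
                     (uint32_t)(buf_addr >> 32));
        mmio_write16(base + UEFI_VARS_REG_CMD_STS, UEFI_VARS_CMD_DMA_MM);

        while (mmio_read16(base + UEFI_VARS_REG_CMD_STS) == UEFI_VARS_STS_BUSY) {
            /* poll until the command completes */
        }
    }
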
diff --git a/include/hw/uefi/var-service-edk2.h b/include/hw/uefi/var-service-edk2.h
new file mode 100644
index 0000000..c743a8d
--- /dev/null
+++ b/include/hw/uefi/var-service-edk2.h
@@ -0,0 +1,227 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi-vars device - structs and defines from edk2
+ *
+ * Note: The edk2 UINTN type has been mapped to uint64_t,
+ * so the structs are compatible with 64bit edk2 builds.
+ */
+#ifndef QEMU_UEFI_VAR_SERVICE_EDK2_H
+#define QEMU_UEFI_VAR_SERVICE_EDK2_H
+
+#include "qemu/uuid.h"
+
+#define MAX_BIT 0x8000000000000000ULL
+#define ENCODE_ERROR(StatusCode) (MAX_BIT | (StatusCode))
+#define EFI_SUCCESS 0
+#define EFI_INVALID_PARAMETER ENCODE_ERROR(2)
+#define EFI_UNSUPPORTED ENCODE_ERROR(3)
+#define EFI_BAD_BUFFER_SIZE ENCODE_ERROR(4)
+#define EFI_BUFFER_TOO_SMALL ENCODE_ERROR(5)
+#define EFI_WRITE_PROTECTED ENCODE_ERROR(8)
+#define EFI_OUT_OF_RESOURCES ENCODE_ERROR(9)
+#define EFI_NOT_FOUND ENCODE_ERROR(14)
+#define EFI_ACCESS_DENIED ENCODE_ERROR(15)
+#define EFI_ALREADY_STARTED ENCODE_ERROR(20)
+#define EFI_SECURITY_VIOLATION ENCODE_ERROR(26)
+
+#define EFI_VARIABLE_NON_VOLATILE 0x01
+#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x02
+#define EFI_VARIABLE_RUNTIME_ACCESS 0x04
+#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x08
+#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x10 /* deprecated */
+#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x20
+#define EFI_VARIABLE_APPEND_WRITE 0x40
+
+/* SecureBootEnable */
+#define SECURE_BOOT_ENABLE 1
+#define SECURE_BOOT_DISABLE 0
+
+/* SecureBoot */
+#define SECURE_BOOT_MODE_ENABLE 1
+#define SECURE_BOOT_MODE_DISABLE 0
+
+/* CustomMode */
+#define CUSTOM_SECURE_BOOT_MODE 1
+#define STANDARD_SECURE_BOOT_MODE 0
+
+/* SetupMode */
+#define SETUP_MODE 1
+#define USER_MODE 0
+
+typedef uint64_t efi_status;
+typedef struct mm_header mm_header;
+
+/* EFI_MM_COMMUNICATE_HEADER */
+struct mm_header {
+ QemuUUID guid;
+ uint64_t length;
+};
+
+/* --- EfiSmmVariableProtocol ---------------------------------------- */
+
+#define SMM_VARIABLE_FUNCTION_GET_VARIABLE 1
+#define SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME 2
+#define SMM_VARIABLE_FUNCTION_SET_VARIABLE 3
+#define SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO 4
+#define SMM_VARIABLE_FUNCTION_READY_TO_BOOT 5
+#define SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE 6
+#define SMM_VARIABLE_FUNCTION_LOCK_VARIABLE 8
+#define SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE 11
+
+typedef struct mm_variable mm_variable;
+typedef struct mm_variable_access mm_variable_access;
+typedef struct mm_next_variable mm_next_variable;
+typedef struct mm_next_variable mm_lock_variable;
+typedef struct mm_variable_info mm_variable_info;
+typedef struct mm_get_payload_size mm_get_payload_size;
+
+/* SMM_VARIABLE_COMMUNICATE_HEADER */
+struct mm_variable {
+ uint64_t function;
+ uint64_t status;
+};
+
+/* SMM_VARIABLE_COMMUNICATE_ACCESS_VARIABLE */
+struct QEMU_PACKED mm_variable_access {
+ QemuUUID guid;
+ uint64_t data_size;
+ uint64_t name_size;
+ uint32_t attributes;
+ /* Name */
+ /* Data */
+};
+
+/* SMM_VARIABLE_COMMUNICATE_GET_NEXT_VARIABLE_NAME */
+struct mm_next_variable {
+ QemuUUID guid;
+ uint64_t name_size;
+ /* Name */
+};
+
+/* SMM_VARIABLE_COMMUNICATE_QUERY_VARIABLE_INFO */
+struct QEMU_PACKED mm_variable_info {
+ uint64_t max_storage_size;
+ uint64_t free_storage_size;
+ uint64_t max_variable_size;
+ uint32_t attributes;
+};
+
+/* SMM_VARIABLE_COMMUNICATE_GET_PAYLOAD_SIZE */
+struct mm_get_payload_size {
+ uint64_t payload_size;
+};
+
+/* --- VarCheckPolicyLibMmiHandler ----------------------------------- */
+
+#define VAR_CHECK_POLICY_COMMAND_DISABLE 0x01
+#define VAR_CHECK_POLICY_COMMAND_IS_ENABLED 0x02
+#define VAR_CHECK_POLICY_COMMAND_REGISTER 0x03
+#define VAR_CHECK_POLICY_COMMAND_DUMP 0x04
+#define VAR_CHECK_POLICY_COMMAND_LOCK 0x05
+
+typedef struct mm_check_policy mm_check_policy;
+typedef struct mm_check_policy_is_enabled mm_check_policy_is_enabled;
+typedef struct mm_check_policy_dump_params mm_check_policy_dump_params;
+
+/* VAR_CHECK_POLICY_COMM_HEADER */
+struct QEMU_PACKED mm_check_policy {
+ uint32_t signature;
+ uint32_t revision;
+ uint32_t command;
+ uint64_t result;
+};
+
+/* VAR_CHECK_POLICY_COMM_IS_ENABLED_PARAMS */
+struct QEMU_PACKED mm_check_policy_is_enabled {
+ uint8_t state;
+};
+
+/* VAR_CHECK_POLICY_COMM_DUMP_PARAMS */
+struct QEMU_PACKED mm_check_policy_dump_params {
+ uint32_t page_requested;
+ uint32_t total_size;
+ uint32_t page_size;
+ uint8_t has_more;
+};
+
+/* --- Edk2VariablePolicyProtocol ------------------------------------ */
+
+#define VARIABLE_POLICY_ENTRY_REVISION 0x00010000
+
+#define VARIABLE_POLICY_TYPE_NO_LOCK 0
+#define VARIABLE_POLICY_TYPE_LOCK_NOW 1
+#define VARIABLE_POLICY_TYPE_LOCK_ON_CREATE 2
+#define VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE 3
+
+typedef struct variable_policy_entry variable_policy_entry;
+typedef struct variable_lock_on_var_state variable_lock_on_var_state;
+
+/* VARIABLE_POLICY_ENTRY */
+struct variable_policy_entry {
+ uint32_t version;
+ uint16_t size;
+ uint16_t offset_to_name;
+ QemuUUID namespace;
+ uint32_t min_size;
+ uint32_t max_size;
+ uint32_t attributes_must_have;
+ uint32_t attributes_cant_have;
+ uint8_t lock_policy_type;
+ uint8_t padding[3];
+ /* LockPolicy */
+ /* Name */
+};
+
+/* VARIABLE_LOCK_ON_VAR_STATE_POLICY */
+struct variable_lock_on_var_state {
+ QemuUUID namespace;
+ uint8_t value;
+ uint8_t padding;
+ /* Name */
+};
+
+/* --- variable authentication --------------------------------------- */
+
+#define WIN_CERT_TYPE_EFI_GUID 0x0EF1
+
+typedef struct efi_time efi_time;
+typedef struct efi_siglist efi_siglist;
+typedef struct variable_auth_2 variable_auth_2;
+
+/* EFI_TIME */
+struct efi_time {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ uint8_t pad1;
+ uint32_t nanosecond;
+ int16_t timezone;
+ uint8_t daylight;
+ uint8_t pad2;
+};
+
+/* EFI_SIGNATURE_LIST */
+struct efi_siglist {
+ QemuUUID guid_type;
+ uint32_t siglist_size;
+ uint32_t header_size;
+ uint32_t sig_size;
+};
+
+/* EFI_VARIABLE_AUTHENTICATION_2 */
+struct variable_auth_2 {
+ struct efi_time timestamp;
+
+ /* WIN_CERTIFICATE_UEFI_GUID */
+ uint32_t hdr_length;
+ uint16_t hdr_revision;
+ uint16_t hdr_cert_type;
+ QemuUUID guid_cert_type;
+ uint8_t cert_data[];
+};
+
+#endif /* QEMU_UEFI_VAR_SERVICE_EDK2_H */
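
Since ENCODE_ERROR() just sets the top bit on the EFI status code, the
resulting constants can be spot-checked at compile time; a sketch using
QEMU_BUILD_BUG_ON(), not part of the patch:

    /* Sketch: the error constants are EFI status codes with MAX_BIT set. */
    QEMU_BUILD_BUG_ON(EFI_SUCCESS != 0);
    QEMU_BUILD_BUG_ON(EFI_BUFFER_TOO_SMALL != 0x8000000000000005ULL);
    QEMU_BUILD_BUG_ON(EFI_NOT_FOUND != 0x800000000000000EULL);
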
diff --git a/include/hw/uefi/var-service.h b/include/hw/uefi/var-service.h
new file mode 100644
index 0000000..f7ceac4
--- /dev/null
+++ b/include/hw/uefi/var-service.h
@@ -0,0 +1,191 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * uefi-vars device - state struct and function prototypes
+ */
+#ifndef QEMU_UEFI_VAR_SERVICE_H
+#define QEMU_UEFI_VAR_SERVICE_H
+
+#include "qemu/uuid.h"
+#include "qemu/queue.h"
+
+#include "hw/uefi/var-service-edk2.h"
+
+#define MAX_BUFFER_SIZE (64 * 1024)
+
+typedef struct uefi_variable uefi_variable;
+typedef struct uefi_var_policy uefi_var_policy;
+typedef struct uefi_vars_state uefi_vars_state;
+
+typedef struct uefi_vars_cert uefi_vars_cert;
+typedef struct uefi_vars_hash uefi_vars_hash;
+typedef struct uefi_vars_siglist uefi_vars_siglist;
+
+struct uefi_variable {
+ QemuUUID guid;
+ uint16_t *name;
+ uint32_t name_size;
+ uint32_t attributes;
+ void *data;
+ uint32_t data_size;
+ efi_time time;
+ void *digest;
+ uint32_t digest_size;
+ QTAILQ_ENTRY(uefi_variable) next;
+};
+
+struct uefi_var_policy {
+ variable_policy_entry *entry;
+ uint32_t entry_size;
+ uint16_t *name;
+ uint32_t name_size;
+
+ /* number of hashmarks (wildcard character) in name */
+ uint32_t hashmarks;
+
+ QTAILQ_ENTRY(uefi_var_policy) next;
+};
+
+struct uefi_vars_state {
+ MemoryRegion mr;
+ uint16_t sts;
+ uint32_t buf_size;
+ uint32_t buf_addr_lo;
+ uint32_t buf_addr_hi;
+ uint8_t *buffer;
+ QTAILQ_HEAD(, uefi_variable) variables;
+ QTAILQ_HEAD(, uefi_var_policy) var_policies;
+
+ /* pio transfer buffer */
+ uint32_t pio_xfer_offset;
+ uint8_t *pio_xfer_buffer;
+
+ /* boot phases */
+ bool end_of_dxe;
+ bool ready_to_boot;
+ bool exit_boot_service;
+ bool policy_locked;
+
+ /* storage accounting */
+ uint64_t max_storage;
+ uint64_t used_storage;
+
+ /* config options */
+ char *jsonfile;
+ int jsonfd;
+ bool force_secure_boot;
+ bool disable_custom_mode;
+ bool use_pio;
+};
+
+struct uefi_vars_cert {
+ QTAILQ_ENTRY(uefi_vars_cert) next;
+ QemuUUID owner;
+ uint64_t size;
+ uint8_t data[];
+};
+
+struct uefi_vars_hash {
+ QTAILQ_ENTRY(uefi_vars_hash) next;
+ QemuUUID owner;
+ uint8_t data[];
+};
+
+struct uefi_vars_siglist {
+ QTAILQ_HEAD(, uefi_vars_cert) x509;
+ QTAILQ_HEAD(, uefi_vars_hash) sha256;
+};
+
+/* vars-service-guid.c */
+extern const QemuUUID EfiGlobalVariable;
+extern const QemuUUID EfiImageSecurityDatabase;
+extern const QemuUUID EfiCustomModeEnable;
+extern const QemuUUID EfiSecureBootEnableDisable;
+
+extern const QemuUUID EfiCertSha256Guid;
+extern const QemuUUID EfiCertSha384Guid;
+extern const QemuUUID EfiCertSha512Guid;
+extern const QemuUUID EfiCertRsa2048Guid;
+extern const QemuUUID EfiCertX509Guid;
+extern const QemuUUID EfiCertTypePkcs7Guid;
+
+extern const QemuUUID EfiSmmVariableProtocolGuid;
+extern const QemuUUID VarCheckPolicyLibMmiHandlerGuid;
+
+extern const QemuUUID EfiEndOfDxeEventGroupGuid;
+extern const QemuUUID EfiEventReadyToBootGuid;
+extern const QemuUUID EfiEventExitBootServicesGuid;
+
+/* vars-service-utils.c */
+gboolean uefi_str_is_valid(const uint16_t *str, size_t len,
+ gboolean must_be_null_terminated);
+size_t uefi_strlen(const uint16_t *str, size_t len);
+gboolean uefi_str_equal_ex(const uint16_t *a, size_t alen,
+ const uint16_t *b, size_t blen,
+ gboolean wildcards_in_a);
+gboolean uefi_str_equal(const uint16_t *a, size_t alen,
+ const uint16_t *b, size_t blen);
+char *uefi_ucs2_to_ascii(const uint16_t *ucs2, uint64_t ucs2_size);
+int uefi_time_compare(efi_time *a, efi_time *b);
+void uefi_trace_variable(const char *action, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size);
+void uefi_trace_status(const char *action, efi_status status);
+
+/* vars-service-core.c */
+extern const VMStateDescription vmstate_uefi_vars;
+void uefi_vars_init(Object *obj, uefi_vars_state *uv);
+void uefi_vars_realize(uefi_vars_state *uv, Error **errp);
+void uefi_vars_hard_reset(uefi_vars_state *uv);
+
+/* vars-service-json.c */
+void uefi_vars_json_init(uefi_vars_state *uv, Error **errp);
+void uefi_vars_json_save(uefi_vars_state *uv);
+void uefi_vars_json_load(uefi_vars_state *uv, Error **errp);
+
+/* vars-service-vars.c */
+extern const VMStateDescription vmstate_uefi_variable;
+uefi_variable *uefi_vars_find_variable(uefi_vars_state *uv, QemuUUID guid,
+ const uint16_t *name,
+ uint64_t name_size);
+void uefi_vars_set_variable(uefi_vars_state *uv, QemuUUID guid,
+ const uint16_t *name, uint64_t name_size,
+ uint32_t attributes,
+ void *data, uint64_t data_size);
+void uefi_vars_clear_volatile(uefi_vars_state *uv);
+void uefi_vars_clear_all(uefi_vars_state *uv);
+void uefi_vars_update_storage(uefi_vars_state *uv);
+uint32_t uefi_vars_mm_vars_proto(uefi_vars_state *uv);
+
+/* vars-service-auth.c */
+bool uefi_vars_is_sb_pk(uefi_variable *var);
+bool uefi_vars_is_sb_any(uefi_variable *var);
+efi_status uefi_vars_check_auth_2(uefi_vars_state *uv, uefi_variable *var,
+ mm_variable_access *va, void *data);
+efi_status uefi_vars_check_secure_boot(uefi_vars_state *uv, uefi_variable *var);
+void uefi_vars_auth_init(uefi_vars_state *uv);
+
+/* vars-service-pkcs7.c */
+efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist,
+ void **digest, uint32_t *digest_size,
+ mm_variable_access *va, void *data);
+
+/* vars-service-siglist.c */
+void uefi_vars_siglist_init(uefi_vars_siglist *siglist);
+void uefi_vars_siglist_free(uefi_vars_siglist *siglist);
+void uefi_vars_siglist_parse(uefi_vars_siglist *siglist,
+ void *data, uint64_t size);
+uint64_t uefi_vars_siglist_blob_size(uefi_vars_siglist *siglist);
+void uefi_vars_siglist_blob_generate(uefi_vars_siglist *siglist,
+ void *data, uint64_t size);
+
+/* vars-service-policy.c */
+extern const VMStateDescription vmstate_uefi_var_policy;
+efi_status uefi_vars_policy_check(uefi_vars_state *uv,
+ uefi_variable *var,
+ gboolean is_newvar);
+void uefi_vars_policies_clear(uefi_vars_state *uv);
+uefi_var_policy *uefi_vars_add_policy(uefi_vars_state *uv,
+ variable_policy_entry *pe);
+uint32_t uefi_vars_mm_check_policy_proto(uefi_vars_state *uv);
+
+#endif /* QEMU_UEFI_VAR_SERVICE_H */
diff --git a/include/hw/usb.h b/include/hw/usb.h
index d46d967..26a9f3e 100644
--- a/include/hw/usb.h
+++ b/include/hw/usb.h
@@ -25,7 +25,7 @@
* THE SOFTWARE.
*/
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "qemu/iov.h"
#include "qemu/queue.h"
@@ -579,16 +579,6 @@ void usb_pcap_init(FILE *fp);
void usb_pcap_ctrl(USBPacket *p, bool setup);
void usb_pcap_data(USBPacket *p, bool setup);
-static inline USBDevice *usb_new(const char *name)
-{
- return USB_DEVICE(qdev_new(name));
-}
-
-static inline USBDevice *usb_try_new(const char *name)
-{
- return USB_DEVICE(qdev_try_new(name));
-}
-
static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp)
{
return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
@@ -596,7 +586,7 @@ static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **er
static inline USBDevice *usb_create_simple(USBBus *bus, const char *name)
{
- USBDevice *dev = usb_new(name);
+ USBDevice *dev = USB_DEVICE(qdev_new(name));
usb_realize_and_unref(dev, bus, &error_abort);
return dev;
diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h
index 0bf3f2a..b8b4266 100644
--- a/include/hw/usb/dwc2-regs.h
+++ b/include/hw/usb/dwc2-regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
* Imported from the Linux kernel file drivers/usb/dwc2/hw.h, commit
* a89bae709b3492b478480a2c9734e7e9393b279c ("usb: dwc2: Move
@@ -838,7 +838,7 @@
struct dwc2_dma_desc {
uint32_t status;
uint32_t buf;
-} __packed;
+} QEMU_PACKED;
/* Host Mode DMA descriptor status quadlet */
diff --git a/include/hw/usb/hcd-dwc3.h b/include/hw/usb/hcd-dwc3.h
index f752a27..dbdf12b 100644
--- a/include/hw/usb/hcd-dwc3.h
+++ b/include/hw/usb/hcd-dwc3.h
@@ -35,7 +35,7 @@
#define USB_DWC3(obj) \
OBJECT_CHECK(USBDWC3, (obj), TYPE_USB_DWC3)
-#define USB_DWC3_R_MAX ((0x530 / 4) + 1)
+#define USB_DWC3_R_MAX (0x600 / 4)
#define DWC3_SIZE 0x10000
typedef struct USBDWC3 {
diff --git a/include/hw/usb/hcd-musb.h b/include/hw/usb/hcd-musb.h
deleted file mode 100644
index 4d4b1ec..0000000
--- a/include/hw/usb/hcd-musb.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics,
- * USB2.0 OTG compliant core used in various chips.
- *
- * Only host-mode and non-DMA accesses are currently supported.
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <balrog@zabor.org>
- *
- * SPDX-License-Identifier: GPL-2.0-or-later
- */
-
-#ifndef HW_USB_HCD_MUSB_H
-#define HW_USB_HCD_MUSB_H
-
-#include "exec/hwaddr.h"
-
-enum musb_irq_source_e {
- musb_irq_suspend = 0,
- musb_irq_resume,
- musb_irq_rst_babble,
- musb_irq_sof,
- musb_irq_connect,
- musb_irq_disconnect,
- musb_irq_vbus_request,
- musb_irq_vbus_error,
- musb_irq_rx,
- musb_irq_tx,
- musb_set_vbus,
- musb_set_session,
- /* Add new interrupts here */
- musb_irq_max /* total number of interrupts defined */
-};
-
-/* TODO convert hcd-musb to QOM/qdev and remove MUSBReadFunc/MUSBWriteFunc */
-typedef void MUSBWriteFunc(void *opaque, hwaddr addr, uint32_t value);
-typedef uint32_t MUSBReadFunc(void *opaque, hwaddr addr);
-extern MUSBReadFunc * const musb_read[];
-extern MUSBWriteFunc * const musb_write[];
-
-typedef struct MUSBState MUSBState;
-
-MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
-void musb_reset(MUSBState *s);
-uint32_t musb_core_intr_get(MUSBState *s);
-void musb_core_intr_clear(MUSBState *s, uint32_t mask);
-void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
-
-#endif
diff --git a/include/hw/usb/uhci-regs.h b/include/hw/usb/uhci-regs.h
index fd45d29..5b81714 100644
--- a/include/hw/usb/uhci-regs.h
+++ b/include/hw/usb/uhci-regs.h
@@ -1,6 +1,17 @@
#ifndef HW_USB_UHCI_REGS_H
#define HW_USB_UHCI_REGS_H
+#define UHCI_USBCMD 0
+#define UHCI_USBSTS 2
+#define UHCI_USBINTR 4
+#define UHCI_USBFRNUM 6
+#define UHCI_USBFLBASEADD 8
+#define UHCI_USBSOF 0x0c
+#define UHCI_USBPORTSC1 0x10
+#define UHCI_USBPORTSC2 0x12
+#define UHCI_USBPORTSC3 0x14
+#define UHCI_USBPORTSC4 0x16
+
#define UHCI_CMD_FGR (1 << 4)
#define UHCI_CMD_EGSM (1 << 3)
#define UHCI_CMD_GRESET (1 << 2)
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
deleted file mode 100644
index e8ddf92..0000000
--- a/include/hw/vfio/vfio-common.h
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * common header for vfio based device assignment support
- *
- * Copyright Red Hat, Inc. 2012
- *
- * Authors:
- * Alex Williamson <alex.williamson@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- * Based on qemu-kvm device-assignment:
- * Adapted for KVM by Qumranet.
- * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
- * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
- * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
- * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
- * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
- */
-
-#ifndef HW_VFIO_VFIO_COMMON_H
-#define HW_VFIO_VFIO_COMMON_H
-
-#include "exec/memory.h"
-#include "qemu/queue.h"
-#include "qemu/notify.h"
-#include "ui/console.h"
-#include "hw/display/ramfb.h"
-#ifdef CONFIG_LINUX
-#include <linux/vfio.h>
-#endif
-#include "sysemu/sysemu.h"
-#include "hw/vfio/vfio-container-base.h"
-#include "sysemu/host_iommu_device.h"
-#include "sysemu/iommufd.h"
-
-#define VFIO_MSG_PREFIX "vfio %s: "
-
-enum {
- VFIO_DEVICE_TYPE_PCI = 0,
- VFIO_DEVICE_TYPE_PLATFORM = 1,
- VFIO_DEVICE_TYPE_CCW = 2,
- VFIO_DEVICE_TYPE_AP = 3,
-};
-
-typedef struct VFIOMmap {
- MemoryRegion mem;
- void *mmap;
- off_t offset;
- size_t size;
-} VFIOMmap;
-
-typedef struct VFIORegion {
- struct VFIODevice *vbasedev;
- off_t fd_offset; /* offset of region within device fd */
- MemoryRegion *mem; /* slow, read/write access */
- size_t size;
- uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
- uint32_t nr_mmaps;
- VFIOMmap *mmaps;
- uint8_t nr; /* cache the region number for debug */
-} VFIORegion;
-
-typedef struct VFIOMigration {
- struct VFIODevice *vbasedev;
- VMChangeStateEntry *vm_state;
- NotifierWithReturn migration_state;
- uint32_t device_state;
- int data_fd;
- void *data_buffer;
- size_t data_buffer_size;
- uint64_t mig_flags;
- uint64_t precopy_init_size;
- uint64_t precopy_dirty_size;
- bool initial_data_sent;
-} VFIOMigration;
-
-struct VFIOGroup;
-
-typedef struct VFIOContainer {
- VFIOContainerBase bcontainer;
- int fd; /* /dev/vfio/vfio, empowered by the attached groups */
- unsigned iommu_type;
- QLIST_HEAD(, VFIOGroup) group_list;
-} VFIOContainer;
-
-OBJECT_DECLARE_SIMPLE_TYPE(VFIOContainer, VFIO_IOMMU_LEGACY);
-
-typedef struct VFIOHostDMAWindow {
- hwaddr min_iova;
- hwaddr max_iova;
- uint64_t iova_pgsizes;
- QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
-} VFIOHostDMAWindow;
-
-typedef struct IOMMUFDBackend IOMMUFDBackend;
-
-typedef struct VFIOIOMMUFDContainer {
- VFIOContainerBase bcontainer;
- IOMMUFDBackend *be;
- uint32_t ioas_id;
-} VFIOIOMMUFDContainer;
-
-OBJECT_DECLARE_SIMPLE_TYPE(VFIOIOMMUFDContainer, VFIO_IOMMU_IOMMUFD);
-
-typedef struct VFIODeviceOps VFIODeviceOps;
-
-typedef struct VFIODevice {
- QLIST_ENTRY(VFIODevice) next;
- QLIST_ENTRY(VFIODevice) container_next;
- QLIST_ENTRY(VFIODevice) global_next;
- struct VFIOGroup *group;
- VFIOContainerBase *bcontainer;
- char *sysfsdev;
- char *name;
- DeviceState *dev;
- int fd;
- int type;
- bool reset_works;
- bool needs_reset;
- bool no_mmap;
- bool ram_block_discard_allowed;
- OnOffAuto enable_migration;
- bool migration_events;
- VFIODeviceOps *ops;
- unsigned int num_irqs;
- unsigned int num_regions;
- unsigned int flags;
- VFIOMigration *migration;
- Error *migration_blocker;
- OnOffAuto pre_copy_dirty_page_tracking;
- bool dirty_pages_supported;
- bool dirty_tracking;
- HostIOMMUDevice *hiod;
- int devid;
- IOMMUFDBackend *iommufd;
-} VFIODevice;
-
-struct VFIODeviceOps {
- void (*vfio_compute_needs_reset)(VFIODevice *vdev);
- int (*vfio_hot_reset_multi)(VFIODevice *vdev);
- void (*vfio_eoi)(VFIODevice *vdev);
- Object *(*vfio_get_object)(VFIODevice *vdev);
-
- /**
- * @vfio_save_config
- *
- * Save device config state
- *
- * @vdev: #VFIODevice for which to save the config
- * @f: #QEMUFile where to send the data
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Returns zero to indicate success and negative for error
- */
- int (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f, Error **errp);
-
- /**
- * @vfio_load_config
- *
- * Load device config state
- *
- * @vdev: #VFIODevice for which to load the config
- * @f: #QEMUFile where to get the data
- *
- * Returns zero to indicate success and negative for error
- */
- int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
-};
-
-typedef struct VFIOGroup {
- int fd;
- int groupid;
- VFIOContainer *container;
- QLIST_HEAD(, VFIODevice) device_list;
- QLIST_ENTRY(VFIOGroup) next;
- QLIST_ENTRY(VFIOGroup) container_next;
- bool ram_block_discard_allowed;
-} VFIOGroup;
-
-#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio"
-#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO \
- TYPE_HOST_IOMMU_DEVICE_IOMMUFD "-vfio"
-
-typedef struct VFIODMABuf {
- QemuDmaBuf *buf;
- uint32_t pos_x, pos_y, pos_updates;
- uint32_t hot_x, hot_y, hot_updates;
- int dmabuf_id;
- QTAILQ_ENTRY(VFIODMABuf) next;
-} VFIODMABuf;
-
-typedef struct VFIODisplay {
- QemuConsole *con;
- RAMFBState *ramfb;
- struct vfio_region_info *edid_info;
- struct vfio_region_gfx_edid *edid_regs;
- uint8_t *edid_blob;
- QEMUTimer *edid_link_timer;
- struct {
- VFIORegion buffer;
- DisplaySurface *surface;
- } region;
- struct {
- QTAILQ_HEAD(, VFIODMABuf) bufs;
- VFIODMABuf *primary;
- VFIODMABuf *cursor;
- } dmabuf;
-} VFIODisplay;
-
-VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
-void vfio_put_address_space(VFIOAddressSpace *space);
-void vfio_address_space_insert(VFIOAddressSpace *space,
- VFIOContainerBase *bcontainer);
-
-void vfio_disable_irqindex(VFIODevice *vbasedev, int index);
-void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index);
-void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index);
-bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
- int action, int fd, Error **errp);
-void vfio_region_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size);
-uint64_t vfio_region_read(void *opaque,
- hwaddr addr, unsigned size);
-int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
- int index, const char *name);
-int vfio_region_mmap(VFIORegion *region);
-void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled);
-void vfio_region_unmap(VFIORegion *region);
-void vfio_region_exit(VFIORegion *region);
-void vfio_region_finalize(VFIORegion *region);
-void vfio_reset_handler(void *opaque);
-struct vfio_device_info *vfio_get_device_info(int fd);
-bool vfio_attach_device(char *name, VFIODevice *vbasedev,
- AddressSpace *as, Error **errp);
-void vfio_detach_device(VFIODevice *vbasedev);
-
-int vfio_kvm_device_add_fd(int fd, Error **errp);
-int vfio_kvm_device_del_fd(int fd, Error **errp);
-
-bool vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp);
-void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer);
-
-extern const MemoryRegionOps vfio_region_ops;
-typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
-typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList;
-extern VFIOGroupList vfio_group_list;
-extern VFIODeviceList vfio_device_list;
-extern const MemoryListener vfio_memory_listener;
-extern int vfio_kvm_device_fd;
-
-bool vfio_mig_active(void);
-int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
-void vfio_unblock_multiple_devices_migration(void);
-bool vfio_viommu_preset(VFIODevice *vbasedev);
-int64_t vfio_mig_bytes_transferred(void);
-void vfio_reset_bytes_transferred(void);
-bool vfio_device_state_is_running(VFIODevice *vbasedev);
-bool vfio_device_state_is_precopy(VFIODevice *vbasedev);
-
-#ifdef CONFIG_LINUX
-int vfio_get_region_info(VFIODevice *vbasedev, int index,
- struct vfio_region_info **info);
-int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
- uint32_t subtype, struct vfio_region_info **info);
-bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type);
-struct vfio_info_cap_header *
-vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id);
-bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
- unsigned int *avail);
-struct vfio_info_cap_header *
-vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id);
-struct vfio_info_cap_header *
-vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id);
-#endif
-
-bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
-void vfio_migration_exit(VFIODevice *vbasedev);
-
-int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
-bool
-vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
-bool
-vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
-int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
- VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
-int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr, Error **errp);
-
-/* Returns 0 on success, or a negative errno. */
-bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
-void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
-void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
- DeviceState *dev, bool ram_discard);
-int vfio_device_get_aw_bits(VFIODevice *vdev);
-#endif /* HW_VFIO_VFIO_COMMON_H */
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index 62a8b60..3cd86ec 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -13,7 +13,7 @@
#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H
#define HW_VFIO_VFIO_CONTAINER_BASE_H
-#include "exec/memory.h"
+#include "system/memory.h"
typedef struct VFIODevice VFIODevice;
typedef struct VFIOIOMMUClass VFIOIOMMUClass;
@@ -44,6 +44,7 @@ typedef struct VFIOContainerBase {
unsigned long pgsizes;
unsigned int dma_max_mappings;
bool dirty_pages_supported;
+ bool dirty_pages_started; /* Protected by BQL */
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
QLIST_ENTRY(VFIOContainerBase) next;
@@ -70,12 +71,17 @@ typedef struct VFIORamDiscardListener {
QLIST_ENTRY(VFIORamDiscardListener) next;
} VFIORamDiscardListener;
+VFIOAddressSpace *vfio_address_space_get(AddressSpace *as);
+void vfio_address_space_put(VFIOAddressSpace *space);
+void vfio_address_space_insert(VFIOAddressSpace *space,
+ VFIOContainerBase *bcontainer);
+
int vfio_container_dma_map(VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- void *vaddr, bool readonly);
+ void *vaddr, bool readonly, MemoryRegion *mr);
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb);
+ IOMMUTLBEntry *iotlb, bool unmap_all);
bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
MemoryRegionSection *section,
Error **errp);
@@ -83,8 +89,12 @@ void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
MemoryRegionSection *section);
int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
bool start, Error **errp);
+bool vfio_container_dirty_tracking_is_started(
+ const VFIOContainerBase *bcontainer);
+bool vfio_container_devices_dirty_tracking_is_supported(
+ const VFIOContainerBase *bcontainer);
int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
- VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
+ uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp);
GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer);
@@ -99,25 +109,106 @@ vfio_container_get_page_size_mask(const VFIOContainerBase *bcontainer)
#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy"
#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr"
#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd"
+#define TYPE_VFIO_IOMMU_USER TYPE_VFIO_IOMMU "-user"
OBJECT_DECLARE_TYPE(VFIOContainerBase, VFIOIOMMUClass, VFIO_IOMMU)
struct VFIOIOMMUClass {
ObjectClass parent_class;
- /* Properties */
- const char *hiod_typename;
-
- /* basic feature */
+ /**
+ * @setup
+ *
+ * Perform basic setup of the container, including configuring IOMMU
+ * capabilities, IOVA ranges, supported page sizes, etc.
+ *
+ * @bcontainer: #VFIOContainerBase
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns true to indicate success and false for error.
+ */
bool (*setup)(VFIOContainerBase *bcontainer, Error **errp);
+
+ /**
+ * @listener_begin
+ *
+ * Called at the beginning of an address space update transaction.
+ * See #MemoryListener.
+ *
+ * @bcontainer: #VFIOContainerBase
+ */
+ void (*listener_begin)(VFIOContainerBase *bcontainer);
+
+ /**
+ * @listener_commit
+ *
+ * Called at the end of an address space update transaction.
+ * See #MemoryListener.
+ *
+ * @bcontainer: #VFIOContainerBase
+ */
+ void (*listener_commit)(VFIOContainerBase *bcontainer);
+
+ /**
+ * @dma_map
+ *
+ * Map an address range into the container. Note that the memory region is
+ * referenced within an RCU read lock region across this call.
+ *
+ * @bcontainer: #VFIOContainerBase to use
+ * @iova: start address to map
+ * @size: size of the range to map
+ * @vaddr: process virtual address of mapping
+ * @readonly: true if mapping should be readonly
+ * @mr: the memory region for this mapping
+ *
+ * Returns 0 to indicate success and -errno otherwise.
+ */
int (*dma_map)(const VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- void *vaddr, bool readonly);
+ void *vaddr, bool readonly, MemoryRegion *mr);
+ /**
+ * @dma_unmap
+ *
+ * Unmap an address range from the container.
+ *
+ * @bcontainer: #VFIOContainerBase to use for unmap
+ * @iova: start address to unmap
+ * @size: size of the range to unmap
+ * @iotlb: The IOMMU TLB mapping entry (or NULL)
+ * @unmap_all: if set, unmap the entire address space
+ *
+ * Returns 0 to indicate success and -errno otherwise.
+ */
int (*dma_unmap)(const VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb);
+ IOMMUTLBEntry *iotlb, bool unmap_all);
+
+
+ /**
+ * @attach_device
+ *
+ * Associate the given device with a container and do some related
+ * initialization of the device context.
+ *
+ * @name: name of the device
+ * @vbasedev: the device
+ * @as: address space to use
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns true to indicate success and false for error.
+ */
bool (*attach_device)(const char *name, VFIODevice *vbasedev,
AddressSpace *as, Error **errp);
+
+ /**
+ * @detach_device
+ *
+ * Detach the given device from its container and clean up any necessary
+ * state.
+ *
+ * @vbasedev: the device to disassociate
+ */
void (*detach_device)(VFIODevice *vbasedev);
/* migration feature */
@@ -132,7 +223,7 @@ struct VFIOIOMMUClass {
* @start: indicates whether to start or stop dirty pages tracking
* @errp: pointer to Error*, to store an error if it happens.
*
- * Returns zero to indicate success and negative for error
+ * Returns zero to indicate success and negative for error.
*/
int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer,
bool start, Error **errp);
@@ -147,7 +238,7 @@ struct VFIOIOMMUClass {
* @size: size of iova range
* @errp: pointer to Error*, to store an error if it happens.
*
- * Returns zero to indicate success and negative for error
+ * Returns zero to indicate success and negative for error.
*/
int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer,
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp);
@@ -162,4 +253,11 @@ struct VFIOIOMMUClass {
MemoryRegionSection *section);
void (*release)(VFIOContainerBase *bcontainer);
};
+
+VFIORamDiscardListener *vfio_find_ram_discard_listener(
+ VFIOContainerBase *bcontainer, MemoryRegionSection *section);
+
+void vfio_container_region_add(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section, bool cpr_remap);
+
#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */
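The widened dma_map()/dma_unmap() hooks above now carry the backing MemoryRegion and an unmap_all flag. A minimal sketch of how a backend could wire them up follows; the my_iommu_* names are invented for illustration and this is not the legacy or iommufd implementation (VFIO_IOMMU_CLASS() is the cast macro generated by OBJECT_DECLARE_TYPE above).

/* Hypothetical VFIOIOMMUClass backend sketch for the new hook signatures. */
static int my_iommu_dma_map(const VFIOContainerBase *bcontainer,
                            hwaddr iova, ram_addr_t size,
                            void *vaddr, bool readonly, MemoryRegion *mr)
{
    /* 'mr' is only referenced inside the caller's RCU read-side section. */
    return 0; /* -errno on failure */
}

static int my_iommu_dma_unmap(const VFIOContainerBase *bcontainer,
                              hwaddr iova, ram_addr_t size,
                              IOMMUTLBEntry *iotlb, bool unmap_all)
{
    /* With unmap_all set, drop every mapping in the address space. */
    return 0;
}

static void my_iommu_class_init(ObjectClass *klass, const void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->dma_map = my_iommu_dma_map;
    vioc->dma_unmap = my_iommu_dma_unmap;
}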
diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h
new file mode 100644
index 0000000..21e5807
--- /dev/null
+++ b/include/hw/vfio/vfio-container.h
@@ -0,0 +1,38 @@
+/*
+ * VFIO container
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_CONTAINER_H
+#define HW_VFIO_CONTAINER_H
+
+#include "hw/vfio/vfio-container-base.h"
+#include "hw/vfio/vfio-cpr.h"
+
+typedef struct VFIOContainer VFIOContainer;
+typedef struct VFIODevice VFIODevice;
+
+typedef struct VFIOGroup {
+ int fd;
+ int groupid;
+ VFIOContainer *container;
+ QLIST_HEAD(, VFIODevice) device_list;
+ QLIST_ENTRY(VFIOGroup) next;
+ QLIST_ENTRY(VFIOGroup) container_next;
+ bool ram_block_discard_allowed;
+} VFIOGroup;
+
+typedef struct VFIOContainer {
+ VFIOContainerBase bcontainer;
+ int fd; /* /dev/vfio/vfio, empowered by the attached groups */
+ unsigned iommu_type;
+ QLIST_HEAD(, VFIOGroup) group_list;
+ VFIOContainerCPR cpr;
+} VFIOContainer;
+
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOContainer, VFIO_IOMMU_LEGACY);
+
+#endif /* HW_VFIO_CONTAINER_H */
diff --git a/include/hw/vfio/vfio-cpr.h b/include/hw/vfio/vfio-cpr.h
new file mode 100644
index 0000000..8bf85b9
--- /dev/null
+++ b/include/hw/vfio/vfio-cpr.h
@@ -0,0 +1,57 @@
+/*
+ * VFIO CPR
+ *
+ * Copyright (c) 2025 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_CPR_H
+#define HW_VFIO_VFIO_CPR_H
+
+#include "migration/misc.h"
+#include "system/memory.h"
+
+struct VFIOContainer;
+struct VFIOContainerBase;
+struct VFIOGroup;
+
+typedef struct VFIOContainerCPR {
+ Error *blocker;
+ bool vaddr_unmapped;
+ NotifierWithReturn transfer_notifier;
+ MemoryListener remap_listener;
+ int (*saved_dma_map)(const struct VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly, MemoryRegion *mr);
+} VFIOContainerCPR;
+
+typedef struct VFIODeviceCPR {
+ Error *mdev_blocker;
+} VFIODeviceCPR;
+
+bool vfio_legacy_cpr_register_container(struct VFIOContainer *container,
+ Error **errp);
+void vfio_legacy_cpr_unregister_container(struct VFIOContainer *container);
+
+int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, MigrationEvent *e,
+ Error **errp);
+
+bool vfio_cpr_register_container(struct VFIOContainerBase *bcontainer,
+ Error **errp);
+void vfio_cpr_unregister_container(struct VFIOContainerBase *bcontainer);
+
+int vfio_cpr_group_get_device_fd(int d, const char *name);
+
+bool vfio_cpr_container_match(struct VFIOContainer *container,
+ struct VFIOGroup *group, int fd);
+
+void vfio_cpr_giommu_remap(struct VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+
+bool vfio_cpr_ram_discard_register_listener(
+ struct VFIOContainerBase *bcontainer, MemoryRegionSection *section);
+
+extern const VMStateDescription vfio_cpr_pci_vmstate;
+
+#endif /* HW_VFIO_VFIO_CPR_H */
diff --git a/include/hw/vfio/vfio-device.h b/include/hw/vfio/vfio-device.h
new file mode 100644
index 0000000..c616652
--- /dev/null
+++ b/include/hw/vfio/vfio-device.h
@@ -0,0 +1,286 @@
+/*
+ * VFIO Device interface
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#ifndef HW_VFIO_VFIO_COMMON_H
+#define HW_VFIO_VFIO_COMMON_H
+
+#include "system/memory.h"
+#include "qemu/queue.h"
+#ifdef CONFIG_LINUX
+#include <linux/vfio.h>
+#endif
+#include "system/system.h"
+#include "hw/vfio/vfio-container-base.h"
+#include "hw/vfio/vfio-cpr.h"
+#include "system/host_iommu_device.h"
+#include "system/iommufd.h"
+
+#define VFIO_MSG_PREFIX "vfio %s: "
+
+enum {
+ VFIO_DEVICE_TYPE_PCI = 0,
+ VFIO_DEVICE_TYPE_PLATFORM = 1,
+ VFIO_DEVICE_TYPE_CCW = 2,
+ VFIO_DEVICE_TYPE_AP = 3,
+};
+
+typedef struct VFIODeviceOps VFIODeviceOps;
+typedef struct VFIODeviceIOOps VFIODeviceIOOps;
+typedef struct VFIOMigration VFIOMigration;
+
+typedef struct IOMMUFDBackend IOMMUFDBackend;
+typedef struct VFIOIOASHwpt VFIOIOASHwpt;
+typedef struct VFIOUserProxy VFIOUserProxy;
+
+typedef struct VFIODevice {
+ QLIST_ENTRY(VFIODevice) next;
+ QLIST_ENTRY(VFIODevice) container_next;
+ QLIST_ENTRY(VFIODevice) global_next;
+ struct VFIOGroup *group;
+ VFIOContainerBase *bcontainer;
+ char *sysfsdev;
+ char *name;
+ DeviceState *dev;
+ int fd;
+ int type;
+ bool mdev;
+ bool reset_works;
+ bool needs_reset;
+ bool no_mmap;
+ bool ram_block_discard_allowed;
+ OnOffAuto enable_migration;
+ OnOffAuto migration_multifd_transfer;
+ bool migration_events;
+ bool use_region_fds;
+ VFIODeviceOps *ops;
+ VFIODeviceIOOps *io_ops;
+ unsigned int num_irqs;
+ unsigned int num_regions;
+ unsigned int flags;
+ VFIOMigration *migration;
+ Error *migration_blocker;
+ OnOffAuto pre_copy_dirty_page_tracking;
+ OnOffAuto device_dirty_page_tracking;
+ bool dirty_pages_supported;
+ bool dirty_tracking; /* Protected by BQL */
+ bool iommu_dirty_tracking;
+ HostIOMMUDevice *hiod;
+ int devid;
+ IOMMUFDBackend *iommufd;
+ VFIOIOASHwpt *hwpt;
+ QLIST_ENTRY(VFIODevice) hwpt_next;
+ struct vfio_region_info **reginfo;
+ int *region_fds;
+ VFIODeviceCPR cpr;
+ VFIOUserProxy *proxy;
+} VFIODevice;
+
+struct VFIODeviceOps {
+ void (*vfio_compute_needs_reset)(VFIODevice *vdev);
+ int (*vfio_hot_reset_multi)(VFIODevice *vdev);
+ void (*vfio_eoi)(VFIODevice *vdev);
+ Object *(*vfio_get_object)(VFIODevice *vdev);
+
+ /**
+ * @vfio_save_config
+ *
+ * Save device config state
+ *
+ * @vdev: #VFIODevice for which to save the config
+ * @f: #QEMUFile where to send the data
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f, Error **errp);
+
+ /**
+ * @vfio_load_config
+ *
+ * Load device config state
+ *
+ * @vdev: #VFIODevice for which to load the config
+ * @f: #QEMUFile where to get the data
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
+};
+
+/*
+ * Given a return value of either a short number of bytes read or -errno,
+ * construct a meaningful error message.
+ */
+#define strreaderror(ret) \
+ (ret < 0 ? strerror(-ret) : "short read")
+
+/*
+ * Given a return value of either a short number of bytes written or -errno,
+ * construct a meaningful error message.
+ */
+#define strwriteerror(ret) \
+ (ret < 0 ? strerror(-ret) : "short write")
+
+void vfio_device_irq_disable(VFIODevice *vbasedev, int index);
+void vfio_device_irq_unmask(VFIODevice *vbasedev, int index);
+void vfio_device_irq_mask(VFIODevice *vbasedev, int index);
+bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
+ int action, int fd, Error **errp);
+
+void vfio_device_reset_handler(void *opaque);
+bool vfio_device_is_mdev(VFIODevice *vbasedev);
+bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
+ const char *typename, Error **errp);
+bool vfio_device_attach(char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name,
+ VFIODevice *vbasedev, AddressSpace *as,
+ Error **errp);
+void vfio_device_detach(VFIODevice *vbasedev);
+VFIODevice *vfio_get_vfio_device(Object *obj);
+
+typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList;
+extern VFIODeviceList vfio_device_list;
+
+#ifdef CONFIG_LINUX
+/*
+ * How devices communicate with the server. The default option is through
+ * ioctl() to the kernel VFIO driver, but vfio-user can use a socket to a remote
+ * process.
+ */
+struct VFIODeviceIOOps {
+ /**
+ * @device_feature
+ *
+ * Fill in feature info for the given device.
+ *
+ * @vdev: #VFIODevice to use
+ * @feat: feature information to fill in
+ *
+ * Returns 0 on success or -errno.
+ */
+ int (*device_feature)(VFIODevice *vdev, struct vfio_device_feature *feat);
+
+ /**
+ * @get_region_info
+ *
+ * Get the information for a given region on the device.
+ *
+ * @vdev: #VFIODevice to use
+ * @info: set @info->index to the region index to look up; the rest of the
+ * struct will be filled in on success
+ * @fd: pointer to the fd for the region; will be -1 if not found
+ *
+ * Returns 0 on success or -errno.
+ */
+ int (*get_region_info)(VFIODevice *vdev,
+ struct vfio_region_info *info, int *fd);
+
+ /**
+ * @get_irq_info
+ *
+ * @vdev: #VFIODevice to use
+ * @irq: set @irq->index to the IRQ index to look up; the rest of the struct
+ * will be filled in on success
+ *
+ * Returns 0 on success or -errno.
+ */
+ int (*get_irq_info)(VFIODevice *vdev, struct vfio_irq_info *irq);
+
+ /**
+ * @set_irqs
+ *
+ * Configure IRQs.
+ *
+ * @vdev: #VFIODevice to use
+ * @irqs: IRQ configuration as defined by VFIO docs.
+ *
+ * Returns 0 on success or -errno.
+ */
+ int (*set_irqs)(VFIODevice *vdev, struct vfio_irq_set *irqs);
+
+ /**
+ * @region_read
+ *
+ * Read part of a region.
+ *
+ * @vdev: #VFIODevice to use
+ * @nr: region index
+ * @off: offset within the region
+ * @size: size in bytes to read
+ * @data: buffer to read into
+ *
+ * Returns number of bytes read on success or -errno.
+ */
+ int (*region_read)(VFIODevice *vdev, uint8_t nr, off_t off, uint32_t size,
+ void *data);
+
+ /**
+ * @region_write
+ *
+ * Write part of a region.
+ *
+ * @vdev: #VFIODevice to use
+ * @nr: region index
+ * @off: offset within the region
+ * @size: size in bytes to write
+ * @data: buffer to write from
+ * @post: true if this is a posted write
+ *
+ * Returns number of bytes written on success or -errno.
+ */
+ int (*region_write)(VFIODevice *vdev, uint8_t nr, off_t off, uint32_t size,
+ void *data, bool post);
+};
+
+void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
+ struct vfio_device_info *info);
+
+void vfio_device_unprepare(VFIODevice *vbasedev);
+
+int vfio_device_get_region_info(VFIODevice *vbasedev, int index,
+ struct vfio_region_info **info);
+int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type,
+ uint32_t subtype, struct vfio_region_info **info);
+
+/**
+ * Return the fd for mapping this region. This is either the device's fd
+ * (e.g. kernel vfio) or a per-region fd (for vfio-user).
+ *
+ * @vbasedev: #VFIODevice to use
+ * @index: region index
+ *
+ * Returns the fd.
+ */
+int vfio_device_get_region_fd(VFIODevice *vbasedev, int index);
+
+bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type);
+
+int vfio_device_get_irq_info(VFIODevice *vbasedev, int index,
+ struct vfio_irq_info *info);
+#endif
+
+/* Returns true on success, false on failure (with @errp set). */
+bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard);
+int vfio_device_get_aw_bits(VFIODevice *vdev);
+#endif /* HW_VFIO_VFIO_COMMON_H */
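A small usage sketch for the io_ops and strreaderror() pieces declared above. The helper below is illustrative only; it assumes error_report() from "qemu/error-report.h" and VFIO_PCI_CONFIG_REGION_INDEX from <linux/vfio.h>.

/* Illustrative only: read 4 bytes of config space through the device's
 * I/O ops and report short reads or -errno with strreaderror(). */
static void example_read_config_dword(VFIODevice *vbasedev, off_t offset)
{
    uint32_t val;
    int ret = vbasedev->io_ops->region_read(vbasedev,
                                            VFIO_PCI_CONFIG_REGION_INDEX,
                                            offset, sizeof(val), &val);

    if (ret != sizeof(val)) {
        error_report(VFIO_MSG_PREFIX "config read at 0x%lx failed: %s",
                     vbasedev->name, (unsigned long)offset,
                     strreaderror(ret));
    }
}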
diff --git a/include/hw/vfio/vfio-migration.h b/include/hw/vfio/vfio-migration.h
new file mode 100644
index 0000000..0d4ecd3
--- /dev/null
+++ b/include/hw/vfio/vfio-migration.h
@@ -0,0 +1,16 @@
+/*
+ * VFIO migration interface
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_MIGRATION_H
+#define HW_VFIO_VFIO_MIGRATION_H
+
+bool vfio_migration_active(void);
+int64_t vfio_migration_bytes_transferred(void);
+void vfio_migration_reset_bytes_transferred(void);
+
+#endif /* HW_VFIO_VFIO_MIGRATION_H */
diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h
index c414c3d..256d850 100644
--- a/include/hw/vfio/vfio-platform.h
+++ b/include/hw/vfio/vfio-platform.h
@@ -17,7 +17,7 @@
#define HW_VFIO_VFIO_PLATFORM_H
#include "hw/sysbus.h"
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-device.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qom/object.h"
@@ -47,6 +47,8 @@ typedef struct VFIOINTp {
/* function type for user side eventfd handler */
typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp);
+typedef struct VFIORegion VFIORegion;
+
struct VFIOPlatformDevice {
SysBusDevice sbdev;
VFIODevice vbasedev; /* not a QOM object */
diff --git a/include/hw/vfio/vfio-region.h b/include/hw/vfio/vfio-region.h
new file mode 100644
index 0000000..ede6e0c
--- /dev/null
+++ b/include/hw/vfio/vfio-region.h
@@ -0,0 +1,48 @@
+/*
+ * VFIO region
+ *
+ * Copyright Red Hat, Inc. 2025
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_REGION_H
+#define HW_VFIO_REGION_H
+
+#include "system/memory.h"
+
+typedef struct VFIOMmap {
+ MemoryRegion mem;
+ void *mmap;
+ off_t offset;
+ size_t size;
+} VFIOMmap;
+
+typedef struct VFIODevice VFIODevice;
+
+typedef struct VFIORegion {
+ struct VFIODevice *vbasedev;
+ off_t fd_offset; /* offset of region within device fd */
+ MemoryRegion *mem; /* slow, read/write access */
+ size_t size;
+ uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
+ uint32_t nr_mmaps;
+ VFIOMmap *mmaps;
+ uint8_t nr; /* cache the region number for debug */
+ bool post_wr; /* writes can be posted */
+} VFIORegion;
+
+
+void vfio_region_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size);
+uint64_t vfio_region_read(void *opaque,
+ hwaddr addr, unsigned size);
+int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
+ int index, const char *name);
+int vfio_region_mmap(VFIORegion *region);
+void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled);
+void vfio_region_unmap(VFIORegion *region);
+void vfio_region_exit(VFIORegion *region);
+void vfio_region_finalize(VFIORegion *region);
+
+#endif /* HW_VFIO_REGION_H */
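A minimal sketch of the intended VFIORegion lifecycle using the helpers above (error handling trimmed; the function name is illustrative, not an existing QEMU routine).

/* Sketch: set up a region, try the fast mmap path, and tear it down. */
static void example_region_lifecycle(Object *obj, VFIODevice *vbasedev,
                                     int index)
{
    VFIORegion region;

    if (vfio_region_setup(obj, vbasedev, &region, index, "example-region") < 0) {
        return;
    }
    if (vfio_region_mmap(&region) < 0) {
        /* No fast path: accesses go through the slow region.mem ops. */
    }
    vfio_region_mmaps_set_enabled(&region, true);

    /* ... device use ... */

    vfio_region_exit(&region);
    vfio_region_finalize(&region);
}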
diff --git a/include/hw/virtio/cbor-helpers.h b/include/hw/virtio/cbor-helpers.h
new file mode 100644
index 0000000..f25fd48
--- /dev/null
+++ b/include/hw/virtio/cbor-helpers.h
@@ -0,0 +1,45 @@
+/*
+ * QEMU CBOR helpers
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_CBOR_HELPERS_H
+#define QEMU_VIRTIO_CBOR_HELPERS_H
+
+#include <cbor.h>
+
+bool qemu_cbor_map_add(cbor_item_t *map, cbor_item_t *key, cbor_item_t *value);
+
+bool qemu_cbor_array_push(cbor_item_t *array, cbor_item_t *value);
+
+bool qemu_cbor_add_bool_to_map(cbor_item_t *map, const char *key, bool value);
+
+bool qemu_cbor_add_uint8_to_map(cbor_item_t *map, const char *key,
+ uint8_t value);
+
+bool qemu_cbor_add_map_to_map(cbor_item_t *map, const char *key,
+ size_t nested_map_size,
+ cbor_item_t **nested_map);
+
+bool qemu_cbor_add_bytestring_to_map(cbor_item_t *map, const char *key,
+ uint8_t *arr, size_t len);
+
+bool qemu_cbor_add_null_to_map(cbor_item_t *map, const char *key);
+
+bool qemu_cbor_add_string_to_map(cbor_item_t *map, const char *key,
+ const char *value);
+
+bool qemu_cbor_add_uint8_array_to_map(cbor_item_t *map, const char *key,
+ uint8_t *arr, size_t len);
+
+bool qemu_cbor_add_uint8_key_bytestring_to_map(cbor_item_t *map, uint8_t key,
+ uint8_t *buf, size_t len);
+
+bool qemu_cbor_add_uint64_to_map(cbor_item_t *map, const char *key,
+ uint64_t value);
+#endif
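A short sketch of how these helpers compose, assuming libcbor's cbor_new_definite_map() and cbor_decref() are available; the key names below are purely illustrative.

/* Sketch: build a two-entry CBOR map, undoing the allocation on failure. */
static cbor_item_t *example_build_map(void)
{
    cbor_item_t *map = cbor_new_definite_map(2);

    if (!map) {
        return NULL;
    }
    if (!qemu_cbor_add_string_to_map(map, "module_id", "example") ||
        !qemu_cbor_add_uint64_to_map(map, "timestamp", 0)) {
        cbor_decref(&map);
        return NULL;
    }
    return map;
}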
diff --git a/include/hw/virtio/iothread-vq-mapping.h b/include/hw/virtio/iothread-vq-mapping.h
new file mode 100644
index 0000000..57335c3
--- /dev/null
+++ b/include/hw/virtio/iothread-vq-mapping.h
@@ -0,0 +1,45 @@
+/*
+ * IOThread Virtqueue Mapping
+ *
+ * Copyright Red Hat, Inc
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ */
+
+#ifndef HW_VIRTIO_IOTHREAD_VQ_MAPPING_H
+#define HW_VIRTIO_IOTHREAD_VQ_MAPPING_H
+
+#include "qapi/error.h"
+#include "qapi/qapi-types-virtio.h"
+
+/**
+ * iothread_vq_mapping_apply:
+ * @list: The mapping of virtqueues to IOThreads.
+ * @vq_aio_context: The array of AioContext pointers to fill in.
+ * @num_queues: The length of @vq_aio_context.
+ * @errp: If an error occurs, a pointer to the area to store the error.
+ *
+ * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
+ * the iothread-vq-mapping parameter in @list.
+ *
+ * iothread_vq_mapping_cleanup() must be called to free IOThread object
+ * references after this function returns success.
+ *
+ * Returns: %true on success, %false on failure.
+ **/
+bool iothread_vq_mapping_apply(
+ IOThreadVirtQueueMappingList *list,
+ AioContext **vq_aio_context,
+ uint16_t num_queues,
+ Error **errp);
+
+/**
+ * iothread_vq_mapping_cleanup:
+ * @list: The mapping of virtqueues to IOThreads.
+ *
+ * Release IOThread object references that were acquired by
+ * iothread_vq_mapping_apply().
+ */
+void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list);
+
+#endif /* HW_VIRTIO_IOTHREAD_VQ_MAPPING_H */
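A hedged sketch of the intended call pattern; the wrapper below is illustrative and not an existing QEMU function.

/* Sketch: fill a per-virtqueue AioContext array from an
 * iothread-vq-mapping property value. */
static bool example_setup_vq_contexts(IOThreadVirtQueueMappingList *list,
                                      uint16_t num_queues,
                                      AioContext ***out, Error **errp)
{
    AioContext **vq_aio_context = g_new0(AioContext *, num_queues);

    if (!iothread_vq_mapping_apply(list, vq_aio_context, num_queues, errp)) {
        g_free(vq_aio_context);
        return false;
    }
    *out = vq_aio_context;
    /* Teardown later: iothread_vq_mapping_cleanup(list); g_free(*out); */
    return true;
}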
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 70c2e8f..d6df209 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -11,7 +11,7 @@
#ifndef VHOST_BACKEND_H
#define VHOST_BACKEND_H
-#include "exec/memory.h"
+#include "system/memory.h"
typedef enum VhostBackendType {
VHOST_BACKEND_TYPE_NONE = 0,
diff --git a/include/hw/virtio/vhost-scsi-common.h b/include/hw/virtio/vhost-scsi-common.h
index c5d2c09..d54d9c9 100644
--- a/include/hw/virtio/vhost-scsi-common.h
+++ b/include/hw/virtio/vhost-scsi-common.h
@@ -40,7 +40,7 @@ struct VHostSCSICommon {
};
int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp);
-void vhost_scsi_common_stop(VHostSCSICommon *vsc);
+int vhost_scsi_common_stop(VHostSCSICommon *vsc);
char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
void vhost_scsi_common_set_config(VirtIODevice *vdev, const uint8_t *config);
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index 324cd86..9a3f238 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -54,6 +54,7 @@ typedef struct VhostUserHostNotifier {
void *addr;
void *unmap_addr;
int idx;
+ bool destroy;
} VhostUserHostNotifier;
/**
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 0a9575b..449bf5c 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -43,7 +43,21 @@ typedef struct vhost_vdpa_shared {
struct vhost_vdpa_iova_range iova_range;
QLIST_HEAD(, vdpa_iommu) iommu_list;
- /* IOVA mapping used by the Shadow Virtqueue */
+ /*
+ * IOVA mapping used by the Shadow Virtqueue
+ *
+ * It is shared among all ASIDs for simplicity, whether CVQ shares an ASID
+ * with the guest or not:
+ * - The memory listener needs access to guest memory addresses allocated
+ * in the IOVA tree.
+ * - There should be plenty of IOVA address space for both ASIDs, so
+ * collisions between them are not a concern. Guest translations are
+ * still validated with virtio virtqueue_pop, so there is no risk of the
+ * guest accessing memory that it shouldn't.
+ *
+ * Allocating an IOVA tree per ASID is doable, but it complicates the code
+ * and is not worth it for the moment.
+ */
VhostIOVATree *iova_tree;
/* Copy of backend features */
@@ -51,6 +65,12 @@ typedef struct vhost_vdpa_shared {
bool iotlb_batch_begin_sent;
+ /*
+ * The memory listener has been registered, so DMA maps have been sent to
+ * the device.
+ */
+ bool listener_registered;
+
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
bool shadow_data;
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
index 75a74e8..01bf606 100644
--- a/include/hw/virtio/vhost-vsock-common.h
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -42,7 +42,7 @@ struct VHostVSockCommon {
};
int vhost_vsock_common_start(VirtIODevice *vdev);
-void vhost_vsock_common_stop(VirtIODevice *vdev);
+int vhost_vsock_common_stop(VirtIODevice *vdev);
int vhost_vsock_common_pre_save(void *opaque);
int vhost_vsock_common_post_load(void *opaque, int version_id);
void vhost_vsock_common_realize(VirtIODevice *vdev);
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index d75faf4..38800a7 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -3,7 +3,7 @@
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30
@@ -171,6 +171,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
*/
void vhost_dev_cleanup(struct vhost_dev *hdev);
+void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
+ VirtIODevice *vdev,
+ unsigned int nvqs);
+
/**
* vhost_dev_enable_notifiers() - enable event notifiers
* @hdev: common vhost_dev structure
@@ -228,8 +232,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
* Stop the vhost device. After the device is stopped the notifiers
* can be disabled (@vhost_dev_disable_notifiers) and the device can
* be torn down (@vhost_dev_cleanup).
+ *
+ * Return: 0 on success, != 0 on error when stopping dev.
*/
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* DOC: vhost device configuration handling
@@ -329,13 +335,11 @@ int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
struct vhost_virtqueue *vq, unsigned idx);
-void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
- struct vhost_virtqueue *vq, unsigned idx);
+int vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq, unsigned idx);
void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
-void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
-int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);
@@ -363,7 +367,14 @@ static inline int vhost_reset_device(struct vhost_dev *hdev)
* Returns true if the device supports these commands, and false if it
* does not.
*/
+#ifdef CONFIG_VHOST
bool vhost_supports_device_state(struct vhost_dev *dev);
+#else
+static inline bool vhost_supports_device_state(struct vhost_dev *dev)
+{
+ return false;
+}
+#endif
/**
* vhost_set_device_state_fd(): Begin transfer of internal state from/to
@@ -446,7 +457,15 @@ int vhost_check_device_state(struct vhost_dev *dev, Error **errp);
*
* Returns 0 on success, and -errno otherwise.
*/
+#ifdef CONFIG_VHOST
int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+#else
+static inline int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f,
+ Error **errp)
+{
+ return -ENOSYS;
+}
+#endif
/**
* vhost_load_backend_state(): High-level function to load a vhost
@@ -463,6 +482,14 @@ int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
*
* Returns 0 on success, and -errno otherwise.
*/
+#ifdef CONFIG_VHOST
int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+#else
+static inline int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f,
+ Error **errp)
+{
+ return -ENOSYS;
+}
+#endif
#endif
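The CONFIG_VHOST inline stubs added above let generic code call these helpers unconditionally. A sketch of that pattern follows; the wrapper name is illustrative.

/* Sketch: save backend state only when the backend (and build) support it. */
static int example_save_vhost_state(struct vhost_dev *dev, QEMUFile *f,
                                    Error **errp)
{
    if (!vhost_supports_device_state(dev)) {
        /* Stub build or backend without the feature: nothing to transfer. */
        return 0;
    }
    return vhost_save_backend_state(dev, f, errp);
}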
diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h
index cace2a3..cdfbd94 100644
--- a/include/hw/virtio/virtio-acpi.h
+++ b/include/hw/virtio/virtio-acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ACPI support for virtio
*/
diff --git a/include/hw/virtio/virtio-balloon.h b/include/hw/virtio/virtio-balloon.h
index 5139cf8..0456c21 100644
--- a/include/hw/virtio/virtio-balloon.h
+++ b/include/hw/virtio/virtio-balloon.h
@@ -16,8 +16,9 @@
#define QEMU_VIRTIO_BALLOON_H
#include "standard-headers/linux/virtio_balloon.h"
+#include "hw/resettable.h"
#include "hw/virtio/virtio.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "qom/object.h"
#define TYPE_VIRTIO_BALLOON "virtio-balloon-device"
@@ -71,6 +72,9 @@ struct VirtIOBalloon {
bool qemu_4_0_config_size;
uint32_t poison_val;
+
+ /* State of the resettable container */
+ ResettableState reset_state;
};
#endif
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 5c14110..3d8dee7 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -17,14 +17,14 @@
#include "standard-headers/linux/virtio_blk.h"
#include "hw/virtio/virtio.h"
#include "hw/block/block.h"
-#include "sysemu/iothread.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/block-ram-registrar.h"
+#include "system/iothread.h"
+#include "system/block-backend.h"
+#include "system/block-ram-registrar.h"
#include "qom/object.h"
#include "qapi/qapi-types-virtio.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device"
-OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
+OBJECT_DECLARE_TYPE(VirtIOBlock, VirtIOBlkClass, VIRTIO_BLK)
/* This is the last element of the write scatter-gather list */
struct virtio_blk_inhdr
@@ -100,6 +100,15 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
+typedef struct VirtIOBlkClass {
+ /*< private >*/
+ VirtioDeviceClass parent;
+ /*< public >*/
+ bool (*handle_unknown_request)(VirtIOBlockReq *req, MultiReqBuffer *mrb,
+ uint32_t type);
+} VirtIOBlkClass;
+
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
+void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status);
#endif
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
index 348749f..2d56513 100644
--- a/include/hw/virtio/virtio-crypto.h
+++ b/include/hw/virtio/virtio-crypto.h
@@ -16,8 +16,8 @@
#include "standard-headers/linux/virtio_crypto.h"
#include "hw/virtio/virtio.h"
-#include "sysemu/iothread.h"
-#include "sysemu/cryptodev.h"
+#include "system/iothread.h"
+#include "system/cryptodev.h"
#include "qom/object.h"
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 7a59379..a42957c 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -19,7 +19,7 @@
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
-#include "sysemu/vhost-user-backend.h"
+#include "system/vhost-user-backend.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -51,9 +51,7 @@ struct virtio_gpu_simple_resource {
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
-#ifdef WIN32
- HANDLE handle;
-#endif
+ qemu_pixman_shareable share_handle;
uint64_t hostmem;
uint64_t blob_size;
@@ -99,6 +97,8 @@ enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_BLOB_ENABLED,
VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
+ VIRTIO_GPU_FLAG_VENUS_ENABLED,
+ VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED,
};
#define virtio_gpu_virgl_enabled(_cfg) \
@@ -115,8 +115,12 @@ enum virtio_gpu_base_conf_flags {
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED))
#define virtio_gpu_rutabaga_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
+#define virtio_gpu_resource_uuid_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED))
#define virtio_gpu_hostmem_enabled(_cfg) \
(_cfg.hostmem > 0)
+#define virtio_gpu_venus_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VENUS_ENABLED))
struct virtio_gpu_base_conf {
uint32_t max_outputs;
@@ -196,8 +200,6 @@ struct VirtIOGPU {
uint64_t hostmem;
bool processing_cmdq;
- QEMUTimer *fence_poll;
- QEMUTimer *print_stats;
uint32_t inflight;
struct {
@@ -211,6 +213,8 @@ struct VirtIOGPU {
QTAILQ_HEAD(, VGPUDMABuf) bufs;
VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
} dmabuf;
+
+ GArray *capset_ids;
};
struct VirtIOGPUClass {
@@ -226,11 +230,23 @@ struct VirtIOGPUClass {
Error **errp);
};
+/* VirtIOGPUGL renderer states */
+typedef enum {
+ RS_START, /* starting state */
+ RS_INIT_FAILED, /* failed initialisation */
+ RS_INITED, /* initialised and working */
+ RS_RESET, /* inited and reset pending, moves to start after reset */
+} RenderState;
+
struct VirtIOGPUGL {
struct VirtIOGPU parent_obj;
- bool renderer_inited;
- bool renderer_reset;
+ RenderState renderer_state;
+
+ QEMUTimer *fence_poll;
+ QEMUTimer *print_stats;
+
+ QEMUBH *cmdq_resume_bh;
};
struct VhostUserGPU {
@@ -320,6 +336,21 @@ void virtio_gpu_update_cursor_data(VirtIOGPU *g,
struct virtio_gpu_scanout *s,
uint32_t resource_id);
+/**
+ * virtio_gpu_scanout_blob_to_fb() - fill out fb based on scanout data
+ * fb: the frame-buffer descriptor to fill out
+ * ss: the scanout blob data
+ * blob_size: size of scanout blob data
+ *
+ * This will check we have enough space for the frame, taking into
+ * account the stride.
+ *
+ * Returns true on success; otherwise logs a guest error and returns false.
+ */
+bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_set_scanout_blob *ss,
+ uint64_t blob_size);
+
/* virtio-gpu-udmabuf.c */
bool virtio_gpu_have_udmabuf(void);
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
@@ -330,6 +361,13 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g,
struct virtio_gpu_framebuffer *fb,
struct virtio_gpu_rect *r);
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+ uint32_t scanout_id,
+ struct virtio_gpu_simple_resource *res,
+ struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_rect *r);
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id);
+
/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
@@ -337,6 +375,6 @@ void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
-int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);
+GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);
#endif
diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h
index e69c0ae..e097b0b 100644
--- a/include/hw/virtio/virtio-input.h
+++ b/include/hw/virtio/virtio-input.h
@@ -4,7 +4,7 @@
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-user-base.h"
#include "ui/input.h"
-#include "sysemu/vhost-user-backend.h"
+#include "system/vhost-user-backend.h"
/* ----------------------------------------------------------------- */
/* virtio input protocol */
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index bdb3da7..3b86050 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -25,7 +25,7 @@
#include "hw/pci/pci.h"
#include "qom/object.h"
#include "qapi/qapi-types-virtio.h"
-#include "sysemu/host_iommu_device.h"
+#include "system/host_iommu_device.h"
#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci"
@@ -43,7 +43,6 @@ typedef struct IOMMUDevice {
MemoryRegion bypass_mr; /* The alias of shared memory MR */
GList *resv_regions;
GList *host_resv_ranges;
- bool probe_done;
} IOMMUDevice;
typedef struct IOMMUPciBus {
diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h
index 5f5b02b..e0ab31b 100644
--- a/include/hw/virtio/virtio-mem.h
+++ b/include/hw/virtio/virtio-mem.h
@@ -14,9 +14,10 @@
#define HW_VIRTIO_MEM_H
#include "standard-headers/linux/virtio_mem.h"
+#include "hw/resettable.h"
#include "hw/virtio/virtio.h"
#include "qapi/qapi-types-misc.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
#include "qom/object.h"
#define TYPE_VIRTIO_MEM "virtio-mem"
@@ -24,6 +25,10 @@
OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass,
VIRTIO_MEM)
+#define TYPE_VIRTIO_MEM_SYSTEM_RESET "virtio-mem-system-reset"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VirtioMemSystemReset, VIRTIO_MEM_SYSTEM_RESET)
+
#define VIRTIO_MEM_MEMDEV_PROP "memdev"
#define VIRTIO_MEM_NODE_PROP "node"
#define VIRTIO_MEM_SIZE_PROP "size"
@@ -115,11 +120,21 @@ struct VirtIOMEM {
/* listeners to notify on plug/unplug activity. */
QLIST_HEAD(, RamDiscardListener) rdl_list;
+
+ /* Catch system resets -> qemu_devices_reset() only. */
+ VirtioMemSystemReset *system_reset;
+};
+
+struct VirtioMemSystemReset {
+ Object parent;
+
+ ResettableState reset_state;
+ VirtIOMEM *vmem;
};
struct VirtIOMEMClass {
/* private */
- VirtIODevice parent;
+ VirtioDeviceClass parent_class;
/* public */
void (*fill_device_info)(const VirtIOMEM *vmen, VirtioMEMDeviceInfo *vi);
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 060c23c..b9ea9e8 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -102,7 +102,7 @@ typedef struct VirtioNetRscStat {
/* Rsc unit general info used to checking if can coalescing */
typedef struct VirtioNetRscUnit {
void *ip; /* ip header */
- uint16_t *ip_plen; /* data len pointer in ip header field */
+ void *ip_plen; /* pointer to unaligned uint16_t data len in ip header */
struct tcp_header *tcp; /* tcp header */
uint16_t tcp_hdrlen; /* tcp header len */
uint16_t payload; /* pure payload without virtio/eth/ip/tcp */
diff --git a/include/hw/virtio/virtio-nsm.h b/include/hw/virtio/virtio-nsm.h
new file mode 100644
index 0000000..57ddbbb
--- /dev/null
+++ b/include/hw/virtio/virtio-nsm.h
@@ -0,0 +1,49 @@
+/*
+ * AWS Nitro Secure Module (NSM) device
+ *
+ * Copyright (c) 2024 Dorjoy Chowdhury <dorjoychy111@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_NSM_H
+#define QEMU_VIRTIO_NSM_H
+
+#include "crypto/hash.h"
+#include "hw/virtio/virtio.h"
+#include "qom/object.h"
+
+#define NSM_MAX_PCRS 32
+
+#define TYPE_VIRTIO_NSM "virtio-nsm-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIONSM, VIRTIO_NSM)
+#define VIRTIO_NSM_GET_PARENT_CLASS(obj) \
+ OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_NSM)
+
+struct PCRInfo {
+ bool locked;
+ uint8_t data[QCRYPTO_HASH_DIGEST_LEN_SHA384];
+};
+
+struct VirtIONSM {
+ VirtIODevice parent_obj;
+
+ /* Only one vq - guest puts request and response buffers on it */
+ VirtQueue *vq;
+
+ /* NSM State */
+ uint16_t max_pcrs;
+ struct PCRInfo pcrs[NSM_MAX_PCRS];
+ char *digest;
+ char *module_id;
+ uint8_t version_major;
+ uint8_t version_minor;
+ uint8_t version_patch;
+
+ bool (*extend_pcr)(VirtIONSM *vnsm, int ind, uint8_t *data, uint16_t len);
+ void (*lock_pcr)(VirtIONSM *vnsm, int ind);
+};
+
+#endif
diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h
index 9e67ba3..eab5394 100644
--- a/include/hw/virtio/virtio-pci.h
+++ b/include/hw/virtio/virtio-pci.h
@@ -32,9 +32,7 @@ DECLARE_OBJ_CHECKERS(VirtioPCIBusState, VirtioPCIBusClass,
enum {
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
- VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
- VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
VIRTIO_PCI_FLAG_ATS_BIT,
VIRTIO_PCI_FLAG_INIT_DEVERR_BIT,
@@ -54,12 +52,6 @@ enum {
* vcpu thread using ioeventfd for some devices. */
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
-/* virtio version flags */
-#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
-
-/* migrate extra state */
-#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
-
/* have pio notification for modern device ? */
#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
(1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
@@ -147,11 +139,15 @@ struct VirtIOPCIProxy {
};
MemoryRegion modern_bar;
MemoryRegion io_bar;
+ /* address space for VirtIOPCIRegions */
+ AddressSpace modern_cfg_mem_as;
+ AddressSpace modern_cfg_io_as;
uint32_t legacy_io_bar_idx;
uint32_t msix_bar_idx;
uint32_t modern_io_bar_idx;
uint32_t modern_mem_bar_idx;
int config_cap;
+ uint16_t last_pcie_cap_offset;
uint32_t flags;
bool disable_modern;
bool ignore_backend_features;
@@ -252,8 +248,8 @@ typedef struct VirtioPCIDeviceTypeInfo {
size_t class_size;
void (*instance_init)(Object *obj);
void (*instance_finalize)(Object *obj);
- void (*class_init)(ObjectClass *klass, void *data);
- InterfaceInfo *interfaces;
+ void (*class_init)(ObjectClass *klass, const void *data);
+ const InterfaceInfo *interfaces;
} VirtioPCIDeviceTypeInfo;
/* Register virtio-pci type(s). @t must be static. */
diff --git a/include/hw/virtio/virtio-pmem.h b/include/hw/virtio/virtio-pmem.h
index fc4fd1f..9cce600 100644
--- a/include/hw/virtio/virtio-pmem.h
+++ b/include/hw/virtio/virtio-pmem.h
@@ -36,7 +36,7 @@ struct VirtIOPMEM {
struct VirtIOPMEMClass {
/* private */
- VirtIODevice parent;
+ VirtioDeviceClass parent_class;
/* public */
void (*fill_device_info)(const VirtIOPMEM *pmem, VirtioPMEMDeviceInfo *vi);
diff --git a/include/hw/virtio/virtio-rng.h b/include/hw/virtio/virtio-rng.h
index 8273425..7e6d27f 100644
--- a/include/hw/virtio/virtio-rng.h
+++ b/include/hw/virtio/virtio-rng.h
@@ -13,7 +13,7 @@
#define QEMU_VIRTIO_RNG_H
#include "hw/virtio/virtio.h"
-#include "sysemu/rng.h"
+#include "system/rng.h"
#include "standard-headers/linux/virtio_rng.h"
#include "qom/object.h"
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 7be0105..31e852e 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -22,7 +22,8 @@
#include "hw/virtio/virtio.h"
#include "hw/scsi/scsi.h"
#include "chardev/char-fe.h"
-#include "sysemu/iothread.h"
+#include "qapi/qapi-types-virtio.h"
+#include "system/iothread.h"
#define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICommon, VIRTIO_SCSI_COMMON)
@@ -60,6 +61,7 @@ struct VirtIOSCSIConf {
CharBackend chardev;
uint32_t boot_tpgt;
IOThread *iothread;
+ IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
};
struct VirtIOSCSI;
@@ -82,18 +84,14 @@ struct VirtIOSCSI {
SCSIBus bus;
int resetting; /* written from main loop thread, read from any thread */
+
+ QemuMutex event_lock; /* protects event_vq and events_dropped */
bool events_dropped;
- /*
- * TMFs deferred to main loop BH. These fields are protected by
- * tmf_bh_lock.
- */
- QemuMutex tmf_bh_lock;
- QEMUBH *tmf_bh;
- QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
+ QemuMutex ctrl_lock; /* protects ctrl_vq */
/* Fields for dataplane below */
- AioContext *ctx; /* one iothread per virtio-scsi-pci for now */
+ AioContext **vq_aio_context; /* per-virtqueue AioContext pointer */
bool dataplane_started;
bool dataplane_starting;
@@ -111,6 +109,7 @@ void virtio_scsi_common_realize(DeviceState *dev,
void virtio_scsi_common_unrealize(DeviceState *dev);
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp);
+void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s);
int virtio_scsi_dataplane_start(VirtIODevice *s);
void virtio_scsi_dataplane_stop(VirtIODevice *s);
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 7512afb..214d4a7 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -14,7 +14,7 @@
#ifndef QEMU_VIRTIO_H
#define QEMU_VIRTIO_H
-#include "exec/memory.h"
+#include "system/memory.h"
#include "hw/qdev-core.h"
#include "net/net.h"
#include "migration/vmstate.h"
@@ -69,6 +69,8 @@ typedef struct VirtQueueElement
unsigned int ndescs;
unsigned int out_num;
unsigned int in_num;
+ /* Element has been processed (VIRTIO_F_IN_ORDER) */
+ bool in_order_filled;
hwaddr *in_addr;
hwaddr *out_addr;
struct iovec *in_sg;
@@ -184,7 +186,7 @@ struct VirtioDeviceClass {
void (*get_config)(VirtIODevice *vdev, uint8_t *config);
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
- void (*set_status)(VirtIODevice *vdev, uint8_t val);
+ int (*set_status)(VirtIODevice *vdev, uint8_t val);
/* Device must validate queue_index. */
void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index);
/* Device must validate queue_index. */
@@ -208,6 +210,8 @@ struct VirtioDeviceClass {
void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
int (*start_ioeventfd)(VirtIODevice *vdev);
void (*stop_ioeventfd)(VirtIODevice *vdev);
+ /* Called before loading queues. Useful to add queues before loading. */
+ int (*pre_load_queues)(VirtIODevice *vdev);
/* Saving and loading of a device; trying to deprecate save/load
* use vmsd for new devices.
*/
@@ -221,6 +225,7 @@ struct VirtioDeviceClass {
int (*post_load)(VirtIODevice *vdev);
const VMStateDescription *vmsd;
bool (*primary_unplug_pending)(void *opaque);
+ /* May be called even when vdev->vhost_started is false */
struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
void (*toggle_device_iotlb)(VirtIODevice *vdev);
};
@@ -271,9 +276,13 @@ void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
-void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
- unsigned int *out_bytes,
- unsigned max_in_bytes, unsigned max_out_bytes);
+/**
+ * Return <0 on error or an opaque >=0 to pass to
+ * virtio_queue_enable_notification_and_check on success.
+ */
+int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes, unsigned max_in_bytes,
+ unsigned max_out_bytes);
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq);
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
@@ -307,6 +316,15 @@ int virtio_queue_ready(VirtQueue *vq);
int virtio_queue_empty(VirtQueue *vq);
+/**
+ * Enable notification and check whether guest has added some
+ * buffers since last call to virtqueue_get_avail_bytes.
+ *
+ * @opaque: value returned from virtqueue_get_avail_bytes
+ */
+bool virtio_queue_enable_notification_and_check(VirtQueue *vq,
+ int opaque);
+
void virtio_queue_set_shadow_avail_idx(VirtQueue *vq, uint16_t idx);
/* Host binding interface. */
@@ -371,7 +389,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("packed", _state, _field, \
VIRTIO_F_RING_PACKED, false), \
DEFINE_PROP_BIT64("queue_reset", _state, _field, \
- VIRTIO_F_RING_RESET, true)
+ VIRTIO_F_RING_RESET, true), \
+ DEFINE_PROP_BIT64("in_order", _state, _field, \
+ VIRTIO_F_IN_ORDER, false)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
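A sketch of the opaque-token pattern described in the virtqueue_get_avail_bytes() / virtio_queue_enable_notification_and_check() comments above. The polling loop is illustrative, not taken from an existing device, and assumes UINT_MAX from <limits.h>.

/* Sketch: drain a virtqueue, then re-enable notifications and re-check
 * for buffers the guest added while we were processing. */
static void example_poll_vq(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int in_bytes, out_bytes;
    int opaque;

    do {
        opaque = virtqueue_get_avail_bytes(vq, &in_bytes, &out_bytes,
                                           UINT_MAX, UINT_MAX);
        if (opaque < 0) {
            virtio_error(vdev, "broken virtqueue");
            return;
        }
        /* ... pop and handle elements here ... */
    } while (virtio_queue_enable_notification_and_check(vq, opaque));
}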
diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h
new file mode 100644
index 0000000..9c1ad1b
--- /dev/null
+++ b/include/hw/vmapple/vmapple.h
@@ -0,0 +1,23 @@
+/*
+ * Devices specific to the VMApple machine type
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VMAPPLE_VMAPPLE_H
+#define HW_VMAPPLE_VMAPPLE_H
+
+#define TYPE_APPLE_AES "apple-aes"
+
+#define TYPE_VMAPPLE_BDIF "vmapple-bdif"
+
+#define TYPE_VMAPPLE_CFG "vmapple-cfg"
+
+#define TYPE_VMAPPLE_VIRTIO_BLK_PCI "vmapple-virtio-blk-pci"
+
+#endif /* HW_VMAPPLE_VMAPPLE_H */
diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h
index c7c5152..df39c81 100644
--- a/include/hw/xen/arch_hvm.h
+++ b/include/hw/xen/arch_hvm.h
@@ -1,5 +1,5 @@
#if defined(TARGET_I386) || defined(TARGET_X86_64)
#include "hw/i386/xen_arch_hvm.h"
-#elif defined(TARGET_ARM) || defined(TARGET_ARM_64)
+#elif defined(TARGET_ARM) || defined(TARGET_AARCH64)
#include "hw/arm/xen_arch_hvm.h"
#endif
diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h
index 22f1eef..c552799 100644
--- a/include/hw/xen/interface/io/blkif.h
+++ b/include/hw/xen/interface/io/blkif.h
@@ -324,7 +324,7 @@
* access (even when it should be read-only). If the frontend hits the
* maximum number of allowed persistently mapped grants, it can fallback
* to non persistent mode. This will cause a performance degradation,
- * since the the backend driver will still try to map those grants
+ * since the backend driver will still try to map those grants
* persistently. Since the persistent grants protocol is compatible with
* the previous protocol, a frontend driver can choose to work in
* persistent mode even when the backend doesn't support it.
diff --git a/include/hw/xen/xen-block.h b/include/hw/xen/xen-block.h
index d692ea7..449a7f7 100644
--- a/include/hw/xen/xen-block.h
+++ b/include/hw/xen/xen-block.h
@@ -11,7 +11,7 @@
#include "hw/xen/xen-bus.h"
#include "hw/block/block.h"
#include "hw/block/dataplane/xen-block.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "qom/object.h"
typedef enum XenBlockVdevType {
diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h
index d8dcc2f..e991111 100644
--- a/include/hw/xen/xen-bus-helper.h
+++ b/include/hw/xen/xen-bus-helper.h
@@ -38,6 +38,15 @@ int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *fmt, ...)
G_GNUC_SCANF(6, 7);
+/*
+ * Unlike other functions here, the printf-formatted path_fmt is for
+ * the XenStore path, not the contents of the node.
+ */
+char *xs_node_read(struct qemu_xs_handle *h, xs_transaction_t tid,
+ unsigned int *len, Error **errp,
+ const char *path_fmt, ...)
+ G_GNUC_PRINTF(5, 6);
+
/* Watch node/key unless node is empty, in which case watch key */
struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
const char *key, xs_watch_fn fn,
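A usage sketch for the new xs_node_read() helper; the node and key names, the XBT_NULL transaction and the ownership comment are assumptions for illustration:

#include "hw/xen/xen-bus-helper.h"
#include "qapi/error.h"

/* Read "<node>/<key>" from XenStore; the caller is assumed to own and
 * g_free() the returned string. */
static char *example_read_node(struct qemu_xs_handle *h, const char *node,
                               const char *key, Error **errp)
{
    unsigned int len = 0;

    /* path_fmt formats the XenStore path, not the contents of the node. */
    return xs_node_read(h, XBT_NULL, &len, errp, "%s/%s", node, key);
}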
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
index 38d40af..bdbf1ed 100644
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -8,9 +8,10 @@
#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H
+#include "hw/qdev-core.h"
#include "hw/xen/xen_backend_ops.h"
-#include "hw/sysbus.h"
#include "qemu/notify.h"
+#include "qemu/queue.h"
#include "qom/object.h"
typedef struct XenEventChannel XenEventChannel;
@@ -91,6 +92,7 @@ void xen_device_frontend_printf(XenDevice *xendev, const char *key,
int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
const char *fmt, ...)
G_GNUC_SCANF(3, 4);
+char *xen_device_frontend_read(XenDevice *xendev, const char *key);
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
Error **errp);
diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
index 3d79623..19df560 100644
--- a/include/hw/xen/xen-hvm-common.h
+++ b/include/hw/xen/xen-hvm-common.h
@@ -1,18 +1,10 @@
#ifndef HW_XEN_HVM_COMMON_H
#define HW_XEN_HVM_COMMON_H
-#include "qemu/units.h"
-
-#include "cpu.h"
-#include "hw/pci/pci.h"
-#include "hw/hw.h"
+#include "qemu/queue.h"
+#include "exec/hwaddr.h"
#include "hw/xen/xen_native.h"
-#include "hw/xen/xen-legacy-backend.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
-#include "sysemu/xen-mapcache.h"
-#include "qemu/error-report.h"
+#include "hw/xen/xen_backend_ops.h"
#include <xen/hvm/ioreq.h>
extern MemoryRegion xen_memory;
@@ -81,6 +73,8 @@ typedef struct XenIOState {
QLIST_HEAD(, XenPciDevice) dev_list;
DeviceListener device_listener;
+ bool has_bufioreq;
+
Notifier exit;
} XenIOState;
@@ -95,6 +89,7 @@ void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ uint8_t handle_bufioreq,
const MemoryListener *xen_memory_listener);
void cpu_ioreq_pio(ioreq_t *req);
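A sketch of how the new handle_bufioreq argument might be threaded through from a machine's init path; the example_* names and the listener variable are assumptions, and HVM_IOREQSRV_BUFIOREQ_ATOMIC (from the Xen headers) is the value that xen_create_ioreq_server() used to hard-code:

#include "hw/boards.h"
#include "hw/xen/xen-hvm-common.h"

static MemoryListener example_memory_listener;   /* assumed, file-scope */

static void example_register_ioreq(XenIOState *state, MachineState *ms)
{
    /* The third argument now selects the buffered-ioreq mode; passing
     * HVM_IOREQSRV_BUFIOREQ_ATOMIC keeps the previous behaviour, while a
     * machine may instead disable buffered ioreqs entirely. */
    xen_register_ioreq(state, ms->smp.max_cpus,
                       HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                       &example_memory_listener);
}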
diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h
index 943732b..2d0cbfe 100644
--- a/include/hw/xen/xen-legacy-backend.h
+++ b/include/hw/xen/xen-legacy-backend.h
@@ -3,7 +3,6 @@
#include "hw/xen/xen_backend_ops.h"
#include "hw/xen/xen_pvdev.h"
-#include "net/net.h"
#include "qom/object.h"
#define TYPE_XENSYSDEV "xen-sysdev"
@@ -50,10 +49,6 @@ void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
uint32_t *refs, unsigned int nr_refs);
-int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
- bool to_domain, XenGrantCopySegment segs[],
- unsigned int nr_segs);
-
static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev,
uint32_t ref, int prot)
{
@@ -70,6 +65,5 @@ static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
void xen_config_cleanup(void);
int xen_config_dev_vfb(int vdev, const char *type);
int xen_config_dev_vkbd(int vdev);
-int xen_config_dev_console(int vdev);
#endif /* HW_XEN_LEGACY_BACKEND_H */
diff --git a/include/hw/xen/xen-pvh-common.h b/include/hw/xen/xen-pvh-common.h
new file mode 100644
index 0000000..5db83d8
--- /dev/null
+++ b/include/hw/xen/xen-pvh-common.h
@@ -0,0 +1,91 @@
+/*
+ * QEMU Xen PVH machine - common code.
+ *
+ * Copyright (c) 2024 Advanced Micro Devices, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef XEN_PVH_COMMON_H__
+#define XEN_PVH_COMMON_H__
+
+#include "system/memory.h"
+#include "qom/object.h"
+#include "hw/boards.h"
+#include "hw/pci-host/gpex.h"
+#include "hw/xen/xen-hvm-common.h"
+
+#define TYPE_XEN_PVH_MACHINE MACHINE_TYPE_NAME("xen-pvh-base")
+OBJECT_DECLARE_TYPE(XenPVHMachineState, XenPVHMachineClass,
+ XEN_PVH_MACHINE)
+
+struct XenPVHMachineClass {
+ MachineClass parent;
+
+ /* PVH implementation specific init. */
+ void (*init)(MachineState *state);
+
+ /*
+ * set_pci_intx_irq - Deliver INTX irqs to the guest.
+ *
+ * @opaque: pointer to XenPVHMachineState.
+ * @irq: IRQ after swizzling, between 0-3.
+ * @level: IRQ level.
+ */
+ void (*set_pci_intx_irq)(void *opaque, int irq, int level);
+
+ /*
+ * set_pci_link_route - optional hook to set up routing between the
+ * INTx IRQs (0 - 3) and GSIs.
+ *
+ * @line: the INTx line (0 => A .. 3 => D)
+ * @irq: GSI
+ */
+ int (*set_pci_link_route)(uint8_t line, uint8_t irq);
+
+ /* Allow implementations to optionally enable buffered ioreqs. */
+ uint8_t handle_bufioreq;
+
+ /*
+ * Each implementation can optionally enable the features that it
+ * supports and that are known to work.
+ */
+ bool has_pci;
+ bool has_tpm;
+ bool has_virtio_mmio;
+};
+
+struct XenPVHMachineState {
+ /*< private >*/
+ MachineState parent;
+
+ XenIOState ioreq;
+
+ struct {
+ MemoryRegion low;
+ MemoryRegion high;
+ } ram;
+
+ struct {
+ GPEXHost gpex;
+ MemoryRegion mmio_alias;
+ MemoryRegion mmio_high_alias;
+ } pci;
+
+ struct {
+ MemMapEntry ram_low, ram_high;
+ MemMapEntry tpm;
+
+ /* Virtio-mmio */
+ MemMapEntry virtio_mmio;
+ uint32_t virtio_mmio_num;
+ uint32_t virtio_mmio_irq_base;
+
+ /* PCI */
+ MemMapEntry pci_ecam, pci_mmio, pci_mmio_high;
+ uint32_t pci_intx_irq_base;
+ } cfg;
+};
+
+void xen_pvh_class_setup_common_props(XenPVHMachineClass *xpc);
+#endif
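A sketch of how a concrete PVH machine's class_init might fill in this class; the example_* callbacks are assumptions with bodies omitted, and HVM_IOREQSRV_BUFIOREQ_OFF is the Xen constant for running without buffered ioreqs:

#include "hw/xen/xen-pvh-common.h"

static void example_pvh_init(MachineState *ms);                     /* assumed */
static void example_set_intx_irq(void *opaque, int irq, int level); /* assumed */

static void example_pvh_machine_class_init(ObjectClass *oc, void *data)
{
    XenPVHMachineClass *xpc = XEN_PVH_MACHINE_CLASS(oc);
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->desc = "Example Xen PVH machine";

    xpc->init = example_pvh_init;
    xpc->set_pci_intx_irq = example_set_intx_irq;
    xpc->handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_OFF;

    /* Enable only the features this board supports and has tested. */
    xpc->has_pci = true;
    xpc->has_virtio_mmio = true;

    xen_pvh_class_setup_common_props(xpc);
}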
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index ecb89ec..e94c6e5 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -24,8 +24,6 @@
#define __XEN_INTERFACE_VERSION__ 0x00040e00
#endif
-#include "exec/cpu-common.h"
-
/* xen-machine.c */
enum xen_mode {
XEN_DISABLED = 0, /* xen support disabled (default) */
diff --git a/include/hw/xen/xen_native.h b/include/hw/xen/xen_native.h
index 1a5ad69..5caf91a 100644
--- a/include/hw/xen/xen_native.h
+++ b/include/hw/xen/xen_native.h
@@ -464,10 +464,11 @@ static inline void xen_unmap_pcidev(domid_t dom,
}
static inline int xen_create_ioreq_server(domid_t dom,
+ int handle_bufioreq,
ioservid_t *ioservid)
{
int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
- HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ handle_bufioreq,
ioservid);
if (rc == 0) {
diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
index 0c98444..629bec9 100644
--- a/include/hw/xen/xen_pvdev.h
+++ b/include/hw/xen/xen_pvdev.h
@@ -1,7 +1,7 @@
#ifndef QEMU_HW_XEN_PVDEV_H
#define QEMU_HW_XEN_PVDEV_H
-#include "hw/qdev-core.h"
+#include "hw/sysbus.h"
#include "hw/xen/xen_backend_ops.h"
/* ------------------------------------------------------------- */
@@ -32,7 +32,8 @@ struct XenDevOps {
};
struct XenLegacyDevice {
- DeviceState qdev;
+ SysBusDevice parent_obj;
+
const char *type;
int dom;
int dev;
diff --git a/include/hw/xtensa/mx_pic.h b/include/hw/xtensa/mx_pic.h
index 500424c..cd316d8 100644
--- a/include/hw/xtensa/mx_pic.h
+++ b/include/hw/xtensa/mx_pic.h
@@ -28,7 +28,7 @@
#ifndef XTENSA_MX_PIC_H
#define XTENSA_MX_PIC_H
-#include "exec/memory.h"
+#include "system/memory.h"
struct XtensaMxPic;
typedef struct XtensaMxPic XtensaMxPic;
diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index ab15577..a88cf8b 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -261,5 +261,18 @@ QIOChannelSocket *
qio_channel_socket_accept(QIOChannelSocket *ioc,
Error **errp);
+/**
+ * qio_channel_socket_set_send_buffer:
+ * @ioc: the socket channel object
+ * @size: buffer size
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Set the underlying socket send buffer size.
+ *
+ * Returns: 0 on success, or -1 on error.
+ */
+int qio_channel_socket_set_send_buffer(QIOChannelSocket *ioc,
+ size_t size,
+ Error **errp);
#endif /* QIO_CHANNEL_SOCKET_H */
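A usage sketch for the new setter on an already-connected socket channel; the 1 MiB figure is purely illustrative:

#include "io/channel-socket.h"
#include "qemu/units.h"
#include "qapi/error.h"

static bool example_tune_send_buffer(QIOChannelSocket *sioc, Error **errp)
{
    /* Presumably maps to SO_SNDBUF on the underlying socket. */
    return qio_channel_socket_set_send_buffer(sioc, 1 * MiB, errp) == 0;
}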
diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
index 26c67f1..7e90235 100644
--- a/include/io/channel-tls.h
+++ b/include/io/channel-tls.h
@@ -49,9 +49,21 @@ struct QIOChannelTLS {
QCryptoTLSSession *session;
QIOChannelShutdown shutdown;
guint hs_ioc_tag;
+ guint bye_ioc_tag;
};
/**
+ * qio_channel_tls_bye:
+ * @ioc: the TLS channel object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Perform the TLS session termination. This method will return
+ * immediately and the termination will continue in the background,
+ * provided the main loop is running.
+ */
+void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
+
+/**
* qio_channel_tls_new_server:
* @master: the underlying channel object
* @creds: the credentials to use for TLS handshake
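A sketch of terminating the TLS session before tearing the channel down, assuming a running main loop:

#include "io/channel-tls.h"
#include "qemu/error-report.h"

static void example_tls_shutdown(QIOChannelTLS *tioc)
{
    Error *err = NULL;

    /* Starts the termination and returns immediately; the exchange
     * completes in the background via the main loop. */
    qio_channel_tls_bye(tioc, &err);
    if (err) {
        error_report_err(err);
    }
}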
diff --git a/include/io/channel.h b/include/io/channel.h
index 7986c49..62b6571 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -35,6 +35,7 @@ OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1
#define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1
+#define QIO_CHANNEL_READ_FLAG_RELAXED_EOF 0x2
typedef enum QIOChannelFeature QIOChannelFeature;
@@ -160,6 +161,9 @@ struct QIOChannelClass {
void *opaque);
int (*io_flush)(QIOChannel *ioc,
Error **errp);
+ int (*io_peerpid)(QIOChannel *ioc,
+ unsigned int *pid,
+ Error **errp);
};
/* General I/O handling functions */
@@ -882,6 +886,7 @@ void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
* @niov: the length of the @iov array
* @fds: an array of file handles to read
* @nfds: number of file handles in @fds
+ * @flags: read flags (QIO_CHANNEL_READ_FLAG_*)
* @errp: pointer to a NULL-initialized error object
*
*
@@ -900,6 +905,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
+ int flags,
Error **errp);
/**
@@ -981,4 +987,22 @@ int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
int qio_channel_flush(QIOChannel *ioc,
Error **errp);
+/**
+ * qio_channel_get_peerpid:
+ * @ioc: the channel object
+ * @pid: pointer to pid
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Returns the pid of the peer process connected to this socket.
+ *
+ * The use of this function is possible only for connected
+ * AF_UNIX stream sockets and for AF_UNIX stream and datagram
+ * socket pairs on Linux.
+ * Returns -1 on error; on non-Linux hosts the call fails and *pid is set to -1.
+ *
+ */
+int qio_channel_get_peerpid(QIOChannel *ioc,
+ unsigned int *pid,
+ Error **errp);
+
#endif /* QIO_CHANNEL_H */
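A usage sketch, assuming @ioc wraps a connected AF_UNIX socket:

#include "io/channel.h"
#include "qemu/error-report.h"

static void example_log_peer_pid(QIOChannel *ioc)
{
    unsigned int pid = 0;
    Error *err = NULL;

    if (qio_channel_get_peerpid(ioc, &pid, &err) < 0) {
        /* Per the documentation above, non-Linux hosts fail here
         * with pid set to -1. */
        error_report_err(err);
        return;
    }
    info_report("peer pid: %u", pid);
}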
diff --git a/include/libdecnumber/dconfig.h b/include/libdecnumber/dconfig.h
index 2bc0ba7f..e67ecc1 100644
--- a/include/libdecnumber/dconfig.h
+++ b/include/libdecnumber/dconfig.h
@@ -23,9 +23,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
#if HOST_BIG_ENDIAN
diff --git a/include/libdecnumber/decContext.h b/include/libdecnumber/decContext.h
index cea6e42..5bb64e1 100644
--- a/include/libdecnumber/decContext.h
+++ b/include/libdecnumber/decContext.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal Context module header */
diff --git a/include/libdecnumber/decDPD.h b/include/libdecnumber/decDPD.h
index 26a21ec..8eb4552 100644
--- a/include/libdecnumber/decDPD.h
+++ b/include/libdecnumber/decDPD.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------------ */
/* Binary Coded Decimal and Densely Packed Decimal conversion lookup tables */
diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h
index 41bc2a0..bf37af8 100644
--- a/include/libdecnumber/decNumber.h
+++ b/include/libdecnumber/decNumber.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal Number arithmetic module header */
diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h
index 6198ca8..0959f66 100644
--- a/include/libdecnumber/decNumberLocal.h
+++ b/include/libdecnumber/decNumberLocal.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* decNumber package local type, tuning, and macro definitions */
diff --git a/include/libdecnumber/dpd/decimal128.h b/include/libdecnumber/dpd/decimal128.h
index aff261e..c57180b 100644
--- a/include/libdecnumber/dpd/decimal128.h
+++ b/include/libdecnumber/dpd/decimal128.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 128-bit format module header */
diff --git a/include/libdecnumber/dpd/decimal128Local.h b/include/libdecnumber/dpd/decimal128Local.h
index 9765427..2948ab2 100644
--- a/include/libdecnumber/dpd/decimal128Local.h
+++ b/include/libdecnumber/dpd/decimal128Local.h
@@ -23,9 +23,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
#if !defined(DECIMAL128LOCAL)
diff --git a/include/libdecnumber/dpd/decimal32.h b/include/libdecnumber/dpd/decimal32.h
index 6cb9e43..9a17933 100644
--- a/include/libdecnumber/dpd/decimal32.h
+++ b/include/libdecnumber/dpd/decimal32.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 32-bit format module header */
diff --git a/include/libdecnumber/dpd/decimal64.h b/include/libdecnumber/dpd/decimal64.h
index f29e570..5c3d0bb 100644
--- a/include/libdecnumber/dpd/decimal64.h
+++ b/include/libdecnumber/dpd/decimal64.h
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 64-bit format module header */
diff --git a/include/migration/client-options.h b/include/migration/client-options.h
index 59f4b55..289c9d7 100644
--- a/include/migration/client-options.h
+++ b/include/migration/client-options.h
@@ -10,6 +10,10 @@
#ifndef QEMU_MIGRATION_CLIENT_OPTIONS_H
#define QEMU_MIGRATION_CLIENT_OPTIONS_H
+
+/* properties */
+bool migrate_send_switchover_start(void);
+
/* capabilities */
bool migrate_background_snapshot(void);
diff --git a/include/migration/cpr.h b/include/migration/cpr.h
new file mode 100644
index 0000000..07858e9
--- /dev/null
+++ b/include/migration/cpr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021, 2024 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef MIGRATION_CPR_H
+#define MIGRATION_CPR_H
+
+#include "qapi/qapi-types-migration.h"
+
+#define MIG_MODE_NONE -1
+
+#define QEMU_CPR_FILE_MAGIC 0x51435052
+#define QEMU_CPR_FILE_VERSION 0x00000001
+
+void cpr_save_fd(const char *name, int id, int fd);
+void cpr_delete_fd(const char *name, int id);
+int cpr_find_fd(const char *name, int id);
+void cpr_resave_fd(const char *name, int id, int fd);
+int cpr_open_fd(const char *path, int flags, const char *name, int id,
+ Error **errp);
+
+MigMode cpr_get_incoming_mode(void);
+void cpr_set_incoming_mode(MigMode mode);
+bool cpr_is_incoming(void);
+
+int cpr_state_save(MigrationChannel *channel, Error **errp);
+int cpr_state_load(MigrationChannel *channel, Error **errp);
+void cpr_state_close(void);
+struct QIOChannel *cpr_state_ioc(void);
+
+bool cpr_incoming_needed(void *opaque);
+
+QEMUFile *cpr_transfer_output(MigrationChannel *channel, Error **errp);
+QEMUFile *cpr_transfer_input(MigrationChannel *channel, Error **errp);
+
+#endif
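A sketch of the fd-preservation pattern these helpers enable; the "example-dev" name and id are illustrative, and cpr_find_fd() is assumed to return a negative value when the previous QEMU saved nothing:

#include <fcntl.h>
#include <errno.h>
#include "migration/cpr.h"
#include "qapi/error.h"

static int example_acquire_fd(const char *path, int id, Error **errp)
{
    /* On the CPR incoming side, reuse the descriptor saved by the old QEMU. */
    int fd = cpr_find_fd("example-dev", id);

    if (fd < 0) {
        fd = open(path, O_RDWR);
        if (fd < 0) {
            error_setg_errno(errp, errno, "failed to open %s", path);
            return -1;
        }
        /* Record it so a later cpr-transfer can hand it to the new QEMU. */
        cpr_save_fd("example-dev", id, fd);
    }
    return fd;
}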
diff --git a/include/migration/misc.h b/include/migration/misc.h
index bfadc56..8fd36eb 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -39,25 +39,25 @@ void precopy_add_notifier(NotifierWithReturn *n);
void precopy_remove_notifier(NotifierWithReturn *n);
int precopy_notify(PrecopyNotifyReason reason, Error **errp);
-void ram_mig_init(void);
void qemu_guest_free_page_hint(void *addr, size_t len);
bool migrate_ram_is_ignored(RAMBlock *block);
/* migration/block.c */
AnnounceParameters *migrate_announce_params(void);
+
/* migration/savevm.c */
void dump_vmstate_json_to_file(FILE *out_fp);
+void qemu_loadvm_start_load_thread(MigrationLoadThread function,
+ void *opaque);
/* migration/migration.c */
void migration_object_init(void);
void migration_shutdown(void);
-bool migration_is_idle(void);
-bool migration_is_active(void);
-bool migration_is_device(void);
+
+bool migration_is_running(void);
bool migration_thread_is_self(void);
-bool migration_is_setup_or_active(void);
typedef enum MigrationEventType {
MIG_EVENT_PRECOPY_SETUP,
@@ -96,7 +96,6 @@ void migration_add_notifier_mode(NotifierWithReturn *notify,
MigrationNotifyFunc func, MigMode mode);
void migration_remove_notifier(NotifierWithReturn *notify);
-bool migration_is_running(void);
void migration_file_set_error(int ret, Error *err);
/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
@@ -108,7 +107,37 @@ bool migration_incoming_postcopy_advised(void);
/* True if background snapshot is active */
bool migration_in_bg_snapshot(void);
-/* migration/block-dirty-bitmap.c */
-void dirty_bitmap_mig_init(void);
+/* Wrapper for block active/inactive operations */
+bool migration_block_activate(Error **errp);
+bool migration_block_inactivate(void);
+
+/* True if @uri starts with a syntactically valid URI prefix */
+bool migrate_is_uri(const char *uri);
+
+/* Parse @uri and return @channel, returning true on success */
+bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
+ Error **errp);
+
+/* migration/multifd-device-state.c */
+typedef struct SaveLiveCompletePrecopyThreadData {
+ SaveLiveCompletePrecopyThreadHandler hdlr;
+ char *idstr;
+ uint32_t instance_id;
+ void *handler_opaque;
+} SaveLiveCompletePrecopyThreadData;
+
+bool multifd_queue_device_state(char *idstr, uint32_t instance_id,
+ char *data, size_t len);
+bool multifd_device_state_supported(void);
+
+void
+multifd_spawn_device_state_save_thread(SaveLiveCompletePrecopyThreadHandler hdlr,
+ char *idstr, uint32_t instance_id,
+ void *opaque);
+
+bool multifd_device_state_save_thread_should_exit(void);
+
+void multifd_abort_device_state_save_threads(void);
+bool multifd_join_device_state_save_threads(void);
#endif
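A sketch of a save path handing an opaque device-state buffer to multifd when the feature is available; the buffer management is illustrative:

#include "migration/misc.h"

static bool example_queue_device_state(char *idstr, uint32_t instance_id,
                                       char *buf, size_t len)
{
    if (!multifd_device_state_supported()) {
        return false;   /* caller falls back to the main migration stream */
    }

    /* Queue the buffer for transmission on a multifd channel. */
    return multifd_queue_device_state(idstr, instance_id, buf, len);
}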
diff --git a/include/migration/register.h b/include/migration/register.h
index f60e797..b79dc81 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -69,7 +69,9 @@ typedef struct SaveVMHandlers {
/**
* @save_cleanup
*
- * Uninitializes the data structures on the source
+ * Uninitializes the data structures on the source.
+ * Note that this handler can be called even if save_setup
+ * wasn't called earlier.
*
* @opaque: data pointer passed to register_savevm_live()
*/
@@ -103,6 +105,25 @@ typedef struct SaveVMHandlers {
*/
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
+ /**
+ * @save_live_complete_precopy_thread (invoked in a separate thread)
+ *
+ * Called at the end of a precopy phase from a separate worker thread
+ * in configurations where multifd device state transfer is supported
+ * in order to perform asynchronous transmission of the remaining data in
+ * parallel with @save_live_complete_precopy handlers.
+ * When postcopy is enabled, devices that support postcopy will skip this
+ * step.
+ *
+ * @d: a #SaveLiveCompletePrecopyThreadData containing parameters that the
+ * handler may need, including this device's section idstr and instance_id,
+ * and the opaque data pointer passed to register_savevm_live().
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns true to indicate success and false for errors.
+ */
+ SaveLiveCompletePrecopyThreadHandler save_live_complete_precopy_thread;
+
/* This runs both outside and inside the BQL. */
/**
@@ -169,6 +190,21 @@ typedef struct SaveVMHandlers {
/* This runs outside the BQL! */
/**
+ * @save_postcopy_prepare
+ *
+ * This hook will be invoked on the source side right before switching
+ * to postcopy (before VM stopped).
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: Data pointer passed to register_savevm_live()
+ * @errp: Error** used to report error message
+ *
+ * Returns: true on success, false if an error occurred. When false is
+ * returned, @errp must be set.
+ */
+ bool (*save_postcopy_prepare)(QEMUFile *f, void *opaque, Error **errp);
+
+ /**
* @state_pending_estimate
*
* This estimates the remaining data to transfer
@@ -228,6 +264,21 @@ typedef struct SaveVMHandlers {
int (*load_state)(QEMUFile *f, void *opaque, int version_id);
/**
+ * @load_state_buffer (invoked outside the BQL)
+ *
+ * Load device state buffer provided to qemu_loadvm_load_state_buffer().
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @buf: the data buffer to load
+ * @len: the data length in buffer
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns true to indicate success and false for errors.
+ */
+ bool (*load_state_buffer)(void *opaque, char *buf, size_t len,
+ Error **errp);
+
+ /**
* @load_setup
*
* Initializes the data structures on the destination.
@@ -244,6 +295,8 @@ typedef struct SaveVMHandlers {
* @load_cleanup
*
* Uninitializes the data structures on the destination.
+ * Note that this handler can be called even if load_setup
+ * wasn't called earlier.
*
* @opaque: data pointer passed to register_savevm_live()
*
@@ -275,6 +328,18 @@ typedef struct SaveVMHandlers {
* otherwise
*/
bool (*switchover_ack_needed)(void *opaque);
+
+ /**
+ * @switchover_start
+ *
+ * Notifies that the switchover has started. Called only on
+ * the destination.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*switchover_start)(void *opaque);
} SaveVMHandlers;
/**
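A sketch of a SaveVMHandlers instance wiring up the new hooks; the example_* handlers are assumptions whose prototypes follow the documentation above and whose bodies are omitted:

#include "migration/register.h"

static bool example_load_state_buffer(void *opaque, char *buf, size_t len,
                                      Error **errp);
static int example_switchover_start(void *opaque);
static bool example_postcopy_prepare(QEMUFile *f, void *opaque, Error **errp);

static const SaveVMHandlers example_savevm_handlers = {
    /* Called outside the BQL for buffers received via multifd. */
    .load_state_buffer     = example_load_state_buffer,
    /* Destination-only notification that switchover has started. */
    .switchover_start      = example_switchover_start,
    /* Runs on the source right before switching over to postcopy. */
    .save_postcopy_prepare = example_postcopy_prepare,
};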
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index f313f2f..1ff7bd9 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -155,7 +155,11 @@ enum VMStateFlags {
};
typedef enum {
- MIG_PRI_DEFAULT = 0,
+ MIG_PRI_UNINITIALIZED = 0, /* An uninitialized priority field maps to */
+ /* MIG_PRI_DEFAULT in save_state_priority */
+
+ MIG_PRI_LOW, /* Must happen after default */
+ MIG_PRI_DEFAULT,
MIG_PRI_IOMMU, /* Must happen before PCI devices */
MIG_PRI_PCI_BUS, /* Must happen before IOMMU */
MIG_PRI_VIRTIO_MEM, /* Must happen before IOMMU */
@@ -230,6 +234,7 @@ extern const VMStateInfo vmstate_info_uint8;
extern const VMStateInfo vmstate_info_uint16;
extern const VMStateInfo vmstate_info_uint32;
extern const VMStateInfo vmstate_info_uint64;
+extern const VMStateInfo vmstate_info_fd;
/** Put this in the stream when migrating a null pointer.*/
#define VMS_NULLPTR_MARKER (0x30U) /* '0' */
@@ -902,6 +907,9 @@ extern const VMStateInfo vmstate_info_qlist;
#define VMSTATE_UINT64_V(_f, _s, _v) \
VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t)
+#define VMSTATE_FD_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_fd, int32_t)
+
#ifdef CONFIG_LINUX
#define VMSTATE_U8_V(_f, _s, _v) \
@@ -936,6 +944,9 @@ extern const VMStateInfo vmstate_info_qlist;
#define VMSTATE_UINT64(_f, _s) \
VMSTATE_UINT64_V(_f, _s, 0)
+#define VMSTATE_FD(_f, _s) \
+ VMSTATE_FD_V(_f, _s, 0)
+
#ifdef CONFIG_LINUX
#define VMSTATE_U8(_f, _s) \
@@ -1009,6 +1020,8 @@ extern const VMStateInfo vmstate_info_qlist;
#define VMSTATE_UINT64_TEST(_f, _s, _t) \
VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint64, uint64_t)
+#define VMSTATE_FD_TEST(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_fd, int32_t)
#define VMSTATE_TIMER_PTR_TEST(_f, _s, _test) \
VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *)
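A sketch of the new macro inside a device's VMStateDescription; the struct and field names are illustrative, and the field is an int32_t to match vmstate_info_fd:

#include "migration/vmstate.h"

typedef struct ExampleDev {
    int32_t memfd;
} ExampleDev;

static const VMStateDescription vmstate_example_dev = {
    .name = "example-dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        /* Migrated with the vmstate_info_fd handler added above. */
        VMSTATE_FD(memfd, ExampleDev),
        VMSTATE_END_OF_LIST()
    },
};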
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 7dec37e..188e4cc 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -30,7 +30,7 @@ uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq);
uint16_t net_checksum_finish(uint32_t sum);
uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
uint8_t *addrs, uint8_t *buf);
-void net_checksum_calculate(uint8_t *data, int length, int csum_flag);
+void net_checksum_calculate(void *data, int length, int csum_flag);
static inline uint32_t
net_checksum_add(int len, uint8_t *buf)
diff --git a/include/net/eth.h b/include/net/eth.h
index 3b80b6e..14c34f5 100644
--- a/include/net/eth.h
+++ b/include/net/eth.h
@@ -56,7 +56,7 @@ struct ip_header {
uint8_t ip_p; /* protocol */
uint16_t ip_sum; /* checksum */
uint32_t ip_src, ip_dst; /* source and destination address */
-};
+} QEMU_PACKED;
typedef struct tcp_header {
uint16_t th_sport; /* source port */
diff --git a/include/net/net.h b/include/net/net.h
index c8f6797..cdd5b10 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -172,9 +172,6 @@ ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
int iovcnt, NetPacketSent *sent_cb);
ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size);
-ssize_t qemu_receive_packet_iov(NetClientState *nc,
- const struct iovec *iov,
- int iovcnt);
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
int size, NetPacketSent *sent_cb);
@@ -307,7 +304,6 @@ void hmp_host_net_remove(Monitor *mon, const QDict *qdict);
void netdev_add(QemuOpts *opts, Error **errp);
int net_hub_id_for_client(NetClientState *nc, int *id);
-NetClientState *net_hub_port_find(int hub_id);
#define DEFAULT_NETWORK_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifup"
#define DEFAULT_NETWORK_DOWN_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifdown"
diff --git a/include/net/queue.h b/include/net/queue.h
index 9f2f289..2e686b1 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -59,10 +59,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue,
const uint8_t *data,
size_t size);
-ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
- const struct iovec *iov,
- int iovcnt);
-
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h
index 8b7b25c..ea65e10 100644
--- a/include/qapi/compat-policy.h
+++ b/include/qapi/compat-policy.h
@@ -18,7 +18,7 @@
extern CompatPolicy compat_policy;
-bool compat_policy_input_ok(unsigned special_features,
+bool compat_policy_input_ok(uint64_t features,
const CompatPolicy *policy,
ErrorClass error_class,
const char *kind, const char *name,
diff --git a/include/qapi/error-internal.h b/include/qapi/error-internal.h
new file mode 100644
index 0000000..ff18a20
--- /dev/null
+++ b/include/qapi/error-internal.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU Error Objects - struct definition
+ *
+ * Copyright IBM, Corp. 2011
+ * Copyright (C) 2011-2015 Red Hat, Inc.
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QAPI_ERROR_INTERNAL_H
+
+struct Error
+{
+ char *msg;
+ ErrorClass err_class;
+
+ /* Used for error_abort only, may be NULL. */
+ const char *func;
+
+ /*
+ * src might be NUL-terminated or not. If it is, src_len is negative.
+ * If it is not, src_len is the length.
+ */
+ const char *src;
+ int src_len;
+ int line;
+ GString *hint;
+};
+
+#endif
diff --git a/include/qapi/error.h b/include/qapi/error.h
index 71f8fb2..41e3816 100644
--- a/include/qapi/error.h
+++ b/include/qapi/error.h
@@ -437,6 +437,8 @@ Error *error_copy(const Error *err);
*/
void error_free(Error *err);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(Error, error_free)
+
/*
* Convenience function to assert that *@errp is set, then silently free it.
*/
@@ -467,6 +469,18 @@ void error_reportf_err(Error *err, const char *fmt, ...)
G_GNUC_PRINTF(2, 3);
/*
+ * Similar to warn_report_err(), except it prints the message just once.
+ * Return true when it prints, false otherwise.
+ */
+bool warn_report_err_once_cond(bool *printed, Error *err);
+
+#define warn_report_err_once(err) \
+ ({ \
+ static bool print_once_; \
+ warn_report_err_once_cond(&print_once_, err); \
+ })
+
+/*
* Just like error_setg(), except you get to specify the error class.
* Note: use of error classes other than ERROR_CLASS_GENERIC_ERROR is
* strongly discouraged.
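A sketch combining the new autoptr support with the warn-once helper; example_probe() is an assumed function reporting failure through an Error, and warn_report_err_once() is assumed to take ownership of the error it is given:

#include "qapi/error.h"

static bool example_probe(Error **errp);   /* assumed */

static void example_check(void)
{
    g_autoptr(Error) local_err = NULL;     /* freed automatically if still set */

    if (!example_probe(&local_err)) {
        /* Only the first failure is reported; ownership is handed over
         * because the helper consumes the error. */
        warn_report_err_once(g_steal_pointer(&local_err));
    }
}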
diff --git a/include/qapi/qmp-registry.h b/include/qapi/qmp-registry.h
new file mode 100644
index 0000000..e0ee1ad
--- /dev/null
+++ b/include/qapi/qmp-registry.h
@@ -0,0 +1,67 @@
+/*
+ * Core Definitions for QAPI/QMP Dispatch
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QAPI_QMP_DISPATCH_H
+#define QAPI_QMP_DISPATCH_H
+
+#include "monitor/monitor.h"
+#include "qemu/queue.h"
+
+typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
+
+typedef enum QmpCommandOptions
+{
+ QCO_NO_SUCCESS_RESP = (1U << 0),
+ QCO_ALLOW_OOB = (1U << 1),
+ QCO_ALLOW_PRECONFIG = (1U << 2),
+ QCO_COROUTINE = (1U << 3),
+} QmpCommandOptions;
+
+typedef struct QmpCommand
+{
+ const char *name;
+ /* Runs in coroutine context if QCO_COROUTINE is set */
+ QmpCommandFunc *fn;
+ QmpCommandOptions options;
+ uint64_t features;
+ QTAILQ_ENTRY(QmpCommand) node;
+ bool enabled;
+ const char *disable_reason;
+} QmpCommand;
+
+typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList;
+
+void qmp_register_command(QmpCommandList *cmds, const char *name,
+ QmpCommandFunc *fn, QmpCommandOptions options,
+ uint64_t features);
+const QmpCommand *qmp_find_command(const QmpCommandList *cmds,
+ const char *name);
+void qmp_disable_command(QmpCommandList *cmds, const char *name,
+ const char *err_msg);
+void qmp_enable_command(QmpCommandList *cmds, const char *name);
+
+bool qmp_command_is_enabled(const QmpCommand *cmd);
+bool qmp_command_available(const QmpCommand *cmd, Error **errp);
+const char *qmp_command_name(const QmpCommand *cmd);
+bool qmp_has_success_response(const QmpCommand *cmd);
+QDict *qmp_error_response(Error *err);
+QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
+ bool allow_oob, Monitor *cur_mon);
+bool qmp_is_oob(const QDict *dict);
+
+typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque);
+
+void qmp_for_each_command(const QmpCommandList *cmds, qmp_cmd_callback_fn fn,
+ void *opaque);
+
+#endif
diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
deleted file mode 100644
index f2e9568..0000000
--- a/include/qapi/qmp/dispatch.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Core Definitions for QAPI/QMP Dispatch
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#ifndef QAPI_QMP_DISPATCH_H
-#define QAPI_QMP_DISPATCH_H
-
-#include "monitor/monitor.h"
-#include "qemu/queue.h"
-
-typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
-
-typedef enum QmpCommandOptions
-{
- QCO_NO_SUCCESS_RESP = (1U << 0),
- QCO_ALLOW_OOB = (1U << 1),
- QCO_ALLOW_PRECONFIG = (1U << 2),
- QCO_COROUTINE = (1U << 3),
-} QmpCommandOptions;
-
-typedef struct QmpCommand
-{
- const char *name;
- /* Runs in coroutine context if QCO_COROUTINE is set */
- QmpCommandFunc *fn;
- QmpCommandOptions options;
- unsigned special_features;
- QTAILQ_ENTRY(QmpCommand) node;
- bool enabled;
- const char *disable_reason;
-} QmpCommand;
-
-typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList;
-
-void qmp_register_command(QmpCommandList *cmds, const char *name,
- QmpCommandFunc *fn, QmpCommandOptions options,
- unsigned special_features);
-const QmpCommand *qmp_find_command(const QmpCommandList *cmds,
- const char *name);
-void qmp_disable_command(QmpCommandList *cmds, const char *name,
- const char *err_msg);
-void qmp_enable_command(QmpCommandList *cmds, const char *name);
-
-bool qmp_command_is_enabled(const QmpCommand *cmd);
-bool qmp_command_available(const QmpCommand *cmd, Error **errp);
-const char *qmp_command_name(const QmpCommand *cmd);
-bool qmp_has_success_response(const QmpCommand *cmd);
-QDict *qmp_error_response(Error *err);
-QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
- bool allow_oob, Monitor *cur_mon);
-bool qmp_is_oob(const QDict *dict);
-
-typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque);
-
-void qmp_for_each_command(const QmpCommandList *cmds, qmp_cmd_callback_fn fn,
- void *opaque);
-
-#endif
diff --git a/include/qapi/qmp/qbool.h b/include/qapi/qmp/qbool.h
deleted file mode 100644
index 0d09726..0000000
--- a/include/qapi/qmp/qbool.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * QBool Module
- *
- * Copyright IBM, Corp. 2009
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#ifndef QBOOL_H
-#define QBOOL_H
-
-#include "qapi/qmp/qobject.h"
-
-struct QBool {
- struct QObjectBase_ base;
- bool value;
-};
-
-void qbool_unref(QBool *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QBool, qbool_unref)
-
-QBool *qbool_from_bool(bool value);
-bool qbool_get_bool(const QBool *qb);
-
-#endif /* QBOOL_H */
diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h
deleted file mode 100644
index 82e90fc..0000000
--- a/include/qapi/qmp/qdict.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * QDict Module
- *
- * Copyright (C) 2009 Red Hat Inc.
- *
- * Authors:
- * Luiz Capitulino <lcapitulino@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef QDICT_H
-#define QDICT_H
-
-#include "qapi/qmp/qobject.h"
-#include "qemu/queue.h"
-
-#define QDICT_BUCKET_MAX 512
-
-typedef struct QDictEntry {
- char *key;
- QObject *value;
- QLIST_ENTRY(QDictEntry) next;
-} QDictEntry;
-
-struct QDict {
- struct QObjectBase_ base;
- size_t size;
- QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX];
-};
-
-void qdict_unref(QDict *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QDict, qdict_unref)
-
-/* Object API */
-QDict *qdict_new(void);
-const char *qdict_entry_key(const QDictEntry *entry);
-QObject *qdict_entry_value(const QDictEntry *entry);
-size_t qdict_size(const QDict *qdict);
-void qdict_put_obj(QDict *qdict, const char *key, QObject *value);
-void qdict_del(QDict *qdict, const char *key);
-int qdict_haskey(const QDict *qdict, const char *key);
-QObject *qdict_get(const QDict *qdict, const char *key);
-const QDictEntry *qdict_first(const QDict *qdict);
-const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry);
-
-/* Helper to qdict_put_obj(), accepts any object */
-#define qdict_put(qdict, key, obj) \
- qdict_put_obj(qdict, key, QOBJECT(obj))
-
-void qdict_put_bool(QDict *qdict, const char *key, bool value);
-void qdict_put_int(QDict *qdict, const char *key, int64_t value);
-void qdict_put_null(QDict *qdict, const char *key);
-void qdict_put_str(QDict *qdict, const char *key, const char *value);
-
-double qdict_get_double(const QDict *qdict, const char *key);
-int64_t qdict_get_int(const QDict *qdict, const char *key);
-bool qdict_get_bool(const QDict *qdict, const char *key);
-QList *qdict_get_qlist(const QDict *qdict, const char *key);
-QDict *qdict_get_qdict(const QDict *qdict, const char *key);
-const char *qdict_get_str(const QDict *qdict, const char *key);
-int64_t qdict_get_try_int(const QDict *qdict, const char *key,
- int64_t def_value);
-bool qdict_get_try_bool(const QDict *qdict, const char *key, bool def_value);
-const char *qdict_get_try_str(const QDict *qdict, const char *key);
-
-QDict *qdict_clone_shallow(const QDict *src);
-
-#endif /* QDICT_H */
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
index 38e8976..d1db6f1 100644
--- a/include/qapi/qmp/qerror.h
+++ b/include/qapi/qmp/qerror.h
@@ -23,10 +23,4 @@
#define QERR_MISSING_PARAMETER \
"Parameter '%s' is missing"
-#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \
- "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")"
-
-#define QERR_UNSUPPORTED \
- "this feature or command is not currently supported"
-
#endif /* QERROR_H */
diff --git a/include/qapi/qmp/qlist.h b/include/qapi/qmp/qlist.h
deleted file mode 100644
index e4e985d..0000000
--- a/include/qapi/qmp/qlist.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * QList Module
- *
- * Copyright (C) 2009 Red Hat Inc.
- *
- * Authors:
- * Luiz Capitulino <lcapitulino@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef QLIST_H
-#define QLIST_H
-
-#include "qapi/qmp/qobject.h"
-#include "qemu/queue.h"
-
-typedef struct QListEntry {
- QObject *value;
- QTAILQ_ENTRY(QListEntry) next;
-} QListEntry;
-
-struct QList {
- struct QObjectBase_ base;
- QTAILQ_HEAD(,QListEntry) head;
-};
-
-void qlist_unref(QList *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QList, qlist_unref)
-
-#define qlist_append(qlist, obj) \
- qlist_append_obj(qlist, QOBJECT(obj))
-
-void qlist_append_bool(QList *qlist, bool value);
-void qlist_append_int(QList *qlist, int64_t value);
-void qlist_append_null(QList *qlist);
-void qlist_append_str(QList *qlist, const char *value);
-
-#define QLIST_FOREACH_ENTRY(qlist, var) \
- for ((var) = QTAILQ_FIRST(&(qlist)->head); \
- (var); \
- (var) = QTAILQ_NEXT((var), next))
-
-static inline QObject *qlist_entry_obj(const QListEntry *entry)
-{
- return entry->value;
-}
-
-QList *qlist_new(void);
-QList *qlist_copy(QList *src);
-void qlist_append_obj(QList *qlist, QObject *obj);
-QObject *qlist_pop(QList *qlist);
-QObject *qlist_peek(QList *qlist);
-int qlist_empty(const QList *qlist);
-size_t qlist_size(const QList *qlist);
-
-static inline const QListEntry *qlist_first(const QList *qlist)
-{
- return QTAILQ_FIRST(&qlist->head);
-}
-
-static inline const QListEntry *qlist_next(const QListEntry *entry)
-{
- return QTAILQ_NEXT(entry, next);
-}
-
-#endif /* QLIST_H */
diff --git a/include/qapi/qmp/qnull.h b/include/qapi/qmp/qnull.h
deleted file mode 100644
index 7feb7c7..0000000
--- a/include/qapi/qmp/qnull.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * QNull
- *
- * Copyright (C) 2015 Red Hat, Inc.
- *
- * Authors:
- * Markus Armbruster <armbru@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1
- * or later. See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef QNULL_H
-#define QNULL_H
-
-#include "qapi/qmp/qobject.h"
-
-struct QNull {
- struct QObjectBase_ base;
-};
-
-extern QNull qnull_;
-
-static inline QNull *qnull(void)
-{
- return qobject_ref(&qnull_);
-}
-
-void qnull_unref(QNull *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNull, qnull_unref)
-
-#endif /* QNULL_H */
diff --git a/include/qapi/qmp/qnum.h b/include/qapi/qmp/qnum.h
deleted file mode 100644
index e86788d..0000000
--- a/include/qapi/qmp/qnum.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * QNum Module
- *
- * Copyright (C) 2009 Red Hat Inc.
- *
- * Authors:
- * Luiz Capitulino <lcapitulino@redhat.com>
- * Anthony Liguori <aliguori@us.ibm.com>
- * Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef QNUM_H
-#define QNUM_H
-
-#include "qapi/qmp/qobject.h"
-
-typedef enum {
- QNUM_I64,
- QNUM_U64,
- QNUM_DOUBLE
-} QNumKind;
-
-/*
- * QNum encapsulates how our dialect of JSON fills in the blanks left
- * by the JSON specification (RFC 8259) regarding numbers.
- *
- * Conceptually, we treat number as an abstract type with three
- * concrete subtypes: floating-point, signed integer, unsigned
- * integer. QNum implements this as a discriminated union of double,
- * int64_t, uint64_t.
- *
- * The JSON parser picks the subtype as follows. If the number has a
- * decimal point or an exponent, it is floating-point. Else if it
- * fits into int64_t, it's signed integer. Else if it fits into
- * uint64_t, it's unsigned integer. Else it's floating-point.
- *
- * Any number can serve as double: qnum_get_double() converts under
- * the hood.
- *
- * An integer can serve as signed / unsigned integer as long as it is
- * in range: qnum_get_try_int() / qnum_get_try_uint() check range and
- * convert under the hood.
- */
-struct QNum {
- struct QObjectBase_ base;
- QNumKind kind;
- union {
- int64_t i64;
- uint64_t u64;
- double dbl;
- } u;
-};
-
-void qnum_unref(QNum *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNum, qnum_unref)
-
-QNum *qnum_from_int(int64_t value);
-QNum *qnum_from_uint(uint64_t value);
-QNum *qnum_from_double(double value);
-
-bool qnum_get_try_int(const QNum *qn, int64_t *val);
-int64_t qnum_get_int(const QNum *qn);
-
-bool qnum_get_try_uint(const QNum *qn, uint64_t *val);
-uint64_t qnum_get_uint(const QNum *qn);
-
-double qnum_get_double(QNum *qn);
-
-char *qnum_to_string(QNum *qn);
-
-#endif /* QNUM_H */
diff --git a/include/qapi/qmp/qobject.h b/include/qapi/qmp/qobject.h
deleted file mode 100644
index 89b97d8..0000000
--- a/include/qapi/qmp/qobject.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * QEMU Object Model.
- *
- * Based on ideas by Avi Kivity <avi@redhat.com>
- *
- * Copyright (C) 2009, 2015 Red Hat Inc.
- *
- * Authors:
- * Luiz Capitulino <lcapitulino@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- * QObject Reference Counts Terminology
- * ------------------------------------
- *
- * - Returning references: A function that returns an object may
- * return it as either a weak or a strong reference. If the
- * reference is strong, you are responsible for calling
- * qobject_unref() on the reference when you are done.
- *
- * If the reference is weak, the owner of the reference may free it at
- * any time in the future. Before storing the reference anywhere, you
- * should call qobject_ref() to make the reference strong.
- *
- * - Transferring ownership: when you transfer ownership of a reference
- * by calling a function, you are no longer responsible for calling
- * qobject_unref() when the reference is no longer needed. In other words,
- * when the function returns you must behave as if the reference to the
- * passed object was weak.
- */
-#ifndef QOBJECT_H
-#define QOBJECT_H
-
-#include "qapi/qapi-builtin-types.h"
-
-/* Not for use outside include/qapi/qmp/ */
-struct QObjectBase_ {
- QType type;
- size_t refcnt;
-};
-
-/* this struct must have no other members than base */
-struct QObject {
- struct QObjectBase_ base;
-};
-
-/*
- * Preprocessor sorcery ahead: use a different identifier for the
- * local variable in each expansion, so we can nest macro calls
- * without shadowing variables.
- */
-#define QOBJECT_INTERNAL(obj, _obj) ({ \
- typeof(obj) _obj = (obj); \
- _obj ? container_of(&_obj->base, QObject, base) : NULL; \
-})
-#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj))
-
-/* Required for qobject_to() */
-#define QTYPE_CAST_TO_QNull QTYPE_QNULL
-#define QTYPE_CAST_TO_QNum QTYPE_QNUM
-#define QTYPE_CAST_TO_QString QTYPE_QSTRING
-#define QTYPE_CAST_TO_QDict QTYPE_QDICT
-#define QTYPE_CAST_TO_QList QTYPE_QLIST
-#define QTYPE_CAST_TO_QBool QTYPE_QBOOL
-
-QEMU_BUILD_BUG_MSG(QTYPE__MAX != 7,
- "The QTYPE_CAST_TO_* list needs to be extended");
-
-#define qobject_to(type, obj) \
- ((type *)qobject_check_type(obj, glue(QTYPE_CAST_TO_, type)))
-
-static inline void qobject_ref_impl(QObject *obj)
-{
- if (obj) {
- obj->base.refcnt++;
- }
-}
-
-/**
- * qobject_is_equal(): Return whether the two objects are equal.
- *
- * Any of the pointers may be NULL; return true if both are. Always
- * return false if only one is (therefore a QNull object is not
- * considered equal to a NULL pointer).
- */
-bool qobject_is_equal(const QObject *x, const QObject *y);
-
-/**
- * qobject_destroy(): Free resources used by the object
- * For use via qobject_unref() only!
- */
-void qobject_destroy(QObject *obj);
-
-static inline void qobject_unref_impl(QObject *obj)
-{
- assert(!obj || obj->base.refcnt);
- if (obj && --obj->base.refcnt == 0) {
- qobject_destroy(obj);
- }
-}
-
-/**
- * qobject_ref(): Increment QObject's reference count
- *
- * Returns: the same @obj. The type of @obj will be propagated to the
- * return type.
- */
-#define qobject_ref(obj) ({ \
- typeof(obj) _o = (obj); \
- qobject_ref_impl(QOBJECT(_o)); \
- _o; \
-})
-
-/**
- * qobject_unref(): Decrement QObject's reference count, deallocate
- * when it reaches zero
- */
-#define qobject_unref(obj) qobject_unref_impl(QOBJECT(obj))
-
-/**
- * qobject_type(): Return the QObject's type
- */
-static inline QType qobject_type(const QObject *obj)
-{
- assert(QTYPE_NONE < obj->base.type && obj->base.type < QTYPE__MAX);
- return obj->base.type;
-}
-
-/**
- * qobject_check_type(): Helper function for the qobject_to() macro.
- * Return @obj, but only if @obj is not NULL and @type is equal to
- * @obj's type. Return NULL otherwise.
- */
-static inline QObject *qobject_check_type(const QObject *obj, QType type)
-{
- if (obj && qobject_type(obj) == type) {
- return (QObject *)obj;
- } else {
- return NULL;
- }
-}
-
-#endif /* QOBJECT_H */
diff --git a/include/qapi/qmp/qstring.h b/include/qapi/qmp/qstring.h
deleted file mode 100644
index 318d815..0000000
--- a/include/qapi/qmp/qstring.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * QString Module
- *
- * Copyright (C) 2009 Red Hat Inc.
- *
- * Authors:
- * Luiz Capitulino <lcapitulino@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef QSTRING_H
-#define QSTRING_H
-
-#include "qapi/qmp/qobject.h"
-
-struct QString {
- struct QObjectBase_ base;
- const char *string;
-};
-
-void qstring_unref(QString *q);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QString, qstring_unref)
-
-QString *qstring_new(void);
-QString *qstring_from_str(const char *str);
-QString *qstring_from_substr(const char *str, size_t start, size_t end);
-QString *qstring_from_gstring(GString *gstr);
-const char *qstring_get_str(const QString *qstring);
-
-#endif /* QSTRING_H */
diff --git a/include/qapi/util.h b/include/qapi/util.h
index b825424..29bc4eb 100644
--- a/include/qapi/util.h
+++ b/include/qapi/util.h
@@ -18,7 +18,7 @@ typedef enum {
typedef struct QEnumLookup {
const char *const *array;
- const unsigned char *const special_features;
+ const uint64_t *const features;
const int size;
} QEnumLookup;
diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h
index 2badec5..7beb0db 100644
--- a/include/qapi/visitor-impl.h
+++ b/include/qapi/visitor-impl.h
@@ -115,11 +115,11 @@ struct Visitor
/* Optional */
bool (*policy_reject)(Visitor *v, const char *name,
- unsigned special_features, Error **errp);
+ uint64_t features, Error **errp);
/* Optional */
bool (*policy_skip)(Visitor *v, const char *name,
- unsigned special_features);
+ uint64_t features);
/* Must be set */
VisitorType type;
diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h
index 27b85d4..f6a9b07 100644
--- a/include/qapi/visitor.h
+++ b/include/qapi/visitor.h
@@ -463,29 +463,29 @@ bool visit_optional(Visitor *v, const char *name, bool *present);
/*
* Should we reject member @name due to policy?
*
- * @special_features is the member's special features encoded as a
- * bitset of QapiSpecialFeature.
+ * @features is the member's special features encoded as a
+ * bitset of QapiFeature.
*
* @name must not be NULL. This function is only useful between
* visit_start_struct() and visit_end_struct(), since only objects
* have deprecated members.
*/
bool visit_policy_reject(Visitor *v, const char *name,
- unsigned special_features, Error **errp);
+ uint64_t features, Error **errp);
/*
*
* Should we skip member @name due to policy?
*
- * @special_features is the member's special features encoded as a
- * bitset of QapiSpecialFeature.
+ * @features is the member's special features encoded as a
+ * bitset of QapiFeature.
*
* @name must not be NULL. This function is only useful between
* visit_start_struct() and visit_end_struct(), since only objects
* have deprecated members.
*/
bool visit_policy_skip(Visitor *v, const char *name,
- unsigned special_features);
+ uint64_t features);
/*
* Set policy for handling deprecated management interfaces.
diff --git a/include/qemu-main.h b/include/qemu-main.h
index 940960a..2ee83be 100644
--- a/include/qemu-main.h
+++ b/include/qemu-main.h
@@ -5,7 +5,19 @@
#ifndef QEMU_MAIN_H
#define QEMU_MAIN_H
-int qemu_default_main(void);
+/*
+ * The function to run on the main (initial) thread of the process.
+ * NULL means QEMU's main event loop.
+ * When non-NULL, QEMU's main event loop will run on a purposely created
+ * thread, after which the provided function pointer will be invoked on
+ * the initial thread.
+ * This is useful on platforms which treat the main thread as special
+ * (macOS/Darwin) and/or require all UI API calls to occur from the main
+ * thread. Those platforms can initialise it to a specific function,
+ * while UI implementations may reset it to NULL during their init if they
+ * will handle system and UI events on the main thread via QEMU's own main
+ * event loop.
+ */
extern int (*qemu_main)(void);
#endif /* QEMU_MAIN_H */
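A sketch of the pattern described above: a UI frontend that handles system and UI events on the initial thread itself clears the hook during its init (the function name is an assumption):

#include "qemu-main.h"

static void example_ui_early_init(void)
{
    /* NULL means "run QEMU's own main event loop on the main thread". */
    qemu_main = NULL;
}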
diff --git a/include/qemu/accel.h b/include/qemu/accel.h
index 972a849..fbd3d89 100644
--- a/include/qemu/accel.h
+++ b/include/qemu/accel.h
@@ -38,13 +38,13 @@ typedef struct AccelClass {
const char *name;
int (*init_machine)(MachineState *ms);
-#ifndef CONFIG_USER_ONLY
+ bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
+ void (*cpu_common_unrealize)(CPUState *cpu);
+
+ /* system related hooks */
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
-#endif
- bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
- void (*cpu_common_unrealize)(CPUState *cpu);
/* gdbstub related hooks */
int (*gdbstub_supported_sstep_flags)(void);
@@ -78,12 +78,10 @@ const char *current_accel_name(void);
void accel_init_interfaces(AccelClass *ac);
-#ifndef CONFIG_USER_ONLY
int accel_init_machine(AccelState *accel, MachineState *ms);
/* Called just before os_setup_post (ie just before drop OS privs) */
void accel_setup_post(MachineState *ms);
-#endif /* !CONFIG_USER_ONLY */
/**
* accel_cpu_instance_init:
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index dc4118d..f80cba2 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -56,25 +56,13 @@
*/
#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
-/* Sanity check that the size of an atomic operation isn't "overly large".
+/*
+ * Sanity check that the size of an atomic operation isn't "overly large".
* Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
* want to use them because we ought not need them, and this lets us do a
* bit of sanity checking that other 32-bit hosts might build.
- *
- * That said, we have a problem on 64-bit ILP32 hosts in that in order to
- * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
- * We'd prefer not want to pull in everything else TCG related, so handle
- * those few cases by hand.
- *
- * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
- * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
- * n64 (LP64) ABIs are both detected using __mips64.
*/
-#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
-# define ATOMIC_REG_SIZE 8
-#else
-# define ATOMIC_REG_SIZE sizeof(void *)
-#endif
+#define ATOMIC_REG_SIZE sizeof(void *)
/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is
@@ -128,7 +116,7 @@
_val; \
})
#define qatomic_rcu_read(ptr) \
- qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val))
+ qatomic_rcu_read_internal((ptr), MAKE_IDENTIFIER(_val))
#define qatomic_rcu_set(ptr, i) do { \
qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index 88af6d4..31e5c48 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -13,6 +13,7 @@
#ifndef QEMU_ATOMIC128_H
#define QEMU_ATOMIC128_H
+#include "qemu/atomic.h"
#include "qemu/int128.h"
/*
@@ -58,7 +59,7 @@
* Therefore, special case each platform.
*/
-#include "host/atomic128-cas.h"
-#include "host/atomic128-ldst.h"
+#include "host/atomic128-cas.h.inc"
+#include "host/atomic128-ldst.h.inc"
#endif /* QEMU_ATOMIC128_H */
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 1cf2884..0044333 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -69,6 +69,14 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
+/*
+ * This is for use with the bit32 versions of set_bit() etc;
+ * we don't currently support the full range of bitmap operations
+ * on bitmaps backed by an array of uint32_t.
+ */
+#define DECLARE_BITMAP32(name, bits) \
+ uint32_t name[BITS_TO_U32S(bits)]
+
#define small_nbits(nbits) \
((nbits) <= BITS_PER_LONG)
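A sketch of DECLARE_BITMAP32 used together with the uint32_t bit helpers that the bitops.h changes below add; the interrupt-status modelling is illustrative:

#include "qemu/bitmap.h"
#include "qemu/bitops.h"

typedef struct ExampleIrqState {
    /* 64 status bits backed by two guest-visible uint32_t words. */
    DECLARE_BITMAP32(status, 64);
} ExampleIrqState;

static void example_raise_irq(ExampleIrqState *s, int irq)
{
    set_bit32(irq, s->status);
}

static bool example_ack_irq(ExampleIrqState *s, int irq)
{
    /* Returns whether the bit was set before it was cleared. */
    return test_and_clear_bit32(irq, s->status);
}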
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 2c0a2fe..c7b838a 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -18,17 +18,48 @@
#define BITS_PER_BYTE CHAR_BIT
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U32S(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t))
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define MAKE_64BIT_MASK(shift, length) \
(((~0ULL) >> (64 - (length))) << (shift))
/**
+ * DOC: Functions operating on arrays of bits
+ *
+ * We provide a set of functions which work on arbitrary-length arrays of
+ * bits. These come in several flavours which vary in what the type of the
+ * underlying storage for the bits is:
+ *
+ * - Bits stored in an array of 'unsigned long': set_bit(), clear_bit(), etc
+ * - Bits stored in an array of 'uint32_t': set_bit32(), clear_bit32(), etc
+ *
+ * Because the 'unsigned long' type has a size which varies between
+ * host systems, the versions using 'uint32_t' are often preferable.
+ * This is particularly the case in a device model where there may
+ * be some guest-visible register view of the bit array.
+ *
+ * We do not currently implement uint32_t versions of find_last_bit(),
+ * find_next_bit(), find_next_zero_bit(), find_first_bit() or
+ * find_first_zero_bit(), because we haven't yet needed them. If you
+ * need them you should implement them similarly to the 'unsigned long'
+ * versions.
+ *
+ * You can declare a bitmap to be used with these functions via the
+ * DECLARE_BITMAP and DECLARE_BITMAP32 macros in bitmap.h.
+ */
+
+/**
+ * DOC: 'unsigned long' bit array APIs
+ */
+
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
* set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
@@ -225,6 +256,141 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
}
/**
+ * DOC: 'uint32_t' bit array APIs
+ */
+
+#define BIT32_MASK(nr) (1UL << ((nr) % 32))
+#define BIT32_WORD(nr) ((nr) / 32)
+
+/**
+ * set_bit32 - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+static inline void set_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+
+ *p |= mask;
+}
+
+/**
+ * set_bit32_atomic - Set a bit in memory atomically
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+static inline void set_bit32_atomic(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+
+ qatomic_or(p, mask);
+}
+
+/**
+ * clear_bit32 - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * clear_bit32_atomic - Clears a bit in memory atomically
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit32_atomic(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+
+ qatomic_and(p, ~mask);
+}
+
+/**
+ * change_bit32 - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+static inline void change_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * test_and_set_bit32 - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+static inline int test_and_set_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+ uint32_t old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit32 - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+static inline int test_and_clear_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+ uint32_t old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit32 - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+static inline int test_and_change_bit32(long nr, uint32_t *addr)
+{
+ uint32_t mask = BIT32_MASK(nr);
+ uint32_t *p = addr + BIT32_WORD(nr);
+ uint32_t old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_bit32 - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit32(long nr, const uint32_t *addr)
+{
+ return 1U & (addr[BIT32_WORD(nr)] >> (nr & 31));
+}
+
+/**
+ * DOC: Miscellaneous bit operations on single values
+ *
+ * These functions are a collection of useful operations
+ * (rotations, bit extract, bit deposit, etc) on single
+ * integer values.
+ */
+
+/**
* rol8 - rotate an 8-bit value left
* @word: value to rotate
* @shift: bits to roll
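
As a minimal usage sketch of the uint32_t helpers added above, a hypothetical device model could back a 64-bit guest-visible status register with DECLARE_BITMAP32; the device and field names below are illustrative only, not part of this patch:

#include "qemu/osdep.h"
#include "qemu/bitmap.h"
#include "qemu/bitops.h"

typedef struct {
    DECLARE_BITMAP32(irq_status, 64);      /* two uint32_t words */
} DemoDeviceState;

static void demo_raise_irq(DemoDeviceState *s, int line)
{
    /* returns the previous bit value, so 0 means "newly asserted" */
    if (!test_and_set_bit32(line, s->irq_status)) {
        /* ... notify the interrupt controller ... */
    }
}

static uint32_t demo_status_read(DemoDeviceState *s, int word)
{
    /* each uint32_t word maps 1:1 onto a 32-bit guest register */
    return s->irq_status[word];
}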
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index ad22910..9a11764 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -140,6 +140,8 @@ CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
+#undef CPU_CONVERT
+
/*
* Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
* a compile-time constant if you pass in a constant. So this can be
@@ -203,9 +205,6 @@ CPU_CONVERT(le, 64, uint64_t)
* te : target endian
* (except for byte accesses, which have no endian infix).
*
- * The target endian accessors are obviously only available to source
- * files which are built per-target; they are defined in cpu-all.h.
- *
* In all cases these functions take a host pointer.
* For accessors that take a guest address rather than a
* host address, see the cpu_{ld,st}_* accessors defined in
diff --git a/include/qemu/cacheflush.h b/include/qemu/cacheflush.h
index ae20bcd..76eb55d 100644
--- a/include/qemu/cacheflush.h
+++ b/include/qemu/cacheflush.h
@@ -26,6 +26,13 @@ static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
/* icache is coherent and does not require flushing. */
}
+#elif defined(EMSCRIPTEN)
+
+static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
+{
+ /* Wasm doesn't have an executable region of memory. */
+}
+
#else
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len);
diff --git a/include/qemu/clang-tsa.h b/include/qemu/clang-tsa.h
deleted file mode 100644
index ba06fb8..0000000
--- a/include/qemu/clang-tsa.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef CLANG_TSA_H
-#define CLANG_TSA_H
-
-/*
- * Copyright 2018 Jarkko Hietaniemi <jhi@iki.fi>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without
- * limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/* http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
- *
- * TSA is available since clang 3.6-ish.
- */
-#ifdef __clang__
-# define TSA(x) __attribute__((x))
-#else
-# define TSA(x) /* No TSA, make TSA attributes no-ops. */
-#endif
-
-/* TSA_CAPABILITY() is used to annotate typedefs:
- *
- * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;
- */
-#define TSA_CAPABILITY(x) TSA(capability(x))
-
-/* TSA_GUARDED_BY() is used to annotate global variables,
- * the data is guarded:
- *
- * Foo foo TSA_GUARDED_BY(mutex);
- */
-#define TSA_GUARDED_BY(x) TSA(guarded_by(x))
-
-/* TSA_PT_GUARDED_BY() is used to annotate global pointers, the data
- * behind the pointer is guarded.
- *
- * Foo* ptr TSA_PT_GUARDED_BY(mutex);
- */
-#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x))
-
-/* The TSA_REQUIRES() is used to annotate functions: the caller of the
- * function MUST hold the resource, the function will NOT release it.
- *
- * More than one mutex may be specified, comma-separated.
- *
- * void Foo(void) TSA_REQUIRES(mutex);
- */
-#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__))
-#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__))
-
-/* TSA_EXCLUDES() is used to annotate functions: the caller of the
- * function MUST NOT hold resource, the function first acquires the
- * resource, and then releases it.
- *
- * More than one mutex may be specified, comma-separated.
- *
- * void Foo(void) TSA_EXCLUDES(mutex);
- */
-#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__))
-
-/* TSA_ACQUIRE() is used to annotate functions: the caller of the
- * function MUST NOT hold the resource, the function will acquire the
- * resource, but NOT release it.
- *
- * More than one mutex may be specified, comma-separated.
- *
- * void Foo(void) TSA_ACQUIRE(mutex);
- */
-#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__))
-#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__))
-
-/* TSA_RELEASE() is used to annotate functions: the caller of the
- * function MUST hold the resource, but the function will then release it.
- *
- * More than one mutex may be specified, comma-separated.
- *
- * void Foo(void) TSA_RELEASE(mutex);
- */
-#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__))
-#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__))
-
-/* TSA_NO_TSA is used to annotate functions. Use only when you need to.
- *
- * void Foo(void) TSA_NO_TSA;
- */
-#define TSA_NO_TSA TSA(no_thread_safety_analysis)
-
-/*
- * TSA_ASSERT() is used to annotate functions: This function will assert that
- * the lock is held. When it returns, the caller of the function is assumed to
- * already hold the resource.
- *
- * More than one mutex may be specified, comma-separated.
- */
-#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__))
-#define TSA_ASSERT_SHARED(...) TSA(assert_shared_capability(__VA_ARGS__))
-
-#endif /* #ifndef CLANG_TSA_H */
diff --git a/include/qemu/co-shared-resource.h b/include/qemu/co-shared-resource.h
index 78ca585..41be1a8 100644
--- a/include/qemu/co-shared-resource.h
+++ b/include/qemu/co-shared-resource.h
@@ -45,13 +45,6 @@ SharedResource *shres_create(uint64_t total);
void shres_destroy(SharedResource *s);
/*
- * Try to allocate an amount of @n. Return true on success, and false
- * if there is too little left of the collective resource to fulfill
- * the request.
- */
-bool co_try_get_from_shres(SharedResource *s, uint64_t n);
-
-/*
* Allocate an amount of @n, and, if necessary, yield until
* that becomes possible.
*/
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index 554c5ce..65b8995 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -22,12 +22,7 @@
#define QEMU_EXTERN_C extern
#endif
-#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
-# define QEMU_PACKED __attribute__((gcc_struct, packed))
-#else
-# define QEMU_PACKED __attribute__((packed))
-#endif
-
+#define QEMU_PACKED __attribute__((packed))
#define QEMU_ALIGNED(X) __attribute__((aligned(X)))
#ifndef glue
@@ -38,7 +33,7 @@
#endif
/* Expands into an identifier stemN, where N is another number each time */
-#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__)
+#define MAKE_IDENTIFIER(stem) glue(stem, __COUNTER__)
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@@ -213,6 +208,122 @@
#endif
/*
+ * Disable -ftrivial-auto-var-init on a local variable.
+ *
+ * Use this for functions on the device I/O path (or other important hot
+ * paths) that have large variables on the stack. A rule of thumb is that
+ * "large" means a function with 4KB of data in the local stack frame. Any
+ * variable that is kilobytes in size should be annotated with this
+ * attribute, to pre-emptively eliminate any potential overhead from the
+ * compiler's implicit zeroing of memory.
+ *
+ * Given that this turns off a security hardening feature, when using this
+ * to flag variables, it is important that the code is double-checked to
+ * ensure there is no possible use of uninitialized data in the method.
+ */
+#if __has_attribute(uninitialized)
+# define QEMU_UNINITIALIZED __attribute__((uninitialized))
+#else
+# define QEMU_UNINITIALIZED
+#endif
+
+/*
+ * http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+ *
+ * TSA is available since clang 3.6-ish.
+ */
+#ifdef __clang__
+# define TSA(x) __attribute__((x))
+#else
+# define TSA(x) /* No TSA, make TSA attributes no-ops. */
+#endif
+
+/*
+ * TSA_CAPABILITY() is used to annotate typedefs:
+ *
+ * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;
+ */
+#define TSA_CAPABILITY(x) TSA(capability(x))
+
+/*
+ * TSA_GUARDED_BY() is used to annotate global variables,
+ * the data is guarded:
+ *
+ * Foo foo TSA_GUARDED_BY(mutex);
+ */
+#define TSA_GUARDED_BY(x) TSA(guarded_by(x))
+
+/*
+ * TSA_PT_GUARDED_BY() is used to annotate global pointers, the data
+ * behind the pointer is guarded.
+ *
+ * Foo* ptr TSA_PT_GUARDED_BY(mutex);
+ */
+#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x))
+
+/*
+ * The TSA_REQUIRES() is used to annotate functions: the caller of the
+ * function MUST hold the resource, the function will NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_REQUIRES(mutex);
+ */
+#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__))
+#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__))
+
+/*
+ * TSA_EXCLUDES() is used to annotate functions: the caller of the
+ * function MUST NOT hold resource, the function first acquires the
+ * resource, and then releases it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_EXCLUDES(mutex);
+ */
+#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__))
+
+/*
+ * TSA_ACQUIRE() is used to annotate functions: the caller of the
+ * function MUST NOT hold the resource, the function will acquire the
+ * resource, but NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_ACQUIRE(mutex);
+ */
+#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__))
+#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__))
+
+/*
+ * TSA_RELEASE() is used to annotate functions: the caller of the
+ * function MUST hold the resource, but the function will then release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_RELEASE(mutex);
+ */
+#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__))
+#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__))
+
+/*
+ * TSA_NO_TSA is used to annotate functions. Use only when you need to.
+ *
+ * void Foo(void) TSA_NO_TSA;
+ */
+#define TSA_NO_TSA TSA(no_thread_safety_analysis)
+
+/*
+ * TSA_ASSERT() is used to annotate functions: This function will assert that
+ * the lock is held. When it returns, the caller of the function is assumed to
+ * already hold the resource.
+ *
+ * More than one mutex may be specified, comma-separated.
+ */
+#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__))
+#define TSA_ASSERT_SHARED(...) TSA(assert_shared_capability(__VA_ARGS__))
+
+/*
* Ugly CPP trick that is like "defined FOO", but also works in C
* code. Useful to replace #ifdef with "if" statements; assumes
* the symbol was defined with Meson's "config.set()", so it is empty
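
A minimal sketch of QEMU_UNINITIALIZED on a hot-path scratch buffer; the function and buffer size are hypothetical, and every byte that is consumed is written by read() first (assuming "qemu/osdep.h" is included):

static void demo_copy_block(int src_fd, int dst_fd)
{
    QEMU_UNINITIALIZED uint8_t scratch[64 * 1024];
    ssize_t n;

    /* read() initializes exactly the bytes that are then written out */
    n = read(src_fd, scratch, sizeof(scratch));
    if (n > 0) {
        (void)write(dst_fd, scratch, n);
    }
}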
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index ff30845..e545bbf 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -16,6 +16,7 @@
#define QEMU_COROUTINE_H
#include "qemu/coroutine-core.h"
+#include "qemu/atomic.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
diff --git a/include/qemu/crc-ccitt.h b/include/qemu/crc-ccitt.h
index 8918daf..ce28e29 100644
--- a/include/qemu/crc-ccitt.h
+++ b/include/qemu/crc-ccitt.h
@@ -8,7 +8,7 @@
*
* From Linux kernel v5.10 include/linux/crc-ccitt.h
*
- * SPDX-License-Identifier: GPL-2.0
+ * SPDX-License-Identifier: GPL-2.0-only
*/
#ifndef CRC_CCITT_H
diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h
index da15547..36c68ce 100644
--- a/include/qemu/cutils.h
+++ b/include/qemu/cutils.h
@@ -241,13 +241,10 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n);
int qemu_pstrcmp0(const char **str1, const char **str2);
/* Find program directory, and save it for later usage with
- * qemu_get_exec_dir().
+ * get_relocated_path().
* Try OS specific API first, if not working, parse from argv0. */
void qemu_init_exec_dir(const char *argv0);
-/* Get the saved exec dir. */
-const char *qemu_get_exec_dir(void);
-
/**
* get_relocated_path:
* @dir: the directory (typically a `CONFIG_*DIR` variable) to be relocated.
@@ -305,4 +302,19 @@ GString *qemu_hexdump_line(GString *str, const void *buf, size_t len,
void qemu_hexdump(FILE *fp, const char *prefix,
const void *bufptr, size_t size);
+/**
+ * qemu_hexdump_to_buffer:
+ * @buffer: output string buffer
+ * @buffer_size: amount of available space in buffer. Must be at least
+ * data_size*2+1.
+ * @data: input bytes
+ * @data_size: number of bytes in data
+ *
+ * Converts the @data_size bytes in @data into hex digit pairs, writing them to
+ * @buffer. Finally, a nul terminating character is written; @buffer therefore
+ * needs space for (data_size*2+1) chars.
+ */
+void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size,
+ const uint8_t *restrict data, size_t data_size);
+
#endif
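
For example, a sketch of formatting a 6-byte MAC address with the new helper; the buffer must provide data_size * 2 + 1 bytes for the trailing NUL:

static void demo_format_mac(const uint8_t mac[6], char out[13])
{
    /* 6 bytes -> 12 hex digits + 1 NUL terminator */
    qemu_hexdump_to_buffer(out, 13, mac, 6);
}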
diff --git a/include/qemu/datadir.h b/include/qemu/datadir.h
index 21f9097..cca32af 100644
--- a/include/qemu/datadir.h
+++ b/include/qemu/datadir.h
@@ -1,11 +1,16 @@
#ifndef QEMU_DATADIR_H
#define QEMU_DATADIR_H
-#define QEMU_FILE_TYPE_BIOS 0
-#define QEMU_FILE_TYPE_KEYMAP 1
+typedef enum {
+ QEMU_FILE_TYPE_BIOS,
+ QEMU_FILE_TYPE_DTB,
+ QEMU_FILE_TYPE_KEYMAP,
+} QemuFileType;
+
/**
* qemu_find_file:
* @type: QEMU_FILE_TYPE_BIOS (for BIOS, VGA BIOS)
+ * QEMU_FILE_TYPE_DTB (for device tree blobs)
* or QEMU_FILE_TYPE_KEYMAP (for keymaps).
* @name: Relative or absolute file name
*
@@ -20,7 +25,7 @@
*
* Returns: a path that can access @name, or NULL if no matching file exists.
*/
-char *qemu_find_file(int type, const char *name);
+char *qemu_find_file(QemuFileType type, const char *name);
void qemu_add_default_firmwarepath(void);
void qemu_add_data_dir(char *path);
void qemu_list_data_dirs(void);
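
A short sketch of the retyped lookup; the DTB file name here is a placeholder:

char *path = qemu_find_file(QEMU_FILE_TYPE_DTB, "hypothetical-board.dtb");

if (path) {
    /* ... load the device tree blob from 'path' ... */
    g_free(path);
} else {
    /* not found in any registered data directory */
}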
diff --git a/include/qemu/envlist.h b/include/qemu/envlist.h
index 6006dfa..b2883f6 100644
--- a/include/qemu/envlist.h
+++ b/include/qemu/envlist.h
@@ -7,8 +7,6 @@ envlist_t *envlist_create(void);
void envlist_free(envlist_t *);
int envlist_setenv(envlist_t *, const char *);
int envlist_unsetenv(envlist_t *, const char *);
-int envlist_parse_set(envlist_t *, const char *);
-int envlist_parse_unset(envlist_t *, const char *);
char **envlist_to_environ(const envlist_t *, size_t *);
#endif /* ENVLIST_H */
diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h
index c6295c6..4f768d4 100644
--- a/include/qemu/fifo8.h
+++ b/include/qemu/fifo8.h
@@ -15,10 +15,9 @@ typedef struct {
* @fifo: struct Fifo8 to initialise with new FIFO
* @capacity: capacity of the newly created FIFO
*
- * Create a FIFO of the specified size. Clients should call fifo8_destroy()
+ * Create a FIFO of the specified capacity. Clients should call fifo8_destroy()
* when finished using the fifo. The FIFO is initially empty.
*/
-
void fifo8_create(Fifo8 *fifo, uint32_t capacity);
/**
@@ -26,9 +25,8 @@ void fifo8_create(Fifo8 *fifo, uint32_t capacity);
* @fifo: FIFO to cleanup
*
* Cleanup a FIFO created with fifo8_create(). Frees memory created for FIFO
- *storage. The FIFO is no longer usable after this has been called.
+ * storage. The FIFO is no longer usable after this has been called.
*/
-
void fifo8_destroy(Fifo8 *fifo);
/**
@@ -39,7 +37,6 @@ void fifo8_destroy(Fifo8 *fifo);
* Push a data byte to the FIFO. Behaviour is undefined if the FIFO is full.
* Clients are responsible for checking for fullness using fifo8_is_full().
*/
-
void fifo8_push(Fifo8 *fifo, uint8_t data);
/**
@@ -52,7 +49,6 @@ void fifo8_push(Fifo8 *fifo, uint8_t data);
* Clients are responsible for checking the space left in the FIFO using
* fifo8_num_free().
*/
-
void fifo8_push_all(Fifo8 *fifo, const uint8_t *data, uint32_t num);
/**
@@ -64,25 +60,65 @@ void fifo8_push_all(Fifo8 *fifo, const uint8_t *data, uint32_t num);
*
* Returns: The popped data byte.
*/
-
uint8_t fifo8_pop(Fifo8 *fifo);
/**
+ * fifo8_peek:
+ * @fifo: fifo to peek from
+ *
+ * Peek the data byte at the current head of the FIFO. Clients are responsible
+ * for checking for emptiness using fifo8_is_empty().
+ *
+ * Returns: The peeked data byte.
+ */
+uint8_t fifo8_peek(Fifo8 *fifo);
+
+/**
* fifo8_pop_buf:
* @fifo: FIFO to pop from
+ * @dest: the buffer to write the data into (can be NULL)
+ * @destlen: size of @dest and maximum number of bytes to pop
+ *
+ * Pop a number of elements from the FIFO up to a maximum of @destlen.
+ * The popped data is copied into the @dest buffer.
+ * Care is taken when the data wraps around in the ring buffer.
+ *
+ * Returns: number of bytes popped.
+ */
+uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen);
+
+/**
+ * fifo8_peek_buf:
+ * @fifo: FIFO to read from
+ * @dest: the buffer to write the data into (can be NULL)
+ * @destlen: size of @dest and maximum number of bytes to peek
+ *
+ * Peek a number of elements from the FIFO up to a maximum of @destlen.
+ * The peeked data is copied into the @dest buffer.
+ * Care is taken when the data wraps around in the ring buffer.
+ *
+ * Returns: number of bytes peeked.
+ */
+uint32_t fifo8_peek_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen);
+
+/**
+ * fifo8_pop_bufptr:
+ * @fifo: FIFO to pop from
* @max: maximum number of bytes to pop
* @numptr: pointer filled with number of bytes returned (can be NULL)
*
- * Pop a number of elements from the FIFO up to a maximum of max. The buffer
+ * New code should prefer to use fifo8_pop_buf() instead of fifo8_pop_bufptr().
+ *
+ * Pop a number of elements from the FIFO up to a maximum of @max. The buffer
* containing the popped data is returned. This buffer points directly into
- * the FIFO backing store and data is invalidated once any of the fifo8_* APIs
- * are called on the FIFO.
+ * the internal FIFO backing store (without any bounds checking!) and the
+ * data is invalidated once any of the fifo8_* APIs are called on the FIFO.
*
* The function may return fewer bytes than requested when the data wraps
* around in the ring buffer; in this case only a contiguous part of the data
* is returned.
*
- * The number of valid bytes returned is populated in *numptr; will always
+ * The number of valid bytes returned is populated in *@numptr; it will always
* return at least 1 byte. max must not be 0 or greater than the number of
* bytes in the FIFO.
*
@@ -91,15 +127,15 @@ uint8_t fifo8_pop(Fifo8 *fifo);
*
* Returns: A pointer to popped data.
*/
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+const uint8_t *fifo8_pop_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
/**
- * fifo8_peek_buf: read upto max bytes from the fifo
+ * fifo8_peek_bufptr: read up to max bytes from the fifo
* @fifo: FIFO to read from
* @max: maximum number of bytes to peek
* @numptr: pointer filled with number of bytes returned (can be NULL)
*
- * Peek into a number of elements from the FIFO up to a maximum of max.
+ * Peek into a number of elements from the FIFO up to a maximum of @max.
* The buffer containing the data peeked into is returned. This buffer points
* directly into the FIFO backing store. Since data is invalidated once any
 * of the fifo8_* APIs are called on the FIFO, it is the caller's responsibility
@@ -109,7 +145,7 @@ const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
* around in the ring buffer; in this case only a contiguous part of the data
* is returned.
*
- * The number of valid bytes returned is populated in *numptr; will always
+ * The number of valid bytes returned is populated in *@numptr; it will always
* return at least 1 byte. max must not be 0 or greater than the number of
* bytes in the FIFO.
*
@@ -118,7 +154,16 @@ const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
*
* Returns: A pointer to peekable data.
*/
-const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+const uint8_t *fifo8_peek_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+
+/**
+ * fifo8_drop:
+ * @fifo: FIFO to drop bytes
+ * @len: number of bytes to drop
+ *
+ * Drop (consume) bytes from a FIFO.
+ */
+void fifo8_drop(Fifo8 *fifo, uint32_t len);
/**
* fifo8_reset:
@@ -126,7 +171,6 @@ const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
*
* Reset a FIFO. All data is discarded and the FIFO is emptied.
*/
-
void fifo8_reset(Fifo8 *fifo);
/**
@@ -137,7 +181,6 @@ void fifo8_reset(Fifo8 *fifo);
*
* Returns: True if the fifo is empty, false otherwise.
*/
-
bool fifo8_is_empty(Fifo8 *fifo);
/**
@@ -148,7 +191,6 @@ bool fifo8_is_empty(Fifo8 *fifo);
*
* Returns: True if the fifo is full, false otherwise.
*/
-
bool fifo8_is_full(Fifo8 *fifo);
/**
@@ -159,7 +201,6 @@ bool fifo8_is_full(Fifo8 *fifo);
*
* Returns: Number of free bytes.
*/
-
uint32_t fifo8_num_free(Fifo8 *fifo);
/**
@@ -170,7 +211,6 @@ uint32_t fifo8_num_free(Fifo8 *fifo);
*
* Returns: Number of used bytes.
*/
-
uint32_t fifo8_num_used(Fifo8 *fifo);
extern const VMStateDescription vmstate_fifo8;
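
A sketch of the renamed API: fifo8_pop_buf() now copies into a caller-supplied buffer, while fifo8_pop_bufptr() keeps the old pointer-returning behaviour:

Fifo8 rx;
uint8_t out[16];
uint32_t n;

fifo8_create(&rx, 64);
fifo8_push(&rx, 0xab);
fifo8_push(&rx, 0xcd);

n = fifo8_pop_buf(&rx, out, sizeof(out));   /* n == 2, out[0..1] filled */

fifo8_destroy(&rx);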
diff --git a/include/qemu/futex.h b/include/qemu/futex.h
index 91ae889..607613e 100644
--- a/include/qemu/futex.h
+++ b/include/qemu/futex.h
@@ -1,5 +1,5 @@
/*
- * Wrappers around Linux futex syscall
+ * Wrappers around Linux futex syscall and similar
*
* Copyright Red Hat, Inc. 2017
*
@@ -11,17 +11,35 @@
*
*/
+/*
+ * Note that a wake-up can also be caused by common futex usage patterns in
+ * unrelated code that happened to have previously used the futex word's
+ * memory location (e.g., typical futex-based implementations of Pthreads
+ * mutexes can cause this under some conditions). Therefore, qemu_futex_wait()
+ * callers should always conservatively assume that it is a spurious wake-up,
+ * and use the futex word's value (i.e., the user-space synchronization scheme)
+ * to decide whether to continue to block or not.
+ */
+
#ifndef QEMU_FUTEX_H
#define QEMU_FUTEX_H
+#define HAVE_FUTEX
+
+#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#include <linux/futex.h>
#define qemu_futex(...) syscall(__NR_futex, __VA_ARGS__)
-static inline void qemu_futex_wake(void *f, int n)
+static inline void qemu_futex_wake_all(void *f)
{
- qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0);
+ qemu_futex(f, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
+}
+
+static inline void qemu_futex_wake_single(void *f)
+{
+ qemu_futex(f, FUTEX_WAKE, 1, NULL, NULL, 0);
}
static inline void qemu_futex_wait(void *f, unsigned val)
@@ -37,5 +55,25 @@ static inline void qemu_futex_wait(void *f, unsigned val)
}
}
}
+#elif defined(CONFIG_WIN32)
+#include <synchapi.h>
+
+static inline void qemu_futex_wake_all(void *f)
+{
+ WakeByAddressAll(f);
+}
+
+static inline void qemu_futex_wake_single(void *f)
+{
+ WakeByAddressSingle(f);
+}
+
+static inline void qemu_futex_wait(void *f, unsigned val)
+{
+ WaitOnAddress(f, &val, sizeof(val), INFINITE);
+}
+#else
+#undef HAVE_FUTEX
+#endif
#endif /* QEMU_FUTEX_H */
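
A minimal sketch of a one-shot flag built on these wrappers (only where HAVE_FUTEX is defined); as the comment above notes, the waiter treats every wake-up as possibly spurious and re-checks the futex word:

static unsigned demo_flag;   /* 0 = not signalled, 1 = signalled */

static void demo_wait(void)
{
    while (qatomic_read(&demo_flag) == 0) {
        /* blocks only while the word still reads 0 */
        qemu_futex_wait(&demo_flag, 0);
    }
}

static void demo_signal(void)
{
    qatomic_set(&demo_flag, 1);
    qemu_futex_wake_all(&demo_flag);
}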
diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h
index 353ab2a..bc8fab9 100644
--- a/include/qemu/help-texts.h
+++ b/include/qemu/help-texts.h
@@ -2,7 +2,7 @@
#define QEMU_HELP_TEXTS_H
/* Copyright string for -version arguments, About dialogs, etc */
-#define QEMU_COPYRIGHT "Copyright (c) 2003-2024 " \
+#define QEMU_COPYRIGHT "Copyright (c) 2003-2025 " \
"Fabrice Bellard and the QEMU Project developers"
/* Bug reporting information for --help arguments, About dialogs, etc */
diff --git a/include/qemu/host-pci-mmio.h b/include/qemu/host-pci-mmio.h
new file mode 100644
index 0000000..a8ed993
--- /dev/null
+++ b/include/qemu/host-pci-mmio.h
@@ -0,0 +1,136 @@
+/*
+ * API for host PCI MMIO accesses (e.g. Linux VFIO BARs)
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HOST_PCI_MMIO_H
+#define HOST_PCI_MMIO_H
+
+#include "qemu/bswap.h"
+#include "qemu/s390x_pci_mmio.h"
+
+static inline uint8_t host_pci_ldub_p(const void *ioaddr)
+{
+ uint8_t ret = 0;
+#ifdef __s390x__
+ ret = s390x_pci_mmio_read_8(ioaddr);
+#else
+ ret = ldub_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint16_t host_pci_lduw_le_p(const void *ioaddr)
+{
+ uint16_t ret = 0;
+#ifdef __s390x__
+ ret = le16_to_cpu(s390x_pci_mmio_read_16(ioaddr));
+#else
+ ret = lduw_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint32_t host_pci_ldl_le_p(const void *ioaddr)
+{
+ uint32_t ret = 0;
+#ifdef __s390x__
+ ret = le32_to_cpu(s390x_pci_mmio_read_32(ioaddr));
+#else
+ ret = ldl_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint64_t host_pci_ldq_le_p(const void *ioaddr)
+{
+ uint64_t ret = 0;
+#ifdef __s390x__
+ ret = le64_to_cpu(s390x_pci_mmio_read_64(ioaddr));
+#else
+ ret = ldq_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline void host_pci_stb_p(void *ioaddr, uint8_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_8(ioaddr, val);
+#else
+ stb_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stw_le_p(void *ioaddr, uint16_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_16(ioaddr, cpu_to_le16(val));
+#else
+ stw_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stl_le_p(void *ioaddr, uint32_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_32(ioaddr, cpu_to_le32(val));
+#else
+ stl_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stq_le_p(void *ioaddr, uint64_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_64(ioaddr, cpu_to_le64(val));
+#else
+ stq_le_p(ioaddr, val);
+#endif
+}
+
+static inline uint64_t host_pci_ldn_le_p(const void *ioaddr, int sz)
+{
+ switch (sz) {
+ case 1:
+ return host_pci_ldub_p(ioaddr);
+ case 2:
+ return host_pci_lduw_le_p(ioaddr);
+ case 4:
+ return host_pci_ldl_le_p(ioaddr);
+ case 8:
+ return host_pci_ldq_le_p(ioaddr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline void host_pci_stn_le_p(void *ioaddr, int sz, uint64_t v)
+{
+ switch (sz) {
+ case 1:
+ host_pci_stb_p(ioaddr, v);
+ break;
+ case 2:
+ host_pci_stw_le_p(ioaddr, v);
+ break;
+ case 4:
+ host_pci_stl_le_p(ioaddr, v);
+ break;
+ case 8:
+ host_pci_stq_le_p(ioaddr, v);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#endif
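
A sketch of a read-modify-write on a 32-bit little-endian register in a host BAR mapping; the pointer and register offset are hypothetical (for example, from mmap()ing a VFIO region):

static void demo_set_enable_bit(uint8_t *bar_mmio)
{
    uint32_t ctl = host_pci_ldl_le_p(bar_mmio + 0x10);

    host_pci_stl_le_p(bar_mmio + 0x10, ctl | 1u);
}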
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index ead97d3..4d28fa2 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -313,6 +313,15 @@ static inline int ctpop8(uint8_t val)
return __builtin_popcount(val);
}
+/*
+ * parity8 - return the parity (1 = odd) of an 8-bit value.
+ * @val: The value to search
+ */
+static inline int parity8(uint8_t val)
+{
+ return __builtin_parity(val);
+}
+
/**
* ctpop16 - count the population of one bits in a 16-bit value.
* @val: The value to search
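
For instance, the new helper directly yields a UART-style parity bit; this is a sketch, not taken from any in-tree device:

static uint8_t demo_even_parity_bit(uint8_t data)
{
    /* 1 when 'data' has an odd number of set bits, making the total even */
    return parity8(data);
}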
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index 63a1c01..9535673 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -1,6 +1,7 @@
/*
* Helpers for using (partial) iovecs.
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (C) 2010 Red Hat, Inc.
*
* Author(s):
@@ -30,7 +31,7 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
* only part of data will be copied, up to the end of the iovec.
* Number of bytes actually copied will be returned, which is
* min(bytes, iov_size(iov)-offset)
- * `Offset' must point to the inside of iovec.
+ * Returns 0 when `offset' points outside of the iovec.
*/
size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
size_t offset, const void *buf, size_t bytes);
@@ -66,16 +67,43 @@ iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
/**
* Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements,
* starting at byte offset `start', to value `fillc', repeating it
- * `bytes' number of times. `Offset' must point to the inside of iovec.
+ * `bytes' number of times.
* If `bytes' is large enough, only last bytes portion of iovec,
* up to the end of it, will be filled with the specified value.
* Function return actual number of bytes processed, which is
* min(size, iov_size(iov) - offset).
+ * Returns 0 when `offset' points outside of the iovec.
*/
size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
size_t offset, int fillc, size_t bytes);
/*
+ * Send/recv data from/to iovec buffers directly, with the provided
+ * socket flags.
+ *
+ * `offset' bytes in the beginning of iovec buffer are skipped and
+ * next `bytes' bytes are used, which must be within data of iovec.
+ *
+ * r = iov_send_recv_with_flags(sockfd, sockflags, iov, iovcnt,
+ * offset, bytes, true);
+ *
+ * is logically equivalent to
+ *
+ * char *buf = malloc(bytes);
+ * iov_to_buf(iov, iovcnt, offset, buf, bytes);
+ * r = send(sockfd, buf, bytes, sockflags);
+ * free(buf);
+ *
+ * For iov_send_recv_with_flags() the _whole_ area being sent or received
+ * should be within the iovec, not only the beginning of it.
+ */
+ssize_t iov_send_recv_with_flags(int sockfd, int sockflags,
+ const struct iovec *iov,
+ unsigned iov_cnt, size_t offset,
+ size_t bytes,
+ bool do_send);
+
+/*
* Send/recv data from/to iovec buffers directly
*
* `offset' bytes in the beginning of iovec buffer are skipped and
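
A hedged sketch of the equivalence described above, sending from an iovec with an explicit socket flag; MSG_NOSIGNAL is assumed to be available on the host:

static ssize_t demo_iov_send_nosignal(int sockfd, const struct iovec *iov,
                                      unsigned iov_cnt, size_t bytes)
{
    return iov_send_recv_with_flags(sockfd, MSG_NOSIGNAL, iov, iov_cnt,
                                    0 /* offset */, bytes, true /* send */);
}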
diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h
index 2a10a70..14e82a2 100644
--- a/include/qemu/iova-tree.h
+++ b/include/qemu/iova-tree.h
@@ -23,7 +23,7 @@
* for the thread safety issue.
*/
-#include "exec/memory.h"
+#include "system/memory.h"
#include "exec/hwaddr.h"
#define IOVA_OK (0)
@@ -41,6 +41,28 @@ typedef struct DMAMap {
typedef gboolean (*iova_tree_iterator)(DMAMap *map);
/**
+ * gpa_tree_new:
+ *
+ * Create a new GPA->IOVA tree.
+ *
+ * Returns: the tree pointer on success, or NULL otherwise.
+ */
+IOVATree *gpa_tree_new(void);
+
+/**
+ * gpa_tree_insert:
+ *
+ * @tree: The GPA->IOVA tree we're inserting the mapping to
+ * @map: The GPA->IOVA mapping to insert
+ *
+ * Inserts a GPA range to the GPA->IOVA tree. If there are overlapped
+ * ranges, IOVA_ERR_OVERLAP will be returned.
+ *
+ * Return: 0 if successful, < 0 otherwise.
+ */
+int gpa_tree_insert(IOVATree *tree, const DMAMap *map);
+
+/**
* iova_tree_new:
*
* Create a new iova tree.
@@ -112,31 +134,6 @@ const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map);
const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map);
/**
- * iova_tree_find_address:
- *
- * @tree: the iova tree to search from
- * @iova: the iova address to find
- *
- * Similar to iova_tree_find(), but it tries to find mapping with
- * range iova=iova & size=0.
- *
- * Return: same as iova_tree_find().
- */
-const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova);
-
-/**
- * iova_tree_foreach:
- *
- * @tree: the iova tree to iterate on
- * @iterator: the iterator for the mappings, return true to stop
- *
- * Iterate over the iova tree.
- *
- * Return: 1 if found any overlap, 0 if not, <0 if error.
- */
-void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator);
-
-/**
* iova_tree_alloc_map:
*
* @tree: the iova tree to allocate from
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 2b873f2..a5a0415 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -545,6 +545,9 @@ bool job_is_ready(Job *job);
/* Same as job_is_ready(), but called with job lock held. */
bool job_is_ready_locked(Job *job);
+/** Returns whether the job is paused. Called with job_mutex *not* held. */
+bool job_is_paused(Job *job);
+
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
diff --git a/include/qemu/lockcnt.h b/include/qemu/lockcnt.h
new file mode 100644
index 0000000..5a2800e
--- /dev/null
+++ b/include/qemu/lockcnt.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QemuLockCnt implementation
+ *
+ * Copyright Red Hat, Inc. 2017
+ *
+ * Author:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ */
+
+#ifndef QEMU_LOCKCNT_H
+#define QEMU_LOCKCNT_H
+
+#include "qemu/thread.h"
+
+typedef struct QemuLockCnt QemuLockCnt;
+
+struct QemuLockCnt {
+#ifndef HAVE_FUTEX
+ QemuMutex mutex;
+#endif
+ unsigned count;
+};
+
+/**
+ * qemu_lockcnt_init: initialize a QemuLockcnt
+ * @lockcnt: the lockcnt to initialize
+ *
+ * Initialize lockcnt's counter to zero and prepare its mutex
+ * for usage.
+ */
+void qemu_lockcnt_init(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_destroy: destroy a QemuLockcnt
+ * @lockcnt: the lockcnt to destruct
+ *
+ * Destroy lockcnt's mutex.
+ */
+void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_inc: increment a QemuLockCnt's counter
+ * @lockcnt: the lockcnt to operate on
+ *
+ * If the lockcnt's count is zero, wait for critical sections
+ * to finish and increment lockcnt's count to 1. If the count
+ * is not zero, just increment it.
+ *
+ * Because this function can wait on the mutex, it must not be
+ * called while the lockcnt's mutex is held by the current thread.
+ * For the same reason, qemu_lockcnt_inc can also contribute to
+ * AB-BA deadlocks. This is a sample deadlock scenario::
+ *
+ * thread 1 thread 2
+ * -------------------------------------------------------
+ * qemu_lockcnt_lock(&lc1);
+ * qemu_lockcnt_lock(&lc2);
+ * qemu_lockcnt_inc(&lc2);
+ * qemu_lockcnt_inc(&lc1);
+ */
+void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
+ * @lockcnt: the lockcnt to operate on
+ */
+void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
+ * possibly lock it.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * Decrement lockcnt's count. If the new count is zero, lock
+ * the mutex and return true. Otherwise, return false.
+ */
+bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
+ * lock it.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * If the count is 1, decrement the count to zero, lock
+ * the mutex and return true. Otherwise, return false.
+ */
+bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * Remember that concurrent visits are not blocked unless the count is
+ * also zero. You can use qemu_lockcnt_count to check for this inside a
+ * critical section.
+ */
+void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
+ * @lockcnt: the lockcnt to operate on.
+ */
+void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
+ * @lockcnt: the lockcnt to operate on.
+ *
+ * This is the same as
+ *
+ * qemu_lockcnt_unlock(lockcnt);
+ * qemu_lockcnt_inc(lockcnt);
+ *
+ * but more efficient.
+ */
+void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_count: query a LockCnt's count.
+ * @lockcnt: the lockcnt to query.
+ *
+ * Note that the count can change at any time. Still, while the
+ * lockcnt is locked, one can usefully check whether the count
+ * is non-zero.
+ */
+unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
+
+#endif
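
A sketch of the visit/reclaim pattern these primitives are meant for; the list being walked and the reclamation step are hypothetical:

static QemuLockCnt demo_lockcnt;

static void demo_visit_list(void)
{
    qemu_lockcnt_inc(&demo_lockcnt);     /* keep reclamation at bay */
    /* ... walk the shared list of nodes ... */
    if (qemu_lockcnt_dec_and_lock(&demo_lockcnt)) {
        /* count dropped to zero: no other visitor, safe to reclaim */
        /* ... free nodes previously removed from the list ... */
        qemu_lockcnt_unlock(&demo_lockcnt);
    }
}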
diff --git a/include/qemu/log.h b/include/qemu/log.h
index e10e24c..60da703 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -37,6 +37,7 @@ bool qemu_log_separate(void);
#define LOG_PER_THREAD (1 << 20)
#define CPU_LOG_TB_VPU (1 << 21)
#define LOG_TB_OP_PLUGIN (1 << 22)
+#define LOG_INVALID_MEM (1 << 23)
/* Lock/unlock output. */
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 5764db1..4e2436b 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -27,7 +27,7 @@
#include "block/aio.h"
#include "qom/object.h"
-#include "sysemu/event-loop-base.h"
+#include "system/event-loop-base.h"
#define SIG_IPI SIGUSR1
@@ -248,6 +248,14 @@ GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
/**
+ * rust_bql_mock_lock:
+ *
+ * Called from Rust doctests to make bql_lock() return true.
+ * Do not touch.
+ */
+void rust_bql_mock_lock(void);
+
+/**
* bql_locked: Return lock status of the Big QEMU Lock (BQL)
*
* The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
@@ -263,6 +271,21 @@ AioContext *iohandler_get_aio_context(void);
bool bql_locked(void);
/**
+ * bql_block_unlock: Allow/deny releasing the BQL
+ *
+ * The Big QEMU Lock (BQL) is used to provide interior mutability to
+ * Rust code, but this only works if other threads cannot run while
+ * the Rust code has an active borrow. This is because C code in
+ * other threads could come in and mutate data under the Rust code's
+ * feet.
+ *
+ * @increase: Whether to increase or decrease the blocking counter.
+ * Releasing the BQL while the counter is nonzero triggers
+ * an assertion failure.
+ */
+void bql_block_unlock(bool increase);
+
+/**
* qemu_in_main_thread: return whether it's possible to safely access
* the global state of the block layer.
*
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 191916f..96fe51b 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -8,7 +8,7 @@
* To avoid getting into possible circular include dependencies, this
* file should not include any other QEMU headers, with the exceptions
* of config-host.h, config-target.h, qemu/compiler.h,
- * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and
+ * system/os-posix.h, system/os-win32.h, system/os-wasm.h, glib-compat.h and
* qemu/typedefs.h, all of which are doing a similar job to this file
* and are under similar constraints.
*
@@ -128,7 +128,7 @@ QEMU_EXTERN_C int daemon(int, int);
#include <sys/stat.h>
#include <sys/time.h>
#include <assert.h>
-/* setjmp must be declared before sysemu/os-win32.h
+/* setjmp must be declared before system/os-win32.h
* because it is redefined there. */
#include <setjmp.h>
#include <signal.h>
@@ -161,11 +161,15 @@ QEMU_EXTERN_C int daemon(int, int);
#include "glib-compat.h"
#ifdef _WIN32
-#include "sysemu/os-win32.h"
+#include "system/os-win32.h"
#endif
-#ifdef CONFIG_POSIX
-#include "sysemu/os-posix.h"
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
+#include "system/os-posix.h"
+#endif
+
+#if defined(EMSCRIPTEN)
+#include "system/os-wasm.h"
#endif
#ifdef __cplusplus
@@ -297,6 +301,10 @@ void QEMU_ERROR("code path is reachable")
#error building with G_DISABLE_ASSERT is not supported
#endif
+#ifndef OFF_MAX
+#define OFF_MAX (sizeof (off_t) == 8 ? INT64_MAX : INT32_MAX)
+#endif
+
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif
@@ -399,7 +407,7 @@ void QEMU_ERROR("code path is reachable")
})
#undef MIN
#define MIN(a, b) \
- MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+ MIN_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b))
#define MAX_INTERNAL(a, b, _a, _b) \
({ \
@@ -408,7 +416,7 @@ void QEMU_ERROR("code path is reachable")
})
#undef MAX
#define MAX(a, b) \
- MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+ MAX_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b))
#ifdef __COVERITY__
# define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
@@ -440,7 +448,7 @@ void QEMU_ERROR("code path is reachable")
_a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \
})
#define MIN_NON_ZERO(a, b) \
- MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+ MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b))
/*
* Round number down to multiple. Safe when m is not a power of 2 (see
@@ -505,6 +513,7 @@ int qemu_daemon(int nochdir, int noclose);
void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared,
bool noreserve);
void qemu_anon_ram_free(void *ptr, size_t size);
+int qemu_shm_alloc(size_t size, Error **errp);
#ifdef _WIN32
#define HAVE_CHARDEV_SERIAL 1
@@ -626,6 +635,15 @@ bool qemu_write_pidfile(const char *pidfile, Error **errp);
int qemu_get_thread_id(void);
+/**
+ * qemu_kill_thread:
+ * @tid: thread id.
+ * @sig: host signal.
+ *
+ * Send @sig to one of QEMU's own threads with identifier @tid.
+ */
+int qemu_kill_thread(int tid, int sig);
+
#ifndef CONFIG_IOVEC
struct iovec {
void *iov_base;
@@ -758,6 +776,17 @@ static inline void qemu_reset_optind(void)
int qemu_fdatasync(int fd);
/**
+ * qemu_close_all_open_fd:
+ *
+ * Close all open file descriptors except the ones supplied in the @skip array
+ *
+ * @skip: ordered array of distinct file descriptors that should not be closed
+ * if any, or NULL.
+ * @nskip: number of entries in the @skip array or 0 if @skip is NULL.
+ */
+void qemu_close_all_open_fd(const int *skip, unsigned int nskip);
+
+/**
* Sync changes made to the memory mapped file back to the backing
* storage. For POSIX compliant systems this will fallback
* to regular msync call. Otherwise it will trigger whole file sync
@@ -786,8 +815,7 @@ size_t qemu_get_host_physmem(void);
* Toggle write/execute on the pages marked MAP_JIT
* for the current thread.
*/
-#if defined(MAC_OS_VERSION_11_0) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+#ifdef __APPLE__
static inline void qemu_thread_jit_execute(void)
{
pthread_jit_write_protect_np(true);
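
A sketch of the new qemu_close_all_open_fd() helper as a child process might use it after fork(); the kept descriptor is hypothetical and assumed to be greater than 2 so the skip array stays ordered:

static void demo_sanitize_child_fds(int keep_fd)
{
    const int skip[] = { 0, 1, 2, keep_fd };

    /* close every other inherited descriptor */
    qemu_close_all_open_fd(skip, G_N_ELEMENTS(skip));
}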
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index 71c1123..6065ec7 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -9,7 +9,6 @@
#ifndef PLUGIN_MEMORY_H
#define PLUGIN_MEMORY_H
-#include "exec/cpu-defs.h"
#include "exec/hwaddr.h"
struct qemu_plugin_hwaddr {
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index af5f9db..9726a9e 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -167,6 +167,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ uint64_t value_low,
+ uint64_t value_high,
MemOpIdx oi, enum qemu_plugin_mem_rw rw);
void qemu_plugin_flush_cb(void);
@@ -251,6 +253,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{ }
static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ uint64_t value_low,
+ uint64_t value_high,
MemOpIdx oi,
enum qemu_plugin_mem_rw rw)
{ }
diff --git a/include/qemu/pmem.h b/include/qemu/pmem.h
index d2d7ad0..e12a67b 100644
--- a/include/qemu/pmem.h
+++ b/include/qemu/pmem.h
@@ -22,7 +22,6 @@ pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
/* If 'pmem' option is 'on', we should always have libpmem support,
or qemu will report a error and exit, never come here. */
g_assert_not_reached();
- return NULL;
}
static inline void
diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
index c71c705..3a850aa 100644
--- a/include/qemu/qemu-plugin.h
+++ b/include/qemu/qemu-plugin.h
@@ -57,11 +57,19 @@ typedef uint64_t qemu_plugin_id_t;
* - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
* Those functions are replaced by *_per_vcpu variants, which guarantee
* thread-safety for operations.
+ *
+ * version 3:
+ * - modified arguments and return value of qemu_plugin_insn_data to copy
+ * the data into a user-provided buffer instead of returning a pointer
+ * to the data.
+ *
+ * version 4:
+ * - added qemu_plugin_read_memory_vaddr
*/
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
-#define QEMU_PLUGIN_VERSION 3
+#define QEMU_PLUGIN_VERSION 4
/**
* struct qemu_info_t - system information for plugins
@@ -262,6 +270,29 @@ enum qemu_plugin_mem_rw {
QEMU_PLUGIN_MEM_RW,
};
+enum qemu_plugin_mem_value_type {
+ QEMU_PLUGIN_MEM_VALUE_U8,
+ QEMU_PLUGIN_MEM_VALUE_U16,
+ QEMU_PLUGIN_MEM_VALUE_U32,
+ QEMU_PLUGIN_MEM_VALUE_U64,
+ QEMU_PLUGIN_MEM_VALUE_U128,
+};
+
+/* typedef qemu_plugin_mem_value - value accessed during a load/store */
+typedef struct {
+ enum qemu_plugin_mem_value_type type;
+ union {
+ uint8_t u8;
+ uint16_t u16;
+ uint32_t u32;
+ uint64_t u64;
+ struct {
+ uint64_t low;
+ uint64_t high;
+ } u128;
+ } data;
+} qemu_plugin_mem_value;
+
/**
* enum qemu_plugin_cond - condition to enable callback
*
@@ -552,6 +583,15 @@ QEMU_PLUGIN_API
bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info);
/**
+ * qemu_plugin_mem_get_value() - return last value loaded/stored
+ * @info: opaque memory transaction handle
+ *
+ * Returns: memory value
+ */
+QEMU_PLUGIN_API
+qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info);
+
+/**
* qemu_plugin_get_hwaddr() - return handle for memory operation
* @info: opaque memory info structure
* @vaddr: the virtual address of the memory operation
@@ -763,6 +803,7 @@ void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
qemu_plugin_udata_cb_t cb, void *userdata);
/* returns how many vcpus were started at this point */
+QEMU_PLUGIN_API
int qemu_plugin_num_vcpus(void);
/**
@@ -853,6 +894,28 @@ QEMU_PLUGIN_API
GArray *qemu_plugin_get_registers(void);
/**
+ * qemu_plugin_read_memory_vaddr() - read from memory using a virtual address
+ *
+ * @addr: A virtual address to read from
+ * @data: A byte array to store data into
+ * @len: The number of bytes to read, starting from @addr
+ *
+ * @len bytes of data are read starting at @addr and stored into @data. If
+ * @data is not large enough to hold @len bytes, it will be grown to the
+ * required size, reallocating if necessary. @len must be greater than 0.
+ *
+ * This function does not ensure writes are flushed prior to reading, so
+ * callers should take care when calling this function in plugin callbacks to
+ * avoid attempting to read data which may not yet be written and should use
+ * the memory callback API instead.
+ *
+ * Returns true on success and false on failure.
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_read_memory_vaddr(uint64_t addr,
+ GByteArray *data, size_t len);
+
+/**
* qemu_plugin_read_register() - read register for current vCPU
*
* @handle: a @qemu_plugin_reg_handle handle
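
A plugin-side sketch of consuming the new memory value API from a memory callback; registration boilerplate is omitted and the names are illustrative:

static void demo_vcpu_mem_cb(unsigned int vcpu_index,
                             qemu_plugin_meminfo_t info,
                             uint64_t vaddr, void *udata)
{
    if (qemu_plugin_mem_is_store(info)) {
        qemu_plugin_mem_value v = qemu_plugin_mem_get_value(info);

        if (v.type == QEMU_PLUGIN_MEM_VALUE_U32) {
            g_autofree char *msg =
                g_strdup_printf("store 0x%08" PRIx32 " at 0x%" PRIx64 "\n",
                                v.data.u32, vaddr);
            qemu_plugin_outs(msg);
        }
    }
}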
diff --git a/include/qemu/range.h b/include/qemu/range.h
index 4ce694a..d446ad8 100644
--- a/include/qemu/range.h
+++ b/include/qemu/range.h
@@ -210,8 +210,8 @@ static inline int range_covers_byte(uint64_t offset, uint64_t len,
/* Check whether 2 given ranges overlap.
* Undefined if ranges that wrap around 0. */
-static inline int ranges_overlap(uint64_t first1, uint64_t len1,
- uint64_t first2, uint64_t len2)
+static inline bool ranges_overlap(uint64_t first1, uint64_t len1,
+ uint64_t first2, uint64_t len2)
{
uint64_t last1 = range_get_last(first1, len1);
uint64_t last2 = range_get_last(first2, len2);
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index fea058a..020dbe4 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -17,8 +17,8 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * License along with this library; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 4e6298d..bfd5900 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -17,8 +17,8 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * License along with this library; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* Copyright (c) 2013 Mike D. Day, IBM Corporation.
*
diff --git a/include/qemu/reserved-region.h b/include/qemu/reserved-region.h
index 8e6f0a9..9026cf0 100644
--- a/include/qemu/reserved-region.h
+++ b/include/qemu/reserved-region.h
@@ -20,7 +20,7 @@
#ifndef QEMU_RESERVED_REGION_H
#define QEMU_RESERVED_REGION_H
-#include "exec/memory.h"
+#include "system/memory.h"
/*
* Insert a new region into a sorted list of reserved regions. In case
diff --git a/include/qemu/s390x_pci_mmio.h b/include/qemu/s390x_pci_mmio.h
new file mode 100644
index 0000000..c5f63ec
--- /dev/null
+++ b/include/qemu/s390x_pci_mmio.h
@@ -0,0 +1,24 @@
+/*
+ * s390x PCI MMIO definitions
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef S390X_PCI_MMIO_H
+#define S390X_PCI_MMIO_H
+
+#ifdef __s390x__
+uint8_t s390x_pci_mmio_read_8(const void *ioaddr);
+uint16_t s390x_pci_mmio_read_16(const void *ioaddr);
+uint32_t s390x_pci_mmio_read_32(const void *ioaddr);
+uint64_t s390x_pci_mmio_read_64(const void *ioaddr);
+
+void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val);
+void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val);
+void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val);
+void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val);
+#endif /* __s390x__ */
+
+#endif /* S390X_PCI_MMIO_H */
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index d935fd8..c562690 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -61,7 +61,6 @@ int socket_set_fast_reuse(int fd);
int inet_ai_family_from_address(InetSocketAddress *addr,
Error **errp);
int inet_parse(InetSocketAddress *addr, const char *str, Error **errp);
-int inet_connect(const char *str, Error **errp);
int inet_connect_saddr(InetSocketAddress *saddr, Error **errp);
NetworkAddressFamily inet_netfamily(int family);
@@ -118,21 +117,6 @@ socket_sockaddr_to_address(struct sockaddr_storage *sa,
SocketAddress *socket_local_address(int fd, Error **errp);
/**
- * socket_remote_address:
- * @fd: the socket file handle
- * @errp: pointer to uninitialized error object
- *
- * Get the string representation of the remote socket
- * address. A pointer to the allocated address information
- * struct will be returned, which the caller is required to
- * release with a call qapi_free_SocketAddress() when no
- * longer required.
- *
- * Returns: the socket address struct, or NULL on error
- */
-SocketAddress *socket_remote_address(int fd, Error **errp);
-
-/**
* socket_address_flatten:
* @addr: the socket address to flatten
*
diff --git a/include/qemu/target-info-impl.h b/include/qemu/target-info-impl.h
new file mode 100644
index 0000000..1b51cbc
--- /dev/null
+++ b/include/qemu/target-info-impl.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU TargetInfo structure definition
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_TARGET_INFO_IMPL_H
+#define QEMU_TARGET_INFO_IMPL_H
+
+#include "qemu/target-info.h"
+
+typedef struct TargetInfo {
+ /* runtime equivalent of TARGET_NAME definition */
+ const char *target_name;
+ /* runtime equivalent of TARGET_LONG_BITS definition */
+ unsigned long_bits;
+ /* runtime equivalent of CPU_RESOLVING_TYPE definition */
+ const char *cpu_type;
+ /* QOM typename machines for this binary must implement */
+ const char *machine_typename;
+} TargetInfo;
+
+/**
+ * target_info:
+ *
+ * Returns: The TargetInfo structure definition for this target binary.
+ */
+const TargetInfo *target_info(void);
+
+#endif
diff --git a/include/qemu/target-info.h b/include/qemu/target-info.h
new file mode 100644
index 0000000..850a295
--- /dev/null
+++ b/include/qemu/target-info.h
@@ -0,0 +1,41 @@
+/*
+ * QEMU target info API
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_TARGET_INFO_H
+#define QEMU_TARGET_INFO_H
+
+/**
+ * target_name:
+ *
+ * Returns: Canonical target name (i.e. "i386").
+ */
+const char *target_name(void);
+
+/**
+ * target_long_bits:
+ *
+ * Returns: number of bits in a long type for this target (i.e. 64).
+ */
+unsigned target_long_bits(void);
+
+/**
+ * target_machine_typename:
+ *
+ * Returns: Name of the QOM interface implemented by machines
+ * usable on this target binary.
+ */
+const char *target_machine_typename(void);
+
+/**
+ * target_cpu_type:
+ *
+ * Returns: target CPU base QOM type name (i.e. TYPE_X86_CPU).
+ */
+const char *target_cpu_type(void);
+
+#endif
diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h
index 5f2f3d1..758808b 100644
--- a/include/qemu/thread-posix.h
+++ b/include/qemu/thread-posix.h
@@ -32,15 +32,6 @@ struct QemuSemaphore {
unsigned int count;
};
-struct QemuEvent {
-#ifndef __linux__
- pthread_mutex_t lock;
- pthread_cond_t cond;
-#endif
- unsigned value;
- bool initialized;
-};
-
struct QemuThread {
pthread_t thread;
};
diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
index d95af44..da9e732 100644
--- a/include/qemu/thread-win32.h
+++ b/include/qemu/thread-win32.h
@@ -28,12 +28,6 @@ struct QemuSemaphore {
bool initialized;
};
-struct QemuEvent {
- int value;
- HANDLE event;
- bool initialized;
-};
-
typedef struct QemuThreadData QemuThreadData;
struct QemuThread {
QemuThreadData *data;
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index fb74e21..f0302ed 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -3,14 +3,32 @@
#include "qemu/processor.h"
#include "qemu/atomic.h"
-#include "qemu/clang-tsa.h"
+#include "qemu/futex.h"
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
-typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;
+/*
+ * QemuEvent
+ * =========
+ *
+ * QemuEvent is an implementation of the Win32 manual-reset event object.
+ * For details, refer to:
+ * https://learn.microsoft.com/en-us/windows/win32/sync/using-event-objects
+ *
+ * QemuEvent is more lightweight than QemuSemaphore when HAVE_FUTEX is defined.
+ */
+typedef struct QemuEvent {
+#ifndef HAVE_FUTEX
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+#endif
+ unsigned value;
+ bool initialized;
+} QemuEvent;
+
#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
@@ -293,115 +311,4 @@ static inline void qemu_spin_unlock(QemuSpin *spin)
#endif
}
-struct QemuLockCnt {
-#ifndef CONFIG_LINUX
- QemuMutex mutex;
-#endif
- unsigned count;
-};
-
-/**
- * qemu_lockcnt_init: initialize a QemuLockcnt
- * @lockcnt: the lockcnt to initialize
- *
- * Initialize lockcnt's counter to zero and prepare its mutex
- * for usage.
- */
-void qemu_lockcnt_init(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_destroy: destroy a QemuLockcnt
- * @lockcnt: the lockcnt to destruct
- *
- * Destroy lockcnt's mutex.
- */
-void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_inc: increment a QemuLockCnt's counter
- * @lockcnt: the lockcnt to operate on
- *
- * If the lockcnt's count is zero, wait for critical sections
- * to finish and increment lockcnt's count to 1. If the count
- * is not zero, just increment it.
- *
- * Because this function can wait on the mutex, it must not be
- * called while the lockcnt's mutex is held by the current thread.
- * For the same reason, qemu_lockcnt_inc can also contribute to
- * AB-BA deadlocks. This is a sample deadlock scenario:
- *
- * thread 1 thread 2
- * -------------------------------------------------------
- * qemu_lockcnt_lock(&lc1);
- * qemu_lockcnt_lock(&lc2);
- * qemu_lockcnt_inc(&lc2);
- * qemu_lockcnt_inc(&lc1);
- */
-void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
- * @lockcnt: the lockcnt to operate on
- */
-void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
- * possibly lock it.
- * @lockcnt: the lockcnt to operate on
- *
- * Decrement lockcnt's count. If the new count is zero, lock
- * the mutex and return true. Otherwise, return false.
- */
-bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
- * lock it.
- * @lockcnt: the lockcnt to operate on
- *
- * If the count is 1, decrement the count to zero, lock
- * the mutex and return true. Otherwise, return false.
- */
-bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
- * @lockcnt: the lockcnt to operate on
- *
- * Remember that concurrent visits are not blocked unless the count is
- * also zero. You can use qemu_lockcnt_count to check for this inside a
- * critical section.
- */
-void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
- * @lockcnt: the lockcnt to operate on.
- */
-void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
- * @lockcnt: the lockcnt to operate on.
- *
- * This is the same as
- *
- * qemu_lockcnt_unlock(lockcnt);
- * qemu_lockcnt_inc(lockcnt);
- *
- * but more efficient.
- */
-void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
-
-/**
- * qemu_lockcnt_count: query a LockCnt's count.
- * @lockcnt: the lockcnt to query.
- *
- * Note that the count can change at any time. Still, while the
- * lockcnt is locked, one can usefully check whether the count
- * is non-zero.
- */
-unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
-
#endif
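
As a minimal illustration of the manual-reset semantics described in the QemuEvent comment above, the sketch below assumes the existing qemu_event_init()/set()/wait()/destroy() helpers and qemu_thread_create(); the call order shown is a typical pattern, not something mandated by this patch.

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuEvent done_ev;

    static void *worker(void *opaque)
    {
        /* ... do some work ... */
        qemu_event_set(&done_ev);          /* stays signalled until reset */
        return NULL;
    }

    static void run_and_wait(void)
    {
        QemuThread thread;

        qemu_event_init(&done_ev, false);  /* start in the non-signalled state */
        qemu_thread_create(&thread, "worker", worker, NULL,
                           QEMU_THREAD_JOINABLE);
        qemu_event_wait(&done_ev);         /* blocks until qemu_event_set() */
        qemu_thread_join(&thread);
        qemu_event_destroy(&done_ev);
    }
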
diff --git a/include/qemu/timed-average.h b/include/qemu/timed-average.h
index 08245e7..dfd8d65 100644
--- a/include/qemu/timed-average.h
+++ b/include/qemu/timed-average.h
@@ -8,10 +8,12 @@
* BenoƮt Canet <benoit.canet@nodalink.com>
* Alberto Garcia <berto@igalia.com>
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
- * (at your option) version 3 or any later version.
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index fa56ec9..abd2204 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -191,16 +191,6 @@ bool qemu_clock_use_for_deadline(QEMUClockType type);
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask);
/**
- * qemu_clock_get_main_loop_timerlist:
- * @type: the clock type
- *
- * Return the default timer list associated with a clock.
- *
- * Returns: the default timer list
- */
-QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);
-
-/**
* qemu_clock_notify:
* @type: the clock type
*
@@ -327,17 +317,6 @@ bool timerlist_expired(QEMUTimerList *timer_list);
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);
/**
- * timerlist_get_clock:
- * @timer_list: the timer list to operate on
- *
- * Determine the clock type associated with a timer list.
- *
- * Returns: the clock type associated with the
- * timer list.
- */
-QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
-
-/**
* timerlist_run_timers:
* @timer_list: the timer list to use
*
@@ -528,6 +507,8 @@ static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type,
* with an AioContext---each of them runs its timer callbacks in its own
* AioContext thread.
*
+ * The timer returned must be freed using timer_free().
+ *
* Returns: a pointer to the timer
*/
static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group,
@@ -551,6 +532,8 @@ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group,
* and associate it with the default timer list for the clock type @type.
* See timer_new_full for details.
*
+ * The timer returned must be freed using timer_free().
+ *
* Returns: a pointer to the timer
*/
static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
@@ -569,6 +552,8 @@ static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
* associated with the clock.
* See timer_new_full for details.
*
+ * The timer returned must be freed using timer_free().
+ *
* Returns: a pointer to the newly created timer
*/
static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
@@ -587,6 +572,8 @@ static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
* associated with the clock.
* See timer_new_full for details.
*
+ * The timer returned must be freed using timer_free().
+ *
* Returns: a pointer to the newly created timer
*/
static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
@@ -605,6 +592,8 @@ static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
* associated with the clock.
* See timer_new_full for details.
*
+ * The timer returned must be freed using timer_free().
+ *
* Returns: a pointer to the newly created timer
*/
static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
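
The notes added above make the ownership rule explicit; a small allocate/arm/free sketch follows. The callback, the virtual clock and the one-second deadline are illustrative choices.

    #include "qemu/osdep.h"
    #include "qemu/timer.h"

    static void expiry_cb(void *opaque)
    {
        /* runs when the deadline on the chosen clock has passed */
    }

    static void demo_timer(void)
    {
        QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, expiry_cb, NULL);

        /* Arm the timer one second from now on the virtual clock. */
        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                     NANOSECONDS_PER_SECOND);

        /* ... once the timer is no longer needed ... */
        timer_del(t);
        timer_free(t);                     /* required, per the notes above */
    }
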
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 9d222dc..507f081 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -22,6 +22,7 @@
* Please keep this list in case-insensitive alphabetical order.
*/
typedef struct AccelCPUState AccelCPUState;
+typedef struct AccelOpsClass AccelOpsClass;
typedef struct AccelState AccelState;
typedef struct AddressSpace AddressSpace;
typedef struct AioContext AioContext;
@@ -40,6 +41,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUArchState CPUArchState;
typedef struct CPUPluginState CPUPluginState;
typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceState DeviceState;
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
typedef struct DisasContextBase DisasContextBase;
@@ -107,6 +109,7 @@ typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
typedef struct ReservedRegion ReservedRegion;
+typedef struct SaveLiveCompletePrecopyThreadData SaveLiveCompletePrecopyThreadData;
typedef struct SHPCDevice SHPCDevice;
typedef struct SSIBus SSIBus;
typedef struct TCGCPUOps TCGCPUOps;
@@ -130,5 +133,9 @@ typedef struct IRQState *qemu_irq;
* Function types
*/
typedef void (*qemu_irq_handler)(void *opaque, int n, int level);
+typedef bool (*MigrationLoadThread)(void *opaque, bool *should_quit,
+ Error **errp);
+typedef bool (*SaveLiveCompletePrecopyThreadHandler)(SaveLiveCompletePrecopyThreadData *d,
+ Error **errp);
#endif /* QEMU_TYPEDEFS_H */
diff --git a/include/qemu/userfaultfd.h b/include/qemu/userfaultfd.h
index 18a4314..a197930 100644
--- a/include/qemu/userfaultfd.h
+++ b/include/qemu/userfaultfd.h
@@ -39,7 +39,6 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake);
int uffd_wakeup(int uffd_fd, void *addr, uint64_t length);
int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count);
-bool uffd_poll_events(int uffd_fd, int tmo);
#endif /* CONFIG_LINUX */
diff --git a/include/qapi/qmp/json-parser.h b/include/qobject/json-parser.h
index 7345a9b..7345a9b 100644
--- a/include/qapi/qmp/json-parser.h
+++ b/include/qobject/json-parser.h
diff --git a/include/qapi/qmp/json-writer.h b/include/qobject/json-writer.h
index b70ba64..b70ba64 100644
--- a/include/qapi/qmp/json-writer.h
+++ b/include/qobject/json-writer.h
diff --git a/include/qobject/qbool.h b/include/qobject/qbool.h
new file mode 100644
index 0000000..b348e17
--- /dev/null
+++ b/include/qobject/qbool.h
@@ -0,0 +1,31 @@
+/*
+ * QBool Module
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QBOOL_H
+#define QBOOL_H
+
+#include "qobject/qobject.h"
+
+struct QBool {
+ struct QObjectBase_ base;
+ bool value;
+};
+
+void qbool_unref(QBool *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QBool, qbool_unref)
+
+QBool *qbool_from_bool(bool value);
+bool qbool_get_bool(const QBool *qb);
+
+#endif /* QBOOL_H */
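
A tiny round trip through the QBool API above; the g_assert() check is illustrative.

    #include "qemu/osdep.h"
    #include "qobject/qbool.h"

    static void demo_qbool(void)
    {
        QBool *b = qbool_from_bool(true);

        g_assert(qbool_get_bool(b));
        qbool_unref(b);                    /* drop the creation reference */
    }
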
diff --git a/include/qobject/qdict.h b/include/qobject/qdict.h
new file mode 100644
index 0000000..903e6e5
--- /dev/null
+++ b/include/qobject/qdict.h
@@ -0,0 +1,71 @@
+/*
+ * QDict Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QDICT_H
+#define QDICT_H
+
+#include "qobject/qobject.h"
+#include "qemu/queue.h"
+
+#define QDICT_BUCKET_MAX 512
+
+typedef struct QDictEntry {
+ char *key;
+ QObject *value;
+ QLIST_ENTRY(QDictEntry) next;
+} QDictEntry;
+
+struct QDict {
+ struct QObjectBase_ base;
+ size_t size;
+ QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX];
+};
+
+void qdict_unref(QDict *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QDict, qdict_unref)
+
+/* Object API */
+QDict *qdict_new(void);
+const char *qdict_entry_key(const QDictEntry *entry);
+QObject *qdict_entry_value(const QDictEntry *entry);
+size_t qdict_size(const QDict *qdict);
+void qdict_put_obj(QDict *qdict, const char *key, QObject *value);
+void qdict_del(QDict *qdict, const char *key);
+int qdict_haskey(const QDict *qdict, const char *key);
+QObject *qdict_get(const QDict *qdict, const char *key);
+const QDictEntry *qdict_first(const QDict *qdict);
+const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry);
+
+/* Helper to qdict_put_obj(), accepts any object */
+#define qdict_put(qdict, key, obj) \
+ qdict_put_obj(qdict, key, QOBJECT(obj))
+
+void qdict_put_bool(QDict *qdict, const char *key, bool value);
+void qdict_put_int(QDict *qdict, const char *key, int64_t value);
+void qdict_put_null(QDict *qdict, const char *key);
+void qdict_put_str(QDict *qdict, const char *key, const char *value);
+
+double qdict_get_double(const QDict *qdict, const char *key);
+int64_t qdict_get_int(const QDict *qdict, const char *key);
+bool qdict_get_bool(const QDict *qdict, const char *key);
+QList *qdict_get_qlist(const QDict *qdict, const char *key);
+QDict *qdict_get_qdict(const QDict *qdict, const char *key);
+const char *qdict_get_str(const QDict *qdict, const char *key);
+int64_t qdict_get_try_int(const QDict *qdict, const char *key,
+ int64_t def_value);
+bool qdict_get_try_bool(const QDict *qdict, const char *key, bool def_value);
+const char *qdict_get_try_str(const QDict *qdict, const char *key);
+
+QDict *qdict_clone_shallow(const QDict *src);
+
+#endif /* QDICT_H */
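
A short sketch of the dictionary API declared above; the key names and the fallback value are illustrative.

    #include "qemu/osdep.h"
    #include "qobject/qdict.h"

    static void demo_qdict(void)
    {
        QDict *dict = qdict_new();
        const char *drv;
        int64_t size;

        qdict_put_str(dict, "driver", "null-co");
        qdict_put_int(dict, "size", 1024);

        drv  = qdict_get_str(dict, "driver");        /* "null-co" */
        size = qdict_get_try_int(dict, "size", 0);   /* 1024 */
        (void)drv; (void)size;

        qdict_unref(dict);   /* releases the dict and every contained value */
    }
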
diff --git a/include/qapi/qmp/qjson.h b/include/qobject/qjson.h
index 7bd8d2d..7bd8d2d 100644
--- a/include/qapi/qmp/qjson.h
+++ b/include/qobject/qjson.h
diff --git a/include/qobject/qlist.h b/include/qobject/qlist.h
new file mode 100644
index 0000000..0377bf8
--- /dev/null
+++ b/include/qobject/qlist.h
@@ -0,0 +1,69 @@
+/*
+ * QList Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QLIST_H
+#define QLIST_H
+
+#include "qobject/qobject.h"
+#include "qemu/queue.h"
+
+typedef struct QListEntry {
+ QObject *value;
+ QTAILQ_ENTRY(QListEntry) next;
+} QListEntry;
+
+struct QList {
+ struct QObjectBase_ base;
+ QTAILQ_HEAD(,QListEntry) head;
+};
+
+void qlist_unref(QList *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QList, qlist_unref)
+
+#define qlist_append(qlist, obj) \
+ qlist_append_obj(qlist, QOBJECT(obj))
+
+void qlist_append_bool(QList *qlist, bool value);
+void qlist_append_int(QList *qlist, int64_t value);
+void qlist_append_null(QList *qlist);
+void qlist_append_str(QList *qlist, const char *value);
+
+#define QLIST_FOREACH_ENTRY(qlist, var) \
+ for ((var) = QTAILQ_FIRST(&(qlist)->head); \
+ (var); \
+ (var) = QTAILQ_NEXT((var), next))
+
+static inline QObject *qlist_entry_obj(const QListEntry *entry)
+{
+ return entry->value;
+}
+
+QList *qlist_new(void);
+QList *qlist_copy(QList *src);
+void qlist_append_obj(QList *qlist, QObject *obj);
+QObject *qlist_pop(QList *qlist);
+QObject *qlist_peek(QList *qlist);
+int qlist_empty(const QList *qlist);
+size_t qlist_size(const QList *qlist);
+
+static inline const QListEntry *qlist_first(const QList *qlist)
+{
+ return QTAILQ_FIRST(&qlist->head);
+}
+
+static inline const QListEntry *qlist_next(const QListEntry *entry)
+{
+ return QTAILQ_NEXT(entry, next);
+}
+
+#endif /* QLIST_H */
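
A minimal iteration example built on QLIST_FOREACH_ENTRY() from the header above; counting the elements is just a placeholder for real per-entry processing.

    #include "qemu/osdep.h"
    #include "qobject/qlist.h"

    static size_t demo_qlist(void)
    {
        QList *list = qlist_new();
        QListEntry *entry;
        size_t n = 0;

        qlist_append_int(list, 1);
        qlist_append_int(list, 2);
        qlist_append_str(list, "three");

        QLIST_FOREACH_ENTRY(list, entry) {
            n++;                           /* entry->value is the QObject */
        }

        qlist_unref(list);
        return n;                          /* 3 */
    }
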
diff --git a/include/qapi/qmp/qlit.h b/include/qobject/qlit.h
index c0676d5..c0676d5 100644
--- a/include/qapi/qmp/qlit.h
+++ b/include/qobject/qlit.h
diff --git a/include/qobject/qnull.h b/include/qobject/qnull.h
new file mode 100644
index 0000000..4423836
--- /dev/null
+++ b/include/qobject/qnull.h
@@ -0,0 +1,33 @@
+/*
+ * QNull
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QNULL_H
+#define QNULL_H
+
+#include "qobject/qobject.h"
+
+struct QNull {
+ struct QObjectBase_ base;
+};
+
+extern QNull qnull_;
+
+static inline QNull *qnull(void)
+{
+ return qobject_ref(&qnull_);
+}
+
+void qnull_unref(QNull *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNull, qnull_unref)
+
+#endif /* QNULL_H */
diff --git a/include/qobject/qnum.h b/include/qobject/qnum.h
new file mode 100644
index 0000000..1ce24b3
--- /dev/null
+++ b/include/qobject/qnum.h
@@ -0,0 +1,75 @@
+/*
+ * QNum Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QNUM_H
+#define QNUM_H
+
+#include "qobject/qobject.h"
+
+typedef enum {
+ QNUM_I64,
+ QNUM_U64,
+ QNUM_DOUBLE
+} QNumKind;
+
+/*
+ * QNum encapsulates how our dialect of JSON fills in the blanks left
+ * by the JSON specification (RFC 8259) regarding numbers.
+ *
+ * Conceptually, we treat number as an abstract type with three
+ * concrete subtypes: floating-point, signed integer, unsigned
+ * integer. QNum implements this as a discriminated union of double,
+ * int64_t, uint64_t.
+ *
+ * The JSON parser picks the subtype as follows. If the number has a
+ * decimal point or an exponent, it is floating-point. Else if it
+ * fits into int64_t, it's signed integer. Else if it fits into
+ * uint64_t, it's unsigned integer. Else it's floating-point.
+ *
+ * Any number can serve as double: qnum_get_double() converts under
+ * the hood.
+ *
+ * An integer can serve as signed / unsigned integer as long as it is
+ * in range: qnum_get_try_int() / qnum_get_try_uint() check range and
+ * convert under the hood.
+ */
+struct QNum {
+ struct QObjectBase_ base;
+ QNumKind kind;
+ union {
+ int64_t i64;
+ uint64_t u64;
+ double dbl;
+ } u;
+};
+
+void qnum_unref(QNum *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNum, qnum_unref)
+
+QNum *qnum_from_int(int64_t value);
+QNum *qnum_from_uint(uint64_t value);
+QNum *qnum_from_double(double value);
+
+bool qnum_get_try_int(const QNum *qn, int64_t *val);
+int64_t qnum_get_int(const QNum *qn);
+
+bool qnum_get_try_uint(const QNum *qn, uint64_t *val);
+uint64_t qnum_get_uint(const QNum *qn);
+
+double qnum_get_double(QNum *qn);
+
+char *qnum_to_string(QNum *qn);
+
+#endif /* QNUM_H */
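
A small sketch of the range-checking behaviour described in the comment above; UINT64_MAX is chosen so that the signed accessor visibly fails while the double view still succeeds.

    #include "qemu/osdep.h"
    #include "qobject/qnum.h"

    static void demo_qnum(void)
    {
        QNum *n = qnum_from_uint(UINT64_MAX);
        int64_t as_int;

        g_assert(!qnum_get_try_int(n, &as_int));  /* out of int64_t range */
        g_assert(qnum_get_double(n) > 0.0);       /* converts under the hood */

        qnum_unref(n);
    }
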
diff --git a/include/qobject/qobject.h b/include/qobject/qobject.h
new file mode 100644
index 0000000..a6244d0
--- /dev/null
+++ b/include/qobject/qobject.h
@@ -0,0 +1,144 @@
+/*
+ * QEMU Object Model.
+ *
+ * Based on ideas by Avi Kivity <avi@redhat.com>
+ *
+ * Copyright (C) 2009, 2015 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ * QObject Reference Counts Terminology
+ * ------------------------------------
+ *
+ * - Returning references: A function that returns an object may
+ * return it as either a weak or a strong reference. If the
+ * reference is strong, you are responsible for calling
+ * qobject_unref() on the reference when you are done.
+ *
+ * If the reference is weak, the owner of the reference may free it at
+ * any time in the future. Before storing the reference anywhere, you
+ * should call qobject_ref() to make the reference strong.
+ *
+ * - Transferring ownership: when you transfer ownership of a reference
+ * by calling a function, you are no longer responsible for calling
+ * qobject_unref() when the reference is no longer needed. In other words,
+ * when the function returns you must behave as if the reference to the
+ * passed object was weak.
+ */
+#ifndef QOBJECT_H
+#define QOBJECT_H
+
+#include "qapi/qapi-builtin-types.h"
+
+/* Not for use outside include/qobject/ */
+struct QObjectBase_ {
+ QType type;
+ size_t refcnt;
+};
+
+/* this struct must have no other members than base */
+struct QObject {
+ struct QObjectBase_ base;
+};
+
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define QOBJECT_INTERNAL(obj, _obj) ({ \
+ typeof(obj) _obj = (obj); \
+ _obj ? container_of(&_obj->base, QObject, base) : NULL; \
+})
+#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTIFIER(_obj))
+
+/* Required for qobject_to() */
+#define QTYPE_CAST_TO_QNull QTYPE_QNULL
+#define QTYPE_CAST_TO_QNum QTYPE_QNUM
+#define QTYPE_CAST_TO_QString QTYPE_QSTRING
+#define QTYPE_CAST_TO_QDict QTYPE_QDICT
+#define QTYPE_CAST_TO_QList QTYPE_QLIST
+#define QTYPE_CAST_TO_QBool QTYPE_QBOOL
+
+QEMU_BUILD_BUG_MSG(QTYPE__MAX != 7,
+ "The QTYPE_CAST_TO_* list needs to be extended");
+
+#define qobject_to(type, obj) \
+ ((type *)qobject_check_type(obj, glue(QTYPE_CAST_TO_, type)))
+
+static inline void qobject_ref_impl(QObject *obj)
+{
+ if (obj) {
+ obj->base.refcnt++;
+ }
+}
+
+/**
+ * qobject_is_equal(): Return whether the two objects are equal.
+ *
+ * Any of the pointers may be NULL; return true if both are. Always
+ * return false if only one is (therefore a QNull object is not
+ * considered equal to a NULL pointer).
+ */
+bool qobject_is_equal(const QObject *x, const QObject *y);
+
+/**
+ * qobject_destroy(): Free resources used by the object
+ * For use via qobject_unref() only!
+ */
+void qobject_destroy(QObject *obj);
+
+static inline void qobject_unref_impl(QObject *obj)
+{
+ assert(!obj || obj->base.refcnt);
+ if (obj && --obj->base.refcnt == 0) {
+ qobject_destroy(obj);
+ }
+}
+
+/**
+ * qobject_ref(): Increment QObject's reference count
+ *
+ * Returns: the same @obj. The type of @obj will be propagated to the
+ * return type.
+ */
+#define qobject_ref(obj) ({ \
+ typeof(obj) _o = (obj); \
+ qobject_ref_impl(QOBJECT(_o)); \
+ _o; \
+})
+
+/**
+ * qobject_unref(): Decrement QObject's reference count, deallocate
+ * when it reaches zero
+ */
+#define qobject_unref(obj) qobject_unref_impl(QOBJECT(obj))
+
+/**
+ * qobject_type(): Return the QObject's type
+ */
+static inline QType qobject_type(const QObject *obj)
+{
+ assert(QTYPE_NONE < obj->base.type && obj->base.type < QTYPE__MAX);
+ return obj->base.type;
+}
+
+/**
+ * qobject_check_type(): Helper function for the qobject_to() macro.
+ * Return @obj, but only if @obj is not NULL and @type is equal to
+ * @obj's type. Return NULL otherwise.
+ */
+static inline QObject *qobject_check_type(const QObject *obj, QType type)
+{
+ if (obj && qobject_type(obj) == type) {
+ return (QObject *)obj;
+ } else {
+ return NULL;
+ }
+}
+
+#endif /* QOBJECT_H */
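
A compact sketch of the strong-reference rules and the qobject_to() checked downcast documented above; holding the extra reference through a QObject pointer is purely illustrative.

    #include "qemu/osdep.h"
    #include "qobject/qobject.h"
    #include "qobject/qnum.h"

    static void demo_qobject_refs(void)
    {
        QNum *n = qnum_from_int(42);             /* strong reference */
        QObject *obj = qobject_ref(QOBJECT(n));  /* second strong reference */
        QNum *again = qobject_to(QNum, obj);     /* NULL if not a QNum */

        g_assert(again == n);

        qobject_unref(obj);                      /* drop the extra reference */
        qobject_unref(n);                        /* drop the creation reference */
    }
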
diff --git a/include/qobject/qstring.h b/include/qobject/qstring.h
new file mode 100644
index 0000000..1e2abe4
--- /dev/null
+++ b/include/qobject/qstring.h
@@ -0,0 +1,33 @@
+/*
+ * QString Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QSTRING_H
+#define QSTRING_H
+
+#include "qobject/qobject.h"
+
+struct QString {
+ struct QObjectBase_ base;
+ const char *string;
+};
+
+void qstring_unref(QString *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QString, qstring_unref)
+
+QString *qstring_new(void);
+QString *qstring_from_str(const char *str);
+QString *qstring_from_substr(const char *str, size_t start, size_t end);
+QString *qstring_from_gstring(GString *gstr);
+const char *qstring_get_str(const QString *qstring);
+
+#endif /* QSTRING_H */
diff --git a/include/qom/object.h b/include/qom/object.h
index 13d3a65..26df613 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -26,6 +26,7 @@ typedef struct InterfaceClass InterfaceClass;
typedef struct InterfaceInfo InterfaceInfo;
#define TYPE_OBJECT "object"
+#define TYPE_CONTAINER "container"
typedef struct ObjectProperty ObjectProperty;
@@ -279,7 +280,7 @@ struct Object
static void \
module_obj_name##_finalize(Object *obj); \
static void \
- module_obj_name##_class_init(ObjectClass *oc, void *data); \
+ module_obj_name##_class_init(ObjectClass *oc, const void *data); \
static void \
module_obj_name##_init(Object *obj); \
\
@@ -293,7 +294,7 @@ struct Object
.class_size = CLASS_SIZE, \
.class_init = module_obj_name##_class_init, \
.abstract = ABSTRACT, \
- .interfaces = (InterfaceInfo[]) { __VA_ARGS__ } , \
+ .interfaces = (const InterfaceInfo[]) { __VA_ARGS__ } , \
}; \
\
static void \
@@ -444,7 +445,8 @@ struct Object
* class will have already been initialized so the type is only responsible
* for initializing its own members.
* @instance_post_init: This function is called to finish initialization of
- * an object, after all @instance_init functions were called.
+ * an object, after all @instance_init functions were called, as well as
+ * @instance_post_init functions for the parent classes.
* @instance_finalize: This function is called during object destruction. This
* is called before the parent @instance_finalize function has been called.
* An object should only free the members that are unique to its type in this
@@ -485,11 +487,11 @@ struct TypeInfo
bool abstract;
size_t class_size;
- void (*class_init)(ObjectClass *klass, void *data);
- void (*class_base_init)(ObjectClass *klass, void *data);
- void *class_data;
+ void (*class_init)(ObjectClass *klass, const void *data);
+ void (*class_base_init)(ObjectClass *klass, const void *data);
+ const void *class_data;
- InterfaceInfo *interfaces;
+ const InterfaceInfo *interfaces;
};
/**
@@ -572,12 +574,15 @@ struct InterfaceInfo {
*
* The class for all interfaces. Subclasses of this class should only add
* virtual methods.
+ *
+ * Note that most of the fields of ObjectClass are unused (all except
+ * "type", in fact). They are only present in InterfaceClass to allow
+ * @object_class_dynamic_cast to work with both regular classes and interfaces.
*/
struct InterfaceClass
{
ObjectClass parent_class;
/* private: */
- ObjectClass *concrete_class;
Type interface_type;
};
@@ -880,25 +885,11 @@ const char *object_get_typename(const Object *obj);
* type_register_static:
* @info: The #TypeInfo of the new type.
*
- * @info and all of the strings it points to should exist for the life time
- * that the type is registered.
- *
* Returns: the new #Type.
*/
Type type_register_static(const TypeInfo *info);
/**
- * type_register:
- * @info: The #TypeInfo of the new type
- *
- * Unlike type_register_static(), this call does not require @info or its
- * string members to continue to exist after the call returns.
- *
- * Returns: the new #Type.
- */
-Type type_register(const TypeInfo *info);
-
-/**
* type_register_static_array:
* @infos: The array of the new type #TypeInfo structures.
* @nr_infos: number of entries in @infos
@@ -1523,6 +1514,16 @@ const char *object_property_get_type(Object *obj, const char *name,
*/
Object *object_get_root(void);
+/**
+ * object_get_container:
+ * @name: the name of container to lookup
+ *
+ * Lookup a root level container.
+ *
+ * Returns: the container with @name.
+ */
+Object *object_get_container(const char *name);
+
/**
* object_get_objects_root:
@@ -1569,8 +1570,8 @@ char *object_get_canonical_path(const Object *obj);
/**
* object_resolve_path:
* @path: the path to resolve
- * @ambiguous: returns true if the path resolution failed because of an
- * ambiguous match
+ * @ambiguous: (out) (optional): location to store whether the lookup failed
+ * because it was ambiguous, or %NULL. Set to %false on success.
*
* There are two types of supported paths--absolute paths and partial paths.
*
@@ -1587,7 +1588,7 @@ char *object_get_canonical_path(const Object *obj);
* only one match is found. If more than one match is found, a flag is
* returned to indicate that the match was ambiguous.
*
- * Returns: The matched object or NULL on path lookup failure.
+ * Returns: The matched object or %NULL on path lookup failure.
*/
Object *object_resolve_path(const char *path, bool *ambiguous);
@@ -1595,10 +1596,10 @@ Object *object_resolve_path(const char *path, bool *ambiguous);
* object_resolve_path_type:
* @path: the path to resolve
* @typename: the type to look for.
- * @ambiguous: returns true if the path resolution failed because of an
- * ambiguous match
+ * @ambiguous: (out) (optional): location to store whether the lookup failed
+ * because it was ambiguous, or %NULL. Set to %false on success.
*
- * This is similar to object_resolve_path. However, when looking for a
+ * This is similar to object_resolve_path(). However, when looking for a
* partial path only matches that implement the given type are considered.
* This restricts the search and avoids spuriously flagging matches as
* ambiguous.
@@ -2020,25 +2021,18 @@ int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque),
int object_child_foreach_recursive(Object *obj,
int (*fn)(Object *child, void *opaque),
void *opaque);
-/**
- * container_get:
- * @root: root of the #path, e.g., object_get_root()
- * @path: path to the container
- *
- * Return a container object whose path is @path. Create more containers
- * along the path if necessary.
- *
- * Returns: the container object.
- */
-Object *container_get(Object *root, const char *path);
/**
- * object_type_get_instance_size:
- * @typename: Name of the Type whose instance_size is required
+ * object_property_add_new_container:
+ * @obj: the parent object
+ * @name: the name of the parent object's property to add
+ *
+ * Add a newly created container object to a parent object.
*
- * Returns the instance_size of the given @typename.
+ * Returns: the newly created container object. Its reference count is 1,
+ * and the reference is owned by the parent object.
*/
-size_t object_type_get_instance_size(const char *typename);
+Object *object_property_add_new_container(Object *obj, const char *name);
/**
* object_property_help:
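
For the interface changes above, a brief sketch of the updated object_resolve_path() out-parameter contract and the new object_get_container() lookup; the container name "objects" and the error handling are illustrative assumptions.

    #include "qemu/osdep.h"
    #include "qom/object.h"

    static Object *find_by_path(const char *path)
    {
        bool ambiguous = false;
        Object *obj = object_resolve_path(path, &ambiguous);

        if (!obj && ambiguous) {
            /* a partial path matched more than one object */
        }
        return obj;                  /* NULL on lookup failure */
    }

    static Object *user_objects_root(void)
    {
        /* Look up a root-level container by name. */
        return object_get_container("objects");
    }
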
diff --git a/include/semihosting/console.h b/include/semihosting/console.h
index bd78e5f..1c12e17 100644
--- a/include/semihosting/console.h
+++ b/include/semihosting/console.h
@@ -9,8 +9,6 @@
#ifndef SEMIHOST_CONSOLE_H
#define SEMIHOST_CONSOLE_H
-#include "cpu.h"
-
/**
* qemu_semihosting_console_read:
* @cs: CPUState
diff --git a/include/semihosting/semihost.h b/include/semihosting/semihost.h
index 97d2a2b..b03e637 100644
--- a/include/semihosting/semihost.h
+++ b/include/semihosting/semihost.h
@@ -26,32 +26,6 @@ typedef enum SemihostingTarget {
SEMIHOSTING_TARGET_GDB
} SemihostingTarget;
-#ifdef CONFIG_USER_ONLY
-static inline bool semihosting_enabled(bool is_user)
-{
- return true;
-}
-
-static inline SemihostingTarget semihosting_get_target(void)
-{
- return SEMIHOSTING_TARGET_AUTO;
-}
-
-static inline const char *semihosting_get_arg(int i)
-{
- return NULL;
-}
-
-static inline int semihosting_get_argc(void)
-{
- return 0;
-}
-
-static inline const char *semihosting_get_cmdline(void)
-{
- return NULL;
-}
-#else /* !CONFIG_USER_ONLY */
/**
* semihosting_enabled:
* @is_user: true if guest code is in usermode (i.e. not privileged)
@@ -59,17 +33,18 @@ static inline const char *semihosting_get_cmdline(void)
* Return true if guest code is allowed to make semihosting calls.
*/
bool semihosting_enabled(bool is_user);
+
SemihostingTarget semihosting_get_target(void);
const char *semihosting_get_arg(int i);
int semihosting_get_argc(void);
const char *semihosting_get_cmdline(void);
void semihosting_arg_fallback(const char *file, const char *cmd);
+
/* for vl.c hooks */
void qemu_semihosting_enable(void);
int qemu_semihosting_config_options(const char *optstr);
void qemu_semihosting_chardev_init(void);
void qemu_semihosting_console_init(Chardev *);
-#endif /* CONFIG_USER_ONLY */
void qemu_semihosting_guestfd_init(void);
#endif /* SEMIHOST_H */
diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h
index 3a5ec22..6627c45 100644
--- a/include/semihosting/syscalls.h
+++ b/include/semihosting/syscalls.h
@@ -9,6 +9,9 @@
#ifndef SEMIHOSTING_SYSCALLS_H
#define SEMIHOSTING_SYSCALLS_H
+#include "exec/cpu-defs.h"
+#include "gdbstub/syscalls.h"
+
/*
* Argument loading from the guest is performed by the caller;
* results are returned via the 'complete' callback.
diff --git a/include/semihosting/uaccess.h b/include/semihosting/uaccess.h
index c2fa5a6..6bc90b1 100644
--- a/include/semihosting/uaccess.h
+++ b/include/semihosting/uaccess.h
@@ -19,41 +19,96 @@
#include "exec/tswap.h"
#include "exec/page-protection.h"
+/**
+ * get_user_u64:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define get_user_u64(val, addr) \
({ uint64_t val_ = 0; \
int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
&val_, sizeof(val_), 0); \
(val) = tswap64(val_); ret_; })
+/**
+ * get_user_u32:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define get_user_u32(val, addr) \
({ uint32_t val_ = 0; \
int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
&val_, sizeof(val_), 0); \
(val) = tswap32(val_); ret_; })
+/**
+ * get_user_u8:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define get_user_u8(val, addr) \
({ uint8_t val_ = 0; \
int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
&val_, sizeof(val_), 0); \
(val) = val_; ret_; })
+/**
+ * get_user_ual:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define get_user_ual(arg, p) get_user_u32(arg, p)
+/**
+ * put_user_u64:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define put_user_u64(val, addr) \
({ uint64_t val_ = tswap64(val); \
cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+/**
+ * put_user_u32:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define put_user_u32(val, addr) \
({ uint32_t val_ = tswap32(val); \
cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+/**
+ * put_user_ual:
+ *
+ * Returns: 0 on success, -1 on error.
+ */
#define put_user_ual(arg, p) put_user_u32(arg, p)
+/**
+ * uaccess_lock_user:
+ *
+ * The returned pointer should be freed using uaccess_unlock_user().
+ */
void *uaccess_lock_user(CPUArchState *env, target_ulong addr,
target_ulong len, bool copy);
+/**
+ * lock_user:
+ *
+ * The returned pointer should be freed using unlock_user().
+ */
#define lock_user(type, p, len, copy) uaccess_lock_user(env, p, len, copy)
+/**
+ * uaccess_lock_user_string:
+ *
+ * The returned string should be freed using uaccess_unlock_user().
+ */
char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr);
+/**
+ * lock_user_string:
+ *
+ * The returned string should be freed using unlock_user().
+ */
#define lock_user_string(p) uaccess_lock_user_string(env, p)
void uaccess_unlock_user(CPUArchState *env, void *p,
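
An illustrative semihosting-style helper built from the accessors documented above. The (pointer, length) argument layout with a 4-byte stride assumes a 32-bit guest ABI, and the macros' implicit use of a local named env follows the existing convention; none of this is mandated by the patch.

    #include "qemu/osdep.h"
    #include "semihosting/uaccess.h"

    /* Map a guest buffer described by a (pointer, length) argument pair. */
    static void *map_guest_buf(CPUArchState *env, target_ulong args,
                               target_ulong *lenp)
    {
        target_ulong buf, len;

        if (get_user_ual(buf, args) || get_user_ual(len, args + 4)) {
            return NULL;                   /* the accessors return -1 on error */
        }

        *lenp = len;
        /* Must later be released with uaccess_unlock_user(). */
        return uaccess_lock_user(env, buf, len, /* copy */ true);
    }
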
diff --git a/include/standard-headers/asm-x86/setup_data.h b/include/standard-headers/asm-x86/setup_data.h
index 09355f5..2e446c1d 100644
--- a/include/standard-headers/asm-x86/setup_data.h
+++ b/include/standard-headers/asm-x86/setup_data.h
@@ -13,12 +13,13 @@
#define SETUP_CC_BLOB 7
#define SETUP_IMA 8
#define SETUP_RNG_SEED 9
-#define SETUP_ENUM_MAX SETUP_RNG_SEED
+#define SETUP_KEXEC_KHO 10
+#define SETUP_ENUM_MAX SETUP_KEXEC_KHO
#define SETUP_INDIRECT (1<<31)
#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include "standard-headers/linux/types.h"
@@ -78,6 +79,16 @@ struct ima_setup_data {
uint64_t size;
} QEMU_PACKED;
-#endif /* __ASSEMBLY__ */
+/*
+ * Locations of kexec handover metadata
+ */
+struct kho_data {
+ uint64_t fdt_addr;
+ uint64_t fdt_size;
+ uint64_t scratch_addr;
+ uint64_t scratch_size;
+} QEMU_PACKED;
+
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SETUP_DATA_H */
diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h
index b729170..c8309d3 100644
--- a/include/standard-headers/drm/drm_fourcc.h
+++ b/include/standard-headers/drm/drm_fourcc.h
@@ -420,6 +420,8 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
+#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
+#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
@@ -702,6 +704,31 @@ extern "C" {
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on integrated graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects.
+ */
+#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
+
+/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on discrete graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects. The GEM object must be stored in
+ * contiguous memory with a size aligned to 64KB
+ */
+#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -1427,6 +1454,90 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
*/
#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
+/* MediaTek modifiers
+ * Bits   Parameter        Notes
+ * -----  ---------------  ---------------------------------------------
+ *  7: 0  TILE LAYOUT      Values are MTK_FMT_MOD_TILE_*
+ * 15: 8  COMPRESSION      Values are MTK_FMT_MOD_COMPRESS_*
+ * 23:16  10 BIT LAYOUT    Values are MTK_FMT_MOD_10BIT_LAYOUT_*
+ *
+ */
+
+#define DRM_FORMAT_MOD_MTK(__flags) fourcc_mod_code(MTK, __flags)
+
+/*
+ * MediaTek Tiled Modifier
+ * The lowest 8 bits of the modifier is used to specify the tiling
+ * layout. Only the 16L_32S tiling is used for now, but we define an
+ * "untiled" version and leave room for future expansion.
+ */
+#define MTK_FMT_MOD_TILE_MASK 0xf
+#define MTK_FMT_MOD_TILE_NONE 0x0
+#define MTK_FMT_MOD_TILE_16L32S 0x1
+
+/*
+ * Bits 8-15 specify compression options
+ */
+#define MTK_FMT_MOD_COMPRESS_MASK (0xf << 8)
+#define MTK_FMT_MOD_COMPRESS_NONE (0x0 << 8)
+#define MTK_FMT_MOD_COMPRESS_V1 (0x1 << 8)
+
+/*
+ * Bits 16-23 specify how the bits of 10 bit formats are
+ * stored out in memory
+ */
+#define MTK_FMT_MOD_10BIT_LAYOUT_MASK (0xf << 16)
+#define MTK_FMT_MOD_10BIT_LAYOUT_PACKED (0x0 << 16)
+#define MTK_FMT_MOD_10BIT_LAYOUT_LSBTILED (0x1 << 16)
+#define MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER (0x2 << 16)
+
+/* alias for the most common tiling format */
+#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
+
+/*
+ * Apple GPU-tiled layouts.
+ *
+ * Apple GPUs support nonlinear tilings with optional lossless compression.
+ *
+ * GPU-tiled images are divided into 16KiB tiles:
+ *
+ * Bytes per pixel   Tile size
+ * ---------------   ---------
+ *        1          128x128
+ *        2          128x64
+ *        4          64x64
+ *        8          64x32
+ *       16          32x32
+ *
+ * Tiles are raster-order. Pixels within a tile are interleaved (Morton order).
+ *
+ * Compressed images pad the body to 128-bytes and are immediately followed by a
+ * metadata section. The metadata section rounds the image dimensions to
+ * powers-of-two and contains 8 bytes for each 16x16 compression subtile.
+ * Subtiles are interleaved (Morton order).
+ *
+ * All images are 128-byte aligned.
+ *
+ * These layouts fundamentally do not have meaningful strides. No matter how we
+ * specify strides for these layouts, userspace unaware of Apple image layouts
+ * will be unable to correctly use the specified stride for any purpose.
+ * Userspace aware of the image layouts does not use strides. The most "correct"
+ * convention would be setting the image stride to 0. Unfortunately, some
+ * software assumes the stride is at least (width * bytes per pixel). We
+ * therefore require that stride equals (width * bytes per pixel). Since the
+ * stride is arbitrary here, we pick the simplest convention.
+ *
+ * Although containing two sections, compressed image layouts are treated in
+ * software as a single plane. This is modelled after AFBC, a similar
+ * scheme. Attempting to separate the sections to be "explicit" in DRM would
+ * only generate more confusion, as software does not treat the image this way.
+ *
+ * For detailed information on the hardware image layouts, see
+ * https://docs.mesa3d.org/drivers/asahi.html#image-layouts
+ */
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
+
/*
* AMD modifiers
*
@@ -1475,6 +1586,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
#define AMD_FMT_MOD_TILE_VER_GFX11 4
+#define AMD_FMT_MOD_TILE_VER_GFX12 5
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
@@ -1485,13 +1597,31 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
/*
* 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
* GFX9 as canonical version.
+ *
+ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_4K_D_X 22
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+/* Gfx12 swizzle modes:
+ * 0 - LINEAR
+ * 1 - 256B_2D - 2D block dimensions
+ * 2 - 4KB_2D
+ * 3 - 64KB_2D
+ * 4 - 256KB_2D
+ * 5 - 4KB_3D - 3D block dimensions
+ * 6 - 64KB_3D
+ * 7 - 256KB_3D
+ */
+#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
+#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
+#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
+#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
+
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
#define AMD_FMT_MOD_DCC_BLOCK_256B 2
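
A one-line helper restating the stride convention spelled out for the Apple GPU-tiled modifiers above; the helper itself is illustrative and not part of the header.

    #include <stdint.h>

    /* DRM_FORMAT_MOD_APPLE_GPU_TILED(_COMPRESSED): the stride carries no
     * layout information and is required to equal width * bytes-per-pixel. */
    static inline uint32_t apple_gpu_tiled_stride(uint32_t width, uint32_t cpp)
    {
        return width * cpp;
    }
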
diff --git a/include/standard-headers/linux/const.h b/include/standard-headers/linux/const.h
index 1eb84b5..95ede23 100644
--- a/include/standard-headers/linux/const.h
+++ b/include/standard-headers/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing __asm__ support
+ *
+ * __BIT128() would not work in __asm__ code, as it shifts an
+ * 'unsigned __int128' data type; direct representation of
+ * 128 bit constants is not supported in the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Please revisit this implementation when gcc compiler
+ * starts representing 128 bit constants directly like long
+ * and unsigned long etc. Subsequently drop the comment for
+ * GENMASK_U128() which would then start supporting __asm__ code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
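
For reference, a sketch of how the new _BIT128() helper composes into a 128-bit mask; the EXAMPLE_MASK128 macro is an illustration (valid for spans narrower than 128 bits), not part of the header.

    #include "standard-headers/linux/const.h"

    /* Bits [hi:lo] set in an unsigned __int128 value, for hi - lo < 127. */
    #define EXAMPLE_MASK128(hi, lo) \
        ((_BIT128((hi) - (lo) + 1) - 1) << (lo))
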
diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h
index b0b4b68..cef0d20 100644
--- a/include/standard-headers/linux/ethtool.h
+++ b/include/standard-headers/linux/ethtool.h
@@ -681,6 +681,8 @@ enum ethtool_link_ext_substate_module {
* @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
* @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
* @ETH_SS_STATS_RMON: names of RMON statistics
+ * @ETH_SS_STATS_PHY: names of PHY(dev) statistics
+ * @ETH_SS_TS_FLAGS: hardware timestamping flags
*
* @ETH_SS_COUNT: number of defined string sets
*/
@@ -706,6 +708,8 @@ enum ethtool_stringset {
ETH_SS_STATS_ETH_MAC,
ETH_SS_STATS_ETH_CTRL,
ETH_SS_STATS_RMON,
+ ETH_SS_STATS_PHY,
+ ETH_SS_TS_FLAGS,
/* add new constants above here */
ETH_SS_COUNT
@@ -753,6 +757,197 @@ enum ethtool_module_power_mode {
};
/**
+ * enum ethtool_c33_pse_ext_state - groups of PSE extended states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION: Group of error_condition states
+ * @ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID: Group of mr_mps_valid states
+ * @ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE: Group of mr_pse_enable states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED: Group of option_detect_ted
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM: Group of option_vport_lim states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED: Group of ovld_detected states
+ * @ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE: Group of pd_dll_power_type
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE: Group of power_not_available
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED: Group of short_detected states
+ */
+enum ethtool_c33_pse_ext_state {
+ ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1,
+ ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE,
+ ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED,
+ ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE,
+ ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_mr_mps_valid - mr_mps_valid states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD: Underload
+ * state
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN: Port is not
+ * connected
+ *
+ * The PSE monitors either the DC or AC Maintain Power Signature
+ * (MPS, see 33.2.9.1). This variable indicates the presence or absence of
+ * a valid MPS.
+ */
+enum ethtool_c33_pse_ext_substate_mr_mps_valid {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_error_condition - error_condition states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT: Non-existing
+ * port number
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT: Undefined port
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT: Internal
+ * hardware fault
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON:
+ * Communication error after force on
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS: Unknown
+ * port status
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF: Host
+ * crash turn off
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN:
+ * Host crash force shutdown
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE: Configuration
+ * change
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP: Over
+ * temperature detected
+ *
+ * error_condition is a variable indicating the status of
+ * implementation-specific fault conditions or optionally other system faults
+ * that prevent the PSE from meeting the specifications in Table 33–11 and that
+ * require the PSE not to source power. These error conditions are different
+ * from those monitored by the state diagrams in Figure 33–10.
+ */
+enum ethtool_c33_pse_ext_substate_error_condition {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_mr_pse_enable - mr_pse_enable states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE: Disable
+ * pin active
+ *
+ * mr_pse_enable is a control variable that selects PSE operation and test
+ * functions.
+ */
+enum ethtool_c33_pse_ext_substate_mr_pse_enable {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_option_detect_ted - option_detect_ted
+ * states functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS: Detection
+ * in process
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR:
+ * Connection check error
+ *
+ * option_detect_ted is a variable indicating if detection can be performed
+ * by the PSE during the ted_timer interval.
+ */
+enum ethtool_c33_pse_ext_substate_option_detect_ted {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_option_vport_lim - option_vport_lim states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE: Main supply
+ * voltage is high
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE: Main supply
+ * voltage is low
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION: Voltage
+ * injection into the port
+ *
+ * option_vport_lim is an optional variable that indicates if VPSE is out of
+ * the operating range during the normal operating state.
+ */
+enum ethtool_c33_pse_ext_substate_option_vport_lim {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_ovld_detected - ovld_detected states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD: Overload state
+ *
+ * ovld_detected is a variable indicating if the PSE output current has been
+ * in an overload condition (see 33.2.7.6) for at least TCUT of a one-second
+ * sliding time.
+ */
+enum ethtool_c33_pse_ext_substate_ovld_detected {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_power_not_available - power_not_available
+ * states functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED: Power
+ * budget exceeded for the controller
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET:
+ * Configured port power limit exceeded controller power budget
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT:
+ * Power request from PD exceeds port limit
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT: Power
+ * denied due to Hardware power limit
+ *
+ * power_not_available is a variable that is asserted in an
+ * implementation-dependent manner when the PSE is no longer capable of
+ * sourcing sufficient power to support the attached PD. Sufficient power
+ * is defined by classification; see 33.2.6.
+ */
+enum ethtool_c33_pse_ext_substate_power_not_available {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_short_detected - short_detected states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION: Short
+ * condition was detected
+ *
+ * short_detected is a variable indicating if the PSE output current has been
+ * in a short circuit condition for TLIM within a sliding window (see 33.2.7.7).
+ */
+enum ethtool_c33_pse_ext_substate_short_detected {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1,
+};
+
+/**
* enum ethtool_pse_types - Types of PSE controller.
* @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown
* @ETHTOOL_PSE_PODL: PSE controller which support PoDL
@@ -878,6 +1073,24 @@ enum ethtool_mm_verify_status {
};
/**
+ * enum ethtool_module_fw_flash_status - plug-in module firmware flashing status
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED: The firmware flashing process has
+ * started.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS: The firmware flashing process
+ * is in progress.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED: The firmware flashing process was
+ * completed successfully.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR: The firmware flashing process was
+ * stopped due to an error.
+ */
+enum ethtool_module_fw_flash_status {
+ ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -1845,6 +2058,25 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT = 103,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT = 104,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT = 105,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT = 106,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT = 107,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT = 108,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT = 109,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT = 110,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT = 111,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT = 112,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT = 113,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT = 114,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT = 115,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT = 116,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT = 117,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT = 118,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT = 119,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT = 120,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
@@ -2057,73 +2289,81 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
* be exploited to reduce the RSS queue spread.
*/
#define RXH_XFRM_SYM_XOR (1 << 0)
+/* Similar to SYM_XOR, except that one copy of the XOR'ed fields is replaced by
+ * an OR of the same fields
+ */
+#define RXH_XFRM_SYM_OR_XOR (1 << 1)
#define RXH_XFRM_NO_CHANGE 0xff
-/* L2-L4 network traffic flow types */
-#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
-#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
-#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */
-#define AH_ESP_V4_FLOW 0x04 /* hash only */
-#define TCP_V6_FLOW 0x05 /* hash or spec (tcp_ip6_spec; nfc only) */
-#define UDP_V6_FLOW 0x06 /* hash or spec (udp_ip6_spec; nfc only) */
-#define SCTP_V6_FLOW 0x07 /* hash or spec (sctp_ip6_spec; nfc only) */
-#define AH_ESP_V6_FLOW 0x08 /* hash only */
-#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */
-#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */
-#define AH_V6_FLOW 0x0b /* hash or spec (ah_ip6_spec; nfc only) */
-#define ESP_V6_FLOW 0x0c /* hash or spec (esp_ip6_spec; nfc only) */
-#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
-#define IP_USER_FLOW IPV4_USER_FLOW
-#define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */
-#define IPV4_FLOW 0x10 /* hash only */
-#define IPV6_FLOW 0x11 /* hash only */
-#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
-
-/* Used for GTP-U IPv4 and IPv6.
- * The format of GTP packets only includes
- * elements such as TEID and GTP version.
- * It is primarily intended for data communication of the UE.
- */
-#define GTPU_V4_FLOW 0x13 /* hash only */
-#define GTPU_V6_FLOW 0x14 /* hash only */
-
-/* Use for GTP-C IPv4 and v6.
- * The format of these GTP packets does not include TEID.
- * Primarily expected to be used for communication
- * to create sessions for UE data communication,
- * commonly referred to as CSR (Create Session Request).
- */
-#define GTPC_V4_FLOW 0x15 /* hash only */
-#define GTPC_V6_FLOW 0x16 /* hash only */
-
-/* Use for GTP-C IPv4 and v6.
- * Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID.
- * After session creation, it becomes this packet.
- * This is mainly used for requests to realize UE handover.
- */
-#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
-#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
-
-/* Use for GTP-U and extended headers for the PSC (PDU Session Container).
- * The format of these GTP packets includes TEID and QFI.
- * In 5G communication using UPF (User Plane Function),
- * data communication with this extended header is performed.
- */
-#define GTPU_EH_V4_FLOW 0x19 /* hash only */
-#define GTPU_EH_V6_FLOW 0x1a /* hash only */
-
-/* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers.
- * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by
- * UL/DL included in the PSC.
- * There are differences in the data included based on Downlink/Uplink,
- * and can be used to distinguish packets.
- * The functions described so far are useful when you want to
- * handle communication from the mobile network in UPF, PGW, etc.
- */
-#define GTPU_UL_V4_FLOW 0x1b /* hash only */
-#define GTPU_UL_V6_FLOW 0x1c /* hash only */
-#define GTPU_DL_V4_FLOW 0x1d /* hash only */
-#define GTPU_DL_V6_FLOW 0x1e /* hash only */
+enum {
+ /* L2-L4 network traffic flow types */
+ TCP_V4_FLOW = 0x01, /* hash or spec (tcp_ip4_spec) */
+ UDP_V4_FLOW = 0x02, /* hash or spec (udp_ip4_spec) */
+ SCTP_V4_FLOW = 0x03, /* hash or spec (sctp_ip4_spec) */
+ AH_ESP_V4_FLOW = 0x04, /* hash only */
+ TCP_V6_FLOW = 0x05, /* hash or spec (tcp_ip6_spec; nfc only) */
+ UDP_V6_FLOW = 0x06, /* hash or spec (udp_ip6_spec; nfc only) */
+ SCTP_V6_FLOW = 0x07, /* hash or spec (sctp_ip6_spec; nfc only) */
+ AH_ESP_V6_FLOW = 0x08, /* hash only */
+ AH_V4_FLOW = 0x09, /* hash or spec (ah_ip4_spec) */
+ ESP_V4_FLOW = 0x0a, /* hash or spec (esp_ip4_spec) */
+ AH_V6_FLOW = 0x0b, /* hash or spec (ah_ip6_spec; nfc only) */
+ ESP_V6_FLOW = 0x0c, /* hash or spec (esp_ip6_spec; nfc only) */
+ IPV4_USER_FLOW = 0x0d, /* spec only (usr_ip4_spec) */
+ IP_USER_FLOW = IPV4_USER_FLOW,
+ IPV6_USER_FLOW = 0x0e, /* spec only (usr_ip6_spec; nfc only) */
+ IPV4_FLOW = 0x10, /* hash only */
+ IPV6_FLOW = 0x11, /* hash only */
+ ETHER_FLOW = 0x12, /* spec only (ether_spec) */
+
+ /* Used for GTP-U IPv4 and IPv6.
+ * The format of GTP packets only includes
+ * elements such as TEID and GTP version.
+ * It is primarily intended for data communication of the UE.
+ */
+ GTPU_V4_FLOW = 0x13, /* hash only */
+ GTPU_V6_FLOW = 0x14, /* hash only */
+
+ /* Used for GTP-C IPv4 and IPv6.
+ * The format of these GTP packets does not include TEID.
+ * Primarily expected to be used for communication
+ * to create sessions for UE data communication,
+ * commonly referred to as CSR (Create Session Request).
+ */
+ GTPC_V4_FLOW = 0x15, /* hash only */
+ GTPC_V6_FLOW = 0x16, /* hash only */
+
+ /* Used for GTP-C IPv4 and IPv6.
+ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes the TEID.
+ * After session creation, traffic uses this packet format.
+ * It is mainly used for requests that realize UE handover.
+ */
+ GTPC_TEID_V4_FLOW = 0x17, /* hash only */
+ GTPC_TEID_V6_FLOW = 0x18, /* hash only */
+
+ /* Used for GTP-U with extended headers for the PSC (PDU Session Container).
+ * The format of these GTP packets includes TEID and QFI.
+ * In 5G communication using UPF (User Plane Function),
+ * data communication with this extended header is performed.
+ */
+ GTPU_EH_V4_FLOW = 0x19, /* hash only */
+ GTPU_EH_V6_FLOW = 0x1a, /* hash only */
+
+ /* Used for GTP-U IPv4 and IPv6 PSC (PDU Session Container) extended headers.
+ * This differs from GTPU_EH_V(4|6)_FLOW in that packets are further
+ * distinguished by the UL/DL indication carried in the PSC.
+ * The data carried differs between Downlink and Uplink, so this can be
+ * used to tell the two directions apart.
+ * The flow types described so far are useful when handling traffic
+ * from the mobile network in a UPF, PGW, etc.
+ */
+ GTPU_UL_V4_FLOW = 0x1b, /* hash only */
+ GTPU_UL_V6_FLOW = 0x1c, /* hash only */
+ GTPU_DL_V4_FLOW = 0x1d, /* hash only */
+ GTPU_DL_V6_FLOW = 0x1e, /* hash only */
+
+ __FLOW_TYPE_COUNT,
+};
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
@@ -2316,6 +2556,11 @@ struct ethtool_link_settings {
uint8_t master_slave_state;
uint8_t rate_matching;
uint32_t reserved[7];
+ /* Linux builds with -Wflex-array-member-not-at-end but does
+ * not use the "link_mode_masks" member. Leave it defined for
+ * userspace for now, and when userspace wants to start using
+ * -Wfamnae, we'll need a new solution.
+ */
uint32_t link_mode_masks[];
/* layout of link_mode_masks fields:
* uint32_t map_supported[link_mode_masks_nwords];
@@ -2323,4 +2568,20 @@ struct ethtool_link_settings {
* uint32_t map_lp_advertising[link_mode_masks_nwords];
*/
};
+
+/**
+ * enum phy_upstream - Represents the upstream component a given PHY device
+ * is connected to, as in what is on the other end of the MII bus. Most PHYs
+ * will be attached to an Ethernet MAC controller, but in some cases, there's
+ * an intermediate PHY used as a media-converter, which will drive another
+ * MII interface as its output.
+ * @PHY_UPSTREAM_MAC: Upstream component is a MAC (a switch port,
+ * or ethernet controller)
+ * @PHY_UPSTREAM_PHY: Upstream component is a PHY (likely a media converter)
+ */
+enum phy_upstream {
+ PHY_UPSTREAM_MAC,
+ PHY_UPSTREAM_PHY,
+};
+
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h
index bac9dbc..d8b2fd6 100644
--- a/include/standard-headers/linux/fuse.h
+++ b/include/standard-headers/linux/fuse.h
@@ -217,6 +217,24 @@
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ *
+ * 7.41
+ * - add FUSE_ALLOW_IDMAP
+ * 7.42
+ * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data
+ * structures:
+ * - struct fuse_uring_ent_in_out
+ * - struct fuse_uring_req_header
+ * - struct fuse_uring_cmd_req
+ * - FUSE_URING_IN_OUT_HEADER_SZ
+ * - FUSE_URING_OP_IN_OUT_SZ
+ * - enum fuse_uring_cmd
+ *
+ * 7.43
+ * - add FUSE_REQUEST_TIMEOUT
+ *
+ * 7.44
+ * - add FUSE_NOTIFY_INC_EPOCH
*/
#ifndef _LINUX_FUSE_H
@@ -248,7 +266,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 40
+#define FUSE_KERNEL_MINOR_VERSION 44
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -417,6 +435,10 @@ struct fuse_file_lock {
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
+ * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
+ * FUSE_OVER_IO_URING: Indicates that the client supports io-uring
+ * FUSE_REQUEST_TIMEOUT: kernel supports timing out requests.
+ * init_out.request_timeout contains the timeout (in secs)
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -459,9 +481,11 @@ struct fuse_file_lock {
#define FUSE_PASSTHROUGH (1ULL << 37)
#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
#define FUSE_HAS_RESEND (1ULL << 39)
-
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+#define FUSE_ALLOW_IDMAP (1ULL << 40)
+#define FUSE_OVER_IO_URING (1ULL << 41)
+#define FUSE_REQUEST_TIMEOUT (1ULL << 42)
/**
* CUSE INIT request/reply flags
@@ -646,6 +670,7 @@ enum fuse_notify_code {
FUSE_NOTIFY_RETRIEVE = 5,
FUSE_NOTIFY_DELETE = 6,
FUSE_NOTIFY_RESEND = 7,
+ FUSE_NOTIFY_INC_EPOCH = 8,
FUSE_NOTIFY_CODE_MAX,
};
@@ -889,7 +914,8 @@ struct fuse_init_out {
uint16_t map_alignment;
uint32_t flags2;
uint32_t max_stack_depth;
- uint32_t unused[6];
+ uint16_t request_timeout;
+ uint16_t unused[11];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -980,6 +1006,21 @@ struct fuse_fallocate_in {
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
+/**
+ * This value is set by the kernel in the
+ * (struct fuse_in_header).{uid,gid} fields when:
+ * - the fuse daemon enabled FUSE_ALLOW_IDMAP
+ * - idmapping information is not available and the uid/gid
+ *   cannot be mapped in accordance with an idmapping.
+ *
+ * Note: idmapping information is always available
+ * for inode creation operations like:
+ * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE,
+ * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT).
+ */
+#define FUSE_INVALID_UIDGID ((uint32_t)(-1))
+
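
A fuse daemon that negotiated FUSE_ALLOW_IDMAP might therefore guard against unmapped credentials along these lines (a minimal sketch; the policy on failure is up to the server):

/* Sketch: true if the request carries credentials that could be idmapped. */
static bool fuse_req_creds_valid(const struct fuse_in_header *in)
{
    return in->uid != FUSE_INVALID_UIDGID &&
           in->gid != FUSE_INVALID_UIDGID;
}
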
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
@@ -1182,4 +1223,67 @@ struct fuse_supp_groups {
uint32_t groups[];
};
+/**
+ * Size of the ring buffer header
+ */
+#define FUSE_URING_IN_OUT_HEADER_SZ 128
+#define FUSE_URING_OP_IN_OUT_SZ 128
+
+/* Used as part of the fuse_uring_req_header */
+struct fuse_uring_ent_in_out {
+ uint64_t flags;
+
+ /*
+ * commit ID to be used in a reply to a ring request (see also
+ * struct fuse_uring_cmd_req)
+ */
+ uint64_t commit_id;
+
+ /* size of user payload buffer */
+ uint32_t payload_sz;
+ uint32_t padding;
+
+ uint64_t reserved;
+};
+
+/**
+ * Header for all fuse-io-uring requests
+ */
+struct fuse_uring_req_header {
+ /* struct fuse_in_header / struct fuse_out_header */
+ char in_out[FUSE_URING_IN_OUT_HEADER_SZ];
+
+ /* per op code header */
+ char op_in[FUSE_URING_OP_IN_OUT_SZ];
+
+ struct fuse_uring_ent_in_out ring_ent_in_out;
+};
+
+/**
+ * sqe commands to the kernel
+ */
+enum fuse_uring_cmd {
+ FUSE_IO_URING_CMD_INVALID = 0,
+
+ /* register the request buffer and fetch a fuse request */
+ FUSE_IO_URING_CMD_REGISTER = 1,
+
+ /* commit fuse request result and fetch next request */
+ FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2,
+};
+
+/**
+ * In the 80B command area of the SQE.
+ */
+struct fuse_uring_cmd_req {
+ uint64_t flags;
+
+ /* entry identifier for commits */
+ uint64_t commit_id;
+
+ /* queue the command is for (queue index) */
+ uint16_t qid;
+ uint8_t padding[6];
+};
+
#endif /* _LINUX_FUSE_H */
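
As a very rough sketch of preparing the 80-byte SQE command area described above for an initial FUSE_IO_URING_CMD_REGISTER submission (the io_uring setup, buffer registration and SQE handling are omitted, and the helper name is hypothetical):

/*
 * Sketch: fill the command payload for registering on queue `qid`.
 * The structure is copied into the SQE command area; commit_id is only
 * meaningful when replying with FUSE_IO_URING_CMD_COMMIT_AND_FETCH.
 */
static void fuse_uring_fill_register_cmd(struct fuse_uring_cmd_req *req,
                                         uint16_t qid)
{
    memset(req, 0, sizeof(*req));
    req->qid = qid;
    req->commit_id = 0;
    req->flags = 0;
}
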
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
index 2221b0c..a82ff79 100644
--- a/include/standard-headers/linux/input-event-codes.h
+++ b/include/standard-headers/linux/input-event-codes.h
@@ -519,6 +519,7 @@
#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
+#define KEY_LINK_PHONE 0x1bf /* AL Phone Syncing */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
@@ -618,6 +619,8 @@
#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
+#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */
+#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
@@ -922,7 +925,8 @@
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
#define SW_MACHINE_COVER 0x10 /* set = cover closed */
-#define SW_MAX_ 0x10
+#define SW_USB_INSERT 0x11 /* set = USB audio device connected */
+#define SW_MAX_ 0x11
#define SW_CNT (SW_MAX_+1)
/*
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index 94c0099..a3a3e94 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -340,7 +340,8 @@
#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
-#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 /* Mask Bit */
+#define PCI_MSIX_ENTRY_CTRL_ST 0xffff0000 /* Steering Tag */
/* CompactPCI Hotswap Register */
@@ -485,6 +486,7 @@
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
+#define PCI_EXP_FLAGS_FLIT 0x8000 /* Flit Mode Supported */
#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
@@ -532,7 +534,7 @@
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
-#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
+#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
@@ -634,9 +636,11 @@
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
-#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
+#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */
+#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
@@ -657,10 +661,12 @@
#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
#define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */
#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
+#define PCI_EXP_DEVCAP2_TPH_COMP_MASK 0x00003000 /* TPH completer support */
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
+#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX 0x00c00000 /* Max End-End TLP Prefixes */
#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
@@ -676,6 +682,7 @@
#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
+#define PCI_EXP_LNKCAP2_SLS 0x000000fe /* Supported Link Speeds Vector */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
@@ -740,9 +747,11 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */
#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
+#define PCI_EXT_CAP_ID_PL_64GT 0x31 /* Physical Layer 64.0 GT/s */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_64GT
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -783,10 +792,13 @@
/* Same bits as above */
#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
-#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
-#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
-#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
-#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
+#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
+#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
+#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
+#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
+#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */
+#define PCI_ERR_CAP_TLP_LOG_FLIT 0x00040000 /* TLP was logged in Flit Mode */
+#define PCI_ERR_CAP_TLP_LOG_SIZE 0x00f80000 /* Logged TLP Size (only in Flit mode) */
#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
@@ -802,6 +814,7 @@
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
+#define PCI_ERR_PREFIX_LOG 0x38 /* TLP Prefix LOG Register (up to 16 bytes) */
/* Virtual Channel */
#define PCI_VC_PORT_CAP1 0x04
@@ -995,9 +1008,6 @@
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
-#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
-#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
-
/* SATA capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
@@ -1007,7 +1017,7 @@
/* Resizable BARs */
#define PCI_REBAR_CAP 4 /* capability register */
-#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */
+#define PCI_REBAR_CAP_SIZES 0xFFFFFFF0 /* supported BAR sizes */
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */
#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */
@@ -1020,15 +1030,34 @@
#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
+/* TPH Completer Support */
+#define PCI_EXP_DEVCAP2_TPH_COMP_NONE 0x0 /* None */
+#define PCI_EXP_DEVCAP2_TPH_COMP_TPH_ONLY 0x1 /* TPH only */
+#define PCI_EXP_DEVCAP2_TPH_COMP_EXT_TPH 0x3 /* TPH and Extended TPH */
+
/* TPH Requester */
#define PCI_TPH_CAP 4 /* capability register */
-#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
-#define PCI_TPH_LOC_NONE 0x000 /* no location */
-#define PCI_TPH_LOC_CAP 0x200 /* in capability */
-#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
-#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
+#define PCI_TPH_CAP_ST_NS 0x00000001 /* No ST Mode Supported */
+#define PCI_TPH_CAP_ST_IV 0x00000002 /* Interrupt Vector Mode Supported */
+#define PCI_TPH_CAP_ST_DS 0x00000004 /* Device Specific Mode Supported */
+#define PCI_TPH_CAP_EXT_TPH 0x00000100 /* Ext TPH Requester Supported */
+#define PCI_TPH_CAP_LOC_MASK 0x00000600 /* ST Table Location */
+#define PCI_TPH_LOC_NONE 0x00000000 /* Not present */
+#define PCI_TPH_LOC_CAP 0x00000200 /* In capability */
+#define PCI_TPH_LOC_MSIX 0x00000400 /* In MSI-X */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST Table Size */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST Table Size shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* Size with no ST table */
+
+#define PCI_TPH_CTRL 8 /* control register */
+#define PCI_TPH_CTRL_MODE_SEL_MASK 0x00000007 /* ST Mode Select */
+#define PCI_TPH_ST_NS_MODE 0x0 /* No ST Mode */
+#define PCI_TPH_ST_IV_MODE 0x1 /* Interrupt Vector Mode */
+#define PCI_TPH_ST_DS_MODE 0x2 /* Device Specific Mode */
+#define PCI_TPH_CTRL_REQ_EN_MASK 0x00000300 /* TPH Requester Enable */
+#define PCI_TPH_REQ_DISABLE 0x0 /* No TPH requests allowed */
+#define PCI_TPH_REQ_TPH_ONLY 0x1 /* TPH only requests allowed */
+#define PCI_TPH_REQ_EXT_TPH 0x3 /* Extended TPH requests allowed */
/* Downstream Port Containment */
#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
@@ -1036,8 +1065,9 @@
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */
-#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
+#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size [3:0] */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
+#define PCI_EXP_DPC_RP_PIO_LOG_SIZE4 0x2000 /* RP PIO Log Size [4] */
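
Since the RP PIO Log Size field is now split across bits [3:0] and bit [4] of the DPC Capability register, a consumer would presumably reassemble it along these lines (a sketch; `cap` is the 16-bit DPC Capability register value):

/* Sketch: combine the split RP PIO Log Size bits into one 5-bit value. */
static unsigned int dpc_rp_pio_log_size(uint16_t cap)
{
    unsigned int size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;

    if (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE4)
        size |= 1u << 4;
    return size;
}
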
#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
@@ -1115,12 +1145,55 @@
#define PCI_DLF_CAP 0x04 /* Capabilities Register */
#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */
+/* Secondary PCIe Capability 8.0 GT/s */
+#define PCI_SECPCI_LE_CTRL 0x0c /* Lane Equalization Control Register */
+
/* Physical Layer 16.0 GT/s */
#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Physical Layer 32.0 GT/s */
+#define PCI_PL_32GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
+
+/* Physical Layer 64.0 GT/s */
+#define PCI_PL_64GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
+
+/* Native PCIe Enclosure Management */
+#define PCI_NPEM_CAP 0x04 /* NPEM capability register */
+#define PCI_NPEM_CAP_CAPABLE 0x00000001 /* NPEM Capable */
+
+#define PCI_NPEM_CTRL 0x08 /* NPEM control register */
+#define PCI_NPEM_CTRL_ENABLE 0x00000001 /* NPEM Enable */
+
+/*
+ * Native PCIe Enclosure Management indication bits and the Reset command bit
+ * use the same bit positions in both the capability and control registers.
+ */
+#define PCI_NPEM_CMD_RESET 0x00000002 /* Reset Command */
+#define PCI_NPEM_IND_OK 0x00000004 /* OK */
+#define PCI_NPEM_IND_LOCATE 0x00000008 /* Locate */
+#define PCI_NPEM_IND_FAIL 0x00000010 /* Fail */
+#define PCI_NPEM_IND_REBUILD 0x00000020 /* Rebuild */
+#define PCI_NPEM_IND_PFA 0x00000040 /* Predicted Failure Analysis */
+#define PCI_NPEM_IND_HOTSPARE 0x00000080 /* Hot Spare */
+#define PCI_NPEM_IND_ICA 0x00000100 /* In Critical Array */
+#define PCI_NPEM_IND_IFA 0x00000200 /* In Failed Array */
+#define PCI_NPEM_IND_IDT 0x00000400 /* Device Type */
+#define PCI_NPEM_IND_DISABLED 0x00000800 /* Disabled */
+#define PCI_NPEM_IND_SPEC_0 0x01000000
+#define PCI_NPEM_IND_SPEC_1 0x02000000
+#define PCI_NPEM_IND_SPEC_2 0x04000000
+#define PCI_NPEM_IND_SPEC_3 0x08000000
+#define PCI_NPEM_IND_SPEC_4 0x10000000
+#define PCI_NPEM_IND_SPEC_5 0x20000000
+#define PCI_NPEM_IND_SPEC_6 0x40000000
+#define PCI_NPEM_IND_SPEC_7 0x80000000
+
+#define PCI_NPEM_STATUS 0x0c /* NPEM status register */
+#define PCI_NPEM_STATUS_CC 0x00000001 /* Command Completed */
+
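
A hedged sketch of how these registers might be driven, e.g. to light the Locate indication; pci_cfg_read32()/pci_cfg_write32() are placeholder config-space accessors, not QEMU or kernel APIs, and a real caller would first check PCI_NPEM_CAP_CAPABLE and the supported indications in the capability register:

/* Sketch: enable NPEM, request the Locate indication, wait for completion. */
static void npem_set_locate(void *dev, unsigned int npem_cap_off)
{
    uint32_t ctrl = pci_cfg_read32(dev, npem_cap_off + PCI_NPEM_CTRL);

    ctrl |= PCI_NPEM_CTRL_ENABLE | PCI_NPEM_IND_LOCATE;
    pci_cfg_write32(dev, npem_cap_off + PCI_NPEM_CTRL, ctrl);

    while (!(pci_cfg_read32(dev, npem_cap_off + PCI_NPEM_STATUS) &
             PCI_NPEM_STATUS_CC)) {
        /* poll until the device reports Command Completed */
    }
}
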
/* Data Object Exchange */
#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
@@ -1146,9 +1219,12 @@
#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER 0x0000ff00
#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
-#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE 0x00ff0000
#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
+/* Deprecated old name, replaced with PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE */
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE
+
/* Compute Express Link (CXL r3.1, sec 8.1.5) */
#define PCI_DVSEC_CXL_PORT 3
#define PCI_DVSEC_CXL_PORT_CTL 0x0c
diff --git a/include/standard-headers/linux/virtio_balloon.h b/include/standard-headers/linux/virtio_balloon.h
index f343bfe..3121cd2 100644
--- a/include/standard-headers/linux/virtio_balloon.h
+++ b/include/standard-headers/linux/virtio_balloon.h
@@ -71,7 +71,13 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
-#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */
+#define VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocation */
+#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */
+#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */
+#define VIRTIO_BALLOON_S_NR 16
#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
@@ -83,7 +89,13 @@ struct virtio_balloon_config {
VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
- VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \
+ VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \
+ VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \
}
#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h
index 68066da..4d350ae 100644
--- a/include/standard-headers/linux/virtio_crypto.h
+++ b/include/standard-headers/linux/virtio_crypto.h
@@ -329,6 +329,7 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+ /* akcipher sign/verify opcodes are deprecated */
#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h
index 2db643e..00cd3f0 100644
--- a/include/standard-headers/linux/virtio_gpu.h
+++ b/include/standard-headers/linux/virtio_gpu.h
@@ -309,8 +309,10 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
-/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
+#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
index fc594fe..982e854 100644
--- a/include/standard-headers/linux/virtio_net.h
+++ b/include/standard-headers/linux/virtio_net.h
@@ -327,6 +327,19 @@ struct virtio_net_rss_config {
uint8_t hash_key_data[/* hash_key_length */];
};
+struct virtio_net_rss_config_hdr {
+ uint32_t hash_types;
+ uint16_t indirection_table_mask;
+ uint16_t unclassified_queue;
+ uint16_t indirection_table[/* 1 + indirection_table_mask */];
+};
+
+struct virtio_net_rss_config_trailer {
+ uint16_t max_tx_vq;
+ uint8_t hash_key_length;
+ uint8_t hash_key_data[/* hash_key_length */];
+};
+
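
The split into a fixed header and trailer presumably exists because the variable-length indirection table sits between the two fixed parts; a sketch of computing the total command length from the pieces (to be checked against the virtio specification for the exact wire layout):

/* Sketch: total length of an RSS config command built from the split layout. */
static size_t virtio_net_rss_config_len(uint16_t indirection_table_mask,
                                        uint8_t hash_key_length)
{
    size_t entries = (size_t)indirection_table_mask + 1;

    return sizeof(struct virtio_net_rss_config_hdr) +
           entries * sizeof(uint16_t) +                 /* indirection_table[] */
           sizeof(struct virtio_net_rss_config_trailer) +
           hash_key_length;                             /* hash_key_data[] */
}
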
#define VIRTIO_NET_CTRL_MQ_RSS_CONFIG 1
/*
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
index 4010216..09e964e 100644
--- a/include/standard-headers/linux/virtio_pci.h
+++ b/include/standard-headers/linux/virtio_pci.h
@@ -40,6 +40,7 @@
#define _LINUX_VIRTIO_PCI_H
#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/kernel.h"
#ifndef VIRTIO_PCI_NO_LEGACY
@@ -115,6 +116,8 @@
#define VIRTIO_PCI_CAP_PCI_CFG 5
/* Additional shared memory capability */
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
+/* PCI vendor data configuration */
+#define VIRTIO_PCI_CAP_VENDOR_CFG 9
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -129,6 +132,18 @@ struct virtio_pci_cap {
uint32_t length; /* Length of the structure, in bytes. */
};
+/* This is the PCI vendor data capability header: */
+struct virtio_pci_vndr_data {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint16_t vendor_id; /* Identifies the vendor-specific format. */
+ /* For Vendor Definition */
+ /* Pads structure to a multiple of 4 bytes */
+ /* Reads must not have side effects */
+};
+
struct virtio_pci_cap64 {
struct virtio_pci_cap cap;
uint32_t offset_hi; /* Most sig 32 bits of offset */
@@ -231,6 +246,7 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SELF 0x0
#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
/* Transitional device admin command. */
@@ -240,6 +256,17 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+/* Device parts access commands. */
+#define VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY 0x7
+#define VIRTIO_ADMIN_CMD_DEVICE_CAP_GET 0x8
+#define VIRTIO_ADMIN_CMD_DRIVER_CAP_SET 0x9
+#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE 0xa
+#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY 0xd
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET 0xe
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET 0xf
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_SET 0x10
+#define VIRTIO_ADMIN_CMD_DEV_MODE_SET 0x11
+
struct virtio_admin_cmd_hdr {
uint16_t opcode;
/*
@@ -286,4 +313,123 @@ struct virtio_admin_cmd_notify_info_result {
struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
};
+#define VIRTIO_DEV_PARTS_CAP 0x0000
+
+struct virtio_dev_parts_cap {
+ uint8_t get_parts_resource_objects_limit;
+ uint8_t set_parts_resource_objects_limit;
+};
+
+#define MAX_CAP_ID __KERNEL_DIV_ROUND_UP(VIRTIO_DEV_PARTS_CAP + 1, 64)
+
+struct virtio_admin_cmd_query_cap_id_result {
+ uint64_t supported_caps[MAX_CAP_ID];
+};
+
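
Assuming the usual packing of one bit per capability id into 64-bit words (an assumption to verify against the virtio admin command specification), a driver could test for a capability like this:

/* Sketch: check whether capability `id` is set in the supported_caps bitmap. */
static bool virtio_admin_cap_supported(
        const struct virtio_admin_cmd_query_cap_id_result *res, uint16_t id)
{
    if (id / 64 >= MAX_CAP_ID)
        return false;
    return res->supported_caps[id / 64] & (1ULL << (id % 64));
}
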
+struct virtio_admin_cmd_cap_get_data {
+ uint16_t id;
+ uint8_t reserved[6];
+};
+
+struct virtio_admin_cmd_cap_set_data {
+ uint16_t id;
+ uint8_t reserved[6];
+ uint8_t cap_specific_data[];
+};
+
+struct virtio_admin_cmd_resource_obj_cmd_hdr {
+ uint16_t type;
+ uint8_t reserved[2];
+ uint32_t id; /* Indicates unique resource object id per resource object type */
+};
+
+struct virtio_admin_cmd_resource_obj_create_data {
+ struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+ uint64_t flags;
+ uint8_t resource_obj_specific_data[];
+};
+
+#define VIRTIO_RESOURCE_OBJ_DEV_PARTS 0
+
+#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET 0
+#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET 1
+
+struct virtio_resource_obj_dev_parts {
+ uint8_t type;
+ uint8_t reserved[7];
+};
+
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE 0
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_COUNT 1
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_LIST 2
+
+struct virtio_admin_cmd_dev_parts_metadata_data {
+ struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+ uint8_t type;
+ uint8_t reserved[7];
+};
+
+#define VIRTIO_DEV_PART_F_OPTIONAL 0
+
+struct virtio_dev_part_hdr {
+ uint16_t part_type;
+ uint8_t flags;
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t offset;
+ uint32_t reserved;
+ } pci_common_cfg;
+ struct {
+ uint16_t index;
+ uint8_t reserved[6];
+ } vq_index;
+ } selector;
+ uint32_t length;
+};
+
+struct virtio_dev_part {
+ struct virtio_dev_part_hdr hdr;
+ uint8_t value[];
+};
+
+struct virtio_admin_cmd_dev_parts_metadata_result {
+ union {
+ struct {
+ uint32_t size;
+ uint32_t reserved;
+ } parts_size;
+ struct {
+ uint32_t count;
+ uint32_t reserved;
+ } hdr_list_count;
+ struct {
+ uint32_t count;
+ uint32_t reserved;
+ struct virtio_dev_part_hdr hdrs[];
+ } hdr_list;
+ };
+};
+
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_SELECTED 0
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL 1
+
+struct virtio_admin_cmd_dev_parts_get_data {
+ struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+ uint8_t type;
+ uint8_t reserved[7];
+ struct virtio_dev_part_hdr hdr_list[];
+};
+
+struct virtio_admin_cmd_dev_parts_set_data {
+ struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+ struct virtio_dev_part parts[];
+};
+
+#define VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED 0
+
+struct virtio_admin_cmd_dev_mode_set_data {
+ uint8_t flags;
+};
+
#endif
diff --git a/include/standard-headers/linux/virtio_snd.h b/include/standard-headers/linux/virtio_snd.h
index 860f12e..160d578 100644
--- a/include/standard-headers/linux/virtio_snd.h
+++ b/include/standard-headers/linux/virtio_snd.h
@@ -25,7 +25,7 @@ struct virtio_snd_config {
uint32_t streams;
/* # of available channel maps */
uint32_t chmaps;
- /* # of available control elements */
+ /* # of available control elements (if VIRTIO_SND_F_CTLS) */
uint32_t controls;
};
diff --git a/include/standard-headers/linux/vmclock-abi.h b/include/standard-headers/linux/vmclock-abi.h
new file mode 100644
index 0000000..15b0316
--- /dev/null
+++ b/include/standard-headers/linux/vmclock-abi.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+
+/*
+ * This structure provides a vDSO-style clock to VM guests, exposing the
+ * relationship (or lack thereof) between the CPU clock (TSC, timebase, arch
+ * counter, etc.) and real time. It is designed to address the problem of
+ * live migration, which other clock enlightenments do not.
+ *
+ * When a guest is live migrated, this affects the clock in two ways.
+ *
+ * First, even between identical hosts the actual frequency of the underlying
+ * counter will change within the tolerances of its specification (typically
+ * ±50PPM, or 4 seconds a day). This frequency also varies over time on the
+ * same host, but can be tracked by NTP as it generally varies slowly. With
+ * live migration there is a step change in the frequency, with no warning.
+ *
+ * Second, there may be a step change in the value of the counter itself, as
+ * its accuracy is limited by the precision of the NTP synchronization on the
+ * source and destination hosts.
+ *
+ * So any calibration (NTP, PTP, etc.) which the guest has done on the source
+ * host before migration is invalid, and needs to be redone on the new host.
+ *
+ * In its most basic mode, this structure provides only an indication to the
+ * guest that live migration has occurred. This allows the guest to know that
+ * its clock is invalid and take remedial action. For applications that need
+ * reliable accurate timestamps (e.g. distributed databases), the structure
+ * can be mapped all the way to userspace. This allows the application to see
+ * directly for itself that the clock is disrupted and take appropriate
+ * action, even when using a vDSO-style method to get the time instead of a
+ * system call.
+ *
+ * In its more advanced mode, this structure can also be used to expose the
+ * precise relationship of the CPU counter to real time, as calibrated by the
+ * host. This means that userspace applications can have accurate time
+ * immediately after live migration, rather than having to pause operations
+ * and wait for NTP to recover. This mode does, of course, rely on the
+ * counter being reliable and consistent across CPUs.
+ *
+ * Note that this must be true UTC, never with smeared leap seconds. If a
+ * guest wishes to construct a smeared clock, it can do so. Presenting a
+ * smeared clock through this interface would be problematic because it
+ * actually messes with the apparent counter *period*. A linear smearing
+ * of 1 ms per second would effectively tweak the counter period by 1000PPM
+ * at the start/end of the smearing period, while a sinusoidal smear would
+ * basically be impossible to represent.
+ *
+ * This structure is offered with the intent that it be adopted into the
+ * nascent virtio-rtc standard, as a virtio-rtc that does not address the live
+ * migration problem seems a little less than fit for purpose. For that
+ * reason, certain fields use precisely the same numeric definitions as in
+ * the virtio-rtc proposal. The structure can also be exposed through an ACPI
+ * device with the CID "VMCLOCK", modelled on the "VMGENID" device except for
+ * the fact that it uses a real _CRS to convey the address of the structure
+ * (which should be a full page, to allow for mapping directly to userspace).
+ */
+
+#ifndef __VMCLOCK_ABI_H__
+#define __VMCLOCK_ABI_H__
+
+#include "standard-headers/linux/types.h"
+
+struct vmclock_abi {
+ /* CONSTANT FIELDS */
+ uint32_t magic;
+#define VMCLOCK_MAGIC 0x4b4c4356 /* "VCLK" */
+ uint32_t size; /* Size of region containing this structure */
+ uint16_t version; /* 1 */
+ uint8_t counter_id; /* Matches VIRTIO_RTC_COUNTER_xxx except INVALID */
+#define VMCLOCK_COUNTER_ARM_VCNT 0
+#define VMCLOCK_COUNTER_X86_TSC 1
+#define VMCLOCK_COUNTER_INVALID 0xff
+ uint8_t time_type; /* Matches VIRTIO_RTC_TYPE_xxx */
+#define VMCLOCK_TIME_UTC 0 /* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_TAI 1 /* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_MONOTONIC 2 /* Since undefined epoch */
+#define VMCLOCK_TIME_INVALID_SMEARED 3 /* Not supported */
+#define VMCLOCK_TIME_INVALID_MAYBE_SMEARED 4 /* Not supported */
+
+ /* NON-CONSTANT FIELDS PROTECTED BY SEQCOUNT LOCK */
+ uint32_t seq_count; /* Low bit means an update is in progress */
+ /*
+ * This field changes to another non-repeating value when the CPU
+ * counter is disrupted, for example on live migration. This lets
+ * the guest know that it should discard any calibration it has
+ * performed of the counter against external sources (NTP/PTP/etc.).
+ */
+ uint64_t disruption_marker;
+ uint64_t flags;
+ /* Indicates that the tai_offset_sec field is valid */
+#define VMCLOCK_FLAG_TAI_OFFSET_VALID (1 << 0)
+ /*
+ * Optionally used to notify guests of pending maintenance events.
+ * A guest which provides latency-sensitive services may wish to
+ * remove itself from service if an event is coming up. Two flags
+ * indicate the approximate imminence of the event.
+ */
+#define VMCLOCK_FLAG_DISRUPTION_SOON (1 << 1) /* About a day */
+#define VMCLOCK_FLAG_DISRUPTION_IMMINENT (1 << 2) /* About an hour */
+#define VMCLOCK_FLAG_PERIOD_ESTERROR_VALID (1 << 3)
+#define VMCLOCK_FLAG_PERIOD_MAXERROR_VALID (1 << 4)
+#define VMCLOCK_FLAG_TIME_ESTERROR_VALID (1 << 5)
+#define VMCLOCK_FLAG_TIME_MAXERROR_VALID (1 << 6)
+ /*
+ * If the MONOTONIC flag is set then (other than leap seconds) it is
+ * guaranteed that the time calculated according to this structure at
+ * any given moment shall never appear to be later than the time
+ * calculated via the structure at any *later* moment.
+ *
+ * In particular, a timestamp based on a counter reading taken
+ * immediately after setting the low bit of seq_count (and the
+ * associated memory barrier), using the previously-valid time and
+ * period fields, shall never be later than a timestamp based on
+ * a counter reading taken immediately before *clearing* the low
+ * bit again after the update, using the about-to-be-valid fields.
+ */
+#define VMCLOCK_FLAG_TIME_MONOTONIC (1 << 7)
+
+ uint8_t pad[2];
+ uint8_t clock_status;
+#define VMCLOCK_STATUS_UNKNOWN 0
+#define VMCLOCK_STATUS_INITIALIZING 1
+#define VMCLOCK_STATUS_SYNCHRONIZED 2
+#define VMCLOCK_STATUS_FREERUNNING 3
+#define VMCLOCK_STATUS_UNRELIABLE 4
+
+ /*
+ * The time exposed through this device is never smeared. This field
+ * corresponds to the 'subtype' field in virtio-rtc, which indicates
+ * the smearing method. However in this case it provides a *hint* to
+ * the guest operating system, such that *if* the guest OS wants to
+ * provide its users with an alternative clock which does not follow
+ * UTC, it may do so in a fashion consistent with the other systems
+ * in the nearby environment.
+ */
+ uint8_t leap_second_smearing_hint; /* Matches VIRTIO_RTC_SUBTYPE_xxx */
+#define VMCLOCK_SMEARING_STRICT 0
+#define VMCLOCK_SMEARING_NOON_LINEAR 1
+#define VMCLOCK_SMEARING_UTC_SLS 2
+ uint16_t tai_offset_sec; /* Actually two's complement signed */
+ uint8_t leap_indicator;
+ /*
+ * This field is based on the VIRTIO_RTC_LEAP_xxx values as defined
+ * in the current draft of virtio-rtc, but since smearing cannot be
+ * used with the shared memory device, some values are not used.
+ *
+ * The _POST_POS and _POST_NEG values allow the guest to perform
+ * its own smearing during the day or so after a leap second when
+ * such smearing may need to continue being applied for a leap
+ * second which is now theoretically "historical".
+ */
+#define VMCLOCK_LEAP_NONE 0x00 /* No known nearby leap second */
+#define VMCLOCK_LEAP_PRE_POS 0x01 /* Positive leap second at EOM */
+#define VMCLOCK_LEAP_PRE_NEG 0x02 /* Negative leap second at EOM */
+#define VMCLOCK_LEAP_POS 0x03 /* Set during 23:59:60 second */
+#define VMCLOCK_LEAP_POST_POS 0x04
+#define VMCLOCK_LEAP_POST_NEG 0x05
+
+ /* Bit shift for counter_period_frac_sec and its error rate */
+ uint8_t counter_period_shift;
+ /*
+ * Paired values of counter and UTC at a given point in time.
+ */
+ uint64_t counter_value;
+ /*
+ * Counter period, and error margin of same. The unit of these
+ * fields is 1/2^(64 + counter_period_shift) of a second.
+ */
+ uint64_t counter_period_frac_sec;
+ uint64_t counter_period_esterror_rate_frac_sec;
+ uint64_t counter_period_maxerror_rate_frac_sec;
+
+ /*
+ * Time according to time_type field above.
+ */
+ uint64_t time_sec; /* Seconds since time_type epoch */
+ uint64_t time_frac_sec; /* Units of 1/2^64 of a second */
+ uint64_t time_esterror_nanosec;
+ uint64_t time_maxerror_nanosec;
+};
+
+#endif /* __VMCLOCK_ABI_H__ */
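
A hedged sketch of the read protocol that the seq_count comments describe: retry while the low bit is set or the count changes under the reader, then combine the paired counter/time values. read_counter() is a placeholder for sampling the counter named by counter_id, the barriers are simplified, and a real reader needs 128-bit arithmetic for the fractional product:

/* Placeholder: read the CPU counter identified by vc->counter_id. */
extern uint64_t read_counter(void);

/* Sketch: derive (seconds, 1/2^64 fractions) from a vmclock_abi snapshot. */
static void vmclock_read(const volatile struct vmclock_abi *vc,
                         uint64_t *sec, uint64_t *frac_sec)
{
    uint32_t seq;
    uint64_t delta;

    do {
        do {
            seq = vc->seq_count;        /* low bit set: update in progress */
        } while (seq & 1);
        __sync_synchronize();

        delta = read_counter() - vc->counter_value;
        *sec = vc->time_sec;
        /*
         * counter_period_frac_sec is in units of 1/2^(64 + shift) of a
         * second, so shifting the product right by counter_period_shift
         * yields 1/2^64 units; the carry into *sec is omitted here.
         */
        *frac_sec = vc->time_frac_sec +
                    ((delta * vc->counter_period_frac_sec) >>
                     vc->counter_period_shift);

        __sync_synchronize();
    } while (vc->seq_count != seq);
}
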
diff --git a/include/standard-headers/uefi/uefi.h b/include/standard-headers/uefi/uefi.h
new file mode 100644
index 0000000..5256349
--- /dev/null
+++ b/include/standard-headers/uefi/uefi.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2025 Intel Corporation
+ *
+ * Author: Isaku Yamahata <isaku.yamahata at gmail.com>
+ * <isaku.yamahata at intel.com>
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_I386_UEFI_H
+#define HW_I386_UEFI_H
+
+/***************************************************************************/
+/*
+ * basic EFI definitions
+ * supplemented with UEFI Specification Version 2.8 (Errata A)
+ * released February 2020
+ */
+/* UEFI integer is little endian */
+
+typedef struct {
+ uint32_t Data1;
+ uint16_t Data2;
+ uint16_t Data3;
+ uint8_t Data4[8];
+} EFI_GUID;
+
+typedef enum {
+ EfiReservedMemoryType,
+ EfiLoaderCode,
+ EfiLoaderData,
+ EfiBootServicesCode,
+ EfiBootServicesData,
+ EfiRuntimeServicesCode,
+ EfiRuntimeServicesData,
+ EfiConventionalMemory,
+ EfiUnusableMemory,
+ EfiACPIReclaimMemory,
+ EfiACPIMemoryNVS,
+ EfiMemoryMappedIO,
+ EfiMemoryMappedIOPortSpace,
+ EfiPalCode,
+ EfiPersistentMemory,
+ EfiUnacceptedMemoryType,
+ EfiMaxMemoryType
+} EFI_MEMORY_TYPE;
+
+#define EFI_HOB_HANDOFF_TABLE_VERSION 0x0009
+
+#define EFI_HOB_TYPE_HANDOFF 0x0001
+#define EFI_HOB_TYPE_MEMORY_ALLOCATION 0x0002
+#define EFI_HOB_TYPE_RESOURCE_DESCRIPTOR 0x0003
+#define EFI_HOB_TYPE_GUID_EXTENSION 0x0004
+#define EFI_HOB_TYPE_FV 0x0005
+#define EFI_HOB_TYPE_CPU 0x0006
+#define EFI_HOB_TYPE_MEMORY_POOL 0x0007
+#define EFI_HOB_TYPE_FV2 0x0009
+#define EFI_HOB_TYPE_LOAD_PEIM_UNUSED 0x000A
+#define EFI_HOB_TYPE_UEFI_CAPSULE 0x000B
+#define EFI_HOB_TYPE_FV3 0x000C
+#define EFI_HOB_TYPE_UNUSED 0xFFFE
+#define EFI_HOB_TYPE_END_OF_HOB_LIST 0xFFFF
+
+typedef struct {
+ uint16_t HobType;
+ uint16_t HobLength;
+ uint32_t Reserved;
+} EFI_HOB_GENERIC_HEADER;
+
+typedef uint64_t EFI_PHYSICAL_ADDRESS;
+typedef uint32_t EFI_BOOT_MODE;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ uint32_t Version;
+ EFI_BOOT_MODE BootMode;
+ EFI_PHYSICAL_ADDRESS EfiMemoryTop;
+ EFI_PHYSICAL_ADDRESS EfiMemoryBottom;
+ EFI_PHYSICAL_ADDRESS EfiFreeMemoryTop;
+ EFI_PHYSICAL_ADDRESS EfiFreeMemoryBottom;
+ EFI_PHYSICAL_ADDRESS EfiEndOfHobList;
+} EFI_HOB_HANDOFF_INFO_TABLE;
+
+#define EFI_RESOURCE_SYSTEM_MEMORY 0x00000000
+#define EFI_RESOURCE_MEMORY_MAPPED_IO 0x00000001
+#define EFI_RESOURCE_IO 0x00000002
+#define EFI_RESOURCE_FIRMWARE_DEVICE 0x00000003
+#define EFI_RESOURCE_MEMORY_MAPPED_IO_PORT 0x00000004
+#define EFI_RESOURCE_MEMORY_RESERVED 0x00000005
+#define EFI_RESOURCE_IO_RESERVED 0x00000006
+#define EFI_RESOURCE_MEMORY_UNACCEPTED 0x00000007
+#define EFI_RESOURCE_MAX_MEMORY_TYPE 0x00000008
+
+#define EFI_RESOURCE_ATTRIBUTE_PRESENT 0x00000001
+#define EFI_RESOURCE_ATTRIBUTE_INITIALIZED 0x00000002
+#define EFI_RESOURCE_ATTRIBUTE_TESTED 0x00000004
+#define EFI_RESOURCE_ATTRIBUTE_SINGLE_BIT_ECC 0x00000008
+#define EFI_RESOURCE_ATTRIBUTE_MULTIPLE_BIT_ECC 0x00000010
+#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_1 0x00000020
+#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_2 0x00000040
+#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTED 0x00000080
+#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTED 0x00000100
+#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTED 0x00000200
+#define EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE 0x00000400
+#define EFI_RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE 0x00000800
+#define EFI_RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE 0x00001000
+#define EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE 0x00002000
+#define EFI_RESOURCE_ATTRIBUTE_16_BIT_IO 0x00004000
+#define EFI_RESOURCE_ATTRIBUTE_32_BIT_IO 0x00008000
+#define EFI_RESOURCE_ATTRIBUTE_64_BIT_IO 0x00010000
+#define EFI_RESOURCE_ATTRIBUTE_UNCACHED_EXPORTED 0x00020000
+#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTED 0x00040000
+#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTABLE 0x00080000
+#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTABLE 0x00100000
+#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTABLE 0x00200000
+#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTABLE 0x00400000
+#define EFI_RESOURCE_ATTRIBUTE_PERSISTENT 0x00800000
+#define EFI_RESOURCE_ATTRIBUTE_PERSISTABLE 0x01000000
+#define EFI_RESOURCE_ATTRIBUTE_MORE_RELIABLE 0x02000000
+
+typedef uint32_t EFI_RESOURCE_TYPE;
+typedef uint32_t EFI_RESOURCE_ATTRIBUTE_TYPE;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ EFI_GUID Owner;
+ EFI_RESOURCE_TYPE ResourceType;
+ EFI_RESOURCE_ATTRIBUTE_TYPE ResourceAttribute;
+ EFI_PHYSICAL_ADDRESS PhysicalStart;
+ uint64_t ResourceLength;
+} EFI_HOB_RESOURCE_DESCRIPTOR;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ EFI_GUID Name;
+
+ /* guid specific data follows */
+} EFI_HOB_GUID_TYPE;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ uint64_t Length;
+} EFI_HOB_FIRMWARE_VOLUME;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ uint64_t Length;
+ EFI_GUID FvName;
+ EFI_GUID FileName;
+} EFI_HOB_FIRMWARE_VOLUME2;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ uint64_t Length;
+ uint32_t AuthenticationStatus;
+ bool ExtractedFv;
+ EFI_GUID FvName;
+ EFI_GUID FileName;
+} EFI_HOB_FIRMWARE_VOLUME3;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+ uint8_t SizeOfMemorySpace;
+ uint8_t SizeOfIoSpace;
+ uint8_t Reserved[6];
+} EFI_HOB_CPU;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+} EFI_HOB_MEMORY_POOL;
+
+typedef struct {
+ EFI_HOB_GENERIC_HEADER Header;
+
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ uint64_t Length;
+} EFI_HOB_UEFI_CAPSULE;
+
+#define EFI_HOB_OWNER_ZERO \
+ ((EFI_GUID){ 0x00000000, 0x0000, 0x0000, \
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } })
+
+#endif
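
For orientation, HOB lists built from these types are conventionally walked by advancing HobLength bytes at a time until the end-of-list marker (assuming, as in the PI specification, that HobLength covers the whole HOB including its header):

/* Sketch: count resource-descriptor HOBs in a HOB list. */
static unsigned int count_resource_hobs(const EFI_HOB_HANDOFF_INFO_TABLE *hit)
{
    const EFI_HOB_GENERIC_HEADER *hob = &hit->Header;
    unsigned int count = 0;

    while (hob->HobType != EFI_HOB_TYPE_END_OF_HOB_LIST) {
        if (hob->HobType == EFI_HOB_TYPE_RESOURCE_DESCRIPTOR)
            count++;
        hob = (const EFI_HOB_GENERIC_HEADER *)
              ((const uint8_t *)hob + hob->HobLength);
    }
    return count;
}
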
diff --git a/include/sysemu/accel-blocker.h b/include/sysemu/accel-blocker.h
deleted file mode 100644
index f07f368..0000000
--- a/include/sysemu/accel-blocker.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Accelerator blocking API, to prevent new ioctls from starting and wait the
- * running ones finish.
- * This mechanism differs from pause/resume_all_vcpus() in that it does not
- * release the BQL.
- *
- * Copyright (c) 2022 Red Hat Inc.
- *
- * Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#ifndef ACCEL_BLOCKER_H
-#define ACCEL_BLOCKER_H
-
-#include "sysemu/cpus.h"
-
-void accel_blocker_init(void);
-
-/*
- * accel_{cpu_}ioctl_begin/end:
- * Mark when ioctl is about to run or just finished.
- *
- * accel_{cpu_}ioctl_begin will block after accel_ioctl_inhibit_begin() is
- * called, preventing new ioctls to run. They will continue only after
- * accel_ioctl_inibith_end().
- */
-void accel_ioctl_begin(void);
-void accel_ioctl_end(void);
-void accel_cpu_ioctl_begin(CPUState *cpu);
-void accel_cpu_ioctl_end(CPUState *cpu);
-
-/*
- * accel_ioctl_inhibit_begin: start critical section
- *
- * This function makes sure that:
- * 1) incoming accel_{cpu_}ioctl_begin() calls block
- * 2) wait that all ioctls that were already running reach
- * accel_{cpu_}ioctl_end(), kicking vcpus if necessary.
- *
- * This allows the caller to access shared data or perform operations without
- * worrying of concurrent vcpus accesses.
- */
-void accel_ioctl_inhibit_begin(void);
-
-/*
- * accel_ioctl_inhibit_end: end critical section started by
- * accel_ioctl_inhibit_begin()
- *
- * This function allows blocked accel_{cpu_}ioctl_begin() to continue.
- */
-void accel_ioctl_inhibit_end(void);
-
-#endif /* ACCEL_BLOCKER_H */
diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h
deleted file mode 100644
index a088672..0000000
--- a/include/sysemu/accel-ops.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Accelerator OPS, used for cpus.c module
- *
- * Copyright 2021 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef ACCEL_OPS_H
-#define ACCEL_OPS_H
-
-#include "exec/cpu-common.h"
-#include "qom/object.h"
-
-#define ACCEL_OPS_SUFFIX "-ops"
-#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
-#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
-
-typedef struct AccelOpsClass AccelOpsClass;
-DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
-
-/**
- * struct AccelOpsClass - accelerator interfaces
- *
- * This structure is used to abstract accelerator differences from the
- * core CPU code. Not all have to be implemented.
- */
-struct AccelOpsClass {
- /*< private >*/
- ObjectClass parent_class;
- /*< public >*/
-
- /* initialization function called when accel is chosen */
- void (*ops_init)(AccelOpsClass *ops);
-
- bool (*cpus_are_resettable)(void);
- void (*cpu_reset_hold)(CPUState *cpu);
-
- void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
- void (*kick_vcpu_thread)(CPUState *cpu);
- bool (*cpu_thread_is_idle)(CPUState *cpu);
-
- void (*synchronize_post_reset)(CPUState *cpu);
- void (*synchronize_post_init)(CPUState *cpu);
- void (*synchronize_state)(CPUState *cpu);
- void (*synchronize_pre_loadvm)(CPUState *cpu);
- void (*synchronize_pre_resume)(bool step_pending);
-
- void (*handle_interrupt)(CPUState *cpu, int mask);
-
- /**
- * @get_virtual_clock: fetch virtual clock
- * @set_virtual_clock: set virtual clock
- *
- * These allow the timer subsystem to defer to the accelerator to
- * fetch time. The set function is needed if the accelerator wants
- * to track the changes to time as the timer is warped through
- * various timer events.
- */
- int64_t (*get_virtual_clock)(void);
- void (*set_virtual_clock)(int64_t time);
-
- int64_t (*get_elapsed_ticks)(void);
-
- /* gdbstub hooks */
- bool (*supports_guest_debug)(void);
- int (*update_guest_debug)(CPUState *cpu);
- int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
- int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
- void (*remove_all_breakpoints)(CPUState *cpu);
-};
-
-#endif /* ACCEL_OPS_H */
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
deleted file mode 100644
index 8d041aa..0000000
--- a/include/sysemu/arch_init.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef QEMU_ARCH_INIT_H
-#define QEMU_ARCH_INIT_H
-
-
-enum {
- QEMU_ARCH_ALL = -1,
- QEMU_ARCH_ALPHA = (1 << 0),
- QEMU_ARCH_ARM = (1 << 1),
- QEMU_ARCH_CRIS = (1 << 2),
- QEMU_ARCH_I386 = (1 << 3),
- QEMU_ARCH_M68K = (1 << 4),
- QEMU_ARCH_MICROBLAZE = (1 << 6),
- QEMU_ARCH_MIPS = (1 << 7),
- QEMU_ARCH_PPC = (1 << 8),
- QEMU_ARCH_S390X = (1 << 9),
- QEMU_ARCH_SH4 = (1 << 10),
- QEMU_ARCH_SPARC = (1 << 11),
- QEMU_ARCH_XTENSA = (1 << 12),
- QEMU_ARCH_OPENRISC = (1 << 13),
- QEMU_ARCH_TRICORE = (1 << 16),
- QEMU_ARCH_HPPA = (1 << 18),
- QEMU_ARCH_RISCV = (1 << 19),
- QEMU_ARCH_RX = (1 << 20),
- QEMU_ARCH_AVR = (1 << 21),
- QEMU_ARCH_HEXAGON = (1 << 22),
- QEMU_ARCH_LOONGARCH = (1 << 23),
-};
-
-extern const uint32_t arch_type;
-
-void qemu_init_arch_modules(void);
-
-#endif
diff --git a/include/sysemu/block-backend-global-state.h b/include/sysemu/block-backend-global-state.h
deleted file mode 100644
index 49c12b0f..0000000
--- a/include/sysemu/block-backend-global-state.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * QEMU Block backends
- *
- * Copyright (C) 2014-2016 Red Hat, Inc.
- *
- * Authors:
- * Markus Armbruster <armbru@redhat.com>,
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1
- * or later. See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef BLOCK_BACKEND_GLOBAL_STATE_H
-#define BLOCK_BACKEND_GLOBAL_STATE_H
-
-#include "block-backend-common.h"
-
-/*
- * Global state (GS) API. These functions run under the BQL.
- *
- * See include/block/block-global-state.h for more information about
- * the GS API.
- */
-
-BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
-
-BlockBackend * no_coroutine_fn
-blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
- Error **errp);
-
-BlockBackend * coroutine_fn no_co_wrapper
-blk_co_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
- Error **errp);
-
-BlockBackend * no_coroutine_fn
-blk_new_open(const char *filename, const char *reference, QDict *options,
- int flags, Error **errp);
-
-BlockBackend * coroutine_fn no_co_wrapper
-blk_co_new_open(const char *filename, const char *reference, QDict *options,
- int flags, Error **errp);
-
-int blk_get_refcnt(BlockBackend *blk);
-void blk_ref(BlockBackend *blk);
-
-void no_coroutine_fn blk_unref(BlockBackend *blk);
-void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
-
-void blk_remove_all_bs(void);
-BlockBackend *blk_by_name(const char *name);
-BlockBackend *blk_next(BlockBackend *blk);
-BlockBackend *blk_all_next(BlockBackend *blk);
-bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
-void monitor_remove_blk(BlockBackend *blk);
-
-BlockBackendPublic *blk_get_public(BlockBackend *blk);
-BlockBackend *blk_by_public(BlockBackendPublic *public);
-
-void blk_remove_bs(BlockBackend *blk);
-int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
-int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
-bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
-bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
-int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
- uint64_t shared_perm, Error **errp);
-void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
-
-void blk_iostatus_enable(BlockBackend *blk);
-BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
-void blk_iostatus_disable(BlockBackend *blk);
-void blk_iostatus_reset(BlockBackend *blk);
-int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
-void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
-DeviceState *blk_get_attached_dev(BlockBackend *blk);
-BlockBackend *blk_by_dev(void *dev);
-BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
-void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
-
-void blk_activate(BlockBackend *blk, Error **errp);
-
-int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
-void blk_aio_cancel(BlockAIOCB *acb);
-int blk_commit_all(void);
-bool blk_in_drain(BlockBackend *blk);
-void blk_drain(BlockBackend *blk);
-void blk_drain_all(void);
-void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
- BlockdevOnError on_write_error);
-bool blk_supports_write_perm(BlockBackend *blk);
-bool blk_is_sg(BlockBackend *blk);
-void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
-int blk_get_flags(BlockBackend *blk);
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
-void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
-void blk_op_block_all(BlockBackend *blk, Error *reason);
-void blk_op_unblock_all(BlockBackend *blk, Error *reason);
-int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
- Error **errp);
-void blk_add_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *new_context, void *opaque),
- void (*detach_aio_context)(void *opaque), void *opaque);
-void blk_remove_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *,
- void *),
- void (*detach_aio_context)(void *),
- void *opaque);
-void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
-void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
-BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
-void blk_update_root_state(BlockBackend *blk);
-bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
-int blk_get_open_flags_from_root_state(BlockBackend *blk);
-
-int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
- int64_t pos, int size);
-int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
-int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
-int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
-
-void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
-void blk_io_limits_disable(BlockBackend *blk);
-void blk_io_limits_enable(BlockBackend *blk, const char *group);
-void blk_io_limits_update_group(BlockBackend *blk, const char *group);
-void blk_set_force_allow_inactivate(BlockBackend *blk);
-
-bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp);
-void blk_unregister_buf(BlockBackend *blk, void *host, size_t size);
-
-const BdrvChild *blk_root(BlockBackend *blk);
-
-int blk_make_empty(BlockBackend *blk, Error **errp);
-
-#endif /* BLOCK_BACKEND_GLOBAL_STATE_H */
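A minimal sketch of the open/release lifecycle implied by the global-state declarations above, assuming the caller holds the BQL; the NULL reference/options and zero flags are placeholders, and the include path is the pre-patch location.

    /* Sketch only: open an image with the GS API and release it again. */
    #include "sysemu/block-backend-global-state.h"

    static int probe_image(const char *filename, Error **errp)
    {
        BlockBackend *blk = blk_new_open(filename, NULL, NULL, 0, errp);

        if (!blk) {
            return -1;          /* *errp already set by blk_new_open() */
        }
        /* ... inspect or attach the backend under the BQL ... */
        blk_unref(blk);         /* drop the reference from blk_new_open() */
        return 0;
    }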
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
deleted file mode 100644
index d174275..0000000
--- a/include/sysemu/block-backend-io.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * QEMU Block backends
- *
- * Copyright (C) 2014-2016 Red Hat, Inc.
- *
- * Authors:
- * Markus Armbruster <armbru@redhat.com>,
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1
- * or later. See the COPYING.LIB file in the top-level directory.
- */
-
-#ifndef BLOCK_BACKEND_IO_H
-#define BLOCK_BACKEND_IO_H
-
-#include "block-backend-common.h"
-#include "block/accounting.h"
-
-/*
- * I/O API functions. These functions are thread-safe.
- *
- * See include/block/block-io.h for more information about
- * the I/O API.
- */
-
-const char *blk_name(const BlockBackend *blk);
-
-BlockDriverState *blk_bs(BlockBackend *blk);
-
-void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
-void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
-void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
-bool blk_iostatus_is_enabled(const BlockBackend *blk);
-
-char *blk_get_attached_dev_id(BlockBackend *blk);
-
-BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int64_t bytes, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-
-BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_flush(BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
- unsigned int *nr_zones,
- BlockZoneDescriptor *zones,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
- int64_t offset, int64_t len,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes,
- BlockCompletionFunc *cb, void *opaque);
-void blk_aio_cancel_async(BlockAIOCB *acb);
-BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque);
-
-void blk_inc_in_flight(BlockBackend *blk);
-void blk_dec_in_flight(BlockBackend *blk);
-
-bool coroutine_fn GRAPH_RDLOCK blk_co_is_inserted(BlockBackend *blk);
-bool co_wrapper_mixed_bdrv_rdlock blk_is_inserted(BlockBackend *blk);
-
-bool coroutine_fn GRAPH_RDLOCK blk_co_is_available(BlockBackend *blk);
-bool co_wrapper_mixed_bdrv_rdlock blk_is_available(BlockBackend *blk);
-
-void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked);
-void co_wrapper blk_lock_medium(BlockBackend *blk, bool locked);
-
-void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag);
-void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);
-
-int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
-int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);
-
-void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
- uint64_t *nb_sectors_ptr);
-void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
-
-int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
-int64_t blk_nb_sectors(BlockBackend *blk);
-
-void *blk_try_blockalign(BlockBackend *blk, size_t size);
-void *blk_blockalign(BlockBackend *blk, size_t size);
-bool blk_is_writable(BlockBackend *blk);
-bool blk_enable_write_cache(BlockBackend *blk);
-BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
-BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
- int error);
-void blk_error_action(BlockBackend *blk, BlockErrorAction action,
- bool is_read, int error);
-void blk_iostatus_set_err(BlockBackend *blk, int error);
-int blk_get_max_iov(BlockBackend *blk);
-int blk_get_max_hw_iov(BlockBackend *blk);
-
-AioContext *blk_get_aio_context(BlockBackend *blk);
-BlockAcctStats *blk_get_stats(BlockBackend *blk);
-void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
- BlockCompletionFunc *cb,
- void *opaque, int ret);
-
-uint32_t blk_get_request_alignment(BlockBackend *blk);
-uint32_t blk_get_max_transfer(BlockBackend *blk);
-uint64_t blk_get_max_hw_transfer(BlockBackend *blk);
-
-int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
- BlockBackend *blk_out, int64_t off_out,
- int64_t bytes, BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
-int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
- BlockDriverState *base,
- int64_t offset, int64_t bytes,
- int64_t *pnum, int64_t *map,
- BlockDriverState **file);
-int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
- BlockDriverState *base,
- bool include_base, int64_t offset,
- int64_t bytes, int64_t *pnum);
-
-/*
- * "I/O or GS" API functions. These functions can run without
- * the BQL, but only in one specific iothread/main loop.
- *
- * See include/block/block-io.h for more information about
- * the "I/O or GS" API.
- */
-
-int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset,
- int64_t bytes, void *buf,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
- void *buf, BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- size_t qiov_offset,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- size_t qiov_offset, BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset,
- int64_t bytes, const void *buf,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
- const void *buf, BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset,
- int64_t bytes, QEMUIOVector *qiov,
- size_t qiov_offset,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
- int64_t bytes,
- QEMUIOVector *qiov, size_t qiov_offset,
- BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk,
- int64_t offset, int64_t bytes,
- const void *buf);
-int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
- int64_t bytes, const void *buf);
-
-int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int64_t bytes,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int64_t bytes, BdrvRequestFlags flags);
-
-int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
- unsigned int *nr_zones,
- BlockZoneDescriptor *zones);
-int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset,
- unsigned int *nr_zones,
- BlockZoneDescriptor *zones);
-int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
- int64_t offset, int64_t len);
-int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
- int64_t offset, int64_t len);
-int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
- QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset,
- QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-
-int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset,
- int64_t bytes);
-int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
- int64_t bytes);
-
-int co_wrapper_mixed blk_flush(BlockBackend *blk);
-int coroutine_fn blk_co_flush(BlockBackend *blk);
-
-int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req,
- void *buf);
-int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
- void *buf);
-
-int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset,
- bool exact, PreallocMode prealloc,
- BdrvRequestFlags flags, Error **errp);
-int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
- PreallocMode prealloc, BdrvRequestFlags flags,
- Error **errp);
-
-#endif /* BLOCK_BACKEND_IO_H */
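A short sketch of the mixed synchronous wrappers declared above; the 512-byte size and zero offsets are arbitrary and error handling is abbreviated.

    /* Sketch only: copy the first sector between two backends using the
     * blk_pread()/blk_pwrite() co_wrapper_mixed helpers above. */
    static int copy_first_sector(BlockBackend *src, BlockBackend *dst)
    {
        uint8_t buf[512];
        int ret;

        ret = blk_pread(src, 0, sizeof(buf), buf, 0);
        if (ret < 0) {
            return ret;
        }
        return blk_pwrite(dst, 0, sizeof(buf), buf, 0);
    }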
diff --git a/include/sysemu/cpu-throttle.h b/include/sysemu/cpu-throttle.h
deleted file mode 100644
index d65bdef..0000000
--- a/include/sysemu/cpu-throttle.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- */
-
-#ifndef SYSEMU_CPU_THROTTLE_H
-#define SYSEMU_CPU_THROTTLE_H
-
-#include "qemu/timer.h"
-
-/**
- * cpu_throttle_init:
- *
- * Initialize the CPU throttling API.
- */
-void cpu_throttle_init(void);
-
-/**
- * cpu_throttle_set:
- * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
- *
- * Throttles all vcpus by forcing them to sleep for the given percentage of
- * time. A throttle_percentage of 25 roughly corresponds to a 75% duty cycle
- * (example: 10ms of sleep for every 30ms awake).
- *
- * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
- * Once the throttling starts, it will remain in effect until cpu_throttle_stop
- * is called.
- */
-void cpu_throttle_set(int new_throttle_pct);
-
-/**
- * cpu_throttle_stop:
- *
- * Stops the vcpu throttling started by cpu_throttle_set.
- */
-void cpu_throttle_stop(void);
-
-/**
- * cpu_throttle_active:
- *
- * Returns: %true if the vcpus are currently being throttled, %false otherwise.
- */
-bool cpu_throttle_active(void);
-
-/**
- * cpu_throttle_get_percentage:
- *
- * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
- *
- * Returns: The throttle percentage in range 1 to 99.
- */
-int cpu_throttle_get_percentage(void);
-
-#endif /* SYSEMU_CPU_THROTTLE_H */
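A hedged usage sketch of the throttling API above; the 30% figure and the notion of a long-running "phase" are illustrative.

    /* Sketch: throttle vCPUs while some long-running phase is active and
     * stop again afterwards. */
    static void set_phase_throttle(bool active)
    {
        if (active) {
            cpu_throttle_set(30);       /* 30% sleep time, valid range 1-99 */
        } else if (cpu_throttle_active()) {
            cpu_throttle_stop();
        }
    }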
diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h
deleted file mode 100644
index 7bfa960..0000000
--- a/include/sysemu/cpu-timers.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * CPU timers state API
- *
- * Copyright 2020 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-#ifndef SYSEMU_CPU_TIMERS_H
-#define SYSEMU_CPU_TIMERS_H
-
-#include "qemu/timer.h"
-
-/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */
-void cpu_timers_init(void);
-
-/* icount - Instruction Counter API */
-
-/**
- * ICountMode: icount enablement state:
- *
- * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
- * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
- * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
- */
-typedef enum {
- ICOUNT_DISABLED = 0,
- ICOUNT_PRECISE,
- ICOUNT_ADAPTATIVE,
-} ICountMode;
-
-#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
-extern ICountMode use_icount;
-#define icount_enabled() (use_icount)
-#else
-#define icount_enabled() ICOUNT_DISABLED
-#endif
-
-/*
- * Update the icount with the executed instructions. Called by
- * cpus-tcg vCPU thread so the main-loop can see time has moved forward.
- */
-void icount_update(CPUState *cpu);
-
-/* get raw icount value */
-int64_t icount_get_raw(void);
-
-/* return the virtual CPU time in ns, based on the instruction counter. */
-int64_t icount_get(void);
-/*
- * convert an instruction counter value to ns, based on the icount shift.
- * This shift is set as a fixed value with the icount "shift" option
- * (precise mode), or it is constantly approximated and corrected at
- * runtime in adaptive mode.
- */
-int64_t icount_to_ns(int64_t icount);
-
-/**
- * icount_configure: configure the icount options, including "shift"
- * @opts: Options to parse
- * @errp: pointer to a NULL-initialized error object
- *
- * Return: true on success, else false setting @errp with error
- */
-bool icount_configure(QemuOpts *opts, Error **errp);
-
-/* used by tcg vcpu thread to calc icount budget */
-int64_t icount_round(int64_t count);
-
-/* if the CPUs are idle, start accounting real time to virtual clock. */
-void icount_start_warp_timer(void);
-void icount_account_warp_timer(void);
-void icount_notify_exit(void);
-
-/*
- * CPU Ticks and Clock
- */
-
-/* Caller must hold BQL */
-void cpu_enable_ticks(void);
-/* Caller must hold BQL */
-void cpu_disable_ticks(void);
-
-/*
- * return the time elapsed in VM between vm_start and vm_stop.
- * cpu_get_ticks() uses units of the host CPU cycle counter.
- */
-int64_t cpu_get_ticks(void);
-
-/*
- * Returns the monotonic time elapsed in VM, i.e.,
- * the time between vm_start and vm_stop
- */
-int64_t cpu_get_clock(void);
-
-void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
-
-/* get/set VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
-int64_t cpus_get_virtual_clock(void);
-void cpus_set_virtual_clock(int64_t new_time);
-int64_t cpus_get_elapsed_ticks(void);
-
-#endif /* SYSEMU_CPU_TIMERS_H */
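A small sketch of how a caller might pick between icount-based virtual time and the VM clock using the declarations above; purely illustrative.

    /* Sketch: prefer the instruction counter when icount is enabled,
     * otherwise fall back to the VM's monotonic clock. */
    static int64_t guest_time_ns(void)
    {
        if (icount_enabled()) {
            return icount_get();        /* virtual CPU time in ns */
        }
        return cpu_get_clock();         /* time elapsed between vm_start/vm_stop */
    }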
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
deleted file mode 100644
index b4a566c..0000000
--- a/include/sysemu/cpus.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef QEMU_CPUS_H
-#define QEMU_CPUS_H
-
-#include "sysemu/accel-ops.h"
-
-/* register accel-specific operations */
-void cpus_register_accel(const AccelOpsClass *i);
-
-/* return the registered accel-specific operations */
-const AccelOpsClass *cpus_get_accel(void);
-
-/* accel/dummy-cpus.c */
-
-/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
-void dummy_start_vcpu_thread(CPUState *);
-
-/* interface available for cpus accelerator threads */
-
-/* Size of temporary buffers used when forming a vCPU thread name */
-#define VCPU_THREAD_NAME_SIZE 16
-
-void cpus_kick_thread(CPUState *cpu);
-bool cpu_work_list_empty(CPUState *cpu);
-bool cpu_thread_is_idle(CPUState *cpu);
-bool all_cpu_threads_idle(void);
-bool cpu_can_run(CPUState *cpu);
-void qemu_wait_io_event_common(CPUState *cpu);
-void qemu_wait_io_event(CPUState *cpu);
-void cpu_thread_signal_created(CPUState *cpu);
-void cpu_thread_signal_destroyed(CPUState *cpu);
-void cpu_handle_guest_debug(CPUState *cpu);
-
-/* end interface for cpus accelerator threads */
-
-bool qemu_in_vcpu_thread(void);
-void qemu_init_cpu_loop(void);
-void resume_all_vcpus(void);
-void pause_all_vcpus(void);
-void cpu_stop_current(void);
-
-extern int icount_align_option;
-
-/* Unblock cpu */
-void qemu_cpu_kick_self(void);
-
-bool cpus_are_resettable(void);
-
-void cpu_synchronize_all_states(void);
-void cpu_synchronize_all_post_reset(void);
-void cpu_synchronize_all_post_init(void);
-void cpu_synchronize_all_pre_loadvm(void);
-
-#endif
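A sketch of the pause/synchronize/resume pattern suggested by the declarations above; the callback indirection is an assumption for illustration.

    /* Sketch: run a callback with every vCPU paused and its state synced. */
    static void with_vcpus_paused(void (*fn)(void *), void *opaque)
    {
        pause_all_vcpus();
        cpu_synchronize_all_states();
        fn(opaque);
        resume_all_vcpus();
    }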
diff --git a/include/sysemu/cryptodev-vhost-user.h b/include/sysemu/cryptodev-vhost-user.h
deleted file mode 100644
index 6071050..0000000
--- a/include/sysemu/cryptodev-vhost-user.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * QEMU Crypto Device Common Vhost User Implementation
- *
- * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
- *
- * Authors:
- * Gonglei <arei.gonglei@huawei.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef CRYPTODEV_VHOST_USER_H
-#define CRYPTODEV_VHOST_USER_H
-
-#include "sysemu/cryptodev-vhost.h"
-
-#define VHOST_USER_MAX_AUTH_KEY_LEN 512
-#define VHOST_USER_MAX_CIPHER_KEY_LEN 64
-
-
-/**
- * cryptodev_vhost_user_get_vhost:
- * @cc: the client object for each queue
- * @b: the cryptodev backend common vhost object
- * @queue: the queue index
- *
- * Gets a new cryptodev backend common vhost object based on
- * @b and @queue
- *
- * Returns: the cryptodev backend common vhost object
- */
-CryptoDevBackendVhost *
-cryptodev_vhost_user_get_vhost(
- CryptoDevBackendClient *cc,
- CryptoDevBackend *b,
- uint16_t queue);
-
-#endif /* CRYPTODEV_VHOST_USER_H */
diff --git a/include/sysemu/cryptodev-vhost.h b/include/sysemu/cryptodev-vhost.h
deleted file mode 100644
index 4c3c22a..0000000
--- a/include/sysemu/cryptodev-vhost.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * QEMU Crypto Device Common Vhost Implementation
- *
- * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
- *
- * Authors:
- * Gonglei <arei.gonglei@huawei.com>
- * Jay Zhou <jianjay.zhou@huawei.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-#ifndef CRYPTODEV_VHOST_H
-#define CRYPTODEV_VHOST_H
-
-#include "hw/virtio/vhost.h"
-#include "hw/virtio/vhost-backend.h"
-#include "chardev/char.h"
-
-#include "sysemu/cryptodev.h"
-
-
-typedef struct CryptoDevBackendVhostOptions {
- VhostBackendType backend_type;
- void *opaque;
- int total_queues;
- CryptoDevBackendClient *cc;
-} CryptoDevBackendVhostOptions;
-
-typedef struct CryptoDevBackendVhost {
- struct vhost_dev dev;
- struct vhost_virtqueue vqs[1];
- int backend;
- CryptoDevBackendClient *cc;
-} CryptoDevBackendVhost;
-
-/**
- * cryptodev_vhost_get_max_queues:
- * @crypto: the cryptodev backend common vhost object
- *
- * Get the maximum queue number of @crypto.
- *
- *
- * Returns: the maximum queue number
- */
-uint64_t
-cryptodev_vhost_get_max_queues(
- CryptoDevBackendVhost *crypto);
-
-
-/**
- * cryptodev_vhost_init:
- * @options: the common vhost object's option
- *
- * Creates a new cryptodev backend common vhost object
- *
- * The returned object must be released with
- * cryptodev_vhost_cleanup() when no
- * longer required
- *
- * Returns: the cryptodev backend common vhost object
- */
-struct CryptoDevBackendVhost *
-cryptodev_vhost_init(
- CryptoDevBackendVhostOptions *options);
-
-/**
- * cryptodev_vhost_cleanup:
- * @crypto: the cryptodev backend common vhost object
- *
- * Clean up the resources associated with @crypto that were realized
- * by cryptodev_vhost_init()
- *
- */
-void cryptodev_vhost_cleanup(
- CryptoDevBackendVhost *crypto);
-
-/**
- * cryptodev_get_vhost:
- * @cc: the client object for each queue
- * @b: the cryptodev backend common vhost object
- * @queue: the cryptodev backend queue index
- *
- * Gets a new cryptodev backend common vhost object based on
- * @b and @queue
- *
- * Returns: the cryptodev backend common vhost object
- */
-CryptoDevBackendVhost *
-cryptodev_get_vhost(CryptoDevBackendClient *cc,
- CryptoDevBackend *b,
- uint16_t queue);
-/**
- * cryptodev_vhost_start:
- * @dev: the virtio crypto object
- * @total_queues: the total count of queue
- *
- * Starts the vhost crypto logic
- *
- * Returns: 0 for success, negative for errors
- */
-int cryptodev_vhost_start(VirtIODevice *dev, int total_queues);
-
-/**
- * cryptodev_vhost_stop:
- * @dev: the virtio crypto object
- * @total_queues: the total count of queue
- *
- * Stops the vhost crypto logic
- *
- */
-void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues);
-
-/**
- * cryptodev_vhost_virtqueue_mask:
- * @dev: the virtio crypto object
- * @queue: the cryptodev backend queue index
- * @idx: the virtqueue index
- * @mask: mask or not (true or false)
- *
- * Mask/unmask events for @idx virtqueue on @dev device
- *
- */
-void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
- int queue,
- int idx, bool mask);
-
-/**
- * cryptodev_vhost_virtqueue_pending:
- * @dev: the virtio crypto object
- * @queue: the cryptodev backend queue index
- * @idx: the virtqueue index
- *
- * Test and clear event pending status for @idx virtqueue on @dev device.
- * Should be called after unmask to avoid losing events.
- *
- * Returns: true for success, false for errors
- */
-bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
- int queue, int idx);
-
-#endif /* CRYPTODEV_VHOST_H */
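A brief sketch of the start/stop pairing documented above; the queue count and VirtIODevice pointer are assumed to come from the virtio-crypto device.

    /* Sketch: bring the vhost crypto path up or down from a single helper. */
    static int crypto_vhost_set_running(VirtIODevice *dev, int total_queues,
                                        bool running)
    {
        if (running) {
            return cryptodev_vhost_start(dev, total_queues);
        }
        cryptodev_vhost_stop(dev, total_queues);
        return 0;
    }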
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
deleted file mode 100644
index 96d3998..0000000
--- a/include/sysemu/cryptodev.h
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * QEMU Crypto Device Implementation
- *
- * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
- *
- * Authors:
- * Gonglei <arei.gonglei@huawei.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-#ifndef CRYPTODEV_H
-#define CRYPTODEV_H
-
-#include "qemu/queue.h"
-#include "qemu/throttle.h"
-#include "qom/object.h"
-#include "qapi/qapi-types-cryptodev.h"
-
-/**
- * CryptoDevBackend:
- *
- * The CryptoDevBackend object is an interface
- * for different cryptodev backends, which provides crypto
- * operation wrapper.
- *
- */
-
-#define TYPE_CRYPTODEV_BACKEND "cryptodev-backend"
-
-OBJECT_DECLARE_TYPE(CryptoDevBackend, CryptoDevBackendClass,
- CRYPTODEV_BACKEND)
-
-
-#define MAX_CRYPTO_QUEUE_NUM 64
-
-typedef struct CryptoDevBackendConf CryptoDevBackendConf;
-typedef struct CryptoDevBackendPeers CryptoDevBackendPeers;
-typedef struct CryptoDevBackendClient
- CryptoDevBackendClient;
-
-/**
- * CryptoDevBackendSymSessionInfo:
- *
- * @cipher_alg: algorithm type of CIPHER
- * @key_len: byte length of cipher key
- * @hash_alg: algorithm type of HASH/MAC
- * @hash_result_len: byte length of HASH operation result
- * @auth_key_len: byte length of authenticated key
- * @add_len: byte length of additional authenticated data
- * @op_type: operation type (refer to virtio_crypto.h)
- * @direction: encryption or decryption direction for CIPHER
- * @hash_mode: HASH mode for HASH operation (refer to virtio_crypto.h)
- * @alg_chain_order: order of algorithm chaining (CIPHER then HASH,
- * or HASH then CIPHER)
- * @cipher_key: point to a key of CIPHER
- * @auth_key: point to an authenticated key of MAC
- *
- */
-typedef struct CryptoDevBackendSymSessionInfo {
- /* corresponding with virtio crypto spec */
- uint32_t cipher_alg;
- uint32_t key_len;
- uint32_t hash_alg;
- uint32_t hash_result_len;
- uint32_t auth_key_len;
- uint32_t add_len;
- uint8_t op_type;
- uint8_t direction;
- uint8_t hash_mode;
- uint8_t alg_chain_order;
- uint8_t *cipher_key;
- uint8_t *auth_key;
-} CryptoDevBackendSymSessionInfo;
-
-/**
- * CryptoDevBackendAsymSessionInfo:
- */
-typedef struct CryptoDevBackendRsaPara {
- uint32_t padding_algo;
- uint32_t hash_algo;
-} CryptoDevBackendRsaPara;
-
-typedef struct CryptoDevBackendAsymSessionInfo {
- /* corresponding with virtio crypto spec */
- uint32_t algo;
- uint32_t keytype;
- uint32_t keylen;
- uint8_t *key;
- union {
- CryptoDevBackendRsaPara rsa;
- } u;
-} CryptoDevBackendAsymSessionInfo;
-
-typedef struct CryptoDevBackendSessionInfo {
- uint32_t op_code;
- union {
- CryptoDevBackendSymSessionInfo sym_sess_info;
- CryptoDevBackendAsymSessionInfo asym_sess_info;
- } u;
- uint64_t session_id;
-} CryptoDevBackendSessionInfo;
-
-/**
- * CryptoDevBackendSymOpInfo:
- *
- * @aad_len: byte length of additional authenticated data
- * @iv_len: byte length of initialization vector or counter
- * @src_len: byte length of source data
- * @dst_len: byte length of destination data
- * @digest_result_len: byte length of hash digest result
- * @hash_start_src_offset: Starting point for hash processing, specified
- * as number of bytes from start of packet in source data, only used for
- * algorithm chain
- * @cipher_start_src_offset: Starting point for cipher processing, specified
- * as number of bytes from start of packet in source data, only used for
- * algorithm chain
- * @len_to_hash: byte length of source data on which the hash
- * operation will be computed, only used for algorithm chain
- * @len_to_cipher: byte length of source data on which the cipher
- * operation will be computed, only used for algorithm chain
- * @op_type: operation type (refer to virtio_crypto.h)
- * @iv: point to the initialization vector or counter
- * @src: point to the source data
- * @dst: point to the destination data
- * @aad_data: point to the additional authenticated data
- * @digest_result: point to the digest result data
- * @data[0]: point to the extensional memory by one memory allocation
- *
- */
-typedef struct CryptoDevBackendSymOpInfo {
- uint32_t aad_len;
- uint32_t iv_len;
- uint32_t src_len;
- uint32_t dst_len;
- uint32_t digest_result_len;
- uint32_t hash_start_src_offset;
- uint32_t cipher_start_src_offset;
- uint32_t len_to_hash;
- uint32_t len_to_cipher;
- uint8_t op_type;
- uint8_t *iv;
- uint8_t *src;
- uint8_t *dst;
- uint8_t *aad_data;
- uint8_t *digest_result;
- uint8_t data[];
-} CryptoDevBackendSymOpInfo;
-
-
-/**
- * CryptoDevBackendAsymOpInfo:
- *
- * @src_len: byte length of source data
- * @dst_len: byte length of destination data
- * @src: point to the source data
- * @dst: point to the destination data
- *
- */
-typedef struct CryptoDevBackendAsymOpInfo {
- uint32_t src_len;
- uint32_t dst_len;
- uint8_t *src;
- uint8_t *dst;
-} CryptoDevBackendAsymOpInfo;
-
-typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
-
-typedef struct CryptoDevBackendOpInfo {
- QCryptodevBackendAlgType algtype;
- uint32_t op_code;
- uint32_t queue_index;
- CryptoDevCompletionFunc cb;
- void *opaque; /* argument for cb */
- uint64_t session_id;
- union {
- CryptoDevBackendSymOpInfo *sym_op_info;
- CryptoDevBackendAsymOpInfo *asym_op_info;
- } u;
- QTAILQ_ENTRY(CryptoDevBackendOpInfo) next;
-} CryptoDevBackendOpInfo;
-
-struct CryptoDevBackendClass {
- ObjectClass parent_class;
-
- void (*init)(CryptoDevBackend *backend, Error **errp);
- void (*cleanup)(CryptoDevBackend *backend, Error **errp);
-
- int (*create_session)(CryptoDevBackend *backend,
- CryptoDevBackendSessionInfo *sess_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque);
-
- int (*close_session)(CryptoDevBackend *backend,
- uint64_t session_id,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque);
-
- int (*do_op)(CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info);
-};
-
-struct CryptoDevBackendClient {
- QCryptodevBackendType type;
- char *info_str;
- unsigned int queue_index;
- int vring_enable;
- QTAILQ_ENTRY(CryptoDevBackendClient) next;
-};
-
-struct CryptoDevBackendPeers {
- CryptoDevBackendClient *ccs[MAX_CRYPTO_QUEUE_NUM];
- uint32_t queues;
-};
-
-struct CryptoDevBackendConf {
- CryptoDevBackendPeers peers;
-
- /* Supported service mask */
- uint32_t crypto_services;
-
- /* Detailed algorithms mask */
- uint32_t cipher_algo_l;
- uint32_t cipher_algo_h;
- uint32_t hash_algo;
- uint32_t mac_algo_l;
- uint32_t mac_algo_h;
- uint32_t aead_algo;
- uint32_t akcipher_algo;
- /* Maximum length of cipher key */
- uint32_t max_cipher_key_len;
- /* Maximum length of authenticated key */
- uint32_t max_auth_key_len;
- /* Maximum size of each crypto request's content */
- uint64_t max_size;
-};
-
-typedef struct CryptodevBackendSymStat {
- int64_t encrypt_ops;
- int64_t decrypt_ops;
- int64_t encrypt_bytes;
- int64_t decrypt_bytes;
-} CryptodevBackendSymStat;
-
-typedef struct CryptodevBackendAsymStat {
- int64_t encrypt_ops;
- int64_t decrypt_ops;
- int64_t sign_ops;
- int64_t verify_ops;
- int64_t encrypt_bytes;
- int64_t decrypt_bytes;
- int64_t sign_bytes;
- int64_t verify_bytes;
-} CryptodevBackendAsymStat;
-
-struct CryptoDevBackend {
- Object parent_obj;
-
- bool ready;
- /* Tag whether the cryptodev backend is used by virtio-crypto or not */
- bool is_used;
- CryptoDevBackendConf conf;
- CryptodevBackendSymStat *sym_stat;
- CryptodevBackendAsymStat *asym_stat;
-
- ThrottleState ts;
- ThrottleTimers tt;
- ThrottleConfig tc;
- QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos;
-};
-
-#define CryptodevSymStatInc(be, op, bytes) do { \
- be->sym_stat->op##_bytes += (bytes); \
- be->sym_stat->op##_ops += 1; \
-} while (/*CONSTCOND*/0)
-
-#define CryptodevSymStatIncEncrypt(be, bytes) \
- CryptodevSymStatInc(be, encrypt, bytes)
-
-#define CryptodevSymStatIncDecrypt(be, bytes) \
- CryptodevSymStatInc(be, decrypt, bytes)
-
-#define CryptodevAsymStatInc(be, op, bytes) do { \
- be->asym_stat->op##_bytes += (bytes); \
- be->asym_stat->op##_ops += 1; \
-} while (/*CONSTCOND*/0)
-
-#define CryptodevAsymStatIncEncrypt(be, bytes) \
- CryptodevAsymStatInc(be, encrypt, bytes)
-
-#define CryptodevAsymStatIncDecrypt(be, bytes) \
- CryptodevAsymStatInc(be, decrypt, bytes)
-
-#define CryptodevAsymStatIncSign(be, bytes) \
- CryptodevAsymStatInc(be, sign, bytes)
-
-#define CryptodevAsymStatIncVerify(be, bytes) \
- CryptodevAsymStatInc(be, verify, bytes)
-
-
-/**
- * cryptodev_backend_new_client:
- *
- * Creates a new cryptodev backend client object.
- *
- * The returned object must be released with
- * cryptodev_backend_free_client() when no
- * longer required
- *
- * Returns: a new cryptodev backend client object
- */
-CryptoDevBackendClient *cryptodev_backend_new_client(void);
-
-/**
- * cryptodev_backend_free_client:
- * @cc: the cryptodev backend client object
- *
- * Release the memory associated with @cc that
- * was previously allocated by cryptodev_backend_new_client()
- */
-void cryptodev_backend_free_client(
- CryptoDevBackendClient *cc);
-
-/**
- * cryptodev_backend_cleanup:
- * @backend: the cryptodev backend object
- * @errp: pointer to a NULL-initialized error object
- *
- * Clean up the resources associated with @backend that were realized
- * by the specific backend's init() callback
- */
-void cryptodev_backend_cleanup(
- CryptoDevBackend *backend,
- Error **errp);
-
-/**
- * cryptodev_backend_create_session:
- * @backend: the cryptodev backend object
- * @sess_info: parameters needed by session creating
- * @queue_index: queue index of cryptodev backend client
- * @errp: pointer to a NULL-initialized error object
- * @cb: callback invoked when session creation is completed
- * @opaque: parameter passed to callback
- *
- * Create a session for symmetric/asymmetric algorithms
- *
- * Returns: 0 for success and cb will be called when creation is completed,
- * negative value for error, and cb will not be called.
- */
-int cryptodev_backend_create_session(
- CryptoDevBackend *backend,
- CryptoDevBackendSessionInfo *sess_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque);
-
-/**
- * cryptodev_backend_close_session:
- * @backend: the cryptodev backend object
- * @session_id: the session id
- * @queue_index: queue index of cryptodev backend client
- * @errp: pointer to a NULL-initialized error object
- * @cb: callback invoked when the session close is completed
- * @opaque: parameter passed to callback
- *
- * Close a session which was previously
- * created by cryptodev_backend_create_session()
- *
- * Returns: 0 for success and cb will be called when the close is completed,
- * negative value for error, and cb will not be called.
- */
-int cryptodev_backend_close_session(
- CryptoDevBackend *backend,
- uint64_t session_id,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque);
-
-/**
- * cryptodev_backend_crypto_operation:
- * @backend: the cryptodev backend object
- * @op_info: pointer to a CryptoDevBackendOpInfo object
- *
- * Do crypto operation, such as encryption, decryption, signature and
- * verification
- *
- * Returns: 0 for success and cb will be called when the operation is completed,
- * negative value for error, and cb will not be called.
- */
-int cryptodev_backend_crypto_operation(
- CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info);
-
-/**
- * cryptodev_backend_set_used:
- * @backend: the cryptodev backend object
- * @used: true or false
- *
- * Set whether the cryptodev backend is used by virtio-crypto or not
- */
-void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used);
-
-/**
- * cryptodev_backend_is_used:
- * @backend: the cryptodev backend object
- *
- * Return whether the cryptodev backend is used
- * by virtio-crypto or not
- *
- * Returns: true on used, or false on not used
- */
-bool cryptodev_backend_is_used(CryptoDevBackend *backend);
-
-/**
- * cryptodev_backend_set_ready:
- * @backend: the cryptodev backend object
- * @ready: true or false
- *
- * Set whether the cryptodev backend is ready or not; this is called
- * by the children of the cryptodev backend interface.
- */
-void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready);
-
-/**
- * cryptodev_backend_is_ready:
- * @backend: the cryptodev backend object
- *
- * Return whether the cryptodev backend is ready or not
- *
- * Returns: true on ready, or false on not ready
- */
-bool cryptodev_backend_is_ready(CryptoDevBackend *backend);
-
-#endif /* CRYPTODEV_H */
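A sketch of the asynchronous contract described above for session creation (a return of 0 means the callback will run later, a negative value means it will not); the callback and its opaque pointer are hypothetical.

    /* Sketch: kick off session creation on queue 0 and finish in a callback. */
    static void on_session_created(void *opaque, int ret)
    {
        CryptoDevBackendSessionInfo *info = opaque;

        if (ret == 0) {
            /* info->session_id is valid from here on */
        }
    }

    static int start_session(CryptoDevBackend *backend,
                             CryptoDevBackendSessionInfo *info)
    {
        /* Negative return: on_session_created() will never be invoked. */
        return cryptodev_backend_create_session(backend, info, 0,
                                                on_session_created, info);
    }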
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
deleted file mode 100644
index eb60152..0000000
--- a/include/sysemu/device_tree.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Header with function prototypes to help device tree manipulation using
- * libfdt. It also provides functions to read entries from device tree proc
- * interface.
- *
- * Copyright 2008 IBM Corporation.
- * Authors: Jerone Young <jyoung5@us.ibm.com>
- * Hollis Blanchard <hollisb@us.ibm.com>
- *
- * This work is licensed under the GNU GPL license version 2 or later.
- *
- */
-
-#ifndef DEVICE_TREE_H
-#define DEVICE_TREE_H
-
-void *create_device_tree(int *sizep);
-void *load_device_tree(const char *filename_path, int *sizep);
-#ifdef CONFIG_LINUX
-/**
- * load_device_tree_from_sysfs: reads the device tree information in the
- * /proc/device-tree directory and return the corresponding binary blob
- * buffer pointer. Asserts in case of error.
- */
-void *load_device_tree_from_sysfs(void);
-#endif
-
-/**
- * qemu_fdt_node_path: return the paths of nodes matching a given
- * name and compat string
- * @fdt: pointer to the dt blob
- * @name: node name
- * @compat: compatibility string
- * @errp: handle to an error object
- *
- * returns a newly allocated NULL-terminated array of node paths.
- * Use g_strfreev() to free it. If one or more nodes were found, the
- * array contains the path of each node and the last element is
- * NULL. If there is no error but no matching node was found, the
- * returned array contains a single element equal to NULL. If an error
- * was encountered when parsing the blob, the function returns NULL.
- *
- * @name may be NULL to wildcard names and only match compatibility
- * strings.
- */
-char **qemu_fdt_node_path(void *fdt, const char *name, const char *compat,
- Error **errp);
-
-/**
- * qemu_fdt_node_unit_path: return the paths of nodes matching a given
- * node-name, i.e. node-name and node-name@unit-address
- * @fdt: pointer to the dt blob
- * @name: node name
- * @errp: handle to an error object
- *
- * returns a newly allocated NULL-terminated array of node paths.
- * Use g_strfreev() to free it. If one or more nodes were found, the
- * array contains the path of each node and the last element is
- * NULL. If there is no error but no matching node was found, the
- * returned array contains a single element equal to NULL. If an error
- * was encountered when parsing the blob, the function returns NULL.
- */
-char **qemu_fdt_node_unit_path(void *fdt, const char *name, Error **errp);
-
-int qemu_fdt_setprop(void *fdt, const char *node_path,
- const char *property, const void *val, int size);
-int qemu_fdt_setprop_cell(void *fdt, const char *node_path,
- const char *property, uint32_t val);
-int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
- const char *property, uint64_t val);
-int qemu_fdt_setprop_string(void *fdt, const char *node_path,
- const char *property, const char *string);
-
-/**
- * qemu_fdt_setprop_string_array: set a string array property
- *
- * @fdt: pointer to the dt blob
- * @node_path: path of the node to set the property on
- * @prop: name of the string array property
- * @array: pointer to an array of string pointers
- * @len: length of array
- *
- * assigns a string array to a property. This function converts an
- * array of strings to a sequential string with \0 separators before
- * setting the property.
- */
-int qemu_fdt_setprop_string_array(void *fdt, const char *node_path,
- const char *prop, char **array, int len);
-
-int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
- const char *property,
- const char *target_node_path);
-/**
- * qemu_fdt_getprop: retrieve the value of a given property
- * @fdt: pointer to the device tree blob
- * @node_path: node path
- * @property: name of the property to find
- * @lenp: fdt error if any or length of the property on success
- * @errp: handle to an error object
- *
- * returns a pointer to the property on success and NULL on failure
- */
-const void *qemu_fdt_getprop(void *fdt, const char *node_path,
- const char *property, int *lenp,
- Error **errp);
-/**
- * qemu_fdt_getprop_cell: retrieve the value of a given 4 byte property
- * @fdt: pointer to the device tree blob
- * @node_path: node path
- * @property: name of the property to find
- * @lenp: fdt error if any or -EINVAL if the property size is different from
- * 4 bytes, or 4 (expected length of the property) upon success.
- * @errp: handle to an error object
- *
- * returns the property value on success
- */
-uint32_t qemu_fdt_getprop_cell(void *fdt, const char *node_path,
- const char *property, int *lenp,
- Error **errp);
-uint32_t qemu_fdt_get_phandle(void *fdt, const char *path);
-uint32_t qemu_fdt_alloc_phandle(void *fdt);
-int qemu_fdt_nop_node(void *fdt, const char *node_path);
-int qemu_fdt_add_subnode(void *fdt, const char *name);
-int qemu_fdt_add_path(void *fdt, const char *path);
-
-#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
- do { \
- uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
- for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
- qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
- } \
- qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
- sizeof(qdt_tmp)); \
- } while (0)
-
-void qemu_fdt_dumpdtb(void *fdt, int size);
-
-/**
- * qemu_fdt_setprop_sized_cells_from_array:
- * @fdt: device tree blob
- * @node_path: node to set property on
- * @property: property to set
- * @numvalues: number of values
- * @values: array of number-of-cells, value pairs
- *
- * Set the specified property on the specified node in the device tree
- * to be an array of cells. The values of the cells are specified via
- * the values list, which alternates between "number of cells used by
- * this value" and "value".
- * number-of-cells must be either 1 or 2 (other values will result in
- * an error being returned). If a value is too large to fit in the
- * number of cells specified for it, an error is returned.
- *
- * This function is useful because device tree nodes often have cell arrays
- * which are either lists of addresses or lists of address,size tuples, but
- * the number of cells used for each element vary depending on the
- * #address-cells and #size-cells properties of their parent node.
- * If you know all your cell elements are one cell wide you can use the
- * simpler qemu_fdt_setprop_cells(). If you're not setting up the
- * array programmatically, qemu_fdt_setprop_sized_cells may be more
- * convenient.
- *
- * Return value: 0 on success, <0 on error.
- */
-int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
- const char *node_path,
- const char *property,
- int numvalues,
- uint64_t *values);
-
-/**
- * qemu_fdt_setprop_sized_cells:
- * @fdt: device tree blob
- * @node_path: node to set property on
- * @property: property to set
- * @...: list of number-of-cells, value pairs
- *
- * Set the specified property on the specified node in the device tree
- * to be an array of cells. The values of the cells are specified via
- * the variable arguments, which alternates between "number of cells
- * used by this value" and "value".
- *
- * This is a convenience wrapper for the function
- * qemu_fdt_setprop_sized_cells_from_array().
- *
- * Return value: 0 on success, <0 on error.
- */
-#define qemu_fdt_setprop_sized_cells(fdt, node_path, property, ...) \
- ({ \
- uint64_t qdt_tmp[] = { __VA_ARGS__ }; \
- qemu_fdt_setprop_sized_cells_from_array(fdt, node_path, \
- property, \
- ARRAY_SIZE(qdt_tmp) / 2, \
- qdt_tmp); \
- })
-
-
-/**
- * qemu_fdt_randomize_seeds:
- * @fdt: device tree blob
- *
- * Re-randomize all "rng-seed" properties with new seeds.
- */
-void qemu_fdt_randomize_seeds(void *fdt);
-
-#define FDT_PCI_RANGE_RELOCATABLE 0x80000000
-#define FDT_PCI_RANGE_PREFETCHABLE 0x40000000
-#define FDT_PCI_RANGE_ALIASED 0x20000000
-#define FDT_PCI_RANGE_TYPE_MASK 0x03000000
-#define FDT_PCI_RANGE_MMIO_64BIT 0x03000000
-#define FDT_PCI_RANGE_MMIO 0x02000000
-#define FDT_PCI_RANGE_IOPORT 0x01000000
-#define FDT_PCI_RANGE_CONFIG 0x00000000
-
-#endif /* DEVICE_TREE_H */
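A compact sketch of the property helpers above describing a hypothetical MMIO node; the path, compatible string, address and size are placeholders.

    /* Sketch: add a node and set string/reg properties on it. The "reg"
     * property uses two cells each for address and size. */
    static void fdt_add_example_node(void *fdt)
    {
        qemu_fdt_add_subnode(fdt, "/example@10000000");
        qemu_fdt_setprop_string(fdt, "/example@10000000",
                                "compatible", "vendor,example");
        qemu_fdt_setprop_sized_cells(fdt, "/example@10000000", "reg",
                                     2, 0x10000000, 2, 0x1000);
    }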
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
deleted file mode 100644
index a1ac5bc..0000000
--- a/include/sysemu/dma.h
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * DMA helper functions
- *
- * Copyright (c) 2009, 2020 Red Hat
- *
- * This work is licensed under the terms of the GNU General Public License
- * (GNU GPL), version 2 or later.
- */
-
-#ifndef DMA_H
-#define DMA_H
-
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
-#include "block/block.h"
-#include "block/accounting.h"
-
-typedef enum {
- DMA_DIRECTION_TO_DEVICE = 0,
- DMA_DIRECTION_FROM_DEVICE = 1,
-} DMADirection;
-
-/*
- * When an IOMMU is present, bus addresses become distinct from
- * CPU/memory physical addresses and may be a different size. Because
- * the IOVA size depends more on the bus than on the platform, we more
- * or less have to treat these as 64-bit always to cover all (or at
- * least most) cases.
- */
-typedef uint64_t dma_addr_t;
-
-#define DMA_ADDR_BITS 64
-#define DMA_ADDR_FMT "%" PRIx64
-
-typedef struct ScatterGatherEntry ScatterGatherEntry;
-
-struct QEMUSGList {
- ScatterGatherEntry *sg;
- int nsg;
- int nalloc;
- dma_addr_t size;
- DeviceState *dev;
- AddressSpace *as;
-};
-
-static inline void dma_barrier(AddressSpace *as, DMADirection dir)
-{
- /*
- * This is called before DMA read and write operations
- * unless the _relaxed form is used and is responsible
- * for providing some sane ordering of accesses vs
- * concurrently running VCPUs.
- *
- * Users of map(), unmap() or lower level st/ld_*
- * operations are responsible for providing their own
- * ordering via barriers.
- *
- * This primitive implementation does a simple smp_mb()
- * before each operation which provides pretty much full
- * ordering.
- *
- * A smarter implementation can be devised if needed to
- * use lighter barriers based on the direction of the
- * transfer, the DMA context, etc...
- */
- smp_mb();
-}
-
-/* Checks that the given range of addresses is valid for DMA. This is
- * useful for certain cases, but usually you should just use
- * dma_memory_{read,write}() and check for errors */
-static inline bool dma_memory_valid(AddressSpace *as,
- dma_addr_t addr, dma_addr_t len,
- DMADirection dir, MemTxAttrs attrs)
-{
- return address_space_access_valid(as, addr, len,
- dir == DMA_DIRECTION_FROM_DEVICE,
- attrs);
-}
-
-static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
- dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir,
- MemTxAttrs attrs)
-{
- return address_space_rw(as, addr, attrs,
- buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
-}
-
-static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
- dma_addr_t addr,
- void *buf, dma_addr_t len)
-{
- return dma_memory_rw_relaxed(as, addr, buf, len,
- DMA_DIRECTION_TO_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
-}
-
-static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
- dma_addr_t addr,
- const void *buf,
- dma_addr_t len)
-{
- return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
-}
-
-/**
- * dma_memory_rw: Read from or write to an address space from DMA controller.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- * @len: the number of bytes to read or write
- * @dir: indicates the transfer direction
- * @attrs: memory transaction attributes
- */
-static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir, MemTxAttrs attrs)
-{
- dma_barrier(as, dir);
-
- return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
-}
-
-/**
- * dma_memory_read: Read from an address space from DMA controller.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault). Called within RCU critical section.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- * @len: length of the data transferred
- * @attrs: memory transaction attributes
- */
-static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- MemTxAttrs attrs)
-{
- return dma_memory_rw(as, addr, buf, len,
- DMA_DIRECTION_TO_DEVICE, attrs);
-}
-
-/**
- * dma_memory_write: Write to an address space from DMA controller.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- * @len: the number of bytes to write
- * @attrs: memory transaction attributes
- */
-static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
- const void *buf, dma_addr_t len,
- MemTxAttrs attrs)
-{
- return dma_memory_rw(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE, attrs);
-}
-
-/**
- * dma_memory_set: Fill memory with a constant byte from DMA controller.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @c: constant byte to fill the memory
- * @len: the number of bytes to fill with the constant byte
- * @attrs: memory transaction attributes
- */
-MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
- uint8_t c, dma_addr_t len, MemTxAttrs attrs);
-
-/**
- * dma_memory_map: Map a physical memory region into a host virtual address.
- *
- * May map a subset of the requested range, given by and returned in @plen.
- * May return %NULL and set *@plen to zero(0), if resources needed to perform
- * the mapping are exhausted.
- * Use only for reads OR writes - not for read-modify-write operations.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @len: pointer to length of buffer; updated on return
- * @dir: indicates the transfer direction
- * @attrs: memory attributes
- */
-static inline void *dma_memory_map(AddressSpace *as,
- dma_addr_t addr, dma_addr_t *len,
- DMADirection dir, MemTxAttrs attrs)
-{
- hwaddr xlen = *len;
- void *p;
-
- p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
- attrs);
- *len = xlen;
- return p;
-}
-
-/**
- * dma_memory_unmap: Unmaps a memory region previously mapped
- * by dma_memory_map()
- *
- * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
- * @access_len gives the amount of memory that was actually read or written
- * by the caller.
- *
- * @as: #AddressSpace used
- * @buffer: host pointer as returned by address_space_map()
- * @len: buffer length as returned by address_space_map()
- * @dir: indicates the transfer direction
- * @access_len: amount of data actually transferred
- */
-static inline void dma_memory_unmap(AddressSpace *as,
- void *buffer, dma_addr_t len,
- DMADirection dir, dma_addr_t access_len)
-{
- address_space_unmap(as, buffer, (hwaddr)len,
- dir == DMA_DIRECTION_FROM_DEVICE, access_len);
-}
-
-#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
- static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr, \
- uint##_bits##_t *pval, \
- MemTxAttrs attrs) \
- { \
- MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
- _end##_bits##_to_cpus(pval); \
- return res; \
- } \
- static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr, \
- uint##_bits##_t val, \
- MemTxAttrs attrs) \
- { \
- val = cpu_to_##_end##_bits(val); \
- return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
- }
-
-static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
- uint8_t *val, MemTxAttrs attrs)
-{
- return dma_memory_read(as, addr, val, 1, attrs);
-}
-
-static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
- uint8_t val, MemTxAttrs attrs)
-{
- return dma_memory_write(as, addr, &val, 1, attrs);
-}
-
-DEFINE_LDST_DMA(uw, w, 16, le);
-DEFINE_LDST_DMA(l, l, 32, le);
-DEFINE_LDST_DMA(q, q, 64, le);
-DEFINE_LDST_DMA(uw, w, 16, be);
-DEFINE_LDST_DMA(l, l, 32, be);
-DEFINE_LDST_DMA(q, q, 64, be);
-
-#undef DEFINE_LDST_DMA
-
-struct ScatterGatherEntry {
- dma_addr_t base;
- dma_addr_t len;
-};
-
-void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
- AddressSpace *as);
-void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
-void qemu_sglist_destroy(QEMUSGList *qsg);
-
-typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
- BlockCompletionFunc *cb, void *cb_opaque,
- void *opaque);
-
-BlockAIOCB *dma_blk_io(AioContext *ctx,
- QEMUSGList *sg, uint64_t offset, uint32_t align,
- DMAIOFunc *io_func, void *io_func_opaque,
- BlockCompletionFunc *cb, void *opaque, DMADirection dir);
-BlockAIOCB *dma_blk_read(BlockBackend *blk,
- QEMUSGList *sg, uint64_t offset, uint32_t align,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *dma_blk_write(BlockBackend *blk,
- QEMUSGList *sg, uint64_t offset, uint32_t align,
- BlockCompletionFunc *cb, void *opaque);
-MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
- QEMUSGList *sg, MemTxAttrs attrs);
-MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
- QEMUSGList *sg, MemTxAttrs attrs);
-
-void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
- QEMUSGList *sg, enum BlockAcctType type);
-
-/**
- * dma_aligned_pow2_mask: Return the address bit mask of the largest
- * power of 2 size less or equal than @end - @start + 1, aligned with @start,
- * and bounded by 1 << @max_addr_bits bits.
- *
- * @start: range start address
- * @end: range end address (greater than @start)
- * @max_addr_bits: max address bits (<= 64)
- */
-uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
- int max_addr_bits);
-
-#endif
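A short sketch of the generated load/store helpers above; the descriptor address is assumed to come from device state and MEMTXATTRS_UNSPECIFIED is used for simplicity.

    /* Sketch: read a 32-bit little-endian field from a DMA descriptor using
     * the ldl_le_dma() helper generated by DEFINE_LDST_DMA above. */
    static MemTxResult read_desc_len(AddressSpace *as, dma_addr_t desc,
                                     uint32_t *len_out)
    {
        return ldl_le_dma(as, desc, len_out, MEMTXATTRS_UNSPECIFIED);
    }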
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
deleted file mode 100644
index d702854..0000000
--- a/include/sysemu/dump.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * QEMU dump
- *
- * Copyright Fujitsu, Corp. 2011, 2012
- *
- * Authors:
- * Wen Congyang <wency@cn.fujitsu.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef DUMP_H
-#define DUMP_H
-
-#include "qapi/qapi-types-dump.h"
-#include "qemu/thread.h"
-
-#define MAKEDUMPFILE_SIGNATURE "makedumpfile"
-#define MAX_SIZE_MDF_HEADER (4096) /* max size of makedumpfile_header */
-#define TYPE_FLAT_HEADER (1) /* type of flattened format */
-#define VERSION_FLAT_HEADER (1) /* version of flattened format */
-#define END_FLAG_FLAT_HEADER (-1)
-
-#ifndef ARCH_PFN_OFFSET
-#define ARCH_PFN_OFFSET (0)
-#endif
-
-/*
- * flag for compressed format
- */
-#define DUMP_DH_COMPRESSED_ZLIB (0x1)
-#define DUMP_DH_COMPRESSED_LZO (0x2)
-#define DUMP_DH_COMPRESSED_SNAPPY (0x4)
-
-#define KDUMP_SIGNATURE "KDUMP "
-#define SIG_LEN (sizeof(KDUMP_SIGNATURE) - 1)
-#define DUMP_LEVEL (1)
-#define DISKDUMP_HEADER_BLOCKS (1)
-
-#include "sysemu/dump-arch.h"
-#include "sysemu/memory_mapping.h"
-
-typedef struct QEMU_PACKED MakedumpfileHeader {
- char signature[16]; /* = "makedumpfile" */
- int64_t type;
- int64_t version;
-} MakedumpfileHeader;
-
-typedef struct QEMU_PACKED MakedumpfileDataHeader {
- int64_t offset;
- int64_t buf_size;
-} MakedumpfileDataHeader;
-
-typedef struct QEMU_PACKED NewUtsname {
- char sysname[65];
- char nodename[65];
- char release[65];
- char version[65];
- char machine[65];
- char domainname[65];
-} NewUtsname;
-
-typedef struct QEMU_PACKED DiskDumpHeader32 {
- char signature[SIG_LEN]; /* = "KDUMP " */
- uint32_t header_version; /* Dump header version */
- NewUtsname utsname; /* copy of system_utsname */
- char timestamp[10]; /* Time stamp */
- uint32_t status; /* Above flags */
- uint32_t block_size; /* Size of a block in bytes */
- uint32_t sub_hdr_size; /* Size of arch-dependent header in blocks */
- uint32_t bitmap_blocks; /* Size of memory bitmap in blocks */
- uint32_t max_mapnr; /* = max_mapnr,
- obsoleted in header_version 6 */
- uint32_t total_ram_blocks; /* Number of blocks that should be written */
- uint32_t device_blocks; /* Number of total blocks in dump device */
- uint32_t written_blocks; /* Number of written blocks */
- uint32_t current_cpu; /* CPU# which handles dump */
- uint32_t nr_cpus; /* Number of CPUs */
-} DiskDumpHeader32;
-
-typedef struct QEMU_PACKED DiskDumpHeader64 {
- char signature[SIG_LEN]; /* = "KDUMP " */
- uint32_t header_version; /* Dump header version */
- NewUtsname utsname; /* copy of system_utsname */
- char timestamp[22]; /* Time stamp */
- uint32_t status; /* Above flags */
- uint32_t block_size; /* Size of a block in bytes */
- uint32_t sub_hdr_size; /* Size of arch-dependent header in blocks */
- uint32_t bitmap_blocks; /* Size of memory bitmap in blocks */
- uint32_t max_mapnr; /* = max_mapnr,
- obsoleted in header_version 6 */
- uint32_t total_ram_blocks; /* Number of blocks that should be written */
- uint32_t device_blocks; /* Number of total blocks in dump device */
- uint32_t written_blocks; /* Number of written blocks */
- uint32_t current_cpu; /* CPU# which handles dump */
- uint32_t nr_cpus; /* Number of CPUs */
-} DiskDumpHeader64;
-
-typedef struct QEMU_PACKED KdumpSubHeader32 {
- uint32_t phys_base;
- uint32_t dump_level; /* header_version 1 and later */
- uint32_t split; /* header_version 2 and later */
- uint32_t start_pfn; /* header_version 2 and later,
- obsoleted in header_version 6 */
- uint32_t end_pfn; /* header_version 2 and later,
- obsoleted in header_version 6 */
- uint64_t offset_vmcoreinfo; /* header_version 3 and later */
- uint32_t size_vmcoreinfo; /* header_version 3 and later */
- uint64_t offset_note; /* header_version 4 and later */
- uint32_t note_size; /* header_version 4 and later */
- uint64_t offset_eraseinfo; /* header_version 5 and later */
- uint32_t size_eraseinfo; /* header_version 5 and later */
- uint64_t start_pfn_64; /* header_version 6 and later */
- uint64_t end_pfn_64; /* header_version 6 and later */
- uint64_t max_mapnr_64; /* header_version 6 and later */
-} KdumpSubHeader32;
-
-typedef struct QEMU_PACKED KdumpSubHeader64 {
- uint64_t phys_base;
- uint32_t dump_level; /* header_version 1 and later */
- uint32_t split; /* header_version 2 and later */
- uint64_t start_pfn; /* header_version 2 and later,
- obsoleted in header_version 6 */
- uint64_t end_pfn; /* header_version 2 and later,
- obsoleted in header_version 6 */
- uint64_t offset_vmcoreinfo; /* header_version 3 and later */
- uint64_t size_vmcoreinfo; /* header_version 3 and later */
- uint64_t offset_note; /* header_version 4 and later */
- uint64_t note_size; /* header_version 4 and later */
- uint64_t offset_eraseinfo; /* header_version 5 and later */
- uint64_t size_eraseinfo; /* header_version 5 and later */
- uint64_t start_pfn_64; /* header_version 6 and later */
- uint64_t end_pfn_64; /* header_version 6 and later */
- uint64_t max_mapnr_64; /* header_version 6 and later */
-} KdumpSubHeader64;
-
-typedef struct DataCache {
- DumpState *state; /* dump state related to this data */
- uint8_t *buf; /* buffer for cached data */
- size_t buf_size; /* size of the buf */
- size_t data_size; /* size of cached data in buf */
- off_t offset; /* offset of the file */
-} DataCache;
-
-typedef struct QEMU_PACKED PageDescriptor {
- uint64_t offset; /* the offset of the page data */
- uint32_t size; /* the size of this dump page */
- uint32_t flags; /* flags */
- uint64_t page_flags; /* page flags */
-} PageDescriptor;
-
-typedef struct DumpState {
- GuestPhysBlockList guest_phys_blocks;
- ArchDumpInfo dump_info;
- MemoryMappingList list;
- bool resume;
- bool detached;
- bool kdump_raw;
- hwaddr memory_offset;
- int fd;
-
- /*
- * Dump filter area variables
- *
- * A filtered dump only contains the guest memory designated by
- * the start address and length variables defined below.
- *
- * If length is 0, no filtering is applied.
- */
- int64_t filter_area_begin; /* Start address of partial guest memory area */
- int64_t filter_area_length; /* Length of partial guest memory area */
-
- /* Elf dump related data */
- uint32_t phdr_num;
- uint32_t shdr_num;
- ssize_t note_size;
- hwaddr shdr_offset;
- hwaddr phdr_offset;
- hwaddr section_offset;
- hwaddr note_offset;
-
- void *elf_section_hdrs; /* Pointer to section header buffer */
- void *elf_section_data; /* Pointer to section data buffer */
- uint64_t elf_section_data_size; /* Size of section data */
- GArray *string_table_buf; /* String table data buffer */
-
- uint8_t *note_buf; /* buffer for notes */
- size_t note_buf_offset; /* current write position in note_buf */
- uint32_t nr_cpus; /* number of guest CPUs */
- uint64_t max_mapnr; /* the highest guest physical page frame number */
- size_t len_dump_bitmap; /* the size of the space used to store the
- dump_bitmap in the vmcore */
- off_t offset_dump_bitmap; /* offset of the dump_bitmap part in the vmcore */
- off_t offset_page; /* offset of the page part in the vmcore */
- size_t num_dumpable; /* number of pages that can be dumped */
- uint32_t flag_compress; /* indicate the compression format */
- DumpStatus status; /* current dump status */
-
- bool has_format; /* whether format is provided */
- DumpGuestMemoryFormat format; /* valid only if has_format == true */
- QemuThread dump_thread; /* thread for detached dump */
-
- int64_t total_size; /* total memory size (in bytes) to
- * be dumped. When filter is
- * enabled, this will only count
- * those to be written. */
- int64_t written_size; /* written memory size (in bytes),
- * this could be used to calculate
- * how much work we have
- * finished. */
- uint8_t *guest_note; /* ELF note content */
- size_t guest_note_size;
-} DumpState;
-
-uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
-uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
-uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
-
-int64_t dump_filtered_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
- int64_t filter_area_length);
-int64_t dump_filtered_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
- int64_t filter_area_length);
-#endif
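
A minimal sketch of the filter-area rule documented in DumpState above (a length of 0 means no filtering); demo_addr_in_dump() is a hypothetical helper, not part of the header.

    static bool demo_addr_in_dump(const DumpState *s, int64_t addr)
    {
        if (s->filter_area_length == 0) {
            return true;                        /* no filtering: dump everything */
        }
        return addr >= s->filter_area_begin &&
               addr < s->filter_area_begin + s->filter_area_length;
    }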
diff --git a/include/sysemu/host_iommu_device.h b/include/sysemu/host_iommu_device.h
deleted file mode 100644
index c1bf74a..0000000
--- a/include/sysemu/host_iommu_device.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Host IOMMU device abstract declaration
- *
- * Copyright (C) 2024 Intel Corporation.
- *
- * Authors: Zhenzhong Duan <zhenzhong.duan@intel.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-#ifndef HOST_IOMMU_DEVICE_H
-#define HOST_IOMMU_DEVICE_H
-
-#include "qom/object.h"
-#include "qapi/error.h"
-
-/**
- * struct HostIOMMUDeviceCaps - Define host IOMMU device capabilities.
- *
- * @type: host platform IOMMU type.
- *
- * @aw_bits: host IOMMU address width. 0xff if no limitation.
- */
-typedef struct HostIOMMUDeviceCaps {
- uint32_t type;
- uint8_t aw_bits;
-} HostIOMMUDeviceCaps;
-
-#define TYPE_HOST_IOMMU_DEVICE "host-iommu-device"
-OBJECT_DECLARE_TYPE(HostIOMMUDevice, HostIOMMUDeviceClass, HOST_IOMMU_DEVICE)
-
-struct HostIOMMUDevice {
- Object parent_obj;
-
- char *name;
- void *agent; /* pointer to agent device, ie. VFIO or VDPA device */
- PCIBus *aliased_bus;
- int aliased_devfn;
- HostIOMMUDeviceCaps caps;
-};
-
-/**
- * struct HostIOMMUDeviceClass - The base class for all host IOMMU devices.
- *
- * Different types of host devices (e.g., VFIO or VDPA device) or devices
- * with different backend (e.g., VFIO legacy container or IOMMUFD backend)
- * will have different implementations of the HostIOMMUDeviceClass.
- */
-struct HostIOMMUDeviceClass {
- ObjectClass parent_class;
-
- /**
- * @realize: initialize host IOMMU device instance further.
- *
- * Mandatory callback.
- *
- * @hiod: pointer to a host IOMMU device instance.
- *
- * @opaque: pointer to agent device of this host IOMMU device,
- * e.g., VFIO base device or VDPA device.
- *
- * @errp: pass an Error out when realize fails.
- *
- * Returns: true on success, false on failure.
- */
- bool (*realize)(HostIOMMUDevice *hiod, void *opaque, Error **errp);
- /**
- * @get_cap: check if a host IOMMU device capability is supported.
- *
- * Optional callback; if not implemented, it indicates that querying
- * @cap is not supported.
- *
- * @hiod: pointer to a host IOMMU device instance.
- *
- * @cap: capability to check.
- *
- * @errp: pass an Error out when fails to query capability.
- *
- * Returns: <0 on failure, 0 if @cap is unsupported, or a positive
- * value for certain capabilities,
- * e.g. HOST_IOMMU_DEVICE_CAP_AW_BITS.
- */
- int (*get_cap)(HostIOMMUDevice *hiod, int cap, Error **errp);
- /**
- * @get_iova_ranges: Return the list of usable IOVA ranges of the
- * @hiod host IOMMU device.
- *
- * @hiod: handle to the host IOMMU device
- */
- GList* (*get_iova_ranges)(HostIOMMUDevice *hiod);
- /**
- *
- * @get_page_size_mask: Return the page size mask supported by the
- * @hiod host IOMMU device.
- *
- * @hiod: handle to the host IOMMU device
- */
- uint64_t (*get_page_size_mask)(HostIOMMUDevice *hiod);
-};
-
-/*
- * Host IOMMU device capability list.
- */
-#define HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE 0
-#define HOST_IOMMU_DEVICE_CAP_AW_BITS 1
-
-#define HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX 64
-#endif
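
A sketch of how a caller would honour the get_cap() return convention documented above; demo_query_aw_bits() is hypothetical, while HOST_IOMMU_DEVICE_GET_CLASS() is generated by the OBJECT_DECLARE_TYPE() macro used in the header.

    static int demo_query_aw_bits(HostIOMMUDevice *hiod, Error **errp)
    {
        HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);

        if (!hiodc->get_cap) {
            error_setg(errp, "capability query not supported");
            return -ENOTSUP;                    /* optional callback not implemented */
        }
        /* For HOST_IOMMU_DEVICE_CAP_AW_BITS, a positive return value is the
         * address width itself. */
        return hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, errp);
    }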
diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h
deleted file mode 100644
index de47ae5..0000000
--- a/include/sysemu/hostmem.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * QEMU Host Memory Backend
- *
- * Copyright (C) 2013-2014 Red Hat Inc
- *
- * Authors:
- * Igor Mammedov <imammedo@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef SYSEMU_HOSTMEM_H
-#define SYSEMU_HOSTMEM_H
-
-#include "sysemu/numa.h"
-#include "qapi/qapi-types-machine.h"
-#include "qom/object.h"
-#include "exec/memory.h"
-#include "qemu/bitmap.h"
-#include "qemu/thread-context.h"
-
-#define TYPE_MEMORY_BACKEND "memory-backend"
-OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass,
- MEMORY_BACKEND)
-
-/* hostmem-ram.c */
-/**
- * @TYPE_MEMORY_BACKEND_RAM:
- * name of backend that uses mmap on the anonymous RAM
- */
-
-#define TYPE_MEMORY_BACKEND_RAM "memory-backend-ram"
-
-/* hostmem-file.c */
-/**
- * @TYPE_MEMORY_BACKEND_FILE:
- * name of backend that uses mmap on a file descriptor
- */
-#define TYPE_MEMORY_BACKEND_FILE "memory-backend-file"
-
-
-/**
- * HostMemoryBackendClass:
- * @parent_class: opaque parent class container
- */
-struct HostMemoryBackendClass {
- ObjectClass parent_class;
-
- /**
- * alloc: Allocate memory from backend.
- *
- * @backend: the #HostMemoryBackend.
- * @errp: pointer to Error*, to store an error if it happens.
- *
- * Return: true on success, else false setting @errp with error.
- */
- bool (*alloc)(HostMemoryBackend *backend, Error **errp);
-};
-
-/**
- * @HostMemoryBackend
- *
- * @parent: opaque parent object container
- * @size: amount of memory backend provides
- * @mr: MemoryRegion representing host memory belonging to backend
- * @prealloc_threads: number of threads to be used for preallocating RAM
- */
-struct HostMemoryBackend {
- /* private */
- Object parent;
-
- /* protected */
- uint64_t size;
- bool merge, dump, use_canonical_path;
- bool prealloc, is_mapped, share, reserve;
- bool guest_memfd, aligned;
- uint32_t prealloc_threads;
- ThreadContext *prealloc_context;
- DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
- HostMemPolicy policy;
-
- MemoryRegion mr;
-};
-
-bool host_memory_backend_mr_inited(HostMemoryBackend *backend);
-MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend);
-
-void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped);
-bool host_memory_backend_is_mapped(HostMemoryBackend *backend);
-size_t host_memory_backend_pagesize(HostMemoryBackend *memdev);
-char *host_memory_backend_get_name(HostMemoryBackend *backend);
-
-#endif
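
A sketch of plugging an allocated backend's RAM into an address space with the accessors above; demo_plug_backend(), the target MemoryRegion and the base address are hypothetical.

    static void demo_plug_backend(MemoryRegion *sysmem, HostMemoryBackend *backend,
                                  hwaddr base)
    {
        if (host_memory_backend_mr_inited(backend) &&
            !host_memory_backend_is_mapped(backend)) {
            MemoryRegion *mr = host_memory_backend_get_memory(backend);

            memory_region_add_subregion(sysmem, base, mr);
            host_memory_backend_set_mapped(backend, true);
        }
    }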
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
deleted file mode 100644
index 730f927..0000000
--- a/include/sysemu/hvf.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * QEMU Hypervisor.framework (HVF) support
- *
- * Copyright Google Inc., 2017
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-/* header to be included in non-HVF-specific code */
-
-#ifndef HVF_H
-#define HVF_H
-
-#include "qemu/accel.h"
-#include "qom/object.h"
-
-#ifdef COMPILING_PER_TARGET
-#include "cpu.h"
-
-#ifdef CONFIG_HVF
-extern bool hvf_allowed;
-#define hvf_enabled() (hvf_allowed)
-#else /* !CONFIG_HVF */
-#define hvf_enabled() 0
-#endif /* !CONFIG_HVF */
-
-#endif /* COMPILING_PER_TARGET */
-
-#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
-
-typedef struct HVFState HVFState;
-DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE,
- TYPE_HVF_ACCEL)
-
-#ifdef COMPILING_PER_TARGET
-struct hvf_sw_breakpoint {
- vaddr pc;
- vaddr saved_insn;
- int use_count;
- QTAILQ_ENTRY(hvf_sw_breakpoint) entry;
-};
-
-struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu,
- vaddr pc);
-int hvf_sw_breakpoints_active(CPUState *cpu);
-
-int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
-int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
-int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
-int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
-void hvf_arch_remove_all_hw_breakpoints(void);
-
-/*
- * hvf_update_guest_debug:
- * @cs: CPUState for the CPU to update
- *
- * Update guest to enable or disable debugging. Per-arch specifics will be
- * handled by calling down to hvf_arch_update_guest_debug.
- */
-int hvf_update_guest_debug(CPUState *cpu);
-void hvf_arch_update_guest_debug(CPUState *cpu);
-
-/*
- * Return whether the guest supports debugging.
- */
-bool hvf_arch_supports_guest_debug(void);
-#endif /* COMPILING_PER_TARGET */
-
-#endif
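
A sketch of the usual guard pattern for the per-target declarations above; demo_maybe_enable_hvf_debug() is hypothetical.

    static void demo_maybe_enable_hvf_debug(CPUState *cs)
    {
        if (hvf_enabled()) {                /* compiles to 0 without CONFIG_HVF */
            hvf_update_guest_debug(cs);
        }
    }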
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
deleted file mode 100644
index 5b28d17..0000000
--- a/include/sysemu/hvf_int.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * QEMU Hypervisor.framework (HVF) support
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-/* header to be included in HVF-specific code */
-
-#ifndef HVF_INT_H
-#define HVF_INT_H
-
-#ifdef __aarch64__
-#include <Hypervisor/Hypervisor.h>
-typedef hv_vcpu_t hvf_vcpuid;
-#else
-#include <Hypervisor/hv.h>
-typedef hv_vcpuid_t hvf_vcpuid;
-#endif
-
-/* hvf_slot flags */
-#define HVF_SLOT_LOG (1 << 0)
-
-typedef struct hvf_slot {
- uint64_t start;
- uint64_t size;
- uint8_t *mem;
- int slot_id;
- uint32_t flags;
- MemoryRegion *region;
-} hvf_slot;
-
-typedef struct hvf_vcpu_caps {
- uint64_t vmx_cap_pinbased;
- uint64_t vmx_cap_procbased;
- uint64_t vmx_cap_procbased2;
- uint64_t vmx_cap_entry;
- uint64_t vmx_cap_exit;
- uint64_t vmx_cap_preemption_timer;
-} hvf_vcpu_caps;
-
-struct HVFState {
- AccelState parent;
- hvf_slot slots[32];
- int num_slots;
-
- hvf_vcpu_caps *hvf_caps;
- uint64_t vtimer_offset;
- QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints;
-};
-extern HVFState *hvf_state;
-
-struct AccelCPUState {
- hvf_vcpuid fd;
- void *exit;
- bool vtimer_masked;
- sigset_t unblock_ipi_mask;
- bool guest_debug_enabled;
- bool dirty;
-};
-
-void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
- const char *exp);
-#define assert_hvf_ok(EX) assert_hvf_ok_impl((EX), __FILE__, __LINE__, #EX)
-const char *hvf_return_string(hv_return_t ret);
-int hvf_arch_init(void);
-int hvf_arch_init_vcpu(CPUState *cpu);
-void hvf_arch_vcpu_destroy(CPUState *cpu);
-int hvf_vcpu_exec(CPUState *);
-hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
-int hvf_put_registers(CPUState *);
-int hvf_get_registers(CPUState *);
-void hvf_kick_vcpu_thread(CPUState *cpu);
-
-#endif
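
A sketch of the assert_hvf_ok() helper above, which reports file, line and the stringified expression on any non-HV_SUCCESS return; demo_run_once() is hypothetical.

    static void demo_run_once(hvf_vcpuid vcpu)
    {
        /* hv_vcpu_run() is provided by Hypervisor.framework on both x86 and
         * aarch64 hosts. */
        assert_hvf_ok(hv_vcpu_run(vcpu));
    }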
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
deleted file mode 100644
index c71b77e..0000000
--- a/include/sysemu/hw_accel.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * QEMU Hardware accelerators support
- *
- * Copyright 2016 Google, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_HW_ACCEL_H
-#define QEMU_HW_ACCEL_H
-
-#include "hw/core/cpu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/hvf.h"
-#include "sysemu/whpx.h"
-#include "sysemu/nvmm.h"
-
-void cpu_synchronize_state(CPUState *cpu);
-void cpu_synchronize_post_reset(CPUState *cpu);
-void cpu_synchronize_post_init(CPUState *cpu);
-void cpu_synchronize_pre_loadvm(CPUState *cpu);
-
-#endif /* QEMU_HW_ACCEL_H */
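
A sketch of the intended call pattern: synchronize the accelerator's register cache before touching CPU state; demo_inspect_cpu() is hypothetical.

    static void demo_inspect_cpu(CPUState *cpu)
    {
        cpu_synchronize_state(cpu);   /* pulls registers from KVM/HVF/WHPX/NVMM */
        /* ... architecture-specific CPU state can now be read safely ... */
    }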
diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h
deleted file mode 100644
index 9edfec6..0000000
--- a/include/sysemu/iommufd.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * iommufd container backend declaration
- *
- * Copyright (C) 2024 Intel Corporation.
- * Copyright Red Hat, Inc. 2024
- *
- * Authors: Yi Liu <yi.l.liu@intel.com>
- * Eric Auger <eric.auger@redhat.com>
- * Zhenzhong Duan <zhenzhong.duan@intel.com>
- *
- * SPDX-License-Identifier: GPL-2.0-or-later
- */
-
-#ifndef SYSEMU_IOMMUFD_H
-#define SYSEMU_IOMMUFD_H
-
-#include "qom/object.h"
-#include "exec/hwaddr.h"
-#include "exec/cpu-common.h"
-#include "sysemu/host_iommu_device.h"
-
-#define TYPE_IOMMUFD_BACKEND "iommufd"
-OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND)
-
-struct IOMMUFDBackendClass {
- ObjectClass parent_class;
-};
-
-struct IOMMUFDBackend {
- Object parent;
-
- /*< protected >*/
- int fd; /* /dev/iommu file descriptor */
- bool owned; /* is the /dev/iommu opened internally */
- uint32_t users;
-
- /*< public >*/
-};
-
-bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
-void iommufd_backend_disconnect(IOMMUFDBackend *be);
-
-bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
- Error **errp);
-void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
-int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly);
-int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
- hwaddr iova, ram_addr_t size);
-bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
- uint32_t *type, void *data, uint32_t len,
- Error **errp);
-
-#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD TYPE_HOST_IOMMU_DEVICE "-iommufd"
-#endif
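
A sketch that strings the declarations above together: connect to /dev/iommu, allocate an IOAS and map one host buffer; demo_map_buffer(), the IOVA and the length are hypothetical.

    static bool demo_map_buffer(IOMMUFDBackend *be, void *host_buf, Error **errp)
    {
        uint32_t ioas_id;

        if (!iommufd_backend_connect(be, errp)) {
            return false;
        }
        if (!iommufd_backend_alloc_ioas(be, &ioas_id, errp)) {
            iommufd_backend_disconnect(be);
            return false;
        }
        /* Map 2 MiB of host memory read/write at IOVA 0x100000. */
        if (iommufd_backend_map_dma(be, ioas_id, 0x100000,
                                    2 * 1024 * 1024, host_buf, false) < 0) {
            error_setg(errp, "iommufd map failed");
            return false;
        }
        return true;
    }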
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
deleted file mode 100644
index 2102a90..0000000
--- a/include/sysemu/iothread.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Event loop thread
- *
- * Copyright Red Hat Inc., 2013
- *
- * Authors:
- * Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef IOTHREAD_H
-#define IOTHREAD_H
-
-#include "block/aio.h"
-#include "qemu/thread.h"
-#include "qom/object.h"
-#include "sysemu/event-loop-base.h"
-
-#define TYPE_IOTHREAD "iothread"
-
-struct IOThread {
- EventLoopBase parent_obj;
-
- QemuThread thread;
- AioContext *ctx;
- bool run_gcontext; /* whether we should run gcontext */
- GMainContext *worker_context;
- GMainLoop *main_loop;
- QemuSemaphore init_done_sem; /* is thread init done? */
- bool stopping; /* has iothread_stop() been called? */
- bool running; /* should iothread_run() continue? */
- int thread_id;
-
- /* AioContext poll parameters */
- int64_t poll_max_ns;
- int64_t poll_grow;
- int64_t poll_shrink;
-};
-typedef struct IOThread IOThread;
-
-DECLARE_INSTANCE_CHECKER(IOThread, IOTHREAD,
- TYPE_IOTHREAD)
-
-char *iothread_get_id(IOThread *iothread);
-IOThread *iothread_by_id(const char *id);
-AioContext *iothread_get_aio_context(IOThread *iothread);
-GMainContext *iothread_get_g_main_context(IOThread *iothread);
-
-/*
- * Helpers used to allocate iothreads for internal use. These
- * iothreads will not be seen by monitor clients when query using
- * "query-iothreads".
- */
-IOThread *iothread_create(const char *id, Error **errp);
-void iothread_stop(IOThread *iothread);
-void iothread_destroy(IOThread *iothread);
-
-/*
- * Returns true if executing within IOThread context,
- * false otherwise.
- */
-bool qemu_in_iothread(void);
-
-#endif /* IOTHREAD_H */
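
A sketch of creating an internal iothread and scheduling work on its AioContext; demo_start_worker() and demo_bh_cb() are hypothetical, aio_bh_schedule_oneshot() comes from block/aio.h.

    static void demo_bh_cb(void *opaque)
    {
        /* runs in the iothread's event loop */
    }

    static IOThread *demo_start_worker(Error **errp)
    {
        IOThread *iot = iothread_create("demo-worker", errp);

        if (iot) {
            aio_bh_schedule_oneshot(iothread_get_aio_context(iot),
                                    demo_bh_cb, NULL);
        }
        return iot;
    }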
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
deleted file mode 100644
index c31d9c7..0000000
--- a/include/sysemu/kvm.h
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * QEMU KVM support
- *
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-/* header to be included in non-KVM-specific code */
-
-#ifndef QEMU_KVM_H
-#define QEMU_KVM_H
-
-#include "exec/memattrs.h"
-#include "qemu/accel.h"
-#include "qom/object.h"
-
-#ifdef COMPILING_PER_TARGET
-# ifdef CONFIG_KVM
-# include <linux/kvm.h>
-# define CONFIG_KVM_IS_POSSIBLE
-# endif
-#else
-# define CONFIG_KVM_IS_POSSIBLE
-#endif
-
-#ifdef CONFIG_KVM_IS_POSSIBLE
-
-extern bool kvm_allowed;
-extern bool kvm_kernel_irqchip;
-extern bool kvm_split_irqchip;
-extern bool kvm_async_interrupts_allowed;
-extern bool kvm_halt_in_kernel_allowed;
-extern bool kvm_resamplefds_allowed;
-extern bool kvm_msi_via_irqfd_allowed;
-extern bool kvm_gsi_routing_allowed;
-extern bool kvm_gsi_direct_mapping;
-extern bool kvm_readonly_mem_allowed;
-extern bool kvm_msi_use_devid;
-
-#define kvm_enabled() (kvm_allowed)
-/**
- * kvm_irqchip_in_kernel:
- *
- * Returns: true if an in-kernel irqchip was created.
- * What this actually means is architecture and machine model
- * specific: on PC, for instance, it means that the LAPIC
- * is in kernel. This function should never be used from generic
- * target-independent code: use one of the following functions or
- * some other specific check instead.
- */
-#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
-
-/**
- * kvm_irqchip_is_split:
- *
- * Returns: true if the irqchip implementation is split between
- * user and kernel space. The details are architecture and
- * machine specific. On PC, it means that the PIC, IOAPIC, and
- * PIT are in user space while the LAPIC is in the kernel.
- */
-#define kvm_irqchip_is_split() (kvm_split_irqchip)
-
-/**
- * kvm_async_interrupts_enabled:
- *
- * Returns: true if we can deliver interrupts to KVM
- * asynchronously (ie by ioctl from any thread at any time)
- * rather than having to do interrupt delivery synchronously
- * (where the vcpu must be stopped at a suitable point first).
- */
-#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
-
-/**
- * kvm_halt_in_kernel
- *
- * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
- * inside of kernel space. This only works if MP state is implemented.
- */
-#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
-
-/**
- * kvm_irqfds_enabled:
- *
- * Returns: true if we can use irqfds to inject interrupts into
- * a KVM CPU (ie the kernel supports irqfds and we are running
- * with a configuration where it is meaningful to use them).
- *
- * Always available if running with in-kernel irqchip.
- */
-#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
-
-/**
- * kvm_resamplefds_enabled:
- *
- * Returns: true if we can use resamplefds to inject interrupts into
- * a KVM CPU (ie the kernel supports resamplefds and we are running
- * with a configuration where it is meaningful to use them).
- */
-#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)
-
-/**
- * kvm_msi_via_irqfd_enabled:
- *
- * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
- * to a KVM CPU via an irqfd. This requires that the kernel supports
- * this and that we're running in a configuration that permits it.
- */
-#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
-
-/**
- * kvm_gsi_routing_enabled:
- *
- * Returns: true if GSI routing is enabled (ie the kernel supports
- * it and we're running in a configuration that permits it).
- */
-#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
-
-/**
- * kvm_gsi_direct_mapping:
- *
- * Returns: true if GSI direct mapping is enabled.
- */
-#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)
-
-/**
- * kvm_readonly_mem_enabled:
- *
- * Returns: true if KVM readonly memory is enabled (ie the kernel
- * supports it and we're running in a configuration that permits it).
- */
-#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
-
-/**
- * kvm_msi_devid_required:
- * Returns: true if KVM requires a device id to be provided while
- * defining an MSI routing entry.
- */
-#define kvm_msi_devid_required() (kvm_msi_use_devid)
-
-#else
-
-#define kvm_enabled() (0)
-#define kvm_irqchip_in_kernel() (false)
-#define kvm_irqchip_is_split() (false)
-#define kvm_async_interrupts_enabled() (false)
-#define kvm_halt_in_kernel() (false)
-#define kvm_irqfds_enabled() (false)
-#define kvm_resamplefds_enabled() (false)
-#define kvm_msi_via_irqfd_enabled() (false)
-#define kvm_gsi_routing_allowed() (false)
-#define kvm_gsi_direct_mapping() (false)
-#define kvm_readonly_mem_enabled() (false)
-#define kvm_msi_devid_required() (false)
-
-#endif /* CONFIG_KVM_IS_POSSIBLE */
-
-struct kvm_run;
-struct kvm_irq_routing_entry;
-
-typedef struct KVMCapabilityInfo {
- const char *name;
- int value;
-} KVMCapabilityInfo;
-
-#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
-#define KVM_CAP_LAST_INFO { NULL, 0 }
-
-struct KVMState;
-
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
-typedef struct KVMState KVMState;
-DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
- TYPE_KVM_ACCEL)
-
-extern KVMState *kvm_state;
-typedef struct Notifier Notifier;
-
-typedef struct KVMRouteChange {
- KVMState *s;
- int changes;
-} KVMRouteChange;
-
-/* external API */
-
-unsigned int kvm_get_max_memslots(void);
-unsigned int kvm_get_free_memslots(void);
-bool kvm_has_sync_mmu(void);
-int kvm_has_vcpu_events(void);
-int kvm_max_nested_state_length(void);
-int kvm_has_gsi_routing(void);
-
-/**
- * kvm_arm_supports_user_irq
- *
- * Not all KVM implementations support notifications for kernel generated
- * interrupt events to user space. This function indicates whether the current
- * KVM implementation does support them.
- *
- * Returns: true if KVM supports using kernel generated IRQs from user space
- */
-bool kvm_arm_supports_user_irq(void);
-
-
-int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
-int kvm_on_sigbus(int code, void *addr);
-
-#ifdef COMPILING_PER_TARGET
-#include "cpu.h"
-
-void kvm_flush_coalesced_mmio_buffer(void);
-
-/**
- * kvm_update_guest_debug(): ensure KVM debug structures updated
- * @cs: the CPUState for this cpu
- * @reinject_trap: KVM trap injection control
- *
- * There are usually per-arch specifics which will be handled by
- * calling down to kvm_arch_update_guest_debug after the generic
- * fields have been set.
- */
-#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
-int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
-#else
-static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
-{
- return -EINVAL;
-}
-#endif
-
-/* internal API */
-
-int kvm_ioctl(KVMState *s, int type, ...);
-
-int kvm_vm_ioctl(KVMState *s, int type, ...);
-
-int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
-
-/**
- * kvm_device_ioctl - call an ioctl on a kvm device
- * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
- * @type: The device-ctrl ioctl number
- *
- * Returns: -errno on error, nonnegative on success
- */
-int kvm_device_ioctl(int fd, int type, ...);
-
-/**
- * kvm_vm_check_attr - check for existence of a specific vm attribute
- * @s: The KVMState pointer
- * @group: the group
- * @attr: the attribute of that group to query for
- *
- * Returns: 1 if the attribute exists
- * 0 if the attribute either does not exist or if the vm device
- * interface is unavailable
- */
-int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);
-
-/**
- * kvm_device_check_attr - check for existence of a specific device attribute
- * @fd: The device file descriptor
- * @group: the group
- * @attr: the attribute of that group to query for
- *
- * Returns: 1 if the attribute exists
- * 0 if the attribute either does not exist or if the vm device
- * interface is unavailable
- */
-int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);
-
-/**
- * kvm_device_access - set or get value of a specific device attribute
- * @fd: The device file descriptor
- * @group: the group
- * @attr: the attribute of that group to set or get
- * @val: pointer to a storage area for the value
- * @write: true for set and false for get operation
- * @errp: error object handle
- *
- * Returns: 0 on success
- * < 0 on error
- * Use kvm_device_check_attr() in order to check for the availability
- * of optional attributes.
- */
-int kvm_device_access(int fd, int group, uint64_t attr,
- void *val, bool write, Error **errp);
-
-/**
- * kvm_create_device - create a KVM device for the device control API
- * @s: The KVMState pointer
- * @type: The KVM device type (see Documentation/virtual/kvm/devices in the
- * kernel source)
- * @test: If true, only test if device can be created, but don't actually
- * create the device.
- *
- * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd;
- */
-int kvm_create_device(KVMState *s, uint64_t type, bool test);
-
-/**
- * kvm_device_supported - probe whether KVM supports specific device
- *
- * @vmfd: The fd handler for VM
- * @type: type of device
- *
- * @return: true if supported, otherwise false.
- */
-bool kvm_device_supported(int vmfd, uint64_t type);
-
-/* Arch specific hooks */
-
-extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
-
-void kvm_arch_accel_class_init(ObjectClass *oc);
-
-void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
-MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
-
-int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
-
-int kvm_arch_process_async_events(CPUState *cpu);
-
-int kvm_arch_get_registers(CPUState *cpu);
-
-/* state subset only touched by the VCPU itself during runtime */
-#define KVM_PUT_RUNTIME_STATE 1
-/* state subset modified during VCPU reset */
-#define KVM_PUT_RESET_STATE 2
-/* full state set, modified during initialization or on vmload */
-#define KVM_PUT_FULL_STATE 3
-
-int kvm_arch_put_registers(CPUState *cpu, int level);
-
-int kvm_arch_get_default_type(MachineState *ms);
-
-int kvm_arch_init(MachineState *ms, KVMState *s);
-
-int kvm_arch_init_vcpu(CPUState *cpu);
-int kvm_arch_destroy_vcpu(CPUState *cpu);
-
-bool kvm_vcpu_id_is_valid(int vcpu_id);
-
-/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
-unsigned long kvm_arch_vcpu_id(CPUState *cpu);
-
-#ifdef KVM_HAVE_MCE_INJECTION
-void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
-#endif
-
-void kvm_arch_init_irq_routing(KVMState *s);
-
-int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
- uint64_t address, uint32_t data, PCIDevice *dev);
-
-/* Notify arch about newly added MSI routes */
-int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
- int vector, PCIDevice *dev);
-/* Notify arch about released MSI routes */
-int kvm_arch_release_virq_post(int virq);
-
-int kvm_arch_msi_data_to_gsi(uint32_t data);
-
-int kvm_set_irq(KVMState *s, int irq, int level);
-int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
-
-void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
-
-void kvm_irqchip_add_change_notifier(Notifier *n);
-void kvm_irqchip_remove_change_notifier(Notifier *n);
-void kvm_irqchip_change_notify(void);
-
-struct kvm_guest_debug;
-struct kvm_debug_exit_arch;
-
-struct kvm_sw_breakpoint {
- vaddr pc;
- vaddr saved_insn;
- int use_count;
- QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
-};
-
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
- vaddr pc);
-
-int kvm_sw_breakpoints_active(CPUState *cpu);
-
-int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
- struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
- struct kvm_sw_breakpoint *bp);
-int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
-int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
-void kvm_arch_remove_all_hw_breakpoints(void);
-
-void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
-
-bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
-
-int kvm_check_extension(KVMState *s, unsigned int extension);
-
-int kvm_vm_check_extension(KVMState *s, unsigned int extension);
-
-#define kvm_vm_enable_cap(s, capability, cap_flags, ...) \
- ({ \
- struct kvm_enable_cap cap = { \
- .cap = capability, \
- .flags = cap_flags, \
- }; \
- uint64_t args_tmp[] = { __VA_ARGS__ }; \
- size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
- memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
- kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \
- })
-
-#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \
- ({ \
- struct kvm_enable_cap cap = { \
- .cap = capability, \
- .flags = cap_flags, \
- }; \
- uint64_t args_tmp[] = { __VA_ARGS__ }; \
- size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
- memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
- kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \
- })
-
-void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
-
-int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
- hwaddr *phys_addr);
-
-#endif /* COMPILING_PER_TARGET */
-
-void kvm_cpu_synchronize_state(CPUState *cpu);
-
-void kvm_init_cpu_signals(CPUState *cpu);
-
-/**
- * kvm_irqchip_add_msi_route - Add MSI route for specific vector
- * @c: KVMRouteChange instance.
- * @vector: which vector to add. This can be either MSI/MSIX
- * vector. The function will automatically detect whether
- * MSI/MSIX is enabled, and fetch corresponding MSI
- * message.
- * @dev: Owner PCI device to add the route. If @dev is specified
- * as @NULL, an empty MSI message will be initialized.
- * @return: virq (>=0) when success, errno (<0) when failed.
- */
-int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
-int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
- PCIDevice *dev);
-void kvm_irqchip_commit_routes(KVMState *s);
-
-static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
-{
- return (KVMRouteChange) { .s = s, .changes = 0 };
-}
-
-static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
-{
- if (c->changes) {
- kvm_irqchip_commit_routes(c->s);
- c->changes = 0;
- }
-}
-
-int kvm_irqchip_get_virq(KVMState *s);
-void kvm_irqchip_release_virq(KVMState *s, int virq);
-
-void kvm_add_routing_entry(KVMState *s,
- struct kvm_irq_routing_entry *entry);
-
-int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
- EventNotifier *rn, int virq);
-int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
- int virq);
-int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
- EventNotifier *rn, qemu_irq irq);
-int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
- qemu_irq irq);
-void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
-void kvm_init_irq_routing(KVMState *s);
-
-bool kvm_kernel_irqchip_allowed(void);
-bool kvm_kernel_irqchip_required(void);
-bool kvm_kernel_irqchip_split(void);
-
-/**
- * kvm_arch_irqchip_create:
- * @s: The KVMState pointer
- *
- * Allow architectures to create an in-kernel irq chip themselves.
- *
- * Returns: < 0: error
- * 0: irq chip was not created
- * > 0: irq chip was created
- */
-int kvm_arch_irqchip_create(KVMState *s);
-
-/**
- * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
- * @id: The register ID
- * @source: The pointer to the value to be set. It must point to a variable
- * of the correct type/size for the register being accessed.
- *
- * Returns: 0 on success, or a negative errno on failure.
- */
-int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
-
-/**
- * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
- * @id: The register ID
- * @target: The pointer where the value is to be stored. It must point to a
- * variable of the correct type/size for the register being accessed.
- *
- * Returns: 0 on success, or a negative errno on failure.
- */
-int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
-
-/* Notify resamplefd for EOI of specific interrupts. */
-void kvm_resample_fd_notify(int gsi);
-
-bool kvm_dirty_ring_enabled(void);
-
-uint32_t kvm_dirty_ring_size(void);
-
-void kvm_mark_guest_state_protected(void);
-
-/**
- * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
- * reported for the VM.
- */
-bool kvm_hwpoisoned_mem(void);
-
-int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);
-
-int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
-int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);
-
-int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);
-
-#endif
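
A sketch (per-target code) of the probe-then-enable pattern built on kvm_check_extension() and the kvm_vm_enable_cap() macro above; the capability chosen is only an illustration.

    static int demo_enable_dirty_log_protect(KVMState *s)
    {
        int caps = kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);

        if (caps <= 0) {
            return -ENOTSUP;                    /* kernel too old */
        }
        /* Arguments after the flags end up in cap.args[0..]. */
        return kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, caps);
    }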
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
deleted file mode 100644
index 3f3d13f..0000000
--- a/include/sysemu/kvm_int.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Internal definitions for a target's KVM support
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_KVM_INT_H
-#define QEMU_KVM_INT_H
-
-#include "exec/memory.h"
-#include "qapi/qapi-types-common.h"
-#include "qemu/accel.h"
-#include "qemu/queue.h"
-#include "sysemu/kvm.h"
-
-typedef struct KVMSlot
-{
- hwaddr start_addr;
- ram_addr_t memory_size;
- void *ram;
- int slot;
- int flags;
- int old_flags;
- /* Dirty bitmap cache for the slot */
- unsigned long *dirty_bmap;
- unsigned long dirty_bmap_size;
- /* Cache of the address space ID */
- int as_id;
- /* Cache of the offset in ram address space */
- ram_addr_t ram_start_offset;
- int guest_memfd;
- hwaddr guest_memfd_offset;
-} KVMSlot;
-
-typedef struct KVMMemoryUpdate {
- QSIMPLEQ_ENTRY(KVMMemoryUpdate) next;
- MemoryRegionSection section;
-} KVMMemoryUpdate;
-
-typedef struct KVMMemoryListener {
- MemoryListener listener;
- KVMSlot *slots;
- unsigned int nr_used_slots;
- int as_id;
- QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
- QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
-} KVMMemoryListener;
-
-#define KVM_MSI_HASHTAB_SIZE 256
-
-enum KVMDirtyRingReaperState {
- KVM_DIRTY_RING_REAPER_NONE = 0,
- /* The reaper is sleeping */
- KVM_DIRTY_RING_REAPER_WAIT,
- /* The reaper is reaping for dirty pages */
- KVM_DIRTY_RING_REAPER_REAPING,
-};
-
-/*
- * KVM reaper instance, responsible for collecting the KVM dirty bits
- * via the dirty ring.
- */
-struct KVMDirtyRingReaper {
- /* The reaper thread */
- QemuThread reaper_thr;
- volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
- volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
-};
-struct KVMState
-{
- AccelState parent_obj;
-
- int nr_slots;
- int fd;
- int vmfd;
- int coalesced_mmio;
- int coalesced_pio;
- struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
- bool coalesced_flush_in_progress;
- int vcpu_events;
-#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
- QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
-#endif
- int max_nested_state_len;
- int kvm_shadow_mem;
- bool kernel_irqchip_allowed;
- bool kernel_irqchip_required;
- OnOffAuto kernel_irqchip_split;
- bool sync_mmu;
- bool guest_state_protected;
- uint64_t manual_dirty_log_protect;
- /* The man page (and POSIX) says ioctl numbers are signed int, but
- * they're not. Linux, glibc and *BSD all treat ioctl numbers as
- * unsigned, and treating them as signed here can break things. */
- unsigned irq_set_ioctl;
- unsigned int sigmask_len;
- GHashTable *gsimap;
-#ifdef KVM_CAP_IRQ_ROUTING
- struct kvm_irq_routing *irq_routes;
- int nr_allocated_irq_routes;
- unsigned long *used_gsi_bitmap;
- unsigned int gsi_count;
-#endif
- KVMMemoryListener memory_listener;
- QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
-
- /* For "info mtree -f" to tell if an MR is registered in KVM */
- int nr_as;
- struct KVMAs {
- KVMMemoryListener *ml;
- AddressSpace *as;
- } *as;
- uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
- uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
- bool kvm_dirty_ring_with_bitmap;
- uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
- struct KVMDirtyRingReaper reaper;
- NotifyVmexitOption notify_vmexit;
- uint32_t notify_window;
- uint32_t xen_version;
- uint32_t xen_caps;
- uint16_t xen_gnttab_max_frames;
- uint16_t xen_evtchn_max_pirq;
- char *device;
-};
-
-void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
- AddressSpace *as, int as_id, const char *name);
-
-void kvm_set_max_memslot_size(hwaddr max_slot_size);
-
-/**
- * kvm_hwpoison_page_add:
- *
- * Parameters:
- * @ram_addr: the address in the RAM for the poisoned page
- *
- * Add a poisoned page to the list
- *
- * Return: None.
- */
-void kvm_hwpoison_page_add(ram_addr_t ram_addr);
-#endif
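
A sketch of the hwpoison bookkeeping described above, as arch SIGBUS handlers use it; demo_handle_hwpoison() is hypothetical and qemu_ram_addr_from_host_nofail() comes from exec/cpu-common.h.

    static void demo_handle_hwpoison(void *host_addr)
    {
        ram_addr_t ram_addr = qemu_ram_addr_from_host_nofail(host_addr);

        /* Remember the poisoned page so it can be skipped/remapped later. */
        kvm_hwpoison_page_add(ram_addr);
    }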
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
deleted file mode 100644
index 961c702..0000000
--- a/include/sysemu/kvm_xen.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Xen HVM emulation support in KVM
- *
- * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
- * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_SYSEMU_KVM_XEN_H
-#define QEMU_SYSEMU_KVM_XEN_H
-
-/* The KVM API uses these to indicate "no GPA" or "no GFN" */
-#define INVALID_GPA UINT64_MAX
-#define INVALID_GFN UINT64_MAX
-
- /* QEMU plays the rôle of dom0 for "interdomain" communication. */
-#define DOMID_QEMU 0
-
-int kvm_xen_soft_reset(void);
-uint32_t kvm_xen_get_caps(void);
-void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
-bool kvm_xen_has_vcpu_callback_vector(void);
-void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
-void kvm_xen_set_callback_asserted(void);
-int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
-uint16_t kvm_xen_get_gnttab_max_frames(void);
-uint16_t kvm_xen_get_evtchn_max_pirq(void);
-
-#define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() & \
- KVM_XEN_HVM_CONFIG_ ## cap))
-
-#define XEN_SPECIAL_AREA_ADDR 0xfeff8000UL
-#define XEN_SPECIAL_AREA_SIZE 0x4000UL
-
-#define XEN_SPECIALPAGE_CONSOLE 0
-#define XEN_SPECIALPAGE_XENSTORE 1
-
-#define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \
- XEN_SPECIALPAGE_##x)
-
-#endif /* QEMU_SYSEMU_KVM_XEN_H */
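
A sketch of the kvm_xen_has_cap() macro above, which expands to a bit test against kvm_xen_get_caps(); KVM_XEN_HVM_CONFIG_EVTCHN_SEND comes from linux/kvm.h and demo_can_fast_send() is hypothetical.

    static bool demo_can_fast_send(void)
    {
        /* Expands to kvm_xen_get_caps() & KVM_XEN_HVM_CONFIG_EVTCHN_SEND. */
        return kvm_xen_has_cap(EVTCHN_SEND);
    }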
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
deleted file mode 100644
index 0467614..0000000
--- a/include/sysemu/numa.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef SYSEMU_NUMA_H
-#define SYSEMU_NUMA_H
-
-#include "qemu/bitmap.h"
-#include "qapi/qapi-types-machine.h"
-#include "exec/cpu-common.h"
-
-struct CPUArchId;
-
-#define MAX_NODES 128
-#define NUMA_NODE_UNASSIGNED MAX_NODES
-#define NUMA_DISTANCE_MIN 10
-#define NUMA_DISTANCE_DEFAULT 20
-#define NUMA_DISTANCE_MAX 254
-#define NUMA_DISTANCE_UNREACHABLE 255
-
-/* the value of AcpiHmatLBInfo flags */
-enum {
- HMAT_LB_MEM_MEMORY = 0,
- HMAT_LB_MEM_CACHE_1ST_LEVEL = 1,
- HMAT_LB_MEM_CACHE_2ND_LEVEL = 2,
- HMAT_LB_MEM_CACHE_3RD_LEVEL = 3,
- HMAT_LB_LEVELS /* must be the last entry */
-};
-
-/* the value of AcpiHmatLBInfo data type */
-enum {
- HMAT_LB_DATA_ACCESS_LATENCY = 0,
- HMAT_LB_DATA_READ_LATENCY = 1,
- HMAT_LB_DATA_WRITE_LATENCY = 2,
- HMAT_LB_DATA_ACCESS_BANDWIDTH = 3,
- HMAT_LB_DATA_READ_BANDWIDTH = 4,
- HMAT_LB_DATA_WRITE_BANDWIDTH = 5,
- HMAT_LB_TYPES /* must be the last entry */
-};
-
-#define UINT16_BITS 16
-
-typedef struct NodeInfo {
- uint64_t node_mem;
- struct HostMemoryBackend *node_memdev;
- bool present;
- bool has_cpu;
- bool has_gi;
- uint8_t lb_info_provided;
- uint16_t initiator;
- uint8_t distance[MAX_NODES];
-} NodeInfo;
-
-typedef struct NumaNodeMem {
- uint64_t node_mem;
- uint64_t node_plugged_mem;
-} NumaNodeMem;
-
-struct HMAT_LB_Data {
- uint8_t initiator;
- uint8_t target;
- uint64_t data;
-};
-typedef struct HMAT_LB_Data HMAT_LB_Data;
-
-struct HMAT_LB_Info {
- /* Indicates whether this is memory or the specified level of memory-side cache. */
- uint8_t hierarchy;
-
- /* The type of data: access/read/write latency or bandwidth. */
- uint8_t data_type;
-
- /* The range bitmap of bandwidth for calculating common base */
- uint64_t range_bitmap;
-
- /* The common base unit for latencies or bandwidths */
- uint64_t base;
-
- /* Array to store the latencies or bandwidths */
- GArray *list;
-};
-typedef struct HMAT_LB_Info HMAT_LB_Info;
-
-struct NumaState {
- /* Number of NUMA nodes */
- int num_nodes;
-
- /* Allow setting NUMA distance for different NUMA nodes */
- bool have_numa_distance;
-
- /* Detect if HMAT support is enabled. */
- bool hmat_enabled;
-
- /* NUMA nodes information */
- NodeInfo nodes[MAX_NODES];
-
- /* NUMA nodes HMAT Locality Latency and Bandwidth Information */
- HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
-
- /* Memory Side Cache Information Structure */
- NumaHmatCacheOptions *hmat_cache[MAX_NODES][HMAT_LB_LEVELS];
-};
-typedef struct NumaState NumaState;
-
-void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp);
-void parse_numa_opts(MachineState *ms);
-void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
- Error **errp);
-void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
- Error **errp);
-void numa_complete_configuration(MachineState *ms);
-void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms);
-extern QemuOptsList qemu_numa_opts;
-void numa_cpu_pre_plug(const struct CPUArchId *slot, DeviceState *dev,
- Error **errp);
-bool numa_uses_legacy_mem(void);
-
-#endif
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
deleted file mode 100644
index b881ac6..0000000
--- a/include/sysemu/os-posix.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * posix specific declarations
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef QEMU_OS_POSIX_H
-#define QEMU_OS_POSIX_H
-
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <sys/un.h>
-
-#ifdef CONFIG_SYSMACROS
-#include <sys/sysmacros.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void os_set_line_buffering(void);
-void os_setup_early_signal_handling(void);
-void os_set_proc_name(const char *s);
-void os_setup_signal_handling(void);
-int os_set_daemonize(bool d);
-bool is_daemonized(void);
-void os_daemonize(void);
-bool os_set_runas(const char *user_id);
-void os_set_chroot(const char *path);
-void os_setup_limits(void);
-void os_setup_post(void);
-int os_mlock(void);
-
-/**
- * qemu_alloc_stack:
- * @sz: pointer to a size_t holding the requested usable stack size
- *
- * Allocate memory that can be used as a stack, for instance for
- * coroutines. If the memory cannot be allocated, this function
- * will abort (like g_malloc()). This function also inserts an
- * additional guard page to catch a potential stack overflow.
- * Note that the memory required for the guard page, alignment and
- * minimum stack size restrictions will increase the value of sz.
- *
- * The allocated stack must be freed with qemu_free_stack().
- *
- * Returns: pointer to (the lowest address of) the stack memory.
- */
-void *qemu_alloc_stack(size_t *sz);
-
-/**
- * qemu_free_stack:
- * @stack: stack to free
- * @sz: size of stack in bytes
- *
- * Free a stack allocated via qemu_alloc_stack(). Note that sz must
- * be exactly the adjusted stack size returned by qemu_alloc_stack.
- */
-void qemu_free_stack(void *stack, size_t sz);
-
-/* POSIX and Mingw32 differ in the name of the stdio lock functions. */
-
-static inline void qemu_flockfile(FILE *f)
-{
- flockfile(f);
-}
-
-static inline void qemu_funlockfile(FILE *f)
-{
- funlockfile(f);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
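
A sketch of the qemu_alloc_stack()/qemu_free_stack() contract documented above; demo_stack_roundtrip() and the requested size are hypothetical.

    static void demo_stack_roundtrip(void)
    {
        size_t sz = 1024 * 1024;                 /* requested usable size */
        void *stack = qemu_alloc_stack(&sz);     /* aborts on failure; sz grows */

        /* ... use [stack, stack + sz) as a coroutine/thread stack ... */
        qemu_free_stack(stack, sz);              /* must pass the adjusted size */
    }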
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
deleted file mode 100644
index b82a5d3..0000000
--- a/include/sysemu/os-win32.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * win32 specific declarations
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef QEMU_OS_WIN32_H
-#define QEMU_OS_WIN32_H
-
-#include <winsock2.h>
-#include <windows.h>
-#include <ws2tcpip.h>
-#include "qemu/typedefs.h"
-
-#ifdef HAVE_AFUNIX_H
-#include <afunix.h>
-#else
-/*
- * Fallback definitions of things we need in afunix.h, if not available from
- * the used Windows SDK or MinGW headers.
- */
-#define UNIX_PATH_MAX 108
-
-typedef struct sockaddr_un {
- ADDRESS_FAMILY sun_family;
- char sun_path[UNIX_PATH_MAX];
-} SOCKADDR_UN, *PSOCKADDR_UN;
-
-#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256)
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined(__aarch64__)
-/*
- * On windows-arm64, setjmp is available in only one variant, and longjmp always
- * does stack unwinding. This crashes with generated code.
- * Thus, we use another implementation of setjmp (not the Windows one), coming from
- * mingw, which never performs stack unwinding.
- */
-#undef setjmp
-#undef longjmp
-/*
- * These functions are not declared in setjmp.h because __aarch64__ defines
- * setjmp to _setjmpex instead. However, they are still defined in libmingwex.a,
- * which gets linked automatically.
- */
-int __mingw_setjmp(jmp_buf);
-void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
-#define setjmp(env) __mingw_setjmp(env)
-#define longjmp(env, val) __mingw_longjmp(env, val)
-#elif defined(_WIN64)
-/*
- * On windows-x64, setjmp is implemented by _setjmp which needs a second parameter.
- * If this parameter is NULL, longjmp does no stack unwinding.
- * That is what we need for QEMU. Passing the value of register rsp (default)
- * lets longjmp try a stack unwinding which will crash with generated code.
- */
-# undef setjmp
-# define setjmp(env) _setjmp(env, NULL)
-#endif /* __aarch64__ */
-/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
- * "longjmp and don't touch the signal masks". Since we know that the
- * savemask parameter will always be zero we can safely define these
- * in terms of setjmp/longjmp on Win32.
- */
-#define sigjmp_buf jmp_buf
-#define sigsetjmp(env, savemask) setjmp(env)
-#define siglongjmp(env, val) longjmp(env, val)
-
-/* Missing POSIX functions. Don't use MinGW-w64 macros. */
-#ifndef _POSIX_THREAD_SAFE_FUNCTIONS
-#undef gmtime_r
-struct tm *gmtime_r(const time_t *timep, struct tm *result);
-#undef localtime_r
-struct tm *localtime_r(const time_t *timep, struct tm *result);
-#endif /* _POSIX_THREAD_SAFE_FUNCTIONS */
-
-static inline void os_setup_signal_handling(void) {}
-static inline void os_daemonize(void) {}
-static inline void os_setup_post(void) {}
-static inline void os_set_proc_name(const char *dummy) {}
-void os_set_line_buffering(void);
-void os_setup_early_signal_handling(void);
-
-int getpagesize(void);
-
-#if !defined(EPROTONOSUPPORT)
-# define EPROTONOSUPPORT EINVAL
-#endif
-
-static inline int os_set_daemonize(bool d)
-{
- if (d) {
- return -ENOTSUP;
- }
- return 0;
-}
-
-static inline bool is_daemonized(void)
-{
- return false;
-}
-
-static inline int os_mlock(void)
-{
- return -ENOSYS;
-}
-
-static inline void os_setup_limits(void)
-{
- return;
-}
-
-#define fsync _commit
-
-#if !defined(lseek)
-# define lseek _lseeki64
-#endif
-
-int qemu_ftruncate64(int, int64_t);
-
-#if !defined(ftruncate)
-# define ftruncate qemu_ftruncate64
-#endif
-
-static inline char *realpath(const char *path, char *resolved_path)
-{
- _fullpath(resolved_path, path, _MAX_PATH);
- return resolved_path;
-}
-
-/*
- * Older versions of MinGW do not import _lock_file and _unlock_file properly.
- * This was fixed for v6.0.0 with commit b48e3ac8969d.
- */
-static inline void qemu_flockfile(FILE *f)
-{
-#ifdef HAVE__LOCK_FILE
- _lock_file(f);
-#endif
-}
-
-static inline void qemu_funlockfile(FILE *f)
-{
-#ifdef HAVE__LOCK_FILE
- _unlock_file(f);
-#endif
-}
-
-/* Helper for WSAEventSelect, to report errors */
-bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
- long lNetworkEvents, Error **errp);
-
-bool qemu_socket_unselect(int sockfd, Error **errp);
-
-/* We wrap all the socket functions so that we can set errno based on
- * WSAGetLastError(), and use file-descriptors instead of SOCKET.
- */
-
-/*
- * qemu_close_socket_osfhandle:
- * @fd: a file descriptor associated with a SOCKET
- *
- * Close only the C run-time file descriptor; leave the SOCKET open.
- *
- * Returns zero on success. On error, -1 is returned, and errno is set to
- * indicate the error.
- */
-int qemu_close_socket_osfhandle(int fd);
-
-#undef close
-#define close qemu_close_wrap
-int qemu_close_wrap(int fd);
-
-#undef connect
-#define connect qemu_connect_wrap
-int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
- socklen_t addrlen);
-
-#undef listen
-#define listen qemu_listen_wrap
-int qemu_listen_wrap(int sockfd, int backlog);
-
-#undef bind
-#define bind qemu_bind_wrap
-int qemu_bind_wrap(int sockfd, const struct sockaddr *addr,
- socklen_t addrlen);
-
-#undef socket
-#define socket qemu_socket_wrap
-int qemu_socket_wrap(int domain, int type, int protocol);
-
-#undef accept
-#define accept qemu_accept_wrap
-int qemu_accept_wrap(int sockfd, struct sockaddr *addr,
- socklen_t *addrlen);
-
-#undef shutdown
-#define shutdown qemu_shutdown_wrap
-int qemu_shutdown_wrap(int sockfd, int how);
-
-#undef ioctlsocket
-#define ioctlsocket qemu_ioctlsocket_wrap
-int qemu_ioctlsocket_wrap(int fd, int req, void *val);
-
-#undef getsockopt
-#define getsockopt qemu_getsockopt_wrap
-int qemu_getsockopt_wrap(int sockfd, int level, int optname,
- void *optval, socklen_t *optlen);
-
-#undef setsockopt
-#define setsockopt qemu_setsockopt_wrap
-int qemu_setsockopt_wrap(int sockfd, int level, int optname,
- const void *optval, socklen_t optlen);
-
-#undef getpeername
-#define getpeername qemu_getpeername_wrap
-int qemu_getpeername_wrap(int sockfd, struct sockaddr *addr,
- socklen_t *addrlen);
-
-#undef getsockname
-#define getsockname qemu_getsockname_wrap
-int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr,
- socklen_t *addrlen);
-
-#undef send
-#define send qemu_send_wrap
-ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags);
-
-#undef sendto
-#define sendto qemu_sendto_wrap
-ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags,
- const struct sockaddr *addr, socklen_t addrlen);
-
-#undef recv
-#define recv qemu_recv_wrap
-ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags);
-
-#undef recvfrom
-#define recvfrom qemu_recvfrom_wrap
-ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
- struct sockaddr *addr, socklen_t *addrlen);
-
-EXCEPTION_DISPOSITION
-win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*,
- struct _CONTEXT*, void*);
-
-void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp);
-void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
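As a side note on the sigsetjmp()/siglongjmp() mapping deleted above, the pattern it exists to support looks roughly like the following sketch. It is not part of this diff; the function and variable names are invented, and QEMU always passes a zero savemask, as the comment states.

    #include <setjmp.h>

    static sigjmp_buf cpu_exit_env;   /* plain jmp_buf on Win32 per the #define */

    static void enter_generated_code(void)
    {
        if (sigsetjmp(cpu_exit_env, 0) == 0) {
            /* first return: run translated guest code with the jump armed */
        } else {
            /* second return: we came back here via siglongjmp() below */
        }
    }

    static void exit_generated_code(void)
    {
        /* on Win32 this is longjmp(), which must not unwind the stack,
         * hence the _setjmp(env, NULL) / __mingw_setjmp() overrides above */
        siglongjmp(cpu_exit_env, 1);
    }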
diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h
deleted file mode 100644
index c161d75..0000000
--- a/include/sysemu/qtest.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Test Server
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QTEST_H
-#define QTEST_H
-
-#include "chardev/char.h"
-
-extern bool qtest_allowed;
-
-static inline bool qtest_enabled(void)
-{
- return qtest_allowed;
-}
-
-#ifndef CONFIG_USER_ONLY
-void qtest_send_prefix(CharBackend *chr);
-void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...);
-void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words));
-bool qtest_driver(void);
-
-void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
-
-void qtest_server_set_send_handler(void (*send)(void *, const char *),
- void *opaque);
-void qtest_server_inproc_recv(void *opaque, const char *buf);
-#endif
-
-#endif
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
deleted file mode 100644
index f229b21..0000000
--- a/include/sysemu/replay.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * QEMU replay (system interface)
- *
- * Copyright (c) 2010-2015 Institute for System Programming
- * of the Russian Academy of Sciences.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-#ifndef SYSEMU_REPLAY_H
-#define SYSEMU_REPLAY_H
-
-#ifdef CONFIG_USER_ONLY
-#error Cannot include this header from user emulation
-#endif
-
-#include "exec/replay-core.h"
-#include "qapi/qapi-types-misc.h"
-#include "qapi/qapi-types-run-state.h"
-#include "qapi/qapi-types-ui.h"
-#include "block/aio.h"
-
-/* replay clock kinds */
-enum ReplayClockKind {
- /* host_clock */
- REPLAY_CLOCK_HOST,
- /* virtual_rt_clock */
- REPLAY_CLOCK_VIRTUAL_RT,
- REPLAY_CLOCK_COUNT
-};
-typedef enum ReplayClockKind ReplayClockKind;
-
-/* IDs of the checkpoints */
-enum ReplayCheckpoint {
- CHECKPOINT_CLOCK_WARP_START,
- CHECKPOINT_CLOCK_WARP_ACCOUNT,
- CHECKPOINT_RESET_REQUESTED,
- CHECKPOINT_SUSPEND_REQUESTED,
- CHECKPOINT_CLOCK_VIRTUAL,
- CHECKPOINT_CLOCK_HOST,
- CHECKPOINT_CLOCK_VIRTUAL_RT,
- CHECKPOINT_INIT,
- CHECKPOINT_RESET,
- CHECKPOINT_COUNT
-};
-typedef enum ReplayCheckpoint ReplayCheckpoint;
-
-typedef struct ReplayNetState ReplayNetState;
-
-/* Name of the initial VM snapshot */
-extern char *replay_snapshot;
-
-/* Replay locking
- *
- * The locks are needed to protect the shared structures and log file
- * when doing record/replay. They also are the main sync-point between
- * the main-loop thread and the vCPU thread. This role was
- * previously filled by the BQL, whose scope across the code is
- * being reduced. The locks ensure blocks of events stay
- * sequential and reproducible.
- */
-
-void replay_mutex_lock(void);
-void replay_mutex_unlock(void);
-
-/* Processing the instructions */
-
-/*! Returns number of executed instructions. */
-uint64_t replay_get_current_icount(void);
-/*! Returns number of instructions to execute in replay mode. */
-int replay_get_instructions(void);
-/*! Updates instructions counter in replay mode. */
-void replay_account_executed_instructions(void);
-
-/**
- * replay_can_wait: check if we should pause for wait-io
- */
-bool replay_can_wait(void);
-
-/* Processing clocks and other time sources */
-
-/*! Save the specified clock */
-int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
- int64_t raw_icount);
-/*! Read the specified clock from the log or return cached data */
-int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
-/*! Saves or reads the clock depending on the current replay mode. */
-#define REPLAY_CLOCK(clock, value) \
- !icount_enabled() ? (value) : \
- (replay_mode == REPLAY_MODE_PLAY \
- ? replay_read_clock((clock), icount_get_raw()) \
- : replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), icount_get_raw()) \
- : (value))
-#define REPLAY_CLOCK_LOCKED(clock, value) \
- !icount_enabled() ? (value) : \
- (replay_mode == REPLAY_MODE_PLAY \
- ? replay_read_clock((clock), icount_get_raw_locked()) \
- : replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), icount_get_raw_locked()) \
- : (value))
-
-/* Events */
-
-/*! Called when qemu shutdown is requested. */
-void replay_shutdown_request(ShutdownCause cause);
-/*! Should be called at checkpoints in the execution.
-    Checkpoints that were not met are skipped.
-    Saves the checkpoint in RECORD mode and validates it in PLAY mode.
-    Returns 0 in PLAY mode if the checkpoint was not found.
-    Returns 1 in all other cases. */
-bool replay_checkpoint(ReplayCheckpoint checkpoint);
-/*! Used to determine whether a checkpoint or async event is pending.
- Does not proceed to the next event in the log. */
-bool replay_has_event(void);
-/*
- * Processes the async events added to the queue (while recording)
- * or reads the events from the file (while replaying).
- */
-void replay_async_events(void);
-
-/* Asynchronous events queue */
-
-/*! Disables storing events in the queue */
-void replay_disable_events(void);
-/*! Enables storing events in the queue */
-void replay_enable_events(void);
-/*! Returns true when saving events is enabled */
-bool replay_events_enabled(void);
-/* Flushes events queue */
-void replay_flush_events(void);
-/*! Adds bottom half event to the queue */
-void replay_bh_schedule_event(QEMUBH *bh);
-/* Adds oneshot bottom half event to the queue */
-void replay_bh_schedule_oneshot_event(AioContext *ctx,
- QEMUBHFunc *cb, void *opaque);
-/*! Adds input event to the queue */
-void replay_input_event(QemuConsole *src, InputEvent *evt);
-/*! Adds input sync event to the queue */
-void replay_input_sync_event(void);
-/*! Adds block layer event to the queue */
-void replay_block_event(QEMUBH *bh, uint64_t id);
-/*! Returns ID for the next block event */
-uint64_t blkreplay_next_id(void);
-
-/* Character device */
-
-/*! Registers char driver to save its events */
-void replay_register_char_driver(struct Chardev *chr);
-/*! Saves write to char device event to the log */
-void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len);
-/*! Writes char write return value to the replay log. */
-void replay_char_write_event_save(int res, int offset);
-/*! Reads char write return value from the replay log. */
-void replay_char_write_event_load(int *res, int *offset);
-/*! Reads information about read_all character event. */
-int replay_char_read_all_load(uint8_t *buf);
-/*! Writes character read_all error code into the replay log. */
-void replay_char_read_all_save_error(int res);
-/*! Writes character read_all execution result into the replay log. */
-void replay_char_read_all_save_buf(uint8_t *buf, int offset);
-
-/* Network */
-
-/*! Registers replay network filter attached to some backend. */
-ReplayNetState *replay_register_net(NetFilterState *nfs);
-/*! Unregisters replay network filter. */
-void replay_unregister_net(ReplayNetState *rns);
-/*! Called to write network packet to the replay log. */
-void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
- const struct iovec *iov, int iovcnt);
-
-/* Audio */
-
-/*! Saves/restores number of played samples of audio out operation. */
-void replay_audio_out(size_t *played);
-/*! Saves/restores recorded samples of audio in operation. */
-void replay_audio_in(size_t *recorded, void *samples, size_t *wpos, size_t size);
-
-/* VM state operations */
-
-/*! Called at the start of execution.
- Loads or saves initial vmstate depending on execution mode. */
-void replay_vmstate_init(void);
-/*! Called to ensure that replay state is consistent and VM snapshot
- can be created */
-bool replay_can_snapshot(void);
-
-#endif
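For orientation, here is a minimal sketch (not from the patch) of how a clock read site might use the REPLAY_CLOCK() macro removed above; get_host_clock_ns() is an invented stand-in for the real clock source.

    static int64_t read_host_clock_for_guest(void)
    {
        int64_t value = get_host_clock_ns();    /* hypothetical raw read */

        /* record mode logs 'value', play mode returns the logged value,
         * and without icount the raw value passes through unchanged */
        return REPLAY_CLOCK(REPLAY_CLOCK_HOST, value);
    }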
diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h
deleted file mode 100644
index ae43604..0000000
--- a/include/sysemu/reset.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Reset handlers.
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2016 Red Hat, Inc.
- * Copyright (c) 2024 Linaro, Ltd.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef QEMU_SYSEMU_RESET_H
-#define QEMU_SYSEMU_RESET_H
-
-#include "qapi/qapi-events-run-state.h"
-
-typedef void QEMUResetHandler(void *opaque);
-
-/**
- * qemu_register_resettable: Register an object to be reset
- * @obj: object to be reset: it must implement the Resettable interface
- *
- * Register @obj on the list of objects which will be reset when the
- * simulation is reset. These objects will be reset in the order
- * they were added, using the three-phase Resettable protocol,
- * so first all objects go through the enter phase, then all objects
- * go through the hold phase, and then finally all go through the
- * exit phase.
- *
- * It is not permitted to register or unregister reset functions or
- * resettable objects from within any of the reset phase methods of @obj.
- *
- * We assume that the caller holds the BQL.
- */
-void qemu_register_resettable(Object *obj);
-
-/**
- * qemu_unregister_resettable: Unregister an object to be reset
- * @obj: object to unregister
- *
- * Remove @obj from the list of objects which are reset when the
- * simulation is reset. It must have been previously added to
- * the list via qemu_register_resettable().
- *
- * We assume that the caller holds the BQL.
- */
-void qemu_unregister_resettable(Object *obj);
-
-/**
- * qemu_register_reset: Register a callback for system reset
- * @func: function to call
- * @opaque: opaque data to pass to @func
- *
- * Register @func on the list of functions which are called when the
- * entire system is reset. Functions registered with this API and
- * Resettable objects registered with qemu_register_resettable() are
- * handled together, in the order in which they were registered.
- * Functions registered with this API are called in the 'hold' phase
- * of the 3-phase reset.
- *
- * In general this function should not be used in new code where possible;
- * for instance, device model reset is better accomplished using the
- * methods on DeviceState.
- *
- * It is not permitted to register or unregister reset functions or
- * resettable objects from within the @func callback.
- *
- * We assume that the caller holds the BQL.
- */
-void qemu_register_reset(QEMUResetHandler *func, void *opaque);
-
-/**
- * qemu_register_reset_nosnapshotload: Register a callback for system reset
- * @func: function to call
- * @opaque: opaque data to pass to @func
- *
- * This is the same as qemu_register_reset(), except that @func is
- * not called if the reason that the system is being reset is to
- * put it into a clean state prior to loading a snapshot (i.e. for
- * SHUTDOWN_CAUSE_SNAPSHOT_LOAD).
- */
-void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque);
-
-/**
- * qemu_unregister_reset: Unregister a system reset callback
- * @func: function registered with qemu_register_reset()
- * @opaque: the same opaque data that was passed to qemu_register_reset()
- *
- * Undo the effects of a qemu_register_reset(). The @func and @opaque
- * must both match the arguments originally used with qemu_register_reset().
- *
- * We assume that the caller holds the BQL.
- */
-void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
-
-/**
- * qemu_devices_reset: Perform a complete system reset
- * @reason: reason for the reset
- *
- * This function performs the low-level work needed to do a complete reset
- * of the system (calling all the callbacks registered with
- * qemu_register_reset() and resetting all the Resettable objects registered
- * with qemu_register_resettable()). It should only be called by the code in a
- * MachineClass reset method.
- *
- * If you want to trigger a system reset from, for instance, a device
- * model, don't use this function. Use qemu_system_reset_request().
- */
-void qemu_devices_reset(ShutdownCause reason);
-
-#endif
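A minimal sketch (not part of the diff) of the legacy callback style documented above; the device type and field are invented.

    typedef struct MyDeviceState {
        uint32_t ctrl;
    } MyDeviceState;

    static void my_device_reset(void *opaque)
    {
        MyDeviceState *s = opaque;
        s->ctrl = 0;                      /* runs in the 'hold' phase */
    }

    static void my_device_init(MyDeviceState *s)
    {
        /* caller must hold the BQL, as noted in the comments above */
        qemu_register_reset(my_device_reset, s);
    }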
diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h
deleted file mode 100644
index 0fc8ad6..0000000
--- a/include/sysemu/rtc.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * RTC configuration and clock read
- *
- * Copyright (c) 2003-2021 QEMU contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef SYSEMU_RTC_H
-#define SYSEMU_RTC_H
-
-/**
- * qemu_get_timedate: Get the current RTC time
- * @tm: struct tm to fill in with RTC time
- * @offset: offset in seconds to adjust the RTC time by before
- * converting to struct tm format.
- *
- * This function fills in @tm with the current RTC time, as adjusted
- * by @offset (for example, if @offset is 3600 then the returned time/date
- * will be one hour further ahead than the current RTC time).
- *
- * The usual use is by RTC device models, which should call this function
- * to find the time/date value that they should return to the guest
- * when it reads the RTC registers.
- *
- * The behaviour of the clock whose value this function returns will
- * depend on the -rtc command line option passed by the user.
- */
-void qemu_get_timedate(struct tm *tm, time_t offset);
-
-/**
- * qemu_timedate_diff: Return difference between a struct tm and the RTC
- * @tm: struct tm containing the date/time to compare against
- *
- * Returns the difference in seconds between the RTC clock time
- * and the date/time specified in @tm. For example, if @tm specifies
- * a timestamp one hour further ahead than the current RTC time
- * then this function will return 3600.
- */
-time_t qemu_timedate_diff(struct tm *tm);
-
-#endif
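A minimal usage sketch (not from the patch), assuming an RTC device model that keeps a guest-programmed offset in seconds:

    #include <time.h>

    static void rtc_read_registers(time_t guest_offset)
    {
        struct tm now;

        /* current RTC time, shifted by the guest-programmed offset */
        qemu_get_timedate(&now, guest_offset);
        /* ... encode 'now' into the guest-visible register format ... */
    }

    static time_t rtc_write_registers(struct tm *guest_time)
    {
        /* store the delta between the written time and the RTC clock */
        return qemu_timedate_diff(guest_time);
    }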
diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h
deleted file mode 100644
index e210a37..0000000
--- a/include/sysemu/runstate.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef SYSEMU_RUNSTATE_H
-#define SYSEMU_RUNSTATE_H
-
-#include "qapi/qapi-types-run-state.h"
-#include "qemu/notify.h"
-
-bool runstate_check(RunState state);
-void runstate_set(RunState new_state);
-RunState runstate_get(void);
-bool runstate_is_running(void);
-bool runstate_needs_reset(void);
-
-typedef void VMChangeStateHandler(void *opaque, bool running, RunState state);
-
-VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
- void *opaque);
-VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
- VMChangeStateHandler *cb, void *opaque, int priority);
-VMChangeStateEntry *
-qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
- VMChangeStateHandler *prepare_cb,
- void *opaque, int priority);
-VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
- VMChangeStateHandler *cb,
- void *opaque);
-VMChangeStateEntry *qdev_add_vm_change_state_handler_full(
- DeviceState *dev, VMChangeStateHandler *cb,
- VMChangeStateHandler *prepare_cb, void *opaque);
-void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
-/**
- * vm_state_notify: Notify the state of the VM
- *
- * @running: whether the VM is running or not.
- * @state: the #RunState of the VM.
- */
-void vm_state_notify(bool running, RunState state);
-
-static inline bool shutdown_caused_by_guest(ShutdownCause cause)
-{
- return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN;
-}
-
-/*
- * In a "live" state, the vcpu clock is ticking, and the runstate notifiers
- * think we are running.
- */
-static inline bool runstate_is_live(RunState state)
-{
- return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED;
-}
-
-void vm_start(void);
-
-/**
- * vm_prepare_start: Prepare for starting/resuming the VM
- *
- * @step_pending: whether any of the CPUs is about to be single-stepped by gdb
- */
-int vm_prepare_start(bool step_pending);
-
-/**
- * vm_resume: If @state is a live state, start the vm and set the state,
- * else just set the state.
- *
- * @state: the state to restore
- */
-void vm_resume(RunState state);
-
-int vm_stop(RunState state);
-int vm_stop_force_state(RunState state);
-int vm_shutdown(void);
-void vm_set_suspended(bool suspended);
-bool vm_get_suspended(void);
-
-typedef enum WakeupReason {
- /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
- QEMU_WAKEUP_REASON_NONE = 0,
- QEMU_WAKEUP_REASON_RTC,
- QEMU_WAKEUP_REASON_PMTIMER,
- QEMU_WAKEUP_REASON_OTHER,
-} WakeupReason;
-
-void qemu_system_reset_request(ShutdownCause reason);
-void qemu_system_suspend_request(void);
-void qemu_register_suspend_notifier(Notifier *notifier);
-bool qemu_wakeup_suspend_enabled(void);
-void qemu_system_wakeup_request(WakeupReason reason, Error **errp);
-void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
-void qemu_register_wakeup_notifier(Notifier *notifier);
-void qemu_register_wakeup_support(void);
-void qemu_system_shutdown_request_with_code(ShutdownCause reason,
- int exit_code);
-void qemu_system_shutdown_request(ShutdownCause reason);
-void qemu_system_powerdown_request(void);
-void qemu_register_powerdown_notifier(Notifier *notifier);
-void qemu_register_shutdown_notifier(Notifier *notifier);
-void qemu_system_debug_request(void);
-void qemu_system_vmstop_request(RunState reason);
-void qemu_system_vmstop_request_prepare(void);
-bool qemu_vmstop_requested(RunState *r);
-ShutdownCause qemu_shutdown_requested_get(void);
-ShutdownCause qemu_reset_requested_get(void);
-void qemu_system_killed(int signal, pid_t pid);
-void qemu_system_reset(ShutdownCause reason);
-void qemu_system_guest_panicked(GuestPanicInformation *info);
-void qemu_system_guest_crashloaded(GuestPanicInformation *info);
-void qemu_system_guest_pvshutdown(void);
-bool qemu_system_dump_in_progress(void);
-
-#endif
-
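A minimal sketch (not part of the diff) of registering a VM change state handler with the API above; the names are invented.

    static void my_vm_state_change(void *opaque, bool running, RunState state)
    {
        if (running) {
            /* re-arm timers, resume device activity, ... */
        } else {
            /* quiesce; 'state' says why, e.g. RUN_STATE_PAUSED */
        }
    }

    static void my_device_realize(void *opaque)
    {
        qemu_add_vm_change_state_handler(my_vm_state_change, opaque);
    }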
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
deleted file mode 100644
index 5b4397e..0000000
--- a/include/sysemu/sysemu.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef SYSEMU_H
-#define SYSEMU_H
-/* Misc. things related to the system emulator. */
-
-#include "qemu/timer.h"
-#include "qemu/notify.h"
-#include "qemu/uuid.h"
-
-/* vl.c */
-
-extern int only_migratable;
-extern const char *qemu_name;
-extern QemuUUID qemu_uuid;
-extern bool qemu_uuid_set;
-
-const char *qemu_get_vm_name(void);
-
-void qemu_add_exit_notifier(Notifier *notify);
-void qemu_remove_exit_notifier(Notifier *notify);
-
-void qemu_add_machine_init_done_notifier(Notifier *notify);
-void qemu_remove_machine_init_done_notifier(Notifier *notify);
-
-void configure_rtc(QemuOpts *opts);
-
-void qemu_init_subsystems(void);
-
-extern int autostart;
-
-typedef enum {
- VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL,
- VGA_TCX, VGA_CG3, VGA_DEVICE, VGA_VIRTIO,
- VGA_TYPE_MAX,
-} VGAInterfaceType;
-
-extern int vga_interface_type;
-extern bool vga_interface_created;
-
-extern int graphic_width;
-extern int graphic_height;
-extern int graphic_depth;
-extern int display_opengl;
-extern const char *keyboard_layout;
-extern int graphic_rotate;
-extern int old_param;
-extern uint8_t *boot_splash_filedata;
-extern bool enable_mlock;
-extern bool enable_cpu_pm;
-extern QEMUClockType rtc_clock;
-
-#define MAX_OPTION_ROMS 16
-typedef struct QEMUOptionRom {
- const char *name;
- int32_t bootindex;
-} QEMUOptionRom;
-extern QEMUOptionRom option_rom[MAX_OPTION_ROMS];
-extern int nb_option_roms;
-
-#define MAX_PROM_ENVS 128
-extern const char *prom_envs[MAX_PROM_ENVS];
-extern unsigned int nb_prom_envs;
-
-/* serial ports */
-
-/* Return the Chardev for serial port i, or NULL if none */
-Chardev *serial_hd(int i);
-
-/* parallel ports */
-
-#define MAX_PARALLEL_PORTS 3
-
-extern Chardev *parallel_hds[MAX_PARALLEL_PORTS];
-
-void add_boot_device_path(int32_t bootindex, DeviceState *dev,
- const char *suffix);
-char *get_boot_devices_list(size_t *size);
-
-DeviceState *get_boot_device(uint32_t position);
-void check_boot_index(int32_t bootindex, Error **errp);
-void del_boot_device_path(DeviceState *dev, const char *suffix);
-void device_add_bootindex_property(Object *obj, int32_t *bootindex,
- const char *name, const char *suffix,
- DeviceState *dev);
-void restore_boot_order(void *opaque);
-void validate_bootdevices(const char *devices, Error **errp);
-void add_boot_device_lchs(DeviceState *dev, const char *suffix,
- uint32_t lcyls, uint32_t lheads, uint32_t lsecs);
-void del_boot_device_lchs(DeviceState *dev, const char *suffix);
-char *get_boot_devices_lchs_list(size_t *size);
-
-/* handler to set the boot_device order for a specific type of MachineClass */
-typedef void QEMUBootSetHandler(void *opaque, const char *boot_order,
- Error **errp);
-void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
-void qemu_boot_set(const char *boot_order, Error **errp);
-
-bool defaults_enabled(void);
-
-void qemu_init(int argc, char **argv);
-int qemu_main_loop(void);
-void qemu_cleanup(int);
-
-extern QemuOptsList qemu_legacy_drive_opts;
-extern QemuOptsList qemu_common_drive_opts;
-extern QemuOptsList qemu_drive_opts;
-extern QemuOptsList bdrv_runtime_opts;
-extern QemuOptsList qemu_chardev_opts;
-extern QemuOptsList qemu_device_opts;
-extern QemuOptsList qemu_netdev_opts;
-extern QemuOptsList qemu_nic_opts;
-extern QemuOptsList qemu_net_opts;
-extern QemuOptsList qemu_global_opts;
-extern QemuOptsList qemu_semihosting_config_opts;
-
-#endif
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
deleted file mode 100644
index 5e2ca9a..0000000
--- a/include/sysemu/tcg.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * QEMU TCG support
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-/* header to be included in non-TCG-specific code */
-
-#ifndef SYSEMU_TCG_H
-#define SYSEMU_TCG_H
-
-#ifdef CONFIG_TCG
-extern bool tcg_allowed;
-#define tcg_enabled() (tcg_allowed)
-#else
-#define tcg_enabled() 0
-#endif
-
-#endif
diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h
deleted file mode 100644
index 7fabafe..0000000
--- a/include/sysemu/tpm_backend.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * QEMU TPM Backend
- *
- * Copyright IBM, Corp. 2013
- *
- * Authors:
- * Stefan Berger <stefanb@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef TPM_BACKEND_H
-#define TPM_BACKEND_H
-
-#include "qom/object.h"
-#include "qemu/option.h"
-#include "sysemu/tpm.h"
-#include "qapi/error.h"
-
-#ifdef CONFIG_TPM
-
-#define TYPE_TPM_BACKEND "tpm-backend"
-OBJECT_DECLARE_TYPE(TPMBackend, TPMBackendClass,
- TPM_BACKEND)
-
-
-typedef struct TPMBackendCmd {
- uint8_t locty;
- const uint8_t *in;
- uint32_t in_len;
- uint8_t *out;
- uint32_t out_len;
- bool selftest_done;
-} TPMBackendCmd;
-
-struct TPMBackend {
- Object parent;
-
- /*< protected >*/
- TPMIf *tpmif;
- bool opened;
- bool had_startup_error;
- TPMBackendCmd *cmd;
-
- /* <public> */
- char *id;
-
- QLIST_ENTRY(TPMBackend) list;
-};
-
-struct TPMBackendClass {
- ObjectClass parent_class;
-
- enum TpmType type;
- const QemuOptDesc *opts;
- /* get a descriptive text of the backend to display to the user */
- const char *desc;
-
- TPMBackend *(*create)(QemuOpts *opts);
-
- /* start up the TPM on the backend - optional */
- int (*startup_tpm)(TPMBackend *t, size_t buffersize);
-
- /* optional */
- void (*reset)(TPMBackend *t);
-
- void (*cancel_cmd)(TPMBackend *t);
-
- /* optional */
- bool (*get_tpm_established_flag)(TPMBackend *t);
-
- /* optional */
- int (*reset_tpm_established_flag)(TPMBackend *t, uint8_t locty);
-
- TPMVersion (*get_tpm_version)(TPMBackend *t);
-
- size_t (*get_buffer_size)(TPMBackend *t);
-
- TpmTypeOptions *(*get_tpm_options)(TPMBackend *t);
-
- void (*handle_request)(TPMBackend *s, TPMBackendCmd *cmd, Error **errp);
-};
-
-/**
- * tpm_backend_get_type:
- * @s: the backend
- *
- * Returns the TpmType of the backend.
- */
-enum TpmType tpm_backend_get_type(TPMBackend *s);
-
-/**
- * tpm_backend_init:
- * @s: the backend to be initialized
- * @tpmif: TPM interface
- * @datacb: callback for sending data to frontend
- * @errp: a pointer to return the #Error object if an error occurs.
- *
- * Initialize the backend with the given variables.
- *
- * Returns 0 on success.
- */
-int tpm_backend_init(TPMBackend *s, TPMIf *tpmif, Error **errp);
-
-/**
- * tpm_backend_startup_tpm:
- * @s: the backend whose TPM support is to be started
- * @buffersize: the buffer size the TPM is supposed to use,
- * 0 to leave it as-is
- *
- * Returns 0 on success.
- */
-int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize);
-
-/**
- * tpm_backend_had_startup_error:
- * @s: the backend to query for a startup error
- *
- * Check whether the backend had an error during startup. Returns
- * false if no error occurred and the backend can be used, true
- * otherwise.
- */
-bool tpm_backend_had_startup_error(TPMBackend *s);
-
-/**
- * tpm_backend_deliver_request:
- * @s: the backend to send the request to
- * @cmd: the command to deliver
- *
- * Send a request to the backend. The backend will then send the request
- * to the TPM implementation.
- */
-void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd);
-
-/**
- * tpm_backend_reset:
- * @s: the backend to reset
- *
- * Reset the backend into a well defined state with all previous errors
- * reset.
- */
-void tpm_backend_reset(TPMBackend *s);
-
-/**
- * tpm_backend_cancel_cmd:
- * @s: the backend
- *
- * Cancel any ongoing command being processed by the TPM implementation
- * on behalf of the QEMU guest.
- */
-void tpm_backend_cancel_cmd(TPMBackend *s);
-
-/**
- * tpm_backend_get_tpm_established_flag:
- * @s: the backend
- *
- * Get the TPM establishment flag. This function may be called very
- * frequently by the frontend since, for example, in the TIS implementation
- * this flag is part of a register.
- */
-bool tpm_backend_get_tpm_established_flag(TPMBackend *s);
-
-/**
- * tpm_backend_reset_tpm_established_flag:
- * @s: the backend
- * @locty: the locality number
- *
- * Reset the TPM establishment flag.
- */
-int tpm_backend_reset_tpm_established_flag(TPMBackend *s, uint8_t locty);
-
-/**
- * tpm_backend_get_tpm_version:
- * @s: the backend to call into
- *
- * Get the TPM Version that is emulated at the backend.
- *
- * Returns TPMVersion.
- */
-TPMVersion tpm_backend_get_tpm_version(TPMBackend *s);
-
-/**
- * tpm_backend_get_buffer_size:
- * @s: the backend to call into
- *
- * Get the TPM's buffer size.
- *
- * Returns buffer size.
- */
-size_t tpm_backend_get_buffer_size(TPMBackend *s);
-
-/**
- * tpm_backend_finish_sync:
- * @s: the backend to call into
- *
- * Finish the pending command synchronously (this will call aio_poll()
- * on the QEMU main AioContext until it completes)
- */
-void tpm_backend_finish_sync(TPMBackend *s);
-
-/**
- * tpm_backend_query_tpm:
- * @s: the backend
- *
- * Query backend tpm info
- *
- * Returns newly allocated TPMInfo
- */
-TPMInfo *tpm_backend_query_tpm(TPMBackend *s);
-
-TPMBackend *qemu_find_tpm_be(const char *id);
-
-#endif /* CONFIG_TPM */
-
-#endif /* TPM_BACKEND_H */
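A rough sketch (not from the patch) of the call sequence a TPM frontend would drive through this API; error handling is simplified and the names are invented.

    static int tpm_frontend_start(TPMBackend *be, TPMIf *ti, size_t bufsize,
                                  Error **errp)
    {
        if (tpm_backend_init(be, ti, errp) < 0) {
            return -1;
        }
        return tpm_backend_startup_tpm(be, bufsize);  /* 0 keeps current size */
    }

    static void tpm_frontend_send(TPMBackend *be, TPMBackendCmd *cmd)
    {
        tpm_backend_deliver_request(be, cmd);  /* request runs asynchronously */
        tpm_backend_finish_sync(be);           /* or wait for it right away */
    }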
diff --git a/include/sysemu/tpm_util.h b/include/sysemu/tpm_util.h
deleted file mode 100644
index 08f0517..0000000
--- a/include/sysemu/tpm_util.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * TPM utility functions
- *
- * Copyright (c) 2010 - 2015 IBM Corporation
- * Authors:
- * Stefan Berger <stefanb@us.ibm.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
- */
-
-#ifndef SYSEMU_TPM_UTIL_H
-#define SYSEMU_TPM_UTIL_H
-
-#include "sysemu/tpm.h"
-#include "qemu/bswap.h"
-
-void tpm_util_write_fatal_error_response(uint8_t *out, uint32_t out_len);
-
-bool tpm_util_is_selftest(const uint8_t *in, uint32_t in_len);
-
-int tpm_util_test_tpmdev(int tpm_fd, TPMVersion *tpm_version);
-
-static inline uint16_t tpm_cmd_get_tag(const void *b)
-{
- return lduw_be_p(b);
-}
-
-static inline void tpm_cmd_set_tag(void *b, uint16_t tag)
-{
- stw_be_p(b, tag);
-}
-
-static inline uint32_t tpm_cmd_get_size(const void *b)
-{
- return ldl_be_p(b + 2);
-}
-
-static inline void tpm_cmd_set_size(void *b, uint32_t size)
-{
- stl_be_p(b + 2, size);
-}
-
-static inline uint32_t tpm_cmd_get_ordinal(const void *b)
-{
- return ldl_be_p(b + 6);
-}
-
-static inline uint32_t tpm_cmd_get_errcode(const void *b)
-{
- return ldl_be_p(b + 6);
-}
-
-static inline void tpm_cmd_set_error(void *b, uint32_t error)
-{
- stl_be_p(b + 6, error);
-}
-
-void tpm_util_show_buffer(const unsigned char *buffer,
- size_t buffer_size, const char *string);
-
-#endif /* SYSEMU_TPM_UTIL_H */
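For reference, a small sketch (not part of the diff) showing what the inline accessors above decode: a big-endian TPM command header with a 16-bit tag, a 32-bit size at offset 2, and a 32-bit ordinal (or error code) at offset 6.

    static bool tpm_cmd_header_is_sane(const uint8_t *buf, uint32_t buf_len)
    {
        uint16_t tag  = tpm_cmd_get_tag(buf);
        uint32_t size = tpm_cmd_get_size(buf);
        uint32_t ord  = tpm_cmd_get_ordinal(buf);

        (void)tag;
        (void)ord;
        /* the declared size must not exceed the buffer we actually hold */
        return size >= 10 && size <= buf_len;
    }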
diff --git a/include/sysemu/vhost-user-backend.h b/include/sysemu/vhost-user-backend.h
deleted file mode 100644
index 327b0b8..0000000
--- a/include/sysemu/vhost-user-backend.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * QEMU vhost-user backend
- *
- * Copyright (C) 2018 Red Hat Inc
- *
- * Authors:
- * Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#ifndef QEMU_VHOST_USER_BACKEND_H
-#define QEMU_VHOST_USER_BACKEND_H
-
-#include "qom/object.h"
-#include "exec/memory.h"
-#include "qemu/option.h"
-#include "qemu/bitmap.h"
-#include "hw/virtio/vhost.h"
-#include "hw/virtio/vhost-user.h"
-#include "chardev/char-fe.h"
-#include "io/channel.h"
-
-#define TYPE_VHOST_USER_BACKEND "vhost-user-backend"
-OBJECT_DECLARE_SIMPLE_TYPE(VhostUserBackend,
- VHOST_USER_BACKEND)
-
-
-
-struct VhostUserBackend {
- /* private */
- Object parent;
-
- char *chr_name;
- CharBackend chr;
- VhostUserState vhost_user;
- struct vhost_dev dev;
- VirtIODevice *vdev;
- bool started;
- bool completed;
-};
-
-int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
- unsigned nvqs, Error **errp);
-void vhost_user_backend_start(VhostUserBackend *b);
-void vhost_user_backend_stop(VhostUserBackend *b);
-
-#endif
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
deleted file mode 100644
index b5e3ea1..0000000
--- a/include/sysemu/xen-mapcache.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2011 Citrix Ltd.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef XEN_MAPCACHE_H
-#define XEN_MAPCACHE_H
-
-#include "exec/cpu-common.h"
-#include "sysemu/xen.h"
-
-typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
- ram_addr_t size);
-#ifdef CONFIG_XEN_IS_POSSIBLE
-
-void xen_map_cache_init(phys_offset_to_gaddr_t f,
- void *opaque);
-uint8_t *xen_map_cache(MemoryRegion *mr, hwaddr phys_addr, hwaddr size,
- ram_addr_t ram_addr_offset,
- uint8_t lock, bool dma,
- bool is_write);
-ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
-void xen_invalidate_map_cache_entry(uint8_t *buffer);
-void xen_invalidate_map_cache(void);
-uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
- hwaddr new_phys_addr,
- hwaddr size);
-#else
-
-static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
- void *opaque)
-{
-}
-
-static inline uint8_t *xen_map_cache(MemoryRegion *mr,
- hwaddr phys_addr,
- hwaddr size,
- ram_addr_t ram_addr_offset,
- uint8_t lock,
- bool dma,
- bool is_write)
-{
- abort();
-}
-
-static inline ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
-{
- abort();
-}
-
-static inline void xen_invalidate_map_cache_entry(uint8_t *buffer)
-{
-}
-
-static inline void xen_invalidate_map_cache(void)
-{
-}
-
-static inline uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
- hwaddr new_phys_addr,
- hwaddr size)
-{
- abort();
-}
-
-#endif
-
-#endif /* XEN_MAPCACHE_H */
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
deleted file mode 100644
index d70eacf..0000000
--- a/include/sysemu/xen.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * QEMU Xen support
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-/* header to be included in non-Xen-specific code */
-
-#ifndef SYSEMU_XEN_H
-#define SYSEMU_XEN_H
-
-#ifdef CONFIG_USER_ONLY
-#error Cannot include sysemu/xen.h from user emulation
-#endif
-
-#include "exec/cpu-common.h"
-
-#ifdef COMPILING_PER_TARGET
-# ifdef CONFIG_XEN
-# define CONFIG_XEN_IS_POSSIBLE
-# endif
-#else
-# define CONFIG_XEN_IS_POSSIBLE
-#endif /* COMPILING_PER_TARGET */
-
-#ifdef CONFIG_XEN_IS_POSSIBLE
-
-extern bool xen_allowed;
-
-#define xen_enabled() (xen_allowed)
-
-void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
- struct MemoryRegion *mr, Error **errp);
-
-#else /* !CONFIG_XEN_IS_POSSIBLE */
-
-#define xen_enabled() 0
-static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
-{
- /* nothing */
-}
-static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
- MemoryRegion *mr, Error **errp)
-{
- g_assert_not_reached();
-}
-
-#endif /* CONFIG_XEN_IS_POSSIBLE */
-
-bool xen_mr_is_memory(MemoryRegion *mr);
-bool xen_mr_is_grants(MemoryRegion *mr);
-#endif
diff --git a/include/system/accel-blocker.h b/include/system/accel-blocker.h
new file mode 100644
index 0000000..e10099d
--- /dev/null
+++ b/include/system/accel-blocker.h
@@ -0,0 +1,55 @@
+/*
+ * Accelerator blocking API, to prevent new ioctls from starting and to wait
+ * for the running ones to finish.
+ * This mechanism differs from pause/resume_all_vcpus() in that it does not
+ * release the BQL.
+ *
+ * Copyright (c) 2022 Red Hat Inc.
+ *
+ * Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ACCEL_BLOCKER_H
+#define ACCEL_BLOCKER_H
+
+#include "system/cpus.h"
+
+void accel_blocker_init(void);
+
+/*
+ * accel_{cpu_}ioctl_begin/end:
+ * Mark when ioctl is about to run or just finished.
+ *
+ * accel_{cpu_}ioctl_begin will block after accel_ioctl_inhibit_begin() is
+ * called, preventing new ioctls from running. They will continue only after
+ * accel_ioctl_inhibit_end().
+ */
+void accel_ioctl_begin(void);
+void accel_ioctl_end(void);
+void accel_cpu_ioctl_begin(CPUState *cpu);
+void accel_cpu_ioctl_end(CPUState *cpu);
+
+/*
+ * accel_ioctl_inhibit_begin: start critical section
+ *
+ * This function makes sure that:
+ * 1) incoming accel_{cpu_}ioctl_begin() calls block, and
+ * 2) all ioctls that were already running have reached
+ * accel_{cpu_}ioctl_end(), kicking vcpus if necessary.
+ *
+ * This allows the caller to access shared data or perform operations without
+ * worrying about concurrent vcpu accesses.
+ */
+void accel_ioctl_inhibit_begin(void);
+
+/*
+ * accel_ioctl_inhibit_end: end critical section started by
+ * accel_ioctl_inhibit_begin()
+ *
+ * This function allows blocked accel_{cpu_}ioctl_begin() to continue.
+ */
+void accel_ioctl_inhibit_end(void);
+
+#endif /* ACCEL_BLOCKER_H */
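A minimal sketch (not from the patch) of the intended pairing: per-ioctl paths bracket the ioctl with begin/end, and code that must exclude all ioctls wraps its critical section in the inhibit calls.

    static void do_vcpu_ioctl(CPUState *cpu)
    {
        accel_cpu_ioctl_begin(cpu);   /* blocks while an inhibit is active */
        /* ... issue the accelerator ioctl for this vcpu ... */
        accel_cpu_ioctl_end(cpu);
    }

    static void update_shared_accel_state(void)
    {
        accel_ioctl_inhibit_begin();  /* waits for in-flight ioctls to end */
        /* ... touch state that must not race with vcpu ioctls ... */
        accel_ioctl_inhibit_end();
    }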
diff --git a/include/system/accel-ops.h b/include/system/accel-ops.h
new file mode 100644
index 0000000..4c99d25
--- /dev/null
+++ b/include/system/accel-ops.h
@@ -0,0 +1,73 @@
+/*
+ * Accelerator OPS, used for cpus.c module
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_OPS_H
+#define ACCEL_OPS_H
+
+#include "exec/vaddr.h"
+#include "qom/object.h"
+
+#define ACCEL_OPS_SUFFIX "-ops"
+#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
+#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
+
+DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
+
+/**
+ * struct AccelOpsClass - accelerator interfaces
+ *
+ * This structure is used to abstract accelerator differences from the
+ * core CPU code. Not all of the hooks have to be implemented.
+ */
+struct AccelOpsClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ /* initialization function called when accel is chosen */
+ void (*ops_init)(AccelOpsClass *ops);
+
+ bool (*cpus_are_resettable)(void);
+ void (*cpu_reset_hold)(CPUState *cpu);
+
+ void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
+ void (*kick_vcpu_thread)(CPUState *cpu);
+ bool (*cpu_thread_is_idle)(CPUState *cpu);
+
+ void (*synchronize_post_reset)(CPUState *cpu);
+ void (*synchronize_post_init)(CPUState *cpu);
+ void (*synchronize_state)(CPUState *cpu);
+ void (*synchronize_pre_loadvm)(CPUState *cpu);
+ void (*synchronize_pre_resume)(bool step_pending);
+
+ void (*handle_interrupt)(CPUState *cpu, int mask);
+
+ /**
+ * @get_virtual_clock: fetch virtual clock
+ * @set_virtual_clock: set virtual clock
+ *
+ * These allow the timer subsystem to defer to the accelerator to
+ * fetch time. The set function is needed if the accelerator wants
+ * to track the changes to time as the timer is warped through
+ * various timer events.
+ */
+ int64_t (*get_virtual_clock)(void);
+ void (*set_virtual_clock)(int64_t time);
+
+ int64_t (*get_elapsed_ticks)(void);
+
+ /* gdbstub hooks */
+ bool (*supports_guest_debug)(void);
+ int (*update_guest_debug)(CPUState *cpu);
+ int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ void (*remove_all_breakpoints)(CPUState *cpu);
+};
+
+#endif /* ACCEL_OPS_H */
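To illustrate, a sketch (not part of the patch) of how an accelerator might fill in this class; only create_vcpu_thread is mandatory, everything prefixed my_ is invented, and the exact QOM class_init signature follows whatever the tree currently uses.

    /* hypothetical thread/interrupt helpers, defined elsewhere */
    static void my_accel_start_vcpu_thread(CPUState *cpu);
    static void my_accel_kick_vcpu_thread(CPUState *cpu);
    static void my_accel_handle_interrupt(CPUState *cpu, int mask);

    static void my_accel_ops_class_init(ObjectClass *oc, void *data)
    {
        AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

        ops->create_vcpu_thread = my_accel_start_vcpu_thread;  /* mandatory */
        ops->kick_vcpu_thread   = my_accel_kick_vcpu_thread;
        ops->handle_interrupt   = my_accel_handle_interrupt;
    }

    static const TypeInfo my_accel_ops_type = {
        .name       = ACCEL_OPS_NAME("my-accel"),  /* "my-accel-accel-ops" */
        .parent     = TYPE_ACCEL_OPS,
        .class_init = my_accel_ops_class_init,
        .abstract   = true,
    };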
diff --git a/include/system/address-spaces.h b/include/system/address-spaces.h
new file mode 100644
index 0000000..72d17af
--- /dev/null
+++ b/include/system/address-spaces.h
@@ -0,0 +1,35 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef SYSTEM_ADDRESS_SPACES_H
+#define SYSTEM_ADDRESS_SPACES_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
+ * you're one of them.
+ */
+
+/* Get the root memory region. This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region. This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
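As a usage note (not part of the patch): board or device code typically maps regions into the root region returned here, for example:

    static void map_my_device(MemoryRegion *mr, hwaddr base)
    {
        /* place the device's MMIO region into the global system memory map */
        memory_region_add_subregion(get_system_memory(), base, mr);
    }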
diff --git a/include/system/arch_init.h b/include/system/arch_init.h
new file mode 100644
index 0000000..51e24c3
--- /dev/null
+++ b/include/system/arch_init.h
@@ -0,0 +1,30 @@
+#ifndef QEMU_ARCH_INIT_H
+#define QEMU_ARCH_INIT_H
+
+
+enum {
+ QEMU_ARCH_ALL = -1,
+ QEMU_ARCH_ALPHA = (1 << 0),
+ QEMU_ARCH_ARM = (1 << 1),
+ QEMU_ARCH_I386 = (1 << 3),
+ QEMU_ARCH_M68K = (1 << 4),
+ QEMU_ARCH_MICROBLAZE = (1 << 6),
+ QEMU_ARCH_MIPS = (1 << 7),
+ QEMU_ARCH_PPC = (1 << 8),
+ QEMU_ARCH_S390X = (1 << 9),
+ QEMU_ARCH_SH4 = (1 << 10),
+ QEMU_ARCH_SPARC = (1 << 11),
+ QEMU_ARCH_XTENSA = (1 << 12),
+ QEMU_ARCH_OPENRISC = (1 << 13),
+ QEMU_ARCH_TRICORE = (1 << 16),
+ QEMU_ARCH_HPPA = (1 << 18),
+ QEMU_ARCH_RISCV = (1 << 19),
+ QEMU_ARCH_RX = (1 << 20),
+ QEMU_ARCH_AVR = (1 << 21),
+ QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_LOONGARCH = (1 << 23),
+};
+
+bool qemu_arch_available(unsigned qemu_arch_mask);
+
+#endif
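Since the values above are bit flags, a caller can test for several targets at once; a tiny sketch (not from the patch):

    static bool feature_is_supported_here(void)
    {
        /* true when the binary was built for any of these architectures */
        return qemu_arch_available(QEMU_ARCH_ARM | QEMU_ARCH_RISCV);
    }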
diff --git a/include/sysemu/balloon.h b/include/system/balloon.h
index 867687b..867687b 100644
--- a/include/sysemu/balloon.h
+++ b/include/system/balloon.h
diff --git a/include/sysemu/block-backend-common.h b/include/system/block-backend-common.h
index 780cea7..780cea7 100644
--- a/include/sysemu/block-backend-common.h
+++ b/include/system/block-backend-common.h
diff --git a/include/system/block-backend-global-state.h b/include/system/block-backend-global-state.h
new file mode 100644
index 0000000..35b5e83
--- /dev/null
+++ b/include/system/block-backend-global-state.h
@@ -0,0 +1,124 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_GLOBAL_STATE_H
+#define BLOCK_BACKEND_GLOBAL_STATE_H
+
+#include "block-backend-common.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
+
+BlockBackend * no_coroutine_fn
+blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * no_coroutine_fn
+blk_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+int blk_get_refcnt(BlockBackend *blk);
+void blk_ref(BlockBackend *blk);
+
+void no_coroutine_fn blk_unref(BlockBackend *blk);
+void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
+
+void blk_remove_all_bs(void);
+BlockBackend *blk_by_name(const char *name);
+BlockBackend *blk_next(BlockBackend *blk);
+BlockBackend *blk_all_next(BlockBackend *blk);
+bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
+void monitor_remove_blk(BlockBackend *blk);
+
+BlockBackendPublic *blk_get_public(BlockBackend *blk);
+
+void blk_remove_bs(BlockBackend *blk);
+int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
+int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
+bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
+bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
+int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
+ uint64_t shared_perm, Error **errp);
+void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
+
+void blk_iostatus_enable(BlockBackend *blk);
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
+void blk_iostatus_reset(BlockBackend *blk);
+int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
+void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
+DeviceState *blk_get_attached_dev(BlockBackend *blk);
+BlockBackend *blk_by_dev(void *dev);
+BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
+void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
+
+int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
+void blk_aio_cancel(BlockAIOCB *acb);
+int blk_commit_all(void);
+bool blk_in_drain(BlockBackend *blk);
+void blk_drain(BlockBackend *blk);
+void blk_drain_all(void);
+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+ BlockdevOnError on_write_error);
+bool blk_supports_write_perm(BlockBackend *blk);
+bool blk_is_sg(BlockBackend *blk);
+void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
+int blk_get_flags(BlockBackend *blk);
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ Error **errp);
+void blk_add_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
+void blk_remove_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *,
+ void *),
+ void (*detach_aio_context)(void *),
+ void *opaque);
+void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
+void blk_update_root_state(BlockBackend *blk);
+bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
+int blk_get_open_flags_from_root_state(BlockBackend *blk);
+
+int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
+ int64_t pos, int size);
+int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
+int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
+int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
+
+void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
+void blk_io_limits_disable(BlockBackend *blk);
+void blk_io_limits_enable(BlockBackend *blk, const char *group);
+void blk_io_limits_update_group(BlockBackend *blk, const char *group);
+void blk_set_force_allow_inactivate(BlockBackend *blk);
+
+bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp);
+void blk_unregister_buf(BlockBackend *blk, void *host, size_t size);
+
+const BdrvChild *blk_root(BlockBackend *blk);
+
+int blk_make_empty(BlockBackend *blk, Error **errp);
+
+#endif /* BLOCK_BACKEND_GLOBAL_STATE_H */
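A minimal sketch (not part of the patch) of the global-state entry points above, which must run with the BQL held; flags and option handling are simplified.

    static BlockBackend *open_image_rw(const char *filename, Error **errp)
    {
        /* NULL reference and options: open 'filename' directly */
        BlockBackend *blk = blk_new_open(filename, NULL, NULL,
                                         BDRV_O_RDWR, errp);
        if (!blk) {
            return NULL;
        }
        /* release later with blk_unref() */
        return blk;
    }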
diff --git a/include/system/block-backend-io.h b/include/system/block-backend-io.h
new file mode 100644
index 0000000..ba8dfcc
--- /dev/null
+++ b/include/system/block-backend-io.h
@@ -0,0 +1,237 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_IO_H
+#define BLOCK_BACKEND_IO_H
+
+#include "block-backend-common.h"
+#include "block/accounting.h"
+
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+const char *blk_name(const BlockBackend *blk);
+
+BlockDriverState *blk_bs(BlockBackend *blk);
+
+void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
+bool blk_iostatus_is_enabled(const BlockBackend *blk);
+
+/*
+ * Return the qdev ID, or if no ID is assigned the QOM path,
+ * of the block device attached to the BlockBackend.
+ *
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
+char *blk_get_attached_dev_id(BlockBackend *blk);
+
+BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+
+BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_flush(BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes,
+ BlockCompletionFunc *cb, void *opaque);
+void blk_aio_cancel_async(BlockAIOCB *acb);
+BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
+ BlockCompletionFunc *cb, void *opaque);
+
+void blk_inc_in_flight(BlockBackend *blk);
+void blk_dec_in_flight(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_inserted(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_inserted(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_available(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_available(BlockBackend *blk);
+
+void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked);
+void co_wrapper blk_lock_medium(BlockBackend *blk, bool locked);
+
+void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag);
+void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);
+
+int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
+int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);
+
+void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
+ uint64_t *nb_sectors_ptr);
+void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
+
+int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
+int64_t blk_nb_sectors(BlockBackend *blk);
+
+void *blk_try_blockalign(BlockBackend *blk, size_t size);
+void *blk_blockalign(BlockBackend *blk, size_t size);
+bool blk_is_writable(BlockBackend *blk);
+bool blk_enable_write_cache(BlockBackend *blk);
+BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
+BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
+ int error);
+void blk_error_action(BlockBackend *blk, BlockErrorAction action,
+ bool is_read, int error);
+void blk_iostatus_set_err(BlockBackend *blk, int error);
+int blk_get_max_iov(BlockBackend *blk);
+int blk_get_max_hw_iov(BlockBackend *blk);
+
+AioContext *blk_get_aio_context(BlockBackend *blk);
+BlockAcctStats *blk_get_stats(BlockBackend *blk);
+void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
+ BlockCompletionFunc *cb,
+ void *opaque, int ret);
+
+uint32_t blk_get_request_alignment(BlockBackend *blk);
+uint32_t blk_get_max_transfer(BlockBackend *blk);
+uint64_t blk_get_max_hw_transfer(BlockBackend *blk);
+
+int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
+ BlockBackend *blk_out, int64_t off_out,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
+int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
+ BlockDriverState *base,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
+ BlockDriverState **file);
+int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
+ BlockDriverState *base,
+ bool include_base, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * See include/block/block-io.h for more information about
+ * the "I/O or GS" API.
+ */
+
+int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset,
+ int64_t bytes, void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
+ void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk,
+ int64_t offset, int64_t bytes,
+ const void *buf);
+int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf);
+
+int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags);
+
+int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+
+int co_wrapper_mixed blk_flush(BlockBackend *blk);
+int coroutine_fn blk_co_flush(BlockBackend *blk);
+
+int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+
+int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset,
+ bool exact, PreallocMode prealloc,
+ BdrvRequestFlags flags, Error **errp);
+int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags,
+ Error **errp);
+
+#endif /* BLOCK_BACKEND_IO_H */
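
As a usage sketch for the wrapper pairs declared in this header (not part of the patch; @blk is assumed to be an already-attached BlockBackend with a medium inserted), the same read can be issued from outside or inside coroutine context:

#include "qemu/osdep.h"
#include "system/block-backend-io.h"

/* Non-coroutine caller: the co_wrapper_mixed wrapper creates and polls
 * a coroutine internally. */
static int read_probe_header(BlockBackend *blk, uint8_t *buf)
{
    return blk_pread(blk, 0, 512, buf, 0);
}

/* Coroutine caller (e.g. a block job): use the coroutine_fn variant
 * directly and skip the wrapper. */
static int coroutine_fn read_probe_header_co(BlockBackend *blk, uint8_t *buf)
{
    return blk_co_pread(blk, 0, 512, buf, 0);
}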
diff --git a/include/sysemu/block-backend.h b/include/system/block-backend.h
index 038be9f..038be9f 100644
--- a/include/sysemu/block-backend.h
+++ b/include/system/block-backend.h
diff --git a/include/sysemu/block-ram-registrar.h b/include/system/block-ram-registrar.h
index d8b2f79..d8b2f79 100644
--- a/include/sysemu/block-ram-registrar.h
+++ b/include/system/block-ram-registrar.h
diff --git a/include/sysemu/blockdev.h b/include/system/blockdev.h
index 3211b16..3211b16 100644
--- a/include/sysemu/blockdev.h
+++ b/include/system/blockdev.h
diff --git a/include/system/confidential-guest-support.h b/include/system/confidential-guest-support.h
new file mode 100644
index 0000000..ea46b50
--- /dev/null
+++ b/include/system/confidential-guest-support.h
@@ -0,0 +1,95 @@
+/*
+ * QEMU Confidential Guest support
+ * This interface describes the common pieces between various
+ * schemes for protecting guest memory or other state against a
+ * compromised hypervisor. This includes memory encryption (AMD's
+ * SEV and Intel's MKTME) or special protection modes (PEF on POWER,
+ * or PV on s390x).
+ *
+ * Copyright Red Hat.
+ *
+ * Authors:
+ * David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+
+#include "qom/object.h"
+
+#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
+OBJECT_DECLARE_TYPE(ConfidentialGuestSupport,
+ ConfidentialGuestSupportClass,
+ CONFIDENTIAL_GUEST_SUPPORT)
+
+
+struct ConfidentialGuestSupport {
+ Object parent;
+
+ /*
+ * True if the machine should use guest_memfd for RAM.
+ */
+ bool require_guest_memfd;
+
+ /*
+ * ready: flag set by CGS initialization code once it's ready to
+ * start executing instructions in a potentially-secure
+ * guest
+ *
+ * The definition here is a bit fuzzy, because this is essentially
+ * part of a self-sanity-check, rather than a strict mechanism.
+ *
+ * It's not feasible to have a single point in the common machine
+ * init path to configure confidential guest support, because
+ * different mechanisms have different interdependencies requiring
+ * initialization in different places, often in arch or machine
+ * type specific code. It's also usually not possible to check
+ * for invalid configurations until that initialization code.
+ * That means it would be very easy to have a bug allowing CGS
+ * init to be bypassed entirely in certain configurations.
+ *
+ * Silently ignoring a requested security feature would be bad, so
+ * to avoid that we check late in init that this 'ready' flag is
+ * set if CGS was requested. If the CGS init hasn't happened, and
+ * so 'ready' is not set, we'll abort.
+ */
+ bool ready;
+};
+
+typedef struct ConfidentialGuestSupportClass {
+ ObjectClass parent;
+
+ int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
+ int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp);
+} ConfidentialGuestSupportClass;
+
+static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_init) {
+ return klass->kvm_init(cgs, errp);
+ }
+
+ return 0;
+}
+
+static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_reset) {
+ return klass->kvm_reset(cgs, errp);
+ }
+
+ return 0;
+}
+
+#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
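
A hedged sketch of how a platform-specific implementation plugs into this interface (hypothetical names, not part of the patch): the subclass installs a kvm_init hook from its class_init, and the hook flips cgs->ready once its setup succeeds so the late sanity check in machine init does not abort.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "system/confidential-guest-support.h"

/* Hypothetical hook, normally installed from the subclass's class_init
 * via klass->kvm_init = my_cgs_kvm_init. */
static int my_cgs_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    /* platform-specific setup (memory encryption, attestation, ...) */
    cgs->ready = true;
    return 0;
}

/* Accelerator code then reaches the hook through the inline helper;
 * a machine without CGS simply skips it. */
static int my_accel_setup_cgs(ConfidentialGuestSupport *cgs, Error **errp)
{
    return cgs ? confidential_guest_kvm_init(cgs, errp) : 0;
}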
diff --git a/include/system/cpu-throttle.h b/include/system/cpu-throttle.h
new file mode 100644
index 0000000..44bf6a5
--- /dev/null
+++ b/include/system/cpu-throttle.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#ifndef SYSTEM_CPU_THROTTLE_H
+#define SYSTEM_CPU_THROTTLE_H
+
+#include "qemu/timer.h"
+
+/**
+ * cpu_throttle_init:
+ *
+ * Initialize the CPU throttling API.
+ */
+void cpu_throttle_init(void);
+
+/**
+ * cpu_throttle_set:
+ * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
+ *
+ * Throttles all vcpus by forcing them to sleep for the given percentage of
+ * time. A throttle_percentage of 25 roughly corresponds to a 75% duty cycle
+ * (for example, 10ms of sleep for every 30ms awake).
+ *
+ * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
+ * Once the throttling starts, it will remain in effect until cpu_throttle_stop
+ * is called.
+ */
+void cpu_throttle_set(int new_throttle_pct);
+
+/**
+ * cpu_throttle_stop:
+ *
+ * Stops the vcpu throttling started by cpu_throttle_set.
+ */
+void cpu_throttle_stop(void);
+
+/**
+ * cpu_throttle_active:
+ *
+ * Returns: %true if the vcpus are currently being throttled, %false otherwise.
+ */
+bool cpu_throttle_active(void);
+
+/**
+ * cpu_throttle_get_percentage:
+ *
+ * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
+ *
+ * Returns: The throttle percentage in range 1 to 99.
+ */
+int cpu_throttle_get_percentage(void);
+
+/**
+ * cpu_throttle_dirty_sync_timer_tick:
+ *
+ * Dirty sync timer hook.
+ */
+void cpu_throttle_dirty_sync_timer_tick(void *opaque);
+
+/**
+ * cpu_throttle_dirty_sync_timer:
+ *
+ * Start or stop the dirty sync timer.
+ */
+void cpu_throttle_dirty_sync_timer(bool enable);
+
+#endif /* SYSTEM_CPU_THROTTLE_H */
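
A small usage sketch of the intended call pattern (hypothetical helper, not part of the patch), using only the functions declared above:

#include "qemu/osdep.h"
#include "system/cpu-throttle.h"

/* Throttle vcpus to 30% sleep time while some heavy background phase
 * runs, then restore full speed. */
static void my_set_background_throttle(bool active)
{
    if (active) {
        cpu_throttle_set(30);       /* vcpus sleep 30% of the time */
    } else if (cpu_throttle_active()) {
        cpu_throttle_stop();
    }
}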
diff --git a/include/sysemu/cpu-timers-internal.h b/include/system/cpu-timers-internal.h
index 94bb739..94bb739 100644
--- a/include/sysemu/cpu-timers-internal.h
+++ b/include/system/cpu-timers-internal.h
diff --git a/include/system/cpu-timers.h b/include/system/cpu-timers.h
new file mode 100644
index 0000000..a1abed0
--- /dev/null
+++ b/include/system/cpu-timers.h
@@ -0,0 +1,46 @@
+/*
+ * CPU timers state API
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef SYSTEM_CPU_TIMERS_H
+#define SYSTEM_CPU_TIMERS_H
+
+#include "qemu/timer.h"
+
+/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */
+void cpu_timers_init(void);
+
+/*
+ * CPU Ticks and Clock
+ */
+
+/* Caller must hold BQL */
+void cpu_enable_ticks(void);
+/* Caller must hold BQL */
+void cpu_disable_ticks(void);
+
+/*
+ * return the time elapsed in VM between vm_start and vm_stop.
+ * cpu_get_ticks() uses units of the host CPU cycle counter.
+ */
+int64_t cpu_get_ticks(void);
+
+/*
+ * Returns the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
+int64_t cpu_get_clock(void);
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+/* get/set VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
+int64_t cpus_get_virtual_clock(void);
+void cpus_set_virtual_clock(int64_t new_time);
+int64_t cpus_get_elapsed_ticks(void);
+
+#endif /* SYSTEM_CPU_TIMERS_H */
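
A minimal sketch (hypothetical helper, not part of the patch) of the VM clock getter: the returned value only advances while the VM runs, so it can bracket an operation to measure elapsed guest time.

#include "qemu/osdep.h"
#include "system/cpu-timers.h"

static int64_t my_vm_clock_delta_ns(void (*op)(void))
{
    int64_t start = cpu_get_clock();    /* ns of VM time since vm_start */

    op();
    return cpu_get_clock() - start;     /* does not count while VM is stopped */
}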
diff --git a/include/system/cpus.h b/include/system/cpus.h
new file mode 100644
index 0000000..3226c76
--- /dev/null
+++ b/include/system/cpus.h
@@ -0,0 +1,49 @@
+#ifndef QEMU_CPUS_H
+#define QEMU_CPUS_H
+
+/* register accel-specific operations */
+void cpus_register_accel(const AccelOpsClass *i);
+
+/* return the registered accel-specific ops */
+const AccelOpsClass *cpus_get_accel(void);
+
+/* accel/dummy-cpus.c */
+
+/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
+void dummy_start_vcpu_thread(CPUState *);
+
+/* interface available for cpus accelerator threads */
+
+/* Size of temporary buffers used to form a vcpu thread name */
+#define VCPU_THREAD_NAME_SIZE 16
+
+void cpus_kick_thread(CPUState *cpu);
+bool cpu_work_list_empty(CPUState *cpu);
+bool cpu_thread_is_idle(CPUState *cpu);
+bool all_cpu_threads_idle(void);
+bool cpu_can_run(CPUState *cpu);
+void qemu_wait_io_event_common(CPUState *cpu);
+void qemu_wait_io_event(CPUState *cpu);
+void cpu_thread_signal_created(CPUState *cpu);
+void cpu_thread_signal_destroyed(CPUState *cpu);
+void cpu_handle_guest_debug(CPUState *cpu);
+
+/* end interface for cpus accelerator threads */
+
+bool qemu_in_vcpu_thread(void);
+void qemu_init_cpu_loop(void);
+void resume_all_vcpus(void);
+void pause_all_vcpus(void);
+void cpu_stop_current(void);
+
+/* Unblock cpu */
+void qemu_cpu_kick_self(void);
+
+bool cpus_are_resettable(void);
+
+void cpu_synchronize_all_states(void);
+void cpu_synchronize_all_post_reset(void);
+void cpu_synchronize_all_post_init(void);
+void cpu_synchronize_all_pre_loadvm(void);
+
+#endif
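
A sketch (not part of the patch) of how an accelerator without real vcpu threads could connect these pieces; AccelOpsClass and its create_vcpu_thread member are assumed to live in the accel-ops header of the renamed tree, and the registration call is the one declared above:

#include "qemu/osdep.h"
#include "system/accel-ops.h"   /* assumed location of AccelOpsClass */
#include "system/cpus.h"

/* Normally the ops structure is a QOM class filled in from its
 * class_init; a static instance is used here only to keep the
 * sketch short. */
static AccelOpsClass my_accel_ops;

static void my_accel_register_ops(void)
{
    my_accel_ops.create_vcpu_thread = dummy_start_vcpu_thread;
    cpus_register_accel(&my_accel_ops);
}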
diff --git a/include/system/cryptodev-vhost-user.h b/include/system/cryptodev-vhost-user.h
new file mode 100644
index 0000000..5138c14
--- /dev/null
+++ b/include/system/cryptodev-vhost-user.h
@@ -0,0 +1,50 @@
+/*
+ * QEMU Crypto Device Common Vhost User Implement
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef CRYPTODEV_VHOST_USER_H
+#define CRYPTODEV_VHOST_USER_H
+
+#include "system/cryptodev-vhost.h"
+
+#define VHOST_USER_MAX_AUTH_KEY_LEN 512
+#define VHOST_USER_MAX_CIPHER_KEY_LEN 64
+
+
+/**
+ * cryptodev_vhost_user_get_vhost:
+ * @cc: the client object for each queue
+ * @b: the cryptodev backend common vhost object
+ * @queue: the queue index
+ *
+ * Gets a new cryptodev backend common vhost object based on
+ * @b and @queue
+ *
+ * Returns: the cryptodev backend common vhost object
+ */
+CryptoDevBackendVhost *
+cryptodev_vhost_user_get_vhost(
+ CryptoDevBackendClient *cc,
+ CryptoDevBackend *b,
+ uint16_t queue);
+
+#endif /* CRYPTODEV_VHOST_USER_H */
diff --git a/include/system/cryptodev-vhost.h b/include/system/cryptodev-vhost.h
new file mode 100644
index 0000000..b0bb09e
--- /dev/null
+++ b/include/system/cryptodev-vhost.h
@@ -0,0 +1,153 @@
+/*
+ * QEMU Crypto Device Common Vhost Implement
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ * Jay Zhou <jianjay.zhou@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef CRYPTODEV_VHOST_H
+#define CRYPTODEV_VHOST_H
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-backend.h"
+#include "chardev/char.h"
+
+#include "system/cryptodev.h"
+
+
+typedef struct CryptoDevBackendVhostOptions {
+ VhostBackendType backend_type;
+ void *opaque;
+ int total_queues;
+ CryptoDevBackendClient *cc;
+} CryptoDevBackendVhostOptions;
+
+typedef struct CryptoDevBackendVhost {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[1];
+ int backend;
+ CryptoDevBackendClient *cc;
+} CryptoDevBackendVhost;
+
+/**
+ * cryptodev_vhost_get_max_queues:
+ * @crypto: the cryptodev backend common vhost object
+ *
+ * Get the maximum queue number of @crypto.
+ *
+ * Returns: the maximum queue number
+ */
+uint64_t
+cryptodev_vhost_get_max_queues(
+ CryptoDevBackendVhost *crypto);
+
+
+/**
+ * cryptodev_vhost_init:
+ * @options: the common vhost object's option
+ *
+ * Creates a new cryptodev backend common vhost object
+ *
+ * The returned object must be released with
+ * cryptodev_vhost_cleanup() when no
+ * longer required
+ *
+ * Returns: the cryptodev backend common vhost object
+ */
+struct CryptoDevBackendVhost *
+cryptodev_vhost_init(
+ CryptoDevBackendVhostOptions *options);
+
+/**
+ * cryptodev_vhost_cleanup:
+ * @crypto: the cryptodev backend common vhost object
+ *
+ * Clean up the resources associated with @crypto that were
+ * allocated by cryptodev_vhost_init()
+ *
+ */
+void cryptodev_vhost_cleanup(
+ CryptoDevBackendVhost *crypto);
+
+/**
+ * cryptodev_get_vhost:
+ * @cc: the client object for each queue
+ * @b: the cryptodev backend common vhost object
+ * @queue: the cryptodev backend queue index
+ *
+ * Gets a new cryptodev backend common vhost object based on
+ * @b and @queue
+ *
+ * Returns: the cryptodev backend common vhost object
+ */
+CryptoDevBackendVhost *
+cryptodev_get_vhost(CryptoDevBackendClient *cc,
+ CryptoDevBackend *b,
+ uint16_t queue);
+/**
+ * cryptodev_vhost_start:
+ * @dev: the virtio crypto object
+ * @total_queues: the total number of queues
+ *
+ * Starts the vhost crypto logic
+ *
+ * Returns: 0 for success, negative for errors
+ */
+int cryptodev_vhost_start(VirtIODevice *dev, int total_queues);
+
+/**
+ * cryptodev_vhost_stop:
+ * @dev: the virtio crypto object
+ * @total_queues: the total number of queues
+ *
+ * Stops the vhost crypto logic
+ *
+ */
+void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues);
+
+/**
+ * cryptodev_vhost_virtqueue_mask:
+ * @dev: the virtio crypto object
+ * @queue: the cryptodev backend queue index
+ * @idx: the virtqueue index
+ * @mask: mask or not (true or false)
+ *
+ * Mask/unmask events for @idx virtqueue on @dev device
+ *
+ */
+void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
+ int queue,
+ int idx, bool mask);
+
+/**
+ * cryptodev_vhost_virtqueue_pending:
+ * @dev: the virtio crypto object
+ * @queue: the cryptodev backend queue index
+ * @idx: the virtqueue index
+ *
+ * Test and clear event pending status for @idx virtqueue on @dev device.
+ * Should be called after unmask to avoid losing events.
+ *
+ * Returns: true if the virtqueue has pending events, false otherwise
+ */
+bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
+ int queue, int idx);
+
+#endif /* CRYPTODEV_VHOST_H */
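
A hedged sketch (hypothetical helper, not part of the patch) of the start/stop pairing from a virtio-crypto device's status handling:

#include "qemu/osdep.h"
#include "system/cryptodev-vhost.h"

/* Toggle the vhost dataplane when the driver sets or clears DRIVER_OK;
 * @queues is the device's configured queue count. */
static void my_crypto_vhost_set_running(VirtIODevice *vdev, int queues,
                                        bool running)
{
    if (running) {
        if (cryptodev_vhost_start(vdev, queues) < 0) {
            /* fall back to the in-QEMU (non-vhost) processing path */
        }
    } else {
        cryptodev_vhost_stop(vdev, queues);
    }
}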
diff --git a/include/system/cryptodev.h b/include/system/cryptodev.h
new file mode 100644
index 0000000..b20822d
--- /dev/null
+++ b/include/system/cryptodev.h
@@ -0,0 +1,447 @@
+/*
+ * QEMU Crypto Device Implementation
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef CRYPTODEV_H
+#define CRYPTODEV_H
+
+#include "qemu/queue.h"
+#include "qemu/throttle.h"
+#include "qom/object.h"
+#include "qapi/qapi-types-cryptodev.h"
+
+/**
+ * CryptoDevBackend:
+ *
+ * The CryptoDevBackend object is an interface
+ * for different cryptodev backends, which provides crypto
+ * operation wrapper.
+ *
+ */
+
+#define TYPE_CRYPTODEV_BACKEND "cryptodev-backend"
+
+OBJECT_DECLARE_TYPE(CryptoDevBackend, CryptoDevBackendClass,
+ CRYPTODEV_BACKEND)
+
+
+#define MAX_CRYPTO_QUEUE_NUM 64
+
+typedef struct CryptoDevBackendConf CryptoDevBackendConf;
+typedef struct CryptoDevBackendPeers CryptoDevBackendPeers;
+typedef struct CryptoDevBackendClient
+ CryptoDevBackendClient;
+
+/**
+ * CryptoDevBackendSymSessionInfo:
+ *
+ * @cipher_alg: algorithm type of CIPHER
+ * @key_len: byte length of cipher key
+ * @hash_alg: algorithm type of HASH/MAC
+ * @hash_result_len: byte length of HASH operation result
+ * @auth_key_len: byte length of authenticated key
+ * @add_len: byte length of additional authenticated data
+ * @op_type: operation type (refer to virtio_crypto.h)
+ * @direction: encryption or decryption for CIPHER
+ * @hash_mode: HASH mode for HASH operation (refer to virtio_crypto.h)
+ * @alg_chain_order: order of algorithm chaining (CIPHER then HASH,
+ * or HASH then CIPHER)
+ * @cipher_key: pointer to the CIPHER key
+ * @auth_key: pointer to the authentication key for MAC
+ *
+ */
+typedef struct CryptoDevBackendSymSessionInfo {
+ /* corresponding with virtio crypto spec */
+ uint32_t cipher_alg;
+ uint32_t key_len;
+ uint32_t hash_alg;
+ uint32_t hash_result_len;
+ uint32_t auth_key_len;
+ uint32_t add_len;
+ uint8_t op_type;
+ uint8_t direction;
+ uint8_t hash_mode;
+ uint8_t alg_chain_order;
+ uint8_t *cipher_key;
+ uint8_t *auth_key;
+} CryptoDevBackendSymSessionInfo;
+
+/**
+ * CryptoDevBackendAsymSessionInfo:
+ */
+typedef struct CryptoDevBackendRsaPara {
+ uint32_t padding_algo;
+ uint32_t hash_algo;
+} CryptoDevBackendRsaPara;
+
+typedef struct CryptoDevBackendAsymSessionInfo {
+ /* corresponding with virtio crypto spec */
+ uint32_t algo;
+ uint32_t keytype;
+ uint32_t keylen;
+ uint8_t *key;
+ union {
+ CryptoDevBackendRsaPara rsa;
+ } u;
+} CryptoDevBackendAsymSessionInfo;
+
+typedef struct CryptoDevBackendSessionInfo {
+ uint32_t op_code;
+ union {
+ CryptoDevBackendSymSessionInfo sym_sess_info;
+ CryptoDevBackendAsymSessionInfo asym_sess_info;
+ } u;
+ uint64_t session_id;
+} CryptoDevBackendSessionInfo;
+
+/**
+ * CryptoDevBackendSymOpInfo:
+ *
+ * @aad_len: byte length of additional authenticated data
+ * @iv_len: byte length of initialization vector or counter
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @digest_result_len: byte length of hash digest result
+ * @hash_start_src_offset: Starting point for hash processing, specified
+ * as number of bytes from start of packet in source data, only used for
+ * algorithm chain
+ * @cipher_start_src_offset: Starting point for cipher processing, specified
+ * as number of bytes from start of packet in source data, only used for
+ * algorithm chain
+ * @len_to_hash: byte length of source data on which the hash
+ * operation will be computed, only used for algorithm chain
+ * @len_to_cipher: byte length of source data on which the cipher
+ * operation will be computed, only used for algorithm chain
+ * @op_type: operation type (refer to virtio_crypto.h)
+ * @iv: pointer to the initialization vector or counter
+ * @src: pointer to the source data
+ * @dst: pointer to the destination data
+ * @aad_data: pointer to the additional authenticated data
+ * @digest_result: pointer to the digest result data
+ * @data: flexible array member holding the extra memory obtained with the
+ *     same allocation as this structure
+ *
+ */
+typedef struct CryptoDevBackendSymOpInfo {
+ uint32_t aad_len;
+ uint32_t iv_len;
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint32_t digest_result_len;
+ uint32_t hash_start_src_offset;
+ uint32_t cipher_start_src_offset;
+ uint32_t len_to_hash;
+ uint32_t len_to_cipher;
+ uint8_t op_type;
+ uint8_t *iv;
+ uint8_t *src;
+ uint8_t *dst;
+ uint8_t *aad_data;
+ uint8_t *digest_result;
+ uint8_t data[];
+} CryptoDevBackendSymOpInfo;
+
+
+/**
+ * CryptoDevBackendAsymOpInfo:
+ *
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @src: pointer to the source data
+ * @dst: pointer to the destination data
+ *
+ */
+typedef struct CryptoDevBackendAsymOpInfo {
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint8_t *src;
+ uint8_t *dst;
+} CryptoDevBackendAsymOpInfo;
+
+typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
+
+typedef struct CryptoDevBackendOpInfo {
+ QCryptodevBackendAlgoType algtype;
+ uint32_t op_code;
+ uint32_t queue_index;
+ CryptoDevCompletionFunc cb;
+ void *opaque; /* argument for cb */
+ uint64_t session_id;
+ union {
+ CryptoDevBackendSymOpInfo *sym_op_info;
+ CryptoDevBackendAsymOpInfo *asym_op_info;
+ } u;
+ QTAILQ_ENTRY(CryptoDevBackendOpInfo) next;
+} CryptoDevBackendOpInfo;
+
+struct CryptoDevBackendClass {
+ ObjectClass parent_class;
+
+ void (*init)(CryptoDevBackend *backend, Error **errp);
+ void (*cleanup)(CryptoDevBackend *backend, Error **errp);
+
+ int (*create_session)(CryptoDevBackend *backend,
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+ int (*close_session)(CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+ int (*do_op)(CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info);
+};
+
+struct CryptoDevBackendClient {
+ QCryptodevBackendType type;
+ char *info_str;
+ unsigned int queue_index;
+ int vring_enable;
+ QTAILQ_ENTRY(CryptoDevBackendClient) next;
+};
+
+struct CryptoDevBackendPeers {
+ CryptoDevBackendClient *ccs[MAX_CRYPTO_QUEUE_NUM];
+ uint32_t queues;
+};
+
+struct CryptoDevBackendConf {
+ CryptoDevBackendPeers peers;
+
+ /* Supported service mask */
+ uint32_t crypto_services;
+
+ /* Detailed algorithms mask */
+ uint32_t cipher_algo_l;
+ uint32_t cipher_algo_h;
+ uint32_t hash_algo;
+ uint32_t mac_algo_l;
+ uint32_t mac_algo_h;
+ uint32_t aead_algo;
+ uint32_t akcipher_algo;
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+ /* Maximum size of each crypto request's content */
+ uint64_t max_size;
+};
+
+typedef struct CryptodevBackendSymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+} CryptodevBackendSymStat;
+
+typedef struct CryptodevBackendAsymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t sign_ops;
+ int64_t verify_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+ int64_t sign_bytes;
+ int64_t verify_bytes;
+} CryptodevBackendAsymStat;
+
+struct CryptoDevBackend {
+ Object parent_obj;
+
+ bool ready;
+ /* Whether the cryptodev backend is currently used by virtio-crypto */
+ bool is_used;
+ CryptoDevBackendConf conf;
+ CryptodevBackendSymStat *sym_stat;
+ CryptodevBackendAsymStat *asym_stat;
+
+ ThrottleState ts;
+ ThrottleTimers tt;
+ ThrottleConfig tc;
+ QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos;
+};
+
+#define CryptodevSymStatInc(be, op, bytes) do { \
+ be->sym_stat->op##_bytes += (bytes); \
+ be->sym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevSymStatIncEncrypt(be, bytes) \
+ CryptodevSymStatInc(be, encrypt, bytes)
+
+#define CryptodevSymStatIncDecrypt(be, bytes) \
+ CryptodevSymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatInc(be, op, bytes) do { \
+ be->asym_stat->op##_bytes += (bytes); \
+ be->asym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevAsymStatIncEncrypt(be, bytes) \
+ CryptodevAsymStatInc(be, encrypt, bytes)
+
+#define CryptodevAsymStatIncDecrypt(be, bytes) \
+ CryptodevAsymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatIncSign(be, bytes) \
+ CryptodevAsymStatInc(be, sign, bytes)
+
+#define CryptodevAsymStatIncVerify(be, bytes) \
+ CryptodevAsymStatInc(be, verify, bytes)
+
+
+/**
+ * cryptodev_backend_new_client:
+ *
+ * Creates a new cryptodev backend client object.
+ *
+ * The returned object must be released with
+ * cryptodev_backend_free_client() when no
+ * longer required
+ *
+ * Returns: a new cryptodev backend client object
+ */
+CryptoDevBackendClient *cryptodev_backend_new_client(void);
+
+/**
+ * cryptodev_backend_free_client:
+ * @cc: the cryptodev backend client object
+ *
+ * Release the memory associated with @cc that
+ * was previously allocated by cryptodev_backend_new_client()
+ */
+void cryptodev_backend_free_client(
+ CryptoDevBackendClient *cc);
+
+/**
+ * cryptodev_backend_cleanup:
+ * @backend: the cryptodev backend object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Clean up the resources associated with @backend that were
+ * allocated by the specific backend's init() callback
+ */
+void cryptodev_backend_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp);
+
+/**
+ * cryptodev_backend_create_session:
+ * @backend: the cryptodev backend object
+ * @sess_info: parameters needed by session creating
+ * @queue_index: queue index of cryptodev backend client
+ * @cb: callback invoked when session creation is completed
+ * @opaque: parameter passed to callback
+ *
+ * Create a session for symmetric/asymmetric algorithms
+ *
+ * Returns: 0 for success and cb will be called when creation is completed,
+ * negative value for error, and cb will not be called.
+ */
+int cryptodev_backend_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+/**
+ * cryptodev_backend_close_session:
+ * @backend: the cryptodev backend object
+ * @session_id: the session id
+ * @queue_index: queue index of cryptodev backend client
+ * @cb: callback invoked when session closing is completed
+ * @opaque: parameter passed to callback
+ *
+ * Close a session that was previously created by
+ * cryptodev_backend_create_session()
+ *
+ * Returns: 0 for success and cb will be called when closing is completed,
+ * negative value for error, and cb will not be called.
+ */
+int cryptodev_backend_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+/**
+ * cryptodev_backend_crypto_operation:
+ * @backend: the cryptodev backend object
+ * @op_info: pointer to a CryptoDevBackendOpInfo object
+ *
+ * Perform a crypto operation, such as encryption, decryption, signing or
+ * verification
+ *
+ * Returns: 0 for success and cb will be called when the operation is completed,
+ * negative value for error, and cb will not be called.
+ */
+int cryptodev_backend_crypto_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info);
+
+/**
+ * cryptodev_backend_set_used:
+ * @backend: the cryptodev backend object
+ * @used: true or false
+ *
+ * Set whether the cryptodev backend is used by virtio-crypto
+ */
+void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used);
+
+/**
+ * cryptodev_backend_is_used:
+ * @backend: the cryptodev backend object
+ *
+ * Return whether the cryptodev backend is used by virtio-crypto
+ *
+ * Returns: true if used, false otherwise
+ */
+bool cryptodev_backend_is_used(CryptoDevBackend *backend);
+
+/**
+ * cryptodev_backend_set_ready:
+ * @backend: the cryptodev backend object
+ * @ready: true or false
+ *
+ * Set whether the cryptodev backend is ready; called by the
+ * children of the cryptodev backend interface.
+ */
+void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready);
+
+/**
+ * cryptodev_backend_is_ready:
+ * @backend: the cryptodev backend object
+ *
+ * Return whether the cryptodev backend is ready
+ *
+ * Returns: true if ready, false otherwise
+ */
+bool cryptodev_backend_is_ready(CryptoDevBackend *backend);
+
+#endif /* CRYPTODEV_H */
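
A usage sketch (hypothetical helper names, not part of the patch) of the asynchronous session-create contract documented above: the request must stay alive until the completion callback runs, and a negative return means the callback will never fire.

#include "qemu/osdep.h"
#include "system/cryptodev.h"

typedef struct MySessReq {
    CryptoDevBackendSessionInfo info;
    /* device state needed to resume the request would live here */
} MySessReq;

static void my_sess_done(void *opaque, int ret)
{
    MySessReq *req = opaque;

    if (ret == 0) {
        /* req->info.session_id now holds the backend session handle */
    }
    g_free(req);
}

static int my_create_sym_session(CryptoDevBackend *backend)
{
    MySessReq *req = g_new0(MySessReq, 1);
    int ret;

    /* op_code and the sym_sess_info fields follow the virtio-crypto
     * spec; only a key length is filled in for this sketch. */
    req->info.u.sym_sess_info.key_len = 16;     /* e.g. AES-128 */

    ret = cryptodev_backend_create_session(backend, &req->info, 0,
                                           my_sess_done, req);
    if (ret < 0) {
        g_free(req);                /* callback will not be invoked */
    }
    return ret;
}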
diff --git a/include/system/device_tree.h b/include/system/device_tree.h
new file mode 100644
index 0000000..49d8482
--- /dev/null
+++ b/include/system/device_tree.h
@@ -0,0 +1,213 @@
+/*
+ * Header with function prototypes to help device tree manipulation using
+ * libfdt. It also provides functions to read entries from device tree proc
+ * interface.
+ *
+ * Copyright 2008 IBM Corporation.
+ * Authors: Jerone Young <jyoung5@us.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef DEVICE_TREE_H
+#define DEVICE_TREE_H
+
+void *create_device_tree(int *sizep);
+void *load_device_tree(const char *filename_path, int *sizep);
+#ifdef CONFIG_LINUX
+/**
+ * load_device_tree_from_sysfs: reads the device tree information in the
+ * /proc/device-tree directory and returns the corresponding binary blob
+ * buffer pointer. Asserts in case of error.
+ */
+void *load_device_tree_from_sysfs(void);
+#endif
+
+/**
+ * qemu_fdt_node_path: return the paths of nodes matching a given
+ * name and compat string
+ * @fdt: pointer to the dt blob
+ * @name: node name
+ * @compat: compatibility string
+ * @errp: handle to an error object
+ *
+ * returns a newly allocated NULL-terminated array of node paths.
+ * Use g_strfreev() to free it. If one or more nodes were found, the
+ * array contains the path of each node and the last element is
+ * NULL. If there is no error but no matching node was found, the
+ * returned array contains a single element equal to NULL. If an error
+ * was encountered when parsing the blob, the function returns NULL
+ *
+ * @name may be NULL to wildcard names and only match compatibility
+ * strings.
+ */
+char **qemu_fdt_node_path(void *fdt, const char *name, const char *compat,
+ Error **errp);
+
+/**
+ * qemu_fdt_node_unit_path: return the paths of nodes matching a given
+ * node-name, ie. node-name and node-name@unit-address
+ * @fdt: pointer to the dt blob
+ * @name: node name
+ * @errp: handle to an error object
+ *
+ * returns a newly allocated NULL-terminated array of node paths.
+ * Use g_strfreev() to free it. If one or more nodes were found, the
+ * array contains the path of each node and the last element is
+ * NULL. If there is no error but no matching node was found, the
+ * returned array contains a single element equal to NULL. If an error
+ * was encountered when parsing the blob, the function returns NULL
+ */
+char **qemu_fdt_node_unit_path(void *fdt, const char *name, Error **errp);
+
+int qemu_fdt_setprop(void *fdt, const char *node_path,
+ const char *property, const void *val, int size);
+int qemu_fdt_setprop_cell(void *fdt, const char *node_path,
+ const char *property, uint32_t val);
+int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
+ const char *property, uint64_t val);
+int qemu_fdt_setprop_string(void *fdt, const char *node_path,
+ const char *property, const char *string);
+
+/**
+ * qemu_fdt_setprop_string_array: set a string array property
+ *
+ * @fdt: pointer to the dt blob
+ * @node_path: path of the node
+ * @prop: name of the property
+ * @array: pointer to an array of string pointers
+ * @len: length of array
+ *
+ * assigns a string array to a property. This function converts an
+ * array of strings to a sequential string with \0 separators before
+ * setting the property.
+ */
+int qemu_fdt_setprop_string_array(void *fdt, const char *node_path,
+ const char *prop, char **array, int len);
+
+int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
+ const char *property,
+ const char *target_node_path);
+/**
+ * qemu_fdt_getprop: retrieve the value of a given property
+ * @fdt: pointer to the device tree blob
+ * @node_path: node path
+ * @property: name of the property to find
+ * @lenp: fdt error if any or length of the property on success
+ * @errp: handle to an error object
+ *
+ * returns a pointer to the property on success and NULL on failure
+ */
+const void *qemu_fdt_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp,
+ Error **errp);
+/**
+ * qemu_fdt_getprop_cell: retrieve the value of a given 4 byte property
+ * @fdt: pointer to the device tree blob
+ * @node_path: node path
+ * @property: name of the property to find
+ * @lenp: fdt error if any or -EINVAL if the property size is different from
+ * 4 bytes, or 4 (expected length of the property) upon success.
+ * @errp: handle to an error object
+ *
+ * returns the property value on success
+ */
+uint32_t qemu_fdt_getprop_cell(void *fdt, const char *node_path,
+ const char *property, int *lenp,
+ Error **errp);
+uint32_t qemu_fdt_get_phandle(void *fdt, const char *path);
+uint32_t qemu_fdt_alloc_phandle(void *fdt);
+int qemu_fdt_nop_node(void *fdt, const char *node_path);
+int qemu_fdt_add_subnode(void *fdt, const char *name);
+int qemu_fdt_add_path(void *fdt, const char *path);
+
+#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
+ do { \
+ uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
+ for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
+ qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
+ } \
+ qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
+ sizeof(qdt_tmp)); \
+ } while (0)
+
+/**
+ * qemu_fdt_setprop_sized_cells_from_array:
+ * @fdt: device tree blob
+ * @node_path: node to set property on
+ * @property: property to set
+ * @numvalues: number of values
+ * @values: array of number-of-cells, value pairs
+ *
+ * Set the specified property on the specified node in the device tree
+ * to be an array of cells. The values of the cells are specified via
+ * the values list, which alternates between "number of cells used by
+ * this value" and "value".
+ * number-of-cells must be either 1 or 2 (other values will result in
+ * an error being returned). If a value is too large to fit in the
+ * number of cells specified for it, an error is returned.
+ *
+ * This function is useful because device tree nodes often have cell arrays
+ * which are either lists of addresses or lists of address,size tuples, but
+ * the number of cells used for each element varies depending on the
+ * #address-cells and #size-cells properties of their parent node.
+ * If you know all your cell elements are one cell wide you can use the
+ * simpler qemu_fdt_setprop_cells(). If you're not setting up the
+ * array programmatically, qemu_fdt_setprop_sized_cells may be more
+ * convenient.
+ *
+ * Return value: 0 on success, <0 on error.
+ */
+int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
+ const char *node_path,
+ const char *property,
+ int numvalues,
+ uint64_t *values);
+
+/**
+ * qemu_fdt_setprop_sized_cells:
+ * @fdt: device tree blob
+ * @node_path: node to set property on
+ * @property: property to set
+ * @...: list of number-of-cells, value pairs
+ *
+ * Set the specified property on the specified node in the device tree
+ * to be an array of cells. The values of the cells are specified via
+ * the variable arguments, which alternate between "number of cells
+ * used by this value" and "value".
+ *
+ * This is a convenience wrapper for the function
+ * qemu_fdt_setprop_sized_cells_from_array().
+ *
+ * Return value: 0 on success, <0 on error.
+ */
+#define qemu_fdt_setprop_sized_cells(fdt, node_path, property, ...) \
+ ({ \
+ uint64_t qdt_tmp[] = { __VA_ARGS__ }; \
+ qemu_fdt_setprop_sized_cells_from_array(fdt, node_path, \
+ property, \
+ ARRAY_SIZE(qdt_tmp) / 2, \
+ qdt_tmp); \
+ })
+
+
+/**
+ * qemu_fdt_randomize_seeds:
+ * @fdt: device tree blob
+ *
+ * Re-randomize all "rng-seed" properties with new seeds.
+ */
+void qemu_fdt_randomize_seeds(void *fdt);
+
+#define FDT_PCI_RANGE_RELOCATABLE 0x80000000
+#define FDT_PCI_RANGE_PREFETCHABLE 0x40000000
+#define FDT_PCI_RANGE_ALIASED 0x20000000
+#define FDT_PCI_RANGE_TYPE_MASK 0x03000000
+#define FDT_PCI_RANGE_MMIO_64BIT 0x03000000
+#define FDT_PCI_RANGE_MMIO 0x02000000
+#define FDT_PCI_RANGE_IOPORT 0x01000000
+#define FDT_PCI_RANGE_CONFIG 0x00000000
+
+#endif /* DEVICE_TREE_H */
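
A short sketch (hypothetical node name and compatible string, not part of the patch) combining the helpers above to describe an MMIO device whose "reg" property uses 2-cell address and size values:

#include "qemu/osdep.h"
#include "system/device_tree.h"

static void my_fdt_add_mmio_node(void *fdt, uint64_t base, uint64_t size)
{
    char *node = g_strdup_printf("/mydev@%" PRIx64, base);

    qemu_fdt_add_subnode(fdt, node);
    qemu_fdt_setprop_string(fdt, node, "compatible", "vendor,mydev");
    /* number-of-cells/value pairs: 2 cells for the base, 2 for the size */
    qemu_fdt_setprop_sized_cells(fdt, node, "reg", 2, base, 2, size);
    g_free(node);
}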
diff --git a/include/sysemu/dirtylimit.h b/include/system/dirtylimit.h
index d11ebbb..d11ebbb 100644
--- a/include/sysemu/dirtylimit.h
+++ b/include/system/dirtylimit.h
diff --git a/include/sysemu/dirtyrate.h b/include/system/dirtyrate.h
index 20813f3..20813f3 100644
--- a/include/sysemu/dirtyrate.h
+++ b/include/system/dirtyrate.h
diff --git a/include/system/dma.h b/include/system/dma.h
new file mode 100644
index 0000000..82e7ad5
--- /dev/null
+++ b/include/system/dma.h
@@ -0,0 +1,322 @@
+/*
+ * DMA helper functions
+ *
+ * Copyright (c) 2009, 2020 Red Hat
+ *
+ * This work is licensed under the terms of the GNU General Public License
+ * (GNU GPL), version 2 or later.
+ */
+
+#ifndef DMA_H
+#define DMA_H
+
+#include "system/memory.h"
+#include "system/address-spaces.h"
+#include "block/block.h"
+#include "block/accounting.h"
+
+typedef enum {
+ DMA_DIRECTION_TO_DEVICE = 0,
+ DMA_DIRECTION_FROM_DEVICE = 1,
+} DMADirection;
+
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size. Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
+typedef struct ScatterGatherEntry ScatterGatherEntry;
+
+struct QEMUSGList {
+ ScatterGatherEntry *sg;
+ int nsg;
+ int nalloc;
+ dma_addr_t size;
+ DeviceState *dev;
+ AddressSpace *as;
+};
+
+static inline void dma_barrier(AddressSpace *as, DMADirection dir)
+{
+ /*
+ * This is called before DMA read and write operations
+ * unless the _relaxed form is used and is responsible
+ * for providing some sane ordering of accesses vs
+ * concurrently running VCPUs.
+ *
+ * Users of map(), unmap() or lower level st/ld_*
+ * operations are responsible for providing their own
+ * ordering via barriers.
+ *
+ * This primitive implementation does a simple smp_mb()
+ * before each operation which provides pretty much full
+ * ordering.
+ *
+ * A smarter implementation can be devised if needed to
+ * use lighter barriers based on the direction of the
+ * transfer, the DMA context, etc...
+ */
+ smp_mb();
+}
+
+/* Checks that the given range of addresses is valid for DMA. This is
+ * useful for certain cases, but usually you should just use
+ * dma_memory_{read,write}() and check for errors */
+static inline bool dma_memory_valid(AddressSpace *as,
+ dma_addr_t addr, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
+{
+ return address_space_access_valid(as, addr, len,
+ dir == DMA_DIRECTION_FROM_DEVICE,
+ attrs);
+}
+
+static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir,
+ MemTxAttrs attrs)
+{
+ return address_space_rw(as, addr, attrs,
+ buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ const void *buf,
+ dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+/**
+ * dma_memory_rw: Read from or write to an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
+{
+ dma_barrier(as, dir);
+
+ return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
+}
+
+/**
+ * dma_memory_read: Read from an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
+{
+ return dma_memory_rw(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE, attrs);
+}
+
+/**
+ * dma_memory_write: Write to address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+ const void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
+{
+ return dma_memory_rw(as, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE, attrs);
+}
+
+/**
+ * dma_memory_set: Fill memory with a constant byte from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+ uint8_t c, dma_addr_t len, MemTxAttrs attrs);
+
+/**
+ * dma_memory_map: Map a physical memory region into a host virtual address.
+ *
+ * May map a subset of the requested range, given by and returned in @len.
+ * May return %NULL and set *@len to zero, if resources needed to perform
+ * the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: pointer to length of buffer; updated on return
+ * @dir: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+static inline void *dma_memory_map(AddressSpace *as,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir, MemTxAttrs attrs)
+{
+ hwaddr xlen = *len;
+ void *p;
+
+ p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
+ attrs);
+ *len = xlen;
+ return p;
+}
+
+/**
+ * dma_memory_unmap: Unmaps a memory region previously mapped by dma_memory_map()
+ *
+ * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
+ * @access_len gives the amount of memory that was actually read or written
+ * by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by dma_memory_map()
+ * @len: buffer length as returned by dma_memory_map()
+ * @dir: indicates the transfer direction
+ * @access_len: amount of data actually transferred
+ */
+static inline void dma_memory_unmap(AddressSpace *as,
+ void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ address_space_unmap(as, buffer, (hwaddr)len,
+ dir == DMA_DIRECTION_FROM_DEVICE, access_len);
+}
+
+#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
+ static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t *pval, \
+ MemTxAttrs attrs) \
+ { \
+ MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
+ _end##_bits##_to_cpus(pval); \
+ return res; \
+ } \
+ static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+ MemTxAttrs attrs) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+ return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
+ }
+
+static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t *val, MemTxAttrs attrs)
+{
+ return dma_memory_read(as, addr, val, 1, attrs);
+}
+
+static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t val, MemTxAttrs attrs)
+{
+ return dma_memory_write(as, addr, &val, 1, attrs);
+}
+
+DEFINE_LDST_DMA(uw, w, 16, le);
+DEFINE_LDST_DMA(l, l, 32, le);
+DEFINE_LDST_DMA(q, q, 64, le);
+DEFINE_LDST_DMA(uw, w, 16, be);
+DEFINE_LDST_DMA(l, l, 32, be);
+DEFINE_LDST_DMA(q, q, 64, be);
+
+#undef DEFINE_LDST_DMA
+
+struct ScatterGatherEntry {
+ dma_addr_t base;
+ dma_addr_t len;
+};
+
+void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
+ AddressSpace *as);
+void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
+void qemu_sglist_destroy(QEMUSGList *qsg);
+
+typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
+ BlockCompletionFunc *cb, void *cb_opaque,
+ void *opaque);
+
+BlockAIOCB *dma_blk_io(QEMUSGList *sg, uint64_t offset, uint32_t align,
+ DMAIOFunc *io_func, void *io_func_opaque,
+ BlockCompletionFunc *cb, void *opaque, DMADirection dir);
+BlockAIOCB *dma_blk_read(BlockBackend *blk,
+ QEMUSGList *sg, uint64_t offset, uint32_t align,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *dma_blk_write(BlockBackend *blk,
+ QEMUSGList *sg, uint64_t offset, uint32_t align,
+ BlockCompletionFunc *cb, void *opaque);
+MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
+MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
+
+void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
+ QEMUSGList *sg, enum BlockAcctType type);
+
+/**
+ * dma_aligned_pow2_mask: Return the address bit mask of the largest
+ * power of 2 size less or equal than @end - @start + 1, aligned with @start,
+ * and bounded by 1 << @max_addr_bits bits.
+ *
+ * @start: range start address
+ * @end: range end address (greater than @start)
+ * @max_addr_bits: max address bits (<= 64)
+ */
+uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
+ int max_addr_bits);
+
+#endif
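
A small device-model sketch (hypothetical descriptor layout, not part of the patch) using the generated load/store helpers; each call performs the DMA barrier, the bounded access and the endian conversion described above:

#include "qemu/osdep.h"
#include "system/dma.h"

static MemTxResult my_read_desc_flags(AddressSpace *as, dma_addr_t desc,
                                      uint32_t *flags)
{
    /* bounded read plus le32-to-host conversion in one call */
    return ldl_le_dma(as, desc, flags, MEMTXATTRS_UNSPECIFIED);
}

static MemTxResult my_write_status(AddressSpace *as, dma_addr_t addr,
                                   uint8_t status)
{
    return stb_dma(as, addr, status, MEMTXATTRS_UNSPECIFIED);
}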
diff --git a/include/sysemu/dump-arch.h b/include/system/dump-arch.h
index 743916e..743916e 100644
--- a/include/sysemu/dump-arch.h
+++ b/include/system/dump-arch.h
diff --git a/include/system/dump.h b/include/system/dump.h
new file mode 100644
index 0000000..607bd7b
--- /dev/null
+++ b/include/system/dump.h
@@ -0,0 +1,225 @@
+/*
+ * QEMU dump
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef DUMP_H
+#define DUMP_H
+
+#include "qapi/qapi-types-dump.h"
+#include "qemu/thread.h"
+
+#define MAKEDUMPFILE_SIGNATURE "makedumpfile"
+#define MAX_SIZE_MDF_HEADER (4096) /* max size of makedumpfile_header */
+#define TYPE_FLAT_HEADER (1) /* type of flattened format */
+#define VERSION_FLAT_HEADER (1) /* version of flattened format */
+#define END_FLAG_FLAT_HEADER (-1)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (0)
+#endif
+
+/*
+ * flag for compressed format
+ */
+#define DUMP_DH_COMPRESSED_ZLIB (0x1)
+#define DUMP_DH_COMPRESSED_LZO (0x2)
+#define DUMP_DH_COMPRESSED_SNAPPY (0x4)
+
+#define KDUMP_SIGNATURE "KDUMP "
+#define SIG_LEN (sizeof(KDUMP_SIGNATURE) - 1)
+#define DUMP_LEVEL (1)
+#define DISKDUMP_HEADER_BLOCKS (1)
+
+#include "system/dump-arch.h"
+#include "system/memory_mapping.h"
+
+typedef struct QEMU_PACKED MakedumpfileHeader {
+ char signature[16]; /* = "makedumpfile" */
+ int64_t type;
+ int64_t version;
+} MakedumpfileHeader;
+
+typedef struct QEMU_PACKED MakedumpfileDataHeader {
+ int64_t offset;
+ int64_t buf_size;
+} MakedumpfileDataHeader;
+
+typedef struct QEMU_PACKED NewUtsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+} NewUtsname;
+
+typedef struct QEMU_PACKED DiskDumpHeader32 {
+ char signature[SIG_LEN]; /* = "KDUMP " */
+ uint32_t header_version; /* Dump header version */
+ NewUtsname utsname; /* copy of system_utsname */
+ char timestamp[10]; /* Time stamp */
+ uint32_t status; /* Above flags */
+ uint32_t block_size; /* Size of a block in byte */
+ uint32_t sub_hdr_size; /* Size of arch dependent header in block */
+ uint32_t bitmap_blocks; /* Size of Memory bitmap in block */
+ uint32_t max_mapnr; /* = max_mapnr ,
+ obsoleted in header_version 6 */
+ uint32_t total_ram_blocks; /* Number of blocks should be written */
+ uint32_t device_blocks; /* Number of total blocks in dump device */
+ uint32_t written_blocks; /* Number of written blocks */
+ uint32_t current_cpu; /* CPU# which handles dump */
+ uint32_t nr_cpus; /* Number of CPUs */
+} DiskDumpHeader32;
+
+typedef struct QEMU_PACKED DiskDumpHeader64 {
+ char signature[SIG_LEN]; /* = "KDUMP " */
+ uint32_t header_version; /* Dump header version */
+ NewUtsname utsname; /* copy of system_utsname */
+ char timestamp[22]; /* Time stamp */
+ uint32_t status; /* Above flags */
+ uint32_t block_size; /* Size of a block in byte */
+ uint32_t sub_hdr_size; /* Size of arch dependent header in block */
+ uint32_t bitmap_blocks; /* Size of Memory bitmap in block */
+ uint32_t max_mapnr; /* = max_mapnr,
+ obsoleted in header_version 6 */
+ uint32_t total_ram_blocks; /* Number of blocks should be written */
+ uint32_t device_blocks; /* Number of total blocks in dump device */
+ uint32_t written_blocks; /* Number of written blocks */
+ uint32_t current_cpu; /* CPU# which handles dump */
+ uint32_t nr_cpus; /* Number of CPUs */
+} DiskDumpHeader64;
+
+typedef struct QEMU_PACKED KdumpSubHeader32 {
+ uint32_t phys_base;
+ uint32_t dump_level; /* header_version 1 and later */
+ uint32_t split; /* header_version 2 and later */
+ uint32_t start_pfn; /* header_version 2 and later,
+ obsoleted in header_version 6 */
+ uint32_t end_pfn; /* header_version 2 and later,
+ obsoleted in header_version 6 */
+ uint64_t offset_vmcoreinfo; /* header_version 3 and later */
+ uint32_t size_vmcoreinfo; /* header_version 3 and later */
+ uint64_t offset_note; /* header_version 4 and later */
+ uint32_t note_size; /* header_version 4 and later */
+ uint64_t offset_eraseinfo; /* header_version 5 and later */
+ uint32_t size_eraseinfo; /* header_version 5 and later */
+ uint64_t start_pfn_64; /* header_version 6 and later */
+ uint64_t end_pfn_64; /* header_version 6 and later */
+ uint64_t max_mapnr_64; /* header_version 6 and later */
+} KdumpSubHeader32;
+
+typedef struct QEMU_PACKED KdumpSubHeader64 {
+ uint64_t phys_base;
+ uint32_t dump_level; /* header_version 1 and later */
+ uint32_t split; /* header_version 2 and later */
+ uint64_t start_pfn; /* header_version 2 and later,
+ obsoleted in header_version 6 */
+ uint64_t end_pfn; /* header_version 2 and later,
+ obsoleted in header_version 6 */
+ uint64_t offset_vmcoreinfo; /* header_version 3 and later */
+ uint64_t size_vmcoreinfo; /* header_version 3 and later */
+ uint64_t offset_note; /* header_version 4 and later */
+ uint64_t note_size; /* header_version 4 and later */
+ uint64_t offset_eraseinfo; /* header_version 5 and later */
+ uint64_t size_eraseinfo; /* header_version 5 and later */
+ uint64_t start_pfn_64; /* header_version 6 and later */
+ uint64_t end_pfn_64; /* header_version 6 and later */
+ uint64_t max_mapnr_64; /* header_version 6 and later */
+} KdumpSubHeader64;
+
+typedef struct DataCache {
+ DumpState *state; /* dump state related to this data */
+ uint8_t *buf; /* buffer for cached data */
+ size_t buf_size; /* size of the buf */
+ size_t data_size; /* size of cached data in buf */
+ off_t offset; /* offset of the file */
+} DataCache;
+
+typedef struct QEMU_PACKED PageDescriptor {
+ uint64_t offset; /* the offset of the page data*/
+ uint32_t size; /* the size of this dump page */
+ uint32_t flags; /* flags */
+ uint64_t page_flags; /* page flags */
+} PageDescriptor;
+
+typedef struct DumpState {
+ GuestPhysBlockList guest_phys_blocks;
+ ArchDumpInfo dump_info;
+ MemoryMappingList list;
+ bool resume;
+ bool detached;
+ bool kdump_raw;
+ hwaddr memory_offset;
+ int fd;
+
+ /*
+ * Dump filter area variables
+ *
+ * A filtered dump only contains the guest memory designated by
+ * the start address and length variables defined below.
+ *
+ * If length is 0, no filtering is applied.
+ */
+ int64_t filter_area_begin; /* Start address of partial guest memory area */
+ int64_t filter_area_length; /* Length of partial guest memory area */
+
+ /* Elf dump related data */
+ uint32_t phdr_num;
+ uint32_t shdr_num;
+ ssize_t note_size;
+ hwaddr shdr_offset;
+ hwaddr phdr_offset;
+ hwaddr section_offset;
+ hwaddr note_offset;
+
+ void *elf_section_hdrs; /* Pointer to section header buffer */
+ void *elf_section_data; /* Pointer to section data buffer */
+ uint64_t elf_section_data_size; /* Size of section data */
+ GArray *string_table_buf; /* String table data buffer */
+
+ uint8_t *note_buf; /* buffer for notes */
+    size_t note_buf_offset;     /* current write offset in note_buf */
+    uint32_t nr_cpus;           /* number of guest CPUs */
+    uint64_t max_mapnr;         /* the largest guest physical page frame number */
+ size_t len_dump_bitmap; /* the size of the place used to store
+ dump_bitmap in vmcore */
+ off_t offset_dump_bitmap; /* offset of dump_bitmap part in vmcore */
+ off_t offset_page; /* offset of page part in vmcore */
+    size_t num_dumpable;        /* number of pages that can be dumped */
+ uint32_t flag_compress; /* indicate the compression format */
+ DumpStatus status; /* current dump status */
+
+ bool has_format; /* whether format is provided */
+ DumpGuestMemoryFormat format; /* valid only if has_format == true */
+ QemuThread dump_thread; /* thread for detached dump */
+
+ int64_t total_size; /* total memory size (in bytes) to
+ * be dumped. When filter is
+ * enabled, this will only count
+ * those to be written. */
+ int64_t written_size; /* written memory size (in bytes),
+ * this could be used to calculate
+ * how much work we have
+ * finished. */
+ uint8_t *guest_note; /* ELF note content */
+ size_t guest_note_size;
+} DumpState;
+
+uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
+uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
+uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
+
+int64_t dump_filtered_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
+int64_t dump_filtered_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
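+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical):
+ * cpu_to_dump32() and friends convert header fields to the dump target's
+ * endianness before they are written out, and the memblock helpers clip a
+ * guest physical block against the filter window configured in DumpState
+ * (a filter length of 0 means no filtering).
+ */
+static inline int64_t example_filtered_block_size(DumpState *s,
+                                                  GuestPhysBlock *block)
+{
+    return dump_filtered_memblock_size(block, s->filter_area_begin,
+                                       s->filter_area_length);
+}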
+#endif
diff --git a/include/sysemu/event-loop-base.h b/include/system/event-loop-base.h
index a6c24f1..a6c24f1 100644
--- a/include/sysemu/event-loop-base.h
+++ b/include/system/event-loop-base.h
diff --git a/include/system/host_iommu_device.h b/include/system/host_iommu_device.h
new file mode 100644
index 0000000..ab849a4
--- /dev/null
+++ b/include/system/host_iommu_device.h
@@ -0,0 +1,125 @@
+/*
+ * Host IOMMU device abstract declaration
+ *
+ * Copyright (C) 2024 Intel Corporation.
+ *
+ * Authors: Zhenzhong Duan <zhenzhong.duan@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef HOST_IOMMU_DEVICE_H
+#define HOST_IOMMU_DEVICE_H
+
+#include "qom/object.h"
+#include "qapi/error.h"
+#ifdef CONFIG_LINUX
+#include "linux/iommufd.h"
+
+typedef union VendorCaps {
+ struct iommu_hw_info_vtd vtd;
+ struct iommu_hw_info_arm_smmuv3 smmuv3;
+} VendorCaps;
+
+/**
+ * struct HostIOMMUDeviceCaps - Define host IOMMU device capabilities.
+ *
+ * @type: host platform IOMMU type.
+ *
+ * @hw_caps: host platform IOMMU capabilities (e.g. on IOMMUFD this represents
+ * the @out_capabilities value returned from IOMMU_GET_HW_INFO ioctl)
+ *
+ * @vendor_caps: host platform IOMMU vendor specific capabilities (e.g. on
+ * IOMMUFD this represents a user-space buffer filled by kernel
+ * with host IOMMU @type specific hardware information data)
+ */
+typedef struct HostIOMMUDeviceCaps {
+ uint32_t type;
+ uint64_t hw_caps;
+ VendorCaps vendor_caps;
+} HostIOMMUDeviceCaps;
+#endif
+
+#define TYPE_HOST_IOMMU_DEVICE "host-iommu-device"
+OBJECT_DECLARE_TYPE(HostIOMMUDevice, HostIOMMUDeviceClass, HOST_IOMMU_DEVICE)
+
+struct HostIOMMUDevice {
+ Object parent_obj;
+
+ char *name;
+    void *agent; /* pointer to agent device, i.e. a VFIO or VDPA device */
+ PCIBus *aliased_bus;
+ int aliased_devfn;
+#ifdef CONFIG_LINUX
+ HostIOMMUDeviceCaps caps;
+#endif
+};
+
+/**
+ * struct HostIOMMUDeviceClass - The base class for all host IOMMU devices.
+ *
+ * Different types of host devices (e.g., VFIO or VDPA device) or devices
+ * with different backend (e.g., VFIO legacy container or IOMMUFD backend)
+ * will have different implementations of the HostIOMMUDeviceClass.
+ */
+struct HostIOMMUDeviceClass {
+ ObjectClass parent_class;
+
+ /**
+ * @realize: initialize host IOMMU device instance further.
+ *
+ * Mandatory callback.
+ *
+ * @hiod: pointer to a host IOMMU device instance.
+ *
+ * @opaque: pointer to agent device of this host IOMMU device,
+ * e.g., VFIO base device or VDPA device.
+ *
+ * @errp: pass an Error out when realize fails.
+ *
+ * Returns: true on success, false on failure.
+ */
+ bool (*realize)(HostIOMMUDevice *hiod, void *opaque, Error **errp);
+ /**
+ * @get_cap: check if a host IOMMU device capability is supported.
+ *
+     * Optional callback; if not implemented, querying @cap is treated
+     * as unsupported.
+ *
+ * @hiod: pointer to a host IOMMU device instance.
+ *
+ * @cap: capability to check.
+ *
+ * @errp: pass an Error out when fails to query capability.
+ *
+     * Returns: <0 on failure, 0 if @cap is unsupported, or else 1, or a
+     *          capability-specific positive value for special caps such
+     *          as HOST_IOMMU_DEVICE_CAP_AW_BITS.
+ */
+ int (*get_cap)(HostIOMMUDevice *hiod, int cap, Error **errp);
+ /**
+     * @get_iova_ranges: Return the list of usable IOVA ranges for the
+     * @hiod host IOMMU device
+ *
+ * @hiod: handle to the host IOMMU device
+ */
+ GList* (*get_iova_ranges)(HostIOMMUDevice *hiod);
+    /**
+     * @get_page_size_mask: Return the page size mask supported by the
+     * @hiod host IOMMU device
+ *
+ * @hiod: handle to the host IOMMU device
+ */
+ uint64_t (*get_page_size_mask)(HostIOMMUDevice *hiod);
+};
+
+/*
+ * Host IOMMU device capability list.
+ */
+#define HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE 0
+#define HOST_IOMMU_DEVICE_CAP_AW_BITS 1
+
+#define HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX 64
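+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical):
+ * query the DMA address width through the optional @get_cap hook.  Per the
+ * contract above, a positive value is capability specific (here the address
+ * width in bits) and 0 means the capability is not supported.
+ */
+static inline int example_host_iommu_aw_bits(HostIOMMUDevice *hiod,
+                                             Error **errp)
+{
+    HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);
+
+    if (!hiodc->get_cap) {
+        return -ENOTSUP;
+    }
+    return hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, errp);
+}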
+#endif
diff --git a/include/system/hostmem.h b/include/system/hostmem.h
new file mode 100644
index 0000000..88fa791
--- /dev/null
+++ b/include/system/hostmem.h
@@ -0,0 +1,99 @@
+/*
+ * QEMU Host Memory Backend
+ *
+ * Copyright (C) 2013-2014 Red Hat Inc
+ *
+ * Authors:
+ * Igor Mammedov <imammedo@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSTEM_HOSTMEM_H
+#define SYSTEM_HOSTMEM_H
+
+#include "system/numa.h"
+#include "qapi/qapi-types-machine.h"
+#include "qom/object.h"
+#include "system/memory.h"
+#include "qemu/bitmap.h"
+#include "qemu/thread-context.h"
+
+#define TYPE_MEMORY_BACKEND "memory-backend"
+OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass,
+ MEMORY_BACKEND)
+
+/* hostmem-ram.c */
+/**
+ * @TYPE_MEMORY_BACKEND_RAM:
+ * name of the backend that uses mmap on anonymous RAM
+ */
+
+#define TYPE_MEMORY_BACKEND_RAM "memory-backend-ram"
+
+/* hostmem-file.c */
+/**
+ * @TYPE_MEMORY_BACKEND_FILE:
+ * name of backend that uses mmap on a file descriptor
+ */
+#define TYPE_MEMORY_BACKEND_FILE "memory-backend-file"
+
+#define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd"
+
+
+/**
+ * HostMemoryBackendClass:
+ * @parent_class: opaque parent class container
+ */
+struct HostMemoryBackendClass {
+ ObjectClass parent_class;
+
+ /**
+ * alloc: Allocate memory from backend.
+ *
+ * @backend: the #HostMemoryBackend.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+     * Return: true on success, or false with @errp set on failure.
+ */
+ bool (*alloc)(HostMemoryBackend *backend, Error **errp);
+};
+
+/**
+ * HostMemoryBackend:
+ *
+ * @parent: opaque parent object container
+ * @size: amount of memory backend provides
+ * @mr: MemoryRegion representing host memory belonging to backend
+ * @prealloc_threads: number of threads to be used for preallocating RAM
+ */
+struct HostMemoryBackend {
+ /* private */
+ Object parent;
+
+ /* protected */
+ uint64_t size;
+ bool merge, dump, use_canonical_path;
+ bool prealloc, is_mapped, share, reserve;
+ bool guest_memfd, aligned;
+ uint32_t prealloc_threads;
+ ThreadContext *prealloc_context;
+ DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
+ HostMemPolicy policy;
+
+ MemoryRegion mr;
+};
+
+bool host_memory_backend_mr_inited(HostMemoryBackend *backend);
+MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend);
+
+void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped);
+bool host_memory_backend_is_mapped(HostMemoryBackend *backend);
+size_t host_memory_backend_pagesize(HostMemoryBackend *memdev);
+char *host_memory_backend_get_name(HostMemoryBackend *backend);
+
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
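+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical): a
+ * backend's MemoryRegion is meant to be claimed by a single user, so check
+ * the mapped state before taking it.
+ */
+static inline MemoryRegion *example_claim_backend(HostMemoryBackend *backend)
+{
+    if (!host_memory_backend_mr_inited(backend) ||
+        host_memory_backend_is_mapped(backend)) {
+        return NULL;
+    }
+    host_memory_backend_set_mapped(backend, true);
+    return host_memory_backend_get_memory(backend);
+}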
+
+#endif
diff --git a/include/system/hvf.h b/include/system/hvf.h
new file mode 100644
index 0000000..a9a502f
--- /dev/null
+++ b/include/system/hvf.h
@@ -0,0 +1,77 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * Copyright Google Inc., 2017
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in non-HVF-specific code */
+
+#ifndef HVF_H
+#define HVF_H
+
+#include "qemu/accel.h"
+#include "qemu/queue.h"
+#include "exec/vaddr.h"
+#include "qom/object.h"
+
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_HVF
+# define CONFIG_HVF_IS_POSSIBLE
+# endif /* CONFIG_HVF */
+#else
+# define CONFIG_HVF_IS_POSSIBLE
+#endif /* COMPILING_PER_TARGET */
+
+#ifdef CONFIG_HVF_IS_POSSIBLE
+extern bool hvf_allowed;
+#define hvf_enabled() (hvf_allowed)
+#else /* !CONFIG_HVF_IS_POSSIBLE */
+#define hvf_enabled() 0
+#endif /* !CONFIG_HVF_IS_POSSIBLE */
+
+#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
+
+typedef struct HVFState HVFState;
+DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE,
+ TYPE_HVF_ACCEL)
+
+#ifdef COMPILING_PER_TARGET
+struct hvf_sw_breakpoint {
+ vaddr pc;
+ vaddr saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(hvf_sw_breakpoint) entry;
+};
+
+struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu,
+ vaddr pc);
+int hvf_sw_breakpoints_active(CPUState *cpu);
+
+int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
+void hvf_arch_remove_all_hw_breakpoints(void);
+
+/*
+ * hvf_update_guest_debug:
+ * @cs: CPUState for the CPU to update
+ *
+ * Update guest to enable or disable debugging. Per-arch specifics will be
+ * handled by calling down to hvf_arch_update_guest_debug.
+ */
+int hvf_update_guest_debug(CPUState *cpu);
+void hvf_arch_update_guest_debug(CPUState *cpu);
+
+/*
+ * Return whether the guest supports debugging.
+ */
+bool hvf_arch_supports_guest_debug(void);
+#endif /* COMPILING_PER_TARGET */
+
+#endif
diff --git a/include/system/hvf_int.h b/include/system/hvf_int.h
new file mode 100644
index 0000000..d774e58
--- /dev/null
+++ b/include/system/hvf_int.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in HVF-specific code */
+
+#ifndef HVF_INT_H
+#define HVF_INT_H
+
+#include "qemu/queue.h"
+
+#ifdef __aarch64__
+#include <Hypervisor/Hypervisor.h>
+typedef hv_vcpu_t hvf_vcpuid;
+#else
+#include <Hypervisor/hv.h>
+typedef hv_vcpuid_t hvf_vcpuid;
+#endif
+
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+ uint64_t start;
+ uint64_t size;
+ uint8_t *mem;
+ int slot_id;
+ uint32_t flags;
+ MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+ uint64_t vmx_cap_pinbased;
+ uint64_t vmx_cap_procbased;
+ uint64_t vmx_cap_procbased2;
+ uint64_t vmx_cap_entry;
+ uint64_t vmx_cap_exit;
+ uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+struct HVFState {
+ AccelState parent;
+
+ hvf_slot slots[32];
+ int num_slots;
+
+ hvf_vcpu_caps *hvf_caps;
+ uint64_t vtimer_offset;
+ QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints;
+};
+extern HVFState *hvf_state;
+
+struct AccelCPUState {
+ hvf_vcpuid fd;
+ void *exit;
+ bool vtimer_masked;
+ sigset_t unblock_ipi_mask;
+ bool guest_debug_enabled;
+ bool dirty;
+};
+
+void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
+ const char *exp);
+#define assert_hvf_ok(EX) assert_hvf_ok_impl((EX), __FILE__, __LINE__, #EX)
+const char *hvf_return_string(hv_return_t ret);
+int hvf_arch_init(void);
+hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range);
+int hvf_arch_init_vcpu(CPUState *cpu);
+void hvf_arch_vcpu_destroy(CPUState *cpu);
+int hvf_vcpu_exec(CPUState *);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+int hvf_put_registers(CPUState *);
+int hvf_get_registers(CPUState *);
+void hvf_kick_vcpu_thread(CPUState *cpu);
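+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical):
+ * Hypervisor.framework calls are wrapped in assert_hvf_ok(), which reports
+ * the failing expression with file and line and aborts when the call does
+ * not return HV_SUCCESS.
+ */
+static inline void example_destroy_vcpu_fd(hvf_vcpuid fd)
+{
+    assert_hvf_ok(hv_vcpu_destroy(fd));
+}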
+
+#endif
diff --git a/include/system/hw_accel.h b/include/system/hw_accel.h
new file mode 100644
index 0000000..380e9e6
--- /dev/null
+++ b/include/system/hw_accel.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU Hardware accelerators support
+ *
+ * Copyright 2016 Google, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_HW_ACCEL_H
+#define QEMU_HW_ACCEL_H
+
+#include "hw/core/cpu.h"
+#include "system/kvm.h"
+#include "system/hvf.h"
+#include "system/whpx.h"
+#include "system/nvmm.h"
+
+void cpu_synchronize_state(CPUState *cpu);
+void cpu_synchronize_post_reset(CPUState *cpu);
+void cpu_synchronize_post_init(CPUState *cpu);
+void cpu_synchronize_pre_loadvm(CPUState *cpu);
+
+#endif /* QEMU_HW_ACCEL_H */
diff --git a/include/system/iommufd.h b/include/system/iommufd.h
new file mode 100644
index 0000000..283861b
--- /dev/null
+++ b/include/system/iommufd.h
@@ -0,0 +1,120 @@
+/*
+ * iommufd container backend declaration
+ *
+ * Copyright (C) 2024 Intel Corporation.
+ * Copyright Red Hat, Inc. 2024
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ * Zhenzhong Duan <zhenzhong.duan@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SYSTEM_IOMMUFD_H
+#define SYSTEM_IOMMUFD_H
+
+#include "qom/object.h"
+#include "exec/hwaddr.h"
+#include "exec/cpu-common.h"
+#include "system/host_iommu_device.h"
+
+#define TYPE_IOMMUFD_BACKEND "iommufd"
+OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND)
+
+struct IOMMUFDBackendClass {
+ ObjectClass parent_class;
+};
+
+struct IOMMUFDBackend {
+ Object parent;
+
+ /*< protected >*/
+ int fd; /* /dev/iommu file descriptor */
+    int fd;            /* /dev/iommu file descriptor */
+    bool owned;        /* whether /dev/iommu was opened internally */
+ uint32_t users;
+
+ /*< public >*/
+};
+
+bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
+void iommufd_backend_disconnect(IOMMUFDBackend *be);
+
+bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+ Error **errp);
+void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
+int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly);
+int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
+ hwaddr iova, ram_addr_t size);
+bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid,
+ uint32_t *type, void *data, uint32_t len,
+ uint64_t *caps, Error **errp);
+bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id,
+ uint32_t pt_id, uint32_t flags,
+ uint32_t data_type, uint32_t data_len,
+ void *data_ptr, uint32_t *out_hwpt,
+ Error **errp);
+bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be, uint32_t hwpt_id,
+ bool start, Error **errp);
+bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be, uint32_t hwpt_id,
+ uint64_t iova, ram_addr_t size,
+ uint64_t page_size, uint64_t *data,
+ Error **errp);
+bool iommufd_backend_invalidate_cache(IOMMUFDBackend *be, uint32_t id,
+ uint32_t data_type, uint32_t entry_len,
+ uint32_t *entry_num, void *data,
+ Error **errp);
+
+#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD TYPE_HOST_IOMMU_DEVICE "-iommufd"
+OBJECT_DECLARE_TYPE(HostIOMMUDeviceIOMMUFD, HostIOMMUDeviceIOMMUFDClass,
+ HOST_IOMMU_DEVICE_IOMMUFD)
+
+/* Overload of the host IOMMU device for the iommufd backend */
+struct HostIOMMUDeviceIOMMUFD {
+ HostIOMMUDevice parent_obj;
+
+ IOMMUFDBackend *iommufd;
+ uint32_t devid;
+ uint32_t hwpt_id;
+};
+
+struct HostIOMMUDeviceIOMMUFDClass {
+ HostIOMMUDeviceClass parent_class;
+
+ /**
+ * @attach_hwpt: attach host IOMMU device to IOMMUFD hardware page table.
+ * VFIO and VDPA device can have different implementation.
+ *
+ * Mandatory callback.
+ *
+ * @idev: host IOMMU device backed by IOMMUFD backend.
+ *
+ * @hwpt_id: ID of IOMMUFD hardware page table.
+ *
+ * @errp: pass an Error out when attachment fails.
+ *
+ * Returns: true on success, false on failure.
+ */
+ bool (*attach_hwpt)(HostIOMMUDeviceIOMMUFD *idev, uint32_t hwpt_id,
+ Error **errp);
+ /**
+ * @detach_hwpt: detach host IOMMU device from IOMMUFD hardware page table.
+ * VFIO and VDPA device can have different implementation.
+ *
+ * Mandatory callback.
+ *
+ * @idev: host IOMMU device backed by IOMMUFD backend.
+ *
+ * @errp: pass an Error out when attachment fails.
+ *
+ * Returns: true on success, false on failure.
+ */
+ bool (*detach_hwpt)(HostIOMMUDeviceIOMMUFD *idev, Error **errp);
+};
+
+bool host_iommu_device_iommufd_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ uint32_t hwpt_id, Error **errp);
+bool host_iommu_device_iommufd_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+ Error **errp);
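+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical, and
+ * updating @hwpt_id here is an assumption about the caller's bookkeeping):
+ * move a device to a new hardware page table by detaching from the current
+ * one first.
+ */
+static inline bool example_replace_hwpt(HostIOMMUDeviceIOMMUFD *idev,
+                                        uint32_t new_hwpt_id, Error **errp)
+{
+    if (!host_iommu_device_iommufd_detach_hwpt(idev, errp)) {
+        return false;
+    }
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, new_hwpt_id, errp)) {
+        return false;
+    }
+    idev->hwpt_id = new_hwpt_id;
+    return true;
+}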
+#endif
diff --git a/include/system/ioport.h b/include/system/ioport.h
new file mode 100644
index 0000000..780ea5a
--- /dev/null
+++ b/include/system/ioport.h
@@ -0,0 +1,75 @@
+/*
+ * defines ioport related functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef SYSTEM_IOPORT_H
+#define SYSTEM_IOPORT_H
+
+#include "system/memory.h"
+
+#define MAX_IOPORTS (64 * 1024)
+#define IOPORTS_MASK (MAX_IOPORTS - 1)
+
+typedef struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ uint32_t (*read)(void *opaque, uint32_t address);
+ void (*write)(void *opaque, uint32_t address, uint32_t data);
+} MemoryRegionPortio;
+
+#define PORTIO_END_OF_LIST() { }
+
+extern const MemoryRegionOps unassigned_io_ops;
+
+void cpu_outb(uint32_t addr, uint8_t val);
+void cpu_outw(uint32_t addr, uint16_t val);
+void cpu_outl(uint32_t addr, uint32_t val);
+uint8_t cpu_inb(uint32_t addr);
+uint16_t cpu_inw(uint32_t addr);
+uint32_t cpu_inl(uint32_t addr);
+
+typedef struct PortioList {
+ const struct MemoryRegionPortio *ports;
+ Object *owner;
+ struct MemoryRegion *address_space;
+ uint32_t addr;
+ unsigned nr;
+ struct MemoryRegion **regions;
+ void *opaque;
+ const char *name;
+ bool flush_coalesced_mmio;
+} PortioList;
+
+void portio_list_init(PortioList *piolist, Object *owner,
+ const struct MemoryRegionPortio *callbacks,
+ void *opaque, const char *name);
+void portio_list_set_flush_coalesced(PortioList *piolist);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+ struct MemoryRegion *address_space,
+ uint32_t addr);
+void portio_list_del(PortioList *piolist);
+void portio_list_set_enabled(PortioList *piolist, bool enabled);
+void portio_list_set_address(PortioList *piolist, uint32_t addr);
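+
+/*
+ * Usage sketch (illustrative only; names and the 0x70 base address are
+ * hypothetical): @callbacks is a MemoryRegionPortio table terminated by
+ * PORTIO_END_OF_LIST(), registered once and then mapped at a port base.
+ */
+static inline void example_map_ports(PortioList *piolist, Object *owner,
+                                     const MemoryRegionPortio *callbacks,
+                                     MemoryRegion *io_space, void *opaque)
+{
+    portio_list_init(piolist, owner, callbacks, opaque, "example");
+    portio_list_add(piolist, io_space, 0x70);
+}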
+
+#endif /* SYSTEM_IOPORT_H */
diff --git a/include/system/iothread.h b/include/system/iothread.h
new file mode 100644
index 0000000..d95c17a
--- /dev/null
+++ b/include/system/iothread.h
@@ -0,0 +1,67 @@
+/*
+ * Event loop thread
+ *
+ * Copyright Red Hat Inc., 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef IOTHREAD_H
+#define IOTHREAD_H
+
+#include "block/aio.h"
+#include "qemu/thread.h"
+#include "qom/object.h"
+#include "system/event-loop-base.h"
+
+#define TYPE_IOTHREAD "iothread"
+
+struct IOThread {
+ EventLoopBase parent_obj;
+
+ QemuThread thread;
+ AioContext *ctx;
+ bool run_gcontext; /* whether we should run gcontext */
+ GMainContext *worker_context;
+ GMainLoop *main_loop;
+ QemuSemaphore init_done_sem; /* is thread init done? */
+ bool stopping; /* has iothread_stop() been called? */
+ bool running; /* should iothread_run() continue? */
+ int thread_id;
+
+ /* AioContext poll parameters */
+ int64_t poll_max_ns;
+ int64_t poll_grow;
+ int64_t poll_shrink;
+};
+typedef struct IOThread IOThread;
+
+DECLARE_INSTANCE_CHECKER(IOThread, IOTHREAD,
+ TYPE_IOTHREAD)
+
+char *iothread_get_id(IOThread *iothread);
+IOThread *iothread_by_id(const char *id);
+AioContext *iothread_get_aio_context(IOThread *iothread);
+GMainContext *iothread_get_g_main_context(IOThread *iothread);
+
+/*
+ * Helpers used to allocate iothreads for internal use. These
+ * iothreads will not be seen by monitor clients when query using
+ * "query-iothreads".
+ */
+IOThread *iothread_create(const char *id, Error **errp);
+void iothread_stop(IOThread *iothread);
+void iothread_destroy(IOThread *iothread);
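+
+/*
+ * Usage sketch (illustrative only; the id string and function name are
+ * hypothetical): create an internal iothread and hand back its AioContext.
+ */
+static inline AioContext *example_internal_iothread_ctx(Error **errp)
+{
+    IOThread *iothread = iothread_create("example-internal", errp);
+
+    return iothread ? iothread_get_aio_context(iothread) : NULL;
+}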
+
+/*
+ * Returns true if executing within IOThread context,
+ * false otherwise.
+ */
+bool qemu_in_iothread(void);
+
+#endif /* IOTHREAD_H */
diff --git a/include/system/kvm.h b/include/system/kvm.h
new file mode 100644
index 0000000..7cc60d2
--- /dev/null
+++ b/include/system/kvm.h
@@ -0,0 +1,589 @@
+/*
+ * QEMU KVM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in non-KVM-specific code */
+
+#ifndef QEMU_KVM_H
+#define QEMU_KVM_H
+
+#include "exec/memattrs.h"
+#include "qemu/accel.h"
+#include "qom/object.h"
+
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_KVM
+# include <linux/kvm.h>
+# define CONFIG_KVM_IS_POSSIBLE
+# endif
+#else
+# define CONFIG_KVM_IS_POSSIBLE
+#endif
+
+#ifdef CONFIG_KVM_IS_POSSIBLE
+
+extern bool kvm_allowed;
+extern bool kvm_kernel_irqchip;
+extern bool kvm_split_irqchip;
+extern bool kvm_async_interrupts_allowed;
+extern bool kvm_halt_in_kernel_allowed;
+extern bool kvm_resamplefds_allowed;
+extern bool kvm_msi_via_irqfd_allowed;
+extern bool kvm_gsi_routing_allowed;
+extern bool kvm_gsi_direct_mapping;
+extern bool kvm_readonly_mem_allowed;
+extern bool kvm_msi_use_devid;
+extern bool kvm_pre_fault_memory_supported;
+
+#define kvm_enabled() (kvm_allowed)
+/**
+ * kvm_irqchip_in_kernel:
+ *
+ * Returns: true if an in-kernel irqchip was created.
+ * What this actually means is architecture and machine model
+ * specific: on PC, for instance, it means that the LAPIC
+ * is in kernel. This function should never be used from generic
+ * target-independent code: use one of the following functions or
+ * some other specific check instead.
+ */
+#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
+
+/**
+ * kvm_irqchip_is_split:
+ *
+ * Returns: true if the irqchip implementation is split between
+ * user and kernel space. The details are architecture and
+ * machine specific. On PC, it means that the PIC, IOAPIC, and
+ * PIT are in user space while the LAPIC is in the kernel.
+ */
+#define kvm_irqchip_is_split() (kvm_split_irqchip)
+
+/**
+ * kvm_async_interrupts_enabled:
+ *
+ * Returns: true if we can deliver interrupts to KVM
+ * asynchronously (ie by ioctl from any thread at any time)
+ * rather than having to do interrupt delivery synchronously
+ * (where the vcpu must be stopped at a suitable point first).
+ */
+#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
+
+/**
+ * kvm_halt_in_kernel:
+ *
+ * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
+ * inside of kernel space. This only works if MP state is implemented.
+ */
+#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
+
+/**
+ * kvm_irqfds_enabled:
+ *
+ * Returns: true if we can use irqfds to inject interrupts into
+ * a KVM CPU (ie the kernel supports irqfds and we are running
+ * with a configuration where it is meaningful to use them).
+ *
+ * Always available if running with in-kernel irqchip.
+ */
+#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
+
+/**
+ * kvm_resamplefds_enabled:
+ *
+ * Returns: true if we can use resamplefds to inject interrupts into
+ * a KVM CPU (ie the kernel supports resamplefds and we are running
+ * with a configuration where it is meaningful to use them).
+ */
+#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)
+
+/**
+ * kvm_msi_via_irqfd_enabled:
+ *
+ * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
+ * to a KVM CPU via an irqfd. This requires that the kernel supports
+ * this and that we're running in a configuration that permits it.
+ */
+#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
+
+/**
+ * kvm_gsi_routing_enabled:
+ *
+ * Returns: true if GSI routing is enabled (ie the kernel supports
+ * it and we're running in a configuration that permits it).
+ */
+#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
+
+/**
+ * kvm_gsi_direct_mapping:
+ *
+ * Returns: true if GSI direct mapping is enabled.
+ */
+#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)
+
+/**
+ * kvm_readonly_mem_enabled:
+ *
+ * Returns: true if KVM readonly memory is enabled (ie the kernel
+ * supports it and we're running in a configuration that permits it).
+ */
+#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
+
+/**
+ * kvm_msi_devid_required:
+ * Returns: true if KVM requires a device id to be provided while
+ * defining an MSI routing entry.
+ */
+#define kvm_msi_devid_required() (kvm_msi_use_devid)
+
+#else
+
+#define kvm_enabled() (0)
+#define kvm_irqchip_in_kernel() (false)
+#define kvm_irqchip_is_split() (false)
+#define kvm_async_interrupts_enabled() (false)
+#define kvm_halt_in_kernel() (false)
+#define kvm_irqfds_enabled() (false)
+#define kvm_resamplefds_enabled() (false)
+#define kvm_msi_via_irqfd_enabled() (false)
+#define kvm_gsi_routing_allowed() (false)
+#define kvm_gsi_direct_mapping() (false)
+#define kvm_readonly_mem_enabled() (false)
+#define kvm_msi_devid_required() (false)
+
+#endif /* CONFIG_KVM_IS_POSSIBLE */
+
+struct kvm_run;
+struct kvm_irq_routing_entry;
+
+typedef struct KVMCapabilityInfo {
+ const char *name;
+ int value;
+} KVMCapabilityInfo;
+
+#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
+#define KVM_CAP_LAST_INFO { NULL, 0 }
+
+struct KVMState;
+
+#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
+typedef struct KVMState KVMState;
+DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
+ TYPE_KVM_ACCEL)
+
+extern KVMState *kvm_state;
+typedef struct Notifier Notifier;
+
+typedef struct KVMRouteChange {
+ KVMState *s;
+ int changes;
+} KVMRouteChange;
+
+/* external API */
+
+unsigned int kvm_get_max_memslots(void);
+unsigned int kvm_get_free_memslots(void);
+bool kvm_has_sync_mmu(void);
+int kvm_has_vcpu_events(void);
+int kvm_max_nested_state_length(void);
+int kvm_has_gsi_routing(void);
+
+/**
+ * kvm_arm_supports_user_irq
+ *
+ * Not all KVM implementations support notifications for kernel generated
+ * interrupt events to user space. This function indicates whether the current
+ * KVM implementation does support them.
+ *
+ * Returns: true if KVM supports using kernel generated IRQs from user space
+ */
+bool kvm_arm_supports_user_irq(void);
+
+
+int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+int kvm_on_sigbus(int code, void *addr);
+
+int kvm_check_extension(KVMState *s, unsigned int extension);
+
+int kvm_vm_ioctl(KVMState *s, unsigned long type, ...);
+
+void kvm_flush_coalesced_mmio_buffer(void);
+
+#ifdef COMPILING_PER_TARGET
+#include "cpu.h"
+
+/**
+ * kvm_update_guest_debug(): ensure KVM debug structures updated
+ * @cs: the CPUState for this cpu
+ * @reinject_trap: KVM trap injection control
+ *
+ * There are usually per-arch specifics which will be handled by
+ * calling down to kvm_arch_update_guest_debug after the generic
+ * fields have been set.
+ */
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
+#else
+static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
+{
+ return -EINVAL;
+}
+#endif
+
+/* internal API */
+
+int kvm_ioctl(KVMState *s, unsigned long type, ...);
+
+int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...);
+
+/**
+ * kvm_device_ioctl - call an ioctl on a kvm device
+ * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
+ * @type: The device-ctrl ioctl number
+ *
+ * Returns: -errno on error, nonnegative on success
+ */
+int kvm_device_ioctl(int fd, unsigned long type, ...);
+
+/**
+ * kvm_vm_check_attr - check for existence of a specific vm attribute
+ * @s: The KVMState pointer
+ * @group: the group
+ * @attr: the attribute of that group to query for
+ *
+ * Returns: 1 if the attribute exists
+ *          0 if the attribute either does not exist or the vm device
+ *          interface is unavailable
+ */
+int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);
+
+/**
+ * kvm_device_check_attr - check for existence of a specific device attribute
+ * @fd: The device file descriptor
+ * @group: the group
+ * @attr: the attribute of that group to query for
+ *
+ * Returns: 1 if the attribute exists
+ *          0 if the attribute either does not exist or the device
+ *          interface is unavailable
+ */
+int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);
+
+/**
+ * kvm_device_access - set or get value of a specific device attribute
+ * @fd: The device file descriptor
+ * @group: the group
+ * @attr: the attribute of that group to set or get
+ * @val: pointer to a storage area for the value
+ * @write: true for set and false for get operation
+ * @errp: error object handle
+ *
+ * Returns: 0 on success
+ * < 0 on error
+ * Use kvm_device_check_attr() in order to check for the availability
+ * of optional attributes.
+ */
+int kvm_device_access(int fd, int group, uint64_t attr,
+ void *val, bool write, Error **errp);
+
+/**
+ * kvm_create_device - create a KVM device for the device control API
+ * @s: The KVMState pointer
+ * @type: The KVM device type (see Documentation/virtual/kvm/devices in the
+ * kernel source)
+ * @test: If true, only test if device can be created, but don't actually
+ * create the device.
+ *
+ * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd;
+ */
+int kvm_create_device(KVMState *s, uint64_t type, bool test);
+
+/**
+ * kvm_device_supported - probe whether KVM supports specific device
+ *
+ * @vmfd: The fd handle for the VM
+ * @type: type of device
+ *
+ * @return: true if supported, otherwise false.
+ */
+bool kvm_device_supported(int vmfd, uint64_t type);
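+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical):
+ * probe with @test set before really creating a device; on success the
+ * second call returns the new device's file descriptor.
+ */
+static inline int example_try_create_device(KVMState *s, uint64_t type)
+{
+    int ret = kvm_create_device(s, type, true);
+
+    if (ret < 0) {
+        return ret;
+    }
+    return kvm_create_device(s, type, false);
+}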
+
+/**
+ * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
+ * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
+ *
+ * @returns: 0 on success, negative errno on failure.
+ */
+int kvm_create_vcpu(CPUState *cpu);
+
+/**
+ * kvm_park_vcpu - Park QEMU KVM vCPU context
+ * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked.
+ *
+ * @returns: none
+ */
+void kvm_park_vcpu(CPUState *cpu);
+
+/**
+ * kvm_unpark_vcpu - unpark QEMU KVM vCPU context
+ * @s: KVM State
+ * @vcpu_id: Architecture vCPU ID of the parked vCPU
+ *
+ * @returns: KVM fd
+ */
+int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id);
+
+/**
+ * kvm_create_and_park_vcpu - Create and park a KVM vCPU
+ * @cpu: QOM CPUState object for which KVM vCPU has to be created and parked.
+ *
+ * @returns: 0 on success, negative errno on failure.
+ */
+int kvm_create_and_park_vcpu(CPUState *cpu);
+
+/* Arch specific hooks */
+
+extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
+
+void kvm_arch_accel_class_init(ObjectClass *oc);
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
+
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
+
+int kvm_arch_process_async_events(CPUState *cpu);
+
+int kvm_arch_get_registers(CPUState *cpu, Error **errp);
+
+/* state subset only touched by the VCPU itself during runtime */
+#define KVM_PUT_RUNTIME_STATE 1
+/* state subset modified during VCPU reset */
+#define KVM_PUT_RESET_STATE 2
+/* full state set, modified during initialization or on vmload */
+#define KVM_PUT_FULL_STATE 3
+
+int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp);
+
+int kvm_arch_get_default_type(MachineState *ms);
+
+int kvm_arch_init(MachineState *ms, KVMState *s);
+
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp);
+int kvm_arch_init_vcpu(CPUState *cpu);
+int kvm_arch_destroy_vcpu(CPUState *cpu);
+
+#ifdef TARGET_KVM_HAVE_RESET_PARKED_VCPU
+void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd);
+#else
+static inline void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd)
+{
+}
+#endif
+
+bool kvm_vcpu_id_is_valid(int vcpu_id);
+
+/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
+unsigned long kvm_arch_vcpu_id(CPUState *cpu);
+
+void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+
+void kvm_arch_init_irq_routing(KVMState *s);
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev);
+
+/* Notify arch about newly added MSI routes */
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev);
+/* Notify arch about released MSI routes */
+int kvm_arch_release_virq_post(int virq);
+
+int kvm_arch_msi_data_to_gsi(uint32_t data);
+
+int kvm_set_irq(KVMState *s, int irq, int level);
+int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
+
+void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
+
+void kvm_irqchip_add_change_notifier(Notifier *n);
+void kvm_irqchip_remove_change_notifier(Notifier *n);
+void kvm_irqchip_change_notify(void);
+
+struct kvm_guest_debug;
+struct kvm_debug_exit_arch;
+
+struct kvm_sw_breakpoint {
+ vaddr pc;
+ vaddr saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
+};
+
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
+ vaddr pc);
+
+int kvm_sw_breakpoints_active(CPUState *cpu);
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
+ struct kvm_sw_breakpoint *bp);
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
+ struct kvm_sw_breakpoint *bp);
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
+void kvm_arch_remove_all_hw_breakpoints(void);
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
+
+int kvm_vm_check_extension(KVMState *s, unsigned int extension);
+
+#define kvm_vm_enable_cap(s, capability, cap_flags, ...) \
+ ({ \
+ struct kvm_enable_cap cap = { \
+ .cap = capability, \
+ .flags = cap_flags, \
+ }; \
+ uint64_t args_tmp[] = { __VA_ARGS__ }; \
+ size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
+ memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
+ kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \
+ })
+
+#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \
+ ({ \
+ struct kvm_enable_cap cap = { \
+ .cap = capability, \
+ .flags = cap_flags, \
+ }; \
+ uint64_t args_tmp[] = { __VA_ARGS__ }; \
+ size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
+ memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
+ kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \
+ })
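+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical and
+ * the capability constant is just an example from <linux/kvm.h>): enable a
+ * VM-wide capability with one argument.  The macro expands to the
+ * kvm_vm_ioctl() result, so check it like any other ioctl-style call.
+ */
+static inline int example_enable_dirty_ring(KVMState *s, uint64_t ring_bytes)
+{
+    return kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
+}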
+
+void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
+
+int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
+ hwaddr *phys_addr);
+
+#endif /* COMPILING_PER_TARGET */
+
+void kvm_cpu_synchronize_state(CPUState *cpu);
+
+void kvm_init_cpu_signals(CPUState *cpu);
+
+/**
+ * kvm_irqchip_add_msi_route - Add MSI route for specific vector
+ * @c: KVMRouteChange instance.
+ * @vector: which vector to add. This can be either an MSI or MSI-X
+ *          vector. The function will automatically detect whether
+ *          MSI or MSI-X is enabled and fetch the corresponding MSI
+ *          message.
+ * @dev:    Owner PCI device to add the route for. If @dev is
+ *          specified as @NULL, an empty MSI message will be initialized.
+ * @return: virq (>=0) when success, errno (<0) when failed.
+ */
+int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
+int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
+ PCIDevice *dev);
+void kvm_irqchip_commit_routes(KVMState *s);
+
+static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
+{
+ return (KVMRouteChange) { .s = s, .changes = 0 };
+}
+
+static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
+{
+ if (c->changes) {
+ kvm_irqchip_commit_routes(c->s);
+ c->changes = 0;
+ }
+}
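+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical):
+ * batch MSI route updates by opening a route change, adding routes, and
+ * committing once; the commit is a no-op if nothing changed.
+ */
+static inline int example_add_msi_route(KVMState *s, int vector,
+                                        PCIDevice *dev)
+{
+    KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
+    int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
+
+    if (virq >= 0) {
+        kvm_irqchip_commit_route_changes(&c);
+    }
+    return virq;
+}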
+
+int kvm_irqchip_get_virq(KVMState *s);
+void kvm_irqchip_release_virq(KVMState *s, int virq);
+
+void kvm_add_routing_entry(KVMState *s,
+ struct kvm_irq_routing_entry *entry);
+
+int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+ EventNotifier *rn, int virq);
+int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+ int virq);
+int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
+ EventNotifier *rn, qemu_irq irq);
+int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
+ qemu_irq irq);
+void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
+void kvm_init_irq_routing(KVMState *s);
+
+bool kvm_kernel_irqchip_allowed(void);
+bool kvm_kernel_irqchip_required(void);
+bool kvm_kernel_irqchip_split(void);
+
+/**
+ * kvm_arch_irqchip_create:
+ * @s: The KVMState pointer
+ *
+ * Allow architectures to create an in-kernel irq chip themselves.
+ *
+ * Returns: < 0: error
+ * 0: irq chip was not created
+ * > 0: irq chip was created
+ */
+int kvm_arch_irqchip_create(KVMState *s);
+
+/**
+ * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
+ * @id: The register ID
+ * @source: The pointer to the value to be set. It must point to a variable
+ * of the correct type/size for the register being accessed.
+ *
+ * Returns: 0 on success, or a negative errno on failure.
+ */
+int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
+
+/**
+ * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
+ * @id: The register ID
+ * @target: The pointer where the value is to be stored. It must point to a
+ * variable of the correct type/size for the register being accessed.
+ *
+ * Returns: 0 on success, or a negative errno on failure.
+ */
+int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
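+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical): the
+ * buffer passed to the ONE_REG helpers must match the size of the register
+ * named by @id; here a 64-bit register is read back and written unchanged.
+ */
+static inline int example_roundtrip_one_reg(CPUState *cs, uint64_t id)
+{
+    uint64_t val;
+    int ret = kvm_get_one_reg(cs, id, &val);
+
+    if (ret == 0) {
+        ret = kvm_set_one_reg(cs, id, &val);
+    }
+    return ret;
+}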
+
+/* Notify resamplefd for EOI of specific interrupts. */
+void kvm_resample_fd_notify(int gsi);
+
+bool kvm_dirty_ring_enabled(void);
+
+uint32_t kvm_dirty_ring_size(void);
+
+void kvm_mark_guest_state_protected(void);
+
+/**
+ * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
+ * reported for the VM.
+ */
+bool kvm_hwpoisoned_mem(void);
+
+int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);
+
+int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
+int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);
+
+int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);
+
+#endif
diff --git a/include/system/kvm_int.h b/include/system/kvm_int.h
new file mode 100644
index 0000000..756a3c0
--- /dev/null
+++ b/include/system/kvm_int.h
@@ -0,0 +1,187 @@
+/*
+ * Internal definitions for a target's KVM support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_INT_H
+#define QEMU_KVM_INT_H
+
+#include "system/memory.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/accel.h"
+#include "qemu/queue.h"
+#include "system/kvm.h"
+#include "hw/boards.h"
+#include "hw/i386/topology.h"
+#include "io/channel-socket.h"
+
+typedef struct KVMSlot
+{
+ hwaddr start_addr;
+ ram_addr_t memory_size;
+ void *ram;
+ int slot;
+ int flags;
+ int old_flags;
+ /* Dirty bitmap cache for the slot */
+ unsigned long *dirty_bmap;
+ unsigned long dirty_bmap_size;
+ /* Cache of the address space ID */
+ int as_id;
+ /* Cache of the offset in ram address space */
+ ram_addr_t ram_start_offset;
+ int guest_memfd;
+ hwaddr guest_memfd_offset;
+} KVMSlot;
+
+typedef struct KVMMemoryUpdate {
+ QSIMPLEQ_ENTRY(KVMMemoryUpdate) next;
+ MemoryRegionSection section;
+} KVMMemoryUpdate;
+
+typedef struct KVMMemoryListener {
+ MemoryListener listener;
+ KVMSlot *slots;
+ unsigned int nr_slots_used;
+ unsigned int nr_slots_allocated;
+ int as_id;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
+} KVMMemoryListener;
+
+#define KVM_MSI_HASHTAB_SIZE 256
+
+typedef struct KVMHostTopoInfo {
+    /* Number of packages on the Host */
+ unsigned int maxpkgs;
+ /* Number of cpus on the Host */
+ unsigned int maxcpus;
+ /* Number of cpus on each different package */
+ unsigned int *pkg_cpu_count;
+ /* Each package can have different maxticks */
+ unsigned int *maxticks;
+} KVMHostTopoInfo;
+
+struct KVMMsrEnergy {
+ pid_t pid;
+ bool enable;
+ char *socket_path;
+ QIOChannelSocket *sioc;
+ QemuThread msr_thr;
+ unsigned int guest_vcpus;
+ unsigned int guest_vsockets;
+ X86CPUTopoInfo guest_topo_info;
+ KVMHostTopoInfo host_topo;
+ const CPUArchIdList *guest_cpu_list;
+ uint64_t *msr_value;
+ uint64_t msr_unit;
+ uint64_t msr_limit;
+ uint64_t msr_info;
+};
+
+enum KVMDirtyRingReaperState {
+ KVM_DIRTY_RING_REAPER_NONE = 0,
+ /* The reaper is sleeping */
+ KVM_DIRTY_RING_REAPER_WAIT,
+ /* The reaper is reaping for dirty pages */
+ KVM_DIRTY_RING_REAPER_REAPING,
+};
+
+/*
+ * KVM reaper instance, responsible for collecting the KVM dirty bits
+ * via the dirty ring.
+ */
+struct KVMDirtyRingReaper {
+ /* The reaper thread */
+ QemuThread reaper_thr;
+ volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
+ volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
+};
+struct KVMState
+{
+ AccelState parent_obj;
+ /* Max number of KVM slots supported */
+ int nr_slots_max;
+ int fd;
+ int vmfd;
+ int coalesced_mmio;
+ int coalesced_pio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ bool coalesced_flush_in_progress;
+ int vcpu_events;
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
+ QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
+#endif
+ int max_nested_state_len;
+ int kvm_shadow_mem;
+ bool kernel_irqchip_allowed;
+ bool kernel_irqchip_required;
+ OnOffAuto kernel_irqchip_split;
+ bool sync_mmu;
+ bool guest_state_protected;
+ uint64_t manual_dirty_log_protect;
+ /*
+ * Older POSIX says that ioctl numbers are signed int, but in
+ * practice they are not. (Newer POSIX doesn't specify ioctl
+ * at all.) Linux, glibc and *BSD all treat ioctl numbers as
+ * unsigned, and real-world ioctl values like KVM_GET_XSAVE have
+ * bit 31 set, which means that passing them via an 'int' will
+ * result in sign-extension when they get converted back to the
+ * 'unsigned long' which the ioctl() prototype uses. Luckily Linux
+ * always treats the argument as an unsigned 32-bit int, so any
+ * possible sign-extension is deliberately ignored, but for
+ * consistency we keep to the same type that glibc is using.
+ */
+ unsigned long irq_set_ioctl;
+ unsigned int sigmask_len;
+ GHashTable *gsimap;
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing *irq_routes;
+ int nr_allocated_irq_routes;
+ unsigned long *used_gsi_bitmap;
+ unsigned int gsi_count;
+#endif
+ KVMMemoryListener memory_listener;
+ QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
+
+ /* For "info mtree -f" to tell if an MR is registered in KVM */
+ int nr_as;
+ struct KVMAs {
+ KVMMemoryListener *ml;
+ AddressSpace *as;
+ } *as;
+ uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
+ uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
+ bool kvm_dirty_ring_with_bitmap;
+ uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
+ struct KVMDirtyRingReaper reaper;
+ struct KVMMsrEnergy msr_energy;
+ NotifyVmexitOption notify_vmexit;
+ uint32_t notify_window;
+ uint32_t xen_version;
+ uint32_t xen_caps;
+ uint16_t xen_gnttab_max_frames;
+ uint16_t xen_evtchn_max_pirq;
+ char *device;
+};
+
+void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
+ AddressSpace *as, int as_id, const char *name);
+
+void kvm_set_max_memslot_size(hwaddr max_slot_size);
+
+/**
+ * kvm_hwpoison_page_add:
+ *
+ * Parameters:
+ * @ram_addr: the address in the RAM for the poisoned page
+ *
+ * Add a poisoned page to the list
+ *
+ * Return: None.
+ */
+void kvm_hwpoison_page_add(ram_addr_t ram_addr);
+#endif
diff --git a/include/system/kvm_xen.h b/include/system/kvm_xen.h
new file mode 100644
index 0000000..7d0e69f
--- /dev/null
+++ b/include/system/kvm_xen.h
@@ -0,0 +1,44 @@
+/*
+ * Xen HVM emulation support in KVM
+ *
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_SYSTEM_KVM_XEN_H
+#define QEMU_SYSTEM_KVM_XEN_H
+
+/* The KVM API uses these to indicate "no GPA" or "no GFN" */
+#define INVALID_GPA UINT64_MAX
+#define INVALID_GFN UINT64_MAX
+
+/* QEMU plays the rôle of dom0 for "interdomain" communication. */
+#define DOMID_QEMU 0
+
+int kvm_xen_soft_reset(void);
+uint32_t kvm_xen_get_caps(void);
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
+bool kvm_xen_has_vcpu_callback_vector(void);
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
+void kvm_xen_set_callback_asserted(void);
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
+uint16_t kvm_xen_get_gnttab_max_frames(void);
+uint16_t kvm_xen_get_evtchn_max_pirq(void);
+
+#define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() & \
+ KVM_XEN_HVM_CONFIG_ ## cap))
+
+#define XEN_SPECIAL_AREA_ADDR 0xfeff8000UL
+#define XEN_SPECIAL_AREA_SIZE 0x4000UL
+
+#define XEN_SPECIALPAGE_CONSOLE 0
+#define XEN_SPECIALPAGE_XENSTORE 1
+
+#define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \
+ XEN_SPECIALPAGE_##x)
+
+#endif /* QEMU_SYSTEM_KVM_XEN_H */
diff --git a/include/system/memory.h b/include/system/memory.h
new file mode 100644
index 0000000..46248d4
--- /dev/null
+++ b/include/system/memory.h
@@ -0,0 +1,3267 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef SYSTEM_MEMORY_H
+#define SYSTEM_MEMORY_H
+
+#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/memop.h"
+#include "exec/ramlist.h"
+#include "exec/tswap.h"
+#include "qemu/bswap.h"
+#include "qemu/queue.h"
+#include "qemu/int128.h"
+#include "qemu/range.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
+#include "qemu/rcu.h"
+
+#define RAM_ADDR_INVALID (~(ram_addr_t)0)
+
+#define MAX_PHYS_ADDR_SPACE_BITS 62
+#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
+
+#define TYPE_MEMORY_REGION "memory-region"
+DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
+ TYPE_MEMORY_REGION)
+
+#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
+typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
+DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
+ IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
+
+#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
+typedef struct RamDiscardManagerClass RamDiscardManagerClass;
+typedef struct RamDiscardManager RamDiscardManager;
+DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
+ RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
+
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr);
+#else
+static inline void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr)
+{
+ /* Do Nothing */
+}
+#endif
+
+/* Possible bits for global_dirty_log_{start|stop} */
+
+/* Dirty tracking enabled because migration is running */
+#define GLOBAL_DIRTY_MIGRATION (1U << 0)
+
+/* Dirty tracking enabled because measuring dirty rate */
+#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
+
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
+
+extern unsigned int global_dirty_tracking;
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+
+struct ReservedRegion {
+ Range range;
+ unsigned type;
+};
+
+/**
+ * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @fv: the flat view of the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ * @nonvolatile: this section is non-volatile
+ * @unmergeable: this section should not get merged with adjacent sections
+ */
+struct MemoryRegionSection {
+ Int128 size;
+ MemoryRegion *mr;
+ FlatView *fv;
+ hwaddr offset_within_region;
+ hwaddr offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+ bool unmergeable;
+};
+
+typedef struct IOMMUTLBEntry IOMMUTLBEntry;
+
+/* See address_space_translate: bit 0 is read, bit 1 is write. */
+typedef enum {
+ IOMMU_NONE = 0,
+ IOMMU_RO = 1,
+ IOMMU_WO = 2,
+ IOMMU_RW = 3,
+} IOMMUAccessFlags;
+
+#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
+
+struct IOMMUTLBEntry {
+ AddressSpace *target_as;
+ hwaddr iova;
+ hwaddr translated_addr;
+ hwaddr addr_mask; /* 0xfff = 4k translation */
+ IOMMUAccessFlags perm;
+};
+
+/*
+ * Bitmap for different IOMMUNotifier capabilities. Each notifier can
+ * register with one or multiple IOMMU Notifier capability bit(s).
+ *
+ * Normally there are two use cases for the notifiers:
+ *
+ * (1) When the device needs accurate synchronizations of the vIOMMU page
+ * tables, it needs to register with both MAP|UNMAP notifies (which
+ * is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
+ *
+ *     Regarding accurate synchronization, it's when the notified
+ * device maintains a shadow page table and must be notified on each
+ * guest MAP (page table entry creation) and UNMAP (invalidation)
+ * events (e.g. VFIO). Both notifications must be accurate so that
+ * the shadow page table is fully in sync with the guest view.
+ *
+ * (2) When the device doesn't need accurate synchronizations of the
+ * vIOMMU page tables, it needs to register only with UNMAP or
+ * DEVIOTLB_UNMAP notifies.
+ *
+ * It's when the device maintains a cache of IOMMU translations
+ * (IOTLB) and is able to fill that cache by requesting translations
+ * from the vIOMMU through a protocol similar to ATS (Address
+ * Translation Service).
+ *
+ * Note that in this mode the vIOMMU will not maintain a shadowed
+ * page table for the address space, and the UNMAP messages can cover
+ * more than the pages that used to get mapped. The IOMMU notifiee
+ * should be able to take care of over-sized invalidations.
+ */
+typedef enum {
+ IOMMU_NOTIFIER_NONE = 0,
+ /* Notify cache invalidations */
+ IOMMU_NOTIFIER_UNMAP = 0x1,
+ /* Notify entry changes (newly created entries) */
+ IOMMU_NOTIFIER_MAP = 0x2,
+ /* Notify changes on device IOTLB entries */
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
+} IOMMUNotifierFlag;
+
+#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
+ IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
+
+struct IOMMUNotifier;
+typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
+ IOMMUTLBEntry *data);
+
+struct IOMMUNotifier {
+ IOMMUNotify notify;
+ IOMMUNotifierFlag notifier_flags;
+ /* Notify for address space range start <= addr <= end */
+ hwaddr start;
+ hwaddr end;
+ int iommu_idx;
+ void *opaque;
+ QLIST_ENTRY(IOMMUNotifier) node;
+};
+typedef struct IOMMUNotifier IOMMUNotifier;
+
+typedef struct IOMMUTLBEvent {
+ IOMMUNotifierFlag type;
+ IOMMUTLBEntry entry;
+} IOMMUTLBEvent;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC (1 << 0)
+
+/* RAM is mmap-ed with MAP_SHARED */
+#define RAM_SHARED (1 << 1)
+
+/* Only a portion of RAM (used_length) is actually used, and migrated.
+ * Resizing RAM while migrating can result in the migration being canceled.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
+/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
+ * zero the page and wake waiting processes.
+ * (Set during postcopy)
+ */
+#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
+
+/* RAM is a persistent kind of memory */
+#define RAM_PMEM (1 << 5)
+
+
+/*
+ * UFFDIO_WRITEPROTECT is used on this RAMBlock to
+ * support 'write-tracking' migration type.
+ * Implies ram_state->ram_wt_enabled.
+ */
+#define RAM_UF_WRITEPROTECT (1 << 6)
+
+/*
+ * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
+ * pages if applicable) is skipped: will bail out if not supported. When not
+ * set, the OS will do the reservation, if supported for the memory type.
+ */
+#define RAM_NORESERVE (1 << 7)
+
+/* RAM that isn't accessible through normal means. */
+#define RAM_PROTECTED (1 << 8)
+
+/* RAM is an mmap-ed named file */
+#define RAM_NAMED_FILE (1 << 9)
+
+/* RAM is mmap-ed read-only */
+#define RAM_READONLY (1 << 10)
+
+/* RAM FD is opened read-only */
+#define RAM_READONLY_FD (1 << 11)
+
+/* RAM can be private, backed by a KVM guest memfd */
+#define RAM_GUEST_MEMFD (1 << 12)
+
+/*
+ * In RAMBlock creation functions, if MAP_SHARED is 0 in the flags parameter,
+ * the implementation may still create a shared mapping if other conditions
+ * require it. Callers who specifically want a private mapping, e.g. objects
+ * specified by the user, must pass RAM_PRIVATE.
+ * After RAMBlock creation, MAP_SHARED in the block's flags indicates whether
+ * the block is shared or private, and MAP_PRIVATE is omitted.
+ */
+#define RAM_PRIVATE (1 << 13)
+
+static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
+ IOMMUNotifierFlag flags,
+ hwaddr start, hwaddr end,
+ int iommu_idx)
+{
+ n->notify = fn;
+ n->notifier_flags = flags;
+ n->start = start;
+ n->end = end;
+ n->iommu_idx = iommu_idx;
+}
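+
+/*
+ * Usage sketch (illustrative only; the function name is hypothetical): a
+ * device that shadows the vIOMMU page tables registers for both MAP and
+ * UNMAP events; here the notifier watches the whole IOVA range on IOMMU
+ * index 0.
+ */
+static inline void example_init_shadow_notifier(IOMMUNotifier *n,
+                                                IOMMUNotify fn)
+{
+    iommu_notifier_init(n, fn, IOMMU_NOTIFIER_IOTLB_EVENTS,
+                        0, HWADDR_MAX, 0);
+}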
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+ /* Read from the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ uint64_t (*read)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+ /* Write to the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ void (*write)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size);
+
+ MemTxResult (*read_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t *data,
+ unsigned size,
+ MemTxAttrs attrs);
+ MemTxResult (*write_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size,
+ MemTxAttrs attrs);
+
+ enum device_endian endianness;
+ /* Guest-visible constraints: */
+ struct {
+ /* If nonzero, specify bounds on access sizes beyond which a machine
+ * check is thrown.
+ */
+ unsigned min_access_size;
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise unaligned
+ * accesses throw machine checks.
+ */
+ bool unaligned;
+ /*
+ * If present, and returns #false, the transaction is not accepted
+ * by the device (and results in machine dependent behaviour such
+ * as a machine check exception).
+ */
+ bool (*accepts)(void *opaque, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+ } valid;
+ /* Internal implementation constraints: */
+ struct {
+ /* If nonzero, specifies the minimum size implemented. Smaller sizes
+ * will be rounded upwards and a partial result will be returned.
+ */
+ unsigned min_access_size;
+ /* If nonzero, specifies the maximum size implemented. Larger sizes
+ * will be done as a series of accesses with smaller sizes.
+ */
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise all accesses
+ * are converted to (possibly multiple) naturally aligned accesses.
+ */
+ bool unaligned;
+ } impl;
+};
+
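+/*
+ * Illustrative sketch (DemoState and its register array are invented): a
+ * minimal MemoryRegionOps for a bank of 32-bit registers might look like:
+ *
+ *     static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
+ *     {
+ *         DemoState *s = opaque;
+ *
+ *         return s->regs[addr >> 2];
+ *     }
+ *
+ *     static void demo_write(void *opaque, hwaddr addr, uint64_t val,
+ *                            unsigned size)
+ *     {
+ *         DemoState *s = opaque;
+ *
+ *         s->regs[addr >> 2] = val;
+ *     }
+ *
+ *     static const MemoryRegionOps demo_ops = {
+ *         .read = demo_read,
+ *         .write = demo_write,
+ *         .endianness = DEVICE_NATIVE_ENDIAN,
+ *         .valid = {
+ *             .min_access_size = 4,
+ *             .max_access_size = 4,
+ *         },
+ *     };
+ */
+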
+typedef struct MemoryRegionClass {
+ /* private */
+ ObjectClass parent_class;
+} MemoryRegionClass;
+
+
+enum IOMMUMemoryRegionAttr {
+ IOMMU_ATTR_SPAPR_TCE_FD
+};
+
+/*
+ * IOMMUMemoryRegionClass:
+ *
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
+ * and provide an implementation of at least the @translate method here
+ * to handle requests to the memory region. Other methods are optional.
+ *
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
+ * to report whenever mappings are changed, by calling
+ * memory_region_notify_iommu() (or, if necessary, by calling
+ * memory_region_notify_iommu_one() for each registered notifier).
+ *
+ * Conceptually an IOMMU provides a mapping from input address
+ * to an output TLB entry. If the IOMMU is aware of memory transaction
+ * attributes and the output TLB entry depends on the transaction
+ * attributes, we represent this using IOMMU indexes. Each index
+ * selects a particular translation table that the IOMMU has:
+ *
+ * @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ *
+ * @translate takes an input address and an IOMMU index
+ *
+ * and the mapping returned can only depend on the input address and the
+ * IOMMU index.
+ *
+ * Most IOMMUs don't care about the transaction attributes and support
+ * only a single IOMMU index. A more complex IOMMU might have one index
+ * for secure transactions and one for non-secure transactions.
+ */
+struct IOMMUMemoryRegionClass {
+ /* private: */
+ MemoryRegionClass parent_class;
+
+ /* public: */
+ /**
+ * @translate:
+ *
+ * Return a TLB entry that contains a given address.
+ *
+ * The IOMMUAccessFlags indicated via @flag are optional and may
+ * be specified as IOMMU_NONE to indicate that the caller needs
+ * the full translation information for both reads and writes. If
+ * the access flags are specified then the IOMMU implementation
+ * may use this as an optimization, to stop doing a page table
+ * walk as soon as it knows that the requested permissions are not
+ * allowed. If IOMMU_NONE is passed then the IOMMU must do the
+ * full page table walk and report the permissions in the returned
+ * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
+ * return different mappings for reads and writes.)
+ *
+ * The returned information remains valid while the caller is
+ * holding the big QEMU lock or is inside an RCU critical section;
+ * if the caller wishes to cache the mapping beyond that it must
+ * register an IOMMU notifier so it can invalidate its cached
+ * information when the IOMMU mapping changes.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @hwaddr: address to be translated within the memory region
+ *
+ * @flag: requested access permission
+ *
+ * @iommu_idx: IOMMU index for the translation
+ */
+ IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
+ IOMMUAccessFlags flag, int iommu_idx);
+ /**
+ * @get_min_page_size:
+ *
+ * Returns minimum supported page size in bytes.
+ *
+ * If this method is not provided then the minimum is assumed to
+ * be TARGET_PAGE_SIZE.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
+ /**
+ * @notify_flag_changed:
+ *
+ * Called when IOMMU Notifier flag changes (ie when the set of
+ * events which IOMMU users are requesting notification for changes).
+ * Optional method -- need not be provided if the IOMMU does not
+ * need to know exactly which events must be notified.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @old_flags: events which previously needed to be notified
+ *
+ * @new_flags: events which now need to be notified
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL if the new flag bitmap is not supported by the
+ * IOMMU memory region. In case of failure, the implementation must set
+ * @errp with an appropriate error.
+ */
+ int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
+ IOMMUNotifierFlag old_flags,
+ IOMMUNotifierFlag new_flags,
+ Error **errp);
+ /**
+ * @replay:
+ *
+ * Called to handle memory_region_iommu_replay().
+ *
+ * The default implementation of memory_region_iommu_replay() is to
+ * call the IOMMU translate method for every page in the address space
+ * with flag == IOMMU_NONE and then call the notifier if translate
+ * returns a valid mapping. If this method is implemented then it
+ * overrides the default behaviour, and must provide the full semantics
+ * of memory_region_iommu_replay(), by calling @notifier for every
+ * translation present in the IOMMU.
+ *
+ * Optional method -- an IOMMU only needs to provide this method
+ * if the default is inefficient or produces undesirable side effects.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ */
+ void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
+
+ /**
+ * @get_attr:
+ *
+ * Get IOMMU misc attributes. This is an optional method that
+ * can be used to allow users of the IOMMU to get implementation-specific
+ * information. The IOMMU implements this method to handle calls
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
+ * the IOMMU supports. If the method is unimplemented then
+ * memory_region_iommu_get_attr() will always return -EINVAL.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @attr: attribute being queried
+ *
+ * @data: memory to fill in with the attribute data
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
+ */
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
+ /**
+ * @attrs_to_index:
+ *
+ * Return the IOMMU index to use for a given set of transaction attributes.
+ *
+ * Optional method: if an IOMMU only supports a single IOMMU index then
+ * the default implementation of memory_region_iommu_attrs_to_index()
+ * will return 0.
+ *
+ * The indexes supported by an IOMMU must be contiguous, starting at 0.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attrs: memory transaction attributes
+ */
+ int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
+
+ /**
+ * @num_indexes:
+ *
+ * Return the number of IOMMU indexes this IOMMU supports.
+ *
+ * Optional method: if this method is not provided, then
+ * memory_region_iommu_num_indexes() will return 1, indicating that
+ * only a single IOMMU index is supported.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ int (*num_indexes)(IOMMUMemoryRegion *iommu);
+};
+
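+/*
+ * Illustrative sketch (the identity mapping below is invented; a real IOMMU
+ * would walk its translation tables): a minimal @translate implementation,
+ * assigned to IOMMUMemoryRegionClass::translate in the subclass's
+ * class_init, could look like:
+ *
+ *     static IOMMUTLBEntry demo_iommu_translate(IOMMUMemoryRegion *iommu,
+ *                                               hwaddr addr,
+ *                                               IOMMUAccessFlags flag,
+ *                                               int iommu_idx)
+ *     {
+ *         return (IOMMUTLBEntry) {
+ *             .target_as = &address_space_memory,
+ *             .iova = addr & ~(hwaddr)(TARGET_PAGE_SIZE - 1),
+ *             .translated_addr = addr & ~(hwaddr)(TARGET_PAGE_SIZE - 1),
+ *             .addr_mask = TARGET_PAGE_SIZE - 1,
+ *             .perm = IOMMU_RW,
+ *         };
+ *     }
+ */
+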
+typedef struct RamDiscardListener RamDiscardListener;
+typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+struct RamDiscardListener {
+ /*
+ * @notify_populate:
+ *
+ * Notification that previously discarded memory is about to get populated.
+ * Listeners are able to object. If any listener objects, already
+ * successfully notified listeners are notified about a discard again.
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection to get populated. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ *
+ * Returns 0 on success. If the notification is rejected by the listener,
+ * an error is returned.
+ */
+ NotifyRamPopulate notify_populate;
+
+ /*
+ * @notify_discard:
+ *
+ * Notification that previously populated memory was discarded successfully
+ * and listeners should drop all references to such memory and prevent
+ * new population (e.g., unmap).
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection that got discarded. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ */
+ NotifyRamDiscard notify_discard;
+
+ /*
+ * @double_discard_supported:
+ *
+ * The listener supports getting @notify_discard notifications that span
+ * already discarded parts.
+ */
+ bool double_discard_supported;
+
+ MemoryRegionSection *section;
+ QLIST_ENTRY(RamDiscardListener) next;
+};
+
+static inline void ram_discard_listener_init(RamDiscardListener *rdl,
+ NotifyRamPopulate populate_fn,
+ NotifyRamDiscard discard_fn,
+ bool double_discard_supported)
+{
+ rdl->notify_populate = populate_fn;
+ rdl->notify_discard = discard_fn;
+ rdl->double_discard_supported = double_discard_supported;
+}
+
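+/*
+ * Illustrative sketch (demo_map_and_pin()/demo_unmap() are invented
+ * helpers): a consumer that must pin populated memory would define the two
+ * callbacks, initialize the listener and register it for the section it
+ * maps:
+ *
+ *     static int demo_populate(RamDiscardListener *rdl,
+ *                              MemoryRegionSection *section)
+ *     {
+ *         return demo_map_and_pin(section);
+ *     }
+ *
+ *     static void demo_discard(RamDiscardListener *rdl,
+ *                              MemoryRegionSection *section)
+ *     {
+ *         demo_unmap(section);
+ *     }
+ *
+ *     ram_discard_listener_init(&rdl, demo_populate, demo_discard, true);
+ *     ram_discard_manager_register_listener(rdm, &rdl, section);
+ */
+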
+/**
+ * typedef ReplayRamDiscardState:
+ *
+ * The callback handler for #RamDiscardManagerClass.replay_populated/
+ * #RamDiscardManagerClass.replay_discarded to invoke on populated/discarded
+ * parts.
+ *
+ * @section: the #MemoryRegionSection of populated/discarded part
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if failed.
+ */
+typedef int (*ReplayRamDiscardState)(MemoryRegionSection *section,
+ void *opaque);
+
+/*
+ * RamDiscardManagerClass:
+ *
+ * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
+ * regions are currently populated to be used/accessed by the VM, notifying
+ * after parts were discarded (freeing up memory) and before parts will be
+ * populated (consuming memory), to be used/accessed by the VM.
+ *
+ * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
+ * #MemoryRegion isn't mapped into an address space yet (either directly
+ * or via an alias); it cannot change while the #MemoryRegion is
+ * mapped into an address space.
+ *
+ * The #RamDiscardManager is intended to be used by technologies that are
+ * incompatible with discarding of RAM (e.g., VFIO, which may pin all
+ * memory inside a #MemoryRegion), and require proper coordination to only
+ * map the currently populated parts, to hinder parts that are expected to
+ * remain discarded from silently getting populated and consuming memory.
+ * Technologies that support discarding of RAM don't have to bother and can
+ * simply map the whole #MemoryRegion.
+ *
+ * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
+ * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
+ * Logically unplugging memory consists of discarding RAM. The VM agreed to not
+ * access unplugged (discarded) memory - especially via DMA. virtio-mem will
+ * properly coordinate with listeners before memory is plugged (populated),
+ * and after memory is unplugged (discarded).
+ *
+ * Listeners are called in multiples of the minimum granularity (unless it
+ * would exceed the registered range) and changes are aligned to the minimum
+ * granularity within the #MemoryRegion. Listeners have to prepare for memory
+ * becoming discarded in a different granularity than it was populated and the
+ * other way around.
+ */
+struct RamDiscardManagerClass {
+ /* private */
+ InterfaceClass parent_class;
+
+ /* public */
+
+ /**
+ * @get_min_granularity:
+ *
+ * Get the minimum granularity in which listeners will get notified
+ * about changes within the #MemoryRegion via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @mr: the #MemoryRegion
+ *
+ * Returns the minimum granularity.
+ */
+ uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+ /**
+ * @is_populated:
+ *
+ * Check whether the given #MemoryRegionSection is completely populated
+ * (i.e., no parts are currently discarded) via the #RamDiscardManager.
+ * There are no alignment requirements.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ *
+ * Returns whether the given range is completely populated.
+ */
+ bool (*is_populated)(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+ /**
+ * @replay_populated:
+ *
+ * Call the #ReplayRamDiscardState callback for all populated parts within
+ * the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any call fails, no further calls are made.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscardState callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+ int (*replay_populated)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn, void *opaque);
+
+ /**
+ * @replay_discarded:
+ *
+ * Call the #ReplayRamDiscardState callback for all discarded parts within
+ * the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscardState callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+ int (*replay_discarded)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn, void *opaque);
+
+ /**
+ * @register_listener:
+ *
+ * Register a #RamDiscardListener for the given #MemoryRegionSection and
+ * immediately notify the #RamDiscardListener about all populated parts
+ * within the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any notification fails, no further notifications are triggered
+ * and an error is logged.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ * @section: the #MemoryRegionSection
+ */
+ void (*register_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+ /**
+ * @unregister_listener:
+ *
+ * Unregister a previously registered #RamDiscardListener via the
+ * #RamDiscardManager after notifying the #RamDiscardListener about all
+ * populated parts becoming unpopulated within the registered
+ * #MemoryRegionSection.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ */
+ void (*unregister_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+};
+
+uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+/**
+ * ram_discard_manager_replay_populated:
+ *
+ * A wrapper to call the #RamDiscardManagerClass.replay_populated callback
+ * of the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscardState callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque);
+
+/**
+ * ram_discard_manager_replay_discarded:
+ *
+ * A wrapper to call the #RamDiscardManagerClass.replay_discarded callback
+ * of the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscardState callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque);
+
+void ram_discard_manager_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+
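+/*
+ * Illustrative sketch (DemoTracker and demo_add_range() are invented): a
+ * caller that only cares about the currently populated parts of a section
+ * passes a #ReplayRamDiscardState callback to the wrapper above:
+ *
+ *     static int demo_replay_cb(MemoryRegionSection *section, void *opaque)
+ *     {
+ *         DemoTracker *t = opaque;
+ *
+ *         return demo_add_range(t, section);
+ *     }
+ *
+ *     ret = ram_discard_manager_replay_populated(rdm, section,
+ *                                                demo_replay_cb, tracker);
+ */
+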
+/**
+ * memory_translate_iotlb: Extract addresses from a TLB entry.
+ * Called with rcu_read_lock held.
+ *
+ * @iotlb: pointer to an #IOMMUTLBEntry
+ * @xlat_p: return the offset of the entry from the start of the returned
+ * MemoryRegion.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: On success, return the MemoryRegion containing the @iotlb translated
+ * addr. The MemoryRegion must not be accessed after rcu_read_unlock.
+ * On failure, return NULL, setting @errp with error.
+ */
+MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
+ Error **errp);
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+/** MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
+struct MemoryRegion {
+ Object parent_obj;
+
+ /* private: */
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool nonvolatile;
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ bool unmergeable;
+ uint8_t dirty_log_mask;
+ bool is_iommu;
+ RAMBlock *ram_block;
+ Object *owner;
+ /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
+ DeviceState *dev;
+
+ const MemoryRegionOps *ops;
+ void *opaque;
+ MemoryRegion *container;
+ int mapped_via_alias; /* Mapped via an alias, container might be NULL */
+ Int128 size;
+ hwaddr addr;
+ void (*destructor)(MemoryRegion *mr);
+ uint64_t align;
+ bool terminates;
+ bool ram_device;
+ bool enabled;
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ hwaddr alias_offset;
+ int32_t priority;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
+ const char *name;
+ unsigned ioeventfd_nb;
+ MemoryRegionIoeventfd *ioeventfds;
+ RamDiscardManager *rdm; /* Only for RAM */
+
+ /* For devices designed to perform re-entrant IO into their own IO MRs */
+ bool disable_reentrancy_guard;
+};
+
+struct IOMMUMemoryRegion {
+ MemoryRegion parent_obj;
+
+ QLIST_HEAD(, IOMMUNotifier) iommu_notify;
+ IOMMUNotifierFlag iommu_notify_flags;
+};
+
+#define IOMMU_NOTIFIER_FOREACH(n, mr) \
+ QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+
+#define MEMORY_LISTENER_PRIORITY_MIN 0
+#define MEMORY_LISTENER_PRIORITY_ACCEL 10
+#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
+
+/**
+ * struct MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ /**
+ * @begin:
+ *
+ * Called at the beginning of an address space update transaction.
+ * Followed by calls to #MemoryListener.region_add(),
+ * #MemoryListener.region_del(), #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop() in
+ * increasing address order.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*begin)(MemoryListener *listener);
+
+ /**
+ * @commit:
+ *
+ * Called at the end of an address space update transaction,
+ * after the last call to #MemoryListener.region_add(),
+ * #MemoryListener.region_del() or #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop().
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*commit)(MemoryListener *listener);
+
+ /**
+ * @region_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that is new in this address space
+ * since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ */
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has disappeared in the address
+ * space since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The old #MemoryRegionSection.
+ */
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_nop:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that is in the same place in the address
+ * space as in the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_start:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop(), if dirty memory logging clients have
+ * become active since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old_val: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new_val: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
+ int old_val, int new_val);
+
+ /**
+ * @log_stop:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop() and possibly after
+ * #MemoryListener.log_start(), if dirty memory logging clients have
+ * become inactive since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old_val: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new_val: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
+ int old_val, int new_val);
+
+ /**
+ * @log_sync:
+ *
+ * Called by memory_region_snapshot_and_clear_dirty() and
+ * memory_global_dirty_log_sync(), before accessing QEMU's "official"
+ * copy of the dirty memory bitmap for a #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_sync_global:
+ *
+ * This is the global version of @log_sync when the listener does
+ * not have a way to synchronize the log with finer granularity.
+ * When the listener registers with @log_sync_global defined, then
+ * its @log_sync must be NULL. Vice versa.
+ *
+ * @listener: The #MemoryListener.
+ * @last_stage: The last stage to synchronize the log during migration.
+ * The caller should guarantee that the synchronization with @last_stage
+ * set to true is triggered only once, after all VCPUs have been stopped.
+ */
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
+
+ /**
+ * @log_clear:
+ *
+ * Called before reading the dirty memory bitmap for a
+ * #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_global_start:
+ *
+ * Called by memory_global_dirty_log_start(), which
+ * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space. #MemoryListener.log_global_start() is also
+ * called when a #MemoryListener is added, if global dirty logging is
+ * active at that time.
+ *
+ * @listener: The #MemoryListener.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+ bool (*log_global_start)(MemoryListener *listener, Error **errp);
+
+ /**
+ * @log_global_stop:
+ *
+ * Called by memory_global_dirty_log_stop(), which
+ * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_stop)(MemoryListener *listener);
+
+ /**
+ * @log_global_after_sync:
+ *
+ * Called after reading the dirty memory bitmap
+ * for any #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_after_sync)(MemoryListener *listener);
+
+ /**
+ * @eventfd_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the new ioeventfd.
+ * @data: The @data parameter for the new ioeventfd.
+ * @e: The #EventNotifier parameter for the new ioeventfd.
+ */
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @eventfd_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped an ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the dropped ioeventfd.
+ * @data: The @data parameter for the dropped ioeventfd.
+ * @e: The #EventNotifier parameter for the dropped ioeventfd.
+ */
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @coalesced_io_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new coalesced
+ * MMIO range registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
+ void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+
+ /**
+ * @coalesced_io_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped a coalesced
+ * MMIO range since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
+ void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /**
+ * @priority:
+ *
+ * Govern the order in which memory listeners are invoked. Lower priorities
+ * are invoked earlier for "add" or "start" callbacks, and later for "delete"
+ * or "stop" callbacks.
+ */
+ unsigned priority;
+
+ /**
+ * @name:
+ *
+ * Name of the listener. It can be used in contexts where we'd like to
+ * distinguish one memory listener from the rest.
+ */
+ const char *name;
+
+ /* private: */
+ AddressSpace *address_space;
+ QTAILQ_ENTRY(MemoryListener) link;
+ QTAILQ_ENTRY(MemoryListener) link_as;
+};
+
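+/*
+ * Illustrative sketch (demo_region_add()/demo_region_del() are invented): a
+ * component that only needs to track sections appearing and disappearing
+ * typically fills in a couple of callbacks and registers the listener with
+ * memory_listener_register():
+ *
+ *     static MemoryListener demo_listener = {
+ *         .name = "demo",
+ *         .region_add = demo_region_add,
+ *         .region_del = demo_region_del,
+ *         .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
+ *     };
+ *
+ *     memory_listener_register(&demo_listener, &address_space_memory);
+ */
+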
+typedef struct AddressSpaceMapClient {
+ QEMUBH *bh;
+ QLIST_ENTRY(AddressSpaceMapClient) link;
+} AddressSpaceMapClient;
+
+#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
+
+/**
+ * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+ /* private: */
+ struct rcu_head rcu;
+ char *name;
+ MemoryRegion *root;
+
+ /* Accessed via RCU. */
+ struct FlatView *current_map;
+
+ int ioeventfd_nb;
+ int ioeventfd_notifiers;
+ struct MemoryRegionIoeventfd *ioeventfds;
+ QTAILQ_HEAD(, MemoryListener) listeners;
+ QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+
+ /*
+ * Maximum DMA bounce buffer size used for indirect memory map requests.
+ * This limits the total size of bounce buffer allocations made for
+ * DMA requests to indirect memory regions within this AddressSpace. DMA
+ * requests that exceed the limit (e.g. due to overly large requested size
+ * or concurrent DMA requests having claimed too much buffer space) will be
+ * rejected and left to the caller to handle.
+ */
+ size_t max_bounce_buffer_size;
+ /* Total size of bounce buffers currently allocated, atomically accessed */
+ size_t bounce_buffer_size;
+ /* List of callbacks to invoke when buffers free up */
+ QemuMutex map_client_list_lock;
+ QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
+};
+
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+typedef struct FlatRange FlatRange;
+
+/* Flattened global view of current active memory hierarchy. Kept in sorted
+ * order.
+ */
+struct FlatView {
+ struct rcu_head rcu;
+ unsigned ref;
+ FlatRange *ranges;
+ unsigned nr;
+ unsigned nr_allocated;
+ struct AddressSpaceDispatch *dispatch;
+ MemoryRegion *root;
+};
+
+static inline FlatView *address_space_to_flatview(AddressSpace *as)
+{
+ return qatomic_rcu_read(&as->current_map);
+}
+
+/**
+ * typedef flatview_cb: callback for flatview_for_each_range()
+ *
+ * @start: start address of the range within the FlatView
+ * @len: length of the range in bytes
+ * @mr: MemoryRegion covering this range
+ * @offset_in_region: offset of the first byte of the range within @mr
+ * @opaque: data pointer passed to flatview_for_each_range()
+ *
+ * Returns: true to stop the iteration, false to keep going.
+ */
+typedef bool (*flatview_cb)(Int128 start,
+ Int128 len,
+ const MemoryRegion *mr,
+ hwaddr offset_in_region,
+ void *opaque);
+
+/**
+ * flatview_for_each_range: Iterate through a FlatView
+ * @fv: the FlatView to iterate through
+ * @cb: function to call for each range
+ * @opaque: opaque data pointer to pass to @cb
+ *
+ * A FlatView is made up of a list of non-overlapping ranges, each of
+ * which is a slice of a MemoryRegion. This function iterates through
+ * each range in @fv, calling @cb. The callback function can terminate
+ * iteration early by returning 'true'.
+ */
+void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
+
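+/*
+ * Illustrative sketch (demo_dump_range() is an invented callback and @as
+ * stands for whatever AddressSpace the caller holds): the walk is usually
+ * done on the current FlatView inside an RCU critical section:
+ *
+ *     static bool demo_dump_range(Int128 start, Int128 len,
+ *                                 const MemoryRegion *mr,
+ *                                 hwaddr offset_in_region, void *opaque)
+ *     {
+ *         return false;   // keep iterating
+ *     }
+ *
+ *     RCU_READ_LOCK_GUARD();
+ *     flatview_for_each_range(address_space_to_flatview(as),
+ *                             demo_dump_range, NULL);
+ */
+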
+static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
+ MemoryRegionSection *b)
+{
+ return a->mr == b->mr &&
+ a->fv == b->fv &&
+ a->offset_within_region == b->offset_within_region &&
+ a->offset_within_address_space == b->offset_within_address_space &&
+ int128_eq(a->size, b->size) &&
+ a->readonly == b->readonly &&
+ a->nonvolatile == b->nonvolatile;
+}
+
+/**
+ * memory_region_section_new_copy: Copy a memory region section
+ *
+ * Allocate memory for a new copy, copy the memory region section, and
+ * properly take a reference on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to copy
+ */
+MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_section_free_copy: Free a copied memory region section
+ *
+ * Free a copy of a memory section created via memory_region_section_new_copy(),
+ * properly dropping references on all relevant members.
+ *
+ * @s: the copied #MemoryRegionSection to free
+ */
+void memory_region_section_free_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_section_intersect_range: Adjust the memory section to cover
+ * the intersection with the given range.
+ *
+ * @s: the #MemoryRegionSection to be adjusted
+ * @offset: the offset of the given range in the memory region
+ * @size: the size of the given range
+ *
+ * Returns false if the intersection is empty, otherwise returns true.
+ */
+static inline bool memory_region_section_intersect_range(MemoryRegionSection *s,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t start = MAX(s->offset_within_region, offset);
+ Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region),
+ s->size),
+ int128_add(int128_make64(offset),
+ int128_make64(size)));
+
+ if (int128_le(end, int128_make64(start))) {
+ return false;
+ }
+
+ s->offset_within_address_space += start - s->offset_within_region;
+ s->offset_within_region = start;
+ s->size = int128_sub(end, int128_make64(start));
+ return true;
+}
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions. Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_ref: Add 1 to a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function adds a reference to the owner.
+ *
+ * All MemoryRegions must have an owner if they can disappear, even if the
+ * device they belong to operates exclusively under the BQL. This is because
+ * the region could be returned at any time by memory_region_find, and this
+ * is usually under guest control.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_ref(MemoryRegion *mr);
+
+/**
+ * memory_region_unref: Remove 1 from a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function removes a reference to the owner and possibly destroys it.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_unref(MemoryRegion *mr);
+
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: a structure containing read and write callbacks to be used when
+ * I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
+
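+/*
+ * Illustrative sketch (continuing the invented demo_ops/DemoState example
+ * above; sysbus wiring is one common choice, not the only one): a device's
+ * realize function typically creates and exposes its MMIO window with:
+ *
+ *     memory_region_init_io(&s->mmio, OBJECT(s), &demo_ops, s,
+ *                           "demo-mmio", 0x1000);
+ *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
+ */
+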
+/**
+ * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
+ * into the region will modify memory
+ * directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
+ * RAM_GUEST_MEMFD.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ Error **errp);
+
+/**
+ * memory_region_init_resizeable_ram: Initialize memory region with resizable
+ * RAM. Accesses into the region will
+ * modify memory directly. Only an initial
+ * portion of this RAM is actually used.
+ * Changing the size while migrating
+ * can result in the migration being
+ * canceled.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: used size of the region.
+ * @max_size: max size of the region.
+ * @resized: callback to notify owner about used size change.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_resizeable_ram(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint64_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ Error **errp);
+#ifdef CONFIG_POSIX
+
+/**
+ * memory_region_init_ram_from_file: Initialize RAM memory region with a
+ * mmap-ed backend.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @align: alignment of the region base address; if 0, the default alignment
+ * (getpagesize()) will be used.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
+ * @path: the path in which to allocate the RAM.
+ * @offset: offset within the file referenced by path
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_ram_from_file(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint64_t align,
+ uint32_t ram_flags,
+ const char *path,
+ ram_addr_t offset,
+ Error **errp);
+
+/**
+ * memory_region_init_ram_from_fd: Initialize RAM memory region with a
+ * mmap-ed backend.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
+ * @fd: the fd to mmap.
+ * @offset: offset within the file referenced by fd
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_ram_from_fd(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ int fd,
+ ram_addr_t offset,
+ Error **errp);
+#endif
+
+/**
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
+ * a user-provided pointer.
+ *
+ * A RAM device represents a mapping to a physical device, such as to a PCI
+ * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
+ * into the VM address space and access to the region will modify memory
+ * directly. However, the memory region should not be included in a memory
+ * dump (device may not be enabled/mapped at the time of the dump), and
+ * operations incompatible with manipulating MMIO should be avoided. Replaces
+ * skip_dump flag.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ * (For RAM device memory regions, migrating the contents rarely makes sense.)
+ */
+void memory_region_init_ram_device_ptr(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ * part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ * @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ MemoryRegion *orig,
+ hwaddr offset,
+ uint64_t size);
+
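+/*
+ * Illustrative sketch (the ROM region, its second address and the names are
+ * invented): aliases are commonly used to make one region visible at an
+ * additional guest-physical address:
+ *
+ *     memory_region_init_alias(&s->rom_alias, OBJECT(s), "demo-rom-alias",
+ *                              &s->rom, 0, memory_region_size(&s->rom));
+ *     memory_region_add_subregion(get_system_memory(), 0xfffc0000,
+ *                                 &s->rom_alias);
+ */
+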
+/**
+ * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
+ *
+ * This has the same effect as calling memory_region_init_ram_nomigrate()
+ * and then marking the resulting region read-only with
+ * memory_region_set_readonly().
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM side of the memory region to be migrated; that is the responsibility
+ * of the caller.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
+ * Writes are handled via callbacks.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM side of the memory region to be migrated; that is the responsibility
+ * of the caller.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_iommu: Initialize a memory region of a custom type
+ * that translates addresses
+ *
+ * An IOMMU region translates addresses and forwards accesses to a target
+ * memory region.
+ *
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
+ * @_iommu_mr should be a pointer to enough memory for an instance of
+ * that subclass, @instance_size is the size of that subclass, and
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
+ * instance of the subclass, and its methods will then be called to handle
+ * accesses to the memory region. See the documentation of
+ * #IOMMUMemoryRegionClass for further details.
+ *
+ * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
+ * @instance_size: the IOMMUMemoryRegion subclass instance size
+ * @mrtypename: the type name of the #IOMMUMemoryRegion
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_iommu(void *_iommu_mr,
+ size_t instance_size,
+ const char *mrtypename,
+ Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram - Initialize RAM memory region. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count (must be
+ * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
+ * @name: name of the memory region
+ * @size: size of the region in bytes
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * This function allocates RAM for a board model or device, and
+ * arranges for it to be migrated (by calling vmstate_register_ram()
+ * if @owner is a DeviceState, or vmstate_register_ram_global() if
+ * @owner is NULL).
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_ram(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
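+/*
+ * Illustrative sketch (the name, size and base address are invented): board
+ * code commonly allocates migratable RAM and then maps it into the system
+ * address space:
+ *
+ *     memory_region_init_ram(&s->ram, OBJECT(dev), "demo.ram", 128 * MiB,
+ *                            &error_fatal);
+ *     memory_region_add_subregion(get_system_memory(), 0x40000000, &s->ram);
+ */
+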
+bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_rom: Initialize a ROM memory region.
+ *
+ * This has the same effect as calling memory_region_init_ram()
+ * and then marking the resulting region read-only with
+ * memory_region_set_readonly(). This includes arranging for the
+ * contents to be migrated.
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_rom(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_rom_device: Initialize a ROM memory region.
+ * Writes are handled via callbacks.
+ *
+ * This function initializes a memory region backed by RAM for reads
+ * and callbacks for writes, and arranges for the RAM backing to
+ * be migrated (by calling vmstate_register_ram()
+ * if @owner is a DeviceState, or vmstate_register_ram_global() if
+ * @owner is NULL).
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: Region name, becomes part of RAMBlock name used in migration stream
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_region_init_rom_device(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+
+/**
+ * memory_region_owner: get a memory region's owner.
+ *
+ * @mr: the memory region being queried.
+ */
+Object *memory_region_owner(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_ram(MemoryRegion *mr)
+{
+ return mr->ram;
+}
+
+/**
+ * memory_region_is_ram_device: check whether a memory region is a ram device
+ *
+ * Returns %true if a memory region is a device-backed RAM region.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram_device(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is in ROMD mode
+ *
+ * Returns %true if a memory region is a ROM device and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+ return mr->rom_device && mr->romd_mode;
+}
+
+/**
+ * memory_region_is_protected: check whether a memory region is protected
+ *
+ * Returns %true if a memory region is protected RAM and cannot be accessed
+ * via standard mechanisms, e.g. DMA.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_protected(MemoryRegion *mr);
+
+/**
+ * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
+ * associated
+ *
+ * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_has_guest_memfd(MemoryRegion *mr);
+
+/**
+ * memory_region_get_iommu: check whether a memory region is an iommu
+ *
+ * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
+ * otherwise NULL.
+ *
+ * @mr: the memory region being queried
+ */
+static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
+{
+ if (mr->alias) {
+ return memory_region_get_iommu(mr->alias);
+ }
+ if (mr->is_iommu) {
+ return (IOMMUMemoryRegion *) mr;
+ }
+ return NULL;
+}
+
+/**
+ * memory_region_get_iommu_class_nocheck: returns iommu memory region class
+ * if an iommu or NULL if not
+ *
+ * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
+ * otherwise NULL. This is a fast path avoiding QOM checking; use with caution.
+ *
+ * @iommu_mr: the memory region being queried
+ */
+static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
+ IOMMUMemoryRegion *iommu_mr)
+{
+ return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
+}
+
+#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
+
+/**
+ * memory_region_iommu_get_min_page_size: get minimum supported page size
+ * for an iommu
+ *
+ * Returns minimum supported page size for an iommu.
+ *
+ * @iommu_mr: the memory region being queried
+ */
+uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
+
+/**
+ * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
+ *
+ * Note: for any IOMMU implementation, an in-place mapping change
+ * should be notified with an UNMAP followed by a MAP.
+ *
+ * @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
+ */
+void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
+ const IOMMUTLBEvent event);
+
+/**
+ * memory_region_notify_iommu_one: notify a change in an IOMMU translation
+ * entry to a single notifier
+ *
+ * This works just like memory_region_notify_iommu(), but it only
+ * notifies a specific notifier, not all of them.
+ *
+ * @notifier: the notifier to be notified
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
+ */
+void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
+ const IOMMUTLBEvent *event);
+
+/**
+ * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
+ * translation that covers the
+ * range of a notifier
+ *
+ * @notifier: the notifier to be notified
+ */
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
+
+
+/**
+ * memory_region_register_iommu_notifier: register a notifier for changes to
+ * IOMMU translation entries.
+ *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that at least one of the attributes of the notifier
+ * is not supported (flag/range) by the IOMMU memory region. In case of
+ * error, the implementation must set @errp with an appropriate error.
+ *
+ * @mr: the memory region to observe
+ * @n: the IOMMUNotifier to be added; the notify callback receives a
+ * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
+ * ceases to be valid on exit from the notifier.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n, Error **errp);
+
+/**
+ * memory_region_iommu_replay: replay existing IOMMU translations to
+ * a notifier with the minimum page granularity returned by the IOMMU's
+ * get_min_page_size() method.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ *
+ * @iommu_mr: the memory region to observe
+ * @n: the notifier to which to replay iommu mappings
+ */
+void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
+
+/**
+ * memory_region_unregister_iommu_notifier: unregister a notifier for
+ * changes to IOMMU translation entries.
+ *
+ * @mr: the memory region which was observed and for which notify_stopped()
+ * needs to be called
+ * @n: the notifier to be removed.
+ */
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n);
+
+/**
+ * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
+ * defined on the IOMMU.
+ *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that the IOMMU does not support the requested
+ * attribute.
+ *
+ * @iommu_mr: the memory region
+ * @attr: the requested attribute
+ * @data: a pointer to the requested attribute data
+ */
+int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
+ enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
+/**
+ * memory_region_iommu_attrs_to_index: return the IOMMU index to
+ * use for translations with the given memory transaction attributes.
+ *
+ * @iommu_mr: the memory region
+ * @attrs: the memory transaction attributes
+ */
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs);
+
+/**
+ * memory_region_iommu_num_indexes: return the total number of IOMMU
+ * indexes that this IOMMU supports.
+ *
+ * @iommu_mr: the memory region
+ */
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(const MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes for the given client
+ *
+ * @mr: the memory region being queried
+ * @client: the client being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
+
+/**
+ * memory_region_get_dirty_log_mask: return the clients for which a
+ * memory region is logging writes.
+ *
+ * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
+ * are the bit indices.
+ *
+ * @mr: the memory region being queried
+ */
+uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_rom(MemoryRegion *mr)
+{
+ return mr->ram && mr->readonly;
+}
+
+/**
+ * memory_region_is_nonvolatile: check whether a memory region is non-volatile
+ *
+ * Returns %true if a memory region is non-volatile memory.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
+{
+ return mr->nonvolatile;
+}
+
+/**
+ * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
+ *
+ * Returns a file descriptor backing a file-based RAM memory region,
+ * or -1 if the region is not a file-based RAM memory region.
+ *
+ * @mr: the RAM or alias memory region being queried.
+ */
+int memory_region_get_fd(MemoryRegion *mr);
+
+/**
+ * memory_region_from_host: Convert a pointer into a RAM memory region
+ * and an offset within it.
+ *
+ * Given a host pointer inside a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()), return
+ * the MemoryRegion and the offset within it.
+ *
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore. If the caller is not within an RCU critical
+ * section and does not hold the BQL, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
+ *
+ * @ptr: the host pointer to be converted
+ * @offset: the offset within memory region
+ */
+MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()).
+ *
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore. If the caller is not within an RCU critical
+ * section and does not hold the BQL, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/* memory_region_ram_resize: Resize a RAM region.
+ *
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
+ *
+ * @mr: a memory region created with @memory_region_init_resizeable_ram.
+ * @newsize: the new size of the region
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
+ Error **errp);
+
+/**
+ * memory_region_msync: Synchronize selected address range of
+ * a memory mapped region
+ *
+ * @mr: the memory region to be synchronized
+ * @addr: the initial address of the range to be synchronized
+ * @size: the size of the range to be synchronized
+ */
+void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_writeback: Trigger cache writeback for
+ * selected address range
+ *
+ * @mr: the memory region to be updated
+ * @addr: the initial address of the range to be written back
+ * @size: the size of the range to be written back
+ */
+void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size);
+
+/**
+ * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
+ *
+ * This function is called when the caller wants to clear the remote
+ * dirty bitmap of a memory range within the memory region. This can
+ * be used by e.g. KVM to manually clear the dirty log when
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
+ * kernel.
+ *
+ * @mr: the memory region to clear the dirty log upon
+ * @start: start address offset within the memory region
+ * @len: length of the range whose dirty bitmap should be cleared
+ */
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len);
+
+/**
+ * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
+ * bitmap and clear it.
+ *
+ * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
+ * returns the snapshot. The snapshot can then be used to query dirty
+ * status, using memory_region_snapshot_get_dirty. Snapshotting allows
+ * querying the same page multiple times, which is especially useful for
+ * display updates where the scanlines often are not page aligned.
+ *
+ * The dirty bitmap region which gets copied into the snapshot (and
+ * cleared afterwards) can be larger than requested. The boundaries
+ * are rounded up/down so complete bitmap longs (covering 64 pages on
+ * 64-bit hosts) can be copied over into the bitmap snapshot. This
+ * isn't a problem for display updates as the extra pages are outside
+ * the visible area, and in case the visible area changes a full
+ * display redraw is due anyway. Should other use cases for this
+ * function emerge we might have to revisit this implementation
+ * detail.
+ *
+ * Use g_free() to release the DirtyBitmapSnapshot.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
+ */
+DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
+ hwaddr addr,
+ hwaddr size,
+ unsigned client);
+
+/**
+ * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
+ * in the specified dirty bitmap snapshot.
+ *
+ * @mr: the memory region being queried.
+ * @snap: the dirty bitmap snapshot
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ */
+bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
+ DirtyBitmapSnapshot *snap,
+ hwaddr addr, hwaddr size);
+
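+/*
+ * Example: a minimal sketch of a display update driven by the snapshot API
+ * above; the VRAM region, geometry and redraw helper are illustrative
+ * assumptions.
+ *
+ *     DirtyBitmapSnapshot *snap;
+ *
+ *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
+ *                                                   DIRTY_MEMORY_VGA);
+ *     for (int y = 0; y < height; y++) {
+ *         if (memory_region_snapshot_get_dirty(vram, snap,
+ *                                              y * stride, stride)) {
+ *             redraw_scanline(y);          // hypothetical helper
+ *         }
+ *     }
+ *     g_free(snap);
+ */
+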
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ * client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
+ * TBs (for self-modifying code).
+ *
+ * The MemoryRegionOps->write() callback of a ROM device must use this function
+ * to mark byte ranges that have been modified internally, such as by directly
+ * accessing the memory returned by memory_region_get_ram_ptr().
+ *
+ * This function marks the range dirty and invalidates TBs so that TCG can
+ * detect self-modifying code.
+ *
+ * @mr: the region being flushed.
+ * @addr: the start, relative to the start of the region, of the range being
+ * flushed.
+ * @size: the size, in bytes, of the range being flushed.
+ */
+void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_set_nonvolatile: Turn a memory region non-volatile
+ *
+ * Allows a memory region to be marked as non-volatile.
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @nonvolatile: whether the region is to be non-volatile.
+ */
+void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
+
+/**
+ * memory_region_rom_device_set_romd: enable/disable ROMD mode
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device()) to
+ * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
+ * device is mapped to guest memory and satisfies read access directly.
+ * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
+ * Writes are always handled by the #MemoryRegion.write function.
+ *
+ * @mr: the memory region to be updated
+ * @romd_mode: %true to put the region into ROMD mode
+ */
+void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to a region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions. Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ * a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensure that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ * is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event. The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_add_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
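+/*
+ * Example: a minimal sketch of wiring a doorbell register to an eventfd;
+ * the EventNotifier helpers (event_notifier_init()/event_notifier_cleanup())
+ * and the register offset are illustrative assumptions.
+ *
+ *     event_notifier_init(&dev->doorbell, 0);
+ *     memory_region_add_eventfd(&dev->mmio, DOORBELL_OFFSET, 4,
+ *                               false, 0, &dev->doorbell);
+ *     ...
+ *     memory_region_del_eventfd(&dev->mmio, DOORBELL_OFFSET, 4,
+ *                               false, 0, &dev->doorbell);
+ *     event_notifier_cleanup(&dev->doorbell);
+ */
+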
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset. The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping). A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion);
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
+ *
+ * Adds a subregion at @offset. The subregion may overlap with other
+ * subregions. Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion,
+ int priority);
+
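+/*
+ * Example: a minimal sketch of composing a container with the calls above;
+ * the sizes, offsets and the init helpers (declared earlier in this header)
+ * are illustrative.
+ *
+ *     memory_region_init(&s->container, OBJECT(s), "board", 0x10000);
+ *     memory_region_init_ram(&s->ram, OBJECT(s), "ram", 0x8000, &error_fatal);
+ *     memory_region_add_subregion(&s->container, 0x0, &s->ram);
+ *     // a previously initialized ROM shadows part of the RAM
+ *     memory_region_add_subregion_overlap(&s->container, 0x4000, &s->rom, 1);
+ */
+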
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ * region
+ *
+ * @mr: the region to be queried
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+uint64_t memory_region_get_alignment(const MemoryRegion *mr);
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its container.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to container region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_size: dynamically update the size of a region.
+ *
+ * Dynamically updates the size of a region.
+ *
+ * @mr: the region to be updated
+ * @size: used size of the region.
+ */
+void memory_region_set_size(MemoryRegion *mr, uint64_t size);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() has changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/*
+ * memory_region_set_unmergeable: Set a memory region unmergeable
+ *
+ * Mark a memory region unmergeable, resulting in the memory region (or
+ * everything contained in a memory region container) not getting merged when
+ * simplifying the address space and notifying memory listeners. Consequently,
+ * memory listeners will never get notified about ranges that are larger than
+ * the original memory regions.
+ *
+ * This is primarily useful when multiple aliases to a RAM memory region are
+ * mapped into a memory region container, and updates (e.g., enable/disable or
+ * map/unmap) of individual memory region aliases are not supposed to affect
+ * other memory regions in the same container.
+ *
+ * @mr: the #MemoryRegion to be updated
+ * @unmergeable: whether to mark the #MemoryRegion unmergeable
+ */
+void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
+
+/**
+ * memory_region_present: checks if an address relative to a @container
+ * translates into a #MemoryRegion within @container
+ *
+ * Answer whether a #MemoryRegion within @container covers the address
+ * @addr.
+ *
+ * @container: a #MemoryRegion within which @addr is a relative address
+ * @addr: the area within @container to be searched
+ */
+bool memory_region_present(MemoryRegion *container, hwaddr addr);
+
+/**
+ * memory_region_is_mapped: returns true if #MemoryRegion is mapped
+ * into another memory region, which does not necessarily imply that it is
+ * mapped into an address space.
+ *
+ * @mr: a #MemoryRegion which should be checked if it's mapped
+ */
+bool memory_region_is_mapped(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * The #RamDiscardManager cannot change while a memory region is mapped.
+ *
+ * @mr: the #MemoryRegion
+ */
+RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
+
+/**
+ * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
+ * #RamDiscardManager assigned
+ *
+ * @mr: the #MemoryRegion
+ */
+static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
+{
+ return !!memory_region_get_ram_discard_manager(mr);
+}
+
+/**
+ * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
+ * that does not cover RAM, or a #MemoryRegion that already has a
+ * #RamDiscardManager assigned. Returns 0 if the rdm is set successfully.
+ *
+ * @mr: the #MemoryRegion
+ * @rdm: #RamDiscardManager to set
+ */
+int memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm);
+
+/**
+ * memory_region_find: translate an address/size relative to a
+ * MemoryRegion into a #MemoryRegionSection.
+ *
+ * Locates the first #MemoryRegion within @mr that overlaps the range
+ * given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ * - @size = 0 iff no overlap was found
+ * - @mr is non-%NULL iff an overlap was found
+ *
+ * Remember that in the return value the @offset_within_region is
+ * relative to the returned region (in the .@mr field), not to the
+ * @mr argument.
+ *
+ * Similarly, the .@offset_within_address_space is relative to the
+ * address space that contains both regions, the passed and the
+ * returned one. However, in the special case where the @mr argument
+ * has no container (and thus is the root of the address space), the
+ * following will hold:
+ * - @offset_within_address_space >= @addr
+ * - @offset_within_address_space + .@size <= @addr + @size
+ *
+ * @mr: a MemoryRegion within which @addr is a relative address
+ * @addr: start of the area within @as to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+ hwaddr addr, uint64_t size);
+
+/**
+ * memory_global_dirty_log_sync: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
+ */
+void memory_global_dirty_log_sync(bool last_stage);
+
+/**
+ * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
+ *
+ * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
+ * This function must be called after the dirty log bitmap is cleared, and
+ * before dirty guest memory pages are read. If you are using
+ * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
+ * care of doing this.
+ */
+void memory_global_after_dirty_log_sync(void);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(void);
+
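+/*
+ * Example: a minimal sketch of batching layout changes so that listeners
+ * see a single update; the region being moved is illustrative.
+ *
+ *     memory_region_transaction_begin();
+ *     memory_region_set_enabled(&s->bar, false);
+ *     memory_region_set_address(&s->bar, new_base);
+ *     memory_region_set_enabled(&s->bar, true);
+ *     memory_region_transaction_commit();
+ */
+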
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped into or unmapped from an address space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of starting dirty log, migration or dirty rate
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stopping dirty log, migration or dirty rate
+ */
+void memory_global_dirty_log_stop(unsigned int flags);
+
+void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+
+/**
+ * memory_region_dispatch_read: perform a read directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @pval: pointer to uint64_t which the data is written to
+ * @op: size, sign, and endianness of the memory operation
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ MemOp op,
+ MemTxAttrs attrs);
+/**
+ * memory_region_dispatch_write: perform a write directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @data: data to write
+ * @op: size, sign, and endianness of the memory operation
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t data,
+ MemOp op,
+ MemTxAttrs attrs);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name. The name is only used for debugging
+ * output.
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_remove_listeners: unregister all listeners of an address space
+ *
+ * Removes all callbacks previously registered with memory_listener_register()
+ * for @as.
+ *
+ * @as: an initialized #AddressSpace
+ */
+void address_space_remove_listeners(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @is_write: indicates the transfer direction
+ */
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const void *buf, hwaddr len);
+
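+/*
+ * Example: a minimal sketch of a DMA-style write through an address space;
+ * the address space, destination address and buffer are illustrative.
+ *
+ *     uint8_t buf[64];                     // filled in by the device model
+ *
+ *     if (address_space_write(&address_space_memory, dma_addr,
+ *                             MEMTXATTRS_UNSPECIFIED, buf,
+ *                             sizeof(buf)) != MEMTX_OK) {
+ *         // report a DMA error to the guest
+ *     }
+ */
+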
+/**
+ * address_space_write_rom: write to address space, including ROM.
+ *
+ * This function writes to the specified address space, but will
+ * write data to both ROM and RAM. This is used for non-guest
+ * writes like writes from the gdb debug stub or initial loading
+ * of ROM contents.
+ *
+ * Note that portions of the write which attempt to write data to
+ * a device will be silently ignored -- only real RAM and ROM will
+ * be written to.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const void *buf, hwaddr len);
+
+/* address_space_ld*: load from an address space
+ * address_space_st*: store to an address space
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address within the AddressSpace.
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst.h.inc"
+
+static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+ address_space_stl_notdirty(as, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst_phys.h.inc"
+
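+/*
+ * Example: the includes above expand into accessors named as described in
+ * the comment (e.g. address_space_ldl_le()/address_space_stl_le()); a
+ * minimal sketch of their expected use:
+ *
+ *     MemTxResult res;
+ *     uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED,
+ *                                       &res);
+ *     address_space_stl_le(as, addr + 4, v, MEMTXATTRS_UNSPECIFIED, NULL);
+ */
+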
+struct MemoryRegionCache {
+ uint8_t *ptr;
+ hwaddr xlat;
+ hwaddr len;
+ FlatView *fv;
+ MemoryRegionSection mrs;
+ bool is_write;
+};
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address. The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX _cached_slow
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst.h.inc"
+
+/* Inline fast path for direct RAM access. */
+static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ return ldub_p(cache->ptr + addr);
+ } else {
+ return address_space_ldub_cached_slow(cache, addr, attrs, result);
+ }
+}
+
+static inline void address_space_stb_cached(MemoryRegionCache *cache,
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ stb_p(cache->ptr + addr, val);
+ } else {
+ address_space_stb_cached_slow(cache, addr, val, attrs, result);
+ }
+}
+
+#define ENDIANNESS
+#include "exec/memory_ldst_cached.h.inc"
+
+#define ENDIANNESS _le
+#include "exec/memory_ldst_cached.h.inc"
+
+#define ENDIANNESS _be
+#include "exec/memory_ldst_cached.h.inc"
+
+#define SUFFIX _cached
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst_phys.h.inc"
+
+/* address_space_cache_init: prepare for repeated access to a physical
+ * memory region
+ *
+ * @cache: #MemoryRegionCache to be filled
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of buffer
+ * @is_write: indicates the transfer direction
+ *
+ * Will only work with RAM, and may map a subset of the requested range by
+ * returning a value that is less than @len. On failure, return a negative
+ * errno value.
+ *
+ * Because it only works with RAM, this function can be used for
+ * read-modify-write operations. In this case, is_write should be %true.
+ *
+ * Note that addresses passed to the address_space_*_cached functions
+ * are relative to @addr.
+ */
+int64_t address_space_cache_init(MemoryRegionCache *cache,
+ AddressSpace *as,
+ hwaddr addr,
+ hwaddr len,
+ bool is_write);
+
+/**
+ * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ *
+ * Initializes a #MemoryRegionCache structure without a memory region attached.
+ * A cache initialized this way can only be safely destroyed; it cannot be used.
+ */
+static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
+{
+ cache->mrs.mr = NULL;
+ /* There is no real need to initialize fv, but it makes Coverity happy. */
+ cache->fv = NULL;
+}
+
+/**
+ * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ * @addr: The first physical address that was written, relative to the
+ * address that was passed to @address_space_cache_init.
+ * @access_len: The number of bytes that were written starting at @addr.
+ */
+void address_space_cache_invalidate(MemoryRegionCache *cache,
+ hwaddr addr,
+ hwaddr access_len);
+
+/**
+ * address_space_cache_destroy: free a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache whose memory should be released.
+ */
+void address_space_cache_destroy(MemoryRegionCache *cache);
+
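+/*
+ * Example: a minimal sketch of the cache lifecycle for a guest descriptor;
+ * the address and length are illustrative.
+ *
+ *     MemoryRegionCache cache;
+ *     int64_t mapped = address_space_cache_init(&cache, as, desc_addr,
+ *                                               desc_len, true);
+ *     if (mapped < 0) {
+ *         // handle failure; a short mapping (mapped < desc_len) also needs care
+ *     }
+ *     uint8_t flags = address_space_ldub_cached(&cache, 0,
+ *                                               MEMTXATTRS_UNSPECIFIED, NULL);
+ *     address_space_stb_cached(&cache, 0, flags | 1,
+ *                              MEMTXATTRS_UNSPECIFIED, NULL);
+ *     address_space_cache_invalidate(&cache, 0, 1);   // byte 0 was written
+ *     address_space_cache_destroy(&cache);
+ */
+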
+/* address_space_get_iotlb_entry: translate an address into an IOTLB
+ * entry. Should be called from an RCU critical section.
+ */
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+ bool is_write, MemTxAttrs attrs);
+
+/* address_space_translate: translate an address range within an address space
+ * into a MemoryRegion and an address range within that region. Should be
+ * called from an RCU critical section, to avoid that the last reference
+ * to the returned region disappears after address_space_translate returns.
+ *
+ * @fv: #FlatView to be accessed
+ * @addr: address within that address space
+ * @xlat: pointer to address within the returned memory region section's
+ * #MemoryRegion.
+ * @len: pointer to length
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+MemoryRegion *flatview_translate(FlatView *fv,
+ hwaddr addr, hwaddr *xlat,
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs);
+
+static inline MemoryRegion *address_space_translate(AddressSpace *as,
+ hwaddr addr, hwaddr *xlat,
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs)
+{
+ return flatview_translate(address_space_to_flatview(as),
+ addr, xlat, len, is_write, attrs);
+}
+
+/* address_space_access_valid: check for validity of accessing an address
+ * space range
+ *
+ * Check whether memory is assigned to the given address space range, and
+ * access is permitted by any IOMMU regions that are active for the address
+ * space.
+ *
+ * For now, addr and len should be aligned to a page size. This limitation
+ * will be lifted in the future.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of the area to be checked
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
+ bool is_write, MemTxAttrs attrs);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL and set *@plen to zero if resources needed to perform
+ * the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use address_space_register_map_client() to know when retrying the map
+ * operation is likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+ hwaddr *plen, bool is_write, MemTxAttrs attrs);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true. @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+ bool is_write, hwaddr access_len);
+
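+/*
+ * Example: a minimal sketch of the map/copy/unmap pattern; the guest
+ * address, length and source buffer are illustrative.
+ *
+ *     hwaddr plen = size;
+ *     void *p = address_space_map(as, gpa, &plen, true,
+ *                                 MEMTXATTRS_UNSPECIFIED);
+ *     if (!p) {
+ *         // resources exhausted; retry later, see
+ *         // address_space_register_map_client() below
+ *         return;
+ *     }
+ *     memcpy(p, src, plen);                // plen may be smaller than size
+ *     address_space_unmap(as, p, plen, true, plen);
+ */
+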
+/*
+ * address_space_register_map_client: Register a callback to invoke when
+ * resources for address_space_map() are available again.
+ *
+ * address_space_map may fail when there are not enough resources available,
+ * such as when bounce buffer memory would exceed the limit. The callback can
+ * be used to retry the address_space_map operation. Note that the callback
+ * gets automatically removed after firing.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to invoke when address_space_map() retry is appropriate
+ */
+void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
+
+/*
+ * address_space_unregister_map_client: Unregister a callback that has
+ * previously been registered and not fired yet.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to unregister
+ */
+void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
+
+/* Internal functions, part of the implementation of address_space_read. */
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf, hwaddr len);
+MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr);
+void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
+
+/* Internal functions, part of the implementation of address_space_read_cached
+ * and address_space_write_cached. */
+MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, hwaddr len);
+MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf,
+ hwaddr len);
+
+int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
+bool prepare_mmio_access(MemoryRegion *mr);
+
+static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
+{
+ /* ROM DEVICE regions only allow direct access if in ROMD mode. */
+ if (memory_region_is_romd(mr)) {
+ return true;
+ }
+ if (!memory_region_is_ram(mr)) {
+ return false;
+ }
+ /*
+ * RAM DEVICE regions can be accessed directly using memcpy, but it might
+ * be MMIO and access using memcpy can be wrong (e.g., using instructions not
+ * intended for MMIO access). So we treat this as IO.
+ */
+ return !memory_region_is_ram_device(mr);
+}
+
+static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
+ MemTxAttrs attrs)
+{
+ if (!memory_region_supports_direct_access(mr)) {
+ return false;
+ }
+ /* Debug access can write to ROM. */
+ if (is_write && !attrs.debug) {
+ return !mr->readonly && !mr->rom_device;
+ }
+ return true;
+}
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline __attribute__((__always_inline__))
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len)
+{
+ MemTxResult result = MEMTX_OK;
+ hwaddr l, addr1;
+ void *ptr;
+ MemoryRegion *mr;
+ FlatView *fv;
+
+ if (__builtin_constant_p(len)) {
+ if (len) {
+ RCU_READ_LOCK_GUARD();
+ fv = address_space_to_flatview(as);
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
+ if (len == l && memory_access_is_direct(mr, false, attrs)) {
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ memcpy(buf, ptr, len);
+ } else {
+ result = flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+ }
+ } else {
+ result = address_space_read_full(as, addr, attrs, buf, len);
+ }
+ return result;
+}
+
+/**
+ * address_space_read_cached: read from a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult
+address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
+ void *buf, hwaddr len)
+{
+ assert(addr < cache->len && len <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ memcpy(buf, cache->ptr + addr, len);
+ return MEMTX_OK;
+ } else {
+ return address_space_read_cached_slow(cache, addr, buf, len);
+ }
+}
+
+/**
+ * address_space_write_cached: write to a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult
+address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
+ const void *buf, hwaddr len)
+{
+ assert(addr < cache->len && len <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ memcpy(cache->ptr + addr, buf, len);
+ return MEMTX_OK;
+ } else {
+ return address_space_write_cached_slow(cache, addr, buf, len);
+ }
+}
+
+/**
+ * address_space_set: Fill address space with a constant byte.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
+ uint8_t c, hwaddr len, MemTxAttrs attrs);
+
+/*
+ * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
+ * to manage the actual amount of memory consumed by the VM (then, the memory
+ * provided by RAM blocks might be bigger than the desired memory consumption).
+ * This *must* be set if:
+ * - Discarding parts of a RAM block does not result in the change being
+ * reflected in the VM and the pages getting freed.
+ * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
+ * discards blindly.
+ * - Discarding parts of a RAM block will result in integrity issues (e.g.,
+ * encrypted VMs).
+ * Technologies that only temporarily pin the current working set of a
+ * driver are fine, because we don't expect such pages to be discarded
+ * (esp. based on guest action like balloon inflation).
+ *
+ * This is *not* to be used to protect from concurrent discards (esp.,
+ * postcopy).
+ *
+ * Returns 0 if successful. Returns -EBUSY if a technology that relies on
+ * discards to work reliably is active.
+ */
+int ram_block_discard_disable(bool state);
+
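+/*
+ * Example: a minimal sketch of a device that pins all guest RAM (e.g. for
+ * DMA) guarding against discards; the realize/unrealize placement and the
+ * error message are illustrative.
+ *
+ *     if (ram_block_discard_disable(true)) {
+ *         // -EBUSY: a discard-based technology (such as memory ballooning)
+ *         // is already active
+ *         error_setg(errp, "cannot pin RAM while discarding is in use");
+ *         return;
+ *     }
+ *     ...                                  // device lifetime
+ *     ram_block_discard_disable(false);    // on unrealize
+ */
+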
+/*
+ * See ram_block_discard_disable(): only disable uncoordinated discards,
+ * keeping coordinated discards (via the RamDiscardManager) enabled.
+ */
+int ram_block_uncoordinated_discard_disable(bool state);
+
+/*
+ * Inhibit technologies that disable discarding of pages in RAM blocks.
+ *
+ * Returns 0 if successful. Returns -EBUSY if discards are already set to
+ * broken.
+ */
+int ram_block_discard_require(bool state);
+
+/*
+ * See ram_block_discard_require(): only inhibit technologies that disable
+ * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
+ * technologies that only inhibit uncoordinated discards (via the
+ * RamDiscardManager).
+ */
+int ram_block_coordinated_discard_require(bool state);
+
+/*
+ * Test if any discarding of memory in ram blocks is disabled.
+ */
+bool ram_block_discard_is_disabled(void);
+
+/*
+ * Test if any discarding of memory in ram blocks is required to work reliably.
+ */
+bool ram_block_discard_is_required(void);
+
+void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
+void ram_block_del_cpr_blocker(RAMBlock *rb);
+
+#endif
diff --git a/include/sysemu/memory_mapping.h b/include/system/memory_mapping.h
index 021e0a6..021e0a6 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/system/memory_mapping.h
diff --git a/include/system/numa.h b/include/system/numa.h
new file mode 100644
index 0000000..1044b0e
--- /dev/null
+++ b/include/system/numa.h
@@ -0,0 +1,113 @@
+#ifndef SYSTEM_NUMA_H
+#define SYSTEM_NUMA_H
+
+#include "qemu/bitmap.h"
+#include "qapi/qapi-types-machine.h"
+
+struct CPUArchId;
+
+#define MAX_NODES 128
+#define NUMA_NODE_UNASSIGNED MAX_NODES
+#define NUMA_DISTANCE_MIN 10
+#define NUMA_DISTANCE_DEFAULT 20
+#define NUMA_DISTANCE_MAX 254
+#define NUMA_DISTANCE_UNREACHABLE 255
+
+/* the value of AcpiHmatLBInfo flags */
+enum {
+ HMAT_LB_MEM_MEMORY = 0,
+ HMAT_LB_MEM_CACHE_1ST_LEVEL = 1,
+ HMAT_LB_MEM_CACHE_2ND_LEVEL = 2,
+ HMAT_LB_MEM_CACHE_3RD_LEVEL = 3,
+ HMAT_LB_LEVELS /* must be the last entry */
+};
+
+/* the value of AcpiHmatLBInfo data type */
+enum {
+ HMAT_LB_DATA_ACCESS_LATENCY = 0,
+ HMAT_LB_DATA_READ_LATENCY = 1,
+ HMAT_LB_DATA_WRITE_LATENCY = 2,
+ HMAT_LB_DATA_ACCESS_BANDWIDTH = 3,
+ HMAT_LB_DATA_READ_BANDWIDTH = 4,
+ HMAT_LB_DATA_WRITE_BANDWIDTH = 5,
+ HMAT_LB_TYPES /* must be the last entry */
+};
+
+#define UINT16_BITS 16
+
+typedef struct NodeInfo {
+ uint64_t node_mem;
+ struct HostMemoryBackend *node_memdev;
+ bool present;
+ bool has_cpu;
+ bool has_gi;
+ uint8_t lb_info_provided;
+ uint16_t initiator;
+ uint8_t distance[MAX_NODES];
+} NodeInfo;
+
+typedef struct NumaNodeMem {
+ uint64_t node_mem;
+ uint64_t node_plugged_mem;
+} NumaNodeMem;
+
+struct HMAT_LB_Data {
+ uint8_t initiator;
+ uint8_t target;
+ uint64_t data;
+};
+typedef struct HMAT_LB_Data HMAT_LB_Data;
+
+struct HMAT_LB_Info {
+ /* Indicates whether this is memory or the specified level of memory-side cache. */
+ uint8_t hierarchy;
+
+ /* Presents the type of data: access/read/write latency or bandwidth. */
+ uint8_t data_type;
+
+ /* The range bitmap of bandwidth for calculating common base */
+ uint64_t range_bitmap;
+
+ /* The common base unit for latencies or bandwidths */
+ uint64_t base;
+
+ /* Array to store the latencies or bandwidths */
+ GArray *list;
+};
+typedef struct HMAT_LB_Info HMAT_LB_Info;
+
+struct NumaState {
+ /* Number of NUMA nodes */
+ int num_nodes;
+
+ /* Allow setting NUMA distance for different NUMA nodes */
+ bool have_numa_distance;
+
+ /* Detect if HMAT support is enabled. */
+ bool hmat_enabled;
+
+ /* NUMA nodes information */
+ NodeInfo nodes[MAX_NODES];
+
+ /* NUMA nodes HMAT Locality Latency and Bandwidth Information */
+ HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
+
+ /* Memory Side Cache Information Structure */
+ NumaHmatCacheOptions *hmat_cache[MAX_NODES][HMAT_LB_LEVELS];
+};
+typedef struct NumaState NumaState;
+
+void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp);
+void parse_numa_opts(MachineState *ms);
+void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
+ Error **errp);
+void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
+ Error **errp);
+void numa_complete_configuration(MachineState *ms);
+void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms);
+extern QemuOptsList qemu_numa_opts;
+void numa_cpu_pre_plug(const struct CPUArchId *slot, DeviceState *dev,
+ Error **errp);
+bool numa_uses_legacy_mem(void);
+
+#endif
diff --git a/include/sysemu/nvmm.h b/include/system/nvmm.h
index 6971ddb..6971ddb 100644
--- a/include/sysemu/nvmm.h
+++ b/include/system/nvmm.h
diff --git a/include/system/os-posix.h b/include/system/os-posix.h
new file mode 100644
index 0000000..ce5b3bc
--- /dev/null
+++ b/include/system/os-posix.h
@@ -0,0 +1,101 @@
+/*
+ * posix specific declarations
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OS_POSIX_H
+#define QEMU_OS_POSIX_H
+
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <sys/un.h>
+
+#ifdef CONFIG_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
+void os_set_proc_name(const char *s);
+void os_setup_signal_handling(void);
+int os_set_daemonize(bool d);
+bool is_daemonized(void);
+void os_daemonize(void);
+bool os_set_runas(const char *user_id);
+void os_set_chroot(const char *path);
+void os_setup_limits(void);
+void os_setup_post(void);
+int os_mlock(bool on_fault);
+
+/**
+ * qemu_alloc_stack:
+ * @sz: pointer to a size_t holding the requested usable stack size
+ *
+ * Allocate memory that can be used as a stack, for instance for
+ * coroutines. If the memory cannot be allocated, this function
+ * will abort (like g_malloc()). This function also inserts an
+ * additional guard page to catch a potential stack overflow.
+ * Note that the memory required for the guard page and alignment
+ * and minimal stack size restrictions will increase the value of sz.
+ *
+ * The allocated stack must be freed with qemu_free_stack().
+ *
+ * Returns: pointer to (the lowest address of) the stack memory.
+ */
+void *qemu_alloc_stack(size_t *sz);
+
+/**
+ * qemu_free_stack:
+ * @stack: stack to free
+ * @sz: size of stack in bytes
+ *
+ * Free a stack allocated via qemu_alloc_stack(). Note that sz must
+ * be exactly the adjusted stack size returned by qemu_alloc_stack.
+ */
+void qemu_free_stack(void *stack, size_t sz);
+
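+/*
+ * Example: a minimal sketch of allocating and releasing a stack; the
+ * requested size is illustrative and may be grown by the implementation.
+ *
+ *     size_t sz = 64 * 1024;
+ *     void *stack = qemu_alloc_stack(&sz);   // sz now holds the actual size
+ *     ...                                    // run a coroutine on the stack
+ *     qemu_free_stack(stack, sz);
+ */
+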
+/* POSIX and Mingw32 differ in the name of the stdio lock functions. */
+
+static inline void qemu_flockfile(FILE *f)
+{
+ flockfile(f);
+}
+
+static inline void qemu_funlockfile(FILE *f)
+{
+ funlockfile(f);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/system/os-wasm.h b/include/system/os-wasm.h
new file mode 100644
index 0000000..3abb3aa
--- /dev/null
+++ b/include/system/os-wasm.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * posix specific declarations forked from os-posix.h, removing functions not
+ * working on Emscripten
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OS_WASM_H
+#define QEMU_OS_WASM_H
+
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <sys/un.h>
+
+#ifdef CONFIG_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
+void os_set_proc_name(const char *s);
+void os_setup_signal_handling(void);
+void os_setup_limits(void);
+void os_setup_post(void);
+int os_mlock(bool on_fault);
+static inline int os_set_daemonize(bool d)
+{
+ return -1;
+}
+bool is_daemonized(void);
+static inline void os_daemonize(void) {}
+
+/**
+ * qemu_alloc_stack:
+ * @sz: pointer to a size_t holding the requested usable stack size
+ *
+ * Allocate memory that can be used as a stack, for instance for
+ * coroutines. If the memory cannot be allocated, this function
+ * will abort (like g_malloc()). This function also inserts an
+ * additional guard page to catch a potential stack overflow.
+ * Note that the memory required for the guard page and alignment
+ * and minimal stack size restrictions will increase the value of sz.
+ *
+ * The allocated stack must be freed with qemu_free_stack().
+ *
+ * Returns: pointer to (the lowest address of) the stack memory.
+ */
+void *qemu_alloc_stack(size_t *sz);
+
+/**
+ * qemu_free_stack:
+ * @stack: stack to free
+ * @sz: size of stack in bytes
+ *
+ * Free a stack allocated via qemu_alloc_stack(). Note that sz must
+ * be exactly the adjusted stack size returned by qemu_alloc_stack.
+ */
+void qemu_free_stack(void *stack, size_t sz);
+
+/* POSIX and Mingw32 differ in the name of the stdio lock functions. */
+
+static inline void qemu_flockfile(FILE *f)
+{
+ flockfile(f);
+}
+
+static inline void qemu_funlockfile(FILE *f)
+{
+ funlockfile(f);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/system/os-win32.h b/include/system/os-win32.h
new file mode 100644
index 0000000..3aa6cee
--- /dev/null
+++ b/include/system/os-win32.h
@@ -0,0 +1,276 @@
+/*
+ * win32 specific declarations
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OS_WIN32_H
+#define QEMU_OS_WIN32_H
+
+#include <winsock2.h>
+#include <windows.h>
+#include <ws2tcpip.h>
+#include "qemu/typedefs.h"
+
+#ifdef HAVE_AFUNIX_H
+#include <afunix.h>
+#else
+/*
+ * Fallback definitions of things we need in afunix.h, if not available from
+ * the used Windows SDK or MinGW headers.
+ */
+#define UNIX_PATH_MAX 108
+
+typedef struct sockaddr_un {
+ ADDRESS_FAMILY sun_family;
+ char sun_path[UNIX_PATH_MAX];
+} SOCKADDR_UN, *PSOCKADDR_UN;
+
+#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__aarch64__)
+/*
+ * On windows-arm64, setjmp is available in only one variant, and longjmp always
+ * does stack unwinding. This crashes with generated code.
+ * Thus, we use another implementation of setjmp (not the Windows one), coming
+ * from mingw, which never performs stack unwinding.
+ */
+#undef setjmp
+#undef longjmp
+/*
+ * These functions are not declared in setjmp.h because __aarch64__ defines
+ * setjmp to _setjmpex instead. However, they are still defined in libmingwex.a,
+ * which gets linked automatically.
+ */
+int __mingw_setjmp(jmp_buf);
+void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
+#define setjmp(env) __mingw_setjmp(env)
+#define longjmp(env, val) __mingw_longjmp(env, val)
+#elif defined(_WIN64)
+/*
+ * On windows-x64, setjmp is implemented by _setjmp, which needs a second parameter.
+ * If this parameter is NULL, longjmp does no stack unwinding.
+ * That is what we need for QEMU. Passing the value of register rsp (the default)
+ * lets longjmp attempt stack unwinding, which will crash with generated code.
+ */
+# undef setjmp
+# define setjmp(env) _setjmp(env, NULL)
+#endif /* __aarch64__ */
+/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
+ * "longjmp and don't touch the signal masks". Since we know that the
+ * savemask parameter will always be zero we can safely define these
+ * in terms of setjmp/longjmp on Win32.
+ */
+#define sigjmp_buf jmp_buf
+#define sigsetjmp(env, savemask) setjmp(env)
+#define siglongjmp(env, val) longjmp(env, val)
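For context, the portable pattern these definitions support looks roughly like the sketch below (invented names; QEMU's TCG execution loop uses this shape, but this is not code from the patch).

#include <setjmp.h>

static sigjmp_buf example_exit_buf;

static void example_run_guarded(void (*body)(void))
{
    if (sigsetjmp(example_exit_buf, 0) == 0) {  /* savemask is always 0 in QEMU */
        body();                                  /* may siglongjmp() back out */
    }
    /* control resumes here after siglongjmp(example_exit_buf, 1) */
}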
+
+/* Missing POSIX functions. Don't use MinGW-w64 macros. */
+#ifndef _POSIX_THREAD_SAFE_FUNCTIONS
+#undef gmtime_r
+struct tm *gmtime_r(const time_t *timep, struct tm *result);
+#undef localtime_r
+struct tm *localtime_r(const time_t *timep, struct tm *result);
+#endif /* _POSIX_THREAD_SAFE_FUNCTIONS */
+
+static inline void os_setup_signal_handling(void) {}
+static inline void os_daemonize(void) {}
+static inline void os_setup_post(void) {}
+static inline void os_set_proc_name(const char *dummy) {}
+void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
+
+int getpagesize(void);
+
+#if !defined(EPROTONOSUPPORT)
+# define EPROTONOSUPPORT EINVAL
+#endif
+
+static inline int os_set_daemonize(bool d)
+{
+ if (d) {
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static inline bool is_daemonized(void)
+{
+ return false;
+}
+
+static inline int os_mlock(bool on_fault G_GNUC_UNUSED)
+{
+ return -ENOSYS;
+}
+
+static inline void os_setup_limits(void)
+{
+}
+
+#define fsync _commit
+
+#if !defined(lseek)
+# define lseek _lseeki64
+#endif
+
+int qemu_ftruncate64(int, int64_t);
+
+#if !defined(ftruncate)
+# define ftruncate qemu_ftruncate64
+#endif
+
+static inline char *realpath(const char *path, char *resolved_path)
+{
+ _fullpath(resolved_path, path, _MAX_PATH);
+ return resolved_path;
+}
+
+/*
+ * Older versions of MinGW do not import _lock_file and _unlock_file properly.
+ * This was fixed for v6.0.0 with commit b48e3ac8969d.
+ */
+static inline void qemu_flockfile(FILE *f)
+{
+#ifdef HAVE__LOCK_FILE
+ _lock_file(f);
+#endif
+}
+
+static inline void qemu_funlockfile(FILE *f)
+{
+#ifdef HAVE__LOCK_FILE
+ _unlock_file(f);
+#endif
+}
+
+/* Helper for WSAEventSelect, to report errors */
+bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
+ long lNetworkEvents, Error **errp);
+
+bool qemu_socket_unselect(int sockfd, Error **errp);
+
+/* We wrap all the socket functions so that we can set errno based on
+ * WSAGetLastError(), and use file descriptors instead of SOCKETs.
+ */
+
+/*
+ * qemu_close_socket_osfhandle:
+ * @fd: a file descriptor associated with a SOCKET
+ *
+ * Close only the C run-time file descriptor, leave the SOCKET opened.
+ *
+ * Returns zero on success. On error, -1 is returned, and errno is set to
+ * indicate the error.
+ */
+int qemu_close_socket_osfhandle(int fd);
+
+#undef close
+#define close qemu_close_wrap
+int qemu_close_wrap(int fd);
+
+#undef connect
+#define connect qemu_connect_wrap
+int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
+ socklen_t addrlen);
+
+#undef listen
+#define listen qemu_listen_wrap
+int qemu_listen_wrap(int sockfd, int backlog);
+
+#undef bind
+#define bind qemu_bind_wrap
+int qemu_bind_wrap(int sockfd, const struct sockaddr *addr,
+ socklen_t addrlen);
+
+#undef socket
+#define socket qemu_socket_wrap
+int qemu_socket_wrap(int domain, int type, int protocol);
+
+#undef accept
+#define accept qemu_accept_wrap
+int qemu_accept_wrap(int sockfd, struct sockaddr *addr,
+ socklen_t *addrlen);
+
+#undef shutdown
+#define shutdown qemu_shutdown_wrap
+int qemu_shutdown_wrap(int sockfd, int how);
+
+#undef ioctlsocket
+#define ioctlsocket qemu_ioctlsocket_wrap
+int qemu_ioctlsocket_wrap(int fd, int req, void *val);
+
+#undef getsockopt
+#define getsockopt qemu_getsockopt_wrap
+int qemu_getsockopt_wrap(int sockfd, int level, int optname,
+ void *optval, socklen_t *optlen);
+
+#undef setsockopt
+#define setsockopt qemu_setsockopt_wrap
+int qemu_setsockopt_wrap(int sockfd, int level, int optname,
+ const void *optval, socklen_t optlen);
+
+#undef getpeername
+#define getpeername qemu_getpeername_wrap
+int qemu_getpeername_wrap(int sockfd, struct sockaddr *addr,
+ socklen_t *addrlen);
+
+#undef getsockname
+#define getsockname qemu_getsockname_wrap
+int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr,
+ socklen_t *addrlen);
+
+#undef send
+#define send qemu_send_wrap
+ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags);
+
+#undef sendto
+#define sendto qemu_sendto_wrap
+ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags,
+ const struct sockaddr *addr, socklen_t addrlen);
+
+#undef recv
+#define recv qemu_recv_wrap
+ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags);
+
+#undef recvfrom
+#define recvfrom qemu_recvfrom_wrap
+ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
+ struct sockaddr *addr, socklen_t *addrlen);
+
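Because of the redefinitions above, portable POSIX-style code keeps working unchanged on Windows; the helper below is an invented illustration of that effect, not code from the patch.

static int example_tcp_connect(const struct sockaddr *sa, socklen_t salen)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);   /* expands to qemu_socket_wrap() */

    if (fd < 0) {
        return -errno;                          /* errno derived from WSAGetLastError() */
    }
    if (connect(fd, sa, salen) < 0) {           /* expands to qemu_connect_wrap() */
        int err = -errno;
        close(fd);                              /* expands to qemu_close_wrap() */
        return err;
    }
    return fd;
}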
+EXCEPTION_DISPOSITION
+win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*,
+ struct _CONTEXT*, void*);
+
+void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp);
+void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/system/qtest.h b/include/system/qtest.h
new file mode 100644
index 0000000..84b1f8c
--- /dev/null
+++ b/include/system/qtest.h
@@ -0,0 +1,36 @@
+/*
+ * Test Server
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QTEST_H
+#define QTEST_H
+
+#include "chardev/char.h"
+
+extern bool qtest_allowed;
+
+static inline bool qtest_enabled(void)
+{
+ return qtest_allowed;
+}
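A small sketch of the usual guard; the entropy example and g_random_int() (a glib call) are chosen here purely for illustration.

static uint32_t example_read_entropy(void)
{
    if (qtest_enabled()) {
        return 0;            /* keep qtest runs deterministic */
    }
    return g_random_int();   /* normal operation */
}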
+
+void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...);
+void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words));
+bool qtest_driver(void);
+
+void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
+
+void qtest_server_set_send_handler(void (*send)(void *, const char *),
+ void *opaque);
+void qtest_server_inproc_recv(void *opaque, const char *buf);
+
+#endif
diff --git a/include/system/ram_addr.h b/include/system/ram_addr.h
new file mode 100644
index 0000000..15a1b1a
--- /dev/null
+++ b/include/system/ram_addr.h
@@ -0,0 +1,561 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef SYSTEM_RAM_ADDR_H
+#define SYSTEM_RAM_ADDR_H
+
+#include "system/xen.h"
+#include "system/tcg.h"
+#include "exec/cputlb.h"
+#include "exec/ramlist.h"
+#include "system/ramblock.h"
+#include "system/memory.h"
+#include "exec/target_page.h"
+#include "qemu/rcu.h"
+
+#include "exec/hwaddr.h"
+#include "exec/cpu-common.h"
+
+extern uint64_t total_dirty_pages;
+
+/**
+ * clear_bmap_size: calculate clear bitmap size
+ *
+ * @pages: number of guest pages
+ * @shift: guest page number shift
+ *
+ * Returns: number of bits for the clear bitmap
+ */
+static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
+{
+ return DIV_ROUND_UP(pages, 1UL << shift);
+}
+
+/**
+ * clear_bmap_set: set the clear bitmap for the page range. Must be called
+ * with bitmap_mutex held.
+ *
+ * @rb: the ramblock to operate on
+ * @start: the start page number
+ * @npages: number of pages to set in the bitmap
+ *
+ * Returns: None
+ */
+static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
+ uint64_t npages)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
+}
+
+/**
+ * clear_bmap_test_and_clear: test the clear bitmap for the page, clear if set.
+ * Must be called with bitmap_mutex held.
+ *
+ * @rb: the ramblock to operate on
+ * @page: the page number to check
+ *
+ * Returns: true if the bit was set, false otherwise
+ */
+static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
+}
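A quick arithmetic sketch of what these helpers compute; the shift value of 18 and the 4 KiB target page size below are assumptions made for illustration, not values taken from this patch.

/* Hedged sketch, not part of the patch: number of clear-bitmap bits needed
 * for a guest of guest_bytes bytes, assuming 4 KiB target pages and a
 * clear_bmap_shift of 18 (one bit per 2^18 pages, i.e. 1 GiB of RAM). */
static inline long example_clear_bmap_bits(uint64_t guest_bytes)
{
    const uint8_t shift = 18;                /* assumed shift */
    uint64_t pages = guest_bytes / 4096;     /* assumed 4 KiB target pages */

    /* e.g. a 16 GiB guest: 4194304 pages -> DIV_ROUND_UP(2^22, 2^18) = 16 bits */
    return clear_bmap_size(pages, shift);
}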
+
+static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
+{
+ return (b && b->host && offset < b->used_length) ? true : false;
+}
+
+static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
+{
+ assert(offset_in_ramblock(block, offset));
+ return (char *)block->host + offset;
+}
+
+static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
+ RAMBlock *rb)
+{
+ uint64_t host_addr_offset =
+ (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
+ return host_addr_offset >> TARGET_PAGE_BITS;
+}
+
+bool ramblock_is_pmem(RAMBlock *rb);
+
+/**
+ * qemu_ram_alloc_from_file,
+ * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
+ * file or device
+ *
+ * Parameters:
+ * @size: the size in bytes of the ram block
+ * @max_size: the maximum size of the block after resizing
+ * @mr: the memory region where the ram block is
+ * @resized: callback after calls to qemu_ram_resize
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
+ * @mem_path or @fd: specify the backing file or device
+ * @offset: Offset into target file
+ * @grow: extend file if necessary (but an empty file is always extended).
+ * @errp: pointer to Error*, to store an error if it happens
+ *
+ * Return:
+ * On success, return a pointer to the ram block.
+ * On failure, return NULL.
+ */
+typedef void (*qemu_ram_resize_cb)(const char *, uint64_t length, void *host);
+
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+ uint32_t ram_flags, const char *mem_path,
+ off_t offset, Error **errp);
+RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size,
+ qemu_ram_resize_cb resized, MemoryRegion *mr,
+ uint32_t ram_flags, int fd, off_t offset,
+ bool grow,
+ Error **errp);
+
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
+ Error **errp);
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+ qemu_ram_resize_cb resized,
+ MemoryRegion *mr, Error **errp);
+void qemu_ram_free(RAMBlock *block);
+
+int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
+
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
+
+/* Clear whole block of mem */
+static inline void qemu_ram_block_writeback(RAMBlock *block)
+{
+ qemu_ram_msync(block, 0, block->used_length);
+}
+
+#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
+#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
+
+static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = false;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_bit(blocks->blocks[idx],
+ num, offset);
+ if (found < num) {
+ dirty = true;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+ }
+
+ return dirty;
+}
+
+static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = true;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ RCU_READ_LOCK_GUARD();
+
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
+ if (found < num) {
+ dirty = false;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+
+ return dirty;
+}
+
+static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ return cpu_physical_memory_get_dirty(addr, 1, client);
+}
+
+static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
+{
+ bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
+ bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
+ bool migration =
+ cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
+ return !(vga && code && migration);
+}
+
+static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ uint8_t ret = 0;
+
+ if (mask & (1 << DIRTY_MEMORY_VGA) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
+ ret |= (1 << DIRTY_MEMORY_VGA);
+ }
+ if (mask & (1 << DIRTY_MEMORY_CODE) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
+ ret |= (1 << DIRTY_MEMORY_CODE);
+ }
+ if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
+ ret |= (1 << DIRTY_MEMORY_MIGRATION);
+ }
+ return ret;
+}
+
+static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ unsigned long page, idx, offset;
+ DirtyMemoryBlocks *blocks;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ page = addr >> TARGET_PAGE_BITS;
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+
+ RCU_READ_LOCK_GUARD();
+
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ set_bit_atomic(offset, blocks->blocks[idx]);
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ int i;
+
+ if (!mask && !xen_enabled()) {
+ return;
+ }
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
+ }
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, next - page);
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+ }
+
+ if (xen_enabled()) {
+ xen_hvm_modified_memory(start, length);
+ }
+}
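As a sketch of how callers typically use the mask, the function below (an invented name, not from the patch) marks a range dirty for every client except the TCG code bitmap and then observes the effect through the getter above.

static void example_mark_dirty(ram_addr_t ram_offset, ram_addr_t len)
{
    /* DIRTY_CLIENTS_NOCODE = all dirty clients except DIRTY_MEMORY_CODE */
    cpu_physical_memory_set_dirty_range(ram_offset, len, DIRTY_CLIENTS_NOCODE);

    /* the VGA client (part of the mask above) now reports the range dirty */
    assert(cpu_physical_memory_get_dirty(ram_offset, len, DIRTY_MEMORY_VGA));
}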
+
+#if !defined(_WIN32)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap(), this function returns
+ * the total number of dirty pages in the @bitmap passed as argument, whereas
+ * cpu_physical_memory_sync_dirty_bitmap() returns only the newly dirtied pages
+ * that were not yet set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
+{
+ unsigned long i, j;
+ unsigned long page_number, c, nbits;
+ hwaddr addr;
+ ram_addr_t ram_addr;
+ uint64_t num_dirty = 0;
+ unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
+ unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ /* start address is aligned at the start of a word? */
+ if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
+ (hpratio == 1)) {
+ unsigned long **blocks[DIRTY_MEMORY_NUM];
+ unsigned long idx;
+ unsigned long offset;
+ long k;
+ long nr = BITS_TO_LONGS(pages);
+
+ idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] =
+ qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
+
+ for (k = 0; k < nr; k++) {
+ if (bitmap[k]) {
+ unsigned long temp = leul_to_cpu(bitmap[k]);
+
+ nbits = ctpopl(temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+ if (global_dirty_tracking) {
+ qatomic_or(
+ &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ if (unlikely(
+ global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ }
+
+ num_dirty += nbits;
+
+ if (tcg_enabled()) {
+ qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
+ }
+ }
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
+ }
+ }
+
+ if (xen_enabled()) {
+ xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
+ }
+ } else {
+ uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+
+ if (!global_dirty_tracking) {
+ clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+ }
+
+ /*
+ * bitmap-traveling is faster than memory-traveling (for addr...)
+ * especially when most of the memory is not dirty.
+ */
+ for (i = 0; i < len; i++) {
+ if (bitmap[i] != 0) {
+ c = leul_to_cpu(bitmap[i]);
+ nbits = ctpopl(c);
+ if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ num_dirty += nbits;
+ do {
+ j = ctzl(c);
+ c &= ~(1ul << j);
+ page_number = (i * HOST_LONG_BITS + j) * hpratio;
+ addr = page_number * TARGET_PAGE_SIZE;
+ ram_addr = start + addr;
+ cpu_physical_memory_set_dirty_range(ram_addr,
+ TARGET_PAGE_SIZE * hpratio, clients);
+ } while (c != 0);
+ }
+ }
+ }
+
+ return num_dirty;
+}
+#endif /* not _WIN32 */
+
+static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
+ ram_addr_t length)
+{
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, length);
+ }
+
+}
+bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client);
+
+DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
+
+bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+ ram_addr_t start,
+ ram_addr_t length);
+
+static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
+ ram_addr_t length)
+{
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
+}
+
+
+/* Called with RCU critical section */
+static inline
+uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
+ ram_addr_t start,
+ ram_addr_t length)
+{
+ ram_addr_t addr;
+ unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
+ uint64_t num_dirty = 0;
+ unsigned long *dest = rb->bmap;
+
+ /* are start address and length aligned at the start of a word? */
+ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
+ (start + rb->offset) &&
+ !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+ int k;
+ int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+ unsigned long * const *src;
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ src = qatomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
+
+ for (k = page; k < page + nr; k++) {
+ if (src[idx][offset]) {
+ unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
+ unsigned long new_dirty;
+ new_dirty = ~dest[k];
+ dest[k] |= bits;
+ new_dirty &= bits;
+ num_dirty += ctpopl(new_dirty);
+ }
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
+ }
+ if (num_dirty) {
+ cpu_physical_memory_dirty_bits_cleared(start, length);
+ }
+
+ if (rb->clear_bmap) {
+ /*
+ * Postpone the dirty bitmap clear to the point just before we
+ * really send the pages; we will also split the dirty bitmap
+ * clearing into smaller chunks.
+ */
+ clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+ length >> TARGET_PAGE_BITS);
+ } else {
+ /* Slow path - still do that in a huge chunk */
+ memory_region_clear_dirty_bitmap(rb->mr, start, length);
+ }
+ } else {
+ ram_addr_t offset = rb->offset;
+
+ for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+ if (cpu_physical_memory_test_and_clear_dirty(
+ start + addr + offset,
+ TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ long k = (start + addr) >> TARGET_PAGE_BITS;
+ if (!test_and_set_bit(k, dest)) {
+ num_dirty++;
+ }
+ }
+ }
+ }
+
+ return num_dirty;
+}
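The sketch below shows the shape of a typical caller (the real one lives in the migration code); the function name is invented and the bitmap_mutex handling of the real caller is omitted for brevity.

static uint64_t example_sync_block(RAMBlock *rb)
{
    uint64_t newly_dirty;

    /* the helper documents that it must run inside an RCU critical section */
    WITH_RCU_READ_LOCK_GUARD() {
        newly_dirty = cpu_physical_memory_sync_dirty_bitmap(rb, 0,
                                                            rb->used_length);
    }
    return newly_dirty;
}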
+
+#endif
diff --git a/include/system/ramblock.h b/include/system/ramblock.h
new file mode 100644
index 0000000..87e847e
--- /dev/null
+++ b/include/system/ramblock.h
@@ -0,0 +1,116 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef SYSTEM_RAMBLOCK_H
+#define SYSTEM_RAMBLOCK_H
+
+#include "exec/cpu-common.h"
+#include "qemu/rcu.h"
+#include "exec/ramlist.h"
+#include "system/hostmem.h"
+
+#define TYPE_RAM_BLOCK_ATTRIBUTES "ram-block-attributes"
+OBJECT_DECLARE_SIMPLE_TYPE(RamBlockAttributes, RAM_BLOCK_ATTRIBUTES)
+
+struct RAMBlock {
+ struct rcu_head rcu;
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by the BQL. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ QLIST_ENTRY(RAMBlock) next;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
+ Error *cpr_blocker;
+ int fd;
+ uint64_t fd_offset;
+ int guest_memfd;
+ RamBlockAttributes *attributes;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+
+ /*
+ * Below fields are only used by mapped-ram migration
+ */
+ /* bitmap of pages present in the migration file */
+ unsigned long *file_bmap;
+ /*
+ * offset in the file at which pages belonging to this ramblock are saved;
+ * used only during migration to a file.
+ */
+ off_t bitmap_offset;
+ uint64_t pages_offset;
+
+ /* Bitmap of already received pages. Only used on destination side. */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+ * Set this up to non-NULL to enable the capability to postpone
+ * and split clearing of dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+ * It is only used during src side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
+ * NOTE: this bitmap is different compared to the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On the
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). In particular, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+};
+
+struct RamBlockAttributes {
+ Object parent;
+
+ RAMBlock *ram_block;
+
+ /* a set bit in the bitmap means the corresponding RAM is populated (shared) */
+ unsigned bitmap_size;
+ unsigned long *bitmap;
+
+ QLIST_HEAD(, RamDiscardListener) rdl_list;
+};
+
+RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block);
+void ram_block_attributes_destroy(RamBlockAttributes *attr);
+int ram_block_attributes_state_change(RamBlockAttributes *attr, uint64_t offset,
+ uint64_t size, bool to_discard);
+
+#endif
diff --git a/include/system/replay.h b/include/system/replay.h
new file mode 100644
index 0000000..1c87c97
--- /dev/null
+++ b/include/system/replay.h
@@ -0,0 +1,179 @@
+/*
+ * QEMU replay (system interface)
+ *
+ * Copyright (c) 2010-2015 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef SYSTEM_REPLAY_H
+#define SYSTEM_REPLAY_H
+
+#include "exec/replay-core.h"
+#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-run-state.h"
+#include "qapi/qapi-types-ui.h"
+#include "block/aio.h"
+
+/* replay clock kinds */
+enum ReplayClockKind {
+ /* host_clock */
+ REPLAY_CLOCK_HOST,
+ /* virtual_rt_clock */
+ REPLAY_CLOCK_VIRTUAL_RT,
+ REPLAY_CLOCK_COUNT
+};
+typedef enum ReplayClockKind ReplayClockKind;
+
+/* IDs of the checkpoints */
+enum ReplayCheckpoint {
+ CHECKPOINT_CLOCK_WARP_START,
+ CHECKPOINT_CLOCK_WARP_ACCOUNT,
+ CHECKPOINT_RESET_REQUESTED,
+ CHECKPOINT_SUSPEND_REQUESTED,
+ CHECKPOINT_CLOCK_VIRTUAL,
+ CHECKPOINT_CLOCK_HOST,
+ CHECKPOINT_CLOCK_VIRTUAL_RT,
+ CHECKPOINT_INIT,
+ CHECKPOINT_RESET,
+ CHECKPOINT_COUNT
+};
+typedef enum ReplayCheckpoint ReplayCheckpoint;
+
+typedef struct ReplayNetState ReplayNetState;
+
+/* Name of the initial VM snapshot */
+extern char *replay_snapshot;
+
+/* Replay locking
+ *
+ * The locks are needed to protect the shared structures and log file
+ * when doing record/replay. They are also the main synchronization
+ * point between the main-loop thread and the vCPU thread, a role
+ * previously filled by the BQL, whose scope is being reduced across
+ * the code. This ensures that blocks of events stay sequential and
+ * reproducible.
+ */
+
+void replay_mutex_lock(void);
+void replay_mutex_unlock(void);
+
+/* Processing the instructions */
+
+/*! Returns number of executed instructions. */
+uint64_t replay_get_current_icount(void);
+/*! Returns number of instructions to execute in replay mode. */
+int replay_get_instructions(void);
+/*! Updates instructions counter in replay mode. */
+void replay_account_executed_instructions(void);
+
+/* Processing clocks and other time sources */
+
+/*! Save the specified clock */
+int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
+ int64_t raw_icount);
+/*! Read the specified clock from the log or return cached data */
+int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
+/*! Saves or reads the clock depending on the current replay mode. */
+#define REPLAY_CLOCK(clock, value) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw()) \
+ : replay_mode == REPLAY_MODE_RECORD \
+ ? replay_save_clock((clock), (value), icount_get_raw()) \
+ : (value))
+#define REPLAY_CLOCK_LOCKED(clock, value) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw_locked()) \
+ : replay_mode == REPLAY_MODE_RECORD \
+ ? replay_save_clock((clock), (value), icount_get_raw_locked()) \
+ : (value))
+
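For orientation, this is roughly how a clock read site uses the macro; get_clock_realtime() is the host-clock helper from qemu/timer.h, and the wrapper name below is invented for the example.

static int64_t example_get_host_clock_ns(void)
{
    /* recorded to the log on the record run, read back from it on replay */
    return REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
}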
+/* Events */
+
+/*! Called when qemu shutdown is requested. */
+void replay_shutdown_request(ShutdownCause cause);
+/*! Should be called at checkpoints in the execution.
+    These checkpoints are skipped if they were not met.
+    Saves the checkpoint in record mode and validates it in replay mode.
+    Returns false in replay mode if the checkpoint was not found,
+    true in all other cases. */
+bool replay_checkpoint(ReplayCheckpoint checkpoint);
+/*! Used to determine whether a checkpoint or async event is pending.
+ Does not proceed to the next event in the log. */
+bool replay_has_event(void);
+/*
+ * Processes the async events added to the queue (while recording)
+ * or reads the events from the file (while replaying).
+ */
+void replay_async_events(void);
+
+/* Asynchronous events queue */
+
+/*! Enables storing events in the queue */
+void replay_enable_events(void);
+/*! Returns true when saving events is enabled */
+bool replay_events_enabled(void);
+/* Flushes events queue */
+void replay_flush_events(void);
+/*! Adds bottom half event to the queue */
+void replay_bh_schedule_event(QEMUBH *bh);
+/* Adds oneshot bottom half event to the queue */
+void replay_bh_schedule_oneshot_event(AioContext *ctx,
+ QEMUBHFunc *cb, void *opaque);
+/*! Adds input event to the queue */
+void replay_input_event(QemuConsole *src, InputEvent *evt);
+/*! Adds input sync event to the queue */
+void replay_input_sync_event(void);
+/*! Adds block layer event to the queue */
+void replay_block_event(QEMUBH *bh, uint64_t id);
+/*! Returns ID for the next block event */
+uint64_t blkreplay_next_id(void);
+
+/* Character device */
+
+/*! Registers the char driver to save its events */
+void replay_register_char_driver(struct Chardev *chr);
+/*! Saves write to char device event to the log */
+void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len);
+/*! Writes char write return value to the replay log. */
+void replay_char_write_event_save(int res, int offset);
+/*! Reads char write return value from the replay log. */
+void replay_char_write_event_load(int *res, int *offset);
+/*! Reads information about read_all character event. */
+int replay_char_read_all_load(uint8_t *buf);
+/*! Writes character read_all error code into the replay log. */
+void replay_char_read_all_save_error(int res);
+/*! Writes character read_all execution result into the replay log. */
+void replay_char_read_all_save_buf(uint8_t *buf, int offset);
+
+/* Network */
+
+/*! Registers replay network filter attached to some backend. */
+ReplayNetState *replay_register_net(NetFilterState *nfs);
+/*! Unregisters replay network filter. */
+void replay_unregister_net(ReplayNetState *rns);
+/*! Called to write network packet to the replay log. */
+void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
+ const struct iovec *iov, int iovcnt);
+
+/* Audio */
+
+/*! Saves/restores number of played samples of audio out operation. */
+void replay_audio_out(size_t *played);
+/*! Saves/restores recorded samples of audio in operation. */
+void replay_audio_in(size_t *recorded, void *samples, size_t *wpos, size_t size);
+
+/* VM state operations */
+
+/*! Called at the start of execution.
+ Loads or saves initial vmstate depending on execution mode. */
+void replay_vmstate_init(void);
+/*! Called to ensure that replay state is consistent and VM snapshot
+ can be created */
+bool replay_can_snapshot(void);
+
+#endif
diff --git a/include/system/reset.h b/include/system/reset.h
new file mode 100644
index 0000000..97131d9
--- /dev/null
+++ b/include/system/reset.h
@@ -0,0 +1,127 @@
+/*
+ * Reset handlers.
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2016 Red Hat, Inc.
+ * Copyright (c) 2024 Linaro, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_SYSTEM_RESET_H
+#define QEMU_SYSTEM_RESET_H
+
+#include "hw/resettable.h"
+#include "qapi/qapi-events-run-state.h"
+
+typedef void QEMUResetHandler(void *opaque);
+
+/**
+ * qemu_register_resettable: Register an object to be reset
+ * @obj: object to be reset: it must implement the Resettable interface
+ *
+ * Register @obj on the list of objects which will be reset when the
+ * simulation is reset. These objects will be reset in the order
+ * they were added, using the three-phase Resettable protocol,
+ * so first all objects go through the enter phase, then all objects
+ * go through the hold phase, and then finally all go through the
+ * exit phase.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within any of the reset phase methods of @obj.
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_register_resettable(Object *obj);
+
+/**
+ * qemu_unregister_resettable: Unregister an object to be reset
+ * @obj: object to unregister
+ *
+ * Remove @obj from the list of objects which are reset when the
+ * simulation is reset. It must have been previously added to
+ * the list via qemu_register_resettable().
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_unregister_resettable(Object *obj);
+
+/**
+ * qemu_register_reset: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * Register @func on the list of functions which are called when the
+ * entire system is reset. Functions registered with this API and
+ * Resettable objects registered with qemu_register_resettable() are
+ * handled together, in the order in which they were registered.
+ * Functions registered with this API are called in the 'hold' phase
+ * of the 3-phase reset.
+ *
+ * In general this function should not be used in new code where possible;
+ * for instance, device model reset is better accomplished using the
+ * methods on DeviceState.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within the @func callback.
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_register_reset(QEMUResetHandler *func, void *opaque);
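A minimal sketch of a legacy-style reset hook, assuming an invented device structure; the registration call is shown as a comment because it belongs in the caller's init path (run with the BQL held).

typedef struct ExampleDevState {
    uint32_t ctrl;
} ExampleDevState;

static void example_dev_reset(void *opaque)
{
    ExampleDevState *s = opaque;

    s->ctrl = 0;    /* invoked during the 'hold' phase of system reset */
}

/* in the device's init code:
 *     qemu_register_reset(example_dev_reset, s);
 * and, if the device can go away again:
 *     qemu_unregister_reset(example_dev_reset, s);
 */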
+
+/**
+ * qemu_register_reset_nosnapshotload: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * This is the same as qemu_register_reset(), except that @func is
+ * not called if the reason that the system is being reset is to
+ * put it into a clean state prior to loading a snapshot (i.e. for
+ * SHUTDOWN_CAUSE_SNAPSHOT_LOAD).
+ */
+void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_unregister_reset: Unregister a system reset callback
+ * @func: function registered with qemu_register_reset()
+ * @opaque: the same opaque data that was passed to qemu_register_reset()
+ *
+ * Undo the effects of a qemu_register_reset(). The @func and @opaque
+ * must both match the arguments originally used with qemu_register_reset().
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_devices_reset: Perform a complete system reset
+ * @reason: type of the reset
+ *
+ * This function performs the low-level work needed to do a complete reset
+ * of the system (calling all the callbacks registered with
+ * qemu_register_reset() and resetting all the Resettable objects registered
+ * with qemu_register_resettable()). It should only be called by the code in a
+ * MachineClass reset method.
+ *
+ * If you want to trigger a system reset from, for instance, a device
+ * model, don't use this function. Use qemu_system_reset_request().
+ */
+void qemu_devices_reset(ResetType type);
+
+#endif
diff --git a/include/sysemu/rng-random.h b/include/system/rng-random.h
index 0fdc6c6..0fdc6c6 100644
--- a/include/sysemu/rng-random.h
+++ b/include/system/rng-random.h
diff --git a/include/sysemu/rng.h b/include/system/rng.h
index e383f87..e383f87 100644
--- a/include/sysemu/rng.h
+++ b/include/system/rng.h
diff --git a/include/system/rtc.h b/include/system/rtc.h
new file mode 100644
index 0000000..cde83fa
--- /dev/null
+++ b/include/system/rtc.h
@@ -0,0 +1,58 @@
+/*
+ * RTC configuration and clock read
+ *
+ * Copyright (c) 2003-2021 QEMU contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef SYSTEM_RTC_H
+#define SYSTEM_RTC_H
+
+/**
+ * qemu_get_timedate: Get the current RTC time
+ * @tm: struct tm to fill in with RTC time
+ * @offset: offset in seconds to adjust the RTC time by before
+ * converting to struct tm format.
+ *
+ * This function fills in @tm with the current RTC time, as adjusted
+ * by @offset (for example, if @offset is 3600 then the returned time/date
+ * will be one hour further ahead than the current RTC time).
+ *
+ * The usual use is by RTC device models, which should call this function
+ * to find the time/date value that they should return to the guest
+ * when it reads the RTC registers.
+ *
+ * The behaviour of the clock whose value this function returns will
+ * depend on the -rtc command line option passed by the user.
+ */
+void qemu_get_timedate(struct tm *tm, time_t offset);
+
+/**
+ * qemu_timedate_diff: Return difference between a struct tm and the RTC
+ * @tm: struct tm containing the date/time to compare against
+ *
+ * Returns the difference in seconds between the RTC clock time
+ * and the date/time specified in @tm. For example, if @tm specifies
+ * a timestamp one hour further ahead than the current RTC time
+ * then this function will return 3600.
+ */
+time_t qemu_timedate_diff(struct tm *tm);
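A sketch of the usual call pattern in an RTC device model; the function name and the idea of passing the guest's accumulated offset are illustrative assumptions, not code from this patch.

#include <time.h>

static uint8_t example_rtc_read_seconds(time_t guest_offset)
{
    struct tm now;

    /* guest_offset: how far the guest has moved its RTC away from the host */
    qemu_get_timedate(&now, guest_offset);
    return (uint8_t)now.tm_sec;
}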
+
+#endif
diff --git a/include/sysemu/runstate-action.h b/include/system/runstate-action.h
index db4e309..db4e309 100644
--- a/include/sysemu/runstate-action.h
+++ b/include/system/runstate-action.h
diff --git a/include/system/runstate.h b/include/system/runstate.h
new file mode 100644
index 0000000..fdd5c4a
--- /dev/null
+++ b/include/system/runstate.h
@@ -0,0 +1,119 @@
+#ifndef SYSTEM_RUNSTATE_H
+#define SYSTEM_RUNSTATE_H
+
+#include "qapi/qapi-types-run-state.h"
+#include "qemu/notify.h"
+
+bool runstate_check(RunState state);
+void runstate_set(RunState new_state);
+RunState runstate_get(void);
+bool runstate_is_running(void);
+bool runstate_needs_reset(void);
+void runstate_replay_enable(void);
+
+typedef void VMChangeStateHandler(void *opaque, bool running, RunState state);
+typedef int VMChangeStateHandlerWithRet(void *opaque, bool running, RunState state);
+
+VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
+ void *opaque);
+VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
+ VMChangeStateHandler *cb, void *opaque, int priority);
+VMChangeStateEntry *
+qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
+ VMChangeStateHandler *prepare_cb,
+ VMChangeStateHandlerWithRet *cb_ret,
+ void *opaque, int priority);
+VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
+ VMChangeStateHandler *cb,
+ VMChangeStateHandlerWithRet *cb_ret,
+ void *opaque);
+VMChangeStateEntry *qdev_add_vm_change_state_handler_full(
+ DeviceState *dev, VMChangeStateHandler *cb, VMChangeStateHandler *prepare_cb,
+ VMChangeStateHandlerWithRet *cb_ret, void *opaque);
+void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
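A hedged sketch of the simplest registration form; the handler body and the comment about where to register it are illustrative.

static void example_vm_change_state(void *opaque, bool running, RunState state)
{
    if (running) {
        /* VM resumed: restart device activity */
    } else {
        /* VM stopped: quiesce and flush any pending work */
    }
}

/* typically called once from device or subsystem init:
 *     qemu_add_vm_change_state_handler(example_vm_change_state, opaque);
 */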
+/**
+ * vm_state_notify: Notify the state of the VM
+ *
+ * @running: whether the VM is running or not.
+ * @state: the #RunState of the VM.
+ *
+ * Return the result of the callbacks that have a return value.
+ * If no callback has a return value, 0 is still returned and the
+ * upper layer should not do additional processing.
+ */
+int vm_state_notify(bool running, RunState state);
+
+static inline bool shutdown_caused_by_guest(ShutdownCause cause)
+{
+ return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN;
+}
+
+/*
+ * In a "live" state, the vcpu clock is ticking, and the runstate notifiers
+ * think we are running.
+ */
+static inline bool runstate_is_live(RunState state)
+{
+ return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED;
+}
+
+void vm_start(void);
+
+/**
+ * vm_prepare_start: Prepare for starting/resuming the VM
+ *
+ * @step_pending: whether any of the CPUs is about to be single-stepped by gdb
+ */
+int vm_prepare_start(bool step_pending);
+
+/**
+ * vm_resume: If @state is a live state, start the vm and set the state,
+ * else just set the state.
+ *
+ * @state: the state to restore
+ */
+void vm_resume(RunState state);
+
+int vm_stop(RunState state);
+int vm_stop_force_state(RunState state);
+int vm_shutdown(void);
+void vm_set_suspended(bool suspended);
+bool vm_get_suspended(void);
+
+typedef enum WakeupReason {
+ /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
+ QEMU_WAKEUP_REASON_NONE = 0,
+ QEMU_WAKEUP_REASON_RTC,
+ QEMU_WAKEUP_REASON_PMTIMER,
+ QEMU_WAKEUP_REASON_OTHER,
+} WakeupReason;
+
+void qemu_system_reset_request(ShutdownCause reason);
+void qemu_system_suspend_request(void);
+void qemu_register_suspend_notifier(Notifier *notifier);
+bool qemu_wakeup_suspend_enabled(void);
+void qemu_system_wakeup_request(WakeupReason reason, Error **errp);
+void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
+void qemu_register_wakeup_notifier(Notifier *notifier);
+void qemu_register_wakeup_support(void);
+void qemu_system_shutdown_request_with_code(ShutdownCause reason,
+ int exit_code);
+void qemu_system_shutdown_request(ShutdownCause reason);
+void qemu_system_powerdown_request(void);
+void qemu_register_powerdown_notifier(Notifier *notifier);
+void qemu_register_shutdown_notifier(Notifier *notifier);
+void qemu_system_debug_request(void);
+void qemu_system_vmstop_request(RunState reason);
+void qemu_system_vmstop_request_prepare(void);
+bool qemu_vmstop_requested(RunState *r);
+ShutdownCause qemu_shutdown_requested_get(void);
+ShutdownCause qemu_reset_requested_get(void);
+void qemu_system_killed(int signal, pid_t pid);
+void qemu_system_reset(ShutdownCause reason);
+void qemu_system_guest_panicked(GuestPanicInformation *info);
+void qemu_system_guest_crashloaded(GuestPanicInformation *info);
+void qemu_system_guest_pvshutdown(void);
+bool qemu_system_dump_in_progress(void);
+
+#endif
+
diff --git a/include/sysemu/seccomp.h b/include/system/seccomp.h
index fe85989..fe85989 100644
--- a/include/sysemu/seccomp.h
+++ b/include/system/seccomp.h
diff --git a/include/system/spdm-socket.h b/include/system/spdm-socket.h
new file mode 100644
index 0000000..5d8bd9a
--- /dev/null
+++ b/include/system/spdm-socket.h
@@ -0,0 +1,74 @@
+/*
+ * QEMU SPDM socket support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef SPDM_REQUESTER_H
+#define SPDM_REQUESTER_H
+
+/**
+ * spdm_socket_connect: connect to an external SPDM socket
+ * @port: port to connect to
+ * @errp: error object handle
+ *
+ * This will connect to an external SPDM socket server. On error
+ * it will return -1 and errp will be set. On success this function
+ * will return the socket number.
+ */
+int spdm_socket_connect(uint16_t port, Error **errp);
+
+/**
+ * spdm_socket_rsp: send a message to an SPDM server and receive a response
+ * @socket: socket returned from spdm_socket_connect()
+ * @transport_type: SPDM_SOCKET_TRANSPORT_TYPE_* macro
+ * @req: request buffer
+ * @req_len: request buffer length
+ * @rsp: response buffer
+ * @rsp_len: response buffer length
+ *
+ * Send platform data to an SPDM server over the socket and then receive
+ * a response.
+ */
+uint32_t spdm_socket_rsp(const int socket, uint32_t transport_type,
+ void *req, uint32_t req_len,
+ void *rsp, uint32_t rsp_len);
+
+/**
+ * spdm_socket_close: send a shutdown command to the server
+ * @socket: socket returned from spdm_socket_connect()
+ * @transport_type: SPDM_SOCKET_TRANSPORT_TYPE_* macro
+ *
+ * This will issue a shutdown command to the server.
+ */
+void spdm_socket_close(const int socket, uint32_t transport_type);
+
+#define SPDM_SOCKET_COMMAND_NORMAL 0x0001
+#define SPDM_SOCKET_COMMAND_OOB_ENCAP_KEY_UPDATE 0x8001
+#define SPDM_SOCKET_COMMAND_CONTINUE 0xFFFD
+#define SPDM_SOCKET_COMMAND_SHUTDOWN 0xFFFE
+#define SPDM_SOCKET_COMMAND_UNKOWN 0xFFFF
+#define SPDM_SOCKET_COMMAND_TEST 0xDEAD
+
+#define SPDM_SOCKET_TRANSPORT_TYPE_MCTP 0x01
+#define SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE 0x02
+
+#define SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE 0x1200
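Putting the three calls together, a request/response exchange looks roughly like the sketch below; the port number and the empty buffers are placeholders, and error handling is trimmed.

static void example_spdm_exchange(Error **errp)
{
    uint8_t req[SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE] = { 0 };
    uint8_t rsp[SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE] = { 0 };
    int fd = spdm_socket_connect(2323, errp);   /* port is an assumption */

    if (fd < 0) {
        return;                                 /* errp already set */
    }
    spdm_socket_rsp(fd, SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
                    req, sizeof(req), rsp, sizeof(rsp));
    spdm_socket_close(fd, SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
}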
+
+#endif
diff --git a/include/sysemu/stats.h b/include/system/stats.h
index 42c236c..42c236c 100644
--- a/include/sysemu/stats.h
+++ b/include/system/stats.h
diff --git a/include/system/system.h b/include/system/system.h
new file mode 100644
index 0000000..a7effe7
--- /dev/null
+++ b/include/system/system.h
@@ -0,0 +1,125 @@
+#ifndef SYSTEM_H
+#define SYSTEM_H
+/* Misc. things related to the system emulator. */
+
+#include "qemu/timer.h"
+#include "qemu/notify.h"
+#include "qemu/uuid.h"
+
+/* vl.c */
+
+extern int only_migratable;
+extern const char *qemu_name;
+extern QemuUUID qemu_uuid;
+extern bool qemu_uuid_set;
+
+const char *qemu_get_vm_name(void);
+
+/* Exit notifiers will run with BQL held. */
+void qemu_add_exit_notifier(Notifier *notify);
+void qemu_remove_exit_notifier(Notifier *notify);
+
+void qemu_add_machine_init_done_notifier(Notifier *notify);
+void qemu_remove_machine_init_done_notifier(Notifier *notify);
+
+void configure_rtc(QemuOpts *opts);
+
+void qemu_init_subsystems(void);
+
+extern int autostart;
+
+typedef enum {
+ VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL,
+ VGA_TCX, VGA_CG3, VGA_DEVICE, VGA_VIRTIO,
+ VGA_TYPE_MAX,
+} VGAInterfaceType;
+
+extern int vga_interface_type;
+extern bool vga_interface_created;
+
+extern int graphic_width;
+extern int graphic_height;
+extern int graphic_depth;
+extern int display_opengl;
+extern const char *keyboard_layout;
+extern int old_param;
+extern uint8_t *boot_splash_filedata;
+extern bool enable_cpu_pm;
+extern QEMUClockType rtc_clock;
+
+typedef enum {
+ MLOCK_OFF = 0,
+ MLOCK_ON,
+ MLOCK_ON_FAULT,
+} MlockState;
+
+bool should_mlock(MlockState);
+bool is_mlock_on_fault(MlockState);
+
+extern MlockState mlock_state;
+
+#define MAX_OPTION_ROMS 16
+typedef struct QEMUOptionRom {
+ const char *name;
+ int32_t bootindex;
+} QEMUOptionRom;
+extern QEMUOptionRom option_rom[MAX_OPTION_ROMS];
+extern int nb_option_roms;
+
+#define MAX_PROM_ENVS 128
+extern const char *prom_envs[MAX_PROM_ENVS];
+extern unsigned int nb_prom_envs;
+
+/* serial ports */
+
+/* Return the Chardev for serial port i, or NULL if none */
+Chardev *serial_hd(int i);
+
+/* parallel ports */
+
+#define MAX_PARALLEL_PORTS 3
+
+extern Chardev *parallel_hds[MAX_PARALLEL_PORTS];
+
+void add_boot_device_path(int32_t bootindex, DeviceState *dev,
+ const char *suffix);
+char *get_boot_devices_list(size_t *size);
+
+DeviceState *get_boot_device(uint32_t position);
+void check_boot_index(int32_t bootindex, Error **errp);
+void del_boot_device_path(DeviceState *dev, const char *suffix);
+void device_add_bootindex_property(Object *obj, int32_t *bootindex,
+ const char *name, const char *suffix,
+ DeviceState *dev);
+void restore_boot_order(void *opaque);
+void validate_bootdevices(const char *devices, Error **errp);
+void add_boot_device_lchs(DeviceState *dev, const char *suffix,
+ uint32_t lcyls, uint32_t lheads, uint32_t lsecs);
+void del_boot_device_lchs(DeviceState *dev, const char *suffix);
+char *get_boot_devices_lchs_list(size_t *size);
+
+/* handler to set the boot_device order for a specific type of MachineClass */
+typedef void QEMUBootSetHandler(void *opaque, const char *boot_order,
+ Error **errp);
+void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
+void qemu_boot_set(const char *boot_order, Error **errp);
+
+bool defaults_enabled(void);
+
+void qemu_init(int argc, char **argv);
+int qemu_main_loop(void);
+void qemu_cleanup(int);
+
+extern QemuOptsList qemu_legacy_drive_opts;
+extern QemuOptsList qemu_common_drive_opts;
+extern QemuOptsList qemu_drive_opts;
+extern QemuOptsList bdrv_runtime_opts;
+extern QemuOptsList qemu_chardev_opts;
+extern QemuOptsList qemu_device_opts;
+extern QemuOptsList qemu_netdev_opts;
+extern QemuOptsList qemu_nic_opts;
+extern QemuOptsList qemu_net_opts;
+extern QemuOptsList qemu_global_opts;
+extern QemuOptsList qemu_semihosting_config_opts;
+
+#endif
diff --git a/include/system/tcg.h b/include/system/tcg.h
new file mode 100644
index 0000000..7622dce
--- /dev/null
+++ b/include/system/tcg.h
@@ -0,0 +1,28 @@
+/*
+ * QEMU TCG support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-TCG-specific code */
+
+#ifndef SYSTEM_TCG_H
+#define SYSTEM_TCG_H
+
+#ifdef CONFIG_TCG
+extern bool tcg_allowed;
+#define tcg_enabled() (tcg_allowed)
+#else
+#define tcg_enabled() 0
+#endif
+
+/**
+ * qemu_tcg_mttcg_enabled:
+ * Check whether we are running multi-threaded TCG or not.
+ *
+ * Returns: %true if we are in MTTCG mode, %false otherwise.
+ */
+bool qemu_tcg_mttcg_enabled(void);
+
+#endif
diff --git a/include/sysemu/tpm.h b/include/system/tpm.h
index 1ee568b..1ee568b 100644
--- a/include/sysemu/tpm.h
+++ b/include/system/tpm.h
diff --git a/include/system/tpm_backend.h b/include/system/tpm_backend.h
new file mode 100644
index 0000000..01b11f6
--- /dev/null
+++ b/include/system/tpm_backend.h
@@ -0,0 +1,216 @@
+/*
+ * QEMU TPM Backend
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * Authors:
+ * Stefan Berger <stefanb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TPM_BACKEND_H
+#define TPM_BACKEND_H
+
+#include "qom/object.h"
+#include "qemu/option.h"
+#include "system/tpm.h"
+#include "qapi/error.h"
+
+#ifdef CONFIG_TPM
+
+#define TYPE_TPM_BACKEND "tpm-backend"
+OBJECT_DECLARE_TYPE(TPMBackend, TPMBackendClass,
+ TPM_BACKEND)
+
+
+typedef struct TPMBackendCmd {
+ uint8_t locty;
+ const uint8_t *in;
+ uint32_t in_len;
+ uint8_t *out;
+ uint32_t out_len;
+ bool selftest_done;
+} TPMBackendCmd;
+
+struct TPMBackend {
+ Object parent;
+
+ /*< protected >*/
+ TPMIf *tpmif;
+ bool opened;
+ bool had_startup_error;
+ TPMBackendCmd *cmd;
+
+ /* <public> */
+ char *id;
+
+ QLIST_ENTRY(TPMBackend) list;
+};
+
+struct TPMBackendClass {
+ ObjectClass parent_class;
+
+ enum TpmType type;
+ const QemuOptDesc *opts;
+ /* get a descriptive text of the backend to display to the user */
+ const char *desc;
+
+ TPMBackend *(*create)(QemuOpts *opts);
+
+ /* start up the TPM on the backend - optional */
+ int (*startup_tpm)(TPMBackend *t, size_t buffersize);
+
+ /* optional */
+ void (*reset)(TPMBackend *t);
+
+ void (*cancel_cmd)(TPMBackend *t);
+
+ /* optional */
+ bool (*get_tpm_established_flag)(TPMBackend *t);
+
+ /* optional */
+ int (*reset_tpm_established_flag)(TPMBackend *t, uint8_t locty);
+
+ TPMVersion (*get_tpm_version)(TPMBackend *t);
+
+ size_t (*get_buffer_size)(TPMBackend *t);
+
+ TpmTypeOptions *(*get_tpm_options)(TPMBackend *t);
+
+ void (*handle_request)(TPMBackend *s, TPMBackendCmd *cmd, Error **errp);
+};
+
+/**
+ * tpm_backend_get_type:
+ * @s: the backend
+ *
+ * Returns the TpmType of the backend.
+ */
+enum TpmType tpm_backend_get_type(TPMBackend *s);
+
+/**
+ * tpm_backend_init:
+ * @s: the backend to be initialized
+ * @tpmif: TPM interface
+ * @errp: a pointer to return the #Error object if an error occurs.
+ *
+ * Initialize the backend with the given variables.
+ *
+ * Returns 0 on success.
+ */
+int tpm_backend_init(TPMBackend *s, TPMIf *tpmif, Error **errp);
+
+/**
+ * tpm_backend_startup_tpm:
+ * @s: the backend whose TPM support is to be started
+ * @buffersize: the buffer size the TPM is supposed to use,
+ * 0 to leave it as-is
+ *
+ * Returns 0 on success.
+ */
+int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize);
+
+/**
+ * tpm_backend_had_startup_error:
+ * @s: the backend to query for a startup error
+ *
+ * Check whether the backend had an error during startup. Returns
+ * false if no error occurred and the backend can be used, true
+ * otherwise.
+ */
+bool tpm_backend_had_startup_error(TPMBackend *s);
+
+/**
+ * tpm_backend_deliver_request:
+ * @s: the backend to send the request to
+ * @cmd: the command to deliver
+ *
+ * Send a request to the backend. The backend will then send the request
+ * to the TPM implementation.
+ */
+void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd);
+
+/**
+ * tpm_backend_reset:
+ * @s: the backend to reset
+ *
+ * Reset the backend into a well-defined state with all previous errors
+ * cleared.
+ */
+void tpm_backend_reset(TPMBackend *s);
+
+/**
+ * tpm_backend_cancel_cmd:
+ * @s: the backend
+ *
+ * Cancel any ongoing command being processed by the TPM implementation
+ * on behalf of the QEMU guest.
+ */
+void tpm_backend_cancel_cmd(TPMBackend *s);
+
+/**
+ * tpm_backend_get_tpm_established_flag:
+ * @s: the backend
+ *
+ * Get the TPM establishment flag. This function may be called very
+ * frequently by the frontend since, for example, in the TIS
+ * implementation this flag is part of a register.
+ */
+bool tpm_backend_get_tpm_established_flag(TPMBackend *s);
+
+/**
+ * tpm_backend_reset_tpm_established_flag:
+ * @s: the backend
+ * @locty: the locality number
+ *
+ * Reset the TPM establishment flag.
+ */
+int tpm_backend_reset_tpm_established_flag(TPMBackend *s, uint8_t locty);
+
+/**
+ * tpm_backend_get_tpm_version:
+ * @s: the backend to call into
+ *
+ * Get the TPM Version that is emulated at the backend.
+ *
+ * Returns TPMVersion.
+ */
+TPMVersion tpm_backend_get_tpm_version(TPMBackend *s);
+
+/**
+ * tpm_backend_get_buffer_size:
+ * @s: the backend to call into
+ *
+ * Get the TPM's buffer size.
+ *
+ * Returns buffer size.
+ */
+size_t tpm_backend_get_buffer_size(TPMBackend *s);
+
+/**
+ * tpm_backend_finish_sync:
+ * @s: the backend to call into
+ *
+ * Finish the pending command synchronously (this will call aio_poll()
+ * on the QEMU main AIOContext until the command completes).
+ */
+void tpm_backend_finish_sync(TPMBackend *s);
+
+/**
+ * tpm_backend_query_tpm:
+ * @s: the backend
+ *
+ * Query the backend for TPM information.
+ *
+ * Returns a newly allocated TPMInfo.
+ */
+TPMInfo *tpm_backend_query_tpm(TPMBackend *s);
+
+TPMBackend *qemu_find_tpm_be(const char *id);
+
+#endif /* CONFIG_TPM */
+
+#endif /* TPM_BACKEND_H */
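
For orientation, a TPM frontend would drive the API above roughly as sketched below; the helper and its arguments are illustrative only and not taken from this patch.

/* Hypothetical frontend-side sketch, built only from the declarations above. */
#include "qemu/osdep.h"
#include "system/tpm_backend.h"

static void frontend_send(TPMBackend *be, uint8_t locty,
                          const uint8_t *req, uint32_t req_len,
                          uint8_t *resp, uint32_t resp_len)
{
    TPMBackendCmd cmd = {
        .locty   = locty,
        .in      = req,
        .in_len  = req_len,
        .out     = resp,
        .out_len = resp_len,
    };

    tpm_backend_deliver_request(be, &cmd);   /* hand the command to the backend */
    tpm_backend_finish_sync(be);             /* wait for it to complete */
}
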
diff --git a/include/system/tpm_util.h b/include/system/tpm_util.h
new file mode 100644
index 0000000..1858693
--- /dev/null
+++ b/include/system/tpm_util.h
@@ -0,0 +1,72 @@
+/*
+ * TPM utility functions
+ *
+ * Copyright (c) 2010 - 2015 IBM Corporation
+ * Authors:
+ * Stefan Berger <stefanb@us.ibm.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#ifndef SYSTEM_TPM_UTIL_H
+#define SYSTEM_TPM_UTIL_H
+
+#include "system/tpm.h"
+#include "qemu/bswap.h"
+
+void tpm_util_write_fatal_error_response(uint8_t *out, uint32_t out_len);
+
+bool tpm_util_is_selftest(const uint8_t *in, uint32_t in_len);
+
+int tpm_util_test_tpmdev(int tpm_fd, TPMVersion *tpm_version);
+
+static inline uint16_t tpm_cmd_get_tag(const void *b)
+{
+ return lduw_be_p(b);
+}
+
+static inline void tpm_cmd_set_tag(void *b, uint16_t tag)
+{
+ stw_be_p(b, tag);
+}
+
+static inline uint32_t tpm_cmd_get_size(const void *b)
+{
+ return ldl_be_p(b + 2);
+}
+
+static inline void tpm_cmd_set_size(void *b, uint32_t size)
+{
+ stl_be_p(b + 2, size);
+}
+
+static inline uint32_t tpm_cmd_get_ordinal(const void *b)
+{
+ return ldl_be_p(b + 6);
+}
+
+static inline uint32_t tpm_cmd_get_errcode(const void *b)
+{
+ return ldl_be_p(b + 6);
+}
+
+static inline void tpm_cmd_set_error(void *b, uint32_t error)
+{
+ stl_be_p(b + 6, error);
+}
+
+void tpm_util_show_buffer(const unsigned char *buffer,
+ size_t buffer_size, const char *string);
+
+#endif /* SYSTEM_TPM_UTIL_H */
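
The accessors above assume the usual 10-byte big-endian TPM command/response header (2-byte tag, 4-byte size, 4-byte ordinal or error code), which explains the fixed offsets 0, 2 and 6. A small sketch of composing such a header with them (the helper name is made up):

/* Sketch: build a command header using the helpers above. */
#include "qemu/osdep.h"
#include "system/tpm_util.h"

static void build_header(uint8_t *buf, uint16_t tag,
                         uint32_t total_size, uint32_t ordinal)
{
    tpm_cmd_set_tag(buf, tag);          /* bytes 0..1 */
    tpm_cmd_set_size(buf, total_size);  /* bytes 2..5 */
    stl_be_p(buf + 6, ordinal);         /* bytes 6..9, read back by tpm_cmd_get_ordinal() */
}
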
diff --git a/include/system/vhost-user-backend.h b/include/system/vhost-user-backend.h
new file mode 100644
index 0000000..5634ebd
--- /dev/null
+++ b/include/system/vhost-user-backend.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU vhost-user backend
+ *
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Marc-AndrƩ Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_VHOST_USER_BACKEND_H
+#define QEMU_VHOST_USER_BACKEND_H
+
+#include "qom/object.h"
+#include "system/memory.h"
+#include "qemu/option.h"
+#include "qemu/bitmap.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "chardev/char-fe.h"
+#include "io/channel.h"
+
+#define TYPE_VHOST_USER_BACKEND "vhost-user-backend"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostUserBackend,
+ VHOST_USER_BACKEND)
+
+struct VhostUserBackend {
+ /* private */
+ Object parent;
+
+ char *chr_name;
+ CharBackend chr;
+ VhostUserState vhost_user;
+ struct vhost_dev dev;
+ VirtIODevice *vdev;
+ bool started;
+ bool completed;
+};
+
+int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
+ unsigned nvqs, Error **errp);
+void vhost_user_backend_start(VhostUserBackend *b);
+int vhost_user_backend_stop(VhostUserBackend *b);
+
+#endif
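
A virtio device model is expected to wire the helper up roughly as follows; the queue count and the wrapper function are placeholders for the example.

/* Sketch: initialise and start a vhost-user backend for a virtio device. */
#include "qemu/osdep.h"
#include "system/vhost-user-backend.h"

static bool example_start(VhostUserBackend *b, VirtIODevice *vdev, Error **errp)
{
    if (vhost_user_backend_dev_init(b, vdev, 2 /* nvqs, placeholder */, errp) < 0) {
        return false;
    }
    vhost_user_backend_start(b);
    return true;
}
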
diff --git a/include/sysemu/watchdog.h b/include/system/watchdog.h
index 745c89b..745c89b 100644
--- a/include/sysemu/watchdog.h
+++ b/include/system/watchdog.h
diff --git a/include/sysemu/whpx.h b/include/system/whpx.h
index 00ff409..00ff409 100644
--- a/include/sysemu/whpx.h
+++ b/include/system/whpx.h
diff --git a/include/system/xen-mapcache.h b/include/system/xen-mapcache.h
new file mode 100644
index 0000000..bb454a7
--- /dev/null
+++ b/include/system/xen-mapcache.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef XEN_MAPCACHE_H
+#define XEN_MAPCACHE_H
+
+#include "exec/cpu-common.h"
+#include "system/xen.h"
+
+typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
+ ram_addr_t size);
+void xen_map_cache_init(phys_offset_to_gaddr_t f,
+ void *opaque);
+uint8_t *xen_map_cache(MemoryRegion *mr, hwaddr phys_addr, hwaddr size,
+ ram_addr_t ram_addr_offset,
+ uint8_t lock, bool dma,
+ bool is_write);
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
+void xen_invalidate_map_cache_entry(uint8_t *buffer);
+void xen_invalidate_map_cache(void);
+uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
+ hwaddr new_phys_addr,
+ hwaddr size);
+
+#endif /* XEN_MAPCACHE_H */
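
A rough sketch of the mapcache lifecycle implied by the prototypes above (the helper and its arguments are illustrative, not from this patch):

/* Sketch: map a guest-physical range, copy from it, then release the entry. */
#include "qemu/osdep.h"
#include "system/xen-mapcache.h"

static void copy_from_guest(MemoryRegion *mr, hwaddr gpa, void *dst, hwaddr len)
{
    uint8_t *p = xen_map_cache(mr, gpa, len,
                               0,       /* ram_addr_offset */
                               1,       /* lock the entry  */
                               false,   /* dma             */
                               false);  /* is_write        */

    memcpy(dst, p, len);
    xen_invalidate_map_cache_entry(p);  /* drop the locked entry again */
}
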
diff --git a/include/system/xen.h b/include/system/xen.h
new file mode 100644
index 0000000..c2f283d
--- /dev/null
+++ b/include/system/xen.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU Xen support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-Xen-specific code */
+
+#ifndef SYSTEM_XEN_H
+#define SYSTEM_XEN_H
+
+#include "exec/cpu-common.h"
+
+#ifdef COMPILING_PER_TARGET
+# ifdef CONFIG_XEN
+# define CONFIG_XEN_IS_POSSIBLE
+# endif
+#else
+# define CONFIG_XEN_IS_POSSIBLE
+#endif /* COMPILING_PER_TARGET */
+
+#ifdef CONFIG_XEN_IS_POSSIBLE
+extern bool xen_allowed;
+#define xen_enabled() (xen_allowed)
+#else /* !CONFIG_XEN_IS_POSSIBLE */
+#define xen_enabled() 0
+#endif /* CONFIG_XEN_IS_POSSIBLE */
+
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ struct MemoryRegion *mr, Error **errp);
+bool xen_mr_is_memory(MemoryRegion *mr);
+bool xen_mr_is_grants(MemoryRegion *mr);
+#endif
diff --git a/include/tcg/insn-start-words.h b/include/tcg/insn-start-words.h
index 50c18bd..c52aec5 100644
--- a/include/tcg/insn-start-words.h
+++ b/include/tcg/insn-start-words.h
@@ -1,17 +1,12 @@
/* SPDX-License-Identifier: MIT */
/*
- * Define TARGET_INSN_START_WORDS
+ * Define INSN_START_WORDS
* Copyright (c) 2008 Fabrice Bellard
*/
-#ifndef TARGET_INSN_START_WORDS
+#ifndef TCG_INSN_START_WORDS
+#define TCG_INSN_START_WORDS
-#include "cpu.h"
+#define INSN_START_WORDS 3
-#ifndef TARGET_INSN_START_EXTRA_WORDS
-# define TARGET_INSN_START_WORDS 1
-#else
-# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
-#endif
-
-#endif /* TARGET_INSN_START_WORDS */
+#endif /* TCG_INSN_START_WORDS */
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
deleted file mode 100644
index 641b974..0000000
--- a/include/tcg/oversized-guest.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Define TCG_OVERSIZED_GUEST
- * Copyright (c) 2008 Fabrice Bellard
- */
-
-#ifndef EXEC_TCG_OVERSIZED_GUEST_H
-#define EXEC_TCG_OVERSIZED_GUEST_H
-
-#include "tcg-target-reg-bits.h"
-#include "cpu-param.h"
-
-/*
- * Oversized TCG guests make things like MTTCG hard
- * as we can't use atomics for cputlb updates.
- */
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-#define TCG_OVERSIZED_GUEST 1
-#else
-#define TCG_OVERSIZED_GUEST 0
-#endif
-
-#endif
diff --git a/include/tcg/startup.h b/include/tcg/startup.h
index f713057..95f574a 100644
--- a/include/tcg/startup.h
+++ b/include/tcg/startup.h
@@ -29,12 +29,12 @@
* tcg_init: Initialize the TCG runtime
* @tb_size: translation buffer size
* @splitwx: use separate rw and rx mappings
- * @max_cpus: number of vcpus in system mode
+ * @max_threads: number of vcpu threads in system mode
*
* Allocate and initialize TCG resources, especially the JIT buffer.
- * In user-only mode, @max_cpus is unused.
+ * In user-only mode, @max_threads is unused.
*/
-void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
+void tcg_init(size_t tb_size, int splitwx, unsigned max_threads);
/**
* tcg_register_thread: Register this thread with the TCG runtime
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
index 009e277..e1071ad 100644
--- a/include/tcg/tcg-op-common.h
+++ b/include/tcg/tcg-op-common.h
@@ -14,6 +14,7 @@
TCGv_i32 tcg_constant_i32(int32_t val);
TCGv_i64 tcg_constant_i64(int64_t val);
+TCGv_vaddr tcg_constant_vaddr(uintptr_t val);
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
@@ -135,6 +136,8 @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co,
+ TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci);
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
@@ -238,6 +241,8 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co,
+ TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci);
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
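
The new addcio helpers fold a carry-in/carry-out addition into one call: as the name suggests, they compute r = a + b + ci and deliver the resulting carry in co. A hedged sketch of a guest add-with-carry built on the 64-bit variant:

/* Sketch: model a 64-bit guest ADC using the helper declared above. */
static void gen_adc64(TCGv_i64 res, TCGv_i64 carry_out,
                      TCGv_i64 a, TCGv_i64 b, TCGv_i64 carry_in)
{
    /* res = a + b + carry_in; carry_out receives the carry of that sum */
    tcg_gen_addcio_i64(res, carry_out, a, b, carry_in);
}
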
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
index 65553f5..ea0c87f 100644
--- a/include/tcg/tcg-op-gvec-common.h
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -227,25 +227,66 @@ typedef struct {
bool prefer_i64;
} GVecGen4i;
+/* Expand (dbase+dofs) = op(abase+aofs), length @oprsz, clearing to @maxsz. */
+void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs). */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, c). */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, int64_t c, const GVecGen2i *);
+ uint32_t maxsz, int64_t c, const GVecGen2i *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, s). */
void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
+ uint32_t maxsz, TCGv_i64 c, const GVecGen2s *op);
+
+/*
+ * Expand (dbase+dofs) = op(abase+aofs, bbase+bofs),
+ * length @oprsz, clearing to @maxsz.
+ */
+void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, env+bofs). */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op);
+
+/*
+ * Depending on op->load_dest and op->write_aofs, expand
+ * (env+dofs) = op(env+aofs, env+bofs, c)
+ * or
+ * (env+dofs) = op(env+dofs, env+aofs, env+bofs, c)
+ * or
+ * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, c)
+ * or
+ * (env+dofs), (env+aofs) = op(env+dofs, env+aofs, env+bofs, c)
+ */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
- const GVecGen3i *);
+ const GVecGen3i *op);
+
+/*
+ * Depending on op->write_aofs, expand
+ * (env+dofs) = op(env+aofs, env+bofs, env+cofs)
+ * or
+ * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, env+cofs)
+ */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
+
+/* Expand (env+dofs) = op(env+aofs, env+bofs, env+cofs, c). */
void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
const GVecGen4i *);
/* Expand a specific vector operation. */
+void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+
void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -255,6 +296,15 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -336,6 +386,9 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i64);
+void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, uint64_t imm);
+
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
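
The new *_var entry points differ from the existing helpers only in taking an explicit base pointer per operand instead of implying the CPU env; with the usual env pointer (tcg_env, assumed here) the two forms should be equivalent:

/* Sketch: classic env-relative form vs. the new explicit-base form. */
static void gen_vec_add_example(unsigned vece, uint32_t dofs, uint32_t aofs,
                                uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    /* offsets relative to env */
    tcg_gen_gvec_add(vece, dofs, aofs, bofs, oprsz, maxsz);

    /* same operation, with the base of each operand spelled out */
    tcg_gen_gvec_add_var(vece, tcg_env, dofs, tcg_env, aofs,
                         tcg_env, bofs, oprsz, maxsz);
}
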
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index a028505..c912578 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -9,6 +9,8 @@
#define TCG_TCG_OP_H
#include "tcg/tcg-op-common.h"
+#include "tcg/insn-start-words.h"
+#include "exec/target_long.h"
#ifndef TARGET_LONG_BITS
#error must include QEMU headers
@@ -22,24 +24,34 @@
# error
#endif
-#ifndef TARGET_INSN_START_EXTRA_WORDS
+#if INSN_START_WORDS != 3
+# error Mismatch with insn-start-words.h
+#endif
+
+#if TARGET_INSN_START_EXTRA_WORDS == 0
static inline void tcg_gen_insn_start(target_ulong pc)
{
- TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS);
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
+ INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
+ tcg_set_insn_start_param(op, 1, 0);
+ tcg_set_insn_start_param(op, 2, 0);
}
#elif TARGET_INSN_START_EXTRA_WORDS == 1
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{
- TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS);
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
+ INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
tcg_set_insn_start_param(op, 1, a1);
+ tcg_set_insn_start_param(op, 2, 0);
}
#elif TARGET_INSN_START_EXTRA_WORDS == 2
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2)
{
- TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS);
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
+ INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
tcg_set_insn_start_param(op, 1, a1);
tcg_set_insn_start_param(op, 2, a2);
@@ -252,6 +264,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
#define tcg_gen_add2_tl tcg_gen_add2_i64
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
+#define tcg_gen_addcio_tl tcg_gen_addcio_i64
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
@@ -370,6 +383,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
#define tcg_gen_add2_tl tcg_gen_add2_i32
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
+#define tcg_gen_addcio_tl tcg_gen_addcio_i32
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
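
With INSN_START_WORDS fixed at 3, every insn_start op carries three start words and targets with fewer extra words pad with zeroes, as the hunk above shows. Schematically, for a hypothetical target with one extra word (the cc_op value is made up):

/* Sketch: what a translator call records after this change. */
static void example_insn_start(DisasContextBase *base, uint32_t cc_op)
{
    /* For a target with TARGET_INSN_START_EXTRA_WORDS == 1: */
    tcg_gen_insn_start(base->pc_next, cc_op);
    /* The op now always holds three words: pc_next, cc_op, 0. */
}
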
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index 546eb49..e988edd 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -33,286 +33,160 @@ DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
/* variable number of parameters */
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)
-DEF(br, 0, 0, 1, TCG_OPF_BB_END)
-
-#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
-#if TCG_TARGET_REG_BITS == 32
-# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
-#else
-# define IMPL64 TCG_OPF_64BIT
-#endif
-
-DEF(mb, 0, 0, 1, 0)
-
-DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
-DEF(setcond_i32, 1, 2, 1, 0)
-DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32))
-DEF(movcond_i32, 1, 4, 1, 0)
-/* load/store */
-DEF(ld8u_i32, 1, 1, 1, 0)
-DEF(ld8s_i32, 1, 1, 1, 0)
-DEF(ld16u_i32, 1, 1, 1, 0)
-DEF(ld16s_i32, 1, 1, 1, 0)
-DEF(ld_i32, 1, 1, 1, 0)
-DEF(st8_i32, 0, 2, 1, 0)
-DEF(st16_i32, 0, 2, 1, 0)
-DEF(st_i32, 0, 2, 1, 0)
-/* arith */
-DEF(add_i32, 1, 2, 0, 0)
-DEF(sub_i32, 1, 2, 0, 0)
-DEF(mul_i32, 1, 2, 0, 0)
-DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
-DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
-DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
-DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
-DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
-DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
-DEF(and_i32, 1, 2, 0, 0)
-DEF(or_i32, 1, 2, 0, 0)
-DEF(xor_i32, 1, 2, 0, 0)
-/* shifts/rotates */
-DEF(shl_i32, 1, 2, 0, 0)
-DEF(shr_i32, 1, 2, 0, 0)
-DEF(sar_i32, 1, 2, 0, 0)
-DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
-DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
-DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
-DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
-DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
-DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))
-
-DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
-
-DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
-DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
-DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
-DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
-DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
-DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
-DEF(brcond2_i32, 0, 4, 2,
- TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
-DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))
-
-DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
-DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
-DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
-DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
-DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
-DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
-DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
-DEF(neg_i32, 1, 1, 0, 0)
-DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
-DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
-DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
-DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
-DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
-DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
-DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
-DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
-
-DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
-DEF(setcond_i64, 1, 2, 1, IMPL64)
-DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
-DEF(movcond_i64, 1, 4, 1, IMPL64)
-/* load/store */
-DEF(ld8u_i64, 1, 1, 1, IMPL64)
-DEF(ld8s_i64, 1, 1, 1, IMPL64)
-DEF(ld16u_i64, 1, 1, 1, IMPL64)
-DEF(ld16s_i64, 1, 1, 1, IMPL64)
-DEF(ld32u_i64, 1, 1, 1, IMPL64)
-DEF(ld32s_i64, 1, 1, 1, IMPL64)
-DEF(ld_i64, 1, 1, 1, IMPL64)
-DEF(st8_i64, 0, 2, 1, IMPL64)
-DEF(st16_i64, 0, 2, 1, IMPL64)
-DEF(st32_i64, 0, 2, 1, IMPL64)
-DEF(st_i64, 0, 2, 1, IMPL64)
-/* arith */
-DEF(add_i64, 1, 2, 0, IMPL64)
-DEF(sub_i64, 1, 2, 0, IMPL64)
-DEF(mul_i64, 1, 2, 0, IMPL64)
-DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
-DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
-DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
-DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
-DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
-DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
-DEF(and_i64, 1, 2, 0, IMPL64)
-DEF(or_i64, 1, 2, 0, IMPL64)
-DEF(xor_i64, 1, 2, 0, IMPL64)
-/* shifts/rotates */
-DEF(shl_i64, 1, 2, 0, IMPL64)
-DEF(shr_i64, 1, 2, 0, IMPL64)
-DEF(sar_i64, 1, 2, 0, IMPL64)
-DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
-DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
-DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
-DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
-DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
-DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
+DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
+DEF(brcond, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | TCG_OPF_INT)
+
+DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
+
+DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
+
+DEF(add, 1, 2, 0, TCG_OPF_INT)
+DEF(and, 1, 2, 0, TCG_OPF_INT)
+DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
+DEF(bswap32, 1, 1, 1, TCG_OPF_INT)
+DEF(bswap64, 1, 1, 1, TCG_OPF_INT)
+DEF(clz, 1, 2, 0, TCG_OPF_INT)
+DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
+DEF(ctz, 1, 2, 0, TCG_OPF_INT)
+DEF(deposit, 1, 2, 2, TCG_OPF_INT)
+DEF(divs, 1, 2, 0, TCG_OPF_INT)
+DEF(divs2, 2, 3, 0, TCG_OPF_INT)
+DEF(divu, 1, 2, 0, TCG_OPF_INT)
+DEF(divu2, 2, 3, 0, TCG_OPF_INT)
+DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(extract, 1, 1, 2, TCG_OPF_INT)
+DEF(extract2, 1, 2, 1, TCG_OPF_INT)
+DEF(ld8u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld8s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld, 1, 1, 1, TCG_OPF_INT)
+DEF(movcond, 1, 4, 1, TCG_OPF_INT)
+DEF(mul, 1, 2, 0, TCG_OPF_INT)
+DEF(muls2, 2, 2, 0, TCG_OPF_INT)
+DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
+DEF(mulu2, 2, 2, 0, TCG_OPF_INT)
+DEF(muluh, 1, 2, 0, TCG_OPF_INT)
+DEF(nand, 1, 2, 0, TCG_OPF_INT)
+DEF(neg, 1, 1, 0, TCG_OPF_INT)
+DEF(negsetcond, 1, 2, 1, TCG_OPF_INT)
+DEF(nor, 1, 2, 0, TCG_OPF_INT)
+DEF(not, 1, 1, 0, TCG_OPF_INT)
+DEF(or, 1, 2, 0, TCG_OPF_INT)
+DEF(orc, 1, 2, 0, TCG_OPF_INT)
+DEF(rems, 1, 2, 0, TCG_OPF_INT)
+DEF(remu, 1, 2, 0, TCG_OPF_INT)
+DEF(rotl, 1, 2, 0, TCG_OPF_INT)
+DEF(rotr, 1, 2, 0, TCG_OPF_INT)
+DEF(sar, 1, 2, 0, TCG_OPF_INT)
+DEF(setcond, 1, 2, 1, TCG_OPF_INT)
+DEF(sextract, 1, 1, 2, TCG_OPF_INT)
+DEF(shl, 1, 2, 0, TCG_OPF_INT)
+DEF(shr, 1, 2, 0, TCG_OPF_INT)
+DEF(st8, 0, 2, 1, TCG_OPF_INT)
+DEF(st16, 0, 2, 1, TCG_OPF_INT)
+DEF(st32, 0, 2, 1, TCG_OPF_INT)
+DEF(st, 0, 2, 1, TCG_OPF_INT)
+DEF(sub, 1, 2, 0, TCG_OPF_INT)
+DEF(xor, 1, 2, 0, TCG_OPF_INT)
+
+DEF(addco, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
+DEF(addc1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
+DEF(addci, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
+DEF(addcio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
+
+DEF(subbo, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
+DEF(subb1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
+DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
+DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
+
+DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
+DEF(setcond2_i32, 1, 4, 1, 0)
/* size changing ops */
-DEF(ext_i32_i64, 1, 1, 0, IMPL64)
-DEF(extu_i32_i64, 1, 1, 0, IMPL64)
-DEF(extrl_i64_i32, 1, 1, 0,
- IMPL(TCG_TARGET_HAS_extr_i64_i32)
- | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
-DEF(extrh_i64_i32, 1, 1, 0,
- IMPL(TCG_TARGET_HAS_extr_i64_i32)
- | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
-
-DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
-DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
-DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
-DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
-DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
-DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
-DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
-DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
-DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
-DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
-DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
-DEF(neg_i64, 1, 1, 0, IMPL64)
-DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
-DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
-DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
-DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
-DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
-DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
-DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
-DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))
-
-DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
-DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
-DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
-DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
-DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
-DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
+DEF(ext_i32_i64, 1, 1, 0, 0)
+DEF(extu_i32_i64, 1, 1, 0, 0)
+DEF(extrl_i64_i32, 1, 1, 0, 0)
+DEF(extrh_i64_i32, 1, 1, 0, 0)
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
-/* There are tcg_ctx->insn_start_words here, not just one. */
-DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
+DEF(insn_start, 0, 0, DATA64_ARGS * INSN_START_WORDS, TCG_OPF_NOT_PRESENT)
-DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
-DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
+DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
+DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)
-/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
-DEF(qemu_ld_a32_i32, 1, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-
-DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-
-/* Only used by i386 to cope with stupid register constraints. */
-DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
- IMPL(TCG_TARGET_HAS_qemu_st8_i32))
-DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
- IMPL(TCG_TARGET_HAS_qemu_st8_i32))
-
-/* Only for 64-bit hosts at the moment. */
-DEF(qemu_ld_a32_i128, 2, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
-DEF(qemu_ld_a64_i128, 2, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
-DEF(qemu_st_a32_i128, 0, 3, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
-DEF(qemu_st_a64_i128, 0, 3, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_ld, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_ld2, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st2, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
/* Host vector support. */
-#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
-
DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
-DEF(dup_vec, 1, 1, 0, IMPLVEC)
-DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))
-
-DEF(ld_vec, 1, 1, 1, IMPLVEC)
-DEF(st_vec, 0, 2, 1, IMPLVEC)
-DEF(dupm_vec, 1, 1, 1, IMPLVEC)
-
-DEF(add_vec, 1, 2, 0, IMPLVEC)
-DEF(sub_vec, 1, 2, 0, IMPLVEC)
-DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
-DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
-DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
-DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
-DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
-DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
-DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
-DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
-DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
-DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
-DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
-
-DEF(and_vec, 1, 2, 0, IMPLVEC)
-DEF(or_vec, 1, 2, 0, IMPLVEC)
-DEF(xor_vec, 1, 2, 0, IMPLVEC)
-DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
-DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
-DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
-DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
-DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
-DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
-
-DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
-DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
-DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
-DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))
-
-DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
-DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
-DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
-DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))
-
-DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
-DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
-DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
-DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
-DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
-
-DEF(cmp_vec, 1, 2, 1, IMPLVEC)
-
-DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
-DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))
+DEF(dup_vec, 1, 1, 0, TCG_OPF_VECTOR)
+DEF(dup2_vec, 1, 2, 0, TCG_OPF_VECTOR)
+
+DEF(ld_vec, 1, 1, 1, TCG_OPF_VECTOR)
+DEF(st_vec, 0, 2, 1, TCG_OPF_VECTOR)
+DEF(dupm_vec, 1, 1, 1, TCG_OPF_VECTOR)
+
+DEF(add_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(sub_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(mul_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(neg_vec, 1, 1, 0, TCG_OPF_VECTOR)
+DEF(abs_vec, 1, 1, 0, TCG_OPF_VECTOR)
+DEF(ssadd_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(usadd_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(sssub_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(ussub_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(smin_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(umin_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(smax_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(umax_vec, 1, 2, 0, TCG_OPF_VECTOR)
+
+DEF(and_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(or_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(xor_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(andc_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(orc_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(nand_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(nor_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(eqv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(not_vec, 1, 1, 0, TCG_OPF_VECTOR)
+
+DEF(shli_vec, 1, 1, 1, TCG_OPF_VECTOR)
+DEF(shri_vec, 1, 1, 1, TCG_OPF_VECTOR)
+DEF(sari_vec, 1, 1, 1, TCG_OPF_VECTOR)
+DEF(rotli_vec, 1, 1, 1, TCG_OPF_VECTOR)
+
+DEF(shls_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(shrs_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(sars_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(rotls_vec, 1, 2, 0, TCG_OPF_VECTOR)
+
+DEF(shlv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(shrv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(sarv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(rotlv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(rotrv_vec, 1, 2, 0, TCG_OPF_VECTOR)
+
+DEF(cmp_vec, 1, 2, 1, TCG_OPF_VECTOR)
+
+DEF(bitsel_vec, 1, 3, 0, TCG_OPF_VECTOR)
+DEF(cmpsel_vec, 1, 4, 1, TCG_OPF_VECTOR)
DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
-#if TCG_TARGET_MAYBE_vec
-#include "tcg-target.opc.h"
-#endif
-
-#ifdef TCG_TARGET_INTERPRETER
-/* These opcodes are only for use between the tci generator and interpreter. */
-DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
-DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
-#endif
+#include "tcg-target-opc.h.inc"
#undef DATA64_ARGS
-#undef IMPL
-#undef IMPL64
-#undef IMPLVEC
#undef DEF
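
Because opcodes are no longer split into _i32/_i64 variants, the operand width now travels with the op and availability is a runtime question. A small sketch, using the tcg_op_supported() declaration that appears in tcg.h later in this patch:

/* Sketch: runtime capability check in place of the old IMPL()/IMPL64 flags. */
#include "tcg/tcg.h"

static bool host_has_64bit_rotl(void)
{
    return tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0);
}
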
diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
index 44192c5..98f91e6 100644
--- a/include/tcg/tcg-temp-internal.h
+++ b/include/tcg/tcg-temp-internal.h
@@ -42,4 +42,10 @@ TCGv_i64 tcg_temp_ebb_new_i64(void);
TCGv_ptr tcg_temp_ebb_new_ptr(void);
TCGv_i128 tcg_temp_ebb_new_i128(void);
+/* Forget all freed EBB temps, so that new allocations produce new temps. */
+static inline void tcg_temp_ebb_reset_freed(TCGContext *s)
+{
+ memset(s->free_temps, 0, sizeof(s->free_temps));
+}
+
#endif /* TCG_TEMP_FREE_H */
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 21d5884..125323f 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -34,6 +34,7 @@
#include "tcg-target-reg-bits.h"
#include "tcg-target.h"
#include "tcg/tcg-cond.h"
+#include "tcg/insn-start-words.h"
#include "tcg/debug-assert.h"
/* XXX: make safe guess about sizes */
@@ -64,111 +65,6 @@ typedef uint64_t TCGRegSet;
#error unsupported
#endif
-#if TCG_TARGET_REG_BITS == 32
-/* Turn some undef macros into false macros. */
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_div_i64 0
-#define TCG_TARGET_HAS_rem_i64 0
-#define TCG_TARGET_HAS_div2_i64 0
-#define TCG_TARGET_HAS_rot_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 0
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 0
-#define TCG_TARGET_HAS_bswap16_i64 0
-#define TCG_TARGET_HAS_bswap32_i64 0
-#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_andc_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 0
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_deposit_i64 0
-#define TCG_TARGET_HAS_extract_i64 0
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-/* Turn some undef macros into true macros. */
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#endif
-
-#ifndef TCG_TARGET_deposit_i32_valid
-#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
-#endif
-#ifndef TCG_TARGET_deposit_i64_valid
-#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
-#endif
-#ifndef TCG_TARGET_extract_i32_valid
-#define TCG_TARGET_extract_i32_valid(ofs, len) 1
-#endif
-#ifndef TCG_TARGET_extract_i64_valid
-#define TCG_TARGET_extract_i64_valid(ofs, len) 1
-#endif
-
-/* Only one of DIV or DIV2 should be defined. */
-#if defined(TCG_TARGET_HAS_div_i32)
-#define TCG_TARGET_HAS_div2_i32 0
-#elif defined(TCG_TARGET_HAS_div2_i32)
-#define TCG_TARGET_HAS_div_i32 0
-#define TCG_TARGET_HAS_rem_i32 0
-#endif
-#if defined(TCG_TARGET_HAS_div_i64)
-#define TCG_TARGET_HAS_div2_i64 0
-#elif defined(TCG_TARGET_HAS_div2_i64)
-#define TCG_TARGET_HAS_div_i64 0
-#define TCG_TARGET_HAS_rem_i64 0
-#endif
-
-#if !defined(TCG_TARGET_HAS_v64) \
- && !defined(TCG_TARGET_HAS_v128) \
- && !defined(TCG_TARGET_HAS_v256)
-#define TCG_TARGET_MAYBE_vec 0
-#define TCG_TARGET_HAS_abs_vec 0
-#define TCG_TARGET_HAS_neg_vec 0
-#define TCG_TARGET_HAS_not_vec 0
-#define TCG_TARGET_HAS_andc_vec 0
-#define TCG_TARGET_HAS_orc_vec 0
-#define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 0
-#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_roti_vec 0
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
-#define TCG_TARGET_HAS_shi_vec 0
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
-#define TCG_TARGET_HAS_mul_vec 0
-#define TCG_TARGET_HAS_sat_vec 0
-#define TCG_TARGET_HAS_minmax_vec 0
-#define TCG_TARGET_HAS_bitsel_vec 0
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 0
-#else
-#define TCG_TARGET_MAYBE_vec 1
-#endif
-#ifndef TCG_TARGET_HAS_v64
-#define TCG_TARGET_HAS_v64 0
-#endif
-#ifndef TCG_TARGET_HAS_v128
-#define TCG_TARGET_HAS_v128 0
-#endif
-#ifndef TCG_TARGET_HAS_v256
-#define TCG_TARGET_HAS_v256 0
-#endif
-
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
@@ -281,29 +177,6 @@ static inline int tcg_type_size(TCGType t)
return 4 << i;
}
-/**
- * get_alignment_bits
- * @memop: MemOp value
- *
- * Extract the alignment size from the memop.
- */
-static inline unsigned get_alignment_bits(MemOp memop)
-{
- unsigned a = memop & MO_AMASK;
-
- if (a == MO_UNALN) {
- /* No alignment required. */
- a = 0;
- } else if (a == MO_ALIGN) {
- /* A natural alignment requirement. */
- a = memop & MO_SIZE;
- } else {
- /* A specific alignment requirement. */
- a = a >> MO_ASHIFT;
- }
- return a;
-}
-
typedef tcg_target_ulong TCGArg;
/* Define type and accessor macros for TCG variables.
@@ -316,6 +189,7 @@ typedef tcg_target_ulong TCGArg;
* TCGv_i64 : 64 bit integer type
* TCGv_i128 : 128 bit integer type
* TCGv_ptr : a host pointer type
+ * TCGv_vaddr: an integer type wide enough to hold a target pointer type
* TCGv_vec : a host vector type; the exact size is not exposed
to the CPU front-end code.
* TCGv : an integer type the same size as target_ulong
@@ -344,6 +218,14 @@ typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
+#if __SIZEOF_POINTER__ == 4
+typedef TCGv_i32 TCGv_vaddr;
+#elif __SIZEOF_POINTER__ == 8
+typedef TCGv_i64 TCGv_vaddr;
+#else
+# error "sizeof pointer is different from {4,8}"
+#endif /* __SIZEOF_POINTER__ */
+
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
implies TCG_CALL_NO_WRITE_GLOBALS. */
@@ -462,7 +344,8 @@ struct TCGOp {
#define TCGOP_CALLI(X) (X)->param1
#define TCGOP_CALLO(X) (X)->param2
-#define TCGOP_VECL(X) (X)->param1
+#define TCGOP_TYPE(X) (X)->param1
+#define TCGOP_FLAGS(X) (X)->param2
#define TCGOP_VECE(X) (X)->param2
/* Make sure operands fit in the bitfields above. */
@@ -482,11 +365,6 @@ struct TCGContext {
int nb_indirects;
int nb_ops;
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
-
- int page_mask;
- uint8_t page_bits;
- uint8_t tlb_dyn_max_bits;
- uint8_t insn_start_words;
TCGBar guest_mo;
TCGRegSet reserved_regs;
@@ -520,12 +398,8 @@ struct TCGContext {
CPUState *cpu; /* *_trans */
/* These structures are private to tcg-target.c.inc. */
-#ifdef TCG_TARGET_NEED_LDST_LABELS
QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
-#endif
-#ifdef TCG_TARGET_NEED_POOL_LABELS
struct TCGLabelPoolData *pool_labels;
-#endif
TCGLabel *exitreq_label;
@@ -544,6 +418,17 @@ struct TCGContext {
struct qemu_plugin_insn *plugin_insn;
#endif
+ /* For host-specific values. */
+#ifdef __riscv
+ MemOp riscv_cur_vsew;
+ TCGType riscv_cur_type;
+#endif
+ /*
+ * During the tcg_reg_alloc_op loop, we are within a sequence of
+     * Set during the tcg_reg_alloc_op loop while we are within a sequence
+     * of carry-using opcodes like addco+addci.
+ bool carry_live;
+
GHashTable *const_table[TCG_TYPE_COUNT];
TCGTempSet free_temps[TCG_TYPE_COUNT];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
@@ -697,23 +582,29 @@ static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
return (TCGv_ptr)temp_tcgv_i32(t);
}
+static inline TCGv_vaddr temp_tcgv_vaddr(TCGTemp *t)
+{
+ return (TCGv_vaddr)temp_tcgv_i32(t);
+}
+
static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
return (TCGv_vec)temp_tcgv_i32(t);
}
-static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
+static inline TCGArg tcg_get_insn_param(TCGOp *op, unsigned arg)
{
return op->args[arg];
}
-static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
+static inline void tcg_set_insn_param(TCGOp *op, unsigned arg, TCGArg v)
{
op->args[arg] = v;
}
-static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
+static inline uint64_t tcg_get_insn_start_param(TCGOp *op, unsigned arg)
{
+ tcg_debug_assert(arg < INSN_START_WORDS);
if (TCG_TARGET_REG_BITS == 64) {
return tcg_get_insn_param(op, arg);
} else {
@@ -722,8 +613,9 @@ static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
}
}
-static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v)
+static inline void tcg_set_insn_start_param(TCGOp *op, unsigned arg, uint64_t v)
{
+ tcg_debug_assert(arg < INSN_START_WORDS);
if (TCG_TARGET_REG_BITS == 64) {
tcg_set_insn_param(op, arg, v);
} else {
@@ -763,10 +655,51 @@ void tcg_region_reset_all(void);
size_t tcg_code_size(void);
size_t tcg_code_capacity(void);
+/**
+ * tcg_tb_insert:
+ * @tb: translation block to insert
+ *
+ * Insert @tb into the region trees.
+ */
void tcg_tb_insert(TranslationBlock *tb);
+
+/**
+ * tcg_tb_remove:
+ * @tb: translation block to remove
+ *
+ * Remove @tb from the region trees.
+ */
void tcg_tb_remove(TranslationBlock *tb);
+
+/**
+ * tcg_tb_lookup:
+ * @tc_ptr: host PC to look up
+ *
+ * Look up a translation block inside the region trees by @tc_ptr. This is
+ * useful for exception handling, but must not be used for the purposes of
+ * executing the returned translation block. See struct tb_tc for more
+ * information.
+ *
+ * Returns: a translation block previously inserted into the region trees,
+ * such that @tc_ptr points anywhere inside the code generated for it, or
+ * NULL.
+ */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
+
+/**
+ * tcg_tb_foreach:
+ * @func: callback
+ * @user_data: opaque value to pass to @func
+ *
+ * Call @func for each translation block inserted into the region trees.
+ */
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
+
+/**
+ * tcg_nb_tbs:
+ *
+ * Returns: the number of translation blocks inserted into the region trees.
+ */
size_t tcg_nb_tbs(void);
/* user-mode: Called with mmap_lock held. */
@@ -797,7 +730,8 @@ void tb_target_set_jmp_target(const TranslationBlock *, int,
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
-#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */
typedef struct TCGArgConstraint {
unsigned ct : 16;
@@ -824,33 +758,42 @@ enum {
/* Instruction has side effects: it cannot be removed if its outputs
are not used, and might trigger exceptions. */
TCG_OPF_SIDE_EFFECTS = 0x08,
- /* Instruction operands are 64-bits (otherwise 32-bits). */
- TCG_OPF_64BIT = 0x10,
+ /* Instruction operands may be I32 or I64 */
+ TCG_OPF_INT = 0x10,
/* Instruction is optional and not implemented by the host, or insn
is generic and should not be implemented by the host. */
TCG_OPF_NOT_PRESENT = 0x20,
/* Instruction operands are vectors. */
TCG_OPF_VECTOR = 0x40,
/* Instruction is a conditional branch. */
- TCG_OPF_COND_BRANCH = 0x80
+ TCG_OPF_COND_BRANCH = 0x80,
+ /* Instruction produces carry out. */
+ TCG_OPF_CARRY_OUT = 0x100,
+ /* Instruction consumes carry in. */
+ TCG_OPF_CARRY_IN = 0x200,
};
typedef struct TCGOpDef {
const char *name;
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
- uint8_t flags;
- TCGArgConstraint *args_ct;
+ uint16_t flags;
} TCGOpDef;
-extern TCGOpDef tcg_op_defs[];
+extern const TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;
-typedef struct TCGTargetOpDef {
- TCGOpcode op;
- const char *args_ct_str[TCG_MAX_OP_ARGS];
-} TCGTargetOpDef;
-
-bool tcg_op_supported(TCGOpcode op);
+/*
+ * tcg_op_supported:
+ * Query if @op, for @type and @flags, is supported by the host
+ * on which we are currently executing.
+ */
+bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags);
+/*
+ * tcg_op_deposit_valid:
+ * Query if a deposit into (ofs, len) is supported for @type by
+ * the host on which we are currently executing.
+ */
+bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len);
void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret);
void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
@@ -871,10 +814,6 @@ void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret,
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
void tcg_op_remove(TCGContext *s, TCGOp *op);
-TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
- TCGOpcode opc, unsigned nargs);
-TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
- TCGOpcode opc, unsigned nargs);
/**
* tcg_remove_ops_after:
@@ -1033,17 +972,10 @@ extern tcg_prologue_fn *tcg_qemu_tb_exec;
void tcg_register_jit(const void *buf, size_t buf_size);
-#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
return > 0 if it is directly supportable;
return < 0 if we must call tcg_expand_vec_op. */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
-#else
-static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
-{
- return 0;
-}
-#endif
/* Expand the tuple (opc, type, vece) on the given arguments. */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
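
tcg_op_deposit_valid() lets common code ask whether a particular bitfield deposit is directly supported for a type; the generic tcg_gen_deposit_* helpers already fall back internally, so the check below is purely illustrative and the wrapper is made up.

/* Sketch: query deposit support before emitting it (illustrative only). */
static void gen_deposit_field(TCGv_i64 dst, TCGv_i64 src,
                              unsigned ofs, unsigned len)
{
    if (tcg_op_deposit_valid(TCG_TYPE_I64, ofs, len)) {
        tcg_gen_deposit_i64(dst, dst, src, ofs, len);
    } else {
        /* an open-coded shift/mask fallback would go here */
    }
}
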
diff --git a/include/ui/clipboard.h b/include/ui/clipboard.h
index ab6acdb..62a96ce 100644
--- a/include/ui/clipboard.h
+++ b/include/ui/clipboard.h
@@ -2,6 +2,7 @@
#define QEMU_CLIPBOARD_H
#include "qemu/notify.h"
+#include "migration/vmstate.h"
/**
* DOC: Introduction
@@ -25,6 +26,9 @@ typedef enum QemuClipboardSelection QemuClipboardSelection;
typedef struct QemuClipboardPeer QemuClipboardPeer;
typedef struct QemuClipboardNotify QemuClipboardNotify;
typedef struct QemuClipboardInfo QemuClipboardInfo;
+typedef struct QemuClipboardContent QemuClipboardContent;
+
+extern const VMStateDescription vmstate_cbinfo;
/**
* enum QemuClipboardType
@@ -97,6 +101,24 @@ struct QemuClipboardNotify {
};
};
+
+/**
+ * struct QemuClipboardContent
+ *
+ * @available: whether the data is available
+ * @requested: whether the data was requested
+ * @size: the size of the @data
+ * @data: the clipboard data
+ *
+ * Clipboard content.
+ */
+struct QemuClipboardContent {
+ bool available;
+ bool requested;
+ uint32_t size;
+ void *data;
+};
+
/**
* struct QemuClipboardInfo
*
@@ -112,15 +134,10 @@ struct QemuClipboardNotify {
struct QemuClipboardInfo {
uint32_t refcount;
QemuClipboardPeer *owner;
- QemuClipboardSelection selection;
+ int selection; /* QemuClipboardSelection */
bool has_serial;
uint32_t serial;
- struct {
- bool available;
- bool requested;
- size_t size;
- void *data;
- } types[QEMU_CLIPBOARD_TYPE__COUNT];
+ QemuClipboardContent types[QEMU_CLIPBOARD_TYPE__COUNT];
};
/**
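
With the per-type data moved into its own named struct, walking a QemuClipboardInfo looks like the sketch below (the helper is made up for illustration):

/* Sketch: count clipboard types that currently carry data. */
#include "qemu/osdep.h"
#include "ui/clipboard.h"

static unsigned available_types(const QemuClipboardInfo *info)
{
    unsigned n = 0;

    for (int i = 0; i < QEMU_CLIPBOARD_TYPE__COUNT; i++) {
        const QemuClipboardContent *c = &info->types[i];

        if (c->available && c->data) {
            n++;
        }
    }
    return n;
}
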
diff --git a/include/ui/console.h b/include/ui/console.h
index fa986ab..46b3128 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -70,8 +70,6 @@ typedef struct QEMUPutMouseEntry QEMUPutMouseEntry;
typedef struct QEMUPutKbdEntry QEMUPutKbdEntry;
typedef struct QEMUPutLEDEntry QEMUPutLEDEntry;
-QEMUPutKbdEntry *qemu_add_kbd_event_handler(QEMUPutKBDEvent *func,
- void *opaque);
QEMUPutMouseEntry *qemu_add_mouse_event_handler(QEMUPutMouseEvent *func,
void *opaque, int absolute,
const char *name);
@@ -175,7 +173,6 @@ int cursor_get_mono_bpl(QEMUCursor *c);
void cursor_set_mono(QEMUCursor *c,
uint32_t foreground, uint32_t background, uint8_t *image,
int transparent, uint8_t *mask);
-void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *mask);
void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask);
typedef void *QEMUGLContext;
diff --git a/include/ui/dmabuf.h b/include/ui/dmabuf.h
index dc74ba8..3decdca 100644
--- a/include/ui/dmabuf.h
+++ b/include/ui/dmabuf.h
@@ -10,24 +10,29 @@
#ifndef DMABUF_H
#define DMABUF_H
+#define DMABUF_MAX_PLANES 4
+
typedef struct QemuDmaBuf QemuDmaBuf;
QemuDmaBuf *qemu_dmabuf_new(uint32_t width, uint32_t height,
- uint32_t stride, uint32_t x,
- uint32_t y, uint32_t backing_width,
- uint32_t backing_height, uint32_t fourcc,
- uint64_t modifier, int dmabuf_fd,
+ const uint32_t *offset, const uint32_t *stride,
+ uint32_t x, uint32_t y,
+ uint32_t backing_width, uint32_t backing_height,
+ uint32_t fourcc, uint64_t modifier,
+ const int32_t *dmabuf_fd, uint32_t num_planes,
bool allow_fences, bool y0_top);
void qemu_dmabuf_free(QemuDmaBuf *dmabuf);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuDmaBuf, qemu_dmabuf_free);
-int qemu_dmabuf_get_fd(QemuDmaBuf *dmabuf);
-int qemu_dmabuf_dup_fd(QemuDmaBuf *dmabuf);
+const int *qemu_dmabuf_get_fds(QemuDmaBuf *dmabuf, int *nfds);
+void qemu_dmabuf_dup_fds(QemuDmaBuf *dmabuf, int *fds, int nfds);
void qemu_dmabuf_close(QemuDmaBuf *dmabuf);
uint32_t qemu_dmabuf_get_width(QemuDmaBuf *dmabuf);
uint32_t qemu_dmabuf_get_height(QemuDmaBuf *dmabuf);
-uint32_t qemu_dmabuf_get_stride(QemuDmaBuf *dmabuf);
+const uint32_t *qemu_dmabuf_get_offsets(QemuDmaBuf *dmabuf, int *noffsets);
+const uint32_t *qemu_dmabuf_get_strides(QemuDmaBuf *dmabuf, int *nstrides);
+uint32_t qemu_dmabuf_get_num_planes(QemuDmaBuf *dmabuf);
uint32_t qemu_dmabuf_get_fourcc(QemuDmaBuf *dmabuf);
uint64_t qemu_dmabuf_get_modifier(QemuDmaBuf *dmabuf);
uint32_t qemu_dmabuf_get_texture(QemuDmaBuf *dmabuf);
@@ -44,6 +49,5 @@ void qemu_dmabuf_set_texture(QemuDmaBuf *dmabuf, uint32_t texture);
void qemu_dmabuf_set_fence_fd(QemuDmaBuf *dmabuf, int32_t fence_fd);
void qemu_dmabuf_set_sync(QemuDmaBuf *dmabuf, void *sync);
void qemu_dmabuf_set_draw_submitted(QemuDmaBuf *dmabuf, bool draw_submitted);
-void qemu_dmabuf_set_fd(QemuDmaBuf *dmabuf, int32_t fd);
#endif
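
The constructor now takes per-plane arrays; a single-plane buffer passes one-element data in DMABUF_MAX_PLANES-sized arrays with num_planes == 1. A hedged sketch (all values are placeholders):

/* Sketch: create a single-plane dmabuf with the new multi-plane constructor. */
#include "qemu/osdep.h"
#include "ui/dmabuf.h"

static QemuDmaBuf *new_single_plane(int32_t fd, uint32_t w, uint32_t h,
                                    uint32_t stride, uint32_t fourcc,
                                    uint64_t modifier)
{
    uint32_t offsets[DMABUF_MAX_PLANES] = { 0 };
    uint32_t strides[DMABUF_MAX_PLANES] = { stride };
    int32_t fds[DMABUF_MAX_PLANES] = { fd, -1, -1, -1 };

    return qemu_dmabuf_new(w, h, offsets, strides,
                           0, 0,              /* x, y */
                           w, h,              /* backing width/height */
                           fourcc, modifier,
                           fds, 1,            /* num_planes */
                           false,             /* allow_fences */
                           false);            /* y0_top */
}
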
diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h
index 4b8c0d2..acf993f 100644
--- a/include/ui/egl-helpers.h
+++ b/include/ui/egl-helpers.h
@@ -17,6 +17,8 @@ extern bool qemu_egl_angle_d3d;
typedef struct egl_fb {
int width;
int height;
+ int x;
+ int y;
GLuint texture;
GLuint framebuffer;
bool delete_texture;
@@ -26,7 +28,7 @@ typedef struct egl_fb {
#define EGL_FB_INIT { 0, }
void egl_fb_destroy(egl_fb *fb);
-void egl_fb_setup_default(egl_fb *fb, int width, int height);
+void egl_fb_setup_default(egl_fb *fb, int width, int height, int x, int y);
void egl_fb_setup_for_tex(egl_fb *fb, int width, int height,
GLuint texture, bool delete);
void egl_fb_setup_new_tex(egl_fb *fb, int width, int height);
@@ -46,8 +48,9 @@ extern int qemu_egl_rn_fd;
extern struct gbm_device *qemu_egl_rn_gbm_dev;
int egl_rendernode_init(const char *rendernode, DisplayGLMode mode);
-int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
- EGLuint64KHR *modifier);
+bool egl_dmabuf_export_texture(uint32_t tex_id, int *fd, EGLint *offset,
+ EGLint *stride, EGLint *fourcc, int *num_planes,
+ EGLuint64KHR *modifier);
void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf);
void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf);
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index aa3d637..d394404 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -224,4 +224,6 @@ int gd_gl_area_make_current(DisplayGLCtx *dgc,
/* gtk-clipboard.c */
void gd_clipboard_init(GtkDisplayState *gd);
+void gd_update_scale(VirtualConsole *vc, int ww, int wh, int fbw, int fbh);
+
#endif /* UI_GTK_H */
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
index ef13a82..2ca0ed7 100644
--- a/include/ui/qemu-pixman.h
+++ b/include/ui/qemu-pixman.h
@@ -12,6 +12,8 @@
#include "pixman-minimal.h"
#endif
+#include "qapi/error.h"
+
/*
* pixman image formats are defined to be native endian,
* that means host byte order on qemu. So we go define
@@ -73,12 +75,12 @@ PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format);
pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian);
pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format);
uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman);
-int qemu_pixman_get_type(int rshift, int gshift, int bshift);
+int qemu_pixman_get_type(int rshift, int gshift, int bshift, int endian);
bool qemu_pixman_check_format(DisplayChangeListener *dcl,
pixman_format_code_t format);
#ifdef CONFIG_PIXMAN
-pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
+pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf, int endian);
pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
int width);
void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
@@ -97,6 +99,28 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph,
void qemu_pixman_image_unref(pixman_image_t *image);
+#ifdef WIN32
+typedef HANDLE qemu_pixman_shareable;
+#define SHAREABLE_NONE (NULL)
+#define SHAREABLE_TO_PTR(handle) (handle)
+#define PTR_TO_SHAREABLE(ptr) (ptr)
+#else
+typedef int qemu_pixman_shareable;
+#define SHAREABLE_NONE (-1)
+#define SHAREABLE_TO_PTR(handle) GINT_TO_POINTER(handle)
+#define PTR_TO_SHAREABLE(ptr) GPOINTER_TO_INT(ptr)
+#endif
+
+bool qemu_pixman_image_new_shareable(
+ pixman_image_t **image,
+ qemu_pixman_shareable *handle,
+ const char *name,
+ pixman_format_code_t format,
+ int width,
+ int height,
+ int rowstride_bytes,
+ Error **errp);
+
G_DEFINE_AUTOPTR_CLEANUP_FUNC(pixman_image_t, qemu_pixman_image_unref)
#endif /* QEMU_PIXMAN_H */
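
qemu_pixman_image_new_shareable() hides the platform split (HANDLE on Windows, file descriptor elsewhere) behind qemu_pixman_shareable; a hedged caller sketch with made-up name and an arbitrarily chosen format:

/* Sketch: allocate a pixman image whose backing memory can be shared. */
#include "qemu/osdep.h"
#include "ui/qemu-pixman.h"

static pixman_image_t *new_shared_image(int width, int height, Error **errp)
{
    pixman_image_t *image = NULL;
    qemu_pixman_shareable handle = SHAREABLE_NONE;

    if (!qemu_pixman_image_new_shareable(&image, &handle,
                                         "example-surface",   /* name, made up */
                                         PIXMAN_x8r8g8b8,
                                         width, height,
                                         width * 4,           /* rowstride_bytes */
                                         errp)) {
        return NULL;
    }
    /* 'handle' can now be handed to another process or display backend. */
    return image;
}
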
diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h
index e3acc7c..dbe6e3d 100644
--- a/include/ui/sdl2.h
+++ b/include/ui/sdl2.h
@@ -42,6 +42,7 @@ struct sdl2_console {
int updates;
int idle_counter;
int ignore_hotkeys;
+ bool gui_keysym;
SDL_GLContext winctx;
QKbdState *kbd;
#ifdef CONFIG_OPENGL
@@ -60,6 +61,7 @@ void sdl2_poll_events(struct sdl2_console *scon);
void sdl2_process_key(struct sdl2_console *scon,
SDL_KeyboardEvent *ev);
+void sdl2_release_modifiers(struct sdl2_console *scon);
void sdl2_2d_update(DisplayChangeListener *dcl,
int x, int y, int w, int h);
diff --git a/include/ui/surface.h b/include/ui/surface.h
index 345b191..f16f7be 100644
--- a/include/ui/surface.h
+++ b/include/ui/surface.h
@@ -23,10 +23,8 @@ typedef struct DisplaySurface {
GLenum gltype;
GLuint texture;
#endif
-#ifdef WIN32
- HANDLE handle;
- uint32_t handle_offset;
-#endif
+ qemu_pixman_shareable share_handle;
+ uint32_t share_handle_offset;
} DisplaySurface;
PixelFormat qemu_default_pixelformat(int bpp);
@@ -37,10 +35,10 @@ DisplaySurface *qemu_create_displaysurface_from(int width, int height,
DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
DisplaySurface *qemu_create_placeholder_surface(int w, int h,
const char *msg);
-#ifdef WIN32
-void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
- HANDLE h, uint32_t offset);
-#endif
+
+void qemu_displaysurface_set_share_handle(DisplaySurface *surface,
+ qemu_pixman_shareable handle,
+ uint32_t offset);
DisplaySurface *qemu_create_displaysurface(int width, int height);
void qemu_free_displaysurface(DisplaySurface *surface);
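
The WIN32-only HANDLE fields give way to a portable "shareable" handle that also covers the POSIX case. A sketch of how a display backend might allocate a shareable surface with the new helpers; the rowstride computation, format choice and error handling are illustrative assumptions:

    static DisplaySurface *example_shared_surface(int width, int height,
                                                  Error **errp)
    {
        pixman_image_t *image = NULL;
        qemu_pixman_shareable handle = SHAREABLE_NONE;
        DisplaySurface *surface;

        if (!qemu_pixman_image_new_shareable(&image, &handle,
                                             "example-surface",
                                             PIXMAN_x8r8g8b8,
                                             width, height,
                                             width * 4, errp)) {
            return NULL;
        }

        surface = qemu_create_displaysurface_pixman(image);
        qemu_displaysurface_set_share_handle(surface, handle, 0);
        return surface;
    }
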
diff --git a/include/user/abitypes.h b/include/user/abitypes.h
index 5c9a955..7528124 100644
--- a/include/user/abitypes.h
+++ b/include/user/abitypes.h
@@ -21,13 +21,6 @@
#define ABI_LLONG_ALIGNMENT 2
#endif
-#ifdef TARGET_CRIS
-#define ABI_SHORT_ALIGNMENT 1
-#define ABI_INT_ALIGNMENT 1
-#define ABI_LONG_ALIGNMENT 1
-#define ABI_LLONG_ALIGNMENT 1
-#endif
-
#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
|| defined(TARGET_SH4) \
|| defined(TARGET_OPENRISC) \
diff --git a/include/user/cpu_loop.h b/include/user/cpu_loop.h
new file mode 100644
index 0000000..ad8a1d7
--- /dev/null
+++ b/include/user/cpu_loop.h
@@ -0,0 +1,88 @@
+/*
+ * qemu user cpu loop
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef USER_CPU_LOOP_H
+#define USER_CPU_LOOP_H
+
+#include "exec/vaddr.h"
+#include "exec/mmu-access-type.h"
+
+
+/**
+ * adjust_signal_pc:
+ * @pc: raw pc from the host signal ucontext_t.
+ * @is_write: host memory operation was write, or read-modify-write.
+ *
+ * Alter @pc as required for unwinding. Return the type of the
+ * guest memory access -- host reads may be for guest execution.
+ */
+MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
+
+/**
+ * handle_sigsegv_accerr_write:
+ * @cpu: the cpu context
+ * @old_set: the sigset_t from the signal ucontext_t
+ * @host_pc: the host pc, adjusted for the signal
+ * @host_addr: the host address of the fault
+ *
+ * Return true if the write fault has been handled, and should be re-tried.
+ */
+bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
+ uintptr_t host_pc, vaddr guest_addr);
+
+/**
+ * cpu_loop_exit_sigsegv:
+ * @cpu: the cpu context
+ * @addr: the guest address of the fault
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGSEGV, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+
+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ uintptr_t ra);
+
+G_NORETURN void cpu_loop(CPUArchState *env);
+
+void target_exception_dump(CPUArchState *env, const char *fmt, int code);
+#define EXCP_DUMP(env, fmt, code) \
+ target_exception_dump(env, fmt, code)
+
+typedef struct target_pt_regs target_pt_regs;
+
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs);
+
+#endif
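
A sketch of the intended call sequence from a host SIGSEGV handler, using only the declarations above; the handler signature, the maperr detection and the use of h2g_nocheck() are assumptions for illustration (a real handler also needs <signal.h> and the guest-host helpers in scope):

    static void example_sigsegv(CPUState *cpu, void *host_fault_addr,
                                uintptr_t host_pc, sigset_t *old_set,
                                bool is_write, bool maperr)
    {
        uintptr_t pc = host_pc;
        MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
        vaddr guest_addr = h2g_nocheck(host_fault_addr);

        if (!maperr && access_type == MMU_DATA_STORE &&
            handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
            return;                 /* fault fixed up; retry the access */
        }
        cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
    }
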
diff --git a/include/user/guest-host.h b/include/user/guest-host.h
new file mode 100644
index 0000000..8f7ef75
--- /dev/null
+++ b/include/user/guest-host.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * guest <-> host helpers.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef USER_GUEST_HOST_H
+#define USER_GUEST_HOST_H
+
+#include "exec/vaddr.h"
+#include "user/guest-base.h"
+#include "accel/tcg/cpu-ops.h"
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default. The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
+extern unsigned long reserved_va;
+
+/*
+ * The last byte of the guest address space.
+ * If reserved_va is non-zero, guest_addr_max matches reserved_va.
+ * If reserved_va is zero, guest_addr_max is the last byte of the full guest address space.
+ */
+extern unsigned long guest_addr_max;
+
+static inline vaddr cpu_untagged_addr(CPUState *cs, vaddr x)
+{
+ const TCGCPUOps *tcg_ops = cs->cc->tcg_ops;
+ if (tcg_ops->untagged_addr) {
+ return tcg_ops->untagged_addr(cs, x);
+ }
+ return x;
+}
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+static inline void *g2h_untagged(vaddr x)
+{
+ return (void *)((uintptr_t)(x) + guest_base);
+}
+
+static inline void *g2h(CPUState *cs, vaddr x)
+{
+ return g2h_untagged(cpu_untagged_addr(cs, x));
+}
+
+static inline bool guest_addr_valid_untagged(vaddr x)
+{
+ return x <= guest_addr_max;
+}
+
+static inline bool guest_range_valid_untagged(vaddr start, vaddr len)
+{
+ return len - 1 <= guest_addr_max && start <= guest_addr_max - len + 1;
+}
+
+#define h2g_valid(x) \
+ ((uintptr_t)(x) - guest_base <= guest_addr_max)
+
+#define h2g_nocheck(x) ({ \
+ uintptr_t __ret = (uintptr_t)(x) - guest_base; \
+ (vaddr)__ret; \
+})
+
+#define h2g(x) ({ \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ h2g_nocheck(x); \
+})
+
+#endif
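
A small sketch of round-tripping addresses through these helpers; "cs" is a hypothetical CPUState valid in the calling context:

    static int example_copy_byte(CPUState *cs, vaddr guest_src, vaddr guest_dst)
    {
        if (!guest_addr_valid_untagged(guest_src) ||
            !guest_addr_valid_untagged(guest_dst)) {
            return -1;
        }
        uint8_t *src = g2h(cs, guest_src);  /* tag bits stripped if the CPU defines untagged_addr */
        uint8_t *dst = g2h(cs, guest_dst);
        *dst = *src;

        assert(h2g_valid(src));             /* host pointer maps back into guest space */
        return 0;
    }
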
diff --git a/include/user/mmap.h b/include/user/mmap.h
new file mode 100644
index 0000000..4d5e9aa
--- /dev/null
+++ b/include/user/mmap.h
@@ -0,0 +1,32 @@
+/*
+ * MMAP declarations for QEMU user emulation
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef USER_MMAP_H
+#define USER_MMAP_H
+
+#include "user/abitypes.h"
+
+/*
+ * mmap_next_start: The base address for the next mmap without hint,
+ * increased after each successful map, starting at task_unmapped_base.
+ * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT.
+ */
+extern abi_ulong mmap_next_start;
+
+int target_mprotect(abi_ulong start, abi_ulong len, int prot);
+
+abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
+ int flags, int fd, off_t offset);
+int target_munmap(abi_ulong start, abi_ulong len);
+abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
+ abi_ulong new_size, unsigned long flags,
+ abi_ulong new_addr);
+
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment);
+
+void TSA_NO_TSA mmap_fork_start(void);
+void TSA_NO_TSA mmap_fork_end(int child);
+
+#endif
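
For illustration, a minimal anonymous mapping through the user-mode mmap layer; the PROT_*/MAP_* constants are taken from the host <sys/mman.h> purely as a sketch, whereas real callers pass target-translated protection and flag values:

    static abi_long example_anon_map(abi_ulong size)
    {
        abi_long addr = target_mmap(0, size, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == -1) {
            return -1;
        }
        target_mprotect(addr, size, PROT_READ);  /* later drop write permission */
        return addr;
    }
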
diff --git a/include/user/page-protection.h b/include/user/page-protection.h
new file mode 100644
index 0000000..4bde664
--- /dev/null
+++ b/include/user/page-protection.h
@@ -0,0 +1,96 @@
+/*
+ * QEMU page protection declarations.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1+
+ */
+#ifndef USER_PAGE_PROTECTION_H
+#define USER_PAGE_PROTECTION_H
+
+#ifndef CONFIG_USER_ONLY
+#error Cannot include this header from system emulation
+#endif
+
+#include "exec/vaddr.h"
+#include "exec/translation-block.h"
+
+int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc);
+
+int page_get_flags(vaddr address);
+
+/**
+ * page_set_flags:
+ * @start: first byte of range
+ * @last: last byte of range
+ * @flags: flags to set
+ * Context: holding mmap lock
+ *
+ * Modify the flags of a page and invalidate the code if necessary.
+ * The flag PAGE_WRITE_ORG is positioned automatically depending
+ * on PAGE_WRITE. The mmap_lock should already be held.
+ */
+void page_set_flags(vaddr start, vaddr last, int flags);
+
+void page_reset_target_data(vaddr start, vaddr last);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(vaddr start, vaddr last, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(vaddr start, vaddr last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align);
+
+/**
+ * page_get_target_data
+ * @address: guest virtual address
+ * @size: per-page size
+ *
+ * Return @size bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary. The
+ * caller should already have verified that the address is valid.
+ * The value of @size must be the same for every call.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+__attribute__((returns_nonnull))
+void *page_get_target_data(vaddr address, size_t size);
+
+typedef int (*walk_memory_regions_fn)(void *, vaddr, vaddr, int);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+void page_dump(FILE *f);
+
+#endif
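
A sketch of tracking a freshly mapped guest range with the declarations above; PAGE_VALID/PAGE_READ/PAGE_WRITE are the existing page flag bits defined elsewhere, and the caller is assumed to hold the mmap lock:

    static bool example_reserve(vaddr start, vaddr len)
    {
        vaddr last = start + len - 1;

        if (!page_check_range_empty(start, last)) {
            return false;           /* something is already mapped there */
        }
        page_set_flags(start, last, PAGE_VALID | PAGE_READ | PAGE_WRITE);
        return (page_get_flags(start) & PAGE_WRITE) != 0;
    }
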
diff --git a/include/user/signal.h b/include/user/signal.h
new file mode 100644
index 0000000..7fa33b0
--- /dev/null
+++ b/include/user/signal.h
@@ -0,0 +1,25 @@
+/*
+ * Signal-related declarations.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef USER_SIGNAL_H
+#define USER_SIGNAL_H
+
+#ifndef CONFIG_USER_ONLY
+#error Cannot include this header from system emulation
+#endif
+
+/**
+ * target_to_host_signal:
+ * @sig: target signal.
+ *
+ * On success, return the host signal between 0 (inclusive) and NSIG
+ * (exclusive) corresponding to the target signal @sig. Return any other value
+ * on failure.
+ */
+int target_to_host_signal(int sig);
+
+extern int host_interrupt_signal;
+
+#endif
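
A small usage sketch, following the contract documented above; kill() and NSIG come from the host <signal.h>:

    static int example_forward_signal(pid_t pid, int target_sig)
    {
        int host_sig = target_to_host_signal(target_sig);

        if (host_sig < 0 || host_sig >= NSIG) {
            return -1;              /* no usable host signal */
        }
        return kill(pid, host_sig);
    }
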
diff --git a/io/channel-buffer.c b/io/channel-buffer.c
index 8096180..189fa67 100644
--- a/io/channel-buffer.c
+++ b/io/channel-buffer.c
@@ -225,7 +225,7 @@ static GSource *qio_channel_buffer_create_watch(QIOChannel *ioc,
static void qio_channel_buffer_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/io/channel-command.c b/io/channel-command.c
index 6d5f64e..8966dd3 100644
--- a/io/channel-command.c
+++ b/io/channel-command.c
@@ -358,7 +358,7 @@ static GSource *qio_channel_command_create_watch(QIOChannel *ioc,
static void qio_channel_command_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/io/channel-file.c b/io/channel-file.c
index 2ea8d08..ca3f180 100644
--- a/io/channel-file.c
+++ b/io/channel-file.c
@@ -290,7 +290,7 @@ static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
}
static void qio_channel_file_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/io/channel-null.c b/io/channel-null.c
index ef99586..49f1c80 100644
--- a/io/channel-null.c
+++ b/io/channel-null.c
@@ -207,7 +207,7 @@ qio_channel_null_create_watch(QIOChannel *ioc,
static void
qio_channel_null_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 3a899b0..3b7ca92 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -78,6 +78,17 @@ qio_channel_socket_new(void)
return sioc;
}
+int qio_channel_socket_set_send_buffer(QIOChannelSocket *ioc,
+ size_t size,
+ Error **errp)
+{
+ if (setsockopt(ioc->fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) < 0) {
+ error_setg_errno(errp, errno, "Unable to set socket send buffer size");
+ return -1;
+ }
+
+ return 0;
+}
static int
qio_channel_socket_set_fd(QIOChannelSocket *sioc,
@@ -841,6 +852,33 @@ qio_channel_socket_set_cork(QIOChannel *ioc,
socket_set_cork(sioc->fd, v);
}
+static int
+qio_channel_socket_get_peerpid(QIOChannel *ioc,
+ unsigned int *pid,
+ Error **errp)
+{
+#ifdef CONFIG_LINUX
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
+ Error *err = NULL;
+ socklen_t len = sizeof(struct ucred);
+
+ struct ucred cred;
+ if (getsockopt(sioc->fd,
+ SOL_SOCKET, SO_PEERCRED,
+ &cred, &len) == -1) {
+ error_setg_errno(&err, errno, "Unable to get peer credentials");
+ error_propagate(errp, err);
+ *pid = -1;
+ return -1;
+ }
+ *pid = (unsigned int)cred.pid;
+ return 0;
+#else
+ error_setg(errp, "Unsupported feature");
+ *pid = -1;
+ return -1;
+#endif
+}
static int
qio_channel_socket_close(QIOChannel *ioc,
@@ -922,7 +960,7 @@ static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
}
static void qio_channel_socket_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
@@ -938,6 +976,7 @@ static void qio_channel_socket_class_init(ObjectClass *klass,
#ifdef QEMU_MSG_ZEROCOPY
ioc_klass->io_flush = qio_channel_socket_flush;
#endif
+ ioc_klass->io_peerpid = qio_channel_socket_get_peerpid;
}
static const TypeInfo qio_channel_socket_info = {
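
A sketch of how a caller might use the two new entry points, tuning the send buffer on a connected socket channel and querying the peer pid where the transport supports it; the buffer size and error handling are illustrative assumptions:

    static void example_tune_socket(QIOChannelSocket *sioc)
    {
        Error *local_err = NULL;
        unsigned int pid;

        if (qio_channel_socket_set_send_buffer(sioc, 1 * 1024 * 1024,
                                               &local_err) < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }

        if (qio_channel_get_peerpid(QIO_CHANNEL(sioc), &pid, &local_err) < 0 ||
            local_err) {
            error_free(local_err);  /* unsupported transport or lookup failure */
        } else {
            /* pid now holds the peer's process id (Linux only) */
        }
    }
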
diff --git a/io/channel-tls.c b/io/channel-tls.c
index 67b9700..db2ac1d 100644
--- a/io/channel-tls.c
+++ b/io/channel-tls.c
@@ -28,17 +28,16 @@
static ssize_t qio_channel_tls_write_handler(const char *buf,
size_t len,
- void *opaque)
+ void *opaque,
+ Error **errp)
{
QIOChannelTLS *tioc = QIO_CHANNEL_TLS(opaque);
ssize_t ret;
- ret = qio_channel_write(tioc->master, buf, len, NULL);
+ ret = qio_channel_write(tioc->master, buf, len, errp);
if (ret == QIO_CHANNEL_ERR_BLOCK) {
- errno = EAGAIN;
- return -1;
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
} else if (ret < 0) {
- errno = EIO;
return -1;
}
return ret;
@@ -46,17 +45,16 @@ static ssize_t qio_channel_tls_write_handler(const char *buf,
static ssize_t qio_channel_tls_read_handler(char *buf,
size_t len,
- void *opaque)
+ void *opaque,
+ Error **errp)
{
QIOChannelTLS *tioc = QIO_CHANNEL_TLS(opaque);
ssize_t ret;
- ret = qio_channel_read(tioc->master, buf, len, NULL);
+ ret = qio_channel_read(tioc->master, buf, len, errp);
if (ret == QIO_CHANNEL_ERR_BLOCK) {
- errno = EAGAIN;
- return -1;
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
} else if (ret < 0) {
- errno = EIO;
return -1;
}
return ret;
@@ -164,16 +162,17 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
GMainContext *context)
{
Error *err = NULL;
- QCryptoTLSSessionHandshakeStatus status;
+ int status;
- if (qcrypto_tls_session_handshake(ioc->session, &err) < 0) {
+ status = qcrypto_tls_session_handshake(ioc->session, &err);
+
+ if (status < 0) {
trace_qio_channel_tls_handshake_fail(ioc);
qio_task_set_error(task, err);
qio_task_complete(task);
return;
}
- status = qcrypto_tls_session_get_handshake_status(ioc->session);
if (status == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
trace_qio_channel_tls_handshake_complete(ioc);
if (qcrypto_tls_session_check_credentials(ioc->session,
@@ -249,6 +248,85 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
qio_channel_tls_handshake_task(ioc, task, context);
}
+static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
+ gpointer user_data);
+
+static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
+ GMainContext *context)
+{
+ GIOCondition condition;
+ QIOChannelTLSData *data;
+ int status;
+ Error *err = NULL;
+
+ status = qcrypto_tls_session_bye(ioc->session, &err);
+
+ if (status < 0) {
+ trace_qio_channel_tls_bye_fail(ioc);
+ qio_task_set_error(task, err);
+ qio_task_complete(task);
+ return;
+ }
+
+ if (status == QCRYPTO_TLS_BYE_COMPLETE) {
+ qio_task_complete(task);
+ return;
+ }
+
+ data = g_new0(typeof(*data), 1);
+ data->task = task;
+ data->context = context;
+
+ if (context) {
+ g_main_context_ref(context);
+ }
+
+ if (status == QCRYPTO_TLS_BYE_SENDING) {
+ condition = G_IO_OUT;
+ } else {
+ condition = G_IO_IN;
+ }
+
+ trace_qio_channel_tls_bye_pending(ioc, status);
+ ioc->bye_ioc_tag = qio_channel_add_watch_full(ioc->master, condition,
+ qio_channel_tls_bye_io,
+ data, NULL, context);
+}
+
+
+static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
+ gpointer user_data)
+{
+ QIOChannelTLSData *data = user_data;
+ QIOTask *task = data->task;
+ GMainContext *context = data->context;
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(qio_task_get_source(task));
+
+ tioc->bye_ioc_tag = 0;
+ g_free(data);
+ qio_channel_tls_bye_task(tioc, task, context);
+
+ if (context) {
+ g_main_context_unref(context);
+ }
+
+ return FALSE;
+}
+
+static void propagate_error(QIOTask *task, gpointer opaque)
+{
+ qio_task_propagate_error(task, opaque);
+}
+
+void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
+{
+ QIOTask *task;
+
+ task = qio_task_new(OBJECT(ioc), propagate_error, errp, NULL);
+
+ trace_qio_channel_tls_bye_start(ioc);
+ qio_channel_tls_bye_task(ioc, task, NULL);
+}
static void qio_channel_tls_init(Object *obj G_GNUC_UNUSED)
{
@@ -277,24 +355,20 @@ static ssize_t qio_channel_tls_readv(QIOChannel *ioc,
ssize_t got = 0;
for (i = 0 ; i < niov ; i++) {
- ssize_t ret = qcrypto_tls_session_read(tioc->session,
- iov[i].iov_base,
- iov[i].iov_len);
- if (ret < 0) {
- if (errno == EAGAIN) {
- if (got) {
- return got;
- } else {
- return QIO_CHANNEL_ERR_BLOCK;
- }
- } else if (errno == ECONNABORTED &&
- (qatomic_load_acquire(&tioc->shutdown) &
- QIO_CHANNEL_SHUTDOWN_READ)) {
- return 0;
+ ssize_t ret = qcrypto_tls_session_read(
+ tioc->session,
+ iov[i].iov_base,
+ iov[i].iov_len,
+ flags & QIO_CHANNEL_READ_FLAG_RELAXED_EOF ||
+ qatomic_load_acquire(&tioc->shutdown) & QIO_CHANNEL_SHUTDOWN_READ,
+ errp);
+ if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) {
+ if (got) {
+ return got;
+ } else {
+ return QIO_CHANNEL_ERR_BLOCK;
}
-
- error_setg_errno(errp, errno,
- "Cannot read from TLS channel");
+ } else if (ret < 0) {
return -1;
}
got += ret;
@@ -321,18 +395,15 @@ static ssize_t qio_channel_tls_writev(QIOChannel *ioc,
for (i = 0 ; i < niov ; i++) {
ssize_t ret = qcrypto_tls_session_write(tioc->session,
iov[i].iov_base,
- iov[i].iov_len);
- if (ret <= 0) {
- if (errno == EAGAIN) {
- if (done) {
- return done;
- } else {
- return QIO_CHANNEL_ERR_BLOCK;
- }
+ iov[i].iov_len,
+ errp);
+ if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) {
+ if (done) {
+ return done;
+ } else {
+ return QIO_CHANNEL_ERR_BLOCK;
}
-
- error_setg_errno(errp, errno,
- "Cannot write to TLS channel");
+ } else if (ret < 0) {
return -1;
}
done += ret;
@@ -389,6 +460,11 @@ static int qio_channel_tls_close(QIOChannel *ioc,
g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
}
+ if (tioc->bye_ioc_tag) {
+ trace_qio_channel_tls_bye_cancel(ioc);
+ g_clear_handle_id(&tioc->bye_ioc_tag, g_source_remove);
+ }
+
return qio_channel_close(tioc->master, errp);
}
@@ -485,7 +561,7 @@ qio_channel_tls_get_session(QIOChannelTLS *ioc)
}
static void qio_channel_tls_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
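
A sketch of requesting a graceful TLS close_notify with the new qio_channel_tls_bye() before tearing the channel down; per the code above, an incomplete termination keeps polling the underlying channel via a watch, so the actual close is assumed to happen later once I/O is quiesced:

    static void example_shutdown_tls(QIOChannelTLS *tioc)
    {
        Error *local_err = NULL;

        qio_channel_tls_bye(tioc, &local_err);
        if (local_err) {
            /* e.g. the peer already went away; not fatal for shutdown */
            error_report_err(local_err);
        }
        /* qio_channel_close() is deferred until pending I/O has drained */
    }
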
diff --git a/io/channel-websock.c b/io/channel-websock.c
index de39f0d..08ddb27 100644
--- a/io/channel-websock.c
+++ b/io/channel-websock.c
@@ -351,7 +351,7 @@ static void qio_channel_websock_handshake_send_res_ok(QIOChannelWebsock *ioc,
QIO_CHANNEL_WEBSOCK_GUID_LEN + 1);
/* hash and encode it */
- if (qcrypto_hash_base64(QCRYPTO_HASH_ALG_SHA1,
+ if (qcrypto_hash_base64(QCRYPTO_HASH_ALGO_SHA1,
combined_key,
QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN +
QIO_CHANNEL_WEBSOCK_GUID_LEN,
@@ -1308,7 +1308,7 @@ static GSource *qio_channel_websock_create_watch(QIOChannel *ioc,
}
static void qio_channel_websock_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/io/channel.c b/io/channel.c
index a1f12f8..ebd9322 100644
--- a/io/channel.c
+++ b/io/channel.c
@@ -115,7 +115,8 @@ int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
size_t niov,
Error **errp)
{
- return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp);
+ return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, 0,
+ errp);
}
int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
@@ -130,6 +131,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
+ int flags,
Error **errp)
{
int ret = -1;
@@ -155,7 +157,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
while ((nlocal_iov > 0) || local_fds) {
ssize_t len;
len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds,
- local_nfds, 0, errp);
+ local_nfds, flags, errp);
if (len == QIO_CHANNEL_ERR_BLOCK) {
if (qemu_in_coroutine()) {
qio_channel_yield(ioc, G_IO_IN);
@@ -222,7 +224,8 @@ int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
int **fds, size_t *nfds,
Error **errp)
{
- int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp);
+ int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, 0,
+ errp);
if (ret == 0) {
error_setg(errp, "Unexpected end-of-file before all data were read");
@@ -548,6 +551,19 @@ void qio_channel_set_cork(QIOChannel *ioc,
}
}
+int qio_channel_get_peerpid(QIOChannel *ioc,
+ unsigned int *pid,
+ Error **errp)
+{
+ QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
+
+ if (!klass->io_peerpid) {
+ error_setg(errp, "Channel does not support peer pid");
+ return -1;
+ }
+ klass->io_peerpid(ioc, pid, errp);
+ return 0;
+}
off_t qio_channel_io_seek(QIOChannel *ioc,
off_t offset,
diff --git a/io/dns-resolver.c b/io/dns-resolver.c
index 53b0e84..3712438 100644
--- a/io/dns-resolver.c
+++ b/io/dns-resolver.c
@@ -111,22 +111,11 @@ static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver,
uaddr, INET6_ADDRSTRLEN, uport, 32,
NI_NUMERICHOST | NI_NUMERICSERV);
- newaddr->u.inet = (InetSocketAddress){
- .host = g_strdup(uaddr),
- .port = g_strdup(uport),
- .has_numeric = true,
- .numeric = true,
- .has_to = iaddr->has_to,
- .to = iaddr->to,
- .has_ipv4 = iaddr->has_ipv4,
- .ipv4 = iaddr->ipv4,
- .has_ipv6 = iaddr->has_ipv6,
- .ipv6 = iaddr->ipv6,
-#ifdef HAVE_IPPROTO_MPTCP
- .has_mptcp = iaddr->has_mptcp,
- .mptcp = iaddr->mptcp,
-#endif
- };
+ newaddr->u.inet = *iaddr;
+ newaddr->u.inet.host = g_strdup(uaddr),
+ newaddr->u.inet.port = g_strdup(uport),
+ newaddr->u.inet.has_numeric = true,
+ newaddr->u.inet.numeric = true,
(*addrs)[i] = newaddr;
}
diff --git a/io/trace-events b/io/trace-events
index d4c0f84..dc3a63b 100644
--- a/io/trace-events
+++ b/io/trace-events
@@ -44,6 +44,11 @@ qio_channel_tls_handshake_pending(void *ioc, int status) "TLS handshake pending
qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p"
qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p"
qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p"
+qio_channel_tls_bye_start(void *ioc) "TLS termination start ioc=%p"
+qio_channel_tls_bye_pending(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
+qio_channel_tls_bye_fail(void *ioc) "TLS termination fail ioc=%p"
+qio_channel_tls_bye_complete(void *ioc) "TLS termination complete ioc=%p"
+qio_channel_tls_bye_cancel(void *ioc) "TLS termination cancel ioc=%p"
qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p"
qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p"
diff --git a/iothread.c b/iothread.c
index e1e9e04..8810376 100644
--- a/iothread.c
+++ b/iothread.c
@@ -17,8 +17,8 @@
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
-#include "sysemu/event-loop-base.h"
-#include "sysemu/iothread.h"
+#include "system/event-loop-base.h"
+#include "system/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
@@ -292,7 +292,7 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
}
}
-static void iothread_class_init(ObjectClass *klass, void *class_data)
+static void iothread_class_init(ObjectClass *klass, const void *class_data)
{
EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);
diff --git a/job.c b/job.c
index 660ce22..0653bc2 100644
--- a/job.c
+++ b/job.c
@@ -251,6 +251,12 @@ bool job_is_cancelled_locked(Job *job)
return job->force_cancel;
}
+bool job_is_paused(Job *job)
+{
+ JOB_LOCK_GUARD();
+ return job->paused;
+}
+
bool job_is_cancelled(Job *job)
{
JOB_LOCK_GUARD();
diff --git a/libdecnumber/decContext.c b/libdecnumber/decContext.c
index 1956edf..d99b080 100644
--- a/libdecnumber/decContext.c
+++ b/libdecnumber/decContext.c
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal Context module */
diff --git a/libdecnumber/decNumber.c b/libdecnumber/decNumber.c
index 31282ad..4b57d8a 100644
--- a/libdecnumber/decNumber.c
+++ b/libdecnumber/decNumber.c
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal Number arithmetic module */
diff --git a/libdecnumber/dpd/decimal128.c b/libdecnumber/dpd/decimal128.c
index ca4764e..1064fb2 100644
--- a/libdecnumber/dpd/decimal128.c
+++ b/libdecnumber/dpd/decimal128.c
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 128-bit format module */
diff --git a/libdecnumber/dpd/decimal32.c b/libdecnumber/dpd/decimal32.c
index 53f2978..34ff0fe 100644
--- a/libdecnumber/dpd/decimal32.c
+++ b/libdecnumber/dpd/decimal32.c
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 32-bit format module */
diff --git a/libdecnumber/dpd/decimal64.c b/libdecnumber/dpd/decimal64.c
index 290dbe8..11e0674 100644
--- a/libdecnumber/dpd/decimal64.c
+++ b/libdecnumber/dpd/decimal64.c
@@ -24,9 +24,8 @@
for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING. If not, see
+ <https://www.gnu.org/licenses/>. */
/* ------------------------------------------------------------------ */
/* Decimal 64-bit format module */
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index 2af9931..f4d9baa 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -108,6 +105,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */
struct kvm_vcpu_init {
__u32 target;
@@ -368,6 +366,7 @@ enum {
KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0,
};
+/* Vendor hyper call function numbers 0-63 */
#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
enum {
@@ -375,6 +374,14 @@ enum {
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
};
+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2 KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+};
+
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
@@ -397,6 +404,7 @@ enum {
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
+#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
@@ -411,10 +419,11 @@ enum {
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_PMU_V3_FILTER 2
-#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
@@ -473,6 +482,12 @@ enum {
*/
#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (1ULL << 0)
+/*
+ * Shutdown caused by a PSCI v1.3 SYSTEM_OFF2 call.
+ * Valid only when the system event has a type of KVM_SYSTEM_EVENT_SHUTDOWN.
+ */
+#define KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2 (1ULL << 0)
+
/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
diff --git a/linux-headers/asm-arm64/mman.h b/linux-headers/asm-arm64/mman.h
index d0dbfe9..7b500a3 100644
--- a/linux-headers/asm-arm64/mman.h
+++ b/linux-headers/asm-arm64/mman.h
@@ -7,4 +7,13 @@
#define PROT_BTI 0x10 /* BTI guarded page */
#define PROT_MTE 0x20 /* Normal Tagged mapping */
+/* Override any generic PKEY permission defines */
+#define PKEY_DISABLE_EXECUTE 0x4
+#define PKEY_DISABLE_READ 0x8
+#undef PKEY_ACCESS_MASK
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE |\
+ PKEY_DISABLE_READ |\
+ PKEY_DISABLE_EXECUTE)
+
#endif /* ! _UAPI__ASM_MMAN_H */
diff --git a/linux-headers/asm-arm64/unistd.h b/linux-headers/asm-arm64/unistd.h
index ce2ee8f..df36f23 100644
--- a/linux-headers/asm-arm64/unistd.h
+++ b/linux-headers/asm-arm64/unistd.h
@@ -1,25 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/linux-headers/asm-arm64/unistd_64.h b/linux-headers/asm-arm64/unistd_64.h
new file mode 100644
index 0000000..ee9aaeb
--- /dev/null
+++ b/linux-headers/asm-arm64/unistd_64.h
@@ -0,0 +1,329 @@
+#ifndef _ASM_UNISTD_64_H
+#define _ASM_UNISTD_64_H
+
+#define __NR_io_setup 0
+#define __NR_io_destroy 1
+#define __NR_io_submit 2
+#define __NR_io_cancel 3
+#define __NR_io_getevents 4
+#define __NR_setxattr 5
+#define __NR_lsetxattr 6
+#define __NR_fsetxattr 7
+#define __NR_getxattr 8
+#define __NR_lgetxattr 9
+#define __NR_fgetxattr 10
+#define __NR_listxattr 11
+#define __NR_llistxattr 12
+#define __NR_flistxattr 13
+#define __NR_removexattr 14
+#define __NR_lremovexattr 15
+#define __NR_fremovexattr 16
+#define __NR_getcwd 17
+#define __NR_lookup_dcookie 18
+#define __NR_eventfd2 19
+#define __NR_epoll_create1 20
+#define __NR_epoll_ctl 21
+#define __NR_epoll_pwait 22
+#define __NR_dup 23
+#define __NR_dup3 24
+#define __NR_fcntl 25
+#define __NR_inotify_init1 26
+#define __NR_inotify_add_watch 27
+#define __NR_inotify_rm_watch 28
+#define __NR_ioctl 29
+#define __NR_ioprio_set 30
+#define __NR_ioprio_get 31
+#define __NR_flock 32
+#define __NR_mknodat 33
+#define __NR_mkdirat 34
+#define __NR_unlinkat 35
+#define __NR_symlinkat 36
+#define __NR_linkat 37
+#define __NR_renameat 38
+#define __NR_umount2 39
+#define __NR_mount 40
+#define __NR_pivot_root 41
+#define __NR_nfsservctl 42
+#define __NR_statfs 43
+#define __NR_fstatfs 44
+#define __NR_truncate 45
+#define __NR_ftruncate 46
+#define __NR_fallocate 47
+#define __NR_faccessat 48
+#define __NR_chdir 49
+#define __NR_fchdir 50
+#define __NR_chroot 51
+#define __NR_fchmod 52
+#define __NR_fchmodat 53
+#define __NR_fchownat 54
+#define __NR_fchown 55
+#define __NR_openat 56
+#define __NR_close 57
+#define __NR_vhangup 58
+#define __NR_pipe2 59
+#define __NR_quotactl 60
+#define __NR_getdents64 61
+#define __NR_lseek 62
+#define __NR_read 63
+#define __NR_write 64
+#define __NR_readv 65
+#define __NR_writev 66
+#define __NR_pread64 67
+#define __NR_pwrite64 68
+#define __NR_preadv 69
+#define __NR_pwritev 70
+#define __NR_sendfile 71
+#define __NR_pselect6 72
+#define __NR_ppoll 73
+#define __NR_signalfd4 74
+#define __NR_vmsplice 75
+#define __NR_splice 76
+#define __NR_tee 77
+#define __NR_readlinkat 78
+#define __NR_newfstatat 79
+#define __NR_fstat 80
+#define __NR_sync 81
+#define __NR_fsync 82
+#define __NR_fdatasync 83
+#define __NR_sync_file_range 84
+#define __NR_timerfd_create 85
+#define __NR_timerfd_settime 86
+#define __NR_timerfd_gettime 87
+#define __NR_utimensat 88
+#define __NR_acct 89
+#define __NR_capget 90
+#define __NR_capset 91
+#define __NR_personality 92
+#define __NR_exit 93
+#define __NR_exit_group 94
+#define __NR_waitid 95
+#define __NR_set_tid_address 96
+#define __NR_unshare 97
+#define __NR_futex 98
+#define __NR_set_robust_list 99
+#define __NR_get_robust_list 100
+#define __NR_nanosleep 101
+#define __NR_getitimer 102
+#define __NR_setitimer 103
+#define __NR_kexec_load 104
+#define __NR_init_module 105
+#define __NR_delete_module 106
+#define __NR_timer_create 107
+#define __NR_timer_gettime 108
+#define __NR_timer_getoverrun 109
+#define __NR_timer_settime 110
+#define __NR_timer_delete 111
+#define __NR_clock_settime 112
+#define __NR_clock_gettime 113
+#define __NR_clock_getres 114
+#define __NR_clock_nanosleep 115
+#define __NR_syslog 116
+#define __NR_ptrace 117
+#define __NR_sched_setparam 118
+#define __NR_sched_setscheduler 119
+#define __NR_sched_getscheduler 120
+#define __NR_sched_getparam 121
+#define __NR_sched_setaffinity 122
+#define __NR_sched_getaffinity 123
+#define __NR_sched_yield 124
+#define __NR_sched_get_priority_max 125
+#define __NR_sched_get_priority_min 126
+#define __NR_sched_rr_get_interval 127
+#define __NR_restart_syscall 128
+#define __NR_kill 129
+#define __NR_tkill 130
+#define __NR_tgkill 131
+#define __NR_sigaltstack 132
+#define __NR_rt_sigsuspend 133
+#define __NR_rt_sigaction 134
+#define __NR_rt_sigprocmask 135
+#define __NR_rt_sigpending 136
+#define __NR_rt_sigtimedwait 137
+#define __NR_rt_sigqueueinfo 138
+#define __NR_rt_sigreturn 139
+#define __NR_setpriority 140
+#define __NR_getpriority 141
+#define __NR_reboot 142
+#define __NR_setregid 143
+#define __NR_setgid 144
+#define __NR_setreuid 145
+#define __NR_setuid 146
+#define __NR_setresuid 147
+#define __NR_getresuid 148
+#define __NR_setresgid 149
+#define __NR_getresgid 150
+#define __NR_setfsuid 151
+#define __NR_setfsgid 152
+#define __NR_times 153
+#define __NR_setpgid 154
+#define __NR_getpgid 155
+#define __NR_getsid 156
+#define __NR_setsid 157
+#define __NR_getgroups 158
+#define __NR_setgroups 159
+#define __NR_uname 160
+#define __NR_sethostname 161
+#define __NR_setdomainname 162
+#define __NR_getrlimit 163
+#define __NR_setrlimit 164
+#define __NR_getrusage 165
+#define __NR_umask 166
+#define __NR_prctl 167
+#define __NR_getcpu 168
+#define __NR_gettimeofday 169
+#define __NR_settimeofday 170
+#define __NR_adjtimex 171
+#define __NR_getpid 172
+#define __NR_getppid 173
+#define __NR_getuid 174
+#define __NR_geteuid 175
+#define __NR_getgid 176
+#define __NR_getegid 177
+#define __NR_gettid 178
+#define __NR_sysinfo 179
+#define __NR_mq_open 180
+#define __NR_mq_unlink 181
+#define __NR_mq_timedsend 182
+#define __NR_mq_timedreceive 183
+#define __NR_mq_notify 184
+#define __NR_mq_getsetattr 185
+#define __NR_msgget 186
+#define __NR_msgctl 187
+#define __NR_msgrcv 188
+#define __NR_msgsnd 189
+#define __NR_semget 190
+#define __NR_semctl 191
+#define __NR_semtimedop 192
+#define __NR_semop 193
+#define __NR_shmget 194
+#define __NR_shmctl 195
+#define __NR_shmat 196
+#define __NR_shmdt 197
+#define __NR_socket 198
+#define __NR_socketpair 199
+#define __NR_bind 200
+#define __NR_listen 201
+#define __NR_accept 202
+#define __NR_connect 203
+#define __NR_getsockname 204
+#define __NR_getpeername 205
+#define __NR_sendto 206
+#define __NR_recvfrom 207
+#define __NR_setsockopt 208
+#define __NR_getsockopt 209
+#define __NR_shutdown 210
+#define __NR_sendmsg 211
+#define __NR_recvmsg 212
+#define __NR_readahead 213
+#define __NR_brk 214
+#define __NR_munmap 215
+#define __NR_mremap 216
+#define __NR_add_key 217
+#define __NR_request_key 218
+#define __NR_keyctl 219
+#define __NR_clone 220
+#define __NR_execve 221
+#define __NR_mmap 222
+#define __NR_fadvise64 223
+#define __NR_swapon 224
+#define __NR_swapoff 225
+#define __NR_mprotect 226
+#define __NR_msync 227
+#define __NR_mlock 228
+#define __NR_munlock 229
+#define __NR_mlockall 230
+#define __NR_munlockall 231
+#define __NR_mincore 232
+#define __NR_madvise 233
+#define __NR_remap_file_pages 234
+#define __NR_mbind 235
+#define __NR_get_mempolicy 236
+#define __NR_set_mempolicy 237
+#define __NR_migrate_pages 238
+#define __NR_move_pages 239
+#define __NR_rt_tgsigqueueinfo 240
+#define __NR_perf_event_open 241
+#define __NR_accept4 242
+#define __NR_recvmmsg 243
+#define __NR_wait4 260
+#define __NR_prlimit64 261
+#define __NR_fanotify_init 262
+#define __NR_fanotify_mark 263
+#define __NR_name_to_handle_at 264
+#define __NR_open_by_handle_at 265
+#define __NR_clock_adjtime 266
+#define __NR_syncfs 267
+#define __NR_setns 268
+#define __NR_sendmmsg 269
+#define __NR_process_vm_readv 270
+#define __NR_process_vm_writev 271
+#define __NR_kcmp 272
+#define __NR_finit_module 273
+#define __NR_sched_setattr 274
+#define __NR_sched_getattr 275
+#define __NR_renameat2 276
+#define __NR_seccomp 277
+#define __NR_getrandom 278
+#define __NR_memfd_create 279
+#define __NR_bpf 280
+#define __NR_execveat 281
+#define __NR_userfaultfd 282
+#define __NR_membarrier 283
+#define __NR_mlock2 284
+#define __NR_copy_file_range 285
+#define __NR_preadv2 286
+#define __NR_pwritev2 287
+#define __NR_pkey_mprotect 288
+#define __NR_pkey_alloc 289
+#define __NR_pkey_free 290
+#define __NR_statx 291
+#define __NR_io_pgetevents 292
+#define __NR_rseq 293
+#define __NR_kexec_file_load 294
+#define __NR_pidfd_send_signal 424
+#define __NR_io_uring_setup 425
+#define __NR_io_uring_enter 426
+#define __NR_io_uring_register 427
+#define __NR_open_tree 428
+#define __NR_move_mount 429
+#define __NR_fsopen 430
+#define __NR_fsconfig 431
+#define __NR_fsmount 432
+#define __NR_fspick 433
+#define __NR_pidfd_open 434
+#define __NR_clone3 435
+#define __NR_close_range 436
+#define __NR_openat2 437
+#define __NR_pidfd_getfd 438
+#define __NR_faccessat2 439
+#define __NR_process_madvise 440
+#define __NR_epoll_pwait2 441
+#define __NR_mount_setattr 442
+#define __NR_quotactl_fd 443
+#define __NR_landlock_create_ruleset 444
+#define __NR_landlock_add_rule 445
+#define __NR_landlock_restrict_self 446
+#define __NR_memfd_secret 447
+#define __NR_process_mrelease 448
+#define __NR_futex_waitv 449
+#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
+#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
+#define __NR_statmount 457
+#define __NR_listmount 458
+#define __NR_lsm_get_self_attr 459
+#define __NR_lsm_set_self_attr 460
+#define __NR_lsm_list_modules 461
+#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
+
+
+#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-generic/mman-common.h b/linux-headers/asm-generic/mman-common.h
index 6ce1f1c..ef1c27f 100644
--- a/linux-headers/asm-generic/mman-common.h
+++ b/linux-headers/asm-generic/mman-common.h
@@ -79,9 +79,13 @@
#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */
+#define MADV_GUARD_REMOVE 103 /* unguard range */
+
/* compatibility flags */
#define MAP_FILE 0
+#define PKEY_UNRESTRICTED 0x0
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/linux-headers/asm-generic/mman.h b/linux-headers/asm-generic/mman.h
index 57e8195..5e3d61d 100644
--- a/linux-headers/asm-generic/mman.h
+++ b/linux-headers/asm-generic/mman.h
@@ -19,4 +19,8 @@
#define MCL_FUTURE 2 /* lock all future mappings */
#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
+#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */
+
+
#endif /* __ASM_GENERIC_MMAN_H */
diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h
index d983c48..2892a45 100644
--- a/linux-headers/asm-generic/unistd.h
+++ b/linux-headers/asm-generic/unistd.h
@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
@@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-
-#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
-#endif
-
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
@@ -845,8 +841,19 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
+#define __NR_setxattrat 463
+__SYSCALL(__NR_setxattrat, sys_setxattrat)
+#define __NR_getxattrat 464
+__SYSCALL(__NR_getxattrat, sys_getxattrat)
+#define __NR_listxattrat 465
+__SYSCALL(__NR_listxattrat, sys_listxattrat)
+#define __NR_removexattrat 466
+__SYSCALL(__NR_removexattrat, sys_removexattrat)
+#define __NR_open_tree_attr 467
+__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
+
#undef __NR_syscalls
-#define __NR_syscalls 463
+#define __NR_syscalls 468
/*
* 32 bit systems traditionally used different
diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h
index f9abef3..5f354f5 100644
--- a/linux-headers/asm-loongarch/kvm.h
+++ b/linux-headers/asm-loongarch/kvm.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define __KVM_HAVE_IRQ_LINE
+
/*
* KVM LoongArch specific structures and definitions.
*
@@ -64,6 +66,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
@@ -77,11 +80,34 @@ struct kvm_fpu {
/* Debugging: Special instruction for software breakpoint */
#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+/* LBT registers */
+#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
+
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vm fd */
+#define KVM_LOONGARCH_VM_FEAT_CTRL 0
+#define KVM_LOONGARCH_VM_FEAT_LSX 0
+#define KVM_LOONGARCH_VM_FEAT_LASX 1
+#define KVM_LOONGARCH_VM_FEAT_X86BT 2
+#define KVM_LOONGARCH_VM_FEAT_ARMBT 3
+#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4
+#define KVM_LOONGARCH_VM_FEAT_PMU 5
+#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
+#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
+
+/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
+#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
struct kvm_debug_exit_arch {
};
@@ -108,4 +134,22 @@ struct kvm_iocsr_entry {
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
+#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000001
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000002
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000003
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000004
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3
+
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000006
+#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0
+
#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
diff --git a/linux-headers/asm-loongarch/kvm_para.h b/linux-headers/asm-loongarch/kvm_para.h
new file mode 100644
index 0000000..fd7f407
--- /dev/null
+++ b/linux-headers/asm-loongarch/kvm_para.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_KVM_PARA_H
+#define _ASM_KVM_PARA_H
+
+#include <linux/types.h>
+
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE 0x40000000
+#define CPUCFG_KVM_SIZE 0x100
+#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
+#define KVM_SIGNATURE "KVM\0"
+#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_IPI 1
+#define KVM_FEATURE_STEAL_TIME 2
+/* BIT 24 - 31 are features configurable by user space vmm */
+#define KVM_FEATURE_VIRT_EXTIOI 24
+#define KVM_FEATURE_USER_HCALL 25
+
+#endif /* _ASM_KVM_PARA_H */
diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h
index fcb6689..1f01980 100644
--- a/linux-headers/asm-loongarch/unistd.h
+++ b/linux-headers/asm-loongarch/unistd.h
@@ -1,5 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/linux-headers/asm-loongarch/unistd_64.h b/linux-headers/asm-loongarch/unistd_64.h
new file mode 100644
index 0000000..50d22df
--- /dev/null
+++ b/linux-headers/asm-loongarch/unistd_64.h
@@ -0,0 +1,325 @@
+#ifndef _ASM_UNISTD_64_H
+#define _ASM_UNISTD_64_H
+
+#define __NR_io_setup 0
+#define __NR_io_destroy 1
+#define __NR_io_submit 2
+#define __NR_io_cancel 3
+#define __NR_io_getevents 4
+#define __NR_setxattr 5
+#define __NR_lsetxattr 6
+#define __NR_fsetxattr 7
+#define __NR_getxattr 8
+#define __NR_lgetxattr 9
+#define __NR_fgetxattr 10
+#define __NR_listxattr 11
+#define __NR_llistxattr 12
+#define __NR_flistxattr 13
+#define __NR_removexattr 14
+#define __NR_lremovexattr 15
+#define __NR_fremovexattr 16
+#define __NR_getcwd 17
+#define __NR_lookup_dcookie 18
+#define __NR_eventfd2 19
+#define __NR_epoll_create1 20
+#define __NR_epoll_ctl 21
+#define __NR_epoll_pwait 22
+#define __NR_dup 23
+#define __NR_dup3 24
+#define __NR_fcntl 25
+#define __NR_inotify_init1 26
+#define __NR_inotify_add_watch 27
+#define __NR_inotify_rm_watch 28
+#define __NR_ioctl 29
+#define __NR_ioprio_set 30
+#define __NR_ioprio_get 31
+#define __NR_flock 32
+#define __NR_mknodat 33
+#define __NR_mkdirat 34
+#define __NR_unlinkat 35
+#define __NR_symlinkat 36
+#define __NR_linkat 37
+#define __NR_umount2 39
+#define __NR_mount 40
+#define __NR_pivot_root 41
+#define __NR_nfsservctl 42
+#define __NR_statfs 43
+#define __NR_fstatfs 44
+#define __NR_truncate 45
+#define __NR_ftruncate 46
+#define __NR_fallocate 47
+#define __NR_faccessat 48
+#define __NR_chdir 49
+#define __NR_fchdir 50
+#define __NR_chroot 51
+#define __NR_fchmod 52
+#define __NR_fchmodat 53
+#define __NR_fchownat 54
+#define __NR_fchown 55
+#define __NR_openat 56
+#define __NR_close 57
+#define __NR_vhangup 58
+#define __NR_pipe2 59
+#define __NR_quotactl 60
+#define __NR_getdents64 61
+#define __NR_lseek 62
+#define __NR_read 63
+#define __NR_write 64
+#define __NR_readv 65
+#define __NR_writev 66
+#define __NR_pread64 67
+#define __NR_pwrite64 68
+#define __NR_preadv 69
+#define __NR_pwritev 70
+#define __NR_sendfile 71
+#define __NR_pselect6 72
+#define __NR_ppoll 73
+#define __NR_signalfd4 74
+#define __NR_vmsplice 75
+#define __NR_splice 76
+#define __NR_tee 77
+#define __NR_readlinkat 78
+#define __NR_newfstatat 79
+#define __NR_fstat 80
+#define __NR_sync 81
+#define __NR_fsync 82
+#define __NR_fdatasync 83
+#define __NR_sync_file_range 84
+#define __NR_timerfd_create 85
+#define __NR_timerfd_settime 86
+#define __NR_timerfd_gettime 87
+#define __NR_utimensat 88
+#define __NR_acct 89
+#define __NR_capget 90
+#define __NR_capset 91
+#define __NR_personality 92
+#define __NR_exit 93
+#define __NR_exit_group 94
+#define __NR_waitid 95
+#define __NR_set_tid_address 96
+#define __NR_unshare 97
+#define __NR_futex 98
+#define __NR_set_robust_list 99
+#define __NR_get_robust_list 100
+#define __NR_nanosleep 101
+#define __NR_getitimer 102
+#define __NR_setitimer 103
+#define __NR_kexec_load 104
+#define __NR_init_module 105
+#define __NR_delete_module 106
+#define __NR_timer_create 107
+#define __NR_timer_gettime 108
+#define __NR_timer_getoverrun 109
+#define __NR_timer_settime 110
+#define __NR_timer_delete 111
+#define __NR_clock_settime 112
+#define __NR_clock_gettime 113
+#define __NR_clock_getres 114
+#define __NR_clock_nanosleep 115
+#define __NR_syslog 116
+#define __NR_ptrace 117
+#define __NR_sched_setparam 118
+#define __NR_sched_setscheduler 119
+#define __NR_sched_getscheduler 120
+#define __NR_sched_getparam 121
+#define __NR_sched_setaffinity 122
+#define __NR_sched_getaffinity 123
+#define __NR_sched_yield 124
+#define __NR_sched_get_priority_max 125
+#define __NR_sched_get_priority_min 126
+#define __NR_sched_rr_get_interval 127
+#define __NR_restart_syscall 128
+#define __NR_kill 129
+#define __NR_tkill 130
+#define __NR_tgkill 131
+#define __NR_sigaltstack 132
+#define __NR_rt_sigsuspend 133
+#define __NR_rt_sigaction 134
+#define __NR_rt_sigprocmask 135
+#define __NR_rt_sigpending 136
+#define __NR_rt_sigtimedwait 137
+#define __NR_rt_sigqueueinfo 138
+#define __NR_rt_sigreturn 139
+#define __NR_setpriority 140
+#define __NR_getpriority 141
+#define __NR_reboot 142
+#define __NR_setregid 143
+#define __NR_setgid 144
+#define __NR_setreuid 145
+#define __NR_setuid 146
+#define __NR_setresuid 147
+#define __NR_getresuid 148
+#define __NR_setresgid 149
+#define __NR_getresgid 150
+#define __NR_setfsuid 151
+#define __NR_setfsgid 152
+#define __NR_times 153
+#define __NR_setpgid 154
+#define __NR_getpgid 155
+#define __NR_getsid 156
+#define __NR_setsid 157
+#define __NR_getgroups 158
+#define __NR_setgroups 159
+#define __NR_uname 160
+#define __NR_sethostname 161
+#define __NR_setdomainname 162
+#define __NR_getrusage 165
+#define __NR_umask 166
+#define __NR_prctl 167
+#define __NR_getcpu 168
+#define __NR_gettimeofday 169
+#define __NR_settimeofday 170
+#define __NR_adjtimex 171
+#define __NR_getpid 172
+#define __NR_getppid 173
+#define __NR_getuid 174
+#define __NR_geteuid 175
+#define __NR_getgid 176
+#define __NR_getegid 177
+#define __NR_gettid 178
+#define __NR_sysinfo 179
+#define __NR_mq_open 180
+#define __NR_mq_unlink 181
+#define __NR_mq_timedsend 182
+#define __NR_mq_timedreceive 183
+#define __NR_mq_notify 184
+#define __NR_mq_getsetattr 185
+#define __NR_msgget 186
+#define __NR_msgctl 187
+#define __NR_msgrcv 188
+#define __NR_msgsnd 189
+#define __NR_semget 190
+#define __NR_semctl 191
+#define __NR_semtimedop 192
+#define __NR_semop 193
+#define __NR_shmget 194
+#define __NR_shmctl 195
+#define __NR_shmat 196
+#define __NR_shmdt 197
+#define __NR_socket 198
+#define __NR_socketpair 199
+#define __NR_bind 200
+#define __NR_listen 201
+#define __NR_accept 202
+#define __NR_connect 203
+#define __NR_getsockname 204
+#define __NR_getpeername 205
+#define __NR_sendto 206
+#define __NR_recvfrom 207
+#define __NR_setsockopt 208
+#define __NR_getsockopt 209
+#define __NR_shutdown 210
+#define __NR_sendmsg 211
+#define __NR_recvmsg 212
+#define __NR_readahead 213
+#define __NR_brk 214
+#define __NR_munmap 215
+#define __NR_mremap 216
+#define __NR_add_key 217
+#define __NR_request_key 218
+#define __NR_keyctl 219
+#define __NR_clone 220
+#define __NR_execve 221
+#define __NR_mmap 222
+#define __NR_fadvise64 223
+#define __NR_swapon 224
+#define __NR_swapoff 225
+#define __NR_mprotect 226
+#define __NR_msync 227
+#define __NR_mlock 228
+#define __NR_munlock 229
+#define __NR_mlockall 230
+#define __NR_munlockall 231
+#define __NR_mincore 232
+#define __NR_madvise 233
+#define __NR_remap_file_pages 234
+#define __NR_mbind 235
+#define __NR_get_mempolicy 236
+#define __NR_set_mempolicy 237
+#define __NR_migrate_pages 238
+#define __NR_move_pages 239
+#define __NR_rt_tgsigqueueinfo 240
+#define __NR_perf_event_open 241
+#define __NR_accept4 242
+#define __NR_recvmmsg 243
+#define __NR_wait4 260
+#define __NR_prlimit64 261
+#define __NR_fanotify_init 262
+#define __NR_fanotify_mark 263
+#define __NR_name_to_handle_at 264
+#define __NR_open_by_handle_at 265
+#define __NR_clock_adjtime 266
+#define __NR_syncfs 267
+#define __NR_setns 268
+#define __NR_sendmmsg 269
+#define __NR_process_vm_readv 270
+#define __NR_process_vm_writev 271
+#define __NR_kcmp 272
+#define __NR_finit_module 273
+#define __NR_sched_setattr 274
+#define __NR_sched_getattr 275
+#define __NR_renameat2 276
+#define __NR_seccomp 277
+#define __NR_getrandom 278
+#define __NR_memfd_create 279
+#define __NR_bpf 280
+#define __NR_execveat 281
+#define __NR_userfaultfd 282
+#define __NR_membarrier 283
+#define __NR_mlock2 284
+#define __NR_copy_file_range 285
+#define __NR_preadv2 286
+#define __NR_pwritev2 287
+#define __NR_pkey_mprotect 288
+#define __NR_pkey_alloc 289
+#define __NR_pkey_free 290
+#define __NR_statx 291
+#define __NR_io_pgetevents 292
+#define __NR_rseq 293
+#define __NR_kexec_file_load 294
+#define __NR_pidfd_send_signal 424
+#define __NR_io_uring_setup 425
+#define __NR_io_uring_enter 426
+#define __NR_io_uring_register 427
+#define __NR_open_tree 428
+#define __NR_move_mount 429
+#define __NR_fsopen 430
+#define __NR_fsconfig 431
+#define __NR_fsmount 432
+#define __NR_fspick 433
+#define __NR_pidfd_open 434
+#define __NR_clone3 435
+#define __NR_close_range 436
+#define __NR_openat2 437
+#define __NR_pidfd_getfd 438
+#define __NR_faccessat2 439
+#define __NR_process_madvise 440
+#define __NR_epoll_pwait2 441
+#define __NR_mount_setattr 442
+#define __NR_quotactl_fd 443
+#define __NR_landlock_create_ruleset 444
+#define __NR_landlock_add_rule 445
+#define __NR_landlock_restrict_self 446
+#define __NR_process_mrelease 448
+#define __NR_futex_waitv 449
+#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
+#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
+#define __NR_statmount 457
+#define __NR_listmount 458
+#define __NR_lsm_get_self_attr 459
+#define __NR_lsm_set_self_attr 460
+#define __NR_lsm_list_modules 461
+#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
+
+
+#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-mips/mman.h b/linux-headers/asm-mips/mman.h
index 9c48d9a..b700dae 100644
--- a/linux-headers/asm-mips/mman.h
+++ b/linux-headers/asm-mips/mman.h
@@ -105,6 +105,9 @@
#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */
+#define MADV_GUARD_REMOVE 103 /* unguard range */
+
/* compatibility flags */
#define MAP_FILE 0
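
The two new advice values give userspace cheap guard regions: any access inside a guarded range raises a fatal signal instead of populating memory, and the guard can later be removed again. A minimal sketch, assuming a kernel new enough to accept the new advice values (older kernels fail with EINVAL); the numeric fallbacks simply mirror the values added above and are there only so the example compiles against older headers.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MADV_GUARD_INSTALL
    #define MADV_GUARD_INSTALL 102   /* matches the header update above */
    #endif
    #ifndef MADV_GUARD_REMOVE
    #define MADV_GUARD_REMOVE  103
    #endif

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* Turn the last page into a guard region; touching it is now fatal. */
        if (madvise(buf + 3 * page, page, MADV_GUARD_INSTALL) < 0) {
            perror("madvise(MADV_GUARD_INSTALL)");  /* e.g. EINVAL on old kernels */
            return 1;
        }
        /* ... use buf[0 .. 3*page) normally ... */
        /* Remove the guard again before reusing the page. */
        madvise(buf + 3 * page, page, MADV_GUARD_REMOVE);
        munmap(buf, 4 * page);
        return 0;
    }
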
diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h
index fc93b3b..bdcc2f4 100644
--- a/linux-headers/asm-mips/unistd_n32.h
+++ b/linux-headers/asm-mips/unistd_n32.h
@@ -391,5 +391,10 @@
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#define __NR_mseal (__NR_Linux + 462)
+#define __NR_setxattrat (__NR_Linux + 463)
+#define __NR_getxattrat (__NR_Linux + 464)
+#define __NR_listxattrat (__NR_Linux + 465)
+#define __NR_removexattrat (__NR_Linux + 466)
+#define __NR_open_tree_attr (__NR_Linux + 467)
#endif /* _ASM_UNISTD_N32_H */
diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h
index e72a3eb..3b6b019 100644
--- a/linux-headers/asm-mips/unistd_n64.h
+++ b/linux-headers/asm-mips/unistd_n64.h
@@ -367,5 +367,10 @@
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#define __NR_mseal (__NR_Linux + 462)
+#define __NR_setxattrat (__NR_Linux + 463)
+#define __NR_getxattrat (__NR_Linux + 464)
+#define __NR_listxattrat (__NR_Linux + 465)
+#define __NR_removexattrat (__NR_Linux + 466)
+#define __NR_open_tree_attr (__NR_Linux + 467)
#endif /* _ASM_UNISTD_N64_H */
diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h
index b86eb07..4609a4b 100644
--- a/linux-headers/asm-mips/unistd_o32.h
+++ b/linux-headers/asm-mips/unistd_o32.h
@@ -437,5 +437,10 @@
#define __NR_lsm_set_self_attr (__NR_Linux + 460)
#define __NR_lsm_list_modules (__NR_Linux + 461)
#define __NR_mseal (__NR_Linux + 462)
+#define __NR_setxattrat (__NR_Linux + 463)
+#define __NR_getxattrat (__NR_Linux + 464)
+#define __NR_listxattrat (__NR_Linux + 465)
+#define __NR_removexattrat (__NR_Linux + 466)
+#define __NR_open_tree_attr (__NR_Linux + 467)
#endif /* _ASM_UNISTD_O32_H */
diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h
index 1691297..eaeda00 100644
--- a/linux-headers/asm-powerpc/kvm.h
+++ b/linux-headers/asm-powerpc/kvm.h
@@ -645,6 +645,9 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
+#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6)
+#define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7)
+#define KVM_REG_PPC_HASHPKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h
index 28627b6..5d38a42 100644
--- a/linux-headers/asm-powerpc/unistd_32.h
+++ b/linux-headers/asm-powerpc/unistd_32.h
@@ -444,6 +444,11 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_UNISTD_32_H */
diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h
index 1fc42a8..860a488 100644
--- a/linux-headers/asm-powerpc/unistd_64.h
+++ b/linux-headers/asm-powerpc/unistd_64.h
@@ -416,6 +416,11 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h
index e878e7c..5f59fd2 100644
--- a/linux-headers/asm-riscv/kvm.h
+++ b/linux-headers/asm-riscv/kvm.h
@@ -168,6 +168,22 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZTSO,
KVM_RISCV_ISA_EXT_ZACAS,
KVM_RISCV_ISA_EXT_SSCOFPMF,
+ KVM_RISCV_ISA_EXT_ZIMOP,
+ KVM_RISCV_ISA_EXT_ZCA,
+ KVM_RISCV_ISA_EXT_ZCB,
+ KVM_RISCV_ISA_EXT_ZCD,
+ KVM_RISCV_ISA_EXT_ZCF,
+ KVM_RISCV_ISA_EXT_ZCMOP,
+ KVM_RISCV_ISA_EXT_ZAWRS,
+ KVM_RISCV_ISA_EXT_SMNPM,
+ KVM_RISCV_ISA_EXT_SSNPM,
+ KVM_RISCV_ISA_EXT_SVADE,
+ KVM_RISCV_ISA_EXT_SVADU,
+ KVM_RISCV_ISA_EXT_SVVPTC,
+ KVM_RISCV_ISA_EXT_ZABHA,
+ KVM_RISCV_ISA_EXT_ZICCRSE,
+ KVM_RISCV_ISA_EXT_ZAAMO,
+ KVM_RISCV_ISA_EXT_ZALRSC,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -187,6 +203,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
+ KVM_RISCV_SBI_EXT_SUSP,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -200,9 +217,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24
diff --git a/linux-headers/asm-riscv/unistd.h b/linux-headers/asm-riscv/unistd.h
index 950ab3f..81896bb 100644
--- a/linux-headers/asm-riscv/unistd.h
+++ b/linux-headers/asm-riscv/unistd.h
@@ -14,41 +14,10 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
+#include <asm/bitsperlong.h>
-#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#endif /* __LP64__ */
-
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
-
-/*
- * Allows the instruction cache to be flushed from userspace. Despite RISC-V
- * having a direct 'fence.i' instruction available to userspace (which we
- * can't trap!), that's not actually viable when running on Linux because the
- * kernel might schedule a process on another hart. There is no way for
- * userspace to handle this without invoking the kernel (as it doesn't know the
- * thread->hart mappings), so we've defined a RISC-V specific system call to
- * flush the instruction cache.
- *
- * __NR_riscv_flush_icache is defined to flush the instruction cache over an
- * address range, with the flush applying to either all threads or just the
- * caller. We don't currently do anything with the address range, that's just
- * in there for forwards compatibility.
- */
-#ifndef __NR_riscv_flush_icache
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-#endif
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-/*
- * Allows userspace to query the kernel for CPU architecture and
- * microarchitecture details across a given set of CPUs.
- */
-#ifndef __NR_riscv_hwprobe
-#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14)
+#if __BITS_PER_LONG == 64
+#include <asm/unistd_64.h>
+#else
+#include <asm/unistd_32.h>
#endif
-__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe)
diff --git a/linux-headers/asm-riscv/unistd_32.h b/linux-headers/asm-riscv/unistd_32.h
new file mode 100644
index 0000000..a5e769f
--- /dev/null
+++ b/linux-headers/asm-riscv/unistd_32.h
@@ -0,0 +1,320 @@
+#ifndef _ASM_UNISTD_32_H
+#define _ASM_UNISTD_32_H
+
+#define __NR_io_setup 0
+#define __NR_io_destroy 1
+#define __NR_io_submit 2
+#define __NR_io_cancel 3
+#define __NR_setxattr 5
+#define __NR_lsetxattr 6
+#define __NR_fsetxattr 7
+#define __NR_getxattr 8
+#define __NR_lgetxattr 9
+#define __NR_fgetxattr 10
+#define __NR_listxattr 11
+#define __NR_llistxattr 12
+#define __NR_flistxattr 13
+#define __NR_removexattr 14
+#define __NR_lremovexattr 15
+#define __NR_fremovexattr 16
+#define __NR_getcwd 17
+#define __NR_lookup_dcookie 18
+#define __NR_eventfd2 19
+#define __NR_epoll_create1 20
+#define __NR_epoll_ctl 21
+#define __NR_epoll_pwait 22
+#define __NR_dup 23
+#define __NR_dup3 24
+#define __NR_fcntl64 25
+#define __NR_inotify_init1 26
+#define __NR_inotify_add_watch 27
+#define __NR_inotify_rm_watch 28
+#define __NR_ioctl 29
+#define __NR_ioprio_set 30
+#define __NR_ioprio_get 31
+#define __NR_flock 32
+#define __NR_mknodat 33
+#define __NR_mkdirat 34
+#define __NR_unlinkat 35
+#define __NR_symlinkat 36
+#define __NR_linkat 37
+#define __NR_umount2 39
+#define __NR_mount 40
+#define __NR_pivot_root 41
+#define __NR_nfsservctl 42
+#define __NR_statfs64 43
+#define __NR_fstatfs64 44
+#define __NR_truncate64 45
+#define __NR_ftruncate64 46
+#define __NR_fallocate 47
+#define __NR_faccessat 48
+#define __NR_chdir 49
+#define __NR_fchdir 50
+#define __NR_chroot 51
+#define __NR_fchmod 52
+#define __NR_fchmodat 53
+#define __NR_fchownat 54
+#define __NR_fchown 55
+#define __NR_openat 56
+#define __NR_close 57
+#define __NR_vhangup 58
+#define __NR_pipe2 59
+#define __NR_quotactl 60
+#define __NR_getdents64 61
+#define __NR_llseek 62
+#define __NR_read 63
+#define __NR_write 64
+#define __NR_readv 65
+#define __NR_writev 66
+#define __NR_pread64 67
+#define __NR_pwrite64 68
+#define __NR_preadv 69
+#define __NR_pwritev 70
+#define __NR_sendfile64 71
+#define __NR_signalfd4 74
+#define __NR_vmsplice 75
+#define __NR_splice 76
+#define __NR_tee 77
+#define __NR_readlinkat 78
+#define __NR_sync 81
+#define __NR_fsync 82
+#define __NR_fdatasync 83
+#define __NR_sync_file_range 84
+#define __NR_timerfd_create 85
+#define __NR_acct 89
+#define __NR_capget 90
+#define __NR_capset 91
+#define __NR_personality 92
+#define __NR_exit 93
+#define __NR_exit_group 94
+#define __NR_waitid 95
+#define __NR_set_tid_address 96
+#define __NR_unshare 97
+#define __NR_set_robust_list 99
+#define __NR_get_robust_list 100
+#define __NR_getitimer 102
+#define __NR_setitimer 103
+#define __NR_kexec_load 104
+#define __NR_init_module 105
+#define __NR_delete_module 106
+#define __NR_timer_create 107
+#define __NR_timer_getoverrun 109
+#define __NR_timer_delete 111
+#define __NR_syslog 116
+#define __NR_ptrace 117
+#define __NR_sched_setparam 118
+#define __NR_sched_setscheduler 119
+#define __NR_sched_getscheduler 120
+#define __NR_sched_getparam 121
+#define __NR_sched_setaffinity 122
+#define __NR_sched_getaffinity 123
+#define __NR_sched_yield 124
+#define __NR_sched_get_priority_max 125
+#define __NR_sched_get_priority_min 126
+#define __NR_restart_syscall 128
+#define __NR_kill 129
+#define __NR_tkill 130
+#define __NR_tgkill 131
+#define __NR_sigaltstack 132
+#define __NR_rt_sigsuspend 133
+#define __NR_rt_sigaction 134
+#define __NR_rt_sigprocmask 135
+#define __NR_rt_sigpending 136
+#define __NR_rt_sigqueueinfo 138
+#define __NR_rt_sigreturn 139
+#define __NR_setpriority 140
+#define __NR_getpriority 141
+#define __NR_reboot 142
+#define __NR_setregid 143
+#define __NR_setgid 144
+#define __NR_setreuid 145
+#define __NR_setuid 146
+#define __NR_setresuid 147
+#define __NR_getresuid 148
+#define __NR_setresgid 149
+#define __NR_getresgid 150
+#define __NR_setfsuid 151
+#define __NR_setfsgid 152
+#define __NR_times 153
+#define __NR_setpgid 154
+#define __NR_getpgid 155
+#define __NR_getsid 156
+#define __NR_setsid 157
+#define __NR_getgroups 158
+#define __NR_setgroups 159
+#define __NR_uname 160
+#define __NR_sethostname 161
+#define __NR_setdomainname 162
+#define __NR_getrusage 165
+#define __NR_umask 166
+#define __NR_prctl 167
+#define __NR_getcpu 168
+#define __NR_getpid 172
+#define __NR_getppid 173
+#define __NR_getuid 174
+#define __NR_geteuid 175
+#define __NR_getgid 176
+#define __NR_getegid 177
+#define __NR_gettid 178
+#define __NR_sysinfo 179
+#define __NR_mq_open 180
+#define __NR_mq_unlink 181
+#define __NR_mq_notify 184
+#define __NR_mq_getsetattr 185
+#define __NR_msgget 186
+#define __NR_msgctl 187
+#define __NR_msgrcv 188
+#define __NR_msgsnd 189
+#define __NR_semget 190
+#define __NR_semctl 191
+#define __NR_semop 193
+#define __NR_shmget 194
+#define __NR_shmctl 195
+#define __NR_shmat 196
+#define __NR_shmdt 197
+#define __NR_socket 198
+#define __NR_socketpair 199
+#define __NR_bind 200
+#define __NR_listen 201
+#define __NR_accept 202
+#define __NR_connect 203
+#define __NR_getsockname 204
+#define __NR_getpeername 205
+#define __NR_sendto 206
+#define __NR_recvfrom 207
+#define __NR_setsockopt 208
+#define __NR_getsockopt 209
+#define __NR_shutdown 210
+#define __NR_sendmsg 211
+#define __NR_recvmsg 212
+#define __NR_readahead 213
+#define __NR_brk 214
+#define __NR_munmap 215
+#define __NR_mremap 216
+#define __NR_add_key 217
+#define __NR_request_key 218
+#define __NR_keyctl 219
+#define __NR_clone 220
+#define __NR_execve 221
+#define __NR_mmap2 222
+#define __NR_fadvise64_64 223
+#define __NR_swapon 224
+#define __NR_swapoff 225
+#define __NR_mprotect 226
+#define __NR_msync 227
+#define __NR_mlock 228
+#define __NR_munlock 229
+#define __NR_mlockall 230
+#define __NR_munlockall 231
+#define __NR_mincore 232
+#define __NR_madvise 233
+#define __NR_remap_file_pages 234
+#define __NR_mbind 235
+#define __NR_get_mempolicy 236
+#define __NR_set_mempolicy 237
+#define __NR_migrate_pages 238
+#define __NR_move_pages 239
+#define __NR_rt_tgsigqueueinfo 240
+#define __NR_perf_event_open 241
+#define __NR_accept4 242
+#define __NR_riscv_hwprobe 258
+#define __NR_riscv_flush_icache 259
+#define __NR_prlimit64 261
+#define __NR_fanotify_init 262
+#define __NR_fanotify_mark 263
+#define __NR_name_to_handle_at 264
+#define __NR_open_by_handle_at 265
+#define __NR_syncfs 267
+#define __NR_setns 268
+#define __NR_sendmmsg 269
+#define __NR_process_vm_readv 270
+#define __NR_process_vm_writev 271
+#define __NR_kcmp 272
+#define __NR_finit_module 273
+#define __NR_sched_setattr 274
+#define __NR_sched_getattr 275
+#define __NR_renameat2 276
+#define __NR_seccomp 277
+#define __NR_getrandom 278
+#define __NR_memfd_create 279
+#define __NR_bpf 280
+#define __NR_execveat 281
+#define __NR_userfaultfd 282
+#define __NR_membarrier 283
+#define __NR_mlock2 284
+#define __NR_copy_file_range 285
+#define __NR_preadv2 286
+#define __NR_pwritev2 287
+#define __NR_pkey_mprotect 288
+#define __NR_pkey_alloc 289
+#define __NR_pkey_free 290
+#define __NR_statx 291
+#define __NR_rseq 293
+#define __NR_kexec_file_load 294
+#define __NR_clock_gettime64 403
+#define __NR_clock_settime64 404
+#define __NR_clock_adjtime64 405
+#define __NR_clock_getres_time64 406
+#define __NR_clock_nanosleep_time64 407
+#define __NR_timer_gettime64 408
+#define __NR_timer_settime64 409
+#define __NR_timerfd_gettime64 410
+#define __NR_timerfd_settime64 411
+#define __NR_utimensat_time64 412
+#define __NR_pselect6_time64 413
+#define __NR_ppoll_time64 414
+#define __NR_io_pgetevents_time64 416
+#define __NR_recvmmsg_time64 417
+#define __NR_mq_timedsend_time64 418
+#define __NR_mq_timedreceive_time64 419
+#define __NR_semtimedop_time64 420
+#define __NR_rt_sigtimedwait_time64 421
+#define __NR_futex_time64 422
+#define __NR_sched_rr_get_interval_time64 423
+#define __NR_pidfd_send_signal 424
+#define __NR_io_uring_setup 425
+#define __NR_io_uring_enter 426
+#define __NR_io_uring_register 427
+#define __NR_open_tree 428
+#define __NR_move_mount 429
+#define __NR_fsopen 430
+#define __NR_fsconfig 431
+#define __NR_fsmount 432
+#define __NR_fspick 433
+#define __NR_pidfd_open 434
+#define __NR_clone3 435
+#define __NR_close_range 436
+#define __NR_openat2 437
+#define __NR_pidfd_getfd 438
+#define __NR_faccessat2 439
+#define __NR_process_madvise 440
+#define __NR_epoll_pwait2 441
+#define __NR_mount_setattr 442
+#define __NR_quotactl_fd 443
+#define __NR_landlock_create_ruleset 444
+#define __NR_landlock_add_rule 445
+#define __NR_landlock_restrict_self 446
+#define __NR_memfd_secret 447
+#define __NR_process_mrelease 448
+#define __NR_futex_waitv 449
+#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
+#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
+#define __NR_statmount 457
+#define __NR_listmount 458
+#define __NR_lsm_get_self_attr 459
+#define __NR_lsm_set_self_attr 460
+#define __NR_lsm_list_modules 461
+#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
+
+
+#endif /* _ASM_UNISTD_32_H */
diff --git a/linux-headers/asm-riscv/unistd_64.h b/linux-headers/asm-riscv/unistd_64.h
new file mode 100644
index 0000000..8df4d64
--- /dev/null
+++ b/linux-headers/asm-riscv/unistd_64.h
@@ -0,0 +1,330 @@
+#ifndef _ASM_UNISTD_64_H
+#define _ASM_UNISTD_64_H
+
+#define __NR_io_setup 0
+#define __NR_io_destroy 1
+#define __NR_io_submit 2
+#define __NR_io_cancel 3
+#define __NR_io_getevents 4
+#define __NR_setxattr 5
+#define __NR_lsetxattr 6
+#define __NR_fsetxattr 7
+#define __NR_getxattr 8
+#define __NR_lgetxattr 9
+#define __NR_fgetxattr 10
+#define __NR_listxattr 11
+#define __NR_llistxattr 12
+#define __NR_flistxattr 13
+#define __NR_removexattr 14
+#define __NR_lremovexattr 15
+#define __NR_fremovexattr 16
+#define __NR_getcwd 17
+#define __NR_lookup_dcookie 18
+#define __NR_eventfd2 19
+#define __NR_epoll_create1 20
+#define __NR_epoll_ctl 21
+#define __NR_epoll_pwait 22
+#define __NR_dup 23
+#define __NR_dup3 24
+#define __NR_fcntl 25
+#define __NR_inotify_init1 26
+#define __NR_inotify_add_watch 27
+#define __NR_inotify_rm_watch 28
+#define __NR_ioctl 29
+#define __NR_ioprio_set 30
+#define __NR_ioprio_get 31
+#define __NR_flock 32
+#define __NR_mknodat 33
+#define __NR_mkdirat 34
+#define __NR_unlinkat 35
+#define __NR_symlinkat 36
+#define __NR_linkat 37
+#define __NR_umount2 39
+#define __NR_mount 40
+#define __NR_pivot_root 41
+#define __NR_nfsservctl 42
+#define __NR_statfs 43
+#define __NR_fstatfs 44
+#define __NR_truncate 45
+#define __NR_ftruncate 46
+#define __NR_fallocate 47
+#define __NR_faccessat 48
+#define __NR_chdir 49
+#define __NR_fchdir 50
+#define __NR_chroot 51
+#define __NR_fchmod 52
+#define __NR_fchmodat 53
+#define __NR_fchownat 54
+#define __NR_fchown 55
+#define __NR_openat 56
+#define __NR_close 57
+#define __NR_vhangup 58
+#define __NR_pipe2 59
+#define __NR_quotactl 60
+#define __NR_getdents64 61
+#define __NR_lseek 62
+#define __NR_read 63
+#define __NR_write 64
+#define __NR_readv 65
+#define __NR_writev 66
+#define __NR_pread64 67
+#define __NR_pwrite64 68
+#define __NR_preadv 69
+#define __NR_pwritev 70
+#define __NR_sendfile 71
+#define __NR_pselect6 72
+#define __NR_ppoll 73
+#define __NR_signalfd4 74
+#define __NR_vmsplice 75
+#define __NR_splice 76
+#define __NR_tee 77
+#define __NR_readlinkat 78
+#define __NR_newfstatat 79
+#define __NR_fstat 80
+#define __NR_sync 81
+#define __NR_fsync 82
+#define __NR_fdatasync 83
+#define __NR_sync_file_range 84
+#define __NR_timerfd_create 85
+#define __NR_timerfd_settime 86
+#define __NR_timerfd_gettime 87
+#define __NR_utimensat 88
+#define __NR_acct 89
+#define __NR_capget 90
+#define __NR_capset 91
+#define __NR_personality 92
+#define __NR_exit 93
+#define __NR_exit_group 94
+#define __NR_waitid 95
+#define __NR_set_tid_address 96
+#define __NR_unshare 97
+#define __NR_futex 98
+#define __NR_set_robust_list 99
+#define __NR_get_robust_list 100
+#define __NR_nanosleep 101
+#define __NR_getitimer 102
+#define __NR_setitimer 103
+#define __NR_kexec_load 104
+#define __NR_init_module 105
+#define __NR_delete_module 106
+#define __NR_timer_create 107
+#define __NR_timer_gettime 108
+#define __NR_timer_getoverrun 109
+#define __NR_timer_settime 110
+#define __NR_timer_delete 111
+#define __NR_clock_settime 112
+#define __NR_clock_gettime 113
+#define __NR_clock_getres 114
+#define __NR_clock_nanosleep 115
+#define __NR_syslog 116
+#define __NR_ptrace 117
+#define __NR_sched_setparam 118
+#define __NR_sched_setscheduler 119
+#define __NR_sched_getscheduler 120
+#define __NR_sched_getparam 121
+#define __NR_sched_setaffinity 122
+#define __NR_sched_getaffinity 123
+#define __NR_sched_yield 124
+#define __NR_sched_get_priority_max 125
+#define __NR_sched_get_priority_min 126
+#define __NR_sched_rr_get_interval 127
+#define __NR_restart_syscall 128
+#define __NR_kill 129
+#define __NR_tkill 130
+#define __NR_tgkill 131
+#define __NR_sigaltstack 132
+#define __NR_rt_sigsuspend 133
+#define __NR_rt_sigaction 134
+#define __NR_rt_sigprocmask 135
+#define __NR_rt_sigpending 136
+#define __NR_rt_sigtimedwait 137
+#define __NR_rt_sigqueueinfo 138
+#define __NR_rt_sigreturn 139
+#define __NR_setpriority 140
+#define __NR_getpriority 141
+#define __NR_reboot 142
+#define __NR_setregid 143
+#define __NR_setgid 144
+#define __NR_setreuid 145
+#define __NR_setuid 146
+#define __NR_setresuid 147
+#define __NR_getresuid 148
+#define __NR_setresgid 149
+#define __NR_getresgid 150
+#define __NR_setfsuid 151
+#define __NR_setfsgid 152
+#define __NR_times 153
+#define __NR_setpgid 154
+#define __NR_getpgid 155
+#define __NR_getsid 156
+#define __NR_setsid 157
+#define __NR_getgroups 158
+#define __NR_setgroups 159
+#define __NR_uname 160
+#define __NR_sethostname 161
+#define __NR_setdomainname 162
+#define __NR_getrlimit 163
+#define __NR_setrlimit 164
+#define __NR_getrusage 165
+#define __NR_umask 166
+#define __NR_prctl 167
+#define __NR_getcpu 168
+#define __NR_gettimeofday 169
+#define __NR_settimeofday 170
+#define __NR_adjtimex 171
+#define __NR_getpid 172
+#define __NR_getppid 173
+#define __NR_getuid 174
+#define __NR_geteuid 175
+#define __NR_getgid 176
+#define __NR_getegid 177
+#define __NR_gettid 178
+#define __NR_sysinfo 179
+#define __NR_mq_open 180
+#define __NR_mq_unlink 181
+#define __NR_mq_timedsend 182
+#define __NR_mq_timedreceive 183
+#define __NR_mq_notify 184
+#define __NR_mq_getsetattr 185
+#define __NR_msgget 186
+#define __NR_msgctl 187
+#define __NR_msgrcv 188
+#define __NR_msgsnd 189
+#define __NR_semget 190
+#define __NR_semctl 191
+#define __NR_semtimedop 192
+#define __NR_semop 193
+#define __NR_shmget 194
+#define __NR_shmctl 195
+#define __NR_shmat 196
+#define __NR_shmdt 197
+#define __NR_socket 198
+#define __NR_socketpair 199
+#define __NR_bind 200
+#define __NR_listen 201
+#define __NR_accept 202
+#define __NR_connect 203
+#define __NR_getsockname 204
+#define __NR_getpeername 205
+#define __NR_sendto 206
+#define __NR_recvfrom 207
+#define __NR_setsockopt 208
+#define __NR_getsockopt 209
+#define __NR_shutdown 210
+#define __NR_sendmsg 211
+#define __NR_recvmsg 212
+#define __NR_readahead 213
+#define __NR_brk 214
+#define __NR_munmap 215
+#define __NR_mremap 216
+#define __NR_add_key 217
+#define __NR_request_key 218
+#define __NR_keyctl 219
+#define __NR_clone 220
+#define __NR_execve 221
+#define __NR_mmap 222
+#define __NR_fadvise64 223
+#define __NR_swapon 224
+#define __NR_swapoff 225
+#define __NR_mprotect 226
+#define __NR_msync 227
+#define __NR_mlock 228
+#define __NR_munlock 229
+#define __NR_mlockall 230
+#define __NR_munlockall 231
+#define __NR_mincore 232
+#define __NR_madvise 233
+#define __NR_remap_file_pages 234
+#define __NR_mbind 235
+#define __NR_get_mempolicy 236
+#define __NR_set_mempolicy 237
+#define __NR_migrate_pages 238
+#define __NR_move_pages 239
+#define __NR_rt_tgsigqueueinfo 240
+#define __NR_perf_event_open 241
+#define __NR_accept4 242
+#define __NR_recvmmsg 243
+#define __NR_riscv_hwprobe 258
+#define __NR_riscv_flush_icache 259
+#define __NR_wait4 260
+#define __NR_prlimit64 261
+#define __NR_fanotify_init 262
+#define __NR_fanotify_mark 263
+#define __NR_name_to_handle_at 264
+#define __NR_open_by_handle_at 265
+#define __NR_clock_adjtime 266
+#define __NR_syncfs 267
+#define __NR_setns 268
+#define __NR_sendmmsg 269
+#define __NR_process_vm_readv 270
+#define __NR_process_vm_writev 271
+#define __NR_kcmp 272
+#define __NR_finit_module 273
+#define __NR_sched_setattr 274
+#define __NR_sched_getattr 275
+#define __NR_renameat2 276
+#define __NR_seccomp 277
+#define __NR_getrandom 278
+#define __NR_memfd_create 279
+#define __NR_bpf 280
+#define __NR_execveat 281
+#define __NR_userfaultfd 282
+#define __NR_membarrier 283
+#define __NR_mlock2 284
+#define __NR_copy_file_range 285
+#define __NR_preadv2 286
+#define __NR_pwritev2 287
+#define __NR_pkey_mprotect 288
+#define __NR_pkey_alloc 289
+#define __NR_pkey_free 290
+#define __NR_statx 291
+#define __NR_io_pgetevents 292
+#define __NR_rseq 293
+#define __NR_kexec_file_load 294
+#define __NR_pidfd_send_signal 424
+#define __NR_io_uring_setup 425
+#define __NR_io_uring_enter 426
+#define __NR_io_uring_register 427
+#define __NR_open_tree 428
+#define __NR_move_mount 429
+#define __NR_fsopen 430
+#define __NR_fsconfig 431
+#define __NR_fsmount 432
+#define __NR_fspick 433
+#define __NR_pidfd_open 434
+#define __NR_clone3 435
+#define __NR_close_range 436
+#define __NR_openat2 437
+#define __NR_pidfd_getfd 438
+#define __NR_faccessat2 439
+#define __NR_process_madvise 440
+#define __NR_epoll_pwait2 441
+#define __NR_mount_setattr 442
+#define __NR_quotactl_fd 443
+#define __NR_landlock_create_ruleset 444
+#define __NR_landlock_add_rule 445
+#define __NR_landlock_restrict_self 446
+#define __NR_memfd_secret 447
+#define __NR_process_mrelease 448
+#define __NR_futex_waitv 449
+#define __NR_set_mempolicy_home_node 450
+#define __NR_cachestat 451
+#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
+#define __NR_statmount 457
+#define __NR_listmount 458
+#define __NR_lsm_get_self_attr 459
+#define __NR_lsm_set_self_attr 460
+#define __NR_lsm_list_modules 461
+#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
+
+
+#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h
index 684c4e1..ab5a6bc 100644
--- a/linux-headers/asm-s390/kvm.h
+++ b/linux-headers/asm-s390/kvm.h
@@ -469,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kdsa[16]; /* with MSA9 */
__u8 sortl[32]; /* with STFLE.150 */
__u8 dfltcc[32]; /* with STFLE.151 */
- __u8 reserved[1728];
+ __u8 pfcr[16]; /* with STFLE.201 */
+ __u8 reserved[1712];
};
#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h
index 7706c21..85eedbd 100644
--- a/linux-headers/asm-s390/unistd_32.h
+++ b/linux-headers/asm-s390/unistd_32.h
@@ -435,5 +435,10 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_S390_UNISTD_32_H */
diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h
index 62082d5..c03b1b9 100644
--- a/linux-headers/asm-s390/unistd_64.h
+++ b/linux-headers/asm-s390/unistd_64.h
@@ -383,5 +383,10 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_S390_UNISTD_64_H */
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index 1c8f918..cd275ae 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -106,6 +106,7 @@ struct kvm_ioapic_state {
#define KVM_RUN_X86_SMM (1 << 0)
#define KVM_RUN_X86_BUS_LOCK (1 << 1)
+#define KVM_RUN_X86_GUEST_MODE (1 << 2)
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
@@ -436,6 +437,9 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
+#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
+#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
+#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@@ -554,6 +558,9 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
+#define KVM_XEN_MSR_MIN_INDEX 0x40000000u
+#define KVM_XEN_MSR_MAX_INDEX 0x4fffffffu
+
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
@@ -836,6 +843,7 @@ struct kvm_sev_snp_launch_start {
};
/* Kept in sync with firmware values for simplicity. */
+#define KVM_SEV_PAGE_TYPE_INVALID 0x0
#define KVM_SEV_SNP_PAGE_TYPE_NORMAL 0x1
#define KVM_SEV_SNP_PAGE_TYPE_ZERO 0x3
#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED 0x4
@@ -920,5 +928,76 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
+#define KVM_X86_TDX_VM 5
+
+/* Trust Domain eXtension sub-ioctl() commands. */
+enum kvm_tdx_cmd_id {
+ KVM_TDX_CAPABILITIES = 0,
+ KVM_TDX_INIT_VM,
+ KVM_TDX_INIT_VCPU,
+ KVM_TDX_INIT_MEM_REGION,
+ KVM_TDX_FINALIZE_VM,
+ KVM_TDX_GET_CPUID,
+
+ KVM_TDX_CMD_NR_MAX,
+};
+
+struct kvm_tdx_cmd {
+ /* enum kvm_tdx_cmd_id */
+ __u32 id;
+	/* flags for sub-command. If the sub-command doesn't use this, set zero. */
+ __u32 flags;
+ /*
+	 * data for each sub-command. Either an immediate value or a pointer
+	 * to the actual data in process virtual address space. If the
+	 * sub-command doesn't use it, set it to zero.
+ */
+ __u64 data;
+ /*
+ * Auxiliary error code. The sub-command may return TDX SEAMCALL
+ * status code in addition to -Exxx.
+ */
+ __u64 hw_error;
+};
+
+struct kvm_tdx_capabilities {
+ __u64 supported_attrs;
+ __u64 supported_xfam;
+ __u64 reserved[254];
+
+ /* Configurable CPUID bits for userspace */
+ struct kvm_cpuid2 cpuid;
+};
+
+struct kvm_tdx_init_vm {
+ __u64 attributes;
+ __u64 xfam;
+ __u64 mrconfigid[6]; /* sha384 digest */
+ __u64 mrowner[6]; /* sha384 digest */
+ __u64 mrownerconfig[6]; /* sha384 digest */
+
+ /* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
+ __u64 reserved[12];
+
+ /*
+ * Call KVM_TDX_INIT_VM before vcpu creation, thus before
+ * KVM_SET_CPUID2.
+	 * This configuration supersedes KVM_SET_CPUID2 for vCPUs because the
+	 * TDX module virtualizes those CPUIDs directly, without the VMM. The
+	 * userspace VMM, e.g. QEMU, should make KVM_SET_CPUID2 consistent with
+	 * these values; if it doesn't, KVM may have a wrong idea of the guest's
+	 * CPUIDs and may wrongly emulate CPUIDs or MSRs that the TDX module
+	 * doesn't virtualize.
+ */
+ struct kvm_cpuid2 cpuid;
+};
+
+#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0)
+
+struct kvm_tdx_init_mem_region {
+ __u64 source_addr;
+ __u64 gpa;
+ __u64 nr_pages;
+};
#endif /* _ASM_X86_KVM_H */
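
The TDX sub-commands above are wrapped in struct kvm_tdx_cmd. A hedged sketch of fetching the TDX capabilities follows; it assumes, as for the other confidential-VM types, that the command is delivered through the KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor, and that the caller has already created a VM of type KVM_X86_TDX_VM and sized `caps` for the CPUID entries it expects back. Those assumptions are not part of this header change.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Sketch: ask KVM for the TDX capabilities of a TDX VM.  `caps` is assumed
     * to be allocated with room for caps->cpuid.nent CPUID entries, and the
     * routing via KVM_MEMORY_ENCRYPT_OP is an assumption about the VM ioctl
     * interface, not something defined by this header hunk.
     */
    static int tdx_get_capabilities(int vm_fd, struct kvm_tdx_capabilities *caps)
    {
        struct kvm_tdx_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.id = KVM_TDX_CAPABILITIES;
        cmd.flags = 0;                          /* this sub-command takes no flags */
        cmd.data = (__u64)(unsigned long)caps;

        /* On failure, cmd.hw_error may hold a TDX SEAMCALL status code. */
        return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
    }
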
diff --git a/linux-headers/asm-x86/mman.h b/linux-headers/asm-x86/mman.h
index 46cdc94..ac1e627 100644
--- a/linux-headers/asm-x86/mman.h
+++ b/linux-headers/asm-x86/mman.h
@@ -5,9 +5,6 @@
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
#define MAP_ABOVE4G 0x80 /* only map above 4GB */
-/* Flags for map_shadow_stack(2) */
-#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
-
#include <asm-generic/mman.h>
#endif /* _ASM_X86_MMAN_H */
diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h
index fb7b8b1..491d6b4 100644
--- a/linux-headers/asm-x86/unistd_32.h
+++ b/linux-headers/asm-x86/unistd_32.h
@@ -453,6 +453,11 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_UNISTD_32_H */
diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h
index da439af..7cf88bf 100644
--- a/linux-headers/asm-x86/unistd_64.h
+++ b/linux-headers/asm-x86/unistd_64.h
@@ -336,6 +336,7 @@
#define __NR_statx 332
#define __NR_io_pgetevents 333
#define __NR_rseq 334
+#define __NR_uretprobe 335
#define __NR_pidfd_send_signal 424
#define __NR_io_uring_setup 425
#define __NR_io_uring_enter 426
@@ -375,6 +376,11 @@
#define __NR_lsm_set_self_attr 460
#define __NR_lsm_list_modules 461
#define __NR_mseal 462
+#define __NR_setxattrat 463
+#define __NR_getxattrat 464
+#define __NR_listxattrat 465
+#define __NR_removexattrat 466
+#define __NR_open_tree_attr 467
#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h
index 4fcb607..8295911 100644
--- a/linux-headers/asm-x86/unistd_x32.h
+++ b/linux-headers/asm-x86/unistd_x32.h
@@ -289,6 +289,7 @@
#define __NR_statx (__X32_SYSCALL_BIT + 332)
#define __NR_io_pgetevents (__X32_SYSCALL_BIT + 333)
#define __NR_rseq (__X32_SYSCALL_BIT + 334)
+#define __NR_uretprobe (__X32_SYSCALL_BIT + 335)
#define __NR_pidfd_send_signal (__X32_SYSCALL_BIT + 424)
#define __NR_io_uring_setup (__X32_SYSCALL_BIT + 425)
#define __NR_io_uring_enter (__X32_SYSCALL_BIT + 426)
@@ -328,6 +329,11 @@
#define __NR_lsm_set_self_attr (__X32_SYSCALL_BIT + 460)
#define __NR_lsm_list_modules (__X32_SYSCALL_BIT + 461)
#define __NR_mseal (__X32_SYSCALL_BIT + 462)
+#define __NR_setxattrat (__X32_SYSCALL_BIT + 463)
+#define __NR_getxattrat (__X32_SYSCALL_BIT + 464)
+#define __NR_listxattrat (__X32_SYSCALL_BIT + 465)
+#define __NR_removexattrat (__X32_SYSCALL_BIT + 466)
+#define __NR_open_tree_attr (__X32_SYSCALL_BIT + 467)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)
diff --git a/linux-headers/linux/bits.h b/linux-headers/linux/bits.h
index d989777..9243f38 100644
--- a/linux-headers/linux/bits.h
+++ b/linux-headers/linux/bits.h
@@ -4,12 +4,11 @@
#ifndef _LINUX_BITS_H
#define _LINUX_BITS_H
-#define __GENMASK(h, l) \
- (((~_UL(0)) - (_UL(1) << (l)) + 1) & \
- (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
+#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
-#define __GENMASK_ULL(h, l) \
- (((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
- (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
#endif /* _LINUX_BITS_H */
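
The rewritten __GENMASK()/__GENMASK_ULL() still build a mask with bits l..h set; only the arithmetic changed to a plain left shift, and __GENMASK_U128() extends the same idea to 128-bit masks via _BIT128(). A small equivalence check, written against plain unsigned long rather than the _UL()/_ULL() wrappers purely for illustration:

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Old formulation: (2^BITS_PER_LONG - 2^l) masked down to bits 0..h. */
    #define GENMASK_OLD(h, l) \
        (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
    /* New formulation: shift the all-ones value left by l instead. */
    #define GENMASK_NEW(h, l) \
        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

    int main(void)
    {
        assert(GENMASK_NEW(7, 4) == 0xf0UL);                 /* bits 4..7 set */
        assert(GENMASK_OLD(7, 4) == GENMASK_NEW(7, 4));
        assert(GENMASK_OLD(31, 0) == GENMASK_NEW(31, 0));
        return 0;
    }
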
diff --git a/linux-headers/linux/const.h b/linux-headers/linux/const.h
index 1eb84b5..95ede23 100644
--- a/linux-headers/linux/const.h
+++ b/linux-headers/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing __asm__ support
+ *
+ * __BIT128() would not work in __asm__ code, as it shifts an
+ * 'unsigned __int128' data type; direct representation of
+ * 128 bit constants is not supported by the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Please revisit this implementation when gcc compiler
+ * starts representing 128 bit constants directly like long
+ * and unsigned long etc. Subsequently drop the comment for
+ * GENMASK_U128() which would then start supporting __asm__ code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h
index 72e8f4b..cb0f7d6 100644
--- a/linux-headers/linux/iommufd.h
+++ b/linux-headers/linux/iommufd.h
@@ -4,8 +4,8 @@
#ifndef _IOMMUFD_H
#define _IOMMUFD_H
-#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
#define IOMMUFD_TYPE (';')
@@ -37,19 +37,25 @@
enum {
IOMMUFD_CMD_BASE = 0x80,
IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE,
- IOMMUFD_CMD_IOAS_ALLOC,
- IOMMUFD_CMD_IOAS_ALLOW_IOVAS,
- IOMMUFD_CMD_IOAS_COPY,
- IOMMUFD_CMD_IOAS_IOVA_RANGES,
- IOMMUFD_CMD_IOAS_MAP,
- IOMMUFD_CMD_IOAS_UNMAP,
- IOMMUFD_CMD_OPTION,
- IOMMUFD_CMD_VFIO_IOAS,
- IOMMUFD_CMD_HWPT_ALLOC,
- IOMMUFD_CMD_GET_HW_INFO,
- IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
- IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
- IOMMUFD_CMD_HWPT_INVALIDATE,
+ IOMMUFD_CMD_IOAS_ALLOC = 0x81,
+ IOMMUFD_CMD_IOAS_ALLOW_IOVAS = 0x82,
+ IOMMUFD_CMD_IOAS_COPY = 0x83,
+ IOMMUFD_CMD_IOAS_IOVA_RANGES = 0x84,
+ IOMMUFD_CMD_IOAS_MAP = 0x85,
+ IOMMUFD_CMD_IOAS_UNMAP = 0x86,
+ IOMMUFD_CMD_OPTION = 0x87,
+ IOMMUFD_CMD_VFIO_IOAS = 0x88,
+ IOMMUFD_CMD_HWPT_ALLOC = 0x89,
+ IOMMUFD_CMD_GET_HW_INFO = 0x8a,
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING = 0x8b,
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP = 0x8c,
+ IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d,
+ IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e,
+ IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f,
+ IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
+ IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
+ IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
+ IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93,
};
/**
@@ -213,6 +219,30 @@ struct iommu_ioas_map {
#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP)
/**
+ * struct iommu_ioas_map_file - ioctl(IOMMU_IOAS_MAP_FILE)
+ * @size: sizeof(struct iommu_ioas_map_file)
+ * @flags: same as for iommu_ioas_map
+ * @ioas_id: same as for iommu_ioas_map
+ * @fd: the memfd to map
+ * @start: byte offset from start of file to map from
+ * @length: same as for iommu_ioas_map
+ * @iova: same as for iommu_ioas_map
+ *
+ * Set an IOVA mapping from a memfd file. All other arguments and semantics
+ * match those of IOMMU_IOAS_MAP.
+ */
+struct iommu_ioas_map_file {
+ __u32 size;
+ __u32 flags;
+ __u32 ioas_id;
+ __s32 fd;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 iova;
+};
+#define IOMMU_IOAS_MAP_FILE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP_FILE)
+
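+
IOMMU_IOAS_MAP_FILE behaves like IOMMU_IOAS_MAP but takes a memfd and a byte offset instead of a user virtual address. A minimal sketch of mapping part of a memfd at a fixed IOVA; the iommufd file descriptor, ioas_id, memfd and IOVA are assumed to exist already, and the flag names come from the existing enum iommufd_ioas_map_flags rather than from this hunk.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /*
     * Sketch: map `length` bytes of a memfd, starting at file offset `start`,
     * at the fixed IOVA `iova` in the IOAS `ioas_id`.
     */
    static int ioas_map_memfd(int iommufd, __u32 ioas_id, int memfd,
                              __u64 start, __u64 length, __u64 iova)
    {
        struct iommu_ioas_map_file map;

        memset(&map, 0, sizeof(map));
        map.size = sizeof(map);
        map.flags = IOMMU_IOAS_MAP_FIXED_IOVA |
                    IOMMU_IOAS_MAP_READABLE |
                    IOMMU_IOAS_MAP_WRITEABLE;
        map.ioas_id = ioas_id;
        map.fd = memfd;
        map.start = start;
        map.length = length;
        map.iova = iova;

        return ioctl(iommufd, IOMMU_IOAS_MAP_FILE, &map);
    }
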
+/**
* struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
* @size: sizeof(struct iommu_ioas_copy)
* @flags: Combination of enum iommufd_ioas_map_flags
@@ -268,7 +298,7 @@ struct iommu_ioas_unmap {
* ioctl(IOMMU_OPTION_HUGE_PAGES)
* @IOMMU_OPTION_RLIMIT_MODE:
* Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
- * to invoke this. Value 0 (default) is user based accouting, 1 uses process
+ * to invoke this. Value 0 (default) is user based accounting, 1 uses process
* based accounting. Global option, object_id must be 0
* @IOMMU_OPTION_HUGE_PAGES:
* Value 1 (default) allows contiguous pages to be combined when generating
@@ -356,10 +386,24 @@ struct iommu_vfio_ioas {
* the parent HWPT in a nesting configuration.
* @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
* enforced on device attachment
+ * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is
+ * valid.
+ * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
+ * domain can be attached to any PASID on the device.
+ * Any domain attached to the non-PASID part of the
+ * device must also be flagged, otherwise attaching a
+ *                          PASID will be blocked.
+ *                          For a user that wants to attach a PASID, an IOAS
+ *                          is not recommended for either the non-PASID part
+ *                          or the PASID part of the device.
+ *                          If the IOMMU does not support PASID, it will
+ *                          return an error (-EOPNOTSUPP).
*/
enum iommufd_hwpt_alloc_flags {
IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
+ IOMMU_HWPT_FAULT_ID_VALID = 1 << 2,
+ IOMMU_HWPT_ALLOC_PASID = 1 << 3,
};
/**
@@ -391,13 +435,35 @@ struct iommu_hwpt_vtd_s1 {
};
/**
+ * struct iommu_hwpt_arm_smmuv3 - ARM SMMUv3 nested STE
+ * (IOMMU_HWPT_DATA_ARM_SMMUV3)
+ *
+ * @ste: The first two double words of the user space Stream Table Entry for
+ * the translation. Must be little-endian.
+ * Allowed fields: (Refer to "5.2 Stream Table Entry" in SMMUv3 HW Spec)
+ * - word-0: V, Cfg, S1Fmt, S1ContextPtr, S1CDMax
+ * - word-1: EATS, S1DSS, S1CIR, S1COR, S1CSH, S1STALLD
+ *
+ * -EIO will be returned if @ste is not legal or contains any non-allowed field.
+ * Cfg can be used to select a S1, Bypass or Abort configuration. A Bypass
+ * nested domain will translate the same as the nesting parent. The S1 will
+ * install a Context Descriptor Table pointing at userspace memory translated
+ * by the nesting parent.
+ */
+struct iommu_hwpt_arm_smmuv3 {
+ __aligned_le64 ste[2];
+};
+
+/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
* @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
+ * @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table
*/
enum iommu_hwpt_data_type {
- IOMMU_HWPT_DATA_NONE,
- IOMMU_HWPT_DATA_VTD_S1,
+ IOMMU_HWPT_DATA_NONE = 0,
+ IOMMU_HWPT_DATA_VTD_S1 = 1,
+ IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
};
/**
@@ -405,12 +471,15 @@ enum iommu_hwpt_data_type {
* @size: sizeof(struct iommu_hwpt_alloc)
* @flags: Combination of enum iommufd_hwpt_alloc_flags
* @dev_id: The device to allocate this HWPT for
- * @pt_id: The IOAS or HWPT to connect this HWPT to
+ * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
* @data_type: One of enum iommu_hwpt_data_type
* @data_len: Length of the type specific data
* @data_uptr: User pointer to the type specific data
+ * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of
+ * IOMMU_HWPT_FAULT_ID_VALID is set.
+ * @__reserved2: Padding to 64-bit alignment. Must be 0.
*
* Explicitly allocate a hardware page table object. This is the same object
* type that is returned by iommufd_device_attach() and represents the
@@ -421,11 +490,13 @@ enum iommu_hwpt_data_type {
* IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
* nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
*
- * A user-managed nested HWPT will be created from a given parent HWPT via
- * @pt_id, in which the parent HWPT must be allocated previously via the
- * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
- * must be set to a pre-defined type corresponding to an I/O page table
- * type supported by the underlying IOMMU hardware.
+ * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
+ * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be
+ * allocated previously via the same ioctl from a given IOAS (@pt_id). In this
+ * case, the @data_type must be set to a pre-defined type corresponding to an
+ * I/O page table type supported by the underlying IOMMU hardware. The device
+ * via @dev_id and the vIOMMU via @pt_id must be associated to the same IOMMU
+ * instance.
*
* If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
* @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
@@ -441,6 +512,8 @@ struct iommu_hwpt_alloc {
__u32 data_type;
__u32 data_len;
__aligned_u64 data_uptr;
+ __u32 fault_id;
+ __u32 __reserved2;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
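
With the new tail fields, a nested HWPT allocation can also reference a fault object for I/O page fault delivery. A sketch of allocating an ARM SMMUv3 nested HWPT from a vIOMMU or nesting parent; dev_id, pt_id, the fault_id obtained from IOMMU_FAULT_QUEUE_ALLOC and the STE words are caller-supplied assumptions.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /*
     * Sketch: allocate a user-managed nested HWPT for an ARM SMMUv3 device.
     * `pt_id` is the vIOMMU (or nesting parent HWPT) ID and `fault_id` the ID
     * of a fault object; both are assumed to exist already.
     */
    static int hwpt_alloc_nested_smmuv3(int iommufd, __u32 dev_id, __u32 pt_id,
                                        __u32 fault_id,
                                        const struct iommu_hwpt_arm_smmuv3 *ste,
                                        __u32 *out_hwpt_id)
    {
        struct iommu_hwpt_alloc alloc;
        int ret;

        memset(&alloc, 0, sizeof(alloc));
        alloc.size = sizeof(alloc);
        alloc.flags = IOMMU_HWPT_FAULT_ID_VALID;
        alloc.dev_id = dev_id;
        alloc.pt_id = pt_id;
        alloc.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
        alloc.data_len = sizeof(*ste);
        alloc.data_uptr = (__u64)(unsigned long)ste;
        alloc.fault_id = fault_id;

        ret = ioctl(iommufd, IOMMU_HWPT_ALLOC, &alloc);
        if (!ret) {
            *out_hwpt_id = alloc.out_hwpt_id;
        }
        return ret;
    }
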
@@ -476,14 +549,58 @@ struct iommu_hw_info_vtd {
};
/**
+ * struct iommu_hw_info_arm_smmuv3 - ARM SMMUv3 hardware information
+ * (IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
+ *
+ * @flags: Must be set to 0
+ * @__reserved: Must be 0
+ * @idr: Implemented features for ARM SMMU Non-secure programming interface
+ * @iidr: Information about the implementation and implementer of ARM SMMU,
+ * and architecture version supported
+ * @aidr: ARM SMMU architecture version
+ *
+ * For the details of @idr, @iidr and @aidr, please refer to the chapters
+ * from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
+ *
+ * This reports the raw HW capability, and not all bits are meaningful to be
+ * read by userspace. Only the following fields should be used:
+ *
+ * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN, CD2L, ASID16, TTF
+ * idr[1]: SIDSIZE, SSIDSIZE
+ * idr[3]: BBML, RIL
+ * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
+ *
+ * - S1P should be assumed to be true if a NESTED HWPT can be created
+ * - VFIO/iommufd only support platforms with COHACC, so it should be assumed
+ *   to be true.
+ * - ATS is a per-device property. If the VMM describes any devices as ATS
+ * capable in ACPI/DT it should set the corresponding idr.
+ *
+ * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
+ * important that VMMs do not read bits outside the list to allow for
+ * compatibility with future kernels. Several features in the SMMUv3
+ * architecture are not currently supported by the kernel for nesting: HTTU,
+ * BTM, MPAM and others.
+ */
+struct iommu_hw_info_arm_smmuv3 {
+ __u32 flags;
+ __u32 __reserved;
+ __u32 idr[6];
+ __u32 iidr;
+ __u32 aidr;
+};
+
+/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
* @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
* info
* @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
*/
enum iommu_hw_info_type {
- IOMMU_HW_INFO_TYPE_NONE,
- IOMMU_HW_INFO_TYPE_INTEL_VTD,
+ IOMMU_HW_INFO_TYPE_NONE = 0,
+ IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
+ IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
};
/**
@@ -495,9 +612,17 @@ enum iommu_hw_info_type {
* IOMMU_HWPT_GET_DIRTY_BITMAP
* IOMMU_HWPT_SET_DIRTY_TRACKING
*
+ * @IOMMU_HW_CAP_PCI_PASID_EXEC: Execute Permission Supported; the user should
+ *                               ignore it when the struct
+ *                               iommu_hw_info::out_max_pasid_log2 is zero.
+ * @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported; the user should
+ *                               ignore it when the struct
+ *                               iommu_hw_info::out_max_pasid_log2 is zero.
*/
enum iommufd_hw_capabilities {
IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+ IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1,
+ IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2,
};
/**
@@ -513,6 +638,9 @@ enum iommufd_hw_capabilities {
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
* in the enum iommu_hw_capabilities.
+ * @out_max_pasid_log2: Output the width of PASIDs. 0 means no PASID support.
+ *                      PCI devices should consult out_capabilities to check
+ *                      whether a specific capability is supported or not.
* @__reserved: Must be 0
*
* Query an iommu type specific hardware information data from an iommu behind
@@ -536,7 +664,8 @@ struct iommu_hw_info {
__u32 data_len;
__aligned_u64 data_uptr;
__u32 out_data_type;
- __u32 __reserved;
+ __u8 out_max_pasid_log2;
+ __u8 __reserved[3];
__aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
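
Userspace is expected to look at out_max_pasid_log2 first and only then interpret the PASID capability bits. A sketch under the assumption that the leading fields of struct iommu_hw_info (size, flags, dev_id, data_len, data_uptr) keep their existing layout, which this hunk does not show.

    #include <stdbool.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /*
     * Sketch: query hardware info for `dev_id` and report whether the device's
     * IOMMU supports privileged-mode PASID requests.  No type-specific data
     * buffer is requested (data_len stays 0).
     */
    static bool dev_has_priv_pasid(int iommufd, __u32 dev_id)
    {
        struct iommu_hw_info info;

        memset(&info, 0, sizeof(info));
        info.size = sizeof(info);
        info.dev_id = dev_id;

        if (ioctl(iommufd, IOMMU_GET_HW_INFO, &info)) {
            return false;
        }
        if (!info.out_max_pasid_log2) {
            return false;   /* no PASID support at all; ignore the cap bits */
        }
        return !!(info.out_capabilities & IOMMU_HW_CAP_PCI_PASID_PRIV);
    }
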
@@ -618,9 +747,11 @@ struct iommu_hwpt_get_dirty_bitmap {
* enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
*/
enum iommu_hwpt_invalidate_data_type {
- IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+ IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
};
/**
@@ -660,9 +791,31 @@ struct iommu_hwpt_vtd_s1_invalidate {
};
/**
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
+ * Must be little-endian.
+ *
+ * Supported command list only when passing in a vIOMMU via @hwpt_id:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ * CMDQ_OP_ATC_INV
+ * CMDQ_OP_CFGI_CD
+ * CMDQ_OP_CFGI_CD_ALL
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+struct iommu_viommu_arm_smmuv3_invalidate {
+ __aligned_le64 cmd[2];
+};
+
+/**
* struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
* @size: sizeof(struct iommu_hwpt_invalidate)
- * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
* @data_uptr: User pointer to an array of driver-specific cache invalidation
* data.
* @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
@@ -673,8 +826,11 @@ struct iommu_hwpt_vtd_s1_invalidate {
* Output the number of requests successfully handled by kernel.
* @__reserved: Must be 0.
*
- * Invalidate the iommu cache for user-managed page table. Modifications on a
- * user-managed page table should be followed by this operation to sync cache.
+ * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications
+ * on a user-managed page table should be followed by this operation if a HWPT
+ * is passed in via @hwpt_id. Other caches, such as the device cache or
+ * descriptor cache, can be flushed if a vIOMMU is passed in via @hwpt_id.
+ *
* Each ioctl can support one or more cache invalidation requests in the array
* that has a total size of @entry_len * @entry_num.
*
@@ -692,4 +848,297 @@ struct iommu_hwpt_invalidate {
__u32 __reserved;
};
#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
+
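+
Invalidation batches an array of driver-specific entries behind a single ioctl, and with a vIOMMU in @hwpt_id the same call can also flush device and descriptor caches. A sketch of pushing SMMUv3 CMDQ commands through a vIOMMU; the entry_len/entry_num/data_uptr field names follow the kernel-doc above, and the remainder of struct iommu_hwpt_invalidate is assumed unchanged from the existing header.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /*
     * Sketch: hand `n` little-endian SMMUv3 CMDQ commands to the kernel for a
     * vIOMMU (or nested HWPT) identified by `id`.  On return, entry_num tells
     * how many entries were actually handled.
     */
    static int viommu_invalidate_smmuv3(int iommufd, __u32 id,
                                        struct iommu_viommu_arm_smmuv3_invalidate *cmds,
                                        __u32 n, __u32 *handled)
    {
        struct iommu_hwpt_invalidate inv;
        int ret;

        memset(&inv, 0, sizeof(inv));
        inv.size = sizeof(inv);
        inv.hwpt_id = id;
        inv.data_uptr = (__u64)(unsigned long)cmds;
        inv.data_type = IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3;
        inv.entry_len = sizeof(*cmds);
        inv.entry_num = n;

        ret = ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &inv);
        *handled = inv.entry_num;   /* updated by the kernel on success and error */
        return ret;
    }
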
+/**
+ * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault
+ * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is
+ * valid.
+ * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group.
+ */
+enum iommu_hwpt_pgfault_flags {
+ IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0),
+ IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1),
+};
+
+/**
+ * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault
+ * @IOMMU_PGFAULT_PERM_READ: request for read permission
+ * @IOMMU_PGFAULT_PERM_WRITE: request for write permission
+ * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the
+ * Execute Requested bit set in PASID TLP Prefix.
+ * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the
+ * Privileged Mode Requested bit set in PASID TLP
+ * Prefix.
+ */
+enum iommu_hwpt_pgfault_perm {
+ IOMMU_PGFAULT_PERM_READ = (1 << 0),
+ IOMMU_PGFAULT_PERM_WRITE = (1 << 1),
+ IOMMU_PGFAULT_PERM_EXEC = (1 << 2),
+ IOMMU_PGFAULT_PERM_PRIV = (1 << 3),
+};
+
+/**
+ * struct iommu_hwpt_pgfault - iommu page fault data
+ * @flags: Combination of enum iommu_hwpt_pgfault_flags
+ * @dev_id: id of the originated device
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: Combination of enum iommu_hwpt_pgfault_perm
+ * @__reserved: Must be 0.
+ * @addr: Fault address
+ * @length: a hint of how much data the requestor is expecting to fetch. For
+ * example, if the PRI initiator knows it is going to do a 10MB
+ * transfer, it could fill in 10MB and the OS could pre-fault in
+ *          10MB of IOVA. It defaults to 0 if there is no such hint.
+ * @cookie: kernel-managed cookie identifying a group of fault messages. The
+ * cookie number encoded in the last page fault of the group should
+ * be echoed back in the response message.
+ */
+struct iommu_hwpt_pgfault {
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pasid;
+ __u32 grpid;
+ __u32 perm;
+ __u32 __reserved;
+ __aligned_u64 addr;
+ __u32 length;
+ __u32 cookie;
+};
+
+/**
+ * enum iommufd_page_response_code - Return status of fault handlers
+ * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is the
+ * "Success" defined in PCI 10.4.2.1.
+ * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is the "Invalid Request" in PCI
+ * 10.4.2.1.
+ */
+enum iommufd_page_response_code {
+ IOMMUFD_PAGE_RESP_SUCCESS = 0,
+ IOMMUFD_PAGE_RESP_INVALID = 1,
+};
+
+/**
+ * struct iommu_hwpt_page_response - IOMMU page fault response
+ * @cookie: The kernel-managed cookie reported in the fault message.
+ * @code: One of response code in enum iommufd_page_response_code.
+ */
+struct iommu_hwpt_page_response {
+ __u32 cookie;
+ __u32 code;
+};
+
+/**
+ * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC)
+ * @size: sizeof(struct iommu_fault_alloc)
+ * @flags: Must be 0
+ * @out_fault_id: The ID of the new FAULT
+ * @out_fault_fd: The fd of the new FAULT
+ *
+ * Explicitly allocate a fault handling object.
+ */
+struct iommu_fault_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 out_fault_id;
+ __u32 out_fault_fd;
+};
+#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC)
+
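+
The fault object exposes I/O page faults on a file descriptor: fault records are read() as struct iommu_hwpt_pgfault and answered by write()ing a struct iommu_hwpt_page_response that echoes the cookie of the last fault in the group. A sketch of allocating the object and rejecting one fault group; the read()/write() framing of the fault fd is an assumption about the existing interface rather than something spelled out in this hunk, and the always-reject policy is purely illustrative.

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Sketch: allocate a fault object, returning its ID and event fd. */
    static int fault_queue_alloc(int iommufd, __u32 *fault_id, int *fault_fd)
    {
        struct iommu_fault_alloc cmd;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.size = sizeof(cmd);

        ret = ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
        if (!ret) {
            *fault_id = cmd.out_fault_id;
            *fault_fd = cmd.out_fault_fd;
        }
        return ret;
    }

    /* Sketch: reject one fault group.  Read faults until LAST_PAGE, then echo
     * its cookie back with an "Invalid Request" response code. */
    static void fault_reject_one_group(int fault_fd)
    {
        struct iommu_hwpt_pgfault fault;
        struct iommu_hwpt_page_response resp;

        do {
            if (read(fault_fd, &fault, sizeof(fault)) != (ssize_t)sizeof(fault)) {
                return;
            }
        } while (!(fault.flags & IOMMU_PGFAULT_FLAGS_LAST_PAGE));

        memset(&resp, 0, sizeof(resp));
        resp.cookie = fault.cookie;
        resp.code = IOMMUFD_PAGE_RESP_INVALID;
        (void)write(fault_fd, &resp, sizeof(resp));
    }
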
+/**
+ * enum iommu_viommu_type - Virtual IOMMU Type
+ * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ */
+enum iommu_viommu_type {
+ IOMMU_VIOMMU_TYPE_DEFAULT = 0,
+ IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC)
+ * @size: sizeof(struct iommu_viommu_alloc)
+ * @flags: Must be 0
+ * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type
+ * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
+ * @hwpt_id: ID of a nesting parent HWPT to associate to
+ * @out_viommu_id: Output virtual IOMMU ID for the allocated object
+ *
+ * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
+ * virtualization support that is a security-isolated slice of the real IOMMU HW
+ * that is unique to a specific VM. Operations global to the IOMMU are connected
+ * to the vIOMMU, such as:
+ * - Security namespace for guest owned ID, e.g. guest-controlled cache tags
+ * - Non-device-affiliated event reporting, e.g. invalidation queue errors
+ * - Access to a sharable nesting parent pagetable across physical IOMMUs
+ * - Virtualization of various platform IDs, e.g. RIDs and others
+ * - Delivery of paravirtualized invalidation
+ * - Direct assigned invalidation queues
+ * - Direct assigned interrupts
+ */
+struct iommu_viommu_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 type;
+ __u32 dev_id;
+ __u32 hwpt_id;
+ __u32 out_viommu_id;
+};
+#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
+
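
A sketch of the allocation flow the comment above describes, with hypothetical IDs; a real caller would have obtained dev_id from binding the device to the iommufd and hwpt_id from a nesting-parent IOMMU_HWPT_ALLOC:

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Allocate an ARM SMMUv3 vIOMMU backed by dev_id's physical IOMMU. */
    static int viommu_alloc(int iommufd, __u32 dev_id, __u32 parent_hwpt_id,
                            __u32 *out_viommu_id)
    {
        struct iommu_viommu_alloc cmd = {
            .size = sizeof(cmd),
            .type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
            .dev_id = dev_id,
            .hwpt_id = parent_hwpt_id,
        };

        if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd))
            return -1;
        *out_viommu_id = cmd.out_viommu_id;
        return 0;
    }
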
+/**
+ * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC)
+ * @size: sizeof(struct iommu_vdevice_alloc)
+ * @viommu_id: vIOMMU ID to associate with the virtual device
+ * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
+ * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTROY
+ * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
+ * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
+ *
+ * Allocate a virtual device instance (for a physical device) against a vIOMMU.
+ * This instance holds the device's information (related to its vIOMMU) in a VM.
+ */
+struct iommu_vdevice_alloc {
+ __u32 size;
+ __u32 viommu_id;
+ __u32 dev_id;
+ __u32 out_vdevice_id;
+ __aligned_u64 virt_id;
+};
+#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC)
+
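
A companion sketch for the vDEVICE allocation above; the guest vSID used as virt_id is purely illustrative:

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Create the device's virtual instance, identified by its guest vSID, on a vIOMMU. */
    static int vdevice_alloc(int iommufd, __u32 viommu_id, __u32 dev_id,
                             __u64 guest_vsid, __u32 *out_vdevice_id)
    {
        struct iommu_vdevice_alloc cmd = {
            .size = sizeof(cmd),
            .viommu_id = viommu_id,
            .dev_id = dev_id,
            .virt_id = guest_vsid,
        };

        if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &cmd))
            return -1;
        *out_vdevice_id = cmd.out_vdevice_id;
        return 0;
    }
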
+/**
+ * struct iommu_ioas_change_process - ioctl(VFIO_IOAS_CHANGE_PROCESS)
+ * @size: sizeof(struct iommu_ioas_change_process)
+ * @__reserved: Must be 0
+ *
+ * This transfers pinned memory counts for every memory map in every IOAS
+ * in the context to the current process. This only supports maps created
+ * with IOMMU_IOAS_MAP_FILE, and returns EINVAL if other maps are present.
+ * If the ioctl returns a failure status, then nothing is changed.
+ *
+ * This API is useful for transferring operation of a device from one process
+ * to another, such as during userland live update.
+ */
+struct iommu_ioas_change_process {
+ __u32 size;
+ __u32 __reserved;
+};
+
+#define IOMMU_IOAS_CHANGE_PROCESS \
+ _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
+
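
The live-update hand-over described above reduces to one argument-less call issued by the new process; a sketch, assuming the iommufd context fd has already been passed over (e.g. via SCM_RIGHTS):

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Re-account pinned memory of all IOMMU_IOAS_MAP_FILE maps to this process. */
    static int ioas_change_process(int iommufd)
    {
        struct iommu_ioas_change_process cmd = { .size = sizeof(cmd) };

        return ioctl(iommufd, IOMMU_IOAS_CHANGE_PROCESS, &cmd);
    }
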
+/**
+ * enum iommu_veventq_flag - flag for struct iommufd_vevent_header
+ * @IOMMU_VEVENTQ_FLAG_LOST_EVENTS: vEVENTQ has lost vEVENTs
+ */
+enum iommu_veventq_flag {
+ IOMMU_VEVENTQ_FLAG_LOST_EVENTS = (1U << 0),
+};
+
+/**
+ * struct iommufd_vevent_header - Virtual Event Header for a vEVENTQ Status
+ * @flags: Combination of enum iommu_veventq_flag
+ * @sequence: The sequence index of a vEVENT in the vEVENTQ, with a range of
+ * [0, INT_MAX] where the following index of INT_MAX is 0
+ *
+ * Each iommufd_vevent_header reports a sequence index of the following vEVENT:
+ *
+ * +----------------------+-------+----------------------+-------+---+-------+
+ * | header0 {sequence=0} | data0 | header1 {sequence=1} | data1 |...| dataN |
+ * +----------------------+-------+----------------------+-------+---+-------+
+ *
+ * This sequence index is expected to increase monotonically from the sequence
+ * index of the previous vEVENT. If two adjacent sequence indexes have a delta
+ * larger than 1, then delta - 1 vEVENTs have been lost, e.g. two lost vEVENTs:
+ *
+ * +-----+----------------------+-------+----------------------+-------+-----+
+ * | ... | header3 {sequence=3} | data3 | header6 {sequence=6} | data6 | ... |
+ * +-----+----------------------+-------+----------------------+-------+-----+
+ *
+ * If a vEVENT is lost at the tail of the vEVENTQ and there is no following vEVENT
+ * providing the next sequence index, an IOMMU_VEVENTQ_FLAG_LOST_EVENTS header
+ * would be added to the tail, and no data would follow this header:
+ *
+ * +--+----------------------+-------+-----------------------------------------+
+ * |..| header3 {sequence=3} | data3 | header4 {flags=LOST_EVENTS, sequence=4} |
+ * +--+----------------------+-------+-----------------------------------------+
+ */
+struct iommufd_vevent_header {
+ __u32 flags;
+ __u32 sequence;
+};
+
+/**
+ * enum iommu_veventq_type - Virtual Event Queue Type
+ * @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue
+ */
+enum iommu_veventq_type {
+ IOMMU_VEVENTQ_TYPE_DEFAULT = 0,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_vevent_arm_smmuv3 - ARM SMMUv3 Virtual Event
+ * (IOMMU_VEVENTQ_TYPE_ARM_SMMUV3)
+ * @evt: 256-bit ARM SMMUv3 Event record, little-endian.
+ * Reported event records: (Refer to "7.3 Event records" in SMMUv3 HW Spec)
+ * - 0x04 C_BAD_STE
+ * - 0x06 F_STREAM_DISABLED
+ * - 0x08 C_BAD_SUBSTREAMID
+ * - 0x0a C_BAD_CD
+ * - 0x10 F_TRANSLATION
+ * - 0x11 F_ADDR_SIZE
+ * - 0x12 F_ACCESS
+ * - 0x13 F_PERMISSION
+ *
+ * The StreamID field reports a virtual device ID. To receive a virtual event for a
+ * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC.
+ */
+struct iommu_vevent_arm_smmuv3 {
+ __aligned_le64 evt[4];
+};
+
+/**
+ * struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC)
+ * @size: sizeof(struct iommu_veventq_alloc)
+ * @flags: Must be 0
+ * @viommu_id: virtual IOMMU ID to associate the vEVENTQ with
+ * @type: Type of the vEVENTQ. Must be defined in enum iommu_veventq_type
+ * @veventq_depth: Maximum number of events in the vEVENTQ
+ * @out_veventq_id: The ID of the new vEVENTQ
+ * @out_veventq_fd: The fd of the new vEVENTQ. User space must close the
+ * successfully returned fd after using it
+ * @__reserved: Must be 0
+ *
+ * Explicitly allocate a virtual event queue interface for a vIOMMU. A vIOMMU
+ * can have multiple FDs for different types, but is confined to one per @type.
+ * User space should open the @out_veventq_fd to read vEVENTs out of a vEVENTQ,
+ * if there are vEVENTs available. A vEVENTQ will lose events due to overflow
+ * if the number of queued vEVENTs hits @veventq_depth.
+ *
+ * Each vEVENT in a vEVENTQ encloses a struct iommufd_vevent_header followed by
+ * a type-specific data structure, in a normal case:
+ *
+ * +-+---------+-------+---------+-------+-----+---------+-------+-+
+ * | | header0 | data0 | header1 | data1 | ... | headerN | dataN | |
+ * +-+---------+-------+---------+-------+-----+---------+-------+-+
+ *
+ * unless a trailing IOMMU_VEVENTQ_FLAG_LOST_EVENTS header is logged (refer to
+ * struct iommufd_vevent_header).
+ */
+struct iommu_veventq_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 viommu_id;
+ __u32 type;
+ __u32 veventq_depth;
+ __u32 out_veventq_id;
+ __u32 out_veventq_fd;
+ __u32 __reserved;
+};
+#define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC)
#endif
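
Tying the vEVENTQ pieces together, a sketch that allocates an ARM SMMUv3 vEVENTQ on an existing vIOMMU and drains it, flagging lost events either from a sequence gap or from a trailing LOST_EVENTS header; the depth and the assumption that the first sequence index is 0 are illustrative:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static void drain_veventq(int iommufd, __u32 viommu_id)
    {
        struct iommu_veventq_alloc cmd = {
            .size = sizeof(cmd),
            .viommu_id = viommu_id,
            .type = IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
            .veventq_depth = 128,
        };
        struct {
            struct iommufd_vevent_header hdr;
            struct iommu_vevent_arm_smmuv3 evt;
        } rec;
        __u32 expected = 0;

        if (ioctl(iommufd, IOMMU_VEVENTQ_ALLOC, &cmd))
            return;

        /* A LOST_EVENTS header at the tail arrives with no data after it. */
        while (read(cmd.out_veventq_fd, &rec, sizeof(rec)) >= (ssize_t)sizeof(rec.hdr)) {
            if ((rec.hdr.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS) ||
                rec.hdr.sequence != expected) {
                fprintf(stderr, "vEVENTs lost around sequence %u\n", rec.hdr.sequence);
            }
            expected = rec.hdr.sequence + 1; /* wraps to 0 after INT_MAX */
            /* rec.evt now holds one 256-bit SMMUv3 event record. */
        }
        close(cmd.out_veventq_fd);
    }
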
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index c93876c..0690743 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -178,6 +178,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
#define KVM_EXIT_MEMORY_FAULT 39
+#define KVM_EXIT_TDX 40
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -192,11 +193,20 @@ struct kvm_xen_exit {
/* Flags that describe what fields in emulation_failure hold valid data. */
#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+/*
+ * struct kvm_run can be modified by userspace at any time, so KVM must be
+ * careful to avoid TOCTOU bugs. In order to protect KVM, HINT_UNSAFE_IN_KVM()
+ * renames fields in struct kvm_run from <symbol> to <symbol>__unsafe when
+ * compiled into the kernel, ensuring that any use within KVM is obvious and
+ * gets extra scrutiny.
+ */
+#define HINT_UNSAFE_IN_KVM(_symbol) _symbol
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
__u8 request_interrupt_window;
- __u8 immediate_exit;
+ __u8 HINT_UNSAFE_IN_KVM(immediate_exit);
__u8 padding1[6];
/* out */
@@ -360,6 +370,7 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_WAKEUP 4
#define KVM_SYSTEM_EVENT_SUSPEND 5
#define KVM_SYSTEM_EVENT_SEV_TERM 6
+#define KVM_SYSTEM_EVENT_TDX_FATAL 7
__u32 type;
__u32 ndata;
union {
@@ -429,6 +440,27 @@ struct kvm_run {
__u64 gpa;
__u64 size;
} memory_fault;
+ /* KVM_EXIT_TDX */
+ struct {
+ __u64 flags;
+ __u64 nr;
+ union {
+ struct {
+ __u64 ret;
+ __u64 data[5];
+ } unknown;
+ struct {
+ __u64 ret;
+ __u64 gpa;
+ __u64 size;
+ } get_quote;
+ struct {
+ __u64 ret;
+ __u64 leaf;
+ __u64 r11, r12, r13, r14;
+ } get_tdvmcall_info;
+ };
+ } tdx;
/* Fix the size of the union. */
char padding[256];
};
@@ -600,10 +632,6 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
- KVM_X86_DISABLE_EXITS_HLT | \
- KVM_X86_DISABLE_EXITS_PAUSE | \
- KVM_X86_DISABLE_EXITS_CSTATE)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -913,6 +941,13 @@ struct kvm_enable_cap {
#define KVM_CAP_MEMORY_ATTRIBUTES 233
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
+#define KVM_CAP_PRE_FAULT_MEMORY 236
+#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
+#define KVM_CAP_X86_GUEST_MODE 238
+#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
+#define KVM_CAP_ARM_EL2 240
+#define KVM_CAP_ARM_EL2_E2H0 241
+#define KVM_CAP_RISCV_MP_STATE_RESET 242
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1050,6 +1085,10 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
#define KVM_REG_SIZE_U8 0x0000000000000000ULL
#define KVM_REG_SIZE_U16 0x0010000000000000ULL
#define KVM_REG_SIZE_U32 0x0020000000000000ULL
@@ -1138,7 +1177,15 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_RISCV_AIA,
#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
+ KVM_DEV_TYPE_LOONGARCH_IPI,
+#define KVM_DEV_TYPE_LOONGARCH_IPI KVM_DEV_TYPE_LOONGARCH_IPI
+ KVM_DEV_TYPE_LOONGARCH_EIOINTC,
+#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC
+ KVM_DEV_TYPE_LOONGARCH_PCHPIC,
+#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC
+
KVM_DEV_TYPE_MAX,
+
};
struct kvm_vfio_spapr_tce {
@@ -1544,4 +1591,13 @@ struct kvm_create_guest_memfd {
__u64 reserved[6];
};
+#define KVM_PRE_FAULT_MEMORY _IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory)
+
+struct kvm_pre_fault_memory {
+ __u64 gpa;
+ __u64 size;
+ __u64 flags;
+ __u64 padding[5];
+};
+
#endif /* __LINUX_KVM_H */
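
The KVM_PRE_FAULT_MEMORY ioctl added above takes a guest-physical range to populate ahead of first access; a minimal sketch, assuming it is issued on a vCPU fd and that the kernel advances gpa/size on partial progress as documented upstream:

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Pre-populate mappings for [gpa, gpa + size) before the guest first touches them. */
    static int prefault_range(int vcpu_fd, __u64 gpa, __u64 size)
    {
        struct kvm_pre_fault_memory range = {
            .gpa = gpa,
            .size = size,
        };
        int ret;

        do {
            ret = ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range);
        } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

        return ret;
    }
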
diff --git a/linux-headers/linux/mman.h b/linux-headers/linux/mman.h
index 4e8cb60..2b83059 100644
--- a/linux-headers/linux/mman.h
+++ b/linux-headers/linux/mman.h
@@ -17,6 +17,7 @@
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#define MAP_DROPPABLE 0x08 /* Zero memory under memory pressure. */
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
diff --git a/linux-headers/linux/psci.h b/linux-headers/linux/psci.h
index 74f3cb5..a982afd 100644
--- a/linux-headers/linux/psci.h
+++ b/linux-headers/linux/psci.h
@@ -59,6 +59,7 @@
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19)
#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20)
+#define PSCI_1_3_FN_SYSTEM_OFF2 PSCI_0_2_FN(21)
#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
@@ -68,6 +69,7 @@
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20)
+#define PSCI_1_3_FN64_SYSTEM_OFF2 PSCI_0_2_FN64(21)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -100,6 +102,9 @@
#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0
#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U
+/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */
+#define PSCI_1_3_OFF_TYPE_HIBERNATE_OFF BIT(0)
+
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h
index c3046c6..113c4ce 100644
--- a/linux-headers/linux/psp-sev.h
+++ b/linux-headers/linux/psp-sev.h
@@ -31,6 +31,7 @@ enum {
SNP_PLATFORM_STATUS,
SNP_COMMIT,
SNP_SET_CONFIG,
+ SNP_VLEK_LOAD,
SEV_MAX,
};
@@ -50,6 +51,7 @@ typedef enum {
SEV_RET_INVALID_PLATFORM_STATE,
SEV_RET_INVALID_GUEST_STATE,
SEV_RET_INAVLID_CONFIG,
+ SEV_RET_INVALID_CONFIG = SEV_RET_INAVLID_CONFIG,
SEV_RET_INVALID_LEN,
SEV_RET_ALREADY_OWNED,
SEV_RET_INVALID_CERTIFICATE,
@@ -71,13 +73,20 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
- SEV_RET_INVALID_KEY = 0x27,
- SEV_RET_INVALID_PAGE_SIZE,
- SEV_RET_INVALID_PAGE_STATE,
- SEV_RET_INVALID_MDATA_ENTRY,
- SEV_RET_INVALID_PAGE_OWNER,
- SEV_RET_INVALID_PAGE_AEAD_OFLOW,
- SEV_RET_RMP_INIT_REQUIRED,
+ SEV_RET_INVALID_PAGE_SIZE = 0x0019,
+ SEV_RET_INVALID_PAGE_STATE = 0x001A,
+ SEV_RET_INVALID_MDATA_ENTRY = 0x001B,
+ SEV_RET_INVALID_PAGE_OWNER = 0x001C,
+ SEV_RET_AEAD_OFLOW = 0x001D,
+ SEV_RET_EXIT_RING_BUFFER = 0x001F,
+ SEV_RET_RMP_INIT_REQUIRED = 0x0020,
+ SEV_RET_BAD_SVN = 0x0021,
+ SEV_RET_BAD_VERSION = 0x0022,
+ SEV_RET_SHUTDOWN_REQUIRED = 0x0023,
+ SEV_RET_UPDATE_FAILED = 0x0024,
+ SEV_RET_RESTORE_REQUIRED = 0x0025,
+ SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026,
+ SEV_RET_INVALID_KEY = 0x0027,
SEV_RET_MAX,
} sev_ret_code;
@@ -215,6 +224,32 @@ struct sev_user_data_snp_config {
} __attribute__((packed));
/**
+ * struct sev_data_snp_vlek_load - SNP_VLEK_LOAD structure
+ *
+ * @len: length of the command buffer read by the PSP
+ * @vlek_wrapped_version: version of wrapped VLEK hashstick (Must be 0h)
+ * @rsvd: reserved
+ * @vlek_wrapped_address: address of a wrapped VLEK hashstick
+ * (struct sev_user_data_snp_wrapped_vlek_hashstick)
+ */
+struct sev_user_data_snp_vlek_load {
+ __u32 len; /* In */
+ __u8 vlek_wrapped_version; /* In */
+ __u8 rsvd[3]; /* In */
+ __u64 vlek_wrapped_address; /* In */
+} __attribute__((packed));
+
+/**
+ * struct sev_user_data_snp_wrapped_vlek_hashstick - Wrapped VLEK data
+ *
+ * @data: Opaque data provided by AMD KDS (as described in SEV-SNP Firmware ABI
+ * 1.54, SNP_VLEK_LOAD)
+ */
+struct sev_user_data_snp_wrapped_vlek_hashstick {
+ __u8 data[432]; /* In */
+} __attribute__((packed));
+
+/**
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
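
For the SNP_VLEK_LOAD command documented above, a sketch of driving it through the existing /dev/sev SEV_ISSUE_CMD interface; the wrapped hashstick is assumed to have been fetched from AMD KDS already:

    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    /* Load a KDS-wrapped VLEK hashstick into the PSP. */
    static int snp_vlek_load(int sev_fd,
                             struct sev_user_data_snp_wrapped_vlek_hashstick *hs)
    {
        struct sev_user_data_snp_vlek_load data = {
            .len = sizeof(data),
            .vlek_wrapped_version = 0,
            .vlek_wrapped_address = (__u64)(unsigned long)hs,
        };
        struct sev_issue_cmd arg = {
            .cmd = SNP_VLEK_LOAD,
            .data = (__u64)(unsigned long)&data,
        };

        return ioctl(sev_fd, SEV_ISSUE_CMD, &arg);
    }
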
diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h
index 96aa341..e1fcfcf 100644
--- a/linux-headers/linux/stddef.h
+++ b/linux-headers/linux/stddef.h
@@ -8,6 +8,13 @@
#define __always_inline __inline__
#endif
+/* Not all C++ standards support type declarations inside an anonymous union */
+#ifndef __cplusplus
+#define __struct_group_tag(TAG) TAG
+#else
+#define __struct_group_tag(TAG)
+#endif
+
/**
 * __struct_group() - Create a mirrored named and anonymous struct
*
@@ -20,13 +27,13 @@
* and size: one anonymous and one named. The former's members can be used
* normally without sub-struct naming, and the latter can be used to
* reason about the start, end, and size of the group of struct members.
- * The named struct can also be explicitly tagged for layer reuse, as well
- * as both having struct attributes appended.
+ * The named struct can also be explicitly tagged for layer reuse (C only),
+ * as well as both having struct attributes appended.
*/
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
union { \
struct { MEMBERS } ATTRS; \
- struct TAG { MEMBERS } ATTRS NAME; \
+ struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
} ATTRS
#ifdef __cplusplus
@@ -63,4 +70,6 @@
#define __counted_by_be(m)
#endif
+#define __kernel_nonstring
+
#endif /* _LINUX_STDDEF_H */
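
The effect of the __struct_group() change above is easiest to see with a small, hypothetical user: the anonymous copy keeps flat member access working, the named copy can be handled as one unit, and under C++ the tag now simply disappears:

    #include <string.h>
    #include <linux/stddef.h>

    struct packet {
        __struct_group(packet_hdr, hdr, /* no attrs */,
            unsigned int src;
            unsigned int dst;
        );
        unsigned char payload[64];
    };

    static void copy_header(struct packet *to, const struct packet *from)
    {
        to->src = from->src;                           /* flat access via the anonymous struct */
        memcpy(&to->hdr, &from->hdr, sizeof(to->hdr)); /* grouped access via the named copy */
    }
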
diff --git a/linux-headers/linux/vduse.h b/linux-headers/linux/vduse.h
index 6d2ca06..f46269a 100644
--- a/linux-headers/linux/vduse.h
+++ b/linux-headers/linux/vduse.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
#ifndef _VDUSE_H_
#define _VDUSE_H_
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index b4be37b..79bf8c0 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -35,7 +35,7 @@
#define VFIO_EEH 5
/* Two-stage IOMMU */
-#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
#define VFIO_SPAPR_TCE_v2_IOMMU 7
@@ -671,6 +671,7 @@ enum {
*/
enum {
VFIO_AP_REQ_IRQ_INDEX,
+ VFIO_AP_CFG_CHG_IRQ_INDEX,
VFIO_AP_NUM_IRQS
};
@@ -931,29 +932,34 @@ struct vfio_device_bind_iommufd {
* VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
* struct vfio_device_attach_iommufd_pt)
* @argsz: User filled size of this data.
- * @flags: Must be 0.
+ * @flags: Flags for attach.
* @pt_id: Input the target id which can represent an ioas or a hwpt
* allocated via iommufd subsystem.
* Output the input ioas id or the attached hwpt id which could
* be the specified hwpt itself or a hwpt automatically created
* for the specified ioas by kernel during the attachment.
+ * @pasid: The pasid to be attached, only meaningful when
+ * VFIO_DEVICE_ATTACH_PASID is set in @flags
*
* Associate the device with an address space within the bound iommufd.
* Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
* allowed on cdev fds.
*
- * If a vfio device is currently attached to a valid hw_pagetable, without doing
- * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
- * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
- * as a hw_pagetable replacement, will replace the device's currently attached
- * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
+ * If a vfio device or a pasid of this device is currently attached to a valid
+ * hw_pagetable (hwpt), without doing a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hwpt id is allowed.
+ * This action, also known as a hw_pagetable replacement, will replace the
+ * currently attached hwpt of the device or the pasid of this device with a new
+ * hwpt corresponding to the given pt_id.
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_attach_iommufd_pt {
__u32 argsz;
__u32 flags;
+#define VFIO_DEVICE_ATTACH_PASID (1 << 0)
__u32 pt_id;
+ __u32 pasid;
};
#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
@@ -962,17 +968,21 @@ struct vfio_device_attach_iommufd_pt {
* VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
* struct vfio_device_detach_iommufd_pt)
* @argsz: User filled size of this data.
- * @flags: Must be 0.
+ * @flags: Flags for detach.
+ * @pasid: The pasid to be detached, only meaningful when
+ * VFIO_DEVICE_DETACH_PASID is set in @flags
*
- * Remove the association of the device and its current associated address
- * space. After it, the device should be in a blocking DMA state. This is only
- * allowed on cdev fds.
+ * Remove the association between the device, or a pasid of the device, and its
+ * currently associated address space. Afterwards, the device or the pasid
+ * should be in a blocking DMA state. This is only allowed on cdev fds.
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_detach_iommufd_pt {
__u32 argsz;
__u32 flags;
+#define VFIO_DEVICE_DETACH_PASID (1 << 0)
+ __u32 pasid;
};
#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
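
A sketch of exercising the new PASID attach path above from a VFIO cdev fd, assuming pt_id names a hwpt or IOAS previously created through iommufd and that the pasid value is one the device has been configured to use:

    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Attach one PASID of a cdev-opened device to an iommufd page table. */
    static int attach_pasid(int device_fd, __u32 pt_id, __u32 pasid)
    {
        struct vfio_device_attach_iommufd_pt attach = {
            .argsz = sizeof(attach),
            .flags = VFIO_DEVICE_ATTACH_PASID,
            .pt_id = pt_id,
            .pasid = pasid,
        };

        return ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
    }
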
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index b95dd84..d4b3e2a 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -28,10 +28,10 @@
/* Set current process as the (exclusive) owner of this file descriptor. This
* must be called before any other vhost command. Further calls to
- * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */
+ * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
/* Give up ownership, and reset the device to default values.
- * Allows subsequent call to VHOST_OWNER_SET to succeed. */
+ * Allows subsequent call to VHOST_SET_OWNER to succeed. */
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
/* Set up/modify memory layout */
diff --git a/linux-user/aarch64/Makefile.vdso b/linux-user/aarch64/Makefile.vdso
index 5999581..c33a679 100644
--- a/linux-user/aarch64/Makefile.vdso
+++ b/linux-user/aarch64/Makefile.vdso
@@ -5,8 +5,9 @@ VPATH += $(SUBDIR)
all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so
-LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \
- -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld
+LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both \
+ -Wl,-z,max-page-size=4096 -Wl,-T,$(SUBDIR)/vdso.ld
$(SUBDIR)/vdso-be.so: vdso.S vdso.ld
$(CC) -o $@ $(LDFLAGS) -mbig-endian $<
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index 71cdc8b..fea43ce 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -20,61 +20,13 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "qemu/guest-random.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"
#include "target/arm/cpu-features.h"
-#define get_user_code_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_code_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define put_user_data_u32(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap32(__x); \
- } \
- put_user_u32(__x, (gaddr)); \
- })
-
-#define put_user_data_u16(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap16(__x); \
- } \
- put_user_u16(__x, (gaddr)); \
- })
-
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
@@ -185,7 +137,7 @@ void cpu_loop(CPUARMState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
diff --git a/linux-user/aarch64/meson.build b/linux-user/aarch64/meson.build
index f75bb3c..f25a67a 100644
--- a/linux-user/aarch64/meson.build
+++ b/linux-user/aarch64/meson.build
@@ -11,3 +11,9 @@ vdso_le_inc = gen_vdso.process('vdso-le.so',
linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [vdso_be_inc, vdso_le_inc])
linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [files('mte_user_helper.c')])
+
+syscall_nr_generators += {
+ 'aarch64': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/aarch64/mte_user_helper.h b/linux-user/aarch64/mte_user_helper.h
index 8685e51..0c53abd 100644
--- a/linux-user/aarch64/mte_user_helper.h
+++ b/linux-user/aarch64/mte_user_helper.h
@@ -9,6 +9,8 @@
#ifndef AARCH64_MTE_USER_HELPER_H
 #define AARCH64_MTE_USER_HELPER_H
+#include "user/abitypes.h"
+
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT 1
# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
diff --git a/linux-user/aarch64/syscall_64.tbl b/linux-user/aarch64/syscall_64.tbl
new file mode 100644
index 0000000..845e24e
--- /dev/null
+++ b/linux-user/aarch64/syscall_64.tbl
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers; don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/aarch64/syscall_nr.h b/linux-user/aarch64/syscall_nr.h
index 12ef002..760302c 100644
--- a/linux-user/aarch64/syscall_nr.h
+++ b/linux-user/aarch64/syscall_nr.h
@@ -1,313 +1 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_AARCH64_SYSCALL_NR_H
-#define LINUX_USER_AARCH64_SYSCALL_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_renameat 38
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs 43
-#define TARGET_NR_fstatfs 44
-#define TARGET_NR_truncate 45
-#define TARGET_NR_ftruncate 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_lseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_newfstatat 79
-#define TARGET_NR_fstat 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap 222
-#define TARGET_NR_fadvise64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_io_pgetevents 292
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_clone3 435
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
-
-#endif /* LINUX_USER_AARCH64_SYSCALL_NR_H */
+#include "syscall_64_nr.h"
diff --git a/linux-user/aarch64/syscallhdr.sh b/linux-user/aarch64/syscallhdr.sh
new file mode 100644
index 0000000..dd6b586
--- /dev/null
+++ b/linux-user/aarch64/syscallhdr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=LINUX_USER_AARCH64_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ echo "#ifndef ${fileguard}"
+ echo "#define ${fileguard} 1"
+ echo ""
+
+ while read nr abi name entry compat; do
+ if [ -z "$offset" ]; then
+ echo "#define TARGET_NR_${prefix}${name} $nr"
+ else
+ echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)"
+ fi
+ done
+
+ echo ""
+ echo "#endif /* ${fileguard} */"
+) > "$out"
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index 40e399d..6f66a50 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so
index 808206a..d43c3b1 100755
--- a/linux-user/aarch64/vdso-be.so
+++ b/linux-user/aarch64/vdso-be.so
Binary files differ
diff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so
index 941aaf2..aaedc9d 100755
--- a/linux-user/aarch64/vdso-le.so
+++ b/linux-user/aarch64/vdso-le.so
Binary files differ
diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c
index 2ea039a..80ad536 100644
--- a/linux-user/alpha/cpu_loop.c
+++ b/linux-user/alpha/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
void cpu_loop(CPUAlphaState *env)
@@ -173,7 +173,7 @@ void cpu_loop(CPUAlphaState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
diff --git a/linux-user/alpha/syscall.tbl b/linux-user/alpha/syscall.tbl
index 3000a2e..54ee7aa 100644
--- a/linux-user/alpha/syscall.tbl
+++ b/linux-user/alpha/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for alpha
#
@@ -125,8 +125,8 @@
116 common osf_gettimeofday sys_osf_gettimeofday
117 common osf_getrusage sys_osf_getrusage
118 common getsockopt sys_getsockopt
-120 common readv sys_osf_readv
-121 common writev sys_osf_writev
+120 common readv sys_readv
+121 common writev sys_writev
122 common osf_settimeofday sys_osf_settimeofday
123 common fchown sys_fchown
124 common fchmod sys_fchmod
@@ -230,7 +230,7 @@
259 common osf_swapctl sys_ni_syscall
260 common osf_memcntl sys_ni_syscall
261 common osf_fdatasync sys_ni_syscall
-300 common bdflush sys_bdflush
+300 common bdflush sys_ni_syscall
301 common sethae sys_sethae
302 common mount sys_mount
303 common old_adjtimex sys_old_adjtimex
@@ -334,7 +334,7 @@
401 common io_submit sys_io_submit
402 common io_cancel sys_io_cancel
405 common exit_group sys_exit_group
-406 common lookup_dcookie sys_lookup_dcookie
+406 common lookup_dcookie sys_ni_syscall
407 common epoll_create sys_epoll_create
408 common epoll_ctl sys_epoll_ctl
409 common epoll_wait sys_epoll_wait
@@ -474,7 +474,7 @@
542 common fsmount sys_fsmount
543 common fspick sys_fspick
544 common pidfd_open sys_pidfd_open
-# 545 reserved for clone3
+545 common clone3 alpha_clone3
546 common close_range sys_close_range
547 common openat2 sys_openat2
548 common pidfd_getfd sys_pidfd_getfd
@@ -482,7 +482,23 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
-# 553 reserved for quotactl_path
+553 common quotactl_fd sys_quotactl_fd
554 common landlock_create_ruleset sys_landlock_create_ruleset
555 common landlock_add_rule sys_landlock_add_rule
556 common landlock_restrict_self sys_landlock_restrict_self
+# 557 reserved for memfd_secret
+558 common process_mrelease sys_process_mrelease
+559 common futex_waitv sys_futex_waitv
+560 common set_mempolicy_home_node sys_ni_syscall
+561 common cachestat sys_cachestat
+562 common fchmodat2 sys_fchmodat2
+563 common map_shadow_stack sys_map_shadow_stack
+564 common futex_wake sys_futex_wake
+565 common futex_wait sys_futex_wait
+566 common futex_requeue sys_futex_requeue
+567 common statmount sys_statmount
+568 common listmount sys_listmount
+569 common lsm_get_self_attr sys_lsm_get_self_attr
+570 common lsm_set_self_attr sys_lsm_set_self_attr
+571 common lsm_list_modules sys_lsm_list_modules
+572 common mseal sys_mseal
diff --git a/linux-user/alpha/syscallhdr.sh b/linux-user/alpha/syscallhdr.sh
index 55cafe6..6da0c95 100644
--- a/linux-user/alpha/syscallhdr.sh
+++ b/linux-user/alpha/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/alpha/target_proc.h b/linux-user/alpha/target_proc.h
index dac37df..da437ee 100644
--- a/linux-user/alpha/target_proc.h
+++ b/linux-user/alpha/target_proc.h
@@ -15,7 +15,7 @@ static int open_cpuinfo(CPUArchState *cpu_env, int fd)
const char *p, *q;
int t;
- p = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(env_cpu(cpu_env))));
+ p = object_class_get_name(OBJECT_CLASS(env_cpu(cpu_env)->cc));
q = strchr(p, '-');
t = q - p;
assert(t < sizeof(model));
diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso
index 2d098a5..ede489e 100644
--- a/linux-user/arm/Makefile.vdso
+++ b/linux-user/arm/Makefile.vdso
@@ -3,15 +3,18 @@ include $(BUILD_DIR)/tests/tcg/arm-linux-user/config-target.mak
SUBDIR = $(SRC_PATH)/linux-user/arm
VPATH += $(SUBDIR)
-all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so
+all: $(SUBDIR)/vdso-be8.so $(SUBDIR)/vdso-be32.so $(SUBDIR)/vdso-le.so
# Adding -use-blx disables unneeded interworking without actually using blx.
-LDFLAGS = -nostdlib -shared -Wl,-use-blx \
+LDFLAGS = -nostdlib -shared -Wl,-use-blx -Wl,-z,max-page-size=4096 \
-Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \
-Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld
-$(SUBDIR)/vdso-be.so: vdso.S vdso.ld vdso-asmoffset.h
- $(CC) -o $@ $(LDFLAGS) -mbig-endian $<
+$(SUBDIR)/vdso-be8.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe8 $<
+
+$(SUBDIR)/vdso-be32.so: vdso.S vdso.ld vdso-asmoffset.h
+ $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe32 $<
$(SUBDIR)/vdso-le.so: vdso.S vdso.ld vdso-asmoffset.h
$(CC) -o $@ $(LDFLAGS) -mlittle-endian $<
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
index ec66586..33f6395 100644
--- a/linux-user/arm/cpu_loop.c
+++ b/linux-user/arm/cpu_loop.c
@@ -21,10 +21,12 @@
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
+#include "user/page-protection.h"
#include "target/arm/syndrome.h"
#define get_user_code_u32(x, gaddr, env) \
@@ -35,45 +37,10 @@
__r; \
})
-#define get_user_code_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define put_user_data_u32(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap32(__x); \
- } \
- put_user_u32(__x, (gaddr)); \
- })
-
-#define put_user_data_u16(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap16(__x); \
- } \
- put_user_u16(__x, (gaddr)); \
- })
+/*
+ * Note that if we need to do data accesses here, they should do a
+ * bswap if arm_cpu_bswap_data() returns true.
+ */
/*
* Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
@@ -396,6 +363,7 @@ void cpu_loop(CPUARMState *env)
switch (n) {
case ARM_NR_cacheflush:
/* nop */
+ env->regs[0] = 0;
break;
case ARM_NR_set_tls:
cpu_set_tls(env, env->regs[0]);
@@ -512,7 +480,7 @@ void cpu_loop(CPUARMState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = get_task_state(cpu);
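
The replacement comment in cpu_loop.c only states the rule; the deleted macros show its concrete shape. Should a data accessor ever be needed here again, it would look like the removed get_user_data_u32 (a sketch restating the deleted lines, using the same QEMU helpers get_user_u32, bswap32 and arm_cpu_bswap_data):

    #define get_user_data_u32(x, gaddr, env)                          \
        ({ abi_long __r = get_user_u32((x), (gaddr));                 \
           /* swap if the guest's data endianness differs from how    \
            * QEMU stores guest memory */                             \
           if (!__r && arm_cpu_bswap_data(env)) {                     \
               (x) = bswap32(x);                                      \
           }                                                          \
           __r;                                                       \
        })
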
diff --git a/linux-user/arm/meson.build b/linux-user/arm/meson.build
index c4bb9af..348ffb8 100644
--- a/linux-user/arm/meson.build
+++ b/linux-user/arm/meson.build
@@ -10,10 +10,17 @@ syscall_nr_generators += {
# is always true as far as source_set.apply() is concerned. Always build
# both header files and include the right one via #if.
-vdso_be_inc = gen_vdso.process('vdso-be.so',
- extra_args: ['-s', 'sigreturn_codes'])
+vdso_be8_inc = gen_vdso.process('vdso-be8.so',
+ extra_args: ['-s', 'sigreturn_codes',
+ '-p', 'vdso_be8'])
+
+vdso_be32_inc = gen_vdso.process('vdso-be32.so',
+ extra_args: ['-s', 'sigreturn_codes',
+ '-p', 'vdso_be32'])
vdso_le_inc = gen_vdso.process('vdso-le.so',
extra_args: ['-s', 'sigreturn_codes'])
-linux_user_ss.add(when: 'TARGET_ARM', if_true: [vdso_be_inc, vdso_le_inc])
+linux_user_ss.add(when: 'TARGET_ARM', if_true: [
+ vdso_be8_inc, vdso_be32_inc, vdso_le_inc
+])
diff --git a/linux-user/arm/nwfpe/fpa11.c b/linux-user/arm/nwfpe/fpa11.c
index 9a93610..0f1afbd 100644
--- a/linux-user/arm/nwfpe/fpa11.c
+++ b/linux-user/arm/nwfpe/fpa11.c
@@ -51,6 +51,29 @@ void resetFPA11(void)
#ifdef MAINTAIN_FPCR
fpa11->fpcr = MASK_RESET;
#endif
+
+ /*
+ * Real FPA11 hardware does not handle NaNs, but always takes an
+ * exception for them to be software-emulated (ARM7500FE datasheet
+ * section 10.4). There is no documented architectural requirement
+ * for NaN propagation rules and it will depend on how the OS
+ * level software emulation opted to do it. We here use prop_s_ab
+ * which matches the later VFP hardware choice and how QEMU's
+ * fpa11 emulation has worked in the past. The real Linux kernel
+ * does something slightly different: arch/arm/nwfpe/softfloat-specialize
+ * propagateFloat64NaN() has the curious behaviour that it prefers
+ * the QNaN over the SNaN, but if both are QNaN it picks A and
+ * if both are SNaN it picks B. In theory we could add this as
+ * a NaN propagation rule, but in practice FPA11 emulation is so
+ * close to totally dead that it's not worth trying to match it at
+ * this late date.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &fpa11->fp_status);
+ /*
+ * Use the same default NaN value as Arm VFP. This doesn't match
+ * the Linux kernel's nwfpe emulation, which uses an all-1s value.
+ */
+ set_float_default_nan_pattern(0b01000000, &fpa11->fp_status);
}
void SetRoundingMode(const unsigned int opcode)
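
The float_2nan_prop_s_ab rule chosen in the new comment prefers a signaling NaN and, on a tie, operand A; the Linux nwfpe behaviour it deliberately does not copy prefers the quiet NaN, picking A when both are quiet and B when both are signaling. A standalone sketch of the two selections (plain C, function names illustrative only, both operands assumed to already be NaNs):

    #include <stdbool.h>

    /* QEMU/VFP-style "s_ab": signaling NaN wins, A before B. */
    static int pick_2nan_s_ab(bool a_is_snan, bool b_is_snan)
    {
        if (a_is_snan) {
            return 0;               /* propagate operand A */
        }
        return b_is_snan ? 1 : 0;   /* else B if it signals, else A */
    }

    /* Linux nwfpe behaviour per the comment above: quiet NaN wins. */
    static int pick_2nan_nwfpe(bool a_is_snan, bool b_is_snan)
    {
        if (!a_is_snan && b_is_snan) {
            return 0;               /* A is the quiet one */
        }
        if (a_is_snan && !b_is_snan) {
            return 1;               /* B is the quiet one */
        }
        return a_is_snan ? 1 : 0;   /* both quiet: A; both signaling: B */
    }
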
diff --git a/linux-user/arm/syscall.tbl b/linux-user/arm/syscall.tbl
index 28e03b5..23c9820 100644
--- a/linux-user/arm/syscall.tbl
+++ b/linux-user/arm/syscall.tbl
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# Linux system call numbers and entry vectors
#
@@ -147,7 +148,7 @@
131 common quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
136 common personality sys_personality
# 137 was sys_afs_syscall
@@ -263,10 +264,10 @@
246 common io_submit sys_io_submit
247 common io_cancel sys_io_cancel
248 common exit_group sys_exit_group
-249 common lookup_dcookie sys_lookup_dcookie
+249 common lookup_dcookie sys_ni_syscall
250 common epoll_create sys_epoll_create
251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl
-252 common epoll_wait sys_epoll_wait sys_oabi_epoll_wait
+252 common epoll_wait sys_epoll_wait
253 common remap_file_pages sys_remap_file_pages
# 254 for set_thread_area
# 255 for get_thread_area
@@ -456,7 +457,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/arm/syscallhdr.sh b/linux-user/arm/syscallhdr.sh
index 4c952b2..692fd6a 100644
--- a/linux-user/arm/syscallhdr.sh
+++ b/linux-user/arm/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h
index 0e6351d..ff1810b 100644
--- a/linux-user/arm/target_signal.h
+++ b/linux-user/arm/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/arm/vdso-be.so b/linux-user/arm/vdso-be.so
deleted file mode 100755
index 69cafbb..0000000
--- a/linux-user/arm/vdso-be.so
+++ /dev/null
Binary files differ
diff --git a/linux-user/arm/vdso-be32.so b/linux-user/arm/vdso-be32.so
new file mode 100755
index 0000000..b896d3d
--- /dev/null
+++ b/linux-user/arm/vdso-be32.so
Binary files differ
diff --git a/linux-user/arm/vdso-be8.so b/linux-user/arm/vdso-be8.so
new file mode 100755
index 0000000..784b7bd
--- /dev/null
+++ b/linux-user/arm/vdso-be8.so
Binary files differ
diff --git a/linux-user/arm/vdso-le.so b/linux-user/arm/vdso-le.so
index ad05a12..38d3d51 100755
--- a/linux-user/arm/vdso-le.so
+++ b/linux-user/arm/vdso-le.so
Binary files differ
diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h
deleted file mode 100644
index e644d2e..0000000
--- a/linux-user/cpu_loop-common.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * qemu user cpu loop
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef CPU_LOOP_COMMON_H
-#define CPU_LOOP_COMMON_H
-
-#include "exec/log.h"
-#include "special-errno.h"
-
-void target_exception_dump(CPUArchState *env, const char *fmt, int code);
-#define EXCP_DUMP(env, fmt, code) \
- target_exception_dump(env, fmt, code)
-
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs);
-#endif
diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c
deleted file mode 100644
index 04c9086..0000000
--- a/linux-user/cris/cpu_loop.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * qemu user cpu loop
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu.h"
-#include "user-internals.h"
-#include "cpu_loop-common.h"
-#include "signal-common.h"
-
-void cpu_loop(CPUCRISState *env)
-{
- CPUState *cs = env_cpu(env);
- int trapnr, ret;
-
- while (1) {
- cpu_exec_start(cs);
- trapnr = cpu_exec(cs);
- cpu_exec_end(cs);
- process_queued_cpu_work(cs);
-
- switch (trapnr) {
- case EXCP_INTERRUPT:
- /* just indicate that signals should be handled asap */
- break;
- case EXCP_BREAK:
- ret = do_syscall(env,
- env->regs[9],
- env->regs[10],
- env->regs[11],
- env->regs[12],
- env->regs[13],
- env->pregs[7],
- env->pregs[11],
- 0, 0);
- if (ret == -QEMU_ERESTARTSYS) {
- env->pc -= 2;
- } else if (ret != -QEMU_ESIGRETURN) {
- env->regs[10] = ret;
- }
- break;
- case EXCP_DEBUG:
- force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
- break;
- case EXCP_ATOMIC:
- cpu_exec_step_atomic(cs);
- break;
- default:
- fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
- cpu_dump_state(cs, stderr, 0);
- exit(EXIT_FAILURE);
- }
- process_pending_signals (env);
- }
-}
-
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
-{
- CPUState *cpu = env_cpu(env);
- TaskState *ts = get_task_state(cpu);
- struct image_info *info = ts->info;
-
- env->regs[0] = regs->r0;
- env->regs[1] = regs->r1;
- env->regs[2] = regs->r2;
- env->regs[3] = regs->r3;
- env->regs[4] = regs->r4;
- env->regs[5] = regs->r5;
- env->regs[6] = regs->r6;
- env->regs[7] = regs->r7;
- env->regs[8] = regs->r8;
- env->regs[9] = regs->r9;
- env->regs[10] = regs->r10;
- env->regs[11] = regs->r11;
- env->regs[12] = regs->r12;
- env->regs[13] = regs->r13;
- env->regs[14] = info->start_stack;
- env->regs[15] = regs->acr;
- env->pc = regs->erp;
-}
diff --git a/linux-user/cris/signal.c b/linux-user/cris/signal.c
deleted file mode 100644
index 10948bc..0000000
--- a/linux-user/cris/signal.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Emulation of Linux signals
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "qemu.h"
-#include "user-internals.h"
-#include "signal-common.h"
-#include "linux-user/trace.h"
-
-struct target_sigcontext {
- struct target_pt_regs regs; /* needs to be first */
- uint32_t oldmask;
- uint32_t usp; /* usp before stacking this gunk on it */
-};
-
-/* Signal frames. */
-struct target_signal_frame {
- struct target_sigcontext sc;
- uint32_t extramask[TARGET_NSIG_WORDS - 1];
- uint16_t retcode[4]; /* Trampoline code. */
-};
-
-static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
-{
- __put_user(env->regs[0], &sc->regs.r0);
- __put_user(env->regs[1], &sc->regs.r1);
- __put_user(env->regs[2], &sc->regs.r2);
- __put_user(env->regs[3], &sc->regs.r3);
- __put_user(env->regs[4], &sc->regs.r4);
- __put_user(env->regs[5], &sc->regs.r5);
- __put_user(env->regs[6], &sc->regs.r6);
- __put_user(env->regs[7], &sc->regs.r7);
- __put_user(env->regs[8], &sc->regs.r8);
- __put_user(env->regs[9], &sc->regs.r9);
- __put_user(env->regs[10], &sc->regs.r10);
- __put_user(env->regs[11], &sc->regs.r11);
- __put_user(env->regs[12], &sc->regs.r12);
- __put_user(env->regs[13], &sc->regs.r13);
- __put_user(env->regs[14], &sc->usp);
- __put_user(env->regs[15], &sc->regs.acr);
- __put_user(env->pregs[PR_MOF], &sc->regs.mof);
- __put_user(env->pregs[PR_SRP], &sc->regs.srp);
- __put_user(env->pc, &sc->regs.erp);
-}
-
-static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
-{
- __get_user(env->regs[0], &sc->regs.r0);
- __get_user(env->regs[1], &sc->regs.r1);
- __get_user(env->regs[2], &sc->regs.r2);
- __get_user(env->regs[3], &sc->regs.r3);
- __get_user(env->regs[4], &sc->regs.r4);
- __get_user(env->regs[5], &sc->regs.r5);
- __get_user(env->regs[6], &sc->regs.r6);
- __get_user(env->regs[7], &sc->regs.r7);
- __get_user(env->regs[8], &sc->regs.r8);
- __get_user(env->regs[9], &sc->regs.r9);
- __get_user(env->regs[10], &sc->regs.r10);
- __get_user(env->regs[11], &sc->regs.r11);
- __get_user(env->regs[12], &sc->regs.r12);
- __get_user(env->regs[13], &sc->regs.r13);
- __get_user(env->regs[14], &sc->usp);
- __get_user(env->regs[15], &sc->regs.acr);
- __get_user(env->pregs[PR_MOF], &sc->regs.mof);
- __get_user(env->pregs[PR_SRP], &sc->regs.srp);
- __get_user(env->pc, &sc->regs.erp);
-}
-
-static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
-{
- abi_ulong sp;
- /* Align the stack downwards to 4. */
- sp = (env->regs[R_SP] & ~3);
- return sp - framesize;
-}
-
-static void setup_sigreturn(uint16_t *retcode)
-{
- /* This is movu.w __NR_sigreturn, r9; break 13; */
- __put_user(0x9c5f, retcode + 0);
- __put_user(TARGET_NR_sigreturn, retcode + 1);
- __put_user(0xe93d, retcode + 2);
-}
-
-void setup_frame(int sig, struct target_sigaction *ka,
- target_sigset_t *set, CPUCRISState *env)
-{
- struct target_signal_frame *frame;
- abi_ulong frame_addr;
- int i;
-
- frame_addr = get_sigframe(env, sizeof *frame);
- trace_user_setup_frame(env, frame_addr);
- if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
- goto badframe;
-
- /*
- * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
- * use this trampoline anymore but it sets it up for GDB.
- */
- setup_sigreturn(frame->retcode);
-
- /* Save the mask. */
- __put_user(set->sig[0], &frame->sc.oldmask);
-
- for(i = 1; i < TARGET_NSIG_WORDS; i++) {
- __put_user(set->sig[i], &frame->extramask[i - 1]);
- }
-
- setup_sigcontext(&frame->sc, env);
-
- /* Move the stack and setup the arguments for the handler. */
- env->regs[R_SP] = frame_addr;
- env->regs[10] = sig;
- env->pc = (unsigned long) ka->_sa_handler;
- /* Link SRP so the guest returns through the trampoline. */
- env->pregs[PR_SRP] = default_sigreturn;
-
- unlock_user_struct(frame, frame_addr, 1);
- return;
-badframe:
- force_sigsegv(sig);
-}
-
-void setup_rt_frame(int sig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set, CPUCRISState *env)
-{
- qemu_log_mask(LOG_UNIMP, "setup_rt_frame: not implemented\n");
-}
-
-long do_sigreturn(CPUCRISState *env)
-{
- struct target_signal_frame *frame;
- abi_ulong frame_addr;
- target_sigset_t target_set;
- sigset_t set;
- int i;
-
- frame_addr = env->regs[R_SP];
- trace_user_do_sigreturn(env, frame_addr);
- /* Make sure the guest isn't playing games. */
- if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
- goto badframe;
- }
-
- /* Restore blocked signals */
- __get_user(target_set.sig[0], &frame->sc.oldmask);
- for(i = 1; i < TARGET_NSIG_WORDS; i++) {
- __get_user(target_set.sig[i], &frame->extramask[i - 1]);
- }
- target_to_host_sigset_internal(&set, &target_set);
- set_sigmask(&set);
-
- restore_sigcontext(&frame->sc, env);
- unlock_user_struct(frame, frame_addr, 0);
- return -QEMU_ESIGRETURN;
-badframe:
- force_sig(TARGET_SIGSEGV);
- return -QEMU_ESIGRETURN;
-}
-
-long do_rt_sigreturn(CPUCRISState *env)
-{
- trace_user_do_rt_sigreturn(env, 0);
- qemu_log_mask(LOG_UNIMP, "do_rt_sigreturn: not implemented\n");
- return -TARGET_ENOSYS;
-}
-
-void setup_sigtramp(abi_ulong sigtramp_page)
-{
- uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0);
- assert(tramp != NULL);
-
- default_sigreturn = sigtramp_page;
- setup_sigreturn(tramp);
-
- unlock_user(tramp, sigtramp_page, 6);
-}
diff --git a/linux-user/cris/sockbits.h b/linux-user/cris/sockbits.h
deleted file mode 100644
index 0e4c8f0..0000000
--- a/linux-user/cris/sockbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/sockbits.h"
diff --git a/linux-user/cris/syscall_nr.h b/linux-user/cris/syscall_nr.h
deleted file mode 100644
index 4b6cf65..0000000
--- a/linux-user/cris/syscall_nr.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * This file contains the system call numbers, and stub macros for libc.
- */
-
-#ifndef LINUX_USER_CRIS_SYSCALL_NR_H
-#define LINUX_USER_CRIS_SYSCALL_NR_H
-
-#define TARGET_NR_restart_syscall 0
-#define TARGET_NR_exit 1
-#define TARGET_NR_fork 2
-#define TARGET_NR_read 3
-#define TARGET_NR_write 4
-#define TARGET_NR_open 5
-#define TARGET_NR_close 6
-#define TARGET_NR_waitpid 7
-#define TARGET_NR_creat 8
-#define TARGET_NR_link 9
-#define TARGET_NR_unlink 10
-#define TARGET_NR_execve 11
-#define TARGET_NR_chdir 12
-#define TARGET_NR_time 13
-#define TARGET_NR_mknod 14
-#define TARGET_NR_chmod 15
-#define TARGET_NR_lchown 16
-#define TARGET_NR_break 17
-#define TARGET_NR_oldstat 18
-#define TARGET_NR_lseek 19
-#define TARGET_NR_getpid 20
-#define TARGET_NR_mount 21
-#define TARGET_NR_umount 22
-#define TARGET_NR_setuid 23
-#define TARGET_NR_getuid 24
-#define TARGET_NR_stime 25
-#define TARGET_NR_ptrace 26
-#define TARGET_NR_alarm 27
-#define TARGET_NR_oldfstat 28
-#define TARGET_NR_pause 29
-#define TARGET_NR_utime 30
-#define TARGET_NR_stty 31
-#define TARGET_NR_gtty 32
-#define TARGET_NR_access 33
-#define TARGET_NR_nice 34
-#define TARGET_NR_ftime 35
-#define TARGET_NR_sync 36
-#define TARGET_NR_kill 37
-#define TARGET_NR_rename 38
-#define TARGET_NR_mkdir 39
-#define TARGET_NR_rmdir 40
-#define TARGET_NR_dup 41
-#define TARGET_NR_pipe 42
-#define TARGET_NR_times 43
-#define TARGET_NR_prof 44
-#define TARGET_NR_brk 45
-#define TARGET_NR_setgid 46
-#define TARGET_NR_getgid 47
-#define TARGET_NR_signal 48
-#define TARGET_NR_geteuid 49
-#define TARGET_NR_getegid 50
-#define TARGET_NR_acct 51
-#define TARGET_NR_umount2 52
-#define TARGET_NR_lock 53
-#define TARGET_NR_ioctl 54
-#define TARGET_NR_fcntl 55
-#define TARGET_NR_mpx 56
-#define TARGET_NR_setpgid 57
-#define TARGET_NR_ulimit 58
-#define TARGET_NR_oldolduname 59
-#define TARGET_NR_umask 60
-#define TARGET_NR_chroot 61
-#define TARGET_NR_ustat 62
-#define TARGET_NR_dup2 63
-#define TARGET_NR_getppid 64
-#define TARGET_NR_getpgrp 65
-#define TARGET_NR_setsid 66
-#define TARGET_NR_sigaction 67
-#define TARGET_NR_sgetmask 68
-#define TARGET_NR_ssetmask 69
-#define TARGET_NR_setreuid 70
-#define TARGET_NR_setregid 71
-#define TARGET_NR_sigsuspend 72
-#define TARGET_NR_sigpending 73
-#define TARGET_NR_sethostname 74
-#define TARGET_NR_setrlimit 75
-#define TARGET_NR_getrlimit 76
-#define TARGET_NR_getrusage 77
-#define TARGET_NR_gettimeofday 78
-#define TARGET_NR_settimeofday 79
-#define TARGET_NR_getgroups 80
-#define TARGET_NR_setgroups 81
-#define TARGET_NR_select 82
-#define TARGET_NR_symlink 83
-#define TARGET_NR_oldlstat 84
-#define TARGET_NR_readlink 85
-#define TARGET_NR_uselib 86
-#define TARGET_NR_swapon 87
-#define TARGET_NR_reboot 88
-#define TARGET_NR_readdir 89
-#define TARGET_NR_mmap 90
-#define TARGET_NR_munmap 91
-#define TARGET_NR_truncate 92
-#define TARGET_NR_ftruncate 93
-#define TARGET_NR_fchmod 94
-#define TARGET_NR_fchown 95
-#define TARGET_NR_getpriority 96
-#define TARGET_NR_setpriority 97
-#define TARGET_NR_profil 98
-#define TARGET_NR_statfs 99
-#define TARGET_NR_fstatfs 100
-#define TARGET_NR_ioperm 101
-#define TARGET_NR_socketcall 102
-#define TARGET_NR_syslog 103
-#define TARGET_NR_setitimer 104
-#define TARGET_NR_getitimer 105
-#define TARGET_NR_stat 106
-#define TARGET_NR_lstat 107
-#define TARGET_NR_fstat 108
-#define TARGET_NR_olduname 109
-#define TARGET_NR_iopl 110
-#define TARGET_NR_vhangup 111
-#define TARGET_NR_idle 112
-#define TARGET_NR_vm86 113
-#define TARGET_NR_wait4 114
-#define TARGET_NR_swapoff 115
-#define TARGET_NR_sysinfo 116
-#define TARGET_NR_ipc 117
-#define TARGET_NR_fsync 118
-#define TARGET_NR_sigreturn 119
-#define TARGET_NR_clone 120
-#define TARGET_NR_setdomainname 121
-#define TARGET_NR_uname 122
-#define TARGET_NR_modify_ldt 123
-#define TARGET_NR_adjtimex 124
-#define TARGET_NR_mprotect 125
-#define TARGET_NR_sigprocmask 126
-#define TARGET_NR_create_module 127
-#define TARGET_NR_init_module 128
-#define TARGET_NR_delete_module 129
-#define TARGET_NR_get_kernel_syms 130
-#define TARGET_NR_quotactl 131
-#define TARGET_NR_getpgid 132
-#define TARGET_NR_fchdir 133
-#define TARGET_NR_bdflush 134
-#define TARGET_NR_sysfs 135
-#define TARGET_NR_personality 136
-#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define TARGET_NR_setfsuid 138
-#define TARGET_NR_setfsgid 139
-#define TARGET_NR__llseek 140
-#define TARGET_NR_getdents 141
-#define TARGET_NR__newselect 142
-#define TARGET_NR_flock 143
-#define TARGET_NR_msync 144
-#define TARGET_NR_readv 145
-#define TARGET_NR_writev 146
-#define TARGET_NR_getsid 147
-#define TARGET_NR_fdatasync 148
-#define TARGET_NR__sysctl 149
-#define TARGET_NR_mlock 150
-#define TARGET_NR_munlock 151
-#define TARGET_NR_mlockall 152
-#define TARGET_NR_munlockall 153
-#define TARGET_NR_sched_setparam 154
-#define TARGET_NR_sched_getparam 155
-#define TARGET_NR_sched_setscheduler 156
-#define TARGET_NR_sched_getscheduler 157
-#define TARGET_NR_sched_yield 158
-#define TARGET_NR_sched_get_priority_max 159
-#define TARGET_NR_sched_get_priority_min 160
-#define TARGET_NR_sched_rr_get_interval 161
-#define TARGET_NR_nanosleep 162
-#define TARGET_NR_mremap 163
-#define TARGET_NR_setresuid 164
-#define TARGET_NR_getresuid 165
-
-#define TARGET_NR_query_module 167
-#define TARGET_NR_poll 168
-#define TARGET_NR_nfsservctl 169
-#define TARGET_NR_setresgid 170
-#define TARGET_NR_getresgid 171
-#define TARGET_NR_prctl 172
-#define TARGET_NR_rt_sigreturn 173
-#define TARGET_NR_rt_sigaction 174
-#define TARGET_NR_rt_sigprocmask 175
-#define TARGET_NR_rt_sigpending 176
-#define TARGET_NR_rt_sigtimedwait 177
-#define TARGET_NR_rt_sigqueueinfo 178
-#define TARGET_NR_rt_sigsuspend 179
-#define TARGET_NR_pread64 180
-#define TARGET_NR_pwrite64 181
-#define TARGET_NR_chown 182
-#define TARGET_NR_getcwd 183
-#define TARGET_NR_capget 184
-#define TARGET_NR_capset 185
-#define TARGET_NR_sigaltstack 186
-#define TARGET_NR_sendfile 187
-#define TARGET_NR_getpmsg 188 /* some people actually want streams */
-#define TARGET_NR_putpmsg 189 /* some people actually want streams */
-#define TARGET_NR_vfork 190
-#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define TARGET_NR_mmap2 192
-#define TARGET_NR_truncate64 193
-#define TARGET_NR_ftruncate64 194
-#define TARGET_NR_stat64 195
-#define TARGET_NR_lstat64 196
-#define TARGET_NR_fstat64 197
-#define TARGET_NR_lchown32 198
-#define TARGET_NR_getuid32 199
-#define TARGET_NR_getgid32 200
-#define TARGET_NR_geteuid32 201
-#define TARGET_NR_getegid32 202
-#define TARGET_NR_setreuid32 203
-#define TARGET_NR_setregid32 204
-#define TARGET_NR_getgroups32 205
-#define TARGET_NR_setgroups32 206
-#define TARGET_NR_fchown32 207
-#define TARGET_NR_setresuid32 208
-#define TARGET_NR_getresuid32 209
-#define TARGET_NR_setresgid32 210
-#define TARGET_NR_getresgid32 211
-#define TARGET_NR_chown32 212
-#define TARGET_NR_setuid32 213
-#define TARGET_NR_setgid32 214
-#define TARGET_NR_setfsuid32 215
-#define TARGET_NR_setfsgid32 216
-#define TARGET_NR_pivot_root 217
-#define TARGET_NR_mincore 218
-#define TARGET_NR_madvise 219
-#define TARGET_NR_getdents64 220
-#define TARGET_NR_fcntl64 221
-/* 223 is unused */
-#define TARGET_NR_gettid 224
-#define TARGET_NR_readahead 225
-#define TARGET_NR_setxattr 226
-#define TARGET_NR_lsetxattr 227
-#define TARGET_NR_fsetxattr 228
-#define TARGET_NR_getxattr 229
-#define TARGET_NR_lgetxattr 230
-#define TARGET_NR_fgetxattr 231
-#define TARGET_NR_listxattr 232
-#define TARGET_NR_llistxattr 233
-#define TARGET_NR_flistxattr 234
-#define TARGET_NR_removexattr 235
-#define TARGET_NR_lremovexattr 236
-#define TARGET_NR_fremovexattr 237
-#define TARGET_NR_tkill 238
-#define TARGET_NR_sendfile64 239
-#define TARGET_NR_futex 240
-#define TARGET_NR_sched_setaffinity 241
-#define TARGET_NR_sched_getaffinity 242
-#define TARGET_NR_set_thread_area 243
-#define TARGET_NR_get_thread_area 244
-#define TARGET_NR_io_setup 245
-#define TARGET_NR_io_destroy 246
-#define TARGET_NR_io_getevents 247
-#define TARGET_NR_io_submit 248
-#define TARGET_NR_io_cancel 249
-#define TARGET_NR_fadvise64 250
-#define TARGET_NR_exit_group 252
-#define TARGET_NR_lookup_dcookie 253
-#define TARGET_NR_epoll_create 254
-#define TARGET_NR_epoll_ctl 255
-#define TARGET_NR_epoll_wait 256
-#define TARGET_NR_remap_file_pages 257
-#define TARGET_NR_set_tid_address 258
-#define TARGET_NR_timer_create 259
-#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1)
-#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2)
-#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3)
-#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4)
-#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5)
-#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6)
-#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7)
-#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8)
-#define TARGET_NR_statfs64 268
-#define TARGET_NR_fstatfs64 269
-#define TARGET_NR_tgkill 270
-#define TARGET_NR_utimes 271
-#define TARGET_NR_fadvise64_64 272
-#define TARGET_NR_vserver 273
-#define TARGET_NR_mbind 274
-#define TARGET_NR_get_mempolicy 275
-#define TARGET_NR_set_mempolicy 276
-#define TARGET_NR_mq_open 277
-#define TARGET_NR_mq_unlink (TARGET_NR_mq_open+1)
-#define TARGET_NR_mq_timedsend (TARGET_NR_mq_open+2)
-#define TARGET_NR_mq_timedreceive (TARGET_NR_mq_open+3)
-#define TARGET_NR_mq_notify (TARGET_NR_mq_open+4)
-#define TARGET_NR_mq_getsetattr (TARGET_NR_mq_open+5)
-#define TARGET_NR_kexec_load 283
-#define TARGET_NR_waitid 284
-/* #define TARGET_NR_sys_setaltroot 285 */
-#define TARGET_NR_add_key 286
-#define TARGET_NR_request_key 287
-#define TARGET_NR_keyctl 288
-#define TARGET_NR_ioprio_set 289
-#define TARGET_NR_ioprio_get 290
-#define TARGET_NR_inotify_init 291
-#define TARGET_NR_inotify_add_watch 292
-#define TARGET_NR_inotify_rm_watch 293
-#define TARGET_NR_migrate_pages 294
-#define TARGET_NR_openat 295
-#define TARGET_NR_mkdirat 296
-#define TARGET_NR_mknodat 297
-#define TARGET_NR_fchownat 298
-#define TARGET_NR_futimesat 299
-#define TARGET_NR_fstatat64 300
-#define TARGET_NR_unlinkat 301
-#define TARGET_NR_renameat 302
-#define TARGET_NR_linkat 303
-#define TARGET_NR_symlinkat 304
-#define TARGET_NR_readlinkat 305
-#define TARGET_NR_fchmodat 306
-#define TARGET_NR_faccessat 307
-#define TARGET_NR_pselect6 308
-#define TARGET_NR_ppoll 309
-#define TARGET_NR_unshare 310
-#define TARGET_NR_set_robust_list 311
-#define TARGET_NR_get_robust_list 312
-#define TARGET_NR_splice 313
-#define TARGET_NR_sync_file_range 314
-#define TARGET_NR_tee 315
-#define TARGET_NR_vmsplice 316
-#define TARGET_NR_move_pages 317
-#define TARGET_NR_getcpu 318
-#define TARGET_NR_epoll_pwait 319
-#define TARGET_NR_utimensat 320
-#define TARGET_NR_signalfd 321
-#define TARGET_NR_timerfd_create 322
-#define TARGET_NR_eventfd 323
-#define TARGET_NR_fallocate 324
-#define TARGET_NR_timerfd_settime 325
-#define TARGET_NR_timerfd_gettime 326
-#define TARGET_NR_signalfd4 327
-#define TARGET_NR_eventfd2 328
-#define TARGET_NR_epoll_create1 329
-#define TARGET_NR_dup3 330
-#define TARGET_NR_pipe2 331
-#define TARGET_NR_inotify_init1 332
-#define TARGET_NR_preadv 333
-#define TARGET_NR_pwritev 334
-#define TARGET_NR_setns 335
-#define TARGET_NR_name_to_handle_at 336
-#define TARGET_NR_open_by_handle_at 337
-#define TARGET_NR_rt_tgsigqueueinfo 338
-#define TARGET_NR_perf_event_open 339
-#define TARGET_NR_recvmmsg 340
-#define TARGET_NR_accept4 341
-#define TARGET_NR_fanotify_init 342
-#define TARGET_NR_fanotify_mark 343
-#define TARGET_NR_prlimit64 344
-#define TARGET_NR_clock_adjtime 345
-#define TARGET_NR_syncfs 346
-#define TARGET_NR_sendmmsg 347
-#define TARGET_NR_process_vm_readv 348
-#define TARGET_NR_process_vm_writev 349
-#define TARGET_NR_kcmp 350
-#define TARGET_NR_finit_module 351
-#define TARGET_NR_sched_setattr 352
-#define TARGET_NR_sched_getattr 353
-#define TARGET_NR_renameat2 354
-#define TARGET_NR_seccomp 355
-#define TARGET_NR_getrandom 356
-#define TARGET_NR_memfd_create 357
-#define TARGET_NR_bpf 358
-#define TARGET_NR_execveat 359
-
-#endif
diff --git a/linux-user/cris/target_cpu.h b/linux-user/cris/target_cpu.h
deleted file mode 100644
index 7f6cade..0000000
--- a/linux-user/cris/target_cpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * CRIS specific CPU ABI and functions for linux-user
- *
- * Copyright (c) 2007 AXIS Communications AB
- * Written by Edgar E. Iglesias
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CRIS_TARGET_CPU_H
-#define CRIS_TARGET_CPU_H
-
-static inline void cpu_clone_regs_child(CPUCRISState *env, target_ulong newsp,
- unsigned flags)
-{
- if (newsp) {
- env->regs[14] = newsp;
- }
- env->regs[10] = 0;
-}
-
-static inline void cpu_clone_regs_parent(CPUCRISState *env, unsigned flags)
-{
-}
-
-static inline void cpu_set_tls(CPUCRISState *env, target_ulong newtls)
-{
- env->pregs[PR_PID] = (env->pregs[PR_PID] & 0xff) | newtls;
-}
-
-static inline abi_ulong get_sp_from_cpustate(CPUCRISState *state)
-{
- return state->regs[14];
-}
-#endif
diff --git a/linux-user/cris/target_elf.h b/linux-user/cris/target_elf.h
deleted file mode 100644
index 99eb4ec..0000000
--- a/linux-user/cris/target_elf.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef CRIS_TARGET_ELF_H
-#define CRIS_TARGET_ELF_H
-static inline const char *cpu_get_model(uint32_t eflags)
-{
- return "any";
-}
-#endif
diff --git a/linux-user/cris/target_errno_defs.h b/linux-user/cris/target_errno_defs.h
deleted file mode 100644
index 1cf43b1..0000000
--- a/linux-user/cris/target_errno_defs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef CRIS_TARGET_ERRNO_DEFS_H
-#define CRIS_TARGET_ERRNO_DEFS_H
-
-/* Target uses generic errno */
-#include "../generic/target_errno_defs.h"
-
-#endif
diff --git a/linux-user/cris/target_fcntl.h b/linux-user/cris/target_fcntl.h
deleted file mode 100644
index df0acee..0000000
--- a/linux-user/cris/target_fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef CRIS_TARGET_FCNTL_H
-#define CRIS_TARGET_FCNTL_H
-#include "../generic/fcntl.h"
-#endif
diff --git a/linux-user/cris/target_mman.h b/linux-user/cris/target_mman.h
deleted file mode 100644
index 9ace8ac..0000000
--- a/linux-user/cris/target_mman.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * arch/cris/include/asm/processor.h:
- * TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
- *
- * arch/cris/include/arch-v32/arch/processor.h
- * TASK_SIZE 0xb0000000
- */
-#define TASK_UNMAPPED_BASE TARGET_PAGE_ALIGN(0xb0000000 / 3)
-
-/* arch/cris/include/uapi/asm/elf.h */
-#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2)
-
-#include "../generic/target_mman.h"
diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h
deleted file mode 100644
index eb53b31..0000000
--- a/linux-user/cris/target_prctl.h
+++ /dev/null
@@ -1 +0,0 @@
-/* No special prctl support required. */
diff --git a/linux-user/cris/target_proc.h b/linux-user/cris/target_proc.h
deleted file mode 100644
index 43fe29c..0000000
--- a/linux-user/cris/target_proc.h
+++ /dev/null
@@ -1 +0,0 @@
-/* No target-specific /proc support */
diff --git a/linux-user/cris/target_resource.h b/linux-user/cris/target_resource.h
deleted file mode 100644
index 2272595..0000000
--- a/linux-user/cris/target_resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/target_resource.h"
diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h
deleted file mode 100644
index ab0653f..0000000
--- a/linux-user/cris/target_signal.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef CRIS_TARGET_SIGNAL_H
-#define CRIS_TARGET_SIGNAL_H
-
-#include "../generic/signal.h"
-
-#define TARGET_ARCH_HAS_SETUP_FRAME
-#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
-
-#endif /* CRIS_TARGET_SIGNAL_H */
diff --git a/linux-user/cris/target_structs.h b/linux-user/cris/target_structs.h
deleted file mode 100644
index 3a06f37..0000000
--- a/linux-user/cris/target_structs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/target_structs.h"
diff --git a/linux-user/cris/target_syscall.h b/linux-user/cris/target_syscall.h
deleted file mode 100644
index 0b5ebf1..0000000
--- a/linux-user/cris/target_syscall.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef CRIS_TARGET_SYSCALL_H
-#define CRIS_TARGET_SYSCALL_H
-
-#define UNAME_MACHINE "cris"
-#define UNAME_MINIMUM_RELEASE "2.6.32"
-
-/* pt_regs not only specifies the format in the user-struct during
- * ptrace but is also the frame format used in the kernel prologue/epilogues
- * themselves
- */
-
-struct target_pt_regs {
- unsigned long orig_r10;
- /* pushed by movem r13, [sp] in SAVE_ALL. */
- unsigned long r0;
- unsigned long r1;
- unsigned long r2;
- unsigned long r3;
- unsigned long r4;
- unsigned long r5;
- unsigned long r6;
- unsigned long r7;
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long acr;
- unsigned long srs;
- unsigned long mof;
- unsigned long spc;
- unsigned long ccs;
- unsigned long srp;
- unsigned long erp; /* This is actually the debugged process's PC */
- /* For debugging purposes; saved only when needed. */
- unsigned long exs;
- unsigned long eda;
-};
-
-#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MCL_CURRENT 1
-#define TARGET_MCL_FUTURE 2
-#define TARGET_MCL_ONFAULT 4
-
-#endif
diff --git a/linux-user/cris/termbits.h b/linux-user/cris/termbits.h
deleted file mode 100644
index 0c8d8fc..0000000
--- a/linux-user/cris/termbits.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/* from asm/termbits.h */
-
-#ifndef LINUX_USER_CRIS_TERMBITS_H
-#define LINUX_USER_CRIS_TERMBITS_H
-
-#define TARGET_NCCS 19
-
-typedef unsigned char target_cc_t; /* cc_t */
-typedef unsigned int target_speed_t; /* speed_t */
-typedef unsigned int target_tcflag_t; /* tcflag_t */
-
-struct target_termios {
- target_tcflag_t c_iflag; /* input mode flags */
- target_tcflag_t c_oflag; /* output mode flags */
- target_tcflag_t c_cflag; /* control mode flags */
- target_tcflag_t c_lflag; /* local mode flags */
- target_cc_t c_line; /* line discipline */
- target_cc_t c_cc[TARGET_NCCS]; /* control characters */
-};
-
-/* c_iflag bits */
-#define TARGET_IGNBRK 0000001
-#define TARGET_BRKINT 0000002
-#define TARGET_IGNPAR 0000004
-#define TARGET_PARMRK 0000010
-#define TARGET_INPCK 0000020
-#define TARGET_ISTRIP 0000040
-#define TARGET_INLCR 0000100
-#define TARGET_IGNCR 0000200
-#define TARGET_ICRNL 0000400
-#define TARGET_IUCLC 0001000
-#define TARGET_IXON 0002000
-#define TARGET_IXANY 0004000
-#define TARGET_IXOFF 0010000
-#define TARGET_IMAXBEL 0020000
-#define TARGET_IUTF8 0040000
-
-/* c_oflag bits */
-#define TARGET_OPOST 0000001
-#define TARGET_OLCUC 0000002
-#define TARGET_ONLCR 0000004
-#define TARGET_OCRNL 0000010
-#define TARGET_ONOCR 0000020
-#define TARGET_ONLRET 0000040
-#define TARGET_OFILL 0000100
-#define TARGET_OFDEL 0000200
-#define TARGET_NLDLY 0000400
-#define TARGET_NL0 0000000
-#define TARGET_NL1 0000400
-#define TARGET_CRDLY 0003000
-#define TARGET_CR0 0000000
-#define TARGET_CR1 0001000
-#define TARGET_CR2 0002000
-#define TARGET_CR3 0003000
-#define TARGET_TABDLY 0014000
-#define TARGET_TAB0 0000000
-#define TARGET_TAB1 0004000
-#define TARGET_TAB2 0010000
-#define TARGET_TAB3 0014000
-#define TARGET_XTABS 0014000
-#define TARGET_BSDLY 0020000
-#define TARGET_BS0 0000000
-#define TARGET_BS1 0020000
-#define TARGET_VTDLY 0040000
-#define TARGET_VT0 0000000
-#define TARGET_VT1 0040000
-#define TARGET_FFDLY 0100000
-#define TARGET_FF0 0000000
-#define TARGET_FF1 0100000
-
-/* c_cflag bit meaning */
-#define TARGET_CBAUD 0010017
-#define TARGET_B0 0000000 /* hang up */
-#define TARGET_B50 0000001
-#define TARGET_B75 0000002
-#define TARGET_B110 0000003
-#define TARGET_B134 0000004
-#define TARGET_B150 0000005
-#define TARGET_B200 0000006
-#define TARGET_B300 0000007
-#define TARGET_B600 0000010
-#define TARGET_B1200 0000011
-#define TARGET_B1800 0000012
-#define TARGET_B2400 0000013
-#define TARGET_B4800 0000014
-#define TARGET_B9600 0000015
-#define TARGET_B19200 0000016
-#define TARGET_B38400 0000017
-#define TARGET_EXTA B19200
-#define TARGET_EXTB B38400
-#define TARGET_CSIZE 0000060
-#define TARGET_CS5 0000000
-#define TARGET_CS6 0000020
-#define TARGET_CS7 0000040
-#define TARGET_CS8 0000060
-#define TARGET_CSTOPB 0000100
-#define TARGET_CREAD 0000200
-#define TARGET_PARENB 0000400
-#define TARGET_PARODD 0001000
-#define TARGET_HUPCL 0002000
-#define TARGET_CLOCAL 0004000
-#define TARGET_CBAUDEX 0010000
-#define TARGET_B57600 0010001
-#define TARGET_B115200 0010002
-#define TARGET_B230400 0010003
-#define TARGET_B460800 0010004
-#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
-#define TARGET_CRTSCTS 020000000000 /* flow control */
-
-/* c_lflag bits */
-#define TARGET_ISIG 0000001
-#define TARGET_ICANON 0000002
-#define TARGET_XCASE 0000004
-#define TARGET_ECHO 0000010
-#define TARGET_ECHOE 0000020
-#define TARGET_ECHOK 0000040
-#define TARGET_ECHONL 0000100
-#define TARGET_NOFLSH 0000200
-#define TARGET_TOSTOP 0000400
-#define TARGET_ECHOCTL 0001000
-#define TARGET_ECHOPRT 0002000
-#define TARGET_ECHOKE 0004000
-#define TARGET_FLUSHO 0010000
-#define TARGET_PENDIN 0040000
-#define TARGET_IEXTEN 0100000
-#define TARGET_EXTPROC 0200000
-
-/* c_cc character offsets */
-#define TARGET_VINTR 0
-#define TARGET_VQUIT 1
-#define TARGET_VERASE 2
-#define TARGET_VKILL 3
-#define TARGET_VEOF 4
-#define TARGET_VTIME 5
-#define TARGET_VMIN 6
-#define TARGET_VSWTC 7
-#define TARGET_VSTART 8
-#define TARGET_VSTOP 9
-#define TARGET_VSUSP 10
-#define TARGET_VEOL 11
-#define TARGET_VREPRINT 12
-#define TARGET_VDISCARD 13
-#define TARGET_VWERASE 14
-#define TARGET_VLNEXT 15
-#define TARGET_VEOL2 16
-
-/* ioctls */
-
-#define TARGET_TCGETS 0x5401
-#define TARGET_TCSETS 0x5402
-#define TARGET_TCSETSW 0x5403
-#define TARGET_TCSETSF 0x5404
-#define TARGET_TCGETA 0x5405
-#define TARGET_TCSETA 0x5406
-#define TARGET_TCSETAW 0x5407
-#define TARGET_TCSETAF 0x5408
-#define TARGET_TCSBRK 0x5409
-#define TARGET_TCXONC 0x540A
-#define TARGET_TCFLSH 0x540B
-
-#define TARGET_TIOCEXCL 0x540C
-#define TARGET_TIOCNXCL 0x540D
-#define TARGET_TIOCSCTTY 0x540E
-#define TARGET_TIOCGPGRP 0x540F
-#define TARGET_TIOCSPGRP 0x5410
-#define TARGET_TIOCOUTQ 0x5411
-#define TARGET_TIOCSTI 0x5412
-#define TARGET_TIOCGWINSZ 0x5413
-#define TARGET_TIOCSWINSZ 0x5414
-#define TARGET_TIOCMGET 0x5415
-#define TARGET_TIOCMBIS 0x5416
-#define TARGET_TIOCMBIC 0x5417
-#define TARGET_TIOCMSET 0x5418
-#define TARGET_TIOCGSOFTCAR 0x5419
-#define TARGET_TIOCSSOFTCAR 0x541A
-#define TARGET_FIONREAD 0x541B
-#define TARGET_TIOCINQ TARGET_FIONREAD
-#define TARGET_TIOCLINUX 0x541C
-#define TARGET_TIOCCONS 0x541D
-#define TARGET_TIOCGSERIAL 0x541E
-#define TARGET_TIOCSSERIAL 0x541F
-#define TARGET_TIOCPKT 0x5420
-#define TARGET_FIONBIO 0x5421
-#define TARGET_TIOCNOTTY 0x5422
-#define TARGET_TIOCSETD 0x5423
-#define TARGET_TIOCGETD 0x5424
-#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */
-#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
-#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
-#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TARGET_TIOCGPTN TARGET_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TARGET_TIOCSPTLCK TARGET_IOW('T',0x31, int) /* Lock/unlock Pty */
-#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) /* Safely open the slave */
-
-#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */
-#define TARGET_FIOCLEX 0x5451
-#define TARGET_FIOASYNC 0x5452
-#define TARGET_TIOCSERCONFIG 0x5453
-#define TARGET_TIOCSERGWILD 0x5454
-#define TARGET_TIOCSERSWILD 0x5455
-#define TARGET_TIOCGLCKTRMIOS 0x5456
-#define TARGET_TIOCSLCKTRMIOS 0x5457
-#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
-#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
-
-/* Used for packet mode */
-#define TARGET_TIOCPKT_DATA 0
-#define TARGET_TIOCPKT_FLUSHREAD 1
-#define TARGET_TIOCPKT_FLUSHWRITE 2
-#define TARGET_TIOCPKT_STOP 4
-#define TARGET_TIOCPKT_START 8
-#define TARGET_TIOCPKT_NOSTOP 16
-#define TARGET_TIOCPKT_DOSTOP 32
-
-#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 0d4dc1f..2add166 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -8,7 +8,11 @@
#include "qemu.h"
#include "user/tswap-target.h"
+#include "user/page-protection.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
+#include "exec/translation-block.h"
+#include "exec/tswap.h"
#include "user/guest-base.h"
#include "user-internals.h"
#include "signal-common.h"
@@ -203,7 +207,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[12] = tswapreg(env->regs[R_EDX]);
(*regs)[13] = tswapreg(env->regs[R_ESI]);
(*regs)[14] = tswapreg(env->regs[R_EDI]);
- (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
+ (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
(*regs)[16] = tswapreg(env->eip);
(*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
(*regs)[18] = tswapreg(env->eflags);
@@ -306,7 +310,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
(*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
(*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
- (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
+ (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
(*regs)[12] = tswapreg(env->eip);
(*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
(*regs)[14] = tswapreg(env->eflags);
@@ -659,6 +663,23 @@ static const char *get_elf_platform(void)
#undef END
}
+#if TARGET_BIG_ENDIAN
+#include "elf.h"
+#include "vdso-be8.c.inc"
+#include "vdso-be32.c.inc"
+
+static const VdsoImageInfo *vdso_image_info(uint32_t elf_flags)
+{
+ return (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4
+ && (elf_flags & EF_ARM_BE8)
+ ? &vdso_be8_image_info
+ : &vdso_be32_image_info);
+}
+#define vdso_image_info vdso_image_info
+#else
+# define VDSO_HEADER "vdso-le.c.inc"
+#endif
+
#else
/* 64 bit ARM definitions */
@@ -730,7 +751,23 @@ enum {
ARM_HWCAP_A64_SSBS = 1 << 28,
ARM_HWCAP_A64_SB = 1 << 29,
ARM_HWCAP_A64_PACA = 1 << 30,
- ARM_HWCAP_A64_PACG = 1UL << 31,
+ ARM_HWCAP_A64_PACG = 1ULL << 31,
+ ARM_HWCAP_A64_GCS = 1ULL << 32,
+ ARM_HWCAP_A64_CMPBR = 1ULL << 33,
+ ARM_HWCAP_A64_FPRCVT = 1ULL << 34,
+ ARM_HWCAP_A64_F8MM8 = 1ULL << 35,
+ ARM_HWCAP_A64_F8MM4 = 1ULL << 36,
+ ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37,
+ ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38,
+ ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39,
+ ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40,
+ ARM_HWCAP_A64_SVE2P2 = 1ULL << 41,
+ ARM_HWCAP_A64_SME2P2 = 1ULL << 42,
+ ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43,
+ ARM_HWCAP_A64_SME_AES = 1ULL << 44,
+ ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45,
+ ARM_HWCAP_A64_SME_STMOP = 1ULL << 46,
+ ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47,
ARM_HWCAP2_A64_DCPODP = 1 << 0,
ARM_HWCAP2_A64_SVE2 = 1 << 1,
@@ -777,6 +814,25 @@ enum {
ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42,
ARM_HWCAP2_A64_MOPS = 1ULL << 43,
ARM_HWCAP2_A64_HBC = 1ULL << 44,
+ ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45,
+ ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46,
+ ARM_HWCAP2_A64_LSE128 = 1ULL << 47,
+ ARM_HWCAP2_A64_FPMR = 1ULL << 48,
+ ARM_HWCAP2_A64_LUT = 1ULL << 49,
+ ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50,
+ ARM_HWCAP2_A64_F8CVT = 1ULL << 51,
+ ARM_HWCAP2_A64_F8FMA = 1ULL << 52,
+ ARM_HWCAP2_A64_F8DP4 = 1ULL << 53,
+ ARM_HWCAP2_A64_F8DP2 = 1ULL << 54,
+ ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55,
+ ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56,
+ ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57,
+ ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58,
+ ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59,
+ ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60,
+ ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61,
+ ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62,
+ ARM_HWCAP2_A64_POE = 1ULL << 63,
};
#define ELF_HWCAP get_elf_hwcap()
@@ -865,7 +921,7 @@ uint64_t get_elf_hwcap2(void)
const char *elf_hwcap_str(uint32_t bit)
{
- static const char *hwcap_str[] = {
+ static const char * const hwcap_str[] = {
[__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp",
[__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd",
[__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
@@ -898,6 +954,22 @@ const char *elf_hwcap_str(uint32_t bit)
[__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb",
[__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca",
[__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg",
+ [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs",
+ [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr",
+ [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4",
};
return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
@@ -905,7 +977,7 @@ const char *elf_hwcap_str(uint32_t bit)
const char *elf_hwcap2_str(uint32_t bit)
{
- static const char *hwcap_str[] = {
+ static const char * const hwcap_str[] = {
[__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp",
[__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2",
[__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes",
@@ -951,6 +1023,24 @@ const char *elf_hwcap2_str(uint32_t bit)
[__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
[__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops",
[__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe",
};
return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
@@ -958,14 +1048,14 @@ const char *elf_hwcap2_str(uint32_t bit)
#undef GET_FEATURE_ID
-#endif /* not TARGET_AARCH64 */
-
#if TARGET_BIG_ENDIAN
# define VDSO_HEADER "vdso-be.c.inc"
#else
# define VDSO_HEADER "vdso-le.c.inc"
#endif
+#endif /* not TARGET_AARCH64 */
+
#endif /* TARGET_ARM */
#ifdef TARGET_SPARC
@@ -1647,21 +1737,6 @@ static uint32_t get_elf_hwcap(void)
#endif
-#ifdef TARGET_CRIS
-
-#define ELF_CLASS ELFCLASS32
-#define ELF_ARCH EM_CRIS
-
-static inline void init_thread(struct target_pt_regs *regs,
- struct image_info *infop)
-{
- regs->erp = infop->entry;
-}
-
-#define ELF_EXEC_PAGESIZE 8192
-
-#endif
-
#ifdef TARGET_M68K
#define ELF_CLASS ELFCLASS32
@@ -2117,9 +2192,12 @@ static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
memcpy(to, from, n);
}
-#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
bswap16s(&ehdr->e_type); /* Object file type */
bswap16s(&ehdr->e_machine); /* Architecture */
bswap32s(&ehdr->e_version); /* Object file version */
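
The compile-time #ifdef BSWAP_NEEDED guards become an early return on target_needs_bswap(), pulled in via the new exec/tswap.h include. Presumably that helper is just a host/target endianness comparison, along these lines (a sketch, not the authoritative QEMU definition):

    static inline bool target_needs_bswap(void)
    {
        /* Swapping is only needed when host and target endianness differ. */
        return HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN;
    }

The effect is that the bswap_* helpers below compile unconditionally; when host and target agree, the early return turns them into no-ops.
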
@@ -2137,8 +2215,11 @@ static void bswap_ehdr(struct elfhdr *ehdr)
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
- int i;
- for (i = 0; i < phnum; ++i, ++phdr) {
+ if (!target_needs_bswap()) {
+ return;
+ }
+
+ for (int i = 0; i < phnum; ++i, ++phdr) {
bswap32s(&phdr->p_type); /* Segment type */
bswap32s(&phdr->p_flags); /* Segment flags */
bswaptls(&phdr->p_offset); /* Segment file offset */
@@ -2152,8 +2233,11 @@ static void bswap_phdr(struct elf_phdr *phdr, int phnum)
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
- int i;
- for (i = 0; i < shnum; ++i, ++shdr) {
+ if (!target_needs_bswap()) {
+ return;
+ }
+
+ for (int i = 0; i < shnum; ++i, ++shdr) {
bswap32s(&shdr->sh_name);
bswap32s(&shdr->sh_type);
bswaptls(&shdr->sh_flags);
@@ -2169,6 +2253,10 @@ static void bswap_shdr(struct elf_shdr *shdr, int shnum)
static void bswap_sym(struct elf_sym *sym)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
bswap32s(&sym->st_name);
bswaptls(&sym->st_value);
bswaptls(&sym->st_size);
@@ -2178,6 +2266,10 @@ static void bswap_sym(struct elf_sym *sym)
#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
bswap16s(&abiflags->version);
bswap32s(&abiflags->ases);
bswap32s(&abiflags->isa_ext);
@@ -2185,15 +2277,6 @@ static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
bswap32s(&abiflags->flags2);
}
#endif
-#else
-static inline void bswap_ehdr(struct elfhdr *ehdr) { }
-static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
-static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
-static inline void bswap_sym(struct elf_sym *sym) { }
-#ifdef TARGET_MIPS
-static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
-#endif
-#endif
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
@@ -2913,7 +2996,7 @@ static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
uintptr_t align, uintptr_t brk)
{
- uintptr_t last = mmap_min_addr;
+ uintptr_t last = sizeof(uintptr_t) == 4 ? MiB : GiB;
uintptr_t base, skip;
while (true) {
@@ -3136,29 +3219,29 @@ static bool parse_elf_properties(const ImageSource *src,
}
/*
- * The contents of a valid PT_GNU_PROPERTY is a sequence
- * of uint32_t -- swap them all now.
+ * The contents of a valid PT_GNU_PROPERTY is a sequence of uint32_t.
+ * Swap most of them now, beyond the header and namesz.
*/
-#ifdef BSWAP_NEEDED
- for (int i = 0; i < n / 4; i++) {
- bswap32s(note.data + i);
+ if (target_needs_bswap()) {
+ for (int i = 4; i < n / 4; i++) {
+ bswap32s(note.data + i);
+ }
}
-#endif
/*
* Note that nhdr is 3 words, and that the "name" described by namesz
* immediately follows nhdr and is thus at the 4th word. Further, all
* of the inputs to the kernel's round_up are multiples of 4.
*/
- if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
- note.nhdr.n_namesz != NOTE_NAME_SZ ||
+ if (tswap32(note.nhdr.n_type) != NT_GNU_PROPERTY_TYPE_0 ||
+ tswap32(note.nhdr.n_namesz) != NOTE_NAME_SZ ||
note.data[3] != GNU0_MAGIC) {
error_setg(errp, "Invalid note in PT_GNU_PROPERTY");
return false;
}
off = sizeof(note.nhdr) + NOTE_NAME_SZ;
- datasz = note.nhdr.n_descsz + off;
+ datasz = tswap32(note.nhdr.n_descsz) + off;
if (datasz > n) {
error_setg(errp, "Invalid note size in PT_GNU_PROPERTY");
return false;
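
The indices the swap loop now skips are the generic ELF note header plus the 4-byte name, whose fields are instead read through tswap32 or compared as raw words. For reference, the layout the checks rely on (standard ELF note structure; comments reflect the PT_GNU_PROPERTY case):

    struct gnu_property_note_layout {
        uint32_t n_namesz;   /* 4: strlen("GNU") + NUL, checked against NOTE_NAME_SZ */
        uint32_t n_descsz;   /* payload size, used to compute datasz */
        uint32_t n_type;     /* NT_GNU_PROPERTY_TYPE_0 */
        char     name[4];    /* "GNU\0"; this word is compared against GNU0_MAGIC */
        /* property descriptors follow, aligned per ELF class */
    };

Words 0-3 of note.data are exactly these four fields, which is why the byte-swap loop above starts at index 4.
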
@@ -3194,7 +3277,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
char **pinterp_name)
{
g_autofree struct elf_phdr *phdr = NULL;
- abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
+ abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align;
+ size_t reserve_size, align_size;
int i, prot_exec;
Error *err = NULL;
@@ -3234,7 +3318,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
* amount of memory to handle that. Locate the interpreter, if any.
*/
loaddr = -1, hiaddr = 0;
- info->alignment = 0;
+ align = 0;
info->exec_stack = EXSTACK_DEFAULT;
for (i = 0; i < ehdr->e_phnum; ++i) {
struct elf_phdr *eppnt = phdr + i;
@@ -3248,7 +3332,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
hiaddr = a;
}
++info->nsegs;
- info->alignment |= eppnt->p_align;
+ align |= eppnt->p_align;
} else if (eppnt->p_type == PT_INTERP && pinterp_name) {
g_autofree char *interp_name = NULL;
@@ -3278,6 +3362,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
load_addr = loaddr;
+ align = pow2ceil(align);
+
if (pinterp_name != NULL) {
if (ehdr->e_type == ET_EXEC) {
/*
@@ -3286,8 +3372,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
*/
probe_guest_base(image_name, loaddr, hiaddr);
} else {
- abi_ulong align;
-
/*
* The binary is dynamic, but we still need to
* select guest_base. In this case we pass a size.
@@ -3305,10 +3389,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
* Since we do not have complete control over the guest
* address space, we prefer the kernel to choose some address
* rather than force the use of LOAD_ADDR via MAP_FIXED.
- * But without MAP_FIXED we cannot guarantee alignment,
- * only suggest it.
*/
- align = pow2ceil(info->alignment);
if (align) {
load_addr &= -align;
}
@@ -3332,13 +3413,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
* In both cases, we will overwrite pages in this range with mappings
* from the executable.
*/
- load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
+ reserve_size = (size_t)hiaddr - loaddr + 1;
+ align_size = reserve_size;
+
+ if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) {
+ align_size += align - 1;
+ }
+
+ load_addr = target_mmap(load_addr, align_size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
(ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
-1, 0);
if (load_addr == -1) {
goto exit_mmap;
}
+
+ if (align_size != reserve_size) {
+ abi_ulong align_addr = ROUND_UP(load_addr, align);
+ abi_ulong align_end = TARGET_PAGE_ALIGN(align_addr + reserve_size);
+ abi_ulong load_end = TARGET_PAGE_ALIGN(load_addr + align_size);
+
+ if (align_addr != load_addr) {
+ target_munmap(load_addr, align_addr - load_addr);
+ }
+ if (align_end != load_end) {
+ target_munmap(align_end, load_end - align_end);
+ }
+ load_addr = align_addr;
+ }
+
load_bias = load_addr - loaddr;
if (elf_is_fdpic(ehdr)) {
@@ -3519,12 +3622,14 @@ static void load_elf_interp(const char *filename, struct image_info *info,
load_elf_image(filename, &src, info, &ehdr, NULL);
}
+#ifndef vdso_image_info
#ifdef VDSO_HEADER
#include VDSO_HEADER
-#define vdso_image_info() &vdso_image_info
+#define vdso_image_info(flags) &vdso_image_info
#else
-#define vdso_image_info() NULL
-#endif
+#define vdso_image_info(flags) NULL
+#endif /* VDSO_HEADER */
+#endif /* vdso_image_info */
static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso)
{
@@ -3855,7 +3960,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
* Load a vdso if available, which will amongst other things contain the
* signal trampolines. Otherwise, allocate a separate page for them.
*/
- const VdsoImageInfo *vdso = vdso_image_info();
+ const VdsoImageInfo *vdso = vdso_image_info(info->elf_flags);
if (vdso) {
load_elf_vdso(&vdso_info, vdso);
info->vdso = vdso_info.load_bias;
@@ -3894,7 +3999,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
}
#ifdef USE_ELF_CORE_DUMP
-#include "exec/translate-all.h"
/*
* Definitions to generate Intel SVR4-like core files.
@@ -3974,9 +4078,12 @@ struct target_elf_prpsinfo {
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
@@ -3994,6 +4101,10 @@ static void bswap_prstatus(struct target_elf_prstatus *prstatus)
static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
psinfo->pr_flag = tswapal(psinfo->pr_flag);
psinfo->pr_uid = tswap16(psinfo->pr_uid);
psinfo->pr_gid = tswap16(psinfo->pr_gid);
@@ -4005,21 +4116,19 @@ static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
static void bswap_note(struct elf_note *en)
{
+ if (!target_needs_bswap()) {
+ return;
+ }
+
bswap32s(&en->n_namesz);
bswap32s(&en->n_descsz);
bswap32s(&en->n_type);
}
-#else
-static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
-static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
-static inline void bswap_note(struct elf_note *en) { }
-#endif /* BSWAP_NEEDED */
/*
* Calculate file (dump) size of given memory region.
*/
-static size_t vma_dump_size(target_ulong start, target_ulong end,
- unsigned long flags)
+static size_t vma_dump_size(vaddr start, vaddr end, int flags)
{
/* The area must be readable. */
if (!(flags & PAGE_READ)) {
@@ -4102,8 +4211,7 @@ static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
bswap_phdr(phdr, 1);
}
-static void fill_prstatus_note(void *data, const TaskState *ts,
- CPUState *cpu, int signr)
+static void fill_prstatus_note(void *data, CPUState *cpu, int signr)
{
/*
* Because note memory is only aligned to 4, and target_elf_prstatus
@@ -4113,7 +4221,7 @@ static void fill_prstatus_note(void *data, const TaskState *ts,
struct target_elf_prstatus prstatus = {
.pr_info.si_signo = signr,
.pr_cursig = signr,
- .pr_pid = ts->ts_tid,
+ .pr_pid = get_task_state(cpu)->ts_tid,
.pr_ppid = getppid(),
.pr_pgrp = getpgrp(),
.pr_sid = getsid(0),
@@ -4213,14 +4321,14 @@ static int dump_write(int fd, const void *ptr, size_t size)
return (0);
}
-static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
- target_ulong end, unsigned long flags)
+static int wmr_page_unprotect_regions(void *opaque, vaddr start,
+ vaddr end, int flags)
{
if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());
while (1) {
- page_unprotect(start, 0);
+ page_unprotect(NULL, start, 0);
if (end - start <= step) {
break;
}
@@ -4235,8 +4343,8 @@ typedef struct {
size_t size;
} CountAndSizeRegions;
-static int wmr_count_and_size_regions(void *opaque, target_ulong start,
- target_ulong end, unsigned long flags)
+static int wmr_count_and_size_regions(void *opaque, vaddr start,
+ vaddr end, int flags)
{
CountAndSizeRegions *css = opaque;
@@ -4250,8 +4358,8 @@ typedef struct {
off_t offset;
} FillRegionPhdr;
-static int wmr_fill_region_phdr(void *opaque, target_ulong start,
- target_ulong end, unsigned long flags)
+static int wmr_fill_region_phdr(void *opaque, vaddr start,
+ vaddr end, int flags)
{
FillRegionPhdr *d = opaque;
struct elf_phdr *phdr = d->phdr;
@@ -4273,8 +4381,8 @@ static int wmr_fill_region_phdr(void *opaque, target_ulong start,
return 0;
}
-static int wmr_write_region(void *opaque, target_ulong start,
- target_ulong end, unsigned long flags)
+static int wmr_write_region(void *opaque, vaddr start,
+ vaddr end, int flags)
{
int fd = *(int *)opaque;
size_t size = vma_dump_size(start, end, flags);
@@ -4330,7 +4438,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
*/
static int elf_core_dump(int signr, const CPUArchState *env)
{
- const CPUState *cpu = env_cpu((CPUArchState *)env);
+ const CPUState *cpu = env_cpu_const(env);
const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
struct rlimit dumpsize;
CountAndSizeRegions css;
@@ -4428,8 +4536,7 @@ static int elf_core_dump(int signr, const CPUArchState *env)
CPU_FOREACH(cpu_iter) {
dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
sizeof(struct target_elf_prstatus));
- fill_prstatus_note(dptr, ts, cpu_iter,
- cpu_iter == cpu ? signr : 0);
+ fill_prstatus_note(dptr, cpu_iter, cpu_iter == cpu ? signr : 0);
}
if (dump_write(fd, header, data_offset) < 0) {
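
The load_elf_image() hunks above handle ET_DYN images whose p_align exceeds the host page size by over-reserving and trimming: the PROT_NONE reservation is grown by align - 1 bytes, its start is rounded up to the requested alignment, and the unused head and tail are unmapped again. A rough stand-alone illustration of the same idea, using plain mmap()/munmap() instead of the patch's target_mmap()/target_munmap() and TARGET_PAGE_ALIGN() (the reserve_aligned() helper below is purely hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Reserve 'size' bytes aligned to 'align' (a power of two that is a
 * multiple of 'pagesize'), trimming the excess with munmap().
 */
static void *reserve_aligned(size_t size, size_t align, size_t pagesize)
{
    size_t reserve = size + align - 1;          /* room to slide the start up */
    void *base = mmap(NULL, reserve, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    uintptr_t start, end, reserve_end;

    if (base == MAP_FAILED) {
        return NULL;
    }

    start = ((uintptr_t)base + align - 1) & ~((uintptr_t)align - 1);
    end = (start + size + pagesize - 1) & ~((uintptr_t)pagesize - 1);
    reserve_end = ((uintptr_t)base + reserve + pagesize - 1)
                  & ~((uintptr_t)pagesize - 1);

    if (start != (uintptr_t)base) {
        munmap(base, start - (uintptr_t)base);      /* trim the head */
    }
    if (end != reserve_end) {
        munmap((void *)end, reserve_end - end);     /* trim the tail */
    }
    return (void *)start;
}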
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
index c04a97c..f83d1f7 100644
--- a/linux-user/fd-trans.c
+++ b/linux-user/fd-trans.c
@@ -25,12 +25,32 @@
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
+#include <linux/neighbour.h>
#endif
#include "qemu.h"
#include "user-internals.h"
#include "fd-trans.h"
#include "signal-common.h"
+#define NDM_RTA(r) ((struct rtattr*)(((char*)(r)) + \
+ NLMSG_ALIGN(sizeof(struct ndmsg))))
+
+enum {
+ QEMU_IFA_UNSPEC,
+ QEMU_IFA_ADDRESS,
+ QEMU_IFA_LOCAL,
+ QEMU_IFA_LABEL,
+ QEMU_IFA_BROADCAST,
+ QEMU_IFA_ANYCAST,
+ QEMU_IFA_CACHEINFO,
+ QEMU_IFA_MULTICAST,
+ QEMU_IFA_FLAGS,
+ QEMU_IFA_RT_PRIORITY,
+ QEMU_IFA_TARGET_NETNSID,
+ QEMU_IFA_PROTO,
+ QEMU__IFA__MAX,
+};
+
enum {
QEMU_IFLA_BR_UNSPEC,
QEMU_IFLA_BR_FORWARD_DELAY,
@@ -141,6 +161,14 @@ enum {
QEMU_IFLA_PROTO_DOWN_REASON,
QEMU_IFLA_PARENT_DEV_NAME,
QEMU_IFLA_PARENT_DEV_BUS_NAME,
+ QEMU_IFLA_GRO_MAX_SIZE,
+ QEMU_IFLA_TSO_MAX_SIZE,
+ QEMU_IFLA_TSO_MAX_SEGS,
+ QEMU_IFLA_ALLMULTI,
+ QEMU_IFLA_DEVLINK_PORT,
+ QEMU_IFLA_GSO_IPV4_MAX_SIZE,
+ QEMU_IFLA_GRO_IPV4_MAX_SIZE,
+ QEMU_IFLA_DPLL_PIN,
QEMU___IFLA_MAX
};
@@ -982,6 +1010,22 @@ static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr,
return 0;
}
+static abi_long host_to_target_data_prop_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ switch (nlattr->nla_type) {
+ /* string */
+ case QEMU_IFLA_ALT_IFNAME:
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host PROP type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
@@ -990,7 +1034,7 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
struct rtnl_link_ifmap *map;
struct linkinfo_context li_context;
- switch (rtattr->rta_type) {
+ switch (rtattr->rta_type & NLA_TYPE_MASK) {
/* binary stream */
case QEMU_IFLA_ADDRESS:
case QEMU_IFLA_BROADCAST:
@@ -1028,6 +1072,12 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
case QEMU_IFLA_CARRIER_DOWN_COUNT:
case QEMU_IFLA_MIN_MTU:
case QEMU_IFLA_MAX_MTU:
+ case QEMU_IFLA_GRO_MAX_SIZE:
+ case QEMU_IFLA_TSO_MAX_SIZE:
+ case QEMU_IFLA_TSO_MAX_SEGS:
+ case QEMU_IFLA_ALLMULTI:
+ case QEMU_IFLA_GSO_IPV4_MAX_SIZE:
+ case QEMU_IFLA_GRO_IPV4_MAX_SIZE:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
break;
@@ -1123,6 +1173,10 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
NULL,
host_to_target_data_vfinfo_nlattr);
+ case QEMU_IFLA_PROP_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_prop_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n",
rtattr->rta_type);
@@ -1138,20 +1192,21 @@ static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
switch (rtattr->rta_type) {
/* binary: depends on family type */
- case IFA_ADDRESS:
- case IFA_LOCAL:
+ case QEMU_IFA_ADDRESS:
+ case QEMU_IFA_LOCAL:
+ case QEMU_IFA_PROTO:
break;
/* string */
- case IFA_LABEL:
+ case QEMU_IFA_LABEL:
break;
/* u32 */
- case IFA_FLAGS:
- case IFA_BROADCAST:
+ case QEMU_IFA_FLAGS:
+ case QEMU_IFA_BROADCAST:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
break;
/* struct ifa_cacheinfo */
- case IFA_CACHEINFO:
+ case QEMU_IFA_CACHEINFO:
ci = RTA_DATA(rtattr);
ci->ifa_prefered = tswap32(ci->ifa_prefered);
ci->ifa_valid = tswap32(ci->ifa_valid);
@@ -1209,6 +1264,35 @@ static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
return 0;
}
+static abi_long host_to_target_data_neigh_rtattr(struct rtattr *rtattr)
+{
+ struct nda_cacheinfo *ndac;
+ uint32_t *u32;
+
+ switch (rtattr->rta_type) {
+ case NDA_UNSPEC:
+ case NDA_DST:
+ case NDA_LLADDR:
+ break;
+ case NDA_PROBES:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ case NDA_CACHEINFO:
+ ndac = RTA_DATA(rtattr);
+ ndac->ndm_confirmed = tswap32(ndac->ndm_confirmed);
+ ndac->ndm_used = tswap32(ndac->ndm_used);
+ ndac->ndm_updated = tswap32(ndac->ndm_updated);
+ ndac->ndm_refcnt = tswap32(ndac->ndm_refcnt);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host to target NEIGH type: %d\n",
+ rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
uint32_t rtattr_len)
{
@@ -1230,12 +1314,20 @@ static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
host_to_target_data_route_rtattr);
}
+static abi_long host_to_target_neigh_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+ host_to_target_data_neigh_rtattr);
+}
+
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
uint32_t nlmsg_len;
struct ifinfomsg *ifi;
struct ifaddrmsg *ifa;
struct rtmsg *rtm;
+ struct ndmsg *ndm;
nlmsg_len = nlh->nlmsg_len;
switch (nlh->nlmsg_type) {
@@ -1262,6 +1354,17 @@ static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
}
break;
+ case RTM_NEWNEIGH:
+ case RTM_DELNEIGH:
+ case RTM_GETNEIGH:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ndm))) {
+ ndm = NLMSG_DATA(nlh);
+ ndm->ndm_ifindex = tswap32(ndm->ndm_ifindex);
+ ndm->ndm_state = tswap16(ndm->ndm_state);
+ host_to_target_neigh_rtattr(NDM_RTA(ndm),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*ndm)));
+ }
+ break;
case RTM_NEWROUTE:
case RTM_DELROUTE:
case RTM_GETROUTE:
@@ -1398,8 +1501,8 @@ static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
switch (rtattr->rta_type) {
/* binary: depends on family type */
- case IFA_LOCAL:
- case IFA_ADDRESS:
+ case QEMU_IFA_LOCAL:
+ case QEMU_IFA_ADDRESS:
break;
default:
qemu_log_mask(LOG_UNIMP, "Unknown target IFA type: %d\n",
@@ -1409,6 +1512,35 @@ static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
return 0;
}
+static abi_long target_to_host_data_neigh_rtattr(struct rtattr *rtattr)
+{
+ struct nda_cacheinfo *ndac;
+ uint32_t *u32;
+
+ switch (rtattr->rta_type) {
+ case NDA_UNSPEC:
+ case NDA_DST:
+ case NDA_LLADDR:
+ break;
+ case NDA_PROBES:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ case NDA_CACHEINFO:
+ ndac = RTA_DATA(rtattr);
+ ndac->ndm_confirmed = tswap32(ndac->ndm_confirmed);
+ ndac->ndm_used = tswap32(ndac->ndm_used);
+ ndac->ndm_updated = tswap32(ndac->ndm_updated);
+ ndac->ndm_refcnt = tswap32(ndac->ndm_refcnt);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown target NEIGH type: %d\n",
+ rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
@@ -1447,6 +1579,13 @@ static void target_to_host_addr_rtattr(struct rtattr *rtattr,
target_to_host_data_addr_rtattr);
}
+static void target_to_host_neigh_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ target_to_host_for_each_rtattr(rtattr, rtattr_len,
+ target_to_host_data_neigh_rtattr);
+}
+
static void target_to_host_route_rtattr(struct rtattr *rtattr,
uint32_t rtattr_len)
{
@@ -1459,6 +1598,7 @@ static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
struct ifinfomsg *ifi;
struct ifaddrmsg *ifa;
struct rtmsg *rtm;
+ struct ndmsg *ndm;
switch (nlh->nlmsg_type) {
case RTM_NEWLINK:
@@ -1485,6 +1625,17 @@ static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
NLMSG_LENGTH(sizeof(*ifa)));
}
break;
+ case RTM_NEWNEIGH:
+ case RTM_DELNEIGH:
+ case RTM_GETNEIGH:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ndm))) {
+ ndm = NLMSG_DATA(nlh);
+ ndm->ndm_ifindex = tswap32(ndm->ndm_ifindex);
+ ndm->ndm_state = tswap16(ndm->ndm_state);
+ target_to_host_neigh_rtattr(NDM_RTA(ndm), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*ndm)));
+ }
+ break;
case RTM_NEWROUTE:
case RTM_DELROUTE:
case RTM_GETROUTE:
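
For reference, the NDM_RTA() macro introduced at the top of this file locates the first attribute that follows the fixed struct ndmsg header; from there the standard RTA_OK()/RTA_NEXT() macros walk the attribute chain that the new neigh helpers byte-swap. A minimal, self-contained sketch of such a walk (dump_neigh_attrs() is hypothetical and only uses macros from <linux/netlink.h>, <linux/rtnetlink.h> and <linux/neighbour.h>):

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

#define NDM_RTA(r) ((struct rtattr *)(((char *)(r)) + \
                    NLMSG_ALIGN(sizeof(struct ndmsg))))

/* Print the type and payload size of every attribute in a neigh message. */
static void dump_neigh_attrs(struct nlmsghdr *nlh)
{
    struct ndmsg *ndm = NLMSG_DATA(nlh);
    int len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ndm));
    struct rtattr *rta = NDM_RTA(ndm);

    for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
        printf("neigh attr type %u, payload %zu bytes\n",
               (unsigned)rta->rta_type, (size_t)RTA_PAYLOAD(rta));
    }
}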
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index 04d8138..4beb3ed 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -34,6 +34,8 @@
#include "qemu/osdep.h"
#include "qemu.h"
+#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "user-internals.h"
#include "loader.h"
#include "user-mmap.h"
@@ -487,7 +489,10 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info)
stack_len += (bprm->envc + 1) * 4; /* the envp array */
+ mmap_lock();
res = load_flat_file(bprm, libinfo, 0, &stack_len);
+ mmap_unlock();
+
if (is_error(res)) {
return res;
}
diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc
index 95856eb..b47019e 100644
--- a/linux-user/gen-vdso-elfn.c.inc
+++ b/linux-user/gen-vdso-elfn.c.inc
@@ -68,28 +68,45 @@ static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx,
void *buf, bool need_bswap)
{
unsigned str_idx = shdr[sym_idx].sh_link;
- ElfN(Sym) *sym = buf + shdr[sym_idx].sh_offset;
- unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*sym);
+ ElfN(Sym) *target_sym = buf + shdr[sym_idx].sh_offset;
+ unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*target_sym);
const char *str = buf + shdr[str_idx].sh_offset;
for (unsigned i = 0; i < sym_n; ++i) {
const char *name;
+ ElfN(Sym) sym;
+ memcpy(&sym, &target_sym[i], sizeof(sym));
if (need_bswap) {
- elfN(bswap_sym)(sym + i);
+ elfN(bswap_sym)(&sym);
}
- name = str + sym[i].st_name;
+ name = str + sym.st_name;
if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) {
- sigreturn_addr = sym[i].st_value;
+ sigreturn_addr = sym.st_value;
}
if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) {
- rt_sigreturn_addr = sym[i].st_value;
+ rt_sigreturn_addr = sym.st_value;
}
}
}
-static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
+static void elfN(bswap_ps_hdrs)(ElfN(Ehdr) *ehdr)
+{
+ ElfN(Phdr) *phdr = (void *)ehdr + ehdr->e_phoff;
+ ElfN(Shdr) *shdr = (void *)ehdr + ehdr->e_shoff;
+ ElfN(Half) i;
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ elfN(bswap_phdr)(&phdr[i]);
+ }
+
+ for (i = 0; i < ehdr->e_shnum; ++i) {
+ elfN(bswap_shdr)(&shdr[i]);
+ }
+}
+
+static void elfN(process)(FILE *outf, void *buf, long len, bool need_bswap)
{
ElfN(Ehdr) *ehdr = buf;
ElfN(Phdr) *phdr;
@@ -103,24 +120,14 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
int errors = 0;
if (need_bswap) {
- elfN(bswap_ehdr)(ehdr);
+ elfN(bswap_ehdr)(buf);
+ elfN(bswap_ps_hdrs)(buf);
}
phnum = ehdr->e_phnum;
phdr = buf + ehdr->e_phoff;
- if (need_bswap) {
- for (unsigned i = 0; i < phnum; ++i) {
- elfN(bswap_phdr)(phdr + i);
- }
- }
-
shnum = ehdr->e_shnum;
shdr = buf + ehdr->e_shoff;
- if (need_bswap) {
- for (unsigned i = 0; i < shnum; ++i) {
- elfN(bswap_shdr)(shdr + i);
- }
- }
for (unsigned i = 0; i < shnum; ++i) {
switch (shdr[i].sh_type) {
case SHT_SYMTAB:
@@ -154,7 +161,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
fprintf(stderr, "LOAD segment not loaded at address 0\n");
errors++;
}
- first_segsz = phdr[i].p_filesz;
+ /*
+ * Extend the program header to cover the entire VDSO, so that
+ * load_elf_vdso() loads everything, including section headers.
+ *
+ * Require that there is no .bss, since it would break this
+ * approach.
+ */
+ if (phdr[i].p_filesz != phdr[i].p_memsz) {
+ fprintf(stderr, "LOAD segment's filesz and memsz differ\n");
+ errors++;
+ }
+ if (phdr[i].p_filesz > len) {
+ fprintf(stderr, "LOAD segment is larger than the whole VDSO\n");
+ errors++;
+ }
+ phdr[i].p_filesz = len;
+ phdr[i].p_memsz = len;
+ first_segsz = len;
if (first_segsz < ehdr->e_phoff + phnum * sizeof(*phdr)) {
fprintf(stderr, "LOAD segment does not cover PHDRs\n");
errors++;
@@ -197,17 +221,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
output_reloc(outf, buf, &phdr[i].p_paddr);
}
+ /* Relocate the section headers. */
+ for (unsigned i = 0; i < shnum; ++i) {
+ output_reloc(outf, buf, &shdr[i].sh_addr);
+ }
+
/* Relocate the DYNAMIC entries. */
if (dynamic_addr) {
- ElfN(Dyn) *dyn = buf + dynamic_ofs;
- __typeof(dyn->d_tag) tag;
+ ElfN(Dyn) *target_dyn = buf + dynamic_ofs;
+ __typeof(((ElfN(Dyn) *)target_dyn)->d_tag) tag;
do {
+ ElfN(Dyn) dyn;
+ memcpy(&dyn, target_dyn, sizeof(dyn));
if (need_bswap) {
- elfN(bswap_dyn)(dyn);
+ elfN(bswap_dyn)(&dyn);
}
- tag = dyn->d_tag;
+ tag = dyn.d_tag;
switch (tag) {
case DT_HASH:
@@ -218,7 +249,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
case DT_PLTGOT:
case DT_ADDRRNGLO ... DT_ADDRRNGHI:
/* These entries store an address in the entry. */
- output_reloc(outf, buf, &dyn->d_un.d_val);
+ output_reloc(outf, buf, &target_dyn->d_un.d_val);
break;
case DT_NULL:
@@ -235,7 +266,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
break;
case DT_SYMENT:
- if (dyn->d_un.d_val != sizeof(ElfN(Sym))) {
+ if (dyn.d_un.d_val != sizeof(ElfN(Sym))) {
fprintf(stderr, "VDSO has incorrect dynamic symbol size\n");
errors++;
}
@@ -251,7 +282,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
* ??? The RISC-V toolchain will emit these even when there
* are no relocations. Validate zeros.
*/
- if (dyn->d_un.d_val != 0) {
+ if (dyn.d_un.d_val != 0) {
fprintf(stderr, "VDSO has dynamic relocations\n");
errors++;
}
@@ -287,7 +318,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
errors++;
break;
}
- dyn++;
+ target_dyn++;
} while (tag != DT_NULL);
if (errors) {
exit(EXIT_FAILURE);
@@ -296,11 +327,11 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
/* Relocate the dynamic symbol table. */
if (dynsym_idx) {
- ElfN(Sym) *sym = buf + shdr[dynsym_idx].sh_offset;
- unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*sym);
+ ElfN(Sym) *target_sym = buf + shdr[dynsym_idx].sh_offset;
+ unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*target_sym);
for (unsigned i = 0; i < sym_n; ++i) {
- output_reloc(outf, buf, &sym[i].st_value);
+ output_reloc(outf, buf, &target_sym[i].st_value);
}
}
@@ -311,4 +342,9 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap)
if (symtab_idx) {
elfN(search_symtab)(shdr, symtab_idx, buf, need_bswap);
}
+
+ if (need_bswap) {
+ elfN(bswap_ps_hdrs)(buf);
+ elfN(bswap_ehdr)(buf);
+ }
}
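
The rewrites above follow from the vdso image now being emitted after processing (see the gen-vdso.c hunks below): whatever is byte-swapped inside the buffer must either be swapped back at the end (the ehdr and program/section headers, via elfN(bswap_ps_hdrs)) or never swapped in place at all, which is why Dyn and Sym entries are read through an aligned local copy that is swapped privately. A compressed sketch of that copy-then-swap idiom, with visit_syms()/bswap_sym() standing in for the elfN() helpers and GCC/Clang bswap builtins assumed:

#include <stdbool.h>
#include <string.h>
#include <elf.h>

/* Byte-swap only the fields this sketch actually reads. */
static void bswap_sym(Elf64_Sym *sym)
{
    sym->st_name = __builtin_bswap32(sym->st_name);
    sym->st_value = __builtin_bswap64(sym->st_value);
}

static void visit_syms(const Elf64_Sym *target_sym, unsigned sym_n,
                       bool need_bswap)
{
    for (unsigned i = 0; i < sym_n; ++i) {
        Elf64_Sym sym;

        memcpy(&sym, &target_sym[i], sizeof(sym));  /* local, aligned copy */
        if (need_bswap) {
            bswap_sym(&sym);                        /* swap the copy only */
        }
        /* ... inspect sym.st_name / sym.st_value; the buffer is untouched. */
    }
}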
diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c
index 31e333b..fce9d5c 100644
--- a/linux-user/gen-vdso.c
+++ b/linux-user/gen-vdso.c
@@ -56,13 +56,14 @@ static unsigned rt_sigreturn_addr;
int main(int argc, char **argv)
{
- FILE *inf, *outf;
+ FILE *inf = NULL, *outf = NULL;
long total_len;
const char *prefix = "vdso";
const char *inf_name;
const char *outf_name = NULL;
- unsigned char *buf;
+ unsigned char *buf = NULL;
bool need_bswap;
+ int ret = EXIT_FAILURE;
while (1) {
int opt = getopt(argc, argv, "o:p:r:s:");
@@ -129,24 +130,6 @@ int main(int argc, char **argv)
fprintf(stderr, "%s: incomplete read\n", inf_name);
return EXIT_FAILURE;
}
- fclose(inf);
-
- /*
- * Write out the vdso image now, before we make local changes.
- */
-
- fprintf(outf,
- "/* Automatically generated from linux-user/gen-vdso.c. */\n"
- "\n"
- "static const uint8_t %s_image[] = {",
- prefix);
- for (long i = 0; i < total_len; ++i) {
- if (i % 12 == 0) {
- fputs("\n ", outf);
- }
- fprintf(outf, " 0x%02x,", buf[i]);
- }
- fprintf(outf, "\n};\n\n");
/*
* Identify which elf flavor we're processing.
@@ -179,14 +162,17 @@ int main(int argc, char **argv)
* Output relocation addresses as we go.
*/
- fprintf(outf, "static const unsigned %s_relocs[] = {\n", prefix);
+ fprintf(outf,
+ "/* Automatically generated by linux-user/gen-vdso.c. */\n"
+ "\n"
+ "static const unsigned %s_relocs[] = {\n", prefix);
switch (buf[EI_CLASS]) {
case ELFCLASS32:
- elf32_process(outf, buf, need_bswap);
+ elf32_process(outf, buf, total_len, need_bswap);
break;
case ELFCLASS64:
- elf64_process(outf, buf, need_bswap);
+ elf64_process(outf, buf, total_len, need_bswap);
break;
default:
fprintf(stderr, "%s: invalid elf EI_CLASS (%u)\n",
@@ -196,6 +182,20 @@ int main(int argc, char **argv)
fprintf(outf, "};\n\n"); /* end vdso_relocs. */
+ /*
+ * Write out the vdso image now, after we made local changes.
+ */
+ fprintf(outf,
+ "static const uint8_t %s_image[] = {",
+ prefix);
+ for (long i = 0; i < total_len; ++i) {
+ if (i % 12 == 0) {
+ fputs("\n ", outf);
+ }
+ fprintf(outf, " 0x%02x,", buf[i]);
+ }
+ fprintf(outf, "\n};\n\n");
+
fprintf(outf, "static const VdsoImageInfo %s_image_info = {\n", prefix);
fprintf(outf, " .image = %s_image,\n", prefix);
fprintf(outf, " .relocs = %s_relocs,\n", prefix);
@@ -205,19 +205,24 @@ int main(int argc, char **argv)
fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr);
fprintf(outf, "};\n");
- /*
- * Everything should have gone well.
- */
- if (fclose(outf)) {
- goto perror_outf;
+ ret = EXIT_SUCCESS;
+
+ cleanup:
+ free(buf);
+
+ if (outf && fclose(outf) != 0) {
+ ret = EXIT_FAILURE;
+ }
+ if (inf && fclose(inf) != 0) {
+ ret = EXIT_FAILURE;
}
- return EXIT_SUCCESS;
+ return ret;
perror_inf:
perror(inf_name);
- return EXIT_FAILURE;
+ goto cleanup;
perror_outf:
perror(outf_name);
- return EXIT_FAILURE;
+ goto cleanup;
}
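
The gen-vdso.c changes above restructure error handling into a single-exit form: ret starts as EXIT_FAILURE, every error path reaches one cleanup label (possibly via a perror label), and a failed fclose() on either stream can still demote success to failure. A distilled, stand-alone sketch of that shape (example_tool() is purely illustrative):

#include <stdio.h>
#include <stdlib.h>

static int example_tool(const char *inf_name, const char *outf_name)
{
    int ret = EXIT_FAILURE;
    FILE *inf = NULL, *outf = NULL;
    unsigned char *buf = NULL;

    inf = fopen(inf_name, "rb");
    if (!inf) {
        goto cleanup;
    }
    outf = fopen(outf_name, "w");
    if (!outf) {
        goto cleanup;
    }

    /* ... read, process, write ... */

    ret = EXIT_SUCCESS;

 cleanup:
    free(buf);
    if (outf && fclose(outf) != 0) {
        ret = EXIT_FAILURE;     /* a failed flush still fails the tool */
    }
    if (inf && fclose(inf) != 0) {
        ret = EXIT_FAILURE;
    }
    return ret;
}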
diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h
index 6fd05b7..b347402 100644
--- a/linux-user/generic/signal.h
+++ b/linux-user/generic/signal.h
@@ -15,7 +15,6 @@
#define TARGET_SA_RESTART 0x10000000
#define TARGET_SA_NODEFER 0x40000000
#define TARGET_SA_RESETHAND 0x80000000
-#define TARGET_SA_RESTORER 0x04000000
#define TARGET_SIGHUP 1
#define TARGET_SIGINT 2
diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c
index d41159e..e18a018 100644
--- a/linux-user/hexagon/cpu_loop.c
+++ b/linux-user/hexagon/cpu_loop.c
@@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "internal.h"
@@ -42,7 +42,7 @@ void cpu_loop(CPUHexagonState *env)
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
- case HEX_EXCP_TRAP0:
+ case HEX_EVENT_TRAP0:
syscallnum = env->gpr[6];
env->gpr[HEX_REG_PC] += 4;
ret = do_syscall(env,
@@ -60,7 +60,7 @@ void cpu_loop(CPUHexagonState *env)
env->gpr[0] = ret;
}
break;
- case HEX_EXCP_PC_NOT_ALIGNED:
+ case HEX_CAUSE_PC_NOT_ALIGNED:
force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN,
env->gpr[HEX_REG_R31]);
break;
@@ -79,7 +79,7 @@ void cpu_loop(CPUHexagonState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
env->gpr[HEX_REG_PC] = regs->sepc;
env->gpr[HEX_REG_SP] = regs->sp;
diff --git a/linux-user/hexagon/meson.build b/linux-user/hexagon/meson.build
new file mode 100644
index 0000000..d203c3e
--- /dev/null
+++ b/linux-user/hexagon/meson.build
@@ -0,0 +1,6 @@
+
+syscall_nr_generators += {
+ 'hexagon': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/hexagon/syscall.tbl b/linux-user/hexagon/syscall.tbl
new file mode 100644
index 0000000..845e24e
--- /dev/null
+++ b/linux-user/hexagon/syscall.tbl
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/hexagon/syscall_nr.h b/linux-user/hexagon/syscall_nr.h
deleted file mode 100644
index b047dbb..0000000
--- a/linux-user/hexagon/syscall_nr.h
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_HEXAGON_SYSCALL_NR_H
-#define LINUX_USER_HEXAGON_SYSCALL_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl64 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_renameat 38
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs64 43
-#define TARGET_NR_fstatfs64 44
-#define TARGET_NR_truncate64 45
-#define TARGET_NR_ftruncate64 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_llseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile64 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_fstatat64 79
-#define TARGET_NR_fstat64 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap2 222
-#define TARGET_NR_fadvise64_64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_io_pgetevents 292
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_clock_gettime64 403
-#define TARGET_NR_clock_settime64 404
-#define TARGET_NR_clock_adjtime64 405
-#define TARGET_NR_clock_getres_time64 406
-#define TARGET_NR_clock_nanosleep_time64 407
-#define TARGET_NR_timer_gettime64 408
-#define TARGET_NR_timer_settime64 409
-#define TARGET_NR_timerfd_gettime64 410
-#define TARGET_NR_timerfd_settime64 411
-#define TARGET_NR_utimensat_time64 412
-#define TARGET_NR_pselect6_time64 413
-#define TARGET_NR_ppoll_time64 414
-#define TARGET_NR_io_pgetevents_time64 416
-#define TARGET_NR_recvmmsg_time64 417
-#define TARGET_NR_mq_timedsend_time64 418
-#define TARGET_NR_mq_timedreceive_time64 419
-#define TARGET_NR_semtimedop_time64 420
-#define TARGET_NR_rt_sigtimedwait_time64 421
-#define TARGET_NR_futex_time64 422
-#define TARGET_NR_sched_rr_get_interval_time64 423
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
-
-#endif /* LINUX_USER_HEXAGON_SYSCALL_NR_H */
diff --git a/linux-user/hexagon/syscallhdr.sh b/linux-user/hexagon/syscallhdr.sh
new file mode 100644
index 0000000..ed605c0
--- /dev/null
+++ b/linux-user/hexagon/syscallhdr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=LINUX_USER_HEXAGON_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ echo "#ifndef ${fileguard}"
+ echo "#define ${fileguard} 1"
+ echo ""
+
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ echo "#define TARGET_NR_${prefix}${name} $nr"
+ else
+ echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)"
+ fi
+ done
+
+ echo ""
+ echo "#endif /* ${fileguard} */"
+) > "$out"
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index bc093b8..9abaad5 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
static abi_ulong hppa_lws(CPUHPPAState *env)
@@ -99,6 +99,8 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
#endif
}
break;
+ default:
+ g_assert_not_reached();
}
break;
}
@@ -110,7 +112,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
void cpu_loop(CPUHPPAState *env)
{
CPUState *cs = env_cpu(env);
- abi_ulong ret;
+ abi_ulong ret, si_code = 0;
int trapnr;
while (1) {
@@ -167,7 +169,15 @@ void cpu_loop(CPUHPPAState *env)
force_sig_fault(TARGET_SIGFPE, TARGET_FPE_CONDTRAP, env->iaoq_f);
break;
case EXCP_ASSIST:
- force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f);
+ #define set_si_code(mask, val) \
+ if (env->fr[0] & mask) { si_code = val; }
+ set_si_code(R_FPSR_FLG_I_MASK, TARGET_FPE_FLTRES);
+ set_si_code(R_FPSR_FLG_U_MASK, TARGET_FPE_FLTUND);
+ set_si_code(R_FPSR_FLG_O_MASK, TARGET_FPE_FLTOVF);
+ set_si_code(R_FPSR_FLG_Z_MASK, TARGET_FPE_FLTDIV);
+ set_si_code(R_FPSR_FLG_V_MASK, TARGET_FPE_FLTINV);
+ #undef set_si_code
+ force_sig_fault(TARGET_SIGFPE, si_code, env->iaoq_f);
break;
case EXCP_BREAK:
force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f);
@@ -186,7 +196,7 @@ void cpu_loop(CPUHPPAState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
for (i = 1; i < 32; i++) {
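A note on the EXCP_ASSIST hunk above: each set flag in fr[0] overwrites si_code in turn, so when several FP exception flags are raised at once the last matching test wins (invalid operation outranks divide-by-zero, overflow, underflow and inexact). A self-contained model of that priority scheme, using stand-in constants rather than QEMU's real mask and si_code values:

    #include <stdint.h>

    enum { FLG_I = 1u << 0, FLG_U = 1u << 1, FLG_O = 1u << 2,
           FLG_Z = 1u << 3, FLG_V = 1u << 4 };              /* stand-in flag bits */
    enum { FLTRES = 1, FLTUND, FLTOVF, FLTDIV, FLTINV };    /* stand-in si_codes  */

    static int fpsr_flags_to_si_code(uint32_t flags)
    {
        int si_code = 0;
        if (flags & FLG_I) { si_code = FLTRES; }
        if (flags & FLG_U) { si_code = FLTUND; }
        if (flags & FLG_O) { si_code = FLTOVF; }
        if (flags & FLG_Z) { si_code = FLTDIV; }
        if (flags & FLG_V) { si_code = FLTINV; }  /* last assignment wins */
        return si_code;
    }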
diff --git a/linux-user/hppa/syscall.tbl b/linux-user/hppa/syscall.tbl
index aabc37f..647f08e 100644
--- a/linux-user/hppa/syscall.tbl
+++ b/linux-user/hppa/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for parisc
#
@@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
-98 common recv sys_recv
+98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@@ -131,11 +131,11 @@
116 common sysinfo sys_sysinfo compat_sys_sysinfo
117 common shutdown sys_shutdown
118 common fsync sys_fsync
-119 common madvise sys_madvise
+119 common madvise parisc_madvise
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
-123 common recvfrom sys_recvfrom
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@@ -147,7 +147,7 @@
131 common quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
136 32 personality parisc_personality
136 64 personality sys_personality
@@ -245,7 +245,7 @@
# 220 was alloc_hugepages
# 221 was free_hugepages
222 common exit_group sys_exit_group
-223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+223 common lookup_dcookie sys_ni_syscall
224 common epoll_create sys_epoll_create
225 common epoll_ctl sys_epoll_ctl
226 common epoll_wait sys_epoll_wait
@@ -292,9 +292,9 @@
258 32 clock_nanosleep sys_clock_nanosleep_time32
258 64 clock_nanosleep sys_clock_nanosleep
259 common tgkill sys_tgkill
-260 common mbind sys_mbind compat_sys_mbind
-261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+260 common mbind sys_mbind
+261 common get_mempolicy sys_get_mempolicy
+262 common set_mempolicy sys_set_mempolicy
# 263 was vserver
264 common add_key sys_add_key
265 common request_key sys_request_key
@@ -331,7 +331,7 @@
292 64 sync_file_range sys_sync_file_range
293 common tee sys_tee
294 common vmsplice sys_vmsplice
-295 common move_pages sys_move_pages compat_sys_move_pages
+295 common move_pages sys_move_pages
296 common getcpu sys_getcpu
297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
298 common statfs64 sys_statfs64 compat_sys_statfs64
@@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
@@ -400,6 +400,7 @@
353 common pkey_free sys_pkey_free
354 common rseq sys_rseq
355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
+356 common cacheflush sys_cacheflush
# up to 402 is unassigned and reserved for arch specific syscalls
403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
404 32 clock_settime64 sys_clock_settime sys_clock_settime
@@ -413,7 +414,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
@@ -440,7 +441,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/hppa/syscallhdr.sh b/linux-user/hppa/syscallhdr.sh
index ac91a95..bf1c1d4 100644
--- a/linux-user/hppa/syscallhdr.sh
+++ b/linux-user/hppa/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index 92beb68..d96d555 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -21,7 +21,7 @@
#include "qemu.h"
#include "qemu/timer.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "user-mmap.h"
@@ -172,6 +172,7 @@ static void emulate_vsyscall(CPUX86State *env)
/*
* Perform the syscall. None of the vsyscalls should need restarting.
*/
+ get_task_state(env_cpu(env))->orig_ax = syscall;
ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI],
env->regs[R_EDX], env->regs[10], env->regs[8],
env->regs[9], 0, 0);
@@ -221,6 +222,7 @@ void cpu_loop(CPUX86State *env)
case EXCP_SYSCALL:
#endif
/* linux syscall from int $0x80 */
+ get_task_state(cs)->orig_ax = env->regs[R_EAX];
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EBX],
@@ -239,6 +241,7 @@ void cpu_loop(CPUX86State *env)
#ifdef TARGET_X86_64
case EXCP_SYSCALL:
/* linux syscall from syscall instruction. */
+ get_task_state(cs)->orig_ax = env->regs[R_EAX];
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
@@ -328,7 +331,7 @@ static void target_cpu_free(void *obj)
g_free(obj);
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
bool is64 = (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) != 0;
diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c
index cb90711..0f11dba 100644
--- a/linux-user/i386/signal.c
+++ b/linux-user/i386/signal.c
@@ -754,8 +754,8 @@ static bool restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
env->eip = tswapl(sc->rip);
#endif
- cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
- cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
+ cpu_x86_load_seg(env, R_CS, lduw_le_p(&sc->cs) | 3);
+ cpu_x86_load_seg(env, R_SS, lduw_le_p(&sc->ss) | 3);
tmpflags = tswapl(sc->eflags);
env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
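The lduw_p to lduw_le_p change matters because the x86 sigcontext selectors are stored in guest (little-endian) byte order, while lduw_p reads in host order; the explicit little-endian load keeps big-endian hosts correct. A minimal, self-contained sketch of a fixed little-endian 16-bit load (not QEMU's implementation, just the behaviour lduw_le_p provides):

    #include <stdint.h>

    /* Read a 16-bit little-endian value regardless of host byte order. */
    static uint16_t load_le16(const void *p)
    {
        const uint8_t *b = p;
        return (uint16_t)(b[0] | (b[1] << 8));
    }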
diff --git a/linux-user/i386/syscall_32.tbl b/linux-user/i386/syscall_32.tbl
index 4bbc267..534c74b 100644
--- a/linux-user/i386/syscall_32.tbl
+++ b/linux-user/i386/syscall_32.tbl
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# 32-bit system call numbers and entry vectors
#
# The format is:
-# <number> <abi> <name> <entry point> <compat entry point>
+# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
#
# The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for
# sys_*() system calls and compat_sys_*() compat system calls if
@@ -12,7 +13,7 @@
# The abi is always "i386" for this file.
#
0 i386 restart_syscall sys_restart_syscall
-1 i386 exit sys_exit
+1 i386 exit sys_exit - noreturn
2 i386 fork sys_fork
3 i386 read sys_read
4 i386 write sys_write
@@ -145,7 +146,7 @@
131 i386 quotactl sys_quotactl
132 i386 getpgid sys_getpgid
133 i386 fchdir sys_fchdir
-134 i386 bdflush sys_bdflush
+134 i386 bdflush sys_ni_syscall
135 i386 sysfs sys_sysfs
136 i386 personality sys_personality
137 i386 afs_syscall
@@ -263,8 +264,8 @@
249 i386 io_cancel sys_io_cancel
250 i386 fadvise64 sys_ia32_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
-252 i386 exit_group sys_exit_group
-253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+252 i386 exit_group sys_exit_group - noreturn
+253 i386 lookup_dcookie
254 i386 epoll_create sys_epoll_create
255 i386 epoll_ctl sys_epoll_ctl
256 i386 epoll_wait sys_epoll_wait
@@ -286,7 +287,7 @@
272 i386 fadvise64_64 sys_ia32_fadvise64_64
273 i386 vserver
274 i386 mbind sys_mbind
-275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+275 i386 get_mempolicy sys_get_mempolicy
276 i386 set_mempolicy sys_set_mempolicy
277 i386 mq_open sys_mq_open compat_sys_mq_open
278 i386 mq_unlink sys_mq_unlink
@@ -328,7 +329,7 @@
314 i386 sync_file_range sys_ia32_sync_file_range
315 i386 tee sys_tee
316 i386 vmsplice sys_vmsplice
-317 i386 move_pages sys_move_pages compat_sys_move_pages
+317 i386 move_pages sys_move_pages
318 i386 getcpu sys_getcpu
319 i386 epoll_pwait sys_epoll_pwait
320 i386 utimensat sys_utimensat_time32
@@ -420,7 +421,7 @@
412 i386 utimensat_time64 sys_utimensat
413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 i386 io_pgetevents_time64 sys_io_pgetevents
+416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 i386 mq_timedsend_time64 sys_mq_timedsend
419 i386 mq_timedreceive_time64 sys_mq_timedreceive
@@ -447,7 +448,23 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 i386 quotactl_fd sys_quotactl_fd
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self
+447 i386 memfd_secret sys_memfd_secret
+448 i386 process_mrelease sys_process_mrelease
+449 i386 futex_waitv sys_futex_waitv
+450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node
+451 i386 cachestat sys_cachestat
+452 i386 fchmodat2 sys_fchmodat2
+453 i386 map_shadow_stack sys_map_shadow_stack
+454 i386 futex_wake sys_futex_wake
+455 i386 futex_wait sys_futex_wait
+456 i386 futex_requeue sys_futex_requeue
+457 i386 statmount sys_statmount
+458 i386 listmount sys_listmount
+459 i386 lsm_get_self_attr sys_lsm_get_self_attr
+460 i386 lsm_set_self_attr sys_lsm_set_self_attr
+461 i386 lsm_list_modules sys_lsm_list_modules
+462 i386 mseal sys_mseal
diff --git a/linux-user/i386/syscallhdr.sh b/linux-user/i386/syscallhdr.sh
index b2eca96..938a793 100644
--- a/linux-user/i386/syscallhdr.sh
+++ b/linux-user/i386/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h
index 9315cba..eee792e 100644
--- a/linux-user/i386/target_signal.h
+++ b/linux-user/i386/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/loongarch64/Makefile.vdso b/linux-user/loongarch64/Makefile.vdso
index 369de13..1d760b1 100644
--- a/linux-user/loongarch64/Makefile.vdso
+++ b/linux-user/loongarch64/Makefile.vdso
@@ -8,4 +8,5 @@ all: $(SUBDIR)/vdso.so
$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
$(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \
-Wl,--build-id=sha1 -Wl,--hash-style=both \
- -Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $<
+ -Wl,--no-warn-rwx-segments -Wl,-z,max-page-size=4096 \
+ -Wl,-T,$(SUBDIR)/vdso.ld $<
diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c
index 73d7b67..ec8a06c 100644
--- a/linux-user/loongarch64/cpu_loop.c
+++ b/linux-user/loongarch64/cpu_loop.c
@@ -8,9 +8,15 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
+/* Break codes */
+enum {
+ BRK_OVERFLOW = 6,
+ BRK_DIVZERO = 7
+};
+
void cpu_loop(CPULoongArchState *env)
{
CPUState *cs = env_cpu(env);
@@ -66,9 +72,26 @@ void cpu_loop(CPULoongArchState *env)
force_sig_fault(TARGET_SIGFPE, si_code, env->pc);
break;
case EXCP_DEBUG:
- case EXCCODE_BRK:
force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
break;
+ case EXCCODE_BRK:
+ {
+ unsigned int opcode;
+
+ get_user_u32(opcode, env->pc);
+
+ switch (opcode & 0x7fff) {
+ case BRK_OVERFLOW:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->pc);
+ break;
+ case BRK_DIVZERO:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc);
+ break;
+ default:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ }
+ }
+ break;
case EXCCODE_BCE:
force_sig_fault(TARGET_SIGSYS, TARGET_SI_KERNEL, env->pc);
break;
@@ -97,7 +120,7 @@ void cpu_loop(CPULoongArchState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
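On EXCCODE_BRK the new code fetches the trapping instruction and inspects its break code: the LoongArch break instruction carries its code in the low 15 bits (hence the mask 0x7fff), and codes 6 and 7 are the conventional integer-overflow and divide-by-zero codes named by the BRK_OVERFLOW/BRK_DIVZERO enum above. A self-contained sketch of that classification, with stand-in result values:

    #include <stdint.h>

    enum { SIG_FPE_INTOVF, SIG_FPE_INTDIV, SIG_TRAP_BRKPT };  /* stand-in outcomes */

    static int classify_break(uint32_t insn)
    {
        switch (insn & 0x7fff) {        /* break code lives in the low 15 bits */
        case 6:  return SIG_FPE_INTOVF; /* BRK_OVERFLOW */
        case 7:  return SIG_FPE_INTDIV; /* BRK_DIVZERO  */
        default: return SIG_TRAP_BRKPT;
        }
    }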
diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build
index 1789653..64cb537 100644
--- a/linux-user/loongarch64/meson.build
+++ b/linux-user/loongarch64/meson.build
@@ -2,3 +2,10 @@ vdso_inc = gen_vdso.process('vdso.so',
extra_args: ['-r', '__vdso_rt_sigreturn'])
linux_user_ss.add(when: 'TARGET_LOONGARCH64', if_true: vdso_inc)
+
+
+syscall_nr_generators += {
+ 'loongarch64': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/loongarch64/syscall.tbl b/linux-user/loongarch64/syscall.tbl
new file mode 100644
index 0000000..845e24e
--- /dev/null
+++ b/linux-user/loongarch64/syscall.tbl
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/loongarch64/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h
deleted file mode 100644
index be00915..0000000
--- a/linux-user/loongarch64/syscall_nr.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H
-#define LINUX_USER_LOONGARCH_SYSCALL_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs 43
-#define TARGET_NR_fstatfs 44
-#define TARGET_NR_truncate 45
-#define TARGET_NR_ftruncate 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_lseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap 222
-#define TARGET_NR_fadvise64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_io_pgetevents 292
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_clone3 435
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_quotactl_fd 443
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_process_mrelease 448
-#define TARGET_NR_futex_waitv 449
-#define TARGET_NR_set_mempolicy_home_node 450
-#define TARGET_NR_syscalls 451
-
-#endif /* LINUX_USER_LOONGARCH_SYSCALL_NR_H */
diff --git a/linux-user/loongarch64/syscallhdr.sh b/linux-user/loongarch64/syscallhdr.sh
new file mode 100644
index 0000000..3d8a993
--- /dev/null
+++ b/linux-user/loongarch64/syscallhdr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=LINUX_USER_LOONGARCH64_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ echo "#ifndef ${fileguard}"
+ echo "#define ${fileguard} 1"
+ echo ""
+
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ echo "#define TARGET_NR_${prefix}${name} $nr"
+ else
+ echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)"
+ fi
+ done
+
+ echo ""
+ echo "#endif /* ${fileguard} */"
+) > "$out"
diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so
index bfaa26f..7c2de6c 100755
--- a/linux-user/loongarch64/vdso.so
+++ b/linux-user/loongarch64/vdso.so
Binary files differ
diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c
index f79b8e4..5da91b9 100644
--- a/linux-user/m68k/cpu_loop.c
+++ b/linux-user/m68k/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
void cpu_loop(CPUM68KState *env)
@@ -92,7 +92,7 @@ void cpu_loop(CPUM68KState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = get_task_state(cpu);
diff --git a/linux-user/m68k/syscall.tbl b/linux-user/m68k/syscall.tbl
index 79c2d24..b6094f8 100644
--- a/linux-user/m68k/syscall.tbl
+++ b/linux-user/m68k/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for m68k
#
@@ -141,7 +141,7 @@
131 common quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
136 common personality sys_personality
# 137 was afs_syscall
@@ -255,7 +255,7 @@
245 common io_cancel sys_io_cancel
246 common fadvise64 sys_fadvise64
247 common exit_group sys_exit_group
-248 common lookup_dcookie sys_lookup_dcookie
+248 common lookup_dcookie sys_ni_syscall
249 common epoll_create sys_epoll_create
250 common epoll_ctl sys_epoll_ctl
251 common epoll_wait sys_epoll_wait
@@ -442,7 +442,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/m68k/syscallhdr.sh b/linux-user/m68k/syscallhdr.sh
index eeb4d01..39b11dd 100644
--- a/linux-user/m68k/syscallhdr.sh
+++ b/linux-user/m68k/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h
index 6e0f4b7..b05b930 100644
--- a/linux-user/m68k/target_signal.h
+++ b/linux-user/m68k/target_signal.h
@@ -3,6 +3,7 @@
#include "../generic/signal.h"
+#define TARGET_ARCH_HAS_SA_RESTORER 1
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/main.c b/linux-user/main.c
index 7d3cf45..5ac5b55 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -39,7 +39,7 @@
#include "qemu/module.h"
#include "qemu/plugin.h"
#include "user/guest-base.h"
-#include "exec/exec-all.h"
+#include "user/page-protection.h"
#include "exec/gdbstub.h"
#include "gdbstub/user.h"
#include "tcg/startup.h"
@@ -49,7 +49,7 @@
#include "elf.h"
#include "trace/control.h"
#include "target_elf.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "crypto/init.h"
#include "fd-trans.h"
#include "signal-common.h"
@@ -71,6 +71,7 @@ char *exec_path;
char real_exec_path[PATH_MAX];
static bool opt_one_insn_per_tb;
+static unsigned long opt_tb_size;
static const char *argv0;
static const char *gdbstub;
static envlist_t *envlist;
@@ -121,6 +122,7 @@ static const char *last_log_filename;
#endif
unsigned long reserved_va;
+unsigned long guest_addr_max;
static void usage(int exitcode);
@@ -412,11 +414,25 @@ static void handle_arg_reserved_va(const char *arg)
reserved_va = val ? val - 1 : 0;
}
+static const char *rtsig_map = CONFIG_QEMU_RTSIG_MAP;
+
+static void handle_arg_rtsig_map(const char *arg)
+{
+ rtsig_map = arg;
+}
+
static void handle_arg_one_insn_per_tb(const char *arg)
{
opt_one_insn_per_tb = true;
}
+static void handle_arg_tb_size(const char *arg)
+{
+ if (qemu_strtoul(arg, NULL, 0, &opt_tb_size)) {
+ usage(EXIT_FAILURE);
+ }
+}
+
static void handle_arg_strace(const char *arg)
{
enable_strace = true;
@@ -494,6 +510,9 @@ static const struct qemu_argument arg_table[] = {
"address", "set guest_base address to 'address'"},
{"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
"size", "reserve 'size' bytes for guest virtual address space"},
+ {"t", "QEMU_RTSIG_MAP", true, handle_arg_rtsig_map,
+ "tsig hsig n[,...]",
+ "map target rt signals [tsig,tsig+n) to [hsig,hsig+n]"},
{"d", "QEMU_LOG", true, handle_arg_log,
"item[,...]", "enable logging of specified items "
"(use '-d help' for a list of items)"},
@@ -506,6 +525,8 @@ static const struct qemu_argument arg_table[] = {
{"one-insn-per-tb",
"QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb,
"", "run with one guest instruction per emulated TB"},
+ {"tb-size", "QEMU_TB_SIZE", true, handle_arg_tb_size,
+ "size", "TCG translation block cache size"},
{"strace", "QEMU_STRACE", false, handle_arg_strace,
"", "log system calls"},
{"seed", "QEMU_RAND_SEED", true, handle_arg_seed,
@@ -755,8 +776,9 @@ int main(int argc, char **argv, char **envp)
/*
* Manage binfmt-misc open-binary flag
*/
+ errno = 0;
execfd = qemu_getauxval(AT_EXECFD);
- if (execfd == 0) {
+ if (errno != 0) {
execfd = open(exec_path, O_RDONLY);
if (execfd < 0) {
printf("Error while loading %s: %s\n", exec_path, strerror(errno));
@@ -796,6 +818,8 @@ int main(int argc, char **argv, char **envp)
accel_init_interfaces(ac);
object_property_set_bool(OBJECT(accel), "one-insn-per-tb",
opt_one_insn_per_tb, &error_abort);
+ object_property_set_int(OBJECT(accel), "tb-size",
+ opt_tb_size, &error_abort);
ac->init_machine(NULL);
}
@@ -835,6 +859,13 @@ int main(int argc, char **argv, char **envp)
/* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
reserved_va = max_reserved_va;
}
+ if (reserved_va != 0) {
+ guest_addr_max = reserved_va;
+ } else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) {
+ guest_addr_max = UINT32_MAX;
+ } else {
+ guest_addr_max = ~0ul;
+ }
/*
* Temporarily disable
@@ -1001,7 +1032,7 @@ int main(int argc, char **argv, char **envp)
target_set_brk(info->brk);
syscall_init();
- signal_init();
+ signal_init(rtsig_map);
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
generating the prologue until now so that the prologue can take
@@ -1011,12 +1042,7 @@ int main(int argc, char **argv, char **envp)
target_cpu_copy_regs(env, regs);
if (gdbstub) {
- if (gdbserver_start(gdbstub) < 0) {
- fprintf(stderr, "qemu: could not open gdbserver on %s\n",
- gdbstub);
- exit(EXIT_FAILURE);
- }
- gdb_handlesig(cpu, 0, NULL, NULL, 0);
+ gdbserver_start(gdbstub, &error_fatal);
}
#ifdef CONFIG_SEMIHOSTING
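The AT_EXECFD hunk above switches from testing the returned value against 0 to the errno convention: errno is cleared before the call and a non-zero errno afterwards means "no AT_EXECFD entry", which removes the ambiguity of a genuinely zero value. glibc's getauxval(3) supports this pattern since 2.19 by setting errno to ENOENT when the entry is absent. A minimal, runnable illustration using the C library call directly (not QEMU's qemu_getauxval wrapper):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/auxv.h>      /* getauxval, AT_EXECFD */

    int main(void)
    {
        errno = 0;
        unsigned long execfd = getauxval(AT_EXECFD);
        if (errno != 0) {
            /* No AT_EXECFD entry: fall back to opening the binary by path. */
            printf("AT_EXECFD not present\n");
        } else {
            printf("AT_EXECFD = %lu\n", execfd);
        }
        return 0;
    }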
diff --git a/linux-user/meson.build b/linux-user/meson.build
index bc41e8c..f47a213 100644
--- a/linux-user/meson.build
+++ b/linux-user/meson.build
@@ -27,6 +27,7 @@ linux_user_ss.add(libdw)
linux_user_ss.add(when: 'TARGET_HAS_BFLT', if_true: files('flatload.c'))
linux_user_ss.add(when: 'TARGET_I386', if_true: files('vm86.c'))
linux_user_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', if_true: files('semihost.c'))
+linux_user_ss.add(when: 'CONFIG_TCG_PLUGINS', if_true: files('plugin-api.c'))
syscall_nr_generators = {}
@@ -38,6 +39,7 @@ gen_vdso = generator(gen_vdso_exe, output: '@BASENAME@.c.inc',
subdir('aarch64')
subdir('alpha')
subdir('arm')
+subdir('hexagon')
subdir('hppa')
subdir('i386')
subdir('loongarch64')
@@ -45,6 +47,7 @@ subdir('m68k')
subdir('microblaze')
subdir('mips64')
subdir('mips')
+subdir('openrisc')
subdir('ppc')
subdir('riscv')
subdir('s390x')
diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c
index 212e62d..87236c1 100644
--- a/linux-user/microblaze/cpu_loop.c
+++ b/linux-user/microblaze/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
void cpu_loop(CPUMBState *env)
@@ -127,7 +127,7 @@ void cpu_loop(CPUMBState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
env->regs[0] = regs->r0;
env->regs[1] = regs->r1;
diff --git a/linux-user/microblaze/syscall.tbl b/linux-user/microblaze/syscall.tbl
index b11395a..e3b6438 100644
--- a/linux-user/microblaze/syscall.tbl
+++ b/linux-user/microblaze/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for microblaze
#
@@ -141,7 +141,7 @@
131 common quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
136 common personality sys_personality
137 common afs_syscall sys_ni_syscall
@@ -260,7 +260,7 @@
250 common fadvise64 sys_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
252 common exit_group sys_exit_group
-253 common lookup_dcookie sys_lookup_dcookie
+253 common lookup_dcookie sys_ni_syscall
254 common epoll_create sys_epoll_create
255 common epoll_ctl sys_epoll_ctl
256 common epoll_wait sys_epoll_wait
@@ -448,7 +448,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/microblaze/syscallhdr.sh b/linux-user/microblaze/syscallhdr.sh
index f55dce8a..b42b669 100644
--- a/linux-user/microblaze/syscallhdr.sh
+++ b/linux-user/microblaze/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h
index 7dc5c45..ffe4442 100644
--- a/linux-user/microblaze/target_signal.h
+++ b/linux-user/microblaze/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
#endif /* MICROBLAZE_TARGET_SIGNAL_H */
diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c
index 462387a..6405806 100644
--- a/linux-user/mips/cpu_loop.c
+++ b/linux-user/mips/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "elf.h"
#include "internal.h"
@@ -211,7 +211,7 @@ done_syscall:
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = get_task_state(cpu);
diff --git a/linux-user/mips/syscall-args-o32.c.inc b/linux-user/mips/syscall-args-o32.c.inc
index a6a2c5c..780c0a8 100644
--- a/linux-user/mips/syscall-args-o32.c.inc
+++ b/linux-user/mips/syscall-args-o32.c.inc
@@ -441,3 +441,23 @@
[ 440] = 5, /* process_madvise */
[ 441] = 6, /* epoll_pwait2 */
[ 442] = 5, /* mount_setattr */
+ [ 443] = 4, /* quotactl_fd */
+ [ 444] = 3, /* landlock_create_ruleset */
+ [ 445] = 4, /* landlock_add_rule */
+ [ 446] = 2, /* landlock_restrict_self */
+ [ 447] = 1, /* memfd_secret */
+ [ 448] = 2, /* process_mrelease */
+ [ 449] = 5, /* futex_waitv */
+ [ 450] = 4, /* set_mempolicy_home_node */
+ [ 451] = 4, /* cachestat */
+ [ 452] = 4, /* fchmodat2 */
+ [ 453] = 3, /* map_shadow_stack */
+ [ 454] = 4, /* futex_wake */
+ [ 455] = 6, /* futex_wait */
+ [ 456] = 4, /* futex_requeue */
+ [ 457] = 4, /* statmount */
+ [ 458] = 4, /* listmount */
+ [ 459] = 4, /* lsm_get_self_attr */
+ [ 460] = 4, /* lsm_set_self_attr */
+ [ 461] = 3, /* lsm_list_modules */
+ [ 462] = 3, /* mseal */
diff --git a/linux-user/mips/syscall_o32.tbl b/linux-user/mips/syscall_o32.tbl
index d560c46..360055c 100644
--- a/linux-user/mips/syscall_o32.tbl
+++ b/linux-user/mips/syscall_o32.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for mips
#
@@ -27,7 +27,7 @@
17 o32 break sys_ni_syscall
# 18 was sys_stat
18 o32 unused18 sys_ni_syscall
-19 o32 lseek sys_lseek
+19 o32 lseek sys_lseek compat_sys_lseek
20 o32 getpid sys_getpid
21 o32 mount sys_mount
22 o32 umount sys_oldumount
@@ -145,7 +145,7 @@
131 o32 quotactl sys_quotactl
132 o32 getpgid sys_getpgid
133 o32 fchdir sys_fchdir
-134 o32 bdflush sys_bdflush
+134 o32 bdflush sys_ni_syscall
135 o32 sysfs sys_sysfs
136 o32 personality sys_personality sys_32_personality
137 o32 afs_syscall sys_ni_syscall
@@ -258,7 +258,7 @@
244 o32 io_submit sys_io_submit compat_sys_io_submit
245 o32 io_cancel sys_io_cancel
246 o32 exit_group sys_exit_group
-247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+247 o32 lookup_dcookie sys_ni_syscall
248 o32 epoll_create sys_epoll_create
249 o32 epoll_ctl sys_epoll_ctl
250 o32 epoll_wait sys_epoll_wait
@@ -279,9 +279,9 @@
265 o32 clock_nanosleep sys_clock_nanosleep_time32
266 o32 tgkill sys_tgkill
267 o32 utimes sys_utimes_time32
-268 o32 mbind sys_mbind compat_sys_mbind
-269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+268 o32 mbind sys_mbind
+269 o32 get_mempolicy sys_get_mempolicy
+270 o32 set_mempolicy sys_set_mempolicy
271 o32 mq_open sys_mq_open compat_sys_mq_open
272 o32 mq_unlink sys_mq_unlink
273 o32 mq_timedsend sys_mq_timedsend_time32
@@ -298,7 +298,7 @@
284 o32 inotify_init sys_inotify_init
285 o32 inotify_add_watch sys_inotify_add_watch
286 o32 inotify_rm_watch sys_inotify_rm_watch
-287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 o32 migrate_pages sys_migrate_pages
288 o32 openat sys_openat compat_sys_openat
289 o32 mkdirat sys_mkdirat
290 o32 mknodat sys_mknodat
@@ -319,7 +319,7 @@
305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
306 o32 tee sys_tee
307 o32 vmsplice sys_vmsplice
-308 o32 move_pages sys_move_pages compat_sys_move_pages
+308 o32 move_pages sys_move_pages
309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
311 o32 kexec_load sys_kexec_load compat_sys_kexec_load
@@ -403,7 +403,7 @@
412 o32 utimensat_time64 sys_utimensat sys_utimensat
413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
@@ -430,7 +430,23 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 o32 quotactl_fd sys_quotactl_fd
444 o32 landlock_create_ruleset sys_landlock_create_ruleset
445 o32 landlock_add_rule sys_landlock_add_rule
446 o32 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 o32 process_mrelease sys_process_mrelease
+449 o32 futex_waitv sys_futex_waitv
+450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node
+451 o32 cachestat sys_cachestat
+452 o32 fchmodat2 sys_fchmodat2
+453 o32 map_shadow_stack sys_map_shadow_stack
+454 o32 futex_wake sys_futex_wake
+455 o32 futex_wait sys_futex_wait
+456 o32 futex_requeue sys_futex_requeue
+457 o32 statmount sys_statmount
+458 o32 listmount sys_listmount
+459 o32 lsm_get_self_attr sys_lsm_get_self_attr
+460 o32 lsm_set_self_attr sys_lsm_set_self_attr
+461 o32 lsm_list_modules sys_lsm_list_modules
+462 o32 mseal sys_mseal
diff --git a/linux-user/mips/syscallhdr.sh b/linux-user/mips/syscallhdr.sh
index 761e3e4..cd7043e 100644
--- a/linux-user/mips/syscallhdr.sh
+++ b/linux-user/mips/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h
index b965e86..71a3231 100644
--- a/linux-user/mips/target_elf.h
+++ b/linux-user/mips/target_elf.h
@@ -12,9 +12,6 @@ static inline const char *cpu_get_model(uint32_t eflags)
if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
return "mips32r6-generic";
}
- if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) {
- return "R5900";
- }
if (eflags & EF_MIPS_NAN2008) {
return "P5600";
}
diff --git a/linux-user/mips64/syscall_n32.tbl b/linux-user/mips64/syscall_n32.tbl
index 9220909..793eca6 100644
--- a/linux-user/mips64/syscall_n32.tbl
+++ b/linux-user/mips64/syscall_n32.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for mips
#
@@ -214,7 +214,7 @@
203 n32 io_submit compat_sys_io_submit
204 n32 io_cancel sys_io_cancel
205 n32 exit_group sys_exit_group
-206 n32 lookup_dcookie sys_lookup_dcookie
+206 n32 lookup_dcookie sys_ni_syscall
207 n32 epoll_create sys_epoll_create
208 n32 epoll_ctl sys_epoll_ctl
209 n32 epoll_wait sys_epoll_wait
@@ -239,9 +239,9 @@
228 n32 clock_nanosleep sys_clock_nanosleep_time32
229 n32 tgkill sys_tgkill
230 n32 utimes sys_utimes_time32
-231 n32 mbind compat_sys_mbind
-232 n32 get_mempolicy compat_sys_get_mempolicy
-233 n32 set_mempolicy compat_sys_set_mempolicy
+231 n32 mbind sys_mbind
+232 n32 get_mempolicy sys_get_mempolicy
+233 n32 set_mempolicy sys_set_mempolicy
234 n32 mq_open compat_sys_mq_open
235 n32 mq_unlink sys_mq_unlink
236 n32 mq_timedsend sys_mq_timedsend_time32
@@ -258,7 +258,7 @@
247 n32 inotify_init sys_inotify_init
248 n32 inotify_add_watch sys_inotify_add_watch
249 n32 inotify_rm_watch sys_inotify_rm_watch
-250 n32 migrate_pages compat_sys_migrate_pages
+250 n32 migrate_pages sys_migrate_pages
251 n32 openat sys_openat
252 n32 mkdirat sys_mkdirat
253 n32 mknodat sys_mknodat
@@ -279,7 +279,7 @@
268 n32 sync_file_range sys_sync_file_range
269 n32 tee sys_tee
270 n32 vmsplice sys_vmsplice
-271 n32 move_pages compat_sys_move_pages
+271 n32 move_pages sys_move_pages
272 n32 set_robust_list compat_sys_set_robust_list
273 n32 get_robust_list compat_sys_get_robust_list
274 n32 kexec_load compat_sys_kexec_load
@@ -354,7 +354,7 @@
412 n32 utimensat_time64 sys_utimensat
413 n32 pselect6_time64 compat_sys_pselect6_time64
414 n32 ppoll_time64 compat_sys_ppoll_time64
-416 n32 io_pgetevents_time64 sys_io_pgetevents
+416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
418 n32 mq_timedsend_time64 sys_mq_timedsend
419 n32 mq_timedreceive_time64 sys_mq_timedreceive
@@ -381,7 +381,23 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 n32 quotactl_fd sys_quotactl_fd
444 n32 landlock_create_ruleset sys_landlock_create_ruleset
445 n32 landlock_add_rule sys_landlock_add_rule
446 n32 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n32 process_mrelease sys_process_mrelease
+449 n32 futex_waitv sys_futex_waitv
+450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node
+451 n32 cachestat sys_cachestat
+452 n32 fchmodat2 sys_fchmodat2
+453 n32 map_shadow_stack sys_map_shadow_stack
+454 n32 futex_wake sys_futex_wake
+455 n32 futex_wait sys_futex_wait
+456 n32 futex_requeue sys_futex_requeue
+457 n32 statmount sys_statmount
+458 n32 listmount sys_listmount
+459 n32 lsm_get_self_attr sys_lsm_get_self_attr
+460 n32 lsm_set_self_attr sys_lsm_set_self_attr
+461 n32 lsm_list_modules sys_lsm_list_modules
+462 n32 mseal sys_mseal
diff --git a/linux-user/mips64/syscall_n64.tbl b/linux-user/mips64/syscall_n64.tbl
index 9cd1c34..ebff531 100644
--- a/linux-user/mips64/syscall_n64.tbl
+++ b/linux-user/mips64/syscall_n64.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for mips
#
@@ -214,7 +214,7 @@
203 n64 io_submit sys_io_submit
204 n64 io_cancel sys_io_cancel
205 n64 exit_group sys_exit_group
-206 n64 lookup_dcookie sys_lookup_dcookie
+206 n64 lookup_dcookie sys_ni_syscall
207 n64 epoll_create sys_epoll_create
208 n64 epoll_ctl sys_epoll_ctl
209 n64 epoll_wait sys_epoll_wait
@@ -357,7 +357,23 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 n64 quotactl_fd sys_quotactl_fd
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n64 process_mrelease sys_process_mrelease
+449 n64 futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 n64 cachestat sys_cachestat
+452 n64 fchmodat2 sys_fchmodat2
+453 n64 map_shadow_stack sys_map_shadow_stack
+454 n64 futex_wake sys_futex_wake
+455 n64 futex_wait sys_futex_wait
+456 n64 futex_requeue sys_futex_requeue
+457 n64 statmount sys_statmount
+458 n64 listmount sys_listmount
+459 n64 lsm_get_self_attr sys_lsm_get_self_attr
+460 n64 lsm_set_self_attr sys_lsm_set_self_attr
+461 n64 lsm_list_modules sys_lsm_list_modules
+462 n64 mseal sys_mseal
diff --git a/linux-user/mips64/syscallhdr.sh b/linux-user/mips64/syscallhdr.sh
index ed5a451..a4339b2 100644
--- a/linux-user/mips64/syscallhdr.sh
+++ b/linux-user/mips64/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/mips64/target_elf.h b/linux-user/mips64/target_elf.h
index 5f2f2df..502af9d 100644
--- a/linux-user/mips64/target_elf.h
+++ b/linux-user/mips64/target_elf.h
@@ -9,11 +9,27 @@
#define MIPS64_TARGET_ELF_H
static inline const char *cpu_get_model(uint32_t eflags)
{
- if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
- return "I6400";
+ switch (eflags & EF_MIPS_MACH) {
+ case EF_MIPS_MACH_OCTEON:
+ case EF_MIPS_MACH_OCTEON2:
+ case EF_MIPS_MACH_OCTEON3:
+ return "Octeon68XX";
+ case EF_MIPS_MACH_LS2E:
+ return "Loongson-2E";
+ case EF_MIPS_MACH_LS2F:
+ return "Loongson-2F";
+ case EF_MIPS_MACH_LS3A:
+ return "Loongson-3A1000";
+ default:
+ break;
}
- if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) {
- return "R5900";
+ switch (eflags & EF_MIPS_ARCH) {
+ case EF_MIPS_ARCH_64R6:
+ return "I6400";
+ case EF_MIPS_ARCH_64R2:
+ return "MIPS64R2-generic";
+ default:
+ break;
}
return "5KEf";
}
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 4d09a72..002e1e6 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -21,7 +21,11 @@
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
+#include "exec/tb-flush.h"
+#include "exec/translation-block.h"
#include "qemu.h"
+#include "user/page-protection.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
@@ -284,6 +288,40 @@ static int do_munmap(void *addr, size_t len)
}
/*
+ * Perform a pread on behalf of target_mmap. We can reach EOF, we can be
+ * interrupted by signals, and in general there's no good error return path.
+ * If @zero, zero the rest of the block at EOF.
+ * Return true on success.
+ */
+static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
+{
+ while (1) {
+ ssize_t r = pread(fd, p, len, offset);
+
+ if (likely(r == len)) {
+ /* Complete */
+ return true;
+ }
+ if (r == 0) {
+ /* EOF */
+ if (zero) {
+ memset(p, 0, len);
+ }
+ return true;
+ }
+ if (r > 0) {
+ /* Short read */
+ p += r;
+ len -= r;
+ offset += r;
+ } else if (errno != EINTR) {
+ /* Error */
+ return false;
+ }
+ }
+}
+
+/*
* Map an incomplete host page.
*
* Here be dragons. This case will not work if there is an existing
@@ -357,10 +395,9 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
/* Read or zero the new guest pages. */
if (flags & MAP_ANONYMOUS) {
memset(g2h_untagged(start), 0, last - start + 1);
- } else {
- if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
- return false;
- }
+ } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1,
+ offset, true)) {
+ return false;
}
/* Put final protection */
@@ -560,9 +597,13 @@ static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
int host_prot, int flags, int page_flags,
int fd, off_t offset)
{
- void *p, *want_p = g2h_untagged(start);
+ void *p, *want_p = NULL;
abi_ulong last;
+ if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ want_p = g2h_untagged(start);
+ }
+
p = mmap(want_p, len, host_prot, flags, fd, offset);
if (p == MAP_FAILED) {
return -1;
@@ -604,17 +645,21 @@ static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
*
* However, this case is rather common with executable images,
* so the workaround is important for even trivial tests, whereas
- * the mmap of of a file being extended is less common.
+ * the mmap of a file being extended is less common.
*/
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
int mmap_flags, int page_flags, int fd,
off_t offset, int host_page_size)
{
- void *p, *want_p = g2h_untagged(start);
+ void *p, *want_p = NULL;
off_t fileend_adj = 0;
int flags = mmap_flags;
abi_ulong last, pass_last;
+ if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ want_p = g2h_untagged(start);
+ }
+
if (!(flags & MAP_ANONYMOUS)) {
struct stat sb;
@@ -740,12 +785,16 @@ static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
int flags, int page_flags, int fd,
off_t offset, int host_page_size)
{
- void *p, *want_p = g2h_untagged(start);
+ void *p, *want_p = NULL;
off_t host_offset = offset & -host_page_size;
abi_ulong last, real_start, real_last;
bool misaligned_offset = false;
size_t host_len;
+ if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ want_p = g2h_untagged(start);
+ }
+
if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
/*
* Adjust the offset to something representable on the host.
@@ -841,8 +890,7 @@ static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
}
if (misaligned_offset) {
- /* TODO: The read could be short. */
- if (pread(fd, p, host_len, offset + real_start - start) != host_len) {
+ if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) {
do_munmap(p, host_len);
return -1;
}
diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c
index a7aa586..306b4f8 100644
--- a/linux-user/openrisc/cpu_loop.c
+++ b/linux-user/openrisc/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
void cpu_loop(CPUOpenRISCState *env)
@@ -83,7 +83,7 @@ void cpu_loop(CPUOpenRISCState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
diff --git a/linux-user/openrisc/meson.build b/linux-user/openrisc/meson.build
new file mode 100644
index 0000000..273e7a0
--- /dev/null
+++ b/linux-user/openrisc/meson.build
@@ -0,0 +1,5 @@
+syscall_nr_generators += {
+ 'openrisc': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/openrisc/syscall.tbl b/linux-user/openrisc/syscall.tbl
new file mode 100644
index 0000000..845e24e
--- /dev/null
+++ b/linux-user/openrisc/syscall.tbl
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers; don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/openrisc/syscall_nr.h b/linux-user/openrisc/syscall_nr.h
deleted file mode 100644
index f7faddb..0000000
--- a/linux-user/openrisc/syscall_nr.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_OPENRISC_SYSCALL_NR_H
-#define LINUX_USER_OPENRISC_SYSCALL_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_or1k_atomic TARGET_NR_arch_specific_syscall
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl64 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_renameat 38
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs64 43
-#define TARGET_NR_fstatfs64 44
-#define TARGET_NR_truncate64 45
-#define TARGET_NR_ftruncate64 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_llseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile64 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_fstatat64 79
-#define TARGET_NR_fstat64 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap2 222
-#define TARGET_NR_fadvise64_64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_io_pgetevents 292
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_clock_gettime64 403
-#define TARGET_NR_clock_settime64 404
-#define TARGET_NR_clock_adjtime64 405
-#define TARGET_NR_clock_getres_time64 406
-#define TARGET_NR_clock_nanosleep_time64 407
-#define TARGET_NR_timer_gettime64 408
-#define TARGET_NR_timer_settime64 409
-#define TARGET_NR_timerfd_gettime64 410
-#define TARGET_NR_timerfd_settime64 411
-#define TARGET_NR_utimensat_time64 412
-#define TARGET_NR_pselect6_time64 413
-#define TARGET_NR_ppoll_time64 414
-#define TARGET_NR_io_pgetevents_time64 416
-#define TARGET_NR_recvmmsg_time64 417
-#define TARGET_NR_mq_timedsend_time64 418
-#define TARGET_NR_mq_timedreceive_time64 419
-#define TARGET_NR_semtimedop_time64 420
-#define TARGET_NR_rt_sigtimedwait_time64 421
-#define TARGET_NR_futex_time64 422
-#define TARGET_NR_sched_rr_get_interval_time64 423
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_clone3 435
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
-
-#endif /* LINUX_USER_OPENRISC_SYSCALL_NR_H */
diff --git a/linux-user/openrisc/syscallhdr.sh b/linux-user/openrisc/syscallhdr.sh
new file mode 100644
index 0000000..047e9f7
--- /dev/null
+++ b/linux-user/openrisc/syscallhdr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=LINUX_USER_OPENRISC_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ echo "#ifndef ${fileguard}"
+ echo "#define ${fileguard} 1"
+ echo ""
+
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ echo "#define TARGET_NR_${prefix}${name} $nr"
+ else
+ echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)"
+ fi
+ done
+
+ echo ""
+ echo "#endif /* ${fileguard} */"
+) > "$out"
diff --git a/linux-user/plugin-api.c b/linux-user/plugin-api.c
new file mode 100644
index 0000000..66755df
--- /dev/null
+++ b/linux-user/plugin-api.c
@@ -0,0 +1,15 @@
+/*
+ * QEMU Plugin API - linux-user-mode only implementations
+ *
+ * Common user-mode-only APIs are in plugins/api-user. These helpers
+ * are specific to linux-user.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "common-user/plugin-api.c.inc"
diff --git a/linux-user/ppc/Makefile.vdso b/linux-user/ppc/Makefile.vdso
index 3ca3c6b..e2b8fac 100644
--- a/linux-user/ppc/Makefile.vdso
+++ b/linux-user/ppc/Makefile.vdso
@@ -6,9 +6,11 @@ VPATH += $(SUBDIR)
all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so $(SUBDIR)/vdso-64le.so
LDFLAGS32 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-32.ld \
- -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1
+ -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both \
+ -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096
LDFLAGS64 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-64.ld \
- -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1
+ -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both \
+ -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096
$(SUBDIR)/vdso-32.so: vdso.S vdso-32.ld vdso-asmoffset.h
$(CC) -o $@ $(LDFLAGS32) -m32 $<
diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c
index 02204ad..2a0efaf 100644
--- a/linux-user/ppc/cpu_loop.c
+++ b/linux-user/ppc/cpu_loop.c
@@ -21,7 +21,7 @@
#include "qemu.h"
#include "qemu/timer.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
@@ -378,7 +378,7 @@ void cpu_loop(CPUPPCState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c
index a1d8c0b..24e5a02 100644
--- a/linux-user/ppc/signal.c
+++ b/linux-user/ppc/signal.c
@@ -628,7 +628,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
return 1;
- target_to_host_sigset_internal(&blocked, &set);
+ target_to_host_sigset(&blocked, &set);
set_sigmask(&blocked);
restore_user_regs(env, mcp, sig);
diff --git a/linux-user/ppc/syscall.tbl b/linux-user/ppc/syscall.tbl
index 8f052ff..4b428a4 100644
--- a/linux-user/ppc/syscall.tbl
+++ b/linux-user/ppc/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for powerpc
#
@@ -110,7 +110,7 @@
79 common settimeofday sys_settimeofday compat_sys_settimeofday
80 common getgroups sys_getgroups
81 common setgroups sys_setgroups
-82 32 select ppc_select sys_ni_syscall
+82 32 select sys_old_select compat_sys_old_select
82 64 select sys_ni_syscall
82 spu select sys_ni_syscall
83 common symlink sys_symlink
@@ -176,11 +176,11 @@
131 nospu quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
-136 32 personality sys_personality ppc64_personality
-136 64 personality ppc64_personality
-136 spu personality ppc64_personality
+136 32 personality sys_personality compat_sys_ppc64_personality
+136 64 personality sys_ppc64_personality
+136 spu personality sys_ppc64_personality
137 common afs_syscall sys_ni_syscall
138 common setfsuid sys_setfsuid
139 common setfsgid sys_setfsgid
@@ -228,8 +228,12 @@
176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-179 common pread64 sys_pread64 compat_sys_pread64
-180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+179 64 pread64 sys_pread64
+179 spu pread64 sys_pread64
+180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+180 64 pwrite64 sys_pwrite64
+180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -242,10 +246,12 @@
188 common putpmsg sys_ni_syscall
189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
-191 common readahead sys_readahead compat_sys_readahead
+191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+191 64 readahead sys_readahead
+191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
-193 32 truncate64 sys_truncate64 compat_sys_truncate64
-194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
195 32 stat64 sys_stat64
196 32 lstat64 sys_lstat64
197 32 fstat64 sys_fstat64
@@ -288,9 +294,11 @@
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
-233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+233 64 fadvise64 sys_fadvise64
+233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
-235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
237 common epoll_ctl sys_epoll_ctl
238 common epoll_wait sys_epoll_wait
@@ -323,17 +331,17 @@
251 spu utimes sys_utimes
252 common statfs64 sys_statfs64 compat_sys_statfs64
253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-254 32 fadvise64_64 ppc_fadvise64_64
+254 32 fadvise64_64 sys_ppc_fadvise64_64
254 spu fadvise64_64 sys_ni_syscall
255 common rtas sys_rtas
256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
256 64 sys_debug_setcontext sys_ni_syscall
256 spu sys_debug_setcontext sys_ni_syscall
# 257 reserved for vserver
-258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
-259 nospu mbind sys_mbind compat_sys_mbind
-260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+258 nospu migrate_pages sys_migrate_pages
+259 nospu mbind sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
264 32 mq_timedsend sys_mq_timedsend_time32
@@ -381,7 +389,7 @@
298 common faccessat sys_faccessat
299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-301 common move_pages sys_move_pages compat_sys_move_pages
+301 common move_pages sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
304 32 utimensat sys_utimensat_time32
@@ -390,8 +398,11 @@
305 common signalfd sys_signalfd compat_sys_signalfd
306 common timerfd_create sys_timerfd_create
307 common eventfd sys_eventfd
-308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
-309 nospu fallocate sys_fallocate compat_sys_fallocate
+308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2
+308 64 sync_file_range2 sys_sync_file_range2
+308 spu sync_file_range2 sys_sync_file_range2
+309 32 fallocate sys_ppc_fallocate compat_sys_fallocate
+309 64 fallocate sys_fallocate
310 nospu subpage_prot sys_subpage_prot
311 32 timerfd_settime sys_timerfd_settime32
311 64 timerfd_settime sys_timerfd_settime
@@ -495,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
@@ -522,7 +533,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_ni_syscall
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/ppc/syscallhdr.sh b/linux-user/ppc/syscallhdr.sh
index 6c44e0e..6e8b93d 100644
--- a/linux-user/ppc/syscallhdr.sh
+++ b/linux-user/ppc/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h
index 5be24e1..53fae47 100644
--- a/linux-user/ppc/target_signal.h
+++ b/linux-user/ppc/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#if !defined(TARGET_PPC64)
#define TARGET_ARCH_HAS_SETUP_FRAME
#endif
diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so
index b19baaf..0dc55e0 100755
--- a/linux-user/ppc/vdso-32.so
+++ b/linux-user/ppc/vdso-32.so
Binary files differ
diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so
index 913c831..ac1ab25 100755
--- a/linux-user/ppc/vdso-64.so
+++ b/linux-user/ppc/vdso-64.so
Binary files differ
diff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so
index 258a03b..424abb4 100755
--- a/linux-user/ppc/vdso-64le.so
+++ b/linux-user/ppc/vdso-64le.so
Binary files differ
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 2e90a97..0b19fa4 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -2,9 +2,10 @@
#define QEMU_H
#include "cpu.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "user/abitypes.h"
+#include "user/page-protection.h"
#include "syscall_defs.h"
#include "target_syscall.h"
@@ -44,7 +45,6 @@ struct image_info {
abi_ulong file_string;
uint32_t elf_flags;
int personality;
- abi_ulong alignment;
bool exec_stack;
/* Generic semihosting knows about these pointers. */
@@ -114,6 +114,10 @@ struct TaskState {
uint32_t v86flags;
uint32_t v86mask;
#endif
+#if defined(TARGET_I386)
+ /* Last syscall number. */
+ target_ulong orig_ax;
+#endif
abi_ulong child_tidptr;
#ifdef TARGET_M68K
abi_ulong tp_value;
@@ -313,6 +317,15 @@ static inline bool access_ok(CPUState *cpu, int type,
int copy_from_user(void *hptr, abi_ulong gaddr, ssize_t len);
int copy_to_user(abi_ulong gaddr, void *hptr, ssize_t len);
+/*
+ * copy_struct_from_user() copies a target struct to a host struct, in
+ * a way that guarantees backwards-compatibility for struct syscall
+ * arguments.
+ *
+ * Similar to the kernel's uaccess.h:copy_struct_from_user().
+ */
+int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize);
+
/* Functions for accessing guest memory. The tget and tput functions
read/write single values, byteswapping as necessary. The lock_user function
gets a pointer to a contiguous area of guest memory, but does not perform
@@ -349,4 +362,7 @@ void *lock_user_string(abi_ulong guest_addr);
#define unlock_user_struct(host_ptr, guest_addr, copy) \
unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
+/* Clone cpu state */
+CPUArchState *cpu_copy(CPUArchState *env);
+
#endif /* QEMU_H */
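Note on the new helper above: copy_struct_from_user() mirrors the kernel's uaccess.h helper of the same name, reading at most ksize bytes from the guest, zero-filling any remainder, and failing when an oversized guest struct carries non-zero trailing bytes. A minimal call sketch, illustrative only and not part of the patch (the struct name and error values are assumptions):

    struct target_hypothetical_attr kattr = { 0 };
    int err = copy_struct_from_user(&kattr, sizeof(kattr), guest_ptr, guest_size);
    if (err) {
        /* presumably -TARGET_EFAULT or -TARGET_E2BIG, as in the kernel helper */
        return err;
    }
    /* kattr now holds the guest data, with any newer fields zeroed */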
diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c
index 52c49c2..3ac8bbf 100644
--- a/linux-user/riscv/cpu_loop.c
+++ b/linux-user/riscv/cpu_loop.c
@@ -21,7 +21,7 @@
#include "qemu/error-report.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#include "elf.h"
#include "semihosting/common-semi.h"
@@ -47,7 +47,7 @@ void cpu_loop(CPURISCVState *env)
break;
case RISCV_EXCP_U_ECALL:
env->pc += 4;
- if (env->gpr[xA7] == TARGET_NR_arch_specific_syscall + 15) {
+ if (env->gpr[xA7] == TARGET_NR_riscv_flush_icache) {
/* riscv_flush_icache_syscall is a no-op in QEMU as
self-modifying code is automatically detected */
ret = 0;
@@ -94,7 +94,7 @@ void cpu_loop(CPURISCVState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = get_task_state(cpu);
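Note: the rename in cpu_loop() above is numerically a no-op. The old headers define TARGET_NR_arch_specific_syscall as 244, so TARGET_NR_arch_specific_syscall + 15 = 244 + 15 = 259, which matches the "259 riscv riscv_flush_icache" entry in the new syscall.tbl added below; the generated header therefore defines TARGET_NR_riscv_flush_icache to the same value.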
diff --git a/linux-user/riscv/meson.build b/linux-user/riscv/meson.build
index beb989a..b2e7df0 100644
--- a/linux-user/riscv/meson.build
+++ b/linux-user/riscv/meson.build
@@ -5,3 +5,9 @@ vdso_64_inc = gen_vdso.process('vdso-64.so',
linux_user_ss.add(when: 'TARGET_RISCV32', if_true: vdso_32_inc)
linux_user_ss.add(when: 'TARGET_RISCV64', if_true: vdso_64_inc)
+
+syscall_nr_generators += {
+ 'riscv': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/riscv/syscall.tbl b/linux-user/riscv/syscall.tbl
new file mode 100644
index 0000000..845e24e
--- /dev/null
+++ b/linux-user/riscv/syscall.tbl
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers, don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/riscv/syscall32_nr.h b/linux-user/riscv/syscall32_nr.h
deleted file mode 100644
index 412e58e..0000000
--- a/linux-user/riscv/syscall32_nr.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_RISCV_SYSCALL32_NR_H
-#define LINUX_USER_RISCV_SYSCALL32_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl64 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs64 43
-#define TARGET_NR_fstatfs64 44
-#define TARGET_NR_truncate64 45
-#define TARGET_NR_ftruncate64 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_llseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile64 71
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_fstatat64 79
-#define TARGET_NR_fstat64 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap2 222
-#define TARGET_NR_fadvise64_64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
-#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_clock_gettime64 403
-#define TARGET_NR_clock_settime64 404
-#define TARGET_NR_clock_adjtime64 405
-#define TARGET_NR_clock_getres_time64 406
-#define TARGET_NR_clock_nanosleep_time64 407
-#define TARGET_NR_timer_gettime64 408
-#define TARGET_NR_timer_settime64 409
-#define TARGET_NR_timerfd_gettime64 410
-#define TARGET_NR_timerfd_settime64 411
-#define TARGET_NR_utimensat_time64 412
-#define TARGET_NR_pselect6_time64 413
-#define TARGET_NR_ppoll_time64 414
-#define TARGET_NR_io_pgetevents_time64 416
-#define TARGET_NR_recvmmsg_time64 417
-#define TARGET_NR_mq_timedsend_time64 418
-#define TARGET_NR_mq_timedreceive_time64 419
-#define TARGET_NR_semtimedop_time64 420
-#define TARGET_NR_rt_sigtimedwait_time64 421
-#define TARGET_NR_futex_time64 422
-#define TARGET_NR_sched_rr_get_interval_time64 423
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_clone3 435
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
-
-#endif /* LINUX_USER_RISCV_SYSCALL32_NR_H */
diff --git a/linux-user/riscv/syscall64_nr.h b/linux-user/riscv/syscall64_nr.h
deleted file mode 100644
index 29e1eb2..0000000
--- a/linux-user/riscv/syscall64_nr.h
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * This file contains the system call numbers.
- * Do not modify.
- * This file is generated by scripts/gensyscalls.sh
- */
-#ifndef LINUX_USER_RISCV_SYSCALL64_NR_H
-#define LINUX_USER_RISCV_SYSCALL64_NR_H
-
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs 43
-#define TARGET_NR_fstatfs 44
-#define TARGET_NR_truncate 45
-#define TARGET_NR_ftruncate 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_lseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_newfstatat 79
-#define TARGET_NR_fstat 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap 222
-#define TARGET_NR_fadvise64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15)
-#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14)
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-#define TARGET_NR_preadv2 286
-#define TARGET_NR_pwritev2 287
-#define TARGET_NR_pkey_mprotect 288
-#define TARGET_NR_pkey_alloc 289
-#define TARGET_NR_pkey_free 290
-#define TARGET_NR_statx 291
-#define TARGET_NR_io_pgetevents 292
-#define TARGET_NR_rseq 293
-#define TARGET_NR_kexec_file_load 294
-#define TARGET_NR_pidfd_send_signal 424
-#define TARGET_NR_io_uring_setup 425
-#define TARGET_NR_io_uring_enter 426
-#define TARGET_NR_io_uring_register 427
-#define TARGET_NR_open_tree 428
-#define TARGET_NR_move_mount 429
-#define TARGET_NR_fsopen 430
-#define TARGET_NR_fsconfig 431
-#define TARGET_NR_fsmount 432
-#define TARGET_NR_fspick 433
-#define TARGET_NR_pidfd_open 434
-#define TARGET_NR_clone3 435
-#define TARGET_NR_close_range 436
-#define TARGET_NR_openat2 437
-#define TARGET_NR_pidfd_getfd 438
-#define TARGET_NR_faccessat2 439
-#define TARGET_NR_process_madvise 440
-#define TARGET_NR_epoll_pwait2 441
-#define TARGET_NR_mount_setattr 442
-#define TARGET_NR_landlock_create_ruleset 444
-#define TARGET_NR_landlock_add_rule 445
-#define TARGET_NR_landlock_restrict_self 446
-#define TARGET_NR_syscalls 447
-
-#endif /* LINUX_USER_RISCV_SYSCALL64_NR_H */
diff --git a/linux-user/riscv/syscall_nr.h b/linux-user/riscv/syscall_nr.h
deleted file mode 100644
index 0a5a2f2..0000000
--- a/linux-user/riscv/syscall_nr.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Syscall numbers from asm-generic, common for most
- * of recently-added arches including RISC-V.
- */
-
-#ifndef LINUX_USER_RISCV_SYSCALL_NR_H
-#define LINUX_USER_RISCV_SYSCALL_NR_H
-
-#ifdef TARGET_RISCV32
-# include "syscall32_nr.h"
-#else
-# include "syscall64_nr.h"
-#endif
-
-#endif
diff --git a/linux-user/riscv/syscallhdr.sh b/linux-user/riscv/syscallhdr.sh
new file mode 100644
index 0000000..4069dc5
--- /dev/null
+++ b/linux-user/riscv/syscallhdr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=LINUX_USER_RISCV_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ echo "#ifndef ${fileguard}"
+ echo "#define ${fileguard} 1"
+ echo ""
+
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ echo "#define TARGET_NR_${prefix}${name} $nr"
+ else
+ echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)"
+ fi
+ done
+
+ echo ""
+ echo "#endif /* ${fileguard} */"
+) > "$out"
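Note: the header generation above is a plain text transformation. As an illustration (not part of the patch), for the 32-bit ABI a table line such as

    403  32  clock_gettime64  sys_clock_gettime

becomes

    #define TARGET_NR_clock_gettime64 403

in the generated header; the optional prefix and offset arguments, when passed, are folded into the macro name and value respectively.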
diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c
index 8b7ac28..c912444 100644
--- a/linux-user/s390x/cpu_loop.c
+++ b/linux-user/s390x/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
@@ -180,7 +180,7 @@ void cpu_loop(CPUS390XState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
for (i = 0; i < 16; i++) {
diff --git a/linux-user/s390x/syscall.tbl b/linux-user/s390x/syscall.tbl
index 0690263..8e0d1f1 100644
--- a/linux-user/s390x/syscall.tbl
+++ b/linux-user/s390x/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# System call table for s390
#
@@ -100,7 +100,7 @@
106 common stat sys_newstat compat_sys_newstat
107 common lstat sys_newlstat compat_sys_newlstat
108 common fstat sys_newfstat compat_sys_newfstat
-110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+110 common lookup_dcookie - -
111 common vhangup sys_vhangup sys_vhangup
112 common idle - -
114 common wait4 sys_wait4 compat_sys_wait4
@@ -122,7 +122,7 @@
131 common quotactl sys_quotactl sys_quotactl
132 common getpgid sys_getpgid sys_getpgid
133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush sys_bdflush sys_bdflush
+134 common bdflush sys_ni_syscall sys_ni_syscall
135 common sysfs sys_sysfs sys_sysfs
136 common personality sys_s390_personality sys_s390_personality
137 common afs_syscall - -
@@ -274,9 +274,9 @@
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
-268 common mbind sys_mbind compat_sys_mbind
-269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+268 common mbind sys_mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
272 common mq_unlink sys_mq_unlink sys_mq_unlink
273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
@@ -293,7 +293,7 @@
284 common inotify_init sys_inotify_init sys_inotify_init
285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 common migrate_pages sys_migrate_pages sys_migrate_pages
288 common openat sys_openat compat_sys_openat
289 common mkdirat sys_mkdirat sys_mkdirat
290 common mknodat sys_mknodat sys_mknodat
@@ -317,7 +317,7 @@
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
308 common tee sys_tee sys_tee
309 common vmsplice sys_vmsplice sys_vmsplice
-310 common move_pages sys_move_pages compat_sys_move_pages
+310 common move_pages sys_move_pages sys_move_pages
311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
313 common utimes sys_utimes sys_utimes_time32
@@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 - sys_io_pgetevents
+416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive
@@ -445,7 +445,23 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue sys_futex_requeue
+457 common statmount sys_statmount sys_statmount
+458 common listmount sys_listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal sys_mseal
diff --git a/linux-user/s390x/syscallhdr.sh b/linux-user/s390x/syscallhdr.sh
index 85a99c4..ac22d42 100755
--- a/linux-user/s390x/syscallhdr.sh
+++ b/linux-user/s390x/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
index 41e0e34..738e067 100644
--- a/linux-user/s390x/target_signal.h
+++ b/linux-user/s390x/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c
index c805f9d..ee9eff3 100644
--- a/linux-user/sh4/cpu_loop.c
+++ b/linux-user/sh4/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
void cpu_loop(CPUSH4State *env)
@@ -81,7 +81,7 @@ void cpu_loop(CPUSH4State *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
diff --git a/linux-user/sh4/syscall.tbl b/linux-user/sh4/syscall.tbl
index 0b91499..cf4ec04 100644
--- a/linux-user/sh4/syscall.tbl
+++ b/linux-user/sh4/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for sh
#
@@ -141,7 +141,7 @@
131 common quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
136 common personality sys_personality
# 137 was afs_syscall
@@ -260,7 +260,7 @@
250 common fadvise64 sys_fadvise64
# 251 is unused
252 common exit_group sys_exit_group
-253 common lookup_dcookie sys_lookup_dcookie
+253 common lookup_dcookie sys_ni_syscall
254 common epoll_create sys_epoll_create
255 common epoll_ctl sys_epoll_ctl
256 common epoll_wait sys_epoll_wait
@@ -321,7 +321,7 @@
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
313 common splice sys_splice
-314 common sync_file_range sys_sync_file_range
+314 common sync_file_range sys_sh_sync_file_range6
315 common tee sys_tee
316 common vmsplice sys_vmsplice
317 common move_pages sys_move_pages
@@ -395,6 +395,7 @@
385 common pkey_alloc sys_pkey_alloc
386 common pkey_free sys_pkey_free
387 common rseq sys_rseq
+388 common sync_file_range2 sys_sync_file_range2
# room for arch specific syscalls
393 common semget sys_semget
394 common semctl sys_semctl
@@ -445,7 +446,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/sh4/syscallhdr.sh b/linux-user/sh4/syscallhdr.sh
index 0807905..cb3a5de 100644
--- a/linux-user/sh4/syscallhdr.sh
+++ b/linux-user/sh4/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h
index eee6a1a..0bde417 100644
--- a/linux-user/sh4/target_signal.h
+++ b/linux-user/sh4/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h
index f4cbe61..196d240 100644
--- a/linux-user/signal-common.h
+++ b/linux-user/signal-common.h
@@ -56,12 +56,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUArchState *env);
void process_pending_signals(CPUArchState *cpu_env);
-void signal_init(void);
+void signal_init(const char *rtsig_map);
void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
-int target_to_host_signal(int sig);
int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 63ac2df..cd0e739 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -18,9 +18,10 @@
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
+#include "qemu/cutils.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>
@@ -32,7 +33,10 @@
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
+#include "user/cpu_loop.h"
+#include "user/page-protection.h"
#include "user/safe-syscall.h"
+#include "user/signal.h"
#include "tcg/tcg.h"
/* target_siginfo_t must fit in gdbstub's siginfo save area. */
@@ -513,20 +517,83 @@ static int core_dump_signal(int sig)
}
}
-static void signal_table_init(void)
+int host_interrupt_signal;
+
+static void signal_table_init(const char *rtsig_map)
{
int hsig, tsig, count;
+ if (rtsig_map) {
+ /*
+ * Map host RT signals to target RT signals according to the
+ * user-provided specification.
+ */
+ const char *s = rtsig_map;
+
+ while (true) {
+ int i;
+
+ if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
+ fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
+ exit(EXIT_FAILURE);
+ }
+ if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
+ fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
+ exit(EXIT_FAILURE);
+ }
+ if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
+ fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < count; i++, tsig++, hsig++) {
+ if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
+ fprintf(stderr, "%d is not a target rt signal\n", tsig);
+ exit(EXIT_FAILURE);
+ }
+ if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
+ fprintf(stderr, "%d is not a host rt signal\n", hsig);
+ exit(EXIT_FAILURE);
+ }
+ if (host_to_target_signal_table[hsig]) {
+ fprintf(stderr, "%d already maps %d\n",
+ hsig, host_to_target_signal_table[hsig]);
+ exit(EXIT_FAILURE);
+ }
+ host_to_target_signal_table[hsig] = tsig;
+ }
+
+ if (*s) {
+ s++;
+ } else {
+ break;
+ }
+ }
+ } else {
+ /*
+ * Default host-to-target RT signal mapping.
+ *
+ * Signals are supported starting from TARGET_SIGRTMIN and going up
+ * until we run out of host realtime signals. Glibc uses the lower 2
+ * RT signals and (hopefully) nobody uses the upper ones.
+ * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
+ * To fix this properly we would need to do manual signal delivery
+ * multiplexed over a single host signal.
+ * Attempts to configure "missing" signals via sigaction will be
+ * silently ignored.
+ *
+ * Reserve two signals for internal usage (see below).
+ */
+
+ hsig = SIGRTMIN + 2;
+ for (tsig = TARGET_SIGRTMIN;
+ hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
+ hsig++, tsig++) {
+ host_to_target_signal_table[hsig] = tsig;
+ }
+ }
+
/*
- * Signals are supported starting from TARGET_SIGRTMIN and going up
- * until we run out of host realtime signals. Glibc uses the lower 2
- * RT signals and (hopefully) nobody uses the upper ones.
- * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
- * To fix this properly we would need to do manual signal delivery
- * multiplexed over a single host signal.
- * Attempts for configure "missing" signals via sigaction will be
- * silently ignored.
- *
* Remap the target SIGABRT, so that we can distinguish host abort
* from guest abort. When the guest registers a signal handler or
* calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
@@ -536,21 +603,32 @@ static void signal_table_init(void)
* parent sees the correct mapping from wait status.
*/
- hsig = SIGRTMIN;
host_to_target_signal_table[SIGABRT] = 0;
- host_to_target_signal_table[hsig++] = TARGET_SIGABRT;
-
- for (tsig = TARGET_SIGRTMIN;
- hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
- hsig++, tsig++) {
- host_to_target_signal_table[hsig] = tsig;
+ for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
+ if (!host_to_target_signal_table[hsig]) {
+ if (host_interrupt_signal) {
+ host_to_target_signal_table[hsig] = TARGET_SIGABRT;
+ break;
+ } else {
+ host_interrupt_signal = hsig;
+ }
+ }
+ }
+ if (hsig > SIGRTMAX) {
+ fprintf(stderr,
+ "No rt signals left for interrupt and SIGABRT mapping\n");
+ exit(EXIT_FAILURE);
}
/* Invert the mapping that has already been assigned. */
for (hsig = 1; hsig < _NSIG; hsig++) {
tsig = host_to_target_signal_table[hsig];
if (tsig) {
- assert(target_to_host_signal_table[tsig] == 0);
+ if (target_to_host_signal_table[tsig]) {
+ fprintf(stderr, "%d is already mapped to %d\n",
+ tsig, target_to_host_signal_table[tsig]);
+ exit(EXIT_FAILURE);
+ }
target_to_host_signal_table[tsig] = hsig;
}
}
@@ -573,13 +651,13 @@ static void signal_table_init(void)
trace_signal_table_init(count);
}
-void signal_init(void)
+void signal_init(const char *rtsig_map)
{
TaskState *ts = get_task_state(thread_cpu);
struct sigaction act, oact;
/* initialize signal conversion tables */
- signal_table_init();
+ signal_table_init(rtsig_map);
/* Set the signal mask from the host mask. */
sigprocmask(0, 0, &ts->signal_mask);
@@ -618,6 +696,8 @@ void signal_init(void)
}
sigact_table[tsig - 1]._sa_handler = thand;
}
+
+ sigaction(host_interrupt_signal, &act, NULL);
}
/* Force a synchronously taken signal. The kernel force_sig() function
@@ -670,10 +750,10 @@ void force_sigsegv(int oldsig)
}
#endif
-void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
- const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->record_sigsegv) {
tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
@@ -686,10 +766,10 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
cpu_loop_exit_restore(cpu, ra);
}
-void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra)
{
- const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->record_sigbus) {
tcg_ops->record_sigbus(cpu, addr, access_type, ra);
@@ -965,6 +1045,12 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
bool sync_sig = false;
void *sigmask;
+ if (host_sig == host_interrupt_signal) {
+ ts->signal_pending = 1;
+ cpu_exit(thread_cpu);
+ return;
+ }
+
/*
* Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
* handling wrt signal blocking and unwinding. Non-spoofed SIGILL,
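
An illustrative aside on the QEMU_RTSIG_MAP grammar consumed by the parser added to signal_table_init() above: the variable holds comma-separated "target host count" triples, each mapping a run of target rt signals onto a run of host rt signals. The sketch below is a standalone reimplementation of just that triple format, using plain strtol() in place of QEMU's internal qemu_strtoi(); the example value is made up.

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "tsig hsig count[,tsig hsig count...]" triples. */
    static int parse_rtsig_map(const char *s)
    {
        while (*s) {
            char *end;
            long tsig = strtol(s, &end, 10);
            if (end == s || *end != ' ') {
                return -1;                  /* malformed target signal */
            }
            long hsig = strtol(end + 1, &end, 10);
            if (*end != ' ') {
                return -1;                  /* malformed host signal */
            }
            long count = strtol(end + 1, &end, 10);
            if (*end && *end != ',') {
                return -1;                  /* malformed count */
            }
            printf("target %ld..%ld -> host %ld..%ld\n",
                   tsig, tsig + count - 1, hsig, hsig + count - 1);
            s = *end ? end + 1 : end;       /* step over ',' or stop */
        }
        return 0;
    }

    int main(void)
    {
        /* e.g. QEMU_RTSIG_MAP="34 38 3,45 48 2" */
        return parse_rtsig_map("34 38 3,45 48 2") ? EXIT_FAILURE : EXIT_SUCCESS;
    }
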
diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c
index 50424a5..68f1e8e 100644
--- a/linux-user/sparc/cpu_loop.c
+++ b/linux-user/sparc/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
#define SPARC64_STACK_BIAS 2047
@@ -357,7 +357,7 @@ void cpu_loop (CPUSPARCState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
env->pc = regs->pc;
diff --git a/linux-user/sparc/syscall.tbl b/linux-user/sparc/syscall.tbl
index e34cc30..3bc8378 100644
--- a/linux-user/sparc/syscall.tbl
+++ b/linux-user/sparc/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for sparc
#
@@ -117,7 +117,7 @@
90 common dup2 sys_dup2
91 32 setfsuid32 sys_setfsuid
92 common fcntl sys_fcntl compat_sys_fcntl
-93 common select sys_select
+93 common select sys_select compat_sys_select
94 32 setfsgid32 sys_setfsgid
95 common fsync sys_fsync
96 common setpriority sys_setpriority
@@ -155,7 +155,7 @@
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
124 common fchmod sys_fchmod
-125 common recvfrom sys_recvfrom
+125 common recvfrom sys_recvfrom compat_sys_recvfrom
126 32 setreuid sys_setreuid16
126 64 setreuid sys_setreuid
127 32 setregid sys_setregid16
@@ -247,9 +247,9 @@
204 32 readdir sys_old_readdir compat_sys_old_readdir
204 64 readdir sys_nis_syscall
205 common readahead sys_readahead compat_sys_readahead
-206 common socketcall sys_socketcall sys32_socketcall
+206 common socketcall sys_socketcall compat_sys_socketcall
207 common syslog sys_syslog
-208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+208 common lookup_dcookie sys_ni_syscall
209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
211 common tgkill sys_tgkill
@@ -270,7 +270,7 @@
222 common delete_module sys_delete_module
223 common get_kernel_syms sys_ni_syscall
224 common getpgid sys_getpgid
-225 common bdflush sys_bdflush
+225 common bdflush sys_ni_syscall
226 common sysfs sys_sysfs
227 common afs_syscall sys_nis_syscall
228 common setfsuid sys_setfsuid16
@@ -365,12 +365,12 @@
299 common unshare sys_unshare
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
-303 common mbind sys_mbind compat_sys_mbind
-304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+302 common migrate_pages sys_migrate_pages
+303 common mbind sys_mbind
+304 common get_mempolicy sys_get_mempolicy
+305 common set_mempolicy sys_set_mempolicy
306 common kexec_load sys_kexec_load compat_sys_kexec_load
-307 common move_pages sys_move_pages compat_sys_move_pages
+307 common move_pages sys_move_pages
308 common getcpu sys_getcpu
309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
310 32 utimensat sys_utimensat_time32
@@ -461,7 +461,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
@@ -488,7 +488,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/sparc/syscallhdr.sh b/linux-user/sparc/syscallhdr.sh
index 34a99dc..938a02b 100644
--- a/linux-user/sparc/syscallhdr.sh
+++ b/linux-user/sparc/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/sparc/target_proc.h b/linux-user/sparc/target_proc.h
index 3bb3134..744fa10 100644
--- a/linux-user/sparc/target_proc.h
+++ b/linux-user/sparc/target_proc.h
@@ -8,7 +8,25 @@
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
- dprintf(fd, "type\t\t: sun4u\n");
+ int i, num_cpus;
+ const char *cpu_type;
+
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (cpu_env->def.features & CPU_FEATURE_HYPV) {
+ cpu_type = "sun4v";
+ } else {
+ cpu_type = "sun4u";
+ }
+
+ dprintf(fd, "cpu\t\t: %s (QEMU)\n", cpu_env->def.name);
+ dprintf(fd, "type\t\t: %s\n", cpu_type);
+ dprintf(fd, "ncpus probed\t: %d\n", num_cpus);
+ dprintf(fd, "ncpus active\t: %d\n", num_cpus);
+ dprintf(fd, "State:\n");
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "CPU%d:\t\t: online\n", i);
+ }
+
return 0;
}
#define HAVE_ARCH_PROC_CPUINFO
diff --git a/linux-user/strace.c b/linux-user/strace.c
index b4d1098..3b744cc 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -13,6 +13,9 @@
#include <linux/if_packet.h>
#include <linux/in6.h>
#include <linux/netlink.h>
+#ifdef HAVE_OPENAT2_H
+#include <linux/openat2.h>
+#endif
#include <sched.h>
#include "qemu.h"
#include "user-internals.h"
@@ -158,19 +161,20 @@ static const char * const target_signal_name[] = {
};
static void
-print_signal(abi_ulong arg, int last)
+print_signal_1(abi_ulong arg)
{
- const char *signal_name = NULL;
-
if (arg < ARRAY_SIZE(target_signal_name)) {
- signal_name = target_signal_name[arg];
+ qemu_log("%s", target_signal_name[arg]);
+ } else {
+ qemu_log(TARGET_ABI_FMT_lu, arg);
}
+}
- if (signal_name == NULL) {
- print_raw_param("%ld", arg, last);
- return;
- }
- qemu_log("%s%s", signal_name, get_comma(last));
+static void
+print_signal(abi_ulong arg, int last)
+{
+ print_signal_1(arg);
+ qemu_log("%s", get_comma(last));
}
static void print_si_code(int arg)
@@ -373,7 +377,7 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
un->sun_path[i]; i++) {
qemu_log("%c", un->sun_path[i]);
}
- qemu_log("\"}");
+ qemu_log("\"},");
break;
}
case AF_INET: {
@@ -383,7 +387,7 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
ntohs(in->sin_port));
qemu_log("sin_addr=inet_addr(\"%d.%d.%d.%d\")",
c[0], c[1], c[2], c[3]);
- qemu_log("}");
+ qemu_log("},");
break;
}
case AF_PACKET: {
@@ -414,12 +418,12 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
}
qemu_log(",sll_addr=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- qemu_log("}");
+ qemu_log("},");
break;
}
case AF_NETLINK: {
struct target_sockaddr_nl *nl = (struct target_sockaddr_nl *)sa;
- qemu_log("{nl_family=AF_NETLINK,nl_pid=%u,nl_groups=%u}",
+ qemu_log("{nl_family=AF_NETLINK,nl_pid=%u,nl_groups=%u},",
tswap32(nl->nl_pid), tswap32(nl->nl_groups));
break;
}
@@ -429,14 +433,14 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
qemu_log("%02x, ", sa->sa_data[i]);
}
qemu_log("%02x}", sa->sa_data[i]);
- qemu_log("}");
+ qemu_log("},");
break;
}
unlock_user(sa, addr, 0);
} else {
- print_raw_param("0x"TARGET_ABI_FMT_lx, addr, 0);
+ print_pointer(addr, 0);
}
- qemu_log(", "TARGET_ABI_FMT_ld"%s", addrlen, get_comma(last));
+ qemu_log(TARGET_ABI_FMT_ld"%s", addrlen, get_comma(last));
}
static void
@@ -715,6 +719,51 @@ print_ipc(CPUArchState *cpu_env, const struct syscallname *name,
}
#endif
+#ifdef TARGET_NR_rt_sigprocmask
+static void print_target_sigset_t_1(target_sigset_t *set, int last)
+{
+ bool first = true;
+ int i, sig = 1;
+
+ qemu_log("[");
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ abi_ulong bits = 0;
+ int j;
+
+ __get_user(bits, &set->sig[i]);
+ for (j = 0; j < sizeof(bits) * 8; j++) {
+ if (bits & ((abi_ulong)1 << j)) {
+ if (first) {
+ first = false;
+ } else {
+ qemu_log(" ");
+ }
+ print_signal_1(sig);
+ }
+ sig++;
+ }
+ }
+ qemu_log("]%s", get_comma(last));
+}
+
+static void print_target_sigset_t(abi_ulong addr, abi_ulong size, int last)
+{
+ if (addr && size == sizeof(target_sigset_t)) {
+ target_sigset_t *set;
+
+ set = lock_user(VERIFY_READ, addr, sizeof(target_sigset_t), 1);
+ if (set) {
+ print_target_sigset_t_1(set, last);
+ unlock_user(set, addr, 0);
+ } else {
+ print_pointer(addr, last);
+ }
+ } else {
+ print_pointer(addr, last);
+ }
+}
+#endif
+
/*
* Variants for the return value output function
*/
@@ -1063,6 +1112,18 @@ UNUSED static const struct flags open_flags[] = {
FLAG_END,
};
+UNUSED static const struct flags openat2_resolve_flags[] = {
+#ifdef HAVE_OPENAT2_H
+ FLAG_GENERIC(RESOLVE_NO_XDEV),
+ FLAG_GENERIC(RESOLVE_NO_MAGICLINKS),
+ FLAG_GENERIC(RESOLVE_NO_SYMLINKS),
+ FLAG_GENERIC(RESOLVE_BENEATH),
+ FLAG_GENERIC(RESOLVE_IN_ROOT),
+ FLAG_GENERIC(RESOLVE_CACHED),
+#endif
+ FLAG_END,
+};
+
UNUSED static const struct flags mount_flags[] = {
#ifdef MS_BIND
FLAG_GENERIC(MS_BIND),
@@ -1655,6 +1716,13 @@ print_buf(abi_long addr, abi_long len, int last)
}
}
+static void
+print_buf_len(abi_long addr, abi_long len, int last)
+{
+ print_buf(addr, len, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, len, last);
+}
+
/*
* Prints out raw parameter using given format. Caller needs
* to do byte swapping if needed.
@@ -2742,8 +2810,7 @@ static void do_print_sendrecv(const char *name, abi_long arg1)
qemu_log("%s(", name);
print_sockfd(sockfd, 0);
- print_buf(msg, len, 0);
- print_raw_param(TARGET_ABI_FMT_ld, len, 0);
+ print_buf_len(msg, len, 0);
print_flags(msg_flags, flags, 1);
qemu_log(")");
}
@@ -2761,8 +2828,7 @@ static void do_print_msgaddr(const char *name, abi_long arg1)
qemu_log("%s(", name);
print_sockfd(sockfd, 0);
- print_buf(msg, len, 0);
- print_raw_param(TARGET_ABI_FMT_ld, len, 0);
+ print_buf_len(msg, len, 0);
print_flags(msg_flags, flags, 0);
print_sockaddr(addr, addrlen, 0);
qemu_log(")");
@@ -3122,6 +3188,38 @@ print_bind(CPUArchState *cpu_env, const struct syscallname *name,
}
#endif
+#ifdef TARGET_NR_recvfrom
+static void
+print_recvfrom(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_sockfd(arg0, 0);
+ print_pointer(arg1, 0); /* output */
+ print_raw_param(TARGET_ABI_FMT_ld, arg2, 0);
+ print_flags(msg_flags, arg3, 0);
+ print_pointer(arg4, 0); /* output */
+ print_pointer(arg5, 1); /* in/out */
+ print_syscall_epilogue(name);
+}
+#endif
+
+#ifdef TARGET_NR_sendto
+static void
+print_sendto(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_sockfd(arg0, 0);
+ print_buf_len(arg1, arg2, 0);
+ print_flags(msg_flags, arg3, 0);
+ print_sockaddr(arg4, arg5, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \
defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64)
static void
@@ -3260,11 +3358,29 @@ print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name,
case TARGET_SIG_SETMASK: how = "SIG_SETMASK"; break;
}
qemu_log("%s,", how);
- print_pointer(arg1, 0);
+ print_target_sigset_t(arg1, arg3, 0);
print_pointer(arg2, 0);
print_raw_param("%u", arg3, 1);
print_syscall_epilogue(name);
}
+
+static void
+print_rt_sigprocmask_ret(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ if (!print_syscall_err(ret)) {
+ qemu_log(TARGET_ABI_FMT_ld, ret);
+ if (arg2) {
+ qemu_log(" (oldset=");
+ print_target_sigset_t(arg2, arg3, 1);
+ qemu_log(")");
+ }
+ }
+
+ qemu_log("\n");
+}
#endif
#ifdef TARGET_NR_rt_sigqueueinfo
@@ -3483,6 +3599,38 @@ print_openat(CPUArchState *cpu_env, const struct syscallname *name,
}
#endif
+#ifdef TARGET_NR_openat2
+static void
+print_openat2(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ struct open_how_ver0 how;
+
+ print_syscall_prologue(name);
+ print_at_dirfd(arg0, 0);
+ print_string(arg1, 0);
+
+ if ((abi_ulong)arg3 >= sizeof(struct target_open_how_ver0) &&
+ copy_struct_from_user(&how, sizeof(how), arg2, arg3) == 0) {
+ how.flags = tswap64(how.flags);
+ how.mode = tswap64(how.mode);
+ how.resolve = tswap64(how.resolve);
+ qemu_log("{");
+ print_open_flags(how.flags, 0);
+ if (how.flags & TARGET_O_CREAT) {
+ print_file_mode(how.mode, 0);
+ }
+ print_flags(openat2_resolve_flags, how.resolve, 1);
+ qemu_log("},");
+ } else {
+ print_pointer(arg2, 0);
+ }
+ print_raw_param(TARGET_ABI_FMT_lu, arg3, 1);
+ print_syscall_epilogue(name);
+}
+#endif
+
#ifdef TARGET_NR_pidfd_send_signal
static void
print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name,
@@ -3823,7 +3971,7 @@ print_mmap(CPUArchState *cpu_env, const struct syscallname *name,
{
return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3,
arg4, arg5,
-#if defined(TARGET_NR_mmap2)
+#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
true
#else
false
@@ -4168,6 +4316,63 @@ print_ioctl(CPUArchState *cpu_env, const struct syscallname *name,
}
#endif
+#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
+static void print_wstatus(int wstatus)
+{
+ if (WIFSIGNALED(wstatus)) {
+ qemu_log("{WIFSIGNALED(s) && WTERMSIG(s) == ");
+ print_signal(WTERMSIG(wstatus), 1);
+ if (WCOREDUMP(wstatus)) {
+ qemu_log(" && WCOREDUMP(s)");
+ }
+ qemu_log("}");
+ } else if (WIFEXITED(wstatus)) {
+ qemu_log("{WIFEXITED(s) && WEXITSTATUS(s) == %d}",
+ WEXITSTATUS(wstatus));
+ } else {
+ print_number(wstatus, 1);
+ }
+}
+
+static void print_ret_wstatus(abi_long ret, abi_long wstatus_addr)
+{
+ int wstatus;
+
+ if (!print_syscall_err(ret)
+ && wstatus_addr
+ && get_user_s32(wstatus, wstatus_addr)) {
+ qemu_log(TARGET_ABI_FMT_ld " (wstatus=", ret);
+ print_wstatus(wstatus);
+ qemu_log(")");
+ }
+ qemu_log("\n");
+}
+#endif
+
+#ifdef TARGET_NR_wait4
+static void
+print_syscall_ret_wait4(CPUArchState *cpu_env,
+ const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ print_ret_wstatus(ret, arg1);
+}
+#endif
+
+#ifdef TARGET_NR_waitpid
+static void
+print_syscall_ret_waitpid(CPUArchState *cpu_env,
+ const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ print_ret_wstatus(ret, arg1);
+}
+#endif
+
/*
* An array of all of the syscalls we know about
*/
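
The wstatus decoding added above leans on the standard <sys/wait.h> macros; for reference, a self-contained host program exercising the same WIFEXITED/WIFSIGNALED/WCOREDUMP checks (nothing here is QEMU-specific, and WCOREDUMP is a common extension rather than plain POSIX):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid < 0) {
            perror("fork");
            return EXIT_FAILURE;
        }
        if (pid == 0) {
            _exit(7);                   /* child: plain exit with status 7 */
        }

        int wstatus;
        if (waitpid(pid, &wstatus, 0) < 0) {
            perror("waitpid");
            return EXIT_FAILURE;
        }
        if (WIFEXITED(wstatus)) {
            printf("WIFEXITED(s) && WEXITSTATUS(s) == %d\n",
                   WEXITSTATUS(wstatus));
        } else if (WIFSIGNALED(wstatus)) {
            printf("WIFSIGNALED(s) && WTERMSIG(s) == %d%s\n",
                   WTERMSIG(wstatus),
                   WCOREDUMP(wstatus) ? " && WCOREDUMP(s)" : "");
        }
        return EXIT_SUCCESS;
    }
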
@@ -4196,7 +4401,7 @@ print_syscall(CPUArchState *cpu_env, int num,
if (!f) {
return;
}
- fprintf(f, "%d ", getpid());
+ fprintf(f, "%d ", get_task_state(env_cpu(cpu_env))->ts_tid);
for (i = 0; i < nsyscalls; i++) {
if (scnames[i].nr == num) {
diff --git a/linux-user/strace.list b/linux-user/strace.list
index dfd4237..fdf94ef 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -715,6 +715,9 @@
#ifdef TARGET_NR_openat
{ TARGET_NR_openat, "openat" , NULL, print_openat, NULL },
#endif
+#ifdef TARGET_NR_openat2
+{ TARGET_NR_openat2, "openat2" , NULL, print_openat2, NULL },
+#endif
#ifdef TARGET_NR_osf_adjtime
{ TARGET_NR_osf_adjtime, "osf_adjtime" , NULL, NULL, NULL },
#endif
@@ -1135,7 +1138,7 @@
{ TARGET_NR_recv, "recv" , "%s(%d,%p,%u,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_recvfrom
-{ TARGET_NR_recvfrom, "recvfrom" , NULL, NULL, NULL },
+{ TARGET_NR_recvfrom, "recvfrom" , NULL, print_recvfrom, NULL },
#endif
#ifdef TARGET_NR_recvmmsg
{ TARGET_NR_recvmmsg, "recvmmsg" , NULL, NULL, NULL },
@@ -1186,7 +1189,8 @@
{ TARGET_NR_rt_sigpending, "rt_sigpending" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_rt_sigprocmask
-{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask, NULL },
+{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask,
+ print_rt_sigprocmask_ret },
#endif
#ifdef TARGET_NR_rt_sigqueueinfo
{ TARGET_NR_rt_sigqueueinfo, "rt_sigqueueinfo" , NULL, print_rt_sigqueueinfo, NULL },
@@ -1285,7 +1289,7 @@
{ TARGET_NR_sendmsg, "sendmsg" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_sendto
-{ TARGET_NR_sendto, "sendto" , NULL, NULL, NULL },
+{ TARGET_NR_sendto, "sendto" , NULL, print_sendto, NULL },
#endif
#ifdef TARGET_NR_setdomainname
{ TARGET_NR_setdomainname, "setdomainname" , NULL, NULL, NULL },
@@ -1659,13 +1663,15 @@
{ TARGET_NR_vserver, "vserver" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_wait4
-{ TARGET_NR_wait4, "wait4" , "%s(%d,%p,%d,%p)", NULL, NULL },
+{ TARGET_NR_wait4, "wait4" , "%s(%d,%p,%d,%p)", NULL,
+ print_syscall_ret_wait4 },
#endif
#ifdef TARGET_NR_waitid
{ TARGET_NR_waitid, "waitid" , "%s(%#x,%d,%p,%#x)", NULL, NULL },
#endif
#ifdef TARGET_NR_waitpid
-{ TARGET_NR_waitpid, "waitpid" , "%s(%d,%p,%#x)", NULL, NULL },
+{ TARGET_NR_waitpid, "waitpid", "%s(%d,%p,%#x)", NULL,
+ print_syscall_ret_waitpid },
#endif
#ifdef TARGET_NR_write
{ TARGET_NR_write, "write" , "%s(%d,%#x,%d)", NULL, NULL },
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index b8c278b..fc37028 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -26,6 +26,9 @@
#include "tcg/startup.h"
#include "target_mman.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
+#include "exec/tb-flush.h"
+#include "exec/translation-block.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
@@ -54,7 +57,6 @@
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
-//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
@@ -136,14 +138,16 @@
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
+#include "user/page-protection.h"
#include "user/safe-syscall.h"
+#include "user/signal.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
@@ -359,7 +363,8 @@ _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
-/* sched_attr is not defined in glibc */
+/* sched_attr is not defined in glibc < 2.41 */
+#ifndef SCHED_ATTR_SIZE_VER0
struct sched_attr {
uint32_t size;
uint32_t sched_policy;
@@ -372,6 +377,7 @@ struct sched_attr {
uint32_t sched_util_min;
uint32_t sched_util_max;
};
+#endif
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
unsigned int, size, unsigned int, flags);
@@ -602,6 +608,33 @@ static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
return 1;
}
+/*
+ * Copies a target struct to a host struct, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments.
+ *
+ * Similar to the kernel's uaccess.h:copy_struct_from_user()
+ */
+int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
+{
+ size_t size = MIN(ksize, usize);
+ size_t rest = MAX(ksize, usize) - size;
+
+ /* Deal with trailing bytes. */
+ if (usize < ksize) {
+ memset(dst + size, 0, rest);
+ } else if (usize > ksize) {
+ int ret = check_zeroed_user(src, ksize, usize);
+ if (ret <= 0) {
+ return ret ?: -TARGET_E2BIG;
+ }
+ }
+ /* Copy the interoperable parts of the struct. */
+ if (copy_from_user(dst, src, size)) {
+ return -TARGET_EFAULT;
+ }
+ return 0;
+}
+
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
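
The copy_struct_from_user() helper added above implements the usual extensible-struct contract: a shorter user copy is zero-extended into the kernel-side layout, while a longer one is accepted only if every byte past the known size is zero. A host-only sketch of that size negotiation (plain memory on both sides, no guest access or TARGET_* error codes):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    /* Return 0 on success, -E2BIG if src carries non-zero bytes beyond ksize. */
    static int copy_struct_sketch(void *dst, size_t ksize,
                                  const void *src, size_t usize)
    {
        size_t size = ksize < usize ? ksize : usize;

        if (usize < ksize) {
            /* Old caller, newer struct: zero the fields it does not know. */
            memset((char *)dst + size, 0, ksize - size);
        } else if (usize > ksize) {
            /* Newer caller, older struct: the unknown tail must be zero. */
            const unsigned char *tail = (const unsigned char *)src + ksize;
            for (size_t i = 0; i < usize - ksize; i++) {
                if (tail[i]) {
                    return -E2BIG;
                }
            }
        }
        memcpy(dst, src, size);
        return 0;
    }
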
@@ -653,6 +686,10 @@ safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
int, flags, mode_t, mode)
+
+safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
+ const struct open_how_ver0 *, how, size_t, size)
+
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
struct rusage *, rusage)
@@ -759,10 +796,8 @@ safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
* the libc function.
*/
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
-/* Similarly for fcntl. Note that callers must always:
- * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
- * use the flock64 struct rather than unsuffixed flock
- * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
+/* Similarly for fcntl. Since we always build with LFS enabled,
+ * we should be using the 64-bit structures automatically.
*/
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
@@ -1797,7 +1832,7 @@ static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
*dst = tswap32(*dst);
}
} else {
- qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
+ qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
cmsg->cmsg_level, cmsg->cmsg_type);
memcpy(data, target_data, len);
}
@@ -1968,6 +2003,16 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
(void *) &errh->offender, sizeof(errh->offender));
break;
}
+ case IP_PKTINFO:
+ {
+ struct in_pktinfo *pkti = data;
+ struct target_in_pktinfo *target_pi = target_data;
+
+ __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
+ target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
+ target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
+ break;
+ }
default:
goto unimplemented;
}
@@ -2019,7 +2064,7 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
default:
unimplemented:
- qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
+ qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
cmsg->cmsg_level, cmsg->cmsg_type);
memcpy(target_data, data, MIN(len, tgt_len));
if (tgt_len > len) {
@@ -2090,16 +2135,23 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
}
ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
break;
+ case IP_MULTICAST_IF:
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
{
struct ip_mreqn ip_mreq;
struct target_ip_mreqn *target_smreqn;
+ int min_size;
QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
sizeof(struct target_ip_mreq));
- if (optlen < sizeof (struct target_ip_mreq) ||
+ if (optname == IP_MULTICAST_IF) {
+ min_size = sizeof(struct in_addr);
+ } else {
+ min_size = sizeof(struct target_ip_mreq);
+ }
+ if (optlen < min_size ||
optlen > sizeof (struct target_ip_mreqn)) {
return -TARGET_EINVAL;
}
@@ -2109,13 +2161,14 @@ static abi_long do_setsockopt(int sockfd, int level, int optname,
return -TARGET_EFAULT;
}
ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
- ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
- if (optlen == sizeof(struct target_ip_mreqn)) {
- ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
- optlen = sizeof(struct ip_mreqn);
+ if (optlen >= sizeof(struct target_ip_mreq)) {
+ ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
+ if (optlen >= sizeof(struct target_ip_mreqn)) {
+ __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
+ optlen = sizeof(struct ip_mreqn);
+ }
}
unlock_user(target_smreqn, optval_addr, 0);
-
ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
break;
}
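
For context on the relaxed optlen check above: ip(7) documents IP_MULTICAST_IF as accepting either a bare struct in_addr or a full struct ip_mreqn, which is why anything from sizeof(struct in_addr) up to the mreqn size must now be tolerated. A minimal host-side illustration of the ip_mreqn form, selecting the loopback interface by index (interface name and addresses are illustrative):

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0) {
            perror("socket");
            return 1;
        }

        struct ip_mreqn mreq;
        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_ifindex = if_nametoindex("lo");    /* pick interface by index */
        mreq.imr_address.s_addr = htonl(INADDR_ANY);

        if (setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &mreq, sizeof(mreq)) < 0) {
            perror("setsockopt(IP_MULTICAST_IF)");
        }
        close(s);
        return 0;
    }
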
@@ -6722,13 +6775,13 @@ static int target_to_host_fcntl_cmd(int cmd)
ret = cmd;
break;
case TARGET_F_GETLK:
- ret = F_GETLK64;
+ ret = F_GETLK;
break;
case TARGET_F_SETLK:
- ret = F_SETLK64;
+ ret = F_SETLK;
break;
case TARGET_F_SETLKW:
- ret = F_SETLKW64;
+ ret = F_SETLKW;
break;
case TARGET_F_GETOWN:
ret = F_GETOWN;
@@ -6744,13 +6797,13 @@ static int target_to_host_fcntl_cmd(int cmd)
break;
#if TARGET_ABI_BITS == 32
case TARGET_F_GETLK64:
- ret = F_GETLK64;
+ ret = F_GETLK;
break;
case TARGET_F_SETLK64:
- ret = F_SETLK64;
+ ret = F_SETLK;
break;
case TARGET_F_SETLKW64:
- ret = F_SETLKW64;
+ ret = F_SETLKW;
break;
#endif
case TARGET_F_SETLEASE:
@@ -6804,8 +6857,8 @@ static int target_to_host_fcntl_cmd(int cmd)
* them to 5, 6 and 7 before making the syscall(). Since we make the
* syscall directly, adjust to what is supported by the kernel.
*/
- if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
- ret -= F_GETLK64 - 5;
+ if (ret >= F_GETLK && ret <= F_SETLKW) {
+ ret -= F_GETLK - 5;
}
#endif
@@ -6838,7 +6891,7 @@ static int host_to_target_flock(int type)
return type;
}
-static inline abi_long copy_from_user_flock(struct flock64 *fl,
+static inline abi_long copy_from_user_flock(struct flock *fl,
abi_ulong target_flock_addr)
{
struct target_flock *target_fl;
@@ -6863,7 +6916,7 @@ static inline abi_long copy_from_user_flock(struct flock64 *fl,
}
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
- const struct flock64 *fl)
+ const struct flock *fl)
{
struct target_flock *target_fl;
short l_type;
@@ -6882,8 +6935,8 @@ static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
return 0;
}
-typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
-typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
+typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
+typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
@@ -6894,7 +6947,7 @@ struct target_oabi_flock64 {
abi_int l_pid;
} QEMU_PACKED;
-static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
+static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
abi_ulong target_flock_addr)
{
struct target_oabi_flock64 *target_fl;
@@ -6919,7 +6972,7 @@ static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
}
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
- const struct flock64 *fl)
+ const struct flock *fl)
{
struct target_oabi_flock64 *target_fl;
short l_type;
@@ -6939,7 +6992,7 @@ static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
}
#endif
-static inline abi_long copy_from_user_flock64(struct flock64 *fl,
+static inline abi_long copy_from_user_flock64(struct flock *fl,
abi_ulong target_flock_addr)
{
struct target_flock64 *target_fl;
@@ -6964,7 +7017,7 @@ static inline abi_long copy_from_user_flock64(struct flock64 *fl,
}
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
- const struct flock64 *fl)
+ const struct flock *fl)
{
struct target_flock64 *target_fl;
short l_type;
@@ -6985,7 +7038,7 @@ static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
- struct flock64 fl64;
+ struct flock fl;
#ifdef F_GETOWN_EX
struct f_owner_ex fox;
struct target_f_owner_ex *target_fox;
@@ -6998,45 +7051,45 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
switch(cmd) {
case TARGET_F_GETLK:
- ret = copy_from_user_flock(&fl64, arg);
+ ret = copy_from_user_flock(&fl, arg);
if (ret) {
return ret;
}
- ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
if (ret == 0) {
- ret = copy_to_user_flock(arg, &fl64);
+ ret = copy_to_user_flock(arg, &fl);
}
break;
case TARGET_F_SETLK:
case TARGET_F_SETLKW:
- ret = copy_from_user_flock(&fl64, arg);
+ ret = copy_from_user_flock(&fl, arg);
if (ret) {
return ret;
}
- ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
break;
case TARGET_F_GETLK64:
case TARGET_F_OFD_GETLK:
- ret = copy_from_user_flock64(&fl64, arg);
+ ret = copy_from_user_flock64(&fl, arg);
if (ret) {
return ret;
}
- ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
if (ret == 0) {
- ret = copy_to_user_flock64(arg, &fl64);
+ ret = copy_to_user_flock64(arg, &fl);
}
break;
case TARGET_F_SETLK64:
case TARGET_F_SETLKW64:
case TARGET_F_OFD_SETLK:
case TARGET_F_OFD_SETLKW:
- ret = copy_from_user_flock64(&fl64, arg);
+ ret = copy_from_user_flock64(&fl, arg);
if (ret) {
return ret;
}
- ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
break;
case TARGET_F_GETFL:
@@ -7205,12 +7258,24 @@ static inline int tswapid(int id)
#else
#define __NR_sys_setgroups __NR_setgroups
#endif
+#ifdef __NR_sys_setreuid32
+#define __NR_sys_setreuid __NR_setreuid32
+#else
+#define __NR_sys_setreuid __NR_setreuid
+#endif
+#ifdef __NR_sys_setregid32
+#define __NR_sys_setregid __NR_setregid32
+#else
+#define __NR_sys_setregid __NR_setregid
+#endif
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
_syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
+_syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
+_syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
void syscall_init(void)
{
@@ -7267,7 +7332,7 @@ static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1
arg2 = arg3;
arg3 = arg4;
}
- return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
+ return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
}
#endif
@@ -7281,7 +7346,7 @@ static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
arg2 = arg3;
arg3 = arg4;
}
- return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
+ return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
}
#endif
@@ -8070,8 +8135,8 @@ static void open_self_maps_4(const struct open_self_maps_data *d,
* Callback for walk_memory_regions, when read_self_maps() fails.
* Proceed without the benefit of host /proc/self/maps cross-check.
*/
-static int open_self_maps_3(void *opaque, target_ulong guest_start,
- target_ulong guest_end, unsigned long flags)
+static int open_self_maps_3(void *opaque, vaddr guest_start,
+ vaddr guest_end, int flags)
{
static const MapInfo mi = { .is_priv = true };
@@ -8082,8 +8147,8 @@ static int open_self_maps_3(void *opaque, target_ulong guest_start,
/*
* Callback for walk_memory_regions, when read_self_maps() succeeds.
*/
-static int open_self_maps_2(void *opaque, target_ulong guest_start,
- target_ulong guest_end, unsigned long flags)
+static int open_self_maps_2(void *opaque, vaddr guest_start,
+ vaddr guest_end, int flags)
{
const struct open_self_maps_data *d = opaque;
uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
@@ -8122,17 +8187,19 @@ static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
{
struct open_self_maps_data d = {
.ts = get_task_state(env_cpu(env)),
- .host_maps = read_self_maps(),
.fd = fd,
.smaps = smaps
};
+ mmap_lock();
+ d.host_maps = read_self_maps();
if (d.host_maps) {
walk_memory_regions(&d, open_self_maps_2);
free_self_maps(d.host_maps);
} else {
walk_memory_regions(&d, open_self_maps_3);
}
+ mmap_unlock();
return 0;
}
@@ -8168,6 +8235,19 @@ static int open_self_stat(CPUArchState *cpu_env, int fd)
} else if (i == 3) {
/* ppid */
g_string_printf(buf, FMT_pid " ", getppid());
+ } else if (i == 4) {
+ /* pgid */
+ g_string_printf(buf, FMT_pid " ", getpgrp());
+ } else if (i == 19) {
+ /* num_threads */
+ int cpus = 0;
+ WITH_RCU_READ_LOCK_GUARD() {
+ CPUState *cpu_iter;
+ CPU_FOREACH(cpu_iter) {
+ cpus++;
+ }
+ }
+ g_string_printf(buf, "%d ", cpus);
} else if (i == 21) {
/* starttime */
g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
@@ -8324,8 +8404,9 @@ static int open_net_route(CPUArchState *cpu_env, int fd)
}
#endif
-int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
- int flags, mode_t mode, bool safe)
+static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
+ const char *fname, int flags, mode_t mode,
+ int openat2_resolve, bool safe)
{
g_autofree char *proc_name = NULL;
const char *pathname;
@@ -8362,6 +8443,12 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
}
if (is_proc_myself(pathname, "exe")) {
+ /* Honor openat2 resolve flags */
+ if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
+ (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
+ errno = ELOOP;
+ return -1;
+ }
if (safe) {
return safe_openat(dirfd, exec_path, flags, mode);
} else {
@@ -8408,6 +8495,17 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
return fd;
}
+ return -2;
+}
+
+int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
+ int flags, mode_t mode, bool safe)
+{
+ int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
+ if (fd > -2) {
+ return fd;
+ }
+
if (safe) {
return safe_openat(dirfd, path(pathname), flags, mode);
} else {
@@ -8415,6 +8513,49 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
}
}
+
+static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
+ abi_ptr guest_pathname, abi_ptr guest_open_how,
+ abi_ulong guest_size)
+{
+ struct open_how_ver0 how = {0};
+ char *pathname;
+ int ret;
+
+ if (guest_size < sizeof(struct target_open_how_ver0)) {
+ return -TARGET_EINVAL;
+ }
+ ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
+ if (ret) {
+ if (ret == -TARGET_E2BIG) {
+ qemu_log_mask(LOG_UNIMP,
+ "Unimplemented openat2 open_how size: "
+ TARGET_ABI_FMT_lu "\n", guest_size);
+ }
+ return ret;
+ }
+ pathname = lock_user_string(guest_pathname);
+ if (!pathname) {
+ return -TARGET_EFAULT;
+ }
+
+ how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
+ how.mode = tswap64(how.mode);
+ how.resolve = tswap64(how.resolve);
+ int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
+ how.resolve, true);
+ if (fd > -2) {
+ ret = get_errno(fd);
+ } else {
+ ret = get_errno(safe_openat2(dirfd, pathname, &how,
+ sizeof(struct open_how_ver0)));
+ }
+
+ fd_trans_unregister(ret);
+ unlock_user(pathname, guest_pathname, 0);
+ return ret;
+}
+
ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
{
ssize_t ret;
@@ -8656,7 +8797,7 @@ static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
void *tdirp;
int hlen, hoff, toff;
int hreclen, treclen;
- off64_t prev_diroff = 0;
+ off_t prev_diroff = 0;
hdirp = g_try_malloc(count);
if (!hdirp) {
@@ -8709,7 +8850,7 @@ static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
* Return what we have, resetting the file pointer to the
* location of the first record not returned.
*/
- lseek64(dirfd, prev_diroff, SEEK_SET);
+ lseek(dirfd, prev_diroff, SEEK_SET);
break;
}
@@ -8743,7 +8884,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
void *tdirp;
int hlen, hoff, toff;
int hreclen, treclen;
- off64_t prev_diroff = 0;
+ off_t prev_diroff = 0;
hdirp = g_try_malloc(count);
if (!hdirp) {
@@ -8785,7 +8926,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
* Return what we have, resetting the file pointer to the
* location of the first record not returned.
*/
- lseek64(dirfd, prev_diroff, SEEK_SET);
+ lseek(dirfd, prev_diroff, SEEK_SET);
break;
}
@@ -8843,7 +8984,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
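
The ZVFHMIN change just above is the classic fix for building a 64-bit flag with a 32-bit shift: "1 << 31" is computed as a signed int, so it overflows and then sign-extends when widened, whereas "1ULL << 31" keeps only bit 31 set. A small demonstration, independent of the RISC-V hwprobe definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Shifting into the sign bit of int is undefined in C; in practice it
         * yields INT_MIN, which sign-extends when converted to 64 bits. */
        uint64_t bad  = (uint64_t)(1 << 31);
        uint64_t good = 1ULL << 31;

        printf("1 << 31    -> 0x%016" PRIx64 "\n", bad);   /* ffffffff80000000 */
        printf("1ULL << 31 -> 0x%016" PRIx64 "\n", good);  /* 0000000080000000 */
        return 0;
    }
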
@@ -8982,35 +9123,38 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
}
}
-static int cpu_set_valid(abi_long arg3, abi_long arg4)
+/*
+ * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
+ * If the cpumask_t has no bits set: -EINVAL.
+ * Otherwise the cpumask_t contains some bit set: 0.
+ * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
+ * nor bound the search by cpumask_size().
+ */
+static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
{
- int ret, i, tmp;
- size_t host_mask_size, target_mask_size;
- unsigned long *host_mask;
-
- /*
- * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
- * arg3 contains the cpu count.
- */
- tmp = (8 * sizeof(abi_ulong));
- target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
- host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
- ~(sizeof(*host_mask) - 1);
-
- host_mask = alloca(host_mask_size);
+ unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
+ int ret = -TARGET_EFAULT;
- ret = target_to_host_cpu_mask(host_mask, host_mask_size,
- arg4, target_mask_size);
- if (ret != 0) {
- return ret;
- }
-
- for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
- if (host_mask[i] != 0) {
- return 0;
+ if (p) {
+ ret = -TARGET_EINVAL;
+ /*
+         * Since we only care about the empty/non-empty state of the cpumask_t,
+         * not the individual bits, we do not need to repartition the bits
+ * from target abi_ulong to host unsigned long.
+ *
+ * Note that the kernel does not round up cpusetsize to a multiple of
+ * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(),
+ * it copies exactly cpusetsize bytes into a zeroed buffer.
+ */
+ for (abi_ulong i = 0; i < cpusetsize; ++i) {
+ if (p[i]) {
+ ret = 0;
+ break;
+ }
}
+ unlock_user(p, target_cpus, 0);
}
- return -TARGET_EINVAL;
+ return ret;
}
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
@@ -9027,7 +9171,7 @@ static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
/* check cpu_set */
if (arg3 != 0) {
- ret = cpu_set_valid(arg3, arg4);
+ ret = nonempty_cpu_set(arg3, arg4);
if (ret != 0) {
return ret;
}
@@ -9187,6 +9331,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
fd_trans_unregister(ret);
unlock_user(p, arg2, 0);
return ret;
+ case TARGET_NR_openat2:
+ ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
+ return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
case TARGET_NR_name_to_handle_at:
ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
@@ -10472,10 +10619,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return ret;
#ifdef TARGET_NR_mmap
case TARGET_NR_mmap:
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
- (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
- defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
- || defined(TARGET_S390X)
+#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
{
abi_ulong *v;
abi_ulong v1, v2, v3, v4, v5, v6;
@@ -11516,7 +11660,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return -TARGET_EFAULT;
}
}
- ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
+ ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
unlock_user(p, arg2, ret);
return ret;
case TARGET_NR_pwrite64:
@@ -11533,7 +11677,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return -TARGET_EFAULT;
}
}
- ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
+ ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
unlock_user(p, arg2, 0);
return ret;
#endif
@@ -11828,9 +11972,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return get_errno(high2lowgid(getegid()));
#endif
case TARGET_NR_setreuid:
- return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
+ return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
case TARGET_NR_setregid:
- return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
+ return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
case TARGET_NR_getgroups:
{ /* the same code as for TARGET_NR_getgroups32 */
int gidsetsize = arg1;
@@ -12160,11 +12304,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_setreuid32
case TARGET_NR_setreuid32:
- return get_errno(setreuid(arg1, arg2));
+ return get_errno(sys_setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
case TARGET_NR_setregid32:
- return get_errno(setregid(arg1, arg2));
+ return get_errno(sys_setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
case TARGET_NR_getgroups32:
@@ -12393,7 +12537,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
case TARGET_NR_fcntl64:
{
int cmd;
- struct flock64 fl;
+ struct flock fl;
from_flock64_fn *copyfrom = copy_from_user_flock64;
to_flock64_fn *copyto = copy_to_user_flock64;
@@ -12628,14 +12772,6 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#if defined(TARGET_MIPS)
cpu_env->active_tc.CP0_UserLocal = arg1;
return 0;
-#elif defined(TARGET_CRIS)
- if (arg1 & 0xff)
- ret = -TARGET_EINVAL;
- else {
- cpu_env->pregs[PR_PID] = arg1;
- ret = 0;
- }
- return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index a00b617..5d22759 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -62,7 +62,7 @@
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
|| (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
|| (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \
- || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
+ || defined(TARGET_M68K) || defined(TARGET_SH4)
/* 16 bit uid wrappers emulation */
#define USE_UID16
#define target_id uint16_t
@@ -71,7 +71,7 @@
#endif
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
- || defined(TARGET_M68K) || defined(TARGET_CRIS) \
+ || defined(TARGET_M68K) \
|| defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
|| defined(TARGET_RISCV) \
|| defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64)
@@ -462,7 +462,7 @@ typedef struct {
abi_ulong sig[TARGET_NSIG_WORDS];
} target_sigset_t;
-#ifdef BSWAP_NEEDED
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static inline void tswap_sigset(target_sigset_t *d, const target_sigset_t *s)
{
int i;
@@ -1234,8 +1234,7 @@ struct target_winsize {
#include "target_mman.h"
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
- || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
- || defined(TARGET_CRIS)
+ || (defined(TARGET_ARM) && defined(TARGET_ABI32))
#define TARGET_STAT_HAVE_NSEC
struct target_stat {
abi_ushort st_dev;
@@ -1976,7 +1975,7 @@ struct target_stat64 {
};
#elif defined(TARGET_OPENRISC) \
- || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
+ || defined(TARGET_RISCV) || defined(TARGET_HEXAGON) || defined(TARGET_LOONGARCH)
/* These are the asm-generic versions of the stat and stat64 structures */
@@ -2086,11 +2085,6 @@ struct target_stat64 {
abi_uint target_st_ctime_nsec;
abi_ullong st_ino;
};
-
-#elif defined(TARGET_LOONGARCH64)
-
-/* LoongArch no newfstatat/fstat syscall. */
-
#else
#error unsupported CPU
#endif
@@ -2628,6 +2622,12 @@ struct target_ucred {
abi_uint gid;
};
+struct target_in_pktinfo {
+ abi_int ipi_ifindex;
+ struct target_in_addr ipi_spec_dst;
+ struct target_in_addr ipi_addr;
+};
+
typedef abi_int target_timer_t;
#define TARGET_SIGEV_MAX_SIZE 64
@@ -2754,4 +2754,29 @@ struct target_sched_param {
abi_int sched_priority;
};
+/* from kernel's include/uapi/linux/openat2.h */
+struct open_how_ver0 {
+ uint64_t flags;
+ uint64_t mode;
+ uint64_t resolve;
+};
+struct target_open_how_ver0 {
+ abi_ullong flags;
+ abi_ullong mode;
+ abi_ullong resolve;
+};
+#ifndef RESOLVE_NO_MAGICLINKS
+#define RESOLVE_NO_MAGICLINKS 0x02
+#endif
+#ifndef RESOLVE_NO_SYMLINKS
+#define RESOLVE_NO_SYMLINKS 0x04
+#endif
+
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
+ (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
+ defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) || \
+ defined(TARGET_S390X)
+#define TARGET_ARCH_WANT_SYS_OLD_MMAP
+#endif
+
#endif
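
The open_how_ver0 / target_open_how_ver0 structures and RESOLVE_* fallback definitions above mirror the kernel's openat2(2) ABI from <linux/openat2.h>. Since glibc ships no wrapper, programs (guest or host) normally invoke it through syscall(2); a minimal sketch, assuming a kernel of at least 5.6 and headers that provide <linux/openat2.h> and a SYS_openat2 number:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/openat2.h>

    int main(void)
    {
        struct open_how how;
        memset(&how, 0, sizeof(how));
        how.flags = O_RDONLY | O_CLOEXEC;
        how.resolve = RESOLVE_NO_SYMLINKS;  /* fail if the path crosses a symlink */

        long fd = syscall(SYS_openat2, AT_FDCWD, "/etc/hostname",
                          &how, sizeof(how));
        if (fd < 0) {
            perror("openat2");
            return 1;
        }
        close((int)fd);
        return 0;
    }
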
diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index 5c7f173..691b9a1 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -19,8 +19,6 @@
#define LINUX_USER_USER_INTERNALS_H
#include "user/thunk.h"
-#include "exec/exec-all.h"
-#include "exec/tb-flush.h"
#include "qemu/log.h"
extern char *exec_path;
@@ -65,7 +63,6 @@ abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8);
extern __thread CPUState *thread_cpu;
-G_NORETURN void cpu_loop(CPUArchState *env);
abi_long get_errno(abi_long ret);
const char *target_strerror(int err);
int get_osversion(void);
@@ -102,7 +99,6 @@ int host_to_target_waitstatus(int status);
/* vm86.c */
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
-void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);
diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h
index b94bcdc..dfc4477 100644
--- a/linux-user/user-mmap.h
+++ b/linux-user/user-mmap.h
@@ -18,6 +18,8 @@
#ifndef LINUX_USER_USER_MMAP_H
#define LINUX_USER_USER_MMAP_H
+#include "user/mmap.h"
+
/*
* Guest parameters for the ADDR_COMPAT_LAYOUT personality
* (at present this is the only layout supported by QEMU).
@@ -39,24 +41,7 @@
extern abi_ulong task_unmapped_base;
extern abi_ulong elf_et_dyn_base;
-/*
- * mmap_next_start: The base address for the next mmap without hint,
- * increased after each successful map, starting at task_unmapped_base.
- * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT.
- */
-extern abi_ulong mmap_next_start;
-
-int target_mprotect(abi_ulong start, abi_ulong len, int prot);
-abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, off_t offset);
-int target_munmap(abi_ulong start, abi_ulong len);
-abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
- abi_ulong new_size, unsigned long flags,
- abi_ulong new_addr);
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice);
-abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
-void mmap_fork_start(void);
-void mmap_fork_end(int child);
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
abi_ulong shmaddr, int shmflg);
diff --git a/linux-user/vm86.c b/linux-user/vm86.c
index 9f512a2..5091d53 100644
--- a/linux-user/vm86.c
+++ b/linux-user/vm86.c
@@ -47,30 +47,6 @@ static inline void vm_putw(CPUX86State *env, uint32_t segptr,
cpu_stw_data(env, segptr + (reg16 & 0xffff), val);
}
-static inline void vm_putl(CPUX86State *env, uint32_t segptr,
- unsigned int reg16, unsigned int val)
-{
- cpu_stl_data(env, segptr + (reg16 & 0xffff), val);
-}
-
-static inline unsigned int vm_getb(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_ldub_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getw(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_lduw_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getl(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_ldl_data(env, segptr + (reg16 & 0xffff));
-}
-
void save_v86_state(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -131,19 +107,6 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
env->regs[R_EAX] = retval;
}
-static inline int set_IF(CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- ts->v86flags |= VIF_MASK;
- if (ts->v86flags & VIP_MASK) {
- return_to_32bit(env, TARGET_VM86_STI);
- return 1;
- }
- return 0;
-}
-
static inline void clear_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -162,34 +125,6 @@ static inline void clear_AC(CPUX86State *env)
env->eflags &= ~AC_MASK;
}
-static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- set_flags(ts->v86flags, eflags, ts->v86mask);
- set_flags(env->eflags, eflags, SAFE_MASK);
- if (eflags & IF_MASK)
- return set_IF(env);
- else
- clear_IF(env);
- return 0;
-}
-
-static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
- set_flags(env->eflags, flags, SAFE_MASK);
- if (flags & IF_MASK)
- return set_IF(env);
- else
- clear_IF(env);
- return 0;
-}
-
static inline unsigned int get_vflags(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -255,142 +190,6 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
}
}
-#define CHECK_IF_IN_TRAP() \
- if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
- (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
- newflags |= TF_MASK
-
-#define VM86_FAULT_RETURN \
- if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
- (ts->v86flags & (IF_MASK | VIF_MASK))) \
- return_to_32bit(env, TARGET_VM86_PICRETURN); \
- return
-
-void handle_vm86_fault(CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
- uint32_t csp, ssp;
- unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
- int data32, pref_done;
-
- csp = env->segs[R_CS].selector << 4;
- ip = env->eip & 0xffff;
-
- ssp = env->segs[R_SS].selector << 4;
- sp = env->regs[R_ESP] & 0xffff;
-
- LOG_VM86("VM86 exception %04x:%08x\n",
- env->segs[R_CS].selector, env->eip);
-
- data32 = 0;
- pref_done = 0;
- do {
- opcode = vm_getb(env, csp, ip);
- ADD16(ip, 1);
- switch (opcode) {
- case 0x66: /* 32-bit data */ data32=1; break;
- case 0x67: /* 32-bit address */ break;
- case 0x2e: /* CS */ break;
- case 0x3e: /* DS */ break;
- case 0x26: /* ES */ break;
- case 0x36: /* SS */ break;
- case 0x65: /* GS */ break;
- case 0x64: /* FS */ break;
- case 0xf2: /* repnz */ break;
- case 0xf3: /* rep */ break;
- default: pref_done = 1;
- }
- } while (!pref_done);
-
- /* VM86 mode */
- switch(opcode) {
- case 0x9c: /* pushf */
- if (data32) {
- vm_putl(env, ssp, sp - 4, get_vflags(env));
- ADD16(env->regs[R_ESP], -4);
- } else {
- vm_putw(env, ssp, sp - 2, get_vflags(env));
- ADD16(env->regs[R_ESP], -2);
- }
- env->eip = ip;
- VM86_FAULT_RETURN;
-
- case 0x9d: /* popf */
- if (data32) {
- newflags = vm_getl(env, ssp, sp);
- ADD16(env->regs[R_ESP], 4);
- } else {
- newflags = vm_getw(env, ssp, sp);
- ADD16(env->regs[R_ESP], 2);
- }
- env->eip = ip;
- CHECK_IF_IN_TRAP();
- if (data32) {
- if (set_vflags_long(newflags, env))
- return;
- } else {
- if (set_vflags_short(newflags, env))
- return;
- }
- VM86_FAULT_RETURN;
-
- case 0xcd: /* int */
- intno = vm_getb(env, csp, ip);
- ADD16(ip, 1);
- env->eip = ip;
- if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
- if ( (ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
- (intno &7)) & 1) {
- return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
- return;
- }
- }
- do_int(env, intno);
- break;
-
- case 0xcf: /* iret */
- if (data32) {
- newip = vm_getl(env, ssp, sp) & 0xffff;
- newcs = vm_getl(env, ssp, sp + 4) & 0xffff;
- newflags = vm_getl(env, ssp, sp + 8);
- ADD16(env->regs[R_ESP], 12);
- } else {
- newip = vm_getw(env, ssp, sp);
- newcs = vm_getw(env, ssp, sp + 2);
- newflags = vm_getw(env, ssp, sp + 4);
- ADD16(env->regs[R_ESP], 6);
- }
- env->eip = newip;
- cpu_x86_load_seg(env, R_CS, newcs);
- CHECK_IF_IN_TRAP();
- if (data32) {
- if (set_vflags_long(newflags, env))
- return;
- } else {
- if (set_vflags_short(newflags, env))
- return;
- }
- VM86_FAULT_RETURN;
-
- case 0xfa: /* cli */
- env->eip = ip;
- clear_IF(env);
- VM86_FAULT_RETURN;
-
- case 0xfb: /* sti */
- env->eip = ip;
- if (set_IF(env))
- return;
- VM86_FAULT_RETURN;
-
- default:
- /* real VM86 GPF exception */
- return_to_32bit(env, TARGET_VM86_UNKNOWN);
- break;
- }
-}
-
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
CPUState *cs = env_cpu(env);
diff --git a/linux-user/x86_64/syscall_64.tbl b/linux-user/x86_64/syscall_64.tbl
index ce18119..7093ee2 100644
--- a/linux-user/x86_64/syscall_64.tbl
+++ b/linux-user/x86_64/syscall_64.tbl
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# 64-bit system call numbers and entry vectors
#
# The format is:
-# <number> <abi> <name> <entry point>
+# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
#
# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls
#
@@ -68,7 +69,7 @@
57 common fork sys_fork
58 common vfork sys_vfork
59 64 execve sys_execve
-60 common exit sys_exit
+60 common exit sys_exit - noreturn
61 common wait4 sys_wait4
62 common kill sys_kill
63 common uname sys_newuname
@@ -220,7 +221,7 @@
209 64 io_submit sys_io_submit
210 common io_cancel sys_io_cancel
211 64 get_thread_area
-212 common lookup_dcookie sys_lookup_dcookie
+212 common lookup_dcookie
213 common epoll_create sys_epoll_create
214 64 epoll_ctl_old
215 64 epoll_wait_old
@@ -239,7 +240,7 @@
228 common clock_gettime sys_clock_gettime
229 common clock_getres sys_clock_getres
230 common clock_nanosleep sys_clock_nanosleep
-231 common exit_group sys_exit_group
+231 common exit_group sys_exit_group - noreturn
232 common epoll_wait sys_epoll_wait
233 common epoll_ctl sys_epoll_ctl
234 common tgkill sys_tgkill
@@ -343,6 +344,7 @@
332 common statx sys_statx
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
+335 common uretprobe sys_uretprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
@@ -364,10 +366,26 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
#
# Due to a historical design error, certain syscalls are numbered differently
@@ -396,7 +414,7 @@
530 x32 set_robust_list compat_sys_set_robust_list
531 x32 get_robust_list compat_sys_get_robust_list
532 x32 vmsplice sys_vmsplice
-533 x32 move_pages compat_sys_move_pages
+533 x32 move_pages sys_move_pages
534 x32 preadv compat_sys_preadv64
535 x32 pwritev compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
diff --git a/linux-user/x86_64/syscallhdr.sh b/linux-user/x86_64/syscallhdr.sh
index 182be52..988256b 100644
--- a/linux-user/x86_64/syscallhdr.sh
+++ b/linux-user/x86_64/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h
index 9d97174..0af100c 100644
--- a/linux-user/x86_64/target_signal.h
+++ b/linux-user/x86_64/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
/* For x86_64, use of SA_RESTORER is mandatory. */
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c
index d51ce05..c0fcf74 100644
--- a/linux-user/xtensa/cpu_loop.c
+++ b/linux-user/xtensa/cpu_loop.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
-#include "cpu_loop-common.h"
+#include "user/cpu_loop.h"
#include "signal-common.h"
static void xtensa_rfw(CPUXtensaState *env)
@@ -238,7 +238,7 @@ void cpu_loop(CPUXtensaState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
int i;
for (i = 0; i < 16; ++i) {
diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c
index 6514b8d..ef8b0c3 100644
--- a/linux-user/xtensa/signal.c
+++ b/linux-user/xtensa/signal.c
@@ -241,7 +241,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
give_sigsegv:
force_sigsegv(sig);
- return;
}
static void restore_sigcontext(CPUXtensaState *env,
diff --git a/linux-user/xtensa/syscall.tbl b/linux-user/xtensa/syscall.tbl
index fd2f302..735a89b 100644
--- a/linux-user/xtensa/syscall.tbl
+++ b/linux-user/xtensa/syscall.tbl
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
#
# system call numbers and entry vectors for xtensa
#
@@ -223,7 +223,7 @@
# 205 was old nfsservctl
205 common nfsservctl sys_ni_syscall
206 common _sysctl sys_ni_syscall
-207 common bdflush sys_bdflush
+207 common bdflush sys_ni_syscall
208 common uname sys_newuname
209 common sysinfo sys_sysinfo
210 common init_module sys_init_module
@@ -273,7 +273,7 @@
252 common timer_getoverrun sys_timer_getoverrun
# System
253 common reserved253 sys_ni_syscall
-254 common lookup_dcookie sys_lookup_dcookie
+254 common lookup_dcookie sys_ni_syscall
255 common available255 sys_ni_syscall
256 common add_key sys_add_key
257 common request_key sys_request_key
@@ -413,7 +413,23 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/linux-user/xtensa/syscallhdr.sh b/linux-user/xtensa/syscallhdr.sh
index eef0644..dc787fb 100644
--- a/linux-user/xtensa/syscallhdr.sh
+++ b/linux-user/xtensa/syscallhdr.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
in="$1"
out="$2"
diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h
index e4b1bea..8a198bf 100644
--- a/linux-user/xtensa/target_signal.h
+++ b/linux-user/xtensa/target_signal.h
@@ -3,6 +3,8 @@
#include "../generic/signal.h"
+#define TARGET_SA_RESTORER 0x04000000
+
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
#endif
diff --git a/meson.build b/meson.build
index a1e5127..dbc97bf 100644
--- a/meson.build
+++ b/meson.build
@@ -1,11 +1,16 @@
-project('qemu', ['c'], meson_version: '>=1.1.0',
+project('qemu', ['c'], meson_version: '>=1.5.0',
default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto',
'b_staticpic=false', 'stdsplit=false', 'optimization=2', 'b_pie=true'],
version: files('VERSION'))
-add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true)
-add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow'])
-add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough'])
+meson.add_devenv({ 'MESON_BUILD_ROOT' : meson.project_build_root() })
+
+add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true,
+ env: ['RUST_BACKTRACE=1'])
+add_test_setup('slow', exclude_suites: ['thorough'],
+ env: ['G_TEST_SLOW=1', 'SPEED=slow', 'RUST_BACKTRACE=1'])
+add_test_setup('thorough',
+ env: ['G_TEST_SLOW=1', 'SPEED=thorough', 'RUST_BACKTRACE=1'])
meson.add_postconf_script(find_program('scripts/symlink-install-tree.py'))
@@ -15,6 +20,7 @@ meson.add_postconf_script(find_program('scripts/symlink-install-tree.py'))
not_found = dependency('', required: false)
keyval = import('keyval')
+rust = import('rust')
ss = import('sourceset')
fs = import('fs')
@@ -44,14 +50,25 @@ genh = []
qapi_trace_events = []
bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin']
-supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
+supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux', 'emscripten']
supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64',
- 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64']
+ 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64', 'wasm32']
cpu = host_machine.cpu_family()
target_dirs = config_host['TARGET_DIRS'].split()
+# type of binaries to build
+have_linux_user = false
+have_bsd_user = false
+have_system = false
+foreach target : target_dirs
+ have_linux_user = have_linux_user or target.endswith('linux-user')
+ have_bsd_user = have_bsd_user or target.endswith('bsd-user')
+ have_system = have_system or target.endswith('-softmmu')
+endforeach
+have_user = have_linux_user or have_bsd_user
+
############
# Programs #
############
@@ -71,6 +88,66 @@ if host_os == 'darwin' and \
objc = meson.get_compiler('objc')
endif
+have_rust = add_languages('rust', native: false,
+ required: get_option('rust').disable_auto_if(not have_system))
+have_rust = have_rust and add_languages('rust', native: true,
+ required: get_option('rust').disable_auto_if(not have_system))
+if have_rust
+ rustc = meson.get_compiler('rust')
+ if rustc.version().version_compare('<1.77.0')
+ if get_option('rust').enabled()
+ error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.77.0')
+ else
+ warning('rustc version ' + rustc.version() + ' is unsupported, disabling Rust compilation.')
+ message('Please upgrade to at least 1.77.0 to use Rust.')
+ have_rust = false
+ endif
+ endif
+endif
+
+if have_rust
+ rustdoc = find_program('rustdoc', required: get_option('rust'))
+ bindgen = find_program('bindgen', required: get_option('rust'))
+ if not bindgen.found() or bindgen.version().version_compare('<0.60.0')
+ if get_option('rust').enabled()
+ error('bindgen version ' + bindgen.version() + ' is unsupported. You can install a new version with "cargo install bindgen-cli"')
+ else
+ if bindgen.found()
+ warning('bindgen version ' + bindgen.version() + ' is unsupported, disabling Rust compilation.')
+ else
+ warning('bindgen not found, disabling Rust compilation.')
+ endif
+ message('To use Rust you can install a new version with "cargo install bindgen-cli"')
+ have_rust = false
+ endif
+ endif
+endif
+
+if have_rust
+ rustc_args = [find_program('scripts/rust/rustc_args.py'),
+ '--rustc-version', rustc.version(),
+ '--workspace', meson.project_source_root() / 'rust']
+ if get_option('strict_rust_lints')
+ rustc_args += ['--strict-lints']
+ endif
+
+ rustfmt = find_program('rustfmt', required: false)
+
+ rustc_lint_args = run_command(rustc_args, '--lints',
+ capture: true, check: true).stdout().strip().splitlines()
+
+ # Apart from procedural macros, our Rust executables will often link
+ # with C code, so include all the libraries that C code needs. This
+ # is safe; https://github.com/rust-lang/rust/pull/54675 says that
+ # passing -nodefaultlibs to the linker "was more ideological to
+ # start with than anything".
+ add_project_arguments(rustc_lint_args +
+ ['--cfg', 'MESON', '-C', 'default-linker-libraries'],
+ native: false, language: 'rust')
+ add_project_arguments(rustc_lint_args + ['--cfg', 'MESON'],
+ native: true, language: 'rust')
+endif
+
dtrace = not_found
stap = not_found
if 'dtrace' in get_option('trace_backends')
@@ -93,7 +170,7 @@ else
iasl = find_program(get_option('iasl'), required: true)
endif
-edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu', 'riscv64-softmmu' ]
+edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu', 'riscv64-softmmu', 'loongarch64-softmmu' ]
unpack_edk2_blobs = false
foreach target : edk2_targets
if target in target_dirs
@@ -171,16 +248,7 @@ have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed()
have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed()
have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa
-# type of binaries to build
-have_linux_user = false
-have_bsd_user = false
-have_system = false
-foreach target : target_dirs
- have_linux_user = have_linux_user or target.endswith('linux-user')
- have_bsd_user = have_bsd_user or target.endswith('bsd-user')
- have_system = have_system or target.endswith('-softmmu')
-endforeach
-have_user = have_linux_user or have_bsd_user
+have_tcg = get_option('tcg').allowed() and (have_system or have_user)
have_tools = get_option('tools') \
.disable_auto_if(not have_system) \
@@ -215,30 +283,41 @@ else
host_arch = cpu
endif
-if cpu in ['x86', 'x86_64']
+if cpu == 'x86'
+ kvm_targets = ['i386-softmmu']
+elif cpu == 'x86_64'
kvm_targets = ['i386-softmmu', 'x86_64-softmmu']
elif cpu == 'aarch64'
kvm_targets = ['aarch64-softmmu']
elif cpu == 's390x'
kvm_targets = ['s390x-softmmu']
-elif cpu in ['ppc', 'ppc64']
+elif cpu == 'ppc'
+ kvm_targets = ['ppc-softmmu']
+elif cpu == 'ppc64'
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
-elif cpu in ['mips', 'mips64']
+elif cpu == 'mips'
+ kvm_targets = ['mips-softmmu', 'mipsel-softmmu']
+elif cpu == 'mips64'
kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
-elif cpu in ['riscv32']
+elif cpu == 'riscv32'
kvm_targets = ['riscv32-softmmu']
-elif cpu in ['riscv64']
+elif cpu == 'riscv64'
kvm_targets = ['riscv64-softmmu']
-elif cpu in ['loongarch64']
+elif cpu == 'loongarch64'
kvm_targets = ['loongarch64-softmmu']
else
kvm_targets = []
endif
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
-if cpu in ['x86', 'x86_64']
+if cpu == 'x86'
+ xen_targets = ['i386-softmmu']
+elif cpu == 'x86_64'
xen_targets = ['i386-softmmu', 'x86_64-softmmu']
-elif cpu in ['arm', 'aarch64']
+elif cpu == 'arm'
+ # i386 emulator provides xenpv machine type for multiple architectures
+ xen_targets = ['i386-softmmu']
+elif cpu == 'aarch64'
# i386 emulator provides xenpv machine type for multiple architectures
xen_targets = ['i386-softmmu', 'x86_64-softmmu', 'aarch64-softmmu']
else
@@ -246,13 +325,11 @@ else
endif
accelerator_targets += { 'CONFIG_XEN': xen_targets }
-if cpu in ['aarch64']
+if cpu == 'aarch64'
accelerator_targets += {
'CONFIG_HVF': ['aarch64-softmmu']
}
-endif
-
-if cpu in ['x86', 'x86_64']
+elif cpu == 'x86_64'
accelerator_targets += {
'CONFIG_HVF': ['x86_64-softmmu'],
'CONFIG_NVMM': ['i386-softmmu', 'x86_64-softmmu'],
@@ -260,12 +337,6 @@ if cpu in ['x86', 'x86_64']
}
endif
-modular_tcg = []
-# Darwin does not support references to thread-local variables in modules
-if host_os != 'darwin'
- modular_tcg = ['i386-softmmu', 'x86_64-softmmu']
-endif
-
##################
# Compiler flags #
##################
@@ -276,8 +347,8 @@ foreach lang : all_languages
# ok
elif compiler.get_id() == 'clang' and compiler.compiles('''
#ifdef __apple_build_version__
- # if __clang_major__ < 12 || (__clang_major__ == 12 && __clang_minor__ < 0)
- # error You need at least XCode Clang v12.0 to compile QEMU
+ # if __clang_major__ < 15 || (__clang_major__ == 15 && __clang_minor__ < 0)
+ # error You need at least XCode Clang v15.0 to compile QEMU
# endif
#else
# if __clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 0)
@@ -285,8 +356,10 @@ foreach lang : all_languages
# endif
#endif''')
# ok
+ elif compiler.get_id() == 'emscripten'
+ # ok
else
- error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v12.0) to compile QEMU')
+ error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v15.0) to compile QEMU')
endif
endforeach
@@ -315,8 +388,17 @@ elif host_os == 'sunos'
qemu_common_flags += '-D__EXTENSIONS__'
elif host_os == 'haiku'
qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC']
+elif host_os == 'windows'
+ # plugins use delaylib, and clang needs to be used with lld to make it work.
+ if compiler.get_id() == 'clang' and compiler.get_linker_id() != 'ld.lld'
+ error('On windows, you need to use lld with clang - use msys2 clang64/clangarm64 env')
+ endif
endif
+# Choose instruction set (currently x86-only)
+
+qemu_isa_flags = []
+
# __sync_fetch_and_and requires at least -march=i486. Many toolchains
# use i686 as default anyway, but for those that don't, an explicit
# specification is necessary
@@ -333,7 +415,7 @@ if host_arch == 'i386' and not cc.links('''
sfaa(&val);
return val;
}''')
- qemu_common_flags = ['-march=i486'] + qemu_common_flags
+ qemu_isa_flags += ['-march=i486']
endif
# Pick x86-64 baseline version
@@ -349,29 +431,31 @@ if host_arch in ['i386', 'x86_64']
else
# present on basically all processors but technically not part of
# x86-64-v1, so only include -mneeded for x86-64 version 2 and above
- qemu_common_flags = ['-mcx16'] + qemu_common_flags
+ qemu_isa_flags += ['-mcx16']
endif
endif
if get_option('x86_version') >= '2'
- qemu_common_flags = ['-mpopcnt'] + qemu_common_flags
- qemu_common_flags = cc.get_supported_arguments('-mneeded') + qemu_common_flags
+ qemu_isa_flags += ['-mpopcnt']
+ qemu_isa_flags += cc.get_supported_arguments('-mneeded')
endif
if get_option('x86_version') >= '3'
- qemu_common_flags = ['-mmovbe', '-mabm', '-mbmi1', '-mbmi2', '-mfma', '-mf16c'] + qemu_common_flags
+ qemu_isa_flags += ['-mmovbe', '-mabm', '-mbmi', '-mbmi2', '-mfma', '-mf16c']
endif
# add required vector instruction set (each level implies those below)
if get_option('x86_version') == '1'
- qemu_common_flags = ['-msse2'] + qemu_common_flags
+ qemu_isa_flags += ['-msse2']
elif get_option('x86_version') == '2'
- qemu_common_flags = ['-msse4.2'] + qemu_common_flags
+ qemu_isa_flags += ['-msse4.2']
elif get_option('x86_version') == '3'
- qemu_common_flags = ['-mavx2'] + qemu_common_flags
+ qemu_isa_flags += ['-mavx2']
elif get_option('x86_version') == '4'
- qemu_common_flags = ['-mavx512f', '-mavx512bw', '-mavx512cd', '-mavx512dq', '-mavx512vl'] + qemu_common_flags
+ qemu_isa_flags += ['-mavx512f', '-mavx512bw', '-mavx512cd', '-mavx512dq', '-mavx512vl']
endif
endif
+qemu_common_flags = qemu_isa_flags + qemu_common_flags
+
if get_option('prefer_static')
qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static'
endif
@@ -391,7 +475,10 @@ endif
# instead, we can't add -no-pie because it overrides -shared: the linker then
# tries to build an executable instead of a shared library and fails. So
# don't add -no-pie anywhere and cross fingers. :(
-if not get_option('b_pie')
+#
+# Emscripten doesn't support -no-pie but meson can't catch the compiler
+# warning. So explicitly omit the flag for Emscripten.
+if not get_option('b_pie') and host_os != 'emscripten'
qemu_common_flags += cc.get_supported_arguments('-fno-pie', '-no-pie')
endif
@@ -435,6 +522,8 @@ ucontext_probe = '''
supported_backends = []
if host_os == 'windows'
supported_backends += ['windows']
+elif host_os == 'emscripten'
+ supported_backends += ['wasm']
else
if host_os != 'darwin' and cc.links(ucontext_probe)
supported_backends += ['ucontext']
@@ -474,24 +563,38 @@ if get_option('safe_stack') and coroutine_backend != 'ucontext'
error('SafeStack is only supported with the ucontext coroutine backend')
endif
-if get_option('sanitizers')
+if get_option('asan')
if cc.has_argument('-fsanitize=address')
qemu_cflags = ['-fsanitize=address'] + qemu_cflags
qemu_ldflags = ['-fsanitize=address'] + qemu_ldflags
+ else
+ error('Your compiler does not support -fsanitize=address')
endif
+endif
- # Detect static linking issue with ubsan - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285
+if get_option('ubsan')
+ # Detect static linking issue with ubsan:
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285
if cc.links('int main(int argc, char **argv) { return argc + 1; }',
args: [qemu_ldflags, '-fsanitize=undefined'])
- qemu_cflags = ['-fsanitize=undefined'] + qemu_cflags
- qemu_ldflags = ['-fsanitize=undefined'] + qemu_ldflags
+ qemu_cflags += ['-fsanitize=undefined']
+ qemu_ldflags += ['-fsanitize=undefined']
+
+ # Suppress undefined behaviour from function call to mismatched type.
+ # In addition, tcg prologue does not emit function type prefix
+ # required by function call sanitizer.
+ if cc.has_argument('-fno-sanitize=function')
+ qemu_cflags += ['-fno-sanitize=function']
+ endif
+ else
+ error('Your compiler does not support -fsanitize=undefined')
endif
endif
# Thread sanitizer is, for now, much noisier than the other sanitizers;
# keep it separate until that is not the case.
if get_option('tsan')
- if get_option('sanitizers')
+ if get_option('asan') or get_option('ubsan')
error('TSAN is not supported with other sanitizers')
endif
if not cc.has_function('__tsan_create_fiber',
@@ -499,7 +602,15 @@ if get_option('tsan')
prefix: '#include <sanitizer/tsan_interface.h>')
error('Cannot enable TSAN due to missing fiber annotation interface')
endif
- qemu_cflags = ['-fsanitize=thread'] + qemu_cflags
+ tsan_warn_suppress = []
+ # gcc (>=11) will report constructions not supported by tsan:
+ # "error: ā€˜atomic_thread_fence’ is not supported with ā€˜-fsanitize=thread’"
+ # https://gcc.gnu.org/gcc-11/changes.html
+ # However, clang does not support this warning and this triggers an error.
+ if cc.has_argument('-Wno-tsan')
+ tsan_warn_suppress = ['-Wno-tsan']
+ endif
+ qemu_cflags = ['-fsanitize=thread'] + tsan_warn_suppress + qemu_cflags
qemu_ldflags = ['-fsanitize=thread'] + qemu_ldflags
endif
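As a sketch (not part of this patch), the '-fno-sanitize=function' suppression added in the ubsan block above targets indirect calls through a function pointer whose type does not match the callee; recent clang flags these under -fsanitize=undefined. The handler names below are made up purely for illustration:

    /* Calling through a mismatched function-pointer type is undefined
     * behaviour; recent clang's -fsanitize=undefined reports it at the
     * call site unless -fno-sanitize=function is given. */
    #include <stdio.h>

    typedef void (*handler_fn)(void *opaque);

    static void int_handler(int value)
    {
        printf("%d\n", value);
    }

    int main(void)
    {
        handler_fn h = (handler_fn)int_handler;  /* prototype mismatch */
        h(NULL);                                 /* flagged by the sanitizer */
        return 0;
    }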
@@ -649,7 +760,19 @@ warn_flags = [
]
if host_os != 'darwin'
- warn_flags += ['-Wthread-safety']
+ tsa_has_cleanup = cc.compiles('''
+ struct __attribute__((capability("mutex"))) mutex {};
+ void lock(struct mutex *m) __attribute__((acquire_capability(m)));
+ void unlock(struct mutex *m) __attribute__((release_capability(m)));
+
+ void test(void) {
+ struct mutex __attribute__((cleanup(unlock))) m;
+ lock(&m);
+ }
+ ''', args: ['-Wthread-safety', '-Werror'])
+ if tsa_has_cleanup
+ warn_flags += ['-Wthread-safety']
+ endif
endif
# Set up C++ compiler flags
@@ -710,17 +833,22 @@ socket = []
version_res = []
coref = []
iokit = []
+pvg = not_found
emulator_link_args = []
midl = not_found
widl = not_found
pathcch = not_found
+synchronization = not_found
host_dsosuf = '.so'
if host_os == 'windows'
midl = find_program('midl', required: false)
widl = find_program('widl', required: false)
- pathcch = cc.find_library('pathcch')
- socket = cc.find_library('ws2_32')
- winmm = cc.find_library('winmm')
+
+ # MinGW uses lowercase for library names
+ pathcch = cc.find_library('pathcch', required: true)
+ synchronization = cc.find_library('synchronization', required: true)
+ socket = cc.find_library('ws2_32', required: true)
+ winmm = cc.find_library('winmm', required: true)
win = import('windows')
version_res = win.compile_resources('version.rc',
@@ -731,6 +859,8 @@ elif host_os == 'darwin'
coref = dependency('appleframeworks', modules: 'CoreFoundation')
iokit = dependency('appleframeworks', modules: 'IOKit', required: false)
host_dsosuf = '.dylib'
+ pvg = dependency('appleframeworks', modules: ['ParavirtualizedGraphics', 'Metal'],
+ required: get_option('pvg'))
elif host_os == 'sunos'
socket = [cc.find_library('socket'),
cc.find_library('nsl'),
@@ -740,7 +870,7 @@ elif host_os == 'haiku'
cc.find_library('network'),
cc.find_library('bsd')]
elif host_os == 'openbsd'
- if get_option('tcg').allowed() and target_dirs.length() > 0
+ if have_tcg
# Disable OpenBSD W^X if available
emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded')
endif
@@ -781,11 +911,15 @@ if host_os == 'netbsd'
endif
tcg_arch = host_arch
-if get_option('tcg').allowed()
+if have_tcg
if host_arch == 'unknown'
if not get_option('tcg_interpreter')
error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu))
endif
+ elif host_arch == 'wasm32'
+ if not get_option('tcg_interpreter')
+ error('WebAssembly host requires --enable-tcg-interpreter')
+ endif
elif get_option('tcg_interpreter')
warning('Use of the TCG interpreter is not recommended on this host')
warning('architecture. There is a native TCG execution backend available')
@@ -904,7 +1038,9 @@ have_xen_pci_passthrough = get_option('xen_pci_passthrough') \
################
# When bumping glib minimum version, please check also whether to increase
-# the _WIN32_WINNT setting in osdep.h according to the value from glib
+# the _WIN32_WINNT setting in osdep.h according to the value from glib.
+# You should also check if any of the glib.version() checks
+# below can also be removed.
glib_req_ver = '>=2.66.0'
glib_pc = dependency('glib-2.0', version: glib_req_ver, required: true,
method: 'pkg-config')
@@ -954,6 +1090,9 @@ glib = declare_dependency(dependencies: [glib_pc, gmodule],
# TODO: remove this check and the corresponding workaround (qtree) when
# the minimum supported glib is >= 2.75.3
glib_has_gslice = glib.version().version_compare('<2.75.3')
+# Check whether glib has the aligned_alloc family of functions.
+# <https://docs.gtk.org/glib/func.aligned_alloc.html>
+glib_has_aligned_alloc = glib.version().version_compare('>=2.72.0')
# override glib dep to include the above refinements
meson.override_dependency('glib-2.0', glib)
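The new glib_has_aligned_alloc flag only records whether GLib is new enough (>= 2.72) to provide g_aligned_alloc()/g_aligned_free(). A minimal usage sketch, not taken from the patch and with arbitrary block counts and alignment:

    #include <glib.h>

    int main(void)
    {
    #if GLIB_CHECK_VERSION(2, 72, 0)
        /* 16 blocks of 64 bytes, aligned to a 4 KiB boundary */
        void *buf = g_aligned_alloc(16, 64, 4096);
        g_aligned_free(buf);
    #endif
        return 0;
    }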
@@ -981,7 +1120,7 @@ if not get_option('gio').auto() or have_system
gio = not_found
endif
if gio.found()
- gdbus_codegen = find_program(gio.get_variable('gdbus_codegen'),
+ gdbus_codegen = find_program('gdbus-codegen',
required: get_option('gio'))
gio_unix = dependency('gio-unix-2.0', required: get_option('gio'),
method: 'pkg-config')
@@ -1033,7 +1172,7 @@ endif
libnfs = not_found
if not get_option('libnfs').auto() or have_block
- libnfs = dependency('libnfs', version: '>=1.9.3',
+ libnfs = dependency('libnfs', version: ['>=1.9.3', '<6.0.0'],
required: get_option('libnfs'),
method: 'pkg-config')
endif
@@ -1245,6 +1384,14 @@ if not get_option('uadk').auto() or have_system
uadk = declare_dependency(dependencies: [libwd, libwd_comp])
endif
endif
+
+qatzip = not_found
+if not get_option('qatzip').auto() or have_system
+ qatzip = dependency('qatzip', version: '>=1.1.2',
+ required: get_option('qatzip'),
+ method: 'pkg-config')
+endif
+
virgl = not_found
have_vhost_user_gpu = have_tools and host_os == 'linux' and pixman.found()
@@ -1335,7 +1482,7 @@ iconv = not_found
curses = not_found
if have_system and get_option('curses').allowed()
curses_test = '''
- #if defined(__APPLE__) || defined(__OpenBSD__)
+ #ifdef __APPLE__
#define _XOPEN_SOURCE_EXTENDED 1
#endif
#include <locale.h>
@@ -1644,6 +1791,12 @@ if (have_system or have_tools) and (virgl.found() or opengl.found())
endif
have_vhost_user_gpu = have_vhost_user_gpu and virgl.found() and opengl.found() and gbm.found()
+libcbor = not_found
+if not get_option('libcbor').auto() or have_system
+ libcbor = dependency('libcbor', version: '>=0.7.0',
+ required: get_option('libcbor'))
+endif
+
gnutls = not_found
gnutls_crypto = not_found
if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_system)
@@ -1682,6 +1835,7 @@ gcrypt = not_found
nettle = not_found
hogweed = not_found
crypto_sm4 = not_found
+crypto_sm3 = not_found
xts = 'none'
if get_option('nettle').enabled() and get_option('gcrypt').enabled()
@@ -1696,7 +1850,6 @@ endif
if not gnutls_crypto.found()
if (not get_option('gcrypt').auto() or have_system) and not get_option('nettle').enabled()
gcrypt = dependency('libgcrypt', version: '>=1.8',
- method: 'config-tool',
required: get_option('gcrypt'))
# Debian has removed -lgpg-error from libgcrypt-config
# as it "spreads unnecessary dependencies" which in
@@ -1718,6 +1871,17 @@ if not gnutls_crypto.found()
}''', dependencies: gcrypt)
crypto_sm4 = not_found
endif
+ crypto_sm3 = gcrypt
+ # SM3 ALG is available in libgcrypt >= 1.9
+ if gcrypt.found() and not cc.links('''
+ #include <gcrypt.h>
+ int main(void) {
+ gcry_md_hd_t handler;
+ gcry_md_open(&handler, GCRY_MD_SM3, 0);
+ return 0;
+ }''', dependencies: gcrypt)
+ crypto_sm3 = not_found
+ endif
endif
if (not get_option('nettle').auto() or have_system) and not gcrypt.found()
nettle = dependency('nettle', version: '>=3.4',
@@ -1738,6 +1902,31 @@ if not gnutls_crypto.found()
}''', dependencies: nettle)
crypto_sm4 = not_found
endif
+ crypto_sm3 = nettle
+ # SM3 ALG is available in nettle >= 3.8
+ if nettle.found() and not cc.links('''
+ #include <nettle/sm3.h>
+ #include <nettle/hmac.h>
+ int main(void) {
+ struct sm3_ctx ctx;
+ struct hmac_sm3_ctx hmac_ctx;
+ unsigned char data[64] = {0};
+ unsigned char output[32];
+
+ // SM3 hash function test
+ sm3_init(&ctx);
+ sm3_update(&ctx, 64, data);
+ sm3_digest(&ctx, 32, data);
+
+ // HMAC-SM3 test
+ hmac_sm3_set_key(&hmac_ctx, 32, data);
+ hmac_sm3_update(&hmac_ctx, 64, data);
+ hmac_sm3_digest(&hmac_ctx, 32, output);
+
+ return 0;
+ }''', dependencies: nettle)
+ crypto_sm3 = not_found
+ endif
endif
endif
@@ -1979,6 +2168,7 @@ endif
tasn1 = not_found
if gnutls.found()
tasn1 = dependency('libtasn1',
+ required: false,
method: 'pkg-config')
endif
keyutils = not_found
@@ -2013,19 +2203,23 @@ if not has_malloc_trim and get_option('malloc_trim').enabled()
endif
endif
-gnu_source_prefix = '''
+osdep_prefix = '''
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
-'''
-
-# Check whether the glibc provides STATX_BASIC_STATS
-has_statx = cc.has_header_symbol('sys/stat.h', 'STATX_BASIC_STATS', prefix: gnu_source_prefix)
-
-# Check whether statx() provides mount ID information
-has_statx_mnt_id = cc.has_header_symbol('sys/stat.h', 'STATX_MNT_ID', prefix: gnu_source_prefix)
+ #include <stddef.h>
+ #include <sys/types.h>
+ #include <string.h>
+ #include <limits.h>
+ /* Put unistd.h before time.h as that triggers localtime_r/gmtime_r
+ * function availability on recentish Mingw-w64 platforms. */
+ #include <unistd.h>
+ #include <time.h>
+ #include <errno.h>
+ #include <fcntl.h>
+'''
have_vhost_user_blk_server = get_option('vhost_user_blk_server') \
.require(host_os == 'linux',
@@ -2100,8 +2294,14 @@ endif
# libxdp
libxdp = not_found
if not get_option('af_xdp').auto() or have_system
- libxdp = dependency('libxdp', required: get_option('af_xdp'),
- version: '>=1.4.0', method: 'pkg-config')
+ if libbpf.found()
+ libxdp = dependency('libxdp', required: get_option('af_xdp'),
+ version: '>=1.4.0', method: 'pkg-config')
+ else
+ if get_option('af_xdp').enabled()
+ error('libxdp requested, but libbpf is not available')
+ endif
+ endif
endif
# libdw
@@ -2119,6 +2319,7 @@ endif
config_host_data = configuration_data()
+config_host_data.set('CONFIG_HAVE_RUST', have_rust)
audio_drivers_selected = []
if have_system
audio_drivers_available = {
@@ -2180,12 +2381,18 @@ have_virtfs = get_option('virtfs') \
.disable_auto_if(not have_tools and not have_system) \
.allowed()
-have_virtfs_proxy_helper = get_option('virtfs_proxy_helper') \
- .require(host_os != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \
- .require(have_virtfs, error_message: 'the virtfs proxy helper requires that virtfs is enabled') \
- .disable_auto_if(not have_tools) \
- .require(libcap_ng.found(), error_message: 'the virtfs proxy helper requires libcap-ng') \
- .allowed()
+qga_fsfreeze = false
+qga_fstrim = false
+if host_os == 'linux'
+ if cc.has_header_symbol('linux/fs.h', 'FIFREEZE')
+ qga_fsfreeze = true
+ endif
+ if cc.has_header_symbol('linux/fs.h', 'FITRIM')
+ qga_fstrim = true
+ endif
+elif host_os == 'freebsd' and cc.has_header_symbol('ufs/ffs/fs.h', 'UFSSUSPEND')
+ qga_fsfreeze = true
+endif
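qga_fsfreeze/qga_fstrim gate the guest-agent fsfreeze and fstrim commands on the FIFREEZE and FITRIM ioctls from <linux/fs.h>. A rough standalone sketch of the FITRIM side (the mount point is arbitrary and the ioctl normally needs root):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
        int fd = open("/", O_RDONLY);
        /* ask the kernel to trim the whole filesystem backing "/" */
        struct fstrim_range range = { .start = 0, .len = (__u64)-1, .minlen = 0 };

        if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0) {
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        }
        return 0;
    }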
if get_option('block_drv_ro_whitelist') == ''
config_host_data.set('CONFIG_BDRV_RO_WHITELIST', '')
@@ -2263,6 +2470,7 @@ config_host_data.set('CONFIG_ATTR', libattr.found())
config_host_data.set('CONFIG_BDRV_WHITELIST_TOOLS', get_option('block_drv_whitelist_in_tools'))
config_host_data.set('CONFIG_BRLAPI', brlapi.found())
config_host_data.set('CONFIG_BSD', host_os in bsd_oses)
+config_host_data.set('CONFIG_FREEBSD', host_os == 'freebsd')
config_host_data.set('CONFIG_CAPSTONE', capstone.found())
config_host_data.set('CONFIG_COCOA', cocoa.found())
config_host_data.set('CONFIG_DARWIN', host_os == 'darwin')
@@ -2279,6 +2487,8 @@ config_host_data.set('CONFIG_BLKIO', blkio.found())
if blkio.found()
config_host_data.set('CONFIG_BLKIO_VHOST_VDPA_FD',
blkio.version().version_compare('>=1.3.0'))
+ config_host_data.set('CONFIG_BLKIO_WRITE_ZEROS_FUA',
+ blkio.version().version_compare('>=1.4.0'))
endif
config_host_data.set('CONFIG_CURL', curl.found())
config_host_data.set('CONFIG_CURSES', curses.found())
@@ -2331,7 +2541,7 @@ config_host_data.set('CONFIG_PIXMAN', pixman.found())
config_host_data.set('CONFIG_SLIRP', slirp.found())
config_host_data.set('CONFIG_SNAPPY', snappy.found())
config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos')
-if get_option('tcg').allowed()
+if have_tcg
config_host_data.set('CONFIG_TCG', 1)
config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci')
endif
@@ -2355,10 +2565,7 @@ config_host_data.set('CONFIG_VNC', vnc.found())
config_host_data.set('CONFIG_VNC_JPEG', jpeg.found())
config_host_data.set('CONFIG_VNC_SASL', sasl.found())
if virgl.found()
- config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT',
- cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d',
- prefix: '#include <virglrenderer.h>',
- dependencies: virgl))
+ config_host_data.set('VIRGL_VERSION_MAJOR', virgl.version().split('.')[0])
endif
config_host_data.set('CONFIG_VIRTFS', have_virtfs)
config_host_data.set('CONFIG_VTE', vte.found())
@@ -2371,14 +2578,14 @@ config_host_data.set('CONFIG_TASN1', tasn1.found())
config_host_data.set('CONFIG_GCRYPT', gcrypt.found())
config_host_data.set('CONFIG_NETTLE', nettle.found())
config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found())
+config_host_data.set('CONFIG_CRYPTO_SM3', crypto_sm3.found())
config_host_data.set('CONFIG_HOGWEED', hogweed.found())
config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private')
config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim)
-config_host_data.set('CONFIG_STATX', has_statx)
-config_host_data.set('CONFIG_STATX_MNT_ID', has_statx_mnt_id)
config_host_data.set('CONFIG_ZSTD', zstd.found())
config_host_data.set('CONFIG_QPL', qpl.found())
config_host_data.set('CONFIG_UADK', uadk.found())
+config_host_data.set('CONFIG_QATZIP', qatzip.found())
config_host_data.set('CONFIG_FUSE', fuse.found())
config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found())
config_host_data.set('CONFIG_SPICE_PROTOCOL', spice_protocol.found())
@@ -2423,13 +2630,26 @@ config_host_data.set('CONFIG_DEBUG_TCG', get_option('debug_tcg'))
config_host_data.set('CONFIG_DEBUG_REMAP', get_option('debug_remap'))
config_host_data.set('CONFIG_QOM_CAST_DEBUG', get_option('qom_cast_debug'))
config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed())
+config_host_data.set('CONFIG_FSFREEZE', qga_fsfreeze)
+config_host_data.set('CONFIG_FSTRIM', qga_fstrim)
# has_header
config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h'))
config_host_data.set('CONFIG_LINUX_MAGIC_H', cc.has_header('linux/magic.h'))
-config_host_data.set('CONFIG_VALGRIND_H', cc.has_header('valgrind/valgrind.h'))
+valgrind = false
+if get_option('valgrind').allowed()
+ if cc.has_header('valgrind/valgrind.h')
+ valgrind = true
+ else
+ if get_option('valgrind').enabled()
+ error('valgrind requested but valgrind.h not found')
+ endif
+ endif
+endif
+config_host_data.set('CONFIG_VALGRIND_H', valgrind)
config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h'))
config_host_data.set('HAVE_DRM_H', cc.has_header('libdrm/drm.h'))
+config_host_data.set('HAVE_OPENAT2_H', cc.has_header('linux/openat2.h'))
config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h'))
config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h'))
config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h'))
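The new 'valgrind' option above only controls whether <valgrind/valgrind.h> is used; the header's client-request macros are cheap no-ops when the process is not running under valgrind. A hypothetical consumer guarded by the config switch set here (the message is illustrative only):

    #include <stdio.h>
    #ifdef CONFIG_VALGRIND_H
    #include <valgrind/valgrind.h>
    #endif

    int main(void)
    {
    #ifdef CONFIG_VALGRIND_H
        /* expands to 0 when not running under valgrind */
        if (RUNNING_ON_VALGRIND) {
            fprintf(stderr, "running under valgrind\n");
        }
    #endif
        return 0;
    }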
@@ -2445,7 +2665,6 @@ config_host_data.set('CONFIG_CLOCK_ADJTIME', cc.has_function('clock_adjtime'))
config_host_data.set('CONFIG_DUP3', cc.has_function('dup3'))
config_host_data.set('CONFIG_FALLOCATE', cc.has_function('fallocate'))
config_host_data.set('CONFIG_POSIX_FALLOCATE', cc.has_function('posix_fallocate'))
-config_host_data.set('CONFIG_GETCPU', cc.has_function('getcpu', prefix: gnu_source_prefix))
config_host_data.set('CONFIG_SCHED_GETCPU', cc.has_function('sched_getcpu', prefix: '#include <sched.h>'))
# Note that we need to specify prefix: here to avoid incorrectly
# thinking that Windows has posix_memalign()
@@ -2461,11 +2680,13 @@ config_host_data.set('CONFIG_SETNS', cc.has_function('setns') and cc.has_functio
config_host_data.set('CONFIG_SYNCFS', cc.has_function('syncfs'))
config_host_data.set('CONFIG_SYNC_FILE_RANGE', cc.has_function('sync_file_range'))
config_host_data.set('CONFIG_TIMERFD', cc.has_function('timerfd_create'))
+config_host_data.set('CONFIG_GETLOADAVG', cc.has_function('getloadavg'))
config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range'))
config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs'))
config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice)
+config_host_data.set('HAVE_GLIB_WITH_ALIGNED_ALLOC', glib_has_aligned_alloc)
config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util))
-config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul'))
+config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul', prefix: osdep_prefix))
config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include <stdlib.h>'))
if rbd.found()
config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS',
@@ -2481,7 +2702,7 @@ if rdma.found()
endif
have_asan_fiber = false
-if get_option('sanitizers') and \
+if get_option('asan') and \
not cc.has_function('__sanitizer_start_switch_fiber',
args: '-fsanitize=address',
prefix: '#include <sanitizer/asan_interface.h>')
@@ -2521,6 +2742,8 @@ config_host_data.set('CONFIG_FALLOCATE_ZERO_RANGE',
config_host_data.set('CONFIG_FIEMAP',
cc.has_header('linux/fiemap.h') and
cc.has_header_symbol('linux/fs.h', 'FS_IOC_FIEMAP'))
+config_host_data.set('CONFIG_GETCPU',
+ cc.has_header_symbol('sched.h', 'getcpu', prefix: osdep_prefix))
config_host_data.set('CONFIG_GETRANDOM',
cc.has_function('getrandom') and
cc.has_header_symbol('sys/random.h', 'GRND_NONBLOCK'))
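CONFIG_GETCPU now probes for the getcpu() declaration in <sched.h> (with the osdep prefix supplying _GNU_SOURCE) rather than only for the symbol. For reference, a small sketch of the interface being detected; the output formatting is made up:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int cpu = 0, node = 0;

        /* glibc >= 2.29 declares getcpu() in <sched.h> under _GNU_SOURCE */
        if (getcpu(&cpu, &node) == 0) {
            printf("running on cpu %u, node %u\n", cpu, node);
        }
        return 0;
    }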
@@ -2534,6 +2757,44 @@ config_host_data.set('HAVE_OPTRESET',
cc.has_header_symbol('getopt.h', 'optreset'))
config_host_data.set('HAVE_IPPROTO_MPTCP',
cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP'))
+if libaio.found()
+ config_host_data.set('HAVE_IO_PREP_PWRITEV2',
+ cc.has_header_symbol('libaio.h', 'io_prep_pwritev2'))
+endif
+if linux_io_uring.found()
+ config_host_data.set('HAVE_IO_URING_PREP_WRITEV2',
+ cc.has_header_symbol('liburing.h', 'io_uring_prep_writev2'))
+endif
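HAVE_IO_PREP_PWRITEV2 and HAVE_IO_URING_PREP_WRITEV2 detect the vectored-write helpers that take a trailing RWF_* flags argument. A rough liburing sketch, not part of the patch; the file path, queue depth and RWF_DSYNC flag are chosen only for illustration:

    #include <fcntl.h>
    #include <liburing.h>
    #include <linux/fs.h>          /* RWF_DSYNC */
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov = { .iov_base = "hello\n", .iov_len = 6 };
        int fd = open("/tmp/writev2-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);

        if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0) {
            return 1;
        }
        sqe = io_uring_get_sqe(&ring);
        /* the trailing flags argument is what the *2 variant adds */
        io_uring_prep_writev2(sqe, fd, &iov, 1, 0, RWF_DSYNC);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
    }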
+config_host_data.set('HAVE_TCP_KEEPCNT',
+ cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPCNT') or
+ cc.compiles('''
+ #include <ws2tcpip.h>
+ #ifndef TCP_KEEPCNT
+ #error
+ #endif
+ int main(void) { return 0; }''',
+ name: 'Win32 TCP_KEEPCNT'))
+# On Darwin TCP_KEEPIDLE is available under different name, TCP_KEEPALIVE.
+# https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/bsd/man/man4/tcp.4#L172
+config_host_data.set('HAVE_TCP_KEEPIDLE',
+ cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPIDLE') or
+ cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPALIVE') or
+ cc.compiles('''
+ #include <ws2tcpip.h>
+ #ifndef TCP_KEEPIDLE
+ #error
+ #endif
+ int main(void) { return 0; }''',
+ name: 'Win32 TCP_KEEPIDLE'))
+config_host_data.set('HAVE_TCP_KEEPINTVL',
+ cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPINTVL') or
+ cc.compiles('''
+ #include <ws2tcpip.h>
+ #ifndef TCP_KEEPINTVL
+ #error
+ #endif
+ int main(void) { return 0; }''',
+ name: 'Win32 TCP_KEEPINTVL'))
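The three probes above cope with platform spelling differences: Darwin exposes the idle-time option as TCP_KEEPALIVE, and on Windows the constants come from <ws2tcpip.h>. A POSIX-side sketch that tolerates the Darwin spelling (the timeout values are arbitrary and not from the patch):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #if !defined(TCP_KEEPIDLE) && defined(TCP_KEEPALIVE)
    #define TCP_KEEPIDLE TCP_KEEPALIVE   /* Darwin spelling */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int on = 1, idle = 60, intvl = 10, cnt = 5;

        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
    #ifdef TCP_KEEPIDLE
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
    #endif
    #ifdef TCP_KEEPINTVL
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
    #endif
    #ifdef TCP_KEEPCNT
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
    #endif
        return 0;
    }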
# has_member
config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID',
@@ -2557,8 +2818,7 @@ config_host_data.set('HAVE_UTMPX',
config_host_data.set('CONFIG_EVENTFD', cc.links('''
#include <sys/eventfd.h>
int main(void) { return eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); }'''))
-config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + '''
- #include <unistd.h>
+config_host_data.set('CONFIG_FDATASYNC', cc.links(osdep_prefix + '''
int main(void) {
#if defined(_POSIX_SYNCHRONIZED_IO) && _POSIX_SYNCHRONIZED_IO > 0
return fdatasync(0);
@@ -2567,10 +2827,8 @@ config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + '''
#endif
}'''))
-has_madvise = cc.links(gnu_source_prefix + '''
- #include <sys/types.h>
+has_madvise = cc.links(osdep_prefix + '''
#include <sys/mman.h>
- #include <stddef.h>
int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }''')
missing_madvise_proto = false
if has_madvise
@@ -2580,21 +2838,18 @@ if has_madvise
# missing-prototype case, we try again with a definitely-bogus prototype.
# This will only compile if the system headers don't provide the prototype;
# otherwise the conflicting prototypes will cause a compiler error.
- missing_madvise_proto = cc.links(gnu_source_prefix + '''
- #include <sys/types.h>
+ missing_madvise_proto = cc.links(osdep_prefix + '''
#include <sys/mman.h>
- #include <stddef.h>
extern int madvise(int);
int main(void) { return madvise(0); }''')
endif
config_host_data.set('CONFIG_MADVISE', has_madvise)
config_host_data.set('HAVE_MADVISE_WITHOUT_PROTOTYPE', missing_madvise_proto)
-config_host_data.set('CONFIG_MEMFD', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_MEMFD', cc.links(osdep_prefix + '''
#include <sys/mman.h>
int main(void) { return memfd_create("foo", MFD_ALLOW_SEALING); }'''))
-config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + '''
- #include <fcntl.h>
+config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(osdep_prefix + '''
#if !defined(AT_EMPTY_PATH)
# error missing definition
#else
@@ -2605,13 +2860,12 @@ config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + '''
# i.e. errno is set and -1 is returned. That's not really how POSIX defines the
# function. On the flip side, it has madvise() which is preferred anyways.
if host_os != 'darwin'
- config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(gnu_source_prefix + '''
+ config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(osdep_prefix + '''
#include <sys/mman.h>
- #include <stddef.h>
int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }'''))
endif
-config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(osdep_prefix + '''
#include <pthread.h>
static void *f(void *p) { return NULL; }
@@ -2622,7 +2876,7 @@ config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_pref
pthread_setname_np(thread, "QEMU");
return 0;
}''', dependencies: threads))
-config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(osdep_prefix + '''
#include <pthread.h>
static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; }
@@ -2632,7 +2886,7 @@ config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_pre
pthread_create(&thread, 0, f, 0);
return 0;
}''', dependencies: threads))
-config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(osdep_prefix + '''
#include <pthread.h>
#include <pthread_np.h>
@@ -2644,9 +2898,8 @@ config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(gnu_source_prefix +
pthread_set_name_np(thread, "QEMU");
return 0;
}''', dependencies: threads))
-config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(osdep_prefix + '''
#include <pthread.h>
- #include <time.h>
int main(void)
{
@@ -2655,7 +2908,7 @@ config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_pre
pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
return 0;
}''', dependencies: threads))
-config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(osdep_prefix + '''
#include <pthread.h>
static void *f(void *p) { return NULL; }
@@ -2672,15 +2925,10 @@ config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix +
CPU_FREE(cpuset);
return 0;
}''', dependencies: threads))
-config_host_data.set('CONFIG_SIGNALFD', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_SIGNALFD', cc.links(osdep_prefix + '''
#include <sys/signalfd.h>
- #include <stddef.h>
int main(void) { return signalfd(-1, NULL, SFD_CLOEXEC); }'''))
-config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + '''
- #include <unistd.h>
- #include <fcntl.h>
- #include <limits.h>
-
+config_host_data.set('CONFIG_SPLICE', cc.links(osdep_prefix + '''
int main(void)
{
int len, fd = 0;
@@ -2689,16 +2937,22 @@ config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + '''
return 0;
}'''))
-config_host_data.set('HAVE_MLOCKALL', cc.links(gnu_source_prefix + '''
+config_host_data.set('HAVE_MLOCKALL', cc.links(osdep_prefix + '''
#include <sys/mman.h>
int main(void) {
return mlockall(MCL_FUTURE);
}'''))
+config_host_data.set('HAVE_MLOCK_ONFAULT', cc.links(osdep_prefix + '''
+ #include <sys/mman.h>
+ int main(void) {
+ return mlockall(MCL_FUTURE | MCL_ONFAULT);
+ }'''))
+
have_l2tpv3 = false
if get_option('l2tpv3').allowed() and have_system
have_l2tpv3 = cc.has_type('struct mmsghdr',
- prefix: gnu_source_prefix + '''
+ prefix: osdep_prefix + '''
#include <sys/socket.h>
#include <linux/ip.h>''')
endif
@@ -2754,9 +3008,11 @@ config_host_data.set('CONFIG_ATOMIC64', cc.links('''
__atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
__atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
return 0;
- }'''))
+ }''', args: qemu_isa_flags))
-has_int128_type = cc.compiles('''
+# has_int128_type is set to false on Emscripten to avoid errors by libffi
+# during runtime.
+has_int128_type = host_os != 'emscripten' and cc.compiles('''
__int128_t a;
__uint128_t b;
int main(void) { b = a; }''')
@@ -2788,7 +3044,7 @@ if has_int128_type
__atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return 0;
}'''
- has_atomic128 = cc.links(atomic_test_128)
+ has_atomic128 = cc.links(atomic_test_128, args: qemu_isa_flags)
config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
@@ -2797,7 +3053,8 @@ if has_int128_type
# without optimization enabled. Try again with optimizations locally
# enabled for the function. See
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
- has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128)
+ has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128,
+ args: qemu_isa_flags)
config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
if not has_atomic128_opt
@@ -2808,17 +3065,25 @@ if has_int128_type
__sync_val_compare_and_swap_16(&x, y, x);
return 0;
}
- '''))
+ ''', args: qemu_isa_flags))
endif
endif
endif
-config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + '''
+config_host_data.set('CONFIG_GETAUXVAL', cc.links(osdep_prefix + '''
#include <sys/auxv.h>
int main(void) {
return getauxval(AT_HWCAP) == 0;
}'''))
+config_host_data.set('CONFIG_ELF_AUX_INFO', cc.links(osdep_prefix + '''
+ #include <sys/auxv.h>
+ int main(void) {
+ unsigned long hwcap = 0;
+ elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
+ return hwcap;
+ }'''))
+
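CONFIG_GETAUXVAL and the new CONFIG_ELF_AUX_INFO cover the two ways of reading the ELF auxiliary vector: glibc's getauxval() and the BSD-style elf_aux_info(). A sketch of a portable wrapper keyed off those switches (the helper name is made up, the config macros match the probes above):

    #include <stdio.h>

    #if defined(CONFIG_GETAUXVAL)
    #include <sys/auxv.h>
    static unsigned long get_hwcap(void)
    {
        return getauxval(AT_HWCAP);
    }
    #elif defined(CONFIG_ELF_AUX_INFO)
    #include <sys/auxv.h>
    static unsigned long get_hwcap(void)
    {
        unsigned long hwcap = 0;

        elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
        return hwcap;
    }
    #else
    static unsigned long get_hwcap(void)
    {
        return 0;
    }
    #endif

    int main(void)
    {
        printf("AT_HWCAP = %#lx\n", get_hwcap());
        return 0;
    }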
config_host_data.set('CONFIG_USBFS', have_linux_user and cc.compiles('''
#include <linux/usbdevice_fs.h>
@@ -2869,22 +3134,16 @@ config_host_data.set('CONFIG_ASM_HWPROBE_H',
cc.has_header_symbol('asm/hwprobe.h',
'RISCV_HWPROBE_EXT_ZBA'))
-config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \
- .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \
- .require(cc.links('''
- #include <cpuid.h>
+if have_cpuid_h
+ have_avx2 = cc.links('''
#include <immintrin.h>
static int __attribute__((target("avx2"))) bar(void *a) {
__m256i x = *(__m256i *)a;
return _mm256_testz_si256(x, x);
}
int main(int argc, char *argv[]) { return bar(argv[argc - 1]); }
- '''), error_message: 'AVX2 not available').allowed())
-
-config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
- .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \
- .require(cc.links('''
- #include <cpuid.h>
+ ''')
+ have_avx512bw = cc.links('''
#include <immintrin.h>
static int __attribute__((target("avx512bw"))) bar(void *a) {
__m512i *x = a;
@@ -2892,7 +3151,21 @@ config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
return res[1];
}
int main(int argc, char *argv[]) { return bar(argv[0]); }
- '''), error_message: 'AVX512BW not available').allowed())
+ ''')
+ if get_option('x86_version') >= '3' and not have_avx2
+ error('Cannot enable AVX optimizations due to missing intrinsics')
+ elif get_option('x86_version') >= '4' and not have_avx512bw
+ error('Cannot enable AVX512 optimizations due to missing intrinsics')
+ endif
+else
+ have_avx2 = false
+ have_avx512bw = false
+ if get_option('x86_version') >= '3'
+ error('Cannot enable AVX optimizations due to missing cpuid.h')
+ endif
+endif
+config_host_data.set('CONFIG_AVX2_OPT', have_avx2)
+config_host_data.set('CONFIG_AVX512BW_OPT', have_avx512bw)
# For both AArch64 and AArch32, detect if builtins are available.
config_host_data.set('CONFIG_ARM_AES_BUILTIN', cc.compiles('''
@@ -2924,9 +3197,7 @@ config_host_data.set('CONFIG_MEMBARRIER', get_option('membarrier') \
.allowed())
have_afalg = get_option('crypto_afalg') \
- .require(cc.compiles(gnu_source_prefix + '''
- #include <errno.h>
- #include <sys/types.h>
+ .require(cc.compiles(osdep_prefix + '''
#include <sys/socket.h>
#include <linux/if_alg.h>
int main(void) {
@@ -2965,6 +3236,11 @@ if host_os == 'windows'
}''', name: '_lock_file and _unlock_file'))
endif
+if spice.found()
+ config_host_data.set('HAVE_SPICE_QXL_GL_SCANOUT2',
+ cc.has_function('spice_qxl_gl_scanout2', dependencies: spice))
+endif
+
if host_os == 'windows'
mingw_has_setjmp_longjmp = cc.links('''
#include <setjmp.h>
@@ -2986,6 +3262,9 @@ if host_os == 'windows'
endif
endif
+# Detect host pointer size for the target configuration loop.
+host_long_bits = cc.sizeof('void *') * 8
+
########################
# Target configuration #
########################
@@ -2998,11 +3277,11 @@ config_devices_mak_list = []
config_devices_h = {}
config_target_h = {}
config_target_mak = {}
+config_base_arch_mak = {}
disassemblers = {
'alpha' : ['CONFIG_ALPHA_DIS'],
'avr' : ['CONFIG_AVR_DIS'],
- 'cris' : ['CONFIG_CRIS_DIS'],
'hexagon' : ['CONFIG_HEXAGON_DIS'],
'hppa' : ['CONFIG_HPPA_DIS'],
'i386' : ['CONFIG_I386_DIS'],
@@ -3029,6 +3308,8 @@ host_kconfig = \
(spice.found() ? ['CONFIG_SPICE=y'] : []) + \
(have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \
(opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \
+ (libcbor.found() ? ['CONFIG_LIBCBOR=y'] : []) + \
+ (gnutls.found() ? ['CONFIG_GNUTLS=y'] : []) + \
(x11.found() ? ['CONFIG_X11=y'] : []) + \
(fdt.found() ? ['CONFIG_FDT=y'] : []) + \
(have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \
@@ -3038,7 +3319,8 @@ host_kconfig = \
(host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \
(multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \
(vfio_user_server_allowed ? ['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) + \
- (hv_balloon ? ['CONFIG_HV_BALLOON_POSSIBLE=y'] : [])
+ (hv_balloon ? ['CONFIG_HV_BALLOON_POSSIBLE=y'] : []) + \
+ (have_rust ? ['CONFIG_HAVE_RUST=y'] : [])
ignored = [ 'TARGET_XML_FILES', 'TARGET_ABI_DIR', 'TARGET_ARCH' ]
@@ -3071,20 +3353,22 @@ foreach target : target_dirs
config_target += {
'CONFIG_USER_ONLY': 'y',
'CONFIG_QEMU_INTERP_PREFIX':
- get_option('interp_prefix').replace('%M', config_target['TARGET_NAME'])
+ get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']),
+ 'CONFIG_QEMU_RTSIG_MAP': get_option('rtsig_map'),
}
endif
+ config_target += keyval.load('configs/targets' / target + '.mak')
+
target_kconfig = []
foreach sym: accelerators
+ # Disallow 64-bit on 32-bit emulation and virtualization
+ if host_long_bits < config_target['TARGET_LONG_BITS'].to_int()
+ continue
+ endif
if sym == 'CONFIG_TCG' or target in accelerator_targets.get(sym, [])
config_target += { sym: 'y' }
config_all_accel += { sym: 'y' }
- if target in modular_tcg
- config_target += { 'CONFIG_TCG_MODULAR': 'y' }
- else
- config_target += { 'CONFIG_TCG_BUILTIN': 'y' }
- endif
target_kconfig += [ sym + '=y' ]
endif
endforeach
@@ -3095,9 +3379,6 @@ foreach target : target_dirs
error('No accelerator available for target @0@'.format(target))
endif
- config_target += keyval.load('configs/targets' / target + '.mak')
- config_target += { 'TARGET_' + config_target['TARGET_ARCH'].to_upper(): 'y' }
-
if 'TARGET_NEED_FDT' in config_target and not fdt.found()
if default_targets
warning('Disabling ' + target + ' due to missing libfdt')
@@ -3110,6 +3391,7 @@ foreach target : target_dirs
actual_target_dirs += target
# Add default keys
+ config_target += { 'TARGET_' + config_target['TARGET_ARCH'].to_upper(): 'y' }
if 'TARGET_BASE_ARCH' not in config_target
config_target += {'TARGET_BASE_ARCH': config_target['TARGET_ARCH']}
endif
@@ -3157,6 +3439,12 @@ foreach target : target_dirs
target_kconfig += 'CONFIG_' + config_target['TARGET_ARCH'].to_upper() + '=y'
target_kconfig += 'CONFIG_TARGET_BIG_ENDIAN=' + config_target['TARGET_BIG_ENDIAN']
+ # PVG is not cross-architecture. Use accelerator_targets as a proxy to
+ # figure out which target can support PVG on this host
+ if pvg.found() and target in accelerator_targets.get('CONFIG_HVF', [])
+ target_kconfig += 'CONFIG_MAC_PVG=y'
+ endif
+
config_input = meson.get_external_property(target, 'default')
config_devices_mak = target + '-config-devices.mak'
config_devices_mak = configure_file(
@@ -3181,6 +3469,11 @@ foreach target : target_dirs
config_all_devices += config_devices
endif
config_target_mak += {target: config_target}
+
+ # build a merged config for all targets with the same TARGET_BASE_ARCH
+ target_base_arch = config_target['TARGET_BASE_ARCH']
+ config_base_arch = config_base_arch_mak.get(target_base_arch, {}) + config_target
+ config_base_arch_mak += {target_base_arch: config_base_arch}
endforeach
target_dirs = actual_target_dirs
@@ -3226,7 +3519,8 @@ endif
# Generated sources #
#####################
-genh += configure_file(output: 'config-host.h', configuration: config_host_data)
+config_host_h = configure_file(output: 'config-host.h', configuration: config_host_data)
+genh += config_host_h
hxtool = find_program('scripts/hxtool')
shaderinclude = find_program('scripts/shaderinclude.py')
@@ -3244,6 +3538,7 @@ qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py',
meson.current_source_dir() / 'scripts/qapi/schema.py',
meson.current_source_dir() / 'scripts/qapi/source.py',
meson.current_source_dir() / 'scripts/qapi/types.py',
+ meson.current_source_dir() / 'scripts/qapi/features.py',
meson.current_source_dir() / 'scripts/qapi/visit.py',
meson.current_source_dir() / 'scripts/qapi-gen.py'
]
@@ -3327,6 +3622,7 @@ if have_block
trace_events_subdirs += [
'authz',
'block',
+ 'chardev',
'io',
'nbd',
'scsi',
@@ -3338,7 +3634,6 @@ if have_system
'audio',
'backends',
'backends/tpm',
- 'chardev',
'ebpf',
'hw/9pfs',
'hw/acpi',
@@ -3373,19 +3668,24 @@ if have_system
'hw/pci-host',
'hw/ppc',
'hw/rtc',
+ 'hw/riscv',
'hw/s390x',
'hw/scsi',
'hw/sd',
+ 'hw/sensor',
'hw/sh4',
'hw/sparc',
'hw/sparc64',
'hw/ssi',
'hw/timer',
'hw/tpm',
+ 'hw/uefi',
'hw/ufs',
'hw/usb',
'hw/vfio',
+ 'hw/vfio-user',
'hw/virtio',
+ 'hw/vmapple',
'hw/watchdog',
'hw/xen',
'hw/gpio',
@@ -3432,6 +3732,7 @@ qom_ss = ss.source_set()
system_ss = ss.source_set()
specific_fuzz_ss = ss.source_set()
specific_ss = ss.source_set()
+rust_devices_ss = ss.source_set()
stub_ss = ss.source_set()
trace_ss = ss.source_set()
user_ss = ss.source_set()
@@ -3439,14 +3740,17 @@ util_ss = ss.source_set()
# accel modules
qtest_module_ss = ss.source_set()
-tcg_module_ss = ss.source_set()
modules = {}
target_modules = {}
+plugin_modules = []
hw_arch = {}
target_arch = {}
target_system_arch = {}
target_user_arch = {}
+hw_common_arch = {}
+target_common_arch = {}
+target_common_system_arch = {}
# NOTE: the trace/ subdirectory needs the qapi_trace_events variable
# that is filled in by qapi/.
@@ -3493,9 +3797,13 @@ libqemuutil = static_library('qemuutil',
build_by_default: false,
sources: util_ss.sources() + stub_ss.sources() + genh,
dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc])
+qemuutil_deps = [event_loop_base]
+if host_os != 'windows'
+ qemuutil_deps += [rt]
+endif
qemuutil = declare_dependency(link_with: libqemuutil,
sources: genh + version_res,
- dependencies: [event_loop_base])
+ dependencies: qemuutil_deps)
if have_system or have_user
decodetree = generator(find_program('scripts/decodetree.py'),
@@ -3537,6 +3845,8 @@ if have_block
# os-win32.c does not
if host_os == 'windows'
system_ss.add(files('os-win32.c'))
+ elif host_os == 'emscripten'
+ blockdev_ss.add(files('os-wasm.c'))
else
blockdev_ss.add(files('os-posix.c'))
endif
@@ -3567,6 +3877,9 @@ endif
common_ss.add(pagevary)
specific_ss.add(files('page-target.c', 'page-vary-target.c'))
+common_ss.add(files('target-info.c'))
+specific_ss.add(files('target-info-stub.c'))
+
subdir('backends')
subdir('disas')
subdir('migration')
@@ -3581,6 +3894,10 @@ subdir('accel')
subdir('plugins')
subdir('ebpf')
+if 'CONFIG_TCG' in config_all_accel
+ subdir('contrib/plugins')
+endif
+
common_user_inc = []
subdir('common-user')
@@ -3592,11 +3909,7 @@ subdir('tests/qtest/libqos')
subdir('tests/qtest/fuzz')
# accel modules
-tcg_real_module_ss = ss.source_set()
-tcg_real_module_ss.add_all(when: 'CONFIG_TCG_MODULAR', if_true: tcg_module_ss)
-specific_ss.add_all(when: 'CONFIG_TCG_BUILTIN', if_true: tcg_module_ss)
-target_modules += { 'accel' : { 'qtest': qtest_module_ss,
- 'tcg': tcg_real_module_ss }}
+target_modules += { 'accel' : { 'qtest': qtest_module_ss }}
##############################################
# Internal static_libraries and dependencies #
@@ -3632,16 +3945,11 @@ foreach d, list : modules
install: true,
install_dir: qemu_moddir)
if module_ss.sources() != []
- # FIXME: Should use sl.extract_all_objects(recursive: true) as
- # input. Sources can be used multiple times but objects are
- # unique when it comes to lookup in compile_commands.json.
- # Depnds on a mesion version with
- # https://github.com/mesonbuild/meson/pull/8900
modinfo_files += custom_target(d + '-' + m + '.modinfo',
output: d + '-' + m + '.modinfo',
- input: module_ss.sources() + genh,
+ input: sl.extract_all_objects(recursive: true),
capture: true,
- command: [modinfo_collect, module_ss.sources()])
+ command: [modinfo_collect, '@INPUT@'])
endif
else
if d == 'block'
@@ -3680,12 +3988,11 @@ foreach d, list : target_modules
dependencies: target_module_ss.dependencies(),
install: true,
install_dir: qemu_moddir)
- # FIXME: Should use sl.extract_all_objects(recursive: true) too.
modinfo_files += custom_target(module_name + '.modinfo',
output: module_name + '.modinfo',
- input: target_module_ss.sources() + genh,
+ input: sl.extract_all_objects(recursive: true),
capture: true,
- command: [modinfo_collect, '--target', target, target_module_ss.sources()])
+ command: [modinfo_collect, '--target', target, '@INPUT@'])
endif
endif
endforeach
@@ -3790,7 +4097,7 @@ libchardev = static_library('chardev', chardev_ss.sources() + genh,
build_by_default: false)
chardev = declare_dependency(objects: libchardev.extract_all_objects(recursive: false),
- dependencies: chardev_ss.dependencies())
+ dependencies: [chardev_ss.dependencies(), io])
hwcore_ss = hwcore_ss.apply({})
libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh,
@@ -3805,8 +4112,20 @@ common_ss.add(hwcore)
system_ss.add(authz, blockdev, chardev, crypto, io, qmp)
common_ss.add(qom, qemuutil)
-common_ss.add_all(when: 'CONFIG_SYSTEM_ONLY', if_true: [system_ss])
-common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss)
+libuser = static_library('user',
+ user_ss.all_sources() + genh,
+ c_args: ['-DCONFIG_USER_ONLY',
+ '-DCOMPILING_SYSTEM_VS_USER'],
+ include_directories: common_user_inc,
+ dependencies: user_ss.all_dependencies(),
+ build_by_default: false)
+
+libsystem = static_library('system',
+ system_ss.all_sources() + genh,
+ c_args: ['-DCONFIG_SOFTMMU',
+ '-DCOMPILING_SYSTEM_VS_USER'],
+ dependencies: system_ss.all_dependencies(),
+ build_by_default: false)
# Note that this library is never used directly (only through extract_objects)
# and is not built by default; therefore, source files not used by the build
@@ -3814,11 +4133,114 @@ common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss)
common_all = static_library('common',
build_by_default: false,
sources: common_ss.all_sources() + genh,
- include_directories: common_user_inc,
implicit_include_directories: false,
dependencies: common_ss.all_dependencies())
+# construct common libraries per base architecture
+target_common_arch_libs = {}
+target_common_system_arch_libs = {}
+foreach target_base_arch, config_base_arch : config_base_arch_mak
+ target_inc = [include_directories('target' / target_base_arch)]
+ inc = [common_user_inc + target_inc]
+
+ target_common = common_ss.apply(config_base_arch, strict: false)
+ target_system = system_ss.apply(config_base_arch, strict: false)
+ target_user = user_ss.apply(config_base_arch, strict: false)
+ common_deps = []
+ system_deps = []
+ user_deps = []
+ foreach dep: target_common.dependencies()
+ common_deps += dep.partial_dependency(compile_args: true, includes: true)
+ endforeach
+ foreach dep: target_system.dependencies()
+ system_deps += dep.partial_dependency(compile_args: true, includes: true)
+ endforeach
+ foreach dep: target_user.dependencies()
+ user_deps += dep.partial_dependency(compile_args: true, includes: true)
+ endforeach
+
+  # prevent common code from accessing cpu compile-time definitions,
+ # but still allow access to cpu.h
+ target_c_args = ['-DCPU_DEFS_H']
+ target_system_c_args = target_c_args + ['-DCOMPILING_SYSTEM_VS_USER', '-DCONFIG_SOFTMMU']
+
+ if target_base_arch in target_common_arch
+ src = target_common_arch[target_base_arch]
+ lib = static_library(
+ 'common_' + target_base_arch,
+ build_by_default: false,
+ sources: src.all_sources() + genh,
+ include_directories: inc,
+ c_args: target_c_args,
+ dependencies: src.all_dependencies() + common_deps +
+ system_deps + user_deps)
+ target_common_arch_libs += {target_base_arch: lib}
+ endif
+
+ # merge hw_common_arch in target_common_system_arch
+ if target_base_arch in hw_common_arch
+ hw_src = hw_common_arch[target_base_arch]
+ if target_base_arch in target_common_system_arch
+ target_common_system_arch[target_base_arch].add_all(hw_src)
+ else
+ target_common_system_arch += {target_base_arch: hw_src}
+ endif
+ endif
+
+ if target_base_arch in target_common_system_arch
+ src = target_common_system_arch[target_base_arch]
+ lib = static_library(
+ 'system_' + target_base_arch,
+ build_by_default: false,
+ sources: src.all_sources() + genh,
+ include_directories: inc,
+ c_args: target_system_c_args,
+ dependencies: src.all_dependencies() + common_deps + system_deps)
+ target_common_system_arch_libs += {target_base_arch: lib}
+ endif
+endforeach
+
+if have_rust
+ bindings_incdir = include_directories('.', 'include')
+ # We would like to use --generate-cstr, but it is only available
+  # starting with bindgen 0.66.0. The oldest supported version
+  # is 0.60.x (Debian 12 has 0.60.1), which introduces --allowlist-file.
+ bindgen_args_common = [
+ '--disable-header-comment',
+ '--raw-line', '// @generated',
+ '--ctypes-prefix', 'std::os::raw',
+ '--generate-block',
+ '--impl-debug',
+ '--no-doc-comments',
+ '--with-derive-default',
+ '--no-layout-tests',
+ '--no-prepend-enum-name',
+ '--allowlist-file', meson.project_source_root() + '/include/.*',
+ '--allowlist-file', meson.project_source_root() + '/.*',
+ '--allowlist-file', meson.project_build_root() + '/.*'
+ ]
+ if not rustfmt.found()
+ if bindgen.version().version_compare('<0.65.0')
+ bindgen_args_common += ['--no-rustfmt-bindings']
+ else
+ bindgen_args_common += ['--formatter', 'none']
+ endif
+ endif
+ if bindgen.version().version_compare('>=0.66.0')
+ bindgen_args_common += ['--rust-target', '1.59']
+ endif
+ if bindgen.version().version_compare('<0.61.0')
+ # default in 0.61+
+ bindgen_args_common += ['--size_t-is-usize']
+ else
+ bindgen_args_common += ['--merge-extern-blocks']
+ endif
+ subdir('rust')
+endif
+
+
feature_to_c = find_program('scripts/feature_to_c.py')
+rust_root_crate = find_program('scripts/rust/rust_root_crate.sh')
if host_os == 'darwin'
entitlement = find_program('scripts/entitlement.sh')
@@ -3834,7 +4256,7 @@ foreach target : target_dirs
arch_deps = []
c_args = ['-DCOMPILING_PER_TARGET',
'-DCONFIG_TARGET="@0@-config-target.h"'.format(target),
- '-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)]
+ ]
link_args = emulator_link_args
target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])]
@@ -3854,6 +4276,7 @@ foreach target : target_dirs
arch_deps += hw.dependencies()
endif
+ c_args += ['-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)]
arch_srcs += config_devices_h[target]
link_args += ['@block.syms', '@qemu.syms']
else
@@ -3904,13 +4327,57 @@ foreach target : target_dirs
arch_deps += t.dependencies()
target_common = common_ss.apply(config_target, strict: false)
- objects = common_all.extract_objects(target_common.sources())
+ objects = [common_all.extract_objects(target_common.sources())]
arch_deps += target_common.dependencies()
+ if target_base_arch in target_common_arch_libs
+ src = target_common_arch[target_base_arch].apply(config_target, strict: false)
+ lib = target_common_arch_libs[target_base_arch]
+ objects += lib.extract_objects(src.sources())
+ arch_deps += src.dependencies()
+ endif
+ if target_type == 'system'
+ src = system_ss.apply(config_target, strict: false)
+ objects += libsystem.extract_objects(src.sources())
+ arch_deps += src.dependencies()
+ endif
+ if target_type == 'user'
+ src = user_ss.apply(config_target, strict: false)
+ objects += libuser.extract_objects(src.sources())
+ arch_deps += src.dependencies()
+ endif
+ if target_type == 'system' and target_base_arch in target_common_system_arch_libs
+ src = target_common_system_arch[target_base_arch].apply(config_target, strict: false)
+ lib = target_common_system_arch_libs[target_base_arch]
+ objects += lib.extract_objects(src.sources())
+ arch_deps += src.dependencies()
+ endif
target_specific = specific_ss.apply(config_target, strict: false)
arch_srcs += target_specific.sources()
arch_deps += target_specific.dependencies()
+ if have_rust and target_type == 'system'
+ target_rust = rust_devices_ss.apply(config_target, strict: false)
+ crates = []
+ foreach dep : target_rust.dependencies()
+ crates += dep.get_variable('crate')
+ endforeach
+ if crates.length() > 0
+ rlib_rs = custom_target('rust_' + target.underscorify() + '.rs',
+ output: 'rust_' + target.underscorify() + '.rs',
+ command: [rust_root_crate, crates],
+ capture: true,
+ build_by_default: true,
+ build_always_stale: true)
+ rlib = static_library('rust_' + target.underscorify(),
+ structured_sources([], {'.': rlib_rs}),
+ dependencies: target_rust.dependencies(),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'c')
+ arch_deps += declare_dependency(link_whole: [rlib])
+ endif
+ endif
+
# allow using headers from the dependencies but do not include the sources,
# because this emulator only needs those in "objects". For external
# dependencies, the full dependency is included below in the executable.
@@ -3932,14 +4399,14 @@ foreach target : target_dirs
'name': 'qemu-system-' + target_name,
'win_subsystem': 'console',
'sources': files('system/main.c'),
- 'dependencies': []
+ 'dependencies': [sdl]
}]
if host_os == 'windows' and (sdl.found() or gtk.found())
execs += [{
'name': 'qemu-system-' + target_name + 'w',
'win_subsystem': 'windows',
'sources': files('system/main.c'),
- 'dependencies': []
+ 'dependencies': [sdl]
}]
endif
if get_option('fuzzing')
@@ -4053,7 +4520,7 @@ if have_tools
subdir('contrib/elf2dmp')
executable('qemu-edid', files('qemu-edid.c', 'hw/display/edid-generate.c'),
- dependencies: qemuutil,
+ dependencies: [qemuutil, rt],
install: true)
if have_vhost_user
@@ -4073,6 +4540,13 @@ if have_tools
dependencies: [authz, crypto, io, qom, qemuutil,
libcap_ng, mpathpersist],
install: true)
+
+ if cpu in ['x86', 'x86_64']
+ executable('qemu-vmsr-helper', files('tools/i386/qemu-vmsr-helper.c'),
+ dependencies: [authz, crypto, io, qom, qemuutil,
+ libcap_ng, mpathpersist],
+ install: true)
+ endif
endif
if have_ivshmem
@@ -4111,7 +4585,11 @@ subdir('scripts')
subdir('tools')
subdir('pc-bios')
subdir('docs')
-subdir('tests')
+# Tests are disabled on emscripten because they rely on host features that aren't
+# supported by emscripten (e.g. fork and unix socket).
+if host_os != 'emscripten'
+ subdir('tests')
+endif
if gtk.found()
subdir('po')
endif
@@ -4214,7 +4692,6 @@ summary_info += {'Trace backends': ','.join(get_option('trace_backends'))}
if 'simple' in get_option('trace_backends')
summary_info += {'Trace output file': get_option('trace_file') + '-<pid>'}
endif
-summary_info += {'D-Bus display': dbus_display}
summary_info += {'QOM debugging': get_option('qom_cast_debug')}
summary_info += {'Relocatable install': get_option('relocatable')}
summary_info += {'vhost-kernel support': have_vhost_kernel}
@@ -4242,6 +4719,15 @@ if 'objc' in all_languages
else
summary_info += {'Objective-C compiler': false}
endif
+summary_info += {'Rust support': have_rust}
+if have_rust
+ summary_info += {'Rust target': config_host['RUST_TARGET_TRIPLE']}
+ summary_info += {'rustc': ' '.join(rustc.cmd_array())}
+ summary_info += {'rustc version': rustc.version()}
+ summary_info += {'rustdoc': rustdoc}
+ summary_info += {'bindgen': bindgen.full_path()}
+ summary_info += {'bindgen version': bindgen.version()}
+endif
option_cflags = (get_option('debug') ? ['-g'] : [])
if get_option('optimization') != 'plain'
option_cflags += ['-O' + get_option('optimization')]
@@ -4347,7 +4833,6 @@ if have_block
summary_info += {'Block whitelist (ro)': get_option('block_drv_ro_whitelist')}
summary_info += {'Use block whitelist in tools': get_option('block_drv_whitelist_in_tools')}
summary_info += {'VirtFS (9P) support': have_virtfs}
- summary_info += {'VirtFS (9P) Proxy Helper support (deprecated)': have_virtfs_proxy_helper}
summary_info += {'replication support': config_host_data.get('CONFIG_REPLICATION')}
summary_info += {'bochs support': get_option('bochs').allowed()}
summary_info += {'cloop support': get_option('cloop').allowed()}
@@ -4378,6 +4863,7 @@ if nettle.found()
summary_info += {' XTS': xts != 'private'}
endif
summary_info += {'SM4 ALG support': crypto_sm4}
+summary_info += {'SM3 ALG support': crypto_sm3}
summary_info += {'AF_ALG support': have_afalg}
summary_info += {'rng-none': get_option('rng_none')}
summary_info += {'Linux keyring': have_keyring}
@@ -4389,6 +4875,7 @@ summary_info = {}
if host_os == 'darwin'
summary_info += {'Cocoa support': cocoa}
endif
+summary_info += {'D-Bus display': dbus_display}
summary_info += {'SDL support': sdl}
summary_info += {'SDL image support': sdl_image}
summary_info += {'GTK support': gtk}
@@ -4485,10 +4972,12 @@ summary_info += {'lzfse support': liblzfse}
summary_info += {'zstd support': zstd}
summary_info += {'Query Processing Library support': qpl}
summary_info += {'UADK Library support': uadk}
+summary_info += {'qatzip support': qatzip}
summary_info += {'NUMA host support': numa}
summary_info += {'capstone': capstone}
summary_info += {'libpmem support': libpmem}
summary_info += {'libdaxctl support': libdaxctl}
+summary_info += {'libcbor support': libcbor}
summary_info += {'libudev': libudev}
# Dummy dependency, keep .found()
summary_info += {'FUSE lseek': fuse_lseek.found()}
@@ -4497,6 +4986,10 @@ summary_info += {'libdw': libdw}
if host_os == 'freebsd'
summary_info += {'libinotify-kqueue': inotify}
endif
+if host_os == 'darwin'
+ summary_info += {'ParavirtualizedGraphics support': pvg}
+endif
+summary_info += {'valgrind': valgrind}
summary(summary_info, bool_yn: true, section: 'Dependencies')
if host_arch == 'unknown'
@@ -4508,11 +5001,17 @@ if host_arch == 'unknown'
message('compile or work on this host CPU. You can help by volunteering')
message('to maintain it and providing a build host for our continuous')
message('integration setup.')
- if get_option('tcg').allowed() and target_dirs.length() > 0
+ if have_tcg
message()
message('configure has succeeded and you can continue to build, but')
message('QEMU will use a slow interpreter to emulate the target CPU.')
endif
+elif host_long_bits < 64
+ message()
+ warning('DEPRECATED HOST CPU')
+ message()
+ message('Support for 32-bit CPU host architecture ' + cpu + ' is going')
+ message('to be dropped in a future QEMU release.')
endif
if not supported_oses.contains(host_os)
diff --git a/meson_options.txt b/meson_options.txt
index 0269fa0..a442be2 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -27,12 +27,14 @@ option('block_drv_ro_whitelist', type : 'string', value : '',
description: 'set block driver read-only whitelist (by default affects only QEMU, not tools like qemu-img)')
option('interp_prefix', type : 'string', value : '/usr/gnemul/qemu-%M',
description: 'where to find shared libraries etc., use %M for cpu name')
+option('rtsig_map', type : 'string', value : 'NULL',
+ description: 'default value of QEMU_RTSIG_MAP')
option('fuzzing_engine', type : 'string', value : '',
description: 'fuzzing engine library for OSS-Fuzz')
option('trace_file', type: 'string', value: 'trace',
description: 'Trace file prefix for simple backend')
option('coroutine_backend', type: 'combo',
- choices: ['ucontext', 'sigaltstack', 'windows', 'auto'],
+ choices: ['ucontext', 'sigaltstack', 'windows', 'wasm', 'auto'],
value: 'auto', description: 'coroutine backend to use')
# Everything else can be set via --enable/--disable-* option
@@ -91,8 +93,10 @@ option('tcg_interpreter', type: 'boolean', value: false,
description: 'TCG with bytecode interpreter (slow)')
option('safe_stack', type: 'boolean', value: false,
description: 'SafeStack Stack Smash Protection (requires clang/llvm and coroutine backend ucontext)')
-option('sanitizers', type: 'boolean', value: false,
- description: 'enable default sanitizers')
+option('asan', type: 'boolean', value: false,
+ description: 'enable address sanitizer')
+option('ubsan', type: 'boolean', value: false,
+ description: 'enable undefined behaviour sanitizer')
option('tsan', type: 'boolean', value: false,
description: 'enable thread sanitizer')
option('stack_protector', type: 'feature', value: 'auto',
@@ -111,16 +115,14 @@ option('dbus_display', type: 'feature', value: 'auto',
description: '-display dbus support')
option('tpm', type : 'feature', value : 'auto',
description: 'TPM support')
+option('valgrind', type : 'feature', value: 'auto',
+ description: 'valgrind debug support for coroutine stacks')
# Do not enable it by default even for Mingw32, because it doesn't
# work on Wine.
option('membarrier', type: 'feature', value: 'disabled',
description: 'membarrier system call (for Linux 4.14+ or Windows')
-option('avx2', type: 'feature', value: 'auto',
- description: 'AVX2 optimizations')
-option('avx512bw', type: 'feature', value: 'auto',
- description: 'AVX512BW optimizations')
option('keyring', type: 'feature', value: 'auto',
description: 'Linux keyring support')
option('libkeyutils', type: 'feature', value: 'auto',
@@ -166,6 +168,8 @@ option('iconv', type : 'feature', value : 'auto',
description: 'Font glyph conversion support')
option('curses', type : 'feature', value : 'auto',
description: 'curses UI')
+option('libcbor', type : 'feature', value : 'auto',
+ description: 'libcbor support')
option('gnutls', type : 'feature', value : 'auto',
description: 'GNUTLS cryptography support')
option('nettle', type : 'feature', value : 'auto',
@@ -192,6 +196,8 @@ option('lzfse', type : 'feature', value : 'auto',
description: 'lzfse support for DMG images')
option('lzo', type : 'feature', value : 'auto',
description: 'lzo compression support')
+option('pvg', type: 'feature', value: 'auto',
+ description: 'macOS paravirtualized graphics support')
option('rbd', type : 'feature', value : 'auto',
description: 'Ceph block device driver')
option('opengl', type : 'feature', value : 'auto',
@@ -261,6 +267,8 @@ option('qpl', type : 'feature', value : 'auto',
description: 'Query Processing Library support')
option('uadk', type : 'feature', value : 'auto',
description: 'UADK Library support')
+option('qatzip', type: 'feature', value: 'auto',
+ description: 'QATzip compression support')
option('fuse', type: 'feature', value: 'auto',
description: 'FUSE block device export')
option('fuse_lseek', type : 'feature', value : 'auto',
@@ -301,8 +309,6 @@ option('vhost_user_blk_server', type: 'feature', value: 'auto',
description: 'build vhost-user-blk server')
option('virtfs', type: 'feature', value: 'auto',
description: 'virtio-9p support')
-option('virtfs_proxy_helper', type: 'feature', value: 'auto',
- description: 'virtio-9p proxy helper support')
option('libvduse', type: 'feature', value: 'auto',
description: 'build VDUSE Library')
option('vduse_blk_export', type: 'feature', value: 'auto',
@@ -371,3 +377,8 @@ option('hexagon_idef_parser', type : 'boolean', value : true,
option('x86_version', type : 'combo', choices : ['0', '1', '2', '3', '4'], value: '1',
description: 'tweak required x86_64 architecture version beyond compiler default')
+
+option('rust', type: 'feature', value: 'disabled',
+ description: 'Rust support')
+option('strict_rust_lints', type: 'boolean', value: false,
+ description: 'Enable stricter set of Rust warnings')
diff --git a/migration/block-active.c b/migration/block-active.c
new file mode 100644
index 0000000..40e986a
--- /dev/null
+++ b/migration/block-active.c
@@ -0,0 +1,48 @@
+/*
+ * Block activation tracking for migration purpose
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2024 Red Hat, Inc.
+ */
+#include "qemu/osdep.h"
+#include "block/block.h"
+#include "qapi/error.h"
+#include "migration/migration.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+bool migration_block_activate(Error **errp)
+{
+ ERRP_GUARD();
+
+ assert(bql_locked());
+
+ trace_migration_block_activation("active");
+
+ bdrv_activate_all(errp);
+ if (*errp) {
+ error_report_err(error_copy(*errp));
+ return false;
+ }
+
+ return true;
+}
+
+bool migration_block_inactivate(void)
+{
+ int ret;
+
+ assert(bql_locked());
+
+ trace_migration_block_activation("inactive");
+
+ ret = bdrv_inactivate_all();
+ if (ret) {
+ error_report("%s: bdrv_inactivate_all() failed: %d",
+ __func__, ret);
+ return false;
+ }
+
+ return true;
+}
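
The new migration_block_activate()/migration_block_inactivate() helpers wrap bdrv_activate_all()/bdrv_inactivate_all() with tracing and error reporting. A minimal caller sketch, assuming the usual QEMU migration headers and that the BQL is already held; the function name is illustrative only:

    /* Sketch: hand block devices to this QEMU instance once an incoming
     * migration completes; the helper asserts bql_locked() itself. */
    static void example_incoming_finish(void)
    {
        Error *local_err = NULL;

        if (!migration_block_activate(&local_err)) {
            /* the helper already reported a copy of the error */
            error_free(local_err);
            return;
        }
        /* ... safe to resume the guest ... */
    }
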
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index a7d5504..f2c352d 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -62,8 +62,8 @@
#include "block/block.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend.h"
+#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
diff --git a/migration/channel-block.c b/migration/channel-block.c
index fff8d87..97de5a6 100644
--- a/migration/channel-block.c
+++ b/migration/channel-block.c
@@ -123,7 +123,7 @@ qio_channel_block_seek(QIOChannel *ioc,
bioc->offset = offset;
break;
case SEEK_CUR:
- bioc->offset += whence;
+ bioc->offset += offset;
break;
case SEEK_END:
error_setg(errp, "Size of VMstate region is unknown");
@@ -170,7 +170,7 @@ qio_channel_block_set_aio_fd_handler(QIOChannel *ioc,
static void
qio_channel_block_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
diff --git a/migration/channel.c b/migration/channel.c
index f9de064..a547b1f 100644
--- a/migration/channel.c
+++ b/migration/channel.c
@@ -33,6 +33,7 @@
void migration_channel_process_incoming(QIOChannel *ioc)
{
MigrationState *s = migrate_get_current();
+ MigrationIncomingState *mis = migration_incoming_get_current();
Error *local_err = NULL;
trace_migration_set_incoming_channel(
@@ -47,6 +48,10 @@ void migration_channel_process_incoming(QIOChannel *ioc)
if (local_err) {
error_report_err(local_err);
+ migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
+ if (mis->exit_on_error) {
+ exit(EXIT_FAILURE);
+ }
}
}
@@ -74,7 +79,7 @@ void migration_channel_connect(MigrationState *s,
if (!error) {
/* tls_channel_connect will call back to this
* function after the TLS handshake,
- * so we mustn't call migrate_fd_connect until then
+ * so we mustn't call migration_connect until then
*/
return;
@@ -89,7 +94,7 @@ void migration_channel_connect(MigrationState *s,
qemu_mutex_unlock(&s->qemu_file_lock);
}
}
- migrate_fd_connect(s, error);
+ migration_connect(s, error);
error_free(error);
}
diff --git a/migration/colo.c b/migration/colo.c
index 6449490..e0f713c 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "migration.h"
@@ -30,8 +30,8 @@
#include "net/colo.h"
#include "block/block.h"
#include "qapi/qapi-events-migration.h"
-#include "sysemu/cpus.h"
-#include "sysemu/runstate.h"
+#include "system/cpus.h"
+#include "system/runstate.h"
#include "net/filter.h"
#include "options.h"
@@ -146,7 +146,7 @@ static void secondary_vm_do_failover(void)
return;
}
/* Notify COLO incoming thread that failover work is finished */
- qemu_sem_post(&mis->colo_incoming_sem);
+ qemu_event_set(&mis->colo_incoming_event);
/* For Secondary VM, jump to incoming co */
if (mis->colo_incoming_co) {
@@ -195,7 +195,7 @@ static void primary_vm_do_failover(void)
}
/* Notify COLO thread that failover work is finished */
- qemu_sem_post(&s->colo_exit_sem);
+ qemu_event_set(&s->colo_exit_event);
}
COLOMode get_colo_mode(void)
@@ -452,6 +452,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
bql_unlock();
goto out;
}
+
+ qemu_savevm_maybe_send_switchover_start(s->to_dst_file);
+
/* Note: device state is saved into buffer */
ret = qemu_save_device_state(fb);
@@ -617,8 +620,8 @@ out:
}
/* Hope this not to be too long to wait here */
- qemu_sem_wait(&s->colo_exit_sem);
- qemu_sem_destroy(&s->colo_exit_sem);
+ qemu_event_wait(&s->colo_exit_event);
+ qemu_event_destroy(&s->colo_exit_event);
/*
* It is safe to unregister notifier after failover finished.
@@ -648,7 +651,7 @@ void migrate_start_colo_process(MigrationState *s)
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
colo_checkpoint_notify_timer, NULL);
- qemu_sem_init(&s->colo_exit_sem, 0);
+ qemu_event_init(&s->colo_exit_event, false);
colo_process_checkpoint(s);
bql_lock();
}
@@ -805,11 +808,11 @@ void colo_shutdown(void)
case COLO_MODE_PRIMARY:
s = migrate_get_current();
qemu_event_set(&s->colo_checkpoint_event);
- qemu_sem_post(&s->colo_exit_sem);
+ qemu_event_set(&s->colo_exit_event);
break;
case COLO_MODE_SECONDARY:
mis = migration_incoming_get_current();
- qemu_sem_post(&mis->colo_incoming_sem);
+ qemu_event_set(&mis->colo_incoming_event);
break;
default:
break;
@@ -824,7 +827,7 @@ static void *colo_process_incoming_thread(void *opaque)
Error *local_err = NULL;
rcu_register_thread();
- qemu_sem_init(&mis->colo_incoming_sem, 0);
+ qemu_event_init(&mis->colo_incoming_event, false);
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_COLO);
@@ -836,7 +839,7 @@ static void *colo_process_incoming_thread(void *opaque)
/* Make sure all file formats throw away their mutable metadata */
bql_lock();
- bdrv_activate_all(&local_err);
+ migration_block_activate(&local_err);
bql_unlock();
if (local_err) {
error_report_err(local_err);
@@ -920,8 +923,8 @@ out:
}
/* Hope this not to be too long to loop here */
- qemu_sem_wait(&mis->colo_incoming_sem);
- qemu_sem_destroy(&mis->colo_incoming_sem);
+ qemu_event_wait(&mis->colo_incoming_event);
+ qemu_event_destroy(&mis->colo_incoming_event);
rcu_unregister_thread();
return NULL;
@@ -935,7 +938,8 @@ void coroutine_fn colo_incoming_co(void)
assert(bql_locked());
assert(migration_incoming_colo_enabled());
- qemu_thread_create(&th, "mig/dst/colo", colo_process_incoming_thread,
+ qemu_thread_create(&th, MIGRATION_THREAD_DST_COLO,
+ colo_process_incoming_thread,
mis, QEMU_THREAD_JOINABLE);
mis->colo_incoming_co = qemu_coroutine_self();
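
The COLO changes above replace one-shot semaphores (colo_exit_sem, colo_incoming_sem) with QemuEvent, which matches the init/set/wait/destroy lifecycle these notifications follow. A sketch of that pattern with a hypothetical event:

    /* Sketch of the one-shot notification pattern used by the conversion. */
    static QemuEvent example_done_event;

    static void example_setup(void)
    {
        qemu_event_init(&example_done_event, false);  /* start cleared */
    }

    static void example_notify_done(void)
    {
        qemu_event_set(&example_done_event);          /* wake the waiter */
    }

    static void example_wait_done(void)
    {
        qemu_event_wait(&example_done_event);         /* blocks until set */
        qemu_event_destroy(&example_done_event);
    }
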
diff --git a/migration/cpr-transfer.c b/migration/cpr-transfer.c
new file mode 100644
index 0000000..00371d1
--- /dev/null
+++ b/migration/cpr-transfer.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2022, 2024 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "io/channel-file.h"
+#include "io/channel-socket.h"
+#include "io/net-listener.h"
+#include "migration/cpr.h"
+#include "migration/migration.h"
+#include "migration/savevm.h"
+#include "migration/qemu-file.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+QEMUFile *cpr_transfer_output(MigrationChannel *channel, Error **errp)
+{
+ MigrationAddress *addr = channel->addr;
+
+ if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET &&
+ addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX) {
+
+ g_autoptr(QIOChannelSocket) sioc = qio_channel_socket_new();
+ QIOChannel *ioc = QIO_CHANNEL(sioc);
+ SocketAddress *saddr = &addr->u.socket;
+
+ if (qio_channel_socket_connect_sync(sioc, saddr, errp) < 0) {
+ return NULL;
+ }
+ trace_cpr_transfer_output(addr->u.socket.u.q_unix.path);
+ qio_channel_set_name(ioc, "cpr-out");
+ return qemu_file_new_output(ioc);
+
+ } else {
+ error_setg(errp, "bad cpr channel address; must be unix");
+ return NULL;
+ }
+}
+
+QEMUFile *cpr_transfer_input(MigrationChannel *channel, Error **errp)
+{
+ MigrationAddress *addr = channel->addr;
+
+ if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET &&
+ (addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX ||
+ addr->u.socket.type == SOCKET_ADDRESS_TYPE_FD)) {
+
+ g_autoptr(QIOChannelSocket) sioc = NULL;
+ SocketAddress *saddr = &addr->u.socket;
+ g_autoptr(QIONetListener) listener = qio_net_listener_new();
+ QIOChannel *ioc;
+
+ qio_net_listener_set_name(listener, "cpr-socket-listener");
+ if (qio_net_listener_open_sync(listener, saddr, 1, errp) < 0) {
+ return NULL;
+ }
+
+ sioc = qio_net_listener_wait_client(listener);
+ ioc = QIO_CHANNEL(sioc);
+ trace_cpr_transfer_input(
+ addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX ?
+ addr->u.socket.u.q_unix.path : addr->u.socket.u.fd.str);
+ qio_channel_set_name(ioc, "cpr-in");
+ return qemu_file_new_input(ioc);
+
+ } else {
+ error_setg(errp, "bad cpr channel socket type; must be unix");
+ return NULL;
+ }
+}
diff --git a/migration/cpr.c b/migration/cpr.c
new file mode 100644
index 0000000..a50a57e
--- /dev/null
+++ b/migration/cpr.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2021-2024 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "migration/cpr.h"
+#include "migration/misc.h"
+#include "migration/options.h"
+#include "migration/qemu-file.h"
+#include "migration/savevm.h"
+#include "migration/vmstate.h"
+#include "system/runstate.h"
+#include "trace.h"
+
+/*************************************************************************/
+/* cpr state container for all information to be saved. */
+
+typedef QLIST_HEAD(CprFdList, CprFd) CprFdList;
+
+typedef struct CprState {
+ CprFdList fds;
+} CprState;
+
+static CprState cpr_state;
+
+/****************************************************************************/
+
+typedef struct CprFd {
+ char *name;
+ unsigned int namelen;
+ int id;
+ int fd;
+ QLIST_ENTRY(CprFd) next;
+} CprFd;
+
+static const VMStateDescription vmstate_cpr_fd = {
+ .name = "cpr fd",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(namelen, CprFd),
+ VMSTATE_VBUFFER_ALLOC_UINT32(name, CprFd, 0, NULL, namelen),
+ VMSTATE_INT32(id, CprFd),
+ VMSTATE_FD(fd, CprFd),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+void cpr_save_fd(const char *name, int id, int fd)
+{
+ CprFd *elem = g_new0(CprFd, 1);
+
+ trace_cpr_save_fd(name, id, fd);
+ elem->name = g_strdup(name);
+ elem->namelen = strlen(name) + 1;
+ elem->id = id;
+ elem->fd = fd;
+ QLIST_INSERT_HEAD(&cpr_state.fds, elem, next);
+}
+
+static CprFd *find_fd(CprFdList *head, const char *name, int id)
+{
+ CprFd *elem;
+
+ QLIST_FOREACH(elem, head, next) {
+ if (!strcmp(elem->name, name) && elem->id == id) {
+ return elem;
+ }
+ }
+ return NULL;
+}
+
+void cpr_delete_fd(const char *name, int id)
+{
+ CprFd *elem = find_fd(&cpr_state.fds, name, id);
+
+ if (elem) {
+ QLIST_REMOVE(elem, next);
+ g_free(elem->name);
+ g_free(elem);
+ }
+
+ trace_cpr_delete_fd(name, id);
+}
+
+int cpr_find_fd(const char *name, int id)
+{
+ CprFd *elem = find_fd(&cpr_state.fds, name, id);
+ int fd = elem ? elem->fd : -1;
+
+ trace_cpr_find_fd(name, id, fd);
+ return fd;
+}
+
+void cpr_resave_fd(const char *name, int id, int fd)
+{
+ CprFd *elem = find_fd(&cpr_state.fds, name, id);
+ int old_fd = elem ? elem->fd : -1;
+
+ if (old_fd < 0) {
+ cpr_save_fd(name, id, fd);
+ } else if (old_fd != fd) {
+ error_setg(&error_fatal,
+ "internal error: cpr fd '%s' id %d value %d "
+ "already saved with a different value %d",
+ name, id, fd, old_fd);
+ }
+}
+
+int cpr_open_fd(const char *path, int flags, const char *name, int id,
+ Error **errp)
+{
+ int fd = cpr_find_fd(name, id);
+
+ if (fd < 0) {
+ fd = qemu_open(path, flags, errp);
+ if (fd >= 0) {
+ cpr_save_fd(name, id, fd);
+ }
+ }
+ return fd;
+}
+
+/*************************************************************************/
+#define CPR_STATE "CprState"
+
+static const VMStateDescription vmstate_cpr_state = {
+ .name = CPR_STATE,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_QLIST_V(fds, CprState, 1, vmstate_cpr_fd, CprFd, next),
+ VMSTATE_END_OF_LIST()
+ }
+};
+/*************************************************************************/
+
+static QEMUFile *cpr_state_file;
+
+QIOChannel *cpr_state_ioc(void)
+{
+ return qemu_file_get_ioc(cpr_state_file);
+}
+
+static MigMode incoming_mode = MIG_MODE_NONE;
+
+MigMode cpr_get_incoming_mode(void)
+{
+ return incoming_mode;
+}
+
+void cpr_set_incoming_mode(MigMode mode)
+{
+ incoming_mode = mode;
+}
+
+bool cpr_is_incoming(void)
+{
+ return incoming_mode != MIG_MODE_NONE;
+}
+
+int cpr_state_save(MigrationChannel *channel, Error **errp)
+{
+ int ret;
+ QEMUFile *f;
+ MigMode mode = migrate_mode();
+
+ trace_cpr_state_save(MigMode_str(mode));
+
+ if (mode == MIG_MODE_CPR_TRANSFER) {
+ g_assert(channel);
+ f = cpr_transfer_output(channel, errp);
+ } else {
+ return 0;
+ }
+ if (!f) {
+ return -1;
+ }
+
+ qemu_put_be32(f, QEMU_CPR_FILE_MAGIC);
+ qemu_put_be32(f, QEMU_CPR_FILE_VERSION);
+
+ ret = vmstate_save_state(f, &vmstate_cpr_state, &cpr_state, 0);
+ if (ret) {
+ error_setg(errp, "vmstate_save_state error %d", ret);
+ qemu_fclose(f);
+ return ret;
+ }
+
+ /*
+ * Close the socket only partially so we can later detect when the other
+ * end closes by getting a HUP event.
+ */
+ qemu_fflush(f);
+ qio_channel_shutdown(qemu_file_get_ioc(f), QIO_CHANNEL_SHUTDOWN_WRITE,
+ NULL);
+ cpr_state_file = f;
+ return 0;
+}
+
+int cpr_state_load(MigrationChannel *channel, Error **errp)
+{
+ int ret;
+ uint32_t v;
+ QEMUFile *f;
+ MigMode mode = 0;
+
+ if (channel) {
+ mode = MIG_MODE_CPR_TRANSFER;
+ cpr_set_incoming_mode(mode);
+ f = cpr_transfer_input(channel, errp);
+ } else {
+ return 0;
+ }
+ if (!f) {
+ return -1;
+ }
+
+ trace_cpr_state_load(MigMode_str(mode));
+
+ v = qemu_get_be32(f);
+ if (v != QEMU_CPR_FILE_MAGIC) {
+ error_setg(errp, "Not a migration stream (bad magic %x)", v);
+ qemu_fclose(f);
+ return -EINVAL;
+ }
+ v = qemu_get_be32(f);
+ if (v != QEMU_CPR_FILE_VERSION) {
+ error_setg(errp, "Unsupported migration stream version %d", v);
+ qemu_fclose(f);
+ return -ENOTSUP;
+ }
+
+ ret = vmstate_load_state(f, &vmstate_cpr_state, &cpr_state, 1);
+ if (ret) {
+ error_setg(errp, "vmstate_load_state error %d", ret);
+ qemu_fclose(f);
+ return ret;
+ }
+
+ /*
+ * Let the caller decide when to close the socket (and generate a HUP event
+ * for the sending side).
+ */
+ cpr_state_file = f;
+
+ return ret;
+}
+
+void cpr_state_close(void)
+{
+ if (cpr_state_file) {
+ qemu_fclose(cpr_state_file);
+ cpr_state_file = NULL;
+ }
+}
+
+bool cpr_incoming_needed(void *opaque)
+{
+ MigMode mode = migrate_mode();
+ return mode == MIG_MODE_CPR_TRANSFER;
+}
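
cpr.c adds a small registry that lets file descriptors survive a cpr-transfer migration: cpr_save_fd(), cpr_find_fd(), cpr_delete_fd(), and the cpr_open_fd() convenience wrapper above. A hedged usage sketch; the name "example-backing" and id 0 are made up for illustration:

    /* Sketch: reuse the fd recorded by the previous QEMU instance if one
     * exists, otherwise open the file and record it for the next CPR. */
    static int example_get_backing_fd(const char *path, Error **errp)
    {
        return cpr_open_fd(path, O_RDWR, "example-backing", 0, errp);
    }

    static void example_unrealize(void)
    {
        /* stop preserving the fd once the device goes away */
        cpr_delete_fd("example-backing", 0);
    }
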
diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
new file mode 100644
index 0000000..0642e6b
--- /dev/null
+++ b/migration/cpu-throttle.c
@@ -0,0 +1,199 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+#include "hw/core/cpu.h"
+#include "qemu/main-loop.h"
+#include "system/cpus.h"
+#include "system/cpu-throttle.h"
+#include "migration.h"
+#include "migration-stats.h"
+#include "trace.h"
+
+/* vcpu throttling controls */
+static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
+static unsigned int throttle_percentage;
+static bool throttle_dirty_sync_timer_active;
+static uint64_t throttle_dirty_sync_count_prev;
+
+#define CPU_THROTTLE_PCT_MIN 1
+#define CPU_THROTTLE_PCT_MAX 99
+#define CPU_THROTTLE_TIMESLICE_NS 10000000
+
+/* Ensure the RAMBlock dirty bitmap is synchronized every five seconds */
+#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
+
+static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
+{
+ double pct;
+ double throttle_ratio;
+ int64_t sleeptime_ns, endtime_ns;
+
+ if (!cpu_throttle_get_percentage()) {
+ return;
+ }
+
+ pct = (double)cpu_throttle_get_percentage() / 100;
+ throttle_ratio = pct / (1 - pct);
+ /* Add 1ns to fix double's rounding error (like 0.9999999...) */
+ sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
+ endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
+ while (sleeptime_ns > 0 && !cpu->stop) {
+ if (sleeptime_ns > SCALE_MS) {
+ qemu_cond_timedwait_bql(cpu->halt_cond,
+ sleeptime_ns / SCALE_MS);
+ } else {
+ bql_unlock();
+ g_usleep(sleeptime_ns / SCALE_US);
+ bql_lock();
+ }
+ sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ }
+ qatomic_set(&cpu->throttle_thread_scheduled, 0);
+}
+
+static void cpu_throttle_timer_tick(void *opaque)
+{
+ CPUState *cpu;
+ double pct;
+
+ /* Stop the timer if needed */
+ if (!cpu_throttle_get_percentage()) {
+ return;
+ }
+ CPU_FOREACH(cpu) {
+ if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
+ async_run_on_cpu(cpu, cpu_throttle_thread,
+ RUN_ON_CPU_NULL);
+ }
+ }
+
+ pct = (double)cpu_throttle_get_percentage() / 100;
+ timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
+}
+
+void cpu_throttle_set(int new_throttle_pct)
+{
+ /*
+     * remember whether throttling was already active before
+     * throttle_percentage is modified below
+ */
+ bool throttle_active = cpu_throttle_active();
+
+ trace_cpu_throttle_set(new_throttle_pct);
+
+ /* Ensure throttle percentage is within valid range */
+ new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
+ new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
+
+ qatomic_set(&throttle_percentage, new_throttle_pct);
+
+ if (!throttle_active) {
+ cpu_throttle_timer_tick(NULL);
+ }
+}
+
+void cpu_throttle_stop(void)
+{
+ qatomic_set(&throttle_percentage, 0);
+ cpu_throttle_dirty_sync_timer(false);
+}
+
+bool cpu_throttle_active(void)
+{
+ return (cpu_throttle_get_percentage() != 0);
+}
+
+int cpu_throttle_get_percentage(void)
+{
+ return qatomic_read(&throttle_percentage);
+}
+
+void cpu_throttle_dirty_sync_timer_tick(void *opaque)
+{
+ uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+ /*
+ * The first iteration copies all memory anyhow and has no
+ * effect on guest performance, therefore omit it to avoid
+ * paying extra for the sync penalty.
+ */
+ if (sync_cnt <= 1) {
+ goto end;
+ }
+
+ if (sync_cnt == throttle_dirty_sync_count_prev) {
+ trace_cpu_throttle_dirty_sync();
+ WITH_RCU_READ_LOCK_GUARD() {
+ migration_bitmap_sync_precopy(false);
+ }
+ }
+
+end:
+ throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
+
+ timer_mod(throttle_dirty_sync_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+}
+
+static bool cpu_throttle_dirty_sync_active(void)
+{
+ return qatomic_read(&throttle_dirty_sync_timer_active);
+}
+
+void cpu_throttle_dirty_sync_timer(bool enable)
+{
+ assert(throttle_dirty_sync_timer);
+
+ if (enable) {
+ if (!cpu_throttle_dirty_sync_active()) {
+ /*
+ * Always reset the dirty sync count cache, in case migration
+ * was cancelled once.
+ */
+ throttle_dirty_sync_count_prev = 0;
+ timer_mod(throttle_dirty_sync_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+ qatomic_set(&throttle_dirty_sync_timer_active, 1);
+ }
+ } else {
+ if (cpu_throttle_dirty_sync_active()) {
+ timer_del(throttle_dirty_sync_timer);
+ qatomic_set(&throttle_dirty_sync_timer_active, 0);
+ }
+ }
+}
+
+void cpu_throttle_init(void)
+{
+ throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+ cpu_throttle_timer_tick, NULL);
+ throttle_dirty_sync_timer =
+ timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+ cpu_throttle_dirty_sync_timer_tick, NULL);
+}
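
cpu-throttle.c moves the vCPU throttling helpers under migration/. A minimal sketch of the public entry points; the 20% figure is arbitrary:

    /* Sketch: raise throttling while auto-converge is struggling,
     * then stop it when migration completes or is cancelled. */
    static void example_throttle_cycle(void)
    {
        cpu_throttle_set(20);        /* clamped to 1..99 by the helper */

        if (cpu_throttle_active()) {
            /* ... further migration iterations ... */
            cpu_throttle_stop();     /* also stops the dirty-sync timer */
        }
    }
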
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 1d9db81..986624c 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -14,7 +14,7 @@
#include "qemu/error-report.h"
#include "hw/core/cpu.h"
#include "qapi/error.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
@@ -24,11 +24,12 @@
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
-#include "qapi/qmp/qdict.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
-#include "exec/memory.h"
+#include "qobject/qdict.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
+#include "system/memory.h"
#include "qemu/xxhash.h"
+#include "migration.h"
/*
* total_dirty_pages is procted by BQL and is used
@@ -149,12 +150,12 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
unsigned int flag,
bool one_shot)
{
- DirtyPageRecord *records;
+ DirtyPageRecord *records = NULL;
int64_t init_time_ms;
int64_t duration;
int64_t dirtyrate;
int i = 0;
- unsigned int gen_id;
+ unsigned int gen_id = 0;
retry:
init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -228,8 +229,7 @@ static int time_unit_to_power(TimeUnit time_unit)
case TIME_UNIT_MILLISECOND:
return -3;
default:
- assert(false); /* unreachable */
- return 0;
+ g_assert_not_reached();
}
}
@@ -437,6 +437,7 @@ static void get_ramblock_dirty_info(RAMBlock *block,
struct DirtyRateConfig *config)
{
uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
+ gsize len;
/* Right shift 30 bits to calc ramblock size in GB */
info->sample_pages_count = (qemu_ram_get_used_length(block) *
@@ -445,7 +446,9 @@ static void get_ramblock_dirty_info(RAMBlock *block,
info->ramblock_pages = qemu_ram_get_used_length(block) >>
qemu_target_page_bits();
info->ramblock_addr = qemu_ram_get_host_addr(block);
- strcpy(info->idstr, qemu_ram_get_idstr(block));
+ len = g_strlcpy(info->idstr, qemu_ram_get_idstr(block),
+ sizeof(info->idstr));
+ g_assert(len < sizeof(info->idstr));
}
static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
@@ -840,8 +843,9 @@ void qmp_calc_dirty_rate(int64_t calc_time,
init_dirtyrate_stat(config);
- qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
- (void *)&config, QEMU_THREAD_DETACHED);
+ qemu_thread_create(&thread, MIGRATION_THREAD_DIRTY_RATE,
+ get_dirtyrate_thread, (void *)&config,
+ QEMU_THREAD_DETACHED);
}
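
The get_ramblock_dirty_info() hunk swaps strcpy() for g_strlcpy() plus a truncation check: g_strlcpy() returns the length of the source string, so any value >= the destination size means the copy was cut short. The same pattern in isolation, with illustrative parameter names:

    /* Sketch: bounded string copy that aborts instead of silently truncating. */
    static void example_copy_idstr(char *dst, size_t dst_size, const char *src)
    {
        gsize len = g_strlcpy(dst, src, dst_size);
        g_assert(len < dst_size);
    }
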
diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
index 869c060..35225c3 100644
--- a/migration/dirtyrate.h
+++ b/migration/dirtyrate.h
@@ -13,7 +13,7 @@
#ifndef QEMU_MIGRATION_DIRTYRATE_H
#define QEMU_MIGRATION_DIRTYRATE_H
-#include "sysemu/dirtyrate.h"
+#include "system/dirtyrate.h"
/*
* Sample 512 pages per GB as default.
diff --git a/migration/fd.c b/migration/fd.c
index aab5189..9bf9be6 100644
--- a/migration/fd.c
+++ b/migration/fd.c
@@ -25,6 +25,29 @@
#include "io/channel-util.h"
#include "trace.h"
+static bool fd_is_pipe(int fd)
+{
+ struct stat statbuf;
+
+ if (fstat(fd, &statbuf) == -1) {
+ return false;
+ }
+
+ return S_ISFIFO(statbuf.st_mode);
+}
+
+static bool migration_fd_valid(int fd)
+{
+ if (fd_is_socket(fd)) {
+ return true;
+ }
+
+ if (fd_is_pipe(fd)) {
+ return true;
+ }
+
+ return false;
+}
void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp)
{
@@ -34,7 +57,7 @@ void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **
return;
}
- if (!fd_is_socket(fd)) {
+ if (!migration_fd_valid(fd)) {
warn_report("fd: migration to a file is deprecated."
" Use file: instead.");
}
@@ -68,7 +91,7 @@ void fd_start_incoming_migration(const char *fdname, Error **errp)
return;
}
- if (!fd_is_socket(fd)) {
+ if (!migration_fd_valid(fd)) {
warn_report("fd: migration to a file is deprecated."
" Use file: instead.");
}
diff --git a/migration/file.c b/migration/file.c
index db870f2..bb8031e 100644
--- a/migration/file.c
+++ b/migration/file.c
@@ -6,7 +6,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
@@ -112,7 +112,6 @@ void file_start_outgoing_migration(MigrationState *s,
error_setg_errno(errp, errno,
"failed to truncate migration file to offset %" PRIx64,
offset);
- object_unref(OBJECT(fioc));
return;
}
@@ -120,7 +119,6 @@ void file_start_outgoing_migration(MigrationState *s,
ioc = QIO_CHANNEL(fioc);
if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) {
- object_unref(OBJECT(fioc));
return;
}
qio_channel_set_name(ioc, "migration-file-outgoing");
@@ -198,12 +196,13 @@ void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp)
}
int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov,
- int niov, RAMBlock *block, Error **errp)
+ int niov, MultiFDPages_t *pages, Error **errp)
{
ssize_t ret = 0;
int i, slice_idx, slice_num;
uintptr_t base, next, offset;
size_t len;
+ RAMBlock *block = pages->block;
slice_idx = 0;
slice_num = 1;
diff --git a/migration/file.h b/migration/file.h
index 9f71e87..1a1115f 100644
--- a/migration/file.h
+++ b/migration/file.h
@@ -21,6 +21,6 @@ int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp);
void file_cleanup_outgoing_migration(void);
bool file_send_channel_create(gpointer opaque, Error **errp);
int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov,
- int niov, RAMBlock *block, Error **errp);
+ int niov, MultiFDPages_t *pages, Error **errp);
int multifd_file_recv_data(MultiFDRecvParams *p, Error **errp);
#endif
diff --git a/migration/global_state.c b/migration/global_state.c
index 3a9796c..c1f90fc 100644
--- a/migration/global_state.c
+++ b/migration/global_state.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "migration.h"
#include "migration/global_state.h"
diff --git a/migration/meson.build b/migration/meson.build
index 5ce2acb4..9aa48b2 100644
--- a/migration/meson.build
+++ b/migration/meson.build
@@ -11,8 +11,12 @@ migration_files = files(
system_ss.add(files(
'block-dirty-bitmap.c',
+ 'block-active.c',
'channel.c',
'channel-block.c',
+ 'cpr.c',
+ 'cpr-transfer.c',
+ 'cpu-throttle.c',
'dirtyrate.c',
'exec.c',
'fd.c',
@@ -21,6 +25,8 @@ system_ss.add(files(
'migration-hmp-cmds.c',
'migration.c',
'multifd.c',
+ 'multifd-device-state.c',
+ 'multifd-nocomp.c',
'multifd-zlib.c',
'multifd-zero-page.c',
'options.c',
@@ -41,6 +47,7 @@ system_ss.add(when: rdma, if_true: files('rdma.c'))
system_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
system_ss.add(when: qpl, if_true: files('multifd-qpl.c'))
system_ss.add(when: uadk, if_true: files('multifd-uadk.c'))
+system_ss.add(when: qatzip, if_true: files('multifd-qatzip.c'))
specific_ss.add(when: 'CONFIG_SYSTEM_ONLY',
if_true: files('ram.c',
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 7d608d2..e8a563c 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -21,15 +21,15 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-visit-migration.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/string-input-visitor.h"
#include "qapi/string-output-visitor.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "ui/qemu-spice.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "options.h"
#include "migration.h"
@@ -37,27 +37,28 @@ static void migration_global_dump(Monitor *mon)
{
MigrationState *ms = migrate_get_current();
- monitor_printf(mon, "globals:\n");
- monitor_printf(mon, "store-global-state: %s\n",
+ monitor_printf(mon, "Globals:\n");
+ monitor_printf(mon, " store-global-state: %s\n",
ms->store_global_state ? "on" : "off");
- monitor_printf(mon, "only-migratable: %s\n",
+ monitor_printf(mon, " only-migratable: %s\n",
only_migratable ? "on" : "off");
- monitor_printf(mon, "send-configuration: %s\n",
+ monitor_printf(mon, " send-configuration: %s\n",
ms->send_configuration ? "on" : "off");
- monitor_printf(mon, "send-section-footer: %s\n",
+ monitor_printf(mon, " send-section-footer: %s\n",
ms->send_section_footer ? "on" : "off");
- monitor_printf(mon, "clear-bitmap-shift: %u\n",
+ monitor_printf(mon, " send-switchover-start: %s\n",
+ ms->send_switchover_start ? "on" : "off");
+ monitor_printf(mon, " clear-bitmap-shift: %u\n",
ms->clear_bitmap_shift);
}
void hmp_info_migrate(Monitor *mon, const QDict *qdict)
{
+ bool show_all = qdict_get_try_bool(qdict, "all", false);
MigrationInfo *info;
info = qmp_query_migrate(NULL);
- migration_global_dump(mon);
-
if (info->blocked_reasons) {
strList *reasons = info->blocked_reasons;
monitor_printf(mon, "Outgoing migration blocked:\n");
@@ -68,7 +69,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
}
if (info->has_status) {
- monitor_printf(mon, "Migration status: %s",
+ monitor_printf(mon, "Status: %s",
MigrationStatus_str(info->status));
if (info->status == MIGRATION_STATUS_FAILED && info->error_desc) {
monitor_printf(mon, " (%s)\n", info->error_desc);
@@ -76,107 +77,130 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "\n");
}
- monitor_printf(mon, "total time: %" PRIu64 " ms\n",
- info->total_time);
- if (info->has_expected_downtime) {
- monitor_printf(mon, "expected downtime: %" PRIu64 " ms\n",
- info->expected_downtime);
- }
- if (info->has_downtime) {
- monitor_printf(mon, "downtime: %" PRIu64 " ms\n",
- info->downtime);
+ if (info->total_time) {
+ monitor_printf(mon, "Time (ms): total=%" PRIu64,
+ info->total_time);
+ if (info->has_setup_time) {
+ monitor_printf(mon, ", setup=%" PRIu64,
+ info->setup_time);
+ }
+ if (info->has_expected_downtime) {
+ monitor_printf(mon, ", exp_down=%" PRIu64,
+ info->expected_downtime);
+ }
+ if (info->has_downtime) {
+ monitor_printf(mon, ", down=%" PRIu64,
+ info->downtime);
+ }
+ monitor_printf(mon, "\n");
}
- if (info->has_setup_time) {
- monitor_printf(mon, "setup: %" PRIu64 " ms\n",
- info->setup_time);
+ }
+
+ if (info->has_socket_address) {
+ SocketAddressList *addr;
+
+ monitor_printf(mon, "Sockets: [\n");
+
+ for (addr = info->socket_address; addr; addr = addr->next) {
+ char *s = socket_uri(addr->value);
+ monitor_printf(mon, "\t%s\n", s);
+ g_free(s);
}
+ monitor_printf(mon, "]\n");
}
if (info->ram) {
- monitor_printf(mon, "transferred ram: %" PRIu64 " kbytes\n",
- info->ram->transferred >> 10);
- monitor_printf(mon, "throughput: %0.2f mbps\n",
+ monitor_printf(mon, "RAM info:\n");
+ monitor_printf(mon, " Throughput (Mbps): %0.2f\n",
info->ram->mbps);
- monitor_printf(mon, "remaining ram: %" PRIu64 " kbytes\n",
- info->ram->remaining >> 10);
- monitor_printf(mon, "total ram: %" PRIu64 " kbytes\n",
+ monitor_printf(mon, " Sizes (KiB): pagesize=%" PRIu64
+ ", total=%" PRIu64 ",\n",
+ info->ram->page_size >> 10,
info->ram->total >> 10);
- monitor_printf(mon, "duplicate: %" PRIu64 " pages\n",
- info->ram->duplicate);
- monitor_printf(mon, "normal: %" PRIu64 " pages\n",
- info->ram->normal);
- monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
- info->ram->normal_bytes >> 10);
- monitor_printf(mon, "dirty sync count: %" PRIu64 "\n",
- info->ram->dirty_sync_count);
- monitor_printf(mon, "page size: %" PRIu64 " kbytes\n",
- info->ram->page_size >> 10);
- monitor_printf(mon, "multifd bytes: %" PRIu64 " kbytes\n",
- info->ram->multifd_bytes >> 10);
- monitor_printf(mon, "pages-per-second: %" PRIu64 "\n",
+ monitor_printf(mon, " transferred=%" PRIu64
+ ", remain=%" PRIu64 ",\n",
+ info->ram->transferred >> 10,
+ info->ram->remaining >> 10);
+ monitor_printf(mon, " precopy=%" PRIu64
+ ", multifd=%" PRIu64
+ ", postcopy=%" PRIu64,
+ info->ram->precopy_bytes >> 10,
+ info->ram->multifd_bytes >> 10,
+ info->ram->postcopy_bytes >> 10);
+
+ if (info->vfio) {
+ monitor_printf(mon, ", vfio=%" PRIu64,
+ info->vfio->transferred >> 10);
+ }
+ monitor_printf(mon, "\n");
+
+ monitor_printf(mon, " Pages: normal=%" PRIu64 ", zero=%" PRIu64
+ ", rate_per_sec=%" PRIu64 "\n",
+ info->ram->normal,
+ info->ram->duplicate,
info->ram->pages_per_second);
+ monitor_printf(mon, " Others: dirty_syncs=%" PRIu64,
+ info->ram->dirty_sync_count);
if (info->ram->dirty_pages_rate) {
- monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n",
+ monitor_printf(mon, ", dirty_pages_rate=%" PRIu64,
info->ram->dirty_pages_rate);
}
if (info->ram->postcopy_requests) {
- monitor_printf(mon, "postcopy request count: %" PRIu64 "\n",
+ monitor_printf(mon, ", postcopy_req=%" PRIu64,
info->ram->postcopy_requests);
}
- if (info->ram->precopy_bytes) {
- monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n",
- info->ram->precopy_bytes >> 10);
- }
if (info->ram->downtime_bytes) {
- monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n",
- info->ram->downtime_bytes >> 10);
- }
- if (info->ram->postcopy_bytes) {
- monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n",
- info->ram->postcopy_bytes >> 10);
+ monitor_printf(mon, ", downtime_ram=%" PRIu64,
+ info->ram->downtime_bytes);
}
if (info->ram->dirty_sync_missed_zero_copy) {
- monitor_printf(mon,
- "Zero-copy-send fallbacks happened: %" PRIu64 " times\n",
+ monitor_printf(mon, ", zerocopy_fallbacks=%" PRIu64,
info->ram->dirty_sync_missed_zero_copy);
}
+ monitor_printf(mon, "\n");
+ }
+
+ if (!show_all) {
+ goto out;
}
+ migration_global_dump(mon);
+
if (info->xbzrle_cache) {
- monitor_printf(mon, "cache size: %" PRIu64 " bytes\n",
- info->xbzrle_cache->cache_size);
- monitor_printf(mon, "xbzrle transferred: %" PRIu64 " kbytes\n",
- info->xbzrle_cache->bytes >> 10);
- monitor_printf(mon, "xbzrle pages: %" PRIu64 " pages\n",
- info->xbzrle_cache->pages);
- monitor_printf(mon, "xbzrle cache miss: %" PRIu64 " pages\n",
- info->xbzrle_cache->cache_miss);
- monitor_printf(mon, "xbzrle cache miss rate: %0.2f\n",
- info->xbzrle_cache->cache_miss_rate);
- monitor_printf(mon, "xbzrle encoding rate: %0.2f\n",
- info->xbzrle_cache->encoding_rate);
- monitor_printf(mon, "xbzrle overflow: %" PRIu64 "\n",
+ monitor_printf(mon, "XBZRLE: size=%" PRIu64
+ ", transferred=%" PRIu64
+ ", pages=%" PRIu64
+ ", miss=%" PRIu64 "\n"
+ " miss_rate=%0.2f"
+ ", encode_rate=%0.2f"
+ ", overflow=%" PRIu64 "\n",
+ info->xbzrle_cache->cache_size,
+ info->xbzrle_cache->bytes,
+ info->xbzrle_cache->pages,
+ info->xbzrle_cache->cache_miss,
+ info->xbzrle_cache->cache_miss_rate,
+ info->xbzrle_cache->encoding_rate,
info->xbzrle_cache->overflow);
}
if (info->has_cpu_throttle_percentage) {
- monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
+ monitor_printf(mon, "CPU Throttle (%%): %" PRIu64 "\n",
info->cpu_throttle_percentage);
}
if (info->has_dirty_limit_throttle_time_per_round) {
- monitor_printf(mon, "dirty-limit throttle time: %" PRIu64 " us\n",
+ monitor_printf(mon, "Dirty-limit Throttle (us): %" PRIu64 "\n",
info->dirty_limit_throttle_time_per_round);
}
if (info->has_dirty_limit_ring_full_time) {
- monitor_printf(mon, "dirty-limit ring full time: %" PRIu64 " us\n",
+ monitor_printf(mon, "Dirty-limit Ring Full (us): %" PRIu64 "\n",
info->dirty_limit_ring_full_time);
}
if (info->has_postcopy_blocktime) {
- monitor_printf(mon, "postcopy blocktime: %u\n",
+ monitor_printf(mon, "Postcopy Blocktime (ms): %" PRIu32 "\n",
info->postcopy_blocktime);
}
@@ -187,28 +211,12 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
visit_type_uint32List(v, NULL, &info->postcopy_vcpu_blocktime,
&error_abort);
visit_complete(v, &str);
- monitor_printf(mon, "postcopy vcpu blocktime: %s\n", str);
+ monitor_printf(mon, "Postcopy vCPU Blocktime: %s\n", str);
g_free(str);
visit_free(v);
}
- if (info->has_socket_address) {
- SocketAddressList *addr;
-
- monitor_printf(mon, "socket address: [\n");
-
- for (addr = info->socket_address; addr; addr = addr->next) {
- char *s = socket_uri(addr->value);
- monitor_printf(mon, "\t%s\n", s);
- g_free(s);
- }
- monitor_printf(mon, "]\n");
- }
-
- if (info->vfio) {
- monitor_printf(mon, "vfio device transferred: %" PRIu64 " kbytes\n",
- info->vfio->transferred >> 10);
- }
+out:
qapi_free_MigrationInfo(info);
}
@@ -576,6 +584,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
p->has_multifd_zlib_level = true;
visit_type_uint8(v, param, &p->multifd_zlib_level, &err);
break;
+ case MIGRATION_PARAMETER_MULTIFD_QATZIP_LEVEL:
+ p->has_multifd_qatzip_level = true;
+ visit_type_uint8(v, param, &p->multifd_qatzip_level, &err);
+ break;
case MIGRATION_PARAMETER_MULTIFD_ZSTD_LEVEL:
p->has_multifd_zstd_level = true;
visit_type_uint8(v, param, &p->multifd_zstd_level, &err);
@@ -636,7 +648,7 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
visit_type_bool(v, param, &p->direct_io, &err);
break;
default:
- assert(0);
+ g_assert_not_reached();
}
if (err) {
diff --git a/migration/migration.c b/migration/migration.c
index 3dea06d..4098870 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -14,6 +14,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/ctype.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
@@ -22,11 +23,12 @@
#include "fd.h"
#include "file.h"
#include "socket.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/cpu-throttle.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
+#include "migration/cpr.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
@@ -43,7 +45,7 @@
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qemu/rcu.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
@@ -59,13 +61,13 @@
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "yank_functions.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "options.h"
-#include "sysemu/dirtylimit.h"
+#include "system/dirtylimit.h"
#include "qemu/sockets.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#define NOTIFIER_ELEM_INIT(array, elem) \
[elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem])
@@ -75,6 +77,7 @@
static NotifierWithReturnList migration_state_notifiers[] = {
NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL),
NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT),
+ NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_TRANSFER),
};
/* Messages sent on the return path from destination to source */
@@ -92,6 +95,9 @@ enum mig_rp_message_type {
MIG_RP_MSG_MAX
};
+/* Migration channel types */
+enum { CH_MAIN, CH_MULTIFD, CH_POSTCOPY };
+
/* When we add fault tolerance, we could have several
migrations at once. For now we don't need to add
dynamic creation of migration */
@@ -102,12 +108,10 @@ static MigrationIncomingState *current_incoming;
static GSList *migration_blockers[MIG_MODE__MAX];
static bool migration_object_check(MigrationState *ms, Error **errp);
-static int migration_maybe_pause(MigrationState *s,
- int *current_active_state,
- int new_state);
-static void migrate_fd_cancel(MigrationState *s);
+static bool migration_switchover_start(MigrationState *s, Error **errp);
static bool close_return_path_on_source(MigrationState *s);
static void migration_completion_end(MigrationState *s);
+static void migrate_hup_delete(MigrationState *s);
static void migration_downtime_start(MigrationState *s)
{
@@ -115,6 +119,27 @@ static void migration_downtime_start(MigrationState *s)
s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
+/*
+ * This is unfortunate: incoming migration actually needs the outgoing
+ * migration state (MigrationState) to be there too, e.g. to query
+ * capabilities, parameters, using locks, setup errors, etc.
+ *
+ * NOTE: when calling this, make sure current_migration exists and has
+ * not been freed yet! Otherwise trying to access the refcount is
+ * already a use-after-free itself.
+ *
+ * TODO: Move the shared part of incoming / outgoing into a separate
+ * object. Then this is not needed.
+ */
+static void migrate_incoming_ref_outgoing_state(void)
+{
+ object_ref(migrate_get_current());
+}
+static void migrate_incoming_unref_outgoing_state(void)
+{
+ object_unref(migrate_get_current());
+}
+
static void migration_downtime_end(MigrationState *s)
{
int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -125,9 +150,19 @@ static void migration_downtime_end(MigrationState *s)
*/
if (!s->downtime) {
s->downtime = now - s->downtime_start;
+ trace_vmstate_downtime_checkpoint("src-downtime-end");
+ }
+}
+
+static void precopy_notify_complete(void)
+{
+ Error *local_err = NULL;
+
+ if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
+ error_report_err(local_err);
}
- trace_vmstate_downtime_checkpoint("src-downtime-end");
+ trace_migration_precopy_complete();
}
static bool migration_needs_multiple_sockets(void)
@@ -135,6 +170,21 @@ static bool migration_needs_multiple_sockets(void)
return migrate_multifd() || migrate_postcopy_preempt();
}
+static RunState migration_get_target_runstate(void)
+{
+ /*
+ * When the global state is not migrated, it means we don't know the
+ * runstate of the src QEMU. We don't have much choice but to assume
+ * the VM is running. NOTE: this is a pretty rare case; so far only
+ * Xen uses it.
+ */
+ if (!global_state_received()) {
+ return RUN_STATE_RUNNING;
+ }
+
+ return global_state_get_runstate();
+}
+
static bool transport_supports_multi_channels(MigrationAddress *addr)
{
if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
@@ -203,9 +253,33 @@ migration_channels_and_transport_compatible(MigrationAddress *addr,
return false;
}
+ if (migrate_mode() == MIG_MODE_CPR_TRANSFER &&
+ addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
+ error_setg(errp, "Migration requires streamable transport (eg unix)");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+migration_capabilities_and_transport_compatible(MigrationAddress *addr,
+ Error **errp)
+{
+ if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
+ return migrate_rdma_caps_check(migrate_get_current()->capabilities,
+ errp);
+ }
+
return true;
}
+static bool migration_transport_compatible(MigrationAddress *addr, Error **errp)
+{
+ return migration_channels_and_transport_compatible(addr, errp) &&
+ migration_capabilities_and_transport_compatible(addr, errp);
+}
+
static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;
@@ -263,6 +337,9 @@ void migration_object_init(void)
ram_mig_init();
dirty_bitmap_mig_init();
+
+ /* Initialize cpu throttle timers */
+ cpu_throttle_init();
}
typedef struct {
@@ -306,17 +383,6 @@ void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
qemu_bh_schedule(bh);
}
-void migration_cancel(const Error *error)
-{
- if (error) {
- migrate_set_error(current_migration, error);
- }
- if (migrate_dirty_limit()) {
- qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
- }
- migrate_fd_cancel(current_migration);
-}
-
void migration_shutdown(void)
{
/*
@@ -329,7 +395,7 @@ void migration_shutdown(void)
* Cancel the current migration - that will (eventually)
* stop the migration using this structure
*/
- migration_cancel(NULL);
+ migration_cancel();
object_unref(OBJECT(current_migration));
/*
@@ -379,6 +445,24 @@ void migration_incoming_state_destroy(void)
multifd_recv_cleanup();
+ /*
+ * RAM state cleanup needs to happen after multifd cleanup, because
+ * multifd threads can use some of its state (receivedmap).
+ * The VFIO load_cleanup() implementation is BQL-sensitive: it requires
+ * that the BQL NOT be held when recycling load threads, so that they
+ * are not blocked from making progress on address space modification
+ * operations.
+ *
+ * To make it work, we could try to not take the BQL for all of
+ * load_cleanup(), or conditionally unlock the BQL only if bql_locked()
+ * in VFIO.
+ *
+ * Since most existing call sites take the BQL for load_cleanup(), keep
+ * it simple by always taking the BQL as the rule, so that VFIO can
+ * unconditionally drop and retake it.
+ */
+ assert(bql_locked());
+ qemu_loadvm_state_cleanup(mis);
+
if (mis->to_src_file) {
/* Tell source that we are done */
migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
@@ -410,6 +494,7 @@ void migration_incoming_state_destroy(void)
mis->postcopy_qemufile_dst = NULL;
}
+ cpr_set_incoming_mode(MIG_MODE_NONE);
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
@@ -563,6 +648,16 @@ void migrate_add_address(SocketAddress *address)
QAPI_CLONE(SocketAddress, address));
}
+bool migrate_is_uri(const char *uri)
+{
+ while (*uri && *uri != ':') {
+ if (!qemu_isalpha(*uri++)) {
+ return false;
+ }
+ }
+ return *uri == ':';
+}
+
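The migrate_is_uri() helper above accepts a string as a URI only when an all-alphabetic scheme prefix is terminated by ':'. A self-contained sketch of the same check, with a few illustrative inputs (the test values are invented and not part of the patch):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Same scheme-prefix check as migrate_is_uri(): letters followed by ':'. */
static bool is_uri_like(const char *s)
{
    while (*s && *s != ':') {
        if (!isalpha((unsigned char)*s++)) {
            return false;
        }
    }
    return *s == ':';
}

int main(void)
{
    /* Typical migration targets: URIs carry a scheme, plain paths do not. */
    printf("%d\n", is_uri_like("tcp:127.0.0.1:4444"));  /* 1 */
    printf("%d\n", is_uri_like("unix:/tmp/mig.sock"));  /* 1 */
    printf("%d\n", is_uri_like("/dev/fdset/1"));        /* 0 */
    return 0;
}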
bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
Error **errp)
{
@@ -660,7 +755,8 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
if (channels) {
/* Verify that the migrate channel list has only one item */
if (channels->next) {
- error_setg(errp, "Channel list has more than one entries");
+ error_setg(errp, "Channel list must have only one entry, "
+ "for type 'main'");
return;
}
addr = channels->value->addr;
@@ -675,7 +771,7 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
}
/* transport mechanism not suitable for migration? */
- if (!migration_channels_and_transport_compatible(addr, errp)) {
+ if (!migration_transport_compatible(addr, errp)) {
return;
}
@@ -694,14 +790,6 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
}
#ifdef CONFIG_RDMA
} else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
- if (migrate_xbzrle()) {
- error_setg(errp, "RDMA and XBZRLE can't be used together");
- return;
- }
- if (migrate_multifd()) {
- error_setg(errp, "RDMA and multifd can't be used together");
- return;
- }
rdma_start_incoming_migration(&addr->u.rdma, errp);
#endif
} else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
@@ -711,34 +799,17 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
} else {
error_setg(errp, "unknown migration protocol: %s", uri);
}
+
+ /* Close cpr socket to tell source that we are listening */
+ cpr_state_close();
}
static void process_incoming_migration_bh(void *opaque)
{
- Error *local_err = NULL;
MigrationIncomingState *mis = opaque;
trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");
- /* If capability late_block_activate is set:
- * Only fire up the block code now if we're going to restart the
- * VM, else 'cont' will do it.
- * This causes file locking to happen; so we don't want it to happen
- * unless we really are starting the VM.
- */
- if (!migrate_late_block_activate() ||
- (autostart && (!global_state_received() ||
- runstate_is_live(global_state_get_runstate())))) {
- /* Make sure all file formats throw away their mutable metadata.
- * If we get an error here, just don't restart the VM yet. */
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- local_err = NULL;
- autostart = false;
- }
- }
-
/*
* This must happen after all error conditions are dealt with and
* we're sure the VM is going to be running on this host.
@@ -751,10 +822,23 @@ static void process_incoming_migration_bh(void *opaque)
dirty_bitmap_mig_before_vm_start();
- if (!global_state_received() ||
- runstate_is_live(global_state_get_runstate())) {
+ if (runstate_is_live(migration_get_target_runstate())) {
if (autostart) {
- vm_start();
+ /*
+ * Block activation is always delayed until the VM starts, either
+ * here (which means we need to start the dest VM right now), or
+ * until qmp_cont() later.
+ *
+ * We used to have the 'late-block-activate' capability, but now we do
+ * this unconditionally, as it does no harm and only brings benefit;
+ * e.g., the time of disk activation is not part of the migration ABI.
+ *
+ * Make sure all file formats throw away their mutable metadata.
+ * On error, don't restart the VM yet.
+ */
+ if (migration_block_activate(NULL)) {
+ vm_start();
+ }
} else {
runstate_set(RUN_STATE_PAUSED);
}
@@ -813,7 +897,7 @@ process_incoming_migration_co(void *opaque)
* postcopy thread.
*/
trace_process_incoming_migration_co_postcopy_end_main();
- return;
+ goto out;
}
/* Else if something went wrong then just fall out of the normal exit */
}
@@ -829,7 +913,8 @@ process_incoming_migration_co(void *opaque)
}
migration_bh_schedule(process_incoming_migration_bh, mis);
- return;
+ goto out;
+
fail:
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
@@ -846,6 +931,9 @@ fail:
exit(EXIT_FAILURE);
}
+out:
+ /* Pairs with the refcount taken in qmp_migrate_incoming() */
+ migrate_incoming_unref_outgoing_state();
}
/**
@@ -856,9 +944,8 @@ static void migration_incoming_setup(QEMUFile *f)
{
MigrationIncomingState *mis = migration_incoming_get_current();
- if (!mis->from_src_file) {
- mis->from_src_file = f;
- }
+ assert(!mis->from_src_file);
+ mis->from_src_file = f;
qemu_file_set_blocking(f, false);
}
@@ -910,28 +997,19 @@ void migration_fd_process_incoming(QEMUFile *f)
migration_incoming_process();
}
-/*
- * Returns true when we want to start a new incoming migration process,
- * false otherwise.
- */
-static bool migration_should_start_incoming(bool main_channel)
+static bool migration_has_main_and_multifd_channels(void)
{
- /* Multifd doesn't start unless all channels are established */
- if (migrate_multifd()) {
- return migration_has_all_channels();
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ if (!mis->from_src_file) {
+ /* main channel not established */
+ return false;
}
- /* Preempt channel only starts when the main channel is created */
- if (migrate_postcopy_preempt()) {
- return main_channel;
+ if (migrate_multifd() && !multifd_recv_all_channels_created()) {
+ return false;
}
- /*
- * For all the rest types of migration, we should only reach here when
- * it's the main channel that's being created, and we should always
- * proceed with this channel.
- */
- assert(main_channel);
+ /* main and all multifd channels are established */
return true;
}
@@ -940,59 +1018,81 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
MigrationIncomingState *mis = migration_incoming_get_current();
Error *local_err = NULL;
QEMUFile *f;
- bool default_channel = true;
+ uint8_t channel;
uint32_t channel_magic = 0;
int ret = 0;
- if (migrate_multifd() && !migrate_mapped_ram() &&
- !migrate_postcopy_ram() &&
- qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
- /*
- * With multiple channels, it is possible that we receive channels
- * out of order on destination side, causing incorrect mapping of
- * source channels on destination side. Check channel MAGIC to
- * decide type of channel. Please note this is best effort, postcopy
- * preempt channel does not send any magic number so avoid it for
- * postcopy live migration. Also tls live migration already does
- * tls handshake while initializing main channel so with tls this
- * issue is not possible.
- */
- ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
- sizeof(channel_magic), errp);
+ if (!migration_has_main_and_multifd_channels()) {
+ if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
+ /*
+ * With multiple channels, it is possible that we receive channels
+ * out of order on destination side, causing incorrect mapping of
+ * source channels on destination side. Check channel MAGIC to
+ * decide the type of channel. Please note this is best effort;
+ * the postcopy preempt channel does not send any magic number, so
+ * avoid it for postcopy live migration. Also, TLS live migration
+ * already does the TLS handshake while initializing the main channel,
+ * so with TLS this issue is not possible.
+ */
+ ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
+ sizeof(channel_magic), errp);
+ if (ret != 0) {
+ return;
+ }
- if (ret != 0) {
+ channel_magic = be32_to_cpu(channel_magic);
+ if (channel_magic == QEMU_VM_FILE_MAGIC) {
+ channel = CH_MAIN;
+ } else if (channel_magic == MULTIFD_MAGIC) {
+ assert(migrate_multifd());
+ channel = CH_MULTIFD;
+ } else if (!mis->from_src_file &&
+ mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
+ /* reconnect main channel for postcopy recovery */
+ channel = CH_MAIN;
+ } else {
+ error_setg(errp, "unknown channel magic: %u", channel_magic);
+ return;
+ }
+ } else if (mis->from_src_file && migrate_multifd()) {
+ /*
+ * Non-peekable channels like tls/file are processed as
+ * multifd channels when multifd is enabled.
+ */
+ channel = CH_MULTIFD;
+ } else if (!mis->from_src_file) {
+ channel = CH_MAIN;
+ } else {
+ error_setg(errp, "non-peekable channel used without multifd");
return;
}
-
- default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
} else {
- default_channel = !mis->from_src_file;
+ assert(migrate_postcopy_preempt());
+ channel = CH_POSTCOPY;
}
if (multifd_recv_setup(errp) != 0) {
return;
}
- if (default_channel) {
+ if (channel == CH_MAIN) {
f = qemu_file_new_input(ioc);
migration_incoming_setup(f);
- } else {
+ } else if (channel == CH_MULTIFD) {
/* Multiple connections */
- assert(migration_needs_multiple_sockets());
- if (migrate_multifd()) {
- multifd_recv_new_channel(ioc, &local_err);
- } else {
- assert(migrate_postcopy_preempt());
- f = qemu_file_new_input(ioc);
- postcopy_preempt_new_channel(mis, f);
- }
+ multifd_recv_new_channel(ioc, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
+ } else if (channel == CH_POSTCOPY) {
+ assert(!mis->postcopy_qemufile_dst);
+ f = qemu_file_new_input(ioc);
+ postcopy_preempt_new_channel(mis, f);
+ return;
}
- if (migration_should_start_incoming(default_channel)) {
+ if (migration_has_main_and_multifd_channels()) {
/* If it's a recovery, we're done */
if (postcopy_try_recover()) {
return;
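To summarize the dispatch above: until the main and all multifd channels exist, a peekable channel is classified by its big-endian magic, with a fallback that accepts a main-channel reconnect only while postcopy is paused; once main and multifd are complete, any further channel must be the postcopy preempt channel. A minimal sketch of the magic-based branch follows; the magic constants here are invented placeholders, not QEMU's real values:

#include <stdbool.h>
#include <stdint.h>

enum { CH_MAIN, CH_MULTIFD, CH_POSTCOPY, CH_UNKNOWN };

/* Illustrative placeholders; the real values live in QEMU's headers. */
#define VM_FILE_MAGIC_EXAMPLE   0xAAAAAAAAu
#define MULTIFD_MAGIC_EXAMPLE   0xBBBBBBBBu

/*
 * Mirror of the dispatch order above: main channel by VM file magic,
 * multifd by its own magic; otherwise a reconnect is only treated as
 * the main channel while postcopy is paused and no main channel exists.
 */
static int classify_channel(uint32_t magic, bool have_main_channel,
                            bool postcopy_paused)
{
    if (magic == VM_FILE_MAGIC_EXAMPLE) {
        return CH_MAIN;
    }
    if (magic == MULTIFD_MAGIC_EXAMPLE) {
        return CH_MULTIFD;
    }
    if (!have_main_channel && postcopy_paused) {
        return CH_MAIN;          /* postcopy recovery reconnect */
    }
    return CH_UNKNOWN;           /* caller reports an error */
}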
@@ -1009,18 +1109,13 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
*/
bool migration_has_all_channels(void)
{
- MigrationIncomingState *mis = migration_incoming_get_current();
-
- if (!mis->from_src_file) {
+ if (!migration_has_main_and_multifd_channels()) {
return false;
}
- if (migrate_multifd()) {
- return multifd_recv_all_channels_created();
- }
-
- if (migrate_postcopy_preempt()) {
- return mis->postcopy_qemufile_dst != NULL;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ if (migrate_postcopy_preempt() && !mis->postcopy_qemufile_dst) {
+ return false;
}
return true;
@@ -1105,14 +1200,14 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}
-/*
- * Return true if we're already in the middle of a migration
- * (i.e. any of the active or setup states)
- */
-bool migration_is_setup_or_active(void)
+bool migration_is_running(void)
{
MigrationState *s = current_migration;
+ if (!s) {
+ return false;
+ }
+
switch (s->state) {
case MIGRATION_STATUS_ACTIVE:
case MIGRATION_STATUS_POSTCOPY_ACTIVE:
@@ -1123,36 +1218,20 @@ bool migration_is_setup_or_active(void)
case MIGRATION_STATUS_PRE_SWITCHOVER:
case MIGRATION_STATUS_DEVICE:
case MIGRATION_STATUS_WAIT_UNPLUG:
+ case MIGRATION_STATUS_CANCELLING:
case MIGRATION_STATUS_COLO:
return true;
-
default:
return false;
-
}
}
-bool migration_is_running(void)
+static bool migration_is_active(void)
{
MigrationState *s = current_migration;
- switch (s->state) {
- case MIGRATION_STATUS_ACTIVE:
- case MIGRATION_STATUS_POSTCOPY_ACTIVE:
- case MIGRATION_STATUS_POSTCOPY_PAUSED:
- case MIGRATION_STATUS_POSTCOPY_RECOVER_SETUP:
- case MIGRATION_STATUS_POSTCOPY_RECOVER:
- case MIGRATION_STATUS_SETUP:
- case MIGRATION_STATUS_PRE_SWITCHOVER:
- case MIGRATION_STATUS_DEVICE:
- case MIGRATION_STATUS_WAIT_UNPLUG:
- case MIGRATION_STATUS_CANCELLING:
- return true;
-
- default:
- return false;
-
- }
+ return (s->state == MIGRATION_STATUS_ACTIVE ||
+ s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}
static bool migrate_show_downtime(MigrationState *s)
@@ -1397,39 +1476,52 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
}
}
-static void migrate_fd_cleanup(MigrationState *s)
+static void migration_cleanup_json_writer(MigrationState *s)
+{
+ g_clear_pointer(&s->vmdesc, json_writer_free);
+}
+
+static void migration_cleanup(MigrationState *s)
{
MigrationEventType type;
+ QEMUFile *tmp = NULL;
+
+ trace_migration_cleanup();
+
+ migration_cleanup_json_writer(s);
g_free(s->hostname);
s->hostname = NULL;
- json_writer_free(s->vmdesc);
- s->vmdesc = NULL;
qemu_savevm_state_cleanup();
+ cpr_state_close();
+ migrate_hup_delete(s);
close_return_path_on_source(s);
- if (s->to_dst_file) {
- QEMUFile *tmp;
-
- trace_migrate_fd_cleanup();
+ if (s->migration_thread_running) {
bql_unlock();
- if (s->migration_thread_running) {
- qemu_thread_join(&s->thread);
- s->migration_thread_running = false;
- }
+ qemu_thread_join(&s->thread);
+ s->migration_thread_running = false;
bql_lock();
+ }
- multifd_send_shutdown();
- qemu_mutex_lock(&s->qemu_file_lock);
+ WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
+ /*
+ * Grab the file handle under the lock, then close it outside, so the
+ * critical section won't block for long.
+ */
tmp = s->to_dst_file;
s->to_dst_file = NULL;
- qemu_mutex_unlock(&s->qemu_file_lock);
+ }
+
+ if (tmp) {
/*
- * Close the file handle without the lock to make sure the
- * critical section won't block for long.
+ * We only need to shut down multifd if tmp != NULL, because if
+ * tmp == NULL the main channel was never established, and multifd
+ * is only set up after that (in migration_thread()).
*/
+ multifd_send_shutdown();
migration_ioc_unregister_yank_from_file(tmp);
qemu_fclose(tmp);
}
@@ -1451,9 +1543,9 @@ static void migrate_fd_cleanup(MigrationState *s)
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
-static void migrate_fd_cleanup_bh(void *opaque)
+static void migration_cleanup_bh(void *opaque)
{
- migrate_fd_cleanup(opaque);
+ migration_cleanup(opaque);
}
void migrate_set_error(MigrationState *s, const Error *error)
@@ -1483,7 +1575,7 @@ static void migrate_error_free(MigrationState *s)
}
}
-static void migrate_fd_error(MigrationState *s, const Error *error)
+static void migration_connect_set_error(MigrationState *s, const Error *error)
{
MigrationStatus current = s->state;
MigrationStatus next;
@@ -1512,11 +1604,17 @@ static void migrate_fd_error(MigrationState *s, const Error *error)
migrate_set_error(s, error);
}
-static void migrate_fd_cancel(MigrationState *s)
+void migration_cancel(void)
{
+ MigrationState *s = migrate_get_current();
int old_state ;
+ bool setup = (s->state == MIGRATION_STATUS_SETUP);
+
+ trace_migration_cancel();
- trace_migrate_fd_cancel();
+ if (migrate_dirty_limit()) {
+ qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
+ }
WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
if (s->rp_state.from_dst_file) {
@@ -1532,7 +1630,7 @@ static void migrate_fd_cancel(MigrationState *s)
}
/* If the migration is paused, kick it out of the pause */
if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
- qemu_sem_post(&s->pause_sem);
+ qemu_event_set(&s->pause_event);
}
migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
} while (s->state != MIGRATION_STATUS_CANCELLING);
@@ -1549,15 +1647,16 @@ static void migrate_fd_cancel(MigrationState *s)
}
}
}
- if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
- Error *local_err = NULL;
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- } else {
- s->block_inactive = false;
- }
+ /*
+ * If qmp_migrate_finish has not been called, then there is no path that
+ * will complete the cancellation. Do it now.
+ */
+ if (setup && !s->to_dst_file) {
+ migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
+ MIGRATION_STATUS_CANCELLED);
+ cpr_state_close();
+ migrate_hup_delete(s);
}
}
@@ -1644,42 +1743,7 @@ bool migration_incoming_postcopy_advised(void)
bool migration_in_bg_snapshot(void)
{
- return migrate_background_snapshot() &&
- migration_is_setup_or_active();
-}
-
-bool migration_is_idle(void)
-{
- MigrationState *s = current_migration;
-
- if (!s) {
- return true;
- }
-
- switch (s->state) {
- case MIGRATION_STATUS_NONE:
- case MIGRATION_STATUS_CANCELLED:
- case MIGRATION_STATUS_COMPLETED:
- case MIGRATION_STATUS_FAILED:
- return true;
- default:
- return false;
- }
-}
-
-bool migration_is_active(void)
-{
- MigrationState *s = current_migration;
-
- return (s->state == MIGRATION_STATUS_ACTIVE ||
- s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
-}
-
-bool migration_is_device(void)
-{
- MigrationState *s = current_migration;
-
- return s->state == MIGRATION_STATUS_DEVICE;
+ return migrate_background_snapshot() && migration_is_running();
}
bool migration_thread_is_self(void)
@@ -1691,7 +1755,9 @@ bool migration_thread_is_self(void)
bool migrate_mode_is_cpr(MigrationState *s)
{
- return s->parameters.mode == MIG_MODE_CPR_REBOOT;
+ MigMode mode = s->parameters.mode;
+ return mode == MIG_MODE_CPR_REBOOT ||
+ mode == MIG_MODE_CPR_TRANSFER;
}
int migrate_init(MigrationState *s, Error **errp)
@@ -1720,7 +1786,10 @@ int migrate_init(MigrationState *s, Error **errp)
s->migration_thread_running = false;
error_free(s->error);
s->error = NULL;
- s->vmdesc = NULL;
+
+ if (should_send_vmdesc()) {
+ s->vmdesc = json_writer_new(false);
+ }
migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
@@ -1745,7 +1814,7 @@ static bool is_busy(Error **reasonp, Error **errp)
ERRP_GUARD();
/* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
- if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
+ if (runstate_check(RUN_STATE_SAVE_VM) || migration_is_running()) {
error_propagate_prepend(errp, *reasonp,
"disallowing migration blocker "
"(migration/snapshot in progress) for: ");
@@ -1877,6 +1946,17 @@ void qmp_migrate_incoming(const char *uri, bool has_channels,
return;
}
+ /*
+ * Make sure MigrationState stays available until the incoming
+ * migration completes.
+ *
+ * NOTE: QEMU _might_ leak this refcount in some failure paths, but
+ * that's OK. This is the minimum change we need to at least make
+ * sure the success case is clean on the refcount. We could try
+ * harder to make it accurate for any kind of failure, but that might
+ * be overkill and wouldn't bring us much benefit.
+ */
+ migrate_incoming_ref_outgoing_state();
once = false;
}
@@ -2066,6 +2146,40 @@ static bool migrate_prepare(MigrationState *s, bool resume, Error **errp)
return true;
}
+static void qmp_migrate_finish(MigrationAddress *addr, bool resume_requested,
+ Error **errp);
+
+static void migrate_hup_add(MigrationState *s, QIOChannel *ioc, GSourceFunc cb,
+ void *opaque)
+{
+ s->hup_source = qio_channel_create_watch(ioc, G_IO_HUP);
+ g_source_set_callback(s->hup_source, cb, opaque, NULL);
+ g_source_attach(s->hup_source, NULL);
+}
+
+static void migrate_hup_delete(MigrationState *s)
+{
+ if (s->hup_source) {
+ g_source_destroy(s->hup_source);
+ g_source_unref(s->hup_source);
+ s->hup_source = NULL;
+ }
+}
+
+static gboolean qmp_migrate_finish_cb(QIOChannel *channel,
+ GIOCondition cond,
+ void *opaque)
+{
+ MigrationAddress *addr = opaque;
+
+ qmp_migrate_finish(addr, false, NULL);
+
+ cpr_state_close();
+ migrate_hup_delete(migrate_get_current());
+ qapi_free_MigrationAddress(addr);
+ return G_SOURCE_REMOVE;
+}
+
void qmp_migrate(const char *uri, bool has_channels,
MigrationChannelList *channels, bool has_detach, bool detach,
bool has_resume, bool resume, Error **errp)
@@ -2075,6 +2189,8 @@ void qmp_migrate(const char *uri, bool has_channels,
MigrationState *s = migrate_get_current();
g_autoptr(MigrationChannel) channel = NULL;
MigrationAddress *addr = NULL;
+ MigrationChannel *channelv[MIGRATION_CHANNEL_TYPE__MAX] = { NULL };
+ MigrationChannel *cpr_channel = NULL;
/*
* Having preliminary checks for uri and channel
@@ -2085,12 +2201,22 @@ void qmp_migrate(const char *uri, bool has_channels,
}
if (channels) {
- /* To verify that Migrate channel list has only item */
- if (channels->next) {
- error_setg(errp, "Channel list has more than one entries");
+ for ( ; channels; channels = channels->next) {
+ MigrationChannelType type = channels->value->channel_type;
+
+ if (channelv[type]) {
+ error_setg(errp, "Channel list has more than one %s entry",
+ MigrationChannelType_str(type));
+ return;
+ }
+ channelv[type] = channels->value;
+ }
+ cpr_channel = channelv[MIGRATION_CHANNEL_TYPE_CPR];
+ addr = channelv[MIGRATION_CHANNEL_TYPE_MAIN]->addr;
+ if (!addr) {
+ error_setg(errp, "Channel list has no main entry");
return;
}
- addr = channels->value->addr;
}
if (uri) {
@@ -2102,7 +2228,12 @@ void qmp_migrate(const char *uri, bool has_channels,
}
/* transport mechanism not suitable for migration? */
- if (!migration_channels_and_transport_compatible(addr, errp)) {
+ if (!migration_transport_compatible(addr, errp)) {
+ return;
+ }
+
+ if (s->parameters.mode == MIG_MODE_CPR_TRANSFER && !cpr_channel) {
+ error_setg(errp, "missing 'cpr' migration channel");
return;
}
@@ -2112,6 +2243,41 @@ void qmp_migrate(const char *uri, bool has_channels,
return;
}
+ if (cpr_state_save(cpr_channel, &local_err)) {
+ goto out;
+ }
+
+ /*
+ * For cpr-transfer, the target may not be listening yet on the migration
+ * channel, because first it must finish cpr_load_state. The target tells
+ * us it is listening by closing the cpr-state socket. Wait for that HUP
+ * event before connecting in qmp_migrate_finish.
+ *
+ * The HUP could occur because the target fails while reading CPR state,
+ * in which case the target will not listen for the incoming migration
+ * connection, so qmp_migrate_finish will fail to connect, and then recover.
+ */
+ if (s->parameters.mode == MIG_MODE_CPR_TRANSFER) {
+ migrate_hup_add(s, cpr_state_ioc(), (GSourceFunc)qmp_migrate_finish_cb,
+ QAPI_CLONE(MigrationAddress, addr));
+
+ } else {
+ qmp_migrate_finish(addr, resume_requested, errp);
+ }
+
+out:
+ if (local_err) {
+ migration_connect_set_error(s, local_err);
+ error_propagate(errp, local_err);
+ }
+}
+
+static void qmp_migrate_finish(MigrationAddress *addr, bool resume_requested,
+ Error **errp)
+{
+ MigrationState *s = migrate_get_current();
+ Error *local_err = NULL;
+
if (!resume_requested) {
if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
return;
@@ -2146,7 +2312,7 @@ void qmp_migrate(const char *uri, bool has_channels,
if (!resume_requested) {
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
- migrate_fd_error(s, local_err);
+ migration_connect_set_error(s, local_err);
error_propagate(errp, local_err);
return;
}
@@ -2154,7 +2320,18 @@ void qmp_migrate(const char *uri, bool has_channels,
void qmp_migrate_cancel(Error **errp)
{
- migration_cancel(NULL);
+ /*
+ * After postcopy migration has started, the source machine is not
+ * recoverable in case of a migration error. This also means the
+ * cancel command cannot be used, as cancel is expected to allow the
+ * source machine to continue running.
+ */
+ if (migration_in_postcopy()) {
+ error_setg(errp, "Postcopy migration in progress, cannot cancel.");
+ return;
+ }
+
+ migration_cancel();
}
void qmp_migrate_continue(MigrationStatus state, Error **errp)
@@ -2165,7 +2342,7 @@ void qmp_migrate_continue(MigrationStatus state, Error **errp)
MigrationStatus_str(s->state));
return;
}
- qemu_sem_post(&s->pause_sem);
+ qemu_event_set(&s->pause_event);
}
int migration_rp_wait(MigrationState *s)
@@ -2273,7 +2450,7 @@ static bool migrate_handle_rp_resume_ack(MigrationState *s,
*/
static void migration_release_dst_files(MigrationState *ms)
{
- QEMUFile *file;
+ QEMUFile *file = NULL;
WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
/*
@@ -2318,7 +2495,7 @@ static void *source_return_path_thread(void *opaque)
trace_source_return_path_thread_entry();
rcu_register_thread();
- while (migration_is_setup_or_active()) {
+ while (migration_is_running()) {
trace_source_return_path_thread_loop_top();
header_type = qemu_get_be16(rp);
@@ -2473,7 +2650,7 @@ static int open_return_path_on_source(MigrationState *ms)
trace_open_return_path_on_source();
- qemu_thread_create(&ms->rp_state.rp_thread, "mig/src/rp-thr",
+ qemu_thread_create(&ms->rp_state.rp_thread, MIGRATION_THREAD_SRC_RETURN,
source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
ms->rp_state.rp_thread_created = true;
@@ -2528,23 +2705,30 @@ static int postcopy_start(MigrationState *ms, Error **errp)
int ret;
QIOChannelBuffer *bioc;
QEMUFile *fb;
- uint64_t bandwidth = migrate_max_postcopy_bandwidth();
- bool restart_block = false;
- int cur_state = MIGRATION_STATUS_ACTIVE;
+
+ /*
+ * Now we're 100% sure to switch to postcopy, so the JSON writer won't
+ * be useful anymore. Free its resources early if present. Clearing
+ * the vmdesc also means any follow-up vmstate_save()s will skip all
+ * JSON operations, which can shrink postcopy downtime.
+ */
+ migration_cleanup_json_writer(ms);
if (migrate_postcopy_preempt()) {
migration_wait_main_channel(ms);
if (postcopy_preempt_establish_channel(ms)) {
- migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
+ if (ms->state != MIGRATION_STATUS_CANCELLING) {
+ migrate_set_state(&ms->state, ms->state,
+ MIGRATION_STATUS_FAILED);
+ }
error_setg(errp, "%s: Failed to establish preempt channel",
__func__);
return -1;
}
}
- if (!migrate_pause_before_switchover()) {
- migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
- MIGRATION_STATUS_POSTCOPY_ACTIVE);
+ if (!qemu_savevm_state_postcopy_prepare(ms->to_dst_file, errp)) {
+ return -1;
}
trace_postcopy_start();
@@ -2557,27 +2741,19 @@ static int postcopy_start(MigrationState *ms, Error **errp)
goto fail;
}
- ret = migration_maybe_pause(ms, &cur_state,
- MIGRATION_STATUS_POSTCOPY_ACTIVE);
- if (ret < 0) {
- error_setg_errno(errp, -ret, "%s: Failed in migration_maybe_pause()",
- __func__);
+ if (!migration_switchover_start(ms, errp)) {
goto fail;
}
- ret = bdrv_inactivate_all();
- if (ret < 0) {
- error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()",
- __func__);
- goto fail;
- }
- restart_block = true;
-
/*
* Cause any non-postcopiable, but iterative devices to
* send out their final data.
*/
- qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
+ ret = qemu_savevm_state_complete_precopy_iterable(ms->to_dst_file, true);
+ if (ret) {
+ error_setg(errp, "Postcopy save non-postcopiable iterables failed");
+ goto fail;
+ }
/*
* in Finish migrate and with the io-lock held everything should
@@ -2589,12 +2765,6 @@ static int postcopy_start(MigrationState *ms, Error **errp)
ram_postcopy_send_discard_bitmap(ms);
}
- /*
- * send rest of state - note things that are doing postcopy
- * will notice we're in POSTCOPY_ACTIVE and not actually
- * wrap their state up here
- */
- migration_rate_set(bandwidth);
if (migrate_postcopy_ram()) {
/* Ping just for debugging, helps line traces up */
qemu_savevm_send_ping(ms->to_dst_file, 2);
@@ -2622,7 +2792,12 @@ static int postcopy_start(MigrationState *ms, Error **errp)
*/
qemu_savevm_send_postcopy_listen(fb);
- qemu_savevm_state_complete_precopy(fb, false, false);
+ ret = qemu_savevm_state_complete_precopy_non_iterable(fb, true);
+ if (ret) {
+ error_setg(errp, "Postcopy save non-iterable device states failed");
+ goto fail_closefb;
+ }
+
if (migrate_postcopy_ram()) {
qemu_savevm_send_ping(fb, 3);
}
@@ -2641,8 +2816,6 @@ static int postcopy_start(MigrationState *ms, Error **errp)
goto fail_closefb;
}
- restart_block = false;
-
/* Now send that blob */
if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
error_setg(errp, "%s: Failed to send packaged data", __func__);
@@ -2658,8 +2831,6 @@ static int postcopy_start(MigrationState *ms, Error **errp)
migration_downtime_end(ms);
- bql_unlock();
-
if (migrate_postcopy_ram()) {
/*
* Although this ping is just for debug, it could potentially be
@@ -2675,11 +2846,22 @@ static int postcopy_start(MigrationState *ms, Error **errp)
ret = qemu_file_get_error(ms->to_dst_file);
if (ret) {
error_setg_errno(errp, -ret, "postcopy_start: Migration stream error");
- bql_lock();
goto fail;
}
trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
+ /*
+ * Now that postcopy has officially started, switch to the postcopy
+ * bandwidth the user specified.
+ */
+ migration_rate_set(migrate_max_postcopy_bandwidth());
+
+ /* Now that switchover looks fine, switch to postcopy-active */
+ migrate_set_state(&ms->state, MIGRATION_STATUS_DEVICE,
+ MIGRATION_STATUS_POSTCOPY_ACTIVE);
+
+ bql_unlock();
+
return ret;
fail_closefb:
@@ -2687,67 +2869,104 @@ fail_closefb:
fail:
migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
MIGRATION_STATUS_FAILED);
- if (restart_block) {
- /* A failure happened early enough that we know the destination hasn't
- * accessed block devices, so we're safe to recover.
- */
- Error *local_err = NULL;
-
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- }
- }
+ migration_block_activate(NULL);
migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL);
bql_unlock();
return -1;
}
/**
- * migration_maybe_pause: Pause if required to by
- * migrate_pause_before_switchover called with the BQL locked
- * Returns: 0 on success
+ * @migration_switchover_prepare: Start VM switchover procedure
+ *
+ * @s: The migration state object pointer
+ *
+ * Prepares for the switchover, depending on "pause-before-switchover"
+ * capability.
+ *
+ * If cap set, state machine goes like:
+ * [postcopy-]active -> pre-switchover -> device
+ *
+ * If cap not set:
+ * [postcopy-]active -> device
+ *
+ * Returns: true on success, false if interrupted.
*/
-static int migration_maybe_pause(MigrationState *s,
- int *current_active_state,
- int new_state)
+static bool migration_switchover_prepare(MigrationState *s)
{
+ /* Concurrent cancellation? Quit */
+ if (s->state == MIGRATION_STATUS_CANCELLING) {
+ return false;
+ }
+
+ /*
+ * Whether in precopy or postcopy, since we still hold the BQL the
+ * state cannot change concurrently to CANCELLING, so it must be
+ * either ACTIVE or POSTCOPY_ACTIVE.
+ */
+ assert(migration_is_active());
+
+ /* If the pre-switchover stage is not requested, switch directly to DEVICE */
if (!migrate_pause_before_switchover()) {
- return 0;
+ migrate_set_state(&s->state, s->state, MIGRATION_STATUS_DEVICE);
+ return true;
}
- /* Since leaving this state is not atomic with posting the semaphore
+ /*
+ * Since leaving this state is not atomic with setting the event
* it's possible that someone could have issued multiple migrate_continue
- * and the semaphore is incorrectly positive at this point;
- * the docs say it's undefined to reinit a semaphore that's already
- * init'd, so use timedwait to eat up any existing posts.
+ * and the event may be incorrectly set at this point, so reset it.
*/
- while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
- /* This block intentionally left blank */
- }
+ qemu_event_reset(&s->pause_event);
+
+ /* Update [POSTCOPY_]ACTIVE to PRE_SWITCHOVER */
+ migrate_set_state(&s->state, s->state, MIGRATION_STATUS_PRE_SWITCHOVER);
+ bql_unlock();
+
+ qemu_event_wait(&s->pause_event);
+ bql_lock();
/*
- * If the migration is cancelled when it is in the completion phase,
- * the migration state is set to MIGRATION_STATUS_CANCELLING.
- * So we don't need to wait a semaphore, otherwise we would always
- * wait for the 'pause_sem' semaphore.
+ * After the BQL is released and retaken, the state can be CANCELLING
+ * if that happened during qemu_event_wait(). Only change the state
+ * if it's still pre-switchover.
*/
- if (s->state != MIGRATION_STATUS_CANCELLING) {
- bql_unlock();
- migrate_set_state(&s->state, *current_active_state,
- MIGRATION_STATUS_PRE_SWITCHOVER);
- qemu_sem_wait(&s->pause_sem);
- migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
- new_state);
- *current_active_state = new_state;
- bql_lock();
+ migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
+ MIGRATION_STATUS_DEVICE);
+
+ return s->state == MIGRATION_STATUS_DEVICE;
+}
+
+static bool migration_switchover_start(MigrationState *s, Error **errp)
+{
+ ERRP_GUARD();
+
+ if (!migration_switchover_prepare(s)) {
+ error_setg(errp, "Switchover is interrupted");
+ return false;
}
- return s->state == new_state ? 0 : -EINVAL;
+ /* Inactivate disks except in COLO */
+ if (!migrate_colo()) {
+ /*
+ * Inactivate before sending QEMU_VM_EOF so that the
+ * bdrv_activate_all() on the other end won't fail.
+ */
+ if (!migration_block_inactivate()) {
+ error_setg(errp, "Block inactivate failed during switchover");
+ return false;
+ }
+ }
+
+ migration_rate_set(RATE_LIMIT_DISABLED);
+
+ precopy_notify_complete();
+
+ qemu_savevm_maybe_send_switchover_start(s->to_dst_file);
+
+ return true;
}
-static int migration_completion_precopy(MigrationState *s,
- int *current_active_state)
+static int migration_completion_precopy(MigrationState *s)
{
int ret;
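The switchover-prepare path above replaces pause_sem with a QemuEvent: reset it before releasing the BQL and waiting, and let migrate-continue set it; unlike the old semaphore, a stale continue can simply be cleared by the reset. A stripped-down sketch of that handshake, assuming the QemuEvent API from qemu/thread.h and omitting the state transitions and BQL handling:

#include "qemu/osdep.h"
#include "qemu/thread.h"

static QemuEvent pause_event;

void pause_event_setup(void)
{
    qemu_event_init(&pause_event, false);       /* starts cleared */
}

/* Migration thread side: park at the pre-switchover point. */
void switchover_pause(void)
{
    /*
     * A stray earlier continue may have left the event set; clear it so
     * the wait below blocks until the next explicit set. Unlike a
     * semaphore, resetting an event is well defined.
     */
    qemu_event_reset(&pause_event);
    qemu_event_wait(&pause_event);
}

/* QMP side: migrate-continue wakes the migration thread. */
void switchover_continue(void)
{
    qemu_event_set(&pause_event);
}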
@@ -2760,20 +2979,12 @@ static int migration_completion_precopy(MigrationState *s,
}
}
- ret = migration_maybe_pause(s, current_active_state,
- MIGRATION_STATUS_DEVICE);
- if (ret < 0) {
+ if (!migration_switchover_start(s, NULL)) {
+ ret = -EFAULT;
goto out_unlock;
}
- /*
- * Inactivate disks except in COLO, and track that we have done so in order
- * to remember to reactivate them if migration fails or is cancelled.
- */
- s->block_inactive = !migrate_colo();
- migration_rate_set(RATE_LIMIT_DISABLED);
- ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
- s->block_inactive);
+ ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false);
out_unlock:
bql_unlock();
return ret;
@@ -2798,31 +3009,6 @@ static void migration_completion_postcopy(MigrationState *s)
trace_migration_completion_postcopy_end_after_complete();
}
-static void migration_completion_failed(MigrationState *s,
- int current_active_state)
-{
- if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
- s->state == MIGRATION_STATUS_DEVICE)) {
- /*
- * If not doing postcopy, vm_start() will be called: let's
- * regain control on images.
- */
- Error *local_err = NULL;
-
- bql_lock();
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- } else {
- s->block_inactive = false;
- }
- bql_unlock();
- }
-
- migrate_set_state(&s->state, current_active_state,
- MIGRATION_STATUS_FAILED);
-}
-
/**
* migration_completion: Used by migration_thread when there's not much left.
* The caller 'breaks' the loop when this returns.
@@ -2832,11 +3018,10 @@ static void migration_completion_failed(MigrationState *s,
static void migration_completion(MigrationState *s)
{
int ret = 0;
- int current_active_state = s->state;
Error *local_err = NULL;
if (s->state == MIGRATION_STATUS_ACTIVE) {
- ret = migration_completion_precopy(s, &current_active_state);
+ ret = migration_completion_precopy(s);
} else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
migration_completion_postcopy(s);
} else {
@@ -2876,7 +3061,9 @@ fail:
error_free(local_err);
}
- migration_completion_failed(s, current_active_state);
+ if (s->state != MIGRATION_STATUS_CANCELLING) {
+ migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
+ }
}
/**
@@ -2899,7 +3086,7 @@ static void bg_migration_completion(MigrationState *s)
qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
qemu_fflush(s->to_dst_file);
} else if (s->state == MIGRATION_STATUS_CANCELLING) {
- goto fail;
+ return;
}
if (qemu_file_get_error(s->to_dst_file)) {
@@ -3283,10 +3470,17 @@ static MigIterateState migration_iteration_run(MigrationState *s)
static void migration_iteration_finish(MigrationState *s)
{
- /* If we enabled cpu throttling for auto-converge, turn it off. */
- cpu_throttle_stop();
-
bql_lock();
+
+ /*
+ * If we enabled cpu throttling for auto-converge, turn it off.
+ * Stopping CPU throttle should be serialized by BQL to avoid
+ * racing for the throttle_dirty_sync_timer.
+ */
+ if (migrate_auto_converge()) {
+ cpu_throttle_stop();
+ }
+
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
runstate_set(RUN_STATE_POSTMIGRATE);
@@ -3299,6 +3493,11 @@ static void migration_iteration_finish(MigrationState *s)
case MIGRATION_STATUS_FAILED:
case MIGRATION_STATUS_CANCELLED:
case MIGRATION_STATUS_CANCELLING:
+ /*
+ * Re-activate the block drives if they're inactivated. Note that COLO
+ * shouldn't use block_active at all, so this should be a no-op there.
+ */
+ migration_block_activate(NULL);
if (runstate_is_live(s->vm_old_state)) {
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
vm_start();
@@ -3316,7 +3515,7 @@ static void migration_iteration_finish(MigrationState *s)
break;
}
- migration_bh_schedule(migrate_fd_cleanup_bh, s);
+ migration_bh_schedule(migration_cleanup_bh, s);
bql_unlock();
}
@@ -3344,7 +3543,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
break;
}
- migration_bh_schedule(migrate_fd_cleanup_bh, s);
+ migration_bh_schedule(migration_cleanup_bh, s);
bql_unlock();
}
@@ -3462,11 +3661,11 @@ static void *migration_thread(void *opaque)
Error *local_err = NULL;
int ret;
- thread = migration_threads_add("live_migration", qemu_get_thread_id());
+ thread = migration_threads_add(MIGRATION_THREAD_SRC_MAIN,
+ qemu_get_thread_id());
rcu_register_thread();
- object_ref(OBJECT(s));
update_iteration_initial_status(s);
if (!multifd_send_setup()) {
@@ -3503,6 +3702,11 @@ static void *migration_thread(void *opaque)
qemu_savevm_send_colo_enable(s->to_dst_file);
}
+ if (migrate_auto_converge()) {
+ /* Start RAMBlock dirty bitmap sync timer */
+ cpu_throttle_dirty_sync_timer(true);
+ }
+
bql_lock();
ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
bql_unlock();
@@ -3599,7 +3803,6 @@ static void *bg_migration_thread(void *opaque)
int ret;
rcu_register_thread();
- object_ref(OBJECT(s));
migration_rate_set(RATE_LIMIT_DISABLED);
@@ -3657,12 +3860,8 @@ static void *bg_migration_thread(void *opaque)
if (migration_stop_vm(s, RUN_STATE_PAUSED)) {
goto fail;
}
- /*
- * Put vCPUs in sync with shadow context structures, then
- * save their state to channel-buffer along with devices.
- */
- cpu_synchronize_all_states();
- if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
+
+ if (qemu_savevm_state_complete_precopy_non_iterable(fb, false)) {
goto fail;
}
/*
@@ -3726,7 +3925,7 @@ fail_setup:
return NULL;
}
-void migrate_fd_connect(MigrationState *s, Error *error_in)
+void migration_connect(MigrationState *s, Error *error_in)
{
Error *local_err = NULL;
uint64_t rate_limit;
@@ -3736,24 +3935,24 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
/*
* If there's a previous error, free it and prepare for another one.
* Meanwhile if migration completes successfully, there won't be an error
- * dumped when calling migrate_fd_cleanup().
+ * dumped when calling migration_cleanup().
*/
migrate_error_free(s);
s->expected_downtime = migrate_downtime_limit();
if (error_in) {
- migrate_fd_error(s, error_in);
+ migration_connect_set_error(s, error_in);
if (resume) {
/*
* Don't do cleanup for resume if channel is invalid, but only dump
* the error. We wait for another channel connect from the user.
* The error_report still gives HMP user a hint on what failed.
- * It's normally done in migrate_fd_cleanup(), but call it here
+ * It's normally done in migration_cleanup(), but call it here
* explicitly.
*/
error_report_err(error_copy(s->error));
} else {
- migrate_fd_cleanup(s);
+ migration_cleanup(s);
}
return;
}
@@ -3811,11 +4010,19 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
}
}
+ /*
+ * Take a refcount to make sure the migration object won't get freed by
+ * the main thread already in migration_shutdown().
+ *
+ * The refcount will be released at the end of the thread function.
+ */
+ object_ref(OBJECT(s));
+
if (migrate_background_snapshot()) {
- qemu_thread_create(&s->thread, "mig/snapshot",
+ qemu_thread_create(&s->thread, MIGRATION_THREAD_SNAPSHOT,
bg_migration_thread, s, QEMU_THREAD_JOINABLE);
} else {
- qemu_thread_create(&s->thread, "mig/src/main",
+ qemu_thread_create(&s->thread, MIGRATION_THREAD_SRC_MAIN,
migration_thread, s, QEMU_THREAD_JOINABLE);
}
s->migration_thread_running = true;
@@ -3823,17 +4030,20 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
fail:
migrate_set_error(s, local_err);
- migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
+ if (s->state != MIGRATION_STATUS_CANCELLING) {
+ migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
+ }
error_report_err(local_err);
- migrate_fd_cleanup(s);
+ migration_cleanup(s);
}
-static void migration_class_init(ObjectClass *klass, void *data)
+static void migration_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->user_creatable = false;
- device_class_set_props(dc, migration_properties);
+ device_class_set_props_n(dc, migration_properties,
+ migration_properties_count);
}
static void migration_instance_finalize(Object *obj)
@@ -3844,7 +4054,7 @@ static void migration_instance_finalize(Object *obj)
qemu_mutex_destroy(&ms->qemu_file_lock);
qemu_sem_destroy(&ms->wait_unplug_sem);
qemu_sem_destroy(&ms->rate_limit_sem);
- qemu_sem_destroy(&ms->pause_sem);
+ qemu_event_destroy(&ms->pause_event);
qemu_sem_destroy(&ms->postcopy_pause_sem);
qemu_sem_destroy(&ms->rp_state.rp_sem);
qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
@@ -3859,7 +4069,7 @@ static void migration_instance_init(Object *obj)
ms->state = MIGRATION_STATUS_NONE;
ms->mbps = -1;
ms->pages_per_second = -1;
- qemu_sem_init(&ms->pause_sem, 0);
+ qemu_event_init(&ms->pause_event, false);
qemu_mutex_init(&ms->error_mutex);
migrate_params_init(&ms->parameters);
diff --git a/migration/migration.h b/migration/migration.h
index 38aa140..739289d 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -17,7 +17,7 @@
#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
-#include "qapi/qmp/json-writer.h"
+#include "qobject/json-writer.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "io/channel.h"
@@ -25,10 +25,25 @@
#include "net/announce.h"
#include "qom/object.h"
#include "postcopy-ram.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "migration/misc.h"
+#define MIGRATION_THREAD_SNAPSHOT "mig/snapshot"
+#define MIGRATION_THREAD_DIRTY_RATE "mig/dirtyrate"
+
+#define MIGRATION_THREAD_SRC_MAIN "mig/src/main"
+#define MIGRATION_THREAD_SRC_MULTIFD "mig/src/send_%d"
+#define MIGRATION_THREAD_SRC_RETURN "mig/src/return"
+#define MIGRATION_THREAD_SRC_TLS "mig/src/tls"
+
+#define MIGRATION_THREAD_DST_COLO "mig/dst/colo"
+#define MIGRATION_THREAD_DST_MULTIFD "mig/dst/recv_%d"
+#define MIGRATION_THREAD_DST_FAULT "mig/dst/fault"
+#define MIGRATION_THREAD_DST_LISTEN "mig/dst/listen"
+#define MIGRATION_THREAD_DST_PREEMPT "mig/dst/preempt"
+
struct PostcopyBlocktimeContext;
+typedef struct ThreadPool ThreadPool;
#define MIGRATION_RESUME_ACK_VALUE (1)
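The MIGRATION_THREAD_* constants above centralize thread naming; the multifd ones carry a %d slot for the channel index. A hypothetical helper showing how such a name might be built (the helper name and include path are illustrative, not part of the patch):

#include "qemu/osdep.h"
#include "migration.h"   /* MIGRATION_THREAD_SRC_MULTIFD; path assumed */

/* Hypothetical: build the per-channel send thread name, e.g. "mig/src/send_3". */
static char *multifd_send_thread_name(int id)
{
    return g_strdup_printf(MIGRATION_THREAD_SRC_MULTIFD, id);
}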
@@ -83,9 +98,9 @@ struct MigrationIncomingState {
void (*transport_cleanup)(void *data);
/*
* Used to sync thread creations. Note that we can't create threads in
- * parallel with this sem.
+ * parallel with this event.
*/
- QemuSemaphore thread_sync_sem;
+ QemuEvent thread_sync_event;
/*
* Free at the start of the main state load, set as the main thread finishes
* loading state.
@@ -171,7 +186,11 @@ struct MigrationIncomingState {
/* The coroutine we should enter (back) after failover */
Coroutine *colo_incoming_co;
- QemuSemaphore colo_incoming_sem;
+ QemuEvent colo_incoming_event;
+
+ /* Optional load threads pool and its thread exit request flag */
+ ThreadPool *load_threads;
+ bool load_threads_abort;
/*
* PostcopyBlocktimeContext to keep information for postcopy
@@ -356,17 +375,14 @@ struct MigrationState {
/* Flag set once the migration thread is running (and needs joining) */
bool migration_thread_running;
- /* Flag set once the migration thread called bdrv_inactivate_all */
- bool block_inactive;
-
/* Migration is waiting for guest to unplug device */
QemuSemaphore wait_unplug_sem;
/* Migration is paused due to pause-before-switchover */
- QemuSemaphore pause_sem;
+ QemuEvent pause_event;
- /* The semaphore is used to notify COLO thread that failover is finished */
- QemuSemaphore colo_exit_sem;
+ /* The event is used to notify COLO thread that failover is finished */
+ QemuEvent colo_exit_event;
/* The event is used to notify COLO thread to do checkpoint */
QemuEvent colo_checkpoint_event;
@@ -389,6 +405,8 @@ struct MigrationState {
bool send_configuration;
/* Whether we send section footer during migration */
bool send_section_footer;
+ /* Whether we send switchover start notification during migration */
+ bool send_switchover_start;
/* Needed by postcopy-pause state */
QemuSemaphore postcopy_pause_sem;
@@ -432,6 +450,39 @@ struct MigrationState {
* Default value is false. (since 8.1)
*/
bool multifd_flush_after_each_section;
+
+ /*
+ * This variable only makes sense when set on the machine that is
+ * the destination of a multifd migration with TLS enabled. It
+ * affects the behavior of the last send->recv iteration with
+ * regards to termination of the TLS session.
+ *
+ * When set:
+ *
+ * - the destination QEMU instance can expect to never get a
+ * GNUTLS_E_PREMATURE_TERMINATION error. Manifested as the error
+ * message: "The TLS connection was non-properly terminated".
+ *
+ * When clear:
+ *
+ * - the destination QEMU instance can expect to see a
+ * GNUTLS_E_PREMATURE_TERMINATION error in any multifd channel
+ * whenever the last recv() call of that channel happens after
+ * the source QEMU instance has already issued shutdown() on the
+ * channel.
+ *
+ * Commit 637280aeb2 (since 9.1) introduced a side effect that
+ * causes the destination instance to not be affected by the
+ * premature termination, while commit 1d457daf86 (since 10.0)
+ * causes the premature termination condition to be once again
+ * reachable.
+ *
+ * NOTE: Regardless of the state of this option, a premature
+ * termination of the TLS connection might happen due to error at
+ * any moment prior to the last send->recv iteration.
+ */
+ bool multifd_clean_tls_termination;
+
/*
* This decides the size of guest memory chunk that will be used
* to track dirty bitmap clearing. The size of memory chunk will
@@ -457,6 +508,8 @@ struct MigrationState {
bool switchover_acked;
/* Is this a rdma migration */
bool rdma_migration;
+
+ GSource *hup_source;
};
void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
@@ -471,7 +524,7 @@ bool migration_has_all_channels(void);
void migrate_set_error(MigrationState *s, const Error *error);
bool migrate_has_error(MigrationState *s);
-void migrate_fd_connect(MigrationState *s, Error *error_in);
+void migration_connect(MigrationState *s, Error *error_in);
int migration_call_notifiers(MigrationState *s, MigrationEventType type,
Error **errp);
@@ -508,8 +561,6 @@ bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
Error **errp);
void migrate_add_address(SocketAddress *address);
-bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
- Error **errp);
int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
#define qemu_ram_foreach_block \
@@ -519,7 +570,7 @@ void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);
-void migration_cancel(const Error *error);
+void migration_cancel(void);
void migration_populate_vfio_info(MigrationInfo *info);
void migration_reset_vfio_bytes_transferred(void);
@@ -537,4 +588,10 @@ int migration_rp_wait(MigrationState *s);
*/
void migration_rp_kick(MigrationState *s);
+void migration_bitmap_sync_precopy(bool last_stage);
+
+/* migration/block-dirty-bitmap.c */
+void dirty_bitmap_mig_init(void);
+bool should_send_vmdesc(void);
+
#endif
diff --git a/migration/multifd-device-state.c b/migration/multifd-device-state.c
new file mode 100644
index 0000000..94222d0
--- /dev/null
+++ b/migration/multifd-device-state.c
@@ -0,0 +1,212 @@
+/*
+ * Multifd device state migration
+ *
+ * Copyright (C) 2024,2025 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/lockable.h"
+#include "block/thread-pool.h"
+#include "migration.h"
+#include "migration/misc.h"
+#include "multifd.h"
+#include "options.h"
+
+static struct {
+ QemuMutex queue_job_mutex;
+
+ MultiFDSendData *send_data;
+
+ ThreadPool *threads;
+ bool threads_abort;
+} *multifd_send_device_state;
+
+void multifd_device_state_send_setup(void)
+{
+ assert(!multifd_send_device_state);
+ multifd_send_device_state = g_malloc(sizeof(*multifd_send_device_state));
+
+ qemu_mutex_init(&multifd_send_device_state->queue_job_mutex);
+
+ multifd_send_device_state->send_data = multifd_send_data_alloc();
+
+ multifd_send_device_state->threads = thread_pool_new();
+ multifd_send_device_state->threads_abort = false;
+}
+
+void multifd_device_state_send_cleanup(void)
+{
+ g_clear_pointer(&multifd_send_device_state->threads, thread_pool_free);
+ g_clear_pointer(&multifd_send_device_state->send_data,
+ multifd_send_data_free);
+
+ qemu_mutex_destroy(&multifd_send_device_state->queue_job_mutex);
+
+ g_clear_pointer(&multifd_send_device_state, g_free);
+}
+
+void multifd_send_data_clear_device_state(MultiFDDeviceState_t *device_state)
+{
+ g_clear_pointer(&device_state->idstr, g_free);
+ g_clear_pointer(&device_state->buf, g_free);
+}
+
+static void multifd_device_state_fill_packet(MultiFDSendParams *p)
+{
+ MultiFDDeviceState_t *device_state = &p->data->u.device_state;
+ MultiFDPacketDeviceState_t *packet = p->packet_device_state;
+
+ packet->hdr.flags = cpu_to_be32(p->flags);
+ strncpy(packet->idstr, device_state->idstr, sizeof(packet->idstr) - 1);
+ packet->idstr[sizeof(packet->idstr) - 1] = 0;
+ packet->instance_id = cpu_to_be32(device_state->instance_id);
+ packet->next_packet_size = cpu_to_be32(p->next_packet_size);
+}
+
+static void multifd_prepare_header_device_state(MultiFDSendParams *p)
+{
+ p->iov[0].iov_len = sizeof(*p->packet_device_state);
+ p->iov[0].iov_base = p->packet_device_state;
+ p->iovs_num++;
+}
+
+void multifd_device_state_send_prepare(MultiFDSendParams *p)
+{
+ MultiFDDeviceState_t *device_state = &p->data->u.device_state;
+
+ assert(multifd_payload_device_state(p->data));
+
+ multifd_prepare_header_device_state(p);
+
+ assert(!(p->flags & MULTIFD_FLAG_SYNC));
+
+ p->next_packet_size = device_state->buf_len;
+ if (p->next_packet_size > 0) {
+ p->iov[p->iovs_num].iov_base = device_state->buf;
+ p->iov[p->iovs_num].iov_len = p->next_packet_size;
+ p->iovs_num++;
+ }
+
+ p->flags |= MULTIFD_FLAG_NOCOMP | MULTIFD_FLAG_DEVICE_STATE;
+
+ multifd_device_state_fill_packet(p);
+}
+
+bool multifd_queue_device_state(char *idstr, uint32_t instance_id,
+ char *data, size_t len)
+{
+ /* Device state submissions can come from multiple threads */
+ QEMU_LOCK_GUARD(&multifd_send_device_state->queue_job_mutex);
+ MultiFDDeviceState_t *device_state;
+
+ assert(multifd_payload_empty(multifd_send_device_state->send_data));
+
+ multifd_set_payload_type(multifd_send_device_state->send_data,
+ MULTIFD_PAYLOAD_DEVICE_STATE);
+ device_state = &multifd_send_device_state->send_data->u.device_state;
+ device_state->idstr = g_strdup(idstr);
+ device_state->instance_id = instance_id;
+ device_state->buf = g_memdup2(data, len);
+ device_state->buf_len = len;
+
+ if (!multifd_send(&multifd_send_device_state->send_data)) {
+ multifd_send_data_clear(multifd_send_device_state->send_data);
+ return false;
+ }
+
+ return true;
+}
+
+bool multifd_device_state_supported(void)
+{
+ return migrate_multifd() && !migrate_mapped_ram() &&
+ migrate_multifd_compression() == MULTIFD_COMPRESSION_NONE;
+}
+
+static void multifd_device_state_save_thread_data_free(void *opaque)
+{
+ SaveLiveCompletePrecopyThreadData *data = opaque;
+
+ g_clear_pointer(&data->idstr, g_free);
+ g_free(data);
+}
+
+static int multifd_device_state_save_thread(void *opaque)
+{
+ SaveLiveCompletePrecopyThreadData *data = opaque;
+ g_autoptr(Error) local_err = NULL;
+
+ if (!data->hdlr(data, &local_err)) {
+ MigrationState *s = migrate_get_current();
+
+ /*
+ * Can't call abort_device_state_save_threads() here since new
+         * save threads could still be in the process of being launched
+ * (if, for example, the very first save thread launched exited
+ * with an error very quickly).
+ */
+
+ assert(local_err);
+
+ /*
+         * In case of multiple save threads failing, which thread's
+         * error we end up setting here is purely arbitrary.
+ */
+ migrate_set_error(s, local_err);
+ }
+
+ return 0;
+}
+
+bool multifd_device_state_save_thread_should_exit(void)
+{
+ return qatomic_read(&multifd_send_device_state->threads_abort);
+}
+
+void
+multifd_spawn_device_state_save_thread(SaveLiveCompletePrecopyThreadHandler hdlr,
+ char *idstr, uint32_t instance_id,
+ void *opaque)
+{
+ SaveLiveCompletePrecopyThreadData *data;
+
+ assert(multifd_device_state_supported());
+ assert(multifd_send_device_state);
+
+ assert(!qatomic_read(&multifd_send_device_state->threads_abort));
+
+ data = g_new(SaveLiveCompletePrecopyThreadData, 1);
+ data->hdlr = hdlr;
+ data->idstr = g_strdup(idstr);
+ data->instance_id = instance_id;
+ data->handler_opaque = opaque;
+
+ thread_pool_submit_immediate(multifd_send_device_state->threads,
+ multifd_device_state_save_thread,
+ data,
+ multifd_device_state_save_thread_data_free);
+}
+
+void multifd_abort_device_state_save_threads(void)
+{
+ assert(multifd_device_state_supported());
+
+ qatomic_set(&multifd_send_device_state->threads_abort, true);
+}
+
+bool multifd_join_device_state_save_threads(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ assert(multifd_device_state_supported());
+
+ thread_pool_wait(multifd_send_device_state->threads);
+
+ return !migrate_has_error(s);
+}
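
/*
 * Editor's note -- illustrative sketch only, not part of the patch above.
 * A device's SaveLiveCompletePrecopyThreadHandler is expected to stream its
 * state through the API in this file roughly as sketched below, and would be
 * handed to multifd_spawn_device_state_save_thread() shown above.  The bool
 * return type is inferred from multifd_device_state_save_thread(); the
 * "my_dev" naming and the single-chunk treatment of handler_opaque are
 * assumptions made purely for illustration.
 */
static bool my_dev_save_complete_thread(SaveLiveCompletePrecopyThreadData *d,
                                        Error **errp)
{
    /* Pretend the whole device state fits into one NUL-terminated chunk */
    char *buf = d->handler_opaque;
    size_t len = strlen(buf) + 1;

    if (multifd_device_state_save_thread_should_exit()) {
        /* Migration is being aborted; bail out without reporting an error */
        return true;
    }

    if (!multifd_queue_device_state(d->idstr, d->instance_id, buf, len)) {
        error_setg(errp, "%s: failed to queue device state", d->idstr);
        return false;
    }

    return true;
}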
diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c
new file mode 100644
index 0000000..b48eae3
--- /dev/null
+++ b/migration/multifd-nocomp.c
@@ -0,0 +1,468 @@
+/*
+ * Multifd RAM migration without compression
+ *
+ * Copyright (c) 2019-2020 Red Hat Inc
+ *
+ * Authors:
+ * Juan Quintela <quintela@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "system/ramblock.h"
+#include "exec/target_page.h"
+#include "file.h"
+#include "migration-stats.h"
+#include "multifd.h"
+#include "options.h"
+#include "migration.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+#include "qemu-file.h"
+
+static MultiFDSendData *multifd_ram_send;
+
+void multifd_ram_payload_alloc(MultiFDPages_t *pages)
+{
+ pages->offset = g_new0(ram_addr_t, multifd_ram_page_count());
+}
+
+void multifd_ram_payload_free(MultiFDPages_t *pages)
+{
+ g_clear_pointer(&pages->offset, g_free);
+}
+
+void multifd_ram_save_setup(void)
+{
+ multifd_ram_send = multifd_send_data_alloc();
+}
+
+void multifd_ram_save_cleanup(void)
+{
+ g_clear_pointer(&multifd_ram_send, multifd_send_data_free);
+}
+
+static void multifd_set_file_bitmap(MultiFDSendParams *p)
+{
+ MultiFDPages_t *pages = &p->data->u.ram;
+
+ assert(pages->block);
+
+ for (int i = 0; i < pages->normal_num; i++) {
+ ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true);
+ }
+
+ for (int i = pages->normal_num; i < pages->num; i++) {
+ ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false);
+ }
+}
+
+static int multifd_nocomp_send_setup(MultiFDSendParams *p, Error **errp)
+{
+ uint32_t page_count = multifd_ram_page_count();
+
+ if (migrate_zero_copy_send()) {
+ p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
+ }
+
+ if (!migrate_mapped_ram()) {
+ /* We need one extra place for the packet header */
+ p->iov = g_new0(struct iovec, page_count + 1);
+ } else {
+ p->iov = g_new0(struct iovec, page_count);
+ }
+
+ return 0;
+}
+
+static void multifd_nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
+{
+ g_free(p->iov);
+ p->iov = NULL;
+}
+
+static void multifd_ram_prepare_header(MultiFDSendParams *p)
+{
+ p->iov[0].iov_len = p->packet_len;
+ p->iov[0].iov_base = p->packet;
+ p->iovs_num++;
+}
+
+static void multifd_send_prepare_iovs(MultiFDSendParams *p)
+{
+ MultiFDPages_t *pages = &p->data->u.ram;
+ uint32_t page_size = multifd_ram_page_size();
+
+ for (int i = 0; i < pages->normal_num; i++) {
+ p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
+ p->iov[p->iovs_num].iov_len = page_size;
+ p->iovs_num++;
+ }
+
+ p->next_packet_size = pages->normal_num * page_size;
+}
+
+static int multifd_nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
+{
+ bool use_zero_copy_send = migrate_zero_copy_send();
+ int ret;
+
+ multifd_send_zero_page_detect(p);
+
+ if (migrate_mapped_ram()) {
+ multifd_send_prepare_iovs(p);
+ multifd_set_file_bitmap(p);
+
+ return 0;
+ }
+
+ if (!use_zero_copy_send) {
+ /*
+ * Only !zerocopy needs the header in IOV; zerocopy will
+ * send it separately.
+ */
+ multifd_ram_prepare_header(p);
+ }
+
+ multifd_send_prepare_iovs(p);
+ p->flags |= MULTIFD_FLAG_NOCOMP;
+
+ multifd_send_fill_packet(p);
+
+ if (use_zero_copy_send) {
+ /* Send header first, without zerocopy */
+ ret = qio_channel_write_all(p->c, (void *)p->packet,
+ p->packet_len, errp);
+ if (ret != 0) {
+ return -1;
+ }
+
+ stat64_add(&mig_stats.multifd_bytes, p->packet_len);
+ }
+
+ return 0;
+}
+
+static int multifd_nocomp_recv_setup(MultiFDRecvParams *p, Error **errp)
+{
+ p->iov = g_new0(struct iovec, multifd_ram_page_count());
+ return 0;
+}
+
+static void multifd_nocomp_recv_cleanup(MultiFDRecvParams *p)
+{
+ g_free(p->iov);
+ p->iov = NULL;
+}
+
+static int multifd_nocomp_recv(MultiFDRecvParams *p, Error **errp)
+{
+ uint32_t flags;
+
+ if (migrate_mapped_ram()) {
+ return multifd_file_recv_data(p, errp);
+ }
+
+ flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+
+ if (flags != MULTIFD_FLAG_NOCOMP) {
+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
+ p->id, flags, MULTIFD_FLAG_NOCOMP);
+ return -1;
+ }
+
+ multifd_recv_zero_page_process(p);
+
+ if (!p->normal_num) {
+ return 0;
+ }
+
+ for (int i = 0; i < p->normal_num; i++) {
+ p->iov[i].iov_base = p->host + p->normal[i];
+ p->iov[i].iov_len = multifd_ram_page_size();
+ ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
+ }
+ return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
+}
+
+static void multifd_pages_reset(MultiFDPages_t *pages)
+{
+ /*
+     * We don't need to touch the offset[] array, because it will be
+ * overwritten later when reused.
+ */
+ pages->num = 0;
+ pages->normal_num = 0;
+ pages->block = NULL;
+}
+
+void multifd_ram_fill_packet(MultiFDSendParams *p)
+{
+ MultiFDPacket_t *packet = p->packet;
+ MultiFDPages_t *pages = &p->data->u.ram;
+ uint32_t zero_num = pages->num - pages->normal_num;
+
+ packet->pages_alloc = cpu_to_be32(multifd_ram_page_count());
+ packet->normal_pages = cpu_to_be32(pages->normal_num);
+ packet->zero_pages = cpu_to_be32(zero_num);
+
+ if (pages->block) {
+ pstrcpy(packet->ramblock, sizeof(packet->ramblock),
+ pages->block->idstr);
+ }
+
+ for (int i = 0; i < pages->num; i++) {
+ /* there are architectures where ram_addr_t is 32 bit */
+ uint64_t temp = pages->offset[i];
+
+ packet->offset[i] = cpu_to_be64(temp);
+ }
+
+ trace_multifd_send_ram_fill(p->id, pages->normal_num,
+ zero_num);
+}
+
+int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp)
+{
+ MultiFDPacket_t *packet = p->packet;
+ uint32_t page_count = multifd_ram_page_count();
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t pages_per_packet = be32_to_cpu(packet->pages_alloc);
+ int i;
+
+ if (pages_per_packet > page_count) {
+ error_setg(errp, "multifd: received packet with %u pages, expected %u",
+ pages_per_packet, page_count);
+ return -1;
+ }
+
+ p->normal_num = be32_to_cpu(packet->normal_pages);
+ if (p->normal_num > pages_per_packet) {
+ error_setg(errp, "multifd: received packet with %u non-zero pages, "
+ "which exceeds maximum expected pages %u",
+ p->normal_num, pages_per_packet);
+ return -1;
+ }
+
+ p->zero_num = be32_to_cpu(packet->zero_pages);
+ if (p->zero_num > pages_per_packet - p->normal_num) {
+ error_setg(errp,
+ "multifd: received packet with %u zero pages, expected maximum %u",
+ p->zero_num, pages_per_packet - p->normal_num);
+ return -1;
+ }
+
+ if (p->normal_num == 0 && p->zero_num == 0) {
+ return 0;
+ }
+
+    /* make sure the ramblock name is NUL-terminated */
+ packet->ramblock[255] = 0;
+ p->block = qemu_ram_block_by_name(packet->ramblock);
+ if (!p->block) {
+ error_setg(errp, "multifd: unknown ram block %s",
+ packet->ramblock);
+ return -1;
+ }
+
+ p->host = p->block->host;
+ for (i = 0; i < p->normal_num; i++) {
+ uint64_t offset = be64_to_cpu(packet->offset[i]);
+
+ if (offset > (p->block->used_length - page_size)) {
+ error_setg(errp, "multifd: offset too long %" PRIu64
+ " (max " RAM_ADDR_FMT ")",
+ offset, p->block->used_length);
+ return -1;
+ }
+ p->normal[i] = offset;
+ }
+
+ for (i = 0; i < p->zero_num; i++) {
+ uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]);
+
+ if (offset > (p->block->used_length - page_size)) {
+ error_setg(errp, "multifd: offset too long %" PRIu64
+ " (max " RAM_ADDR_FMT ")",
+ offset, p->block->used_length);
+ return -1;
+ }
+ p->zero[i] = offset;
+ }
+
+ return 0;
+}
+
+static inline bool multifd_queue_empty(MultiFDPages_t *pages)
+{
+ return pages->num == 0;
+}
+
+static inline bool multifd_queue_full(MultiFDPages_t *pages)
+{
+ return pages->num == multifd_ram_page_count();
+}
+
+static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset)
+{
+ pages->offset[pages->num++] = offset;
+}
+
+/* Returns true if enqueue successful, false otherwise */
+bool multifd_queue_page(RAMBlock *block, ram_addr_t offset)
+{
+ MultiFDPages_t *pages;
+
+retry:
+ pages = &multifd_ram_send->u.ram;
+
+ if (multifd_payload_empty(multifd_ram_send)) {
+ multifd_pages_reset(pages);
+ multifd_set_payload_type(multifd_ram_send, MULTIFD_PAYLOAD_RAM);
+ }
+
+ /* If the queue is empty, we can already enqueue now */
+ if (multifd_queue_empty(pages)) {
+ pages->block = block;
+ multifd_enqueue(pages, offset);
+ return true;
+ }
+
+ /*
+     * Not empty, but we need a flush first. That can be because either:
+     *
+     * (1) The page is not on the same ramblock as the previous ones, or
+     * (2) The queue is full.
+ *
+ * After flush, always retry.
+ */
+ if (pages->block != block || multifd_queue_full(pages)) {
+ if (!multifd_send(&multifd_ram_send)) {
+ return false;
+ }
+ goto retry;
+ }
+
+ /* Not empty, and we still have space, do it! */
+ multifd_enqueue(pages, offset);
+ return true;
+}
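
/*
 * Editor's note -- illustrative sketch only, not part of the patch above.
 * A caller just keeps feeding dirty pages; the flush-and-retry logic
 * described in the comment above is handled entirely inside
 * multifd_queue_page().  The wrapper name and parameters are assumptions
 * made for illustration.
 */
static bool multifd_queue_block_example(RAMBlock *block,
                                        const ram_addr_t *offsets, int n)
{
    for (int i = 0; i < n; i++) {
        if (!multifd_queue_page(block, offsets[i])) {
            /* A multifd channel hit an error; the caller must abort */
            return false;
        }
    }

    return true;
}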
+
+/*
+ * We have two modes for multifd flushes:
+ *
+ * - Per-section mode: this is the legacy way to flush, it requires one
+ * MULTIFD_FLAG_SYNC message for each RAM_SAVE_FLAG_EOS.
+ *
+ * - Per-round mode: this is the modern way to flush, it requires one
+ * MULTIFD_FLAG_SYNC message only for each round of RAM scan. Normally
+ * it's paired with a new RAM_SAVE_FLAG_MULTIFD_FLUSH message in network
+ * based migrations.
+ *
+ * One thing to note is that mapped-ram always uses the modern way to sync.
+ */
+
+/* Do we need a per-section multifd flush (legacy way)? */
+bool multifd_ram_sync_per_section(void)
+{
+ if (!migrate_multifd()) {
+ return false;
+ }
+
+ if (migrate_mapped_ram()) {
+ return false;
+ }
+
+ return migrate_multifd_flush_after_each_section();
+}
+
+/* Do we need a per-round multifd flush (modern way)? */
+bool multifd_ram_sync_per_round(void)
+{
+ if (!migrate_multifd()) {
+ return false;
+ }
+
+ if (migrate_mapped_ram()) {
+ return true;
+ }
+
+ return !migrate_multifd_flush_after_each_section();
+}
+
+int multifd_ram_flush_and_sync(QEMUFile *f)
+{
+ MultiFDSyncReq req;
+ int ret;
+
+ if (!migrate_multifd() || migration_in_postcopy()) {
+ return 0;
+ }
+
+ if (!multifd_payload_empty(multifd_ram_send)) {
+ if (!multifd_send(&multifd_ram_send)) {
+ error_report("%s: multifd_send fail", __func__);
+ return -1;
+ }
+ }
+
+ /* File migrations only need to sync with threads */
+ req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL;
+
+ ret = multifd_send_sync_main(req);
+ if (ret) {
+ return ret;
+ }
+
+ /* If we don't need to sync with remote at all, nothing else to do */
+ if (req == MULTIFD_SYNC_LOCAL) {
+ return 0;
+ }
+
+ /*
+     * Old QEMUs don't understand RAM_SAVE_FLAG_MULTIFD_FLUSH; they rely
+     * on RAM_SAVE_FLAG_EOS instead.
+ */
+ if (migrate_multifd_flush_after_each_section()) {
+ return 0;
+ }
+
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ qemu_fflush(f);
+
+ return 0;
+}
+
+bool multifd_send_prepare_common(MultiFDSendParams *p)
+{
+ MultiFDPages_t *pages = &p->data->u.ram;
+ multifd_ram_prepare_header(p);
+ multifd_send_zero_page_detect(p);
+
+ if (!pages->normal_num) {
+ p->next_packet_size = 0;
+ return false;
+ }
+
+ return true;
+}
+
+static const MultiFDMethods multifd_nocomp_ops = {
+ .send_setup = multifd_nocomp_send_setup,
+ .send_cleanup = multifd_nocomp_send_cleanup,
+ .send_prepare = multifd_nocomp_send_prepare,
+ .recv_setup = multifd_nocomp_recv_setup,
+ .recv_cleanup = multifd_nocomp_recv_cleanup,
+ .recv = multifd_nocomp_recv
+};
+
+static void multifd_nocomp_register(void)
+{
+ multifd_register_ops(MULTIFD_COMPRESSION_NONE, &multifd_nocomp_ops);
+}
+
+migration_init(multifd_nocomp_register);
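
/*
 * Editor's note -- illustrative sketch only, not part of the patch above.
 * The RAM saving code is expected to choose between the two sync modes
 * documented at multifd_ram_sync_per_section()/_per_round() roughly as
 * below.  The wrapper name and the end_of_section/end_of_round flags are
 * assumptions made for illustration.
 */
static int multifd_ram_sync_example(QEMUFile *f, bool end_of_section,
                                    bool end_of_round)
{
    if (end_of_section && multifd_ram_sync_per_section()) {
        /* Legacy mode: one MULTIFD_FLAG_SYNC per RAM_SAVE_FLAG_EOS section */
        return multifd_ram_flush_and_sync(f);
    }

    if (end_of_round && multifd_ram_sync_per_round()) {
        /* Modern mode: one sync per completed round of RAM scanning */
        return multifd_ram_flush_and_sync(f);
    }

    return 0;
}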
diff --git a/migration/multifd-qatzip.c b/migration/multifd-qatzip.c
new file mode 100644
index 0000000..7419e5d
--- /dev/null
+++ b/migration/multifd-qatzip.c
@@ -0,0 +1,395 @@
+/*
+ * Multifd QATzip compression implementation
+ *
+ * Copyright (c) Bytedance
+ *
+ * Authors:
+ * Bryan Zhang <bryan.zhang@bytedance.com>
+ * Hao Xiang <hao.xiang@bytedance.com>
+ * Yichen Wang <yichen.wang@bytedance.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "system/ramblock.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qapi/qapi-types-migration.h"
+#include "options.h"
+#include "multifd.h"
+#include <qatzip.h>
+
+typedef struct {
+ /*
+ * Unique session for use with QATzip API
+ */
+ QzSession_T sess;
+
+ /*
+ * For compression: Buffer for pages to compress
+ * For decompression: Buffer for data to decompress
+ */
+ uint8_t *in_buf;
+ uint32_t in_len;
+
+ /*
+ * For compression: Output buffer of compressed data
+ * For decompression: Output buffer of decompressed data
+ */
+ uint8_t *out_buf;
+ uint32_t out_len;
+} QatzipData;
+
+/**
+ * qatzip_send_setup: Set up QATzip session and private buffers.
+ *
+ * @param p Multifd channel params
+ * @param errp Pointer to error, which will be set in case of error
+ * @return 0 on success, -1 on error (and *errp will be set)
+ */
+static int qatzip_send_setup(MultiFDSendParams *p, Error **errp)
+{
+ QatzipData *q;
+ QzSessionParamsDeflate_T params;
+ const char *err_msg;
+ int ret;
+
+ q = g_new0(QatzipData, 1);
+ p->compress_data = q;
+ /* We need one extra place for the packet header */
+ p->iov = g_new0(struct iovec, 2);
+
+ /*
+ * Initialize QAT device with software fallback by default. This allows
+ * QATzip to use CPU path when QAT hardware reaches maximum throughput.
+ */
+ ret = qzInit(&q->sess, true);
+ if (ret != QZ_OK && ret != QZ_DUPLICATE) {
+ err_msg = "qzInit failed";
+ goto err;
+ }
+
+ ret = qzGetDefaultsDeflate(&params);
+ if (ret != QZ_OK) {
+ err_msg = "qzGetDefaultsDeflate failed";
+ goto err;
+ }
+
+ /* Make sure to use configured QATzip compression level. */
+ params.common_params.comp_lvl = migrate_multifd_qatzip_level();
+ ret = qzSetupSessionDeflate(&q->sess, &params);
+ if (ret != QZ_OK && ret != QZ_DUPLICATE) {
+ err_msg = "qzSetupSessionDeflate failed";
+ goto err;
+ }
+
+ if (MULTIFD_PACKET_SIZE > UINT32_MAX) {
+ err_msg = "packet size too large for QAT";
+ goto err;
+ }
+
+ q->in_len = MULTIFD_PACKET_SIZE;
+ /*
+     * PINNED_MEM is an enum from the qatzip headers; it requests memory
+     * allocated with kzalloc_node() for QAT DMA purposes. When the QAT
+     * device is not available or software fallback is used, the malloc
+     * flag needs to be set to COMMON_MEM instead.
+ */
+ q->in_buf = qzMalloc(q->in_len, 0, PINNED_MEM);
+ if (!q->in_buf) {
+ q->in_buf = qzMalloc(q->in_len, 0, COMMON_MEM);
+ if (!q->in_buf) {
+ err_msg = "qzMalloc failed";
+ goto err;
+ }
+ }
+
+ q->out_len = qzMaxCompressedLength(MULTIFD_PACKET_SIZE, &q->sess);
+ q->out_buf = qzMalloc(q->out_len, 0, PINNED_MEM);
+ if (!q->out_buf) {
+ q->out_buf = qzMalloc(q->out_len, 0, COMMON_MEM);
+ if (!q->out_buf) {
+ err_msg = "qzMalloc failed";
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ error_setg(errp, "multifd %u: [sender] %s", p->id, err_msg);
+ return -1;
+}
+
+/**
+ * qatzip_send_cleanup: Tear down QATzip session and release private buffers.
+ *
+ * @param p Multifd channel params
+ * @param errp Pointer to error, which will be set in case of error
+ * @return None
+ */
+static void qatzip_send_cleanup(MultiFDSendParams *p, Error **errp)
+{
+ QatzipData *q = p->compress_data;
+
+ if (q) {
+ if (q->in_buf) {
+ qzFree(q->in_buf);
+ }
+ if (q->out_buf) {
+ qzFree(q->out_buf);
+ }
+ (void)qzTeardownSession(&q->sess);
+ (void)qzClose(&q->sess);
+ g_free(q);
+ }
+
+ g_free(p->iov);
+ p->iov = NULL;
+ p->compress_data = NULL;
+}
+
+/**
+ * qatzip_send_prepare: Compress pages and update IO channel info.
+ *
+ * @param p Multifd channel params
+ * @param errp Pointer to error, which will be set in case of error
+ * @return 0 on success, -1 on error (and *errp will be set)
+ */
+static int qatzip_send_prepare(MultiFDSendParams *p, Error **errp)
+{
+ uint32_t page_size = multifd_ram_page_size();
+ MultiFDPages_t *pages = &p->data->u.ram;
+ QatzipData *q = p->compress_data;
+ int ret;
+ unsigned int in_len, out_len;
+
+ if (!multifd_send_prepare_common(p)) {
+ goto out;
+ }
+
+ /*
+ * Unlike other multifd compression implementations, we use a non-streaming
+ * API and place all the data into one buffer, rather than sending each
+ * page to the compression API at a time. Based on initial benchmarks, the
+ * non-streaming API outperforms the streaming API. Plus, the logic in QEMU
+ * is friendly to using the non-streaming API anyway. If either of these
+ * statements becomes no longer true, we can revisit adding a streaming
+ * implementation.
+ */
+ for (int i = 0; i < pages->normal_num; i++) {
+ memcpy(q->in_buf + (i * page_size),
+ pages->block->host + pages->offset[i],
+ page_size);
+ }
+
+ in_len = pages->normal_num * page_size;
+ if (in_len > q->in_len) {
+ error_setg(errp, "multifd %u: unexpectedly large input", p->id);
+ return -1;
+ }
+ out_len = q->out_len;
+
+ ret = qzCompress(&q->sess, q->in_buf, &in_len, q->out_buf, &out_len, 1);
+ if (ret != QZ_OK) {
+ error_setg(errp, "multifd %u: QATzip returned %d instead of QZ_OK",
+ p->id, ret);
+ return -1;
+ }
+ if (in_len != pages->normal_num * page_size) {
+ error_setg(errp, "multifd %u: QATzip failed to compress all input",
+ p->id);
+ return -1;
+ }
+
+ p->iov[p->iovs_num].iov_base = q->out_buf;
+ p->iov[p->iovs_num].iov_len = out_len;
+ p->iovs_num++;
+ p->next_packet_size = out_len;
+
+out:
+ p->flags |= MULTIFD_FLAG_QATZIP;
+ multifd_send_fill_packet(p);
+ return 0;
+}
+
+/**
+ * qatzip_recv_setup: Set up QATzip session and allocate private buffers.
+ *
+ * @param p Multifd channel params
+ * @param errp Pointer to error, which will be set in case of error
+ * @return 0 on success, -1 on error (and *errp will be set)
+ */
+static int qatzip_recv_setup(MultiFDRecvParams *p, Error **errp)
+{
+ QatzipData *q;
+ QzSessionParamsDeflate_T params;
+ const char *err_msg;
+ int ret;
+
+ q = g_new0(QatzipData, 1);
+ p->compress_data = q;
+
+ /*
+ * Initialize QAT device with software fallback by default. This allows
+ * QATzip to use CPU path when QAT hardware reaches maximum throughput.
+ */
+ ret = qzInit(&q->sess, true);
+ if (ret != QZ_OK && ret != QZ_DUPLICATE) {
+ err_msg = "qzInit failed";
+ goto err;
+ }
+
+ ret = qzGetDefaultsDeflate(&params);
+ if (ret != QZ_OK) {
+ err_msg = "qzGetDefaultsDeflate failed";
+ goto err;
+ }
+
+ ret = qzSetupSessionDeflate(&q->sess, &params);
+ if (ret != QZ_OK && ret != QZ_DUPLICATE) {
+ err_msg = "qzSetupSessionDeflate failed";
+ goto err;
+ }
+
+ /*
+     * Reserve extra space for incoming packets: the current implementation
+     * never falls back to sending pages uncompressed, so the compressed
+     * stream can end up larger than the original pages.
+ */
+ q->in_len = MULTIFD_PACKET_SIZE * 2;
+ /*
+     * PINNED_MEM is an enum from the qatzip headers; it requests memory
+     * allocated with kzalloc_node() for QAT DMA purposes. When the QAT
+     * device is not available or software fallback is used, the malloc
+     * flag needs to be set to COMMON_MEM instead.
+ */
+ q->in_buf = qzMalloc(q->in_len, 0, PINNED_MEM);
+ if (!q->in_buf) {
+ q->in_buf = qzMalloc(q->in_len, 0, COMMON_MEM);
+ if (!q->in_buf) {
+ err_msg = "qzMalloc failed";
+ goto err;
+ }
+ }
+
+ q->out_len = MULTIFD_PACKET_SIZE;
+ q->out_buf = qzMalloc(q->out_len, 0, PINNED_MEM);
+ if (!q->out_buf) {
+ q->out_buf = qzMalloc(q->out_len, 0, COMMON_MEM);
+ if (!q->out_buf) {
+ err_msg = "qzMalloc failed";
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ error_setg(errp, "multifd %u: [receiver] %s", p->id, err_msg);
+ return -1;
+}
+
+/**
+ * qatzip_recv_cleanup: Tear down QATzip session and release private buffers.
+ *
+ * @param p Multifd channel params
+ * @return None
+ */
+static void qatzip_recv_cleanup(MultiFDRecvParams *p)
+{
+ QatzipData *q = p->compress_data;
+
+ if (q) {
+ if (q->in_buf) {
+ qzFree(q->in_buf);
+ }
+ if (q->out_buf) {
+ qzFree(q->out_buf);
+ }
+ (void)qzTeardownSession(&q->sess);
+ (void)qzClose(&q->sess);
+ g_free(q);
+ }
+ p->compress_data = NULL;
+}
+
+
+/**
+ * qatzip_recv: Decompress pages and copy them to the appropriate
+ * locations.
+ *
+ * @param p Multifd channel params
+ * @param errp Pointer to error, which will be set in case of error
+ * @return 0 on success, -1 on error (and *errp will be set)
+ */
+static int qatzip_recv(MultiFDRecvParams *p, Error **errp)
+{
+ QatzipData *q = p->compress_data;
+ int ret;
+ unsigned int in_len, out_len;
+ uint32_t in_size = p->next_packet_size;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t expected_size = p->normal_num * page_size;
+ uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+
+ if (in_size > q->in_len) {
+ error_setg(errp, "multifd %u: received unexpectedly large packet",
+ p->id);
+ return -1;
+ }
+
+ if (flags != MULTIFD_FLAG_QATZIP) {
+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
+ p->id, flags, MULTIFD_FLAG_QATZIP);
+ return -1;
+ }
+
+ multifd_recv_zero_page_process(p);
+ if (!p->normal_num) {
+ assert(in_size == 0);
+ return 0;
+ }
+
+ ret = qio_channel_read_all(p->c, (void *)q->in_buf, in_size, errp);
+ if (ret != 0) {
+ return ret;
+ }
+
+ in_len = in_size;
+ out_len = q->out_len;
+ ret = qzDecompress(&q->sess, q->in_buf, &in_len, q->out_buf, &out_len);
+ if (ret != QZ_OK) {
+ error_setg(errp, "multifd %u: qzDecompress failed", p->id);
+ return -1;
+ }
+ if (out_len != expected_size) {
+ error_setg(errp, "multifd %u: packet size received %u size expected %u",
+ p->id, out_len, expected_size);
+ return -1;
+ }
+
+ /* Copy each page to its appropriate location. */
+ for (int i = 0; i < p->normal_num; i++) {
+ memcpy(p->host + p->normal[i], q->out_buf + page_size * i, page_size);
+ ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
+ }
+ return 0;
+}
+
+static MultiFDMethods multifd_qatzip_ops = {
+ .send_setup = qatzip_send_setup,
+ .send_cleanup = qatzip_send_cleanup,
+ .send_prepare = qatzip_send_prepare,
+ .recv_setup = qatzip_recv_setup,
+ .recv_cleanup = qatzip_recv_cleanup,
+ .recv = qatzip_recv
+};
+
+static void multifd_qatzip_register(void)
+{
+ multifd_register_ops(MULTIFD_COMPRESSION_QATZIP, &multifd_qatzip_ops);
+}
+
+migration_init(multifd_qatzip_register);
diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
index 9265098..52902eb 100644
--- a/migration/multifd-qpl.c
+++ b/migration/multifd-qpl.c
@@ -14,7 +14,7 @@
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "multifd.h"
#include "qpl/qpl.h"
@@ -220,21 +220,13 @@ static void multifd_qpl_deinit(QplData *qpl)
}
}
-/**
- * multifd_qpl_send_setup: set up send side
- *
- * Set up the channel with QPL compression.
- *
- * Returns 0 on success or -1 on error
- *
- * @p: Params for the channel being used
- * @errp: pointer to an error
- */
static int multifd_qpl_send_setup(MultiFDSendParams *p, Error **errp)
{
QplData *qpl;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t page_count = multifd_ram_page_count();
- qpl = multifd_qpl_init(p->page_count, p->page_size, errp);
+ qpl = multifd_qpl_init(page_count, page_size, errp);
if (!qpl) {
return -1;
}
@@ -245,18 +237,10 @@ static int multifd_qpl_send_setup(MultiFDSendParams *p, Error **errp)
* additional two IOVs are used to store packet header and compressed data
* length
*/
- p->iov = g_new0(struct iovec, p->page_count + 2);
+ p->iov = g_new0(struct iovec, page_count + 2);
return 0;
}
-/**
- * multifd_qpl_send_cleanup: clean up send side
- *
- * Close the channel and free memory.
- *
- * @p: Params for the channel being used
- * @errp: pointer to an error
- */
static void multifd_qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
{
multifd_qpl_deinit(p->compress_data);
@@ -404,13 +388,14 @@ retry:
static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p)
{
QplData *qpl = p->compress_data;
- uint32_t size = p->page_size;
+ MultiFDPages_t *pages = &p->data->u.ram;
+ uint32_t size = multifd_ram_page_size();
qpl_job *job = qpl->sw_job;
uint8_t *zbuf = qpl->zbuf;
uint8_t *buf;
- for (int i = 0; i < p->pages->normal_num; i++) {
- buf = p->pages->block->host + p->pages->offset[i];
+ for (int i = 0; i < pages->normal_num; i++) {
+ buf = pages->block->host + pages->offset[i];
multifd_qpl_prepare_comp_job(job, buf, zbuf, size);
if (qpl_execute_job(job) == QPL_STS_OK) {
multifd_qpl_fill_packet(i, p, zbuf, job->total_out);
@@ -434,8 +419,8 @@ static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p)
static void multifd_qpl_compress_pages(MultiFDSendParams *p)
{
QplData *qpl = p->compress_data;
- MultiFDPages_t *pages = p->pages;
- uint32_t size = p->page_size;
+ MultiFDPages_t *pages = &p->data->u.ram;
+ uint32_t size = multifd_ram_page_size();
QplHwJob *hw_job;
uint8_t *buf;
uint8_t *zbuf;
@@ -484,20 +469,10 @@ static void multifd_qpl_compress_pages(MultiFDSendParams *p)
}
}
-/**
- * multifd_qpl_send_prepare: prepare data to be able to send
- *
- * Create a compressed buffer with all the pages that we are going to
- * send.
- *
- * Returns 0 on success or -1 on error
- *
- * @p: Params for the channel being used
- * @errp: pointer to an error
- */
static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp)
{
QplData *qpl = p->compress_data;
+ MultiFDPages_t *pages = &p->data->u.ram;
uint32_t len = 0;
if (!multifd_send_prepare_common(p)) {
@@ -505,7 +480,7 @@ static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp)
}
/* The first IOV is used to store the compressed page lengths */
- len = p->pages->normal_num * sizeof(uint32_t);
+ len = pages->normal_num * sizeof(uint32_t);
multifd_qpl_fill_iov(p, (uint8_t *) qpl->zlen, len);
if (qpl->hw_avail) {
multifd_qpl_compress_pages(p);
@@ -519,21 +494,13 @@ out:
return 0;
}
-/**
- * multifd_qpl_recv_setup: set up receive side
- *
- * Create the compressed channel and buffer.
- *
- * Returns 0 on success or -1 on error
- *
- * @p: Params for the channel being used
- * @errp: pointer to an error
- */
static int multifd_qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
{
QplData *qpl;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t page_count = multifd_ram_page_count();
- qpl = multifd_qpl_init(p->page_count, p->page_size, errp);
+ qpl = multifd_qpl_init(page_count, page_size, errp);
if (!qpl) {
return -1;
}
@@ -541,13 +508,6 @@ static int multifd_qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
return 0;
}
-/**
- * multifd_qpl_recv_cleanup: set up receive side
- *
- * Close the channel and free memory.
- *
- * @p: Params for the channel being used
- */
static void multifd_qpl_recv_cleanup(MultiFDRecvParams *p)
{
multifd_qpl_deinit(p->compress_data);
@@ -600,7 +560,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p,
Error **errp)
{
QplData *qpl = p->compress_data;
- uint32_t size = p->page_size;
+ uint32_t size = multifd_ram_page_size();
qpl_job *job = qpl->sw_job;
uint8_t *zbuf = qpl->zbuf;
uint8_t *addr;
@@ -638,7 +598,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p,
static int multifd_qpl_decompress_pages(MultiFDRecvParams *p, Error **errp)
{
QplData *qpl = p->compress_data;
- uint32_t size = p->page_size;
+ uint32_t size = multifd_ram_page_size();
uint8_t *zbuf = qpl->zbuf;
uint8_t *addr;
uint32_t len;
@@ -688,17 +648,6 @@ static int multifd_qpl_decompress_pages(MultiFDRecvParams *p, Error **errp)
}
return 0;
}
-/**
- * multifd_qpl_recv: read the data from the channel into actual pages
- *
- * Read the compressed buffer, and uncompress it into the actual
- * pages.
- *
- * Returns 0 on success or -1 on error
- *
- * @p: Params for the channel being used
- * @errp: pointer to an error
- */
static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp)
{
QplData *qpl = p->compress_data;
@@ -728,8 +677,9 @@ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp)
}
for (int i = 0; i < p->normal_num; i++) {
qpl->zlen[i] = be32_to_cpu(qpl->zlen[i]);
- assert(qpl->zlen[i] <= p->page_size);
+ assert(qpl->zlen[i] <= multifd_ram_page_size());
zbuf_len += qpl->zlen[i];
+ ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
}
/* read compressed pages */
@@ -745,7 +695,7 @@ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp)
return multifd_qpl_decompress_pages_slow_path(p, errp);
}
-static MultiFDMethods multifd_qpl_ops = {
+static const MultiFDMethods multifd_qpl_ops = {
.send_setup = multifd_qpl_send_setup,
.send_cleanup = multifd_qpl_send_cleanup,
.send_prepare = multifd_qpl_send_prepare,
diff --git a/migration/multifd-uadk.c b/migration/multifd-uadk.c
index d12353f..fd7cd9b 100644
--- a/migration/multifd-uadk.c
+++ b/migration/multifd-uadk.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qapi/error.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "migration.h"
#include "multifd.h"
#include "options.h"
@@ -103,19 +103,13 @@ static void multifd_uadk_uninit_sess(struct wd_data *wd)
g_free(wd);
}
-/**
- * multifd_uadk_send_setup: setup send side
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp)
{
struct wd_data *wd;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t page_count = multifd_ram_page_count();
- wd = multifd_uadk_init_sess(p->page_count, p->page_size, true, errp);
+ wd = multifd_uadk_init_sess(page_count, page_size, true, errp);
if (!wd) {
return -1;
}
@@ -128,24 +122,18 @@ static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp)
* length
*/
- p->iov = g_new0(struct iovec, p->page_count + 2);
+ p->iov = g_new0(struct iovec, page_count + 2);
return 0;
}
-/**
- * multifd_uadk_send_cleanup: cleanup send side
- *
- * Close the channel and return memory.
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
static void multifd_uadk_send_cleanup(MultiFDSendParams *p, Error **errp)
{
struct wd_data *wd = p->compress_data;
multifd_uadk_uninit_sess(wd);
p->compress_data = NULL;
+ g_free(p->iov);
+ p->iov = NULL;
}
static inline void prepare_next_iov(MultiFDSendParams *p, void *base,
@@ -157,40 +145,31 @@ static inline void prepare_next_iov(MultiFDSendParams *p, void *base,
p->iovs_num++;
}
-/**
- * multifd_uadk_send_prepare: prepare data to be able to send
- *
- * Create a compressed buffer with all the pages that we are going to
- * send.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
{
struct wd_data *uadk_data = p->compress_data;
uint32_t hdr_size;
+ uint32_t page_size = multifd_ram_page_size();
uint8_t *buf = uadk_data->buf;
int ret = 0;
+ MultiFDPages_t *pages = &p->data->u.ram;
if (!multifd_send_prepare_common(p)) {
goto out;
}
- hdr_size = p->pages->normal_num * sizeof(uint32_t);
+ hdr_size = pages->normal_num * sizeof(uint32_t);
/* prepare the header that stores the lengths of all compressed data */
prepare_next_iov(p, uadk_data->buf_hdr, hdr_size);
- for (int i = 0; i < p->pages->normal_num; i++) {
+ for (int i = 0; i < pages->normal_num; i++) {
struct wd_comp_req creq = {
.op_type = WD_DIR_COMPRESS,
- .src = p->pages->block->host + p->pages->offset[i],
- .src_len = p->page_size,
+ .src = pages->block->host + pages->offset[i],
+ .src_len = page_size,
.dst = buf,
/* Set dst_len to double the src in case compressed out >= page_size */
- .dst_len = p->page_size * 2,
+ .dst_len = page_size * 2,
};
if (uadk_data->handle) {
@@ -200,7 +179,7 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
p->id, ret, creq.status);
return -1;
}
- if (creq.dst_len < p->page_size) {
+ if (creq.dst_len < page_size) {
uadk_data->buf_hdr[i] = cpu_to_be32(creq.dst_len);
prepare_next_iov(p, buf, creq.dst_len);
buf += creq.dst_len;
@@ -212,11 +191,11 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
* than page_size as well because at the receive end we can skip the
* decompression. But it is tricky to find the right number here.
*/
- if (!uadk_data->handle || creq.dst_len >= p->page_size) {
- uadk_data->buf_hdr[i] = cpu_to_be32(p->page_size);
- prepare_next_iov(p, p->pages->block->host + p->pages->offset[i],
- p->page_size);
- buf += p->page_size;
+ if (!uadk_data->handle || creq.dst_len >= page_size) {
+ uadk_data->buf_hdr[i] = cpu_to_be32(page_size);
+ prepare_next_iov(p, pages->block->host + pages->offset[i],
+ page_size);
+ buf += page_size;
}
}
out:
@@ -225,21 +204,13 @@ out:
return 0;
}
-/**
- * multifd_uadk_recv_setup: setup receive side
- *
- * Create the compressed channel and buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp)
{
struct wd_data *wd;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t page_count = multifd_ram_page_count();
- wd = multifd_uadk_init_sess(p->page_count, p->page_size, false, errp);
+ wd = multifd_uadk_init_sess(page_count, page_size, false, errp);
if (!wd) {
return -1;
}
@@ -247,13 +218,6 @@ static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp)
return 0;
}
-/**
- * multifd_uadk_recv_cleanup: cleanup receive side
- *
- * Close the channel and return memory.
- *
- * @p: Params for the channel that we are using
- */
static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p)
{
struct wd_data *wd = p->compress_data;
@@ -262,17 +226,6 @@ static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p)
p->compress_data = NULL;
}
-/**
- * multifd_uadk_recv: read the data from the channel into actual pages
- *
- * Read the compressed buffer, and uncompress it into the actual
- * pages.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
{
struct wd_data *uadk_data = p->compress_data;
@@ -280,6 +233,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
uint32_t hdr_len = p->normal_num * sizeof(uint32_t);
uint32_t data_len = 0;
+ uint32_t page_size = multifd_ram_page_size();
uint8_t *buf = uadk_data->buf;
int ret = 0;
@@ -306,7 +260,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
for (int i = 0; i < p->normal_num; i++) {
uadk_data->buf_hdr[i] = be32_to_cpu(uadk_data->buf_hdr[i]);
data_len += uadk_data->buf_hdr[i];
- assert(uadk_data->buf_hdr[i] <= p->page_size);
+ assert(uadk_data->buf_hdr[i] <= page_size);
}
/* read compressed data */
@@ -322,12 +276,12 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
.src = buf,
.src_len = uadk_data->buf_hdr[i],
.dst = p->host + p->normal[i],
- .dst_len = p->page_size,
+ .dst_len = page_size,
};
- if (uadk_data->buf_hdr[i] == p->page_size) {
- memcpy(p->host + p->normal[i], buf, p->page_size);
- buf += p->page_size;
+ if (uadk_data->buf_hdr[i] == page_size) {
+ memcpy(p->host + p->normal[i], buf, page_size);
+ buf += page_size;
continue;
}
@@ -343,7 +297,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
p->id, ret, creq.status);
return -1;
}
- if (creq.dst_len != p->page_size) {
+ if (creq.dst_len != page_size) {
error_setg(errp, "multifd %u: decompressed length error", p->id);
return -1;
}
@@ -353,7 +307,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
return 0;
}
-static MultiFDMethods multifd_uadk_ops = {
+static const MultiFDMethods multifd_uadk_ops = {
.send_setup = multifd_uadk_send_setup,
.send_cleanup = multifd_uadk_send_cleanup,
.send_prepare = multifd_uadk_send_prepare,
diff --git a/migration/multifd-zero-page.c b/migration/multifd-zero-page.c
index e1b8370..4cde868 100644
--- a/migration/multifd-zero-page.c
+++ b/migration/multifd-zero-page.c
@@ -12,8 +12,9 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "migration.h"
+#include "migration-stats.h"
#include "multifd.h"
#include "options.h"
#include "ram.h"
@@ -46,14 +47,14 @@ static void swap_page_offset(ram_addr_t *pages_offset, int a, int b)
*/
void multifd_send_zero_page_detect(MultiFDSendParams *p)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
RAMBlock *rb = pages->block;
int i = 0;
int j = pages->num - 1;
if (!multifd_zero_page_enabled()) {
pages->normal_num = pages->num;
- return;
+ goto out;
}
/*
@@ -63,7 +64,7 @@ void multifd_send_zero_page_detect(MultiFDSendParams *p)
while (i <= j) {
uint64_t offset = pages->offset[i];
- if (!buffer_is_zero(rb->host + offset, p->page_size)) {
+ if (!buffer_is_zero(rb->host + offset, multifd_ram_page_size())) {
i++;
continue;
}
@@ -74,15 +75,37 @@ void multifd_send_zero_page_detect(MultiFDSendParams *p)
}
pages->normal_num = i;
+
+out:
+ stat64_add(&mig_stats.normal_pages, pages->normal_num);
+ stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
}
void multifd_recv_zero_page_process(MultiFDRecvParams *p)
{
for (int i = 0; i < p->zero_num; i++) {
void *page = p->host + p->zero[i];
- if (ramblock_recv_bitmap_test_byte_offset(p->block, p->zero[i])) {
- memset(page, 0, p->page_size);
- } else {
+ bool received =
+ ramblock_recv_bitmap_test_byte_offset(p->block, p->zero[i]);
+
+ /*
+         * During multifd migration, a zero page is written to memory
+         * only if it has been migrated more than once.
+         *
+         * This becomes a problem when both the multifd & postcopy
+         * options are enabled. If a zero page that was skipped during
+         * the multifd phase is accessed during the postcopy phase of
+         * the migration, a page fault occurs. But this page fault is
+         * not served because the 'receivedmap' says the zero page has
+         * already been received. Thus the thread accessing that page
+         * may hang.
+ *
+ * When postcopy is enabled, always write the zero page as and when
+ * it is migrated.
+ */
+ if (migrate_postcopy_ram() || received) {
+ memset(page, 0, multifd_ram_page_size());
+ }
+ if (!received) {
ramblock_recv_bitmap_set_offset(p->block, p->zero[i]);
}
}
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 2ced694..8820b2a 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include <zlib.h>
#include "qemu/rcu.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "exec/target_page.h"
#include "qapi/error.h"
#include "migration.h"
@@ -34,17 +34,7 @@ struct zlib_data {
/* Multifd zlib compression */
-/**
- * zlib_send_setup: setup send side
- *
- * Setup each channel with zlib compression.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
+static int multifd_zlib_send_setup(MultiFDSendParams *p, Error **errp)
{
struct zlib_data *z = g_new0(struct zlib_data, 1);
z_stream *zs = &z->zs;
@@ -86,15 +76,7 @@ err_free_z:
return -1;
}
-/**
- * zlib_send_cleanup: cleanup send side
- *
- * Close the channel and return memory.
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
+static void multifd_zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
{
struct zlib_data *z = p->compress_data;
@@ -110,23 +92,13 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
p->iov = NULL;
}
-/**
- * zlib_send_prepare: prepare date to be able to send
- *
- * Create a compressed buffer with all the pages that we are going to
- * send.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
+static int multifd_zlib_send_prepare(MultiFDSendParams *p, Error **errp)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
struct zlib_data *z = p->compress_data;
z_stream *zs = &z->zs;
uint32_t out_size = 0;
+ uint32_t page_size = multifd_ram_page_size();
int ret;
uint32_t i;
@@ -147,8 +119,8 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
* with compression. zlib does not guarantee that this is safe,
* therefore copy the page before calling deflate().
*/
- memcpy(z->buf, p->pages->block->host + pages->offset[i], p->page_size);
- zs->avail_in = p->page_size;
+ memcpy(z->buf, pages->block->host + pages->offset[i], page_size);
+ zs->avail_in = page_size;
zs->next_in = z->buf;
zs->avail_out = available;
@@ -188,17 +160,7 @@ out:
return 0;
}
-/**
- * zlib_recv_setup: setup receive side
- *
- * Create the compressed channel and buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
+static int multifd_zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
{
struct zlib_data *z = g_new0(struct zlib_data, 1);
z_stream *zs = &z->zs;
@@ -224,14 +186,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
return 0;
}
-/**
- * zlib_recv_cleanup: setup receive side
- *
- * For no compression this function does nothing.
- *
- * @p: Params for the channel that we are using
- */
-static void zlib_recv_cleanup(MultiFDRecvParams *p)
+static void multifd_zlib_recv_cleanup(MultiFDRecvParams *p)
{
struct zlib_data *z = p->compress_data;
@@ -242,25 +197,15 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p)
p->compress_data = NULL;
}
-/**
- * zlib_recv: read the data from the channel into actual pages
- *
- * Read the compressed buffer, and uncompress it into the actual
- * pages.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zlib_recv(MultiFDRecvParams *p, Error **errp)
+static int multifd_zlib_recv(MultiFDRecvParams *p, Error **errp)
{
struct zlib_data *z = p->compress_data;
z_stream *zs = &z->zs;
uint32_t in_size = p->next_packet_size;
/* we measure the change of total_out */
uint32_t out_size = zs->total_out;
- uint32_t expected_size = p->normal_num * p->page_size;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t expected_size = p->normal_num * page_size;
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
int ret;
int i;
@@ -296,7 +241,7 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp)
flush = Z_SYNC_FLUSH;
}
- zs->avail_out = p->page_size;
+ zs->avail_out = page_size;
zs->next_out = p->host + p->normal[i];
/*
@@ -310,8 +255,8 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp)
do {
ret = inflate(zs, flush);
} while (ret == Z_OK && zs->avail_in
- && (zs->total_out - start) < p->page_size);
- if (ret == Z_OK && (zs->total_out - start) < p->page_size) {
+ && (zs->total_out - start) < page_size);
+ if (ret == Z_OK && (zs->total_out - start) < page_size) {
error_setg(errp, "multifd %u: inflate generated too few output",
p->id);
return -1;
@@ -332,13 +277,13 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp)
return 0;
}
-static MultiFDMethods multifd_zlib_ops = {
- .send_setup = zlib_send_setup,
- .send_cleanup = zlib_send_cleanup,
- .send_prepare = zlib_send_prepare,
- .recv_setup = zlib_recv_setup,
- .recv_cleanup = zlib_recv_cleanup,
- .recv = zlib_recv
+static const MultiFDMethods multifd_zlib_ops = {
+ .send_setup = multifd_zlib_send_setup,
+ .send_cleanup = multifd_zlib_send_cleanup,
+ .send_prepare = multifd_zlib_send_prepare,
+ .recv_setup = multifd_zlib_recv_setup,
+ .recv_cleanup = multifd_zlib_recv_cleanup,
+ .recv = multifd_zlib_recv
};
static void multifd_zlib_register(void)
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index ca17b7e..3c2dcf7 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include <zstd.h>
#include "qemu/rcu.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "exec/target_page.h"
#include "qapi/error.h"
#include "migration.h"
@@ -37,17 +37,7 @@ struct zstd_data {
/* Multifd zstd compression */
-/**
- * zstd_send_setup: setup send side
- *
- * Setup each channel with zstd compression.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
+static int multifd_zstd_send_setup(MultiFDSendParams *p, Error **errp)
{
struct zstd_data *z = g_new0(struct zstd_data, 1);
int res;
@@ -83,15 +73,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
return 0;
}
-/**
- * zstd_send_cleanup: cleanup send side
- *
- * Close the channel and return memory.
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
+static void multifd_zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
{
struct zstd_data *z = p->compress_data;
@@ -106,20 +88,9 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
p->iov = NULL;
}
-/**
- * zstd_send_prepare: prepare date to be able to send
- *
- * Create a compressed buffer with all the pages that we are going to
- * send.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
+static int multifd_zstd_send_prepare(MultiFDSendParams *p, Error **errp)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
struct zstd_data *z = p->compress_data;
int ret;
uint32_t i;
@@ -138,8 +109,8 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
if (i == pages->normal_num - 1) {
flush = ZSTD_e_flush;
}
- z->in.src = p->pages->block->host + pages->offset[i];
- z->in.size = p->page_size;
+ z->in.src = pages->block->host + pages->offset[i];
+ z->in.size = multifd_ram_page_size();
z->in.pos = 0;
/*
@@ -152,9 +123,9 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
*/
do {
ret = ZSTD_compressStream2(z->zcs, &z->out, &z->in, flush);
- } while (ret > 0 && (z->in.size - z->in.pos > 0)
- && (z->out.size - z->out.pos > 0));
- if (ret > 0 && (z->in.size - z->in.pos > 0)) {
+ } while (ret > 0 && (z->in.size > z->in.pos)
+ && (z->out.size > z->out.pos));
+ if (ret > 0 && (z->in.size > z->in.pos)) {
error_setg(errp, "multifd %u: compressStream buffer too small",
p->id);
return -1;
@@ -176,17 +147,7 @@ out:
return 0;
}
-/**
- * zstd_recv_setup: setup receive side
- *
- * Create the compressed channel and buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
+static int multifd_zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
{
struct zstd_data *z = g_new0(struct zstd_data, 1);
int ret;
@@ -220,14 +181,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
return 0;
}
-/**
- * zstd_recv_cleanup: setup receive side
- *
- * For no compression this function does nothing.
- *
- * @p: Params for the channel that we are using
- */
-static void zstd_recv_cleanup(MultiFDRecvParams *p)
+static void multifd_zstd_recv_cleanup(MultiFDRecvParams *p)
{
struct zstd_data *z = p->compress_data;
@@ -239,22 +193,12 @@ static void zstd_recv_cleanup(MultiFDRecvParams *p)
p->compress_data = NULL;
}
-/**
- * zstd_recv: read the data from the channel into actual pages
- *
- * Read the compressed buffer, and uncompress it into the actual
- * pages.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int zstd_recv(MultiFDRecvParams *p, Error **errp)
+static int multifd_zstd_recv(MultiFDRecvParams *p, Error **errp)
{
uint32_t in_size = p->next_packet_size;
uint32_t out_size = 0;
- uint32_t expected_size = p->normal_num * p->page_size;
+ uint32_t page_size = multifd_ram_page_size();
+ uint32_t expected_size = p->normal_num * page_size;
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
struct zstd_data *z = p->compress_data;
int ret;
@@ -286,7 +230,7 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp)
for (i = 0; i < p->normal_num; i++) {
ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
z->out.dst = p->host + p->normal[i];
- z->out.size = p->page_size;
+ z->out.size = page_size;
z->out.pos = 0;
/*
@@ -299,9 +243,9 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp)
*/
do {
ret = ZSTD_decompressStream(z->zds, &z->out, &z->in);
- } while (ret > 0 && (z->in.size - z->in.pos > 0)
- && (z->out.pos < p->page_size));
- if (ret > 0 && (z->out.pos < p->page_size)) {
+ } while (ret > 0 && (z->in.size > z->in.pos)
+ && (z->out.pos < page_size));
+ if (ret > 0 && (z->out.pos < page_size)) {
error_setg(errp, "multifd %u: decompressStream buffer too small",
p->id);
return -1;
@@ -321,13 +265,13 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp)
return 0;
}
-static MultiFDMethods multifd_zstd_ops = {
- .send_setup = zstd_send_setup,
- .send_cleanup = zstd_send_cleanup,
- .send_prepare = zstd_send_prepare,
- .recv_setup = zstd_recv_setup,
- .recv_cleanup = zstd_recv_cleanup,
- .recv = zstd_recv
+static const MultiFDMethods multifd_zstd_ops = {
+ .send_setup = multifd_zstd_send_setup,
+ .send_cleanup = multifd_zstd_send_cleanup,
+ .send_prepare = multifd_zstd_send_prepare,
+ .recv_setup = multifd_zstd_recv_setup,
+ .recv_cleanup = multifd_zstd_recv_cleanup,
+ .recv = multifd_zstd_recv
};
static void multifd_zstd_register(void)
diff --git a/migration/multifd.c b/migration/multifd.c
index 0b4cbad..b255778 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -12,15 +12,18 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
+#include "qemu/iov.h"
#include "qemu/rcu.h"
#include "exec/target_page.h"
-#include "sysemu/sysemu.h"
-#include "exec/ramblock.h"
+#include "system/system.h"
+#include "system/ramblock.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "file.h"
+#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
+#include "savevm.h"
#include "socket.h"
#include "tls.h"
#include "qemu-file.h"
@@ -33,11 +36,6 @@
#include "io/channel-socket.h"
#include "yank_functions.h"
-/* Multiple fd's */
-
-#define MULTIFD_MAGIC 0x11223344U
-#define MULTIFD_VERSION 1
-
typedef struct {
uint32_t magic;
uint32_t version;
@@ -49,8 +47,10 @@ typedef struct {
struct {
MultiFDSendParams *params;
- /* array of pages to sent */
- MultiFDPages_t *pages;
+
+ /* multifd_send() body is not thread safe, needs serialization */
+ QemuMutex multifd_send_mutex;
+
/*
* Global number of generated multifd packets.
*
@@ -78,7 +78,7 @@ struct {
*/
int exiting;
/* multifd ops */
- MultiFDMethods *ops;
+ const MultiFDMethods *ops;
} *multifd_send_state;
struct {
@@ -95,236 +95,70 @@ struct {
uint64_t packet_num;
int exiting;
/* multifd ops */
- MultiFDMethods *ops;
+ const MultiFDMethods *ops;
} *multifd_recv_state;
-static bool multifd_use_packets(void)
-{
- return !migrate_mapped_ram();
-}
-
-void multifd_send_channel_created(void)
-{
- qemu_sem_post(&multifd_send_state->channels_created);
-}
-
-static void multifd_set_file_bitmap(MultiFDSendParams *p)
+MultiFDSendData *multifd_send_data_alloc(void)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDSendData *new = g_new0(MultiFDSendData, 1);
- assert(pages->block);
+ multifd_ram_payload_alloc(&new->u.ram);
+ /* Device state allocates its payload on-demand */
- for (int i = 0; i < p->pages->normal_num; i++) {
- ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true);
- }
-
- for (int i = p->pages->normal_num; i < p->pages->num; i++) {
- ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false);
- }
+ return new;
}
-/* Multifd without compression */
-
-/**
- * nocomp_send_setup: setup send side
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int nocomp_send_setup(MultiFDSendParams *p, Error **errp)
+void multifd_send_data_clear(MultiFDSendData *data)
{
- if (migrate_zero_copy_send()) {
- p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
+ if (multifd_payload_empty(data)) {
+ return;
}
- if (multifd_use_packets()) {
- /* We need one extra place for the packet header */
- p->iov = g_new0(struct iovec, p->page_count + 1);
- } else {
- p->iov = g_new0(struct iovec, p->page_count);
+ switch (data->type) {
+ case MULTIFD_PAYLOAD_DEVICE_STATE:
+ multifd_send_data_clear_device_state(&data->u.device_state);
+ break;
+ default:
+ /* Nothing to do */
+ break;
}
- return 0;
-}
-
-/**
- * nocomp_send_cleanup: cleanup send side
- *
- * For no compression this function does nothing.
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
-{
- g_free(p->iov);
- p->iov = NULL;
- return;
+ data->type = MULTIFD_PAYLOAD_NONE;
}
-static void multifd_send_prepare_iovs(MultiFDSendParams *p)
+void multifd_send_data_free(MultiFDSendData *data)
{
- MultiFDPages_t *pages = p->pages;
-
- for (int i = 0; i < pages->normal_num; i++) {
- p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
- p->iov[p->iovs_num].iov_len = p->page_size;
- p->iovs_num++;
- }
-
- p->next_packet_size = pages->normal_num * p->page_size;
-}
-
-/**
- * nocomp_send_prepare: prepare date to be able to send
- *
- * For no compression we just have to calculate the size of the
- * packet.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
-{
- bool use_zero_copy_send = migrate_zero_copy_send();
- int ret;
-
- multifd_send_zero_page_detect(p);
-
- if (!multifd_use_packets()) {
- multifd_send_prepare_iovs(p);
- multifd_set_file_bitmap(p);
-
- return 0;
- }
-
- if (!use_zero_copy_send) {
- /*
- * Only !zerocopy needs the header in IOV; zerocopy will
- * send it separately.
- */
- multifd_send_prepare_header(p);
+ if (!data) {
+ return;
}
- multifd_send_prepare_iovs(p);
- p->flags |= MULTIFD_FLAG_NOCOMP;
+ /* This also frees the device state payload */
+ multifd_send_data_clear(data);
- multifd_send_fill_packet(p);
-
- if (use_zero_copy_send) {
- /* Send header first, without zerocopy */
- ret = qio_channel_write_all(p->c, (void *)p->packet,
- p->packet_len, errp);
- if (ret != 0) {
- return -1;
- }
- }
+ multifd_ram_payload_free(&data->u.ram);
- return 0;
-}
-
-/**
- * nocomp_recv_setup: setup receive side
- *
- * For no compression this function does nothing.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int nocomp_recv_setup(MultiFDRecvParams *p, Error **errp)
-{
- p->iov = g_new0(struct iovec, p->page_count);
- return 0;
+ g_free(data);
}
-/**
- * nocomp_recv_cleanup: setup receive side
- *
- * For no compression this function does nothing.
- *
- * @p: Params for the channel that we are using
- */
-static void nocomp_recv_cleanup(MultiFDRecvParams *p)
+static bool multifd_use_packets(void)
{
- g_free(p->iov);
- p->iov = NULL;
+ return !migrate_mapped_ram();
}
-/**
- * nocomp_recv: read the data from the channel
- *
- * For no compression we just need to read things into the correct place.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @errp: pointer to an error
- */
-static int nocomp_recv(MultiFDRecvParams *p, Error **errp)
+void multifd_send_channel_created(void)
{
- uint32_t flags;
-
- if (!multifd_use_packets()) {
- return multifd_file_recv_data(p, errp);
- }
-
- flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
-
- if (flags != MULTIFD_FLAG_NOCOMP) {
- error_setg(errp, "multifd %u: flags received %x flags expected %x",
- p->id, flags, MULTIFD_FLAG_NOCOMP);
- return -1;
- }
-
- multifd_recv_zero_page_process(p);
-
- if (!p->normal_num) {
- return 0;
- }
-
- for (int i = 0; i < p->normal_num; i++) {
- p->iov[i].iov_base = p->host + p->normal[i];
- p->iov[i].iov_len = p->page_size;
- ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
- }
- return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
+ qemu_sem_post(&multifd_send_state->channels_created);
}
-static MultiFDMethods multifd_nocomp_ops = {
- .send_setup = nocomp_send_setup,
- .send_cleanup = nocomp_send_cleanup,
- .send_prepare = nocomp_send_prepare,
- .recv_setup = nocomp_recv_setup,
- .recv_cleanup = nocomp_recv_cleanup,
- .recv = nocomp_recv
-};
+static const MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {};
-static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
- [MULTIFD_COMPRESSION_NONE] = &multifd_nocomp_ops,
-};
-
-void multifd_register_ops(int method, MultiFDMethods *ops)
+void multifd_register_ops(int method, const MultiFDMethods *ops)
{
- assert(0 < method && method < MULTIFD_COMPRESSION__MAX);
+ assert(0 <= method && method < MULTIFD_COMPRESSION__MAX);
+ assert(!multifd_ops[method]);
multifd_ops[method] = ops;
}
-/* Reset a MultiFDPages_t* object for the next use */
-static void multifd_pages_reset(MultiFDPages_t *pages)
-{
- /*
- * We don't need to touch offset[] array, because it will be
- * overwritten later when reused.
- */
- pages->num = 0;
- pages->normal_num = 0;
- pages->block = NULL;
-}
-
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
MultiFDInit_t msg = {};
@@ -389,160 +223,95 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
return msg.id;
}
-static MultiFDPages_t *multifd_pages_init(uint32_t n)
-{
- MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
-
- pages->allocated = n;
- pages->offset = g_new0(ram_addr_t, n);
-
- return pages;
-}
-
-static void multifd_pages_clear(MultiFDPages_t *pages)
-{
- multifd_pages_reset(pages);
- pages->allocated = 0;
- g_free(pages->offset);
- pages->offset = NULL;
- g_free(pages);
-}
-
+/* Fills a RAM multifd packet (or a bare SYNC packet) */
void multifd_send_fill_packet(MultiFDSendParams *p)
{
MultiFDPacket_t *packet = p->packet;
- MultiFDPages_t *pages = p->pages;
uint64_t packet_num;
- uint32_t zero_num = pages->num - pages->normal_num;
- int i;
+ bool sync_packet = p->flags & MULTIFD_FLAG_SYNC;
+
+ memset(packet, 0, p->packet_len);
- packet->flags = cpu_to_be32(p->flags);
- packet->pages_alloc = cpu_to_be32(p->pages->allocated);
- packet->normal_pages = cpu_to_be32(pages->normal_num);
- packet->zero_pages = cpu_to_be32(zero_num);
+ packet->hdr.magic = cpu_to_be32(MULTIFD_MAGIC);
+ packet->hdr.version = cpu_to_be32(MULTIFD_VERSION);
+
+ packet->hdr.flags = cpu_to_be32(p->flags);
packet->next_packet_size = cpu_to_be32(p->next_packet_size);
packet_num = qatomic_fetch_inc(&multifd_send_state->packet_num);
packet->packet_num = cpu_to_be64(packet_num);
- if (pages->block) {
- strncpy(packet->ramblock, pages->block->idstr, 256);
- }
-
- for (i = 0; i < pages->num; i++) {
- /* there are architectures where ram_addr_t is 32 bit */
- uint64_t temp = pages->offset[i];
+ p->packets_sent++;
- packet->offset[i] = cpu_to_be64(temp);
+ if (!sync_packet) {
+ multifd_ram_fill_packet(p);
}
- p->packets_sent++;
- p->total_normal_pages += pages->normal_num;
- p->total_zero_pages += zero_num;
-
- trace_multifd_send(p->id, packet_num, pages->normal_num, zero_num,
- p->flags, p->next_packet_size);
+ trace_multifd_send_fill(p->id, packet_num,
+ p->flags, p->next_packet_size);
}
-static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
+static int multifd_recv_unfill_packet_header(MultiFDRecvParams *p,
+ const MultiFDPacketHdr_t *hdr,
+ Error **errp)
{
- MultiFDPacket_t *packet = p->packet;
- int i;
+ uint32_t magic = be32_to_cpu(hdr->magic);
+ uint32_t version = be32_to_cpu(hdr->version);
- packet->magic = be32_to_cpu(packet->magic);
- if (packet->magic != MULTIFD_MAGIC) {
- error_setg(errp, "multifd: received packet "
- "magic %x and expected magic %x",
- packet->magic, MULTIFD_MAGIC);
+ if (magic != MULTIFD_MAGIC) {
+ error_setg(errp, "multifd: received packet magic %x, expected %x",
+ magic, MULTIFD_MAGIC);
return -1;
}
- packet->version = be32_to_cpu(packet->version);
- if (packet->version != MULTIFD_VERSION) {
- error_setg(errp, "multifd: received packet "
- "version %u and expected version %u",
- packet->version, MULTIFD_VERSION);
+ if (version != MULTIFD_VERSION) {
+ error_setg(errp, "multifd: received packet version %u, expected %u",
+ version, MULTIFD_VERSION);
return -1;
}
- p->flags = be32_to_cpu(packet->flags);
-
- packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
- /*
- * If we received a packet that is 100 times bigger than expected
- * just stop migration. It is a magic number.
- */
- if (packet->pages_alloc > p->page_count) {
- error_setg(errp, "multifd: received packet "
- "with size %u and expected a size of %u",
- packet->pages_alloc, p->page_count) ;
- return -1;
- }
+ p->flags = be32_to_cpu(hdr->flags);
- p->normal_num = be32_to_cpu(packet->normal_pages);
- if (p->normal_num > packet->pages_alloc) {
- error_setg(errp, "multifd: received packet "
- "with %u normal pages and expected maximum pages are %u",
- p->normal_num, packet->pages_alloc) ;
- return -1;
- }
+ return 0;
+}
- p->zero_num = be32_to_cpu(packet->zero_pages);
- if (p->zero_num > packet->pages_alloc - p->normal_num) {
- error_setg(errp, "multifd: received packet "
- "with %u zero pages and expected maximum zero pages are %u",
- p->zero_num, packet->pages_alloc - p->normal_num) ;
- return -1;
- }
+static int multifd_recv_unfill_packet_device_state(MultiFDRecvParams *p,
+ Error **errp)
+{
+ MultiFDPacketDeviceState_t *packet = p->packet_dev_state;
+ packet->instance_id = be32_to_cpu(packet->instance_id);
p->next_packet_size = be32_to_cpu(packet->next_packet_size);
- p->packet_num = be64_to_cpu(packet->packet_num);
- p->packets_recved++;
- p->total_normal_pages += p->normal_num;
- p->total_zero_pages += p->zero_num;
- trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
- p->flags, p->next_packet_size);
+ return 0;
+}
- if (p->normal_num == 0 && p->zero_num == 0) {
- return 0;
- }
+static int multifd_recv_unfill_packet_ram(MultiFDRecvParams *p, Error **errp)
+{
+ const MultiFDPacket_t *packet = p->packet;
+ int ret = 0;
- /* make sure that ramblock is 0 terminated */
- packet->ramblock[255] = 0;
- p->block = qemu_ram_block_by_name(packet->ramblock);
- if (!p->block) {
- error_setg(errp, "multifd: unknown ram block %s",
- packet->ramblock);
- return -1;
- }
+ p->next_packet_size = be32_to_cpu(packet->next_packet_size);
+ p->packet_num = be64_to_cpu(packet->packet_num);
- p->host = p->block->host;
- for (i = 0; i < p->normal_num; i++) {
- uint64_t offset = be64_to_cpu(packet->offset[i]);
+ /* Always unfill, old QEMUs (<9.0) send data along with SYNC */
+ ret = multifd_ram_unfill_packet(p, errp);
- if (offset > (p->block->used_length - p->page_size)) {
- error_setg(errp, "multifd: offset too long %" PRIu64
- " (max " RAM_ADDR_FMT ")",
- offset, p->block->used_length);
- return -1;
- }
- p->normal[i] = offset;
- }
+ trace_multifd_recv_unfill(p->id, p->packet_num, p->flags,
+ p->next_packet_size);
- for (i = 0; i < p->zero_num; i++) {
- uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]);
+ return ret;
+}
- if (offset > (p->block->used_length - p->page_size)) {
- error_setg(errp, "multifd: offset too long %" PRIu64
- " (max " RAM_ADDR_FMT ")",
- offset, p->block->used_length);
- return -1;
- }
- p->zero[i] = offset;
+static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
+{
+ p->packets_recved++;
+
+ if (p->flags & MULTIFD_FLAG_DEVICE_STATE) {
+ return multifd_recv_unfill_packet_device_state(p, errp);
}
- return 0;
+ return multifd_recv_unfill_packet_ram(p, errp);
}
static bool multifd_send_should_exit(void)
@@ -568,35 +337,32 @@ static void multifd_send_kick_main(MultiFDSendParams *p)
}
/*
- * How we use multifd_send_state->pages and channel->pages?
+ * multifd_send() works by exchanging the MultiFDSendData object
+ * provided by the caller with an unused MultiFDSendData object from
+ * the next channel that is found to be idle.
*
- * We create a pages for each channel, and a main one. Each time that
- * we need to send a batch of pages we interchange the ones between
- * multifd_send_state and the channel that is sending it. There are
- * two reasons for that:
- * - to not have to do so many mallocs during migration
- * - to make easier to know what to free at the end of migration
+ * The channel owns the data until it finishes transmitting and the
+ * caller owns the empty object until it fills it with data and calls
+ * this function again. No locking necessary.
*
- * This way we always know who is the owner of each "pages" struct,
- * and we don't need any locking. It belongs to the migration thread
- * or to the channel thread. Switching is safe because the migration
- * thread is using the channel mutex when changing it, and the channel
- * have to had finish with its own, otherwise pending_job can't be
- * false.
+ * Switching is safe because both the migration thread and the channel
+ * thread have barriers in place to serialize access.
*
* Returns true on success, false otherwise.
*/
-static bool multifd_send_pages(void)
+bool multifd_send(MultiFDSendData **send_data)
{
int i;
static int next_channel;
MultiFDSendParams *p = NULL; /* make happy gcc */
- MultiFDPages_t *pages = multifd_send_state->pages;
+ MultiFDSendData *tmp;
if (multifd_send_should_exit()) {
return false;
}
+ QEMU_LOCK_GUARD(&multifd_send_state->multifd_send_mutex);
+
/* We wait here, until at least one channel is ready */
qemu_sem_wait(&multifd_send_state->channels_ready);
@@ -626,66 +392,24 @@ static bool multifd_send_pages(void)
* qatomic_store_release() in multifd_send_thread().
*/
smp_mb_acquire();
- assert(!p->pages->num);
- multifd_send_state->pages = p->pages;
- p->pages = pages;
- /*
- * Making sure p->pages is setup before marking pending_job=true. Pairs
- * with the qatomic_load_acquire() in multifd_send_thread().
- */
- qatomic_store_release(&p->pending_job, true);
- qemu_sem_post(&p->sem);
-
- return true;
-}
-
-static inline bool multifd_queue_empty(MultiFDPages_t *pages)
-{
- return pages->num == 0;
-}
-static inline bool multifd_queue_full(MultiFDPages_t *pages)
-{
- return pages->num == pages->allocated;
-}
-
-static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset)
-{
- pages->offset[pages->num++] = offset;
-}
+ assert(multifd_payload_empty(p->data));
-/* Returns true if enqueue successful, false otherwise */
-bool multifd_queue_page(RAMBlock *block, ram_addr_t offset)
-{
- MultiFDPages_t *pages;
-
-retry:
- pages = multifd_send_state->pages;
-
- /* If the queue is empty, we can already enqueue now */
- if (multifd_queue_empty(pages)) {
- pages->block = block;
- multifd_enqueue(pages, offset);
- return true;
- }
+ /*
+ * Swap the pointers. The channel gets the client data for
+ * transferring and the client gets back an unused data slot.
+ */
+ tmp = *send_data;
+ *send_data = p->data;
+ p->data = tmp;
/*
- * Not empty, meanwhile we need a flush. It can because of either:
- *
- * (1) The page is not on the same ramblock of previous ones, or,
- * (2) The queue is full.
- *
- * After flush, always retry.
+ * Making sure p->data is setup before marking pending_job=true. Pairs
+ * with the qatomic_load_acquire() in multifd_send_thread().
*/
- if (pages->block != block || multifd_queue_full(pages)) {
- if (!multifd_send_pages()) {
- return false;
- }
- goto retry;
- }
+ qatomic_store_release(&p->pending_job, true);
+ qemu_sem_post(&p->sem);
- /* Not empty, and we still have space, do it! */
- multifd_enqueue(pages, offset);
return true;
}
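/*
 * Editor's sketch, not part of the patch: how a producer is expected to
 * drive multifd_send(). The example_* name is hypothetical; the real RAM
 * producer lives in the multifd RAM code. The caller fills its own
 * MultiFDSendData, hands it over, and gets an unused slot back, so the
 * payload itself never needs a lock. For brevity this sends after every
 * page; the real producer batches until the packet is full or the
 * ramblock changes.
 */
static bool example_queue_page(MultiFDSendData **data, RAMBlock *block,
                               ram_addr_t offset)
{
    MultiFDPages_t *pages = &(*data)->u.ram;

    if (multifd_payload_empty(*data)) {
        multifd_set_payload_type(*data, MULTIFD_PAYLOAD_RAM);
        pages->block = block;
    }
    pages->offset[pages->num++] = offset;

    /* Exchange the filled object for an idle channel's empty one */
    return multifd_send(data);
}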
@@ -775,7 +499,7 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
* channels have no I/O handler callback registered when reaching
* here, because migration thread will wait for all multifd channel
* establishments to complete during setup. Since
- * migrate_fd_cleanup() will be scheduled in main thread too, all
+ * migration_cleanup() will be scheduled in main thread too, all
* previous callbacks should guarantee to be completed when
* reaching here. See multifd_send_state.channels_created and its
* usage. In the future, we could replace this with an assert
@@ -790,12 +514,13 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
p->name = NULL;
- multifd_pages_clear(p->pages);
- p->pages = NULL;
+ g_clear_pointer(&p->data, multifd_send_data_free);
p->packet_len = 0;
+ g_clear_pointer(&p->packet_device_state, g_free);
g_free(p->packet);
p->packet = NULL;
multifd_send_state->ops->send_cleanup(p, errp);
+ assert(!p->iov);
return *errp == NULL;
}
@@ -804,12 +529,12 @@ static void multifd_send_cleanup_state(void)
{
file_cleanup_outgoing_migration();
socket_cleanup_outgoing_migration();
+ multifd_device_state_send_cleanup();
qemu_sem_destroy(&multifd_send_state->channels_created);
qemu_sem_destroy(&multifd_send_state->channels_ready);
+ qemu_mutex_destroy(&multifd_send_state->multifd_send_mutex);
g_free(multifd_send_state->params);
multifd_send_state->params = NULL;
- multifd_pages_clear(multifd_send_state->pages);
- multifd_send_state->pages = NULL;
g_free(multifd_send_state);
multifd_send_state = NULL;
}
@@ -822,6 +547,36 @@ void multifd_send_shutdown(void)
return;
}
+ for (i = 0; i < migrate_multifd_channels(); i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+
+ /* thread_created implies the TLS handshake has succeeded */
+ if (p->tls_thread_created && p->thread_created) {
+ Error *local_err = NULL;
+ /*
+ * The destination expects the TLS session to always be
+ * properly terminated. This helps to detect a premature
+ * termination in the middle of the stream. Note that
+ * older QEMUs always break the connection on the source
+ * and the destination always sees
+ * GNUTLS_E_PREMATURE_TERMINATION.
+ */
+ migration_tls_channel_end(p->c, &local_err);
+
+ /*
+ * The above can return an error in case the migration has
+ * already failed. If the migration succeeded, errors are
+ * not expected but there's no need to kill the source.
+ */
+ if (local_err && !migration_has_failed(migrate_get_current())) {
+ warn_report(
+ "multifd_send_%d: Failed to terminate TLS connection: %s",
+ p->id, error_get_pretty(local_err));
+ break;
+ }
+ }
+ }
+
multifd_send_terminate_threads();
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -854,20 +609,12 @@ static int multifd_zero_copy_flush(QIOChannel *c)
return ret;
}
-int multifd_send_sync_main(void)
+int multifd_send_sync_main(MultiFDSyncReq req)
{
int i;
bool flush_zero_copy;
- if (!migrate_multifd()) {
- return 0;
- }
- if (multifd_send_state->pages->num) {
- if (!multifd_send_pages()) {
- error_report("%s: multifd_send_pages fail", __func__);
- return -1;
- }
- }
+ assert(req != MULTIFD_SYNC_NONE);
flush_zero_copy = migrate_zero_copy_send();
@@ -884,8 +631,8 @@ int multifd_send_sync_main(void)
* We should be the only user so far, so not possible to be set by
* others concurrently.
*/
- assert(qatomic_read(&p->pending_sync) == false);
- qatomic_set(&p->pending_sync, true);
+ assert(qatomic_read(&p->pending_sync) == MULTIFD_SYNC_NONE);
+ qatomic_set(&p->pending_sync, req);
qemu_sem_post(&p->sem);
}
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -937,26 +684,46 @@ static void *multifd_send_thread(void *opaque)
}
/*
- * Read pending_job flag before p->pages. Pairs with the
- * qatomic_store_release() in multifd_send_pages().
+ * Read pending_job flag before p->data. Pairs with the
+ * qatomic_store_release() in multifd_send().
*/
if (qatomic_load_acquire(&p->pending_job)) {
- MultiFDPages_t *pages = p->pages;
+ bool is_device_state = multifd_payload_device_state(p->data);
+ size_t total_size;
+ int write_flags_masked = 0;
+ p->flags = 0;
p->iovs_num = 0;
- assert(pages->num);
+ assert(!multifd_payload_empty(p->data));
- ret = multifd_send_state->ops->send_prepare(p, &local_err);
- if (ret != 0) {
- break;
+ if (is_device_state) {
+ multifd_device_state_send_prepare(p);
+
+ /* Device state packets cannot be sent via zerocopy */
+ write_flags_masked |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
+ } else {
+ ret = multifd_send_state->ops->send_prepare(p, &local_err);
+ if (ret != 0) {
+ break;
+ }
}
+ /*
+ * The packet header in the zerocopy RAM case is accounted for
+ * in multifd_nocomp_send_prepare() - where it is actually
+ * being sent.
+ */
+ total_size = iov_size(p->iov, p->iovs_num);
+
if (migrate_mapped_ram()) {
+ assert(!is_device_state);
+
ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num,
- p->pages->block, &local_err);
+ &p->data->u.ram, &local_err);
} else {
ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
- NULL, 0, p->write_flags,
+ NULL, 0,
+ p->write_flags & ~write_flags_masked,
&local_err);
}
@@ -964,29 +731,29 @@ static void *multifd_send_thread(void *opaque)
break;
}
- stat64_add(&mig_stats.multifd_bytes,
- p->next_packet_size + p->packet_len);
- stat64_add(&mig_stats.normal_pages, pages->normal_num);
- stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
+ stat64_add(&mig_stats.multifd_bytes, total_size);
- multifd_pages_reset(p->pages);
p->next_packet_size = 0;
+ multifd_send_data_clear(p->data);
/*
- * Making sure p->pages is published before saying "we're
+ * Making sure p->data is published before saying "we're
* free". Pairs with the smp_mb_acquire() in
- * multifd_send_pages().
+ * multifd_send().
*/
qatomic_store_release(&p->pending_job, false);
} else {
+ MultiFDSyncReq req = qatomic_read(&p->pending_sync);
+
/*
* If not a normal job, must be a sync request. Note that
* pending_sync is a standalone flag (unlike pending_job), so
* it doesn't require explicit memory barriers.
*/
- assert(qatomic_read(&p->pending_sync));
+ assert(req != MULTIFD_SYNC_NONE);
- if (use_packets) {
+ /* Only push the SYNC message if it involves a remote sync */
+ if (req == MULTIFD_SYNC_ALL) {
p->flags = MULTIFD_FLAG_SYNC;
multifd_send_fill_packet(p);
ret = qio_channel_write_all(p->c, (void *)p->packet,
@@ -996,10 +763,9 @@ static void *multifd_send_thread(void *opaque)
}
/* p->next_packet_size will always be zero for a SYNC packet */
stat64_add(&mig_stats.multifd_bytes, p->packet_len);
- p->flags = 0;
}
- qatomic_set(&p->pending_sync, false);
+ qatomic_set(&p->pending_sync, MULTIFD_SYNC_NONE);
qemu_sem_post(&p->sem_sync);
}
}
@@ -1015,8 +781,7 @@ out:
rcu_unregister_thread();
migration_threads_remove(thread);
- trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages,
- p->total_zero_pages);
+ trace_multifd_send_thread_end(p->id, p->packets_sent);
return NULL;
}
@@ -1069,7 +834,7 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p,
args->p = p;
p->tls_thread_created = true;
- qemu_thread_create(&p->tls_thread, "mig/src/tls",
+ qemu_thread_create(&p->tls_thread, MIGRATION_THREAD_SRC_TLS,
multifd_tls_handshake_thread, args,
QEMU_THREAD_JOINABLE);
return true;
@@ -1156,9 +921,8 @@ static bool multifd_new_send_channel_create(gpointer opaque, Error **errp)
bool multifd_send_setup(void)
{
MigrationState *s = migrate_get_current();
- Error *local_err = NULL;
int thread_count, ret = 0;
- uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+ uint32_t page_count = multifd_ram_page_count();
bool use_packets = multifd_use_packets();
uint8_t i;
@@ -1169,7 +933,7 @@ bool multifd_send_setup(void)
thread_count = migrate_multifd_channels();
multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
- multifd_send_state->pages = multifd_pages_init(page_count);
+ qemu_mutex_init(&multifd_send_state->multifd_send_mutex);
qemu_sem_init(&multifd_send_state->channels_created, 0);
qemu_sem_init(&multifd_send_state->channels_ready, 0);
qatomic_set(&multifd_send_state->exiting, 0);
@@ -1177,26 +941,27 @@ bool multifd_send_setup(void)
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
+ Error *local_err = NULL;
qemu_sem_init(&p->sem, 0);
qemu_sem_init(&p->sem_sync, 0);
p->id = i;
- p->pages = multifd_pages_init(page_count);
+ p->data = multifd_send_data_alloc();
if (use_packets) {
p->packet_len = sizeof(MultiFDPacket_t)
+ sizeof(uint64_t) * page_count;
p->packet = g_malloc0(p->packet_len);
- p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
- p->packet->version = cpu_to_be32(MULTIFD_VERSION);
+ p->packet_device_state = g_malloc0(sizeof(*p->packet_device_state));
+ p->packet_device_state->hdr.magic = cpu_to_be32(MULTIFD_MAGIC);
+ p->packet_device_state->hdr.version = cpu_to_be32(MULTIFD_VERSION);
}
- p->name = g_strdup_printf("mig/src/send_%d", i);
- p->page_size = qemu_target_page_size();
- p->page_count = page_count;
+ p->name = g_strdup_printf(MIGRATION_THREAD_SRC_MULTIFD, i);
p->write_flags = 0;
if (!multifd_new_send_channel_create(p, &local_err)) {
- return false;
+ migrate_set_error(s, local_err);
+ ret = -1;
}
}
@@ -1209,24 +974,30 @@ bool multifd_send_setup(void)
qemu_sem_wait(&multifd_send_state->channels_created);
}
+ if (ret) {
+ goto err;
+ }
+
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
+ Error *local_err = NULL;
ret = multifd_send_state->ops->send_setup(p, &local_err);
if (ret) {
- break;
+ migrate_set_error(s, local_err);
+ goto err;
}
+ assert(p->iov);
}
- if (ret) {
- migrate_set_error(s, local_err);
- error_report_err(local_err);
- migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
- MIGRATION_STATUS_FAILED);
- return false;
- }
+ multifd_device_state_send_setup();
return true;
+
+err:
+ migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
+ MIGRATION_STATUS_FAILED);
+ return false;
}
bool multifd_recv(void)
@@ -1353,11 +1124,14 @@ static void multifd_recv_cleanup_channel(MultiFDRecvParams *p)
qemu_mutex_destroy(&p->mutex);
qemu_sem_destroy(&p->sem_sync);
qemu_sem_destroy(&p->sem);
+ g_free(p->data);
+ p->data = NULL;
g_free(p->name);
p->name = NULL;
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
+ g_clear_pointer(&p->packet_dev_state, g_free);
g_free(p->normal);
p->normal = NULL;
g_free(p->zero);
@@ -1459,8 +1233,37 @@ void multifd_recv_sync_main(void)
trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}
+static int multifd_device_state_recv(MultiFDRecvParams *p, Error **errp)
+{
+ g_autofree char *dev_state_buf = NULL;
+ int ret;
+
+ dev_state_buf = g_malloc(p->next_packet_size);
+
+ ret = qio_channel_read_all(p->c, dev_state_buf, p->next_packet_size, errp);
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (p->packet_dev_state->idstr[sizeof(p->packet_dev_state->idstr) - 1]
+ != 0) {
+ error_setg(errp, "unterminated multifd device state idstr");
+ return -1;
+ }
+
+ if (!qemu_loadvm_load_state_buffer(p->packet_dev_state->idstr,
+ p->packet_dev_state->instance_id,
+ dev_state_buf, p->next_packet_size,
+ errp)) {
+ ret = -1;
+ }
+
+ return ret;
+}
+
static void *multifd_recv_thread(void *opaque)
{
+ MigrationState *s = migrate_get_current();
MultiFDRecvParams *p = opaque;
Error *local_err = NULL;
bool use_packets = multifd_use_packets();
@@ -1469,19 +1272,65 @@ static void *multifd_recv_thread(void *opaque)
trace_multifd_recv_thread_start(p->id);
rcu_register_thread();
+ if (!s->multifd_clean_tls_termination) {
+ p->read_flags = QIO_CHANNEL_READ_FLAG_RELAXED_EOF;
+ }
+
while (true) {
+ MultiFDPacketHdr_t hdr;
uint32_t flags = 0;
+ bool is_device_state = false;
bool has_data = false;
+ uint8_t *pkt_buf;
+ size_t pkt_len;
+
p->normal_num = 0;
if (use_packets) {
+ struct iovec iov = {
+ .iov_base = (void *)&hdr,
+ .iov_len = sizeof(hdr)
+ };
+
if (multifd_recv_should_exit()) {
break;
}
- ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
- p->packet_len, &local_err);
- if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */
+ ret = qio_channel_readv_full_all_eof(p->c, &iov, 1, NULL, NULL,
+ p->read_flags, &local_err);
+ if (!ret) {
+ /* EOF */
+ assert(!local_err);
+ break;
+ }
+
+ if (ret == -1) {
+ break;
+ }
+
+ ret = multifd_recv_unfill_packet_header(p, &hdr, &local_err);
+ if (ret) {
+ break;
+ }
+
+ is_device_state = p->flags & MULTIFD_FLAG_DEVICE_STATE;
+ if (is_device_state) {
+ pkt_buf = (uint8_t *)p->packet_dev_state + sizeof(hdr);
+ pkt_len = sizeof(*p->packet_dev_state) - sizeof(hdr);
+ } else {
+ pkt_buf = (uint8_t *)p->packet + sizeof(hdr);
+ pkt_len = p->packet_len - sizeof(hdr);
+ }
+
+ ret = qio_channel_read_all_eof(p->c, (char *)pkt_buf, pkt_len,
+ &local_err);
+ if (!ret) {
+ /* EOF */
+ error_setg(&local_err, "multifd: unexpected EOF after packet header");
+ break;
+ }
+
+ if (ret == -1) {
break;
}
@@ -1495,7 +1344,18 @@ static void *multifd_recv_thread(void *opaque)
flags = p->flags;
/* recv methods don't know how to handle the SYNC flag */
p->flags &= ~MULTIFD_FLAG_SYNC;
- has_data = p->normal_num || p->zero_num;
+
+ if (is_device_state) {
+ has_data = p->next_packet_size > 0;
+ } else {
+ /*
+ * Even if it's a SYNC packet, this needs to be set
+ * because older QEMUs (<9.0) still send data along with
+ * the SYNC packet.
+ */
+ has_data = p->normal_num || p->zero_num;
+ }
+
qemu_mutex_unlock(&p->mutex);
} else {
/*
@@ -1524,19 +1384,40 @@ static void *multifd_recv_thread(void *opaque)
}
if (has_data) {
- ret = multifd_recv_state->ops->recv(p, &local_err);
+ /*
+ * multifd thread should not be active and receive data
+ * when migration is in the Postcopy phase. Two threads
+ * writing the same memory area could easily corrupt
+ * the guest state.
+ */
+ assert(!migration_in_postcopy());
+ if (is_device_state) {
+ assert(use_packets);
+ ret = multifd_device_state_recv(p, &local_err);
+ } else {
+ ret = multifd_recv_state->ops->recv(p, &local_err);
+ }
if (ret != 0) {
break;
}
+ } else if (is_device_state) {
+ error_setg(&local_err,
+ "multifd: received empty device state packet");
+ break;
}
if (use_packets) {
if (flags & MULTIFD_FLAG_SYNC) {
+ if (is_device_state) {
+ error_setg(&local_err,
+ "multifd: received SYNC device state packet");
+ break;
+ }
+
qemu_sem_post(&multifd_recv_state->sem_sync);
qemu_sem_wait(&p->sem_sync);
}
} else {
- p->total_normal_pages += p->data->size / qemu_target_page_size();
p->data->size = 0;
/*
* Order data->size update before clearing
@@ -1553,9 +1434,7 @@ static void *multifd_recv_thread(void *opaque)
}
rcu_unregister_thread();
- trace_multifd_recv_thread_end(p->id, p->packets_recved,
- p->total_normal_pages,
- p->total_zero_pages);
+ trace_multifd_recv_thread_end(p->id, p->packets_recved);
return NULL;
}
@@ -1563,7 +1442,7 @@ static void *multifd_recv_thread(void *opaque)
int multifd_recv_setup(Error **errp)
{
int thread_count;
- uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+ uint32_t page_count = multifd_ram_page_count();
bool use_packets = multifd_use_packets();
uint8_t i;
@@ -1603,12 +1482,11 @@ int multifd_recv_setup(Error **errp)
p->packet_len = sizeof(MultiFDPacket_t)
+ sizeof(uint64_t) * page_count;
p->packet = g_malloc0(p->packet_len);
+ p->packet_dev_state = g_malloc0(sizeof(*p->packet_dev_state));
}
- p->name = g_strdup_printf("mig/dst/recv_%d", i);
+ p->name = g_strdup_printf(MIGRATION_THREAD_DST_MULTIFD, i);
p->normal = g_new0(ram_addr_t, page_count);
p->zero = g_new0(ram_addr_t, page_count);
- p->page_count = page_count;
- p->page_size = qemu_target_page_size();
}
for (i = 0; i < thread_count; i++) {
@@ -1681,17 +1559,3 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
QEMU_THREAD_JOINABLE);
qatomic_inc(&multifd_recv_state->count);
}
-
-bool multifd_send_prepare_common(MultiFDSendParams *p)
-{
- multifd_send_zero_page_detect(p);
-
- if (!p->pages->normal_num) {
- p->next_packet_size = 0;
- return false;
- }
-
- multifd_send_prepare_header(p);
-
- return true;
-}
diff --git a/migration/multifd.h b/migration/multifd.h
index 0ecd6f4..9b6d81e 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -13,9 +13,27 @@
#ifndef QEMU_MIGRATION_MULTIFD_H
#define QEMU_MIGRATION_MULTIFD_H
+#include "exec/target_page.h"
#include "ram.h"
typedef struct MultiFDRecvData MultiFDRecvData;
+typedef struct MultiFDSendData MultiFDSendData;
+
+typedef enum {
+ /* No sync request */
+ MULTIFD_SYNC_NONE = 0,
+ /* Sync locally on the sender threads without pushing messages */
+ MULTIFD_SYNC_LOCAL,
+ /*
+ * Sync not only on the sender threads, but also push a MULTIFD_FLAG_SYNC
+ * message to the wire for each iochannel (which is for a remote sync).
+ *
+ * When remote sync is used, it needs to be paired with a follow-up
+ * RAM_SAVE_FLAG_EOS / RAM_SAVE_FLAG_MULTIFD_FLUSH message on the main
+ * channel.
+ */
+ MULTIFD_SYNC_ALL,
+} MultiFDSyncReq;
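/*
 * Editor's sketch, not part of the patch: choosing a sync level. The helper
 * name is hypothetical and only illustrates the pairing rule documented
 * above for MULTIFD_SYNC_ALL; in-tree this logic lives in the RAM code
 * behind multifd_ram_flush_and_sync().
 */
static inline int example_flush_and_sync(QEMUFile *f, bool remote)
{
    MultiFDSyncReq req = remote ? MULTIFD_SYNC_ALL : MULTIFD_SYNC_LOCAL;
    int ret = multifd_send_sync_main(req);

    if (!ret && req == MULTIFD_SYNC_ALL) {
        /*
         * A remote sync must be followed by a flush message on the main
         * channel; RAM_SAVE_FLAG_MULTIFD_FLUSH is assumed visible here
         * (in-tree it is defined in the RAM migration code).
         */
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }
    return ret;
}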
bool multifd_send_setup(void);
void multifd_send_shutdown(void);
@@ -26,22 +44,34 @@ void multifd_recv_shutdown(void);
bool multifd_recv_all_channels_created(void);
void multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
void multifd_recv_sync_main(void);
-int multifd_send_sync_main(void);
+int multifd_send_sync_main(MultiFDSyncReq req);
bool multifd_queue_page(RAMBlock *block, ram_addr_t offset);
bool multifd_recv(void);
MultiFDRecvData *multifd_get_recv_data(void);
+/* Multiple fd's */
+
+#define MULTIFD_MAGIC 0x11223344U
+#define MULTIFD_VERSION 1
+
/* Multifd Compression flags */
#define MULTIFD_FLAG_SYNC (1 << 0)
-/* We reserve 4 bits for compression methods */
-#define MULTIFD_FLAG_COMPRESSION_MASK (0xf << 1)
+/* We reserve 5 bits for compression methods */
+#define MULTIFD_FLAG_COMPRESSION_MASK (0x1f << 1)
/* we need to be compatible. Before compression value was 0 */
#define MULTIFD_FLAG_NOCOMP (0 << 1)
#define MULTIFD_FLAG_ZLIB (1 << 1)
#define MULTIFD_FLAG_ZSTD (2 << 1)
#define MULTIFD_FLAG_QPL (4 << 1)
#define MULTIFD_FLAG_UADK (8 << 1)
+#define MULTIFD_FLAG_QATZIP (16 << 1)
+
+/*
+ * If set it means that this packet contains device state
+ * (MultiFDPacketDeviceState_t), not RAM data (MultiFDPacket_t).
+ */
+#define MULTIFD_FLAG_DEVICE_STATE (32 << 1)
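/*
 * Editor's sketch, not part of the patch: how a receiver is expected to
 * interpret p->flags with the widened mask and the device state bit. The
 * helper is hypothetical; expected_comp would be e.g. MULTIFD_FLAG_ZSTD.
 */
static inline bool example_check_flags(uint32_t flags, uint32_t expected_comp,
                                       Error **errp)
{
    if (flags & MULTIFD_FLAG_DEVICE_STATE) {
        /* Device state packets carry no RAM compression payload */
        return true;
    }
    if ((flags & MULTIFD_FLAG_COMPRESSION_MASK) != expected_comp) {
        error_setg(errp, "unexpected compression flags %x", flags);
        return false;
    }
    return true;
}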
/* This value needs to be a multiple of qemu_target_page_size() */
#define MULTIFD_PACKET_SIZE (512 * 1024)
@@ -50,6 +80,11 @@ typedef struct {
uint32_t magic;
uint32_t version;
uint32_t flags;
+} __attribute__((packed)) MultiFDPacketHdr_t;
+
+typedef struct {
+ MultiFDPacketHdr_t hdr;
+
/* maximum number of allocated pages */
uint32_t pages_alloc;
/* non zero pages */
@@ -71,15 +106,27 @@ typedef struct {
} __attribute__((packed)) MultiFDPacket_t;
typedef struct {
+ MultiFDPacketHdr_t hdr;
+
+ char idstr[256];
+ uint32_t instance_id;
+
+ /* size of the next packet that contains the actual data */
+ uint32_t next_packet_size;
+} __attribute__((packed)) MultiFDPacketDeviceState_t;
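/*
 * Editor's note, not part of the patch: the shared header must be a strict
 * prefix of both packet layouts so the receiver can read it first and only
 * then decide how many more bytes belong to the packet. Compile-time checks
 * one could add (an assumption, not in the tree):
 */
QEMU_BUILD_BUG_ON(offsetof(MultiFDPacket_t, hdr) != 0);
QEMU_BUILD_BUG_ON(offsetof(MultiFDPacketDeviceState_t, hdr) != 0);
QEMU_BUILD_BUG_ON(sizeof(MultiFDPacketHdr_t) != 3 * sizeof(uint32_t));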
+
+typedef struct {
/* number of used pages */
uint32_t num;
/* number of normal pages */
uint32_t normal_num;
- /* number of allocated pages */
- uint32_t allocated;
- /* offset of each page */
- ram_addr_t *offset;
+ /*
+ * Pointer to the ramblock. NOTE: it's the caller's responsibility to make
+ * sure the pointer is always valid!
+ */
RAMBlock *block;
+ /* offset array of each page, managed by multifd */
+ ram_addr_t *offset;
} MultiFDPages_t;
struct MultiFDRecvData {
@@ -90,6 +137,48 @@ struct MultiFDRecvData {
};
typedef struct {
+ char *idstr;
+ uint32_t instance_id;
+ char *buf;
+ size_t buf_len;
+} MultiFDDeviceState_t;
+
+typedef enum {
+ MULTIFD_PAYLOAD_NONE,
+ MULTIFD_PAYLOAD_RAM,
+ MULTIFD_PAYLOAD_DEVICE_STATE,
+} MultiFDPayloadType;
+
+typedef struct MultiFDPayload {
+ MultiFDPages_t ram;
+ MultiFDDeviceState_t device_state;
+} MultiFDPayload;
+
+struct MultiFDSendData {
+ MultiFDPayloadType type;
+ MultiFDPayload u;
+};
+
+static inline bool multifd_payload_empty(MultiFDSendData *data)
+{
+ return data->type == MULTIFD_PAYLOAD_NONE;
+}
+
+static inline bool multifd_payload_device_state(MultiFDSendData *data)
+{
+ return data->type == MULTIFD_PAYLOAD_DEVICE_STATE;
+}
+
+static inline void multifd_set_payload_type(MultiFDSendData *data,
+ MultiFDPayloadType type)
+{
+ assert(multifd_payload_empty(data));
+ assert(type != MULTIFD_PAYLOAD_NONE);
+
+ data->type = type;
+}
+
+typedef struct {
/* Fields are only written at creating/deletion time */
/* No lock required for them, they are read only */
@@ -106,10 +195,6 @@ typedef struct {
QIOChannel *c;
/* packet allocated len */
uint32_t packet_len;
- /* guest page size */
- uint32_t page_size;
- /* number of pages in a full packet */
- uint32_t page_count;
/* multifd flags for sending ram */
int write_flags;
@@ -121,7 +206,7 @@ typedef struct {
/* multifd flags for each packet */
uint32_t flags;
/*
- * The sender thread has work to do if either of below boolean is set.
+ * The sender thread has work to do if either of the fields below is set.
*
* @pending_job: a job is pending
* @pending_sync: a sync request is pending
@@ -130,26 +215,19 @@ typedef struct {
* cleared by the multifd sender threads.
*/
bool pending_job;
- bool pending_sync;
- /* array of pages to sent.
- * The owner of 'pages' depends of 'pending_job' value:
- * pending_job == 0 -> migration_thread can use it.
- * pending_job != 0 -> multifd_channel can use it.
- */
- MultiFDPages_t *pages;
+ MultiFDSyncReq pending_sync;
+
+ MultiFDSendData *data;
/* thread local variables. No locking required */
- /* pointer to the packet */
+ /* pointers to the possible packet types */
MultiFDPacket_t *packet;
+ MultiFDPacketDeviceState_t *packet_device_state;
/* size of the next packet that contains pages */
uint32_t next_packet_size;
/* packets sent through this channel */
uint64_t packets_sent;
- /* non zero pages sent through this channel */
- uint64_t total_normal_pages;
- /* zero pages sent through this channel */
- uint64_t total_zero_pages;
/* buffers to send */
struct iovec *iov;
/* number of iovs used */
@@ -173,10 +251,6 @@ typedef struct {
QIOChannel *c;
/* packet allocated len */
uint32_t packet_len;
- /* guest page size */
- uint32_t page_size;
- /* number of pages in a full packet */
- uint32_t page_count;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
@@ -196,8 +270,9 @@ typedef struct {
/* thread local variables. No locking required */
- /* pointer to the packet */
+ /* pointers to the possible packet types */
MultiFDPacket_t *packet;
+ MultiFDPacketDeviceState_t *packet_dev_state;
/* size of the next packet that contains pages */
uint32_t next_packet_size;
/* packets received through this channel */
@@ -206,10 +281,6 @@ typedef struct {
RAMBlock *block;
/* ramblock host address */
uint8_t *host;
- /* non zero pages recv through this channel */
- uint64_t total_normal_pages;
- /* zero pages recv through this channel */
- uint64_t total_zero_pages;
/* buffers to recv */
struct iovec *iov;
/* Pages that are not zero */
@@ -222,36 +293,126 @@ typedef struct {
uint32_t zero_num;
/* used for de-compression methods */
void *compress_data;
+ /* Flags for the QIOChannel */
+ int read_flags;
} MultiFDRecvParams;
typedef struct {
- /* Setup for sending side */
+ /*
+ * The send_setup, send_cleanup, send_prepare are only called on
+ * the QEMU instance at the migration source.
+ */
+
+ /*
+ * Setup for sending side. Called once per channel during channel
+ * setup phase.
+ *
+ * Must allocate p->iov. If packets are in use (default), one
+ * extra iovec must be allocated for the packet header. Any memory
+ * allocated in this hook must be released at send_cleanup.
+ *
+ * p->write_flags may be used for passing flags to the QIOChannel.
+ *
+ * p->compress_data may be used by compression methods to store
+ * compression data.
+ */
int (*send_setup)(MultiFDSendParams *p, Error **errp);
- /* Cleanup for sending side */
+
+ /*
+ * Cleanup for sending side. Called once per channel during
+ * channel cleanup phase.
+ */
void (*send_cleanup)(MultiFDSendParams *p, Error **errp);
- /* Prepare the send packet */
+
+ /*
+ * Prepare the send packet. Called as a result of multifd_send()
+ * on the client side, with p pointing to the MultiFDSendParams of
+ * a channel that is currently idle.
+ *
+ * Must populate p->iov with the data to be sent, increment
+ * p->iovs_num to match the amount of iovecs used and set
+ * p->next_packet_size with the amount of data currently present
+ * in p->iov.
+ *
+ * Must indicate whether this is a compression packet by setting
+ * p->flags.
+ *
+ * As a last step, if packets are in use (default), must prepare
+ * the packet by calling multifd_send_fill_packet().
+ */
int (*send_prepare)(MultiFDSendParams *p, Error **errp);
- /* Setup for receiving side */
+
+ /*
+ * The recv_setup, recv_cleanup, recv are only called on the QEMU
+ * instance at the migration destination.
+ */
+
+ /*
+ * Setup for receiving side. Called once per channel during
+ * channel setup phase. May be empty.
+ *
+ * May allocate data structures for the receiving of data. May use
+ * p->iov. Compression methods may use p->compress_data.
+ */
int (*recv_setup)(MultiFDRecvParams *p, Error **errp);
- /* Cleanup for receiving side */
+
+ /*
+ * Cleanup for receiving side. Called once per channel during
+ * channel cleanup phase. May be empty.
+ */
void (*recv_cleanup)(MultiFDRecvParams *p);
- /* Read all data */
+
+ /*
+ * Data receive method. Called as a result of multifd_recv() on
+ * the client side, with p pointing to the MultiFDRecvParams of a
+ * channel that is currently idle. Only called if there is data
+ * available to receive.
+ *
+ * Must validate p->flags according to what was set at
+ * send_prepare.
+ *
+ * Must read the data from the QIOChannel p->c.
+ */
int (*recv)(MultiFDRecvParams *p, Error **errp);
} MultiFDMethods;
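/*
 * Editor's sketch, not part of the patch: a condensed, hypothetical
 * pass-through method that follows the contract documented above. It is
 * not the in-tree nocomp implementation; flag validation, zerocopy and
 * mapped-ram handling are trimmed for brevity.
 */
static int demo_send_setup(MultiFDSendParams *p, Error **errp)
{
    /* one extra iovec reserved for the packet header */
    p->iov = g_new0(struct iovec, multifd_ram_page_count() + 1);
    return 0;
}

static void demo_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    g_clear_pointer(&p->iov, g_free);
}

static int demo_send_prepare(MultiFDSendParams *p, Error **errp)
{
    MultiFDPages_t *pages = &p->data->u.ram;

    if (multifd_send_prepare_common(p)) {
        for (int i = 0; i < pages->normal_num; i++) {
            p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
            p->iov[p->iovs_num].iov_len = multifd_ram_page_size();
            p->iovs_num++;
        }
        p->next_packet_size = pages->normal_num * multifd_ram_page_size();
    }
    p->flags |= MULTIFD_FLAG_NOCOMP;
    multifd_send_fill_packet(p);
    return 0;
}

static int demo_recv_setup(MultiFDRecvParams *p, Error **errp)
{
    p->iov = g_new0(struct iovec, multifd_ram_page_count());
    return 0;
}

static void demo_recv_cleanup(MultiFDRecvParams *p)
{
    g_clear_pointer(&p->iov, g_free);
}

static int demo_recv(MultiFDRecvParams *p, Error **errp)
{
    /* p->flags should be validated against send_prepare; omitted here */
    multifd_recv_zero_page_process(p);

    if (!p->normal_num) {
        return 0;
    }
    for (int i = 0; i < p->normal_num; i++) {
        p->iov[i].iov_base = p->host + p->normal[i];
        p->iov[i].iov_len = multifd_ram_page_size();
        ramblock_recv_bitmap_set_offset(p->block, p->normal[i]);
    }
    return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}

static const MultiFDMethods demo_ops = {
    .send_setup = demo_send_setup,
    .send_cleanup = demo_send_cleanup,
    .send_prepare = demo_send_prepare,
    .recv_setup = demo_recv_setup,
    .recv_cleanup = demo_recv_cleanup,
    .recv = demo_recv,
};
/*
 * Registered once at startup from a constructor, e.g.:
 *     multifd_register_ops(MULTIFD_COMPRESSION_NONE, &demo_ops);
 * (in-tree that slot is already taken by the nocomp ops)
 */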
-void multifd_register_ops(int method, MultiFDMethods *ops);
+void multifd_register_ops(int method, const MultiFDMethods *ops);
void multifd_send_fill_packet(MultiFDSendParams *p);
bool multifd_send_prepare_common(MultiFDSendParams *p);
void multifd_send_zero_page_detect(MultiFDSendParams *p);
void multifd_recv_zero_page_process(MultiFDRecvParams *p);
-static inline void multifd_send_prepare_header(MultiFDSendParams *p)
+void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc);
+bool multifd_send(MultiFDSendData **send_data);
+MultiFDSendData *multifd_send_data_alloc(void);
+void multifd_send_data_clear(MultiFDSendData *data);
+void multifd_send_data_free(MultiFDSendData *data);
+
+static inline uint32_t multifd_ram_page_size(void)
{
- p->iov[0].iov_len = p->packet_len;
- p->iov[0].iov_base = p->packet;
- p->iovs_num++;
+ return qemu_target_page_size();
}
-void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc);
+static inline uint32_t multifd_ram_page_count(void)
+{
+ return MULTIFD_PACKET_SIZE / qemu_target_page_size();
+}
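/*
 * Editor's sketch, not part of the patch: the derived packet buffer size,
 * matching the allocations in multifd_send_setup()/multifd_recv_setup().
 * With the default 4 KiB target page this is 512 KiB / 4 KiB = 128 offsets
 * per packet.
 */
static inline uint32_t multifd_ram_packet_len_example(void)
{
    return sizeof(MultiFDPacket_t) +
           sizeof(uint64_t) * multifd_ram_page_count();
}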
+
+void multifd_ram_save_setup(void);
+void multifd_ram_save_cleanup(void);
+int multifd_ram_flush_and_sync(QEMUFile *f);
+bool multifd_ram_sync_per_round(void);
+bool multifd_ram_sync_per_section(void);
+void multifd_ram_payload_alloc(MultiFDPages_t *pages);
+void multifd_ram_payload_free(MultiFDPages_t *pages);
+void multifd_ram_fill_packet(MultiFDSendParams *p);
+int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp);
+
+void multifd_send_data_clear_device_state(MultiFDDeviceState_t *device_state);
+
+void multifd_device_state_send_setup(void);
+void multifd_device_state_send_cleanup(void);
+
+void multifd_device_state_send_prepare(MultiFDSendParams *p);
#endif
diff --git a/migration/options.c b/migration/options.c
index 645f550..162c72c 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -19,16 +19,17 @@
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qnull.h"
-#include "sysemu/runstate.h"
+#include "qobject/qnull.h"
+#include "system/runstate.h"
#include "migration/colo.h"
+#include "migration/cpr.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "ram.h"
#include "options.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
@@ -55,6 +56,13 @@
#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
+/*
+ * 1: best speed, ... 9: best compress ratio
+ * There is some nuance here. Refer to QATzip documentation to understand
+ * the mapping of QATzip levels to standard deflate levels.
+ */
+#define DEFAULT_MIGRATE_MULTIFD_QATZIP_LEVEL 1
+
/* 0: means nocompress, 1: best speed, ... 20: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1
@@ -78,19 +86,23 @@
#define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */
#define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */
-Property migration_properties[] = {
+const Property migration_properties[] = {
DEFINE_PROP_BOOL("store-global-state", MigrationState,
store_global_state, true),
DEFINE_PROP_BOOL("send-configuration", MigrationState,
send_configuration, true),
DEFINE_PROP_BOOL("send-section-footer", MigrationState,
send_section_footer, true),
+ DEFINE_PROP_BOOL("send-switchover-start", MigrationState,
+ send_switchover_start, true),
DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState,
multifd_flush_after_each_section, false),
DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
preempt_pre_7_2, false),
+ DEFINE_PROP_BOOL("multifd-clean-tls-termination", MigrationState,
+ multifd_clean_tls_termination, true),
/* Migration parameters */
DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
@@ -123,6 +135,9 @@ Property migration_properties[] = {
DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
parameters.multifd_zlib_level,
DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
+ DEFINE_PROP_UINT8("multifd-qatzip-level", MigrationState,
+ parameters.multifd_qatzip_level,
+ DEFAULT_MIGRATE_MULTIFD_QATZIP_LEVEL),
DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
parameters.multifd_zstd_level,
DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
@@ -186,8 +201,8 @@ Property migration_properties[] = {
MIGRATION_CAPABILITY_SWITCHOVER_ACK),
DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT),
DEFINE_PROP_MIG_CAP("mapped-ram", MIGRATION_CAPABILITY_MAPPED_RAM),
- DEFINE_PROP_END_OF_LIST(),
};
+const size_t migration_properties_count = ARRAY_SIZE(migration_properties);
bool migrate_auto_converge(void)
{
@@ -196,6 +211,13 @@ bool migrate_auto_converge(void)
return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}
+bool migrate_send_switchover_start(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ return s->send_switchover_start;
+}
+
bool migrate_background_snapshot(void)
{
MigrationState *s = migrate_get_current();
@@ -329,13 +351,6 @@ bool migrate_xbzrle(void)
return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
}
-bool migrate_zero_blocks(void)
-{
- MigrationState *s = migrate_get_current();
-
- return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
-}
-
bool migrate_zero_copy_send(void)
{
MigrationState *s = migrate_get_current();
@@ -433,6 +448,24 @@ static bool migrate_incoming_started(void)
return !!migration_incoming_get_current()->transport_data;
}
+bool migrate_rdma_caps_check(bool *caps, Error **errp)
+{
+ if (caps[MIGRATION_CAPABILITY_XBZRLE]) {
+ error_setg(errp, "RDMA and XBZRLE can't be used together");
+ return false;
+ }
+ if (caps[MIGRATION_CAPABILITY_MULTIFD]) {
+ error_setg(errp, "RDMA and multifd can't be used together");
+ return false;
+ }
+ if (caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
+ error_setg(errp, "RDMA and postcopy-ram can't be used together");
+ return false;
+ }
+
+ return true;
+}
+
/**
* @migration_caps_check - check capability compatibility
*
@@ -447,6 +480,10 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
ERRP_GUARD();
MigrationIncomingState *mis = migration_incoming_get_current();
+ if (new_caps[MIGRATION_CAPABILITY_ZERO_BLOCKS]) {
+ warn_report("zero-blocks capability is deprecated");
+ }
+
#ifndef CONFIG_REPLICATION
if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
error_setg(errp, "QEMU compiled without replication module"
@@ -472,11 +509,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
error_setg(errp, "Postcopy is not compatible with ignore-shared");
return false;
}
-
- if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
- error_setg(errp, "Postcopy is not yet compatible with multifd");
- return false;
- }
}
if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
@@ -536,7 +568,7 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
return false;
}
- if (migrate_incoming_started()) {
+ if (!migrate_postcopy_preempt() && migrate_incoming_started()) {
error_setg(errp,
"Postcopy preempt must be set before incoming starts");
return false;
@@ -544,7 +576,7 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
}
if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
- if (migrate_incoming_started()) {
+ if (!migrate_multifd() && migrate_incoming_started()) {
error_setg(errp, "Multifd must be set before incoming starts");
return false;
}
@@ -592,26 +624,13 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
}
}
- return true;
-}
-
-bool migrate_cap_set(int cap, bool value, Error **errp)
-{
- MigrationState *s = migrate_get_current();
- bool new_caps[MIGRATION_CAPABILITY__MAX];
-
- if (migration_is_running()) {
- error_setg(errp, "There's a migration process in progress");
- return false;
- }
-
- memcpy(new_caps, s->capabilities, sizeof(new_caps));
- new_caps[cap] = value;
-
- if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
+ /*
+ * On the destination side, check the cases where a capability is being set
+ * after the incoming thread has started.
+ */
+ if (migrate_rdma() && !migrate_rdma_caps_check(new_caps, errp)) {
return false;
}
- s->capabilities[cap] = value;
return true;
}
@@ -758,8 +777,11 @@ uint64_t migrate_max_postcopy_bandwidth(void)
MigMode migrate_mode(void)
{
- MigrationState *s = migrate_get_current();
- MigMode mode = s->parameters.mode;
+ MigMode mode = cpr_get_incoming_mode();
+
+ if (mode == MIG_MODE_NONE) {
+ mode = migrate_get_current()->parameters.mode;
+ }
assert(mode >= 0 && mode < MIG_MODE__MAX);
return mode;
@@ -787,6 +809,13 @@ int migrate_multifd_zlib_level(void)
return s->parameters.multifd_zlib_level;
}
+int migrate_multifd_qatzip_level(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ return s->parameters.multifd_qatzip_level;
+}
+
int migrate_multifd_zstd_level(void)
{
MigrationState *s = migrate_get_current();
@@ -892,6 +921,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
params->multifd_compression = s->parameters.multifd_compression;
params->has_multifd_zlib_level = true;
params->multifd_zlib_level = s->parameters.multifd_zlib_level;
+ params->has_multifd_qatzip_level = true;
+ params->multifd_qatzip_level = s->parameters.multifd_qatzip_level;
params->has_multifd_zstd_level = true;
params->multifd_zstd_level = s->parameters.multifd_zstd_level;
params->has_xbzrle_cache_size = true;
@@ -946,6 +977,7 @@ void migrate_params_init(MigrationParameters *params)
params->has_multifd_channels = true;
params->has_multifd_compression = true;
params->has_multifd_zlib_level = true;
+ params->has_multifd_qatzip_level = true;
params->has_multifd_zstd_level = true;
params->has_xbzrle_cache_size = true;
params->has_max_postcopy_bandwidth = true;
@@ -1038,6 +1070,14 @@ bool migrate_params_check(MigrationParameters *params, Error **errp)
return false;
}
+ if (params->has_multifd_qatzip_level &&
+ ((params->multifd_qatzip_level > 9) ||
+ (params->multifd_qatzip_level < 1))) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_qatzip_level",
+ "a value between 1 and 9");
+ return false;
+ }
+
if (params->has_multifd_zstd_level &&
(params->multifd_zstd_level > 20)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
@@ -1173,6 +1213,11 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
dest->tls_hostname = params->tls_hostname->u.s;
}
+ if (params->tls_authz) {
+ assert(params->tls_authz->type == QTYPE_QSTRING);
+ dest->tls_authz = params->tls_authz->u.s;
+ }
+
if (params->has_max_bandwidth) {
dest->max_bandwidth = params->max_bandwidth;
}
@@ -1195,6 +1240,9 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
if (params->has_multifd_compression) {
dest->multifd_compression = params->multifd_compression;
}
+ if (params->has_multifd_qatzip_level) {
+ dest->multifd_qatzip_level = params->multifd_qatzip_level;
+ }
if (params->has_multifd_zlib_level) {
dest->multifd_zlib_level = params->multifd_zlib_level;
}
@@ -1315,6 +1363,9 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
if (params->has_multifd_compression) {
s->parameters.multifd_compression = params->multifd_compression;
}
+ if (params->has_multifd_qatzip_level) {
+ s->parameters.multifd_qatzip_level = params->multifd_qatzip_level;
+ }
if (params->has_multifd_zlib_level) {
s->parameters.multifd_zlib_level = params->multifd_zlib_level;
}
diff --git a/migration/options.h b/migration/options.h
index a239702..82d8397 100644
--- a/migration/options.h
+++ b/migration/options.h
@@ -20,7 +20,8 @@
/* migration properties */
-extern Property migration_properties[];
+extern const Property migration_properties[];
+extern const size_t migration_properties_count;
/* capabilities */
@@ -40,7 +41,6 @@ bool migrate_release_ram(void);
bool migrate_return_path(void);
bool migrate_validate_uuid(void);
bool migrate_xbzrle(void);
-bool migrate_zero_blocks(void);
bool migrate_zero_copy_send(void);
/*
@@ -57,8 +57,8 @@ bool migrate_tls(void);
/* capabilities helpers */
+bool migrate_rdma_caps_check(bool *caps, Error **errp);
bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp);
-bool migrate_cap_set(int cap, bool value, Error **errp);
/* parameters */
@@ -78,6 +78,7 @@ uint64_t migrate_max_postcopy_bandwidth(void);
int migrate_multifd_channels(void);
MultiFDCompression migrate_multifd_compression(void);
int migrate_multifd_zlib_level(void);
+int migrate_multifd_qatzip_level(void);
int migrate_multifd_zstd_level(void);
uint8_t migrate_throttle_trigger_threshold(void);
const char *migrate_tls_authz(void);
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 1c374b7..75fd310 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -27,11 +27,11 @@
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
-#include "exec/ramblock.h"
+#include "system/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"
@@ -90,10 +90,10 @@ void postcopy_thread_create(MigrationIncomingState *mis,
QemuThread *thread, const char *name,
void *(*fn)(void *), int joinable)
{
- qemu_sem_init(&mis->thread_sync_sem, 0);
+ qemu_event_init(&mis->thread_sync_event, false);
qemu_thread_create(thread, name, fn, mis, joinable);
- qemu_sem_wait(&mis->thread_sync_sem);
- qemu_sem_destroy(&mis->thread_sync_sem);
+ qemu_event_wait(&mis->thread_sync_event);
+ qemu_event_destroy(&mis->thread_sync_event);
}
/* Postcopy needs to detect accesses to pages that haven't yet been copied
@@ -651,8 +651,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
mis->have_fault_thread = false;
}
- if (enable_mlock) {
- if (os_mlock() < 0) {
+ if (should_mlock(mlock_state)) {
+ if (os_mlock(is_mlock_on_fault(mlock_state)) < 0) {
error_report("mlock: %s", strerror(errno));
/*
* It doesn't feel right to fail at this point, we have a valid
@@ -746,18 +746,10 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
RAMBlock *rb)
{
size_t pagesize = qemu_ram_pagesize(rb);
- struct uffdio_range range;
- int ret;
trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
- range.start = ROUND_DOWN(client_addr, pagesize);
- range.len = pagesize;
- ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
- if (ret) {
- error_report("%s: Failed to wake: %zx in %s (%s)",
- __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
- strerror(errno));
- }
- return ret;
+ return uffd_wakeup(pcfd->fd,
+ (void *)(uintptr_t)ROUND_DOWN(client_addr, pagesize),
+ pagesize);
}
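The open-coded UFFDIO_WAKE ioctl removed above moves behind the uffd_wakeup() helper. Based on the deleted lines, the helper presumably wraps roughly the following (a sketch, not the actual util/userfaultfd.c implementation):

    /* Sketch of what uffd_wakeup(fd, addr, len) is expected to do */
    static int uffd_wakeup_sketch(int uffd_fd, void *addr, uint64_t len)
    {
        struct uffdio_range range = {
            .start = (uintptr_t)addr,
            .len = len,
        };

        if (ioctl(uffd_fd, UFFDIO_WAKE, &range)) {
            error_report("%s: UFFDIO_WAKE failed: %s", __func__,
                         strerror(errno));
            return -errno;
        }
        return 0;
    }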
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
@@ -972,7 +964,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
trace_postcopy_ram_fault_thread_entry();
rcu_register_thread();
mis->last_rb = NULL; /* last RAMBlock we sent part of */
- qemu_sem_post(&mis->thread_sync_sem);
+ qemu_event_set(&mis->thread_sync_event);
struct pollfd *pfd;
size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
@@ -1238,7 +1230,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
return -1;
}
- postcopy_thread_create(mis, &mis->fault_thread, "mig/dst/fault",
+ postcopy_thread_create(mis, &mis->fault_thread,
+ MIGRATION_THREAD_DST_FAULT,
postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
mis->have_fault_thread = true;
@@ -1258,7 +1251,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
* This thread needs to be created after the temp pages because
* it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
*/
- postcopy_thread_create(mis, &mis->postcopy_prio_thread, "mig/dst/preempt",
+ postcopy_thread_create(mis, &mis->postcopy_prio_thread,
+ MIGRATION_THREAD_DST_PREEMPT,
postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
}
@@ -1275,18 +1269,10 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
int ret;
if (from_addr) {
- struct uffdio_copy copy_struct;
- copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
- copy_struct.src = (uint64_t)(uintptr_t)from_addr;
- copy_struct.len = pagesize;
- copy_struct.mode = 0;
- ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
+ ret = uffd_copy_page(userfault_fd, host_addr, from_addr, pagesize,
+ false);
} else {
- struct uffdio_zeropage zero_struct;
- zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
- zero_struct.range.len = pagesize;
- zero_struct.mode = 0;
- ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
+ ret = uffd_zero_page(userfault_fd, host_addr, pagesize, false);
}
if (!ret) {
qemu_mutex_lock(&mis->page_request_mutex);
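Likewise, the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls are folded into the uffd_copy_page() and uffd_zero_page() helpers. Judging from the removed lines, the copy path looks roughly like this (a sketch; any extra handling in the real util/userfaultfd.c helpers is assumed, not shown by this diff):

    /* Sketch of the copy path; 'dont_wake' maps to
     * UFFDIO_COPY_MODE_DONTWAKE when true. */
    static int uffd_copy_page_sketch(int uffd_fd, void *dst, void *src,
                                     uint64_t len, bool dont_wake)
    {
        struct uffdio_copy copy = {
            .dst = (uintptr_t)dst,
            .src = (uintptr_t)src,
            .len = len,
            .mode = dont_wake ? UFFDIO_COPY_MODE_DONTWAKE : 0,
        };

        return ioctl(uffd_fd, UFFDIO_COPY, &copy) ? -errno : 0;
    }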
@@ -1343,18 +1329,16 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
RAMBlock *rb)
{
size_t pagesize = qemu_ram_pagesize(rb);
+ int e;
/* copy also acks to the kernel waking the stalled thread up
* TODO: We can inhibit that ack and only do it if it was requested
* which would be slightly cheaper, but we'd have to be careful
* of the order of updating our page state.
*/
- if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
- int e = errno;
- error_report("%s: %s copy host: %p from: %p (size: %zd)",
- __func__, strerror(e), host, from, pagesize);
-
- return -e;
+ e = qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb);
+ if (e) {
+ return e;
}
trace_postcopy_place_page(host);
@@ -1376,12 +1360,10 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
* but it's not available for everything (e.g. hugetlbpages)
*/
if (qemu_ram_is_uf_zeroable(rb)) {
- if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
- int e = errno;
- error_report("%s: %s zero host: %p",
- __func__, strerror(e), host);
-
- return -e;
+ int e;
+ e = qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb);
+ if (e) {
+ return e;
}
return postcopy_notify_shared_wake(rb,
qemu_ram_block_host_offset(rb,
@@ -1411,49 +1393,42 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis)
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
uint64_t client_addr, uint64_t rb_offset)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
RAMBlock *rb)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
RAMBlock *rb)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
int postcopy_wake_shared(struct PostCopyFD *pcfd,
uint64_t client_addr,
RAMBlock *rb)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
#endif
@@ -1741,7 +1716,7 @@ void *postcopy_preempt_thread(void *opaque)
rcu_register_thread();
- qemu_sem_post(&mis->thread_sync_sem);
+ qemu_event_set(&mis->thread_sync_event);
/*
* The preempt channel is established in asynchronous way. Wait
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index b6d2f58..b6ac190 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -37,6 +37,11 @@
#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
+typedef struct FdEntry {
+ QTAILQ_ENTRY(FdEntry) entry;
+ int fd;
+} FdEntry;
+
struct QEMUFile {
QIOChannel *ioc;
bool is_writable;
@@ -51,6 +56,9 @@ struct QEMUFile {
int last_error;
Error *last_error_obj;
+
+ bool can_pass_fd;
+ QTAILQ_HEAD(, FdEntry) fds;
};
/*
@@ -109,6 +117,8 @@ static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable)
object_ref(ioc);
f->ioc = ioc;
f->is_writable = is_writable;
+ f->can_pass_fd = qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS);
+ QTAILQ_INIT(&f->fds);
return f;
}
@@ -310,6 +320,10 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
int len;
int pending;
Error *local_error = NULL;
+ g_autofree int *fds = NULL;
+ size_t nfd = 0;
+ int **pfds = f->can_pass_fd ? &fds : NULL;
+ size_t *pnfd = f->can_pass_fd ? &nfd : NULL;
assert(!qemu_file_is_writable(f));
@@ -325,10 +339,9 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
}
do {
- len = qio_channel_read(f->ioc,
- (char *)f->buf + pending,
- IO_BUF_SIZE - pending,
- &local_error);
+ struct iovec iov = { f->buf + pending, IO_BUF_SIZE - pending };
+ len = qio_channel_readv_full(f->ioc, &iov, 1, pfds, pnfd, 0,
+ &local_error);
if (len == QIO_CHANNEL_ERR_BLOCK) {
if (qemu_in_coroutine()) {
qio_channel_yield(f->ioc, G_IO_IN);
@@ -348,9 +361,66 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
qemu_file_set_error_obj(f, len, local_error);
}
+ for (int i = 0; i < nfd; i++) {
+ FdEntry *fde = g_new0(FdEntry, 1);
+ fde->fd = fds[i];
+ QTAILQ_INSERT_TAIL(&f->fds, fde, entry);
+ }
+
return len;
}
+int qemu_file_put_fd(QEMUFile *f, int fd)
+{
+ int ret = 0;
+ QIOChannel *ioc = qemu_file_get_ioc(f);
+ Error *err = NULL;
+ struct iovec iov = { (void *)" ", 1 };
+
+ /*
+ * Send a dummy byte so qemu_fill_buffer on the receiving side does not
+ * fail with a len=0 error. Flush first to maintain ordering wrt other
+ * data.
+ */
+
+ qemu_fflush(f);
+ if (qio_channel_writev_full(ioc, &iov, 1, &fd, 1, 0, &err) < 1) {
+ error_report_err(error_copy(err));
+ qemu_file_set_error_obj(f, -EIO, err);
+ ret = -1;
+ }
+ trace_qemu_file_put_fd(f->ioc->name, fd, ret);
+ return ret;
+}
+
+int qemu_file_get_fd(QEMUFile *f)
+{
+ int fd = -1;
+ FdEntry *fde;
+
+ if (!f->can_pass_fd) {
+ Error *err = NULL;
+ error_setg(&err, "%s does not support fd passing", f->ioc->name);
+ error_report_err(error_copy(err));
+ qemu_file_set_error_obj(f, -EIO, err);
+ goto out;
+ }
+
+ /* Force the dummy byte and its fd passenger to appear. */
+ qemu_peek_byte(f, 0);
+
+ fde = QTAILQ_FIRST(&f->fds);
+ if (fde) {
+ qemu_get_byte(f); /* Drop the dummy byte */
+ fd = fde->fd;
+ QTAILQ_REMOVE(&f->fds, fde, entry);
+ g_free(fde);
+ }
+out:
+ trace_qemu_file_get_fd(f->ioc->name, fd);
+ return fd;
+}
+
/** Closes the file
*
* Returns negative error value if any error happened on previous operations or
@@ -361,11 +431,17 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
*/
int qemu_fclose(QEMUFile *f)
{
+ FdEntry *fde, *next;
int ret = qemu_fflush(f);
int ret2 = qio_channel_close(f->ioc, NULL);
if (ret >= 0) {
ret = ret2;
}
+ QTAILQ_FOREACH_SAFE(fde, &f->fds, entry, next) {
+ warn_report("qemu_fclose: received fd %d was never claimed", fde->fd);
+ close(fde->fd);
+ g_free(fde);
+ }
g_clear_pointer(&f->ioc, object_unref);
error_free(f->last_error_obj);
g_free(f);
@@ -485,8 +561,6 @@ void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
}
stat64_add(&mig_stats.qemu_file_transferred, buflen);
-
- return;
}
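Taken together, the new fd-passing support lets one side attach a file descriptor to the stream and the other side claim it in order. A hedged usage sketch (error handling trimmed; the variable names below are illustrative only):

    /* Sender: queue an fd onto the migration channel. The helper above
     * flushes pending data and sends the fd alongside a one-byte payload. */
    if (qemu_file_put_fd(f, fd_to_send) < 0) {
        return -1;
    }

    /* Receiver: consume the dummy byte and take ownership of the fd.
     * Returns -1 if the channel cannot pass fds or none has arrived. */
    int received_fd = qemu_file_get_fd(f);
    if (received_fd < 0) {
        return -1;
    }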
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index 11c2120..f5b9f43 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -33,6 +33,8 @@ QEMUFile *qemu_file_new_input(QIOChannel *ioc);
QEMUFile *qemu_file_new_output(QIOChannel *ioc);
int qemu_fclose(QEMUFile *f);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QEMUFile, qemu_fclose)
+
/*
* qemu_file_transferred:
*
@@ -79,5 +81,7 @@ size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
off_t pos);
QIOChannel *qemu_file_get_ioc(QEMUFile *file);
+int qemu_file_put_fd(QEMUFile *f, int fd);
+int qemu_file_get_fd(QEMUFile *f);
#endif
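The G_DEFINE_AUTOPTR_CLEANUP_FUNC() addition lets callers tie a QEMUFile's lifetime to scope with g_autoptr. A small sketch of the resulting idiom (the surrounding function is illustrative):

    static int example_use_autoptr(QIOChannel *ioc)
    {
        g_autoptr(QEMUFile) file = qemu_file_new_input(ioc);

        /* ... read from 'file' ... */

        return 0;   /* qemu_fclose(file) runs automatically on return */
    }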
diff --git a/migration/ram.c b/migration/ram.c
index edec1a2..2140785 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -48,19 +48,19 @@
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
-#include "sysemu/cpu-throttle.h"
+#include "system/cpu-throttle.h"
#include "savevm.h"
#include "qemu/iov.h"
#include "multifd.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "rdma.h"
#include "options.h"
-#include "sysemu/dirtylimit.h"
-#include "sysemu/kvm.h"
+#include "system/dirtylimit.h"
+#include "system/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */
@@ -72,27 +72,6 @@
/* ram save/restore */
/*
- * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
- * worked for pages that were filled with the same char. We switched
- * it to only search for the zero value. And to avoid confusion with
- * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
- *
- * RAM_SAVE_FLAG_FULL was obsoleted in 2009.
- *
- * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1.
- */
-#define RAM_SAVE_FLAG_FULL 0x01
-#define RAM_SAVE_FLAG_ZERO 0x02
-#define RAM_SAVE_FLAG_MEM_SIZE 0x04
-#define RAM_SAVE_FLAG_PAGE 0x08
-#define RAM_SAVE_FLAG_EOS 0x10
-#define RAM_SAVE_FLAG_CONTINUE 0x20
-#define RAM_SAVE_FLAG_XBZRLE 0x40
-/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
-#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
-/* We can't use any flag that is bigger than 0x200 */
-
-/*
* mapped-ram migration supports O_DIRECT, so we need to make sure the
* userspace buffer, the IO operation size and the file offset are
* aligned according to the underlying device's block size. The first
@@ -112,6 +91,36 @@
XBZRLECacheStats xbzrle_counters;
+/*
+ * This structure locates a specific location of a guest page. In QEMU,
+ * it's described in a tuple of (ramblock, offset).
+ */
+struct PageLocation {
+ RAMBlock *block;
+ unsigned long offset;
+};
+typedef struct PageLocation PageLocation;
+
+/**
+ * PageLocationHint: describes a hint to a page location
+ *
+ * @valid: set if the hint is valid and to be consumed
+ * @valid: set if the hint is valid and to be consumed
+ * @location: the hint content
+ *
+ * In postcopy preempt mode, the urgent channel may provide hints to the
+ * background channel, so that QEMU source can try to migrate whatever is
+ * right after the requested urgent pages.
+ *
+ * This is based on the assumption that the VM (already running on the
+ * destination side) tends to access the memory with spatial locality.
+ * This is also the default behavior of vanilla postcopy (preempt off).
+ */
+struct PageLocationHint {
+ bool valid;
+ PageLocation location;
+};
+typedef struct PageLocationHint PageLocationHint;
+
/* used by the search for pages to send */
struct PageSearchStatus {
/* The migration channel used for a specific host page */
@@ -216,7 +225,9 @@ static bool postcopy_preempt_active(void)
bool migrate_ram_is_ignored(RAMBlock *block)
{
+ MigMode mode = migrate_mode();
return !qemu_ram_is_migratable(block) ||
+ mode == MIG_MODE_CPR_TRANSFER ||
(migrate_ignore_shared() && qemu_ram_is_shared(block)
&& qemu_ram_is_named_file(block));
}
@@ -414,6 +425,13 @@ struct RAMState {
* RAM migration.
*/
unsigned int postcopy_bmap_sync_requested;
+ /*
+ * Page hint during postcopy when preempt mode is on. Return path
+ * thread sets it, while background migration thread consumes it.
+ *
+ * Protected by @bitmap_mutex.
+ */
+ PageLocationHint page_hint;
};
typedef struct RAMState RAMState;
@@ -467,13 +485,6 @@ void ram_transferred_add(uint64_t bytes)
}
}
-struct MigrationOps {
- int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
-};
-typedef struct MigrationOps MigrationOps;
-
-MigrationOps *migration_ops;
-
static int ram_save_host_page_urgent(PageSearchStatus *pss);
/* NOTE: page is the PFN not real ram_addr_t. */
@@ -820,14 +831,22 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
bool ret;
/*
- * Clear dirty bitmap if needed. This _must_ be called before we
- * send any of the page in the chunk because we need to make sure
- * we can capture further page content changes when we sync dirty
- * log the next time. So as long as we are going to send any of
- * the page in the chunk we clear the remote dirty bitmap for all.
- * Clearing it earlier won't be a problem, but too late will.
+ * During the last stage (after source VM stopped), resetting the write
+ * protections isn't needed as we know there will be either (1) no
+ * further writes if migration will complete, or (2) migration fails
+ * at last then tracking isn't needed either.
*/
- migration_clear_memory_region_dirty_bitmap(rb, page);
+ if (!rs->last_stage) {
+ /*
+ * Clear dirty bitmap if needed. This _must_ be called before we
+ * send any of the page in the chunk because we need to make sure
+ * we can capture further page content changes when we sync dirty
+ * log the next time. So as long as we are going to send any of
+ * the page in the chunk we clear the remote dirty bitmap for all.
+ * Clearing it earlier won't be a problem, but too late will.
+ */
+ migration_clear_memory_region_dirty_bitmap(rb, page);
+ }
ret = test_and_clear_bit(page, rb->bmap);
if (ret) {
@@ -837,8 +856,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
return ret;
}
-static void dirty_bitmap_clear_section(MemoryRegionSection *section,
- void *opaque)
+static int dirty_bitmap_clear_section(MemoryRegionSection *section,
+ void *opaque)
{
const hwaddr offset = section->offset_within_region;
const hwaddr size = int128_get64(section->size);
@@ -857,6 +876,7 @@ static void dirty_bitmap_clear_section(MemoryRegionSection *section,
}
*cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
bitmap_clear(rb->bmap, start, npages);
+ return 0;
}
/*
@@ -1088,9 +1108,10 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
}
}
-static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
+void migration_bitmap_sync_precopy(bool last_stage)
{
Error *local_err = NULL;
+ assert(ram_state);
/*
* The current notifier usage is just an optimization to migration, so we
@@ -1101,7 +1122,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
local_err = NULL;
}
- migration_bitmap_sync(rs, last_stage);
+ migration_bitmap_sync(ram_state, last_stage);
if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
error_report_err(local_err);
@@ -1169,32 +1190,6 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
}
/*
- * @pages: the number of pages written by the control path,
- * < 0 - error
- * > 0 - number of pages written
- *
- * Return true if the pages has been saved, otherwise false is returned.
- */
-static bool control_save_page(PageSearchStatus *pss,
- ram_addr_t offset, int *pages)
-{
- int ret;
-
- ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
- TARGET_PAGE_SIZE);
- if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
- return false;
- }
-
- if (ret == RAM_SAVE_CONTROL_DELAYED) {
- *pages = 1;
- return true;
- }
- *pages = ret;
- return true;
-}
-
-/*
* directly send the page to the stream
*
* Returns the number of pages written.
@@ -1322,19 +1317,12 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
pss->page = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
- if (migrate_multifd() &&
- (!migrate_multifd_flush_after_each_section() ||
- migrate_mapped_ram())) {
+ if (multifd_ram_sync_per_round()) {
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
- int ret = multifd_send_sync_main();
+ int ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
-
- if (!migrate_mapped_ram()) {
- qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
- qemu_fflush(f);
- }
}
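The removed per-call sequence (multifd sync plus RAM_SAVE_FLAG_MULTIFD_FLUSH on the main stream) is what the new multifd_ram_flush_and_sync() helper is expected to bundle. A hedged sketch of that consolidation, reconstructed from the lines deleted here rather than from the helper's actual definition:

    /* Sketch: what multifd_ram_flush_and_sync(f) presumably wraps */
    int ret = multifd_send_sync_main();
    if (ret < 0) {
        return ret;
    }
    if (!migrate_mapped_ram()) {
        /* Tell the destination's main channel that all multifd
         * channels have been flushed up to this point. */
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
        qemu_fflush(f);
    }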
/* Hit the end of the list */
@@ -1765,19 +1753,17 @@ bool ram_write_tracking_available(void)
bool ram_write_tracking_compatible(void)
{
- assert(0);
- return false;
+ g_assert_not_reached();
}
int ram_write_tracking_start(void)
{
- assert(0);
- return -1;
+ g_assert_not_reached();
}
void ram_write_tracking_stop(void)
{
- assert(0);
+ g_assert_not_reached();
}
#endif /* defined(__linux__) */
@@ -1795,7 +1781,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
RAMBlock *block;
ram_addr_t offset;
- bool dirty;
+ bool dirty = false;
do {
block = unqueue_page(rs, &offset);
@@ -1987,53 +1973,40 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
}
/**
- * ram_save_target_page_legacy: save one target page
- *
- * Returns the number of pages written
+ * ram_save_target_page: save one target page to the precopy thread
+ * OR to multifd workers.
*
* @rs: current RAM state
* @pss: data about the page we want to send
*/
-static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
{
ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
int res;
- if (control_save_page(pss, offset, &res)) {
- return res;
- }
+ /* Hand over to RDMA first */
+ if (migrate_rdma()) {
+ res = rdma_control_save_page(pss->pss_channel, pss->block->offset,
+ offset, TARGET_PAGE_SIZE);
- if (save_zero_page(rs, pss, offset)) {
- return 1;
+ if (res == RAM_SAVE_CONTROL_DELAYED) {
+ res = 1;
+ }
+ return res;
}
- return ram_save_page(rs, pss);
-}
-
-/**
- * ram_save_target_page_multifd: send one target page to multifd workers
- *
- * Returns 1 if the page was queued, -1 otherwise.
- *
- * @rs: current RAM state
- * @pss: data about the page we want to send
- */
-static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
-{
- RAMBlock *block = pss->block;
- ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
-
- /*
- * While using multifd live migration, we still need to handle zero
- * page checking on the migration main thread.
- */
- if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
+ if (!migrate_multifd()
+ || migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
if (save_zero_page(rs, pss, offset)) {
return 1;
}
}
- return ram_save_multifd_page(block, offset);
+ if (migrate_multifd() && !migration_in_postcopy()) {
+ return ram_save_multifd_page(pss->block, offset);
+ }
+
+ return ram_save_page(rs, pss);
}
/* Should be called before sending a host page */
@@ -2091,6 +2064,21 @@ static void pss_host_page_finish(PageSearchStatus *pss)
pss->host_page_start = pss->host_page_end = 0;
}
+static void ram_page_hint_update(RAMState *rs, PageSearchStatus *pss)
+{
+ PageLocationHint *hint = &rs->page_hint;
+
+ /* If there's a pending hint not consumed, don't bother */
+ if (hint->valid) {
+ return;
+ }
+
+ /* Provide a hint to the background stream otherwise */
+ hint->location.block = pss->block;
+ hint->location.offset = pss->page;
+ hint->valid = true;
+}
+
/*
* Send an urgent host page specified by `pss'. Need to be called with
* bitmap_mutex held.
@@ -2122,7 +2110,7 @@ static int ram_save_host_page_urgent(PageSearchStatus *pss)
if (page_dirty) {
/* Be strict to return code; it must be 1, or what else? */
- if (migration_ops->ram_save_target_page(rs, pss) != 1) {
+ if (ram_save_target_page(rs, pss) != 1) {
error_report_once("%s: ram_save_target_page failed", __func__);
ret = -1;
goto out;
@@ -2136,6 +2124,7 @@ out:
/* For urgent requests, flush immediately if sent */
if (sent) {
qemu_fflush(pss->pss_channel);
+ ram_page_hint_update(rs, pss);
}
return ret;
}
@@ -2191,7 +2180,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
if (preempt_active) {
qemu_mutex_unlock(&rs->bitmap_mutex);
}
- tmppages = migration_ops->ram_save_target_page(rs, pss);
+ tmppages = ram_save_target_page(rs, pss);
if (tmppages >= 0) {
pages += tmppages;
/*
@@ -2223,6 +2212,30 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
return (res < 0 ? res : pages);
}
+static bool ram_page_hint_valid(RAMState *rs)
+{
+ /* There's only page hint during postcopy preempt mode */
+ if (!postcopy_preempt_active()) {
+ return false;
+ }
+
+ return rs->page_hint.valid;
+}
+
+static void ram_page_hint_collect(RAMState *rs, RAMBlock **block,
+ unsigned long *page)
+{
+ PageLocationHint *hint = &rs->page_hint;
+
+ assert(hint->valid);
+
+ *block = hint->location.block;
+ *page = hint->location.offset;
+
+ /* Mark the hint consumed */
+ hint->valid = false;
+}
+
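The hint is a single-slot mailbox guarded by bitmap_mutex: the urgent (return-path) side publishes the location it just sent via ram_page_hint_update(), and the background scan consumes it before falling back to last_seen_block. A compressed sketch of the two sides of the handshake (both callers already hold rs->bitmap_mutex in this series):

    /* Producer, after flushing an urgent page on the preempt channel */
    ram_page_hint_update(rs, pss);          /* no-op if a hint is pending */

    /* Consumer, when picking the next page for the background channel */
    if (ram_page_hint_valid(rs)) {
        ram_page_hint_collect(rs, &next_block, &next_page);
    } else {
        next_block = rs->last_seen_block;
        next_page = rs->last_page;
    }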
/**
* ram_find_and_save_block: finds a dirty page and sends it to f
*
@@ -2239,6 +2252,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
static int ram_find_and_save_block(RAMState *rs)
{
PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
+ unsigned long next_page;
+ RAMBlock *next_block;
int pages = 0;
/* No dirty page as there is zero RAM */
@@ -2258,7 +2273,14 @@ static int ram_find_and_save_block(RAMState *rs)
rs->last_page = 0;
}
- pss_init(pss, rs->last_seen_block, rs->last_page);
+ if (ram_page_hint_valid(rs)) {
+ ram_page_hint_collect(rs, &next_block, &next_page);
+ } else {
+ next_block = rs->last_seen_block;
+ next_page = rs->last_page;
+ }
+
+ pss_init(pss, next_block, next_page);
while (true){
if (!get_queued_page(rs, pss)) {
@@ -2387,9 +2409,15 @@ static void ram_save_cleanup(void *opaque)
ram_bitmaps_destroy();
xbzrle_cleanup();
+ multifd_ram_save_cleanup();
ram_state_cleanup(rsp);
- g_free(migration_ops);
- migration_ops = NULL;
+}
+
+static void ram_page_hint_reset(PageLocationHint *hint)
+{
+ hint->location.block = NULL;
+ hint->location.offset = 0;
+ hint->valid = false;
}
static void ram_state_reset(RAMState *rs)
@@ -2404,6 +2432,8 @@ static void ram_state_reset(RAMState *rs)
rs->last_page = 0;
rs->last_version = ram_list.version;
rs->xbzrle_started = false;
+
+ ram_page_hint_reset(&rs->page_hint);
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -2783,7 +2813,7 @@ static bool ram_init_bitmaps(RAMState *rs, Error **errp)
if (!ret) {
goto out_unlock;
}
- migration_bitmap_sync_precopy(rs, false);
+ migration_bitmap_sync_precopy(false);
}
}
out_unlock:
@@ -2860,7 +2890,7 @@ void qemu_guest_free_page_hint(void *addr, size_t len)
size_t used_len, start, npages;
/* This function is currently expected to be used during live migration */
- if (!migration_is_setup_or_active()) {
+ if (!migration_is_running()) {
return;
}
@@ -3055,27 +3085,43 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
return ret;
}
- migration_ops = g_malloc0(sizeof(MigrationOps));
-
if (migrate_multifd()) {
- migration_ops->ram_save_target_page = ram_save_target_page_multifd;
- } else {
- migration_ops->ram_save_target_page = ram_save_target_page_legacy;
+ multifd_ram_save_setup();
}
+ /*
+ * This operation is unfortunate.
+ *
+ * For legacy QEMUs using per-section sync
+ * =======================================
+ *
+ * This must exist because the EOS below requires the SYNC messages
+ * per-channel to work.
+ *
+ * For modern QEMUs using per-round sync
+ * =====================================
+ *
+ * Logically such sync is not needed, and recv threads should not run
+ * until setup ready (using things like channels_ready on src). Then
+ * we should be all fine.
+ *
+ * However even if we add channels_ready to recv side in new QEMUs, old
+ * QEMU won't have them so this sync will still be needed to make sure
+ * multifd recv threads won't start processing guest pages early before
+ * ram_load_setup() is properly done.
+ *
+ * Let's stick with this. Fortunately the overhead of syncing during
+ * setup is low, because the VM is still running, so at least it's not
+ * accounted as part of downtime.
+ */
bql_unlock();
- ret = multifd_send_sync_main();
+ ret = multifd_ram_flush_and_sync(f);
bql_lock();
if (ret < 0) {
error_setg(errp, "%s: multifd synchronization failed", __func__);
return ret;
}
- if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
- && !migrate_mapped_ram()) {
- qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
- }
-
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
ret = qemu_fflush(f);
if (ret < 0) {
@@ -3207,11 +3253,9 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
}
out:
- if (ret >= 0
- && migration_is_setup_or_active()) {
- if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
- !migrate_mapped_ram()) {
- ret = multifd_send_sync_main();
+ if (ret >= 0 && migration_is_running()) {
+ if (multifd_ram_sync_per_section()) {
+ ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
@@ -3248,7 +3292,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) {
- migration_bitmap_sync_precopy(rs, true);
+ migration_bitmap_sync_precopy(true);
}
ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
@@ -3283,9 +3327,15 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
}
- ret = multifd_send_sync_main();
- if (ret < 0) {
- return ret;
+ if (multifd_ram_sync_per_section()) {
+ /*
+ * Only the old dest QEMU will need this sync, because each EOS
+ * will require one SYNC message on each channel.
+ */
+ ret = multifd_ram_flush_and_sync(f);
+ if (ret < 0) {
+ return ret;
+ }
}
if (migrate_mapped_ram()) {
@@ -3330,7 +3380,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (!migration_in_postcopy()) {
bql_lock();
WITH_RCU_READ_LOCK_GUARD() {
- migration_bitmap_sync_precopy(rs, false);
+ migration_bitmap_sync_precopy(false);
}
bql_unlock();
}
@@ -3631,7 +3681,9 @@ static int ram_load_cleanup(void *opaque)
RAMBlock *rb;
RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- qemu_ram_block_writeback(rb);
+ if (memory_region_is_nonvolatile(rb->mr)) {
+ qemu_ram_block_writeback(rb);
+ }
}
xbzrle_load_cleanup();
@@ -3796,15 +3848,7 @@ int ram_load_postcopy(QEMUFile *f, int channel)
TARGET_PAGE_SIZE);
}
break;
- case RAM_SAVE_FLAG_MULTIFD_FLUSH:
- multifd_recv_sync_main();
- break;
case RAM_SAVE_FLAG_EOS:
- /* normal exit */
- if (migrate_multifd() &&
- migrate_multifd_flush_after_each_section()) {
- multifd_recv_sync_main();
- }
break;
default:
error_report("Unknown combination of migration flags: 0x%x"
@@ -4004,8 +4048,6 @@ static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
/* Skip pages array */
qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
-
- return;
}
static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
@@ -4294,6 +4336,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
* it will be necessary to reduce the granularity of this
* critical section.
*/
+ trace_ram_load_start();
WITH_RCU_READ_LOCK_GUARD() {
if (postcopy_running) {
/*
@@ -4460,6 +4503,42 @@ static int ram_resume_prepare(MigrationState *s, void *opaque)
return 0;
}
+static bool ram_save_postcopy_prepare(QEMUFile *f, void *opaque, Error **errp)
+{
+ int ret;
+
+ if (migrate_multifd()) {
+ /*
+ * When multifd is enabled, source QEMU needs to make sure all the
+ * pages queued before postcopy starts have been flushed.
+ *
+ * The load of these pages must happen before switching to postcopy.
+ * It's because loading of guest pages (so far) in multifd recv
+ * threads is still non-atomic, so the load cannot happen with vCPUs
+ * running on the destination side.
+ *
+ * This flush and sync will guarantee that those pages are loaded
+ * _before_ postcopy starts on the destination. The rationale is,
+ * this happens before VM stops (and before source QEMU sends all
+ * the rest of the postcopy messages). So when the destination QEMU
+ * receives the postcopy messages, it must have received the sync
+ * message on the main channel (either RAM_SAVE_FLAG_MULTIFD_FLUSH,
+ * or RAM_SAVE_FLAG_EOS), and such message would guarantee that
+ * all previous guest pages queued in the multifd channels are
+ * completely loaded.
+ */
+ ret = multifd_ram_flush_and_sync(f);
+ if (ret < 0) {
+ error_setg(errp, "%s: multifd flush and sync failed", __func__);
+ return false;
+ }
+ }
+
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+ return true;
+}
+
void postcopy_preempt_shutdown_file(MigrationState *s)
{
qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
@@ -4479,6 +4558,7 @@ static SaveVMHandlers savevm_ram_handlers = {
.load_setup = ram_load_setup,
.load_cleanup = ram_load_cleanup,
.resume_prepare = ram_resume_prepare,
+ .save_postcopy_prepare = ram_save_postcopy_prepare,
};
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
@@ -4498,7 +4578,7 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
return;
}
- if (!migration_is_idle()) {
+ if (migration_is_running()) {
/*
* Precopy code on the source cannot deal with the size of RAM blocks
* changing at random points in time - especially after sending the
@@ -4506,8 +4586,10 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
* Abort and indicate a proper reason.
*/
error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
- migration_cancel(err);
+ migrate_set_error(migrate_get_current(), err);
error_free(err);
+
+ migration_cancel();
}
switch (ps) {
diff --git a/migration/ram.h b/migration/ram.h
index bc0318b..921c39a 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -33,6 +33,34 @@
#include "exec/cpu-common.h"
#include "io/channel.h"
+/*
+ * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
+ * worked for pages that were filled with the same char. We switched
+ * it to only search for the zero value. And to avoid confusion with
+ * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
+ *
+ * RAM_SAVE_FLAG_FULL (0x01) was obsoleted in 2009.
+ *
+ * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1.
+ *
+ * RAM_SAVE_FLAG_HOOK is only used in RDMA. Whenever this is found in the
+ * data stream, the flags will be passed to rdma functions in the
+ * incoming-migration side.
+ *
+ * We can't use any flag that is bigger than 0x200, because the flags are
+ * always assumed to be encoded in a ramblock address offset, which is
+ * multiple of PAGE_SIZE. Here it means QEMU supports migration with any
+ * architecture that has PAGE_SIZE>=1K (0x400).
+ */
+#define RAM_SAVE_FLAG_ZERO 0x002
+#define RAM_SAVE_FLAG_MEM_SIZE 0x004
+#define RAM_SAVE_FLAG_PAGE 0x008
+#define RAM_SAVE_FLAG_EOS 0x010
+#define RAM_SAVE_FLAG_CONTINUE 0x020
+#define RAM_SAVE_FLAG_XBZRLE 0x040
+#define RAM_SAVE_FLAG_HOOK 0x080
+#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
+
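These flags ride in the low bits of the page-aligned address word that precedes each page on the wire, which is why nothing above 0x200 is usable. A small worked example of the encode/decode, using a 4 KiB page for concreteness (the exact mask in ram.c is TARGET_PAGE_MASK; this standalone arithmetic is illustrative):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        const uint64_t page_size = 4096;
        uint64_t offset = 42 * page_size;     /* page-aligned RAM offset */

        /* Encode: flags OR'ed into the unused low bits */
        uint64_t word = offset | 0x008 /* PAGE */ | 0x020 /* CONTINUE */;

        /* Decode, as the load side does with TARGET_PAGE_MASK */
        uint64_t flags = word & (page_size - 1);   /* 0x028 */
        uint64_t addr  = word & ~(page_size - 1);  /* 42 * 4096 */

        assert(flags == 0x028 && addr == offset);
        return 0;
    }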
extern XBZRLECacheStats xbzrle_counters;
/* Should be holding either ram_list.mutex, or the RCU lock. */
@@ -44,6 +72,7 @@ extern XBZRLECacheStats xbzrle_counters;
INTERNAL_RAMBLOCK_FOREACH(block) \
if (!qemu_ram_is_migratable(block)) {} else
+void ram_mig_init(void);
int xbzrle_cache_resize(uint64_t new_size, Error **errp);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
diff --git a/migration/rdma.c b/migration/rdma.c
index 855753c..2d839fc 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -30,7 +30,7 @@
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "qemu/coroutine.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
@@ -768,156 +768,12 @@ static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
}
/*
- * As of now, IPv6 over RoCE / iWARP is not supported by linux.
- * We will try the next addrinfo struct, and fail if there are
- * no other valid addresses to bind against.
- *
- * If user is listening on '[::]', then we will not have a opened a device
- * yet and have no way of verifying if the device is RoCE or not.
- *
- * In this case, the source VM will throw an error for ALL types of
- * connections (both IPv4 and IPv6) if the destination machine does not have
- * a regular infiniband network available for use.
- *
- * The only way to guarantee that an error is thrown for broken kernels is
- * for the management software to choose a *specific* interface at bind time
- * and validate what time of hardware it is.
- *
- * Unfortunately, this puts the user in a fix:
- *
- * If the source VM connects with an IPv4 address without knowing that the
- * destination has bound to '[::]' the migration will unconditionally fail
- * unless the management software is explicitly listening on the IPv4
- * address while using a RoCE-based device.
- *
- * If the source VM connects with an IPv6 address, then we're OK because we can
- * throw an error on the source (and similarly on the destination).
- *
- * But in mixed environments, this will be broken for a while until it is fixed
- * inside linux.
- *
- * We do provide a *tiny* bit of help in this function: We can list all of the
- * devices in the system and check to see if all the devices are RoCE or
- * Infiniband.
- *
- * If we detect that we have a *pure* RoCE environment, then we can safely
- * thrown an error even if the management software has specified '[::]' as the
- * bind address.
- *
- * However, if there is are multiple hetergeneous devices, then we cannot make
- * this assumption and the user just has to be sure they know what they are
- * doing.
- *
- * Patches are being reviewed on linux-rdma.
- */
-static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
-{
- /* This bug only exists in linux, to our knowledge. */
-#ifdef CONFIG_LINUX
- struct ibv_port_attr port_attr;
-
- /*
- * Verbs are only NULL if management has bound to '[::]'.
- *
- * Let's iterate through all the devices and see if there any pure IB
- * devices (non-ethernet).
- *
- * If not, then we can safely proceed with the migration.
- * Otherwise, there are no guarantees until the bug is fixed in linux.
- */
- if (!verbs) {
- int num_devices;
- struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
- bool roce_found = false;
- bool ib_found = false;
-
- for (int x = 0; x < num_devices; x++) {
- verbs = ibv_open_device(dev_list[x]);
- /*
- * ibv_open_device() is not documented to set errno. If
- * it does, it's somebody else's doc bug. If it doesn't,
- * the use of errno below is wrong.
- * TODO Find out whether ibv_open_device() sets errno.
- */
- if (!verbs) {
- if (errno == EPERM) {
- continue;
- } else {
- error_setg_errno(errp, errno,
- "could not open RDMA device context");
- return -1;
- }
- }
-
- if (ibv_query_port(verbs, 1, &port_attr)) {
- ibv_close_device(verbs);
- error_setg(errp,
- "RDMA ERROR: Could not query initial IB port");
- return -1;
- }
-
- if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
- ib_found = true;
- } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
- roce_found = true;
- }
-
- ibv_close_device(verbs);
-
- }
-
- if (roce_found) {
- if (ib_found) {
- warn_report("migrations may fail:"
- " IPv6 over RoCE / iWARP in linux"
- " is broken. But since you appear to have a"
- " mixed RoCE / IB environment, be sure to only"
- " migrate over the IB fabric until the kernel "
- " fixes the bug.");
- } else {
- error_setg(errp, "RDMA ERROR: "
- "You only have RoCE / iWARP devices in your systems"
- " and your management software has specified '[::]'"
- ", but IPv6 over RoCE / iWARP is not supported in Linux.");
- return -1;
- }
- }
-
- return 0;
- }
-
- /*
- * If we have a verbs context, that means that some other than '[::]' was
- * used by the management software for binding. In which case we can
- * actually warn the user about a potentially broken kernel.
- */
-
- /* IB ports start with 1, not 0 */
- if (ibv_query_port(verbs, 1, &port_attr)) {
- error_setg(errp, "RDMA ERROR: Could not query initial IB port");
- return -1;
- }
-
- if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
- error_setg(errp, "RDMA ERROR: "
- "Linux kernel's RoCE / iWARP does not support IPv6 "
- "(but patches on linux-rdma in progress)");
- return -1;
- }
-
-#endif
-
- return 0;
-}
-
-/*
* Figure out which RDMA device corresponds to the requested IP hostname
* Also create the initial connection manager identifiers for opening
* the connection.
*/
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
- Error *err = NULL;
int ret;
struct rdma_addrinfo *res;
char port_str[16];
@@ -953,9 +809,8 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
goto err_resolve_get_addr;
}
- /* Try all addresses, saving the first error in @err */
+ /* Try all addresses; exit the loop on the first successfully resolved address */
for (struct rdma_addrinfo *e = res; e != NULL; e = e->ai_next) {
- Error **local_errp = err ? NULL : &err;
inet_ntop(e->ai_family,
&((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
@@ -964,25 +819,12 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
RDMA_RESOLVE_TIMEOUT_MS);
if (ret >= 0) {
- if (e->ai_family == AF_INET6) {
- ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs,
- local_errp);
- if (ret < 0) {
- continue;
- }
- }
- error_free(err);
goto route;
}
}
rdma_freeaddrinfo(res);
- if (err) {
- error_propagate(errp, err);
- } else {
- error_setg(errp, "RDMA ERROR: could not resolve address %s",
- rdma->host);
- }
+ error_setg(errp, "RDMA ERROR: could not resolve address %s", rdma->host);
goto err_resolve_get_addr;
route:
@@ -2611,7 +2453,6 @@ err_rdma_source_connect:
static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
{
- Error *err = NULL;
int ret;
struct rdma_cm_id *listen_id;
char ip[40] = "unknown";
@@ -2661,9 +2502,8 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
goto err_dest_init_bind_addr;
}
- /* Try all addresses, saving the first error in @err */
+ /* Try all addresses */
for (e = res; e != NULL; e = e->ai_next) {
- Error **local_errp = err ? NULL : &err;
inet_ntop(e->ai_family,
&((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
@@ -2672,24 +2512,12 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
if (ret < 0) {
continue;
}
- if (e->ai_family == AF_INET6) {
- ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs,
- local_errp);
- if (ret < 0) {
- continue;
- }
- }
- error_free(err);
break;
}
rdma_freeaddrinfo(res);
if (!e) {
- if (err) {
- error_propagate(errp, err);
- } else {
- error_setg(errp, "RDMA ERROR: Error: could not rdma_bind_addr!");
- }
+ error_setg(errp, "RDMA ERROR: Error: could not rdma_bind_addr!");
goto err_dest_init_bind_addr;
}
@@ -3284,14 +3112,11 @@ err:
int rdma_control_save_page(QEMUFile *f, ram_addr_t block_offset,
ram_addr_t offset, size_t size)
{
- if (!migrate_rdma() || migration_in_postcopy()) {
- return RAM_SAVE_CONTROL_NOT_SUPP;
- }
+ assert(migrate_rdma());
int ret = qemu_rdma_save_page(f, block_offset, offset, size);
- if (ret != RAM_SAVE_CONTROL_DELAYED &&
- ret != RAM_SAVE_CONTROL_NOT_SUPP) {
+ if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (ret < 0) {
qemu_file_set_error(f, ret);
}
@@ -3829,7 +3654,7 @@ int rdma_block_notification_handle(QEMUFile *f, const char *name)
int rdma_registration_start(QEMUFile *f, uint64_t flags)
{
- if (!migrate_rdma() || migration_in_postcopy()) {
+ if (!migrate_rdma()) {
return 0;
}
@@ -3861,7 +3686,7 @@ int rdma_registration_stop(QEMUFile *f, uint64_t flags)
RDMAControlHeader head = { .len = 0, .repeat = 1 };
int ret;
- if (!migrate_rdma() || migration_in_postcopy()) {
+ if (!migrate_rdma()) {
return 0;
}
@@ -3985,7 +3810,7 @@ static void qio_channel_rdma_finalize(Object *obj)
}
static void qio_channel_rdma_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
@@ -4174,7 +3999,7 @@ void rdma_start_outgoing_migration(void *opaque,
s->to_dst_file = rdma_new_output(rdma);
s->rdma_migration = true;
- migrate_fd_connect(s, NULL);
+ migration_connect(s, NULL);
return;
return_path_err:
qemu_rdma_cleanup(rdma);
diff --git a/migration/rdma.h b/migration/rdma.h
index a8d27f3..f74f16a 100644
--- a/migration/rdma.h
+++ b/migration/rdma.h
@@ -19,7 +19,7 @@
#ifndef QEMU_MIGRATION_RDMA_H
#define QEMU_MIGRATION_RDMA_H
-#include "exec/memory.h"
+#include "system/memory.h"
void rdma_start_outgoing_migration(void *opaque, InetSocketAddress *host_port,
Error **errp);
@@ -33,14 +33,6 @@ void rdma_start_incoming_migration(InetSocketAddress *host_port, Error **errp);
#define RAM_CONTROL_ROUND 1
#define RAM_CONTROL_FINISH 3
-/*
- * Whenever this is found in the data stream, the flags
- * will be passed to rdma functions in the incoming-migration
- * side.
- */
-#define RAM_SAVE_FLAG_HOOK 0x80
-
-#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED -2000
#ifdef CONFIG_RDMA
@@ -63,7 +55,7 @@ static inline
int rdma_control_save_page(QEMUFile *f, ram_addr_t block_offset,
ram_addr_t offset, size_t size)
{
- return RAM_SAVE_CONTROL_NOT_SUPP;
+ g_assert_not_reached();
}
#endif
#endif
diff --git a/migration/savevm.c b/migration/savevm.c
index deb5783..bb04a45 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -37,6 +37,7 @@
#include "migration/register.h"
#include "migration/global_state.h"
#include "migration/channel-block.h"
+#include "multifd.h"
#include "ram.h"
#include "qemu-file.h"
#include "savevm.h"
@@ -46,27 +47,29 @@
#include "qapi/clone-visitor.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/error-report.h"
-#include "sysemu/cpus.h"
-#include "exec/memory.h"
+#include "system/cpus.h"
+#include "system/memory.h"
#include "exec/target_page.h"
+#include "exec/page-vary.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/job.h"
#include "qemu/main-loop.h"
#include "block/snapshot.h"
+#include "block/thread-pool.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
+#include "system/replay.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/xen.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
#include "net/announce.h"
#include "qemu/yank.h"
#include "yank_functions.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "options.h"
const unsigned int postcopy_ram_discard_version;
@@ -90,6 +93,7 @@ enum qemu_vm_cmd {
MIG_CMD_ENABLE_COLO, /* Enable COLO */
MIG_CMD_POSTCOPY_RESUME, /* resume postcopy on dest */
MIG_CMD_RECV_BITMAP, /* Request for recved bitmap on dst */
+ MIG_CMD_SWITCHOVER_START, /* Switchover start notification */
MIG_CMD_MAX
};
@@ -109,6 +113,7 @@ static struct mig_cmd_args {
[MIG_CMD_POSTCOPY_RESUME] = { .len = 0, .name = "POSTCOPY_RESUME" },
[MIG_CMD_PACKAGED] = { .len = 4, .name = "PACKAGED" },
[MIG_CMD_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" },
+ [MIG_CMD_SWITCHOVER_START] = { .len = 0, .name = "SWITCHOVER_START" },
[MIG_CMD_MAX] = { .len = -1, .name = "MAX" },
};
@@ -130,6 +135,35 @@ static struct mig_cmd_args {
*/
/***********************************************************/
+/* Optional load threads pool support */
+
+static void qemu_loadvm_thread_pool_create(MigrationIncomingState *mis)
+{
+ assert(!mis->load_threads);
+ mis->load_threads = thread_pool_new();
+ mis->load_threads_abort = false;
+}
+
+static void qemu_loadvm_thread_pool_destroy(MigrationIncomingState *mis)
+{
+ qatomic_set(&mis->load_threads_abort, true);
+
+ bql_unlock(); /* Load threads might be waiting for BQL */
+ g_clear_pointer(&mis->load_threads, thread_pool_free);
+ bql_lock();
+}
+
+static bool qemu_loadvm_thread_pool_wait(MigrationState *s,
+ MigrationIncomingState *mis)
+{
+ bql_unlock(); /* Let load threads do work requiring BQL */
+ thread_pool_wait(mis->load_threads);
+ bql_lock();
+
+ return !migrate_has_error(s);
+}
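The pool above runs callbacks of type MigrationLoadThread; from qemu_loadvm_load_thread() further down, such a callback receives its opaque state, an abort flag to poll, and an Error ** to fill, and returns false on failure. A hedged sketch of a conforming callback (the device state type and helpers are hypothetical):

    static bool mydev_load_thread(void *opaque, bool *should_quit, Error **errp)
    {
        MyDevLoadState *s = opaque;              /* hypothetical */

        while (mydev_more_work(s)) {             /* hypothetical helper */
            if (qatomic_read(should_quit)) {
                return true;                     /* aborted, not an error */
            }
            if (!mydev_load_one_chunk(s, errp)) {
                return false;                    /* *errp set by the helper */
            }
        }
        return true;
    }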
+
+/***********************************************************/
/* savevm/loadvm support */
static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
@@ -232,7 +266,7 @@ typedef struct SaveState {
static SaveState savevm_state = {
.handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
- .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL },
+ .handler_pri_head = { [0 ... MIG_PRI_MAX] = NULL },
.global_section_id = 0,
};
@@ -306,7 +340,7 @@ static int configuration_pre_load(void *opaque)
* predates the variable-target-page-bits support and is using the
* minimum possible value for this CPU.
*/
- state->target_page_bits = qemu_target_page_bits_min();
+ state->target_page_bits = migration_legacy_page_bits();
return 0;
}
@@ -429,8 +463,7 @@ static const VMStateInfo vmstate_info_capability = {
*/
static bool vmstate_target_page_bits_needed(void *opaque)
{
- return qemu_target_page_bits()
- > qemu_target_page_bits_min();
+ return qemu_target_page_bits() > migration_legacy_page_bits();
}
static const VMStateDescription vmstate_target_page_bits = {
@@ -704,7 +737,7 @@ static int calculate_compat_instance_id(const char *idstr)
static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
- if (se->vmsd) {
+ if (se->vmsd && se->vmsd->priority) {
return se->vmsd->priority;
}
return MIG_PRI_DEFAULT;
@@ -860,23 +893,6 @@ static void vmstate_check(const VMStateDescription *vmsd)
}
}
-/*
- * See comment in hw/intc/xics.c:icp_realize()
- *
- * This function can be removed when
- * pre_2_10_vmstate_register_dummy_icp() is removed.
- */
-int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id,
- const VMStateDescription *vmsd,
- void *opaque)
-{
- SaveStateEntry *se = find_se(vmsd->name, instance_id);
-
- if (se) {
- savevm_state_handler_remove(se);
- }
- return vmstate_register(obj, instance_id, vmsd, opaque);
-}
int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
const VMStateDescription *vmsd,
@@ -1218,6 +1234,19 @@ void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
}
+static void qemu_savevm_send_switchover_start(QEMUFile *f)
+{
+ trace_savevm_send_switchover_start();
+ qemu_savevm_command_send(f, MIG_CMD_SWITCHOVER_START, 0, NULL);
+}
+
+void qemu_savevm_maybe_send_switchover_start(QEMUFile *f)
+{
+ if (migrate_send_switchover_start()) {
+ qemu_savevm_send_switchover_start(f);
+ }
+}
+
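On the destination, loadvm_postcopy_handle_switchover_start() further down walks every registered handler and invokes its optional switchover_start callback. A hedged sketch of a device opting in (the handler body and names are hypothetical; only the callback slot comes from this series):

    static int mydev_switchover_start(void *opaque)
    {
        MyDevState *s = opaque;                  /* hypothetical */

        /* e.g. unblock work that must wait until switchover is imminent */
        qemu_event_set(&s->switchover_event);
        return 0;
    }

    static SaveVMHandlers savevm_mydev_handlers = {
        .switchover_start = mydev_switchover_start,
        /* ... other callbacks ... */
    };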
bool qemu_savevm_state_blocked(Error **errp)
{
SaveStateEntry *se;
@@ -1248,8 +1277,7 @@ void qemu_savevm_non_migratable_list(strList **reasons)
void qemu_savevm_state_header(QEMUFile *f)
{
MigrationState *s = migrate_get_current();
-
- s->vmdesc = json_writer_new(false);
+ JSONWriter *vmdesc = s->vmdesc;
trace_savevm_state_header();
qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
@@ -1258,16 +1286,21 @@ void qemu_savevm_state_header(QEMUFile *f)
if (s->send_configuration) {
qemu_put_byte(f, QEMU_VM_CONFIGURATION);
- /*
- * This starts the main json object and is paired with the
- * json_writer_end_object in
- * qemu_savevm_state_complete_precopy_non_iterable
- */
- json_writer_start_object(s->vmdesc, NULL);
+ if (vmdesc) {
+ /*
+ * This starts the main json object and is paired with the
+ * json_writer_end_object in
+ * qemu_savevm_state_complete_precopy_non_iterable
+ */
+ json_writer_start_object(vmdesc, NULL);
+ json_writer_start_object(vmdesc, "configuration");
+ }
- json_writer_start_object(s->vmdesc, "configuration");
- vmstate_save_state(f, &vmstate_configuration, &savevm_state, s->vmdesc);
- json_writer_end_object(s->vmdesc);
+ vmstate_save_state(f, &vmstate_configuration, &savevm_state, vmdesc);
+
+ if (vmdesc) {
+ json_writer_end_object(vmdesc);
+ }
}
}
@@ -1313,16 +1346,19 @@ int qemu_savevm_state_setup(QEMUFile *f, Error **errp)
{
ERRP_GUARD();
MigrationState *ms = migrate_get_current();
+ JSONWriter *vmdesc = ms->vmdesc;
SaveStateEntry *se;
int ret = 0;
- json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size());
- json_writer_start_array(ms->vmdesc, "devices");
+ if (vmdesc) {
+ json_writer_int64(vmdesc, "page_size", qemu_target_page_size());
+ json_writer_start_array(vmdesc, "devices");
+ }
trace_savevm_state_setup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (se->vmsd && se->vmsd->early_setup) {
- ret = vmstate_save(f, se, ms->vmdesc, errp);
+ ret = vmstate_save(f, se, vmdesc, errp);
if (ret) {
migrate_set_error(ms, *errp);
qemu_file_set_error(f, ret);
@@ -1441,11 +1477,11 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
return all_finished;
}
-static bool should_send_vmdesc(void)
+bool should_send_vmdesc(void)
{
MachineState *machine = MACHINE(qdev_get_machine());
- bool in_postcopy = migration_in_postcopy();
- return !machine->suppress_vmdesc && !in_postcopy;
+
+ return !machine->suppress_vmdesc;
}
/*
@@ -1487,12 +1523,62 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
qemu_fflush(f);
}
-static
+bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp)
+{
+ SaveStateEntry *se;
+ bool ret;
+
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->save_postcopy_prepare) {
+ continue;
+ }
+
+ if (se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+
+ trace_savevm_section_start(se->idstr, se->section_id);
+
+ save_section_header(f, se, QEMU_VM_SECTION_PART);
+ ret = se->ops->save_postcopy_prepare(f, se->opaque, errp);
+ save_section_footer(f, se);
+
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
+
+ if (!ret) {
+ assert(*errp);
+ return false;
+ }
+ }
+
+ return true;
+}
+
int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
int64_t start_ts_each, end_ts_each;
SaveStateEntry *se;
int ret;
+ bool multifd_device_state = multifd_device_state_supported();
+
+ if (multifd_device_state) {
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ SaveLiveCompletePrecopyThreadHandler hdlr;
+
+ if (!se->ops || (in_postcopy && se->ops->has_postcopy &&
+ se->ops->has_postcopy(se->opaque)) ||
+ !se->ops->save_live_complete_precopy_thread) {
+ continue;
+ }
+
+ hdlr = se->ops->save_live_complete_precopy_thread;
+ multifd_spawn_device_state_save_thread(hdlr,
+ se->idstr, se->instance_id,
+ se->opaque);
+ }
+ }
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (!se->ops ||
@@ -1518,21 +1604,39 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
- return -1;
+ goto ret_fail_abort_threads;
}
end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id,
end_ts_each - start_ts_each);
}
+ if (multifd_device_state) {
+ if (migrate_has_error(migrate_get_current())) {
+ multifd_abort_device_state_save_threads();
+ }
+
+ if (!multifd_join_device_state_save_threads()) {
+ qemu_file_set_error(f, -EINVAL);
+ return -1;
+ }
+ }
+
trace_vmstate_downtime_checkpoint("src-iterable-saved");
return 0;
+
+ret_fail_abort_threads:
+ if (multifd_device_state) {
+ multifd_abort_device_state_save_threads();
+ multifd_join_device_state_save_threads();
+ }
+
+ return -1;
}
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy,
- bool inactivate_disks)
+ bool in_postcopy)
{
MigrationState *ms = migrate_get_current();
int64_t start_ts_each, end_ts_each;
@@ -1542,6 +1646,9 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
Error *local_err = NULL;
int ret;
+ /* Making sure cpu states are synchronized before saving non-iterable */
+ cpu_synchronize_all_states();
+
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (se->vmsd && se->vmsd->early_setup) {
/* Already saved during qemu_savevm_state_setup(). */
@@ -1563,76 +1670,42 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
end_ts_each - start_ts_each);
}
- if (inactivate_disks) {
- /* Inactivate before sending QEMU_VM_EOF so that the
- * bdrv_activate_all() on the other end won't fail. */
- ret = bdrv_inactivate_all();
- if (ret) {
- error_setg(&local_err, "%s: bdrv_inactivate_all() failed (%d)",
- __func__, ret);
- migrate_set_error(ms, local_err);
- error_report_err(local_err);
- qemu_file_set_error(f, ret);
- return ret;
- }
- }
if (!in_postcopy) {
/* Postcopy stream will still be going */
qemu_put_byte(f, QEMU_VM_EOF);
- }
- json_writer_end_array(vmdesc);
- json_writer_end_object(vmdesc);
- vmdesc_len = strlen(json_writer_get(vmdesc));
+ if (vmdesc) {
+ json_writer_end_array(vmdesc);
+ json_writer_end_object(vmdesc);
+ vmdesc_len = strlen(json_writer_get(vmdesc));
- if (should_send_vmdesc()) {
- qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
- qemu_put_be32(f, vmdesc_len);
- qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
+ qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
+ qemu_put_be32(f, vmdesc_len);
+ qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
+ }
}
- /* Free it now to detect any inconsistencies. */
- json_writer_free(vmdesc);
- ms->vmdesc = NULL;
-
trace_vmstate_downtime_checkpoint("src-non-iterable-saved");
return 0;
}
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks)
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
int ret;
- Error *local_err = NULL;
- bool in_postcopy = migration_in_postcopy();
- if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
- error_report_err(local_err);
+ ret = qemu_savevm_state_complete_precopy_iterable(f, false);
+ if (ret) {
+ return ret;
}
- trace_savevm_state_complete_precopy();
-
- cpu_synchronize_all_states();
-
- if (!in_postcopy || iterable_only) {
- ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
+ if (!iterable_only) {
+ ret = qemu_savevm_state_complete_precopy_non_iterable(f, false);
if (ret) {
return ret;
}
}
- if (iterable_only) {
- goto flush;
- }
-
- ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
- inactivate_disks);
- if (ret) {
- return ret;
- }
-
-flush:
return qemu_fflush(f);
}
@@ -1730,7 +1803,8 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
ret = qemu_file_get_error(f);
if (ret == 0) {
- qemu_savevm_state_complete_precopy(f, false, false);
+ qemu_savevm_maybe_send_switchover_start(f);
+ qemu_savevm_state_complete_precopy(f, false);
ret = qemu_file_get_error(f);
}
if (ret != 0) {
@@ -1756,7 +1830,7 @@ cleanup:
void qemu_savevm_live_state(QEMUFile *f)
{
/* save QEMU_VM_SECTION_END section */
- qemu_savevm_state_complete_precopy(f, true, false);
+ qemu_savevm_state_complete_precopy(f, true);
qemu_put_byte(f, QEMU_VM_EOF);
}
@@ -2004,7 +2078,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_POSTCOPY_ACTIVE);
- qemu_sem_post(&mis->thread_sync_sem);
+ qemu_event_set(&mis->thread_sync_event);
trace_postcopy_ram_listen_thread_start();
rcu_register_thread();
@@ -2013,6 +2087,8 @@ static void *postcopy_ram_listen_thread(void *opaque)
* in qemu_file, and thus we must be blocking now.
*/
qemu_file_set_blocking(f, true);
+
+ /* TODO: sanity check that only postcopiable data will be loaded here */
load_res = qemu_loadvm_state_main(f, mis);
/*
@@ -2073,8 +2149,9 @@ static void *postcopy_ram_listen_thread(void *opaque)
* (If something broke then qemu will have to exit anyway since it's
* got a bad migration state).
*/
+ bql_lock();
migration_incoming_state_destroy();
- qemu_loadvm_state_cleanup();
+ bql_unlock();
rcu_unregister_thread();
mis->have_listen_thread = false;
@@ -2129,7 +2206,8 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
}
mis->have_listen_thread = true;
- postcopy_thread_create(mis, &mis->listen_thread, "mig/dst/listen",
+ postcopy_thread_create(mis, &mis->listen_thread,
+ MIGRATION_THREAD_DST_LISTEN,
postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
trace_loadvm_postcopy_handle_listen("return");
@@ -2138,7 +2216,6 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
static void loadvm_postcopy_handle_run_bh(void *opaque)
{
- Error *local_err = NULL;
MigrationIncomingState *mis = opaque;
trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter");
@@ -2154,22 +2231,20 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced");
- /* Make sure all file formats throw away their mutable metadata.
- * If we get an error here, just don't restart the VM yet. */
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- local_err = NULL;
- autostart = false;
- }
-
- trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
-
dirty_bitmap_mig_before_vm_start();
if (autostart) {
- /* Hold onto your hats, starting the CPU */
- vm_start();
+ /*
+ * Make sure all file formats throw away their mutable metadata.
+ * If we get an error here, just don't restart the VM yet.
+ */
+ bool success = migration_block_activate(NULL);
+
+ trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
+
+ if (success) {
+ vm_start();
+ }
} else {
/* leave it paused and let management decide when to start the CPU */
runstate_set(RUN_STATE_PAUSED);
@@ -2429,6 +2504,26 @@ static int loadvm_process_enable_colo(MigrationIncomingState *mis)
return ret;
}
+static int loadvm_postcopy_handle_switchover_start(void)
+{
+ SaveStateEntry *se;
+
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ int ret;
+
+ if (!se->ops || !se->ops->switchover_start) {
+ continue;
+ }
+
+ ret = se->ops->switchover_start(se->opaque);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/*
* Process an incoming 'QEMU_VM_COMMAND'
* 0 just a normal return
@@ -2527,6 +2622,9 @@ static int loadvm_process_command(QEMUFile *f)
case MIG_CMD_ENABLE_COLO:
return loadvm_process_enable_colo(mis);
+
+ case MIG_CMD_SWITCHOVER_START:
+ return loadvm_postcopy_handle_switchover_start();
}
return 0;
@@ -2576,8 +2674,7 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
}
static int
-qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
- uint8_t type)
+qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type)
{
bool trace_downtime = (type == QEMU_VM_SECTION_FULL);
uint32_t instance_id, version_id, section_id;
@@ -2655,8 +2752,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
}
static int
-qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis,
- uint8_t type)
+qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type)
{
bool trace_downtime = (type == QEMU_VM_SECTION_END);
int64_t start_ts, end_ts;
@@ -2732,13 +2828,11 @@ static int qemu_loadvm_state_header(QEMUFile *f)
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
- qemu_loadvm_state_cleanup();
return -EINVAL;
}
ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
if (ret) {
- qemu_loadvm_state_cleanup();
return ret;
}
}
@@ -2790,16 +2884,68 @@ static int qemu_loadvm_state_setup(QEMUFile *f, Error **errp)
return 0;
}
-void qemu_loadvm_state_cleanup(void)
+struct LoadThreadData {
+ MigrationLoadThread function;
+ void *opaque;
+};
+
+static int qemu_loadvm_load_thread(void *thread_opaque)
+{
+ struct LoadThreadData *data = thread_opaque;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ g_autoptr(Error) local_err = NULL;
+
+ if (!data->function(data->opaque, &mis->load_threads_abort, &local_err)) {
+ MigrationState *s = migrate_get_current();
+
+ /*
+     * Can't set load_threads_abort here: the main migration channel
+     * could still be processing data, which could launch additional
+     * load threads.
+ */
+
+ assert(local_err);
+
+ /*
+         * If multiple load threads fail, which thread's error we end up
+         * reporting is purely arbitrary.
+ */
+ migrate_set_error(s, local_err);
+ }
+
+ return 0;
+}
+
+void qemu_loadvm_start_load_thread(MigrationLoadThread function,
+ void *opaque)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ struct LoadThreadData *data;
+
+ /* We only set it from this thread so it's okay to read it directly */
+ assert(!mis->load_threads_abort);
+
+ data = g_new(struct LoadThreadData, 1);
+ data->function = function;
+ data->opaque = opaque;
+
+ thread_pool_submit_immediate(mis->load_threads, qemu_loadvm_load_thread,
+ data, g_free);
+}
+
+void qemu_loadvm_state_cleanup(MigrationIncomingState *mis)
{
SaveStateEntry *se;
trace_loadvm_state_cleanup();
+
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (se->ops && se->ops->load_cleanup) {
se->ops->load_cleanup(se->opaque);
}
}
+
+ qemu_loadvm_thread_pool_destroy(mis);
}
/* Return true if we should continue the migration, or false. */
@@ -2891,14 +3037,14 @@ retry:
switch (section_type) {
case QEMU_VM_SECTION_START:
case QEMU_VM_SECTION_FULL:
- ret = qemu_loadvm_section_start_full(f, mis, section_type);
+ ret = qemu_loadvm_section_start_full(f, section_type);
if (ret < 0) {
goto out;
}
break;
case QEMU_VM_SECTION_PART:
case QEMU_VM_SECTION_END:
- ret = qemu_loadvm_section_part_end(f, mis, section_type);
+ ret = qemu_loadvm_section_part_end(f, section_type);
if (ret < 0) {
goto out;
}
@@ -2950,6 +3096,7 @@ out:
int qemu_loadvm_state(QEMUFile *f)
{
+ MigrationState *s = migrate_get_current();
MigrationIncomingState *mis = migration_incoming_get_current();
Error *local_err = NULL;
int ret;
@@ -2959,6 +3106,8 @@ int qemu_loadvm_state(QEMUFile *f)
return -EINVAL;
}
+ qemu_loadvm_thread_pool_create(mis);
+
ret = qemu_loadvm_state_header(f);
if (ret) {
return ret;
@@ -2981,13 +3130,27 @@ int qemu_loadvm_state(QEMUFile *f)
trace_qemu_loadvm_state_post_main(ret);
if (mis->have_listen_thread) {
- /* Listen thread still going, can't clean up yet */
+ /*
+ * Postcopy listen thread still going, don't synchronize the
+ * cpus yet.
+ */
return ret;
}
+ /* When reaching here, it must be precopy */
if (ret == 0) {
- ret = qemu_file_get_error(f);
+ if (migrate_has_error(migrate_get_current()) ||
+ !qemu_loadvm_thread_pool_wait(s, mis)) {
+ ret = -EINVAL;
+ } else {
+ ret = qemu_file_get_error(f);
+ }
}
+ /*
+ * Set this flag unconditionally so we'll catch further attempts to
+ * start additional threads via an appropriate assert()
+ */
+ qatomic_set(&mis->load_threads_abort, true);
/*
* Try to read in the VMDESC section as well, so that dumping tools that
@@ -3024,7 +3187,6 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
- qemu_loadvm_state_cleanup();
cpu_synchronize_all_post_init();
return ret;
@@ -3064,6 +3226,29 @@ int qemu_loadvm_approve_switchover(void)
return migrate_send_rp_switchover_ack(mis);
}
+bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
+ char *buf, size_t len, Error **errp)
+{
+ SaveStateEntry *se;
+
+ se = find_se(idstr, instance_id);
+ if (!se) {
+ error_setg(errp,
+ "Unknown idstr %s or instance id %u for load state buffer",
+ idstr, instance_id);
+ return false;
+ }
+
+ if (!se->ops || !se->ops->load_state_buffer) {
+ error_setg(errp,
+ "idstr %s / instance %u has no load state buffer operation",
+ idstr, instance_id);
+ return false;
+ }
+
+ return se->ops->load_state_buffer(se->opaque, buf, len, errp);
+}
+
bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
bool has_devices, strList *devices, Error **errp)
{
@@ -3211,11 +3396,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
* side of the migration take control of the images.
*/
if (live && !saved_vm_running) {
- ret = bdrv_inactivate_all();
- if (ret) {
- error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)",
- __func__, ret);
- }
+ migration_block_inactivate();
}
}
@@ -3286,6 +3467,7 @@ bool load_snapshot(const char *name, const char *vmstate,
/* Don't even try to load empty VM states */
ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
if (ret < 0) {
+        error_setg(errp, "Snapshot cannot be found");
return false;
} else if (sn.vm_state_size == 0) {
error_setg(errp, "This is a disk-only snapshot. Revert to it "
@@ -3365,12 +3547,14 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
qemu_ram_set_idstr(mr->ram_block,
memory_region_name(mr), dev);
qemu_ram_set_migratable(mr->ram_block);
+ ram_block_add_cpr_blocker(mr->ram_block, &error_fatal);
}
void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
{
qemu_ram_unset_idstr(mr->ram_block);
qemu_ram_unset_migratable(mr->ram_block);
+ ram_block_del_cpr_blocker(mr->ram_block);
}
void vmstate_register_ram_global(MemoryRegion *mr)
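The hunk above adds a destination-side thread pool for load work. Below is a
minimal, hedged sketch of how a device handler might use it; the worker, its
state type and the helpers it calls are invented for illustration, and only
qemu_loadvm_start_load_thread() plus the MigrationLoadThread shape (opaque
pointer, abort flag, Error **) are taken from the patch. Usual QEMU headers
are omitted.

static bool my_device_load_worker(void *opaque, bool *should_quit,
                                  Error **errp)
{
    MyDeviceLoadState *state = opaque;            /* assumed device state */

    while (!qatomic_read(should_quit)) {
        /* Assumed helper; must set errp on failure, the pool asserts it. */
        if (!my_device_load_one_chunk(state, errp)) {
            return false;  /* reported via migrate_set_error() by the pool */
        }
        if (my_device_load_done(state)) {         /* assumed helper */
            return true;
        }
    }

    /* Aborted by qemu_loadvm_state(); treat it as a clean cancellation. */
    return true;
}

static void my_device_start_load(MyDeviceLoadState *state)
{
    qemu_loadvm_start_load_thread(my_device_load_worker, state);
}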
diff --git a/migration/savevm.h b/migration/savevm.h
index 9ec96a9..2d5e9c7 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -39,12 +39,13 @@ void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks);
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only);
void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
uint64_t *can_postcopy);
void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
uint64_t *can_postcopy);
+int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy);
+bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp);
void qemu_savevm_send_ping(QEMUFile *f, uint32_t value);
void qemu_savevm_send_open_return_path(QEMUFile *f);
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len);
@@ -53,6 +54,7 @@ void qemu_savevm_send_postcopy_listen(QEMUFile *f);
void qemu_savevm_send_postcopy_run(QEMUFile *f);
void qemu_savevm_send_postcopy_resume(QEMUFile *f);
void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name);
+void qemu_savevm_maybe_send_switchover_start(QEMUFile *f);
void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
uint16_t len,
@@ -63,11 +65,14 @@ void qemu_savevm_live_state(QEMUFile *f);
int qemu_save_device_state(QEMUFile *f);
int qemu_loadvm_state(QEMUFile *f);
-void qemu_loadvm_state_cleanup(void);
+void qemu_loadvm_state_cleanup(MigrationIncomingState *mis);
int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
int qemu_load_device_state(QEMUFile *f);
int qemu_loadvm_approve_switchover(void);
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy, bool inactivate_disks);
+ bool in_postcopy);
+
+bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
+ char *buf, size_t len, Error **errp);
#endif
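The header now declares the switchover-start message helper and the buffer
loading entry point used above. Below is a hedged sketch of a SaveVMHandlers
instance wiring up the two new per-device hooks; the device state type, the
helpers and the error code are assumptions, and only the callback signatures
are inferred from the calls in savevm.c.

static int my_device_switchover_start(void *opaque)
{
    MyDeviceState *s = opaque;                    /* assumed device state */

    /* e.g. stop producing new device state before the switchover */
    return my_device_quiesce(s) ? 0 : -EBUSY;     /* assumed helper */
}

static bool my_device_load_state_buffer(void *opaque, char *buf, size_t len,
                                        Error **errp)
{
    MyDeviceState *s = opaque;

    /* Parse a buffer delivered outside the main migration stream. */
    return my_device_parse_buffer(s, buf, len, errp);  /* assumed helper */
}

static const SaveVMHandlers my_device_handlers = {
    .switchover_start  = my_device_switchover_start,
    .load_state_buffer = my_device_load_state_buffer,
    /* ... plus the usual save/load callbacks ... */
};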
diff --git a/migration/socket.c b/migration/socket.c
index 9ab89b1..5ec65b8 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -42,24 +42,6 @@ void socket_send_channel_create(QIOTaskFunc f, void *data)
f, data, NULL, NULL);
}
-QIOChannel *socket_send_channel_create_sync(Error **errp)
-{
- QIOChannelSocket *sioc = qio_channel_socket_new();
-
- if (!outgoing_args.saddr) {
- object_unref(OBJECT(sioc));
- error_setg(errp, "Initial sock address not set!");
- return NULL;
- }
-
- if (qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp) < 0) {
- object_unref(OBJECT(sioc));
- return NULL;
- }
-
- return QIO_CHANNEL(sioc);
-}
-
struct SocketConnectData {
MigrationState *s;
char *hostname;
diff --git a/migration/socket.h b/migration/socket.h
index 46c233e..04ebbe9 100644
--- a/migration/socket.h
+++ b/migration/socket.h
@@ -22,7 +22,6 @@
#include "qemu/sockets.h"
void socket_send_channel_create(QIOTaskFunc f, void *data);
-QIOChannel *socket_send_channel_create_sync(Error **errp);
void socket_start_incoming_migration(SocketAddress *saddr, Error **errp);
diff --git a/migration/target.c b/migration/target.c
index a6ffa9a..12fd399 100644
--- a/migration/target.c
+++ b/migration/target.c
@@ -11,21 +11,21 @@
#include CONFIG_DEVICES
#ifdef CONFIG_VFIO
-#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio-migration.h"
#endif
#ifdef CONFIG_VFIO
void migration_populate_vfio_info(MigrationInfo *info)
{
- if (vfio_mig_active()) {
+ if (vfio_migration_active()) {
info->vfio = g_malloc0(sizeof(*info->vfio));
- info->vfio->transferred = vfio_mig_bytes_transferred();
+ info->vfio->transferred = vfio_migration_bytes_transferred();
}
}
void migration_reset_vfio_bytes_transferred(void)
{
- vfio_reset_bytes_transferred();
+ vfio_migration_reset_bytes_transferred();
}
#else
void migration_populate_vfio_info(MigrationInfo *info)
diff --git a/migration/tls.c b/migration/tls.c
index fa03d91..5cbf952 100644
--- a/migration/tls.c
+++ b/migration/tls.c
@@ -156,6 +156,11 @@ void migration_tls_channel_connect(MigrationState *s,
NULL);
}
+void migration_tls_channel_end(QIOChannel *ioc, Error **errp)
+{
+ qio_channel_tls_bye(QIO_CHANNEL_TLS(ioc), errp);
+}
+
bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc)
{
if (!migrate_tls()) {
diff --git a/migration/tls.h b/migration/tls.h
index 5797d15..58b25e1 100644
--- a/migration/tls.h
+++ b/migration/tls.h
@@ -36,7 +36,7 @@ void migration_tls_channel_connect(MigrationState *s,
QIOChannel *ioc,
const char *hostname,
Error **errp);
-
+void migration_tls_channel_end(QIOChannel *ioc, Error **errp);
/* Whether the QIO channel requires further TLS handshake? */
bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc);
diff --git a/migration/trace-events b/migration/trace-events
index 0b7c332..c506e11 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -39,12 +39,12 @@ savevm_send_postcopy_run(void) ""
savevm_send_postcopy_resume(void) ""
savevm_send_colo_enable(void) ""
savevm_send_recv_bitmap(char *name) "%s"
+savevm_send_switchover_start(void) ""
savevm_state_setup(void) ""
savevm_state_resume_prepare(void) ""
savevm_state_header(void) ""
savevm_state_iterate(void) ""
savevm_state_cleanup(void) ""
-savevm_state_complete_precopy(void) ""
vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
vmstate_downtime_save(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
@@ -88,6 +88,8 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
# qemu-file.c
qemu_file_fclose(void) ""
+qemu_file_put_fd(const char *name, int fd, int ret) "ioc %s, fd %d -> status %d"
+qemu_file_get_fd(const char *name, int fd) "ioc %s -> fd %d"
# ram.c
get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
@@ -115,6 +117,7 @@ colo_flush_ram_cache_end(void) ""
save_xbzrle_page_skipping(void) ""
save_xbzrle_page_overflow(void) ""
ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations"
+ram_load_start(void) ""
ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
@@ -128,21 +131,22 @@ postcopy_preempt_reset_channel(void) ""
# multifd.c
multifd_new_send_channel_async(uint8_t id) "channel %u"
multifd_new_send_channel_async_error(uint8_t id, void *err) "channel=%u err=%p"
-multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
+multifd_recv_unfill(uint8_t id, uint64_t packet_num, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " flags 0x%x next packet size %u"
multifd_recv_new_channel(uint8_t id) "channel %u"
multifd_recv_sync_main(long packet_num) "packet num %ld"
multifd_recv_sync_main_signal(uint8_t id) "channel %u"
multifd_recv_sync_main_wait(uint8_t id) "iter %u"
multifd_recv_terminate_threads(bool error) "error %d"
-multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64
+multifd_recv_thread_end(uint8_t id, uint64_t packets) "channel %u packets %" PRIu64
multifd_recv_thread_start(uint8_t id) "%u"
-multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal_pages, uint32_t zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
+multifd_send_fill(uint8_t id, uint64_t packet_num, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " flags 0x%x next packet size %u"
+multifd_send_ram_fill(uint8_t id, uint32_t normal, uint32_t zero) "channel %u normal pages %u zero pages %u"
multifd_send_error(uint8_t id) "channel %u"
multifd_send_sync_main(long packet_num) "packet num %ld"
multifd_send_sync_main_signal(uint8_t id) "channel %u"
multifd_send_sync_main_wait(uint8_t id) "channel %u"
multifd_send_terminate_threads(void) ""
-multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64
+multifd_send_thread_end(uint8_t id, uint64_t packets) "channel %u packets %" PRIu64
multifd_send_thread_start(uint8_t id) "%u"
multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s"
multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s"
@@ -151,9 +155,9 @@ multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostnam
# migration.c
migrate_set_state(const char *new_state) "new state %s"
-migrate_fd_cleanup(void) ""
+migration_cleanup(void) ""
migrate_error(const char *error_desc) "error=%s"
-migrate_fd_cancel(void) ""
+migration_cancel(void) ""
migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx"
migrate_pending_exact(uint64_t size, uint64_t pre, uint64_t post) "exact pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
migrate_pending_estimate(uint64_t size, uint64_t pre, uint64_t post) "estimate pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
@@ -191,6 +195,7 @@ migrate_transferred(uint64_t transferred, uint64_t time_spent, uint64_t bandwidt
process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d"
process_incoming_migration_co_postcopy_end_main(void) ""
postcopy_preempt_enabled(bool value) "%d"
+migration_precopy_complete(void) ""
# migration-stats
migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64
@@ -340,6 +345,15 @@ colo_receive_message(const char *msg) "Receive '%s' message"
# colo-failover.c
colo_failover_set_state(const char *new_state) "new state %s"
+# cpr.c
+cpr_save_fd(const char *name, int id, int fd) "%s, id %d, fd %d"
+cpr_delete_fd(const char *name, int id) "%s, id %d"
+cpr_find_fd(const char *name, int id, int fd) "%s, id %d returns %d"
+cpr_state_save(const char *mode) "%s mode"
+cpr_state_load(const char *mode) "%s mode"
+cpr_transfer_input(const char *path) "%s"
+cpr_transfer_output(const char *path) "%s"
+
# block-dirty-bitmap.c
send_bitmap_header_enter(void) ""
send_bitmap_bits(uint32_t flags, uint64_t start_sector, uint32_t nr_sectors, uint64_t data_size) "flags: 0x%x, start_sector: %" PRIu64 ", nr_sectors: %" PRIu32 ", data_size: %" PRIu64
@@ -377,3 +391,10 @@ migration_block_progression(unsigned percent) "Completed %u%%"
# page_cache.c
migration_pagecache_init(int64_t max_num_items) "Setting cache buckets to %" PRId64
migration_pagecache_insert(void) "Error allocating page"
+
+# cpu-throttle.c
+cpu_throttle_set(int new_throttle_pct) "set guest CPU throttled by %d%%"
+cpu_throttle_dirty_sync(void) ""
+
+# block-active.c
+migration_block_activation(const char *name) "%s"
diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
index e83bfcc..741a588 100644
--- a/migration/vmstate-types.c
+++ b/migration/vmstate-types.c
@@ -15,6 +15,7 @@
#include "qemu-file.h"
#include "migration.h"
#include "migration/vmstate.h"
+#include "migration/client-options.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "trace.h"
@@ -314,6 +315,29 @@ const VMStateInfo vmstate_info_uint64 = {
.put = put_uint64,
};
+/* File descriptor communicated via SCM_RIGHTS */
+
+static int get_fd(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ int32_t *v = pv;
+ *v = qemu_file_get_fd(f);
+ return 0;
+}
+
+static int put_fd(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ int32_t *v = pv;
+ return qemu_file_put_fd(f, *v);
+}
+
+const VMStateInfo vmstate_info_fd = {
+ .name = "fd",
+ .get = get_fd,
+ .put = put_fd,
+};
+
static int get_nullptr(QEMUFile *f, void *pv, size_t size,
const VMStateField *field)
@@ -338,7 +362,7 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
}
const VMStateInfo vmstate_info_nullptr = {
- .name = "uint64",
+ .name = "nullptr",
.get = get_nullptr,
.put = put_nullptr,
};
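vmstate_info_fd above lets a VMSD field carry a file descriptor across a
Unix-socket channel via SCM_RIGHTS, using qemu_file_put_fd() and
qemu_file_get_fd(). Below is a hedged sketch of a field referencing it, with
the field literal written out by hand; the struct, the field name and whether
QEMU also offers a wrapper macro for this are assumptions.

typedef struct MyCprState {
    int32_t memfd;                      /* fd to be passed via SCM_RIGHTS */
} MyCprState;

static const VMStateDescription vmstate_my_cpr_state = {
    .name = "my-cpr-state",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        {
            .name   = "memfd",
            .info   = &vmstate_info_fd,
            .size   = sizeof(int32_t),
            .flags  = VMS_SINGLE,
            .offset = offsetof(MyCprState, memfd),
        },
        VMSTATE_END_OF_LIST()
    }
};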
diff --git a/migration/vmstate.c b/migration/vmstate.c
index ff5d589..5feaa32 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -15,14 +15,15 @@
#include "migration/vmstate.h"
#include "savevm.h"
#include "qapi/error.h"
-#include "qapi/qmp/json-writer.h"
+#include "qobject/json-writer.h"
#include "qemu-file.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "trace.h"
static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, JSONWriter *vmdesc);
+ void *opaque, JSONWriter *vmdesc,
+ Error **errp);
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
@@ -50,6 +51,36 @@ vmstate_field_exists(const VMStateDescription *vmsd, const VMStateField *field,
return result;
}
+/*
+ * Create a fake nullptr field when there's a NULL pointer detected in the
+ * array of a VMS_ARRAY_OF_POINTER VMSD field. It's needed because we
+ * can't dereference the NULL pointer.
+ */
+static const VMStateField *
+vmsd_create_fake_nullptr_field(const VMStateField *field)
+{
+ VMStateField *fake = g_new0(VMStateField, 1);
+
+ /* It can only happen on an array of pointers! */
+ assert(field->flags & VMS_ARRAY_OF_POINTER);
+
+ /* Some of fake's properties should match the original's */
+ fake->name = field->name;
+ fake->version_id = field->version_id;
+
+    /* No "field_exists" check is needed, so the callback is left NULL */
+ fake->field_exists = NULL;
+
+ /* See vmstate_info_nullptr - use 1 byte to represent nullptr */
+ fake->size = 1;
+ fake->info = &vmstate_info_nullptr;
+ fake->flags = VMS_SINGLE;
+
+    /* None of the remaining fields should matter. */
+
+ return (const VMStateField *)fake;
+}
+
static int vmstate_n_elems(void *opaque, const VMStateField *field)
{
int n_elems = 1;
@@ -142,23 +173,39 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
}
for (i = 0; i < n_elems; i++) {
void *curr_elem = first_elem + size * i;
+ const VMStateField *inner_field;
if (field->flags & VMS_ARRAY_OF_POINTER) {
curr_elem = *(void **)curr_elem;
}
+
if (!curr_elem && size) {
- /* if null pointer check placeholder and do not follow */
- assert(field->flags & VMS_ARRAY_OF_POINTER);
- ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL);
- } else if (field->flags & VMS_STRUCT) {
- ret = vmstate_load_state(f, field->vmsd, curr_elem,
- field->vmsd->version_id);
- } else if (field->flags & VMS_VSTRUCT) {
- ret = vmstate_load_state(f, field->vmsd, curr_elem,
- field->struct_version_id);
+ /*
+ * If null pointer found (which should only happen in
+ * an array of pointers), use null placeholder and do
+ * not follow.
+ */
+ inner_field = vmsd_create_fake_nullptr_field(field);
+ } else {
+ inner_field = field;
+ }
+
+ if (inner_field->flags & VMS_STRUCT) {
+ ret = vmstate_load_state(f, inner_field->vmsd, curr_elem,
+ inner_field->vmsd->version_id);
+ } else if (inner_field->flags & VMS_VSTRUCT) {
+ ret = vmstate_load_state(f, inner_field->vmsd, curr_elem,
+ inner_field->struct_version_id);
} else {
- ret = field->info->get(f, curr_elem, size, field);
+ ret = inner_field->info->get(f, curr_elem, size,
+ inner_field);
+ }
+
+ /* If we used a fake temp field.. free it now */
+ if (inner_field != field) {
+ g_clear_pointer((gpointer *)&inner_field, g_free);
}
+
if (ret >= 0) {
ret = qemu_file_get_error(f);
}
@@ -310,7 +357,7 @@ static void vmsd_desc_field_start(const VMStateDescription *vmsd,
static void vmsd_desc_field_end(const VMStateDescription *vmsd,
JSONWriter *vmdesc,
- const VMStateField *field, size_t size, int i)
+ const VMStateField *field, size_t size)
{
if (!vmdesc) {
return;
@@ -378,37 +425,91 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
int size = vmstate_size(opaque, field);
uint64_t old_offset, written_bytes;
JSONWriter *vmdesc_loop = vmdesc;
+ bool is_prev_null = false;
trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
if (field->flags & VMS_POINTER) {
first_elem = *(void **)first_elem;
assert(first_elem || !n_elems || !size);
}
+
for (i = 0; i < n_elems; i++) {
void *curr_elem = first_elem + size * i;
+ const VMStateField *inner_field;
+ bool is_null;
+ int max_elems = n_elems - i;
- vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems);
old_offset = qemu_file_transferred(f);
if (field->flags & VMS_ARRAY_OF_POINTER) {
assert(curr_elem);
curr_elem = *(void **)curr_elem;
}
+
if (!curr_elem && size) {
- /* if null pointer write placeholder and do not follow */
- assert(field->flags & VMS_ARRAY_OF_POINTER);
- ret = vmstate_info_nullptr.put(f, curr_elem, size, NULL,
- NULL);
- } else if (field->flags & VMS_STRUCT) {
- ret = vmstate_save_state(f, field->vmsd, curr_elem,
- vmdesc_loop);
- } else if (field->flags & VMS_VSTRUCT) {
- ret = vmstate_save_state_v(f, field->vmsd, curr_elem,
- vmdesc_loop,
- field->struct_version_id, errp);
+ /*
+ * If null pointer found (which should only happen in
+ * an array of pointers), use null placeholder and do
+ * not follow.
+ */
+ inner_field = vmsd_create_fake_nullptr_field(field);
+ is_null = true;
+ } else {
+ inner_field = field;
+ is_null = false;
+ }
+
+ /*
+ * This logic only matters when dumping VM Desc.
+ *
+ * Due to the fake nullptr handling above, if there's mixed
+ * null/non-null data, it doesn't make sense to emit a
+ * compressed array representation spanning the entire array
+ * because the field types will be different (e.g. struct
+ * vs. nullptr). Search ahead for the next null/non-null element
+ * and start a new compressed array if found.
+ */
+ if (vmdesc && (field->flags & VMS_ARRAY_OF_POINTER) &&
+ is_null != is_prev_null) {
+
+ is_prev_null = is_null;
+ vmdesc_loop = vmdesc;
+
+ for (int j = i + 1; j < n_elems; j++) {
+ void *elem = *(void **)(first_elem + size * j);
+ bool elem_is_null = !elem && size;
+
+ if (is_null != elem_is_null) {
+ max_elems = j - i;
+ break;
+ }
+ }
+ }
+
+ vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
+ i, max_elems);
+
+ if (inner_field->flags & VMS_STRUCT) {
+ ret = vmstate_save_state(f, inner_field->vmsd,
+ curr_elem, vmdesc_loop);
+ } else if (inner_field->flags & VMS_VSTRUCT) {
+ ret = vmstate_save_state_v(f, inner_field->vmsd,
+ curr_elem, vmdesc_loop,
+ inner_field->struct_version_id,
+ errp);
} else {
- ret = field->info->put(f, curr_elem, size, field,
- vmdesc_loop);
+ ret = inner_field->info->put(f, curr_elem, size,
+ inner_field, vmdesc_loop);
+ }
+
+ written_bytes = qemu_file_transferred(f) - old_offset;
+ vmsd_desc_field_end(vmsd, vmdesc_loop, inner_field,
+ written_bytes);
+
+ /* If we used a fake temp field.. free it now */
+ if (is_null) {
+ g_clear_pointer((gpointer *)&inner_field, g_free);
}
+
if (ret) {
error_setg(errp, "Save of field %s/%s failed",
vmsd->name, field->name);
@@ -418,9 +519,6 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
return ret;
}
- written_bytes = qemu_file_transferred(f) - old_offset;
- vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i);
-
/* Compressed arrays only care about the first element */
if (vmdesc_loop && vmsd_can_compress(field)) {
vmdesc_loop = NULL;
@@ -441,12 +539,13 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
json_writer_end_array(vmdesc);
}
- ret = vmstate_subsection_save(f, vmsd, opaque, vmdesc);
+ ret = vmstate_subsection_save(f, vmsd, opaque, vmdesc, errp);
if (vmsd->post_save) {
int ps_ret = vmsd->post_save(opaque);
- if (!ret) {
+ if (!ret && ps_ret) {
ret = ps_ret;
+ error_setg(errp, "post-save failed: %s", vmsd->name);
}
}
return ret;
@@ -518,7 +617,8 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
}
static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, JSONWriter *vmdesc)
+ void *opaque, JSONWriter *vmdesc,
+ Error **errp)
{
const VMStateDescription * const *sub = vmsd->subsections;
bool vmdesc_has_subsections = false;
@@ -546,7 +646,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)vmsdsub->name, len);
qemu_put_be32(f, vmsdsub->version_id);
- ret = vmstate_save_state(f, vmsdsub, opaque, vmdesc);
+ ret = vmstate_save_state_with_err(f, vmsdsub, opaque, vmdesc, errp);
if (ret) {
return ret;
}
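The vmstate.c hunks above make both the load and the save loop substitute the
one-byte vmstate_info_nullptr placeholder for NULL entries of a
VMS_ARRAY_OF_POINTER field, and restart the vmdesc compressed-array run at
every NULL/non-NULL boundary. Below is a hedged illustration of the kind of
field this affects; Parent, Child and the layout are invented, and the macro
spelling is quoted from memory rather than from this patch.

typedef struct Child {
    uint32_t value;
} Child;

static const VMStateDescription vmstate_child = {
    .name = "child",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(value, Child),
        VMSTATE_END_OF_LIST()
    }
};

typedef struct Parent {
    Child *children[4];        /* some slots may legitimately stay NULL */
} Parent;

static const VMStateDescription vmstate_parent = {
    .name = "parent",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        /* A NULL children[i] is written as the single placeholder byte of
         * vmstate_info_nullptr, and the JSON vmdesc starts a new compressed
         * run at every NULL/non-NULL boundary. */
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(children, Parent, 4, 1,
                                           vmstate_child, Child),
        VMSTATE_END_OF_LIST()
    }
};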
diff --git a/monitor/fds.c b/monitor/fds.c
index b5416b5..cc35d2e 100644
--- a/monitor/fds.c
+++ b/monitor/fds.c
@@ -29,7 +29,7 @@
#include "qapi/qmp/qerror.h"
#include "qemu/ctype.h"
#include "qemu/cutils.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
/* file descriptors passed via SCM_RIGHTS */
typedef struct mon_fd_t mon_fd_t;
diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c
index ff01cf9..8eaf70d 100644
--- a/monitor/hmp-cmds-target.c
+++ b/monitor/hmp-cmds-target.c
@@ -24,13 +24,14 @@
#include "qemu/osdep.h"
#include "disas/disas.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "monitor/hmp-target.h"
#include "monitor/monitor-internal.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "sysemu/hw_accel.h"
+#include "qobject/qdict.h"
+#include "system/hw_accel.h"
+#include "exec/target_page.h"
/* Set the current CPU defined by the user. Callers must hold BQL. */
int monitor_set_cpu(Monitor *mon, int cpu_index)
@@ -301,7 +302,6 @@ void hmp_gpa2hva(Monitor *mon, const QDict *qdict)
void hmp_gva2gpa(Monitor *mon, const QDict *qdict)
{
target_ulong addr = qdict_get_int(qdict, "addr");
- MemTxAttrs attrs;
CPUState *cs = mon_get_cpu(mon);
hwaddr gpa;
@@ -310,7 +310,7 @@ void hmp_gva2gpa(Monitor *mon, const QDict *qdict)
return;
}
- gpa = cpu_get_phys_page_attrs_debug(cs, addr & TARGET_PAGE_MASK, &attrs);
+ gpa = cpu_get_phys_page_debug(cs, addr & TARGET_PAGE_MASK);
if (gpa == -1) {
monitor_printf(mon, "Unmapped\n");
} else {
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index f601d06..74a0f56 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -14,8 +14,8 @@
*/
#include "qemu/osdep.h"
-#include "exec/address-spaces.h"
-#include "exec/ioport.h"
+#include "system/address-spaces.h"
+#include "system/ioport.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "monitor/hmp.h"
@@ -25,10 +25,10 @@
#include "qapi/qapi-commands-control.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
bool hmp_handle_error(Monitor *mon, Error *err)
{
@@ -285,7 +285,7 @@ void hmp_gdbserver(Monitor *mon, const QDict *qdict)
device = "tcp::" DEFAULT_GDBSTUB_PORT;
}
- if (gdbserver_start(device) < 0) {
+ if (!gdbserver_start(device, &error_warn)) {
monitor_printf(mon, "Could not open gdbserver on device '%s'\n",
device);
} else if (strcmp(device, "none") == 0) {
@@ -431,6 +431,6 @@ void hmp_dumpdtb(Monitor *mon, const QDict *qdict)
return;
}
- monitor_printf(mon, "dtb dumped to %s", filename);
+ monitor_printf(mon, "DTB dumped to '%s'\n", filename);
}
#endif
diff --git a/monitor/hmp-target.c b/monitor/hmp-target.c
index 1eb72ac..37dfd7f 100644
--- a/monitor/hmp-target.c
+++ b/monitor/hmp-target.c
@@ -26,7 +26,7 @@
#include "monitor-internal.h"
#include "monitor/qdev.h"
#include "net/slirp.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "block/block-hmp-cmds.h"
diff --git a/monitor/hmp.c b/monitor/hmp.c
index 460e883..34e2b8f 100644
--- a/monitor/hmp.c
+++ b/monitor/hmp.c
@@ -27,15 +27,15 @@
#include "hw/qdev-core.h"
#include "monitor-internal.h"
#include "monitor/hmp.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/config-file.h"
#include "qemu/ctype.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/option.h"
#include "qemu/units.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "trace.h"
static void monitor_command_cb(void *opaque, const char *cmdline,
diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h
index cb628f6..5676eb3 100644
--- a/monitor/monitor-internal.h
+++ b/monitor/monitor-internal.h
@@ -28,10 +28,10 @@
#include "chardev/char-fe.h"
#include "monitor/monitor.h"
#include "qapi/qapi-types-control.h"
-#include "qapi/qmp/dispatch.h"
-#include "qapi/qmp/json-parser.h"
+#include "qapi/qmp-registry.h"
+#include "qobject/json-parser.h"
#include "qemu/readline.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
/*
* Supported types:
diff --git a/monitor/monitor.c b/monitor/monitor.c
index db52a9c..c5a5d30 100644
--- a/monitor/monitor.c
+++ b/monitor/monitor.c
@@ -28,10 +28,10 @@
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "trace.h"
/*
@@ -308,6 +308,7 @@ int error_printf_unless_qmp(const char *fmt, ...)
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
/* Limit guest-triggerable events to 1 per second */
[QAPI_EVENT_RTC_CHANGE] = { 1000 * SCALE_MS },
+ [QAPI_EVENT_BLOCK_IO_ERROR] = { 1000 * SCALE_MS },
[QAPI_EVENT_WATCHDOG] = { 1000 * SCALE_MS },
[QAPI_EVENT_BALLOON_CHANGE] = { 1000 * SCALE_MS },
[QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
@@ -493,7 +494,8 @@ static unsigned int qapi_event_throttle_hash(const void *key)
hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
}
- if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
+ if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE ||
+ evstate->event == QAPI_EVENT_BLOCK_IO_ERROR) {
hash += g_str_hash(qdict_get_str(evstate->data, "qom-path"));
}
@@ -519,7 +521,8 @@ static gboolean qapi_event_throttle_equal(const void *a, const void *b)
qdict_get_str(evb->data, "node-name"));
}
- if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
+ if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE ||
+ eva->event == QAPI_EVENT_BLOCK_IO_ERROR) {
return !strcmp(qdict_get_str(eva->data, "qom-path"),
qdict_get_str(evb->data, "qom-path"));
}
diff --git a/monitor/qemu-config-qmp.c b/monitor/qemu-config-qmp.c
index 24477a0..9a3b183 100644
--- a/monitor/qemu-config-qmp.c
+++ b/monitor/qemu-config-qmp.c
@@ -2,7 +2,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "hw/boards.h"
diff --git a/monitor/qmp-cmds-control.c b/monitor/qmp-cmds-control.c
index f21506e..150ca9f 100644
--- a/monitor/qmp-cmds-control.c
+++ b/monitor/qmp-cmds-control.c
@@ -1,5 +1,5 @@
/*
- * QMP commands related to the monitor (common to sysemu and tools)
+ * QMP commands related to the monitor (common to system and tools)
*
* Copyright (c) 2003-2004 Fabrice Bellard
*
diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c
index f84a0dc..1ca44fb 100644
--- a/monitor/qmp-cmds.c
+++ b/monitor/qmp-cmds.c
@@ -18,11 +18,11 @@
#include "monitor-internal.h"
#include "monitor/qdev.h"
#include "monitor/qmp-helpers.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/block-backend.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
+#include "system/runstate-action.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-init-commands.h"
#include "qapi/qapi-commands-control.h"
@@ -31,6 +31,7 @@
#include "qapi/type-helpers.h"
#include "hw/mem/memory-device.h"
#include "hw/intc/intc.h"
+#include "migration/misc.h"
NameInfo *qmp_query_name(Error **errp)
{
@@ -96,21 +97,18 @@ void qmp_cont(Error **errp)
}
}
- /* Continuing after completed migration. Images have been inactivated to
- * allow the destination to take control. Need to get control back now.
- *
- * If there are no inactive block nodes (e.g. because the VM was just
- * paused rather than completing a migration), bdrv_inactivate_all() simply
- * doesn't do anything. */
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
if (runstate_check(RUN_STATE_INMIGRATE)) {
autostart = 1;
} else {
+ /*
+ * Continuing after completed migration. Images have been
+ * inactivated to allow the destination to take control. Need to
+ * get control back now.
+ */
+ if (!migration_block_activate(&local_err)) {
+ error_propagate(errp, local_err);
+ return;
+ }
vm_start();
}
}
diff --git a/monitor/qmp.c b/monitor/qmp.c
index 5e538f3..cb99a12 100644
--- a/monitor/qmp.c
+++ b/monitor/qmp.c
@@ -28,9 +28,9 @@
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-control.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qlist.h"
#include "trace.h"
/*
@@ -356,7 +356,8 @@ void qmp_dispatcher_co_wake(void)
/* Write request before reading qmp_dispatcher_co_busy. */
smp_mb__before_rmw();
- if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
+ if (!qatomic_xchg(&qmp_dispatcher_co_busy, true) &&
+ qatomic_read(&qmp_dispatcher_co)) {
aio_co_wake(qmp_dispatcher_co);
}
}
diff --git a/nbd/client-connection.c b/nbd/client-connection.c
index f9da67c..79ea97e 100644
--- a/nbd/client-connection.c
+++ b/nbd/client-connection.c
@@ -31,6 +31,8 @@
#include "qapi/clone-visitor.h"
#include "qemu/coroutine.h"
+#include "nbd/nbd-internal.h"
+
struct NBDClientConnection {
/* Initialization constants, never change */
SocketAddress *saddr; /* address to connect to */
@@ -140,6 +142,7 @@ static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr,
return ret;
}
+ nbd_set_socket_send_buffer(sioc);
qio_channel_set_delay(QIO_CHANNEL(sioc), false);
if (!info) {
@@ -410,7 +413,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
*/
void nbd_co_establish_connection_cancel(NBDClientConnection *conn)
{
- Coroutine *wait_co;
+ Coroutine *wait_co = NULL;
WITH_QEMU_LOCK_GUARD(&conn->mutex) {
wait_co = g_steal_pointer(&conn->wait_co);
diff --git a/nbd/common.c b/nbd/common.c
index 589a748..2a133a6 100644
--- a/nbd/common.c
+++ b/nbd/common.c
@@ -18,6 +18,9 @@
#include "qemu/osdep.h"
#include "trace.h"
+#include "io/channel-socket.h"
+#include "qapi/error.h"
+#include "qemu/units.h"
#include "nbd-internal.h"
/* Discard length bytes from channel. Return -errno on failure and 0 on
@@ -264,3 +267,26 @@ const char *nbd_mode_lookup(NBDMode mode)
return "<unknown>";
}
}
+
+/*
+ * Testing shows that a 2 MiB send buffer is optimal. Changing the receive buffer
+ * size has no effect on performance.
+ * On Linux we need to increase net.core.wmem_max to make this effective.
+ */
+#if defined(__APPLE__) || defined(__linux__)
+#define UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE (2 * MiB)
+#endif
+
+void nbd_set_socket_send_buffer(QIOChannelSocket *sioc)
+{
+#ifdef UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE
+ if (sioc->localAddr.ss_family == AF_UNIX) {
+ size_t size = UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE;
+ Error *errp = NULL;
+
+ if (qio_channel_socket_set_send_buffer(sioc, size, &errp) < 0) {
+ warn_report_err(errp);
+ }
+ }
+#endif /* UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE */
+}
diff --git a/nbd/nbd-internal.h b/nbd/nbd-internal.h
index 9189510..6bafeef 100644
--- a/nbd/nbd-internal.h
+++ b/nbd/nbd-internal.h
@@ -10,7 +10,7 @@
#ifndef NBD_INTERNAL_H
#define NBD_INTERNAL_H
#include "block/nbd.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "io/channel-tls.h"
#include "qemu/iov.h"
@@ -74,4 +74,9 @@ static inline int nbd_write(QIOChannel *ioc, const void *buffer, size_t size,
int nbd_drop(QIOChannel *ioc, size_t size, Error **errp);
+/* nbd_set_socket_send_buffer
+ * Set the socket send buffer size for optimal performance.
+ */
+void nbd_set_socket_send_buffer(QIOChannelSocket *sioc);
+
#endif
diff --git a/nbd/server.c b/nbd/server.c
index 892797b..d242be9 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -124,12 +124,14 @@ struct NBDMetaContexts {
struct NBDClient {
int refcount; /* atomic */
void (*close_fn)(NBDClient *client, bool negotiated);
+ void *owner;
QemuMutex lock;
NBDExport *exp;
QCryptoTLSCreds *tlscreds;
char *tlsauthz;
+ uint32_t handshake_max_secs;
QIOChannelSocket *sioc; /* The underlying data channel */
QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
@@ -1148,8 +1150,8 @@ nbd_negotiate_meta_queries(NBDClient *client, Error **errp)
* Return:
* -errno on error, errp is set
* 0 on successful negotiation, errp is not set
- * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
- * errp is not set
+ * 1 if client sent NBD_OPT_ABORT (i.e. on valid disconnect) or never
+ * wrote anything (i.e. port probe); errp is not set
*/
static coroutine_fn int
nbd_negotiate_options(NBDClient *client, Error **errp)
@@ -1173,8 +1175,13 @@ nbd_negotiate_options(NBDClient *client, Error **errp)
... Rest of request
*/
- if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
- return -EIO;
+ /*
+ * Intentionally ignore errors on this first read - we do not want
+ * to be noisy about a mere port probe, but only for clients that
+ * start talking the protocol and then quit abruptly.
+ */
+ if (nbd_read32(client->ioc, &flags, "flags", NULL) < 0) {
+ return 1;
}
client->mode = NBD_MODE_EXPORT_NAME;
trace_nbd_negotiate_options_flags(flags);
@@ -1381,8 +1388,8 @@ nbd_negotiate_options(NBDClient *client, Error **errp)
* Return:
* -errno on error, errp is set
* 0 on successful negotiation, errp is not set
- * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
- * errp is not set
+ * 1 if client sent NBD_OPT_ABORT (i.e. on valid disconnect) or never
+ * wrote anything (i.e. port probe); errp is not set
*/
static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
{
@@ -1413,9 +1420,12 @@ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
stq_be_p(buf + 8, NBD_OPTS_MAGIC);
stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES);
- if (nbd_write(client->ioc, buf, 18, errp) < 0) {
- error_prepend(errp, "write failed: ");
- return -EINVAL;
+ /*
+ * Be silent about failure to write our greeting: there is nothing
+ * wrong with a client testing if our port is alive.
+ */
+ if (nbd_write(client->ioc, buf, 18, NULL) < 0) {
+ return 1;
}
ret = nbd_negotiate_options(client, errp);
if (ret != 0) {
@@ -1972,7 +1982,7 @@ static void nbd_export_request_shutdown(BlockExport *blk_exp)
blk_exp_ref(&exp->common);
/*
- * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a
+ * TODO: Should we expand QMP BlockExportRemoveMode enum to allow a
* close mode that stops advertising the export to new clients but
* still permits existing clients to run to completion? Because of
* that possibility, nbd_export_close() can be called more than
@@ -2016,6 +2026,7 @@ static void nbd_export_delete(BlockExport *blk_exp)
const BlockExportDriver blk_exp_nbd = {
.type = BLOCK_EXPORT_TYPE_NBD,
.instance_size = sizeof(NBDExport),
+ .supports_inactive = true,
.create = nbd_export_create,
.delete = nbd_export_delete,
.request_shutdown = nbd_export_request_shutdown,
@@ -2910,6 +2921,22 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
NBDExport *exp = client->exp;
char *msg;
size_t i;
+ bool inactive;
+
+ WITH_GRAPH_RDLOCK_GUARD() {
+ inactive = bdrv_is_inactive(blk_bs(exp->common.blk));
+ if (inactive) {
+ switch (request->type) {
+ case NBD_CMD_READ:
+ /* These commands are allowed on inactive nodes */
+ break;
+ default:
+ /* Return an error for the rest */
+ return nbd_send_generic_reply(client, request, -EPERM,
+ "export is inactive", errp);
+ }
+ }
+ }
switch (request->type) {
case NBD_CMD_CACHE:
@@ -3184,35 +3211,65 @@ static void nbd_client_receive_next_request(NBDClient *client)
}
}
+static void nbd_handshake_timer_cb(void *opaque)
+{
+ QIOChannel *ioc = opaque;
+
+ trace_nbd_handshake_timer_cb();
+ qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+}
+
static coroutine_fn void nbd_co_client_start(void *opaque)
{
NBDClient *client = opaque;
Error *local_err = NULL;
+ QEMUTimer *handshake_timer = NULL;
qemu_co_mutex_init(&client->send_lock);
+ /*
+ * Create a timer to bound the time spent in negotiation. If the
+ * timer expires, it is likely nbd_negotiate will fail because the
+     * socket was shut down.
+ */
+ if (client->handshake_max_secs > 0) {
+ handshake_timer = aio_timer_new(qemu_get_aio_context(),
+ QEMU_CLOCK_REALTIME,
+ SCALE_NS,
+ nbd_handshake_timer_cb,
+ client->sioc);
+ timer_mod(handshake_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
+ client->handshake_max_secs * NANOSECONDS_PER_SECOND);
+ }
+
if (nbd_negotiate(client, &local_err)) {
if (local_err) {
error_report_err(local_err);
}
+ timer_free(handshake_timer);
client_close(client, false);
return;
}
+ timer_free(handshake_timer);
WITH_QEMU_LOCK_GUARD(&client->lock) {
nbd_client_receive_next_request(client);
}
}
/*
- * Create a new client listener using the given channel @sioc.
+ * Create a new client listener using the given channel @sioc and @owner.
* Begin servicing it in a coroutine. When the connection closes, call
- * @close_fn with an indication of whether the client completed negotiation.
+ * @close_fn with an indication of whether the client completed negotiation
+ * within @handshake_max_secs seconds (0 for unbounded).
*/
void nbd_client_new(QIOChannelSocket *sioc,
+ uint32_t handshake_max_secs,
QCryptoTLSCreds *tlscreds,
const char *tlsauthz,
- void (*close_fn)(NBDClient *, bool))
+ void (*close_fn)(NBDClient *, bool),
+ void *owner)
{
NBDClient *client;
Coroutine *co;
@@ -3225,13 +3282,23 @@ void nbd_client_new(QIOChannelSocket *sioc,
object_ref(OBJECT(client->tlscreds));
}
client->tlsauthz = g_strdup(tlsauthz);
+ client->handshake_max_secs = handshake_max_secs;
client->sioc = sioc;
qio_channel_set_delay(QIO_CHANNEL(sioc), false);
object_ref(OBJECT(client->sioc));
client->ioc = QIO_CHANNEL(sioc);
object_ref(OBJECT(client->ioc));
client->close_fn = close_fn;
+ client->owner = owner;
+
+ nbd_set_socket_send_buffer(sioc);
co = qemu_coroutine_create(nbd_co_client_start, client);
qemu_coroutine_enter(co);
}
+
+void *
+nbd_client_owner(NBDClient *client)
+{
+ return client->owner;
+}
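nbd_client_new() now takes a handshake deadline and an opaque owner, which
nbd_client_owner() hands back later, for instance from the close callback.
Below is a hedged sketch of an accept path adapted to the new signature; the
listener callback shape, the server state type, the 10-second constant and
the helper are assumptions, only the nbd_client_new() and nbd_client_owner()
prototypes come from the patch.

#define MY_HANDSHAKE_MAX_SECS 10        /* assumed deadline; 0 = unbounded */

static void my_close_fn(NBDClient *client, bool negotiated)
{
    MyServerState *server = nbd_client_owner(client);

    my_server_client_gone(server, negotiated);     /* assumed helper */
}

static void my_accept(QIONetListener *listener, QIOChannelSocket *cioc,
                      void *opaque)
{
    MyServerState *server = opaque;

    /* Negotiation is cut off after MY_HANDSHAKE_MAX_SECS seconds. */
    nbd_client_new(cioc, MY_HANDSHAKE_MAX_SECS,
                   server->tlscreds, server->tlsauthz,
                   my_close_fn, server);
}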
diff --git a/nbd/trace-events b/nbd/trace-events
index 00ae321..cbd0a4a 100644
--- a/nbd/trace-events
+++ b/nbd/trace-events
@@ -76,6 +76,7 @@ nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload
nbd_co_receive_ext_payload_compliance(uint64_t from, uint64_t len) "client sent non-compliant write without payload flag: from=0x%" PRIx64 ", len=0x%" PRIx64
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32
nbd_trip(void) "Reading request"
+nbd_handshake_timer_cb(void) "client took too long to negotiate"
# client-connection.c
nbd_connect_thread_sleep(uint64_t timeout) "timeout %" PRIu64
diff --git a/net/can/can_core.c b/net/can/can_core.c
index 0115d78..77fe2b8 100644
--- a/net/can/can_core.c
+++ b/net/can/can_core.c
@@ -149,7 +149,7 @@ static bool can_bus_can_be_deleted(UserCreatable *uc)
}
static void can_bus_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass);
@@ -162,7 +162,7 @@ static const TypeInfo can_bus_info = {
.instance_size = sizeof(CanBusState),
.instance_init = can_bus_instance_init,
.class_init = can_bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/net/can/can_host.c b/net/can/can_host.c
index b2fe553..3f9bb33 100644
--- a/net/can/can_host.c
+++ b/net/can/can_host.c
@@ -72,7 +72,7 @@ static void can_host_complete(UserCreatable *uc, Error **errp)
}
static void can_host_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass);
@@ -92,7 +92,7 @@ static const TypeInfo can_host_info = {
.class_size = sizeof(CanHostClass),
.abstract = true,
.class_init = can_host_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/net/can/can_socketcan.c b/net/can/can_socketcan.c
index c1a1ad0..8a57ae0 100644
--- a/net/can/can_socketcan.c
+++ b/net/can/can_socketcan.c
@@ -308,7 +308,7 @@ static void can_host_socketcan_instance_init(Object *obj)
}
static void can_host_socketcan_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
CanHostClass *chc = CAN_HOST_CLASS(klass);
diff --git a/net/checksum.c b/net/checksum.c
index 1a957e4..537457d 100644
--- a/net/checksum.c
+++ b/net/checksum.c
@@ -57,7 +57,7 @@ uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
return net_checksum_finish(sum);
}
-void net_checksum_calculate(uint8_t *data, int length, int csum_flag)
+void net_checksum_calculate(void *data, int length, int csum_flag)
{
int mac_hdr_len, ip_len;
struct ip_header *ip;
@@ -101,7 +101,7 @@ void net_checksum_calculate(uint8_t *data, int length, int csum_flag)
return;
}
- ip = (struct ip_header *)(data + mac_hdr_len);
+ ip = (struct ip_header *)((uint8_t *)data + mac_hdr_len);
if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) {
return; /* not IPv4 */
diff --git a/net/colo-compare.c b/net/colo-compare.c
index c4ad0ab..0e1844e 100644
--- a/net/colo-compare.c
+++ b/net/colo-compare.c
@@ -25,7 +25,7 @@
#include "chardev/char-fe.h"
#include "qemu/sockets.h"
#include "colo.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "net/colo-compare.h"
#include "migration/colo.h"
#include "util.h"
@@ -412,8 +412,7 @@ static void colo_compare_tcp(CompareState *s, Connection *conn)
* can ensure that the packet's payload is acknowledged by
* primary and secondary.
*/
- uint32_t min_ack = conn->pack - conn->sack > 0 ?
- conn->sack : conn->pack;
+ uint32_t min_ack = MIN(conn->pack, conn->sack);
pri:
if (g_queue_is_empty(&conn->primary_list)) {
@@ -1329,8 +1328,6 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp)
}
QTAILQ_INSERT_TAIL(&net_compares, s, next);
qemu_mutex_unlock(&colo_compare_mutex);
-
- return;
}
static void colo_flush_packets(void *opaque, void *user_data)
@@ -1355,7 +1352,7 @@ static void colo_flush_packets(void *opaque, void *user_data)
}
}
-static void colo_compare_class_init(ObjectClass *oc, void *data)
+static void colo_compare_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -1479,7 +1476,7 @@ static const TypeInfo colo_compare_info = {
.instance_finalize = colo_compare_finalize,
.class_size = sizeof(CompareClass),
.class_init = colo_compare_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/net/dump.c b/net/dump.c
index 956e34a..581234b 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -32,7 +32,7 @@
#include "qapi/visitor.h"
#include "net/filter.h"
#include "qom/object.h"
-#include "sysemu/rtc.h"
+#include "system/rtc.h"
typedef struct DumpState {
int64_t start_ts;
@@ -155,7 +155,8 @@ static ssize_t filter_dump_receive_iov(NetFilterState *nf, NetClientState *sndr,
{
NetFilterDumpState *nfds = FILTER_DUMP(nf);
- dump_receive_iov(&nfds->ds, iov, iovcnt, qemu_get_vnet_hdr_len(nf->netdev));
+ dump_receive_iov(&nfds->ds, iov, iovcnt, flags & QEMU_NET_PACKET_FLAG_RAW ?
+ 0 : qemu_get_vnet_hdr_len(nf->netdev));
return 0;
}
@@ -233,7 +234,7 @@ static void filter_dump_instance_finalize(Object *obj)
g_free(nfds->filename);
}
-static void filter_dump_class_init(ObjectClass *oc, void *data)
+static void filter_dump_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
diff --git a/net/filter-buffer.c b/net/filter-buffer.c
index 283dc9c..a36be31 100644
--- a/net/filter-buffer.c
+++ b/net/filter-buffer.c
@@ -172,7 +172,7 @@ static void filter_buffer_set_interval(Object *obj, Visitor *v,
s->interval = value;
}
-static void filter_buffer_class_init(ObjectClass *oc, void *data)
+static void filter_buffer_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
diff --git a/net/filter-mirror.c b/net/filter-mirror.c
index 34a63b5..27734c9 100644
--- a/net/filter-mirror.c
+++ b/net/filter-mirror.c
@@ -410,7 +410,7 @@ static void filter_redirector_set_vnet_hdr(Object *obj,
s->vnet_hdr = value;
}
-static void filter_mirror_class_init(ObjectClass *oc, void *data)
+static void filter_mirror_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
@@ -425,7 +425,7 @@ static void filter_mirror_class_init(ObjectClass *oc, void *data)
nfc->receive_iov = filter_mirror_receive_iov;
}
-static void filter_redirector_class_init(ObjectClass *oc, void *data)
+static void filter_redirector_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
diff --git a/net/filter-replay.c b/net/filter-replay.c
index 5469067..451663c 100644
--- a/net/filter-replay.c
+++ b/net/filter-replay.c
@@ -17,7 +17,7 @@
#include "qemu/timer.h"
#include "qapi/visitor.h"
#include "net/filter.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qom/object.h"
#define TYPE_FILTER_REPLAY "filter-replay"
@@ -65,7 +65,7 @@ static void filter_replay_instance_finalize(Object *obj)
replay_unregister_net(nfrs->rns);
}
-static void filter_replay_class_init(ObjectClass *oc, void *data)
+static void filter_replay_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c
index c18c4c2..cdf85aa 100644
--- a/net/filter-rewriter.c
+++ b/net/filter-rewriter.c
@@ -411,7 +411,7 @@ static void filter_rewriter_init(Object *obj)
s->failover_mode = FAILOVER_MODE_OFF;
}
-static void colo_rewriter_class_init(ObjectClass *oc, void *data)
+static void colo_rewriter_class_init(ObjectClass *oc, const void *data)
{
NetFilterClass *nfc = NETFILTER_CLASS(oc);
diff --git a/net/filter.c b/net/filter.c
index 3335908..c7cc661 100644
--- a/net/filter.c
+++ b/net/filter.c
@@ -333,7 +333,7 @@ static void default_handle_event(NetFilterState *nf, int event, Error **errp)
}
}
-static void netfilter_class_init(ObjectClass *oc, void *data)
+static void netfilter_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
NetFilterClass *nfc = NETFILTER_CLASS(oc);
@@ -363,7 +363,7 @@ static const TypeInfo netfilter_info = {
.instance_size = sizeof(NetFilterState),
.instance_init = netfilter_init,
.instance_finalize = netfilter_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/net/hub.c b/net/hub.c
index 4c8a469..cba20eb 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -20,7 +20,7 @@
#include "hub.h"
#include "qemu/iov.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
/*
* A hub broadcasts incoming packets to all its ports except the source port.
@@ -194,31 +194,6 @@ NetClientState *net_hub_add_port(int hub_id, const char *name,
}
/**
- * Find a available port on a hub; otherwise create one new port
- */
-NetClientState *net_hub_port_find(int hub_id)
-{
- NetHub *hub;
- NetHubPort *port;
- NetClientState *nc;
-
- QLIST_FOREACH(hub, &hubs, next) {
- if (hub->id == hub_id) {
- QLIST_FOREACH(port, &hub->ports, next) {
- nc = port->nc.peer;
- if (!nc) {
- return &(port->nc);
- }
- }
- break;
- }
- }
-
- nc = net_hub_add_port(hub_id, NULL, NULL);
- return nc;
-}
-
-/**
* Print hub configuration
*/
void net_hub_info(Monitor *mon)
diff --git a/net/meson.build b/net/meson.build
index e0cd714..bb97b4d 100644
--- a/net/meson.build
+++ b/net/meson.build
@@ -39,7 +39,7 @@ if have_netmap
system_ss.add(files('netmap.c'))
endif
-system_ss.add(when: libxdp, if_true: files('af-xdp.c'))
+system_ss.add(when: [libxdp, libbpf], if_true: files('af-xdp.c'))
if have_vhost_net_user
system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c'))
diff --git a/net/net-hmp-cmds.c b/net/net-hmp-cmds.c
index 41d326b..e7c55d2 100644
--- a/net/net-hmp-cmds.c
+++ b/net/net-hmp-cmds.c
@@ -22,7 +22,7 @@
#include "qapi/clone-visitor.h"
#include "qapi/qapi-commands-net.h"
#include "qapi/qapi-visit-net.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/config-file.h"
#include "qemu/help_option.h"
#include "qemu/option.h"
diff --git a/net/net.c b/net/net.c
index 6938da0..39d6f28 100644
--- a/net/net.c
+++ b/net/net.c
@@ -36,7 +36,7 @@
#include "qemu/help_option.h"
#include "qapi/qapi-commands-net.h"
#include "qapi/qapi-visit-net.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
@@ -51,7 +51,7 @@
#include "qemu/keyval.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "net/colo-compare.h"
#include "net/filter.h"
#include "qapi/string-output-visitor.h"
@@ -381,9 +381,12 @@ NetClientState *qemu_get_peer(NetClientState *nc, int queue_index)
return ncs->peer;
}
-static void qemu_cleanup_net_client(NetClientState *nc)
+static void qemu_cleanup_net_client(NetClientState *nc,
+ bool remove_from_net_clients)
{
- QTAILQ_REMOVE(&net_clients, nc, next);
+ if (remove_from_net_clients) {
+ QTAILQ_REMOVE(&net_clients, nc, next);
+ }
if (nc->info->cleanup) {
nc->info->cleanup(nc);
@@ -425,7 +428,13 @@ void qemu_del_net_client(NetClientState *nc)
object_unparent(OBJECT(nf));
}
- /* If there is a peer NIC, delete and cleanup client, but do not free. */
+ /*
+ * If there is a peer NIC, transfer ownership to it. Delete the client
+ * from the net_clients list, but do not clean it up or free it yet. This
+ * way the NIC can still access members of the backend.
+ *
+ * The cleanup and free are done when the NIC itself is freed.
+ */
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
NICState *nic = qemu_get_nic(nc->peer);
if (nic->peer_deleted) {
@@ -435,21 +444,18 @@ void qemu_del_net_client(NetClientState *nc)
for (i = 0; i < queues; i++) {
ncs[i]->peer->link_down = true;
+ QTAILQ_REMOVE(&net_clients, ncs[i], next);
}
if (nc->peer->info->link_status_changed) {
nc->peer->info->link_status_changed(nc->peer);
}
- for (i = 0; i < queues; i++) {
- qemu_cleanup_net_client(ncs[i]);
- }
-
return;
}
for (i = 0; i < queues; i++) {
- qemu_cleanup_net_client(ncs[i]);
+ qemu_cleanup_net_client(ncs[i], true);
qemu_free_net_client(ncs[i]);
}
}
@@ -462,8 +468,12 @@ void qemu_del_nic(NICState *nic)
for (i = 0; i < queues; i++) {
NetClientState *nc = qemu_get_subqueue(nic, i);
- /* If this is a peer NIC and peer has already been deleted, free it now. */
+ /*
+ * If this is a peer NIC and peer has already been deleted, clean it up
+ * and free it now.
+ */
if (nic->peer_deleted) {
+ qemu_cleanup_net_client(nc->peer, false);
qemu_free_net_client(nc->peer);
} else if (nc->peer) {
/* if there are RX packets pending, complete them */
@@ -474,7 +484,7 @@ void qemu_del_nic(NICState *nic)
for (i = queues - 1; i >= 0; i--) {
NetClientState *nc = qemu_get_subqueue(nic, i);
- qemu_cleanup_net_client(nc);
+ qemu_cleanup_net_client(nc, true);
qemu_free_net_client(nc);
}
@@ -542,6 +552,10 @@ void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int qemu_get_vnet_hdr_len(NetClientState *nc)
{
+ if (!nc) {
+ return 0;
+ }
+
return nc->vnet_hdr_len;
}
@@ -750,16 +764,6 @@ ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size)
return qemu_net_queue_receive(nc->incoming_queue, buf, size);
}
-ssize_t qemu_receive_packet_iov(NetClientState *nc, const struct iovec *iov,
- int iovcnt)
-{
- if (!qemu_can_receive_packet(nc)) {
- return 0;
- }
-
- return qemu_net_queue_receive_iov(nc->incoming_queue, iov, iovcnt);
-}
-
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size)
{
return qemu_send_packet_async_with_flags(nc, QEMU_NET_PACKET_FLAG_RAW,
@@ -828,6 +832,7 @@ static ssize_t qemu_deliver_packet_iov(NetClientState *sender,
iov_copy[0].iov_len = nc->vnet_hdr_len;
memcpy(&iov_copy[1], iov, iovcnt * sizeof(*iov));
iov = iov_copy;
+ iovcnt++;
}
if (nc->info->receive_iov) {
@@ -1139,6 +1144,21 @@ NICInfo *qemu_find_nic_info(const char *typename, bool match_default,
return NULL;
}
+static bool is_nic_model_help_option(const char *model)
+{
+ if (model && is_help_option(model)) {
+ /*
+ * Trigger the help output by instantiating the hash table which
+ * will gather the available models as they get registered.
+ */
+ if (!nic_model_help) {
+ nic_model_help = g_hash_table_new_full(g_str_hash, g_str_equal,
+ g_free, NULL);
+ }
+ return true;
+ }
+ return false;
+}
/* "I have created a device. Please configure it if you can" */
bool qemu_configure_nic_device(DeviceState *dev, bool match_default,
@@ -1668,6 +1688,9 @@ void net_cleanup(void)
* of the latest NET_CLIENT_DRIVER_NIC, and operate on *p as we walk
* the list.
*
+ * However, the NIC may still own peers that need to be cleaned up here,
+ * for example if they have been removed with device_del.
+ *
* The 'nc' variable isn't part of the list traversal; it's purely
* for convenience as too much '(*p)->' has a tendency to make the
* readers' eyes bleed.
@@ -1675,6 +1698,17 @@ void net_cleanup(void)
while (*p) {
nc = *p;
if (nc->info->type == NET_CLIENT_DRIVER_NIC) {
+ NICState *nic = qemu_get_nic(nc);
+
+ if (nic->peer_deleted) {
+ int queues = MAX(nic->conf->peers.queues, 1);
+
+ for (int i = 0; i < queues; i++) {
+ nc = qemu_get_subqueue(nic, i);
+ qemu_cleanup_net_client(nc->peer, false);
+ }
+ }
+
/* Skip NET_CLIENT_DRIVER_NIC entries */
p = &QTAILQ_NEXT(nc, next);
} else {
@@ -1722,6 +1756,12 @@ void net_check_clients(void)
static int net_init_client(void *dummy, QemuOpts *opts, Error **errp)
{
+ const char *model = qemu_opt_get(opts, "model");
+
+ if (is_nic_model_help_option(model)) {
+ return 0;
+ }
+
return net_client_init(opts, false, errp);
}
@@ -1778,9 +1818,7 @@ static int net_param_nic(void *dummy, QemuOpts *opts, Error **errp)
memset(ni, 0, sizeof(*ni));
ni->model = qemu_opt_get_del(opts, "model");
- if (!nic_model_help && !g_strcmp0(ni->model, "help")) {
- nic_model_help = g_hash_table_new_full(g_str_hash, g_str_equal,
- g_free, NULL);
+ if (is_nic_model_help_option(ni->model)) {
return 0;
}
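
One of the net/net.c hunks above fixes qemu_deliver_packet_iov() to also bump iovcnt after prepending the vnet header element to the copied iovec array. A standalone sketch of that prepend-and-recount step, using only <sys/uio.h>; the helper and buffer names are invented for the example:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Prepend a header element to an iovec array; the element count must
     * grow by one, which is exactly what the hunk above corrects. */
    static int iov_prepend(struct iovec *dst, const struct iovec *src,
                           int srccnt, void *hdr, size_t hdr_len)
    {
        dst[0].iov_base = hdr;
        dst[0].iov_len = hdr_len;
        memcpy(&dst[1], src, srccnt * sizeof(*src));
        return srccnt + 1;      /* new count includes the header element */
    }

    int main(void)
    {
        char hdr[12] = { 0 };
        char payload[] = "packet payload";
        struct iovec in = { .iov_base = payload, .iov_len = sizeof(payload) };
        struct iovec out[2];
        int cnt = iov_prepend(out, &in, 1, hdr, sizeof(hdr));

        printf("iovcnt=%d, total=%zu bytes\n", cnt,
               out[0].iov_len + out[1].iov_len);
        return 0;
    }
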
diff --git a/net/queue.c b/net/queue.c
index c872d51..fb33856 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -193,17 +193,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue,
return qemu_net_queue_deliver(queue, NULL, 0, data, size);
}
-ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
- const struct iovec *iov,
- int iovcnt)
-{
- if (queue->delivering) {
- return 0;
- }
-
- return qemu_net_queue_deliver_iov(queue, NULL, 0, iov, iovcnt);
-}
-
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/net/slirp.c b/net/slirp.c
index eb9a456..9657e86 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -40,10 +40,10 @@
#include "qemu/sockets.h"
#include <libslirp.h>
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "util.h"
#include "migration/register.h"
#include "migration/vmstate.h"
@@ -247,7 +247,14 @@ static void net_slirp_timer_mod(void *timer, int64_t expire_timer,
timer_mod(&t->timer, expire_timer);
}
-static void net_slirp_register_poll_fd(int fd, void *opaque)
+#if !SLIRP_CHECK_VERSION(4, 9, 0)
+# define slirp_os_socket int
+# define slirp_pollfds_fill_socket slirp_pollfds_fill
+# define register_poll_socket register_poll_fd
+# define unregister_poll_socket unregister_poll_fd
+#endif
+
+static void net_slirp_register_poll_sock(slirp_os_socket fd, void *opaque)
{
#ifdef WIN32
AioContext *ctxt = qemu_get_aio_context();
@@ -260,7 +267,7 @@ static void net_slirp_register_poll_fd(int fd, void *opaque)
#endif
}
-static void net_slirp_unregister_poll_fd(int fd, void *opaque)
+static void net_slirp_unregister_poll_sock(slirp_os_socket fd, void *opaque)
{
#ifdef WIN32
if (WSAEventSelect(fd, NULL, 0) != 0) {
@@ -286,8 +293,8 @@ static const SlirpCb slirp_cb = {
#endif
.timer_free = net_slirp_timer_free,
.timer_mod = net_slirp_timer_mod,
- .register_poll_fd = net_slirp_register_poll_fd,
- .unregister_poll_fd = net_slirp_unregister_poll_fd,
+ .register_poll_socket = net_slirp_register_poll_sock,
+ .unregister_poll_socket = net_slirp_unregister_poll_sock,
.notify = net_slirp_notify,
};
@@ -314,7 +321,7 @@ static int slirp_poll_to_gio(int events)
return ret;
}
-static int net_slirp_add_poll(int fd, int events, void *opaque)
+static int net_slirp_add_poll(slirp_os_socket fd, int events, void *opaque)
{
GArray *pollfds = opaque;
GPollFD pfd = {
@@ -363,8 +370,8 @@ static void net_slirp_poll_notify(Notifier *notifier, void *data)
switch (poll->state) {
case MAIN_LOOP_POLL_FILL:
- slirp_pollfds_fill(s->slirp, &poll->timeout,
- net_slirp_add_poll, poll->pollfds);
+ slirp_pollfds_fill_socket(s->slirp, &poll->timeout,
+ net_slirp_add_poll, poll->pollfds);
break;
case MAIN_LOOP_POLL_OK:
case MAIN_LOOP_POLL_ERR:
@@ -629,7 +636,9 @@ static int net_slirp_init(NetClientState *peer, const char *model,
s = DO_UPCAST(SlirpState, nc, nc);
- cfg.version = SLIRP_CHECK_VERSION(4,7,0) ? 4 : 1;
+ cfg.version =
+ SLIRP_CHECK_VERSION(4, 9, 0) ? 6 :
+ SLIRP_CHECK_VERSION(4, 7, 0) ? 4 : 1;
cfg.restricted = restricted;
cfg.in_enabled = ipv4;
cfg.vnetwork = net;
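
The net/slirp.c hunk copes with the libslirp 4.9 API change (socket-typed poll callbacks and slirp_pollfds_fill_socket) by aliasing the new names back to the pre-4.9 ones when building against an older library. A standalone sketch of that version-gated shim pattern; LIB_CHECK_VERSION and the function names here are hypothetical stand-ins, not the real libslirp API:

    #include <stdio.h>

    /* Hypothetical version macros standing in for the real library's. */
    #define LIB_MAJOR 4
    #define LIB_MINOR 8
    #define LIB_CHECK_VERSION(maj, min) \
        (LIB_MAJOR > (maj) || (LIB_MAJOR == (maj) && LIB_MINOR >= (min)))

    static void lib_fill_fd(int fd)
    {
        printf("old fd-based API, fd=%d\n", fd);
    }

    #if !LIB_CHECK_VERSION(4, 9)
    /* Older library: sockets are plain ints and only the fd entry point
     * exists, so alias the new names onto the old ones.  With 4.9+ the
     * library itself would provide lib_os_socket and lib_fill_socket. */
    # define lib_os_socket   int
    # define lib_fill_socket lib_fill_fd
    #endif

    int main(void)
    {
        lib_os_socket s = 3;
        lib_fill_socket(s);     /* resolves to lib_fill_fd() before 4.9 */
        return 0;
    }
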
diff --git a/net/socket.c b/net/socket.c
index 8e3702e..784dda6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -157,7 +157,7 @@ static void net_socket_send(void *opaque)
NetSocketState *s = opaque;
int size;
int ret;
- uint8_t buf1[NET_BUFSIZE];
+ QEMU_UNINITIALIZED uint8_t buf1[NET_BUFSIZE];
const uint8_t *buf;
size = recv(s->fd, buf1, sizeof(buf1), 0);
diff --git a/net/stream.c b/net/stream.c
index 97e6ec6..6152d2a 100644
--- a/net/stream.c
+++ b/net/stream.c
@@ -51,7 +51,7 @@ typedef struct NetStreamState {
guint ioc_write_tag;
SocketReadState rs;
unsigned int send_index; /* number of bytes sent*/
- uint32_t reconnect;
+ uint32_t reconnect_ms;
guint timer_tag;
SocketAddress *addr;
} NetStreamState;
@@ -148,7 +148,7 @@ static gboolean net_stream_send(QIOChannel *ioc,
NetStreamState *s = data;
int size;
int ret;
- char buf1[NET_BUFSIZE];
+ QEMU_UNINITIALIZED char buf1[NET_BUFSIZE];
const char *buf;
size = qio_channel_read(s->ioc, buf1, sizeof(buf1), NULL);
@@ -387,10 +387,9 @@ static gboolean net_stream_reconnect(gpointer data)
static void net_stream_arm_reconnect(NetStreamState *s)
{
- if (s->reconnect && s->timer_tag == 0) {
+ if (s->reconnect_ms && s->timer_tag == 0) {
qemu_set_info_str(&s->nc, "connecting");
- s->timer_tag = g_timeout_add_seconds(s->reconnect,
- net_stream_reconnect, s);
+ s->timer_tag = g_timeout_add(s->reconnect_ms, net_stream_reconnect, s);
}
}
@@ -398,7 +397,7 @@ static int net_stream_client_init(NetClientState *peer,
const char *model,
const char *name,
SocketAddress *addr,
- uint32_t reconnect,
+ uint32_t reconnect_ms,
Error **errp)
{
NetStreamState *s;
@@ -412,8 +411,8 @@ static int net_stream_client_init(NetClientState *peer,
s->ioc = QIO_CHANNEL(sioc);
s->nc.link_down = true;
- s->reconnect = reconnect;
- if (reconnect) {
+ s->reconnect_ms = reconnect_ms;
+ if (reconnect_ms) {
s->addr = QAPI_CLONE(SocketAddress, addr);
}
qio_channel_socket_connect_async(sioc, addr,
@@ -432,13 +431,24 @@ int net_init_stream(const Netdev *netdev, const char *name,
sock = &netdev->u.stream;
if (!sock->has_server || !sock->server) {
+ uint32_t reconnect_ms = 0;
+
+ if (sock->has_reconnect && sock->has_reconnect_ms) {
+ error_setg(errp, "'reconnect' and 'reconnect-ms' are mutually "
+ "exclusive");
+ return -1;
+ } else if (sock->has_reconnect_ms) {
+ reconnect_ms = sock->reconnect_ms;
+ } else if (sock->has_reconnect) {
+ reconnect_ms = sock->reconnect * 1000u;
+ }
+
return net_stream_client_init(peer, "stream", name, sock->addr,
- sock->has_reconnect ? sock->reconnect : 0,
- errp);
+ reconnect_ms, errp);
}
- if (sock->has_reconnect) {
- error_setg(errp, "'reconnect' option is incompatible with "
- "socket in server mode");
+ if (sock->has_reconnect || sock->has_reconnect_ms) {
+ error_setg(errp, "'reconnect' and 'reconnect-ms' options are "
+ "incompatible with socket in server mode");
return -1;
}
return net_stream_server_init(peer, "stream", name, sock->addr, errp);
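
The net/stream.c change moves the reconnect timer from whole seconds (g_timeout_add_seconds) to milliseconds (g_timeout_add), converting a legacy reconnect value as reconnect * 1000. A minimal GLib sketch of the millisecond timer, assuming only glib-2.0 is available; the callback is a stand-in for net_stream_reconnect():

    #include <glib.h>
    #include <stdio.h>

    static gboolean try_reconnect(gpointer data)
    {
        GMainLoop *loop = data;

        printf("reconnect attempt fired\n");
        g_main_loop_quit(loop);
        return G_SOURCE_REMOVE; /* one-shot, re-armed on the next failure */
    }

    int main(void)
    {
        GMainLoop *loop = g_main_loop_new(NULL, FALSE);
        guint reconnect_ms = 500; /* previously limited to whole seconds */

        g_timeout_add(reconnect_ms, try_reconnect, loop);
        g_main_loop_run(loop);
        g_main_loop_unref(loop);
        return 0;
    }
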
diff --git a/net/tap-linux.c b/net/tap-linux.c
index 1226d5f..22ec2f4 100644
--- a/net/tap-linux.c
+++ b/net/tap-linux.c
@@ -45,10 +45,21 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr,
int len = sizeof(struct virtio_net_hdr);
unsigned int features;
- fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR));
+
+ ret = if_nametoindex(ifname);
+ if (ret) {
+ g_autofree char *file = g_strdup_printf("/dev/tap%d", ret);
+ fd = open(file, O_RDWR);
+ } else {
+ fd = -1;
+ }
+
if (fd < 0) {
- error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN);
- return -1;
+ fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR));
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN);
+ return -1;
+ }
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
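
The tap-linux.c hunk first treats ifname as a possible macvtap interface: if_nametoindex() maps the name to an index N, the character device /dev/tapN is opened, and only on failure does the code fall back to /dev/net/tun. A standalone sketch of that lookup on a Linux host; the interface name is just an example:

    #include <fcntl.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *ifname = "macvtap0"; /* example interface name */
        unsigned int idx = if_nametoindex(ifname);
        char path[32];

        if (idx == 0) {
            printf("%s not found; would fall back to /dev/net/tun\n", ifname);
            return 0;
        }
        snprintf(path, sizeof(path), "/dev/tap%u", idx);

        int fd = open(path, O_RDWR);
        printf("%s -> index %u, open(%s) returned %d\n", ifname, idx, path, fd);
        if (fd >= 0) {
            close(fd);
        }
        return 0;
    }
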
diff --git a/net/tap-win32.c b/net/tap-win32.c
index 7edbd71..671dee9 100644
--- a/net/tap-win32.c
+++ b/net/tap-win32.c
@@ -214,7 +214,7 @@ static int is_tap_win32_dev(const char *guid)
for (;;) {
char enum_name[256];
- char unit_string[256];
+ g_autofree char *unit_string = NULL;
HKEY unit_key;
char component_id_string[] = "ComponentId";
char component_id[256];
@@ -239,8 +239,7 @@ static int is_tap_win32_dev(const char *guid)
return FALSE;
}
- snprintf (unit_string, sizeof(unit_string), "%s\\%s",
- ADAPTER_KEY, enum_name);
+ unit_string = g_strdup_printf("%s\\%s", ADAPTER_KEY, enum_name);
status = RegOpenKeyEx(
HKEY_LOCAL_MACHINE,
@@ -315,7 +314,7 @@ static int get_device_guid(
while (!stop)
{
char enum_name[256];
- char connection_string[256];
+ g_autofree char *connection_string = NULL;
HKEY connection_key;
char name_data[256];
DWORD name_type;
@@ -338,9 +337,7 @@ static int get_device_guid(
return -1;
}
- snprintf(connection_string,
- sizeof(connection_string),
- "%s\\%s\\Connection",
+ connection_string = g_strdup_printf("%s\\%s\\Connection",
NETWORK_CONNECTIONS_KEY, enum_name);
status = RegOpenKeyEx(
@@ -595,7 +592,7 @@ static void tap_win32_free_buffer(tap_win32_overlapped_t *overlapped,
static int tap_win32_open(tap_win32_overlapped_t **phandle,
const char *preferred_name)
{
- char device_path[256];
+ g_autofree char *device_path = NULL;
char device_guid[0x100];
int rc;
HANDLE handle;
@@ -617,7 +614,7 @@ static int tap_win32_open(tap_win32_overlapped_t **phandle,
if (rc)
return -1;
- snprintf (device_path, sizeof(device_path), "%s%s%s",
+ device_path = g_strdup_printf("%s%s%s",
USERMODEDEVICEDIR,
device_guid,
TAPSUFFIX);
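
The tap-win32.c hunks replace fixed 256-byte stack buffers filled by snprintf() with heap strings built by g_strdup_printf() and released automatically through g_autofree when the variable leaves scope, removing the silent-truncation risk. A tiny GLib sketch of the idiom; the registry-style strings are placeholders:

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        const char *adapter_key = "SYSTEM\\ExampleAdapterKey"; /* placeholder */
        const char *enum_name = "0000";

        /* Freed automatically at end of scope; the length is never capped
         * at a fixed buffer size. */
        g_autofree char *unit_string = g_strdup_printf("%s\\%s",
                                                       adapter_key, enum_name);

        printf("%s\n", unit_string);
        return 0;
    }
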
diff --git a/net/tap.c b/net/tap.c
index 51f7aec..ae1c7e3 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -36,7 +36,7 @@
#include "net/net.h"
#include "clients.h"
#include "monitor/monitor.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
@@ -385,6 +385,24 @@ static TAPState *net_tap_fd_init(NetClientState *peer,
return s;
}
+static void close_all_fds_after_fork(int excluded_fd)
+{
+ const int skip_fd[] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO,
+ excluded_fd};
+ unsigned int nskip = ARRAY_SIZE(skip_fd);
+
+ /*
+ * skip_fd must be an ordered array of distinct fds; drop excluded_fd
+ * from the list if it already falls within the
+ * [STDIN_FILENO, STDERR_FILENO] range.
+ */
+ if (excluded_fd <= STDERR_FILENO) {
+ nskip--;
+ }
+
+ qemu_close_all_open_fd(skip_fd, nskip);
+}
+
static void launch_script(const char *setup_script, const char *ifname,
int fd, Error **errp)
{
@@ -400,13 +418,7 @@ static void launch_script(const char *setup_script, const char *ifname,
return;
}
if (pid == 0) {
- int open_max = sysconf(_SC_OPEN_MAX), i;
-
- for (i = 3; i < open_max; i++) {
- if (i != fd) {
- close(i);
- }
- }
+ close_all_fds_after_fork(fd);
parg = args;
*parg++ = (char *)setup_script;
*parg++ = (char *)ifname;
@@ -490,17 +502,11 @@ static int net_bridge_run_helper(const char *helper, const char *bridge,
return -1;
}
if (pid == 0) {
- int open_max = sysconf(_SC_OPEN_MAX), i;
char *fd_buf = NULL;
char *br_buf = NULL;
char *helper_cmd = NULL;
- for (i = 3; i < open_max; i++) {
- if (i != sv[1]) {
- close(i);
- }
- }
-
+ close_all_fds_after_fork(sv[1]);
fd_buf = g_strdup_printf("%s%d", "--fd=", sv[1]);
if (strrchr(helper, ' ') || strrchr(helper, '\t')) {
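
close_all_fds_after_fork() above hands a sorted skip list to QEMU's qemu_close_all_open_fd() instead of looping close() up to sysconf(_SC_OPEN_MAX). A simplified POSIX-only illustration of what that helper achieves, iterating /proc/self/fd on Linux; this is a sketch, not QEMU's implementation, which may use faster mechanisms such as close_range(2) where available:

    #include <dirent.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Close every open fd except stdio and 'keep_fd': roughly what the
     * child does between fork() and exec() in the hunk above. */
    static void close_fds_except(int keep_fd)
    {
        DIR *dir = opendir("/proc/self/fd");
        struct dirent *de;

        if (!dir) {
            return;
        }
        while ((de = readdir(dir)) != NULL) {
            if (de->d_name[0] == '.') {
                continue;       /* skip "." and ".." */
            }
            int fd = atoi(de->d_name);
            if (fd > STDERR_FILENO && fd != keep_fd && fd != dirfd(dir)) {
                close(fd);
            }
        }
        closedir(dir);
    }

    int main(void)
    {
        close_fds_except(-1);   /* keep only stdin/stdout/stderr */
        return 0;
    }
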
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 1255551..0b235e5 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -16,6 +16,7 @@
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-net.h"
+#include "qapi/qapi-events-net.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
@@ -271,6 +272,7 @@ static void chr_closed_bh(void *opaque)
if (err) {
error_report_err(err);
}
+ qapi_event_send_netdev_vhost_user_disconnected(name);
}
static void net_vhost_user_event(void *opaque, QEMUChrEvent event)
@@ -300,6 +302,7 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event)
net_vhost_user_watch, s);
qmp_set_link(name, true, &err);
s->started = true;
+ qapi_event_send_netdev_vhost_user_connected(name, chr->label);
break;
case CHR_EVENT_CLOSED:
/* a close event may happen during a read/write, but vhost
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index daa3842..58d7389 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -62,6 +62,7 @@ const int vdpa_feature_bits[] = {
VIRTIO_F_RING_PACKED,
VIRTIO_F_RING_RESET,
VIRTIO_F_VERSION_1,
+ VIRTIO_F_IN_ORDER,
VIRTIO_F_NOTIFICATION_DATA,
VIRTIO_NET_F_CSUM,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
@@ -87,6 +88,7 @@ const int vdpa_feature_bits[] = {
VIRTIO_NET_F_MQ,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_NET_F_MTU,
+ VIRTIO_NET_F_RSC_EXT,
VIRTIO_NET_F_RSS,
VIRTIO_NET_F_STATUS,
VIRTIO_RING_F_EVENT_IDX,
@@ -222,14 +224,6 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- /*
- * If a peer NIC is attached, do not cleanup anything.
- * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
- * when the guest is shutting down.
- */
- if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
- return;
- }
munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
if (s->vhost_net) {
@@ -241,6 +235,7 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
return;
}
qemu_close(s->vhost_vdpa.shared->device_fd);
+ g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
g_free(s->vhost_vdpa.shared);
}
@@ -268,6 +263,18 @@ static bool vhost_vdpa_has_ufo(NetClientState *nc)
}
+/*
+ * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
+ * reasonable to assume that h/w is LE by default, because LE is what
+ * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
+ * LE". Otherwise, on a BE machine, higher-level code would mistakely think
+ * the h/w is BE and can't support VDPA for a virtio 1.0 client.
+ */
+static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
+{
+ return 0;
+}
+
static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
Error **errp)
{
@@ -356,14 +363,8 @@ static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
- struct vhost_vdpa *v = &s->vhost_vdpa;
-
migration_add_notifier(&s->migration_state,
vdpa_net_migration_state_notifier);
- if (v->shadow_vqs_enabled) {
- v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
- v->shared->iova_range.last);
- }
}
static int vhost_vdpa_net_data_start(NetClientState *nc)
@@ -373,8 +374,7 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
- if (s->always_svq ||
- migration_is_setup_or_active()) {
+ if (s->always_svq || migration_is_running()) {
v->shadow_vqs_enabled = true;
} else {
v->shadow_vqs_enabled = false;
@@ -411,19 +411,12 @@ static int vhost_vdpa_net_data_load(NetClientState *nc)
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- struct vhost_dev *dev;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
if (s->vhost_vdpa.index == 0) {
migration_remove_notifier(&s->migration_state);
}
-
- dev = s->vhost_vdpa.dev;
- if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
- g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
- vhost_iova_tree_delete);
- }
}
static NetClientInfo net_vhost_vdpa_info = {
@@ -436,6 +429,7 @@ static NetClientInfo net_vhost_vdpa_info = {
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
.has_ufo = vhost_vdpa_has_ufo,
+ .set_vnet_le = vhost_vdpa_set_vnet_le,
.check_peer_type = vhost_vdpa_check_peer_type,
.set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
@@ -509,14 +503,20 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
bool write)
{
DMAMap map = {};
+ hwaddr taddr = (hwaddr)(uintptr_t)buf;
int r;
- map.translated_addr = (hwaddr)(uintptr_t)buf;
map.size = size - 1;
map.perm = write ? IOMMU_RW : IOMMU_RO,
- r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
if (unlikely(r != IOVA_OK)) {
error_report("Cannot map injected element");
+
+ if (map.translated_addr == taddr) {
+ error_report("Insertion to IOVA->HVA tree failed");
+ /* Remove the mapping from the IOVA-only tree */
+ goto dma_map_err;
+ }
return r;
}
@@ -588,24 +588,6 @@ out:
return 0;
}
- /*
- * If other vhost_vdpa already have an iova_tree, reuse it for simplicity,
- * whether CVQ shares ASID with guest or not, because:
- * - Memory listener need access to guest's memory addresses allocated in
- * the IOVA tree.
- * - There should be plenty of IOVA address space for both ASID not to
- * worry about collisions between them. Guest's translations are still
- * validated with virtio virtqueue_pop so there is no risk for the guest
- * to access memory that it shouldn't.
- *
- * To allocate a iova tree per ASID is doable but it complicates the code
- * and it is not worth it for the moment.
- */
- if (!v->shared->iova_tree) {
- v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
- v->shared->iova_range.last);
- }
-
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
vhost_vdpa_net_cvq_cmd_page_len(), false);
if (unlikely(r < 0)) {
@@ -642,7 +624,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
- r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
+ r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -1714,6 +1696,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
s->vhost_vdpa.shared->iova_range = iova_range;
s->vhost_vdpa.shared->shadow_data = svq;
+ s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
+ iova_range.last);
} else if (!is_datapath) {
s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
PROT_READ | PROT_WRITE,
diff --git a/net/vmnet-common.m b/net/vmnet-common.m
index 30c4e53..ab33ce2 100644
--- a/net/vmnet-common.m
+++ b/net/vmnet-common.m
@@ -17,7 +17,8 @@
#include "clients.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
+#include "net/eth.h"
#include <vmnet/vmnet.h>
#include <dispatch/dispatch.h>
@@ -93,7 +94,7 @@ ssize_t vmnet_receive_common(NetClientState *nc,
if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
if (if_status != VMNET_SUCCESS) {
- error_report("vmnet: write error: %s\n",
+ error_report("vmnet: write error: %s",
vmnet_status_map_str(if_status));
return -1;
}
@@ -147,10 +148,26 @@ static int vmnet_read_packets(VmnetState *s)
*/
static void vmnet_write_packets_to_qemu(VmnetState *s)
{
+ uint8_t *pkt;
+ size_t pktsz;
+ uint8_t min_pkt[ETH_ZLEN];
+ size_t min_pktsz;
+ ssize_t size;
+
while (s->packets_send_current_pos < s->packets_send_end_pos) {
- ssize_t size = qemu_send_packet_async(&s->nc,
- s->iov_buf[s->packets_send_current_pos].iov_base,
- s->packets_buf[s->packets_send_current_pos].vm_pkt_size,
+ pkt = s->iov_buf[s->packets_send_current_pos].iov_base;
+ pktsz = s->packets_buf[s->packets_send_current_pos].vm_pkt_size;
+
+ if (net_peer_needs_padding(&s->nc)) {
+ min_pktsz = sizeof(min_pkt);
+
+ if (eth_pad_short_frame(min_pkt, &min_pktsz, pkt, pktsz)) {
+ pkt = min_pkt;
+ pktsz = min_pktsz;
+ }
+ }
+
+ size = qemu_send_packet_async(&s->nc, pkt, pktsz,
vmnet_send_completed);
if (size == 0) {
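
The vmnet change pads short frames up to the Ethernet minimum (ETH_ZLEN, 60 bytes excluding the FCS) before handing them to a peer that needs padding, as other QEMU network backends already do. A simplified standalone version of that padding step; this shows the general idea, not QEMU's eth_pad_short_frame() itself:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_MIN_FRAME 60 /* minimum Ethernet frame length without FCS */

    static bool pad_short_frame(unsigned char *out, size_t *out_len,
                                const unsigned char *pkt, size_t pkt_len)
    {
        if (pkt_len >= ETH_MIN_FRAME || *out_len < ETH_MIN_FRAME) {
            return false;       /* nothing to do, or no room in 'out' */
        }
        memcpy(out, pkt, pkt_len);
        memset(out + pkt_len, 0, ETH_MIN_FRAME - pkt_len); /* zero padding */
        *out_len = ETH_MIN_FRAME;
        return true;
    }

    int main(void)
    {
        unsigned char pkt[42] = { 0 };
        unsigned char padded[ETH_MIN_FRAME];
        size_t len = sizeof(padded);

        if (pad_short_frame(padded, &len, pkt, sizeof(pkt))) {
            printf("padded %zu -> %zu bytes\n", sizeof(pkt), len);
        }
        return 0;
    }
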
diff --git a/os-posix.c b/os-posix.c
index 43f9a43..52925c2 100644
--- a/os-posix.c
+++ b/os-posix.c
@@ -32,7 +32,7 @@
#include "qemu/error-report.h"
#include "qemu/log.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "qemu/cutils.h"
#ifdef CONFIG_LINUX
@@ -327,18 +327,29 @@ void os_set_line_buffering(void)
setvbuf(stdout, NULL, _IOLBF, 0);
}
-int os_mlock(void)
+int os_mlock(bool on_fault)
{
#ifdef HAVE_MLOCKALL
int ret = 0;
+ int flags = MCL_CURRENT | MCL_FUTURE;
- ret = mlockall(MCL_CURRENT | MCL_FUTURE);
+ if (on_fault) {
+#ifdef HAVE_MLOCK_ONFAULT
+ flags |= MCL_ONFAULT;
+#else
+ error_report("mlockall: on_fault not supported");
+ return -EINVAL;
+#endif
+ }
+
+ ret = mlockall(flags);
if (ret < 0) {
error_report("mlockall: %s", strerror(errno));
}
return ret;
#else
+ (void)on_fault;
return -ENOSYS;
#endif
}
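
os_mlock() now optionally adds MCL_ONFAULT, so pages are pinned lazily when first touched instead of all up front, and reports -EINVAL where the flag is unavailable. A standalone Linux sketch of the same call shape, guarded so it still builds where MCL_ONFAULT (Linux 4.4+) is missing; it needs CAP_IPC_LOCK or a generous RLIMIT_MEMLOCK to succeed:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static int lock_memory(int on_fault)
    {
        int flags = MCL_CURRENT | MCL_FUTURE;

        if (on_fault) {
    #ifdef MCL_ONFAULT
            flags |= MCL_ONFAULT; /* pin pages lazily, on first fault */
    #else
            fprintf(stderr, "MCL_ONFAULT not supported on this system\n");
            return -EINVAL;
    #endif
        }
        if (mlockall(flags) < 0) {
            fprintf(stderr, "mlockall: %s\n", strerror(errno));
            return -errno;
        }
        return 0;
    }

    int main(void)
    {
        return lock_memory(1) ? 1 : 0;
    }
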
diff --git a/os-wasm.c b/os-wasm.c
new file mode 100644
index 0000000..d240c18
--- /dev/null
+++ b/os-wasm.c
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * os-wasm.c
+ * Forked from os-posix.c, removing functions not working on Emscripten
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <pwd.h>
+#include <grp.h>
+#include <libgen.h>
+
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "system/runstate.h"
+#include "qemu/cutils.h"
+
+void os_setup_post(void){}
+void os_set_line_buffering(void)
+{
+ setvbuf(stdout, NULL, _IOLBF, 0);
+}
+void os_setup_early_signal_handling(void)
+{
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = SIG_IGN;
+ sigaction(SIGPIPE, &act, NULL);
+}
+void os_set_proc_name(const char *s)
+{
+ error_report("Change of process name not supported by your OS");
+ exit(1);
+}
+static void termsig_handler(int signal, siginfo_t *info, void *c)
+{
+ qemu_system_killed(info->si_signo, info->si_pid);
+}
+
+void os_setup_signal_handling(void)
+{
+ struct sigaction act;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_sigaction = termsig_handler;
+ act.sa_flags = SA_SIGINFO;
+ sigaction(SIGINT, &act, NULL);
+ sigaction(SIGHUP, &act, NULL);
+ sigaction(SIGTERM, &act, NULL);
+}
+void os_setup_limits(void)
+{
+ struct rlimit nofile;
+
+ if (getrlimit(RLIMIT_NOFILE, &nofile) < 0) {
+ warn_report("unable to query NOFILE limit: %s", strerror(errno));
+ return;
+ }
+
+ if (nofile.rlim_cur == nofile.rlim_max) {
+ return;
+ }
+
+ nofile.rlim_cur = nofile.rlim_max;
+
+ if (setrlimit(RLIMIT_NOFILE, &nofile) < 0) {
+ warn_report("unable to set NOFILE limit: %s", strerror(errno));
+ return;
+ }
+}
+int os_mlock(bool on_fault)
+{
+#ifdef HAVE_MLOCKALL
+ int ret = 0;
+ int flags = MCL_CURRENT | MCL_FUTURE;
+
+ if (on_fault) {
+#ifdef HAVE_MLOCK_ONFAULT
+ flags |= MCL_ONFAULT;
+#else
+ error_report("mlockall: on_fault not supported");
+ return -EINVAL;
+#endif
+ }
+
+ ret = mlockall(flags);
+ if (ret < 0) {
+ error_report("mlockall: %s", strerror(errno));
+ }
+
+ return ret;
+#else
+ (void)on_fault;
+ return -ENOSYS;
+#endif
+}
diff --git a/os-win32.c b/os-win32.c
index 725ad65..c1bff80 100644
--- a/os-win32.c
+++ b/os-win32.c
@@ -26,7 +26,7 @@
#include "qemu/osdep.h"
#include <windows.h>
#include <mmsystem.h>
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
static BOOL WINAPI qemu_ctrl_handler(DWORD type)
{
diff --git a/page-target.c b/page-target.c
index 82211c8..8fcd544 100644
--- a/page-target.c
+++ b/page-target.c
@@ -8,29 +8,6 @@
#include "qemu/osdep.h"
#include "exec/target_page.h"
-#include "exec/cpu-defs.h"
-#include "cpu.h"
-#include "exec/cpu-all.h"
-
-size_t qemu_target_page_size(void)
-{
- return TARGET_PAGE_SIZE;
-}
-
-int qemu_target_page_mask(void)
-{
- return TARGET_PAGE_MASK;
-}
-
-int qemu_target_page_bits(void)
-{
- return TARGET_PAGE_BITS;
-}
-
-int qemu_target_page_bits_min(void)
-{
- return TARGET_PAGE_BITS_MIN;
-}
/* Convert target pages to MiB (2**20). */
size_t qemu_target_pages_to_MiB(size_t pages)
diff --git a/page-vary-target.c b/page-vary-target.c
index 343b4ad..49a32b4 100644
--- a/page-vary-target.c
+++ b/page-vary-target.c
@@ -21,12 +21,47 @@
#include "qemu/osdep.h"
#include "exec/page-vary.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
-bool set_preferred_target_page_bits(int bits)
+
+/*
+ * For system mode, the minimum comes from the number of bits
+ * required for maximum alignment (6) and the number of bits
+ * required for TLB_FLAGS_MASK (3).
+ *
+ * For user mode, TARGET_PAGE_BITS_VARY is a hack to allow the target
+ * page size to match the host page size. Mostly, this reduces the
+ * ordinary target page size to run on a host with 4KiB pages (i.e. x86).
+ * There is no true minimum required by the implementation, but keep the
+ * same minimum as for system mode for sanity.
+ * See linux-user/mmap.c, mmap_h_lt_g and mmap_h_gt_g.
+ */
+#define TARGET_PAGE_BITS_MIN 9
+
+#ifndef TARGET_PAGE_BITS_VARY
+QEMU_BUILD_BUG_ON(TARGET_PAGE_BITS < TARGET_PAGE_BITS_MIN);
+#endif
+
+#ifndef CONFIG_USER_ONLY
+#include "exec/tlb-flags.h"
+
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ~((1u << TARGET_PAGE_BITS_MIN) - 1));
+
+int migration_legacy_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
+ QEMU_BUILD_BUG_ON(TARGET_PAGE_BITS_LEGACY < TARGET_PAGE_BITS_MIN);
+ return TARGET_PAGE_BITS_LEGACY;
+#else
+ return TARGET_PAGE_BITS;
+#endif
+}
+#endif
+
+bool set_preferred_target_page_bits(int bits)
+{
assert(bits >= TARGET_PAGE_BITS_MIN);
+#ifdef TARGET_PAGE_BITS_VARY
return set_preferred_target_page_bits_common(bits);
#else
return true;
@@ -35,7 +70,12 @@ bool set_preferred_target_page_bits(int bits)
void finalize_target_page_bits(void)
{
-#ifdef TARGET_PAGE_BITS_VARY
- finalize_target_page_bits_common(TARGET_PAGE_BITS_MIN);
+#ifndef TARGET_PAGE_BITS_VARY
+ finalize_target_page_bits_common(TARGET_PAGE_BITS);
+#elif defined(CONFIG_USER_ONLY)
+ assert(target_page.bits != 0);
+ finalize_target_page_bits_common(target_page.bits);
+#else
+ finalize_target_page_bits_common(TARGET_PAGE_BITS_LEGACY);
#endif
}
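
The page-vary-target.c comment above derives TARGET_PAGE_BITS_MIN = 9 from 6 bits of maximum access alignment plus 3 bits of TLB flags, and backs it with a compile-time check against TLB_FLAGS_MASK. A standalone sketch of expressing that kind of constraint with C11 _Static_assert; the constant values below are illustrative only, not QEMU's definitions:

    #include <stdio.h>

    /* Illustrative values: three flag bits sitting at positions 6..8. */
    #define PAGE_BITS_MIN  9
    #define TLB_FLAGS_MASK 0x1c0u

    /* No flag bit may reach the smallest possible page size, so the flags
     * always fit in the sub-page bits of a page-aligned TLB comparator. */
    _Static_assert((TLB_FLAGS_MASK & ~((1u << PAGE_BITS_MIN) - 1)) == 0,
                   "TLB flags must fit below the minimum page bits");

    int main(void)
    {
        printf("page offset mask: 0x%x\n", (1u << PAGE_BITS_MIN) - 1);
        return 0;
    }
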
diff --git a/pc-bios/README b/pc-bios/README
index 7ffb2f4..d009c37 100644
--- a/pc-bios/README
+++ b/pc-bios/README
@@ -13,8 +13,8 @@
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
implementation for certain IBM POWER hardware. The sources are at
- https://github.com/aik/SLOF, and the image currently in qemu is
- built from git tag qemu-slof-20230918.
+ https://gitlab.com/slof/slof, and the image currently in qemu is
+ built from git tag qemu-slof-20241106.
- VOF (Virtual Open Firmware) is a minimalistic firmware to work with
-machine pseries,x-vof=on. When enabled, the firmware acts as a slim shim and
@@ -43,6 +43,19 @@
run an hypervisor OS or simply a host OS on the "baremetal"
platform, also known as the PowerNV (Non-Virtualized) platform.
+- pnv-pnor.bin is a non-volatile RAM image used by PowerNV, which stores
+ NVRAM BIOS settings among other things. This image was created with the
+ following command (the ffspart tool can be found in the skiboot source tree):
+
+ ffspart -s 0x1000 -c 34 -i pnv-pnor.in -p pnv-pnor.bin
+
+ Where pnv-pnor.in contains the two lines (no leading whitespace):
+
+ NVRAM,0x01000,0x00020000,,,/dev/zero
+ VERSION,0x21000,0x00001000,,,/dev/zero
+
+ skiboot is then booted once to format the NVRAM partition.
+
- QemuMacDrivers (https://github.com/ozbenh/QemuMacDrivers) is a project to
provide virtualised drivers for PPC MacOS guests.
@@ -70,10 +83,16 @@
source code also contains code reused from other projects described here:
https://github.com/riscv/opensbi/blob/master/ThirdPartyNotices.md.
-- npcm7xx_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for Nuvoton
- NPCM7xx BMC devices. It currently implements the bare minimum to load, parse,
- initialize and run boot images stored in SPI flash, but may grow more
- features over time as needed. The source code is available at:
+- npcm{7xx,8xx}_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for
+ Nuvoton NPCM7xx/8xx BMC devices. It currently implements the bare minimum to
+ load, parse, initialize and run boot images stored in SPI flash, but may grow
+ more features over time as needed. The source code is available at:
+ https://github.com/google/vbootrom
+
+- ast27x0_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for
+ ASPEED AST27x0 BMC SOC. It currently implements the bare minimum to
+ load, parse, initialize and run boot images stored in SPI flash, but may grow
+ more features over time as needed. The source code is available at:
https://github.com/google/vbootrom
- hppa-firmware.img (32-bit) and hppa-firmware64.img (64-bit) are firmware
diff --git a/pc-bios/ast27x0_bootrom.bin b/pc-bios/ast27x0_bootrom.bin
new file mode 100644
index 0000000..0b9b3a2
--- /dev/null
+++ b/pc-bios/ast27x0_bootrom.bin
Binary files differ
diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin
index 48c3707..509f398 100644
--- a/pc-bios/bios-256k.bin
+++ b/pc-bios/bios-256k.bin
Binary files differ
diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin
index c98351e..4870015 100644
--- a/pc-bios/bios-microvm.bin
+++ b/pc-bios/bios-microvm.bin
Binary files differ
diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin
index 7e2d062..4b81a96 100644
--- a/pc-bios/bios.bin
+++ b/pc-bios/bios.bin
Binary files differ
diff --git a/pc-bios/descriptors/60-edk2-loongarch64.json b/pc-bios/descriptors/60-edk2-loongarch64.json
new file mode 100644
index 0000000..f174a1f
--- /dev/null
+++ b/pc-bios/descriptors/60-edk2-loongarch64.json
@@ -0,0 +1,31 @@
+{
+ "description": "UEFI firmware for loongarch64",
+ "interface-types": [
+ "uefi"
+ ],
+ "mapping": {
+ "device": "flash",
+ "executable": {
+ "filename": "@DATADIR@/edk2-loongarch64-code.fd",
+ "format": "raw"
+ },
+ "nvram-template": {
+ "filename": "@DATADIR@/edk2-loongarch64-vars.fd",
+ "format": "raw"
+ }
+ },
+ "targets": [
+ {
+ "architecture": "loongarch64",
+ "machines": [
+ "virt*"
+ ]
+ }
+ ],
+ "features": [
+
+ ],
+ "tags": [
+
+ ]
+}
diff --git a/pc-bios/descriptors/60-edk2-riscv64.json b/pc-bios/descriptors/60-edk2-riscv64.json
new file mode 100644
index 0000000..14811ca
--- /dev/null
+++ b/pc-bios/descriptors/60-edk2-riscv64.json
@@ -0,0 +1,31 @@
+{
+ "description": "UEFI firmware for riscv64",
+ "interface-types": [
+ "uefi"
+ ],
+ "mapping": {
+ "device": "flash",
+ "executable": {
+ "filename": "@DATADIR@/edk2-riscv-code.fd",
+ "format": "raw"
+ },
+ "nvram-template": {
+ "filename": "@DATADIR@/edk2-riscv-vars.fd",
+ "format": "raw"
+ }
+ },
+ "targets": [
+ {
+ "architecture": "riscv64",
+ "machines": [
+ "virt*"
+ ]
+ }
+ ],
+ "features": [
+
+ ],
+ "tags": [
+
+ ]
+}
diff --git a/pc-bios/descriptors/60-edk2-x86_64.json b/pc-bios/descriptors/60-edk2-x86_64.json
index 968cb65..4599c63 100644
--- a/pc-bios/descriptors/60-edk2-x86_64.json
+++ b/pc-bios/descriptors/60-edk2-x86_64.json
@@ -26,6 +26,7 @@
"features": [
"acpi-s3",
"amd-sev",
+ "amd-sev-es",
"verbose-dynamic"
],
"tags": [
diff --git a/pc-bios/descriptors/meson.build b/pc-bios/descriptors/meson.build
index 66f85d0..cdd0be0 100644
--- a/pc-bios/descriptors/meson.build
+++ b/pc-bios/descriptors/meson.build
@@ -5,7 +5,9 @@ if unpack_edk2_blobs and get_option('install_blobs')
'60-edk2-aarch64.json',
'60-edk2-arm.json',
'60-edk2-i386.json',
- '60-edk2-x86_64.json'
+ '60-edk2-x86_64.json',
+ '60-edk2-loongarch64.json',
+ '60-edk2-riscv64.json'
]
configure_file(input: files(f),
output: f,
diff --git a/pc-bios/bamboo.dtb b/pc-bios/dtb/bamboo.dtb
index d12e201..d12e201 100644
--- a/pc-bios/bamboo.dtb
+++ b/pc-bios/dtb/bamboo.dtb
Binary files differ
diff --git a/pc-bios/bamboo.dts b/pc-bios/dtb/bamboo.dts
index 62fabcc..62fabcc 100644
--- a/pc-bios/bamboo.dts
+++ b/pc-bios/dtb/bamboo.dts
diff --git a/pc-bios/canyonlands.dtb b/pc-bios/dtb/canyonlands.dtb
index 9dce344..9dce344 100644
--- a/pc-bios/canyonlands.dtb
+++ b/pc-bios/dtb/canyonlands.dtb
Binary files differ
diff --git a/pc-bios/canyonlands.dts b/pc-bios/dtb/canyonlands.dts
index 0d6ac92..0d6ac92 100644
--- a/pc-bios/canyonlands.dts
+++ b/pc-bios/dtb/canyonlands.dts
diff --git a/pc-bios/dtb/meson.build b/pc-bios/dtb/meson.build
new file mode 100644
index 0000000..9930329
--- /dev/null
+++ b/pc-bios/dtb/meson.build
@@ -0,0 +1,23 @@
+dtbs = [
+ 'bamboo.dtb',
+ 'canyonlands.dtb',
+ 'petalogix-ml605.dtb',
+ 'petalogix-s3adsp1800.dtb',
+]
+
+dtc = find_program('dtc', required: false)
+if dtc.found()
+ foreach out : dtbs
+ f = fs.replace_suffix(out, '.dts')
+ custom_target(out,
+ build_by_default: have_system,
+ input: files(f),
+ output: out,
+ install: get_option('install_blobs'),
+ install_dir: qemu_datadir / 'dtb',
+ command: [ dtc, '-q', '-I', 'dts', '-O', 'dtb',
+ '-o', '@OUTPUT@', '@INPUT0@' ])
+ endforeach
+else
+ install_data(dtbs, install_dir: qemu_datadir / 'dtb')
+endif
diff --git a/pc-bios/petalogix-ml605.dtb b/pc-bios/dtb/petalogix-ml605.dtb
index 9a05434..9a05434 100644
--- a/pc-bios/petalogix-ml605.dtb
+++ b/pc-bios/dtb/petalogix-ml605.dtb
Binary files differ
diff --git a/pc-bios/petalogix-ml605.dts b/pc-bios/dtb/petalogix-ml605.dts
index b307a29..b307a29 100644
--- a/pc-bios/petalogix-ml605.dts
+++ b/pc-bios/dtb/petalogix-ml605.dts
diff --git a/pc-bios/petalogix-s3adsp1800.dtb b/pc-bios/dtb/petalogix-s3adsp1800.dtb
index 2513599..2513599 100644
--- a/pc-bios/petalogix-s3adsp1800.dtb
+++ b/pc-bios/dtb/petalogix-s3adsp1800.dtb
Binary files differ
diff --git a/pc-bios/petalogix-s3adsp1800.dts b/pc-bios/dtb/petalogix-s3adsp1800.dts
index f53c36f..f53c36f 100644
--- a/pc-bios/petalogix-s3adsp1800.dts
+++ b/pc-bios/dtb/petalogix-s3adsp1800.dts
diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2
index e763982..2ce728c 100644
--- a/pc-bios/edk2-aarch64-code.fd.bz2
+++ b/pc-bios/edk2-aarch64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2
index 329646d..9b98490 100644
--- a/pc-bios/edk2-arm-code.fd.bz2
+++ b/pc-bios/edk2-arm-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2
index 271ce65..50c9869 100644
--- a/pc-bios/edk2-i386-code.fd.bz2
+++ b/pc-bios/edk2-i386-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2
index 00335cd..d58c16f 100644
--- a/pc-bios/edk2-i386-secure-code.fd.bz2
+++ b/pc-bios/edk2-i386-secure-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-loongarch64-code.fd.bz2 b/pc-bios/edk2-loongarch64-code.fd.bz2
new file mode 100644
index 0000000..ba12bc9
--- /dev/null
+++ b/pc-bios/edk2-loongarch64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-loongarch64-vars.fd.bz2 b/pc-bios/edk2-loongarch64-vars.fd.bz2
new file mode 100644
index 0000000..8a13571
--- /dev/null
+++ b/pc-bios/edk2-loongarch64-vars.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-riscv-code.fd.bz2 b/pc-bios/edk2-riscv-code.fd.bz2
index f3a98d6..f4e243d 100644
--- a/pc-bios/edk2-riscv-code.fd.bz2
+++ b/pc-bios/edk2-riscv-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2
index a1a8c05..cf043fc 100644
--- a/pc-bios/edk2-x86_64-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2
index 6b7cd54..c2b04f8 100644
--- a/pc-bios/edk2-x86_64-microvm.fd.bz2
+++ b/pc-bios/edk2-x86_64-microvm.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2
index ef40a8b..50f5b36 100644
--- a/pc-bios/edk2-x86_64-secure-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-secure-code.fd.bz2
Binary files differ
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
index e065e48..d5f6f2f 100755
--- a/pc-bios/hppa-firmware.img
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/pc-bios/hppa-firmware64.img b/pc-bios/hppa-firmware64.img
index 7f6d837..577b0a1 100755
--- a/pc-bios/hppa-firmware64.img
+++ b/pc-bios/hppa-firmware64.img
Binary files differ
diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build
index 0bd8ce0..a79a09b 100644
--- a/pc-bios/keymaps/meson.build
+++ b/pc-bios/keymaps/meson.build
@@ -39,19 +39,18 @@ else
native_qemu_keymap = qemu_keymap
endif
+keymap_targets = []
if native_qemu_keymap.found()
- t = []
foreach km, args: keymaps
# generate with qemu-kvm
- t += custom_target(km,
- build_by_default: true,
- output: km,
- command: [native_qemu_keymap, '-f', '@OUTPUT@', args.split()],
- install: have_system,
- install_dir: qemu_datadir / 'keymaps')
+ keymap_targets += custom_target(km,
+ build_by_default: true,
+ output: km,
+ command: [native_qemu_keymap, '-f', '@OUTPUT@', args.split()],
+ install: have_system,
+ install_dir: qemu_datadir / 'keymaps')
endforeach
-
- alias_target('update-keymaps', t)
+ alias_target('update-keymaps', keymap_targets)
else
install_data(keymaps.keys(), install_dir: qemu_datadir / 'keymaps')
endif
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
index 8602b45..3c41620 100644
--- a/pc-bios/meson.build
+++ b/pc-bios/meson.build
@@ -11,6 +11,8 @@ if unpack_edk2_blobs
'edk2-i386-vars.fd',
'edk2-x86_64-code.fd',
'edk2-x86_64-secure-code.fd',
+ 'edk2-loongarch64-code.fd',
+ 'edk2-loongarch64-vars.fd',
]
foreach f : fds
@@ -26,6 +28,7 @@ if unpack_edk2_blobs
endif
blobs = [
+ 'ast27x0_bootrom.bin',
'bios.bin',
'bios-256k.bin',
'bios-microvm.bin',
@@ -66,9 +69,9 @@ blobs = [
'kvmvapic.bin',
'pvh.bin',
's390-ccw.img',
- 's390-netboot.img',
'slof.bin',
'skiboot.lid',
+ 'pnv-pnor.bin',
'palcode-clipper',
'u-boot.e500',
'u-boot-sam460-20100605.bin',
@@ -79,34 +82,15 @@ blobs = [
'opensbi-riscv32-generic-fw_dynamic.bin',
'opensbi-riscv64-generic-fw_dynamic.bin',
'npcm7xx_bootrom.bin',
+ 'npcm8xx_bootrom.bin',
'vof.bin',
'vof-nvram.bin',
]
-dtc = find_program('dtc', required: false)
-foreach f : [
- 'bamboo.dts',
- 'canyonlands.dts',
- 'petalogix-s3adsp1800.dts',
- 'petalogix-ml605.dts',
-]
- out = fs.replace_suffix(f, '.dtb')
- if dtc.found()
- custom_target(f,
- build_by_default: have_system,
- input: files(f),
- output: out,
- install: get_option('install_blobs'),
- install_dir: qemu_datadir,
- command: [ dtc, '-I', 'dts', '-O', 'dtb', '-o', '@OUTPUT@', '@INPUT0@' ])
- else
- blobs += out
- endif
-endforeach
-
if get_option('install_blobs')
- install_data(blobs, install_dir: qemu_datadir)
+ install_data(blobs, install_dir: qemu_datadir, install_mode: 'rw-r--r--')
endif
subdir('descriptors')
+subdir('dtb')
subdir('keymaps')
diff --git a/pc-bios/npcm7xx_bootrom.bin b/pc-bios/npcm7xx_bootrom.bin
index 38f89d1..903f126 100644
--- a/pc-bios/npcm7xx_bootrom.bin
+++ b/pc-bios/npcm7xx_bootrom.bin
Binary files differ
diff --git a/pc-bios/npcm8xx_bootrom.bin b/pc-bios/npcm8xx_bootrom.bin
new file mode 100644
index 0000000..6370d64
--- /dev/null
+++ b/pc-bios/npcm8xx_bootrom.bin
Binary files differ
diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc
index 4af6002..6f472d4 100644
--- a/pc-bios/openbios-ppc
+++ b/pc-bios/openbios-ppc
Binary files differ
diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32
index 41b6a60..9679248 100644
--- a/pc-bios/openbios-sparc32
+++ b/pc-bios/openbios-sparc32
Binary files differ
diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64
index 902b4b3..0a13453 100644
--- a/pc-bios/openbios-sparc64
+++ b/pc-bios/openbios-sparc64
Binary files differ
diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
index 7ec260f..b2e7400 100644
--- a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
Binary files differ
diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
index 090c0cf..018b473 100644
--- a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
Binary files differ
diff --git a/pc-bios/pnv-pnor.bin b/pc-bios/pnv-pnor.bin
new file mode 100644
index 0000000..3e6f700
--- /dev/null
+++ b/pc-bios/pnv-pnor.bin
Binary files differ
diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img
index f0d9ef6..47240f0 100644
--- a/pc-bios/s390-ccw.img
+++ b/pc-bios/s390-ccw.img
Binary files differ
diff --git a/pc-bios/s390-ccw/Makefile b/pc-bios/s390-ccw/Makefile
index 6207911..dc69dd4 100644
--- a/pc-bios/s390-ccw/Makefile
+++ b/pc-bios/s390-ccw/Makefile
@@ -3,7 +3,8 @@ all: build-all
@true
include config-host.mak
-CFLAGS = -O2 -g
+CFLAGS = -O2 -g -I $(SRC_PATH)/../../include/hw/s390x/ipl
+LDFLAGS ?=
MAKEFLAGS += -rR
GIT_SUBMODULES = roms/SLOF
@@ -32,15 +33,21 @@ QEMU_DGFLAGS = -MMD -MP -MT $@ -MF $(@D)/$(*F).d
.PHONY : all clean build-all distclean
-OBJECTS = start.o main.o bootmap.o jump2ipl.o sclp.o menu.o \
- virtio.o virtio-scsi.o virtio-blkdev.o libc.o cio.o dasd-ipl.o
+OBJECTS = start.o main.o bootmap.o jump2ipl.o sclp.o menu.o netmain.o \
+ virtio.o virtio-net.o virtio-scsi.o virtio-blkdev.o cio.o dasd-ipl.o
+
+SLOF_DIR := $(SRC_PATH)/../../roms/SLOF
+
+LIBC_INC := -nostdinc -I$(SLOF_DIR)/lib/libc/include
+LIBNET_INC := -I$(SLOF_DIR)/lib/libnet
EXTRA_CFLAGS += -Wall
EXTRA_CFLAGS += -ffreestanding -fno-delete-null-pointer-checks -fno-common -fPIE
EXTRA_CFLAGS += -fwrapv -fno-strict-aliasing -fno-asynchronous-unwind-tables
EXTRA_CFLAGS += -msoft-float
EXTRA_CFLAGS += -std=gnu99
-LDFLAGS += -Wl,-pie -nostdlib -z noexecstack
+EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC)
+EXTRA_LDFLAGS += -Wl,-pie -nostdlib -z noexecstack -z text
cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>/dev/null
cc-option = if $(call cc-test, $1); then \
@@ -55,19 +62,64 @@ config-cc.mak: Makefile
$(call cc-option,-march=z900,-march=z10)) 3> config-cc.mak
-include config-cc.mak
-build-all: s390-ccw.img s390-netboot.img
+# libc files:
+
+LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \
+ -MMD -MP -MT $@ -MF $(@:%.o=%.d)
+
+CTYPE_OBJS = isdigit.o isxdigit.o toupper.o
+%.o : $(SLOF_DIR)/lib/libc/ctype/%.c
+ $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
+
+STRING_OBJS = strcat.o strchr.o strrchr.o strcpy.o strlen.o strncpy.o \
+ strcmp.o strncmp.o strcasecmp.o strncasecmp.o strstr.o \
+ memset.o memcpy.o memmove.o memcmp.o
+%.o : $(SLOF_DIR)/lib/libc/string/%.c
+ $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
+
+STDLIB_OBJS = atoi.o atol.o strtoul.o strtol.o rand.o malloc.o free.o
+%.o : $(SLOF_DIR)/lib/libc/stdlib/%.c
+ $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
+
+STDIO_OBJS = sprintf.o snprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \
+ printf.o putc.o puts.o putchar.o stdchnls.o fileno.o
+%.o : $(SLOF_DIR)/lib/libc/stdio/%.c
+ $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
+
+sbrk.o: $(SLOF_DIR)/slof/sbrk.c
+ $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
+
+LIBCOBJS := $(STRING_OBJS) $(CTYPE_OBJS) $(STDLIB_OBJS) $(STDIO_OBJS) sbrk.o
-s390-ccw.elf: $(OBJECTS)
- $(call quiet-command,$(CC) $(LDFLAGS) -o $@ $(OBJECTS),Linking)
+libc.a: $(LIBCOBJS)
+ $(call quiet-command,$(AR) -rc $@ $^,Creating static library)
+
+# libnet files:
+
+LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \
+ dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o
+LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \
+ -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d)
+
+%.o : $(SLOF_DIR)/lib/libnet/%.c
+ $(call quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,Compiling)
+
+libnet.a: $(LIBNETOBJS)
+ $(call quiet-command,$(AR) -rc $@ $^,Creating static library)
+
+# Main targets:
+
+build-all: s390-ccw.img
+
+s390-ccw.elf: $(OBJECTS) libnet.a libc.a
+ $(call quiet-command,$(CC) $(EXTRA_LDFLAGS) $(LDFLAGS) -o $@ $^,Linking)
s390-ccw.img: s390-ccw.elf
$(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into)
$(OBJECTS): Makefile
-include $(SRC_PATH)/netboot.mak
-
-ALL_OBJS = $(sort $(OBJECTS) $(NETOBJS) $(LIBCOBJS) $(LIBNETOBJS))
+ALL_OBJS = $(sort $(OBJECTS) $(LIBCOBJS) $(LIBNETOBJS))
-include $(ALL_OBJS:%.o=%.d)
clean:
diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c
index a213744..0f8baa0 100644
--- a/pc-bios/s390-ccw/bootmap.c
+++ b/pc-bios/s390-ccw/bootmap.c
@@ -8,7 +8,8 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
+#include <stdio.h>
#include "s390-ccw.h"
#include "s390-arch.h"
#include "bootmap.h"
@@ -21,7 +22,7 @@
#ifdef DEBUG_FALLBACK
#define dputs(txt) \
- do { sclp_print("zipl: " txt); } while (0)
+ do { printf("zipl: " txt); } while (0)
#else
#define dputs(fmt, ...) \
do { } while (0)
@@ -61,15 +62,34 @@ static void *s2_prev_blk = _s2;
static void *s2_cur_blk = _s2 + MAX_SECTOR_SIZE;
static void *s2_next_blk = _s2 + MAX_SECTOR_SIZE * 2;
-static inline void verify_boot_info(BootInfo *bip)
+static inline int verify_boot_info(BootInfo *bip)
{
- IPL_assert(magic_match(bip->magic, ZIPL_MAGIC), "No zIPL sig in BootInfo");
- IPL_assert(bip->version == BOOT_INFO_VERSION, "Wrong zIPL version");
- IPL_assert(bip->bp_type == BOOT_INFO_BP_TYPE_IPL, "DASD is not for IPL");
- IPL_assert(bip->dev_type == BOOT_INFO_DEV_TYPE_ECKD, "DASD is not ECKD");
- IPL_assert(bip->flags == BOOT_INFO_FLAGS_ARCH, "Not for this arch");
- IPL_assert(block_size_ok(bip->bp.ipl.bm_ptr.eckd.bptr.size),
- "Bad block size in zIPL section of the 1st record.");
+ if (!magic_match(bip->magic, ZIPL_MAGIC)) {
+ puts("No zIPL sig in BootInfo");
+ return -EINVAL;
+ }
+ if (bip->version != BOOT_INFO_VERSION) {
+ puts("Wrong zIPL version");
+ return -EINVAL;
+ }
+ if (bip->bp_type != BOOT_INFO_BP_TYPE_IPL) {
+ puts("DASD is not for IPL");
+ return -ENODEV;
+ }
+ if (bip->dev_type != BOOT_INFO_DEV_TYPE_ECKD) {
+ puts("DASD is not ECKD");
+ return -ENODEV;
+ }
+ if (bip->flags != BOOT_INFO_FLAGS_ARCH) {
+ puts("Not for this arch");
+ return -EINVAL;
+ }
+ if (!block_size_ok(bip->bp.ipl.bm_ptr.eckd.bptr.size)) {
+ puts("Bad block size in zIPL section of 1st record");
+ return -EINVAL;
+ }
+
+ return 0;
}
static void eckd_format_chs(ExtEckdBlockPtr *ptr, bool ldipl,
@@ -144,14 +164,17 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl,
bool more_data;
memset(_bprs, FREE_SPACE_FILLER, sizeof(_bprs));
- read_block(blk, bprs, "BPRS read failed");
+ if (virtio_read(blk, bprs)) {
+ puts("BPRS read failed");
+ return ERROR_BLOCK_NR;
+ }
do {
more_data = false;
for (j = 0;; j++) {
block_nr = gen_eckd_block_num(&bprs[j].xeckd, ldipl);
if (is_null_block_number(block_nr)) { /* end of chunk */
- break;
+ return NULL_BLOCK_NR;
}
/* we need the updated blockno for the next indirect entry
@@ -162,15 +185,20 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl,
}
/* List directed pointer does not store block size */
- IPL_assert(ldipl || block_size_ok(bprs[j].xeckd.bptr.size),
- "bad chunk block size");
+ if (!ldipl && !block_size_ok(bprs[j].xeckd.bptr.size)) {
+ puts("Bad chunk block size");
+ return ERROR_BLOCK_NR;
+ }
if (!eckd_valid_address(&bprs[j].xeckd, ldipl)) {
/*
* If an invalid address is found during LD-IPL then break and
- * retry as CCW
+ * retry as CCW-IPL, otherwise abort on error
*/
- IPL_assert(ldipl, "bad chunk ECKD addr");
+ if (!ldipl) {
+ puts("Bad chunk ECKD address");
+ return ERROR_BLOCK_NR;
+ }
break;
}
@@ -188,7 +216,10 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl,
* I.e. the next ptr must point to the unused memory area
*/
memset(_bprs, FREE_SPACE_FILLER, sizeof(_bprs));
- read_block(block_nr, bprs, "BPRS continuation read failed");
+ if (virtio_read(block_nr, bprs)) {
+ puts("BPRS continuation read failed");
+ return ERROR_BLOCK_NR;
+ }
more_data = true;
break;
}
@@ -197,7 +228,10 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl,
* to memory (address).
*/
rc = virtio_read_many(block_nr, (void *)(*address), count + 1);
- IPL_assert(rc == 0, "code chunk read failed");
+ if (rc != 0) {
+ puts("Code chunk read failed");
+ return ERROR_BLOCK_NR;
+ }
*address += (count + 1) * virtio_get_block_size();
}
@@ -231,7 +265,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr)
/* Get Stage1b data */
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(s1b_block_nr, s1b, "Cannot read stage1b boot loader");
+ if (virtio_read(s1b_block_nr, s1b)) {
+ puts("Cannot read stage1b boot loader");
+ return -EIO;
+ }
memset(_s2, FREE_SPACE_FILLER, sizeof(_s2));
@@ -243,7 +280,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr)
break;
}
- read_block(cur_block_nr, s2_cur_blk, "Cannot read stage2 boot loader");
+ if (virtio_read(cur_block_nr, s2_cur_blk)) {
+ puts("Cannot read stage2 boot loader");
+ return -EIO;
+ }
if (find_zipl_boot_menu_banner(&banner_offset)) {
/*
@@ -251,8 +291,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr)
* possibility of menu data spanning multiple blocks.
*/
if (prev_block_nr) {
- read_block(prev_block_nr, s2_prev_blk,
- "Cannot read stage2 boot loader");
+ if (virtio_read(prev_block_nr, s2_prev_blk)) {
+ puts("Cannot read stage2 boot loader");
+ return -EIO;
+ }
}
if (i + 1 < STAGE2_BLK_CNT_MAX) {
@@ -260,8 +302,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr)
}
if (next_block_nr && !is_null_block_number(next_block_nr)) {
- read_block(next_block_nr, s2_next_blk,
- "Cannot read stage2 boot loader");
+ if (virtio_read(next_block_nr, s2_next_blk)) {
+ puts("Cannot read stage2 boot loader");
+ return -EIO;
+ }
}
return menu_get_zipl_boot_index(s2_cur_blk + banner_offset);
@@ -270,11 +314,11 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr)
prev_block_nr = cur_block_nr;
}
- sclp_print("No zipl boot menu data found. Booting default entry.");
+ printf("No zipl boot menu data found. Booting default entry.");
return 0;
}
-static void run_eckd_boot_script(block_number_t bmt_block_nr,
+static int run_eckd_boot_script(block_number_t bmt_block_nr,
block_number_t s1b_block_nr)
{
int i;
@@ -291,17 +335,27 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr,
}
debug_print_int("loadparm", loadparm);
- IPL_assert(loadparm < MAX_BOOT_ENTRIES, "loadparm value greater than"
- " maximum number of boot entries allowed");
+ if (loadparm >= MAX_BOOT_ENTRIES) {
+ panic("loadparm value greater than max number of boot entries allowed");
+ }
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(bmt_block_nr, sec, "Cannot read Boot Map Table");
+ if (virtio_read(bmt_block_nr, sec)) {
+ puts("Cannot read Boot Map Table");
+ return -EIO;
+ }
block_nr = gen_eckd_block_num(&bmt->entry[loadparm].xeckd, ldipl);
- IPL_assert(block_nr != -1, "Cannot find Boot Map Table Entry");
+ if (block_nr == NULL_BLOCK_NR) {
+ printf("The requested boot entry (%d) is invalid\n", loadparm);
+ panic("Invalid loadparm");
+ }
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(block_nr, sec, "Cannot read Boot Map Script");
+ if (virtio_read(block_nr, sec)) {
+ puts("Cannot read Boot Map Script");
+ return -EIO;
+ }
for (i = 0; bms->entry[i].type == BOOT_SCRIPT_LOAD ||
bms->entry[i].type == BOOT_SCRIPT_SIGNATURE; i++) {
@@ -316,21 +370,27 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr,
do {
block_nr = load_eckd_segments(block_nr, ldipl, &address);
- } while (block_nr != -1);
+ if (block_nr == ERROR_BLOCK_NR) {
+ return ldipl ? 0 : -EIO;
+ }
+ } while (block_nr != NULL_BLOCK_NR);
}
if (ldipl && bms->entry[i].type != BOOT_SCRIPT_EXEC) {
/* Abort LD-IPL and retry as CCW-IPL */
- return;
+ return 0;
}
- IPL_assert(bms->entry[i].type == BOOT_SCRIPT_EXEC,
- "Unknown script entry type");
- write_reset_psw(bms->entry[i].address.load_address); /* no return */
- jump_to_IPL_code(0); /* no return */
+ if (bms->entry[i].type != BOOT_SCRIPT_EXEC) {
+ puts("Unknown script entry type");
+ return -EINVAL;
+ }
+ write_reset_psw(bms->entry[i].address.load_address);
+ jump_to_IPL_code(0);
+ return -1;
}
-static void ipl_eckd_cdl(void)
+static int ipl_eckd_cdl(void)
{
XEckdMbr *mbr;
EckdCdlIpl2 *ipl2 = (void *)sec;
@@ -338,23 +398,26 @@ static void ipl_eckd_cdl(void)
block_number_t bmt_block_nr, s1b_block_nr;
/* we have just read the block #0 and recognized it as "IPL1" */
- sclp_print("CDL\n");
+ puts("CDL");
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(1, ipl2, "Cannot read IPL2 record at block 1");
+ if (virtio_read(1, ipl2)) {
+ puts("Cannot read IPL2 record at block 1");
+ return -EIO;
+ }
mbr = &ipl2->mbr;
if (!magic_match(mbr, ZIPL_MAGIC)) {
- sclp_print("No zIPL section in IPL2 record.\n");
- return;
+ puts("No zIPL section in IPL2 record.");
+ return 0;
}
if (!block_size_ok(mbr->blockptr.xeckd.bptr.size)) {
- sclp_print("Bad block size in zIPL section of IPL2 record.\n");
- return;
+ puts("Bad block size in zIPL section of IPL2 record.");
+ return 0;
}
if (mbr->dev_type != DEV_TYPE_ECKD) {
- sclp_print("Non-ECKD device type in zIPL section of IPL2 record.\n");
- return;
+ puts("Non-ECKD device type in zIPL section of IPL2 record.");
+ return 0;
}
/* save pointer to Boot Map Table */
@@ -364,19 +427,21 @@ static void ipl_eckd_cdl(void)
s1b_block_nr = eckd_block_num(&ipl2->stage1.seek[0].chs);
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(2, vlbl, "Cannot read Volume Label at block 2");
+ if (virtio_read(2, vlbl)) {
+ puts("Cannot read Volume Label at block 2");
+ return -EIO;
+ }
if (!magic_match(vlbl->key, VOL1_MAGIC)) {
- sclp_print("Invalid magic of volume label block.\n");
- return;
+ puts("Invalid magic of volume label block.");
+ return 0;
}
if (!magic_match(vlbl->f.key, VOL1_MAGIC)) {
- sclp_print("Invalid magic of volser block.\n");
- return;
+ puts("Invalid magic of volser block.");
+ return 0;
}
print_volser(vlbl->f.volser);
- run_eckd_boot_script(bmt_block_nr, s1b_block_nr);
- /* no return */
+ return run_eckd_boot_script(bmt_block_nr, s1b_block_nr);
}
static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode)
@@ -384,8 +449,8 @@ static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode)
LDL_VTOC *vlbl = (void *)sec; /* already read, 3rd block */
char msg[4] = { '?', '.', '\n', '\0' };
- sclp_print((mode == ECKD_CMS) ? "CMS" : "LDL");
- sclp_print(" version ");
+ printf((mode == ECKD_CMS) ? "CMS" : "LDL");
+ printf(" version ");
switch (vlbl->LDL_version) {
case LDL1_VERSION:
msg[0] = '1';
@@ -398,11 +463,11 @@ static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode)
msg[1] = '?';
break;
}
- sclp_print(msg);
+ printf("%s", msg);
print_volser(vlbl->volser);
}
-static void ipl_eckd_ldl(ECKD_IPL_mode_t mode)
+static int ipl_eckd_ldl(ECKD_IPL_mode_t mode)
{
block_number_t bmt_block_nr, s1b_block_nr;
EckdLdlIpl1 *ipl1 = (void *)sec;
@@ -414,12 +479,15 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode)
/* DO NOT read BootMap pointer (only one, xECKD) at block #2 */
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(0, sec, "Cannot read block 0 to grab boot info.");
+ if (virtio_read(0, sec)) {
+ puts("Cannot read block 0 to grab boot info.");
+ return -EIO;
+ }
if (mode == ECKD_LDL_UNLABELED) {
if (!magic_match(ipl1->bip.magic, ZIPL_MAGIC)) {
- return; /* not applicable layout */
+ return 0; /* not applicable layout */
}
- sclp_print("unlabeled LDL.\n");
+ puts("unlabeled LDL.");
}
verify_boot_info(&ipl1->bip);
@@ -429,8 +497,7 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode)
/* save pointer to Stage1b Data */
s1b_block_nr = eckd_block_num(&ipl1->stage1.seek[0].chs);
- run_eckd_boot_script(bmt_block_nr, s1b_block_nr);
- /* no return */
+ return run_eckd_boot_script(bmt_block_nr, s1b_block_nr);
}
static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr)
@@ -440,7 +507,10 @@ static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr)
BootRecord *br;
blockno = gen_eckd_block_num(ptr, 0);
- read_block(blockno, tmp_sec, "Cannot read boot record");
+ if (virtio_read(blockno, tmp_sec)) {
+ puts("Cannot read boot record");
+ return ERROR_BLOCK_NR;
+ }
br = (BootRecord *)tmp_sec;
if (!magic_match(br->magic, ZIPL_MAGIC)) {
/* If the boot record is invalid, return and try CCW-IPL instead */
@@ -466,10 +536,10 @@ static void print_eckd_msg(void)
*p-- = ' ';
}
}
- sclp_print(msg);
+ printf("%s", msg);
}
-static void ipl_eckd(void)
+static int ipl_eckd(void)
{
IplVolumeLabel *vlbl = (void *)sec;
LDL_VTOC *vtoc = (void *)sec;
@@ -479,7 +549,10 @@ static void ipl_eckd(void)
/* Block 2 can contain either the CDL VOL1 label or the LDL VTOC */
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(2, vlbl, "Cannot read block 2");
+ if (virtio_read(2, vlbl)) {
+ puts("Cannot read block 2");
+ return -EIO;
+ }
/*
* First check for a list-directed-format pointer which would
@@ -487,43 +560,60 @@ static void ipl_eckd(void)
*/
if (eckd_valid_address((ExtEckdBlockPtr *)&vlbl->f.br, 0)) {
ldipl_bmt = eckd_find_bmt((ExtEckdBlockPtr *)&vlbl->f.br);
- if (ldipl_bmt) {
- sclp_print("List-Directed\n");
- /* LD-IPL does not use the S1B bock, just make it NULL */
- run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR);
- /* Only return in error, retry as CCW-IPL */
- sclp_print("Retrying IPL ");
+ switch (ldipl_bmt) {
+ case ERROR_BLOCK_NR:
+ return -EIO;
+ case NULL_BLOCK_NR:
+ break; /* Invalid BMT but the device may still boot with CCW-IPL */
+ default:
+ puts("List-Directed");
+ /*
+             * LD-IPL does not use the S1B block, just make it NULL_BLOCK_NR.
+ * In some failure cases retry IPL before aborting.
+ */
+ if (run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR)) {
+ return -EIO;
+ }
+ /* Non-fatal error, retry as CCW-IPL */
+ printf("Retrying IPL ");
print_eckd_msg();
}
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(2, vtoc, "Cannot read block 2");
+ if (virtio_read(2, vtoc)) {
+ puts("Cannot read block 2");
+ return -EIO;
+ }
}
/* Not list-directed */
if (magic_match(vtoc->magic, VOL1_MAGIC)) {
- ipl_eckd_cdl(); /* may return in error */
+ if (ipl_eckd_cdl()) {
+ return -1;
+ }
}
if (magic_match(vtoc->magic, CMS1_MAGIC)) {
- ipl_eckd_ldl(ECKD_CMS); /* no return */
+ return ipl_eckd_ldl(ECKD_CMS);
}
if (magic_match(vtoc->magic, LNX1_MAGIC)) {
- ipl_eckd_ldl(ECKD_LDL); /* no return */
+ return ipl_eckd_ldl(ECKD_LDL);
}
- ipl_eckd_ldl(ECKD_LDL_UNLABELED); /* it still may return */
+ if (ipl_eckd_ldl(ECKD_LDL_UNLABELED)) {
+ return -1;
+ }
/*
* Ok, it is not a LDL by any means.
* It still might be a CDL with zero record keys for IPL1 and IPL2
*/
- ipl_eckd_cdl();
+ return ipl_eckd_cdl();
}
/***********************************************************************
* IPL a SCSI disk
*/
-static void zipl_load_segment(ComponentEntry *entry)
+static int zipl_load_segment(ComponentEntry *entry)
{
const int max_entries = (MAX_SECTOR_SIZE / sizeof(ScsiBlockPtr));
ScsiBlockPtr *bprs = (void *)sec;
@@ -543,7 +633,10 @@ static void zipl_load_segment(ComponentEntry *entry)
do {
memset(bprs, FREE_SPACE_FILLER, bprs_size);
fill_hex_val(blk_no, &blockno, sizeof(blockno));
- read_block(blockno, bprs, err_msg);
+ if (virtio_read(blockno, bprs)) {
+ puts(err_msg);
+ return -EIO;
+ }
for (i = 0;; i++) {
uint64_t *cur_desc = (void *)&bprs[i];
@@ -571,23 +664,37 @@ static void zipl_load_segment(ComponentEntry *entry)
}
address = virtio_load_direct(cur_desc[0], cur_desc[1], 0,
(void *)address);
- IPL_assert(address != -1, "zIPL load segment failed");
+ if (!address) {
+ puts("zIPL load segment failed");
+ return -EIO;
+ }
}
} while (blockno);
+
+ return 0;
}
/* Run a zipl program */
-static void zipl_run(ScsiBlockPtr *pte)
+static int zipl_run(ScsiBlockPtr *pte)
{
ComponentHeader *header;
ComponentEntry *entry;
uint8_t tmp_sec[MAX_SECTOR_SIZE];
- read_block(pte->blockno, tmp_sec, "Cannot read header");
+ if (virtio_read(pte->blockno, tmp_sec)) {
+ puts("Cannot read header");
+ return -EIO;
+ }
header = (ComponentHeader *)tmp_sec;
- IPL_assert(magic_match(tmp_sec, ZIPL_MAGIC), "No zIPL magic in header");
- IPL_assert(header->type == ZIPL_COMP_HEADER_IPL, "Bad header type");
+ if (!magic_match(tmp_sec, ZIPL_MAGIC)) {
+ puts("No zIPL magic in header");
+ return -EINVAL;
+ }
+ if (header->type != ZIPL_COMP_HEADER_IPL) {
+ puts("Bad header type");
+ return -EINVAL;
+ }
dputs("start loading images\n");
@@ -602,22 +709,30 @@ static void zipl_run(ScsiBlockPtr *pte)
continue;
}
- zipl_load_segment(entry);
+ if (zipl_load_segment(entry)) {
+ return -1;
+ }
entry++;
- IPL_assert((uint8_t *)(&entry[1]) <= (tmp_sec + MAX_SECTOR_SIZE),
- "Wrong entry value");
+ if ((uint8_t *)(&entry[1]) > (tmp_sec + MAX_SECTOR_SIZE)) {
+ puts("Wrong entry value");
+ return -EINVAL;
+ }
}
- IPL_assert(entry->component_type == ZIPL_COMP_ENTRY_EXEC, "No EXEC entry");
+ if (entry->component_type != ZIPL_COMP_ENTRY_EXEC) {
+ puts("No EXEC entry");
+ return -EINVAL;
+ }
/* should not return */
write_reset_psw(entry->compdat.load_psw);
jump_to_IPL_code(0);
+ return -1;
}
-static void ipl_scsi(void)
+static int ipl_scsi(void)
{
ScsiMbr *mbr = (void *)sec;
int program_table_entries = 0;
@@ -628,22 +743,34 @@ static void ipl_scsi(void)
/* Grab the MBR */
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(0, mbr, "Cannot read block 0");
+ if (virtio_read(0, mbr)) {
+ puts("Cannot read block 0");
+ return -EIO;
+ }
if (!magic_match(mbr->magic, ZIPL_MAGIC)) {
- return;
+ return 0;
}
- sclp_print("Using SCSI scheme.\n");
+ puts("Using SCSI scheme.");
debug_print_int("MBR Version", mbr->version_id);
IPL_check(mbr->version_id == 1,
"Unknown MBR layout version, assuming version 1");
debug_print_int("program table", mbr->pt.blockno);
- IPL_assert(mbr->pt.blockno, "No Program Table");
+ if (!mbr->pt.blockno) {
+ puts("No Program Table");
+ return -EINVAL;
+ }
/* Parse the program table */
- read_block(mbr->pt.blockno, sec, "Error reading Program Table");
- IPL_assert(magic_match(sec, ZIPL_MAGIC), "No zIPL magic in PT");
+ if (virtio_read(mbr->pt.blockno, sec)) {
+ puts("Error reading Program Table");
+ return -EIO;
+ }
+ if (!magic_match(sec, ZIPL_MAGIC)) {
+ puts("No zIPL magic in Program Table");
+ return -EINVAL;
+ }
for (i = 0; i < MAX_BOOT_ENTRIES; i++) {
if (prog_table->entry[i].scsi.blockno) {
@@ -653,17 +780,26 @@ static void ipl_scsi(void)
}
debug_print_int("program table entries", program_table_entries);
- IPL_assert(program_table_entries != 0, "Empty Program Table");
+ if (program_table_entries == 0) {
+ puts("Empty Program Table");
+ return -EINVAL;
+ }
if (menu_is_enabled_enum()) {
loadparm = menu_get_enum_boot_index(valid_entries);
}
debug_print_int("loadparm", loadparm);
- IPL_assert(loadparm < MAX_BOOT_ENTRIES, "loadparm value greater than"
- " maximum number of boot entries allowed");
+ if (loadparm >= MAX_BOOT_ENTRIES) {
+ panic("loadparm value greater than max number of boot entries allowed");
+ }
+
+ if (!valid_entries[loadparm]) {
+ printf("The requested boot entry (%d) is invalid\n", loadparm);
+ panic("Invalid loadparm");
+ }
- zipl_run(&prog_table->entry[loadparm].scsi); /* no return */
+ return zipl_run(&prog_table->entry[loadparm].scsi);
}
/***********************************************************************
@@ -677,8 +813,10 @@ static bool is_iso_bc_entry_compatible(IsoBcSection *s)
if (s->unused || !s->sector_count) {
return false;
}
- read_iso_sector(bswap32(s->load_rba), magic_sec,
- "Failed to read image sector 0");
+ if (virtio_read(bswap32(s->load_rba), magic_sec)) {
+ puts("Failed to read image sector 0");
+ return false;
+ }
/* Checking bytes 8 - 32 for S390 Linux magic */
return !memcmp(magic_sec + 8, linux_s390_magic, 24);
@@ -691,28 +829,35 @@ static uint32_t sec_offset[ISO9660_MAX_DIR_DEPTH];
/* Remained directory space in bytes */
static uint32_t dir_rem[ISO9660_MAX_DIR_DEPTH];
-static inline uint32_t iso_get_file_size(uint32_t load_rba)
+static inline long iso_get_file_size(uint32_t load_rba)
{
IsoVolDesc *vd = (IsoVolDesc *)sec;
IsoDirHdr *cur_record = &vd->vd.primary.rootdir;
uint8_t *temp = sec + ISO_SECTOR_SIZE;
int level = 0;
- read_iso_sector(ISO_PRIMARY_VD_SECTOR, sec,
- "Failed to read ISO primary descriptor");
+ if (virtio_read(ISO_PRIMARY_VD_SECTOR, sec)) {
+ puts("Failed to read ISO primary descriptor");
+ return -EIO;
+ }
+
sec_loc[0] = iso_733_to_u32(cur_record->ext_loc);
dir_rem[0] = 0;
sec_offset[0] = 0;
while (level >= 0) {
- IPL_assert(sec_offset[level] <= ISO_SECTOR_SIZE,
- "Directory tree structure violation");
+ if (sec_offset[level] > ISO_SECTOR_SIZE) {
+ puts("Directory tree structure violation");
+ return -EIO;
+ }
cur_record = (IsoDirHdr *)(temp + sec_offset[level]);
if (sec_offset[level] == 0) {
- read_iso_sector(sec_loc[level], temp,
- "Failed to read ISO directory");
+ if (virtio_read(sec_loc[level], temp)) {
+ puts("Failed to read ISO directory");
+ return -EIO;
+ }
if (dir_rem[level] == 0) {
/* Skip self and parent records */
dir_rem[level] = iso_733_to_u32(cur_record->data_len) -
@@ -743,7 +888,7 @@ static inline uint32_t iso_get_file_size(uint32_t load_rba)
if (cur_record->file_flags & 0x2) {
/* Subdirectory */
if (level == ISO9660_MAX_DIR_DEPTH - 1) {
- sclp_print("ISO-9660 directory depth limit exceeded\n");
+ puts("ISO-9660 directory depth limit exceeded");
} else {
level++;
sec_loc[level] = iso_733_to_u32(cur_record->ext_loc);
@@ -757,8 +902,10 @@ static inline uint32_t iso_get_file_size(uint32_t load_rba)
if (dir_rem[level] == 0) {
/* Nothing remaining */
level--;
- read_iso_sector(sec_loc[level], temp,
- "Failed to read ISO directory");
+ if (virtio_read(sec_loc[level], temp)) {
+ puts("Failed to read ISO directory");
+ return -EIO;
+ }
}
}
@@ -773,19 +920,24 @@ static void load_iso_bc_entry(IsoBcSection *load)
* is padded and ISO_SECTOR_SIZE bytes aligned
*/
uint32_t blks_to_load = bswap16(s.sector_count) >> ET_SECTOR_SHIFT;
- uint32_t real_size = iso_get_file_size(bswap32(s.load_rba));
+ long real_size = iso_get_file_size(bswap32(s.load_rba));
- if (real_size) {
+ if (real_size > 0) {
/* Round up blocks to load */
blks_to_load = (real_size + ISO_SECTOR_SIZE - 1) / ISO_SECTOR_SIZE;
- sclp_print("ISO boot image size verified\n");
+ puts("ISO boot image size verified");
} else {
- sclp_print("ISO boot image size could not be verified\n");
+ puts("ISO boot image size could not be verified");
+ if (real_size < 0) {
+ return;
+ }
}
- read_iso_boot_image(bswap32(s.load_rba),
+ if (read_iso_boot_image(bswap32(s.load_rba),
(void *)((uint64_t)bswap16(s.load_segment)),
- blks_to_load);
+ blks_to_load)) {
+ return;
+ }
jump_to_low_kernel();
}
@@ -808,17 +960,18 @@ static uint32_t find_iso_bc(void)
return bswap32(et->bc_offset);
}
}
- read_iso_sector(block_num++, sec,
- "Failed to read ISO volume descriptor");
+ if (virtio_read(block_num++, sec)) {
+ puts("Failed to read ISO volume descriptor");
+ return 0;
+ }
}
return 0;
}
-static IsoBcSection *find_iso_bc_entry(void)
+static IsoBcSection *find_iso_bc_entry(uint32_t offset)
{
IsoBcEntry *e = (IsoBcEntry *)sec;
- uint32_t offset = find_iso_bc();
int i;
unsigned int loadparm = get_loadparm_index();
@@ -826,11 +979,13 @@ static IsoBcSection *find_iso_bc_entry(void)
return NULL;
}
- read_iso_sector(offset, sec, "Failed to read El Torito boot catalog");
+ if (virtio_read(offset, sec)) {
+ puts("Failed to read El Torito boot catalog");
+ return NULL;
+ }
if (!is_iso_bc_valid(e)) {
/* The validation entry is mandatory */
- panic("No valid boot catalog found!\n");
return NULL;
}
@@ -850,19 +1005,25 @@ static IsoBcSection *find_iso_bc_entry(void)
}
}
- panic("No suitable boot entry found on ISO-9660 media!\n");
-
return NULL;
}
-static void ipl_iso_el_torito(void)
+static int ipl_iso_el_torito(void)
{
- IsoBcSection *s = find_iso_bc_entry();
+ uint32_t offset = find_iso_bc();
+ if (!offset) {
+ return 0;
+ }
+
+ IsoBcSection *s = find_iso_bc_entry(offset);
if (s) {
- load_iso_bc_entry(s);
- /* no return */
+        load_iso_bc_entry(s); /* only returns on error */
+ return -1;
}
+
+ puts("No suitable boot entry found on ISO-9660 media!");
+ return -EIO;
}
/**
@@ -884,7 +1045,7 @@ static bool has_iso_signature(void)
* Bus specific IPL sequences
*/
-static void zipl_load_vblk(void)
+static int zipl_load_vblk(void)
{
int blksize = virtio_get_block_size();
@@ -892,26 +1053,30 @@ static void zipl_load_vblk(void)
if (blksize != VIRTIO_ISO_BLOCK_SIZE) {
virtio_assume_iso9660();
}
- ipl_iso_el_torito();
+ if (ipl_iso_el_torito()) {
+ return 0;
+ }
}
if (blksize != VIRTIO_DASD_DEFAULT_BLOCK_SIZE) {
- sclp_print("Using guessed DASD geometry.\n");
+ puts("Using guessed DASD geometry.");
virtio_assume_eckd();
}
- ipl_eckd();
+ return ipl_eckd();
}
-static void zipl_load_vscsi(void)
+static int zipl_load_vscsi(void)
{
if (virtio_get_block_size() == VIRTIO_ISO_BLOCK_SIZE) {
/* Is it an ISO image in non-CD drive? */
- ipl_iso_el_torito();
+ if (ipl_iso_el_torito()) {
+ return 0;
+ }
}
- sclp_print("Using guessed DASD geometry.\n");
+ puts("Using guessed DASD geometry.");
virtio_assume_eckd();
- ipl_eckd();
+ return ipl_eckd();
}
/***********************************************************************
@@ -924,14 +1089,20 @@ void zipl_load(void)
if (vdev->is_cdrom) {
ipl_iso_el_torito();
- panic("\n! Cannot IPL this ISO image !\n");
+ puts("Failed to IPL this ISO image!");
+ return;
}
if (virtio_get_device_type() == VIRTIO_ID_NET) {
- jump_to_IPL_code(vdev->netboot_start_addr);
+ netmain();
+ puts("Failed to IPL from this network!");
+ return;
}
- ipl_scsi();
+ if (ipl_scsi()) {
+ puts("Failed to IPL from this SCSI device!");
+ return;
+ }
switch (virtio_get_device_type()) {
case VIRTIO_ID_BLOCK:
@@ -941,8 +1112,9 @@ void zipl_load(void)
zipl_load_vscsi();
break;
default:
- panic("\n! Unknown IPL device type !\n");
+ puts("Unknown IPL device type!");
+ return;
}
- sclp_print("zIPL load failed.\n");
+ puts("zIPL load failed!");
}
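The change running through bootmap.c above replaces the halting IPL_assert()/panic() calls with puts() plus an errno-style return value, so failures can propagate back to zipl_load() and the firmware can try another boot device instead of entering a disabled wait. A minimal standalone sketch of that pattern follows; device_read() and the error constant are illustrative stand-ins, not code from the patch.

/* Minimal sketch of the error-propagation pattern used throughout
 * bootmap.c; device_read() and EIO are illustrative stand-ins. */
#include <stdio.h>

#define EIO 1    /* mirrors the firmware's private errno-style constant */

static int device_read(unsigned long block, void *buf)
{
    (void)block;
    (void)buf;
    return 0;    /* stub: pretend the read succeeded */
}

static int read_mbr(void *buf)
{
    /* Old style: IPL_assert(device_read(0, buf) == 0, "Cannot read block 0");
     * New style: report the problem and hand the decision to the caller. */
    if (device_read(0, buf)) {
        puts("Cannot read block 0");
        return -EIO;
    }
    return 0;
}

int main(void)
{
    char sector[512];

    if (read_mbr(sector)) {
        puts("Falling back to the next boot device");
        return 1;
    }
    return 0;
}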
diff --git a/pc-bios/s390-ccw/bootmap.h b/pc-bios/s390-ccw/bootmap.h
index d4690a8..9594344 100644
--- a/pc-bios/s390-ccw/bootmap.h
+++ b/pc-bios/s390-ccw/bootmap.h
@@ -16,6 +16,7 @@
typedef uint64_t block_number_t;
#define NULL_BLOCK_NR 0xffffffffffffffffULL
+#define ERROR_BLOCK_NR 0xfffffffffffffffeULL
#define FREE_SPACE_FILLER '\xAA'
@@ -336,9 +337,7 @@ static inline void print_volser(const void *volser)
ebcdic_to_ascii((char *)volser, ascii, 6);
ascii[6] = '\0';
- sclp_print("VOLSER=[");
- sclp_print(ascii);
- sclp_print("]\n");
+ printf("VOLSER=[%s]\n", ascii);
}
static inline bool unused_space(const void *p, size_t size)
@@ -387,17 +386,14 @@ static inline uint32_t iso_733_to_u32(uint64_t x)
#define ISO_PRIMARY_VD_SECTOR 16
-static inline void read_iso_sector(uint32_t block_offset, void *buf,
- const char *errmsg)
-{
- IPL_assert(virtio_read_many(block_offset, buf, 1) == 0, errmsg);
-}
-
-static inline void read_iso_boot_image(uint32_t block_offset, void *load_addr,
+static inline int read_iso_boot_image(uint32_t block_offset, void *load_addr,
uint32_t blks_to_load)
{
- IPL_assert(virtio_read_many(block_offset, load_addr, blks_to_load) == 0,
- "Failed to read boot image!");
+ if (virtio_read_many(block_offset, load_addr, blks_to_load)) {
+ puts("Failed to read boot image!");
+ return -1;
+ }
+ return 0;
}
#define ISO9660_MAX_DIR_DEPTH 8
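bootmap.h now carries two sentinels for block_number_t: NULL_BLOCK_NR still marks the normal end of a chain, while the new ERROR_BLOCK_NR reports a failed read or validation so the caller can abort or fall back to CCW-IPL. A small sketch of how a caller separates the two cases, with next_segment() as a hypothetical stand-in for load_eckd_segments():

/* Sketch of walking a block chain with the two sentinels from bootmap.h;
 * next_segment() is a hypothetical stand-in for load_eckd_segments(). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t block_number_t;
#define NULL_BLOCK_NR  0xffffffffffffffffULL
#define ERROR_BLOCK_NR 0xfffffffffffffffeULL

static block_number_t next_segment(block_number_t blk)
{
    (void)blk;
    return NULL_BLOCK_NR;   /* stub: chain ends immediately */
}

static int walk_chain(block_number_t blk)
{
    do {
        blk = next_segment(blk);
        if (blk == ERROR_BLOCK_NR) {
            puts("chain read failed");  /* hard error: abort or retry as CCW-IPL */
            return -1;
        }
    } while (blk != NULL_BLOCK_NR);     /* normal end of chain */
    return 0;
}

int main(void)
{
    return walk_chain(0) ? 1 : 0;
}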
diff --git a/pc-bios/s390-ccw/cio.c b/pc-bios/s390-ccw/cio.c
index 83ca27a..5d543da 100644
--- a/pc-bios/s390-ccw/cio.c
+++ b/pc-bios/s390-ccw/cio.c
@@ -11,7 +11,8 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
+#include <stdio.h>
#include "s390-ccw.h"
#include "s390-arch.h"
#include "helper.h"
@@ -58,7 +59,8 @@ uint16_t cu_type(SubChannelId schid)
};
if (do_cio(schid, CU_TYPE_UNKNOWN, ptr2u32(&sense_id_ccw), CCW_FMT1)) {
- panic("Failed to run SenseID CCw\n");
+ puts("Failed to run SenseID CCW");
+ return CU_TYPE_UNKNOWN;
}
return sense_data.cu_type;
@@ -90,9 +92,9 @@ static void print_eckd_dasd_sense_data(SenseDataEckdDasd *sd)
char msgline[512];
if (sd->config_info & 0x8000) {
- sclp_print("Eckd Dasd Sense Data (fmt 24-bytes):\n");
+ puts("Eckd Dasd Sense Data (fmt 24-bytes):");
} else {
- sclp_print("Eckd Dasd Sense Data (fmt 32-bytes):\n");
+ puts("Eckd Dasd Sense Data (fmt 32-bytes):");
}
strcat(msgline, " Sense Condition Flags :");
@@ -158,22 +160,21 @@ static void print_eckd_dasd_sense_data(SenseDataEckdDasd *sd)
if (sd->status[1] & SNS_STAT2_IMPRECISE_END) {
strcat(msgline, " [Imprecise-End]");
}
- strcat(msgline, "\n");
- sclp_print(msgline);
-
- print_int(" Residual Count =", sd->res_count);
- print_int(" Phys Drive ID =", sd->phys_drive_id);
- print_int(" low cyl address =", sd->low_cyl_addr);
- print_int(" head addr & hi cyl =", sd->head_high_cyl_addr);
- print_int(" format/message =", sd->fmt_msg);
- print_int(" fmt-dependent[0-7] =", sd->fmt_dependent_info[0]);
- print_int(" fmt-dependent[8-15]=", sd->fmt_dependent_info[1]);
- print_int(" prog action code =", sd->program_action_code);
- print_int(" Configuration info =", sd->config_info);
- print_int(" mcode / hi-cyl =", sd->mcode_hicyl);
- print_int(" cyl & head addr [0]=", sd->cyl_head_addr[0]);
- print_int(" cyl & head addr [1]=", sd->cyl_head_addr[1]);
- print_int(" cyl & head addr [2]=", sd->cyl_head_addr[2]);
+ puts(msgline);
+
+ printf(" Residual Count = 0x%X\n", sd->res_count);
+ printf(" Phys Drive ID = 0x%X\n", sd->phys_drive_id);
+ printf(" low cyl address = 0x%X\n", sd->low_cyl_addr);
+ printf(" head addr & hi cyl = 0x%X\n", sd->head_high_cyl_addr);
+ printf(" format/message = 0x%X\n", sd->fmt_msg);
+ printf(" fmt-dependent[0-7] = 0x%llX\n", sd->fmt_dependent_info[0]);
+ printf(" fmt-dependent[8-15]= 0x%llX\n", sd->fmt_dependent_info[1]);
+ printf(" prog action code = 0x%X\n", sd->program_action_code);
+ printf(" Configuration info = 0x%X\n", sd->config_info);
+ printf(" mcode / hi-cyl = 0x%X\n", sd->mcode_hicyl);
+ printf(" cyl & head addr [0]= 0x%X\n", sd->cyl_head_addr[0]);
+ printf(" cyl & head addr [1]= 0x%X\n", sd->cyl_head_addr[1]);
+ printf(" cyl & head addr [2]= 0x%X\n", sd->cyl_head_addr[2]);
}
static void print_irb_err(Irb *irb)
@@ -182,7 +183,7 @@ static void print_irb_err(Irb *irb)
uint64_t prev_ccw = *(uint64_t *)u32toptr(irb->scsw.cpa - 8);
char msgline[256];
- sclp_print("Interrupt Response Block Data:\n");
+ puts("Interrupt Response Block Data:");
strcat(msgline, " Function Ctrl :");
if (irb->scsw.ctrl & SCSW_FCTL_START_FUNC) {
@@ -194,8 +195,7 @@ static void print_irb_err(Irb *irb)
if (irb->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) {
strcat(msgline, " [Clear]");
}
- strcat(msgline, "\n");
- sclp_print(msgline);
+ puts(msgline);
msgline[0] = '\0';
strcat(msgline, " Activity Ctrl :");
@@ -220,8 +220,7 @@ static void print_irb_err(Irb *irb)
if (irb->scsw.ctrl & SCSW_ACTL_SUSPENDED) {
strcat(msgline, " [Suspended]");
}
- strcat(msgline, "\n");
- sclp_print(msgline);
+ puts(msgline);
msgline[0] = '\0';
strcat(msgline, " Status Ctrl :");
@@ -240,9 +239,7 @@ static void print_irb_err(Irb *irb)
if (irb->scsw.ctrl & SCSW_SCTL_STATUS_PEND) {
strcat(msgline, " [Status-Pending]");
}
-
- strcat(msgline, "\n");
- sclp_print(msgline);
+ puts(msgline);
msgline[0] = '\0';
strcat(msgline, " Device Status :");
@@ -270,8 +267,7 @@ static void print_irb_err(Irb *irb)
if (irb->scsw.dstat & SCSW_DSTAT_UEXCP) {
strcat(msgline, " [Unit-Exception]");
}
- strcat(msgline, "\n");
- sclp_print(msgline);
+ puts(msgline);
msgline[0] = '\0';
strcat(msgline, " Channel Status :");
@@ -299,12 +295,11 @@ static void print_irb_err(Irb *irb)
if (irb->scsw.cstat & SCSW_CSTAT_CHAINCHK) {
strcat(msgline, " [Chaining-Check]");
}
- strcat(msgline, "\n");
- sclp_print(msgline);
+ puts(msgline);
- print_int(" cpa=", irb->scsw.cpa);
- print_int(" prev_ccw=", prev_ccw);
- print_int(" this_ccw=", this_ccw);
+ printf(" cpa= 0x%X\n", irb->scsw.cpa);
+ printf(" prev_ccw= 0x%llX\n", prev_ccw);
+ printf(" this_ccw= 0x%llX\n", this_ccw);
}
/*
@@ -341,7 +336,7 @@ static int __do_cio(SubChannelId schid, uint32_t ccw_addr, int fmt, Irb *irb)
return -1;
}
if (rc) {
- print_int("ssch failed with cc=", rc);
+ printf("ssch failed with cc= 0x%x\n", rc);
return rc;
}
@@ -350,7 +345,7 @@ static int __do_cio(SubChannelId schid, uint32_t ccw_addr, int fmt, Irb *irb)
/* collect status */
rc = tsch(schid, irb);
if (rc) {
- print_int("tsch failed with cc=", rc);
+ printf("tsch failed with cc= 0x%X\n", rc);
}
return rc;
@@ -406,12 +401,12 @@ int do_cio(SubChannelId schid, uint16_t cutype, uint32_t ccw_addr, int fmt)
continue;
}
- sclp_print("cio device error\n");
- print_int(" ssid ", schid.ssid);
- print_int(" cssid ", schid.cssid);
- print_int(" sch_no", schid.sch_no);
- print_int(" ctrl-unit type", cutype);
- sclp_print("\n");
+ printf("cio device error\n");
+ printf(" ssid 0x%X\n", schid.ssid);
+ printf(" cssid 0x%X\n", schid.cssid);
+ printf(" sch_no 0x%X\n", schid.sch_no);
+ printf(" ctrl-unit type 0x%X\n", cutype);
+ printf("\n");
print_irb_err(&irb);
if (cutype == CU_TYPE_DASD_3990 || cutype == CU_TYPE_DASD_2107 ||
cutype == CU_TYPE_UNKNOWN) {
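The sense-data and IRB dumps in cio.c move from the fixed-width print_int() helper to printf() with explicit conversions, so the conversion must match the field width: the 64-bit fmt_dependent_info words need %llX while the 16- and 32-bit fields use %X. A short sketch of the same idea with a hypothetical struct (not the real SenseDataEckdDasd layout):

/* Matching printf conversions to field widths, as in the sense-data dump;
 * the struct is a hypothetical example, not the real layout. */
#include <stdint.h>
#include <stdio.h>

struct example_sense {
    uint16_t res_count;
    uint32_t config_info;
    uint64_t fmt_dependent;
};

int main(void)
{
    struct example_sense sd = { 0x12, 0x8000, 0x1122334455667788ULL };

    printf(" Residual Count     = 0x%X\n", sd.res_count);    /* promoted to int */
    printf(" Configuration info = 0x%X\n", sd.config_info);
    printf(" fmt-dependent      = 0x%llX\n",
           (unsigned long long)sd.fmt_dependent);             /* 64-bit needs ll */
    return 0;
}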
diff --git a/pc-bios/s390-ccw/cio.h b/pc-bios/s390-ccw/cio.h
index 8b18153..6a5e86b 100644
--- a/pc-bios/s390-ccw/cio.h
+++ b/pc-bios/s390-ccw/cio.h
@@ -361,6 +361,8 @@ typedef struct CcwSearchIdData {
uint8_t record;
} __attribute__((packed)) CcwSearchIdData;
+extern SubChannelId net_schid;
+
int enable_mss_facility(void);
void enable_subchannel(SubChannelId schid);
uint16_t cu_type(SubChannelId schid);
diff --git a/pc-bios/s390-ccw/dasd-ipl.c b/pc-bios/s390-ccw/dasd-ipl.c
index 254bb1a..babece9 100644
--- a/pc-bios/s390-ccw/dasd-ipl.c
+++ b/pc-bios/s390-ccw/dasd-ipl.c
@@ -8,7 +8,8 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
+#include <stdio.h>
#include "s390-ccw.h"
#include "s390-arch.h"
#include "dasd-ipl.h"
@@ -82,7 +83,7 @@ static int run_dynamic_ccw_program(SubChannelId schid, uint16_t cutype,
do {
has_next = dynamic_cp_fixup(cpa, &next_cpa);
- print_int("executing ccw chain at ", cpa);
+ printf("executing ccw chain at 0x%X\n", cpa);
enable_prefixing();
rc = do_cio(schid, cutype, cpa, CCW_FMT0);
disable_prefixing();
@@ -110,38 +111,29 @@ static void make_readipl(void)
ccwIplRead->count = 0x18; /* Read 0x18 bytes of data */
}
-static void run_readipl(SubChannelId schid, uint16_t cutype)
+static int run_readipl(SubChannelId schid, uint16_t cutype)
{
- if (do_cio(schid, cutype, 0x00, CCW_FMT0)) {
- panic("dasd-ipl: Failed to run Read IPL channel program\n");
- }
+ return do_cio(schid, cutype, 0x00, CCW_FMT0);
}
/*
* The architecture states that IPL1 data should consist of a psw followed by
* format-0 READ and TIC CCWs. Let's sanity check.
*/
-static void check_ipl1(void)
+static bool check_ipl1(void)
{
Ccw0 *ccwread = (Ccw0 *)0x08;
Ccw0 *ccwtic = (Ccw0 *)0x10;
- if (ccwread->cmd_code != CCW_CMD_DASD_READ ||
- ccwtic->cmd_code != CCW_CMD_TIC) {
- panic("dasd-ipl: IPL1 data invalid. Is this disk really bootable?\n");
- }
+ return (ccwread->cmd_code == CCW_CMD_DASD_READ &&
+ ccwtic->cmd_code == CCW_CMD_TIC);
}
-static void check_ipl2(uint32_t ipl2_addr)
+static bool check_ipl2(uint32_t ipl2_addr)
{
Ccw0 *ccw = u32toptr(ipl2_addr);
- if (ipl2_addr == 0x00) {
- panic("IPL2 address invalid. Is this disk really bootable?\n");
- }
- if (ccw->cmd_code == 0x00) {
- panic("IPL2 ccw data invalid. Is this disk really bootable?\n");
- }
+ return (ipl2_addr != 0x00 && ccw->cmd_code != 0x00);
}
static uint32_t read_ipl2_addr(void)
@@ -187,52 +179,67 @@ static void ipl1_fixup(void)
ccwSearchTic->cda = ptr2u32(ccwSearchID);
}
-static void run_ipl1(SubChannelId schid, uint16_t cutype)
+static int run_ipl1(SubChannelId schid, uint16_t cutype)
{
uint32_t startAddr = 0x08;
- if (do_cio(schid, cutype, startAddr, CCW_FMT0)) {
- panic("dasd-ipl: Failed to run IPL1 channel program\n");
- }
+ return do_cio(schid, cutype, startAddr, CCW_FMT0);
}
-static void run_ipl2(SubChannelId schid, uint16_t cutype, uint32_t addr)
+static int run_ipl2(SubChannelId schid, uint16_t cutype, uint32_t addr)
{
- if (run_dynamic_ccw_program(schid, cutype, addr)) {
- panic("dasd-ipl: Failed to run IPL2 channel program\n");
- }
+ return run_dynamic_ccw_program(schid, cutype, addr);
}
/*
* Limitations in vfio-ccw support complicate the IPL process. Details can
* be found in docs/devel/s390-dasd-ipl.rst
*/
-void dasd_ipl(SubChannelId schid, uint16_t cutype)
+int dasd_ipl(SubChannelId schid, uint16_t cutype)
{
PSWLegacy *pswl = (PSWLegacy *) 0x00;
uint32_t ipl2_addr;
/* Construct Read IPL CCW and run it to read IPL1 from boot disk */
make_readipl();
- run_readipl(schid, cutype);
+ if (run_readipl(schid, cutype)) {
+ puts("Failed to run Read IPL channel program");
+ return -EIO;
+ }
+
ipl2_addr = read_ipl2_addr();
- check_ipl1();
+
+ if (!check_ipl1()) {
+ puts("IPL1 invalid for DASD-IPL");
+ return -EINVAL;
+ }
/*
* Fixup IPL1 channel program to account for vfio-ccw limitations, then run
* it to read IPL2 channel program from boot disk.
*/
ipl1_fixup();
- run_ipl1(schid, cutype);
- check_ipl2(ipl2_addr);
+ if (run_ipl1(schid, cutype)) {
+ puts("Failed to run IPL1 channel program");
+ return -EIO;
+ }
+
+ if (!check_ipl2(ipl2_addr)) {
+ puts("IPL2 invalid for DASD-IPL");
+ return -EINVAL;
+ }
/*
* Run IPL2 channel program to read operating system code from boot disk
*/
- run_ipl2(schid, cutype, ipl2_addr);
+ if (run_ipl2(schid, cutype, ipl2_addr)) {
+ puts("Failed to run IPL2 channel program");
+ return -EIO;
+ }
/* Transfer control to the guest operating system */
pswl->mask |= PSW_MASK_EAMODE; /* Force z-mode */
pswl->addr |= PSW_MASK_BAMODE; /* ... */
jump_to_low_kernel();
+ return -1;
}
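dasd_ipl() now reports failures with errno-style return codes instead of panicking, so a bad channel program bubbles up through ipl_boot_device() to the IPLB retry loop in main(). A condensed sketch of that propagation with stubbed functions; the names below are stand-ins, not the patch's code.

/* Condensed sketch of a DASD IPL failure propagating instead of halting;
 * both helpers are stubs for dasd_ipl() and load_next_iplb(). */
#include <stdbool.h>
#include <stdio.h>

static int dasd_ipl_stub(void)
{
    return -1;                  /* e.g. the Read IPL channel program failed */
}

static bool load_next_iplb_stub(void)
{
    return false;               /* no more chained IPLBs to try */
}

static void ipl_boot_device(void)
{
    if (dasd_ipl_stub()) {
        puts("DASD IPL failed");   /* reported, but no longer fatal here */
    }
}

int main(void)
{
    bool have_iplb = true;

    while (have_iplb) {
        ipl_boot_device();
        have_iplb = load_next_iplb_stub();
    }
    puts("No suitable device for IPL");
    return 1;
}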
diff --git a/pc-bios/s390-ccw/dasd-ipl.h b/pc-bios/s390-ccw/dasd-ipl.h
index c394828..eb1898c 100644
--- a/pc-bios/s390-ccw/dasd-ipl.h
+++ b/pc-bios/s390-ccw/dasd-ipl.h
@@ -11,6 +11,6 @@
#ifndef DASD_IPL_H
#define DASD_IPL_H
-void dasd_ipl(SubChannelId schid, uint16_t cutype);
+int dasd_ipl(SubChannelId schid, uint16_t cutype);
#endif /* DASD_IPL_H */
diff --git a/pc-bios/s390-ccw/iplb.h b/pc-bios/s390-ccw/iplb.h
index cb6ac8a..08f259f 100644
--- a/pc-bios/s390-ccw/iplb.h
+++ b/pc-bios/s390-ccw/iplb.h
@@ -12,88 +12,16 @@
#ifndef IPLB_H
#define IPLB_H
-#define LOADPARM_LEN 8
-
-struct IplBlockCcw {
- uint8_t reserved0[85];
- uint8_t ssid;
- uint16_t devno;
- uint8_t vm_flags;
- uint8_t reserved3[3];
- uint32_t vm_parm_len;
- uint8_t nss_name[8];
- uint8_t vm_parm[64];
- uint8_t reserved4[8];
-} __attribute__ ((packed));
-typedef struct IplBlockCcw IplBlockCcw;
-
-struct IplBlockFcp {
- uint8_t reserved1[305 - 1];
- uint8_t opt;
- uint8_t reserved2[3];
- uint16_t reserved3;
- uint16_t devno;
- uint8_t reserved4[4];
- uint64_t wwpn;
- uint64_t lun;
- uint32_t bootprog;
- uint8_t reserved5[12];
- uint64_t br_lba;
- uint32_t scp_data_len;
- uint8_t reserved6[260];
- uint8_t scp_data[];
-} __attribute__ ((packed));
-typedef struct IplBlockFcp IplBlockFcp;
-
-struct IplBlockQemuScsi {
- uint32_t lun;
- uint16_t target;
- uint16_t channel;
- uint8_t reserved0[77];
- uint8_t ssid;
- uint16_t devno;
-} __attribute__ ((packed));
-typedef struct IplBlockQemuScsi IplBlockQemuScsi;
-
-struct IplParameterBlock {
- uint32_t len;
- uint8_t reserved0[3];
- uint8_t version;
- uint32_t blk0_len;
- uint8_t pbt;
- uint8_t flags;
- uint16_t reserved01;
- uint8_t loadparm[LOADPARM_LEN];
- union {
- IplBlockCcw ccw;
- IplBlockFcp fcp;
- IplBlockQemuScsi scsi;
- };
-} __attribute__ ((packed));
-typedef struct IplParameterBlock IplParameterBlock;
+#ifndef QEMU_PACKED
+#define QEMU_PACKED __attribute__((packed))
+#endif
-extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE)));
-
-#define QIPL_ADDRESS 0xcc
-
-/* Boot Menu flags */
-#define QIPL_FLAG_BM_OPTS_CMD 0x80
-#define QIPL_FLAG_BM_OPTS_ZIPL 0x40
-
-/*
- * This definition must be kept in sync with the definition
- * in hw/s390x/ipl.h
- */
-struct QemuIplParameters {
- uint8_t qipl_flags;
- uint8_t reserved1[3];
- uint64_t netboot_start_addr;
- uint32_t boot_menu_timeout;
- uint8_t reserved2[12];
-} __attribute__ ((packed));
-typedef struct QemuIplParameters QemuIplParameters;
+#include <qipl.h>
+#include <string.h>
extern QemuIplParameters qipl;
+extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE)));
+extern bool have_iplb;
#define S390_IPL_TYPE_FCP 0x00
#define S390_IPL_TYPE_CCW 0x02
@@ -123,4 +51,26 @@ static inline bool set_iplb(IplParameterBlock *iplb)
return manage_iplb(iplb, false);
}
+/*
+ * The IPL started on the device, but failed in some way. If the IPLB chain
+ * still has more devices left to try, use the next device in order.
+ */
+static inline bool load_next_iplb(void)
+{
+ IplParameterBlock *next_iplb;
+
+ if (qipl.chain_len < 1) {
+ return false;
+ }
+
+ qipl.index++;
+ next_iplb = (IplParameterBlock *) qipl.next_iplb;
+ memcpy(&iplb, next_iplb, sizeof(IplParameterBlock));
+
+ qipl.chain_len--;
+ qipl.next_iplb = qipl.next_iplb + sizeof(IplParameterBlock);
+
+ return true;
+}
+
#endif /* IPLB_H */
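load_next_iplb() assumes QEMU leaves a chain descriptor in qipl: chain_len entries remain and next_iplb holds the address of a contiguous array of IplParameterBlock structures. The exact field types live in qipl.h, which is not shown in this excerpt, so the following is only an illustrative sketch of walking such a chain; the struct layout and field widths are assumptions.

/* Illustrative sketch of walking a contiguous IPLB chain the way
 * load_next_iplb() does; layout and widths are assumed, not from qipl.h. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint8_t pbt;
    uint8_t pad[127];
} iplb_t;                       /* stand-in for IplParameterBlock */

static iplb_t current;

static int load_next(uint64_t *next_addr, uint8_t *chain_len, uint8_t *index)
{
    if (*chain_len < 1) {
        return 0;                                   /* chain exhausted */
    }
    (*index)++;
    memcpy(&current, (iplb_t *)(uintptr_t)*next_addr, sizeof(current));
    (*chain_len)--;
    *next_addr += sizeof(iplb_t);                   /* advance to the next entry */
    return 1;
}

int main(void)
{
    static iplb_t chain[2];                         /* pretend QEMU built this */
    uint64_t next = (uint64_t)(uintptr_t)chain;
    uint8_t len = 2, index = 0;

    while (load_next(&next, &len, &index)) {
        printf("trying boot entry %u\n", (unsigned)index);
    }
    return 0;
}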
diff --git a/pc-bios/s390-ccw/jump2ipl.c b/pc-bios/s390-ccw/jump2ipl.c
index 78f5f46..86321d0 100644
--- a/pc-bios/s390-ccw/jump2ipl.c
+++ b/pc-bios/s390-ccw/jump2ipl.c
@@ -6,7 +6,8 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
+#include <stdio.h>
#include "s390-ccw.h"
#include "s390-arch.h"
@@ -32,16 +33,22 @@ static void jump_to_IPL_addr(void)
/* should not return */
}
-void jump_to_IPL_code(uint64_t address)
+int jump_to_IPL_code(uint64_t address)
{
/* store the subsystem information _after_ the bootmap was loaded */
write_subsystem_identification();
write_iplb_location();
- /* prevent unknown IPL types in the guest */
+ /*
+ * The IPLB for QEMU SCSI type devices must be rebuilt during re-ipl. The
+ * iplb.devno is set to the boot position of the target SCSI device.
+ */
if (iplb.pbt == S390_IPL_TYPE_QEMU_SCSI) {
- iplb.pbt = S390_IPL_TYPE_CCW;
- set_iplb(&iplb);
+ iplb.devno = qipl.index;
+ }
+
+ if (have_iplb && !set_iplb(&iplb)) {
+ panic("Failed to set IPLB");
}
/*
@@ -57,7 +64,7 @@ void jump_to_IPL_code(uint64_t address)
debug_print_int("set IPL addr to", address ?: *reset_psw & PSW_MASK_SHORT_ADDR);
/* Ensure the guest output starts fresh */
- sclp_print("\n");
+ printf("\n");
/*
* HACK ALERT.
@@ -67,7 +74,8 @@ void jump_to_IPL_code(uint64_t address)
asm volatile("lghi %%r1,1\n\t"
"diag %%r1,%%r1,0x308\n\t"
: : : "1", "memory");
- panic("\n! IPL returns !\n");
+ puts("IPL code jump failed");
+ return -1;
}
void jump_to_low_kernel(void)
diff --git a/pc-bios/s390-ccw/libc.c b/pc-bios/s390-ccw/libc.c
deleted file mode 100644
index 3187923..0000000
--- a/pc-bios/s390-ccw/libc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * libc-style definitions and functions
- *
- * Copyright 2018 IBM Corp.
- * Author(s): Collin L. Walling <walling@linux.vnet.ibm.com>
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include "libc.h"
-#include "s390-ccw.h"
-
-/**
- * atoui:
- * @str: the string to be converted.
- *
- * Given a string @str, convert it to an integer. Leading spaces are
- * ignored. Any other non-numerical value will terminate the conversion
- * and return 0. This function only handles numbers between 0 and
- * UINT64_MAX inclusive.
- *
- * Returns: an integer converted from the string @str, or the number 0
- * if an error occurred.
- */
-uint64_t atoui(const char *str)
-{
- int val = 0;
-
- if (!str || !str[0]) {
- return 0;
- }
-
- while (*str == ' ') {
- str++;
- }
-
- while (*str) {
- if (!isdigit(*(unsigned char *)str)) {
- break;
- }
- val = val * 10 + *str - '0';
- str++;
- }
-
- return val;
-}
-
-/**
- * uitoa:
- * @num: an integer (base 10) to be converted.
- * @str: a pointer to a string to store the conversion.
- * @len: the length of the passed string.
- *
- * Given an integer @num, convert it to a string. The string @str must be
- * allocated beforehand. The resulting string will be null terminated and
- * returned. This function only handles numbers between 0 and UINT64_MAX
- * inclusive.
- *
- * Returns: the string @str of the converted integer @num
- */
-char *uitoa(uint64_t num, char *str, size_t len)
-{
- long num_idx = 1; /* account for NUL */
- uint64_t tmp = num;
-
- IPL_assert(str != NULL, "uitoa: no space allocated to store string");
-
- /* Count indices of num */
- while ((tmp /= 10) != 0) {
- num_idx++;
- }
-
- /* Check if we have enough space for num and NUL */
- IPL_assert(len > num_idx, "uitoa: array too small for conversion");
-
- str[num_idx--] = '\0';
-
- /* Convert int to string */
- while (num_idx >= 0) {
- str[num_idx--] = num % 10 + '0';
- num /= 10;
- }
-
- return str;
-}
diff --git a/pc-bios/s390-ccw/libc.h b/pc-bios/s390-ccw/libc.h
deleted file mode 100644
index bcdc457..0000000
--- a/pc-bios/s390-ccw/libc.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * libc-style definitions and functions
- *
- * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef S390_CCW_LIBC_H
-#define S390_CCW_LIBC_H
-
-typedef unsigned long size_t;
-typedef int bool;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-
-static inline void *memset(void *s, int c, size_t n)
-{
- size_t i;
- unsigned char *p = s;
-
- for (i = 0; i < n; i++) {
- p[i] = c;
- }
-
- return s;
-}
-
-static inline void *memcpy(void *s1, const void *s2, size_t n)
-{
- uint8_t *dest = s1;
- const uint8_t *src = s2;
- size_t i;
-
- for (i = 0; i < n; i++) {
- dest[i] = src[i];
- }
-
- return s1;
-}
-
-static inline int memcmp(const void *s1, const void *s2, size_t n)
-{
- size_t i;
- const uint8_t *p1 = s1, *p2 = s2;
-
- for (i = 0; i < n; i++) {
- if (p1[i] != p2[i]) {
- return p1[i] > p2[i] ? 1 : -1;
- }
- }
-
- return 0;
-}
-
-static inline size_t strlen(const char *str)
-{
- size_t i;
- for (i = 0; *str; i++) {
- str++;
- }
- return i;
-}
-
-static inline char *strcat(char *dest, const char *src)
-{
- int i;
- char *dest_end = dest + strlen(dest);
-
- for (i = 0; i <= strlen(src); i++) {
- dest_end[i] = src[i];
- }
- return dest;
-}
-
-static inline int isdigit(int c)
-{
- return (c >= '0') && (c <= '9');
-}
-
-uint64_t atoui(const char *str);
-char *uitoa(uint64_t num, char *str, size_t len);
-
-#endif
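With the private libc.h/libc.c removed, the firmware picks up string, ctype, stdio and stdlib routines from the shared libc headers, and the former atoui() callers switch to atoi(). Both skip leading spaces and stop at the first non-digit; the main difference is the return type (int instead of uint64_t), which is enough for the small boot indices parsed here. A tiny illustration with a hypothetical loadparm string:

/* atoui() -> atoi(): same parsing behaviour for the small values used here. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *loadparm = "  3";                  /* hypothetical loadparm digits */

    printf("boot index = %d\n", atoi(loadparm));   /* prints 3 */
    return 0;
}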
diff --git a/pc-bios/s390-ccw/main.c b/pc-bios/s390-ccw/main.c
index 5506798..76bf743 100644
--- a/pc-bios/s390-ccw/main.c
+++ b/pc-bios/s390-ccw/main.c
@@ -8,7 +8,9 @@
* directory.
*/
-#include "libc.h"
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
#include "helper.h"
#include "s390-arch.h"
#include "s390-ccw.h"
@@ -21,7 +23,7 @@ static SubChannelId blk_schid = { .one = 1 };
static char loadparm_str[LOADPARM_LEN + 1];
QemuIplParameters qipl;
IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE)));
-static bool have_iplb;
+bool have_iplb;
static uint16_t cutype;
LowCore *lowcore; /* Yes, this *is* a pointer to address 0 */
@@ -36,8 +38,13 @@ LowCore *lowcore; /* Yes, this *is* a pointer to address 0 */
*/
void write_subsystem_identification(void)
{
- lowcore->subchannel_id = blk_schid.sch_id;
- lowcore->subchannel_nr = blk_schid.sch_no;
+ if (cutype == CU_TYPE_VIRTIO && virtio_get_device_type() == VIRTIO_ID_NET) {
+ lowcore->subchannel_id = net_schid.sch_id;
+ lowcore->subchannel_nr = net_schid.sch_no;
+ } else {
+ lowcore->subchannel_id = blk_schid.sch_id;
+ lowcore->subchannel_nr = blk_schid.sch_no;
+ }
lowcore->io_int_parm = 0;
}
@@ -48,9 +55,15 @@ void write_iplb_location(void)
}
}
+static void copy_qipl(void)
+{
+ QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS;
+ memcpy(&qipl, early_qipl, sizeof(QemuIplParameters));
+}
+
unsigned int get_loadparm_index(void)
{
- return atoui(loadparm_str);
+ return atoi(loadparm_str);
}
static int is_dev_possibly_bootable(int dev_no, int sch_no)
@@ -70,6 +83,9 @@ static int is_dev_possibly_bootable(int dev_no, int sch_no)
enable_subchannel(blk_schid);
cutype = cu_type(blk_schid);
+ if (cutype == CU_TYPE_UNKNOWN) {
+ return -EIO;
+ }
/*
* Note: we always have to run virtio_is_supported() here to make
@@ -142,6 +158,7 @@ static void menu_setup(void)
/* If loadparm was set to any other value, then do not enable menu */
if (memcmp(loadparm_str, LOADPARM_EMPTY, LOADPARM_LEN) != 0) {
+ menu_set_parms(qipl.qipl_flags & ~BOOT_MENU_FLAG_MASK, 0);
return;
}
@@ -174,26 +191,34 @@ static void boot_setup(void)
{
char lpmsg[] = "LOADPARM=[________]\n";
- sclp_get_loadparm_ascii(loadparm_str);
+ if (have_iplb && memcmp(iplb.loadparm, NO_LOADPARM, LOADPARM_LEN) != 0) {
+ ebcdic_to_ascii((char *) iplb.loadparm, loadparm_str, LOADPARM_LEN);
+ } else {
+ sclp_get_loadparm_ascii(loadparm_str);
+ }
+
+ if (have_iplb) {
+ menu_setup();
+ }
+
memcpy(lpmsg + 10, loadparm_str, 8);
- sclp_print(lpmsg);
+ puts(lpmsg);
/*
* Clear out any potential S390EP magic (see jump_to_low_kernel()),
* so we don't taint our decision-making process during a reboot.
*/
memset((char *)S390EP, 0, 6);
-
- have_iplb = store_iplb(&iplb);
}
-static void find_boot_device(void)
+static bool find_boot_device(void)
{
VDev *vdev = virtio_get_device();
- bool found;
+ bool found = false;
switch (iplb.pbt) {
case S390_IPL_TYPE_CCW:
+ vdev->scsi_device_selected = false;
debug_print_int("device no. ", iplb.ccw.devno);
blk_schid.ssid = iplb.ccw.ssid & 0x3;
debug_print_int("ssid ", blk_schid.ssid);
@@ -208,28 +233,21 @@ static void find_boot_device(void)
found = find_subch(iplb.scsi.devno);
break;
default:
- panic("List-directed IPL not supported yet!\n");
+ puts("Unsupported IPLB");
}
- IPL_assert(found, "Boot device not found\n");
+ return found;
}
static int virtio_setup(void)
{
VDev *vdev = virtio_get_device();
- QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS;
+ vdev->is_cdrom = false;
int ret;
- memcpy(&qipl, early_qipl, sizeof(QemuIplParameters));
-
- if (have_iplb) {
- menu_setup();
- }
-
switch (vdev->senseid.cu_model) {
case VIRTIO_ID_NET:
- sclp_print("Network boot device detected\n");
- vdev->netboot_start_addr = qipl.netboot_start_addr;
+ puts("Network boot device detected");
return 0;
case VIRTIO_ID_BLOCK:
ret = virtio_blk_setup_device(blk_schid);
@@ -238,11 +256,13 @@ static int virtio_setup(void)
ret = virtio_scsi_setup_device(blk_schid);
break;
default:
- panic("\n! No IPL device available !\n");
+ puts("\n! No IPL device available !\n");
+ return -1;
}
- if (!ret) {
- IPL_assert(virtio_ipl_disk_is_valid(), "No valid IPL device detected");
+ if (!ret && !virtio_ipl_disk_is_valid()) {
+ puts("No valid IPL device detected");
+ return -ENODEV;
}
return ret;
@@ -253,16 +273,15 @@ static void ipl_boot_device(void)
switch (cutype) {
case CU_TYPE_DASD_3990:
case CU_TYPE_DASD_2107:
- dasd_ipl(blk_schid, cutype); /* no return */
+ dasd_ipl(blk_schid, cutype);
break;
case CU_TYPE_VIRTIO:
if (virtio_setup() == 0) {
- zipl_load(); /* Only returns in case of errors */
+ zipl_load();
}
break;
default:
- print_int("Attempting to boot from unexpected device type", cutype);
- panic("\nBoot failed.\n");
+ printf("Attempting to boot from unexpected device type 0x%X\n", cutype);
}
}
@@ -287,20 +306,28 @@ static void probe_boot_device(void)
}
}
- sclp_print("Could not find a suitable boot device (none specified)\n");
+ puts("Could not find a suitable boot device (none specified)");
}
void main(void)
{
+ copy_qipl();
sclp_setup();
css_setup();
- boot_setup();
- if (have_iplb) {
- find_boot_device();
- ipl_boot_device();
- } else {
+ have_iplb = store_iplb(&iplb);
+ if (!have_iplb) {
+ boot_setup();
probe_boot_device();
}
- panic("Failed to load OS from hard disk\n");
+ while (have_iplb) {
+ boot_setup();
+ if (have_iplb && find_boot_device()) {
+ ipl_boot_device();
+ }
+ have_iplb = load_next_iplb();
+ }
+
+ panic("No suitable device for IPL. Halting...");
+
}
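boot_setup() now prefers the loadparm carried in the current IPLB (converted from EBCDIC) and only falls back to the SCLP-provided value when the IPLB field is empty, so each entry in the boot chain can select its own menu entry. A small sketch of that precedence; the conversion helper is a stub and the all-blank NO_LOADPARM value is an assumption, not taken from the patch.

/* Sketch of the loadparm precedence in boot_setup(); ebcdic_to_ascii() is
 * stubbed and NO_LOADPARM is an assumed "empty" marker. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define LOADPARM_LEN 8
#define NO_LOADPARM  "        "          /* illustrative: eight blanks */

static void ebcdic_to_ascii_stub(const char *in, char *out, size_t len)
{
    memcpy(out, in, len);                /* stand-in for the real conversion */
}

static void pick_loadparm(bool have_iplb, const char *iplb_lp,
                          const char *sclp_lp, char *out)
{
    if (have_iplb && memcmp(iplb_lp, NO_LOADPARM, LOADPARM_LEN) != 0) {
        ebcdic_to_ascii_stub(iplb_lp, out, LOADPARM_LEN);   /* per-device value */
    } else {
        memcpy(out, sclp_lp, LOADPARM_LEN);                 /* global SCLP value */
    }
    out[LOADPARM_LEN] = '\0';
}

int main(void)
{
    char lp[LOADPARM_LEN + 1];

    pick_loadparm(true, "2       ", "0       ", lp);
    printf("LOADPARM=[%s]\n", lp);       /* prints the IPLB-provided value */
    return 0;
}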
diff --git a/pc-bios/s390-ccw/menu.c b/pc-bios/s390-ccw/menu.c
index d601952..84062e9 100644
--- a/pc-bios/s390-ccw/menu.c
+++ b/pc-bios/s390-ccw/menu.c
@@ -9,7 +9,10 @@
* directory.
*/
-#include "libc.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include "s390-ccw.h"
#include "sclp.h"
#include "s390-time.h"
@@ -93,7 +96,7 @@ static int read_prompt(char *buf, size_t len)
case KEYCODE_BACKSP:
if (idx > 0) {
buf[--idx] = 0;
- sclp_print("\b \b");
+ printf("\b \b");
}
continue;
case KEYCODE_ENTER:
@@ -103,7 +106,7 @@ static int read_prompt(char *buf, size_t len)
/* Echo input and add to buffer */
if (idx < len) {
buf[idx++] = inp[0];
- sclp_print(inp);
+ printf("%s", inp);
}
}
}
@@ -140,22 +143,19 @@ static int get_index(void)
}
}
- return atoui(buf);
+ return atoi(buf);
}
static void boot_menu_prompt(bool retry)
{
- char tmp[11];
-
if (retry) {
- sclp_print("\nError: undefined configuration"
+ printf("\nError: undefined configuration"
"\nPlease choose:\n");
} else if (timeout > 0) {
- sclp_print("Please choose (default will boot in ");
- sclp_print(uitoa(timeout / 1000, tmp, sizeof(tmp)));
- sclp_print(" seconds):\n");
+ printf("Please choose (default will boot in %d seconds):\n",
+ (int)(timeout / 1000));
} else {
- sclp_print("Please choose:\n");
+ puts("Please choose:");
}
}
@@ -163,7 +163,6 @@ static int get_boot_index(bool *valid_entries)
{
int boot_index;
bool retry = false;
- char tmp[5];
do {
boot_menu_prompt(retry);
@@ -172,8 +171,7 @@ static int get_boot_index(bool *valid_entries)
} while (boot_index < 0 || boot_index >= MAX_BOOT_ENTRIES ||
!valid_entries[boot_index]);
- sclp_print("\nBooting entry #");
- sclp_print(uitoa(boot_index, tmp, sizeof(tmp)));
+ printf("\nBooting entry #%d", boot_index);
return boot_index;
}
@@ -187,9 +185,9 @@ static int zipl_print_entry(const char *data, size_t len)
buf[len] = '\n';
buf[len + 1] = '\0';
- sclp_print(buf);
+ printf("%s", buf);
- return buf[0] == ' ' ? atoui(buf + 1) : atoui(buf);
+ return buf[0] == ' ' ? atoi(buf + 1) : atoi(buf);
}
int menu_get_zipl_boot_index(const char *menu_data)
@@ -209,7 +207,7 @@ int menu_get_zipl_boot_index(const char *menu_data)
}
/* Print banner */
- sclp_print("s390-ccw zIPL Boot Menu\n\n");
+ puts("s390-ccw zIPL Boot Menu\n");
menu_data += strlen(menu_data) + 1;
/* Print entries */
@@ -221,37 +219,34 @@ int menu_get_zipl_boot_index(const char *menu_data)
valid_entries[entry] = true;
if (entry == 0) {
- sclp_print("\n");
+ printf("\n");
}
}
- sclp_print("\n");
+ printf("\n");
return get_boot_index(valid_entries);
}
int menu_get_enum_boot_index(bool *valid_entries)
{
- char tmp[3];
int i;
- sclp_print("s390-ccw Enumerated Boot Menu.\n\n");
+ puts("s390-ccw Enumerated Boot Menu.\n");
for (i = 0; i < MAX_BOOT_ENTRIES; i++) {
if (valid_entries[i]) {
if (i < 10) {
- sclp_print(" ");
+ printf(" ");
}
- sclp_print("[");
- sclp_print(uitoa(i, tmp, sizeof(tmp)));
- sclp_print("]");
+ printf("[%d]", i);
if (i == 0) {
- sclp_print(" default\n");
+ printf(" default\n");
}
- sclp_print("\n");
+ printf("\n");
}
}
- sclp_print("\n");
+ printf("\n");
return get_boot_index(valid_entries);
}
diff --git a/pc-bios/s390-ccw/netboot.mak b/pc-bios/s390-ccw/netboot.mak
deleted file mode 100644
index 046aa35..0000000
--- a/pc-bios/s390-ccw/netboot.mak
+++ /dev/null
@@ -1,62 +0,0 @@
-
-SLOF_DIR := $(SRC_PATH)/../../roms/SLOF
-
-NETOBJS := start.o sclp.o cio.o virtio.o virtio-net.o jump2ipl.o netmain.o
-
-LIBC_INC := -nostdinc -I$(SLOF_DIR)/lib/libc/include
-LIBNET_INC := -I$(SLOF_DIR)/lib/libnet
-
-NETLDFLAGS := $(LDFLAGS) -Wl,-Ttext=0x7800000
-
-$(NETOBJS): EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC)
-
-s390-netboot.elf: $(NETOBJS) libnet.a libc.a
- $(call quiet-command,$(CC) $(NETLDFLAGS) -o $@ $^,Linking)
-
-s390-netboot.img: s390-netboot.elf
- $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into)
-
-# libc files:
-
-LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \
- -MMD -MP -MT $@ -MF $(@:%.o=%.d)
-
-CTYPE_OBJS = isdigit.o isxdigit.o toupper.o
-%.o : $(SLOF_DIR)/lib/libc/ctype/%.c
- $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
-
-STRING_OBJS = strcat.o strchr.o strrchr.o strcpy.o strlen.o strncpy.o \
- strcmp.o strncmp.o strcasecmp.o strncasecmp.o strstr.o \
- memset.o memcpy.o memmove.o memcmp.o
-%.o : $(SLOF_DIR)/lib/libc/string/%.c
- $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
-
-STDLIB_OBJS = atoi.o atol.o strtoul.o strtol.o rand.o malloc.o free.o
-%.o : $(SLOF_DIR)/lib/libc/stdlib/%.c
- $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
-
-STDIO_OBJS = sprintf.o snprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \
- printf.o putc.o puts.o putchar.o stdchnls.o fileno.o
-%.o : $(SLOF_DIR)/lib/libc/stdio/%.c
- $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
-
-sbrk.o: $(SLOF_DIR)/slof/sbrk.c
- $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling)
-
-LIBCOBJS := $(STRING_OBJS) $(CTYPE_OBJS) $(STDLIB_OBJS) $(STDIO_OBJS) sbrk.o
-
-libc.a: $(LIBCOBJS)
- $(call quiet-command,$(AR) -rc $@ $^,Creating static library)
-
-# libnet files:
-
-LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \
- dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o
-LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \
- -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d)
-
-%.o : $(SLOF_DIR)/lib/libnet/%.c
- $(call quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,Compiling)
-
-libnet.a: $(LIBNETOBJS)
- $(call quiet-command,$(AR) -rc $@ $^,Creating static library)
diff --git a/pc-bios/s390-ccw/netmain.c b/pc-bios/s390-ccw/netmain.c
index 5cd619b..719a547 100644
--- a/pc-bios/s390-ccw/netmain.c
+++ b/pc-bios/s390-ccw/netmain.c
@@ -41,7 +41,6 @@
#define DEFAULT_TFTP_RETRIES 20
extern char _start[];
-void write_iplb_location(void) {}
#define KERNEL_ADDR ((void *)0L)
#define KERNEL_MAX_SIZE ((long)_start)
@@ -50,10 +49,9 @@ void write_iplb_location(void) {}
/* STSI 3.2.2 offset of first vmdb + offset of uuid inside vmdb */
#define STSI322_VMDB_UUID_OFFSET ((8 + 12) * 4)
-IplParameterBlock iplb __attribute__((aligned(PAGE_SIZE)));
static char cfgbuf[2048];
-static SubChannelId net_schid = { .one = 1 };
+SubChannelId net_schid = { .one = 1 };
static uint8_t mac[6];
static uint64_t dest_timer;
@@ -155,19 +153,10 @@ static int tftp_load(filename_ip_t *fnip, void *buffer, int len)
return rc;
}
-static int net_init(filename_ip_t *fn_ip)
+static int net_init_ip(filename_ip_t *fn_ip)
{
int rc;
- memset(fn_ip, 0, sizeof(filename_ip_t));
-
- rc = virtio_net_init(mac);
- if (rc < 0) {
- puts("Could not initialize network device");
- return -101;
- }
- fn_ip->fd = rc;
-
printf(" Using MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -179,6 +168,14 @@ static int net_init(filename_ip_t *fn_ip)
if (fn_ip->ip_version == 4) {
set_ipv4_address(fn_ip->own_ip);
}
+ } else if (rc == -2) {
+ printf("ARP request to TFTP server (%d.%d.%d.%d) failed\n",
+ (fn_ip->server_ip >> 24) & 0xFF, (fn_ip->server_ip >> 16) & 0xFF,
+ (fn_ip->server_ip >> 8) & 0xFF, fn_ip->server_ip & 0xFF);
+ return -102;
+ } else if (rc == -4 || rc == -3) {
+ puts("Can't obtain TFTP server IP address");
+ return -107;
} else {
puts("Could not get IP address");
return -101;
@@ -194,17 +191,6 @@ static int net_init(filename_ip_t *fn_ip)
printf(" Using IPv6 address: %s\n", ip6_str);
}
- if (rc == -2) {
- printf("ARP request to TFTP server (%d.%d.%d.%d) failed\n",
- (fn_ip->server_ip >> 24) & 0xFF, (fn_ip->server_ip >> 16) & 0xFF,
- (fn_ip->server_ip >> 8) & 0xFF, fn_ip->server_ip & 0xFF);
- return -102;
- }
- if (rc == -4 || rc == -3) {
- puts("Can't obtain TFTP server IP address");
- return -107;
- }
-
printf(" Using TFTP server: ");
if (fn_ip->ip_version == 4) {
printf("%d.%d.%d.%d\n",
@@ -223,11 +209,33 @@ static int net_init(filename_ip_t *fn_ip)
return rc;
}
+static int net_init(filename_ip_t *fn_ip)
+{
+ int rc;
+
+ memset(fn_ip, 0, sizeof(filename_ip_t));
+
+ rc = virtio_net_init(mac);
+ if (rc < 0) {
+ puts("Could not initialize network device");
+ return -101;
+ }
+ fn_ip->fd = rc;
+
+ rc = net_init_ip(fn_ip);
+ if (rc < 0) {
+ virtio_net_deinit();
+ }
+
+ return rc;
+}
+
static void net_release(filename_ip_t *fn_ip)
{
if (fn_ip->ip_version == 4) {
dhcp_send_release(fn_ip->fd);
}
+ virtio_net_deinit();
}
/**
@@ -293,7 +301,7 @@ static int load_kernel_with_initrd(filename_ip_t *fn_ip,
printf("Loading pxelinux.cfg entry '%s'\n", entry->label);
if (!entry->kernel) {
- printf("Kernel entry is missing!\n");
+ puts("Kernel entry is missing!\n");
return -1;
}
@@ -438,15 +446,6 @@ static int net_try_direct_tftp_load(filename_ip_t *fn_ip)
return rc;
}
-void write_subsystem_identification(void)
-{
- SubChannelId *schid = (SubChannelId *) 184;
- uint32_t *zeroes = (uint32_t *) 188;
-
- *schid = net_schid;
- *zeroes = 0;
-}
-
static bool find_net_dev(Schib *schib, int dev_no)
{
int i, r;
@@ -475,7 +474,7 @@ static bool find_net_dev(Schib *schib, int dev_no)
return false;
}
-static void virtio_setup(void)
+static bool virtio_setup(void)
{
Schib schib;
int ssid;
@@ -489,7 +488,7 @@ static void virtio_setup(void)
*/
enable_mss_facility();
- if (store_iplb(&iplb)) {
+ if (have_iplb || store_iplb(&iplb)) {
IPL_assert(iplb.pbt == S390_IPL_TYPE_CCW, "IPL_TYPE_CCW expected");
dev_no = iplb.ccw.devno;
debug_print_int("device no. ", dev_no);
@@ -506,22 +505,26 @@ static void virtio_setup(void)
}
}
- IPL_assert(found, "No virtio net device found");
+ return found;
}
-void main(void)
+int netmain(void)
{
filename_ip_t fn_ip;
int rc, fnlen;
sclp_setup();
- sclp_print("Network boot starting...\n");
+ puts("Network boot starting...");
- virtio_setup();
+ if (!virtio_setup()) {
+ puts("No virtio net device found.");
+ return -1;
+ }
rc = net_init(&fn_ip);
if (rc) {
- panic("Network initialization failed. Halting.\n");
+ puts("Network initialization failed.");
+ return -1;
}
fnlen = strlen(fn_ip.filename);
@@ -535,9 +538,10 @@ void main(void)
net_release(&fn_ip);
if (rc > 0) {
- sclp_print("Network loading done, starting kernel...\n");
+ puts("Network loading done, starting kernel...");
jump_to_low_kernel();
}
- panic("Failed to load OS from network\n");
+ puts("Failed to load OS from network.");
+ return -1;
}
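net_init() is now split so that IP configuration failures unwind the virtio-net device through virtio_net_deinit(), and netmain() returns to zipl_load() instead of panicking. The teardown-on-partial-failure pattern, sketched with stubs standing in for the real virtio and network calls:

/* Init/teardown-on-failure pattern from the reworked net_init();
 * all three helpers below are stubs. */
#include <stdio.h>

static int virtio_net_init_stub(void)
{
    return 0;                            /* returns a descriptor, or < 0 on error */
}

static void virtio_net_deinit_stub(void)
{
    puts("network device released");
}

static int net_init_ip_stub(int fd)
{
    (void)fd;
    return -101;                         /* e.g. no address could be obtained */
}

static int net_init_stub(void)
{
    int fd = virtio_net_init_stub();
    if (fd < 0) {
        puts("Could not initialize network device");
        return -101;
    }

    int rc = net_init_ip_stub(fd);
    if (rc < 0) {
        virtio_net_deinit_stub();        /* undo the device setup on failure */
    }
    return rc;
}

int main(void)
{
    return net_init_stub() < 0 ? 1 : 0;
}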
diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h
index c977a52..6cdce3e 100644
--- a/pc-bios/s390-ccw/s390-ccw.h
+++ b/pc-bios/s390-ccw/s390-ccw.h
@@ -13,6 +13,11 @@
/* #define DEBUG */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
@@ -25,10 +30,8 @@ typedef unsigned long long u64;
#define EIO 1
#define EBUSY 2
#define ENODEV 3
+#define EINVAL 4
-#ifndef NULL
-#define NULL 0
-#endif
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
@@ -53,6 +56,9 @@ void write_iplb_location(void);
unsigned int get_loadparm_index(void);
void main(void);
+/* netmain.c */
+int netmain(void);
+
/* sclp.c */
void sclp_print(const char *string);
void sclp_set_write_mask(uint32_t receive_mask, uint32_t send_mask);
@@ -72,7 +78,7 @@ void zipl_load(void);
/* jump2ipl.c */
void write_reset_psw(uint64_t psw);
-void jump_to_IPL_code(uint64_t address);
+int jump_to_IPL_code(uint64_t address);
void jump_to_low_kernel(void);
/* menu.c */
@@ -87,7 +93,7 @@ bool menu_is_enabled_enum(void);
__attribute__ ((__noreturn__))
static inline void panic(const char *string)
{
- sclp_print(string);
+ printf("ERROR: %s\n ", string);
disabled_wait();
}
@@ -109,20 +115,10 @@ static inline void fill_hex_val(char *out, void *ptr, unsigned size)
}
}
-static inline void print_int(const char *desc, u64 addr)
-{
- char out[] = ": 0xffffffffffffffff\n";
-
- fill_hex_val(&out[4], &addr, sizeof(addr));
-
- sclp_print(desc);
- sclp_print(out);
-}
-
static inline void debug_print_int(const char *desc, u64 addr)
{
#ifdef DEBUG
- print_int(desc, addr);
+ printf("%s 0x%X\n", desc, addr);
#endif
}
@@ -147,18 +143,14 @@ static inline void debug_print_addr(const char *desc, void *p)
static inline void IPL_assert(bool term, const char *message)
{
if (!term) {
- sclp_print("\n! ");
- sclp_print(message);
- panic(" !\n"); /* no return */
+ panic(message); /* no return */
}
}
static inline void IPL_check(bool term, const char *message)
{
if (!term) {
- sclp_print("\n! WARNING: ");
- sclp_print(message);
- sclp_print(" !\n");
+ printf("WARNING: %s\n", message);
}
}
diff --git a/pc-bios/s390-ccw/sclp.c b/pc-bios/s390-ccw/sclp.c
index 7251f9a..4a07de0 100644
--- a/pc-bios/s390-ccw/sclp.c
+++ b/pc-bios/s390-ccw/sclp.c
@@ -8,7 +8,7 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
#include "s390-ccw.h"
#include "sclp.h"
@@ -101,11 +101,6 @@ long write(int fd, const void *str, size_t len)
return len;
}
-void sclp_print(const char *str)
-{
- write(1, str, strlen(str));
-}
-
void sclp_get_loadparm_ascii(char *loadparm)
{
diff --git a/pc-bios/s390-ccw/start.S b/pc-bios/s390-ccw/start.S
index 061b065..b70213e 100644
--- a/pc-bios/s390-ccw/start.S
+++ b/pc-bios/s390-ccw/start.S
@@ -112,9 +112,7 @@ io_new_code:
lctlg %c6,%c6,0(%r15)
br %r14
- .align 8
-bss_start_literal:
- .quad __bss_start
+ .balign 8
disabled_wait_psw:
.quad 0x0002000180000000,0x0000000000000000
enabled_wait_psw:
@@ -124,8 +122,13 @@ external_new_mask:
io_new_mask:
.quad 0x0000000180000000
+.data
+ .balign 8
+bss_start_literal:
+ .quad __bss_start
+
.bss
- .align 8
+ .balign 8
stack:
.space STACK_SIZE
.size stack,STACK_SIZE
diff --git a/pc-bios/s390-ccw/virtio-blkdev.c b/pc-bios/s390-ccw/virtio-blkdev.c
index a81207b..7b2d1e2 100644
--- a/pc-bios/s390-ccw/virtio-blkdev.c
+++ b/pc-bios/s390-ccw/virtio-blkdev.c
@@ -8,7 +8,7 @@
* directory.
*/
-#include "libc.h"
+#include <stdio.h>
#include "s390-ccw.h"
#include "virtio.h"
#include "virtio-scsi.h"
@@ -59,7 +59,7 @@ int virtio_read_many(unsigned long sector, void *load_addr, int sec_num)
case VIRTIO_ID_SCSI:
return virtio_scsi_read_many(vdev, sector, load_addr, sec_num);
}
- panic("\n! No readable IPL device !\n");
+
return -1;
}
@@ -73,13 +73,13 @@ unsigned long virtio_load_direct(unsigned long rec_list1, unsigned long rec_list
unsigned long addr = (unsigned long)load_addr;
if (sec_len != virtio_get_block_size()) {
- return -1;
+ return 0;
}
- sclp_print(".");
+ printf(".");
status = virtio_read_many(sec, (void *)addr, sec_num);
if (status) {
- panic("I/O Error");
+ return 0;
}
addr += sec_num * virtio_get_block_size();
@@ -230,7 +230,7 @@ int virtio_blk_setup_device(SubChannelId schid)
vdev->schid = schid;
virtio_setup_ccw(vdev);
- sclp_print("Using virtio-blk.\n");
+ puts("Using virtio-blk.");
return 0;
}
diff --git a/pc-bios/s390-ccw/virtio-net.c b/pc-bios/s390-ccw/virtio-net.c
index 2fcb0a5..301445b 100644
--- a/pc-bios/s390-ccw/virtio-net.c
+++ b/pc-bios/s390-ccw/virtio-net.c
@@ -51,11 +51,16 @@ int virtio_net_init(void *mac_addr)
void *buf;
int i;
+ rx_last_idx = 0;
+
vdev->guest_features[0] = VIRTIO_NET_F_MAC_BIT;
virtio_setup_ccw(vdev);
- IPL_assert(vdev->guest_features[0] & VIRTIO_NET_F_MAC_BIT,
- "virtio-net device does not support the MAC address feature");
+ if (!(vdev->guest_features[0] & VIRTIO_NET_F_MAC_BIT)) {
+ puts("virtio-net device does not support the MAC address feature");
+ return -1;
+ }
+
memcpy(mac_addr, vdev->config.net.mac, ETH_ALEN);
for (i = 0; i < 64; i++) {
@@ -135,3 +140,8 @@ int recv(int fd, void *buf, int maxlen, int flags)
return len;
}
+
+void virtio_net_deinit(void)
+{
+ virtio_reset(virtio_get_device());
+}
diff --git a/pc-bios/s390-ccw/virtio-scsi.c b/pc-bios/s390-ccw/virtio-scsi.c
index d1a84b9..71db75c 100644
--- a/pc-bios/s390-ccw/virtio-scsi.c
+++ b/pc-bios/s390-ccw/virtio-scsi.c
@@ -9,7 +9,8 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
+#include <stdio.h>
#include "s390-ccw.h"
#include "virtio.h"
#include "scsi.h"
@@ -25,20 +26,22 @@ static uint8_t scsi_inquiry_std_response[256];
static ScsiInquiryEvpdPages scsi_inquiry_evpd_pages_response;
static ScsiInquiryEvpdBl scsi_inquiry_evpd_bl_response;
-static inline void vs_assert(bool term, const char **msgs)
+static inline bool vs_assert(bool term, const char **msgs)
{
if (!term) {
int i = 0;
- sclp_print("\n! ");
+ printf("\n! ");
while (msgs[i]) {
- sclp_print(msgs[i++]);
+ printf("%s", msgs[i++]);
}
- panic(" !\n");
+ puts(" !");
}
+
+ return term;
}
-static void virtio_scsi_verify_response(VirtioScsiCmdResp *resp,
+static bool virtio_scsi_verify_response(VirtioScsiCmdResp *resp,
const char *title)
{
const char *mr[] = {
@@ -55,8 +58,8 @@ static void virtio_scsi_verify_response(VirtioScsiCmdResp *resp,
0
};
- vs_assert(resp->response == VIRTIO_SCSI_S_OK, mr);
- vs_assert(resp->status == CDB_STATUS_GOOD, ms);
+ return vs_assert(resp->response == VIRTIO_SCSI_S_OK, mr) &&
+ vs_assert(resp->status == CDB_STATUS_GOOD, ms);
}
static void prepare_request(VDev *vdev, const void *cdb, int cdb_size,
@@ -77,24 +80,31 @@ static void prepare_request(VDev *vdev, const void *cdb, int cdb_size,
}
}
-static inline void vs_io_assert(bool term, const char *msg)
+static inline bool vs_io_assert(bool term, const char *msg)
{
- if (!term) {
- virtio_scsi_verify_response(&resp, msg);
+ if (!term && !virtio_scsi_verify_response(&resp, msg)) {
+ return false;
}
+
+ return true;
}
-static void vs_run(const char *title, VirtioCmd *cmd, VDev *vdev,
+static int vs_run(const char *title, VirtioCmd *cmd, VDev *vdev,
const void *cdb, int cdb_size,
void *data, uint32_t data_size)
{
prepare_request(vdev, cdb, cdb_size, data, data_size);
- vs_io_assert(virtio_run(vdev, VR_REQUEST, cmd) == 0, title);
+ if (!vs_io_assert(virtio_run(vdev, VR_REQUEST, cmd) == 0, title)) {
+ puts(title);
+ return -EIO;
+ }
+
+ return 0;
}
/* SCSI protocol implementation routines */
-static bool scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page,
+static int scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page,
void *data, uint32_t data_size)
{
ScsiCdbInquiry cdb = {
@@ -109,12 +119,13 @@ static bool scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page,
{ data, data_size, VRING_DESC_F_WRITE },
};
- vs_run("inquiry", inquiry, vdev, &cdb, sizeof(cdb), data, data_size);
+ int ret = vs_run("inquiry", inquiry,
+ vdev, &cdb, sizeof(cdb), data, data_size);
- return virtio_scsi_response_ok(&resp);
+ return ret ? ret : virtio_scsi_response_ok(&resp);
}
-static bool scsi_test_unit_ready(VDev *vdev)
+static int scsi_test_unit_ready(VDev *vdev)
{
ScsiCdbTestUnitReady cdb = {
.command = 0x00,
@@ -130,7 +141,7 @@ static bool scsi_test_unit_ready(VDev *vdev)
return virtio_scsi_response_ok(&resp);
}
-static bool scsi_report_luns(VDev *vdev, void *data, uint32_t data_size)
+static int scsi_report_luns(VDev *vdev, void *data, uint32_t data_size)
{
ScsiCdbReportLuns cdb = {
.command = 0xa0,
@@ -143,13 +154,13 @@ static bool scsi_report_luns(VDev *vdev, void *data, uint32_t data_size)
{ data, data_size, VRING_DESC_F_WRITE },
};
- vs_run("report luns", report_luns,
+ int ret = vs_run("report luns", report_luns,
vdev, &cdb, sizeof(cdb), data, data_size);
- return virtio_scsi_response_ok(&resp);
+ return ret ? ret : virtio_scsi_response_ok(&resp);
}
-static bool scsi_read_10(VDev *vdev,
+static int scsi_read_10(VDev *vdev,
unsigned long sector, int sectors, void *data,
unsigned int data_size)
{
@@ -167,12 +178,13 @@ static bool scsi_read_10(VDev *vdev,
debug_print_int("read_10 sector", sector);
debug_print_int("read_10 sectors", sectors);
- vs_run("read(10)", read_10, vdev, &cdb, sizeof(cdb), data, data_size);
+ int ret = vs_run("read(10)", read_10,
+ vdev, &cdb, sizeof(cdb), data, data_size);
- return virtio_scsi_response_ok(&resp);
+ return ret ? ret : virtio_scsi_response_ok(&resp);
}
-static bool scsi_read_capacity(VDev *vdev,
+static int scsi_read_capacity(VDev *vdev,
void *data, uint32_t data_size)
{
ScsiCdbReadCapacity16 cdb = {
@@ -186,10 +198,10 @@ static bool scsi_read_capacity(VDev *vdev,
{ data, data_size, VRING_DESC_F_WRITE },
};
- vs_run("read capacity", read_capacity_16,
+ int ret = vs_run("read capacity", read_capacity_16,
vdev, &cdb, sizeof(cdb), data, data_size);
- return virtio_scsi_response_ok(&resp);
+ return ret ? ret : virtio_scsi_response_ok(&resp);
}
/* virtio-scsi routines */
@@ -206,7 +218,7 @@ static int virtio_scsi_locate_device(VDev *vdev)
static uint8_t data[16 + 8 * 63];
ScsiLunReport *r = (void *) data;
ScsiDevice *sdev = vdev->scsi_device;
- int i, luns;
+ int i, ret, luns;
/* QEMU has hardcoded channel #0 in many places.
* If this hardcoded value is ever changed, we'll need to add code for
@@ -232,15 +244,23 @@ static int virtio_scsi_locate_device(VDev *vdev)
sdev->channel = channel;
sdev->target = target;
sdev->lun = 0; /* LUN has to be 0 for REPORT LUNS */
- if (!scsi_report_luns(vdev, data, sizeof(data))) {
+ ret = scsi_report_luns(vdev, data, sizeof(data));
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (ret == 0) {
if (resp.response == VIRTIO_SCSI_S_BAD_TARGET) {
continue;
}
- print_int("target", target);
- virtio_scsi_verify_response(&resp, "SCSI cannot report LUNs");
+ printf("target 0x%X\n", target);
+ if (!virtio_scsi_verify_response(&resp, "SCSI cannot report LUNs")) {
+ return -EIO;
+ }
}
+
if (r->lun_list_len == 0) {
- print_int("no LUNs for target", target);
+ printf("no LUNs for target 0x%X\n", target);
continue;
}
luns = r->lun_list_len / 8;
@@ -264,7 +284,7 @@ static int virtio_scsi_locate_device(VDev *vdev)
}
}
- sclp_print("Warning: Could not locate a usable virtio-scsi device\n");
+ puts("Warning: Could not locate a usable virtio-scsi device");
return -ENODEV;
}
@@ -282,7 +302,9 @@ int virtio_scsi_read_many(VDev *vdev,
data_size = sector_count * virtio_get_block_size() * f;
if (!scsi_read_10(vdev, sector * f, sector_count * f, load_addr,
data_size)) {
- virtio_scsi_verify_response(&resp, "virtio-scsi:read_many");
+ if (!virtio_scsi_verify_response(&resp, "virtio-scsi:read_many")) {
+ return -1;
+ }
}
load_addr += data_size;
sector += sector_count;
@@ -351,11 +373,16 @@ static int virtio_scsi_setup(VDev *vdev)
uint8_t code = resp.sense[0] & SCSI_SENSE_CODE_MASK;
uint8_t sense_key = resp.sense[2] & SCSI_SENSE_KEY_MASK;
- IPL_assert(resp.sense_len != 0, "virtio-scsi:setup: no SENSE data");
+ if (resp.sense_len == 0) {
+ puts("virtio-scsi: setup: no SENSE data");
+ return -EINVAL;
+ }
- IPL_assert(retry_test_unit_ready && code == 0x70 &&
- sense_key == SCSI_SENSE_KEY_UNIT_ATTENTION,
- "virtio-scsi:setup: cannot retry");
+ if (!retry_test_unit_ready || code != 0x70 ||
+ sense_key != SCSI_SENSE_KEY_UNIT_ATTENTION) {
+ puts("virtio-scsi:setup: cannot retry");
+ return -EIO;
+ }
/* retry on CHECK_CONDITION/UNIT_ATTENTION as it
* may not designate a real error, but it may be
@@ -366,30 +393,40 @@ static int virtio_scsi_setup(VDev *vdev)
continue;
}
- virtio_scsi_verify_response(&resp, "virtio-scsi:setup");
+ if (!virtio_scsi_verify_response(&resp, "virtio-scsi:setup")) {
+ return -1;
+ }
}
/* read and cache SCSI INQUIRY response */
- if (!scsi_inquiry(vdev,
+ ret = scsi_inquiry(vdev,
SCSI_INQUIRY_STANDARD,
SCSI_INQUIRY_STANDARD_NONE,
scsi_inquiry_std_response,
- sizeof(scsi_inquiry_std_response))) {
- virtio_scsi_verify_response(&resp, "virtio-scsi:setup:inquiry");
+ sizeof(scsi_inquiry_std_response));
+ if (ret < 1) {
+ if (ret != 0 || !virtio_scsi_verify_response(&resp,
+ "virtio-scsi:setup:inquiry")) {
+ return -1;
+ }
}
if (virtio_scsi_inquiry_response_is_cdrom(scsi_inquiry_std_response)) {
- sclp_print("SCSI CD-ROM detected.\n");
+ puts("SCSI CD-ROM detected.");
vdev->is_cdrom = true;
vdev->scsi_block_size = VIRTIO_ISO_BLOCK_SIZE;
}
- if (!scsi_inquiry(vdev,
+ ret = scsi_inquiry(vdev,
SCSI_INQUIRY_EVPD,
SCSI_INQUIRY_EVPD_SUPPORTED_PAGES,
evpd,
- sizeof(*evpd))) {
- virtio_scsi_verify_response(&resp, "virtio-scsi:setup:supported_pages");
+ sizeof(*evpd));
+ if (ret < 1) {
+ if (ret != 0 || !virtio_scsi_verify_response(&resp,
+ "virtio-scsi:setup:supported_pages")) {
+ return -1;
+ }
}
debug_print_int("EVPD length", evpd->page_length);
@@ -401,12 +438,16 @@ static int virtio_scsi_setup(VDev *vdev)
continue;
}
- if (!scsi_inquiry(vdev,
+ ret = scsi_inquiry(vdev,
SCSI_INQUIRY_EVPD,
SCSI_INQUIRY_EVPD_BLOCK_LIMITS,
evpd_bl,
- sizeof(*evpd_bl))) {
- virtio_scsi_verify_response(&resp, "virtio-scsi:setup:blocklimits");
+ sizeof(*evpd_bl));
+ if (ret < 1) {
+ if (ret != 0 || !virtio_scsi_verify_response(&resp,
+ "virtio-scsi:setup:blocklimits")) {
+ return -1;
+ }
}
debug_print_int("max transfer", evpd_bl->max_transfer);
@@ -422,8 +463,12 @@ static int virtio_scsi_setup(VDev *vdev)
vdev->max_transfer = MIN_NON_ZERO(VIRTIO_SCSI_MAX_SECTORS,
vdev->max_transfer);
- if (!scsi_read_capacity(vdev, data, data_size)) {
- virtio_scsi_verify_response(&resp, "virtio-scsi:setup:read_capacity");
+ ret = scsi_read_capacity(vdev, data, data_size);
+ if (ret < 1) {
+ if (ret != 0 || !virtio_scsi_verify_response(&resp,
+ "virtio-scsi:setup:read_capacity")) {
+ return -1;
+ }
}
scsi_parse_capacity_report(data, &vdev->scsi_last_block,
(uint32_t *) &vdev->scsi_block_size);
@@ -438,12 +483,17 @@ int virtio_scsi_setup_device(SubChannelId schid)
vdev->schid = schid;
virtio_setup_ccw(vdev);
- IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE,
- "Config: sense size mismatch");
- IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE,
- "Config: CDB size mismatch");
+ if (vdev->config.scsi.sense_size != VIRTIO_SCSI_SENSE_SIZE) {
+ puts("Config: sense size mismatch");
+ return -EINVAL;
+ }
+
+ if (vdev->config.scsi.cdb_size != VIRTIO_SCSI_CDB_SIZE) {
+ puts("Config: CDB size mismatch");
+ return -EINVAL;
+ }
- sclp_print("Using virtio-scsi.\n");
+ puts("Using virtio-scsi.");
return virtio_scsi_setup(vdev);
}
diff --git a/pc-bios/s390-ccw/virtio.c b/pc-bios/s390-ccw/virtio.c
index 5edd058..cd6c99c 100644
--- a/pc-bios/s390-ccw/virtio.c
+++ b/pc-bios/s390-ccw/virtio.c
@@ -8,7 +8,7 @@
* directory.
*/
-#include "libc.h"
+#include <string.h>
#include "s390-ccw.h"
#include "cio.h"
#include "virtio.h"
@@ -217,26 +217,36 @@ int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd)
return 0;
}
-void virtio_setup_ccw(VDev *vdev)
+int virtio_reset(VDev *vdev)
{
- int i, rc, cfg_size = 0;
+ return run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false);
+}
+
+int virtio_setup_ccw(VDev *vdev)
+{
+ int i, cfg_size = 0;
uint8_t status;
struct VirtioFeatureDesc {
uint32_t features;
uint8_t index;
} __attribute__((packed)) feats;
- IPL_assert(virtio_is_supported(vdev->schid), "PE");
+ if (!virtio_is_supported(vdev->schid)) {
+ puts("Virtio unsupported for this device ID");
+ return -ENODEV;
+ }
/* device ID has been established now */
vdev->config.blk.blk_size = 0; /* mark "illegal" - setup started... */
vdev->guessed_disk_nature = VIRTIO_GDN_NONE;
- run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false);
+ virtio_reset(vdev);
status = VIRTIO_CONFIG_S_ACKNOWLEDGE;
- rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false);
- IPL_assert(rc == 0, "Could not write ACKNOWLEDGE status to host");
+ if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) {
+ puts("Could not write ACKNOWLEDGE status to host");
+ return -EIO;
+ }
switch (vdev->senseid.cu_model) {
case VIRTIO_ID_NET:
@@ -255,27 +265,37 @@ void virtio_setup_ccw(VDev *vdev)
cfg_size = sizeof(vdev->config.scsi);
break;
default:
- panic("Unsupported virtio device\n");
+ puts("Unsupported virtio device");
+ return -ENODEV;
}
status |= VIRTIO_CONFIG_S_DRIVER;
- rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false);
- IPL_assert(rc == 0, "Could not write DRIVER status to host");
+ if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) {
+ puts("Could not write DRIVER status to host");
+ return -EIO;
+ }
/* Feature negotiation */
for (i = 0; i < ARRAY_SIZE(vdev->guest_features); i++) {
feats.features = 0;
feats.index = i;
- rc = run_ccw(vdev, CCW_CMD_READ_FEAT, &feats, sizeof(feats), false);
- IPL_assert(rc == 0, "Could not get features bits");
+ if (run_ccw(vdev, CCW_CMD_READ_FEAT, &feats, sizeof(feats), false)) {
+ puts("Could not get features bits");
+ return -EIO;
+ }
+
vdev->guest_features[i] &= bswap32(feats.features);
feats.features = bswap32(vdev->guest_features[i]);
- rc = run_ccw(vdev, CCW_CMD_WRITE_FEAT, &feats, sizeof(feats), false);
- IPL_assert(rc == 0, "Could not set features bits");
+ if (run_ccw(vdev, CCW_CMD_WRITE_FEAT, &feats, sizeof(feats), false)) {
+ puts("Could not set features bits");
+ return -EIO;
+ }
}
- rc = run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false);
- IPL_assert(rc == 0, "Could not get virtio device configuration");
+ if (run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false)) {
+ puts("Could not get virtio device configuration");
+ return -EIO;
+ }
for (i = 0; i < vdev->nr_vqs; i++) {
VqInfo info = {
@@ -289,19 +309,27 @@ void virtio_setup_ccw(VDev *vdev)
.num = 0,
};
- rc = run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false);
- IPL_assert(rc == 0, "Could not get virtio device VQ configuration");
+ if (run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config),
+ false)) {
+ puts("Could not get virtio device VQ config");
+ return -EIO;
+ }
info.num = config.num;
vring_init(&vdev->vrings[i], &info);
vdev->vrings[i].schid = vdev->schid;
- IPL_assert(
- run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false) == 0,
- "Cannot set VQ info");
+ if (run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false)) {
+ puts("Cannot set VQ info");
+ return -EIO;
+ }
}
status |= VIRTIO_CONFIG_S_DRIVER_OK;
- rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false);
- IPL_assert(rc == 0, "Could not write DRIVER_OK status to host");
+ if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) {
+ puts("Could not write DRIVER_OK status to host");
+ return -EIO;
+ }
+
+ return 0;
}
bool virtio_is_supported(SubChannelId schid)
diff --git a/pc-bios/s390-ccw/virtio.h b/pc-bios/s390-ccw/virtio.h
index 85bd9d1..5c5e808 100644
--- a/pc-bios/s390-ccw/virtio.h
+++ b/pc-bios/s390-ccw/virtio.h
@@ -253,7 +253,6 @@ struct VDev {
uint8_t scsi_dev_heads;
bool scsi_device_selected;
ScsiDevice selected_scsi_device;
- uint64_t netboot_start_addr;
uint32_t max_transfer;
uint32_t guest_features[2];
};
@@ -275,8 +274,10 @@ void vring_send_buf(VRing *vr, void *p, int len, int flags);
int vr_poll(VRing *vr);
int vring_wait_reply(void);
int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd);
-void virtio_setup_ccw(VDev *vdev);
+int virtio_reset(VDev *vdev);
+int virtio_setup_ccw(VDev *vdev);
int virtio_net_init(void *mac_addr);
+void virtio_net_deinit(void);
#endif /* VIRTIO_H */
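
Since virtio_setup_ccw() and virtio_reset() now report errors instead of asserting, device-specific setup code can pass failures back to its caller. A hedged sketch of the calling convention (the helper name below is invented for illustration; real callers such as virtio_blk_setup_device() follow the same pattern):

/* Illustrative only: propagate the -ENODEV/-EIO codes from virtio_setup_ccw(). */
static int example_setup_device(VDev *vdev, SubChannelId schid)
{
    int rc;

    vdev->schid = schid;
    rc = virtio_setup_ccw(vdev);
    if (rc) {
        puts("virtio-ccw setup failed");
        return rc;
    }
    return 0;
}
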
diff --git a/pc-bios/s390-netboot.img b/pc-bios/s390-netboot.img
deleted file mode 100644
index 6908e49..0000000
--- a/pc-bios/s390-netboot.img
+++ /dev/null
Binary files differ
diff --git a/pc-bios/skiboot.lid b/pc-bios/skiboot.lid
index 906bd51..ffc77ee 100644
--- a/pc-bios/skiboot.lid
+++ b/pc-bios/skiboot.lid
Binary files differ
diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin
index 27fed09..4314e17 100644
--- a/pc-bios/slof.bin
+++ b/pc-bios/slof.bin
Binary files differ
diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin
index e10cd26..011359e 100644
--- a/pc-bios/vgabios-ati.bin
+++ b/pc-bios/vgabios-ati.bin
Binary files differ
diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin
index 416036d..1d233af 100644
--- a/pc-bios/vgabios-bochs-display.bin
+++ b/pc-bios/vgabios-bochs-display.bin
Binary files differ
diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin
index 4ffaa43..f7b06f2 100644
--- a/pc-bios/vgabios-cirrus.bin
+++ b/pc-bios/vgabios-cirrus.bin
Binary files differ
diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin
index 1b7a383..50dfeb2 100644
--- a/pc-bios/vgabios-qxl.bin
+++ b/pc-bios/vgabios-qxl.bin
Binary files differ
diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin
index dba6cb8..b72279f 100644
--- a/pc-bios/vgabios-ramfb.bin
+++ b/pc-bios/vgabios-ramfb.bin
Binary files differ
diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin
index 0d541c5..5b48ca8 100644
--- a/pc-bios/vgabios-stdvga.bin
+++ b/pc-bios/vgabios-stdvga.bin
Binary files differ
diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin
index 2ce3557..f580c33 100644
--- a/pc-bios/vgabios-virtio.bin
+++ b/pc-bios/vgabios-virtio.bin
Binary files differ
diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin
index b7cab15..03b6dbd 100644
--- a/pc-bios/vgabios-vmware.bin
+++ b/pc-bios/vgabios-vmware.bin
Binary files differ
diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin
index ee748f6..3f71aae 100644
--- a/pc-bios/vgabios.bin
+++ b/pc-bios/vgabios.bin
Binary files differ
diff --git a/plugins/api-system.c b/plugins/api-system.c
new file mode 100644
index 0000000..cc190b1
--- /dev/null
+++ b/plugins/api-system.c
@@ -0,0 +1,131 @@
+/*
+ * QEMU Plugin API - System specific implementations
+ *
+ * This provides the APIs that have a specific system implementation
+ * or are only relevant to system-mode.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+#include "hw/boards.h"
+#include "qemu/plugin-memory.h"
+#include "qemu/plugin.h"
+
+/*
+ * In system mode we cannot trace the binary being executed so the
+ * helpers all return NULL/0.
+ */
+const char *qemu_plugin_path_to_binary(void)
+{
+ return NULL;
+}
+
+uint64_t qemu_plugin_start_code(void)
+{
+ return 0;
+}
+
+uint64_t qemu_plugin_end_code(void)
+{
+ return 0;
+}
+
+uint64_t qemu_plugin_entry_code(void)
+{
+ return 0;
+}
+
+/*
+ * Virtual Memory queries
+ */
+
+static __thread struct qemu_plugin_hwaddr hwaddr_info;
+
+struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
+ uint64_t vaddr)
+{
+ CPUState *cpu = current_cpu;
+ unsigned int mmu_idx = get_mmuidx(info);
+ enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
+ hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;
+
+ assert(mmu_idx < NB_MMU_MODES);
+
+ if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
+ hwaddr_info.is_store, &hwaddr_info)) {
+ error_report("invalid use of qemu_plugin_get_hwaddr");
+ return NULL;
+ }
+
+ return &hwaddr_info;
+}
+
+bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
+{
+ return haddr->is_io;
+}
+
+uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
+{
+ if (haddr) {
+ return haddr->phys_addr;
+ }
+ return 0;
+}
+
+const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
+{
+ if (h && h->is_io) {
+ MemoryRegion *mr = h->mr;
+ if (!mr->name) {
+ unsigned maddr = (uintptr_t)mr;
+ g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
+ return g_intern_string(temp);
+ } else {
+ return g_intern_string(mr->name);
+ }
+ } else {
+ return g_intern_static_string("RAM");
+ }
+}
+
+/*
+ * Time control
+ */
+static bool has_control;
+static Error *migration_blocker;
+
+const void *qemu_plugin_request_time_control(void)
+{
+ if (!has_control) {
+ has_control = true;
+ error_setg(&migration_blocker,
+ "TCG plugin time control does not support migration");
+ migrate_add_blocker(&migration_blocker, NULL);
+ return &has_control;
+ }
+ return NULL;
+}
+
+static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
+{
+ int64_t new_time = data.host_ulong;
+ qemu_clock_advance_virtual_time(new_time);
+}
+
+void qemu_plugin_update_ns(const void *handle, int64_t new_time)
+{
+ if (handle == &has_control) {
+ /* Need to execute out of cpu_exec, so bql can be locked. */
+ async_run_on_cpu(current_cpu,
+ advance_virtual_time__async,
+ RUN_ON_CPU_HOST_ULONG(new_time));
+ }
+}
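
The hwaddr helpers gathered here keep their existing plugin-facing contract. As a reminder of how they are consumed, here is a small, self-contained TCG plugin sketch (illustrative only, not shipped with QEMU) that logs I/O accesses from a memory callback:

#include <inttypes.h>
#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static void vcpu_mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                        uint64_t vaddr, void *udata)
{
    struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);

    /* In user mode (see api-user.c below) hw is always NULL. */
    if (hw && qemu_plugin_hwaddr_is_io(hw)) {
        g_autofree char *msg =
            g_strdup_printf("I/O %s: dev %s, phys 0x%" PRIx64 "\n",
                            qemu_plugin_mem_is_store(info) ? "write" : "read",
                            qemu_plugin_hwaddr_device_name(hw),
                            qemu_plugin_hwaddr_phys_addr(hw));
        qemu_plugin_outs(msg);
    }
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}
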
diff --git a/plugins/api-user.c b/plugins/api-user.c
new file mode 100644
index 0000000..28704a8
--- /dev/null
+++ b/plugins/api-user.c
@@ -0,0 +1,57 @@
+/*
+ * QEMU Plugin API - user-mode only implementations
+ *
+ * This provides the APIs that have a user-mode specific
+ * implementations or are only relevant to user-mode.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/plugin.h"
+#include "exec/log.h"
+
+/*
+ * Virtual Memory queries - these are all NOPs for user-mode which
+ * only ever has visibility of virtual addresses.
+ */
+
+struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
+ uint64_t vaddr)
+{
+ return NULL;
+}
+
+bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
+{
+ return false;
+}
+
+uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
+{
+ return 0;
+}
+
+const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
+{
+ return g_intern_static_string("Invalid");
+}
+
+/*
+ * Time control - for user mode the only real time is wall clock time
+ * so realistically all you can do in user mode is slow down execution
+ * which doesn't require the ability to mess with the clock.
+ */
+
+const void *qemu_plugin_request_time_control(void)
+{
+ return NULL;
+}
+
+void qemu_plugin_update_ns(const void *handle, int64_t new_time)
+{
+ qemu_log_mask(LOG_UNIMP, "user-mode can't control time");
+}
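
The time-control entry points are deliberately inert in user mode. A short fragment showing how a plugin would use them (assumes the usual qemu_plugin_install() scaffolding; in user mode the request simply returns NULL, while in system mode it also installs the migration blocker shown in api-system.c):

static const void *time_handle;

static void request_time_control_once(void)
{
    time_handle = qemu_plugin_request_time_control();
    if (!time_handle) {
        /* user mode, or another plugin already owns time control */
        qemu_plugin_outs("time control not available\n");
    }
}

/* Later, once time_handle is non-NULL, virtual time can be advanced with: */
/*     qemu_plugin_update_ns(time_handle, new_time_ns);                    */
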
diff --git a/plugins/api.c b/plugins/api.c
index 2ff13d0..3c9d483 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -39,25 +39,13 @@
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
-#include "qemu/timer.h"
#include "tcg/tcg.h"
-#include "exec/exec-all.h"
#include "exec/gdbstub.h"
+#include "exec/target_page.h"
+#include "exec/translation-block.h"
#include "exec/translator.h"
#include "disas/disas.h"
#include "plugin.h"
-#ifndef CONFIG_USER_ONLY
-#include "qapi/error.h"
-#include "migration/blocker.h"
-#include "exec/ram_addr.h"
-#include "qemu/plugin-memory.h"
-#include "hw/boards.h"
-#else
-#include "qemu.h"
-#ifdef CONFIG_LINUX
-#include "loader.h"
-#endif
-#endif
/* Uninstall and Reset handlers */
@@ -249,12 +237,10 @@ uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
{
- struct qemu_plugin_insn *insn;
if (unlikely(idx >= tb->n)) {
return NULL;
}
- insn = g_ptr_array_index(tb->insns, idx);
- return insn;
+ return g_ptr_array_index(tb->insns, idx);
}
/*
@@ -286,7 +272,7 @@ uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
{
const DisasContextBase *db = tcg_ctx->plugin_db;
- vaddr page0_last = db->pc_first | ~TARGET_PAGE_MASK;
+ vaddr page0_last = db->pc_first | ~qemu_target_page_mask();
if (db->fake_insn) {
return NULL;
@@ -351,74 +337,37 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
}
-/*
- * Virtual Memory queries
- */
-
-#ifdef CONFIG_SOFTMMU
-static __thread struct qemu_plugin_hwaddr hwaddr_info;
-#endif
-
-struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
- uint64_t vaddr)
-{
-#ifdef CONFIG_SOFTMMU
- CPUState *cpu = current_cpu;
- unsigned int mmu_idx = get_mmuidx(info);
- enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
- hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;
-
- assert(mmu_idx < NB_MMU_MODES);
-
- if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
- hwaddr_info.is_store, &hwaddr_info)) {
- error_report("invalid use of qemu_plugin_get_hwaddr");
- return NULL;
+qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)
+{
+ uint64_t low = current_cpu->neg.plugin_mem_value_low;
+ qemu_plugin_mem_value value;
+
+ switch (qemu_plugin_mem_size_shift(info)) {
+ case 0:
+ value.type = QEMU_PLUGIN_MEM_VALUE_U8;
+ value.data.u8 = (uint8_t)low;
+ break;
+ case 1:
+ value.type = QEMU_PLUGIN_MEM_VALUE_U16;
+ value.data.u16 = (uint16_t)low;
+ break;
+ case 2:
+ value.type = QEMU_PLUGIN_MEM_VALUE_U32;
+ value.data.u32 = (uint32_t)low;
+ break;
+ case 3:
+ value.type = QEMU_PLUGIN_MEM_VALUE_U64;
+ value.data.u64 = low;
+ break;
+ case 4:
+ value.type = QEMU_PLUGIN_MEM_VALUE_U128;
+ value.data.u128.low = low;
+ value.data.u128.high = current_cpu->neg.plugin_mem_value_high;
+ break;
+ default:
+ g_assert_not_reached();
}
-
- return &hwaddr_info;
-#else
- return NULL;
-#endif
-}
-
-bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
-{
-#ifdef CONFIG_SOFTMMU
- return haddr->is_io;
-#else
- return false;
-#endif
-}
-
-uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
-{
-#ifdef CONFIG_SOFTMMU
- if (haddr) {
- return haddr->phys_addr;
- }
-#endif
- return 0;
-}
-
-const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
-{
-#ifdef CONFIG_SOFTMMU
- if (h && h->is_io) {
- MemoryRegion *mr = h->mr;
- if (!mr->name) {
- unsigned maddr = (uintptr_t)mr;
- g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
- return g_intern_string(temp);
- } else {
- return g_intern_string(mr->name);
- }
- } else {
- return g_intern_static_string("RAM");
- }
-#else
- return g_intern_static_string("Invalid");
-#endif
+ return value;
}
int qemu_plugin_num_vcpus(void)
@@ -440,49 +389,6 @@ bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret)
}
/*
- * Binary path, start and end locations
- */
-const char *qemu_plugin_path_to_binary(void)
-{
- char *path = NULL;
-#ifdef CONFIG_USER_ONLY
- TaskState *ts = get_task_state(current_cpu);
- path = g_strdup(ts->bprm->filename);
-#endif
- return path;
-}
-
-uint64_t qemu_plugin_start_code(void)
-{
- uint64_t start = 0;
-#ifdef CONFIG_USER_ONLY
- TaskState *ts = get_task_state(current_cpu);
- start = ts->info->start_code;
-#endif
- return start;
-}
-
-uint64_t qemu_plugin_end_code(void)
-{
- uint64_t end = 0;
-#ifdef CONFIG_USER_ONLY
- TaskState *ts = get_task_state(current_cpu);
- end = ts->info->end_code;
-#endif
- return end;
-}
-
-uint64_t qemu_plugin_entry_code(void)
-{
- uint64_t entry = 0;
-#ifdef CONFIG_USER_ONLY
- TaskState *ts = get_task_state(current_cpu);
- entry = ts->info->entry;
-#endif
- return entry;
-}
-
-/*
* Create register handles.
*
* We need to create a handle for each register so the plugin
@@ -527,6 +433,26 @@ GArray *qemu_plugin_get_registers(void)
return create_register_handles(regs);
}
+bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
+{
+ g_assert(current_cpu);
+
+ if (len == 0) {
+ return false;
+ }
+
+ g_byte_array_set_size(data, len);
+
+ int result = cpu_memory_rw_debug(current_cpu, addr, data->data,
+ data->len, false);
+
+ if (result < 0) {
+ return false;
+ }
+
+ return true;
+}
+
int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
g_assert(current_cpu);
@@ -587,44 +513,3 @@ uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
return total;
}
-/*
- * Time control
- */
-static bool has_control;
-#ifdef CONFIG_SOFTMMU
-static Error *migration_blocker;
-#endif
-
-const void *qemu_plugin_request_time_control(void)
-{
- if (!has_control) {
- has_control = true;
-#ifdef CONFIG_SOFTMMU
- error_setg(&migration_blocker,
- "TCG plugin time control does not support migration");
- migrate_add_blocker(&migration_blocker, NULL);
-#endif
- return &has_control;
- }
- return NULL;
-}
-
-#ifdef CONFIG_SOFTMMU
-static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
-{
- int64_t new_time = data.host_ulong;
- qemu_clock_advance_virtual_time(new_time);
-}
-#endif
-
-void qemu_plugin_update_ns(const void *handle, int64_t new_time)
-{
-#ifdef CONFIG_SOFTMMU
- if (handle == &has_control) {
- /* Need to execute out of cpu_exec, so bql can be locked. */
- async_run_on_cpu(current_cpu,
- advance_virtual_time__async,
- RUN_ON_CPU_HOST_ULONG(new_time));
- }
-#endif
-}
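
qemu_plugin_mem_get_value() and the newly added qemu_plugin_read_memory_vaddr() are both meant to be called from a memory callback. A hedged fragment, reusing the callback shape from the earlier sketch:

static void vcpu_mem_value_cb(unsigned int vcpu_index,
                              qemu_plugin_meminfo_t info,
                              uint64_t vaddr, void *udata)
{
    qemu_plugin_mem_value v = qemu_plugin_mem_get_value(info);

    if (qemu_plugin_mem_is_store(info) &&
        v.type == QEMU_PLUGIN_MEM_VALUE_U32) {
        g_autofree char *msg =
            g_strdup_printf("store 0x%08" PRIx32 " @ 0x%" PRIx64 "\n",
                            v.data.u32, vaddr);
        qemu_plugin_outs(msg);
    }

    /* Pull 4 bytes of guest virtual memory at vaddr into buf. */
    g_autoptr(GByteArray) buf = g_byte_array_new();
    if (qemu_plugin_read_memory_vaddr(vaddr, buf, 4)) {
        /* buf->data / buf->len now describe the bytes read */
    }
}
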
diff --git a/plugins/core.c b/plugins/core.c
index 12c67b4..eb9281f 100644
--- a/plugins/core.c
+++ b/plugins/core.c
@@ -12,22 +12,14 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qemu/config-file.h"
-#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
-#include "qemu/xxhash.h"
#include "qemu/rcu.h"
-#include "hw/core/cpu.h"
-
-#include "exec/exec-all.h"
#include "exec/tb-flush.h"
-#include "tcg/tcg.h"
-#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-common.h"
#include "plugin.h"
struct qemu_plugin_cb {
@@ -214,30 +206,49 @@ CPUPluginState *qemu_plugin_create_vcpu_state(void)
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
- if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
+ size_t scoreboard_size = plugin.scoreboard_alloc_size;
+ bool need_realloc = false;
+
+ if (cpu->cpu_index < scoreboard_size) {
return;
}
- bool need_realloc = FALSE;
- while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
- plugin.scoreboard_alloc_size *= 2;
- need_realloc = TRUE;
+ while (cpu->cpu_index >= scoreboard_size) {
+ scoreboard_size *= 2;
+ need_realloc = true;
}
+ if (!need_realloc) {
+ return;
+ }
- if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
- /* nothing to do, we just updated sizes for future scoreboards */
+ if (QLIST_EMPTY(&plugin.scoreboards)) {
+ /* just update size for future scoreboards */
+ plugin.scoreboard_alloc_size = scoreboard_size;
return;
}
+ /*
+ * A scoreboard creation/deletion might be in progress. If a new vcpu is
+ * initialized at the same time, we are safe, as the new
+ * plugin.scoreboard_alloc_size was not yet written.
+ */
+ qemu_rec_mutex_unlock(&plugin.lock);
+
/* cpus must be stopped, as tb might still use an existing scoreboard. */
start_exclusive();
- struct qemu_plugin_scoreboard *score;
- QLIST_FOREACH(score, &plugin.scoreboards, entry) {
- g_array_set_size(score->data, plugin.scoreboard_alloc_size);
+ /* re-acquire lock */
+ qemu_rec_mutex_lock(&plugin.lock);
+ /* in case another vcpu is created between unlock and exclusive section. */
+ if (scoreboard_size > plugin.scoreboard_alloc_size) {
+ struct qemu_plugin_scoreboard *score;
+ QLIST_FOREACH(score, &plugin.scoreboards, entry) {
+ g_array_set_size(score->data, scoreboard_size);
+ }
+ plugin.scoreboard_alloc_size = scoreboard_size;
+ /* force all tb to be flushed, as scoreboard pointers were changed. */
+ tb_flush(cpu);
}
- /* force all tb to be flushed, as scoreboard pointers were changed. */
- tb_flush(cpu);
end_exclusive();
}
@@ -583,6 +594,8 @@ void exec_inline_op(enum plugin_dyn_cb_type type,
}
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ uint64_t value_low,
+ uint64_t value_high,
MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
GArray *arr = cpu->neg.plugin_mem_cbs;
@@ -591,6 +604,10 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
if (arr == NULL) {
return;
}
+
+ cpu->neg.plugin_mem_value_low = value_low;
+ cpu->neg.plugin_mem_value_high = value_high;
+
for (i = 0; i < arr->len; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(arr, struct qemu_plugin_dyn_cb, i);
diff --git a/plugins/loader.c b/plugins/loader.c
index 513a429..8f0d75c 100644
--- a/plugins/loader.c
+++ b/plugins/loader.c
@@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
+#include "qemu/help_option.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
@@ -28,11 +29,8 @@
#include "qemu/xxhash.h"
#include "qemu/plugin.h"
#include "qemu/memalign.h"
-#include "hw/core/cpu.h"
+#include "qemu/target-info.h"
#include "exec/tb-flush.h"
-#ifndef CONFIG_USER_ONLY
-#include "hw/boards.h"
-#endif
#include "plugin.h"
@@ -98,7 +96,12 @@ static int plugin_add(void *opaque, const char *name, const char *value,
bool is_on;
char *fullarg;
- if (strcmp(name, "file") == 0) {
+ if (is_help_option(value)) {
+ printf("Plugin options\n");
+ printf(" file=<path/to/plugin.so>\n");
+ printf(" plugin specific arguments\n");
+ exit(0);
+ } else if (strcmp(name, "file") == 0) {
if (strcmp(value, "") == 0) {
error_setg(errp, "requires a non-empty argument");
return 1;
@@ -122,7 +125,7 @@ static int plugin_add(void *opaque, const char *name, const char *value,
/* Will treat arg="argname" as "argname=on" */
fullarg = g_strdup_printf("%s=%s", value, "on");
} else {
- fullarg = g_strdup_printf("%s", value);
+ fullarg = g_strdup(value);
}
warn_report("using 'arg=%s' is deprecated", value);
error_printf("Please use '%s' directly\n", fullarg);
@@ -291,17 +294,11 @@ int qemu_plugin_load_list(QemuPluginList *head, Error **errp)
struct qemu_plugin_desc *desc, *next;
g_autofree qemu_info_t *info = g_new0(qemu_info_t, 1);
- info->target_name = TARGET_NAME;
+ info->target_name = target_name();
info->version.min = QEMU_PLUGIN_MIN_VERSION;
info->version.cur = QEMU_PLUGIN_VERSION;
-#ifndef CONFIG_USER_ONLY
- MachineState *ms = MACHINE(qdev_get_machine());
- info->system_emulation = true;
- info->system.smp_vcpus = ms->smp.cpus;
- info->system.max_vcpus = ms->smp.max_cpus;
-#else
- info->system_emulation = false;
-#endif
+
+ qemu_plugin_fillin_mode_info(info);
QTAILQ_FOREACH_SAFE(desc, head, entry, next) {
int err;
@@ -373,7 +370,7 @@ static void plugin_reset_destroy(struct qemu_plugin_reset_data *data)
{
qemu_rec_mutex_lock(&plugin.lock);
plugin_reset_destroy__locked(data);
- qemu_rec_mutex_lock(&plugin.lock);
+ qemu_rec_mutex_unlock(&plugin.lock);
}
static void plugin_flush_destroy(CPUState *cpu, run_on_cpu_data arg)
diff --git a/plugins/meson.build b/plugins/meson.build
index 18a0303..62c991d 100644
--- a/plugins/meson.build
+++ b/plugins/meson.build
@@ -1,40 +1,68 @@
+if not get_option('plugins')
+ subdir_done()
+endif
+
+qemu_plugin_symbols = configure_file(
+ input: files('../include/qemu/qemu-plugin.h'),
+ output: 'qemu-plugin.symbols',
+ capture: true,
+ command: [files('../scripts/qemu-plugin-symbols.py'), '@INPUT@'])
+
# Modules need more symbols than just those in plugins/qemu-plugins.symbols
if not enable_modules
if host_os == 'darwin'
configure_file(
- input: files('qemu-plugins.symbols'),
+ input: qemu_plugin_symbols,
output: 'qemu-plugins-ld64.symbols',
capture: true,
command: ['sed', '-ne', 's/^[[:space:]]*\\(qemu_.*\\);/_\\1/p', '@INPUT@'])
emulator_link_args += ['-Wl,-exported_symbols_list,plugins/qemu-plugins-ld64.symbols']
+ elif host_os == 'windows' and meson.get_compiler('c').get_id() == 'clang'
+ # LLVM/lld does not support exporting specific symbols. However, it works
+ # out of the box with dllexport/dllimport attribute we set in the code.
else
- emulator_link_args += ['-Xlinker', '--dynamic-list=' + (meson.project_source_root() / 'plugins/qemu-plugins.symbols')]
+ emulator_link_args += ['-Xlinker', '--dynamic-list=' + qemu_plugin_symbols.full_path()]
endif
endif
-if get_option('plugins')
- if host_os == 'windows'
- dlltool = find_program('dlltool', required: true)
+if host_os == 'windows'
+ # Generate a .lib file for plugins to link against.
+ # First, create a .def file listing all the symbols a plugin should expect to have
+ # available in qemu
+ win32_plugin_def = configure_file(
+ input: qemu_plugin_symbols,
+ output: 'qemu_plugin_api.def',
+ capture: true,
+ command: [python, '-c', 'import fileinput, re; print("EXPORTS", end=""); [print(re.sub(r"[{};]", "", line), end="") for line in fileinput.input()]', '@INPUT@'])
- # Generate a .lib file for plugins to link against.
- # First, create a .def file listing all the symbols a plugin should expect to have
- # available in qemu
- win32_plugin_def = configure_file(
- input: files('qemu-plugins.symbols'),
- output: 'qemu_plugin_api.def',
- capture: true,
- command: ['sed', '-e', '0,/^/s//EXPORTS/; s/[{};]//g', '@INPUT@'])
- # then use dlltool to assemble a delaylib.
- win32_qemu_plugin_api_lib = configure_file(
- input: win32_plugin_def,
- output: 'libqemu_plugin_api.a',
- command: [dlltool, '--input-def', '@INPUT@',
- '--output-delaylib', '@OUTPUT@', '--dllname', 'qemu.exe']
- )
+ # then use dlltool to assemble a delaylib.
+ # The delaylib will have an "imaginary" name (qemu.exe), that is used by the
+ # linker file we add with plugins (win32_linker.c) to identify that we want
+ # to find missing symbols in current program.
+ win32_qemu_plugin_api_link_flags = ['-Lplugins', '-lqemu_plugin_api']
+ if meson.get_compiler('c').get_id() == 'clang'
+ # With LLVM/lld, delaylib is specified at link time (-delayload)
+ dlltool = find_program('llvm-dlltool', required: true)
+ dlltool_cmd = [dlltool, '-d', '@INPUT@', '-l', '@OUTPUT@', '-D', 'qemu.exe']
+ win32_qemu_plugin_api_link_flags += ['-Wl,-delayload=qemu.exe']
+ else
+ # With gcc/ld, delay lib is built with a specific delay parameter.
+ dlltool = find_program('dlltool', required: true)
+ dlltool_cmd = [dlltool, '--input-def', '@INPUT@',
+ '--output-delaylib', '@OUTPUT@', '--dllname', 'qemu.exe']
endif
- specific_ss.add(files(
- 'loader.c',
- 'core.c',
- 'api.c',
- ))
+ win32_qemu_plugin_api_lib = configure_file(
+ input: win32_plugin_def,
+ output: 'libqemu_plugin_api.a',
+ command: dlltool_cmd
+ )
endif
+
+user_ss.add(files('user.c', 'api-user.c'))
+system_ss.add(files('system.c', 'api-system.c'))
+
+user_ss.add(files('api.c', 'core.c'))
+system_ss.add(files('api.c', 'core.c'))
+
+common_ss.add(files('loader.c'))
+
diff --git a/plugins/plugin.h b/plugins/plugin.h
index 30e2299..6fbc443 100644
--- a/plugins/plugin.h
+++ b/plugins/plugin.h
@@ -13,6 +13,7 @@
#define PLUGIN_H
#include <gmodule.h>
+#include "qemu/queue.h"
#include "qemu/qht.h"
#define QEMU_PLUGIN_MIN_VERSION 2
@@ -118,4 +119,10 @@ struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size);
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
+/**
+ * qemu_plugin_fillin_mode_info() - populate mode specific info
+ * info: pointer to qemu_info_t structure
+ */
+void qemu_plugin_fillin_mode_info(qemu_info_t *info);
+
#endif /* PLUGIN_H */
diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols
deleted file mode 100644
index ca773d8..0000000
--- a/plugins/qemu-plugins.symbols
+++ /dev/null
@@ -1,57 +0,0 @@
-{
- qemu_plugin_bool_parse;
- qemu_plugin_end_code;
- qemu_plugin_entry_code;
- qemu_plugin_get_hwaddr;
- qemu_plugin_get_registers;
- qemu_plugin_hwaddr_device_name;
- qemu_plugin_hwaddr_is_io;
- qemu_plugin_hwaddr_phys_addr;
- qemu_plugin_insn_data;
- qemu_plugin_insn_disas;
- qemu_plugin_insn_haddr;
- qemu_plugin_insn_size;
- qemu_plugin_insn_symbol;
- qemu_plugin_insn_vaddr;
- qemu_plugin_mem_is_big_endian;
- qemu_plugin_mem_is_sign_extended;
- qemu_plugin_mem_is_store;
- qemu_plugin_mem_size_shift;
- qemu_plugin_num_vcpus;
- qemu_plugin_outs;
- qemu_plugin_path_to_binary;
- qemu_plugin_read_register;
- qemu_plugin_register_atexit_cb;
- qemu_plugin_register_flush_cb;
- qemu_plugin_register_vcpu_exit_cb;
- qemu_plugin_register_vcpu_idle_cb;
- qemu_plugin_register_vcpu_init_cb;
- qemu_plugin_register_vcpu_insn_exec_cb;
- qemu_plugin_register_vcpu_insn_exec_cond_cb;
- qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu;
- qemu_plugin_register_vcpu_mem_cb;
- qemu_plugin_register_vcpu_mem_inline_per_vcpu;
- qemu_plugin_register_vcpu_resume_cb;
- qemu_plugin_register_vcpu_syscall_cb;
- qemu_plugin_register_vcpu_syscall_ret_cb;
- qemu_plugin_register_vcpu_tb_exec_cb;
- qemu_plugin_register_vcpu_tb_exec_cond_cb;
- qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
- qemu_plugin_register_vcpu_tb_trans_cb;
- qemu_plugin_request_time_control;
- qemu_plugin_reset;
- qemu_plugin_scoreboard_free;
- qemu_plugin_scoreboard_find;
- qemu_plugin_scoreboard_new;
- qemu_plugin_start_code;
- qemu_plugin_tb_get_insn;
- qemu_plugin_tb_n_insns;
- qemu_plugin_tb_vaddr;
- qemu_plugin_u64_add;
- qemu_plugin_u64_get;
- qemu_plugin_u64_set;
- qemu_plugin_u64_sum;
- qemu_plugin_uninstall;
- qemu_plugin_update_ns;
- qemu_plugin_vcpu_for_each;
-};
diff --git a/plugins/system.c b/plugins/system.c
new file mode 100644
index 0000000..b3ecc33
--- /dev/null
+++ b/plugins/system.c
@@ -0,0 +1,24 @@
+/*
+ * QEMU Plugin system-emulation helpers
+ *
+ * Helpers that are specific to system emulation.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/plugin.h"
+#include "hw/boards.h"
+
+#include "plugin.h"
+
+void qemu_plugin_fillin_mode_info(qemu_info_t *info)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ info->system_emulation = true;
+ info->system.smp_vcpus = ms->smp.cpus;
+ info->system.max_vcpus = ms->smp.max_cpus;
+}
diff --git a/plugins/user.c b/plugins/user.c
new file mode 100644
index 0000000..250d542
--- /dev/null
+++ b/plugins/user.c
@@ -0,0 +1,19 @@
+/*
+ * QEMU Plugin user-mode helpers
+ *
+ * Helpers that are specific to user-mode.
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019-2025, Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/plugin.h"
+#include "plugin.h"
+
+void qemu_plugin_fillin_mode_info(qemu_info_t *info)
+{
+ info->system_emulation = false;
+}
diff --git a/po/it.po b/po/it.po
index c6d9517..363b9bd 100644
--- a/po/it.po
+++ b/po/it.po
@@ -65,7 +65,7 @@ msgid "Detach Tab"
msgstr "_Sposta in una nuova finestra"
msgid "Show Menubar"
-msgstr ""
+msgstr "Mostra _barra dei menu"
msgid "_Machine"
msgstr "_Macchina virtuale"
diff --git a/python/Makefile b/python/Makefile
index 1fa4ba2..764b79c 100644
--- a/python/Makefile
+++ b/python/Makefile
@@ -9,13 +9,13 @@ help:
@echo "make check-minreqs:"
@echo " Run tests in the minreqs virtual environment."
@echo " These tests use the oldest dependencies."
- @echo " Requires: Python 3.8"
- @echo " Hint (Fedora): 'sudo dnf install python3.8'"
+ @echo " Requires: Python 3.9"
+ @echo " Hint (Fedora): 'sudo dnf install python3.9'"
@echo ""
@echo "make check-tox:"
@echo " Run tests against multiple python versions."
@echo " These tests use the newest dependencies."
- @echo " Requires: Python 3.8 - 3.11, and tox."
+ @echo " Requires: Python 3.9 - 3.11, and tox."
@echo " Hint (Fedora): 'sudo dnf install python3-tox python3.11'"
@echo " The variable QEMU_TOX_EXTRA_ARGS can be use to pass extra"
@echo " arguments to tox".
@@ -59,7 +59,7 @@ PIP_INSTALL = pip install --disable-pip-version-check
min-venv: $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate
$(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.txt
@echo "VENV $(QEMU_MINVENV_DIR)"
- @python3.8 -m venv $(QEMU_MINVENV_DIR)
+ @python3.9 -m venv $(QEMU_MINVENV_DIR)
@( \
echo "ACTIVATE $(QEMU_MINVENV_DIR)"; \
. $(QEMU_MINVENV_DIR)/bin/activate; \
diff --git a/python/scripts/mkvenv.py b/python/scripts/mkvenv.py
index f2526af..8ac5b0b 100644
--- a/python/scripts/mkvenv.py
+++ b/python/scripts/mkvenv.py
@@ -379,6 +379,9 @@ def make_venv( # pylint: disable=too-many-arguments
try:
builder.create(str(env_dir))
except SystemExit as exc:
+ # pylint 3.3 bug:
+ # pylint: disable=raising-non-exception, raise-missing-from
+
# Some versions of the venv module raise SystemExit; *nasty*!
# We want the exception that prompted it. It might be a subprocess
# error that has output we *really* want to see.
diff --git a/python/scripts/vendor.py b/python/scripts/vendor.py
index 07aff97..b47db00 100755
--- a/python/scripts/vendor.py
+++ b/python/scripts/vendor.py
@@ -41,8 +41,8 @@ def main() -> int:
parser.parse_args()
packages = {
- "meson==1.2.3":
- "4533a43c34548edd1f63a276a42690fce15bde9409bcf20c4b8fa3d7e4d7cac1",
+ "meson==1.8.1":
+ "374bbf71247e629475fc10b0bd2ef66fc418c2d8f4890572f74de0f97d0d42da",
}
vendor_dir = Path(__file__, "..", "..", "wheels").resolve()
diff --git a/python/setup.cfg b/python/setup.cfg
index 3b4e2cc..d7f5dc7 100644
--- a/python/setup.cfg
+++ b/python/setup.cfg
@@ -14,7 +14,6 @@ classifiers =
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python :: 3 :: Only
- Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
@@ -23,7 +22,7 @@ classifiers =
Typing :: Typed
[options]
-python_requires = >= 3.8
+python_requires = >= 3.9
packages =
qemu.qmp
qemu.machine
@@ -47,6 +46,7 @@ devel =
urwid >= 2.1.2
urwid-readline >= 0.13
Pygments >= 2.9.0
+ sphinx >= 3.4.3
# Provides qom-fuse functionality
fuse =
@@ -78,8 +78,7 @@ exclude = __pycache__,
[mypy]
strict = True
-python_version = 3.8
-warn_unused_configs = True
+python_version = 3.9
namespace_packages = True
warn_unused_ignores = False
@@ -142,6 +141,7 @@ ignore_missing_imports = True
disable=consider-using-f-string,
consider-using-with,
too-many-arguments,
+ too-many-positional-arguments,
too-many-function-args, # mypy handles this with less false positives.
too-many-instance-attributes,
no-member, # mypy also handles this better.
@@ -185,7 +185,7 @@ multi_line_output=3
# of python available on your system to run this test.
[tox:tox]
-envlist = py38, py39, py310, py311, py312, py313
+envlist = py39, py310, py311, py312, py313
skip_missing_interpreters = true
[testenv]
diff --git a/python/tests/minreqs.txt b/python/tests/minreqs.txt
index a3f423e..cd2e2a8 100644
--- a/python/tests/minreqs.txt
+++ b/python/tests/minreqs.txt
@@ -1,5 +1,5 @@
# This file lists the ***oldest possible dependencies*** needed to run
-# "make check" successfully under ***Python 3.8***. It is used primarily
+# "make check" successfully under ***Python 3.9***. It is used primarily
# by GitLab CI to ensure that our stated minimum versions in setup.cfg
# are truthful and regularly validated.
#
@@ -11,6 +11,15 @@
# When adding new dependencies, pin the very oldest non-yanked version
# on PyPI that allows the test suite to pass.
+# For some reason, the presence of packaging==14.0 below requires us to
+# also pin setuptools to version 70 or below. Otherwise, installation
+# of the QEMU package itself fails because setuptools cannot be found.
+setuptools<=70
+
+# Dependencies for qapidoc/qapi_domain et al
+sphinx==3.4.3
+
# Dependencies for the TUI addon (Required for successful linting)
urwid==2.1.2
urwid-readline==0.13
@@ -38,10 +47,32 @@ pyflakes==2.5.0
# Transitive mypy dependencies
mypy-extensions==1.0.0
+tomli==1.1.0
typing-extensions==4.7.1
# Transitive pylint dependencies
astroid==2.15.4
+dill==0.2
lazy-object-proxy==1.4.0
+platformdirs==2.2.0
toml==0.10.0
+tomlkit==0.10.1
wrapt==1.14.0
+
+# Transitive sphinx dependencies
+Jinja2==2.7
+MarkupSafe==1.1.0
+alabaster==0.7.1
+babel==1.3
+docutils==0.12
+imagesize==0.5.0
+packaging==14.0
+pytz==2011b0
+requests==2.5.0
+snowballstemmer==1.1
+sphinxcontrib-applehelp==1.0.0
+sphinxcontrib-devhelp==1.0.0
+sphinxcontrib-htmlhelp==1.0.0
+sphinxcontrib-jsmath==1.0.0
+sphinxcontrib-qthelp==1.0.0
+sphinxcontrib-serializinghtml==1.0.0
diff --git a/python/tests/qapi-flake8.sh b/python/tests/qapi-flake8.sh
new file mode 100755
index 0000000..c69f9ea
--- /dev/null
+++ b/python/tests/qapi-flake8.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+python3 -m flake8 ../scripts/qapi/ \
+ ../docs/sphinx/qapidoc.py \
+ ../docs/sphinx/qapi_domain.py
diff --git a/python/tests/qapi-isort.sh b/python/tests/qapi-isort.sh
new file mode 100755
index 0000000..78dd947
--- /dev/null
+++ b/python/tests/qapi-isort.sh
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+python3 -m isort --sp . -c ../scripts/qapi/
+# Force isort to recognize "compat" as a local module and not third-party
+python3 -m isort --sp . -c -p compat -p qapidoc_legacy \
+ ../docs/sphinx/qapi_domain.py \
+ ../docs/sphinx/qapidoc.py
diff --git a/python/tests/qapi-mypy.sh b/python/tests/qapi-mypy.sh
new file mode 100755
index 0000000..363dbaf
--- /dev/null
+++ b/python/tests/qapi-mypy.sh
@@ -0,0 +1,4 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+python3 -m mypy ../scripts/qapi
diff --git a/python/tests/qapi-pylint.sh b/python/tests/qapi-pylint.sh
new file mode 100755
index 0000000..8767d9d
--- /dev/null
+++ b/python/tests/qapi-pylint.sh
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint \
+ --rcfile=../scripts/qapi/pylintrc \
+ ../scripts/qapi/ \
+ ../docs/sphinx/qapidoc.py \
+ ../docs/sphinx/qapi_domain.py
diff --git a/python/wheels/meson-1.2.3-py3-none-any.whl b/python/wheels/meson-1.2.3-py3-none-any.whl
deleted file mode 100644
index a8b84e5..0000000
--- a/python/wheels/meson-1.2.3-py3-none-any.whl
+++ /dev/null
Binary files differ
diff --git a/python/wheels/meson-1.8.1-py3-none-any.whl b/python/wheels/meson-1.8.1-py3-none-any.whl
new file mode 100644
index 0000000..a885f0e
--- /dev/null
+++ b/python/wheels/meson-1.8.1-py3-none-any.whl
Binary files differ
diff --git a/python/wheels/pycotap-1.3.1-py3-none-any.whl b/python/wheels/pycotap-1.3.1-py3-none-any.whl
new file mode 100644
index 0000000..9c2c7d2
--- /dev/null
+++ b/python/wheels/pycotap-1.3.1-py3-none-any.whl
Binary files differ
diff --git a/pythondeps.toml b/pythondeps.toml
index f6e590f..7884ab5 100644
--- a/pythondeps.toml
+++ b/pythondeps.toml
@@ -19,16 +19,13 @@
[meson]
# The install key should match the version in python/wheels/
-meson = { accepted = ">=1.1.0", installed = "1.2.3", canary = "meson" }
+meson = { accepted = ">=1.5.0", installed = "1.8.1", canary = "meson" }
+pycotap = { accepted = ">=1.1.0", installed = "1.3.1" }
[docs]
# Please keep the installed versions in sync with docs/requirements.txt
sphinx = { accepted = ">=3.4.3", installed = "5.3.0", canary = "sphinx-build" }
sphinx_rtd_theme = { accepted = ">=0.5", installed = "1.1.1" }
-[avocado]
-# Note that qemu.git/python/ is always implicitly installed.
-# Prefer an LTS version when updating the accepted versions of
-# avocado-framework, for example right now the limit is 92.x.
-avocado-framework = { accepted = "(>=88.1, <93.0)", installed = "88.1", canary = "avocado" }
-pycdlib = { accepted = ">=1.11.0" }
+[testdeps]
+qemu.qmp = { accepted = ">=0.0.3", installed = "0.0.3" }
diff --git a/qapi/acpi.json b/qapi/acpi.json
index 045dab6..2d53b82 100644
--- a/qapi/acpi.json
+++ b/qapi/acpi.json
@@ -80,7 +80,7 @@
##
# @ACPIOSTInfo:
#
-# OSPM Status Indication for a device For description of possible
+# OSPM Status Indication for a device. For description of possible
# values of @source and @status fields see "_OST (OSPM Status
# Indication)" chapter of ACPI5.0 spec.
#
diff --git a/qapi/audio.json b/qapi/audio.json
index 519697c..16de231 100644
--- a/qapi/audio.json
+++ b/qapi/audio.json
@@ -66,6 +66,26 @@
'*out': 'AudiodevPerDirectionOptions' } }
##
+# @AudiodevDBusOptions:
+#
+# Options of the D-Bus audio backend.
+#
+# @in: options of the capture stream
+#
+# @out: options of the playback stream
+#
+# @nsamples: set the number of samples per read/write call (default 480,
+# i.e. 10 ms at 48 kHz).
+#
+# Since: 10.0
+##
+{ 'struct': 'AudiodevDBusOptions',
+ 'data': {
+ '*in': 'AudiodevPerDirectionOptions',
+ '*out': 'AudiodevPerDirectionOptions',
+ '*nsamples': 'uint32'} }
+
+##
# @AudiodevAlsaPerDirectionOptions:
#
# Options of the ALSA backend that are used for both playback and
@@ -76,7 +96,7 @@
# @period-length: the period length in microseconds
#
# @try-poll: attempt to use poll mode, falling back to non-polling
-# access on failure (default true)
+# access on failure (default false)
#
# Since: 4.0
##
@@ -289,9 +309,9 @@
#
# @name: name of the sink/source to use
#
-# @stream-name: name of the PulseAudio stream created by qemu. Can be
+# @stream-name: name of the PulseAudio stream created by QEMU. Can be
# used to identify the stream in PulseAudio when you create
-# multiple PulseAudio devices or run multiple qemu instances
+# multiple PulseAudio devices or run multiple QEMU instances
# (default: audiodev's id, since 4.2)
#
# @latency: latency you want PulseAudio to achieve in microseconds
@@ -333,9 +353,9 @@
#
# @name: name of the sink/source to use
#
-# @stream-name: name of the PipeWire stream created by qemu. Can be
+# @stream-name: name of the PipeWire stream created by QEMU. Can be
# used to identify the stream in PipeWire when you create multiple
-# PipeWire devices or run multiple qemu instances (default:
+# PipeWire devices or run multiple QEMU instances (default:
# audiodev's id)
#
# @latency: latency you want PipeWire to achieve in microseconds
@@ -490,7 +510,7 @@
'if': 'CONFIG_AUDIO_ALSA' },
'coreaudio': { 'type': 'AudiodevCoreaudioOptions',
'if': 'CONFIG_AUDIO_COREAUDIO' },
- 'dbus': { 'type': 'AudiodevGenericOptions',
+ 'dbus': { 'type': 'AudiodevDBusOptions',
'if': 'CONFIG_DBUS_DISPLAY' },
'dsound': { 'type': 'AudiodevDsoundOptions',
'if': 'CONFIG_AUDIO_DSOUND' },
@@ -513,7 +533,7 @@
##
# @query-audiodevs:
#
-# Returns information about audiodev configuration
+# Return information about audiodev configuration
#
# Returns: array of @Audiodev
#
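The hunks in this and the following QAPI files all describe QMP commands and types. As a reader's aid, here is a minimal synchronous QMP client built only on the Python standard library; it assumes a QEMU instance started with something like -qmp unix:/tmp/qmp.sock,server=on,wait=off, and the socket path is an invented example. The qemu.qmp wheel added to pythondeps.toml above is the proper asyncio client for real use, so the later sketches in this section only show command payloads, which any QMP client could submit:

    import json
    import socket

    def qmp_command(sock_path, command, arguments=None):
        """Send one QMP command and return the server's reply as a dict."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            chan = s.makefile("rw", encoding="utf-8")

            def send(msg):
                chan.write(json.dumps(msg) + "\n")
                chan.flush()
                # A real client would also filter out asynchronous events here.
                return json.loads(chan.readline())

            json.loads(chan.readline())            # greeting banner
            send({"execute": "qmp_capabilities"})  # leave capabilities mode
            msg = {"execute": command}
            if arguments is not None:
                msg["arguments"] = arguments
            return send(msg)

    # For example, list the audio backends described by this file:
    print(qmp_command("/tmp/qmp.sock", "query-audiodevs"))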
diff --git a/qapi/block-core.json b/qapi/block-core.json
index f400b33..1df6644 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -31,8 +31,8 @@
# @icount: Current instruction count. Appears when execution
# record/replay is enabled. Used for "time-traveling" to match
# the moment in the recorded execution with the snapshots. This
-# counter may be obtained through @query-replay command (since
-# 5.2)
+# counter may be obtained through @query-replay command
+# (since 5.2)
#
# Since: 1.3
##
@@ -486,6 +486,10 @@
# @backing_file_depth: number of files in the backing file chain
# (since: 1.2)
#
+# @active: true if the backend is active; typical cases for inactive backends
+# are on the migration source instance after migration completes and on the
+# destination before it completes. (since: 10.0)
+#
# @encrypted: true if the backing device is encrypted
#
# @detect_zeroes: detect and optimize zero writes (Since 2.1)
@@ -506,11 +510,11 @@
#
# @bps_max: total throughput limit during bursts, in bytes (Since 1.7)
#
-# @bps_rd_max: read throughput limit during bursts, in bytes (Since
-# 1.7)
+# @bps_rd_max: read throughput limit during bursts, in bytes
+# (Since 1.7)
#
-# @bps_wr_max: write throughput limit during bursts, in bytes (Since
-# 1.7)
+# @bps_wr_max: write throughput limit during bursts, in bytes
+# (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts, in bytes
# (Since 1.7)
@@ -556,7 +560,7 @@
{ 'struct': 'BlockDeviceInfo',
'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str',
'*backing_file': 'str', 'backing_file_depth': 'int',
- 'encrypted': 'bool',
+ 'active': 'bool', 'encrypted': 'bool',
'detect_zeroes': 'BlockdevDetectZeroesOptions',
'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
@@ -947,11 +951,11 @@
# @unmap_operations: The number of unmap operations performed by the
# device (Since 4.2)
#
-# @rd_total_time_ns: Total time spent on reads in nanoseconds (since
-# 0.15).
+# @rd_total_time_ns: Total time spent on reads in nanoseconds
+# (since 0.15)
#
-# @wr_total_time_ns: Total time spent on writes in nanoseconds (since
-# 0.15).
+# @wr_total_time_ns: Total time spent on writes in nanoseconds
+# (since 0.15)
#
# @zone_append_total_time_ns: Total time spent on zone append writes
# in nanoseconds (since 8.1)
@@ -1158,11 +1162,11 @@
#
# @query-nodes: If true, the command will query all the block nodes
# that have a node name, in a list which will include "parent"
-# information, but not "backing". If false or omitted, the
+# information, but not "backing". If false or omitted, the
# behavior is as before - query all the device backends,
-# recursively including their "parent" and "backing". Filter nodes
-# that were created implicitly are skipped over in this mode.
-# (Since 2.3)
+# recursively including their "parent" and "backing". Filter
+# nodes that were created implicitly are skipped over in this
+# mode. (Since 2.3)
#
# Returns: A list of @BlockStats for each virtual block devices.
#
@@ -1286,7 +1290,7 @@
# jobs, cancel the job
#
# @ignore: ignore the error, only report a QMP event (BLOCK_IO_ERROR
-# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs
+# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs
# retry the failing request later and may still complete
# successfully. The stream block job continues to stream and will
# complete with an error.
@@ -1318,8 +1322,8 @@
# @incremental: only copy data described by the dirty bitmap.
# (since: 2.4)
#
-# @bitmap: only copy data described by the dirty bitmap. (since: 4.2)
-# Behavior on completion is determined by the BitmapSyncMode.
+# @bitmap: only copy data described by the dirty bitmap. Behavior on
+# completion is determined by the BitmapSyncMode. (since: 4.2)
#
# Since: 1.3
##
@@ -1333,7 +1337,7 @@
# bitmap when used for data copy operations.
#
# @on-success: The bitmap is only synced when the operation is
-# successful. This is the behavior always used for 'INCREMENTAL'
+# successful. This is the behavior always used for incremental
# backups.
#
# @never: The bitmap is never synchronized with the operation, and is
@@ -1413,8 +1417,8 @@
# @auto-finalize: Job will finalize itself when PENDING, moving to the
# CONCLUDED state. (since 2.12)
#
-# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the
-# NULL state and disappearing from the query list. (since 2.12)
+# @auto-dismiss: Job will dismiss itself when CONCLUDED, and
+# disappear. (since 2.12)
#
# @error: Error information if the job did not complete successfully.
# Not set if the job completed successfully. (since 2.12.1)
@@ -1498,15 +1502,15 @@
#
# @device: the name of the device to take a snapshot of.
#
-# @node-name: graph node name to generate the snapshot from (Since
-# 2.0)
+# @node-name: graph node name to generate the snapshot from
+# (Since 2.0)
#
# @snapshot-file: the target of the new overlay image. If the file
# exists, or if it is a device, the overlay will be created in the
# existing file/device. Otherwise, a new file will be created.
#
-# @snapshot-node-name: the graph node name of the new image (Since
-# 2.0)
+# @snapshot-node-name: the graph node name of the new image
+# (Since 2.0)
#
# @format: the format of the overlay image, default is 'qcow2'.
#
@@ -1551,11 +1555,16 @@
# it should not be less than job cluster size which is calculated
# as maximum of target image cluster size and 64k. Default 0.
#
+# @min-cluster-size: Minimum size of blocks used by copy-before-write
+# and background copy operations. Has to be a power of 2. No
+# effect if smaller than the maximum of the target's cluster size
+# and 64 KiB. Default 0. (Since 9.2)
+#
# Since: 6.0
##
{ 'struct': 'BackupPerf',
- 'data': { '*use-copy-range': 'bool',
- '*max-workers': 'int', '*max-chunk': 'int64' } }
+ 'data': { '*use-copy-range': 'bool', '*max-workers': 'int',
+ '*max-chunk': 'int64', '*min-cluster-size': 'size' } }
##
# @BackupCommon:
@@ -1574,13 +1583,13 @@
# for unlimited.
#
# @bitmap: The name of a dirty bitmap to use. Must be present if sync
-# is "bitmap" or "incremental". Can be present if sync is "full"
+# is "bitmap" or "incremental". Can be present if sync is "full"
# or "top". Must not be present otherwise.
# (Since 2.4 (drive-backup), 3.1 (blockdev-backup))
#
# @bitmap-mode: Specifies the type of data the bitmap should contain
# after the operation concludes. Must be present if a bitmap was
-# provided, Must NOT be present otherwise. (Since 4.2)
+# provided, must **not** be present otherwise. (Since 4.2)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
@@ -1593,17 +1602,19 @@
# default 'report' (no limitations, since this applies to a
# different block device than @device).
#
+# @on-cbw-error: policy defining behavior on I/O errors in
+# copy-before-write jobs; defaults to break-guest-write. (Since 10.1)
+#
# @auto-finalize: When false, this job will wait in a PENDING state
-# after it has finished its work, waiting for @block-job-finalize
-# before making any block graph changes. When true, this job will
+# after it has finished its work, waiting for @job-finalize before
+# making any block graph changes. When true, this job will
# automatically perform its abort or commit actions. Defaults to
# true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state
# after it has completely ceased all work, and awaits
-# @block-job-dismiss. When true, this job will automatically
-# disappear from the query list without user intervention.
-# Defaults to true. (Since 2.12)
+# @job-dismiss. When true, this job will automatically disappear
+# without user intervention. Defaults to true. (Since 2.12)
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the backup job inserts into the graph above
@@ -1619,9 +1630,9 @@
#
# @unstable: Member @x-perf is experimental.
#
-# .. note:: @on-source-error and @on-target-error only affect background
-# I/O. If an error occurs during a guest write request, the device's
-# rerror/werror actions will be used.
+# .. note:: @on-source-error and @on-target-error only affect
+# background I/O. If an error occurs during a guest write request,
+# the device's rerror/werror actions will be used.
#
# Since: 4.2
##
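To see the two backup additions above side by side (@min-cluster-size inside BackupPerf and @on-cbw-error on the backup jobs), a hypothetical blockdev-backup payload could look as follows; the job and node names are invented, and @x-perf is still marked unstable:

    backup = {
        "execute": "blockdev-backup",
        "arguments": {
            "job-id": "backup0",                   # invented identifiers
            "device": "disk0",
            "target": "backup0-target",
            "sync": "full",
            "on-cbw-error": "break-guest-write",   # new in 10.1; also the default
            "x-perf": {                            # experimental, see @unstable
                "min-cluster-size": 65536,         # new in 9.2; >= 64 KiB to matter
            },
        },
    }
    print(backup)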
@@ -1632,6 +1643,7 @@
'*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
+ '*on-cbw-error': 'OnCbwError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool',
'*filter-node-name': 'str',
'*discard-source': 'bool',
@@ -1699,7 +1711,7 @@
# Takes a snapshot of a block device.
#
# Take a snapshot, by installing 'node' as the backing image of
-# 'overlay'. Additionally, if 'node' is associated with a block
+# 'overlay'. Additionally, if 'node' is associated with a block
# device, the block device changes to using 'overlay' as its new
# active image.
#
@@ -1738,7 +1750,7 @@
# Change the backing file in the image file metadata. This does not
# cause QEMU to reopen the image file to reparse the backing filename
# (it may, however, perform a reopen to change permissions from r/o ->
-# r/w -> r/o, if needed). The new backing file string is written into
+# r/w -> r/o, if needed). The new backing file string is written into
# the image file metadata, and the QEMU internal strings are updated.
#
# @image-node-name: The name of the block driver state node of the
@@ -1772,8 +1784,7 @@
# If top == base, that is an error. If top has no overlays on top of
# it, or if it is in use by a writer, the job will not be completed by
# itself. The user needs to complete the job with the
-# block-job-complete command after getting the ready event. (Since
-# 2.0)
+# job-complete command after getting the ready event. (Since 2.0)
#
# If the base image is smaller than top, then the base image will be
# resized to be the same size as top. If top is smaller than the base
@@ -1827,7 +1838,7 @@
# @speed: the maximum speed, in bytes per second
#
# @on-error: the action to take on an error. 'ignore' means that the
-# request should be retried. (default: report; Since: 5.0)
+# request should be retried. (default: report; since: 5.0)
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the commit job inserts into the graph above
@@ -1835,16 +1846,15 @@
# autogenerated. (Since: 2.9)
#
# @auto-finalize: When false, this job will wait in a PENDING state
-# after it has finished its work, waiting for @block-job-finalize
-# before making any block graph changes. When true, this job will
+# after it has finished its work, waiting for @job-finalize before
+# making any block graph changes. When true, this job will
# automatically perform its abort or commit actions. Defaults to
# true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state
# after it has completely ceased all work, and awaits
-# @block-job-dismiss. When true, this job will automatically
-# disappear from the query list without user intervention.
-# Defaults to true. (Since 3.1)
+# @job-dismiss. When true, this job will automatically disappear
+# without user intervention. Defaults to true. (Since 3.1)
#
# Features:
#
@@ -1853,7 +1863,6 @@
#
# Errors:
# - If @device does not exist, DeviceNotFound
-# - Any other error returns a GenericError.
#
# Since: 1.3
#
@@ -1882,8 +1891,8 @@
# Start a point-in-time copy of a block device to a new destination.
# The status of ongoing drive-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value
-# 'backup'. The operation can be stopped before it has completed using
-# the block-job-cancel command.
+# 'backup'. The operation can be stopped before it has completed
+# using the job-cancel or block-job-cancel command.
#
# Features:
#
@@ -1913,8 +1922,8 @@
# Start a point-in-time copy of a block device to a new destination.
# The status of ongoing blockdev-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value
-# 'backup'. The operation can be stopped before it has completed using
-# the block-job-cancel command.
+# 'backup'. The operation can be stopped before it has completed
+# using the job-cancel or block-job-cancel command.
#
# Errors:
# - If @device is not a valid block device, DeviceNotFound
@@ -2018,7 +2027,7 @@
#
# @id: Block graph node identifier. This @id is generated only for
# x-debug-query-block-graph and does not relate to any other
-# identifiers in Qemu.
+# identifiers in QEMU.
#
# @type: Type of graph node. Can be one of block-backend, block-job
# or block-driver-state.
@@ -2157,8 +2166,8 @@
# @format: the format of the new destination, default is to probe if
# @mode is 'existing', else the format of the source
#
-# @node-name: the new block driver state node name in the graph (Since
-# 2.1)
+# @node-name: the new block driver state node name in the graph
+# (Since 2.1)
#
# @replaces: with sync=full graph node name to be replaced by the new
# image when a whole image copy is done. This can be used to
@@ -2200,16 +2209,15 @@
# 'background' (Since: 3.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state
-# after it has finished its work, waiting for @block-job-finalize
-# before making any block graph changes. When true, this job will
+# after it has finished its work, waiting for @job-finalize before
+# making any block graph changes. When true, this job will
# automatically perform its abort or commit actions. Defaults to
# true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state
# after it has completely ceased all work, and awaits
-# @block-job-dismiss. When true, this job will automatically
-# disappear from the query list without user intervention.
-# Defaults to true. (Since 3.1)
+# @job-dismiss. When true, this job will automatically disappear
+# without user intervention. Defaults to true. (Since 3.1)
#
# Since: 1.3
##
@@ -2299,7 +2307,7 @@
#
# Errors:
# - If @node is not a valid block device or node, DeviceNotFound
-# - If @name is already taken, GenericError with an explanation
+# - If @name is already taken, GenericError
#
# Since: 2.4
#
@@ -2322,7 +2330,7 @@
#
# Errors:
# - If @node is not a valid block device or node, DeviceNotFound
-# - If @name is not found, GenericError with an explanation
+# - If @name is not found, GenericError
# - if @name is frozen by an operation, GenericError
#
# Since: 2.4
@@ -2346,7 +2354,7 @@
#
# Errors:
# - If @node is not a valid block device, DeviceNotFound
-# - If @name is not found, GenericError with an explanation
+# - If @name is not found, GenericError
#
# Since: 2.4
#
@@ -2367,7 +2375,7 @@
#
# Errors:
# - If @node is not a valid block device, DeviceNotFound
-# - If @name is not found, GenericError with an explanation
+# - If @name is not found, GenericError
#
# Since: 4.0
#
@@ -2388,7 +2396,7 @@
#
# Errors:
# - If @node is not a valid block device, DeviceNotFound
-# - If @name is not found, GenericError with an explanation
+# - If @name is not found, GenericError
#
# Since: 4.0
#
@@ -2462,7 +2470,6 @@
# Errors:
# - If @node is not a valid block device, DeviceNotFound
# - If @name is not found or if hashing has failed, GenericError
-# with an explanation
#
# Since: 2.10
##
@@ -2520,16 +2527,20 @@
# 'background' (Since: 3.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state
-# after it has finished its work, waiting for @block-job-finalize
-# before making any block graph changes. When true, this job will
+# after it has finished its work, waiting for @job-finalize before
+# making any block graph changes. When true, this job will
# automatically perform its abort or commit actions. Defaults to
# true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state
# after it has completely ceased all work, and awaits
-# @block-job-dismiss. When true, this job will automatically
-# disappear from the query list without user intervention.
-# Defaults to true. (Since 3.1)
+# @job-dismiss. When true, this job will automatically disappear
+# without user intervention. Defaults to true. (Since 3.1)
+#
+# @target-is-zero: Assume the destination reads as all zeroes before
+# the mirror started. Setting this to true can speed up the
+# mirror. Setting this to true when the destination is not
+# actually all zero can corrupt the destination. (Since 10.1)
#
# Since: 2.6
#
@@ -2550,7 +2561,8 @@
'*on-target-error': 'BlockdevOnError',
'*filter-node-name': 'str',
'*copy-mode': 'MirrorCopyMode',
- '*auto-finalize': 'bool', '*auto-dismiss': 'bool' },
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool',
+ '*target-is-zero': 'bool'},
'allow-preconfig': true }
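A hypothetical blockdev-mirror payload using the new @target-is-zero flag; the node names are invented, and the flag is only safe when the destination genuinely reads as all zeroes (for example a freshly created, fully sparse image):

    mirror = {
        "execute": "blockdev-mirror",
        "arguments": {
            "job-id": "mirror0",        # invented identifiers
            "device": "disk0",
            "target": "disk0-copy",
            "sync": "full",
            "target-is-zero": True,     # new in 10.1; corrupts data if untrue
        },
    }
    print(mirror)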
##
@@ -2576,11 +2588,11 @@
#
# @bps_max: total throughput limit during bursts, in bytes (Since 1.7)
#
-# @bps_rd_max: read throughput limit during bursts, in bytes (Since
-# 1.7)
+# @bps_rd_max: read throughput limit during bursts, in bytes
+# (Since 1.7)
#
-# @bps_wr_max: write throughput limit during bursts, in bytes (Since
-# 1.7)
+# @bps_wr_max: write throughput limit during bursts, in bytes
+# (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts, in bytes
# (Since 1.7)
@@ -2650,7 +2662,7 @@
# @iops-total-max: I/O operations burst
#
# @iops-total-max-length: length of the iops-total-max burst period,
-# in seconds It must only be set if @iops-total-max is set as
+# in seconds. It must only be set if @iops-total-max is set as
# well.
#
# @iops-read: limit read operations per second
@@ -2658,14 +2670,14 @@
# @iops-read-max: I/O operations read burst
#
# @iops-read-max-length: length of the iops-read-max burst period, in
-# seconds It must only be set if @iops-read-max is set as well.
+# seconds. It must only be set if @iops-read-max is set as well.
#
# @iops-write: limit write operations per second
#
# @iops-write-max: I/O operations write burst
#
# @iops-write-max-length: length of the iops-write-max burst period,
-# in seconds It must only be set if @iops-write-max is set as
+# in seconds. It must only be set if @iops-write-max is set as
# well.
#
# @bps-total: limit total bytes per second
@@ -2680,14 +2692,14 @@
# @bps-read-max: total bytes read burst
#
# @bps-read-max-length: length of the bps-read-max burst period, in
-# seconds It must only be set if @bps-read-max is set as well.
+# seconds. It must only be set if @bps-read-max is set as well.
#
# @bps-write: limit write bytes per second
#
# @bps-write-max: total bytes write burst
#
# @bps-write-max-length: length of the bps-write-max burst period, in
-# seconds It must only be set if @bps-write-max is set as well.
+# seconds. It must only be set if @bps-write-max is set as well.
#
# @iops-size: when limiting by iops max size of an I/O in bytes
#
@@ -2772,12 +2784,12 @@
# immediately once streaming has started. The status of ongoing block
# streaming operations can be checked with query-block-jobs. The
# operation can be stopped before it has completed using the
-# block-job-cancel command.
+# job-cancel or block-job-cancel command.
#
# The node that receives the data is called the top image, can be
# located in any part of the chain (but always above the base image;
# see below) and can be specified using its device or node name.
-# Earlier qemu versions only allowed 'device' to name the top level
+# Earlier QEMU versions only allowed 'device' to name the top level
# node; presence of the 'base-node' parameter during introspection can
# be used as a witness of the enhanced semantics of 'device'.
#
@@ -2832,7 +2844,7 @@
#
# @speed: the maximum speed, in bytes per second
#
-# @on-error: the action to take on an error (default report). 'stop'
+# @on-error: the action to take on an error (default report). 'stop'
# and 'enospc' can only be used if the block device supports
# io-status (see BlockInfo). (Since 1.3)
#
@@ -2842,16 +2854,15 @@
# autogenerated. (Since: 6.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state
-# after it has finished its work, waiting for @block-job-finalize
-# before making any block graph changes. When true, this job will
+# after it has finished its work, waiting for @job-finalize before
+# making any block graph changes. When true, this job will
# automatically perform its abort or commit actions. Defaults to
# true. (Since 3.1)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state
# after it has completely ceased all work, and awaits
-# @block-job-dismiss. When true, this job will automatically
-# disappear from the query list without user intervention.
-# Defaults to true. (Since 3.1)
+# @job-dismiss. When true, this job will automatically disappear
+# without user intervention. Defaults to true. (Since 3.1)
#
# Errors:
# - If @device does not exist, DeviceNotFound.
@@ -2949,18 +2960,24 @@
#
# Pause an active background block operation.
#
-# This command returns immediately after marking the active background
-# block operation for pausing. It is an error to call this command if
-# no operation is in progress or if the job is already paused.
+# This command returns immediately after marking the active job for
+# pausing. Pausing an already paused job is an error.
+#
+# The job will pause as soon as possible, which means transitioning
+# into the PAUSED state if it was RUNNING, or into STANDBY if it was
+# READY. The corresponding JOB_STATUS_CHANGE event will be emitted.
#
-# The operation will pause as soon as possible. No event is emitted
-# when the operation is actually paused. Cancelling a paused job
-# automatically resumes it.
+# Cancelling a paused job automatically resumes it.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have other
# values.
#
+# Features:
+#
+# @deprecated: This command is deprecated. Use @job-pause
+# instead.
+#
# Errors:
# - If no background operation is active on this device,
# DeviceNotActive
@@ -2968,6 +2985,7 @@
# Since: 1.3
##
{ 'command': 'block-job-pause', 'data': { 'device': 'str' },
+ 'features': ['deprecated'],
'allow-preconfig': true }
##
@@ -2975,9 +2993,8 @@
#
# Resume an active background block operation.
#
-# This command returns immediately after resuming a paused background
-# block operation. It is an error to call this command if no
-# operation is in progress or if the job is not paused.
+# This command returns immediately after resuming a paused job.
+# Resuming an already running job is an error.
#
# This command also clears the error status of the job.
#
@@ -2985,6 +3002,11 @@
# the name of the parameter), but since QEMU 2.7 it can have other
# values.
#
+# Features:
+#
+# @deprecated: This command is deprecated. Use @job-resume
+# instead.
+#
# Errors:
# - If no background operation is active on this device,
# DeviceNotActive
@@ -2992,15 +3014,21 @@
# Since: 1.3
##
{ 'command': 'block-job-resume', 'data': { 'device': 'str' },
+ 'features': ['deprecated'],
'allow-preconfig': true }
##
# @block-job-complete:
#
-# Manually trigger completion of an active background block operation.
-# This is supported for drive mirroring, where it also switches the
-# device to write to the target path only. The ability to complete is
-# signaled with a BLOCK_JOB_READY event.
+# Manually trigger completion of an active job in the READY or STANDBY
+# state. Completing the job in any other state is an error.
+#
+# This is supported only for drive mirroring, where it also switches
+# the device to write to the target path only. Note that drive
+# mirroring includes drive-mirror, blockdev-mirror and block-commit
+# job (only in case of "active commit", when the node being committed
+# is used by the guest). The ability to complete is signaled with a
+# BLOCK_JOB_READY event.
#
# This command completes an active background block operation
# synchronously. The ordering of this command's return with the
@@ -3010,12 +3038,15 @@
# rerror/werror arguments that were specified when starting the
# operation.
#
-# A cancelled or paused job cannot be completed.
-#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have other
# values.
#
+# Features:
+#
+# @deprecated: This command is deprecated. Use @job-complete
+# instead.
+#
# Errors:
# - If no background operation is active on this device,
# DeviceNotActive
@@ -3023,43 +3054,64 @@
# Since: 1.3
##
{ 'command': 'block-job-complete', 'data': { 'device': 'str' },
+ 'features': ['deprecated'],
'allow-preconfig': true }
##
# @block-job-dismiss:
#
-# For jobs that have already concluded, remove them from the
-# block-job-query list. This command only needs to be run for jobs
-# which were started with QEMU 2.12+ job lifetime management
-# semantics.
+# Deletes a job that is in the CONCLUDED state. This command only
+# needs to be run explicitly for jobs that don't have automatic
+# dismiss enabled. In turn, automatic dismiss may be enabled only
+# for jobs that have @auto-dismiss option, which are drive-backup,
+# blockdev-backup, drive-mirror, blockdev-mirror, block-commit and
+# block-stream. @auto-dismiss is enabled by default for these
+# jobs.
#
# This command will refuse to operate on any job that has not yet
-# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make
-# use of the BLOCK_JOB_READY event, block-job-cancel or
-# block-job-complete will still need to be used as appropriate.
+# reached its terminal state, CONCLUDED. For jobs that make use of
+# the BLOCK_JOB_READY event, job-cancel, block-job-cancel or
+# job-complete will still need to be used as appropriate.
#
# @id: The job identifier.
#
+# Features:
+#
+# @deprecated: This command is deprecated. Use @job-dismiss
+# instead.
+#
# Since: 2.12
##
{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' },
+ 'features': ['deprecated'],
'allow-preconfig': true }
##
# @block-job-finalize:
#
-# Once a job that has manual=true reaches the pending state, it can be
-# instructed to finalize any graph changes and do any necessary
-# cleanup via this command. For jobs in a transaction, instructing
-# one job to finalize will force ALL jobs in the transaction to
-# finalize, so it is only necessary to instruct a single member job to
-# finalize.
+# Instructs all jobs in a transaction (or a single job if it is not
+# part of any transaction) to finalize any graph changes and do any
+# necessary cleanup. This command requires that all involved jobs are
+# in the PENDING state.
+#
+# For jobs in a transaction, instructing one job to finalize will
+# force ALL jobs in the transaction to finalize, so it is only
+# necessary to instruct a single member job to finalize.
+#
+# The command is applicable only to jobs which have the @auto-finalize option
+# and only when this option is set to false.
#
# @id: The job identifier.
#
+# Features:
+#
+# @deprecated: This command is deprecated. Use @job-finalize
+# instead.
+#
# Since: 2.12
##
{ 'command': 'block-job-finalize', 'data': { 'id': 'str' },
+ 'features': ['deprecated'],
'allow-preconfig': true }
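With block-job-pause, block-job-resume, block-job-complete, block-job-dismiss and block-job-finalize all deprecated above in favour of the generic job-* commands, a manual job lifecycle would now be driven roughly as sketched below; "job0" is an assumed identifier of a job created with auto-finalize and auto-dismiss set to false:

    job = "job0"                       # assumed job identifier
    lifecycle = [
        {"execute": "job-pause",    "arguments": {"id": job}},  # RUNNING -> PAUSED
        {"execute": "job-resume",   "arguments": {"id": job}},  # PAUSED -> RUNNING
        {"execute": "job-complete", "arguments": {"id": job}},  # once READY (mirror-like jobs)
        {"execute": "job-finalize", "arguments": {"id": job}},  # once PENDING
        {"execute": "job-dismiss",  "arguments": {"id": job}},  # once CONCLUDED
    ]
    for msg in lifecycle:
        print(msg)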
##
@@ -3138,7 +3190,7 @@
#
# Selects the AIO backend to handle I/O requests
#
-# @threads: Use qemu's thread pool
+# @threads: Use QEMU's thread pool
#
# @native: Use native AIO backend (only Linux and Windows)
#
@@ -3187,12 +3239,18 @@
#
# @snapshot-access: Since 7.0
#
+# Features:
+#
+# @deprecated: Member @gluster is deprecated because GlusterFS
+# development ceased.
+#
# Since: 2.9
##
{ 'enum': 'BlockdevDriver',
'data': [ 'blkdebug', 'blklogwrites', 'blkreplay', 'blkverify', 'bochs',
'cloop', 'compress', 'copy-before-write', 'copy-on-read', 'dmg',
- 'file', 'snapshot-access', 'ftp', 'ftps', 'gluster',
+ 'file', 'snapshot-access', 'ftp', 'ftps',
+ {'name': 'gluster', 'features': [ 'deprecated' ] },
{'name': 'host_cdrom', 'if': 'HAVE_HOST_BLOCK_DEVICE' },
{'name': 'host_device', 'if': 'HAVE_HOST_BLOCK_DEVICE' },
'http', 'https',
@@ -3351,8 +3409,8 @@
# Driver specific block device options for LUKS.
#
# @key-secret: the ID of a QCryptoSecret object providing the
-# decryption key (since 2.6). Mandatory except when doing a
-# metadata-only probe of the image.
+# decryption key. Mandatory except when doing a metadata-only
+# probe of the image. (since 2.6)
#
# @header: block device holding a detached LUKS header. (since 9.0)
#
@@ -3591,8 +3649,8 @@
# this feature. (since 2.5)
#
# @encrypt: Image decryption options. Mandatory for encrypted images,
-# except when doing a metadata-only probe of the image. (since
-# 2.10)
+# except when doing a metadata-only probe of the image.
+# (since 2.10)
#
# @data-file: reference to or definition of the external data file.
# This may only be specified for images that require an external
@@ -3746,7 +3804,7 @@
#
# Since: 4.1
##
-{ 'enum': 'BlkdebugIOType', 'prefix': 'BLKDEBUG_IO_TYPE',
+{ 'enum': 'BlkdebugIOType',
'data': [ 'read', 'write', 'write-zeroes', 'discard', 'flush',
'block-status' ] }
@@ -4050,6 +4108,7 @@
# @path: path to the vhost-vdpa character device.
#
# Features:
+#
# @fdset: Member @path supports the special "/dev/fdset/N" path
# (since 8.1)
#
@@ -4162,7 +4221,7 @@
##
{ 'struct': 'RbdEncryptionCreateOptionsLUKSBase',
'base': 'RbdEncryptionOptionsLUKSBase',
- 'data': { '*cipher-alg': 'QCryptoCipherAlgorithm' } }
+ 'data': { '*cipher-alg': 'QCryptoCipherAlgo' } }
##
# @RbdEncryptionOptionsLUKS:
@@ -4261,8 +4320,8 @@
# @user: Ceph id name.
#
# @auth-client-required: Acceptable authentication modes. This maps
-# to Ceph configuration option "auth_client_required". (Since
-# 3.0)
+# to Ceph configuration option "auth_client_required".
+# (Since 3.0)
#
# @key-secret: ID of a QCryptoSecret object providing a key for cephx
# authentication. This maps to Ceph configuration option "key".
@@ -4427,7 +4486,7 @@
# curl backend. URLs must start with "http://".
#
# @cookie: List of cookies to set; format is "name1=content1;
-# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to
+# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to
# no cookies.
#
# @cookie-secret: ID of a QCryptoSecret object providing the cookie
@@ -4447,7 +4506,7 @@
# curl backend. URLs must start with "https://".
#
# @cookie: List of cookies to set; format is "name1=content1;
-# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to
+# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to
# no cookies.
#
# @sslverify: Whether to verify the SSL certificate's validity
@@ -4516,8 +4575,8 @@
# error. During the first @reconnect-delay seconds, all requests
# are paused and will be rerun on a successful reconnect. After
# that time, any delayed requests and all future requests before a
-# successful reconnect will immediately fail. Default 0 (Since
-# 4.2)
+# successful reconnect will immediately fail. Default 0
+# (Since 4.2)
#
# @open-timeout: In seconds. If zero, the nbd driver tries the
# connection only once, and fails to open if the connection fails.
@@ -4638,12 +4697,18 @@
# @on-cbw-error parameter will decide how this failure is handled.
# Default 0. (Since 7.1)
#
+# @min-cluster-size: Minimum size of blocks used by copy-before-write
+# operations. Has to be a power of 2. No effect if smaller than
+# the maximum of the target's cluster size and 64 KiB. Default 0.
+# (Since 9.2)
+#
# Since: 6.2
##
{ 'struct': 'BlockdevOptionsCbw',
'base': 'BlockdevOptionsGenericFormat',
'data': { 'target': 'BlockdevRef', '*bitmap': 'BlockDirtyBitmap',
- '*on-cbw-error': 'OnCbwError', '*cbw-timeout': 'uint32' } }
+ '*on-cbw-error': 'OnCbwError', '*cbw-timeout': 'uint32',
+ '*min-cluster-size': 'size' } }
##
# @BlockdevOptions:
@@ -4653,18 +4718,23 @@
#
# @driver: block driver name
#
-# @node-name: the node name of the new node (Since 2.0). This option
-# is required on the top level of blockdev-add. Valid node names
-# start with an alphabetic character and may contain only
-# alphanumeric characters, '-', '.' and '_'. Their maximum length
-# is 31 characters.
+# @node-name: the node name of the new node. This option is required
+# on the top level of blockdev-add. Valid node names start with
+# an alphabetic character and may contain only alphanumeric
+# characters, '-', '.' and '_'. Their maximum length is 31
+# characters. (Since 2.0)
#
# @discard: discard-related options (default: ignore)
#
# @cache: cache-related options
#
+# @active: whether the block node should be activated (default: true).
+# Having inactive block nodes is useful primarily for migration because it
+# allows opening an image on the destination while the source is still
+# holding locks for it. (Since 10.0)
+#
# @read-only: whether the block device should be read-only (default:
-# false). Note that some block drivers support only read-only
+# false). Note that some block drivers support only read-only
# access, either generally or in certain configurations. In this
# case, the default value does not work and the option must be
# specified explicitly.
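A hypothetical blockdev-add payload that creates a node in inactive state, as a migration destination might do per the @active description above; node name and image path are invented:

    add_inactive = {
        "execute": "blockdev-add",
        "arguments": {
            "driver": "qcow2",
            "node-name": "disk0",                  # invented node name
            "active": False,                       # new in 10.0
            "file": {"driver": "file",
                     "filename": "/srv/images/disk0.qcow2"},  # invented path
        },
    }
    print(add_inactive)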
@@ -4689,6 +4759,7 @@
'*node-name': 'str',
'*discard': 'BlockdevDiscardOptions',
'*cache': 'BlockdevCacheOptions',
+ '*active': 'bool',
'*read-only': 'bool',
'*auto-read-only': 'bool',
'*force-share': 'bool',
@@ -4870,7 +4941,7 @@
# 3) A reference to a different node: the current child is replaced
# with the specified one.
#
-# 4) NULL: the current child (if any) is detached.
+# 4) null: the current child (if any) is detached.
#
# Options (1) and (2) are supported in all cases. Option (3) is
# supported for @file and @backing, and option (4) for @backing only.
@@ -4920,6 +4991,38 @@
'allow-preconfig': true }
##
+# @blockdev-set-active:
+#
+# Activate or inactivate a block device. Use this to manage the handover of
+# block devices on migration with qemu-storage-daemon.
+#
+# Activating a node automatically activates all of its child nodes first.
+# Inactivating a node automatically inactivates any of its child nodes that are
+# not in use by a still active node.
+#
+# @node-name: Name of the graph node to activate or inactivate. By default, all
+# nodes are affected by the operation.
+#
+# @active: true if the nodes should be active when the command returns success,
+# false if they should be inactive.
+#
+# Since: 10.0
+#
+# .. qmp-example::
+#
+# -> { "execute": "blockdev-set-active",
+# "arguments": {
+# "node-name": "node0",
+# "active": false
+# }
+# }
+# <- { "return": {} }
+##
+{ 'command': 'blockdev-set-active',
+ 'data': { '*node-name': 'str', 'active': 'bool' },
+ 'allow-preconfig': true }
+
+##
# @BlockdevCreateOptionsFile:
#
# Driver specific image creation options for file.
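Continuing the sketch from the blockdev-add example above: once the migration source has released the image, the destination (QEMU or qemu-storage-daemon) can activate the node with the blockdev-set-active command just introduced. Omitting "node-name" would act on all nodes, as the doc comment says:

    activate = {
        "execute": "blockdev-set-active",
        "arguments": {"node-name": "disk0", "active": True},   # invented node name
    }
    print(activate)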
@@ -5048,10 +5151,10 @@
##
# @BlockdevQcow2Version:
#
-# @v2: The original QCOW2 format as introduced in qemu 0.10 (version
+# @v2: The original QCOW2 format as introduced in QEMU 0.10 (version
# 2)
#
-# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3)
+# @v3: The extended QCOW2 format as introduced in QEMU 1.1 (version 3)
#
# Since: 2.12
##
@@ -5231,8 +5334,8 @@
# monolithcFlat, twoGbMaxExtentSparse and twoGbMaxExtentFlat
# formats. For monolithicFlat, only one entry is required; for
# twoGbMaxExtent* formats, the number of entries required is
-# calculated as extent_number = virtual_size / 2GB. Providing more
-# extents than will be used is an error.
+# calculated as extent_number = virtual_size / 2GB. Providing
+# more extents than will be used is an error.
#
# @subformat: The subformat of the VMDK image. Default:
# "monolithicSparse".
@@ -5244,7 +5347,7 @@
# Default: ide.
#
# @hwversion: Hardware version. The meaningful options are "4" or
-# "6". Default: "4".
+# "6". Default: "4".
#
# @toolsversion: VMware guest tools version. Default: "2147483647"
# (Since 6.2)
@@ -5440,7 +5543,7 @@
##
# @BlockdevAmendOptionsQcow2:
#
-# Driver specific image amend options for qcow2. For now, only
+# Driver specific image amend options for qcow2. For now, only
# encryption options can be amended
#
# @encrypt: Encryption options to be amended
@@ -5471,7 +5574,7 @@
# @x-blockdev-amend:
#
# Starts a job to amend format specific options of an existing open
-# block device The job is automatically finalized, but a manual
+# block device. The job is automatically finalized, but a manual
# job-dismiss is required.
#
# @job-id: Identifier for the newly created job.
@@ -5480,7 +5583,7 @@
#
# @options: Options (driver specific)
#
-# @force: Allow unsafe operations, format specific For luks that
+# @force: Allow unsafe operations, format specific. For luks that
# allows erase of the last active keyslot (permanent loss of
# data), and replacement of an active keyslot (possible loss of
# data if IO error happens)
@@ -5543,8 +5646,8 @@
# after this event and must be repaired (Since 2.2; before, every
# BLOCK_IMAGE_CORRUPTED event was fatal)
#
-# .. note:: If action is "stop", a STOP event will eventually follow the
-# BLOCK_IO_ERROR event.
+# .. note:: If action is "stop", a STOP event will eventually follow
+# the BLOCK_IO_ERROR event.
#
# .. qmp-example::
#
@@ -5568,6 +5671,8 @@
#
# Emitted when a disk I/O error occurs
#
+# @qom-path: path to the device object in the QOM tree (since 9.2)
+#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not have a
# device name associated.
@@ -5590,15 +5695,18 @@
# field is a debugging aid for humans, it should not be parsed by
# applications) (since: 2.2)
#
-# .. note:: If action is "stop", a STOP event will eventually follow the
-# BLOCK_IO_ERROR event.
+# .. note:: If action is "stop", a STOP event will eventually follow
+# the BLOCK_IO_ERROR event.
+#
+# .. note:: This event is rate-limited.
#
# Since: 0.13
#
# .. qmp-example::
#
# <- { "event": "BLOCK_IO_ERROR",
-# "data": { "device": "ide0-hd1",
+# "data": { "qom-path": "/machine/unattached/device[0]",
+# "device": "ide0-hd1",
# "node-name": "#block212",
# "operation": "write",
# "action": "stop",
@@ -5606,7 +5714,7 @@
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'BLOCK_IO_ERROR',
- 'data': { 'device': 'str', '*node-name': 'str',
+ 'data': { 'qom-path': 'str', 'device': 'str', '*node-name': 'str',
'operation': 'IoOperationType',
'action': 'BlockErrorAction', '*nospace': 'bool',
'reason': 'str' } }
@@ -5752,7 +5860,7 @@
# @BLOCK_JOB_PENDING:
#
# Emitted when a block job is awaiting explicit authorization to
-# finalize graph changes via @block-job-finalize. If this job is part
+# finalize graph changes via @job-finalize. If this job is part
# of a transaction, it will not emit this event until the transaction
# has converged first.
#
@@ -5850,35 +5958,31 @@
##
# @x-blockdev-change:
#
-# Dynamically reconfigure the block driver state graph. It can be
-# used to add, remove, insert or replace a graph node. Currently only
-# the Quorum driver implements this feature to add or remove its
-# child. This is useful to fix a broken quorum child.
+# Dynamically reconfigure the block driver state graph.
#
-# If @node is specified, it will be inserted under @parent. @child
-# may not be specified in this case. If both @parent and @child are
-# specified but @node is not, @child will be detached from @parent.
+# Currently only supports adding and deleting quorum children. A
+# child will be added at the end of the list of children. Its
+# contents *must* be consistent with the other children's contents.
+# Deleting a child that is not last in the list of children is
+# problematic, because it "renumbers" the children following it.
#
# @parent: the id or name of the parent node.
#
-# @child: the name of a child under the given parent node.
+# @child: the name of a child to be deleted. Mutually exclusive with
+# @node.
#
-# @node: the name of the node that will be added.
+# @node: the name of the node to be added. Mutually exclusive with
+# @child.
#
# Features:
#
-# @unstable: This command is experimental, and its API is not stable.
-# It does not support all kinds of operations, all kinds of
-# children, nor all block drivers.
+# @unstable: This command is experimental.
#
-# FIXME Removing children from a quorum node means introducing
+# TODO: Removing children from a quorum node means introducing
# gaps in the child indices. This cannot be represented in the
# 'children' list of BlockdevOptionsQuorum, as returned by
# .bdrv_refresh_filename().
#
-# Warning: The data in a new quorum child MUST be consistent with
-# that of the rest of the array.
-#
# Since: 2.7
#
# .. qmp-example::
@@ -6046,10 +6150,6 @@
#
# @name: the name of the internal snapshot to be created
#
-# .. note:: In a transaction, if @name is empty or any snapshot matching
-# @name exists, the operation will fail. Only some image formats
-# support it; for example, qcow2, and rbd.
-#
# Since: 1.7
##
{ 'struct': 'BlockdevSnapshotInternal',
@@ -6070,6 +6170,9 @@
# - If the format of the image used does not support it,
# GenericError
#
+# .. note:: Only some image formats such as qcow2 and rbd support
+# internal snapshots.
+#
# Since: 1.7
#
# .. qmp-example::
diff --git a/qapi/block-export.json b/qapi/block-export.json
index 3919a2d..ed4deb5 100644
--- a/qapi/block-export.json
+++ b/qapi/block-export.json
@@ -9,13 +9,11 @@
{ 'include': 'block-core.json' }
##
-# @NbdServerOptions:
-#
-# Keep this type consistent with the nbd-server-start arguments. The
-# only intended difference is using SocketAddress instead of
-# SocketAddressLegacy.
+# @NbdServerOptionsBase:
#
-# @addr: Address on which to listen.
+# @handshake-max-seconds: Time limit, in seconds, at which a client
+# that has not completed the negotiation handshake will be
+# disconnected, or 0 for no limit (since 10.0; default: 10).
#
# @tls-creds: ID of the TLS credentials object (since 2.6).
#
@@ -28,42 +26,47 @@
# @max-connections: The maximum number of connections to allow at the
# same time, 0 for unlimited. Setting this to 1 also stops the
# server from advertising multiple client support (since 5.2;
-# default: 0)
-#
-# Since: 4.2
+# default: 100).
##
-{ 'struct': 'NbdServerOptions',
- 'data': { 'addr': 'SocketAddress',
+{ 'struct': 'NbdServerOptionsBase',
+ 'data': { '*handshake-max-seconds': 'uint32',
'*tls-creds': 'str',
'*tls-authz': 'str',
'*max-connections': 'uint32' } }
##
-# @nbd-server-start:
+# @NbdServerOptions:
#
-# Start an NBD server listening on the given host and port. Block
-# devices can then be exported using @nbd-server-add. The NBD server
-# will present them as named exports; for example, another QEMU
-# instance could refer to them as "nbd:HOST:PORT:exportname=NAME".
+# Keep this type consistent with the NbdServerOptionsLegacy type. The
+# only intended difference is using SocketAddress instead of
+# SocketAddressLegacy.
+#
+# @addr: Address on which to listen (since 4.2).
+##
+{ 'struct': 'NbdServerOptions',
+ 'base': 'NbdServerOptionsBase',
+ 'data': { 'addr': 'SocketAddress' } }
+
+##
+# @NbdServerOptionsLegacy:
#
# Keep this type consistent with the NbdServerOptions type. The only
# intended difference is using SocketAddressLegacy instead of
# SocketAddress.
#
-# @addr: Address on which to listen.
-#
-# @tls-creds: ID of the TLS credentials object (since 2.6).
-#
-# @tls-authz: ID of the QAuthZ authorization object used to validate
-# the client's x509 distinguished name. This object is is only
-# resolved at time of use, so can be deleted and recreated on the
-# fly while the NBD server is active. If missing, it will default
-# to denying access (since 4.0).
+# @addr: Address on which to listen (since 1.3).
+##
+{ 'struct': 'NbdServerOptionsLegacy',
+ 'base': 'NbdServerOptionsBase',
+ 'data': { 'addr': 'SocketAddressLegacy' } }
+
+##
+# @nbd-server-start:
#
-# @max-connections: The maximum number of connections to allow at the
-# same time, 0 for unlimited. Setting this to 1 also stops the
-# server from advertising multiple client support (since 5.2;
-# default: 0).
+# Start an NBD server listening on the given host and port. Block
+# devices can then be exported using @nbd-server-add. The NBD server
+# will present them as named exports; for example, another QEMU
+# instance could refer to them as "nbd:HOST:PORT:exportname=NAME".
#
# Errors:
# - if the server is already running
@@ -71,10 +74,7 @@
# Since: 1.3
##
{ 'command': 'nbd-server-start',
- 'data': { 'addr': 'SocketAddressLegacy',
- '*tls-creds': 'str',
- '*tls-authz': 'str',
- '*max-connections': 'uint32' },
+ 'data': 'NbdServerOptionsLegacy',
'allow-preconfig': true }
##
@@ -163,16 +163,16 @@
# Options for exporting a block graph node on some (file) mountpoint
# as a raw image.
#
-# @mountpoint: Path on which to export the block device via FUSE. This
-# must point to an existing regular file.
+# @mountpoint: Path on which to export the block device via FUSE.
+# This must point to an existing regular file.
#
# @growable: Whether writes beyond the EOF should grow the block node
# accordingly. (default: false)
#
-# @allow-other: If this is off, only qemu's user is allowed access to
+# @allow-other: If this is off, only QEMU's user is allowed access to
# this export. That cannot be changed even with chmod or chown.
# Enabling this option will allow other users access to the export
-# with the FUSE mount option "allow_other". Note that using
+# with the FUSE mount option "allow_other". Note that using
# allow_other as a non-root user requires user_allow_other to be
# enabled in the global fuse.conf configuration file. In auto
# mode (the default), the FUSE export driver will first attempt to
@@ -199,7 +199,7 @@
# @queue-size: the size of virtqueue. Defaults to 256.
#
# @logical-block-size: Logical block size in bytes. Range [512,
-# PAGE_SIZE] and must be power of 2. Defaults to 512 bytes.
+# PAGE_SIZE] and must be power of 2. Defaults to 512 bytes.
#
# @serial: the serial number of virtio block device. Defaults to
# empty string.
@@ -372,6 +372,13 @@
# cannot be moved to the iothread. The default is false.
# (since: 5.2)
#
+# @allow-inactive: If true, the export allows the exported node to be inactive.
+# If it is created for an inactive block node, the node remains inactive. If
+# the export type doesn't support running on an inactive node, an error is
+# returned. If false, inactive block nodes are automatically activated before
+# creating the export and trying to inactivate them later fails.
+# (since: 10.0; default: false)
+#
# Since: 4.2
##
{ 'union': 'BlockExportOptions',
@@ -381,7 +388,8 @@
'*iothread': 'str',
'node-name': 'str',
'*writable': 'bool',
- '*writethrough': 'bool' },
+ '*writethrough': 'bool',
+ '*allow-inactive': 'bool' },
'discriminator': 'type',
'data': {
'nbd': 'BlockExportOptionsNbd',
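Tying the block-export.json changes together, a hypothetical sequence that starts an NBD server with the new handshake limit and then exports a (possibly inactive) node with @allow-inactive; socket path, export id and node name are invented:

    nbd_start = {
        "execute": "nbd-server-start",
        "arguments": {
            "addr": {"type": "unix", "data": {"path": "/tmp/nbd.sock"}},
            "handshake-max-seconds": 10,   # new in 10.0; 10 is also the default
            "max-connections": 100,        # the default after this change
        },
    }
    export = {
        "execute": "block-export-add",
        "arguments": {
            "type": "nbd",
            "id": "export0",
            "node-name": "disk0",
            "writable": False,
            "allow-inactive": True,        # new in 10.0
        },
    }
    for msg in (nbd_start, export):
        print(msg)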
diff --git a/qapi/block.json b/qapi/block.json
index ce9490a..1490a1a 100644
--- a/qapi/block.json
+++ b/qapi/block.json
@@ -48,7 +48,7 @@
##
# @FloppyDriveType:
#
-# Type of Floppy drive to be emulated by the Floppy Disk Controller.
+# Type of floppy drive to be emulated by the Floppy Disk Controller.
#
# @144: 1.44MB 3.5" drive
#
@@ -83,7 +83,7 @@
##
# @query-pr-managers:
#
-# Returns a list of information about each persistent reservation
+# Return a list of information about each persistent reservation
# manager.
#
# Returns: a list of @PRManagerInfo for each persistent reservation
@@ -454,8 +454,8 @@
# different group. In this case the limits specified in the
# parameters will be applied to the new group only.
#
-# I/O limits can be disabled by setting all of them to 0. In this case
-# the device will be removed from its group and the rest of its
+# I/O limits can be disabled by setting all of them to 0. In this
+# case the device will be removed from its group and the rest of its
# members will not be affected. The 'group' parameter is ignored.
#
# Errors:
@@ -519,10 +519,10 @@
# @id: The name or QOM path of the guest device.
#
# @boundaries: list of interval boundary values (see description in
-# BlockLatencyHistogramInfo definition). If specified, all latency
-# histograms are removed, and empty ones created for all io types
-# with intervals corresponding to @boundaries (except for io
-# types, for which specific boundaries are set through the
+# BlockLatencyHistogramInfo definition). If specified, all
+# latency histograms are removed, and empty ones created for all
+# io types with intervals corresponding to @boundaries (except for
+# io types, for which specific boundaries are set through the
# following parameters).
#
# @boundaries-read: list of interval boundary values for read latency
diff --git a/qapi/char.json b/qapi/char.json
index 5e4aeb9..df6e325 100644
--- a/qapi/char.json
+++ b/qapi/char.json
@@ -34,7 +34,7 @@
##
# @query-chardev:
#
-# Returns information about current character devices.
+# Return information about current character devices.
#
# Returns: a list of @ChardevInfo
#
@@ -80,7 +80,7 @@
##
# @query-chardev-backends:
#
-# Returns information about character device backends.
+# Return information about character device backends.
#
# Returns: a list of @ChardevBackendInfo
#
@@ -258,7 +258,7 @@
# @server: create server socket (default: true)
#
# @wait: wait for incoming connection on server sockets (default:
-# false). Silently ignored with server: false. This use is
+# false). Silently ignored with server: false. This use is
# deprecated.
#
# @nodelay: set TCP_NODELAY socket option (default: false)
@@ -273,7 +273,19 @@
#
# @reconnect: For a client socket, if a socket is disconnected, then
# attempt a reconnect after the given number of seconds. Setting
-# this to zero disables this function. (default: 0) (Since: 2.2)
+# this to zero disables this function. The use of this member is
+# deprecated; use @reconnect-ms instead. (default: 0) (Since: 2.2)
+#
+# @reconnect-ms: For a client socket, if a socket is disconnected,
+# then attempt a reconnect after the given number of milliseconds.
+# Setting this to zero disables this function. This member is
+# mutually exclusive with @reconnect.
+# (default: 0) (Since: 9.2)
+#
+# Features:
+#
+# @deprecated: Member @reconnect is deprecated. Use @reconnect-ms
+# instead.
#
# Since: 1.4
##
@@ -287,7 +299,8 @@
'*telnet': 'bool',
'*tn3270': 'bool',
'*websocket': 'bool',
- '*reconnect': 'int' },
+ '*reconnect': { 'type': 'int', 'features': [ 'deprecated' ] },
+ '*reconnect-ms': 'int' },
'base': 'ChardevCommon' }
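With @reconnect deprecated in favour of @reconnect-ms, a client socket chardev would now be added along these lines; the chardev id, socket path and delay are invented:

    char_sock = {
        "execute": "chardev-add",
        "arguments": {
            "id": "charsock0",
            "backend": {
                "type": "socket",
                "data": {
                    "addr": {"type": "unix",
                             "data": {"path": "/tmp/serial.sock"}},
                    "server": False,
                    "reconnect-ms": 2000,   # replaces "reconnect": 2 (seconds)
                },
            },
        },
    }
    print(char_sock)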
##
@@ -320,12 +333,25 @@
'base': 'ChardevCommon' }
##
+# @ChardevHub:
+#
+# Configuration info for hub chardevs.
+#
+# @chardevs: IDs to be added to this hub (maximum 4 devices).
+#
+# Since: 10.0
+##
+{ 'struct': 'ChardevHub',
+ 'data': { 'chardevs': ['str'] },
+ 'base': 'ChardevCommon' }
+
+##
# @ChardevStdio:
#
# Configuration info for stdio chardevs.
#
# @signal: Allow signals (such as SIGINT triggered by ^C) be delivered
-# to qemu. Default: true.
+# to QEMU. Default: true.
#
# Since: 1.5
##
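The new hub chardev above aggregates up to four existing chardevs. A hypothetical chardev-add payload for it; the member ids are assumed to name chardevs created beforehand:

    char_hub = {
        "execute": "chardev-add",
        "arguments": {
            "id": "hub0",
            "backend": {
                "type": "hub",
                "data": {"chardevs": ["charserial0", "charpty0"]},  # assumed ids
            },
        },
    }
    print(char_hub)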
@@ -388,9 +414,9 @@
#
# @rows: console height, in chars
#
-# .. note:: The options are only effective when the VNC or SDL graphical
-# display backend is active. They are ignored with the GTK, Spice,
-# VNC and D-Bus display backends.
+# .. note:: The options are only effective when the VNC or SDL
+# graphical display backend is active. They are ignored with the
+# GTK, Spice, VNC and D-Bus display backends.
#
# Since: 1.5
##
@@ -417,7 +443,7 @@
##
# @ChardevQemuVDAgent:
#
-# Configuration info for qemu vdagent implementation.
+# Configuration info for QEMU vdagent implementation.
#
# @mouse: enable/disable mouse, default is enabled.
#
@@ -432,39 +458,67 @@
'if': 'CONFIG_SPICE_PROTOCOL' }
##
+# @ChardevPty:
+#
+# Configuration info for pty implementation.
+#
+# @path: optional path to create a symbolic link that points to the
+# allocated PTY
+#
+# Since: 9.2
+##
+{ 'struct': 'ChardevPty',
+ 'data': { '*path': 'str' },
+ 'base': 'ChardevCommon' }
+
+##
# @ChardevBackendKind:
#
-# @pipe: Since 1.5
+# @file: regular files
#
-# @udp: Since 1.5
+# @serial: serial host device
#
-# @mux: Since 1.5
+# @parallel: parallel host device
#
-# @msmouse: Since 1.5
+# @pipe: pipes (since 1.5)
#
-# @wctablet: Since 2.9
+# @socket: stream socket
#
-# @braille: Since 1.5
+# @udp: datagram socket (since 1.5)
#
-# @testdev: Since 2.2
+# @pty: pseudo-terminal
#
-# @stdio: Since 1.5
+# @null: provides no input, throws away output
#
-# @console: Since 1.5
+# @mux: (since 1.5)
#
-# @spicevmc: Since 1.5
+# @hub: (since 10.0)
#
-# @spiceport: Since 1.5
+# @msmouse: emulated Microsoft serial mouse (since 1.5)
#
-# @qemu-vdagent: Since 6.1
+# @wctablet: emulated Wacom Penpartner serial tablet (since 2.9)
#
-# @dbus: Since 7.0
+# @braille: Baum Braille device (since 1.5)
#
-# @vc: v1.5
+# @testdev: device for test-suite control (since 2.2)
#
-# @ringbuf: Since 1.6
+# @stdio: standard I/O (since 1.5)
#
-# @memory: Since 1.5
+# @console: Windows console (since 1.5)
+#
+# @spicevmc: spice vm channel (since 1.5)
+#
+# @spiceport: Spice port channel (since 1.5)
+#
+# @qemu-vdagent: Spice vdagent (since 6.1)
+#
+# @dbus: D-Bus channel (since 7.0)
+#
+# @vc: virtual console (since 1.5)
+#
+# @ringbuf: memory ring buffer (since 1.6)
+#
+# @memory: synonym for @ringbuf (since 1.5)
#
# Features:
#
@@ -482,6 +536,7 @@
'pty',
'null',
'mux',
+ 'hub',
'msmouse',
'wctablet',
{ 'name': 'braille', 'if': 'CONFIG_BRLAPI' },
@@ -557,6 +612,16 @@
'data': { 'data': 'ChardevMux' } }
##
+# @ChardevHubWrapper:
+#
+# @data: Configuration info for hub chardevs
+#
+# Since: 10.0
+##
+{ 'struct': 'ChardevHubWrapper',
+ 'data': { 'data': 'ChardevHub' } }
+
+##
# @ChardevStdioWrapper:
#
# @data: Configuration info for stdio chardevs
@@ -591,7 +656,7 @@
##
# @ChardevQemuVDAgentWrapper:
#
-# @data: Configuration info for qemu vdagent implementation
+# @data: Configuration info for QEMU vdagent implementation
#
# Since: 6.1
##
@@ -630,6 +695,17 @@
{ 'struct': 'ChardevRingbufWrapper',
'data': { 'data': 'ChardevRingbuf' } }
+
+##
+# @ChardevPtyWrapper:
+#
+# @data: Configuration info for pty chardevs
+#
+# Since: 9.2
+##
+{ 'struct': 'ChardevPtyWrapper',
+ 'data': { 'data': 'ChardevPty' } }
+
##
# @ChardevBackend:
#
@@ -650,9 +726,10 @@
'pipe': 'ChardevHostdevWrapper',
'socket': 'ChardevSocketWrapper',
'udp': 'ChardevUdpWrapper',
- 'pty': 'ChardevCommonWrapper',
+ 'pty': 'ChardevPtyWrapper',
'null': 'ChardevCommonWrapper',
'mux': 'ChardevMuxWrapper',
+ 'hub': 'ChardevHubWrapper',
'msmouse': 'ChardevCommonWrapper',
'wctablet': 'ChardevCommonWrapper',
'braille': { 'type': 'ChardevCommonWrapper',
diff --git a/qapi/common.json b/qapi/common.json
index 7558ce5..0e3a0bb 100644
--- a/qapi/common.json
+++ b/qapi/common.json
@@ -183,7 +183,19 @@
##
# @GrabToggleKeys:
#
-# Keys to toggle input-linux between host and guest.
+# Key combinations to toggle input-linux between host and guest.
+#
+# @ctrl-ctrl: left and right control key
+#
+# @alt-alt: left and right alt key
+#
+# @shift-shift: left and right shift key
+#
+# @meta-meta: left and right meta key
+#
+# @scrolllock: scroll lock key
+#
+# @ctrl-scrolllock: either control key together with the scroll lock key
#
# Since: 4.0
##
@@ -200,3 +212,17 @@
##
{ 'struct': 'HumanReadableText',
'data': { 'human-readable-text': 'str' } }
+
+##
+# @EndianMode:
+#
+# @unspecified: Endianness not specified
+#
+# @little: Little-endian byte order
+#
+# @big: Big-endian byte order
+#
+# Since: 10.0
+##
+{ 'enum': 'EndianMode',
+ 'data': [ 'unspecified', 'little', 'big' ] }
diff --git a/qapi/control.json b/qapi/control.json
index 950443d..34b733f 100644
--- a/qapi/control.json
+++ b/qapi/control.json
@@ -22,13 +22,14 @@
# "arguments": { "enable": [ "oob" ] } }
# <- { "return": {} }
#
-# .. note:: This command is valid exactly when first connecting: it must
-# be issued before any other command will be accepted, and will fail
-# once the monitor is accepting other commands.
-# (see :doc:`/interop/qmp-spec`)
+# .. note:: This command is valid exactly when first connecting: it
+# must be issued before any other command will be accepted, and
+# will fail once the monitor is accepting other commands. (see
+# :doc:`/interop/qmp-spec`)
#
-# .. note:: The QMP client needs to explicitly enable QMP capabilities,
-# otherwise all the QMP capabilities will be turned off by default.
+# .. note:: The QMP client needs to explicitly enable QMP
+# capabilities, otherwise all the QMP capabilities will be turned
+# off by default.
#
# Since: 0.13
##
@@ -90,7 +91,7 @@
##
# @query-version:
#
-# Returns the current version of QEMU.
+# Return the current version of QEMU.
#
# Returns: A @VersionInfo object describing the current version of
# QEMU.
@@ -150,7 +151,6 @@
# }
#
# This example has been shortened as the real response is too long.
-#
##
{ 'command': 'query-commands', 'returns': ['CommandInfo'],
'allow-preconfig': true }
diff --git a/qapi/crypto.json b/qapi/crypto.json
index e102be3..9ec6301 100644
--- a/qapi/crypto.json
+++ b/qapi/crypto.json
@@ -20,7 +20,6 @@
# Since: 2.5
##
{ 'enum': 'QCryptoTLSCredsEndpoint',
- 'prefix': 'QCRYPTO_TLS_CREDS_ENDPOINT',
'data': ['client', 'server']}
##
@@ -36,21 +35,20 @@
# Since: 2.6
##
{ 'enum': 'QCryptoSecretFormat',
- 'prefix': 'QCRYPTO_SECRET_FORMAT',
'data': ['raw', 'base64']}
##
-# @QCryptoHashAlgorithm:
+# @QCryptoHashAlgo:
#
# The supported algorithms for computing content digests
#
-# @md5: MD5. Should not be used in any new code, legacy compat only
+# @md5: MD5. Should not be used in any new code, legacy compat only
#
-# @sha1: SHA-1. Should not be used in any new code, legacy compat only
+# @sha1: SHA-1. Should not be used in any new code, legacy compat only
#
# @sha224: SHA-224. (since 2.7)
#
-# @sha256: SHA-256. Current recommended strong hash.
+# @sha256: SHA-256. Current recommended strong hash.
#
# @sha384: SHA-384. (since 2.7)
#
@@ -58,14 +56,15 @@
#
# @ripemd160: RIPEMD-160. (since 2.7)
#
+# @sm3: SM3. (since 9.2.0)
+#
# Since: 2.6
##
-{ 'enum': 'QCryptoHashAlgorithm',
- 'prefix': 'QCRYPTO_HASH_ALG',
- 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160']}
+{ 'enum': 'QCryptoHashAlgo',
+ 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160', 'sm3']}
##
-# @QCryptoCipherAlgorithm:
+# @QCryptoCipherAlgo:
#
# The supported algorithms for content encryption ciphers
#
@@ -98,8 +97,7 @@
#
# Since: 2.6
##
-{ 'enum': 'QCryptoCipherAlgorithm',
- 'prefix': 'QCRYPTO_CIPHER_ALG',
+{ 'enum': 'QCryptoCipherAlgo',
'data': ['aes-128', 'aes-192', 'aes-256',
'des', '3des',
'cast5-128',
@@ -123,11 +121,10 @@
# Since: 2.6
##
{ 'enum': 'QCryptoCipherMode',
- 'prefix': 'QCRYPTO_CIPHER_MODE',
'data': ['ecb', 'cbc', 'xts', 'ctr']}
##
-# @QCryptoIVGenAlgorithm:
+# @QCryptoIVGenAlgo:
#
# The supported algorithms for generating initialization vectors for
# full disk encryption. The 'plain' generator should not be used for
@@ -143,8 +140,7 @@
#
# Since: 2.6
##
-{ 'enum': 'QCryptoIVGenAlgorithm',
- 'prefix': 'QCRYPTO_IVGEN_ALG',
+{ 'enum': 'QCryptoIVGenAlgo',
'data': ['plain', 'plain64', 'essiv']}
##
@@ -160,7 +156,6 @@
# Since: 2.6
##
{ 'enum': 'QCryptoBlockFormat',
-# 'prefix': 'QCRYPTO_BLOCK_FORMAT',
'data': ['qcow', 'luks']}
##
@@ -208,37 +203,34 @@
#
# The options that apply to LUKS encryption format initialization
#
-# @cipher-alg: the cipher algorithm for data encryption Currently
+# @cipher-alg: the cipher algorithm for data encryption. Currently
# defaults to 'aes-256'.
#
-# @cipher-mode: the cipher mode for data encryption Currently defaults
-# to 'xts'
+# @cipher-mode: the cipher mode for data encryption. Currently
+# defaults to 'xts'
#
-# @ivgen-alg: the initialization vector generator Currently defaults
+# @ivgen-alg: the initialization vector generator. Currently defaults
# to 'plain64'
#
-# @ivgen-hash-alg: the initialization vector generator hash Currently
-# defaults to 'sha256'
+# @ivgen-hash-alg: the initialization vector generator hash.
+# Currently defaults to 'sha256'
#
-# @hash-alg: the master key hash algorithm Currently defaults to
+# @hash-alg: the master key hash algorithm. Currently defaults to
# 'sha256'
#
# @iter-time: number of milliseconds to spend in PBKDF passphrase
# processing. Currently defaults to 2000. (since 2.8)
#
-# @detached-header: create a detached LUKS header. (since 9.0)
-#
# Since: 2.6
##
{ 'struct': 'QCryptoBlockCreateOptionsLUKS',
'base': 'QCryptoBlockOptionsLUKS',
- 'data': { '*cipher-alg': 'QCryptoCipherAlgorithm',
+ 'data': { '*cipher-alg': 'QCryptoCipherAlgo',
'*cipher-mode': 'QCryptoCipherMode',
- '*ivgen-alg': 'QCryptoIVGenAlgorithm',
- '*ivgen-hash-alg': 'QCryptoHashAlgorithm',
- '*hash-alg': 'QCryptoHashAlgorithm',
- '*iter-time': 'int',
- '*detached-header': 'bool'}}
+ '*ivgen-alg': 'QCryptoIVGenAlgo',
+ '*ivgen-hash-alg': 'QCryptoHashAlgo',
+ '*hash-alg': 'QCryptoHashAlgo',
+ '*iter-time': 'int' }}
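For orientation, here is roughly how these members surface when formatting a LUKS image through blockdev-create. Only cipher-alg, cipher-mode, ivgen-alg, hash-alg and iter-time come from the struct above; job-id, driver, file, size and key-secret are assumed block-layer plumbing defined elsewhere:

    import json

    blockdev_create = {
        "execute": "blockdev-create",
        "arguments": {
            "job-id": "luks-create0",          # hypothetical job id
            "options": {
                "driver": "luks",
                "file": "proto-node0",         # assumed protocol node name
                "size": 64 * 1024 * 1024,
                "key-secret": "sec0",          # assumed secret object id
                "cipher-alg": "aes-256",       # the documented defaults,
                "cipher-mode": "xts",          # spelled out explicitly
                "ivgen-alg": "plain64",
                "hash-alg": "sha256",
                "iter-time": 2000,
            },
        },
    }
    print(json.dumps(blockdev_create, indent=2))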
##
# @QCryptoBlockOpenOptions:
@@ -330,11 +322,11 @@
# Since: 2.7
##
{ 'struct': 'QCryptoBlockInfoLUKS',
- 'data': {'cipher-alg': 'QCryptoCipherAlgorithm',
+ 'data': {'cipher-alg': 'QCryptoCipherAlgo',
'cipher-mode': 'QCryptoCipherMode',
- 'ivgen-alg': 'QCryptoIVGenAlgorithm',
- '*ivgen-hash-alg': 'QCryptoHashAlgorithm',
- 'hash-alg': 'QCryptoHashAlgorithm',
+ 'ivgen-alg': 'QCryptoIVGenAlgo',
+ '*ivgen-hash-alg': 'QCryptoHashAlgo',
+ 'hash-alg': 'QCryptoHashAlgo',
'detached-header': 'bool',
'payload-offset': 'int',
'master-key-iters': 'int',
@@ -379,11 +371,11 @@
# @new-secret: The ID of a QCryptoSecret object providing the password
# to be written into added active keyslots
#
-# @old-secret: Optional (for deactivation only) If given will
+# @old-secret: Optional (for deactivation only). If given will
# deactivate all keyslots that match password located in
# QCryptoSecret with this ID
#
-# @iter-time: Optional (for activation only) Number of milliseconds to
+# @iter-time: Optional (for activation only). Number of milliseconds to
# spend in PBKDF passphrase processing for the newly activated
# keyslot. Currently defaults to 2000.
#
@@ -429,11 +421,6 @@
#
# Properties for objects of classes derived from secret-common.
#
-# @loaded: if true, the secret is loaded immediately when applying
-# this option and will probably fail when processing the next
-# option. Don't use; only provided for compatibility.
-# (default: false)
-#
# @format: the data format that the secret is provided in
# (default: raw)
#
@@ -443,19 +430,13 @@
#
# @iv: the random initialization vector used for encryption of this
# particular secret. Should be a base64 encrypted string of the
-# 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is
+# 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is
# absent.
#
-# Features:
-#
-# @deprecated: Member @loaded is deprecated. Setting true doesn't
-# make sense, and false is already the default.
-#
# Since: 2.6
##
{ 'struct': 'SecretCommonProperties',
- 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] },
- '*format': 'QCryptoSecretFormat',
+ 'data': { '*format': 'QCryptoSecretFormat',
'*keyid': 'str',
'*iv': 'str' } }
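A quick way to exercise @format is a base64 secret added over QMP. The data member belongs to the derived SecretProperties type rather than the common base shown here, so treat it as an assumption in this sketch:

    import base64
    import json

    secret_add = {
        "execute": "object-add",
        "arguments": {
            "qom-type": "secret",
            "id": "sec0",
            "format": "base64",                             # QCryptoSecretFormat
            "data": base64.b64encode(b"hunter2").decode(),  # assumed member
        },
    }
    print(json.dumps(secret_add))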
@@ -488,7 +469,8 @@
##
{ 'struct': 'SecretKeyringProperties',
'base': 'SecretCommonProperties',
- 'data': { 'serial': 'int32' } }
+ 'data': { 'serial': 'int32' },
+ 'if': 'CONFIG_SECRET_KEYRING' }
##
# @TlsCredsProperties:
@@ -521,58 +503,32 @@
#
# Properties for tls-creds-anon objects.
#
-# @loaded: if true, the credentials are loaded immediately when
-# applying this option and will ignore options that are processed
-# later. Don't use; only provided for compatibility.
-# (default: false)
-#
-# Features:
-#
-# @deprecated: Member @loaded is deprecated. Setting true doesn't
-# make sense, and false is already the default.
-#
# Since: 2.5
##
{ 'struct': 'TlsCredsAnonProperties',
'base': 'TlsCredsProperties',
- 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] } } }
+ 'data': { } }
##
# @TlsCredsPskProperties:
#
# Properties for tls-creds-psk objects.
#
-# @loaded: if true, the credentials are loaded immediately when
-# applying this option and will ignore options that are processed
-# later. Don't use; only provided for compatibility.
-# (default: false)
-#
# @username: the username which will be sent to the server. For
# clients only. If absent, "qemu" is sent and the property will
# read back as an empty string.
#
-# Features:
-#
-# @deprecated: Member @loaded is deprecated. Setting true doesn't
-# make sense, and false is already the default.
-#
# Since: 3.0
##
{ 'struct': 'TlsCredsPskProperties',
'base': 'TlsCredsProperties',
- 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] },
- '*username': 'str' } }
+ 'data': { '*username': 'str' } }
##
# @TlsCredsX509Properties:
#
# Properties for tls-creds-x509 objects.
#
-# @loaded: if true, the credentials are loaded immediately when
-# applying this option and will ignore options that are processed
-# later. Don't use; only provided for compatibility.
-# (default: false)
-#
# @sanity-check: if true, perform some sanity checks before using the
# credentials (default: true)
#
@@ -582,20 +538,14 @@
# provides the ID of a previously created secret object containing
# the password for decryption.
#
-# Features:
-#
-# @deprecated: Member @loaded is deprecated. Setting true doesn't
-# make sense, and false is already the default.
-#
# Since: 2.5
##
{ 'struct': 'TlsCredsX509Properties',
'base': 'TlsCredsProperties',
- 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] },
- '*sanity-check': 'bool',
+ 'data': { '*sanity-check': 'bool',
'*passwordid': 'str' } }
##
-# @QCryptoAkCipherAlgorithm:
+# @QCryptoAkCipherAlgo:
#
# The supported algorithms for asymmetric encryption ciphers
#
@@ -603,8 +553,7 @@
#
# Since: 7.1
##
-{ 'enum': 'QCryptoAkCipherAlgorithm',
- 'prefix': 'QCRYPTO_AKCIPHER_ALG',
+{ 'enum': 'QCryptoAkCipherAlgo',
'data': ['rsa']}
##
@@ -612,14 +561,17 @@
#
# The type of asymmetric keys.
#
+# @public: public key
+#
+# @private: private key
+#
# Since: 7.1
##
{ 'enum': 'QCryptoAkCipherKeyType',
- 'prefix': 'QCRYPTO_AKCIPHER_KEY_TYPE',
'data': ['public', 'private']}
##
-# @QCryptoRSAPaddingAlgorithm:
+# @QCryptoRSAPaddingAlgo:
#
# The padding algorithm for RSA.
#
@@ -629,8 +581,7 @@
#
# Since: 7.1
##
-{ 'enum': 'QCryptoRSAPaddingAlgorithm',
- 'prefix': 'QCRYPTO_RSA_PADDING_ALG',
+{ 'enum': 'QCryptoRSAPaddingAlgo',
'data': ['raw', 'pkcs1']}
##
@@ -638,15 +589,15 @@
#
# Specific parameters for RSA algorithm.
#
-# @hash-alg: QCryptoHashAlgorithm
+# @hash-alg: QCryptoHashAlgo
#
-# @padding-alg: QCryptoRSAPaddingAlgorithm
+# @padding-alg: QCryptoRSAPaddingAlgo
#
# Since: 7.1
##
{ 'struct': 'QCryptoAkCipherOptionsRSA',
- 'data': { 'hash-alg':'QCryptoHashAlgorithm',
- 'padding-alg': 'QCryptoRSAPaddingAlgorithm'}}
+ 'data': { 'hash-alg': 'QCryptoHashAlgo',
+ 'padding-alg': 'QCryptoRSAPaddingAlgo'}}
##
# @QCryptoAkCipherOptions:
@@ -659,6 +610,6 @@
# Since: 7.1
##
{ 'union': 'QCryptoAkCipherOptions',
- 'base': { 'alg': 'QCryptoAkCipherAlgorithm' },
+ 'base': { 'alg': 'QCryptoAkCipherAlgo' },
'discriminator': 'alg',
'data': { 'rsa': 'QCryptoAkCipherOptionsRSA' }}
diff --git a/qapi/cryptodev.json b/qapi/cryptodev.json
index 68289f4..b13db26 100644
--- a/qapi/cryptodev.json
+++ b/qapi/cryptodev.json
@@ -9,18 +9,17 @@
##
##
-# @QCryptodevBackendAlgType:
+# @QCryptodevBackendAlgoType:
#
# The supported algorithm types of a crypto device.
#
# @sym: symmetric encryption
#
-# @asym: asymmetric Encryption
+# @asym: asymmetric encryption
#
# Since: 8.0
##
-{ 'enum': 'QCryptodevBackendAlgType',
- 'prefix': 'QCRYPTODEV_BACKEND_ALG',
+{ 'enum': 'QCryptodevBackendAlgoType',
'data': ['sym', 'asym']}
##
@@ -28,10 +27,19 @@
#
# The supported service types of a crypto device.
#
+# @cipher: Symmetric Key Cipher service
+#
+# @hash: Hash service
+#
+# @mac: Message Authentication Codes service
+#
+# @aead: Authenticated Encryption with Associated Data service
+#
+# @akcipher: Asymmetric Key Cipher service
+#
# Since: 8.0
##
{ 'enum': 'QCryptodevBackendServiceType',
- 'prefix': 'QCRYPTODEV_BACKEND_SERVICE',
'data': ['cipher', 'hash', 'mac', 'aead', 'akcipher']}
##
@@ -48,7 +56,6 @@
# Since: 8.0
##
{ 'enum': 'QCryptodevBackendType',
- 'prefix': 'QCRYPTODEV_BACKEND_TYPE',
'data': ['builtin', 'vhost-user', 'lkcf']}
##
@@ -87,7 +94,7 @@
##
# @query-cryptodev:
#
-# Returns information about current crypto devices.
+# Return information about current crypto devices.
#
# Returns: a list of @QCryptodevInfo
#
diff --git a/qapi/cxl.json b/qapi/cxl.json
index bdfac67..8f2e923 100644
--- a/qapi/cxl.json
+++ b/qapi/cxl.json
@@ -117,7 +117,7 @@
# @nibble-mask: Identifies one or more nibbles that the error affects
#
# @bank-group: Bank group of the memory event location, incorporating
-# a number of Banks.
+# a number of banks.
#
# @bank: Bank of the memory event location. A single bank is accessed
# per read or write of the memory.
@@ -326,6 +326,9 @@
# @crc-threshold: Component specific and applicable to 68 byte Flit
# mode only.
#
+# @retry-threshold: Retry threshold hit in the Local Retry State
+# Machine, 68B Flits only.
+#
# @cache-poison-received: Received poison from a peer on CXL.cache.
#
# @mem-poison-received: Received poison from a peer on CXL.mem
@@ -369,8 +372,8 @@
# of memory by Device Physical Address within a single Dynamic
# Capacity Region on a CXL Type 3 Device.
#
-# @offset: The offset (in bytes) to the start of the region
-# where the extent belongs to.
+# @offset: The offset (in bytes) to the start of the region to which
+# the extent belongs.
#
# @len: The length of the extent in bytes.
#
@@ -404,16 +407,16 @@
#
# @enable-shared-access: Capacity has already been allocated to a
# different host using free, contiguous or prescriptive policy
-# with a known tag. This policy then instructs the device to
-# make the capacity with the specified tag available to an
-# additional host. Capacity is implicit as it matches that
-# already associated with the tag. Note that the extent list
-# (and hence Device Physical Addresses) used are per host, so
-# a device may use different representations on each host.
-# The ordering of the extents provided to each host is indicated
-# to the host using per extent sequence numbers generated by
-# the device. Has a similar meaning for temporal sharing, but
-# in that case there may be only one host involved.
+# with a known tag. This policy then instructs the device to make
+# the capacity with the specified tag available to an additional
+# host. Capacity is implicit as it matches that already
+# associated with the tag. Note that the extent list (and hence
+# Device Physical Addresses) used are per host, so a device may
+# use different representations on each host. The ordering of the
+# extents provided to each host is indicated to the host using per
+# extent sequence numbers generated by the device. Has a similar
+# meaning for temporal sharing, but in that case there may be only
+# one host involved.
#
# Since: 9.1
##
@@ -429,7 +432,7 @@
#
# Initiate adding dynamic capacity extents to a host. This simulates
# operations defined in Compute Express Link (CXL) Specification,
-# Revision 3.1, Section 7.6.7.6.5. Note that, currently, establishing
+# Revision 3.1, Section 7.6.7.6.5. Note that, currently, establishing
# success or failure of the full Add Dynamic Capacity flow requires
# out of band communication with the OS of the CXL host.
#
@@ -457,7 +460,7 @@
#
# @unstable: For now this command is subject to change.
#
-# Since : 9.1
+# Since: 9.1
##
{ 'command': 'cxl-add-dynamic-capacity',
'data': { 'path': 'str',
@@ -495,7 +498,7 @@
#
# Initiate release of dynamic capacity extents from a host. This
# simulates operations defined in Compute Express Link (CXL)
-# Specification, Revision 3.1, Section 7.6.7.6.6. Note that,
+# Specification, Revision 3.1, Section 7.6.7.6.6. Note that,
# currently, success or failure of the full Release Dynamic Capacity
# flow requires out of band communication with the OS of the CXL host.
#
@@ -514,13 +517,13 @@
# from the host. Instead, the host immediately loses access to
# the released capacity.
#
-# @sanitize-on-release: Bit[5] of the "Flags" field in Compute
-# Express Link (CXL) Specification, Revision 3.1, Table 7-71.
-# When set, the device should sanitize all released capacity as
-# a result of this request. This ensures that all user data
-# and metadata is made permanently unavailable by whatever
-# means is appropriate for the media type. Note that changing
-# encryption keys is not sufficient.
+# @sanitize-on-release: Bit[5] of the "Flags" field in Compute Express
+# Link (CXL) Specification, Revision 3.1, Table 7-71. When set,
+# the device should sanitize all released capacity as a result of
+# this request. This ensures that all user data and metadata is
+# made permanently unavailable by whatever means is appropriate
+# for the media type. Note that changing encryption keys is not
+# sufficient.
#
# @region: The "Region Number" field as defined in Compute Express
# Link Specification, Revision 3.1, Table 7-71. Valid range
@@ -536,7 +539,7 @@
#
# @unstable: For now this command is subject to change.
#
-# Since : 9.1
+# Since: 9.1
##
{ 'command': 'cxl-release-dynamic-capacity',
'data': { 'path': 'str',
diff --git a/qapi/dump.json b/qapi/dump.json
index d8145da..d0ba1f0 100644
--- a/qapi/dump.json
+++ b/qapi/dump.json
@@ -54,9 +54,9 @@
# @paging: if true, do paging to get guest's memory mapping. This
# allows using gdb to process the core file.
#
-# IMPORTANT: this option can make QEMU allocate several gigabytes
-# of RAM. This can happen for a large guest, or a malicious guest
-# pretending to be large.
+# **Important**: this option can make QEMU allocate several
+# gigabytes of RAM. This can happen for a large guest, or a
+# malicious guest pretending to be large.
#
# Also, paging=true has the following limitations:
#
@@ -195,7 +195,7 @@
##
# @query-dump-guest-memory-capability:
#
-# Returns the available formats for dump-guest-memory
+# Return the available formats for dump-guest-memory
#
# Returns: A @DumpGuestMemoryCapability object listing available
# formats for dump-guest-memory
diff --git a/qapi/ebpf.json b/qapi/ebpf.json
index e500b5a..db19ae8 100644
--- a/qapi/ebpf.json
+++ b/qapi/ebpf.json
@@ -8,7 +8,7 @@
# = eBPF Objects
#
# eBPF object is an ELF binary that contains the eBPF program and eBPF
-# map description(BTF). Overall, eBPF object should contain the
+# map description (BTF). Overall, the eBPF object should contain the
# program and enough metadata to create/load eBPF with libbpf. As the
# eBPF maps/program should correspond to QEMU, the eBPF can't be used
# from different QEMU build.
diff --git a/qapi/introspect.json b/qapi/introspect.json
index b15052e..e9e0297 100644
--- a/qapi/introspect.json
+++ b/qapi/introspect.json
@@ -26,9 +26,9 @@
# the QAPI schema.
#
# Furthermore, while we strive to keep the QMP wire format
-# backwards-compatible across qemu versions, the introspection output
+# backwards-compatible across QEMU versions, the introspection output
# is not guaranteed to have the same stability. For example, one
-# version of qemu may list an object member as an optional
+# version of QEMU may list an object member as an optional
# non-variant, while another lists the same member only through the
# object's variants; or the type of a member may change from a generic
# string into a specific enum or from one specific type into an
@@ -42,8 +42,8 @@
# with different meta-types).
#
# .. note:: The QAPI schema is also used to help define *internal*
-# interfaces, by defining QAPI types. These are not part of the QMP
-# wire ABI, and therefore not returned by this command.
+# interfaces, by defining QAPI types. These are not part of the
+# QMP wire ABI, and therefore not returned by this command.
#
# Since: 2.5
##
@@ -127,6 +127,22 @@
# section 1, plus 'int' (split off 'number'), plus the obvious top
# type 'value'.
#
+# @string: JSON string
+#
+# @number: JSON number
+#
+# @int: JSON number that is an integer
+#
+# @boolean: literal ``false`` or ``true``
+#
+# @null: literal ``null``
+#
+# @object: JSON object
+#
+# @array: JSON array
+#
+# @value: any JSON value
+#
# Since: 2.5
##
{ 'enum': 'JSONType',
@@ -138,8 +154,8 @@
#
# Additional SchemaInfo members for meta-type 'enum'.
#
-# @members: the enum type's members, in no particular order (since
-# 6.2).
+# @members: the enum type's members, in no particular order.
+# (since 6.2)
#
# @values: the enumeration type's member names, in no particular
# order. Redundant with @members. Just for backward
diff --git a/qapi/job.json b/qapi/job.json
index b395720..126fa5c 100644
--- a/qapi/job.json
+++ b/qapi/job.json
@@ -20,14 +20,14 @@
#
# @create: image creation job type, see "blockdev-create" (since 3.0)
#
-# @amend: image options amend job type, see "x-blockdev-amend" (since
-# 5.1)
+# @amend: image options amend job type, see "x-blockdev-amend"
+# (since 5.1)
#
-# @snapshot-load: snapshot load job type, see "snapshot-load" (since
-# 6.0)
+# @snapshot-load: snapshot load job type, see "snapshot-load"
+# (since 6.0)
#
-# @snapshot-save: snapshot save job type, see "snapshot-save" (since
-# 6.0)
+# @snapshot-save: snapshot save job type, see "snapshot-save"
+# (since 6.0)
#
# @snapshot-delete: snapshot delete job type, see "snapshot-delete"
# (since 6.0)
@@ -74,7 +74,7 @@
# process.
#
# @concluded: The job has finished all work. If auto-dismiss was set
-# to false, the job will remain in the query list until it is
+# to false, the job will remain in this state until it is
# dismissed via @job-dismiss.
#
# @null: The job is in the process of being dismantled. This state
@@ -138,7 +138,7 @@
#
# The job will pause as soon as possible, which means transitioning
# into the PAUSED state if it was RUNNING, or into STANDBY if it was
-# READY. The corresponding JOB_STATUS_CHANGE event will be emitted.
+# READY. The corresponding JOB_STATUS_CHANGE event will be emitted.
#
# Cancelling a paused job automatically resumes it.
#
@@ -156,6 +156,9 @@
# This command returns immediately after resuming a paused job.
# Resuming an already running job is an error.
#
+# This command also clears the error status for block-jobs (stream,
+# commit, mirror, backup).
+#
# @id: The job identifier.
#
# Since: 3.0
@@ -184,7 +187,23 @@
##
# @job-complete:
#
-# Manually trigger completion of an active job in the READY state.
+# Manually trigger completion of an active job in the READY or STANDBY
+# state. Completing the job in any other state is an error.
+#
+# This is supported only for drive mirroring, where it also switches
+# the device to write to the target path only. Note that drive
+# mirroring includes drive-mirror, blockdev-mirror and block-commit
+# jobs (the latter only in the case of "active commit", when the
+# node being committed is used by the guest). The ability to
+# complete is signaled with a BLOCK_JOB_READY event.
+#
+# This command completes an active background block operation
+# synchronously. The ordering of this command's return with the
+# BLOCK_JOB_COMPLETED event is not defined. Note that if an I/O error
+# occurs during the processing of this command: 1) the command itself
+# will fail; 2) the error will be processed according to the
+# rerror/werror arguments that were specified when starting the
+# operation.
#
# @id: The job identifier.
#
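Concretely, completing a ready mirror job is a single exchange; the job id below is hypothetical:

    import json

    # After a BLOCK_JOB_READY event has been seen for "mirror0":
    job_complete = {"execute": "job-complete", "arguments": {"id": "mirror0"}}
    print(json.dumps(job_complete))
    # Success is {"return": {}}; as noted above, the BLOCK_JOB_COMPLETED
    # event may arrive before or after that return.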
@@ -197,12 +216,16 @@
#
# Deletes a job that is in the CONCLUDED state. This command only
# needs to be run explicitly for jobs that don't have automatic
-# dismiss enabled.
+# dismiss enabled. In turn, automatic dismiss may be enabled only
+# for jobs that have the @auto-dismiss option: drive-backup,
+# blockdev-backup, drive-mirror, blockdev-mirror, block-commit and
+# block-stream. @auto-dismiss is enabled by default for these
+# jobs.
#
# This command will refuse to operate on any job that has not yet
-# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make
-# use of JOB_READY event, job-cancel or job-complete will still need
-# to be used as appropriate.
+# reached its terminal state, CONCLUDED. For jobs that make use of
+# the JOB_READY event, job-cancel or job-complete will still need to
+# be used as appropriate.
#
# @id: The job identifier.
#
@@ -222,6 +245,9 @@
# force ALL jobs in the transaction to finalize, so it is only
# necessary to instruct a single member job to finalize.
#
+# The command is applicable only to jobs that have the
+# @auto-finalize option, and only when this option is set to false.
+#
# @id: The identifier of any job in the transaction, or of a job that
# is not part of any transaction.
#
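A typical sequence for a job started with auto-finalize and auto-dismiss both disabled therefore looks like the following; the job id is hypothetical:

    import json

    job_id = "backup0"                         # hypothetical job id
    # Once the job reports PENDING, commit its visible side effects:
    finalize = {"execute": "job-finalize", "arguments": {"id": job_id}}
    # Once it reports CONCLUDED, remove it from the job list:
    dismiss = {"execute": "job-dismiss", "arguments": {"id": job_id}}
    for cmd in (finalize, dismiss):
        print(json.dumps(cmd))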
diff --git a/qapi/machine-common.json b/qapi/machine-common.json
index fa6bd71..298e51f 100644
--- a/qapi/machine-common.json
+++ b/qapi/machine-common.json
@@ -5,17 +5,108 @@
# See the COPYING file in the top-level directory.
##
-# = Machines S390 data types
+# = Common machine types
##
##
-# @CpuS390Entitlement:
+# @S390CpuEntitlement:
#
# An enumeration of CPU entitlements that can be assumed by a virtual
# S390 CPU
#
# Since: 8.2
##
-{ 'enum': 'CpuS390Entitlement',
- 'prefix': 'S390_CPU_ENTITLEMENT',
+{ 'enum': 'S390CpuEntitlement',
'data': [ 'auto', 'low', 'medium', 'high' ] }
+
+##
+# @CpuTopologyLevel:
+#
+# An enumeration of CPU topology levels.
+#
+# @thread: thread level, which would also be called SMT level or
+# logical processor level. The @threads option in
+# SMPConfiguration is used to configure the topology of this
+# level.
+#
+# @core: core level. The @cores option in SMPConfiguration is used
+# to configure the topology of this level.
+#
+# @module: module level. The @modules option in SMPConfiguration is
+# used to configure the topology of this level.
+#
+# @cluster: cluster level. The @clusters option in SMPConfiguration
+# is used to configure the topology of this level.
+#
+# @die: die level. The @dies option in SMPConfiguration is used to
+# configure the topology of this level.
+#
+# @socket: socket level, which would also be called package level.
+# The @sockets option in SMPConfiguration is used to configure
+# the topology of this level.
+#
+# @book: book level. The @books option in SMPConfiguration is used
+# to configure the topology of this level.
+#
+# @drawer: drawer level. The @drawers option in SMPConfiguration is
+# used to configure the topology of this level.
+#
+# @default: default level. Some architectures will have default
+# topology settings (e.g., cache topology), and this special
+# level means following the architecture-specific settings.
+#
+# Since: 9.2
+##
+{ 'enum': 'CpuTopologyLevel',
+ 'data': [ 'thread', 'core', 'module', 'cluster', 'die',
+ 'socket', 'book', 'drawer', 'default' ] }
+
+##
+# @CacheLevelAndType:
+#
+# Caches a system may have. The enumeration value here is the
+# combination of cache level and cache type.
+#
+# @l1d: L1 data cache.
+#
+# @l1i: L1 instruction cache.
+#
+# @l2: L2 (unified) cache.
+#
+# @l3: L3 (unified) cache
+#
+# Since: 9.2
+##
+{ 'enum': 'CacheLevelAndType',
+ 'data': [ 'l1d', 'l1i', 'l2', 'l3' ] }
+
+##
+# @SmpCacheProperties:
+#
+# Cache information for SMP system.
+#
+# @cache: Cache name, which is the combination of cache level
+# and cache type.
+#
+# @topology: Cache topology level. It accepts the CPU topology
+# enumeration as the parameter, i.e., CPUs in the same
+# topology container share the same cache.
+#
+# Since: 9.2
+##
+{ 'struct': 'SmpCacheProperties',
+ 'data': {
+ 'cache': 'CacheLevelAndType',
+ 'topology': 'CpuTopologyLevel' } }
+
+##
+# @SmpCachePropertiesWrapper:
+#
+# List wrapper of SmpCacheProperties.
+#
+# @caches: the list of SmpCacheProperties.
+#
+# Since: 9.2
+##
+{ 'struct': 'SmpCachePropertiesWrapper',
+ 'data': { 'caches': ['SmpCacheProperties'] } }
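Taken together, the wrapper simply carries a list of cache/topology pairs. A configuration sharing the unified L2 per module and L3 per socket would serialize roughly as below; how the wrapper is attached to the machine's SMP configuration is outside this file:

    import json

    # Shape dictated by SmpCachePropertiesWrapper / SmpCacheProperties above.
    smp_cache = {
        "caches": [
            {"cache": "l1d", "topology": "core"},
            {"cache": "l1i", "topology": "core"},
            {"cache": "l2",  "topology": "module"},
            {"cache": "l3",  "topology": "socket"},
        ]
    }
    print(json.dumps(smp_cache, indent=2))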
diff --git a/qapi/machine-s390x.json b/qapi/machine-s390x.json
new file mode 100644
index 0000000..966dbd6
--- /dev/null
+++ b/qapi/machine-s390x.json
@@ -0,0 +1,121 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+{ 'include': 'machine-common.json' }
+
+##
+# @S390CpuPolarization:
+#
+# An enumeration of CPU polarization that can be assumed by a virtual
+# S390 CPU
+#
+# Since: 8.2
+##
+{ 'enum': 'S390CpuPolarization',
+ 'data': [ 'horizontal', 'vertical' ]
+}
+
+##
+# @set-cpu-topology:
+#
+# Modify the topology by moving the CPU inside the topology tree, or
+# by changing a modifier attribute of a CPU. Absent values will not
+# be modified.
+#
+# @core-id: the vCPU ID to be moved
+#
+# @socket-id: destination socket to move the vCPU to
+#
+# @book-id: destination book to move the vCPU to
+#
+# @drawer-id: destination drawer to move the vCPU to
+#
+# @entitlement: entitlement to set
+#
+# @dedicated: whether the provisioning of real to virtual CPU is
+# dedicated
+#
+# Features:
+#
+# @unstable: This command is experimental.
+#
+# Since: 8.2
+##
+{ 'command': 'set-cpu-topology',
+ 'data': {
+ 'core-id': 'uint16',
+ '*socket-id': 'uint16',
+ '*book-id': 'uint16',
+ '*drawer-id': 'uint16',
+ '*entitlement': 'S390CpuEntitlement',
+ '*dedicated': 'bool'
+ },
+ 'features': [ 'unstable' ]
+}
+
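For example, moving vCPU 2 to socket 1 while raising its entitlement is a single command; the concrete ids are hypothetical:

    import json

    set_topology = {
        "execute": "set-cpu-topology",
        "arguments": {
            "core-id": 2,              # vCPU being moved (hypothetical)
            "socket-id": 1,            # destination socket (hypothetical)
            "entitlement": "high",     # an S390CpuEntitlement value
            "dedicated": False,
        },
    }
    print(json.dumps(set_topology))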
+##
+# @CPU_POLARIZATION_CHANGE:
+#
+# Emitted when the guest asks to change the polarization.
+#
+# The guest can tell the host (via the PTF instruction) whether the
+# CPUs should be provisioned using horizontal or vertical
+# polarization.
+#
+# On horizontal polarization the host is expected to provision all
+# vCPUs equally.
+#
+# On vertical polarization the host can provision each vCPU
+# differently. The guest will get information on the details of the
+# provisioning the next time it uses the STSI(15) instruction.
+#
+# @polarization: polarization specified by the guest
+#
+# Features:
+#
+# @unstable: This event is experimental.
+#
+# Since: 8.2
+#
+# .. qmp-example::
+#
+# <- { "event": "CPU_POLARIZATION_CHANGE",
+# "data": { "polarization": "horizontal" },
+# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } }
+##
+{ 'event': 'CPU_POLARIZATION_CHANGE',
+ 'data': { 'polarization': 'S390CpuPolarization' },
+ 'features': [ 'unstable' ]
+}
+
+##
+# @CpuPolarizationInfo:
+#
+# The result of a CPU polarization query.
+#
+# @polarization: the CPU polarization
+#
+# Since: 8.2
+##
+{ 'struct': 'CpuPolarizationInfo',
+ 'data': { 'polarization': 'S390CpuPolarization' }
+}
+
+##
+# @query-s390x-cpu-polarization:
+#
+# Features:
+#
+# @unstable: This command is experimental.
+#
+# Returns: the machine's CPU polarization
+#
+# Since: 8.2
+##
+{ 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo',
+ 'features': [ 'unstable' ]
+}
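A query/response pair for this command is a one-liner; the value returned depends on what the guest last requested via PTF:

    import json

    print(json.dumps({"execute": "query-s390x-cpu-polarization"}))
    # Expected reply shape, per CpuPolarizationInfo above:
    #   {"return": {"polarization": "vertical"}}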
diff --git a/qapi/machine-target.json b/qapi/machine-target.json
deleted file mode 100644
index 7edb876..0000000
--- a/qapi/machine-target.json
+++ /dev/null
@@ -1,518 +0,0 @@
-# -*- Mode: Python -*-
-# vim: filetype=python
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-
-{ 'include': 'machine-common.json' }
-
-##
-# @CpuModelInfo:
-#
-# Virtual CPU model.
-#
-# A CPU model consists of the name of a CPU definition, to which delta
-# changes are applied (e.g. features added/removed). Most magic values
-# that an architecture might require should be hidden behind the name.
-# However, if required, architectures can expose relevant properties.
-#
-# @name: the name of the CPU definition the model is based on
-#
-# @props: a dictionary of QOM properties to be applied
-#
-# @deprecated-props: a list of properties that are flagged as deprecated
-# by the CPU vendor. These props are a subset of the full model's
-# definition list of properties. (since 9.1)
-#
-# Since: 2.8
-##
-{ 'struct': 'CpuModelInfo',
- 'data': { 'name': 'str',
- '*props': 'any',
- '*deprecated-props': ['str'] } }
-
-##
-# @CpuModelExpansionType:
-#
-# An enumeration of CPU model expansion types.
-#
-# @static: Expand to a static CPU model, a combination of a static
-# base model name and property delta changes. As the static base
-# model will never change, the expanded CPU model will be the
-# same, independent of QEMU version, machine type, machine
-# options, and accelerator options. Therefore, the resulting
-# model can be used by tooling without having to specify a
-# compatibility machine - e.g. when displaying the "host" model.
-# The @static CPU models are migration-safe.
-#
-# @full: Expand all properties. The produced model is not guaranteed
-# to be migration-safe, but allows tooling to get an insight and
-# work with model details.
-#
-# .. note:: When a non-migration-safe CPU model is expanded in static
-# mode, some features enabled by the CPU model may be omitted,
-# because they can't be implemented by a static CPU model definition
-# (e.g. cache info passthrough and PMU passthrough in x86). If you
-# need an accurate representation of the features enabled by a
-# non-migration-safe CPU model, use @full. If you need a static
-# representation that will keep ABI compatibility even when changing
-# QEMU version or machine-type, use @static (but keep in mind that
-# some features may be omitted).
-#
-# Since: 2.8
-##
-{ 'enum': 'CpuModelExpansionType',
- 'data': [ 'static', 'full' ] }
-
-##
-# @CpuModelCompareResult:
-#
-# An enumeration of CPU model comparison results. The result is
-# usually calculated using e.g. CPU features or CPU generations.
-#
-# @incompatible: If model A is incompatible to model B, model A is not
-# guaranteed to run where model B runs and the other way around.
-#
-# @identical: If model A is identical to model B, model A is
-# guaranteed to run where model B runs and the other way around.
-#
-# @superset: If model A is a superset of model B, model B is
-# guaranteed to run where model A runs. There are no guarantees
-# about the other way.
-#
-# @subset: If model A is a subset of model B, model A is guaranteed to
-# run where model B runs. There are no guarantees about the other
-# way.
-#
-# Since: 2.8
-##
-{ 'enum': 'CpuModelCompareResult',
- 'data': [ 'incompatible', 'identical', 'superset', 'subset' ] }
-
-##
-# @CpuModelBaselineInfo:
-#
-# The result of a CPU model baseline.
-#
-# @model: the baselined CpuModelInfo.
-#
-# Since: 2.8
-##
-{ 'struct': 'CpuModelBaselineInfo',
- 'data': { 'model': 'CpuModelInfo' },
- 'if': 'TARGET_S390X' }
-
-##
-# @CpuModelCompareInfo:
-#
-# The result of a CPU model comparison.
-#
-# @result: The result of the compare operation.
-#
-# @responsible-properties: List of properties that led to the
-# comparison result not being identical.
-#
-# @responsible-properties is a list of QOM property names that led to
-# both CPUs not being detected as identical. For identical models,
-# this list is empty. If a QOM property is read-only, that means
-# there's no known way to make the CPU models identical. If the
-# special property name "type" is included, the models are by
-# definition not identical and cannot be made identical.
-#
-# Since: 2.8
-##
-{ 'struct': 'CpuModelCompareInfo',
- 'data': { 'result': 'CpuModelCompareResult',
- 'responsible-properties': ['str'] },
- 'if': 'TARGET_S390X' }
-
-##
-# @query-cpu-model-comparison:
-#
-# Compares two CPU models, @modela and @modelb, returning how they
-# compare in a specific configuration. The results indicates how
-# both models compare regarding runnability. This result can be
-# used by tooling to make decisions if a certain CPU model will
-# run in a certain configuration or if a compatible CPU model has
-# to be created by baselining.
-#
-# Usually, a CPU model is compared against the maximum possible CPU
-# model of a certain configuration (e.g. the "host" model for KVM).
-# If that CPU model is identical or a subset, it will run in that
-# configuration.
-#
-# The result returned by this command may be affected by:
-#
-# * QEMU version: CPU models may look different depending on the QEMU
-# version. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine-type: CPU model may look different depending on the
-# machine-type. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine options (including accelerator): in some architectures,
-# CPU models may look different depending on machine and accelerator
-# options. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * "-cpu" arguments and global properties: arguments to the -cpu
-# option and global properties may affect expansion of CPU models.
-# Using query-cpu-model-expansion while using these is not advised.
-#
-# Some architectures may not support comparing CPU models. s390x
-# supports comparing CPU models.
-#
-# @modela: description of the first CPU model to compare, referred to as
-# "model A" in CpuModelCompareResult
-#
-# @modelb: description of the second CPU model to compare, referred to as
-# "model B" in CpuModelCompareResult
-#
-# Returns: a CpuModelCompareInfo describing how both CPU models
-# compare
-#
-# Errors:
-# - if comparing CPU models is not supported
-# - if a model cannot be used
-# - if a model contains an unknown cpu definition name, unknown
-# properties or properties with wrong types.
-#
-# .. note:: This command isn't specific to s390x, but is only
-# implemented on this architecture currently.
-#
-# Since: 2.8
-##
-{ 'command': 'query-cpu-model-comparison',
- 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' },
- 'returns': 'CpuModelCompareInfo',
- 'if': 'TARGET_S390X' }
-
-##
-# @query-cpu-model-baseline:
-#
-# Baseline two CPU models, @modela and @modelb, creating a compatible
-# third model. The created model will always be a static,
-# migration-safe CPU model (see "static" CPU model expansion for details).
-#
-# This interface can be used by tooling to create a compatible CPU
-# model out two CPU models. The created CPU model will be identical
-# to or a subset of both CPU models when comparing them. Therefore,
-# the created CPU model is guaranteed to run where the given CPU
-# models run.
-#
-# The result returned by this command may be affected by:
-#
-# * QEMU version: CPU models may look different depending on the QEMU
-# version. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine-type: CPU model may look different depending on the
-# machine-type. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine options (including accelerator): in some architectures,
-# CPU models may look different depending on machine and accelerator
-# options. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * "-cpu" arguments and global properties: arguments to the -cpu
-# option and global properties may affect expansion of CPU models.
-# Using query-cpu-model-expansion while using these is not advised.
-#
-# Some architectures may not support baselining CPU models. s390x
-# supports baselining CPU models.
-#
-# @modela: description of the first CPU model to baseline
-#
-# @modelb: description of the second CPU model to baseline
-#
-# Returns: a CpuModelBaselineInfo describing the baselined CPU model
-#
-# Errors:
-# - if baselining CPU models is not supported
-# - if a model cannot be used
-# - if a model contains an unknown cpu definition name, unknown
-# properties or properties with wrong types.
-#
-# .. note:: This command isn't specific to s390x, but is only
-# implemented on this architecture currently.
-#
-# Since: 2.8
-##
-{ 'command': 'query-cpu-model-baseline',
- 'data': { 'modela': 'CpuModelInfo',
- 'modelb': 'CpuModelInfo' },
- 'returns': 'CpuModelBaselineInfo',
- 'if': 'TARGET_S390X' }
-
-##
-# @CpuModelExpansionInfo:
-#
-# The result of a cpu model expansion.
-#
-# @model: the expanded CpuModelInfo.
-#
-# Since: 2.8
-##
-{ 'struct': 'CpuModelExpansionInfo',
- 'data': { 'model': 'CpuModelInfo' },
- 'if': { 'any': [ 'TARGET_S390X',
- 'TARGET_I386',
- 'TARGET_ARM',
- 'TARGET_LOONGARCH64',
- 'TARGET_RISCV' ] } }
-
-##
-# @query-cpu-model-expansion:
-#
-# Expands a given CPU model, @model, (or a combination of CPU model +
-# additional options) to different granularities, specified by
-# @type, allowing tooling to get an understanding what a specific
-# CPU model looks like in QEMU under a certain configuration.
-#
-# This interface can be used to query the "host" CPU model.
-#
-# The data returned by this command may be affected by:
-#
-# * QEMU version: CPU models may look different depending on the QEMU
-# version. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine-type: CPU model may look different depending on the
-# machine-type. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * machine options (including accelerator): in some architectures,
-# CPU models may look different depending on machine and accelerator
-# options. (Except for CPU models reported as "static" in
-# query-cpu-definitions.)
-# * "-cpu" arguments and global properties: arguments to the -cpu
-# option and global properties may affect expansion of CPU models.
-# Using query-cpu-model-expansion while using these is not advised.
-#
-# Some architectures may not support all expansion types. s390x
-# supports "full" and "static". Arm only supports "full".
-#
-# @model: description of the CPU model to expand
-#
-# @type: expansion type, specifying how to expand the CPU model
-#
-# Returns: a CpuModelExpansionInfo describing the expanded CPU model
-#
-# Errors:
-# - if expanding CPU models is not supported
-# - if the model cannot be expanded
-# - if the model contains an unknown CPU definition name, unknown
-# properties or properties with a wrong type
-# - if an expansion type is not supported
-#
-# Since: 2.8
-##
-{ 'command': 'query-cpu-model-expansion',
- 'data': { 'type': 'CpuModelExpansionType',
- 'model': 'CpuModelInfo' },
- 'returns': 'CpuModelExpansionInfo',
- 'if': { 'any': [ 'TARGET_S390X',
- 'TARGET_I386',
- 'TARGET_ARM',
- 'TARGET_LOONGARCH64',
- 'TARGET_RISCV' ] } }
-
-##
-# @CpuDefinitionInfo:
-#
-# Virtual CPU definition.
-#
-# @name: the name of the CPU definition
-#
-# @migration-safe: whether a CPU definition can be safely used for
-# migration in combination with a QEMU compatibility machine when
-# migrating between different QEMU versions and between hosts with
-# different sets of (hardware or software) capabilities. If not
-# provided, information is not available and callers should not
-# assume the CPU definition to be migration-safe. (since 2.8)
-#
-# @static: whether a CPU definition is static and will not change
-# depending on QEMU version, machine type, machine options and
-# accelerator options. A static model is always migration-safe.
-# (since 2.8)
-#
-# @unavailable-features: List of properties that prevent the CPU model
-# from running in the current host. (since 2.8)
-#
-# @typename: Type name that can be used as argument to
-# @device-list-properties, to introspect properties configurable
-# using -cpu or -global. (since 2.9)
-#
-# @alias-of: Name of CPU model this model is an alias for. The target
-# of the CPU model alias may change depending on the machine type.
-# Management software is supposed to translate CPU model aliases
-# in the VM configuration, because aliases may stop being
-# migration-safe in the future (since 4.1)
-#
-# @deprecated: If true, this CPU model is deprecated and may be
-# removed in in some future version of QEMU according to the QEMU
-# deprecation policy. (since 5.2)
-#
-# @unavailable-features is a list of QOM property names that represent
-# CPU model attributes that prevent the CPU from running. If the QOM
-# property is read-only, that means there's no known way to make the
-# CPU model run in the current host. Implementations that choose not
-# to provide specific information return the property name "type". If
-# the property is read-write, it means that it MAY be possible to run
-# the CPU model in the current host if that property is changed.
-# Management software can use it as hints to suggest or choose an
-# alternative for the user, or just to generate meaningful error
-# messages explaining why the CPU model can't be used. If
-# @unavailable-features is an empty list, the CPU model is runnable
-# using the current host and machine-type. If @unavailable-features
-# is not present, runnability information for the CPU is not
-# available.
-#
-# Since: 1.2
-##
-{ 'struct': 'CpuDefinitionInfo',
- 'data': { 'name': 'str',
- '*migration-safe': 'bool',
- 'static': 'bool',
- '*unavailable-features': [ 'str' ],
- 'typename': 'str',
- '*alias-of' : 'str',
- 'deprecated' : 'bool' },
- 'if': { 'any': [ 'TARGET_PPC',
- 'TARGET_ARM',
- 'TARGET_I386',
- 'TARGET_S390X',
- 'TARGET_MIPS',
- 'TARGET_LOONGARCH64',
- 'TARGET_RISCV' ] } }
-
-##
-# @query-cpu-definitions:
-#
-# Return a list of supported virtual CPU definitions
-#
-# Returns: a list of CpuDefinitionInfo
-#
-# Since: 1.2
-##
-{ 'command': 'query-cpu-definitions', 'returns': ['CpuDefinitionInfo'],
- 'if': { 'any': [ 'TARGET_PPC',
- 'TARGET_ARM',
- 'TARGET_I386',
- 'TARGET_S390X',
- 'TARGET_MIPS',
- 'TARGET_LOONGARCH64',
- 'TARGET_RISCV' ] } }
-
-##
-# @CpuS390Polarization:
-#
-# An enumeration of CPU polarization that can be assumed by a virtual
-# S390 CPU
-#
-# Since: 8.2
-##
-{ 'enum': 'CpuS390Polarization',
- 'prefix': 'S390_CPU_POLARIZATION',
- 'data': [ 'horizontal', 'vertical' ],
- 'if': 'TARGET_S390X'
-}
-
-##
-# @set-cpu-topology:
-#
-# Modify the topology by moving the CPU inside the topology tree, or
-# by changing a modifier attribute of a CPU. Absent values will not
-# be modified.
-#
-# @core-id: the vCPU ID to be moved
-#
-# @socket-id: destination socket to move the vCPU to
-#
-# @book-id: destination book to move the vCPU to
-#
-# @drawer-id: destination drawer to move the vCPU to
-#
-# @entitlement: entitlement to set
-#
-# @dedicated: whether the provisioning of real to virtual CPU is
-# dedicated
-#
-# Features:
-#
-# @unstable: This command is experimental.
-#
-# Since: 8.2
-##
-{ 'command': 'set-cpu-topology',
- 'data': {
- 'core-id': 'uint16',
- '*socket-id': 'uint16',
- '*book-id': 'uint16',
- '*drawer-id': 'uint16',
- '*entitlement': 'CpuS390Entitlement',
- '*dedicated': 'bool'
- },
- 'features': [ 'unstable' ],
- 'if': { 'all': [ 'TARGET_S390X' , 'CONFIG_KVM' ] }
-}
-
-##
-# @CPU_POLARIZATION_CHANGE:
-#
-# Emitted when the guest asks to change the polarization.
-#
-# The guest can tell the host (via the PTF instruction) whether the
-# CPUs should be provisioned using horizontal or vertical
-# polarization.
-#
-# On horizontal polarization the host is expected to provision all
-# vCPUs equally.
-#
-# On vertical polarization the host can provision each vCPU
-# differently. The guest will get information on the details of the
-# provisioning the next time it uses the STSI(15) instruction.
-#
-# @polarization: polarization specified by the guest
-#
-# Features:
-#
-# @unstable: This event is experimental.
-#
-# Since: 8.2
-#
-# .. qmp-example::
-#
-# <- { "event": "CPU_POLARIZATION_CHANGE",
-# "data": { "polarization": "horizontal" },
-# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } }
-##
-{ 'event': 'CPU_POLARIZATION_CHANGE',
- 'data': { 'polarization': 'CpuS390Polarization' },
- 'features': [ 'unstable' ],
- 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
-}
-
-##
-# @CpuPolarizationInfo:
-#
-# The result of a CPU polarization query.
-#
-# @polarization: the CPU polarization
-#
-# Since: 8.2
-##
-{ 'struct': 'CpuPolarizationInfo',
- 'data': { 'polarization': 'CpuS390Polarization' },
- 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
-}
-
-##
-# @query-s390x-cpu-polarization:
-#
-# Features:
-#
-# @unstable: This command is experimental.
-#
-# Returns: the machine's CPU polarization
-#
-# Since: 8.2
-##
-{ 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo',
- 'features': [ 'unstable' ],
- 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
-}
diff --git a/qapi/machine.json b/qapi/machine.json
index f9ea6b3..0650b8d 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -24,14 +24,16 @@
#
# @avr: since 5.1
#
+# @loongarch64: since 7.1
+#
# .. note:: The resulting QMP strings can be appended to the
-# "qemu-system-" prefix to produce the corresponding QEMU executable
-# name. This is true even for "qemu-system-x86_64".
+# "qemu-system-" prefix to produce the corresponding QEMU
+# executable name. This is true even for "qemu-system-x86_64".
#
# Since: 3.0
##
{ 'enum' : 'SysEmuTarget',
- 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'cris', 'hppa', 'i386',
+ 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'hppa', 'i386',
'loongarch64', 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64',
'mips64el', 'mipsel', 'or1k', 'ppc',
'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4',
@@ -39,15 +41,14 @@
'x86_64', 'xtensa', 'xtensaeb' ] }
##
-# @CpuS390State:
+# @S390CpuState:
#
# An enumeration of cpu states that can be assumed by a virtual S390
# CPU
#
# Since: 2.12
##
-{ 'enum': 'CpuS390State',
- 'prefix': 'S390_CPU_STATE',
+{ 'enum': 'S390CpuState',
'data': [ 'uninitialized', 'stopped', 'check-stop', 'operating', 'load' ] }
##
@@ -64,9 +65,9 @@
# Since: 2.12
##
{ 'struct': 'CpuInfoS390',
- 'data': { 'cpu-state': 'CpuS390State',
+ 'data': { 'cpu-state': 'S390CpuState',
'*dedicated': 'bool',
- '*entitlement': 'CpuS390Entitlement' } }
+ '*entitlement': 'S390CpuEntitlement' } }
##
# @CpuInfoFast:
@@ -98,7 +99,7 @@
##
# @query-cpus-fast:
#
-# Returns information about all virtual CPUs.
+# Return information about all virtual CPUs.
#
# Returns: list of @CpuInfoFast
#
@@ -181,8 +182,8 @@
# @default-cpu-type: default CPU model typename if none is requested
# via the -cpu argument. (since 4.2)
#
-# @default-ram-id: the default ID of initial RAM memory backend (since
-# 5.2)
+# @default-ram-id: the default ID of initial RAM memory backend
+# (since 5.2)
#
# @acpi: machine type supports ACPI (since 8.0)
#
@@ -274,15 +275,15 @@
{ 'command': 'query-current-machine', 'returns': 'CurrentMachineParams' }
##
-# @TargetInfo:
+# @QemuTargetInfo:
#
-# Information describing the QEMU target.
+# Information on the target configuration built into the QEMU binary.
#
# @arch: the target architecture
#
# Since: 1.2
##
-{ 'struct': 'TargetInfo',
+{ 'struct': 'QemuTargetInfo',
'data': { 'arch': 'SysEmuTarget' } }
##
@@ -290,11 +291,11 @@
#
# Return information about the target for this QEMU
#
-# Returns: TargetInfo
+# Returns: QemuTargetInfo
#
# Since: 1.2
##
-{ 'command': 'query-target', 'returns': 'TargetInfo' }
+{ 'command': 'query-target', 'returns': 'QemuTargetInfo' }
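The rename does not change the wire format; the exchange still looks like this, with the returned architecture depending on the binary:

    import json

    print(json.dumps({"execute": "query-target"}))
    # Typical reply from an x86_64 system emulator:
    #   {"return": {"arch": "x86_64"}}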
##
# @UuidInfo:
@@ -369,8 +370,8 @@
#
# .. note:: A guest may or may not respond to this command. This
# command returning does not indicate that a guest has accepted the
-# request or that it has shut down. Many guests will respond to this
-# command by prompting the user in some way.
+# request or that it has shut down. Many guests will respond to
+# this command by prompting the user in some way.
#
# .. qmp-example::
#
@@ -435,7 +436,7 @@
# @inject-nmi:
#
# Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or
-# all CPUs (ppc64). The command fails when the guest doesn't support
+# all CPUs (ppc64). The command fails when the guest doesn't support
# injecting.
#
# Since: 0.14
@@ -466,7 +467,7 @@
##
# @query-kvm:
#
-# Returns information about KVM acceleration
+# Return information about KVM acceleration
#
# Returns: @KvmInfo
#
@@ -693,7 +694,7 @@
# Structure of HMAT (Heterogeneous Memory Attribute Table)
#
# For more information about @HmatLBDataType, see chapter 5.2.27.4:
-# Table 5-146: Field "Data Type" of ACPI 6.3 spec.
+# Table 5-146: Field "Data Type" of ACPI 6.3 spec.
#
# @access-latency: access latency (nanoseconds)
#
@@ -810,7 +811,7 @@
#
# @policy: the write policy, none/write-back/write-through.
#
-# @line: the cache Line size in bytes.
+# @line: the cache line size in bytes.
#
# Since: 5.0
##
@@ -850,7 +851,11 @@
# <- { "return": {} }
##
{ 'command': 'memsave',
- 'data': {'val': 'int', 'size': 'int', 'filename': 'str', '*cpu-index': 'int'} }
+ 'data': {
+ 'val': 'uint64',
+ 'size': 'size',
+ 'filename': 'str',
+ '*cpu-index': 'int' } }
##
# @pmemsave:
@@ -876,7 +881,10 @@
# <- { "return": {} }
##
{ 'command': 'pmemsave',
- 'data': {'val': 'int', 'size': 'int', 'filename': 'str'} }
+ 'data': {
+ 'val': 'uint64',
+ 'size': 'size',
+ 'filename': 'str' } }
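With the stricter uint64/size types the schema now rejects negative values up front. A request saving 64 KiB of guest-physical memory looks like this; the output path is hypothetical:

    import json

    pmemsave = {
        "execute": "pmemsave",
        "arguments": {
            "val": 0x100000,                    # guest-physical start address
            "size": 64 * 1024,                  # length in bytes
            "filename": "/tmp/guest-mem.bin",   # hypothetical output path
        },
    }
    print(json.dumps(pmemsave))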
##
# @Memdev:
@@ -922,7 +930,7 @@
##
# @query-memdev:
#
-# Returns information for all memory backends.
+# Return information for all memory backends.
#
# Returns: a list of @Memdev.
#
@@ -986,8 +994,8 @@
# @cluster-id: cluster number within the parent container the CPU
# belongs to (since 7.1)
#
-# @module-id: module number within the parent container the CPU belongs
-# to (since 9.1)
+# @module-id: module number within the parent container the CPU
+# belongs to (since 9.1)
#
# @core-id: core number within the parent container the CPU belongs to
#
@@ -1081,7 +1089,7 @@
# :annotated:
#
# For s390x-virtio-ccw machine type started with
-# ``-smp 1,maxcpus=2 -cpu qemu`` (Since: 2.11)::
+# ``-smp 1,maxcpus=2 -cpu qemu``::
#
# -> { "execute": "query-hotpluggable-cpus" }
# <- {"return": [
@@ -1130,8 +1138,8 @@
# - If no balloon device is present, DeviceNotActive
#
# .. note:: This command just issues a request to the guest. When it
-# returns, the balloon size may not have changed. A guest can change
-# the balloon size independent of this command.
+# returns, the balloon size may not have changed. A guest can
+# change the balloon size independent of this command.
#
# Since: 0.14
#
@@ -1152,7 +1160,7 @@
#
# Information about the guest balloon device.
#
-# @actual: the logical size of the VM in bytes Formula used:
+# @actual: the logical size of the VM in bytes. Formula used:
# logical_vm_size = vm_ram_size - balloon_size
#
# Since: 0.14
@@ -1191,7 +1199,7 @@
# is equivalent to the @actual field return by the 'query-balloon'
# command
#
-# @actual: the logical size of the VM in bytes Formula used:
+# @actual: the logical size of the VM in bytes. Formula used:
# logical_vm_size = vm_ram_size - balloon_size
#
# .. note:: This event is rate-limited.
@@ -1227,7 +1235,7 @@
##
# @query-hv-balloon-status-report:
#
-# Returns the hv-balloon driver data contained in the last received
+# Return the hv-balloon driver data contained in the last received
# "STATUS" message from the guest.
#
# Returns:
@@ -1657,8 +1665,8 @@
# The members other than @cpus and @maxcpus define a topology of
# containers.
#
-# The ordering from highest/coarsest to lowest/finest is:
-# @drawers, @books, @sockets, @dies, @clusters, @cores, @threads.
+# The ordering from highest/coarsest to lowest/finest is: @drawers,
+# @books, @sockets, @dies, @clusters, @cores, @threads.
#
# Different architectures support different subsets of topology
# containers.
@@ -1890,3 +1898,384 @@
{ 'command': 'x-query-interrupt-controllers',
'returns': 'HumanReadableText',
'features': [ 'unstable' ]}
+
+##
+# @dump-skeys:
+#
+# Dump the storage keys for an s390x guest
+#
+# @filename: the path to the file to dump to
+#
+# Since: 2.5
+#
+# .. qmp-example::
+#
+# -> { "execute": "dump-skeys",
+# "arguments": { "filename": "/tmp/skeys" } }
+# <- { "return": {} }
+##
+{ 'command': 'dump-skeys',
+ 'data': { 'filename': 'str' } }
+
+##
+# @CpuModelInfo:
+#
+# Virtual CPU model.
+#
+# A CPU model consists of the name of a CPU definition, to which delta
+# changes are applied (e.g. features added/removed). Most magic
+# values that an architecture might require should be hidden behind
+# the name. However, if required, architectures can expose relevant
+# properties.
+#
+# @name: the name of the CPU definition the model is based on
+#
+# @props: a dictionary of QOM properties to be applied
+#
+# Since: 2.8
+##
+{ 'struct': 'CpuModelInfo',
+ 'data': { 'name': 'str',
+ '*props': 'any' } }
+
+##
+# @CpuModelExpansionType:
+#
+# An enumeration of CPU model expansion types.
+#
+# @static: Expand to a static CPU model, a combination of a static
+# base model name and property delta changes. As the static base
+# model will never change, the expanded CPU model will be the
+# same, independent of QEMU version, machine type, machine
+# options, and accelerator options. Therefore, the resulting
+# model can be used by tooling without having to specify a
+# compatibility machine - e.g. when displaying the "host" model.
+# The @static CPU models are migration-safe.
+#
+# @full: Expand all properties. The produced model is not guaranteed
+# to be migration-safe, but allows tooling to get an insight and
+# work with model details.
+#
+# .. note:: When a non-migration-safe CPU model is expanded in static
+# mode, some features enabled by the CPU model may be omitted,
+# because they can't be implemented by a static CPU model
+# definition (e.g. cache info passthrough and PMU passthrough in
+# x86). If you need an accurate representation of the features
+# enabled by a non-migration-safe CPU model, use @full. If you
+# need a static representation that will keep ABI compatibility
+# even when changing QEMU version or machine-type, use @static (but
+# keep in mind that some features may be omitted).
+#
+# Since: 2.8
+##
+{ 'enum': 'CpuModelExpansionType',
+ 'data': [ 'static', 'full' ] }
+
+##
+# @CpuModelCompareResult:
+#
+# An enumeration of CPU model comparison results. The result is
+# usually calculated using e.g. CPU features or CPU generations.
+#
+# @incompatible: If model A is incompatible with model B, model A is
+# not guaranteed to run where model B runs and the other way around.
+#
+# @identical: If model A is identical to model B, model A is
+# guaranteed to run where model B runs and the other way around.
+#
+# @superset: If model A is a superset of model B, model B is
+# guaranteed to run where model A runs. There are no guarantees
+# about the other way.
+#
+# @subset: If model A is a subset of model B, model A is guaranteed to
+# run where model B runs. There are no guarantees about the other
+# way.
+#
+# Since: 2.8
+##
+{ 'enum': 'CpuModelCompareResult',
+ 'data': [ 'incompatible', 'identical', 'superset', 'subset' ] }
+
+##
+# @CpuModelBaselineInfo:
+#
+# The result of a CPU model baseline.
+#
+# @model: the baselined CpuModelInfo.
+#
+# Since: 2.8
+##
+{ 'struct': 'CpuModelBaselineInfo',
+ 'data': { 'model': 'CpuModelInfo' } }
+
+##
+# @CpuModelCompareInfo:
+#
+# The result of a CPU model comparison.
+#
+# @result: The result of the compare operation.
+#
+# @responsible-properties: List of properties that led to the
+# comparison result not being identical.
+#
+# @responsible-properties is a list of QOM property names that led to
+# both CPUs not being detected as identical. For identical models,
+# this list is empty. If a QOM property is read-only, that means
+# there's no known way to make the CPU models identical. If the
+# special property name "type" is included, the models are by
+# definition not identical and cannot be made identical.
+#
+# Since: 2.8
+##
+{ 'struct': 'CpuModelCompareInfo',
+ 'data': { 'result': 'CpuModelCompareResult',
+ 'responsible-properties': ['str'] } }
+
+##
+# @query-cpu-model-comparison:
+#
+# Compares two CPU models, @modela and @modelb, returning how they
+# compare in a specific configuration. The result indicates how
+# both models compare regarding runnability. This result can be
+# used by tooling to decide whether a certain CPU model will run in
+# a certain configuration, or whether a compatible CPU model has to
+# be created by baselining.
+#
+# Usually, a CPU model is compared against the maximum possible CPU
+# model of a certain configuration (e.g. the "host" model for KVM).
+# If that CPU model is identical or a subset, it will run in that
+# configuration.
+#
+# The result returned by this command may be affected by:
+#
+# * QEMU version: CPU models may look different depending on the QEMU
+# version. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine-type: CPU model may look different depending on the
+# machine-type. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine options (including accelerator): in some architectures,
+# CPU models may look different depending on machine and accelerator
+# options. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * "-cpu" arguments and global properties: arguments to the -cpu
+# option and global properties may affect expansion of CPU models.
+# Using query-cpu-model-expansion while using these is not advised.
+#
+# Some architectures may not support comparing CPU models. s390x
+# supports comparing CPU models.
+#
+# @modela: description of the first CPU model to compare, referred to
+# as "model A" in CpuModelCompareResult
+#
+# @modelb: description of the second CPU model to compare, referred to
+# as "model B" in CpuModelCompareResult
+#
+# Returns: a CpuModelCompareInfo describing how both CPU models
+# compare
+#
+# Errors:
+# - if comparing CPU models is not supported by the target
+# - if a model cannot be used
+# - if a model contains an unknown cpu definition name, unknown
+# properties or properties with wrong types.
+#
+# Since: 2.8
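+#
+# The following exchange is illustrative only; the model names, the
+# comparison result and the responsible properties depend on the
+# architecture, accelerator and host (hypothetical s390x models are
+# shown here):
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-cpu-model-comparison",
+# "arguments": { "modela": { "name": "z14" },
+# "modelb": { "name": "z13" } } }
+# <- { "return": { "result": "superset",
+# "responsible-properties": [ "msa8" ] } }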
+##
+{ 'command': 'query-cpu-model-comparison',
+ 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' },
+ 'returns': 'CpuModelCompareInfo' }
+
+##
+# @query-cpu-model-baseline:
+#
+# Baseline two CPU models, @modela and @modelb, creating a compatible
+# third model. The created model will always be a static,
+# migration-safe CPU model (see "static" CPU model expansion for
+# details).
+#
+# This interface can be used by tooling to create a compatible CPU
+# model out of two CPU models. The created CPU model will be
+# identical to or a subset of both CPU models when comparing them.
+# Therefore, the created CPU model is guaranteed to run where the
+# given CPU models run.
+#
+# The result returned by this command may be affected by:
+#
+# * QEMU version: CPU models may look different depending on the QEMU
+# version. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine-type: CPU model may look different depending on the
+# machine-type. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine options (including accelerator): in some architectures,
+# CPU models may look different depending on machine and accelerator
+# options. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * "-cpu" arguments and global properties: arguments to the -cpu
+# option and global properties may affect expansion of CPU models.
+# Using query-cpu-model-expansion while using these is not advised.
+#
+# Some architectures may not support baselining CPU models. s390x
+# supports baselining CPU models.
+#
+# @modela: description of the first CPU model to baseline
+#
+# @modelb: description of the second CPU model to baseline
+#
+# Returns: a CpuModelBaselineInfo describing the baselined CPU model
+#
+# Errors:
+# - if baselining CPU models is not supported by the target
+# - if a model cannot be used
+# - if a model contains an unknown cpu definition name, unknown
+# properties or properties with wrong types.
+#
+# Since: 2.8
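+#
+# The following exchange is illustrative only; the model names and
+# the resulting baseline depend on the architecture, accelerator and
+# host (hypothetical s390x models are shown here):
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-cpu-model-baseline",
+# "arguments": { "modela": { "name": "z14" },
+# "modelb": { "name": "z13" } } }
+# <- { "return": { "model": { "name": "z13-base",
+# "props": { "aen": true } } } }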
+##
+{ 'command': 'query-cpu-model-baseline',
+ 'data': { 'modela': 'CpuModelInfo',
+ 'modelb': 'CpuModelInfo' },
+ 'returns': 'CpuModelBaselineInfo' }
+
+##
+# @CpuModelExpansionInfo:
+#
+# The result of a cpu model expansion.
+#
+# @model: the expanded CpuModelInfo.
+#
+# @deprecated-props: an optional list of properties that are flagged as
+# deprecated by the CPU vendor. The list depends on the
+# CpuModelExpansionType: "static" properties are a subset of the
+# enabled-properties for the expanded model; "full" properties are
+# a set of properties that are deprecated across all models for
+# the architecture. (since 10.1; since 9.1 on s390x)
+#
+# Since: 2.8
+##
+{ 'struct': 'CpuModelExpansionInfo',
+ 'data': { 'model': 'CpuModelInfo',
+ '*deprecated-props' : ['str'] } }
+
+##
+# @query-cpu-model-expansion:
+#
+# Expands a given CPU model, @model (or a combination of CPU model
+# plus additional options), to different granularities, specified by
+# @type, allowing tooling to get an understanding of what a specific
+# CPU model looks like in QEMU under a certain configuration.
+#
+# This interface can be used to query the "host" CPU model.
+#
+# The data returned by this command may be affected by:
+#
+# * QEMU version: CPU models may look different depending on the QEMU
+# version. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine-type: CPU model may look different depending on the
+# machine-type. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine options (including accelerator): in some architectures,
+# CPU models may look different depending on machine and accelerator
+# options. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * "-cpu" arguments and global properties: arguments to the -cpu
+# option and global properties may affect expansion of CPU models.
+# Using query-cpu-model-expansion while using these is not advised.
+#
+# Some architectures may not support all expansion types. s390x
+# supports "full" and "static". Arm only supports "full".
+#
+# @model: description of the CPU model to expand
+#
+# @type: expansion type, specifying how to expand the CPU model
+#
+# Returns: a CpuModelExpansionInfo describing the expanded CPU model
+#
+# Errors:
+# - if expanding CPU models is not supported
+# - if the model cannot be expanded
+# - if the model contains an unknown CPU definition name, unknown
+# properties or properties with a wrong type
+# - if an expansion type is not supported
+#
+# Since: 2.8
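+#
+# The following exchange is illustrative only; the expanded model
+# name and properties depend on the architecture, accelerator, host
+# and configuration (a hypothetical s390x host is shown here):
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-cpu-model-expansion",
+# "arguments": { "type": "static",
+# "model": { "name": "host" } } }
+# <- { "return": { "model": { "name": "z14-base",
+# "props": { "msa8": true } } } }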
+##
+{ 'command': 'query-cpu-model-expansion',
+ 'data': { 'type': 'CpuModelExpansionType',
+ 'model': 'CpuModelInfo' },
+ 'returns': 'CpuModelExpansionInfo' }
+
+##
+# @CpuDefinitionInfo:
+#
+# Virtual CPU definition.
+#
+# @name: the name of the CPU definition
+#
+# @migration-safe: whether a CPU definition can be safely used for
+# migration in combination with a QEMU compatibility machine when
+# migrating between different QEMU versions and between hosts with
+# different sets of (hardware or software) capabilities. If not
+# provided, information is not available and callers should not
+# assume the CPU definition to be migration-safe. (since 2.8)
+#
+# @static: whether a CPU definition is static and will not change
+# depending on QEMU version, machine type, machine options and
+# accelerator options. A static model is always migration-safe.
+# (since 2.8)
+#
+# @unavailable-features: List of properties that prevent the CPU model
+# from running in the current host. (since 2.8)
+#
+# @typename: Type name that can be used as argument to
+# @device-list-properties, to introspect properties configurable
+# using -cpu or -global. (since 2.9)
+#
+# @alias-of: Name of CPU model this model is an alias for. The target
+# of the CPU model alias may change depending on the machine type.
+# Management software is supposed to translate CPU model aliases
+# in the VM configuration, because aliases may stop being
+# migration-safe in the future. (since 4.1)
+#
+# @deprecated: If true, this CPU model is deprecated and may be
+# removed in some future version of QEMU according to the QEMU
+# deprecation policy. (since 5.2)
+#
+# @unavailable-features is a list of QOM property names that represent
+# CPU model attributes that prevent the CPU from running. If the QOM
+# property is read-only, that means there's no known way to make the
+# CPU model run in the current host. Implementations that choose not
+# to provide specific information return the property name "type". If
+# the property is read-write, it means that it MAY be possible to run
+# the CPU model in the current host if that property is changed.
+# Management software can use it as hints to suggest or choose an
+# alternative for the user, or just to generate meaningful error
+# messages explaining why the CPU model can't be used. If
+# @unavailable-features is an empty list, the CPU model is runnable
+# using the current host and machine-type. If @unavailable-features
+# is not present, runnability information for the CPU is not
+# available.
+#
+# Since: 1.2
+##
+{ 'struct': 'CpuDefinitionInfo',
+ 'data': { 'name': 'str',
+ '*migration-safe': 'bool',
+ 'static': 'bool',
+ '*unavailable-features': [ 'str' ],
+ 'typename': 'str',
+ '*alias-of' : 'str',
+ 'deprecated' : 'bool' } }
+
+##
+# @query-cpu-definitions:
+#
+# Return a list of supported virtual CPU definitions
+#
+# Returns: a list of CpuDefinitionInfo
+#
+# Since: 1.2
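+#
+# The returned list below is illustrative only; the available
+# definitions depend on the target architecture, machine type and
+# accelerator:
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-cpu-definitions" }
+# <- { "return": [ { "name": "host", "static": false,
+# "typename": "host-s390x-cpu",
+# "deprecated": false },
+# { "name": "z14-base", "static": true,
+# "migration-safe": true,
+# "typename": "z14-base-s390x-cpu",
+# "unavailable-features": [],
+# "deprecated": false } ] }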
+##
+{ 'command': 'query-cpu-definitions', 'returns': ['CpuDefinitionInfo'] }
diff --git a/qapi/meson.build b/qapi/meson.build
index e7bc54e..3b035ae 100644
--- a/qapi/meson.build
+++ b/qapi/meson.build
@@ -39,10 +39,9 @@ qapi_all_modules = [
'job',
'machine-common',
'machine',
- 'machine-target',
+ 'machine-s390x',
'migration',
'misc',
- 'misc-target',
'net',
'pragma',
'qom',
@@ -64,7 +63,10 @@ if have_system
'qdev',
'pci',
'rocker',
+ 'misc-arm',
+ 'misc-i386',
'tpm',
+ 'uefi',
]
endif
if have_system or have_tools
@@ -83,14 +85,12 @@ qapi_nonmodule_outputs = [
'qapi-emit-events.c', 'qapi-emit-events.h',
]
-# First build all sources
-qapi_util_outputs = [
+qapi_outputs = qapi_nonmodule_outputs + [
'qapi-builtin-types.c', 'qapi-builtin-visit.c',
'qapi-builtin-types.h', 'qapi-builtin-visit.h',
]
qapi_inputs = []
-qapi_specific_outputs = []
foreach module : qapi_all_modules
qapi_inputs += [ files(module + '.json') ]
qapi_module_outputs = [
@@ -108,24 +108,17 @@ foreach module : qapi_all_modules
'qapi-commands-@0@.trace-events'.format(module),
]
endif
- if module.endswith('-target')
- qapi_specific_outputs += qapi_module_outputs
- else
- qapi_util_outputs += qapi_module_outputs
- endif
+ qapi_outputs += qapi_module_outputs
endforeach
qapi_files = custom_target('shared QAPI source files',
- output: qapi_util_outputs + qapi_specific_outputs + qapi_nonmodule_outputs,
+ output: qapi_outputs,
input: [ files('qapi-schema.json') ],
command: [ qapi_gen, '-o', 'qapi', '-b', '@INPUT0@' ],
depend_files: [ qapi_inputs, qapi_gen_depends ])
-# Now go through all the outputs and add them to the right sourceset.
-# These loops must be synchronized with the output of the above custom target.
-
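+# This loop must be synchronized with the output of the custom
+# target above.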
i = 0
-foreach output : qapi_util_outputs
+foreach output : qapi_outputs
if output.endswith('.h')
genh += qapi_files[i]
endif
@@ -135,14 +128,3 @@ foreach output : qapi_util_outputs
util_ss.add(qapi_files[i])
i = i + 1
endforeach
-
-foreach output : qapi_specific_outputs + qapi_nonmodule_outputs
- if output.endswith('.h')
- genh += qapi_files[i]
- endif
- if output.endswith('.trace-events')
- qapi_trace_events += qapi_files[i]
- endif
- specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: qapi_files[i])
- i = i + 1
-endforeach
diff --git a/qapi/migration.json b/qapi/migration.json
index 073b67c..4963f6c 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -57,8 +57,8 @@
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
# synchronization could not avoid copying dirty pages. This is
-# between 0 and @dirty-sync-count * @multifd-channels. (since
-# 7.1)
+# between 0 and @dirty-sync-count * @multifd-channels.
+# (since 7.1)
#
# Since: 0.14
##
@@ -137,16 +137,16 @@
#
# @active: in the process of doing migration.
#
-# @postcopy-active: like active, but now in postcopy mode. (since
-# 2.5)
+# @postcopy-active: like active, but now in postcopy mode.
+# (since 2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
-# @postcopy-recover-setup: setup phase for a postcopy recovery process,
-# preparing for a recovery phase to start. (since 9.1)
+# @postcopy-recover-setup: setup phase for a postcopy recovery
+# process, preparing for a recovery phase to start. (since 9.1)
#
-# @postcopy-recover: trying to recover from a paused postcopy. (since
-# 3.0)
+# @postcopy-recover: trying to recover from a paused postcopy.
+# (since 3.0)
#
# @completed: migration is finished.
#
@@ -158,8 +158,11 @@
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
-# @device: During device serialisation when pause-before-switchover is
-# enabled (since 2.11)
+# @device: During device serialisation (also known as switchover phase).
+# Before 9.2, this is only used when (1) in precopy, and (2) the
+# pre-switchover capability is enabled. After 10.0, this state will
+# always be present for every migration procedure as the switchover
+# phase. (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
# completed. (since 4.2)
@@ -245,10 +248,10 @@
# blocked. Present and non-empty when migration is blocked.
# (since 6.0)
#
-# @dirty-limit-throttle-time-per-round: Maximum throttle time
-# (in microseconds) of virtual CPUs each dirty ring full round,
-# which shows how MigrationCapability dirty-limit affects the
-# guest during live migration. (Since 8.1)
+# @dirty-limit-throttle-time-per-round: Maximum throttle time (in
+# microseconds) of virtual CPUs each dirty ring full round, which
+# shows how MigrationCapability dirty-limit affects the guest
+# during live migration. (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
# (in microseconds) for each dirty ring full round. The value
@@ -279,7 +282,7 @@
##
# @query-migrate:
#
-# Returns information about current migration process. If migration
+# Return information about current migration process. If migration
# is active there will be another json-object with RAM migration
# status.
#
@@ -381,7 +384,7 @@
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
-# Encoding). This feature allows us to minimize migration traffic
+# Encoding). This feature allows us to minimize migration traffic
# for certain work loads, by sending compressed difference of the
# pages
#
@@ -393,8 +396,8 @@
# efficiently. This essentially saves 1MB of zeroes per block on
# the wire. Enabling requires source and target VM to support
# this feature. To enable it is sufficient to enable the
-# capability on the source VM. The feature is disabled by default.
-# (since 1.6)
+# capability on the source VM. The feature is disabled by
+# default. (since 1.6)
#
# @events: generate events for each migration state change (since 2.4)
#
@@ -404,7 +407,7 @@
# @postcopy-ram: Start executing on the migration target before all of
# RAM has been migrated, pulling the remaining pages along as
# needed. The capability must have the same setting on both source
-# and target or migration will not even start. NOTE: If the
+# and target or migration will not even start. **Note:** if the
# migration fails during postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
@@ -412,15 +415,15 @@
# on secondary side, this process is called COarse-Grain LOck
# Stepping (COLO) for Non-stop Service. (since 2.8)
#
-# @release-ram: if enabled, qemu will free the migrated ram pages on
+# @release-ram: if enabled, QEMU will free the migrated ram pages on
# the source during postcopy-ram migration. (since 2.9)
#
# @return-path: If enabled, migration will use the return path even
# for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
-# serialising device state and before disabling block IO (since
-# 2.11)
+# serialising device state and before disabling block IO
+# (since 2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
@@ -479,11 +482,14 @@
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
+# @deprecated: Member @zero-blocks is deprecated because it was part
+# of block migration, which has already been removed.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
- 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
+ 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge',
+ { 'name': 'zero-blocks', 'features': [ 'deprecated' ] },
'events', 'postcopy-ram',
{ 'name': 'x-colo', 'features': [ 'unstable' ] },
'release-ram',
@@ -529,7 +535,7 @@
##
# @query-migrate-capabilities:
#
-# Returns information about the current migration capabilities status
+# Return information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
@@ -561,18 +567,22 @@
#
# @zstd: use zstd compression method.
#
+# @qatzip: use qatzip compression method. (Since 9.2)
+#
# @qpl: use qpl compression method. Query Processing Library(qpl) is
-# based on the deflate compression algorithm and use the Intel
-# In-Memory Analytics Accelerator(IAA) accelerated compression
-# and decompression. (Since 9.1)
+# based on the deflate compression algorithm and use the Intel
+# In-Memory Analytics Accelerator(IAA) accelerated compression and
+# decompression. (Since 9.1)
#
# @uadk: use UADK library compression method. (Since 9.1)
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
+ 'prefix': 'MULTIFD_COMPRESSION',
'data': [ 'none', 'zlib',
{ 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
+ { 'name': 'qatzip', 'if': 'CONFIG_QATZIP'},
{ 'name': 'qpl', 'if': 'CONFIG_QPL' },
{ 'name': 'uadk', 'if': 'CONFIG_UADK' } ] }
@@ -607,9 +617,48 @@
# or COLO.
#
# (since 8.2)
+#
+# @cpr-transfer: This mode allows the user to transfer a guest to a
+# new QEMU instance on the same host with minimal guest pause
+# time by preserving guest RAM in place. Devices and their pinned
+# pages will also be preserved in a future QEMU release.
+#
+# The user starts new QEMU on the same host as old QEMU, with
+# command-line arguments to create the same machine, plus the
+# -incoming option for the main migration channel, like normal
+# live migration. In addition, the user adds a second -incoming
+# option with channel type "cpr". This CPR channel must support
+# file descriptor transfer with SCM_RIGHTS, i.e. it must be a
+# UNIX domain socket.
+#
+# To initiate CPR, the user issues a migrate command to old QEMU,
+# adding a second migration channel of type "cpr" in the channels
+# argument. Old QEMU stops the VM, saves state to the migration
+# channels, and enters the postmigrate state. Execution resumes
+# in new QEMU.
+#
+# New QEMU reads the CPR channel before opening a monitor, hence
+# the CPR channel cannot be specified in the list of channels for
+# a migrate-incoming command. It may only be specified on the
+# command line.
+#
+# The main channel address cannot be a file type, and for an
+# inet socket, the port cannot be 0 (meaning dynamically choose
+# a port).
+#
+# Memory-backend objects must have the share=on attribute, but
+# memory-backend-epc is not supported. The VM must be started
+# with the '-machine aux-ram-share=on' option.
+#
+# When using -incoming defer, you must issue the migrate command
+# to old QEMU before issuing any monitor commands to new QEMU.
+# However, new QEMU does not open and read the migration stream
+# until you issue the migrate incoming command.
+#
+# (since 10.0)
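+#
+# As a minimal illustration (socket paths are hypothetical, and the
+# channel addresses assume the usual unix-socket form of
+# @MigrationAddress), CPR could be initiated on old QEMU with:
+#
+# .. qmp-example::
+#
+# -> { "execute": "migrate",
+# "arguments": { "channels": [
+# { "channel-type": "main",
+# "addr": { "transport": "socket", "type": "unix",
+# "path": "/tmp/main.sock" } },
+# { "channel-type": "cpr",
+# "addr": { "transport": "socket", "type": "unix",
+# "path": "/tmp/cpr.sock" } } ] } }
+# <- { "return": {} }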
##
{ 'enum': 'MigMode',
- 'data': [ 'normal', 'cpr-reboot' ] }
+ 'data': [ 'normal', 'cpr-reboot', 'cpr-transfer' ] }
##
# @ZeroPageDetection:
@@ -648,8 +697,8 @@
# @alias: An alias name for migration (for example the bitmap name on
# the opposite site).
#
-# @transform: Allows the modification of the migrated bitmap. (since
-# 6.0)
+# @transform: Allows the modification of the migrated bitmap.
+# (since 6.0)
#
# Since: 5.2
##
@@ -711,9 +760,9 @@
# auto-converge detects that migration is not making progress.
# The default value is 10. (Since 2.7)
#
-# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
-# the tail stage of throttling, the Guest is very sensitive to CPU
-# percentage while the @cpu-throttle -increment is excessive
+# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
+# At the tail stage of throttling, the Guest is very sensitive to
+# CPU percentage while the @cpu-throttle -increment is excessive
# usually at tail stage. If this parameter is true, we will
# compute the ideal CPU percentage used by the Guest, which may
# exactly make the dirty rate match the dirty rate threshold.
@@ -721,8 +770,8 @@
# specified by @cpu-throttle-increment and the one generated by
# ideal CPU percentage. Therefore, it is compatible to
# traditional throttling, meanwhile the throttle increment won't
-# be excessive at tail stage. The default value is false. (Since
-# 5.1)
+# be excessive at tail stage. The default value is false.
+# (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
@@ -752,10 +801,10 @@
# (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
-# migration can use during switchover phase. NOTE! This does not
-# limit the bandwidth during switchover, but only for calculations
-# when making decisions to switchover. By default, this value is
-# zero, which means QEMU will estimate the bandwidth
+# migration can use during switchover phase. **Note:** this does
+# not limit the bandwidth during switchover, but only for
+# calculations when making decisions to switchover. By default,
+# this value is zero, which means QEMU will estimate the bandwidth
# automatically. This can be set when the estimated value is not
# accurate, while the user is able to guarantee such bandwidth is
# available when switching over. When specified correctly, this
@@ -790,13 +839,18 @@
# migration, the compression level is an integer between 0 and 9,
# where 0 means no compression, 1 means the best compression
# speed, and 9 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
+#
+# @multifd-qatzip-level: Set the compression level to be used in live
+# migration. The level is an integer between 1 and 9, where 1 means
+# the best compression speed, and 9 means the best compression
+# ratio which will consume more CPU. Defaults to 1. (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
# migration, the compression level is an integer between 0 and 20,
# where 0 means no compression, 1 means the best compression
# speed, and 20 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
# aliases for the purpose of dirty bitmap migration. Such aliases
@@ -852,6 +906,7 @@
'xbzrle-cache-size', 'max-postcopy-bandwidth',
'max-cpu-throttle', 'multifd-compression',
'multifd-zlib-level', 'multifd-zstd-level',
+ 'multifd-qatzip-level',
'block-bitmap-mapping',
{ 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
'vcpu-dirty-limit',
@@ -886,9 +941,9 @@
# auto-converge detects that migration is not making progress.
# The default value is 10. (Since 2.7)
#
-# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
-# the tail stage of throttling, the Guest is very sensitive to CPU
-# percentage while the @cpu-throttle -increment is excessive
+# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
+# At the tail stage of throttling, the Guest is very sensitive to
+# CPU percentage while the @cpu-throttle -increment is excessive
# usually at tail stage. If this parameter is true, we will
# compute the ideal CPU percentage used by the Guest, which may
# exactly make the dirty rate match the dirty rate threshold.
@@ -896,8 +951,8 @@
# specified by @cpu-throttle-increment and the one generated by
# ideal CPU percentage. Therefore, it is compatible to
# traditional throttling, meanwhile the throttle increment won't
-# be excessive at tail stage. The default value is false. (Since
-# 5.1)
+# be excessive at tail stage. The default value is false.
+# (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
@@ -927,10 +982,10 @@
# (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
-# migration can use during switchover phase. NOTE! This does not
-# limit the bandwidth during switchover, but only for calculations
-# when making decisions to switchover. By default, this value is
-# zero, which means QEMU will estimate the bandwidth
+# migration can use during switchover phase. **Note:** this does
+# not limit the bandwidth during switchover, but only for
+# calculations when making decisions to switchover. By default,
+# this value is zero, which means QEMU will estimate the bandwidth
# automatically. This can be set when the estimated value is not
# accurate, while the user is able to guarantee such bandwidth is
# available when switching over. When specified correctly, this
@@ -965,13 +1020,18 @@
# migration, the compression level is an integer between 0 and 9,
# where 0 means no compression, 1 means the best compression
# speed, and 9 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
+#
+# @multifd-qatzip-level: Set the compression level to be used in live
+# migration. The level is an integer between 1 and 9, where 1 means
+# the best compression speed, and 9 means the best compression
+# ratio which will consume more CPU. Defaults to 1. (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
# migration, the compression level is an integer between 0 and 20,
# where 0 means no compression, 1 means the best compression
# speed, and 20 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
# aliases for the purpose of dirty bitmap migration. Such aliases
@@ -1040,6 +1100,7 @@
'*max-cpu-throttle': 'uint8',
'*multifd-compression': 'MultiFDCompression',
'*multifd-zlib-level': 'uint8',
+ '*multifd-qatzip-level': 'uint8',
'*multifd-zstd-level': 'uint8',
'*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
'*x-vcpu-dirty-limit-period': { 'type': 'uint64',
@@ -1087,16 +1148,16 @@
# percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
-# throttled when migration auto-converge is activated. (Since
-# 2.7)
+# throttled when migration auto-converge is activated.
+# (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
# auto-converge detects that migration is not making progress.
# (Since 2.7)
#
-# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
-# the tail stage of throttling, the Guest is very sensitive to CPU
-# percentage while the @cpu-throttle -increment is excessive
+# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
+# At the tail stage of throttling, the Guest is very sensitive to
+# CPU percentage while the @cpu-throttle -increment is excessive
# usually at tail stage. If this parameter is true, we will
# compute the ideal CPU percentage used by the Guest, which may
# exactly make the dirty rate match the dirty rate threshold.
@@ -1104,8 +1165,8 @@
# specified by @cpu-throttle-increment and the one generated by
# ideal CPU percentage. Therefore, it is compatible to
# traditional throttling, meanwhile the throttle increment won't
-# be excessive at tail stage. The default value is false. (Since
-# 5.1)
+# be excessive at tail stage. The default value is false.
+# (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
@@ -1131,10 +1192,10 @@
# (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
-# migration can use during switchover phase. NOTE! This does not
-# limit the bandwidth during switchover, but only for calculations
-# when making decisions to switchover. By default, this value is
-# zero, which means QEMU will estimate the bandwidth
+# migration can use during switchover phase. **Note:** this does
+# not limit the bandwidth during switchover, but only for
+# calculations when making decisions to switchover. By default,
+# this value is zero, which means QEMU will estimate the bandwidth
# automatically. This can be set when the estimated value is not
# accurate, while the user is able to guarantee such bandwidth is
# available when switching over. When specified correctly, this
@@ -1169,13 +1230,18 @@
# migration, the compression level is an integer between 0 and 9,
# where 0 means no compression, 1 means the best compression
# speed, and 9 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
+#
+# @multifd-qatzip-level: Set the compression level to be used in live
+# migration. The level is an integer between 1 and 9, where 1 means
+# the best compression speed, and 9 means the best compression
+# ratio which will consume more CPU. Defaults to 1. (Since 9.2)
#
# @multifd-zstd-level: Set the compression level to be used in live
# migration, the compression level is an integer between 0 and 20,
# where 0 means no compression, 1 means the best compression
# speed, and 20 means best compression ratio which will consume
-# more CPU. Defaults to 1. (Since 5.0)
+# more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
# aliases for the purpose of dirty bitmap migration. Such aliases
@@ -1201,7 +1267,7 @@
# Defaults to 1. (Since 8.1)
#
# @mode: Migration mode. See description in @MigMode. Default is
-# 'normal'. (Since 8.2)
+# 'normal'. (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages.
# See description in @ZeroPageDetection. Default is 'multifd'.
@@ -1241,6 +1307,7 @@
'*max-cpu-throttle': 'uint8',
'*multifd-compression': 'MultiFDCompression',
'*multifd-zlib-level': 'uint8',
+ '*multifd-qatzip-level': 'uint8',
'*multifd-zstd-level': 'uint8',
'*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
'*x-vcpu-dirty-limit-period': { 'type': 'uint64',
@@ -1253,7 +1320,7 @@
##
# @query-migrate-parameters:
#
-# Returns information about the current migration parameters
+# Return information about the current migration parameters
#
# Returns: @MigrationParameters
#
@@ -1433,7 +1500,7 @@
##
# @x-colo-lost-heartbeat:
#
-# Tell qemu that heartbeat is lost, request it to do takeover
+# Tell QEMU that heartbeat is lost, request it to do takeover
# procedures. If this command is sent to the PVM, the Primary side
# will exit COLO mode. If sent to the Secondary, the Secondary side
# will run failover work, then takes over server operation to become
@@ -1457,10 +1524,12 @@
##
# @migrate_cancel:
#
-# Cancel the current executing migration process.
+# Cancel the currently executing migration process. Allows a new
+# migration to be started right after. When postcopy-ram is in use,
+# cancelling is not allowed after the postcopy phase has started.
#
-# .. note:: This command succeeds even if there is no migration process
-# running.
+# .. note:: This command succeeds even if there is no migration
+# process running.
#
# Since: 0.14
#
@@ -1553,11 +1622,12 @@
# The migration channel-type request options.
#
# @main: Main outbound migration channel.
+# @cpr: Checkpoint and restart state channel.
#
# Since: 8.1
##
{ 'enum': 'MigrationChannelType',
- 'data': [ 'main' ] }
+ 'data': [ 'main', 'cpr' ] }
##
# @MigrationChannel:
@@ -1590,6 +1660,10 @@
#
# @resume: resume one paused migration, default "off". (since 3.0)
#
+# Features:
+#
+# @deprecated: Argument @detach is deprecated.
+#
# Since: 0.14
#
# .. admonition:: Notes
@@ -1598,19 +1672,14 @@
# migration's progress and final result (this information is
# provided by the 'status' member).
#
-# 2. All boolean arguments default to false.
-#
-# 3. The user Monitor's "detach" argument is invalid in QMP and
-# should not be used.
-#
-# 4. The uri argument should have the Uniform Resource Identifier
-# of default destination VM. This connection will be bound to
+# 2. The uri argument should have the Uniform Resource Identifier
+# of default destination VM. This connection will be bound to
# default network.
#
-# 5. For now, number of migration streams is restricted to one,
+# 3. For now, number of migration streams is restricted to one,
# i.e. number of items in 'channels' list is just 1.
#
-# 6. The 'uri' and 'channels' arguments are mutually exclusive;
+# 4. The 'uri' and 'channels' arguments are mutually exclusive;
# exactly one of the two should be present.
#
# .. qmp-example::
@@ -1650,18 +1719,18 @@
# "filename": "/tmp/migfile",
# "offset": "0x1000" } } ] } }
# <- { "return": {} }
-#
##
{ 'command': 'migrate',
'data': {'*uri': 'str',
'*channels': [ 'MigrationChannel' ],
- '*detach': 'bool', '*resume': 'bool' } }
+ '*detach': { 'type': 'bool', 'features': [ 'deprecated' ] },
+ '*resume': 'bool' } }
##
# @migrate-incoming:
#
-# Start an incoming migration, the qemu must have been started with
-# -incoming defer
+# Start an incoming migration. QEMU must have been started with
+# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
# address to listen on
@@ -1671,7 +1740,8 @@
#
# @exit-on-error: Exit on incoming migration failure. Default true.
# When set to false, the failure triggers a MIGRATION event, and
-# error details could be retrieved with query-migrate. (since 9.1)
+# error details could be retrieved with query-migrate.
+# (since 9.1)
#
# Since: 2.3
#
@@ -1938,9 +2008,9 @@
# @UNPLUG_PRIMARY:
#
# Emitted from source side of a migration when migration state is
-# WAIT_UNPLUG. Device was unplugged by guest operating system. Device
-# resources in QEMU are kept on standby to be able to re-plug it in
-# case of migration failure.
+# WAIT_UNPLUG. Device was unplugged by guest operating system.
+# Device resources in QEMU are kept on standby to be able to re-plug
+# it in case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
@@ -2084,16 +2154,16 @@
# This mode tracks page modification per each vCPU separately. It
# requires that KVM accelerator property "dirty-ring-size" is set.
#
-# @calc-time: time period for which dirty page rate is calculated.
-# By default it is specified in seconds, but the unit can be set
+# @calc-time: time period for which dirty page rate is calculated. By
+# default it is specified in seconds, but the unit can be set
# explicitly with @calc-time-unit. Note that larger @calc-time
# values will typically result in smaller dirty page rates because
-# page dirtying is a one-time event. Once some page is counted
-# as dirty during @calc-time period, further writes to this page
-# will not increase dirty page rate anymore.
+# page dirtying is a one-time event. Once some page is counted as
+# dirty during @calc-time period, further writes to this page will
+# not increase dirty page rate anymore.
#
-# @calc-time-unit: time unit in which @calc-time is specified.
-# By default it is seconds. (Since 8.2)
+# @calc-time-unit: time unit in which @calc-time is specified. By
+# default it is seconds. (Since 8.2)
#
# @sample-pages: number of sampled pages per each GiB of guest memory.
# Default value is 512. For 4KiB guest pages this corresponds to
@@ -2224,7 +2294,7 @@
##
# @query-vcpu-dirty-limit:
#
-# Returns information about virtual CPU dirty page rate limits, if
+# Return information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
@@ -2257,14 +2327,19 @@
##
# @query-migrationthreads:
#
-# Returns information of migration threads
+# Return information of migration threads
+#
+# Features:
+#
+# @deprecated: This command is deprecated with no replacement yet.
#
# Returns: @MigrationThreadInfo
#
# Since: 7.2
##
{ 'command': 'query-migrationthreads',
- 'returns': ['MigrationThreadInfo'] }
+ 'returns': ['MigrationThreadInfo'],
+ 'features': ['deprecated'] }
##
# @snapshot-save:
diff --git a/qapi/misc-arm.json b/qapi/misc-arm.json
new file mode 100644
index 0000000..f534137
--- /dev/null
+++ b/qapi/misc-arm.json
@@ -0,0 +1,49 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+##
+# @GICCapability:
+#
+# The struct describes the capability bits for a specific GIC
+# (Generic Interrupt Controller) version. These bits are decided not
+# only by the QEMU/KVM software version, but also by the hardware
+# that the program is running on.
+#
+# @version: version of GIC to be described. Currently, only 2 and 3
+# are supported.
+#
+# @emulated: whether current QEMU/hardware supports emulated GIC
+# device in user space.
+#
+# @kernel: whether current QEMU/hardware supports hardware accelerated
+# GIC device in kernel.
+#
+# Since: 2.6
+##
+{ 'struct': 'GICCapability',
+ 'data': { 'version': 'int',
+ 'emulated': 'bool',
+ 'kernel': 'bool' } }
+
+##
+# @query-gic-capabilities:
+#
+# Return a list of GICCapability objects, each describing the
+# capability bits of a supported GIC version.
+#
+# On non-ARM targets this command will report an error as the GIC
+# technology is not applicable.
+#
+# Returns: a list of GICCapability objects.
+#
+# Since: 2.6
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-gic-capabilities" }
+# <- { "return": [{ "version": 2, "emulated": true, "kernel": false },
+# { "version": 3, "emulated": false, "kernel": true } ] }
+##
+{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'] }
diff --git a/qapi/misc-i386.json b/qapi/misc-i386.json
new file mode 100644
index 0000000..5fefa0a
--- /dev/null
+++ b/qapi/misc-i386.json
@@ -0,0 +1,486 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+##
+# @rtc-reset-reinjection:
+#
+# This command will reset the RTC interrupt reinjection backlog. Can
+# be used if another mechanism to synchronize guest time is in effect,
+# for example QEMU guest agent's guest-set-time command.
+#
+# This command is only applicable to x86 machines with an RTC; on
+# other machines it will silently return without performing any
+# action.
+#
+# Since: 2.1
+#
+# .. qmp-example::
+#
+# -> { "execute": "rtc-reset-reinjection" }
+# <- { "return": {} }
+##
+{ 'command': 'rtc-reset-reinjection' }
+
+##
+# @SevState:
+#
+# An enumeration of SEV state information used during @query-sev.
+#
+# @uninit: The guest is uninitialized.
+#
+# @launch-update: The guest is currently being launched; plaintext
+# data and register state is being imported.
+#
+# @launch-secret: The guest is currently being launched; ciphertext
+# data is being imported.
+#
+# @running: The guest is fully launched or migrated in.
+#
+# @send-update: The guest is currently being migrated out to another
+# machine.
+#
+# @receive-update: The guest is currently being migrated from another
+# machine.
+#
+# Since: 2.12
+##
+{ 'enum': 'SevState',
+ 'data': ['uninit', 'launch-update', 'launch-secret', 'running',
+ 'send-update', 'receive-update' ] }
+
+##
+# @SevGuestType:
+#
+# An enumeration indicating the type of SEV guest being run.
+#
+# @sev: The guest is a legacy SEV or SEV-ES guest.
+#
+# @sev-snp: The guest is an SEV-SNP guest.
+#
+# Since: 6.2
+##
+{ 'enum': 'SevGuestType',
+ 'data': [ 'sev', 'sev-snp' ] }
+
+##
+# @SevGuestInfo:
+#
+# Information specific to legacy SEV/SEV-ES guests.
+#
+# @policy: SEV policy value
+#
+# @handle: SEV firmware handle
+#
+# Since: 2.12
+##
+{ 'struct': 'SevGuestInfo',
+ 'data': { 'policy': 'uint32',
+ 'handle': 'uint32' } }
+
+##
+# @SevSnpGuestInfo:
+#
+# Information specific to SEV-SNP guests.
+#
+# @snp-policy: SEV-SNP policy value
+#
+# Since: 9.1
+##
+{ 'struct': 'SevSnpGuestInfo',
+ 'data': { 'snp-policy': 'uint64' } }
+
+##
+# @SevInfo:
+#
+# Information about Secure Encrypted Virtualization (SEV) support
+#
+# @enabled: true if SEV is active
+#
+# @api-major: SEV API major version
+#
+# @api-minor: SEV API minor version
+#
+# @build-id: SEV FW build id
+#
+# @state: SEV guest state
+#
+# @sev-type: Type of SEV guest being run
+#
+# Since: 2.12
+##
+{ 'union': 'SevInfo',
+ 'base': { 'enabled': 'bool',
+ 'api-major': 'uint8',
+ 'api-minor' : 'uint8',
+ 'build-id' : 'uint8',
+ 'state' : 'SevState',
+ 'sev-type' : 'SevGuestType' },
+ 'discriminator': 'sev-type',
+ 'data': {
+ 'sev': 'SevGuestInfo',
+ 'sev-snp': 'SevSnpGuestInfo' } }
+
+
+##
+# @query-sev:
+#
+# Return information about SEV/SEV-ES/SEV-SNP.
+#
+# If unavailable due to an incompatible configuration, the returned
+# @enabled field is set to 'false' and the state of all other fields
+# is unspecified.
+#
+# Returns: @SevInfo
+#
+# Since: 2.12
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-sev" }
+# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0,
+# "build-id" : 0, "policy" : 0, "state" : "running",
+# "handle" : 1 } }
+##
+{ 'command': 'query-sev', 'returns': 'SevInfo' }
+
+##
+# @SevLaunchMeasureInfo:
+#
+# SEV Guest Launch measurement information
+#
+# @data: the measurement value encoded in base64
+#
+# Since: 2.12
+##
+{ 'struct': 'SevLaunchMeasureInfo', 'data': {'data': 'str'} }
+
+##
+# @query-sev-launch-measure:
+#
+# Query the SEV/SEV-ES guest launch information.
+#
+# This is only valid on x86 machines configured with KVM and the
+# 'sev-guest' confidential virtualization object. The launch
+# measurement for SEV-SNP guests is only available within the guest.
+#
+# Returns: The @SevLaunchMeasureInfo for the guest
+#
+# Errors:
+# - If the launch measurement is unavailable, either due to an
+# invalid guest configuration or if the guest has not reached
+# the required SEV state, GenericError
+#
+# Since: 2.12
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-sev-launch-measure" }
+# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } }
+##
+{ 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo' }
+
+##
+# @SevCapability:
+#
+# The struct describes capability for a Secure Encrypted
+# Virtualization feature.
+#
+# @pdh: Platform Diffie-Hellman key (base64 encoded)
+#
+# @cert-chain: PDH certificate chain (base64 encoded)
+#
+# @cpu0-id: Unique ID of CPU0 (base64 encoded) (since 7.1)
+#
+# @cbitpos: C-bit location in page table entry
+#
+# @reduced-phys-bits: The number by which physical address bits are
+# reduced when SEV is enabled
+#
+# Since: 2.12
+##
+{ 'struct': 'SevCapability',
+ 'data': { 'pdh': 'str',
+ 'cert-chain': 'str',
+ 'cpu0-id': 'str',
+ 'cbitpos': 'int',
+ 'reduced-phys-bits': 'int'} }
+
+##
+# @query-sev-capabilities:
+#
+# Get SEV capabilities.
+#
+# This is only supported on AMD X86 platforms with KVM enabled.
+#
+# Returns: SevCapability objects.
+#
+# Errors:
+# - If SEV is not available on the platform, GenericError
+#
+# Since: 2.12
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-sev-capabilities" }
+# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE",
+# "cpu0-id": "2lvmGwo+...61iEinw==",
+# "cbitpos": 47, "reduced-phys-bits": 1}}
+##
+{ 'command': 'query-sev-capabilities', 'returns': 'SevCapability' }
+
+##
+# @sev-inject-launch-secret:
+#
+# This command injects a secret blob into the memory of an
+# SEV/SEV-ES guest.
+#
+# This is only valid on x86 machines configured with KVM and the
+# 'sev-guest' confidential virtualization object. SEV-SNP guests do
+# not support launch secret injection.
+#
+# @packet-header: the launch secret packet header encoded in base64
+#
+# @secret: the launch secret data to be injected encoded in base64
+#
+# @gpa: the guest physical address where secret will be injected.
+#
+# Errors:
+# - If launch secret injection is not possible, either due to
+# an invalid guest configuration, or if the guest has not
+# reached the required SEV state, GenericError
+#
+# Since: 6.0
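+#
+# The values below are illustrative only; real packet headers and
+# secrets are opaque base64 blobs produced by the SEV firmware
+# tooling:
+#
+# .. qmp-example::
+#
+# -> { "execute": "sev-inject-launch-secret",
+# "arguments": { "packet-header": "AAAAAAAA",
+# "secret": "BBBBBBBB" } }
+# <- { "return": {} }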
+##
+{ 'command': 'sev-inject-launch-secret',
+ 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' } }
+
+##
+# @SevAttestationReport:
+#
+# The struct describes attestation report for a Secure Encrypted
+# Virtualization feature.
+#
+# @data: guest attestation report (base64 encoded)
+#
+# Since: 6.1
+##
+{ 'struct': 'SevAttestationReport',
+ 'data': { 'data': 'str'} }
+
+##
+# @query-sev-attestation-report:
+#
+# This command is used to get the SEV attestation report.
+#
+# This is only valid on x86 machines configured with KVM and the
+# 'sev-guest' confidential virtualization object. The attestation
+# report for SEV-SNP guests is only available within the guest.
+#
+# @mnonce: a random 16-byte value encoded in base64 (it will be
+# included in the report)
+#
+# Returns: SevAttestationReport objects.
+#
+# Errors:
+# - This will return an error if the attestation report is
+# unavailable, either due to an invalid guest configuration
+# or if the guest has not reached the required SEV state,
+# GenericError
+#
+# Since: 6.1
+#
+# .. qmp-example::
+#
+# -> { "execute" : "query-sev-attestation-report",
+# "arguments": { "mnonce": "aaaaaaa" } }
+# <- { "return" : { "data": "aaaaaaaabbbddddd"} }
+##
+{ 'command': 'query-sev-attestation-report',
+ 'data': { 'mnonce': 'str' },
+ 'returns': 'SevAttestationReport' }
+
+##
+# @SgxEpcSection:
+#
+# Information about an Intel SGX EPC section
+#
+# @node: the numa node
+#
+# @size: the size of EPC section
+#
+# Since: 7.0
+##
+{ 'struct': 'SgxEpcSection',
+ 'data': { 'node': 'int',
+ 'size': 'uint64'}}
+
+##
+# @SgxInfo:
+#
+# Information about Intel Software Guard Extensions (SGX) support
+#
+# @sgx: true if SGX is supported
+#
+# @sgx1: true if SGX1 is supported
+#
+# @sgx2: true if SGX2 is supported
+#
+# @flc: true if FLC is supported
+#
+# @sections: The EPC sections information (Since: 7.0)
+#
+# Since: 6.2
+##
+{ 'struct': 'SgxInfo',
+ 'data': { 'sgx': 'bool',
+ 'sgx1': 'bool',
+ 'sgx2': 'bool',
+ 'flc': 'bool',
+ 'sections': ['SgxEpcSection']} }
+
+##
+# @query-sgx:
+#
+# Return information about the guest's configured SGX capabilities
+#
+# Returns: @SgxInfo
+#
+# Since: 6.2
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-sgx" }
+# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true,
+# "flc": true,
+# "sections": [{"node": 0, "size": 67108864},
+# {"node": 1, "size": 29360128}]} }
+##
+{ 'command': 'query-sgx', 'returns': 'SgxInfo' }
+
+##
+# @query-sgx-capabilities:
+#
+# Return information about the SGX capabilities of the host
+#
+# Returns: @SgxInfo
+#
+# Since: 6.2
+#
+# .. qmp-example::
+#
+# -> { "execute": "query-sgx-capabilities" }
+# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true,
+# "flc": true,
+# "section" : [{"node": 0, "size": 67108864},
+# {"node": 1, "size": 29360128}]} }
+##
+{ 'command': 'query-sgx-capabilities', 'returns': 'SgxInfo' }
+
+##
+# @EvtchnPortType:
+#
+# An enumeration of Xen event channel port types.
+#
+# @closed: The port is unused.
+#
+# @unbound: The port is allocated and ready to be bound.
+#
+# @interdomain: The port is connected as an interdomain interrupt.
+#
+# @pirq: The port is bound to a physical IRQ (PIRQ).
+#
+# @virq: The port is bound to a virtual IRQ (VIRQ).
+#
+# @ipi: The port is an inter-processor interrupt (IPI).
+#
+# Since: 8.0
+##
+{ 'enum': 'EvtchnPortType',
+ 'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'] }
+
+##
+# @EvtchnInfo:
+#
+# Information about a Xen event channel port
+#
+# @port: the port number
+#
+# @vcpu: target vCPU for this port
+#
+# @type: the port type
+#
+# @remote-domain: remote domain for interdomain ports
+#
+# @target: remote port ID, or virq/pirq number
+#
+# @pending: port is currently active pending delivery
+#
+# @masked: port is masked
+#
+# Since: 8.0
+##
+{ 'struct': 'EvtchnInfo',
+ 'data': {'port': 'uint16',
+ 'vcpu': 'uint32',
+ 'type': 'EvtchnPortType',
+ 'remote-domain': 'str',
+ 'target': 'uint16',
+ 'pending': 'bool',
+ 'masked': 'bool'} }
+
+
+##
+# @xen-event-list:
+#
+# Query the Xen event channels opened by the guest.
+#
+# Returns: list of open event channel ports.
+#
+# Since: 8.0
+#
+# .. qmp-example::
+#
+# -> { "execute": "xen-event-list" }
+# <- { "return": [
+# {
+# "pending": false,
+# "port": 1,
+# "vcpu": 1,
+# "remote-domain": "qemu",
+# "masked": false,
+# "type": "interdomain",
+# "target": 1
+# },
+# {
+# "pending": false,
+# "port": 2,
+# "vcpu": 0,
+# "remote-domain": "",
+# "masked": false,
+# "type": "virq",
+# "target": 0
+# }
+# ]
+# }
+##
+{ 'command': 'xen-event-list',
+ 'returns': ['EvtchnInfo'] }
+
+##
+# @xen-event-inject:
+#
+# Inject a Xen event channel port (interrupt) to the guest.
+#
+# @port: The port number
+#
+# Since: 8.0
+#
+# .. qmp-example::
+#
+# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } }
+# <- { "return": { } }
+##
+{ 'command': 'xen-event-inject',
+ 'data': { 'port': 'uint32' } }
diff --git a/qapi/misc-target.json b/qapi/misc-target.json
deleted file mode 100644
index 8d70bd2..0000000
--- a/qapi/misc-target.json
+++ /dev/null
@@ -1,528 +0,0 @@
-# -*- Mode: Python -*-
-# vim: filetype=python
-#
-
-##
-# @rtc-reset-reinjection:
-#
-# This command will reset the RTC interrupt reinjection backlog. Can
-# be used if another mechanism to synchronize guest time is in effect,
-# for example QEMU guest agent's guest-set-time command.
-#
-# Since: 2.1
-#
-# .. qmp-example::
-#
-# -> { "execute": "rtc-reset-reinjection" }
-# <- { "return": {} }
-##
-{ 'command': 'rtc-reset-reinjection',
- 'if': 'TARGET_I386' }
-
-##
-# @SevState:
-#
-# An enumeration of SEV state information used during @query-sev.
-#
-# @uninit: The guest is uninitialized.
-#
-# @launch-update: The guest is currently being launched; plaintext
-# data and register state is being imported.
-#
-# @launch-secret: The guest is currently being launched; ciphertext
-# data is being imported.
-#
-# @running: The guest is fully launched or migrated in.
-#
-# @send-update: The guest is currently being migrated out to another
-# machine.
-#
-# @receive-update: The guest is currently being migrated from another
-# machine.
-#
-# Since: 2.12
-##
-{ 'enum': 'SevState',
- 'data': ['uninit', 'launch-update', 'launch-secret', 'running',
- 'send-update', 'receive-update' ],
- 'if': 'TARGET_I386' }
-
-##
-# @SevGuestType:
-#
-# An enumeration indicating the type of SEV guest being run.
-#
-# @sev: The guest is a legacy SEV or SEV-ES guest.
-#
-# @sev-snp: The guest is an SEV-SNP guest.
-#
-# Since: 6.2
-##
-{ 'enum': 'SevGuestType',
- 'data': [ 'sev', 'sev-snp' ],
- 'if': 'TARGET_I386' }
-
-##
-# @SevGuestInfo:
-#
-# Information specific to legacy SEV/SEV-ES guests.
-#
-# @policy: SEV policy value
-#
-# @handle: SEV firmware handle
-#
-# Since: 2.12
-##
-{ 'struct': 'SevGuestInfo',
- 'data': { 'policy': 'uint32',
- 'handle': 'uint32' },
- 'if': 'TARGET_I386' }
-
-##
-# @SevSnpGuestInfo:
-#
-# Information specific to SEV-SNP guests.
-#
-# @snp-policy: SEV-SNP policy value
-#
-# Since: 9.1
-##
-{ 'struct': 'SevSnpGuestInfo',
- 'data': { 'snp-policy': 'uint64' },
- 'if': 'TARGET_I386' }
-
-##
-# @SevInfo:
-#
-# Information about Secure Encrypted Virtualization (SEV) support
-#
-# @enabled: true if SEV is active
-#
-# @api-major: SEV API major version
-#
-# @api-minor: SEV API minor version
-#
-# @build-id: SEV FW build id
-#
-# @state: SEV guest state
-#
-# @sev-type: Type of SEV guest being run
-#
-# Since: 2.12
-##
-{ 'union': 'SevInfo',
- 'base': { 'enabled': 'bool',
- 'api-major': 'uint8',
- 'api-minor' : 'uint8',
- 'build-id' : 'uint8',
- 'state' : 'SevState',
- 'sev-type' : 'SevGuestType' },
- 'discriminator': 'sev-type',
- 'data': {
- 'sev': 'SevGuestInfo',
- 'sev-snp': 'SevSnpGuestInfo' },
- 'if': 'TARGET_I386' }
-
-
-##
-# @query-sev:
-#
-# Returns information about SEV
-#
-# Returns: @SevInfo
-#
-# Since: 2.12
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-sev" }
-# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0,
-# "build-id" : 0, "policy" : 0, "state" : "running",
-# "handle" : 1 } }
-##
-{ 'command': 'query-sev', 'returns': 'SevInfo',
- 'if': 'TARGET_I386' }
-
-##
-# @SevLaunchMeasureInfo:
-#
-# SEV Guest Launch measurement information
-#
-# @data: the measurement value encoded in base64
-#
-# Since: 2.12
-##
-{ 'struct': 'SevLaunchMeasureInfo', 'data': {'data': 'str'},
- 'if': 'TARGET_I386' }
-
-##
-# @query-sev-launch-measure:
-#
-# Query the SEV guest launch information.
-#
-# Returns: The @SevLaunchMeasureInfo for the guest
-#
-# Since: 2.12
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-sev-launch-measure" }
-# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } }
-##
-{ 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo',
- 'if': 'TARGET_I386' }
-
-##
-# @SevCapability:
-#
-# The struct describes capability for a Secure Encrypted
-# Virtualization feature.
-#
-# @pdh: Platform Diffie-Hellman key (base64 encoded)
-#
-# @cert-chain: PDH certificate chain (base64 encoded)
-#
-# @cpu0-id: Unique ID of CPU0 (base64 encoded) (since 7.1)
-#
-# @cbitpos: C-bit location in page table entry
-#
-# @reduced-phys-bits: Number of physical Address bit reduction when
-# SEV is enabled
-#
-# Since: 2.12
-##
-{ 'struct': 'SevCapability',
- 'data': { 'pdh': 'str',
- 'cert-chain': 'str',
- 'cpu0-id': 'str',
- 'cbitpos': 'int',
- 'reduced-phys-bits': 'int'},
- 'if': 'TARGET_I386' }
-
-##
-# @query-sev-capabilities:
-#
-# This command is used to get the SEV capabilities, and is supported
-# on AMD X86 platforms only.
-#
-# Returns: SevCapability objects.
-#
-# Since: 2.12
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-sev-capabilities" }
-# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE",
-# "cpu0-id": "2lvmGwo+...61iEinw==",
-# "cbitpos": 47, "reduced-phys-bits": 1}}
-##
-{ 'command': 'query-sev-capabilities', 'returns': 'SevCapability',
- 'if': 'TARGET_I386' }
-
-##
-# @sev-inject-launch-secret:
-#
-# This command injects a secret blob into memory of SEV guest.
-#
-# @packet-header: the launch secret packet header encoded in base64
-#
-# @secret: the launch secret data to be injected encoded in base64
-#
-# @gpa: the guest physical address where secret will be injected.
-#
-# Since: 6.0
-##
-{ 'command': 'sev-inject-launch-secret',
- 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' },
- 'if': 'TARGET_I386' }
-
-##
-# @SevAttestationReport:
-#
-# The struct describes attestation report for a Secure Encrypted
-# Virtualization feature.
-#
-# @data: guest attestation report (base64 encoded)
-#
-# Since: 6.1
-##
-{ 'struct': 'SevAttestationReport',
- 'data': { 'data': 'str'},
- 'if': 'TARGET_I386' }
-
-##
-# @query-sev-attestation-report:
-#
-# This command is used to get the SEV attestation report, and is
-# supported on AMD X86 platforms only.
-#
-# @mnonce: a random 16 bytes value encoded in base64 (it will be
-# included in report)
-#
-# Returns: SevAttestationReport objects.
-#
-# Since: 6.1
-#
-# .. qmp-example::
-#
-# -> { "execute" : "query-sev-attestation-report",
-# "arguments": { "mnonce": "aaaaaaa" } }
-# <- { "return" : { "data": "aaaaaaaabbbddddd"} }
-##
-{ 'command': 'query-sev-attestation-report',
- 'data': { 'mnonce': 'str' },
- 'returns': 'SevAttestationReport',
- 'if': 'TARGET_I386' }
-
-##
-# @dump-skeys:
-#
-# Dump guest's storage keys
-#
-# @filename: the path to the file to dump to
-#
-# Since: 2.5
-#
-# .. qmp-example::
-#
-# -> { "execute": "dump-skeys",
-# "arguments": { "filename": "/tmp/skeys" } }
-# <- { "return": {} }
-##
-{ 'command': 'dump-skeys',
- 'data': { 'filename': 'str' },
- 'if': 'TARGET_S390X' }
-
-##
-# @GICCapability:
-#
-# The struct describes capability for a specific GIC (Generic
-# Interrupt Controller) version. These bits are not only decided by
-# QEMU/KVM software version, but also decided by the hardware that the
-# program is running upon.
-#
-# @version: version of GIC to be described. Currently, only 2 and 3
-# are supported.
-#
-# @emulated: whether current QEMU/hardware supports emulated GIC
-# device in user space.
-#
-# @kernel: whether current QEMU/hardware supports hardware accelerated
-# GIC device in kernel.
-#
-# Since: 2.6
-##
-{ 'struct': 'GICCapability',
- 'data': { 'version': 'int',
- 'emulated': 'bool',
- 'kernel': 'bool' },
- 'if': 'TARGET_ARM' }
-
-##
-# @query-gic-capabilities:
-#
-# This command is ARM-only. It will return a list of GICCapability
-# objects that describe its capability bits.
-#
-# Returns: a list of GICCapability objects.
-#
-# Since: 2.6
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-gic-capabilities" }
-# <- { "return": [{ "version": 2, "emulated": true, "kernel": false },
-# { "version": 3, "emulated": false, "kernel": true } ] }
-##
-{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'],
- 'if': 'TARGET_ARM' }
-
-##
-# @SGXEPCSection:
-#
-# Information about intel SGX EPC section info
-#
-# @node: the numa node
-#
-# @size: the size of EPC section
-#
-# Since: 7.0
-##
-{ 'struct': 'SGXEPCSection',
- 'data': { 'node': 'int',
- 'size': 'uint64'}}
-
-##
-# @SGXInfo:
-#
-# Information about intel Safe Guard eXtension (SGX) support
-#
-# @sgx: true if SGX is supported
-#
-# @sgx1: true if SGX1 is supported
-#
-# @sgx2: true if SGX2 is supported
-#
-# @flc: true if FLC is supported
-#
-# @sections: The EPC sections info for guest (Since: 7.0)
-#
-# Since: 6.2
-##
-{ 'struct': 'SGXInfo',
- 'data': { 'sgx': 'bool',
- 'sgx1': 'bool',
- 'sgx2': 'bool',
- 'flc': 'bool',
- 'sections': ['SGXEPCSection']},
- 'if': 'TARGET_I386' }
-
-##
-# @query-sgx:
-#
-# Returns information about SGX
-#
-# Returns: @SGXInfo
-#
-# Since: 6.2
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-sgx" }
-# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true,
-# "flc": true,
-# "sections": [{"node": 0, "size": 67108864},
-# {"node": 1, "size": 29360128}]} }
-##
-{ 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' }
-
-##
-# @query-sgx-capabilities:
-#
-# Returns information from host SGX capabilities
-#
-# Returns: @SGXInfo
-#
-# Since: 6.2
-#
-# .. qmp-example::
-#
-# -> { "execute": "query-sgx-capabilities" }
-# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true,
-# "flc": true,
-# "section" : [{"node": 0, "size": 67108864},
-# {"node": 1, "size": 29360128}]} }
-##
-{ 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' }
-
-
-##
-# @EvtchnPortType:
-#
-# An enumeration of Xen event channel port types.
-#
-# @closed: The port is unused.
-#
-# @unbound: The port is allocated and ready to be bound.
-#
-# @interdomain: The port is connected as an interdomain interrupt.
-#
-# @pirq: The port is bound to a physical IRQ (PIRQ).
-#
-# @virq: The port is bound to a virtual IRQ (VIRQ).
-#
-# @ipi: The post is an inter-processor interrupt (IPI).
-#
-# Since: 8.0
-##
-{ 'enum': 'EvtchnPortType',
- 'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'],
- 'if': 'TARGET_I386' }
-
-##
-# @EvtchnInfo:
-#
-# Information about a Xen event channel port
-#
-# @port: the port number
-#
-# @vcpu: target vCPU for this port
-#
-# @type: the port type
-#
-# @remote-domain: remote domain for interdomain ports
-#
-# @target: remote port ID, or virq/pirq number
-#
-# @pending: port is currently active pending delivery
-#
-# @masked: port is masked
-#
-# Since: 8.0
-##
-{ 'struct': 'EvtchnInfo',
- 'data': {'port': 'uint16',
- 'vcpu': 'uint32',
- 'type': 'EvtchnPortType',
- 'remote-domain': 'str',
- 'target': 'uint16',
- 'pending': 'bool',
- 'masked': 'bool'},
- 'if': 'TARGET_I386' }
-
-
-##
-# @xen-event-list:
-#
-# Query the Xen event channels opened by the guest.
-#
-# Returns: list of open event channel ports.
-#
-# Since: 8.0
-#
-# .. qmp-example::
-#
-# -> { "execute": "xen-event-list" }
-# <- { "return": [
-# {
-# "pending": false,
-# "port": 1,
-# "vcpu": 1,
-# "remote-domain": "qemu",
-# "masked": false,
-# "type": "interdomain",
-# "target": 1
-# },
-# {
-# "pending": false,
-# "port": 2,
-# "vcpu": 0,
-# "remote-domain": "",
-# "masked": false,
-# "type": "virq",
-# "target": 0
-# }
-# ]
-# }
-##
-{ 'command': 'xen-event-list',
- 'returns': ['EvtchnInfo'],
- 'if': 'TARGET_I386' }
-
-##
-# @xen-event-inject:
-#
-# Inject a Xen event channel port (interrupt) to the guest.
-#
-# @port: The port number
-#
-# Since: 8.0
-#
-# .. qmp-example::
-#
-# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } }
-# <- { "return": { } }
-##
-{ 'command': 'xen-event-inject',
- 'data': { 'port': 'uint32' },
- 'if': 'TARGET_I386' }
diff --git a/qapi/misc.json b/qapi/misc.json
index 4a6f3ba..4b9e601 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -26,7 +26,7 @@
# @skipauth: whether to skip authentication. Only applies to "vnc"
# and "spice" protocols
#
-# @tls: whether to perform TLS. Only applies to the "spice" protocol
+# @tls: whether to perform TLS. Only applies to the "spice" protocol
#
# Since: 0.14
#
@@ -101,11 +101,11 @@
##
# @query-iothreads:
#
-# Returns a list of information about each iothread.
+# Return a list of information about each iothread.
#
# .. note:: This list excludes the QEMU main loop thread, which is not
-# declared using the ``-object iothread`` command-line option. It is
-# always the main thread of the process.
+# declared using the ``-object iothread`` command-line option. It
+# is always the main thread of the process.
#
# Returns: a list of @IOThreadInfo for each iothread
#
@@ -141,8 +141,8 @@
# guest remains paused once migration finishes, as if the ``-S``
# option was passed on the command line.
#
-# In the "suspended" state, it will completely stop the VM and cause
-# a transition to the "paused" state. (Since 9.0)
+# In the "suspended" state, it will completely stop the VM and
+# cause a transition to the "paused" state. (Since 9.0)
#
# .. qmp-example::
#
@@ -158,15 +158,15 @@
#
# Since: 0.14
#
-# .. note:: This command will succeed if the guest is currently running.
-# It will also succeed if the guest is in the "inmigrate" state; in
-# this case, the effect of the command is to make sure the guest
-# starts once migration finishes, removing the effect of the ``-S``
-# command line option if it was passed.
+# .. note:: This command will succeed if the guest is currently
+# running. It will also succeed if the guest is in the "inmigrate"
+# state; in this case, the effect of the command is to make sure
+# the guest starts once migration finishes, removing the effect of
+# the ``-S`` command line option if it was passed.
#
# If the VM was previously suspended, and not been reset or woken,
-# this command will transition back to the "suspended" state. (Since
-# 9.0)
+# this command will transition back to the "suspended" state.
+# (Since 9.0)
#
# .. qmp-example::
#
@@ -222,13 +222,13 @@
# .. note:: This command only exists as a stop-gap. Its use is highly
# discouraged. The semantics of this command are not guaranteed:
# this means that command names, arguments and responses can change
-# or be removed at ANY time. Applications that rely on long term
-# stability guarantees should NOT use this command.
+# or be removed at **any** time. Applications that rely on long
+# term stability guarantees should **not** use this command.
#
# Known limitations:
#
-# * This command is stateless, this means that commands that
-# depend on state information (such as getfd) might not work.
+# * This command is stateless, this means that commands that depend
+# on state information (such as getfd) might not work.
#
# * Commands that prompt the user for data don't currently work.
#
@@ -341,7 +341,8 @@
#
# .. note:: The list of fd sets is shared by all monitor connections.
#
-# .. note:: If @fdset-id is not specified, a new fd set will be created.
+# .. note:: If @fdset-id is not specified, a new fd set will be
+# created.
#
# Since: 1.2
#
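
As a quick illustration of the command these notes belong to (a sketch:
the file descriptor itself must be passed over the monitor socket via
SCM_RIGHTS, and the returned numbers are illustrative):

    -> { "execute": "add-fd", "arguments": { "fdset-id": 1 } }
    <- { "return": { "fdset-id": 1, "fd": 3 } }
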
diff --git a/qapi/net.json b/qapi/net.json
index 31b3417..97ea183 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -22,9 +22,9 @@
#
# Since: 0.14
#
-# .. note:: Not all network adapters support setting link status. This
-# command will succeed even if the network adapter does not support
-# link status notification.
+# .. note:: Not all network adapters support setting link status.
+# This command will succeed even if the network adapter does not
+# support link status notification.
#
# .. qmp-example::
#
@@ -150,12 +150,12 @@
# @domainname: guest-visible domain name of the virtual nameserver
# (since 3.0)
#
-# @ipv6-prefix: IPv6 network prefix (default is fec0::) (since 2.6).
-# The network prefix is given in the usual hexadecimal IPv6
-# address notation.
+# @ipv6-prefix: IPv6 network prefix (default is fec0::). The network
+# prefix is given in the usual hexadecimal IPv6 address notation.
+# (since 2.6)
#
-# @ipv6-prefixlen: IPv6 network prefix length (default is 64) (since
-# 2.6)
+# @ipv6-prefixlen: IPv6 network prefix length (default is 64)
+# (since 2.6)
#
# @ipv6-host: guest-visible IPv6 address of the host (since 2.6)
#
@@ -387,8 +387,8 @@
#
# @hubid: hub identifier number
#
-# @netdev: used to connect hub to a netdev instead of a device (since
-# 2.12)
+# @netdev: used to connect hub to a netdev instead of a device
+# (since 2.12)
#
# Since: 1.2
##
@@ -403,7 +403,7 @@
# Connect a client to a netmap-enabled NIC or to a VALE switch port
#
# @ifname: Either the name of an existing network interface supported
-# by netmap, or the name of a VALE port (created on the fly). A
+# by netmap, or the name of a VALE port (created on the fly). A
# VALE port name is in the form 'valeXXX:YYY', where XXX and YYY
# are non-negative integers. XXX identifies a switch and YYY
# identifies a port of the switch. VALE ports having the same XXX
@@ -510,8 +510,8 @@
# @queues: number of queues to be created for multiqueue vhost-vdpa
# (default: 1)
#
-# @x-svq: Start device with (experimental) shadow virtqueue. (Since
-# 7.1) (default: false)
+# @x-svq: Start device with (experimental) shadow virtqueue.
+# (Since 7.1) (default: false)
#
# Features:
#
@@ -535,13 +535,13 @@
# interfaces that are in host mode and also with the host.
#
# @start-address: The starting IPv4 address to use for the interface.
-# Must be in the private IP range (RFC 1918). Must be specified
+# Must be in the private IP range (RFC 1918). Must be specified
# along with @end-address and @subnet-mask. This address is used
# as the gateway address. The subsequent address up to and
# including end-address are placed in the DHCP pool.
#
# @end-address: The DHCP IPv4 range end address to use for the
-# interface. Must be in the private IP range (RFC 1918). Must be
+# interface. Must be in the private IP range (RFC 1918). Must be
# specified along with @start-address and @subnet-mask.
#
# @subnet-mask: The IPv4 subnet mask to use on the interface. Must be
@@ -556,7 +556,7 @@
# network vmnet interface should be added to. If set, no DHCP
# service is provided for this interface and network communication
# is allowed only with other interfaces added to this network
-# identified by the UUID. Requires at least macOS Big Sur 11.0.
+# identified by the UUID. Requires at least macOS Big Sur 11.0.
#
# Since: 7.1
##
@@ -575,20 +575,20 @@
# vmnet (shared mode) network backend.
#
# Allows traffic originating from the vmnet interface to reach the
-# Internet through a network address translator (NAT). The vmnet
+# Internet through a network address translator (NAT). The vmnet
# interface can communicate with the host and with other shared mode
# interfaces on the same subnet. If no DHCP settings, subnet mask and
# IPv6 prefix specified, the interface can communicate with any of
# other interfaces in shared mode.
#
# @start-address: The starting IPv4 address to use for the interface.
-# Must be in the private IP range (RFC 1918). Must be specified
+# Must be in the private IP range (RFC 1918). Must be specified
# along with @end-address and @subnet-mask. This address is used
# as the gateway address. The subsequent address up to and
# including end-address are placed in the DHCP pool.
#
# @end-address: The DHCP IPv4 range end address to use for the
-# interface. Must be in the private IP range (RFC 1918). Must be
+# interface. Must be in the private IP range (RFC 1918). Must be
# specified along with @start-address and @subnet-mask.
#
# @subnet-mask: The IPv4 subnet mask to use on the interface. Must be
@@ -650,15 +650,26 @@
# attempt a reconnect after the given number of seconds. Setting
# this to zero disables this function. (default: 0) (since 8.0)
#
+# @reconnect-ms: For a client socket, if a socket is disconnected, then
+# attempt a reconnect after the given number of milliseconds. Setting
+# this to zero disables this function. This member is mutually
+# exclusive with @reconnect. (default: 0) (Since: 9.2)
+#
# Only SocketAddress types 'unix', 'inet' and 'fd' are supported.
#
+# Features:
+#
+# @deprecated: Member @reconnect is deprecated. Use @reconnect-ms
+# instead.
+#
# Since: 7.2
##
{ 'struct': 'NetdevStreamOptions',
'data': {
'addr': 'SocketAddress',
'*server': 'bool',
- '*reconnect': 'uint32' } }
+ '*reconnect': { 'type': 'int', 'features': [ 'deprecated' ] },
+ '*reconnect-ms': 'int' } }
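
As a rough usage sketch, a client stream netdev using the new
@reconnect-ms member might be added over QMP as follows (the netdev id,
address and 5000 ms value are illustrative):

    -> { "execute": "netdev_add",
         "arguments": { "type": "stream", "id": "net0",
                        "addr": { "type": "inet",
                                  "host": "192.168.0.1", "port": "1234" },
                        "server": false,
                        "reconnect-ms": 5000 } }
    <- { "return": {} }
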
##
# @NetdevDgramOptions:
@@ -703,12 +714,19 @@
# Available netdev drivers.
#
# @l2tpv3: since 2.1
+#
# @vhost-vdpa: since 5.1
+#
# @vmnet-host: since 7.1
+#
# @vmnet-shared: since 7.1
+#
# @vmnet-bridged: since 7.1
+#
# @stream: since 7.2
+#
# @dgram: since 7.2
+#
# @af-xdp: since 8.2
#
# Since: 2.7
@@ -1013,3 +1031,43 @@
##
{ 'event': 'NETDEV_STREAM_DISCONNECTED',
'data': { 'netdev-id': 'str' } }
+
+##
+# @NETDEV_VHOST_USER_CONNECTED:
+#
+# Emitted when the vhost-user chardev is connected
+#
+# @netdev-id: QEMU netdev id that is connected
+#
+# @chardev-id: The character device id used by the QEMU netdev
+#
+# Since: 10.0
+#
+# .. qmp-example::
+#
+# <- { "timestamp": {"seconds": 1739538638, "microseconds": 354181 },
+# "event": "NETDEV_VHOST_USER_CONNECTED",
+# "data": { "netdev-id": "netdev0", "chardev-id": "chr0" } }
+#
+##
+{ 'event': 'NETDEV_VHOST_USER_CONNECTED',
+ 'data': { 'netdev-id': 'str', 'chardev-id': 'str' } }
+
+##
+# @NETDEV_VHOST_USER_DISCONNECTED:
+#
+# Emitted when the vhost-user chardev is disconnected
+#
+# @netdev-id: QEMU netdev id that is disconnected
+#
+# Since: 10.0
+#
+# .. qmp-example::
+#
+# <- { "timestamp": { "seconds": 1739538634, "microseconds": 920450 },
+# "event": "NETDEV_VHOST_USER_DISCONNECTED",
+# "data": { "netdev-id": "netdev0" } }
+#
+##
+{ 'event': 'NETDEV_VHOST_USER_DISCONNECTED',
+ 'data': { 'netdev-id': 'str' } }
diff --git a/qapi/pci.json b/qapi/pci.json
index ec28f1d..dc85a41 100644
--- a/qapi/pci.json
+++ b/qapi/pci.json
@@ -33,6 +33,8 @@
# - 'io' if the region is a PIO region
# - 'memory' if the region is a MMIO region
#
+# @address: memory address
+#
# @size: memory size
#
# @prefetch: if @type is 'memory', true if the memory is prefetchable
@@ -310,6 +312,5 @@
# }
#
# This example has been shortened as the real response is too long.
-#
##
{ 'command': 'query-pci', 'returns': ['PciInfo'] }
diff --git a/qapi/pragma.json b/qapi/pragma.json
index 59fbe74..023a2ef 100644
--- a/qapi/pragma.json
+++ b/qapi/pragma.json
@@ -46,34 +46,27 @@
'BlockdevSnapshotSyncWrapper',
'BlockdevSnapshotWrapper',
'BlockdevVmdkAdapterType',
- 'ChardevBackendKind',
- 'CpuS390Entitlement',
- 'CpuS390Polarization',
- 'CpuS390State',
- 'CxlCorErrorType',
'DisplayProtocol',
'DriveBackupWrapper',
'DummyBlockCoreForceArrays',
'DummyForceArrays',
'DummyVirtioForceArrays',
- 'GrabToggleKeys',
'HotKeyMod',
'ImageInfoSpecificKind',
'InputAxis',
'InputButton',
'IscsiHeaderDigest',
'IscsiTransport',
- 'JSONType',
'KeyValueKind',
'MemoryDeviceInfoKind',
'NetClientDriver',
'ObjectType',
- 'PciMemoryRegion',
- 'QCryptoAkCipherKeyType',
- 'QCryptodevBackendServiceType',
'QKeyCode',
'RbdAuthMode',
'RbdImageEncryptionFormat',
+ 'S390CpuEntitlement',
+ 'S390CpuPolarization',
+ 'S390CpuState',
'String',
'StringWrapper',
'SysEmuTarget',
@@ -83,9 +76,7 @@
'X86CPURegister32',
'XDbgBlockGraph',
'YankInstanceType',
- 'blockdev-reopen',
- 'query-rocker',
- 'query-rocker-ports' ],
+ 'blockdev-reopen' ],
# Externally visible types whose member names may use uppercase
'member-name-exceptions': [ # visible in:
'ACPISlotType', # query-acpi-ospm-status
diff --git a/qapi/qapi-clone-visitor.c b/qapi/qapi-clone-visitor.c
index bbf9536..3099763 100644
--- a/qapi/qapi-clone-visitor.c
+++ b/qapi/qapi-clone-visitor.c
@@ -12,7 +12,7 @@
#include "qapi/clone-visitor.h"
#include "qapi/visitor-impl.h"
#include "qapi/error.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
struct QapiCloneVisitor {
Visitor visitor;
diff --git a/qapi/qapi-dealloc-visitor.c b/qapi/qapi-dealloc-visitor.c
index ef283f2..57a2c90 100644
--- a/qapi/qapi-dealloc-visitor.c
+++ b/qapi/qapi-dealloc-visitor.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "qapi/dealloc-visitor.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qapi/visitor-impl.h"
struct QapiDeallocVisitor
diff --git a/qapi/qapi-forward-visitor.c b/qapi/qapi-forward-visitor.c
index e36d9bc..d91d921 100644
--- a/qapi/qapi-forward-visitor.c
+++ b/qapi/qapi-forward-visitor.c
@@ -14,14 +14,14 @@
#include "qapi/forward-visitor.h"
#include "qapi/visitor-impl.h"
#include "qemu/queue.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/cutils.h"
struct ForwardFieldVisitor {
@@ -246,7 +246,7 @@ static void forward_field_optional(Visitor *v, const char *name, bool *present)
}
static bool forward_field_policy_reject(Visitor *v, const char *name,
- unsigned special_features,
+ uint64_t features,
Error **errp)
{
ForwardFieldVisitor *ffv = to_ffv(v);
@@ -254,18 +254,18 @@ static bool forward_field_policy_reject(Visitor *v, const char *name,
if (!forward_field_translate_name(ffv, &name, errp)) {
return true;
}
- return visit_policy_reject(ffv->target, name, special_features, errp);
+ return visit_policy_reject(ffv->target, name, features, errp);
}
static bool forward_field_policy_skip(Visitor *v, const char *name,
- unsigned special_features)
+ uint64_t features)
{
ForwardFieldVisitor *ffv = to_ffv(v);
if (!forward_field_translate_name(ffv, &name, NULL)) {
return true;
}
- return visit_policy_skip(ffv->target, name, special_features);
+ return visit_policy_skip(ffv->target, name, features);
}
static void forward_field_complete(Visitor *v, void *opaque)
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index b158198..a8f6616 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -3,35 +3,24 @@
##
# = Introduction
#
-# This document describes all commands currently supported by QMP.
+# This manual describes the commands and events supported by the QEMU
+# Monitor Protocol (QMP).
#
-# Most of the time their usage is exactly the same as in the user
-# Monitor, this means that any other document which also describe
-# commands (the manpage, QEMU's manual, etc) can and should be
-# consulted.
+# For locating a particular item, please see the `qapi-qmp-index`.
#
-# QMP has two types of commands: regular and query commands. Regular
-# commands usually change the Virtual Machine's state someway, while
-# query commands just return information. The sections below are
-# divided accordingly.
+# The following notation is used in examples:
#
-# It's important to observe that all communication examples are
-# formatted in a reader-friendly way, so that they're easier to
-# understand. However, in real protocol usage, they're emitted as a
-# single line.
+# .. qmp-example::
#
-# Also, the following notation is used to denote data flow:
+# -> ... text sent by client (commands) ...
+# <- ... text sent by server (command responses and events) ...
#
-# Example:
-#
-# ::
-#
-# -> data issued by the Client
-# <- Server data response
+# Example text is formatted for readability. However, in real
+# protocol usage, it's commonly emitted as a single line.
#
# Please refer to the
# :doc:`QEMU Machine Protocol Specification </interop/qmp-spec>`
-# for detailed information on the Server command and response formats.
+# for the general format of commands, responses, and events.
##
{ 'include': 'pragma.json' }
@@ -68,11 +57,12 @@
{ 'include': 'qdev.json' }
{ 'include': 'machine-common.json' }
{ 'include': 'machine.json' }
-{ 'include': 'machine-target.json' }
+{ 'include': 'machine-s390x.json' }
{ 'include': 'replay.json' }
{ 'include': 'yank.json' }
{ 'include': 'misc.json' }
-{ 'include': 'misc-target.json' }
+{ 'include': 'misc-arm.json' }
+{ 'include': 'misc-i386.json' }
{ 'include': 'audio.json' }
{ 'include': 'acpi.json' }
{ 'include': 'pci.json' }
@@ -81,3 +71,4 @@
{ 'include': 'vfio.json' }
{ 'include': 'cryptodev.json' }
{ 'include': 'cxl.json' }
+{ 'include': 'uefi.json' }
diff --git a/qapi/qapi-util.c b/qapi/qapi-util.c
index 65a7d18..3d849fe 100644
--- a/qapi/qapi-util.c
+++ b/qapi/qapi-util.c
@@ -37,19 +37,19 @@ static bool compat_policy_input_ok1(const char *adjective,
}
}
-bool compat_policy_input_ok(unsigned special_features,
+bool compat_policy_input_ok(uint64_t features,
const CompatPolicy *policy,
ErrorClass error_class,
const char *kind, const char *name,
Error **errp)
{
- if ((special_features & 1u << QAPI_DEPRECATED)
+ if ((features & 1u << QAPI_DEPRECATED)
&& !compat_policy_input_ok1("Deprecated",
policy->deprecated_input,
error_class, kind, name, errp)) {
return false;
}
- if ((special_features & (1u << QAPI_UNSTABLE))
+ if ((features & (1u << QAPI_UNSTABLE))
&& !compat_policy_input_ok1("Unstable",
policy->unstable_input,
error_class, kind, name, errp)) {
diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c
index 6c13510..706c61e 100644
--- a/qapi/qapi-visit-core.c
+++ b/qapi/qapi-visit-core.c
@@ -141,21 +141,21 @@ bool visit_optional(Visitor *v, const char *name, bool *present)
}
bool visit_policy_reject(Visitor *v, const char *name,
- unsigned special_features, Error **errp)
+ uint64_t features, Error **errp)
{
trace_visit_policy_reject(v, name);
if (v->policy_reject) {
- return v->policy_reject(v, name, special_features, errp);
+ return v->policy_reject(v, name, features, errp);
}
return false;
}
bool visit_policy_skip(Visitor *v, const char *name,
- unsigned special_features)
+ uint64_t features)
{
trace_visit_policy_skip(v, name);
if (v->policy_skip) {
- return v->policy_skip(v, name, special_features);
+ return v->policy_skip(v, name, features);
}
return false;
}
@@ -409,8 +409,8 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj,
return false;
}
- if (lookup->special_features
- && !compat_policy_input_ok(lookup->special_features[value],
+ if (lookup->features
+ && !compat_policy_input_ok(lookup->features[value],
&v->compat_policy,
ERROR_CLASS_GENERIC_ERROR,
"value", enum_str, errp)) {
diff --git a/qapi/qdev.json b/qapi/qdev.json
index e91ca03..32c7d10 100644
--- a/qapi/qdev.json
+++ b/qapi/qdev.json
@@ -59,8 +59,8 @@
# the 'docs/qdev-device-use.txt' file.
#
# 3. It's possible to list device properties by running QEMU with
-# the ``-device DEVICE,help`` command-line argument, where DEVICE
-# is the device's name.
+# the ``-device DEVICE,help`` command-line argument, where
+# DEVICE is the device's name.
#
# .. qmp-example::
#
@@ -94,13 +94,13 @@
#
# .. note:: When this command completes, the device may not be removed
# from the guest. Hot removal is an operation that requires guest
-# cooperation. This command merely requests that the guest begin the
-# hot removal process. Completion of the device removal process is
-# signaled with a DEVICE_DELETED event. Guest reset will
-# automatically complete removal for all devices. If a guest-side
-# error in the hot removal process is detected, the device will not
-# be removed and a DEVICE_UNPLUG_GUEST_ERROR event is sent. Some
-# errors cannot be detected.
+# cooperation. This command merely requests that the guest begin
+# the hot removal process. Completion of the device removal
+# process is signaled with a DEVICE_DELETED event. Guest reset
+# will automatically complete removal for all devices. If a
+# guest-side error in the hot removal process is detected, the
+# device will not be removed and a DEVICE_UNPLUG_GUEST_ERROR event
+# is sent. Some errors cannot be detected.
#
# Since: 0.14
#
@@ -123,7 +123,7 @@
#
# Emitted whenever the device removal completion is acknowledged by
# the guest. At this point, it's safe to reuse the specified device
-# ID. Device removal can be initiated by the guest or by HMP/QMP
+# ID. Device removal can be initiated by the guest or by HMP/QMP
# commands.
#
# @device: the device's ID if it has one
@@ -163,3 +163,27 @@
##
{ 'event': 'DEVICE_UNPLUG_GUEST_ERROR',
'data': { '*device': 'str', 'path': 'str' } }
+
+##
+# @device-sync-config:
+#
+# Synchronize device configuration from the host part to the guest
+# part. First, copy the configuration from the host part (backend)
+# to the guest part (frontend). Then notify guest software that the
+# device configuration has changed.
+#
+# The command may be used to notify the guest about a block device
+# capacity change. Currently only the vhost-user-blk device supports
+# this.
+#
+# @id: the device's ID or QOM path
+#
+# Features:
+#
+# @unstable: The command is experimental.
+#
+# Since: 9.2
+##
+{ 'command': 'device-sync-config',
+ 'features': [ 'unstable' ],
+ 'data': {'id': 'str'} }
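
A minimal usage sketch for the new command (the device id "vblk0" is
hypothetical):

    -> { "execute": "device-sync-config",
         "arguments": { "id": "vblk0" } }
    <- { "return": {} }
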
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 176b549..e569224 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -16,12 +16,12 @@
#include "block/aio.h"
#include "qapi/compat-policy.h"
#include "qapi/error.h"
-#include "qapi/qmp/dispatch.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
+#include "qapi/qmp-registry.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
-#include "qapi/qmp/qbool.h"
+#include "qobject/qbool.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
@@ -173,7 +173,7 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ
"The command %s has not been found", command);
goto out;
}
- if (!compat_policy_input_ok(cmd->special_features, &compat_policy,
+ if (!compat_policy_input_ok(cmd->features, &compat_policy,
ERROR_CLASS_COMMAND_NOT_FOUND,
"command", command, &err)) {
goto out;
diff --git a/qapi/qmp-event.c b/qapi/qmp-event.c
index 0fe0d0a..11cb6ac 100644
--- a/qapi/qmp-event.c
+++ b/qapi/qmp-event.c
@@ -14,9 +14,9 @@
#include "qemu/osdep.h"
#include "qapi/qmp-event.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
static void timestamp_put(QDict *qdict)
{
diff --git a/qapi/qmp-registry.c b/qapi/qmp-registry.c
index 485bc5e..e2623f2 100644
--- a/qapi/qmp-registry.c
+++ b/qapi/qmp-registry.c
@@ -13,11 +13,11 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/dispatch.h"
+#include "qapi/qmp-registry.h"
void qmp_register_command(QmpCommandList *cmds, const char *name,
QmpCommandFunc *fn, QmpCommandOptions options,
- unsigned special_features)
+ uint64_t features)
{
QmpCommand *cmd = g_malloc0(sizeof(*cmd));
@@ -28,7 +28,7 @@ void qmp_register_command(QmpCommandList *cmds, const char *name,
cmd->fn = fn;
cmd->enabled = true;
cmd->options = options;
- cmd->special_features = special_features;
+ cmd->features = features;
QTAILQ_INSERT_TAIL(cmds, cmd, node);
}
diff --git a/qapi/qobject-input-visitor.c b/qapi/qobject-input-visitor.c
index f110a80..c52d369 100644
--- a/qapi/qobject-input-visitor.c
+++ b/qapi/qobject-input-visitor.c
@@ -19,14 +19,14 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/visitor-impl.h"
#include "qemu/queue.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/cutils.h"
#include "qemu/keyval.h"
@@ -664,10 +664,10 @@ static void qobject_input_optional(Visitor *v, const char *name, bool *present)
}
static bool qobject_input_policy_reject(Visitor *v, const char *name,
- unsigned special_features,
+ uint64_t features,
Error **errp)
{
- return !compat_policy_input_ok(special_features, &v->compat_policy,
+ return !compat_policy_input_ok(features, &v->compat_policy,
ERROR_CLASS_GENERIC_ERROR,
"parameter", name, errp);
}
diff --git a/qapi/qobject-output-visitor.c b/qapi/qobject-output-visitor.c
index 74770ed..de5b36b 100644
--- a/qapi/qobject-output-visitor.c
+++ b/qapi/qobject-output-visitor.c
@@ -17,12 +17,12 @@
#include "qapi/qobject-output-visitor.h"
#include "qapi/visitor-impl.h"
#include "qemu/queue.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
typedef struct QStackEntry {
QObject *value;
@@ -210,13 +210,13 @@ static bool qobject_output_type_null(Visitor *v, const char *name,
}
static bool qobject_output_policy_skip(Visitor *v, const char *name,
- unsigned special_features)
+ uint64_t features)
{
CompatPolicy *pol = &v->compat_policy;
- return ((special_features & 1u << QAPI_DEPRECATED)
+ return ((features & 1u << QAPI_DEPRECATED)
&& pol->deprecated_output == COMPAT_POLICY_OUTPUT_HIDE)
- || ((special_features & 1u << QAPI_UNSTABLE)
+ || ((features & 1u << QAPI_UNSTABLE)
&& pol->unstable_output == COMPAT_POLICY_OUTPUT_HIDE);
}
diff --git a/qapi/qom.json b/qapi/qom.json
index 7e780e1..b133b06 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -222,7 +222,8 @@
##
{ 'struct': 'CanHostSocketcanProperties',
'data': { 'if': 'str',
- 'canbus': 'str' } }
+ 'canbus': 'str' },
+ 'if': 'CONFIG_LINUX' }
##
# @ColoCompareProperties:
@@ -305,7 +306,8 @@
##
{ 'struct': 'CryptodevVhostUserProperties',
'base': 'CryptodevBackendProperties',
- 'data': { 'chardev': 'str' } }
+ 'data': { 'chardev': 'str' },
+ 'if': 'CONFIG_VHOST_CRYPTO' }
##
# @DBusVMStateProperties:
@@ -354,7 +356,7 @@
# filter list. "head" means the filter is inserted at the head of
# the filter list, before any existing filters. "tail" means the
# filter is inserted at the tail of the filter list, behind any
-# existing filters (default). "id=<id>" means the filter is
+# existing filters (default). "id=<id>" means the filter is
# inserted before or behind the filter specified by <id>,
# depending on the @insert property. (default: "tail")
#
@@ -514,7 +516,8 @@
'data': { 'evdev': 'str',
'*grab_all': 'bool',
'*repeat': 'bool',
- '*grab-toggle': 'GrabToggleKeys' } }
+ '*grab-toggle': 'GrabToggleKeys' },
+ 'if': 'CONFIG_LINUX' }
##
# @EventLoopBaseProperties:
@@ -617,8 +620,8 @@
# .. note:: prealloc=true and reserve=false cannot be set at the same
# time. With reserve=true, the behavior depends on the operating
# system: for example, Linux will not reserve swap space for shared
-# file mappings -- "not applicable". In contrast, reserve=false will
-# bail out if it cannot be configured accordingly.
+# file mappings -- "not applicable". In contrast, reserve=false
+# will bail out if it cannot be configured accordingly.
#
# Since: 2.1
##
@@ -643,9 +646,9 @@
# @align: the base address alignment when QEMU mmap(2)s @mem-path.
# Some backend stores specified by @mem-path require an alignment
# different than the default one used by QEMU, e.g. the device DAX
-# /dev/dax0.0 requires 2M alignment rather than 4K. In such cases,
-# users can specify the required alignment via this option. 0
-# selects a default alignment (currently the page size).
+# /dev/dax0.0 requires 2M alignment rather than 4K. In such
+# cases, users can specify the required alignment via this option.
+# 0 selects a default alignment (currently the page size).
# (default: 0)
#
# @offset: the offset into the target file that the region starts at.
@@ -706,7 +709,7 @@
#
# @hugetlbsize: the hugetlb page size on systems that support multiple
# hugetlb page sizes (it must be a power of 2 value supported by
-# the system). 0 selects a default page size. This option is
+# the system). 0 selects a default page size. This option is
# ignored if @hugetlb is false. (default: 0)
#
# @seal: if true, create a sealed-file, which will block further
@@ -718,7 +721,8 @@
'base': 'MemoryBackendProperties',
'data': { '*hugetlb': 'bool',
'*hugetlbsize': 'size',
- '*seal': 'bool' } }
+ '*seal': 'bool' },
+ 'if': 'CONFIG_LINUX' }
##
# @MemoryBackendShmProperties:
@@ -748,7 +752,8 @@
##
{ 'struct': 'MemoryBackendEpcProperties',
'base': 'MemoryBackendProperties',
- 'data': {} }
+ 'data': {},
+ 'if': 'CONFIG_LINUX' }
##
# @PrManagerHelperProperties:
@@ -761,7 +766,8 @@
# Since: 2.11
##
{ 'struct': 'PrManagerHelperProperties',
- 'data': { 'path': 'str' } }
+ 'data': { 'path': 'str' },
+ 'if': 'CONFIG_LINUX' }
##
# @QtestProperties:
@@ -839,6 +845,45 @@
'node': 'uint32' } }
##
+# @AcpiGenericPortProperties:
+#
+# Properties for acpi-generic-port objects.
+#
+# @pci-bus: QOM path of the PCI bus of the hostbridge associated with
+# this SRAT Generic Port Affinity Structure. This is the same as
+# the bus parameter for the root ports attached to this host
+# bridge. The resulting SRAT Generic Port Affinity Structure will
+# refer to the ACPI object in DSDT that represents the host bridge
+# (e.g. ACPI0016 for CXL host bridges). See ACPI 6.5 Section
+# 5.2.16.7 for more information.
+#
+# @node: Similar to a NUMA node ID, but instead of providing a
+# reference point used for defining NUMA distances and access
+# characteristics to memory or from an initiator (e.g. CPU), this
+# node defines the boundary point between non-discoverable system
+# buses which must be described by firmware, and a discoverable
+# bus. NUMA distances and access characteristics are defined to
+# and from that point. For system software to establish full
+# initiator to target characteristics this information must be
+# combined with information retrieved from the discoverable part
+# of the path. An example would use CDAT (see UEFI.org)
+# information read from devices and switches in conjunction with
+# link characteristics read from PCIe Configuration space.
+# To get the full path latency from the CPU to DRAM attached to a
+# CXL device: add the latency from the CPU to the Generic Port
+# (from HMAT, indexed via the node ID in this SRAT structure) to
+# that for CXL bus links, the latency across intermediate switches
+# and from the EP port to the actual memory. Bandwidth is more
+# complex as there may be interleaving across multiple devices
+# and shared links in the path.
+#
+# Since: 9.2
+##
+{ 'struct': 'AcpiGenericPortProperties',
+ 'data': { 'pci-bus': 'str',
+ 'node': 'uint32' } }
+
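
As a hedged illustration only, the property set of such an object
could be written as a JSON -object argument along these lines (the id,
the pci-bus QOM path and the node value are hypothetical; the object
is typically given with -object at startup):

    { "qom-type": "acpi-generic-port",
      "id": "gp0",
      "pci-bus": "/machine/q35/pcie.0",
      "node": 1 }
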
+##
# @RngProperties:
#
# Properties for objects of classes derived from rng.
@@ -884,7 +929,8 @@
##
{ 'struct': 'RngRandomProperties',
'base': 'RngProperties',
- 'data': { '*filename': 'str' } }
+ 'data': { '*filename': 'str' },
+ 'if': 'CONFIG_POSIX' }
##
# @SevCommonProperties:
@@ -923,17 +969,17 @@
#
# @handle: SEV firmware handle (default: 0)
#
-# @legacy-vm-type: Use legacy KVM_SEV_INIT KVM interface for creating the VM.
-# The newer KVM_SEV_INIT2 interface, from Linux >= 6.10, syncs
-# additional vCPU state when initializing the VMSA structures,
-# which will result in a different guest measurement. Set
-# this to 'on' to force compatibility with older QEMU or kernel
-# versions that rely on legacy KVM_SEV_INIT behavior. 'auto'
-# will behave identically to 'on', but will automatically
-# switch to using KVM_SEV_INIT2 if the user specifies any
-# additional options that require it. If set to 'off', QEMU
-# will require KVM_SEV_INIT2 unconditionally.
-# (default: off) (since 9.1)
+# @legacy-vm-type: Use legacy KVM_SEV_INIT KVM interface for creating
+# the VM. The newer KVM_SEV_INIT2 interface, from Linux >= 6.10,
+# syncs additional vCPU state when initializing the VMSA
+# structures, which will result in a different guest measurement.
+# Set this to 'on' to force compatibility with older QEMU or kernel
+# versions that rely on legacy KVM_SEV_INIT behavior. 'auto' will
+# behave identically to 'on', but will automatically switch to
+# using KVM_SEV_INIT2 if the user specifies any additional options
+# that require it. If set to 'off', QEMU will require
+# KVM_SEV_INIT2 unconditionally.
+# (default: off) (since 9.1)
#
# Since: 2.12
##
@@ -985,7 +1031,7 @@
# @vcek-disabled: Guests are by default allowed to choose between VLEK
# (Versioned Loaded Endorsement Key) or VCEK (Versioned Chip
# Endorsement Key) when requesting attestation reports from
-# firmware. Set this to true to disable the use of VCEK.
+# firmware. Set this to true to disable the use of VCEK.
# (default: false) (since: 9.1)
#
# Since: 9.1
@@ -1002,6 +1048,45 @@
'*vcek-disabled': 'bool' } }
##
+# @TdxGuestProperties:
+#
+# Properties for tdx-guest objects.
+#
+# @attributes: The 'attributes' of a TD guest that is passed to
+# KVM_TDX_INIT_VM
+#
+# @sept-ve-disable: toggle bit 28 of TD attributes to control disabling
+# of EPT violation conversion to #VE on guest TD access of PENDING
+# pages. Some guest OSes (e.g., Linux TD guests) may require this
+# to be set; otherwise they refuse to boot.
+#
+# @mrconfigid: ID for non-owner-defined configuration of the guest TD,
+# e.g., run-time or OS configuration (base64 encoded SHA384 digest).
+# Defaults to all zeros.
+#
+# @mrowner: ID for the guest TD's owner (base64 encoded SHA384 digest).
+# Defaults to all zeros.
+#
+# @mrownerconfig: ID for owner-defined configuration of the guest TD,
+# e.g., specific to the workload rather than the run-time or OS
+# (base64 encoded SHA384 digest). Defaults to all zeros.
+#
+# @quote-generation-socket: socket address for Quote Generation
+# Service (QGS). QGS is a daemon running on the host. Without
+# it, the guest will not be able to get a TD quote for
+# attestation.
+#
+# Since: 10.1
+##
+{ 'struct': 'TdxGuestProperties',
+ 'data': { '*attributes': 'uint64',
+ '*sept-ve-disable': 'bool',
+ '*mrconfigid': 'str',
+ '*mrowner': 'str',
+ '*mrownerconfig': 'str',
+ '*quote-generation-socket': 'SocketAddress' } }
+
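
A hedged sketch of these properties written as a JSON -object argument
(the object id and the QGS socket path are illustrative; such an
object would be referenced through the machine's
confidential-guest-support option):

    { "qom-type": "tdx-guest",
      "id": "tdx0",
      "sept-ve-disable": true,
      "quote-generation-socket": { "type": "unix",
                                   "path": "/var/run/tdx-qgs/qgs.socket" } }
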
+##
# @ThreadContextProperties:
#
# Properties for thread context objects.
@@ -1036,6 +1121,7 @@
{ 'enum': 'ObjectType',
'data': [
'acpi-generic-initiator',
+ 'acpi-generic-port',
'authz-list',
'authz-listfile',
'authz-pam',
@@ -1085,6 +1171,7 @@
'sev-snp-guest',
'thread-context',
's390-pv-guest',
+ 'tdx-guest',
'throttle-group',
'tls-creds-anon',
'tls-creds-psk',
@@ -1111,6 +1198,7 @@
'discriminator': 'qom-type',
'data': {
'acpi-generic-initiator': 'AcpiGenericInitiatorProperties',
+ 'acpi-generic-port': 'AcpiGenericPortProperties',
'authz-list': 'AuthZListProperties',
'authz-listfile': 'AuthZListFileProperties',
'authz-pam': 'AuthZPAMProperties',
@@ -1156,6 +1244,7 @@
'if': 'CONFIG_SECRET_KEYRING' },
'sev-guest': 'SevGuestProperties',
'sev-snp-guest': 'SevSnpGuestProperties',
+ 'tdx-guest': 'TdxGuestProperties',
'thread-context': 'ThreadContextProperties',
'throttle-group': 'ThrottleGroupProperties',
'tls-creds-anon': 'TlsCredsAnonProperties',
diff --git a/qapi/rocker.json b/qapi/rocker.json
index 2e63dcb..0c7ef1f 100644
--- a/qapi/rocker.json
+++ b/qapi/rocker.json
@@ -26,6 +26,8 @@
#
# Return rocker switch information.
#
+# @name: switch name
+#
# Returns: @Rocker information
#
# Since: 2.4
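
A hedged example of the invocation this argument belongs to (switch
name and returned values are illustrative):

    -> { "execute": "query-rocker", "arguments": { "name": "sw1" } }
    <- { "return": { "name": "sw1", "id": 1327446905938432, "ports": 2 } }
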
@@ -42,7 +44,7 @@
##
# @RockerPortDuplex:
#
-# An eumeration of port duplex states.
+# An enumeration of port duplex states.
#
# @half: half duplex
#
@@ -55,7 +57,7 @@
##
# @RockerPortAutoneg:
#
-# An eumeration of port autoneg states.
+# An enumeration of port autoneg states.
#
# @off: autoneg is off
#
@@ -94,6 +96,8 @@
#
# Return rocker switch port information.
#
+# @name: port name
+#
# Returns: a list of @RockerPort information
#
# Since: 2.4
@@ -250,7 +254,7 @@
# "action": {"goto-tbl": 10},
# "mask": {"in-pport": 4294901760}
# },
-# {...},
+# ...
# ]}
##
{ 'command': 'query-rocker-of-dpa-flows',
@@ -288,8 +292,8 @@
#
# @ttl-check: perform TTL check
#
-# .. note:: Optional members may or may not appear in the group depending
-# if they're relevant to the group type.
+# .. note:: Optional members may or may not appear in the group
+# depending on whether they're relevant to the group type.
#
# Since: 2.4
##
diff --git a/qapi/run-state.json b/qapi/run-state.json
index 287691c..fd09beb 100644
--- a/qapi/run-state.json
+++ b/qapi/run-state.json
@@ -62,7 +62,7 @@
##
# @ShutdownCause:
#
-# An enumeration of reasons for a Shutdown.
+# An enumeration of reasons for a shutdown.
#
# @none: No shutdown request pending
#
@@ -135,19 +135,19 @@
##
# @SHUTDOWN:
#
-# Emitted when the virtual machine has shut down, indicating that qemu
+# Emitted when the virtual machine has shut down, indicating that QEMU
# is about to exit.
#
# @guest: If true, the shutdown was triggered by a guest request (such
# as a guest-initiated ACPI shutdown request or other
# hardware-specific action) rather than a host request (such as
-# sending qemu a SIGINT). (since 2.10)
+# sending QEMU a SIGINT). (since 2.10)
#
# @reason: The @ShutdownCause which resulted in the SHUTDOWN.
# (since 4.0)
#
# .. note:: If the command-line option ``-no-shutdown`` has been
-# specified, qemu will not exit, and a STOP event will eventually
+# specified, QEMU will not exit, and a STOP event will eventually
# follow the SHUTDOWN event.
#
# Since: 0.12
@@ -365,8 +365,8 @@
# @shutdown: Shutdown the VM and exit, according to the shutdown
# action
#
-# @exit-failure: Shutdown the VM and exit with nonzero status (since
-# 7.1)
+# @exit-failure: Shutdown the VM and exit with nonzero status
+# (since 7.1)
#
# Since: 6.0
##
@@ -501,10 +501,12 @@
#
# @s390: s390 guest panic information type (Since: 2.12)
#
+# @tdx: tdx guest panic information type (Since: 10.1)
+#
# Since: 2.9
##
{ 'enum': 'GuestPanicInformationType',
- 'data': [ 'hyper-v', 's390' ] }
+ 'data': [ 'hyper-v', 's390', 'tdx' ] }
##
# @GuestPanicInformation:
@@ -519,7 +521,8 @@
'base': {'type': 'GuestPanicInformationType'},
'discriminator': 'type',
'data': {'hyper-v': 'GuestPanicInformationHyperV',
- 's390': 'GuestPanicInformationS390'}}
+ 's390': 'GuestPanicInformationS390',
+ 'tdx' : 'GuestPanicInformationTdx'}}
##
# @GuestPanicInformationHyperV:
@@ -527,20 +530,20 @@
# Hyper-V specific guest panic information (HV crash MSRs)
#
# @arg1: for Windows, STOP code for the guest crash. For Linux,
-# an error code.
+# an error code.
#
# @arg2: for Windows, first argument of the STOP. For Linux, the
-# guest OS ID, which has the kernel version in bits 16-47
-# and 0x8100 in bits 48-63.
+# guest OS ID, which has the kernel version in bits 16-47 and
+# 0x8100 in bits 48-63.
#
# @arg3: for Windows, second argument of the STOP. For Linux, the
-# program counter of the guest.
+# program counter of the guest.
#
# @arg4: for Windows, third argument of the STOP. For Linux, the
-# RAX register (x86) or the stack pointer (aarch64) of the guest.
+# RAX register (x86) or the stack pointer (aarch64) of the guest.
#
# @arg5: for Windows, fourth argument of the STOP. For x86 Linux, the
-# stack pointer of the guest.
+# stack pointer of the guest.
#
# Since: 2.9
##
@@ -599,6 +602,30 @@
'reason': 'S390CrashReason'}}
##
+# @GuestPanicInformationTdx:
+#
+# Guest panic information specific to TDX, as specified in the
+# "Guest-Hypervisor Communication Interface (GHCI) Specification",
+# section TDG.VP.VMCALL<ReportFatalError>.
+#
+# @error-code: TD-specific error code
+#
+# @message: Human-readable error message provided by the guest. Not
+# to be trusted.
+#
+# @gpa: guest-physical address of a page that contains more verbose
+# error information, as a zero-terminated string. Present when the
+# "GPA valid" bit (bit 63) is set in @error-code.
+#
+# Since: 10.1
+##
+{'struct': 'GuestPanicInformationTdx',
+ 'data': {'error-code': 'uint32',
+ 'message': 'str',
+ '*gpa': 'uint64'}}
+
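
For orientation, a GUEST_PANICKED event carrying the new TDX branch
could look roughly like this (all values are illustrative):

    <- { "event": "GUEST_PANICKED",
         "data": { "action": "pause",
                   "info": { "type": "tdx",
                             "error-code": 0,
                             "message": "fatal error reported by guest" } },
         "timestamp": { "seconds": 1739538638, "microseconds": 354181 } }
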
+##
# @MEMORY_FAILURE:
#
# Emitted when a memory failure occurs on host side.
diff --git a/qapi/sockets.json b/qapi/sockets.json
index e76fdb9..f9f559d 100644
--- a/qapi/sockets.json
+++ b/qapi/sockets.json
@@ -29,6 +29,7 @@
# @InetSocketAddressBase:
#
# @host: host part of the address
+#
# @port: port part of the address
##
{ 'struct': 'InetSocketAddressBase',
@@ -55,8 +56,24 @@
# @ipv6: whether to accept IPv6 addresses, default try both IPv4 and
# IPv6
#
-# @keep-alive: enable keep-alive when connecting to this socket. Not
-# supported for passive sockets. (Since 4.2)
+# @keep-alive: enable keep-alive when connecting to/listening on this socket.
+# (Since 4.2, not supported for listening sockets until 10.1)
+#
+# @keep-alive-count: number of keep-alive packets sent before the connection
+# is closed. Only supported for TCP sockets on systems where the TCP_KEEPCNT
+# socket option is defined (this includes Linux, Windows, macOS, FreeBSD,
+# but not OpenBSD). When set to 0, the system setting is used. (Since 10.1)
+#
+# @keep-alive-idle: time in seconds the connection needs to be idle before
+# sending a keep-alive packet. Only supported for TCP sockets on systems
+# where the TCP_KEEPIDLE socket option is defined (this includes Linux,
+# Windows, macOS, FreeBSD, but not OpenBSD). When set to 0, the system
+# setting is used. (Since 10.1)
+#
+# @keep-alive-interval: time in seconds between keep-alive packets. Only
+# supported for TCP sockets on systems where the TCP_KEEPINTVL socket option
+# is defined (this includes Linux, Windows, macOS, FreeBSD, but not
+# OpenBSD). When set to 0, the system setting is used. (Since 10.1)
#
# @mptcp: enable multi-path TCP. (Since 6.1)
#
@@ -70,6 +87,9 @@
'*ipv4': 'bool',
'*ipv6': 'bool',
'*keep-alive': 'bool',
+ '*keep-alive-count': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPCNT' },
+ '*keep-alive-idle': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPIDLE' },
+ '*keep-alive-interval': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPINTVL' },
'*mptcp': { 'type': 'bool', 'if': 'HAVE_IPPROTO_MPTCP' } } }
##
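
As a sketch (all values illustrative), the new members slot into an
InetSocketAddress wherever such an address is accepted, provided the
build defines the corresponding TCP socket options:

    { "host": "0.0.0.0", "port": "10809",
      "keep-alive": true,
      "keep-alive-idle": 60,
      "keep-alive-interval": 10,
      "keep-alive-count": 5 }
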
@@ -104,8 +124,8 @@
#
# @port: port
#
-# .. note:: String types are used to allow for possible future hostname
-# or service resolution support.
+# .. note:: String types are used to allow for possible future
+# hostname or service resolution support.
#
# Since: 2.8
##
diff --git a/qapi/stats.json b/qapi/stats.json
index efbbe26..8902ef9 100644
--- a/qapi/stats.json
+++ b/qapi/stats.json
@@ -117,10 +117,10 @@
# information for that target.
#
# @target: the kind of objects to query. Note that each possible
-# target may enable additional filtering options
+# target may enable additional filtering options
#
-# @providers: which providers to request statistics from, and optionally
-# which named values to return within each provider
+# @providers: which providers to request statistics from, and
+# optionally which named values to return within each provider
#
# Since: 7.1
##
diff --git a/qapi/string-input-visitor.c b/qapi/string-input-visitor.c
index 3f1b9e9..f4eecc7 100644
--- a/qapi/string-input-visitor.c
+++ b/qapi/string-input-visitor.c
@@ -15,7 +15,7 @@
#include "qapi/string-input-visitor.h"
#include "qapi/visitor-impl.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
diff --git a/qapi/transaction.json b/qapi/transaction.json
index b0ae343..9d9e7af 100644
--- a/qapi/transaction.json
+++ b/qapi/transaction.json
@@ -21,7 +21,7 @@
##
# @ActionCompletionMode:
#
-# An enumeration of Transactional completion modes.
+# An enumeration of transactional completion modes.
#
# @individual: Do not attempt to cancel any other Actions if any
# Actions fail after the Transaction request succeeds. All
@@ -223,7 +223,7 @@
# exists, the request will be rejected. Only some image formats
# support it, for example, qcow2, and rbd,
#
-# On failure, qemu will try delete the newly created internal snapshot
+# On failure, QEMU will try to delete the newly created internal snapshot
# in the transaction. When an I/O error occurs during deletion, the
# user needs to fix it later with qemu-img or other command.
#
@@ -238,8 +238,8 @@
# - Any errors from commands in the transaction
#
# .. note:: The transaction aborts on the first failure. Therefore,
-# there will be information on only one failed operation returned in
-# an error condition, and subsequent actions will not have been
+# there will be information on only one failed operation returned
+# in an error condition, and subsequent actions will not have been
# attempted.
#
# Since: 1.1
diff --git a/qapi/uefi.json b/qapi/uefi.json
new file mode 100644
index 0000000..6592183
--- /dev/null
+++ b/qapi/uefi.json
@@ -0,0 +1,64 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+
+##
+# = UEFI Variable Store
+#
+# The QEMU UEFI variable store implementation (hw/uefi/) uses this to
+# store non-volatile variables in JSON format on disk.
+#
+# This is an existing format already supported by (at least) two other
+# projects, specifically https://gitlab.com/kraxel/virt-firmware and
+# https://github.com/awslabs/python-uefivars.
+##
+
+##
+# @UefiVariable:
+#
+# UEFI Variable. Check the UEFI specification for more detailed
+# information on the fields.
+#
+# @guid: variable namespace GUID
+#
+# @name: variable name, in UTF-8 encoding.
+#
+# @attr: variable attributes.
+#
+# @data: variable value, encoded as hex string.
+#
+# @time: variable modification time. EFI_TIME struct, encoded as hex
+# string. Used only for authenticated variables, where the
+# EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS attribute bit
+# is set.
+#
+# @digest: variable certificate digest. Used to verify the signature
+# of updates for authenticated variables. UEFI has two kinds of
+# authenticated variables. The secure boot variables ('PK',
+# 'KEK', 'db' and 'dbx') have hard coded signature checking rules.
+# For other authenticated variables the firmware stores a digest
+# of the signing certificate at variable creation time, and any
+# updates must be signed with the same certificate.
+#
+# Since: 10.0
+##
+{ 'struct' : 'UefiVariable',
+ 'data' : { 'guid' : 'str',
+ 'name' : 'str',
+ 'attr' : 'int',
+ 'data' : 'str',
+ '*time' : 'str',
+ '*digest' : 'str'}}
+
+##
+# @UefiVarStore:
+#
+# @version: currently always 2
+#
+# @variables: list of UEFI variables
+#
+# Since: 10.0
+##
+{ 'struct' : 'UefiVarStore',
+ 'data' : { 'version' : 'int',
+ 'variables' : [ 'UefiVariable' ] }}
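
To make the on-disk shape concrete, a minimal store holding a single
variable might look like this (the variable and its hex data are
illustrative; the GUID is the EFI global variable namespace):

    {
      "version": 2,
      "variables": [
        { "guid": "8be4df61-93ca-11d2-aa0d-00e098032b8c",
          "name": "BootOrder",
          "attr": 7,
          "data": "0100" }
      ]
    }
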
diff --git a/qapi/ui.json b/qapi/ui.json
index 5daca51..514fa15 100644
--- a/qapi/ui.json
+++ b/qapi/ui.json
@@ -48,8 +48,8 @@
# @password: the new password
#
# @connected: How to handle existing clients when changing the
-# password. If nothing is specified, defaults to 'keep'. For VNC,
-# only 'keep' is currently implemented.
+# password. If nothing is specified, defaults to 'keep'. For
+# VNC, only 'keep' is currently implemented.
#
# Since: 7.0
##
@@ -107,10 +107,11 @@
# - '+INT' where INT is the number of seconds from now (integer)
# - 'INT' where INT is the absolute time in seconds
#
-# .. note:: Time is relative to the server and currently there is no way
-# to coordinate server time with client time. It is not recommended
-# to use the absolute time version of the @time parameter unless
-# you're sure you are on the same machine as the QEMU instance.
+# .. note:: Time is relative to the server and currently there is no
+# way to coordinate server time with client time. It is not
+# recommended to use the absolute time version of the @time
+# parameter unless you're sure you are on the same machine as the
+# QEMU instance.
#
# Since: 7.0
##
@@ -174,8 +175,8 @@
# @filename: the path of a new file to store the image
#
# @device: ID of the display device that should be dumped. If this
-# parameter is missing, the primary display will be used. (Since
-# 2.12)
+# parameter is missing, the primary display will be used.
+# (Since 2.12)
#
# @head: head to use in case the device supports multiple heads. If
# this parameter is missing, head #0 will be used. Also note that
@@ -322,7 +323,7 @@
##
# @query-spice:
#
-# Returns information about the current SPICE server
+# Return information about the current SPICE server
#
# Returns: @SpiceInfo
#
@@ -624,7 +625,7 @@
# @id: vnc server name.
#
# @server: A list of @VncBasincInfo describing all listening sockets.
-# The list can be empty (in case the vnc server is disabled). It
+# The list can be empty (in case the vnc server is disabled). It
# also may have multiple entries: normal + websocket, possibly
# also ipv4 + ipv6 in the future.
#
@@ -653,7 +654,7 @@
##
# @query-vnc:
#
-# Returns information about the current VNC server
+# Return information about the current VNC server
#
# Returns: @VncInfo
#
@@ -684,7 +685,7 @@
##
# @query-vnc-servers:
#
-# Returns a list of vnc servers. The list can be empty.
+# Return a list of vnc servers. The list can be empty.
#
# Returns: a list of @VncInfo2
#
@@ -719,8 +720,8 @@
#
# @client: client information
#
-# .. note:: This event is emitted before any authentication takes place,
-# thus the authentication ID is not provided.
+# .. note:: This event is emitted before any authentication takes
+# place, thus the authentication ID is not provided.
#
# Since: 0.13
#
@@ -819,7 +820,7 @@
##
# @query-mice:
#
-# Returns information about each active mouse device
+# Return information about each active mouse device
#
# Returns: a list of @MouseInfo for each device
#
@@ -948,6 +949,7 @@
# Since: 1.3
##
{ 'enum': 'QKeyCode',
+ 'prefix': 'Q_KEY_CODE',
'data': [ 'unmapped',
'shift', 'shift_r', 'alt', 'alt_r', 'ctrl',
'ctrl_r', 'menu', 'esc', '1', '2', '3', '4', '5', '6', '7', '8',
@@ -1131,7 +1133,7 @@
# @axis: Which axis is referenced by @value.
#
# @value: Pointer position. For absolute coordinates the valid range
-# is 0 -> 0x7ffff
+# is 0 to 0x7fff.
#
# Since: 2.0
##
@@ -1266,7 +1268,7 @@
# Since: 2.6
#
# .. note:: The consoles are visible in the qom tree, under
-# ``/backend/console[$index]``. They have a device link and head
+# ``/backend/console[$index]``. They have a device link and head
# property, so it is possible to map which console belongs to which
# device and display.
#
@@ -1416,11 +1418,11 @@
#
# @left-command-key: Enable/disable forwarding of left command key to
# guest. Allows command-tab window switching on the host without
-# sending this key to the guest when "off". Defaults to "on"
+# sending this key to the guest when "off". Defaults to "on"
#
# @full-grab: Capture all key presses, including system combos. This
# requires accessibility permissions, since it performs a global
-# grab on key events. (default: off) See
+# grab on key events. (default: off) See
# https://support.apple.com/en-in/guide/mac-help/mh32356/mac
#
# @swap-opt-cmd: Swap the Option and Command keys so that their key
@@ -1432,7 +1434,7 @@
# "off". (Since 8.2)
#
# @zoom-interpolation: Apply interpolation to smooth output when
-# zoom-to-fit is enabled. Defaults to "off". (Since 9.0)
+# zoom-to-fit is enabled. Defaults to "off". (Since 9.0)
#
# Since: 7.0
##
@@ -1524,12 +1526,12 @@
#
# Display (user interface) options.
#
-# @type: Which DisplayType qemu should use.
+# @type: Which DisplayType QEMU should use.
#
# @full-screen: Start user interface in fullscreen mode
# (default: off).
#
-# @window-close: Allow to quit qemu with window close button
+# @window-close: Allow to quit QEMU with window close button
# (default: on).
#
# @show-cursor: Force showing the mouse cursor (default: off).
@@ -1560,7 +1562,7 @@
##
# @query-display-options:
#
-# Returns information about display configuration
+# Return information about display configuration
#
# Returns: @DisplayOptions
#
diff --git a/qapi/vfio.json b/qapi/vfio.json
index 40cbcde..b53b7ca 100644
--- a/qapi/vfio.json
+++ b/qapi/vfio.json
@@ -7,7 +7,7 @@
##
##
-# @VfioMigrationState:
+# @QapiVfioMigrationState:
#
# An enumeration of the VFIO device migration states.
#
@@ -15,16 +15,16 @@
#
# @running: The device is running.
#
-# @stop-copy: The device is stopped and its internal state is available
-# for reading.
+# @stop-copy: The device is stopped and its internal state is
+# available for reading.
#
# @resuming: The device is stopped and its internal state is available
# for writing.
#
# @running-p2p: The device is running in the P2P quiescent state.
#
-# @pre-copy: The device is running, tracking its internal state and its
-# internal state is available for reading.
+# @pre-copy: The device is running, tracking its internal state and
+# its internal state is available for reading.
#
# @pre-copy-p2p: The device is running in the P2P quiescent state,
# tracking its internal state and its internal state is available
@@ -32,10 +32,9 @@
#
# Since: 9.1
##
-{ 'enum': 'VfioMigrationState',
+{ 'enum': 'QapiVfioMigrationState',
'data': [ 'stop', 'running', 'stop-copy', 'resuming', 'running-p2p',
- 'pre-copy', 'pre-copy-p2p' ],
- 'prefix': 'QAPI_VFIO_MIGRATION_STATE' }
+ 'pre-copy', 'pre-copy-p2p' ] }
##
# @VFIO_MIGRATION:
@@ -63,5 +62,5 @@
'data': {
'device-id': 'str',
'qom-path': 'str',
- 'device-state': 'VfioMigrationState'
+ 'device-state': 'QapiVfioMigrationState'
} }
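
A note on the rename above (an assumption based on the usual QAPI naming convention, not something stated in the patch): the generator derives the C constant prefix from the type name, so switching to QapiVfioMigrationState while dropping the explicit 'prefix' is expected to leave the generated constant spellings unchanged; only the C typedef name changes. A hand-written stand-in, not the real generated qapi-types-vfio.h:

    /* Illustration only: assumed shape of the generated enum after the
     * rename above; the QAPI_VFIO_MIGRATION_STATE_* names are unchanged. */
    #include <stdio.h>

    typedef enum QapiVfioMigrationState {
        QAPI_VFIO_MIGRATION_STATE_STOP,
        QAPI_VFIO_MIGRATION_STATE_RUNNING,
        QAPI_VFIO_MIGRATION_STATE_STOP_COPY,
        QAPI_VFIO_MIGRATION_STATE_RESUMING,
        QAPI_VFIO_MIGRATION_STATE_RUNNING_P2P,
        QAPI_VFIO_MIGRATION_STATE_PRE_COPY,
        QAPI_VFIO_MIGRATION_STATE_PRE_COPY_P2P,
    } QapiVfioMigrationState;

    int main(void)
    {
        QapiVfioMigrationState s = QAPI_VFIO_MIGRATION_STATE_RUNNING;
        printf("state=%d\n", (int)s);
        return 0;
    }
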
diff --git a/qapi/virtio.json b/qapi/virtio.json
index 26df8b3..73df718 100644
--- a/qapi/virtio.json
+++ b/qapi/virtio.json
@@ -24,7 +24,7 @@
##
# @x-query-virtio:
#
-# Returns a list of all realized VirtIODevices
+# Return a list of all realized VirtIODevices
#
# Features:
#
@@ -568,9 +568,9 @@
# .. note:: last_avail_idx will not be displayed in the case where the
# selected VirtIODevice has a running vhost device and the
# VirtIODevice VirtQueue index (queue) does not exist for the
-# corresponding vhost device vhost_virtqueue. Also, shadow_avail_idx
-# will not be displayed in the case where the selected VirtIODevice
-# has a running vhost device.
+# corresponding vhost device vhost_virtqueue. Also,
+# shadow_avail_idx will not be displayed in the case where the
+# selected VirtIODevice has a running vhost device.
#
# Since: 7.2
#
@@ -992,3 +992,17 @@
##
{ 'enum': 'GranuleMode',
'data': [ '4k', '8k', '16k', '64k', 'host' ] }
+
+##
+# @VMAppleVirtioBlkVariant:
+#
+# @unspecified: The default, not a valid setting.
+#
+# @root: Block device holding the root volume
+#
+# @aux: Block device holding auxiliary data required for boot
+#
+# Since: 9.2
+##
+{ 'enum': 'VMAppleVirtioBlkVariant',
+ 'data': [ 'unspecified', 'root', 'aux' ] }
diff --git a/qemu-img.c b/qemu-img.c
index 7668f86..e757071 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -32,8 +32,8 @@
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qobject-output-visitor.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qdict.h"
#include "qemu/cutils.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
@@ -45,7 +45,7 @@
#include "qemu/units.h"
#include "qemu/memalign.h"
#include "qom/object_interfaces.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/dirty-bitmap.h"
@@ -3505,6 +3505,7 @@ static int img_snapshot(int argc, char **argv)
break;
case SNAPSHOT_DELETE:
+ bdrv_drain_all_begin();
bdrv_graph_rdlock_main_loop();
ret = bdrv_snapshot_find(bs, &sn, snapshot_name);
if (ret < 0) {
@@ -3520,6 +3521,7 @@ static int img_snapshot(int argc, char **argv)
}
}
bdrv_graph_rdunlock_main_loop();
+ bdrv_drain_all_end();
break;
}
@@ -4488,7 +4490,11 @@ static void bench_cb(void *opaque, int ret)
*/
b->in_flight++;
b->offset += b->step;
- b->offset %= b->image_size;
+ if (b->image_size <= b->bufsize) {
+ b->offset = 0;
+ } else {
+ b->offset %= b->image_size - b->bufsize;
+ }
if (b->write) {
acb = blk_aio_pwritev(b->blk, offset, b->qiov, 0, bench_cb, b);
} else {
@@ -4567,7 +4573,7 @@ static int img_bench(int argc, char **argv)
{
unsigned long res;
- if (qemu_strtoul(optarg, NULL, 0, &res) < 0 || res > INT_MAX) {
+ if (qemu_strtoul(optarg, NULL, 0, &res) <= 0 || res > INT_MAX) {
error_report("Invalid queue depth specified");
return 1;
}
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index e2fab57..13e0330 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -10,9 +10,9 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu-io.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/block.h"
#include "block/block_int.h" /* for info_f() */
#include "block/qapi.h"
diff --git a/qemu-io.c b/qemu-io.c
index 6cb1e00..8f2de83 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -27,10 +27,10 @@
#include "qemu/readline.h"
#include "qemu/log.h"
#include "qemu/sockets.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qstring.h"
+#include "qobject/qdict.h"
#include "qom/object_interfaces.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "block/block_int.h"
#include "trace/control.h"
#include "crypto/init.h"
diff --git a/qemu-keymap.c b/qemu-keymap.c
index 701e433..1c081db 100644
--- a/qemu-keymap.c
+++ b/qemu-keymap.c
@@ -116,7 +116,6 @@ static void walk_map(struct xkb_keymap *map, xkb_keycode_t code, void *data)
if (kshift != kaltgrshift && kaltgr != kaltgrshift) {
print_sym(kaltgrshift, qcode, " shift altgr");
}
- return;
}
static void usage(FILE *out)
@@ -154,9 +153,9 @@ static xkb_mod_mask_t get_mod(struct xkb_keymap *map, const char *name)
int main(int argc, char *argv[])
{
- static struct xkb_context *ctx;
- static struct xkb_keymap *map;
- static struct xkb_state *state;
+ struct xkb_context *ctx;
+ struct xkb_keymap *map;
+ struct xkb_state *state;
xkb_mod_index_t mod, mods;
int rc;
@@ -213,6 +212,7 @@ int main(int argc, char *argv[])
ctx = xkb_context_new(XKB_CONTEXT_NO_FLAGS);
map = xkb_keymap_new_from_names(ctx, &names, XKB_KEYMAP_COMPILE_NO_FLAGS);
+ xkb_context_unref(ctx);
if (!map) {
/* libxkbcommon prints error */
exit(1);
@@ -234,6 +234,8 @@ int main(int argc, char *argv[])
state = xkb_state_new(map);
xkb_keymap_key_for_each(map, walk_map, state);
+ xkb_state_unref(state);
+ xkb_keymap_unref(map);
/* add quirks */
fprintf(outfile,
diff --git a/qemu-nbd.c b/qemu-nbd.c
index d7b3cca..ed58958 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -24,8 +24,8 @@
#include "qemu/help-texts.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/runstate.h" /* for qemu_system_killed() prototype */
+#include "system/block-backend.h"
+#include "system/runstate.h" /* for qemu_system_killed() prototype */
#include "block/block_int.h"
#include "block/nbd.h"
#include "qemu/main-loop.h"
@@ -37,8 +37,8 @@
#include "qemu/log.h"
#include "qemu/systemd.h"
#include "block/snapshot.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qom/object_interfaces.h"
#include "io/channel-socket.h"
#include "io/net-listener.h"
@@ -57,19 +57,20 @@
#define HAVE_NBD_DEVICE 0
#endif
-#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
-#define QEMU_NBD_OPT_CACHE 256
-#define QEMU_NBD_OPT_AIO 257
-#define QEMU_NBD_OPT_DISCARD 258
-#define QEMU_NBD_OPT_DETECT_ZEROES 259
-#define QEMU_NBD_OPT_OBJECT 260
-#define QEMU_NBD_OPT_TLSCREDS 261
-#define QEMU_NBD_OPT_IMAGE_OPTS 262
-#define QEMU_NBD_OPT_FORK 263
-#define QEMU_NBD_OPT_TLSAUTHZ 264
-#define QEMU_NBD_OPT_PID_FILE 265
-#define QEMU_NBD_OPT_SELINUX_LABEL 266
-#define QEMU_NBD_OPT_TLSHOSTNAME 267
+#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
+#define QEMU_NBD_OPT_CACHE 256
+#define QEMU_NBD_OPT_AIO 257
+#define QEMU_NBD_OPT_DISCARD 258
+#define QEMU_NBD_OPT_DETECT_ZEROES 259
+#define QEMU_NBD_OPT_OBJECT 260
+#define QEMU_NBD_OPT_TLSCREDS 261
+#define QEMU_NBD_OPT_IMAGE_OPTS 262
+#define QEMU_NBD_OPT_FORK 263
+#define QEMU_NBD_OPT_TLSAUTHZ 264
+#define QEMU_NBD_OPT_PID_FILE 265
+#define QEMU_NBD_OPT_SELINUX_LABEL 266
+#define QEMU_NBD_OPT_TLSHOSTNAME 267
+#define QEMU_NBD_OPT_HANDSHAKE_LIMIT 268
#define MBR_SIZE 512
@@ -80,6 +81,7 @@ static int nb_fds;
static QIONetListener *server;
static QCryptoTLSCreds *tlscreds;
static const char *tlsauthz;
+static int handshake_limit = NBD_DEFAULT_HANDSHAKE_MAX_SECS;
static void usage(const char *name)
{
@@ -101,6 +103,7 @@ static void usage(const char *name)
" -v, --verbose display extra debugging information\n"
" -x, --export-name=NAME expose export by name (default is empty string)\n"
" -D, --description=TEXT export a human-readable description\n"
+" --handshake-limit=N limit client's handshake to N seconds (default 10)\n"
"\n"
"Exposing part of the image:\n"
" -o, --offset=OFFSET offset into the image\n"
@@ -390,7 +393,8 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
nb_fds++;
nbd_update_server_watch();
- nbd_client_new(cioc, tlscreds, tlsauthz, nbd_client_closed);
+ nbd_client_new(cioc, handshake_limit,
+ tlscreds, tlsauthz, nbd_client_closed, NULL);
}
static void nbd_update_server_watch(void)
@@ -567,6 +571,8 @@ int main(int argc, char **argv)
{ "object", required_argument, NULL, QEMU_NBD_OPT_OBJECT },
{ "export-name", required_argument, NULL, 'x' },
{ "description", required_argument, NULL, 'D' },
+ { "handshake-limit", required_argument, NULL,
+ QEMU_NBD_OPT_HANDSHAKE_LIMIT },
{ "tls-creds", required_argument, NULL, QEMU_NBD_OPT_TLSCREDS },
{ "tls-hostname", required_argument, NULL, QEMU_NBD_OPT_TLSHOSTNAME },
{ "tls-authz", required_argument, NULL, QEMU_NBD_OPT_TLSAUTHZ },
@@ -588,7 +594,8 @@ int main(int argc, char **argv)
pthread_t client_thread;
const char *fmt = NULL;
Error *local_err = NULL;
- BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
+ BlockdevDetectZeroesOptions detect_zeroes =
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
QDict *options = NULL;
const char *export_name = NULL; /* defaults to "" later for server mode */
const char *export_description = NULL;
@@ -812,6 +819,13 @@ int main(int argc, char **argv)
case QEMU_NBD_OPT_SELINUX_LABEL:
selinux_label = optarg;
break;
+ case QEMU_NBD_OPT_HANDSHAKE_LIMIT:
+ if (qemu_strtoi(optarg, NULL, 0, &handshake_limit) < 0 ||
+ handshake_limit < 0) {
+ error_report("Invalid handshake limit '%s'", optarg);
+ exit(EXIT_FAILURE);
+ }
+ break;
}
}
@@ -838,10 +852,6 @@ int main(int argc, char **argv)
export_name = "";
}
- if (!trace_init_backends()) {
- exit(1);
- }
- trace_init_file();
qemu_set_log(LOG_TRACE, &error_fatal);
socket_activation = check_socket_activation();
@@ -1031,6 +1041,18 @@ int main(int argc, char **argv)
#endif /* WIN32 */
}
+ /*
+ * trace_init must be done after daemonization. Why? Because at
+ * least the simple backend spins up a helper thread as well as an
+ * atexit() handler that waits on that thread, but the helper
+ * thread won't survive a fork, leading to deadlock in the child
+ * if we initialized pre-fork.
+ */
+ if (!trace_init_backends()) {
+ exit(1);
+ }
+ trace_init_file();
+
if (opts.device != NULL && sockpath == NULL) {
sockpath = g_malloc(128);
snprintf(sockpath, 128, SOCKET_PATH, basename(opts.device));
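
The comment added above describes a classic fork-versus-threads hazard. A minimal standalone sketch (hypothetical, not QEMU's trace code; build with gcc -pthread) of why an exit handler that waits on a helper thread started before fork() deadlocks in the child:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdlib.h>
    #include <unistd.h>

    static sem_t done;

    static void *helper(void *arg)
    {
        sleep(1);          /* stand-in for flushing trace records */
        sem_post(&done);   /* tell the exit handler we are finished */
        return NULL;
    }

    static void wait_for_helper(void)
    {
        sem_wait(&done);   /* blocks forever in the child: no helper there */
    }

    int main(void)
    {
        pthread_t tid;

        sem_init(&done, 0, 0);
        pthread_create(&tid, NULL, helper, NULL);  /* init before forking: bad */
        atexit(wait_for_helper);
        if (fork() == 0) {
            /* The helper thread is not duplicated into the child, so
             * sem_post() never happens and exit() hangs in the handler. */
            exit(1);
        }
        pthread_join(tid, NULL);
        return 0;          /* parent is fine: its helper did run and post */
    }
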
diff --git a/qemu-options.hx b/qemu-options.hx
index 369ae81..1f862b1 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -38,8 +38,12 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" nvdimm=on|off controls NVDIMM support (default=off)\n"
" memory-encryption=@var{} memory encryption object to use (default=none)\n"
" hmat=on|off controls ACPI HMAT support (default=off)\n"
+#ifdef CONFIG_POSIX
+ " aux-ram-share=on|off allocate auxiliary guest RAM as shared (default: off)\n"
+#endif
" memory-backend='backend-id' specifies explicitly provided backend for main RAM (default=none)\n"
- " cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n",
+ " cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n"
+ " smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel\n",
QEMU_ARCH_ALL)
SRST
``-machine [type=]name[,prop=value[,...]]``
@@ -68,8 +72,8 @@ SRST
``vmport=on|off|auto``
Enables emulation of VMWare IO port, for vmmouse etc. auto says
- to select the value based on accel. For accel=xen the default is
- off otherwise the default is on.
+ to select the value based on accel and i8042. For accel=xen or
+ i8042=off the default is off otherwise the default is on.
``dump-guest-core=on|off``
Include guest memory in a core dump. The default is on.
@@ -101,6 +105,16 @@ SRST
Enables or disables ACPI Heterogeneous Memory Attribute Table
(HMAT) support. The default is off.
+ ``aux-ram-share=on|off``
+ Allocate auxiliary guest RAM as an anonymous file that is
+ shareable with an external process. This option applies to
+ memory allocated as a side effect of creating various devices.
+ It does not apply to memory-backend-objects, whether explicitly
+ specified on the command line, or implicitly created by the -m
+ command line option. The default is off.
+
+ To use the cpr-transfer migration mode, you must set aux-ram-share=on.
+
``memory-backend='id'``
An alternative to legacy ``-mem-path`` and ``mem-prealloc`` options.
Allows to use a memory backend as main RAM.
@@ -159,6 +173,33 @@ SRST
::
-machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512
+
+ ``smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel``
+ Define cache properties for SMP system.
+
+ ``cache=cachename`` specifies the cache that the properties will be
+ applied on. This field is the combination of cache level and cache
+ type. It supports ``l1d`` (L1 data cache), ``l1i`` (L1 instruction
+ cache), ``l2`` (L2 unified cache) and ``l3`` (L3 unified cache).
+
+ ``topology=topologylevel`` sets the cache topology level. It accepts
+ CPU topology levels including ``core``, ``module``, ``cluster``, ``die``,
+ ``socket``, ``book``, ``drawer`` and a special value ``default``. If
+ ``default`` is set, then the cache topology will follow the architecture's
+ default cache topology model. If another topology level is set, the cache
+ will be shared at the corresponding CPU topology level. For example,
+ ``topology=core`` makes the cache shared by all threads within a core.
+ Any cache that is omitted defaults to the ``default`` level.
+
+ The default cache topology model for an i386 PC machine is as follows:
+ ``l1d``, ``l1i``, and ``l2`` caches are per ``core``, while the ``l3``
+ cache is per ``die``.
+
+ Example:
+
+ ::
+
+ -machine smp-cache.0.cache=l1d,smp-cache.0.topology=core,smp-cache.1.cache=l1i,smp-cache.1.topology=core
ERST
DEF("M", HAS_ARG, QEMU_OPTION_M,
@@ -924,7 +965,7 @@ SRST
Sets the period length in microseconds.
``in|out.try-poll=on|off``
- Attempt to use poll mode with the device. Default is on.
+ Attempt to use poll mode with the device. Default is off.
``threshold=threshold``
Threshold (in microseconds) when playback starts. Default is 0.
@@ -961,7 +1002,7 @@ SRST
``in|out.buffer-count=count``
Sets the count of the buffers.
- ``in|out.try-poll=on|of``
+ ``in|out.try-poll=on|off``
Attempt to use poll mode with the device. Default is on.
``try-mmap=on|off``
@@ -1102,7 +1143,7 @@ SRST
external entity that provides the IPMI services.
A connection is made to an external BMC simulator. If you do this,
- it is strongly recommended that you use the "reconnect=" chardev
+ it is strongly recommended that you use the "reconnect-ms=" chardev
option to reconnect to the simulator if the connection is lost. Note
that if this is not used carefully, it can be a security issue, as
the interface has the ability to send resets, NMIs, and power off
@@ -1766,29 +1807,18 @@ DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev,
" [[,throttling.bps-total-max=bm]|[[,throttling.bps-read-max=rm][,throttling.bps-write-max=wm]]]\n"
" [[,throttling.iops-total-max=im]|[[,throttling.iops-read-max=irm][,throttling.iops-write-max=iwm]]]\n"
" [[,throttling.iops-size=is]]\n"
- "-fsdev proxy,id=id,socket=socket[,writeout=immediate][,readonly=on]\n"
- "-fsdev proxy,id=id,sock_fd=sock_fd[,writeout=immediate][,readonly=on]\n"
"-fsdev synth,id=id\n",
QEMU_ARCH_ALL)
SRST
``-fsdev local,id=id,path=path,security_model=security_model [,writeout=writeout][,readonly=on][,fmode=fmode][,dmode=dmode] [,throttling.option=value[,throttling.option=value[,...]]]``
\
-``-fsdev proxy,id=id,socket=socket[,writeout=writeout][,readonly=on]``
- \
-``-fsdev proxy,id=id,sock_fd=sock_fd[,writeout=writeout][,readonly=on]``
- \
``-fsdev synth,id=id[,readonly=on]``
Define a new file system device. Valid options are:
``local``
Accesses to the filesystem are done by QEMU.
- ``proxy``
- Accesses to the filesystem are done by virtfs-proxy-helper(1). This
- option is deprecated (since QEMU 8.1) and will be removed in a future
- version of QEMU. Use ``local`` instead.
-
``synth``
Synthetic filesystem, only used by QTests.
@@ -1813,8 +1843,6 @@ SRST
security model is same as passthrough except the sever won't
report failures if it fails to set file attributes like
ownership. Security model is mandatory only for local fsdriver.
- Other fsdrivers (like proxy) don't take security model as a
- parameter.
``writeout=writeout``
This is an optional argument. The only supported value is
@@ -1827,16 +1855,6 @@ SRST
Enables exporting 9p share as a readonly mount for guests. By
default read-write access is given.
- ``socket=socket``
- Enables proxy filesystem driver to use passed socket file for
- communicating with virtfs-proxy-helper(1).
-
- ``sock_fd=sock_fd``
- Enables proxy filesystem driver to use passed socket descriptor
- for communicating with virtfs-proxy-helper(1). Usually a helper
- like libvirt will create socketpair and pass one of the fds as
- sock\_fd.
-
``fmode=fmode``
Specifies the default mode for newly created files on the host.
Works only with security models "mapped-xattr" and
@@ -1889,18 +1907,12 @@ ERST
DEF("virtfs", HAS_ARG, QEMU_OPTION_virtfs,
"-virtfs local,path=path,mount_tag=tag,security_model=mapped-xattr|mapped-file|passthrough|none\n"
" [,id=id][,writeout=immediate][,readonly=on][,fmode=fmode][,dmode=dmode][,multidevs=remap|forbid|warn]\n"
- "-virtfs proxy,mount_tag=tag,socket=socket[,id=id][,writeout=immediate][,readonly=on]\n"
- "-virtfs proxy,mount_tag=tag,sock_fd=sock_fd[,id=id][,writeout=immediate][,readonly=on]\n"
"-virtfs synth,mount_tag=tag[,id=id][,readonly=on]\n",
QEMU_ARCH_ALL)
SRST
``-virtfs local,path=path,mount_tag=mount_tag ,security_model=security_model[,writeout=writeout][,readonly=on] [,fmode=fmode][,dmode=dmode][,multidevs=multidevs]``
\
-``-virtfs proxy,socket=socket,mount_tag=mount_tag [,writeout=writeout][,readonly=on]``
- \
-``-virtfs proxy,sock_fd=sock_fd,mount_tag=mount_tag [,writeout=writeout][,readonly=on]``
- \
``-virtfs synth,mount_tag=mount_tag``
Define a new virtual filesystem device and expose it to the guest using
a virtio-9p-device (a.k.a. 9pfs), which essentially means that a certain
@@ -1917,11 +1929,6 @@ SRST
``local``
Accesses to the filesystem are done by QEMU.
- ``proxy``
- Accesses to the filesystem are done by virtfs-proxy-helper(1).
- This option is deprecated (since QEMU 8.1) and will be removed in a
- future version of QEMU. Use ``local`` instead.
-
``synth``
Synthetic filesystem, only used by QTests.
@@ -1946,8 +1953,6 @@ SRST
security model is same as passthrough except the sever won't
report failures if it fails to set file attributes like
ownership. Security model is mandatory only for local fsdriver.
- Other fsdrivers (like proxy) don't take security model as a
- parameter.
``writeout=writeout``
This is an optional argument. The only supported value is
@@ -1960,16 +1965,6 @@ SRST
Enables exporting 9p share as a readonly mount for guests. By
default read-write access is given.
- ``socket=socket``
- Enables proxy filesystem driver to use passed socket file for
- communicating with virtfs-proxy-helper(1). Usually a helper like
- libvirt will create socketpair and pass one of the fds as
- sock\_fd.
-
- ``sock_fd``
- Enables proxy filesystem driver to use passed 'sock\_fd' as the
- socket descriptor for interfacing with virtfs-proxy-helper(1).
-
``fmode=fmode``
Specifies the default mode for newly created files on the host.
Works only with security models "mapped-xattr" and
@@ -1984,32 +1979,37 @@ SRST
Specifies the tag name to be used by the guest to mount this
export point.
- ``multidevs=multidevs``
- Specifies how to deal with multiple devices being shared with a
- 9p export. Supported behaviours are either "remap", "forbid" or
- "warn". The latter is the default behaviour on which virtfs 9p
- expects only one device to be shared with the same export, and
- if more than one device is shared and accessed via the same 9p
- export then only a warning message is logged (once) by qemu on
- host side. In order to avoid file ID collisions on guest you
- should either create a separate virtfs export for each device to
- be shared with guests (recommended way) or you might use "remap"
- instead which allows you to share multiple devices with only one
- export instead, which is achieved by remapping the original
- inode numbers from host to guest in a way that would prevent
- such collisions. Remapping inodes in such use cases is required
+ ``multidevs=remap|forbid|warn``
+ Specifies how to deal with multiple devices being shared with
+ the same 9p export in order to avoid file ID collisions on guest.
+ Supported behaviours are either "remap" (default), "forbid" or
+ "warn".
+
+ ``remap`` : assumes the possibility that more than one device is
+ shared with the same 9p export. Therefore inode numbers from host
+ are remapped for guest in a way that would prevent file ID
+ collisions on guest. Remapping inodes in such cases is required
because the original device IDs from host are never passed and
exposed on guest. Instead all files of an export shared with
- virtfs always share the same device id on guest. So two files
+ virtfs always share the same device ID on guest. So two files
with identical inode numbers but from actually different devices
on host would otherwise cause a file ID collision and hence
- potential misbehaviours on guest. "forbid" on the other hand
- assumes like "warn" that only one device is shared by the same
- export, however it will not only log a warning message but also
- deny access to additional devices on guest. Note though that
- "forbid" does currently not block all possible file access
- operations (e.g. readdir() would still return entries from other
- devices).
+ potential severe misbehaviours on guest.
+
+ ``warn`` : virtfs 9p expects only one device to be shared with
+ the same export. If however more than one device is shared and
+ accessed via the same 9p export then only a warning message is
+ logged (once) by QEMU on host side. No further action is performed
+ in this case that would prevent file ID collisions on guest. This
+ could thus lead to severe misbehaviours like wrong files being
+ accessed and data corruption on the exported tree.
+
+ ``forbid`` : assumes like "warn" that only one device is shared
+ by the same 9p export, however it will not only log a warning
+ message but also deny access to additional devices on guest. Note
+ though that "forbid" does currently not block all possible file
+ access operations (e.g. readdir() would still return entries from
+ other devices).
ERST
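
To make the ``remap`` behaviour above more concrete, here is a minimal sketch of one way to fold a per-device index into guest inode numbers so that identical host inode numbers on different host devices stay distinct on the guest. This is an illustration only; QEMU's actual 9p remapping scheme is more elaborate, and all names below are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DEVICES 16

    static uint64_t host_devs[MAX_DEVICES]; /* host device IDs seen so far */
    static int num_devs;

    static uint64_t remap_inode(uint64_t host_dev, uint64_t host_ino)
    {
        int idx;

        for (idx = 0; idx < num_devs; idx++) {
            if (host_devs[idx] == host_dev) {
                break;
            }
        }
        if (idx == num_devs) {
            if (num_devs == MAX_DEVICES) {
                return host_ino;        /* table full: give up on remapping */
            }
            host_devs[num_devs++] = host_dev;
        }
        /* Reserve the top bits of the guest inode for the device index. */
        return ((uint64_t)idx << 56) | (host_ino & ((1ULL << 56) - 1));
    }

    int main(void)
    {
        /* The same host inode number on two different host devices... */
        printf("%#llx\n", (unsigned long long)remap_inode(0x801, 42));
        printf("%#llx\n", (unsigned long long)remap_inode(0x802, 42));
        /* ...maps to two distinct guest inode numbers. */
        return 0;
    }
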
DEF("iscsi", HAS_ARG, QEMU_OPTION_iscsi,
@@ -2377,22 +2377,6 @@ SRST
pick the first available. (Since 2.9)
ERST
-DEF("portrait", 0, QEMU_OPTION_portrait,
- "-portrait rotate graphical output 90 deg left (only PXA LCD)\n",
- QEMU_ARCH_ALL)
-SRST
-``-portrait``
- Rotate graphical output 90 deg left (only PXA LCD).
-ERST
-
-DEF("rotate", HAS_ARG, QEMU_OPTION_rotate,
- "-rotate <deg> rotate graphical output some deg left (only PXA LCD)\n",
- QEMU_ARCH_ALL)
-SRST
-``-rotate deg``
- Rotate graphical output some deg left (only PXA LCD).
-ERST
-
DEF("vga", HAS_ARG, QEMU_OPTION_vga,
"-vga [std|cirrus|vmware|qxl|xenfb|tcx|cg3|virtio|none]\n"
" select video card type\n", QEMU_ARCH_ALL)
@@ -2704,7 +2688,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,
" specify SMBIOS type 3 fields\n"
"-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n"
" [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n"
- " [,processor-family=%d,processor-id=%d]\n"
+ " [,processor-family=%d][,processor-id=%d]\n"
" specify SMBIOS type 4 fields\n"
"-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n"
" specify SMBIOS type 8 fields\n"
@@ -2895,9 +2879,9 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev,
"-netdev socket,id=str[,fd=h][,udp=host:port][,localaddr=host:port]\n"
" configure a network backend to connect to another network\n"
" using an UDP tunnel\n"
- "-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect=seconds]\n"
- "-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect=seconds]\n"
- "-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect=seconds]\n"
+ "-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect-ms=milliseconds]\n"
+ "-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect-ms=milliseconds]\n"
+ "-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect-ms=milliseconds]\n"
" configure a network backend to connect to another network\n"
" using a socket connection in stream mode.\n"
"-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=inet,local.host=addr]\n"
@@ -3353,6 +3337,195 @@ SRST
-device e1000,netdev=n1,mac=52:54:00:12:34:56 \\
-netdev socket,id=n1,mcast=239.192.168.1:1102,localaddr=1.2.3.4
+``-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect-ms=milliseconds]``
+ Configure a network backend to connect to another QEMU virtual machine or a proxy using a TCP/IP socket.
+
+ ``server=on|off``
+ if ``on`` create a server socket
+
+ ``addr.host=host,addr.port=port``
+ socket address to listen on (server=on) or connect to (server=off)
+
+ ``to=maxport``
+ if present, this is the range of possible addresses, with port between ``port`` and ``maxport``.
+
+ ``numeric=on|off``
+ if ``on`` ``host`` and ``port`` are guaranteed to be numeric, otherwise a name resolution should be attempted (default: ``off``)
+
+ ``keep-alive=on|off``
+ enable keep-alive when connecting to this socket. Not supported for passive sockets.
+
+ ``mptcp=on|off``
+ enable multipath TCP
+
+ ``ipv4=on|off``
+ whether to accept IPv4 addresses; the default is to try both IPv4 and IPv6
+
+ ``ipv6=on|off``
+ whether to accept IPv6 addresses; the default is to try both IPv4 and IPv6
+
+ ``reconnect-ms=milliseconds``
+ for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds.
+ Setting this to zero disables this function. (default: 0)
+
+ Example (two guests connected using a TCP/IP socket):
+
+ .. parsed-literal::
+
+ # first VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:56 \\
+ -netdev stream,id=net0,server=on,addr.type=inet,addr.host=localhost,addr.port=1234
+ # second VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\
+ -netdev stream,id=net0,server=off,addr.type=inet,addr.host=localhost,addr.port=1234,reconnect-ms=5000
+
+``-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect-ms=milliseconds]``
+ Configure a network backend to connect to another QEMU virtual machine or a proxy using a stream oriented unix domain socket.
+
+ ``server=on|off``
+ if ``on`` create a server socket
+
+ ``addr.path=path``
+ filesystem path to use
+
+ ``abstract=on|off``
+ if ``on``, this is a Linux abstract socket address.
+
+ ``tight=on|off``
+ if false, pad an abstract socket address with enough null bytes to make it fill struct sockaddr_un member sun_path.
+
+ ``reconnect-ms=milliseconds``
+ for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds.
+ Setting this to zero disables this function. (default: 0)
+
+ Example (using passt as a replacement for -netdev user):
+
+ .. parsed-literal::
+
+ # start passt server as a non privileged user
+ passt
+ UNIX domain socket bound at /tmp/passt_1.socket
+ # start QEMU to connect to passt
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0 \\
+ -netdev stream,id=net0,server=off,addr.type=unix,addr.path=/tmp/passt_1.socket
+
+ Example (two guests connected using a stream oriented unix domain socket):
+
+ .. parsed-literal::
+
+ # first VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:56 \\
+ -netdev stream,id=net0,server=on,addr.type=unix,addr.path=/tmp/qemu0
+ # second VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\
+ -netdev stream,id=net0,server=off,addr.type=unix,addr.path=/tmp/qemu0,reconnect-ms=5000
+
+``-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect-ms=milliseconds]``
+ Configure a network backend to connect to another QEMU virtual machine or a proxy using a stream oriented socket file descriptor.
+
+ ``server=on|off``
+ if ``on`` create a server socket
+
+ ``addr.str=file-descriptor``
+ file descriptor number to use as a socket
+
+ ``reconnect-ms=milliseconds``
+ for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds.
+ Setting this to zero disables this function. (default: 0)
+
+``-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=inet,local.host=addr]``
+ Configure a network backend to connect to a multicast address.
+
+ ``remote.host=maddr,remote.port=port``
+ multicast address
+
+ ``local.host=addr``
+ specify the host address to send packets from
+
+ Example:
+
+ .. parsed-literal::
+
+ # launch one QEMU instance
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:56 \\
+ -netdev dgram,id=net0,remote.type=inet,remote.host=224.0.0.1,remote.port=1234
+ # launch another QEMU instance on same "bus"
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\
+ -netdev dgram,id=net0,remote.type=inet,remote.host=224.0.0.1,remote.port=1234
+ # launch yet another QEMU instance on same "bus"
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:58 \\
+ -netdev dgram,id=net0,remote.type=inet,remote.host=224.0.0.1,remote.port=1234
+
+``-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=fd,local.str=file-descriptor]``
+ Configure a network backend to connect to a multicast address using a UDP socket file descriptor.
+
+ ``remote.host=maddr,remote.port=port``
+ multicast address
+
+ ``local.str=file-descriptor``
+ File descriptor to use to send packets
+
+``-netdev dgram,id=str,local.type=inet,local.host=addr,local.port=port[,remote.type=inet,remote.host=addr,remote.port=port]``
+ Configure a network backend to connect to another QEMU virtual
+ machine or a proxy using a datagram oriented UDP/IP socket.
+
+ ``local.host=addr,local.port=port``
+ IP address to use to send the packets from
+
+ ``remote.host=addr,remote.port=port``
+ Destination IP address
+
+ Example (two guests connected using an UDP/IP socket):
+
+ .. parsed-literal::
+
+ # first VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:56 \\
+ -netdev dgram,id=net0,local.type=inet,local.host=localhost,local.port=1234,remote.type=inet,remote.host=localhost,remote.port=1235
+ # second VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\
+ -netdev dgram,id=net0,local.type=inet,local.host=localhost,local.port=1235,remote.type=inet,remote.host=localhost,remote.port=1234
+
+``-netdev dgram,id=str,local.type=unix,local.path=path[,remote.type=unix,remote.path=path]``
+ Configure a network backend to connect to another QEMU virtual
+ machine or a proxy using a datagram oriented unix socket.
+
+ ``local.path=path``
+ filesystem path to use to bind the socket
+
+ ``remote.path=path``
+ filesystem path to use as a destination (see sendto(2))
+
+ Example (two guests connected using an UDP/UNIX socket):
+
+ .. parsed-literal::
+
+ # first VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:56 \\
+ -netdev dgram,id=net0,local.type=unix,local.path=/tmp/qemu0,remote.type=unix,remote.path=/tmp/qemu1
+ # second VM
+ |qemu_system| linux.img \\
+ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\
+ -netdev dgram,id=net0,local.type=unix,local.path=/tmp/qemu1,remote.type=unix,remote.path=/tmp/qemu0
+
+``-netdev dgram,id=str,local.type=fd,local.str=file-descriptor``
+ Configure a network backend to connect to another QEMU virtual
+ machine or a proxy using a datagram oriented socket file descriptor.
+
+ ``local.str=file-descriptor``
+ File descriptor to use to send packets
+
``-netdev l2tpv3,id=id,src=srcaddr,dst=dstaddr[,srcport=srcport][,dstport=dstport],txsession=txsession[,rxsession=rxsession][,ipv6=on|off][,udp=on|off][,cookie64=on|off][,counter=on|off][,pincounter=on|off][,txcookie=txcookie][,rxcookie=rxcookie][,offset=offset]``
Configure a L2TPv3 pseudowire host network backend. L2TPv3 (RFC3931)
is a popular protocol to transport Ethernet (and other Layer 2) data
@@ -3552,9 +3725,9 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
"-chardev help\n"
"-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
"-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off]\n"
- " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,mux=on|off]\n"
+ " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds][,mux=on|off]\n"
" [,logfile=PATH][,logappend=on|off][,tls-creds=ID][,tls-authz=ID] (tcp)\n"
- "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds]\n"
+ "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds]\n"
" [,mux=on|off][,logfile=PATH][,logappend=on|off][,abstract=on|off][,tight=on|off] (unix)\n"
"-chardev udp,id=id[,host=host],port=port[,localaddr=localaddr]\n"
" [,localport=localport][,ipv4=on|off][,ipv6=on|off][,mux=on|off]\n"
@@ -3569,7 +3742,7 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
"-chardev console,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
"-chardev serial,id=id,path=path[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
#else
- "-chardev pty,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
+ "-chardev pty,id=id[,path=path][,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
"-chardev stdio,id=id[,mux=on|off][,signal=on|off][,logfile=PATH][,logappend=on|off]\n"
#endif
#ifdef CONFIG_BRLAPI
@@ -3593,7 +3766,7 @@ SRST
The general form of a character device option is:
``-chardev backend,id=id[,mux=on|off][,options]``
- Backend is one of: ``null``, ``socket``, ``udp``, ``msmouse``,
+ Backend is one of: ``null``, ``socket``, ``udp``, ``msmouse``, ``hub``,
``vc``, ``ringbuf``, ``file``, ``pipe``, ``console``, ``serial``,
``pty``, ``stdio``, ``braille``, ``parallel``,
``spicevmc``, ``spiceport``. The specific backend will determine the
@@ -3650,9 +3823,10 @@ The general form of a character device option is:
the QEMU monitor, and ``-nographic`` also multiplexes the console
and the monitor to stdio.
- There is currently no support for multiplexing in the other
- direction (where a single QEMU front end takes input and output from
- multiple chardevs).
+ If you need to aggregate data in the opposite direction (where one
+ QEMU frontend interface receives input and output from multiple
+ backend chardev devices), please refer to the paragraph below
+ regarding chardev ``hub`` aggregator device configuration.
Every backend supports the ``logfile`` option, which supplies the
path to a file to record all data transmitted via the backend. The
@@ -3665,7 +3839,7 @@ The available backends are:
A void device. This device will not emit any data, and will drop any
data it receives. The null backend does not take any options.
-``-chardev socket,id=id[,TCP options or unix options][,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,tls-creds=id][,tls-authz=id]``
+``-chardev socket,id=id[,TCP options or unix options][,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds][,tls-creds=id][,tls-authz=id]``
Create a two-way stream socket, which can be either a TCP or a unix
socket. A unix socket will be created if ``path`` is specified.
Behaviour is undefined if TCP options are specified for a unix
@@ -3682,9 +3856,9 @@ The available backends are:
``websocket=on|off`` specifies that the socket uses WebSocket protocol for
communication.
- ``reconnect`` sets the timeout for reconnecting on non-server
+ ``reconnect-ms`` sets the timeout for reconnecting on non-server
sockets when the remote end goes away. qemu will delay this many
- seconds and then attempt to reconnect. Zero disables reconnecting,
+ milliseconds and then attempt to reconnect. Zero disables reconnecting,
and is the default.
``tls-creds`` requests enablement of the TLS protocol for
@@ -3752,6 +3926,46 @@ The available backends are:
Forward QEMU's emulated msmouse events to the guest. ``msmouse``
does not take any options.
+``-chardev hub,id=id,chardevs.0=id[,chardevs.N=id]``
+ Explicitly create chardev backend hub device with the possibility
+ to aggregate input from multiple backend devices and forward it to
+ a single frontend device. Additionally, ``hub`` device takes the
+ output from the frontend device and sends it back to all the
+ connected backend devices. This allows for seamless interaction
+ between different backend devices and a single frontend
+ interface. Aggregation is supported for up to 4 chardev
+ devices. (Since 10.0)
+
+ For example, the following is a use case of 2 backend devices:
+ virtual console ``vc0`` and a pseudo TTY ``pty0`` connected to
+ a single virtio hvc console frontend device with a hub ``hub0``
+ help. Virtual console renders text to an image, which can be
+ shared over the VNC protocol. In turn, pty backend provides
+ bidirectional communication to the virtio hvc console over the
+ pseudo TTY file. The example configuration can be as follows:
+
+ ::
+
+ -chardev pty,path=/tmp/pty,id=pty0 \
+ -chardev vc,id=vc0 \
+ -chardev hub,id=hub0,chardevs.0=pty0,chardevs.1=vc0 \
+ -device virtconsole,chardev=hub0 \
+ -vnc 0.0.0.0:0
+
+ Once QEMU starts, a VNC client and any TTY emulator can be used to
+ control the single hvc console:
+
+ ::
+
+ # Start TTY emulator
+ tio /tmp/pty
+
+ # Start VNC client and switch to virtual console Ctrl-Alt-2
+ vncviewer :0
+
+ Multiple frontend devices are not supported. Stacking of
+ multiplexers and hub devices is also not supported.
+
``-chardev vc,id=id[[,width=width][,height=height]][[,cols=cols][,rows=rows]]``
Connect to a QEMU text console. ``vc`` may optionally be given a
specific size.
@@ -3808,12 +4022,22 @@ The available backends are:
``path`` specifies the name of the serial device to open.
-``-chardev pty,id=id``
- Create a new pseudo-terminal on the host and connect to it. ``pty``
- does not take any options.
+``-chardev pty,id=id[,path=path]``
+ Create a new pseudo-terminal on the host and connect to it.
``pty`` is not available on Windows hosts.
+ If ``path`` is specified, QEMU will create a symbolic link at
+ that location which points to the new PTY device.
+
+ This avoids having to make QMP or HMP monitor queries to find out
+ what the new PTY device path is.
+
+ Note that while QEMU will remove the symlink when it exits
+ gracefully, it will not do so in case of crashes or on certain
+ startup errors. It is recommended that the user checks and removes
+ the symlink after QEMU terminates to account for this.
+
``-chardev stdio,id=id[,signal=on|off]``
Connect to standard input and standard output of the QEMU process.
@@ -4008,6 +4232,13 @@ SRST
or in multiboot format.
ERST
+DEF("shim", HAS_ARG, QEMU_OPTION_shim, \
+ "-shim shim.efi use 'shim.efi' to boot the kernel\n", QEMU_ARCH_ALL)
+SRST
+``-shim shim.efi``
+ Use 'shim.efi' to boot the kernel
+ERST
+
DEF("append", HAS_ARG, QEMU_OPTION_append, \
"-append cmdline use 'cmdline' as kernel command line\n", QEMU_ARCH_ALL)
SRST
@@ -4171,8 +4402,19 @@ SRST
vc:80Cx24C
- ``pty``
- [Linux only] Pseudo TTY (a new PTY is automatically allocated)
+ ``pty[:path]``
+ [Linux only] Pseudo TTY (a new PTY is automatically allocated).
+
+ If ``path`` is specified, QEMU will create a symbolic link at
+ that location which points to the new PTY device.
+
+ This avoids having to make QMP or HMP monitor queries to find
+ out what the new PTY device path is.
+
+ Note that while QEMU will remove the symlink when it exits
+ gracefully, it will not do so in case of crashes or on certain
+ startup errors. It is recommended that the user checks and
+ removes the symlink after QEMU terminates to account for this.
``none``
No device is allocated. Note that for machine types which
@@ -4242,14 +4484,14 @@ SRST
``telnet options:``
localhost 5555
- ``tcp:[host]:port[,server=on|off][,wait=on|off][,nodelay=on|off][,reconnect=seconds]``
+ ``tcp:[host]:port[,server=on|off][,wait=on|off][,nodelay=on|off][,reconnect-ms=milliseconds]``
The TCP Net Console has two modes of operation. It can send the
serial I/O to a location or wait for a connection from a
location. By default the TCP Net Console is sent to host at the
port. If you use the ``server=on`` option QEMU will wait for a client
socket application to connect to the port before continuing,
unless the ``wait=on|off`` option was specified. The ``nodelay=on|off``
- option disables the Nagle buffering algorithm. The ``reconnect=on``
+ option disables the Nagle buffering algorithm. The ``reconnect-ms``
option only applies if ``server=no`` is set, if the connection goes
down it will attempt to reconnect at the given interval. If host
is omitted, 0.0.0.0 is assumed. Only one TCP connection at a
@@ -4279,7 +4521,7 @@ SRST
The WebSocket protocol is used instead of raw tcp socket. The
port acts as a WebSocket server. Client mode is not supported.
- ``unix:path[,server=on|off][,wait=on|off][,reconnect=seconds]``
+ ``unix:path[,server=on|off][,wait=on|off][,reconnect-ms=milliseconds]``
A unix domain socket is used instead of a tcp socket. The option
works the same as if you had specified ``-serial tcp`` except
the unix domain socket path is used for connections.
@@ -4418,21 +4660,25 @@ SRST
ERST
DEF("overcommit", HAS_ARG, QEMU_OPTION_overcommit,
- "-overcommit [mem-lock=on|off][cpu-pm=on|off]\n"
+ "-overcommit [mem-lock=on|off|on-fault][cpu-pm=on|off]\n"
" run qemu with overcommit hints\n"
- " mem-lock=on|off controls memory lock support (default: off)\n"
+ " mem-lock=on|off|on-fault controls memory lock support (default: off)\n"
" cpu-pm=on|off controls cpu power management (default: off)\n",
QEMU_ARCH_ALL)
SRST
-``-overcommit mem-lock=on|off``
+``-overcommit mem-lock=on|off|on-fault``
\
``-overcommit cpu-pm=on|off``
Run qemu with hints about host resource overcommit. The default is
to assume that host overcommits all resources.
Locking qemu and guest memory can be enabled via ``mem-lock=on``
- (disabled by default). This works when host memory is not
- overcommitted and reduces the worst-case latency for guest.
+ or ``mem-lock=on-fault`` (disabled by default). This works when
+ host memory is not overcommitted and reduces the worst-case latency for
+ guest. The on-fault option is better for reducing the memory footprint
+ since it makes allocations lazy, but the pages still get locked in place
+ once faulted by the guest or QEMU. Note that the two options are mutually
+ exclusive.
Guest ability to manage power state of host cpus (increasing latency
for other processes on the same host cpu, but decreasing latency for
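
For reference, the two locking modes described above correspond, on Linux, to different mlockall(2) flag combinations; this is an assumption about the underlying mechanism rather than something spelled out in the patch, and MCL_ONFAULT needs Linux 4.4 or newer:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* mem-lock=on: lock everything up front; the resident set grows
         * immediately to cover all current and future mappings. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            perror("mlockall(on)");
        }
        munlockall();

        /* mem-lock=on-fault: lock lazily, each page only once it has
         * actually been touched by the guest or by QEMU. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) != 0) {
            perror("mlockall(on-fault)");
        }
        return 0;
    }
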
@@ -4616,7 +4862,7 @@ SRST
Start right away with a saved state (``loadvm`` in monitor)
ERST
-#ifndef _WIN32
+#if !defined(_WIN32) && !defined(EMSCRIPTEN)
DEF("daemonize", 0, QEMU_OPTION_daemonize, \
"-daemonize daemonize QEMU after initializing\n", QEMU_ARCH_ALL)
#endif
@@ -4690,13 +4936,13 @@ SRST
with actual performance.
When the virtual cpu is sleeping, the virtual time will advance at
- default speed unless ``sleep=on`` is specified. With
- ``sleep=on``, the virtual time will jump to the next timer
+ default speed unless ``sleep=off`` is specified. With
+ ``sleep=off``, the virtual time will jump to the next timer
deadline instantly whenever the virtual cpu goes to sleep mode and
will not advance if no timer is enabled. This behavior gives
deterministic execution times from the guest point of view.
- The default if icount is enabled is ``sleep=off``.
- ``sleep=on`` cannot be used together with either ``shift=auto``
+ The default if icount is enabled is ``sleep=on``.
+ ``sleep=off`` cannot be used together with either ``shift=auto``
or ``align=on``.
``align=on`` will activate the delay algorithm which will try to
@@ -4774,10 +5020,18 @@ DEF("incoming", HAS_ARG, QEMU_OPTION_incoming, \
"-incoming exec:cmdline\n" \
" accept incoming migration on given file descriptor\n" \
" or from given external command\n" \
+ "-incoming <channel>\n" \
+ " accept incoming migration on the migration channel\n" \
"-incoming defer\n" \
" wait for the URI to be specified via migrate_incoming\n",
QEMU_ARCH_ALL)
SRST
+The -incoming option specifies the migration channel for an incoming
+migration. It may be used multiple times to specify multiple
+migration channel types. The channel type is specified in <channel>,
+or is 'main' for all other forms of -incoming. If multiple -incoming
+options are specified for a channel type, the last one takes precedence.
+
``-incoming tcp:[host]:port[,to=maxport][,ipv4=on|off][,ipv6=on|off]``
\
``-incoming rdma:host:port[,ipv4=on|off][,ipv6=on|off]``
@@ -4797,6 +5051,19 @@ SRST
Accept incoming migration as an output from specified external
command.
+``-incoming <channel>``
+ Accept incoming migration on the migration channel. For the syntax
+ of <channel>, see the QAPI documentation of ``MigrationChannel``.
+ Examples:
+ ::
+
+ -incoming '{"channel-type": "main",
+ "addr": { "transport": "socket",
+ "type": "unix",
+ "path": "my.sock" }}'
+
+ -incoming main,addr.transport=socket,addr.type=unix,addr.path=my.sock
+
``-incoming defer``
Wait for the URI to be specified via migrate\_incoming. The monitor
can be used to change settings (such as migration parameters) prior
@@ -4821,19 +5088,6 @@ SRST
``-nodefaults`` option will disable all those default devices.
ERST
-#ifndef _WIN32
-DEF("runas", HAS_ARG, QEMU_OPTION_runas, \
- "-runas user change to user id user just before starting the VM\n" \
- " user can be numeric uid:gid instead\n",
- QEMU_ARCH_ALL)
-#endif
-SRST
-``-runas user``
- Immediately before starting guest execution, drop root privileges,
- switching to the specified user. This option is deprecated, use
- ``-run-with user=...`` instead.
-ERST
-
DEF("prom-env", HAS_ARG, QEMU_OPTION_prom_env,
"-prom-env variable=value\n"
" set OpenBIOS nvram variables\n",
@@ -4995,7 +5249,7 @@ HXCOMM Internal use
DEF("qtest", HAS_ARG, QEMU_OPTION_qtest, "", QEMU_ARCH_ALL)
DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log, "", QEMU_ARCH_ALL)
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
DEF("run-with", HAS_ARG, QEMU_OPTION_run_with,
"-run-with [async-teardown=on|off][,chroot=dir][user=username|uid:gid]\n"
" Set miscellaneous QEMU process lifecycle options:\n"
@@ -5021,7 +5275,7 @@ SRST
``chroot=dir`` can be used for doing a chroot to the specified directory
immediately before starting the guest execution. This is especially useful
- in combination with -runas.
+ in combination with ``user=...``.
``user=username`` or ``user=uid:gid`` can be used to drop root privileges
before starting guest execution. QEMU will use the ``setuid`` and ``setgid``
diff --git a/qemu.nsi b/qemu.nsi
index 564d617..d419986 100644
--- a/qemu.nsi
+++ b/qemu.nsi
@@ -7,7 +7,7 @@
; This program is free software: you can redistribute it and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation, either version 2 of the License, or
-; (at your option) version 3 or any later version.
+; (at your option) any later version.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,6 +16,8 @@
;
; You should have received a copy of the GNU General Public License
; along with this program. If not, see <http://www.gnu.org/licenses/>.
+;
+; SPDX-License-Identifier: GPL-2.0-or-later
; NSIS_WIN32_MAKENSIS
@@ -202,7 +204,6 @@ Section "Uninstall"
Delete "$INSTDIR\*.bmp"
Delete "$INSTDIR\*.bin"
Delete "$INSTDIR\*.dll"
- Delete "$INSTDIR\*.dtb"
Delete "$INSTDIR\*.fd"
Delete "$INSTDIR\*.img"
Delete "$INSTDIR\*.lid"
@@ -213,6 +214,7 @@ Section "Uninstall"
Delete "$INSTDIR\qemu-io.exe"
Delete "$INSTDIR\qemu.exe"
Delete "$INSTDIR\qemu-system-*.exe"
+ RMDir /r "$INSTDIR\dtb"
RMDir /r "$INSTDIR\doc"
RMDir /r "$INSTDIR\share"
; Remove generated files
diff --git a/qga/commands-bsd.c b/qga/commands-bsd.c
index 17bddda..94ff6fe 100644
--- a/qga/commands-bsd.c
+++ b/qga/commands-bsd.c
@@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "qga-qapi-commands.h"
-#include "qapi/qmp/qerror.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "commands-common.h"
@@ -149,30 +148,6 @@ int qmp_guest_fsfreeze_do_thaw(Error **errp)
}
return ret;
}
-
-GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestDiskInfoList *qmp_guest_get_disks(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
#endif /* CONFIG_FSFREEZE */
#ifdef HAVE_GETIFADDRS
diff --git a/qga/commands-common.h b/qga/commands-common.h
index 8c1c56a..263e7c0 100644
--- a/qga/commands-common.h
+++ b/qga/commands-common.h
@@ -15,19 +15,10 @@
#if defined(__linux__)
#include <linux/fs.h>
-#ifdef FIFREEZE
-#define CONFIG_FSFREEZE
-#endif
-#ifdef FITRIM
-#define CONFIG_FSTRIM
-#endif
#endif /* __linux__ */
#ifdef __FreeBSD__
#include <ufs/ffs/fs.h>
-#ifdef UFSSUSPEND
-#define CONFIG_FSFREEZE
-#endif
#endif /* __FreeBSD__ */
#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM)
diff --git a/qga/commands-linux.c b/qga/commands-linux.c
index 214e408..9e8a934 100644
--- a/qga/commands-linux.c
+++ b/qga/commands-linux.c
@@ -13,10 +13,25 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qga-qapi-commands.h"
+#include "qapi/error.h"
#include "commands-common.h"
#include "cutils.h"
#include <mntent.h>
#include <sys/ioctl.h>
+#include <mntent.h>
+#include <linux/nvme_ioctl.h>
+#include "block/nvme.h"
+
+#ifdef CONFIG_LIBUDEV
+#include <libudev.h>
+#endif
+
+#ifdef HAVE_GETIFADDRS
+#include <net/if.h>
+#endif
+
+#include <sys/statvfs.h>
#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM)
static int dev_major_minor(const char *devpath,
@@ -43,6 +58,22 @@ static int dev_major_minor(const char *devpath,
return -1;
}
+/*
+ * Check if we already have the devmajor:devminor in the mounts
+ * If that's the case, return true.
+ */
+static bool dev_exists(FsMountList *mounts, unsigned int devmajor, unsigned int devminor)
+{
+ FsMount *mount;
+
+ QTAILQ_FOREACH(mount, mounts, next) {
+ if (mount->devmajor == devmajor && mount->devminor == devminor) {
+ return true;
+ }
+ }
+ return false;
+}
+
static bool build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp)
{
struct mntent *ment;
@@ -73,6 +104,10 @@ static bool build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp)
/* Skip bind mounts */
continue;
}
+ if (dev_exists(mounts, devmajor, devminor)) {
+ /* Skip already existing devices (bind mounts) */
+ continue;
+ }
mount = g_new0(FsMount, 1);
mount->dirname = g_strdup(ment->mnt_dir);
@@ -156,6 +191,11 @@ bool build_fs_mount_list(FsMountList *mounts, Error **errp)
}
}
+ if (dev_exists(mounts, devmajor, devminor)) {
+ /* Skip already existing devices (bind mounts) */
+ continue;
+ }
+
mount = g_new0(FsMount, 1);
mount->dirname = g_strdup(line + dir_s);
mount->devtype = g_strdup(dash + type_s);
@@ -284,3 +324,1921 @@ int qmp_guest_fsfreeze_do_thaw(Error **errp)
return i;
}
#endif /* CONFIG_FSFREEZE */
+
+#if defined(CONFIG_FSFREEZE)
+
+static char *get_pci_driver(char const *syspath, int pathlen, Error **errp)
+{
+ char *path;
+ char *dpath;
+ char *driver = NULL;
+ char buf[PATH_MAX];
+ ssize_t len;
+
+ path = g_strndup(syspath, pathlen);
+ dpath = g_strdup_printf("%s/driver", path);
+ len = readlink(dpath, buf, sizeof(buf) - 1);
+ if (len != -1) {
+ buf[len] = 0;
+ driver = g_path_get_basename(buf);
+ }
+ g_free(dpath);
+ g_free(path);
+ return driver;
+}
+
+static int compare_uint(const void *_a, const void *_b)
+{
+ unsigned int a = *(unsigned int *)_a;
+ unsigned int b = *(unsigned int *)_b;
+
+ return a < b ? -1 : a > b ? 1 : 0;
+}
+
+/* Walk the specified sysfs and build a sorted list of host or ata numbers */
+static int build_hosts(char const *syspath, char const *host, bool ata,
+ unsigned int *hosts, int hosts_max, Error **errp)
+{
+ char *path;
+ DIR *dir;
+ struct dirent *entry;
+ int i = 0;
+
+ path = g_strndup(syspath, host - syspath);
+ dir = opendir(path);
+ if (!dir) {
+ error_setg_errno(errp, errno, "opendir(\"%s\")", path);
+ g_free(path);
+ return -1;
+ }
+
+ while (i < hosts_max) {
+ entry = readdir(dir);
+ if (!entry) {
+ break;
+ }
+ if (ata && sscanf(entry->d_name, "ata%d", hosts + i) == 1) {
+ ++i;
+ } else if (!ata && sscanf(entry->d_name, "host%d", hosts + i) == 1) {
+ ++i;
+ }
+ }
+
+ qsort(hosts, i, sizeof(hosts[0]), compare_uint);
+
+ g_free(path);
+ closedir(dir);
+ return i;
+}
+
+/*
+ * Store disk device info for devices on the PCI bus.
+ * Returns true if information has been stored, or false for failure.
+ */
+static bool build_guest_fsinfo_for_pci_dev(char const *syspath,
+ GuestDiskAddress *disk,
+ Error **errp)
+{
+ unsigned int pci[4], host, hosts[8], tgt[3];
+ int i, nhosts = 0, pcilen;
+ GuestPCIAddress *pciaddr = disk->pci_controller;
+ bool has_ata = false, has_host = false, has_tgt = false;
+ char *p, *q, *driver = NULL;
+ bool ret = false;
+
+ p = strstr(syspath, "/devices/pci");
+ if (!p || sscanf(p + 12, "%*x:%*x/%x:%x:%x.%x%n",
+ pci, pci + 1, pci + 2, pci + 3, &pcilen) < 4) {
+ g_debug("only pci device is supported: sysfs path '%s'", syspath);
+ return false;
+ }
+
+ p += 12 + pcilen;
+ while (true) {
+ driver = get_pci_driver(syspath, p - syspath, errp);
+ if (driver && (g_str_equal(driver, "ata_piix") ||
+ g_str_equal(driver, "sym53c8xx") ||
+ g_str_equal(driver, "virtio-pci") ||
+ g_str_equal(driver, "ahci") ||
+ g_str_equal(driver, "nvme") ||
+ g_str_equal(driver, "xhci_hcd") ||
+ g_str_equal(driver, "ehci-pci"))) {
+ break;
+ }
+
+ g_free(driver);
+ if (sscanf(p, "/%x:%x:%x.%x%n",
+ pci, pci + 1, pci + 2, pci + 3, &pcilen) == 4) {
+ p += pcilen;
+ continue;
+ }
+
+ g_debug("unsupported driver or sysfs path '%s'", syspath);
+ return false;
+ }
+
+ p = strstr(syspath, "/target");
+ if (p && sscanf(p + 7, "%*u:%*u:%*u/%*u:%u:%u:%u",
+ tgt, tgt + 1, tgt + 2) == 3) {
+ has_tgt = true;
+ }
+
+ p = strstr(syspath, "/ata");
+ if (p) {
+ q = p + 4;
+ has_ata = true;
+ } else {
+ p = strstr(syspath, "/host");
+ q = p + 5;
+ }
+ if (p && sscanf(q, "%u", &host) == 1) {
+ has_host = true;
+ nhosts = build_hosts(syspath, p, has_ata, hosts,
+ ARRAY_SIZE(hosts), errp);
+ if (nhosts < 0) {
+ goto cleanup;
+ }
+ }
+
+ pciaddr->domain = pci[0];
+ pciaddr->bus = pci[1];
+ pciaddr->slot = pci[2];
+ pciaddr->function = pci[3];
+
+ if (strcmp(driver, "ata_piix") == 0) {
+ /* a host per ide bus, target*:0:<unit>:0 */
+ if (!has_host || !has_tgt) {
+ g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
+ goto cleanup;
+ }
+ for (i = 0; i < nhosts; i++) {
+ if (host == hosts[i]) {
+ disk->bus_type = GUEST_DISK_BUS_TYPE_IDE;
+ disk->bus = i;
+ disk->unit = tgt[1];
+ break;
+ }
+ }
+ if (i >= nhosts) {
+ g_debug("no host for '%s' (driver '%s')", syspath, driver);
+ goto cleanup;
+ }
+ } else if (strcmp(driver, "sym53c8xx") == 0) {
+ /* scsi(LSI Logic): target*:0:<unit>:0 */
+ if (!has_tgt) {
+ g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
+ goto cleanup;
+ }
+ disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
+ disk->unit = tgt[1];
+ } else if (strcmp(driver, "virtio-pci") == 0) {
+ if (has_tgt) {
+ /* virtio-scsi: target*:0:0:<unit> */
+ disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
+ disk->unit = tgt[2];
+ } else {
+ /* virtio-blk: 1 disk per 1 device */
+ disk->bus_type = GUEST_DISK_BUS_TYPE_VIRTIO;
+ }
+ } else if (strcmp(driver, "ahci") == 0) {
+ /* ahci: 1 host per 1 unit */
+ if (!has_host || !has_tgt) {
+ g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
+ goto cleanup;
+ }
+ for (i = 0; i < nhosts; i++) {
+ if (host == hosts[i]) {
+ disk->unit = i;
+ disk->bus_type = GUEST_DISK_BUS_TYPE_SATA;
+ break;
+ }
+ }
+ if (i >= nhosts) {
+ g_debug("no host for '%s' (driver '%s')", syspath, driver);
+ goto cleanup;
+ }
+ } else if (strcmp(driver, "nvme") == 0) {
+ disk->bus_type = GUEST_DISK_BUS_TYPE_NVME;
+ } else if (strcmp(driver, "ehci-pci") == 0 || strcmp(driver, "xhci_hcd") == 0) {
+ disk->bus_type = GUEST_DISK_BUS_TYPE_USB;
+ } else {
+ g_debug("unknown driver '%s' (sysfs path '%s')", driver, syspath);
+ goto cleanup;
+ }
+
+ ret = true;
+
+cleanup:
+ g_free(driver);
+ return ret;
+}
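
A minimal standalone sketch of the PCI address parse performed above. The sysfs path is hypothetical and only illustrates the sscanf format; the agent obtains real paths by resolving /sys/dev/block/<major>:<minor>.

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Hypothetical sysfs path for a SATA disk behind an AHCI controller */
    const char *syspath =
        "/sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/"
        "target0:0:0/0:0:0:0/block/sda";
    unsigned int pci[4];
    int pcilen;
    const char *p = strstr(syspath, "/devices/pci");

    /* "%*x:%*x/" skips the root bus ("0000:00"); the next four conversions
     * capture domain:bus:slot.function of the controller, and %n records
     * how many characters were consumed. */
    if (p && sscanf(p + 12, "%*x:%*x/%x:%x:%x.%x%n",
                    pci, pci + 1, pci + 2, pci + 3, &pcilen) >= 4) {
        printf("domain %x bus %x slot %x function %x\n",
               pci[0], pci[1], pci[2], pci[3]);   /* 0 0 1f 2 */
    }
    return 0;
}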
+
+/*
+ * Store disk device info for non-PCI virtio devices (for example s390x
+ * channel I/O devices). Returns true if information has been stored, or
+ * false for failure.
+ */
+static bool build_guest_fsinfo_for_nonpci_virtio(char const *syspath,
+ GuestDiskAddress *disk,
+ Error **errp)
+{
+ unsigned int tgt[3];
+ char *p;
+
+ if (!strstr(syspath, "/virtio") || !strstr(syspath, "/block")) {
+ g_debug("Unsupported virtio device '%s'", syspath);
+ return false;
+ }
+
+ p = strstr(syspath, "/target");
+ if (p && sscanf(p + 7, "%*u:%*u:%*u/%*u:%u:%u:%u",
+ &tgt[0], &tgt[1], &tgt[2]) == 3) {
+ /* virtio-scsi: target*:0:<target>:<unit> */
+ disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
+ disk->bus = tgt[0];
+ disk->target = tgt[1];
+ disk->unit = tgt[2];
+ } else {
+ /* virtio-blk: 1 disk per 1 device */
+ disk->bus_type = GUEST_DISK_BUS_TYPE_VIRTIO;
+ }
+
+ return true;
+}
+
+/*
+ * Store disk device info for CCW devices (s390x channel I/O devices).
+ * Returns true if information has been stored, or false for failure.
+ */
+static bool build_guest_fsinfo_for_ccw_dev(char const *syspath,
+ GuestDiskAddress *disk,
+ Error **errp)
+{
+ unsigned int cssid, ssid, subchno, devno;
+ char *p;
+
+ p = strstr(syspath, "/devices/css");
+ if (!p || sscanf(p + 12, "%*x/%x.%x.%x/%*x.%*x.%x/",
+ &cssid, &ssid, &subchno, &devno) < 4) {
+ g_debug("could not parse ccw device sysfs path: %s", syspath);
+ return false;
+ }
+
+ disk->ccw_address = g_new0(GuestCCWAddress, 1);
+ disk->ccw_address->cssid = cssid;
+ disk->ccw_address->ssid = ssid;
+ disk->ccw_address->subchno = subchno;
+ disk->ccw_address->devno = devno;
+
+ if (strstr(p, "/virtio")) {
+ build_guest_fsinfo_for_nonpci_virtio(syspath, disk, errp);
+ }
+
+ return true;
+}
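
The CCW variant follows the same pattern; a standalone sketch with a made-up s390x path:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Hypothetical path: css image 0, subchannel 0.0.0000, device 0.0.0000 */
    const char *syspath =
        "/sys/devices/css0/0.0.0000/0.0.0000/virtio0/block/vda";
    unsigned int cssid, ssid, subchno, devno;
    const char *p = strstr(syspath, "/devices/css");

    /* Skip the css index, read the subchannel id x.y.z, then take only the
     * device number from the bus id that follows. */
    if (p && sscanf(p + 12, "%*x/%x.%x.%x/%*x.%*x.%x/",
                    &cssid, &ssid, &subchno, &devno) == 4) {
        printf("cssid %x ssid %x subchno %x devno %x\n",
               cssid, ssid, subchno, devno);
    }
    return 0;
}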
+
+/* Store disk device info specified by @sysfs into @fs */
+static void build_guest_fsinfo_for_real_device(char const *syspath,
+ GuestFilesystemInfo *fs,
+ Error **errp)
+{
+ GuestDiskAddress *disk;
+ GuestPCIAddress *pciaddr;
+ bool has_hwinf;
+#ifdef CONFIG_LIBUDEV
+ struct udev *udev = NULL;
+ struct udev_device *udevice = NULL;
+#endif
+
+ pciaddr = g_new0(GuestPCIAddress, 1);
+ pciaddr->domain = -1; /* -1 means field is invalid */
+ pciaddr->bus = -1;
+ pciaddr->slot = -1;
+ pciaddr->function = -1;
+
+ disk = g_new0(GuestDiskAddress, 1);
+ disk->pci_controller = pciaddr;
+ disk->bus_type = GUEST_DISK_BUS_TYPE_UNKNOWN;
+
+#ifdef CONFIG_LIBUDEV
+ udev = udev_new();
+ udevice = udev_device_new_from_syspath(udev, syspath);
+ if (udev == NULL || udevice == NULL) {
+ g_debug("failed to query udev");
+ } else {
+ const char *devnode, *serial;
+ devnode = udev_device_get_devnode(udevice);
+ if (devnode != NULL) {
+ disk->dev = g_strdup(devnode);
+ }
+ serial = udev_device_get_property_value(udevice, "ID_SERIAL");
+ if (serial != NULL && *serial != 0) {
+ disk->serial = g_strdup(serial);
+ }
+ }
+
+ udev_unref(udev);
+ udev_device_unref(udevice);
+#endif
+
+ if (strstr(syspath, "/devices/pci")) {
+ has_hwinf = build_guest_fsinfo_for_pci_dev(syspath, disk, errp);
+ } else if (strstr(syspath, "/devices/css")) {
+ has_hwinf = build_guest_fsinfo_for_ccw_dev(syspath, disk, errp);
+ } else if (strstr(syspath, "/virtio")) {
+ has_hwinf = build_guest_fsinfo_for_nonpci_virtio(syspath, disk, errp);
+ } else {
+ g_debug("Unsupported device type for '%s'", syspath);
+ has_hwinf = false;
+ }
+
+ if (has_hwinf || disk->dev || disk->serial) {
+ QAPI_LIST_PREPEND(fs->disk, disk);
+ } else {
+ qapi_free_GuestDiskAddress(disk);
+ }
+}
+
+static void build_guest_fsinfo_for_device(char const *devpath,
+ GuestFilesystemInfo *fs,
+ Error **errp);
+
+/* Store a list of slave devices of the virtual volume specified by @syspath
+ * into @fs */
+static void build_guest_fsinfo_for_virtual_device(char const *syspath,
+ GuestFilesystemInfo *fs,
+ Error **errp)
+{
+ Error *err = NULL;
+ DIR *dir;
+ char *dirpath;
+ struct dirent *entry;
+
+ dirpath = g_strdup_printf("%s/slaves", syspath);
+ dir = opendir(dirpath);
+ if (!dir) {
+ if (errno != ENOENT) {
+ error_setg_errno(errp, errno, "opendir(\"%s\")", dirpath);
+ }
+ g_free(dirpath);
+ return;
+ }
+
+ for (;;) {
+ errno = 0;
+ entry = readdir(dir);
+ if (entry == NULL) {
+ if (errno) {
+ error_setg_errno(errp, errno, "readdir(\"%s\")", dirpath);
+ }
+ break;
+ }
+
+ if (entry->d_type == DT_LNK) {
+ char *path;
+
+ g_debug(" slave device '%s'", entry->d_name);
+ path = g_strdup_printf("%s/slaves/%s", syspath, entry->d_name);
+ build_guest_fsinfo_for_device(path, fs, &err);
+ g_free(path);
+
+ if (err) {
+ error_propagate(errp, err);
+ break;
+ }
+ }
+ }
+
+ g_free(dirpath);
+ closedir(dir);
+}
+
+static bool is_disk_virtual(const char *devpath, Error **errp)
+{
+ g_autofree char *syspath = realpath(devpath, NULL);
+
+ if (!syspath) {
+ error_setg_errno(errp, errno, "realpath(\"%s\")", devpath);
+ return false;
+ }
+ return strstr(syspath, "/devices/virtual/block/") != NULL;
+}
+
+/* Dispatch to functions for virtual/real device */
+static void build_guest_fsinfo_for_device(char const *devpath,
+ GuestFilesystemInfo *fs,
+ Error **errp)
+{
+ ERRP_GUARD();
+ g_autofree char *syspath = NULL;
+ bool is_virtual = false;
+
+ syspath = realpath(devpath, NULL);
+ if (!syspath) {
+ if (errno != ENOENT) {
+ error_setg_errno(errp, errno, "realpath(\"%s\")", devpath);
+ return;
+ }
+
+ /* ENOENT: This devpath may not exist because of container config */
+ if (!fs->name) {
+ fs->name = g_path_get_basename(devpath);
+ }
+ return;
+ }
+
+ if (!fs->name) {
+ fs->name = g_path_get_basename(syspath);
+ }
+
+ g_debug(" parse sysfs path '%s'", syspath);
+ is_virtual = is_disk_virtual(syspath, errp);
+ if (*errp != NULL) {
+ return;
+ }
+ if (is_virtual) {
+ build_guest_fsinfo_for_virtual_device(syspath, fs, errp);
+ } else {
+ build_guest_fsinfo_for_real_device(syspath, fs, errp);
+ }
+}
+
+#ifdef CONFIG_LIBUDEV
+
+/*
+ * Wrapper around build_guest_fsinfo_for_device() for getting just
+ * the disk address.
+ */
+static GuestDiskAddress *get_disk_address(const char *syspath, Error **errp)
+{
+ g_autoptr(GuestFilesystemInfo) fs = NULL;
+
+ fs = g_new0(GuestFilesystemInfo, 1);
+ build_guest_fsinfo_for_device(syspath, fs, errp);
+ if (fs->disk != NULL) {
+ return g_steal_pointer(&fs->disk->value);
+ }
+ return NULL;
+}
+
+static char *get_alias_for_syspath(const char *syspath)
+{
+ struct udev *udev = NULL;
+ struct udev_device *udevice = NULL;
+ char *ret = NULL;
+
+ udev = udev_new();
+ if (udev == NULL) {
+ g_debug("failed to query udev");
+ goto out;
+ }
+ udevice = udev_device_new_from_syspath(udev, syspath);
+ if (udevice == NULL) {
+ g_debug("failed to query udev for path: %s", syspath);
+ goto out;
+ } else {
+ const char *alias = udev_device_get_property_value(
+ udevice, "DM_NAME");
+ /*
+ * NULL means there was an error and empty string means there is no
+ * alias. In case of no alias we return NULL instead of empty string.
+ */
+ if (alias == NULL) {
+ g_debug("failed to query udev for device alias for: %s",
+ syspath);
+ } else if (*alias != 0) {
+ ret = g_strdup(alias);
+ }
+ }
+
+out:
+ udev_unref(udev);
+ udev_device_unref(udevice);
+ return ret;
+}
+
+static char *get_device_for_syspath(const char *syspath)
+{
+ struct udev *udev = NULL;
+ struct udev_device *udevice = NULL;
+ char *ret = NULL;
+
+ udev = udev_new();
+ if (udev == NULL) {
+ g_debug("failed to query udev");
+ goto out;
+ }
+ udevice = udev_device_new_from_syspath(udev, syspath);
+ if (udevice == NULL) {
+ g_debug("failed to query udev for path: %s", syspath);
+ goto out;
+ } else {
+ ret = g_strdup(udev_device_get_devnode(udevice));
+ }
+
+out:
+ udev_unref(udev);
+ udev_device_unref(udevice);
+ return ret;
+}
+
+static void get_disk_deps(const char *disk_dir, GuestDiskInfo *disk)
+{
+ g_autofree char *deps_dir = NULL;
+ const gchar *dep;
+ GDir *dp_deps = NULL;
+
+ /* List dependent disks */
+ deps_dir = g_strdup_printf("%s/slaves", disk_dir);
+ g_debug(" listing entries in: %s", deps_dir);
+ dp_deps = g_dir_open(deps_dir, 0, NULL);
+ if (dp_deps == NULL) {
+ g_debug("failed to list entries in %s", deps_dir);
+ return;
+ }
+ disk->has_dependencies = true;
+ while ((dep = g_dir_read_name(dp_deps)) != NULL) {
+ g_autofree char *dep_dir = NULL;
+ char *dev_name;
+
+ /* Add dependent disks */
+ dep_dir = g_strdup_printf("%s/%s", deps_dir, dep);
+ dev_name = get_device_for_syspath(dep_dir);
+ if (dev_name != NULL) {
+ g_debug(" adding dependent device: %s", dev_name);
+ QAPI_LIST_PREPEND(disk->dependencies, dev_name);
+ }
+ }
+ g_dir_close(dp_deps);
+}
+
+/*
+ * Detect partitions subdirectory, name is "<disk_name><number>" or
+ * "<disk_name>p<number>"
+ *
+ * @disk_name -- last component of /sys path (e.g. sda)
+ * @disk_dir -- sys path of the disk (e.g. /sys/block/sda)
+ * @disk_dev -- device node of the disk (e.g. /dev/sda)
+ */
+static GuestDiskInfoList *get_disk_partitions(
+ GuestDiskInfoList *list,
+ const char *disk_name, const char *disk_dir,
+ const char *disk_dev)
+{
+ GuestDiskInfoList *ret = list;
+ struct dirent *de_disk;
+ DIR *dp_disk = NULL;
+ size_t len = strlen(disk_name);
+
+ dp_disk = opendir(disk_dir);
+ while ((de_disk = readdir(dp_disk)) != NULL) {
+ g_autofree char *partition_dir = NULL;
+ char *dev_name;
+ GuestDiskInfo *partition;
+
+ if (!(de_disk->d_type & DT_DIR)) {
+ continue;
+ }
+
+ if (!(strncmp(disk_name, de_disk->d_name, len) == 0 &&
+ ((*(de_disk->d_name + len) == 'p' &&
+ isdigit(*(de_disk->d_name + len + 1))) ||
+ isdigit(*(de_disk->d_name + len))))) {
+ continue;
+ }
+
+ partition_dir = g_strdup_printf("%s/%s",
+ disk_dir, de_disk->d_name);
+ dev_name = get_device_for_syspath(partition_dir);
+ if (dev_name == NULL) {
+ g_debug("Failed to get device name for syspath: %s",
+ disk_dir);
+ continue;
+ }
+ partition = g_new0(GuestDiskInfo, 1);
+ partition->name = dev_name;
+ partition->partition = true;
+ partition->has_dependencies = true;
+ /* Add parent disk as dependent for easier tracking of hierarchy */
+ QAPI_LIST_PREPEND(partition->dependencies, g_strdup(disk_dev));
+
+ QAPI_LIST_PREPEND(ret, partition);
+ }
+ closedir(dp_disk);
+
+ return ret;
+}
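
The name test above has to cope with disks whose own names end in a digit (nvme0n1, mmcblk0), which is why the "<disk_name>p<number>" form is checked first; a self-contained sketch of the same predicate with made-up names:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 if @entry names a partition of @disk_name, mirroring the check
 * in get_disk_partitions(). */
static int is_partition_of(const char *disk_name, const char *entry)
{
    size_t len = strlen(disk_name);

    if (strncmp(disk_name, entry, len) != 0) {
        return 0;
    }
    return (entry[len] == 'p' && isdigit((unsigned char)entry[len + 1])) ||
           isdigit((unsigned char)entry[len]);
}

int main(void)
{
    printf("%d\n", is_partition_of("sda", "sda1"));          /* 1 */
    printf("%d\n", is_partition_of("nvme0n1", "nvme0n1p2")); /* 1 */
    printf("%d\n", is_partition_of("sda", "sdb1"));          /* 0 */
    return 0;
}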
+
+static void get_nvme_smart(GuestDiskInfo *disk)
+{
+ int fd;
+ GuestNVMeSmart *smart;
+ NvmeSmartLog log = {0};
+ struct nvme_admin_cmd cmd = {
+ .opcode = NVME_ADM_CMD_GET_LOG_PAGE,
+ .nsid = NVME_NSID_BROADCAST,
+ .addr = (uintptr_t)&log,
+ .data_len = sizeof(log),
+ .cdw10 = NVME_LOG_SMART_INFO | (1 << 15) /* RAE bit */
+ | (((sizeof(log) >> 2) - 1) << 16)
+ };
+
+ fd = qga_open_cloexec(disk->name, O_RDONLY, 0);
+ if (fd == -1) {
+ g_debug("Failed to open device: %s: %s", disk->name, g_strerror(errno));
+ return;
+ }
+
+ if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd)) {
+ g_debug("Failed to get smart: %s: %s", disk->name, g_strerror(errno));
+ close(fd);
+ return;
+ }
+
+ disk->smart = g_new0(GuestDiskSmart, 1);
+ disk->smart->type = GUEST_DISK_BUS_TYPE_NVME;
+
+ smart = &disk->smart->u.nvme;
+ smart->critical_warning = log.critical_warning;
+ smart->temperature = lduw_le_p(&log.temperature); /* unaligned field */
+ smart->available_spare = log.available_spare;
+ smart->available_spare_threshold = log.available_spare_threshold;
+ smart->percentage_used = log.percentage_used;
+ smart->data_units_read_lo = le64_to_cpu(log.data_units_read[0]);
+ smart->data_units_read_hi = le64_to_cpu(log.data_units_read[1]);
+ smart->data_units_written_lo = le64_to_cpu(log.data_units_written[0]);
+ smart->data_units_written_hi = le64_to_cpu(log.data_units_written[1]);
+ smart->host_read_commands_lo = le64_to_cpu(log.host_read_commands[0]);
+ smart->host_read_commands_hi = le64_to_cpu(log.host_read_commands[1]);
+ smart->host_write_commands_lo = le64_to_cpu(log.host_write_commands[0]);
+ smart->host_write_commands_hi = le64_to_cpu(log.host_write_commands[1]);
+ smart->controller_busy_time_lo = le64_to_cpu(log.controller_busy_time[0]);
+ smart->controller_busy_time_hi = le64_to_cpu(log.controller_busy_time[1]);
+ smart->power_cycles_lo = le64_to_cpu(log.power_cycles[0]);
+ smart->power_cycles_hi = le64_to_cpu(log.power_cycles[1]);
+ smart->power_on_hours_lo = le64_to_cpu(log.power_on_hours[0]);
+ smart->power_on_hours_hi = le64_to_cpu(log.power_on_hours[1]);
+ smart->unsafe_shutdowns_lo = le64_to_cpu(log.unsafe_shutdowns[0]);
+ smart->unsafe_shutdowns_hi = le64_to_cpu(log.unsafe_shutdowns[1]);
+ smart->media_errors_lo = le64_to_cpu(log.media_errors[0]);
+ smart->media_errors_hi = le64_to_cpu(log.media_errors[1]);
+ smart->number_of_error_log_entries_lo =
+ le64_to_cpu(log.number_of_error_log_entries[0]);
+ smart->number_of_error_log_entries_hi =
+ le64_to_cpu(log.number_of_error_log_entries[1]);
+
+ close(fd);
+}
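
A short sketch of how the cdw10 word above is put together for the Get Log Page admin command: bits 7:0 hold the log identifier (0x02, SMART/health), bit 15 the Retain Asynchronous Event flag, and bits 27:16 the number of dwords to transfer minus one. The 512-byte size assumed below matches the SMART/health log page defined by the NVMe specification.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t log_id = 0x02;            /* SMART / health information */
    const uint32_t rae = 1u << 15;           /* retain asynchronous event */
    const uint32_t log_bytes = 512;          /* size of the SMART log page */
    const uint32_t numdl = ((log_bytes >> 2) - 1) << 16;   /* dwords - 1 */

    printf("cdw10 = 0x%08x\n", log_id | rae | numdl);       /* 0x007f8002 */
    return 0;
}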
+
+static void get_disk_smart(GuestDiskInfo *disk)
+{
+ if (disk->address
+ && (disk->address->bus_type == GUEST_DISK_BUS_TYPE_NVME)) {
+ get_nvme_smart(disk);
+ }
+}
+
+GuestDiskInfoList *qmp_guest_get_disks(Error **errp)
+{
+ GuestDiskInfoList *ret = NULL;
+ GuestDiskInfo *disk;
+ DIR *dp = NULL;
+ struct dirent *de = NULL;
+
+ g_debug("listing /sys/block directory");
+ dp = opendir("/sys/block");
+ if (dp == NULL) {
+ error_setg_errno(errp, errno, "Can't open directory \"/sys/block\"");
+ return NULL;
+ }
+ while ((de = readdir(dp)) != NULL) {
+ g_autofree char *disk_dir = NULL, *line = NULL,
+ *size_path = NULL;
+ char *dev_name;
+ Error *local_err = NULL;
+ if (de->d_type != DT_LNK) {
+ g_debug(" skipping entry: %s", de->d_name);
+ continue;
+ }
+
+ /* Check size and skip zero-sized disks */
+ g_debug(" checking disk size");
+ size_path = g_strdup_printf("/sys/block/%s/size", de->d_name);
+ if (!g_file_get_contents(size_path, &line, NULL, NULL)) {
+ g_debug(" failed to read disk size");
+ continue;
+ }
+ if (g_strcmp0(line, "0\n") == 0) {
+ g_debug(" skipping zero-sized disk");
+ continue;
+ }
+
+ g_debug(" adding %s", de->d_name);
+ disk_dir = g_strdup_printf("/sys/block/%s", de->d_name);
+ dev_name = get_device_for_syspath(disk_dir);
+ if (dev_name == NULL) {
+ g_debug("Failed to get device name for syspath: %s",
+ disk_dir);
+ continue;
+ }
+ disk = g_new0(GuestDiskInfo, 1);
+ disk->name = dev_name;
+ disk->partition = false;
+ disk->alias = get_alias_for_syspath(disk_dir);
+ QAPI_LIST_PREPEND(ret, disk);
+
+ /* Get address for non-virtual devices */
+ bool is_virtual = is_disk_virtual(disk_dir, &local_err);
+ if (local_err != NULL) {
+ g_debug(" failed to check disk path, ignoring error: %s",
+ error_get_pretty(local_err));
+ error_free(local_err);
+ local_err = NULL;
+ /* Don't try to get the address */
+ is_virtual = true;
+ }
+ if (!is_virtual) {
+ disk->address = get_disk_address(disk_dir, &local_err);
+ if (local_err != NULL) {
+ g_debug(" failed to get device info, ignoring error: %s",
+ error_get_pretty(local_err));
+ error_free(local_err);
+ local_err = NULL;
+ }
+ }
+
+ get_disk_deps(disk_dir, disk);
+ get_disk_smart(disk);
+ ret = get_disk_partitions(ret, de->d_name, disk_dir, dev_name);
+ }
+
+ closedir(dp);
+
+ return ret;
+}
+
+#endif
+
+/* Return a list of the disk device(s)' info which @mount lies on */
+static GuestFilesystemInfo *build_guest_fsinfo(struct FsMount *mount,
+ Error **errp)
+{
+ GuestFilesystemInfo *fs = g_malloc0(sizeof(*fs));
+ struct statvfs buf;
+ unsigned long used, nonroot_total, fr_size;
+ char *devpath = g_strdup_printf("/sys/dev/block/%u:%u",
+ mount->devmajor, mount->devminor);
+
+ fs->mountpoint = g_strdup(mount->dirname);
+ fs->type = g_strdup(mount->devtype);
+ build_guest_fsinfo_for_device(devpath, fs, errp);
+
+ if (statvfs(fs->mountpoint, &buf) == 0) {
+ fr_size = buf.f_frsize;
+ used = buf.f_blocks - buf.f_bfree;
+ nonroot_total = used + buf.f_bavail;
+ fs->used_bytes = used * fr_size;
+ fs->total_bytes = nonroot_total * fr_size;
+ fs->total_bytes_privileged = buf.f_blocks * fr_size;
+
+ fs->has_total_bytes = true;
+ fs->has_total_bytes_privileged = true;
+ fs->has_used_bytes = true;
+ }
+
+ g_free(devpath);
+
+ return fs;
+}
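
The statvfs() arithmetic above distinguishes the non-root total (what an unprivileged user can fill, i.e. used plus f_bavail) from the privileged total (all f_blocks). A minimal standalone version, querying "/" purely as an example:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
    struct statvfs buf;

    if (statvfs("/", &buf) == 0) {
        unsigned long long fr_size = buf.f_frsize;
        unsigned long long used = buf.f_blocks - buf.f_bfree;
        unsigned long long nonroot_total = used + buf.f_bavail;

        printf("used:             %llu bytes\n", used * fr_size);
        printf("total (non-root): %llu bytes\n", nonroot_total * fr_size);
        printf("total (all):      %llu bytes\n",
               (unsigned long long)buf.f_blocks * fr_size);
    }
    return 0;
}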
+
+GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp)
+{
+ FsMountList mounts;
+ struct FsMount *mount;
+ GuestFilesystemInfoList *ret = NULL;
+ Error *local_err = NULL;
+
+ QTAILQ_INIT(&mounts);
+ if (!build_fs_mount_list(&mounts, &local_err)) {
+ error_propagate(errp, local_err);
+ return NULL;
+ }
+
+ QTAILQ_FOREACH(mount, &mounts, next) {
+ g_debug("Building guest fsinfo for '%s'", mount->dirname);
+
+ QAPI_LIST_PREPEND(ret, build_guest_fsinfo(mount, &local_err));
+ if (local_err) {
+ error_propagate(errp, local_err);
+ qapi_free_GuestFilesystemInfoList(ret);
+ ret = NULL;
+ break;
+ }
+ }
+
+ free_fs_mount_list(&mounts);
+ return ret;
+}
+#endif /* CONFIG_FSFREEZE */
+
+#if defined(CONFIG_FSTRIM)
+/*
+ * Walk list of mounted file systems in the guest, and trim them.
+ */
+GuestFilesystemTrimResponse *
+qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp)
+{
+ GuestFilesystemTrimResponse *response;
+ GuestFilesystemTrimResult *result;
+ int ret = 0;
+ FsMountList mounts;
+ struct FsMount *mount;
+ int fd;
+ struct fstrim_range r;
+
+ slog("guest-fstrim called");
+
+ QTAILQ_INIT(&mounts);
+ if (!build_fs_mount_list(&mounts, errp)) {
+ return NULL;
+ }
+
+ response = g_malloc0(sizeof(*response));
+
+ QTAILQ_FOREACH(mount, &mounts, next) {
+ result = g_malloc0(sizeof(*result));
+ result->path = g_strdup(mount->dirname);
+
+ QAPI_LIST_PREPEND(response->paths, result);
+
+ fd = qga_open_cloexec(mount->dirname, O_RDONLY, 0);
+ if (fd == -1) {
+ result->error = g_strdup_printf("failed to open: %s",
+ strerror(errno));
+ continue;
+ }
+
+ /* We try to cull filesystems we know won't work in advance, but other
+ * filesystems may not implement fstrim for less obvious reasons.
+ * Those report EOPNOTSUPP, while some devices (e.g. CD-ROMs) report
+ * ENOTTY instead. Any other errno is treated as an unexpected error.
+ */
+ r.start = 0;
+ r.len = -1;
+ r.minlen = has_minimum ? minimum : 0;
+ ret = ioctl(fd, FITRIM, &r);
+ if (ret == -1) {
+ if (errno == ENOTTY || errno == EOPNOTSUPP) {
+ result->error = g_strdup("trim not supported");
+ } else {
+ result->error = g_strdup_printf("failed to trim: %s",
+ strerror(errno));
+ }
+ close(fd);
+ continue;
+ }
+
+ result->has_minimum = true;
+ result->minimum = r.minlen;
+ result->has_trimmed = true;
+ result->trimmed = r.len;
+ close(fd);
+ }
+
+ free_fs_mount_list(&mounts);
+ return response;
+}
+#endif /* CONFIG_FSTRIM */
+
+#define LINUX_SYS_STATE_FILE "/sys/power/state"
+#define SUSPEND_SUPPORTED 0
+#define SUSPEND_NOT_SUPPORTED 1
+
+typedef enum {
+ SUSPEND_MODE_DISK = 0,
+ SUSPEND_MODE_RAM = 1,
+ SUSPEND_MODE_HYBRID = 2,
+} SuspendMode;
+
+/*
+ * Executes a command in a child process using g_spawn_sync,
+ * returning an int >= 0 representing the exit status of the
+ * process.
+ *
+ * If the program wasn't found in path, returns -1.
+ *
+ * If a problem happened when creating the child process,
+ * returns -1 and errp is set.
+ */
+static int run_process_child(const char *command[], Error **errp)
+{
+ int exit_status, spawn_flag;
+ GError *g_err = NULL;
+ bool success;
+
+ spawn_flag = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL |
+ G_SPAWN_STDERR_TO_DEV_NULL;
+
+ success = g_spawn_sync(NULL, (char **)command, NULL, spawn_flag,
+ NULL, NULL, NULL, NULL,
+ &exit_status, &g_err);
+
+ if (success) {
+ return WEXITSTATUS(exit_status);
+ }
+
+ if (g_err && (g_err->code != G_SPAWN_ERROR_NOENT)) {
+ error_setg(errp, "failed to create child process, error '%s'",
+ g_err->message);
+ }
+
+ g_error_free(g_err);
+ return -1;
+}
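
A minimal standalone sketch of the g_spawn_sync() pattern used above (build with pkg-config --cflags --libs glib-2.0). The command is only an example; stdout and stderr are discarded just as in the helper, and the raw wait status is decoded with WEXITSTATUS().

#include <glib.h>
#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
    const char *cmd[] = {"true", NULL};      /* example command */
    int exit_status = 0;
    GError *err = NULL;
    gboolean ok;

    ok = g_spawn_sync(NULL, (char **)cmd, NULL,
                      G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL |
                      G_SPAWN_STDERR_TO_DEV_NULL,
                      NULL, NULL, NULL, NULL, &exit_status, &err);
    if (ok) {
        printf("exit code %d\n", WEXITSTATUS(exit_status));
    } else {
        printf("spawn failed: %s\n", err->message);
        g_error_free(err);
    }
    return 0;
}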
+
+static bool systemd_supports_mode(SuspendMode mode, Error **errp)
+{
+ const char *systemctl_args[3] = {"systemd-hibernate", "systemd-suspend",
+ "systemd-hybrid-sleep"};
+ const char *cmd[4] = {"systemctl", "status", systemctl_args[mode], NULL};
+ int status;
+
+ status = run_process_child(cmd, errp);
+
+ /*
+ * systemctl status uses LSB return codes so we can expect
+ * status > 0 and be ok. To assert if the guest has support
+ * for the selected suspend mode, status should be < 4. 4 is
+ * the code for unknown service status, the return value when
+ * the service does not exist. A common value is status = 3
+ * (program is not running).
+ */
+ if (status > 0 && status < 4) {
+ return true;
+ }
+
+ return false;
+}
+
+static void systemd_suspend(SuspendMode mode, Error **errp)
+{
+ Error *local_err = NULL;
+ const char *systemctl_args[3] = {"hibernate", "suspend", "hybrid-sleep"};
+ const char *cmd[3] = {"systemctl", systemctl_args[mode], NULL};
+ int status;
+
+ status = run_process_child(cmd, &local_err);
+
+ if (status == 0) {
+ return;
+ }
+
+ if ((status == -1) && !local_err) {
+ error_setg(errp, "the helper program 'systemctl %s' was not found",
+ systemctl_args[mode]);
+ return;
+ }
+
+ if (local_err) {
+ error_propagate(errp, local_err);
+ } else {
+ error_setg(errp, "the helper program 'systemctl %s' returned an "
+ "unexpected exit status code (%d)",
+ systemctl_args[mode], status);
+ }
+}
+
+static bool pmutils_supports_mode(SuspendMode mode, Error **errp)
+{
+ Error *local_err = NULL;
+ const char *pmutils_args[3] = {"--hibernate", "--suspend",
+ "--suspend-hybrid"};
+ const char *cmd[3] = {"pm-is-supported", pmutils_args[mode], NULL};
+ int status;
+
+ status = run_process_child(cmd, &local_err);
+
+ if (status == SUSPEND_SUPPORTED) {
+ return true;
+ }
+
+ if ((status == -1) && !local_err) {
+ return false;
+ }
+
+ if (local_err) {
+ error_propagate(errp, local_err);
+ } else {
+ error_setg(errp,
+ "the helper program '%s' returned an unexpected exit"
+ " status code (%d)", "pm-is-supported", status);
+ }
+
+ return false;
+}
+
+static void pmutils_suspend(SuspendMode mode, Error **errp)
+{
+ Error *local_err = NULL;
+ const char *pmutils_binaries[3] = {"pm-hibernate", "pm-suspend",
+ "pm-suspend-hybrid"};
+ const char *cmd[2] = {pmutils_binaries[mode], NULL};
+ int status;
+
+ status = run_process_child(cmd, &local_err);
+
+ if (status == 0) {
+ return;
+ }
+
+ if ((status == -1) && !local_err) {
+ error_setg(errp, "the helper program '%s' was not found",
+ pmutils_binaries[mode]);
+ return;
+ }
+
+ if (local_err) {
+ error_propagate(errp, local_err);
+ } else {
+ error_setg(errp,
+ "the helper program '%s' returned an unexpected exit"
+ " status code (%d)", pmutils_binaries[mode], status);
+ }
+}
+
+static bool linux_sys_state_supports_mode(SuspendMode mode, Error **errp)
+{
+ const char *sysfile_strs[3] = {"disk", "mem", NULL};
+ const char *sysfile_str = sysfile_strs[mode];
+ char buf[32]; /* hopefully big enough */
+ int fd;
+ ssize_t ret;
+
+ if (!sysfile_str) {
+ error_setg(errp, "unknown guest suspend mode");
+ return false;
+ }
+
+ fd = open(LINUX_SYS_STATE_FILE, O_RDONLY);
+ if (fd < 0) {
+ return false;
+ }
+
+ ret = read(fd, buf, sizeof(buf) - 1);
+ close(fd);
+ if (ret <= 0) {
+ return false;
+ }
+ buf[ret] = '\0';
+
+ if (strstr(buf, sysfile_str)) {
+ return true;
+ }
+ return false;
+}
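
For reference, a standalone version of the same check: /sys/power/state typically contains a space-separated list such as "freeze mem disk", so a substring search is sufficient.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char buf[32];
    ssize_t n;
    int fd = open("/sys/power/state", O_RDONLY);

    if (fd < 0) {
        return 1;
    }
    n = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if (n <= 0) {
        return 1;
    }
    buf[n] = '\0';
    printf("suspend-to-RAM %ssupported\n", strstr(buf, "mem") ? "" : "not ");
    return 0;
}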
+
+static void linux_sys_state_suspend(SuspendMode mode, Error **errp)
+{
+ g_autoptr(GError) local_gerr = NULL;
+ const char *sysfile_strs[3] = {"disk", "mem", NULL};
+ const char *sysfile_str = sysfile_strs[mode];
+
+ if (!sysfile_str) {
+ error_setg(errp, "unknown guest suspend mode");
+ return;
+ }
+
+ if (!g_file_set_contents(LINUX_SYS_STATE_FILE, sysfile_str,
+ -1, &local_gerr)) {
+ error_setg(errp, "suspend: cannot write to '%s': %s",
+ LINUX_SYS_STATE_FILE, local_gerr->message);
+ return;
+ }
+}
+
+static void guest_suspend(SuspendMode mode, Error **errp)
+{
+ Error *local_err = NULL;
+ bool mode_supported = false;
+
+ if (systemd_supports_mode(mode, &local_err)) {
+ mode_supported = true;
+ systemd_suspend(mode, &local_err);
+
+ if (!local_err) {
+ return;
+ }
+ }
+
+ error_free(local_err);
+ local_err = NULL;
+
+ if (pmutils_supports_mode(mode, &local_err)) {
+ mode_supported = true;
+ pmutils_suspend(mode, &local_err);
+
+ if (!local_err) {
+ return;
+ }
+ }
+
+ error_free(local_err);
+ local_err = NULL;
+
+ if (linux_sys_state_supports_mode(mode, &local_err)) {
+ mode_supported = true;
+ linux_sys_state_suspend(mode, &local_err);
+ }
+
+ if (!mode_supported) {
+ error_free(local_err);
+ error_setg(errp,
+ "the requested suspend mode is not supported by the guest");
+ } else {
+ error_propagate(errp, local_err);
+ }
+}
+
+void qmp_guest_suspend_disk(Error **errp)
+{
+ guest_suspend(SUSPEND_MODE_DISK, errp);
+}
+
+void qmp_guest_suspend_ram(Error **errp)
+{
+ guest_suspend(SUSPEND_MODE_RAM, errp);
+}
+
+void qmp_guest_suspend_hybrid(Error **errp)
+{
+ guest_suspend(SUSPEND_MODE_HYBRID, errp);
+}
+
+/* Transfer online/offline status between @vcpu and the guest system.
+ *
+ * On input either @errp or *@errp must be NULL.
+ *
+ * In system-to-@vcpu direction, the following @vcpu fields are accessed:
+ * - R: vcpu->logical_id
+ * - W: vcpu->online
+ * - W: vcpu->can_offline
+ *
+ * In @vcpu-to-system direction, the following @vcpu fields are accessed:
+ * - R: vcpu->logical_id
+ * - R: vcpu->online
+ *
+ * Written members remain unmodified on error.
+ */
+static void transfer_vcpu(GuestLogicalProcessor *vcpu, bool sys2vcpu,
+ char *dirpath, Error **errp)
+{
+ int fd;
+ int res;
+ int dirfd;
+ static const char fn[] = "online";
+
+ dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
+ if (dirfd == -1) {
+ error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
+ return;
+ }
+
+ fd = openat(dirfd, fn, sys2vcpu ? O_RDONLY : O_RDWR);
+ if (fd == -1) {
+ if (errno != ENOENT) {
+ error_setg_errno(errp, errno, "open(\"%s/%s\")", dirpath, fn);
+ } else if (sys2vcpu) {
+ vcpu->online = true;
+ vcpu->can_offline = false;
+ } else if (!vcpu->online) {
+ error_setg(errp, "logical processor #%" PRId64 " can't be "
+ "offlined", vcpu->logical_id);
+ } /* otherwise pretend successful re-onlining */
+ } else {
+ unsigned char status;
+
+ res = pread(fd, &status, 1, 0);
+ if (res == -1) {
+ error_setg_errno(errp, errno, "pread(\"%s/%s\")", dirpath, fn);
+ } else if (res == 0) {
+ error_setg(errp, "pread(\"%s/%s\"): unexpected EOF", dirpath,
+ fn);
+ } else if (sys2vcpu) {
+ vcpu->online = (status != '0');
+ vcpu->can_offline = true;
+ } else if (vcpu->online != (status != '0')) {
+ status = '0' + vcpu->online;
+ if (pwrite(fd, &status, 1, 0) == -1) {
+ error_setg_errno(errp, errno, "pwrite(\"%s/%s\")", dirpath,
+ fn);
+ }
+ } /* otherwise pretend successful re-(on|off)-lining */
+
+ res = close(fd);
+ g_assert(res == 0);
+ }
+
+ res = close(dirfd);
+ g_assert(res == 0);
+}
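
A small standalone sketch of the per-CPU "online" attribute access performed above; cpu1 is an arbitrary example, and the missing-file case mirrors the assumption that such a CPU is permanently online.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    unsigned char status;
    int fd = open("/sys/devices/system/cpu/cpu1/online", O_RDONLY);

    if (fd == -1) {
        /* No "online" file: the CPU cannot be offlined, treat as online */
        printf("cpu1: always online\n");
        return 0;
    }
    if (pread(fd, &status, 1, 0) == 1) {
        printf("cpu1 is %s\n", status != '0' ? "online" : "offline");
    }
    close(fd);
    return 0;
}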
+
+GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp)
+{
+ GuestLogicalProcessorList *head, **tail;
+ const char *cpu_dir = "/sys/devices/system/cpu";
+ const gchar *line;
+ g_autoptr(GDir) cpu_gdir = NULL;
+ Error *local_err = NULL;
+
+ head = NULL;
+ tail = &head;
+ cpu_gdir = g_dir_open(cpu_dir, 0, NULL);
+
+ if (cpu_gdir == NULL) {
+ error_setg_errno(errp, errno, "failed to list entries: %s", cpu_dir);
+ return NULL;
+ }
+
+ while (local_err == NULL && (line = g_dir_read_name(cpu_gdir)) != NULL) {
+ GuestLogicalProcessor *vcpu;
+ int64_t id;
+ if (sscanf(line, "cpu%" PRId64, &id)) {
+ g_autofree char *path = g_strdup_printf("/sys/devices/system/cpu/"
+ "cpu%" PRId64 "/", id);
+ vcpu = g_malloc0(sizeof *vcpu);
+ vcpu->logical_id = id;
+ vcpu->has_can_offline = true; /* lolspeak ftw */
+ transfer_vcpu(vcpu, true, path, &local_err);
+ QAPI_LIST_APPEND(tail, vcpu);
+ }
+ }
+
+ if (local_err == NULL) {
+ /* there's no guest with zero VCPUs */
+ g_assert(head != NULL);
+ return head;
+ }
+
+ qapi_free_GuestLogicalProcessorList(head);
+ error_propagate(errp, local_err);
+ return NULL;
+}
+
+int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp)
+{
+ int64_t processed;
+ Error *local_err = NULL;
+
+ processed = 0;
+ while (vcpus != NULL) {
+ char *path = g_strdup_printf("/sys/devices/system/cpu/cpu%" PRId64 "/",
+ vcpus->value->logical_id);
+
+ transfer_vcpu(vcpus->value, false, path, &local_err);
+ g_free(path);
+ if (local_err != NULL) {
+ break;
+ }
+ ++processed;
+ vcpus = vcpus->next;
+ }
+
+ if (local_err != NULL) {
+ if (processed == 0) {
+ error_propagate(errp, local_err);
+ } else {
+ error_free(local_err);
+ }
+ }
+
+ return processed;
+}
+
+
+static void ga_read_sysfs_file(int dirfd, const char *pathname, char *buf,
+ int size, Error **errp)
+{
+ int fd;
+ int res;
+
+ errno = 0;
+ fd = openat(dirfd, pathname, O_RDONLY);
+ if (fd == -1) {
+ error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname);
+ return;
+ }
+
+ res = pread(fd, buf, size, 0);
+ if (res == -1) {
+ error_setg_errno(errp, errno, "pread sysfs file \"%s\"", pathname);
+ } else if (res == 0) {
+ error_setg(errp, "pread sysfs file \"%s\": unexpected EOF", pathname);
+ }
+ close(fd);
+}
+
+static void ga_write_sysfs_file(int dirfd, const char *pathname,
+ const char *buf, int size, Error **errp)
+{
+ int fd;
+
+ errno = 0;
+ fd = openat(dirfd, pathname, O_WRONLY);
+ if (fd == -1) {
+ error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname);
+ return;
+ }
+
+ if (pwrite(fd, buf, size, 0) == -1) {
+ error_setg_errno(errp, errno, "pwrite sysfs file \"%s\"", pathname);
+ }
+
+ close(fd);
+}
+
+/* Transfer online/offline status between @mem_blk and the guest system.
+ *
+ * On input either @errp or *@errp must be NULL.
+ *
+ * In system-to-@mem_blk direction, the following @mem_blk fields are accessed:
+ * - R: mem_blk->phys_index
+ * - W: mem_blk->online
+ * - W: mem_blk->can_offline
+ *
+ * In @mem_blk-to-system direction, the following @mem_blk fields are accessed:
+ * - R: mem_blk->phys_index
+ * - R: mem_blk->online
+ * - R: mem_blk->can_offline
+ *
+ * Written members remain unmodified on error.
+ */
+static void transfer_memory_block(GuestMemoryBlock *mem_blk, bool sys2memblk,
+ GuestMemoryBlockResponse *result,
+ Error **errp)
+{
+ char *dirpath;
+ int dirfd;
+ char *status;
+ Error *local_err = NULL;
+
+ if (!sys2memblk) {
+ DIR *dp;
+
+ if (!result) {
+ error_setg(errp, "Internal error, 'result' should not be NULL");
+ return;
+ }
+ errno = 0;
+ dp = opendir("/sys/devices/system/memory/");
+ /* If there is no 'memory' directory in sysfs, assume this VM does not
+ * support onlining/offlining memory blocks.
+ */
+ if (!dp) {
+ if (errno == ENOENT) {
+ result->response =
+ GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED;
+ }
+ goto out1;
+ }
+ closedir(dp);
+ }
+
+ dirpath = g_strdup_printf("/sys/devices/system/memory/memory%" PRId64 "/",
+ mem_blk->phys_index);
+ dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
+ if (dirfd == -1) {
+ if (sys2memblk) {
+ error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
+ } else {
+ if (errno == ENOENT) {
+ result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_NOT_FOUND;
+ } else {
+ result->response =
+ GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
+ }
+ }
+ g_free(dirpath);
+ goto out1;
+ }
+ g_free(dirpath);
+
+ status = g_malloc0(10);
+ ga_read_sysfs_file(dirfd, "state", status, 10, &local_err);
+ if (local_err) {
+ /* handle sysfs files that do not exist on older kernels */
+ if (errno == ENOENT) {
+ error_free(local_err);
+ if (sys2memblk) {
+ mem_blk->online = true;
+ mem_blk->can_offline = false;
+ } else if (!mem_blk->online) {
+ result->response =
+ GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED;
+ }
+ } else {
+ if (sys2memblk) {
+ error_propagate(errp, local_err);
+ } else {
+ error_free(local_err);
+ result->response =
+ GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
+ }
+ }
+ goto out2;
+ }
+
+ if (sys2memblk) {
+ char removable = '0';
+
+ mem_blk->online = (strncmp(status, "online", 6) == 0);
+
+ ga_read_sysfs_file(dirfd, "removable", &removable, 1, &local_err);
+ if (local_err) {
+ /* if no 'removable' file, it doesn't support offline mem blk */
+ if (errno == ENOENT) {
+ error_free(local_err);
+ mem_blk->can_offline = false;
+ } else {
+ error_propagate(errp, local_err);
+ }
+ } else {
+ mem_blk->can_offline = (removable != '0');
+ }
+ } else {
+ if (mem_blk->online != (strncmp(status, "online", 6) == 0)) {
+ const char *new_state = mem_blk->online ? "online" : "offline";
+
+ ga_write_sysfs_file(dirfd, "state", new_state, strlen(new_state),
+ &local_err);
+ if (local_err) {
+ error_free(local_err);
+ result->response =
+ GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
+ goto out2;
+ }
+
+ result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_SUCCESS;
+ result->has_error_code = false;
+ } /* otherwise pretend successful re-(on|off)-lining */
+ }
+ g_free(status);
+ close(dirfd);
+ return;
+
+out2:
+ g_free(status);
+ close(dirfd);
+out1:
+ if (!sys2memblk) {
+ result->has_error_code = true;
+ result->error_code = errno;
+ }
+}
+
+GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp)
+{
+ GuestMemoryBlockList *head, **tail;
+ Error *local_err = NULL;
+ struct dirent *de;
+ DIR *dp;
+
+ head = NULL;
+ tail = &head;
+
+ dp = opendir("/sys/devices/system/memory/");
+ if (!dp) {
+ /* it's ok if this happens to be a system that doesn't expose
+ * memory blocks via sysfs, but otherwise we should report
+ * an error
+ */
+ if (errno != ENOENT) {
+ error_setg_errno(errp, errno, "Can't open directory"
+ "\"/sys/devices/system/memory/\"");
+ }
+ return NULL;
+ }
+
+ /* Note: the phys_index of a memory block may be discontinuous, because a
+ * memory block is the unit of the sparse memory model, which allows
+ * discontinuous ranges (e.g. NUMA), so we traverse the whole memory block
+ * directory rather than assuming consecutive indices.
+ */
+ while ((de = readdir(dp)) != NULL) {
+ GuestMemoryBlock *mem_blk;
+
+ if ((strncmp(de->d_name, "memory", 6) != 0) ||
+ !(de->d_type & DT_DIR)) {
+ continue;
+ }
+
+ mem_blk = g_malloc0(sizeof *mem_blk);
+ /* The d_name is "memoryXXX", phys_index is block id, same as XXX */
+ mem_blk->phys_index = strtoul(&de->d_name[6], NULL, 10);
+ mem_blk->has_can_offline = true; /* lolspeak ftw */
+ transfer_memory_block(mem_blk, true, NULL, &local_err);
+ if (local_err) {
+ break;
+ }
+
+ QAPI_LIST_APPEND(tail, mem_blk);
+ }
+
+ closedir(dp);
+ if (local_err == NULL) {
+ /* there's no guest with zero memory blocks */
+ if (head == NULL) {
+ error_setg(errp, "guest reported zero memory blocks!");
+ }
+ return head;
+ }
+
+ qapi_free_GuestMemoryBlockList(head);
+ error_propagate(errp, local_err);
+ return NULL;
+}
+
+GuestMemoryBlockResponseList *
+qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp)
+{
+ GuestMemoryBlockResponseList *head, **tail;
+ Error *local_err = NULL;
+
+ head = NULL;
+ tail = &head;
+
+ while (mem_blks != NULL) {
+ GuestMemoryBlockResponse *result;
+ GuestMemoryBlock *current_mem_blk = mem_blks->value;
+
+ result = g_malloc0(sizeof(*result));
+ result->phys_index = current_mem_blk->phys_index;
+ transfer_memory_block(current_mem_blk, false, result, &local_err);
+ if (local_err) { /* should never happen */
+ goto err;
+ }
+
+ QAPI_LIST_APPEND(tail, result);
+ mem_blks = mem_blks->next;
+ }
+
+ return head;
+err:
+ qapi_free_GuestMemoryBlockResponseList(head);
+ error_propagate(errp, local_err);
+ return NULL;
+}
+
+GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp)
+{
+ Error *local_err = NULL;
+ char *dirpath;
+ int dirfd;
+ char *buf;
+ GuestMemoryBlockInfo *info;
+
+ dirpath = g_strdup_printf("/sys/devices/system/memory/");
+ dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
+ if (dirfd == -1) {
+ error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
+ g_free(dirpath);
+ return NULL;
+ }
+ g_free(dirpath);
+
+ buf = g_malloc0(20);
+ ga_read_sysfs_file(dirfd, "block_size_bytes", buf, 20, &local_err);
+ close(dirfd);
+ if (local_err) {
+ g_free(buf);
+ error_propagate(errp, local_err);
+ return NULL;
+ }
+
+ info = g_new0(GuestMemoryBlockInfo, 1);
+ info->size = strtol(buf, NULL, 16); /* the unit is bytes */
+
+ g_free(buf);
+
+ return info;
+}
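
The block_size_bytes attribute read above is an ASCII hexadecimal string, which is why strtol() is called with base 16; a tiny illustration with an example value:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *buf = "8000000";             /* example sysfs content */
    long size = strtol(buf, NULL, 16);       /* 0x8000000 = 134217728 */

    printf("block size: %ld bytes (%ld MiB)\n", size, size >> 20);
    return 0;
}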
+
+#define MAX_NAME_LEN 128
+static GuestDiskStatsInfoList *guest_get_diskstats(Error **errp)
+{
+ GuestDiskStatsInfoList *head = NULL, **tail = &head;
+ const char *diskstats = "/proc/diskstats";
+ FILE *fp;
+ size_t n;
+ char *line = NULL;
+
+ fp = fopen(diskstats, "r");
+ if (fp == NULL) {
+ error_setg_errno(errp, errno, "open(\"%s\")", diskstats);
+ return NULL;
+ }
+
+ while (getline(&line, &n, fp) != -1) {
+ g_autofree GuestDiskStatsInfo *diskstatinfo = NULL;
+ g_autofree GuestDiskStats *diskstat = NULL;
+ char dev_name[MAX_NAME_LEN];
+ unsigned int ios_pgr, tot_ticks, rq_ticks, wr_ticks, dc_ticks, fl_ticks;
+ unsigned long rd_ios, rd_merges_or_rd_sec, rd_ticks_or_wr_sec, wr_ios;
+ unsigned long wr_merges, rd_sec_or_wr_ios, wr_sec;
+ unsigned long dc_ios, dc_merges, dc_sec, fl_ios;
+ unsigned int major, minor;
+ int i;
+
+ i = sscanf(line, "%u %u %s %lu %lu %lu"
+ "%lu %lu %lu %lu %u %u %u %u"
+ "%lu %lu %lu %u %lu %u",
+ &major, &minor, dev_name,
+ &rd_ios, &rd_merges_or_rd_sec, &rd_sec_or_wr_ios,
+ &rd_ticks_or_wr_sec, &wr_ios, &wr_merges, &wr_sec,
+ &wr_ticks, &ios_pgr, &tot_ticks, &rq_ticks,
+ &dc_ios, &dc_merges, &dc_sec, &dc_ticks,
+ &fl_ios, &fl_ticks);
+
+ if (i < 7) {
+ continue;
+ }
+
+ diskstatinfo = g_new0(GuestDiskStatsInfo, 1);
+ diskstatinfo->name = g_strdup(dev_name);
+ diskstatinfo->major = major;
+ diskstatinfo->minor = minor;
+
+ diskstat = g_new0(GuestDiskStats, 1);
+ if (i == 7) {
+ diskstat->has_read_ios = true;
+ diskstat->read_ios = rd_ios;
+ diskstat->has_read_sectors = true;
+ diskstat->read_sectors = rd_merges_or_rd_sec;
+ diskstat->has_write_ios = true;
+ diskstat->write_ios = rd_sec_or_wr_ios;
+ diskstat->has_write_sectors = true;
+ diskstat->write_sectors = rd_ticks_or_wr_sec;
+ }
+ if (i >= 14) {
+ diskstat->has_read_ios = true;
+ diskstat->read_ios = rd_ios;
+ diskstat->has_read_sectors = true;
+ diskstat->read_sectors = rd_sec_or_wr_ios;
+ diskstat->has_read_merges = true;
+ diskstat->read_merges = rd_merges_or_rd_sec;
+ diskstat->has_read_ticks = true;
+ diskstat->read_ticks = rd_ticks_or_wr_sec;
+ diskstat->has_write_ios = true;
+ diskstat->write_ios = wr_ios;
+ diskstat->has_write_sectors = true;
+ diskstat->write_sectors = wr_sec;
+ diskstat->has_write_merges = true;
+ diskstat->write_merges = wr_merges;
+ diskstat->has_write_ticks = true;
+ diskstat->write_ticks = wr_ticks;
+ diskstat->has_ios_pgr = true;
+ diskstat->ios_pgr = ios_pgr;
+ diskstat->has_total_ticks = true;
+ diskstat->total_ticks = tot_ticks;
+ diskstat->has_weight_ticks = true;
+ diskstat->weight_ticks = rq_ticks;
+ }
+ if (i >= 18) {
+ diskstat->has_discard_ios = true;
+ diskstat->discard_ios = dc_ios;
+ diskstat->has_discard_merges = true;
+ diskstat->discard_merges = dc_merges;
+ diskstat->has_discard_sectors = true;
+ diskstat->discard_sectors = dc_sec;
+ diskstat->has_discard_ticks = true;
+ diskstat->discard_ticks = dc_ticks;
+ }
+ if (i >= 20) {
+ diskstat->has_flush_ios = true;
+ diskstat->flush_ios = fl_ios;
+ diskstat->has_flush_ticks = true;
+ diskstat->flush_ticks = fl_ticks;
+ }
+
+ diskstatinfo->stats = g_steal_pointer(&diskstat);
+ QAPI_LIST_APPEND(tail, diskstatinfo);
+ diskstatinfo = NULL;
+ }
+ free(line);
+ fclose(fp);
+ return head;
+}
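
A standalone sketch of the field-count logic above: /proc/diskstats lines carry 7 fields in the older short format (the i == 7 branch), 14 in the classic format, 18 once discard counters are present, and 20 with flush counters. The sample line and its numbers are made up.

#include <stdio.h>

int main(void)
{
    /* Hypothetical short-format line: major minor name rd rd_sec wr wr_sec */
    const char *line = "8 1 sda1 1200 45678 340 5000";
    char dev[128];
    unsigned int major, minor;
    unsigned long rd_ios, rd_sec, wr_ios, wr_sec;
    int i = sscanf(line, "%u %u %127s %lu %lu %lu %lu",
                   &major, &minor, dev, &rd_ios, &rd_sec, &wr_ios, &wr_sec);

    if (i == 7) {
        printf("%s: %lu reads (%lu sectors), %lu writes (%lu sectors)\n",
               dev, rd_ios, rd_sec, wr_ios, wr_sec);
    }
    return 0;
}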
+
+GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp)
+{
+ return guest_get_diskstats(errp);
+}
+
+GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp)
+{
+ GuestCpuStatsList *head = NULL, **tail = &head;
+ const char *cpustats = "/proc/stat";
+ int clk_tck = sysconf(_SC_CLK_TCK);
+ FILE *fp;
+ size_t n;
+ char *line = NULL;
+
+ fp = fopen(cpustats, "r");
+ if (fp == NULL) {
+ error_setg_errno(errp, errno, "open(\"%s\")", cpustats);
+ return NULL;
+ }
+
+ while (getline(&line, &n, fp) != -1) {
+ GuestCpuStats *cpustat = NULL;
+ GuestLinuxCpuStats *linuxcpustat;
+ int i;
+ unsigned long user, system, idle, iowait, irq, softirq, steal, guest;
+ unsigned long nice, guest_nice;
+ char name[64];
+
+ i = sscanf(line, "%s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+ name, &user, &nice, &system, &idle, &iowait, &irq, &softirq,
+ &steal, &guest, &guest_nice);
+
+ /* drop "cpu 1 2 3 ...", get "cpuX 1 2 3 ..." only */
+ if ((i == EOF) || strncmp(name, "cpu", 3) || (name[3] == '\0')) {
+ continue;
+ }
+
+ if (i < 5) {
+ slog("Parsing cpu stat from %s failed, see \"man proc\"", cpustats);
+ break;
+ }
+
+ cpustat = g_new0(GuestCpuStats, 1);
+ cpustat->type = GUEST_CPU_STATS_TYPE_LINUX;
+
+ linuxcpustat = &cpustat->u.q_linux;
+ linuxcpustat->cpu = atoi(&name[3]);
+ linuxcpustat->user = user * 1000 / clk_tck;
+ linuxcpustat->nice = nice * 1000 / clk_tck;
+ linuxcpustat->system = system * 1000 / clk_tck;
+ linuxcpustat->idle = idle * 1000 / clk_tck;
+
+ if (i > 5) {
+ linuxcpustat->has_iowait = true;
+ linuxcpustat->iowait = iowait * 1000 / clk_tck;
+ }
+
+ if (i > 6) {
+ linuxcpustat->has_irq = true;
+ linuxcpustat->irq = irq * 1000 / clk_tck;
+ linuxcpustat->has_softirq = true;
+ linuxcpustat->softirq = softirq * 1000 / clk_tck;
+ }
+
+ if (i > 8) {
+ linuxcpustat->has_steal = true;
+ linuxcpustat->steal = steal * 1000 / clk_tck;
+ }
+
+ if (i > 9) {
+ linuxcpustat->has_guest = true;
+ linuxcpustat->guest = guest * 1000 / clk_tck;
+ }
+
+ if (i > 10) {
+ linuxcpustat->has_guest = true;
+ linuxcpustat->guest = guest * 1000 / clk_tck;
+ linuxcpustat->has_guestnice = true;
+ linuxcpustat->guestnice = guest_nice * 1000 / clk_tck;
+ }
+
+ QAPI_LIST_APPEND(tail, cpustat);
+ }
+
+ free(line);
+ fclose(fp);
+ return head;
+}
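
The per-field conversion above turns USER_HZ ticks from /proc/stat into milliseconds; the sketch below shows the same arithmetic with an example tick count.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long clk_tck = sysconf(_SC_CLK_TCK);   /* commonly 100 */
    unsigned long user_ticks = 12345;      /* example value from /proc/stat */

    printf("user time: %lu ms\n", user_ticks * 1000 / clk_tck);
    return 0;
}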
+
+static char *hex_to_ip_address(const void *hex_value, int is_ipv6)
+{
+ if (is_ipv6) {
+ char addr[INET6_ADDRSTRLEN];
+ struct in6_addr in6;
+ const char *hex_str = (const char *)hex_value;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (sscanf(&hex_str[i * 2], "%02hhx", &in6.s6_addr[i]) != 1) {
+ return NULL;
+ }
+ }
+ inet_ntop(AF_INET6, &in6, addr, INET6_ADDRSTRLEN);
+
+ return g_strdup(addr);
+ } else {
+ unsigned int hex_int = *(unsigned int *)hex_value;
+ unsigned int byte1 = (hex_int >> 24) & 0xFF;
+ unsigned int byte2 = (hex_int >> 16) & 0xFF;
+ unsigned int byte3 = (hex_int >> 8) & 0xFF;
+ unsigned int byte4 = hex_int & 0xFF;
+
+ return g_strdup_printf("%u.%u.%u.%u", byte4, byte3, byte2, byte1);
+ }
+}
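
On a little-endian guest, /proc/net/route reports IPv4 addresses as byte-swapped hexadecimal words, which is why the bytes are printed in reverse order above; a standalone example using the loopback address:

#include <stdio.h>

int main(void)
{
    unsigned int hex = 0x0100007F;          /* 127.0.0.1 as shown in the file */
    unsigned int b1 = (hex >> 24) & 0xFF;
    unsigned int b2 = (hex >> 16) & 0xFF;
    unsigned int b3 = (hex >> 8) & 0xFF;
    unsigned int b4 = hex & 0xFF;

    printf("%u.%u.%u.%u\n", b4, b3, b2, b1); /* 127.0.0.1 */
    return 0;
}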
+
+GuestNetworkRouteList *qmp_guest_network_get_route(Error **errp)
+{
+ GuestNetworkRouteList *head = NULL, **tail = &head;
+ const char *route_files[] = {"/proc/net/route", "/proc/net/ipv6_route"};
+ FILE *fp;
+ size_t n = 0;
+ char *line = NULL;
+ int firstLine;
+ int is_ipv6;
+ int i;
+ char iface[IFNAMSIZ];
+
+ for (i = 0; i < 2; i++) {
+ firstLine = 1;
+ is_ipv6 = (i == 1);
+ fp = fopen(route_files[i], "r");
+ if (fp == NULL) {
+ error_setg_errno(errp, errno, "open(\"%s\")", route_files[i]);
+ continue;
+ }
+
+ while (getline(&line, &n, fp) != -1) {
+ if (firstLine && !is_ipv6) {
+ firstLine = 0;
+ continue;
+ }
+ g_autoptr(GuestNetworkRoute) route = g_new0(GuestNetworkRoute, 1);
+
+ if (is_ipv6) {
+ char destination[33], source[33], next_hop[33];
+ int des_prefixlen, src_prefixlen, metric, refcnt, use, flags;
+ if (sscanf(line, "%32s %x %32s %x %32s %x %x %x %x %s",
+ destination, &des_prefixlen, source,
+ &src_prefixlen, next_hop, &metric, &refcnt,
+ &use, &flags, iface) != 10) {
+ continue;
+ }
+
+ route->destination = hex_to_ip_address(destination, 1);
+ if (route->destination == NULL) {
+ continue;
+ }
+ route->iface = g_strdup(iface);
+ route->source = hex_to_ip_address(source, 1);
+ route->nexthop = hex_to_ip_address(next_hop, 1);
+ route->desprefixlen = g_strdup_printf("%d", des_prefixlen);
+ route->srcprefixlen = g_strdup_printf("%d", src_prefixlen);
+ route->metric = metric;
+ route->has_flags = true;
+ route->flags = flags;
+ route->has_refcnt = true;
+ route->refcnt = refcnt;
+ route->has_use = true;
+ route->use = use;
+ route->version = 6;
+ } else {
+ unsigned int destination, gateway, mask, flags;
+ int refcnt, use, metric, mtu, window, irtt;
+ if (sscanf(line, "%s %X %X %x %d %d %d %X %d %d %d",
+ iface, &destination, &gateway, &flags, &refcnt,
+ &use, &metric, &mask, &mtu, &window, &irtt) != 11) {
+ continue;
+ }
+
+ route->destination = hex_to_ip_address(&destination, 0);
+ if (route->destination == NULL) {
+ continue;
+ }
+ route->iface = g_strdup(iface);
+ route->gateway = hex_to_ip_address(&gateway, 0);
+ route->mask = hex_to_ip_address(&mask, 0);
+ route->metric = metric;
+ route->has_flags = true;
+ route->flags = flags;
+ route->has_refcnt = true;
+ route->refcnt = refcnt;
+ route->has_use = true;
+ route->use = use;
+ route->has_mtu = true;
+ route->mtu = mtu;
+ route->has_window = true;
+ route->window = window;
+ route->has_irtt = true;
+ route->irtt = irtt;
+ route->version = 4;
+ }
+
+ QAPI_LIST_APPEND(tail, route);
+ route = NULL;
+ }
+
+ fclose(fp);
+ }
+
+ free(line);
+ return head;
+}
diff --git a/qga/commands-posix.c b/qga/commands-posix.c
index 7f05996..12bc086 100644
--- a/qga/commands-posix.c
+++ b/qga/commands-posix.c
@@ -18,29 +18,17 @@
#include <dirent.h>
#include "qga-qapi-commands.h"
#include "qapi/error.h"
-#include "qapi/qmp/qerror.h"
#include "qemu/host-utils.h"
#include "qemu/sockets.h"
#include "qemu/base64.h"
#include "qemu/cutils.h"
#include "commands-common.h"
-#include "block/nvme.h"
#include "cutils.h"
#ifdef HAVE_UTMPX
#include <utmpx.h>
#endif
-#if defined(__linux__)
-#include <mntent.h>
-#include <sys/statvfs.h>
-#include <linux/nvme_ioctl.h>
-
-#ifdef CONFIG_LIBUDEV
-#include <libudev.h>
-#endif
-#endif
-
#ifdef HAVE_GETIFADDRS
#include <arpa/inet.h>
#include <sys/socket.h>
@@ -59,7 +47,7 @@
#endif
#endif
-static void ga_wait_child(pid_t pid, int *status, Error **errp)
+static bool ga_wait_child(pid_t pid, int *status, Error **errp)
{
pid_t rpid;
@@ -70,10 +58,11 @@ static void ga_wait_child(pid_t pid, int *status, Error **errp)
if (rpid == -1) {
error_setg_errno(errp, errno, "failed to wait for child (pid: %d)",
pid);
- return;
+ return false;
}
g_assert(rpid == pid);
+ return true;
}
static ssize_t ga_pipe_read_str(int fd[2], char **str)
@@ -95,7 +84,7 @@ static ssize_t ga_pipe_read_str(int fd[2], char **str)
*str = g_realloc(*str, len + n + 1);
memcpy(*str + len, buf, n);
len += n;
- *str[len] = '\0';
+ (*str)[len] = '\0';
}
close(fd[0]);
fd[0] = -1;
@@ -178,8 +167,7 @@ static int ga_run_command(const char *argv[], const char *in_str,
goto out;
}
- ga_wait_child(pid, &status, errp);
- if (*errp) {
+ if (!ga_wait_child(pid, &status, errp)) {
goto out;
}
@@ -817,8 +805,10 @@ int64_t qmp_guest_fsfreeze_thaw(Error **errp)
int ret;
ret = qmp_guest_fsfreeze_do_thaw(errp);
+
if (ret >= 0) {
ga_unset_frozen(ga_state);
+ slog("guest-fsthaw called");
execute_fsfreeze_hook(FSFREEZE_HOOK_THAW, errp);
} else {
ret = 0;
@@ -842,1308 +832,6 @@ static void guest_fsfreeze_cleanup(void)
}
#endif
-/* linux-specific implementations. avoid this if at all possible. */
-#if defined(__linux__)
-#if defined(CONFIG_FSFREEZE)
-
-static char *get_pci_driver(char const *syspath, int pathlen, Error **errp)
-{
- char *path;
- char *dpath;
- char *driver = NULL;
- char buf[PATH_MAX];
- ssize_t len;
-
- path = g_strndup(syspath, pathlen);
- dpath = g_strdup_printf("%s/driver", path);
- len = readlink(dpath, buf, sizeof(buf) - 1);
- if (len != -1) {
- buf[len] = 0;
- driver = g_path_get_basename(buf);
- }
- g_free(dpath);
- g_free(path);
- return driver;
-}
-
-static int compare_uint(const void *_a, const void *_b)
-{
- unsigned int a = *(unsigned int *)_a;
- unsigned int b = *(unsigned int *)_b;
-
- return a < b ? -1 : a > b ? 1 : 0;
-}
-
-/* Walk the specified sysfs and build a sorted list of host or ata numbers */
-static int build_hosts(char const *syspath, char const *host, bool ata,
- unsigned int *hosts, int hosts_max, Error **errp)
-{
- char *path;
- DIR *dir;
- struct dirent *entry;
- int i = 0;
-
- path = g_strndup(syspath, host - syspath);
- dir = opendir(path);
- if (!dir) {
- error_setg_errno(errp, errno, "opendir(\"%s\")", path);
- g_free(path);
- return -1;
- }
-
- while (i < hosts_max) {
- entry = readdir(dir);
- if (!entry) {
- break;
- }
- if (ata && sscanf(entry->d_name, "ata%d", hosts + i) == 1) {
- ++i;
- } else if (!ata && sscanf(entry->d_name, "host%d", hosts + i) == 1) {
- ++i;
- }
- }
-
- qsort(hosts, i, sizeof(hosts[0]), compare_uint);
-
- g_free(path);
- closedir(dir);
- return i;
-}
-
-/*
- * Store disk device info for devices on the PCI bus.
- * Returns true if information has been stored, or false for failure.
- */
-static bool build_guest_fsinfo_for_pci_dev(char const *syspath,
- GuestDiskAddress *disk,
- Error **errp)
-{
- unsigned int pci[4], host, hosts[8], tgt[3];
- int i, nhosts = 0, pcilen;
- GuestPCIAddress *pciaddr = disk->pci_controller;
- bool has_ata = false, has_host = false, has_tgt = false;
- char *p, *q, *driver = NULL;
- bool ret = false;
-
- p = strstr(syspath, "/devices/pci");
- if (!p || sscanf(p + 12, "%*x:%*x/%x:%x:%x.%x%n",
- pci, pci + 1, pci + 2, pci + 3, &pcilen) < 4) {
- g_debug("only pci device is supported: sysfs path '%s'", syspath);
- return false;
- }
-
- p += 12 + pcilen;
- while (true) {
- driver = get_pci_driver(syspath, p - syspath, errp);
- if (driver && (g_str_equal(driver, "ata_piix") ||
- g_str_equal(driver, "sym53c8xx") ||
- g_str_equal(driver, "virtio-pci") ||
- g_str_equal(driver, "ahci") ||
- g_str_equal(driver, "nvme") ||
- g_str_equal(driver, "xhci_hcd") ||
- g_str_equal(driver, "ehci-pci"))) {
- break;
- }
-
- g_free(driver);
- if (sscanf(p, "/%x:%x:%x.%x%n",
- pci, pci + 1, pci + 2, pci + 3, &pcilen) == 4) {
- p += pcilen;
- continue;
- }
-
- g_debug("unsupported driver or sysfs path '%s'", syspath);
- return false;
- }
-
- p = strstr(syspath, "/target");
- if (p && sscanf(p + 7, "%*u:%*u:%*u/%*u:%u:%u:%u",
- tgt, tgt + 1, tgt + 2) == 3) {
- has_tgt = true;
- }
-
- p = strstr(syspath, "/ata");
- if (p) {
- q = p + 4;
- has_ata = true;
- } else {
- p = strstr(syspath, "/host");
- q = p + 5;
- }
- if (p && sscanf(q, "%u", &host) == 1) {
- has_host = true;
- nhosts = build_hosts(syspath, p, has_ata, hosts,
- ARRAY_SIZE(hosts), errp);
- if (nhosts < 0) {
- goto cleanup;
- }
- }
-
- pciaddr->domain = pci[0];
- pciaddr->bus = pci[1];
- pciaddr->slot = pci[2];
- pciaddr->function = pci[3];
-
- if (strcmp(driver, "ata_piix") == 0) {
- /* a host per ide bus, target*:0:<unit>:0 */
- if (!has_host || !has_tgt) {
- g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
- goto cleanup;
- }
- for (i = 0; i < nhosts; i++) {
- if (host == hosts[i]) {
- disk->bus_type = GUEST_DISK_BUS_TYPE_IDE;
- disk->bus = i;
- disk->unit = tgt[1];
- break;
- }
- }
- if (i >= nhosts) {
- g_debug("no host for '%s' (driver '%s')", syspath, driver);
- goto cleanup;
- }
- } else if (strcmp(driver, "sym53c8xx") == 0) {
- /* scsi(LSI Logic): target*:0:<unit>:0 */
- if (!has_tgt) {
- g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
- goto cleanup;
- }
- disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
- disk->unit = tgt[1];
- } else if (strcmp(driver, "virtio-pci") == 0) {
- if (has_tgt) {
- /* virtio-scsi: target*:0:0:<unit> */
- disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
- disk->unit = tgt[2];
- } else {
- /* virtio-blk: 1 disk per 1 device */
- disk->bus_type = GUEST_DISK_BUS_TYPE_VIRTIO;
- }
- } else if (strcmp(driver, "ahci") == 0) {
- /* ahci: 1 host per 1 unit */
- if (!has_host || !has_tgt) {
- g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver);
- goto cleanup;
- }
- for (i = 0; i < nhosts; i++) {
- if (host == hosts[i]) {
- disk->unit = i;
- disk->bus_type = GUEST_DISK_BUS_TYPE_SATA;
- break;
- }
- }
- if (i >= nhosts) {
- g_debug("no host for '%s' (driver '%s')", syspath, driver);
- goto cleanup;
- }
- } else if (strcmp(driver, "nvme") == 0) {
- disk->bus_type = GUEST_DISK_BUS_TYPE_NVME;
- } else if (strcmp(driver, "ehci-pci") == 0 || strcmp(driver, "xhci_hcd") == 0) {
- disk->bus_type = GUEST_DISK_BUS_TYPE_USB;
- } else {
- g_debug("unknown driver '%s' (sysfs path '%s')", driver, syspath);
- goto cleanup;
- }
-
- ret = true;
-
-cleanup:
- g_free(driver);
- return ret;
-}
-
-/*
- * Store disk device info for non-PCI virtio devices (for example s390x
- * channel I/O devices). Returns true if information has been stored, or
- * false for failure.
- */
-static bool build_guest_fsinfo_for_nonpci_virtio(char const *syspath,
- GuestDiskAddress *disk,
- Error **errp)
-{
- unsigned int tgt[3];
- char *p;
-
- if (!strstr(syspath, "/virtio") || !strstr(syspath, "/block")) {
- g_debug("Unsupported virtio device '%s'", syspath);
- return false;
- }
-
- p = strstr(syspath, "/target");
- if (p && sscanf(p + 7, "%*u:%*u:%*u/%*u:%u:%u:%u",
- &tgt[0], &tgt[1], &tgt[2]) == 3) {
- /* virtio-scsi: target*:0:<target>:<unit> */
- disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI;
- disk->bus = tgt[0];
- disk->target = tgt[1];
- disk->unit = tgt[2];
- } else {
- /* virtio-blk: 1 disk per 1 device */
- disk->bus_type = GUEST_DISK_BUS_TYPE_VIRTIO;
- }
-
- return true;
-}
-
-/*
- * Store disk device info for CCW devices (s390x channel I/O devices).
- * Returns true if information has been stored, or false for failure.
- */
-static bool build_guest_fsinfo_for_ccw_dev(char const *syspath,
- GuestDiskAddress *disk,
- Error **errp)
-{
- unsigned int cssid, ssid, subchno, devno;
- char *p;
-
- p = strstr(syspath, "/devices/css");
- if (!p || sscanf(p + 12, "%*x/%x.%x.%x/%*x.%*x.%x/",
- &cssid, &ssid, &subchno, &devno) < 4) {
- g_debug("could not parse ccw device sysfs path: %s", syspath);
- return false;
- }
-
- disk->ccw_address = g_new0(GuestCCWAddress, 1);
- disk->ccw_address->cssid = cssid;
- disk->ccw_address->ssid = ssid;
- disk->ccw_address->subchno = subchno;
- disk->ccw_address->devno = devno;
-
- if (strstr(p, "/virtio")) {
- build_guest_fsinfo_for_nonpci_virtio(syspath, disk, errp);
- }
-
- return true;
-}
-
-/* Store disk device info specified by @sysfs into @fs */
-static void build_guest_fsinfo_for_real_device(char const *syspath,
- GuestFilesystemInfo *fs,
- Error **errp)
-{
- GuestDiskAddress *disk;
- GuestPCIAddress *pciaddr;
- bool has_hwinf;
-#ifdef CONFIG_LIBUDEV
- struct udev *udev = NULL;
- struct udev_device *udevice = NULL;
-#endif
-
- pciaddr = g_new0(GuestPCIAddress, 1);
- pciaddr->domain = -1; /* -1 means field is invalid */
- pciaddr->bus = -1;
- pciaddr->slot = -1;
- pciaddr->function = -1;
-
- disk = g_new0(GuestDiskAddress, 1);
- disk->pci_controller = pciaddr;
- disk->bus_type = GUEST_DISK_BUS_TYPE_UNKNOWN;
-
-#ifdef CONFIG_LIBUDEV
- udev = udev_new();
- udevice = udev_device_new_from_syspath(udev, syspath);
- if (udev == NULL || udevice == NULL) {
- g_debug("failed to query udev");
- } else {
- const char *devnode, *serial;
- devnode = udev_device_get_devnode(udevice);
- if (devnode != NULL) {
- disk->dev = g_strdup(devnode);
- }
- serial = udev_device_get_property_value(udevice, "ID_SERIAL");
- if (serial != NULL && *serial != 0) {
- disk->serial = g_strdup(serial);
- }
- }
-
- udev_unref(udev);
- udev_device_unref(udevice);
-#endif
-
- if (strstr(syspath, "/devices/pci")) {
- has_hwinf = build_guest_fsinfo_for_pci_dev(syspath, disk, errp);
- } else if (strstr(syspath, "/devices/css")) {
- has_hwinf = build_guest_fsinfo_for_ccw_dev(syspath, disk, errp);
- } else if (strstr(syspath, "/virtio")) {
- has_hwinf = build_guest_fsinfo_for_nonpci_virtio(syspath, disk, errp);
- } else {
- g_debug("Unsupported device type for '%s'", syspath);
- has_hwinf = false;
- }
-
- if (has_hwinf || disk->dev || disk->serial) {
- QAPI_LIST_PREPEND(fs->disk, disk);
- } else {
- qapi_free_GuestDiskAddress(disk);
- }
-}
-
-static void build_guest_fsinfo_for_device(char const *devpath,
- GuestFilesystemInfo *fs,
- Error **errp);
-
-/* Store a list of slave devices of the virtual volume specified by @syspath
- * into @fs */
-static void build_guest_fsinfo_for_virtual_device(char const *syspath,
- GuestFilesystemInfo *fs,
- Error **errp)
-{
- Error *err = NULL;
- DIR *dir;
- char *dirpath;
- struct dirent *entry;
-
- dirpath = g_strdup_printf("%s/slaves", syspath);
- dir = opendir(dirpath);
- if (!dir) {
- if (errno != ENOENT) {
- error_setg_errno(errp, errno, "opendir(\"%s\")", dirpath);
- }
- g_free(dirpath);
- return;
- }
-
- for (;;) {
- errno = 0;
- entry = readdir(dir);
- if (entry == NULL) {
- if (errno) {
- error_setg_errno(errp, errno, "readdir(\"%s\")", dirpath);
- }
- break;
- }
-
- if (entry->d_type == DT_LNK) {
- char *path;
-
- g_debug(" slave device '%s'", entry->d_name);
- path = g_strdup_printf("%s/slaves/%s", syspath, entry->d_name);
- build_guest_fsinfo_for_device(path, fs, &err);
- g_free(path);
-
- if (err) {
- error_propagate(errp, err);
- break;
- }
- }
- }
-
- g_free(dirpath);
- closedir(dir);
-}
-
-static bool is_disk_virtual(const char *devpath, Error **errp)
-{
- g_autofree char *syspath = realpath(devpath, NULL);
-
- if (!syspath) {
- error_setg_errno(errp, errno, "realpath(\"%s\")", devpath);
- return false;
- }
- return strstr(syspath, "/devices/virtual/block/") != NULL;
-}
-
-/* Dispatch to functions for virtual/real device */
-static void build_guest_fsinfo_for_device(char const *devpath,
- GuestFilesystemInfo *fs,
- Error **errp)
-{
- ERRP_GUARD();
- g_autofree char *syspath = NULL;
- bool is_virtual = false;
-
- syspath = realpath(devpath, NULL);
- if (!syspath) {
- if (errno != ENOENT) {
- error_setg_errno(errp, errno, "realpath(\"%s\")", devpath);
- return;
- }
-
- /* ENOENT: This devpath may not exist because of container config */
- if (!fs->name) {
- fs->name = g_path_get_basename(devpath);
- }
- return;
- }
-
- if (!fs->name) {
- fs->name = g_path_get_basename(syspath);
- }
-
- g_debug(" parse sysfs path '%s'", syspath);
- is_virtual = is_disk_virtual(syspath, errp);
- if (*errp != NULL) {
- return;
- }
- if (is_virtual) {
- build_guest_fsinfo_for_virtual_device(syspath, fs, errp);
- } else {
- build_guest_fsinfo_for_real_device(syspath, fs, errp);
- }
-}
-
-#ifdef CONFIG_LIBUDEV
-
-/*
- * Wrapper around build_guest_fsinfo_for_device() for getting just
- * the disk address.
- */
-static GuestDiskAddress *get_disk_address(const char *syspath, Error **errp)
-{
- g_autoptr(GuestFilesystemInfo) fs = NULL;
-
- fs = g_new0(GuestFilesystemInfo, 1);
- build_guest_fsinfo_for_device(syspath, fs, errp);
- if (fs->disk != NULL) {
- return g_steal_pointer(&fs->disk->value);
- }
- return NULL;
-}
-
-static char *get_alias_for_syspath(const char *syspath)
-{
- struct udev *udev = NULL;
- struct udev_device *udevice = NULL;
- char *ret = NULL;
-
- udev = udev_new();
- if (udev == NULL) {
- g_debug("failed to query udev");
- goto out;
- }
- udevice = udev_device_new_from_syspath(udev, syspath);
- if (udevice == NULL) {
- g_debug("failed to query udev for path: %s", syspath);
- goto out;
- } else {
- const char *alias = udev_device_get_property_value(
- udevice, "DM_NAME");
- /*
-         * NULL means there was an error, while an empty string means there
-         * is no alias. In the no-alias case we return NULL instead of an
-         * empty string.
- */
- if (alias == NULL) {
- g_debug("failed to query udev for device alias for: %s",
- syspath);
- } else if (*alias != 0) {
- ret = g_strdup(alias);
- }
- }
-
-out:
- udev_unref(udev);
- udev_device_unref(udevice);
- return ret;
-}
-
-static char *get_device_for_syspath(const char *syspath)
-{
- struct udev *udev = NULL;
- struct udev_device *udevice = NULL;
- char *ret = NULL;
-
- udev = udev_new();
- if (udev == NULL) {
- g_debug("failed to query udev");
- goto out;
- }
- udevice = udev_device_new_from_syspath(udev, syspath);
- if (udevice == NULL) {
- g_debug("failed to query udev for path: %s", syspath);
- goto out;
- } else {
- ret = g_strdup(udev_device_get_devnode(udevice));
- }
-
-out:
- udev_unref(udev);
- udev_device_unref(udevice);
- return ret;
-}
-
-static void get_disk_deps(const char *disk_dir, GuestDiskInfo *disk)
-{
- g_autofree char *deps_dir = NULL;
- const gchar *dep;
- GDir *dp_deps = NULL;
-
- /* List dependent disks */
- deps_dir = g_strdup_printf("%s/slaves", disk_dir);
- g_debug(" listing entries in: %s", deps_dir);
- dp_deps = g_dir_open(deps_dir, 0, NULL);
- if (dp_deps == NULL) {
- g_debug("failed to list entries in %s", deps_dir);
- return;
- }
- disk->has_dependencies = true;
- while ((dep = g_dir_read_name(dp_deps)) != NULL) {
- g_autofree char *dep_dir = NULL;
- char *dev_name;
-
- /* Add dependent disks */
- dep_dir = g_strdup_printf("%s/%s", deps_dir, dep);
- dev_name = get_device_for_syspath(dep_dir);
- if (dev_name != NULL) {
- g_debug(" adding dependent device: %s", dev_name);
- QAPI_LIST_PREPEND(disk->dependencies, dev_name);
- }
- }
- g_dir_close(dp_deps);
-}
-
-/*
- * Detect partition subdirectories, named "<disk_name><number>" or
- * "<disk_name>p<number>"
- *
- * @disk_name -- last component of /sys path (e.g. sda)
- * @disk_dir -- sys path of the disk (e.g. /sys/block/sda)
- * @disk_dev -- device node of the disk (e.g. /dev/sda)
- */
-static GuestDiskInfoList *get_disk_partitions(
- GuestDiskInfoList *list,
- const char *disk_name, const char *disk_dir,
- const char *disk_dev)
-{
- GuestDiskInfoList *ret = list;
- struct dirent *de_disk;
- DIR *dp_disk = NULL;
- size_t len = strlen(disk_name);
-
- dp_disk = opendir(disk_dir);
- while ((de_disk = readdir(dp_disk)) != NULL) {
- g_autofree char *partition_dir = NULL;
- char *dev_name;
- GuestDiskInfo *partition;
-
- if (!(de_disk->d_type & DT_DIR)) {
- continue;
- }
-
- if (!(strncmp(disk_name, de_disk->d_name, len) == 0 &&
- ((*(de_disk->d_name + len) == 'p' &&
- isdigit(*(de_disk->d_name + len + 1))) ||
- isdigit(*(de_disk->d_name + len))))) {
- continue;
- }
-
- partition_dir = g_strdup_printf("%s/%s",
- disk_dir, de_disk->d_name);
- dev_name = get_device_for_syspath(partition_dir);
- if (dev_name == NULL) {
- g_debug("Failed to get device name for syspath: %s",
- disk_dir);
- continue;
- }
- partition = g_new0(GuestDiskInfo, 1);
- partition->name = dev_name;
- partition->partition = true;
- partition->has_dependencies = true;
- /* Add parent disk as dependent for easier tracking of hierarchy */
- QAPI_LIST_PREPEND(partition->dependencies, g_strdup(disk_dev));
-
- QAPI_LIST_PREPEND(ret, partition);
- }
- closedir(dp_disk);
-
- return ret;
-}
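
The d_name test above accepts both common partition naming schemes, "<disk><number>" (e.g. sda1) and "<disk>p<number>" (e.g. nvme0n1p1). A small standalone sketch of the same check, with example names:

    /* Sketch: does `name` look like a partition of `disk`? */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int is_partition_of(const char *disk, const char *name)
    {
        size_t len = strlen(disk);

        if (strncmp(disk, name, len) != 0) {
            return 0;
        }
        return (name[len] == 'p' && isdigit((unsigned char)name[len + 1])) ||
               isdigit((unsigned char)name[len]);
    }

    int main(void)
    {
        printf("%d\n", is_partition_of("sda", "sda1"));          /* 1 */
        printf("%d\n", is_partition_of("nvme0n1", "nvme0n1p1")); /* 1 */
        printf("%d\n", is_partition_of("sda", "sdb1"));          /* 0 */
        return 0;
    }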
-
-static void get_nvme_smart(GuestDiskInfo *disk)
-{
- int fd;
- GuestNVMeSmart *smart;
- NvmeSmartLog log = {0};
- struct nvme_admin_cmd cmd = {
- .opcode = NVME_ADM_CMD_GET_LOG_PAGE,
- .nsid = NVME_NSID_BROADCAST,
- .addr = (uintptr_t)&log,
- .data_len = sizeof(log),
- .cdw10 = NVME_LOG_SMART_INFO | (1 << 15) /* RAE bit */
- | (((sizeof(log) >> 2) - 1) << 16)
- };
-
- fd = qga_open_cloexec(disk->name, O_RDONLY, 0);
- if (fd == -1) {
- g_debug("Failed to open device: %s: %s", disk->name, g_strerror(errno));
- return;
- }
-
- if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd)) {
- g_debug("Failed to get smart: %s: %s", disk->name, g_strerror(errno));
- close(fd);
- return;
- }
-
- disk->smart = g_new0(GuestDiskSmart, 1);
- disk->smart->type = GUEST_DISK_BUS_TYPE_NVME;
-
- smart = &disk->smart->u.nvme;
- smart->critical_warning = log.critical_warning;
- smart->temperature = lduw_le_p(&log.temperature); /* unaligned field */
- smart->available_spare = log.available_spare;
- smart->available_spare_threshold = log.available_spare_threshold;
- smart->percentage_used = log.percentage_used;
- smart->data_units_read_lo = le64_to_cpu(log.data_units_read[0]);
- smart->data_units_read_hi = le64_to_cpu(log.data_units_read[1]);
- smart->data_units_written_lo = le64_to_cpu(log.data_units_written[0]);
- smart->data_units_written_hi = le64_to_cpu(log.data_units_written[1]);
- smart->host_read_commands_lo = le64_to_cpu(log.host_read_commands[0]);
- smart->host_read_commands_hi = le64_to_cpu(log.host_read_commands[1]);
- smart->host_write_commands_lo = le64_to_cpu(log.host_write_commands[0]);
- smart->host_write_commands_hi = le64_to_cpu(log.host_write_commands[1]);
- smart->controller_busy_time_lo = le64_to_cpu(log.controller_busy_time[0]);
- smart->controller_busy_time_hi = le64_to_cpu(log.controller_busy_time[1]);
- smart->power_cycles_lo = le64_to_cpu(log.power_cycles[0]);
- smart->power_cycles_hi = le64_to_cpu(log.power_cycles[1]);
- smart->power_on_hours_lo = le64_to_cpu(log.power_on_hours[0]);
- smart->power_on_hours_hi = le64_to_cpu(log.power_on_hours[1]);
- smart->unsafe_shutdowns_lo = le64_to_cpu(log.unsafe_shutdowns[0]);
- smart->unsafe_shutdowns_hi = le64_to_cpu(log.unsafe_shutdowns[1]);
- smart->media_errors_lo = le64_to_cpu(log.media_errors[0]);
- smart->media_errors_hi = le64_to_cpu(log.media_errors[1]);
- smart->number_of_error_log_entries_lo =
- le64_to_cpu(log.number_of_error_log_entries[0]);
- smart->number_of_error_log_entries_hi =
- le64_to_cpu(log.number_of_error_log_entries[1]);
-
- close(fd);
-}
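
The cdw10 value above packs three fields of the NVMe Get Log Page command: the log page identifier in bits 7:0 (0x02 for the SMART / health log), the Retain Asynchronous Event bit at bit 15, and the zero-based number of dwords to transfer starting at bit 16. A minimal sketch of that packing, assuming the 512-byte SMART log defined by the NVMe specification:

    /* Sketch: assemble cdw10 for Get Log Page (SMART / health info). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t log_bytes = 512;                /* sizeof(NvmeSmartLog)   */
        uint32_t lid   = 0x02;                   /* NVME_LOG_SMART_INFO    */
        uint32_t rae   = 1u << 15;               /* retain async events    */
        uint32_t numd  = (log_bytes / 4) - 1;    /* dword count, 0-based   */
        uint32_t cdw10 = lid | rae | (numd << 16);

        printf("cdw10 = 0x%08" PRIx32 "\n", cdw10);  /* prints 0x007f8002 */
        return 0;
    }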
-
-static void get_disk_smart(GuestDiskInfo *disk)
-{
- if (disk->address
- && (disk->address->bus_type == GUEST_DISK_BUS_TYPE_NVME)) {
- get_nvme_smart(disk);
- }
-}
-
-GuestDiskInfoList *qmp_guest_get_disks(Error **errp)
-{
- GuestDiskInfoList *ret = NULL;
- GuestDiskInfo *disk;
- DIR *dp = NULL;
- struct dirent *de = NULL;
-
- g_debug("listing /sys/block directory");
- dp = opendir("/sys/block");
- if (dp == NULL) {
- error_setg_errno(errp, errno, "Can't open directory \"/sys/block\"");
- return NULL;
- }
- while ((de = readdir(dp)) != NULL) {
- g_autofree char *disk_dir = NULL, *line = NULL,
- *size_path = NULL;
- char *dev_name;
- Error *local_err = NULL;
- if (de->d_type != DT_LNK) {
- g_debug(" skipping entry: %s", de->d_name);
- continue;
- }
-
- /* Check size and skip zero-sized disks */
- g_debug(" checking disk size");
- size_path = g_strdup_printf("/sys/block/%s/size", de->d_name);
- if (!g_file_get_contents(size_path, &line, NULL, NULL)) {
- g_debug(" failed to read disk size");
- continue;
- }
- if (g_strcmp0(line, "0\n") == 0) {
- g_debug(" skipping zero-sized disk");
- continue;
- }
-
- g_debug(" adding %s", de->d_name);
- disk_dir = g_strdup_printf("/sys/block/%s", de->d_name);
- dev_name = get_device_for_syspath(disk_dir);
- if (dev_name == NULL) {
- g_debug("Failed to get device name for syspath: %s",
- disk_dir);
- continue;
- }
- disk = g_new0(GuestDiskInfo, 1);
- disk->name = dev_name;
- disk->partition = false;
- disk->alias = get_alias_for_syspath(disk_dir);
- QAPI_LIST_PREPEND(ret, disk);
-
- /* Get address for non-virtual devices */
- bool is_virtual = is_disk_virtual(disk_dir, &local_err);
- if (local_err != NULL) {
- g_debug(" failed to check disk path, ignoring error: %s",
- error_get_pretty(local_err));
- error_free(local_err);
- local_err = NULL;
- /* Don't try to get the address */
- is_virtual = true;
- }
- if (!is_virtual) {
- disk->address = get_disk_address(disk_dir, &local_err);
- if (local_err != NULL) {
- g_debug(" failed to get device info, ignoring error: %s",
- error_get_pretty(local_err));
- error_free(local_err);
- local_err = NULL;
- }
- }
-
- get_disk_deps(disk_dir, disk);
- get_disk_smart(disk);
- ret = get_disk_partitions(ret, de->d_name, disk_dir, dev_name);
- }
-
- closedir(dp);
-
- return ret;
-}
-
-#else
-
-GuestDiskInfoList *qmp_guest_get_disks(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-#endif
-
-/* Return info about the disk device(s) which @mount lies on */
-static GuestFilesystemInfo *build_guest_fsinfo(struct FsMount *mount,
- Error **errp)
-{
- GuestFilesystemInfo *fs = g_malloc0(sizeof(*fs));
- struct statvfs buf;
- unsigned long used, nonroot_total, fr_size;
- char *devpath = g_strdup_printf("/sys/dev/block/%u:%u",
- mount->devmajor, mount->devminor);
-
- fs->mountpoint = g_strdup(mount->dirname);
- fs->type = g_strdup(mount->devtype);
- build_guest_fsinfo_for_device(devpath, fs, errp);
-
- if (statvfs(fs->mountpoint, &buf) == 0) {
- fr_size = buf.f_frsize;
- used = buf.f_blocks - buf.f_bfree;
- nonroot_total = used + buf.f_bavail;
- fs->used_bytes = used * fr_size;
- fs->total_bytes = nonroot_total * fr_size;
- fs->total_bytes_privileged = buf.f_blocks * fr_size;
-
- fs->has_total_bytes = true;
- fs->has_total_bytes_privileged = true;
- fs->has_used_bytes = true;
- }
-
- g_free(devpath);
-
- return fs;
-}
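
The statvfs() arithmetic above mirrors what df reports to an unprivileged user: used space is f_blocks - f_bfree, the non-root total adds back only f_bavail (excluding root-reserved blocks), and the privileged total counts all of f_blocks. A standalone sketch of the same calculation, querying "/" as an example:

    /* Sketch: df-style usage figures from statvfs(). */
    #include <stdio.h>
    #include <sys/statvfs.h>

    int main(void)
    {
        struct statvfs buf;

        if (statvfs("/", &buf) != 0) {
            perror("statvfs");
            return 1;
        }

        unsigned long long fr_size = buf.f_frsize;
        unsigned long long used = buf.f_blocks - buf.f_bfree;
        unsigned long long nonroot_total = used + buf.f_bavail;

        printf("used:             %llu bytes\n", used * fr_size);
        printf("total (non-root): %llu bytes\n", nonroot_total * fr_size);
        printf("total (root):     %llu bytes\n",
               (unsigned long long)buf.f_blocks * fr_size);
        return 0;
    }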
-
-GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp)
-{
- FsMountList mounts;
- struct FsMount *mount;
- GuestFilesystemInfoList *ret = NULL;
- Error *local_err = NULL;
-
- QTAILQ_INIT(&mounts);
- if (!build_fs_mount_list(&mounts, &local_err)) {
- error_propagate(errp, local_err);
- return NULL;
- }
-
- QTAILQ_FOREACH(mount, &mounts, next) {
- g_debug("Building guest fsinfo for '%s'", mount->dirname);
-
- QAPI_LIST_PREPEND(ret, build_guest_fsinfo(mount, &local_err));
- if (local_err) {
- error_propagate(errp, local_err);
- qapi_free_GuestFilesystemInfoList(ret);
- ret = NULL;
- break;
- }
- }
-
- free_fs_mount_list(&mounts);
- return ret;
-}
-#endif /* CONFIG_FSFREEZE */
-
-#if defined(CONFIG_FSTRIM)
-/*
- * Walk list of mounted file systems in the guest, and trim them.
- */
-GuestFilesystemTrimResponse *
-qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp)
-{
- GuestFilesystemTrimResponse *response;
- GuestFilesystemTrimResult *result;
- int ret = 0;
- FsMountList mounts;
- struct FsMount *mount;
- int fd;
- struct fstrim_range r;
-
- slog("guest-fstrim called");
-
- QTAILQ_INIT(&mounts);
- if (!build_fs_mount_list(&mounts, errp)) {
- return NULL;
- }
-
- response = g_malloc0(sizeof(*response));
-
- QTAILQ_FOREACH(mount, &mounts, next) {
- result = g_malloc0(sizeof(*result));
- result->path = g_strdup(mount->dirname);
-
- QAPI_LIST_PREPEND(response->paths, result);
-
- fd = qga_open_cloexec(mount->dirname, O_RDONLY, 0);
- if (fd == -1) {
- result->error = g_strdup_printf("failed to open: %s",
- strerror(errno));
- continue;
- }
-
-        /* We try to cull filesystems we know won't work in advance, but other
-         * filesystems may not implement fstrim for less obvious reasons.
-         * Those report EOPNOTSUPP, while some other cases (e.g. CD-ROMs)
-         * report ENOTTY; anything else is unexpected.
-         */
- r.start = 0;
- r.len = -1;
- r.minlen = has_minimum ? minimum : 0;
- ret = ioctl(fd, FITRIM, &r);
- if (ret == -1) {
- if (errno == ENOTTY || errno == EOPNOTSUPP) {
- result->error = g_strdup("trim not supported");
- } else {
- result->error = g_strdup_printf("failed to trim: %s",
- strerror(errno));
- }
- close(fd);
- continue;
- }
-
- result->has_minimum = true;
- result->minimum = r.minlen;
- result->has_trimmed = true;
- result->trimmed = r.len;
- close(fd);
- }
-
- free_fs_mount_list(&mounts);
- return response;
-}
-#endif /* CONFIG_FSTRIM */
-
-
-#define LINUX_SYS_STATE_FILE "/sys/power/state"
-#define SUSPEND_SUPPORTED 0
-#define SUSPEND_NOT_SUPPORTED 1
-
-typedef enum {
- SUSPEND_MODE_DISK = 0,
- SUSPEND_MODE_RAM = 1,
- SUSPEND_MODE_HYBRID = 2,
-} SuspendMode;
-
-/*
- * Executes a command in a child process using g_spawn_sync,
- * returning an int >= 0 representing the exit status of the
- * process.
- *
- * If the program wasn't found in path, returns -1.
- *
- * If a problem happened when creating the child process,
- * returns -1 and errp is set.
- */
-static int run_process_child(const char *command[], Error **errp)
-{
- int exit_status, spawn_flag;
- GError *g_err = NULL;
- bool success;
-
- spawn_flag = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL |
- G_SPAWN_STDERR_TO_DEV_NULL;
-
- success = g_spawn_sync(NULL, (char **)command, NULL, spawn_flag,
- NULL, NULL, NULL, NULL,
- &exit_status, &g_err);
-
- if (success) {
- return WEXITSTATUS(exit_status);
- }
-
- if (g_err && (g_err->code != G_SPAWN_ERROR_NOENT)) {
- error_setg(errp, "failed to create child process, error '%s'",
- g_err->message);
- }
-
- g_error_free(g_err);
- return -1;
-}
-
-static bool systemd_supports_mode(SuspendMode mode, Error **errp)
-{
- const char *systemctl_args[3] = {"systemd-hibernate", "systemd-suspend",
- "systemd-hybrid-sleep"};
- const char *cmd[4] = {"systemctl", "status", systemctl_args[mode], NULL};
- int status;
-
- status = run_process_child(cmd, errp);
-
-    /*
-     * systemctl status uses LSB return codes, so a status > 0 can still
-     * be fine. To decide whether the guest supports the selected suspend
-     * mode, status must be < 4: 4 is the code for an unknown service
-     * status, i.e. the return value when the service does not exist.
-     * A common value is status = 3 (program is not running).
-     */
- if (status > 0 && status < 4) {
- return true;
- }
-
- return false;
-}
-
-static void systemd_suspend(SuspendMode mode, Error **errp)
-{
- Error *local_err = NULL;
- const char *systemctl_args[3] = {"hibernate", "suspend", "hybrid-sleep"};
- const char *cmd[3] = {"systemctl", systemctl_args[mode], NULL};
- int status;
-
- status = run_process_child(cmd, &local_err);
-
- if (status == 0) {
- return;
- }
-
- if ((status == -1) && !local_err) {
- error_setg(errp, "the helper program 'systemctl %s' was not found",
- systemctl_args[mode]);
- return;
- }
-
- if (local_err) {
- error_propagate(errp, local_err);
- } else {
- error_setg(errp, "the helper program 'systemctl %s' returned an "
- "unexpected exit status code (%d)",
- systemctl_args[mode], status);
- }
-}
-
-static bool pmutils_supports_mode(SuspendMode mode, Error **errp)
-{
- Error *local_err = NULL;
- const char *pmutils_args[3] = {"--hibernate", "--suspend",
- "--suspend-hybrid"};
- const char *cmd[3] = {"pm-is-supported", pmutils_args[mode], NULL};
- int status;
-
- status = run_process_child(cmd, &local_err);
-
- if (status == SUSPEND_SUPPORTED) {
- return true;
- }
-
- if ((status == -1) && !local_err) {
- return false;
- }
-
- if (local_err) {
- error_propagate(errp, local_err);
- } else {
- error_setg(errp,
- "the helper program '%s' returned an unexpected exit"
- " status code (%d)", "pm-is-supported", status);
- }
-
- return false;
-}
-
-static void pmutils_suspend(SuspendMode mode, Error **errp)
-{
- Error *local_err = NULL;
- const char *pmutils_binaries[3] = {"pm-hibernate", "pm-suspend",
- "pm-suspend-hybrid"};
- const char *cmd[2] = {pmutils_binaries[mode], NULL};
- int status;
-
- status = run_process_child(cmd, &local_err);
-
- if (status == 0) {
- return;
- }
-
- if ((status == -1) && !local_err) {
- error_setg(errp, "the helper program '%s' was not found",
- pmutils_binaries[mode]);
- return;
- }
-
- if (local_err) {
- error_propagate(errp, local_err);
- } else {
- error_setg(errp,
- "the helper program '%s' returned an unexpected exit"
- " status code (%d)", pmutils_binaries[mode], status);
- }
-}
-
-static bool linux_sys_state_supports_mode(SuspendMode mode, Error **errp)
-{
- const char *sysfile_strs[3] = {"disk", "mem", NULL};
- const char *sysfile_str = sysfile_strs[mode];
- char buf[32]; /* hopefully big enough */
- int fd;
- ssize_t ret;
-
- if (!sysfile_str) {
- error_setg(errp, "unknown guest suspend mode");
- return false;
- }
-
- fd = open(LINUX_SYS_STATE_FILE, O_RDONLY);
- if (fd < 0) {
- return false;
- }
-
- ret = read(fd, buf, sizeof(buf) - 1);
- close(fd);
- if (ret <= 0) {
- return false;
- }
- buf[ret] = '\0';
-
- if (strstr(buf, sysfile_str)) {
- return true;
- }
- return false;
-}
-
-static void linux_sys_state_suspend(SuspendMode mode, Error **errp)
-{
- g_autoptr(GError) local_gerr = NULL;
- const char *sysfile_strs[3] = {"disk", "mem", NULL};
- const char *sysfile_str = sysfile_strs[mode];
-
- if (!sysfile_str) {
- error_setg(errp, "unknown guest suspend mode");
- return;
- }
-
- if (!g_file_set_contents(LINUX_SYS_STATE_FILE, sysfile_str,
- -1, &local_gerr)) {
- error_setg(errp, "suspend: cannot write to '%s': %s",
- LINUX_SYS_STATE_FILE, local_gerr->message);
- return;
- }
-}
-
-static void guest_suspend(SuspendMode mode, Error **errp)
-{
- Error *local_err = NULL;
- bool mode_supported = false;
-
- if (systemd_supports_mode(mode, &local_err)) {
- mode_supported = true;
- systemd_suspend(mode, &local_err);
-
- if (!local_err) {
- return;
- }
- }
-
- error_free(local_err);
- local_err = NULL;
-
- if (pmutils_supports_mode(mode, &local_err)) {
- mode_supported = true;
- pmutils_suspend(mode, &local_err);
-
- if (!local_err) {
- return;
- }
- }
-
- error_free(local_err);
- local_err = NULL;
-
- if (linux_sys_state_supports_mode(mode, &local_err)) {
- mode_supported = true;
- linux_sys_state_suspend(mode, &local_err);
- }
-
- if (!mode_supported) {
- error_free(local_err);
- error_setg(errp,
- "the requested suspend mode is not supported by the guest");
- } else {
- error_propagate(errp, local_err);
- }
-}
-
-void qmp_guest_suspend_disk(Error **errp)
-{
- guest_suspend(SUSPEND_MODE_DISK, errp);
-}
-
-void qmp_guest_suspend_ram(Error **errp)
-{
- guest_suspend(SUSPEND_MODE_RAM, errp);
-}
-
-void qmp_guest_suspend_hybrid(Error **errp)
-{
- guest_suspend(SUSPEND_MODE_HYBRID, errp);
-}
-
-/* Transfer online/offline status between @vcpu and the guest system.
- *
- * On input either @errp or *@errp must be NULL.
- *
- * In system-to-@vcpu direction, the following @vcpu fields are accessed:
- * - R: vcpu->logical_id
- * - W: vcpu->online
- * - W: vcpu->can_offline
- *
- * In @vcpu-to-system direction, the following @vcpu fields are accessed:
- * - R: vcpu->logical_id
- * - R: vcpu->online
- *
- * Written members remain unmodified on error.
- */
-static void transfer_vcpu(GuestLogicalProcessor *vcpu, bool sys2vcpu,
- char *dirpath, Error **errp)
-{
- int fd;
- int res;
- int dirfd;
- static const char fn[] = "online";
-
- dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
- if (dirfd == -1) {
- error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
- return;
- }
-
- fd = openat(dirfd, fn, sys2vcpu ? O_RDONLY : O_RDWR);
- if (fd == -1) {
- if (errno != ENOENT) {
- error_setg_errno(errp, errno, "open(\"%s/%s\")", dirpath, fn);
- } else if (sys2vcpu) {
- vcpu->online = true;
- vcpu->can_offline = false;
- } else if (!vcpu->online) {
- error_setg(errp, "logical processor #%" PRId64 " can't be "
- "offlined", vcpu->logical_id);
- } /* otherwise pretend successful re-onlining */
- } else {
- unsigned char status;
-
- res = pread(fd, &status, 1, 0);
- if (res == -1) {
- error_setg_errno(errp, errno, "pread(\"%s/%s\")", dirpath, fn);
- } else if (res == 0) {
- error_setg(errp, "pread(\"%s/%s\"): unexpected EOF", dirpath,
- fn);
- } else if (sys2vcpu) {
- vcpu->online = (status != '0');
- vcpu->can_offline = true;
- } else if (vcpu->online != (status != '0')) {
- status = '0' + vcpu->online;
- if (pwrite(fd, &status, 1, 0) == -1) {
- error_setg_errno(errp, errno, "pwrite(\"%s/%s\")", dirpath,
- fn);
- }
- } /* otherwise pretend successful re-(on|off)-lining */
-
- res = close(fd);
- g_assert(res == 0);
- }
-
- res = close(dirfd);
- g_assert(res == 0);
-}
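
The helper above drives the single-character "online" attribute under each CPU's sysfs directory: reading it yields '0' or '1', and writing the opposite character requests an offline or online transition; a CPU without that file (typically cpu0) is treated as permanently online. A minimal read-only sketch, with an illustrative CPU number:

    /* Sketch: read /sys/devices/system/cpu/cpuN/online ('0' or '1'). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/devices/system/cpu/cpu1/online"; /* example */
        unsigned char status;
        int fd = open(path, O_RDONLY);

        if (fd == -1) {
            perror("open");   /* e.g. cpu0 usually has no online file */
            return 1;
        }
        if (pread(fd, &status, 1, 0) == 1) {
            printf("cpu1 is %s\n", status == '0' ? "offline" : "online");
        }
        close(fd);
        return 0;
    }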
-
-GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp)
-{
- GuestLogicalProcessorList *head, **tail;
- const char *cpu_dir = "/sys/devices/system/cpu";
- const gchar *line;
- g_autoptr(GDir) cpu_gdir = NULL;
- Error *local_err = NULL;
-
- head = NULL;
- tail = &head;
- cpu_gdir = g_dir_open(cpu_dir, 0, NULL);
-
- if (cpu_gdir == NULL) {
- error_setg_errno(errp, errno, "failed to list entries: %s", cpu_dir);
- return NULL;
- }
-
- while (local_err == NULL && (line = g_dir_read_name(cpu_gdir)) != NULL) {
- GuestLogicalProcessor *vcpu;
- int64_t id;
- if (sscanf(line, "cpu%" PRId64, &id)) {
- g_autofree char *path = g_strdup_printf("/sys/devices/system/cpu/"
- "cpu%" PRId64 "/", id);
- vcpu = g_malloc0(sizeof *vcpu);
- vcpu->logical_id = id;
- vcpu->has_can_offline = true; /* lolspeak ftw */
- transfer_vcpu(vcpu, true, path, &local_err);
- QAPI_LIST_APPEND(tail, vcpu);
- }
- }
-
- if (local_err == NULL) {
- /* there's no guest with zero VCPUs */
- g_assert(head != NULL);
- return head;
- }
-
- qapi_free_GuestLogicalProcessorList(head);
- error_propagate(errp, local_err);
- return NULL;
-}
-
-int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp)
-{
- int64_t processed;
- Error *local_err = NULL;
-
- processed = 0;
- while (vcpus != NULL) {
- char *path = g_strdup_printf("/sys/devices/system/cpu/cpu%" PRId64 "/",
- vcpus->value->logical_id);
-
- transfer_vcpu(vcpus->value, false, path, &local_err);
- g_free(path);
- if (local_err != NULL) {
- break;
- }
- ++processed;
- vcpus = vcpus->next;
- }
-
- if (local_err != NULL) {
- if (processed == 0) {
- error_propagate(errp, local_err);
- } else {
- error_free(local_err);
- }
- }
-
- return processed;
-}
-#endif /* __linux__ */
-
#if defined(__linux__) || defined(__FreeBSD__)
void qmp_guest_set_user_password(const char *username,
const char *password,
@@ -2190,574 +878,8 @@ void qmp_guest_set_user_password(const char *username,
return;
}
}
-#else /* __linux__ || __FreeBSD__ */
-void qmp_guest_set_user_password(const char *username,
- const char *password,
- bool crypted,
- Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-}
#endif /* __linux__ || __FreeBSD__ */
-#ifdef __linux__
-static void ga_read_sysfs_file(int dirfd, const char *pathname, char *buf,
- int size, Error **errp)
-{
- int fd;
- int res;
-
- errno = 0;
- fd = openat(dirfd, pathname, O_RDONLY);
- if (fd == -1) {
- error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname);
- return;
- }
-
- res = pread(fd, buf, size, 0);
- if (res == -1) {
- error_setg_errno(errp, errno, "pread sysfs file \"%s\"", pathname);
- } else if (res == 0) {
- error_setg(errp, "pread sysfs file \"%s\": unexpected EOF", pathname);
- }
- close(fd);
-}
-
-static void ga_write_sysfs_file(int dirfd, const char *pathname,
- const char *buf, int size, Error **errp)
-{
- int fd;
-
- errno = 0;
- fd = openat(dirfd, pathname, O_WRONLY);
- if (fd == -1) {
- error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname);
- return;
- }
-
- if (pwrite(fd, buf, size, 0) == -1) {
- error_setg_errno(errp, errno, "pwrite sysfs file \"%s\"", pathname);
- }
-
- close(fd);
-}
-
-/* Transfer online/offline status between @mem_blk and the guest system.
- *
- * On input either @errp or *@errp must be NULL.
- *
- * In system-to-@mem_blk direction, the following @mem_blk fields are accessed:
- * - R: mem_blk->phys_index
- * - W: mem_blk->online
- * - W: mem_blk->can_offline
- *
- * In @mem_blk-to-system direction, the following @mem_blk fields are accessed:
- * - R: mem_blk->phys_index
- * - R: mem_blk->online
- * - R: mem_blk->can_offline
- *
- * Written members remain unmodified on error.
- */
-static void transfer_memory_block(GuestMemoryBlock *mem_blk, bool sys2memblk,
- GuestMemoryBlockResponse *result,
- Error **errp)
-{
- char *dirpath;
- int dirfd;
- char *status;
- Error *local_err = NULL;
-
- if (!sys2memblk) {
- DIR *dp;
-
- if (!result) {
- error_setg(errp, "Internal error, 'result' should not be NULL");
- return;
- }
- errno = 0;
- dp = opendir("/sys/devices/system/memory/");
-        /* If there is no 'memory' directory in sysfs, assume this VM does
-         * not support onlining/offlining memory blocks.
-         */
- if (!dp) {
- if (errno == ENOENT) {
- result->response =
- GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED;
- }
- goto out1;
- }
- closedir(dp);
- }
-
- dirpath = g_strdup_printf("/sys/devices/system/memory/memory%" PRId64 "/",
- mem_blk->phys_index);
- dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
- if (dirfd == -1) {
- if (sys2memblk) {
- error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
- } else {
- if (errno == ENOENT) {
- result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_NOT_FOUND;
- } else {
- result->response =
- GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
- }
- }
- g_free(dirpath);
- goto out1;
- }
- g_free(dirpath);
-
- status = g_malloc0(10);
- ga_read_sysfs_file(dirfd, "state", status, 10, &local_err);
- if (local_err) {
-        /* handle sysfs files that do not exist in older kernels */
- if (errno == ENOENT) {
- error_free(local_err);
- if (sys2memblk) {
- mem_blk->online = true;
- mem_blk->can_offline = false;
- } else if (!mem_blk->online) {
- result->response =
- GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED;
- }
- } else {
- if (sys2memblk) {
- error_propagate(errp, local_err);
- } else {
- error_free(local_err);
- result->response =
- GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
- }
- }
- goto out2;
- }
-
- if (sys2memblk) {
- char removable = '0';
-
- mem_blk->online = (strncmp(status, "online", 6) == 0);
-
- ga_read_sysfs_file(dirfd, "removable", &removable, 1, &local_err);
- if (local_err) {
-            /* without a 'removable' file the block cannot be offlined */
- if (errno == ENOENT) {
- error_free(local_err);
- mem_blk->can_offline = false;
- } else {
- error_propagate(errp, local_err);
- }
- } else {
- mem_blk->can_offline = (removable != '0');
- }
- } else {
- if (mem_blk->online != (strncmp(status, "online", 6) == 0)) {
- const char *new_state = mem_blk->online ? "online" : "offline";
-
- ga_write_sysfs_file(dirfd, "state", new_state, strlen(new_state),
- &local_err);
- if (local_err) {
- error_free(local_err);
- result->response =
- GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED;
- goto out2;
- }
-
- result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_SUCCESS;
- result->has_error_code = false;
- } /* otherwise pretend successful re-(on|off)-lining */
- }
- g_free(status);
- close(dirfd);
- return;
-
-out2:
- g_free(status);
- close(dirfd);
-out1:
- if (!sys2memblk) {
- result->has_error_code = true;
- result->error_code = errno;
- }
-}
-
-GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp)
-{
- GuestMemoryBlockList *head, **tail;
- Error *local_err = NULL;
- struct dirent *de;
- DIR *dp;
-
- head = NULL;
- tail = &head;
-
- dp = opendir("/sys/devices/system/memory/");
- if (!dp) {
- /* it's ok if this happens to be a system that doesn't expose
- * memory blocks via sysfs, but otherwise we should report
- * an error
- */
- if (errno != ENOENT) {
-            error_setg_errno(errp, errno, "Can't open directory "
-                             "\"/sys/devices/system/memory/\"");
- }
- return NULL;
- }
-
-    /* Note: memory block phys_index values may be discontiguous, because a
-     * memory block is the unit of the sparse-memory design, which allows
-     * discontiguous memory ranges (e.g. NUMA), so we have to traverse the
-     * whole memory block directory.
- */
- while ((de = readdir(dp)) != NULL) {
- GuestMemoryBlock *mem_blk;
-
- if ((strncmp(de->d_name, "memory", 6) != 0) ||
- !(de->d_type & DT_DIR)) {
- continue;
- }
-
- mem_blk = g_malloc0(sizeof *mem_blk);
- /* The d_name is "memoryXXX", phys_index is block id, same as XXX */
- mem_blk->phys_index = strtoul(&de->d_name[6], NULL, 10);
- mem_blk->has_can_offline = true; /* lolspeak ftw */
- transfer_memory_block(mem_blk, true, NULL, &local_err);
- if (local_err) {
- break;
- }
-
- QAPI_LIST_APPEND(tail, mem_blk);
- }
-
- closedir(dp);
- if (local_err == NULL) {
- /* there's no guest with zero memory blocks */
- if (head == NULL) {
- error_setg(errp, "guest reported zero memory blocks!");
- }
- return head;
- }
-
- qapi_free_GuestMemoryBlockList(head);
- error_propagate(errp, local_err);
- return NULL;
-}
-
-GuestMemoryBlockResponseList *
-qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp)
-{
- GuestMemoryBlockResponseList *head, **tail;
- Error *local_err = NULL;
-
- head = NULL;
- tail = &head;
-
- while (mem_blks != NULL) {
- GuestMemoryBlockResponse *result;
- GuestMemoryBlock *current_mem_blk = mem_blks->value;
-
- result = g_malloc0(sizeof(*result));
- result->phys_index = current_mem_blk->phys_index;
- transfer_memory_block(current_mem_blk, false, result, &local_err);
- if (local_err) { /* should never happen */
- goto err;
- }
-
- QAPI_LIST_APPEND(tail, result);
- mem_blks = mem_blks->next;
- }
-
- return head;
-err:
- qapi_free_GuestMemoryBlockResponseList(head);
- error_propagate(errp, local_err);
- return NULL;
-}
-
-GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp)
-{
- Error *local_err = NULL;
- char *dirpath;
- int dirfd;
- char *buf;
- GuestMemoryBlockInfo *info;
-
- dirpath = g_strdup_printf("/sys/devices/system/memory/");
- dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
- if (dirfd == -1) {
- error_setg_errno(errp, errno, "open(\"%s\")", dirpath);
- g_free(dirpath);
- return NULL;
- }
- g_free(dirpath);
-
- buf = g_malloc0(20);
- ga_read_sysfs_file(dirfd, "block_size_bytes", buf, 20, &local_err);
- close(dirfd);
- if (local_err) {
- g_free(buf);
- error_propagate(errp, local_err);
- return NULL;
- }
-
- info = g_new0(GuestMemoryBlockInfo, 1);
- info->size = strtol(buf, NULL, 16); /* the unit is bytes */
-
- g_free(buf);
-
- return info;
-}
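
The block_size_bytes attribute read above is a hexadecimal string without a 0x prefix, hence the base-16 strtol(); for example "8000000" parses to 0x8000000, i.e. 134217728 bytes (128 MiB). A tiny sketch with that example value:

    /* Sketch: parse the hexadecimal block_size_bytes sysfs attribute. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *buf = "8000000";            /* example sysfs content */
        long size = strtol(buf, NULL, 16);

        printf("%ld bytes (%ld MiB)\n", size, size / (1024 * 1024));
        return 0;
    }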
-
-#define MAX_NAME_LEN 128
-static GuestDiskStatsInfoList *guest_get_diskstats(Error **errp)
-{
-#ifdef CONFIG_LINUX
- GuestDiskStatsInfoList *head = NULL, **tail = &head;
- const char *diskstats = "/proc/diskstats";
- FILE *fp;
- size_t n;
- char *line = NULL;
-
- fp = fopen(diskstats, "r");
- if (fp == NULL) {
- error_setg_errno(errp, errno, "open(\"%s\")", diskstats);
- return NULL;
- }
-
- while (getline(&line, &n, fp) != -1) {
- g_autofree GuestDiskStatsInfo *diskstatinfo = NULL;
- g_autofree GuestDiskStats *diskstat = NULL;
- char dev_name[MAX_NAME_LEN];
- unsigned int ios_pgr, tot_ticks, rq_ticks, wr_ticks, dc_ticks, fl_ticks;
- unsigned long rd_ios, rd_merges_or_rd_sec, rd_ticks_or_wr_sec, wr_ios;
- unsigned long wr_merges, rd_sec_or_wr_ios, wr_sec;
- unsigned long dc_ios, dc_merges, dc_sec, fl_ios;
- unsigned int major, minor;
- int i;
-
- i = sscanf(line, "%u %u %s %lu %lu %lu"
- "%lu %lu %lu %lu %u %u %u %u"
- "%lu %lu %lu %u %lu %u",
- &major, &minor, dev_name,
- &rd_ios, &rd_merges_or_rd_sec, &rd_sec_or_wr_ios,
- &rd_ticks_or_wr_sec, &wr_ios, &wr_merges, &wr_sec,
- &wr_ticks, &ios_pgr, &tot_ticks, &rq_ticks,
- &dc_ios, &dc_merges, &dc_sec, &dc_ticks,
- &fl_ios, &fl_ticks);
-
- if (i < 7) {
- continue;
- }
-
- diskstatinfo = g_new0(GuestDiskStatsInfo, 1);
- diskstatinfo->name = g_strdup(dev_name);
- diskstatinfo->major = major;
- diskstatinfo->minor = minor;
-
- diskstat = g_new0(GuestDiskStats, 1);
- if (i == 7) {
- diskstat->has_read_ios = true;
- diskstat->read_ios = rd_ios;
- diskstat->has_read_sectors = true;
- diskstat->read_sectors = rd_merges_or_rd_sec;
- diskstat->has_write_ios = true;
- diskstat->write_ios = rd_sec_or_wr_ios;
- diskstat->has_write_sectors = true;
- diskstat->write_sectors = rd_ticks_or_wr_sec;
- }
- if (i >= 14) {
- diskstat->has_read_ios = true;
- diskstat->read_ios = rd_ios;
- diskstat->has_read_sectors = true;
- diskstat->read_sectors = rd_sec_or_wr_ios;
- diskstat->has_read_merges = true;
- diskstat->read_merges = rd_merges_or_rd_sec;
- diskstat->has_read_ticks = true;
- diskstat->read_ticks = rd_ticks_or_wr_sec;
- diskstat->has_write_ios = true;
- diskstat->write_ios = wr_ios;
- diskstat->has_write_sectors = true;
- diskstat->write_sectors = wr_sec;
- diskstat->has_write_merges = true;
- diskstat->write_merges = wr_merges;
- diskstat->has_write_ticks = true;
- diskstat->write_ticks = wr_ticks;
- diskstat->has_ios_pgr = true;
- diskstat->ios_pgr = ios_pgr;
- diskstat->has_total_ticks = true;
- diskstat->total_ticks = tot_ticks;
- diskstat->has_weight_ticks = true;
- diskstat->weight_ticks = rq_ticks;
- }
- if (i >= 18) {
- diskstat->has_discard_ios = true;
- diskstat->discard_ios = dc_ios;
- diskstat->has_discard_merges = true;
- diskstat->discard_merges = dc_merges;
- diskstat->has_discard_sectors = true;
- diskstat->discard_sectors = dc_sec;
- diskstat->has_discard_ticks = true;
- diskstat->discard_ticks = dc_ticks;
- }
- if (i >= 20) {
- diskstat->has_flush_ios = true;
- diskstat->flush_ios = fl_ios;
- diskstat->has_flush_ticks = true;
- diskstat->flush_ticks = fl_ticks;
- }
-
- diskstatinfo->stats = g_steal_pointer(&diskstat);
- QAPI_LIST_APPEND(tail, diskstatinfo);
- diskstatinfo = NULL;
- }
- free(line);
- fclose(fp);
- return head;
-#else
- g_debug("disk stats reporting available only for Linux");
- return NULL;
-#endif
-}
-
-GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp)
-{
- return guest_get_diskstats(errp);
-}
-
-GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp)
-{
- GuestCpuStatsList *head = NULL, **tail = &head;
- const char *cpustats = "/proc/stat";
- int clk_tck = sysconf(_SC_CLK_TCK);
- FILE *fp;
- size_t n;
- char *line = NULL;
-
- fp = fopen(cpustats, "r");
- if (fp == NULL) {
- error_setg_errno(errp, errno, "open(\"%s\")", cpustats);
- return NULL;
- }
-
- while (getline(&line, &n, fp) != -1) {
- GuestCpuStats *cpustat = NULL;
- GuestLinuxCpuStats *linuxcpustat;
- int i;
- unsigned long user, system, idle, iowait, irq, softirq, steal, guest;
- unsigned long nice, guest_nice;
- char name[64];
-
- i = sscanf(line, "%s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
- name, &user, &nice, &system, &idle, &iowait, &irq, &softirq,
- &steal, &guest, &guest_nice);
-
- /* drop "cpu 1 2 3 ...", get "cpuX 1 2 3 ..." only */
- if ((i == EOF) || strncmp(name, "cpu", 3) || (name[3] == '\0')) {
- continue;
- }
-
- if (i < 5) {
- slog("Parsing cpu stat from %s failed, see \"man proc\"", cpustats);
- break;
- }
-
- cpustat = g_new0(GuestCpuStats, 1);
- cpustat->type = GUEST_CPU_STATS_TYPE_LINUX;
-
- linuxcpustat = &cpustat->u.q_linux;
- linuxcpustat->cpu = atoi(&name[3]);
- linuxcpustat->user = user * 1000 / clk_tck;
- linuxcpustat->nice = nice * 1000 / clk_tck;
- linuxcpustat->system = system * 1000 / clk_tck;
- linuxcpustat->idle = idle * 1000 / clk_tck;
-
- if (i > 5) {
- linuxcpustat->has_iowait = true;
- linuxcpustat->iowait = iowait * 1000 / clk_tck;
- }
-
- if (i > 6) {
- linuxcpustat->has_irq = true;
- linuxcpustat->irq = irq * 1000 / clk_tck;
- linuxcpustat->has_softirq = true;
- linuxcpustat->softirq = softirq * 1000 / clk_tck;
- }
-
- if (i > 8) {
- linuxcpustat->has_steal = true;
- linuxcpustat->steal = steal * 1000 / clk_tck;
- }
-
- if (i > 9) {
- linuxcpustat->has_guest = true;
- linuxcpustat->guest = guest * 1000 / clk_tck;
- }
-
- if (i > 10) {
- linuxcpustat->has_guest = true;
- linuxcpustat->guest = guest * 1000 / clk_tck;
- linuxcpustat->has_guestnice = true;
- linuxcpustat->guestnice = guest_nice * 1000 / clk_tck;
- }
-
- QAPI_LIST_APPEND(tail, cpustat);
- }
-
- free(line);
- fclose(fp);
- return head;
-}
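
Each counter scraped from /proc/stat above is in clock ticks, so the code converts it to milliseconds by multiplying by 1000 and dividing by sysconf(_SC_CLK_TCK) (commonly 100 on Linux). A small sketch of the conversion, with an example tick count:

    /* Sketch: clock ticks from /proc/stat -> milliseconds. */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long clk_tck = sysconf(_SC_CLK_TCK);   /* ticks per second, often 100 */
        unsigned long user_ticks = 12345;      /* example value               */

        printf("user time: %lu ms\n", user_ticks * 1000 / clk_tck);
        return 0;
    }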
-
-#else /* defined(__linux__) */
-
-void qmp_guest_suspend_disk(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-}
-
-void qmp_guest_suspend_ram(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-}
-
-void qmp_guest_suspend_hybrid(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-}
-
-GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return -1;
-}
-
-GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestMemoryBlockResponseList *
-qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-#endif
-
#ifdef HAVE_GETIFADDRS
static GuestNetworkInterface *
guest_find_interface(GuestNetworkInterfaceList *head,
@@ -3013,131 +1135,8 @@ error:
return NULL;
}
-#else
-
-GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
#endif /* HAVE_GETIFADDRS */
-#if !defined(CONFIG_FSFREEZE)
-
-GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-
- return 0;
-}
-
-int64_t qmp_guest_fsfreeze_freeze(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-
- return 0;
-}
-
-int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints,
- strList *mountpoints,
- Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-
- return 0;
-}
-
-int64_t qmp_guest_fsfreeze_thaw(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-
- return 0;
-}
-
-GuestDiskInfoList *qmp_guest_get_disks(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-#endif /* CONFIG_FSFREEZE */
-
-#if !defined(CONFIG_FSTRIM)
-GuestFilesystemTrimResponse *
-qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-#endif
-
-/* add unsupported commands to the list of blocked RPCs */
-GList *ga_command_init_blockedrpcs(GList *blockedrpcs)
-{
-#if !defined(__linux__)
- {
- const char *list[] = {
- "guest-suspend-disk", "guest-suspend-ram",
- "guest-suspend-hybrid", "guest-get-vcpus", "guest-set-vcpus",
- "guest-get-memory-blocks", "guest-set-memory-blocks",
- "guest-get-memory-block-size", "guest-get-memory-block-info",
- NULL};
- char **p = (char **)list;
-
- while (*p) {
- blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++));
- }
- }
-#endif
-
-#if !defined(HAVE_GETIFADDRS)
- blockedrpcs = g_list_append(blockedrpcs,
- g_strdup("guest-network-get-interfaces"));
-#endif
-
-#if !defined(CONFIG_FSFREEZE)
- {
- const char *list[] = {
- "guest-get-fsinfo", "guest-fsfreeze-status",
- "guest-fsfreeze-freeze", "guest-fsfreeze-freeze-list",
- "guest-fsfreeze-thaw", "guest-get-fsinfo",
- "guest-get-disks", NULL};
- char **p = (char **)list;
-
- while (*p) {
- blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++));
- }
- }
-#endif
-
-#if !defined(CONFIG_FSTRIM)
- blockedrpcs = g_list_append(blockedrpcs, g_strdup("guest-fstrim"));
-#endif
-
- blockedrpcs = g_list_append(blockedrpcs, g_strdup("guest-get-devices"));
-
- return blockedrpcs;
-}
-
/* register init/cleanup routines for stateful command groups */
void ga_command_state_init(GAState *s, GACommandState *cs)
{
@@ -3200,15 +1199,7 @@ GuestUserList *qmp_guest_get_users(Error **errp)
return head;
}
-#else
-
-GuestUserList *qmp_guest_get_users(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-#endif
+#endif /* HAVE_UTMPX */
/* Replace escaped special characters with their real values. The replacement
* is done in place -- returned value is in the original string.
@@ -3345,13 +1336,6 @@ GuestOSInfo *qmp_guest_get_osinfo(Error **errp)
return info;
}
-GuestDeviceInfoList *qmp_guest_get_devices(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-
- return NULL;
-}
-
#ifndef HOST_NAME_MAX
# ifdef _POSIX_HOST_NAME_MAX
# define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
@@ -3386,3 +1370,23 @@ char *qga_get_host_name(Error **errp)
return g_steal_pointer(&hostname);
}
+
+#ifdef CONFIG_GETLOADAVG
+GuestLoadAverage *qmp_guest_get_load(Error **errp)
+{
+ double loadavg[3];
+ GuestLoadAverage *ret = NULL;
+
+ if (getloadavg(loadavg, G_N_ELEMENTS(loadavg)) < 0) {
+ error_setg_errno(errp, errno,
+ "cannot query load average");
+ return NULL;
+ }
+
+ ret = g_new0(GuestLoadAverage, 1);
+ ret->load1m = loadavg[0];
+ ret->load5m = loadavg[1];
+ ret->load15m = loadavg[2];
+ return ret;
+}
+#endif
diff --git a/qga/commands-win32.c b/qga/commands-win32.c
index 0d1b836..8227480 100644
--- a/qga/commands-win32.c
+++ b/qga/commands-win32.c
@@ -27,6 +27,7 @@
#include <lm.h>
#include <wtsapi32.h>
#include <wininet.h>
+#include <pdh.h>
#include "guest-agent-core.h"
#include "vss-win32.h"
@@ -119,6 +120,28 @@ static OpenFlags guest_file_open_modes[] = {
{"a+b", FILE_GENERIC_APPEND | GENERIC_READ, OPEN_ALWAYS }
};
+/*
+ * We use an exponentially weighted moving average, just like Unix systems do
+ * https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
+ *
+ * These constants serve as the damping factors and are calculated as
+ * 1 / exp(sampling interval in seconds / window size in seconds)
+ *
+ * This formula comes from linux's include/linux/sched/loadavg.h
+ * https://github.com/torvalds/linux/blob/345671ea0f9258f410eb057b9ced9cefbbe5dc78/include/linux/sched/loadavg.h#L20-L23
+ */
+#define LOADAVG_FACTOR_1F 0.9200444146293232478931553241
+#define LOADAVG_FACTOR_5F 0.9834714538216174894737477501
+#define LOADAVG_FACTOR_15F 0.9944598480048967508795473394
+/*
+ * The time interval in seconds between taking load counts, same as Linux
+ */
+#define LOADAVG_SAMPLING_INTERVAL 5
+
+double load_avg_1m;
+double load_avg_5m;
+double load_avg_15m;
+
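
The three damping factors defined above follow from the stated formula, 1 / exp(sampling interval / window), with the 5-second interval and 1-, 5- and 15-minute windows. A short sketch (plain C, link with -lm) that reproduces the constants:

    /* Sketch: derive the EWMA damping factors used above. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double interval = 5.0;                       /* seconds          */
        double windows[3] = { 60.0, 300.0, 900.0 };  /* 1, 5, 15 minutes */

        for (int i = 0; i < 3; i++) {
            printf("%.28f\n", 1.0 / exp(interval / windows[i]));
        }
        return 0;
    }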
#define debug_error(msg) do { \
char *suffix = g_win32_error_message(GetLastError()); \
g_debug("%s: %s", (msg), suffix); \
@@ -826,8 +849,6 @@ static void get_disk_properties(HANDLE vol_h, GuestDiskAddress *disk,
}
out_free:
g_free(dev_desc);
-
- return;
}
static void get_single_disk_info(int disk_number,
@@ -891,7 +912,6 @@ static void get_single_disk_info(int disk_number,
err_close:
CloseHandle(disk_h);
- return;
}
/* VSS provider works with volumes, thus there is no difference if
@@ -1203,7 +1223,7 @@ GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp)
GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp)
{
if (!vss_initialized()) {
- error_setg(errp, QERR_UNSUPPORTED);
+ error_setg(errp, "fsfreeze not possible as VSS failed to initialize");
return 0;
}
@@ -1231,7 +1251,7 @@ int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints,
Error *local_err = NULL;
if (!vss_initialized()) {
- error_setg(errp, QERR_UNSUPPORTED);
+ error_setg(errp, "fsfreeze not possible as VSS failed to initialize");
return 0;
}
@@ -1266,13 +1286,16 @@ int64_t qmp_guest_fsfreeze_thaw(Error **errp)
int i;
if (!vss_initialized()) {
- error_setg(errp, QERR_UNSUPPORTED);
+ error_setg(errp, "fsfreeze not possible as VSS failed to initialize");
return 0;
}
qga_vss_fsfreeze(&i, false, NULL, errp);
ga_unset_frozen(ga_state);
+
+ slog("guest-fsthaw called");
+
return i;
}
@@ -1494,11 +1517,6 @@ out:
}
}
-void qmp_guest_suspend_hybrid(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
-}
-
static IP_ADAPTER_ADDRESSES *guest_get_adapters_addresses(Error **errp)
{
IP_ADAPTER_ADDRESSES *adptr_addrs = NULL;
@@ -1862,12 +1880,6 @@ GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp)
return NULL;
}
-int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return -1;
-}
-
static gchar *
get_net_error_message(gint error)
{
@@ -1925,7 +1937,7 @@ void qmp_guest_set_user_password(const char *username,
GError *gerr = NULL;
if (crypted) {
- error_setg(errp, QERR_UNSUPPORTED);
+ error_setg(errp, "'crypted' must be off on this host");
return;
}
@@ -1969,55 +1981,6 @@ done:
g_free(rawpasswddata);
}
-GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestMemoryBlockResponseList *
-qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp)
-{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
-}
-
-/* add unsupported commands to the list of blocked RPCs */
-GList *ga_command_init_blockedrpcs(GList *blockedrpcs)
-{
- const char *list_unsupported[] = {
- "guest-suspend-hybrid",
- "guest-set-vcpus",
- "guest-get-memory-blocks", "guest-set-memory-blocks",
- "guest-get-memory-block-size", "guest-get-memory-block-info",
- NULL};
- char **p = (char **)list_unsupported;
-
- while (*p) {
- blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++));
- }
-
- if (!vss_init(true)) {
- g_debug("vss_init failed, vss commands are going to be disabled");
- const char *list[] = {
- "guest-get-fsinfo", "guest-fsfreeze-status",
- "guest-fsfreeze-freeze", "guest-fsfreeze-thaw", NULL};
- p = (char **)list;
-
- while (*p) {
- blockedrpcs = g_list_append(blockedrpcs, g_strdup(*p++));
- }
- }
-
- return blockedrpcs;
-}
-
/* register init/cleanup routines for stateful command groups */
void ga_command_state_init(GAState *s, GACommandState *cs)
{
@@ -2148,7 +2111,7 @@ static const ga_win_10_0_t WIN_10_0_SERVER_VERSION_MATRIX[] = {
{14393, "Microsoft Windows Server 2016", "2016"},
{17763, "Microsoft Windows Server 2019", "2019"},
{20344, "Microsoft Windows Server 2022", "2022"},
- {26040, "MIcrosoft Windows Server 2025", "2025"},
+ {26040, "Microsoft Windows Server 2025", "2025"},
{ }
};
@@ -2174,7 +2137,6 @@ static void ga_get_win_version(RTL_OSVERSIONINFOEXW *info, Error **errp)
rtl_get_version_t rtl_get_version = (rtl_get_version_t)fun;
rtl_get_version(info);
- return;
}
static char *ga_get_win_name(const OSVERSIONINFOEXW *os_version, bool id)
@@ -2506,14 +2468,127 @@ char *qga_get_host_name(Error **errp)
return g_utf16_to_utf8(tmp, size, NULL, NULL, NULL);
}
-GuestDiskStatsInfoList *qmp_guest_get_diskstats(Error **errp)
+
+static VOID CALLBACK load_avg_callback(PVOID hCounter, BOOLEAN timedOut)
{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
+ PDH_FMT_COUNTERVALUE displayValue;
+ double currentLoad;
+ PDH_STATUS err;
+
+ err = PdhGetFormattedCounterValue(
+ (PDH_HCOUNTER)hCounter, PDH_FMT_DOUBLE, 0, &displayValue);
+ /* Skip updating the load if we can't get the value successfully */
+ if (err != ERROR_SUCCESS) {
+ slog("PdhGetFormattedCounterValue failed to get load value with 0x%lx",
+ err);
+ return;
+ }
+ currentLoad = displayValue.doubleValue;
+
+ load_avg_1m = load_avg_1m * LOADAVG_FACTOR_1F + currentLoad * \
+ (1.0 - LOADAVG_FACTOR_1F);
+ load_avg_5m = load_avg_5m * LOADAVG_FACTOR_5F + currentLoad * \
+ (1.0 - LOADAVG_FACTOR_5F);
+ load_avg_15m = load_avg_15m * LOADAVG_FACTOR_15F + currentLoad * \
+ (1.0 - LOADAVG_FACTOR_15F);
}
-GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp)
+static BOOL init_load_avg_counter(Error **errp)
{
- error_setg(errp, QERR_UNSUPPORTED);
- return NULL;
+ CONST WCHAR *szCounterPath = L"\\System\\Processor Queue Length";
+ PDH_STATUS status;
+ BOOL ret;
+ HQUERY hQuery;
+ HCOUNTER hCounter;
+ HANDLE event;
+ HANDLE waitHandle;
+
+ status = PdhOpenQueryW(NULL, 0, &hQuery);
+ if (status != ERROR_SUCCESS) {
+ /*
+ * If the function fails, the return value is a system error code or
+ * a PDH error code. error_setg_win32 cant translate PDH error code
+         * a PDH error code. error_setg_win32 can't translate PDH error codes
+ */
+ error_setg_win32(errp, (DWORD)status,
+ "PdhOpenQueryW failed with 0x%lx", status);
+ return FALSE;
+ }
+
+ status = PdhAddEnglishCounterW(hQuery, szCounterPath, 0, &hCounter);
+ if (status != ERROR_SUCCESS) {
+ error_setg_win32(errp, (DWORD)status,
+ "PdhAddEnglishCounterW failed with 0x%lx. Performance counters may be disabled.",
+ status);
+ PdhCloseQuery(hQuery);
+ return FALSE;
+ }
+
+ event = CreateEventW(NULL, FALSE, FALSE, L"LoadUpdateEvent");
+ if (event == NULL) {
+ error_setg_win32(errp, GetLastError(), "Create LoadUpdateEvent failed");
+ PdhCloseQuery(hQuery);
+ return FALSE;
+ }
+
+ status = PdhCollectQueryDataEx(hQuery, LOADAVG_SAMPLING_INTERVAL, event);
+ if (status != ERROR_SUCCESS) {
+ error_setg_win32(errp, (DWORD)status,
+ "PdhCollectQueryDataEx failed with 0x%lx", status);
+ CloseHandle(event);
+ PdhCloseQuery(hQuery);
+ return FALSE;
+ }
+
+ ret = RegisterWaitForSingleObject(
+ &waitHandle,
+ event,
+ (WAITORTIMERCALLBACK)load_avg_callback,
+ (PVOID)hCounter,
+ INFINITE,
+ WT_EXECUTEDEFAULT);
+
+ if (ret == 0) {
+ error_setg_win32(errp, GetLastError(),
+ "RegisterWaitForSingleObject failed");
+ CloseHandle(event);
+ PdhCloseQuery(hQuery);
+ return FALSE;
+ }
+
+ ga_set_load_avg_wait_handle(ga_state, waitHandle);
+ ga_set_load_avg_event(ga_state, event);
+ ga_set_load_avg_pdh_query(ga_state, hQuery);
+
+ return TRUE;
+}
+
+GuestLoadAverage *qmp_guest_get_load(Error **errp)
+{
+ /*
+     * The load average logic calls the Performance Counter (PDH) API, which
+     * can incur a performance penalty. This avoids running the load average
+     * logic until a management application actually requests it. The load
+     * average will not initially be very accurate, but assuming that any
+     * interested management application will poll it repeatedly throughout
+     * the lifetime of the VM, this seems like a good mitigation.
+ */
+ if (ga_get_load_avg_pdh_query(ga_state) == NULL) {
+ /* set initial values */
+ load_avg_1m = 0;
+ load_avg_5m = 0;
+ load_avg_15m = 0;
+
+ if (init_load_avg_counter(errp) == false) {
+ return NULL;
+ }
+ }
+
+ GuestLoadAverage *ret = NULL;
+
+ ret = g_new0(GuestLoadAverage, 1);
+ ret->load1m = load_avg_1m;
+ ret->load5m = load_avg_5m;
+ ret->load15m = load_avg_15m;
+ return ret;
}
diff --git a/qga/commands-windows-ssh.c b/qga/commands-windows-ssh.c
index 6a642e3..df45c17 100644
--- a/qga/commands-windows-ssh.c
+++ b/qga/commands-windows-ssh.c
@@ -377,7 +377,7 @@ error:
static bool set_file_permissions(PWindowsUserInfo userInfo, Error **errp)
{
PACL pACL = NULL;
- PSID userPSID;
+ PSID userPSID = NULL;
/* Creates the access control structure */
if (!create_acl(userInfo, &pACL, errp)) {
diff --git a/qga/guest-agent-core.h b/qga/guest-agent-core.h
index b4e7c52..d9f3922 100644
--- a/qga/guest-agent-core.h
+++ b/qga/guest-agent-core.h
@@ -13,7 +13,11 @@
#ifndef GUEST_AGENT_CORE_H
#define GUEST_AGENT_CORE_H
-#include "qapi/qmp/dispatch.h"
+#ifdef _WIN32
+#include <pdh.h>
+#endif
+
+#include "qapi/qmp-registry.h"
#include "qga-qapi-types.h"
#define QGA_READ_COUNT_DEFAULT 4096
@@ -41,6 +45,12 @@ void ga_set_response_delimited(GAState *s);
bool ga_is_frozen(GAState *s);
void ga_set_frozen(GAState *s);
void ga_unset_frozen(GAState *s);
+#ifdef _WIN32
+void ga_set_load_avg_event(GAState *s, HANDLE event);
+void ga_set_load_avg_wait_handle(GAState *s, HANDLE wait_handle);
+void ga_set_load_avg_pdh_query(GAState *s, HQUERY query);
+HQUERY ga_get_load_avg_pdh_query(GAState *s);
+#endif
const char *ga_fsfreeze_hook(GAState *s);
int64_t ga_get_fd_handle(GAState *s, Error **errp);
int ga_parse_whence(GuestFileWhence *whence, Error **errp);
diff --git a/qga/main.c b/qga/main.c
index f4d5f15..6c02f3e 100644
--- a/qga/main.c
+++ b/qga/main.c
@@ -19,9 +19,9 @@
#include <sys/wait.h>
#endif
#include "qemu/help-texts.h"
-#include "qapi/qmp/json-parser.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/json-parser.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
#include "guest-agent-core.h"
#include "qga-qapi-init-commands.h"
#include "qapi/error.h"
@@ -33,6 +33,7 @@
#include "qemu-version.h"
#ifdef _WIN32
#include <dbt.h>
+#include <pdh.h>
#include "qga/service-win32.h"
#include "qga/vss-win32.h"
#endif
@@ -70,6 +71,28 @@ typedef struct GAPersistentState {
typedef struct GAConfig GAConfig;
+struct GAConfig {
+ char *channel_path;
+ char *method;
+ char *log_filepath;
+ char *pid_filepath;
+#ifdef CONFIG_FSFREEZE
+ char *fsfreeze_hook;
+#endif
+ char *state_dir;
+#ifdef _WIN32
+ const char *service;
+#endif
+ gchar *bliststr; /* blockedrpcs may point to this string */
+ gchar *aliststr; /* allowedrpcs may point to this string */
+ GList *blockedrpcs;
+ GList *allowedrpcs;
+ int daemonize;
+ GLogLevelFlags log_level;
+ int dumpconf;
+ bool retry_path;
+};
+
struct GAState {
JSONMessageParser parser;
GMainLoop *main_loop;
@@ -83,6 +106,9 @@ struct GAState {
GAService service;
HANDLE wakeup_event;
HANDLE event_log;
+ HANDLE load_avg_wait_handle;
+ HANDLE load_avg_event;
+ HQUERY load_avg_pdh_query;
#endif
bool delimit_response;
bool frozen;
@@ -226,12 +252,16 @@ static void usage(const char *cmd)
#ifdef CONFIG_FSFREEZE
g_autofree char *fsfreeze_hook = get_relocated_path(QGA_FSFREEZE_HOOK_DEFAULT);
#endif
+ g_autofree char *conf_path = get_relocated_path(QGA_CONF_DEFAULT);
printf(
"Usage: %s [-m <method> -p <path>] [<options>]\n"
"QEMU Guest Agent " QEMU_FULL_VERSION "\n"
QEMU_COPYRIGHT "\n"
"\n"
+" -c, --config=PATH configuration file path (default is\n"
+" %s/qemu-ga.conf\n"
+" unless overridden by the QGA_CONF environment variable)\n"
" -m, --method transport method: one of unix-listen, virtio-serial,\n"
" isa-serial, or vsock-listen (virtio-serial is the default)\n"
" -p, --path device/socket path (the default for virtio-serial is:\n"
@@ -272,8 +302,8 @@ QEMU_COPYRIGHT "\n"
" plug/unplug, etc.)\n"
" -h, --help display this help and exit\n"
"\n"
-QEMU_HELP_BOTTOM "\n"
- , cmd, QGA_VIRTIO_PATH_DEFAULT, QGA_SERIAL_PATH_DEFAULT,
+QEMU_HELP_BOTTOM "\n",
+ cmd, conf_path, QGA_VIRTIO_PATH_DEFAULT, QGA_SERIAL_PATH_DEFAULT,
dfl_pathnames.pidfile,
#ifdef CONFIG_FSFREEZE
fsfreeze_hook,
@@ -397,60 +427,79 @@ static gint ga_strcmp(gconstpointer str1, gconstpointer str2)
return strcmp(str1, str2);
}
-/* disable commands that aren't safe for fsfreeze */
-static void ga_disable_not_allowed_freeze(const QmpCommand *cmd, void *opaque)
+static bool ga_command_is_allowed(const QmpCommand *cmd, GAState *state)
{
- bool allowed = false;
int i = 0;
+ GAConfig *config = state->config;
const char *name = qmp_command_name(cmd);
+ /* Fallback policy is allow everything */
+ bool allowed = true;
- while (ga_freeze_allowlist[i] != NULL) {
- if (strcmp(name, ga_freeze_allowlist[i]) == 0) {
+ if (config->allowedrpcs) {
+ /*
+ * If an allow-list is given, this changes the fallback
+ * policy to deny everything
+ */
+ allowed = false;
+
+ if (g_list_find_custom(config->allowedrpcs, name, ga_strcmp) != NULL) {
allowed = true;
}
- i++;
- }
- if (!allowed) {
- g_debug("disabling command: %s", name);
- qmp_disable_command(&ga_commands, name, "the agent is in frozen state");
}
-}
-
-/* [re-]enable all commands, except those explicitly blocked by user */
-static void ga_enable_non_blocked(const QmpCommand *cmd, void *opaque)
-{
- GAState *s = opaque;
- GList *blockedrpcs = s->blockedrpcs;
- GList *allowedrpcs = s->allowedrpcs;
- const char *name = qmp_command_name(cmd);
- if (g_list_find_custom(blockedrpcs, name, ga_strcmp) == NULL) {
- if (qmp_command_is_enabled(cmd)) {
- return;
+ /*
+ * If both allowedrpcs and blockedrpcs are set, the blocked
+ * list will take priority
+ */
+ if (config->blockedrpcs) {
+ if (g_list_find_custom(config->blockedrpcs, name, ga_strcmp) != NULL) {
+ allowed = false;
}
+ }
- if (allowedrpcs &&
- g_list_find_custom(allowedrpcs, name, ga_strcmp) == NULL) {
- return;
- }
+ /*
+ * If frozen, this filtering must take priority over
+ * absolutely everything
+ */
+ if (state->frozen) {
+ allowed = false;
- g_debug("enabling command: %s", name);
- qmp_enable_command(&ga_commands, name);
+ while (ga_freeze_allowlist[i] != NULL) {
+ if (strcmp(name, ga_freeze_allowlist[i]) == 0) {
+ allowed = true;
+ }
+ i++;
+ }
}
+
+ return allowed;
}
-/* disable commands that aren't allowed */
-static void ga_disable_not_allowed(const QmpCommand *cmd, void *opaque)
+static void ga_apply_command_filters_iter(const QmpCommand *cmd, void *opaque)
{
- GList *allowedrpcs = opaque;
+ GAState *state = opaque;
+ bool want = ga_command_is_allowed(cmd, state);
+ bool have = qmp_command_is_enabled(cmd);
const char *name = qmp_command_name(cmd);
- if (g_list_find_custom(allowedrpcs, name, ga_strcmp) == NULL) {
+ if (want == have) {
+ return;
+ }
+
+ if (have) {
g_debug("disabling command: %s", name);
qmp_disable_command(&ga_commands, name, "the command is not allowed");
+ } else {
+ g_debug("enabling command: %s", name);
+ qmp_enable_command(&ga_commands, name);
}
}
+static void ga_apply_command_filters(GAState *state)
+{
+ qmp_for_each_command(&ga_commands, ga_apply_command_filters_iter, state);
+}
+
static bool ga_create_file(const char *path)
{
int fd = open(path, O_CREAT | O_WRONLY, S_IWUSR | S_IRUSR);
@@ -483,15 +532,14 @@ void ga_set_frozen(GAState *s)
if (ga_is_frozen(s)) {
return;
}
- /* disable all forbidden (for frozen state) commands */
- qmp_for_each_command(&ga_commands, ga_disable_not_allowed_freeze, NULL);
g_warning("disabling logging due to filesystem freeze");
- ga_disable_logging(s);
s->frozen = true;
if (!ga_create_file(s->state_filepath_isfrozen)) {
g_warning("unable to create %s, fsfreeze may not function properly",
s->state_filepath_isfrozen);
}
+ ga_apply_command_filters(s);
+ ga_disable_logging(s);
}
void ga_unset_frozen(GAState *s)
@@ -523,12 +571,12 @@ void ga_unset_frozen(GAState *s)
}
/* enable all disabled, non-blocked and allowed commands */
- qmp_for_each_command(&ga_commands, ga_enable_non_blocked, s);
s->frozen = false;
if (!ga_delete_file(s->state_filepath_isfrozen)) {
g_warning("unable to delete %s, fsfreeze may not function properly",
s->state_filepath_isfrozen);
}
+ ga_apply_command_filters(s);
}
#ifdef CONFIG_FSFREEZE
@@ -538,6 +586,25 @@ const char *ga_fsfreeze_hook(GAState *s)
}
#endif
+#ifdef _WIN32
+void ga_set_load_avg_wait_handle(GAState *s, HANDLE wait_handle)
+{
+ s->load_avg_wait_handle = wait_handle;
+}
+void ga_set_load_avg_event(GAState *s, HANDLE event)
+{
+ s->load_avg_event = event;
+}
+void ga_set_load_avg_pdh_query(GAState *s, HQUERY query)
+{
+ s->load_avg_pdh_query = query;
+}
+HQUERY ga_get_load_avg_pdh_query(GAState *s)
+{
+ return s->load_avg_pdh_query;
+}
+#endif
+
static void become_daemon(const char *pidfile)
{
#ifndef _WIN32
@@ -996,38 +1063,14 @@ static GList *split_list(const gchar *str, const gchar *delim)
return list;
}
-struct GAConfig {
- char *channel_path;
- char *method;
- char *log_filepath;
- char *pid_filepath;
-#ifdef CONFIG_FSFREEZE
- char *fsfreeze_hook;
-#endif
- char *state_dir;
-#ifdef _WIN32
- const char *service;
-#endif
- gchar *bliststr; /* blockedrpcs may point to this string */
- gchar *aliststr; /* allowedrpcs may point to this string */
- GList *blockedrpcs;
- GList *allowedrpcs;
- int daemonize;
- GLogLevelFlags log_level;
- int dumpconf;
- bool retry_path;
-};
-
-static void config_load(GAConfig *config)
+static void config_load(GAConfig *config, const char *confpath, bool required)
{
GError *gerr = NULL;
GKeyFile *keyfile;
- g_autofree char *conf = g_strdup(g_getenv("QGA_CONF")) ?: get_relocated_path(QGA_CONF_DEFAULT);
- const gchar *blockrpcs_key = "block-rpcs";
/* read system config */
keyfile = g_key_file_new();
- if (!g_key_file_load_from_file(keyfile, conf, 0, &gerr)) {
+ if (!g_key_file_load_from_file(keyfile, confpath, 0, &gerr)) {
goto end;
}
if (g_key_file_has_key(keyfile, "general", "daemon", NULL)) {
@@ -1071,9 +1114,9 @@ static void config_load(GAConfig *config)
g_key_file_get_boolean(keyfile, "general", "retry-path", &gerr);
}
- if (g_key_file_has_key(keyfile, "general", blockrpcs_key, NULL)) {
+ if (g_key_file_has_key(keyfile, "general", "block-rpcs", NULL)) {
config->bliststr =
- g_key_file_get_string(keyfile, "general", blockrpcs_key, &gerr);
+ g_key_file_get_string(keyfile, "general", "block-rpcs", &gerr);
config->blockedrpcs = g_list_concat(config->blockedrpcs,
split_list(config->bliststr, ","));
}
@@ -1084,19 +1127,12 @@ static void config_load(GAConfig *config)
split_list(config->aliststr, ","));
}
- if (g_key_file_has_key(keyfile, "general", blockrpcs_key, NULL) &&
- g_key_file_has_key(keyfile, "general", "allow-rpcs", NULL)) {
- g_critical("wrong config, using 'block-rpcs' and 'allow-rpcs' keys at"
- " the same time is not allowed");
- exit(EXIT_FAILURE);
- }
-
end:
g_key_file_free(keyfile);
- if (gerr &&
- !(gerr->domain == G_FILE_ERROR && gerr->code == G_FILE_ERROR_NOENT)) {
+ if (gerr && (required ||
+ !(gerr->domain == G_FILE_ERROR && gerr->code == G_FILE_ERROR_NOENT))) {
g_critical("error loading configuration from path: %s, %s",
- conf, gerr->message);
+ confpath, gerr->message);
exit(EXIT_FAILURE);
}
g_clear_error(&gerr);
@@ -1168,12 +1204,12 @@ static void config_dump(GAConfig *config)
static void config_parse(GAConfig *config, int argc, char **argv)
{
- const char *sopt = "hVvdm:p:l:f:F::b:a:s:t:Dr";
+ const char *sopt = "hVvdc:m:p:l:f:F::b:a:s:t:Dr";
int opt_ind = 0, ch;
- bool block_rpcs = false, allow_rpcs = false;
const struct option lopt[] = {
{ "help", 0, NULL, 'h' },
{ "version", 0, NULL, 'V' },
+ { "config", 1, NULL, 'c' },
{ "dump-conf", 0, NULL, 'D' },
{ "logfile", 1, NULL, 'l' },
{ "pidfile", 1, NULL, 'f' },
@@ -1193,6 +1229,26 @@ static void config_parse(GAConfig *config, int argc, char **argv)
{ "retry-path", 0, NULL, 'r' },
{ NULL, 0, NULL, 0 }
};
+ g_autofree char *confpath = g_strdup(g_getenv("QGA_CONF")) ?:
+ get_relocated_path(QGA_CONF_DEFAULT);
+ bool confrequired = false;
+
+ while ((ch = getopt_long(argc, argv, sopt, lopt, NULL)) != -1) {
+ switch (ch) {
+ case 'c':
+ g_free(confpath);
+ confpath = g_strdup(optarg);
+ confrequired = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ config_load(config, confpath, confrequired);
+
+ /* Reset for second pass */
+ optind = 1;
while ((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) {
switch (ch) {
@@ -1245,7 +1301,6 @@ static void config_parse(GAConfig *config, int argc, char **argv)
}
config->blockedrpcs = g_list_concat(config->blockedrpcs,
split_list(optarg, ","));
- block_rpcs = true;
break;
}
case 'a': {
@@ -1255,7 +1310,6 @@ static void config_parse(GAConfig *config, int argc, char **argv)
}
config->allowedrpcs = g_list_concat(config->allowedrpcs,
split_list(optarg, ","));
- allow_rpcs = true;
break;
}
#ifdef _WIN32
@@ -1296,12 +1350,6 @@ static void config_parse(GAConfig *config, int argc, char **argv)
exit(EXIT_FAILURE);
}
}
-
- if (block_rpcs && allow_rpcs) {
- g_critical("wrong commandline, using --block-rpcs and --allow-rpcs at the"
- " same time is not allowed");
- exit(EXIT_FAILURE);
- }
}
static void config_free(GAConfig *config)
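As an aside on the config_parse() rework above: the function now runs getopt_long() twice, first only to pick up -c/--config so the configuration file can be loaded, then again after resetting optind so that command-line options override file settings. A standalone sketch of that two-pass pattern, with an invented option set rather than qemu-ga's, might look like this:

    /*
     * Standalone sketch of the two-pass getopt_long() pattern used above.
     * The option set here is illustrative, not qemu-ga's.
     */
    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        const char *sopt = "c:v";
        const struct option lopt[] = {
            { "config",  required_argument, NULL, 'c' },
            { "verbose", no_argument,       NULL, 'v' },
            { NULL, 0, NULL, 0 }
        };
        char *confpath = NULL;
        int verbose = 0, ch;

        /* Pass 1: only look for -c/--config. */
        while ((ch = getopt_long(argc, argv, sopt, lopt, NULL)) != -1) {
            if (ch == 'c') {
                free(confpath);
                confpath = strdup(optarg);
            }
        }
        printf("would load config from: %s\n", confpath ? confpath : "(default)");

        /* Pass 2: reset getopt state and handle the remaining options. */
        optind = 1;
        while ((ch = getopt_long(argc, argv, sopt, lopt, NULL)) != -1) {
            switch (ch) {
            case 'c':
                break;              /* already handled in pass 1 */
            case 'v':
                verbose++;
                break;
            default:
                fprintf(stderr, "usage: %s [-c FILE] [-v]\n", argv[0]);
                free(confpath);
                return EXIT_FAILURE;
            }
        }
        printf("verbose=%d\n", verbose);
        free(confpath);
        return EXIT_SUCCESS;
    }

Resetting optind to 1 is the same trick the patch relies on; on glibc this restarts option scanning from argv[1].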
@@ -1377,6 +1425,10 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation)
g_debug("Guest agent version %s started", QEMU_FULL_VERSION);
#ifdef _WIN32
+ s->load_avg_wait_handle = INVALID_HANDLE_VALUE;
+ s->load_avg_event = INVALID_HANDLE_VALUE;
+ s->load_avg_pdh_query = NULL;
+
s->event_log = RegisterEventSource(NULL, "qemu-ga");
if (!s->event_log) {
g_autofree gchar *errmsg = g_win32_error_message(GetLastError());
@@ -1395,24 +1447,23 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation)
" '%s': %s", config->state_dir, strerror(errno));
return NULL;
}
+
+ if (!vss_init(true)) {
+ g_debug("vss_init failed, vss commands will not function");
+ }
#endif
if (ga_is_frozen(s)) {
if (config->daemonize) {
/* delay opening/locking of pidfile till filesystems are unfrozen */
s->deferred_options.pid_filepath = config->pid_filepath;
- become_daemon(NULL);
}
if (config->log_filepath) {
/* delay opening the log file till filesystems are unfrozen */
s->deferred_options.log_filepath = config->log_filepath;
}
ga_disable_logging(s);
- qmp_for_each_command(&ga_commands, ga_disable_not_allowed_freeze, NULL);
} else {
- if (config->daemonize) {
- become_daemon(config->pid_filepath);
- }
if (config->log_filepath) {
FILE *log_file = ga_open_logfile(config->log_filepath);
if (!log_file) {
@@ -1432,25 +1483,6 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation)
return NULL;
}
- if (config->allowedrpcs) {
- qmp_for_each_command(&ga_commands, ga_disable_not_allowed, config->allowedrpcs);
- s->allowedrpcs = config->allowedrpcs;
- }
-
- /*
- * Some commands can be blocked due to system limitation.
- * Initialize blockedrpcs list even if allowedrpcs specified.
- */
- config->blockedrpcs = ga_command_init_blockedrpcs(config->blockedrpcs);
- if (config->blockedrpcs) {
- GList *l = config->blockedrpcs;
- s->blockedrpcs = config->blockedrpcs;
- do {
- g_debug("disabling command: %s", (char *)l->data);
- qmp_disable_command(&ga_commands, l->data, NULL);
- l = g_list_next(l);
- } while (l);
- }
s->command_state = ga_command_state_new();
ga_command_state_init(s, s->command_state);
ga_command_state_init_all(s->command_state);
@@ -1476,6 +1508,22 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation)
}
#endif
+ ga_apply_command_filters(s);
+
+ if (!channel_init(s, s->config->method, s->config->channel_path,
+ s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) {
+ g_critical("failed to initialize guest agent channel");
+ return NULL;
+ }
+
+ if (config->daemonize) {
+ if (ga_is_frozen(s)) {
+ become_daemon(NULL);
+ } else {
+ become_daemon(config->pid_filepath);
+ }
+ }
+
ga_state = s;
return s;
}
@@ -1485,6 +1533,18 @@ static void cleanup_agent(GAState *s)
#ifdef _WIN32
CloseHandle(s->wakeup_event);
CloseHandle(s->event_log);
+
+ if (s->load_avg_wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(s->load_avg_wait_handle);
+ }
+
+ if (s->load_avg_event != INVALID_HANDLE_VALUE) {
+ CloseHandle(s->load_avg_event);
+ }
+
+ if (s->load_avg_pdh_query) {
+ PdhCloseQuery(s->load_avg_pdh_query);
+ }
#endif
if (s->command_state) {
ga_command_state_cleanup_all(s->command_state);
@@ -1502,16 +1562,18 @@ static void cleanup_agent(GAState *s)
static int run_agent_once(GAState *s)
{
- if (!channel_init(s, s->config->method, s->config->channel_path,
- s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) {
+ if (!s->channel &&
+ channel_init(s, s->config->method, s->config->channel_path,
+ s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) {
g_critical("failed to initialize guest agent channel");
return EXIT_FAILURE;
}
- g_main_loop_run(ga_state->main_loop);
+ g_main_loop_run(s->main_loop);
if (s->channel) {
ga_channel_free(s->channel);
+ s->channel = NULL;
}
return EXIT_SUCCESS;
@@ -1568,7 +1630,7 @@ static void stop_agent(GAState *s, bool requested)
int main(int argc, char **argv)
{
- int ret = EXIT_SUCCESS;
+ int ret = EXIT_FAILURE;
GAState *s;
GAConfig *config = g_new0(GAConfig, 1);
int socket_activation;
@@ -1579,7 +1641,6 @@ int main(int argc, char **argv)
qga_qmp_init_marshal(&ga_commands);
init_dfl_pathnames();
- config_load(config);
config_parse(config, argc, argv);
if (config->pid_filepath == NULL) {
@@ -1597,7 +1658,6 @@ int main(int argc, char **argv)
socket_activation = check_socket_activation();
if (socket_activation > 1) {
g_critical("qemu-ga only supports listening on one socket");
- ret = EXIT_FAILURE;
goto end;
}
if (socket_activation) {
@@ -1621,7 +1681,6 @@ int main(int argc, char **argv)
if (!config->method) {
g_critical("unsupported listen fd type");
- ret = EXIT_FAILURE;
goto end;
}
} else if (config->channel_path == NULL) {
@@ -1633,13 +1692,13 @@ int main(int argc, char **argv)
config->channel_path = g_strdup(QGA_SERIAL_PATH_DEFAULT);
} else {
g_critical("must specify a path for this channel");
- ret = EXIT_FAILURE;
goto end;
}
}
if (config->dumpconf) {
config_dump(config);
+ ret = EXIT_SUCCESS;
goto end;
}
@@ -1654,6 +1713,7 @@ int main(int argc, char **argv)
SERVICE_TABLE_ENTRY service_table[] = {
{ (char *)QGA_SERVICE_NAME, service_main }, { NULL, NULL } };
StartServiceCtrlDispatcher(service_table);
+ ret = EXIT_SUCCESS;
} else {
ret = run_agent(s);
}
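The net effect of the qga/main.c changes above is that command availability is decided in one place: ga_command_is_allowed() encodes the precedence (allow everything by default, an allow-list switches the default to deny, a block-list overrides the allow-list, and the frozen state overrides both except for the freeze allow-list), while ga_apply_command_filters() merely reconciles each command's enabled state with that verdict. A minimal standalone sketch of the same precedence, with invented lists rather than qemu-ga's state, is:

    /*
     * Minimal standalone sketch (not qemu-ga code) of the precedence that
     * ga_command_is_allowed() implements: default allow; an allow-list
     * switches the default to deny; a block-list overrides the allow-list;
     * the frozen state overrides both, except for the freeze allow-list.
     * The three lists below are invented for illustration.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static const char *const allow_list[]   = { "guest-ping", "guest-info", NULL };
    static const char *const block_list[]   = { "guest-info", NULL };
    static const char *const freeze_allow[] = { "guest-ping", "guest-fsfreeze-thaw", NULL };

    static bool in_list(const char *const *list, const char *name)
    {
        for (int i = 0; list[i] != NULL; i++) {
            if (strcmp(list[i], name) == 0) {
                return true;
            }
        }
        return false;
    }

    static bool command_is_allowed(const char *name, bool have_allow_list, bool frozen)
    {
        bool allowed = true;                        /* fallback: allow everything */

        if (have_allow_list) {
            allowed = in_list(allow_list, name);    /* allow-list => default deny */
        }
        if (in_list(block_list, name)) {
            allowed = false;                        /* block-list beats allow-list */
        }
        if (frozen) {
            allowed = in_list(freeze_allow, name);  /* frozen beats both */
        }
        return allowed;
    }

    int main(void)
    {
        const char *cmds[] = { "guest-ping", "guest-info", "guest-exec" };

        for (size_t i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
            printf("%-12s allowed=%d allowed-while-frozen=%d\n", cmds[i],
                   command_is_allowed(cmds[i], true, false),
                   command_is_allowed(cmds[i], true, true));
        }
        return 0;
    }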
diff --git a/qga/meson.build b/qga/meson.build
index 587ec4e..89a4a8f 100644
--- a/qga/meson.build
+++ b/qga/meson.build
@@ -95,7 +95,7 @@ gen_tlb = []
qga_libs = []
if host_os == 'windows'
qga_libs += ['-lws2_32', '-lwinmm', '-lpowrprof', '-lwtsapi32', '-lwininet', '-liphlpapi', '-lnetapi32',
- '-lsetupapi', '-lcfgmgr32', '-luserenv']
+ '-lsetupapi', '-lcfgmgr32', '-luserenv', '-lpdh' ]
if have_qga_vss
qga_libs += ['-lole32', '-loleaut32', '-lshlwapi', '-lstdc++', '-Wl,--enable-stdcall-fixup']
subdir('vss-win32')
diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json
index 1273d85..6d770f7 100644
--- a/qga/qapi-schema.json
+++ b/qga/qapi-schema.json
@@ -2,16 +2,24 @@
# vim: filetype=python
##
-# = General note concerning the use of guest agent interfaces
+# This manual describes the commands supported by the QEMU Guest
+# Agent Protocol.
#
-# "unsupported" is a higher-level error than the errors that
-# individual commands might document. The caller should always be
-# prepared to receive QERR_UNSUPPORTED, even if the given command
-# doesn't specify it, or doesn't document any failure mode at all.
-##
-
-##
-# = QEMU guest agent protocol commands and structs
+# For locating a particular item, please see the `qapi-qga-index`.
+#
+# The following notation is used in examples:
+#
+# .. qmp-example::
+#
+# -> ... text sent by client (commands) ...
+# <- ... text sent by server (command responses and events) ...
+#
+# Example text is formatted for readability. However, in real
+# protocol usage, it's commonly emitted as a single line.
+#
+# Please refer to the
+# :doc:`QEMU Machine Protocol Specification </interop/qmp-spec>`
+# for the general format of commands, responses, and events.
##
{ 'pragma': { 'doc-required': true } }
@@ -412,7 +420,8 @@
# Since: 0.15.0
##
{ 'enum': 'GuestFsfreezeStatus',
- 'data': [ 'thawed', 'frozen' ] }
+ 'data': [ 'thawed', 'frozen' ],
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSFREEZE'] } }
##
# @guest-fsfreeze-status:
@@ -429,7 +438,8 @@
# Since: 0.15.0
##
{ 'command': 'guest-fsfreeze-status',
- 'returns': 'GuestFsfreezeStatus' }
+ 'returns': 'GuestFsfreezeStatus',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSFREEZE'] } }
##
# @guest-fsfreeze-freeze:
@@ -451,7 +461,8 @@
# Since: 0.15.0
##
{ 'command': 'guest-fsfreeze-freeze',
- 'returns': 'int' }
+ 'returns': 'int',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSFREEZE'] } }
##
# @guest-fsfreeze-freeze-list:
@@ -471,7 +482,8 @@
##
{ 'command': 'guest-fsfreeze-freeze-list',
'data': { '*mountpoints': ['str'] },
- 'returns': 'int' }
+ 'returns': 'int',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSFREEZE'] } }
##
# @guest-fsfreeze-thaw:
@@ -488,7 +500,8 @@
# Since: 0.15.0
##
{ 'command': 'guest-fsfreeze-thaw',
- 'returns': 'int' }
+ 'returns': 'int',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSFREEZE'] } }
##
# @GuestFilesystemTrimResult:
@@ -505,7 +518,8 @@
##
{ 'struct': 'GuestFilesystemTrimResult',
'data': {'path': 'str',
- '*trimmed': 'int', '*minimum': 'int', '*error': 'str'} }
+ '*trimmed': 'int', '*minimum': 'int', '*error': 'str'},
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSTRIM'] } }
##
# @GuestFilesystemTrimResponse:
@@ -515,7 +529,8 @@
# Since: 2.4
##
{ 'struct': 'GuestFilesystemTrimResponse',
- 'data': {'paths': ['GuestFilesystemTrimResult']} }
+ 'data': {'paths': ['GuestFilesystemTrimResult']},
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSTRIM'] } }
##
# @guest-fstrim:
@@ -537,7 +552,8 @@
##
{ 'command': 'guest-fstrim',
'data': { '*minimum': 'int' },
- 'returns': 'GuestFilesystemTrimResponse' }
+ 'returns': 'GuestFilesystemTrimResponse',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_FSTRIM'] } }
##
# @guest-suspend-disk:
@@ -566,7 +582,8 @@
#
# Since: 1.1
##
-{ 'command': 'guest-suspend-disk', 'success-response': false }
+{ 'command': 'guest-suspend-disk', 'success-response': false,
+ 'if': { 'any': ['CONFIG_LINUX', 'CONFIG_WIN32'] } }
##
# @guest-suspend-ram:
@@ -602,7 +619,8 @@
#
# Since: 1.1
##
-{ 'command': 'guest-suspend-ram', 'success-response': false }
+{ 'command': 'guest-suspend-ram', 'success-response': false,
+ 'if': { 'any': ['CONFIG_LINUX', 'CONFIG_WIN32'] } }
##
# @guest-suspend-hybrid:
@@ -637,7 +655,8 @@
#
# Since: 1.1
##
-{ 'command': 'guest-suspend-hybrid', 'success-response': false }
+{ 'command': 'guest-suspend-hybrid', 'success-response': false,
+ 'if': 'CONFIG_LINUX' }
##
# @GuestIpAddressType:
@@ -651,7 +670,8 @@
# Since: 1.1
##
{ 'enum': 'GuestIpAddressType',
- 'data': [ 'ipv4', 'ipv6' ] }
+ 'data': [ 'ipv4', 'ipv6' ],
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_GETIFADDRS'] } }
##
# @GuestIpAddress:
@@ -667,7 +687,8 @@
{ 'struct': 'GuestIpAddress',
'data': {'ip-address': 'str',
'ip-address-type': 'GuestIpAddressType',
- 'prefix': 'int'} }
+ 'prefix': 'int'},
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_GETIFADDRS'] } }
##
# @GuestNetworkInterfaceStat:
@@ -699,7 +720,8 @@
'tx-packets': 'uint64',
'tx-errs': 'uint64',
'tx-dropped': 'uint64'
- } }
+ },
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_GETIFADDRS'] } }
##
# @GuestNetworkInterface:
@@ -719,7 +741,8 @@
'data': {'name': 'str',
'*hardware-address': 'str',
'*ip-addresses': ['GuestIpAddress'],
- '*statistics': 'GuestNetworkInterfaceStat' } }
+ '*statistics': 'GuestNetworkInterfaceStat' },
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_GETIFADDRS'] } }
##
# @guest-network-get-interfaces:
@@ -731,7 +754,8 @@
# Since: 1.1
##
{ 'command': 'guest-network-get-interfaces',
- 'returns': ['GuestNetworkInterface'] }
+ 'returns': ['GuestNetworkInterface'],
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_GETIFADDRS'] } }
##
# @GuestLogicalProcessor:
@@ -750,7 +774,8 @@
{ 'struct': 'GuestLogicalProcessor',
'data': {'logical-id': 'int',
'online': 'bool',
- '*can-offline': 'bool'} }
+ '*can-offline': 'bool'},
+ 'if': { 'any': ['CONFIG_LINUX', 'CONFIG_WIN32'] } }
##
# @guest-get-vcpus:
@@ -765,7 +790,8 @@
# Since: 1.5
##
{ 'command': 'guest-get-vcpus',
- 'returns': ['GuestLogicalProcessor'] }
+ 'returns': ['GuestLogicalProcessor'],
+ 'if': { 'any': ['CONFIG_LINUX', 'CONFIG_WIN32'] } }
##
# @guest-set-vcpus:
@@ -807,7 +833,8 @@
##
{ 'command': 'guest-set-vcpus',
'data': {'vcpus': ['GuestLogicalProcessor'] },
- 'returns': 'int' }
+ 'returns': 'int',
+ 'if': 'CONFIG_LINUX' }
##
# @GuestDiskBusType:
@@ -859,7 +886,8 @@
{ 'enum': 'GuestDiskBusType',
'data': [ 'ide', 'fdc', 'scsi', 'virtio', 'xen', 'usb', 'uml', 'sata',
'sd', 'unknown', 'ieee1394', 'ssa', 'fibre', 'raid', 'iscsi',
- 'sas', 'mmc', 'virtual', 'file-backed-virtual', 'nvme' ] }
+ 'sas', 'mmc', 'virtual', 'file-backed-virtual', 'nvme' ],
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
@@ -877,7 +905,8 @@
##
{ 'struct': 'GuestPCIAddress',
'data': {'domain': 'int', 'bus': 'int',
- 'slot': 'int', 'function': 'int'} }
+ 'slot': 'int', 'function': 'int'},
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
# @GuestCCWAddress:
@@ -896,7 +925,8 @@
'data': {'cssid': 'int',
'ssid': 'int',
'subchno': 'int',
- 'devno': 'int'} }
+ 'devno': 'int'},
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
# @GuestDiskAddress:
@@ -925,7 +955,8 @@
'bus-type': 'GuestDiskBusType',
'bus': 'int', 'target': 'int', 'unit': 'int',
'*serial': 'str', '*dev': 'str',
- '*ccw-address': 'GuestCCWAddress'} }
+ '*ccw-address': 'GuestCCWAddress'},
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
# @GuestNVMeSmart:
@@ -962,7 +993,8 @@
'media-errors-lo': 'uint64',
'media-errors-hi': 'uint64',
'number-of-error-log-entries-lo': 'uint64',
- 'number-of-error-log-entries-hi': 'uint64' } }
+ 'number-of-error-log-entries-hi': 'uint64' },
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LIBUDEV' ] } }
##
# @GuestDiskSmart:
@@ -976,7 +1008,8 @@
{ 'union': 'GuestDiskSmart',
'base': { 'type': 'GuestDiskBusType' },
'discriminator': 'type',
- 'data': { 'nvme': 'GuestNVMeSmart' } }
+ 'data': { 'nvme': 'GuestNVMeSmart' },
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LIBUDEV' ] } }
##
# @GuestDiskInfo:
@@ -1001,7 +1034,8 @@
{ 'struct': 'GuestDiskInfo',
'data': {'name': 'str', 'partition': 'bool', '*dependencies': ['str'],
'*address': 'GuestDiskAddress', '*alias': 'str',
- '*smart': 'GuestDiskSmart'} }
+ '*smart': 'GuestDiskSmart'},
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LIBUDEV' ] } }
##
# @guest-get-disks:
@@ -1014,7 +1048,8 @@
# Since: 5.2
##
{ 'command': 'guest-get-disks',
- 'returns': ['GuestDiskInfo'] }
+ 'returns': ['GuestDiskInfo'],
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LIBUDEV' ] } }
##
# @GuestFilesystemInfo:
@@ -1040,7 +1075,8 @@
{ 'struct': 'GuestFilesystemInfo',
'data': {'name': 'str', 'mountpoint': 'str', 'type': 'str',
'*used-bytes': 'uint64', '*total-bytes': 'uint64',
- '*total-bytes-privileged': 'uint64', 'disk': ['GuestDiskAddress']} }
+ '*total-bytes-privileged': 'uint64', 'disk': ['GuestDiskAddress']},
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
# @guest-get-fsinfo:
@@ -1053,7 +1089,8 @@
# Since: 2.2
##
{ 'command': 'guest-get-fsinfo',
- 'returns': ['GuestFilesystemInfo'] }
+ 'returns': ['GuestFilesystemInfo'],
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX' ] } }
##
# @guest-set-user-password:
@@ -1080,7 +1117,8 @@
# Since: 2.3
##
{ 'command': 'guest-set-user-password',
- 'data': { 'username': 'str', 'password': 'str', 'crypted': 'bool' } }
+ 'data': { 'username': 'str', 'password': 'str', 'crypted': 'bool' },
+ 'if': { 'any': [ 'CONFIG_WIN32', 'CONFIG_LINUX', 'CONFIG_FREEBSD'] } }
##
# @GuestMemoryBlock:
@@ -1100,7 +1138,8 @@
{ 'struct': 'GuestMemoryBlock',
'data': {'phys-index': 'uint64',
'online': 'bool',
- '*can-offline': 'bool'} }
+ '*can-offline': 'bool'},
+ 'if': 'CONFIG_LINUX' }
##
# @guest-get-memory-blocks:
@@ -1116,7 +1155,8 @@
# Since: 2.3
##
{ 'command': 'guest-get-memory-blocks',
- 'returns': ['GuestMemoryBlock'] }
+ 'returns': ['GuestMemoryBlock'],
+ 'if': 'CONFIG_LINUX' }
##
# @GuestMemoryBlockResponseType:
@@ -1139,7 +1179,8 @@
##
{ 'enum': 'GuestMemoryBlockResponseType',
'data': ['success', 'not-found', 'operation-not-supported',
- 'operation-failed'] }
+ 'operation-failed'],
+ 'if': 'CONFIG_LINUX' }
##
# @GuestMemoryBlockResponse:
@@ -1157,7 +1198,8 @@
{ 'struct': 'GuestMemoryBlockResponse',
'data': { 'phys-index': 'uint64',
'response': 'GuestMemoryBlockResponseType',
- '*error-code': 'int' }}
+ '*error-code': 'int' },
+ 'if': 'CONFIG_LINUX'}
##
# @guest-set-memory-blocks:
@@ -1188,7 +1230,8 @@
##
{ 'command': 'guest-set-memory-blocks',
'data': {'mem-blks': ['GuestMemoryBlock'] },
- 'returns': ['GuestMemoryBlockResponse'] }
+ 'returns': ['GuestMemoryBlockResponse'],
+ 'if': 'CONFIG_LINUX' }
##
# @GuestMemoryBlockInfo:
@@ -1200,7 +1243,8 @@
# Since: 2.3
##
{ 'struct': 'GuestMemoryBlockInfo',
- 'data': {'size': 'uint64'} }
+ 'data': {'size': 'uint64'},
+ 'if': 'CONFIG_LINUX' }
##
# @guest-get-memory-block-info:
@@ -1212,7 +1256,8 @@
# Since: 2.3
##
{ 'command': 'guest-get-memory-block-info',
- 'returns': 'GuestMemoryBlockInfo' }
+ 'returns': 'GuestMemoryBlockInfo',
+ 'if': 'CONFIG_LINUX' }
##
# @GuestExecStatus:
@@ -1378,7 +1423,8 @@
# Since: 2.10
##
{ 'struct': 'GuestUser',
- 'data': { 'user': 'str', 'login-time': 'number', '*domain': 'str' } }
+ 'data': { 'user': 'str', 'login-time': 'number', '*domain': 'str' },
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_UTMPX' ] } }
##
# @guest-get-users:
@@ -1390,7 +1436,8 @@
# Since: 2.10
##
{ 'command': 'guest-get-users',
- 'returns': ['GuestUser'] }
+ 'returns': ['GuestUser'],
+ 'if': { 'any': ['CONFIG_WIN32', 'HAVE_UTMPX' ] } }
##
# @GuestTimezone:
@@ -1499,7 +1546,8 @@
# @pci: PCI device
##
{ 'enum': 'GuestDeviceType',
- 'data': [ 'pci' ] }
+ 'data': [ 'pci' ],
+ 'if': 'CONFIG_WIN32' }
##
# @GuestDeviceIdPCI:
@@ -1511,7 +1559,8 @@
# Since: 5.2
##
{ 'struct': 'GuestDeviceIdPCI',
- 'data': { 'vendor-id': 'uint16', 'device-id': 'uint16' } }
+ 'data': { 'vendor-id': 'uint16', 'device-id': 'uint16' },
+ 'if': 'CONFIG_WIN32' }
##
# @GuestDeviceId:
@@ -1525,7 +1574,8 @@
{ 'union': 'GuestDeviceId',
'base': { 'type': 'GuestDeviceType' },
'discriminator': 'type',
- 'data': { 'pci': 'GuestDeviceIdPCI' } }
+ 'data': { 'pci': 'GuestDeviceIdPCI' },
+ 'if': 'CONFIG_WIN32' }
##
# @GuestDeviceInfo:
@@ -1546,7 +1596,8 @@
'*driver-date': 'int',
'*driver-version': 'str',
'*id': 'GuestDeviceId'
- } }
+ },
+ 'if': 'CONFIG_WIN32' }
##
# @guest-get-devices:
@@ -1558,7 +1609,8 @@
# Since: 5.2
##
{ 'command': 'guest-get-devices',
- 'returns': ['GuestDeviceInfo'] }
+ 'returns': ['GuestDeviceInfo'],
+ 'if': 'CONFIG_WIN32' }
##
# @GuestAuthorizedKeys:
@@ -1685,7 +1737,8 @@
'*ios-pgr': 'uint64',
'*total-ticks': 'uint64',
'*weight-ticks': 'uint64'
- } }
+ },
+ 'if': 'CONFIG_LINUX' }
##
# @GuestDiskStatsInfo:
@@ -1702,7 +1755,8 @@
'data': {'name': 'str',
'major': 'uint64',
'minor': 'uint64',
- 'stats': 'GuestDiskStats' } }
+ 'stats': 'GuestDiskStats' },
+ 'if': 'CONFIG_LINUX' }
##
# @guest-get-diskstats:
@@ -1714,7 +1768,8 @@
# Since: 7.1
##
{ 'command': 'guest-get-diskstats',
- 'returns': ['GuestDiskStatsInfo']
+ 'returns': ['GuestDiskStatsInfo'],
+ 'if': 'CONFIG_LINUX'
}
##
@@ -1727,7 +1782,8 @@
# Since: 7.1
##
{ 'enum': 'GuestCpuStatsType',
- 'data': [ 'linux' ] }
+ 'data': [ 'linux' ],
+ 'if': 'CONFIG_LINUX' }
##
@@ -1772,7 +1828,8 @@
'*steal': 'uint64',
'*guest': 'uint64',
'*guestnice': 'uint64'
- } }
+ },
+ 'if': 'CONFIG_LINUX' }
##
# @GuestCpuStats:
@@ -1786,7 +1843,8 @@
{ 'union': 'GuestCpuStats',
'base': { 'type': 'GuestCpuStatsType' },
'discriminator': 'type',
- 'data': { 'linux': 'GuestLinuxCpuStats' } }
+ 'data': { 'linux': 'GuestLinuxCpuStats' },
+ 'if': 'CONFIG_LINUX' }
##
# @guest-get-cpustats:
@@ -1798,5 +1856,121 @@
# Since: 7.1
##
{ 'command': 'guest-get-cpustats',
- 'returns': ['GuestCpuStats']
+ 'returns': ['GuestCpuStats'],
+ 'if': 'CONFIG_LINUX'
+}
+
+
+##
+# @GuestLoadAverage:
+#
+# Statistics about process load information
+#
+# @load1m: 1-minute load average
+#
+# @load5m: 5-minute load average
+#
+# @load15m: 15-minute load average
+#
+# Since: 10.0
+##
+{ 'struct': 'GuestLoadAverage',
+ 'data': {
+ 'load1m': 'number',
+ 'load5m': 'number',
+ 'load15m': 'number'
+ },
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_GETLOADAVG'] }
+}
+
+##
+# @guest-get-load:
+#
+# Retrieve CPU process load information
+#
+# .. note:: Windows does not have a load average API, so QGA emulates it by
+#     calculating the average CPU usage over the last 1, 5 and 15 minutes,
+#     similar to what Linux does.
+# Calculation starts from the first time this command is called.
+#
+# Returns: load information
+#
+# Since: 10.0
+##
+{ 'command': 'guest-get-load',
+ 'returns': 'GuestLoadAverage',
+ 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_GETLOADAVG'] }
+}
+
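As a rough standalone illustration of the emulation the note above describes, the sketch below maintains exponentially damped 1-, 5- and 15-minute averages over fixed-interval samples, which is also how the Linux kernel maintains its load averages. The 5-second interval and the synthetic sample values are assumptions made for the sketch; the Windows agent obtains its samples from PDH counters instead.

    /*
     * Illustrative-only sketch of deriving 1/5/15-minute load averages
     * from periodic CPU-usage samples.  Build with: cc loadavg.c -lm
     * The interval and the synthetic samples are assumptions; qemu-ga's
     * Windows code samples PDH counters rather than the values here.
     */
    #include <math.h>
    #include <stdio.h>

    #define INTERVAL_SEC 5.0

    static double decay(double minutes)
    {
        return exp(-INTERVAL_SEC / (minutes * 60.0));
    }

    int main(void)
    {
        double load1 = 0.0, load5 = 0.0, load15 = 0.0;
        double e1 = decay(1), e5 = decay(5), e15 = decay(15);

        /* Pretend to sample "CPUs' worth of work" every 5 seconds. */
        for (int tick = 0; tick < 360; tick++) {           /* 30 minutes */
            double sample = (tick < 120) ? 1.0 : 0.25;     /* synthetic load */

            load1  = load1  * e1  + sample * (1.0 - e1);
            load5  = load5  * e5  + sample * (1.0 - e5);
            load15 = load15 * e15 + sample * (1.0 - e15);
        }

        printf("load1m=%.2f load5m=%.2f load15m=%.2f\n", load1, load5, load15);
        return 0;
    }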
+##
+# @GuestNetworkRoute:
+#
+# Route information. Currently, only Linux is supported.
+#
+# @iface: The destination network or host's egress network interface in the routing table
+#
+# @destination: The IP address of the target network or host, the final destination of the packet
+#
+# @metric: Route metric
+#
+# @gateway: The IP address of the next hop router
+#
+# @mask: Subnet Mask (IPv4 only)
+#
+# @irtt: Initial round-trip delay (not for Windows, IPv4 only)
+#
+# @flags: Route flags (not for Windows)
+#
+# @refcnt: The route's reference count (not for Windows)
+#
+# @use: Route usage count (not for Windows)
+#
+# @window: TCP window size, used for flow control (not for Windows, IPv4 only)
+#
+# @mtu: Data link layer maximum packet size (not for Windows)
+#
+# @desprefixlen: Destination prefix length (for IPv6)
+#
+# @source: Source IP address (for IPv6)
+#
+# @srcprefixlen: Source prefix length (for IPv6)
+#
+# @nexthop: Next hop IP address (for IPv6)
+#
+# @version: IP version (4 or 6)
+#
+# Since: 9.1
+##
+{ 'struct': 'GuestNetworkRoute',
+ 'data': {'iface': 'str',
+ 'destination': 'str',
+ 'metric': 'int',
+ '*gateway': 'str',
+ '*mask': 'str',
+ '*irtt': 'int',
+ '*flags': 'uint64',
+ '*refcnt': 'int',
+ '*use': 'int',
+ '*window': 'int',
+ '*mtu': 'int',
+ '*desprefixlen': 'str',
+ '*source': 'str',
+ '*srcprefixlen': 'str',
+ '*nexthop': 'str',
+ 'version': 'int'
+ },
+ 'if': 'CONFIG_LINUX' }
+
+##
+# @guest-network-get-route:
+#
+# Retrieve the network routing information of the guest.
+# Returns: List of the guest's network routes.
+#
+# Since: 9.1
+##
+{ 'command': 'guest-network-get-route',
+ 'returns': ['GuestNetworkRoute'],
+ 'if': 'CONFIG_LINUX'
}
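On Linux, most of the IPv4 fields documented for GuestNetworkRoute above (iface, destination, gateway, flags, refcnt, use, metric, mask, mtu, window, irtt) correspond one-to-one to the columns of /proc/net/route. A standalone sketch of reading that table, which is not the qemu-ga parser itself, could look like:

    /*
     * Standalone sketch: read a few of the /proc/net/route columns that
     * back the GuestNetworkRoute fields above.  Addresses in that file
     * are 32-bit values printed in hex in the kernel's in-memory byte
     * order, so parsing them back into an in_addr on the same host
     * round-trips correctly.
     */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        FILE *fp = fopen("/proc/net/route", "r");
        char line[512];

        if (!fp) {
            perror("/proc/net/route");
            return EXIT_FAILURE;
        }
        if (!fgets(line, sizeof(line), fp)) {     /* skip the header row */
            fclose(fp);
            return EXIT_FAILURE;
        }

        while (fgets(line, sizeof(line), fp)) {
            char iface[64];
            unsigned long dest, gw, mask;
            unsigned flags, refcnt, use, metric, mtu, window, irtt;
            struct in_addr a;

            if (sscanf(line, "%63s %lx %lx %x %u %u %u %lx %u %u %u",
                       iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                       &mask, &mtu, &window, &irtt) != 11) {
                continue;
            }
            a.s_addr = (in_addr_t)dest;
            printf("iface=%s destination=%s metric=%u flags=%#x\n",
                   iface, inet_ntoa(a), metric, flags);
        }
        fclose(fp);
        return EXIT_SUCCESS;
    }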
diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp
index 8494413..5cea5bc 100644
--- a/qga/vss-win32/install.cpp
+++ b/qga/vss-win32/install.cpp
@@ -39,7 +39,7 @@ const GUID CLSID_WbemLocator = { 0x4590f811, 0x1d3a, 0x11d0,
const GUID IID_IWbemLocator = { 0xdc12a687, 0x737f, 0x11cf,
{0x88, 0x4d, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24} };
-void errmsg(DWORD err, const char *text)
+static void errmsg(DWORD err, const char *text)
{
/*
* `text' contains function call statement when errmsg is called via chk().
@@ -242,6 +242,7 @@ out:
}
/* Unregister this module from COM+ Applications Catalog */
+STDAPI COMUnregister(void);
STDAPI COMUnregister(void)
{
qga_debug_begin;
@@ -256,6 +257,7 @@ out:
}
/* Register this module to COM+ Applications Catalog */
+STDAPI COMRegister(void);
STDAPI COMRegister(void)
{
qga_debug_begin;
@@ -380,11 +382,13 @@ out:
return hr;
}
+STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int);
STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int)
{
COMRegister();
}
+STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int);
STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int)
{
COMUnregister();
diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp
index cc72e5e..a102a23 100644
--- a/qga/vss-win32/provider.cpp
+++ b/qga/vss-win32/provider.cpp
@@ -45,7 +45,7 @@ const IID IID_IVssEnumObject = { 0xAE1C7110, 0x2F60, 0x11d3,
{0x8A, 0x39, 0x00, 0xC0, 0x4F, 0x72, 0xD8, 0xE3} };
-void LockModule(BOOL lock)
+static void LockModule(BOOL lock)
{
if (lock) {
InterlockedIncrement(&g_nComObjsInUse);
@@ -528,6 +528,9 @@ STDAPI DllCanUnloadNow()
}
EXTERN_C
+BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved);
+
+EXTERN_C
BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved)
{
qga_debug("begin, reason = %lu", dwReason);
diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp
index 9884c65..4401d55 100644
--- a/qga/vss-win32/requester.cpp
+++ b/qga/vss-win32/requester.cpp
@@ -254,8 +254,8 @@ out:
qga_debug_end;
}
-DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName,
- DWORD defaultData)
+static DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName,
+ DWORD defaultData)
{
qga_debug_begin;
@@ -272,12 +272,12 @@ DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName,
return dwordData;
}
-bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT)
+static bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT)
{
return (vssBT > VSS_BT_UNDEFINED && vssBT < VSS_BT_OTHER);
}
-VSS_BACKUP_TYPE get_vss_backup_type(
+static VSS_BACKUP_TYPE get_vss_backup_type(
VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE)
{
qga_debug_begin;
diff --git a/qobject/block-qdict.c b/qobject/block-qdict.c
index 4a83bda..d0e1c63 100644
--- a/qobject/block-qdict.c
+++ b/qobject/block-qdict.c
@@ -9,10 +9,10 @@
#include "qemu/osdep.h"
#include "block/qdict.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
diff --git a/qobject/json-parser-int.h b/qobject/json-parser-int.h
index 16a25d0..8c01f23 100644
--- a/qobject/json-parser-int.h
+++ b/qobject/json-parser-int.h
@@ -14,7 +14,7 @@
#ifndef JSON_PARSER_INT_H
#define JSON_PARSER_INT_H
-#include "qapi/qmp/json-parser.h"
+#include "qobject/json-parser.h"
typedef enum json_token_type {
JSON_ERROR = 0, /* must be zero, see json_lexer[] */
diff --git a/qobject/json-parser.c b/qobject/json-parser.c
index d498db6..7483e58 100644
--- a/qobject/json-parser.c
+++ b/qobject/json-parser.c
@@ -16,12 +16,12 @@
#include "qemu/cutils.h"
#include "qemu/unicode.h"
#include "qapi/error.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "json-parser-int.h"
struct JSONToken {
diff --git a/qobject/json-writer.c b/qobject/json-writer.c
index 309a31d..aac2c6a 100644
--- a/qobject/json-writer.c
+++ b/qobject/json-writer.c
@@ -14,7 +14,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/json-writer.h"
+#include "qobject/json-writer.h"
#include "qemu/unicode.h"
struct JSONWriter {
diff --git a/qobject/qbool.c b/qobject/qbool.c
index c7049c0..00d7066 100644
--- a/qobject/qbool.c
+++ b/qobject/qbool.c
@@ -12,7 +12,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qbool.h"
+#include "qobject/qbool.h"
#include "qobject-internal.h"
/**
diff --git a/qobject/qdict.c b/qobject/qdict.c
index 8faff23..a90ac9a 100644
--- a/qobject/qdict.c
+++ b/qobject/qdict.c
@@ -11,11 +11,11 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qbool.h"
+#include "qobject/qnull.h"
+#include "qobject/qstring.h"
#include "qobject-internal.h"
/**
diff --git a/qobject/qjson.c b/qobject/qjson.c
index 167fcb4..c858daf 100644
--- a/qobject/qjson.c
+++ b/qobject/qjson.c
@@ -13,14 +13,14 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/json-parser.h"
-#include "qapi/qmp/json-writer.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/json-parser.h"
+#include "qobject/json-writer.h"
+#include "qobject/qjson.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
typedef struct JSONParsingState {
JSONMessageParser parser;
diff --git a/qobject/qlist.c b/qobject/qlist.c
index 356ad94..41e6876 100644
--- a/qobject/qlist.c
+++ b/qobject/qlist.c
@@ -11,11 +11,11 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/queue.h"
#include "qobject-internal.h"
diff --git a/qobject/qlit.c b/qobject/qlit.c
index be83321..a44f47e 100644
--- a/qobject/qlit.c
+++ b/qobject/qlit.c
@@ -15,13 +15,13 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/qlit.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qlit.h"
+#include "qobject/qbool.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
+#include "qobject/qnull.h"
static bool qlit_equal_qdict(const QLitObject *lhs, const QDict *qdict)
{
@@ -118,7 +118,7 @@ QObject *qobject_from_qlit(const QLitObject *qlit)
case QTYPE_QBOOL:
return QOBJECT(qbool_from_bool(qlit->value.qbool));
default:
- assert(0);
+ g_assert_not_reached();
}
return NULL;
diff --git a/qobject/qnull.c b/qobject/qnull.c
index 445a5db..0fb78cb 100644
--- a/qobject/qnull.c
+++ b/qobject/qnull.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qobject-internal.h"
QNull qnull_ = {
diff --git a/qobject/qnum.c b/qobject/qnum.c
index 2bbeaed..a938b64 100644
--- a/qobject/qnum.c
+++ b/qobject/qnum.c
@@ -13,7 +13,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qnum.h"
#include "qobject-internal.h"
/**
@@ -85,8 +85,7 @@ bool qnum_get_try_int(const QNum *qn, int64_t *val)
return false;
}
- assert(0);
- return false;
+ g_assert_not_reached();
}
/**
@@ -123,8 +122,7 @@ bool qnum_get_try_uint(const QNum *qn, uint64_t *val)
return false;
}
- assert(0);
- return false;
+ g_assert_not_reached();
}
/**
@@ -156,8 +154,7 @@ double qnum_get_double(QNum *qn)
return qn->u.dbl;
}
- assert(0);
- return 0.0;
+ g_assert_not_reached();
}
char *qnum_to_string(QNum *qn)
@@ -172,8 +169,7 @@ char *qnum_to_string(QNum *qn)
return g_strdup_printf("%.17g", qn->u.dbl);
}
- assert(0);
- return NULL;
+ g_assert_not_reached();
}
/**
diff --git a/qobject/qobject-internal.h b/qobject/qobject-internal.h
index b310c8e..0c7679f 100644
--- a/qobject/qobject-internal.h
+++ b/qobject/qobject-internal.h
@@ -10,7 +10,7 @@
#ifndef QOBJECT_INTERNAL_H
#define QOBJECT_INTERNAL_H
-#include "qapi/qmp/qobject.h"
+#include "qobject/qobject.h"
static inline void qobject_init(QObject *obj, QType type)
{
diff --git a/qobject/qobject.c b/qobject/qobject.c
index d7077b8..78d1e05 100644
--- a/qobject/qobject.c
+++ b/qobject/qobject.c
@@ -8,12 +8,12 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "qobject-internal.h"
QEMU_BUILD_BUG_MSG(
diff --git a/qobject/qstring.c b/qobject/qstring.c
index 794f8c9..d316604 100644
--- a/qobject/qstring.c
+++ b/qobject/qstring.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qobject-internal.h"
/**
diff --git a/qom/container.c b/qom/container.c
index 455e841..38a27ec 100644
--- a/qom/container.c
+++ b/qom/container.c
@@ -15,7 +15,7 @@
#include "qemu/module.h"
static const TypeInfo container_info = {
- .name = "container",
+ .name = TYPE_CONTAINER,
.parent = TYPE_OBJECT,
};
@@ -24,29 +24,14 @@ static void container_register_types(void)
type_register_static(&container_info);
}
-Object *container_get(Object *root, const char *path)
+Object *object_property_add_new_container(Object *obj, const char *name)
{
- Object *obj, *child;
- char **parts;
- int i;
+ Object *child = object_new(TYPE_CONTAINER);
- parts = g_strsplit(path, "/", 0);
- assert(parts != NULL && parts[0] != NULL && !parts[0][0]);
- obj = root;
+ object_property_add_child(obj, name, child);
+ object_unref(child);
- for (i = 1; parts[i] != NULL; i++, obj = child) {
- child = object_resolve_path_component(obj, parts[i]);
- if (!child) {
- child = object_new("container");
- object_property_add_child(obj, parts[i], child);
- object_unref(child);
- }
- }
-
- g_strfreev(parts);
-
- return obj;
+ return child;
}
-
type_init(container_register_types)
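With container_get() gone, callers now either look up an existing container with object_get_container() or create one explicitly with object_property_add_new_container(). A hypothetical in-tree migration sketch (assuming QEMU's qom/object.h; "demo" is an invented container name, not one this series creates):

    /*
     * Hypothetical migration sketch for a caller of the removed
     * container_get(); assumes QEMU's "qom/object.h".
     */
    #include "qemu/osdep.h"
    #include "qom/object.h"

    static Object *demo_container(void)
    {
        Object *root = object_get_root();
        Object *c = object_resolve_path_component(root, "demo");

        if (!c) {
            /* Old code: c = container_get(root, "/demo"); */
            c = object_property_add_new_container(root, "demo");
        }
        return c;
    }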
diff --git a/qom/object.c b/qom/object.c
index 157a45c..1856bb3 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -23,16 +23,16 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/forward-visitor.h"
#include "qapi/qapi-builtin-visit.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qjson.h"
#include "trace.h"
/* TODO: replace QObject with a simpler visitor to avoid a dependency
* of the QOM core on QObject? */
#include "qom/qom-qobject.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/error-report.h"
#define MAX_INTERFACES 32
@@ -54,10 +54,10 @@ struct TypeImpl
size_t instance_size;
size_t instance_align;
- void (*class_init)(ObjectClass *klass, void *data);
- void (*class_base_init)(ObjectClass *klass, void *data);
+ void (*class_init)(ObjectClass *klass, const void *data);
+ void (*class_base_init)(ObjectClass *klass, const void *data);
- void *class_data;
+ const void *class_data;
void (*instance_init)(Object *obj);
void (*instance_post_init)(Object *obj);
@@ -175,17 +175,12 @@ static TypeImpl *type_register_internal(const TypeInfo *info)
return ti;
}
-TypeImpl *type_register(const TypeInfo *info)
+TypeImpl *type_register_static(const TypeInfo *info)
{
assert(info->parent);
return type_register_internal(info);
}
-TypeImpl *type_register_static(const TypeInfo *info)
-{
- return type_register(info);
-}
-
void type_register_static_array(const TypeInfo *infos, int nr_infos)
{
int i;
@@ -195,7 +190,7 @@ void type_register_static_array(const TypeInfo *infos, int nr_infos)
}
}
-static TypeImpl *type_get_by_name(const char *name)
+static TypeImpl *type_get_by_name_noload(const char *name)
{
if (name == NULL) {
return NULL;
@@ -204,10 +199,32 @@ static TypeImpl *type_get_by_name(const char *name)
return type_table_lookup(name);
}
+static TypeImpl *type_get_or_load_by_name(const char *name, Error **errp)
+{
+ TypeImpl *type = type_get_by_name_noload(name);
+
+#ifdef CONFIG_MODULES
+ if (!type) {
+ int rv = module_load_qom(name, errp);
+ if (rv > 0) {
+ type = type_get_by_name_noload(name);
+ } else {
+ error_prepend(errp, "could not load a module for type '%s'", name);
+ return NULL;
+ }
+ }
+#endif
+ if (!type) {
+ error_setg(errp, "unknown type '%s'", name);
+ }
+
+ return type;
+}
+
static TypeImpl *type_get_parent(TypeImpl *type)
{
if (!type->parent_type && type->parent) {
- type->parent_type = type_get_by_name(type->parent);
+ type->parent_type = type_get_by_name_noload(type->parent);
if (!type->parent_type) {
fprintf(stderr, "Type '%s' is missing its parent '%s'\n",
type->name, type->parent);
@@ -262,14 +279,6 @@ static size_t type_object_get_align(TypeImpl *ti)
return 0;
}
-size_t object_type_get_instance_size(const char *typename)
-{
- TypeImpl *type = type_get_by_name(typename);
-
- g_assert(type != NULL);
- return type_object_get_size(type);
-}
-
static bool type_is_ancestor(TypeImpl *type, TypeImpl *target_type)
{
assert(target_type);
@@ -305,7 +314,6 @@ static void type_initialize_interface(TypeImpl *ti, TypeImpl *interface_type,
g_free((char *)info.name);
new_iface = (InterfaceClass *)iface_impl->class;
- new_iface->concrete_class = ti->class;
new_iface->interface_type = interface_type;
ti->class->interfaces = g_slist_append(ti->class->interfaces, new_iface);
@@ -371,7 +379,7 @@ static void type_initialize(TypeImpl *ti)
}
for (i = 0; i < ti->num_interfaces; i++) {
- TypeImpl *t = type_get_by_name(ti->interfaces[i].typename);
+ TypeImpl *t = type_get_by_name_noload(ti->interfaces[i].typename);
if (!t) {
error_report("missing interface '%s' for object '%s'",
ti->interfaces[i].typename, parent->name);
@@ -423,13 +431,13 @@ static void object_init_with_type(Object *obj, TypeImpl *ti)
static void object_post_init_with_type(Object *obj, TypeImpl *ti)
{
- if (ti->instance_post_init) {
- ti->instance_post_init(obj);
- }
-
if (type_has_parent(ti)) {
object_post_init_with_type(obj, type_get_parent(ti));
}
+
+ if (ti->instance_post_init) {
+ ti->instance_post_init(obj);
+ }
}
bool object_apply_global_props(Object *obj, const GPtrArray *props,
@@ -477,7 +485,7 @@ bool object_apply_global_props(Object *obj, const GPtrArray *props,
* Slot 0: accelerator's global property defaults
* Slot 1: machine's global property defaults
* Slot 2: global properties from legacy command line option
- * Each is a GPtrArray of of GlobalProperty.
+ * Each is a GPtrArray of GlobalProperty.
* Applied in order, later entries override earlier ones.
*/
static GPtrArray *object_compat_props[3];
@@ -565,23 +573,7 @@ static void object_initialize_with_type(Object *obj, size_t size, TypeImpl *type
void object_initialize(void *data, size_t size, const char *typename)
{
- TypeImpl *type = type_get_by_name(typename);
-
-#ifdef CONFIG_MODULES
- if (!type) {
- int rv = module_load_qom(typename, &error_fatal);
- if (rv > 0) {
- type = type_get_by_name(typename);
- } else {
- error_report("missing object type '%s'", typename);
- exit(1);
- }
- }
-#endif
- if (!type) {
- error_report("missing object type '%s'", typename);
- abort();
- }
+ TypeImpl *type = type_get_or_load_by_name(typename, &error_fatal);
object_initialize_with_type(data, size, type);
}
@@ -792,7 +784,7 @@ Object *object_new_with_class(ObjectClass *klass)
Object *object_new(const char *typename)
{
- TypeImpl *ti = type_get_by_name(typename);
+ TypeImpl *ti = type_get_or_load_by_name(typename, &error_fatal);
return object_new_with_type(ti);
}
@@ -965,7 +957,7 @@ ObjectClass *object_class_dynamic_cast(ObjectClass *class,
return class;
}
- target_type = type_get_by_name(typename);
+ target_type = type_get_by_name_noload(typename);
if (!target_type) {
/* target class type unknown, so fail the cast */
return NULL;
@@ -1063,7 +1055,7 @@ const char *object_class_get_name(ObjectClass *klass)
ObjectClass *object_class_by_name(const char *typename)
{
- TypeImpl *type = type_get_by_name(typename);
+ TypeImpl *type = type_get_by_name_noload(typename);
if (!type) {
return NULL;
@@ -1076,21 +1068,15 @@ ObjectClass *object_class_by_name(const char *typename)
ObjectClass *module_object_class_by_name(const char *typename)
{
- ObjectClass *oc;
+ TypeImpl *type = type_get_or_load_by_name(typename, NULL);
- oc = object_class_by_name(typename);
-#ifdef CONFIG_MODULES
- if (!oc) {
- Error *local_err = NULL;
- int rv = module_load_qom(typename, &local_err);
- if (rv > 0) {
- oc = object_class_by_name(typename);
- } else if (rv < 0) {
- error_report_err(local_err);
- }
+ if (!type) {
+ return NULL;
}
-#endif
- return oc;
+
+ type_initialize(type);
+
+ return type->class;
}
ObjectClass *object_class_get_parent(ObjectClass *class)
@@ -1205,7 +1191,7 @@ GSList *object_class_get_list(const char *implements_type,
return list;
}
-static gint object_class_cmp(gconstpointer a, gconstpointer b)
+static gint object_class_cmp(gconstpointer a, gconstpointer b, gpointer d)
{
return strcasecmp(object_class_get_name((ObjectClass *)a),
object_class_get_name((ObjectClass *)b));
@@ -1214,8 +1200,9 @@ static gint object_class_cmp(gconstpointer a, gconstpointer b)
GSList *object_class_get_list_sorted(const char *implements_type,
bool include_abstract)
{
- return g_slist_sort(object_class_get_list(implements_type, include_abstract),
- object_class_cmp);
+ return g_slist_sort_with_data(
+ object_class_get_list(implements_type, include_abstract),
+ object_class_cmp, NULL);
}
Object *object_ref(void *objptr)
@@ -1742,12 +1729,44 @@ const char *object_property_get_type(Object *obj, const char *name, Error **errp
return prop->type;
}
+static const char *const root_containers[] = {
+ "chardevs",
+ "objects",
+ "backend"
+};
+
+static Object *object_root_initialize(void)
+{
+ Object *root = object_new(TYPE_CONTAINER);
+ int i;
+
+ /*
+ * Create all QEMU system containers. "machine" and its sub-containers
+ * are only created when machine initializes (qemu_create_machine()).
+ */
+ for (i = 0; i < ARRAY_SIZE(root_containers); i++) {
+ object_property_add_new_container(root, root_containers[i]);
+ }
+
+ return root;
+}
+
+Object *object_get_container(const char *name)
+{
+ Object *container;
+
+ container = object_resolve_path_component(object_get_root(), name);
+ assert(object_dynamic_cast(container, TYPE_CONTAINER));
+
+ return container;
+}
+
Object *object_get_root(void)
{
static Object *root;
if (!root) {
- root = object_new("container");
+ root = object_root_initialize();
}
return root;
@@ -1755,7 +1774,7 @@ Object *object_get_root(void)
Object *object_get_objects_root(void)
{
- return container_get(object_get_root(), "/objects");
+ return object_get_container("objects");
}
Object *object_get_internal_root(void)
@@ -1763,7 +1782,7 @@ Object *object_get_internal_root(void)
static Object *internal_root;
if (!internal_root) {
- internal_root = object_new("container");
+ internal_root = object_new(TYPE_CONTAINER);
}
return internal_root;
@@ -2079,7 +2098,6 @@ const char *object_get_canonical_path_component(const Object *obj)
/* obj had a parent but was not a child, should never happen */
g_assert_not_reached();
- return NULL;
}
char *object_get_canonical_path(const Object *obj)
@@ -2185,7 +2203,7 @@ static Object *object_resolve_partial_path(Object *parent,
}
Object *object_resolve_path_type(const char *path, const char *typename,
- bool *ambiguousp)
+ bool *ambiguous)
{
Object *obj;
char **parts;
@@ -2194,14 +2212,17 @@ Object *object_resolve_path_type(const char *path, const char *typename,
assert(parts);
if (parts[0] == NULL || strcmp(parts[0], "") != 0) {
- bool ambiguous = false;
+ bool ambig = false;
obj = object_resolve_partial_path(object_get_root(), parts,
- typename, &ambiguous);
- if (ambiguousp) {
- *ambiguousp = ambiguous;
+ typename, &ambig);
+ if (ambiguous) {
+ *ambiguous = ambig;
}
} else {
obj = object_resolve_abs_path(object_get_root(), parts + 1, typename);
+ if (ambiguous) {
+ *ambiguous = false;
+ }
}
g_strfreev(parts);
@@ -2227,7 +2248,7 @@ Object *object_resolve_path_at(Object *parent, const char *path)
Object *object_resolve_type_unambiguous(const char *typename, Error **errp)
{
- bool ambig;
+ bool ambig = false;
Object *o = object_resolve_path_type("", typename, &ambig);
if (ambig) {
@@ -2871,7 +2892,7 @@ void object_class_property_set_description(ObjectClass *klass,
op->description = g_strdup(description);
}
-static void object_class_init(ObjectClass *klass, void *data)
+static void object_class_init(ObjectClass *klass, const void *data)
{
object_class_property_add_str(klass, "type", object_get_type,
NULL);
diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c
index e0833c8..1ffea1a 100644
--- a/qom/object_interfaces.c
+++ b/qom/object_interfaces.c
@@ -3,10 +3,12 @@
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-qom.h"
-#include "qapi/qmp/qobject.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qobject.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qjson.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qom/object_interfaces.h"
@@ -90,7 +92,7 @@ Object *user_creatable_add_type(const char *type, const char *id,
return NULL;
}
- klass = object_class_by_name(type);
+ klass = module_object_class_by_name(type);
if (!klass) {
error_setg(errp, "invalid object type: %s", type);
return NULL;
@@ -108,7 +110,7 @@ Object *user_creatable_add_type(const char *type, const char *id,
}
assert(qdict);
- obj = object_new(type);
+ obj = object_new_with_class(klass);
object_set_properties_from_qdict(obj, qdict, v, &local_err);
if (local_err) {
goto out;
@@ -177,9 +179,25 @@ char *object_property_help(const char *name, const char *type,
g_string_append(str, description);
}
if (defval) {
- g_autofree char *def_json = g_string_free(qobject_to_json(defval),
- false);
- g_string_append_printf(str, " (default: %s)", def_json);
+ g_autofree char *def_json = NULL;
+ const char *def;
+
+ switch (qobject_type(defval)) {
+ case QTYPE_QSTRING:
+ def = qstring_get_str(qobject_to(QString, defval));
+ break;
+
+ case QTYPE_QBOOL:
+ def = qbool_get_bool(qobject_to(QBool, defval)) ? "on" : "off";
+ break;
+
+ default:
+ def_json = g_string_free(qobject_to_json(defval), false);
+ def = def_json;
+ break;
+ }
+
+ g_string_append_printf(str, " (default: %s)", def);
}
return g_string_free(str, false);
diff --git a/qom/qom-hmp-cmds.c b/qom/qom-hmp-cmds.c
index 6e3a217..a00a564 100644
--- a/qom/qom-hmp-cmds.c
+++ b/qom/qom-hmp-cmds.c
@@ -11,8 +11,8 @@
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-qom.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
#include "qemu/readline.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
diff --git a/qom/qom-qmp-cmds.c b/qom/qom-qmp-cmds.c
index e91a235..293755f 100644
--- a/qom/qom-qmp-cmds.c
+++ b/qom/qom-qmp-cmds.c
@@ -20,7 +20,7 @@
#include "qapi/qapi-commands-qdev.h"
#include "qapi/qapi-commands-qom.h"
#include "qapi/qapi-visit-qom.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
@@ -28,15 +28,11 @@
#include "qom/object_interfaces.h"
#include "qom/qom-qobject.h"
-ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp)
+static Object *qom_resolve_path(const char *path, Error **errp)
{
- Object *obj;
bool ambiguous = false;
- ObjectPropertyInfoList *props = NULL;
- ObjectProperty *prop;
- ObjectPropertyIterator iter;
+ Object *obj = object_resolve_path(path, &ambiguous);
- obj = object_resolve_path(path, &ambiguous);
if (obj == NULL) {
if (ambiguous) {
error_setg(errp, "Path '%s' is ambiguous", path);
@@ -44,6 +40,19 @@ ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp)
error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
"Device '%s' not found", path);
}
+ }
+ return obj;
+}
+
+ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp)
+{
+ Object *obj;
+ ObjectPropertyInfoList *props = NULL;
+ ObjectProperty *prop;
+ ObjectPropertyIterator iter;
+
+ obj = qom_resolve_path(path, errp);
+ if (obj == NULL) {
return NULL;
}
@@ -141,7 +150,7 @@ ObjectPropertyInfoList *qmp_device_list_properties(const char *typename,
return NULL;
}
- obj = object_new(typename);
+ obj = object_new_with_class(klass);
object_property_iter_init(&iter, obj);
while ((prop = object_property_iter_next(&iter))) {
@@ -186,7 +195,7 @@ ObjectPropertyInfoList *qmp_qom_list_properties(const char *typename,
ObjectPropertyIterator iter;
ObjectPropertyInfoList *prop_list = NULL;
- klass = object_class_by_name(typename);
+ klass = module_object_class_by_name(typename);
if (klass == NULL) {
error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
"Class '%s' not found", typename);
diff --git a/replay/replay-audio.c b/replay/replay-audio.c
index 91854f0..ed2ba21 100644
--- a/replay/replay-audio.c
+++ b/replay/replay-audio.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "audio/audio.h"
diff --git a/replay/replay-char.c b/replay/replay-char.c
index 72b1f83..81dc416 100644
--- a/replay/replay-char.c
+++ b/replay/replay-char.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "chardev/char.h"
diff --git a/replay/replay-debugging.c b/replay/replay-debugging.c
index 82c66ff..1105364 100644
--- a/replay/replay-debugging.c
+++ b/replay/replay-debugging.c
@@ -11,13 +11,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "replay-internal.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qapi-commands-replay.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/timer.h"
#include "block/snapshot.h"
#include "migration/snapshot.h"
diff --git a/replay/replay-events.c b/replay/replay-events.c
index af0721c..8959da9 100644
--- a/replay/replay-events.c
+++ b/replay/replay-events.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "block/aio.h"
#include "ui/input.h"
@@ -92,15 +92,6 @@ void replay_flush_events(void)
}
}
-void replay_disable_events(void)
-{
- if (replay_mode != REPLAY_MODE_NONE) {
- events_enabled = false;
- /* Flush events queue before waiting of completion */
- replay_flush_events();
- }
-}
-
/*! Adds specified async event to the queue */
void replay_add_event(ReplayAsyncEventKind event_kind,
void *opaque,
diff --git a/replay/replay-input.c b/replay/replay-input.c
index bee3dbe..562bbf3 100644
--- a/replay/replay-input.c
+++ b/replay/replay-input.c
@@ -10,7 +10,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "qemu/notify.h"
#include "ui/input.h"
diff --git a/replay/replay-internal.c b/replay/replay-internal.c
index 13fcbdd..c2a7200 100644
--- a/replay/replay-internal.c
+++ b/replay/replay-internal.c
@@ -10,8 +10,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "replay-internal.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
diff --git a/replay/replay-net.c b/replay/replay-net.c
index 3b70f71..d4b197e 100644
--- a/replay/replay-net.c
+++ b/replay/replay-net.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "net/net.h"
#include "net/filter.h"
diff --git a/replay/replay-random.c b/replay/replay-random.c
index afc7a0f..7f4c46f 100644
--- a/replay/replay-random.c
+++ b/replay/replay-random.c
@@ -11,7 +11,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
void replay_save_random(int ret, void *buf, size_t len)
diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c
index ccb4d89..3c0a894 100644
--- a/replay/replay-snapshot.c
+++ b/replay/replay-snapshot.c
@@ -11,10 +11,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "monitor/monitor.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#include "migration/snapshot.h"
diff --git a/replay/replay-time.c b/replay/replay-time.c
index ee0ebfc..f3d62e1 100644
--- a/replay/replay-time.c
+++ b/replay/replay-time.c
@@ -10,7 +10,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "replay-internal.h"
#include "qemu/error-report.h"
diff --git a/replay/replay.c b/replay/replay.c
index a2c576c..a3e24c9 100644
--- a/replay/replay.c
+++ b/replay/replay.c
@@ -11,13 +11,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "exec/icount.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "replay-internal.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/error-report.h"
/* Current version of the replay mechanism.
@@ -385,6 +385,8 @@ static void replay_enable(const char *fname, int mode)
replay_fetch_data_kind();
}
+ runstate_replay_enable();
+
replay_init_events();
}
@@ -449,27 +451,6 @@ void replay_start(void)
replay_enable_events();
}
-/*
- * For none/record the answer is yes.
- */
-bool replay_can_wait(void)
-{
- if (replay_mode == REPLAY_MODE_PLAY) {
- /*
- * For playback we shouldn't ever be at a point we wait. If
- * the instruction count has reached zero and we have an
- * unconsumed event we should go around again and consume it.
- */
- if (replay_state.instruction_count == 0 && replay_state.has_unread_data) {
- return false;
- } else {
- replay_sync_error("Playback shouldn't have to iowait");
- }
- }
- return true;
-}
-
-
void replay_finish(void)
{
if (replay_mode == REPLAY_MODE_NONE) {
diff --git a/replay/stubs-system.c b/replay/stubs-system.c
index 50cefdb..8f2b2d3 100644
--- a/replay/stubs-system.c
+++ b/replay/stubs-system.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "ui/input.h"
void replay_input_event(QemuConsole *src, InputEvent *evt)
diff --git a/roms/Makefile b/roms/Makefile
index dfed2b2..beff58d 100644
--- a/roms/Makefile
+++ b/roms/Makefile
@@ -34,6 +34,7 @@ find-cross-gcc = $(firstword $(wildcard $(patsubst %ld,%gcc,$(call find-cross-ld
# finally strip off path + toolname so we get the prefix
find-cross-prefix = $(subst gcc,,$(notdir $(call find-cross-gcc,$(1))))
+aarch64_cross_prefix := $(call find-cross-prefix,aarch64)
arm_cross_prefix := $(call find-cross-prefix,arm)
powerpc64_cross_prefix := $(call find-cross-prefix,powerpc64)
powerpc_cross_prefix := $(call find-cross-prefix,powerpc)
@@ -66,6 +67,7 @@ default help:
@echo " u-boot.e500 -- update u-boot.e500"
@echo " u-boot.sam460 -- update u-boot.sam460"
@echo " npcm7xx_bootrom -- update vbootrom for npcm7xx"
+ @echo " npcm8xx_bootrom -- update vbootrom for npcm8xx"
@echo " efi -- update UEFI (edk2) platform firmware"
@echo " opensbi32-generic -- update OpenSBI for 32-bit generic machine"
@echo " opensbi64-generic -- update OpenSBI for 64-bit generic machine"
@@ -157,6 +159,11 @@ edk2-version: edk2
touch $@; \
fi
+edk2-basetools: edk2-version
+ $(PYTHON) edk2-build.py --config edk2-build.config \
+ --silent --no-logs \
+ --match none # build only basetools
+
efi: edk2-version
$(PYTHON) edk2-build.py --config edk2-build.config \
--version-override "$(EDK2_STABLE)$(FIRMWARE_EXTRAVERSION)" \
@@ -189,6 +196,10 @@ npcm7xx_bootrom:
$(MAKE) -C vbootrom CROSS_COMPILE=$(arm_cross_prefix)
cp vbootrom/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin
+npcm8xx_bootrom:
+ $(MAKE) -C vbootrom CROSS_COMPILE=$(aarch64_cross_prefix)
+ cp vbootrom/npcm8xx_bootrom.bin ../pc-bios/npcm8xx_bootrom.bin
+
hppa-firmware:
$(MAKE) -C seabios-hppa parisc
cp seabios-hppa/out/hppa-firmware.img ../pc-bios/
diff --git a/roms/edk2 b/roms/edk2
-Subproject edc6681206c1a8791981a2f911d2fb8b3d2f576
+Subproject 4dfdca63a93497203f197ec98ba20e2327e4afe
diff --git a/roms/edk2-build.config b/roms/edk2-build.config
index cc9b211..9e45361 100644
--- a/roms/edk2-build.config
+++ b/roms/edk2-build.config
@@ -131,3 +131,16 @@ cpy1 = FV/RISCV_VIRT_CODE.fd edk2-riscv-code.fd
cpy2 = FV/RISCV_VIRT_VARS.fd edk2-riscv-vars.fd
pad1 = edk2-riscv-code.fd 32m
pad2 = edk2-riscv-vars.fd 32m
+
+####################################################################################
+# LoongArch64
+
+[build.loongarch64.qemu]
+conf = OvmfPkg/LoongArchVirt/LoongArchVirtQemu.dsc
+arch = LOONGARCH64
+plat = LoongArchVirtQemu
+dest = ../pc-bios
+cpy1 = FV/QEMU_EFI.fd edk2-loongarch64-code.fd
+pad1 = edk2-loongarch64-code.fd 16m
+cpy2 = FV/QEMU_VARS.fd edk2-loongarch64-vars.fd
+pad2 = edk2-loongarch64-vars.fd 16m
diff --git a/roms/edk2-version b/roms/edk2-version
index 1594ed8..069f19f 100644
--- a/roms/edk2-version
+++ b/roms/edk2-version
@@ -1,2 +1,2 @@
-EDK2_STABLE = edk2-stable202402
-EDK2_DATE = 02/14/2024
+EDK2_STABLE = edk2-stable202408
+EDK2_DATE = 08/13/2024
diff --git a/roms/openbios b/roms/openbios
-Subproject af97fd7af5e7c18f591a7b987291d3db4ffb28b
+Subproject c3a19c1e54977a53027d6232050e1e3e39a98a1
diff --git a/roms/opensbi b/roms/opensbi
-Subproject 455de672dd7c2aa1992df54dfb08dc11abbc1b1
+Subproject 43cace6c3671e5172d0df0a8963e552bb04b7b2
diff --git a/roms/seabios b/roms/seabios
-Subproject a6ed6b701f0a57db0569ab98b0661c12a6ec3ff
+Subproject b52ca86e094d19b58e2304417787e96b940e39c
diff --git a/roms/seabios-hppa b/roms/seabios-hppa
-Subproject 03774edaad3bfae090ac96ca5450353c641637d
+Subproject 3391c580960febcb9fa8f686f9666adaa462c34
diff --git a/roms/skiboot b/roms/skiboot
-Subproject 24a7eb35966d93455520bc2debdd7954314b638
+Subproject 785a5e3070a86e18521e62fe202b87209de30fa
diff --git a/roms/vbootrom b/roms/vbootrom
-Subproject 0c37a43527f0ee2b9584e7fb2fdc805e902635a
+Subproject 1287b6e42e839ba2ab0f06268c5b53ae60df353
diff --git a/rust/.gitignore b/rust/.gitignore
new file mode 100644
index 0000000..1bf71b1
--- /dev/null
+++ b/rust/.gitignore
@@ -0,0 +1,3 @@
+# Ignore any cargo development build artifacts; for qemu-wide builds, all build
+# artifacts will go to the meson build directory.
+target
diff --git a/rust/Cargo.lock b/rust/Cargo.lock
new file mode 100644
index 0000000..b785c71
--- /dev/null
+++ b/rust/Cargo.lock
@@ -0,0 +1,177 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anyhow"
+version = "1.0.98"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+
+[[package]]
+name = "arbitrary-int"
+version = "1.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c84fc003e338a6f69fbd4f7fe9f92b535ff13e9af8997f3b14b6ddff8b1df46d"
+
+[[package]]
+name = "bilge"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc707ed8ebf81de5cd6c7f48f54b4c8621760926cdf35a57000747c512e67b57"
+dependencies = [
+ "arbitrary-int",
+ "bilge-impl",
+]
+
+[[package]]
+name = "bilge-impl"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "feb11e002038ad243af39c2068c8a72bcf147acf05025dcdb916fcc000adb2d8"
+dependencies = [
+ "itertools",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "bits"
+version = "0.1.0"
+dependencies = [
+ "qemu_api_macros",
+]
+
+[[package]]
+name = "either"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b"
+
+[[package]]
+name = "foreign"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hpet"
+version = "0.1.0"
+dependencies = [
+ "qemu_api",
+ "qemu_api_macros",
+]
+
+[[package]]
+name = "itertools"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.162"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398"
+
+[[package]]
+name = "pl011"
+version = "0.1.0"
+dependencies = [
+ "bilge",
+ "bilge-impl",
+ "bits",
+ "qemu_api",
+ "qemu_api_macros",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "qemu_api"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "foreign",
+ "libc",
+ "qemu_api_macros",
+]
+
+[[package]]
+name = "qemu_api_macros"
+version = "0.1.0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
new file mode 100644
index 0000000..0868e1b
--- /dev/null
+++ b/rust/Cargo.toml
@@ -0,0 +1,101 @@
+[workspace]
+resolver = "2"
+members = [
+ "bits",
+ "qemu-api-macros",
+ "qemu-api",
+ "hw/char/pl011",
+ "hw/timer/hpet",
+]
+
+[workspace.package]
+edition = "2021"
+homepage = "https://www.qemu.org"
+license = "GPL-2.0-or-later"
+repository = "https://gitlab.com/qemu-project/qemu/"
+rust-version = "1.77.0"
+
+[workspace.lints.rust]
+unexpected_cfgs = { level = "deny", check-cfg = [
+ 'cfg(MESON)', 'cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)',
+] }
+
+# Occasionally, we may need to silence warnings and clippy lints that
+# were only introduced in newer Rust compiler versions. Do not croak
+# in that case; a CI job with rust_strict_lints == true disables this
+# and ensures that we do not have misspelled allow() attributes.
+unknown_lints = "allow"
+
+# Prohibit code that is forbidden in Rust 2024
+unsafe_op_in_unsafe_fn = "deny"
+
+[workspace.lints.rustdoc]
+private_intra_doc_links = "allow"
+
+broken_intra_doc_links = "deny"
+invalid_html_tags = "deny"
+invalid_rust_codeblocks = "deny"
+bare_urls = "deny"
+unescaped_backticks = "deny"
+redundant_explicit_links = "deny"
+
+[workspace.lints.clippy]
+# default-warn lints
+result_unit_err = "allow"
+should_implement_trait = "deny"
+# can be for a reason, e.g. in callbacks
+unused_self = "allow"
+# common in device crates
+upper_case_acronyms = "allow"
+
+# default-allow lints
+as_ptr_cast_mut = "deny"
+as_underscore = "deny"
+assertions_on_result_states = "deny"
+bool_to_int_with_if = "deny"
+borrow_as_ptr = "deny"
+cast_lossless = "deny"
+dbg_macro = "deny"
+debug_assert_with_mut_call = "deny"
+derive_partial_eq_without_eq = "deny"
+doc_markdown = "deny"
+empty_structs_with_brackets = "deny"
+ignored_unit_patterns = "deny"
+implicit_clone = "deny"
+macro_use_imports = "deny"
+missing_safety_doc = "deny"
+mut_mut = "deny"
+needless_bitwise_bool = "deny"
+needless_pass_by_ref_mut = "deny"
+needless_update = "deny"
+no_effect_underscore_binding = "deny"
+option_option = "deny"
+or_fun_call = "deny"
+ptr_as_ptr = "deny"
+ptr_cast_constness = "deny"
+pub_underscore_fields = "deny"
+redundant_clone = "deny"
+redundant_closure_for_method_calls = "deny"
+redundant_else = "deny"
+redundant_pub_crate = "deny"
+ref_binding_to_reference = "deny"
+ref_option_ref = "deny"
+return_self_not_must_use = "deny"
+same_name_method = "deny"
+semicolon_inside_block = "deny"
+shadow_unrelated = "deny"
+significant_drop_in_scrutinee = "deny"
+significant_drop_tightening = "deny"
+suspicious_operation_groupings = "deny"
+transmute_ptr_to_ptr = "deny"
+transmute_undefined_repr = "deny"
+type_repetition_in_bounds = "deny"
+uninlined_format_args = "deny"
+used_underscore_binding = "deny"
+
+# nice to have, but cannot be enabled yet
+#wildcard_imports = "deny" # still have many bindings::* imports
+
+# these may have false positives
+#option_if_let_else = "deny"
+cognitive_complexity = "deny"
diff --git a/rust/Kconfig b/rust/Kconfig
new file mode 100644
index 0000000..f9f5c39
--- /dev/null
+++ b/rust/Kconfig
@@ -0,0 +1 @@
+source hw/Kconfig
diff --git a/rust/bits/Cargo.toml b/rust/bits/Cargo.toml
new file mode 100644
index 0000000..1ff38a4
--- /dev/null
+++ b/rust/bits/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "bits"
+version = "0.1.0"
+authors = ["Paolo Bonzini <pbonzini@redhat.com>"]
+description = "const-friendly bit flags"
+resolver = "2"
+publish = false
+
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[dependencies]
+qemu_api_macros = { path = "../qemu-api-macros" }
+
+[lints]
+workspace = true
diff --git a/rust/bits/meson.build b/rust/bits/meson.build
new file mode 100644
index 0000000..2a41e13
--- /dev/null
+++ b/rust/bits/meson.build
@@ -0,0 +1,16 @@
+_bits_rs = static_library(
+ 'bits',
+ 'src/lib.rs',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ dependencies: [qemu_api_macros],
+)
+
+bits_rs = declare_dependency(link_with: _bits_rs)
+
+rust.test('rust-bits-tests', _bits_rs,
+ suite: ['unit', 'rust'])
+
+rust.doctest('rust-bits-doctests', _bits_rs,
+ dependencies: bits_rs,
+ suite: ['doc', 'rust'])
diff --git a/rust/bits/src/lib.rs b/rust/bits/src/lib.rs
new file mode 100644
index 0000000..d485d6b
--- /dev/null
+++ b/rust/bits/src/lib.rs
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: MIT or Apache-2.0 or GPL-2.0-or-later
+
+/// # Definition entry point
+///
+/// Define a struct with a single field of type $type. Include public constants
+/// for each element listed in braces.
+///
+/// The unnamed element at the end, if present, can be used to enlarge the set
+/// of valid bits. Bits that are valid but not listed are treated normally for
+/// the purpose of arithmetic operations, and are printed with their hexadecimal
+/// value.
+///
+/// The struct implements the following traits: [`BitAnd`](std::ops::BitAnd),
+/// [`BitOr`](std::ops::BitOr), [`BitXor`](std::ops::BitXor),
+/// [`Not`](std::ops::Not), [`Sub`](std::ops::Sub); [`Debug`](std::fmt::Debug),
+/// [`Display`](std::fmt::Display), [`Binary`](std::fmt::Binary),
+/// [`Octal`](std::fmt::Octal), [`LowerHex`](std::fmt::LowerHex),
+/// [`UpperHex`](std::fmt::UpperHex); [`From`]`<type>`/[`Into`]`<type>` where
+/// type is the type specified in the definition.
+///
+/// ## Example
+///
+/// ```
+/// # use bits::bits;
+/// bits! {
+/// pub struct Colors(u8) {
+/// BLACK = 0,
+/// RED = 1,
+/// GREEN = 1 << 1,
+/// BLUE = 1 << 2,
+/// WHITE = (1 << 0) | (1 << 1) | (1 << 2),
+/// }
+/// }
+/// ```
+///
+/// ```
+/// # use bits::bits;
+/// # bits! { pub struct Colors(u8) { BLACK = 0, RED = 1, GREEN = 1 << 1, BLUE = 1 << 2, } }
+///
+/// bits! {
+/// pub struct Colors8(u8) {
+/// BLACK = 0,
+/// RED = 1,
+/// GREEN = 1 << 1,
+/// BLUE = 1 << 2,
+/// WHITE = (1 << 0) | (1 << 1) | (1 << 2),
+///
+/// _ = 255,
+/// }
+/// }
+///
+/// // The previously defined struct ignores bits not explicitly defined.
+/// assert_eq!(
+/// Colors::from(255).into_bits(),
+/// (Colors::RED | Colors::GREEN | Colors::BLUE).into_bits()
+/// );
+///
+/// // Adding "_ = 255" makes it retain other bits as well.
+/// assert_eq!(Colors8::from(255).into_bits(), 255);
+///
+/// // all() does not include the additional bits, valid_bits() does
+/// assert_eq!(Colors8::all().into_bits(), Colors::all().into_bits());
+/// assert_eq!(Colors8::valid_bits().into_bits(), 255);
+/// ```
+///
+/// # Evaluation entry point
+///
+/// Return a constant corresponding to the boolean expression `$expr`.
+/// Identifiers in the expression correspond to values defined for the
+/// type `$type`. Supported operators are `!` (unary), `-`, `&`, `^`, `|`.
+///
+/// ## Examples
+///
+/// ```
+/// # use bits::bits;
+/// bits! {
+/// pub struct Colors(u8) {
+/// BLACK = 0,
+/// RED = 1,
+/// GREEN = 1 << 1,
+/// BLUE = 1 << 2,
+/// // same as "WHITE = 7",
+/// WHITE = bits!(Self as u8: RED | GREEN | BLUE),
+/// }
+/// }
+///
+/// let rgb = bits! { Colors: RED | GREEN | BLUE };
+/// assert_eq!(rgb, Colors::WHITE);
+/// ```
+#[macro_export]
+macro_rules! bits {
+ {
+ $(#[$struct_meta:meta])*
+ $struct_vis:vis struct $struct_name:ident($field_vis:vis $type:ty) {
+ $($(#[$const_meta:meta])* $const:ident = $val:expr),+
+ $(,_ = $mask:expr)?
+ $(,)?
+ }
+ } => {
+ $(#[$struct_meta])*
+ #[derive(Clone, Copy, PartialEq, Eq)]
+ #[repr(transparent)]
+ $struct_vis struct $struct_name($field_vis $type);
+
+ impl $struct_name {
+ $( #[allow(dead_code)] $(#[$const_meta])*
+ pub const $const: $struct_name = $struct_name($val); )+
+
+ #[doc(hidden)]
+ const VALID__: $type = $( Self::$const.0 )|+ $(|$mask)?;
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn empty() -> Self {
+ Self(0)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn all() -> Self {
+ Self($( Self::$const.0 )|+)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn valid_bits() -> Self {
+ Self(Self::VALID__)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn valid(val: $type) -> bool {
+ (val & !Self::VALID__) == 0
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn any_set(self, mask: Self) -> bool {
+ (self.0 & mask.0) != 0
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn all_set(self, mask: Self) -> bool {
+ (self.0 & mask.0) == mask.0
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn none_set(self, mask: Self) -> bool {
+ (self.0 & mask.0) == 0
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn from_bits(value: $type) -> Self {
+ $struct_name(value)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn into_bits(self) -> $type {
+ self.0
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub fn set(&mut self, rhs: Self) {
+ self.0 |= rhs.0;
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub fn clear(&mut self, rhs: Self) {
+ self.0 &= !rhs.0;
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub fn toggle(&mut self, rhs: Self) {
+ self.0 ^= rhs.0;
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn intersection(self, rhs: Self) -> Self {
+ $struct_name(self.0 & rhs.0)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn difference(self, rhs: Self) -> Self {
+ $struct_name(self.0 & !rhs.0)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn symmetric_difference(self, rhs: Self) -> Self {
+ $struct_name(self.0 ^ rhs.0)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn union(self, rhs: Self) -> Self {
+ $struct_name(self.0 | rhs.0)
+ }
+
+ #[allow(dead_code)]
+ #[inline(always)]
+ pub const fn invert(self) -> Self {
+ $struct_name(self.0 ^ Self::VALID__)
+ }
+ }
+
+ impl ::std::fmt::Binary for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ // If no width, use the highest valid bit
+ let width = f.width().unwrap_or((Self::VALID__.ilog2() + 1) as usize);
+ write!(f, "{:0>width$.precision$b}", self.0,
+ width = width,
+ precision = f.precision().unwrap_or(width))
+ }
+ }
+
+ impl ::std::fmt::LowerHex for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ <$type as ::std::fmt::LowerHex>::fmt(&self.0, f)
+ }
+ }
+
+ impl ::std::fmt::Octal for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ <$type as ::std::fmt::Octal>::fmt(&self.0, f)
+ }
+ }
+
+ impl ::std::fmt::UpperHex for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ <$type as ::std::fmt::UpperHex>::fmt(&self.0, f)
+ }
+ }
+
+ impl ::std::fmt::Debug for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ write!(f, "{}({})", stringify!($struct_name), self)
+ }
+ }
+
+ impl ::std::fmt::Display for $struct_name {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ use ::std::fmt::Display;
+ let mut first = true;
+ let mut left = self.0;
+ $(if Self::$const.0.is_power_of_two() && (self & Self::$const).0 != 0 {
+ if first { first = false } else { Display::fmt(&'|', f)?; }
+ Display::fmt(stringify!($const), f)?;
+ left -= Self::$const.0;
+ })+
+ if first {
+ Display::fmt(&'0', f)
+ } else if left != 0 {
+ write!(f, "|{left:#x}")
+ } else {
+ Ok(())
+ }
+ }
+ }
+
+ impl ::std::cmp::PartialEq<$type> for $struct_name {
+ fn eq(&self, rhs: &$type) -> bool {
+ self.0 == *rhs
+ }
+ }
+
+ impl ::std::ops::BitAnd<$struct_name> for &$struct_name {
+ type Output = $struct_name;
+ fn bitand(self, rhs: $struct_name) -> Self::Output {
+ $struct_name(self.0 & rhs.0)
+ }
+ }
+
+ impl ::std::ops::BitAndAssign<$struct_name> for $struct_name {
+ fn bitand_assign(&mut self, rhs: $struct_name) {
+ self.0 = self.0 & rhs.0
+ }
+ }
+
+ impl ::std::ops::BitXor<$struct_name> for &$struct_name {
+ type Output = $struct_name;
+ fn bitxor(self, rhs: $struct_name) -> Self::Output {
+ $struct_name(self.0 ^ rhs.0)
+ }
+ }
+
+ impl ::std::ops::BitXorAssign<$struct_name> for $struct_name {
+ fn bitxor_assign(&mut self, rhs: $struct_name) {
+ self.0 = self.0 ^ rhs.0
+ }
+ }
+
+ impl ::std::ops::BitOr<$struct_name> for &$struct_name {
+ type Output = $struct_name;
+ fn bitor(self, rhs: $struct_name) -> Self::Output {
+ $struct_name(self.0 | rhs.0)
+ }
+ }
+
+ impl ::std::ops::BitOrAssign<$struct_name> for $struct_name {
+ fn bitor_assign(&mut self, rhs: $struct_name) {
+ self.0 = self.0 | rhs.0
+ }
+ }
+
+ impl ::std::ops::Sub<$struct_name> for &$struct_name {
+ type Output = $struct_name;
+ fn sub(self, rhs: $struct_name) -> Self::Output {
+ $struct_name(self.0 & !rhs.0)
+ }
+ }
+
+ impl ::std::ops::SubAssign<$struct_name> for $struct_name {
+ fn sub_assign(&mut self, rhs: $struct_name) {
+ self.0 = self.0 - rhs.0
+ }
+ }
+
+ impl ::std::ops::Not for &$struct_name {
+ type Output = $struct_name;
+ fn not(self) -> Self::Output {
+ $struct_name(self.0 ^ $struct_name::VALID__)
+ }
+ }
+
+ impl ::std::ops::BitAnd<$struct_name> for $struct_name {
+ type Output = Self;
+ fn bitand(self, rhs: Self) -> Self::Output {
+ $struct_name(self.0 & rhs.0)
+ }
+ }
+
+ impl ::std::ops::BitXor<$struct_name> for $struct_name {
+ type Output = Self;
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ $struct_name(self.0 ^ rhs.0)
+ }
+ }
+
+ impl ::std::ops::BitOr<$struct_name> for $struct_name {
+ type Output = Self;
+ fn bitor(self, rhs: Self) -> Self::Output {
+ $struct_name(self.0 | rhs.0)
+ }
+ }
+
+ impl ::std::ops::Sub<$struct_name> for $struct_name {
+ type Output = Self;
+ fn sub(self, rhs: Self) -> Self::Output {
+ $struct_name(self.0 & !rhs.0)
+ }
+ }
+
+ impl ::std::ops::Not for $struct_name {
+ type Output = Self;
+ fn not(self) -> Self::Output {
+ $struct_name(self.0 ^ Self::VALID__)
+ }
+ }
+
+ impl From<$struct_name> for $type {
+ fn from(x: $struct_name) -> $type {
+ x.0
+ }
+ }
+
+ impl From<$type> for $struct_name {
+ fn from(x: $type) -> Self {
+ $struct_name(x & Self::VALID__)
+ }
+ }
+ };
+
+ { $type:ty: $expr:expr } => {
+ ::qemu_api_macros::bits_const_internal! { $type @ ($expr) }
+ };
+
+ { $type:ty as $int_type:ty: $expr:expr } => {
+ (::qemu_api_macros::bits_const_internal! { $type @ ($expr) }.into_bits()) as $int_type
+ };
+}
+
+#[cfg(test)]
+mod test {
+ bits! {
+ pub struct InterruptMask(u32) {
+ OE = 1 << 10,
+ BE = 1 << 9,
+ PE = 1 << 8,
+ FE = 1 << 7,
+ RT = 1 << 6,
+ TX = 1 << 5,
+ RX = 1 << 4,
+ DSR = 1 << 3,
+ DCD = 1 << 2,
+ CTS = 1 << 1,
+ RI = 1 << 0,
+
+ E = bits!(Self as u32: OE | BE | PE | FE),
+ MS = bits!(Self as u32: RI | DSR | DCD | CTS),
+ }
+ }
+
+ #[test]
+ pub fn test_not() {
+ assert_eq!(
+ !InterruptMask::from(InterruptMask::RT.0),
+ InterruptMask::E | InterruptMask::MS | InterruptMask::TX | InterruptMask::RX
+ );
+ }
+
+ #[test]
+ pub fn test_and() {
+ assert_eq!(
+ InterruptMask::from(0),
+ InterruptMask::MS & InterruptMask::OE
+ )
+ }
+
+ #[test]
+ pub fn test_or() {
+ assert_eq!(
+ InterruptMask::E,
+ InterruptMask::OE | InterruptMask::BE | InterruptMask::PE | InterruptMask::FE
+ );
+ }
+
+ #[test]
+ pub fn test_xor() {
+ assert_eq!(
+ InterruptMask::E ^ InterruptMask::BE,
+ InterruptMask::OE | InterruptMask::PE | InterruptMask::FE
+ );
+ }
+}
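
For readers skimming the patch, here is a minimal usage sketch of the bits! macro defined above. It is not part of the patch: the Status type and the example() function are hypothetical and exist only to illustrate the generated API (the From/Into masking, the set-query helpers, the Display impl and the evaluation entry point), and it assumes the bits and qemu_api_macros crates introduced by this series are available as dependencies.

use bits::bits;

bits! {
    /// Hypothetical status flags; only the three named bits are valid.
    pub struct Status(u8) {
        READY = 1 << 0,
        ERROR = 1 << 1,
        DONE = 1 << 2,
    }
}

fn example() {
    // The evaluation entry point resolves identifiers against Status.
    let ready_or_done = bits! { Status: READY | DONE };

    // From<u8> masks out bits that were not declared valid.
    let s = Status::from(0xff);
    assert_eq!(s.into_bits(), 0b111);

    // Set-query helpers generated by the macro.
    assert!(s.all_set(ready_or_done));
    assert!(!Status::empty().any_set(Status::ERROR));

    // Display prints the named power-of-two bits joined by '|'.
    assert_eq!(format!("{}", Status::READY | Status::DONE), "READY|DONE");
}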
diff --git a/rust/hw/Kconfig b/rust/hw/Kconfig
new file mode 100644
index 0000000..36f92ec
--- /dev/null
+++ b/rust/hw/Kconfig
@@ -0,0 +1,3 @@
+# devices Kconfig
+source char/Kconfig
+source timer/Kconfig
diff --git a/rust/hw/char/Kconfig b/rust/hw/char/Kconfig
new file mode 100644
index 0000000..5fe800c
--- /dev/null
+++ b/rust/hw/char/Kconfig
@@ -0,0 +1,2 @@
+config X_PL011_RUST
+ bool
diff --git a/rust/hw/char/meson.build b/rust/hw/char/meson.build
new file mode 100644
index 0000000..5716dc4
--- /dev/null
+++ b/rust/hw/char/meson.build
@@ -0,0 +1 @@
+subdir('pl011')
diff --git a/rust/hw/char/pl011/Cargo.toml b/rust/hw/char/pl011/Cargo.toml
new file mode 100644
index 0000000..003ef96
--- /dev/null
+++ b/rust/hw/char/pl011/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "pl011"
+version = "0.1.0"
+authors = ["Manos Pitsidianakis <manos.pitsidianakis@linaro.org>"]
+description = "pl011 device model for QEMU"
+resolver = "2"
+publish = false
+
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
+bilge = { version = "0.2.0" }
+bilge-impl = { version = "0.2.0" }
+bits = { path = "../../../bits" }
+qemu_api = { path = "../../../qemu-api" }
+qemu_api_macros = { path = "../../../qemu-api-macros" }
+
+[lints]
+workspace = true
diff --git a/rust/hw/char/pl011/meson.build b/rust/hw/char/pl011/meson.build
new file mode 100644
index 0000000..2a1be32
--- /dev/null
+++ b/rust/hw/char/pl011/meson.build
@@ -0,0 +1,21 @@
+_libpl011_rs = static_library(
+ 'pl011',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ dependencies: [
+ bilge_rs,
+ bilge_impl_rs,
+ bits_rs,
+ qemu_api,
+ qemu_api_macros,
+ ],
+)
+
+rust_devices_ss.add(when: 'CONFIG_X_PL011_RUST', if_true: [declare_dependency(
+ link_whole: [_libpl011_rs],
+ # Putting proc macro crates in `dependencies` is necessary for Meson to find
+ # them when compiling the root per-target static rust lib.
+ dependencies: [bilge_impl_rs, qemu_api_macros],
+ variables: {'crate': 'pl011'},
+)])
diff --git a/rust/hw/char/pl011/src/device.rs b/rust/hw/char/pl011/src/device.rs
new file mode 100644
index 0000000..5b53f26
--- /dev/null
+++ b/rust/hw/char/pl011/src/device.rs
@@ -0,0 +1,714 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{ffi::CStr, mem::size_of};
+
+use qemu_api::{
+ chardev::{CharBackend, Chardev, Event},
+ impl_vmstate_forward,
+ irq::{IRQState, InterruptSource},
+ log::Log,
+ log_mask_ln,
+ memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
+ prelude::*,
+ qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
+ qom::{ObjectImpl, Owned, ParentField, ParentInit},
+ static_assert,
+ sysbus::{SysBusDevice, SysBusDeviceImpl},
+ uninit_field_mut,
+ vmstate::VMStateDescription,
+};
+
+use crate::{
+ device_class,
+ registers::{self, Interrupt, RegisterOffset},
+};
+
+// TODO: You must disable the UART before any of the control registers are
+// reprogrammed. When the UART is disabled in the middle of transmission or
+// reception, it completes the current character before stopping
+
+/// Integer Baud Rate Divider, `UARTIBRD`
+const IBRD_MASK: u32 = 0xffff;
+
+/// Fractional Baud Rate Divider, `UARTFBRD`
+const FBRD_MASK: u32 = 0x3f;
+
+/// QEMU sourced constant.
+pub const PL011_FIFO_DEPTH: u32 = 16;
+
+#[derive(Clone, Copy)]
+struct DeviceId(&'static [u8; 8]);
+
+impl std::ops::Index<hwaddr> for DeviceId {
+ type Output = u8;
+
+ fn index(&self, idx: hwaddr) -> &Self::Output {
+ &self.0[idx as usize]
+ }
+}
+
+// FIFOs use 32-bit indices instead of usize, for compatibility with
+// the migration stream produced by the C version of this device.
+#[repr(transparent)]
+#[derive(Debug, Default)]
+pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
+impl_vmstate_forward!(Fifo);
+
+impl Fifo {
+ const fn len(&self) -> u32 {
+ self.0.len() as u32
+ }
+}
+
+impl std::ops::IndexMut<u32> for Fifo {
+ fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
+ &mut self.0[idx as usize]
+ }
+}
+
+impl std::ops::Index<u32> for Fifo {
+ type Output = registers::Data;
+
+ fn index(&self, idx: u32) -> &Self::Output {
+ &self.0[idx as usize]
+ }
+}
+
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct PL011Registers {
+ #[doc(alias = "fr")]
+ pub flags: registers::Flags,
+ #[doc(alias = "lcr")]
+ pub line_control: registers::LineControl,
+ #[doc(alias = "rsr")]
+ pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
+ #[doc(alias = "cr")]
+ pub control: registers::Control,
+ pub dmacr: u32,
+ pub int_enabled: Interrupt,
+ pub int_level: Interrupt,
+ pub read_fifo: Fifo,
+ pub ilpr: u32,
+ pub ibrd: u32,
+ pub fbrd: u32,
+ pub ifl: u32,
+ pub read_pos: u32,
+ pub read_count: u32,
+ pub read_trigger: u32,
+}
+
+#[repr(C)]
+#[derive(qemu_api_macros::Object)]
+/// PL011 Device Model in QEMU
+pub struct PL011State {
+ pub parent_obj: ParentField<SysBusDevice>,
+ pub iomem: MemoryRegion,
+ #[doc(alias = "chr")]
+ pub char_backend: CharBackend,
+ pub regs: BqlRefCell<PL011Registers>,
+ /// QEMU interrupts
+ ///
+ /// ```text
+ /// * sysbus MMIO region 0: device registers
+ /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
+ /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
+ /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
+ /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
+ /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
+ /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
+ /// ```
+ #[doc(alias = "irq")]
+ pub interrupts: [InterruptSource; IRQMASK.len()],
+ #[doc(alias = "clk")]
+ pub clock: Owned<Clock>,
+ #[doc(alias = "migrate_clk")]
+ pub migrate_clock: bool,
+}
+
+// Some C users of this device embed its state struct into their own
+// structs, so the size of the Rust version must not be any larger
+// than the size of the C one. If this assert triggers you need to
+// expand the padding_for_rust[] array in the C PL011State struct.
+static_assert!(size_of::<PL011State>() <= size_of::<qemu_api::bindings::PL011State>());
+
+qom_isa!(PL011State : SysBusDevice, DeviceState, Object);
+
+#[repr(C)]
+pub struct PL011Class {
+ parent_class: <SysBusDevice as ObjectType>::Class,
+ /// The byte string that identifies the device.
+ device_id: DeviceId,
+}
+
+trait PL011Impl: SysBusDeviceImpl + IsA<PL011State> {
+ const DEVICE_ID: DeviceId;
+}
+
+impl PL011Class {
+ fn class_init<T: PL011Impl>(&mut self) {
+ self.device_id = T::DEVICE_ID;
+ self.parent_class.class_init::<T>();
+ }
+}
+
+unsafe impl ObjectType for PL011State {
+ type Class = PL011Class;
+ const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
+}
+
+impl PL011Impl for PL011State {
+ const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
+}
+
+impl ObjectImpl for PL011State {
+ type ParentType = SysBusDevice;
+
+ const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = Some(Self::init);
+ const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
+ const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+}
+
+impl DeviceImpl for PL011State {
+ fn properties() -> &'static [Property] {
+ &device_class::PL011_PROPERTIES
+ }
+ fn vmsd() -> Option<&'static VMStateDescription> {
+ Some(&device_class::VMSTATE_PL011)
+ }
+ const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(Self::realize);
+}
+
+impl ResettablePhasesImpl for PL011State {
+ const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
+}
+
+impl SysBusDeviceImpl for PL011State {}
+
+impl PL011Registers {
+ pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
+ use RegisterOffset::*;
+
+ let mut update = false;
+ let result = match offset {
+ DR => self.read_data_register(&mut update),
+ RSR => u32::from(self.receive_status_error_clear),
+ FR => u32::from(self.flags),
+ FBRD => self.fbrd,
+ ILPR => self.ilpr,
+ IBRD => self.ibrd,
+ LCR_H => u32::from(self.line_control),
+ CR => u32::from(self.control),
+ FLS => self.ifl,
+ IMSC => u32::from(self.int_enabled),
+ RIS => u32::from(self.int_level),
+ MIS => u32::from(self.int_level & self.int_enabled),
+ ICR => {
+ // "The UARTICR Register is the interrupt clear register and is write-only"
+ // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
+ 0
+ }
+ DMACR => self.dmacr,
+ };
+ (update, result)
+ }
+
+ pub(self) fn write(
+ &mut self,
+ offset: RegisterOffset,
+ value: u32,
+ char_backend: &CharBackend,
+ ) -> bool {
+ // eprintln!("write offset {offset} value {value}");
+ use RegisterOffset::*;
+ match offset {
+ DR => return self.write_data_register(value),
+ RSR => {
+ self.receive_status_error_clear = 0.into();
+ }
+ FR => {
+ // flag writes are ignored
+ }
+ ILPR => {
+ self.ilpr = value;
+ }
+ IBRD => {
+ self.ibrd = value;
+ }
+ FBRD => {
+ self.fbrd = value;
+ }
+ LCR_H => {
+ let new_val: registers::LineControl = value.into();
+ // Reset the FIFO state on FIFO enable or disable
+ if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
+ self.reset_rx_fifo();
+ self.reset_tx_fifo();
+ }
+ let update = (self.line_control.send_break() != new_val.send_break()) && {
+ let break_enable = new_val.send_break();
+ let _ = char_backend.send_break(break_enable);
+ self.loopback_break(break_enable)
+ };
+ self.line_control = new_val;
+ self.set_read_trigger();
+ return update;
+ }
+ CR => {
+ // ??? Need to implement the enable bit.
+ self.control = value.into();
+ return self.loopback_mdmctrl();
+ }
+ FLS => {
+ self.ifl = value;
+ self.set_read_trigger();
+ }
+ IMSC => {
+ self.int_enabled = Interrupt::from(value);
+ return true;
+ }
+ RIS => {}
+ MIS => {}
+ ICR => {
+ self.int_level &= !Interrupt::from(value);
+ return true;
+ }
+ DMACR => {
+ self.dmacr = value;
+ if value & 3 > 0 {
+ log_mask_ln!(Log::Unimp, "pl011: DMA not implemented");
+ }
+ }
+ }
+ false
+ }
+
+ fn read_data_register(&mut self, update: &mut bool) -> u32 {
+ self.flags.set_receive_fifo_full(false);
+ let c = self.read_fifo[self.read_pos];
+
+ if self.read_count > 0 {
+ self.read_count -= 1;
+ self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
+ }
+ if self.read_count == 0 {
+ self.flags.set_receive_fifo_empty(true);
+ }
+ if self.read_count + 1 == self.read_trigger {
+ self.int_level &= !Interrupt::RX;
+ }
+ self.receive_status_error_clear.set_from_data(c);
+ *update = true;
+ u32::from(c)
+ }
+
+ fn write_data_register(&mut self, value: u32) -> bool {
+ if !self.control.enable_uart() {
+ log_mask_ln!(Log::GuestError, "PL011 data written to disabled UART");
+ }
+ if !self.control.enable_transmit() {
+ log_mask_ln!(Log::GuestError, "PL011 data written to disabled TX UART");
+ }
+ // interrupts always checked
+ let _ = self.loopback_tx(value.into());
+ self.int_level |= Interrupt::TX;
+ true
+ }
+
+ #[inline]
+ #[must_use]
+ fn loopback_tx(&mut self, value: registers::Data) -> bool {
+ // Caveat:
+ //
+ // In real hardware, TX loopback happens at the serial-bit level
+ // and is then reassembled by the RX logic back into bytes and
+ // placed into the RX fifo. That is, loopback happens after the TX
+ // fifo.
+ //
+ // Because the real hardware TX fifo is time-drained at the frame
+ // rate governed by the configured serial format, some loopback
+ // bytes in the TX fifo may still be able to get into an RX fifo
+ // that could be full at times, while it is drained at the pace of
+ // the software.
+ //
+ // In such a scenario, the RX draining pace is the major factor
+ // deciding which loopback bytes get into the RX fifo, unless
+ // hardware flow control is enabled.
+ //
+ // For simplicity, the behaviour described above is not emulated.
+ self.loopback_enabled() && self.fifo_rx_put(value)
+ }
+
+ #[must_use]
+ fn loopback_mdmctrl(&mut self) -> bool {
+ if !self.loopback_enabled() {
+ return false;
+ }
+
+ /*
+ * Loopback software-driven modem control outputs to modem status inputs:
+ * FR.RI <= CR.Out2
+ * FR.DCD <= CR.Out1
+ * FR.CTS <= CR.RTS
+ * FR.DSR <= CR.DTR
+ *
+ * The loopback happens immediately even if this call is triggered
+ * by setting only CR.LBE.
+ *
+ * CTS/RTS updates due to enabled hardware flow controls are not
+ * dealt with here.
+ */
+
+ self.flags.set_ring_indicator(self.control.out_2());
+ self.flags.set_data_carrier_detect(self.control.out_1());
+ self.flags.set_clear_to_send(self.control.request_to_send());
+ self.flags
+ .set_data_set_ready(self.control.data_transmit_ready());
+
+ // Change interrupts based on updated FR
+ let mut il = self.int_level;
+
+ il &= !Interrupt::MS;
+
+ if self.flags.data_set_ready() {
+ il |= Interrupt::DSR;
+ }
+ if self.flags.data_carrier_detect() {
+ il |= Interrupt::DCD;
+ }
+ if self.flags.clear_to_send() {
+ il |= Interrupt::CTS;
+ }
+ if self.flags.ring_indicator() {
+ il |= Interrupt::RI;
+ }
+ self.int_level = il;
+ true
+ }
+
+ fn loopback_break(&mut self, enable: bool) -> bool {
+ enable && self.loopback_tx(registers::Data::BREAK)
+ }
+
+ fn set_read_trigger(&mut self) {
+ self.read_trigger = 1;
+ }
+
+ pub fn reset(&mut self) {
+ self.line_control.reset();
+ self.receive_status_error_clear.reset();
+ self.dmacr = 0;
+ self.int_enabled = 0.into();
+ self.int_level = 0.into();
+ self.ilpr = 0;
+ self.ibrd = 0;
+ self.fbrd = 0;
+ self.read_trigger = 1;
+ self.ifl = 0x12;
+ self.control.reset();
+ self.flags.reset();
+ self.reset_rx_fifo();
+ self.reset_tx_fifo();
+ }
+
+ pub fn reset_rx_fifo(&mut self) {
+ self.read_count = 0;
+ self.read_pos = 0;
+
+ // Reset FIFO flags
+ self.flags.set_receive_fifo_full(false);
+ self.flags.set_receive_fifo_empty(true);
+ }
+
+ pub fn reset_tx_fifo(&mut self) {
+ // Reset FIFO flags
+ self.flags.set_transmit_fifo_full(false);
+ self.flags.set_transmit_fifo_empty(true);
+ }
+
+ #[inline]
+ pub fn fifo_enabled(&self) -> bool {
+ self.line_control.fifos_enabled() == registers::Mode::FIFO
+ }
+
+ #[inline]
+ pub fn loopback_enabled(&self) -> bool {
+ self.control.enable_loopback()
+ }
+
+ #[inline]
+ pub fn fifo_depth(&self) -> u32 {
+ // Note: FIFO depth is expected to be power-of-2
+ if self.fifo_enabled() {
+ return PL011_FIFO_DEPTH;
+ }
+ 1
+ }
+
+ #[must_use]
+ pub fn fifo_rx_put(&mut self, value: registers::Data) -> bool {
+ let depth = self.fifo_depth();
+ assert!(depth > 0);
+ let slot = (self.read_pos + self.read_count) & (depth - 1);
+ self.read_fifo[slot] = value;
+ self.read_count += 1;
+ self.flags.set_receive_fifo_empty(false);
+ if self.read_count == depth {
+ self.flags.set_receive_fifo_full(true);
+ }
+
+ if self.read_count == self.read_trigger {
+ self.int_level |= Interrupt::RX;
+ return true;
+ }
+ false
+ }
+
+ pub fn post_load(&mut self) -> Result<(), ()> {
+ /* Sanity-check input state */
+ if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
+ return Err(());
+ }
+
+ if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
+ // Older versions of PL011 didn't ensure that the single
+ // character in the FIFO in FIFO-disabled mode is in
+ // element 0 of the array; convert to follow the current
+ // code's assumptions.
+ self.read_fifo[0] = self.read_fifo[self.read_pos];
+ self.read_pos = 0;
+ }
+
+ self.ibrd &= IBRD_MASK;
+ self.fbrd &= FBRD_MASK;
+
+ Ok(())
+ }
+}
+
+impl PL011State {
+ /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
+ ///
+ /// # Safety
+ ///
+ /// `self` must point to a correctly sized and aligned location for the
+ /// `PL011State` type. It must not be called more than once on the same
+ /// location/instance. All its fields are expected to hold uninitialized
+ /// values with the sole exception of `parent_obj`.
+ unsafe fn init(mut this: ParentInit<Self>) {
+ static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
+ .read(&PL011State::read)
+ .write(&PL011State::write)
+ .native_endian()
+ .impl_sizes(4, 4)
+ .build();
+
+ // SAFETY: this and this.iomem are guaranteed to be valid at this point
+ MemoryRegion::init_io(
+ &mut uninit_field_mut!(*this, iomem),
+ &PL011_OPS,
+ "pl011",
+ 0x1000,
+ );
+
+ uninit_field_mut!(*this, regs).write(Default::default());
+
+ let clock = DeviceState::init_clock_in(
+ &mut this,
+ "clk",
+ &Self::clock_update,
+ ClockEvent::ClockUpdate,
+ );
+ uninit_field_mut!(*this, clock).write(clock);
+ }
+
+ const fn clock_update(&self, _event: ClockEvent) {
+ /* pl011_trace_baudrate_change(s); */
+ }
+
+ fn post_init(&self) {
+ self.init_mmio(&self.iomem);
+ for irq in self.interrupts.iter() {
+ self.init_irq(irq);
+ }
+ }
+
+ fn read(&self, offset: hwaddr, _size: u32) -> u64 {
+ match RegisterOffset::try_from(offset) {
+ Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
+ let device_id = self.get_class().device_id;
+ u64::from(device_id[(offset - 0xfe0) >> 2])
+ }
+ Err(_) => {
+ log_mask_ln!(Log::GuestError, "PL011State::read: Bad offset {offset}");
+ 0
+ }
+ Ok(field) => {
+ let (update_irq, result) = self.regs.borrow_mut().read(field);
+ if update_irq {
+ self.update();
+ self.char_backend.accept_input();
+ }
+ result.into()
+ }
+ }
+ }
+
+ fn write(&self, offset: hwaddr, value: u64, _size: u32) {
+ let mut update_irq = false;
+ if let Ok(field) = RegisterOffset::try_from(offset) {
+ // qemu_chr_fe_write_all() calls into the can_receive
+ // callback, so handle writes before entering PL011Registers.
+ if field == RegisterOffset::DR {
+ // ??? Check if transmitter is enabled.
+ let ch: [u8; 1] = [value as u8];
+ // XXX this blocks entire thread. Rewrite to use
+ // qemu_chr_fe_write and background I/O callbacks
+ let _ = self.char_backend.write_all(&ch);
+ }
+
+ update_irq = self
+ .regs
+ .borrow_mut()
+ .write(field, value as u32, &self.char_backend);
+ } else {
+ log_mask_ln!(
+ Log::GuestError,
+ "PL011State::write: Bad offset {offset} value {value}"
+ );
+ }
+ if update_irq {
+ self.update();
+ }
+ }
+
+ fn can_receive(&self) -> u32 {
+ let regs = self.regs.borrow();
+ // trace_pl011_can_receive(s->lcr, s->read_count, r);
+ regs.fifo_depth() - regs.read_count
+ }
+
+ fn receive(&self, buf: &[u8]) {
+ let mut regs = self.regs.borrow_mut();
+ if regs.loopback_enabled() {
+ // In loopback mode, the RX input signal is internally disconnected
+ // from the entire receiving logics; thus, all inputs are ignored,
+ // and BREAK detection on RX input signal is also not performed.
+ return;
+ }
+
+ let mut update_irq = false;
+ for &c in buf {
+ let c: u32 = c.into();
+ update_irq |= regs.fifo_rx_put(c.into());
+ }
+
+ // Release the BqlRefCell before calling self.update()
+ drop(regs);
+ if update_irq {
+ self.update();
+ }
+ }
+
+ fn event(&self, event: Event) {
+ let mut update_irq = false;
+ let mut regs = self.regs.borrow_mut();
+ if event == Event::CHR_EVENT_BREAK && !regs.loopback_enabled() {
+ update_irq = regs.fifo_rx_put(registers::Data::BREAK);
+ }
+ // Release the BqlRefCell before calling self.update()
+ drop(regs);
+
+ if update_irq {
+ self.update()
+ }
+ }
+
+ fn realize(&self) -> qemu_api::Result<()> {
+ self.char_backend
+ .enable_handlers(self, Self::can_receive, Self::receive, Self::event);
+ Ok(())
+ }
+
+ fn reset_hold(&self, _type: ResetType) {
+ self.regs.borrow_mut().reset();
+ }
+
+ fn update(&self) {
+ let regs = self.regs.borrow();
+ let flags = regs.int_level & regs.int_enabled;
+ for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
+ irq.set(flags.any_set(i));
+ }
+ }
+
+ pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
+ self.regs.borrow_mut().post_load()
+ }
+}
+
+/// Which bits in the interrupt status matter for each outbound IRQ line?
+const IRQMASK: [Interrupt; 6] = [
+ Interrupt::all(),
+ Interrupt::RX,
+ Interrupt::TX,
+ Interrupt::RT,
+ Interrupt::MS,
+ Interrupt::E,
+];
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer for `chr`
+/// and `irq`.
+#[no_mangle]
+pub unsafe extern "C" fn pl011_create(
+ addr: u64,
+ irq: *mut IRQState,
+ chr: *mut Chardev,
+) -> *mut DeviceState {
+ // SAFETY: The callers promise that they have owned references.
+ // They do not gift them to pl011_create, so use `Owned::from`.
+ let irq = unsafe { Owned::<IRQState>::from(&*irq) };
+
+ let dev = PL011State::new();
+ if !chr.is_null() {
+ let chr = unsafe { Owned::<Chardev>::from(&*chr) };
+ dev.prop_set_chr("chardev", &chr);
+ }
+ dev.sysbus_realize();
+ dev.mmio_map(0, addr);
+ dev.connect_irq(0, &irq);
+
+ // The pointer is kept alive by the QOM tree; drop the owned ref
+ dev.as_mut_ptr()
+}
+
+#[repr(C)]
+#[derive(qemu_api_macros::Object)]
+/// PL011 Luminary device model.
+pub struct PL011Luminary {
+ parent_obj: ParentField<PL011State>,
+}
+
+qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);
+
+unsafe impl ObjectType for PL011Luminary {
+ type Class = <PL011State as ObjectType>::Class;
+ const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
+}
+
+impl ObjectImpl for PL011Luminary {
+ type ParentType = PL011State;
+
+ const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+}
+
+impl PL011Impl for PL011Luminary {
+ const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
+}
+
+impl DeviceImpl for PL011Luminary {}
+impl ResettablePhasesImpl for PL011Luminary {}
+impl SysBusDeviceImpl for PL011Luminary {}
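
As a side note on the interrupt plumbing above: PL011State::update() masks the raw level (UARTRIS) with the enable register (UARTIMSC) and asserts each of the six sysbus IRQ outputs whenever any bit of its entry in IRQMASK is set. The sketch below is not part of the patch; irq_levels() and the plain u32 constants are hypothetical stand-ins for the Interrupt bitflags and InterruptSource wrappers, with bit positions taken from the InterruptMask example in rust/bits/src/lib.rs.

/// Raw interrupt bits, mirroring the UARTRIS layout used by the device model.
const INT_OE: u32 = 1 << 10;
const INT_BE: u32 = 1 << 9;
const INT_PE: u32 = 1 << 8;
const INT_FE: u32 = 1 << 7;
const INT_RT: u32 = 1 << 6;
const INT_TX: u32 = 1 << 5;
const INT_RX: u32 = 1 << 4;
const INT_MS: u32 = 0xf; // RI | DSR | DCD | CTS
const INT_E: u32 = INT_OE | INT_BE | INT_PE | INT_FE;

/// One mask per outbound sysbus IRQ, in the order used by IRQMASK:
/// combined, RX, TX, RT, MS (modem status) and E (error) lines.
const IRQ_MASKS: [u32; 6] = [
    INT_E | INT_MS | INT_RT | INT_TX | INT_RX,
    INT_RX,
    INT_TX,
    INT_RT,
    INT_MS,
    INT_E,
];

/// Returns which of the six IRQ outputs should be asserted for a given
/// raw interrupt level and interrupt mask.
fn irq_levels(int_level: u32, int_enabled: u32) -> [bool; 6] {
    let flags = int_level & int_enabled;
    let mut out = [false; 6];
    for (line, mask) in out.iter_mut().zip(IRQ_MASKS) {
        *line = flags & mask != 0;
    }
    out
}

fn main() {
    // RX pending and enabled: the combined line and the RX line assert.
    assert_eq!(
        irq_levels(INT_RX, INT_RX | INT_TX),
        [true, true, false, false, false, false]
    );
}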
diff --git a/rust/hw/char/pl011/src/device_class.rs b/rust/hw/char/pl011/src/device_class.rs
new file mode 100644
index 0000000..d328d84
--- /dev/null
+++ b/rust/hw/char/pl011/src/device_class.rs
@@ -0,0 +1,103 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{
+ ffi::{c_int, c_void},
+ ptr::NonNull,
+};
+
+use qemu_api::{
+ bindings::{qdev_prop_bool, qdev_prop_chr},
+ prelude::*,
+ vmstate::VMStateDescription,
+ vmstate_clock, vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_unused,
+ zeroable::Zeroable,
+};
+
+use crate::device::{PL011Registers, PL011State};
+
+extern "C" fn pl011_clock_needed(opaque: *mut c_void) -> bool {
+ let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
+ unsafe { state.as_ref().migrate_clock }
+}
+
+/// Migration subsection for [`PL011State`] clock.
+static VMSTATE_PL011_CLOCK: VMStateDescription = VMStateDescription {
+ name: c"pl011/clock".as_ptr(),
+ version_id: 1,
+ minimum_version_id: 1,
+ needed: Some(pl011_clock_needed),
+ fields: vmstate_fields! {
+ vmstate_clock!(PL011State, clock),
+ },
+ ..Zeroable::ZERO
+};
+
+extern "C" fn pl011_post_load(opaque: *mut c_void, version_id: c_int) -> c_int {
+ let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
+ let result = unsafe { state.as_ref().post_load(version_id as u32) };
+ if result.is_err() {
+ -1
+ } else {
+ 0
+ }
+}
+
+static VMSTATE_PL011_REGS: VMStateDescription = VMStateDescription {
+ name: c"pl011/regs".as_ptr(),
+ version_id: 2,
+ minimum_version_id: 2,
+ fields: vmstate_fields! {
+ vmstate_of!(PL011Registers, flags),
+ vmstate_of!(PL011Registers, line_control),
+ vmstate_of!(PL011Registers, receive_status_error_clear),
+ vmstate_of!(PL011Registers, control),
+ vmstate_of!(PL011Registers, dmacr),
+ vmstate_of!(PL011Registers, int_enabled),
+ vmstate_of!(PL011Registers, int_level),
+ vmstate_of!(PL011Registers, read_fifo),
+ vmstate_of!(PL011Registers, ilpr),
+ vmstate_of!(PL011Registers, ibrd),
+ vmstate_of!(PL011Registers, fbrd),
+ vmstate_of!(PL011Registers, ifl),
+ vmstate_of!(PL011Registers, read_pos),
+ vmstate_of!(PL011Registers, read_count),
+ vmstate_of!(PL011Registers, read_trigger),
+ },
+ ..Zeroable::ZERO
+};
+
+pub static VMSTATE_PL011: VMStateDescription = VMStateDescription {
+ name: c"pl011".as_ptr(),
+ version_id: 2,
+ minimum_version_id: 2,
+ post_load: Some(pl011_post_load),
+ fields: vmstate_fields! {
+ vmstate_unused!(core::mem::size_of::<u32>()),
+ vmstate_struct!(PL011State, regs, &VMSTATE_PL011_REGS, BqlRefCell<PL011Registers>),
+ },
+ subsections: vmstate_subsections! {
+ VMSTATE_PL011_CLOCK
+ },
+ ..Zeroable::ZERO
+};
+
+qemu_api::declare_properties! {
+ PL011_PROPERTIES,
+ qemu_api::define_property!(
+ c"chardev",
+ PL011State,
+ char_backend,
+ unsafe { &qdev_prop_chr },
+ CharBackend
+ ),
+ qemu_api::define_property!(
+ c"migrate-clk",
+ PL011State,
+ migrate_clock,
+ unsafe { &qdev_prop_bool },
+ bool,
+ default = true
+ ),
+}
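
The extern "C" functions above follow the usual pattern for bridging vmstate callbacks back into safe Rust: the migration core hands the device back as an opaque *mut c_void, the callback recovers a shared reference, then delegates to an ordinary method. The sketch below is not part of the patch and uses a hypothetical DemoState; it only spells out that pattern and the return conventions the callbacks rely on.

use std::{
    ffi::{c_int, c_void},
    ptr::NonNull,
};

struct DemoState {
    migrate_clock: bool,
}

impl DemoState {
    fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        Ok(())
    }
}

/// "needed" callback: returning true keeps the subsection in the stream.
extern "C" fn demo_clock_needed(opaque: *mut c_void) -> bool {
    // SAFETY (assumed): the migration core passes back the same DemoState
    // pointer that the VMStateDescription was registered against.
    let state = NonNull::new(opaque).unwrap().cast::<DemoState>();
    unsafe { state.as_ref().migrate_clock }
}

/// post_load callback: a negative return value fails the incoming migration.
extern "C" fn demo_post_load(opaque: *mut c_void, version_id: c_int) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<DemoState>();
    match unsafe { state.as_ref().post_load(version_id as u32) } {
        Ok(()) => 0,
        Err(()) => -1,
    }
}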
diff --git a/rust/hw/char/pl011/src/lib.rs b/rust/hw/char/pl011/src/lib.rs
new file mode 100644
index 0000000..5c4fbc9
--- /dev/null
+++ b/rust/hw/char/pl011/src/lib.rs
@@ -0,0 +1,22 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! PL011 QEMU Device Model
+//!
+//! This library implements a device model for the PrimeCell® UART (PL011)
+//! device in QEMU.
+//!
+//! # Library crate
+//!
+//! See [`PL011State`](crate::device::PL011State) for the device model type and
+//! the [`registers`] module for register types.
+
+mod device;
+mod device_class;
+mod registers;
+
+pub use device::pl011_create;
+
+pub const TYPE_PL011: &::std::ffi::CStr = c"pl011";
+pub const TYPE_PL011_LUMINARY: &::std::ffi::CStr = c"pl011_luminary";
diff --git a/rust/hw/char/pl011/src/registers.rs b/rust/hw/char/pl011/src/registers.rs
new file mode 100644
index 0000000..7ececd3
--- /dev/null
+++ b/rust/hw/char/pl011/src/registers.rs
@@ -0,0 +1,350 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Device registers exposed as typed structs which are backed by arbitrary
+//! integer bitmaps. [`Data`], [`Control`], [`LineControl`], etc.
+
+// For more detail see the PL011 Technical Reference Manual DDI0183:
+// https://developer.arm.com/documentation/ddi0183/latest/
+
+use bilge::prelude::*;
+use bits::bits;
+use qemu_api::{impl_vmstate_bitsized, impl_vmstate_forward};
+
+/// Offset of each register from the base memory address of the device.
+#[doc(alias = "offset")]
+#[allow(non_camel_case_types)]
+#[repr(u64)]
+#[derive(Debug, Eq, PartialEq, qemu_api_macros::TryInto)]
+pub enum RegisterOffset {
+ /// Data Register
+ ///
+ /// A write to this register initiates the actual data transmission
+ #[doc(alias = "UARTDR")]
+ DR = 0x000,
+ /// Receive Status Register or Error Clear Register
+ #[doc(alias = "UARTRSR")]
+ #[doc(alias = "UARTECR")]
+ RSR = 0x004,
+ /// Flag Register
+ ///
+ /// A read of this register shows if transmission is complete
+ #[doc(alias = "UARTFR")]
+ FR = 0x018,
+ /// Fractional Baud Rate Register
+ ///
+ /// responsible for baud rate speed
+ #[doc(alias = "UARTFBRD")]
+ FBRD = 0x028,
+ /// `IrDA` Low-Power Counter Register
+ #[doc(alias = "UARTILPR")]
+ ILPR = 0x020,
+ /// Integer Baud Rate Register
+ ///
+ /// Responsible for baud rate speed
+ #[doc(alias = "UARTIBRD")]
+ IBRD = 0x024,
+ /// line control register (data frame format)
+ #[doc(alias = "UARTLCR_H")]
+ LCR_H = 0x02C,
+ /// Toggle UART, transmission or reception
+ #[doc(alias = "UARTCR")]
+ CR = 0x030,
+ /// Interrupt FIFO Level Select Register
+ #[doc(alias = "UARTIFLS")]
+ FLS = 0x034,
+ /// Interrupt Mask Set/Clear Register
+ #[doc(alias = "UARTIMSC")]
+ IMSC = 0x038,
+ /// Raw Interrupt Status Register
+ #[doc(alias = "UARTRIS")]
+ RIS = 0x03C,
+ /// Masked Interrupt Status Register
+ #[doc(alias = "UARTMIS")]
+ MIS = 0x040,
+ /// Interrupt Clear Register
+ #[doc(alias = "UARTICR")]
+ ICR = 0x044,
+ /// DMA control Register
+ #[doc(alias = "UARTDMACR")]
+ DMACR = 0x048,
+ ///// Reserved, offsets `0x04C` to `0x07C`.
+ //Reserved = 0x04C,
+}
+
+/// Receive Status Register / Data Register common error bits
+///
+/// The `UARTRSR` register is updated only when a read occurs
+/// from the `UARTDR` register with the same status information
+/// that can also be obtained by reading the `UARTDR` register
+#[bitsize(8)]
+#[derive(Clone, Copy, Default, DebugBits, FromBits)]
+pub struct Errors {
+ pub framing_error: bool,
+ pub parity_error: bool,
+ pub break_error: bool,
+ pub overrun_error: bool,
+ _reserved_unpredictable: u4,
+}
+
+/// Data Register, `UARTDR`
+///
+/// The `UARTDR` register is the data register; write for TX and
+/// read for RX. It is a 12-bit register, where bits 7..0 are the
+/// character and bits 11..8 are error bits.
+#[bitsize(32)]
+#[derive(Clone, Copy, Default, DebugBits, FromBits)]
+#[doc(alias = "UARTDR")]
+pub struct Data {
+ pub data: u8,
+ pub errors: Errors,
+ _reserved: u16,
+}
+impl_vmstate_bitsized!(Data);
+
+impl Data {
+ // bilge is not very const-friendly, unfortunately
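+    // Bit 10 is the break-error (BE) bit within the Data register.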
+ pub const BREAK: Self = Self { value: 1 << 10 };
+}
+
+/// Receive Status Register / Error Clear Register, `UARTRSR/UARTECR`
+///
+/// This register provides a different way to read the four receive
+/// status error bits that can be found in bits 11..8 of the UARTDR
+/// on a read. It gets updated when the guest reads UARTDR, and the
+/// status bits correspond to the character that was just read.
+///
+/// The TRM confusingly describes this offset as UARTRSR for reads
+/// and UARTECR for writes, but really it's a single error status
+/// register where writing anything to the register clears the error
+/// bits.
+#[bitsize(32)]
+#[derive(Clone, Copy, DebugBits, FromBits)]
+pub struct ReceiveStatusErrorClear {
+ pub errors: Errors,
+ _reserved_unpredictable: u24,
+}
+impl_vmstate_bitsized!(ReceiveStatusErrorClear);
+
+impl ReceiveStatusErrorClear {
+ pub fn set_from_data(&mut self, data: Data) {
+ self.set_errors(data.errors());
+ }
+
+ pub fn reset(&mut self) {
+ // All the bits are cleared to 0 on reset.
+ *self = Self::default();
+ }
+}
+
+impl Default for ReceiveStatusErrorClear {
+ fn default() -> Self {
+ 0.into()
+ }
+}
+
+#[bitsize(32)]
+#[derive(Clone, Copy, DebugBits, FromBits)]
+/// Flag Register, `UARTFR`
+///
+/// This has the usual inbound RS232 modem-control signals, plus flags
+/// for RX and TX FIFO fill levels and a BUSY flag.
+#[doc(alias = "UARTFR")]
+pub struct Flags {
+ /// CTS: Clear to send
+ pub clear_to_send: bool,
+ /// DSR: Data set ready
+ pub data_set_ready: bool,
+ /// DCD: Data carrier detect
+ pub data_carrier_detect: bool,
+ /// BUSY: UART busy. In real hardware, set while the UART is
+ /// busy transmitting data. QEMU's implementation never sets BUSY.
+ pub busy: bool,
+ /// RXFE: Receive FIFO empty
+ pub receive_fifo_empty: bool,
+ /// TXFF: Transmit FIFO full
+ pub transmit_fifo_full: bool,
+ /// RXFF: Receive FIFO full
+ pub receive_fifo_full: bool,
+ /// TXFE: Transmit FIFO empty
+ pub transmit_fifo_empty: bool,
+ /// RI: Ring indicator
+ pub ring_indicator: bool,
+ _reserved_zero_no_modify: u23,
+}
+impl_vmstate_bitsized!(Flags);
+
+impl Flags {
+ pub fn reset(&mut self) {
+ *self = Self::default();
+ }
+}
+
+impl Default for Flags {
+ fn default() -> Self {
+ let mut ret: Self = 0.into();
+ // After reset TXFF, RXFF, and BUSY are 0, and TXFE and RXFE are 1
+ ret.set_receive_fifo_empty(true);
+ ret.set_transmit_fifo_empty(true);
+ ret
+ }
+}
+
+#[bitsize(32)]
+#[derive(Clone, Copy, DebugBits, FromBits)]
+/// Line Control Register, `UARTLCR_H`
+#[doc(alias = "UARTLCR_H")]
+pub struct LineControl {
+ /// BRK: Send break
+ pub send_break: bool,
+ /// PEN: Parity enable
+ pub parity_enabled: bool,
+ /// EPS: Even parity select
+ pub parity: Parity,
+ /// STP2: Two stop bits select
+ pub two_stops_bits: bool,
+ /// FEN: Enable FIFOs
+ pub fifos_enabled: Mode,
+ /// WLEN: Word length in bits
+ /// b11 = 8 bits
+ /// b10 = 7 bits
+ /// b01 = 6 bits
+ /// b00 = 5 bits.
+ pub word_length: WordLength,
+ /// SPS Stick parity select
+ pub sticky_parity: bool,
+ /// 31:8 - Reserved, do not modify, read as zero.
+ _reserved_zero_no_modify: u24,
+}
+impl_vmstate_bitsized!(LineControl);
+
+impl LineControl {
+ pub fn reset(&mut self) {
+ // All the bits are cleared to 0 when reset.
+ *self = 0.into();
+ }
+}
+
+impl Default for LineControl {
+ fn default() -> Self {
+ 0.into()
+ }
+}
+
+#[bitsize(1)]
+#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)]
+/// `EPS` "Even parity select", field of [Line Control
+/// register](LineControl).
+pub enum Parity {
+ Odd = 0,
+ Even = 1,
+}
+
+#[bitsize(1)]
+#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)]
+/// `FEN` "Enable FIFOs" or Device mode, field of [Line Control
+/// register](LineControl).
+pub enum Mode {
+    /// 0 = FIFOs are disabled (character mode), that is, the FIFOs become
+ /// 1-byte-deep holding registers
+ Character = 0,
+ /// 1 = transmit and receive FIFO buffers are enabled (FIFO mode).
+ FIFO = 1,
+}
+
+#[bitsize(2)]
+#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)]
+/// `WLEN` Word length, field of [Line Control register](LineControl).
+///
+/// These bits indicate the number of data bits transmitted or received in a
+/// frame as follows:
+pub enum WordLength {
+ /// b11 = 8 bits
+ _8Bits = 0b11,
+ /// b10 = 7 bits
+ _7Bits = 0b10,
+ /// b01 = 6 bits
+ _6Bits = 0b01,
+ /// b00 = 5 bits.
+ _5Bits = 0b00,
+}
+
+/// Control Register, `UARTCR`
+///
+/// The `UARTCR` register is the control register. It contains various
+/// enable bits, and the bits to write to set the usual outbound RS232
+/// modem control signals. All bits reset to 0 except TXE and RXE.
+#[bitsize(32)]
+#[doc(alias = "UARTCR")]
+#[derive(Clone, Copy, DebugBits, FromBits)]
+pub struct Control {
+ /// `UARTEN` UART enable: 0 = UART is disabled.
+ pub enable_uart: bool,
+ /// `SIREN` `SIR` enable: disable or enable IrDA SIR ENDEC.
+ /// QEMU does not model this.
+ pub enable_sir: bool,
+ /// `SIRLP` SIR low-power IrDA mode. QEMU does not model this.
+ pub sir_lowpower_irda_mode: u1,
+ /// Reserved, do not modify, read as zero.
+ _reserved_zero_no_modify: u4,
+ /// `LBE` Loopback enable: feed UART output back to the input
+ pub enable_loopback: bool,
+ /// `TXE` Transmit enable
+ pub enable_transmit: bool,
+ /// `RXE` Receive enable
+ pub enable_receive: bool,
+ /// `DTR` Data transmit ready
+ pub data_transmit_ready: bool,
+ /// `RTS` Request to send
+ pub request_to_send: bool,
+ /// `Out1` UART Out1 signal; can be used as DCD
+ pub out_1: bool,
+ /// `Out2` UART Out2 signal; can be used as RI
+ pub out_2: bool,
+ /// `RTSEn` RTS hardware flow control enable
+ pub rts_hardware_flow_control_enable: bool,
+ /// `CTSEn` CTS hardware flow control enable
+ pub cts_hardware_flow_control_enable: bool,
+ /// 31:16 - Reserved, do not modify, read as zero.
+ _reserved_zero_no_modify2: u16,
+}
+impl_vmstate_bitsized!(Control);
+
+impl Control {
+ pub fn reset(&mut self) {
+ *self = 0.into();
+ self.set_enable_receive(true);
+ self.set_enable_transmit(true);
+ }
+}
+
+impl Default for Control {
+ fn default() -> Self {
+ let mut ret: Self = 0.into();
+ ret.reset();
+ ret
+ }
+}
+
+bits! {
+ /// Interrupt status bits in UARTRIS, UARTMIS, UARTIMSC
+ #[derive(Default)]
+ pub struct Interrupt(u32) {
+ OE = 1 << 10,
+ BE = 1 << 9,
+ PE = 1 << 8,
+ FE = 1 << 7,
+ RT = 1 << 6,
+ TX = 1 << 5,
+ RX = 1 << 4,
+ DSR = 1 << 3,
+ DCD = 1 << 2,
+ CTS = 1 << 1,
+ RI = 1 << 0,
+
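+        // Composite masks: all error interrupts, and all modem-status interrupts.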
+ E = bits!(Self as u32: OE | BE | PE | FE),
+ MS = bits!(Self as u32: RI | DSR | DCD | CTS),
+ }
+}
+impl_vmstate_forward!(Interrupt);
diff --git a/rust/hw/meson.build b/rust/hw/meson.build
new file mode 100644
index 0000000..9749d4a
--- /dev/null
+++ b/rust/hw/meson.build
@@ -0,0 +1,2 @@
+subdir('char')
+subdir('timer')
diff --git a/rust/hw/timer/Kconfig b/rust/hw/timer/Kconfig
new file mode 100644
index 0000000..afd9803
--- /dev/null
+++ b/rust/hw/timer/Kconfig
@@ -0,0 +1,2 @@
+config X_HPET_RUST
+ bool
diff --git a/rust/hw/timer/hpet/Cargo.toml b/rust/hw/timer/hpet/Cargo.toml
new file mode 100644
index 0000000..6f07502
--- /dev/null
+++ b/rust/hw/timer/hpet/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "hpet"
+version = "0.1.0"
+authors = ["Zhao Liu <zhao1.liu@intel.com>"]
+description = "IA-PC High Precision Event Timer emulation in Rust"
+
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
+qemu_api = { path = "../../../qemu-api" }
+qemu_api_macros = { path = "../../../qemu-api-macros" }
+
+[lints]
+workspace = true
diff --git a/rust/hw/timer/hpet/meson.build b/rust/hw/timer/hpet/meson.build
new file mode 100644
index 0000000..c2d7c05
--- /dev/null
+++ b/rust/hw/timer/hpet/meson.build
@@ -0,0 +1,18 @@
+_libhpet_rs = static_library(
+ 'hpet',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ dependencies: [
+ qemu_api,
+ qemu_api_macros,
+ ],
+)
+
+rust_devices_ss.add(when: 'CONFIG_X_HPET_RUST', if_true: [declare_dependency(
+ link_whole: [_libhpet_rs],
+ # Putting proc macro crates in `dependencies` is necessary for Meson to find
+ # them when compiling the root per-target static rust lib.
+ dependencies: [qemu_api_macros],
+ variables: {'crate': 'hpet'},
+)])
diff --git a/rust/hw/timer/hpet/src/device.rs b/rust/hw/timer/hpet/src/device.rs
new file mode 100644
index 0000000..acf7251
--- /dev/null
+++ b/rust/hw/timer/hpet/src/device.rs
@@ -0,0 +1,1050 @@
+// Copyright (C) 2024 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{
+ ffi::{c_int, c_void, CStr},
+ mem::MaybeUninit,
+ pin::Pin,
+ ptr::{addr_of_mut, null_mut, NonNull},
+ slice::from_ref,
+};
+
+use qemu_api::{
+ bindings::{
+ address_space_memory, address_space_stl_le, qdev_prop_bit, qdev_prop_bool,
+ qdev_prop_uint32, qdev_prop_usize,
+ },
+ cell::{BqlCell, BqlRefCell},
+ irq::InterruptSource,
+ memory::{
+ hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder, MEMTXATTRS_UNSPECIFIED,
+ },
+ prelude::*,
+ qdev::{DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
+ qom::{ObjectImpl, ObjectType, ParentField, ParentInit},
+ qom_isa,
+ sysbus::{SysBusDevice, SysBusDeviceImpl},
+ timer::{Timer, CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND},
+ uninit_field_mut,
+ vmstate::VMStateDescription,
+ vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_validate,
+ zeroable::Zeroable,
+};
+
+use crate::fw_cfg::HPETFwConfig;
+
+/// Register space for each timer block (`HPET_BASE` is defined in hpet.h).
+const HPET_REG_SPACE_LEN: u64 = 0x400; // 1024 bytes
+
+/// Minimum recommended hardware implementation.
+const HPET_MIN_TIMERS: usize = 3;
+/// Maximum timers in each timer block.
+const HPET_MAX_TIMERS: usize = 32;
+
+/// Flags that HPETState.flags supports.
+const HPET_FLAG_MSI_SUPPORT_SHIFT: usize = 0;
+
+const HPET_NUM_IRQ_ROUTES: usize = 32;
+const HPET_LEGACY_PIT_INT: u32 = 0; // HPET_LEGACY_RTC_INT isn't defined here.
+const RTC_ISA_IRQ: usize = 8;
+
+const HPET_CLK_PERIOD: u64 = 10; // 10 ns
+const FS_PER_NS: u64 = 1000000; // 1000000 femtoseconds == 1 ns
+
+/// Revision ID (bits 0:7). Revision 1 is implemented (refer to v1.0a spec).
+const HPET_CAP_REV_ID_VALUE: u64 = 0x1;
+const HPET_CAP_REV_ID_SHIFT: usize = 0;
+/// Number of Timers (bits 8:12)
+const HPET_CAP_NUM_TIM_SHIFT: usize = 8;
+/// Counter Size (bit 13)
+const HPET_CAP_COUNT_SIZE_CAP_SHIFT: usize = 13;
+/// Legacy Replacement Route Capable (bit 15)
+const HPET_CAP_LEG_RT_CAP_SHIFT: usize = 15;
+/// Vendor ID (bits 16:31)
+const HPET_CAP_VENDER_ID_VALUE: u64 = 0x8086;
+const HPET_CAP_VENDER_ID_SHIFT: usize = 16;
+/// Main Counter Tick Period (bits 32:63)
+const HPET_CAP_CNT_CLK_PERIOD_SHIFT: usize = 32;
+
+/// Overall Enable (bit 0)
+const HPET_CFG_ENABLE_SHIFT: usize = 0;
+/// Legacy Replacement Route (bit 1)
+const HPET_CFG_LEG_RT_SHIFT: usize = 1;
+/// Other bits are reserved.
+const HPET_CFG_WRITE_MASK: u64 = 0x003;
+
+/// Bits 0, 7, and 16:31 are reserved.
+/// Bits 4, 5, 15, and 32:63 are read-only.
+const HPET_TN_CFG_WRITE_MASK: u64 = 0x7f4e;
+/// Timer N Interrupt Type (bit 1)
+const HPET_TN_CFG_INT_TYPE_SHIFT: usize = 1;
+/// Timer N Interrupt Enable (bit 2)
+const HPET_TN_CFG_INT_ENABLE_SHIFT: usize = 2;
+/// Timer N Type (Periodic enabled or not, bit 3)
+const HPET_TN_CFG_PERIODIC_SHIFT: usize = 3;
+/// Timer N Periodic Interrupt Capable (support Periodic or not, bit 4)
+const HPET_TN_CFG_PERIODIC_CAP_SHIFT: usize = 4;
+/// Timer N Size (timer size is 64-bits or 32 bits, bit 5)
+const HPET_TN_CFG_SIZE_CAP_SHIFT: usize = 5;
+/// Timer N Value Set (bit 6)
+const HPET_TN_CFG_SETVAL_SHIFT: usize = 6;
+/// Timer N 32-bit Mode (bit 8)
+const HPET_TN_CFG_32BIT_SHIFT: usize = 8;
+/// Timer N Interrupt Route (bits 9:13)
+const HPET_TN_CFG_INT_ROUTE_MASK: u64 = 0x3e00;
+const HPET_TN_CFG_INT_ROUTE_SHIFT: usize = 9;
+/// Timer N FSB Interrupt Enable (bit 14)
+const HPET_TN_CFG_FSB_ENABLE_SHIFT: usize = 14;
+/// Timer N FSB Interrupt Delivery (bit 15)
+const HPET_TN_CFG_FSB_CAP_SHIFT: usize = 15;
+/// Timer N Interrupt Routing Capability (bits 32:63)
+const HPET_TN_CFG_INT_ROUTE_CAP_SHIFT: usize = 32;
+
+#[derive(qemu_api_macros::TryInto)]
+#[repr(u64)]
+#[allow(non_camel_case_types)]
+/// Timer registers, masked by 0x18
+enum TimerRegister {
+ /// Timer N Configuration and Capability Register
+ CFG = 0,
+ /// Timer N Comparator Value Register
+ CMP = 8,
+ /// Timer N FSB Interrupt Route Register
+ ROUTE = 16,
+}
+
+#[derive(qemu_api_macros::TryInto)]
+#[repr(u64)]
+#[allow(non_camel_case_types)]
+/// Global registers
+enum GlobalRegister {
+ /// General Capabilities and ID Register
+ CAP = 0,
+ /// General Configuration Register
+ CFG = 0x10,
+ /// General Interrupt Status Register
+ INT_STATUS = 0x20,
+ /// Main Counter Value Register
+ COUNTER = 0xF0,
+}
+
+enum HPETRegister<'a> {
+ /// Global register in the range from `0` to `0xff`
+ Global(GlobalRegister),
+
+ /// Register in the timer block `0x100`...`0x3ff`
+ Timer(&'a BqlRefCell<HPETTimer>, TimerRegister),
+
+ /// Invalid address
+ #[allow(dead_code)]
+ Unknown(hwaddr),
+}
+
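+/// Result of decoding a guest access: the register that was hit, plus the
+/// bit offset (`shift`) and width (`len`) of the access within that 64-bit
+/// register.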
+struct HPETAddrDecode<'a> {
+ shift: u32,
+ len: u32,
+ reg: HPETRegister<'a>,
+}
+
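+/// Next counter value at which the low 32 bits of the main counter wrap to zero.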
+const fn hpet_next_wrap(cur_tick: u64) -> u64 {
+ (cur_tick | 0xffffffff) + 1
+}
+
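+/// Wrapping comparison of counter ticks: true if `a` is after `b`.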
+const fn hpet_time_after(a: u64, b: u64) -> bool {
+ ((b - a) as i64) < 0
+}
+
+const fn ticks_to_ns(value: u64) -> u64 {
+ value * HPET_CLK_PERIOD
+}
+
+const fn ns_to_ticks(value: u64) -> u64 {
+ value / HPET_CLK_PERIOD
+}
+
+// Avoid touching the bits that cannot be written.
+const fn hpet_fixup_reg(new: u64, old: u64, mask: u64) -> u64 {
+ (new & mask) | (old & !mask)
+}
+
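+/// True if the bit at `shift` goes from 0 in `old` to 1 in `new`.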
+const fn activating_bit(old: u64, new: u64, shift: usize) -> bool {
+ let mask: u64 = 1 << shift;
+ (old & mask == 0) && (new & mask != 0)
+}
+
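+/// True if the bit at `shift` goes from 1 in `old` to 0 in `new`.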
+const fn deactivating_bit(old: u64, new: u64, shift: usize) -> bool {
+ let mask: u64 = 1 << shift;
+ (old & mask != 0) && (new & mask == 0)
+}
+
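+/// Expiration callback: re-borrows the timer cell handed back by the QEMU timer.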
+fn timer_handler(timer_cell: &BqlRefCell<HPETTimer>) {
+ timer_cell.borrow_mut().callback()
+}
+
+/// HPET Timer Abstraction
+#[repr(C)]
+#[derive(Debug)]
+pub struct HPETTimer {
+ /// timer N index within the timer block (`HPETState`)
+ #[doc(alias = "tn")]
+ index: u8,
+ qemu_timer: Timer,
+ /// timer block abstraction containing this timer
+ state: NonNull<HPETState>,
+
+ // Memory-mapped, software visible timer registers
+ /// Timer N Configuration and Capability Register
+ config: u64,
+ /// Timer N Comparator Value Register
+ cmp: u64,
+ /// Timer N FSB Interrupt Route Register
+ fsb: u64,
+
+ // Hidden register state
+ /// comparator (extended to counter width)
+ cmp64: u64,
+ /// Last value written to comparator
+ period: u64,
+ /// timer pop will indicate wrap for one-shot 32-bit
+ /// mode. Next pop will be actual timer expiration.
+ wrap_flag: u8,
+ /// last value armed, to avoid timer storms
+ last: u64,
+}
+
+impl HPETTimer {
+ fn new(index: u8, state: *const HPETState) -> HPETTimer {
+ HPETTimer {
+ index,
+ // SAFETY: the HPETTimer will only be used after the timer
+ // is initialized below.
+ qemu_timer: unsafe { Timer::new() },
+ state: NonNull::new(state.cast_mut()).unwrap(),
+ config: 0,
+ cmp: 0,
+ fsb: 0,
+ cmp64: 0,
+ period: 0,
+ wrap_flag: 0,
+ last: 0,
+ }
+ }
+
+ fn init_timer_with_cell(cell: &BqlRefCell<Self>) {
+ let mut timer = cell.borrow_mut();
+ // SAFETY: HPETTimer is only used as part of HPETState, which is
+ // always pinned.
+ let qemu_timer = unsafe { Pin::new_unchecked(&mut timer.qemu_timer) };
+ qemu_timer.init_full(None, CLOCK_VIRTUAL, Timer::NS, 0, timer_handler, cell);
+ }
+
+ fn get_state(&self) -> &HPETState {
+ // SAFETY:
+ // the pointer is convertible to a reference
+ unsafe { self.state.as_ref() }
+ }
+
+ fn is_int_active(&self) -> bool {
+ self.get_state().is_timer_int_active(self.index.into())
+ }
+
+ const fn is_fsb_route_enabled(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_FSB_ENABLE_SHIFT) != 0
+ }
+
+ const fn is_periodic(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_PERIODIC_SHIFT) != 0
+ }
+
+ const fn is_int_enabled(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_INT_ENABLE_SHIFT) != 0
+ }
+
+ const fn is_32bit_mod(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_32BIT_SHIFT) != 0
+ }
+
+ const fn is_valset_enabled(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_SETVAL_SHIFT) != 0
+ }
+
+ fn clear_valset(&mut self) {
+ self.config &= !(1 << HPET_TN_CFG_SETVAL_SHIFT);
+ }
+
+ /// True if timer interrupt is level triggered; otherwise, edge triggered.
+ const fn is_int_level_triggered(&self) -> bool {
+ self.config & (1 << HPET_TN_CFG_INT_TYPE_SHIFT) != 0
+ }
+
+    /// Calculate the next value of the main counter that matches the
+    /// target (either entirely, or the low 32 bits only, depending on
+ /// the timer mode).
+ fn calculate_cmp64(&self, cur_tick: u64, target: u64) -> u64 {
+ if self.is_32bit_mod() {
+ let mut result: u64 = cur_tick.deposit(0, 32, target);
+ if result < cur_tick {
+ result += 0x100000000;
+ }
+ result
+ } else {
+ target
+ }
+ }
+
+ const fn get_individual_route(&self) -> usize {
+ ((self.config & HPET_TN_CFG_INT_ROUTE_MASK) >> HPET_TN_CFG_INT_ROUTE_SHIFT) as usize
+ }
+
+ fn get_int_route(&self) -> usize {
+ if self.index <= 1 && self.get_state().is_legacy_mode() {
+ // If LegacyReplacement Route bit is set, HPET specification requires
+ // timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC,
+ // timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC.
+ //
+ // If the LegacyReplacement Route bit is set, the individual routing
+ // bits for timers 0 and 1 (APIC or FSB) will have no impact.
+ //
+ // FIXME: Consider I/O APIC case.
+ if self.index == 0 {
+ 0
+ } else {
+ RTC_ISA_IRQ
+ }
+ } else {
+ // (If the LegacyReplacement Route bit is set) Timer 2-n will be
+ // routed as per the routing in the timer n config registers.
+ // ...
+ // If the LegacyReplacement Route bit is not set, the individual
+ // routing bits for each of the timers are used.
+ self.get_individual_route()
+ }
+ }
+
+ fn set_irq(&mut self, set: bool) {
+ let route = self.get_int_route();
+
+ if set && self.is_int_enabled() && self.get_state().is_hpet_enabled() {
+ if self.is_fsb_route_enabled() {
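+                // FSB (MSI-style) delivery: write the low 32 bits of the
+                // route register to the address held in its high 32 bits.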
+ // SAFETY:
+ // the parameters are valid.
+ unsafe {
+ address_space_stl_le(
+ addr_of_mut!(address_space_memory),
+ self.fsb >> 32, // Timer N FSB int addr
+ self.fsb as u32, // Timer N FSB int value, truncate!
+ MEMTXATTRS_UNSPECIFIED,
+ null_mut(),
+ );
+ }
+ } else if self.is_int_level_triggered() {
+ self.get_state().irqs[route].raise();
+ } else {
+ self.get_state().irqs[route].pulse();
+ }
+ } else if !self.is_fsb_route_enabled() {
+ self.get_state().irqs[route].lower();
+ }
+ }
+
+ fn update_irq(&mut self, set: bool) {
+ // If Timer N Interrupt Enable bit is 0, "the timer will
+ // still operate and generate appropriate status bits, but
+ // will not cause an interrupt"
+ self.get_state()
+ .update_int_status(self.index.into(), set && self.is_int_level_triggered());
+ self.set_irq(set);
+ }
+
+ fn arm_timer(&mut self, tick: u64) {
+ let mut ns = self.get_state().get_ns(tick);
+
+ // Clamp period to reasonable min value (1 us)
+ if self.is_periodic() && ns - self.last < 1000 {
+ ns = self.last + 1000;
+ }
+
+ self.last = ns;
+ self.qemu_timer.modify(self.last);
+ }
+
+ fn set_timer(&mut self) {
+ let cur_tick: u64 = self.get_state().get_ticks();
+
+ self.wrap_flag = 0;
+ self.cmp64 = self.calculate_cmp64(cur_tick, self.cmp);
+ if self.is_32bit_mod() {
+ // HPET spec says in one-shot 32-bit mode, generate an interrupt when
+ // counter wraps in addition to an interrupt with comparator match.
+ if !self.is_periodic() && self.cmp64 > hpet_next_wrap(cur_tick) {
+ self.wrap_flag = 1;
+ self.arm_timer(hpet_next_wrap(cur_tick));
+ return;
+ }
+ }
+ self.arm_timer(self.cmp64);
+ }
+
+ fn del_timer(&mut self) {
+ // Just remove the timer from the timer_list without destroying
+ // this timer instance.
+ self.qemu_timer.delete();
+
+ if self.is_int_active() {
+ // For level-triggered interrupt, this leaves interrupt status
+ // register set but lowers irq.
+ self.update_irq(true);
+ }
+ }
+
+ /// Configuration and Capability Register
+ fn set_tn_cfg_reg(&mut self, shift: u32, len: u32, val: u64) {
+ // TODO: Add trace point - trace_hpet_ram_write_tn_cfg(addr & 4)
+ let old_val: u64 = self.config;
+ let mut new_val: u64 = old_val.deposit(shift, len, val);
+ new_val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK);
+
+ // Switch level-type interrupt to edge-type.
+ if deactivating_bit(old_val, new_val, HPET_TN_CFG_INT_TYPE_SHIFT) {
+ // Do this before changing timer.config; otherwise, if
+ // HPET_TN_FSB is set, update_irq will not lower the qemu_irq.
+ self.update_irq(false);
+ }
+
+ self.config = new_val;
+
+ if activating_bit(old_val, new_val, HPET_TN_CFG_INT_ENABLE_SHIFT) && self.is_int_active() {
+ self.update_irq(true);
+ }
+
+ if self.is_32bit_mod() {
+ self.cmp = u64::from(self.cmp as u32); // truncate!
+ self.period = u64::from(self.period as u32); // truncate!
+ }
+
+ if self.get_state().is_hpet_enabled() {
+ self.set_timer();
+ }
+ }
+
+ /// Comparator Value Register
+ fn set_tn_cmp_reg(&mut self, shift: u32, len: u32, val: u64) {
+ let mut length = len;
+ let mut value = val;
+
+ // TODO: Add trace point - trace_hpet_ram_write_tn_cmp(addr & 4)
+ if self.is_32bit_mod() {
+ // High 32-bits are zero, leave them untouched.
+ if shift != 0 {
+ // TODO: Add trace point - trace_hpet_ram_write_invalid_tn_cmp()
+ return;
+ }
+ length = 64;
+ value = u64::from(value as u32); // truncate!
+ }
+
+ if !self.is_periodic() || self.is_valset_enabled() {
+ self.cmp = self.cmp.deposit(shift, length, value);
+ }
+
+ if self.is_periodic() {
+ self.period = self.period.deposit(shift, length, value);
+ }
+
+ self.clear_valset();
+ if self.get_state().is_hpet_enabled() {
+ self.set_timer();
+ }
+ }
+
+ /// FSB Interrupt Route Register
+ fn set_tn_fsb_route_reg(&mut self, shift: u32, len: u32, val: u64) {
+ self.fsb = self.fsb.deposit(shift, len, val);
+ }
+
+ fn reset(&mut self) {
+ self.del_timer();
+ self.cmp = u64::MAX; // Comparator Match Registers reset to all 1's.
+ self.config = (1 << HPET_TN_CFG_PERIODIC_CAP_SHIFT) | (1 << HPET_TN_CFG_SIZE_CAP_SHIFT);
+ if self.get_state().has_msi_flag() {
+ self.config |= 1 << HPET_TN_CFG_FSB_CAP_SHIFT;
+ }
+ // advertise availability of ioapic int
+ self.config |=
+ (u64::from(self.get_state().int_route_cap)) << HPET_TN_CFG_INT_ROUTE_CAP_SHIFT;
+ self.period = 0;
+ self.wrap_flag = 0;
+ }
+
+ /// timer expiration callback
+ fn callback(&mut self) {
+ let period: u64 = self.period;
+ let cur_tick: u64 = self.get_state().get_ticks();
+
+ if self.is_periodic() && period != 0 {
+ while hpet_time_after(cur_tick, self.cmp64) {
+ self.cmp64 += period;
+ }
+ if self.is_32bit_mod() {
+ self.cmp = u64::from(self.cmp64 as u32); // truncate!
+ } else {
+ self.cmp = self.cmp64;
+ }
+ self.arm_timer(self.cmp64);
+ } else if self.wrap_flag != 0 {
+ self.wrap_flag = 0;
+ self.arm_timer(self.cmp64);
+ }
+ self.update_irq(true);
+ }
+
+ const fn read(&self, reg: TimerRegister) -> u64 {
+ use TimerRegister::*;
+ match reg {
+ CFG => self.config, // including interrupt capabilities
+ CMP => self.cmp, // comparator register
+ ROUTE => self.fsb,
+ }
+ }
+
+ fn write(&mut self, reg: TimerRegister, value: u64, shift: u32, len: u32) {
+ use TimerRegister::*;
+ match reg {
+ CFG => self.set_tn_cfg_reg(shift, len, value),
+ CMP => self.set_tn_cmp_reg(shift, len, value),
+ ROUTE => self.set_tn_fsb_route_reg(shift, len, value),
+ }
+ }
+}
+
+/// HPET Event Timer Block Abstraction
+#[repr(C)]
+#[derive(qemu_api_macros::Object)]
+pub struct HPETState {
+ parent_obj: ParentField<SysBusDevice>,
+ iomem: MemoryRegion,
+
+ // HPET block Registers: Memory-mapped, software visible registers
+ /// General Capabilities and ID Register
+ capability: BqlCell<u64>,
+ /// General Configuration Register
+ config: BqlCell<u64>,
+ /// General Interrupt Status Register
+ #[doc(alias = "isr")]
+ int_status: BqlCell<u64>,
+ /// Main Counter Value Register
+ #[doc(alias = "hpet_counter")]
+ counter: BqlCell<u64>,
+
+ // Internal state
+ /// Capabilities that QEMU HPET supports.
+ /// bit 0: MSI (or FSB) support.
+ flags: u32,
+
+ /// Offset of main counter relative to qemu clock.
+ hpet_offset: BqlCell<u64>,
+ hpet_offset_saved: bool,
+
+ irqs: [InterruptSource; HPET_NUM_IRQ_ROUTES],
+ rtc_irq_level: BqlCell<u32>,
+ pit_enabled: InterruptSource,
+
+ /// Interrupt Routing Capability.
+ /// This field indicates to which interrupts in the I/O (x) APIC
+ /// the timers' interrupt can be routed, and is encoded in the
+ /// bits 32:64 of timer N's config register:
+ #[doc(alias = "intcap")]
+ int_route_cap: u32,
+
+ /// HPET timer array managed by this timer block.
+ #[doc(alias = "timer")]
+ timers: [BqlRefCell<HPETTimer>; HPET_MAX_TIMERS],
+ num_timers: usize,
+ num_timers_save: BqlCell<u8>,
+
+ /// Instance id (HPET timer block ID).
+ hpet_id: BqlCell<usize>,
+}
+
+impl HPETState {
+ const fn has_msi_flag(&self) -> bool {
+ self.flags & (1 << HPET_FLAG_MSI_SUPPORT_SHIFT) != 0
+ }
+
+ fn is_legacy_mode(&self) -> bool {
+ self.config.get() & (1 << HPET_CFG_LEG_RT_SHIFT) != 0
+ }
+
+ fn is_hpet_enabled(&self) -> bool {
+ self.config.get() & (1 << HPET_CFG_ENABLE_SHIFT) != 0
+ }
+
+ fn is_timer_int_active(&self, index: usize) -> bool {
+ self.int_status.get() & (1 << index) != 0
+ }
+
+ fn get_ticks(&self) -> u64 {
+ ns_to_ticks(CLOCK_VIRTUAL.get_ns() + self.hpet_offset.get())
+ }
+
+ fn get_ns(&self, tick: u64) -> u64 {
+ ticks_to_ns(tick) - self.hpet_offset.get()
+ }
+
+ fn handle_legacy_irq(&self, irq: u32, level: u32) {
+ if irq == HPET_LEGACY_PIT_INT {
+ if !self.is_legacy_mode() {
+ self.irqs[0].set(level != 0);
+ }
+ } else {
+ self.rtc_irq_level.set(level);
+ if !self.is_legacy_mode() {
+ self.irqs[RTC_ISA_IRQ].set(level != 0);
+ }
+ }
+ }
+
+ fn init_timers(this: &mut MaybeUninit<Self>) {
+ let state = this.as_ptr();
+ for index in 0..HPET_MAX_TIMERS {
+ let mut timer = uninit_field_mut!(*this, timers[index]);
+
+ // Initialize in two steps, to avoid calling Timer::init_full on a
+ // temporary that can be moved.
+ let timer = timer.write(BqlRefCell::new(HPETTimer::new(
+ index.try_into().unwrap(),
+ state,
+ )));
+ HPETTimer::init_timer_with_cell(timer);
+ }
+ }
+
+ fn update_int_status(&self, index: u32, level: bool) {
+ self.int_status
+ .set(self.int_status.get().deposit(index, 1, u64::from(level)));
+ }
+
+ /// General Configuration Register
+ fn set_cfg_reg(&self, shift: u32, len: u32, val: u64) {
+ let old_val = self.config.get();
+ let mut new_val = old_val.deposit(shift, len, val);
+
+ new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK);
+ self.config.set(new_val);
+
+ if activating_bit(old_val, new_val, HPET_CFG_ENABLE_SHIFT) {
+ // Enable main counter and interrupt generation.
+ self.hpet_offset
+ .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns());
+
+ for timer in self.timers.iter().take(self.num_timers) {
+ let mut t = timer.borrow_mut();
+
+ if t.is_int_enabled() && t.is_int_active() {
+ t.update_irq(true);
+ }
+ t.set_timer();
+ }
+ } else if deactivating_bit(old_val, new_val, HPET_CFG_ENABLE_SHIFT) {
+ // Halt main counter and disable interrupt generation.
+ self.counter.set(self.get_ticks());
+
+ for timer in self.timers.iter().take(self.num_timers) {
+ timer.borrow_mut().del_timer();
+ }
+ }
+
+ // i8254 and RTC output pins are disabled when HPET is in legacy mode
+ if activating_bit(old_val, new_val, HPET_CFG_LEG_RT_SHIFT) {
+ self.pit_enabled.set(false);
+ self.irqs[0].lower();
+ self.irqs[RTC_ISA_IRQ].lower();
+ } else if deactivating_bit(old_val, new_val, HPET_CFG_LEG_RT_SHIFT) {
+ // TODO: Add irq binding: qemu_irq_lower(s->irqs[0])
+ self.irqs[0].lower();
+ self.pit_enabled.set(true);
+ self.irqs[RTC_ISA_IRQ].set(self.rtc_irq_level.get() != 0);
+ }
+ }
+
+ /// General Interrupt Status Register: Read/Write Clear
+ fn set_int_status_reg(&self, shift: u32, _len: u32, val: u64) {
+ let new_val = val << shift;
+ let cleared = new_val & self.int_status.get();
+
+ for (index, timer) in self.timers.iter().take(self.num_timers).enumerate() {
+ if cleared & (1 << index) != 0 {
+ timer.borrow_mut().update_irq(false);
+ }
+ }
+ }
+
+ /// Main Counter Value Register
+ fn set_counter_reg(&self, shift: u32, len: u32, val: u64) {
+ if self.is_hpet_enabled() {
+ // TODO: Add trace point -
+ // trace_hpet_ram_write_counter_write_while_enabled()
+ //
+ // HPET spec says that writes to this register should only be
+            // done while the counter is halted. So this is undefined
+            // behavior. There's no need to forbid it, but when HPET is
+            // enabled, the changed counter value will not affect the
+            // tick count (i.e., the previously calculated offset will
+            // not be changed either).
+ }
+ self.counter
+ .set(self.counter.get().deposit(shift, len, val));
+ }
+
+ unsafe fn init(mut this: ParentInit<Self>) {
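+        // MMIO callbacks for the HPET register block; only 4- and 8-byte
+        // accesses are accepted.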
+ static HPET_RAM_OPS: MemoryRegionOps<HPETState> =
+ MemoryRegionOpsBuilder::<HPETState>::new()
+ .read(&HPETState::read)
+ .write(&HPETState::write)
+ .native_endian()
+ .valid_sizes(4, 8)
+ .impl_sizes(4, 8)
+ .build();
+
+ MemoryRegion::init_io(
+ &mut uninit_field_mut!(*this, iomem),
+ &HPET_RAM_OPS,
+ "hpet",
+ HPET_REG_SPACE_LEN,
+ );
+
+ Self::init_timers(&mut this);
+ }
+
+ fn post_init(&self) {
+ self.init_mmio(&self.iomem);
+ for irq in self.irqs.iter() {
+ self.init_irq(irq);
+ }
+ }
+
+ fn realize(&self) -> qemu_api::Result<()> {
+ if self.num_timers < HPET_MIN_TIMERS || self.num_timers > HPET_MAX_TIMERS {
+ Err(format!(
+ "hpet.num_timers must be between {HPET_MIN_TIMERS} and {HPET_MAX_TIMERS}"
+ ))?;
+ }
+ if self.int_route_cap == 0 {
+ Err("hpet.hpet-intcap property not initialized")?;
+ }
+
+ self.hpet_id.set(HPETFwConfig::assign_hpet_id()?);
+
+ // 64-bit General Capabilities and ID Register; LegacyReplacementRoute.
+ self.capability.set(
+ HPET_CAP_REV_ID_VALUE << HPET_CAP_REV_ID_SHIFT |
+ 1 << HPET_CAP_COUNT_SIZE_CAP_SHIFT |
+ 1 << HPET_CAP_LEG_RT_CAP_SHIFT |
+ HPET_CAP_VENDER_ID_VALUE << HPET_CAP_VENDER_ID_SHIFT |
+ ((self.num_timers - 1) as u64) << HPET_CAP_NUM_TIM_SHIFT | // indicate the last timer
+ (HPET_CLK_PERIOD * FS_PER_NS) << HPET_CAP_CNT_CLK_PERIOD_SHIFT, // 10 ns
+ );
+
+ self.init_gpio_in(2, HPETState::handle_legacy_irq);
+ self.init_gpio_out(from_ref(&self.pit_enabled));
+ Ok(())
+ }
+
+ fn reset_hold(&self, _type: ResetType) {
+ for timer in self.timers.iter().take(self.num_timers) {
+ timer.borrow_mut().reset();
+ }
+
+ self.counter.set(0);
+ self.config.set(0);
+ self.pit_enabled.set(true);
+ self.hpet_offset.set(0);
+
+ HPETFwConfig::update_hpet_cfg(
+ self.hpet_id.get(),
+ self.capability.get() as u32,
+ self.mmio_addr(0).unwrap(),
+ );
+
+ // to document that the RTC lowers its output on reset as well
+ self.rtc_irq_level.set(0);
+ }
+
+ fn decode(&self, mut addr: hwaddr, size: u32) -> HPETAddrDecode<'_> {
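+        // Registers are 64 bits wide but may be accessed as aligned 32-bit
+        // halves; bit 2 of the address selects the upper half.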
+ let shift = ((addr & 4) * 8) as u32;
+ let len = std::cmp::min(size * 8, 64 - shift);
+
+ addr &= !4;
+ let reg = if (0..=0xff).contains(&addr) {
+ GlobalRegister::try_from(addr).map(HPETRegister::Global)
+ } else {
+ let timer_id: usize = ((addr - 0x100) / 0x20) as usize;
+ if timer_id < self.num_timers {
+ // TODO: Add trace point - trace_hpet_ram_[read|write]_timer_id(timer_id)
+ TimerRegister::try_from(addr & 0x18)
+ .map(|reg| HPETRegister::Timer(&self.timers[timer_id], reg))
+ } else {
+ // TODO: Add trace point - trace_hpet_timer_id_out_of_range(timer_id)
+ Err(addr)
+ }
+ };
+
+ // reg is now a Result<HPETRegister, hwaddr>
+ // convert the Err case into HPETRegister as well
+ let reg = reg.unwrap_or_else(HPETRegister::Unknown);
+ HPETAddrDecode { shift, len, reg }
+ }
+
+ fn read(&self, addr: hwaddr, size: u32) -> u64 {
+ // TODO: Add trace point - trace_hpet_ram_read(addr)
+ let HPETAddrDecode { shift, reg, .. } = self.decode(addr, size);
+
+ use GlobalRegister::*;
+ use HPETRegister::*;
+ (match reg {
+ Timer(timer, tn_reg) => timer.borrow_mut().read(tn_reg),
+ Global(CAP) => self.capability.get(), /* including HPET_PERIOD 0x004 */
+ Global(CFG) => self.config.get(),
+ Global(INT_STATUS) => self.int_status.get(),
+ Global(COUNTER) => {
+ // TODO: Add trace point
+ // trace_hpet_ram_read_reading_counter(addr & 4, cur_tick)
+ if self.is_hpet_enabled() {
+ self.get_ticks()
+ } else {
+ self.counter.get()
+ }
+ }
+ Unknown(_) => {
+ // TODO: Add trace point- trace_hpet_ram_read_invalid()
+ 0
+ }
+ }) >> shift
+ }
+
+ fn write(&self, addr: hwaddr, value: u64, size: u32) {
+ let HPETAddrDecode { shift, len, reg } = self.decode(addr, size);
+
+ // TODO: Add trace point - trace_hpet_ram_write(addr, value)
+ use GlobalRegister::*;
+ use HPETRegister::*;
+ match reg {
+ Timer(timer, tn_reg) => timer.borrow_mut().write(tn_reg, value, shift, len),
+ Global(CAP) => {} // General Capabilities and ID Register: Read Only
+ Global(CFG) => self.set_cfg_reg(shift, len, value),
+ Global(INT_STATUS) => self.set_int_status_reg(shift, len, value),
+ Global(COUNTER) => self.set_counter_reg(shift, len, value),
+ Unknown(_) => {
+ // TODO: Add trace point - trace_hpet_ram_write_invalid()
+ }
+ }
+ }
+
+ fn pre_save(&self) -> i32 {
+ if self.is_hpet_enabled() {
+ self.counter.set(self.get_ticks());
+ }
+
+ /*
+ * The number of timers must match on source and destination, but it was
+ * also added to the migration stream. Check that it matches the value
+ * that was configured.
+ */
+ self.num_timers_save.set(self.num_timers as u8);
+ 0
+ }
+
+ fn post_load(&self, _version_id: u8) -> i32 {
+ for timer in self.timers.iter().take(self.num_timers) {
+ let mut t = timer.borrow_mut();
+
+ t.cmp64 = t.calculate_cmp64(t.get_state().counter.get(), t.cmp);
+ t.last = CLOCK_VIRTUAL.get_ns() - NANOSECONDS_PER_SECOND;
+ }
+
+ // Recalculate the offset between the main counter and guest time
+ if !self.hpet_offset_saved {
+ self.hpet_offset
+ .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns());
+ }
+
+ 0
+ }
+
+ fn is_rtc_irq_level_needed(&self) -> bool {
+ self.rtc_irq_level.get() != 0
+ }
+
+ fn is_offset_needed(&self) -> bool {
+ self.is_hpet_enabled() && self.hpet_offset_saved
+ }
+
+ fn validate_num_timers(&self, _version_id: u8) -> bool {
+ self.num_timers == self.num_timers_save.get().into()
+ }
+}
+
+qom_isa!(HPETState: SysBusDevice, DeviceState, Object);
+
+unsafe impl ObjectType for HPETState {
+ // No need for HPETClass. Just like OBJECT_DECLARE_SIMPLE_TYPE in C.
+ type Class = <SysBusDevice as ObjectType>::Class;
+ const TYPE_NAME: &'static CStr = crate::TYPE_HPET;
+}
+
+impl ObjectImpl for HPETState {
+ type ParentType = SysBusDevice;
+
+ const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = Some(Self::init);
+ const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
+ const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+}
+
+// TODO: Make these properties user-configurable!
+qemu_api::declare_properties! {
+ HPET_PROPERTIES,
+ qemu_api::define_property!(
+ c"timers",
+ HPETState,
+ num_timers,
+ unsafe { &qdev_prop_usize },
+ u8,
+ default = HPET_MIN_TIMERS
+ ),
+ qemu_api::define_property!(
+ c"msi",
+ HPETState,
+ flags,
+ unsafe { &qdev_prop_bit },
+ u32,
+ bit = HPET_FLAG_MSI_SUPPORT_SHIFT as u8,
+ default = false,
+ ),
+ qemu_api::define_property!(
+ c"hpet-intcap",
+ HPETState,
+ int_route_cap,
+ unsafe { &qdev_prop_uint32 },
+ u32,
+ default = 0
+ ),
+ qemu_api::define_property!(
+ c"hpet-offset-saved",
+ HPETState,
+ hpet_offset_saved,
+ unsafe { &qdev_prop_bool },
+ bool,
+ default = true
+ ),
+}
+
+unsafe extern "C" fn hpet_rtc_irq_level_needed(opaque: *mut c_void) -> bool {
+ // SAFETY:
+ // the pointer is convertible to a reference
+ let state: &HPETState = unsafe { NonNull::new(opaque.cast::<HPETState>()).unwrap().as_ref() };
+ state.is_rtc_irq_level_needed()
+}
+
+unsafe extern "C" fn hpet_offset_needed(opaque: *mut c_void) -> bool {
+ // SAFETY:
+ // the pointer is convertible to a reference
+ let state: &HPETState = unsafe { NonNull::new(opaque.cast::<HPETState>()).unwrap().as_ref() };
+ state.is_offset_needed()
+}
+
+unsafe extern "C" fn hpet_pre_save(opaque: *mut c_void) -> c_int {
+ // SAFETY:
+ // the pointer is convertible to a reference
+ let state: &mut HPETState =
+ unsafe { NonNull::new(opaque.cast::<HPETState>()).unwrap().as_mut() };
+ state.pre_save() as c_int
+}
+
+unsafe extern "C" fn hpet_post_load(opaque: *mut c_void, version_id: c_int) -> c_int {
+ // SAFETY:
+ // the pointer is convertible to a reference
+ let state: &mut HPETState =
+ unsafe { NonNull::new(opaque.cast::<HPETState>()).unwrap().as_mut() };
+ let version: u8 = version_id.try_into().unwrap();
+ state.post_load(version) as c_int
+}
+
+static VMSTATE_HPET_RTC_IRQ_LEVEL: VMStateDescription = VMStateDescription {
+ name: c"hpet/rtc_irq_level".as_ptr(),
+ version_id: 1,
+ minimum_version_id: 1,
+ needed: Some(hpet_rtc_irq_level_needed),
+ fields: vmstate_fields! {
+ vmstate_of!(HPETState, rtc_irq_level),
+ },
+ ..Zeroable::ZERO
+};
+
+static VMSTATE_HPET_OFFSET: VMStateDescription = VMStateDescription {
+ name: c"hpet/offset".as_ptr(),
+ version_id: 1,
+ minimum_version_id: 1,
+ needed: Some(hpet_offset_needed),
+ fields: vmstate_fields! {
+ vmstate_of!(HPETState, hpet_offset),
+ },
+ ..Zeroable::ZERO
+};
+
+static VMSTATE_HPET_TIMER: VMStateDescription = VMStateDescription {
+ name: c"hpet_timer".as_ptr(),
+ version_id: 1,
+ minimum_version_id: 1,
+ fields: vmstate_fields! {
+ vmstate_of!(HPETTimer, index),
+ vmstate_of!(HPETTimer, config),
+ vmstate_of!(HPETTimer, cmp),
+ vmstate_of!(HPETTimer, fsb),
+ vmstate_of!(HPETTimer, period),
+ vmstate_of!(HPETTimer, wrap_flag),
+ vmstate_of!(HPETTimer, qemu_timer),
+ },
+ ..Zeroable::ZERO
+};
+
+const VALIDATE_TIMERS_NAME: &CStr = c"num_timers must match";
+
+static VMSTATE_HPET: VMStateDescription = VMStateDescription {
+ name: c"hpet".as_ptr(),
+ version_id: 2,
+ minimum_version_id: 2,
+ pre_save: Some(hpet_pre_save),
+ post_load: Some(hpet_post_load),
+ fields: vmstate_fields! {
+ vmstate_of!(HPETState, config),
+ vmstate_of!(HPETState, int_status),
+ vmstate_of!(HPETState, counter),
+ vmstate_of!(HPETState, num_timers_save),
+ vmstate_validate!(HPETState, VALIDATE_TIMERS_NAME, HPETState::validate_num_timers),
+ vmstate_struct!(HPETState, timers[0 .. num_timers_save], &VMSTATE_HPET_TIMER, BqlRefCell<HPETTimer>, HPETState::validate_num_timers).with_version_id(0),
+ },
+ subsections: vmstate_subsections! {
+ VMSTATE_HPET_RTC_IRQ_LEVEL,
+ VMSTATE_HPET_OFFSET,
+ },
+ ..Zeroable::ZERO
+};
+
+impl DeviceImpl for HPETState {
+ fn properties() -> &'static [Property] {
+ &HPET_PROPERTIES
+ }
+
+ fn vmsd() -> Option<&'static VMStateDescription> {
+ Some(&VMSTATE_HPET)
+ }
+
+ const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(Self::realize);
+}
+
+impl ResettablePhasesImpl for HPETState {
+ const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
+}
+
+impl SysBusDeviceImpl for HPETState {}
diff --git a/rust/hw/timer/hpet/src/fw_cfg.rs b/rust/hw/timer/hpet/src/fw_cfg.rs
new file mode 100644
index 0000000..619d662
--- /dev/null
+++ b/rust/hw/timer/hpet/src/fw_cfg.rs
@@ -0,0 +1,68 @@
+// Copyright (C) 2024 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::ptr::addr_of_mut;
+
+use qemu_api::{cell::bql_locked, zeroable::Zeroable};
+
+/// Each `HPETState` represents an Event Timer Block. The v1 spec supports
+/// up to 8 blocks. QEMU uses only 1 block (on the PC machine).
+const HPET_MAX_NUM_EVENT_TIMER_BLOCK: usize = 8;
+
+#[repr(C, packed)]
+#[derive(Copy, Clone, Default)]
+pub struct HPETFwEntry {
+ pub event_timer_block_id: u32,
+ pub address: u64,
+ pub min_tick: u16,
+ pub page_prot: u8,
+}
+unsafe impl Zeroable for HPETFwEntry {}
+
+#[repr(C, packed)]
+#[derive(Copy, Clone, Default)]
+pub struct HPETFwConfig {
+ pub count: u8,
+ pub hpet: [HPETFwEntry; HPET_MAX_NUM_EVENT_TIMER_BLOCK],
+}
+unsafe impl Zeroable for HPETFwConfig {}
+
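+/// Exported un-mangled so the C side of QEMU can reference it; a `count` of
+/// `u8::MAX` means no timer block has been assigned an ID yet.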
+#[allow(non_upper_case_globals)]
+#[no_mangle]
+pub static mut hpet_fw_cfg: HPETFwConfig = HPETFwConfig {
+ count: u8::MAX,
+ ..Zeroable::ZERO
+};
+
+impl HPETFwConfig {
+ pub(crate) fn assign_hpet_id() -> Result<usize, &'static str> {
+ assert!(bql_locked());
+ // SAFETY: all accesses go through these methods, which guarantee
+ // that the accesses are protected by the BQL.
+ let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) };
+
+ if fw_cfg.count == u8::MAX {
+ // first instance
+ fw_cfg.count = 0;
+ }
+
+ if fw_cfg.count == 8 {
+ Err("Only 8 instances of HPET are allowed")?;
+ }
+
+ let id: usize = fw_cfg.count.into();
+ fw_cfg.count += 1;
+ Ok(id)
+ }
+
+ pub(crate) fn update_hpet_cfg(hpet_id: usize, timer_block_id: u32, address: u64) {
+ assert!(bql_locked());
+ // SAFETY: all accesses go through these methods, which guarantee
+ // that the accesses are protected by the BQL.
+ let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) };
+
+ fw_cfg.hpet[hpet_id].event_timer_block_id = timer_block_id;
+ fw_cfg.hpet[hpet_id].address = address;
+ }
+}
diff --git a/rust/hw/timer/hpet/src/lib.rs b/rust/hw/timer/hpet/src/lib.rs
new file mode 100644
index 0000000..a95cf14
--- /dev/null
+++ b/rust/hw/timer/hpet/src/lib.rs
@@ -0,0 +1,13 @@
+// Copyright (C) 2024 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! # HPET QEMU Device Model
+//!
+//! This library implements a device model for the IA-PC HPET (High
+//! Precision Event Timers) device in QEMU.
+
+pub mod device;
+pub mod fw_cfg;
+
+pub const TYPE_HPET: &::std::ffi::CStr = c"hpet";
diff --git a/rust/hw/timer/meson.build b/rust/hw/timer/meson.build
new file mode 100644
index 0000000..22a84f1
--- /dev/null
+++ b/rust/hw/timer/meson.build
@@ -0,0 +1 @@
+subdir('hpet')
diff --git a/rust/meson.build b/rust/meson.build
new file mode 100644
index 0000000..331f11b
--- /dev/null
+++ b/rust/meson.build
@@ -0,0 +1,39 @@
+subproject('anyhow-1-rs', required: true)
+subproject('bilge-0.2-rs', required: true)
+subproject('bilge-impl-0.2-rs', required: true)
+subproject('foreign-0.3-rs', required: true)
+subproject('libc-0.2-rs', required: true)
+
+anyhow_rs = dependency('anyhow-1-rs')
+bilge_rs = dependency('bilge-0.2-rs')
+bilge_impl_rs = dependency('bilge-impl-0.2-rs')
+foreign_rs = dependency('foreign-0.3-rs')
+libc_rs = dependency('libc-0.2-rs')
+
+subproject('proc-macro2-1-rs', required: true)
+subproject('quote-1-rs', required: true)
+subproject('syn-2-rs', required: true)
+
+quote_rs_native = dependency('quote-1-rs', native: true)
+syn_rs_native = dependency('syn-2-rs', native: true)
+proc_macro2_rs_native = dependency('proc-macro2-1-rs', native: true)
+
+qemuutil_rs = qemuutil.partial_dependency(link_args: true, links: true)
+
+genrs = []
+
+subdir('qemu-api-macros')
+subdir('bits')
+subdir('qemu-api')
+
+subdir('hw')
+
+cargo = find_program('cargo', required: false)
+
+if cargo.found()
+ run_target('rustfmt',
+ command: [config_host['MESON'], 'devenv',
+ '--workdir', '@CURRENT_SOURCE_DIR@',
+ cargo, 'fmt'],
+ depends: genrs)
+endif
diff --git a/rust/qemu-api-macros/Cargo.toml b/rust/qemu-api-macros/Cargo.toml
new file mode 100644
index 0000000..0cd40c8
--- /dev/null
+++ b/rust/qemu-api-macros/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "qemu_api_macros"
+version = "0.1.0"
+authors = ["Manos Pitsidianakis <manos.pitsidianakis@linaro.org>"]
+description = "Rust bindings for QEMU - Utility macros"
+resolver = "2"
+publish = false
+
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[lib]
+proc-macro = true
+
+[dependencies]
+proc-macro2 = "1"
+quote = "1"
+syn = { version = "2", features = ["extra-traits"] }
+
+[lints]
+workspace = true
diff --git a/rust/qemu-api-macros/meson.build b/rust/qemu-api-macros/meson.build
new file mode 100644
index 0000000..8610ce1
--- /dev/null
+++ b/rust/qemu-api-macros/meson.build
@@ -0,0 +1,19 @@
+_qemu_api_macros_rs = rust.proc_macro(
+ 'qemu_api_macros',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_args: [
+ '--cfg', 'use_fallback',
+ '--cfg', 'feature="syn-error"',
+ '--cfg', 'feature="proc-macro"',
+ ],
+ dependencies: [
+ proc_macro2_rs_native,
+ quote_rs_native,
+ syn_rs_native,
+ ],
+)
+
+qemu_api_macros = declare_dependency(
+ link_with: _qemu_api_macros_rs,
+)
diff --git a/rust/qemu-api-macros/src/bits.rs b/rust/qemu-api-macros/src/bits.rs
new file mode 100644
index 0000000..5ba8475
--- /dev/null
+++ b/rust/qemu-api-macros/src/bits.rs
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: MIT or Apache-2.0 or GPL-2.0-or-later
+
+// shadowing is useful together with "if let"
+#![allow(clippy::shadow_unrelated)]
+
+use proc_macro2::{
+ Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree, TokenTree as TT,
+};
+
+use crate::utils::MacroError;
+
+pub struct BitsConstInternal {
+ typ: TokenTree,
+}
+
+fn paren(ts: TokenStream) -> TokenTree {
+ TT::Group(Group::new(Delimiter::Parenthesis, ts))
+}
+
+fn ident(s: &'static str) -> TokenTree {
+ TT::Ident(Ident::new(s, Span::call_site()))
+}
+
+fn punct(ch: char) -> TokenTree {
+ TT::Punct(Punct::new(ch, Spacing::Alone))
+}
+
+/// Implements a recursive-descent parser that translates Boolean expressions on
+/// bitmasks to invocations of `const` functions defined by the `bits!` macro.
+impl BitsConstInternal {
+ // primary ::= '(' or ')'
+ // | ident
+    //           | '!' primary
+ fn parse_primary(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ let next = match tok {
+ TT::Group(ref g) => {
+ if g.delimiter() != Delimiter::Parenthesis && g.delimiter() != Delimiter::None {
+ return Err(MacroError::Message("expected parenthesis".into(), g.span()));
+ }
+ let mut stream = g.stream().into_iter();
+ let Some(first_tok) = stream.next() else {
+ return Err(MacroError::Message(
+ "expected operand, found ')'".into(),
+ g.span(),
+ ));
+ };
+ let mut output = TokenStream::new();
+ // start from the lowest precedence
+ let next = self.parse_or(first_tok, &mut stream, &mut output)?;
+ if let Some(tok) = next {
+ return Err(MacroError::Message(
+ format!("unexpected token {tok}"),
+ tok.span(),
+ ));
+ }
+ out.extend(Some(paren(output)));
+ it.next()
+ }
+ TT::Ident(_) => {
+ let mut output = TokenStream::new();
+ output.extend([
+ self.typ.clone(),
+ TT::Punct(Punct::new(':', Spacing::Joint)),
+ TT::Punct(Punct::new(':', Spacing::Joint)),
+ tok,
+ ]);
+ out.extend(Some(paren(output)));
+ it.next()
+ }
+ TT::Punct(ref p) => {
+ if p.as_char() != '!' {
+ return Err(MacroError::Message("expected operand".into(), p.span()));
+ }
+ let Some(rhs_tok) = it.next() else {
+ return Err(MacroError::Message(
+ "expected operand at end of input".into(),
+ p.span(),
+ ));
+ };
+ let next = self.parse_primary(rhs_tok, it, out)?;
+ out.extend([punct('.'), ident("invert"), paren(TokenStream::new())]);
+ next
+ }
+ _ => {
+ return Err(MacroError::Message("unexpected literal".into(), tok.span()));
+ }
+ };
+ Ok(next)
+ }
+
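+    /// Parse a left-associative chain of the binary operator `ch`, emitting a
+    /// call to the const method `method` for each occurrence.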
+ fn parse_binop<
+ F: Fn(
+ &Self,
+ TokenTree,
+ &mut dyn Iterator<Item = TokenTree>,
+ &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError>,
+ >(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ch: char,
+ f: F,
+ method: &'static str,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ let mut next = f(self, tok, it, out)?;
+ while next.is_some() {
+ let op = next.as_ref().unwrap();
+ let TT::Punct(ref p) = op else { break };
+ if p.as_char() != ch {
+ break;
+ }
+
+ let Some(rhs_tok) = it.next() else {
+ return Err(MacroError::Message(
+ "expected operand at end of input".into(),
+ p.span(),
+ ));
+ };
+ let mut rhs = TokenStream::new();
+ next = f(self, rhs_tok, it, &mut rhs)?;
+ out.extend([punct('.'), ident(method), paren(rhs)]);
+ }
+ Ok(next)
+ }
+
+ // sub ::= primary ('-' primary)*
+ pub fn parse_sub(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ self.parse_binop(tok, it, out, '-', Self::parse_primary, "difference")
+ }
+
+ // and ::= sub ('&' sub)*
+ fn parse_and(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ self.parse_binop(tok, it, out, '&', Self::parse_sub, "intersection")
+ }
+
+    // xor ::= and ('^' and)*
+ fn parse_xor(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ self.parse_binop(tok, it, out, '^', Self::parse_and, "symmetric_difference")
+ }
+
+ // or ::= xor ('|' xor)*
+ pub fn parse_or(
+ &self,
+ tok: TokenTree,
+ it: &mut dyn Iterator<Item = TokenTree>,
+ out: &mut TokenStream,
+ ) -> Result<Option<TokenTree>, MacroError> {
+ self.parse_binop(tok, it, out, '|', Self::parse_xor, "union")
+ }
+
+ pub fn parse(
+ it: &mut dyn Iterator<Item = TokenTree>,
+ ) -> Result<proc_macro2::TokenStream, MacroError> {
+ let mut pos = Span::call_site();
+ let mut typ = proc_macro2::TokenStream::new();
+
+ // Gobble everything up to an `@` sign, which is followed by a
+ // parenthesized expression; that is, all token trees except the
+ // last two form the type.
+ let next = loop {
+ let tok = it.next();
+ if let Some(ref t) = tok {
+ pos = t.span();
+ }
+ match tok {
+ None => break None,
+ Some(TT::Punct(ref p)) if p.as_char() == '@' => {
+ let tok = it.next();
+ if let Some(ref t) = tok {
+ pos = t.span();
+ }
+ break tok;
+ }
+ Some(x) => typ.extend(Some(x)),
+ }
+ };
+
+ let Some(tok) = next else {
+ return Err(MacroError::Message(
+ "expected expression, do not call this macro directly".into(),
+ pos,
+ ));
+ };
+ let TT::Group(ref _group) = tok else {
+ return Err(MacroError::Message(
+ "expected parenthesis, do not call this macro directly".into(),
+ tok.span(),
+ ));
+ };
+ let mut out = TokenStream::new();
+ let state = Self {
+ typ: TT::Group(Group::new(Delimiter::None, typ)),
+ };
+
+ let next = state.parse_primary(tok, it, &mut out)?;
+
+ // A parenthesized expression is a single production of the grammar,
+ // so the input must have reached the last token.
+ if let Some(tok) = next {
+ return Err(MacroError::Message(
+ format!("unexpected token {tok}"),
+ tok.span(),
+ ));
+ }
+ Ok(out)
+ }
+}
diff --git a/rust/qemu-api-macros/src/lib.rs b/rust/qemu-api-macros/src/lib.rs
new file mode 100644
index 0000000..c18bb4e
--- /dev/null
+++ b/rust/qemu-api-macros/src/lib.rs
@@ -0,0 +1,262 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use proc_macro::TokenStream;
+use quote::quote;
+use syn::{
+ parse_macro_input, parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, Data,
+ DeriveInput, Field, Fields, FieldsUnnamed, Ident, Meta, Path, Token, Variant,
+};
+
+mod utils;
+use utils::MacroError;
+
+mod bits;
+use bits::BitsConstInternal;
+
+fn get_fields<'a>(
+ input: &'a DeriveInput,
+ msg: &str,
+) -> Result<&'a Punctuated<Field, Comma>, MacroError> {
+ let Data::Struct(ref s) = &input.data else {
+ return Err(MacroError::Message(
+ format!("Struct required for {msg}"),
+ input.ident.span(),
+ ));
+ };
+ let Fields::Named(ref fs) = &s.fields else {
+ return Err(MacroError::Message(
+ format!("Named fields required for {msg}"),
+ input.ident.span(),
+ ));
+ };
+ Ok(&fs.named)
+}
+
+fn get_unnamed_field<'a>(input: &'a DeriveInput, msg: &str) -> Result<&'a Field, MacroError> {
+ let Data::Struct(ref s) = &input.data else {
+ return Err(MacroError::Message(
+ format!("Struct required for {msg}"),
+ input.ident.span(),
+ ));
+ };
+ let Fields::Unnamed(FieldsUnnamed { ref unnamed, .. }) = &s.fields else {
+ return Err(MacroError::Message(
+ format!("Tuple struct required for {msg}"),
+ s.fields.span(),
+ ));
+ };
+ if unnamed.len() != 1 {
+ return Err(MacroError::Message(
+ format!("A single field is required for {msg}"),
+ s.fields.span(),
+ ));
+ }
+ Ok(&unnamed[0])
+}
+
+fn is_c_repr(input: &DeriveInput, msg: &str) -> Result<(), MacroError> {
+ let expected = parse_quote! { #[repr(C)] };
+
+ if input.attrs.iter().any(|attr| attr == &expected) {
+ Ok(())
+ } else {
+ Err(MacroError::Message(
+ format!("#[repr(C)] required for {msg}"),
+ input.ident.span(),
+ ))
+ }
+}
+
+fn is_transparent_repr(input: &DeriveInput, msg: &str) -> Result<(), MacroError> {
+ let expected = parse_quote! { #[repr(transparent)] };
+
+ if input.attrs.iter().any(|attr| attr == &expected) {
+ Ok(())
+ } else {
+ Err(MacroError::Message(
+ format!("#[repr(transparent)] required for {msg}"),
+ input.ident.span(),
+ ))
+ }
+}
+
+fn derive_object_or_error(input: DeriveInput) -> Result<proc_macro2::TokenStream, MacroError> {
+ is_c_repr(&input, "#[derive(Object)]")?;
+
+ let name = &input.ident;
+ let parent = &get_fields(&input, "#[derive(Object)]")?[0].ident;
+
+ Ok(quote! {
+ ::qemu_api::assert_field_type!(#name, #parent,
+ ::qemu_api::qom::ParentField<<#name as ::qemu_api::qom::ObjectImpl>::ParentType>);
+
+ ::qemu_api::module_init! {
+ MODULE_INIT_QOM => unsafe {
+ ::qemu_api::bindings::type_register_static(&<#name as ::qemu_api::qom::ObjectImpl>::TYPE_INFO);
+ }
+ }
+ })
+}
+
+#[proc_macro_derive(Object)]
+pub fn derive_object(input: TokenStream) -> TokenStream {
+ let input = parse_macro_input!(input as DeriveInput);
+ let expanded = derive_object_or_error(input).unwrap_or_else(Into::into);
+
+ TokenStream::from(expanded)
+}
+
+fn derive_opaque_or_error(input: DeriveInput) -> Result<proc_macro2::TokenStream, MacroError> {
+ is_transparent_repr(&input, "#[derive(Wrapper)]")?;
+
+ let name = &input.ident;
+ let field = &get_unnamed_field(&input, "#[derive(Wrapper)]")?;
+ let typ = &field.ty;
+
+ // TODO: how to add "::qemu_api"? For now, this is only used in the
+ // qemu_api crate so it's not a problem.
+ Ok(quote! {
+ unsafe impl crate::cell::Wrapper for #name {
+ type Wrapped = <#typ as crate::cell::Wrapper>::Wrapped;
+ }
+ impl #name {
+ pub unsafe fn from_raw<'a>(ptr: *mut <Self as crate::cell::Wrapper>::Wrapped) -> &'a Self {
+ let ptr = ::std::ptr::NonNull::new(ptr).unwrap().cast::<Self>();
+ unsafe { ptr.as_ref() }
+ }
+
+ pub const fn as_mut_ptr(&self) -> *mut <Self as crate::cell::Wrapper>::Wrapped {
+ self.0.as_mut_ptr()
+ }
+
+ pub const fn as_ptr(&self) -> *const <Self as crate::cell::Wrapper>::Wrapped {
+ self.0.as_ptr()
+ }
+
+ pub const fn as_void_ptr(&self) -> *mut ::core::ffi::c_void {
+ self.0.as_void_ptr()
+ }
+
+ pub const fn raw_get(slot: *mut Self) -> *mut <Self as crate::cell::Wrapper>::Wrapped {
+ slot.cast()
+ }
+ }
+ })
+}
+
+#[proc_macro_derive(Wrapper)]
+pub fn derive_opaque(input: TokenStream) -> TokenStream {
+ let input = parse_macro_input!(input as DeriveInput);
+ let expanded = derive_opaque_or_error(input).unwrap_or_else(Into::into);
+
+ TokenStream::from(expanded)
+}
+
+#[allow(non_snake_case)]
+fn get_repr_uN(input: &DeriveInput, msg: &str) -> Result<Path, MacroError> {
+ let repr = input.attrs.iter().find(|attr| attr.path().is_ident("repr"));
+ if let Some(repr) = repr {
+ let nested = repr.parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)?;
+ for meta in nested {
+ match meta {
+ Meta::Path(path) if path.is_ident("u8") => return Ok(path),
+ Meta::Path(path) if path.is_ident("u16") => return Ok(path),
+ Meta::Path(path) if path.is_ident("u32") => return Ok(path),
+ Meta::Path(path) if path.is_ident("u64") => return Ok(path),
+ _ => {}
+ }
+ }
+ }
+
+ Err(MacroError::Message(
+ format!("#[repr(u8/u16/u32/u64) required for {msg}"),
+ input.ident.span(),
+ ))
+}
+
+fn get_variants(input: &DeriveInput) -> Result<&Punctuated<Variant, Comma>, MacroError> {
+ let Data::Enum(ref e) = &input.data else {
+ return Err(MacroError::Message(
+ "Cannot derive TryInto for union or struct.".to_string(),
+ input.ident.span(),
+ ));
+ };
+ if let Some(v) = e.variants.iter().find(|v| v.fields != Fields::Unit) {
+ return Err(MacroError::Message(
+ "Cannot derive TryInto for enum with non-unit variants.".to_string(),
+ v.fields.span(),
+ ));
+ }
+ Ok(&e.variants)
+}
+
+#[rustfmt::skip::macros(quote)]
+fn derive_tryinto_body(
+ name: &Ident,
+ variants: &Punctuated<Variant, Comma>,
+ repr: &Path,
+) -> Result<proc_macro2::TokenStream, MacroError> {
+ let discriminants: Vec<&Ident> = variants.iter().map(|f| &f.ident).collect();
+
+ Ok(quote! {
+ #(const #discriminants: #repr = #name::#discriminants as #repr;)*;
+ match value {
+ #(#discriminants => core::result::Result::Ok(#name::#discriminants),)*
+ _ => core::result::Result::Err(value),
+ }
+ })
+}
+
+#[rustfmt::skip::macros(quote)]
+fn derive_tryinto_or_error(input: DeriveInput) -> Result<proc_macro2::TokenStream, MacroError> {
+ let repr = get_repr_uN(&input, "#[derive(TryInto)]")?;
+ let name = &input.ident;
+ let body = derive_tryinto_body(name, get_variants(&input)?, &repr)?;
+ let errmsg = format!("invalid value for {name}");
+
+ Ok(quote! {
+ impl #name {
+ #[allow(dead_code)]
+ pub const fn into_bits(self) -> #repr {
+ self as #repr
+ }
+
+ #[allow(dead_code)]
+ pub const fn from_bits(value: #repr) -> Self {
+ match ({
+ #body
+ }) {
+ Ok(x) => x,
+ Err(_) => panic!(#errmsg)
+ }
+ }
+ }
+ impl core::convert::TryFrom<#repr> for #name {
+ type Error = #repr;
+
+ #[allow(ambiguous_associated_items)]
+ fn try_from(value: #repr) -> Result<Self, #repr> {
+ #body
+ }
+ }
+ })
+}
+
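+// Illustrative usage (a sketch only; the enum is hypothetical):
+// #[derive(TryInto)] expects a fieldless enum with a #[repr(u8/u16/u32/u64)]
+// and produces const from_bits()/into_bits() plus TryFrom<repr>, which returns
+// the raw value as the error:
+//
+//     #[repr(u8)]
+//     #[derive(Debug, PartialEq, qemu_api_macros::TryInto)]
+//     enum Mode { Off = 0, On = 1 }
+//
+//     assert_eq!(Mode::try_from(1u8), Ok(Mode::On));
+//     assert_eq!(Mode::try_from(7u8), Err(7));
+//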
+#[proc_macro_derive(TryInto)]
+pub fn derive_tryinto(input: TokenStream) -> TokenStream {
+ let input = parse_macro_input!(input as DeriveInput);
+ let expanded = derive_tryinto_or_error(input).unwrap_or_else(Into::into);
+
+ TokenStream::from(expanded)
+}
+
+#[proc_macro]
+pub fn bits_const_internal(ts: TokenStream) -> TokenStream {
+ let ts = proc_macro2::TokenStream::from(ts);
+ let mut it = ts.into_iter();
+
+ let expanded = BitsConstInternal::parse(&mut it).unwrap_or_else(Into::into);
+ TokenStream::from(expanded)
+}
diff --git a/rust/qemu-api-macros/src/utils.rs b/rust/qemu-api-macros/src/utils.rs
new file mode 100644
index 0000000..02c91ae
--- /dev/null
+++ b/rust/qemu-api-macros/src/utils.rs
@@ -0,0 +1,26 @@
+// Procedural macro utilities.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use proc_macro2::Span;
+use quote::quote_spanned;
+
+pub enum MacroError {
+ Message(String, Span),
+ ParseError(syn::Error),
+}
+
+impl From<syn::Error> for MacroError {
+ fn from(err: syn::Error) -> Self {
+ MacroError::ParseError(err)
+ }
+}
+
+impl From<MacroError> for proc_macro2::TokenStream {
+ fn from(err: MacroError) -> Self {
+ match err {
+ MacroError::Message(msg, span) => quote_spanned! { span => compile_error!(#msg); },
+ MacroError::ParseError(err) => err.into_compile_error(),
+ }
+ }
+}
diff --git a/rust/qemu-api/.gitignore b/rust/qemu-api/.gitignore
new file mode 100644
index 0000000..df6c216
--- /dev/null
+++ b/rust/qemu-api/.gitignore
@@ -0,0 +1,2 @@
+# Ignore generated bindings file overrides.
+/src/bindings.inc.rs
diff --git a/rust/qemu-api/Cargo.toml b/rust/qemu-api/Cargo.toml
new file mode 100644
index 0000000..db7000d
--- /dev/null
+++ b/rust/qemu-api/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "qemu_api"
+version = "0.1.0"
+authors = ["Manos Pitsidianakis <manos.pitsidianakis@linaro.org>"]
+description = "Rust bindings for QEMU"
+readme = "README.md"
+resolver = "2"
+publish = false
+
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[dependencies]
+qemu_api_macros = { path = "../qemu-api-macros" }
+anyhow = "~1.0"
+libc = "0.2.162"
+foreign = "~0.3.1"
+
+[features]
+default = ["debug_cell"]
+allocator = []
+debug_cell = []
+
+[lints]
+workspace = true
diff --git a/rust/qemu-api/README.md b/rust/qemu-api/README.md
new file mode 100644
index 0000000..ed1b7ab
--- /dev/null
+++ b/rust/qemu-api/README.md
@@ -0,0 +1,19 @@
+# QEMU bindings and API wrappers
+
+This library exports helper Rust types, Rust macros and C FFI bindings for internal QEMU APIs.
+
+The C bindings can be generated with `bindgen`, using this build target:
+
+```console
+$ make bindings.inc.rs
+```
+
+## Generate Rust documentation
+
+Common Cargo tasks can be performed from the QEMU build directory:
+
+```console
+$ make clippy
+$ make rustfmt
+$ make rustdoc
+```
diff --git a/rust/qemu-api/build.rs b/rust/qemu-api/build.rs
new file mode 100644
index 0000000..7849486
--- /dev/null
+++ b/rust/qemu-api/build.rs
@@ -0,0 +1,41 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#[cfg(unix)]
+use std::os::unix::fs::symlink as symlink_file;
+#[cfg(windows)]
+use std::os::windows::fs::symlink_file;
+use std::{env, fs::remove_file, io::Result, path::Path};
+
+fn main() -> Result<()> {
+ // Placing bindings.inc.rs in the source directory is supported
+ // but not documented or encouraged.
+ let path = env::var("MESON_BUILD_ROOT")
+ .unwrap_or_else(|_| format!("{}/src", env!("CARGO_MANIFEST_DIR")));
+
+ let file = format!("{path}/rust/qemu-api/bindings.inc.rs");
+ let file = Path::new(&file);
+ if !file.exists() {
+ panic!(concat!(
+ "\n",
+ " No generated C bindings found! Maybe you wanted one of\n",
+ " `make clippy`, `make rustfmt`, `make rustdoc`?\n",
+ "\n",
+ " For other uses of `cargo`, start a subshell with\n",
+ " `pyvenv/bin/meson devenv`, or point MESON_BUILD_ROOT to\n",
+ " the top of the build tree."
+ ));
+ }
+
+ let out_dir = env::var("OUT_DIR").unwrap();
+ let dest_path = format!("{out_dir}/bindings.inc.rs");
+ let dest_path = Path::new(&dest_path);
+ if dest_path.symlink_metadata().is_ok() {
+ remove_file(dest_path)?;
+ }
+ symlink_file(file, dest_path)?;
+
+ println!("cargo:rerun-if-changed=build.rs");
+ Ok(())
+}
diff --git a/rust/qemu-api/meson.build b/rust/qemu-api/meson.build
new file mode 100644
index 0000000..a090297
--- /dev/null
+++ b/rust/qemu-api/meson.build
@@ -0,0 +1,114 @@
+_qemu_api_cfg = run_command(rustc_args,
+ '--config-headers', config_host_h, '--features', files('Cargo.toml'),
+ capture: true, check: true).stdout().strip().splitlines()
+
+# _qemu_api_cfg += ['--cfg', 'feature="allocator"']
+if get_option('debug_mutex')
+ _qemu_api_cfg += ['--cfg', 'feature="debug_cell"']
+endif
+
+c_enums = [
+ 'DeviceCategory',
+ 'GpioPolarity',
+ 'MachineInitPhase',
+ 'MemoryDeviceInfoKind',
+ 'MigrationPolicy',
+ 'MigrationPriority',
+ 'QEMUChrEvent',
+ 'QEMUClockType',
+ 'ResetType',
+ 'device_endian',
+ 'module_init_type',
+]
+_qemu_api_bindgen_args = []
+foreach enum : c_enums
+ _qemu_api_bindgen_args += ['--rustified-enum', enum]
+endforeach
+c_bitfields = [
+ 'ClockEvent',
+ 'VMStateFlags',
+]
+foreach enum : c_bitfields
+ _qemu_api_bindgen_args += ['--bitfield-enum', enum]
+endforeach
+
+# TODO: Remove this comment when the clang/libclang mismatch issue is solved.
+#
+# Rust bindings generation with `bindgen` might fail in some cases where the
+# detected `libclang` does not match the expected `clang` version/target. In
+# this case you must pass the path to `clang` and `libclang` to your build
+# command invocation using the environment variables CLANG_PATH and
+# LIBCLANG_PATH
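+# (for example, and only as an illustration with hypothetical paths:
+# `CLANG_PATH=/usr/bin/clang LIBCLANG_PATH=/usr/lib/libclang.so make`).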
+_qemu_api_bindings_inc_rs = rust.bindgen(
+ input: 'wrapper.h',
+ dependencies: common_ss.all_dependencies(),
+ output: 'bindings.inc.rs',
+ include_directories: bindings_incdir,
+ bindgen_version: ['>=0.60.0'],
+ args: bindgen_args_common + _qemu_api_bindgen_args,
+ )
+
+_qemu_api_rs = static_library(
+ 'qemu_api',
+ structured_sources(
+ [
+ 'src/lib.rs',
+ 'src/assertions.rs',
+ 'src/bindings.rs',
+ 'src/bitops.rs',
+ 'src/callbacks.rs',
+ 'src/cell.rs',
+ 'src/chardev.rs',
+ 'src/errno.rs',
+ 'src/error.rs',
+ 'src/irq.rs',
+ 'src/log.rs',
+ 'src/memory.rs',
+ 'src/module.rs',
+ 'src/prelude.rs',
+ 'src/qdev.rs',
+ 'src/qom.rs',
+ 'src/sysbus.rs',
+ 'src/timer.rs',
+ 'src/uninit.rs',
+ 'src/vmstate.rs',
+ 'src/zeroable.rs',
+ ],
+ {'.' : _qemu_api_bindings_inc_rs},
+ ),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: _qemu_api_cfg,
+ dependencies: [anyhow_rs, foreign_rs, libc_rs, qemu_api_macros, qemuutil_rs,
+ qom, hwcore, chardev, migration],
+)
+
+rust.test('rust-qemu-api-tests', _qemu_api_rs,
+ suite: ['unit', 'rust'])
+
+qemu_api = declare_dependency(link_with: [_qemu_api_rs],
+ dependencies: [qemu_api_macros, qom, hwcore, chardev, migration])
+
+# Doctests are essentially integration tests, so they need the same dependencies.
+# Note that running them requires the object files for C code, so place them
+# in a separate suite that is run by the "build" CI jobs rather than "check".
+rust.doctest('rust-qemu-api-doctests',
+ _qemu_api_rs,
+ protocol: 'rust',
+ dependencies: qemu_api,
+ suite: ['doc', 'rust'])
+
+test('rust-qemu-api-integration',
+ executable(
+ 'rust-qemu-api-integration',
+ files('tests/tests.rs', 'tests/vmstate_tests.rs'),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_args: ['--test'],
+ install: false,
+ dependencies: [qemu_api]),
+ args: [
+ '--test', '--test-threads', '1',
+ '--format', 'pretty',
+ ],
+ protocol: 'rust',
+ suite: ['unit', 'rust'])
diff --git a/rust/qemu-api/src/assertions.rs b/rust/qemu-api/src/assertions.rs
new file mode 100644
index 0000000..a2d38c8
--- /dev/null
+++ b/rust/qemu-api/src/assertions.rs
@@ -0,0 +1,152 @@
+// Copyright 2024, Red Hat Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#![doc(hidden)]
+//! This module provides macros to check the equality of types and
+//! the type of `struct` fields. This can be useful to ensure that
+//! types match the expectations of C code.
+//!
+//! Documentation is hidden because it only exposes macros, which
+//! are exported directly from `qemu_api`.
+
+// Based on https://stackoverflow.com/questions/64251852/x/70978292#70978292
+// (stackoverflow answers are released under MIT license).
+
+#[doc(hidden)]
+pub trait EqType {
+ type Itself;
+}
+
+impl<T> EqType for T {
+ type Itself = T;
+}
+
+/// Assert that two types are the same.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::assert_same_type;
+/// # use std::ops::Deref;
+/// assert_same_type!(u32, u32);
+/// assert_same_type!(<Box<u32> as Deref>::Target, u32);
+/// ```
+///
+/// Different types will cause a compile failure
+///
+/// ```compile_fail
+/// # use qemu_api::assert_same_type;
+/// assert_same_type!(&Box<u32>, &u32);
+/// ```
+#[macro_export]
+macro_rules! assert_same_type {
+ ($t1:ty, $t2:ty) => {
+ const _: () = {
+ #[allow(unused)]
+ fn assert_same_type(v: $t1) {
+ fn types_must_be_equal<T, U>(_: T)
+ where
+ T: $crate::assertions::EqType<Itself = U>,
+ {
+ }
+ types_must_be_equal::<_, $t2>(v);
+ }
+ };
+ };
+}
+
+/// Assert that a field of a struct has the given type.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::assert_field_type;
+/// pub struct A {
+/// field1: u32,
+/// }
+///
+/// assert_field_type!(A, field1, u32);
+/// ```
+///
+/// Different types will cause a compile failure
+///
+/// ```compile_fail
+/// # use qemu_api::assert_field_type;
+/// # pub struct A { field1: u32 }
+/// assert_field_type!(A, field1, i32);
+/// ```
+#[macro_export]
+macro_rules! assert_field_type {
+ (@internal $param_name:ident, $ti:ty, $t:ty, $($field:tt)*) => {
+ const _: () = {
+ #[allow(unused)]
+ fn assert_field_type($param_name: &$t) {
+ fn types_must_be_equal<T, U>(_: &T)
+ where
+ T: $crate::assertions::EqType<Itself = U>,
+ {
+ }
+ types_must_be_equal::<_, $ti>(&$($field)*);
+ }
+ };
+ };
+
+ ($t:ty, $i:tt, $ti:ty) => {
+ $crate::assert_field_type!(@internal v, $ti, $t, v.$i);
+ };
+
+ ($t:ty, $i:tt, $ti:ty, num = $num:ident) => {
+ $crate::assert_field_type!(@internal v, $ti, $t, v.$i[0]);
+ };
+}
+
+/// Assert that an expression matches a pattern. This can also be
+/// useful to compare enums that do not implement `Eq`.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::assert_match;
+/// // JoinHandle does not implement `Eq`, therefore the result
+/// // does not either.
+/// let result: Result<std::thread::JoinHandle<()>, u32> = Err(42);
+/// assert_match!(result, Err(42));
+/// ```
+#[macro_export]
+macro_rules! assert_match {
+ ($a:expr, $b:pat) => {
+ assert!(
+ match $a {
+ $b => true,
+ _ => false,
+ },
+ "{} = {:?} does not match {}",
+ stringify!($a),
+ $a,
+ stringify!($b)
+ );
+ };
+}
+
+/// Assert at compile time that an expression is true. This is similar
+/// to `const { assert!(...); }` but it works outside functions, as well as
+/// on versions of Rust before 1.79.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::static_assert;
+/// static_assert!("abc".len() == 3);
+/// ```
+///
+/// ```compile_fail
+/// # use qemu_api::static_assert;
+/// static_assert!("abc".len() == 2); // does not compile
+/// ```
+#[macro_export]
+macro_rules! static_assert {
+ ($x:expr) => {
+ const _: () = assert!($x);
+ };
+}
diff --git a/rust/qemu-api/src/bindings.rs b/rust/qemu-api/src/bindings.rs
new file mode 100644
index 0000000..057de4b
--- /dev/null
+++ b/rust/qemu-api/src/bindings.rs
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#![allow(
+ dead_code,
+ improper_ctypes_definitions,
+ improper_ctypes,
+ non_camel_case_types,
+ non_snake_case,
+ non_upper_case_globals,
+ unsafe_op_in_unsafe_fn,
+ clippy::pedantic,
+ clippy::restriction,
+ clippy::style,
+ clippy::missing_const_for_fn,
+ clippy::ptr_offset_with_cast,
+ clippy::useless_transmute,
+ clippy::missing_safety_doc
+)]
+
+//! `bindgen`-generated declarations.
+
+#[cfg(MESON)]
+include!("bindings.inc.rs");
+
+#[cfg(not(MESON))]
+include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs"));
+
+// SAFETY: these are implemented in C; the bindings need to assert that the
+// BQL is taken, either directly or via `BqlCell` and `BqlRefCell`.
+// When bindings for character devices are introduced, this can be
+// moved to the Opaque<> wrapper in src/chardev.rs.
+unsafe impl Send for CharBackend {}
+unsafe impl Sync for CharBackend {}
+
+// SAFETY: this is a pure data struct
+unsafe impl Send for CoalescedMemoryRange {}
+unsafe impl Sync for CoalescedMemoryRange {}
+
+// SAFETY: these are constants and vtables; the Send and Sync requirements
+// are deferred to the unsafe callbacks that they contain
+unsafe impl Send for MemoryRegionOps {}
+unsafe impl Sync for MemoryRegionOps {}
+
+unsafe impl Send for Property {}
+unsafe impl Sync for Property {}
+
+unsafe impl Send for TypeInfo {}
+unsafe impl Sync for TypeInfo {}
+
+unsafe impl Send for VMStateDescription {}
+unsafe impl Sync for VMStateDescription {}
+
+unsafe impl Send for VMStateField {}
+unsafe impl Sync for VMStateField {}
+
+unsafe impl Send for VMStateInfo {}
+unsafe impl Sync for VMStateInfo {}
diff --git a/rust/qemu-api/src/bitops.rs b/rust/qemu-api/src/bitops.rs
new file mode 100644
index 0000000..b1e3a53
--- /dev/null
+++ b/rust/qemu-api/src/bitops.rs
@@ -0,0 +1,119 @@
+// Copyright (C) 2024 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! This module provides bit operation extensions to integer types.
+//! It is usually included via the `qemu_api` prelude.
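+//!
+//! A minimal usage sketch (the values below are chosen purely for
+//! illustration):
+//!
+//! ```
+//! use qemu_api::bitops::IntegerExt;
+//!
+//! // extract() reads a bit field, deposit() writes one.
+//! assert_eq!(0xf000_u32.extract(12, 4), 0xf);
+//! assert_eq!(0u32.deposit(8, 8, 0x2a_u32), 0x2a00);
+//! ```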
+
+use std::ops::{
+ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
+ Mul, MulAssign, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign,
+};
+
+/// Trait for extensions to integer types
+pub trait IntegerExt:
+ Add<Self, Output = Self> + AddAssign<Self> +
+ BitAnd<Self, Output = Self> + BitAndAssign<Self> +
+ BitOr<Self, Output = Self> + BitOrAssign<Self> +
+ BitXor<Self, Output = Self> + BitXorAssign<Self> +
+ Copy +
+ Div<Self, Output = Self> + DivAssign<Self> +
+ Eq +
+ Mul<Self, Output = Self> + MulAssign<Self> +
+ Not<Output = Self> + Ord + PartialOrd +
+ Rem<Self, Output = Self> + RemAssign<Self> +
+ Shl<Self, Output = Self> + ShlAssign<Self> +
+ Shl<u32, Output = Self> + ShlAssign<u32> + // add more as needed
+ Shr<Self, Output = Self> + ShrAssign<Self> +
+ Shr<u32, Output = Self> + ShrAssign<u32> // add more as needed
+{
+ const BITS: u32;
+ const MAX: Self;
+ const MIN: Self;
+ const ONE: Self;
+ const ZERO: Self;
+
+ #[inline]
+ #[must_use]
+ fn bit(start: u32) -> Self
+ {
+ debug_assert!(start < Self::BITS);
+
+ Self::ONE << start
+ }
+
+ #[inline]
+ #[must_use]
+ fn mask(start: u32, length: u32) -> Self
+ {
+ /* FIXME: Implement a more elegant check with error handling support? */
+ debug_assert!(start < Self::BITS && length > 0 && length <= Self::BITS - start);
+
+ (Self::MAX >> (Self::BITS - length)) << start
+ }
+
+ #[inline]
+ #[must_use]
+ fn deposit<U: IntegerExt>(self, start: u32, length: u32,
+ fieldval: U) -> Self
+ where Self: From<U>
+ {
+ debug_assert!(length <= U::BITS);
+
+ let mask = Self::mask(start, length);
+ (self & !mask) | ((Self::from(fieldval) << start) & mask)
+ }
+
+ #[inline]
+ #[must_use]
+ fn extract(self, start: u32, length: u32) -> Self
+ {
+ let mask = Self::mask(start, length);
+ (self & mask) >> start
+ }
+}
+
+macro_rules! impl_num_ext {
+ ($type:ty) => {
+ impl IntegerExt for $type {
+ const BITS: u32 = <$type>::BITS;
+ const MAX: Self = <$type>::MAX;
+ const MIN: Self = <$type>::MIN;
+ const ONE: Self = 1;
+ const ZERO: Self = 0;
+ }
+ };
+}
+
+impl_num_ext!(u8);
+impl_num_ext!(u16);
+impl_num_ext!(u32);
+impl_num_ext!(u64);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_deposit() {
+ assert_eq!(15u32.deposit(8, 8, 1u32), 256 + 15);
+ assert_eq!(15u32.deposit(8, 1, 255u8), 256 + 15);
+ }
+
+ #[test]
+ fn test_extract() {
+ assert_eq!(15u32.extract(2, 4), 3);
+ }
+
+ #[test]
+ fn test_bit() {
+ assert_eq!(u8::bit(7), 128);
+ assert_eq!(u32::bit(16), 0x10000);
+ }
+
+ #[test]
+ fn test_mask() {
+ assert_eq!(u8::mask(7, 1), 128);
+ assert_eq!(u32::mask(8, 8), 0xff00);
+ }
+}
diff --git a/rust/qemu-api/src/callbacks.rs b/rust/qemu-api/src/callbacks.rs
new file mode 100644
index 0000000..9642a16
--- /dev/null
+++ b/rust/qemu-api/src/callbacks.rs
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+
+//! Utility functions to deal with callbacks from C to Rust.
+
+use std::{mem, ptr::NonNull};
+
+/// Trait for functions (types implementing [`Fn`]) that can be used as
+/// callbacks. These include both zero-capture closures and function pointers.
+///
+/// In Rust, calling a function through the `Fn` trait normally requires a
+/// `self` parameter, even though for zero-sized functions (including function
+/// pointers) the type itself contains all necessary information to call the
+/// function. This trait provides a `call` function that doesn't require `self`,
+/// allowing zero-sized functions to be called using only their type.
+///
+/// This enables zero-sized functions to be passed entirely through generic
+/// parameters and resolved at compile-time. A typical use is a function
+/// receiving an unused parameter of generic type `F` and calling it via
+/// `F::call` or passing it to another function via `func::<F>`.
+///
+/// QEMU uses this trick to create wrappers to C callbacks. The wrappers
+/// are needed to convert an opaque `*mut c_void` into a Rust reference,
+/// but they only have a single opaque that they can use. The `FnCall`
+/// trait makes it possible to use that opaque for `self` or any other
+/// reference:
+///
+/// ```ignore
+/// // The compiler creates a new `rust_bh_cb` wrapper for each function
+/// // passed to `qemu_bh_schedule_oneshot` below.
+/// unsafe extern "C" fn rust_bh_cb<T, F: for<'a> FnCall<(&'a T,)>>(
+/// opaque: *mut c_void,
+/// ) {
+/// // SAFETY: the opaque was passed as a reference to `T`.
+/// F::call((unsafe { &*(opaque.cast::<T>()) }, ))
+/// }
+///
+/// // The `_f` parameter is unused but it helps the compiler build the appropriate `F`.
+/// // Using a reference allows usage in const context.
+/// fn qemu_bh_schedule_oneshot<T, F: for<'a> FnCall<(&'a T,)>>(_f: &F, opaque: &T) {
+/// let cb: unsafe extern "C" fn(*mut c_void) = rust_bh_cb::<T, F>;
+/// unsafe {
+/// bindings::qemu_bh_schedule_oneshot(cb, opaque as *const T as *const c_void as *mut c_void)
+/// }
+/// }
+/// ```
+///
+/// Each wrapper is a separate instance of `rust_bh_cb` and is therefore
+/// compiled to a separate function ("monomorphization"). If you wanted
+/// to pass `self` as the opaque value, the generic parameters would be
+/// `rust_bh_cb::<Self, F>`.
+///
+/// `Args` is a tuple type whose types are the arguments of the function,
+/// while `R` is the returned type.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::callbacks::FnCall;
+/// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String {
+/// F::call((s,))
+/// }
+///
+/// let s: String = call_it(&str::to_owned, "hello world");
+/// assert_eq!(s, "hello world");
+/// ```
+///
+/// Note that the compiler will produce a different version of `call_it` for
+/// each function that is passed to it. Therefore the argument is not really
+/// used, except to decide what is `F` and what `F::call` does.
+///
+/// Attempting to pass a non-zero-sized closure causes a compile-time failure:
+///
+/// ```compile_fail
+/// # use qemu_api::callbacks::FnCall;
+/// # fn call_it<'a, F: FnCall<(&'a str,), String>>(_f: &F, s: &'a str) -> String {
+/// # F::call((s,))
+/// # }
+/// let x: &'static str = "goodbye world";
+/// call_it(&move |_| String::from(x), "hello world");
+/// ```
+///
+/// `()` can be used to indicate "no function":
+///
+/// ```
+/// # use qemu_api::callbacks::FnCall;
+/// fn optional<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> Option<String> {
+/// if F::IS_SOME {
+/// Some(F::call((s,)))
+/// } else {
+/// None
+/// }
+/// }
+///
+/// assert!(optional(&(), "hello world").is_none());
+/// ```
+///
+/// Invoking `F::call` will then be a run-time error.
+///
+/// ```should_panic
+/// # use qemu_api::callbacks::FnCall;
+/// # fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String {
+/// # F::call((s,))
+/// # }
+/// let s: String = call_it(&(), "hello world"); // panics
+/// ```
+///
+/// # Safety
+///
+/// Because `Self` is a zero-sized type, all instances of the type are
+/// equivalent. However, in addition to this, `Self` must have no invariants
+/// that could be violated by creating a reference to it.
+///
+/// This is always true for zero-capture closures and function pointers, as long
+/// as the code is able to name the function in the first place.
+pub unsafe trait FnCall<Args, R = ()>: 'static + Sync + Sized {
+ /// Referring to this internal constant asserts that the `Self` type is
+ /// zero-sized. Can be replaced by an inline const expression in
+ /// Rust 1.79.0+.
+ const ASSERT_ZERO_SIZED: () = { assert!(mem::size_of::<Self>() == 0) };
+
+ /// Referring to this constant asserts that the `Self` type is an actual
+ /// function type, which can be used to catch incorrect use of `()`
+ /// at compile time.
+ ///
+ /// # Examples
+ ///
+ /// ```compile_fail
+ /// # use qemu_api::callbacks::FnCall;
+ /// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String {
+ /// let _: () = F::ASSERT_IS_SOME;
+ /// F::call((s,))
+ /// }
+ ///
+ /// let s: String = call_it((), "hello world"); // does not compile
+ /// ```
+ ///
+ /// Note that this can be more simply `const { assert!(F::IS_SOME) }` in
+ /// Rust 1.79.0 or newer.
+ const ASSERT_IS_SOME: () = { assert!(Self::IS_SOME) };
+
+ /// `true` if `Self` is an actual function type and not `()`.
+ ///
+ /// # Examples
+ ///
+ /// You can use `IS_SOME` to catch this at compile time:
+ ///
+ /// ```compile_fail
+ /// # use qemu_api::callbacks::FnCall;
+ /// fn call_it<F: for<'a> FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String {
+ /// const { assert!(F::IS_SOME) }
+ /// F::call((s,))
+ /// }
+ ///
+ /// let s: String = call_it((), "hello world"); // does not compile
+ /// ```
+ const IS_SOME: bool;
+
+ /// `false` if `Self` is an actual function type, `true` if it is `()`.
+ fn is_none() -> bool {
+ !Self::IS_SOME
+ }
+
+ /// `true` if `Self` is an actual function type, `false` if it is `()`.
+ fn is_some() -> bool {
+ Self::IS_SOME
+ }
+
+ /// Call the function with the arguments in args.
+ fn call(a: Args) -> R;
+}
+
+/// `()` acts as a "null" callback. Using `()` and `function` is nicer
+/// than `None` and `Some(function)`, because the compiler is unable to
+/// infer the type of just `None`. Therefore, the trait itself acts as the
+/// option type, with functions [`FnCall::is_some`] and [`FnCall::is_none`].
+unsafe impl<Args, R> FnCall<Args, R> for () {
+ const IS_SOME: bool = false;
+
+ /// Call the function with the arguments in args.
+ fn call(_a: Args) -> R {
+ panic!("callback not specified")
+ }
+}
+
+macro_rules! impl_call {
+ ($($args:ident,)* ) => (
+ // SAFETY: because each function is treated as a separate type,
+ // accessing `FnCall` is only possible in code that would be
+ // allowed to call the function.
+ unsafe impl<F, $($args,)* R> FnCall<($($args,)*), R> for F
+ where
+ F: 'static + Sync + Sized + Fn($($args, )*) -> R,
+ {
+ const IS_SOME: bool = true;
+
+ #[inline(always)]
+ fn call(a: ($($args,)*)) -> R {
+ let _: () = Self::ASSERT_ZERO_SIZED;
+
+ // SAFETY: the safety of this method is the condition for implementing
+ // `FnCall`. As to the `NonNull` idiom to create a zero-sized type,
+ // see https://github.com/rust-lang/libs-team/issues/292.
+ let f: &'static F = unsafe { &*NonNull::<Self>::dangling().as_ptr() };
+ let ($($args,)*) = a;
+ f($($args,)*)
+ }
+ }
+ )
+}
+
+impl_call!(_1, _2, _3, _4, _5,);
+impl_call!(_1, _2, _3, _4,);
+impl_call!(_1, _2, _3,);
+impl_call!(_1, _2,);
+impl_call!(_1,);
+impl_call!();
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // The `_f` parameter is unused but it helps the compiler infer `F`.
+ fn do_test_call<'a, F: FnCall<(&'a str,), String>>(_f: &F) -> String {
+ F::call(("hello world",))
+ }
+
+ #[test]
+ fn test_call() {
+ assert_eq!(do_test_call(&str::to_owned), "hello world")
+ }
+
+ // The `_f` parameter is unused but it helps the compiler infer `F`.
+ fn do_test_is_some<'a, F: FnCall<(&'a str,), String>>(_f: &F) {
+ assert!(F::is_some());
+ }
+
+ #[test]
+ fn test_is_some() {
+ do_test_is_some(&str::to_owned);
+ }
+}
diff --git a/rust/qemu-api/src/cell.rs b/rust/qemu-api/src/cell.rs
new file mode 100644
index 0000000..27063b0
--- /dev/null
+++ b/rust/qemu-api/src/cell.rs
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: MIT
+//
+// This file is based on library/core/src/cell.rs from
+// Rust 1.82.0.
+//
+// Permission is hereby granted, free of charge, to any
+// person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the
+// Software without restriction, including without
+// limitation the rights to use, copy, modify, merge,
+// publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following
+// conditions:
+//
+// The above copyright notice and this permission notice
+// shall be included in all copies or substantial portions
+// of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! QEMU-specific mutable containers
+//!
+//! Rust memory safety is based on this rule: Given an object `T`, it is only
+//! possible to have one of the following:
+//!
+//! - Having several immutable references (`&T`) to the object (also known as
+//! **aliasing**).
+//! - Having one mutable reference (`&mut T`) to the object (also known as
+//! **mutability**).
+//!
+//! This is enforced by the Rust compiler. However, there are situations where
+//! this rule is not flexible enough. Sometimes it is required to have multiple
+//! references to an object and yet mutate it. In particular, QEMU objects
+//! usually have their pointer shared with the "outside world" very early in
+//! their lifetime, for example when they create their
+//! [`MemoryRegion`s](crate::bindings::MemoryRegion). Therefore, individual
+//! parts of a device must be made mutable in a controlled manner; this module
+//! provides the tools to do so.
+//!
+//! ## Cell types
+//!
+//! [`BqlCell<T>`] and [`BqlRefCell<T>`] allow doing this via the Big QEMU Lock.
+//! While they are essentially the same single-threaded primitives that are
+//! available in `std::cell`, the BQL allows them to be used from a
+//! multi-threaded context and to share references across threads, while
+//! maintaining Rust's safety guarantees. For this reason, unlike
+//! their `std::cell` counterparts, `BqlCell` and `BqlRefCell` implement the
+//! `Sync` trait.
+//!
+//! BQL checks are performed at run time with `assert!`, so they are active in
+//! release builds as well as debug builds; the optional `debug_cell` feature
+//! additionally records where the first borrow was taken, to improve panic
+//! messages during development.
+//!
+//! The two provide different ways of handling interior mutability.
+//! `BqlRefCell` is best suited for data that is primarily accessed by the
+//! device's own methods, where multiple reads and writes can be grouped within
+//! a single borrow and a mutable reference can be passed around. Instead,
+//! [`BqlCell`] is a better choice when sharing small pieces of data with
+//! external code (especially C code), because it provides simple get/set
+//! operations that can be used one at a time.
+//!
+//! Warning: While `BqlCell` and `BqlRefCell` are similar to their `std::cell`
+//! counterparts, they are not interchangeable. Using `std::cell` types in
+//! QEMU device implementations is usually incorrect and can lead to
+//! thread-safety issues.
+//!
+//! ### Example
+//!
+//! ```
+//! # use qemu_api::prelude::*;
+//! # use qemu_api::{cell::BqlRefCell, irq::InterruptSource, irq::IRQState};
+//! # use qemu_api::{sysbus::SysBusDevice, qom::Owned, qom::ParentField};
+//! # const N_GPIOS: usize = 8;
+//! # struct PL061Registers { /* ... */ }
+//! # unsafe impl ObjectType for PL061State {
+//! # type Class = <SysBusDevice as ObjectType>::Class;
+//! # const TYPE_NAME: &'static std::ffi::CStr = c"pl061";
+//! # }
+//! struct PL061State {
+//! parent_obj: ParentField<SysBusDevice>,
+//!
+//! // Configuration is read-only after initialization
+//! pullups: u32,
+//! pulldowns: u32,
+//!
+//! // Single values shared with C code use BqlCell, in this case via InterruptSource
+//! out: [InterruptSource; N_GPIOS],
+//! interrupt: InterruptSource,
+//!
+//! // Larger state accessed by device methods uses BqlRefCell or Mutex
+//! registers: BqlRefCell<PL061Registers>,
+//! }
+//! ```
+//!
+//! ### `BqlCell<T>`
+//!
+//! [`BqlCell<T>`] implements interior mutability by moving values in and out of
+//! the cell. That is, an `&mut T` to the inner value can never be obtained as
+//! long as the cell is shared. The value itself cannot be directly obtained
+//! without copying it, cloning it, or replacing it with something else. This
+//! type provides the following methods, all of which can be called only while
+//! the BQL is held:
+//!
+//! - For types that implement [`Copy`], the [`get`](BqlCell::get) method
+//! retrieves the current interior value by duplicating it.
+//! - For types that implement [`Default`], the [`take`](BqlCell::take) method
+//! replaces the current interior value with [`Default::default()`] and
+//! returns the replaced value.
+//! - All types have:
+//! - [`replace`](BqlCell::replace): replaces the current interior value and
+//! returns the replaced value.
+//! - [`set`](BqlCell::set): this method replaces the interior value,
+//! dropping the replaced value.
+//!
+//! ### `BqlRefCell<T>`
+//!
+//! [`BqlRefCell<T>`] uses Rust's lifetimes to implement "dynamic borrowing", a
+//! process whereby one can claim temporary, exclusive, mutable access to the
+//! inner value:
+//!
+//! ```ignore
+//! fn clear_interrupts(&self, val: u32) {
+//! // A mutable borrow gives read-write access to the registers
+//! let mut regs = self.registers.borrow_mut();
+//! let old = regs.interrupt_status();
+//! regs.update_interrupt_status(old & !val);
+//! }
+//! ```
+//!
+//! Borrows for `BqlRefCell<T>`s are tracked at _runtime_, unlike Rust's native
+//! reference types which are entirely tracked statically, at compile time.
+//! Multiple immutable borrows are allowed via [`borrow`](BqlRefCell::borrow),
+//! or a single mutable borrow via [`borrow_mut`](BqlRefCell::borrow_mut). The
+//! thread will panic if these rules are violated or if the BQL is not held.
+//!
+//! ## Opaque wrappers
+//!
+//! The cell types from the previous section are useful at the boundaries
+//! of code that requires interior mutability. When writing glue code that
+//! interacts directly with C structs, however, it is useful to operate
+//! at a lower level.
+//!
+//! C functions often violate Rust's fundamental assumptions about memory
+//! safety by modifying memory even if it is shared. Furthermore, C structs
+//! often start their life uninitialized and may be populated lazily.
+//!
+//! For this reason, this module provides the [`Opaque<T>`] type to opt out
+//! of Rust's usual guarantees about the wrapped type. Access to the wrapped
+//! value is always through raw pointers, obtained via methods like
+//! [`as_mut_ptr()`](Opaque::as_mut_ptr) and [`as_ptr()`](Opaque::as_ptr). These
+//! pointers can then be passed to C functions or dereferenced; both actions
+//! require `unsafe` blocks, making it clear where safety guarantees must be
+//! manually verified. For example
+//!
+//! ```ignore
+//! unsafe {
+//! let state = Opaque::<MyStruct>::uninit();
+//! qemu_struct_init(state.as_mut_ptr());
+//! }
+//! ```
+//!
+//! [`Opaque<T>`] will usually be wrapped one level further, so that
+//! bridge methods can be added to the wrapper:
+//!
+//! ```ignore
+//! pub struct MyStruct(Opaque<bindings::MyStruct>);
+//!
+//! impl MyStruct {
+//! fn new() -> Pin<Box<MyStruct>> {
+//! let result = Box::pin(unsafe { Opaque::uninit() });
+//! unsafe { qemu_struct_init(result.as_mut_ptr()) };
+//! result
+//! }
+//! }
+//! ```
+//!
+//! This pattern of wrapping bindgen-generated types in [`Opaque<T>`] provides
+//! several advantages:
+//!
+//! * The choice of traits to be implemented is not limited by the
+//! bindgen-generated code. For example, [`Drop`] can be added without
+//! disabling [`Copy`] on the underlying bindgen type
+//!
+//! * [`Send`] and [`Sync`] implementations can be controlled by the wrapper
+//! type rather than being automatically derived from the C struct's layout
+//!
+//! * Methods can be implemented in a separate crate from the bindgen-generated
+//! bindings
+//!
+//! * [`Debug`](std::fmt::Debug) and [`Display`](std::fmt::Display)
+//! implementations can be customized to be more readable than the raw C
+//! struct representation
+//!
+//! The [`Opaque<T>`] type does not include BQL validation; it is possible to
+//! assert in the code that the right lock is taken, to use it together
+//! with a custom lock guard type, or to let C code take the lock, as
+//! appropriate. It is also possible to use it with non-thread-safe
+//! types, since by default (unlike [`BqlCell`] and [`BqlRefCell`]
+//! it is neither `Sync` nor `Send`.
+//!
+//! While [`Opaque<T>`] is necessary for C interop, it should be used sparingly
+//! and only at FFI boundaries. For QEMU-specific types that need interior
+//! mutability, prefer [`BqlCell`] or [`BqlRefCell`].
+
+use std::{
+ cell::{Cell, UnsafeCell},
+ cmp::Ordering,
+ fmt,
+ marker::{PhantomData, PhantomPinned},
+ mem::{self, MaybeUninit},
+ ops::{Deref, DerefMut},
+ ptr::NonNull,
+};
+
+use crate::bindings;
+
+/// An internal function that is used by doctests.
+pub fn bql_start_test() {
+ // SAFETY: integration tests are run with --test-threads=1, while
+ // unit tests and doctests are not multithreaded and do not have
+ // any BQL-protected data. Just set bql_locked to true.
+ unsafe {
+ bindings::rust_bql_mock_lock();
+ }
+}
+
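+/// Returns whether the Big QEMU Lock is currently held, as tracked for the
+/// calling thread.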
+pub fn bql_locked() -> bool {
+ // SAFETY: the function does nothing but return a thread-local bool
+ unsafe { bindings::bql_locked() }
+}
+
+fn bql_block_unlock(increase: bool) {
+ // SAFETY: this only adjusts a counter
+ unsafe {
+ bindings::bql_block_unlock(increase);
+ }
+}
+
+/// A mutable memory location that is protected by the Big QEMU Lock.
+///
+/// # Memory layout
+///
+/// `BqlCell<T>` has the same in-memory representation as its inner type `T`.
+#[repr(transparent)]
+pub struct BqlCell<T> {
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: Same as for std::sync::Mutex. In the end this *is* a Mutex,
+// except it is stored out-of-line
+unsafe impl<T: Send> Send for BqlCell<T> {}
+unsafe impl<T: Send> Sync for BqlCell<T> {}
+
+impl<T: Copy> Clone for BqlCell<T> {
+ #[inline]
+ fn clone(&self) -> BqlCell<T> {
+ BqlCell::new(self.get())
+ }
+}
+
+impl<T: Default> Default for BqlCell<T> {
+ /// Creates a `BqlCell<T>`, with the `Default` value for T.
+ #[inline]
+ fn default() -> BqlCell<T> {
+ BqlCell::new(Default::default())
+ }
+}
+
+impl<T: PartialEq + Copy> PartialEq for BqlCell<T> {
+ #[inline]
+ fn eq(&self, other: &BqlCell<T>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+impl<T: Eq + Copy> Eq for BqlCell<T> {}
+
+impl<T: PartialOrd + Copy> PartialOrd for BqlCell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &BqlCell<T>) -> Option<Ordering> {
+ self.get().partial_cmp(&other.get())
+ }
+}
+
+impl<T: Ord + Copy> Ord for BqlCell<T> {
+ #[inline]
+ fn cmp(&self, other: &BqlCell<T>) -> Ordering {
+ self.get().cmp(&other.get())
+ }
+}
+
+impl<T> From<T> for BqlCell<T> {
+ /// Creates a new `BqlCell<T>` containing the given value.
+ fn from(t: T) -> BqlCell<T> {
+ BqlCell::new(t)
+ }
+}
+
+impl<T: fmt::Debug + Copy> fmt::Debug for BqlCell<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.get().fmt(f)
+ }
+}
+
+impl<T: fmt::Display + Copy> fmt::Display for BqlCell<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.get().fmt(f)
+ }
+}
+
+impl<T> BqlCell<T> {
+ /// Creates a new `BqlCell` containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ /// ```
+ #[inline]
+ pub const fn new(value: T) -> BqlCell<T> {
+ BqlCell {
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Sets the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ ///
+ /// c.set(10);
+ /// ```
+ #[inline]
+ pub fn set(&self, val: T) {
+ self.replace(val);
+ }
+
+ /// Replaces the contained value with `val`, and returns the old contained
+ /// value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let cell = BqlCell::new(5);
+ /// assert_eq!(cell.get(), 5);
+ /// assert_eq!(cell.replace(10), 5);
+ /// assert_eq!(cell.get(), 10);
+ /// ```
+ #[inline]
+ pub fn replace(&self, val: T) -> T {
+ assert!(bql_locked());
+ // SAFETY: This can cause data races if called from multiple threads,
+ // but it won't happen as long as C code accesses the value
+ // under BQL protection only.
+ mem::replace(unsafe { &mut *self.value.get() }, val)
+ }
+
+ /// Unwraps the value, consuming the cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ /// let five = c.into_inner();
+ ///
+ /// assert_eq!(five, 5);
+ /// ```
+ pub fn into_inner(self) -> T {
+ assert!(bql_locked());
+ self.value.into_inner()
+ }
+}
+
+impl<T: Copy> BqlCell<T> {
+ /// Returns a copy of the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ ///
+ /// let five = c.get();
+ /// ```
+ #[inline]
+ pub fn get(&self) -> T {
+ assert!(bql_locked());
+ // SAFETY: This can cause data races if called from multiple threads,
+ // but it won't happen as long as C code accesses the value
+ // under BQL protection only.
+ unsafe { *self.value.get() }
+ }
+}
+
+impl<T> BqlCell<T> {
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ ///
+ /// let ptr = c.as_ptr();
+ /// ```
+ #[inline]
+ pub const fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+}
+
+impl<T: Default> BqlCell<T> {
+ /// Takes the value of the cell, leaving `Default::default()` in its place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlCell::new(5);
+ /// let five = c.take();
+ ///
+ /// assert_eq!(five, 5);
+ /// assert_eq!(c.into_inner(), 0);
+ /// ```
+ pub fn take(&self) -> T {
+ self.replace(Default::default())
+ }
+}
+
+/// A mutable memory location with dynamically checked borrow rules,
+/// protected by the Big QEMU Lock.
+///
+/// See the [module-level documentation](self) for more.
+///
+/// # Memory layout
+///
+/// `BqlRefCell<T>` starts with the same in-memory representation as its
+/// inner type `T`.
+#[repr(C)]
+pub struct BqlRefCell<T> {
+ // It is important that this is the first field (which is not the case
+ // for std::cell::RefCell), so that we can use offset_of! on it.
+ // UnsafeCell and repr(C) both prevent usage of niches.
+ value: UnsafeCell<T>,
+ borrow: Cell<BorrowFlag>,
+ // Stores the location of the earliest currently active borrow.
+ // This gets updated whenever we go from having zero borrows
+ // to having a single borrow. When a borrow occurs, this gets included
+ // in the panic message
+ #[cfg(feature = "debug_cell")]
+ borrowed_at: Cell<Option<&'static std::panic::Location<'static>>>,
+}
+
+// Positive values represent the number of `BqlRef` active. Negative values
+// represent the number of `BqlRefMut` active. Right now QEMU's implementation
+// does not allow to create `BqlRefMut`s that refer to distinct, nonoverlapping
+// components of a `BqlRefCell` (e.g., different ranges of a slice).
+//
+// `BqlRef` and `BqlRefMut` are both two words in size, and so there will likely
+// never be enough `BqlRef`s or `BqlRefMut`s in existence to overflow half of
+// the `usize` range. Thus, a `BorrowFlag` will probably never overflow or
+// underflow. However, this is not a guarantee, as a pathological program could
+// repeatedly create and then mem::forget `BqlRef`s or `BqlRefMut`s. Thus, all
+// code must explicitly check for overflow and underflow in order to avoid
+// unsafety, or at least behave correctly in the event that overflow or
+// underflow happens (e.g., see BorrowRef::new).
+type BorrowFlag = isize;
+const UNUSED: BorrowFlag = 0;
+
+#[inline(always)]
+const fn is_writing(x: BorrowFlag) -> bool {
+ x < UNUSED
+}
+
+#[inline(always)]
+const fn is_reading(x: BorrowFlag) -> bool {
+ x > UNUSED
+}
+
+impl<T> BqlRefCell<T> {
+ /// Creates a new `BqlRefCell` containing `value`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlRefCell;
+ ///
+ /// let c = BqlRefCell::new(5);
+ /// ```
+ #[inline]
+ pub const fn new(value: T) -> BqlRefCell<T> {
+ BqlRefCell {
+ value: UnsafeCell::new(value),
+ borrow: Cell::new(UNUSED),
+ #[cfg(feature = "debug_cell")]
+ borrowed_at: Cell::new(None),
+ }
+ }
+}
+
+// This ensures the panicking code is outlined from `borrow_mut` for
+// `BqlRefCell`.
+#[inline(never)]
+#[cold]
+#[cfg(feature = "debug_cell")]
+fn panic_already_borrowed(source: &Cell<Option<&'static std::panic::Location<'static>>>) -> ! {
+ // If a borrow occurred, then we must already have an outstanding borrow,
+ // so `borrowed_at` will be `Some`
+ panic!("already borrowed at {:?}", source.take().unwrap())
+}
+
+#[inline(never)]
+#[cold]
+#[cfg(not(feature = "debug_cell"))]
+fn panic_already_borrowed() -> ! {
+ panic!("already borrowed")
+}
+
+impl<T> BqlRefCell<T> {
+ #[inline]
+ #[allow(clippy::unused_self)]
+ fn panic_already_borrowed(&self) -> ! {
+ #[cfg(feature = "debug_cell")]
+ {
+ panic_already_borrowed(&self.borrowed_at)
+ }
+ #[cfg(not(feature = "debug_cell"))]
+ {
+ panic_already_borrowed()
+ }
+ }
+
+ /// Immutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `BqlRef` exits scope. Multiple
+ /// immutable borrows can be taken out at the same time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlRefCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlRefCell::new(5);
+ ///
+ /// let borrowed_five = c.borrow();
+ /// let borrowed_five2 = c.borrow();
+ /// ```
+ ///
+ /// An example of panic:
+ ///
+ /// ```should_panic
+ /// use qemu_api::cell::BqlRefCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlRefCell::new(5);
+ ///
+ /// let m = c.borrow_mut();
+ /// let b = c.borrow(); // this causes a panic
+ /// ```
+ #[inline]
+ #[track_caller]
+ pub fn borrow(&self) -> BqlRef<'_, T> {
+ if let Some(b) = BorrowRef::new(&self.borrow) {
+ // `borrowed_at` is always the *first* active borrow
+ if b.borrow.get() == 1 {
+ #[cfg(feature = "debug_cell")]
+ self.borrowed_at.set(Some(std::panic::Location::caller()));
+ }
+
+ bql_block_unlock(true);
+
+ // SAFETY: `BorrowRef` ensures that there is only immutable access
+ // to the value while borrowed.
+ let value = unsafe { NonNull::new_unchecked(self.value.get()) };
+ BqlRef { value, borrow: b }
+ } else {
+ self.panic_already_borrowed()
+ }
+ }
+
+ /// Mutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `BqlRefMut` or all `BqlRefMut`s
+ /// derived from it exit scope. The value cannot be borrowed while this
+ /// borrow is active.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlRefCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlRefCell::new("hello".to_owned());
+ ///
+ /// *c.borrow_mut() = "bonjour".to_owned();
+ ///
+ /// assert_eq!(&*c.borrow(), "bonjour");
+ /// ```
+ ///
+ /// An example of panic:
+ ///
+ /// ```should_panic
+ /// use qemu_api::cell::BqlRefCell;
+ /// # qemu_api::cell::bql_start_test();
+ ///
+ /// let c = BqlRefCell::new(5);
+ /// let m = c.borrow();
+ ///
+ /// let b = c.borrow_mut(); // this causes a panic
+ /// ```
+ #[inline]
+ #[track_caller]
+ pub fn borrow_mut(&self) -> BqlRefMut<'_, T> {
+ if let Some(b) = BorrowRefMut::new(&self.borrow) {
+ #[cfg(feature = "debug_cell")]
+ {
+ self.borrowed_at.set(Some(std::panic::Location::caller()));
+ }
+
+ // SAFETY: this only adjusts a counter
+ bql_block_unlock(true);
+
+ // SAFETY: `BorrowRefMut` guarantees unique access.
+ let value = unsafe { NonNull::new_unchecked(self.value.get()) };
+ BqlRefMut {
+ value,
+ _borrow: b,
+ marker: PhantomData,
+ }
+ } else {
+ self.panic_already_borrowed()
+ }
+ }
+
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use qemu_api::cell::BqlRefCell;
+ ///
+ /// let c = BqlRefCell::new(5);
+ ///
+ /// let ptr = c.as_ptr();
+ /// ```
+ #[inline]
+ pub const fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+}
+
+// SAFETY: Same as for std::sync::Mutex. In the end this is a Mutex that is
+// stored out-of-line. Even though BqlRefCell includes Cells, they are
+// themselves protected by the Big QEMU Lock. Furthermore, the Big QEMU
+// Lock cannot be released while any borrow is active.
+unsafe impl<T> Send for BqlRefCell<T> where T: Send {}
+unsafe impl<T> Sync for BqlRefCell<T> {}
+
+impl<T: Clone> Clone for BqlRefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed.
+ #[inline]
+ #[track_caller]
+ fn clone(&self) -> BqlRefCell<T> {
+ BqlRefCell::new(self.borrow().clone())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if `source` is currently mutably borrowed.
+ #[inline]
+ #[track_caller]
+ fn clone_from(&mut self, source: &Self) {
+ self.value.get_mut().clone_from(&source.borrow())
+ }
+}
+
+impl<T: Default> Default for BqlRefCell<T> {
+ /// Creates a `BqlRefCell<T>`, with the `Default` value for T.
+ #[inline]
+ fn default() -> BqlRefCell<T> {
+ BqlRefCell::new(Default::default())
+ }
+}
+
+impl<T: PartialEq> PartialEq for BqlRefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `BqlRefCell` is currently mutably
+ /// borrowed.
+ #[inline]
+ fn eq(&self, other: &BqlRefCell<T>) -> bool {
+ *self.borrow() == *other.borrow()
+ }
+}
+
+impl<T: Eq> Eq for BqlRefCell<T> {}
+
+impl<T: PartialOrd> PartialOrd for BqlRefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `BqlRefCell` is currently mutably
+ /// borrowed.
+ #[inline]
+ fn partial_cmp(&self, other: &BqlRefCell<T>) -> Option<Ordering> {
+ self.borrow().partial_cmp(&*other.borrow())
+ }
+}
+
+impl<T: Ord> Ord for BqlRefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `BqlRefCell` is currently mutably
+ /// borrowed.
+ #[inline]
+ fn cmp(&self, other: &BqlRefCell<T>) -> Ordering {
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
+impl<T> From<T> for BqlRefCell<T> {
+ /// Creates a new `BqlRefCell<T>` containing the given value.
+ fn from(t: T) -> BqlRefCell<T> {
+ BqlRefCell::new(t)
+ }
+}
+
+struct BorrowRef<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl<'b> BorrowRef<'b> {
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> {
+ let b = borrow.get().wrapping_add(1);
+ if !is_reading(b) {
+ // Incrementing borrow can result in a non-reading value (<= 0) in these cases:
+ // 1. It was < 0, i.e. there are writing borrows, so we can't allow a read
+ // borrow due to Rust's reference aliasing rules
+ // 2. It was isize::MAX (the max amount of reading borrows) and it overflowed
+ // into isize::MIN (the max amount of writing borrows) so we can't allow an
+ // additional read borrow because isize can't represent so many read borrows
+ // (this can only happen if you mem::forget more than a small constant amount
+ // of `BqlRef`s, which is not good practice)
+ None
+ } else {
+ // Incrementing borrow can result in a reading value (> 0) in these cases:
+ // 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read
+ // borrow
+ // 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize is
+ // large enough to represent having one more read borrow
+ borrow.set(b);
+ Some(BorrowRef { borrow })
+ }
+ }
+}
+
+impl Drop for BorrowRef<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ let borrow = self.borrow.get();
+ debug_assert!(is_reading(borrow));
+ self.borrow.set(borrow - 1);
+ bql_block_unlock(false)
+ }
+}
+
+impl Clone for BorrowRef<'_> {
+ #[inline]
+ fn clone(&self) -> Self {
+ BorrowRef::new(self.borrow).unwrap()
+ }
+}
+
+/// Wraps a borrowed reference to a value in a `BqlRefCell` box.
+/// A wrapper type for an immutably borrowed value from a `BqlRefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+pub struct BqlRef<'b, T: 'b> {
+ // NB: we use a pointer instead of `&'b T` to avoid `noalias` violations, because a
+ // `BqlRef` argument doesn't hold immutability for its whole scope, only until it drops.
+ // `NonNull` is also covariant over `T`, just like we would have with `&T`.
+ value: NonNull<T>,
+ borrow: BorrowRef<'b>,
+}
+
+impl<T> Deref for BqlRef<'_, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_ref() }
+ }
+}
+
+impl<'b, T> BqlRef<'b, T> {
+ /// Copies a `BqlRef`.
+ ///
+ /// The `BqlRefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `BqlRef::clone(...)`. A `Clone` implementation or a method would
+ /// interfere with the widespread use of `r.borrow().clone()` to clone
+ /// the contents of a `BqlRefCell`.
+ #[must_use]
+ #[inline]
+ #[allow(clippy::should_implement_trait)]
+ pub fn clone(orig: &BqlRef<'b, T>) -> BqlRef<'b, T> {
+ BqlRef {
+ value: orig.value,
+ borrow: orig.borrow.clone(),
+ }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for BqlRef<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+impl<T: fmt::Display> fmt::Display for BqlRef<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+struct BorrowRefMut<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl<'b> BorrowRefMut<'b> {
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> {
+ // There must currently be no existing references when borrow_mut() is
+ // called, so we explicitly only allow going from UNUSED to UNUSED - 1.
+ match borrow.get() {
+ UNUSED => {
+ borrow.set(UNUSED - 1);
+ Some(BorrowRefMut { borrow })
+ }
+ _ => None,
+ }
+ }
+}
+
+impl Drop for BorrowRefMut<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ let borrow = self.borrow.get();
+ debug_assert!(is_writing(borrow));
+ self.borrow.set(borrow + 1);
+ bql_block_unlock(false)
+ }
+}
+
+/// A wrapper type for a mutably borrowed value from a `BqlRefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+pub struct BqlRefMut<'b, T: 'b> {
+ // NB: we use a pointer instead of `&'b mut T` to avoid `noalias` violations, because a
+ // `BqlRefMut` argument doesn't hold exclusivity for its whole scope, only until it drops.
+ value: NonNull<T>,
+ _borrow: BorrowRefMut<'b>,
+ // `NonNull` is covariant over `T`, so we need to reintroduce invariance.
+ marker: PhantomData<&'b mut T>,
+}
+
+impl<T> Deref for BqlRefMut<'_, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_ref() }
+ }
+}
+
+impl<T> DerefMut for BqlRefMut<'_, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the value is accessible as long as we hold our borrow.
+ unsafe { self.value.as_mut() }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for BqlRefMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+impl<T: fmt::Display> fmt::Display for BqlRefMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+/// Stores an opaque value that is shared with C code.
+///
+/// Often, C structs can change when calling a C function even if they are
+/// behind a shared Rust reference, or they can be initialized lazily and have
+/// invalid bit patterns (e.g. `3` for a [`bool`]). This goes against Rust's
+/// strict aliasing rules, which normally prevent mutation through shared
+/// references.
+///
+/// Wrapping the struct with `Opaque<T>` ensures that the Rust compiler does not
+/// assume the usual constraints that Rust structs require, and allows using
+/// shared references on the Rust side.
+///
+/// `Opaque<T>` is `#[repr(transparent)]`, so that it matches the memory layout
+/// of `T`.
+#[repr(transparent)]
+pub struct Opaque<T> {
+ value: UnsafeCell<MaybeUninit<T>>,
+ // PhantomPinned also allows multiple references to the `Opaque<T>`, i.e.
+ // one `&mut Opaque<T>` can coexist with a `&mut T` or any number of `&T`;
+ // see https://docs.rs/pinned-aliasable/latest/pinned_aliasable/.
+ _pin: PhantomPinned,
+}
+
+impl<T> Opaque<T> {
+ /// Creates a new shared reference from a C pointer
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, though it need not point to a valid value.
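+ ///
+ /// # Examples
+ ///
+ /// A sketch only; `ptr` stands for a pointer handed over by C code:
+ ///
+ /// ```ignore
+ /// let state = unsafe { Opaque::<bindings::DeviceState>::from_raw(ptr) };
+ /// ```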
+ pub unsafe fn from_raw<'a>(ptr: *mut T) -> &'a Self {
+ let ptr = NonNull::new(ptr).unwrap().cast::<Self>();
+ // SAFETY: Self is a transparent wrapper over T
+ unsafe { ptr.as_ref() }
+ }
+
+ /// Creates a new opaque object with uninitialized contents.
+ ///
+ /// # Safety
+ ///
+ /// Ultimately the pointer to the returned value will be dereferenced
+ /// in another `unsafe` block, for example when passing it to a C function,
+ /// but the functions containing the dereference are usually safe. The
+ /// value returned from `uninit()` must be initialized and pinned before
+ /// calling them.
+ #[allow(clippy::missing_const_for_fn)]
+ pub unsafe fn uninit() -> Self {
+ Self {
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Creates a new opaque object with zeroed contents.
+ ///
+ /// # Safety
+ ///
+ /// Ultimately the pointer to the returned value will be dereferenced
+ /// in another `unsafe` block, for example when passing it to a C function,
+ /// but the functions containing the dereference are usually safe. The
+ /// value returned from `zeroed()` must be pinned (and possibly initialized)
+ /// before calling them.
+ #[allow(clippy::missing_const_for_fn)]
+ pub unsafe fn zeroed() -> Self {
+ Self {
+ value: UnsafeCell::new(MaybeUninit::zeroed()),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Returns a raw mutable pointer to the opaque data.
+ pub const fn as_mut_ptr(&self) -> *mut T {
+ UnsafeCell::get(&self.value).cast()
+ }
+
+ /// Returns a raw pointer to the opaque data.
+ pub const fn as_ptr(&self) -> *const T {
+ self.as_mut_ptr().cast_const()
+ }
+
+ /// Returns a raw pointer to the opaque data that can be passed to a
+ /// C function as `void *`.
+ pub const fn as_void_ptr(&self) -> *mut std::ffi::c_void {
+ UnsafeCell::get(&self.value).cast()
+ }
+
+ /// Converts a raw pointer to the wrapped type.
+ pub const fn raw_get(slot: *mut Self) -> *mut T {
+ // Compare with Linux's raw_get method, which goes through an UnsafeCell
+ // because it takes a *const Self instead.
+ slot.cast()
+ }
+}
+
+impl<T> fmt::Debug for Opaque<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut name: String = "Opaque<".to_string();
+ name += std::any::type_name::<T>();
+ name += ">";
+ f.debug_tuple(&name).field(&self.as_ptr()).finish()
+ }
+}
+
+impl<T: Default> Opaque<T> {
+ /// Creates a new opaque object with default contents.
+ ///
+ /// # Safety
+ ///
+ /// Ultimately the pointer to the returned value will be dereferenced
+ /// in another `unsafe` block, for example when passing it to a C function,
+ /// but the functions containing the dereference are usually safe. The
+ /// value returned from `new()` must be pinned before calling them.
+ pub unsafe fn new() -> Self {
+ Self {
+ value: UnsafeCell::new(MaybeUninit::new(T::default())),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+/// Annotates [`Self`] as a transparent wrapper for another type.
+///
+/// Usually defined via the [`qemu_api_macros::Wrapper`] derive macro.
+///
+/// # Examples
+///
+/// ```
+/// # use std::mem::ManuallyDrop;
+/// # use qemu_api::cell::Wrapper;
+/// #[repr(transparent)]
+/// pub struct Example {
+/// inner: ManuallyDrop<String>,
+/// }
+///
+/// unsafe impl Wrapper for Example {
+/// type Wrapped = String;
+/// }
+/// ```
+///
+/// # Safety
+///
+/// `Self` must be a `#[repr(transparent)]` wrapper for the `Wrapped` type,
+/// whether directly or indirectly.
+///
+/// # Methods
+///
+/// By convention, types that implement Wrapper also implement the following
+/// methods:
+///
+/// ```ignore
+/// pub const unsafe fn from_raw<'a>(value: *mut Self::Wrapped) -> &'a Self;
+/// pub const unsafe fn as_mut_ptr(&self) -> *mut Self::Wrapped;
+/// pub const unsafe fn as_ptr(&self) -> *const Self::Wrapped;
+/// pub const unsafe fn raw_get(slot: *mut Self) -> *const Self::Wrapped;
+/// ```
+///
+/// They are not defined here to allow them to be `const`.
+pub unsafe trait Wrapper {
+ type Wrapped;
+}
+
+unsafe impl<T> Wrapper for Opaque<T> {
+ type Wrapped = T;
+}
diff --git a/rust/qemu-api/src/chardev.rs b/rust/qemu-api/src/chardev.rs
new file mode 100644
index 0000000..6e0590d
--- /dev/null
+++ b/rust/qemu-api/src/chardev.rs
@@ -0,0 +1,260 @@
+// Copyright 2024 Red Hat, Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings for character devices
+//!
+//! Character devices in QEMU can run under the big QEMU lock or in a separate
+//! `GMainContext`. Here we only support the former, because the bindings
+//! enforce that the BQL is taken whenever the functions in [`CharBackend`] are
+//! called.
+
+use std::{
+ ffi::{c_int, c_void, CStr},
+ fmt::{self, Debug},
+ io::{self, ErrorKind, Write},
+ marker::PhantomPinned,
+ ptr::addr_of_mut,
+ slice,
+};
+
+use crate::{
+ bindings,
+ callbacks::FnCall,
+ cell::{BqlRefMut, Opaque},
+ prelude::*,
+};
+
+/// A safe wrapper around [`bindings::Chardev`].
+#[repr(transparent)]
+#[derive(qemu_api_macros::Wrapper)]
+pub struct Chardev(Opaque<bindings::Chardev>);
+
+pub type ChardevClass = bindings::ChardevClass;
+pub type Event = bindings::QEMUChrEvent;
+
+/// A safe wrapper around [`bindings::CharBackend`], denoting the character
+/// back-end that is used for example by a device. Compared to the
+/// underlying C struct it adds BQL protection, and is marked as pinned
+/// because the QOM object ([`bindings::Chardev`]) contains a pointer to
+/// the `CharBackend`.
+pub struct CharBackend {
+ inner: BqlRefCell<bindings::CharBackend>,
+ _pin: PhantomPinned,
+}
+
+impl Write for BqlRefMut<'_, bindings::CharBackend> {
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let chr: &mut bindings::CharBackend = self;
+
+ let len = buf.len().try_into().unwrap();
+ let r = unsafe { bindings::qemu_chr_fe_write(addr_of_mut!(*chr), buf.as_ptr(), len) };
+ errno::into_io_result(r).map(|cnt| cnt as usize)
+ }
+
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ let chr: &mut bindings::CharBackend = self;
+
+ let len = buf.len().try_into().unwrap();
+ let r = unsafe { bindings::qemu_chr_fe_write_all(addr_of_mut!(*chr), buf.as_ptr(), len) };
+ errno::into_io_result(r).and_then(|cnt| {
+ if cnt as usize == buf.len() {
+ Ok(())
+ } else {
+ Err(ErrorKind::WriteZero.into())
+ }
+ })
+ }
+}
+
+impl Debug for CharBackend {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: accessed just to print the values
+ let chr = self.inner.as_ptr();
+ Debug::fmt(unsafe { &*chr }, f)
+ }
+}
+
+// FIXME: use something like PinnedDrop from the pinned_init crate
+impl Drop for CharBackend {
+ fn drop(&mut self) {
+ self.disable_handlers();
+ }
+}
+
+impl CharBackend {
+ /// Enable the front-end's character device handlers, if there is an
+ /// associated `Chardev`.
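+ ///
+ /// A sketch of a typical call site; `MyDevice` and its `can_receive`,
+ /// `receive` and `event` methods are hypothetical and only illustrate
+ /// the expected signatures:
+ ///
+ /// ```ignore
+ /// // fn can_receive(&self) -> u32, fn receive(&self, buf: &[u8]),
+ /// // fn event(&self, event: Event) are defined on MyDevice.
+ /// self.char_backend
+ ///     .enable_handlers(self, MyDevice::can_receive, MyDevice::receive, MyDevice::event);
+ /// ```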
+ pub fn enable_handlers<
+ 'chardev,
+ 'owner: 'chardev,
+ T,
+ CanReceiveFn: for<'a> FnCall<(&'a T,), u32>,
+ ReceiveFn: for<'a, 'b> FnCall<(&'a T, &'b [u8])>,
+ EventFn: for<'a> FnCall<(&'a T, Event)>,
+ >(
+ // When "self" is dropped, the handlers are automatically disabled.
+ // However, this is not necessarily true if the owner is dropped.
+ // So require the owner to outlive the character device.
+ &'chardev self,
+ owner: &'owner T,
+ _can_receive: CanReceiveFn,
+ _receive: ReceiveFn,
+ _event: EventFn,
+ ) {
+ unsafe extern "C" fn rust_can_receive_cb<T, F: for<'a> FnCall<(&'a T,), u32>>(
+ opaque: *mut c_void,
+ ) -> c_int {
+ // SAFETY: the values are safe according to the contract of
+ // enable_handlers() and qemu_chr_fe_set_handlers()
+ let owner: &T = unsafe { &*(opaque.cast::<T>()) };
+ let r = F::call((owner,));
+ r.try_into().unwrap()
+ }
+
+ unsafe extern "C" fn rust_receive_cb<T, F: for<'a, 'b> FnCall<(&'a T, &'b [u8])>>(
+ opaque: *mut c_void,
+ buf: *const u8,
+ size: c_int,
+ ) {
+ // SAFETY: the values are safe according to the contract of
+ // enable_handlers() and qemu_chr_fe_set_handlers()
+ let owner: &T = unsafe { &*(opaque.cast::<T>()) };
+ let buf = unsafe { slice::from_raw_parts(buf, size.try_into().unwrap()) };
+ F::call((owner, buf))
+ }
+
+ unsafe extern "C" fn rust_event_cb<T, F: for<'a> FnCall<(&'a T, Event)>>(
+ opaque: *mut c_void,
+ event: Event,
+ ) {
+ // SAFETY: the values are safe according to the contract of
+ // enable_handlers() and qemu_chr_fe_set_handlers()
+ let owner: &T = unsafe { &*(opaque.cast::<T>()) };
+ F::call((owner, event))
+ }
+
+ let _: () = CanReceiveFn::ASSERT_IS_SOME;
+ let receive_cb: Option<unsafe extern "C" fn(*mut c_void, *const u8, c_int)> =
+ if ReceiveFn::is_some() {
+ Some(rust_receive_cb::<T, ReceiveFn>)
+ } else {
+ None
+ };
+ let event_cb: Option<unsafe extern "C" fn(*mut c_void, Event)> = if EventFn::is_some() {
+ Some(rust_event_cb::<T, EventFn>)
+ } else {
+ None
+ };
+
+ let mut chr = self.inner.borrow_mut();
+ // SAFETY: the borrow promises that the BQL is taken
+ unsafe {
+ bindings::qemu_chr_fe_set_handlers(
+ addr_of_mut!(*chr),
+ Some(rust_can_receive_cb::<T, CanReceiveFn>),
+ receive_cb,
+ event_cb,
+ None,
+ (owner as *const T).cast_mut().cast::<c_void>(),
+ core::ptr::null_mut(),
+ true,
+ );
+ }
+ }
+
+ /// Disable the front-end's character device handlers.
+ pub fn disable_handlers(&self) {
+ let mut chr = self.inner.borrow_mut();
+ // SAFETY: the borrow promises that the BQL is taken
+ unsafe {
+ bindings::qemu_chr_fe_set_handlers(
+ addr_of_mut!(*chr),
+ None,
+ None,
+ None,
+ None,
+ core::ptr::null_mut(),
+ core::ptr::null_mut(),
+ true,
+ );
+ }
+ }
+
+ /// Notify that the frontend is ready to receive data.
+ pub fn accept_input(&self) {
+ let mut chr = self.inner.borrow_mut();
+ // SAFETY: the borrow promises that the BQL is taken
+ unsafe { bindings::qemu_chr_fe_accept_input(addr_of_mut!(*chr)) }
+ }
+
+ /// Temporarily borrow the character device, allowing it to be used
+ /// as an implementor of `Write`. Note that it is not valid to drop
+ /// the big QEMU lock while the character device is borrowed, as
+ /// that might cause C code to write to the character device.
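+ ///
+ /// A sketch of the intended use; `dev` with a `char_backend` field is
+ /// hypothetical:
+ ///
+ /// ```ignore
+ /// use std::io::Write;
+ ///
+ /// // The data goes out through qemu_chr_fe_write() while the BQL is held.
+ /// let _ = writeln!(dev.char_backend.borrow_mut(), "hello");
+ /// ```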
+ pub fn borrow_mut(&self) -> impl Write + '_ {
+ self.inner.borrow_mut()
+ }
+
+ /// Send a continuous stream of zero bits on the line if `long` is
+ /// true, or a short stream if `long` is false.
+ pub fn send_break(&self, long: bool) -> io::Result<()> {
+ let mut chr = self.inner.borrow_mut();
+ let mut duration: c_int = long.into();
+ // SAFETY: the borrow promises that the BQL is taken
+ let r = unsafe {
+ bindings::qemu_chr_fe_ioctl(
+ addr_of_mut!(*chr),
+ bindings::CHR_IOCTL_SERIAL_SET_BREAK as i32,
+ addr_of_mut!(duration).cast::<c_void>(),
+ )
+ };
+
+ errno::into_io_result(r).map(|_| ())
+ }
+
+ /// Write data to a character backend from the front end. This function
+ /// will send data from the front end to the back end. Unlike
+ /// `write_all`, this function does not block if the back end cannot
+ /// consume all of the data attempted to be written, and may consume
+ /// fewer bytes than requested.
+ ///
+ /// Returns the number of bytes consumed (0 if no associated Chardev) or an
+ /// error.
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let len = buf.len().try_into().unwrap();
+ // SAFETY: qemu_chr_fe_write is thread-safe
+ let r = unsafe { bindings::qemu_chr_fe_write(self.inner.as_ptr(), buf.as_ptr(), len) };
+ errno::into_io_result(r).map(|cnt| cnt as usize)
+ }
+
+ /// Write data to a character backend from the front end. This function
+ /// will send data from the front end to the back end. Unlike
+ /// `write`, this function will block if the back end cannot
+ /// consume all of the data attempted to be written.
+ ///
+ /// Returns the number of bytes consumed (0 if no associated Chardev) or an
+ /// error.
+ pub fn write_all(&self, buf: &[u8]) -> io::Result<()> {
+ let len = buf.len().try_into().unwrap();
+ // SAFETY: qemu_chr_fe_write_all is thread-safe
+ let r = unsafe { bindings::qemu_chr_fe_write_all(self.inner.as_ptr(), buf.as_ptr(), len) };
+ errno::into_io_result(r).and_then(|cnt| {
+ if cnt as usize == buf.len() {
+ Ok(())
+ } else {
+ Err(ErrorKind::WriteZero.into())
+ }
+ })
+ }
+}
+
+unsafe impl ObjectType for Chardev {
+ type Class = ChardevClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CHARDEV) };
+}
+qom_isa!(Chardev: Object);
diff --git a/rust/qemu-api/src/errno.rs b/rust/qemu-api/src/errno.rs
new file mode 100644
index 0000000..18d1014
--- /dev/null
+++ b/rust/qemu-api/src/errno.rs
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Utility functions to convert `errno` to and from
+//! [`io::Error`]/[`io::Result`]
+//!
+//! QEMU C functions often have a "positive success/negative `errno`" calling
+//! convention. This module provides functions to portably convert an integer
+//! into an [`io::Result`] and back.
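+//!
+//! For instance, a C-style return value can be converted as follows
+//! (sketch; `some_c_call` is a hypothetical binding returning a `c_int`
+//! that is either non-negative or a negated `errno`):
+//!
+//! ```ignore
+//! fn do_io() -> std::io::Result<u32> {
+//!     let ret = unsafe { some_c_call() };
+//!     errno::into_io_result(ret)
+//! }
+//!
+//! // ...and the result converted back at an FFI boundary with
+//! // errno::into_neg_errno(do_io()).
+//! ```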
+
+use std::{convert::TryFrom, io, io::ErrorKind};
+
+/// An `errno` value that can be converted into an [`io::Error`]
+pub struct Errno(pub u16);
+
+// On Unix, from_raw_os_error takes an errno value and OS errors
+// are printed using strerror. On Windows however it takes a
+// GetLastError() value; therefore we need to convert errno values
+// into io::Error by hand. This is the same mapping that the
+// standard library uses to retrieve the kind of OS errors
+// (`std::sys::pal::unix::decode_error_kind`).
+impl From<Errno> for ErrorKind {
+ fn from(value: Errno) -> ErrorKind {
+ use ErrorKind::*;
+ let Errno(errno) = value;
+ match i32::from(errno) {
+ libc::EPERM | libc::EACCES => PermissionDenied,
+ libc::ENOENT => NotFound,
+ libc::EINTR => Interrupted,
+ x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock,
+ libc::ENOMEM => OutOfMemory,
+ libc::EEXIST => AlreadyExists,
+ libc::EINVAL => InvalidInput,
+ libc::EPIPE => BrokenPipe,
+ libc::EADDRINUSE => AddrInUse,
+ libc::EADDRNOTAVAIL => AddrNotAvailable,
+ libc::ECONNABORTED => ConnectionAborted,
+ libc::ECONNREFUSED => ConnectionRefused,
+ libc::ECONNRESET => ConnectionReset,
+ libc::ENOTCONN => NotConnected,
+ libc::ENOTSUP => Unsupported,
+ libc::ETIMEDOUT => TimedOut,
+ _ => Other,
+ }
+ }
+}
+
+// This is used on Windows for all io::Errors, but also on Unix if the
+// io::Error does not have a raw OS error. This is the reversed
+// mapping of the above; EIO is returned for unknown ErrorKinds.
+impl From<io::ErrorKind> for Errno {
+ fn from(value: io::ErrorKind) -> Errno {
+ use ErrorKind::*;
+ let errno = match value {
+ // can be either EPERM or EACCES :( pick one
+ PermissionDenied => libc::EPERM,
+ NotFound => libc::ENOENT,
+ Interrupted => libc::EINTR,
+ WouldBlock => libc::EAGAIN,
+ OutOfMemory => libc::ENOMEM,
+ AlreadyExists => libc::EEXIST,
+ InvalidInput => libc::EINVAL,
+ BrokenPipe => libc::EPIPE,
+ AddrInUse => libc::EADDRINUSE,
+ AddrNotAvailable => libc::EADDRNOTAVAIL,
+ ConnectionAborted => libc::ECONNABORTED,
+ ConnectionRefused => libc::ECONNREFUSED,
+ ConnectionReset => libc::ECONNRESET,
+ NotConnected => libc::ENOTCONN,
+ Unsupported => libc::ENOTSUP,
+ TimedOut => libc::ETIMEDOUT,
+ _ => libc::EIO,
+ };
+ Errno(errno as u16)
+ }
+}
+
+impl From<Errno> for io::Error {
+ #[cfg(unix)]
+ fn from(value: Errno) -> io::Error {
+ let Errno(errno) = value;
+ io::Error::from_raw_os_error(errno.into())
+ }
+
+ #[cfg(windows)]
+ fn from(value: Errno) -> io::Error {
+ let error_kind: ErrorKind = value.into();
+ error_kind.into()
+ }
+}
+
+impl From<io::Error> for Errno {
+ fn from(value: io::Error) -> Errno {
+ if cfg!(unix) {
+ if let Some(errno) = value.raw_os_error() {
+ return Errno(u16::try_from(errno).unwrap());
+ }
+ }
+ value.kind().into()
+ }
+}
+
+/// Internal traits; used to enable [`into_io_result`] and [`into_neg_errno`]
+/// for the "right" set of types.
+mod traits {
+ use super::Errno;
+
+ /// A signed type that can be converted into an
+ /// [`io::Result`](std::io::Result)
+ pub trait GetErrno {
+ /// Unsigned variant of `Self`, used as the type for the `Ok` case.
+ type Out;
+
+ /// Return `Ok(self)` if zero or positive, `Err(Errno(-self))` if negative
+ fn into_errno_result(self) -> Result<Self::Out, Errno>;
+ }
+
+ /// A type that can be taken out of an [`io::Result`](std::io::Result) and
+ /// converted into "positive success/negative `errno`" convention.
+ pub trait MergeErrno {
+ /// Signed variant of `Self`, used as the return type of
+ /// [`into_neg_errno`](super::into_neg_errno).
+ type Out: From<u16> + std::ops::Neg<Output = Self::Out>;
+
+ /// Return `self`, asserting that it is in range
+ fn map_ok(self) -> Self::Out;
+ }
+
+ macro_rules! get_errno {
+ ($t:ty, $out:ty) => {
+ impl GetErrno for $t {
+ type Out = $out;
+ fn into_errno_result(self) -> Result<Self::Out, Errno> {
+ match self {
+ 0.. => Ok(self as $out),
+ -65535..=-1 => Err(Errno(-self as u16)),
+ _ => panic!("{self} is not a negative errno"),
+ }
+ }
+ }
+ };
+ }
+
+ get_errno!(i32, u32);
+ get_errno!(i64, u64);
+ get_errno!(isize, usize);
+
+ macro_rules! merge_errno {
+ ($t:ty, $out:ty) => {
+ impl MergeErrno for $t {
+ type Out = $out;
+ fn map_ok(self) -> Self::Out {
+ self.try_into().unwrap()
+ }
+ }
+ };
+ }
+
+ merge_errno!(u8, i32);
+ merge_errno!(u16, i32);
+ merge_errno!(u32, i32);
+ merge_errno!(u64, i64);
+
+ impl MergeErrno for () {
+ type Out = i32;
+ fn map_ok(self) -> i32 {
+ 0
+ }
+ }
+}
+
+use traits::{GetErrno, MergeErrno};
+
+/// Convert an integer value into a [`io::Result`].
+///
+/// Zero and positive values are turned into an `Ok` result; negative values
+/// are interpreted as negated `errno` and turned into an `Err`.
+///
+/// ```
+/// # use qemu_api::errno::into_io_result;
+/// # use std::io::ErrorKind;
+/// let ok = into_io_result(1i32).unwrap();
+/// assert_eq!(ok, 1u32);
+///
+/// let err = into_io_result(-1i32).unwrap_err(); // -EPERM
+/// assert_eq!(err.kind(), ErrorKind::PermissionDenied);
+/// ```
+///
+/// # Panics
+///
+/// Since the result is an unsigned integer, negative values must
+/// be close to 0; values that are too far away are considered
+/// likely overflows and will panic:
+///
+/// ```should_panic
+/// # use qemu_api::errno::into_io_result;
+/// # #[allow(dead_code)]
+/// let err = into_io_result(-0x1234_5678i32); // panic
+/// ```
+pub fn into_io_result<T: GetErrno>(value: T) -> io::Result<T::Out> {
+ value.into_errno_result().map_err(Into::into)
+}
+
+/// Convert a [`Result`] into an integer value, using negative `errno`
+/// values to report errors.
+///
+/// ```
+/// # use qemu_api::errno::into_neg_errno;
+/// # use std::io::{self, ErrorKind};
+/// let ok: io::Result<()> = Ok(());
+/// assert_eq!(into_neg_errno(ok), 0);
+///
+/// let err: io::Result<()> = Err(ErrorKind::InvalidInput.into());
+/// assert_eq!(into_neg_errno(err), -22); // -EINVAL
+/// ```
+///
+/// Since this module also provides the ability to convert [`io::Error`]
+/// to an `errno` value, [`io::Result`] is the most commonly used type
+/// for the argument of this function.
+///
+/// # Panics
+///
+/// Since the result is a signed integer, integer `Ok` values must remain
+/// positive:
+///
+/// ```should_panic
+/// # use qemu_api::errno::into_neg_errno;
+/// # use std::io;
+/// let err: io::Result<u32> = Ok(0x8899_AABB);
+/// into_neg_errno(err) // panic
+/// # ;
+/// ```
+pub fn into_neg_errno<T: MergeErrno, E: Into<Errno>>(value: Result<T, E>) -> T::Out {
+ match value {
+ Ok(x) => x.map_ok(),
+ Err(err) => -T::Out::from(err.into().0),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::ErrorKind;
+
+ use super::*;
+ use crate::assert_match;
+
+ #[test]
+ pub fn test_from_u8() {
+ let ok: io::Result<_> = Ok(42u8);
+ assert_eq!(into_neg_errno(ok), 42);
+
+ let err: io::Result<u8> = Err(io::ErrorKind::PermissionDenied.into());
+ assert_eq!(into_neg_errno(err), -1);
+
+ if cfg!(unix) {
+ let os_err: io::Result<u8> = Err(io::Error::from_raw_os_error(10));
+ assert_eq!(into_neg_errno(os_err), -10);
+ }
+ }
+
+ #[test]
+ pub fn test_from_u16() {
+ let ok: io::Result<_> = Ok(1234u16);
+ assert_eq!(into_neg_errno(ok), 1234);
+
+ let err: io::Result<u16> = Err(io::ErrorKind::PermissionDenied.into());
+ assert_eq!(into_neg_errno(err), -1);
+
+ if cfg!(unix) {
+ let os_err: io::Result<u16> = Err(io::Error::from_raw_os_error(10));
+ assert_eq!(into_neg_errno(os_err), -10);
+ }
+ }
+
+ #[test]
+ pub fn test_i32() {
+ assert_match!(into_io_result(1234i32), Ok(1234));
+
+ let err = into_io_result(-1i32).unwrap_err();
+ #[cfg(unix)]
+ assert_match!(err.raw_os_error(), Some(1));
+ assert_match!(err.kind(), ErrorKind::PermissionDenied);
+ }
+
+ #[test]
+ pub fn test_from_u32() {
+ let ok: io::Result<_> = Ok(1234u32);
+ assert_eq!(into_neg_errno(ok), 1234);
+
+ let err: io::Result<u32> = Err(io::ErrorKind::PermissionDenied.into());
+ assert_eq!(into_neg_errno(err), -1);
+
+ if cfg!(unix) {
+ let os_err: io::Result<u32> = Err(io::Error::from_raw_os_error(10));
+ assert_eq!(into_neg_errno(os_err), -10);
+ }
+ }
+
+ #[test]
+ pub fn test_i64() {
+ assert_match!(into_io_result(1234i64), Ok(1234));
+
+ let err = into_io_result(-22i64).unwrap_err();
+ #[cfg(unix)]
+ assert_match!(err.raw_os_error(), Some(22));
+ assert_match!(err.kind(), ErrorKind::InvalidInput);
+ }
+
+ #[test]
+ pub fn test_from_u64() {
+ let ok: io::Result<_> = Ok(1234u64);
+ assert_eq!(into_neg_errno(ok), 1234);
+
+ let err: io::Result<u64> = Err(io::ErrorKind::InvalidInput.into());
+ assert_eq!(into_neg_errno(err), -22);
+
+ if cfg!(unix) {
+ let os_err: io::Result<u64> = Err(io::Error::from_raw_os_error(6));
+ assert_eq!(into_neg_errno(os_err), -6);
+ }
+ }
+
+ #[test]
+ pub fn test_isize() {
+ assert_match!(into_io_result(1234isize), Ok(1234));
+
+ let err = into_io_result(-4isize).unwrap_err();
+ #[cfg(unix)]
+ assert_match!(err.raw_os_error(), Some(4));
+ assert_match!(err.kind(), ErrorKind::Interrupted);
+ }
+
+ #[test]
+ pub fn test_from_unit() {
+ let ok: io::Result<_> = Ok(());
+ assert_eq!(into_neg_errno(ok), 0);
+
+ let err: io::Result<()> = Err(io::ErrorKind::OutOfMemory.into());
+ assert_eq!(into_neg_errno(err), -12);
+
+ if cfg!(unix) {
+ let os_err: io::Result<()> = Err(io::Error::from_raw_os_error(2));
+ assert_eq!(into_neg_errno(os_err), -2);
+ }
+ }
+}
diff --git a/rust/qemu-api/src/error.rs b/rust/qemu-api/src/error.rs
new file mode 100644
index 0000000..e114fc4
--- /dev/null
+++ b/rust/qemu-api/src/error.rs
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Error propagation for QEMU Rust code
+//!
+//! This module contains [`Error`], the bridge between Rust errors and
+//! [`Result`](std::result::Result)s and QEMU's C [`Error`](bindings::Error)
+//! struct.
+//!
+//! For FFI code, [`Error`] provides functions to simplify conversion between
+//! the Rust ([`Result<>`](std::result::Result)) and C (`Error**`) conventions:
+//!
+//! * [`ok_or_propagate`](crate::Error::ok_or_propagate),
+//! [`bool_or_propagate`](crate::Error::bool_or_propagate),
+//! [`ptr_or_propagate`](crate::Error::ptr_or_propagate) can be used to build
+//! a C return value while also propagating an error condition
+//!
+//! * [`err_or_else`](crate::Error::err_or_else) and
+//! [`err_or_unit`](crate::Error::err_or_unit) can be used to build a `Result`
+//!
+//! This module is most commonly used at the boundary between C and Rust code;
+//! other code will usually access it through the
+//! [`qemu_api::Result`](crate::Result) type alias, and will use the
+//! [`std::error::Error`] interface to let C errors participate in Rust's error
+//! handling functionality.
+//!
+//! Rust code can also use this module to create an error object that
+//! will be passed up to C code, though in most cases this will be done
+//! transparently through the `?` operator. Errors can be constructed from a
+//! simple error string, from an [`anyhow::Error`] to pass any other Rust error
+//! type up to C code, or from a combination of the two.
+//!
+//! The third case, corresponding to [`Error::with_error`], is the only one that
+//! requires mentioning [`qemu_api::Error`](crate::Error) explicitly. Similar
+//! to how QEMU's C code handles errno values, the string and the
+//! `anyhow::Error` object will be concatenated with `:` as the separator.
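+//!
+//! As a sketch of the FFI-boundary usage, a C-visible entry point might
+//! look like this (`do_something` and its prototype are hypothetical):
+//!
+//! ```ignore
+//! unsafe extern "C" fn do_something(errp: *mut *mut bindings::Error) -> bool {
+//!     fn inner() -> qemu_api::Result<()> {
+//!         Err("something went wrong".into())
+//!     }
+//!     // On error, fills *errp like error_propagate() would and returns false.
+//!     unsafe { qemu_api::Error::bool_or_propagate(inner(), errp) }
+//! }
+//! ```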
+
+use std::{
+ borrow::Cow,
+ ffi::{c_char, c_int, c_void, CStr},
+ fmt::{self, Display},
+ panic, ptr,
+};
+
+use foreign::{prelude::*, OwnedPointer};
+
+use crate::bindings;
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+#[derive(Debug)]
+pub struct Error {
+ msg: Option<Cow<'static, str>>,
+ /// If not `None`, its display string is appended to `msg` when formatting
+ cause: Option<anyhow::Error>,
+ file: &'static str,
+ line: u32,
+}
+
+impl std::error::Error for Error {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ self.cause.as_ref().map(AsRef::as_ref)
+ }
+
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.msg
+ .as_deref()
+ .or_else(|| self.cause.as_deref().map(std::error::Error::description))
+ .expect("no message nor cause?")
+ }
+}
+
+impl Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut prefix = "";
+ if let Some(ref msg) = self.msg {
+ write!(f, "{msg}")?;
+ prefix = ": ";
+ }
+ if let Some(ref cause) = self.cause {
+ write!(f, "{prefix}{cause}")?;
+ } else if prefix.is_empty() {
+ panic!("no message nor cause?");
+ }
+ Ok(())
+ }
+}
+
+impl From<String> for Error {
+ #[track_caller]
+ fn from(msg: String) -> Self {
+ let location = panic::Location::caller();
+ Error {
+ msg: Some(Cow::Owned(msg)),
+ cause: None,
+ file: location.file(),
+ line: location.line(),
+ }
+ }
+}
+
+impl From<&'static str> for Error {
+ #[track_caller]
+ fn from(msg: &'static str) -> Self {
+ let location = panic::Location::caller();
+ Error {
+ msg: Some(Cow::Borrowed(msg)),
+ cause: None,
+ file: location.file(),
+ line: location.line(),
+ }
+ }
+}
+
+impl From<anyhow::Error> for Error {
+ #[track_caller]
+ fn from(error: anyhow::Error) -> Self {
+ let location = panic::Location::caller();
+ Error {
+ msg: None,
+ cause: Some(error),
+ file: location.file(),
+ line: location.line(),
+ }
+ }
+}
+
+impl Error {
+ /// Create a new error, prepending `msg` to the
+ /// description of `cause`
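+ ///
+ /// ```ignore
+ /// // Sketch: the resulting error displays as "could not set irq: invalid level".
+ /// let err = Error::with_error("could not set irq", anyhow::anyhow!("invalid level"));
+ /// ```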
+ #[track_caller]
+ pub fn with_error(msg: impl Into<Cow<'static, str>>, cause: impl Into<anyhow::Error>) -> Self {
+ let location = panic::Location::caller();
+ Error {
+ msg: Some(msg.into()),
+ cause: Some(cause.into()),
+ file: location.file(),
+ line: location.line(),
+ }
+ }
+
+ /// Consume a result, returning `false` if it is an error and
+ /// `true` if it is successful. The error is propagated into
+ /// `errp` like the C API `error_propagate` would do.
+ ///
+ /// # Safety
+ ///
+ /// `errp` must be a valid argument to `error_propagate`;
+ /// typically it is received from C code and need not be
+ /// checked further at the Rust↔C boundary.
+ pub unsafe fn bool_or_propagate(result: Result<()>, errp: *mut *mut bindings::Error) -> bool {
+ // SAFETY: caller guarantees errp is valid
+ unsafe { Self::ok_or_propagate(result, errp) }.is_some()
+ }
+
+ /// Consume a result, returning a `NULL` pointer if it is an error and
+ /// a C representation of the contents if it is successful. This is
+ /// similar to the C API `error_propagate`, but it panics if `*errp`
+ /// is not `NULL`.
+ ///
+ /// # Safety
+ ///
+ /// `errp` must be a valid argument to `error_propagate`;
+ /// typically it is received from C code and need not be
+ /// checked further at the Rust↔C boundary.
+ ///
+ /// See [`propagate`](Error::propagate) for more information.
+ #[must_use]
+ pub unsafe fn ptr_or_propagate<T: CloneToForeign>(
+ result: Result<T>,
+ errp: *mut *mut bindings::Error,
+ ) -> *mut T::Foreign {
+ // SAFETY: caller guarantees errp is valid
+ unsafe { Self::ok_or_propagate(result, errp) }.clone_to_foreign_ptr()
+ }
+
+ /// Consume a result in the same way as `Result::ok()`, but also propagate
+ /// a possible error into `errp`. This is similar to the C API
+ /// `error_propagate`, but it panics if `*errp` is not `NULL`.
+ ///
+ /// # Safety
+ ///
+ /// `errp` must be a valid argument to `error_propagate`;
+ /// typically it is received from C code and need not be
+ /// checked further at the Rust↔C boundary.
+ ///
+ /// See [`propagate`](Error::propagate) for more information.
+ pub unsafe fn ok_or_propagate<T>(
+ result: Result<T>,
+ errp: *mut *mut bindings::Error,
+ ) -> Option<T> {
+ result.map_err(|err| unsafe { err.propagate(errp) }).ok()
+ }
+
+ /// Equivalent of the C function `error_propagate`. Fill `*errp`
+ /// with the information contained in `self` if `errp` is not NULL;
+ /// then consume it.
+ ///
+ /// This is similar to the C API `error_propagate`, but it panics if
+ /// `*errp` is not `NULL`.
+ ///
+ /// # Safety
+ ///
+ /// `errp` must be a valid argument to `error_propagate`; it can be
+ /// `NULL` or it can point to any of:
+ /// * `error_abort`
+ /// * `error_fatal`
+ /// * a local variable of (C) type `Error *`
+ ///
+ /// Typically `errp` is received from C code and need not be
+ /// checked further at the Rust↔C boundary.
+ pub unsafe fn propagate(self, errp: *mut *mut bindings::Error) {
+ if errp.is_null() {
+ return;
+ }
+
+ // SAFETY: caller guarantees that errp and *errp are valid
+ unsafe {
+ assert_eq!(*errp, ptr::null_mut());
+ bindings::error_propagate(errp, self.clone_to_foreign_ptr());
+ }
+ }
+
+ /// Convert a C `Error*` into a Rust `Result`, using
+ /// `Ok(())` if `c_error` is NULL. Free the `Error*`.
+ ///
+ /// # Safety
+ ///
+ /// `c_error` must be `NULL` or valid; typically it was initialized
+ /// with `ptr::null_mut()` and passed by reference to a C function.
+ pub unsafe fn err_or_unit(c_error: *mut bindings::Error) -> Result<()> {
+ // SAFETY: caller guarantees c_error is valid
+ unsafe { Self::err_or_else(c_error, || ()) }
+ }
+
+ /// Convert a C `Error*` into a Rust `Result`, calling `f()` to
+ /// obtain an `Ok` value if `c_error` is NULL. Free the `Error*`.
+ ///
+ /// # Safety
+ ///
+ /// `c_error` must be `NULL` or point to a valid C [`struct
+ /// Error`](bindings::Error); typically it was initialized with
+ /// `ptr::null_mut()` and passed by reference to a C function.
+ pub unsafe fn err_or_else<T, F: FnOnce() -> T>(
+ c_error: *mut bindings::Error,
+ f: F,
+ ) -> Result<T> {
+ // SAFETY: caller guarantees c_error is valid
+ let err = unsafe { Option::<Self>::from_foreign(c_error) };
+ match err {
+ None => Ok(f()),
+ Some(err) => Err(err),
+ }
+ }
+}
+
+impl FreeForeign for Error {
+ type Foreign = bindings::Error;
+
+ unsafe fn free_foreign(p: *mut bindings::Error) {
+ // SAFETY: caller guarantees p is valid
+ unsafe {
+ bindings::error_free(p);
+ }
+ }
+}
+
+impl CloneToForeign for Error {
+ fn clone_to_foreign(&self) -> OwnedPointer<Self> {
+ // SAFETY: all arguments are controlled by this function
+ unsafe {
+ let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>());
+ let err: &mut bindings::Error = &mut *err.cast();
+ *err = bindings::Error {
+ msg: format!("{self}").clone_to_foreign_ptr(),
+ err_class: bindings::ERROR_CLASS_GENERIC_ERROR,
+ src_len: self.file.len() as c_int,
+ src: self.file.as_ptr().cast::<c_char>(),
+ line: self.line as c_int,
+ func: ptr::null_mut(),
+ hint: ptr::null_mut(),
+ };
+ OwnedPointer::new(err)
+ }
+ }
+}
+
+impl FromForeign for Error {
+ unsafe fn cloned_from_foreign(c_error: *const bindings::Error) -> Self {
+ // SAFETY: caller guarantees c_error is valid
+ unsafe {
+ let error = &*c_error;
+ let file = if error.src_len < 0 {
+ // NUL-terminated
+ CStr::from_ptr(error.src).to_str()
+ } else {
+ // Can become str::from_utf8 with Rust 1.87.0
+ std::str::from_utf8(std::slice::from_raw_parts(
+ &*error.src.cast::<u8>(),
+ error.src_len as usize,
+ ))
+ };
+
+ Error {
+ msg: FromForeign::cloned_from_foreign(error.msg),
+ cause: None,
+ file: file.unwrap(),
+ line: error.line as u32,
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::ffi::CStr;
+
+ use anyhow::anyhow;
+ use foreign::OwnedPointer;
+
+ use super::*;
+ use crate::{assert_match, bindings};
+
+ #[track_caller]
+ fn error_for_test(msg: &CStr) -> OwnedPointer<Error> {
+ // SAFETY: all arguments are controlled by this function
+ let location = panic::Location::caller();
+ unsafe {
+ let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>());
+ let err: &mut bindings::Error = &mut *err.cast();
+ *err = bindings::Error {
+ msg: msg.clone_to_foreign_ptr(),
+ err_class: bindings::ERROR_CLASS_GENERIC_ERROR,
+ src_len: location.file().len() as c_int,
+ src: location.file().as_ptr().cast::<c_char>(),
+ line: location.line() as c_int,
+ func: ptr::null_mut(),
+ hint: ptr::null_mut(),
+ };
+ OwnedPointer::new(err)
+ }
+ }
+
+ unsafe fn error_get_pretty<'a>(local_err: *mut bindings::Error) -> &'a CStr {
+ unsafe { CStr::from_ptr(bindings::error_get_pretty(local_err)) }
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ fn test_description() {
+ use std::error::Error;
+
+ assert_eq!(super::Error::from("msg").description(), "msg");
+ assert_eq!(super::Error::from("msg".to_owned()).description(), "msg");
+ }
+
+ #[test]
+ fn test_display() {
+ assert_eq!(&*format!("{}", Error::from("msg")), "msg");
+ assert_eq!(&*format!("{}", Error::from("msg".to_owned())), "msg");
+ assert_eq!(&*format!("{}", Error::from(anyhow!("msg"))), "msg");
+
+ assert_eq!(
+ &*format!("{}", Error::with_error("msg", anyhow!("cause"))),
+ "msg: cause"
+ );
+ }
+
+ #[test]
+ fn test_bool_or_propagate() {
+ unsafe {
+ let mut local_err: *mut bindings::Error = ptr::null_mut();
+
+ assert!(Error::bool_or_propagate(Ok(()), &mut local_err));
+ assert_eq!(local_err, ptr::null_mut());
+
+ let my_err = Error::from("msg");
+ assert!(!Error::bool_or_propagate(Err(my_err), &mut local_err));
+ assert_ne!(local_err, ptr::null_mut());
+ assert_eq!(error_get_pretty(local_err), c"msg");
+ bindings::error_free(local_err);
+ }
+ }
+
+ #[test]
+ fn test_ptr_or_propagate() {
+ unsafe {
+ let mut local_err: *mut bindings::Error = ptr::null_mut();
+
+ let ret = Error::ptr_or_propagate(Ok("abc".to_owned()), &mut local_err);
+ assert_eq!(String::from_foreign(ret), "abc");
+ assert_eq!(local_err, ptr::null_mut());
+
+ let my_err = Error::from("msg");
+ assert_eq!(
+ Error::ptr_or_propagate(Err::<String, _>(my_err), &mut local_err),
+ ptr::null_mut()
+ );
+ assert_ne!(local_err, ptr::null_mut());
+ assert_eq!(error_get_pretty(local_err), c"msg");
+ bindings::error_free(local_err);
+ }
+ }
+
+ #[test]
+ fn test_err_or_unit() {
+ unsafe {
+ let result = Error::err_or_unit(ptr::null_mut());
+ assert_match!(result, Ok(()));
+
+ let err = error_for_test(c"msg");
+ let err = Error::err_or_unit(err.into_inner()).unwrap_err();
+ assert_eq!(&*format!("{err}"), "msg");
+ }
+ }
+}
diff --git a/rust/qemu-api/src/irq.rs b/rust/qemu-api/src/irq.rs
new file mode 100644
index 0000000..1526e6f
--- /dev/null
+++ b/rust/qemu-api/src/irq.rs
@@ -0,0 +1,115 @@
+// Copyright 2024 Red Hat, Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings for interrupt sources
+
+use std::{
+ ffi::{c_int, CStr},
+ marker::PhantomData,
+ ptr,
+};
+
+use crate::{
+ bindings::{self, qemu_set_irq},
+ cell::Opaque,
+ prelude::*,
+ qom::ObjectClass,
+};
+
+/// An opaque wrapper around [`bindings::IRQState`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct IRQState(Opaque<bindings::IRQState>);
+
+/// Interrupt sources are used by devices to pass changes to a value (typically
+/// a boolean). The interrupt sink is usually an interrupt controller or
+/// GPIO controller.
+///
+/// As far as devices are concerned, interrupt sources are always active-high:
+/// for example, `InterruptSource<bool>`'s [`raise`](InterruptSource::raise)
+/// method sends a `true` value to the sink. If the guest has to see a
+/// different polarity, that change is performed by the board between the
+/// device and the interrupt controller.
+///
+/// Interrupts are implemented as a pointer to the interrupt "sink", which has
+/// type [`IRQState`]. A device exposes its source as a QOM link property using
+/// a function such as [`SysBusDeviceMethods::init_irq`], and
+/// initially leaves the pointer to a NULL value, representing an unconnected
+/// interrupt. To connect it, whoever creates the device fills the pointer with
+/// the sink's `IRQState *`, for example using `sysbus_connect_irq`. Because
+/// devices are generally shared objects, interrupt sources are an example of
+/// the interior mutability pattern.
+///
+/// Interrupt sources can only be triggered under the Big QEMU Lock; `BqlCell`
+/// allows access from whatever thread has it.
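+///
+/// # Examples
+///
+/// A sketch of how a device updates its output line; the `irq` field and
+/// the `pending()` method belong to a hypothetical device struct whose
+/// interrupt was exposed with [`SysBusDeviceMethods::init_irq`]:
+///
+/// ```ignore
+/// fn update_irq(&self) {
+///     if self.pending() {
+///         self.irq.raise();
+///     } else {
+///         self.irq.lower();
+///     }
+/// }
+/// ```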
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct InterruptSource<T = bool>
+where
+ c_int: From<T>,
+{
+ cell: BqlCell<*mut bindings::IRQState>,
+ _marker: PhantomData<T>,
+}
+
+// SAFETY: the implementation asserts via `BqlCell` that the BQL is taken
+unsafe impl<T> Sync for InterruptSource<T> where c_int: From<T> {}
+
+impl InterruptSource<bool> {
+ /// Send a low (`false`) value to the interrupt sink.
+ pub fn lower(&self) {
+ self.set(false);
+ }
+
+ /// Send a high-low pulse to the interrupt sink.
+ pub fn pulse(&self) {
+ self.set(true);
+ self.set(false);
+ }
+
+ /// Send a high (`true`) value to the interrupt sink.
+ pub fn raise(&self) {
+ self.set(true);
+ }
+}
+
+impl<T> InterruptSource<T>
+where
+ c_int: From<T>,
+{
+ /// Send `level` to the interrupt sink.
+ pub fn set(&self, level: T) {
+ let ptr = self.cell.get();
+ // SAFETY: the pointer is retrieved under the BQL and remains valid
+ // until the BQL is released, which is after qemu_set_irq() is entered.
+ unsafe {
+ qemu_set_irq(ptr, level.into());
+ }
+ }
+
+ pub(crate) const fn as_ptr(&self) -> *mut *mut bindings::IRQState {
+ self.cell.as_ptr()
+ }
+
+ pub(crate) const fn slice_as_ptr(slice: &[Self]) -> *mut *mut bindings::IRQState {
+ assert!(!slice.is_empty());
+ slice[0].as_ptr()
+ }
+}
+
+impl Default for InterruptSource {
+ fn default() -> Self {
+ InterruptSource {
+ cell: BqlCell::new(ptr::null_mut()),
+ _marker: PhantomData,
+ }
+ }
+}
+
+unsafe impl ObjectType for IRQState {
+ type Class = ObjectClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_IRQ) };
+}
+qom_isa!(IRQState: Object);
diff --git a/rust/qemu-api/src/lib.rs b/rust/qemu-api/src/lib.rs
new file mode 100644
index 0000000..86dcd8e
--- /dev/null
+++ b/rust/qemu-api/src/lib.rs
@@ -0,0 +1,170 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#![cfg_attr(not(MESON), doc = include_str!("../README.md"))]
+#![deny(clippy::missing_const_for_fn)]
+
+#[rustfmt::skip]
+pub mod bindings;
+
+// preserve one-item-per-"use" syntax, it is clearer
+// for prelude-like modules
+#[rustfmt::skip]
+pub mod prelude;
+
+pub mod assertions;
+pub mod bitops;
+pub mod callbacks;
+pub mod cell;
+pub mod chardev;
+pub mod errno;
+pub mod error;
+pub mod irq;
+pub mod log;
+pub mod memory;
+pub mod module;
+pub mod qdev;
+pub mod qom;
+pub mod sysbus;
+pub mod timer;
+pub mod uninit;
+pub mod vmstate;
+pub mod zeroable;
+
+use std::{
+ alloc::{GlobalAlloc, Layout},
+ ffi::c_void,
+};
+
+pub use error::{Error, Result};
+
+#[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)]
+extern "C" {
+ fn g_aligned_alloc0(
+ n_blocks: bindings::gsize,
+ n_block_bytes: bindings::gsize,
+ alignment: bindings::gsize,
+ ) -> bindings::gpointer;
+ fn g_aligned_free(mem: bindings::gpointer);
+}
+
+#[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))]
+extern "C" {
+ fn qemu_memalign(alignment: usize, size: usize) -> *mut c_void;
+ fn qemu_vfree(ptr: *mut c_void);
+}
+
+extern "C" {
+ fn g_malloc0(n_bytes: bindings::gsize) -> bindings::gpointer;
+ fn g_free(mem: bindings::gpointer);
+}
+
+/// An allocator that uses the same allocator as QEMU in C.
+///
+/// It is enabled by default with the `allocator` feature.
+///
+/// To set it up manually as a global allocator in your crate:
+///
+/// ```ignore
+/// use qemu_api::QemuAllocator;
+///
+/// #[global_allocator]
+/// static GLOBAL: QemuAllocator = QemuAllocator::new();
+/// ```
+#[derive(Clone, Copy, Debug)]
+#[repr(C)]
+pub struct QemuAllocator {
+ _unused: [u8; 0],
+}
+
+#[cfg_attr(all(feature = "allocator", not(test)), global_allocator)]
+pub static GLOBAL: QemuAllocator = QemuAllocator::new();
+
+impl QemuAllocator {
+ // From the glibc documentation, on GNU systems, malloc guarantees 16-byte
+ // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
+ // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
+ // This alignment guarantee also applies to Windows and Android. On Darwin
+ // and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems.
+ #[cfg(all(
+ target_pointer_width = "32",
+ not(any(target_os = "macos", target_os = "openbsd"))
+ ))]
+ pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(8);
+ #[cfg(all(
+ target_pointer_width = "64",
+ not(any(target_os = "macos", target_os = "openbsd"))
+ ))]
+ pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(16);
+ #[cfg(all(
+ any(target_pointer_width = "32", target_pointer_width = "64"),
+ any(target_os = "macos", target_os = "openbsd")
+ ))]
+ pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = Some(16);
+ #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
+ pub const DEFAULT_ALIGNMENT_BYTES: Option<usize> = None;
+
+ pub const fn new() -> Self {
+ Self { _unused: [] }
+ }
+}
+
+impl Default for QemuAllocator {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// Sanity check: the allocator code assumes a 64-bit target (8-byte pointers).
+const _: [(); 8] = [(); ::core::mem::size_of::<*mut c_void>()];
+
+unsafe impl GlobalAlloc for QemuAllocator {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
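+ // If g_malloc()'s guaranteed alignment (DEFAULT_ALIGNMENT_BYTES) already
+ // satisfies the requested alignment, plain g_malloc0() is enough;
+ // otherwise fall back to an explicitly aligned allocation below.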
+ if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0))
+ {
+ // SAFETY: g_malloc0() is safe to call.
+ unsafe { g_malloc0(layout.size().try_into().unwrap()).cast::<u8>() }
+ } else {
+ #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)]
+ {
+ // SAFETY: g_aligned_alloc0() is safe to call.
+ unsafe {
+ g_aligned_alloc0(
+ layout.size().try_into().unwrap(),
+ 1,
+ layout.align().try_into().unwrap(),
+ )
+ .cast::<u8>()
+ }
+ }
+ #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))]
+ {
+ // SAFETY: qemu_memalign() is safe to call.
+ unsafe { qemu_memalign(layout.align(), layout.size()).cast::<u8>() }
+ }
+ }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0))
+ {
+ // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid
+ // glib-allocated pointer, so `g_free`ing is safe.
+ unsafe { g_free(ptr.cast::<_>()) }
+ } else {
+ #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)]
+ {
+ // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned
+ // glib-allocated pointer, so `g_aligned_free`ing is safe.
+ unsafe { g_aligned_free(ptr.cast::<_>()) }
+ }
+ #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))]
+ {
+ // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned
+ // glib-allocated pointer, so `qemu_vfree`ing is safe.
+ unsafe { qemu_vfree(ptr.cast::<_>()) }
+ }
+ }
+ }
+}
diff --git a/rust/qemu-api/src/log.rs b/rust/qemu-api/src/log.rs
new file mode 100644
index 0000000..d6c3d6c
--- /dev/null
+++ b/rust/qemu-api/src/log.rs
@@ -0,0 +1,73 @@
+// Copyright 2025 Bernhard Beschow <shentey@gmail.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings for QEMU's logging infrastructure
+
+#[repr(u32)]
+/// Represents specific error categories within QEMU's logging system.
+///
+/// The `Log` enum provides a Rust abstraction for logging errors, corresponding
+/// to a subset of the error categories defined in the C implementation.
+pub enum Log {
+ /// Log invalid access caused by the guest.
+ /// Corresponds to `LOG_GUEST_ERROR` in the C implementation.
+ GuestError = crate::bindings::LOG_GUEST_ERROR,
+
+ /// Log guest access of unimplemented functionality.
+ /// Corresponds to `LOG_UNIMP` in the C implementation.
+ Unimp = crate::bindings::LOG_UNIMP,
+}
+
+/// A macro to log messages conditionally based on a provided mask.
+///
+/// The `log_mask_ln` macro checks whether the given mask matches the current
+/// log level and, if so, formats and logs the message. It is the Rust
+/// counterpart of the `qemu_log_mask()` macro in the C implementation.
+///
+/// # Parameters
+///
+/// - `$mask`: A log level mask. This should be a variant of the `Log` enum.
+/// - `$fmt`: A format string following the syntax and rules of the `format!`
+/// macro. It specifies the structure of the log message.
+/// - `$args`: Optional arguments to be interpolated into the format string.
+///
+/// # Example
+///
+/// ```
+/// use qemu_api::{log::Log, log_mask_ln};
+///
+/// let error_address = 0xbad;
+/// log_mask_ln!(Log::GuestError, "Address 0x{error_address:x} out of range");
+/// ```
+///
+/// It is also possible to use printf-style formatting, as well as having a
+/// trailing `,`:
+///
+/// ```
+/// use qemu_api::{log::Log, log_mask_ln};
+///
+/// let error_address = 0xbad;
+/// log_mask_ln!(
+/// Log::GuestError,
+/// "Address 0x{:x} out of range",
+/// error_address,
+/// );
+/// ```
+#[macro_export]
+macro_rules! log_mask_ln {
+ ($mask:expr, $fmt:tt $($args:tt)*) => {{
+ // Type assertion to enforce type `Log` for $mask
+ let _: Log = $mask;
+
+ if unsafe {
+ (::qemu_api::bindings::qemu_loglevel & ($mask as std::os::raw::c_int)) != 0
+ } {
+ let formatted_string = format!("{}\n", format_args!($fmt $($args)*));
+ let c_string = std::ffi::CString::new(formatted_string).unwrap();
+
+ unsafe {
+ ::qemu_api::bindings::qemu_log(c_string.as_ptr());
+ }
+ }
+ }};
+}
diff --git a/rust/qemu-api/src/memory.rs b/rust/qemu-api/src/memory.rs
new file mode 100644
index 0000000..e40fad6
--- /dev/null
+++ b/rust/qemu-api/src/memory.rs
@@ -0,0 +1,204 @@
+// Copyright 2024 Red Hat, Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings for `MemoryRegion`, `MemoryRegionOps` and `MemTxAttrs`
+
+use std::{
+ ffi::{c_uint, c_void, CStr, CString},
+ marker::PhantomData,
+};
+
+pub use bindings::{hwaddr, MemTxAttrs};
+
+use crate::{
+ bindings::{self, device_endian, memory_region_init_io},
+ callbacks::FnCall,
+ cell::Opaque,
+ prelude::*,
+ uninit::MaybeUninitField,
+ zeroable::Zeroable,
+};
+
+pub struct MemoryRegionOps<T>(
+ bindings::MemoryRegionOps,
+ // Note: quite often you'll see PhantomData<fn(&T)> mentioned when discussing
+ // covariance and contravariance; you don't need any of those to understand
+ // this usage of PhantomData. Quite simply, MemoryRegionOps<T> *logically*
+ // holds callbacks that take an argument of type &T, except the type is erased
+ // before the callback is stored in the bindings::MemoryRegionOps field.
+ // The argument of PhantomData is a function pointer in order to represent
+ // that relationship; while that will also provide desirable and safe variance
+ // for T, variance is not the point but just a consequence.
+ PhantomData<fn(&T)>,
+);
+
+// SAFETY: When a *const T is passed to the callbacks, the call itself
+// is done in a thread-safe manner. The invocation is okay as long as
+// T itself is `Sync`.
+unsafe impl<T: Sync> Sync for MemoryRegionOps<T> {}
+
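+/// Builder for a [`MemoryRegionOps`], which is typically stored in a
+/// `static` and passed to [`MemoryRegion::init_io`]. A sketch of the
+/// intended use; `MyDevice` and its `read`/`write` methods are
+/// hypothetical:
+///
+/// ```ignore
+/// static MY_DEVICE_OPS: MemoryRegionOps<MyDevice> = MemoryRegionOpsBuilder::<MyDevice>::new()
+///     .read(&MyDevice::read)
+///     .write(&MyDevice::write)
+///     .native_endian()
+///     .impl_sizes(4, 4)
+///     .build();
+/// ```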
+#[derive(Clone)]
+pub struct MemoryRegionOpsBuilder<T>(bindings::MemoryRegionOps, PhantomData<fn(&T)>);
+
+unsafe extern "C" fn memory_region_ops_read_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(
+ opaque: *mut c_void,
+ addr: hwaddr,
+ size: c_uint,
+) -> u64 {
+ F::call((unsafe { &*(opaque.cast::<T>()) }, addr, size))
+}
+
+unsafe extern "C" fn memory_region_ops_write_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(
+ opaque: *mut c_void,
+ addr: hwaddr,
+ data: u64,
+ size: c_uint,
+) {
+ F::call((unsafe { &*(opaque.cast::<T>()) }, addr, data, size))
+}
+
+impl<T> MemoryRegionOpsBuilder<T> {
+ #[must_use]
+ pub const fn read<F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self {
+ self.0.read = Some(memory_region_ops_read_cb::<T, F>);
+ self
+ }
+
+ #[must_use]
+ pub const fn write<F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self {
+ self.0.write = Some(memory_region_ops_write_cb::<T, F>);
+ self
+ }
+
+ #[must_use]
+ pub const fn big_endian(mut self) -> Self {
+ self.0.endianness = device_endian::DEVICE_BIG_ENDIAN;
+ self
+ }
+
+ #[must_use]
+ pub const fn little_endian(mut self) -> Self {
+ self.0.endianness = device_endian::DEVICE_LITTLE_ENDIAN;
+ self
+ }
+
+ #[must_use]
+ pub const fn native_endian(mut self) -> Self {
+ self.0.endianness = device_endian::DEVICE_NATIVE_ENDIAN;
+ self
+ }
+
+ #[must_use]
+ pub const fn valid_sizes(mut self, min: u32, max: u32) -> Self {
+ self.0.valid.min_access_size = min;
+ self.0.valid.max_access_size = max;
+ self
+ }
+
+ #[must_use]
+ pub const fn valid_unaligned(mut self) -> Self {
+ self.0.valid.unaligned = true;
+ self
+ }
+
+ #[must_use]
+ pub const fn impl_sizes(mut self, min: u32, max: u32) -> Self {
+ self.0.impl_.min_access_size = min;
+ self.0.impl_.max_access_size = max;
+ self
+ }
+
+ #[must_use]
+ pub const fn impl_unaligned(mut self) -> Self {
+ self.0.impl_.unaligned = true;
+ self
+ }
+
+ #[must_use]
+ pub const fn build(self) -> MemoryRegionOps<T> {
+ MemoryRegionOps::<T>(self.0, PhantomData)
+ }
+
+ #[must_use]
+ pub const fn new() -> Self {
+ Self(bindings::MemoryRegionOps::ZERO, PhantomData)
+ }
+}
+
+impl<T> Default for MemoryRegionOpsBuilder<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+/// A safe wrapper around [`bindings::MemoryRegion`].
+#[repr(transparent)]
+#[derive(qemu_api_macros::Wrapper)]
+pub struct MemoryRegion(Opaque<bindings::MemoryRegion>);
+
+unsafe impl Send for MemoryRegion {}
+unsafe impl Sync for MemoryRegion {}
+
+impl MemoryRegion {
+ // inline to ensure that it is not included in tests, which only
+ // link to hwcore and qom. FIXME: inlining is actually the opposite
+ // of what we want, since this is the type-erased version of the
+ // init_io function below. Look into splitting the qemu_api crate.
+ #[inline(always)]
+ unsafe fn do_init_io(
+ slot: *mut bindings::MemoryRegion,
+ owner: *mut bindings::Object,
+ ops: &'static bindings::MemoryRegionOps,
+ name: &'static str,
+ size: u64,
+ ) {
+ unsafe {
+ let cstr = CString::new(name).unwrap();
+ memory_region_init_io(
+ slot,
+ owner,
+ ops,
+ owner.cast::<c_void>(),
+ cstr.as_ptr(),
+ size,
+ );
+ }
+ }
+
+ pub fn init_io<T: IsA<Object>>(
+ this: &mut MaybeUninitField<'_, T, Self>,
+ ops: &'static MemoryRegionOps<T>,
+ name: &'static str,
+ size: u64,
+ ) {
+ unsafe {
+ Self::do_init_io(
+ this.as_mut_ptr().cast(),
+ MaybeUninitField::parent_mut(this).cast(),
+ &ops.0,
+ name,
+ size,
+ );
+ }
+ }
+}
+
+unsafe impl ObjectType for MemoryRegion {
+ type Class = bindings::MemoryRegionClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) };
+}
+qom_isa!(MemoryRegion: Object);
+
+/// A special `MemTxAttrs` constant, used to indicate that no memory
+/// attributes are specified.
+///
+/// Bus masters which don't specify any attributes will get this,
+/// which has all attribute bits clear except the topmost one
+/// (so that we can distinguish "all attributes deliberately clear"
+/// from "didn't specify" if necessary).
+pub const MEMTXATTRS_UNSPECIFIED: MemTxAttrs = MemTxAttrs {
+ unspecified: true,
+ ..Zeroable::ZERO
+};
diff --git a/rust/qemu-api/src/module.rs b/rust/qemu-api/src/module.rs
new file mode 100644
index 0000000..fa5cea3
--- /dev/null
+++ b/rust/qemu-api/src/module.rs
@@ -0,0 +1,43 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Macro to register blocks of code that run as QEMU starts up.
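+//!
+//! For example (sketch; the body of the block is hypothetical):
+//!
+//! ```ignore
+//! module_init! {
+//!     MODULE_INIT_QOM => unsafe {
+//!         // e.g. register a QOM type with the C type system here
+//!     }
+//! }
+//! ```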
+
+#[macro_export]
+macro_rules! module_init {
+ ($type:ident => $body:block) => {
+ const _: () = {
+ #[used]
+ #[cfg_attr(
+ not(any(target_vendor = "apple", target_os = "windows")),
+ link_section = ".init_array"
+ )]
+ #[cfg_attr(target_vendor = "apple", link_section = "__DATA,__mod_init_func")]
+ #[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")]
+ pub static LOAD_MODULE: extern "C" fn() = {
+ extern "C" fn init_fn() {
+ $body
+ }
+
+ extern "C" fn ctor_fn() {
+ unsafe {
+ $crate::bindings::register_module_init(
+ Some(init_fn),
+ $crate::bindings::module_init_type::$type,
+ );
+ }
+ }
+
+ ctor_fn
+ };
+ };
+ };
+
+ // shortcut because it's quite common that $body needs unsafe {}
+ ($type:ident => unsafe $body:block) => {
+ $crate::module_init! {
+ $type => { unsafe { $body } }
+ }
+ };
+}
diff --git a/rust/qemu-api/src/prelude.rs b/rust/qemu-api/src/prelude.rs
new file mode 100644
index 0000000..8f9e23e
--- /dev/null
+++ b/rust/qemu-api/src/prelude.rs
@@ -0,0 +1,31 @@
+// Copyright 2024 Red Hat, Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Commonly used traits and types for QEMU.
+
+pub use crate::bitops::IntegerExt;
+
+pub use crate::cell::BqlCell;
+pub use crate::cell::BqlRefCell;
+
+pub use crate::errno;
+
+pub use crate::log_mask_ln;
+
+pub use crate::qdev::DeviceMethods;
+
+pub use crate::qom::InterfaceType;
+pub use crate::qom::IsA;
+pub use crate::qom::Object;
+pub use crate::qom::ObjectCast;
+pub use crate::qom::ObjectDeref;
+pub use crate::qom::ObjectClassMethods;
+pub use crate::qom::ObjectMethods;
+pub use crate::qom::ObjectType;
+
+pub use crate::qom_isa;
+
+pub use crate::sysbus::SysBusDeviceMethods;
+
+pub use crate::vmstate::VMState;
diff --git a/rust/qemu-api/src/qdev.rs b/rust/qemu-api/src/qdev.rs
new file mode 100644
index 0000000..36f02fb
--- /dev/null
+++ b/rust/qemu-api/src/qdev.rs
@@ -0,0 +1,410 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings to create devices and access device functionality from Rust.
+
+use std::{
+ ffi::{c_int, c_void, CStr, CString},
+ ptr::NonNull,
+};
+
+pub use bindings::{ClockEvent, DeviceClass, Property, ResetType};
+
+use crate::{
+ bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, ResettableClass},
+ callbacks::FnCall,
+ cell::{bql_locked, Opaque},
+ chardev::Chardev,
+ error::{Error, Result},
+ irq::InterruptSource,
+ prelude::*,
+ qom::{ObjectClass, ObjectImpl, Owned, ParentInit},
+ vmstate::VMStateDescription,
+};
+
+/// A safe wrapper around [`bindings::Clock`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Clock(Opaque<bindings::Clock>);
+
+unsafe impl Send for Clock {}
+unsafe impl Sync for Clock {}
+
+/// A safe wrapper around [`bindings::DeviceState`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct DeviceState(Opaque<bindings::DeviceState>);
+
+unsafe impl Send for DeviceState {}
+unsafe impl Sync for DeviceState {}
+
+/// Trait providing the contents of the `ResettablePhases` struct,
+/// which is part of the QOM `Resettable` interface.
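+///
+/// A sketch of a typical implementation; `MyDevice` and its `reset_hold`
+/// method are hypothetical:
+///
+/// ```ignore
+/// impl ResettablePhasesImpl for MyDevice {
+///     const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
+/// }
+/// ```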
+pub trait ResettablePhasesImpl {
+ /// If not None, this is called when the object enters reset. It
+ /// can reset local state of the object, but it must not do anything that
+ /// has a side-effect on other objects, such as raising or lowering an
+ /// [`InterruptSource`], or reading or writing guest memory. It takes the
+ /// reset's type as argument.
+ const ENTER: Option<fn(&Self, ResetType)> = None;
+
+ /// If not None, this is called for entry into reset, once
+ /// every object in the system which is being reset has had its
+ /// `ResettablePhasesImpl::ENTER` method called. At this point devices
+ /// can do actions that affect other objects.
+ ///
+ /// If in doubt, implement this method.
+ const HOLD: Option<fn(&Self, ResetType)> = None;
+
+ /// If not None, this phase is called when the object leaves the reset
+ /// state. Actions affecting other objects are permitted.
+ const EXIT: Option<fn(&Self, ResetType)> = None;
+}
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcasted to type `T`. We also expect the device is
+/// readable/writeable from one thread at any time.
+unsafe extern "C" fn rust_resettable_enter_fn<T: ResettablePhasesImpl>(
+ obj: *mut bindings::Object,
+ typ: ResetType,
+) {
+ let state = NonNull::new(obj).unwrap().cast::<T>();
+ T::ENTER.unwrap()(unsafe { state.as_ref() }, typ);
+}
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcasted to type `T`. We also expect the device is
+/// readable/writeable from one thread at any time.
+unsafe extern "C" fn rust_resettable_hold_fn<T: ResettablePhasesImpl>(
+ obj: *mut bindings::Object,
+ typ: ResetType,
+) {
+ let state = NonNull::new(obj).unwrap().cast::<T>();
+ T::HOLD.unwrap()(unsafe { state.as_ref() }, typ);
+}
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcasted to type `T`. We also expect the device is
+/// readable/writeable from one thread at any time.
+unsafe extern "C" fn rust_resettable_exit_fn<T: ResettablePhasesImpl>(
+ obj: *mut bindings::Object,
+ typ: ResetType,
+) {
+ let state = NonNull::new(obj).unwrap().cast::<T>();
+ T::EXIT.unwrap()(unsafe { state.as_ref() }, typ);
+}
+
+/// Trait providing the contents of [`DeviceClass`].
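+///
+/// A sketch of a typical implementation; `MyDevice`, the referenced
+/// statics and the `realize` method are hypothetical:
+///
+/// ```ignore
+/// impl DeviceImpl for MyDevice {
+///     fn properties() -> &'static [Property] {
+///         &MY_DEVICE_PROPERTIES
+///     }
+///     fn vmsd() -> Option<&'static VMStateDescription> {
+///         Some(&VMSTATE_MY_DEVICE)
+///     }
+///     const REALIZE: Option<fn(&Self) -> Result<()>> = Some(Self::realize);
+/// }
+/// ```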
+pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA<DeviceState> {
+ /// _Realization_ is the second stage of device creation. It contains
+ /// all operations that depend on device properties and can fail; errors
+ /// are propagated to the caller through the returned [`Result`].
+ ///
+ /// If not `None`, the parent class's `realize` method is overridden
+ /// with the function pointed to by `REALIZE`.
+ const REALIZE: Option<fn(&Self) -> Result<()>> = None;
+
+ /// An array providing the properties that the user can set on the
+ /// device. Not a `const` because referencing statics in constants
+ /// is unstable until Rust 1.83.0.
+ fn properties() -> &'static [Property] {
+ &[]
+ }
+
+ /// A `VMStateDescription` providing the migration format for the device.
+ /// Not a `const` because referencing statics in constants is unstable
+ /// until Rust 1.83.0.
+ fn vmsd() -> Option<&'static VMStateDescription> {
+ None
+ }
+}
+
+/// # Safety
+///
+/// This function is only called through the QOM machinery and
+/// used by `DeviceClass::class_init`.
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcast to type `T`. We also expect the device to be
+/// readable/writable from only one thread at any given time.
+unsafe extern "C" fn rust_realize_fn<T: DeviceImpl>(
+ dev: *mut bindings::DeviceState,
+ errp: *mut *mut bindings::Error,
+) {
+ let state = NonNull::new(dev).unwrap().cast::<T>();
+ let result = T::REALIZE.unwrap()(unsafe { state.as_ref() });
+ unsafe {
+ Error::ok_or_propagate(result, errp);
+ }
+}
+
+unsafe impl InterfaceType for ResettableClass {
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_RESETTABLE_INTERFACE) };
+}
+
+impl ResettableClass {
+ /// Fill in the virtual methods of `ResettableClass` based on the
+ /// definitions in the `ResettablePhasesImpl` trait.
+ pub fn class_init<T: ResettablePhasesImpl>(&mut self) {
+ if <T as ResettablePhasesImpl>::ENTER.is_some() {
+ self.phases.enter = Some(rust_resettable_enter_fn::<T>);
+ }
+ if <T as ResettablePhasesImpl>::HOLD.is_some() {
+ self.phases.hold = Some(rust_resettable_hold_fn::<T>);
+ }
+ if <T as ResettablePhasesImpl>::EXIT.is_some() {
+ self.phases.exit = Some(rust_resettable_exit_fn::<T>);
+ }
+ }
+}
+
+impl DeviceClass {
+ /// Fill in the virtual methods of `DeviceClass` based on the definitions in
+ /// the `DeviceImpl` trait.
+ pub fn class_init<T: DeviceImpl>(&mut self) {
+ if <T as DeviceImpl>::REALIZE.is_some() {
+ self.realize = Some(rust_realize_fn::<T>);
+ }
+ if let Some(vmsd) = <T as DeviceImpl>::vmsd() {
+ self.vmsd = vmsd;
+ }
+ let prop = <T as DeviceImpl>::properties();
+ if !prop.is_empty() {
+ unsafe {
+ bindings::device_class_set_props_n(self, prop.as_ptr(), prop.len());
+ }
+ }
+
+ ResettableClass::cast::<DeviceState>(self).class_init::<T>();
+ self.parent_class.class_init::<T>();
+ }
+}
+
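+/// Build a single [`Property`] entry for a device's state struct. The
+/// example below is a hedged sketch: `MyDeviceState` and its `migrate_clock`
+/// field are placeholders, a C-string literal is used for the property name,
+/// and the `qdev_prop_bool` `PropertyInfo` is assumed to be re-exported by
+/// `bindings`.
+///
+/// ```ignore
+/// declare_properties! {
+/// MY_DEVICE_PROPERTIES,
+/// define_property!(
+/// c"migrate-clk",
+/// MyDeviceState,
+/// migrate_clock,
+/// unsafe { &bindings::qdev_prop_bool },
+/// bool,
+/// default = true
+/// ),
+/// }
+/// ```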
+#[macro_export]
+macro_rules! define_property {
+ ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, bit = $bitnr:expr, default = $defval:expr$(,)*) => {
+ $crate::bindings::Property {
+ // use associated function syntax for type checking
+ name: ::std::ffi::CStr::as_ptr($name),
+ info: $prop,
+ offset: ::std::mem::offset_of!($state, $field) as isize,
+ bitnr: $bitnr,
+ set_default: true,
+ defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 },
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ };
+ ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, default = $defval:expr$(,)*) => {
+ $crate::bindings::Property {
+ // use associated function syntax for type checking
+ name: ::std::ffi::CStr::as_ptr($name),
+ info: $prop,
+ offset: ::std::mem::offset_of!($state, $field) as isize,
+ set_default: true,
+ defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 },
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ };
+ ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty$(,)*) => {
+ $crate::bindings::Property {
+ // use associated function syntax for type checking
+ name: ::std::ffi::CStr::as_ptr($name),
+ info: $prop,
+ offset: ::std::mem::offset_of!($state, $field) as isize,
+ set_default: false,
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! declare_properties {
+ ($ident:ident, $($prop:expr),*$(,)*) => {
+ pub static $ident: [$crate::bindings::Property; {
+ let mut len = 0;
+ $({
+ _ = stringify!($prop);
+ len += 1;
+ })*
+ len
+ }] = [
+ $($prop),*,
+ ];
+ };
+}
+
+unsafe impl ObjectType for DeviceState {
+ type Class = DeviceClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_DEVICE) };
+}
+qom_isa!(DeviceState: Object);
+
+/// Initialization methods take a [`ParentInit`] and can be called as
+/// associated functions.
+impl DeviceState {
+ /// Add an input clock named `name`. Invoke the callback with
+ /// `self` as the first parameter for the events that are requested.
+ ///
+ /// The resulting clock is added as a child of the device, but it also
+ /// stays alive until after `Drop::drop` is called because C code
+ /// keeps an extra reference to it until `device_finalize()` calls
+ /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in
+ /// which Rust code has a reference to a child object) it would be
+ /// possible for this function to return a `&Clock` too.
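+ ///
+ /// # Examples
+ ///
+ /// A sketch of typical usage from a device's `instance_init`
+ /// implementation; `MyDevice`, its `clock` field and its `clock_update`
+ /// method are hypothetical.
+ ///
+ /// ```ignore
+ /// fn init(mut this: ParentInit<Self>) {
+ /// let clock = DeviceState::init_clock_in(
+ /// &mut this, "clk", &Self::clock_update, ClockEvent::ClockUpdate);
+ /// uninit_field_mut!(*this, clock).write(clock);
+ /// }
+ /// ```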
+ #[inline]
+ pub fn init_clock_in<T: DeviceImpl, F: for<'a> FnCall<(&'a T, ClockEvent)>>(
+ this: &mut ParentInit<T>,
+ name: &str,
+ _cb: &F,
+ events: ClockEvent,
+ ) -> Owned<Clock>
+ where
+ T::ParentType: IsA<DeviceState>,
+ {
+ fn do_init_clock_in(
+ dev: &DeviceState,
+ name: &str,
+ cb: Option<unsafe extern "C" fn(*mut c_void, ClockEvent)>,
+ events: ClockEvent,
+ ) -> Owned<Clock> {
+ assert!(bql_locked());
+
+ // SAFETY: the clock is heap allocated, but qdev_init_clock_in()
+ // does not gift the reference to its caller; so use Owned::from to
+ // add one. The callback is disabled automatically when the clock
+ // is unparented, which happens before the device is finalized.
+ unsafe {
+ let cstr = CString::new(name).unwrap();
+ let clk = bindings::qdev_init_clock_in(
+ dev.0.as_mut_ptr(),
+ cstr.as_ptr(),
+ cb,
+ dev.0.as_void_ptr(),
+ events.0,
+ );
+
+ let clk: &Clock = Clock::from_raw(clk);
+ Owned::from(clk)
+ }
+ }
+
+ let cb: Option<unsafe extern "C" fn(*mut c_void, ClockEvent)> = if F::is_some() {
+ unsafe extern "C" fn rust_clock_cb<T, F: for<'a> FnCall<(&'a T, ClockEvent)>>(
+ opaque: *mut c_void,
+ event: ClockEvent,
+ ) {
+ // SAFETY: the opaque is "this", which is indeed a pointer to T
+ F::call((unsafe { &*(opaque.cast::<T>()) }, event))
+ }
+ Some(rust_clock_cb::<T, F>)
+ } else {
+ None
+ };
+
+ do_init_clock_in(unsafe { this.upcast_mut() }, name, cb, events)
+ }
+
+ /// Add an output clock named `name`.
+ ///
+ /// The resulting clock is added as a child of the device, but it also
+ /// stays alive until after `Drop::drop` is called because C code
+ /// keeps an extra reference to it until `device_finalize()` calls
+ /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in
+ /// which Rust code has a reference to a child object) it would be
+ /// possible for this function to return a `&Clock` too.
+ #[inline]
+ pub fn init_clock_out<T: DeviceImpl>(this: &mut ParentInit<T>, name: &str) -> Owned<Clock>
+ where
+ T::ParentType: IsA<DeviceState>,
+ {
+ unsafe {
+ let cstr = CString::new(name).unwrap();
+ let dev: &mut DeviceState = this.upcast_mut();
+ let clk = bindings::qdev_init_clock_out(dev.0.as_mut_ptr(), cstr.as_ptr());
+
+ let clk: &Clock = Clock::from_raw(clk);
+ Owned::from(clk)
+ }
+ }
+}
+
+/// Trait for methods exposed by the [`DeviceState`] class. The methods can be
+/// called on all objects that have the trait `IsA<DeviceState>`.
+///
+/// The trait should only be used through the blanket implementation,
+/// which guarantees safety via `IsA`.
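+///
+/// # Examples
+///
+/// A sketch of wiring GPIOs from a device's post-init hook; `MyDevice`, its
+/// `handle_irq` method and its `outbound_irqs` array are placeholders.
+///
+/// ```ignore
+/// fn post_init(&self) {
+/// // incoming lines are routed to the handler, outgoing lines are exposed
+/// self.init_gpio_in(2, MyDevice::handle_irq);
+/// self.init_gpio_out(&self.outbound_irqs);
+/// }
+/// ```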
+pub trait DeviceMethods: ObjectDeref
+where
+ Self::Target: IsA<DeviceState>,
+{
+ fn prop_set_chr(&self, propname: &str, chr: &Owned<Chardev>) {
+ assert!(bql_locked());
+ let c_propname = CString::new(propname).unwrap();
+ let chr: &Chardev = chr;
+ unsafe {
+ bindings::qdev_prop_set_chr(
+ self.upcast().as_mut_ptr(),
+ c_propname.as_ptr(),
+ chr.as_mut_ptr(),
+ );
+ }
+ }
+
+ fn init_gpio_in<F: for<'a> FnCall<(&'a Self::Target, u32, u32)>>(
+ &self,
+ num_lines: u32,
+ _cb: F,
+ ) {
+ fn do_init_gpio_in(
+ dev: &DeviceState,
+ num_lines: u32,
+ gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int),
+ ) {
+ unsafe {
+ qdev_init_gpio_in(dev.as_mut_ptr(), Some(gpio_in_cb), num_lines as c_int);
+ }
+ }
+
+ let _: () = F::ASSERT_IS_SOME;
+ unsafe extern "C" fn rust_irq_handler<T, F: for<'a> FnCall<(&'a T, u32, u32)>>(
+ opaque: *mut c_void,
+ line: c_int,
+ level: c_int,
+ ) {
+ // SAFETY: the opaque was passed as a reference to `T`
+ F::call((unsafe { &*(opaque.cast::<T>()) }, line as u32, level as u32))
+ }
+
+ let gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int) =
+ rust_irq_handler::<Self::Target, F>;
+
+ do_init_gpio_in(self.upcast(), num_lines, gpio_in_cb);
+ }
+
+ fn init_gpio_out(&self, pins: &[InterruptSource]) {
+ unsafe {
+ qdev_init_gpio_out(
+ self.upcast().as_mut_ptr(),
+ InterruptSource::slice_as_ptr(pins),
+ pins.len() as c_int,
+ );
+ }
+ }
+}
+
+impl<R: ObjectDeref> DeviceMethods for R where R::Target: IsA<DeviceState> {}
+
+unsafe impl ObjectType for Clock {
+ type Class = ObjectClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CLOCK) };
+}
+qom_isa!(Clock: Object);
diff --git a/rust/qemu-api/src/qom.rs b/rust/qemu-api/src/qom.rs
new file mode 100644
index 0000000..e20ee01
--- /dev/null
+++ b/rust/qemu-api/src/qom.rs
@@ -0,0 +1,950 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings to access QOM functionality from Rust.
+//!
+//! The QEMU Object Model (QOM) provides inheritance and dynamic typing for QEMU
+//! devices. This module makes QOM's features available in Rust through three
+//! main mechanisms:
+//!
+//! * Automatic creation and registration of `TypeInfo` for classes that are
+//! written in Rust, as well as mapping between Rust traits and QOM vtables.
+//!
+//! * Type-safe casting between parent and child classes, through the [`IsA`]
+//! trait and methods such as [`upcast`](ObjectCast::upcast) and
+//! [`downcast`](ObjectCast::downcast).
+//!
+//! * Automatic delegation of parent class methods to child classes. When a
+//! trait uses [`IsA`] as a bound, its contents become available to all child
+//! classes through blanket implementations. This works both for class methods
+//! and for instance methods accessed through references or smart pointers.
+//!
+//! # Structure of a class
+//!
+//! A leaf class only needs a struct holding instance state. The struct must
+//! implement the [`ObjectType`] and [`IsA`] traits, as well as any `*Impl`
+//! traits that exist for its superclasses.
+//!
+//! If a class has subclasses, it will also provide a struct for instance data,
+//! with the same characteristics as for concrete classes, but it also needs
+//! additional components to support virtual methods:
+//!
+//! * a struct for class data, for example `DeviceClass`. This corresponds to
+//! the C "class struct" and holds the vtable that is used by instances of the
+//! class and its subclasses. It must start with its parent's class struct.
+//!
+//! * a trait for virtual method implementations, for example `DeviceImpl`.
+//! Child classes implement this trait to provide their own behavior for
+//! virtual methods. The trait's methods take `&self` to access instance data.
+//! The traits have the appropriate specialization of `IsA<>` as a supertrait,
+//! for example `IsA<DeviceState>` for `DeviceImpl`.
+//!
+//! * a trait for instance methods, for example `DeviceMethods`. This trait is
+//! automatically implemented for any reference or smart pointer to a device
+//! instance. It calls into the vtable and provides access, across all
+//! subclasses, to the methods defined for the class.
+//!
+//! * optionally, a trait for class methods, for example `DeviceClassMethods`.
+//! This provides access to class-wide functionality that doesn't depend on
+//! instance data. Like instance methods, these are automatically inherited by
+//! child classes.
+//!
+//! # Class structures
+//!
+//! Each QOM class that has virtual methods describes them in a
+//! _class struct_. Class structs include a parent field corresponding
+//! to the vtable of the parent class, all the way up to [`ObjectClass`].
+//!
+//! As mentioned above, virtual methods are defined via traits such as
+//! `DeviceImpl`. Class structs do not define any trait but, conventionally,
+//! all of them have a `class_init` method to initialize the virtual methods
+//! based on the trait and then call the same method on the superclass.
+//!
+//! ```ignore
+//! impl YourSubclassClass
+//! {
+//! pub fn class_init<T: YourSubclassImpl>(&mut self) {
+//! ...
+//! self.parent_class.class_init::<T>();
+//! }
+//! }
+//! ```
+//!
+//! A class may also implement one or more QOM interfaces. In that case, the
+//! `class_init` function must contain, for each interface, an extra
+//! forwarding call as follows:
+//!
+//! ```ignore
+//! ResettableClass::cast::<Self>(self).class_init::<Self>();
+//! ```
+//!
+//! These `class_init` functions are methods on the class rather than a trait,
+//! because the bound on `T` (`DeviceImpl` in this case), will change for every
+//! class struct. The functions are pointed to by the
+//! [`ObjectImpl::CLASS_INIT`] function pointer. While there is no default
+//! implementation, in most cases it will be enough to write it as follows:
+//!
+//! ```ignore
+//! const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+//! ```
+//!
+//! This design incurs a small amount of code duplication but, by not using
+//! traits, it allows the flexibility of implementing bindings in any crate,
+//! without running into violations of the orphan rule for traits.
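+//!
+//! # Examples
+//!
+//! A condensed sketch of a leaf class deriving directly from [`Object`];
+//! `MyObject` and its type name are placeholders, and a real device would
+//! additionally implement the `*Impl` traits of its superclasses:
+//!
+//! ```ignore
+//! #[repr(C)]
+//! #[derive(qemu_api_macros::Object)]
+//! pub struct MyObject {
+//! parent_obj: ParentField<Object>,
+//! // instance state follows
+//! }
+//!
+//! qom_isa!(MyObject: Object);
+//!
+//! unsafe impl ObjectType for MyObject {
+//! type Class = ObjectClass;
+//! const TYPE_NAME: &'static CStr = c"my-object";
+//! }
+//!
+//! impl ObjectImpl for MyObject {
+//! type ParentType = Object;
+//! const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+//! }
+//! ```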
+
+use std::{
+ ffi::{c_void, CStr},
+ fmt,
+ marker::PhantomData,
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::{Deref, DerefMut},
+ ptr::NonNull,
+};
+
+pub use bindings::ObjectClass;
+
+use crate::{
+ bindings::{
+ self, object_class_dynamic_cast, object_dynamic_cast, object_get_class,
+ object_get_typename, object_new, object_ref, object_unref, TypeInfo,
+ },
+ cell::{bql_locked, Opaque},
+};
+
+/// A safe wrapper around [`bindings::Object`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Object(Opaque<bindings::Object>);
+
+unsafe impl Send for Object {}
+unsafe impl Sync for Object {}
+
+/// Marker trait: `Self` can be statically upcasted to `P` (i.e. `P` is a direct
+/// or indirect parent of `Self`).
+///
+/// # Safety
+///
+/// The struct `Self` must be `#[repr(C)]` and must begin, directly or
+/// indirectly, with a field of type `P`. This ensures that invalid casts,
+/// which rely on `IsA<>` for static checking, are rejected at compile time.
+pub unsafe trait IsA<P: ObjectType>: ObjectType {}
+
+// SAFETY: it is always safe to cast to your own type
+unsafe impl<T: ObjectType> IsA<T> for T {}
+
+/// Macro to mark superclasses of QOM classes. This enables type-safe
+/// up- and downcasting.
+///
+/// # Safety
+///
+/// This macro is a thin wrapper around the [`IsA`] trait and performs
+/// no checking whatsoever of what is declared. It is the caller's
+/// responsibility to have $struct begin, directly or indirectly, with
+/// a field of type `$parent`.
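+///
+/// # Examples
+///
+/// For a hypothetical `MyDevice` whose first field is a
+/// `ParentField<DeviceState>`:
+///
+/// ```ignore
+/// qom_isa!(MyDevice: DeviceState, Object);
+/// ```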
+#[macro_export]
+macro_rules! qom_isa {
+ ($struct:ty : $($parent:ty),* ) => {
+ $(
+ // SAFETY: it is the caller responsibility to have $parent as the
+ // first field
+ unsafe impl $crate::qom::IsA<$parent> for $struct {}
+
+ impl AsRef<$parent> for $struct {
+ fn as_ref(&self) -> &$parent {
+ // SAFETY: follows the same rules as for IsA<U>, which is
+ // declared above.
+ let ptr: *const Self = self;
+ unsafe { &*ptr.cast::<$parent>() }
+ }
+ }
+ )*
+ };
+}
+
+/// This is the same as [`ManuallyDrop<T>`](std::mem::ManuallyDrop), though
+/// it hides the standard methods of `ManuallyDrop`.
+///
+/// The first field of an `ObjectType` must be of type `ParentField<T>`.
+/// (Technically, this is only necessary if there is at least one Rust
+/// superclass in the hierarchy). This is to ensure that the parent field is
+/// dropped after the subclass; this drop order is enforced by the C
+/// `object_deinit` function.
+///
+/// # Examples
+///
+/// ```ignore
+/// #[repr(C)]
+/// #[derive(qemu_api_macros::Object)]
+/// pub struct MyDevice {
+/// parent: ParentField<DeviceState>,
+/// ...
+/// }
+/// ```
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct ParentField<T: ObjectType>(std::mem::ManuallyDrop<T>);
+
+impl<T: ObjectType> Deref for ParentField<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<T: ObjectType> DerefMut for ParentField<T> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<T: fmt::Display + ObjectType> fmt::Display for ParentField<T> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+ self.0.fmt(f)
+ }
+}
+
+/// This struct knows that the superclasses of the object have already been
+/// initialized.
+///
+/// The declaration of `ParentInit` is... *"a kind of magic"*. It uses a
+/// technique that is found in several crates, the main ones probably being
+/// `ghost-cell` (in fact it was introduced by the [`GhostCell` paper](https://plv.mpi-sws.org/rustbelt/ghostcell/))
+/// and `generativity`.
+///
+/// The `PhantomData` makes the `ParentInit` type *invariant* with respect to
+/// the lifetime argument `'init`. This, together with the `for<'...>` in
+/// [`ParentInit::with`], blocks any attempt by the compiler to be creative when
+/// operating on values of type `ParentInit` and to extend their lifetimes. In
+/// particular, it ensures that the `ParentInit` cannot be made to outlive the
+/// `rust_instance_init()` function that creates it, and therefore that the
+/// `&'init T` reference is valid.
+///
+/// This implementation of the same concept, without the QOM baggage, can help
+/// understanding the effect:
+///
+/// ```
+/// use std::marker::PhantomData;
+///
+/// #[derive(PartialEq, Eq)]
+/// pub struct Jail<'closure, T: Copy>(&'closure T, PhantomData<fn(&'closure ()) -> &'closure ()>);
+///
+/// impl<'closure, T: Copy> Jail<'closure, T> {
+/// fn get(&self) -> T {
+/// *self.0
+/// }
+///
+/// #[inline]
+/// fn with<U>(v: T, f: impl for<'id> FnOnce(Jail<'id, T>) -> U) -> U {
+/// let parent_init = Jail(&v, PhantomData);
+/// f(parent_init)
+/// }
+/// }
+/// ```
+///
+/// It's impossible to escape the `Jail`; `token1` cannot be moved out of the
+/// closure:
+///
+/// ```ignore
+/// let x = 42;
+/// let escape = Jail::with(&x, |token1| {
+/// println!("{}", token1.get());
+/// // fails to compile...
+/// token1
+/// });
+/// // ... so you cannot do this:
+/// println!("{}", escape.get());
+/// ```
+///
+/// Likewise, in the QOM case the `ParentInit` cannot be moved out of
+/// `instance_init()`. Without this trick it would be possible to stash a
+/// `ParentInit` and use it later to access uninitialized memory.
+///
+/// Here is another example, showing how separately-created "identities" stay
+/// isolated:
+///
+/// ```ignore
+/// impl<'closure, T: Copy> Clone for Jail<'closure, T> {
+/// fn clone(&self) -> Jail<'closure, T> {
+/// Jail(self.0, PhantomData)
+/// }
+/// }
+///
+/// fn main() {
+/// Jail::with(42, |token1| {
+/// // this works and returns true: the clone has the same "identity"
+/// println!("{}", token1 == token1.clone());
+/// Jail::with(42, |token2| {
+/// // here the outer token remains accessible...
+/// println!("{}", token1.get());
+/// // ... but the two are separate: this fails to compile:
+/// println!("{}", token1 == token2);
+/// });
+/// });
+/// }
+/// ```
+pub struct ParentInit<'init, T>(
+ &'init mut MaybeUninit<T>,
+ PhantomData<fn(&'init ()) -> &'init ()>,
+);
+
+impl<'init, T> ParentInit<'init, T> {
+ #[inline]
+ pub fn with(obj: &'init mut MaybeUninit<T>, f: impl for<'id> FnOnce(ParentInit<'id, T>)) {
+ let parent_init = ParentInit(obj, PhantomData);
+ f(parent_init)
+ }
+}
+
+impl<T: ObjectType> ParentInit<'_, T> {
+ /// Return the receiver as a mutable raw pointer to Object.
+ ///
+ /// # Safety
+ ///
+ /// Fields beyond `Object` could be uninitialized and it's your
+ /// responsibility to ensure that they are not used when the pointer is
+ /// dereferenced, either directly or through a cast.
+ pub fn as_object_mut_ptr(&self) -> *mut bindings::Object {
+ self.as_object_ptr().cast_mut()
+ }
+
+ /// Return the receiver as a const raw pointer to Object.
+ ///
+ /// # Safety
+ ///
+ /// Fields beyond `Object` could be uninitialized and it's your
+ /// responsibility to ensure that they are not used when the pointer is
+ /// dereferenced, either directly or through a cast.
+ pub fn as_object_ptr(&self) -> *const bindings::Object {
+ self.0.as_ptr().cast()
+ }
+}
+
+impl<'a, T: ObjectImpl> ParentInit<'a, T> {
+ /// Convert from a derived type to one of its parent types, which
+ /// have already been initialized.
+ ///
+ /// # Safety
+ ///
+ /// Structurally this is always a safe operation; the [`IsA`] trait
+ /// provides static verification that `Self` dereferences to `U` or
+ /// a child of `U`, and only parent types of `T` are allowed.
+ ///
+ /// However, while the fields of the resulting reference are initialized,
+ /// calls might use uninitialized fields of the subclass. It is your
+ /// responsibility to avoid this.
+ pub unsafe fn upcast<U: ObjectType>(&self) -> &'a U
+ where
+ T::ParentType: IsA<U>,
+ {
+ // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait;
+ // the parent has been initialized before `instance_init` is called
+ unsafe { &*(self.0.as_ptr().cast::<U>()) }
+ }
+
+ /// Convert from a derived type to one of its parent types, which
+ /// have already been initialized.
+ ///
+ /// # Safety
+ ///
+ /// Structurally this is always a safe operation; the [`IsA`] trait
+ /// provides static verification that `Self` dereferences to `U` or
+ /// a child of `U`, and only parent types of `T` are allowed.
+ ///
+ /// However, while the fields of the resulting reference are initialized,
+ /// calls might use uninitialized fields of the subclass. It is your
+ /// responsibility to avoid this.
+ pub unsafe fn upcast_mut<U: ObjectType>(&mut self) -> &'a mut U
+ where
+ T::ParentType: IsA<U>,
+ {
+ // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait;
+ // the parent has been initialized before `instance_init` is called
+ unsafe { &mut *(self.0.as_mut_ptr().cast::<U>()) }
+ }
+}
+
+impl<T> Deref for ParentInit<'_, T> {
+ type Target = MaybeUninit<T>;
+
+ fn deref(&self) -> &Self::Target {
+ self.0
+ }
+}
+
+impl<T> DerefMut for ParentInit<'_, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.0
+ }
+}
+
+unsafe extern "C" fn rust_instance_init<T: ObjectImpl>(obj: *mut bindings::Object) {
+ let mut state = NonNull::new(obj).unwrap().cast::<MaybeUninit<T>>();
+
+ // SAFETY: obj is an instance of T, since rust_instance_init<T>
+ // is called from QOM core as the instance_init function
+ // for class T
+ unsafe {
+ ParentInit::with(state.as_mut(), |parent_init| {
+ T::INSTANCE_INIT.unwrap()(parent_init);
+ });
+ }
+}
+
+unsafe extern "C" fn rust_instance_post_init<T: ObjectImpl>(obj: *mut bindings::Object) {
+ let state = NonNull::new(obj).unwrap().cast::<T>();
+ // SAFETY: obj is an instance of T, since rust_instance_post_init<T>
+ // is called from QOM core as the instance_post_init function
+ // for class T
+ T::INSTANCE_POST_INIT.unwrap()(unsafe { state.as_ref() });
+}
+
+unsafe extern "C" fn rust_class_init<T: ObjectType + ObjectImpl>(
+ klass: *mut ObjectClass,
+ _data: *const c_void,
+) {
+ let mut klass = NonNull::new(klass)
+ .unwrap()
+ .cast::<<T as ObjectType>::Class>();
+ // SAFETY: klass is a T::Class, since rust_class_init<T>
+ // is called from QOM core as the class_init function
+ // for class T
+ <T as ObjectImpl>::CLASS_INIT(unsafe { klass.as_mut() })
+}
+
+unsafe extern "C" fn drop_object<T: ObjectImpl>(obj: *mut bindings::Object) {
+ // SAFETY: obj is an instance of T, since drop_object<T> is called
+ // from the QOM core function object_deinit() as the instance_finalize
+ // function for class T. Note that while object_deinit() will drop the
+ // superclass field separately after this function returns, `T` must
+ // implement the unsafe trait ObjectType; the safety rules for the
+ // trait mandate that the parent field is manually dropped.
+ unsafe { std::ptr::drop_in_place(obj.cast::<T>()) }
+}
+
+/// Trait exposed by all structs corresponding to QOM objects.
+///
+/// # Safety
+///
+/// For classes declared in C:
+///
+/// - `Class` and `TYPE` must match the data in the `TypeInfo`;
+///
+/// - the first field of the struct must be of the instance type corresponding
+/// to the superclass, as declared in the `TypeInfo`
+///
+/// - likewise, the first field of the `Class` struct must be of the class type
+/// corresponding to the superclass
+///
+/// For classes declared in Rust and implementing [`ObjectImpl`]:
+///
+/// - the struct must be `#[repr(C)]`;
+///
+/// - the first field of the struct must be of type
+/// [`ParentField<T>`](ParentField), where `T` is the parent type
+/// [`ObjectImpl::ParentType`]
+///
+/// - the first field of the `Class` must be of the class struct corresponding
+/// to the superclass, which is `ObjectImpl::ParentType::Class`. `ParentField`
+/// is not needed here.
+///
+/// In both cases, having a separate class type is not necessary if the subclass
+/// does not add any field.
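+///
+/// # Examples
+///
+/// A sketch of wrapping a class declared in C, mirroring what the `qdev`
+/// bindings do for `DeviceState`; `MyDevice`, `MyDeviceClass` and
+/// `TYPE_MY_DEVICE` are placeholder names.
+///
+/// ```ignore
+/// unsafe impl ObjectType for MyDevice {
+/// type Class = MyDeviceClass;
+/// const TYPE_NAME: &'static CStr =
+/// unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MY_DEVICE) };
+/// }
+/// qom_isa!(MyDevice: DeviceState, Object);
+/// ```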
+pub unsafe trait ObjectType: Sized {
+ /// The QOM class object corresponding to this struct. This is used
+ /// to automatically generate a `class_init` method.
+ type Class;
+
+ /// The name of the type, which can be passed to `object_new()` to
+ /// generate an instance of this type.
+ const TYPE_NAME: &'static CStr;
+
+ /// Return the receiver as an Object. This is always safe, even
+ /// if this type represents an interface.
+ fn as_object(&self) -> &Object {
+ unsafe { &*self.as_ptr().cast() }
+ }
+
+ /// Return the receiver as a const raw pointer to Object.
+ /// This is preferable to `as_object_mut_ptr()` if a C
+ /// function only needs a `const Object *`.
+ fn as_object_ptr(&self) -> *const bindings::Object {
+ self.as_object().as_ptr()
+ }
+
+ /// Return the receiver as a mutable raw pointer to Object.
+ ///
+ /// # Safety
+ ///
+ /// This cast is always safe, but because the result is mutable
+ /// and the incoming reference is not, this should only be used
+ /// for calls to C functions, and only if needed.
+ unsafe fn as_object_mut_ptr(&self) -> *mut bindings::Object {
+ self.as_object().as_mut_ptr()
+ }
+}
+
+/// Trait exposed by all structs corresponding to QOM interfaces.
+/// Unlike `ObjectType`, it is implemented on the class type (which provides
+/// the vtable for the interfaces).
+///
+/// # Safety
+///
+/// `TYPE` must match the contents of the `TypeInfo` as found in the C code;
+/// right now, interfaces can only be declared in C.
+pub unsafe trait InterfaceType: Sized {
+ /// The name of the type, which can be passed to
+ /// `object_class_dynamic_cast()` to obtain the pointer to the vtable
+ /// for this interface.
+ const TYPE_NAME: &'static CStr;
+
+ /// Return the vtable for the interface; `U` is the type that
+ /// lists the interface in its `TypeInfo`.
+ ///
+ /// # Examples
+ ///
+ /// This function is usually called by a `class_init` method in `U::Class`.
+ /// For example, `DeviceClass::class_init<T>` initializes its `Resettable`
+ /// interface as follows:
+ ///
+ /// ```ignore
+ /// ResettableClass::cast::<DeviceState>(self).class_init::<T>();
+ /// ```
+ ///
+ /// where `T` is the concrete subclass that is being initialized.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the incoming argument's class does not implement the interface.
+ fn cast<U: ObjectType>(klass: &mut U::Class) -> &mut Self {
+ unsafe {
+ // SAFETY: upcasting to ObjectClass is always valid, and the
+ // return type is either NULL or the argument itself
+ let result: *mut Self = object_class_dynamic_cast(
+ (klass as *mut U::Class).cast(),
+ Self::TYPE_NAME.as_ptr(),
+ )
+ .cast();
+ result.as_mut().unwrap()
+ }
+ }
+}
+
+/// This trait provides safe casting operations for QOM objects to raw pointers,
+/// to be used for example for FFI. The trait can be applied to any kind of
+/// reference or smart pointers, and enforces correctness through the [`IsA`]
+/// trait.
+pub trait ObjectDeref: Deref
+where
+ Self::Target: ObjectType,
+{
+ /// Convert to a const Rust pointer, to be used for example for FFI.
+ /// The target pointer type must be the type of `self` or a superclass
+ fn as_ptr<U: ObjectType>(&self) -> *const U
+ where
+ Self::Target: IsA<U>,
+ {
+ let ptr: *const Self::Target = self.deref();
+ ptr.cast::<U>()
+ }
+
+ /// Convert to a mutable Rust pointer, to be used for example for FFI.
+ /// The target pointer type must be the type of `self` or a superclass.
+ /// Used to implement interior mutability for objects.
+ ///
+ /// # Safety
+ ///
+ /// This method is safe because only the actual dereference of the pointer
+ /// has to be unsafe. Bindings to C APIs will use it a lot, but care has
+ /// to be taken because it overrides the const-ness of `&self`.
+ fn as_mut_ptr<U: ObjectType>(&self) -> *mut U
+ where
+ Self::Target: IsA<U>,
+ {
+ #[allow(clippy::as_ptr_cast_mut)]
+ {
+ self.as_ptr::<U>().cast_mut()
+ }
+ }
+}
+
+/// Trait that adds extra functionality for `&T` where `T` is a QOM
+/// object type. Allows conversion to/from C objects in generic code.
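+///
+/// # Examples
+///
+/// A sketch of runtime-checked downcasting; `MyDevice` is a placeholder type
+/// and `obj` is assumed to be a `&Object` obtained elsewhere.
+///
+/// ```ignore
+/// if let Some(dev) = obj.downcast::<MyDevice>() {
+/// // `dev` is a `&MyDevice` backed by the same underlying object
+/// }
+/// ```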
+pub trait ObjectCast: ObjectDeref + Copy
+where
+ Self::Target: ObjectType,
+{
+ /// Safely convert from a derived type to one of its parent types.
+ ///
+ /// This is always safe; the [`IsA`] trait provides static verification
+ /// that `Self` dereferences to `U` or a child of `U`.
+ fn upcast<'a, U: ObjectType>(self) -> &'a U
+ where
+ Self::Target: IsA<U>,
+ Self: 'a,
+ {
+ // SAFETY: soundness is declared via IsA<U>, which is an unsafe trait
+ unsafe { self.unsafe_cast::<U>() }
+ }
+
+ /// Attempt to convert to a derived type.
+ ///
+ /// Returns `None` if the object is not actually of type `U`. This is
+ /// verified at runtime by checking the object's type information.
+ fn downcast<'a, U: IsA<Self::Target>>(self) -> Option<&'a U>
+ where
+ Self: 'a,
+ {
+ self.dynamic_cast::<U>()
+ }
+
+ /// Attempt to convert between any two types in the QOM hierarchy.
+ ///
+ /// Returns `None` if the object is not actually of type `U`. This is
+ /// verified at runtime by checking the object's type information.
+ fn dynamic_cast<'a, U: ObjectType>(self) -> Option<&'a U>
+ where
+ Self: 'a,
+ {
+ unsafe {
+ // SAFETY: upcasting to Object is always valid, and the
+ // return type is either NULL or the argument itself
+ let result: *const U =
+ object_dynamic_cast(self.as_object_mut_ptr(), U::TYPE_NAME.as_ptr()).cast();
+
+ result.as_ref()
+ }
+ }
+
+ /// Convert to any QOM type without verification.
+ ///
+ /// # Safety
+ ///
+ /// What safety? You need to know yourself that the cast is correct; only
+ /// use when performance is paramount. It is still better than a raw
+ /// pointer `cast()`, which does not even check that you remain in the
+ /// realm of QOM `ObjectType`s.
+ ///
+ /// `unsafe_cast::<Object>()` is always safe.
+ unsafe fn unsafe_cast<'a, U: ObjectType>(self) -> &'a U
+ where
+ Self: 'a,
+ {
+ unsafe { &*(self.as_ptr::<Self::Target>().cast::<U>()) }
+ }
+}
+
+impl<T: ObjectType> ObjectDeref for &T {}
+impl<T: ObjectType> ObjectCast for &T {}
+
+impl<T: ObjectType> ObjectDeref for &mut T {}
+
+/// Trait a type must implement to be registered with QEMU.
+pub trait ObjectImpl: ObjectType + IsA<Object> {
+ /// The parent of the type. This should match the first field of the
+ /// struct that implements `ObjectImpl`, minus the `ParentField<_>` wrapper.
+ type ParentType: ObjectType;
+
+ /// Whether the object can be instantiated
+ const ABSTRACT: bool = false;
+
+ /// Function that is called to initialize an object. The parent class will
+ /// have already been initialized so the type is only responsible for
+ /// initializing its own members.
+ ///
+ /// FIXME: The argument is not really a valid reference. `&mut
+ /// MaybeUninit<Self>` would be a better description.
+ const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = None;
+
+ /// Function that is called to finish initialization of an object, once
+ /// `INSTANCE_INIT` functions have been called.
+ const INSTANCE_POST_INIT: Option<fn(&Self)> = None;
+
+ /// Called on descendant classes after all parent class initialization
+ /// has occurred, but before the class itself is initialized. This
+ /// is only useful if a class is not a leaf, and can be used to undo
+ /// the effects of copying the contents of the parent's class struct
+ /// to the descendants.
+ const CLASS_BASE_INIT: Option<
+ unsafe extern "C" fn(klass: *mut ObjectClass, data: *const c_void),
+ > = None;
+
+ const TYPE_INFO: TypeInfo = TypeInfo {
+ name: Self::TYPE_NAME.as_ptr(),
+ parent: Self::ParentType::TYPE_NAME.as_ptr(),
+ instance_size: core::mem::size_of::<Self>(),
+ instance_align: core::mem::align_of::<Self>(),
+ instance_init: match Self::INSTANCE_INIT {
+ None => None,
+ Some(_) => Some(rust_instance_init::<Self>),
+ },
+ instance_post_init: match Self::INSTANCE_POST_INIT {
+ None => None,
+ Some(_) => Some(rust_instance_post_init::<Self>),
+ },
+ instance_finalize: Some(drop_object::<Self>),
+ abstract_: Self::ABSTRACT,
+ class_size: core::mem::size_of::<Self::Class>(),
+ class_init: Some(rust_class_init::<Self>),
+ class_base_init: Self::CLASS_BASE_INIT,
+ class_data: core::ptr::null(),
+ interfaces: core::ptr::null(),
+ };
+
+ // methods on ObjectClass
+ const UNPARENT: Option<fn(&Self)> = None;
+
+ /// Store into the argument the virtual method implementations
+ /// for `Self`. On entry, the virtual method pointers are set to
+ /// the default values coming from the parent classes; the function
+ /// can change them to override virtual methods of a parent class.
+ ///
+ /// Usually defined simply as `Self::Class::class_init::<Self>`;
+ /// however a default implementation cannot be included here, because the
+ /// bounds that the `Self::Class::class_init` method places on `Self` are
+ /// not known in advance.
+ ///
+ /// # Safety
+ ///
+ /// While `klass`'s parent class is initialized on entry, the other fields
+ /// are all zero; it is therefore assumed that all fields in `T` can be
+ /// zeroed, otherwise it would not be possible to provide the class as a
+ /// `&mut T`. TODO: it may be possible to add an unsafe trait that checks
+ /// that all fields *after the parent class* (but not the parent class
+ /// itself) are Zeroable. This unsafe trait can be added via a derive
+ /// macro.
+ const CLASS_INIT: fn(&mut Self::Class);
+}
+
+/// # Safety
+///
+/// We expect the FFI user of this function to pass a valid pointer that
+/// can be downcast to type `T`. We also expect the device to be
+/// readable/writable from only one thread at any given time.
+unsafe extern "C" fn rust_unparent_fn<T: ObjectImpl>(dev: *mut bindings::Object) {
+ let state = NonNull::new(dev).unwrap().cast::<T>();
+ T::UNPARENT.unwrap()(unsafe { state.as_ref() });
+}
+
+impl ObjectClass {
+ /// Fill in the virtual methods of `ObjectClass` based on the definitions in
+ /// the `ObjectImpl` trait.
+ pub fn class_init<T: ObjectImpl>(&mut self) {
+ if <T as ObjectImpl>::UNPARENT.is_some() {
+ self.unparent = Some(rust_unparent_fn::<T>);
+ }
+ }
+}
+
+unsafe impl ObjectType for Object {
+ type Class = ObjectClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_OBJECT) };
+}
+
+/// A reference-counted pointer to a QOM object.
+///
+/// `Owned<T>` wraps `T` with automatic reference counting. It increases the
+/// reference count when created via [`Owned::from`] or cloned, and decreases
+/// it when dropped. This ensures that the reference count remains elevated
+/// as long as any `Owned<T>` references to it exist.
+///
+/// `Owned<T>` can be used for two reasons:
+/// * because the lifetime of the QOM object is unknown and someone else could
+/// take a reference (similar to `Arc<T>`, for example): in this case, the
+/// object can escape and outlive the Rust struct that contains the `Owned<T>`
+/// field;
+///
+/// * to ensure that the object stays alive until after `Drop::drop` is called
+/// on the Rust struct: in this case, the object will always die together with
+/// the Rust struct that contains the `Owned<T>` field.
+///
+/// Child properties are an example of the second case: in C, an object that
+/// is created with `object_initialize_child` will die *before*
+/// `instance_finalize` is called, whereas Rust expects the struct to have valid
+/// contents when `Drop::drop` is called. Therefore Rust structs that have
+/// child properties need to keep a reference to the child object. Right now
+/// this can be done with `Owned<T>`; in the future one might have a separate
+/// `Child<'parent, T>` smart pointer that keeps a reference to a `T`, like
+/// `Owned`, but does not allow cloning.
+///
+/// Note that dropping an `Owned<T>` requires the big QEMU lock to be taken.
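+///
+/// # Examples
+///
+/// A sketch of taking ownership of a reference returned by C code; `ptr` is
+/// assumed to be a valid, non-NULL `*mut bindings::Object`.
+///
+/// ```ignore
+/// // The C API does not gift a reference, so add one with `Owned::from`;
+/// // it is released again when `owned` is dropped.
+/// let obj: &Object = unsafe { Object::from_raw(ptr) };
+/// let owned: Owned<Object> = unsafe { Owned::from(obj) };
+/// ```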
+#[repr(transparent)]
+#[derive(PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Owned<T: ObjectType>(NonNull<T>);
+
+// The following rationale for safety is taken from Linux's kernel::sync::Arc.
+
+// SAFETY: It is safe to send `Owned<T>` to another thread when the underlying
+// `T` is `Sync` because it effectively means sharing `&T` (which is safe
+// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any
+// thread that has an `Owned<T>` may ultimately access `T` using a
+// mutable reference when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: ObjectType + Send + Sync> Send for Owned<T> {}
+
+// SAFETY: It is safe to send `&Owned<T>` to another thread when the underlying
+// `T` is `Sync` because it effectively means sharing `&T` (which is safe
+// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any
+// thread that has a `&Owned<T>` may clone it and get an `Owned<T>` on that
+// thread, so the thread may ultimately access `T` using a mutable reference
+// when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: ObjectType + Sync + Send> Sync for Owned<T> {}
+
+impl<T: ObjectType> Owned<T> {
+ /// Convert a raw C pointer into an owned reference to the QOM
+ /// object it points to. The object's reference count will be
+ /// decreased when the `Owned` is dropped.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `ptr` is NULL.
+ ///
+ /// # Safety
+ ///
+ /// The caller must indeed own a reference to the QOM object.
+ /// The object must not be embedded in another unless the outer
+ /// object is guaranteed to have a longer lifetime.
+ ///
+ /// A raw pointer obtained via [`Owned::into_raw()`] can always be passed
+ /// back to `from_raw()` (assuming the original `Owned` was valid!),
+ /// since the owned reference remains there between the calls to
+ /// `into_raw()` and `from_raw()`.
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY NOTE: while NonNull requires a mutable pointer, only
+ // Deref is implemented so the pointer passed to from_raw
+ // remains const
+ Owned(NonNull::new(ptr.cast_mut()).unwrap())
+ }
+
+ /// Obtain a raw C pointer from a reference. `src` is consumed
+ /// and the reference is leaked.
+ #[allow(clippy::missing_const_for_fn)]
+ pub fn into_raw(src: Owned<T>) -> *mut T {
+ let src = ManuallyDrop::new(src);
+ src.0.as_ptr()
+ }
+
+ /// Increase the reference count of a QOM object and return
+ /// a new owned reference to it.
+ ///
+ /// # Safety
+ ///
+ /// The object must not be embedded in another, unless the outer
+ /// object is guaranteed to have a longer lifetime.
+ pub unsafe fn from(obj: &T) -> Self {
+ unsafe {
+ object_ref(obj.as_object_mut_ptr().cast::<c_void>());
+
+ // SAFETY NOTE: while NonNull requires a mutable pointer, only
+ // Deref is implemented so the reference passed to from_raw
+ // remains shared
+ Owned(NonNull::new_unchecked(obj.as_mut_ptr()))
+ }
+ }
+}
+
+impl<T: ObjectType> Clone for Owned<T> {
+ fn clone(&self) -> Self {
+ // SAFETY: creation method is unsafe; whoever calls it has
+ // responsibility that the pointer is valid, and remains valid
+ // throughout the lifetime of the `Owned<T>` and its clones.
+ unsafe { Owned::from(self.deref()) }
+ }
+}
+
+impl<T: ObjectType> Deref for Owned<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: creation method is unsafe; whoever calls it has
+ // responsibility that the pointer is valid, and remains valid
+ // throughout the lifetime of the `Owned<T>` and its clones.
+ // With that guarantee, reference counting ensures that
+ // the object remains alive.
+ unsafe { &*self.0.as_ptr() }
+ }
+}
+impl<T: ObjectType> ObjectDeref for Owned<T> {}
+
+impl<T: ObjectType> Drop for Owned<T> {
+ fn drop(&mut self) {
+ assert!(bql_locked());
+ // SAFETY: creation method is unsafe, and whoever calls it has
+ // responsibility that the pointer is valid, and remains valid
+ // throughout the lifetime of the `Owned<T>` and its clones.
+ unsafe {
+ object_unref(self.as_object_mut_ptr().cast::<c_void>());
+ }
+ }
+}
+
+impl<T: IsA<Object>> fmt::Debug for Owned<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.deref().debug_fmt(f)
+ }
+}
+
+/// Trait for class methods exposed by the Object class. The methods can be
+/// called on all objects that have the trait `IsA<Object>`.
+///
+/// The trait should only be used through the blanket implementation,
+/// which guarantees safety via `IsA`
+pub trait ObjectClassMethods: IsA<Object> {
+ /// Return a new reference counted instance of this class
+ fn new() -> Owned<Self> {
+ assert!(bql_locked());
+ // SAFETY: the object created by object_new is allocated on
+ // the heap and has a reference count of 1
+ unsafe {
+ let raw_obj = object_new(Self::TYPE_NAME.as_ptr());
+ let obj = Object::from_raw(raw_obj).unsafe_cast::<Self>();
+ Owned::from_raw(obj)
+ }
+ }
+}
+
+/// Trait for methods exposed by the Object class. The methods can be
+/// called on all objects that have the trait `IsA<Object>`.
+///
+/// The trait should only be used through the blanket implementation,
+/// which guarantees safety via `IsA`
+pub trait ObjectMethods: ObjectDeref
+where
+ Self::Target: IsA<Object>,
+{
+ /// Return the name of the type of `self`
+ fn typename(&self) -> std::borrow::Cow<'_, str> {
+ let obj = self.upcast::<Object>();
+ // SAFETY: safety of this is the requirement for implementing IsA
+ // The result of the C API has static lifetime
+ unsafe {
+ let p = object_get_typename(obj.as_mut_ptr());
+ CStr::from_ptr(p).to_string_lossy()
+ }
+ }
+
+ fn get_class(&self) -> &'static <Self::Target as ObjectType>::Class {
+ let obj = self.upcast::<Object>();
+
+ // SAFETY: all objects can call object_get_class; the actual class
+ // type is guaranteed by the implementation of `ObjectType` and
+ // `ObjectImpl`.
+ let klass: &'static <Self::Target as ObjectType>::Class =
+ unsafe { &*object_get_class(obj.as_mut_ptr()).cast() };
+
+ klass
+ }
+
+ /// Convenience function for implementing the Debug trait
+ fn debug_fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple(&self.typename())
+ .field(&(self as *const Self))
+ .finish()
+ }
+}
+
+impl<T> ObjectClassMethods for T where T: IsA<Object> {}
+impl<R: ObjectDeref> ObjectMethods for R where R::Target: IsA<Object> {}
diff --git a/rust/qemu-api/src/sysbus.rs b/rust/qemu-api/src/sysbus.rs
new file mode 100644
index 0000000..e92502a
--- /dev/null
+++ b/rust/qemu-api/src/sysbus.rs
@@ -0,0 +1,122 @@
+// Copyright 2024 Red Hat, Inc.
+// Author(s): Paolo Bonzini <pbonzini@redhat.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings to access `sysbus` functionality from Rust.
+
+use std::{ffi::CStr, ptr::addr_of_mut};
+
+pub use bindings::SysBusDeviceClass;
+
+use crate::{
+ bindings,
+ cell::{bql_locked, Opaque},
+ irq::{IRQState, InterruptSource},
+ memory::MemoryRegion,
+ prelude::*,
+ qdev::{DeviceImpl, DeviceState},
+ qom::Owned,
+};
+
+/// A safe wrapper around [`bindings::SysBusDevice`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct SysBusDevice(Opaque<bindings::SysBusDevice>);
+
+unsafe impl Send for SysBusDevice {}
+unsafe impl Sync for SysBusDevice {}
+
+unsafe impl ObjectType for SysBusDevice {
+ type Class = SysBusDeviceClass;
+ const TYPE_NAME: &'static CStr =
+ unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_SYS_BUS_DEVICE) };
+}
+qom_isa!(SysBusDevice: DeviceState, Object);
+
+// TODO: add virtual methods
+pub trait SysBusDeviceImpl: DeviceImpl + IsA<SysBusDevice> {}
+
+impl SysBusDeviceClass {
+ /// Fill in the virtual methods of `SysBusDeviceClass` based on the
+ /// definitions in the `SysBusDeviceImpl` trait.
+ pub fn class_init<T: SysBusDeviceImpl>(self: &mut SysBusDeviceClass) {
+ self.parent_class.class_init::<T>();
+ }
+}
+
+/// Trait for methods of [`SysBusDevice`] and its subclasses.
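+///
+/// # Examples
+///
+/// A sketch of exposing MMIO and IRQs from a device's post-init hook; the
+/// `iomem` and `irq` fields belong to a hypothetical device.
+///
+/// ```ignore
+/// fn post_init(&self) {
+/// // region 0 and IRQ 0, in the order the board will refer to them
+/// self.init_mmio(&self.iomem);
+/// self.init_irq(&self.irq);
+/// }
+/// ```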
+pub trait SysBusDeviceMethods: ObjectDeref
+where
+ Self::Target: IsA<SysBusDevice>,
+{
+ /// Expose a memory region to the board so that it can give it an address
+ /// in guest memory. Note that the ordering of calls to `init_mmio` is
+ /// important, since whoever creates the sysbus device will refer to the
+ /// region with a number that corresponds to the order of calls to
+ /// `init_mmio`.
+ fn init_mmio(&self, iomem: &MemoryRegion) {
+ assert!(bql_locked());
+ unsafe {
+ bindings::sysbus_init_mmio(self.upcast().as_mut_ptr(), iomem.as_mut_ptr());
+ }
+ }
+
+ /// Expose an interrupt source outside the device as a qdev GPIO output.
+ /// Note that the ordering of calls to `init_irq` is important, since
+ /// whoever creates the sysbus device will refer to the interrupts with
+ /// a number that corresponds to the order of calls to `init_irq`.
+ fn init_irq(&self, irq: &InterruptSource) {
+ assert!(bql_locked());
+ unsafe {
+ bindings::sysbus_init_irq(self.upcast().as_mut_ptr(), irq.as_ptr());
+ }
+ }
+
+ // TODO: do we want a type like GuestAddress here?
+ fn mmio_addr(&self, id: u32) -> Option<u64> {
+ assert!(bql_locked());
+ // SAFETY: the BQL ensures that no one else writes to sbd.mmio[], and
+ // the SysBusDevice must be initialized to get an IsA<SysBusDevice>.
+ let sbd = unsafe { *self.upcast().as_ptr() };
+ let id: usize = id.try_into().unwrap();
+ if sbd.mmio[id].memory.is_null() {
+ None
+ } else {
+ Some(sbd.mmio[id].addr)
+ }
+ }
+
+ // TODO: do we want a type like GuestAddress here?
+ fn mmio_map(&self, id: u32, addr: u64) {
+ assert!(bql_locked());
+ let id: i32 = id.try_into().unwrap();
+ unsafe {
+ bindings::sysbus_mmio_map(self.upcast().as_mut_ptr(), id, addr);
+ }
+ }
+
+ // Owned<> is used here because sysbus_connect_irq (via
+ // object_property_set_link) adds a reference to the IRQState,
+ // which can prolong its life
+ fn connect_irq(&self, id: u32, irq: &Owned<IRQState>) {
+ assert!(bql_locked());
+ let id: i32 = id.try_into().unwrap();
+ let irq: &IRQState = irq;
+ unsafe {
+ bindings::sysbus_connect_irq(self.upcast().as_mut_ptr(), id, irq.as_mut_ptr());
+ }
+ }
+
+ fn sysbus_realize(&self) {
+ // TODO: return an Error
+ assert!(bql_locked());
+ unsafe {
+ bindings::sysbus_realize(
+ self.upcast().as_mut_ptr(),
+ addr_of_mut!(bindings::error_fatal),
+ );
+ }
+ }
+}
+
+impl<R: ObjectDeref> SysBusDeviceMethods for R where R::Target: IsA<SysBusDevice> {}
diff --git a/rust/qemu-api/src/timer.rs b/rust/qemu-api/src/timer.rs
new file mode 100644
index 0000000..0a2d111
--- /dev/null
+++ b/rust/qemu-api/src/timer.rs
@@ -0,0 +1,125 @@
+// Copyright (C) 2024 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{
+ ffi::{c_int, c_void},
+ pin::Pin,
+};
+
+use crate::{
+ bindings::{self, qemu_clock_get_ns, timer_del, timer_init_full, timer_mod, QEMUClockType},
+ callbacks::FnCall,
+ cell::Opaque,
+};
+
+/// A safe wrapper around [`bindings::QEMUTimer`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Timer(Opaque<bindings::QEMUTimer>);
+
+unsafe impl Send for Timer {}
+unsafe impl Sync for Timer {}
+
+#[repr(transparent)]
+#[derive(qemu_api_macros::Wrapper)]
+pub struct TimerListGroup(Opaque<bindings::QEMUTimerListGroup>);
+
+unsafe impl Send for TimerListGroup {}
+unsafe impl Sync for TimerListGroup {}
+
+impl Timer {
+ pub const MS: u32 = bindings::SCALE_MS;
+ pub const US: u32 = bindings::SCALE_US;
+ pub const NS: u32 = bindings::SCALE_NS;
+
+ /// Create a `Timer` struct without initializing it.
+ ///
+ /// # Safety
+ ///
+ /// The timer must be initialized before it is armed with
+ /// [`modify`](Self::modify).
+ pub unsafe fn new() -> Self {
+ // SAFETY: requirements relayed to callers of Timer::new
+ Self(unsafe { Opaque::zeroed() })
+ }
+
+ /// Create a new timer with the given attributes.
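+ ///
+ /// # Examples
+ ///
+ /// A sketch of initializing a virtual-clock timer; `timer` is assumed to
+ /// be a pinned, not-yet-initialized [`Timer`] owned by a hypothetical
+ /// `MyDevice`, and `device` a reference to that device.
+ ///
+ /// ```ignore
+ /// timer.init_full(
+ /// None, // use the default timer list group
+ /// CLOCK_VIRTUAL,
+ /// Timer::NS,
+ /// 0, // no attributes
+ /// MyDevice::on_timeout,
+ /// device, // passed back to the callback as `&MyDevice`
+ /// );
+ /// ```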
+ pub fn init_full<'timer, 'opaque: 'timer, T, F>(
+ self: Pin<&'timer mut Self>,
+ timer_list_group: Option<&TimerListGroup>,
+ clk_type: ClockType,
+ scale: u32,
+ attributes: u32,
+ _cb: F,
+ opaque: &'opaque T,
+ ) where
+ F: for<'a> FnCall<(&'a T,)>,
+ {
+ let _: () = F::ASSERT_IS_SOME;
+
+ /// timer expiration callback
+ unsafe extern "C" fn rust_timer_handler<T, F: for<'a> FnCall<(&'a T,)>>(
+ opaque: *mut c_void,
+ ) {
+ // SAFETY: the opaque was passed as a reference to `T`.
+ F::call((unsafe { &*(opaque.cast::<T>()) },))
+ }
+
+ let timer_cb: unsafe extern "C" fn(*mut c_void) = rust_timer_handler::<T, F>;
+
+ // SAFETY: the opaque outlives the timer
+ unsafe {
+ timer_init_full(
+ self.as_mut_ptr(),
+ if let Some(g) = timer_list_group {
+ g as *const TimerListGroup as *mut _
+ } else {
+ ::core::ptr::null_mut()
+ },
+ clk_type.id,
+ scale as c_int,
+ attributes as c_int,
+ Some(timer_cb),
+ (opaque as *const T).cast::<c_void>().cast_mut(),
+ )
+ }
+ }
+
+ pub fn modify(&self, expire_time: u64) {
+ // SAFETY: the only way to obtain a Timer safely is via methods that
+ // take a Pin<&mut Self>, therefore the timer is pinned
+ unsafe { timer_mod(self.as_mut_ptr(), expire_time as i64) }
+ }
+
+ pub fn delete(&self) {
+ // SAFETY: the only way to obtain a Timer safely is via methods that
+ // take a Pin<&mut Self>, therefore the timer is pinned
+ unsafe { timer_del(self.as_mut_ptr()) }
+ }
+}
+
+// FIXME: use something like PinnedDrop from the pinned_init crate
+impl Drop for Timer {
+ fn drop(&mut self) {
+ self.delete()
+ }
+}
+
+pub struct ClockType {
+ id: QEMUClockType,
+}
+
+impl ClockType {
+ pub fn get_ns(&self) -> u64 {
+ // SAFETY: cannot be created outside this module, therefore id
+ // is valid
+ (unsafe { qemu_clock_get_ns(self.id) }) as u64
+ }
+}
+
+pub const CLOCK_VIRTUAL: ClockType = ClockType {
+ id: QEMUClockType::QEMU_CLOCK_VIRTUAL,
+};
+
+pub const NANOSECONDS_PER_SECOND: u64 = 1000000000;
diff --git a/rust/qemu-api/src/uninit.rs b/rust/qemu-api/src/uninit.rs
new file mode 100644
index 0000000..04123b4
--- /dev/null
+++ b/rust/qemu-api/src/uninit.rs
@@ -0,0 +1,85 @@
+//! Access fields of a [`MaybeUninit`]
+
+use std::{
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+};
+
+pub struct MaybeUninitField<'a, T, U> {
+ parent: &'a mut MaybeUninit<T>,
+ child: *mut U,
+}
+
+impl<'a, T, U> MaybeUninitField<'a, T, U> {
+ #[doc(hidden)]
+ pub fn new(parent: &'a mut MaybeUninit<T>, child: *mut U) -> Self {
+ MaybeUninitField { parent, child }
+ }
+
+ /// Return a constant pointer to the containing object of the field.
+ ///
+ /// Because the `MaybeUninitField` remembers the containing object,
+ /// it is possible to use it in foreign APIs that initialize the
+ /// child.
+ pub fn parent(f: &Self) -> *const T {
+ f.parent.as_ptr()
+ }
+
+ /// Return a mutable pointer to the containing object.
+ ///
+ /// Because the `MaybeUninitField` remembers the containing object,
+ /// it is possible to use it in foreign APIs that initialize the
+ /// child.
+ pub fn parent_mut(f: &mut Self) -> *mut T {
+ f.parent.as_mut_ptr()
+ }
+}
+
+impl<'a, T, U> Deref for MaybeUninitField<'a, T, U> {
+ type Target = MaybeUninit<U>;
+
+ fn deref(&self) -> &MaybeUninit<U> {
+ // SAFETY: self.child was obtained by dereferencing a valid mutable
+ // reference; the content of the memory may be invalid or uninitialized
+ // but MaybeUninit<_> makes no assumption on it
+ unsafe { &*(self.child.cast()) }
+ }
+}
+
+impl<'a, T, U> DerefMut for MaybeUninitField<'a, T, U> {
+ fn deref_mut(&mut self) -> &mut MaybeUninit<U> {
+ // SAFETY: self.child was obtained by dereferencing a valid mutable
+ // reference; the content of the memory may be invalid or uninitialized
+ // but MaybeUninit<_> makes no assumption on it
+ unsafe { &mut *(self.child.cast()) }
+ }
+}
+
+/// ```
+/// #[derive(Debug)]
+/// struct S {
+/// x: u32,
+/// y: u32,
+/// }
+///
+/// # use std::mem::MaybeUninit;
+/// # use qemu_api::{assert_match, uninit_field_mut};
+///
+/// let mut s: MaybeUninit<S> = MaybeUninit::zeroed();
+/// uninit_field_mut!(s, x).write(5);
+/// let s = unsafe { s.assume_init() };
+/// assert_match!(s, S { x: 5, y: 0 });
+/// ```
+#[macro_export]
+macro_rules! uninit_field_mut {
+ ($container:expr, $($field:tt)+) => {{
+ let container__: &mut ::std::mem::MaybeUninit<_> = &mut $container;
+ let container_ptr__ = container__.as_mut_ptr();
+
+ // SAFETY: the container is not used directly, only through a MaybeUninit<>,
+ // so the safety is delegated to the caller and to final invocation of
+ // assume_init()
+ let target__ = unsafe { std::ptr::addr_of_mut!((*container_ptr__).$($field)+) };
+ $crate::uninit::MaybeUninitField::new(container__, target__)
+ }};
+}
diff --git a/rust/qemu-api/src/vmstate.rs b/rust/qemu-api/src/vmstate.rs
new file mode 100644
index 0000000..812f390
--- /dev/null
+++ b/rust/qemu-api/src/vmstate.rs
@@ -0,0 +1,604 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Helper macros to declare migration state for device models.
+//!
+//! This module includes four families of macros:
+//!
+//! * [`vmstate_unused!`](crate::vmstate_unused) and
+//! [`vmstate_of!`](crate::vmstate_of), which are used to express the
+//! migration format for a struct. This is based on the [`VMState`] trait,
+//! which is defined by all migratable types.
+//!
+//! * [`impl_vmstate_forward`](crate::impl_vmstate_forward) and
+//! [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), which help with
+//! the definition of the [`VMState`] trait (respectively for transparent
+//! structs and for `bilge`-defined types)
+//!
+//! * helper macros to declare a device model state struct, in particular
+//! [`vmstate_subsections`](crate::vmstate_subsections) and
+//! [`vmstate_fields`](crate::vmstate_fields).
+//!
+//! * direct equivalents to the C macros declared in
+//! `include/migration/vmstate.h`. These are not type-safe and only provide
+//! functionality that is missing from `vmstate_of!`.
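+//!
+//! # Examples
+//!
+//! A hedged sketch of a device's migration description; `MyDeviceState` and
+//! its fields are placeholders, and a C-string literal is used for the name.
+//!
+//! ```ignore
+//! static VMSTATE_MY_DEVICE: VMStateDescription = VMStateDescription {
+//! name: c"my-device".as_ptr(),
+//! version_id: 1,
+//! minimum_version_id: 1,
+//! fields: vmstate_fields! {
+//! vmstate_of!(MyDeviceState, level),
+//! vmstate_of!(MyDeviceState, enabled),
+//! },
+//! ..Zeroable::ZERO
+//! };
+//! ```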
+
+use core::{marker::PhantomData, mem, ptr::NonNull};
+use std::ffi::{c_int, c_void};
+
+pub use crate::bindings::{VMStateDescription, VMStateField};
+use crate::{
+ bindings::VMStateFlags, callbacks::FnCall, prelude::*, qom::Owned, zeroable::Zeroable,
+};
+
+/// This macro is used to call a function with a generic argument bound
+/// to the type of a field. The function must take a
+/// [`PhantomData`]`<T>` argument; `T` is the type of
+/// field `$field` in the `$typ` type.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::call_func_with_field;
+/// # use core::marker::PhantomData;
+/// const fn size_of_field<T>(_: PhantomData<T>) -> usize {
+/// std::mem::size_of::<T>()
+/// }
+///
+/// struct Foo {
+/// x: u16,
+/// };
+/// // calls size_of_field::<u16>()
+/// assert_eq!(call_func_with_field!(size_of_field, Foo, x), 2);
+/// ```
+#[macro_export]
+macro_rules! call_func_with_field {
+ // Based on the answer by user steffahn (Frank Steffahn) at
+ // https://users.rust-lang.org/t/inferring-type-of-field/122857
+ // and used under MIT license
+ ($func:expr, $typ:ty, $($field:tt).+) => {
+ $func(loop {
+ #![allow(unreachable_code)]
+ const fn phantom__<T>(_: &T) -> ::core::marker::PhantomData<T> { ::core::marker::PhantomData }
+ // Unreachable code is exempt from checks on uninitialized values.
+ // Use that trick to infer the type of this PhantomData.
+ break ::core::marker::PhantomData;
+ break phantom__(&{ let value__: $typ; value__.$($field).+ });
+ })
+ };
+}
+
+/// Workaround for lack of `const_refs_static`: references to global variables
+/// can be included in a `static`, but not in a `const`; unfortunately, this
+/// is exactly what would go in the `VMStateField`'s `info` member.
+///
+/// This enum contains the contents of the `VMStateField`'s `info` member,
+/// but as an `enum` instead of a pointer.
+#[allow(non_camel_case_types)]
+pub enum VMStateFieldType {
+ null,
+ vmstate_info_bool,
+ vmstate_info_int8,
+ vmstate_info_int16,
+ vmstate_info_int32,
+ vmstate_info_int64,
+ vmstate_info_uint8,
+ vmstate_info_uint16,
+ vmstate_info_uint32,
+ vmstate_info_uint64,
+ vmstate_info_timer,
+}
+
+/// Workaround for lack of `const_refs_static`. Converts a `VMStateFieldType`
+/// to a `*const VMStateInfo`, for inclusion in a `VMStateField`.
+#[macro_export]
+macro_rules! info_enum_to_ref {
+ ($e:expr) => {
+ unsafe {
+ match $e {
+ $crate::vmstate::VMStateFieldType::null => ::core::ptr::null(),
+ $crate::vmstate::VMStateFieldType::vmstate_info_bool => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_bool)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_int8 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_int8)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_int16 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_int16)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_int32 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_int32)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_int64 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_int64)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_uint8 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint8)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_uint16 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint16)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_uint32 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint32)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_uint64 => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint64)
+ }
+ $crate::vmstate::VMStateFieldType::vmstate_info_timer => {
+ ::core::ptr::addr_of!($crate::bindings::vmstate_info_timer)
+ }
+ }
+ }
+ };
+}
+
+/// A trait for types that can be included in a device's migration stream. It
+/// provides the base contents of a `VMStateField` (minus the name and offset).
+///
+/// # Safety
+///
+/// The contents of this trait go straight into structs that are parsed by C
+/// code and used to introspect into other structs. Generally, you don't need
+/// to implement it except via macros that do it for you, such as
+/// `impl_vmstate_bitsized!`.
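+///
+/// # Examples
+///
+/// For reference, an implementation for a hypothetical `#[repr(transparent)]`
+/// wrapper around `u32` would simply forward everything to the inner type,
+/// which is roughly what the helper macros in this module expand to:
+///
+/// ```ignore
+/// #[repr(transparent)]
+/// struct MyReg(u32);
+///
+/// unsafe impl VMState for MyReg {
+///     const SCALAR_TYPE: VMStateFieldType = <u32 as VMState>::SCALAR_TYPE;
+///     const BASE: VMStateField = <u32 as VMState>::BASE;
+///     const VARRAY_FLAG: VMStateFlags = <u32 as VMState>::VARRAY_FLAG;
+/// }
+/// ```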
+pub unsafe trait VMState {
+ /// The `info` member of a `VMStateField` is a pointer and as such cannot
+ /// yet be included in the [`BASE`](VMState::BASE) associated constant;
+ /// this is only allowed by Rust 1.83.0 and newer. For now, include the
+ /// member as an enum which is stored in a separate constant.
+ const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::null;
+
+ /// The base contents of a `VMStateField` (minus the name and offset) for
+ /// the type that is implementing the trait.
+ const BASE: VMStateField;
+
+ /// A flag that is added to another field's `VMStateField` to specify the
+ /// length's type in a variable-sized array. If this is not a supported
+ /// type for the length (i.e. if it is not `u8`, `u16`, `u32`), using it
+ /// in a call to [`vmstate_of!`](crate::vmstate_of) will cause a
+ /// compile-time error.
+ const VARRAY_FLAG: VMStateFlags = {
+ panic!("invalid type for variable-sized array");
+ };
+}
+
+/// Internal utility function to retrieve a type's `VMStateFieldType`;
+/// used by [`vmstate_of!`](crate::vmstate_of).
+pub const fn vmstate_scalar_type<T: VMState>(_: PhantomData<T>) -> VMStateFieldType {
+ T::SCALAR_TYPE
+}
+
+/// Internal utility function to retrieve a type's `VMStateField`;
+/// used by [`vmstate_of!`](crate::vmstate_of).
+pub const fn vmstate_base<T: VMState>(_: PhantomData<T>) -> VMStateField {
+ T::BASE
+}
+
+/// Internal utility function to retrieve a type's `VMStateFlags` when it
+/// is used as the element count of a `VMSTATE_VARRAY`; used by
+/// [`vmstate_of!`](crate::vmstate_of).
+pub const fn vmstate_varray_flag<T: VMState>(_: PhantomData<T>) -> VMStateFlags {
+ T::VARRAY_FLAG
+}
+
+/// Return the `VMStateField` for a field of a struct. The field must be
+/// visible in the current scope.
+///
+/// Only a limited set of types is supported out of the box:
+/// * scalar types (integer and `bool`)
+/// * the C struct `QEMUTimer`
+/// * a transparent wrapper for any of the above (`Cell`, `UnsafeCell`,
+/// [`BqlCell`], [`BqlRefCell`])
+/// * a raw pointer to any of the above
+/// * a `NonNull` pointer, a `Box` or an [`Owned`] for any of the above
+/// * an array of any of the above
+///
+/// In order to support other types, the trait `VMState` must be implemented
+/// for them. The macros
+/// [`impl_vmstate_bitsized!`](crate::impl_vmstate_bitsized)
+/// and [`impl_vmstate_forward!`](crate::impl_vmstate_forward) help with this.
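+///
+/// # Examples
+///
+/// A sketch of typical invocations, for a hypothetical struct with a scalar,
+/// a fixed-size array and a variable-sized array field:
+///
+/// ```ignore
+/// #[repr(C)]
+/// struct Foo {
+///     live: bool,
+///     regs: [u32; 4],
+///     buf: [u8; 64],
+///     buf_len: u16,
+/// }
+///
+/// let fields = [
+///     vmstate_of!(Foo, live),
+///     vmstate_of!(Foo, regs),
+///     // only the first `buf_len` elements of `buf` are migrated
+///     vmstate_of!(Foo, buf[0 .. buf_len]),
+/// ];
+/// ```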
+#[macro_export]
+macro_rules! vmstate_of {
+ ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])? $(, $test_fn:expr)? $(,)?) => {
+ $crate::bindings::VMStateField {
+ name: ::core::concat!(::core::stringify!($field_name), "\0")
+ .as_bytes()
+ .as_ptr() as *const ::std::os::raw::c_char,
+ offset: ::std::mem::offset_of!($struct_name, $field_name),
+ $(num_offset: ::std::mem::offset_of!($struct_name, $num),)?
+ $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)?
+ // The calls to `call_func_with_field!` are the magic that
+ // computes most of the VMStateField from the type of the field.
+ info: $crate::info_enum_to_ref!($crate::call_func_with_field!(
+ $crate::vmstate::vmstate_scalar_type,
+ $struct_name,
+ $field_name
+ )),
+ ..$crate::call_func_with_field!(
+ $crate::vmstate::vmstate_base,
+ $struct_name,
+ $field_name
+ )$(.with_varray_flag($crate::call_func_with_field!(
+ $crate::vmstate::vmstate_varray_flag,
+ $struct_name,
+ $num))
+ $(.with_varray_multiply($factor))?)?
+ }
+ };
+}
+
+impl VMStateFlags {
+ const VMS_VARRAY_FLAGS: VMStateFlags = VMStateFlags(
+ VMStateFlags::VMS_VARRAY_INT32.0
+ | VMStateFlags::VMS_VARRAY_UINT8.0
+ | VMStateFlags::VMS_VARRAY_UINT16.0
+ | VMStateFlags::VMS_VARRAY_UINT32.0,
+ );
+}
+
+// Add a couple builder-style methods to VMStateField, allowing
+// easy derivation of VMStateField constants from other types.
+impl VMStateField {
+ #[must_use]
+ pub const fn with_version_id(mut self, version_id: i32) -> Self {
+ assert!(version_id >= 0);
+ self.version_id = version_id;
+ self
+ }
+
+ #[must_use]
+ pub const fn with_array_flag(mut self, num: usize) -> Self {
+ assert!(num <= 0x7FFF_FFFFusize);
+ assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) == 0);
+ assert!((self.flags.0 & VMStateFlags::VMS_VARRAY_FLAGS.0) == 0);
+ if (self.flags.0 & VMStateFlags::VMS_POINTER.0) != 0 {
+ self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_POINTER.0);
+ self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0);
+ // The VMS_ARRAY_OF_POINTER flag stores the size of a pointer.
+ // FIXME: *const, *mut, NonNull and Box<> all have the same size as usize;
+ // adjust this if more smart pointer types are supported.
+ self.size = std::mem::size_of::<usize>();
+ }
+ self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_SINGLE.0);
+ self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY.0);
+ self.num = num as i32;
+ self
+ }
+
+ #[must_use]
+ pub const fn with_pointer_flag(mut self) -> Self {
+ assert!((self.flags.0 & VMStateFlags::VMS_POINTER.0) == 0);
+ self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_POINTER.0);
+ self
+ }
+
+ #[must_use]
+ pub const fn with_varray_flag_unchecked(mut self, flag: VMStateFlags) -> VMStateField {
+ self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_ARRAY.0);
+ self.flags = VMStateFlags(self.flags.0 | flag.0);
+ self.num = 0; // varray uses num_offset instead of num.
+ self
+ }
+
+ #[must_use]
+ #[allow(unused_mut)]
+ pub const fn with_varray_flag(mut self, flag: VMStateFlags) -> VMStateField {
+ assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) != 0);
+ self.with_varray_flag_unchecked(flag)
+ }
+
+ #[must_use]
+ pub const fn with_varray_multiply(mut self, num: u32) -> VMStateField {
+ assert!(num <= 0x7FFF_FFFFu32);
+ self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0);
+ self.num = num as i32;
+ self
+ }
+}
+
+/// This macro can be used (by just passing it a type) to forward the `VMState`
+/// trait to the first field of a tuple. This is a workaround for the lack of
+/// support for nested [`offset_of`](core::mem::offset_of) before Rust 1.82.0.
+///
+/// # Examples
+///
+/// ```
+/// # use qemu_api::impl_vmstate_forward;
+/// pub struct Fifo([u8; 16]);
+/// impl_vmstate_forward!(Fifo);
+/// ```
+#[macro_export]
+macro_rules! impl_vmstate_forward {
+ // This is similar to impl_vmstate_transparent below, but it
+ // uses the same trick as vmstate_of! to obtain the type of
+ // the first field of the tuple
+ ($tuple:ty) => {
+ unsafe impl $crate::vmstate::VMState for $tuple {
+ const SCALAR_TYPE: $crate::vmstate::VMStateFieldType =
+ $crate::call_func_with_field!($crate::vmstate::vmstate_scalar_type, $tuple, 0);
+ const BASE: $crate::bindings::VMStateField =
+ $crate::call_func_with_field!($crate::vmstate::vmstate_base, $tuple, 0);
+ }
+ };
+}
+
+// Transparent wrappers: just use the internal type
+
+macro_rules! impl_vmstate_transparent {
+ ($type:ty where $base:tt: VMState $($where:tt)*) => {
+ unsafe impl<$base> VMState for $type where $base: VMState $($where)* {
+ const SCALAR_TYPE: VMStateFieldType = <$base as VMState>::SCALAR_TYPE;
+ const BASE: VMStateField = VMStateField {
+ size: mem::size_of::<$type>(),
+ ..<$base as VMState>::BASE
+ };
+ const VARRAY_FLAG: VMStateFlags = <$base as VMState>::VARRAY_FLAG;
+ }
+ };
+}
+
+impl_vmstate_transparent!(std::cell::Cell<T> where T: VMState);
+impl_vmstate_transparent!(std::cell::UnsafeCell<T> where T: VMState);
+impl_vmstate_transparent!(std::pin::Pin<T> where T: VMState);
+impl_vmstate_transparent!(crate::cell::BqlCell<T> where T: VMState);
+impl_vmstate_transparent!(crate::cell::BqlRefCell<T> where T: VMState);
+impl_vmstate_transparent!(crate::cell::Opaque<T> where T: VMState);
+
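+/// Implement the [`VMState`](crate::vmstate::VMState) trait for a
+/// `bilge`-defined type, by forwarding to the unsigned integer type that
+/// underlies it.
+///
+/// # Examples
+///
+/// A sketch for a hypothetical `bilge` register type:
+///
+/// ```ignore
+/// use bilge::prelude::*;
+///
+/// #[bitsize(8)]
+/// #[derive(Clone, Copy, DebugBits, FromBits)]
+/// struct Control {
+///     enable: bool,
+///     irq: bool,
+///     reserved: u6,
+/// }
+///
+/// impl_vmstate_bitsized!(Control);
+/// ```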
+#[macro_export]
+macro_rules! impl_vmstate_bitsized {
+ ($type:ty) => {
+ unsafe impl $crate::vmstate::VMState for $type {
+ const SCALAR_TYPE: $crate::vmstate::VMStateFieldType =
+ <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt
+ as ::bilge::prelude::Number>::UnderlyingType
+ as $crate::vmstate::VMState>::SCALAR_TYPE;
+ const BASE: $crate::bindings::VMStateField =
+ <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt
+ as ::bilge::prelude::Number>::UnderlyingType
+ as $crate::vmstate::VMState>::BASE;
+ const VARRAY_FLAG: $crate::bindings::VMStateFlags =
+ <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt
+ as ::bilge::prelude::Number>::UnderlyingType
+ as $crate::vmstate::VMState>::VARRAY_FLAG;
+ }
+ };
+}
+
+// Scalar types using predefined VMStateInfos
+
+macro_rules! impl_vmstate_scalar {
+ ($info:ident, $type:ty$(, $varray_flag:ident)?) => {
+ unsafe impl VMState for $type {
+ const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::$info;
+ const BASE: VMStateField = VMStateField {
+ size: mem::size_of::<$type>(),
+ flags: VMStateFlags::VMS_SINGLE,
+ ..Zeroable::ZERO
+ };
+ $(const VARRAY_FLAG: VMStateFlags = VMStateFlags::$varray_flag;)?
+ }
+ };
+}
+
+impl_vmstate_scalar!(vmstate_info_bool, bool);
+impl_vmstate_scalar!(vmstate_info_int8, i8);
+impl_vmstate_scalar!(vmstate_info_int16, i16);
+impl_vmstate_scalar!(vmstate_info_int32, i32);
+impl_vmstate_scalar!(vmstate_info_int64, i64);
+impl_vmstate_scalar!(vmstate_info_uint8, u8, VMS_VARRAY_UINT8);
+impl_vmstate_scalar!(vmstate_info_uint16, u16, VMS_VARRAY_UINT16);
+impl_vmstate_scalar!(vmstate_info_uint32, u32, VMS_VARRAY_UINT32);
+impl_vmstate_scalar!(vmstate_info_uint64, u64);
+impl_vmstate_scalar!(vmstate_info_timer, crate::timer::Timer);
+
+// Pointer types using the underlying type's VMState plus VMS_POINTER
+// Note that references are not supported, though references to cells
+// could be allowed.
+
+macro_rules! impl_vmstate_pointer {
+ ($type:ty where $base:tt: VMState $($where:tt)*) => {
+ unsafe impl<$base> VMState for $type where $base: VMState $($where)* {
+ const SCALAR_TYPE: VMStateFieldType = <T as VMState>::SCALAR_TYPE;
+ const BASE: VMStateField = <$base as VMState>::BASE.with_pointer_flag();
+ }
+ };
+}
+
+impl_vmstate_pointer!(*const T where T: VMState);
+impl_vmstate_pointer!(*mut T where T: VMState);
+impl_vmstate_pointer!(NonNull<T> where T: VMState);
+
+// Unlike C pointers, Box is always non-null; therefore there is no need
+// to specify VMS_ALLOC.
+impl_vmstate_pointer!(Box<T> where T: VMState);
+impl_vmstate_pointer!(Owned<T> where T: VMState + ObjectType);
+
+// Arrays using the underlying type's VMState plus
+// VMS_ARRAY/VMS_ARRAY_OF_POINTER
+
+unsafe impl<T: VMState, const N: usize> VMState for [T; N] {
+ const SCALAR_TYPE: VMStateFieldType = <T as VMState>::SCALAR_TYPE;
+ const BASE: VMStateField = <T as VMState>::BASE.with_array_flag(N);
+}
+
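+/// Return a `VMStateField` that covers `$size` bytes of the migration stream
+/// without mapping them to any Rust field (the Rust equivalent of the C
+/// `VMSTATE_UNUSED` macro).
+///
+/// # Examples
+///
+/// A sketch of its use inside [`vmstate_fields`](crate::vmstate_fields), for
+/// example to keep the stream format stable after a field has been removed:
+///
+/// ```ignore
+/// fields: vmstate_fields! {
+///     // 8 bytes that used to hold a now-removed i64 field
+///     vmstate_unused!(core::mem::size_of::<i64>()),
+/// },
+/// ```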
+#[doc(alias = "VMSTATE_UNUSED")]
+#[macro_export]
+macro_rules! vmstate_unused {
+ ($size:expr) => {{
+ $crate::bindings::VMStateField {
+ name: c"unused".as_ptr(),
+ size: $size,
+ info: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_info_unused_buffer) },
+ flags: $crate::bindings::VMStateFlags::VMS_BUFFER,
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ }};
+}
+
+pub extern "C" fn rust_vms_test_field_exists<T, F: for<'a> FnCall<(&'a T, u8), bool>>(
+ opaque: *mut c_void,
+ version_id: c_int,
+) -> bool {
+ // SAFETY: the opaque was passed as a reference to `T`.
+ let owner: &T = unsafe { &*(opaque.cast::<T>()) };
+ let version: u8 = version_id.try_into().unwrap();
+ F::call((owner, version))
+}
+
+pub type VMSFieldExistCb = unsafe extern "C" fn(
+ opaque: *mut std::os::raw::c_void,
+ version_id: std::os::raw::c_int,
+) -> bool;
+
+#[macro_export]
+macro_rules! vmstate_exist_fn {
+ ($struct_name:ty, $test_fn:expr) => {{
+ const fn test_cb_builder__<T, F: for<'a> $crate::callbacks::FnCall<(&'a T, u8), bool>>(
+ _phantom: ::core::marker::PhantomData<F>,
+ ) -> $crate::vmstate::VMSFieldExistCb {
+ let _: () = F::ASSERT_IS_SOME;
+ $crate::vmstate::rust_vms_test_field_exists::<T, F>
+ }
+
+ const fn phantom__<T>(_: &T) -> ::core::marker::PhantomData<T> {
+ ::core::marker::PhantomData
+ }
+ Some(test_cb_builder__::<$struct_name, _>(phantom__(&$test_fn)))
+ }};
+}
+
+// FIXME: including the `vmsd` field in a `const` is not possible without
+// the const_refs_static feature (stabilized in Rust 1.83.0). Without it,
+// it is not possible to use VMS_STRUCT in a transparent manner using
+// `vmstate_of!`. While VMSTATE_CLOCK can at least try to be type-safe,
+// VMSTATE_STRUCT includes $type only for documentation purposes; it
+// is checked against $field_name and $struct_name, but not against $vmsd,
+// which is what would really matter.
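+/// Return the `VMStateField` for a field of `$struct_name` whose own
+/// migration format is described by a separate `VMStateDescription`, passed
+/// as `$vmsd` (the Rust equivalent of the C `VMSTATE_STRUCT` macro).
+///
+/// # Examples
+///
+/// A sketch of its use inside [`vmstate_fields`](crate::vmstate_fields), for
+/// hypothetical `Outer`/`Inner` types where `VMSTATE_INNER` describes `Inner`:
+///
+/// ```ignore
+/// fields: vmstate_fields! {
+///     vmstate_struct!(Outer, inner, &VMSTATE_INNER, Inner),
+///     // variable-sized array of structs, with a `field_exists` test function
+///     vmstate_struct!(Outer, inner_arr[0 .. num_inner], &VMSTATE_INNER, Inner, some_test_fn),
+/// },
+/// ```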
+#[doc(alias = "VMSTATE_STRUCT")]
+#[macro_export]
+macro_rules! vmstate_struct {
+ ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?, $vmsd:expr, $type:ty $(, $test_fn:expr)? $(,)?) => {
+ $crate::bindings::VMStateField {
+ name: ::core::concat!(::core::stringify!($field_name), "\0")
+ .as_bytes()
+ .as_ptr() as *const ::std::os::raw::c_char,
+ $(num_offset: ::std::mem::offset_of!($struct_name, $num),)?
+ offset: {
+ $crate::assert_field_type!($struct_name, $field_name, $type $(, num = $num)?);
+ ::std::mem::offset_of!($struct_name, $field_name)
+ },
+ size: ::core::mem::size_of::<$type>(),
+ flags: $crate::bindings::VMStateFlags::VMS_STRUCT,
+ vmsd: $vmsd,
+ $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)?
+ ..$crate::zeroable::Zeroable::ZERO
+ } $(.with_varray_flag_unchecked(
+ $crate::call_func_with_field!(
+ $crate::vmstate::vmstate_varray_flag,
+ $struct_name,
+ $num
+ )
+ )
+ $(.with_varray_multiply($factor))?)?
+ };
+}
+
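+/// Return the `VMStateField` for an `Owned<Clock>` field (the Rust
+/// equivalent of the C `VMSTATE_CLOCK` macro).
+///
+/// # Examples
+///
+/// A sketch, assuming a hypothetical device state with a `clk: Owned<Clock>`
+/// field:
+///
+/// ```ignore
+/// fields: vmstate_fields! {
+///     vmstate_clock!(MyDeviceState, clk),
+/// },
+/// ```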
+#[doc(alias = "VMSTATE_CLOCK")]
+#[macro_export]
+macro_rules! vmstate_clock {
+ ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?) => {{
+ $crate::bindings::VMStateField {
+ name: ::core::concat!(::core::stringify!($field_name), "\0")
+ .as_bytes()
+ .as_ptr() as *const ::std::os::raw::c_char,
+ offset: {
+ $crate::assert_field_type!(
+ $struct_name,
+ $field_name,
+ $crate::qom::Owned<$crate::qdev::Clock> $(, num = $num)?
+ );
+ ::std::mem::offset_of!($struct_name, $field_name)
+ },
+ size: ::core::mem::size_of::<*const $crate::qdev::Clock>(),
+ flags: $crate::bindings::VMStateFlags(
+ $crate::bindings::VMStateFlags::VMS_STRUCT.0
+ | $crate::bindings::VMStateFlags::VMS_POINTER.0,
+ ),
+ vmsd: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_clock) },
+ ..$crate::zeroable::Zeroable::ZERO
+ } $(.with_varray_flag_unchecked(
+ $crate::call_func_with_field!(
+ $crate::vmstate::vmstate_varray_flag,
+ $struct_name,
+ $num
+ )
+ )
+ $(.with_varray_multiply($factor))?)?
+ }};
+}
+
+/// Helper macro to declare a list of
+/// [`VMStateField`](`crate::bindings::VMStateField`) values in a static and
+/// return a pointer to the array it created.
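+///
+/// # Examples
+///
+/// A sketch of assigning the result to the `fields` member of a
+/// `VMStateDescription` (the `Foo` type and its fields are hypothetical):
+///
+/// ```ignore
+/// static VMSTATE_FOO: VMStateDescription = VMStateDescription {
+///     name: c"foo".as_ptr(),
+///     version_id: 1,
+///     minimum_version_id: 1,
+///     fields: vmstate_fields! {
+///         vmstate_of!(Foo, flag),
+///         vmstate_of!(Foo, count),
+///     },
+///     ..Zeroable::ZERO
+/// };
+/// ```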
+#[macro_export]
+macro_rules! vmstate_fields {
+ ($($field:expr),*$(,)*) => {{
+ static _FIELDS: &[$crate::bindings::VMStateField] = &[
+ $($field),*,
+ $crate::bindings::VMStateField {
+ flags: $crate::bindings::VMStateFlags::VMS_END,
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ ];
+ _FIELDS.as_ptr()
+ }}
+}
+
+#[doc(alias = "VMSTATE_VALIDATE")]
+#[macro_export]
+macro_rules! vmstate_validate {
+ ($struct_name:ty, $test_name:expr, $test_fn:expr $(,)?) => {
+ $crate::bindings::VMStateField {
+ name: ::std::ffi::CStr::as_ptr($test_name),
+ field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),
+ flags: $crate::bindings::VMStateFlags(
+ $crate::bindings::VMStateFlags::VMS_MUST_EXIST.0
+ | $crate::bindings::VMStateFlags::VMS_ARRAY.0,
+ ),
+ num: 0, // 0 elements: no data, only run test_fn callback
+ ..$crate::zeroable::Zeroable::ZERO
+ }
+ };
+}
+
+/// A transparent wrapper type for the `subsections` field of
+/// [`VMStateDescription`].
+///
+/// This is necessary to be able to declare subsection descriptions as statics,
+/// because the only way to implement `Sync` for a foreign type (and `*const`
+/// pointers are foreign types in Rust) is to create a wrapper struct and
+/// `unsafe impl Sync` for it.
+///
+/// This struct is used in the
+/// [`vmstate_subsections`](crate::vmstate_subsections) macro implementation.
+#[repr(transparent)]
+pub struct VMStateSubsectionsWrapper(pub &'static [*const crate::bindings::VMStateDescription]);
+
+unsafe impl Sync for VMStateSubsectionsWrapper {}
+
+/// Helper macro to declare a list of subsections ([`VMStateDescription`])
+/// in a static and return a pointer to the array of pointers it created.
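+///
+/// # Examples
+///
+/// A sketch of assigning the result to the `subsections` member of a
+/// `VMStateDescription` (the subsection static is hypothetical):
+///
+/// ```ignore
+/// static VMSTATE_FOO: VMStateDescription = VMStateDescription {
+///     name: c"foo".as_ptr(),
+///     version_id: 1,
+///     minimum_version_id: 1,
+///     subsections: vmstate_subsections!(VMSTATE_FOO_SUBSECTION),
+///     ..Zeroable::ZERO
+/// };
+/// ```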
+#[macro_export]
+macro_rules! vmstate_subsections {
+ ($($subsection:expr),*$(,)*) => {{
+ static _SUBSECTIONS: $crate::vmstate::VMStateSubsectionsWrapper = $crate::vmstate::VMStateSubsectionsWrapper(&[
+ $({
+ static _SUBSECTION: $crate::bindings::VMStateDescription = $subsection;
+ ::core::ptr::addr_of!(_SUBSECTION)
+ }),*,
+ ::core::ptr::null()
+ ]);
+ _SUBSECTIONS.0.as_ptr()
+ }}
+}
diff --git a/rust/qemu-api/src/zeroable.rs b/rust/qemu-api/src/zeroable.rs
new file mode 100644
index 0000000..d8239d0
--- /dev/null
+++ b/rust/qemu-api/src/zeroable.rs
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Defines a trait for structs that can be safely initialized with zero bytes.
+
+/// Encapsulates the requirement that
+/// `MaybeUninit::<Self>::zeroed().assume_init()` does not cause undefined
+/// behavior.
+///
+/// # Safety
+///
+/// Do not add this trait to a type unless all-zeroes is a valid value for the
+/// type. In particular, raw pointers can be zero, but references and
+/// `NonNull<T>` cannot.
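+///
+/// # Examples
+///
+/// The main use of the trait is the `ZERO` constant, which fills in the
+/// remaining members of a partially initialized struct:
+///
+/// ```ignore
+/// let end_marker = VMStateField {
+///     flags: VMStateFlags::VMS_END,
+///     ..Zeroable::ZERO
+/// };
+/// ```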
+pub unsafe trait Zeroable: Default {
+ /// Return a value of Self whose memory representation consists of all
+ /// zeroes, with the possible exception of padding bytes.
+ const ZERO: Self = unsafe { ::core::mem::MaybeUninit::<Self>::zeroed().assume_init() };
+}
+
+// bindgen does not derive Default here
+#[allow(clippy::derivable_impls)]
+impl Default for crate::bindings::VMStateFlags {
+ fn default() -> Self {
+ Self(0)
+ }
+}
+
+unsafe impl Zeroable for crate::bindings::Property__bindgen_ty_1 {}
+unsafe impl Zeroable for crate::bindings::Property {}
+unsafe impl Zeroable for crate::bindings::VMStateFlags {}
+unsafe impl Zeroable for crate::bindings::VMStateField {}
+unsafe impl Zeroable for crate::bindings::VMStateDescription {}
+unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_1 {}
+unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_2 {}
+unsafe impl Zeroable for crate::bindings::MemoryRegionOps {}
+unsafe impl Zeroable for crate::bindings::MemTxAttrs {}
+unsafe impl Zeroable for crate::bindings::CharBackend {}
diff --git a/rust/qemu-api/tests/tests.rs b/rust/qemu-api/tests/tests.rs
new file mode 100644
index 0000000..a658a49
--- /dev/null
+++ b/rust/qemu-api/tests/tests.rs
@@ -0,0 +1,180 @@
+// Copyright 2024, Linaro Limited
+// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{ffi::CStr, ptr::addr_of};
+
+use qemu_api::{
+ bindings::{module_call_init, module_init_type, qdev_prop_bool},
+ cell::{self, BqlCell},
+ declare_properties, define_property,
+ prelude::*,
+ qdev::{DeviceImpl, DeviceState, Property, ResettablePhasesImpl},
+ qom::{ObjectImpl, ParentField},
+ sysbus::SysBusDevice,
+ vmstate::VMStateDescription,
+ zeroable::Zeroable,
+};
+
+mod vmstate_tests;
+
+// Test that macros can compile.
+pub static VMSTATE: VMStateDescription = VMStateDescription {
+ name: c"name".as_ptr(),
+ unmigratable: true,
+ ..Zeroable::ZERO
+};
+
+#[repr(C)]
+#[derive(qemu_api_macros::Object)]
+pub struct DummyState {
+ parent: ParentField<DeviceState>,
+ migrate_clock: bool,
+}
+
+qom_isa!(DummyState: Object, DeviceState);
+
+pub struct DummyClass {
+ parent_class: <DeviceState as ObjectType>::Class,
+}
+
+impl DummyClass {
+ pub fn class_init<T: DeviceImpl>(self: &mut DummyClass) {
+ self.parent_class.class_init::<T>();
+ }
+}
+
+declare_properties! {
+ DUMMY_PROPERTIES,
+ define_property!(
+ c"migrate-clk",
+ DummyState,
+ migrate_clock,
+ unsafe { &qdev_prop_bool },
+ bool
+ ),
+}
+
+unsafe impl ObjectType for DummyState {
+ type Class = DummyClass;
+ const TYPE_NAME: &'static CStr = c"dummy";
+}
+
+impl ObjectImpl for DummyState {
+ type ParentType = DeviceState;
+ const ABSTRACT: bool = false;
+ const CLASS_INIT: fn(&mut DummyClass) = DummyClass::class_init::<Self>;
+}
+
+impl ResettablePhasesImpl for DummyState {}
+
+impl DeviceImpl for DummyState {
+ fn properties() -> &'static [Property] {
+ &DUMMY_PROPERTIES
+ }
+ fn vmsd() -> Option<&'static VMStateDescription> {
+ Some(&VMSTATE)
+ }
+}
+
+#[repr(C)]
+#[derive(qemu_api_macros::Object)]
+pub struct DummyChildState {
+ parent: ParentField<DummyState>,
+}
+
+qom_isa!(DummyChildState: Object, DeviceState, DummyState);
+
+pub struct DummyChildClass {
+ parent_class: <DummyState as ObjectType>::Class,
+}
+
+unsafe impl ObjectType for DummyChildState {
+ type Class = DummyChildClass;
+ const TYPE_NAME: &'static CStr = c"dummy_child";
+}
+
+impl ObjectImpl for DummyChildState {
+ type ParentType = DummyState;
+ const ABSTRACT: bool = false;
+ const CLASS_INIT: fn(&mut DummyChildClass) = DummyChildClass::class_init::<Self>;
+}
+
+impl ResettablePhasesImpl for DummyChildState {}
+impl DeviceImpl for DummyChildState {}
+
+impl DummyChildClass {
+ pub fn class_init<T: DeviceImpl>(self: &mut DummyChildClass) {
+ self.parent_class.class_init::<T>();
+ }
+}
+
+fn init_qom() {
+ static ONCE: BqlCell<bool> = BqlCell::new(false);
+
+ cell::bql_start_test();
+ if !ONCE.get() {
+ unsafe {
+ module_call_init(module_init_type::MODULE_INIT_QOM);
+ }
+ ONCE.set(true);
+ }
+}
+
+#[test]
+/// Create and immediately drop an instance.
+fn test_object_new() {
+ init_qom();
+ drop(DummyState::new());
+ drop(DummyChildState::new());
+}
+
+#[test]
+#[allow(clippy::redundant_clone)]
+/// Create, clone and then drop an instance.
+fn test_clone() {
+ init_qom();
+ let p = DummyState::new();
+ assert_eq!(p.clone().typename(), "dummy");
+ drop(p);
+}
+
+#[test]
+/// Try invoking a method on an object.
+fn test_typename() {
+ init_qom();
+ let p = DummyState::new();
+ assert_eq!(p.typename(), "dummy");
+}
+
+// A note on all "cast" tests: usually, especially for downcasts, the desired
+// class would be placed on the right, for example:
+//
+// let sbd_ref = p.dynamic_cast::<SysBusDevice>();
+//
+// Here I am doing the opposite to check that the resulting type is correct.
+
+#[test]
+#[allow(clippy::shadow_unrelated)]
+/// Test casts on shared references.
+fn test_cast() {
+ init_qom();
+ let p = DummyState::new();
+ let p_ptr: *mut DummyState = p.as_mut_ptr();
+ let p_ref: &mut DummyState = unsafe { &mut *p_ptr };
+
+ let obj_ref: &Object = p_ref.upcast();
+ assert_eq!(addr_of!(*obj_ref), p_ptr.cast());
+
+ let sbd_ref: Option<&SysBusDevice> = obj_ref.dynamic_cast();
+ assert!(sbd_ref.is_none());
+
+ let dev_ref: Option<&DeviceState> = obj_ref.downcast();
+ assert_eq!(addr_of!(*dev_ref.unwrap()), p_ptr.cast());
+
+ // SAFETY: the cast is wrong, but the value is only used for comparison
+ unsafe {
+ let sbd_ref: &SysBusDevice = obj_ref.unsafe_cast();
+ assert_eq!(addr_of!(*sbd_ref), p_ptr.cast());
+ }
+}
diff --git a/rust/qemu-api/tests/vmstate_tests.rs b/rust/qemu-api/tests/vmstate_tests.rs
new file mode 100644
index 0000000..bded836
--- /dev/null
+++ b/rust/qemu-api/tests/vmstate_tests.rs
@@ -0,0 +1,505 @@
+// Copyright (C) 2025 Intel Corporation.
+// Author(s): Zhao Liu <zhao1.liu@intel.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+use std::{
+ ffi::{c_void, CStr},
+ mem::size_of,
+ ptr::NonNull,
+ slice,
+};
+
+use qemu_api::{
+ bindings::{
+ vmstate_info_bool, vmstate_info_int32, vmstate_info_int64, vmstate_info_int8,
+ vmstate_info_uint64, vmstate_info_uint8, vmstate_info_unused_buffer, VMStateFlags,
+ },
+ cell::{BqlCell, Opaque},
+ impl_vmstate_forward,
+ vmstate::{VMStateDescription, VMStateField},
+ vmstate_fields, vmstate_of, vmstate_struct, vmstate_unused, vmstate_validate,
+ zeroable::Zeroable,
+};
+
+const FOO_ARRAY_MAX: usize = 3;
+
+// =========================== Test VMSTATE_FOOA ===========================
+// Test the use cases of the vmstate macro, corresponding to the following C
+// macro variants:
+// * VMSTATE_FOOA:
+// - VMSTATE_U16
+// - VMSTATE_UNUSED
+// - VMSTATE_VARRAY_UINT16_UNSAFE
+// - VMSTATE_VARRAY_MULTIPLY
+#[repr(C)]
+#[derive(Default)]
+struct FooA {
+ arr: [u8; FOO_ARRAY_MAX],
+ num: u16,
+ arr_mul: [i8; FOO_ARRAY_MAX],
+ num_mul: u32,
+ elem: i8,
+}
+
+static VMSTATE_FOOA: VMStateDescription = VMStateDescription {
+ name: c"foo_a".as_ptr(),
+ version_id: 1,
+ minimum_version_id: 1,
+ fields: vmstate_fields! {
+ vmstate_of!(FooA, elem),
+ vmstate_unused!(size_of::<i64>()),
+ vmstate_of!(FooA, arr[0 .. num]).with_version_id(0),
+ vmstate_of!(FooA, arr_mul[0 .. num_mul * 16]),
+ },
+ ..Zeroable::ZERO
+};
+
+#[test]
+fn test_vmstate_uint16() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) };
+
+ // 1st VMStateField ("elem") in VMSTATE_FOOA (corresponding to VMSTATE_INT8)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(),
+ b"elem\0"
+ );
+ assert_eq!(foo_fields[0].offset, 16);
+ assert_eq!(foo_fields[0].num_offset, 0);
+ assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int8 });
+ assert_eq!(foo_fields[0].version_id, 0);
+ assert_eq!(foo_fields[0].size, 1);
+ assert_eq!(foo_fields[0].num, 0);
+ assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE);
+ assert!(foo_fields[0].vmsd.is_null());
+ assert!(foo_fields[0].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_unused() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) };
+
+ // 2nd VMStateField ("unused") in VMSTATE_FOOA (corresponding to VMSTATE_UNUSED)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(),
+ b"unused\0"
+ );
+ assert_eq!(foo_fields[1].offset, 0);
+ assert_eq!(foo_fields[1].num_offset, 0);
+ assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_unused_buffer });
+ assert_eq!(foo_fields[1].version_id, 0);
+ assert_eq!(foo_fields[1].size, 8);
+ assert_eq!(foo_fields[1].num, 0);
+ assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_BUFFER);
+ assert!(foo_fields[1].vmsd.is_null());
+ assert!(foo_fields[1].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_varray_uint16_unsafe() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) };
+
+ // 3rd VMStateField ("arr") in VMSTATE_FOOA (corresponding to
+ // VMSTATE_VARRAY_UINT16_UNSAFE)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(),
+ b"arr\0"
+ );
+ assert_eq!(foo_fields[2].offset, 0);
+ assert_eq!(foo_fields[2].num_offset, 4);
+ assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 });
+ assert_eq!(foo_fields[2].version_id, 0);
+ assert_eq!(foo_fields[2].size, 1);
+ assert_eq!(foo_fields[2].num, 0);
+ assert_eq!(foo_fields[2].flags, VMStateFlags::VMS_VARRAY_UINT16);
+ assert!(foo_fields[2].vmsd.is_null());
+ assert!(foo_fields[2].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_varray_multiply() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) };
+
+ // 4th VMStateField ("arr_mul") in VMSTATE_FOOA (corresponding to
+ // VMSTATE_VARRAY_MULTIPLY)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(),
+ b"arr_mul\0"
+ );
+ assert_eq!(foo_fields[3].offset, 6);
+ assert_eq!(foo_fields[3].num_offset, 12);
+ assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_int8 });
+ assert_eq!(foo_fields[3].version_id, 0);
+ assert_eq!(foo_fields[3].size, 1);
+ assert_eq!(foo_fields[3].num, 16);
+ assert_eq!(
+ foo_fields[3].flags.0,
+ VMStateFlags::VMS_VARRAY_UINT32.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0
+ );
+ assert!(foo_fields[3].vmsd.is_null());
+ assert!(foo_fields[3].field_exists.is_none());
+
+ // The last VMStateField in VMSTATE_FOOA.
+ assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END);
+}
+
+// =========================== Test VMSTATE_FOOB ===========================
+// Test the use cases of the vmstate macro, corresponding to the following C
+// macro variants:
+// * VMSTATE_FOOB:
+// - VMSTATE_BOOL_V
+// - VMSTATE_U64
+// - VMSTATE_STRUCT_VARRAY_UINT8
+// - (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32
+// - VMSTATE_ARRAY
+// - VMSTATE_STRUCT_VARRAY_UINT8 with BqlCell wrapper & test_fn
+#[repr(C)]
+#[derive(Default)]
+struct FooB {
+ arr_a: [FooA; FOO_ARRAY_MAX],
+ num_a: u8,
+ arr_a_mul: [FooA; FOO_ARRAY_MAX],
+ num_a_mul: u32,
+ wrap: BqlCell<u64>,
+ val: bool,
+ // FIXME: use a Timer array. Right now we can't, since it is hard to link
+ // savevm.c into the test.
+ arr_i64: [i64; FOO_ARRAY_MAX],
+ arr_a_wrap: [FooA; FOO_ARRAY_MAX],
+ num_a_wrap: BqlCell<u32>,
+}
+
+fn validate_foob(_state: &FooB, _version_id: u8) -> bool {
+ true
+}
+
+static VMSTATE_FOOB: VMStateDescription = VMStateDescription {
+ name: c"foo_b".as_ptr(),
+ version_id: 2,
+ minimum_version_id: 1,
+ fields: vmstate_fields! {
+ vmstate_of!(FooB, val).with_version_id(2),
+ vmstate_of!(FooB, wrap),
+ vmstate_struct!(FooB, arr_a[0 .. num_a], &VMSTATE_FOOA, FooA).with_version_id(1),
+ vmstate_struct!(FooB, arr_a_mul[0 .. num_a_mul * 32], &VMSTATE_FOOA, FooA).with_version_id(2),
+ vmstate_of!(FooB, arr_i64),
+ vmstate_struct!(FooB, arr_a_wrap[0 .. num_a_wrap], &VMSTATE_FOOA, FooA, validate_foob),
+ },
+ ..Zeroable::ZERO
+};
+
+#[test]
+fn test_vmstate_bool_v() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+
+ // 1st VMStateField ("val") in VMSTATE_FOOB (corresponding to VMSTATE_BOOL_V)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(),
+ b"val\0"
+ );
+ assert_eq!(foo_fields[0].offset, 136);
+ assert_eq!(foo_fields[0].num_offset, 0);
+ assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_bool });
+ assert_eq!(foo_fields[0].version_id, 2);
+ assert_eq!(foo_fields[0].size, 1);
+ assert_eq!(foo_fields[0].num, 0);
+ assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE);
+ assert!(foo_fields[0].vmsd.is_null());
+ assert!(foo_fields[0].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_uint64() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+
+ // 2nd VMStateField ("wrap") in VMSTATE_FOOB (corresponding to VMSTATE_U64)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(),
+ b"wrap\0"
+ );
+ assert_eq!(foo_fields[1].offset, 128);
+ assert_eq!(foo_fields[1].num_offset, 0);
+ assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_uint64 });
+ assert_eq!(foo_fields[1].version_id, 0);
+ assert_eq!(foo_fields[1].size, 8);
+ assert_eq!(foo_fields[1].num, 0);
+ assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_SINGLE);
+ assert!(foo_fields[1].vmsd.is_null());
+ assert!(foo_fields[1].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_struct_varray_uint8() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+
+ // 3rd VMStateField ("arr_a") in VMSTATE_FOOB (corresponding to
+ // VMSTATE_STRUCT_VARRAY_UINT8)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(),
+ b"arr_a\0"
+ );
+ assert_eq!(foo_fields[2].offset, 0);
+ assert_eq!(foo_fields[2].num_offset, 60);
+ assert!(foo_fields[2].info.is_null()); // VMSTATE_STRUCT_VARRAY_UINT8 doesn't set info field.
+ assert_eq!(foo_fields[2].version_id, 1);
+ assert_eq!(foo_fields[2].size, 20);
+ assert_eq!(foo_fields[2].num, 0);
+ assert_eq!(
+ foo_fields[2].flags.0,
+ VMStateFlags::VMS_STRUCT.0 | VMStateFlags::VMS_VARRAY_UINT8.0
+ );
+ assert_eq!(foo_fields[2].vmsd, &VMSTATE_FOOA);
+ assert!(foo_fields[2].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_struct_varray_uint32_multiply() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+
+ // 4th VMStateField ("arr_a_mul") in VMSTATE_FOOB (corresponding to
+ // (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(),
+ b"arr_a_mul\0"
+ );
+ assert_eq!(foo_fields[3].offset, 64);
+ assert_eq!(foo_fields[3].num_offset, 124);
+ assert!(foo_fields[3].info.is_null()); // The struct VARRAY variants don't set the info field.
+ assert_eq!(foo_fields[3].version_id, 2);
+ assert_eq!(foo_fields[3].size, 20);
+ assert_eq!(foo_fields[3].num, 32);
+ assert_eq!(
+ foo_fields[3].flags.0,
+ VMStateFlags::VMS_STRUCT.0
+ | VMStateFlags::VMS_VARRAY_UINT32.0
+ | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0
+ );
+ assert_eq!(foo_fields[3].vmsd, &VMSTATE_FOOA);
+ assert!(foo_fields[3].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_macro_array() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+
+ // 5th VMStateField ("arr_i64") in VMSTATE_FOOB (corresponding to
+ // VMSTATE_ARRAY)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[4].name) }.to_bytes_with_nul(),
+ b"arr_i64\0"
+ );
+ assert_eq!(foo_fields[4].offset, 144);
+ assert_eq!(foo_fields[4].num_offset, 0);
+ assert_eq!(foo_fields[4].info, unsafe { &vmstate_info_int64 });
+ assert_eq!(foo_fields[4].version_id, 0);
+ assert_eq!(foo_fields[4].size, 8);
+ assert_eq!(foo_fields[4].num, FOO_ARRAY_MAX as i32);
+ assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_ARRAY);
+ assert!(foo_fields[4].vmsd.is_null());
+ assert!(foo_fields[4].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_struct_varray_uint8_wrapper() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) };
+ let mut foo_b: FooB = Default::default();
+ let foo_b_p = std::ptr::addr_of_mut!(foo_b).cast::<c_void>();
+
+ // 6th VMStateField ("arr_a_wrap") in VMSTATE_FOOB (corresponding to
+ // VMSTATE_STRUCT_VARRAY_UINT8). Other fields are checked in
+ // test_vmstate_struct_varray_uint8.
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[5].name) }.to_bytes_with_nul(),
+ b"arr_a_wrap\0"
+ );
+ assert_eq!(foo_fields[5].num_offset, 228);
+ assert!(unsafe { foo_fields[5].field_exists.unwrap()(foo_b_p, 0) });
+
+ // The last VMStateField in VMSTATE_FOOB.
+ assert_eq!(foo_fields[6].flags, VMStateFlags::VMS_END);
+}
+
+// =========================== Test VMSTATE_FOOC ===========================
+// Test the use cases of the vmstate macro, corresponding to the following C
+// macro variants:
+// * VMSTATE_FOOC:
+// - VMSTATE_POINTER
+// - VMSTATE_ARRAY_OF_POINTER
+struct FooCWrapper([Opaque<*mut u8>; FOO_ARRAY_MAX]); // An Opaque<> array is unlikely in practice.
+
+impl_vmstate_forward!(FooCWrapper);
+
+#[repr(C)]
+struct FooC {
+ ptr: *const i32,
+ ptr_a: NonNull<FooA>,
+ arr_ptr: [Box<u8>; FOO_ARRAY_MAX],
+ arr_ptr_wrap: FooCWrapper,
+}
+
+static VMSTATE_FOOC: VMStateDescription = VMStateDescription {
+ name: c"foo_c".as_ptr(),
+ version_id: 3,
+ minimum_version_id: 1,
+ fields: vmstate_fields! {
+ vmstate_of!(FooC, ptr).with_version_id(2),
+ // FIXME: currently vmstate_struct doesn't support pointers to structures.
+ // VMSTATE_STRUCT_POINTER: vmstate_struct!(FooC, ptr_a, VMSTATE_FOOA, NonNull<FooA>)
+ vmstate_unused!(size_of::<NonNull<FooA>>()),
+ vmstate_of!(FooC, arr_ptr),
+ vmstate_of!(FooC, arr_ptr_wrap),
+ },
+ ..Zeroable::ZERO
+};
+
+const PTR_SIZE: usize = size_of::<*mut ()>();
+
+#[test]
+fn test_vmstate_pointer() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) };
+
+ // 1st VMStateField ("ptr") in VMSTATE_FOOC (corresponding to VMSTATE_POINTER)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(),
+ b"ptr\0"
+ );
+ assert_eq!(foo_fields[0].offset, 0);
+ assert_eq!(foo_fields[0].num_offset, 0);
+ assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int32 });
+ assert_eq!(foo_fields[0].version_id, 2);
+ assert_eq!(foo_fields[0].size, 4);
+ assert_eq!(foo_fields[0].num, 0);
+ assert_eq!(
+ foo_fields[0].flags.0,
+ VMStateFlags::VMS_SINGLE.0 | VMStateFlags::VMS_POINTER.0
+ );
+ assert!(foo_fields[0].vmsd.is_null());
+ assert!(foo_fields[0].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_macro_array_of_pointer() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) };
+
+ // 3rd VMStateField ("arr_ptr") in VMSTATE_FOOC (corresponding to
+ // VMSTATE_ARRAY_OF_POINTER)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(),
+ b"arr_ptr\0"
+ );
+ assert_eq!(foo_fields[2].offset, 2 * PTR_SIZE);
+ assert_eq!(foo_fields[2].num_offset, 0);
+ assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 });
+ assert_eq!(foo_fields[2].version_id, 0);
+ assert_eq!(foo_fields[2].size, PTR_SIZE);
+ assert_eq!(foo_fields[2].num, FOO_ARRAY_MAX as i32);
+ assert_eq!(
+ foo_fields[2].flags.0,
+ VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0
+ );
+ assert!(foo_fields[2].vmsd.is_null());
+ assert!(foo_fields[2].field_exists.is_none());
+}
+
+#[test]
+fn test_vmstate_macro_array_of_pointer_wrapped() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) };
+
+ // 4th VMStateField ("arr_ptr_wrap") in VMSTATE_FOOC (corresponding to
+ // VMSTATE_ARRAY_OF_POINTER)
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(),
+ b"arr_ptr_wrap\0"
+ );
+ assert_eq!(foo_fields[3].offset, (FOO_ARRAY_MAX + 2) * PTR_SIZE);
+ assert_eq!(foo_fields[3].num_offset, 0);
+ assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_uint8 });
+ assert_eq!(foo_fields[3].version_id, 0);
+ assert_eq!(foo_fields[3].size, PTR_SIZE);
+ assert_eq!(foo_fields[3].num, FOO_ARRAY_MAX as i32);
+ assert_eq!(
+ foo_fields[3].flags.0,
+ VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0
+ );
+ assert!(foo_fields[3].vmsd.is_null());
+ assert!(foo_fields[3].field_exists.is_none());
+
+ // The last VMStateField in VMSTATE_FOOC.
+ assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END);
+}
+
+// =========================== Test VMSTATE_FOOD ===========================
+// Test the use cases of the vmstate macro, corresponding to the following C
+// macro variants:
+// * VMSTATE_FOOD:
+// - VMSTATE_VALIDATE
+
+// Add more member fields when vmstate_of/vmstate_struct support the "test"
+// parameter.
+struct FooD;
+
+impl FooD {
+ fn validate_food_0(&self, _version_id: u8) -> bool {
+ true
+ }
+
+ fn validate_food_1(_state: &FooD, _version_id: u8) -> bool {
+ false
+ }
+}
+
+fn validate_food_2(_state: &FooD, _version_id: u8) -> bool {
+ true
+}
+
+static VMSTATE_FOOD: VMStateDescription = VMStateDescription {
+ name: c"foo_d".as_ptr(),
+ version_id: 3,
+ minimum_version_id: 1,
+ fields: vmstate_fields! {
+ vmstate_validate!(FooD, c"foo_d_0", FooD::validate_food_0),
+ vmstate_validate!(FooD, c"foo_d_1", FooD::validate_food_1),
+ vmstate_validate!(FooD, c"foo_d_2", validate_food_2),
+ },
+ ..Zeroable::ZERO
+};
+
+#[test]
+fn test_vmstate_validate() {
+ let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOD.fields, 4) };
+ let mut foo_d = FooD;
+ let foo_d_p = std::ptr::addr_of_mut!(foo_d).cast::<c_void>();
+
+ // 1st VMStateField in VMSTATE_FOOD
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(),
+ b"foo_d_0\0"
+ );
+ assert_eq!(foo_fields[0].offset, 0);
+ assert_eq!(foo_fields[0].num_offset, 0);
+ assert!(foo_fields[0].info.is_null());
+ assert_eq!(foo_fields[0].version_id, 0);
+ assert_eq!(foo_fields[0].size, 0);
+ assert_eq!(foo_fields[0].num, 0);
+ assert_eq!(
+ foo_fields[0].flags.0,
+ VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_MUST_EXIST.0
+ );
+ assert!(foo_fields[0].vmsd.is_null());
+ assert!(unsafe { foo_fields[0].field_exists.unwrap()(foo_d_p, 0) });
+
+ // 2nd VMStateField in VMSTATE_FOOD
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(),
+ b"foo_d_1\0"
+ );
+ assert!(!unsafe { foo_fields[1].field_exists.unwrap()(foo_d_p, 1) });
+
+ // 3rd VMStateField in VMSTATE_FOOD
+ assert_eq!(
+ unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(),
+ b"foo_d_2\0"
+ );
+ assert!(unsafe { foo_fields[2].field_exists.unwrap()(foo_d_p, 2) });
+
+ // The last VMStateField in VMSTATE_FOOD.
+ assert_eq!(foo_fields[3].flags, VMStateFlags::VMS_END);
+}
diff --git a/rust/qemu-api/wrapper.h b/rust/qemu-api/wrapper.h
new file mode 100644
index 0000000..15a1b19
--- /dev/null
+++ b/rust/qemu-api/wrapper.h
@@ -0,0 +1,71 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2024 Linaro Ltd.
+ *
+ * Authors: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+
+/*
+ * This header file is meant to be used as input to the `bindgen` application
+ * in order to generate C FFI compatible Rust bindings.
+ */
+
+#ifndef __CLANG_STDATOMIC_H
+#define __CLANG_STDATOMIC_H
+/*
+ * Fix potential missing stdatomic.h error in case bindgen does not insert the
+ * correct libclang header paths on its own. We do not use stdatomic.h symbols
+ * in QEMU code, so it's fine to declare dummy types instead.
+ */
+typedef enum memory_order {
+ memory_order_relaxed,
+ memory_order_consume,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst,
+} memory_order;
+#endif /* __CLANG_STDATOMIC_H */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/log-for-trace.h"
+#include "qemu/module.h"
+#include "qemu-io.h"
+#include "system/system.h"
+#include "hw/sysbus.h"
+#include "system/memory.h"
+#include "chardev/char-fe.h"
+#include "hw/clock.h"
+#include "hw/qdev-clock.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/irq.h"
+#include "qapi/error.h"
+#include "qapi/error-internal.h"
+#include "migration/vmstate.h"
+#include "chardev/char-serial.h"
+#include "exec/memattrs.h"
+#include "qemu/timer.h"
+#include "system/address-spaces.h"
+#include "hw/char/pl011.h"
diff --git a/rust/rustfmt.toml b/rust/rustfmt.toml
new file mode 100644
index 0000000..ebecb99
--- /dev/null
+++ b/rust/rustfmt.toml
@@ -0,0 +1,7 @@
+edition = "2021"
+format_generated_files = false
+format_code_in_doc_comments = true
+format_strings = true
+imports_granularity = "Crate"
+group_imports = "StdExternalCrate"
+wrap_comments = true
diff --git a/scripts/analyze-inclusions b/scripts/analyze-inclusions
index b6280f2..d2c5666 100644
--- a/scripts/analyze-inclusions
+++ b/scripts/analyze-inclusions
@@ -53,7 +53,7 @@ echo $(grep_include -F 'trace/generated-tracers.h') files include generated-trac
echo $(grep_include -F 'qapi/error.h') files include qapi/error.h
echo $(grep_include -F 'qom/object.h') files include qom/object.h
echo $(grep_include -F 'block/aio.h') files include block/aio.h
-echo $(grep_include -F 'exec/memory.h') files include exec/memory.h
+echo $(grep_include -F 'system/memory.h') files include system/memory.h
echo $(grep_include -F 'fpu/softfloat.h') files include fpu/softfloat.h
echo $(grep_include -F 'qemu/bswap.h') files include qemu/bswap.h
echo
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index 8a254a5..67631ac 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -65,6 +65,9 @@ class MigrationFile(object):
def tell(self):
return self.file.tell()
+ def seek(self, a, b):
+ return self.file.seek(a, b)
+
# The VMSD description is at the end of the file, after EOF. Look for
# the last NULL byte, then for the beginning brace of JSON.
def read_migration_debug_json(self):
@@ -272,11 +275,24 @@ class S390StorageAttributes(object):
self.section_key = section_key
def read(self):
+ pos = 0
while True:
addr_flags = self.file.read64()
flags = addr_flags & 0xfff
- if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)):
+
+ if flags & self.STATTR_FLAG_DONE:
+ pos = self.file.tell()
+ continue
+ elif flags & self.STATTR_FLAG_EOS:
return
+ else:
+ # No EOS came after DONE, that's OK, but rewind the
+ # stream because this is not our data.
+ if pos:
+ self.file.seek(pos, os.SEEK_SET)
+ return
+ raise Exception("Unknown flags %x", flags)
+
if (flags & self.STATTR_FLAG_ERROR):
raise Exception("Error in migration stream")
count = self.file.read64()
@@ -401,6 +417,28 @@ class VMSDFieldIntLE(VMSDFieldInt):
super(VMSDFieldIntLE, self).__init__(desc, file)
self.dtype = '<i%d' % self.size
+class VMSDFieldNull(VMSDFieldGeneric):
+ NULL_PTR_MARKER = b'0'
+
+ def __init__(self, desc, file):
+ super(VMSDFieldNull, self).__init__(desc, file)
+
+ def __repr__(self):
+ # A NULL pointer is encoded in the stream as a '0' to
+ # disambiguate from a mere 0x0 value and avoid consumers
+ # trying to follow the NULL pointer. Displaying '0', 0x30 or
+ # 0x0 when analyzing the JSON debug stream could become
+ # confusing, so use an explicit term instead.
+ return "nullptr"
+
+ def __str__(self):
+ return self.__repr__()
+
+ def read(self):
+ super(VMSDFieldNull, self).read()
+ assert(self.data == self.NULL_PTR_MARKER)
+ return self.data
+
class VMSDFieldBool(VMSDFieldGeneric):
def __init__(self, desc, file):
super(VMSDFieldBool, self).__init__(desc, file)
@@ -429,6 +467,9 @@ class VMSDFieldStruct(VMSDFieldGeneric):
super(VMSDFieldStruct, self).__init__(desc, file)
self.data = collections.OrderedDict()
+ if 'fields' not in self.desc['struct']:
+ raise Exception("No fields in struct. VMSD:\n%s" % self.desc)
+
# When we see compressed array elements, unfold them here
new_fields = []
for field in self.desc['struct']['fields']:
@@ -461,15 +502,25 @@ class VMSDFieldStruct(VMSDFieldGeneric):
field['data'] = reader(field, self.file)
field['data'].read()
- if 'index' in field:
- if field['name'] not in self.data:
- self.data[field['name']] = []
- a = self.data[field['name']]
- if len(a) != int(field['index']):
- raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
- a.append(field['data'])
+ fname = field['name']
+ fdata = field['data']
+
+ # The field could be:
+ # i) a single data entry, e.g. uint64
+ # ii) an array, indicated by it containing the 'index' key
+ #
+ # However, the overall data after parsing the whole
+ # stream could be a mix of arrays and single data fields,
+ # all sharing the same field name due to how QEMU breaks
+ # up arrays with NULL pointers into multiple compressed
+ # array segments.
+ if fname not in self.data:
+ self.data[fname] = fdata
+ elif type(self.data[fname]) == list:
+ self.data[fname].append(fdata)
else:
- self.data[field['name']] = field['data']
+ tmp = self.data[fname]
+ self.data[fname] = [tmp, fdata]
if 'subsections' in self.desc['struct']:
for subsection in self.desc['struct']['subsections']:
@@ -477,6 +528,10 @@ class VMSDFieldStruct(VMSDFieldGeneric):
raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
name = self.file.readstr()
version_id = self.file.read32()
+
+ if not subsection:
+ raise Exception("Empty description for subsection: %s" % name)
+
self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
self.data[name].read()
@@ -535,6 +590,7 @@ vmsd_field_readers = {
"bitmap" : VMSDFieldGeneric,
"struct" : VMSDFieldStruct,
"capability": VMSDFieldCap,
+ "nullptr": VMSDFieldNull,
"unknown" : VMSDFieldGeneric,
}
@@ -564,7 +620,9 @@ class MigrationDump(object):
QEMU_VM_SUBSECTION = 0x05
QEMU_VM_VMDESCRIPTION = 0x06
QEMU_VM_CONFIGURATION = 0x07
+ QEMU_VM_COMMAND = 0x08
QEMU_VM_SECTION_FOOTER= 0x7e
+ QEMU_MIG_CMD_SWITCHOVER_START = 0x0b
def __init__(self, filename):
self.section_classes = {
@@ -574,10 +632,13 @@ class MigrationDump(object):
}
self.filename = filename
self.vmsd_desc = None
+ self.vmsd_json = ""
- def read(self, desc_only = False, dump_memory = False, write_memory = False):
+ def read(self, desc_only = False, dump_memory = False,
+ write_memory = False):
# Read in the whole file
file = MigrationFile(self.filename)
+ self.vmsd_json = file.read_migration_debug_json()
# File magic
data = file.read32()
@@ -626,6 +687,15 @@ class MigrationDump(object):
elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
section_id = file.read32()
self.sections[section_id].read()
+ elif section_type == self.QEMU_VM_COMMAND:
+ command_type = file.read16()
+ command_data_len = file.read16()
+ if command_type != self.QEMU_MIG_CMD_SWITCHOVER_START:
+ raise Exception("Unknown QEMU_VM_COMMAND: %x" %
+ (command_type))
+ if command_data_len != 0:
+ raise Exception("Invalid SWITCHOVER_START length: %x" %
+ (command_data_len))
elif section_type == self.QEMU_VM_SECTION_FOOTER:
read_section_id = file.read32()
if read_section_id != section_id:
@@ -635,9 +705,11 @@ class MigrationDump(object):
file.close()
def load_vmsd_json(self, file):
- vmsd_json = file.read_migration_debug_json()
- self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
+ self.vmsd_desc = json.loads(self.vmsd_json,
+ object_pairs_hook=collections.OrderedDict)
for device in self.vmsd_desc['devices']:
+ if 'fields' not in device:
+ raise Exception("vmstate for device %s has no fields" % device['name'])
key = (device['name'], device['instance_id'])
value = ( VMSDSection, device )
self.section_classes[key] = value
@@ -666,31 +738,34 @@ args = parser.parse_args()
jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
-if args.extract:
- dump = MigrationDump(args.file)
+if not any([args.extract, args.dump == "state", args.dump == "desc"]):
+ raise Exception("Please specify either -x, -d state or -d desc")
- dump.read(desc_only = True)
- print("desc.json")
- f = open("desc.json", "w")
- f.truncate()
- f.write(jsonenc.encode(dump.vmsd_desc))
- f.close()
-
- dump.read(write_memory = True)
- dict = dump.getDict()
- print("state.json")
- f = open("state.json", "w")
- f.truncate()
- f.write(jsonenc.encode(dict))
- f.close()
-elif args.dump == "state":
+try:
dump = MigrationDump(args.file)
- dump.read(dump_memory = args.memory)
- dict = dump.getDict()
- print(jsonenc.encode(dict))
-elif args.dump == "desc":
- dump = MigrationDump(args.file)
- dump.read(desc_only = True)
- print(jsonenc.encode(dump.vmsd_desc))
-else:
- raise Exception("Please specify either -x, -d state or -d desc")
+
+ if args.extract:
+ dump.read(desc_only = True)
+
+ print("desc.json")
+ f = open("desc.json", "w")
+ f.truncate()
+ f.write(jsonenc.encode(dump.vmsd_desc))
+ f.close()
+
+ dump.read(write_memory = True)
+ dict = dump.getDict()
+ print("state.json")
+ f = open("state.json", "w")
+ f.truncate()
+ f.write(jsonenc.encode(dict))
+ f.close()
+ elif args.dump == "state":
+ dump.read(dump_memory = args.memory)
+ dict = dump.getDict()
+ print(jsonenc.encode(dict))
+ elif args.dump == "desc":
+ dump.read(desc_only = True)
+ print(jsonenc.encode(dump.vmsd_desc))
+except Exception:
+ raise Exception("Full JSON dump:\n%s", dump.vmsd_json)
diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh
index 65af806..035828c 100755
--- a/scripts/archive-source.sh
+++ b/scripts/archive-source.sh
@@ -26,7 +26,12 @@ sub_file="${sub_tdir}/submodule.tar"
# independent of what the developer currently has initialized
# in their checkout, because the build environment is completely
# different to the host OS.
-subprojects="keycodemapdb libvfio-user berkeley-softfloat-3 berkeley-testfloat-3"
+subprojects="keycodemapdb libvfio-user berkeley-softfloat-3
+ berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs
+ bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs
+ libc-0.2-rs proc-macro2-1-rs
+ proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
+ syn-2-rs unicode-ident-1-rs"
sub_deinit=""
function cleanup() {
@@ -48,13 +53,34 @@ function tree_ish() {
echo "$retval"
}
+function subproject_dir() {
+ if test ! -f "subprojects/$1.wrap"; then
+ error "scripts/archive-source.sh should only process wrap subprojects"
+ fi
+
+ # Print the directory key of the wrap file, defaulting to the
+ # subproject name. The wrap file is in ini format and should
+ # have a single section only, named "[wrap-*]", which helps keep
+ # the script simple.
+ local dir
+ dir=$(sed -n \
+ -e '/^\[wrap-[a-z][a-z]*\]$/,/^\[/{' \
+ -e '/^directory *= */!b' \
+ -e 's///p' \
+ -e 'q' \
+ -e '}' \
+ "subprojects/$1.wrap")
+
+ echo "${dir:-$1}"
+}
+
git archive --format tar "$(tree_ish)" > "$tar_file"
test $? -ne 0 && error "failed to archive qemu"
for sp in $subprojects; do
meson subprojects download $sp
test $? -ne 0 && error "failed to download subproject $sp"
- tar --append --file "$tar_file" --exclude=.git subprojects/$sp
+ tar --append --file "$tar_file" --exclude=.git subprojects/"$(subproject_dir $sp)"
test $? -ne 0 && error "failed to append subproject $sp to $tar_file"
done
exit 0
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index ff373a7..833f20f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -365,6 +365,18 @@ our @typeList = (
qr{guintptr},
);
+# Match text found in common license boilerplate comments:
+# for new files the SPDX-License-Identifier line is sufficient.
+our @LICENSE_BOILERPLATE = (
+ "licensed under the terms of the GNU GPL",
+ "under the terms of the GNU General Public License",
+ "under the terms of the GNU Lesser General Public",
+ "Permission is hereby granted, free of charge",
+ "GNU GPL, version 2 or later",
+ "See the COPYING file"
+);
+our $LICENSE_BOILERPLATE_RE = join("|", @LICENSE_BOILERPLATE);
+
# Load common spelling mistakes and build regular expression list.
my $misspellings;
my %spelling_fix;
@@ -1330,26 +1342,179 @@ sub WARN {
}
}
-# According to tests/qtest/bios-tables-test.c: do not
-# change expected file in the same commit with adding test
-sub checkfilename {
- my ($name, $acpi_testexpected, $acpi_nontestexpected) = @_;
-
- # Note: shell script that rebuilds the expected files is in the same
- # directory as files themselves.
- # Note: allowed diff list can be changed both when changing expected
- # files and when changing tests.
- if ($name =~ m#^tests/data/acpi/# and not $name =~ m#^\.sh$#) {
- $$acpi_testexpected = $name;
- } elsif ($name !~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
- $$acpi_nontestexpected = $name;
+sub checkspdx {
+ my ($file, $expr) = @_;
+
+ # Imported Linux headers probably have SPDX tags, but if they
+ # don't we're not requiring contributors to fix this, as these
+ # files are not expected to be modified locally in QEMU.
+ # Also don't accidentally detect our own checking code.
+ if ($file =~ m,include/standard-headers, ||
+ $file =~ m,linux-headers, ||
+ $file =~ m,checkpatch.pl,) {
+ return;
+ }
+
+ my $origexpr = $expr;
+
+ # Flatten sub-expressions
+ $expr =~ s/\(|\)/ /g;
+ $expr =~ s/OR|AND/ /g;
+
+ # Merge WITH exceptions to the license
+ $expr =~ s/\s+WITH\s+/-WITH-/g;
+
+ # Cull more leading/trailing whitespace
+ $expr =~ s/^\s*//g;
+ $expr =~ s/\s*$//g;
+
+ my @bits = split / +/, $expr;
+
+ my $prefer = "GPL-2.0-or-later";
+ my @valid = qw(
+ GPL-2.0-only
+ LGPL-2.1-only
+ LGPL-2.1-or-later
+ BSD-2-Clause
+ BSD-3-Clause
+ MIT
+ );
+
+ my $nonpreferred = 0;
+ my @unknown = ();
+ foreach my $bit (@bits) {
+ if ($bit eq $prefer) {
+ next;
+ }
+ if (grep /^$bit$/, @valid) {
+ $nonpreferred = 1;
+ } else {
+ push @unknown, $bit;
+ }
+ }
+ if (@unknown) {
+ ERROR("Saw unacceptable licenses '" . join(',', @unknown) .
+ "', valid choices for QEMU are:\n" . join("\n", $prefer, @valid));
+ }
+
+ if ($nonpreferred) {
+ WARN("Saw acceptable license '$origexpr' but note '$prefer' is " .
+ "preferred for new files unless the code is derived from a " .
+ "source file with an existing declared license that must be " .
+ "retained. Please explain the license choice in the commit " .
+ "message.");
+ }
+}
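
The checkspdx() routine above flattens an SPDX expression (dropping
parentheses and OR/AND, keeping WITH exceptions attached) and then sorts each
license tag into preferred, acceptable or unknown.  An illustrative Python
version of that logic, not the checkpatch.pl implementation:

import re

PREFER = "GPL-2.0-or-later"
VALID = {"GPL-2.0-only", "LGPL-2.1-only", "LGPL-2.1-or-later",
         "BSD-2-Clause", "BSD-3-Clause", "MIT"}

def check_spdx(expr):
    # Drop parentheses and OR/AND operators, keep WITH exceptions attached
    flat = re.sub(r"[()]", " ", expr)
    flat = re.sub(r"\b(OR|AND)\b", " ", flat)
    flat = re.sub(r"\s+WITH\s+", "-WITH-", flat)
    tags = flat.split()
    unknown = [t for t in tags if t != PREFER and t not in VALID]
    nonpreferred = any(t in VALID for t in tags)
    return unknown, nonpreferred

print(check_spdx("GPL-2.0-only OR (MIT AND BSD-3-Clause)"))
# -> ([], True): all licenses acceptable, but none is the preferred tag
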
+
+# All three of the methods below take a 'file info' record
+# which is a hash ref containing
+#
+# 'isgit': 1 if an enhanced git diff or 0 for a plain diff
+# 'githeader': 1 if still parsing git patch header, 0 otherwise
+# 'linestart': line number of start of file diff
+# 'lineend': line number of end of file diff
+# 'filenew': the new filename
+# 'fileold': the old filename (same as 'new filename' except
+# for renames in git diffs)
+# 'action': one of 'modified' (always) or 'new' or 'deleted' or
+# 'renamed' (git diffs only)
+# 'mode': file mode for new/deleted files (git diffs only)
+# 'similarity': file similarity when renamed (git diffs only)
+# 'facts': hash ref for storing any metadata related to checks
+#
+
+# Called at the end of each patch, with the list of
+# real filenames that were seen in the patch
+sub process_file_list {
+ my @fileinfos = @_;
+
+ # According to tests/qtest/bios-tables-test.c: do not
+ # change expected files in the same commit that adds a test
+ my @acpi_testexpected;
+ my @acpi_nontestexpected;
+
+ foreach my $fileinfo (@fileinfos) {
+ # Note: shell script that rebuilds the expected files is in
+ # the same directory as files themselves.
+ # Note: allowed diff list can be changed both when changing
+ # expected files and when changing tests.
+ if ($fileinfo->{filenew} =~ m#^tests/data/acpi/# &&
+ $fileinfo->{filenew} !~ m#^\.sh$#) {
+ push @acpi_testexpected, $fileinfo->{filenew};
+ } elsif ($fileinfo->{filenew} !~
+ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
+ push @acpi_nontestexpected, $fileinfo->{filenew};
+ }
}
- if (defined $$acpi_testexpected and defined $$acpi_nontestexpected) {
+ if (int(@acpi_testexpected) > 0 and int(@acpi_nontestexpected) > 0) {
ERROR("Do not add expected files together with tests, " .
"follow instructions in " .
- "tests/qtest/bios-tables-test.c: both " .
- $$acpi_testexpected . " and " .
- $$acpi_nontestexpected . " found\n");
+ "tests/qtest/bios-tables-test.c. Files\n\n " .
+ join("\n ", @acpi_testexpected) .
+ "\n\nand\n\n " .
+ join("\n ", @acpi_nontestexpected) .
+ "\n\nfound in the same patch\n");
+ }
+
+ my $sawmaintainers = 0;
+ my @maybemaintainers;
+ foreach my $fileinfo (@fileinfos) {
+ if ($fileinfo->{action} ne "modified" &&
+ $fileinfo->{filenew} !~ m#^tests/data/acpi/#) {
+ push @maybemaintainers, $fileinfo->{filenew};
+ }
+ if ($fileinfo->{filenew} eq "MAINTAINERS") {
+ $sawmaintainers = 1;
+ }
+ }
+
+ # If we don't see a MAINTAINERS update, prod the user to check
+ if (int(@maybemaintainers) > 0 && !$sawmaintainers) {
+ WARN("added, moved or deleted file(s):\n\n " .
+ join("\n ", @maybemaintainers) .
+ "\n\nDoes MAINTAINERS need updating?\n");
+ }
+}
+
+# Called at the start of processing a diff hunk for a file
+sub process_start_of_file {
+ my $fileinfo = shift;
+
+ # Check for incorrect file permissions
+ if ($fileinfo->{action} eq "new" && ($fileinfo->{mode} & 0111)) {
+ my $permhere = $fileinfo->{linestart} . "FILE: " .
+ $fileinfo->{filenew} . "\n";
+ if ($fileinfo->{filenew} =~
+ /(\bMakefile.*|\.(c|cc|cpp|h|mak|s|S))$/) {
+ ERROR("do not set execute permissions for source " .
+ "files\n" . $permhere);
+ }
+ }
+}
+
+# Called at the end of processing a diff hunk for a file
+sub process_end_of_file {
+ my $fileinfo = shift;
+
+ if ($fileinfo->{action} eq "new" &&
+ !exists $fileinfo->{facts}->{sawspdx}) {
+ if ($fileinfo->{filenew} =~
+ /(\.(c|h|py|pl|sh|json|inc|rs)|Makefile.*)$/) {
+ # source code files MUST have SPDX license declared
+ ERROR("New file '" . $fileinfo->{filenew} .
+ "' requires 'SPDX-License-Identifier'");
+ } else {
+ # Other files MAY have SPDX license if appropriate
+ WARN("Does new file '" . $fileinfo->{filenew} .
+ "' need 'SPDX-License-Identifier'?");
+ }
+ }
+ if ($fileinfo->{action} eq "new" &&
+ exists $fileinfo->{facts}->{sawboilerplate}) {
+ ERROR("New file '" . $fileinfo->{filenew} . "' must " .
+ "not have license boilerplate header text, only " .
+ "the SPDX-License-Identifier, unless this file was " .
+ "copied from existing code already having such text.");
}
}
@@ -1373,7 +1538,9 @@ sub process {
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
- my $reported_maintainer_file = 0;
+ my $reported_mixing_imported_file = 0;
+ my $in_imported_file = 0;
+ my $in_no_imported_file = 0;
my $non_utf8_charset = 0;
our @report = ();
@@ -1386,7 +1553,10 @@ sub process {
my $realfile = '';
my $realline = 0;
my $realcnt = 0;
+ my $fileinfo;
+ my @fileinfolist;
my $here = '';
+ my $oldhere = '';
my $in_comment = 0;
my $comment_edge = 0;
my $first_line = 0;
@@ -1399,9 +1569,6 @@ sub process {
my %suppress_whiletrailers;
my %suppress_export;
- my $acpi_testexpected;
- my $acpi_nontestexpected;
-
# Pre-scan the patch sanitizing the lines.
sanitise_line_reset();
@@ -1524,18 +1691,54 @@ sub process {
$prefix = "$filename:$realline: " if ($emacs && $file);
$prefix = "$filename:$linenr: " if ($emacs && !$file);
+ $oldhere = $here;
$here = "#$linenr: " if (!$file);
$here = "#$realline: " if ($file);
# extract the filename as it passes
- if ($line =~ /^diff --git.*?(\S+)$/) {
- $realfile = $1;
- $realfile =~ s@^([^/]*)/@@ if (!$file);
- checkfilename($realfile, \$acpi_testexpected, \$acpi_nontestexpected);
+ if ($line =~ /^diff --git\s+(\S+)\s+(\S+)$/) {
+ my $fileold = $1;
+ my $filenew = $2;
+
+ if (defined $fileinfo) {
+ $fileinfo->{lineend} = $oldhere;
+ process_end_of_file($fileinfo)
+ }
+ $fileold =~ s@^([^/]*)/@@ if (!$file);
+ $filenew =~ s@^([^/]*)/@@ if (!$file);
+ $realfile = $filenew;
+
+ $fileinfo = {
+ "isgit" => 1,
+ "githeader" => 1,
+ "linestart" => $here,
+ "lineend" => 0,
+ "fileold" => $fileold,
+ "filenew" => $filenew,
+ "action" => "modified",
+ "mode" => 0,
+ "similarity" => 0,
+ "facts" => {},
+ };
+ push @fileinfolist, $fileinfo;
+ } elsif (defined $fileinfo && $fileinfo->{githeader} &&
+ $line =~ /^(new|deleted) (?:file )?mode\s+([0-7]+)$/) {
+ $fileinfo->{action} = $1;
+ $fileinfo->{mode} = oct($2);
+ } elsif (defined $fileinfo && $fileinfo->{githeader} &&
+ $line =~ /^similarity index (\d+)%/) {
+ $fileinfo->{similarity} = int($1);
+ } elsif (defined $fileinfo && $fileinfo->{githeader} &&
+ $line =~ /^rename (from|to) [\w\/\.\-]+\s*$/) {
+ $fileinfo->{action} = "renamed";
+ # For a no-change rename, we'll never have any "+++..."
+ # lines, so trigger actions now
+ if ($1 eq "to" && $fileinfo->{similarity} == 100) {
+ process_start_of_file($fileinfo);
+ }
} elsif ($line =~ /^\+\+\+\s+(\S+)/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@ if (!$file);
- checkfilename($realfile, \$acpi_testexpected, \$acpi_nontestexpected);
$p1_prefix = $1;
if (!$file && $tree && $p1_prefix ne '' &&
@@ -1543,6 +1746,30 @@ sub process {
WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
}
+ if (defined $fileinfo && !$fileinfo->{isgit}) {
+ $fileinfo->{lineend} = $oldhere;
+ process_end_of_file($fileinfo);
+ }
+
+ if (!defined $fileinfo || !$fileinfo->{isgit}) {
+ $fileinfo = {
+ "isgit" => 0,
+ "githeader" => 0,
+ "linestart" => $here,
+ "lineend" => 0,
+ "fileold" => $realfile,
+ "filenew" => $realfile,
+ "action" => "modified",
+ "mode" => 0,
+ "similarity" => 0,
+ "facts" => {},
+ };
+ push @fileinfolist, $fileinfo;
+ } else {
+ $fileinfo->{githeader} = 0;
+ }
+ process_start_of_file($fileinfo);
+
next;
}
@@ -1554,14 +1781,6 @@ sub process {
$cnt_lines++ if ($realcnt != 0);
-# Check for incorrect file permissions
- if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
- my $permhere = $here . "FILE: $realfile\n";
- if ($realfile =~ /(\bMakefile(?:\.objs)?|\.c|\.cc|\.cpp|\.h|\.mak|\.[sS])$/) {
- ERROR("do not set execute permissions for source files\n" . $permhere);
- }
- }
-
# Only allow Python 3 interpreter
if ($realline == 1 &&
$line =~ /^\+#!\ *\/usr\/bin\/(?:env )?python$/) {
@@ -1593,23 +1812,27 @@ sub process {
}
}
-# Check if MAINTAINERS is being updated. If so, there's probably no need to
-# emit the "does MAINTAINERS need updating?" message on file add/move/delete
- if ($line =~ /^\s*MAINTAINERS\s*\|/) {
- $reported_maintainer_file = 1;
+# Check SPDX-License-Identifier references a permitted license
+ if ($rawline =~ m,SPDX-License-Identifier: (.*?)(\*/)?\s*$,) {
+ $fileinfo->{facts}->{sawspdx} = 1;
+ &checkspdx($realfile, $1);
}
-# Check for added, moved or deleted files
- if (!$reported_maintainer_file && !$in_commit_log &&
- ($line =~ /^(?:new|deleted) file mode\s*\d+\s*$/ ||
- $line =~ /^rename (?:from|to) [\w\/\.\-]+\s*$/ ||
- ($line =~ /\{\s*([\w\/\.\-]*)\s*\=\>\s*([\w\/\.\-]*)\s*\}/ &&
- (defined($1) || defined($2)))) &&
- !(($realfile ne '') &&
- defined($acpi_testexpected) &&
- ($realfile eq $acpi_testexpected))) {
- $reported_maintainer_file = 1;
- WARN("added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr);
+ if ($rawline =~ /$LICENSE_BOILERPLATE_RE/) {
+ $fileinfo->{facts}->{sawboilerplate} = 1;
+ }
+
+ if ($rawline =~ m,(SPDX-[a-zA-Z0-9-_]+):,) {
+ my $tag = $1;
+ my @permitted = qw(
+ SPDX-License-Identifier
+ );
+
+ unless (grep { /^$tag$/ } @permitted) {
+ ERROR("Tag $tag not permitted in QEMU code, " .
+ "valid choices are: " .
+ join(", ", @permitted));
+ }
}
# Check for wrappage within a valid hunk of the file
@@ -1673,6 +1896,27 @@ sub process {
# ignore non-hunk lines and lines being removed
next if (!$hunk_line || $line =~ /^-/);
+# Check that updating imported files from Linux are not mixed with other changes
+ if ($realfile =~ /^(linux-headers|include\/standard-headers)\//) {
+ if (!$in_imported_file) {
+ WARN("added, moved or deleted file(s) " .
+ "imported from Linux, are you using " .
+ "scripts/update-linux-headers.sh?\n" .
+ $herecurr);
+ }
+ $in_imported_file = 1;
+ } else {
+ $in_no_imported_file = 1;
+ }
+
+ if (!$reported_mixing_imported_file &&
+ $in_imported_file && $in_no_imported_file) {
+ ERROR("headers imported from Linux should be self-" .
+ "contained in a patch with no other changes\n" .
+ $herecurr);
+ $reported_mixing_imported_file = 1;
+ }
+
# ignore files that are being periodically imported from Linux
next if ($realfile =~ /^(linux-headers|include\/standard-headers)\//);
@@ -2169,7 +2413,7 @@ sub process {
# missing space after union, struct or enum definition
if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
- ERROR("missing space after $1 definition\n" . $herecurr);
+ ERROR("missing space after $1 definition\n" . $herecurr);
}
# check for spacing round square brackets; allowed:
@@ -2222,7 +2466,7 @@ sub process {
}
}
# Check operator spacing.
- if (!($line=~/\#\s*include/)) {
+ if (!($line=~/\#\s*(include|import)/)) {
my $ops = qr{
<<=|>>=|<=|>=|==|!=|
\+=|-=|\*=|\/=|%=|\^=|\|=|&=|
@@ -2464,7 +2708,7 @@ sub process {
if ($line =~ /^.\s*(Q(?:S?LIST|SIMPLEQ|TAILQ)_HEAD)\s*\(\s*[^,]/ &&
$line !~ /^.typedef/) {
- ERROR("named $1 should be typedefed separately\n" . $herecurr);
+ ERROR("named $1 should be typedefed separately\n" . $herecurr);
}
# Need a space before open parenthesis after if, while etc
@@ -3013,48 +3257,50 @@ sub process {
# Qemu error function tests
- # Find newlines in error messages
- my $qemu_error_funcs = qr{error_setg|
- error_setg_errno|
- error_setg_win32|
- error_setg_file_open|
- error_set|
- error_prepend|
- warn_reportf_err|
- error_reportf_err|
- error_vreport|
- warn_vreport|
- info_vreport|
- error_report|
- warn_report|
- info_report|
- g_test_message}x;
-
- if ($rawline =~ /\b(?:$qemu_error_funcs)\s*\(.*\".*\\n/) {
- ERROR("Error messages should not contain newlines\n" . $herecurr);
- }
+ # Find newlines in error messages
+ my $qemu_error_funcs = qr{error_setg|
+ error_setg_errno|
+ error_setg_win32|
+ error_setg_file_open|
+ error_set|
+ error_prepend|
+ warn_reportf_err|
+ error_reportf_err|
+ error_vreport|
+ warn_vreport|
+ info_vreport|
+ error_report|
+ warn_report|
+ info_report|
+ g_test_message}x;
+
+ if ($rawline =~ /\b(?:$qemu_error_funcs)\s*\(.*\".*\\n/) {
+ ERROR("Error messages should not contain newlines\n" . $herecurr);
+ }
- # Continue checking for error messages that contains newlines. This
- # check handles cases where string literals are spread over multiple lines.
- # Example:
- # error_report("Error msg line #1"
- # "Error msg line #2\n");
- my $quoted_newline_regex = qr{\+\s*\".*\\n.*\"};
- my $continued_str_literal = qr{\+\s*\".*\"};
+ # Continue checking for error messages that contain newlines.
+ # This check handles cases where string literals are spread
+ # over multiple lines.
+ # Example:
+ # error_report("Error msg line #1"
+ # "Error msg line #2\n");
+ my $quoted_newline_regex = qr{\+\s*\".*\\n.*\"};
+ my $continued_str_literal = qr{\+\s*\".*\"};
- if ($rawline =~ /$quoted_newline_regex/) {
- # Backtrack to first line that does not contain only a quoted literal
- # and assume that it is the start of the statement.
- my $i = $linenr - 2;
+ if ($rawline =~ /$quoted_newline_regex/) {
+ # Backtrack to first line that does not contain only
+ # a quoted literal and assume that it is the start
+ # of the statement.
+ my $i = $linenr - 2;
- while (($i >= 0) & $rawlines[$i] =~ /$continued_str_literal/) {
- $i--;
- }
+ while (($i >= 0) & $rawlines[$i] =~ /$continued_str_literal/) {
+ $i--;
+ }
- if ($rawlines[$i] =~ /\b(?:$qemu_error_funcs)\s*\(/) {
- ERROR("Error messages should not contain newlines\n" . $herecurr);
+ if ($rawlines[$i] =~ /\b(?:$qemu_error_funcs)\s*\(/) {
+ ERROR("Error messages should not contain newlines\n" . $herecurr);
+ }
}
- }
# check for non-portable libc calls that have portable alternatives in QEMU
if ($line =~ /\bffs\(/) {
@@ -3078,6 +3324,10 @@ sub process {
if ($line =~ /\b(g_)?assert\(0\)/) {
ERROR("use g_assert_not_reached() instead of assert(0)\n" . $herecurr);
}
+ if ($line =~ /\b(g_)?assert\(false\)/) {
+ ERROR("use g_assert_not_reached() instead of assert(false)\n" .
+ $herecurr);
+ }
if ($line =~ /\bstrerrorname_np\(/) {
ERROR("use strerror() instead of strerrorname_np()\n" . $herecurr);
}
@@ -3104,6 +3354,11 @@ sub process {
}
}
+ if (defined $fileinfo) {
+ process_end_of_file($fileinfo);
+ }
+ process_file_list(@fileinfolist);
+
if ($is_patch && $chk_signoff && $signoff == 0) {
ERROR("Missing Signed-off-by: line(s)\n");
}
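
The checkpatch.pl rework above builds one per-file record while scanning the
git diff headers (diff --git, new/deleted mode, similarity index, rename
from/to) and hands the list to process_file_list() at the end of the patch.
A simplified Python sketch of that header parsing, using a subset of the
documented fields; it is an illustration only, not the Perl code:

import re

def scan_git_headers(lines):
    files, info = [], None
    for line in lines:
        m = re.match(r"^diff --git\s+(\S+)\s+(\S+)$", line)
        if m:
            info = {"isgit": 1, "fileold": m.group(1), "filenew": m.group(2),
                    "action": "modified", "mode": 0, "similarity": 0,
                    "facts": {}}
            files.append(info)
        elif info and (m := re.match(r"^(new|deleted) (?:file )?mode\s+([0-7]+)$", line)):
            info["action"], info["mode"] = m.group(1), int(m.group(2), 8)
        elif info and (m := re.match(r"^similarity index (\d+)%", line)):
            info["similarity"] = int(m.group(1))
        elif info and re.match(r"^rename (from|to) ", line):
            info["action"] = "renamed"
    return files

example = ["diff --git a/foo.c b/foo.c", "new file mode 100755"]
print(scan_git_headers(example))
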
diff --git a/scripts/ci/gitlab-ci-section b/scripts/ci/gitlab-ci-section
new file mode 100644
index 0000000..9bbe804
--- /dev/null
+++ b/scripts/ci/gitlab-ci-section
@@ -0,0 +1,29 @@
+# Copyright (c) 2024 Linaro Ltd
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# gitlab-ci-section: This is a shell script fragment which defines
+# functions section_start and section_end which will emit marker lines
+# that GitLab will interpret as the beginning or end of a "collapsible
+# section" in a CI job log. See
+# https://docs.gitlab.com/ee/ci/yaml/script.html#expand-and-collapse-job-log-sections
+#
+# This is intended to be sourced in the before_script section of
+# a CI config; the section_start and section_end functions will
+# then be available for use in the before_script and script sections.
+
+# Section names are [-_.A-Za-z0-9] and the section_start pairs with
+# a section_end with the same section name.
+# The description can be any printable text without newlines; this is
+# what will appear in the log.
+
+# Usage:
+# section_start section_name "Description of the section"
+section_start () {
+ printf "section_start:%s:%s\r\e[0K%s\n" "$(date +%s)" "$1" "$2"
+}
+
+# Usage:
+# section_end section_name
+section_end () {
+ printf "section_end:%s:%s\r\e[0K\n" "$(date +%s)" "$1"
+}
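
The fragment above emits GitLab's collapsible-section markers from shell.  If
a CI step is driven from a Python script instead, the same marker format can
be produced like this (the format string is taken from the fragment; the
helper names here are just illustrative):

import sys
import time

def section_start(name, description):
    # "\r\x1b[0K" hides the marker line itself in the rendered job log
    sys.stdout.write("section_start:%d:%s\r\x1b[0K%s\n"
                     % (int(time.time()), name, description))

def section_end(name):
    sys.stdout.write("section_end:%d:%s\r\x1b[0K\n" % (int(time.time()), name))

section_start("build", "Building the project")
print("... build output ...")
section_end("build")
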
diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml
index 7bdafab..57e7fae 100644
--- a/scripts/ci/setup/gitlab-runner.yml
+++ b/scripts/ci/setup/gitlab-runner.yml
@@ -49,30 +49,51 @@
- debug:
msg: gitlab-runner arch is {{ gitlab_runner_arch }}
- - name: Download the matching gitlab-runner (DEB)
+ # Debian/Ubuntu setup
+ - name: Get gitlab-runner repo setup script (DEB)
get_url:
dest: "/root/"
- url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/deb/gitlab-runner_{{ gitlab_runner_arch }}.deb"
+ url: "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh"
+ mode: 0755
when:
- ansible_facts['distribution'] == 'Ubuntu'
- - name: Download the matching gitlab-runner (RPM)
+ - name: Run gitlab-runner repo setup script (DEB)
+ shell: "/root/script.deb.sh"
+ when:
+ - ansible_facts['distribution'] == 'Ubuntu'
+
+ - name: Install gitlab-runner (DEB)
+ ansible.builtin.apt:
+ name: gitlab-runner
+ update_cache: yes
+ state: present
+ when:
+ - ansible_facts['distribution'] == 'Ubuntu'
+
+ # RPM setup
+ - name: Get gitlab-runner repo setup script (RPM)
get_url:
dest: "/root/"
- url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/rpm/gitlab-runner_{{ gitlab_runner_arch }}.rpm"
+ url: "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh"
+ mode: 0755
when:
- ansible_facts['distribution'] == 'CentOS'
- - name: Install gitlab-runner via package manager (DEB)
- apt: deb="/root/gitlab-runner_{{ gitlab_runner_arch }}.deb"
+ - name: Run gitlab-runner repo setup script (RPM)
+ shell: "/root/script.rpm.sh"
when:
- - ansible_facts['distribution'] == 'Ubuntu'
+ - ansible_facts['distribution'] == 'CentOS'
- - name: Install gitlab-runner via package manager (RPM)
- yum: name="/root/gitlab-runner_{{ gitlab_runner_arch }}.rpm"
+ - name: Install gitlab-runner (RPM)
+ yum:
+ name: gitlab-runner
+ update_cache: yes
+ state: present
when:
- ansible_facts['distribution'] == 'CentOS'
+ # Register Runners
- name: Register the gitlab-runner
command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list {{ ansible_facts[\"architecture\"] }},{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'"
diff --git a/scripts/ci/setup/ubuntu/build-environment.yml b/scripts/ci/setup/ubuntu/build-environment.yml
index edf1900..56b5160 100644
--- a/scripts/ci/setup/ubuntu/build-environment.yml
+++ b/scripts/ci/setup/ubuntu/build-environment.yml
@@ -39,7 +39,6 @@
when:
- ansible_facts['distribution'] == 'Ubuntu'
- ansible_facts['distribution_version'] == '22.04'
- - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64'
- name: Install packages for QEMU on Ubuntu 22.04
package:
@@ -47,7 +46,6 @@
when:
- ansible_facts['distribution'] == 'Ubuntu'
- ansible_facts['distribution_version'] == '22.04'
- - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64'
- name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 22.04
package:
diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml b/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml
index fd5489c..f11e980 100644
--- a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml
+++ b/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml
@@ -35,6 +35,7 @@ packages:
- libcacard-dev
- libcap-ng-dev
- libcapstone-dev
+ - libcbor-dev
- libcmocka-dev
- libcurl4-gnutls-dev
- libdaxctl-dev
@@ -49,6 +50,7 @@ packages:
- libglusterfs-dev
- libgnutls28-dev
- libgtk-3-dev
+ - libgtk-vnc-2.0-dev
- libibverbs-dev
- libiscsi-dev
- libjemalloc-dev
@@ -112,6 +114,7 @@ packages:
- python3-venv
- python3-yaml
- rpm2cpio
+ - rustc-1.77
- sed
- socat
- sparse
@@ -120,6 +123,7 @@ packages:
- tar
- tesseract-ocr
- tesseract-ocr-eng
+ - vulkan-tools
- xorriso
- zlib1g-dev
- zstd
diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml b/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml
index afa0450..6559cb2 100644
--- a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml
+++ b/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml
@@ -35,6 +35,7 @@ packages:
- libcacard-dev
- libcap-ng-dev
- libcapstone-dev
+ - libcbor-dev
- libcmocka-dev
- libcurl4-gnutls-dev
- libdaxctl-dev
@@ -49,6 +50,7 @@ packages:
- libglusterfs-dev
- libgnutls28-dev
- libgtk-3-dev
+ - libgtk-vnc-2.0-dev
- libibverbs-dev
- libiscsi-dev
- libjemalloc-dev
@@ -110,6 +112,7 @@ packages:
- python3-venv
- python3-yaml
- rpm2cpio
+ - rustc-1.77
- sed
- socat
- sparse
@@ -118,6 +121,7 @@ packages:
- tar
- tesseract-ocr
- tesseract-ocr-eng
+ - vulkan-tools
- xorriso
- zlib1g-dev
- zstd
diff --git a/scripts/clean-includes b/scripts/clean-includes
index bdbf404..25dbf16 100755
--- a/scripts/clean-includes
+++ b/scripts/clean-includes
@@ -130,8 +130,8 @@ for f in "$@"; do
*include/qemu/compiler.h | \
*include/qemu/qemu-plugin.h | \
*include/glib-compat.h | \
- *include/sysemu/os-posix.h | \
- *include/sysemu/os-win32.h | \
+ *include/system/os-posix.h | \
+ *include/system/os-win32.h | \
*include/standard-headers/ )
# Removing include lines from osdep.h itself would be counterproductive.
echo "SKIPPING $f (special case header)"
@@ -174,7 +174,7 @@ for f in "$@"; do
<limits.h> <unistd.h> <time.h> <ctype.h> <errno.h> <fcntl.h>
<sys/stat.h> <sys/time.h> <assert.h> <signal.h> <glib.h>
<sys/stat.h> <sys/time.h> <assert.h> <signal.h> <glib.h> <sys/mman.h>
- "sysemu/os-posix.h, sysemu/os-win32.h "glib-compat.h"
+ "system/os-posix.h, system/os-win32.h "glib-compat.h"
"qemu/typedefs.h"
))' "$f"
diff --git a/scripts/cocci-macro-file.h b/scripts/cocci-macro-file.h
index d247a50..c64831d 100644
--- a/scripts/cocci-macro-file.h
+++ b/scripts/cocci-macro-file.h
@@ -23,11 +23,7 @@
#define G_GNUC_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#define G_GNUC_NULL_TERMINATED __attribute__((sentinel))
-#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
-# define QEMU_PACKED __attribute__((gcc_struct, packed))
-#else
-# define QEMU_PACKED __attribute__((packed))
-#endif
+#define QEMU_PACKED __attribute__((packed))
#define cat(x,y) x ## y
#define cat2(x,y) cat(x,y)
diff --git a/scripts/coccinelle/device-reset.cocci b/scripts/coccinelle/device-reset.cocci
new file mode 100644
index 0000000..510042a
--- /dev/null
+++ b/scripts/coccinelle/device-reset.cocci
@@ -0,0 +1,30 @@
+// Convert opencoded DeviceClass::reset assignments to calls to
+// device_class_set_legacy_reset()
+//
+// Copyright Linaro Ltd 2024
+// This work is licensed under the terms of the GNU GPLv2 or later.
+//
+// spatch --macro-file scripts/cocci-macro-file.h \
+// --sp-file scripts/coccinelle/device-reset.cocci \
+// --keep-comments --smpl-spacing --in-place --include-headers --dir hw
+//
+// For simplicity we assume some things about the code we're modifying
+// that happen to be true for all our targets:
+// * all cpu_class_set_parent_reset() callsites have a 'DeviceClass *dc' local
+// * the parent reset field in the target CPU class is 'parent_reset'
+// * no reset function already has a 'dev' local
+
+@@
+identifier dc, resetfn;
+@@
+ DeviceClass *dc;
+ ...
+- dc->reset = resetfn;
++ device_class_set_legacy_reset(dc, resetfn);
+@@
+identifier dc, resetfn;
+@@
+ DeviceClass *dc;
+ ...
+- dc->reset = &resetfn;
++ device_class_set_legacy_reset(dc, resetfn);
diff --git a/scripts/codeconverter/codeconverter/qom_type_info.py b/scripts/codeconverter/codeconverter/qom_type_info.py
index 255cb59..22a2556 100644
--- a/scripts/codeconverter/codeconverter/qom_type_info.py
+++ b/scripts/codeconverter/codeconverter/qom_type_info.py
@@ -798,7 +798,8 @@ class RedundantTypeSizes(TypeInfoVar):
#
#
# if 'class_init' not in fields:
-# yield self.prepend(('static void %s_class_init(ObjectClass *oc, void *data)\n'
+# yield self.prepend(('static void %s_class_init(ObjectClass *oc,\n'
+# 'const void *data)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('class_init', ids.lowercase+'_class_init')
@@ -901,26 +902,6 @@ class TypeRegisterCall(FileMatch):
regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register'),
r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n')
-class MakeTypeRegisterStatic(TypeRegisterCall):
- """Make type_register() call static if variable is static const"""
- def gen_patches(self):
- var = self.file.find_match(TypeInfoVar, self.name)
- if var is None:
- self.warn("can't find TypeInfo var declaration for %s", self.name)
- return
- if var.is_static() and var.is_const():
- yield self.group_match('func_name').make_patch('type_register_static')
-
-class MakeTypeRegisterNotStatic(TypeRegisterStaticCall):
- """Make type_register() call static if variable is static const"""
- def gen_patches(self):
- var = self.file.find_match(TypeInfoVar, self.name)
- if var is None:
- self.warn("can't find TypeInfo var declaration for %s", self.name)
- return
- if not var.is_static() or not var.is_const():
- yield self.group_match('func_name').make_patch('type_register')
-
class TypeInfoMacro(FileMatch):
"""TYPE_INFO macro usage"""
regexp = S(r'^[ \t]*TYPE_INFO\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\)[ \t]*;?[ \t]*\n')
diff --git a/scripts/codeconverter/codeconverter/test_regexps.py b/scripts/codeconverter/codeconverter/test_regexps.py
index a445634..4526268 100644
--- a/scripts/codeconverter/codeconverter/test_regexps.py
+++ b/scripts/codeconverter/codeconverter/test_regexps.py
@@ -70,15 +70,15 @@ static const TypeInfo char_file_type_info = {
.name = armsse_variants[i].name,
.parent = TYPE_ARMSSE,
.class_init = armsse_class_init,
- .class_data = (void *)&armsse_variants[i],
+ .class_data = &armsse_variants[i],
};''', re.MULTILINE)
print(RE_ARRAY_ITEM)
assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_HOTPLUG_HANDLER },')
assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_ACPI_DEVICE_IF },')
assert fullmatch(RE_ARRAY_ITEM, '{ }')
- assert fullmatch(RE_ARRAY_CAST, '(InterfaceInfo[])')
- assert fullmatch(RE_ARRAY, '''(InterfaceInfo[]) {
+ assert fullmatch(RE_ARRAY_CAST, '(const InterfaceInfo[])')
+ assert fullmatch(RE_ARRAY, '''(const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ }
@@ -98,7 +98,7 @@ static const TypeInfo char_file_type_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(CRBState),
.class_init = tpm_crb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
@@ -134,7 +134,7 @@ static const TypeInfo char_file_type_info = {
.instance_size = sizeof(AcpiGedState),
.instance_init = acpi_ged_initfn,
.class_init = acpi_ged_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ }
@@ -164,7 +164,7 @@ static const TypeInfo char_file_type_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(CRBState),
.class_init = tpm_crb_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
@@ -269,7 +269,7 @@ def test_initial_includes():
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
/* Missing stuff:
SCTRL_P[12](END|ST)INC
@@ -278,5 +278,5 @@ def test_initial_includes():
m = InitialIncludes.domatch(c)
assert m
print(repr(m.group(0)))
- assert m.group(0).endswith('#include "sysemu/dma.h"\n')
+ assert m.group(0).endswith('#include "system/dma.h"\n')
diff --git a/scripts/coverity-scan/COMPONENTS.md b/scripts/coverity-scan/COMPONENTS.md
index 858190b..7299590 100644
--- a/scripts/coverity-scan/COMPONENTS.md
+++ b/scripts/coverity-scan/COMPONENTS.md
@@ -9,9 +9,6 @@ arm
avr
~ .*/qemu((/include)?/hw/avr/.*|/target/avr/.*)
-cris
- ~ .*/qemu((/include)?/hw/cris/.*|/target/cris/.*)
-
hexagon-gen (component should be ignored in analysis)
~ .*/qemu(/target/hexagon/.*generated.*)
@@ -79,7 +76,7 @@ chardev
~ .*/qemu((/include)?/chardev/.*)
crypto
- ~ .*/qemu((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/sysemu|/backends)/cryptodev.*|/host/include/.*/host/crypto/.*)
+ ~ .*/qemu((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/system|/backends)/cryptodev.*|/host/include/.*/host/crypto/.*)
disas
~ .*/qemu((/include)?/disas.*)
@@ -147,7 +144,7 @@ kvm
tcg
~ .*/qemu(/accel/tcg|/replay|/tcg)/.*
-sysemu
+system
~ .*/qemu(/system/.*|/accel/.*)
(headers)
diff --git a/scripts/device-crash-test b/scripts/device-crash-test
index da8b56e..1ecb966 100755
--- a/scripts/device-crash-test
+++ b/scripts/device-crash-test
@@ -16,8 +16,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# with this program; if not, see <https://www.gnu.org/licenses/>.
"""
Run QEMU with all combinations of -machine and -device types,
diff --git a/scripts/gensyscalls.sh b/scripts/gensyscalls.sh
deleted file mode 100755
index 8495728..0000000
--- a/scripts/gensyscalls.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/bin/sh
-#
-# Update syscall_nr.h files from linux headers asm-generic/unistd.h
-#
-# This code is licensed under the GPL version 2 or later. See
-# the COPYING file in the top-level directory.
-#
-
-linux="$1"
-output="$2"
-
-TMP=$(mktemp -d)
-
-if [ "$linux" = "" ] ; then
- echo "Needs path to linux source tree" 1>&2
- exit 1
-fi
-
-if [ "$output" = "" ] ; then
- output="$PWD"
-fi
-
-upper()
-{
- echo "$1" | tr "[:lower:]" "[:upper:]" | tr "[:punct:]" "_"
-}
-
-qemu_arch()
-{
- case "$1" in
- arm64)
- echo "aarch64"
- ;;
- *)
- echo "$1"
- ;;
- esac
-}
-
-read_includes()
-{
- arch=$1
- bits=$2
-
- cpp -P -nostdinc -fdirectives-only \
- -D_UAPI_ASM_$(upper ${arch})_BITSPERLONG_H \
- -D__ASM_$(upper ${arch})_BITSPERLONG_H \
- -D__BITS_PER_LONG=${bits} \
- -I${linux}/arch/${arch}/include/uapi/ \
- -I${linux}/include/uapi \
- -I${TMP} \
- "${linux}/arch/${arch}/include/uapi/asm/unistd.h"
-}
-
-filter_defines()
-{
- grep -e "#define __NR_" -e "#define __NR3264"
-}
-
-rename_defines()
-{
- sed "s/ __NR_/ TARGET_NR_/g;s/(__NR_/(TARGET_NR_/g"
-}
-
-evaluate_values()
-{
- sed "s/#define TARGET_NR_/QEMU TARGET_NR_/" | \
- cpp -P -nostdinc | \
- sed "s/^QEMU /#define /"
-}
-
-generate_syscall_nr()
-{
- arch=$1
- bits=$2
- file="$3"
- guard="$(upper LINUX_USER_$(qemu_arch $arch)_$(basename "$file"))"
-
- (echo "/*"
- echo " * This file contains the system call numbers."
- echo " * Do not modify."
- echo " * This file is generated by scripts/gensyscalls.sh"
- echo " */"
- echo "#ifndef ${guard}"
- echo "#define ${guard}"
- echo
- read_includes $arch $bits | filter_defines | rename_defines | \
- evaluate_values | sort -n -k 3
- echo
- echo "#endif /* ${guard} */") > "$file"
-}
-
-mkdir "$TMP/asm"
-> "$TMP/asm/bitsperlong.h"
-
-generate_syscall_nr arm64 64 "$output/linux-user/aarch64/syscall_nr.h"
-generate_syscall_nr openrisc 32 "$output/linux-user/openrisc/syscall_nr.h"
-
-generate_syscall_nr riscv 32 "$output/linux-user/riscv/syscall32_nr.h"
-generate_syscall_nr riscv 64 "$output/linux-user/riscv/syscall64_nr.h"
-generate_syscall_nr hexagon 32 "$output/linux-user/hexagon/syscall_nr.h"
-generate_syscall_nr loongarch 64 "$output/linux-user/loongarch64/syscall_nr.h"
-rm -fr "$TMP"
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 240923d..fec83f5 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1,5 +1,5 @@
#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
use warnings;
use strict;
diff --git a/scripts/make-release b/scripts/make-release
index 6e0433d..4509a9f 100755
--- a/scripts/make-release
+++ b/scripts/make-release
@@ -10,6 +10,28 @@
# This work is licensed under the terms of the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
+function subproject_dir() {
+ if test ! -f "$src/subprojects/$1.wrap"; then
+ echo "scripts/archive-source.sh should only process wrap subprojects"
+ exit 1
+ fi
+
+ # Print the directory key of the wrap file, defaulting to the
+ # subproject name. The wrap file is in ini format and should
+ # have a single section only. There should be only one section
+ # named "[wrap-*]", which helps keeping the script simple.
+ local dir
+ dir=$(sed -n \
+ -e '/^\[wrap-[a-z][a-z]*\]$/,/^\[/{' \
+ -e '/^directory *= */!b' \
+ -e 's///p' \
+ -e 'q' \
+ -e '}' \
+ "$src/subprojects/$1.wrap")
+
+ echo "${dir:-$1}"
+}
+
if [ $# -ne 2 ]; then
echo "Usage:"
echo " $0 gitrepo version"
@@ -17,7 +39,12 @@ if [ $# -ne 2 ]; then
fi
# Only include wraps that are invoked with subproject()
-SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3 berkeley-testfloat-3"
+SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3
+ berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs
+ bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs
+ libc-0.2-rs proc-macro2-1-rs
+ proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
+ syn-2-rs unicode-ident-1-rs"
src="$1"
version="$2"
@@ -47,5 +74,13 @@ meson subprojects download $SUBPROJECTS
CryptoPkg/Library/OpensslLib/openssl \
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli)
popd
-tar --exclude=.git -cJf ${destination}.tar.xz ${destination}
+
+exclude=(--exclude=.git)
+# include the tarballs in subprojects/packagecache but not their expansion
+for sp in $SUBPROJECTS; do
+ if grep -xqF "[wrap-file]" $src/subprojects/$sp.wrap; then
+ exclude+=(--exclude=subprojects/"$(subproject_dir $sp)")
+ fi
+done
+tar "${exclude[@]}" -cJf ${destination}.tar.xz ${destination}
rm -rf ${destination}
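
The make-release change above keeps the downloaded tarballs under
subprojects/packagecache but excludes the unpacked directory of every
[wrap-file] subproject from the release tarball.  A small Python sketch of
computing that exclude list (the subproject names and paths are assumptions
for illustration):

import configparser

SUBPROJECTS = ["keycodemapdb", "libvfio-user", "berkeley-softfloat-3"]

def tar_excludes(src="."):
    excludes = ["--exclude=.git"]
    for sp in SUBPROJECTS:
        cp = configparser.ConfigParser()
        cp.read(f"{src}/subprojects/{sp}.wrap")
        if "wrap-file" in cp.sections():
            directory = cp["wrap-file"].get("directory", sp)
            excludes.append(f"--exclude=subprojects/{directory}")
    return excludes

print(tar_excludes())
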
diff --git a/scripts/meson-buildoptions.py b/scripts/meson-buildoptions.py
index 4814a8f..a3e2247 100644
--- a/scripts/meson-buildoptions.py
+++ b/scripts/meson-buildoptions.py
@@ -241,8 +241,14 @@ def print_parse(options):
print(" esac")
print("}")
-
-options = load_options(json.load(sys.stdin))
+json_data = sys.stdin.read()
+try:
+ options = load_options(json.loads(json_data))
+except:
+ print("Failure in scripts/meson-buildoptions.py parsing stdin as json",
+ file=sys.stderr)
+ print(json_data, file=sys.stderr)
+ sys.exit(1)
print("# This file is generated by meson-buildoptions.py, do not edit!")
print_help(options)
print_parse(options)
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index c97079a..73e0770 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -21,6 +21,7 @@ meson_options_help() {
printf "%s\n" ' --disable-relocatable toggle relocatable install'
printf "%s\n" ' --docdir=VALUE Base directory for documentation installation'
printf "%s\n" ' (can be empty) [share/doc]'
+ printf "%s\n" ' --enable-asan enable address sanitizer'
printf "%s\n" ' --enable-block-drv-whitelist-in-tools'
printf "%s\n" ' use block whitelist also in tools instead of only'
printf "%s\n" ' QEMU'
@@ -46,13 +47,15 @@ meson_options_help() {
printf "%s\n" ' getrandom()'
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
printf "%s\n" ' clang/llvm and coroutine backend ucontext)'
- printf "%s\n" ' --enable-sanitizers enable default sanitizers'
+ printf "%s\n" ' --enable-strict-rust-lints'
+ printf "%s\n" ' Enable stricter set of Rust warnings'
printf "%s\n" ' --enable-strip Strip targets on install'
printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)'
printf "%s\n" ' --enable-trace-backends=CHOICES'
printf "%s\n" ' Set available tracing backends [log] (choices:'
printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)'
printf "%s\n" ' --enable-tsan enable thread sanitizer'
+ printf "%s\n" ' --enable-ubsan enable undefined behaviour sanitizer'
printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-'
printf "%s\n" ' firmware]'
printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler'
@@ -71,12 +74,13 @@ meson_options_help() {
printf "%s\n" ' "manufacturer" name for qemu-ga registry entries'
printf "%s\n" ' [QEMU]'
printf "%s\n" ' --qemu-ga-version=VALUE version number for qemu-ga installer'
+ printf "%s\n" ' --rtsig-map=VALUE default value of QEMU_RTSIG_MAP [NULL]'
printf "%s\n" ' --smbd=VALUE Path to smbd for slirp networking'
printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]'
printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string'
printf "%s\n" ' [NORMAL]'
printf "%s\n" ' --with-coroutine=CHOICE coroutine backend to use (choices:'
- printf "%s\n" ' auto/sigaltstack/ucontext/windows)'
+ printf "%s\n" ' auto/sigaltstack/ucontext/wasm/windows)'
printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the'
printf "%s\n" ' package'
printf "%s\n" ' --with-suffix=VALUE Suffix for QEMU data/modules/config directories'
@@ -93,8 +97,6 @@ meson_options_help() {
printf "%s\n" ' alsa ALSA sound support'
printf "%s\n" ' attr attr/xattr support'
printf "%s\n" ' auth-pam PAM access control'
- printf "%s\n" ' avx2 AVX2 optimizations'
- printf "%s\n" ' avx512bw AVX512BW optimizations'
printf "%s\n" ' blkio libblkio block device driver'
printf "%s\n" ' bochs bochs image format support'
printf "%s\n" ' bpf eBPF support'
@@ -132,6 +134,7 @@ meson_options_help() {
printf "%s\n" ' keyring Linux keyring support'
printf "%s\n" ' kvm KVM acceleration support'
printf "%s\n" ' l2tpv3 l2tpv3 network backend support'
+ printf "%s\n" ' libcbor libcbor support'
printf "%s\n" ' libdaxctl libdaxctl support'
printf "%s\n" ' libdw debuginfo support'
printf "%s\n" ' libiscsi libiscsi userspace initiator'
@@ -163,6 +166,8 @@ meson_options_help() {
printf "%s\n" ' pixman pixman support'
printf "%s\n" ' plugins TCG plugins via shared library loading'
printf "%s\n" ' png PNG support with libpng'
+ printf "%s\n" ' pvg macOS paravirtualized graphics support'
+ printf "%s\n" ' qatzip QATzip compression support'
printf "%s\n" ' qcow1 qcow1 image format support'
printf "%s\n" ' qed qed image format support'
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
@@ -170,6 +175,7 @@ meson_options_help() {
printf "%s\n" ' rbd Ceph block device driver'
printf "%s\n" ' rdma Enable RDMA-based migration'
printf "%s\n" ' replication replication support'
+ printf "%s\n" ' rust Rust support'
printf "%s\n" ' rutabaga-gfx rutabaga_gfx support'
printf "%s\n" ' sdl SDL user interface'
printf "%s\n" ' sdl-image SDL Image support for icons'
@@ -190,6 +196,7 @@ meson_options_help() {
printf "%s\n" ' u2f U2F emulation support'
printf "%s\n" ' uadk UADK Library support'
printf "%s\n" ' usb-redir libusbredir support'
+ printf "%s\n" ' valgrind valgrind debug support for coroutine stacks'
printf "%s\n" ' vde vde network backend support'
printf "%s\n" ' vdi vdi image format support'
printf "%s\n" ' vduse-blk-export'
@@ -206,8 +213,6 @@ meson_options_help() {
printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support'
printf "%s\n" ' virglrenderer virgl rendering support'
printf "%s\n" ' virtfs virtio-9p support'
- printf "%s\n" ' virtfs-proxy-helper'
- printf "%s\n" ' virtio-9p proxy helper support'
printf "%s\n" ' vmdk vmdk image format support'
printf "%s\n" ' vmnet vmnet.framework network backend support'
printf "%s\n" ' vnc VNC server'
@@ -230,15 +235,13 @@ _meson_option_parse() {
--disable-af-xdp) printf "%s" -Daf_xdp=disabled ;;
--enable-alsa) printf "%s" -Dalsa=enabled ;;
--disable-alsa) printf "%s" -Dalsa=disabled ;;
+ --enable-asan) printf "%s" -Dasan=true ;;
+ --disable-asan) printf "%s" -Dasan=false ;;
--enable-attr) printf "%s" -Dattr=enabled ;;
--disable-attr) printf "%s" -Dattr=disabled ;;
--audio-drv-list=*) quote_sh "-Daudio_drv_list=$2" ;;
--enable-auth-pam) printf "%s" -Dauth_pam=enabled ;;
--disable-auth-pam) printf "%s" -Dauth_pam=disabled ;;
- --enable-avx2) printf "%s" -Davx2=enabled ;;
- --disable-avx2) printf "%s" -Davx2=disabled ;;
- --enable-avx512bw) printf "%s" -Davx512bw=enabled ;;
- --disable-avx512bw) printf "%s" -Davx512bw=disabled ;;
--enable-gcov) printf "%s" -Db_coverage=true ;;
--disable-gcov) printf "%s" -Db_coverage=false ;;
--enable-lto) printf "%s" -Db_lto=true ;;
@@ -355,6 +358,8 @@ _meson_option_parse() {
--disable-kvm) printf "%s" -Dkvm=disabled ;;
--enable-l2tpv3) printf "%s" -Dl2tpv3=enabled ;;
--disable-l2tpv3) printf "%s" -Dl2tpv3=disabled ;;
+ --enable-libcbor) printf "%s" -Dlibcbor=enabled ;;
+ --disable-libcbor) printf "%s" -Dlibcbor=disabled ;;
--enable-libdaxctl) printf "%s" -Dlibdaxctl=enabled ;;
--disable-libdaxctl) printf "%s" -Dlibdaxctl=disabled ;;
--libdir=*) quote_sh "-Dlibdir=$2" ;;
@@ -427,6 +432,10 @@ _meson_option_parse() {
--enable-png) printf "%s" -Dpng=enabled ;;
--disable-png) printf "%s" -Dpng=disabled ;;
--prefix=*) quote_sh "-Dprefix=$2" ;;
+ --enable-pvg) printf "%s" -Dpvg=enabled ;;
+ --disable-pvg) printf "%s" -Dpvg=disabled ;;
+ --enable-qatzip) printf "%s" -Dqatzip=enabled ;;
+ --disable-qatzip) printf "%s" -Dqatzip=disabled ;;
--enable-qcow1) printf "%s" -Dqcow1=enabled ;;
--disable-qcow1) printf "%s" -Dqcow1=disabled ;;
--enable-qed) printf "%s" -Dqed=enabled ;;
@@ -452,12 +461,13 @@ _meson_option_parse() {
--disable-replication) printf "%s" -Dreplication=disabled ;;
--enable-rng-none) printf "%s" -Drng_none=true ;;
--disable-rng-none) printf "%s" -Drng_none=false ;;
+ --rtsig-map=*) quote_sh "-Drtsig_map=$2" ;;
+ --enable-rust) printf "%s" -Drust=enabled ;;
+ --disable-rust) printf "%s" -Drust=disabled ;;
--enable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=enabled ;;
--disable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=disabled ;;
--enable-safe-stack) printf "%s" -Dsafe_stack=true ;;
--disable-safe-stack) printf "%s" -Dsafe_stack=false ;;
- --enable-sanitizers) printf "%s" -Dsanitizers=true ;;
- --disable-sanitizers) printf "%s" -Dsanitizers=false ;;
--enable-sdl) printf "%s" -Dsdl=enabled ;;
--disable-sdl) printf "%s" -Dsdl=disabled ;;
--enable-sdl-image) printf "%s" -Dsdl_image=enabled ;;
@@ -485,6 +495,8 @@ _meson_option_parse() {
--disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;;
--enable-stack-protector) printf "%s" -Dstack_protector=enabled ;;
--disable-stack-protector) printf "%s" -Dstack_protector=disabled ;;
+ --enable-strict-rust-lints) printf "%s" -Dstrict_rust_lints=true ;;
+ --disable-strict-rust-lints) printf "%s" -Dstrict_rust_lints=false ;;
--enable-strip) printf "%s" -Dstrip=true ;;
--disable-strip) printf "%s" -Dstrip=false ;;
--sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;;
@@ -505,8 +517,12 @@ _meson_option_parse() {
--disable-u2f) printf "%s" -Du2f=disabled ;;
--enable-uadk) printf "%s" -Duadk=enabled ;;
--disable-uadk) printf "%s" -Duadk=disabled ;;
+ --enable-ubsan) printf "%s" -Dubsan=true ;;
+ --disable-ubsan) printf "%s" -Dubsan=false ;;
--enable-usb-redir) printf "%s" -Dusb_redir=enabled ;;
--disable-usb-redir) printf "%s" -Dusb_redir=disabled ;;
+ --enable-valgrind) printf "%s" -Dvalgrind=enabled ;;
+ --disable-valgrind) printf "%s" -Dvalgrind=disabled ;;
--enable-vde) printf "%s" -Dvde=enabled ;;
--disable-vde) printf "%s" -Dvde=disabled ;;
--enable-vdi) printf "%s" -Dvdi=enabled ;;
@@ -533,8 +549,6 @@ _meson_option_parse() {
--disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;;
--enable-virtfs) printf "%s" -Dvirtfs=enabled ;;
--disable-virtfs) printf "%s" -Dvirtfs=disabled ;;
- --enable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=enabled ;;
- --disable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=disabled ;;
--enable-vmdk) printf "%s" -Dvmdk=enabled ;;
--disable-vmdk) printf "%s" -Dvmdk=disabled ;;
--enable-vmnet) printf "%s" -Dvmnet=enabled ;;
diff --git a/scripts/minikconf.py b/scripts/minikconf.py
index bcd9101..6f7f43b 100644
--- a/scripts/minikconf.py
+++ b/scripts/minikconf.py
@@ -112,7 +112,7 @@ class KconfigData:
def set_value(self, val, clause):
self.clauses_for_var.append(clause)
if self.has_value() and self.value != val:
- print("The following clauses were found for " + self.name)
+ print("The following clauses were found for " + self.name, file=sys.stderr)
for i in self.clauses_for_var:
print(" " + str(i), file=sys.stderr)
raise KconfigDataError('contradiction between clauses when setting %s' % self)
diff --git a/scripts/modinfo-collect.py b/scripts/modinfo-collect.py
index 4e7584d..48bd92b 100644
--- a/scripts/modinfo-collect.py
+++ b/scripts/modinfo-collect.py
@@ -7,15 +7,6 @@ import json
import shlex
import subprocess
-def find_command(src, target, compile_commands):
- for command in compile_commands:
- if command['file'] != src:
- continue
- if target != '' and command['command'].find(target) == -1:
- continue
- return command['command']
- return 'false'
-
def process_command(src, command):
skip = False
out = []
@@ -43,14 +34,22 @@ def main(args):
print("MODINFO_DEBUG target %s" % target)
arch = target[:-8] # cut '-softmmu'
print("MODINFO_START arch \"%s\" MODINFO_END" % arch)
+
with open('compile_commands.json') as f:
- compile_commands = json.load(f)
- for src in args:
+ compile_commands_json = json.load(f)
+ compile_commands = { x['output']: x for x in compile_commands_json }
+
+ for obj in args:
+ entry = compile_commands.get(obj, None)
+ if not entry:
+ print('modinfo: Could not find object file', obj, file=sys.stderr)
+ sys.exit(1)
+ src = entry['file']
if not src.endswith('.c'):
print("MODINFO_DEBUG skip %s" % src)
continue
+ command = entry['command']
print("MODINFO_DEBUG src %s" % src)
- command = find_command(src, target, compile_commands)
cmdline = process_command(src, command)
print("MODINFO_DEBUG cmd", cmdline)
result = subprocess.run(cmdline, stdout = subprocess.PIPE,
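
The modinfo-collect.py change above replaces a linear search of
compile_commands.json with a dict keyed on each entry's "output" field, so an
object file maps straight to its source and compile command.  A minimal
illustration of that lookup (the sample entry below is made up):

import json

compile_commands_json = json.loads("""
[{"file": "hw/foo/bar.c",
  "output": "libcommon.a.p/hw_foo_bar.c.o",
  "command": "cc -c hw/foo/bar.c -o libcommon.a.p/hw_foo_bar.c.o"}]
""")
compile_commands = {x["output"]: x for x in compile_commands_json}

entry = compile_commands.get("libcommon.a.p/hw_foo_bar.c.o")
print(entry["file"], "->", entry["command"])
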
diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py
index eb01a05..2ef375f 100644
--- a/scripts/mtest2make.py
+++ b/scripts/mtest2make.py
@@ -27,7 +27,7 @@ SPEED = quick
.speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s)
.speed.thorough = $(foreach s,$(sort $1), --suite $s)
-TIMEOUT_MULTIPLIER = 1
+TIMEOUT_MULTIPLIER ?= 1
.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER)
ifneq ($(SPEED), quick)
.mtestargs += --setup $(SPEED)
diff --git a/scripts/nsis.py b/scripts/nsis.py
index 03ed760..8f46963 100644
--- a/scripts/nsis.py
+++ b/scripts/nsis.py
@@ -23,7 +23,7 @@ def find_deps(exe_or_dll, search_path, analyzed_deps):
output = subprocess.check_output(["objdump", "-p", exe_or_dll], text=True)
output = output.split("\n")
for line in output:
- if not line.startswith("\tDLL Name: "):
+ if not line.lstrip().startswith("DLL Name: "):
continue
dep = line.split("DLL Name: ")[1].strip()
@@ -37,10 +37,10 @@ def find_deps(exe_or_dll, search_path, analyzed_deps):
analyzed_deps.add(dep)
# locate the dll dependencies recursively
- rdeps = find_deps(dll, search_path, analyzed_deps)
+ analyzed_deps, rdeps = find_deps(dll, search_path, analyzed_deps)
deps.extend(rdeps)
- return deps
+ return analyzed_deps, deps
def main():
parser = argparse.ArgumentParser(description="QEMU NSIS build helper.")
@@ -92,18 +92,18 @@ def main():
dlldir = os.path.join(destdir + prefix, "dll")
os.mkdir(dlldir)
+ analyzed_deps = set()
for exe in glob.glob(os.path.join(destdir + prefix, "*.exe")):
signcode(exe)
# find all dll dependencies
- deps = set(find_deps(exe, search_path, set()))
+ analyzed_deps, deps = find_deps(exe, search_path, analyzed_deps)
+ deps = set(deps)
deps.remove(exe)
# copy all dlls to the DLLDIR
for dep in deps:
dllfile = os.path.join(dlldir, os.path.basename(dep))
- if (os.path.exists(dllfile)):
- continue
print("Copying '%s' to '%s'" % (dep, dllfile))
shutil.copy(dep, dllfile)
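
The nsis.py change above threads the set of already-analyzed DLLs through the
recursion so each dependency is inspected only once even when two binaries
share it.  A toy version of that pattern over an in-memory dependency graph
(the graph is made up; the real script walks objdump output):

GRAPH = {
    "qemu.exe": ["glib.dll", "ssl.dll"],
    "glib.dll": ["intl.dll"],
    "ssl.dll":  ["crypto.dll", "glib.dll"],
}

def find_deps(name, analyzed):
    deps = [name]
    for dep in GRAPH.get(name, []):
        if dep in analyzed:
            continue
        analyzed.add(dep)
        analyzed, rdeps = find_deps(dep, analyzed)
        deps.extend(rdeps)
    return analyzed, deps

analyzed, deps = find_deps("qemu.exe", set())
print(sorted(set(deps)))   # each DLL appears once despite the shared edge
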
diff --git a/scripts/probe-gdb-support.py b/scripts/probe-gdb-support.py
index 46d6c00..6bcadce 100644
--- a/scripts/probe-gdb-support.py
+++ b/scripts/probe-gdb-support.py
@@ -19,59 +19,61 @@
import argparse
import re
-from subprocess import check_output, STDOUT
+from subprocess import check_output, STDOUT, CalledProcessError
+import sys
-# mappings from gdb arch to QEMU target
-mappings = {
- "alpha" : "alpha",
+# Mappings from gdb arch to QEMU target
+MAP = {
+ "alpha" : ["alpha"],
"aarch64" : ["aarch64", "aarch64_be"],
- "armv7": "arm",
+ "armv7": ["arm"],
"armv8-a" : ["aarch64", "aarch64_be"],
- "avr" : "avr",
- "cris" : "cris",
+ "avr" : ["avr"],
# no hexagon in upstream gdb
- "hppa1.0" : "hppa",
- "i386" : "i386",
- "i386:x86-64" : "x86_64",
- "Loongarch64" : "loongarch64",
- "m68k" : "m68k",
- "MicroBlaze" : "microblaze",
+ "hppa1.0" : ["hppa"],
+ "i386" : ["i386"],
+ "i386:x86-64" : ["x86_64"],
+ "Loongarch64" : ["loongarch64"],
+ "m68k" : ["m68k"],
+ "MicroBlaze" : ["microblaze"],
"mips:isa64" : ["mips64", "mips64el"],
- "or1k" : "or1k",
- "powerpc:common" : "ppc",
+ "or1k" : ["or1k"],
+ "powerpc:common" : ["ppc"],
"powerpc:common64" : ["ppc64", "ppc64le"],
- "riscv:rv32" : "riscv32",
- "riscv:rv64" : "riscv64",
- "s390:64-bit" : "s390x",
+ "riscv:rv32" : ["riscv32"],
+ "riscv:rv64" : ["riscv64"],
+ "s390:64-bit" : ["s390x"],
"sh4" : ["sh4", "sh4eb"],
- "sparc": "sparc",
- "sparc:v8plus": "sparc32plus",
- "sparc:v9a" : "sparc64",
+ "sparc": ["sparc"],
+ "sparc:v8plus": ["sparc32plus"],
+ "sparc:v9a" : ["sparc64"],
# no tricore in upstream gdb
"xtensa" : ["xtensa", "xtensaeb"]
}
+
def do_probe(gdb):
- gdb_out = check_output([gdb,
- "-ex", "set architecture",
- "-ex", "quit"], stderr=STDOUT)
+ try:
+ gdb_out = check_output([gdb,
+ "-ex", "set architecture",
+ "-ex", "quit"], stderr=STDOUT, encoding="utf-8")
+ except (OSError) as e:
+ sys.exit(e)
+ except CalledProcessError as e:
+ sys.exit(f'{e}. Output:\n\n{e.output}')
+
+ found_gdb_archs = re.search(r'Valid arguments are (.*)', gdb_out)
- m = re.search(r"Valid arguments are (.*)",
- gdb_out.decode("utf-8"))
+ targets = set()
+ if found_gdb_archs:
+ gdb_archs = found_gdb_archs.group(1).split(", ")
+ mapped_gdb_archs = [arch for arch in gdb_archs if arch in MAP]
- valid_arches = set()
+ targets = {target for arch in mapped_gdb_archs for target in MAP[arch]}
- if m.group(1):
- for arch in m.group(1).split(", "):
- if arch in mappings:
- mapping = mappings[arch]
- if isinstance(mapping, str):
- valid_arches.add(mapping)
- else:
- for entry in mapping:
- valid_arches.add(entry)
+ # QEMU targets
+ return targets
- return valid_arches
def main() -> None:
parser = argparse.ArgumentParser(description='Probe GDB Architectures')
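
The probe-gdb-support.py rework above normalizes the gdb-to-QEMU mapping to
lists of targets and collapses the lookup into a set comprehension over gdb's
"set architecture" output.  A self-contained illustration with a trimmed
mapping and a canned output line (both assumed here):

import re

MAP = {
    "aarch64": ["aarch64", "aarch64_be"],
    "i386:x86-64": ["x86_64"],
    "s390:64-bit": ["s390x"],
}

gdb_out = "Valid arguments are aarch64, i386:x86-64, msp430, s390:64-bit"
found = re.search(r"Valid arguments are (.*)", gdb_out)
targets = set()
if found:
    archs = found.group(1).split(", ")
    targets = {t for arch in archs if arch in MAP for t in MAP[arch]}
print(sorted(targets))   # ['aarch64', 'aarch64_be', 's390x', 'x86_64']
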
diff --git a/scripts/qapi/.flake8 b/scripts/qapi/.flake8
deleted file mode 100644
index a873ff6..0000000
--- a/scripts/qapi/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-# Prefer pylint's bare-except checks to flake8's
-extend-ignore = E722
diff --git a/scripts/qapi/.isort.cfg b/scripts/qapi/.isort.cfg
deleted file mode 100644
index 643caa1..0000000
--- a/scripts/qapi/.isort.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-[settings]
-force_grid_wrap=4
-force_sort_within_sections=True
-include_trailing_comma=True
-line_length=72
-lines_after_imports=2
-multi_line_output=3
diff --git a/scripts/qapi/backend.py b/scripts/qapi/backend.py
new file mode 100644
index 0000000..49ae6ec
--- /dev/null
+++ b/scripts/qapi/backend.py
@@ -0,0 +1,65 @@
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+from abc import ABC, abstractmethod
+
+from .commands import gen_commands
+from .events import gen_events
+from .features import gen_features
+from .introspect import gen_introspect
+from .schema import QAPISchema
+from .types import gen_types
+from .visit import gen_visit
+
+
+class QAPIBackend(ABC):
+ # pylint: disable=too-few-public-methods
+
+ @abstractmethod
+ def generate(self,
+ schema: QAPISchema,
+ output_dir: str,
+ prefix: str,
+ unmask: bool,
+ builtins: bool,
+ gen_tracing: bool) -> None:
+ """
+ Generate code for the given schema into the target directory.
+
+ :param schema: The primary QAPI schema object.
+ :param output_dir: The output directory to store generated code.
+ :param prefix: Optional C-code prefix for symbol names.
+ :param unmask: Expose non-ABI names through introspection?
+ :param builtins: Generate code for built-in types?
+
+ :raise QAPIError: On failures.
+ """
+
+
+class QAPICBackend(QAPIBackend):
+ # pylint: disable=too-few-public-methods
+
+ def generate(self,
+ schema: QAPISchema,
+ output_dir: str,
+ prefix: str,
+ unmask: bool,
+ builtins: bool,
+ gen_tracing: bool) -> None:
+ """
+ Generate C code for the given schema into the target directory.
+
+ :param schema: The primary QAPI schema object.
+ :param output_dir: The output directory to store generated code.
+ :param prefix: Optional C-code prefix for symbol names.
+ :param unmask: Expose non-ABI names through introspection?
+ :param builtins: Generate code for built-in types?
+
+ :raise QAPIError: On failures.
+ """
+ gen_types(schema, output_dir, prefix, builtins)
+ gen_features(schema, output_dir, prefix)
+ gen_visit(schema, output_dir, prefix, builtins)
+ gen_commands(schema, output_dir, prefix, gen_tracing)
+ gen_events(schema, output_dir, prefix)
+ gen_introspect(schema, output_dir, prefix, unmask)
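
The new QAPIBackend abstract class above is the hook for alternative QAPI
code generators.  A hypothetical minimal backend (not part of the patch) that
only reports what it would generate; the stand-in base class below mirrors
the abstract interface so the sketch is self-contained:

from abc import ABC, abstractmethod

class QAPIBackend(ABC):          # stand-in for scripts/qapi/backend.py
    @abstractmethod
    def generate(self, schema, output_dir, prefix, unmask, builtins,
                 gen_tracing) -> None:
        ...

class NullBackend(QAPIBackend):
    """Dry-run backend: prints the request instead of writing files."""
    def generate(self, schema, output_dir, prefix, unmask, builtins,
                 gen_tracing) -> None:
        print(f"would generate into {output_dir!r} with prefix {prefix!r}")

NullBackend().generate(schema=None, output_dir="qapi-gen", prefix="",
                       unmask=False, builtins=True, gen_tracing=False)
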
diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py
index 79951a8..7914227 100644
--- a/scripts/qapi/commands.py
+++ b/scripts/qapi/commands.py
@@ -25,7 +25,7 @@ from .gen import (
QAPIGenC,
QAPISchemaModularCVisitor,
build_params,
- gen_special_features,
+ gen_features,
ifcontext,
)
from .schema import (
@@ -298,7 +298,7 @@ def gen_register_command(name: str,
''',
name=name, c_name=c_name(name),
opts=' | '.join(options) or 0,
- feats=gen_special_features(features))
+ feats=gen_features(features))
return ret
@@ -320,7 +320,7 @@ class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
#include "qemu/osdep.h"
#include "qapi/compat-policy.h"
#include "qapi/visitor.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/error.h"
#include "%(visit)s.h"
@@ -330,7 +330,7 @@ class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
if self._gen_tracing and commands != 'qapi-commands':
self._genc.add(mcgen('''
-#include "qapi/qmp/qjson.h"
+#include "qobject/qjson.h"
#include "trace/trace-%(nm)s_trace_events.h"
''',
nm=c_name(commands, protect=False)))
@@ -346,7 +346,7 @@ class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
def visit_begin(self, schema: QAPISchema) -> None:
self._add_module('./init', ' * QAPI Commands initialization')
self._genh.add(mcgen('''
-#include "qapi/qmp/dispatch.h"
+#include "qapi/qmp-registry.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
''',
@@ -355,6 +355,7 @@ void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
#include "qemu/osdep.h"
#include "%(prefix)sqapi-commands.h"
#include "%(prefix)sqapi-init-commands.h"
+#include "%(prefix)sqapi-features.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds)
{
diff --git a/scripts/qapi/common.py b/scripts/qapi/common.py
index 737b059..d7c8aa3 100644
--- a/scripts/qapi/common.py
+++ b/scripts/qapi/common.py
@@ -40,22 +40,28 @@ def camel_to_upper(value: str) -> str:
ENUM_Name2 -> ENUM_NAME2
ENUM24_Name -> ENUM24_NAME
"""
- c_fun_str = c_name(value, False)
- if value.isupper():
- return c_fun_str
-
- new_name = ''
- length = len(c_fun_str)
- for i in range(length):
- char = c_fun_str[i]
- # When char is upper case and no '_' appears before, do more checks
- if char.isupper() and (i > 0) and c_fun_str[i - 1] != '_':
- if i < length - 1 and c_fun_str[i + 1].islower():
- new_name += '_'
- elif c_fun_str[i - 1].isdigit():
- new_name += '_'
- new_name += char
- return new_name.lstrip('_').upper()
+ ret = value[0]
+ upc = value[0].isupper()
+
+ # Copy remainder of ``value`` to ``ret`` with '_' inserted
+ for ch in value[1:]:
+ if ch.isupper() == upc:
+ pass
+ elif upc:
+ # ``ret`` ends in upper case, next char isn't: insert '_'
+ # before the last upper case char unless there is one
+ # already, or it's at the beginning
+ if len(ret) > 2 and ret[-2].isalnum():
+ ret = ret[:-1] + '_' + ret[-1]
+ else:
+ # ``ret`` doesn't end in upper case, next char is: insert
+ # '_' before it
+ if ret[-1].isalnum():
+ ret += '_'
+ ret += ch
+ upc = ch.isupper()
+
+ return c_name(ret.upper()).lstrip('_')
def c_enum_const(type_name: str,
@@ -68,9 +74,9 @@ def c_enum_const(type_name: str,
:param const_name: The name of this constant.
:param prefix: Optional, prefix that overrides the type_name.
"""
- if prefix is not None:
- type_name = prefix
- return camel_to_upper(type_name) + '_' + c_name(const_name, False).upper()
+ if prefix is None:
+ prefix = camel_to_upper(type_name)
+ return prefix + '_' + c_name(const_name, False).upper()
def c_name(name: str, protect: bool = True) -> str:
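Two behavioural points are easy to miss in the diff: camel_to_upper() now turns 'QType' into 'QTYPE' (which is why schema.py below can drop the explicit 'QTYPE' prefix), and c_enum_const() uses a caller-supplied prefix verbatim instead of running it through camel_to_upper(). A quick illustration of the expected results under the new code (the 'fast-path' feature name is made up):

    from qapi.common import camel_to_upper, c_enum_const

    camel_to_upper('QType')                        # -> 'QTYPE'
    camel_to_upper('QapiFeature')                  # -> 'QAPI_FEATURE'
    c_enum_const('QapiFeature', 'fast-path')       # -> 'QAPI_FEATURE_FAST_PATH'
    c_enum_const('QType', 'none', prefix='QTYPE')  # -> 'QTYPE_NONE', prefix used as-is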
diff --git a/scripts/qapi/events.py b/scripts/qapi/events.py
index d1f6399..d179b0e 100644
--- a/scripts/qapi/events.py
+++ b/scripts/qapi/events.py
@@ -194,7 +194,7 @@ class QAPISchemaGenEventVisitor(QAPISchemaModularCVisitor):
#include "%(visit)s.h"
#include "qapi/compat-policy.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qmp-event.h"
''',
events=events, visit=visit,
diff --git a/scripts/qapi/features.py b/scripts/qapi/features.py
new file mode 100644
index 0000000..5756320
--- /dev/null
+++ b/scripts/qapi/features.py
@@ -0,0 +1,48 @@
+"""
+QAPI features generator
+
+Copyright 2024 Red Hat
+
+This work is licensed under the terms of the GNU GPL, version 2.
+See the COPYING file in the top-level directory.
+"""
+
+from typing import ValuesView
+
+from .common import c_enum_const, c_name
+from .gen import QAPISchemaMonolithicCVisitor
+from .schema import QAPISchema, QAPISchemaFeature
+
+
+class QAPISchemaGenFeatureVisitor(QAPISchemaMonolithicCVisitor):
+
+ def __init__(self, prefix: str):
+ super().__init__(
+ prefix, 'qapi-features',
+ ' * Schema-defined QAPI features',
+ __doc__)
+
+ self.features: ValuesView[QAPISchemaFeature]
+
+ def visit_begin(self, schema: QAPISchema) -> None:
+ self.features = schema.features()
+ self._genh.add("#include \"qapi/util.h\"\n\n")
+
+ def visit_end(self) -> None:
+ self._genh.add("typedef enum {\n")
+ for f in self.features:
+ self._genh.add(f" {c_enum_const('qapi_feature', f.name)}")
+ if f.name in QAPISchemaFeature.SPECIAL_NAMES:
+ self._genh.add(f" = {c_enum_const('qapi', f.name)},\n")
+ else:
+ self._genh.add(",\n")
+
+ self._genh.add("} " + c_name('QapiFeature') + ";\n")
+
+
+def gen_features(schema: QAPISchema,
+ output_dir: str,
+ prefix: str) -> None:
+ vis = QAPISchemaGenFeatureVisitor(prefix)
+ schema.visit(vis)
+ vis.write(output_dir)
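Run on its own, the new visitor produces the qapi-features header that the other generators now include. A minimal sketch of driving it directly (the schema path is an assumption; QAPICBackend.generate() performs the same call as one of its steps):

    from qapi.schema import QAPISchema
    from qapi.features import gen_features

    schema = QAPISchema('qapi/qapi-schema.json')   # path is an assumption
    gen_features(schema, 'out', prefix='')
    # out/qapi-features.h gains a QapiFeature typedef whose first members
    # alias the QapiSpecialFeature constants:
    #     QAPI_FEATURE_DEPRECATED = QAPI_DEPRECATED,
    #     QAPI_FEATURE_UNSTABLE = QAPI_UNSTABLE,
    # followed by one enumerator per feature named in the schema.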
diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py
index 6a8abe0..d3c56d45 100644
--- a/scripts/qapi/gen.py
+++ b/scripts/qapi/gen.py
@@ -24,6 +24,7 @@ from typing import (
)
from .common import (
+ c_enum_const,
c_fname,
c_name,
guardend,
@@ -40,10 +41,10 @@ from .schema import (
from .source import QAPISourceInfo
-def gen_special_features(features: Sequence[QAPISchemaFeature]) -> str:
- special_features = [f"1u << QAPI_{feat.name.upper()}"
- for feat in features if feat.is_special()]
- return ' | '.join(special_features) or '0'
+def gen_features(features: Sequence[QAPISchemaFeature]) -> str:
+ feats = [f"1u << {c_enum_const('qapi_feature', feat.name)}"
+ for feat in features]
+ return ' | '.join(feats) or '0'
class QAPIGen:
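gen_features(), the replacement for gen_special_features(), ORs in a bit for every feature of the entity rather than just the special ones, using the generated QapiFeature constants as bit numbers. A small illustration, assuming scripts/qapi is importable and using an invented 'fast-path' feature:

    from qapi.gen import gen_features
    from qapi.schema import QAPISchemaFeature

    feats = [QAPISchemaFeature('deprecated', None),
             QAPISchemaFeature('fast-path', None)]
    print(gen_features(feats))
    # 1u << QAPI_FEATURE_DEPRECATED | 1u << QAPI_FEATURE_FAST_PATH
    print(gen_features([]))
    # 0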
diff --git a/scripts/qapi/introspect.py b/scripts/qapi/introspect.py
index ac14b20..89ee5d5 100644
--- a/scripts/qapi/introspect.py
+++ b/scripts/qapi/introspect.py
@@ -11,6 +11,7 @@ This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
+from dataclasses import dataclass
from typing import (
Any,
Dict,
@@ -79,19 +80,16 @@ SchemaInfoCommand = Dict[str, object]
_ValueT = TypeVar('_ValueT', bound=_Value)
+@dataclass
class Annotated(Generic[_ValueT]):
"""
Annotated generally contains a SchemaInfo-like type (as a dict),
but it is also used to wrap comments/ifconds around scalar leaf values,
for the benefit of features and enums.
"""
- # TODO: Remove after Python 3.7 adds @dataclass:
- # pylint: disable=too-few-public-methods
- def __init__(self, value: _ValueT, ifcond: QAPISchemaIfCond,
- comment: Optional[str] = None):
- self.value = value
- self.comment: Optional[str] = comment
- self.ifcond = ifcond
+ value: _ValueT
+ ifcond: QAPISchemaIfCond
+ comment: Optional[str] = None
def _tree_to_qlit(obj: JSONValue,
@@ -197,7 +195,7 @@ class QAPISchemaGenIntrospectVisitor(QAPISchemaMonolithicCVisitor):
# generate C
name = c_name(self._prefix, protect=False) + 'qmp_schema_qlit'
self._genh.add(mcgen('''
-#include "qapi/qmp/qlit.h"
+#include "qobject/qlit.h"
extern const QLitObject %(c_name)s;
''',
diff --git a/scripts/qapi/main.py b/scripts/qapi/main.py
index 316736b..0e2a6ae 100644
--- a/scripts/qapi/main.py
+++ b/scripts/qapi/main.py
@@ -8,17 +8,14 @@ This is the main entry point for generating C code from the QAPI schema.
"""
import argparse
+from importlib import import_module
import sys
from typing import Optional
-from .commands import gen_commands
+from .backend import QAPIBackend, QAPICBackend
from .common import must_match
from .error import QAPIError
-from .events import gen_events
-from .introspect import gen_introspect
from .schema import QAPISchema
-from .types import gen_types
-from .visit import gen_visit
def invalid_prefix_char(prefix: str) -> Optional[str]:
@@ -28,31 +25,36 @@ def invalid_prefix_char(prefix: str) -> Optional[str]:
return None
-def generate(schema_file: str,
- output_dir: str,
- prefix: str,
- unmask: bool = False,
- builtins: bool = False,
- gen_tracing: bool = False) -> None:
- """
- Generate C code for the given schema into the target directory.
+def create_backend(path: str) -> QAPIBackend:
+ if path is None:
+ return QAPICBackend()
- :param schema_file: The primary QAPI schema file.
- :param output_dir: The output directory to store generated code.
- :param prefix: Optional C-code prefix for symbol names.
- :param unmask: Expose non-ABI names through introspection?
- :param builtins: Generate code for built-in types?
+ module_path, dot, class_name = path.rpartition('.')
+ if not dot:
+ raise QAPIError("argument of -B must be of the form MODULE.CLASS")
- :raise QAPIError: On failures.
- """
- assert invalid_prefix_char(prefix) is None
+ try:
+ mod = import_module(module_path)
+ except Exception as ex:
+ raise QAPIError(f"unable to import '{module_path}': {ex}") from ex
+
+ try:
+ klass = getattr(mod, class_name)
+ except AttributeError as ex:
+ raise QAPIError(
+ f"module '{module_path}' has no class '{class_name}'") from ex
+
+ try:
+ backend = klass()
+ except Exception as ex:
+ raise QAPIError(
+ f"backend '{path}' cannot be instantiated: {ex}") from ex
+
+ if not isinstance(backend, QAPIBackend):
+ raise QAPIError(
+ f"backend '{path}' must be an instance of QAPIBackend")
- schema = QAPISchema(schema_file)
- gen_types(schema, output_dir, prefix, builtins)
- gen_visit(schema, output_dir, prefix, builtins)
- gen_commands(schema, output_dir, prefix, gen_tracing)
- gen_events(schema, output_dir, prefix)
- gen_introspect(schema, output_dir, prefix, unmask)
+ return backend
def main() -> int:
@@ -75,6 +77,8 @@ def main() -> int:
parser.add_argument('-u', '--unmask-non-abi-names', action='store_true',
dest='unmask',
help="expose non-ABI names in introspection")
+ parser.add_argument('-B', '--backend', default=None,
+ help="Python module name for code generator")
# Option --suppress-tracing exists so we can avoid solving build system
# problems. TODO Drop it when we no longer need it.
@@ -91,12 +95,14 @@ def main() -> int:
return 1
try:
- generate(args.schema,
- output_dir=args.output_dir,
- prefix=args.prefix,
- unmask=args.unmask,
- builtins=args.builtins,
- gen_tracing=not args.suppress_tracing)
+ schema = QAPISchema(args.schema)
+ backend = create_backend(args.backend)
+ backend.generate(schema,
+ output_dir=args.output_dir,
+ prefix=args.prefix,
+ unmask=args.unmask,
+ builtins=args.builtins,
+ gen_tracing=not args.suppress_tracing)
except QAPIError as err:
print(err, file=sys.stderr)
return 1
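create_backend() is what backs the new -B option: it accepts a 'MODULE.CLASS' string, imports the module, instantiates the class and type-checks the result. A sketch of the behaviour (the my_backends module is hypothetical):

    from qapi.backend import QAPICBackend
    from qapi.error import QAPIError
    from qapi.main import create_backend

    assert isinstance(create_backend(None), QAPICBackend)  # default C backend
    backend = create_backend('my_backends.DocBackend')     # imports my_backends, instantiates DocBackend
    try:
        create_backend('no-dot-here')
    except QAPIError as err:
        print(err)   # argument of -B must be of the form MODULE.CLASS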
diff --git a/scripts/qapi/mypy.ini b/scripts/qapi/mypy.ini
deleted file mode 100644
index 8109470..0000000
--- a/scripts/qapi/mypy.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[mypy]
-strict = True
-disallow_untyped_calls = False
-python_version = 3.8
diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py
index adc85b5..949d9e8 100644
--- a/scripts/qapi/parser.py
+++ b/scripts/qapi/parser.py
@@ -14,7 +14,7 @@
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
-from collections import OrderedDict
+import enum
import os
import re
from typing import (
@@ -154,7 +154,7 @@ class QAPISchemaParser:
"value of 'include' must be a string")
incl_fname = os.path.join(os.path.dirname(self._fname),
include)
- self._add_expr(OrderedDict({'include': incl_fname}), info)
+ self._add_expr({'include': incl_fname}, info)
exprs_include = self._include(include, info, incl_fname,
self._included)
if exprs_include:
@@ -355,7 +355,7 @@ class QAPISchemaParser:
raise QAPIParseError(self, "stray '%s'" % match.group(0))
def get_members(self) -> Dict[str, object]:
- expr: Dict[str, object] = OrderedDict()
+ expr: Dict[str, object] = {}
if self.tok == '}':
self.accept()
return expr
@@ -575,7 +575,10 @@ class QAPISchemaParser:
)
raise QAPIParseError(self, emsg)
- doc.new_tagged_section(self.info, match.group(1))
+ doc.new_tagged_section(
+ self.info,
+ QAPIDoc.Kind.from_string(match.group(1))
+ )
text = line[match.end():]
if text:
doc.append_line(text)
@@ -586,7 +589,7 @@ class QAPISchemaParser:
self,
"unexpected '=' markup in definition documentation")
else:
- # tag-less paragraph
+ # plain paragraph
doc.ensure_untagged_section(self.info)
doc.append_line(line)
line = self.get_doc_paragraph(doc)
@@ -635,23 +638,51 @@ class QAPIDoc:
Free-form documentation blocks consist only of a body section.
"""
+ class Kind(enum.Enum):
+ PLAIN = 0
+ MEMBER = 1
+ FEATURE = 2
+ RETURNS = 3
+ ERRORS = 4
+ SINCE = 5
+ TODO = 6
+
+ @staticmethod
+ def from_string(kind: str) -> 'QAPIDoc.Kind':
+ return QAPIDoc.Kind[kind.upper()]
+
+ def __str__(self) -> str:
+ return self.name.title()
+
class Section:
# pylint: disable=too-few-public-methods
- def __init__(self, info: QAPISourceInfo,
- tag: Optional[str] = None):
+ def __init__(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ ):
# section source info, i.e. where it begins
self.info = info
- # section tag, if any ('Returns', '@name', ...)
- self.tag = tag
+ # section kind
+ self.kind = kind
# section text without tag
self.text = ''
+ def __repr__(self) -> str:
+ return f"<QAPIDoc.Section kind={self.kind!r} text={self.text!r}>"
+
def append_line(self, line: str) -> None:
self.text += line + '\n'
class ArgSection(Section):
- def __init__(self, info: QAPISourceInfo, tag: str):
- super().__init__(info, tag)
+ def __init__(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ name: str
+ ):
+ super().__init__(info, kind)
+ self.name = name
self.member: Optional['QAPISchemaMember'] = None
def connect(self, member: 'QAPISchemaMember') -> None:
@@ -663,7 +694,9 @@ class QAPIDoc:
# definition doc's symbol, None for free-form doc
self.symbol: Optional[str] = symbol
# the sections in textual order
- self.all_sections: List[QAPIDoc.Section] = [QAPIDoc.Section(info)]
+ self.all_sections: List[QAPIDoc.Section] = [
+ QAPIDoc.Section(info, QAPIDoc.Kind.PLAIN)
+ ]
# the body section
self.body: Optional[QAPIDoc.Section] = self.all_sections[0]
# dicts mapping parameter/feature names to their description
@@ -680,55 +713,71 @@ class QAPIDoc:
def end(self) -> None:
for section in self.all_sections:
section.text = section.text.strip('\n')
- if section.tag is not None and section.text == '':
+ if section.kind != QAPIDoc.Kind.PLAIN and section.text == '':
raise QAPISemError(
- section.info, "text required after '%s:'" % section.tag)
+ section.info, "text required after '%s:'" % section.kind)
def ensure_untagged_section(self, info: QAPISourceInfo) -> None:
- if self.all_sections and not self.all_sections[-1].tag:
+ kind = QAPIDoc.Kind.PLAIN
+
+ if self.all_sections and self.all_sections[-1].kind == kind:
# extend current section
- self.all_sections[-1].text += '\n'
+ section = self.all_sections[-1]
+ if not section.text:
+ # Section is empty so far; update info to start *here*.
+ section.info = info
+ section.text += '\n'
return
+
# start new section
- section = self.Section(info)
+ section = self.Section(info, kind)
self.sections.append(section)
self.all_sections.append(section)
- def new_tagged_section(self, info: QAPISourceInfo, tag: str) -> None:
- section = self.Section(info, tag)
- if tag == 'Returns':
+ def new_tagged_section(
+ self,
+ info: QAPISourceInfo,
+ kind: 'QAPIDoc.Kind',
+ ) -> None:
+ section = self.Section(info, kind)
+ if kind == QAPIDoc.Kind.RETURNS:
if self.returns:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.returns = section
- elif tag == 'Errors':
+ elif kind == QAPIDoc.Kind.ERRORS:
if self.errors:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.errors = section
- elif tag == 'Since':
+ elif kind == QAPIDoc.Kind.SINCE:
if self.since:
raise QAPISemError(
- info, "duplicated '%s' section" % tag)
+ info, "duplicated '%s' section" % kind)
self.since = section
self.sections.append(section)
self.all_sections.append(section)
- def _new_description(self, info: QAPISourceInfo, name: str,
- desc: Dict[str, ArgSection]) -> None:
+ def _new_description(
+ self,
+ info: QAPISourceInfo,
+ name: str,
+ kind: 'QAPIDoc.Kind',
+ desc: Dict[str, ArgSection]
+ ) -> None:
if not name:
raise QAPISemError(info, "invalid parameter name")
if name in desc:
raise QAPISemError(info, "'%s' parameter name duplicated" % name)
- section = self.ArgSection(info, '@' + name)
+ section = self.ArgSection(info, kind, name)
self.all_sections.append(section)
desc[name] = section
def new_argument(self, info: QAPISourceInfo, name: str) -> None:
- self._new_description(info, name, self.args)
+ self._new_description(info, name, QAPIDoc.Kind.MEMBER, self.args)
def new_feature(self, info: QAPISourceInfo, name: str) -> None:
- self._new_description(info, name, self.features)
+ self._new_description(info, name, QAPIDoc.Kind.FEATURE, self.features)
def append_line(self, line: str) -> None:
self.all_sections[-1].append_line(line)
@@ -740,8 +789,23 @@ class QAPIDoc:
raise QAPISemError(member.info,
"%s '%s' lacks documentation"
% (member.role, member.name))
- self.args[member.name] = QAPIDoc.ArgSection(
- self.info, '@' + member.name)
+ # Insert stub documentation section for missing member docs.
+ # TODO: drop when undocumented members are outlawed
+
+ section = QAPIDoc.ArgSection(
+ self.info, QAPIDoc.Kind.MEMBER, member.name)
+ self.args[member.name] = section
+
+ # Determine where to insert stub doc - it should go at the
+ # end of the members section(s), if any. Note that index 0
+ # is assumed to be an untagged intro section, even if it is
+ # empty.
+ index = 1
+ if len(self.all_sections) > 1:
+ while self.all_sections[index].kind == QAPIDoc.Kind.MEMBER:
+ index += 1
+ self.all_sections.insert(index, section)
+
self.args[member.name].connect(member)
def connect_feature(self, feature: 'QAPISchemaFeature') -> None:
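Doc sections are now typed with QAPIDoc.Kind instead of free-form tag strings; Kind.from_string() maps the tag word parsed out of the doc comment, and __str__() maps back for error messages. For example:

    from qapi.parser import QAPIDoc

    kind = QAPIDoc.Kind.from_string('Returns')
    assert kind is QAPIDoc.Kind.RETURNS
    assert str(kind) == 'Returns'            # what "text required after '%s:'" prints
    assert str(QAPIDoc.Kind.TODO) == 'Todo'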
diff --git a/scripts/qapi/pylintrc b/scripts/qapi/pylintrc
index c028a1f..e16283a 100644
--- a/scripts/qapi/pylintrc
+++ b/scripts/qapi/pylintrc
@@ -17,7 +17,9 @@ disable=consider-using-f-string,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
+ too-many-positional-arguments,
too-many-statements,
+ unknown-option-value,
useless-option-value,
[REPORTS]
diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py
index d65c35f..cbe3b5a 100644
--- a/scripts/qapi/schema.py
+++ b/scripts/qapi/schema.py
@@ -19,7 +19,6 @@
from __future__ import annotations
from abc import ABC, abstractmethod
-from collections import OrderedDict
import os
import re
from typing import (
@@ -29,6 +28,7 @@ from typing import (
List,
Optional,
Union,
+ ValuesView,
cast,
)
@@ -556,7 +556,7 @@ class QAPISchemaObjectType(QAPISchemaType):
super().check(schema)
assert self._checked and not self._check_complete
- seen = OrderedDict()
+ seen = {}
if self._base_name:
self.base = schema.resolve_type(self._base_name, self.info,
"'base'")
@@ -933,8 +933,11 @@ class QAPISchemaEnumMember(QAPISchemaMember):
class QAPISchemaFeature(QAPISchemaMember):
role = 'feature'
+ # Features which are standardized across all schemas
+ SPECIAL_NAMES = ['deprecated', 'unstable']
+
def is_special(self) -> bool:
- return self.name in ('deprecated', 'unstable')
+ return self.name in QAPISchemaFeature.SPECIAL_NAMES
class QAPISchemaObjectTypeMember(QAPISchemaMember):
@@ -1137,7 +1140,17 @@ class QAPISchema:
self.docs = parser.docs
self._entity_list: List[QAPISchemaEntity] = []
self._entity_dict: Dict[str, QAPISchemaDefinition] = {}
- self._module_dict: Dict[str, QAPISchemaModule] = OrderedDict()
+ self._module_dict: Dict[str, QAPISchemaModule] = {}
+ # NB, values in the dict will identify the first encountered
+ # usage of a named feature only
+ self._feature_dict: Dict[str, QAPISchemaFeature] = {}
+
+ # All schemas get the names defined in the QapiSpecialFeature enum.
+ # Rely on dict iteration order matching insertion order so that
+ # the special names are emitted first when generating code.
+ for f in QAPISchemaFeature.SPECIAL_NAMES:
+ self._feature_dict[f] = QAPISchemaFeature(f, None)
+
self._schema_dir = os.path.dirname(fname)
self._make_module(QAPISchemaModule.BUILTIN_MODULE_NAME)
self._make_module(fname)
@@ -1147,6 +1160,9 @@ class QAPISchema:
self._def_exprs(exprs)
self.check()
+ def features(self) -> ValuesView[QAPISchemaFeature]:
+ return self._feature_dict.values()
+
def _def_entity(self, ent: QAPISchemaEntity) -> None:
self._entity_list.append(ent)
@@ -1249,7 +1265,7 @@ class QAPISchema:
[{'name': n} for n in qtypes], None)
self._def_definition(QAPISchemaEnumType(
- 'QType', None, None, None, None, qtype_values, 'QTYPE'))
+ 'QType', None, None, None, None, qtype_values, None))
def _make_features(
self,
@@ -1258,6 +1274,12 @@ class QAPISchema:
) -> List[QAPISchemaFeature]:
if features is None:
return []
+
+ for f in features:
+ feat = QAPISchemaFeature(f['name'], info)
+ if feat.name not in self._feature_dict:
+ self._feature_dict[feat.name] = feat
+
return [QAPISchemaFeature(f['name'], info,
QAPISchemaIfCond(f.get('if')))
for f in features]
@@ -1431,7 +1453,7 @@ class QAPISchema:
ifcond = QAPISchemaIfCond(expr.get('if'))
info = expr.info
features = self._make_features(expr.get('features'), info)
- if isinstance(data, OrderedDict):
+ if isinstance(data, dict):
data = self._make_implicit_object_type(
name, info, ifcond,
'arg', self._make_members(data, info))
@@ -1450,7 +1472,7 @@ class QAPISchema:
ifcond = QAPISchemaIfCond(expr.get('if'))
info = expr.info
features = self._make_features(expr.get('features'), info)
- if isinstance(data, OrderedDict):
+ if isinstance(data, dict):
data = self._make_implicit_object_type(
name, info, ifcond,
'arg', self._make_members(data, info))
@@ -1485,6 +1507,12 @@ class QAPISchema:
for doc in self.docs:
doc.check()
+ features = list(self._feature_dict.values())
+ if len(features) > 64:
+ raise QAPISemError(
+ features[64].info,
+ "Maximum of 64 schema features is permitted")
+
def visit(self, visitor: QAPISchemaVisitor) -> None:
visitor.visit_begin(self)
for mod in self._module_dict.values():
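The new 64-feature cap exists because every feature name in the schema becomes one bit position in the uint64_t masks emitted by gen_features() (see gen.py and types.py). A stand-alone sketch of that invariant:

    # 'fast-path' is an invented feature name; the first two are the
    # always-present special features.
    feature_names = ['deprecated', 'unstable', 'fast-path']
    mask = 0
    for bit, name in enumerate(feature_names):
        assert bit < 64, "Maximum of 64 schema features is permitted"
        mask |= 1 << bit
    print(hex(mask))   # 0x7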
diff --git a/scripts/qapi/source.py b/scripts/qapi/source.py
index 7b379fd..ffdc3f4 100644
--- a/scripts/qapi/source.py
+++ b/scripts/qapi/source.py
@@ -47,9 +47,9 @@ class QAPISourceInfo:
self.defn_meta = meta
self.defn_name = name
- def next_line(self: T) -> T:
+ def next_line(self: T, n: int = 1) -> T:
info = copy.copy(self)
- info.line += 1
+ info.line += n
return info
def loc(self) -> str:
diff --git a/scripts/qapi/types.py b/scripts/qapi/types.py
index 0dd0b00..2bf7533 100644
--- a/scripts/qapi/types.py
+++ b/scripts/qapi/types.py
@@ -16,11 +16,7 @@ This work is licensed under the terms of the GNU GPL, version 2.
from typing import List, Optional
from .common import c_enum_const, c_name, mcgen
-from .gen import (
- QAPISchemaModularCVisitor,
- gen_special_features,
- ifcontext,
-)
+from .gen import QAPISchemaModularCVisitor, gen_features, ifcontext
from .schema import (
QAPISchema,
QAPISchemaAlternatives,
@@ -61,17 +57,17 @@ const QEnumLookup %(c_name)s_lookup = {
index=index, name=memb.name)
ret += memb.ifcond.gen_endif()
- special_features = gen_special_features(memb.features)
- if special_features != '0':
+ features = gen_features(memb.features)
+ if features != '0':
feats += mcgen('''
- [%(index)s] = %(special_features)s,
+ [%(index)s] = %(features)s,
''',
- index=index, special_features=special_features)
+ index=index, features=features)
if feats:
ret += mcgen('''
},
- .special_features = (const unsigned char[%(max_index)s]) {
+ .features = (const uint64_t[%(max_index)s]) {
''',
max_index=max_index)
ret += feats
@@ -308,11 +304,14 @@ class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor):
#include "qapi/dealloc-visitor.h"
#include "%(types)s.h"
#include "%(visit)s.h"
+#include "%(prefix)sqapi-features.h"
''',
- types=types, visit=visit))
+ types=types, visit=visit,
+ prefix=self._prefix))
self._genh.preamble_add(mcgen('''
#include "qapi/qapi-builtin-types.h"
-'''))
+''',
+ prefix=self._prefix))
def visit_begin(self, schema: QAPISchema) -> None:
# gen_object() is recursive, ensure it doesn't visit the empty type
diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py
index 12f92e4..36e2409 100644
--- a/scripts/qapi/visit.py
+++ b/scripts/qapi/visit.py
@@ -21,11 +21,7 @@ from .common import (
indent,
mcgen,
)
-from .gen import (
- QAPISchemaModularCVisitor,
- gen_special_features,
- ifcontext,
-)
+from .gen import QAPISchemaModularCVisitor, gen_features, ifcontext
from .schema import (
QAPISchema,
QAPISchemaAlternatives,
@@ -103,15 +99,15 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp)
''',
name=memb.name, has=has)
indent.increase()
- special_features = gen_special_features(memb.features)
- if special_features != '0':
+ features = gen_features(memb.features)
+ if features != '0':
ret += mcgen('''
- if (visit_policy_reject(v, "%(name)s", %(special_features)s, errp)) {
+ if (visit_policy_reject(v, "%(name)s", %(features)s, errp)) {
return false;
}
- if (!visit_policy_skip(v, "%(name)s", %(special_features)s)) {
+ if (!visit_policy_skip(v, "%(name)s", %(features)s)) {
''',
- name=memb.name, special_features=special_features)
+ name=memb.name, features=features)
indent.increase()
ret += mcgen('''
if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) {
@@ -120,7 +116,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp)
''',
c_type=memb.type.c_name(), name=memb.name,
c_name=c_name(memb.name))
- if special_features != '0':
+ if features != '0':
indent.decrease()
ret += mcgen('''
}
@@ -360,8 +356,9 @@ class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor):
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "%(visit)s.h"
+#include "%(prefix)sqapi-features.h"
''',
- visit=visit))
+ visit=visit, prefix=self._prefix))
self._genh.preamble_add(mcgen('''
#include "qapi/qapi-builtin-visit.h"
#include "%(types)s.h"
diff --git a/scripts/qcow2-to-stdout.py b/scripts/qcow2-to-stdout.py
new file mode 100755
index 0000000..06b7c13
--- /dev/null
+++ b/scripts/qcow2-to-stdout.py
@@ -0,0 +1,449 @@
+#!/usr/bin/env python3
+
+# This tool reads a disk image in any format and converts it to qcow2,
+# writing the result directly to stdout.
+#
+# Copyright (C) 2024 Igalia, S.L.
+#
+# Authors: Alberto Garcia <berto@igalia.com>
+# Madeeha Javed <javed@igalia.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# qcow2 files produced by this script are always arranged like this:
+#
+# - qcow2 header
+# - refcount table
+# - refcount blocks
+# - L1 table
+# - L2 tables
+# - Data clusters
+#
+# A note about variable names: in qcow2 there is one refcount table
+# and one (active) L1 table, although each can occupy several
+# clusters. For the sake of simplicity the code sometimes talks about
+# refcount tables and L1 tables when referring to those clusters.
+
+import argparse
+import errno
+import math
+import os
+import signal
+import struct
+import subprocess
+import sys
+import tempfile
+import time
+from contextlib import contextmanager
+
+QCOW2_DEFAULT_CLUSTER_SIZE = 65536
+QCOW2_DEFAULT_REFCOUNT_BITS = 16
+QCOW2_FEATURE_NAME_TABLE = 0x6803F857
+QCOW2_DATA_FILE_NAME_STRING = 0x44415441
+QCOW2_V3_HEADER_LENGTH = 112 # Header length in QEMU 9.0. Must be a multiple of 8
+QCOW2_INCOMPAT_DATA_FILE_BIT = 2
+QCOW2_AUTOCLEAR_DATA_FILE_RAW_BIT = 1
+QCOW_OFLAG_COPIED = 1 << 63
+QEMU_STORAGE_DAEMON = "qemu-storage-daemon"
+
+
+def bitmap_set(bitmap, idx):
+ bitmap[idx // 8] |= 1 << (idx % 8)
+
+
+def bitmap_is_set(bitmap, idx):
+ return (bitmap[idx // 8] & (1 << (idx % 8))) != 0
+
+
+def bitmap_iterator(bitmap, length):
+ for idx in range(length):
+ if bitmap_is_set(bitmap, idx):
+ yield idx
+
+
+def align_up(num, d):
+ return d * math.ceil(num / d)
+
+
+# Holes in the input file contain only zeroes so we can skip them and
+# save time. This function returns the indexes of the clusters that
+# are known to contain data. Those are the ones that we need to read.
+def clusters_with_data(fd, cluster_size):
+ data_to = 0
+ while True:
+ try:
+ data_from = os.lseek(fd, data_to, os.SEEK_DATA)
+ data_to = align_up(os.lseek(fd, data_from, os.SEEK_HOLE), cluster_size)
+ for idx in range(data_from // cluster_size, data_to // cluster_size):
+ yield idx
+ except OSError as err:
+ if err.errno == errno.ENXIO: # End of file reached
+ break
+ raise err
+
+
+# write_qcow2_content() expects a raw input file. If we have a different
+# format we can use qemu-storage-daemon to make it appear as raw.
+@contextmanager
+def get_input_as_raw_file(input_file, input_format):
+ if input_format == "raw":
+ yield input_file
+ return
+ try:
+ temp_dir = tempfile.mkdtemp()
+ pid_file = os.path.join(temp_dir, "pid")
+ raw_file = os.path.join(temp_dir, "raw")
+ open(raw_file, "wb").close()
+ ret = subprocess.run(
+ [
+ QEMU_STORAGE_DAEMON,
+ "--daemonize",
+ "--pidfile", pid_file,
+ "--blockdev", f"driver=file,node-name=file0,driver=file,filename={input_file},read-only=on",
+ "--blockdev", f"driver={input_format},node-name=disk0,file=file0,read-only=on",
+ "--export", f"type=fuse,id=export0,node-name=disk0,mountpoint={raw_file},writable=off",
+ ],
+ capture_output=True,
+ )
+ if ret.returncode != 0:
+ sys.exit("[Error] Could not start the qemu-storage-daemon:\n" +
+ ret.stderr.decode().rstrip('\n'))
+ yield raw_file
+ finally:
+ # Kill the storage daemon on exit
+ # and remove all temporary files
+ if os.path.exists(pid_file):
+ with open(pid_file, "r") as f:
+ pid = int(f.readline())
+ os.kill(pid, signal.SIGTERM)
+ while os.path.exists(pid_file):
+ time.sleep(0.1)
+ os.unlink(raw_file)
+ os.rmdir(temp_dir)
+
+
+def write_features(cluster, offset, data_file_name):
+ if data_file_name is not None:
+ encoded_name = data_file_name.encode("utf-8")
+ padded_name_len = align_up(len(encoded_name), 8)
+ struct.pack_into(f">II{padded_name_len}s", cluster, offset,
+ QCOW2_DATA_FILE_NAME_STRING,
+ len(encoded_name),
+ encoded_name)
+ offset += 8 + padded_name_len
+
+ qcow2_features = [
+ # Incompatible
+ (0, 0, "dirty bit"),
+ (0, 1, "corrupt bit"),
+ (0, 2, "external data file"),
+ (0, 3, "compression type"),
+ (0, 4, "extended L2 entries"),
+ # Compatible
+ (1, 0, "lazy refcounts"),
+ # Autoclear
+ (2, 0, "bitmaps"),
+ (2, 1, "raw external data"),
+ ]
+ struct.pack_into(">I", cluster, offset, QCOW2_FEATURE_NAME_TABLE)
+ struct.pack_into(">I", cluster, offset + 4, len(qcow2_features) * 48)
+ offset += 8
+ for feature_type, feature_bit, feature_name in qcow2_features:
+ struct.pack_into(">BB46s", cluster, offset,
+ feature_type, feature_bit, feature_name.encode("ascii"))
+ offset += 48
+
+
+def write_qcow2_content(input_file, cluster_size, refcount_bits, data_file_name, data_file_raw):
+ # Some basic values
+ l1_entries_per_table = cluster_size // 8
+ l2_entries_per_table = cluster_size // 8
+ refcounts_per_table = cluster_size // 8
+ refcounts_per_block = cluster_size * 8 // refcount_bits
+
+ # Virtual disk size, number of data clusters and L1 entries
+ disk_size = align_up(os.path.getsize(input_file), 512)
+ total_data_clusters = math.ceil(disk_size / cluster_size)
+ l1_entries = math.ceil(total_data_clusters / l2_entries_per_table)
+ allocated_l1_tables = math.ceil(l1_entries / l1_entries_per_table)
+
+ # Max L1 table size is 32 MB (QCOW_MAX_L1_SIZE in block/qcow2.h)
+ if (l1_entries * 8) > (32 * 1024 * 1024):
+ sys.exit("[Error] The image size is too large. Try using a larger cluster size.")
+
+ # Two bitmaps indicating which L1 and L2 entries are set
+ l1_bitmap = bytearray(allocated_l1_tables * l1_entries_per_table // 8)
+ l2_bitmap = bytearray(l1_entries * l2_entries_per_table // 8)
+ allocated_l2_tables = 0
+ allocated_data_clusters = 0
+
+ if data_file_raw:
+ # If data_file_raw is set then all clusters are allocated and
+ # we don't need to read the input file at all.
+ allocated_l2_tables = l1_entries
+ for idx in range(l1_entries):
+ bitmap_set(l1_bitmap, idx)
+ for idx in range(total_data_clusters):
+ bitmap_set(l2_bitmap, idx)
+ else:
+ # Open the input file for reading
+ fd = os.open(input_file, os.O_RDONLY)
+ zero_cluster = bytes(cluster_size)
+ # Read all the clusters that contain data
+ for idx in clusters_with_data(fd, cluster_size):
+ cluster = os.pread(fd, cluster_size, cluster_size * idx)
+ # If the last cluster is smaller than cluster_size pad it with zeroes
+ if len(cluster) < cluster_size:
+ cluster += bytes(cluster_size - len(cluster))
+ # If a cluster has non-zero data then it must be allocated
+ # in the output file and its L2 entry must be set
+ if cluster != zero_cluster:
+ bitmap_set(l2_bitmap, idx)
+ allocated_data_clusters += 1
+ # Allocated data clusters also need their corresponding L1 entry and L2 table
+ l1_idx = math.floor(idx / l2_entries_per_table)
+ if not bitmap_is_set(l1_bitmap, l1_idx):
+ bitmap_set(l1_bitmap, l1_idx)
+ allocated_l2_tables += 1
+
+ # Total amount of allocated clusters excluding the refcount blocks and table
+ total_allocated_clusters = 1 + allocated_l1_tables + allocated_l2_tables
+ if data_file_name is None:
+ total_allocated_clusters += allocated_data_clusters
+
+ # Clusters allocated for the refcount blocks and table
+ allocated_refcount_blocks = math.ceil(total_allocated_clusters / refcounts_per_block)
+ allocated_refcount_tables = math.ceil(allocated_refcount_blocks / refcounts_per_table)
+
+ # Now we have a problem because allocated_refcount_blocks and allocated_refcount_tables...
+ # (a) increase total_allocated_clusters, and
+ # (b) need to be recalculated when total_allocated_clusters is increased
+ # So we need to repeat the calculation as long as the numbers change
+ while True:
+ new_total_allocated_clusters = total_allocated_clusters + allocated_refcount_tables + allocated_refcount_blocks
+ new_allocated_refcount_blocks = math.ceil(new_total_allocated_clusters / refcounts_per_block)
+ if new_allocated_refcount_blocks > allocated_refcount_blocks:
+ allocated_refcount_blocks = new_allocated_refcount_blocks
+ allocated_refcount_tables = math.ceil(allocated_refcount_blocks / refcounts_per_table)
+ else:
+ break
+
+ # Now that we have the final numbers we can update total_allocated_clusters
+ total_allocated_clusters += allocated_refcount_tables + allocated_refcount_blocks
+
+ # At this point we have the exact number of clusters that the output
+ # image is going to use so we can calculate all the offsets.
+ current_cluster_idx = 1
+
+ refcount_table_offset = current_cluster_idx * cluster_size
+ current_cluster_idx += allocated_refcount_tables
+
+ refcount_block_offset = current_cluster_idx * cluster_size
+ current_cluster_idx += allocated_refcount_blocks
+
+ l1_table_offset = current_cluster_idx * cluster_size
+ current_cluster_idx += allocated_l1_tables
+
+ l2_table_offset = current_cluster_idx * cluster_size
+ current_cluster_idx += allocated_l2_tables
+
+ data_clusters_offset = current_cluster_idx * cluster_size
+
+ # Calculate some values used in the qcow2 header
+ if allocated_l1_tables == 0:
+ l1_table_offset = 0
+
+ hdr_cluster_bits = int(math.log2(cluster_size))
+ hdr_refcount_bits = int(math.log2(refcount_bits))
+ hdr_length = QCOW2_V3_HEADER_LENGTH
+ hdr_incompat_features = 0
+ if data_file_name is not None:
+ hdr_incompat_features |= 1 << QCOW2_INCOMPAT_DATA_FILE_BIT
+ hdr_autoclear_features = 0
+ if data_file_raw:
+ hdr_autoclear_features |= 1 << QCOW2_AUTOCLEAR_DATA_FILE_RAW_BIT
+
+ ### Write qcow2 header
+ cluster = bytearray(cluster_size)
+ struct.pack_into(">4sIQIIQIIQQIIQQQQII", cluster, 0,
+ b"QFI\xfb", # QCOW magic string
+ 3, # version
+ 0, # backing file offset
+ 0, # backing file sizes
+ hdr_cluster_bits,
+ disk_size,
+ 0, # encryption method
+ l1_entries,
+ l1_table_offset,
+ refcount_table_offset,
+ allocated_refcount_tables,
+ 0, # number of snapshots
+ 0, # snapshot table offset
+ hdr_incompat_features,
+ 0, # compatible features
+ hdr_autoclear_features,
+ hdr_refcount_bits,
+ hdr_length,
+ )
+
+ write_features(cluster, hdr_length, data_file_name)
+
+ sys.stdout.buffer.write(cluster)
+
+ ### Write refcount table
+ cur_offset = refcount_block_offset
+ remaining_refcount_table_entries = allocated_refcount_blocks # Each entry is a pointer to a refcount block
+ while remaining_refcount_table_entries > 0:
+ cluster = bytearray(cluster_size)
+ to_write = min(remaining_refcount_table_entries, refcounts_per_table)
+ remaining_refcount_table_entries -= to_write
+ for idx in range(to_write):
+ struct.pack_into(">Q", cluster, idx * 8, cur_offset)
+ cur_offset += cluster_size
+ sys.stdout.buffer.write(cluster)
+
+ ### Write refcount blocks
+ remaining_refcount_block_entries = total_allocated_clusters # One entry for each allocated cluster
+ for tbl in range(allocated_refcount_blocks):
+ cluster = bytearray(cluster_size)
+ to_write = min(remaining_refcount_block_entries, refcounts_per_block)
+ remaining_refcount_block_entries -= to_write
+ # All refcount entries contain the number 1. The only difference
+ # is their bit width, defined when the image is created.
+ for idx in range(to_write):
+ if refcount_bits == 64:
+ struct.pack_into(">Q", cluster, idx * 8, 1)
+ elif refcount_bits == 32:
+ struct.pack_into(">L", cluster, idx * 4, 1)
+ elif refcount_bits == 16:
+ struct.pack_into(">H", cluster, idx * 2, 1)
+ elif refcount_bits == 8:
+ cluster[idx] = 1
+ elif refcount_bits == 4:
+ cluster[idx // 2] |= 1 << ((idx % 2) * 4)
+ elif refcount_bits == 2:
+ cluster[idx // 4] |= 1 << ((idx % 4) * 2)
+ elif refcount_bits == 1:
+ cluster[idx // 8] |= 1 << (idx % 8)
+ sys.stdout.buffer.write(cluster)
+
+ ### Write L1 table
+ cur_offset = l2_table_offset
+ for tbl in range(allocated_l1_tables):
+ cluster = bytearray(cluster_size)
+ for idx in range(l1_entries_per_table):
+ l1_idx = tbl * l1_entries_per_table + idx
+ if bitmap_is_set(l1_bitmap, l1_idx):
+ struct.pack_into(">Q", cluster, idx * 8, cur_offset | QCOW_OFLAG_COPIED)
+ cur_offset += cluster_size
+ sys.stdout.buffer.write(cluster)
+
+ ### Write L2 tables
+ cur_offset = data_clusters_offset
+ for tbl in range(l1_entries):
+ # Skip the empty L2 tables. We can identify them because
+ # there is no L1 entry pointing at them.
+ if bitmap_is_set(l1_bitmap, tbl):
+ cluster = bytearray(cluster_size)
+ for idx in range(l2_entries_per_table):
+ l2_idx = tbl * l2_entries_per_table + idx
+ if bitmap_is_set(l2_bitmap, l2_idx):
+ if data_file_name is None:
+ struct.pack_into(">Q", cluster, idx * 8, cur_offset | QCOW_OFLAG_COPIED)
+ cur_offset += cluster_size
+ else:
+ struct.pack_into(">Q", cluster, idx * 8, (l2_idx * cluster_size) | QCOW_OFLAG_COPIED)
+ sys.stdout.buffer.write(cluster)
+
+ ### Write data clusters
+ if data_file_name is None:
+ for idx in bitmap_iterator(l2_bitmap, total_data_clusters):
+ cluster = os.pread(fd, cluster_size, cluster_size * idx)
+ # If the last cluster is smaller than cluster_size pad it with zeroes
+ if len(cluster) < cluster_size:
+ cluster += bytes(cluster_size - len(cluster))
+ sys.stdout.buffer.write(cluster)
+
+ if not data_file_raw:
+ os.close(fd)
+
+
+def main():
+ # Command-line arguments
+ parser = argparse.ArgumentParser(
+ description="This program converts a QEMU disk image to qcow2 "
+ "and writes it to the standard output"
+ )
+ parser.add_argument("input_file", help="name of the input file")
+ parser.add_argument(
+ "-f",
+ dest="input_format",
+ metavar="input_format",
+ help="format of the input file (default: raw)",
+ default="raw",
+ )
+ parser.add_argument(
+ "-c",
+ dest="cluster_size",
+ metavar="cluster_size",
+ help=f"qcow2 cluster size (default: {QCOW2_DEFAULT_CLUSTER_SIZE})",
+ default=QCOW2_DEFAULT_CLUSTER_SIZE,
+ type=int,
+ choices=[1 << x for x in range(9, 22)],
+ )
+ parser.add_argument(
+ "-r",
+ dest="refcount_bits",
+ metavar="refcount_bits",
+ help=f"width of the reference count entries (default: {QCOW2_DEFAULT_REFCOUNT_BITS})",
+ default=QCOW2_DEFAULT_REFCOUNT_BITS,
+ type=int,
+ choices=[1 << x for x in range(7)],
+ )
+ parser.add_argument(
+ "-d",
+ dest="data_file",
+ help="create an image with input_file as an external data file",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-R",
+ dest="data_file_raw",
+ help="enable data_file_raw on the generated image (implies -d)",
+ action="store_true",
+ )
+ args = parser.parse_args()
+
+ if args.data_file_raw:
+ args.data_file = True
+
+ if not os.path.isfile(args.input_file):
+ sys.exit(f"[Error] {args.input_file} does not exist or is not a regular file.")
+
+ if args.data_file and args.input_format != "raw":
+ sys.exit("[Error] External data files can only be used with raw input images")
+
+ # A 512 byte header is too small for the data file name extension
+ if args.data_file and args.cluster_size == 512:
+ sys.exit("[Error] External data files require a larger cluster size")
+
+ if sys.stdout.isatty():
+ sys.exit("[Error] Refusing to write to a tty. Try redirecting stdout.")
+
+ if args.data_file:
+ data_file_name = args.input_file
+ else:
+ data_file_name = None
+
+ with get_input_as_raw_file(args.input_file, args.input_format) as raw_file:
+ write_qcow2_content(
+ raw_file,
+ args.cluster_size,
+ args.refcount_bits,
+ data_file_name,
+ args.data_file_raw,
+ )
+
+
+if __name__ == "__main__":
+ main()
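The trickiest arithmetic in the script is the refcount sizing: adding refcount blocks and tables allocates more clusters, which may in turn require more refcount blocks, so the loop iterates until the numbers stop growing. The same computation, pulled out as a stand-alone sketch:

    import math

    def refcount_clusters(data_and_meta_clusters, refcounts_per_block,
                          refcounts_per_table):
        blocks = math.ceil(data_and_meta_clusters / refcounts_per_block)
        tables = math.ceil(blocks / refcounts_per_table)
        while True:
            total = data_and_meta_clusters + blocks + tables
            new_blocks = math.ceil(total / refcounts_per_block)
            if new_blocks <= blocks:
                return blocks, tables
            blocks = new_blocks
            tables = math.ceil(blocks / refcounts_per_table)

    # 64 KiB clusters, 16-bit refcounts: 32768 refcounts/block, 8192 entries/table
    print(refcount_clusters(1000, 32768, 8192))   # (1, 1)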
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
index 6ef9f11..5fd462b 100755
--- a/scripts/qemu-binfmt-conf.sh
+++ b/scripts/qemu-binfmt-conf.sh
@@ -144,35 +144,35 @@ loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x
loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
loongarch64_family=loongarch
-qemu_get_family() {
- cpu=${HOST_ARCH:-$(uname -m)}
+# Converts the name of a host CPU architecture to the corresponding QEMU
+# target.
+#
+# FIXME: This can probably be simplified a lot by dropping most entries.
+# Remember that the script is only used on Linux, so we only need to
+# handle the strings Linux uses to report the host CPU architecture.
+qemu_normalize() {
+ cpu="$1"
case "$cpu" in
- amd64|i386|i486|i586|i686|i86pc|BePC|x86_64)
+ i[3-6]86)
echo "i386"
;;
- mips*)
- echo "mips"
+ amd64)
+ echo "x86_64"
;;
- "Power Macintosh"|ppc64|powerpc|ppc)
+ powerpc)
echo "ppc"
;;
- ppc64el|ppc64le)
- echo "ppcle"
+ ppc64el)
+ echo "ppc64le"
;;
- arm|armel|armhf|arm64|armv[4-9]*l|aarch64)
+ armel|armhf|armv[4-9]*l)
echo "arm"
;;
- armeb|armv[4-9]*b|aarch64_be)
+ armv[4-9]*b)
echo "armeb"
;;
- sparc*)
- echo "sparc"
- ;;
- riscv*)
- echo "riscv"
- ;;
- loongarch*)
- echo "loongarch"
+ arm64)
+ echo "aarch64"
;;
*)
echo "$cpu"
@@ -205,6 +205,9 @@ Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU]
--persistent: if yes, the interpreter is loaded when binfmt is
configured and remains in memory. All future uses
are cloned from the open file.
+ --ignore-family: if yes, it is assumed that the host CPU (e.g. riscv64)
+ can't natively run programs targeting a CPU that is
+ part of the same family (e.g. riscv32).
--preserve-argv0 preserve argv[0]
To import templates with update-binfmts, use :
@@ -309,7 +312,13 @@ EOF
qemu_set_binfmts() {
# probe cpu type
- host_family=$(qemu_get_family)
+ host_cpu=$(qemu_normalize ${HOST_ARCH:-$(uname -m)})
+ host_family=$(eval echo \$${host_cpu}_family)
+
+ if [ "$host_family" = "" ] ; then
+ echo "INTERNAL ERROR: unknown host cpu $host_cpu" 1>&2
+ exit 1
+ fi
# register the interpreter for each cpu except for the native one
@@ -318,20 +327,28 @@ qemu_set_binfmts() {
mask=$(eval echo \$${cpu}_mask)
family=$(eval echo \$${cpu}_family)
+ target="$cpu"
+ if [ "$cpu" = "i486" ] ; then
+ target="i386"
+ fi
+
+ qemu="$QEMU_PATH/qemu-$target$QEMU_SUFFIX"
+
if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then
echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2
continue
fi
- qemu="$QEMU_PATH/qemu-$cpu"
- if [ "$cpu" = "i486" ] ; then
- qemu="$QEMU_PATH/qemu-i386"
+ if [ "$host_family" = "$family" ] ; then
+ # When --ignore-family is used, we have to generate rules even
+ # for targets that are in the same family as the host CPU. The
+ # only exception is of course when the CPU types exactly match
+ if [ "$target" = "$host_cpu" ] || [ "$IGNORE_FAMILY" = "no" ] ; then
+ continue
+ fi
fi
- qemu="$qemu$QEMU_SUFFIX"
- if [ "$host_family" != "$family" ] ; then
- $BINFMT_SET
- fi
+ $BINFMT_SET
done
}
@@ -346,10 +363,11 @@ CREDENTIAL=no
PERSISTENT=no
PRESERVE_ARG0=no
QEMU_SUFFIX=""
+IGNORE_FAMILY=no
_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
-persistent:,preserve-argv0:"
-options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
+persistent:,preserve-argv0:,ignore-family:"
+options=$(getopt -o ds:Q:S:e:hc:p:g:F:i: -l ${_longopts} -- "$@")
eval set -- "$options"
while true ; do
@@ -409,6 +427,10 @@ while true ; do
shift
PRESERVE_ARG0="$1"
;;
+ -i|--ignore-family)
+ shift
+ IGNORE_FAMILY="$1"
+ ;;
*)
break
;;
diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py
index 4d2a9f6..cfae94a 100644
--- a/scripts/qemu-gdb.py
+++ b/scripts/qemu-gdb.py
@@ -45,3 +45,5 @@ coroutine.CoroutineBt()
# Default to silently passing through SIGUSR1, because QEMU sends it
# to itself a lot.
gdb.execute('handle SIGUSR1 pass noprint nostop')
+# Always print full stack for python errors, easier to debug and report issues
+gdb.execute('set python print-stack full')
diff --git a/scripts/qemu-guest-agent/fsfreeze-hook b/scripts/qemu-guest-agent/fsfreeze-hook
index 13aafd4..c1feb6f 100755
--- a/scripts/qemu-guest-agent/fsfreeze-hook
+++ b/scripts/qemu-guest-agent/fsfreeze-hook
@@ -19,15 +19,43 @@ is_ignored_file() {
return 1
}
+USE_SYSLOG=0
+# if the log file is not writable, fall back to syslog
+[ ! -w "$LOGFILE" ] && USE_SYSLOG=1
+# try to update the log file and fall back to syslog if that fails
+touch "$LOGFILE" &>/dev/null || USE_SYSLOG=1
+
+# Log a message to the log file, falling back to syslog if it is unusable
+log_message() {
+ local message="$1"
+ if [ "$USE_SYSLOG" -eq 0 ]; then
+ printf "%s: %s\n" "$(date)" "$message" >>"$LOGFILE"
+ else
+ logger -t qemu-ga-freeze-hook "$message"
+ fi
+}
+
# Iterate executables in directory "fsfreeze-hook.d" with the specified args
[ ! -d "$FSFREEZE_D" ] && exit 0
+
for file in "$FSFREEZE_D"/* ; do
is_ignored_file "$file" && continue
[ -x "$file" ] || continue
- printf "$(date): execute $file $@\n" >>$LOGFILE
- "$file" "$@" >>$LOGFILE 2>&1
- STATUS=$?
- printf "$(date): $file finished with status=$STATUS\n" >>$LOGFILE
+
+ log_message "Executing $file $@"
+ if [ "$USE_SYSLOG" -eq 0 ]; then
+ "$file" "$@" >>"$LOGFILE" 2>&1
+ STATUS=$?
+ else
+ "$file" "$@" 2>&1 | logger -t qemu-ga-freeze-hook
+ STATUS=${PIPESTATUS[0]}
+ fi
+
+ if [ $STATUS -ne 0 ]; then
+ log_message "Error: $file finished with status=$STATUS"
+ else
+ log_message "$file finished successfully"
+ fi
done
exit 0
diff --git a/scripts/qemu-plugin-symbols.py b/scripts/qemu-plugin-symbols.py
new file mode 100755
index 0000000..e285ebb
--- /dev/null
+++ b/scripts/qemu-plugin-symbols.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Extract QEMU Plugin API symbols from a header file
+#
+# Copyright 2024 Linaro Ltd
+#
+# Author: Pierrick Bouvier <pierrick.bouvier@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import argparse
+import re
+
+def extract_symbols(plugin_header):
+ with open(plugin_header) as file:
+ content = file.read()
+ # Remove QEMU_PLUGIN_API macro definition.
+ content = content.replace('#define QEMU_PLUGIN_API', '')
+ expected = content.count('QEMU_PLUGIN_API')
+ # Find last word between QEMU_PLUGIN_API and (, matching on several lines.
+ # We use *? non-greedy quantifier.
+ syms = re.findall(r'QEMU_PLUGIN_API.*?(\w+)\s*\(', content, re.DOTALL)
+ syms.sort()
+ # Ensure we found as many symbols as API markers.
+ assert len(syms) == expected
+ return syms
+
+def main() -> None:
+ parser = argparse.ArgumentParser(description='Extract QEMU plugin symbols')
+ parser.add_argument('plugin_header', help='Path to QEMU plugin header.')
+ args = parser.parse_args()
+
+ syms = extract_symbols(args.plugin_header)
+
+ print('{')
+ for s in syms:
+ print(" {};".format(s))
+ print('};')
+
+if __name__ == '__main__':
+ main()
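The extraction is a single non-greedy regex over the whole header. A quick check of it on a fabricated header fragment (the function names are just examples):

    import re

    content = """
    QEMU_PLUGIN_API
    void qemu_plugin_outs(const char *string);

    QEMU_PLUGIN_API
    int qemu_plugin_num_vcpus(void);
    """
    syms = re.findall(r'QEMU_PLUGIN_API.*?(\w+)\s*\(', content, re.DOTALL)
    print(sorted(syms))   # ['qemu_plugin_num_vcpus', 'qemu_plugin_outs']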
diff --git a/scripts/qemu-trace-stap b/scripts/qemu-trace-stap
index eb6e951..e983460 100755
--- a/scripts/qemu-trace-stap
+++ b/scripts/qemu-trace-stap
@@ -56,6 +56,7 @@ def tapset_dir(binary):
def cmd_run(args):
+ stap = which("stap")
prefix = probe_prefix(args.binary)
tapsets = tapset_dir(args.binary)
@@ -76,7 +77,7 @@ def cmd_run(args):
# We request an 8MB buffer, since the stap default 1MB buffer
# can be easily overflowed by frequently firing QEMU traces
- stapargs = ["stap", "-s", "8", "-I", tapsets ]
+ stapargs = [stap, "-s", "8", "-I", tapsets ]
if args.pid is not None:
stapargs.extend(["-x", args.pid])
stapargs.extend(["-e", script])
@@ -84,6 +85,7 @@ def cmd_run(args):
def cmd_list(args):
+ stap = which("stap")
tapsets = tapset_dir(args.binary)
if args.verbose:
@@ -96,7 +98,7 @@ def cmd_list(args):
if verbose:
print("Listing probes with name '%s'" % script)
- proc = subprocess.Popen(["stap", "-I", tapsets, "-l", script],
+ proc = subprocess.Popen([stap, "-I", tapsets, "-l", script],
stdout=subprocess.PIPE,
universal_newlines=True)
out, err = proc.communicate()
diff --git a/scripts/qemugdb/coroutine.py b/scripts/qemugdb/coroutine.py
index 7db46d4..e98fc48 100644
--- a/scripts/qemugdb/coroutine.py
+++ b/scripts/qemugdb/coroutine.py
@@ -13,28 +13,9 @@ import gdb
VOID_PTR = gdb.lookup_type('void').pointer()
-def get_fs_base():
- '''Fetch %fs base value using arch_prctl(ARCH_GET_FS). This is
- pthread_self().'''
- # %rsp - 120 is scratch space according to the SystemV ABI
- old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
- gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True)
- fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
- gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
- return fs_base
-
def pthread_self():
- '''Fetch pthread_self() from the glibc start_thread function.'''
- f = gdb.newest_frame()
- while f.name() != 'start_thread':
- f = f.older()
- if f is None:
- return get_fs_base()
-
- try:
- return f.read_var("arg")
- except ValueError:
- return get_fs_base()
+ '''Fetch the base address of TLS.'''
+ return gdb.parse_and_eval("$fs_base")
def get_glibc_pointer_guard():
'''Fetch glibc pointer guard value'''
@@ -65,9 +46,60 @@ def get_jmpbuf_regs(jmpbuf):
'r15': jmpbuf[JB_R15],
'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) }
-def bt_jmpbuf(jmpbuf):
- '''Backtrace a jmpbuf'''
- regs = get_jmpbuf_regs(jmpbuf)
+def symbol_lookup(addr):
+ # Example: "__clone3 + 44 in section .text of /lib64/libc.so.6"
+ result = gdb.execute(f"info symbol {hex(addr)}", to_string=True).strip()
+ try:
+ if "+" in result:
+ (func, result) = result.split(" + ")
+ (offset, result) = result.split(" in ")
+ else:
+ offset = "0"
+ (func, result) = result.split(" in ")
+ func_str = f"{func}<+{offset}> ()"
+ except:
+ return f"??? ({result})"
+
+ # Example: Line 321 of "../util/coroutine-ucontext.c" starts at address
+ # 0x55cf3894d993 <qemu_coroutine_switch+99> and ends at 0x55cf3894d9ab
+ # <qemu_coroutine_switch+123>.
+ result = gdb.execute(f"info line *{hex(addr)}", to_string=True).strip()
+ if not result.startswith("Line "):
+ return func_str
+ result = result[5:]
+
+ try:
+ result = result.split(" starts ")[0]
+ (line, path) = result.split(" of ")
+ path = path.replace("\"", "")
+ except:
+ return func_str
+
+ return f"{func_str} at {path}:{line}"
+
+def dump_backtrace(regs):
+ '''
+ Backtrace dump from raw registers, mimicking GDB's 'bt' command.
+ '''
+ # Here, only rbp and rip matter.
+ rbp = regs['rbp']
+ rip = regs['rip']
+ i = 0
+
+ while rbp:
+ # For all return addresses on the stack, look up the symbol/line
+ # of the CALL instruction, because the return address points at
+ # the instruction after the CALL. Subtracting 1 lands inside the
+ # CALL regardless of its encoded length.
+ print(f"#{i} {hex(rip)} in {symbol_lookup(rip if i == 0 else rip-1)}")
+ rip = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)} + 8)")
+ rbp = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)})")
+ i += 1
+
+def dump_backtrace_live(regs):
+ '''
+ Backtrace dump with gdb's 'bt' command, only usable in a live session.
+ '''
old = dict()
# remember current stack frame and select the topmost
@@ -88,6 +120,17 @@ def bt_jmpbuf(jmpbuf):
selected_frame.select()
+def bt_jmpbuf(jmpbuf):
+ '''Backtrace a jmpbuf'''
+ regs = get_jmpbuf_regs(jmpbuf)
+ try:
+ # This reuses gdb's "bt" command, which can be slightly prettier
+ # but only works with live sessions.
+ dump_backtrace_live(regs)
+ except:
+ # If the above doesn't work, fall back to the poor man's unwind
+ dump_backtrace(regs)
+
def co_cast(co):
return co.cast(gdb.lookup_type('CoroutineUContext').pointer())
@@ -120,10 +163,15 @@ class CoroutineBt(gdb.Command):
gdb.execute("bt")
- if gdb.parse_and_eval("qemu_in_coroutine()") == False:
- return
+ try:
+ # This only works with a live session
+ co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
+ except:
+ # Fall back to the hard-coded ucontext vars if this is a coredump
+ co_ptr = gdb.parse_and_eval("co_tls_current")
- co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
+ if co_ptr == False:
+ return
while True:
co = co_cast(co_ptr)
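symbol_lookup() only string-parses gdb's 'info symbol' and 'info line' output; the splitting logic applied to the sample quoted in its comment works out as follows (no gdb needed for this part):

    result = "__clone3 + 44 in section .text of /lib64/libc.so.6"
    func, rest = result.split(" + ")
    offset, rest = rest.split(" in ")
    print(f"{func}<+{offset}> ()")   # __clone3<+44> ()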
diff --git a/scripts/qom-cast-macro-clean-cocci-gen.py b/scripts/qom-cast-macro-clean-cocci-gen.py
index 2fa8438..5aa51d0 100644
--- a/scripts/qom-cast-macro-clean-cocci-gen.py
+++ b/scripts/qom-cast-macro-clean-cocci-gen.py
@@ -13,8 +13,11 @@
# --in-place \
# --dir .
#
-# SPDX-FileContributor: Philippe Mathieu-Daudé <philmd@linaro.org>
-# SPDX-FileCopyrightText: 2023 Linaro Ltd.
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Authors:
+# Philippe Mathieu-Daudé
+#
# SPDX-License-Identifier: GPL-2.0-or-later
import re
diff --git a/scripts/rdma-migration-helper.sh b/scripts/rdma-migration-helper.sh
new file mode 100755
index 0000000..d784d15
--- /dev/null
+++ b/scripts/rdma-migration-helper.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+# Copied from blktests
+get_ipv4_addr()
+{
+ ip -4 -o addr show dev "$1" |
+ sed -n 's/.*[[:blank:]]inet[[:blank:]]*\([^[:blank:]/]*\).*/\1/p' |
+ head -1 | tr -d '\n'
+}
+
+get_ipv6_addr() {
+ ipv6=$(ip -6 -o addr show dev "$1" |
+ sed -n 's/.*[[:blank:]]inet6[[:blank:]]*\([^[:blank:]/]*\).*/\1/p' |
+ head -1 | tr -d '\n')
+
+ [ $? -eq 0 ] || return
+
+ if [[ "$ipv6" =~ ^fe80: ]]; then
+ echo -n "[$ipv6%$1]"
+ else
+ echo -n "[$ipv6]"
+ fi
+}
+
+# existing rdma interfaces
+rdma_interfaces()
+{
+ rdma link show | sed -nE 's/^link .* netdev ([^ ]+).*$/\1 /p' |
+ grep -Ev '^(lo|tun|tap)'
+}
+
+# existing valid ipv4 interfaces
+ipv4_interfaces()
+{
+ ip -o addr show | awk '/inet / {print $2}' | grep -Ev '^(lo|tun|tap)'
+}
+
+ipv6_interfaces()
+{
+ ip -o addr show | awk '/inet6 / {print $2}' | grep -Ev '^(lo|tun|tap)'
+}
+
+rdma_rxe_detect()
+{
+ family=$1
+ for r in $(rdma_interfaces)
+ do
+ "$family"_interfaces | grep -qw $r && get_"$family"_addr $r && return
+ done
+
+ return 1
+}
+
+rdma_rxe_setup()
+{
+ family=$1
+ for i in $("$family"_interfaces)
+ do
+ if rdma_interfaces | grep -qw $i; then
+ echo "$family: Reuse the existing rdma/rxe ${i}_rxe" \
+ "for $i with $(get_"$family"_addr $i)"
+ return
+ fi
+
+ rdma link add "${i}_rxe" type rxe netdev "$i" && {
+ echo "$family: Setup new rdma/rxe ${i}_rxe" \
+ "for $i with $(get_"$family"_addr $i)"
+ return
+ }
+ done
+
+ echo "$family: Failed to setup any new rdma/rxe link" >&2
+ return 1
+}
+
+rdma_rxe_clean()
+{
+ modprobe -r rdma_rxe
+}
+
+IP_FAMILY=${IP_FAMILY:-ipv4}
+if [ "$IP_FAMILY" != "ipv6" ] && [ "$IP_FAMILY" != "ipv4" ]; then
+ echo "Unknown ip family '$IP_FAMILY', only ipv4 or ipv6 is supported." >&2
+ exit 1
+fi
+
+operation=${1:-detect}
+
+command -v rdma >/dev/null || {
+ echo "Command 'rdma' is not available, please install it first." >&2
+ exit 1
+}
+
+if [ "$operation" == "setup" ] || [ "$operation" == "clean" ]; then
+ [ "$UID" == 0 ] || {
+ echo "Root privilege is required to setup/clean a rdma/rxe link" >&2
+ exit 1
+ }
+ if [ "$operation" == "setup" ]; then
+ rdma_rxe_setup ipv4
+ rdma_rxe_setup ipv6
+ else
+ rdma_rxe_clean
+ fi
+elif [ "$operation" == "detect" ]; then
+ rdma_rxe_detect "$IP_FAMILY"
+else
+ echo "Usage: $0 [setup | detect | clean]"
+fi
diff --git a/scripts/replay-dump.py b/scripts/replay-dump.py
index d668193..4ce7ff5 100755
--- a/scripts/replay-dump.py
+++ b/scripts/replay-dump.py
@@ -20,6 +20,8 @@
import argparse
import struct
+import os
+import sys
from collections import namedtuple
from os import path
@@ -99,7 +101,7 @@ def call_decode(table, index, dumpfile):
print("Could not decode index: %d" % (index))
print("Entry is: %s" % (decoder))
print("Decode Table is:\n%s" % (table))
- return False
+ raise(Exception("unknown event"))
else:
return decoder.fn(decoder.eid, decoder.name, dumpfile)
@@ -120,7 +122,7 @@ def print_event(eid, name, string=None, event_count=None):
def decode_unimp(eid, name, _unused_dumpfile):
"Unimplemented decoder, will trigger exit"
print("%s not handled - will now stop" % (name))
- return False
+ raise(Exception("unhandled event"))
def decode_plain(eid, name, _unused_dumpfile):
"Plain events without additional data"
@@ -134,6 +136,30 @@ def swallow_async_qword(eid, name, dumpfile):
print(" %s(%d) @ %d" % (name, eid, step_id))
return True
+def swallow_bytes(eid, name, dumpfile, nr):
+ """Swallow nr bytes of data without looking at it"""
+ dumpfile.seek(nr, os.SEEK_CUR)
+
+total_insns = 0
+
+def decode_instruction(eid, name, dumpfile):
+ global total_insns
+ ins_diff = read_dword(dumpfile)
+ total_insns += ins_diff
+ print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns))
+ return True
+
+def decode_interrupt(eid, name, dumpfile):
+ print_event(eid, name)
+ return True
+
+def decode_exception(eid, name, dumpfile):
+ print_event(eid, name)
+ return True
+
+# v12 does away with the additional event byte and encodes it in the main type
+# Between v8 and v9, REPLAY_ASYNC_BH_ONESHOT was added, but we don't decode
+# those versions so leave it out.
async_decode_table = [ Decoder(0, "REPLAY_ASYNC_EVENT_BH", swallow_async_qword),
Decoder(1, "REPLAY_ASYNC_INPUT", decode_unimp),
Decoder(2, "REPLAY_ASYNC_INPUT_SYNC", decode_unimp),
@@ -142,8 +168,8 @@ async_decode_table = [ Decoder(0, "REPLAY_ASYNC_EVENT_BH", swallow_async_qword),
Decoder(5, "REPLAY_ASYNC_EVENT_NET", decode_unimp),
]
# See replay_read_events/replay_read_event
-def decode_async(eid, name, dumpfile):
- """Decode an ASYNC event"""
+def decode_async_old(eid, name, dumpfile):
+ """Decode an ASYNC event (pre-v8)"""
print_event(eid, name)
@@ -157,13 +183,37 @@ def decode_async(eid, name, dumpfile):
return call_decode(async_decode_table, async_event_kind, dumpfile)
-total_insns = 0
+def decode_async_bh(eid, name, dumpfile):
+ op_id = read_qword(dumpfile)
+ print_event(eid, name)
+ return True
-def decode_instruction(eid, name, dumpfile):
- global total_insns
- ins_diff = read_dword(dumpfile)
- total_insns += ins_diff
- print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns))
+def decode_async_bh_oneshot(eid, name, dumpfile):
+ op_id = read_qword(dumpfile)
+ print_event(eid, name)
+ return True
+
+def decode_async_char_read(eid, name, dumpfile):
+ char_id = read_byte(dumpfile)
+ size = read_dword(dumpfile)
+ print_event(eid, name, "device:%x chars:%s" % (char_id, dumpfile.read(size)))
+ return True
+
+def decode_async_block(eid, name, dumpfile):
+ op_id = read_qword(dumpfile)
+ print_event(eid, name)
+ return True
+
+def decode_async_net(eid, name, dumpfile):
+ net_id = read_byte(dumpfile)
+ flags = read_dword(dumpfile)
+ size = read_dword(dumpfile)
+ swallow_bytes(eid, name, dumpfile, size)
+ print_event(eid, name, "net:%x flags:%x bytes:%d" % (net_id, flags, size))
+ return True
+
+def decode_shutdown(eid, name, dumpfile):
+ print_event(eid, name)
return True
def decode_char_write(eid, name, dumpfile):
@@ -177,7 +227,22 @@ def decode_audio_out(eid, name, dumpfile):
print_event(eid, name, "%d" % (audio_data))
return True
-def decode_checkpoint(eid, name, dumpfile):
+def decode_random(eid, name, dumpfile):
+ ret = read_dword(dumpfile)
+ size = read_dword(dumpfile)
+ swallow_bytes(eid, name, dumpfile, size)
+ if (ret):
+ print_event(eid, name, "%d bytes (getrandom failed)" % (size))
+ else:
+ print_event(eid, name, "%d bytes" % (size))
+ return True
+
+def decode_clock(eid, name, dumpfile):
+ clock_data = read_qword(dumpfile)
+ print_event(eid, name, "0x%x" % (clock_data))
+ return True
+
+def __decode_checkpoint(eid, name, dumpfile, old):
"""Decode a checkpoint.
Checkpoints contain a series of async events with their own specific data.
@@ -189,38 +254,33 @@ def decode_checkpoint(eid, name, dumpfile):
# if the next event is EVENT_ASYNC there are a bunch of
# async events to read, otherwise we are done
- if next_event != 3:
- print_event(eid, name, "no additional data", event_number)
- else:
+ if (old and next_event == 3) or (not old and next_event >= 3 and next_event <= 9):
print_event(eid, name, "more data follows", event_number)
+ else:
+ print_event(eid, name, "no additional data", event_number)
replay_state.reuse_event(next_event)
return True
+def decode_checkpoint_old(eid, name, dumpfile):
+ return __decode_checkpoint(eid, name, dumpfile, False)
+
+def decode_checkpoint(eid, name, dumpfile):
+ return __decode_checkpoint(eid, name, dumpfile, True)
+
def decode_checkpoint_init(eid, name, dumpfile):
print_event(eid, name)
return True
-def decode_interrupt(eid, name, dumpfile):
+def decode_end(eid, name, dumpfile):
print_event(eid, name)
- return True
-
-def decode_clock(eid, name, dumpfile):
- clock_data = read_qword(dumpfile)
- print_event(eid, name, "0x%x" % (clock_data))
- return True
-
-def decode_random(eid, name, dumpfile):
- ret = read_dword(dumpfile)
- data = read_array(dumpfile)
- print_event(eid, "%d bytes of random data" % len(data))
- return True
+ return False
# pre-MTTCG merge
v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_plain),
- Decoder(3, "EVENT_ASYNC", decode_async),
+ Decoder(3, "EVENT_ASYNC", decode_async_old),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_CHAR_WRITE", decode_char_write),
Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp),
@@ -242,7 +302,7 @@ v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_plain),
- Decoder(3, "EVENT_ASYNC", decode_async),
+ Decoder(3, "EVENT_ASYNC", decode_async_old),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_CHAR_WRITE", decode_char_write),
Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp),
@@ -266,7 +326,7 @@ v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
v7_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_unimp),
- Decoder(3, "EVENT_ASYNC", decode_async),
+ Decoder(3, "EVENT_ASYNC", decode_async_old),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp),
Decoder(6, "EVENT_SHUTDOWN_HOST_QMP", decode_unimp),
@@ -296,32 +356,31 @@ v7_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
v12_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
- Decoder(2, "EVENT_EXCEPTION", decode_plain),
- Decoder(3, "EVENT_ASYNC", decode_async),
- Decoder(4, "EVENT_ASYNC", decode_async),
- Decoder(5, "EVENT_ASYNC", decode_async),
- Decoder(6, "EVENT_ASYNC", decode_async),
- Decoder(6, "EVENT_ASYNC", decode_async),
- Decoder(8, "EVENT_ASYNC", decode_async),
- Decoder(9, "EVENT_ASYNC", decode_async),
- Decoder(10, "EVENT_ASYNC", decode_async),
- Decoder(11, "EVENT_SHUTDOWN", decode_unimp),
- Decoder(12, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp),
- Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_unimp),
- Decoder(14, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_unimp),
- Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_unimp),
- Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_unimp),
- Decoder(16, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_unimp),
- Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp),
- Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp),
- Decoder(19, "EVENT_SHUTDOWN_GUEST_SUBSYSTEM_RESET", decode_unimp),
- Decoder(20, "EVENT_SHUTDOWN_GUEST_SNAPSHOT_LOAD", decode_unimp),
- Decoder(21, "EVENT_SHUTDOWN___MAX", decode_unimp),
+ Decoder(2, "EVENT_EXCEPTION", decode_exception),
+ Decoder(3, "EVENT_ASYNC_BH", decode_async_bh),
+ Decoder(4, "EVENT_ASYNC_BH_ONESHOT", decode_async_bh_oneshot),
+ Decoder(5, "EVENT_ASYNC_INPUT", decode_unimp),
+ Decoder(6, "EVENT_ASYNC_INPUT_SYNC", decode_unimp),
+ Decoder(7, "EVENT_ASYNC_CHAR_READ", decode_async_char_read),
+ Decoder(8, "EVENT_ASYNC_BLOCK", decode_async_block),
+ Decoder(9, "EVENT_ASYNC_NET", decode_async_net),
+ Decoder(10, "EVENT_SHUTDOWN", decode_shutdown),
+ Decoder(11, "EVENT_SHUTDOWN_HOST_ERR", decode_shutdown),
+ Decoder(12, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_shutdown),
+ Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_shutdown),
+ Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_shutdown),
+ Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_shutdown),
+ Decoder(16, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_shutdown),
+ Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_shutdown),
+ Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_shutdown),
+ Decoder(19, "EVENT_SHUTDOWN_SUBSYS_RESET", decode_shutdown),
+ Decoder(20, "EVENT_SHUTDOWN_SNAPSHOT_LOAD", decode_shutdown),
+ Decoder(21, "EVENT_SHUTDOWN___MAX", decode_shutdown),
Decoder(22, "EVENT_CHAR_WRITE", decode_char_write),
Decoder(23, "EVENT_CHAR_READ_ALL", decode_unimp),
Decoder(24, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp),
- Decoder(25, "EVENT_AUDIO_IN", decode_unimp),
- Decoder(26, "EVENT_AUDIO_OUT", decode_audio_out),
+ Decoder(25, "EVENT_AUDIO_OUT", decode_audio_out),
+ Decoder(26, "EVENT_AUDIO_IN", decode_unimp),
Decoder(27, "EVENT_RANDOM", decode_random),
Decoder(28, "EVENT_CLOCK_HOST", decode_clock),
Decoder(29, "EVENT_CLOCK_VIRTUAL_RT", decode_clock),
@@ -334,6 +393,7 @@ v12_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(36, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint),
Decoder(37, "EVENT_CP_INIT", decode_checkpoint_init),
Decoder(38, "EVENT_CP_RESET", decode_checkpoint),
+ Decoder(39, "EVENT_END", decode_end),
]
def parse_arguments():
@@ -375,6 +435,7 @@ def decode_file(filename):
dumpfile)
except Exception as inst:
print(f"error {inst}")
+ sys.exit(1)
finally:
print(f"Reached {dumpfile.tell()} of {dumpsize} bytes")
diff --git a/scripts/rust/rust_root_crate.sh b/scripts/rust/rust_root_crate.sh
new file mode 100755
index 0000000..975bddf
--- /dev/null
+++ b/scripts/rust/rust_root_crate.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -eu
+
+cat <<EOF
+// @generated
+// This file is autogenerated by scripts/rust_root_crate.sh
+
+EOF
+
+for crate in $*; do
+ echo "extern crate $crate;"
+done
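Since the script only prints a fixed header followed by one "extern crate" line per argument, its output is easy to picture; the crate names below are hypothetical:

    $ sh scripts/rust/rust_root_crate.sh qemu_api hw_char_pl011
    // @generated
    // This file is autogenerated by scripts/rust_root_crate.sh

    extern crate qemu_api;
    extern crate hw_char_pl011;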
diff --git a/scripts/rust/rustc_args.py b/scripts/rust/rustc_args.py
new file mode 100644
index 0000000..63b0748
--- /dev/null
+++ b/scripts/rust/rustc_args.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+
+"""Generate rustc arguments for meson rust builds.
+
+This program generates --cfg compile flags for the configuration headers passed
+as arguments.
+
+Copyright (c) 2024 Linaro Ltd.
+
+Authors:
+ Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+from dataclasses import dataclass
+import logging
+from pathlib import Path
+from typing import Any, Iterable, List, Mapping, Optional, Set
+
+try:
+ import tomllib
+except ImportError:
+ import tomli as tomllib
+
+STRICT_LINTS = {"unknown_lints", "warnings"}
+
+
+class CargoTOML:
+ tomldata: Mapping[Any, Any]
+ workspace_data: Mapping[Any, Any]
+ check_cfg: Set[str]
+
+ def __init__(self, path: Optional[str], workspace: Optional[str]):
+ if path is not None:
+ with open(path, 'rb') as f:
+ self.tomldata = tomllib.load(f)
+ else:
+ self.tomldata = {"lints": {"workspace": True}}
+
+ if workspace is not None:
+ with open(workspace, 'rb') as f:
+ self.workspace_data = tomllib.load(f)
+ if "workspace" not in self.workspace_data:
+ self.workspace_data["workspace"] = {}
+
+ self.check_cfg = set(self.find_check_cfg())
+
+ def find_check_cfg(self) -> Iterable[str]:
+ toml_lints = self.lints
+ rust_lints = toml_lints.get("rust", {})
+ cfg_lint = rust_lints.get("unexpected_cfgs", {})
+ return cfg_lint.get("check-cfg", [])
+
+ @property
+ def lints(self) -> Mapping[Any, Any]:
+ return self.get_table("lints", True)
+
+ def get_table(self, key: str, can_be_workspace: bool = False) -> Mapping[Any, Any]:
+ table = self.tomldata.get(key, {})
+ if can_be_workspace and table.get("workspace", False) is True:
+ table = self.workspace_data["workspace"].get(key, {})
+
+ return table
+
+
+@dataclass
+class LintFlag:
+ flags: List[str]
+ priority: int
+
+
+def generate_lint_flags(cargo_toml: CargoTOML, strict_lints: bool) -> Iterable[str]:
+ """Converts Cargo.toml lints to rustc -A/-D/-F/-W flags."""
+
+ toml_lints = cargo_toml.lints
+
+ lint_list = []
+ for k, v in toml_lints.items():
+ prefix = "" if k == "rust" else k + "::"
+ for lint, data in v.items():
+ level = data if isinstance(data, str) else data["level"]
+ priority = 0 if isinstance(data, str) else data.get("priority", 0)
+ if level == "deny":
+ flag = "-D"
+ elif level == "allow":
+ flag = "-A"
+ elif level == "warn":
+ flag = "-W"
+ elif level == "forbid":
+ flag = "-F"
+ else:
+ raise Exception(f"invalid level {level} for {prefix}{lint}")
+
+ if not (strict_lints and lint in STRICT_LINTS):
+ lint_list.append(LintFlag(flags=[flag, prefix + lint], priority=priority))
+
+ if strict_lints:
+ for lint in STRICT_LINTS:
+ lint_list.append(LintFlag(flags=["-D", lint], priority=1000000))
+
+ lint_list.sort(key=lambda x: x.priority)
+ for lint in lint_list:
+ yield from lint.flags
+
+
+def generate_cfg_flags(header: str, cargo_toml: CargoTOML) -> Iterable[str]:
+ """Converts defines from config[..].h headers to rustc --cfg flags."""
+
+ with open(header, encoding="utf-8") as cfg:
+ config = [l.split()[1:] for l in cfg if l.startswith("#define")]
+
+ cfg_list = []
+ for cfg in config:
+ name = cfg[0]
+ if f'cfg({name})' not in cargo_toml.check_cfg:
+ continue
+ if len(cfg) >= 2 and cfg[1] != "1":
+ continue
+ cfg_list.append("--cfg")
+ cfg_list.append(name)
+ return cfg_list
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-v", "--verbose", action="store_true")
+ parser.add_argument(
+ "--config-headers",
+ metavar="CONFIG_HEADER",
+ action="append",
+ dest="config_headers",
+ help="paths to any configuration C headers (*.h files), if any",
+ required=False,
+ default=[],
+ )
+ parser.add_argument(
+ metavar="TOML_FILE",
+ action="store",
+ dest="cargo_toml",
+ help="path to Cargo.toml file",
+ nargs='?',
+ )
+ parser.add_argument(
+ "--workspace",
+ metavar="DIR",
+ action="store",
+ dest="workspace",
+ help="path to root of the workspace",
+ required=False,
+ default=None,
+ )
+ parser.add_argument(
+ "--features",
+ action="store_true",
+ dest="features",
+ help="generate --check-cfg arguments for features",
+ required=False,
+ default=None,
+ )
+ parser.add_argument(
+ "--lints",
+ action="store_true",
+ dest="lints",
+ help="generate arguments from [lints] table",
+ required=False,
+ default=None,
+ )
+ parser.add_argument(
+ "--rustc-version",
+ metavar="VERSION",
+ dest="rustc_version",
+ action="store",
+ help="version of rustc",
+ required=False,
+ default="1.0.0",
+ )
+ parser.add_argument(
+ "--strict-lints",
+ action="store_true",
+ dest="strict_lints",
+ help="apply stricter checks (for nightly Rust)",
+ default=False,
+ )
+ args = parser.parse_args()
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ logging.debug("args: %s", args)
+
+ rustc_version = tuple((int(x) for x in args.rustc_version.split('.')[0:2]))
+ if args.workspace:
+ workspace_cargo_toml = Path(args.workspace, "Cargo.toml").resolve()
+ cargo_toml = CargoTOML(args.cargo_toml, str(workspace_cargo_toml))
+ else:
+ cargo_toml = CargoTOML(args.cargo_toml, None)
+
+ if args.lints:
+ for tok in generate_lint_flags(cargo_toml, args.strict_lints):
+ print(tok)
+
+ if rustc_version >= (1, 80):
+ if args.lints:
+ print("--check-cfg")
+ print("cfg(test)")
+ for cfg in sorted(cargo_toml.check_cfg):
+ print("--check-cfg")
+ print(cfg)
+ if args.features:
+ for feature in cargo_toml.get_table("features"):
+ if feature != "default":
+ print("--check-cfg")
+ print(f'cfg(feature,values("{feature}"))')
+
+ for header in args.config_headers:
+ for tok in generate_cfg_flags(header, cargo_toml):
+ print(tok)
+
+
+if __name__ == "__main__":
+ main()
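A sketch of a manual invocation, using only the options defined above (meson normally drives this script, and the paths here are illustrative):

    # print lint flags from the [lints] table, check-cfg flags for features,
    # and --cfg flags for #defines declared via unexpected_cfgs.check-cfg
    python3 scripts/rust/rustc_args.py \
        --config-headers config-host.h \
        --workspace rust \
        --features --lints \
        --rustc-version 1.83.0 \
        rust/qemu-api/Cargo.toml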
diff --git a/scripts/symlink-install-tree.py b/scripts/symlink-install-tree.py
index 8ed97e3..b725638 100644
--- a/scripts/symlink-install-tree.py
+++ b/scripts/symlink-install-tree.py
@@ -4,6 +4,7 @@ from pathlib import PurePath
import errno
import json
import os
+import shlex
import subprocess
import sys
@@ -14,7 +15,7 @@ def destdir_join(d1: str, d2: str) -> str:
return str(PurePath(d1, *PurePath(d2).parts[1:]))
introspect = os.environ.get('MESONINTROSPECT')
-out = subprocess.run([*introspect.split(' '), '--installed'],
+out = subprocess.run([*shlex.split(introspect), '--installed'],
stdout=subprocess.PIPE, check=True).stdout
for source, dest in json.loads(out).items():
bundle_dest = destdir_join('qemu-bundle', dest)
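The switch to shlex matters when $MESONINTROSPECT carries shell quoting, e.g. for a meson binary installed under a path containing spaces; a hypothetical value illustrates the difference:

    # value meson might export for an installation under a path with a space
    MESONINTROSPECT='"/opt/build tools/meson" introspect'
    # introspect.split(' ')   -> ['"/opt/build', 'tools/meson"', 'introspect']  (broken argv)
    # shlex.split(introspect) -> ['/opt/build tools/meson', 'introspect']       (correct argv)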
diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py
index bc03238..6dfcbf7 100644
--- a/scripts/tracetool/__init__.py
+++ b/scripts/tracetool/__init__.py
@@ -12,12 +12,14 @@ __maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
+import os
import re
import sys
import weakref
+from pathlib import PurePath
-import tracetool.format
import tracetool.backend
+import tracetool.format
def error_write(*lines):
@@ -36,7 +38,7 @@ out_fobj = sys.stdout
def out_open(filename):
global out_filename, out_fobj
- out_filename = filename
+ out_filename = posix_relpath(filename)
out_fobj = open(filename, 'wt')
def out(*lines, **kwargs):
@@ -308,7 +310,7 @@ class Event(object):
fmt = [fmt_trans, fmt]
args = Arguments.build(groups["args"])
- return Event(name, props, fmt, args, lineno, filename)
+ return Event(name, props, fmt, args, lineno, posix_relpath(filename))
def __repr__(self):
"""Evaluable string representation for this object."""
@@ -447,3 +449,10 @@ def generate(events, group, format, backends,
tracetool.backend.dtrace.PROBEPREFIX = probe_prefix
tracetool.format.generate(events, format, backend, group)
+
+def posix_relpath(path, start=None):
+ try:
+ path = os.path.relpath(path, start)
+ except ValueError:
+ pass
+ return PurePath(path).as_posix()
diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py
index baed2ae..5fa30cc 100644
--- a/scripts/tracetool/backend/ftrace.py
+++ b/scripts/tracetool/backend/ftrace.py
@@ -12,8 +12,6 @@ __maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
-import os.path
-
from tracetool import out
@@ -47,7 +45,7 @@ def generate_h(event, group):
args=event.args,
event_id="TRACE_" + event.name.upper(),
event_lineno=event.lineno,
- event_filename=os.path.relpath(event.filename),
+ event_filename=event.filename,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
diff --git a/scripts/tracetool/backend/log.py b/scripts/tracetool/backend/log.py
index de27b7e..17ba1cd 100644
--- a/scripts/tracetool/backend/log.py
+++ b/scripts/tracetool/backend/log.py
@@ -12,8 +12,6 @@ __maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
-import os.path
-
from tracetool import out
@@ -55,7 +53,7 @@ def generate_h(event, group):
' }',
cond=cond,
event_lineno=event.lineno,
- event_filename=os.path.relpath(event.filename),
+ event_filename=event.filename,
name=event.name,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py
index a74d61f..2688d4b 100644
--- a/scripts/tracetool/backend/simple.py
+++ b/scripts/tracetool/backend/simple.py
@@ -36,8 +36,17 @@ def generate_h_begin(events, group):
def generate_h(event, group):
- out(' _simple_%(api)s(%(args)s);',
+ event_id = 'TRACE_' + event.name.upper()
+ if "vcpu" in event.properties:
+ # already checked on the generic format code
+ cond = "true"
+ else:
+ cond = "trace_event_get_state(%s)" % event_id
+ out(' if (%(cond)s) {',
+ ' _simple_%(api)s(%(args)s);',
+ ' }',
api=event.api(),
+ cond=cond,
args=", ".join(event.args.names()))
@@ -72,22 +81,10 @@ def generate_c(event, group):
if len(event.args) == 0:
sizestr = '0'
- event_id = 'TRACE_' + event.name.upper()
- if "vcpu" in event.properties:
- # already checked on the generic format code
- cond = "true"
- else:
- cond = "trace_event_get_state(%s)" % event_id
-
out('',
- ' if (!%(cond)s) {',
- ' return;',
- ' }',
- '',
' if (trace_record_start(&rec, %(event_obj)s.id, %(size_str)s)) {',
' return; /* Trace Buffer Full, Event Dropped ! */',
' }',
- cond=cond,
event_obj=event.api(event.QEMU_EVENT),
size_str=sizestr)
diff --git a/scripts/tracetool/backend/syslog.py b/scripts/tracetool/backend/syslog.py
index 012970f..5a3a00f 100644
--- a/scripts/tracetool/backend/syslog.py
+++ b/scripts/tracetool/backend/syslog.py
@@ -12,8 +12,6 @@ __maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
-import os.path
-
from tracetool import out
@@ -43,7 +41,7 @@ def generate_h(event, group):
' }',
cond=cond,
event_lineno=event.lineno,
- event_filename=os.path.relpath(event.filename),
+ event_filename=event.filename,
name=event.name,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
index c34ac64..b43b8ef 100755
--- a/scripts/update-linux-headers.sh
+++ b/scripts/update-linux-headers.sh
@@ -163,6 +163,7 @@ EOF
fi
if [ $arch = arm64 ]; then
cp "$hdrdir/include/asm/sve_context.h" "$output/linux-headers/asm-arm64/"
+ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-arm64/"
fi
if [ $arch = x86 ]; then
cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/"
@@ -176,7 +177,7 @@ EOF
# Remove everything except the macros from bootparam.h avoiding the
# unnecessary import of several video/ist/etc headers
- sed -e '/__ASSEMBLY__/,/__ASSEMBLY__/d' \
+ sed -e '/__ASSEMBLER__/,/__ASSEMBLER__/d' \
"$hdrdir/include/asm/bootparam.h" > "$hdrdir/bootparam.h"
cp_portable "$hdrdir/bootparam.h" \
"$output/include/standard-headers/asm-$arch"
@@ -185,6 +186,12 @@ EOF
fi
if [ $arch = riscv ]; then
cp "$hdrdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/"
+ cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-riscv/"
+ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-riscv/"
+ fi
+ if [ $arch = loongarch ]; then
+ cp "$hdrdir/include/asm/kvm_para.h" "$output/linux-headers/asm-loongarch/"
+ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-loongarch/"
fi
done
arch=
@@ -251,6 +258,7 @@ for i in "$hdrdir"/include/linux/*virtio*.h \
"$hdrdir/include/linux/kernel.h" \
"$hdrdir/include/linux/kvm_para.h" \
"$hdrdir/include/linux/vhost_types.h" \
+ "$hdrdir/include/linux/vmclock-abi.h" \
"$hdrdir/include/linux/sysinfo.h"; do
cp_portable "$i" "$output/include/standard-headers/linux"
done
diff --git a/scripts/update-syscalltbl.sh b/scripts/update-syscalltbl.sh
index 2d23e56..f0927c5 100755
--- a/scripts/update-syscalltbl.sh
+++ b/scripts/update-syscalltbl.sh
@@ -1,13 +1,18 @@
TBL_LIST="\
arch/alpha/kernel/syscalls/syscall.tbl,linux-user/alpha/syscall.tbl \
arch/arm/tools/syscall.tbl,linux-user/arm/syscall.tbl \
+scripts/syscall.tbl,linux-user/aarch64/syscall_64.tbl \
+scripts/syscall.tbl,linux-user/hexagon/syscall.tbl \
+scripts/syscall.tbl,linux-user/loongarch64/syscall.tbl \
arch/m68k/kernel/syscalls/syscall.tbl,linux-user/m68k/syscall.tbl \
arch/microblaze/kernel/syscalls/syscall.tbl,linux-user/microblaze/syscall.tbl \
arch/mips/kernel/syscalls/syscall_n32.tbl,linux-user/mips64/syscall_n32.tbl \
arch/mips/kernel/syscalls/syscall_n64.tbl,linux-user/mips64/syscall_n64.tbl \
arch/mips/kernel/syscalls/syscall_o32.tbl,linux-user/mips/syscall_o32.tbl \
+scripts/syscall.tbl,linux-user/openrisc/syscall.tbl \
arch/parisc/kernel/syscalls/syscall.tbl,linux-user/hppa/syscall.tbl \
arch/powerpc/kernel/syscalls/syscall.tbl,linux-user/ppc/syscall.tbl \
+scripts/syscall.tbl,linux-user/riscv/syscall.tbl \
arch/s390/kernel/syscalls/syscall.tbl,linux-user/s390x/syscall.tbl \
arch/sh/kernel/syscalls/syscall.tbl,linux-user/sh4/syscall.tbl \
arch/sparc/kernel/syscalls/syscall.tbl,linux-user/sparc64/syscall.tbl \
diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py
index 9c0e6b8..2335e25 100755
--- a/scripts/vmstate-static-checker.py
+++ b/scripts/vmstate-static-checker.py
@@ -42,6 +42,7 @@ def check_fields_match(name, s_field, d_field):
# Some fields changed names between qemu versions. This list
# is used to allow such changes in each section / description.
changed_names = {
+ 'acpi-ghes': ['ghes_addr_le', 'hw_error_le'],
'apic': ['timer', 'timer_expiry'],
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
@@ -90,6 +91,7 @@ def check_fields_match(name, s_field, d_field):
'mem_win_size', 'mig_mem_win_size',
'io_win_addr', 'mig_io_win_addr',
'io_win_size', 'mig_io_win_size'],
+ 'hpet': ['num_timers', 'num_timers_save'],
}
if not name in changed_names:
diff --git a/scsi/pr-manager-helper.c b/scsi/pr-manager-helper.c
index 3be52a9..6b86f01 100644
--- a/scsi/pr-manager-helper.c
+++ b/scsi/pr-manager-helper.c
@@ -300,7 +300,7 @@ static void pr_manager_helper_instance_init(Object *obj)
}
static void pr_manager_helper_class_init(ObjectClass *klass,
- void *class_data G_GNUC_UNUSED)
+ const void *class_data G_GNUC_UNUSED)
{
PRManagerClass *prmgr_klass = PR_MANAGER_CLASS(klass);
UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass);
diff --git a/scsi/pr-manager.c b/scsi/pr-manager.c
index fb5fc29..40e1210 100644
--- a/scsi/pr-manager.c
+++ b/scsi/pr-manager.c
@@ -21,7 +21,7 @@
#include "qemu/module.h"
#include "qapi/qapi-commands-block.h"
-#define PR_MANAGER_PATH "/objects"
+#define PR_MANAGER_PATH "objects"
typedef struct PRManagerData {
PRManager *pr_mgr;
@@ -77,7 +77,7 @@ static const TypeInfo pr_manager_info = {
.name = TYPE_PR_MANAGER,
.class_size = sizeof(PRManagerClass),
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
@@ -135,7 +135,7 @@ PRManagerInfoList *qmp_query_pr_managers(Error **errp)
{
PRManagerInfoList *head = NULL;
PRManagerInfoList **prev = &head;
- Object *container = container_get(object_get_root(), PR_MANAGER_PATH);
+ Object *container = object_get_container(PR_MANAGER_PATH);
object_child_foreach(container, query_one_pr_manager, &prev);
return head;
diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c
index c6c6347..b69dd98 100644
--- a/scsi/qemu-pr-helper.c
+++ b/scsi/qemu-pr-helper.c
@@ -47,7 +47,7 @@
#include "qemu/log.h"
#include "qemu/systemd.h"
#include "qapi/util.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "io/channel-socket.h"
#include "trace/control.h"
#include "qemu-version.h"
diff --git a/scsi/utils.c b/scsi/utils.c
index 357b036..545956f 100644
--- a/scsi/utils.c
+++ b/scsi/utils.c
@@ -587,20 +587,27 @@ int scsi_sense_from_errno(int errno_value, SCSISense *sense)
return GOOD;
case EDOM:
return TASK_SET_FULL;
+#if ENODEV != ENOMEDIUM
+ case ENODEV:
+ /*
+ * Some of the BSDs have ENODEV and ENOMEDIUM as synonyms. For
+ * everyone else, give a more severe sense code for ENODEV.
+ */
+#endif
#ifdef CONFIG_LINUX
 /* These errno mappings are specific to Linux. For more information:
* - scsi_check_sense and scsi_decide_disposition in drivers/scsi/scsi_error.c
* - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c
* - blk_errors[] in block/blk-core.c
*/
+ case EREMOTEIO:
+ *sense = SENSE_CODE(TARGET_FAILURE);
+ return CHECK_CONDITION;
case EBADE:
return RESERVATION_CONFLICT;
case ENODATA:
*sense = SENSE_CODE(READ_ERROR);
return CHECK_CONDITION;
- case EREMOTEIO:
- *sense = SENSE_CODE(TARGET_FAILURE);
- return CHECK_CONDITION;
#endif
case ENOMEDIUM:
*sense = SENSE_CODE(NO_MEDIUM);
diff --git a/semihosting/Kconfig b/semihosting/Kconfig
index eaf3a20..fbe6ac8 100644
--- a/semihosting/Kconfig
+++ b/semihosting/Kconfig
@@ -1,6 +1,7 @@
config SEMIHOSTING
bool
+ depends on TCG
config ARM_COMPATIBLE_SEMIHOSTING
bool
diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c
index d78c642..86e5260 100644
--- a/semihosting/arm-compat-semi.c
+++ b/semihosting/arm-compat-semi.c
@@ -166,6 +166,7 @@ static LayoutInfo common_semi_find_bases(CPUState *cs)
#endif
+#include "cpu.h"
#include "common-semi-target.h"
/*
diff --git a/semihosting/console.c b/semihosting/console.c
index 60102bb..c3683a1 100644
--- a/semihosting/console.c
+++ b/semihosting/console.c
@@ -18,14 +18,15 @@
#include "qemu/osdep.h"
#include "semihosting/semihost.h"
#include "semihosting/console.h"
+#include "exec/cpu-common.h"
#include "exec/gdbstub.h"
-#include "exec/exec-all.h"
#include "qemu/log.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "qemu/fifo8.h"
+#include "hw/core/cpu.h"
/* Access to this structure is protected by the BQL */
typedef struct SemihostingConsole {
diff --git a/semihosting/meson.build b/semihosting/meson.build
index 34933e5..f3d38dd 100644
--- a/semihosting/meson.build
+++ b/semihosting/meson.build
@@ -4,13 +4,17 @@ specific_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files(
))
specific_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_SYSTEM_ONLY'], if_true: files(
- 'config.c',
- 'console.c',
'uaccess.c',
))
-common_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_SYSTEM_ONLY'], if_false: files('stubs-all.c'))
-system_ss.add(when: ['CONFIG_SEMIHOSTING'], if_false: files('stubs-system.c'))
+common_ss.add(when: 'CONFIG_SEMIHOSTING', if_false: files('stubs-all.c'))
+user_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files('user.c'))
+system_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files(
+ 'config.c',
+ 'console.c',
+), if_false: files(
+ 'stubs-system.c',
+))
specific_ss.add(when: ['CONFIG_ARM_COMPATIBLE_SEMIHOSTING'],
if_true: files('arm-compat-semi.c'))
diff --git a/semihosting/stubs-all.c b/semihosting/stubs-all.c
index a2a1fc9..c001c84 100644
--- a/semihosting/stubs-all.c
+++ b/semihosting/stubs-all.c
@@ -11,6 +11,12 @@
#include "qemu/osdep.h"
#include "semihosting/semihost.h"
+/* Queries to config status default to off */
+bool semihosting_enabled(bool is_user)
+{
+ return false;
+}
+
SemihostingTarget semihosting_get_target(void)
{
return SEMIHOSTING_TARGET_AUTO;
diff --git a/semihosting/stubs-system.c b/semihosting/stubs-system.c
index f26cbb7..989789f 100644
--- a/semihosting/stubs-system.c
+++ b/semihosting/stubs-system.c
@@ -22,12 +22,6 @@ QemuOptsList qemu_semihosting_config_opts = {
},
};
-/* Queries to config status default to off */
-bool semihosting_enabled(bool is_user)
-{
- return false;
-}
-
/*
 * All the rest are empty stubs. We could g_assert_not_reached() but
* that adds extra weight to the final binary. Waste not want not.
diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c
index c40348f..f6451d9 100644
--- a/semihosting/syscalls.c
+++ b/semihosting/syscalls.c
@@ -7,6 +7,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "cpu.h"
#include "gdbstub/syscalls.h"
#include "semihosting/guestfd.h"
@@ -287,6 +288,7 @@ static void host_open(CPUState *cs, gdb_syscall_complete_cb complete,
ret = open(p, host_flags, mode);
if (ret < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to open %s\n", __func__, p);
complete(cs, -1, errno);
} else {
int guestfd = alloc_guestfd();
diff --git a/semihosting/uaccess.c b/semihosting/uaccess.c
index dc587d7..4554844 100644
--- a/semihosting/uaccess.c
+++ b/semihosting/uaccess.c
@@ -8,7 +8,10 @@
*/
#include "qemu/osdep.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "accel/tcg/probe.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "semihosting/uaccess.h"
void *uaccess_lock_user(CPUArchState *env, target_ulong addr,
diff --git a/semihosting/user.c b/semihosting/user.c
new file mode 100644
index 0000000..98c144c
--- /dev/null
+++ b/semihosting/user.c
@@ -0,0 +1,21 @@
+/*
+ * Semihosting for user emulation
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "semihosting/semihost.h"
+
+bool semihosting_enabled(bool is_user)
+{
+ assert(is_user);
+ return true;
+}
+
+SemihostingTarget semihosting_get_target(void)
+{
+ return SEMIHOSTING_TARGET_AUTO;
+}
diff --git a/stats/stats-hmp-cmds.c b/stats/stats-hmp-cmds.c
index 1f91bf8..b93b471 100644
--- a/stats/stats-hmp-cmds.c
+++ b/stats/stats-hmp-cmds.c
@@ -11,7 +11,7 @@
#include "monitor/monitor.h"
#include "qemu/cutils.h"
#include "hw/core/cpu.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
static void print_stats_schema_value(Monitor *mon, StatsSchemaValue *value)
diff --git a/stats/stats-qmp-cmds.c b/stats/stats-qmp-cmds.c
index e214b96..884674e 100644
--- a/stats/stats-qmp-cmds.c
+++ b/stats/stats-qmp-cmds.c
@@ -6,7 +6,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/stats.h"
+#include "system/stats.h"
#include "qapi/qapi-commands-stats.h"
#include "qemu/queue.h"
#include "qapi/error.h"
diff --git a/storage-daemon/qapi/qapi-schema.json b/storage-daemon/qapi/qapi-schema.json
index f10c949..0427594 100644
--- a/storage-daemon/qapi/qapi-schema.json
+++ b/storage-daemon/qapi/qapi-schema.json
@@ -13,6 +13,30 @@
# the array type in the main schema, even if it is unused outside of the
# storage daemon.
+##
+# = Introduction
+#
+# This manual describes the commands and events supported by the QEMU
+# storage daemon QMP.
+#
+# For locating a particular item, please see the `qapi-qsd-index`.
+#
+# The following notation is used in examples:
+#
+# .. qmp-example::
+#
+# -> ... text sent by client (commands) ...
+# <- ... text sent by server (command responses and events) ...
+#
+# Example text is formatted for readability. However, in real
+# protocol usage, it's commonly emitted as a single line.
+#
+# Please refer to the
+# :doc:`QEMU Machine Protocol Specification </interop/qmp-spec>`
+# for the general format of commands, responses, and events.
+##
+
+
{ 'include': '../../qapi/pragma.json' }
# Documentation generated with qapi-gen.py is in source order, with
diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c
index 0e9354f..eb72561 100644
--- a/storage-daemon/qemu-storage-daemon.c
+++ b/storage-daemon/qemu-storage-daemon.c
@@ -38,8 +38,8 @@
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qapi-visit-block-export.h"
#include "qapi/qapi-visit-control.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qemu/help-texts.h"
@@ -58,7 +58,7 @@
#include "storage-daemon/qapi/qapi-commands.h"
#include "storage-daemon/qapi/qapi-init-commands.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "trace/control.h"
static const char *pid_file;
diff --git a/stubs/blk-commit-all.c b/stubs/blk-commit-all.c
index e156c57..76b0827 100644
--- a/stubs/blk-commit-all.c
+++ b/stubs/blk-commit-all.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
int blk_commit_all(void)
{
diff --git a/stubs/change-state-handler.c b/stubs/change-state-handler.c
index d1ed46b..002d248 100644
--- a/stubs/change-state-handler.c
+++ b/stubs/change-state-handler.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
void *opaque)
diff --git a/stubs/cpu-get-clock.c b/stubs/cpu-get-clock.c
index 9e92404..53b9c83 100644
--- a/stubs/cpu-get-clock.c
+++ b/stubs/cpu-get-clock.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpu-timers.h"
#include "qemu/main-loop.h"
int64_t cpu_get_clock(void)
diff --git a/stubs/cpu-synchronize-state.c b/stubs/cpu-synchronize-state.c
index d9211da..2ed09ff 100644
--- a/stubs/cpu-synchronize-state.c
+++ b/stubs/cpu-synchronize-state.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
void cpu_synchronize_state(CPUState *cpu)
{
diff --git a/stubs/cpus-virtual-clock.c b/stubs/cpus-virtual-clock.c
index af7c1a1..0b83a92 100644
--- a/stubs/cpus-virtual-clock.c
+++ b/stubs/cpus-virtual-clock.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpu-timers.h"
#include "qemu/main-loop.h"
int64_t cpus_get_virtual_clock(void)
diff --git a/stubs/dump.c b/stubs/dump.c
index 1f28ec2..df7897b 100644
--- a/stubs/dump.c
+++ b/stubs/dump.c
@@ -12,7 +12,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/dump-arch.h"
+#include "system/dump-arch.h"
int cpu_get_dump_info(ArchDumpInfo *info,
const struct GuestPhysBlockList *guest_phys_blocks)
diff --git a/stubs/get-vm-name.c b/stubs/get-vm-name.c
index 0906303..4cfac48 100644
--- a/stubs/get-vm-name.c
+++ b/stubs/get-vm-name.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
const char *qemu_get_vm_name(void)
{
diff --git a/stubs/icount.c b/stubs/icount.c
index 9f9a59f..ceb73b4 100644
--- a/stubs/icount.c
+++ b/stubs/icount.c
@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
/* icount - Instruction Counter API */
diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c
index d7890e5..6050c08 100644
--- a/stubs/iothread-lock.c
+++ b/stubs/iothread-lock.c
@@ -1,9 +1,17 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+static bool bql_is_locked = false;
+static uint32_t bql_unlock_blocked;
+
bool bql_locked(void)
{
- return false;
+ return bql_is_locked;
+}
+
+void rust_bql_mock_lock(void)
+{
+ bql_is_locked = true;
}
void bql_lock_impl(const char *file, int line)
@@ -12,4 +20,17 @@ void bql_lock_impl(const char *file, int line)
void bql_unlock(void)
{
+ assert(!bql_unlock_blocked);
+}
+
+void bql_block_unlock(bool increase)
+{
+ uint32_t new_value;
+
+ assert(bql_locked());
+
+ /* check for overflow! */
+ new_value = bql_unlock_blocked + increase - !increase;
+ assert((new_value > bql_unlock_blocked) == increase);
+ bql_unlock_blocked = new_value;
}
diff --git a/stubs/meson.build b/stubs/meson.build
index 772a3e8..cef046e 100644
--- a/stubs/meson.build
+++ b/stubs/meson.build
@@ -55,7 +55,14 @@ endif
if have_user
# Symbols that are used by hw/core.
stub_ss.add(files('cpu-synchronize-state.c'))
- stub_ss.add(files('qdev.c'))
+
+ # Stubs for QAPI events. Those can always be included in the build, but
+ # they are not built at all for --disable-system builds.
+ if not have_system
+ stub_ss.add(files('qdev.c'))
+ endif
+
+ stub_ss.add(files('monitor-internal.c'))
endif
if have_system
@@ -70,6 +77,14 @@ if have_system
stub_ss.add(files('target-monitor-defs.c'))
stub_ss.add(files('win32-kbd-hook.c'))
stub_ss.add(files('xen-hw-stub.c'))
+ stub_ss.add(files('monitor-arm-gic.c'))
+ stub_ss.add(files('monitor-i386-rtc.c'))
+ stub_ss.add(files('monitor-i386-sev.c'))
+ stub_ss.add(files('monitor-i386-sgx.c'))
+ stub_ss.add(files('monitor-i386-xen.c'))
+ stub_ss.add(files('monitor-cpu.c'))
+ stub_ss.add(files('monitor-cpu-s390x.c'))
+ stub_ss.add(files('monitor-cpu-s390x-kvm.c'))
endif
if have_system or have_user
diff --git a/stubs/monitor-arm-gic.c b/stubs/monitor-arm-gic.c
new file mode 100644
index 0000000..b342924
--- /dev/null
+++ b/stubs/monitor-arm-gic.c
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-arm.h"
+
+
+GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
+{
+ error_setg(errp, "GIC hardware is not available on this target");
+ return NULL;
+}
diff --git a/stubs/monitor-cpu-s390x-kvm.c b/stubs/monitor-cpu-s390x-kvm.c
new file mode 100644
index 0000000..8683dd2
--- /dev/null
+++ b/stubs/monitor-cpu-s390x-kvm.c
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine-s390x.h"
+
+void qmp_set_cpu_topology(uint16_t core,
+ bool has_socket, uint16_t socket,
+ bool has_book, uint16_t book,
+ bool has_drawer, uint16_t drawer,
+ bool has_entitlement, S390CpuEntitlement entitlement,
+ bool has_dedicated, bool dedicated,
+ Error **errp)
+{
+ error_setg(errp, "CPU topology change is not supported on this target");
+}
+
+CpuPolarizationInfo *qmp_query_s390x_cpu_polarization(Error **errp)
+{
+ error_setg(errp, "CPU polarization is not supported on this target");
+ return NULL;
+}
diff --git a/stubs/monitor-cpu-s390x.c b/stubs/monitor-cpu-s390x.c
new file mode 100644
index 0000000..71e7944
--- /dev/null
+++ b/stubs/monitor-cpu-s390x.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
+
+CpuModelCompareInfo *
+qmp_query_cpu_model_comparison(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ error_setg(errp, "CPU model comparison is not supported on this target");
+ return NULL;
+}
+
+CpuModelBaselineInfo *
+qmp_query_cpu_model_baseline(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ error_setg(errp, "CPU model baseline is not supported on this target");
+ return NULL;
+}
diff --git a/stubs/monitor-cpu.c b/stubs/monitor-cpu.c
new file mode 100644
index 0000000..a8c7ee8
--- /dev/null
+++ b/stubs/monitor-cpu.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
+
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
+CpuDefinitionInfoList *
+qmp_query_cpu_definitions(Error **errp)
+{
+ error_setg(errp, "CPU model definitions are not supported on this target");
+ return NULL;
+}
diff --git a/stubs/monitor-i386-rtc.c b/stubs/monitor-i386-rtc.c
new file mode 100644
index 0000000..8420d7c
--- /dev/null
+++ b/stubs/monitor-i386-rtc.c
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-i386.h"
+
+void qmp_rtc_reset_reinjection(Error **errp)
+{
+ error_setg(errp,
+ "RTC interrupt reinjection backlog reset is not available for"
+ "this machine");
+}
diff --git a/stubs/monitor-i386-sev.c b/stubs/monitor-i386-sev.c
new file mode 100644
index 0000000..d4f0241
--- /dev/null
+++ b/stubs/monitor-i386-sev.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-i386.h"
+
+SevInfo *qmp_query_sev(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+SevCapability *qmp_query_sev_capabilities(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret,
+ bool has_gpa, uint64_t gpa, Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+}
+
+SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
+ Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
diff --git a/stubs/monitor-i386-sgx.c b/stubs/monitor-i386-sgx.c
new file mode 100644
index 0000000..00e081d
--- /dev/null
+++ b/stubs/monitor-i386-sgx.c
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-i386.h"
+
+SgxInfo *qmp_query_sgx(Error **errp)
+{
+ error_setg(errp, "SGX support is not compiled in");
+ return NULL;
+}
+
+SgxInfo *qmp_query_sgx_capabilities(Error **errp)
+{
+ error_setg(errp, "SGX support is not compiled in");
+ return NULL;
+}
diff --git a/stubs/monitor-i386-xen.c b/stubs/monitor-i386-xen.c
new file mode 100644
index 0000000..95b826f
--- /dev/null
+++ b/stubs/monitor-i386-xen.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-i386.h"
+
+EvtchnInfoList *qmp_xen_event_list(Error **errp)
+{
+ error_setg(errp, "Xen event channel emulation not enabled");
+ return NULL;
+}
+
+void qmp_xen_event_inject(uint32_t port, Error **errp)
+{
+ error_setg(errp, "Xen event channel emulation not enabled");
+}
diff --git a/stubs/qemu-timer-notify-cb.c b/stubs/qemu-timer-notify-cb.c
index 845e46f..b57b983 100644
--- a/stubs/qemu-timer-notify-cb.c
+++ b/stubs/qemu-timer-notify-cb.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpu-timers.h"
#include "qemu/main-loop.h"
void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
diff --git a/stubs/qmp-command-available.c b/stubs/qmp-command-available.c
index 46540af..8851fac 100644
--- a/stubs/qmp-command-available.c
+++ b/stubs/qmp-command-available.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/dispatch.h"
+#include "qapi/qmp-registry.h"
bool qmp_command_available(const QmpCommand *cmd, Error **errp)
{
diff --git a/stubs/qmp-quit.c b/stubs/qmp-quit.c
index a3ff47f..8fb523e 100644
--- a/stubs/qmp-quit.c
+++ b/stubs/qmp-quit.c
@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "qapi/qapi-commands-control.h"
-#include "qapi/qmp/dispatch.h"
+#include "qapi/qmp-registry.h"
void qmp_quit(Error **errp)
{
diff --git a/stubs/qtest.c b/stubs/qtest.c
index 39e376e..6c39725 100644
--- a/stubs/qtest.c
+++ b/stubs/qtest.c
@@ -9,7 +9,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
/* Needed for qtest_allowed() */
bool qtest_allowed;
diff --git a/stubs/ram-block.c b/stubs/ram-block.c
index 1081976..e88fab3 100644
--- a/stubs/ram-block.c
+++ b/stubs/ram-block.c
@@ -1,7 +1,7 @@
#include "qemu/osdep.h"
#include "exec/ramlist.h"
#include "exec/cpu-common.h"
-#include "exec/memory.h"
+#include "system/memory.h"
void *qemu_ram_get_host_addr(RAMBlock *rb)
{
diff --git a/stubs/replay-mode.c b/stubs/replay-mode.c
index 264be9d..439d97e 100644
--- a/stubs/replay-mode.c
+++ b/stubs/replay-mode.c
@@ -1,4 +1,4 @@
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
ReplayMode replay_mode;
diff --git a/stubs/replay-tools.c b/stubs/replay-tools.c
index 3e8ca32..c537485 100644
--- a/stubs/replay-tools.c
+++ b/stubs/replay-tools.c
@@ -1,5 +1,5 @@
#include "qemu/osdep.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "block/aio.h"
bool replay_events_enabled(void)
diff --git a/stubs/runstate-check.c b/stubs/runstate-check.c
index 2ccda2b..c47abdf 100644
--- a/stubs/runstate-check.c
+++ b/stubs/runstate-check.c
@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
bool runstate_check(RunState state)
{
return state == RUN_STATE_PRELAUNCH;
diff --git a/stubs/vm-stop.c b/stubs/vm-stop.c
index 7f8a9da..e139aab 100644
--- a/stubs/vm-stop.c
+++ b/stubs/vm-stop.c
@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
void qemu_system_vmstop_request_prepare(void)
{
abort();
diff --git a/stubs/vmstate.c b/stubs/vmstate.c
index 8513d92..c190762 100644
--- a/stubs/vmstate.c
+++ b/stubs/vmstate.c
@@ -1,5 +1,7 @@
#include "qemu/osdep.h"
#include "migration/vmstate.h"
+#include "qapi/qapi-types-migration.h"
+#include "migration/client-options.h"
int vmstate_register_with_alias_id(VMStateIf *obj,
uint32_t instance_id,
@@ -21,3 +23,8 @@ bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
{
return true;
}
+
+MigMode migrate_mode(void)
+{
+ return MIG_MODE_NORMAL;
+}
diff --git a/subprojects/.gitignore b/subprojects/.gitignore
index adca026..f428193 100644
--- a/subprojects/.gitignore
+++ b/subprojects/.gitignore
@@ -6,3 +6,17 @@
/keycodemapdb
/libvfio-user
/slirp
+/anyhow-1.0.98
+/arbitrary-int-1.2.7
+/bilge-0.2.0
+/bilge-impl-0.2.0
+/either-1.12.0
+/foreign-0.3.1
+/itertools-0.11.0
+/libc-0.2.162
+/proc-macro-error-1.0.4
+/proc-macro-error-attr-1.0.4
+/proc-macro2-1.0.84
+/quote-1.0.36
+/syn-2.0.66
+/unicode-ident-1.0.12
diff --git a/subprojects/anyhow-1-rs.wrap b/subprojects/anyhow-1-rs.wrap
new file mode 100644
index 0000000..a69a364
--- /dev/null
+++ b/subprojects/anyhow-1-rs.wrap
@@ -0,0 +1,7 @@
+[wrap-file]
+directory = anyhow-1.0.98
+source_url = https://crates.io/api/v1/crates/anyhow/1.0.98/download
+source_filename = anyhow-1.0.98.tar.gz
+source_hash = e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487
+#method = cargo
+patch_directory = anyhow-1-rs
diff --git a/subprojects/arbitrary-int-1-rs.wrap b/subprojects/arbitrary-int-1-rs.wrap
new file mode 100644
index 0000000..a1838b2
--- /dev/null
+++ b/subprojects/arbitrary-int-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = arbitrary-int-1.2.7
+source_url = https://crates.io/api/v1/crates/arbitrary-int/1.2.7/download
+source_filename = arbitrary-int-1.2.7.tar.gz
+source_hash = c84fc003e338a6f69fbd4f7fe9f92b535ff13e9af8997f3b14b6ddff8b1df46d
+#method = cargo
+patch_directory = arbitrary-int-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/bilge-0.2-rs.wrap b/subprojects/bilge-0.2-rs.wrap
new file mode 100644
index 0000000..900bb14
--- /dev/null
+++ b/subprojects/bilge-0.2-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = bilge-0.2.0
+source_url = https://crates.io/api/v1/crates/bilge/0.2.0/download
+source_filename = bilge-0.2.0.tar.gz
+source_hash = dc707ed8ebf81de5cd6c7f48f54b4c8621760926cdf35a57000747c512e67b57
+#method = cargo
+patch_directory = bilge-0.2-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/bilge-impl-0.2-rs.wrap b/subprojects/bilge-impl-0.2-rs.wrap
new file mode 100644
index 0000000..4f84eca
--- /dev/null
+++ b/subprojects/bilge-impl-0.2-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = bilge-impl-0.2.0
+source_url = https://crates.io/api/v1/crates/bilge-impl/0.2.0/download
+source_filename = bilge-impl-0.2.0.tar.gz
+source_hash = feb11e002038ad243af39c2068c8a72bcf147acf05025dcdb916fcc000adb2d8
+#method = cargo
+patch_directory = bilge-impl-0.2-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/either-1-rs.wrap b/subprojects/either-1-rs.wrap
new file mode 100644
index 0000000..352e11c
--- /dev/null
+++ b/subprojects/either-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = either-1.12.0
+source_url = https://crates.io/api/v1/crates/either/1.12.0/download
+source_filename = either-1.12.0.tar.gz
+source_hash = 3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b
+#method = cargo
+patch_directory = either-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/foreign-0.3-rs.wrap b/subprojects/foreign-0.3-rs.wrap
new file mode 100644
index 0000000..0d218ec
--- /dev/null
+++ b/subprojects/foreign-0.3-rs.wrap
@@ -0,0 +1,7 @@
+[wrap-file]
+directory = foreign-0.3.1
+source_url = https://crates.io/api/v1/crates/foreign/0.3.1/download
+source_filename = foreign-0.3.1.tar.gz
+source_hash = 17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846
+#method = cargo
+patch_directory = foreign-0.3-rs
diff --git a/subprojects/itertools-0.11-rs.wrap b/subprojects/itertools-0.11-rs.wrap
new file mode 100644
index 0000000..ee12d00
--- /dev/null
+++ b/subprojects/itertools-0.11-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = itertools-0.11.0
+source_url = https://crates.io/api/v1/crates/itertools/0.11.0/download
+source_filename = itertools-0.11.0.tar.gz
+source_hash = b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57
+#method = cargo
+patch_directory = itertools-0.11-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/libc-0.2-rs.wrap b/subprojects/libc-0.2-rs.wrap
new file mode 100644
index 0000000..bbe08f8
--- /dev/null
+++ b/subprojects/libc-0.2-rs.wrap
@@ -0,0 +1,7 @@
+[wrap-file]
+directory = libc-0.2.162
+source_url = https://crates.io/api/v1/crates/libc/0.2.162/download
+source_filename = libc-0.2.162.tar.gz
+source_hash = 18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398
+#method = cargo
+patch_directory = libc-0.2-rs
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index deb40e7..2ffc58c 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -186,11 +186,7 @@ typedef struct VhostUserShared {
unsigned char uuid[UUID_LEN];
} VhostUserShared;
-#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
-# define VU_PACKED __attribute__((gcc_struct, packed))
-#else
-# define VU_PACKED __attribute__((packed))
-#endif
+#define VU_PACKED __attribute__((packed))
typedef struct VhostUserMsg {
int request;
diff --git a/subprojects/packagefiles/anyhow-1-rs/meson.build b/subprojects/packagefiles/anyhow-1-rs/meson.build
new file mode 100644
index 0000000..348bab9
--- /dev/null
+++ b/subprojects/packagefiles/anyhow-1-rs/meson.build
@@ -0,0 +1,33 @@
+project('anyhow-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.98',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+rustc = meson.get_compiler('rust')
+
+rust_args = ['--cap-lints', 'allow']
+rust_args += ['--cfg', 'feature="std"']
+if rustc.version().version_compare('<1.65.0')
+ error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.65.0')
+endif
+rust_args += [ '--cfg', 'std_backtrace' ] # >= 1.65.0
+if rustc.version().version_compare('<1.81.0')
+ rust_args += [ '--cfg', 'anyhow_no_core_error' ]
+endif
+
+_anyhow_rs = static_library(
+ 'anyhow',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2018', 'build.rust_std=2018'],
+ rust_abi: 'rust',
+ rust_args: rust_args,
+ dependencies: [],
+)
+
+anyhow_dep = declare_dependency(
+ link_with: _anyhow_rs,
+)
+
+meson.override_dependency('anyhow-1-rs', anyhow_dep)
diff --git a/subprojects/packagefiles/arbitrary-int-1-rs/meson.build b/subprojects/packagefiles/arbitrary-int-1-rs/meson.build
new file mode 100644
index 0000000..00733d1
--- /dev/null
+++ b/subprojects/packagefiles/arbitrary-int-1-rs/meson.build
@@ -0,0 +1,21 @@
+project('arbitrary-int-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.2.7',
+ license: 'MIT',
+ default_options: [])
+
+_arbitrary_int_rs = static_library(
+ 'arbitrary_int',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_args: ['--cap-lints', 'allow'],
+ rust_abi: 'rust',
+ dependencies: [],
+)
+
+arbitrary_int_dep = declare_dependency(
+ link_with: _arbitrary_int_rs,
+)
+
+meson.override_dependency('arbitrary-int-1-rs', arbitrary_int_dep)
diff --git a/subprojects/packagefiles/bilge-0.2-rs/meson.build b/subprojects/packagefiles/bilge-0.2-rs/meson.build
new file mode 100644
index 0000000..ce13d0f
--- /dev/null
+++ b/subprojects/packagefiles/bilge-0.2-rs/meson.build
@@ -0,0 +1,31 @@
+project(
+ 'bilge-0.2-rs',
+ 'rust',
+ meson_version: '>=1.5.0',
+ version : '0.2.0',
+ license : 'MIT OR Apache-2.0',
+)
+
+subproject('arbitrary-int-1-rs', required: true)
+subproject('bilge-impl-0.2-rs', required: true)
+
+arbitrary_int_dep = dependency('arbitrary-int-1-rs')
+bilge_impl_dep = dependency('bilge-impl-0.2-rs')
+
+lib = static_library(
+ 'bilge',
+ 'src/lib.rs',
+ override_options : ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi : 'rust',
+ rust_args: ['--cap-lints', 'allow'],
+ dependencies: [
+ arbitrary_int_dep,
+ bilge_impl_dep,
+ ],
+)
+
+bilge_dep = declare_dependency(
+ link_with : [lib],
+)
+
+meson.override_dependency('bilge-0.2-rs', bilge_dep)
diff --git a/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build b/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build
new file mode 100644
index 0000000..42b03dc
--- /dev/null
+++ b/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build
@@ -0,0 +1,47 @@
+project('bilge-impl-0.2-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '0.2.0',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('itertools-0.11-rs', required: true)
+subproject('proc-macro-error-attr-1-rs', required: true)
+subproject('proc-macro-error-1-rs', required: true)
+subproject('quote-1-rs', required: true)
+subproject('syn-2-rs', required: true)
+subproject('proc-macro2-1-rs', required: true)
+
+itertools_dep = dependency('itertools-0.11-rs', native: true)
+proc_macro_error_attr_dep = dependency('proc-macro-error-attr-1-rs', native: true)
+proc_macro_error_dep = dependency('proc-macro-error-1-rs', native: true)
+quote_dep = dependency('quote-1-rs', native: true)
+syn_dep = dependency('syn-2-rs', native: true)
+proc_macro2_dep = dependency('proc-macro2-1-rs', native: true)
+
+rust = import('rust')
+
+_bilge_impl_rs = rust.proc_macro(
+ 'bilge_impl',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'use_fallback',
+ '--cfg', 'feature="syn-error"',
+ '--cfg', 'feature="proc-macro"',
+ ],
+ dependencies: [
+ itertools_dep,
+ proc_macro_error_attr_dep,
+ proc_macro_error_dep,
+ quote_dep,
+ syn_dep,
+ proc_macro2_dep,
+ ],
+)
+
+bilge_impl_dep = declare_dependency(
+ link_with: _bilge_impl_rs,
+)
+
+meson.override_dependency('bilge-impl-0.2-rs', bilge_impl_dep)
diff --git a/subprojects/packagefiles/either-1-rs/meson.build b/subprojects/packagefiles/either-1-rs/meson.build
new file mode 100644
index 0000000..04c96cc
--- /dev/null
+++ b/subprojects/packagefiles/either-1-rs/meson.build
@@ -0,0 +1,26 @@
+project('either-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.12.0',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+_either_rs = static_library(
+ 'either',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2018', 'build.rust_std=2018'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'feature="use_std"',
+ '--cfg', 'feature="use_alloc"',
+ ],
+ dependencies: [],
+ native: true,
+)
+
+either_dep = declare_dependency(
+ link_with: _either_rs,
+)
+
+meson.override_dependency('either-1-rs', either_dep, native: true)
diff --git a/subprojects/packagefiles/foreign-0.3-rs/meson.build b/subprojects/packagefiles/foreign-0.3-rs/meson.build
new file mode 100644
index 0000000..0901c02
--- /dev/null
+++ b/subprojects/packagefiles/foreign-0.3-rs/meson.build
@@ -0,0 +1,26 @@
+project('foreign-0.3-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '0.2.0',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('libc-0.2-rs', required: true)
+libc_rs = dependency('libc-0.2-rs')
+
+_foreign_rs = static_library(
+ 'foreign',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ ],
+ dependencies: [libc_rs],
+)
+
+foreign_dep = declare_dependency(
+ link_with: _foreign_rs,
+)
+
+meson.override_dependency('foreign-0.3-rs', foreign_dep)
diff --git a/subprojects/packagefiles/itertools-0.11-rs/meson.build b/subprojects/packagefiles/itertools-0.11-rs/meson.build
new file mode 100644
index 0000000..2a3fbe9
--- /dev/null
+++ b/subprojects/packagefiles/itertools-0.11-rs/meson.build
@@ -0,0 +1,32 @@
+project('itertools-0.11-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '0.11.0',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('either-1-rs', required: true)
+
+either_dep = dependency('either-1-rs', native: true)
+
+_itertools_rs = static_library(
+ 'itertools',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2018', 'build.rust_std=2018'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'feature="use_std"',
+ '--cfg', 'feature="use_alloc"',
+ ],
+ dependencies: [
+ either_dep,
+ ],
+ native: true,
+)
+
+itertools_dep = declare_dependency(
+ link_with: _itertools_rs,
+)
+
+meson.override_dependency('itertools-0.11-rs', itertools_dep, native: true)
diff --git a/subprojects/packagefiles/libc-0.2-rs/meson.build b/subprojects/packagefiles/libc-0.2-rs/meson.build
new file mode 100644
index 0000000..ac4f80d
--- /dev/null
+++ b/subprojects/packagefiles/libc-0.2-rs/meson.build
@@ -0,0 +1,37 @@
+project('libc-0.2-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '0.2.162',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+_libc_rs = static_library(
+ 'libc',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2015', 'build.rust_std=2015'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'freebsd11',
+ '--cfg', 'libc_priv_mod_use',
+ '--cfg', 'libc_union',
+ '--cfg', 'libc_const_size_of',
+ '--cfg', 'libc_align',
+ '--cfg', 'libc_int128',
+ '--cfg', 'libc_core_cvoid',
+ '--cfg', 'libc_packedN',
+ '--cfg', 'libc_cfg_target_vendor',
+ '--cfg', 'libc_non_exhaustive',
+ '--cfg', 'libc_long_array',
+ '--cfg', 'libc_ptr_addr_of',
+ '--cfg', 'libc_underscore_const_names',
+ '--cfg', 'libc_const_extern_fn',
+ ],
+ dependencies: [],
+)
+
+libc_dep = declare_dependency(
+ link_with: _libc_rs,
+)
+
+meson.override_dependency('libc-0.2-rs', libc_dep)
diff --git a/subprojects/packagefiles/proc-macro-error-1-rs/meson.build b/subprojects/packagefiles/proc-macro-error-1-rs/meson.build
new file mode 100644
index 0000000..10c2741
--- /dev/null
+++ b/subprojects/packagefiles/proc-macro-error-1-rs/meson.build
@@ -0,0 +1,42 @@
+project('proc-macro-error-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.4',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('proc-macro-error-attr-1-rs', required: true)
+subproject('quote-1-rs', required: true)
+subproject('syn-2-rs', required: true)
+subproject('proc-macro2-1-rs', required: true)
+
+proc_macro_error_attr_dep = dependency('proc-macro-error-attr-1-rs', native: true)
+proc_macro2_dep = dependency('proc-macro2-1-rs', native: true)
+quote_dep = dependency('quote-1-rs', native: true)
+syn_dep = dependency('syn-2-rs', native: true)
+
+_proc_macro_error_rs = static_library(
+ 'proc_macro_error',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2018', 'build.rust_std=2018'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'use_fallback',
+ '--cfg', 'feature="syn-error"',
+ '--cfg', 'feature="proc-macro"',
+ '-A', 'non_fmt_panics'
+ ],
+ dependencies: [
+ proc_macro_error_attr_dep,
+ proc_macro2_dep,
+ quote_dep,
+ syn_dep,
+ ],
+ native: true,
+)
+
+proc_macro_error_dep = declare_dependency(
+ link_with: _proc_macro_error_rs,
+)
+
+meson.override_dependency('proc-macro-error-1-rs', proc_macro_error_dep, native: true)
diff --git a/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build b/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build
new file mode 100644
index 0000000..c4c4c5e
--- /dev/null
+++ b/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build
@@ -0,0 +1,34 @@
+project('proc-macro-error-attr-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.4',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('proc-macro2-1-rs', required: true)
+subproject('quote-1-rs', required: true)
+
+proc_macro2_dep = dependency('proc-macro2-1-rs', native: true)
+quote_dep = dependency('quote-1-rs', native: true)
+
+rust = import('rust')
+_proc_macro_error_attr_rs = rust.proc_macro(
+ 'proc_macro_error_attr',
+ files('src/lib.rs'),
+ override_options: ['rust_std=2018', 'build.rust_std=2018'],
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'use_fallback',
+ '--cfg', 'feature="syn-error"',
+ '--cfg', 'feature="proc-macro"'
+ ],
+ dependencies: [
+ proc_macro2_dep,
+ quote_dep,
+ ],
+)
+
+proc_macro_error_attr_dep = declare_dependency(
+ link_with: _proc_macro_error_attr_rs,
+)
+
+meson.override_dependency('proc-macro-error-attr-1-rs', proc_macro_error_attr_dep, native: true)
diff --git a/subprojects/packagefiles/proc-macro2-1-rs/meson.build b/subprojects/packagefiles/proc-macro2-1-rs/meson.build
new file mode 100644
index 0000000..5759df3
--- /dev/null
+++ b/subprojects/packagefiles/proc-macro2-1-rs/meson.build
@@ -0,0 +1,35 @@
+project('proc-macro2-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.84',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('unicode-ident-1-rs', required: true)
+
+unicode_ident_dep = dependency('unicode-ident-1-rs', native: true)
+
+_proc_macro2_rs = static_library(
+ 'proc_macro2',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'feature="proc-macro"',
+ '--cfg', 'no_literal_byte_character',
+ '--cfg', 'no_literal_c_string',
+ '--cfg', 'no_source_text',
+ '--cfg', 'wrap_proc_macro',
+ ],
+ dependencies: [
+ unicode_ident_dep,
+ ],
+ native: true,
+)
+
+proc_macro2_dep = declare_dependency(
+ link_with: _proc_macro2_rs,
+)
+
+meson.override_dependency('proc-macro2-1-rs', proc_macro2_dep, native: true)
diff --git a/subprojects/packagefiles/quote-1-rs/meson.build b/subprojects/packagefiles/quote-1-rs/meson.build
new file mode 100644
index 0000000..bf41fad
--- /dev/null
+++ b/subprojects/packagefiles/quote-1-rs/meson.build
@@ -0,0 +1,31 @@
+project('quote-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.36',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('proc-macro2-1-rs', required: true)
+
+proc_macro2_dep = dependency('proc-macro2-1-rs', native: true)
+
+_quote_rs = static_library(
+ 'quote',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'feature="proc-macro"',
+ ],
+ dependencies: [
+ proc_macro2_dep,
+ ],
+ native: true,
+)
+
+quote_dep = declare_dependency(
+ link_with: _quote_rs,
+)
+
+meson.override_dependency('quote-1-rs', quote_dep, native: true)
diff --git a/subprojects/packagefiles/syn-2-rs/meson.build b/subprojects/packagefiles/syn-2-rs/meson.build
new file mode 100644
index 0000000..a009417
--- /dev/null
+++ b/subprojects/packagefiles/syn-2-rs/meson.build
@@ -0,0 +1,43 @@
+project('syn-2-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '2.0.66',
+ license: 'MIT OR Apache-2.0',
+ default_options: [])
+
+subproject('proc-macro2-1-rs', required: true)
+subproject('quote-1-rs', required: true)
+subproject('unicode-ident-1-rs', required: true)
+
+proc_macro2_dep = dependency('proc-macro2-1-rs', native: true)
+quote_dep = dependency('quote-1-rs', native: true)
+unicode_ident_dep = dependency('unicode-ident-1-rs', native: true)
+
+_syn_rs = static_library(
+ 'syn',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: [
+ '--cap-lints', 'allow',
+ '--cfg', 'feature="full"',
+ '--cfg', 'feature="derive"',
+ '--cfg', 'feature="parsing"',
+ '--cfg', 'feature="printing"',
+ '--cfg', 'feature="clone-impls"',
+ '--cfg', 'feature="proc-macro"',
+ '--cfg', 'feature="extra-traits"',
+ ],
+ dependencies: [
+ quote_dep,
+ proc_macro2_dep,
+ unicode_ident_dep,
+ ],
+ native: true,
+)
+
+syn_dep = declare_dependency(
+ link_with: _syn_rs,
+)
+
+meson.override_dependency('syn-2-rs', syn_dep, native: true)
diff --git a/subprojects/packagefiles/unicode-ident-1-rs/meson.build b/subprojects/packagefiles/unicode-ident-1-rs/meson.build
new file mode 100644
index 0000000..11a5dab
--- /dev/null
+++ b/subprojects/packagefiles/unicode-ident-1-rs/meson.build
@@ -0,0 +1,22 @@
+project('unicode-ident-1-rs', 'rust',
+ meson_version: '>=1.5.0',
+ version: '1.0.12',
+ license: '(MIT OR Apache-2.0) AND Unicode-DFS-2016',
+ default_options: [])
+
+_unicode_ident_rs = static_library(
+ 'unicode_ident',
+ files('src/lib.rs'),
+ gnu_symbol_visibility: 'hidden',
+ override_options: ['rust_std=2021', 'build.rust_std=2021'],
+ rust_abi: 'rust',
+ rust_args: ['--cap-lints', 'allow'],
+ dependencies: [],
+ native: true,
+)
+
+unicode_ident_dep = declare_dependency(
+ link_with: _unicode_ident_rs,
+)
+
+meson.override_dependency('unicode-ident-1-rs', unicode_ident_dep, native: true)
diff --git a/subprojects/proc-macro-error-1-rs.wrap b/subprojects/proc-macro-error-1-rs.wrap
new file mode 100644
index 0000000..59f892f
--- /dev/null
+++ b/subprojects/proc-macro-error-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = proc-macro-error-1.0.4
+source_url = https://crates.io/api/v1/crates/proc-macro-error/1.0.4/download
+source_filename = proc-macro-error-1.0.4.tar.gz
+source_hash = da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c
+#method = cargo
+patch_directory = proc-macro-error-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/proc-macro-error-attr-1-rs.wrap b/subprojects/proc-macro-error-attr-1-rs.wrap
new file mode 100644
index 0000000..5aeb224
--- /dev/null
+++ b/subprojects/proc-macro-error-attr-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = proc-macro-error-attr-1.0.4
+source_url = https://crates.io/api/v1/crates/proc-macro-error-attr/1.0.4/download
+source_filename = proc-macro-error-attr-1.0.4.tar.gz
+source_hash = a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869
+#method = cargo
+patch_directory = proc-macro-error-attr-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/proc-macro2-1-rs.wrap b/subprojects/proc-macro2-1-rs.wrap
new file mode 100644
index 0000000..6c9369f
--- /dev/null
+++ b/subprojects/proc-macro2-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = proc-macro2-1.0.84
+source_url = https://crates.io/api/v1/crates/proc-macro2/1.0.84/download
+source_filename = proc-macro2-1.0.84.0.tar.gz
+source_hash = ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6
+#method = cargo
+patch_directory = proc-macro2-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/quote-1-rs.wrap b/subprojects/quote-1-rs.wrap
new file mode 100644
index 0000000..8b721df
--- /dev/null
+++ b/subprojects/quote-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = quote-1.0.36
+source_url = https://crates.io/api/v1/crates/quote/1.0.36/download
+source_filename = quote-1.0.36.0.tar.gz
+source_hash = 0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7
+#method = cargo
+patch_directory = quote-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/syn-2-rs.wrap b/subprojects/syn-2-rs.wrap
new file mode 100644
index 0000000..d79cf75
--- /dev/null
+++ b/subprojects/syn-2-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = syn-2.0.66
+source_url = https://crates.io/api/v1/crates/syn/2.0.66/download
+source_filename = syn-2.0.66.0.tar.gz
+source_hash = c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5
+#method = cargo
+patch_directory = syn-2-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/subprojects/unicode-ident-1-rs.wrap b/subprojects/unicode-ident-1-rs.wrap
new file mode 100644
index 0000000..50988f6
--- /dev/null
+++ b/subprojects/unicode-ident-1-rs.wrap
@@ -0,0 +1,10 @@
+[wrap-file]
+directory = unicode-ident-1.0.12
+source_url = https://crates.io/api/v1/crates/unicode-ident/1.0.12/download
+source_filename = unicode-ident-1.0.12.tar.gz
+source_hash = 3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b
+#method = cargo
+patch_directory = unicode-ident-1-rs
+
+# bump this version number on every change to meson.build or the patches:
+# v2
diff --git a/system/arch_init.c b/system/arch_init.c
index 79716f9..e857368 100644
--- a/system/arch_init.c
+++ b/system/arch_init.c
@@ -22,29 +22,9 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "qemu/module.h"
-#include "sysemu/arch_init.h"
+#include "system/arch_init.h"
-#ifdef TARGET_SPARC
-int graphic_width = 1024;
-int graphic_height = 768;
-int graphic_depth = 8;
-#elif defined(TARGET_M68K)
-int graphic_width = 800;
-int graphic_height = 600;
-int graphic_depth = 8;
-#else
-int graphic_width = 800;
-int graphic_height = 600;
-int graphic_depth = 32;
-#endif
-
-const uint32_t arch_type = QEMU_ARCH;
-
-void qemu_init_arch_modules(void)
+bool qemu_arch_available(unsigned qemu_arch_mask)
{
-#ifdef CONFIG_MODULES
- module_init_info(qemu_modinfo);
- module_allow_arch(TARGET_NAME);
-#endif
+ return qemu_arch_mask & QEMU_ARCH;
}
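
The arch_init.c change above replaces the exported arch_type global with a qemu_arch_available() predicate over the QEMU_ARCH bit built into the binary. A minimal standalone sketch of that check; the QEMU_ARCH_* values below are illustrative placeholders, not the ones from qemu/arch_init.h:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative architecture bits; the real values come from the QEMU headers. */
    enum { QEMU_ARCH_ARM = 1 << 0, QEMU_ARCH_I386 = 1 << 1, QEMU_ARCH_RISCV = 1 << 2 };
    #define QEMU_ARCH QEMU_ARCH_ARM   /* what the build would define for this binary */

    /* Same body as the new helper in the patch. */
    static bool qemu_arch_available(unsigned qemu_arch_mask)
    {
        return qemu_arch_mask & QEMU_ARCH;
    }

    int main(void)
    {
        printf("arm: %d, i386: %d\n",
               qemu_arch_available(QEMU_ARCH_ARM),
               qemu_arch_available(QEMU_ARCH_I386));
        return 0;
    }
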
diff --git a/system/async-teardown.c b/system/async-teardown.c
index 396963c..9148ee8 100644
--- a/system/async-teardown.c
+++ b/system/async-teardown.c
@@ -26,40 +26,6 @@
static pid_t the_ppid;
-/*
- * Close all open file descriptors.
- */
-static void close_all_open_fd(void)
-{
- struct dirent *de;
- int fd, dfd;
- DIR *dir;
-
-#ifdef CONFIG_CLOSE_RANGE
- int r = close_range(0, ~0U, 0);
- if (!r) {
- /* Success, no need to try other ways. */
- return;
- }
-#endif
-
- dir = opendir("/proc/self/fd");
- if (!dir) {
- /* If /proc is not mounted, there is nothing that can be done. */
- return;
- }
- /* Avoid closing the directory. */
- dfd = dirfd(dir);
-
- for (de = readdir(dir); de; de = readdir(dir)) {
- fd = atoi(de->d_name);
- if (fd != dfd) {
- close(fd);
- }
- }
- closedir(dir);
-}
-
static void hup_handler(int signal)
{
/* Check every second if this process has been reparented. */
@@ -85,9 +51,8 @@ static int async_teardown_fn(void *arg)
/*
* Close all file descriptors that might have been inherited from the
* main qemu process when doing clone, needed to make libvirt happy.
- * Not using close_range for increased compatibility with older kernels.
*/
- close_all_open_fd();
+ qemu_close_all_open_fd(NULL, 0);
/* Set up a handler for SIGHUP and unblock SIGHUP. */
sigaction(SIGHUP, &sa, NULL);
diff --git a/system/balloon.c b/system/balloon.c
index fda7af8..311fa50 100644
--- a/system/balloon.c
+++ b/system/balloon.c
@@ -26,8 +26,8 @@
#include "qemu/osdep.h"
#include "qemu/atomic.h"
-#include "sysemu/kvm.h"
-#include "sysemu/balloon.h"
+#include "system/kvm.h"
+#include "system/balloon.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qmp/qerror.h"
diff --git a/system/bootdevice.c b/system/bootdevice.c
index 2579b26..1845be4 100644
--- a/system/bootdevice.c
+++ b/system/bootdevice.c
@@ -24,10 +24,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
-#include "sysemu/reset.h"
+#include "system/reset.h"
#include "hw/qdev-core.h"
#include "hw/boards.h"
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
deleted file mode 100644
index c951a6c..0000000
--- a/system/cpu-throttle.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/thread.h"
-#include "hw/core/cpu.h"
-#include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
-#include "sysemu/cpu-throttle.h"
-
-/* vcpu throttling controls */
-static QEMUTimer *throttle_timer;
-static unsigned int throttle_percentage;
-
-#define CPU_THROTTLE_PCT_MIN 1
-#define CPU_THROTTLE_PCT_MAX 99
-#define CPU_THROTTLE_TIMESLICE_NS 10000000
-
-static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
-{
- double pct;
- double throttle_ratio;
- int64_t sleeptime_ns, endtime_ns;
-
- if (!cpu_throttle_get_percentage()) {
- return;
- }
-
- pct = (double)cpu_throttle_get_percentage() / 100;
- throttle_ratio = pct / (1 - pct);
- /* Add 1ns to fix double's rounding error (like 0.9999999...) */
- sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
- endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
- while (sleeptime_ns > 0 && !cpu->stop) {
- if (sleeptime_ns > SCALE_MS) {
- qemu_cond_timedwait_bql(cpu->halt_cond,
- sleeptime_ns / SCALE_MS);
- } else {
- bql_unlock();
- g_usleep(sleeptime_ns / SCALE_US);
- bql_lock();
- }
- sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
- }
- qatomic_set(&cpu->throttle_thread_scheduled, 0);
-}
-
-static void cpu_throttle_timer_tick(void *opaque)
-{
- CPUState *cpu;
- double pct;
-
- /* Stop the timer if needed */
- if (!cpu_throttle_get_percentage()) {
- return;
- }
- CPU_FOREACH(cpu) {
- if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
- async_run_on_cpu(cpu, cpu_throttle_thread,
- RUN_ON_CPU_NULL);
- }
- }
-
- pct = (double)cpu_throttle_get_percentage() / 100;
- timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
- CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
-}
-
-void cpu_throttle_set(int new_throttle_pct)
-{
- /*
- * boolean to store whether throttle is already active or not,
- * before modifying throttle_percentage
- */
- bool throttle_active = cpu_throttle_active();
-
- /* Ensure throttle percentage is within valid range */
- new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
- new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
-
- qatomic_set(&throttle_percentage, new_throttle_pct);
-
- if (!throttle_active) {
- cpu_throttle_timer_tick(NULL);
- }
-}
-
-void cpu_throttle_stop(void)
-{
- qatomic_set(&throttle_percentage, 0);
-}
-
-bool cpu_throttle_active(void)
-{
- return (cpu_throttle_get_percentage() != 0);
-}
-
-int cpu_throttle_get_percentage(void)
-{
- return qatomic_read(&throttle_percentage);
-}
-
-void cpu_throttle_init(void)
-{
- throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
- cpu_throttle_timer_tick, NULL);
-}
diff --git a/system/cpu-timers.c b/system/cpu-timers.c
index 0b31c9a..cb35fa6 100644
--- a/system/cpu-timers.c
+++ b/system/cpu-timers.c
@@ -27,16 +27,16 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
#include "hw/core/cpu.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/cpu-throttle.h"
-#include "sysemu/cpu-timers-internal.h"
+#include "system/cpu-timers.h"
+#include "system/cpu-timers-internal.h"
+#include "exec/icount.h"
/* clock and ticks */
@@ -272,6 +272,4 @@ void cpu_timers_init(void)
seqlock_init(&timers_state.vm_clock_seqlock);
qemu_spin_init(&timers_state.vm_clock_lock);
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
-
- cpu_throttle_init();
}
diff --git a/system/cpus.c b/system/cpus.c
index 5e3a988..d16b0df 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -31,18 +31,19 @@
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "exec/gdbstub.h"
-#include "sysemu/hw_accel.h"
+#include "system/accel-ops.h"
+#include "system/hw_accel.h"
#include "exec/cpu-common.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
#include "hw/nmi.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/whpx.h"
+#include "system/replay.h"
+#include "system/runstate.h"
+#include "system/cpu-timers.h"
+#include "system/whpx.h"
#include "hw/boards.h"
#include "hw/hw.h"
#include "trace.h"
@@ -298,14 +299,18 @@ static int do_vm_stop(RunState state, bool send_stop)
if (oldstate == RUN_STATE_RUNNING) {
pause_all_vcpus();
}
- vm_state_notify(0, state);
+ ret = vm_state_notify(0, state);
if (send_stop) {
qapi_event_send_stop();
}
}
bdrv_drain_all();
- ret = bdrv_flush_all();
+ /*
+ * Even if vm_state_notify() returns a failure, it is still better to
+ * flush all block devices as before.
+ */
+ ret |= bdrv_flush_all();
trace_vm_stop_flush_all(ret);
return ret;
@@ -514,6 +519,20 @@ bool qemu_in_vcpu_thread(void)
QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked)
+static uint32_t bql_unlock_blocked;
+
+void bql_block_unlock(bool increase)
+{
+ uint32_t new_value;
+
+ assert(bql_locked());
+
+ /* catch wraparound: increment overflow or decrement below zero */
+ new_value = bql_unlock_blocked + increase - !increase;
+ assert((new_value > bql_unlock_blocked) == increase);
+ bql_unlock_blocked = new_value;
+}
+
bool bql_locked(void)
{
return get_bql_locked();
@@ -524,6 +543,12 @@ bool qemu_in_main_thread(void)
return bql_locked();
}
+void rust_bql_mock_lock(void)
+{
+ error_report("This function should be used only from tests");
+ abort();
+}
+
/*
* The BQL is taken from so many places that it is worth profiling the
* callers directly, instead of funneling them all through a single function.
@@ -540,6 +565,7 @@ void bql_lock_impl(const char *file, int line)
void bql_unlock(void)
{
g_assert(bql_locked());
+ g_assert(!bql_unlock_blocked);
set_bql_locked(false);
qemu_mutex_unlock(&bql);
}
@@ -666,7 +692,6 @@ void qemu_init_vcpu(CPUState *cpu)
{
MachineState *ms = MACHINE(qdev_get_machine());
- cpu->nr_cores = machine_topo_get_cores_per_socket(ms);
cpu->nr_threads = ms->smp.threads;
cpu->stopped = true;
cpu->random_seed = qemu_guest_random_seed_thread_part1();
@@ -792,14 +817,14 @@ int vm_stop_force_state(RunState state)
}
}
-void qmp_memsave(int64_t addr, int64_t size, const char *filename,
+void qmp_memsave(uint64_t addr, uint64_t size, const char *filename,
bool has_cpu, int64_t cpu_index, Error **errp)
{
FILE *f;
- uint32_t l;
+ uint64_t l;
CPUState *cpu;
uint8_t buf[1024];
- int64_t orig_addr = addr, orig_size = size;
+ uint64_t orig_addr = addr, orig_size = size;
if (!has_cpu) {
cpu_index = 0;
@@ -823,7 +848,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
if (l > size)
l = size;
if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
- error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
+ error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRIu64
" specified", orig_addr, orig_size);
goto exit;
}
@@ -840,11 +865,11 @@ exit:
fclose(f);
}
-void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
+void qmp_pmemsave(uint64_t addr, uint64_t size, const char *filename,
Error **errp)
{
FILE *f;
- uint32_t l;
+ uint64_t l;
uint8_t buf[1024];
f = fopen(filename, "wb");
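
Among the cpus.c changes, bql_block_unlock() keeps a counter of callers that currently forbid dropping the BQL, and bql_unlock() now asserts that the counter is zero. A standalone sketch of the same increment/decrement arithmetic and its wraparound assertion (names shortened; not the QEMU API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t unlock_blocked;

    /* increase == true adds one blocker, false removes one; the assertion
     * catches decrementing past zero as well as incrementing past UINT32_MAX. */
    static void block_unlock(bool increase)
    {
        uint32_t new_value = unlock_blocked + increase - !increase;
        assert((new_value > unlock_blocked) == increase);
        unlock_blocked = new_value;
    }

    int main(void)
    {
        block_unlock(true);   /* one blocker registered */
        block_unlock(false);  /* blocker removed, back to zero */
        /* block_unlock(false) here would trip the assertion (underflow to UINT32_MAX) */
        return 0;
    }
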
diff --git a/system/datadir.c b/system/datadir.c
index c9237cb..f96f8fc 100644
--- a/system/datadir.c
+++ b/system/datadir.c
@@ -30,7 +30,7 @@
static const char *data_dir[16];
static int data_dir_idx;
-char *qemu_find_file(int type, const char *name)
+char *qemu_find_file(QemuFileType type, const char *name)
{
int i;
const char *subdir;
@@ -46,6 +46,9 @@ char *qemu_find_file(int type, const char *name)
case QEMU_FILE_TYPE_BIOS:
subdir = "";
break;
+ case QEMU_FILE_TYPE_DTB:
+ subdir = "dtb/";
+ break;
case QEMU_FILE_TYPE_KEYMAP:
subdir = "keymaps/";
break;
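
With the datadir.c change, qemu_find_file() takes a QemuFileType and maps the new QEMU_FILE_TYPE_DTB case to a dtb/ subdirectory of each data directory. A toy standalone version of that mapping; the enum members mirror the hunk, everything else is simplified:

    #include <stdio.h>

    typedef enum {
        QEMU_FILE_TYPE_BIOS,
        QEMU_FILE_TYPE_DTB,
        QEMU_FILE_TYPE_KEYMAP,
    } QemuFileType;

    static const char *subdir_for(QemuFileType type)
    {
        switch (type) {
        case QEMU_FILE_TYPE_BIOS:
            return "";
        case QEMU_FILE_TYPE_DTB:
            return "dtb/";          /* new in this patch */
        case QEMU_FILE_TYPE_KEYMAP:
            return "keymaps/";
        }
        return "";
    }

    int main(void)
    {
        /* e.g. <data dir> + "dtb/" + example-board.dtb */
        printf("%s%s\n", subdir_for(QEMU_FILE_TYPE_DTB), "example-board.dtb");
        return 0;
    }
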
diff --git a/system/device_tree-stub.c b/system/device_tree-stub.c
index bddda6f..428330b 100644
--- a/system/device_tree-stub.c
+++ b/system/device_tree-stub.c
@@ -5,6 +5,9 @@
#ifdef CONFIG_FDT
void qmp_dumpdtb(const char *filename, Error **errp)
{
- error_setg(errp, "This machine doesn't have a FDT");
+ ERRP_GUARD();
+
+ error_setg(errp, "This machine doesn't have an FDT");
+ error_append_hint(errp, "(this machine type never uses an FDT)\n");
}
#endif
diff --git a/system/device_tree.c b/system/device_tree.c
index 2e38259..aa3fe95 100644
--- a/system/device_tree.c
+++ b/system/device_tree.c
@@ -23,12 +23,12 @@
#include "qemu/bswap.h"
#include "qemu/cutils.h"
#include "qemu/guest-random.h"
-#include "sysemu/device_tree.h"
+#include "system/device_tree.h"
#include "hw/loader.h"
#include "hw/boards.h"
#include "qemu/config-file.h"
#include "qapi/qapi-commands-machine.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "monitor/hmp.h"
#include <libfdt.h>
@@ -594,21 +594,6 @@ int qemu_fdt_add_path(void *fdt, const char *path)
return retval;
}
-void qemu_fdt_dumpdtb(void *fdt, int size)
-{
- const char *dumpdtb = current_machine->dumpdtb;
-
- if (dumpdtb) {
- /* Dump the dtb to a file and quit */
- if (g_file_set_contents(dumpdtb, fdt, size, NULL)) {
- info_report("dtb dumped to %s. Exiting.", dumpdtb);
- exit(0);
- }
- error_report("%s: Failed dumping dtb to %s", __func__, dumpdtb);
- exit(1);
- }
-}
-
int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
const char *node_path,
const char *property,
@@ -650,11 +635,16 @@ out:
void qmp_dumpdtb(const char *filename, Error **errp)
{
+ ERRP_GUARD();
+
g_autoptr(GError) err = NULL;
uint32_t size;
if (!current_machine->fdt) {
- error_setg(errp, "This machine doesn't have a FDT");
+ error_setg(errp, "This machine doesn't have an FDT");
+ error_append_hint(errp,
+ "(perhaps this machine type doesn't use an FDT at all, or "
+ "you need to provide one with the -dtb option)\n");
return;
}
diff --git a/system/dirtylimit.c b/system/dirtylimit.c
index ab20da3..b48c0d4 100644
--- a/system/dirtylimit.c
+++ b/system/dirtylimit.c
@@ -13,16 +13,16 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
-#include "sysemu/dirtyrate.h"
-#include "sysemu/dirtylimit.h"
+#include "system/dirtyrate.h"
+#include "system/dirtylimit.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "exec/target_page.h"
#include "hw/boards.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "trace.h"
#include "migration/misc.h"
@@ -80,8 +80,7 @@ static void vcpu_dirty_rate_stat_collect(void)
int i = 0;
int64_t period = DIRTYLIMIT_CALC_TIME_MS;
- if (migrate_dirty_limit() &&
- migration_is_active()) {
+ if (migrate_dirty_limit() && migration_is_running()) {
period = migrate_vcpu_dirty_limit_period();
}
@@ -338,8 +337,6 @@ static void dirtylimit_adjust_throttle(CPUState *cpu)
if (!dirtylimit_done(quota, current)) {
dirtylimit_set_throttle(cpu, quota, current);
}
-
- return;
}
void dirtylimit_process(void)
diff --git a/system/dma-helpers.c b/system/dma-helpers.c
index 7401330..0d592f6 100644
--- a/system/dma-helpers.c
+++ b/system/dma-helpers.c
@@ -8,12 +8,12 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/dma.h"
-#include "trace/trace-root.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
+#include "trace.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "qemu/range.h"
/* #define DEBUG_IOMMU */
@@ -211,7 +211,7 @@ static const AIOCBInfo dma_aiocb_info = {
.cancel_async = dma_aio_cancel,
};
-BlockAIOCB *dma_blk_io(AioContext *ctx,
+BlockAIOCB *dma_blk_io(
QEMUSGList *sg, uint64_t offset, uint32_t align,
DMAIOFunc *io_func, void *io_func_opaque,
BlockCompletionFunc *cb,
@@ -223,7 +223,7 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
dbs->acb = NULL;
dbs->sg = sg;
- dbs->ctx = ctx;
+ dbs->ctx = qemu_get_current_aio_context();
dbs->offset = offset;
dbs->align = align;
dbs->sg_cur_index = 0;
@@ -251,7 +251,7 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+ return dma_blk_io(sg, offset, align,
dma_blk_read_io_func, blk, cb, opaque,
DMA_DIRECTION_FROM_DEVICE);
}
@@ -269,7 +269,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+ return dma_blk_io(sg, offset, align,
dma_blk_write_io_func, blk, cb, opaque,
DMA_DIRECTION_TO_DEVICE);
}
diff --git a/system/globals-target.c b/system/globals-target.c
new file mode 100644
index 0000000..9897205
--- /dev/null
+++ b/system/globals-target.c
@@ -0,0 +1,24 @@
+/*
+ * Global variables that should not exist (target specific)
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "qemu/osdep.h"
+#include "system/system.h"
+
+#ifdef TARGET_SPARC
+int graphic_width = 1024;
+int graphic_height = 768;
+int graphic_depth = 8;
+#elif defined(TARGET_M68K)
+int graphic_width = 800;
+int graphic_height = 600;
+int graphic_depth = 8;
+#else
+int graphic_width = 800;
+int graphic_height = 600;
+int graphic_depth = 32;
+#endif
diff --git a/system/globals.c b/system/globals.c
index d602a04..9640c95 100644
--- a/system/globals.c
+++ b/system/globals.c
@@ -28,19 +28,28 @@
#include "hw/loader.h"
#include "hw/xen/xen.h"
#include "net/net.h"
-#include "sysemu/cpus.h"
-#include "sysemu/sysemu.h"
+#include "system/cpus.h"
+#include "system/system.h"
+
+bool should_mlock(MlockState state)
+{
+ return state == MLOCK_ON || state == MLOCK_ON_FAULT;
+}
+
+bool is_mlock_on_fault(MlockState state)
+{
+ return state == MLOCK_ON_FAULT;
+}
enum vga_retrace_method vga_retrace_method = VGA_RETRACE_DUMB;
int display_opengl;
const char* keyboard_layout;
-bool enable_mlock;
+MlockState mlock_state;
bool enable_cpu_pm;
int autostart = 1;
int vga_interface_type = VGA_NONE;
bool vga_interface_created;
Chardev *parallel_hds[MAX_PARALLEL_PORTS];
-int graphic_rotate;
QEMUOptionRom option_rom[MAX_OPTION_ROMS];
int nb_option_roms;
int old_param;
@@ -49,7 +58,6 @@ unsigned int nb_prom_envs;
const char *prom_envs[MAX_PROM_ENVS];
uint8_t *boot_splash_filedata;
int only_migratable; /* turn it off unless user states otherwise */
-int icount_align_option;
/* The bytes in qemu_uuid are in the order specified by RFC4122, _not_ in the
* little-endian "wire format" described in the SMBIOS 2.6 specification.
diff --git a/system/ioport.c b/system/ioport.c
index fd551d0..4f96e91 100644
--- a/system/ioport.c
+++ b/system/ioport.c
@@ -26,10 +26,9 @@
*/
#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/ioport.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
+#include "system/ioport.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
#include "trace.h"
struct MemoryRegionPortioList {
@@ -258,7 +257,7 @@ static void portio_list_add_1(PortioList *piolist,
object_ref(&mrpio->mr);
object_unparent(OBJECT(&mrpio->mr));
if (!piolist->owner) {
- owner = container_get(qdev_get_machine(), "/unattached");
+ owner = machine_get_container("unattached");
} else {
owner = piolist->owner;
}
diff --git a/system/main.c b/system/main.c
index 9b91d21..b8f7157 100644
--- a/system/main.c
+++ b/system/main.c
@@ -24,26 +24,73 @@
#include "qemu/osdep.h"
#include "qemu-main.h"
-#include "sysemu/sysemu.h"
+#include "qemu/main-loop.h"
+#include "system/replay.h"
+#include "system/system.h"
#ifdef CONFIG_SDL
+/*
+ * SDL insists on wrapping the main() function with its own implementation on
+ * some platforms; it does so via a macro that renames our main function, so
+ * <SDL.h> must be #included here even with no SDL code called from this file.
+ */
#include <SDL.h>
#endif
-int qemu_default_main(void)
+#ifdef CONFIG_DARWIN
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+static void *qemu_default_main(void *opaque)
{
int status;
+ replay_mutex_lock();
+ bql_lock();
status = qemu_main_loop();
qemu_cleanup(status);
+ bql_unlock();
+ replay_mutex_unlock();
- return status;
+ exit(status);
}
-int (*qemu_main)(void) = qemu_default_main;
+int (*qemu_main)(void);
+
+#ifdef CONFIG_DARWIN
+static int os_darwin_cfrunloop_main(void)
+{
+ CFRunLoopRun();
+ g_assert_not_reached();
+}
+int (*qemu_main)(void) = os_darwin_cfrunloop_main;
+#endif
int main(int argc, char **argv)
{
qemu_init(argc, argv);
- return qemu_main();
+
+ /*
+ * qemu_init() acquires the BQL and the replay mutex. The BQL is taken
+ * while initializing the vCPUs, to block the associated threads until
+ * initialization is complete. The replay mutex is taken at initialization
+ * time because it must be held while icount_mode is configured.
+ *
+ * On macOS, the QEMU main event loop runs in a background thread, because
+ * the main thread must be reserved for the UI. We therefore need to
+ * transfer ownership of the locks; the simplest way is to release them
+ * here and reacquire them in qemu_default_main().
+ */
+ bql_unlock();
+ replay_mutex_unlock();
+
+ if (qemu_main) {
+ QemuThread main_loop_thread;
+ qemu_thread_create(&main_loop_thread, "qemu_main",
+ qemu_default_main, NULL, QEMU_THREAD_DETACHED);
+ return qemu_main();
+ } else {
+ qemu_default_main(NULL);
+ g_assert_not_reached();
+ }
}
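
The reworked main() keeps the process main thread free for a platform event loop (CFRunLoopRun() on Darwin) and runs the QEMU main loop on a detached worker thread, which also owns process exit. A stripped-down, QEMU-free sketch of that control flow using plain pthreads:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Stand-in for qemu_default_main(): runs the event loop and exits the process. */
    static void *default_main(void *opaque)
    {
        puts("main loop running on a worker thread");
        sleep(1);                  /* pretend to run qemu_main_loop() */
        exit(0);                   /* the loop thread decides when the process ends */
    }

    int main(void)
    {
        pthread_t worker;

        /* Equivalent of qemu_thread_create(..., QEMU_THREAD_DETACHED). */
        pthread_create(&worker, NULL, default_main, NULL);
        pthread_detach(worker);

        /* Stand-in for CFRunLoopRun(): the first thread just services the platform. */
        for (;;) {
            pause();
        }
    }
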
diff --git a/system/memory-internal.h b/system/memory-internal.h
new file mode 100644
index 0000000..46f758f
--- /dev/null
+++ b/system/memory-internal.h
@@ -0,0 +1,57 @@
+/*
+ * Declarations for functions which are internal to the memory subsystem.
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
+{
+ return fv->dispatch;
+}
+
+static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
+{
+ return flatview_to_dispatch(address_space_to_flatview(as));
+}
+
+FlatView *address_space_get_flatview(AddressSpace *as);
+void flatview_unref(FlatView *view);
+
+extern const MemoryRegionOps unassigned_mem_ops;
+
+void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
+AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
+void address_space_dispatch_compact(AddressSpaceDispatch *d);
+void address_space_dispatch_free(AddressSpaceDispatch *d);
+
+void mtree_print_dispatch(struct AddressSpaceDispatch *d,
+ MemoryRegion *root);
+
+/* returns true if end is big endian. */
+static inline bool devend_big_endian(enum device_endian end)
+{
+ if (end == DEVICE_NATIVE_ENDIAN) {
+ return target_big_endian();
+ }
+ return end == DEVICE_BIG_ENDIAN;
+}
+
+/* enum device_endian to MemOp. */
+static inline MemOp devend_memop(enum device_endian end)
+{
+ return devend_big_endian(end) ? MO_BE : MO_LE;
+}
+
+#endif
+#endif
diff --git a/system/memory.c b/system/memory.c
index 5e6eb45..76b44b8 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -16,7 +16,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
@@ -24,16 +24,16 @@
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"
-
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
-#include "sysemu/kvm.h"
-#include "sysemu/runstate.h"
-#include "sysemu/tcg.h"
+#include "system/ram_addr.h"
+#include "system/kvm.h"
+#include "system/runstate.h"
+#include "system/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+
+#include "memory-internal.h"
//#define DEBUG_UNASSIGNED
@@ -353,15 +353,6 @@ static void flatview_simplify(FlatView *view)
}
}
-static bool memory_region_big_endian(MemoryRegion *mr)
-{
-#if TARGET_BIG_ENDIAN
- return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
-#else
- return mr->ops->endianness == DEVICE_BIG_ENDIAN;
-#endif
-}
-
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
@@ -563,7 +554,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
/* FIXME: support unaligned access? */
access_size = MAX(MIN(size, access_size_max), access_size_min);
access_mask = MAKE_64BIT_MASK(0, access_size * 8);
- if (memory_region_big_endian(mr)) {
+ if (devend_big_endian(mr->ops->endianness)) {
for (i = 0; i < size; i += access_size) {
r |= access_fn(mr, addr + i, value, access_size,
(size - access_size - i) * 8, access_mask, attrs);
@@ -941,6 +932,38 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
}
}
+static void
+flat_range_coalesced_io_notify_listener_add_del(FlatRange *fr,
+ MemoryRegionSection *mrs,
+ MemoryListener *listener,
+ AddressSpace *as, bool add)
+{
+ CoalescedMemoryRange *cmr;
+ MemoryRegion *mr = fr->mr;
+ AddrRange tmp;
+
+ QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
+ tmp = addrrange_shift(cmr->addr,
+ int128_sub(fr->addr.start,
+ int128_make64(fr->offset_in_region)));
+
+ if (!addrrange_intersects(tmp, fr->addr)) {
+ return;
+ }
+ tmp = addrrange_intersection(tmp, fr->addr);
+
+ if (add && listener->coalesced_io_add) {
+ listener->coalesced_io_add(listener, mrs,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
+ } else if (!add && listener->coalesced_io_del) {
+ listener->coalesced_io_del(listener, mrs,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
+ }
+ }
+}
+
static void address_space_update_topology_pass(AddressSpace *as,
const FlatView *old_view,
const FlatView *new_view,
@@ -1206,7 +1229,7 @@ static void memory_region_do_init(MemoryRegion *mr,
char *name_array = g_strdup_printf("%s[*]", escaped_name);
if (!owner) {
- owner = container_get(qdev_get_machine(), "/unattached");
+ owner = machine_get_container("unattached");
}
object_property_add_child(owner, name_array, OBJECT(mr));
@@ -1359,7 +1382,7 @@ static void memory_region_ram_device_write(void *opaque, hwaddr addr,
static const MemoryRegionOps ram_device_mem_ops = {
.read = memory_region_ram_device_read,
.write = memory_region_ram_device_write,
- .endianness = DEVICE_HOST_ENDIAN,
+ .endianness = HOST_BIG_ENDIAN ? DEVICE_BIG_ENDIAN : DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
.max_access_size = 8,
@@ -1380,7 +1403,7 @@ bool memory_region_access_valid(MemoryRegion *mr,
{
if (mr->ops->valid.accepts
&& !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+ qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
", size %u, region '%s', reason: rejected\n",
is_write ? "write" : "read",
addr, size, memory_region_name(mr));
@@ -1388,7 +1411,7 @@ bool memory_region_access_valid(MemoryRegion *mr,
}
if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+ qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
", size %u, region '%s', reason: unaligned\n",
is_write ? "write" : "read",
addr, size, memory_region_name(mr));
@@ -1402,7 +1425,7 @@ bool memory_region_access_valid(MemoryRegion *mr,
if (size > mr->ops->valid.max_access_size
|| size < mr->ops->valid.min_access_size) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+ qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
", size %u, region '%s', reason: invalid size "
"(min:%u max:%u)\n",
is_write ? "write" : "read",
@@ -1604,7 +1627,7 @@ bool memory_region_init_resizeable_ram(MemoryRegion *mr,
return true;
}
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
bool memory_region_init_ram_from_file(MemoryRegion *mr,
Object *owner,
const char *name,
@@ -1648,8 +1671,8 @@ bool memory_region_init_ram_from_fd(MemoryRegion *mr,
mr->readonly = !!(ram_flags & RAM_READONLY);
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
- &err);
+ mr->ram_block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd,
+ offset, false, &err);
if (err) {
mr->size = int128_zero();
object_unparent(OBJECT(mr));
@@ -2083,12 +2106,16 @@ RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
return mr->rdm;
}
-void memory_region_set_ram_discard_manager(MemoryRegion *mr,
- RamDiscardManager *rdm)
+int memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm)
{
g_assert(memory_region_is_ram(mr));
- g_assert(!rdm || !mr->rdm);
+ if (mr->rdm && rdm) {
+ return -EBUSY;
+ }
+
mr->rdm = rdm;
+ return 0;
}
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
@@ -2111,7 +2138,7 @@ bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
MemoryRegionSection *section,
- ReplayRamPopulate replay_fn,
+ ReplayRamDiscardState replay_fn,
void *opaque)
{
RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
@@ -2120,15 +2147,15 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}
-void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamDiscard replay_fn,
- void *opaque)
+int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
{
RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
g_assert(rdmc->replay_discarded);
- rdmc->replay_discarded(rdm, section, replay_fn, opaque);
+ return rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
@@ -2151,18 +2178,14 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
}
/* Called with rcu_read_lock held. */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
- ram_addr_t *ram_addr, bool *read_only,
- bool *mr_has_discard_manager, Error **errp)
+MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
+ Error **errp)
{
MemoryRegion *mr;
hwaddr xlat;
hwaddr len = iotlb->addr_mask + 1;
bool writable = iotlb->perm & IOMMU_WO;
- if (mr_has_discard_manager) {
- *mr_has_discard_manager = false;
- }
/*
* The IOMMU TLB entry we have just covers translation through
* this IOMMU to its immediate target. We need to translate
@@ -2172,7 +2195,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
&xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
if (!memory_region_is_ram(mr)) {
error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
- return false;
+ return NULL;
} else if (memory_region_has_ram_discard_manager(mr)) {
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
MemoryRegionSection tmp = {
@@ -2180,9 +2203,6 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
.offset_within_region = xlat,
.size = int128_make64(len),
};
- if (mr_has_discard_manager) {
- *mr_has_discard_manager = true;
- }
/*
* Malicious VMs can map memory into the IOMMU, which is expected
* to remain discarded. vfio will pin all pages, populating memory.
@@ -2193,7 +2213,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
" via virtio-mem): %" HWADDR_PRIx "",
iotlb->translated_addr);
- return false;
+ return NULL;
}
}
@@ -2203,22 +2223,11 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
*/
if (len & iotlb->addr_mask) {
error_setg(errp, "iommu has granularity incompatible with target AS");
- return false;
- }
-
- if (vaddr) {
- *vaddr = memory_region_get_ram_ptr(mr) + xlat;
- }
-
- if (ram_addr) {
- *ram_addr = memory_region_get_ram_addr(mr) + xlat;
- }
-
- if (read_only) {
- *read_only = !writable || mr->readonly;
+ return NULL;
}
- return true;
+ *xlat_p = xlat;
+ return mr;
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
@@ -2552,7 +2561,8 @@ void memory_region_add_eventfd(MemoryRegion *mr,
unsigned i;
if (size) {
- adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
+ MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
+ adjust_endianness(mr, &mrfd.data, mop);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -2587,7 +2597,8 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned i;
if (size) {
- adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
+ MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
+ adjust_endianness(mr, &mrfd.data, mop);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -3015,8 +3026,10 @@ void memory_global_dirty_log_stop(unsigned int flags)
static void listener_add_address_space(MemoryListener *listener,
AddressSpace *as)
{
+ unsigned i;
FlatView *view;
FlatRange *fr;
+ MemoryRegionIoeventfd *fd;
if (listener->begin) {
listener->begin(listener);
@@ -3041,10 +3054,34 @@ static void listener_add_address_space(MemoryListener *listener,
if (listener->region_add) {
listener->region_add(listener, &section);
}
+
+ /* send coalesced io add notifications */
+ flat_range_coalesced_io_notify_listener_add_del(fr, &section,
+ listener, as, true);
+
if (fr->dirty_log_mask && listener->log_start) {
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
}
}
+
+ /*
+ * register all eventfds for this address space for the newly registered
+ * listener.
+ */
+ for (i = 0; i < as->ioeventfd_nb; i++) {
+ fd = &as->ioeventfds[i];
+ MemoryRegionSection section = (MemoryRegionSection) {
+ .fv = view,
+ .offset_within_address_space = int128_get64(fd->addr.start),
+ .size = fd->addr.size,
+ };
+
+ if (listener->eventfd_add) {
+ listener->eventfd_add(listener, &section,
+ fd->match_data, fd->data, fd->e);
+ }
+ }
+
if (listener->commit) {
listener->commit(listener);
}
@@ -3054,8 +3091,10 @@ static void listener_add_address_space(MemoryListener *listener,
static void listener_del_address_space(MemoryListener *listener,
AddressSpace *as)
{
+ unsigned i;
FlatView *view;
FlatRange *fr;
+ MemoryRegionIoeventfd *fd;
if (listener->begin) {
listener->begin(listener);
@@ -3067,10 +3106,33 @@ static void listener_del_address_space(MemoryListener *listener,
if (fr->dirty_log_mask && listener->log_stop) {
listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
}
+
+ /* send coalesced io del notifications */
+ flat_range_coalesced_io_notify_listener_add_del(fr, &section,
+ listener, as, false);
if (listener->region_del) {
listener->region_del(listener, &section);
}
}
+
+ /*
+ * de-register all eventfds for this address space for the current
+ * listener.
+ */
+ for (i = 0; i < as->ioeventfd_nb; i++) {
+ fd = &as->ioeventfds[i];
+ MemoryRegionSection section = (MemoryRegionSection) {
+ .fv = view,
+ .offset_within_address_space = int128_get64(fd->addr.start),
+ .size = fd->addr.size,
+ };
+
+ if (listener->eventfd_del) {
+ listener->eventfd_del(listener, &section,
+ fd->match_data, fd->data, fd->e);
+ }
+ }
+
if (listener->commit) {
listener->commit(listener);
}
@@ -3148,7 +3210,8 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
as->ioeventfds = NULL;
QTAILQ_INIT(&as->listeners);
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
- as->bounce.in_use = false;
+ as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE;
+ as->bounce_buffer_size = 0;
qemu_mutex_init(&as->map_client_list_lock);
QLIST_INIT(&as->map_client_list);
as->name = g_strdup(name ? name : "anonymous");
@@ -3158,7 +3221,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
static void do_address_space_destroy(AddressSpace *as)
{
- assert(!qatomic_read(&as->bounce.in_use));
+ assert(qatomic_read(&as->bounce_buffer_size) == 0);
assert(QLIST_EMPTY(&as->map_client_list));
qemu_mutex_destroy(&as->map_client_list_lock);
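
Among the memory.c changes, memory_region_set_ram_discard_manager() now returns -EBUSY instead of asserting when a manager is already installed, so callers have to check the result. A small standalone model of that set-once/clear contract, using placeholder types rather than the QEMU ones:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Region { void *rdm; } Region;

    /* Installing over an existing manager fails; passing NULL always clears. */
    static int region_set_manager(Region *r, void *rdm)
    {
        if (r->rdm && rdm) {
            return -EBUSY;
        }
        r->rdm = rdm;
        return 0;
    }

    int main(void)
    {
        Region r = { NULL };
        int a = 1, b = 2;

        printf("%d\n", region_set_manager(&r, &a));   /* 0 */
        printf("%d\n", region_set_manager(&r, &b));   /* -EBUSY */
        printf("%d\n", region_set_manager(&r, NULL)); /* 0, cleared */
        return 0;
    }
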
diff --git a/system/memory_ldst.c.inc b/system/memory_ldst.c.inc
index 0e6f394..7f32d3d 100644
--- a/system/memory_ldst.c.inc
+++ b/system/memory_ldst.c.inc
@@ -34,7 +34,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 4 || !memory_access_is_direct(mr, false)) {
+ if (l < 4 || !memory_access_is_direct(mr, false, attrs)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
@@ -103,7 +103,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 8 || !memory_access_is_direct(mr, false)) {
+ if (l < 8 || !memory_access_is_direct(mr, false, attrs)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
@@ -170,7 +170,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (!memory_access_is_direct(mr, false)) {
+ if (!memory_access_is_direct(mr, false, attrs)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
@@ -207,7 +207,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 2 || !memory_access_is_direct(mr, false)) {
+ if (l < 2 || !memory_access_is_direct(mr, false, attrs)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
@@ -277,7 +277,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 4 || !memory_access_is_direct(mr, true)) {
+ if (l < 4 || !memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
@@ -314,7 +314,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 4 || !memory_access_is_direct(mr, true)) {
+ if (l < 4 || !memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val,
MO_32 | devend_memop(endian), attrs);
@@ -377,7 +377,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (!memory_access_is_direct(mr, true)) {
+ if (!memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
} else {
@@ -410,7 +410,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 2 || !memory_access_is_direct(mr, true)) {
+ if (l < 2 || !memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val,
MO_16 | devend_memop(endian), attrs);
@@ -474,7 +474,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 8 || !memory_access_is_direct(mr, true)) {
+ if (l < 8 || !memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val,
MO_64 | devend_memop(endian), attrs);
diff --git a/system/memory_mapping.c b/system/memory_mapping.c
index 6f884c5..da708a0 100644
--- a/system/memory_mapping.c
+++ b/system/memory_mapping.c
@@ -12,11 +12,12 @@
*/
#include "qemu/osdep.h"
+#include "qemu/range.h"
#include "qapi/error.h"
-#include "sysemu/memory_mapping.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
+#include "system/memory_mapping.h"
+#include "system/memory.h"
+#include "system/address-spaces.h"
#include "hw/core/cpu.h"
//#define DEBUG_GUEST_PHYS_REGION_ADD
@@ -353,8 +354,7 @@ void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
MemoryMapping *cur, *next;
QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
- if (cur->phys_addr >= begin + length ||
- cur->phys_addr + cur->length <= begin) {
+ if (!ranges_overlap(cur->phys_addr, cur->length, begin, length)) {
QTAILQ_REMOVE(&list->head, cur, next);
g_free(cur);
list->num--;
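
memory_mapping_filter() above replaces an open-coded interval test with ranges_overlap() from qemu/range.h. The two are equivalent; a standalone check, where the helper body is simply the negation of the removed condition rather than code copied from QEMU:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* [a, a+alen) and [b, b+blen) overlap iff neither ends before the other starts. */
    static bool ranges_overlap(uint64_t a, uint64_t alen, uint64_t b, uint64_t blen)
    {
        return !(a + alen <= b || b + blen <= a);
    }

    int main(void)
    {
        /* keep: mapping [0x1000, 0x2000) vs filter [0x1800, 0x2800) */
        assert(ranges_overlap(0x1000, 0x1000, 0x1800, 0x1000));
        /* drop: mapping [0x1000, 0x2000) vs filter [0x2000, 0x3000) */
        assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));
        return 0;
    }
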
diff --git a/system/meson.build b/system/meson.build
index a296270..6d21ff9 100644
--- a/system/meson.build
+++ b/system/meson.build
@@ -1,22 +1,26 @@
specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: [files(
'arch_init.c',
- 'ioport.c',
- 'memory.c',
- 'physmem.c',
- 'watchpoint.c',
+ 'globals-target.c',
)])
system_ss.add(files(
+ 'vl.c',
+), sdl, libpmem, libdaxctl)
+
+system_ss.add(files(
'balloon.c',
'bootdevice.c',
'cpus.c',
- 'cpu-throttle.c',
'cpu-timers.c',
'datadir.c',
'dirtylimit.c',
'dma-helpers.c',
'globals.c',
+ 'ioport.c',
+ 'ram-block-attributes.c',
'memory_mapping.c',
+ 'memory.c',
+ 'physmem.c',
'qdev-monitor.c',
'qtest.c',
'rtc.c',
@@ -24,8 +28,8 @@ system_ss.add(files(
'runstate-hmp-cmds.c',
'runstate.c',
'tpm-hmp-cmds.c',
- 'vl.c',
-), sdl, libpmem, libdaxctl)
+ 'watchpoint.c',
+))
if have_tpm
system_ss.add(files('tpm.c'))
diff --git a/system/physmem.c b/system/physmem.c
index 9a3b3a7..ff0ca40 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -28,31 +28,34 @@
#include "qemu/lockable.h"
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/iommu.h"
#endif /* CONFIG_TCG */
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
+#include "exec/translation-block.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
-#include "sysemu/xen.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
-#include "sysemu/qtest.h"
+#include "system/xen.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
+#include "system/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
-#include "exec/memory.h"
-#include "exec/ioport.h"
-#include "sysemu/dma.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/xen-mapcache.h"
+#include "qemu/memfd.h"
+#include "system/memory.h"
+#include "system/ioport.h"
+#include "system/dma.h"
+#include "system/hostmem.h"
+#include "system/hw_accel.h"
+#include "system/xen-mapcache.h"
#include "trace.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
@@ -61,14 +64,16 @@
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
-#include "exec/translate-all.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "qemu/pmem.h"
+#include "qapi/qapi-types-migration.h"
+#include "migration/blocker.h"
+#include "migration/cpr.h"
+#include "migration/options.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
@@ -82,6 +87,8 @@
#include <daxctl/libdaxctl.h>
#endif
+#include "memory-internal.h"
+
//#define DEBUG_SUBPAGE
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
@@ -152,6 +159,7 @@ static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);
+static bool ram_is_cpr_compatible(RAMBlock *rb);
/**
* CPUAddressSpace: all the information a CPU needs about an AddressSpace
@@ -571,7 +579,7 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
is_write, true, &as, attrs);
mr = section.mr;
- if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
+ if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
*plen = MIN(page, *plen);
}
@@ -579,6 +587,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
return mr;
}
+#ifdef CONFIG_TCG
+
typedef struct TCGIOMMUNotifier {
IOMMUNotifier n;
MemoryRegion *mr;
@@ -738,6 +748,33 @@ translate_fail:
return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs)
+{
+ int asidx = cpu_asidx_from_attrs(cpu, attrs);
+ CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
+ AddressSpaceDispatch *d = cpuas->memory_dispatch;
+ int section_index = index & ~TARGET_PAGE_MASK;
+ MemoryRegionSection *ret;
+
+ assert(section_index < d->map.sections_nb);
+ ret = d->map.sections + section_index;
+ assert(ret->mr);
+ assert(ret->mr->ops);
+
+ return ret;
+}
+
+/* Called from RCU critical section */
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section)
+{
+ AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
+ return section - d->map.sections;
+}
+
+#endif /* CONFIG_TCG */
+
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr)
{
@@ -763,6 +800,7 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
if (!cpu->cpu_ases) {
cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
+ cpu->cpu_ases_count = cpu->num_ases;
}
newas = &cpu->cpu_ases[asidx];
@@ -776,6 +814,34 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
}
}
+void cpu_address_space_destroy(CPUState *cpu, int asidx)
+{
+ CPUAddressSpace *cpuas;
+
+ assert(cpu->cpu_ases);
+ assert(asidx >= 0 && asidx < cpu->num_ases);
+ /* KVM cannot currently support multiple address spaces. */
+ assert(asidx == 0 || !kvm_enabled());
+
+ cpuas = &cpu->cpu_ases[asidx];
+ if (tcg_enabled()) {
+ memory_listener_unregister(&cpuas->tcg_as_listener);
+ }
+
+ address_space_destroy(cpuas->as);
+ g_free_rcu(cpuas->as, rcu);
+
+ if (asidx == 0) {
+ /* reset the convenience alias for address space 0 */
+ cpu->as = NULL;
+ }
+
+ if (--cpu->cpu_ases_count == 0) {
+ g_free(cpu->cpu_ases);
+ cpu->cpu_ases = NULL;
+ }
+}
+
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
/* Return the AddressSpace corresponding to the specified index */
@@ -894,13 +960,19 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
DirtyMemoryBlocks *blocks;
- ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
+ ram_addr_t start, first, last;
unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
- ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
- ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
DirtyBitmapSnapshot *snap;
unsigned long page, end, dest;
+ start = memory_region_get_ram_addr(mr);
+ /* We know we're only called for RAM MemoryRegions */
+ assert(start != RAM_ADDR_INVALID);
+ start += offset;
+
+ first = QEMU_ALIGN_DOWN(start, align);
+ last = QEMU_ALIGN_UP(start + length, align);
+
snap = g_malloc0(sizeof(*snap) +
((last - first) >> (TARGET_PAGE_BITS + 3)));
snap->start = first;
@@ -959,14 +1031,6 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
return false;
}
-/* Called from RCU critical section */
-hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section)
-{
- AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
- return section - d->map.sections;
-}
-
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
@@ -1200,7 +1264,7 @@ long qemu_maxrampagesize(void)
return pagesize;
}
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
static int64_t get_file_size(int fd)
{
int64_t size;
@@ -1499,18 +1563,6 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
return offset;
}
-static unsigned long last_ram_page(void)
-{
- RAMBlock *block;
- ram_addr_t last = 0;
-
- RCU_READ_LOCK_GUARD();
- RAMBLOCK_FOREACH(block) {
- last = MAX(last, block->offset + block->max_length);
- }
- return last >> TARGET_PAGE_BITS;
-}
-
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
int ret;
@@ -1637,6 +1689,18 @@ void qemu_ram_unset_idstr(RAMBlock *block)
}
}
+static char *cpr_name(MemoryRegion *mr)
+{
+ const char *mr_name = memory_region_name(mr);
+ g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL;
+
+ if (id) {
+ return g_strdup_printf("%s/%s", id, mr_name);
+ } else {
+ return g_strdup(mr_name);
+ }
+}
+
size_t qemu_ram_pagesize(RAMBlock *rb)
{
return rb->page_size;
@@ -1764,13 +1828,11 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
}
/* Called with ram_list.mutex held */
-static void dirty_memory_extend(ram_addr_t old_ram_size,
- ram_addr_t new_ram_size)
+static void dirty_memory_extend(ram_addr_t new_ram_size)
{
- ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
- DIRTY_MEMORY_BLOCK_SIZE);
- ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
- DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned int old_num_blocks = ram_list.num_dirty_blocks;
+ unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
int i;
/* Only need to extend if block count increased */
@@ -1802,6 +1864,8 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
g_free_rcu(old_blocks, rcu);
}
}
+
+ ram_list.num_dirty_blocks = new_num_blocks;
}
static void ram_block_add(RAMBlock *new_block, Error **errp)
@@ -1811,11 +1875,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
RAMBlock *block;
RAMBlock *last_block = NULL;
bool free_on_error = false;
- ram_addr_t old_ram_size, new_ram_size;
+ ram_addr_t ram_size;
Error *err = NULL;
- old_ram_size = last_ram_page();
-
qemu_mutex_lock_ramlist();
new_block->offset = find_ram_offset(new_block->max_length);
@@ -1847,10 +1909,14 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
if (new_block->flags & RAM_GUEST_MEMFD) {
int ret;
- assert(kvm_enabled());
+ if (!kvm_enabled()) {
+ error_setg(errp, "cannot set up private guest memory for %s: KVM required",
+ object_get_typename(OBJECT(current_machine->cgs)));
+ goto out_free;
+ }
assert(new_block->guest_memfd < 0);
- ret = ram_block_discard_require(true);
+ ret = ram_block_coordinated_discard_require(true);
if (ret < 0) {
error_setg_errno(errp, -ret,
"cannot set up private guest memory: discard currently blocked");
@@ -1864,13 +1930,41 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
qemu_mutex_unlock_ramlist();
goto out_free;
}
- }
- new_ram_size = MAX(old_ram_size,
- (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
- if (new_ram_size > old_ram_size) {
- dirty_memory_extend(old_ram_size, new_ram_size);
+ /*
+ * The attribute bitmap of the RamBlockAttributes defaults to
+ * discarded, which mimics the behavior of kvm_set_phys_mem() when it
+ * calls kvm_set_memory_attributes_private(). This leads to a brief
+ * period of inconsistency between the creation of the RAMBlock and its
+ * mapping into the physical address space. However, this is not
+ * problematic, as no users rely on the attribute status to perform
+ * any actions during this interval.
+ */
+ new_block->attributes = ram_block_attributes_create(new_block);
+ if (!new_block->attributes) {
+ error_setg(errp, "Failed to create ram block attribute");
+ close(new_block->guest_memfd);
+ ram_block_coordinated_discard_require(false);
+ qemu_mutex_unlock_ramlist();
+ goto out_free;
+ }
+
+ /*
+ * Add a specific guest_memfd blocker if a generic one would not be
+ * added by ram_block_add_cpr_blocker.
+ */
+ if (ram_is_cpr_compatible(new_block)) {
+ error_setg(&new_block->cpr_blocker,
+ "Memory region %s uses guest_memfd, "
+ "which is not supported with CPR.",
+ memory_region_name(new_block->mr));
+ migrate_add_blocker_modes(&new_block->cpr_blocker, errp,
+ MIG_MODE_CPR_TRANSFER, -1);
+ }
}
+
+ ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
+ dirty_memory_extend(ram_size);
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
@@ -1923,19 +2017,28 @@ out_free:
}
}
-#ifdef CONFIG_POSIX
-RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
+RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size,
+ qemu_ram_resize_cb resized, MemoryRegion *mr,
uint32_t ram_flags, int fd, off_t offset,
+ bool grow,
Error **errp)
{
+ ERRP_GUARD();
RAMBlock *new_block;
Error *local_err = NULL;
- int64_t file_size, file_align;
+ int64_t file_size, file_align, share_flags;
+
+ share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED);
+ assert(share_flags != (RAM_SHARED | RAM_PRIVATE));
+ ram_flags &= ~RAM_PRIVATE;
/* Just support these ram flags by now. */
assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
- RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0);
+ RAM_READONLY_FD | RAM_GUEST_MEMFD |
+ RAM_RESIZEABLE)) == 0);
+ assert(max_size >= size);
if (xen_enabled()) {
error_setg(errp, "-mem-path not supported with Xen");
@@ -1950,12 +2053,16 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
size = TARGET_PAGE_ALIGN(size);
size = REAL_HOST_PAGE_ALIGN(size);
+ max_size = TARGET_PAGE_ALIGN(max_size);
+ max_size = REAL_HOST_PAGE_ALIGN(max_size);
file_size = get_file_size(fd);
- if (file_size > offset && file_size < (offset + size)) {
- error_setg(errp, "backing store size 0x%" PRIx64
- " does not match 'size' option 0x" RAM_ADDR_FMT,
- file_size, size);
+ if (file_size && file_size < offset + max_size && !grow) {
+ error_setg(errp, "%s backing store size 0x%" PRIx64
+ " is too small for 'size' option 0x" RAM_ADDR_FMT
+ " plus 'offset' option 0x%" PRIx64,
+ memory_region_name(mr), file_size, max_size,
+ (uint64_t)offset);
return NULL;
}
@@ -1970,11 +2077,13 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
new_block = g_malloc0(sizeof(*new_block));
new_block->mr = mr;
new_block->used_length = size;
- new_block->max_length = size;
+ new_block->max_length = max_size;
+ new_block->resized = resized;
new_block->flags = ram_flags;
new_block->guest_memfd = -1;
- new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset,
- errp);
+ new_block->host = file_ram_alloc(new_block, max_size, fd,
+ file_size < offset + max_size,
+ offset, errp);
if (!new_block->host) {
g_free(new_block);
return NULL;
@@ -2026,7 +2135,8 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
return NULL;
}
- block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp);
+ block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset,
+ false, errp);
if (!block) {
if (created) {
unlink(mem_path);
@@ -2039,21 +2149,98 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
}
#endif
+#ifdef CONFIG_POSIX
+/*
+ * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so it can be
+ * shared with another process if CPR is being used. Use memfd if available
+ * because it has no size limits, else use POSIX shm.
+ */
+static int qemu_ram_get_shared_fd(const char *name, bool *reused, Error **errp)
+{
+ int fd = cpr_find_fd(name, 0);
+
+ if (fd >= 0) {
+ *reused = true;
+ return fd;
+ }
+
+ if (qemu_memfd_check(0)) {
+ fd = qemu_memfd_create(name, 0, 0, 0, 0, errp);
+ } else {
+ fd = qemu_shm_alloc(0, errp);
+ }
+
+ if (fd >= 0) {
+ cpr_save_fd(name, 0, fd);
+ }
+ *reused = false;
+ return fd;
+}
+#endif
+
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
+ qemu_ram_resize_cb resized,
void *host, uint32_t ram_flags,
MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
Error *local_err = NULL;
- int align;
+ int align, share_flags;
+
+ share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED);
+ assert(share_flags != (RAM_SHARED | RAM_PRIVATE));
+ ram_flags &= ~RAM_PRIVATE;
assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
assert(!host ^ (ram_flags & RAM_PREALLOC));
+ assert(max_size >= size);
+
+ /* ignore RAM_SHARED for Windows and emscripten */
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
+ if (!host) {
+ if (!share_flags && current_machine->aux_ram_share) {
+ ram_flags |= RAM_SHARED;
+ }
+ if (ram_flags & RAM_SHARED) {
+ bool reused;
+ g_autofree char *name = cpr_name(mr);
+ int fd = qemu_ram_get_shared_fd(name, &reused, errp);
+
+ if (fd < 0) {
+ return NULL;
+ }
+
+ /* Use same alignment as qemu_anon_ram_alloc */
+ mr->align = QEMU_VMALLOC_ALIGN;
+
+ /*
+ * This can fail if the shm mount size is too small, or alloc from
+ * fd is not supported, but previous QEMU versions that called
+ * qemu_anon_ram_alloc for anonymous shared memory could have
+ * succeeded. Quietly fail and fall back.
+ *
+ * After cpr-transfer, new QEMU could create a memory region
+ * with a larger max size than old, so pass reused to grow the
+ * region if necessary. The extra space will be usable after a
+ * guest reset.
+ */
+ new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr,
+ ram_flags, fd, 0, reused, NULL);
+ if (new_block) {
+ trace_qemu_ram_alloc_shared(name, new_block->used_length,
+ new_block->max_length, fd,
+ new_block->host);
+ return new_block;
+ }
+
+ cpr_delete_fd(name, 0);
+ close(fd);
+ /* fall back to anon allocation */
+ }
+ }
+#endif
align = qemu_real_host_page_size();
align = MAX(align, TARGET_PAGE_SIZE);
@@ -2065,7 +2252,6 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
new_block->resized = resized;
new_block->used_length = size;
new_block->max_length = max_size;
- assert(max_size >= size);
new_block->fd = -1;
new_block->guest_memfd = -1;
new_block->page_size = qemu_real_host_page_size();
@@ -2090,15 +2276,14 @@ RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
MemoryRegion *mr, Error **errp)
{
- assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
+ assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD |
+ RAM_PRIVATE)) == 0);
return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
}
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
- MemoryRegion *mr, Error **errp)
+ qemu_ram_resize_cb resized,
+ MemoryRegion *mr, Error **errp)
{
return qemu_ram_alloc_internal(size, maxsz, resized, NULL,
RAM_RESIZEABLE, mr, errp);
@@ -2110,7 +2295,7 @@ static void reclaim_ramblock(RAMBlock *block)
;
} else if (xen_enabled()) {
xen_invalidate_map_cache_entry(block->host);
-#ifndef _WIN32
+#if !defined(_WIN32) && !defined(EMSCRIPTEN)
} else if (block->fd >= 0) {
qemu_ram_munmap(block->fd, block->host, block->max_length);
close(block->fd);
@@ -2120,8 +2305,9 @@ static void reclaim_ramblock(RAMBlock *block)
}
if (block->guest_memfd >= 0) {
+ ram_block_attributes_destroy(block->attributes);
close(block->guest_memfd);
- ram_block_discard_require(false);
+ ram_block_coordinated_discard_require(false);
}
g_free(block);
@@ -2129,6 +2315,8 @@ static void reclaim_ramblock(RAMBlock *block)
void qemu_ram_free(RAMBlock *block)
{
+ g_autofree char *name = NULL;
+
if (!block) {
return;
}
@@ -2139,6 +2327,8 @@ void qemu_ram_free(RAMBlock *block)
}
qemu_mutex_lock_ramlist();
+ name = cpr_name(block->mr);
+ cpr_delete_fd(name, 0);
QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
/* Write list before version */
@@ -2149,45 +2339,80 @@ void qemu_ram_free(RAMBlock *block)
}
#ifndef _WIN32
-void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
+/* Simply remap the given VM memory location from start to start+length */
+static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length)
+{
+ int flags, prot;
+ void *area;
+ void *host_startaddr = block->host + start;
+
+ assert(block->fd < 0);
+ flags = MAP_FIXED | MAP_ANONYMOUS;
+ flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE;
+ flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
+ prot = PROT_READ;
+ prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
+ area = mmap(host_startaddr, length, prot, flags, -1, 0);
+ return area != host_startaddr ? -errno : 0;
+}
+
+/*
+ * qemu_ram_remap - remap a single RAM page
+ *
+ * @addr: address in ram_addr_t address space.
+ *
+ * This function will try remapping a single page of guest RAM identified by
+ * @addr, essentially discarding memory to recover from previously poisoned
+ * memory (MCE). The page size depends on the RAMBlock (e.g., hugetlb). @addr
+ * does not have to point at the start of the page.
+ *
+ * This function is only to be used during system resets; it will kill the
+ * VM if remapping fails.
+ */
+void qemu_ram_remap(ram_addr_t addr)
{
RAMBlock *block;
- ram_addr_t offset;
- int flags;
- void *area, *vaddr;
- int prot;
+ uint64_t offset;
+ void *vaddr;
+ size_t page_size;
RAMBLOCK_FOREACH(block) {
offset = addr - block->offset;
if (offset < block->max_length) {
+ /* Respect the pagesize of our RAMBlock */
+ page_size = qemu_ram_pagesize(block);
+ offset = QEMU_ALIGN_DOWN(offset, page_size);
+
vaddr = ramblock_ptr(block, offset);
if (block->flags & RAM_PREALLOC) {
;
} else if (xen_enabled()) {
abort();
} else {
- flags = MAP_FIXED;
- flags |= block->flags & RAM_SHARED ?
- MAP_SHARED : MAP_PRIVATE;
- flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
- prot = PROT_READ;
- prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
- if (block->fd >= 0) {
- area = mmap(vaddr, length, prot, flags, block->fd,
- offset + block->fd_offset);
- } else {
- flags |= MAP_ANONYMOUS;
- area = mmap(vaddr, length, prot, flags, -1, 0);
- }
- if (area != vaddr) {
- error_report("Could not remap addr: "
- RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
- length, addr);
- exit(1);
+ if (ram_block_discard_range(block, offset, page_size) != 0) {
+ /*
+ * Fall back to using mmap() only for anonymous mappings;
+ * if a backing file is associated, we may not be able to
+ * recover the memory in all cases, so don't take the risk
+ * of using only mmap() and fail now instead.
+ */
+ if (block->fd >= 0) {
+ error_report("Could not remap RAM %s:%" PRIx64 "+%"
+ PRIx64 " +%zx", block->idstr, offset,
+ block->fd_offset, page_size);
+ exit(1);
+ }
+ if (qemu_ram_remap_mmap(block, offset, page_size) != 0) {
+ error_report("Could not remap RAM %s:%" PRIx64 " +%zx",
+ block->idstr, offset, page_size);
+ exit(1);
+ }
}
- memory_try_enable_merging(vaddr, length);
- qemu_ram_setup_dump(vaddr, length);
+ memory_try_enable_merging(vaddr, page_size);
+ qemu_ram_setup_dump(vaddr, page_size);
}
+
+ break;
}
}
}
@@ -2485,23 +2710,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
return phys_section_add(map, &section);
}
-MemoryRegionSection *iotlb_to_section(CPUState *cpu,
- hwaddr index, MemTxAttrs attrs)
-{
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
- AddressSpaceDispatch *d = cpuas->memory_dispatch;
- int section_index = index & ~TARGET_PAGE_MASK;
- MemoryRegionSection *ret;
-
- assert(section_index < d->map.sections_nb);
- ret = d->map.sections + section_index;
- assert(ret->mr);
- assert(ret->mr->ops);
-
- return ret;
-}
-
static void io_mem_init(void)
{
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
@@ -2630,7 +2838,11 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr length)
{
uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
- addr += memory_region_get_ram_addr(mr);
+ ram_addr_t ramaddr = memory_region_get_ram_addr(mr);
+
+ /* We know we're only called for RAM MemoryRegions */
+ assert(ramaddr != RAM_ADDR_INVALID);
+ addr += ramaddr;
/* No early return if dirty_log_mask is or becomes 0, because
* cpu_physical_memory_set_dirty_range will still call
@@ -2642,7 +2854,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
- tb_invalidate_phys_range(addr, addr + length - 1);
+ tb_invalidate_phys_range(NULL, addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
@@ -2723,7 +2935,7 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
if (memory_region_is_ram(mr)) {
return true;
}
- qemu_log_mask(LOG_GUEST_ERROR,
+ qemu_log_mask(LOG_INVALID_MEM,
"Invalid access to non-RAM device at "
"addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
"region '%s'\n", addr, len, memory_region_name(mr));
@@ -2739,7 +2951,7 @@ static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
return MEMTX_ACCESS_ERROR;
}
- if (!memory_access_is_direct(mr, true)) {
+ if (!memory_access_is_direct(mr, true, attrs)) {
uint64_t val;
MemTxResult result;
bool release_lock = prepare_mmio_access(mr);
@@ -2835,7 +3047,7 @@ static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
return MEMTX_ACCESS_ERROR;
}
- if (!memory_access_is_direct(mr, false)) {
+ if (!memory_access_is_direct(mr, false, attrs)) {
/* I/O case */
uint64_t val;
MemTxResult result;
@@ -3007,8 +3219,7 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
- if (!(memory_region_is_ram(mr) ||
- memory_region_is_romd(mr))) {
+ if (!memory_region_supports_direct_access(mr)) {
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
@@ -3056,6 +3267,20 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len)
NULL, len, FLUSH_CACHE);
}
+/*
+ * A magic value stored in the first 8 bytes of the bounce buffer struct. Used
+ * to detect illegal pointers passed to address_space_unmap.
+ */
+#define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed
+
+typedef struct {
+ uint64_t magic;
+ MemoryRegion *mr;
+ hwaddr addr;
+ size_t len;
+ uint8_t buffer[];
+} BounceBuffer;
+
static void
address_space_unregister_map_client_do(AddressSpaceMapClient *client)
{
@@ -3081,9 +3306,9 @@ void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
QEMU_LOCK_GUARD(&as->map_client_list_lock);
client->bh = bh;
QLIST_INSERT_HEAD(&as->map_client_list, client, link);
- /* Write map_client_list before reading in_use. */
+ /* Write map_client_list before reading bounce_buffer_size. */
smp_mb();
- if (!qatomic_read(&as->bounce.in_use)) {
+ if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) {
address_space_notify_map_clients_locked(as);
}
}
@@ -3131,7 +3356,7 @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
while (len > 0) {
l = len;
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
- if (!memory_access_is_direct(mr, is_write)) {
+ if (!memory_access_is_direct(mr, is_write, attrs)) {
l = memory_access_size(mr, l, addr);
if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
return false;
@@ -3211,29 +3436,41 @@ void *address_space_map(AddressSpace *as,
fv = address_space_to_flatview(as);
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
- if (!memory_access_is_direct(mr, is_write)) {
- if (qatomic_xchg(&as->bounce.in_use, true)) {
+ if (!memory_access_is_direct(mr, is_write, attrs)) {
+ size_t used = qatomic_read(&as->bounce_buffer_size);
+ for (;;) {
+ hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l);
+ size_t new_size = used + alloc;
+ size_t actual =
+ qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size);
+ if (actual == used) {
+ l = alloc;
+ break;
+ }
+ used = actual;
+ }
+
+ if (l == 0) {
*plen = 0;
return NULL;
}
- /* Avoid unbounded allocations */
- l = MIN(l, TARGET_PAGE_SIZE);
- as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
- as->bounce.addr = addr;
- as->bounce.len = l;
+ BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer));
+ bounce->magic = BOUNCE_BUFFER_MAGIC;
memory_region_ref(mr);
- as->bounce.mr = mr;
+ bounce->mr = mr;
+ bounce->addr = addr;
+ bounce->len = l;
+
if (!is_write) {
- flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
- as->bounce.buffer, l);
+ flatview_read(fv, addr, attrs,
+ bounce->buffer, l);
}
*plen = l;
- return as->bounce.buffer;
+ return bounce->buffer;
}
-
memory_region_ref(mr);
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
l, is_write, attrs);
@@ -3248,12 +3485,11 @@ void *address_space_map(AddressSpace *as,
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bool is_write, hwaddr access_len)
{
- if (buffer != as->bounce.buffer) {
- MemoryRegion *mr;
- ram_addr_t addr1;
+ MemoryRegion *mr;
+ ram_addr_t addr1;
- mr = memory_region_from_host(buffer, &addr1);
- assert(mr != NULL);
+ mr = memory_region_from_host(buffer, &addr1);
+ if (mr != NULL) {
if (is_write) {
invalidate_and_set_dirty(mr, addr1, access_len);
}
@@ -3263,15 +3499,22 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
memory_region_unref(mr);
return;
}
+
+
+ BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer);
+ assert(bounce->magic == BOUNCE_BUFFER_MAGIC);
+
if (is_write) {
- address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED,
- as->bounce.buffer, access_len);
- }
- qemu_vfree(as->bounce.buffer);
- as->bounce.buffer = NULL;
- memory_region_unref(as->bounce.mr);
- /* Clear in_use before reading map_client_list. */
- qatomic_set_mb(&as->bounce.in_use, false);
+ address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED,
+ bounce->buffer, access_len);
+ }
+
+ qatomic_sub(&as->bounce_buffer_size, bounce->len);
+ bounce->magic = ~BOUNCE_BUFFER_MAGIC;
+ memory_region_unref(bounce->mr);
+ g_free(bounce);
+ /* Write bounce_buffer_size before reading map_client_list. */
+ smp_mb();
address_space_notify_map_clients(as);
}
@@ -3326,7 +3569,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
mr = cache->mrs.mr;
memory_region_ref(mr);
- if (memory_access_is_direct(mr, is_write)) {
+ if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) {
/* We don't care about the memory attributes here as we're only
* doing this if we found actual RAM, which behaves the same
* regardless of attributes; so UNSPECIFIED is fine.
@@ -3519,13 +3762,8 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
if (l > len)
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
- if (is_write) {
- res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
- } else {
- res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
- }
+ res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
+ l, is_write);
if (res != MEMTX_OK) {
return -1;
}
@@ -3635,18 +3873,19 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
}
ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, length);
+ start + rb->fd_offset, length);
if (ret) {
ret = -errno;
- error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
- __func__, rb->idstr, start, length, ret);
+ error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64
+ " +%zx (%d)", __func__, rb->idstr, start,
+ rb->fd_offset, length, ret);
goto err;
}
#else
ret = -ENOSYS;
error_report("%s: fallocate not available/file"
- "%s:%" PRIx64 " +%zx (%d)",
- __func__, rb->idstr, start, length, ret);
+ "%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__,
+ rb->idstr, start, rb->fd_offset, length, ret);
goto err;
#endif
}
@@ -3693,6 +3932,7 @@ int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
int ret = -1;
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
+ /* ignore fd_offset with guest_memfd */
ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, length);
@@ -3897,3 +4137,58 @@ bool ram_block_discard_is_required(void)
return qatomic_read(&ram_block_discard_required_cnt) ||
qatomic_read(&ram_block_coordinated_discard_required_cnt);
}
+
+/*
+ * Return true if ram is compatible with CPR. Do not exclude rom,
+ * because the rom file could change in new QEMU.
+ */
+static bool ram_is_cpr_compatible(RAMBlock *rb)
+{
+ MemoryRegion *mr = rb->mr;
+
+ if (!mr || !memory_region_is_ram(mr)) {
+ return true;
+ }
+
+ /* Ram device is remapped in new QEMU */
+ if (memory_region_is_ram_device(mr)) {
+ return true;
+ }
+
+ /*
+ * A file descriptor is passed to new QEMU and remapped, or its backing
+ * file is reopened and mapped. It must be shared to avoid COW.
+ */
+ if (rb->fd >= 0 && qemu_ram_is_shared(rb)) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Add a blocker for each volatile ram block. This function should only be
+ * called after we know that the block is migratable. Non-migratable blocks
+ * are either re-created in new QEMU, or are handled specially, or are covered
+ * by a device-level CPR blocker.
+ */
+void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp)
+{
+ assert(qemu_ram_is_migratable(rb));
+
+ if (ram_is_cpr_compatible(rb)) {
+ return;
+ }
+
+ error_setg(&rb->cpr_blocker,
+ "Memory region %s is not compatible with CPR. share=on is "
+ "required for memory-backend objects, and aux-ram-share=on is "
+ "required.", memory_region_name(rb->mr));
+ migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER,
+ -1);
+}
+
+void ram_block_del_cpr_blocker(RAMBlock *rb)
+{
+ migrate_del_blocker(&rb->cpr_blocker);
+}
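
Aside on the address_space_map() changes above: the single shared bounce buffer guarded by
bounce.in_use is replaced by per-mapping BounceBuffer allocations, charged against
max_bounce_buffer_size through a compare-and-swap loop. A rough standalone illustration of
that reservation pattern, using C11 atomics and an invented 4 KiB limit instead of QEMU's
qatomic wrappers, might be:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_BOUNCE_BUFFER_SIZE 4096   /* invented limit for the sketch */

    static _Atomic size_t bounce_buffer_size;

    /* Try to reserve up to @want bytes; returns how much was actually granted. */
    static size_t bounce_reserve(size_t want)
    {
        size_t used = atomic_load(&bounce_buffer_size);
        for (;;) {
            size_t room = MAX_BOUNCE_BUFFER_SIZE - used;
            size_t grant = want < room ? want : room;
            if (atomic_compare_exchange_weak(&bounce_buffer_size, &used,
                                             used + grant)) {
                return grant;   /* may be 0 when the budget is exhausted */
            }
            /* the failed CAS reloaded the current value into 'used'; retry */
        }
    }

    static void bounce_release(size_t len)
    {
        atomic_fetch_sub(&bounce_buffer_size, len);
    }

    int main(void)
    {
        size_t a = bounce_reserve(3000);   /* granted 3000 */
        size_t b = bounce_reserve(3000);   /* granted only the remaining 1096 */
        printf("granted %zu then %zu\n", a, b);
        bounce_release(b);
        bounce_release(a);
        return 0;
    }
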
diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c
index 6af6ef7..5588ed2 100644
--- a/system/qdev-monitor.c
+++ b/system/qdev-monitor.c
@@ -22,13 +22,14 @@
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "monitor/qdev.h"
-#include "sysemu/arch_init.h"
+#include "system/arch_init.h"
+#include "system/runstate.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-qdev.h"
-#include "qapi/qmp/dispatch.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qmp-registry.h"
+#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
@@ -36,7 +37,7 @@
#include "qemu/option.h"
#include "qemu/qemu-print.h"
#include "qemu/option_int.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "migration/misc.h"
#include "qemu/cutils.h"
#include "hw/qdev-properties.h"
@@ -55,12 +56,18 @@ typedef struct QDevAlias
} QDevAlias;
/* default virtio transport per architecture */
-#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | QEMU_ARCH_ARM | \
- QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \
- QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \
- QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \
- QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \
- QEMU_ARCH_LOONGARCH)
+#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | \
+ QEMU_ARCH_ARM | \
+ QEMU_ARCH_HPPA | \
+ QEMU_ARCH_I386 | \
+ QEMU_ARCH_LOONGARCH | \
+ QEMU_ARCH_MIPS | \
+ QEMU_ARCH_OPENRISC | \
+ QEMU_ARCH_PPC | \
+ QEMU_ARCH_RISCV | \
+ QEMU_ARCH_SH4 | \
+ QEMU_ARCH_SPARC | \
+ QEMU_ARCH_XTENSA)
#define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X)
#define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K)
@@ -125,7 +132,7 @@ static const char *qdev_class_get_alias(DeviceClass *dc)
for (i = 0; qdev_alias_table[i].typename; i++) {
if (qdev_alias_table[i].arch_mask &&
- !(qdev_alias_table[i].arch_mask & arch_type)) {
+ !qemu_arch_available(qdev_alias_table[i].arch_mask)) {
continue;
}
@@ -211,7 +218,7 @@ static const char *find_typename_by_alias(const char *alias)
for (i = 0; qdev_alias_table[i].alias; i++) {
if (qdev_alias_table[i].arch_mask &&
- !(qdev_alias_table[i].arch_mask & arch_type)) {
+ !qemu_arch_available(qdev_alias_table[i].arch_mask)) {
continue;
}
@@ -256,8 +263,7 @@ static DeviceClass *qdev_get_device_class(const char **driver, Error **errp)
}
dc = DEVICE_CLASS(oc);
- if (!dc->user_creatable ||
- (phase_check(PHASE_MACHINE_READY) && !dc->hotpluggable)) {
+ if (!dc->user_creatable) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver",
"a pluggable device type");
return NULL;
@@ -341,7 +347,7 @@ static Object *qdev_get_peripheral(void)
static Object *dev;
if (dev == NULL) {
- dev = container_get(qdev_get_machine(), "/peripheral");
+ dev = machine_get_container("peripheral");
}
return dev;
@@ -352,7 +358,7 @@ static Object *qdev_get_peripheral_anon(void)
static Object *dev;
if (dev == NULL) {
- dev = container_get(qdev_get_machine(), "/peripheral-anon");
+ dev = machine_get_container("peripheral-anon");
}
return dev;
@@ -624,6 +630,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
char *id;
DeviceState *dev = NULL;
BusState *bus = NULL;
+ QDict *properties;
driver = qdict_get_try_str(opts, "driver");
if (!driver) {
@@ -668,12 +675,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
return NULL;
}
- if (phase_check(PHASE_MACHINE_READY) && bus && !qbus_is_hotpluggable(bus)) {
- error_setg(errp, "Bus '%s' does not support hotplugging", bus->name);
- return NULL;
- }
-
- if (!migration_is_idle()) {
+ if (migration_is_running()) {
error_setg(errp, "device_add not allowed while migrating");
return NULL;
}
@@ -682,17 +684,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
dev = qdev_new(driver);
/* Check whether the hotplug is allowed by the machine */
- if (phase_check(PHASE_MACHINE_READY)) {
- if (!qdev_hotplug_allowed(dev, errp)) {
- goto err_del_dev;
- }
-
- if (!bus && !qdev_get_machine_hotplug_handler(dev)) {
- /* No bus, no machine hotplug handler --> device is not hotpluggable */
- error_setg(errp, "Device '%s' can not be hotplugged on this machine",
- driver);
- goto err_del_dev;
- }
+ if (phase_check(PHASE_MACHINE_READY) &&
+ !qdev_hotplug_allowed(dev, bus, errp)) {
+ goto err_del_dev;
}
/*
@@ -705,13 +699,14 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
}
/* set properties */
- dev->opts = qdict_clone_shallow(opts);
- qdict_del(dev->opts, "driver");
- qdict_del(dev->opts, "bus");
- qdict_del(dev->opts, "id");
+ properties = qdict_clone_shallow(opts);
+ qdict_del(properties, "driver");
+ qdict_del(properties, "bus");
+ qdict_del(properties, "id");
- object_set_properties_from_keyval(&dev->parent_obj, dev->opts, from_json,
+ object_set_properties_from_keyval(&dev->parent_obj, properties, from_json,
errp);
+ qobject_unref(properties);
if (*errp) {
goto err_del_dev;
}
@@ -745,19 +740,18 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp)
#define qdev_printf(fmt, ...) monitor_printf(mon, "%*s" fmt, indent, "", ## __VA_ARGS__)
-static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props,
+static void qdev_print_props(Monitor *mon, DeviceState *dev, DeviceClass *dc,
int indent)
{
- if (!props)
- return;
- for (; props->name; props++) {
+ for (int i = 0, n = dc->props_count_; i < n; ++i) {
+ const Property *prop = &dc->props_[i];
char *value;
- char *legacy_name = g_strdup_printf("legacy-%s", props->name);
+ char *legacy_name = g_strdup_printf("legacy-%s", prop->name);
if (object_property_get_type(OBJECT(dev), legacy_name, NULL)) {
value = object_property_get_str(OBJECT(dev), legacy_name, NULL);
} else {
- value = object_property_print(OBJECT(dev), props->name, true,
+ value = object_property_print(OBJECT(dev), prop->name, true,
NULL);
}
g_free(legacy_name);
@@ -765,7 +759,7 @@ static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props,
if (!value) {
continue;
}
- qdev_printf("%s = %s\n", props->name,
+ qdev_printf("%s = %s\n", prop->name,
*value ? value : "<null>");
g_free(value);
}
@@ -805,7 +799,7 @@ static void qdev_print(Monitor *mon, DeviceState *dev, int indent)
}
class = object_get_class(OBJECT(dev));
do {
- qdev_print_props(mon, dev, DEVICE_CLASS(class)->props_, indent);
+ qdev_print_props(mon, dev, DEVICE_CLASS(class), indent);
class = object_class_get_parent(class);
} while (class != object_class_by_name(TYPE_DEVICE));
bus_print_dev(dev->parent_bus, mon, dev, indent);
@@ -849,18 +843,9 @@ void hmp_info_qdm(Monitor *mon, const QDict *qdict)
void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp)
{
- QemuOpts *opts;
DeviceState *dev;
- opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, errp);
- if (!opts) {
- return;
- }
- if (!monitor_cur_is_qmp() && qdev_device_help(opts)) {
- qemu_opts_del(opts);
- return;
- }
- dev = qdev_device_add(opts, errp);
+ dev = qdev_device_add_from_qdict(qdict, true, errp);
if (!dev) {
/*
* Drain all pending RCU callbacks. This is done because
@@ -872,20 +857,24 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp)
* to the user
*/
drain_call_rcu();
-
- qemu_opts_del(opts);
- return;
}
object_unref(OBJECT(dev));
}
-static DeviceState *find_device_state(const char *id, Error **errp)
+/*
+ * Note that creating new APIs using error classes other than GenericError is
+ * not recommended. Set use_generic_error=true for new interfaces.
+ */
+static DeviceState *find_device_state(const char *id, bool use_generic_error,
+ Error **errp)
{
Object *obj = object_resolve_path_at(qdev_get_peripheral(), id);
DeviceState *dev;
if (!obj) {
- error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
+ error_set(errp,
+ (use_generic_error ?
+ ERROR_CLASS_GENERIC_ERROR : ERROR_CLASS_DEVICE_NOT_FOUND),
"Device '%s' not found", id);
return NULL;
}
@@ -901,28 +890,15 @@ static DeviceState *find_device_state(const char *id, Error **errp)
void qdev_unplug(DeviceState *dev, Error **errp)
{
- DeviceClass *dc = DEVICE_GET_CLASS(dev);
HotplugHandler *hotplug_ctrl;
HotplugHandlerClass *hdc;
Error *local_err = NULL;
- if (qdev_unplug_blocked(dev, errp)) {
- return;
- }
-
- if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) {
- error_setg(errp, "Bus '%s' does not support hotplugging",
- dev->parent_bus->name);
- return;
- }
-
- if (!dc->hotpluggable) {
- error_setg(errp, "Device '%s' does not support hotplugging",
- object_get_typename(OBJECT(dev)));
+ if (!qdev_hotunplug_allowed(dev, errp)) {
return;
}
- if (!migration_is_idle() && !dev->allow_unplug_during_migration) {
+ if (migration_is_running() && !dev->allow_unplug_during_migration) {
error_setg(errp, "device_del not allowed while migrating");
return;
}
@@ -950,7 +926,7 @@ void qdev_unplug(DeviceState *dev, Error **errp)
void qmp_device_del(const char *id, Error **errp)
{
- DeviceState *dev = find_device_state(id, errp);
+ DeviceState *dev = find_device_state(id, false, errp);
if (dev != NULL) {
if (dev->pending_deleted_event &&
(dev->pending_deleted_expires_ms == 0 ||
@@ -964,11 +940,74 @@ void qmp_device_del(const char *id, Error **errp)
}
}
+int qdev_sync_config(DeviceState *dev, Error **errp)
+{
+ DeviceClass *dc = DEVICE_GET_CLASS(dev);
+
+ if (!dc->sync_config) {
+ error_setg(errp, "device-sync-config is not supported for '%s'",
+ object_get_typename(OBJECT(dev)));
+ return -ENOTSUP;
+ }
+
+ return dc->sync_config(dev, errp);
+}
+
+void qmp_device_sync_config(const char *id, Error **errp)
+{
+ DeviceState *dev;
+
+ /*
+ * During migration there is a race between syncing the configuration
+ * and migrating it (if we migrate first, the target would get an
+ * outdated version), so let's just not allow it.
+ */
+
+ if (migration_is_running()) {
+ error_setg(errp, "Config synchronization is not allowed "
+ "during migration");
+ return;
+ }
+
+ dev = find_device_state(id, true, errp);
+ if (!dev) {
+ return;
+ }
+
+ qdev_sync_config(dev, errp);
+}
+
void hmp_device_add(Monitor *mon, const QDict *qdict)
{
Error *err = NULL;
+ QemuOpts *opts;
+ DeviceState *dev;
+
+ opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, &err);
+ if (!opts) {
+ goto out;
+ }
+ if (qdev_device_help(opts)) {
+ qemu_opts_del(opts);
+ return;
+ }
+ dev = qdev_device_add(opts, &err);
+ if (!dev) {
+ /*
+ * Drain all pending RCU callbacks. This is done because
+ * some bus-related operations can delay a device removal
+ * (here, this can happen if the device is added and then
+ * removed due to a configuration error)
+ * to an RCU callback, but the user might expect that this
+ * interface will finish its job completely once the QMP
+ * command returns its result to the user.
+ */
+ drain_call_rcu();
- qmp_device_add((QDict *)qdict, NULL, &err);
+ qemu_opts_del(opts);
+ }
+ object_unref(dev);
+out:
hmp_handle_error(mon, err);
}
@@ -1034,7 +1073,7 @@ static GSList *qdev_build_hotpluggable_device_list(Object *peripheral)
static void peripheral_device_del_completion(ReadLineState *rs,
const char *str)
{
- Object *peripheral = container_get(qdev_get_machine(), "/peripheral");
+ Object *peripheral = machine_get_container("peripheral");
GSList *list, *item;
list = qdev_build_hotpluggable_device_list(peripheral);
@@ -1070,7 +1109,7 @@ BlockBackend *blk_by_qdev_id(const char *id, Error **errp)
GLOBAL_STATE_CODE();
- dev = find_device_state(id, errp);
+ dev = find_device_state(id, false, errp);
if (dev == NULL) {
return NULL;
}
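
Aside on the qdev_print_props() change above: properties are now walked by index up to
props_count_ instead of scanning for a NULL-name sentinel entry. A generic sketch of the two
traversal styles, with invented types rather than QEMU's DeviceClass/Property, might be:

    #include <stdio.h>

    typedef struct {
        const char *name;   /* invented, loosely mirrors a property name */
        int value;
    } Prop;

    /* Old style: array terminated by a sentinel entry whose name is NULL. */
    static void print_null_terminated(const Prop *props)
    {
        for (; props && props->name; props++) {
            printf("%s = %d\n", props->name, props->value);
        }
    }

    /* New style: an explicit element count carried next to the array. */
    static void print_counted(const Prop *props, int count)
    {
        for (int i = 0; i < count; i++) {
            printf("%s = %d\n", props[i].name, props[i].value);
        }
    }

    int main(void)
    {
        const Prop with_sentinel[] = { { "len", 3 }, { "id", 7 }, { NULL, 0 } };
        const Prop counted[]       = { { "len", 3 }, { "id", 7 } };

        print_null_terminated(with_sentinel);
        print_counted(counted, 2);
        return 0;
    }
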
diff --git a/system/qemu-seccomp.c b/system/qemu-seccomp.c
index 98ffce0..f8e1238 100644
--- a/system/qemu-seccomp.c
+++ b/system/qemu-seccomp.c
@@ -20,7 +20,7 @@
#include "qemu/module.h"
#include <sys/prctl.h>
#include <seccomp.h>
-#include "sysemu/seccomp.h"
+#include "system/seccomp.h"
#include <linux/seccomp.h>
/* For some architectures (notably ARM) cacheflush is not supported until
@@ -47,10 +47,10 @@ const struct scmp_arg_cmp sched_setscheduler_arg[] = {
};
/*
- * See 'NOTES' in 'man 2 clone' - s390 & cross have 'flags' in
+ * See 'NOTES' in 'man 2 clone' - s390 has 'flags' in
* different position to other architectures
*/
-#if defined(HOST_S390X) || defined(HOST_S390) || defined(HOST_CRIS)
+#if defined(HOST_S390X) || defined(HOST_S390)
#define CLONE_FLAGS_ARG 1
#else
#define CLONE_FLAGS_ARG 0
diff --git a/system/qtest.c b/system/qtest.c
index 12703a2..301b03b 100644
--- a/system/qtest.c
+++ b/system/qtest.c
@@ -13,17 +13,17 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
#include "chardev/char-fe.h"
-#include "exec/ioport.h"
-#include "exec/memory.h"
+#include "system/ioport.h"
+#include "system/memory.h"
#include "exec/tswap.h"
#include "hw/qdev-core.h"
#include "hw/irq.h"
#include "hw/core/cpu.h"
#include "qemu/accel.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpu-timers.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/error-report.h"
@@ -78,6 +78,11 @@ static void *qtest_server_send_opaque;
* let you adjust the value of the clock (monotonically). All the commands
* return the current value of the clock in nanoseconds.
*
+ * If the commands FAIL, time wasn't advanced, most likely because
+ * the machine was in a paused state or no timer events exist in the
+ * future. This will cause qtest to abort, and the test will need to
+ * check its assumptions.
+ *
* .. code-block:: none
*
* > clock_step
@@ -260,7 +265,7 @@ static int hex2nib(char ch)
}
}
-void qtest_send_prefix(CharBackend *chr)
+static void qtest_log_timestamp(void)
{
if (!qtest_log_fp || !qtest_opened) {
return;
@@ -277,7 +282,7 @@ static void G_GNUC_PRINTF(1, 2) qtest_log_send(const char *fmt, ...)
return;
}
- qtest_send_prefix(NULL);
+ qtest_log_timestamp();
va_start(ap, fmt);
vfprintf(qtest_log_fp, fmt, ap);
@@ -296,6 +301,7 @@ static void qtest_server_char_be_send(void *opaque, const char *str)
static void qtest_send(CharBackend *chr, const char *str)
{
+ qtest_log_timestamp();
qtest_server_send(qtest_server_send_opaque, str);
}
@@ -319,7 +325,6 @@ static void qtest_irq_handler(void *opaque, int n, int level)
if (irq_levels[n] != level) {
CharBackend *chr = &qtest->qtest_chr;
irq_levels[n] = level;
- qtest_send_prefix(chr);
qtest_sendf(chr, "IRQ %s %d\n",
level ? "raise" : "lower", n);
}
@@ -375,19 +380,16 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
is_outbound = words[0][14] == 'o';
dev = DEVICE(object_resolve_path(words[1], NULL));
if (!dev) {
- qtest_send_prefix(chr);
qtest_send(chr, "FAIL Unknown device\n");
return;
}
if (is_named && !is_outbound) {
- qtest_send_prefix(chr);
qtest_send(chr, "FAIL Interception of named in-GPIOs not yet supported\n");
return;
}
if (irq_intercept_dev) {
- qtest_send_prefix(chr);
if (irq_intercept_dev != dev) {
qtest_send(chr, "FAIL IRQ intercept already enabled\n");
} else {
@@ -414,7 +416,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
}
}
- qtest_send_prefix(chr);
if (interception_succeeded) {
irq_intercept_dev = dev;
qtest_send(chr, "OK\n");
@@ -433,7 +434,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
dev = DEVICE(object_resolve_path(words[1], NULL));
if (!dev) {
- qtest_send_prefix(chr);
qtest_send(chr, "FAIL Unknown device\n");
return;
}
@@ -452,7 +452,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
irq = qdev_get_gpio_in_named(dev, name, num);
qemu_set_irq(irq, level);
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "outb") == 0 ||
strcmp(words[0], "outw") == 0 ||
@@ -475,7 +474,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
} else if (words[0][3] == 'l') {
cpu_outl(addr, value);
}
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "inb") == 0 ||
strcmp(words[0], "inw") == 0 ||
@@ -496,7 +494,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
} else if (words[0][2] == 'l') {
value = cpu_inl(addr);
}
- qtest_send_prefix(chr);
qtest_sendf(chr, "OK 0x%04x\n", value);
} else if (strcmp(words[0], "writeb") == 0 ||
strcmp(words[0], "writew") == 0 ||
@@ -532,7 +529,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
&data, 8);
}
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "readb") == 0 ||
strcmp(words[0], "readw") == 0 ||
@@ -566,7 +562,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
&value, 8);
tswap64s(&value);
}
- qtest_send_prefix(chr);
qtest_sendf(chr, "OK 0x%016" PRIx64 "\n", value);
} else if (strcmp(words[0], "read") == 0) {
g_autoptr(GString) enc = NULL;
@@ -588,7 +583,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
enc = qemu_hexdump_line(NULL, data, len, 0, 0);
- qtest_send_prefix(chr);
qtest_sendf(chr, "OK 0x%s\n", enc->str);
g_free(data);
@@ -608,7 +602,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
len);
b64_data = g_base64_encode(data, len);
- qtest_send_prefix(chr);
qtest_sendf(chr, "OK %s\n", b64_data);
g_free(data);
@@ -644,7 +637,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
len);
g_free(data);
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "memset") == 0) {
uint64_t addr, len;
@@ -668,7 +660,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
g_free(data);
}
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "b64write") == 0) {
uint64_t addr, len;
@@ -700,17 +691,16 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
len);
- qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
} else if (strcmp(words[0], "endianness") == 0) {
- qtest_send_prefix(chr);
- if (target_words_bigendian()) {
+ if (target_big_endian()) {
qtest_sendf(chr, "OK big\n");
} else {
qtest_sendf(chr, "OK little\n");
}
} else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) {
- int64_t ns;
+ int64_t old_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ int64_t ns, new_ns;
if (words[1]) {
int ret = qemu_strtoi64(words[1], NULL, 0, &ns);
@@ -718,18 +708,24 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
} else {
ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
+ if (ns < 0) {
+ qtest_send(chr, "FAIL "
+ "cannot advance clock to the next deadline "
+ "because there is no pending deadline\n");
+ return;
+ }
+ }
+ new_ns = qemu_clock_advance_virtual_time(old_ns + ns);
+ if (new_ns > old_ns) {
+ qtest_sendf(chr, "OK %"PRIi64"\n", new_ns);
+ } else {
+ qtest_sendf(chr, "FAIL could not advance time\n");
}
- qemu_clock_advance_virtual_time(
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
- qtest_send_prefix(chr);
- qtest_sendf(chr, "OK %"PRIi64"\n",
- (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
} else if (strcmp(words[0], "module_load") == 0) {
Error *local_err = NULL;
int rv;
g_assert(words[1] && words[2]);
- qtest_send_prefix(chr);
rv = module_load(words[1], words[2], &local_err);
if (rv > 0) {
qtest_sendf(chr, "OK\n");
@@ -740,43 +736,37 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
qtest_sendf(chr, "FAIL\n");
}
} else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) {
- int64_t ns;
+ int64_t ns, new_ns;
int ret;
g_assert(words[1]);
ret = qemu_strtoi64(words[1], NULL, 0, &ns);
g_assert(ret == 0);
- qemu_clock_advance_virtual_time(ns);
- qtest_send_prefix(chr);
- qtest_sendf(chr, "OK %"PRIi64"\n",
- (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ new_ns = qemu_clock_advance_virtual_time(ns);
+ qtest_sendf(chr, "%s %"PRIi64"\n",
+ new_ns == ns ? "OK" : "FAIL", new_ns);
} else if (process_command_cb && process_command_cb(chr, words)) {
/* Command got consumed by the callback handler */
} else {
- qtest_send_prefix(chr);
qtest_sendf(chr, "FAIL Unknown command '%s'\n", words[0]);
}
}
+/*
+ * Process as much of @inbuf as we can in newline terminated chunks.
+ * Remove the processed commands from @inbuf as we go.
+ */
static void qtest_process_inbuf(CharBackend *chr, GString *inbuf)
{
char *end;
while ((end = strchr(inbuf->str, '\n')) != NULL) {
- size_t offset;
- GString *cmd;
- gchar **words;
-
- offset = end - inbuf->str;
+ size_t len = end - inbuf->str;
+ g_autofree char *cmd = g_strndup(inbuf->str, len);
+ g_auto(GStrv) words = g_strsplit(cmd, " ", 0);
- cmd = g_string_new_len(inbuf->str, offset);
- g_string_erase(inbuf, 0, offset + 1);
-
- words = g_strsplit(cmd->str, " ", 0);
+ g_string_erase(inbuf, 0, len + 1);
qtest_process_command(chr, words);
- g_strfreev(words);
-
- g_string_free(cmd, TRUE);
}
}
@@ -1004,7 +994,7 @@ static char *qtest_get_chardev(Object *obj, Error **errp)
return g_strdup(q->chr_name);
}
-static void qtest_class_init(ObjectClass *oc, void *data)
+static void qtest_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -1022,7 +1012,7 @@ static const TypeInfo qtest_info = {
.parent = TYPE_OBJECT,
.class_init = qtest_class_init,
.instance_size = sizeof(QTest),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
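
Aside on the clock_step/clock_set rework above: clock_step now reports FAIL when the virtual
clock did not actually move forward (and clock_set when it did not land on the requested
value). A stripped-down sketch of the clock_step decision, with a stubbed monotonic clock
standing in for qemu_clock_advance_virtual_time(), might be:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t virtual_clock_ns;   /* stand-in for QEMU_CLOCK_VIRTUAL */

    /* Stub: advance the clock toward @target, never backwards. */
    static int64_t clock_advance(int64_t target)
    {
        if (target > virtual_clock_ns) {
            virtual_clock_ns = target;
        }
        return virtual_clock_ns;
    }

    static void clock_step(int64_t ns)
    {
        int64_t old_ns = virtual_clock_ns;
        int64_t new_ns = clock_advance(old_ns + ns);

        if (new_ns > old_ns) {
            printf("OK %" PRIi64 "\n", new_ns);
        } else {
            printf("FAIL could not advance time\n");
        }
    }

    int main(void)
    {
        clock_step(1000);   /* prints "OK 1000" */
        clock_step(0);      /* prints "FAIL could not advance time" */
        return 0;
    }
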
diff --git a/system/ram-block-attributes.c b/system/ram-block-attributes.c
new file mode 100644
index 0000000..68e8a02
--- /dev/null
+++ b/system/ram-block-attributes.c
@@ -0,0 +1,444 @@
+/*
+ * QEMU ram block attributes
+ *
+ * Copyright Intel
+ *
+ * Author:
+ * Chenyi Qiang <chenyi.qiang@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/ramblock.h"
+#include "trace.h"
+
+OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(RamBlockAttributes,
+ ram_block_attributes,
+ RAM_BLOCK_ATTRIBUTES,
+ OBJECT,
+ { TYPE_RAM_DISCARD_MANAGER },
+ { })
+
+static size_t
+ram_block_attributes_get_block_size(const RamBlockAttributes *attr)
+{
+ /*
+ * Because page conversion can only be done in chunks of at least 4K
+ * and 4K-aligned, use the host page size as the granularity to track
+ * the memory attribute.
+ */
+ g_assert(attr && attr->ram_block);
+ g_assert(attr->ram_block->page_size == qemu_real_host_page_size());
+ return attr->ram_block->page_size;
+}
+
+
+static bool
+ram_block_attributes_rdm_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section)
+{
+ const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ const uint64_t first_bit = section->offset_within_region / block_size;
+ const uint64_t last_bit =
+ first_bit + int128_get64(section->size) / block_size - 1;
+ unsigned long first_discarded_bit;
+
+ first_discarded_bit = find_next_zero_bit(attr->bitmap, last_bit + 1,
+ first_bit);
+ return first_discarded_bit > last_bit;
+}
+
+typedef int (*ram_block_attributes_section_cb)(MemoryRegionSection *s,
+ void *arg);
+
+static int
+ram_block_attributes_notify_populate_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamDiscardListener *rdl = arg;
+
+ return rdl->notify_populate(rdl, section);
+}
+
+static int
+ram_block_attributes_notify_discard_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamDiscardListener *rdl = arg;
+
+ rdl->notify_discard(rdl, section);
+ return 0;
+}
+
+static int
+ram_block_attributes_for_each_populated_section(const RamBlockAttributes *attr,
+ MemoryRegionSection *section,
+ void *arg,
+ ram_block_attributes_section_cb cb)
+{
+ unsigned long first_bit, last_bit;
+ uint64_t offset, size;
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ int ret = 0;
+
+ first_bit = section->offset_within_region / block_size;
+ first_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit);
+
+ while (first_bit < attr->bitmap_size) {
+ MemoryRegionSection tmp = *section;
+
+ offset = first_bit * block_size;
+ last_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size,
+ first_bit + 1) - 1;
+ size = (last_bit - first_bit + 1) * block_size;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ break;
+ }
+
+ ret = cb(&tmp, arg);
+ if (ret) {
+ error_report("%s: Failed to notify RAM discard listener: %s",
+ __func__, strerror(-ret));
+ break;
+ }
+
+ first_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ last_bit + 2);
+ }
+
+ return ret;
+}
+
+static int
+ram_block_attributes_for_each_discarded_section(const RamBlockAttributes *attr,
+ MemoryRegionSection *section,
+ void *arg,
+ ram_block_attributes_section_cb cb)
+{
+ unsigned long first_bit, last_bit;
+ uint64_t offset, size;
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ int ret = 0;
+
+ first_bit = section->offset_within_region / block_size;
+ first_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size,
+ first_bit);
+
+ while (first_bit < attr->bitmap_size) {
+ MemoryRegionSection tmp = *section;
+
+ offset = first_bit * block_size;
+ last_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit + 1) - 1;
+ size = (last_bit - first_bit + 1) * block_size;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ break;
+ }
+
+ ret = cb(&tmp, arg);
+ if (ret) {
+ error_report("%s: Failed to notify RAM discard listener: %s",
+ __func__, strerror(-ret));
+ break;
+ }
+
+ first_bit = find_next_zero_bit(attr->bitmap,
+ attr->bitmap_size,
+ last_bit + 2);
+ }
+
+ return ret;
+}
+
+static uint64_t
+ram_block_attributes_rdm_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr)
+{
+ const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+
+ g_assert(mr == attr->ram_block->mr);
+ return ram_block_attributes_get_block_size(attr);
+}
+
+static void
+ram_block_attributes_rdm_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ int ret;
+
+ g_assert(section->mr == attr->ram_block->mr);
+ rdl->section = memory_region_section_new_copy(section);
+
+ QLIST_INSERT_HEAD(&attr->rdl_list, rdl, next);
+
+ ret = ram_block_attributes_for_each_populated_section(attr, section, rdl,
+ ram_block_attributes_notify_populate_cb);
+ if (ret) {
+ error_report("%s: Failed to register RAM discard listener: %s",
+ __func__, strerror(-ret));
+ exit(1);
+ }
+}
+
+static void
+ram_block_attributes_rdm_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ int ret;
+
+ g_assert(rdl->section);
+ g_assert(rdl->section->mr == attr->ram_block->mr);
+
+ if (rdl->double_discard_supported) {
+ rdl->notify_discard(rdl, rdl->section);
+ } else {
+ ret = ram_block_attributes_for_each_populated_section(attr,
+ rdl->section, rdl, ram_block_attributes_notify_discard_cb);
+ if (ret) {
+ error_report("%s: Failed to unregister RAM discard listener: %s",
+ __func__, strerror(-ret));
+ exit(1);
+ }
+ }
+
+ memory_region_section_free_copy(rdl->section);
+ rdl->section = NULL;
+ QLIST_REMOVE(rdl, next);
+}
+
+typedef struct RamBlockAttributesReplayData {
+ ReplayRamDiscardState fn;
+ void *opaque;
+} RamBlockAttributesReplayData;
+
+static int ram_block_attributes_rdm_replay_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamBlockAttributesReplayData *data = arg;
+
+ return data->fn(section, data->opaque);
+}
+
+static int
+ram_block_attributes_rdm_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque };
+
+ g_assert(section->mr == attr->ram_block->mr);
+ return ram_block_attributes_for_each_populated_section(attr, section, &data,
+ ram_block_attributes_rdm_replay_cb);
+}
+
+static int
+ram_block_attributes_rdm_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque };
+
+ g_assert(section->mr == attr->ram_block->mr);
+ return ram_block_attributes_for_each_discarded_section(attr, section, &data,
+ ram_block_attributes_rdm_replay_cb);
+}
+
+static bool
+ram_block_attributes_is_valid_range(RamBlockAttributes *attr, uint64_t offset,
+ uint64_t size)
+{
+ MemoryRegion *mr = attr->ram_block->mr;
+
+ g_assert(mr);
+
+ uint64_t region_size = memory_region_size(mr);
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+
+ if (!QEMU_IS_ALIGNED(offset, block_size) ||
+ !QEMU_IS_ALIGNED(size, block_size)) {
+ return false;
+ }
+ if (offset + size <= offset) {
+ return false;
+ }
+ if (offset + size > region_size) {
+ return false;
+ }
+ return true;
+}
+
+static void ram_block_attributes_notify_discard(RamBlockAttributes *attr,
+ uint64_t offset,
+ uint64_t size)
+{
+ RamDiscardListener *rdl;
+
+ QLIST_FOREACH(rdl, &attr->rdl_list, next) {
+ MemoryRegionSection tmp = *rdl->section;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ continue;
+ }
+ rdl->notify_discard(rdl, &tmp);
+ }
+}
+
+static int
+ram_block_attributes_notify_populate(RamBlockAttributes *attr,
+ uint64_t offset, uint64_t size)
+{
+ RamDiscardListener *rdl;
+ int ret = 0;
+
+ QLIST_FOREACH(rdl, &attr->rdl_list, next) {
+ MemoryRegionSection tmp = *rdl->section;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ continue;
+ }
+ ret = rdl->notify_populate(rdl, &tmp);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int ram_block_attributes_state_change(RamBlockAttributes *attr,
+ uint64_t offset, uint64_t size,
+ bool to_discard)
+{
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ const unsigned long first_bit = offset / block_size;
+ const unsigned long nbits = size / block_size;
+ const unsigned long last_bit = first_bit + nbits - 1;
+ const bool is_discarded = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit) > last_bit;
+ const bool is_populated = find_next_zero_bit(attr->bitmap,
+ attr->bitmap_size, first_bit) > last_bit;
+ unsigned long bit;
+ int ret = 0;
+
+ if (!ram_block_attributes_is_valid_range(attr, offset, size)) {
+ error_report("%s, invalid range: offset 0x%" PRIx64 ", size "
+ "0x%" PRIx64, __func__, offset, size);
+ return -EINVAL;
+ }
+
+ trace_ram_block_attributes_state_change(offset, size,
+ is_discarded ? "discarded" :
+ is_populated ? "populated" :
+ "mixture",
+ to_discard ? "discarded" :
+ "populated");
+ if (to_discard) {
+ if (is_discarded) {
+ /* Already discarded (private) */
+ } else if (is_populated) {
+ /* Fully populated (shared) */
+ bitmap_clear(attr->bitmap, first_bit, nbits);
+ ram_block_attributes_notify_discard(attr, offset, size);
+ } else {
+ /* Unexpected mixture: process individual blocks */
+ for (bit = first_bit; bit < first_bit + nbits; bit++) {
+ if (!test_bit(bit, attr->bitmap)) {
+ continue;
+ }
+ clear_bit(bit, attr->bitmap);
+ ram_block_attributes_notify_discard(attr, bit * block_size,
+ block_size);
+ }
+ }
+ } else {
+ if (is_populated) {
+ /* Already populated (shared) */
+ } else if (is_discarded) {
+ /* Fully discarded (private) */
+ bitmap_set(attr->bitmap, first_bit, nbits);
+ ret = ram_block_attributes_notify_populate(attr, offset, size);
+ } else {
+ /* Unexpected mixture: process individual blocks */
+ for (bit = first_bit; bit < first_bit + nbits; bit++) {
+ if (test_bit(bit, attr->bitmap)) {
+ continue;
+ }
+ set_bit(bit, attr->bitmap);
+ ret = ram_block_attributes_notify_populate(attr,
+ bit * block_size,
+ block_size);
+ if (ret) {
+ break;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
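/*
 * Worked example of the index math above (illustrative, not part of the
 * patch): with a 4 KiB block size, offset = 0x3000 and size = 0x2000 give
 * first_bit = 3, nbits = 2 and last_bit = 4. If bits 3 and 4 are both set
 * the range is fully populated; both clear means fully discarded; anything
 * else is the mixed case handled block by block in the loops above.
 */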
+
+RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block)
+{
+ const int block_size = qemu_real_host_page_size();
+ RamBlockAttributes *attr;
+ MemoryRegion *mr = ram_block->mr;
+
+ attr = RAM_BLOCK_ATTRIBUTES(object_new(TYPE_RAM_BLOCK_ATTRIBUTES));
+
+ attr->ram_block = ram_block;
+ if (memory_region_set_ram_discard_manager(mr, RAM_DISCARD_MANAGER(attr))) {
+ object_unref(OBJECT(attr));
+ return NULL;
+ }
+ attr->bitmap_size =
+ ROUND_UP(int128_get64(mr->size), block_size) / block_size;
+ attr->bitmap = bitmap_new(attr->bitmap_size);
+
+ return attr;
+}
+
+void ram_block_attributes_destroy(RamBlockAttributes *attr)
+{
+ g_assert(attr);
+
+ g_free(attr->bitmap);
+ memory_region_set_ram_discard_manager(attr->ram_block->mr, NULL);
+ object_unref(OBJECT(attr));
+}
+
+static void ram_block_attributes_init(Object *obj)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(obj);
+
+ QLIST_INIT(&attr->rdl_list);
+}
+
+static void ram_block_attributes_finalize(Object *obj)
+{
+}
+
+static void ram_block_attributes_class_init(ObjectClass *klass,
+ const void *data)
+{
+ RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass);
+
+ rdmc->get_min_granularity = ram_block_attributes_rdm_get_min_granularity;
+ rdmc->register_listener = ram_block_attributes_rdm_register_listener;
+ rdmc->unregister_listener = ram_block_attributes_rdm_unregister_listener;
+ rdmc->is_populated = ram_block_attributes_rdm_is_populated;
+ rdmc->replay_populated = ram_block_attributes_rdm_replay_populated;
+ rdmc->replay_discarded = ram_block_attributes_rdm_replay_discarded;
+}
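A minimal usage sketch of the RamBlockAttributes API introduced above, assuming a RAMBlock whose private/shared state is tracked this way (for example guest_memfd-backed RAM) and whose block granularity equals the host page size; set bits in attr->bitmap mean populated (shared), clear bits mean discarded (private). The helper name and the two-block range are illustrative only:

static void example_share_then_unshare(RAMBlock *rb)
{
    RamBlockAttributes *attr = ram_block_attributes_create(rb);
    const uint64_t blk = qemu_real_host_page_size();

    if (!attr) {
        return; /* rb->mr already has a RamDiscardManager */
    }

    /* convert the first two blocks to populated (shared) ... */
    ram_block_attributes_state_change(attr, 0, 2 * blk, false);
    /* ... and back to discarded (private); listeners are notified */
    ram_block_attributes_state_change(attr, 0, 2 * blk, true);

    ram_block_attributes_destroy(attr);
}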
diff --git a/system/rtc.c b/system/rtc.c
index dc44576..5695128 100644
--- a/system/rtc.c
+++ b/system/rtc.c
@@ -29,9 +29,9 @@
#include "qemu/option.h"
#include "qemu/timer.h"
#include "qom/object.h"
-#include "sysemu/replay.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/rtc.h"
+#include "system/replay.h"
+#include "system/system.h"
+#include "system/rtc.h"
#include "hw/rtc/mc146818rtc.h"
static enum {
@@ -62,7 +62,7 @@ static time_t qemu_ref_timedate(QEMUClockType clock)
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
return value;
}
diff --git a/system/runstate-action.c b/system/runstate-action.c
index ae0761a..f912bc8 100644
--- a/system/runstate-action.c
+++ b/system/runstate-action.c
@@ -7,8 +7,8 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/watchdog.h"
+#include "system/runstate-action.h"
+#include "system/watchdog.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/option_int.h"
diff --git a/system/runstate-hmp-cmds.c b/system/runstate-hmp-cmds.c
index 2df670f..be1d676 100644
--- a/system/runstate-hmp-cmds.c
+++ b/system/runstate-hmp-cmds.c
@@ -19,7 +19,7 @@
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-run-state.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/accel.h"
void hmp_info_status(Monitor *mon, const QDict *qdict)
diff --git a/system/runstate.c b/system/runstate.c
index c833316..38900c9 100644
--- a/system/runstate.c
+++ b/system/runstate.c
@@ -32,6 +32,7 @@
#include "exec/cpu-common.h"
#include "gdbstub/syscalls.h"
#include "hw/boards.h"
+#include "hw/resettable.h"
#include "migration/misc.h"
#include "migration/postcopy-ram.h"
#include "monitor/monitor.h"
@@ -50,14 +51,14 @@
#include "qemu/thread.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
-#include "sysemu/cpus.h"
-#include "sysemu/qtest.h"
-#include "sysemu/replay.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/tpm.h"
+#include "system/cpus.h"
+#include "system/qtest.h"
+#include "system/replay.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/runstate-action.h"
+#include "system/system.h"
+#include "system/tpm.h"
#include "trace.h"
static NotifierList exit_notifiers =
@@ -181,6 +182,12 @@ static const RunStateTransition runstate_transitions_def[] = {
{ RUN_STATE__MAX, RUN_STATE__MAX },
};
+static const RunStateTransition replay_play_runstate_transitions_def[] = {
+ { RUN_STATE_SHUTDOWN, RUN_STATE_RUNNING},
+
+ { RUN_STATE__MAX, RUN_STATE__MAX },
+};
+
static bool runstate_valid_transitions[RUN_STATE__MAX][RUN_STATE__MAX];
bool runstate_check(RunState state)
@@ -188,14 +195,33 @@ bool runstate_check(RunState state)
return current_run_state == state;
}
-static void runstate_init(void)
+static void transitions_set_valid(const RunStateTransition *rst)
{
const RunStateTransition *p;
- memset(&runstate_valid_transitions, 0, sizeof(runstate_valid_transitions));
- for (p = &runstate_transitions_def[0]; p->from != RUN_STATE__MAX; p++) {
+ for (p = rst; p->from != RUN_STATE__MAX; p++) {
runstate_valid_transitions[p->from][p->to] = true;
}
+}
+
+void runstate_replay_enable(void)
+{
+ assert(replay_mode != REPLAY_MODE_NONE);
+
+ if (replay_mode == REPLAY_MODE_PLAY) {
+ /*
+ * When reverse-debugging, it is possible to move from the
+ * shutdown state back to running.
+ */
+ transitions_set_valid(&replay_play_runstate_transitions_def[0]);
+ }
+}
+
+static void runstate_init(void)
+{
+ memset(&runstate_valid_transitions, 0, sizeof(runstate_valid_transitions));
+
+ transitions_set_valid(&runstate_transitions_def[0]);
qemu_mutex_init(&vmstop_lock);
}
@@ -271,6 +297,7 @@ void qemu_system_vmstop_request(RunState state)
struct VMChangeStateEntry {
VMChangeStateHandler *cb;
VMChangeStateHandler *prepare_cb;
+ VMChangeStateHandlerWithRet *cb_ret;
void *opaque;
QTAILQ_ENTRY(VMChangeStateEntry) entries;
int priority;
@@ -294,14 +321,15 @@ static QTAILQ_HEAD(, VMChangeStateEntry) vm_change_state_head =
VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
VMChangeStateHandler *cb, void *opaque, int priority)
{
- return qemu_add_vm_change_state_handler_prio_full(cb, NULL, opaque,
- priority);
+ return qemu_add_vm_change_state_handler_prio_full(cb, NULL, NULL,
+ opaque, priority);
}
/**
* qemu_add_vm_change_state_handler_prio_full:
* @cb: the main callback to invoke
* @prepare_cb: a callback to invoke before the main callback
+ * @cb_ret: the main callback to invoke with return value
* @opaque: user data passed to the callbacks
* @priority: low priorities execute first when the vm runs and the reverse is
* true when the vm stops
@@ -318,6 +346,7 @@ VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
VMChangeStateEntry *
qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
VMChangeStateHandler *prepare_cb,
+ VMChangeStateHandlerWithRet *cb_ret,
void *opaque, int priority)
{
VMChangeStateEntry *e;
@@ -326,6 +355,7 @@ qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
e = g_malloc0(sizeof(*e));
e->cb = cb;
e->prepare_cb = prepare_cb;
+ e->cb_ret = cb_ret;
e->opaque = opaque;
e->priority = priority;
@@ -353,9 +383,10 @@ void qemu_del_vm_change_state_handler(VMChangeStateEntry *e)
g_free(e);
}
-void vm_state_notify(bool running, RunState state)
+int vm_state_notify(bool running, RunState state)
{
VMChangeStateEntry *e, *next;
+ int ret = 0;
trace_vm_state_notify(running, state, RunState_str(state));
@@ -367,7 +398,17 @@ void vm_state_notify(bool running, RunState state)
}
QTAILQ_FOREACH_SAFE(e, &vm_change_state_head, entries, next) {
- e->cb(e->opaque, running, state);
+ if (e->cb) {
+ e->cb(e->opaque, running, state);
+ } else if (e->cb_ret) {
+ /*
+ * Ignore the return value of cb_ret here: it only matters when
+ * stopping devices during live migration, where it indicates
+ * whether the connection between QEMU and the backend is still
+ * healthy.
+ */
+ e->cb_ret(e->opaque, running, state);
+ }
}
} else {
QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) {
@@ -377,9 +418,19 @@ void vm_state_notify(bool running, RunState state)
}
QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) {
- e->cb(e->opaque, running, state);
+ if (e->cb) {
+ e->cb(e->opaque, running, state);
+ } else if (e->cb_ret) {
+ /*
+ * Execute all registered callbacks even if one of them
+ * returns a failure; otherwise some device cleanup work
+ * would be skipped.
+ */
+ ret |= e->cb_ret(e->opaque, running, state);
+ }
}
}
+ return ret;
}
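/*
 * Illustrative sketch (not from this patch): a handler using the new
 * cb_ret slot. The VMChangeStateHandlerWithRet typedef is not shown in
 * this hunk and is assumed to take the same arguments as
 * VMChangeStateHandler while returning int, matching the call
 * e->cb_ret(e->opaque, running, state) above.
 */
static int example_vm_state_change(void *opaque, bool running, RunState state)
{
    if (!running) {
        /* tell vm_state_notify() whether stopping went cleanly */
        return 0; /* or a negative errno on failure */
    }
    return 0;
}

/*
 * Registered with:
 *   qemu_add_vm_change_state_handler_prio_full(NULL, NULL,
 *                                              example_vm_state_change,
 *                                              opaque, 0);
 */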
static ShutdownCause reset_requested;
@@ -482,15 +533,23 @@ static int qemu_debug_requested(void)
void qemu_system_reset(ShutdownCause reason)
{
MachineClass *mc;
+ ResetType type;
mc = current_machine ? MACHINE_GET_CLASS(current_machine) : NULL;
cpu_synchronize_all_states();
+ switch (reason) {
+ case SHUTDOWN_CAUSE_SNAPSHOT_LOAD:
+ type = RESET_TYPE_SNAPSHOT_LOAD;
+ break;
+ default:
+ type = RESET_TYPE_COLD;
+ }
if (mc && mc->reset) {
- mc->reset(current_machine, reason);
+ mc->reset(current_machine, type);
} else {
- qemu_devices_reset(reason);
+ qemu_devices_reset(type);
}
switch (reason) {
case SHUTDOWN_CAUSE_NONE:
@@ -531,6 +590,58 @@ static void qemu_system_wakeup(void)
}
}
+static char *tdx_parse_panic_message(char *message)
+{
+ bool printable = false;
+ char *buf = NULL;
+ int len = 0, i;
+
+ /*
+ * Although message is defined as a JSON string, we should not
+ * unconditionally treat it as such: the guest generated it and it is
+ * not necessarily trustworthy.
+ */
+ if (message) {
+ /* The caller guarantees a NUL-terminated string. */
+ len = strlen(message);
+
+ printable = len > 0;
+ for (i = 0; i < len; i++) {
+ if (!(0x20 <= message[i] && message[i] <= 0x7e)) {
+ printable = false;
+ break;
+ }
+ }
+ }
+
+ if (len == 0) {
+ buf = g_malloc(1);
+ buf[0] = '\0';
+ } else {
+ if (!printable) {
+ /* 3 = length of "%02x " */
+ buf = g_malloc(len * 3);
+ for (i = 0; i < len; i++) {
+ if (message[i] == '\0') {
+ break;
+ } else {
+ sprintf(buf + 3 * i, "%02x ", message[i]);
+ }
+ }
+ if (i > 0) {
+ /* replace the trailing ' ' (space) with a NUL terminator */
+ buf[i * 3 - 1] = '\0';
+ } else {
+ buf[0] = '\0';
+ }
+ } else {
+ buf = g_strdup(message);
+ }
+ }
+
+ return buf;
+}
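/*
 * Illustrative examples of the formatting above (not part of the patch):
 * a printable message such as "kernel panic" is returned verbatim, while a
 * message containing non-printable bytes, e.g. {0x01, 0x41, 0x02}, is
 * rendered as the hex string "01 41 02" (the trailing space replaced by
 * the NUL terminator).
 */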
+
void qemu_system_guest_panicked(GuestPanicInformation *info)
{
qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed");
@@ -572,7 +683,20 @@ void qemu_system_guest_panicked(GuestPanicInformation *info)
S390CrashReason_str(info->u.s390.reason),
info->u.s390.psw_mask,
info->u.s390.psw_addr);
+ } else if (info->type == GUEST_PANIC_INFORMATION_TYPE_TDX) {
+ char *message = tdx_parse_panic_message(info->u.tdx.message);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "\nTDX guest reports fatal error."
+ " error code: 0x%" PRIx32 " error message:\"%s\"\n",
+ info->u.tdx.error_code, message);
+ g_free(message);
+ if (info->u.tdx.gpa != -1ull) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Additional error information "
+ "can be found at gpa page: 0x%" PRIx64 "\n",
+ info->u.tdx.gpa);
+ }
}
+
qapi_free_GuestPanicInformation(info);
}
}
@@ -816,6 +940,7 @@ void qemu_remove_exit_notifier(Notifier *notify)
static void qemu_run_exit_notifiers(void)
{
+ BQL_LOCK_GUARD();
notifier_list_notify(&exit_notifiers, NULL);
}
diff --git a/system/tpm.c b/system/tpm.c
index 7164ea7..8df0f6e 100644
--- a/system/tpm.c
+++ b/system/tpm.c
@@ -17,8 +17,8 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-tpm.h"
#include "qapi/qmp/qerror.h"
-#include "sysemu/tpm_backend.h"
-#include "sysemu/tpm.h"
+#include "system/tpm_backend.h"
+#include "system/tpm.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
diff --git a/system/trace-events b/system/trace-events
index 2ed1d59..82856e4 100644
--- a/system/trace-events
+++ b/system/trace-events
@@ -4,6 +4,13 @@
# Since requests are raised via monitor, not many tracepoints are needed.
balloon_event(void *opaque, unsigned long addr) "opaque %p addr %lu"
+# dma-helpers.c
+dma_blk_io(void *dbs, void *bs, int64_t offset, bool to_dev) "dbs=%p bs=%p offset=%" PRId64 " to_dev=%d"
+dma_aio_cancel(void *dbs) "dbs=%p"
+dma_complete(void *dbs, int ret, void *cb) "dbs=%p ret=%d cb=%p"
+dma_blk_cb(void *dbs, int ret) "dbs=%p ret=%d"
+dma_map_wait(void *dbs) "dbs=%p"
+
# ioport.c
cpu_in(unsigned int addr, char size, unsigned int val) "addr 0x%x(%c) value %u"
cpu_out(unsigned int addr, char size, unsigned int val) "addr 0x%x(%c) value %u"
@@ -26,6 +33,7 @@ address_space_map(void *as, uint64_t addr, uint64_t len, bool is_write, uint32_t
find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64
find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64
ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d"
+qemu_ram_alloc_shared(const char *name, size_t size, size_t max_size, int fd, void *host) "%s size %zu max_size %zu fd %d host %p"
# cpus.c
vm_stop_flush_all(int ret) "ret %d"
@@ -44,3 +52,6 @@ dirtylimit_state_finalize(void)
dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us"
dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64
dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us"
+
+# ram-block-attributes.c
+ram_block_attributes_state_change(uint64_t offset, uint64_t size, const char *from, const char *to) "offset 0x%"PRIx64" size 0x%"PRIx64" from '%s' to '%s'"
diff --git a/system/vl.c b/system/vl.c
index 9e8f16f..3b7057e 100644
--- a/system/vl.c
+++ b/system/vl.c
@@ -26,25 +26,28 @@
#include "qemu/help-texts.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
+#include "qemu/module.h"
+#include "qemu/target-info.h"
#include "exec/cpu-common.h"
#include "exec/page-vary.h"
#include "hw/qdev-properties.h"
#include "qapi/compat-policy.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
+#include "qobject/qjson.h"
#include "qemu-version.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "qemu/hw-version.h"
#include "qemu/uuid.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/seccomp.h"
-#include "sysemu/tcg.h"
-#include "sysemu/xen.h"
+#include "qemu/target-info.h"
+#include "system/reset.h"
+#include "system/runstate.h"
+#include "system/runstate-action.h"
+#include "system/seccomp.h"
+#include "system/tcg.h"
+#include "system/xen.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
@@ -53,6 +56,7 @@
#include "hw/usb.h"
#include "hw/isa/isa.h"
#include "hw/scsi/scsi.h"
+#include "hw/sd/sd.h"
#include "hw/display/vga.h"
#include "hw/firmware/smbios.h"
#include "hw/acpi/acpi.h"
@@ -64,30 +68,32 @@
#include "monitor/monitor.h"
#include "ui/console.h"
#include "ui/input.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/numa.h"
-#include "sysemu/hostmem.h"
+#include "system/system.h"
+#include "system/numa.h"
+#include "system/hostmem.h"
#include "exec/gdbstub.h"
#include "gdbstub/enums.h"
#include "qemu/timer.h"
#include "chardev/char.h"
#include "qemu/bitmap.h"
#include "qemu/log.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "hw/block/block.h"
#include "hw/i386/x86.h"
#include "hw/i386/pc.h"
+#include "migration/cpr.h"
#include "migration/misc.h"
#include "migration/snapshot.h"
-#include "sysemu/tpm.h"
-#include "sysemu/dma.h"
+#include "system/tpm.h"
+#include "system/dma.h"
#include "hw/audio/soundhw.h"
#include "audio/audio.h"
-#include "sysemu/cpus.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpus.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "migration/colo.h"
#include "migration/postcopy-ram.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qapi/qobject-input-visitor.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
@@ -95,7 +101,7 @@
#ifdef CONFIG_VIRTFS
#include "fsdev/qemu-fsdev.h"
#endif
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#ifdef CONFIG_TCG
#include "tcg/perf.h"
#endif
@@ -106,8 +112,8 @@
#include "trace/control.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
-#include "sysemu/arch_init.h"
-#include "exec/confidential-guest-support.h"
+#include "system/arch_init.h"
+#include "system/confidential-guest-support.h"
#include "ui/qemu-spice.h"
#include "qapi/string-input-visitor.h"
@@ -116,13 +122,14 @@
#include "qom/object_interfaces.h"
#include "semihosting/semihost.h"
#include "crypto/init.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qapi-types-audio.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qapi-visit-compat.h"
#include "qapi/qapi-visit-machine.h"
+#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-ui.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qapi-commands-migration.h"
@@ -131,7 +138,7 @@
#include "qapi/qapi-commands-ui.h"
#include "block/qdict.h"
#include "qapi/qmp/qerror.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "qemu/guest-random.h"
#include "qemu/keyval.h"
@@ -159,6 +166,8 @@ typedef struct DeviceOption {
static const char *cpu_option;
static const char *mem_path;
static const char *incoming;
+static const char *incoming_str[MIGRATION_CHANNEL_TYPE__MAX];
+static MigrationChannel *incoming_channels[MIGRATION_CHANNEL_TYPE__MAX];
static const char *loadvm;
static const char *accelerators;
static bool have_custom_ram_size;
@@ -190,7 +199,7 @@ static int default_parallel = 1;
static int default_monitor = 1;
static int default_floppy = 1;
static int default_cdrom = 1;
-static int default_sdcard = 1;
+static bool auto_create_sdcard = true;
static int default_vga = 1;
static int default_net = 1;
@@ -347,7 +356,7 @@ static QemuOptsList qemu_overcommit_opts = {
.desc = {
{
.name = "mem-lock",
- .type = QEMU_OPT_BOOL,
+ .type = QEMU_OPT_STRING,
},
{
.name = "cpu-pm",
@@ -714,7 +723,7 @@ static void configure_blockdev(BlockdevOptionsQueue *bdo_queue,
default_drive(default_cdrom, snapshot, machine_class->block_default_type, 2,
CDROM_OPTS);
default_drive(default_floppy, snapshot, IF_FLOPPY, 0, FD_OPTS);
- default_drive(default_sdcard, snapshot, IF_SD, 0, SD_OPTS);
+ default_drive(auto_create_sdcard, snapshot, IF_SD, 0, SD_OPTS);
}
@@ -759,7 +768,7 @@ static QemuOptsList qemu_smp_opts = {
},
};
-#if defined(CONFIG_POSIX)
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
static QemuOptsList qemu_run_with_opts = {
.name = "run-with",
.head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head),
@@ -792,8 +801,8 @@ static QemuOptsList qemu_run_with_opts = {
static void realtime_init(void)
{
- if (enable_mlock) {
- if (os_mlock() < 0) {
+ if (should_mlock(mlock_state)) {
+ if (os_mlock(is_mlock_on_fault(mlock_state)) < 0) {
error_report("locking memory failed");
exit(1);
}
@@ -811,29 +820,15 @@ static void configure_msg(QemuOpts *opts)
/***********************************************************/
/* USB devices */
-static int usb_device_add(const char *devname)
+static bool usb_parse(const char *cmdline, Error **errp)
{
- USBDevice *dev = NULL;
-
- if (!machine_usb(current_machine)) {
- return -1;
- }
-
- dev = usbdevice_create(devname);
- if (!dev)
- return -1;
+ g_assert(machine_usb(current_machine));
- return 0;
-}
-
-static int usb_parse(const char *cmdline)
-{
- int r;
- r = usb_device_add(cmdline);
- if (r < 0) {
- error_report("could not add USB device '%s'", cmdline);
+ if (!usbdevice_create(cmdline)) {
+ error_setg(errp, "could not add USB device '%s'", cmdline);
+ return false;
}
- return r;
+ return true;
}
/***********************************************************/
@@ -885,11 +880,11 @@ static void help(int exitcode)
g_get_prgname());
#define DEF(option, opt_arg, opt_enum, opt_help, arch_mask) \
- if ((arch_mask) & arch_type) \
+ if (qemu_arch_available(arch_mask)) \
fputs(opt_help, stdout);
#define ARCHHEADING(text, arch_mask) \
- if ((arch_mask) & arch_type) \
+ if (qemu_arch_available(arch_mask)) \
puts(stringify(text));
#define DEFHEADING(text) ARCHHEADING(text, QEMU_ARCH_ALL)
@@ -1184,7 +1179,8 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
size = strlen(str); /* NUL terminator NOT included in fw_cfg blob */
buf = g_memdup(str, size);
} else if (nonempty_str(gen_id)) {
- if (!fw_cfg_add_from_generator(fw_cfg, name, gen_id, errp)) {
+ if (!fw_cfg_add_file_from_generator(fw_cfg, object_get_objects_root(),
+ gen_id, name, errp)) {
return -1;
}
return 0;
@@ -1196,10 +1192,7 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
return -1;
}
}
- /* For legacy, keep user files in a specific global order. */
- fw_cfg_set_order_override(fw_cfg, FW_CFG_ORDER_OVERRIDE_USER);
fw_cfg_add_file(fw_cfg, name, buf, size);
- fw_cfg_reset_order_override(fw_cfg);
return 0;
}
@@ -1306,22 +1299,27 @@ static void add_device_config(int type, const char *cmdline)
QTAILQ_INSERT_TAIL(&device_configs, conf, next);
}
-static int foreach_device_config(int type, int (*func)(const char *cmdline))
+/**
+ * foreach_device_config_or_exit(): process per-device configs
+ * @type: device_config type
+ * @func: device specific config function, returning pass/fail
+ *
+ * @func is called with the &error_fatal handler so device-specific
+ * error messages can be reported on failure.
+ */
+static void foreach_device_config_or_exit(int type,
+ bool (*func)(const char *cmdline,
+ Error **errp))
{
struct device_config *conf;
- int rc;
QTAILQ_FOREACH(conf, &device_configs, next) {
if (conf->type != type)
continue;
loc_push_restore(&conf->loc);
- rc = func(conf->cmdline);
+ func(conf->cmdline, &error_fatal);
loc_pop(&conf->loc);
- if (rc) {
- return rc;
- }
}
- return 0;
}
static void qemu_disable_default_devices(void)
@@ -1350,8 +1348,8 @@ static void qemu_disable_default_devices(void)
if (!has_defaults || machine_class->no_cdrom) {
default_cdrom = 0;
}
- if (!has_defaults || machine_class->no_sdcard) {
- default_sdcard = 0;
+ if (!has_defaults || !machine_class->auto_create_sdcard) {
+ auto_create_sdcard = false;
}
if (!has_defaults) {
default_audio = 0;
@@ -1451,7 +1449,7 @@ static void qemu_create_default_devices(void)
}
}
-static int serial_parse(const char *devname)
+static bool serial_parse(const char *devname, Error **errp)
{
int index = num_serial_hds;
@@ -1466,13 +1464,13 @@ static int serial_parse(const char *devname)
serial_hds[index] = qemu_chr_new_mux_mon(label, devname, NULL);
if (!serial_hds[index]) {
- error_report("could not connect serial device"
- " to character backend '%s'", devname);
- return -1;
+ error_setg(errp, "could not connect serial device"
+ " to character backend '%s'", devname);
+ return false;
}
}
num_serial_hds++;
- return 0;
+ return true;
}
Chardev *serial_hd(int i)
@@ -1484,47 +1482,47 @@ Chardev *serial_hd(int i)
return NULL;
}
-static int parallel_parse(const char *devname)
+static bool parallel_parse(const char *devname, Error **errp)
{
static int index = 0;
char label[32];
if (strcmp(devname, "none") == 0)
- return 0;
+ return true;
if (index == MAX_PARALLEL_PORTS) {
- error_report("too many parallel ports");
- exit(1);
+ error_setg(errp, "too many parallel ports");
+ return false;
}
snprintf(label, sizeof(label), "parallel%d", index);
parallel_hds[index] = qemu_chr_new_mux_mon(label, devname, NULL);
if (!parallel_hds[index]) {
- error_report("could not connect parallel device"
- " to character backend '%s'", devname);
- return -1;
+ error_setg(errp, "could not connect parallel device"
+ " to character backend '%s'", devname);
+ return false;
}
index++;
- return 0;
+ return true;
}
-static int debugcon_parse(const char *devname)
+static bool debugcon_parse(const char *devname, Error **errp)
{
QemuOpts *opts;
if (!qemu_chr_new_mux_mon("debugcon", devname, NULL)) {
- error_report("invalid character backend '%s'", devname);
- exit(1);
+ error_setg(errp, "invalid character backend '%s'", devname);
+ return false;
}
opts = qemu_opts_create(qemu_find_opts("device"), "debugcon", 1, NULL);
if (!opts) {
- error_report("already have a debugcon device");
- exit(1);
+ error_setg(errp, "already have a debugcon device");
+ return false;
}
qemu_opt_set(opts, "driver", "isa-debugcon", &error_abort);
qemu_opt_set(opts, "chardev", "debugcon", &error_abort);
- return 0;
+ return true;
}
-static gint machine_class_cmp(gconstpointer a, gconstpointer b)
+static gint machine_class_cmp(gconstpointer a, gconstpointer b, gpointer d)
{
const MachineClass *mc1 = a, *mc2 = b;
int res;
@@ -1564,7 +1562,7 @@ static void machine_help_func(const QDict *qdict)
GSList *el;
const char *type = qdict_get_try_str(qdict, "type");
- machines = object_class_get_list(TYPE_MACHINE, false);
+ machines = object_class_get_list(target_machine_typename(), false);
if (type) {
ObjectClass *machine_class = OBJECT_CLASS(find_machine(type, machines));
if (machine_class) {
@@ -1574,7 +1572,7 @@ static void machine_help_func(const QDict *qdict)
}
printf("Supported machines are:\n");
- machines = g_slist_sort(machines, machine_class_cmp);
+ machines = g_slist_sort_with_data(machines, machine_class_cmp, NULL);
for (el = machines; el; el = el->next) {
MachineClass *mc = el->data;
if (mc->alias) {
@@ -1679,10 +1677,10 @@ static MachineClass *select_machine(QDict *qdict, Error **errp)
if (machine_type) {
machine_class = find_machine(machine_type, machines);
- qdict_del(qdict, "type");
if (!machine_class) {
- error_setg(errp, "unsupported machine type: \"%s\"", optarg);
+ error_setg(errp, "unsupported machine type: \"%s\"", machine_type);
}
+ qdict_del(qdict, "type");
} else {
machine_class = find_default_machine(machines);
if (!machine_class) {
@@ -1821,6 +1819,30 @@ static void object_option_add_visitor(Visitor *v)
QTAILQ_INSERT_TAIL(&object_opts, opt, next);
}
+static void incoming_option_parse(const char *str)
+{
+ MigrationChannelType type = MIGRATION_CHANNEL_TYPE_MAIN;
+ MigrationChannel *channel;
+ Visitor *v;
+
+ if (!strcmp(str, "defer")) {
+ channel = NULL;
+ } else if (migrate_is_uri(str)) {
+ migrate_uri_parse(str, &channel, &error_fatal);
+ } else {
+ v = qobject_input_visitor_new_str(str, "channel-type", &error_fatal);
+ visit_type_MigrationChannel(v, NULL, &channel, &error_fatal);
+ visit_free(v);
+ type = channel->channel_type;
+ }
+
+ /* New incoming spec replaces the previous */
+ qapi_free_MigrationChannel(incoming_channels[type]);
+ incoming_channels[type] = channel;
+ incoming_str[type] = str;
+ incoming = incoming_str[MIGRATION_CHANNEL_TYPE_MAIN];
+}
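/*
 * Illustrative -incoming forms accepted by the parser above (not part of
 * the patch): "defer" leaves the channel NULL, a migration URI such as
 * "tcp:0.0.0.0:4444" goes through migrate_uri_parse(), and anything else
 * is fed to the keyval/JSON visitor as a MigrationChannel, e.g. a CPR
 * channel spec whose channel-type selects MIGRATION_CHANNEL_TYPE_CPR (the
 * exact spelling of such a spec is assumed, not shown here).
 */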
+
static void object_option_parse(const char *str)
{
QemuOpts *opts;
@@ -1841,7 +1863,8 @@ static void object_option_parse(const char *str)
type = qemu_opt_get(opts, "qom-type");
if (!type) {
- error_setg(&error_fatal, QERR_MISSING_PARAMETER, "qom-type");
+ error_report(QERR_MISSING_PARAMETER, "qom-type");
+ exit(1);
}
if (user_creatable_print_help(type, opts)) {
exit(0);
@@ -1854,6 +1877,44 @@ static void object_option_parse(const char *str)
visit_free(v);
}
+static void overcommit_parse(const char *str)
+{
+ QemuOpts *opts;
+ const char *mem_lock_opt;
+
+ opts = qemu_opts_parse_noisily(qemu_find_opts("overcommit"),
+ str, false);
+ if (!opts) {
+ exit(1);
+ }
+
+ enable_cpu_pm = qemu_opt_get_bool(opts, "cpu-pm", enable_cpu_pm);
+
+ mem_lock_opt = qemu_opt_get(opts, "mem-lock");
+ if (!mem_lock_opt) {
+ return;
+ }
+
+ if (strcmp(mem_lock_opt, "on") == 0) {
+ mlock_state = MLOCK_ON;
+ return;
+ }
+
+ if (strcmp(mem_lock_opt, "off") == 0) {
+ mlock_state = MLOCK_OFF;
+ return;
+ }
+
+ if (strcmp(mem_lock_opt, "on-fault") == 0) {
+ mlock_state = MLOCK_ON_FAULT;
+ return;
+ }
+
+ error_report("parameter 'mem-lock' expects one of "
+ "'on', 'off', 'on-fault'");
+ exit(1);
+}
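/*
 * Illustrative command lines handled by the parser above (not part of the
 * patch): "-overcommit mem-lock=on" sets MLOCK_ON, "-overcommit
 * mem-lock=on-fault" sets MLOCK_ON_FAULT, and "-overcommit cpu-pm=on"
 * only toggles enable_cpu_pm, leaving mlock_state untouched.
 */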
+
/*
* Very early object creation, before the sandbox options have been activated.
*/
@@ -1971,11 +2032,12 @@ static void qemu_create_early_backends(void)
qemu_console_early_init();
- if (dpy.has_gl && dpy.gl != DISPLAYGL_MODE_OFF && display_opengl == 0) {
+ if (dpy.has_gl && dpy.gl != DISPLAY_GL_MODE_OFF && display_opengl == 0) {
#if defined(CONFIG_OPENGL)
- error_report("OpenGL is not supported by the display");
+ error_report("OpenGL is not supported by display backend '%s'",
+ DisplayType_str(dpy.type));
#else
- error_report("OpenGL support is disabled");
+ error_report("OpenGL support was not enabled in this build of QEMU");
#endif
exit(1);
}
@@ -2041,12 +2103,9 @@ static void qemu_create_late_backends(void)
qemu_opts_foreach(qemu_find_opts("mon"),
mon_init_func, NULL, &error_fatal);
- if (foreach_device_config(DEV_SERIAL, serial_parse) < 0)
- exit(1);
- if (foreach_device_config(DEV_PARALLEL, parallel_parse) < 0)
- exit(1);
- if (foreach_device_config(DEV_DEBUGCON, debugcon_parse) < 0)
- exit(1);
+ foreach_device_config_or_exit(DEV_SERIAL, serial_parse);
+ foreach_device_config_or_exit(DEV_PARALLEL, parallel_parse);
+ foreach_device_config_or_exit(DEV_DEBUGCON, debugcon_parse);
/* now chardevs have been created we may have semihosting to connect */
qemu_semihosting_chardev_init();
@@ -2110,6 +2169,19 @@ static void parse_memory_options(void)
loc_pop(&loc);
}
+static void qemu_create_machine_containers(Object *machine)
+{
+ static const char *const containers[] = {
+ "unattached",
+ "peripheral",
+ "peripheral-anon",
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(containers); i++) {
+ object_property_add_new_container(machine, containers[i]);
+ }
+}
+
static void qemu_create_machine(QDict *qdict)
{
MachineClass *machine_class = select_machine(qdict, &error_fatal);
@@ -2118,8 +2190,8 @@ static void qemu_create_machine(QDict *qdict)
current_machine = MACHINE(object_new_with_class(OBJECT_CLASS(machine_class)));
object_property_add_child(object_get_root(), "machine",
OBJECT(current_machine));
- object_property_add_child(container_get(OBJECT(current_machine),
- "/unattached"),
+ qemu_create_machine_containers(OBJECT(current_machine));
+ object_property_add_child(machine_get_container("unattached"),
"sysbus", OBJECT(sysbus_get_default()));
if (machine_class->minimum_page_bits) {
@@ -2360,6 +2432,7 @@ static void configure_accelerators(const char *progname)
/* Select the default accelerator */
bool have_tcg = accel_find("tcg");
bool have_kvm = accel_find("kvm");
+ bool have_hvf = accel_find("hvf");
if (have_tcg && have_kvm) {
if (g_str_has_suffix(progname, "kvm")) {
@@ -2372,6 +2445,8 @@ static void configure_accelerators(const char *progname)
accelerators = "kvm";
} else if (have_tcg) {
accelerators = "tcg";
+ } else if (have_hvf) {
+ accelerators = "hvf";
} else {
error_report("No accelerator selected and"
" no default accelerator available");
@@ -2421,19 +2496,25 @@ static void configure_accelerators(const char *progname)
static void qemu_validate_options(const QDict *machine_opts)
{
const char *kernel_filename = qdict_get_try_str(machine_opts, "kernel");
+ const char *shim_filename = qdict_get_try_str(machine_opts, "shim");
const char *initrd_filename = qdict_get_try_str(machine_opts, "initrd");
const char *kernel_cmdline = qdict_get_try_str(machine_opts, "append");
if (kernel_filename == NULL) {
- if (kernel_cmdline != NULL) {
- error_report("-append only allowed with -kernel option");
- exit(1);
- }
+ if (kernel_cmdline != NULL) {
+ error_report("-append only allowed with -kernel option");
+ exit(1);
+ }
+
+ if (shim_filename != NULL) {
+ error_report("-shim only allowed with -kernel option");
+ exit(1);
+ }
- if (initrd_filename != NULL) {
- error_report("-initrd only allowed with -kernel option");
- exit(1);
- }
+ if (initrd_filename != NULL) {
+ error_report("-initrd only allowed with -kernel option");
+ exit(1);
+ }
}
if (loadvm && incoming) {
@@ -2620,12 +2701,27 @@ static void qemu_init_displays(void)
static void qemu_init_board(void)
{
+ MachineClass *machine_class = MACHINE_GET_CLASS(current_machine);
+
/* process plugin before CPUs are created, but once -smp has been parsed */
qemu_plugin_load_list(&plugin_list, &error_fatal);
/* From here on we enter MACHINE_PHASE_INITIALIZED. */
machine_run_board_init(current_machine, mem_path, &error_fatal);
+ if (machine_class->auto_create_sdcard) {
+ bool ambiguous;
+
+ /* Ensure there is an SD bus available to create the SD card on */
+ Object *obj = object_resolve_path_type("", TYPE_SD_BUS, &ambiguous);
+ if (!obj && !ambiguous) {
+ fprintf(stderr, "Cannot create sd-card on '%s' machine"
+ " because it lacks an sd-bus\n",
+ machine_class->name);
+ abort();
+ }
+ }
+
drive_check_orphaned();
realtime_init();
@@ -2642,29 +2738,20 @@ static void qemu_create_cli_devices(void)
/* init USB devices */
if (machine_usb(current_machine)) {
- if (foreach_device_config(DEV_USB, usb_parse) < 0)
- exit(1);
+ foreach_device_config_or_exit(DEV_USB, usb_parse);
}
/* init generic devices */
- rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE);
qemu_opts_foreach(qemu_find_opts("device"),
device_init_func, NULL, &error_fatal);
QTAILQ_FOREACH(opt, &device_opts, next) {
- DeviceState *dev;
+ QObject *ret_data = NULL;
+
loc_push_restore(&opt->loc);
- /*
- * TODO Eventually we should call qmp_device_add() here to make sure it
- * behaves the same, but QMP still has to accept incorrectly typed
- * options until libvirt is fixed and we want to be strict on the CLI
- * from the start, so call qdev_device_add_from_qdict() directly for
- * now.
- */
- dev = qdev_device_add_from_qdict(opt->opts, true, &error_fatal);
- object_unref(OBJECT(dev));
+ qmp_device_add(opt->opts, &ret_data, &error_fatal);
+ assert(ret_data == NULL); /* error_fatal aborts */
loc_pop(&opt->loc);
}
- rom_reset_order_override();
}
static bool qemu_machine_creation_done(Error **errp)
@@ -2696,10 +2783,8 @@ static bool qemu_machine_creation_done(Error **errp)
exit(1);
}
- if (foreach_device_config(DEV_GDB, gdbserver_start) < 0) {
- error_setg(errp, "could not start gdbserver");
- return false;
- }
+ foreach_device_config_or_exit(DEV_GDB, gdbserver_start);
+
if (!vga_interface_created && !default_vga &&
vga_interface_type != VGA_NONE) {
warn_report("A -vga option was passed but this machine "
@@ -2734,8 +2819,11 @@ void qmp_x_exit_preconfig(Error **errp)
if (incoming) {
Error *local_err = NULL;
if (strcmp(incoming, "defer") != 0) {
- qmp_migrate_incoming(incoming, false, NULL, true, true,
- &local_err);
+ g_autofree MigrationChannelList *channels =
+ g_new0(MigrationChannelList, 1);
+
+ channels->value = incoming_channels[MIGRATION_CHANNEL_TYPE_MAIN];
+ qmp_migrate_incoming(NULL, true, channels, true, true, &local_err);
if (local_err) {
error_reportf_err(local_err, "-incoming %s: ", incoming);
exit(1);
@@ -2796,7 +2884,10 @@ void qemu_init(int argc, char **argv)
os_setup_limits();
- qemu_init_arch_modules();
+#ifdef CONFIG_MODULES
+ module_init_info(qemu_modinfo);
+ module_allow_arch(target_name());
+#endif
qemu_init_subsystems();
@@ -2835,7 +2926,7 @@ void qemu_init(int argc, char **argv)
const QEMUOption *popt;
popt = lookup_opt(argc, argv, &optarg, &optind);
- if (!(popt->arch_mask & arch_type)) {
+ if (!qemu_arch_available(popt->arch_mask)) {
error_report("Option not supported for this target");
exit(1);
}
@@ -2909,20 +3000,12 @@ void qemu_init(int argc, char **argv)
nographic = true;
dpy.type = DISPLAY_TYPE_NONE;
break;
- case QEMU_OPTION_portrait:
- graphic_rotate = 90;
- break;
- case QEMU_OPTION_rotate:
- graphic_rotate = strtol(optarg, (char **) &optarg, 10);
- if (graphic_rotate != 0 && graphic_rotate != 90 &&
- graphic_rotate != 180 && graphic_rotate != 270) {
- error_report("only 90, 180, 270 deg rotation is available");
- exit(1);
- }
- break;
case QEMU_OPTION_kernel:
qdict_put_str(machine_opts_dict, "kernel", optarg);
break;
+ case QEMU_OPTION_shim:
+ qdict_put_str(machine_opts_dict, "shim", optarg);
+ break;
case QEMU_OPTION_initrd:
qdict_put_str(machine_opts_dict, "initrd", optarg);
break;
@@ -3442,6 +3525,7 @@ void qemu_init(int argc, char **argv)
nb_prom_envs++;
break;
case QEMU_OPTION_old_param:
+ warn_report("-old-param is deprecated");
old_param = 1;
break;
case QEMU_OPTION_rtc:
@@ -3462,7 +3546,7 @@ void qemu_init(int argc, char **argv)
if (!incoming) {
runstate_set(RUN_STATE_INMIGRATE);
}
- incoming = optarg;
+ incoming_option_parse(optarg);
break;
case QEMU_OPTION_only_migratable:
only_migratable = 1;
@@ -3547,13 +3631,7 @@ void qemu_init(int argc, char **argv)
object_option_parse(optarg);
break;
case QEMU_OPTION_overcommit:
- opts = qemu_opts_parse_noisily(qemu_find_opts("overcommit"),
- optarg, false);
- if (!opts) {
- exit(1);
- }
- enable_mlock = qemu_opt_get_bool(opts, "mem-lock", enable_mlock);
- enable_cpu_pm = qemu_opt_get_bool(opts, "cpu-pm", enable_cpu_pm);
+ overcommit_parse(optarg);
break;
case QEMU_OPTION_compat:
{
@@ -3596,16 +3674,7 @@ void qemu_init(int argc, char **argv)
case QEMU_OPTION_nouserconfig:
/* Nothing to be parsed here. Especially, do not error out below. */
break;
-#if defined(CONFIG_POSIX)
- case QEMU_OPTION_runas:
- warn_report("-runas is deprecated, use '-run-with user=...' instead");
- if (!os_set_runas(optarg)) {
- error_report("User \"%s\" doesn't exist"
- " (and is not <uid>:<gid>)",
- optarg);
- exit(1);
- }
- break;
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
case QEMU_OPTION_daemonize:
os_set_daemonize(true);
break;
@@ -3689,6 +3758,12 @@ void qemu_init(int argc, char **argv)
qemu_create_machine(machine_opts_dict);
+ /*
+ * Load incoming CPR state before any devices are created, because it
+ * contains file descriptors that are needed in device initialization code.
+ */
+ cpr_state_load(incoming_channels[MIGRATION_CHANNEL_TYPE_CPR], &error_fatal);
+
suspend_mux_open();
qemu_disable_default_devices();
diff --git a/system/watchpoint.c b/system/watchpoint.c
index 2aa2a9e..21d0bb3 100644
--- a/system/watchpoint.c
+++ b/system/watchpoint.c
@@ -19,7 +19,9 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
+#include "exec/watchpoint.h"
#include "hw/core/cpu.h"
/* Add a watchpoint. */
diff --git a/target-info-stub.c b/target-info-stub.c
new file mode 100644
index 0000000..fecc0e7
--- /dev/null
+++ b/target-info-stub.c
@@ -0,0 +1,25 @@
+/*
+ * QEMU target info stubs (target specific)
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/target-info.h"
+#include "qemu/target-info-impl.h"
+#include "hw/boards.h"
+#include "cpu.h"
+
+static const TargetInfo target_info_stub = {
+ .target_name = TARGET_NAME,
+ .long_bits = TARGET_LONG_BITS,
+ .cpu_type = CPU_RESOLVING_TYPE,
+ .machine_typename = TYPE_MACHINE,
+};
+
+const TargetInfo *target_info(void)
+{
+ return &target_info_stub;
+}
diff --git a/target-info.c b/target-info.c
new file mode 100644
index 0000000..16fdca7
--- /dev/null
+++ b/target-info.c
@@ -0,0 +1,31 @@
+/*
+ * QEMU target info helpers
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/target-info.h"
+#include "qemu/target-info-impl.h"
+
+const char *target_name(void)
+{
+ return target_info()->target_name;
+}
+
+unsigned target_long_bits(void)
+{
+ return target_info()->long_bits;
+}
+
+const char *target_cpu_type(void)
+{
+ return target_info()->cpu_type;
+}
+
+const char *target_machine_typename(void)
+{
+ return target_info()->machine_typename;
+}
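A short sketch of how these accessors are meant to be consumed, mirroring the vl.c hunks above that switch from compile-time TARGET_* macros to run-time queries (the wrapper function below is illustrative only):

#include "qemu/osdep.h"
#include "qemu/target-info.h"

static void print_target_summary(void)
{
    printf("target %s: %u-bit longs, CPU type %s, machine base type %s\n",
           target_name(), target_long_bits(),
           target_cpu_type(), target_machine_typename());
}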
diff --git a/target/Kconfig b/target/Kconfig
index 7f64112..d0c7b59 100644
--- a/target/Kconfig
+++ b/target/Kconfig
@@ -1,7 +1,6 @@
source alpha/Kconfig
source arm/Kconfig
source avr/Kconfig
-source cris/Kconfig
source hppa/Kconfig
source i386/Kconfig
source loongarch/Kconfig
diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h
index 5ce213a..a799f42 100644
--- a/target/alpha/cpu-param.h
+++ b/target/alpha/cpu-param.h
@@ -2,14 +2,12 @@
* Alpha cpu parameters for qemu.
*
* Copyright (c) 2007 Jocelyn Mayer
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef ALPHA_CPU_PARAM_H
#define ALPHA_CPU_PARAM_H
-#define TARGET_LONG_BITS 64
-
/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
#define TARGET_PHYS_ADDR_SPACE_BITS 44
@@ -20,14 +18,12 @@
* a 4k minimum to match x86 host, which can minimize emulation issues.
*/
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 12
# define TARGET_VIRT_ADDR_SPACE_BITS 63
#else
# define TARGET_PAGE_BITS 13
# define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
#endif
-/* Alpha processors have a weak memory model */
-#define TCG_GUEST_DEFAULT_MO (0)
+#define TARGET_INSN_START_EXTRA_WORDS 0
#endif
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 9db1dff..2082db4 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -23,7 +23,10 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
+#include "fpu/softfloat.h"
static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
@@ -38,6 +41,18 @@ static vaddr alpha_cpu_get_pc(CPUState *cs)
return env->pc;
}
+static TCGTBCPUState alpha_get_tb_cpu_state(CPUState *cs)
+{
+ CPUAlphaState *env = cpu_env(cs);
+ uint32_t flags = env->flags & ENV_FLAG_TB_MASK;
+
+#ifdef CONFIG_USER_ONLY
+ flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus;
+#endif
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void alpha_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -61,6 +76,7 @@ static void alpha_restore_state_to_opc(CPUState *cs,
}
}
+#ifndef CONFIG_USER_ONLY
static bool alpha_cpu_has_work(CPUState *cs)
{
/* Here we are checking to see if the CPU should wake up from HALT.
@@ -75,6 +91,7 @@ static bool alpha_cpu_has_work(CPUState *cs)
| CPU_INTERRUPT_SMP
| CPU_INTERRUPT_MCHK);
}
+#endif /* !CONFIG_USER_ONLY */
static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -83,6 +100,7 @@ static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch)
static void alpha_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
+ info->endian = BFD_ENDIAN_LITTLE;
info->mach = bfd_mach_alpha_ev6;
info->print_insn = print_insn_alpha;
}
@@ -187,7 +205,26 @@ static void alpha_cpu_initfn(Object *obj)
{
CPUAlphaState *env = cpu_env(CPU(obj));
+ /* TODO all this should be done in reset, not init */
+
env->lock_addr = -1;
+
+ /*
+ * TODO: this is incorrect. The Alpha Architecture Handbook version 4
+ * describes NaN propagation in section 4.7.10.4. We should prefer
+ * the operand in Fb (whether it is a QNaN or an SNaN), then the
+ * operand in Fa. That is float_2nan_prop_ba.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /* Default NaN: sign bit clear, msb frac bit set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ /*
+ * TODO: this is incorrect. The Alpha Architecture Handbook version 4
+ * section 4.7.7.11 says that we flush to zero for underflow cases, so
+ * this should be float_ftz_after_rounding to match the
+ * tininess_after_rounding (which is specified in section 4.7.5).
+ */
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
#if defined(CONFIG_USER_ONLY)
env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD
@@ -202,31 +239,39 @@ static void alpha_cpu_initfn(Object *obj)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps alpha_sysemu_ops = {
+ .has_work = alpha_cpu_has_work,
.get_phys_page_debug = alpha_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps alpha_tcg_ops = {
+ /* Alpha processors have a weak memory model */
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = alpha_translate_init,
+ .translate_code = alpha_translate_code,
+ .get_tb_cpu_state = alpha_get_tb_cpu_state,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,
+ .mmu_index = alpha_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = alpha_cpu_record_sigsegv,
.record_sigbus = alpha_cpu_record_sigbus,
#else
.tlb_fill = alpha_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.cpu_exec_halt = alpha_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
-static void alpha_cpu_class_init(ObjectClass *oc, void *data)
+static void alpha_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -236,8 +281,6 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
&acc->parent_realize);
cc->class_by_name = alpha_cpu_class_by_name;
- cc->has_work = alpha_cpu_has_work;
- cc->mmu_index = alpha_cpu_mmu_index;
cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc;
cc->get_pc = alpha_cpu_get_pc;
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index f9e2ecb..45944e4 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -21,7 +21,9 @@
#define ALPHA_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
#define ICACHE_LINE_SIZE 32
@@ -267,7 +269,6 @@ struct ArchCPU {
/**
* AlphaCPUClass:
* @parent_realize: The parent class' realize handler.
- * @parent_reset: The parent class' reset handler.
*
* An Alpha CPU model.
*/
@@ -275,7 +276,6 @@ struct AlphaCPUClass {
CPUClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
};
#ifndef CONFIG_USER_ONLY
@@ -289,8 +289,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-#include "exec/cpu-all.h"
-
enum {
FEATURE_ASN = 0x00000001,
FEATURE_SPS = 0x00000002,
@@ -433,6 +431,8 @@ enum {
};
void alpha_translate_init(void);
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
@@ -464,17 +464,6 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr);
#endif
-static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *pflags = env->flags & ENV_FLAG_TB_MASK;
-#ifdef CONFIG_USER_ONLY
- *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
-#endif
-}
-
#ifdef CONFIG_USER_ONLY
/* Copied from linux ieee_swcr_to_fpcr. */
static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr)
diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c
index 63d9e9c..30f3c7f 100644
--- a/target/alpha/fpu_helper.c
+++ b/target/alpha/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -455,26 +454,27 @@ static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode)
{
float64 fa;
int64_t ret;
- uint32_t exc;
+ uint32_t exc = 0;
+ int flags;
fa = t_to_float64(a);
ret = float64_to_int64_modulo(fa, roundmode, &FP_STATUS);
- exc = get_float_exception_flags(&FP_STATUS);
- if (unlikely(exc)) {
+ flags = get_float_exception_flags(&FP_STATUS);
+ if (unlikely(flags)) {
set_float_exception_flags(0, &FP_STATUS);
/* We need to massage the resulting exceptions. */
- if (exc & float_flag_invalid_cvti) {
+ if (flags & float_flag_invalid_cvti) {
/* Overflow, either normal or infinity. */
if (float64_is_infinity(fa)) {
exc = FPCR_INV;
} else {
exc = FPCR_IOV | FPCR_INE;
}
- } else if (exc & float_flag_invalid) {
+ } else if (flags & float_flag_invalid) {
exc = FPCR_INV;
- } else if (exc & float_flag_inexact) {
+ } else if (flags & float_flag_inexact) {
exc = FPCR_INE;
}
}
diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c
index 13694fd..1a7e2dd 100644
--- a/target/alpha/gdbstub.c
+++ b/target/alpha/gdbstub.c
@@ -59,7 +59,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPUAlphaState *env = cpu_env(cs);
- target_ulong tmp = ldtul_p(mem_buf);
+ target_ulong tmp = ldq_le_p(mem_buf);
CPU_DoubleU d;
switch (n) {
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 2f1000c..096eac3 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -20,11 +20,13 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "fpu/softfloat-types.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
+#include "system/memory.h"
#define CONVERT_BIT(X, SRC, DST) \
diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c
index 5672696..6bfe635 100644
--- a/target/alpha/int_helper.c
+++ b/target/alpha/int_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
index f09834f..5f302b1 100644
--- a/target/alpha/machine.c
+++ b/target/alpha/machine.c
@@ -74,7 +74,7 @@ static const VMStateDescription vmstate_env = {
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, AlphaCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState),
VMSTATE_END_OF_LIST()
};
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
index 872955f..2113fe3 100644
--- a/target/alpha/mem_helper.c
+++ b/target/alpha/mem_helper.c
@@ -20,8 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr)
{
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
index 768116e..51e3254 100644
--- a/target/alpha/sys_helper.c
+++ b/target/alpha/sys_helper.c
@@ -19,11 +19,11 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/helper-proto.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "qemu/timer.h"
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index fb6cac4..cebab03 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -19,13 +19,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#define HELPER_H "helper.h"
@@ -2954,8 +2955,8 @@ static const TranslatorOps alpha_tr_ops = {
.tb_stop = alpha_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
diff --git a/target/alpha/vax_helper.c b/target/alpha/vax_helper.c
index f94fb51..c1d201e 100644
--- a/target/alpha/vax_helper.c
+++ b/target/alpha/vax_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
index 06cdf4b..1dd7984 100644
--- a/target/arm/arch_dump.c
+++ b/target/arm/arch_dump.c
@@ -21,8 +21,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
#include "cpu-features.h"
+#include "internals.h"
/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_regs {
@@ -142,7 +143,6 @@ static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
return 0;
}
-#ifdef TARGET_AARCH64
static off_t sve_zreg_offset(uint32_t vq, int n)
{
off_t off = sizeof(struct aarch64_user_sve_header);
@@ -230,7 +230,6 @@ static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
return 0;
}
-#endif
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s)
@@ -272,11 +271,9 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
return ret;
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = aarch64_write_elf64_sve(f, env, cpuid, s);
}
-#endif
return ret;
}
@@ -450,11 +447,9 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
if (class == ELFCLASS64) {
note_size = AARCH64_PRSTATUS_NOTE_SIZE;
note_size += AARCH64_PRFPREG_NOTE_SIZE;
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
}
-#endif
} else {
note_size = ARM_PRSTATUS_NOTE_SIZE;
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index 2b2055c..20c70c7 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -15,7 +15,7 @@
#include "arm-powerctl.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "target/arm/multiprocessing.h"
#ifndef DEBUG_ARM_POWERCTL
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
index 3cc8cc7..cefd235 100644
--- a/target/arm/arm-qmp-cmds.c
+++ b/target/arm/arm-qmp-cmds.c
@@ -26,10 +26,11 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qapi-commands-misc-target.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/qapi-commands-misc-arm.h"
+#include "qobject/qdict.h"
#include "qom/qom-qobject.h"
+#include "cpu.h"
static GICCapability *gic_cap_new(int version)
{
@@ -46,7 +47,7 @@ static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
#ifdef CONFIG_KVM
int fdarray[3];
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, NULL)) {
return;
}
@@ -94,7 +95,7 @@ static const char *cpu_model_advertised_features[] = {
"sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
"sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
"kvm-no-adjvtime", "kvm-steal-time",
- "pauth", "pauth-impdef", "pauth-qarma3",
+ "pauth", "pauth-impdef", "pauth-qarma3", "pauth-qarma5",
NULL
};
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index cc7c543..c1a7ae3 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -23,6 +23,7 @@
#include "hw/registerfields.h"
#include "target/arm/kvm-consts.h"
+#include "cpu.h"
/*
* ARMCPRegInfo type field bits:
@@ -126,6 +127,14 @@ enum {
* equivalent EL1 register when FEAT_NV2 is enabled.
*/
ARM_CP_NV2_REDIRECT = 1 << 20,
+ /*
+ * Flag: this is a TLBI insn which (when FEAT_XS is present) also has
+ * an NXS variant at the same encoding except that crn is 1 greater,
+ * so when registering this cpreg automatically also register one
+ * for the TLBI NXS variant. (For QEMU the NXS variant behaves
+ * identically to the normal one, other than FGT trapping handling.)
+ */
+ ARM_CP_ADD_TLBI_NXS = 1 << 21,
};
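As an illustration of the new ARM_CP_ADD_TLBI_NXS flag (a sketch only, not part of the patch; the write-handler name is a hypothetical stand-in), a TLBI cpreg opts in by OR-ing the flag into its .type, and registration then also creates the nXS twin at crn + 1:

/* Sketch: a TLBI insn cpreg that also wants its FEAT_XS "nXS" twin. */
static const ARMCPRegInfo example_tlbi_vae1is = {
    .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
    .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
    .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
    .fgt = FGT_TLBIVAE1IS,                /* FGT bit for the normal insn */
    .writefn = example_tlbi_vae1is_write, /* hypothetical write handler */
};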
/*
@@ -320,20 +329,23 @@ typedef enum CPAccessResult {
* Access fails due to a configurable trap or enable which would
* result in a categorized exception syndrome giving information about
* the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
- * 0xc or 0x18).
+ * 0xc or 0x18). These traps are always to a specified target EL,
+ * never to the usual target EL.
*/
- CP_ACCESS_TRAP = (1 << 2),
- CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2,
- CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3,
+ CP_ACCESS_TRAP_BIT = (1 << 2),
+ CP_ACCESS_TRAP_EL1 = CP_ACCESS_TRAP_BIT | 1,
+ CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP_BIT | 2,
+ CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP_BIT | 3,
/*
- * Access fails and results in an exception syndrome 0x0 ("uncategorized").
+ * Access fails with UNDEFINED, i.e. an exception syndrome 0x0
+ * ("uncategorized"), which is what an undefined insn produces.
* Note that this is not a catch-all case -- the set of cases which may
* result in this failure is specifically defined by the architecture.
* This trap is always to the usual target EL, never directly to a
* specified target EL.
*/
- CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2),
+ CP_ACCESS_UNDEFINED = (2 << 2),
} CPAccessResult;
/* Indexes into fgt_read[] */
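A minimal sketch (not from the patch) of how a cpreg access function reports the renamed results; arm_current_el() and arm_is_el2_enabled() are existing helpers:

/* Sketch: returning the renamed CPAccessResult values from an accessfn. */
static CPAccessResult example_accessfn(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        /* UNDEF, syndrome 0x0, taken to the usual target EL. */
        return CP_ACCESS_UNDEFINED;
    }
    if (el == 1 && arm_is_el2_enabled(env)) {
        /* Categorized trap routed explicitly to EL2. */
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}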
@@ -621,6 +633,7 @@ FIELD(HDFGWTR_EL2, NBRBCTL, 60, 1)
FIELD(HDFGWTR_EL2, NBRBDATA, 61, 1)
FIELD(HDFGWTR_EL2, NPMSNEVFR_EL1, 62, 1)
+FIELD(FGT, NXS, 13, 1) /* Honour HCR_EL2.FGTnXS to suppress FGT */
/* Which fine-grained trap bit register to check, if any */
FIELD(FGT, TYPE, 10, 3)
FIELD(FGT, REV, 9, 1) /* Is bit sense reversed? */
@@ -639,6 +652,17 @@ FIELD(FGT, BITPOS, 0, 6) /* Bit position within the uint64_t */
#define DO_REV_BIT(REG, BITNAME) \
FGT_##BITNAME = FGT_##REG | FGT_REV | R_##REG##_EL2_##BITNAME##_SHIFT
+/*
+ * The FGT bits for TLBI maintenance instructions accessible at EL1 always
+ * affect the "normal" TLBI insns; they affect the corresponding TLBI insns
+ * with the nXS qualifier only if HCRX_EL2.FGTnXS is 0. We define e.g.
+ * FGT_TLBIVAE1 to use for the normal insn, and FGT_TLBIVAE1NXS to use
+ * for the nXS qualified insn.
+ */
+#define DO_TLBINXS_BIT(REG, BITNAME) \
+ FGT_##BITNAME = FGT_##REG | R_##REG##_EL2_##BITNAME##_SHIFT, \
+ FGT_##BITNAME##NXS = FGT_##BITNAME | R_FGT_NXS_MASK
+
typedef enum FGTBit {
/*
* These bits tell us which register arrays to use:
@@ -772,36 +796,36 @@ typedef enum FGTBit {
DO_BIT(HFGITR, ATS1E0W),
DO_BIT(HFGITR, ATS1E1RP),
DO_BIT(HFGITR, ATS1E1WP),
- DO_BIT(HFGITR, TLBIVMALLE1OS),
- DO_BIT(HFGITR, TLBIVAE1OS),
- DO_BIT(HFGITR, TLBIASIDE1OS),
- DO_BIT(HFGITR, TLBIVAAE1OS),
- DO_BIT(HFGITR, TLBIVALE1OS),
- DO_BIT(HFGITR, TLBIVAALE1OS),
- DO_BIT(HFGITR, TLBIRVAE1OS),
- DO_BIT(HFGITR, TLBIRVAAE1OS),
- DO_BIT(HFGITR, TLBIRVALE1OS),
- DO_BIT(HFGITR, TLBIRVAALE1OS),
- DO_BIT(HFGITR, TLBIVMALLE1IS),
- DO_BIT(HFGITR, TLBIVAE1IS),
- DO_BIT(HFGITR, TLBIASIDE1IS),
- DO_BIT(HFGITR, TLBIVAAE1IS),
- DO_BIT(HFGITR, TLBIVALE1IS),
- DO_BIT(HFGITR, TLBIVAALE1IS),
- DO_BIT(HFGITR, TLBIRVAE1IS),
- DO_BIT(HFGITR, TLBIRVAAE1IS),
- DO_BIT(HFGITR, TLBIRVALE1IS),
- DO_BIT(HFGITR, TLBIRVAALE1IS),
- DO_BIT(HFGITR, TLBIRVAE1),
- DO_BIT(HFGITR, TLBIRVAAE1),
- DO_BIT(HFGITR, TLBIRVALE1),
- DO_BIT(HFGITR, TLBIRVAALE1),
- DO_BIT(HFGITR, TLBIVMALLE1),
- DO_BIT(HFGITR, TLBIVAE1),
- DO_BIT(HFGITR, TLBIASIDE1),
- DO_BIT(HFGITR, TLBIVAAE1),
- DO_BIT(HFGITR, TLBIVALE1),
- DO_BIT(HFGITR, TLBIVAALE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIASIDE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAAE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVALE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAALE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVALE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1OS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIASIDE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAAE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVALE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAALE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVALE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1IS),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVALE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIASIDE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAAE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVALE1),
+ DO_TLBINXS_BIT(HFGITR, TLBIVAALE1),
DO_BIT(HFGITR, CFPRCTX),
DO_BIT(HFGITR, DVPRCTX),
DO_BIT(HFGITR, CPPRCTX),
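For reference, this is roughly what one of the converted entries above produces (pure preprocessor expansion of DO_TLBINXS_BIT as defined earlier in this header):

/* DO_TLBINXS_BIT(HFGITR, TLBIVAE1) expands, roughly, to: */
FGT_TLBIVAE1 = FGT_HFGITR | R_HFGITR_EL2_TLBIVAE1_SHIFT,
FGT_TLBIVAE1NXS = FGT_TLBIVAE1 | R_FGT_NXS_MASK,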
@@ -1134,4 +1158,32 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
return ri->opc1 == 4 || ri->opc1 == 5;
}
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure) \
+ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
+ do { \
+ if (_secure) { \
+ (_env)->cp15._regname##_s = (_val); \
+ } else { \
+ (_env)->cp15._regname##_ns = (_val); \
+ } \
+ } while (0)
+
+/*
+ * Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system. These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
+ A32_BANKED_REG_GET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
+ A32_BANKED_REG_SET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
+ (_val))
+
#endif /* TARGET_ARM_CPREGS_H */
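A brief usage sketch for the banked-register macros moved into this header (illustration only; vbar_s/vbar_ns are assumed to be the existing banked VBAR fields in CPUARMState):

/* Sketch: reading and writing a banked cp15 register with these macros. */
static inline uint64_t example_get_current_vbar(CPUARMState *env)
{
    /* Bank chosen from the current secure state (AArch32 EL3 only). */
    return A32_BANKED_CURRENT_REG_GET(env, vbar);
}

static inline void example_set_ns_vbar(CPUARMState *env, uint64_t val)
{
    /* Explicitly address the Non-secure bank. */
    A32_BANKED_REG_SET(env, vbar, false, val);
}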
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index c59ca10..4452e7c 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -21,6 +21,8 @@
#define TARGET_ARM_FEATURES_H
#include "hw/registerfields.h"
+#include "qemu/host-utils.h"
+#include "cpu.h"
/*
* Naming convention for isar_feature functions:
@@ -473,6 +475,11 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}
+static inline bool isar_feature_aa64_xs(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, XS) != 0;
+}
+
/*
* These are the values from APA/API/APA3.
* In general these must be compared '>=', per the normal Arm ARM
@@ -556,6 +563,11 @@ static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
}
+static inline bool isar_feature_aa64_ebf16(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) > 1;
+}
+
static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
@@ -586,6 +598,11 @@ static inline bool isar_feature_aa64_mops(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS);
}
+static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, RPRES);
+}
+
static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically. */
@@ -791,11 +808,21 @@ static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
}
+static inline bool isar_feature_aa64_afp(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, AFP) != 0;
+}
+
static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0;
}
+static inline bool isar_feature_aa64_cmow(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, CMOW) != 0;
+}
+
static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0;
@@ -1022,6 +1049,55 @@ static inline bool isar_feature_any_evt(const ARMISARegisters *id)
return isar_feature_aa64_evt(id) || isar_feature_aa32_evt(id);
}
+typedef enum {
+ CCSIDR_FORMAT_LEGACY,
+ CCSIDR_FORMAT_CCIDX,
+} CCSIDRFormat;
+
+static inline uint64_t make_ccsidr(CCSIDRFormat format, unsigned assoc,
+ unsigned linesize, unsigned cachesize,
+ uint8_t flags)
+{
+ unsigned lg_linesize = ctz32(linesize);
+ unsigned sets;
+ uint64_t ccsidr = 0;
+
+ assert(assoc != 0);
+ assert(is_power_of_2(linesize));
+ assert(lg_linesize >= 4 && lg_linesize <= 7 + 4);
+
+ /* sets * associativity * linesize == cachesize. */
+ sets = cachesize / (assoc * linesize);
+ assert(cachesize % (assoc * linesize) == 0);
+
+ if (format == CCSIDR_FORMAT_LEGACY) {
+ /*
+ * The 32-bit CCSIDR format is:
+ * [27:13] number of sets - 1
+ * [12:3] associativity - 1
+ * [2:0] log2(linesize) - 4
+ * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
+ */
+ ccsidr = deposit32(ccsidr, 28, 4, flags);
+ ccsidr = deposit32(ccsidr, 13, 15, sets - 1);
+ ccsidr = deposit32(ccsidr, 3, 10, assoc - 1);
+ ccsidr = deposit32(ccsidr, 0, 3, lg_linesize - 4);
+ } else {
+ /*
+ * The 64-bit CCSIDR_EL1 format is:
+ * [55:32] number of sets - 1
+ * [23:3] associativity - 1
+ * [2:0] log2(linesize) - 4
+ * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
+ */
+ ccsidr = deposit64(ccsidr, 32, 24, sets - 1);
+ ccsidr = deposit64(ccsidr, 3, 21, assoc - 1);
+ ccsidr = deposit64(ccsidr, 0, 3, lg_linesize - 4);
+ }
+
+ return ccsidr;
+}
+
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
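A worked example for the make_ccsidr() helper added above (sketch only): a 32 KiB, 4-way cache with 64-byte lines has 32768 / (4 * 64) = 128 sets, so the legacy encoding holds sets-1 = 127 in [27:13], associativity-1 = 3 in [12:3] and log2(64)-4 = 2 in [2:0]:

/* Sketch: encode a 32 KiB, 4-way, 64-byte-line cache (flags = 0). */
static uint64_t example_l1_ccsidr(void)
{
    uint64_t legacy = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * 1024, 0);

    /* legacy == (127 << 13) | (3 << 3) | 2 == 0x000fe01a */
    return legacy;
}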
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 2d5f3aa..8b46c7c 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -2,28 +2,24 @@
* ARM cpu parameters for qemu.
*
* Copyright (c) 2003 Fabrice Bellard
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef ARM_CPU_PARAM_H
#define ARM_CPU_PARAM_H
#ifdef TARGET_AARCH64
-# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 52
# define TARGET_VIRT_ADDR_SPACE_BITS 52
#else
-# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#ifdef CONFIG_USER_ONLY
-# ifdef TARGET_AARCH64
-# define TARGET_TAGGED_ADDRESSES
+# if defined(TARGET_AARCH64) && defined(CONFIG_LINUX)
/* Allow user-only to vary page size from 4k */
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 12
# else
# define TARGET_PAGE_BITS 12
# endif
@@ -33,10 +29,14 @@
* have to support 1K tiny pages.
*/
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 10
+# define TARGET_PAGE_BITS_LEGACY 10
#endif /* !CONFIG_USER_ONLY */
-/* ARM processors have a weak memory model */
-#define TCG_GUEST_DEFAULT_MO (0)
+/*
+ * ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
#endif
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index b497667..2fcb0e1 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -28,11 +28,6 @@ OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
-#define TYPE_AARCH64_CPU "aarch64-cpu"
-typedef struct AArch64CPUClass AArch64CPUClass;
-DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
- TYPE_AARCH64_CPU)
-
#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 19191c2..e025e24 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -23,16 +23,18 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
+#include "exec/tswap.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
@@ -41,9 +43,9 @@
#include "hw/intc/armv7m_nvic.h"
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
-#include "sysemu/tcg.h"
-#include "sysemu/qtest.h"
-#include "sysemu/hw_accel.h"
+#include "system/tcg.h"
+#include "system/qtest.h"
+#include "system/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
@@ -120,8 +122,15 @@ void arm_restore_state_to_opc(CPUState *cs,
env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
}
}
+
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return arm_env_mmu_index(cpu_env(cs));
+}
+
#endif /* CONFIG_TCG */
+#ifndef CONFIG_USER_ONLY
/*
* With SCTLR_ELx.NMI == 0, IRQ with Superpriority is masked identically with
* IRQ without Superpriority. Moreover, if the GIC is configured so that
@@ -140,11 +149,7 @@ static bool arm_cpu_has_work(CPUState *cs)
| CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
| CPU_INTERRUPT_EXITTB);
}
-
-static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return arm_env_mmu_index(cpu_env(cs));
-}
+#endif /* !CONFIG_USER_ONLY */
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
void *opaque)
@@ -545,18 +550,21 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
env->sau.ctrl = 0;
}
- set_flush_to_zero(1, &env->vfp.standard_fp_status);
- set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
- set_default_nan_mode(1, &env->vfp.standard_fp_status);
- set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
- set_float_detect_tininess(float_tininess_before_rounding,
- &env->vfp.fp_status);
- set_float_detect_tininess(float_tininess_before_rounding,
- &env->vfp.standard_fp_status);
- set_float_detect_tininess(float_tininess_before_rounding,
- &env->vfp.fp_status_f16);
- set_float_detect_tininess(float_tininess_before_rounding,
- &env->vfp.standard_fp_status_f16);
+ set_flush_to_zero(1, &env->vfp.fp_status[FPST_STD]);
+ set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]);
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]);
+ set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]);
+ set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_AH]);
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH_F16]);
+
#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_arm_reset_vcpu(cpu);
@@ -826,7 +834,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = cpu_env(cs);
uint32_t cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
@@ -926,7 +933,7 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
found:
cs->exception_index = excp_idx;
env->exception.target_el = target_el;
- cc->tcg_ops->do_interrupt(cs);
+ cs->cc->tcg_ops->do_interrupt(cs);
return true;
}
@@ -1092,37 +1099,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
}
}
-static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
-{
-#ifdef CONFIG_KVM
- ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
- uint32_t linestate_bit;
- int irq_id;
-
- switch (irq) {
- case ARM_CPU_IRQ:
- irq_id = KVM_ARM_IRQ_CPU_IRQ;
- linestate_bit = CPU_INTERRUPT_HARD;
- break;
- case ARM_CPU_FIQ:
- irq_id = KVM_ARM_IRQ_CPU_FIQ;
- linestate_bit = CPU_INTERRUPT_FIQ;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (level) {
- env->irq_line_state |= linestate_bit;
- } else {
- env->irq_line_state &= ~linestate_bit;
- }
- kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
-#endif
-}
-
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1167,7 +1143,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
ARMCPU *ac = ARM_CPU(cpu);
CPUARMState *env = &ac->env;
- bool sctlr_b;
+ bool sctlr_b = arm_sctlr_b(env);
if (is_a64(env)) {
info->cap_arch = CS_ARCH_ARM64;
@@ -1194,13 +1170,9 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
info->cap_mode = cap_mode;
}
- sctlr_b = arm_sctlr_b(env);
+ info->endian = BFD_ENDIAN_LITTLE;
if (bswap_code(sctlr_b)) {
-#if TARGET_BIG_ENDIAN
- info->endian = BFD_ENDIAN_LITTLE;
-#else
- info->endian = BFD_ENDIAN_BIG;
-#endif
+ info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
}
info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
@@ -1210,8 +1182,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
-#ifdef TARGET_AARCH64
-
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1369,15 +1339,6 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-#else
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- g_assert_not_reached();
-}
-
-#endif
-
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1539,39 +1500,39 @@ static void arm_cpu_initfn(Object *obj)
* 0 means "unset, use the default value". That default might vary depending
* on the CPU type, and is set in the realize fn.
*/
-static Property arm_cpu_gt_cntfrq_property =
+static const Property arm_cpu_gt_cntfrq_property =
DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
-static Property arm_cpu_reset_cbar_property =
+static const Property arm_cpu_reset_cbar_property =
DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
-static Property arm_cpu_reset_hivecs_property =
+static const Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
#ifndef CONFIG_USER_ONLY
-static Property arm_cpu_has_el2_property =
+static const Property arm_cpu_has_el2_property =
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
-static Property arm_cpu_has_el3_property =
+static const Property arm_cpu_has_el3_property =
DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif
-static Property arm_cpu_cfgend_property =
+static const Property arm_cpu_cfgend_property =
DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
-static Property arm_cpu_has_vfp_property =
+static const Property arm_cpu_has_vfp_property =
DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
-static Property arm_cpu_has_vfp_d32_property =
+static const Property arm_cpu_has_vfp_d32_property =
DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true);
-static Property arm_cpu_has_neon_property =
+static const Property arm_cpu_has_neon_property =
DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
-static Property arm_cpu_has_dsp_property =
+static const Property arm_cpu_has_dsp_property =
DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
-static Property arm_cpu_has_mpu_property =
+static const Property arm_cpu_has_mpu_property =
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
@@ -1579,7 +1540,7 @@ static Property arm_cpu_has_mpu_property =
* the right value for that particular CPU type, and we don't want
* to override that with an incorrect constant value.
*/
-static Property arm_cpu_pmsav7_dregion_property =
+static const Property arm_cpu_pmsav7_dregion_property =
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
@@ -1607,6 +1568,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
cpu->has_pmu = value;
}
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /*
+ * At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
/*
@@ -1734,6 +1724,13 @@ void arm_cpu_post_init(Object *obj)
*/
arm_cpu_propagate_feature_implications(cpu);
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ");
+ }
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1916,7 +1913,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
Error *local_err = NULL;
-#ifdef TARGET_AARCH64
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
arm_cpu_sve_finalize(cpu, &local_err);
if (local_err != NULL) {
@@ -1952,7 +1948,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
return;
}
}
-#endif
if (kvm_enabled()) {
kvm_arm_steal_time_finalize(cpu, &local_err);
@@ -2069,6 +2064,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
arm_gt_stimer_cb, cpu);
cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
arm_gt_hvtimer_cb, cpu);
+ cpu->gt_timer[GTIMER_S_EL2_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_sel2timer_cb, cpu);
+ cpu->gt_timer[GTIMER_S_EL2_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_sel2vtimer_cb, cpu);
}
#endif
@@ -2390,14 +2389,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
/*
- * If we do not have tag-memory provided by the machine,
- * reduce MTE support to instructions enabled at EL0.
+ * If we run with TCG and do not have tag-memory provided by
+ * the machine, then reduce MTE support to instructions enabled at EL0.
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
- if (cpu->tag_memory == NULL) {
+ if (tcg_enabled() && cpu->tag_memory == NULL) {
cpu->isar.id_aa64pfr1 =
FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
}
+
+ /*
+     * If MTE is supported by the host but should not be enabled on the
+     * guest (i.e. mte=off), clear the guest's MTE bits.
+ */
+ if (kvm_enabled() && !cpu->kvm_mte) {
+        cpu->isar.id_aa64pfr1 =
+            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ }
#endif
}
@@ -2617,7 +2624,7 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
return oc;
}
-static Property arm_cpu_properties[] = {
+static const Property arm_cpu_properties[] = {
DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
mp_affinity, ARM64_AFFINITY_INVALID),
@@ -2625,7 +2632,8 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
/* True to default to the backward-compat old CNTFRQ rather than 1Ghz */
DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false),
- DEFINE_PROP_END_OF_LIST()
+ DEFINE_PROP_BOOL("backcompat-pauth-default-use-qarma5", ARMCPU,
+ backcompat_pauth_default_use_qarma5, false),
};
static const gchar *arm_gdb_arch_name(CPUState *cs)
@@ -2633,16 +2641,58 @@ static const gchar *arm_gdb_arch_name(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64";
+ }
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
return "iwmmxt";
}
return "arm";
}
-#ifndef CONFIG_USER_ONLY
+static const char *arm_gdb_get_core_xml_file(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64-core.xml";
+ }
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return "arm-m-profile.xml";
+ }
+ return "arm-core.xml";
+}
+
+#ifdef CONFIG_USER_ONLY
+/**
+ * aarch64_untagged_addr:
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but we need this in
+ * include/exec/cpu_ldst.h, and not some place linux-user specific.
+ *
+ * Note that arm-*-user will never set tagged_addr_enable.
+ */
+static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
+{
+ CPUARMState *env = cpu_env(cs);
+ if (env->tagged_addr_enable) {
+ /*
+ * TBI is enabled for userspace but not kernelspace addresses.
+ * Only clear the tag if bit 55 is clear.
+ */
+ x &= sextract64(x, 0, 56);
+ }
+ return x;
+}
+#else
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps arm_sysemu_ops = {
+ .has_work = arm_cpu_has_work,
.get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
.asidx_from_attrs = arm_asidx_from_attrs,
.write_elf32_note = arm_cpu_write_elf32_note,
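To illustrate the tag stripping in aarch64_untagged_addr() above (a standalone sketch, not QEMU code): sextract64(x, 0, 56) sign-extends bit 55 into the top byte, so the AND clears the tag only when bit 55 is 0.

/* Standalone illustration of the strip-if-bit-55-clear behaviour. */
#include <assert.h>
#include <stdint.h>

static uint64_t strip_tag(uint64_t x)
{
    /* Same as x &= sextract64(x, 0, 56); arithmetic right shift assumed. */
    return x & (uint64_t)(((int64_t)(x << 8)) >> 8);
}

int main(void)
{
    /* Bit 55 clear: the 0x2a tag in bits [63:56] is removed. */
    assert(strip_tag(0x2a00123456789abcULL) == 0x0000123456789abcULL);
    /* Bit 55 set (kernel-style address): left unchanged. */
    assert(strip_tag(0x2a80123456789abcULL) == 0x2a80123456789abcULL);
    return 0;
}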
@@ -2653,19 +2703,52 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all pte's are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+     * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
+ .mttcg_supported = true,
+ /* ARM processors have a weak memory model */
+ .guest_default_memory_order = 0,
+
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
+ .mmu_index = arm_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
+ .untagged_addr = aarch64_untagged_addr,
#else
- .tlb_fill = arm_cpu_tlb_fill,
+ .tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -2676,7 +2759,7 @@ static const TCGCPUOps arm_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void arm_cpu_class_init(ObjectClass *oc, void *data)
+static void arm_cpu_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
@@ -2692,8 +2775,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
&acc->parent_phases);
cc->class_by_name = arm_cpu_class_by_name;
- cc->has_work = arm_cpu_has_work;
- cc->mmu_index = arm_cpu_mmu_index;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->get_pc = arm_cpu_get_pc;
@@ -2703,6 +2784,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->sysemu_ops = &arm_sysemu_ops;
#endif
cc->gdb_arch_name = arm_gdb_arch_name;
+ cc->gdb_get_core_xml_file = arm_gdb_get_core_xml_file;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;
@@ -2719,13 +2801,15 @@ static void arm_cpu_instance_init(Object *obj)
arm_cpu_post_init(obj);
}
-static void cpu_register_class_init(ObjectClass *oc, void *data)
+static void cpu_register_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
acc->info = data;
- cc->gdb_core_xml_file = "arm-core.xml";
+ if (acc->info->deprecation_note) {
+ cc->deprecation_note = acc->info->deprecation_note;
+ }
}
void arm_cpu_register(const ARMCPUInfo *info)
@@ -2734,11 +2818,11 @@ void arm_cpu_register(const ARMCPUInfo *info)
.parent = TYPE_ARM_CPU,
.instance_init = arm_cpu_instance_init,
.class_init = info->class_init ?: cpu_register_class_init,
- .class_data = (void *)info,
+ .class_data = info,
};
type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
- type_register(&type_info);
+ type_register_static(&type_info);
g_free((void *)type_info.name);
}
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a12859f..302c24e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -24,17 +24,15 @@
#include "qemu/cpu-float.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "exec/page-protection.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
-#ifdef TARGET_AARCH64
-#define KVM_HAVE_MCE_INJECTION 1
-#endif
-
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
@@ -62,6 +60,7 @@
#define EXCP_NMI 26
#define EXCP_VINMI 27
#define EXCP_VFNMI 28
+#define EXCP_MON_TRAP 29 /* AArch32 trap to Monitor mode */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -99,12 +98,6 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* ARM-specific extra insn start words:
- * 1: Conditional execution bits
- * 2: Partial exception syndrome for data aborts
- */
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
/* The 2nd extra word holding syndrome info for data aborts does not use
* the upper 6 bits nor the lower 13 bits. We mask and shift it down to
* help the sleb128 encoder do a better job.
@@ -170,17 +163,12 @@ typedef struct ARMGenericTimer {
* Align the data for use with TCG host vector operations.
*/
-#ifdef TARGET_AARCH64
-# define ARM_MAX_VQ 16
-#else
-# define ARM_MAX_VQ 1
-#endif
+#define ARM_MAX_VQ 16
typedef struct ARMVectorReg {
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
-#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
@@ -190,18 +178,72 @@ typedef struct ARMPredicateReg {
typedef struct ARMPACKey {
uint64_t lo, hi;
} ARMPACKey;
-#endif
/* See the commentary above the TBFLAG field definitions. */
typedef struct CPUARMTBFlags {
uint32_t flags;
- target_ulong flags2;
+ uint64_t flags2;
} CPUARMTBFlags;
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
typedef struct NVICState NVICState;
+/*
+ * Enum for indexing vfp.fp_status[].
+ *
+ * FPST_A32: is the "normal" fp status for AArch32 insns
+ * FPST_A64: is the "normal" fp status for AArch64 insns
+ * FPST_A32_F16: used for AArch32 half-precision calculations
+ * FPST_A64_F16: used for AArch64 half-precision calculations
+ * FPST_STD: the ARM "Standard FPSCR Value"
+ * FPST_STD_F16: used for half-precision
+ * calculations with the ARM "Standard FPSCR Value"
+ * FPST_AH: used for the A64 insns which change behaviour
+ * when FPCR.AH == 1 (bfloat16 conversions and multiplies,
+ * and the reciprocal and square root estimate/step insns)
+ * FPST_AH_F16: used for the A64 insns which change behaviour
+ * when FPCR.AH == 1 (bfloat16 conversions and multiplies,
+ * and the reciprocal and square root estimate/step insns);
+ * for half-precision
+ *
+ * Half-precision operations are governed by a separate
+ * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
+ * status structure to control this.
+ *
+ * The "Standard FPSCR", ie default-NaN, flush-to-zero,
+ * round-to-nearest and is used by any operations (generally
+ * Neon) which the architecture defines as controlled by the
+ * standard FPSCR value rather than the FPSCR.
+ *
+ * The "standard FPSCR but for fp16 ops" is needed because
+ * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
+ * using a fixed value for it.
+ *
+ * FPST_AH is needed because some insns have different
+ * behaviour when FPCR.AH == 1: they don't update cumulative
+ * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and
+ * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16,
+ * which means we need an FPST_AH_F16 as well.
+ *
+ * To avoid having to transfer exception bits around, we simply
+ * say that the FPSCR cumulative exception flags are the logical
+ * OR of the flags in the four fp statuses. This relies on the
+ * only thing which needs to read the exception flags being
+ * an explicit FPSCR read.
+ */
+typedef enum ARMFPStatusFlavour {
+ FPST_A32,
+ FPST_A64,
+ FPST_A32_F16,
+ FPST_A64_F16,
+ FPST_AH,
+ FPST_AH_F16,
+ FPST_STD,
+ FPST_STD_F16,
+} ARMFPStatusFlavour;
+#define FPST_COUNT 8
+
typedef struct CPUArchState {
/* Regs for current mode. */
uint32_t regs[16];
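A sketch (not from the patch) of how a helper now selects one of the per-flavour statuses listed above, in place of the old named fields:

/* Sketch: pick the float_status an A64 scalar FP helper would use. */
static inline float_status *example_a64_fpstatus(CPUARMState *env, bool fp16)
{
    return &env->vfp.fp_status[fp16 ? FPST_A64_F16 : FPST_A64];
}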
@@ -606,13 +648,11 @@ typedef struct CPUArchState {
struct {
ARMVectorReg zregs[32];
-#ifdef TARGET_AARCH64
/* Store FFR as pregs[16] to make it easier to treat as any other. */
#define FFR_PRED_NUM 16
ARMPredicateReg pregs[17];
/* Scratch space for aa64 sve predicate temporary. */
ARMPredicateReg preg_tmp;
-#endif
/* We store these fpcsr fields separately for convenience. */
uint32_t qc[4] QEMU_ALIGNED(16);
@@ -631,37 +671,8 @@ typedef struct CPUArchState {
/* Scratch space for aa32 neon expansion. */
uint32_t scratch[8];
- /* There are a number of distinct float control structures:
- *
- * fp_status: is the "normal" fp status.
- * fp_status_fp16: used for half-precision calculations
- * standard_fp_status : the ARM "Standard FPSCR Value"
- * standard_fp_status_fp16 : used for half-precision
- * calculations with the ARM "Standard FPSCR Value"
- *
- * Half-precision operations are governed by a separate
- * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
- * status structure to control this.
- *
- * The "Standard FPSCR", ie default-NaN, flush-to-zero,
- * round-to-nearest and is used by any operations (generally
- * Neon) which the architecture defines as controlled by the
- * standard FPSCR value rather than the FPSCR.
- *
- * The "standard FPSCR but for fp16 ops" is needed because
- * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
- * using a fixed value for it.
- *
- * To avoid having to transfer exception bits around, we simply
- * say that the FPSCR cumulative exception flags are the logical
- * OR of the flags in the four fp statuses. This relies on the
- * only thing which needs to read the exception flags being
- * an explicit FPSCR read.
- */
- float_status fp_status;
- float_status fp_status_f16;
- float_status standard_fp_status;
- float_status standard_fp_status_f16;
+ /* There are a number of distinct float control structures. */
+ float_status fp_status[FPST_COUNT];
uint64_t zcr_el[4]; /* ZCR_EL[1-3] */
uint64_t smcr_el[4]; /* SMCR_EL[1-3] */
@@ -686,7 +697,6 @@ typedef struct CPUArchState {
uint32_t cregs[16];
} iwmmxt;
-#ifdef TARGET_AARCH64
struct {
ARMPACKey apia;
ARMPACKey apib;
@@ -718,7 +728,6 @@ typedef struct CPUArchState {
* to keep the offsets into the rest of the structure smaller.
*/
ARMVectorReg zarray[ARM_MAX_VQ * 16];
-#endif
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
@@ -774,12 +783,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
-#endif /* CONFIG_USER_ONLY */
-
-#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
-#endif
+#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@@ -922,6 +928,8 @@ struct ArchCPU {
/* CPU has memory protection unit */
bool has_mpu;
+ /* CPU has MTE enabled in KVM mode */
+ bool kvm_mte;
/* PMSAv7 MPU number of supported regions */
uint32_t pmsav7_dregion;
/* PMSAv8 MPU number of supported hyp regions */
@@ -944,7 +952,6 @@ struct ArchCPU {
*/
uint32_t kvm_target;
-#ifdef CONFIG_KVM
/* KVM init features for this CPU */
uint32_t kvm_init_features[7];
@@ -957,7 +964,6 @@ struct ArchCPU {
/* KVM steal time */
OnOffAuto kvm_steal_time;
-#endif /* CONFIG_KVM */
/* Uniprocessor system with MP extensions */
bool mp_is_up;
@@ -970,6 +976,9 @@ struct ArchCPU {
/* QOM property to indicate we should use the back-compat CNTFRQ default */
bool backcompat_cntfrq;
+ /* QOM property to indicate we should use the back-compat QARMA5 default */
+ bool backcompat_pauth_default_use_qarma5;
+
/* Specify the number of cores in this CPU cluster. Used for the L2CTLR
* register.
*/
@@ -1060,6 +1069,7 @@ struct ArchCPU {
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_pauth_qarma3;
+ bool prop_pauth_qarma5;
bool prop_lpa2;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
@@ -1108,8 +1118,9 @@ struct ArchCPU {
typedef struct ARMCPUInfo {
const char *name;
+ const char *deprecation_note;
void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
+ void (*class_init)(ObjectClass *oc, const void *data);
} ARMCPUInfo;
/**
@@ -1127,16 +1138,14 @@ struct ARMCPUClass {
ResettablePhases parent_phases;
};
-struct AArch64CPUClass {
- ARMCPUClass parent_class;
-};
-
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
void arm_gt_hvtimer_cb(void *opaque);
+void arm_gt_sel2timer_cb(void *opaque);
+void arm_gt_sel2vtimer_cb(void *opaque);
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
@@ -1200,7 +1209,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
*/
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
-#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
@@ -1232,13 +1240,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
#endif
}
-#else
-static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o,
- int n, bool a)
-{ }
-#endif
-
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -1365,6 +1366,7 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
+#define SCTLR_CMOW (1ULL << 32) /* FEAT_CMOW */
#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */
#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
@@ -1702,11 +1704,15 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
*/
/* FPCR bits */
+#define FPCR_FIZ (1 << 0) /* Flush Inputs to Zero (FEAT_AFP) */
+#define FPCR_AH (1 << 1) /* Alternate Handling (FEAT_AFP) */
+#define FPCR_NEP (1 << 2) /* SIMD scalar ops preserve elts (FEAT_AFP) */
#define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */
#define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */
#define FPCR_OFE (1 << 10) /* Overflow exception trap enable */
#define FPCR_UFE (1 << 11) /* Underflow exception trap enable */
#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
+#define FPCR_EBF (1 << 13) /* Extended BFloat16 behaviors */
#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
#define FPCR_LEN_MASK (7 << 16) /* LEN, A-profile only */
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
@@ -2557,6 +2563,11 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
return false;
}
+static inline bool arm_is_el3_or_mon(CPUARMState *env)
+{
+ return false;
+}
+
static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
{
return ARMSS_NonSecure;
@@ -2589,81 +2600,15 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
-/* Return true if the specified exception level is running in AArch64 state. */
-static inline bool arm_el_is_aa64(CPUARMState *env, int el)
-{
- /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
- * and if we're not in EL0 then the state of EL0 isn't well defined.)
- */
- assert(el >= 1 && el <= 3);
- bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
-
- /* The highest exception level is always at the maximum supported
- * register width, and then lower levels have a register width controlled
- * by bits in the SCR or HCR registers.
- */
- if (el == 3) {
- return aa64;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
- aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
- }
-
- if (el == 2) {
- return aa64;
- }
-
- if (arm_is_el2_enabled(env)) {
- aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
- }
-
- return aa64;
-}
-
-/* Function for determining whether guest cp register reads and writes should
+/*
+ * Function for determining whether guest cp register reads and writes should
* access the secure or non-secure bank of a cp register. When EL3 is
* operating in AArch32 state, the NS-bit determines whether the secure
* instance of a cp register should be used. When EL3 is AArch64 (or if
* it doesn't exist at all) then there is no register banking, and all
* accesses are to the non-secure version.
*/
-static inline bool access_secure_reg(CPUARMState *env)
-{
- bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
- !arm_el_is_aa64(env, 3) &&
- !(env->cp15.scr_el3 & SCR_NS));
-
- return ret;
-}
-
-/* Macros for accessing a specified CP register bank */
-#define A32_BANKED_REG_GET(_env, _regname, _secure) \
- ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
-
-#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
- do { \
- if (_secure) { \
- (_env)->cp15._regname##_s = (_val); \
- } else { \
- (_env)->cp15._regname##_ns = (_val); \
- } \
- } while (0)
-
-/* Macros for automatically accessing a specific CP register bank depending on
- * the current secure state of the system. These macros are not intended for
- * supporting instruction translation reads/writes as these are dependent
- * solely on the SCR.NS bit and not the mode.
- */
-#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
- A32_BANKED_REG_GET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
-
-#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
- A32_BANKED_REG_SET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
- (_val))
+bool access_secure_reg(CPUARMState *env);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure);
@@ -2686,39 +2631,6 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
return env->v7m.exception != 0;
}
-/* Return the current Exception Level (as per ARMv8; note that this differs
- * from the ARMv7 Privilege Level).
- */
-static inline int arm_current_el(CPUARMState *env)
-{
- if (arm_feature(env, ARM_FEATURE_M)) {
- return arm_v7m_is_handler_mode(env) ||
- !(env->v7m.control[env->v7m.secure] & 1);
- }
-
- if (is_a64(env)) {
- return extract32(env->pstate, 2, 2);
- }
-
- switch (env->uncached_cpsr & 0x1f) {
- case ARM_CPU_MODE_USR:
- return 0;
- case ARM_CPU_MODE_HYP:
- return 2;
- case ARM_CPU_MODE_MON:
- return 3;
- default:
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
- /* If EL3 is 32-bit then all secure privileged modes run in
- * EL3
- */
- return 3;
- }
-
- return 1;
- }
-}
-
/**
* write_list_to_cpustate
* @cpu: ARMCPU
@@ -2772,14 +2684,19 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* + NonSecure EL1 & 0 stage 2
* + NonSecure EL2
* + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0
- * + Secure EL3
+ * + Secure EL1 & 0 stage 1
+ * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
+ * + Secure EL2 (FEAT_SEL2)
+ * + Secure EL2 & 0 (FEAT_SEL2)
+ * + Realm EL1 & 0 stage 1 (FEAT_RME)
+ * + Realm EL1 & 0 stage 2 (FEAT_RME)
+ * + Realm EL2 (FEAT_RME)
+ * + EL3
* If EL3 is 32-bit:
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
- * + Secure PL0
- * + Secure PL1
+ * + Secure PL1 & 0
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
@@ -2805,29 +2722,34 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* table over and over.
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
- * 7. we fold together the secure and non-secure regimes for A-profile,
+ * 7. we fold together most secure and non-secure regimes for A-profile,
* because there are no banked system registers for aarch64, so the
* process of switching between secure and non-secure is
* already heavyweight.
+ * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ * because both are in use simultaneously for Secure EL2.
*
* This gives us the following list of cases:
*
- * EL0 EL1&0 stage 1+2 (aka NS PL0)
- * EL1 EL1&0 stage 1+2 (aka NS PL1)
- * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
* EL0 EL2&0
* EL2 EL2&0
* EL2 EL2&0 +PAN
* EL2 (aka NS PL2)
- * EL3 (aka S PL1)
- * Physical (NS & S)
- * Stage2 (NS & S)
+ * EL3 (aka AArch32 S PL1 PL1&0)
+ * AArch32 S PL0 PL1&0 (we call this EL30_0)
+ * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
+ * Stage2 Secure
+ * Stage2 NonSecure
+ * plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 12 different mmu_idx.
+ * for a total of 16 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 if we ever model a Cortex-R52).
+ * EL2 for cores like the Cortex-R52).
*
* M profile CPUs are rather different as they do not have a true MMU.
* They have the following different MMU indexes:
@@ -2887,6 +2809,8 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
/*
* Used for second stage of an S12 page table walk, or for descriptor
@@ -2894,14 +2818,14 @@ typedef enum ARMMMUIdx {
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
/* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
@@ -2940,6 +2864,8 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
TO_CORE_BIT(E3),
+ TO_CORE_BIT(E30_0),
+ TO_CORE_BIT(E30_3_PAN),
TO_CORE_BIT(Stage2),
TO_CORE_BIT(Stage2_S),
@@ -3005,60 +2931,15 @@ static inline bool arm_sctlr_b(CPUARMState *env)
uint64_t arm_sctlr(CPUARMState *env, int el);
-static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
- bool sctlr_b)
-{
-#ifdef CONFIG_USER_ONLY
- /*
- * In system mode, BE32 is modelled in line with the
- * architecture (as word-invariant big-endianness), where loads
- * and stores are done little endian but from addresses which
- * are adjusted by XORing with the appropriate constant. So the
- * endianness to use for the raw data access is not affected by
- * SCTLR.B.
- * In user mode, however, we model BE32 as byte-invariant
- * big-endianness (because user-only code cannot tell the
- * difference), and so we need to use a data access endianness
- * that depends on SCTLR.B.
- */
- if (sctlr_b) {
- return true;
- }
-#endif
- /* In 32bit endianness is determined by looking at CPSR's E bit */
- return env->uncached_cpsr & CPSR_E;
-}
-
-static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
-{
- return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
-}
-
-/* Return true if the processor is in big-endian mode. */
-static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
-{
- if (!is_a64(env)) {
- return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
- } else {
- int cur_el = arm_current_el(env);
- uint64_t sctlr = arm_sctlr(env, cur_el);
- return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
- }
-}
-
-#include "exec/cpu-all.h"
-
/*
* We have more than 32-bits worth of state per TB, so we split the data
* between tb->flags and tb->cs_base, which is otherwise unused for ARM.
* We collect these two parts in CPUARMTBFlags where they are named
* flags and flags2 respectively.
*
- * The flags that are shared between all execution modes, TBFLAG_ANY,
- * are stored in flags. The flags that are specific to a given mode
- * are stores in flags2. Since cs_base is sized on the configured
- * address size, flags2 always has 64-bits for A64, and a minimum of
- * 32-bits for A32 and M32.
+ * The flags that are shared between all execution modes, TBFLAG_ANY, are stored
+ * in flags. The flags that are specific to a given mode are stored in flags2.
+ * flags2 always has 64-bits, even though only 32-bits are used for A32 and M32.
*
* The bits for 32-bit A-profile and M-profile partially overlap:
*
@@ -3168,6 +3049,8 @@ FIELD(TBFLAG_A64, NV2, 34, 1)
FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
/* Set if FEAT_NV2 RAM accesses are big-endian */
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
+FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
+FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
/*
* Helpers for using the above. Note that only the A64 accessors use
@@ -3229,16 +3112,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
-#ifdef CONFIG_USER_ONLY
-static inline bool arm_cpu_bswap_data(CPUARMState *env)
-{
- return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
-}
-#endif
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@@ -3336,34 +3209,4 @@ extern const uint64_t pred_esz_masks[5];
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
-#ifdef CONFIG_USER_ONLY
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-#endif
-
-#ifdef TARGET_TAGGED_ADDRESSES
-/**
- * cpu_untagged_addr:
- * @cs: CPU context
- * @x: tagged address
- *
- * Remove any address tag from @x. This is explicitly related to the
- * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
- *
- * There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
- */
-static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
-{
- CPUARMState *env = cpu_env(cs);
- if (env->tagged_addr_enable) {
- /*
- * TBI is enabled for userspace but not kernelspace addresses.
- * Only clear the tag if bit 55 is clear.
- */
- x &= sextract64(x, 0, 56);
- }
- return x;
-}
-#endif
-
#endif
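The two new TBFLAG_A64 fields added above (AH at bit 37, NEP at bit 38) are packed into the 64-bit flags2 word like any other FIELD() declaration. A minimal standalone sketch of the equivalent shift/mask arithmetic; the helper names here are illustrative, not QEMU's generated FIELD_EX64/FIELD_DP64 accessors:

#include <stdint.h>
#include <stdbool.h>

/* FIELD(TBFLAG_A64, AH, 37, 1) and FIELD(TBFLAG_A64, NEP, 38, 1) reduce to
 * single-bit inserts/extracts at those positions within flags2: */
static inline uint64_t tbflag_set_bit(uint64_t flags2, unsigned shift, bool v)
{
    return (flags2 & ~(UINT64_C(1) << shift)) | ((uint64_t)v << shift);
}

static inline bool tbflag_get_bit(uint64_t flags2, unsigned shift)
{
    return (flags2 >> shift) & 1;
}

/* e.g. flags2 = tbflag_set_bit(flags2, 37, fpcr_ah); the translator can then
 * read it back with tbflag_get_bit(flags2, 37) when building its context. */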
diff --git a/target/arm/cpu32-stubs.c b/target/arm/cpu32-stubs.c
new file mode 100644
index 0000000..81be44d
--- /dev/null
+++ b/target/arm/cpu32-stubs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "target/arm/cpu.h"
+#include "target/arm/internals.h"
+#include <glib.h>
+
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 262a1d6..200da1c 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -23,10 +23,11 @@
#include "cpu.h"
#include "cpregs.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/hvf.h"
-#include "sysemu/qtest.h"
-#include "sysemu/tcg.h"
+#include "qemu/units.h"
+#include "system/kvm.h"
+#include "system/hvf.h"
+#include "system/qtest.h"
+#include "system/tcg.h"
#include "kvm_arm.h"
#include "hvf_arm.h"
#include "qapi/visitor.h"
@@ -519,25 +520,40 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
}
if (cpu->prop_pauth) {
- if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) {
+ if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) ||
+ (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) ||
+ (cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) {
error_setg(errp,
- "cannot enable both pauth-impdef and pauth-qarma3");
+ "cannot enable pauth-impdef, pauth-qarma3 and "
+ "pauth-qarma5 at the same time");
return;
}
- if (cpu->prop_pauth_impdef) {
- isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
- isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
+ bool use_default = !cpu->prop_pauth_qarma5 &&
+ !cpu->prop_pauth_qarma3 &&
+ !cpu->prop_pauth_impdef;
+
+ if (cpu->prop_pauth_qarma5 ||
+ (use_default &&
+ cpu->backcompat_pauth_default_use_qarma5)) {
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
} else if (cpu->prop_pauth_qarma3) {
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
+ } else if (cpu->prop_pauth_impdef ||
+ (use_default &&
+ !cpu->backcompat_pauth_default_use_qarma5)) {
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
} else {
- isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
- isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
+ g_assert_not_reached();
}
- } else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) {
- error_setg(errp, "cannot enable pauth-impdef or "
- "pauth-qarma3 without pauth");
+ } else if (cpu->prop_pauth_impdef ||
+ cpu->prop_pauth_qarma3 ||
+ cpu->prop_pauth_qarma5) {
+ error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or "
+ "pauth-qarma5 without pauth");
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
}
}
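A condensed sketch of the selection order the new code implements, using the same property names; only the precedence is shown, not the ID-register plumbing:

typedef enum { PAUTH_QARMA5, PAUTH_QARMA3, PAUTH_IMPDEF } PauthAlgo;

/* qarma5 wins, then qarma3, then impdef; when no algorithm property is set,
 * backcompat_pauth_default_use_qarma5 picks between QARMA5 and IMPDEF. */
static PauthAlgo pick_pauth_algo(bool qarma5, bool qarma3, bool impdef,
                                 bool backcompat_default_qarma5)
{
    bool use_default = !qarma5 && !qarma3 && !impdef;

    if (qarma5 || (use_default && backcompat_default_qarma5)) {
        return PAUTH_QARMA5;        /* sets APA/GPA   */
    } else if (qarma3) {
        return PAUTH_QARMA3;        /* sets APA3/GPA3 */
    } else {
        return PAUTH_IMPDEF;        /* sets API/GPI; also the non-backcompat default */
    }
}

On the command line this would presumably be selected with something like -cpu max,pauth=on,pauth-qarma5=on (illustrative invocation; which CPU models expose the property depends on aarch64_add_pauth_properties() below).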
@@ -546,12 +562,14 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
cpu->isar.id_aa64isar2 = isar2;
}
-static Property arm_cpu_pauth_property =
+static const Property arm_cpu_pauth_property =
DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
-static Property arm_cpu_pauth_impdef_property =
+static const Property arm_cpu_pauth_impdef_property =
DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
-static Property arm_cpu_pauth_qarma3_property =
+static const Property arm_cpu_pauth_qarma3_property =
DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
+static Property arm_cpu_pauth_qarma5_property =
+ DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false);
void aarch64_add_pauth_properties(Object *obj)
{
@@ -572,6 +590,7 @@ void aarch64_add_pauth_properties(Object *obj)
} else {
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property);
}
}
@@ -642,9 +661,12 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41013000;
cpu->clidr = 0x0a200023;
- cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
- cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
+ /* 32KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
+ /* 48KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2);
+ /* 2048KB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 2 * MiB, 7);
cpu->dcz_blocksize = 4; /* 64 bytes */
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
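The make_ccsidr() calls above replace hand-computed constants. Assuming the helper packs the architectural legacy CCSIDR layout (NumSets in bits [27:13], Associativity in [12:3], LineSize in [2:0], IMPDEF flag bits in the top nibble), the arithmetic is roughly the following sketch; the real helper lives in target/arm internals and may differ in detail:

#include <stdint.h>

/* Sketch only: legacy-format CCSIDR packing from cache geometry. */
static uint32_t ccsidr_legacy(unsigned assoc, unsigned linesize_bytes,
                              unsigned cachesize_bytes, unsigned flags)
{
    unsigned numsets = cachesize_bytes / (linesize_bytes * assoc);
    unsigned lsz = 0;                       /* log2(words per line) - 2 */

    while ((16u << lsz) < linesize_bytes) {
        lsz++;
    }
    return ((uint32_t)flags << 28) | ((numsets - 1) << 13) |
           ((assoc - 1) << 3) | lsz;
}

/* ccsidr_legacy(3, 64, 48 * 1024, 2) == 0x201fe012, matching the constant the
 * old code hard-coded for the Cortex-A57 L1 icache above. */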
@@ -700,9 +722,12 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.dbgdevid1 = 0x1;
cpu->isar.reset_pmcr_el0 = 0x41033000;
cpu->clidr = 0x0a200023;
- cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
- cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
+ /* 32KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
+ /* 32KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 1, 64, 32 * KiB, 2);
+ /* 1024KB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7);
cpu->dcz_blocksize = 4; /* 64 bytes */
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
@@ -756,104 +781,12 @@ static const ARMCPUInfo aarch64_cpus[] = {
#endif
};
-static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
-}
-
-static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- /* At this time, this property is only allowed if KVM is enabled. This
- * restriction allows us to avoid fixing up functionality that assumes a
- * uniform execution state like do_interrupt.
- */
- if (value == false) {
- if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
- error_setg(errp, "'aarch64' feature cannot be disabled "
- "unless KVM is enabled and 32-bit EL1 "
- "is supported");
- return;
- }
- unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
- } else {
- set_feature(&cpu->env, ARM_FEATURE_AARCH64);
- }
-}
-
-static void aarch64_cpu_finalizefn(Object *obj)
-{
-}
-
-static const gchar *aarch64_gdb_arch_name(CPUState *cs)
-{
- return "aarch64";
-}
-
-static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
-
- cc->gdb_read_register = aarch64_cpu_gdb_read_register;
- cc->gdb_write_register = aarch64_cpu_gdb_write_register;
- cc->gdb_core_xml_file = "aarch64-core.xml";
- cc->gdb_arch_name = aarch64_gdb_arch_name;
-
- object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
- aarch64_cpu_set_aarch64);
- object_class_property_set_description(oc, "aarch64",
- "Set on/off to enable/disable aarch64 "
- "execution state ");
-}
-
-static void aarch64_cpu_instance_init(Object *obj)
-{
- ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
-
- acc->info->initfn(obj);
- arm_cpu_post_init(obj);
-}
-
-static void cpu_register_class_init(ObjectClass *oc, void *data)
-{
- ARMCPUClass *acc = ARM_CPU_CLASS(oc);
-
- acc->info = data;
-}
-
-void aarch64_cpu_register(const ARMCPUInfo *info)
-{
- TypeInfo type_info = {
- .parent = TYPE_AARCH64_CPU,
- .instance_init = aarch64_cpu_instance_init,
- .class_init = info->class_init ?: cpu_register_class_init,
- .class_data = (void *)info,
- };
-
- type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
- type_register(&type_info);
- g_free((void *)type_info.name);
-}
-
-static const TypeInfo aarch64_cpu_type_info = {
- .name = TYPE_AARCH64_CPU,
- .parent = TYPE_ARM_CPU,
- .instance_finalize = aarch64_cpu_finalizefn,
- .abstract = true,
- .class_init = aarch64_cpu_class_init,
-};
-
static void aarch64_cpu_register_types(void)
{
size_t i;
- type_register_static(&aarch64_cpu_type_info);
-
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index 7d856ac..69fb1d0 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -11,9 +11,11 @@
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
-#include "sysemu/tcg.h"
+#include "exec/watchpoint.h"
+#include "system/tcg.h"
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
@@ -378,7 +380,7 @@ bool arm_debug_check_breakpoint(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- target_ulong pc;
+ vaddr pc;
int n;
/*
@@ -875,12 +877,13 @@ static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
(env->cp15.mdcr_el3 & MDCR_TDCC);
if (el < 1 && mdscr_el1_tdcc) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
return CP_ACCESS_TRAP_EL2;
}
- if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
+ if (!arm_is_el3_or_mon(env) &&
+ ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
return CP_ACCESS_TRAP_EL3;
}
return CP_ACCESS_OK;
@@ -1036,7 +1039,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
{ .name = "DBGVCR",
.cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tda,
- .type = ARM_CP_NOP },
+ .type = ARM_CP_CONST, .resetvalue = 0 },
/*
* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
* Channel but Linux may try to access this register. The 32-bit
@@ -1045,7 +1048,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
{ .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tdcc,
- .type = ARM_CP_NOP },
+ .type = ARM_CP_CONST, .resetvalue = 0 },
/*
* Dummy DBGCLAIM registers.
* "The architecture does not define any functionality for the CLAIM tag bits.",
@@ -1074,7 +1077,8 @@ static const ARMCPRegInfo debug_aa32_el1_reginfo[] = {
{ .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL2_RW, .accessfn = access_dbgvcr32,
- .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
+ .type = ARM_CP_CONST | ARM_CP_EL3_NO_EL2_KEEP,
+ .resetvalue = 0 },
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index c3a9b5e..ce4497a 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -22,7 +22,7 @@
#include "exec/gdbstub.h"
#include "gdbstub/helpers.h"
#include "gdbstub/commands.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
@@ -44,6 +44,12 @@ int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+#ifdef TARGET_AARCH64
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return aarch64_cpu_gdb_read_register(cs, mem_buf, n);
+ }
+#endif
+
if (n < 16) {
/* Core integer register. */
return gdb_get_reg32(mem_buf, env->regs[n]);
@@ -66,6 +72,12 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUARMState *env = &cpu->env;
uint32_t tmp;
+#ifdef TARGET_AARCH64
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return aarch64_cpu_gdb_write_register(cs, mem_buf, n);
+ }
+#endif
+
tmp = ldl_p(mem_buf);
/*
@@ -477,11 +489,9 @@ static GDBFeature *arm_gen_dynamic_m_secextreg_feature(CPUState *cs,
void arm_cpu_register_gdb_commands(ARMCPU *cpu)
{
- GArray *query_table =
- g_array_new(FALSE, FALSE, sizeof(GdbCmdParseEntry));
- GArray *set_table =
- g_array_new(FALSE, FALSE, sizeof(GdbCmdParseEntry));
- GString *qsupported_features = g_string_new(NULL);
+ g_autoptr(GPtrArray) query_table = g_ptr_array_new();
+ g_autoptr(GPtrArray) set_table = g_ptr_array_new();
+ g_autoptr(GString) qsupported_features = g_string_new(NULL);
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
#ifdef TARGET_AARCH64
@@ -492,16 +502,12 @@ void arm_cpu_register_gdb_commands(ARMCPU *cpu)
/* Set arch-specific handlers for 'q' commands. */
if (query_table->len) {
- gdb_extend_query_table(&g_array_index(query_table,
- GdbCmdParseEntry, 0),
- query_table->len);
+ gdb_extend_query_table(query_table);
}
/* Set arch-specific handlers for 'Q' commands. */
if (set_table->len) {
- gdb_extend_set_table(&g_array_index(set_table,
- GdbCmdParseEntry, 0),
- set_table->len);
+ gdb_extend_set_table(set_table);
}
/* Set arch-specific qSupported feature. */
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index 2e2bc27..64ee9b3 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -27,6 +27,10 @@
#include <sys/prctl.h>
#include "mte_user_helper.h"
#endif
+#ifdef CONFIG_TCG
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/target_page.h"
+#endif
int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
@@ -404,6 +408,7 @@ int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg)
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg)
{
+#if defined(CONFIG_LINUX)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -425,12 +430,18 @@ int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg)
arm_set_mte_tcf0(env, tcf);
return 1;
+#else
+ return 0;
+#endif
}
+#endif /* CONFIG_USER_ONLY */
+#ifdef CONFIG_TCG
static void handle_q_memtag(GArray *params, void *user_ctx)
{
ARMCPU *cpu = ARM_CPU(user_ctx);
CPUARMState *env = &cpu->env;
+ uint32_t mmu_index;
uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;
uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
@@ -454,8 +465,10 @@ static void handle_q_memtag(GArray *params, void *user_ctx)
gdb_put_packet("E03");
}
+ /* Find out the current translation regime for probe. */
+ mmu_index = cpu_mmu_index(env_cpu(env), false);
/* Note that tags are packed here (2 tags packed in one byte). */
- tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */,
+ tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
MMU_DATA_LOAD, true, 0);
if (!tags) {
/* Address is not in a tagged region. */
@@ -474,13 +487,16 @@ static void handle_q_isaddresstagged(GArray *params, void *user_ctx)
{
ARMCPU *cpu = ARM_CPU(user_ctx);
CPUARMState *env = &cpu->env;
+ uint32_t mmu_index;
uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;
uint8_t *tags;
const char *reply;
- tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */,
+ /* Find out the current translation regime for probe. */
+ mmu_index = cpu_mmu_index(env_cpu(env), false);
+ tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
MMU_DATA_LOAD, true, 0);
reply = tags ? "01" : "00";
@@ -491,6 +507,7 @@ static void handle_Q_memtag(GArray *params, void *user_ctx)
{
ARMCPU *cpu = ARM_CPU(user_ctx);
CPUARMState *env = &cpu->env;
+ uint32_t mmu_index;
uint64_t start_addr = gdb_get_cmd_param(params, 0)->val_ull;
uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
@@ -523,8 +540,10 @@ static void handle_Q_memtag(GArray *params, void *user_ctx)
* Get all tags in the page starting from the tag of the start address.
* Note that there are two tags packed into a single byte here.
*/
- tags = allocation_tag_mem_probe(env, 0, start_addr, MMU_DATA_STORE,
- 8 /* 64-bit */, MMU_DATA_STORE, true, 0);
+ /* Find out the current translation regime for probe. */
+ mmu_index = cpu_mmu_index(env_cpu(env), false);
+ tags = allocation_tag_mem_probe(env, mmu_index, start_addr, MMU_DATA_STORE,
+ 1, MMU_DATA_STORE, true, 0);
if (!tags) {
/* Address is not in a tagged region. */
gdb_put_packet("E04");
@@ -564,7 +583,7 @@ enum Command {
NUM_CMDS
};
-static GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = {
+static const GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = {
[qMemTags] = {
.handler = handle_q_memtag,
.cmd_startswith = true,
@@ -587,20 +606,19 @@ static GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = {
.need_cpu_context = true
},
};
-#endif /* CONFIG_USER_ONLY */
+#endif /* CONFIG_TCG */
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *qsupported,
- GArray *qtable, GArray *stable)
+ GPtrArray *qtable, GPtrArray *stable)
{
-#ifdef CONFIG_USER_ONLY
/* MTE */
+#ifdef CONFIG_TCG
if (cpu_isar_feature(aa64_mte, cpu)) {
g_string_append(qsupported, ";memory-tagging+");
- g_array_append_val(qtable, cmd_handler_table[qMemTags]);
- g_array_append_val(qtable, cmd_handler_table[qIsAddressTagged]);
-
- g_array_append_val(stable, cmd_handler_table[QMemTags]);
+ g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qMemTags]);
+ g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qIsAddressTagged]);
+ g_ptr_array_add(stable, (gpointer) &cmd_handler_table[QMemTags]);
}
#endif
}
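The gdbstub changes above switch the per-arch command tables from GArray copies to a GPtrArray of pointers, which is what lets cmd_handler_table[] become const. A minimal sketch of the resulting registration pattern, reusing the function and table names that appear in the hunks above (not self-contained outside target/arm):

#include <glib.h>

/* Entries stay in the arch's static const table; only pointers are collected. */
static void register_mte_cmds_sketch(ARMCPU *cpu)
{
    g_autoptr(GPtrArray) qtable = g_ptr_array_new();

    if (cpu_isar_feature(aa64_mte, cpu)) {
        g_ptr_array_add(qtable, (gpointer)&cmd_handler_table[qMemTags]);
        g_ptr_array_add(qtable, (gpointer)&cmd_handler_table[qIsAddressTagged]);
    }
    if (qtable->len) {
        gdb_extend_query_table(qtable);   /* now takes the GPtrArray directly */
    }
}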
diff --git a/target/arm/gtimer.h b/target/arm/gtimer.h
index b992941..d49c63c 100644
--- a/target/arm/gtimer.h
+++ b/target/arm/gtimer.h
@@ -10,12 +10,14 @@
#define TARGET_ARM_GTIMER_H
enum {
- GTIMER_PHYS = 0,
- GTIMER_VIRT = 1,
- GTIMER_HYP = 2,
- GTIMER_SEC = 3,
- GTIMER_HYPVIRT = 4,
-#define NUM_GTIMERS 5
+ GTIMER_PHYS = 0, /* CNTP_* ; EL1 physical timer */
+ GTIMER_VIRT = 1, /* CNTV_* ; EL1 virtual timer */
+ GTIMER_HYP = 2, /* CNTHP_* ; EL2 physical timer */
+ GTIMER_SEC = 3, /* CNTPS_* ; EL3 physical timer */
+ GTIMER_HYPVIRT = 4, /* CNTHV_* ; EL2 virtual timer ; only if FEAT_VHE */
+ GTIMER_S_EL2_PHYS = 5, /* CNTHPS_* ; only if FEAT_SEL2 */
+ GTIMER_S_EL2_VIRT = 6, /* CNTHVS_* ; only if FEAT_SEL2 */
+#define NUM_GTIMERS 7
};
#endif
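The two new enumerators index the same per-CPU timer array as the existing timers, so each secure EL2 timer gets its own cval/ctl state and expiry callback. A short sketch, assuming the ARMGenericTimer element type and the c14_timer field name used elsewhere in this diff:

/* Sketch: how the new indices are consumed in helper.c-style code. */
ARMGenericTimer *sel2_phys = &env->cp15.c14_timer[GTIMER_S_EL2_PHYS];
ARMGenericTimer *sel2_virt = &env->cp15.c14_timer[GTIMER_S_EL2_VIRT];

/* The CPU init code is then expected to size its timer array with NUM_GTIMERS
 * and hook arm_gt_sel2timer_cb()/arm_gt_sel2vtimer_cb() as expiry callbacks. */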
diff --git a/target/arm/helper.c b/target/arm/helper.c
index ce31957..889d308 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12,26 +12,32 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
+#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
-#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
-#include "exec/exec-all.h"
-#include <zlib.h> /* For crc32 */
+#include "exec/cputlb.h"
+#include "exec/translation-block.h"
#include "hw/irq.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
+#include "accel/tcg/probe.h"
+#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
static void switch_mode(CPUARMState *env, int mode);
@@ -219,7 +225,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
}
}
-static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
+static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
{
uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
@@ -243,7 +249,7 @@ void init_cpreg_list(ARMCPU *cpu)
int arraylen;
keys = g_hash_table_get_keys(cpu->cp_regs);
- keys = g_list_sort(keys, cpreg_key_compare);
+ keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);
cpu->cpreg_array_len = 0;
@@ -285,7 +291,7 @@ static CPAccessResult access_el3_aa32ns(CPUARMState *env,
{
if (!is_a64(env) && arm_current_el(env) == 3 &&
arm_is_secure_below_el3(env)) {
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
return CP_ACCESS_OK;
}
@@ -310,7 +316,7 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
return CP_ACCESS_TRAP_EL3;
}
/* This will be EL1 NS and EL2 NS, which just UNDEF */
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
/*
@@ -365,40 +371,6 @@ static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
return CP_ACCESS_OK;
}
-/* Check for traps from EL1 due to HCR_EL2.TTLB. */
-static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_OK;
-}
-
-/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
-static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 &&
- (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_OK;
-}
-
-#ifdef TARGET_AARCH64
-/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
-static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 &&
- (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_OK;
-}
-#endif
-
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = env_archcpu(env);
@@ -438,12 +410,15 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
}
-static int alle1_tlbmask(CPUARMState *env)
+int alle1_tlbmask(CPUARMState *env)
{
/*
* Note that the 'ALL' scope must invalidate both stage 1 and
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
+ *
+ * For AArch32 this is only used for TLBIALLNSNH and VTTBR
+ * writes, so only needs to apply to NS PL1&0, not S PL1&0.
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
@@ -452,174 +427,6 @@ static int alle1_tlbmask(CPUARMState *env)
ARMMMUIdxBit_Stage2_S);
}
-
-/* IS variants of TLB operations must affect all cores */
-static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_all_cpus_synced(cs);
-}
-
-static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_all_cpus_synced(cs);
-}
-
-static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
-}
-
-static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
-}
-
-/*
- * Non-IS variants of TLB operations are upgraded to
- * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
- * force broadcast of these operations.
- */
-static bool tlb_force_broadcast(CPUARMState *env)
-{
- return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
-}
-
-static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate all (TLBIALL) */
- CPUState *cs = env_cpu(env);
-
- if (tlb_force_broadcast(env)) {
- tlb_flush_all_cpus_synced(cs);
- } else {
- tlb_flush(cs);
- }
-}
-
-static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
- CPUState *cs = env_cpu(env);
-
- value &= TARGET_PAGE_MASK;
- if (tlb_force_broadcast(env)) {
- tlb_flush_page_all_cpus_synced(cs, value);
- } else {
- tlb_flush_page(cs, value);
- }
-}
-
-static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by ASID (TLBIASID) */
- CPUState *cs = env_cpu(env);
-
- if (tlb_force_broadcast(env)) {
- tlb_flush_all_cpus_synced(cs);
- } else {
- tlb_flush(cs);
- }
-}
-
-static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
- CPUState *cs = env_cpu(env);
-
- value &= TARGET_PAGE_MASK;
- if (tlb_force_broadcast(env)) {
- tlb_flush_page_all_cpus_synced(cs, value);
- } else {
- tlb_flush_page(cs, value);
- }
-}
-
-static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
-}
-
-static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
-}
-
-
-static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
-}
-
-static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
-}
-
-static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
-
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
-}
-
-static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
-
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E2);
-}
-
-static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
-
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
-static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
-
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
-}
-
static const ARMCPRegInfo cp_reginfo[] = {
/*
* Define the secure and non-secure FCSE identifier CP registers
@@ -729,22 +536,6 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
*/
{ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
- /*
- * MMU TLB control. Note that the wildcarding means we cover not just
- * the unified TLB ops but also the dside/iside/inner-shareable variants.
- */
- { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
- .type = ARM_CP_NO_RAW },
- { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
- .type = ARM_CP_NO_RAW },
- { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
- .type = ARM_CP_NO_RAW },
- { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
- .type = ARM_CP_NO_RAW },
{ .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
.opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
{ .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
@@ -1096,7 +887,7 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
return CP_ACCESS_TRAP_EL2;
@@ -2113,7 +1904,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access,
.fgt = FGT_PMCNTEN,
- .writefn = pmcntenclr_write,
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write,
.type = ARM_CP_ALIAS | ARM_CP_IO },
{ .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
@@ -2121,7 +1912,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fgt = FGT_PMCNTEN,
.type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenclr_write },
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
.access = PL0_RW, .type = ARM_CP_IO,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
@@ -2238,16 +2029,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tpm,
.fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, },
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
{ .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tpm,
.fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write },
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R,
@@ -2328,55 +2119,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
.fgt = FGT_ISR_EL1,
.type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
- /* 32 bit ITLB invalidates */
- { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiall_write },
- { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimva_write },
- { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiasid_write },
- /* 32 bit DTLB invalidates */
- { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiall_write },
- { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimva_write },
- { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiasid_write },
- /* 32 bit TLB invalidates */
- { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiall_write },
- { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimva_write },
- { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbiasid_write },
- { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimvaa_write },
-};
-
-static const ARMCPRegInfo v7mp_cp_reginfo[] = {
- /* 32 bit TLB invalidates, Inner Shareable */
- { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbiall_is_write },
- { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbimva_is_write },
- { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbiasid_is_write },
- { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbimvaa_is_write },
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
@@ -2423,7 +2165,7 @@ static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 0 && (env->teecr & 1)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
return teecr_access(env, ri, isread);
}
@@ -2503,14 +2245,14 @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
cntkctl = env->cp15.c14_cntkctl;
}
if (!extract32(cntkctl, 0, 2)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
break;
case 1:
if (!isread && ri->state == ARM_CP_STATE_AA32 &&
arm_is_secure_below_el3(env)) {
/* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
break;
case 2:
@@ -2519,7 +2261,7 @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
}
if (!isread && el < arm_highest_el(env)) {
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
return CP_ACCESS_OK;
@@ -2542,7 +2284,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
/* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
/* fall through */
case 1:
@@ -2583,7 +2325,7 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
* EL0 if EL0[PV]TEN is zero.
*/
if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
/* fall through */
@@ -2649,7 +2391,10 @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
switch (arm_current_el(env)) {
case 1:
if (!arm_is_secure(env)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_UNDEFINED;
+ }
+ if (arm_is_el2_enabled(env)) {
+ return CP_ACCESS_UNDEFINED;
}
if (!(env->cp15.scr_el3 & SCR_ST)) {
return CP_ACCESS_TRAP_EL3;
@@ -2657,7 +2402,7 @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
return CP_ACCESS_OK;
case 0:
case 2:
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_UNDEFINED;
case 3:
return CP_ACCESS_OK;
default:
@@ -2665,6 +2410,45 @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
}
}
+static CPAccessResult gt_sel2timer_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * The AArch64 register view of the secure EL2 timers is mostly
+ * accessible from EL3 and EL2, although accesses can also be trapped
+ * to EL2 from EL1 depending on the nested virt configuration.
+ */
+ switch (arm_current_el(env)) {
+ case 0: /* UNDEFINED */
+ return CP_ACCESS_UNDEFINED;
+ case 1:
+ if (!arm_is_secure(env)) {
+ /* UNDEFINED */
+ return CP_ACCESS_UNDEFINED;
+ } else if (arm_hcr_el2_eff(env) & HCR_NV) {
+ /* AArch64.SystemAccessTrap(EL2, 0x18) */
+ return CP_ACCESS_TRAP_EL2;
+ }
+ /* UNDEFINED */
+ return CP_ACCESS_UNDEFINED;
+ case 2:
+ if (!arm_is_secure(env)) {
+ /* UNDEFINED */
+ return CP_ACCESS_UNDEFINED;
+ }
+ return CP_ACCESS_OK;
+ case 3:
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_OK;
+ } else {
+ return CP_ACCESS_UNDEFINED;
+ }
+ default:
+ g_assert_not_reached();
+ }
+}
+
uint64_t gt_get_countervalue(CPUARMState *env)
{
ARMCPU *cpu = env_archcpu(env);
@@ -2716,12 +2500,80 @@ static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
return 0;
}
-static uint64_t gt_phys_cnt_offset(CPUARMState *env)
+static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
{
- if (arm_current_el(env) >= 2) {
+ /*
+ * Return the timer offset to use for indirect accesses to the timer.
+ * This is the Offset value as defined in D12.2.4.1 "Operation of the
+ * CompareValue views of the timers".
+ *
+ * The condition here is not always the same as the condition for
+ * whether to apply an offset register when doing a direct read of
+ * the counter sysreg; those conditions are described in the
+ * access pseudocode for each counter register.
+ */
+ switch (timeridx) {
+ case GTIMER_PHYS:
+ return gt_phys_raw_cnt_offset(env);
+ case GTIMER_VIRT:
+ return env->cp15.cntvoff_el2;
+ case GTIMER_HYP:
+ case GTIMER_SEC:
+ case GTIMER_HYPVIRT:
+ case GTIMER_S_EL2_PHYS:
+ case GTIMER_S_EL2_VIRT:
+ return 0;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
+{
+ /*
+ * Return the timer offset to use for direct accesses to the
+ * counter registers CNTPCT and CNTVCT, and for direct accesses
+ * to the CNT*_TVAL registers.
+ *
+ * This isn't exactly the same as the indirect-access offset,
+ * because here we also care about what EL the register access
+ * is being made from.
+ *
+ * This corresponds to the access pseudocode for the registers.
+ */
+ uint64_t hcr;
+
+ switch (timeridx) {
+ case GTIMER_PHYS:
+ if (arm_current_el(env) >= 2) {
+ return 0;
+ }
+ return gt_phys_raw_cnt_offset(env);
+ case GTIMER_VIRT:
+ switch (arm_current_el(env)) {
+ case 2:
+ hcr = arm_hcr_el2_eff(env);
+ if (hcr & HCR_E2H) {
+ return 0;
+ }
+ break;
+ case 0:
+ hcr = arm_hcr_el2_eff(env);
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ return 0;
+ }
+ break;
+ }
+ return env->cp15.cntvoff_el2;
+ case GTIMER_HYP:
+ case GTIMER_SEC:
+ case GTIMER_HYPVIRT:
+ case GTIMER_S_EL2_PHYS:
+ case GTIMER_S_EL2_VIRT:
return 0;
+ default:
+ g_assert_not_reached();
}
- return gt_phys_raw_cnt_offset(env);
}
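To make the offset selection above concrete: the value returned by gt_direct_access_timer_offset() is exactly what the do_tval_read()/do_tval_write() helpers in the following hunks subtract from the raw counter, so the 32-bit TVAL view works out as in this sketch (the numbers are invented for illustration):

/* Worked example: EL1 reading CNTV_TVAL with CNTVOFF_EL2 applied. */
uint64_t cnt  = 0x0000001000000400ULL;   /* gt_get_countervalue()       */
uint64_t voff = 0x0000000000000400ULL;   /* offset for GTIMER_VIRT      */
uint64_t cval = 0x0000001000000800ULL;   /* c14_timer[GTIMER_VIRT].cval */

uint32_t tval = (uint32_t)(cval - (cnt - voff));   /* == 0x800 ticks left */

/* A TVAL write goes the other way: cval = (cnt - voff) + sextract64(v, 0, 32),
 * i.e. the written value is treated as a signed 32-bit delta from "now". */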
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
@@ -2733,8 +2585,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
* Timer enabled: calculate and set current ISTATUS, irq, and
* reset timer to when ISTATUS next has to change
*/
- uint64_t offset = timeridx == GTIMER_VIRT ?
- cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
+ uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
uint64_t count = gt_get_countervalue(&cpu->env);
/* Note that this must be unsigned 64 bit arithmetic: */
int istatus = count - offset >= gt->cval;
@@ -2797,34 +2648,14 @@ static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
-}
-
-uint64_t gt_virt_cnt_offset(CPUARMState *env)
-{
- uint64_t hcr;
-
- switch (arm_current_el(env)) {
- case 2:
- hcr = arm_hcr_el2_eff(env);
- if (hcr & HCR_E2H) {
- return 0;
- }
- break;
- case 0:
- hcr = arm_hcr_el2_eff(env);
- if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
- return 0;
- }
- break;
- }
-
- return env->cp15.cntvoff_el2;
+ uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
+ return gt_get_countervalue(env) - offset;
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
+ uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
+ return gt_get_countervalue(env) - offset;
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2836,47 +2667,38 @@ static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
gt_recalc_timer(env_archcpu(env), timeridx);
}
-static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
- int timeridx)
+static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
{
- uint64_t offset = 0;
-
- switch (timeridx) {
- case GTIMER_VIRT:
- case GTIMER_HYPVIRT:
- offset = gt_virt_cnt_offset(env);
- break;
- case GTIMER_PHYS:
- offset = gt_phys_cnt_offset(env);
- break;
- }
-
return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
(gt_get_countervalue(env) - offset));
}
-static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
- int timeridx,
- uint64_t value)
+static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
{
- uint64_t offset = 0;
+ uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
- switch (timeridx) {
- case GTIMER_VIRT:
- case GTIMER_HYPVIRT:
- offset = gt_virt_cnt_offset(env);
- break;
- case GTIMER_PHYS:
- offset = gt_phys_cnt_offset(env);
- break;
- }
+ return do_tval_read(env, timeridx, offset);
+}
+static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
+ uint64_t offset)
+{
trace_arm_gt_tval_write(timeridx, value);
env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
sextract64(value, 0, 32);
gt_recalc_timer(env_archcpu(env), timeridx);
}
+static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ uint64_t offset = gt_direct_access_timer_offset(env, timeridx);
+
+ do_tval_write(env, timeridx, value, offset);
+}
+
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
int timeridx,
uint64_t value)
@@ -3006,13 +2828,21 @@ static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return gt_tval_read(env, ri, GTIMER_VIRT);
+ /*
+ * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
+ * we always apply CNTVOFF_EL2. Special case that here rather
+ * than going into the generic gt_tval_read() and then having
+ * to re-detect that it's this register.
+ * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
+ */
+ return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
}
static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- gt_tval_write(env, ri, GTIMER_VIRT, value);
+ /* Similarly for writes to CNTV_TVAL_EL02 */
+ do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
}
static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3172,6 +3002,62 @@ static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
gt_ctl_write(env, ri, GTIMER_SEC, value);
}
+static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
+}
+
+static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
+}
+
+static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
+}
+
+static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
+}
+
+static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
+}
+
+static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
+}
+
+static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
+}
+
+static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
+}
+
+static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
+}
+
+static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
+}
+
static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
gt_timer_reset(env, ri, GTIMER_HYPVIRT);
@@ -3228,6 +3114,20 @@ void arm_gt_stimer_cb(void *opaque)
gt_recalc_timer(cpu, GTIMER_SEC);
}
+void arm_gt_sel2timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
+}
+
+void arm_gt_sel2vtimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
+}
+
void arm_gt_hvtimer_cb(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -3568,7 +3468,7 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
}
return CP_ACCESS_TRAP_EL3;
}
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
}
return CP_ACCESS_OK;
@@ -3599,11 +3499,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
GetPhysAddrResult res = {};
/*
- * I_MXTJT: Granule protection checks are not performed on the final address
- * of a successful translation.
+ * I_MXTJT: Granule protection checks are not performed on the final
+ * address of a successful translation. This is a translation, not a
+ * memory reference, so "memop = none = 0".
*/
- ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
- &res, &fi);
+ ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+ mmu_idx, ss, &res, &fi);
/*
* ATS operations only do S1 or S1+S2 translations, so we never
@@ -3775,7 +3676,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_E3;
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_E30_3_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_E3;
+ }
break;
case 2:
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
@@ -3795,7 +3700,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_E10_0;
+ mmu_idx = ARMMMUIdx_E30_0;
break;
case 2:
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
@@ -3860,7 +3765,7 @@ static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
* scr_write() ensures that the NSE bit is not set otherwise.
*/
if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_UNDEFINED;
}
return CP_ACCESS_OK;
}
@@ -3870,7 +3775,7 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
{
if (arm_current_el(env) == 3 &&
!(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_UNDEFINED;
}
return at_e012_access(env, ri, isread);
}
@@ -4758,7 +4663,7 @@ static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
return CP_ACCESS_OK;
}
@@ -4848,9 +4753,9 @@ static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
/* Cache invalidate/clean to Point of Coherency or Persistence... */
switch (arm_current_el(env)) {
case 0:
- /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
+ /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
/* fall through */
case 1:
@@ -4868,9 +4773,9 @@ static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
/* Cache invalidate/clean to Point of Unification... */
switch (arm_current_el(env)) {
case 0:
- /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
+ /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */
if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
/* fall through */
case 1:
@@ -4895,489 +4800,6 @@ static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
}
-/*
- * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
- * Page D4-1736 (DDI0487A.b)
- */
-
-static int vae1_tlbmask(CPUARMState *env)
-{
- uint64_t hcr = arm_hcr_el2_eff(env);
- uint16_t mask;
-
- if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
- mask = ARMMMUIdxBit_E20_2 |
- ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
- } else {
- mask = ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
- }
- return mask;
-}
-
-static int vae2_tlbmask(CPUARMState *env)
-{
- uint64_t hcr = arm_hcr_el2_eff(env);
- uint16_t mask;
-
- if (hcr & HCR_E2H) {
- mask = ARMMMUIdxBit_E20_2 |
- ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
- } else {
- mask = ARMMMUIdxBit_E2;
- }
- return mask;
-}
-
-/* Return 56 if TBI is enabled, 64 otherwise. */
-static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
- uint64_t addr)
-{
- uint64_t tcr = regime_tcr(env, mmu_idx);
- int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
- int select = extract64(addr, 55, 1);
-
- return (tbi >> select) & 1 ? 56 : 64;
-}
-
-static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
-{
- uint64_t hcr = arm_hcr_el2_eff(env);
- ARMMMUIdx mmu_idx;
-
- /* Only the regime of the mmu_idx below is significant. */
- if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
- mmu_idx = ARMMMUIdx_E20_0;
- } else {
- mmu_idx = ARMMMUIdx_E10_0;
- }
-
- return tlbbits_for_regime(env, mmu_idx, addr);
-}
-
-static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
-{
- uint64_t hcr = arm_hcr_el2_eff(env);
- ARMMMUIdx mmu_idx;
-
- /*
- * Only the regime of the mmu_idx below is significant.
- * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
- * only has one.
- */
- if (hcr & HCR_E2H) {
- mmu_idx = ARMMMUIdx_E20_2;
- } else {
- mmu_idx = ARMMMUIdx_E2;
- }
-
- return tlbbits_for_regime(env, mmu_idx, addr);
-}
-
-static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = vae1_tlbmask(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
-}
-
-static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = vae1_tlbmask(env);
-
- if (tlb_force_broadcast(env)) {
- tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
- } else {
- tlb_flush_by_mmuidx(cs, mask);
- }
-}
-
-static int e2_tlbmask(CPUARMState *env)
-{
- return (ARMMMUIdxBit_E20_0 |
- ARMMMUIdxBit_E20_2 |
- ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2);
-}
-
-static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = alle1_tlbmask(env);
-
- tlb_flush_by_mmuidx(cs, mask);
-}
-
-static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = e2_tlbmask(env);
-
- tlb_flush_by_mmuidx(cs, mask);
-}
-
-static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- ARMCPU *cpu = env_archcpu(env);
- CPUState *cs = CPU(cpu);
-
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
-}
-
-static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = alle1_tlbmask(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
-}
-
-static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = e2_tlbmask(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
-}
-
-static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
-}
-
-static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA, EL2
- * Currently handles both VAE2 and VALE2, since we don't support
- * flush-last-level-only.
- */
- CPUState *cs = env_cpu(env);
- int mask = vae2_tlbmask(env);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = vae2_tlbbits(env, pageaddr);
-
- tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
-}
-
-static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA, EL3
- * Currently handles both VAE3 and VALE3, since we don't support
- * flush-last-level-only.
- */
- ARMCPU *cpu = env_archcpu(env);
- CPUState *cs = CPU(cpu);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
-
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
-}
-
-static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = vae1_tlbmask(env);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = vae1_tlbbits(env, pageaddr);
-
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
-}
-
-static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA, EL1&0 (AArch64 version).
- * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
- CPUState *cs = env_cpu(env);
- int mask = vae1_tlbmask(env);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = vae1_tlbbits(env, pageaddr);
-
- if (tlb_force_broadcast(env)) {
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
- } else {
- tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
- }
-}
-
-static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = vae2_tlbmask(env);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = vae2_tlbbits(env, pageaddr);
-
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
-}
-
-static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
-
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E3, bits);
-}
-
-static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
-{
- /*
- * The MSB of value is the NS field, which only applies if SEL2
- * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
- */
- return (value >= 0
- && cpu_isar_feature(aa64_sel2, env_archcpu(env))
- && arm_is_secure_below_el3(env)
- ? ARMMMUIdxBit_Stage2_S
- : ARMMMUIdxBit_Stage2);
-}
-
-static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = ipas2e1_tlbmask(env, value);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
-
- if (tlb_force_broadcast(env)) {
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
- } else {
- tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
- }
-}
-
-static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
- int mask = ipas2e1_tlbmask(env, value);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
-
- tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
-}
-
-#ifdef TARGET_AARCH64
-typedef struct {
- uint64_t base;
- uint64_t length;
-} TLBIRange;
-
-static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
-{
- /*
- * Note that the TLBI range TG field encoding differs from both
- * TG0 and TG1 encodings.
- */
- switch (tg) {
- case 1:
- return Gran4K;
- case 2:
- return Gran16K;
- case 3:
- return Gran64K;
- default:
- return GranInvalid;
- }
-}
-
-static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
- uint64_t value)
-{
- unsigned int page_size_granule, page_shift, num, scale, exponent;
- /* Extract one bit to represent the va selector in use. */
- uint64_t select = sextract64(value, 36, 1);
- ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
- TLBIRange ret = { };
- ARMGranuleSize gran;
-
- page_size_granule = extract64(value, 46, 2);
- gran = tlbi_range_tg_to_gran_size(page_size_granule);
-
- /* The granule encoded in value must match the granule in use. */
- if (gran != param.gran) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
- page_size_granule);
- return ret;
- }
-
- page_shift = arm_granule_bits(gran);
- num = extract64(value, 39, 5);
- scale = extract64(value, 44, 2);
- exponent = (5 * scale) + 1;
-
- ret.length = (num + 1) << (exponent + page_shift);
-
- if (param.select) {
- ret.base = sextract64(value, 0, 37);
- } else {
- ret.base = extract64(value, 0, 37);
- }
- if (param.ds) {
- /*
- * With DS=1, BaseADDR is always shifted 16 so that it is able
- * to address all 52 va bits. The input address is perforce
- * aligned on a 64k boundary regardless of translation granule.
- */
- page_shift = 16;
- }
- ret.base <<= page_shift;
-
- return ret;
-}
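The length computation in the range decoder removed above is easier to follow with numbers plugged in; a worked example for the 4K granule, with purely illustrative field values:

/*
 * TLBI range payload fields, as extracted above:
 *   BaseADDR = value[36:0], NUM = value[43:39], SCALE = value[45:44],
 *   TG = value[47:46]
 * With a 4K granule (page_shift == 12), NUM == 3, SCALE == 1:
 *   exponent = 5 * SCALE + 1 = 6
 *   length   = (NUM + 1) << (exponent + page_shift)
 *            = 4 << 18 = 0x100000 bytes (1 MiB)
 * so the operation covers a 1 MiB window starting at BaseADDR << 12
 * (or BaseADDR << 16 when DS is set, as the removed code notes).
 */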
-
-static void do_rvae_write(CPUARMState *env, uint64_t value,
- int idxmap, bool synced)
-{
- ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
- TLBIRange range;
- int bits;
-
- range = tlbi_aa64_get_range(env, one_idx, value);
- bits = tlbbits_for_regime(env, one_idx, range.base);
-
- if (synced) {
- tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
- range.base,
- range.length,
- idxmap,
- bits);
- } else {
- tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
- range.length, idxmap, bits);
- }
-}
-
-static void tlbi_aa64_rvae1_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, EL1&0.
- * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
-
- do_rvae_write(env, value, vae1_tlbmask(env),
- tlb_force_broadcast(env));
-}
-
-static void tlbi_aa64_rvae1is_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, Inner/Outer Shareable EL1&0.
- * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
- * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
- * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
- * shareable specific flushes.
- */
-
- do_rvae_write(env, value, vae1_tlbmask(env), true);
-}
-
-static void tlbi_aa64_rvae2_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, EL2.
- * Currently handles all of RVAE2 and RVALE2,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
-
- do_rvae_write(env, value, vae2_tlbmask(env),
- tlb_force_broadcast(env));
-
-
-}
-
-static void tlbi_aa64_rvae2is_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, Inner/Outer Shareable, EL2.
- * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
- * since we don't support flush-for-specific-ASID-only,
- * flush-last-level-only or inner/outer shareable specific flushes.
- */
-
- do_rvae_write(env, value, vae2_tlbmask(env), true);
-
-}
-
-static void tlbi_aa64_rvae3_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, EL3.
- * Currently handles all of RVAE3 and RVALE3,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
-
- do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
-}
-
-static void tlbi_aa64_rvae3is_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * Invalidate by VA range, EL3, Inner/Outer Shareable.
- * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
- * since we don't support flush-for-specific-ASID-only,
- * flush-last-level-only or inner/outer specific flushes.
- */
-
- do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
-}
-
-static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
- tlb_force_broadcast(env));
-}
-
-static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
- const ARMCPRegInfo *ri,
- uint64_t value)
-{
- do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
-}
-#endif
-
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -5393,7 +4815,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
}
} else {
if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
if (hcr & HCR_TDZ) {
return CP_ACCESS_TRAP_EL2;
@@ -5426,7 +4848,7 @@ static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
* Access to SP_EL0 is undefined if it's being used as
* the stack pointer.
*/
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
return CP_ACCESS_OK;
}
@@ -5568,7 +4990,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
mmap_lock();
- tb_invalidate_phys_range(start_address, end_address);
+ tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
mmap_unlock();
}
@@ -5590,7 +5012,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
{ .name = "FPCR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
- .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
+ .access = PL0_RW, .type = ARM_CP_FPU,
.readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
{ .name = "FPSR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
@@ -5672,99 +5094,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.fgt = FGT_DCCISW,
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
- /* TLBI operations */
- { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVMALLE1IS,
- .writefn = tlbi_aa64_vmalle1is_write },
- { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAE1IS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIASIDE1IS,
- .writefn = tlbi_aa64_vmalle1is_write },
- { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAAE1IS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVALE1IS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAALE1IS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVMALLE1,
- .writefn = tlbi_aa64_vmalle1_write },
- { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAE1,
- .writefn = tlbi_aa64_vae1_write },
- { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIASIDE1,
- .writefn = tlbi_aa64_vmalle1_write },
- { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAAE1,
- .writefn = tlbi_aa64_vae1_write },
- { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVALE1,
- .writefn = tlbi_aa64_vae1_write },
- { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAALE1,
- .writefn = tlbi_aa64_vae1_write },
- { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1is_write },
- { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1is_write },
- { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1is_write },
- { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1is_write },
- { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1_write },
- { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ipas2e1_write },
- { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1_write },
- { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
@@ -5820,42 +5149,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
.writefn = par_write },
#endif
- /* TLB invalidate last level of translation table walk */
- { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbimva_is_write },
- { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
- .writefn = tlbimvaa_is_write },
- { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimva_write },
- { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
- .writefn = tlbimvaa_write },
- { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbimva_hyp_write },
- { .name = "TLBIMVALHIS",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbimva_hyp_is_write },
- { .name = "TLBIIPAS2",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_hyp_write },
- { .name = "TLBIIPAS2IS",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2is_hyp_write },
- { .name = "TLBIIPAS2L",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2_hyp_write },
- { .name = "TLBIIPAS2LIS",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiipas2is_hyp_write },
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
@@ -6038,6 +5331,11 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
/* Clear RES0 bits. */
value &= valid_mask;
+ /* RW is RAO/WI if EL1 is AArch64 only */
+ if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ value |= HCR_RW;
+ }
+
/*
* These bits change the MMU setup:
* HCR_VM enables stage 2 translation
@@ -6095,6 +5393,12 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
+static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* hcr_write will set the RES1 bits on an AArch64-only CPU */
+ hcr_write(env, ri, 0);
+}
+
/*
* Return the effective value of HCR_EL2, at the given security state.
* Bits that are not included here:
@@ -6216,6 +5520,14 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (cpu_isar_feature(aa64_nmi, cpu)) {
valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
}
+ /* FEAT_CMOW adds CMOW */
+ if (cpu_isar_feature(aa64_cmow, cpu)) {
+ valid_mask |= HCRX_CMOW;
+ }
+ /* FEAT_XS adds FGTnXS, FnXS */
+ if (cpu_isar_feature(aa64_xs, cpu)) {
+ valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
+ }
/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
@@ -6322,6 +5634,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.nv2_redirect_offset = 0x78,
+ .resetfn = hcr_reset,
.writefn = hcr_write, .raw_writefn = raw_write },
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
.type = ARM_CP_ALIAS | ARM_CP_IO,
@@ -6437,50 +5750,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
- { .name = "TLBIALLNSNH",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_nsnh_write },
- { .name = "TLBIALLNSNHIS",
- .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_nsnh_is_write },
- { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_hyp_write },
- { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_hyp_is_write },
- { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbimva_hyp_write },
- { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbimva_hyp_is_write },
- { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_alle2_write },
- { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2_write },
- { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2_write },
- { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_alle2is_write },
- { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2is_write },
- { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
/*
* Unlike the other EL2-related AT operations, these must
@@ -6581,7 +5850,7 @@ static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
return CP_ACCESS_OK;
}
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
@@ -6595,6 +5864,56 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
.access = PL2_RW, .accessfn = sel2_access,
.nv2_redirect_offset = 0x48,
.fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
+#ifndef CONFIG_USER_ONLY
+ /* Secure EL2 Physical Timer */
+ { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .readfn = gt_sec_pel2_tval_read,
+ .writefn = gt_sec_pel2_tval_write,
+ .resetfn = gt_sec_pel2_timer_reset,
+ },
+ { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
+ .resetvalue = 0,
+ .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
+ .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
+ },
+ /* Secure EL2 Virtual Timer */
+ { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .readfn = gt_sec_vel2_tval_read,
+ .writefn = gt_sec_vel2_tval_write,
+ .resetfn = gt_sec_vel2_timer_reset,
+ },
+ { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
+ .resetvalue = 0,
+ .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .accessfn = gt_sel2timer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
+ .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
+ },
+#endif
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -6617,7 +5936,7 @@ static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
if (isread) {
return CP_ACCESS_OK;
}
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
@@ -6693,30 +6012,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
.access = PL3_RW, .type = ARM_CP_CONST,
.resetvalue = 0 },
- { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle3is_write },
- { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3is_write },
- { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3is_write },
- { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle3_write },
- { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3_write },
- { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3_write },
};
#ifndef CONFIG_USER_ONLY
@@ -6729,7 +6024,7 @@ static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
return CP_ACCESS_OK;
}
if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
return CP_ACCESS_OK;
}
@@ -6827,7 +6122,7 @@ static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
}
/* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ return CP_ACCESS_UNDEFINED;
}
if (ri->orig_accessfn) {
return ri->orig_accessfn(env, ri->opaque, isread);
@@ -7004,7 +6299,7 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
}
} else {
if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
if (hcr & HCR_TID2) {
return CP_ACCESS_TRAP_EL2;
@@ -7034,7 +6329,7 @@ static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
return CP_ACCESS_TRAP_EL2;
}
- if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
+ if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
return CP_ACCESS_TRAP_EL3;
}
return CP_ACCESS_OK;
@@ -7232,7 +6527,7 @@ uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
if (el <= 1 && !el_is_in_host(env, el)) {
len = MIN(len, 0xf & (uint32_t)cr[1]);
}
- if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+ if (el <= 2 && arm_is_el2_enabled(env)) {
len = MIN(len, 0xf & (uint32_t)cr[2]);
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -7294,7 +6589,6 @@ static const ARMCPRegInfo zcr_reginfo[] = {
.writefn = zcr_write, .raw_writefn = raw_write },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7303,7 +6597,7 @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
if (el == 0) {
uint64_t sctlr = arm_sctlr(env, el);
if (!(sctlr & SCTLR_EnTP2)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
}
/* TODO: FEAT_FGT */
@@ -7344,7 +6638,7 @@ static void arm_reset_sve_state(CPUARMState *env)
memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
/* Recall that FFR is stored as pregs[16]. */
memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
- vfp_set_fpcr(env, 0x0800009f);
+ vfp_set_fpsr(env, 0x0800009f);
}
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
@@ -7459,14 +6753,6 @@ static const ARMCPRegInfo sme_reginfo[] = {
.type = ARM_CP_CONST, .resetvalue = 0 },
};
-static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush(cs);
-}
-
static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -7484,14 +6770,6 @@ static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
env_archcpu(env)->reset_l0gptsz);
}
-static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = env_cpu(env);
-
- tlb_flush_all_cpus_synced(cs);
-}
-
static const ARMCPRegInfo rme_reginfo[] = {
{ .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
@@ -7503,28 +6781,6 @@ static const ARMCPRegInfo rme_reginfo[] = {
{ .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
- { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_paall_write },
- { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_paallos_write },
- /*
- * QEMU does not have a way to invalidate by physical address, thus
- * invalidating a range of physical addresses is accomplished by
- * flushing all tlb entries in the outer shareable domain,
- * just like PAALLOS.
- */
- { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_paallos_write },
- { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_paallos_write },
{ .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
.access = PL3_W, .type = ARM_CP_NOP },
@@ -7566,7 +6822,6 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.writefn = aa64_allint_write, .readfn = aa64_allint_read,
.resetfn = arm_cp_reset_ignore },
};
-#endif /* TARGET_AARCH64 */
static void define_pmu_regs(ARMCPU *cpu)
{
@@ -7719,8 +6974,8 @@ static CPAccessResult access_lor_other(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
if (arm_is_secure_below_el3(env)) {
- /* Access denied in secure mode. */
- return CP_ACCESS_TRAP;
+ /* UNDEF if SCR_EL3.NS == 0 */
+ return CP_ACCESS_UNDEFINED;
}
return access_lor_ns(env, ri, isread);
}
@@ -7758,7 +7013,6 @@ static const ARMCPRegInfo lor_reginfo[] = {
.type = ARM_CP_CONST, .resetvalue = 0 },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7830,210 +7084,6 @@ static const ARMCPRegInfo pauth_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};
-static const ARMCPRegInfo tlbirange_reginfo[] = {
- { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAE1IS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAAE1IS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVALE1IS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAALE1IS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAE1OS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAAE1OS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVALE1OS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAALE1OS,
- .writefn = tlbi_aa64_rvae1is_write },
- { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAE1,
- .writefn = tlbi_aa64_rvae1_write },
- { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAAE1,
- .writefn = tlbi_aa64_rvae1_write },
- { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVALE1,
- .writefn = tlbi_aa64_rvae1_write },
- { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIRVAALE1,
- .writefn = tlbi_aa64_rvae1_write },
- { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ripas2e1is_write },
- { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ripas2e1is_write },
- { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2is_write },
- { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2is_write },
- { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ripas2e1_write },
- { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_ripas2e1_write },
- { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2is_write },
- { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2is_write },
- { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2_write },
- { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_rvae2_write },
- { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3is_write },
- { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3is_write },
- { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3is_write },
- { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3is_write },
- { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3_write },
- { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_rvae3_write },
-};
-
-static const ARMCPRegInfo tlbios_reginfo[] = {
- { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVMALLE1OS,
- .writefn = tlbi_aa64_vmalle1is_write },
- { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
- .fgt = FGT_TLBIVAE1OS,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIASIDE1OS,
- .writefn = tlbi_aa64_vmalle1is_write },
- { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAAE1OS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVALE1OS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
- .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
- .fgt = FGT_TLBIVAALE1OS,
- .writefn = tlbi_aa64_vae1is_write },
- { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_alle2is_write },
- { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2is_write },
- { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1is_write },
- { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = tlbi_aa64_vae2is_write },
- { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle1is_write },
- { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NOP },
- { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
- .access = PL2_W, .type = ARM_CP_NOP },
- { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NOP },
- { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
- .access = PL2_W, .type = ARM_CP_NOP },
- { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_alle3is_write },
- { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3is_write },
- { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
- .access = PL3_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vae3is_write },
-};
-
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
Error *err = NULL;
@@ -8345,7 +7395,7 @@ static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
if (hcr & HCR_TGE) {
return CP_ACCESS_TRAP_EL2;
}
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
} else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
return CP_ACCESS_TRAP_EL2;
@@ -8455,8 +7505,6 @@ static const ARMCPRegInfo nv2_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};
-#endif /* TARGET_AARCH64 */
-
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -8465,7 +7513,7 @@ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
if (el == 0) {
uint64_t sctlr = arm_sctlr(env, el);
if (!(sctlr & SCTLR_EnRCTX)) {
- return CP_ACCESS_TRAP;
+ return CP_ACCESS_TRAP_EL1;
}
} else if (el == 1) {
uint64_t hcr = arm_hcr_el2_eff(env);
@@ -8716,6 +7764,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, not_v8_cp_reginfo);
}
+#ifndef CONFIG_USER_ONLY
+ define_tlb_insn_regs(cpu);
+#endif
+
if (arm_feature(env, ARM_FEATURE_V6)) {
/* The ID registers all have impdef reset values */
ARMCPRegInfo v6_idregs[] = {
@@ -8821,10 +7873,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_V7MP) &&
- !arm_feature(env, ARM_FEATURE_PMSA)) {
- define_arm_cp_regs(cpu, v7mp_cp_reginfo);
- }
if (arm_feature(env, ARM_FEATURE_V7VE)) {
define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
}
@@ -9899,7 +8947,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sme, cpu)) {
define_arm_cp_regs(cpu, sme_reginfo);
}
@@ -9909,12 +8956,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_rndr, cpu)) {
define_arm_cp_regs(cpu, rndr_reginfo);
}
- if (cpu_isar_feature(aa64_tlbirange, cpu)) {
- define_arm_cp_regs(cpu, tlbirange_reginfo);
- }
- if (cpu_isar_feature(aa64_tlbios, cpu)) {
- define_arm_cp_regs(cpu, tlbios_reginfo);
- }
/* Data Cache clean instructions up to PoP */
if (cpu_isar_feature(aa64_dcpop, cpu)) {
define_one_arm_cp_reg(cpu, dcpop_reg);
@@ -9966,7 +9007,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_nmi, cpu)) {
define_arm_cp_regs(cpu, nmi_reginfo);
}
-#endif
if (cpu_isar_feature(any_predinv, cpu)) {
define_arm_cp_regs(cpu, predinv_reginfo);
@@ -10327,6 +9367,31 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
continue;
}
+ if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
+ cpu_isar_feature(aa64_xs, cpu)) {
+ /*
+ * This is a TLBI insn which has an NXS variant. The
+ * NXS variant is at the same encoding except that
+ * crn is +1, and has the same behaviour except for
+ * fine-grained trapping. Add the NXS insn here and
+ * then fall through to add the normal register.
+ * add_cpreg_to_hashtable() copies the cpreg struct
+ * and name that it is passed, so it's OK to use
+ * a local struct here.
+ */
+ ARMCPRegInfo nxs_ri = *r;
+ g_autofree char *name = g_strdup_printf("%sNXS", r->name);
+
+ assert(state == ARM_CP_STATE_AA64);
+ assert(nxs_ri.crn < 0xf);
+ nxs_ri.crn++;
+ if (nxs_ri.fgt) {
+ nxs_ri.fgt |= R_FGT_NXS_MASK;
+ }
+ add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2, name);
+ }
if (state == ARM_CP_STATE_AA32) {
/*
* Under AArch32 CP registers can be common
@@ -10765,7 +9830,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_EL3)) {
- rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ rw = arm_scr_rw_eff(env);
} else {
/*
* Either EL2 is the highest EL (and so the EL2 register width
@@ -10840,6 +9905,7 @@ void arm_log_exception(CPUState *cs)
[EXCP_NMI] = "NMI",
[EXCP_VINMI] = "Virtual IRQ NMI",
[EXCP_VFNMI] = "Virtual FIQ NMI",
+ [EXCP_MON_TRAP] = "Monitor Trap",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -11406,6 +10472,16 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 0;
break;
+ case EXCP_MON_TRAP:
+ new_mode = ARM_CPU_MODE_MON;
+ addr = 0x04;
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ if (env->thumb) {
+ offset = 2;
+ } else {
+ offset = 4;
+ }
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
@@ -11539,7 +10615,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
unsigned int new_el = env->exception.target_el;
- target_ulong addr = env->cp15.vbar_el[new_el];
+ vaddr addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
unsigned int old_mode;
unsigned int cur_el = arm_current_el(env);
@@ -11563,7 +10639,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
switch (new_el) {
case 3:
- is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+ is_aa64 = arm_scr_rw_eff(env);
break;
case 2:
hcr = arm_hcr_el2_eff(env);
@@ -11861,10 +10937,20 @@ void arm_cpu_do_interrupt(CPUState *cs)
uint64_t arm_sctlr(CPUARMState *env, int el)
{
- /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
+ /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
+ switch (mmu_idx) {
+ case ARMMMUIdx_E20_0:
+ el = 2;
+ break;
+ case ARMMMUIdx_E30_0:
+ el = 3;
+ break;
+ default:
+ el = 1;
+ break;
+ }
}
return env->cp15.sctlr_el[el];
}
@@ -12128,289 +11214,6 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
};
}
-/*
- * Note that signed overflow is undefined in C. The following routines are
- * careful to use unsigned types where modulo arithmetic is required.
- * Failure to do so _will_ break on newer gcc.
- */
-
-/* Signed saturating arithmetic. */
-
-/* Perform 16-bit signed saturating addition. */
-static inline uint16_t add16_sat(uint16_t a, uint16_t b)
-{
- uint16_t res;
-
- res = a + b;
- if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
- if (a & 0x8000) {
- res = 0x8000;
- } else {
- res = 0x7fff;
- }
- }
- return res;
-}
-
-/* Perform 8-bit signed saturating addition. */
-static inline uint8_t add8_sat(uint8_t a, uint8_t b)
-{
- uint8_t res;
-
- res = a + b;
- if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
- if (a & 0x80) {
- res = 0x80;
- } else {
- res = 0x7f;
- }
- }
- return res;
-}
-
-/* Perform 16-bit signed saturating subtraction. */
-static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
-{
- uint16_t res;
-
- res = a - b;
- if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
- if (a & 0x8000) {
- res = 0x8000;
- } else {
- res = 0x7fff;
- }
- }
- return res;
-}
-
-/* Perform 8-bit signed saturating subtraction. */
-static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
-{
- uint8_t res;
-
- res = a - b;
- if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
- if (a & 0x80) {
- res = 0x80;
- } else {
- res = 0x7f;
- }
- }
- return res;
-}
-
-#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
-#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
-#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
-#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
-#define PFX q
-
-#include "op_addsub.h"
-
-/* Unsigned saturating arithmetic. */
-static inline uint16_t add16_usat(uint16_t a, uint16_t b)
-{
- uint16_t res;
- res = a + b;
- if (res < a) {
- res = 0xffff;
- }
- return res;
-}
-
-static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
-{
- if (a > b) {
- return a - b;
- } else {
- return 0;
- }
-}
-
-static inline uint8_t add8_usat(uint8_t a, uint8_t b)
-{
- uint8_t res;
- res = a + b;
- if (res < a) {
- res = 0xff;
- }
- return res;
-}
-
-static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
-{
- if (a > b) {
- return a - b;
- } else {
- return 0;
- }
-}
-
-#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
-#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
-#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
-#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
-#define PFX uq
-
-#include "op_addsub.h"
-
-/* Signed modulo arithmetic. */
-#define SARITH16(a, b, n, op) do { \
- int32_t sum; \
- sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
- RESULT(sum, n, 16); \
- if (sum >= 0) \
- ge |= 3 << (n * 2); \
- } while (0)
-
-#define SARITH8(a, b, n, op) do { \
- int32_t sum; \
- sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
- RESULT(sum, n, 8); \
- if (sum >= 0) \
- ge |= 1 << n; \
- } while (0)
-
-
-#define ADD16(a, b, n) SARITH16(a, b, n, +)
-#define SUB16(a, b, n) SARITH16(a, b, n, -)
-#define ADD8(a, b, n) SARITH8(a, b, n, +)
-#define SUB8(a, b, n) SARITH8(a, b, n, -)
-#define PFX s
-#define ARITH_GE
-
-#include "op_addsub.h"
-
-/* Unsigned modulo arithmetic. */
-#define ADD16(a, b, n) do { \
- uint32_t sum; \
- sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
- RESULT(sum, n, 16); \
- if ((sum >> 16) == 1) \
- ge |= 3 << (n * 2); \
- } while (0)
-
-#define ADD8(a, b, n) do { \
- uint32_t sum; \
- sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
- RESULT(sum, n, 8); \
- if ((sum >> 8) == 1) \
- ge |= 1 << n; \
- } while (0)
-
-#define SUB16(a, b, n) do { \
- uint32_t sum; \
- sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
- RESULT(sum, n, 16); \
- if ((sum >> 16) == 0) \
- ge |= 3 << (n * 2); \
- } while (0)
-
-#define SUB8(a, b, n) do { \
- uint32_t sum; \
- sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
- RESULT(sum, n, 8); \
- if ((sum >> 8) == 0) \
- ge |= 1 << n; \
- } while (0)
-
-#define PFX u
-#define ARITH_GE
-
-#include "op_addsub.h"
-
-/* Halved signed arithmetic. */
-#define ADD16(a, b, n) \
- RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
-#define SUB16(a, b, n) \
- RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
-#define ADD8(a, b, n) \
- RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
-#define SUB8(a, b, n) \
- RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
-#define PFX sh
-
-#include "op_addsub.h"
-
-/* Halved unsigned arithmetic. */
-#define ADD16(a, b, n) \
- RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
-#define SUB16(a, b, n) \
- RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
-#define ADD8(a, b, n) \
- RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
-#define SUB8(a, b, n) \
- RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
-#define PFX uh
-
-#include "op_addsub.h"
-
-static inline uint8_t do_usad(uint8_t a, uint8_t b)
-{
- if (a > b) {
- return a - b;
- } else {
- return b - a;
- }
-}
-
-/* Unsigned sum of absolute byte differences. */
-uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
-{
- uint32_t sum;
- sum = do_usad(a, b);
- sum += do_usad(a >> 8, b >> 8);
- sum += do_usad(a >> 16, b >> 16);
- sum += do_usad(a >> 24, b >> 24);
- return sum;
-}
-
-/* For ARMv6 SEL instruction. */
-uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
-{
- uint32_t mask;
-
- mask = 0;
- if (flags & 1) {
- mask |= 0xff;
- }
- if (flags & 2) {
- mask |= 0xff00;
- }
- if (flags & 4) {
- mask |= 0xff0000;
- }
- if (flags & 8) {
- mask |= 0xff000000;
- }
- return (a & mask) | (b & ~mask);
-}
-
-/*
- * CRC helpers.
- * The upper bytes of val (above the number specified by 'bytes') must have
- * been zeroed out by the caller.
- */
-uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
-{
- uint8_t buf[4];
-
- stl_le_p(buf, val);
-
- /* zlib crc32 converts the accumulator and output to one's complement. */
- return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
-}
-
-uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
-{
- uint8_t buf[4];
-
- stl_le_p(buf, val);
-
- /* Linux crc32c converts the output to one's complement. */
- return crc32c(acc, buf, bytes) ^ 0xffffffff;
-}
/*
* Return the exception level to which FP-disabled exceptions should
@@ -12532,6 +11335,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E30_0:
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
@@ -12541,6 +11345,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARMMMUIdx_E20_2_PAN:
return 2;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_3_PAN:
return 3;
default:
g_assert_not_reached();
@@ -12569,6 +11374,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
hcr = arm_hcr_el2_eff(env);
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
idx = ARMMMUIdx_E20_0;
+ } else if (arm_is_secure_below_el3(env) &&
+ !arm_el_is_aa64(env, 3)) {
+ idx = ARMMMUIdx_E30_0;
} else {
idx = ARMMMUIdx_E10_0;
}
@@ -12593,6 +11401,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
}
break;
case 3:
+ if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
+ return ARMMMUIdx_E30_3_PAN;
+ }
return ARMMMUIdx_E3;
default:
g_assert_not_reached();
@@ -12606,116 +11417,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
-static bool mve_no_pred(CPUARMState *env)
-{
- /*
- * Return true if there is definitely no predication of MVE
- * instructions by VPR or LTPSIZE. (Returning false even if there
- * isn't any predication is OK; generated code will just be
- * a little worse.)
- * If the CPU does not implement MVE then this TB flag is always 0.
- *
- * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
- * logic in gen_update_fp_context() needs to be updated to match.
- *
- * We do not include the effect of the ECI bits here -- they are
- * tracked in other TB flags. This simplifies the logic for
- * "when did we emit code that changes the MVE_NO_PRED TB flag
- * and thus need to end the TB?".
- */
- if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
- return false;
- }
- if (env->v7m.vpr) {
- return false;
- }
- if (env->v7m.ltpsize < 4) {
- return false;
- }
- return true;
-}
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- CPUARMTBFlags flags;
-
- assert_hflags_rebuild_correctly(env);
- flags = env->hflags;
-
- if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
- *pc = env->pc;
- if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- DP_TBFLAG_A64(flags, BTYPE, env->btype);
- }
- } else {
- *pc = env->regs[15];
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
- != env->v7m.secure) {
- DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
- }
-
- if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
- (env->v7m.secure &&
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
- /*
- * ASPEN is set, but FPCA/SFPA indicate that there is no
- * active FP context; we must create a new FP context before
- * executing any FP insn.
- */
- DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
- }
-
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- DP_TBFLAG_M32(flags, LSPACT, 1);
- }
-
- if (mve_no_pred(env)) {
- DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
- }
- } else {
- /*
- * Note that XSCALE_CPAR shares bits with VECSTRIDE.
- * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
- */
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
- } else {
- DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
- DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
- }
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- DP_TBFLAG_A32(flags, VFPEN, 1);
- }
- }
-
- DP_TBFLAG_AM32(flags, THUMB, env->thumb);
- DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
- }
-
- /*
- * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
- * states defined in the ARM ARM for software singlestep:
- * SS_ACTIVE PSTATE.SS State
- * 0 x Inactive (the TB flag for SS is always 0)
- * 1 0 Active-pending
- * 1 1 Active-not-pending
- * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
- */
- if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
- DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
- }
-
- *pflags = flags.flags;
- *cs_base = flags.flags2;
-}
-
-#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
@@ -12830,7 +11531,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
aarch64_sve_narrow_vq(env, new_len + 1);
}
}
-#endif
#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 970d059..f340a49 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -1,1103 +1,6 @@
-DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+/* SPDX-License-Identifier: GPL-2.0-or-later */
-DEF_HELPER_3(add_setq, i32, env, i32, i32)
-DEF_HELPER_3(add_saturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
-DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
-DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
-
-#define PAS_OP(pfx) \
- DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
-
-PAS_OP(s)
-PAS_OP(u)
-#undef PAS_OP
-
-#define PAS_OP(pfx) \
- DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
- DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
-PAS_OP(q)
-PAS_OP(sh)
-PAS_OP(uq)
-PAS_OP(uh)
-#undef PAS_OP
-
-DEF_HELPER_3(ssat, i32, env, i32, i32)
-DEF_HELPER_3(usat, i32, env, i32, i32)
-DEF_HELPER_3(ssat16, i32, env, i32, i32)
-DEF_HELPER_3(usat16, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-
-DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
- i32, i32, i32, i32)
-DEF_HELPER_2(exception_internal, noreturn, env, i32)
-DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
-DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
-DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
-DEF_HELPER_2(exception_swstep, noreturn, env, i32)
-DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
-DEF_HELPER_1(setend, void, env)
-DEF_HELPER_2(wfi, void, env, i32)
-DEF_HELPER_1(wfe, void, env)
-DEF_HELPER_2(wfit, void, env, i64)
-DEF_HELPER_1(yield, void, env)
-DEF_HELPER_1(pre_hvc, void, env)
-DEF_HELPER_2(pre_smc, void, env, i32)
-DEF_HELPER_1(vesb, void, env)
-
-DEF_HELPER_3(cpsr_write, void, env, i32, i32)
-DEF_HELPER_2(cpsr_write_eret, void, env, i32)
-DEF_HELPER_1(cpsr_read, i32, env)
-
-DEF_HELPER_3(v7m_msr, void, env, i32, i32)
-DEF_HELPER_2(v7m_mrs, i32, env, i32)
-
-DEF_HELPER_2(v7m_bxns, void, env, i32)
-DEF_HELPER_2(v7m_blxns, void, env, i32)
-
-DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
-
-DEF_HELPER_1(v7m_preserve_fp_state, void, env)
-
-DEF_HELPER_2(v7m_vlstm, void, env, i32)
-DEF_HELPER_2(v7m_vlldm, void, env, i32)
-
-DEF_HELPER_2(v8m_stackcheck, void, env, i32)
-
-DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
-
-DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
-DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
-DEF_HELPER_2(get_cp_reg, i32, env, cptr)
-DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
-DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
-
-DEF_HELPER_2(get_r13_banked, i32, env, i32)
-DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-
-DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
-DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
-
-DEF_HELPER_2(get_user_reg, i32, env, i32)
-DEF_HELPER_3(set_user_reg, void, env, i32, i32)
-
-DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
-
-DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32)
-
-DEF_HELPER_1(vfp_get_fpscr, i32, env)
-DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
-
-DEF_HELPER_3(vfp_addh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_subh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_mulh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_divh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_maxh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_minh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_minnumh, f16, f16, f16, ptr)
-DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
-DEF_HELPER_2(vfp_sqrth, f16, f16, env)
-DEF_HELPER_2(vfp_sqrts, f32, f32, env)
-DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
-DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
-DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
-
-DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
-DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
-DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr)
-DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, ptr)
-
-DEF_HELPER_2(vfp_uitoh, f16, i32, ptr)
-DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
-DEF_HELPER_2(vfp_uitod, f64, i32, ptr)
-DEF_HELPER_2(vfp_sitoh, f16, i32, ptr)
-DEF_HELPER_2(vfp_sitos, f32, i32, ptr)
-DEF_HELPER_2(vfp_sitod, f64, i32, ptr)
-
-DEF_HELPER_2(vfp_touih, i32, f16, ptr)
-DEF_HELPER_2(vfp_touis, i32, f32, ptr)
-DEF_HELPER_2(vfp_touid, i32, f64, ptr)
-DEF_HELPER_2(vfp_touizh, i32, f16, ptr)
-DEF_HELPER_2(vfp_touizs, i32, f32, ptr)
-DEF_HELPER_2(vfp_touizd, i32, f64, ptr)
-DEF_HELPER_2(vfp_tosih, s32, f16, ptr)
-DEF_HELPER_2(vfp_tosis, s32, f32, ptr)
-DEF_HELPER_2(vfp_tosid, s32, f64, ptr)
-DEF_HELPER_2(vfp_tosizh, s32, f16, ptr)
-DEF_HELPER_2(vfp_tosizs, s32, f32, ptr)
-DEF_HELPER_2(vfp_tosizd, s32, f64, ptr)
-
-DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_touqh, i64, f16, i32, ptr)
-DEF_HELPER_3(vfp_tosqh, i64, f16, i32, ptr)
-DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
-DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr)
-DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr)
-DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr)
-DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr)
-DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_shtoh, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr)
-DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr)
-
-DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, ptr)
-DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, ptr)
-DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, ptr)
-DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, ptr)
-
-DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr)
-
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, ptr, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32)
-
-DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
-DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
-DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr)
-
-DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
-DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
-DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
-DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
-DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
-
-DEF_HELPER_3(shl_cc, i32, env, i32, i32)
-DEF_HELPER_3(shr_cc, i32, env, i32, i32)
-DEF_HELPER_3(sar_cc, i32, env, i32, i32)
-DEF_HELPER_3(ror_cc, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, ptr)
-DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
-DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, ptr)
-DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
-
-DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
-DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr)
-
-DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
-
-/* neon_helper.c */
-DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
-
-DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
-DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
-DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
-DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_2(neon_add_u8, i32, i32, i32)
-DEF_HELPER_2(neon_add_u16, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
-
-DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
-
-DEF_HELPER_1(neon_clz_u8, i32, i32)
-DEF_HELPER_1(neon_clz_u16, i32, i32)
-DEF_HELPER_1(neon_cls_s8, i32, i32)
-DEF_HELPER_1(neon_cls_s16, i32, i32)
-DEF_HELPER_1(neon_cls_s32, i32, i32)
-DEF_HELPER_1(neon_cnt_u8, i32, i32)
-DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32)
-
-DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
-DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
-DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
-DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
-
-DEF_HELPER_1(neon_narrow_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_u16, i32, i64)
-DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
-DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
-DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
-DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
-DEF_HELPER_1(neon_widen_u8, i64, i32)
-DEF_HELPER_1(neon_widen_s8, i64, i32)
-DEF_HELPER_1(neon_widen_u16, i64, i32)
-DEF_HELPER_1(neon_widen_s16, i64, i32)
-
-DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
-DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
-DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
-DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
-DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
-DEF_HELPER_2(neon_subl_u32, i64, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
-DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
-
-DEF_HELPER_1(neon_negl_u16, i64, i64)
-DEF_HELPER_1(neon_negl_u32, i64, i64)
-
-DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
-DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
-
-DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr)
-DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr)
-
-/* iwmmxt_helper.c */
-DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
-DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
-
-#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
-DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
-
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
-
-DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(mins)
-DEF_IWMMXT_HELPER_SIZE_ENV(minu)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(subn)
-DEF_IWMMXT_HELPER_SIZE_ENV(addn)
-DEF_IWMMXT_HELPER_SIZE_ENV(subu)
-DEF_IWMMXT_HELPER_SIZE_ENV(addu)
-DEF_IWMMXT_HELPER_SIZE_ENV(subs)
-DEF_IWMMXT_HELPER_SIZE_ENV(adds)
-
-DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
-DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
-
-DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
-
-DEF_HELPER_1(iwmmxt_addcb, i64, i64)
-DEF_HELPER_1(iwmmxt_addcw, i64, i64)
-DEF_HELPER_1(iwmmxt_addcl, i64, i64)
-
-DEF_HELPER_1(iwmmxt_msbb, i32, i64)
-DEF_HELPER_1(iwmmxt_msbw, i32, i64)
-DEF_HELPER_1(iwmmxt_msbl, i32, i64)
-
-DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
-
-DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
-
-DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-
-DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_fs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_fu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
-DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)
-
-DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+#include "tcg/helper.h"
#ifdef TARGET_AARCH64
#include "tcg/helper-a64.h"
diff --git a/target/arm/hvf-stub.c b/target/arm/hvf-stub.c
new file mode 100644
index 0000000..ff13726
--- /dev/null
+++ b/target/arm/hvf-stub.c
@@ -0,0 +1,20 @@
+/*
+ * QEMU Hypervisor.framework (HVF) stubs for ARM
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hvf_arm.h"
+
+uint32_t hvf_arm_get_default_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
+
+uint32_t hvf_arm_get_max_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
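These stubs exist only so that binaries built without HVF still link; nothing should call them on such builds, hence g_assert_not_reached(). A hypothetical call site would keep the query behind hvf_enabled(), for example (the variable and its fallback value are assumptions, not taken from this patch):

/* Hypothetical caller: query HVF for the IPA limit only when HVF is active. */
uint32_t pa_bits = 40;                      /* assumed default for non-HVF accelerators */
if (hvf_enabled()) {
    pa_bits = hvf_arm_get_max_ipa_bit_size();
}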
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index eb090e6..42258cc 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -11,26 +11,29 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "qemu/log.h"
-#include "sysemu/runstate.h"
-#include "sysemu/hvf.h"
-#include "sysemu/hvf_int.h"
-#include "sysemu/hw_accel.h"
+#include "system/runstate.h"
+#include "system/hvf.h"
+#include "system/hvf_int.h"
+#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
#include <mach/mach_time.h>
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
-#include "trace/trace-target_arm_hvf.h"
+#include "trace.h"
#include "migration/vmstate.h"
#include "gdbstub/enums.h"
@@ -183,6 +186,7 @@ void hvf_arm_init_debug(void)
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
+#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1)
#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
@@ -297,6 +301,8 @@ void hvf_arm_init_debug(void)
static void hvf_wfi(CPUState *cpu);
+static uint32_t chosen_ipa_bit_size;
+
typedef struct HVFVTimer {
/* Vtimer value during migration and paused state */
uint64_t vtimer_val;
@@ -839,6 +845,16 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
return val;
}
+static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
+{
+ uint32_t ipa_size = chosen_ipa_bit_size ?
+ chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
+
+ /* Clamp down the PARange to the IPA size the kernel supports. */
+ uint8_t index = round_down_to_parange_index(ipa_size);
+ *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+}
+
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
ARMISARegisters host_isar = {};
@@ -882,6 +898,20 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);
+
+ /*
+ * Disable SME, which is not properly handled by QEMU hvf yet.
+ * To allow this through we would need to:
+ * - make sure that the SME state is correctly handled in the
+ * get_registers/put_registers functions
+ * - get the SME-specific CPU properties to work with accelerators
+ * other than TCG
+ * - fix any assumptions we made that SME implies SVE (since
+ * on the M4 there is SME but not SVE)
+ */
+ host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;
+
ahcf->isar = host_isar;
/*
@@ -904,6 +934,30 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
return r == HV_SUCCESS;
}
+uint32_t hvf_arm_get_default_ipa_bit_size(void)
+{
+ uint32_t default_ipa_size;
+ hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
+ assert_hvf_ok(ret);
+
+ return default_ipa_size;
+}
+
+uint32_t hvf_arm_get_max_ipa_bit_size(void)
+{
+ uint32_t max_ipa_size;
+ hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
+ assert_hvf_ok(ret);
+
+ /*
+     * We clamp any IPA size used to back the VM to a valid PARange value,
+     * so the guest does not try to map memory outside of the valid range.
+     * This simply rounds the requested IPA bit size down to the largest
+     * valid PARange value that does not exceed it.
+ */
+ return round_down_to_parange_bit_size(max_ipa_size);
+}
+
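A minimal sketch of the rounding described in the comment above, assuming the ARMv8 PARange encodings of 32/36/40/42/44/48/52 bits (the real round_down_to_parange_bit_size()/round_down_to_parange_index() helpers used by this patch live in the common ARM code and may differ in detail):

#include <stdint.h>

/* Illustrative only: round an IPA size down to the nearest valid PARange size. */
static const uint8_t parange_bit_sizes[] = { 32, 36, 40, 42, 44, 48, 52 };

static uint32_t round_down_ipa_sketch(uint32_t ipa_bits)
{
    int i;

    for (i = (int)(sizeof(parange_bit_sizes) / sizeof(parange_bit_sizes[0])) - 1;
         i >= 0; i--) {
        if (parange_bit_sizes[i] <= ipa_bits) {
            return parange_bit_sizes[i];    /* e.g. 47 -> 44, 48 -> 48 */
        }
    }
    return parange_bit_sizes[0];    /* IPA sizes below 32 bits are not expected */
}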
void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
if (!arm_host_cpu_features.dtb_compatible) {
@@ -929,6 +983,25 @@ void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}
+hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
+{
+ hv_return_t ret;
+ hv_vm_config_t config = hv_vm_config_create();
+
+ ret = hv_vm_config_set_ipa_size(config, pa_range);
+ if (ret != HV_SUCCESS) {
+ goto cleanup;
+ }
+ chosen_ipa_bit_size = pa_range;
+
+ ret = hv_vm_create(config);
+
+cleanup:
+ os_release(config);
+
+ return ret;
+}
+
int hvf_arch_init_vcpu(CPUState *cpu)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -995,6 +1068,11 @@ int hvf_arch_init_vcpu(CPUState *cpu)
&arm_cpu->isar.id_aa64mmfr0);
assert_hvf_ok(ret);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
+ arm_cpu->isar.id_aa64mmfr0);
+ assert_hvf_ok(ret);
+
return 0;
}
@@ -1199,57 +1277,61 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
return false;
}
-static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
+static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
- uint64_t val = 0;
+
+ if (arm_feature(env, ARM_FEATURE_PMU)) {
+ switch (reg) {
+ case SYSREG_PMCR_EL0:
+ *val = env->cp15.c9_pmcr;
+ return 0;
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ *val = env->cp15.c15_ccnt;
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMCNTENCLR_EL0:
+ *val = env->cp15.c9_pmcnten;
+ return 0;
+ case SYSREG_PMOVSCLR_EL0:
+ *val = env->cp15.c9_pmovsr;
+ return 0;
+ case SYSREG_PMSELR_EL0:
+ *val = env->cp15.c9_pmselr;
+ return 0;
+ case SYSREG_PMINTENCLR_EL1:
+ *val = env->cp15.c9_pminten;
+ return 0;
+ case SYSREG_PMCCFILTR_EL0:
+ *val = env->cp15.pmccfiltr_el0;
+ return 0;
+ case SYSREG_PMCNTENSET_EL0:
+ *val = env->cp15.c9_pmcnten;
+ return 0;
+ case SYSREG_PMUSERENR_EL0:
+ *val = env->cp15.c9_pmuserenr;
+ return 0;
+ case SYSREG_PMCEID0_EL0:
+ case SYSREG_PMCEID1_EL0:
+ /* We can't really count anything yet, declare all events invalid */
+ *val = 0;
+ return 0;
+ }
+ }
switch (reg) {
case SYSREG_CNTPCT_EL0:
- val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
+ *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
gt_cntfrq_period_ns(arm_cpu);
- break;
- case SYSREG_PMCR_EL0:
- val = env->cp15.c9_pmcr;
- break;
- case SYSREG_PMCCNTR_EL0:
- pmu_op_start(env);
- val = env->cp15.c15_ccnt;
- pmu_op_finish(env);
- break;
- case SYSREG_PMCNTENCLR_EL0:
- val = env->cp15.c9_pmcnten;
- break;
- case SYSREG_PMOVSCLR_EL0:
- val = env->cp15.c9_pmovsr;
- break;
- case SYSREG_PMSELR_EL0:
- val = env->cp15.c9_pmselr;
- break;
- case SYSREG_PMINTENCLR_EL1:
- val = env->cp15.c9_pminten;
- break;
- case SYSREG_PMCCFILTR_EL0:
- val = env->cp15.pmccfiltr_el0;
- break;
- case SYSREG_PMCNTENSET_EL0:
- val = env->cp15.c9_pmcnten;
- break;
- case SYSREG_PMUSERENR_EL0:
- val = env->cp15.c9_pmuserenr;
- break;
- case SYSREG_PMCEID0_EL0:
- case SYSREG_PMCEID1_EL0:
- /* We can't really count anything yet, declare all events invalid */
- val = 0;
- break;
+ return 0;
case SYSREG_OSLSR_EL1:
- val = env->cp15.oslsr_el1;
- break;
+ *val = env->cp15.oslsr_el1;
+ return 0;
case SYSREG_OSDLR_EL1:
/* Dummy register */
- break;
+ return 0;
case SYSREG_ICC_AP0R0_EL1:
case SYSREG_ICC_AP0R1_EL1:
case SYSREG_ICC_AP0R2_EL1:
@@ -1276,9 +1358,8 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
case SYSREG_ICC_SRE_EL1:
case SYSREG_ICC_CTLR_EL1:
/* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
- if (!hvf_sysreg_read_cp(cpu, reg, &val)) {
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
- return 1;
+ if (hvf_sysreg_read_cp(cpu, reg, val)) {
+ return 0;
}
break;
case SYSREG_DBGBVR0_EL1:
@@ -1297,8 +1378,8 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
case SYSREG_DBGBVR13_EL1:
case SYSREG_DBGBVR14_EL1:
case SYSREG_DBGBVR15_EL1:
- val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
- break;
+ *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
+ return 0;
case SYSREG_DBGBCR0_EL1:
case SYSREG_DBGBCR1_EL1:
case SYSREG_DBGBCR2_EL1:
@@ -1315,8 +1396,8 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
case SYSREG_DBGBCR13_EL1:
case SYSREG_DBGBCR14_EL1:
case SYSREG_DBGBCR15_EL1:
- val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
- break;
+ *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
+ return 0;
case SYSREG_DBGWVR0_EL1:
case SYSREG_DBGWVR1_EL1:
case SYSREG_DBGWVR2_EL1:
@@ -1333,8 +1414,8 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
case SYSREG_DBGWVR13_EL1:
case SYSREG_DBGWVR14_EL1:
case SYSREG_DBGWVR15_EL1:
- val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
- break;
+ *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
+ return 0;
case SYSREG_DBGWCR0_EL1:
case SYSREG_DBGWCR1_EL1:
case SYSREG_DBGWCR2_EL1:
@@ -1351,35 +1432,25 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
case SYSREG_DBGWCR13_EL1:
case SYSREG_DBGWCR14_EL1:
case SYSREG_DBGWCR15_EL1:
- val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
- break;
+ *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
+ return 0;
default:
if (is_id_sysreg(reg)) {
/* ID system registers read as RES0 */
- val = 0;
- break;
+ *val = 0;
+ return 0;
}
- cpu_synchronize_state(cpu);
- trace_hvf_unhandled_sysreg_read(env->pc, reg,
- SYSREG_OP0(reg),
- SYSREG_OP1(reg),
- SYSREG_CRN(reg),
- SYSREG_CRM(reg),
- SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
- return 1;
}
- trace_hvf_sysreg_read(reg,
- SYSREG_OP0(reg),
- SYSREG_OP1(reg),
- SYSREG_CRN(reg),
- SYSREG_CRM(reg),
- SYSREG_OP2(reg),
- val);
- hvf_set_reg(cpu, rt, val);
-
- return 0;
+ cpu_synchronize_state(cpu);
+ trace_hvf_unhandled_sysreg_read(env->pc, reg,
+ SYSREG_OP0(reg),
+ SYSREG_OP1(reg),
+ SYSREG_CRN(reg),
+ SYSREG_CRM(reg),
+ SYSREG_OP2(reg));
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ return 1;
}
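After this rework, hvf_sysreg_read() returns its result through the *val out-parameter and returns 0 when the register was handled; for an unhandled register it injects the UDEF exception itself and returns 1. A minimal sketch of that contract (the real caller in hvf_vcpu_exec() further below follows the same pattern):

    uint64_t val;

    if (hvf_sysreg_read(cpu, reg, &val) == 0) {
        hvf_set_reg(cpu, rt, val);      /* handled: write the value back to Xt */
    }                                   /* else: UDEF has already been injected */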
static void pmu_update_irq(CPUARMState *env)
@@ -1498,70 +1569,82 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
SYSREG_OP2(reg),
val);
- switch (reg) {
- case SYSREG_PMCCNTR_EL0:
- pmu_op_start(env);
- env->cp15.c15_ccnt = val;
- pmu_op_finish(env);
- break;
- case SYSREG_PMCR_EL0:
- pmu_op_start(env);
-
- if (val & PMCRC) {
- /* The counter has been reset */
- env->cp15.c15_ccnt = 0;
- }
+ if (arm_feature(env, ARM_FEATURE_PMU)) {
+ switch (reg) {
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ env->cp15.c15_ccnt = val;
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMCR_EL0:
+ pmu_op_start(env);
+
+ if (val & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
- if (val & PMCRP) {
- unsigned int i;
- for (i = 0; i < pmu_num_counters(env); i++) {
- env->cp15.c14_pmevcntr[i] = 0;
+ if (val & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
}
- }
- env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
- env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
+ env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+ env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
+
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMUSERENR_EL0:
+ env->cp15.c9_pmuserenr = val & 0xf;
+ return 0;
+ case SYSREG_PMCNTENSET_EL0:
+ env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
+ return 0;
+ case SYSREG_PMCNTENCLR_EL0:
+ env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
+ return 0;
+ case SYSREG_PMINTENCLR_EL1:
+ pmu_op_start(env);
+ env->cp15.c9_pminten |= val;
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMOVSCLR_EL0:
+ pmu_op_start(env);
+ env->cp15.c9_pmovsr &= ~val;
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMSWINC_EL0:
+ pmu_op_start(env);
+ pmswinc_write(env, val);
+ pmu_op_finish(env);
+ return 0;
+ case SYSREG_PMSELR_EL0:
+ env->cp15.c9_pmselr = val & 0x1f;
+ return 0;
+ case SYSREG_PMCCFILTR_EL0:
+ pmu_op_start(env);
+ env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
+ pmu_op_finish(env);
+ return 0;
+ }
+ }
- pmu_op_finish(env);
- break;
- case SYSREG_PMUSERENR_EL0:
- env->cp15.c9_pmuserenr = val & 0xf;
- break;
- case SYSREG_PMCNTENSET_EL0:
- env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
- break;
- case SYSREG_PMCNTENCLR_EL0:
- env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
- break;
- case SYSREG_PMINTENCLR_EL1:
- pmu_op_start(env);
- env->cp15.c9_pminten |= val;
- pmu_op_finish(env);
- break;
- case SYSREG_PMOVSCLR_EL0:
- pmu_op_start(env);
- env->cp15.c9_pmovsr &= ~val;
- pmu_op_finish(env);
- break;
- case SYSREG_PMSWINC_EL0:
- pmu_op_start(env);
- pmswinc_write(env, val);
- pmu_op_finish(env);
- break;
- case SYSREG_PMSELR_EL0:
- env->cp15.c9_pmselr = val & 0x1f;
- break;
- case SYSREG_PMCCFILTR_EL0:
- pmu_op_start(env);
- env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
- pmu_op_finish(env);
- break;
+ switch (reg) {
case SYSREG_OSLAR_EL1:
env->cp15.oslsr_el1 = val & 1;
- break;
+ return 0;
+ case SYSREG_CNTP_CTL_EL0:
+ /*
+ * Guests should not rely on the physical counter, but macOS emits
+ * disable writes to it. Let it do so, but ignore the requests.
+ */
+ qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n");
+ return 0;
case SYSREG_OSDLR_EL1:
/* Dummy register */
- break;
+ return 0;
case SYSREG_ICC_AP0R0_EL1:
case SYSREG_ICC_AP0R1_EL1:
case SYSREG_ICC_AP0R2_EL1:
@@ -1588,13 +1671,13 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_ICC_SGI1R_EL1:
case SYSREG_ICC_SRE_EL1:
/* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
- if (!hvf_sysreg_write_cp(cpu, reg, val)) {
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ if (hvf_sysreg_write_cp(cpu, reg, val)) {
+ return 0;
}
break;
case SYSREG_MDSCR_EL1:
env->cp15.mdscr_el1 = val;
- break;
+ return 0;
case SYSREG_DBGBVR0_EL1:
case SYSREG_DBGBVR1_EL1:
case SYSREG_DBGBVR2_EL1:
@@ -1612,7 +1695,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_DBGBVR14_EL1:
case SYSREG_DBGBVR15_EL1:
env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
- break;
+ return 0;
case SYSREG_DBGBCR0_EL1:
case SYSREG_DBGBCR1_EL1:
case SYSREG_DBGBCR2_EL1:
@@ -1630,7 +1713,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_DBGBCR14_EL1:
case SYSREG_DBGBCR15_EL1:
env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
- break;
+ return 0;
case SYSREG_DBGWVR0_EL1:
case SYSREG_DBGWVR1_EL1:
case SYSREG_DBGWVR2_EL1:
@@ -1648,7 +1731,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_DBGWVR14_EL1:
case SYSREG_DBGWVR15_EL1:
env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
- break;
+ return 0;
case SYSREG_DBGWCR0_EL1:
case SYSREG_DBGWCR1_EL1:
case SYSREG_DBGWCR2_EL1:
@@ -1666,20 +1749,18 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_DBGWCR14_EL1:
case SYSREG_DBGWCR15_EL1:
env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
- break;
- default:
- cpu_synchronize_state(cpu);
- trace_hvf_unhandled_sysreg_write(env->pc, reg,
- SYSREG_OP0(reg),
- SYSREG_OP1(reg),
- SYSREG_CRN(reg),
- SYSREG_CRM(reg),
- SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
- return 1;
+ return 0;
}
- return 0;
+ cpu_synchronize_state(cpu);
+ trace_hvf_unhandled_sysreg_write(env->pc, reg,
+ SYSREG_OP0(reg),
+ SYSREG_OP1(reg),
+ SYSREG_CRN(reg),
+ SYSREG_CRM(reg),
+ SYSREG_OP2(reg));
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ return 1;
}
static int hvf_inject_interrupts(CPUState *cpu)
@@ -1903,6 +1984,7 @@ int hvf_vcpu_exec(CPUState *cpu)
bool isv = syndrome & ARM_EL_ISV;
bool iswrite = (syndrome >> 6) & 1;
bool s1ptw = (syndrome >> 7) & 1;
+ bool sse = (syndrome >> 21) & 1;
uint32_t sas = (syndrome >> 22) & 3;
uint32_t len = 1 << sas;
uint32_t srt = (syndrome >> 16) & 0x1f;
@@ -1930,6 +2012,9 @@ int hvf_vcpu_exec(CPUState *cpu)
address_space_read(&address_space_memory,
hvf_exit->exception.physical_address,
MEMTXATTRS_UNSPECIFIED, &val, len);
+ if (sse) {
+ val = sextract64(val, 0, len * 8);
+ }
hvf_set_reg(cpu, srt, val);
}
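The new sign-extension path uses sextract64() from qemu/bitops.h, which extracts a bitfield and sign-extends it to 64 bits. A small illustration with made-up values, not taken from the patch:

    uint64_t val = 0x80;                  /* 1-byte MMIO read result */
    uint32_t len = 1;                     /* SAS = 0 -> 1 byte access */
    val = sextract64(val, 0, len * 8);    /* -> 0xffffffffffffff80 */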
@@ -1944,7 +2029,17 @@ int hvf_vcpu_exec(CPUState *cpu)
int sysreg_ret = 0;
if (isread) {
- sysreg_ret = hvf_sysreg_read(cpu, reg, rt);
+ sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
+ if (!sysreg_ret) {
+ trace_hvf_sysreg_read(reg,
+ SYSREG_OP0(reg),
+ SYSREG_OP1(reg),
+ SYSREG_CRN(reg),
+ SYSREG_CRM(reg),
+ SYSREG_OP2(reg),
+ val);
+ hvf_set_reg(cpu, rt, val);
+ }
} else {
val = hvf_get_reg(cpu, rt);
sysreg_ret = hvf_sysreg_write(cpu, reg, val);
@@ -2183,28 +2278,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
-static void hvf_arch_set_traps(void)
+static void hvf_arch_set_traps(CPUState *cpu)
{
- CPUState *cpu;
bool should_enable_traps = false;
hv_return_t r = HV_SUCCESS;
/* Check whether guest debugging is enabled for at least one vCPU; if it
* is, enable exiting the guest on all vCPUs */
- CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->accel->guest_debug_enabled;
- }
- CPU_FOREACH(cpu) {
- /* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
+ /* Set whether debug exceptions exit the guest */
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
- /* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
- }
+ /* Set whether accesses to debug registers exit the guest */
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
}
void hvf_arch_update_guest_debug(CPUState *cpu)
@@ -2245,7 +2335,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
}
- hvf_arch_set_traps();
+ hvf_arch_set_traps(cpu);
}
bool hvf_arch_supports_guest_debug(void)
diff --git a/target/arm/hvf/trace.h b/target/arm/hvf/trace.h
new file mode 100644
index 0000000..04a19c1
--- /dev/null
+++ b/target/arm/hvf/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_arm_hvf.h"
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
index e848c1d..ea82f26 100644
--- a/target/arm/hvf_arm.h
+++ b/target/arm/hvf_arm.h
@@ -11,7 +11,7 @@
#ifndef QEMU_HVF_ARM_H
#define QEMU_HVF_ARM_H
-#include "cpu.h"
+#include "target/arm/cpu-qom.h"
/**
* hvf_arm_init_debug() - initialize guest debug capabilities
@@ -22,4 +22,7 @@ void hvf_arm_init_debug(void);
void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu);
+uint32_t hvf_arm_get_default_ipa_bit_size(void);
+uint32_t hvf_arm_get_max_ipa_bit_size(void);
+
#endif
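hvf_arm_get_default_ipa_bit_size() and hvf_arm_get_max_ipa_bit_size() are only declared here; a hypothetical machine-side caller (requested_bits is a placeholder, not from the patch) might clamp its physical-address size like this:

    uint32_t bits = requested_bits ? requested_bits
                                   : hvf_arm_get_default_ipa_bit_size();
    bits = MIN(bits, hvf_arm_get_max_ipa_bit_size());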
diff --git a/target/arm/hyp_gdbstub.c b/target/arm/hyp_gdbstub.c
index f120d55..bb59697 100644
--- a/target/arm/hyp_gdbstub.c
+++ b/target/arm/hyp_gdbstub.c
@@ -54,7 +54,7 @@ GArray *hw_breakpoints, *hw_watchpoints;
* here so future PC comparisons will work properly.
*/
-int insert_hw_breakpoint(target_ulong addr)
+int insert_hw_breakpoint(vaddr addr)
{
HWBreakpoint brk = {
.bcr = 0x1, /* BCR E=1, enable */
@@ -80,7 +80,7 @@ int insert_hw_breakpoint(target_ulong addr)
* Delete a breakpoint and shuffle any above down
*/
-int delete_hw_breakpoint(target_ulong pc)
+int delete_hw_breakpoint(vaddr pc)
{
int i;
for (i = 0; i < hw_breakpoints->len; i++) {
@@ -125,7 +125,7 @@ int delete_hw_breakpoint(target_ulong pc)
* need to ensure you mask the address as required and set BAS=0xff
*/
-int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type)
+int insert_hw_watchpoint(vaddr addr, vaddr len, int type)
{
HWWatchpoint wp = {
.wcr = R_DBGWCR_E_MASK, /* E=1, enable */
@@ -158,7 +158,6 @@ int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type)
break;
default:
g_assert_not_reached();
- break;
}
if (len <= 8) {
/* we align the address and set the bits in BAS */
@@ -183,7 +182,7 @@ int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type)
return 0;
}
-bool check_watchpoint_in_range(int i, target_ulong addr)
+bool check_watchpoint_in_range(int i, vaddr addr)
{
HWWatchpoint *wp = get_hw_wp(i);
uint64_t addr_top, addr_bottom = wp->wvr;
@@ -215,7 +214,7 @@ bool check_watchpoint_in_range(int i, target_ulong addr)
* Delete a breakpoint and shuffle any above down
*/
-int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type)
+int delete_hw_watchpoint(vaddr addr, vaddr len, int type)
{
int i;
for (i = 0; i < cur_hw_wps; i++) {
@@ -227,7 +226,7 @@ int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type)
return -ENOENT;
}
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc)
{
int i;
@@ -240,7 +239,7 @@ bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
return false;
}
-CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
+CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr)
{
int i;
diff --git a/target/arm/internals.h b/target/arm/internals.h
index da22d04..3360de9 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -25,9 +25,13 @@
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H
+#include "exec/hwaddr.h"
+#include "exec/vaddr.h"
#include "exec/breakpoint.h"
+#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
+#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"
@@ -350,27 +354,30 @@ static inline int r14_bank_number(int mode)
}
void arm_cpu_register(const ARMCPUInfo *info);
-void aarch64_cpu_register(const ARMCPUInfo *info);
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
-void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *, GArray *,
- GArray *);
+void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
+ GPtrArray *, GPtrArray *);
void arm_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data);
#ifdef CONFIG_TCG
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */
typedef enum ARMFPRounding {
@@ -390,6 +397,141 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
return arm_rmode_to_sf_map[rmode];
}
+/* Return the effective value of SCR_EL3.RW */
+static inline bool arm_scr_rw_eff(CPUARMState *env)
+{
+ /*
+ * SCR_EL3.RW has an effective value of 1 if:
+ * - we are NS and EL2 is implemented but doesn't support AArch32
+ * - we are S and EL2 is enabled (in which case it must be AArch64)
+ */
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (env->cp15.scr_el3 & SCR_RW) {
+ return true;
+ }
+ if (env->cp15.scr_el3 & SCR_NS) {
+ return arm_feature(env, ARM_FEATURE_EL2) &&
+ !cpu_isar_feature(aa64_aa32_el2, cpu);
+ } else {
+ return env->cp15.scr_el3 & SCR_EEL2;
+ }
+}
+
+/* Return true if the specified exception level is running in AArch64 state. */
+static inline bool arm_el_is_aa64(CPUARMState *env, int el)
+{
+ /*
+ * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
+ */
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ /*
+ * The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
+ */
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ aa64 = aa64 && arm_scr_rw_eff(env);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_is_el2_enabled(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
+}
+
+/*
+ * Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[env->v7m.secure] & 1);
+ }
+
+ if (is_a64(env)) {
+ return extract32(env->pstate, 2, 2);
+ }
+
+ switch (env->uncached_cpsr & 0x1f) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_MON:
+ return 3;
+ default:
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
+ return 3;
+ }
+
+ return 1;
+ }
+}
+
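A quick worked example of the AArch64 branch above, with an illustrative PSTATE value rather than one taken from the patch:

    uint32_t pstate = 0x3c5;              /* typical EL1h value */
    int el = extract32(pstate, 2, 2);     /* bits [3:2] = 0b01 -> EL1 */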
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
+ bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ if (sctlr_b) {
+ return true;
+ }
+#endif
+ /* In 32-bit state, endianness is determined by looking at the CPSR E bit */
+ return env->uncached_cpsr & CPSR_E;
+}
+
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
+{
+ return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ if (!is_a64(env)) {
+ return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
+ } else {
+ int cur_el = arm_current_el(env);
+ uint64_t sctlr = arm_sctlr(env, cur_el);
+ return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
+ }
+}
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
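As a small illustration of arm_cpu_data_is_big_endian_a64() (SCTLR_EE and SCTLR_E0E are the existing SCTLR bit definitions): at EL0 the E0E bit selects data endianness, while at EL1 and above the EE bit does.

    bool be_el0 = arm_cpu_data_is_big_endian_a64(0, SCTLR_E0E);  /* true */
    bool be_el1 = arm_cpu_data_is_big_endian_a64(1, SCTLR_EE);   /* true */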
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
if (env->pstate & PSTATE_SP) {
@@ -436,6 +578,25 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
*/
unsigned int arm_pamax(ARMCPU *cpu);
+/*
+ * round_down_to_parange_index
+ * @bit_size: uint8_t
+ *
+ * Rounds down the bit_size supplied to the first supported ARM physical
+ * address range and returns the index for this. The index is intended to
+ * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
+ */
+uint8_t round_down_to_parange_index(uint8_t bit_size);
+
+/*
+ * round_down_to_parange_bit_size
+ * @bit_size: uint8_t
+ *
+ * Rounds down the bit_size supplied to the first supported ARM physical
+ * address range bit size and returns this.
+ */
+uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
+
/* Return true if extended addresses are enabled.
* This is always the case if our translation regime is 64 bit,
* but depends on TTBCR.EAE for 32 bit.
@@ -568,8 +729,8 @@ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
ARMFaultType type;
ARMGPCF gpcf;
- target_ulong s2addr;
- target_ulong paddr;
+ hwaddr s2addr;
+ hwaddr paddr;
ARMSecuritySpace paddr_space;
int level;
int domain;
@@ -783,9 +944,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra);
#else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
@@ -852,7 +1013,16 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
}
}
-/* Return true if this address translation regime has two ranges. */
+/*
+ * Return true if this address translation regime has two ranges.
+ * Note that this will not return the correct answer for AArch32
+ * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
+ * never called from a context where EL3 can be AArch32. (The
+ * correct return value for ARMMMUIdx_E3 would be different for
+ * that case, so we can't just make the function return the
+ * correct value anyway; we would need an extra "bool e3_is_aarch32"
+ * argument which all the current callsites would pass as 'false'.)
+ */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
@@ -877,6 +1047,7 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E30_3_PAN:
return true;
default:
return false;
@@ -900,10 +1071,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_E2:
return 2;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
return 3;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E0:
- return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_1:
@@ -925,7 +1097,9 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
+ case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E30_0:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_MUser:
case ARMMMUIdx_MSUser:
@@ -934,10 +1108,6 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
return true;
default:
return false;
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- g_assert_not_reached();
}
}
@@ -1394,6 +1564,7 @@ typedef struct GetPhysAddrResult {
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
* @mmu_idx: MMU index indicating required translation regime
* @result: set on translation success.
* @fi: set to fault info if the translation fails
@@ -1411,8 +1582,8 @@ typedef struct GetPhysAddrResult {
* * for PSMAv5 based systems we don't bother to return a full FSR format
* value.
*/
-bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
__attribute__((nonnull));
@@ -1422,6 +1593,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
* @mmu_idx: MMU index indicating required translation regime
* @space: security space for the access
* @result: set on translation success.
@@ -1430,8 +1602,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
* Similar to get_phys_addr, but use the given security space and don't perform
* a Granule Protection Check on the resulting address.
*/
-bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
- MMUAccessType access_type,
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
@@ -1639,7 +1811,6 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
-#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
@@ -1657,7 +1828,12 @@ void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
-#endif
+
+/* Return true if the gdbstub is presenting an AArch64 CPU */
+static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
+{
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
@@ -1697,6 +1873,9 @@ static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for TLBI instructions */
+void define_tlb_insn_regs(ARMCPU *cpu);
+
/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
@@ -1728,8 +1907,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
-void assert_hflags_rebuild_correctly(CPUARMState *env);
-
/*
* Although the ARM implementation of hardware assisted debugging
* allows for different breakpoints per-core, the current GDB
@@ -1771,20 +1948,42 @@ extern GArray *hw_breakpoints, *hw_watchpoints;
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
-int insert_hw_breakpoint(target_ulong pc);
-int delete_hw_breakpoint(target_ulong pc);
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
+int insert_hw_breakpoint(vaddr pc);
+int delete_hw_breakpoint(vaddr pc);
-bool check_watchpoint_in_range(int i, target_ulong addr);
-CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
-int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
-int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
+bool check_watchpoint_in_range(int i, vaddr addr);
+CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
+int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
+int delete_hw_watchpoint(vaddr addr, vaddr len, int type);
/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
* Return the currently applicable offset between the system counter
- * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
+ * and the counter for the specified timer, as used for direct register
+ * accesses.
*/
-uint64_t gt_virt_cnt_offset(CPUARMState *env);
+uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);
+
+/*
+ * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
+ * all EL1" scope; this covers stage 1 and stage 2.
+ */
+int alle1_tlbmask(CPUARMState *env);
+
+/* Set the float_status behaviour to match the Arm defaults */
+void arm_set_default_fp_behaviours(float_status *s);
+/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
+void arm_set_ah_fp_behaviours(float_status *s);
+/* Read the float_status info and return the appropriate FPSR value */
+uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
+/* Clear the exception status flags from all float_status fields */
+void vfp_clear_float_status_exc_flags(CPUARMState *env);
+/*
+ * Update float_status fields to handle the bits of the FPCR
+ * specified by mask changing to the values in val.
+ */
+void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
+
#endif
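The new float_status helpers are prototypes only at this point; a minimal sketch of what a caller could do with the first one (the local variable is illustrative, not a CPU state field):

    float_status fs = { };
    arm_set_default_fp_behaviours(&fs);   /* match the Arm default behaviours */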
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index 965a486..34e57fa 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -22,3 +22,100 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
g_assert_not_reached();
}
+
+/*
+ * It's safe to call these functions without KVM support.
+ * They should either do nothing or return "not supported".
+ */
+bool kvm_arm_aarch32_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_pmu_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_sve_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
+/*
+ * These functions should never actually be called without KVM support.
+ */
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_vgic_probe(void)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_init(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_cpu_pre_save(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 70f79ed..74fda8b 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -20,17 +20,17 @@
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
+#include "system/system.h"
+#include "system/runstate.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "hw/irq.h"
@@ -39,8 +39,10 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "target/arm/gtimer.h"
+#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_INFO(DEVICE_CTRL),
KVM_CAP_LAST_INFO
};
@@ -98,8 +100,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init)
{
int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
@@ -119,6 +120,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
if (vmfd < 0) {
goto err;
}
+
+ /*
+ * The MTE capability must be enabled by the VMM before creating
+ * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1
+ * register to be probed correctly, as they are masked if MTE
+ * is not enabled.
+ */
+ if (kvm_arm_mte_supported()) {
+ KVMState kvm_state;
+
+ kvm_state.fd = kvmfd;
+ kvm_state.vmfd = vmfd;
+ kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
+ }
+
cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
if (cpufd < 0) {
goto err;
@@ -133,40 +149,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
struct kvm_vcpu_init preferred;
ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
- if (!ret) {
- init->target = preferred.target;
- }
- }
- if (ret >= 0) {
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
if (ret < 0) {
goto err;
}
- } else if (cpus_to_try) {
- /* Old kernel which doesn't know about the
- * PREFERRED_TARGET ioctl: we know it will only support
- * creating one kind of guest CPU which is its preferred
- * CPU type.
- */
- struct kvm_vcpu_init try;
-
- while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
- try.target = *cpus_to_try++;
- memcpy(try.features, init->features, sizeof(init->features));
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
- if (ret >= 0) {
- break;
- }
- }
- if (ret < 0) {
- goto err;
- }
- init->target = try.target;
- } else {
- /* Treat a NULL cpus_to_try argument the same as an empty
- * list, which means we will fail the call since this must
- * be an old kernel which doesn't support PREFERRED_TARGET.
- */
+ init->target = preferred.target;
+ }
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
goto err;
}
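With the fallback CPU-type list removed, kvm_arm_create_scratch_host_vcpu() always relies on KVM_ARM_PREFERRED_TARGET. A minimal usage sketch, assuming the existing kvm_arm_destroy_scratch_host_vcpu() helper for cleanup:

    int fdarray[3];                               /* kvmfd, vmfd, cpufd */
    struct kvm_vcpu_init init = { .target = -1 }; /* -1: use the preferred target */

    if (kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
        /* ... probe ID registers through fdarray[2] ... */
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
    }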
@@ -242,17 +231,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
uint64_t features = 0;
int err;
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type. Fortunately these old kernels
- * support only a very limited number of CPUs.
- */
- static const uint32_t cpus_to_try[] = {
- KVM_ARM_TARGET_AEM_V8,
- KVM_ARM_TARGET_FOUNDATION_V8,
- KVM_ARM_TARGET_CORTEX_A57,
- QEMU_KVM_ARM_TARGET_NONE
- };
/*
* target = -1 informs kvm_arm_create_scratch_host_vcpu()
* to use the preferred target
@@ -280,9 +258,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
if (kvm_arm_pmu_supported()) {
init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
pmu_supported = true;
+ features |= 1ULL << ARM_FEATURE_PMU;
}
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
return false;
}
@@ -448,7 +427,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_V8;
features |= 1ULL << ARM_FEATURE_NEON;
features |= 1ULL << ARM_FEATURE_AARCH64;
- features |= 1ULL << ARM_FEATURE_PMU;
features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
ahcf->features = features;
@@ -675,19 +653,11 @@ static void kvm_arm_set_device_addr(KVMDevice *kd)
{
struct kvm_device_attr *attr = &kd->kdattr;
int ret;
+ uint64_t addr = kd->kda.addr;
- /* If the device control API is available and we have a device fd on the
- * KVMDevice struct, let's use the newer API
- */
- if (kd->dev_fd >= 0) {
- uint64_t addr = kd->kda.addr;
-
- addr |= kd->kda_addr_ormask;
- attr->addr = (uintptr_t)&addr;
- ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
- } else {
- ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
- }
+ addr |= kd->kda_addr_ormask;
+ attr->addr = (uintptr_t)&addr;
+ ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
if (ret < 0) {
fprintf(stderr, "Failed to set device address: %s\n",
@@ -968,13 +938,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu)
}
}
-void kvm_arm_cpu_post_load(ARMCPU *cpu)
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
{
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return false;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+
/* KVM virtual time adjustment */
if (cpu->kvm_adjvtime) {
cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
cpu->kvm_vtime_dirty = true;
}
+
+ return true;
}
void kvm_arm_reset_vcpu(ARMCPU *cpu)
@@ -1793,6 +1774,11 @@ bool kvm_arm_sve_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
+bool kvm_arm_mte_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
@@ -1821,7 +1807,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
probed = true;
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
error_report("failed to create scratch VCPU with SVE enabled");
abort();
}
@@ -1860,6 +1846,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret;
@@ -1868,8 +1859,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
CPUARMState *env = &cpu->env;
uint64_t psciver;
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
error_report("KVM is not supported for this guest CPU type");
return -EINVAL;
}
@@ -1888,13 +1878,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (!arm_feature(env, ARM_FEATURE_AARCH64)) {
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
}
- if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
- cpu->has_pmu = false;
- }
if (cpu->has_pmu) {
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
- } else {
- env->features &= ~(1ULL << ARM_FEATURE_PMU);
}
if (cpu_isar_feature(aa64_sve, cpu)) {
assert(kvm_arm_sve_supported());
@@ -2047,7 +2032,7 @@ static int kvm_arch_put_sve(CPUState *cs)
return 0;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
uint64_t val;
uint32_t fpr;
@@ -2231,7 +2216,7 @@ static int kvm_arch_get_sve(CPUState *cs)
return 0;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
uint64_t val;
unsigned int el;
@@ -2378,7 +2363,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
*/
if (code == BUS_MCEERR_AR) {
kvm_cpu_synchronize_state(c);
- if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+ if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
kvm_inject_arm_sea(c);
} else {
error_report("failed to record the error");
@@ -2422,3 +2407,69 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
}
return 0;
}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ static bool tried_to_enable;
+ static bool succeeded_to_enable;
+ Error *mte_migration_blocker = NULL;
+ ARMCPU *cpu = ARM_CPU(cpuobj);
+ int ret;
+
+ if (!tried_to_enable) {
+ /*
+ * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+ * sense), and we only want a single migration blocker as well.
+ */
+ tried_to_enable = true;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+ return;
+ }
+
+ /* TODO: Add migration support with MTE enabled */
+ error_setg(&mte_migration_blocker,
+ "Live migration disabled due to MTE enabled");
+ if (migrate_add_blocker(&mte_migration_blocker, errp)) {
+ error_free(mte_migration_blocker);
+ return;
+ }
+
+ succeeded_to_enable = true;
+ }
+
+ if (succeeded_to_enable) {
+ cpu->kvm_mte = true;
+ }
+}
+
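A hypothetical caller of kvm_arm_enable_mte() during CPU creation, shown only to illustrate the error-handling contract:

    Error *local_err = NULL;

    kvm_arm_enable_mte(cpuobj, &local_err);    /* cpuobj: the CPU's Object * */
    if (local_err) {
        error_report_err(local_err);           /* report and free the error */
    }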
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ ARMCPU *cpu = arm_cpu;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t linestate_bit;
+ int irq_id;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ irq_id = KVM_ARM_IRQ_CPU_IRQ;
+ linestate_bit = CPU_INTERRUPT_HARD;
+ break;
+ case ARM_CPU_FIQ:
+ irq_id = KVM_ARM_IRQ_CPU_FIQ;
+ linestate_bit = CPU_INTERRUPT_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (level) {
+ env->irq_line_state |= linestate_bit;
+ } else {
+ env->irq_line_state &= ~linestate_bit;
+ }
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+}
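arm_cpu_kvm_set_irq() takes the ARMCPU pointer as a void * so it can be wired up as a generic IRQ handler; a hypothetical caller pulsing the IRQ line might look like this:

    arm_cpu_kvm_set_irq(cpu, ARM_CPU_IRQ, 1);   /* assert IRQ (cpu is the ARMCPU) */
    arm_cpu_kvm_set_irq(cpu, ARM_CPU_IRQ, 0);   /* deassert IRQ */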
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index cfaa0d9..7dc83ca 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -11,7 +11,8 @@
#ifndef QEMU_KVM_ARM_H
#define QEMU_KVM_ARM_H
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
+#include "target/arm/cpu-qom.h"
#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)
@@ -22,17 +23,15 @@
* @devid: the KVM device ID
* @group: device control API group for setting addresses
* @attr: device control API address type
- * @dev_fd: device control device file descriptor (or -1 if not supported)
+ * @dev_fd: device control device file descriptor
* @addr_ormask: value to be OR'ed with resolved address
*
- * Remember the memory region @mr, and when it is mapped by the
- * machine model, tell the kernel that base address using the
- * KVM_ARM_SET_DEVICE_ADDRESS ioctl or the newer device control API. @devid
- * should be the ID of the device as defined by KVM_ARM_SET_DEVICE_ADDRESS or
- * the arm-vgic device in the device control API.
- * The machine model may map
- * and unmap the device multiple times; the kernel will only be told the final
- * address at the point where machine init is complete.
+ * Remember the memory region @mr, and when it is mapped by the machine
+ * model, tell the kernel that base address using the device control API.
+ * @devid should be the ID of the device as defined by the arm-vgic device
+ * in the device control API. The machine model may map and unmap the device
+ * multiple times; the kernel will only be told the final address at the
+ * point where machine init is complete.
*/
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
uint64_t attr, int dev_fd, uint64_t addr_ormask);
@@ -85,8 +84,10 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu);
* @cpu: ARMCPU
*
* Called from cpu_post_load() to update KVM CPU state from the cpreg list.
+ *
+ * Returns: true on success, or false if write_list_to_kvmstate failed.
*/
-void kvm_arm_cpu_post_load(ARMCPU *cpu);
+bool kvm_arm_cpu_post_load(ARMCPU *cpu);
/**
* kvm_arm_reset_vcpu:
@@ -96,13 +97,9 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
-#ifdef CONFIG_KVM
+struct kvm_vcpu_init;
/**
* kvm_arm_create_scratch_host_vcpu:
- * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
- * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
- * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
- * an empty array.
* @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
* @init: filled in with the necessary values for creating a host
* vcpu. If NULL is provided, will not init the vCPU (though the cpufd
@@ -115,8 +112,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu);
* Returns: true on success (and fdarray and init are filled in),
* false on failure (and fdarray and init are not valid).
*/
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init);
/**
@@ -189,6 +185,13 @@ bool kvm_arm_pmu_supported(void);
bool kvm_arm_sve_supported(void);
/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
+/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
* @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
@@ -214,75 +217,8 @@ void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
-#else
-
-/*
- * It's safe to call these functions without KVM support.
- * They should either do nothing or return "not supported".
- */
-static inline bool kvm_arm_aarch32_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_pmu_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_sve_supported(void)
-{
- return false;
-}
-
-/*
- * These functions should never actually be called without KVM support.
- */
-static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_vgic_probe(void)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
-{
- g_assert_not_reached();
-}
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
-static inline void kvm_arm_pmu_init(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
-{
- g_assert_not_reached();
-}
-
-static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-#endif
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level);
#endif
diff --git a/target/arm/machine.c b/target/arm/machine.c
index a3c1e05..e442d48 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -1,12 +1,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
-#include "migration/cpu.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
#include "target/arm/gtimer.h"
static bool vfp_needed(void *opaque)
@@ -240,7 +241,6 @@ static const VMStateDescription vmstate_iwmmxt = {
}
};
-#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
* and ARMPredicateReg is actively empty. This triggers errors
* in the expansion of the VMSTATE macros.
@@ -320,7 +320,6 @@ static const VMStateDescription vmstate_za = {
VMSTATE_END_OF_LIST()
}
};
-#endif /* AARCH64 */
static bool serror_needed(void *opaque)
{
@@ -977,15 +976,9 @@ static int cpu_post_load(void *opaque, int version_id)
}
if (kvm_enabled()) {
- if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ if (!kvm_arm_cpu_post_load(cpu)) {
return -1;
}
- /* Note that it's OK for the TCG side not to know about
- * every register in the list; KVM is authoritative if
- * we're using it.
- */
- write_list_to_cpustate(cpu);
- kvm_arm_cpu_post_load(cpu);
} else {
if (!write_list_to_cpustate(cpu)) {
return -1;
@@ -1101,10 +1094,8 @@ const VMStateDescription vmstate_arm_cpu = {
&vmstate_pmsav7,
&vmstate_pmsav8,
&vmstate_m_security,
-#ifdef TARGET_AARCH64
&vmstate_sve,
&vmstate_za,
-#endif
&vmstate_serror,
&vmstate_irq_line_state,
&vmstate_wfxt_timer,
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 2e10464..7aa81e3 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -1,41 +1,58 @@
arm_ss = ss.source_set()
+arm_common_ss = ss.source_set()
arm_ss.add(files(
- 'cpu.c',
- 'debug_helper.c',
'gdbstub.c',
- 'helper.c',
- 'vfp_helper.c',
))
-arm_ss.add(zlib)
-
-arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c'))
-arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
- 'gdbstub64.c',
-))
+ 'gdbstub64.c'))
arm_system_ss = ss.source_set()
+arm_common_system_ss = ss.source_set()
arm_system_ss.add(files(
+ 'arm-qmp-cmds.c',
+))
+arm_system_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'))
+arm_system_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
+
+arm_user_ss = ss.source_set()
+arm_user_ss.add(files('cpu.c'))
+arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c',
+))
+arm_user_ss.add(files(
+ 'debug_helper.c',
+ 'helper.c',
+ 'vfp_fpscr.c',
+))
+
+arm_common_system_ss.add(files('cpu.c'))
+arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c'))
+arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
+arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
- 'arm-qmp-cmds.c',
'cortex-regs.c',
+ 'debug_helper.c',
+ 'helper.c',
'machine.c',
'ptw.c',
+ 'vfp_fpscr.c',
))
-arm_user_ss = ss.source_set()
-
subdir('hvf')
if 'CONFIG_TCG' in config_all_accel
subdir('tcg')
else
- arm_ss.add(files('tcg-stubs.c'))
+ arm_common_system_ss.add(files('tcg-stubs.c'))
endif
target_arch += {'arm': arm_ss}
target_system_arch += {'arm': arm_system_ss}
target_user_arch += {'arm': arm_user_ss}
+target_common_arch += {'arm': arm_common_ss}
+target_common_system_arch += {'arm': arm_common_system_ss}
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 4476b32..44170d8 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -10,15 +10,14 @@
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/probe.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "idau.h"
-#ifdef CONFIG_TCG
-# include "tcg/oversized-guest.h"
-#endif
typedef struct S1Translate {
/*
@@ -74,17 +73,21 @@ typedef struct S1Translate {
} S1Translate;
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
+ vaddr address,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi);
static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
+ vaddr address,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi);
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int user_rw, int prot_rw, int xn, int pxn,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa);
+
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
[0] = 32,
@@ -96,6 +99,21 @@ static const uint8_t pamax_map[] = {
[6] = 52,
};
+uint8_t round_down_to_parange_index(uint8_t bit_size)
+{
+ for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) {
+ if (pamax_map[i] <= bit_size) {
+ return i;
+ }
+ }
+ g_assert_not_reached();
+}
+
+uint8_t round_down_to_parange_bit_size(uint8_t bit_size)
+{
+ return pamax_map[round_down_to_parange_index(bit_size)];
+}
+
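A worked example against the pamax_map table above (illustrative input, not from the patch):

    /* pamax_map[2] == 40 is the largest entry <= 41 */
    round_down_to_parange_index(41);      /* -> 2 */
    round_down_to_parange_bit_size(41);   /* -> 40 */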
/*
* The cpu-specific constant value of PAMax; also used by hw/arm/virt.
* Note that machvirt_init calls this on a CPU that is inited but not realized!
@@ -265,6 +283,8 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_E2:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
break;
case ARMMMUIdx_Phys_S:
@@ -564,7 +584,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
};
GetPhysAddrResult s2 = { };
- if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+ if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
goto fail;
}
@@ -717,7 +737,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
-#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
+#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG)
uint64_t cur_val;
void *host = ptw->out_host;
@@ -819,7 +839,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
ptw->out_rw = true;
}
-#ifdef CONFIG_ATOMIC64
if (ptw->out_be) {
old_val = cpu_to_be64(old_val);
new_val = cpu_to_be64(new_val);
@@ -831,36 +850,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
cur_val = le64_to_cpu(cur_val);
}
-#else
- /*
- * We can't support the full 64-bit atomic cmpxchg on the host.
- * Because this is only used for FEAT_HAFDBS, which is only for AA64,
- * we know that TCG_OVERSIZED_GUEST is set, which means that we are
- * running in round-robin mode and could only race with dma i/o.
- */
-#if !TCG_OVERSIZED_GUEST
-# error "Unexpected configuration"
-#endif
- bool locked = bql_locked();
- if (!locked) {
- bql_lock();
- }
- if (ptw->out_be) {
- cur_val = ldq_be_p(host);
- if (cur_val == old_val) {
- stq_be_p(host, new_val);
- }
- } else {
- cur_val = ldq_le_p(host);
- if (cur_val == old_val) {
- stq_le_p(host, new_val);
- }
- }
- if (!locked) {
- bql_unlock();
- }
-#endif
-
return cur_val;
#else
/* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
@@ -1131,7 +1120,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
hwaddr phys_addr;
uint32_t dacr;
bool ns;
- int user_prot;
+ ARMSecuritySpace out_space;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
@@ -1223,16 +1212,19 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
g_assert_not_reached();
}
}
+ out_space = ptw->in_space;
+ if (ns) {
+ /*
+ * The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the output space will already be non-secure.
+ */
+ out_space = ARMSS_NonSecure;
+ }
if (domain_prot == 3) {
result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
- if (pxn && !regime_is_user(env, mmu_idx)) {
- xn = 1;
- }
- if (xn && access_type == MMU_INST_FETCH) {
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
+ int user_rw, prot_rw;
if (arm_feature(env, ARM_FEATURE_V6K) &&
(regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
@@ -1242,37 +1234,23 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
fi->type = ARMFault_AccessFlag;
goto do_fault;
}
- result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
- user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
+ prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
} else {
- result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
- user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
- }
- if (result->f.prot && !xn) {
- result->f.prot |= PAGE_EXEC;
+ prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
}
+
+ result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
+ xn, pxn, result->f.attrs.space, out_space);
if (!(result->f.prot & (1 << access_type))) {
/* Access permission fault. */
fi->type = ARMFault_Permission;
goto do_fault;
}
- if (regime_is_pan(env, mmu_idx) &&
- !regime_is_user(env, mmu_idx) &&
- user_prot &&
- access_type != MMU_INST_FETCH) {
- /* Privileged Access Never fault */
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
- }
- if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- result->f.attrs.secure = false;
- result->f.attrs.space = ARMSS_NonSecure;
}
+ result->f.attrs.space = out_space;
+ result->f.attrs.secure = arm_space_is_secure(out_space);
result->f.phys_addr = phys_addr;
return false;
do_fault:
@@ -1340,25 +1318,24 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
* @env: CPUARMState
* @mmu_idx: MMU index indicating required translation regime
* @is_aa64: TRUE if AArch64
- * @ap: The 2-bit simple AP (AP[2:1])
+ * @user_rw: Translated AP for user access
+ * @prot_rw: Translated AP for privileged access
* @xn: XN (execute-never) bit
* @pxn: PXN (privileged execute-never) bit
* @in_pa: The original input pa space
* @out_pa: The output pa space, modified by NSTable, NS, and NSE
*/
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
- int ap, int xn, int pxn,
+ int user_rw, int prot_rw, int xn, int pxn,
ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
ARMCPU *cpu = env_archcpu(env);
bool is_user = regime_is_user(env, mmu_idx);
- int prot_rw, user_rw;
bool have_wxn;
int wxn = 0;
assert(!regime_is_stage2(mmu_idx));
- user_rw = simple_ap_to_rw_prot_is_user(ap, true);
if (is_user) {
prot_rw = user_rw;
} else {
@@ -1376,8 +1353,6 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
regime_is_pan(env, mmu_idx) &&
(regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
- } else {
- prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
}
}
@@ -1669,12 +1644,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
* @ptw: Current and next stage parameters for the walk.
* @address: virtual address to get physical address for
* @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @memop: memory operation feeding this access, or 0 for none
* @result: set on translation success,
* @fi: set to fault info if the translation fails
*/
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
@@ -1684,7 +1660,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t ttbr;
hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
- target_ulong page_size;
+ uint64_t page_size;
uint64_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
@@ -1757,7 +1733,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* validation to do here.
*/
if (inputsize < addrsize) {
- target_ulong top_bits = sextract64(address, inputsize,
+ uint64_t top_bits = sextract64(address, inputsize,
addrsize - inputsize);
if (-top_bits != param.select) {
/* The gap between the two regions is a Translation fault */
@@ -2013,8 +1989,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
xn = extract64(attrs, 53, 2);
result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
}
+
+ result->cacheattrs.is_s2_format = true;
+ result->cacheattrs.attrs = extract32(attrs, 2, 4);
+ /*
+ * Security state does not really affect HCR_EL2.FWB;
+ * we only need to filter FWB for aa32 or other FEAT.
+ */
+ device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+ result->cacheattrs.attrs);
} else {
int nse, ns = extract32(attrs, 5, 1);
+ uint8_t attrindx;
+ uint64_t mair;
+ int user_rw, prot_rw;
+
switch (out_space) {
case ARMSS_Root:
/*
@@ -2080,12 +2069,58 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
xn = 0;
ap &= ~1;
}
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
/*
* Note that we modified ptw->in_space earlier for NSTable, but
* result->f.attrs retains a copy of the original security space.
*/
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
- result->f.attrs.space, out_space);
+ result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
+ xn, pxn, result->f.attrs.space, out_space);
+
+ /* Index into MAIR registers for cache attributes */
+ attrindx = extract32(attrs, 2, 3);
+ mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ result->cacheattrs.is_s2_format = false;
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+ result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
+ }
+ device = S1_attrs_are_device(result->cacheattrs.attrs);
+ }
+
+ /*
+ * Enable alignment checks on Device memory.
+ *
+ * Per R_XCHFJ, the correct ordering for alignment, permission,
+ * and stage 2 faults is:
+ * - Alignment fault caused by the memory type
+ * - Permission fault
+ * - A stage 2 fault on the memory access
+ * Perform the alignment check now, so that we recognize it in
+ * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent
+ * softmmu tlb hit will also check the alignment; clear along the
+ * non-device path so that tlb_fill_flags is consistent in the
+ * event of restart_atomic_update.
+ *
+ * In v7, for a CPU without the Virtualization Extensions this
+ * access is UNPREDICTABLE; we choose to make it take the alignment
+ * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+ * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+ */
+ if (device) {
+ unsigned a_bits = memop_atomicity_bits(memop);
+ if (address & ((1 << a_bits) - 1)) {
+ fi->type = ARMFault_Alignment;
+ goto do_fault;
+ }
+ result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+ } else {
+ result->f.tlb_fill_flags = 0;
}
if (!(result->f.prot & (1 << access_type))) {
@@ -2115,51 +2150,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
result->f.attrs.space = out_space;
result->f.attrs.secure = arm_space_is_secure(out_space);
- if (regime_is_stage2(mmu_idx)) {
- result->cacheattrs.is_s2_format = true;
- result->cacheattrs.attrs = extract32(attrs, 2, 4);
- /*
- * Security state does not really affect HCR_EL2.FWB;
- * we only need to filter FWB for aa32 or other FEAT.
- */
- device = S2_attrs_are_device(arm_hcr_el2_eff(env),
- result->cacheattrs.attrs);
- } else {
- /* Index into MAIR registers for cache attributes */
- uint8_t attrindx = extract32(attrs, 2, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
- assert(attrindx <= 7);
- result->cacheattrs.is_s2_format = false;
- result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
-
- /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
- if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
- }
- device = S1_attrs_are_device(result->cacheattrs.attrs);
- }
-
- /*
- * Enable alignment checks on Device memory.
- *
- * Per R_XCHFJ, this check is mis-ordered. The correct ordering
- * for alignment, permission, and stage 2 faults should be:
- * - Alignment fault caused by the memory type
- * - Permission fault
- * - A stage 2 fault on the memory access
- * but due to the way the TCG softmmu TLB operates, we will have
- * implicitly done the permission check and the stage2 lookup in
- * finding the TLB entry, so the alignment check cannot be done sooner.
- *
- * In v7, for a CPU without the Virtualization Extensions this
- * access is UNPREDICTABLE; we choose to make it take the alignment
- * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
- * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
- */
- if (device) {
- result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
- }
-
/*
* For FEAT_LPA2 and effective DS, the SH field in the attributes
* was re-purposed for output address bits. The SH attribute in
@@ -3202,7 +3192,7 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
*/
static bool get_phys_addr_disabled(CPUARMState *env,
S1Translate *ptw,
- target_ulong address,
+ vaddr address,
MMUAccessType access_type,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
@@ -3285,8 +3275,8 @@ static bool get_phys_addr_disabled(CPUARMState *env,
}
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
+ vaddr address,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
@@ -3298,7 +3288,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
ARMSecuritySpace ipa_space;
uint64_t hcr;
- ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, address, access_type,
+ memop, result, fi);
/* If S1 fails, return early. */
if (ret) {
@@ -3324,7 +3315,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
cacheattrs1 = result->cacheattrs;
memset(result, 0, sizeof(*result));
- ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
+ memop, result, fi);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
@@ -3390,8 +3382,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
}
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
+ vaddr address,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
@@ -3454,7 +3446,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
if (arm_feature(env, ARM_FEATURE_EL2) &&
!regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
return get_phys_addr_twostage(env, ptw, address, access_type,
- result, fi);
+ memop, result, fi);
}
/* fall through */
@@ -3517,7 +3509,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
}
if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
+ return get_phys_addr_lpae(env, ptw, address, access_type,
+ memop, result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7) ||
regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -3527,12 +3520,13 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
}
static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
+ vaddr address,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
- if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+ if (get_phys_addr_nogpc(env, ptw, address, access_type,
+ memop, result, fi)) {
return true;
}
if (!granule_protection_check(env, result->f.phys_addr,
@@ -3543,8 +3537,8 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
return false;
}
-bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
- MMUAccessType access_type,
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
@@ -3553,16 +3547,13 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
.in_mmu_idx = mmu_idx,
.in_space = space,
};
- return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
+ return get_phys_addr_nogpc(env, &ptw, address, access_type,
+ memop, result, fi);
}
-bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- S1Translate ptw = {
- .in_mmu_idx = mmu_idx,
- };
ARMSecuritySpace ss;
switch (mmu_idx) {
@@ -3604,6 +3595,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_rme, env_archcpu(env))) {
ss = ARMSS_Root;
@@ -3621,27 +3614,33 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
g_assert_not_reached();
}
- ptw.in_space = ss;
- return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+ return ss;
}
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_space = ss,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+ };
+
+ return get_phys_addr_gpc(env, &ptw, address, access_type,
+ memop, result, fi);
+}
+
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+ MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
+{
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
- bool ret;
-
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+ bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
@@ -3649,3 +3648,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
}
return res.f.phys_addr;
}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+ hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+ if (res != -1) {
+ return res;
+ }
+
+ /*
+     * Memory may be accessible via an "unprivileged load/store" variant.
+     * In that case get_a64_user_mem_index() generates the op with an
+     * unprivileged mmu idx, so retry the lookup with that index.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+ default:
+ return -1;
+ }
+}
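
A quick standalone restatement of the Device-memory alignment test introduced in
get_phys_addr_lpae() above (a sketch, not part of the patch; memop_atomicity_bits()
is assumed to return the log2 of the alignment the access's atomicity requires,
and 0 when there is no requirement):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: the check performed before TLB_CHECK_ALIGNED is set. */
    static bool device_access_misaligned(uint64_t address, unsigned a_bits)
    {
        /* Any set bit below the required alignment raises ARMFault_Alignment. */
        return (address & ((UINT64_C(1) << a_bits) - 1)) != 0;
    }

For instance, a 4-byte atomic (a_bits == 2) at 0x1002 faults, while the same access
at 0x1004 passes and the TLB entry is flagged so later softmmu hits repeat the check.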
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index 152b172..5e5166c 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -21,7 +21,30 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
{
g_assert_not_reached();
}
-/* Temporarily while cpu_get_tb_cpu_state() is still in common code */
-void assert_hflags_rebuild_correctly(CPUARMState *env)
+
+/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
+void define_tlb_insn_regs(ARMCPU *cpu)
+{
+}
+
+/* With KVM, we never use float_status, so these can be no-ops */
+void arm_set_default_fp_behaviours(float_status *s)
+{
+}
+
+void arm_set_ah_fp_behaviours(float_status *s)
+{
+}
+
+uint32_t vfp_get_fpsr_from_host(CPUARMState *env)
+{
+ return 0;
+}
+
+void vfp_clear_float_status_exc_flags(CPUARMState *env)
+{
+}
+
+void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
{
}
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 62df4c4..8c798cd 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -21,28 +21,40 @@
%rd 0:5
%esz_sd 22:1 !function=plus_2
+%esz_hs 22:1 !function=plus_1
%esz_hsd 22:2 !function=xor_2
%hl 11:1 21:1
%hlm 11:1 20:2
&r rn
+&rrr rd rn rm
&ri rd imm
+&rr rd rn
+&rr_sf rd rn sf
&rri_sf rd rn imm sf
+&rrr_sf rd rn rm sf
&i imm
&rr_e rd rn esz
+&rri_e rd rn imm esz
&rrr_e rd rn rm esz
&rrx_e rd rn rm idx esz
&rrrr_e rd rn rm ra esz
&qrr_e q rd rn esz
+&qrri_e q rd rn imm esz
&qrrr_e q rd rn rm esz
&qrrx_e q rd rn rm idx esz
&qrrrr_e q rd rn rm ra esz
@rr_h ........ ... ..... ...... rn:5 rd:5 &rr_e esz=1
+@rr_s ........ ... ..... ...... rn:5 rd:5 &rr_e esz=2
@rr_d ........ ... ..... ...... rn:5 rd:5 &rr_e esz=3
+@rr_e ........ esz:2 . ..... ...... rn:5 rd:5 &rr_e
@rr_sd ........ ... ..... ...... rn:5 rd:5 &rr_e esz=%esz_sd
+@rr_hsd ........ ... ..... ...... rn:5 rd:5 &rr_e esz=%esz_hsd
+@rrr_b ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=0
@rrr_h ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=1
+@rrr_s ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=2
@rrr_d ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=3
@rrr_sd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_sd
@rrr_hsd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_hsd
@@ -54,11 +66,20 @@
@rrx_d ........ .. . rm:5 .... idx:1 . rn:5 rd:5 &rrx_e esz=3
@rr_q1e0 ........ ........ ...... rn:5 rd:5 &qrr_e q=1 esz=0
+@rr_q1e2 ........ ........ ...... rn:5 rd:5 &qrr_e q=1 esz=2
@r2r_q1e0 ........ ........ ...... rm:5 rd:5 &qrrr_e rn=%rd q=1 esz=0
@rrr_q1e0 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=0
@rrr_q1e3 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=3
@rrrr_q1e3 ........ ... rm:5 . ra:5 rn:5 rd:5 &qrrrr_e q=1 esz=3
+@qrr_b . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=0
+@qrr_h . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=1
+@qrr_s . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=2
+@qrr_bh . q:1 ...... . esz:1 ...... ...... rn:5 rd:5 &qrr_e
+@qrr_hs . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=%esz_hs
+@qrr_sd . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=%esz_sd
+@qrr_e . q:1 ...... esz:2 ...... ...... rn:5 rd:5 &qrr_e
+
@qrrr_b . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=0
@qrrr_h . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=1
@qrrr_s . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=2
@@ -155,7 +176,7 @@ UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_32
EXTR 1 00 100111 1 0 rm:5 imm:6 rn:5 rd:5 &extract sf=1
EXTR 0 00 100111 0 0 rm:5 0 imm:5 rn:5 rd:5 &extract sf=0
-# Branches
+### Branches
%imm26 0:s26 !function=times_4
@branch . ..... .......................... &i imm=%imm26
@@ -239,6 +260,9 @@ WFIT 1101 0101 0000 0011 0001 0000 001 rd:5
CLREX 1101 0101 0000 0011 0011 ---- 010 11111
DSB_DMB 1101 0101 0000 0011 0011 domain:2 types:2 10- 11111
+# For the DSB nXS variant, types always equals MBReqTypes_All and we ignore the
+# domain bits.
+DSB_nXS 1101 0101 0000 0011 0011 -- 10 001 11111
ISB 1101 0101 0000 0011 0011 ---- 110 11111
SB 1101 0101 0000 0011 0011 0000 111 11111
@@ -285,7 +309,7 @@ HLT 1101 0100 010 ................ 000 00 @i16
# DCPS2 1101 0100 101 ................ 000 10 @i16
# DCPS3 1101 0100 101 ................ 000 11 @i16
-# Loads and stores
+### Loads and stores
&stxr rn rt rt2 rs sz lasr
&stlr rn rt sz lasr
@@ -643,6 +667,138 @@ CPYP 00 011 1 01000 ..... .... 01 ..... ..... @cpy
CPYM 00 011 1 01010 ..... .... 01 ..... ..... @cpy
CPYE 00 011 1 01100 ..... .... 01 ..... ..... @cpy
+### Data Processing (register)
+
+# Data Processing (2-source)
+
+@rrr . .......... rm:5 ...... rn:5 rd:5 &rrr
+@rrr_sf sf:1 .......... rm:5 ...... rn:5 rd:5 &rrr_sf
+
+UDIV . 00 11010110 ..... 00001 0 ..... ..... @rrr_sf
+SDIV . 00 11010110 ..... 00001 1 ..... ..... @rrr_sf
+LSLV . 00 11010110 ..... 00100 0 ..... ..... @rrr_sf
+LSRV . 00 11010110 ..... 00100 1 ..... ..... @rrr_sf
+ASRV . 00 11010110 ..... 00101 0 ..... ..... @rrr_sf
+RORV . 00 11010110 ..... 00101 1 ..... ..... @rrr_sf
+
+CRC32 0 00 11010110 ..... 0100 00 ..... ..... @rrr_b
+CRC32 0 00 11010110 ..... 0100 01 ..... ..... @rrr_h
+CRC32 0 00 11010110 ..... 0100 10 ..... ..... @rrr_s
+CRC32 1 00 11010110 ..... 0100 11 ..... ..... @rrr_d
+
+CRC32C 0 00 11010110 ..... 0101 00 ..... ..... @rrr_b
+CRC32C 0 00 11010110 ..... 0101 01 ..... ..... @rrr_h
+CRC32C 0 00 11010110 ..... 0101 10 ..... ..... @rrr_s
+CRC32C 1 00 11010110 ..... 0101 11 ..... ..... @rrr_d
+
+SUBP 1 00 11010110 ..... 000000 ..... ..... @rrr
+SUBPS 1 01 11010110 ..... 000000 ..... ..... @rrr
+IRG 1 00 11010110 ..... 000100 ..... ..... @rrr
+GMI 1 00 11010110 ..... 000101 ..... ..... @rrr
+
+PACGA 1 00 11010110 ..... 001100 ..... ..... @rrr
+
+# Data Processing (1-source)
+
+@rr . .......... ..... ...... rn:5 rd:5 &rr
+@rr_sf sf:1 .......... ..... ...... rn:5 rd:5 &rr_sf
+
+RBIT . 10 11010110 00000 000000 ..... ..... @rr_sf
+REV16 . 10 11010110 00000 000001 ..... ..... @rr_sf
+REV32 . 10 11010110 00000 000010 ..... ..... @rr_sf
+REV64 1 10 11010110 00000 000011 ..... ..... @rr
+
+CLZ . 10 11010110 00000 000100 ..... ..... @rr_sf
+CLS . 10 11010110 00000 000101 ..... ..... @rr_sf
+
+&pacaut rd rn z
+@pacaut . .. ........ ..... .. z:1 ... rn:5 rd:5 &pacaut
+
+PACIA 1 10 11010110 00001 00.000 ..... ..... @pacaut
+PACIB 1 10 11010110 00001 00.001 ..... ..... @pacaut
+PACDA 1 10 11010110 00001 00.010 ..... ..... @pacaut
+PACDB 1 10 11010110 00001 00.011 ..... ..... @pacaut
+
+AUTIA 1 10 11010110 00001 00.100 ..... ..... @pacaut
+AUTIB 1 10 11010110 00001 00.101 ..... ..... @pacaut
+AUTDA 1 10 11010110 00001 00.110 ..... ..... @pacaut
+AUTDB 1 10 11010110 00001 00.111 ..... ..... @pacaut
+
+XPACI 1 10 11010110 00001 010000 11111 rd:5
+XPACD 1 10 11010110 00001 010001 11111 rd:5
+
+# Logical (shifted reg)
+
+&logic_shift rd rn rm sf sa st n
+@logic_shift sf:1 .. ..... st:2 n:1 rm:5 sa:6 rn:5 rd:5 &logic_shift
+
+AND_r . 00 01010 .. . ..... ...... ..... ..... @logic_shift
+ORR_r . 01 01010 .. . ..... ...... ..... ..... @logic_shift
+EOR_r . 10 01010 .. . ..... ...... ..... ..... @logic_shift
+ANDS_r . 11 01010 .. . ..... ...... ..... ..... @logic_shift
+
+# Add/subtract (shifted reg)
+
+&addsub_shift rd rn rm sf sa st
+@addsub_shift sf:1 .. ..... st:2 . rm:5 sa:6 rn:5 rd:5 &addsub_shift
+
+ADD_r . 00 01011 .. 0 ..... ...... ..... ..... @addsub_shift
+SUB_r . 10 01011 .. 0 ..... ...... ..... ..... @addsub_shift
+ADDS_r . 01 01011 .. 0 ..... ...... ..... ..... @addsub_shift
+SUBS_r . 11 01011 .. 0 ..... ...... ..... ..... @addsub_shift
+
+# Add/subtract (extended reg)
+
+&addsub_ext rd rn rm sf sa st
+@addsub_ext sf:1 .. ........ rm:5 st:3 sa:3 rn:5 rd:5 &addsub_ext
+
+ADD_ext . 00 01011001 ..... ... ... ..... ..... @addsub_ext
+SUB_ext . 10 01011001 ..... ... ... ..... ..... @addsub_ext
+ADDS_ext . 01 01011001 ..... ... ... ..... ..... @addsub_ext
+SUBS_ext . 11 01011001 ..... ... ... ..... ..... @addsub_ext
+
+# Add/subtract (carry)
+
+ADC . 00 11010000 ..... 000000 ..... ..... @rrr_sf
+ADCS . 01 11010000 ..... 000000 ..... ..... @rrr_sf
+SBC . 10 11010000 ..... 000000 ..... ..... @rrr_sf
+SBCS . 11 11010000 ..... 000000 ..... ..... @rrr_sf
+
+# Rotate right into flags
+
+RMIF 1 01 11010000 imm:6 00001 rn:5 0 mask:4
+
+# Evaluate into flags
+
+SETF8 0 01 11010000 00000 000010 rn:5 01101
+SETF16 0 01 11010000 00000 010010 rn:5 01101
+
+# Conditional compare
+
+CCMP sf:1 op:1 1 11010010 y:5 cond:4 imm:1 0 rn:5 0 nzcv:4
+
+# Conditional select
+
+CSEL sf:1 else_inv:1 011010100 rm:5 cond:4 0 else_inc:1 rn:5 rd:5
+
+# Data Processing (3-source)
+
+&rrrr rd rn rm ra
+@rrrr . .. ........ rm:5 . ra:5 rn:5 rd:5 &rrrr
+
+MADD_w 0 00 11011000 ..... 0 ..... ..... ..... @rrrr
+MSUB_w 0 00 11011000 ..... 1 ..... ..... ..... @rrrr
+MADD_x 1 00 11011000 ..... 0 ..... ..... ..... @rrrr
+MSUB_x 1 00 11011000 ..... 1 ..... ..... ..... @rrrr
+
+SMADDL 1 00 11011001 ..... 0 ..... ..... ..... @rrrr
+SMSUBL 1 00 11011001 ..... 1 ..... ..... ..... @rrrr
+UMADDL 1 00 11011101 ..... 0 ..... ..... ..... @rrrr
+UMSUBL 1 00 11011101 ..... 1 ..... ..... ..... @rrrr
+
+SMULH 1 00 11011010 ..... 0 11111 ..... ..... @rrr
+UMULH 1 00 11011110 ..... 0 11111 ..... ..... @rrr
+
### Cryptographic AES
AESE 01001110 00 10100 00100 10 ..... ..... @r2r_q1e0
@@ -1136,3 +1292,605 @@ FMADD 0001 1111 .. 0 ..... 0 ..... ..... ..... @rrrr_hsd
FMSUB 0001 1111 .. 0 ..... 1 ..... ..... ..... @rrrr_hsd
FNMADD 0001 1111 .. 1 ..... 0 ..... ..... ..... @rrrr_hsd
FNMSUB 0001 1111 .. 1 ..... 1 ..... ..... ..... @rrrr_hsd
+
+# Advanced SIMD Extract
+
+EXT_d 0010 1110 00 0 rm:5 00 imm:3 0 rn:5 rd:5
+EXT_q 0110 1110 00 0 rm:5 0 imm:4 0 rn:5 rd:5
+
+# Advanced SIMD Table Lookup
+
+TBL_TBX 0 q:1 00 1110 000 rm:5 0 len:2 tbx:1 00 rn:5 rd:5
+
+# Advanced SIMD Permute
+
+UZP1 0.00 1110 .. 0 ..... 0 001 10 ..... ..... @qrrr_e
+UZP2 0.00 1110 .. 0 ..... 0 101 10 ..... ..... @qrrr_e
+TRN1 0.00 1110 .. 0 ..... 0 010 10 ..... ..... @qrrr_e
+TRN2 0.00 1110 .. 0 ..... 0 110 10 ..... ..... @qrrr_e
+ZIP1 0.00 1110 .. 0 ..... 0 011 10 ..... ..... @qrrr_e
+ZIP2 0.00 1110 .. 0 ..... 0 111 10 ..... ..... @qrrr_e
+
+# Advanced SIMD Across Lanes
+
+ADDV 0.00 1110 .. 11000 11011 10 ..... ..... @qrr_e
+SADDLV 0.00 1110 .. 11000 00011 10 ..... ..... @qrr_e
+UADDLV 0.10 1110 .. 11000 00011 10 ..... ..... @qrr_e
+SMAXV 0.00 1110 .. 11000 01010 10 ..... ..... @qrr_e
+UMAXV 0.10 1110 .. 11000 01010 10 ..... ..... @qrr_e
+SMINV 0.00 1110 .. 11000 11010 10 ..... ..... @qrr_e
+UMINV 0.10 1110 .. 11000 11010 10 ..... ..... @qrr_e
+
+FMAXNMV_h 0.00 1110 00 11000 01100 10 ..... ..... @qrr_h
+FMAXNMV_s 0110 1110 00 11000 01100 10 ..... ..... @rr_q1e2
+
+FMINNMV_h 0.00 1110 10 11000 01100 10 ..... ..... @qrr_h
+FMINNMV_s 0110 1110 10 11000 01100 10 ..... ..... @rr_q1e2
+
+FMAXV_h 0.00 1110 00 11000 01111 10 ..... ..... @qrr_h
+FMAXV_s 0110 1110 00 11000 01111 10 ..... ..... @rr_q1e2
+
+FMINV_h 0.00 1110 10 11000 01111 10 ..... ..... @qrr_h
+FMINV_s 0110 1110 10 11000 01111 10 ..... ..... @rr_q1e2
+
+# Conversion between floating-point and fixed-point (general register)
+
+&fcvt rd rn esz sf shift
+%fcvt_shift32 10:5 !function=rsub_32
+%fcvt_shift64 10:6 !function=rsub_64
+
+@fcvt32 0 ....... .. ...... 1..... rn:5 rd:5 \
+ &fcvt sf=0 esz=%esz_hsd shift=%fcvt_shift32
+@fcvt64 1 ....... .. ...... ...... rn:5 rd:5 \
+ &fcvt sf=1 esz=%esz_hsd shift=%fcvt_shift64
+
+SCVTF_g . 0011110 .. 000010 ...... ..... ..... @fcvt32
+SCVTF_g . 0011110 .. 000010 ...... ..... ..... @fcvt64
+UCVTF_g . 0011110 .. 000011 ...... ..... ..... @fcvt32
+UCVTF_g . 0011110 .. 000011 ...... ..... ..... @fcvt64
+
+FCVTZS_g . 0011110 .. 011000 ...... ..... ..... @fcvt32
+FCVTZS_g . 0011110 .. 011000 ...... ..... ..... @fcvt64
+FCVTZU_g . 0011110 .. 011001 ...... ..... ..... @fcvt32
+FCVTZU_g . 0011110 .. 011001 ...... ..... ..... @fcvt64
+
+# Conversion between floating-point and integer (general register)
+
+@icvt sf:1 ....... .. ...... ...... rn:5 rd:5 \
+ &fcvt esz=%esz_hsd shift=0
+
+SCVTF_g . 0011110 .. 100010 000000 ..... ..... @icvt
+UCVTF_g . 0011110 .. 100011 000000 ..... ..... @icvt
+
+FCVTNS_g . 0011110 .. 100000 000000 ..... ..... @icvt
+FCVTNU_g . 0011110 .. 100001 000000 ..... ..... @icvt
+FCVTPS_g . 0011110 .. 101000 000000 ..... ..... @icvt
+FCVTPU_g . 0011110 .. 101001 000000 ..... ..... @icvt
+FCVTMS_g . 0011110 .. 110000 000000 ..... ..... @icvt
+FCVTMU_g . 0011110 .. 110001 000000 ..... ..... @icvt
+FCVTZS_g . 0011110 .. 111000 000000 ..... ..... @icvt
+FCVTZU_g . 0011110 .. 111001 000000 ..... ..... @icvt
+FCVTAS_g . 0011110 .. 100100 000000 ..... ..... @icvt
+FCVTAU_g . 0011110 .. 100101 000000 ..... ..... @icvt
+
+FJCVTZS 0 0011110 01 111110 000000 ..... ..... @rr
+
+FMOV_ws 0 0011110 00 100110 000000 ..... ..... @rr
+FMOV_sw 0 0011110 00 100111 000000 ..... ..... @rr
+
+FMOV_xd 1 0011110 01 100110 000000 ..... ..... @rr
+FMOV_dx 1 0011110 01 100111 000000 ..... ..... @rr
+
+# Move to/from upper half of 128-bit
+FMOV_xu 1 0011110 10 101110 000000 ..... ..... @rr
+FMOV_ux 1 0011110 10 101111 000000 ..... ..... @rr
+
+# Half-precision allows both sf=0 and sf=1 with identical results
+FMOV_xh - 0011110 11 100110 000000 ..... ..... @rr
+FMOV_hx - 0011110 11 100111 000000 ..... ..... @rr
+
+# Floating-point data processing (1 source)
+
+FMOV_s 00011110 .. 1 000000 10000 ..... ..... @rr_hsd
+FABS_s 00011110 .. 1 000001 10000 ..... ..... @rr_hsd
+FNEG_s 00011110 .. 1 000010 10000 ..... ..... @rr_hsd
+FSQRT_s 00011110 .. 1 000011 10000 ..... ..... @rr_hsd
+
+FRINTN_s 00011110 .. 1 001000 10000 ..... ..... @rr_hsd
+FRINTP_s 00011110 .. 1 001001 10000 ..... ..... @rr_hsd
+FRINTM_s 00011110 .. 1 001010 10000 ..... ..... @rr_hsd
+FRINTZ_s 00011110 .. 1 001011 10000 ..... ..... @rr_hsd
+FRINTA_s 00011110 .. 1 001100 10000 ..... ..... @rr_hsd
+FRINTX_s 00011110 .. 1 001110 10000 ..... ..... @rr_hsd
+FRINTI_s 00011110 .. 1 001111 10000 ..... ..... @rr_hsd
+
+BFCVT_s 00011110 01 1 000110 10000 ..... ..... @rr_s
+
+FRINT32Z_s 00011110 0. 1 010000 10000 ..... ..... @rr_sd
+FRINT32X_s 00011110 0. 1 010001 10000 ..... ..... @rr_sd
+FRINT64Z_s 00011110 0. 1 010010 10000 ..... ..... @rr_sd
+FRINT64X_s 00011110 0. 1 010011 10000 ..... ..... @rr_sd
+
+FCVT_s_ds 00011110 00 1 000101 10000 ..... ..... @rr
+FCVT_s_hs 00011110 00 1 000111 10000 ..... ..... @rr
+FCVT_s_sd 00011110 01 1 000100 10000 ..... ..... @rr
+FCVT_s_hd 00011110 01 1 000111 10000 ..... ..... @rr
+FCVT_s_sh 00011110 11 1 000100 10000 ..... ..... @rr
+FCVT_s_dh 00011110 11 1 000101 10000 ..... ..... @rr
+
+# Floating-point Immediate
+
+FMOVI_s 0001 1110 .. 1 imm:8 100 00000 rd:5 esz=%esz_hsd
+
+# Floating-point Compare
+
+FCMP 00011110 .. 1 rm:5 001000 rn:5 e:1 z:1 000 esz=%esz_hsd
+
+# Floating-point Conditional Compare
+
+FCCMP 00011110 .. 1 rm:5 cond:4 01 rn:5 e:1 nzcv:4 esz=%esz_hsd
+
+# Advanced SIMD Modified Immediate / Shift by Immediate
+
+%abcdefgh 16:3 5:5
+
+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%neon_rshift_i6 16:6 !function=rsub_64
+%neon_rshift_i5 16:5 !function=rsub_32
+%neon_rshift_i4 16:4 !function=rsub_16
+%neon_rshift_i3 16:3 !function=rsub_8
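+# E.g. with 64-bit elements the 6-bit field holds 64 - shift, so an encoded
+# value of 63 means a shift of 1 and an encoded value of 0 means a shift of 64.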
+
+@q_shri_b . q:1 .. ..... 0001 ... ..... . rn:5 rd:5 \
+ &qrri_e esz=0 imm=%neon_rshift_i3
+@q_shri_h . q:1 .. ..... 001 .... ..... . rn:5 rd:5 \
+ &qrri_e esz=1 imm=%neon_rshift_i4
+@q_shri_s . q:1 .. ..... 01 ..... ..... . rn:5 rd:5 \
+ &qrri_e esz=2 imm=%neon_rshift_i5
+@q_shri_d . 1 .. ..... 1 ...... ..... . rn:5 rd:5 \
+ &qrri_e esz=3 imm=%neon_rshift_i6 q=1
+
+@q_shli_b . q:1 .. ..... 0001 imm:3 ..... . rn:5 rd:5 &qrri_e esz=0
+@q_shli_h . q:1 .. ..... 001 imm:4 ..... . rn:5 rd:5 &qrri_e esz=1
+@q_shli_s . q:1 .. ..... 01 imm:5 ..... . rn:5 rd:5 &qrri_e esz=2
+@q_shli_d . 1 .. ..... 1 imm:6 ..... . rn:5 rd:5 &qrri_e esz=3 q=1
+
+FMOVI_v_h 0 q:1 00 1111 00000 ... 1111 11 ..... rd:5 %abcdefgh
+
+# MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode.
+Vimm 0 q:1 op:1 0 1111 00000 ... cmode:4 01 ..... rd:5 %abcdefgh
+
+SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_b
+SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_h
+SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_s
+SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_d
+
+USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_b
+USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_h
+USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_s
+USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_d
+
+SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_b
+SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_h
+SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_s
+SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_d
+
+USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_b
+USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_h
+USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_s
+USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_d
+
+SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_b
+SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_h
+SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_s
+SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_d
+
+URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_b
+URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_h
+URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_s
+URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_d
+
+SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_b
+SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_h
+SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_s
+SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_d
+
+URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_b
+URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_h
+URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_s
+URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_d
+
+SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_b
+SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_h
+SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_s
+SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_d
+
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_b
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_h
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_s
+SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_d
+
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_b
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_h
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_s
+SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_d
+
+SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_b
+SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_h
+SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_s
+
+USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_b
+USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_h
+USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_s
+
+SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_b
+SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_h
+SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_s
+
+RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_b
+RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_h
+RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_s
+
+SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_b
+SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_h
+SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_s
+SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_d
+
+UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_b
+UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_h
+UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_s
+UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_d
+
+SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_b
+SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_h
+SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_s
+SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_d
+
+SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_b
+SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_h
+SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_s
+
+UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_b
+UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_h
+UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_s
+
+SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_b
+SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_h
+SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_s
+
+SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_b
+SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_h
+SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_s
+
+UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... @q_shri_b
+UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... @q_shri_h
+UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... @q_shri_s
+
+SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_b
+SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_h
+SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_s
+
+# Advanced SIMD scalar shift by immediate
+
+@shri_b .... ..... 0001 ... ..... . rn:5 rd:5 \
+ &rri_e esz=0 imm=%neon_rshift_i3
+@shri_h .... ..... 001 .... ..... . rn:5 rd:5 \
+ &rri_e esz=1 imm=%neon_rshift_i4
+@shri_s .... ..... 01 ..... ..... . rn:5 rd:5 \
+ &rri_e esz=2 imm=%neon_rshift_i5
+@shri_d .... ..... 1 ...... ..... . rn:5 rd:5 \
+ &rri_e esz=3 imm=%neon_rshift_i6
+
+@shli_b .... ..... 0001 imm:3 ..... . rn:5 rd:5 &rri_e esz=0
+@shli_h .... ..... 001 imm:4 ..... . rn:5 rd:5 &rri_e esz=1
+@shli_s .... ..... 01 imm:5 ..... . rn:5 rd:5 &rri_e esz=2
+@shli_d .... ..... 1 imm:6 ..... . rn:5 rd:5 &rri_e esz=3
+
+SSHR_s 0101 11110 .... ... 00000 1 ..... ..... @shri_d
+USHR_s 0111 11110 .... ... 00000 1 ..... ..... @shri_d
+SSRA_s 0101 11110 .... ... 00010 1 ..... ..... @shri_d
+USRA_s 0111 11110 .... ... 00010 1 ..... ..... @shri_d
+SRSHR_s 0101 11110 .... ... 00100 1 ..... ..... @shri_d
+URSHR_s 0111 11110 .... ... 00100 1 ..... ..... @shri_d
+SRSRA_s 0101 11110 .... ... 00110 1 ..... ..... @shri_d
+URSRA_s 0111 11110 .... ... 00110 1 ..... ..... @shri_d
+SRI_s 0111 11110 .... ... 01000 1 ..... ..... @shri_d
+
+SHL_s 0101 11110 .... ... 01010 1 ..... ..... @shli_d
+SLI_s 0111 11110 .... ... 01010 1 ..... ..... @shli_d
+
+SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_b
+SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_h
+SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_s
+SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_d
+
+UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_b
+UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_h
+UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_s
+UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_d
+
+SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_b
+SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_h
+SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_s
+SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_d
+
+SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_b
+SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_h
+SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_s
+
+UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_b
+UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_h
+UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_s
+
+SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_b
+SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_h
+SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_s
+
+SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_b
+SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_h
+SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_s
+
+UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_b
+UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_h
+UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_s
+
+SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_b
+SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_h
+SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_s
+
+# Advanced SIMD scalar two-register miscellaneous
+
+SQABS_s 0101 1110 ..1 00000 01111 0 ..... ..... @rr_e
+SQNEG_s 0111 1110 ..1 00000 01111 0 ..... ..... @rr_e
+ABS_s 0101 1110 111 00000 10111 0 ..... ..... @rr
+NEG_s 0111 1110 111 00000 10111 0 ..... ..... @rr
+CMGT0_s 0101 1110 111 00000 10001 0 ..... ..... @rr
+CMGE0_s 0111 1110 111 00000 10001 0 ..... ..... @rr
+CMEQ0_s 0101 1110 111 00000 10011 0 ..... ..... @rr
+CMLE0_s 0111 1110 111 00000 10011 0 ..... ..... @rr
+CMLT0_s 0101 1110 111 00000 10101 0 ..... ..... @rr
+
+SQXTUN_s 0111 1110 ..1 00001 00101 0 ..... ..... @rr_e
+SQXTN_s 0101 1110 ..1 00001 01001 0 ..... ..... @rr_e
+UQXTN_s 0111 1110 ..1 00001 01001 0 ..... ..... @rr_e
+
+FCVTXN_s 0111 1110 011 00001 01101 0 ..... ..... @rr_s
+
+FCMGT0_s 0101 1110 111 11000 11001 0 ..... ..... @rr_h
+FCMGT0_s 0101 1110 1.1 00000 11001 0 ..... ..... @rr_sd
+
+FCMGE0_s 0111 1110 111 11000 11001 0 ..... ..... @rr_h
+FCMGE0_s 0111 1110 1.1 00000 11001 0 ..... ..... @rr_sd
+
+FCMEQ0_s 0101 1110 111 11000 11011 0 ..... ..... @rr_h
+FCMEQ0_s 0101 1110 1.1 00000 11011 0 ..... ..... @rr_sd
+
+FCMLE0_s 0111 1110 111 11000 11011 0 ..... ..... @rr_h
+FCMLE0_s 0111 1110 1.1 00000 11011 0 ..... ..... @rr_sd
+
+FCMLT0_s 0101 1110 111 11000 11101 0 ..... ..... @rr_h
+FCMLT0_s 0101 1110 1.1 00000 11101 0 ..... ..... @rr_sd
+
+FRECPE_s 0101 1110 111 11001 11011 0 ..... ..... @rr_h
+FRECPE_s 0101 1110 1.1 00001 11011 0 ..... ..... @rr_sd
+
+FRECPX_s 0101 1110 111 11001 11111 0 ..... ..... @rr_h
+FRECPX_s 0101 1110 1.1 00001 11111 0 ..... ..... @rr_sd
+
+FRSQRTE_s 0111 1110 111 11001 11011 0 ..... ..... @rr_h
+FRSQRTE_s 0111 1110 1.1 00001 11011 0 ..... ..... @rr_sd
+
+@icvt_h . ....... .. ...... ...... rn:5 rd:5 \
+ &fcvt sf=0 esz=1 shift=0
+@icvt_sd . ....... .. ...... ...... rn:5 rd:5 \
+ &fcvt sf=0 esz=%esz_sd shift=0
+
+SCVTF_f 0101 1110 011 11001 11011 0 ..... ..... @icvt_h
+SCVTF_f 0101 1110 0.1 00001 11011 0 ..... ..... @icvt_sd
+
+UCVTF_f 0111 1110 011 11001 11011 0 ..... ..... @icvt_h
+UCVTF_f 0111 1110 0.1 00001 11011 0 ..... ..... @icvt_sd
+
+FCVTNS_f 0101 1110 011 11001 10101 0 ..... ..... @icvt_h
+FCVTNS_f 0101 1110 0.1 00001 10101 0 ..... ..... @icvt_sd
+FCVTNU_f 0111 1110 011 11001 10101 0 ..... ..... @icvt_h
+FCVTNU_f 0111 1110 0.1 00001 10101 0 ..... ..... @icvt_sd
+
+FCVTPS_f 0101 1110 111 11001 10101 0 ..... ..... @icvt_h
+FCVTPS_f 0101 1110 1.1 00001 10101 0 ..... ..... @icvt_sd
+FCVTPU_f 0111 1110 111 11001 10101 0 ..... ..... @icvt_h
+FCVTPU_f 0111 1110 1.1 00001 10101 0 ..... ..... @icvt_sd
+
+FCVTMS_f 0101 1110 011 11001 10111 0 ..... ..... @icvt_h
+FCVTMS_f 0101 1110 0.1 00001 10111 0 ..... ..... @icvt_sd
+FCVTMU_f 0111 1110 011 11001 10111 0 ..... ..... @icvt_h
+FCVTMU_f 0111 1110 0.1 00001 10111 0 ..... ..... @icvt_sd
+
+FCVTZS_f 0101 1110 111 11001 10111 0 ..... ..... @icvt_h
+FCVTZS_f 0101 1110 1.1 00001 10111 0 ..... ..... @icvt_sd
+FCVTZU_f 0111 1110 111 11001 10111 0 ..... ..... @icvt_h
+FCVTZU_f 0111 1110 1.1 00001 10111 0 ..... ..... @icvt_sd
+
+FCVTAS_f 0101 1110 011 11001 11001 0 ..... ..... @icvt_h
+FCVTAS_f 0101 1110 0.1 00001 11001 0 ..... ..... @icvt_sd
+FCVTAU_f 0111 1110 011 11001 11001 0 ..... ..... @icvt_h
+FCVTAU_f 0111 1110 0.1 00001 11001 0 ..... ..... @icvt_sd
+
+%fcvt_f_sh_h 16:4 !function=rsub_16
+%fcvt_f_sh_s 16:5 !function=rsub_32
+%fcvt_f_sh_d 16:6 !function=rsub_64
+
+@fcvt_fixed_h .... .... . 001 .... ...... rn:5 rd:5 \
+ &fcvt sf=0 esz=1 shift=%fcvt_f_sh_h
+@fcvt_fixed_s .... .... . 01 ..... ...... rn:5 rd:5 \
+ &fcvt sf=0 esz=2 shift=%fcvt_f_sh_s
+@fcvt_fixed_d .... .... . 1 ...... ...... rn:5 rd:5 \
+ &fcvt sf=0 esz=3 shift=%fcvt_f_sh_d
+
+SCVTF_f 0101 1111 0 ....... 111001 ..... ..... @fcvt_fixed_h
+SCVTF_f 0101 1111 0 ....... 111001 ..... ..... @fcvt_fixed_s
+SCVTF_f 0101 1111 0 ....... 111001 ..... ..... @fcvt_fixed_d
+
+UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_h
+UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_s
+UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_d
+
+FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_h
+FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_s
+FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_d
+
+FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_h
+FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_s
+FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_d
+
+# Advanced SIMD two-register miscellaneous
+
+SQABS_v 0.00 1110 ..1 00000 01111 0 ..... ..... @qrr_e
+SQNEG_v 0.10 1110 ..1 00000 01111 0 ..... ..... @qrr_e
+ABS_v 0.00 1110 ..1 00000 10111 0 ..... ..... @qrr_e
+NEG_v 0.10 1110 ..1 00000 10111 0 ..... ..... @qrr_e
+CLS_v 0.00 1110 ..1 00000 01001 0 ..... ..... @qrr_e
+CLZ_v 0.10 1110 ..1 00000 01001 0 ..... ..... @qrr_e
+CNT_v 0.00 1110 001 00000 01011 0 ..... ..... @qrr_b
+NOT_v 0.10 1110 001 00000 01011 0 ..... ..... @qrr_b
+RBIT_v 0.10 1110 011 00000 01011 0 ..... ..... @qrr_b
+CMGT0_v 0.00 1110 ..1 00000 10001 0 ..... ..... @qrr_e
+CMGE0_v 0.10 1110 ..1 00000 10001 0 ..... ..... @qrr_e
+CMEQ0_v 0.00 1110 ..1 00000 10011 0 ..... ..... @qrr_e
+CMLE0_v 0.10 1110 ..1 00000 10011 0 ..... ..... @qrr_e
+CMLT0_v 0.00 1110 ..1 00000 10101 0 ..... ..... @qrr_e
+
+REV16_v 0.00 1110 001 00000 00011 0 ..... ..... @qrr_b
+REV32_v 0.10 1110 0.1 00000 00001 0 ..... ..... @qrr_bh
+REV64_v 0.00 1110 ..1 00000 00001 0 ..... ..... @qrr_e
+
+SADDLP_v 0.00 1110 ..1 00000 00101 0 ..... ..... @qrr_e
+UADDLP_v 0.10 1110 ..1 00000 00101 0 ..... ..... @qrr_e
+SADALP_v 0.00 1110 ..1 00000 01101 0 ..... ..... @qrr_e
+UADALP_v 0.10 1110 ..1 00000 01101 0 ..... ..... @qrr_e
+
+XTN 0.00 1110 ..1 00001 00101 0 ..... ..... @qrr_e
+SQXTUN_v 0.10 1110 ..1 00001 00101 0 ..... ..... @qrr_e
+SQXTN_v 0.00 1110 ..1 00001 01001 0 ..... ..... @qrr_e
+UQXTN_v 0.10 1110 ..1 00001 01001 0 ..... ..... @qrr_e
+
+FCVTN_v 0.00 1110 0.1 00001 01101 0 ..... ..... @qrr_hs
+FCVTXN_v 0.10 1110 011 00001 01101 0 ..... ..... @qrr_s
+BFCVTN_v 0.00 1110 101 00001 01101 0 ..... ..... @qrr_h
+
+SHLL_v 0.10 1110 ..1 00001 00111 0 ..... ..... @qrr_e
+
+FABS_v 0.00 1110 111 11000 11111 0 ..... ..... @qrr_h
+FABS_v 0.00 1110 1.1 00000 11111 0 ..... ..... @qrr_sd
+
+FNEG_v 0.10 1110 111 11000 11111 0 ..... ..... @qrr_h
+FNEG_v 0.10 1110 1.1 00000 11111 0 ..... ..... @qrr_sd
+
+FSQRT_v 0.10 1110 111 11001 11111 0 ..... ..... @qrr_h
+FSQRT_v 0.10 1110 1.1 00001 11111 0 ..... ..... @qrr_sd
+
+FRINTN_v 0.00 1110 011 11001 10001 0 ..... ..... @qrr_h
+FRINTN_v 0.00 1110 0.1 00001 10001 0 ..... ..... @qrr_sd
+
+FRINTM_v 0.00 1110 011 11001 10011 0 ..... ..... @qrr_h
+FRINTM_v 0.00 1110 0.1 00001 10011 0 ..... ..... @qrr_sd
+
+FRINTP_v 0.00 1110 111 11001 10001 0 ..... ..... @qrr_h
+FRINTP_v 0.00 1110 1.1 00001 10001 0 ..... ..... @qrr_sd
+
+FRINTZ_v 0.00 1110 111 11001 10011 0 ..... ..... @qrr_h
+FRINTZ_v 0.00 1110 1.1 00001 10011 0 ..... ..... @qrr_sd
+
+FRINTA_v 0.10 1110 011 11001 10001 0 ..... ..... @qrr_h
+FRINTA_v 0.10 1110 0.1 00001 10001 0 ..... ..... @qrr_sd
+
+FRINTX_v 0.10 1110 011 11001 10011 0 ..... ..... @qrr_h
+FRINTX_v 0.10 1110 0.1 00001 10011 0 ..... ..... @qrr_sd
+
+FRINTI_v 0.10 1110 111 11001 10011 0 ..... ..... @qrr_h
+FRINTI_v 0.10 1110 1.1 00001 10011 0 ..... ..... @qrr_sd
+
+FRINT32Z_v 0.00 1110 0.1 00001 11101 0 ..... ..... @qrr_sd
+FRINT32X_v 0.10 1110 0.1 00001 11101 0 ..... ..... @qrr_sd
+FRINT64Z_v 0.00 1110 0.1 00001 11111 0 ..... ..... @qrr_sd
+FRINT64X_v 0.10 1110 0.1 00001 11111 0 ..... ..... @qrr_sd
+
+SCVTF_vi 0.00 1110 011 11001 11011 0 ..... ..... @qrr_h
+SCVTF_vi 0.00 1110 0.1 00001 11011 0 ..... ..... @qrr_sd
+
+UCVTF_vi 0.10 1110 011 11001 11011 0 ..... ..... @qrr_h
+UCVTF_vi 0.10 1110 0.1 00001 11011 0 ..... ..... @qrr_sd
+
+FCVTNS_vi 0.00 1110 011 11001 10101 0 ..... ..... @qrr_h
+FCVTNS_vi 0.00 1110 0.1 00001 10101 0 ..... ..... @qrr_sd
+FCVTNU_vi 0.10 1110 011 11001 10101 0 ..... ..... @qrr_h
+FCVTNU_vi 0.10 1110 0.1 00001 10101 0 ..... ..... @qrr_sd
+
+FCVTPS_vi 0.00 1110 111 11001 10101 0 ..... ..... @qrr_h
+FCVTPS_vi 0.00 1110 1.1 00001 10101 0 ..... ..... @qrr_sd
+FCVTPU_vi 0.10 1110 111 11001 10101 0 ..... ..... @qrr_h
+FCVTPU_vi 0.10 1110 1.1 00001 10101 0 ..... ..... @qrr_sd
+
+FCVTMS_vi 0.00 1110 011 11001 10111 0 ..... ..... @qrr_h
+FCVTMS_vi 0.00 1110 0.1 00001 10111 0 ..... ..... @qrr_sd
+FCVTMU_vi 0.10 1110 011 11001 10111 0 ..... ..... @qrr_h
+FCVTMU_vi 0.10 1110 0.1 00001 10111 0 ..... ..... @qrr_sd
+
+FCVTZS_vi 0.00 1110 111 11001 10111 0 ..... ..... @qrr_h
+FCVTZS_vi 0.00 1110 1.1 00001 10111 0 ..... ..... @qrr_sd
+FCVTZU_vi 0.10 1110 111 11001 10111 0 ..... ..... @qrr_h
+FCVTZU_vi 0.10 1110 1.1 00001 10111 0 ..... ..... @qrr_sd
+
+FCVTAS_vi 0.00 1110 011 11001 11001 0 ..... ..... @qrr_h
+FCVTAS_vi 0.00 1110 0.1 00001 11001 0 ..... ..... @qrr_sd
+FCVTAU_vi 0.10 1110 011 11001 11001 0 ..... ..... @qrr_h
+FCVTAU_vi 0.10 1110 0.1 00001 11001 0 ..... ..... @qrr_sd
+
+FCMGT0_v 0.00 1110 111 11000 11001 0 ..... ..... @qrr_h
+FCMGT0_v 0.00 1110 1.1 00000 11001 0 ..... ..... @qrr_sd
+
+FCMGE0_v 0.10 1110 111 11000 11001 0 ..... ..... @qrr_h
+FCMGE0_v 0.10 1110 1.1 00000 11001 0 ..... ..... @qrr_sd
+
+FCMEQ0_v 0.00 1110 111 11000 11011 0 ..... ..... @qrr_h
+FCMEQ0_v 0.00 1110 1.1 00000 11011 0 ..... ..... @qrr_sd
+
+FCMLE0_v 0.10 1110 111 11000 11011 0 ..... ..... @qrr_h
+FCMLE0_v 0.10 1110 1.1 00000 11011 0 ..... ..... @qrr_sd
+
+FCMLT0_v 0.00 1110 111 11000 11101 0 ..... ..... @qrr_h
+FCMLT0_v 0.00 1110 1.1 00000 11101 0 ..... ..... @qrr_sd
+
+FRECPE_v 0.00 1110 111 11001 11011 0 ..... ..... @qrr_h
+FRECPE_v 0.00 1110 1.1 00001 11011 0 ..... ..... @qrr_sd
+
+FRSQRTE_v 0.10 1110 111 11001 11011 0 ..... ..... @qrr_h
+FRSQRTE_v 0.10 1110 1.1 00001 11011 0 ..... ..... @qrr_sd
+
+URECPE_v 0.00 1110 101 00001 11001 0 ..... ..... @qrr_s
+URSQRTE_v 0.10 1110 101 00001 11001 0 ..... ..... @qrr_s
+
+FCVTL_v 0.00 1110 0.1 00001 01111 0 ..... ..... @qrr_sd
+
+&fcvt_q rd rn esz q shift
+@fcvtq_h . q:1 . ...... 001 .... ...... rn:5 rd:5 \
+ &fcvt_q esz=1 shift=%fcvt_f_sh_h
+@fcvtq_s . q:1 . ...... 01 ..... ...... rn:5 rd:5 \
+ &fcvt_q esz=2 shift=%fcvt_f_sh_s
+@fcvtq_d . q:1 . ...... 1 ...... ...... rn:5 rd:5 \
+ &fcvt_q esz=3 shift=%fcvt_f_sh_d
+
+SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_h
+SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_s
+SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_d
+
+UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_h
+UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_s
+UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_d
+
+FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_h
+FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_s
+FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_d
+
+FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... @fcvtq_h
+FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... @fcvtq_s
+FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... @fcvtq_d
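
For readers new to decodetree: each pattern above turns into an argument struct and
a trans_<NAME> hook that the translator must provide. A rough hand-written sketch of
that interface for the new UDIV pattern (names follow decodetree conventions; this
is not the actual generated code):

    #include <stdbool.h>

    typedef struct DisasContext DisasContext;  /* opaque here; the real one lives in the translator */

    /* Generated from '&rrr_sf rd rn rm sf' above. */
    typedef struct arg_rrr_sf {
        int rd;
        int rn;
        int rm;
        int sf;
    } arg_rrr_sf;

    /* One hook per pattern name; it emits the TCG ops for the insn. */
    static bool trans_UDIV(DisasContext *s, arg_rrr_sf *a)
    {
        (void)s;
        (void)a;
        return false;   /* placeholder; the real hook returns true on success */
    }

Field adjusters referenced with '!function=', such as rsub_64 for %neon_rshift_i6,
are assumed to be plain helpers of the form 'static int rsub_64(DisasContext *s,
int x) { return 64 - x; }'.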
diff --git a/target/arm/tcg/arith_helper.c b/target/arm/tcg/arith_helper.c
new file mode 100644
index 0000000..6701398
--- /dev/null
+++ b/target/arm/tcg/arith_helper.c
@@ -0,0 +1,297 @@
+/*
+ * ARM generic helpers for various arithmetical operations.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "qemu/crc32c.h"
+#include <zlib.h> /* for crc32 */
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
+/*
+ * Note that signed overflow is undefined in C. The following routines are
+ * careful to use unsigned types where modulo arithmetic is required.
+ * Failure to do so _will_ break with modern optimizing compilers.
+ */
+
+/* Signed saturating arithmetic. */
+
+/* Perform 16-bit signed saturating addition. */
+static inline uint16_t add16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
+ if (a & 0x8000) {
+ res = 0x8000;
+ } else {
+ res = 0x7fff;
+ }
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating addition. */
+static inline uint8_t add8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
+ if (a & 0x80) {
+ res = 0x80;
+ } else {
+ res = 0x7f;
+ }
+ }
+ return res;
+}
+
+/* Perform 16-bit signed saturating subtraction. */
+static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
+ if (a & 0x8000) {
+ res = 0x8000;
+ } else {
+ res = 0x7fff;
+ }
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating subtraction. */
+static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
+ if (a & 0x80) {
+ res = 0x80;
+ } else {
+ res = 0x7f;
+ }
+ }
+ return res;
+}
+
+#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
+#define PFX q
+
+#include "op_addsub.c.inc"
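+/*
+ * The include above expands, for the current PFX ("q"), into the lane-by-lane
+ * helpers used by the A32 parallel add/sub instructions (helper_qadd16,
+ * helper_qsub8, ...), built from the ADD16/SUB16/ADD8/SUB8 macros defined
+ * above; it #undefs those macros and PFX at the end, which is what allows the
+ * repeated re-inclusion with fresh definitions below.
+ */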
+
+/* Unsigned saturating arithmetic. */
+static inline uint16_t add16_usat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+ res = a + b;
+ if (res < a) {
+ res = 0xffff;
+ }
+ return res;
+}
+
+static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
+{
+ if (a > b) {
+ return a - b;
+ } else {
+ return 0;
+ }
+}
+
+static inline uint8_t add8_usat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+ res = a + b;
+ if (res < a) {
+ res = 0xff;
+ }
+ return res;
+}
+
+static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
+{
+ if (a > b) {
+ return a - b;
+ } else {
+ return 0;
+ }
+}
+
+#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
+#define PFX uq
+
+#include "op_addsub.c.inc"
+
+/* Signed modulo arithmetic. */
+#define SARITH16(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
+ RESULT(sum, n, 16); \
+ if (sum >= 0) \
+ ge |= 3 << (n * 2); \
+ } while (0)
+
+#define SARITH8(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
+ RESULT(sum, n, 8); \
+ if (sum >= 0) \
+ ge |= 1 << n; \
+ } while (0)
+
+
+#define ADD16(a, b, n) SARITH16(a, b, n, +)
+#define SUB16(a, b, n) SARITH16(a, b, n, -)
+#define ADD8(a, b, n) SARITH8(a, b, n, +)
+#define SUB8(a, b, n) SARITH8(a, b, n, -)
+#define PFX s
+#define ARITH_GE
+
+#include "op_addsub.c.inc"
+
+/* Unsigned modulo arithmetic. */
+#define ADD16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 1) \
+ ge |= 3 << (n * 2); \
+ } while (0)
+
+#define ADD8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 1) \
+ ge |= 1 << n; \
+ } while (0)
+
+#define SUB16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 0) \
+ ge |= 3 << (n * 2); \
+ } while (0)
+
+#define SUB8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 0) \
+ ge |= 1 << n; \
+ } while (0)
+
+#define PFX u
+#define ARITH_GE
+
+#include "op_addsub.c.inc"
+
+/* Halved signed arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define PFX sh
+
+#include "op_addsub.c.inc"
+
+/* Halved unsigned arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define PFX uh
+
+#include "op_addsub.c.inc"
+
+static inline uint8_t do_usad(uint8_t a, uint8_t b)
+{
+ if (a > b) {
+ return a - b;
+ } else {
+ return b - a;
+ }
+}
+
+/* Unsigned sum of absolute byte differences. */
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
+{
+ uint32_t sum;
+ sum = do_usad(a, b);
+ sum += do_usad(a >> 8, b >> 8);
+ sum += do_usad(a >> 16, b >> 16);
+ sum += do_usad(a >> 24, b >> 24);
+ return sum;
+}
+
+/* For ARMv6 SEL instruction. */
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (flags & 1) {
+ mask |= 0xff;
+ }
+ if (flags & 2) {
+ mask |= 0xff00;
+ }
+ if (flags & 4) {
+ mask |= 0xff0000;
+ }
+ if (flags & 8) {
+ mask |= 0xff000000;
+ }
+ return (a & mask) | (b & ~mask);
+}
+
+/*
+ * CRC helpers.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
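
The saturating helpers in this new file all rely on the same sign-bit trick: a
signed addition overflows only when both operands have the same sign and the
wrapped result's sign differs. Restated on its own for clarity (a sketch
mirroring add16_sat() above, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool add16_would_saturate(uint16_t a, uint16_t b)
    {
        uint16_t res = a + b;                         /* wraps modulo 2^16 */
        bool same_sign_inputs = !((a ^ b) & 0x8000);  /* operands agree in sign */
        bool sign_flipped = (res ^ a) & 0x8000;       /* result sign differs */
        return same_sign_inputs && sign_flipped;
    }

The subtraction variants invert the first test, since a - b can only overflow
when the operands' signs differ.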
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index 5496f14..8e1a083 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#include "internals.h"
#if !defined(CONFIG_USER_ONLY)
@@ -19,7 +19,6 @@
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- CPUClass *cc = CPU_GET_CLASS(cs);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
bool ret = false;
@@ -35,7 +34,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_HARD
&& (armv7m_nvic_can_take_pending_exception(env->nvic))) {
cs->exception_index = EXCP_IRQ;
- cc->tcg_ops->do_interrupt(cs);
+ cs->cc->tcg_ops->do_interrupt(cs);
ret = true;
}
return ret;
@@ -233,18 +232,27 @@ static void cortex_m55_initfn(Object *obj)
}
static const TCGCPUOps arm_v7m_tcg_ops = {
+ /* ARM processors have a weak memory model */
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
+ .mmu_index = arm_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
#else
- .tlb_fill = arm_cpu_tlb_fill,
+ .tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -254,14 +262,13 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
-static void arm_v7m_class_init(ObjectClass *oc, void *data)
+static void arm_v7m_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
acc->info = data;
cc->tcg_ops = &arm_v7m_tcg_ops;
- cc->gdb_core_xml_file = "arm-m-profile.xml";
}
static const ARMCPUInfo arm_v7m_cpus[] = {
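
For contrast with the weakly-ordered setting in arm_v7m_tcg_ops above,
guest_default_memory_order is a TCG barrier mask: 0 requests no implicit
barriers, while a strongly-ordered guest would bar every reordering. A
hypothetical fragment, not from this series, assuming the field takes the
same TCG_MO_* mask other targets use:

    #include "qemu/osdep.h"
    #include "accel/tcg/cpu-ops.h"   /* TCGCPUOps */
    #include "tcg/tcg-mo.h"          /* TCG_MO_* barrier bits */

    /* Hypothetical: what a sequentially-consistent guest would set. */
    static const TCGCPUOps strongly_ordered_ops_fragment = {
        /* bar all four load/store reorderings, i.e. TCG_MO_ALL */
        .guest_default_memory_order =
            TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST,
        .mttcg_supported = true,
        /* ... the remaining hooks as in arm_v7m_tcg_ops ... */
    };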
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 20c2737..2c45b7e 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
#include "internals.h"
#include "target/arm/idau.h"
#if !defined(CONFIG_USER_ONLY)
@@ -71,7 +71,7 @@ void aa32_max_features(ARMCPU *cpu)
cpu->isar.id_mmfr5 = t;
t = cpu->isar.id_pfr0;
- t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */
+ t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CSV2 */
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
cpu->isar.id_pfr0 = t;
@@ -574,9 +574,9 @@ static void cortex_a15_initfn(Object *obj)
static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
/* Dummy the TCM region regs for the moment */
- { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST },
- { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST },
{ .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
.opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
@@ -1026,19 +1026,31 @@ static const ARMCPUInfo arm_tcg_cpus[] = {
{ .name = "ti925t", .initfn = ti925t_initfn },
{ .name = "sa1100", .initfn = sa1100_initfn },
{ .name = "sa1110", .initfn = sa1110_initfn },
- { .name = "pxa250", .initfn = pxa250_initfn },
- { .name = "pxa255", .initfn = pxa255_initfn },
- { .name = "pxa260", .initfn = pxa260_initfn },
- { .name = "pxa261", .initfn = pxa261_initfn },
- { .name = "pxa262", .initfn = pxa262_initfn },
+ { .name = "pxa250", .initfn = pxa250_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa255", .initfn = pxa255_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa260", .initfn = pxa260_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa261", .initfn = pxa261_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa262", .initfn = pxa262_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
/* "pxa270" is an alias for "pxa270-a0" */
- { .name = "pxa270", .initfn = pxa270a0_initfn },
- { .name = "pxa270-a0", .initfn = pxa270a0_initfn },
- { .name = "pxa270-a1", .initfn = pxa270a1_initfn },
- { .name = "pxa270-b0", .initfn = pxa270b0_initfn },
- { .name = "pxa270-b1", .initfn = pxa270b1_initfn },
- { .name = "pxa270-c0", .initfn = pxa270c0_initfn },
- { .name = "pxa270-c5", .initfn = pxa270c5_initfn },
+ { .name = "pxa270", .initfn = pxa270a0_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-a0", .initfn = pxa270a0_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-a1", .initfn = pxa270a1_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-b0", .initfn = pxa270b0_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-b1", .initfn = pxa270b1_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-c0", .initfn = pxa270c0_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
+ { .name = "pxa270-c5", .initfn = pxa270c5_initfn,
+ .deprecation_note = "iwMMXt CPUs are no longer supported", },
#ifndef TARGET_AARCH64
{ .name = "max", .initfn = arm_max_initfn },
#endif
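The pxa entries above now carry a .deprecation_note in their ARMCPUInfo slots. As a rough sketch of how such a table-driven note can be surfaced when a model is selected (the names and the warning path below are made up for illustration, not QEMU's actual plumbing):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cpu_model {
    const char *name;
    const char *deprecation_note;   /* NULL when the model is current */
};

static const struct cpu_model models[] = {
    { "cortex-a15", NULL },
    { "pxa270-a0",  "iwMMXt CPUs are no longer supported" },
};

/* Look a model up by name; warn if it carries a deprecation note. */
static const struct cpu_model *select_model(const char *name)
{
    for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
        if (strcmp(models[i].name, name) == 0) {
            if (models[i].deprecation_note) {
                fprintf(stderr, "warning: CPU model '%s' is deprecated: %s\n",
                        name, models[i].deprecation_note);
            }
            return &models[i];
        }
    }
    return NULL;
}

int main(void)
{
    return select_model("pxa270-a0") ? 0 : 1;
}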
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index fe232eb..5d8ed27 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -29,32 +29,6 @@
#include "cpu-features.h"
#include "cpregs.h"
-static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize,
- unsigned cachesize)
-{
- unsigned lg_linesize = ctz32(linesize);
- unsigned sets;
-
- /*
- * The 64-bit CCSIDR_EL1 format is:
- * [55:32] number of sets - 1
- * [23:3] associativity - 1
- * [2:0] log2(linesize) - 4
- * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
- */
- assert(assoc != 0);
- assert(is_power_of_2(linesize));
- assert(lg_linesize >= 4 && lg_linesize <= 7 + 4);
-
- /* sets * associativity * linesize == cachesize. */
- sets = cachesize / (assoc * linesize);
- assert(cachesize % (assoc * linesize) == 0);
-
- return ((uint64_t)(sets - 1) << 32)
- | ((assoc - 1) << 3)
- | (lg_linesize - 4);
-}
-
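For reference, a worked check of the 64-bit (CCIDX) layout documented in the removed make_ccsidr64() above; the make_ccsidr(CCSIDR_FORMAT_CCIDX, ...) calls used further down are presumably meant to produce the same encodings. Standalone illustration only, not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Same formula as the removed helper: sets-1 in [55:32], assoc-1 in [23:3],
 * log2(linesize)-4 in [2:0].  Uses the GCC/Clang ctz builtin. */
static uint64_t ccsidr64(unsigned assoc, unsigned linesize, unsigned cachesize)
{
    unsigned lg_linesize = __builtin_ctz(linesize);
    unsigned sets = cachesize / (assoc * linesize);

    return ((uint64_t)(sets - 1) << 32) | ((assoc - 1) << 3) | (lg_linesize - 4);
}

int main(void)
{
    /* 64KB, 4-way, 64-byte lines: 256 sets */
    assert(ccsidr64(4, 64, 64 * 1024) == 0x000000ff0000001aull);
    /* 1MB, 8-way, 64-byte lines: 2048 sets */
    assert(ccsidr64(8, 64, 1024 * 1024) == 0x000007ff0000003aull);
    return 0;
}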
static void aarch64_a35_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -106,9 +80,12 @@ static void aarch64_a35_initfn(Object *obj)
cpu->isar.reset_pmcr_el0 = 0x410a3000;
/* From B2.29 Cache ID registers */
- cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
- cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */
+ /* 32KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
+ /* 32KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2);
+ /* 512KB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7);
/* From B3.5 VGIC Type register */
cpu->gic_num_lrs = 4;
@@ -221,7 +198,7 @@ static void cpu_max_get_l0gptsz(Object *obj, Visitor *v, const char *name,
visit_type_uint32(v, name, &value, errp);
}
-static Property arm_cpu_lpa2_property =
+static const Property arm_cpu_lpa2_property =
DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
static void aarch64_a55_initfn(Object *obj)
@@ -272,9 +249,12 @@ static void aarch64_a55_initfn(Object *obj)
cpu->revidr = 0;
/* From B2.23 CCSIDR_EL1 */
- cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
- cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */
- cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */
+ /* 32KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
+ /* 32KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2);
+ /* 512KB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7);
/* From B2.96 SCTLR_EL3 */
cpu->reset_sctlr = 0x30c50838;
@@ -338,9 +318,12 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41023000;
cpu->clidr = 0x0a200023;
- cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
- cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
+ /* 32KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
+ /* 48KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2);
+ /* 1MB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7);
cpu->dcz_blocksize = 4; /* 64 bytes */
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
@@ -397,9 +380,12 @@ static void aarch64_a76_initfn(Object *obj)
cpu->revidr = 0;
/* From B2.18 CCSIDR_EL1 */
- cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
- cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */
+ /* 64KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7);
+ /* 64KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2);
+ /* 512KB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 512 * KiB, 7);
/* From B2.93 SCTLR_EL3 */
cpu->reset_sctlr = 0x30c50838;
@@ -449,9 +435,12 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->isar.id_aa64isar1 = 0x0000000000010001;
cpu->isar.id_aa64zfr0 = 0x0000000000000000;
cpu->clidr = 0x0000000080000023;
- cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
- cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
- cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
+ /* 64KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
+ /* 64KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 2);
+ /* 8MB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 256, 8 * MiB, 7);
cpu->dcz_blocksize = 6; /* 256 bytes */
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
@@ -637,9 +626,12 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
cpu->revidr = 0;
/* From B2.23 CCSIDR_EL1 */
- cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
- cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
- cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */
+ /* 64KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7);
+ /* 64KB L1 icache */
+ cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2);
+ /* 1MB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 1 * MiB, 7);
/* From B2.98 SCTLR_EL3 */
cpu->reset_sctlr = 0x30c50838;
@@ -685,7 +677,7 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
cpu->isar.id_aa64dfr0 = 0x000001f210305519ull;
cpu->isar.id_aa64dfr1 = 0x00000000;
cpu->isar.id_aa64isar0 = 0x1011111110212120ull; /* with FEAT_RNG */
- cpu->isar.id_aa64isar1 = 0x0111000001211032ull;
+ cpu->isar.id_aa64isar1 = 0x0011100001211032ull;
cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
cpu->isar.id_aa64mmfr2 = 0x0220011102101011ull;
@@ -721,9 +713,12 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
* L2: 8-way set associative, 64 byte line size, either 512K or 1MB.
* L3: No L3 (this matches the CLIDR_EL1 value).
*/
- cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
- cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
- cpu->ccsidr[2] = make_ccsidr64(8, 64, 1 * MiB); /* L2 cache */
+ /* 64KB L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
+ /* 64KB L1 icache */
+ cpu->ccsidr[1] = cpu->ccsidr[0];
+ /* 1MB L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 1 * MiB, 0);
/* From 3.2.115 SCTLR_EL3 */
cpu->reset_sctlr = 0x30c50838;
@@ -959,9 +954,12 @@ static void aarch64_a710_initfn(Object *obj)
* L1: 4-way set associative 64-byte line size, total either 32K or 64K.
* L2: 8-way set associative 64 byte line size, total either 256K or 512K.
*/
- cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
- cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
- cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */
+ /* L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
+ /* L1 icache */
+ cpu->ccsidr[1] = cpu->ccsidr[0];
+ /* L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0);
/* FIXME: Not documented -- copied from neoverse-v1 */
cpu->reset_sctlr = 0x30c50838;
@@ -1057,10 +1055,12 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
* L1: 4-way set associative 64-byte line size, total 64K.
* L2: 8-way set associative 64 byte line size, total either 512K or 1024K.
*/
- cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
- cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
- cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */
-
+ /* L1 dcache */
+ cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
+ /* L1 icache */
+ cpu->ccsidr[1] = cpu->ccsidr[0];
+ /* L2 cache */
+ cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0);
/* FIXME: Not documented -- copied from neoverse-v1 */
cpu->reset_sctlr = 0x30c50838;
@@ -1160,12 +1160,14 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */
t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */
- t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 2); /* FEAT_BF16, FEAT_EBF16 */
t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
+ t = FIELD_DP64(t, ID_AA64ISAR1, XS, 1); /* FEAT_XS */
cpu->isar.id_aa64isar1 = t;
t = cpu->isar.id_aa64isar2;
+ t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1); /* FEAT_RPRES */
t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */
t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */
t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */
@@ -1217,7 +1219,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 2); /* FEAT_ETS2 */
t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
+ t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1); /* FEAT_AFP */
t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */
+ t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */
cpu->isar.id_aa64mmfr1 = t;
t = cpu->isar.id_aa64mmfr2;
@@ -1244,7 +1248,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */
t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */
- t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */
+ t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 2); /* FEAT_BF16, FEAT_EBF16 */
t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */
t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */
t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */
@@ -1312,7 +1316,7 @@ static void aarch64_cpu_register_types(void)
size_t i;
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
index 7cadd61..3428bd1 100644
--- a/target/arm/tcg/crypto_helper.c
+++ b/target/arm/tcg/crypto_helper.c
@@ -10,14 +10,16 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
union CRYPTO_STATE {
uint8_t bytes[16];
uint32_t words[4];
diff --git a/target/arm/tcg/gengvec.c b/target/arm/tcg/gengvec.c
index 56a1dc1..01867f8 100644
--- a/target/arm/tcg/gengvec.c
+++ b/target/arm/tcg/gengvec.c
@@ -88,6 +88,25 @@ GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT)
#undef GEN_CMP0
+void gen_gvec_sshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ /* Signed shift out of range results in all-sign-bits */
+ shift = MIN(shift, (8 << vece) - 1);
+ tcg_gen_gvec_sari(vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz);
+}
+
+void gen_gvec_ushr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ /* Unsigned shift out of range results in all-zero-bits */
+ if (shift >= (8 << vece)) {
+ tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
+ } else {
+ tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz);
+ }
+}
+
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
tcg_gen_vec_sar8i_i64(a, a, shift);
@@ -285,7 +304,7 @@ void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
tcg_gen_add_i32(d, d, t);
}
- void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
TCGv_i64 t = tcg_temp_new_i64();
@@ -297,10 +316,9 @@ void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec ones = tcg_temp_new_vec_matching(d);
+ TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1);
tcg_gen_shri_vec(vece, t, a, sh - 1);
- tcg_gen_dupi_vec(vece, ones, 1);
tcg_gen_and_vec(vece, t, t, ones);
tcg_gen_sari_vec(vece, d, a, sh);
tcg_gen_add_vec(vece, d, d, t);
@@ -492,10 +510,9 @@ void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec ones = tcg_temp_new_vec_matching(d);
+ TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1);
tcg_gen_shri_vec(vece, t, a, shift - 1);
- tcg_gen_dupi_vec(vece, ones, 1);
tcg_gen_and_vec(vece, t, t, ones);
tcg_gen_shri_vec(vece, d, a, shift);
tcg_gen_add_vec(vece, d, d, t);
@@ -685,9 +702,9 @@ static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
+ int64_t mi = MAKE_64BIT_MASK((8 << vece) - sh, sh);
+ TCGv_vec m = tcg_constant_vec_matching(d, vece, mi);
- tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
tcg_gen_shri_vec(vece, t, a, sh);
tcg_gen_and_vec(vece, d, d, m);
tcg_gen_or_vec(vece, d, d, t);
@@ -773,10 +790,9 @@ static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_constant_vec_matching(d, vece, MAKE_64BIT_MASK(0, sh));
tcg_gen_shli_vec(vece, t, a, sh);
- tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
tcg_gen_and_vec(vece, d, d, m);
tcg_gen_or_vec(vece, d, d, t);
}
@@ -1044,14 +1060,13 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
TCGv_vec rval = tcg_temp_new_vec_matching(dst);
TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
- TCGv_vec msk, max;
+ TCGv_vec max, zero;
tcg_gen_neg_vec(vece, rsh, shift);
if (vece == MO_8) {
tcg_gen_mov_vec(lsh, shift);
} else {
- msk = tcg_temp_new_vec_matching(dst);
- tcg_gen_dupi_vec(vece, msk, 0xff);
+ TCGv_vec msk = tcg_constant_vec_matching(dst, vece, 0xff);
tcg_gen_and_vec(vece, lsh, shift, msk);
tcg_gen_and_vec(vece, rsh, rsh, msk);
}
@@ -1064,26 +1079,21 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
tcg_gen_shlv_vec(vece, lval, src, lsh);
tcg_gen_shrv_vec(vece, rval, src, rsh);
- max = tcg_temp_new_vec_matching(dst);
- tcg_gen_dupi_vec(vece, max, 8 << vece);
-
/*
- * The choice of LT (signed) and GEU (unsigned) are biased toward
+ * The choice of GE (signed) and GEU (unsigned) is biased toward
* the instructions of the x86_64 host. For MO_8, the whole byte
* is significant so we must use an unsigned compare; otherwise we
* have already masked to a byte and so a signed compare works.
* Other tcg hosts have a full set of comparisons and do not care.
*/
+ zero = tcg_constant_vec_matching(dst, vece, 0);
+ max = tcg_constant_vec_matching(dst, vece, 8 << vece);
if (vece == MO_8) {
- tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
- tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
- tcg_gen_andc_vec(vece, lval, lval, lsh);
- tcg_gen_andc_vec(vece, rval, rval, rsh);
+ tcg_gen_cmpsel_vec(TCG_COND_GEU, vece, lval, lsh, max, zero, lval);
+ tcg_gen_cmpsel_vec(TCG_COND_GEU, vece, rval, rsh, max, zero, rval);
} else {
- tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
- tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
- tcg_gen_and_vec(vece, lval, lval, lsh);
- tcg_gen_and_vec(vece, rval, rval, rsh);
+ tcg_gen_cmpsel_vec(TCG_COND_GE, vece, lval, lsh, max, zero, lval);
+ tcg_gen_cmpsel_vec(TCG_COND_GE, vece, rval, rsh, max, zero, rval);
}
tcg_gen_or_vec(vece, dst, lval, rval);
}
@@ -1093,7 +1103,7 @@ void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
{
static const TCGOpcode vecop_list[] = {
INDEX_op_neg_vec, INDEX_op_shlv_vec,
- INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
+ INDEX_op_shrv_vec, INDEX_op_cmpsel_vec, 0
};
static const GVecGen3 ops[4] = {
{ .fniv = gen_ushl_vec,
@@ -1169,7 +1179,7 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
TCGv_vec rval = tcg_temp_new_vec_matching(dst);
TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
- TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
+ TCGv_vec max, zero;
/*
* Rely on the TCG guarantee that out of range shifts produce
@@ -1180,29 +1190,28 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
if (vece == MO_8) {
tcg_gen_mov_vec(lsh, shift);
} else {
- tcg_gen_dupi_vec(vece, tmp, 0xff);
- tcg_gen_and_vec(vece, lsh, shift, tmp);
- tcg_gen_and_vec(vece, rsh, rsh, tmp);
+ TCGv_vec msk = tcg_constant_vec_matching(dst, vece, 0xff);
+ tcg_gen_and_vec(vece, lsh, shift, msk);
+ tcg_gen_and_vec(vece, rsh, rsh, msk);
}
/* Bound rsh so out of bound right shift gets -1. */
- tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
- tcg_gen_umin_vec(vece, rsh, rsh, tmp);
- tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
+ max = tcg_constant_vec_matching(dst, vece, (8 << vece) - 1);
+ tcg_gen_umin_vec(vece, rsh, rsh, max);
tcg_gen_shlv_vec(vece, lval, src, lsh);
tcg_gen_sarv_vec(vece, rval, src, rsh);
/* Select in-bound left shift. */
- tcg_gen_andc_vec(vece, lval, lval, tmp);
+ zero = tcg_constant_vec_matching(dst, vece, 0);
+ tcg_gen_cmpsel_vec(TCG_COND_GT, vece, lval, lsh, max, zero, lval);
/* Select between left and right shift. */
if (vece == MO_8) {
- tcg_gen_dupi_vec(vece, tmp, 0);
- tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, zero, rval, lval);
} else {
- tcg_gen_dupi_vec(vece, tmp, 0x80);
- tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
+ TCGv_vec sgn = tcg_constant_vec_matching(dst, vece, 0x80);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, sgn, lval, rval);
}
}
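The two vector expansions above implement the Neon USHL/SSHL semantics: the per-element shift amount is signed, negative values shift right, and shifts whose magnitude reaches the element width produce all-zero bits (unsigned) or, for signed right shifts, all sign bits. A scalar model for 8-bit elements, for illustration only (assumes arithmetic right shift of signed values, as on the usual compilers):

#include <assert.h>
#include <stdint.h>

static uint8_t ushl_b(uint8_t src, int8_t sh)
{
    if (sh >= 8 || sh <= -8) {
        return 0;                        /* shifted entirely out */
    }
    return sh < 0 ? src >> -sh : src << sh;
}

static uint8_t sshl_b(uint8_t src, int8_t sh)
{
    if (sh >= 8) {
        return 0;                        /* left shift out of range */
    }
    if (sh < -7) {
        sh = -7;                         /* right shift saturates to sign bits */
    }
    return sh < 0 ? (uint8_t)((int8_t)src >> -sh) : (uint8_t)(src << sh);
}

int main(void)
{
    assert(ushl_b(0x81, 1) == 0x02);     /* left shift drops the top bit */
    assert(ushl_b(0x81, -1) == 0x40);    /* negative shift is a right shift */
    assert(ushl_b(0x81, -9) == 0x00);    /* out of range: zero */
    assert(sshl_b(0x81, -9) == 0xff);    /* out of range: sign bits */
    return 0;
}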
@@ -1211,7 +1220,7 @@ void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
{
static const TCGOpcode vecop_list[] = {
INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
- INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
+ INDEX_op_sarv_vec, INDEX_op_cmpsel_vec, 0
};
static const GVecGen3 ops[4] = {
{ .fniv = gen_sshl_vec,
@@ -1304,6 +1313,42 @@ void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
opr_sz, max_sz, 0, fns[vece]);
}
+void gen_neon_sqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_2_ptr * const fns[] = {
+ gen_helper_neon_sqshli_b, gen_helper_neon_sqshli_h,
+ gen_helper_neon_sqshli_s, gen_helper_neon_sqshli_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_debug_assert(c >= 0 && c <= (8 << vece));
+ tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]);
+}
+
+void gen_neon_uqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_2_ptr * const fns[] = {
+ gen_helper_neon_uqshli_b, gen_helper_neon_uqshli_h,
+ gen_helper_neon_uqshli_s, gen_helper_neon_uqshli_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_debug_assert(c >= 0 && c <= (8 << vece));
+ tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]);
+}
+
+void gen_neon_sqshlui(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_2_ptr * const fns[] = {
+ gen_helper_neon_sqshlui_b, gen_helper_neon_sqshlui_h,
+ gen_helper_neon_sqshlui_s, gen_helper_neon_sqshlui_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_debug_assert(c >= 0 && c <= (8 << vece));
+ tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]);
+}
+
void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
{
uint64_t max = MAKE_64BIT_MASK(0, 8 << esz);
@@ -2313,3 +2358,372 @@ void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
assert(vece <= MO_32);
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
}
+
+void gen_gvec_cls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const GVecGen2 g[] = {
+ { .fni4 = gen_helper_neon_cls_s8,
+ .vece = MO_8 },
+ { .fni4 = gen_helper_neon_cls_s16,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_clrsb_i32,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_clz32_i32(TCGv_i32 d, TCGv_i32 n)
+{
+ tcg_gen_clzi_i32(d, n, 32);
+}
+
+void gen_gvec_clz(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const GVecGen2 g[] = {
+ { .fni4 = gen_helper_neon_clz_u8,
+ .vece = MO_8 },
+ { .fni4 = gen_helper_neon_clz_u16,
+ .vece = MO_16 },
+ { .fni4 = gen_clz32_i32,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+void gen_gvec_cnt(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ assert(vece == MO_8);
+ tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0,
+ gen_helper_gvec_cnt_b);
+}
+
+void gen_gvec_rbit(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ assert(vece == MO_8);
+ tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0,
+ gen_helper_gvec_rbit_b);
+}
+
+void gen_gvec_rev16(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ assert(vece == MO_8);
+ tcg_gen_gvec_rotli(MO_16, rd_ofs, rn_ofs, 8, opr_sz, max_sz);
+}
+
+static void gen_bswap32_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ tcg_gen_bswap64_i64(d, n);
+ tcg_gen_rotli_i64(d, d, 32);
+}
+
+void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const GVecGen2 g = {
+ .fni8 = gen_bswap32_i64,
+ .fni4 = tcg_gen_bswap32_i32,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_32
+ };
+
+ switch (vece) {
+ case MO_16:
+ tcg_gen_gvec_rotli(MO_32, rd_ofs, rn_ofs, 16, opr_sz, max_sz);
+ break;
+ case MO_8:
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const GVecGen2 g[] = {
+ { .fni8 = tcg_gen_bswap64_i64,
+ .vece = MO_64 },
+ { .fni8 = tcg_gen_hswap_i64,
+ .vece = MO_64 },
+ };
+
+ switch (vece) {
+ case MO_32:
+ tcg_gen_gvec_rotli(MO_64, rd_ofs, rn_ofs, 32, opr_sz, max_sz);
+ break;
+ case MO_8:
+ case MO_16:
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void gen_saddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ int half = 4 << vece;
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_shli_vec(vece, t, n, half);
+ tcg_gen_sari_vec(vece, d, n, half);
+ tcg_gen_sari_vec(vece, t, t, half);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_saddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ext32s_i64(t, n);
+ tcg_gen_sari_i64(d, n, 32);
+ tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2 g[] = {
+ { .fniv = gen_saddlp_vec,
+ .fni8 = gen_helper_neon_addlp_s8,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fniv = gen_saddlp_vec,
+ .fni8 = gen_helper_neon_addlp_s16,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fniv = gen_saddlp_vec,
+ .fni8 = gen_saddlp_s_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_sadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ gen_saddlp_vec(vece, t, n);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_sadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_helper_neon_addlp_s8(t, n);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_sadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_helper_neon_addlp_s16(t, n);
+ tcg_gen_vec_add32_i64(d, d, t);
+}
+
+static void gen_sadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_saddlp_s_i64(t, n);
+ tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2 g[] = {
+ { .fniv = gen_sadalp_vec,
+ .fni8 = gen_sadalp_b_i64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fniv = gen_sadalp_vec,
+ .fni8 = gen_sadalp_h_i64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fniv = gen_sadalp_vec,
+ .fni8 = gen_sadalp_s_i64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uaddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ int half = 4 << vece;
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_constant_vec_matching(d, vece, MAKE_64BIT_MASK(0, half));
+
+ tcg_gen_shri_vec(vece, t, n, half);
+ tcg_gen_and_vec(vece, d, n, m);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_uaddlp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0xff));
+
+ tcg_gen_shri_i64(t, n, 8);
+ tcg_gen_and_i64(d, n, m);
+ tcg_gen_and_i64(t, t, m);
+ /* No carry between widened unsigned elements. */
+ tcg_gen_add_i64(d, d, t);
+}
+
+static void gen_uaddlp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 m = tcg_constant_i64(dup_const(MO_32, 0xffff));
+
+ tcg_gen_shri_i64(t, n, 16);
+ tcg_gen_and_i64(d, n, m);
+ tcg_gen_and_i64(t, t, m);
+ /* No carry between widened unsigned elements. */
+ tcg_gen_add_i64(d, d, t);
+}
+
+static void gen_uaddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_i64(t, n);
+ tcg_gen_shri_i64(d, n, 32);
+ tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2 g[] = {
+ { .fniv = gen_uaddlp_vec,
+ .fni8 = gen_uaddlp_b_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fniv = gen_uaddlp_vec,
+ .fni8 = gen_uaddlp_h_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fniv = gen_uaddlp_vec,
+ .fni8 = gen_uaddlp_s_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
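The fixed-width helpers above lean on the property called out in their comments: after masking, each widened addend is at most 0xff (or 0xffff), so per-lane sums cannot carry into the neighbouring lane. A standalone worked example for the byte case, illustration only:

#include <assert.h>
#include <stdint.h>

/* Pairwise widening add of unsigned bytes, as in gen_uaddlp_b_i64 above. */
static uint64_t uaddlp_b(uint64_t n)
{
    const uint64_t m = 0x00ff00ff00ff00ffull;

    return (n & m) + ((n >> 8) & m);
}

int main(void)
{
    /* Byte pairs (01,02) (03,04) (05,06) (07,08) widen to 3, 7, 0xb, 0xf. */
    assert(uaddlp_b(0x0102030405060708ull) == 0x00030007000b000full);
    /* Worst case 0xff + 0xff = 0x1fe still fits each 16-bit lane. */
    assert(uaddlp_b(~0ull) == 0x01fe01fe01fe01feull);
    return 0;
}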
+
+static void gen_uadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ gen_uaddlp_vec(vece, t, n);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_uadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_uaddlp_b_i64(t, n);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_uadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_uaddlp_h_i64(t, n);
+ tcg_gen_vec_add32_i64(d, d, t);
+}
+
+static void gen_uadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_uaddlp_s_i64(t, n);
+ tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2 g[] = {
+ { .fniv = gen_uadalp_vec,
+ .fni8 = gen_uadalp_b_i64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fniv = gen_uadalp_vec,
+ .fni8 = gen_uadalp_h_i64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fniv = gen_uadalp_vec,
+ .fni8 = gen_uadalp_s_i64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+void gen_gvec_fabs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ uint64_t s_bit = 1ull << ((8 << vece) - 1);
+ tcg_gen_gvec_andi(vece, dofs, aofs, s_bit - 1, oprsz, maxsz);
+}
+
+void gen_gvec_fneg(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ uint64_t s_bit = 1ull << ((8 << vece) - 1);
+ tcg_gen_gvec_xori(vece, dofs, aofs, s_bit, oprsz, maxsz);
+}
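Concretely, for MO_32 elements s_bit is 1 << 31, so the two helpers above reduce to an AND with 0x7fffffff (clear the sign) and an XOR with 0x80000000 (flip the sign) on each element. A scalar spot check, illustration only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t minus_pi = 0xc0490fdbu;     /* bit pattern of -3.1415927f */

    assert((minus_pi & 0x7fffffffu) == 0x40490fdbu);   /* fabs */
    assert((minus_pi ^ 0x80000000u) == 0x40490fdbu);   /* fneg */
    return 0;
}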
+
+void gen_gvec_urecpe(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ assert(vece == MO_32);
+ tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0,
+ gen_helper_gvec_urecpe_s);
+}
+
+void gen_gvec_ursqrte(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ assert(vece == MO_32);
+ tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0,
+ gen_helper_gvec_ursqrte_s);
+}
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 0ea8668..4f618ae 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -28,12 +28,20 @@
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cpu-common.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "fpu/softfloat.h"
-#include <zlib.h> /* For crc32 */
+#include <zlib.h> /* for crc32 */
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
+#include "vec_internal.h"
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
@@ -130,40 +138,38 @@ static inline uint32_t float_rel_to_flags(int res)
return flags;
}
-uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
+uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, float_status *fp_status)
{
return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
}
-uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
+uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, float_status *fp_status)
{
return float_rel_to_flags(float16_compare(x, y, fp_status));
}
-uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
+uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, float_status *fp_status)
{
return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}
-uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
+uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, float_status *fp_status)
{
return float_rel_to_flags(float32_compare(x, y, fp_status));
}
-uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
+uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, float_status *fp_status)
{
return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}
-uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
+uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, float_status *fp_status)
{
return float_rel_to_flags(float64_compare(x, y, fp_status));
}
-float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
+float32 HELPER(vfp_mulxs)(float32 a, float32 b, float_status *fpst)
{
- float_status *fpst = fpstp;
-
a = float32_squash_input_denormal(a, fpst);
b = float32_squash_input_denormal(b, fpst);
@@ -176,10 +182,8 @@ float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
return float32_mul(a, b, fpst);
}
-float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
+float64 HELPER(vfp_mulxd)(float64 a, float64 b, float_status *fpst)
{
- float_status *fpst = fpstp;
-
a = float64_squash_input_denormal(a, fpst);
b = float64_squash_input_denormal(b, fpst);
@@ -193,184 +197,71 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
}
/* 64bit/double versions of the neon float compare functions */
-uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
+uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float64_eq_quiet(a, b, fpst);
}
-uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
+uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float64_le(b, a, fpst);
}
-uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
+uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float64_lt(b, a, fpst);
}
-/* Reciprocal step and sqrt step. Note that unlike the A32/T32
+/*
+ * Reciprocal step and sqrt step. Note that unlike the A32/T32
* versions, these do a fully fused multiply-add or
* multiply-add-and-halve.
+ * The FPCR.AH == 1 versions need to avoid flipping the sign of NaN.
*/
-
-uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float16_squash_input_denormal(a, fpst);
- b = float16_squash_input_denormal(b, fpst);
-
- a = float16_chs(a);
- if ((float16_is_infinity(a) && float16_is_zero(b)) ||
- (float16_is_infinity(b) && float16_is_zero(a))) {
- return float16_two;
- }
- return float16_muladd(a, b, float16_two, 0, fpst);
-}
-
-float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float32_squash_input_denormal(a, fpst);
- b = float32_squash_input_denormal(b, fpst);
-
- a = float32_chs(a);
- if ((float32_is_infinity(a) && float32_is_zero(b)) ||
- (float32_is_infinity(b) && float32_is_zero(a))) {
- return float32_two;
- }
- return float32_muladd(a, b, float32_two, 0, fpst);
-}
-
-float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float64_squash_input_denormal(a, fpst);
- b = float64_squash_input_denormal(b, fpst);
-
- a = float64_chs(a);
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
- (float64_is_infinity(b) && float64_is_zero(a))) {
- return float64_two;
- }
- return float64_muladd(a, b, float64_two, 0, fpst);
-}
-
-uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float16_squash_input_denormal(a, fpst);
- b = float16_squash_input_denormal(b, fpst);
-
- a = float16_chs(a);
- if ((float16_is_infinity(a) && float16_is_zero(b)) ||
- (float16_is_infinity(b) && float16_is_zero(a))) {
- return float16_one_point_five;
- }
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
-}
-
-float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float32_squash_input_denormal(a, fpst);
- b = float32_squash_input_denormal(b, fpst);
-
- a = float32_chs(a);
- if ((float32_is_infinity(a) && float32_is_zero(b)) ||
- (float32_is_infinity(b) && float32_is_zero(a))) {
- return float32_one_point_five;
- }
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
-}
-
-float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- a = float64_squash_input_denormal(a, fpst);
- b = float64_squash_input_denormal(b, fpst);
-
- a = float64_chs(a);
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
- (float64_is_infinity(b) && float64_is_zero(a))) {
- return float64_one_point_five;
- }
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
-}
-
-/* Pairwise long add: add pairs of adjacent elements into
- * double-width elements in the result (eg _s8 is an 8x8->16 op)
- */
-uint64_t HELPER(neon_addlp_s8)(uint64_t a)
-{
- uint64_t nsignmask = 0x0080008000800080ULL;
- uint64_t wsignmask = 0x8000800080008000ULL;
- uint64_t elementmask = 0x00ff00ff00ff00ffULL;
- uint64_t tmp1, tmp2;
- uint64_t res, signres;
-
- /* Extract odd elements, sign extend each to a 16 bit field */
- tmp1 = a & elementmask;
- tmp1 ^= nsignmask;
- tmp1 |= wsignmask;
- tmp1 = (tmp1 - nsignmask) ^ wsignmask;
- /* Ditto for the even elements */
- tmp2 = (a >> 8) & elementmask;
- tmp2 ^= nsignmask;
- tmp2 |= wsignmask;
- tmp2 = (tmp2 - nsignmask) ^ wsignmask;
-
- /* calculate the result by summing bits 0..14, 16..22, etc,
- * and then adjusting the sign bits 15, 23, etc manually.
- * This ensures the addition can't overflow the 16 bit field.
- */
- signres = (tmp1 ^ tmp2) & wsignmask;
- res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
- res ^= signres;
-
- return res;
-}
-
-uint64_t HELPER(neon_addlp_u8)(uint64_t a)
-{
- uint64_t tmp;
-
- tmp = a & 0x00ff00ff00ff00ffULL;
- tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
- return tmp;
-}
-
-uint64_t HELPER(neon_addlp_s16)(uint64_t a)
-{
- int32_t reslo, reshi;
-
- reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
- reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
-
- return (uint32_t)reslo | (((uint64_t)reshi) << 32);
-}
-
-uint64_t HELPER(neon_addlp_u16)(uint64_t a)
-{
- uint64_t tmp;
-
- tmp = a & 0x0000ffff0000ffffULL;
- tmp += (a >> 16) & 0x0000ffff0000ffffULL;
- return tmp;
-}
+#define DO_RECPS(NAME, CTYPE, FLOATTYPE, CHSFN) \
+ CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \
+ { \
+ a = FLOATTYPE ## _squash_input_denormal(a, fpst); \
+ b = FLOATTYPE ## _squash_input_denormal(b, fpst); \
+ a = FLOATTYPE ## _ ## CHSFN(a); \
+ if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \
+ (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \
+ return FLOATTYPE ## _two; \
+ } \
+ return FLOATTYPE ## _muladd(a, b, FLOATTYPE ## _two, 0, fpst); \
+ }
+
+DO_RECPS(recpsf_f16, uint32_t, float16, chs)
+DO_RECPS(recpsf_f32, float32, float32, chs)
+DO_RECPS(recpsf_f64, float64, float64, chs)
+DO_RECPS(recpsf_ah_f16, uint32_t, float16, ah_chs)
+DO_RECPS(recpsf_ah_f32, float32, float32, ah_chs)
+DO_RECPS(recpsf_ah_f64, float64, float64, ah_chs)
+
+#define DO_RSQRTSF(NAME, CTYPE, FLOATTYPE, CHSFN) \
+ CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \
+ { \
+ a = FLOATTYPE ## _squash_input_denormal(a, fpst); \
+ b = FLOATTYPE ## _squash_input_denormal(b, fpst); \
+ a = FLOATTYPE ## _ ## CHSFN(a); \
+ if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \
+ (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \
+ return FLOATTYPE ## _one_point_five; \
+ } \
+ return FLOATTYPE ## _muladd_scalbn(a, b, FLOATTYPE ## _three, \
+ -1, 0, fpst); \
+ } \
+
+DO_RSQRTSF(rsqrtsf_f16, uint32_t, float16, chs)
+DO_RSQRTSF(rsqrtsf_f32, float32, float32, chs)
+DO_RSQRTSF(rsqrtsf_f64, float64, float64, chs)
+DO_RSQRTSF(rsqrtsf_ah_f16, uint32_t, float16, ah_chs)
+DO_RSQRTSF(rsqrtsf_ah_f32, float32, float32, ah_chs)
+DO_RSQRTSF(rsqrtsf_ah_f64, float64, float64, ah_chs)
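In plain arithmetic the two macro families compute FRECPS(a, b) = 2 - a*b and FRSQRTS(a, b) = (3 - a*b) / 2 (fused, with the infinity-times-zero special cases), i.e. the correction factors of the Newton-Raphson refinements for 1/d and 1/sqrt(d). An unfused scalar demonstration of that algebra, not a model of the helpers themselves:

#include <assert.h>
#include <math.h>

static double recps(double a, double b)  { return 2.0 - a * b; }
static double rsqrts(double a, double b) { return (3.0 - a * b) / 2.0; }

int main(void)
{
    double d = 7.0;
    double x = 0.1;                      /* rough estimate of 1/7 */
    double y = 0.3;                      /* rough estimate of 1/sqrt(7) */

    for (int i = 0; i < 5; i++) {
        x *= recps(d, x);                /* x *= 2 - d*x */
        y *= rsqrts(d, y * y);           /* y *= (3 - d*y*y)/2 */
    }
    assert(fabs(x - 1.0 / d) < 1e-12);
    assert(fabs(y - 1.0 / sqrt(d)) < 1e-12);
    return 0;
}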
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
-uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
+uint32_t HELPER(frecpx_f16)(uint32_t a, float_status *fpst)
{
- float_status *fpst = fpstp;
uint16_t val16, sbit;
int16_t exp;
@@ -401,9 +292,8 @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
}
}
-float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
+float32 HELPER(frecpx_f32)(float32 a, float_status *fpst)
{
- float_status *fpst = fpstp;
uint32_t val32, sbit;
int32_t exp;
@@ -434,9 +324,8 @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
}
}
-float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
+float64 HELPER(frecpx_f64)(float64 a, float_status *fpst)
{
- float_status *fpst = fpstp;
uint64_t val64, sbit;
int64_t exp;
@@ -467,28 +356,53 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
}
}
-float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
+float32 HELPER(fcvtx_f64_to_f32)(float64 a, float_status *fpst)
{
- /* Von Neumann rounding is implemented by using round-to-zero
- * and then setting the LSB of the result if Inexact was raised.
- */
float32 r;
- float_status *fpst = &env->vfp.fp_status;
- float_status tstat = *fpst;
- int exflags;
-
- set_float_rounding_mode(float_round_to_zero, &tstat);
- set_float_exception_flags(0, &tstat);
- r = float64_to_float32(a, &tstat);
- exflags = get_float_exception_flags(&tstat);
- if (exflags & float_flag_inexact) {
- r = make_float32(float32_val(r) | 1);
- }
- exflags |= get_float_exception_flags(fpst);
- set_float_exception_flags(exflags, fpst);
+ int old = get_float_rounding_mode(fpst);
+
+ set_float_rounding_mode(float_round_to_odd, fpst);
+ r = float64_to_float32(a, fpst);
+ set_float_rounding_mode(old, fpst);
return r;
}
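The rewrite above replaces the hand-rolled Von Neumann rounding (truncate toward zero, then set the result LSB if the conversion was inexact) with softfloat's float_round_to_odd, which performs the same operation. A portable model of the old scheme for positive finite inputs, purely illustrative (fcvtx_model and the test values are invented here, not QEMU code):

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

static float fcvtx_model(double a)
{
    float r = (float)a;                  /* round-to-nearest */
    uint32_t bits;

    if ((double)r > a) {
        r = nextafterf(r, 0.0f);         /* step down to the truncated value */
    }
    if ((double)r != a) {                /* inexact: force the mantissa LSB */
        memcpy(&bits, &r, sizeof(bits));
        bits |= 1;
        memcpy(&r, &bits, sizeof(r));
    }
    return r;
}

int main(void)
{
    /* 1 + 2^-40 is inexact in float32: truncation gives 1.0f and the forced
     * odd LSB turns that into 1 + 2^-23. */
    assert(fcvtx_model(1.0 + 0x1p-40) == 1.0f + 0x1p-23f);
    assert(fcvtx_model(0.5) == 0.5f);    /* exact conversions pass through */
    return 0;
}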
+/*
+ * AH=1 min/max have some odd special cases:
+ * comparing two zeroes (regardless of sign), (NaN, anything),
+ * or (anything, NaN) should return the second argument (possibly
+ * squashed to zero).
+ * Also, denormal outputs are not squashed to zero regardless of FZ or FZ16.
+ */
+#define AH_MINMAX_HELPER(NAME, CTYPE, FLOATTYPE, MINMAX) \
+ CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \
+ { \
+ bool save; \
+ CTYPE r; \
+ a = FLOATTYPE ## _squash_input_denormal(a, fpst); \
+ b = FLOATTYPE ## _squash_input_denormal(b, fpst); \
+ if (FLOATTYPE ## _is_zero(a) && FLOATTYPE ## _is_zero(b)) { \
+ return b; \
+ } \
+ if (FLOATTYPE ## _is_any_nan(a) || \
+ FLOATTYPE ## _is_any_nan(b)) { \
+ float_raise(float_flag_invalid, fpst); \
+ return b; \
+ } \
+ save = get_flush_to_zero(fpst); \
+ set_flush_to_zero(false, fpst); \
+ r = FLOATTYPE ## _ ## MINMAX(a, b, fpst); \
+ set_flush_to_zero(save, fpst); \
+ return r; \
+ }
+
+AH_MINMAX_HELPER(vfp_ah_minh, dh_ctype_f16, float16, min)
+AH_MINMAX_HELPER(vfp_ah_mins, float32, float32, min)
+AH_MINMAX_HELPER(vfp_ah_mind, float64, float64, min)
+AH_MINMAX_HELPER(vfp_ah_maxh, dh_ctype_f16, float16, max)
+AH_MINMAX_HELPER(vfp_ah_maxs, float32, float32, max)
+AH_MINMAX_HELPER(vfp_ah_maxd, float64, float64, max)
+
/* 64-bit versions of the CRC helpers. Note that although the operation
* (and the prototypes of crc32c() and crc32() mean that only the bottom
* 32 bits of the accumulator and result are used, we pass and return
@@ -524,27 +438,17 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))
#define ADVSIMD_HALFOP(name) \
-uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
+uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, float_status *fpst) \
{ \
- float_status *fpst = fpstp; \
return float16_ ## name(a, b, fpst); \
}
-ADVSIMD_HALFOP(add)
-ADVSIMD_HALFOP(sub)
-ADVSIMD_HALFOP(mul)
-ADVSIMD_HALFOP(div)
-ADVSIMD_HALFOP(min)
-ADVSIMD_HALFOP(max)
-ADVSIMD_HALFOP(minnum)
-ADVSIMD_HALFOP(maxnum)
-
#define ADVSIMD_TWOHALFOP(name) \
-uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
+uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, \
+ float_status *fpst) \
{ \
float16 a1, a2, b1, b2; \
uint32_t r1, r2; \
- float_status *fpst = fpstp; \
a1 = extract32(two_a, 0, 16); \
a2 = extract32(two_a, 16, 16); \
b1 = extract32(two_b, 0, 16); \
@@ -564,10 +468,8 @@ ADVSIMD_TWOHALFOP(minnum)
ADVSIMD_TWOHALFOP(maxnum)
/* Data processing - scalar floating-point and advanced SIMD */
-static float16 float16_mulx(float16 a, float16 b, void *fpstp)
+static float16 float16_mulx(float16 a, float16 b, float_status *fpst)
{
- float_status *fpst = fpstp;
-
a = float16_squash_input_denormal(a, fpst);
b = float16_squash_input_denormal(b, fpst);
@@ -585,16 +487,14 @@ ADVSIMD_TWOHALFOP(mulx)
/* fused multiply-accumulate */
uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
- void *fpstp)
+ float_status *fpst)
{
- float_status *fpst = fpstp;
return float16_muladd(a, b, c, 0, fpst);
}
uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
- uint32_t two_c, void *fpstp)
+ uint32_t two_c, float_status *fpst)
{
- float_status *fpst = fpstp;
float16 a1, a2, b1, b2, c1, c2;
uint32_t r1, r2;
a1 = extract32(two_a, 0, 16);
@@ -616,31 +516,27 @@ uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0
-uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
int compare = float16_compare_quiet(a, b, fpst);
return ADVSIMD_CMPRES(compare == float_relation_equal);
}
-uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
int compare = float16_compare(a, b, fpst);
return ADVSIMD_CMPRES(compare == float_relation_greater ||
compare == float_relation_equal);
}
-uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
int compare = float16_compare(a, b, fpst);
return ADVSIMD_CMPRES(compare == float_relation_greater);
}
-uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float16 f0 = float16_abs(a);
float16 f1 = float16_abs(b);
int compare = float16_compare(f0, f1, fpst);
@@ -648,9 +544,8 @@ uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
compare == float_relation_equal);
}
-uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float16 f0 = float16_abs(a);
float16 f1 = float16_abs(b);
int compare = float16_compare(f0, f1, fpst);
@@ -658,12 +553,12 @@ uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
}
/* round to integral */
-uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
+uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, float_status *fp_status)
{
return float16_round_to_int(x, fp_status);
}
-uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
+uint32_t HELPER(advsimd_rinth)(uint32_t x, float_status *fp_status)
{
int old_flags = get_float_exception_flags(fp_status), new_flags;
float16 ret;
@@ -679,38 +574,6 @@ uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
return ret;
}
-/*
- * Half-precision floating point conversion functions
- *
- * There are a multitude of conversion functions with various
- * different rounding modes. This is dealt with by the calling code
- * setting the mode appropriately before calling the helper.
- */
-
-uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- /* Invalid if we are passed a NaN */
- if (float16_is_any_nan(a)) {
- float_raise(float_flag_invalid, fpst);
- return 0;
- }
- return float16_to_int16(a, fpst);
-}
-
-uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
-{
- float_status *fpst = fpstp;
-
- /* Invalid if we are passed a NaN */
- if (float16_is_any_nan(a)) {
- float_raise(float_flag_invalid, fpst);
- return 0;
- }
- return float16_to_uint16(a, fpst);
-}
-
static int el_from_spsr(uint32_t spsr)
{
/* Return the exception level that this SPSR is requesting a return to,
@@ -771,6 +634,7 @@ static void cpsr_write_from_spsr_elx(CPUARMState *env,
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
+ ARMCPU *cpu = env_archcpu(env);
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
@@ -817,12 +681,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
+ if (!return_to_aa64 && !cpu_isar_feature(aa64_aa32, cpu)) {
+ /* Return to AArch32 when CPU is AArch64-only */
+ goto illegal_return;
+ }
+
if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
bql_lock();
- arm_call_pre_el_change_hook(env_archcpu(env));
+ arm_call_pre_el_change_hook(cpu);
bql_unlock();
if (!return_to_aa64) {
@@ -850,7 +719,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
int tbii;
env->aarch64 = true;
- spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
+ spsr &= aarch64_pstate_valid_mask(&cpu->isar);
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
@@ -889,7 +758,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
bql_lock();
- arm_call_el_change_hook(env_archcpu(env));
+ arm_call_el_change_hook(cpu);
bql_unlock();
return;
@@ -915,19 +784,10 @@ illegal_return:
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
-/*
- * Square Root and Reciprocal square root
- */
-
-uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
-{
- float_status *s = fpstp;
-
- return float16_sqrt(a, s);
-}
-
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
+ uintptr_t ra = GETPC();
+
/*
* Implement DC ZVA, which zeroes a fixed-length block of memory.
* Note that we do not implement the (architecturally mandated)
@@ -948,8 +808,6 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
#ifndef CONFIG_USER_ONLY
if (unlikely(!mem)) {
- uintptr_t ra = GETPC();
-
/*
* Trap if accessing an invalid page. DC_ZVA requires that we supply
* the original pointer for an invalid page. But watchpoints require
@@ -971,7 +829,9 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
}
#endif
+ set_helper_retaddr(ra);
memset(mem, 0, blocklen);
+ clear_helper_retaddr();
}
void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr,
@@ -1120,7 +980,9 @@ static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
}
#endif
/* Easy case: just memset the host memory */
+ set_helper_retaddr(ra);
memset(mem, data, setsize);
+ clear_helper_retaddr();
return setsize;
}
@@ -1163,7 +1025,9 @@ static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
}
#endif
/* Easy case: just memset the host memory */
+ set_helper_retaddr(ra);
memset(mem, data, setsize);
+ clear_helper_retaddr();
mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
return setsize;
}
@@ -1286,7 +1150,6 @@ static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
env->ZF = 1; /* our env->ZF encoding is inverted */
env->CF = 0;
env->VF = 0;
- return;
}
void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
@@ -1342,7 +1205,7 @@ static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
/* Do the actual memset: we leave the last partial page to SETE */
stagesetsize = setsize & TARGET_PAGE_MASK;
while (stagesetsize > 0) {
- step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
+ step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
stagesetsize -= step;
@@ -1497,7 +1360,9 @@ static uint64_t copy_step(CPUARMState *env, uint64_t toaddr, uint64_t fromaddr,
}
#endif
/* Easy case: just memmove the host memory */
+ set_helper_retaddr(ra);
memmove(wmem, rmem, copysize);
+ clear_helper_retaddr();
return copysize;
}
@@ -1572,7 +1437,9 @@ static uint64_t copy_step_rev(CPUARMState *env, uint64_t toaddr,
* Easy case: just memmove the host memory. Note that wmem and
* rmem here point to the *last* byte to copy.
*/
+ set_helper_retaddr(ra);
memmove(wmem - (copysize - 1), rmem - (copysize - 1), copysize);
+ clear_helper_retaddr();
return copysize;
}
@@ -1682,7 +1549,6 @@ static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
env->ZF = 1; /* our env->ZF encoding is inverted */
env->CF = 0;
env->VF = 0;
- return;
}
void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
@@ -1867,3 +1733,42 @@ void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
{
do_cpye(env, syndrome, wdesc, rdesc, false, GETPC());
}
+
+static bool is_guarded_page(CPUARMState *env, target_ulong addr, uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ return page_get_flags(addr) & PAGE_BTI;
+#else
+ CPUTLBEntryFull *full;
+ void *host;
+ int mmu_idx = cpu_mmu_index(env_cpu(env), true);
+ int flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
+ false, &host, &full, ra);
+
+ assert(!(flags & TLB_INVALID_MASK));
+ return full->extra.arm.guarded;
+#endif
+}
+
+void HELPER(guarded_page_check)(CPUARMState *env)
+{
+ /*
+ * We have already verified that bti is enabled, and that the
+ * instruction at PC is not ok for BTYPE. This is always at
+ * the beginning of a block, so PC is always up-to-date and
+ * no unwind is required.
+ */
+ if (is_guarded_page(env, env->pc, 0)) {
+ raise_exception(env, EXCP_UDEF, syn_btitrap(env->btype),
+ exception_target_el(env));
+ }
+}
+
+void HELPER(guarded_page_br)(CPUARMState *env, target_ulong pc)
+{
+ /*
+ * We have already checked for branch via x16 and x17.
+ * What remains for choosing BTYPE is checking for a guarded page.
+ */
+ env->btype = is_guarded_page(env, pc, GETPC()) ? 3 : 1;
+}
diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h
index 371388f..8502346 100644
--- a/target/arm/tcg/helper-a64.h
+++ b/target/arm/tcg/helper-a64.h
@@ -23,64 +23,62 @@ DEF_HELPER_2(msr_i_spsel, void, env, i32)
DEF_HELPER_2(msr_i_daifset, void, env, i32)
DEF_HELPER_2(msr_i_daifclear, void, env, i32)
DEF_HELPER_1(msr_set_allint_el1, void, env)
-DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr)
-DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr)
-DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
-DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
-DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
-DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
-DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
-DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
-DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
-DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
-DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
-DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
-DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
-DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
-DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
-DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
-DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
-DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
-DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
+DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, fpst)
+DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, fpst)
+DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, fpst)
+DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, fpst)
+DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, fpst)
+DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, fpst)
+DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, fpst)
+DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, fpst)
+DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst)
+DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst)
+DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst)
+DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst)
+DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst)
+DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst)
+DEF_HELPER_FLAGS_3(recpsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst)
+DEF_HELPER_FLAGS_3(recpsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst)
+DEF_HELPER_FLAGS_3(recpsf_ah_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst)
+DEF_HELPER_FLAGS_3(rsqrtsf_ah_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, fpst)
DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
-DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_FLAGS_3(advsimd_minnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_addh, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_subh, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_mulh, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_divh, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, ptr)
-DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, ptr)
-DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, ptr)
-DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, ptr)
-DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, ptr)
-DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, ptr)
-DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, ptr)
-DEF_HELPER_3(advsimd_add2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_div2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_max2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_min2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, ptr)
-DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, ptr)
-DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, ptr)
-DEF_HELPER_2(advsimd_rinth_exact, f16, f16, ptr)
-DEF_HELPER_2(advsimd_rinth, f16, f16, ptr)
-DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr)
-DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr)
-DEF_HELPER_2(sqrt_f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, fpst)
+DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, fpst)
+DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, fpst)
+DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, fpst)
+DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, fpst)
+DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, fpst)
+DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, fpst)
+DEF_HELPER_3(advsimd_add2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_div2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_max2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_min2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, fpst)
+DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, fpst)
+DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, fpst)
+DEF_HELPER_2(advsimd_rinth_exact, f16, f16, fpst)
+DEF_HELPER_2(advsimd_rinth, f16, f16, fpst)
+
+DEF_HELPER_3(vfp_ah_minh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_ah_mins, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_ah_mind, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst)
DEF_HELPER_2(exception_return, void, env, i64)
DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64)
@@ -133,14 +131,17 @@ DEF_HELPER_4(cpyfp, void, env, i32, i32, i32)
DEF_HELPER_4(cpyfm, void, env, i32, i32, i32)
DEF_HELPER_4(cpyfe, void, env, i32, i32, i32)
-DEF_HELPER_FLAGS_5(gvec_fdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_1(guarded_page_check, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_2(guarded_page_br, TCG_CALL_NO_RWG, void, env, tl)
+
+DEF_HELPER_FLAGS_5(gvec_fdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
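
The bulk of this header change — like the SVE and SME hunks that follow — retypes the trailing float-status argument from the generic ptr to the new fpst helper type, so the generated C prototypes take a float_status * directly rather than a void * that each helper body had to cast. As a hedged sketch of the effect on the helper side (assuming fpst expands to float_status *; the FMULX special case shown follows the architectural 0 * inf = +/-2.0 rule, and the body in the tree may differ in detail):

float32 HELPER(vfp_mulxs)(float32 a, float32 b, float_status *fpst)
{
    /* FMULX: as FMUL, except 0 * inf returns 2.0 with sign(a) ^ sign(b). */
    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}
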
diff --git a/target/arm/tcg/helper-sme.h b/target/arm/tcg/helper-sme.h
index 27eef49..858d691 100644
--- a/target/arm/tcg/helper-sme.h
+++ b/target/arm/tcg/helper-sme.h
@@ -121,13 +121,13 @@ DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_bfmopa, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h
index cc4e1d8..0b1b588 100644
--- a/target/arm/tcg/helper-sve.h
+++ b/target/arm/tcg/helper-sve.h
@@ -541,10 +541,18 @@ DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_not_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -959,433 +967,545 @@ DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_recps_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_recps_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_recps_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fmax_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmax_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmax_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fmin_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmin_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmin_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fminp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fminp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fminp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_faddv_d, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxnmv_h, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxnmv_s, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxnmv_d, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminnmv_h, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminnmv_s, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminnmv_d, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxv_h, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxv_s, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fmaxv_d, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminv_h, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG,
- i64, ptr, ptr, ptr, i32)
+ i64, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(sve_ah_fmaxv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fmaxv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fmaxv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(sve_ah_fminv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fminv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sve_ah_fminv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG,
- i64, i64, ptr, ptr, ptr, i32)
+ i64, i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG,
- i64, i64, ptr, ptr, ptr, i32)
+ i64, i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fadda_d, TCG_CALL_NO_RWG,
- i64, i64, ptr, ptr, ptr, i32)
+ i64, i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmge0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmge0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmge0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmgt0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmgt0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmgt0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmlt0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmlt0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmlt0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmle0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmle0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmle0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmeq0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmeq0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmeq0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmne0_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmul_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fdiv_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fdiv_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sve_ah_fmin_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmin_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmin_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sve_ah_fmax_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmax_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmax_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnum_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fabd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sve_ah_fabd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fabd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fabd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fscalbn_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmulx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmulx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmulx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadds_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadds_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fadds_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubs_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubs_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubs_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmuls_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmuls_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmuls_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubrs_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubrs_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fsubrs_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnms_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnms_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxnms_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnms_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnms_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fminnms_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxs_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxs_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmaxs_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmins_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i64, ptr, i32)
+ void, ptr, ptr, ptr, i64, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sve_ah_fmaxs_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmaxs_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmaxs_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sve_ah_fmins_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmins_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
+DEF_HELPER_FLAGS_6(sve_ah_fmins_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_hs, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_bfcvt, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_hs, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_ss, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_hd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzs_dd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_hh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_hs, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_ss, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_hd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fcvtzu_dd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frint_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frint_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frint_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frintx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frintx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frintx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frecpx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frecpx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_frecpx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fsqrt_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fsqrt_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fsqrt_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_hh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_sh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_dh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_ss, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_scvt_dd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_hh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_sh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_dh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_ss, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmge_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmge_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmge_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmgt_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmgt_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmgt_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmeq_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmeq_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmeq_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmne_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmne_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmne_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmuo_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmuo_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcmuo_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facge_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facge_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facge_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facgt_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -2582,39 +2702,39 @@ DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -2682,8 +2802,8 @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
@@ -2755,20 +2875,20 @@ DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_bfcvtnt, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve2_fcvtlt_sd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+ void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, i32)
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
new file mode 100644
index 0000000..80db7c2
--- /dev/null
+++ b/target/arm/tcg/helper.h
@@ -0,0 +1,1153 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
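/*
 * For reference, each instantiation of the two-operand PAS_OP above
 * expands to six plain declarations; PAS_OP(q), for example, yields:
 *
 *     DEF_HELPER_2(qadd8, i32, i32, i32)
 *     DEF_HELPER_2(qsub8, i32, i32, i32)
 *     DEF_HELPER_2(qsub16, i32, i32, i32)
 *     DEF_HELPER_2(qadd16, i32, i32, i32)
 *     DEF_HELPER_2(qaddsubx, i32, i32, i32)
 *     DEF_HELPER_2(qsubaddx, i32, i32, i32)
 *
 * (sh, uq and uh expand the same way; this comment is an annotation,
 * not a line added by the patch.)
 */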
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+ i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, noreturn, env, i32)
+DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
+DEF_HELPER_2(exception_swstep, noreturn, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, vaddr)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_2(wfi, void, env, i32)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_2(wfit, void, env, i64)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+DEF_HELPER_1(vesb, void, env)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
+
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
+DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
+DEF_HELPER_2(get_cp_reg, i32, env, cptr)
+DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
+DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
+
+DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, vaddr, i32, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst)
+DEF_HELPER_2(vfp_sqrth, f16, f16, fpst)
+DEF_HELPER_2(vfp_sqrts, f32, f32, fpst)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst)
+DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst)
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst)
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst)
+
+DEF_HELPER_2(vfp_uitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_uitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_uitod, f64, i32, fpst)
+DEF_HELPER_2(vfp_sitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_sitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_sitod, f64, i32, fpst)
+
+DEF_HELPER_2(vfp_touih, i32, f16, fpst)
+DEF_HELPER_2(vfp_touis, i32, f32, fpst)
+DEF_HELPER_2(vfp_touid, i32, f64, fpst)
+DEF_HELPER_2(vfp_touizh, i32, f16, fpst)
+DEF_HELPER_2(vfp_touizs, i32, f32, fpst)
+DEF_HELPER_2(vfp_touizd, i32, f64, fpst)
+DEF_HELPER_2(vfp_tosih, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosis, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosid, s32, f64, fpst)
+DEF_HELPER_2(vfp_tosizh, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosizs, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosizd, s32, f64, fpst)
+
+DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst)
+
+DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32)
+
+DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst)
+DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst)
+DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst)
+
+DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
+
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
+DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
+
+/* neon_helper.c */
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
+DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
+DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
+
+DEF_HELPER_1(neon_narrow_u8, i64, i64)
+DEF_HELPER_1(neon_narrow_u16, i64, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst)
+DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst)
+
+/* iwmmxt_helper.c */
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index f03977b..1ccec63 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -9,9 +9,13 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
#include "cpregs.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
static inline bool fgt_svc(CPUARMState *env, int el)
{
/*
@@ -63,6 +67,15 @@ static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
#endif
}
+bool access_secure_reg(CPUARMState *env)
+{
+ bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) &&
+ !(env->cp15.scr_el3 & SCR_NS));
+
+ return ret;
+}
+
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
ARMMMUIdx mmu_idx,
CPUARMTBFlags flags)
@@ -404,6 +417,19 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
}
+ if (env->vfp.fpcr & FPCR_AH) {
+ DP_TBFLAG_A64(flags, AH, 1);
+ }
+ if (env->vfp.fpcr & FPCR_NEP) {
+ /*
+ * In streaming-SVE without FA64, NEP behaves as if zero;
+ * compare pseudocode IsMerging()
+ */
+ if (!(EX_TBFLAG_A64(flags, PSTATE_SM) && !sme_fa64(env, el))) {
+ DP_TBFLAG_A64(flags, NEP, 1);
+ }
+ }
+
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
@@ -476,7 +502,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
-void assert_hflags_rebuild_correctly(CPUARMState *env)
+static void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
CPUARMTBFlags c = env->hflags;
@@ -484,10 +510,123 @@ void assert_hflags_rebuild_correctly(CPUARMState *env)
if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
fprintf(stderr, "TCG hflags mismatch "
- "(current:(0x%08x,0x" TARGET_FMT_lx ")"
- " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ "(current:(0x%08x,0x%016" PRIx64 ")"
+ " rebuilt:(0x%08x,0x%016" PRIx64 ")\n",
c.flags, c.flags2, r.flags, r.flags2);
abort();
}
#endif
}
+
+static bool mve_no_pred(CPUARMState *env)
+{
+ /*
+ * Return true if there is definitely no predication of MVE
+ * instructions by VPR or LTPSIZE. (Returning false even if there
+ * isn't any predication is OK; generated code will just be
+ * a little worse.)
+ * If the CPU does not implement MVE then this TB flag is always 0.
+ *
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+ * logic in gen_update_fp_context() needs to be updated to match.
+ *
+ * We do not include the effect of the ECI bits here -- they are
+ * tracked in other TB flags. This simplifies the logic for
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
+ * and thus need to end the TB?".
+ */
+ if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+ return false;
+ }
+ if (env->v7m.vpr) {
+ return false;
+ }
+ if (env->v7m.ltpsize < 4) {
+ return false;
+ }
+ return true;
+}
+
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs)
+{
+ CPUARMState *env = cpu_env(cs);
+ CPUARMTBFlags flags;
+ vaddr pc;
+
+ assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;
+
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
+ pc = env->pc;
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
+ }
+ } else {
+ pc = env->regs[15];
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
+ }
+
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ DP_TBFLAG_M32(flags, LSPACT, 1);
+ }
+
+ if (mve_no_pred(env)) {
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+ }
+ } else {
+ /*
+ * Note that XSCALE_CPAR shares bits with VECSTRIDE.
+ * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
+ */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
+ } else {
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+ }
+
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
+ }
+
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
+ */
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
+ }
+
+ return (TCGTBCPUState){
+ .pc = pc,
+ .flags = flags.flags,
+ .cs_base = flags.flags2,
+ };
+}
diff --git a/target/arm/tcg/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c
index 610b1b2..ba054b6 100644
--- a/target/arm/tcg/iwmmxt_helper.c
+++ b/target/arm/tcg/iwmmxt_helper.c
@@ -22,7 +22,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/* iwMMXt macros extracted from GNU gdb. */
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 23d7f73..6614719 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -15,10 +15,9 @@
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -222,7 +221,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
int exc;
bool exc_secure;
- if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
if (mode == STACK_LAZYFP) {
@@ -311,7 +310,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
bool exc_secure;
uint32_t value;
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
qemu_log_mask(CPU_LOG_INT,
@@ -2009,7 +2008,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
"...really SecureFault with SFSR.INVEP\n");
return false;
}
- if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
/* the MPU lookup failed */
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -2045,7 +2044,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
ARMMMUFaultInfo fi = {};
uint32_t value;
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
qemu_log_mask(CPU_LOG_INT,
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index 508932a..c59f0f0 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -30,14 +30,9 @@ arm_ss.add(files(
'translate-mve.c',
'translate-neon.c',
'translate-vfp.c',
- 'crypto_helper.c',
- 'hflags.c',
- 'iwmmxt_helper.c',
'm_helper.c',
'mve_helper.c',
- 'neon_helper.c',
'op_helper.c',
- 'tlb_helper.c',
'vec_helper.c',
))
@@ -60,3 +55,26 @@ arm_system_ss.add(files(
arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c'))
arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c'))
+
+arm_common_ss.add(zlib)
+
+arm_common_ss.add(files(
+ 'arith_helper.c',
+ 'crypto_helper.c',
+))
+
+arm_common_system_ss.add(files(
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'tlb-insns.c',
+ 'vfp_helper.c',
+))
+arm_user_ss.add(files(
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'vfp_helper.c',
+))
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 9d2ba28..0efc18a 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -21,17 +21,22 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
-#include "exec/ram_addr.h"
-#include "exec/cpu_ldst.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/cpu_loop.h"
+#include "user/page-protection.h"
+#else
+#include "system/ram_addr.h"
+#endif
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "mte_helper.h"
-
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
if (exclude == 0xffff) {
@@ -57,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
+ const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
uint64_t clean_ptr = useronly_clean_ptr(ptr);
int flags = page_get_flags(clean_ptr);
uint8_t *tags;
@@ -77,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
return NULL;
}
- tags = page_get_target_data(clean_ptr);
+ tags = page_get_target_data(clean_ptr, page_data_size);
index = extract32(ptr, LOG2_TAG_GRANULE + 1,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index 03ebef5..506d1c3 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -22,8 +22,7 @@
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"
@@ -2814,8 +2813,7 @@ DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -2888,8 +2886,7 @@ DO_2OP_FP_ALL(vminnma, minnuma)
r[e] = 0; \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(tm & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -2926,8 +2923,7 @@ DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -2964,8 +2960,7 @@ DO_VFMA(vfmss, 4, float32, true)
if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \
continue; \
} \
- fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst0 = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
fpst1 = fpst0; \
if (!(mask & 1)) { \
scratch_fpst = *fpst0; \
@@ -3049,8 +3044,7 @@ DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -3084,8 +3078,7 @@ DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -3116,9 +3109,8 @@ DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)
unsigned e; \
TYPE *m = vm; \
TYPE ra = (TYPE)ra_in; \
- float_status *fpst = (ESIZE == 2) ? \
- &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ float_status *fpst = \
+ &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
if (mask & 1) { \
TYPE v = m[H##ESIZE(e)]; \
@@ -3168,8 +3160,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)
if ((mask & emask) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & (1 << (e * ESIZE)))) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -3202,8 +3193,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)
if ((mask & emask) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & (1 << (e * ESIZE)))) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -3267,8 +3257,7 @@ DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
@@ -3300,9 +3289,8 @@ DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)
unsigned e; \
float_status *fpst; \
float_status scratch_fpst; \
- float_status *base_fpst = (ESIZE == 2) ? \
- &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ float_status *base_fpst = \
+ &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \
set_float_rounding_mode(rmode, base_fpst); \
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
@@ -3347,7 +3335,7 @@ static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
unsigned e;
float_status *fpst;
float_status scratch_fpst;
- float_status *base_fpst = &env->vfp.standard_fp_status;
+ float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
bool old_fz = get_flush_to_zero(base_fpst);
set_flush_to_zero(false, base_fpst);
for (e = 0; e < 16 / 4; e++, mask >>= 4) {
@@ -3377,7 +3365,7 @@ static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
unsigned e;
float_status *fpst;
float_status scratch_fpst;
- float_status *base_fpst = &env->vfp.standard_fp_status;
+ float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
bool old_fiz = get_flush_inputs_to_zero(base_fpst);
set_flush_inputs_to_zero(false, base_fpst);
for (e = 0; e < 16 / 4; e++, mask >>= 4) {
@@ -3427,8 +3415,7 @@ void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
continue; \
} \
- fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
- &env->vfp.standard_fp_status; \
+ fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
if (!(mask & 1)) { \
/* We need the result but without updating flags */ \
scratch_fpst = *fpst; \
diff --git a/target/arm/tcg/neon-dp.decode b/target/arm/tcg/neon-dp.decode
index 788578c..e883c6a 100644
--- a/target/arm/tcg/neon-dp.decode
+++ b/target/arm/tcg/neon-dp.decode
@@ -291,17 +291,17 @@ VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_s
VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_h
VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_b
-VQSHLU_64_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d
+VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_s
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_h
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_b
-VQSHL_S_64_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
+VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b
-VQSHL_U_64_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
+VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b
diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c
index 082bfd8..2cc8241 100644
--- a/target/arm/tcg/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
@@ -9,11 +9,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
@@ -130,17 +132,28 @@ void HELPER(name)(void *vd, void *vn, void *vm, uint32_t desc) \
}
#define NEON_GVEC_VOP2_ENV(name, vtype) \
-void HELPER(name)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) \
+void HELPER(name)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) \
{ \
intptr_t i, opr_sz = simd_oprsz(desc); \
vtype *d = vd, *n = vn, *m = vm; \
- CPUARMState *env = venv; \
for (i = 0; i < opr_sz / sizeof(vtype); i++) { \
NEON_FN(d[i], n[i], m[i]); \
} \
clear_tail(d, opr_sz, simd_maxsz(desc)); \
}
+#define NEON_GVEC_VOP2i_ENV(name, vtype) \
+void HELPER(name)(void *vd, void *vn, CPUARMState *env, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int imm = simd_data(desc); \
+ vtype *d = vd, *n = vn; \
+ for (i = 0; i < opr_sz / sizeof(vtype); i++) { \
+ NEON_FN(d[i], n[i], imm); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
the elementwise and pairwise operations are the same. */
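The new NEON_GVEC_VOP2i_ENV macro above follows the usual generator pattern: it stamps out one helper per element type which applies NEON_FN between each element and an immediate taken from simd_data(desc). A stripped-down standalone sketch of the same pattern; the OP stand-in, the missing env/desc plumbing and the function names are simplifications, not the QEMU code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OP(x, imm) ((x) << (imm))               /* stand-in for NEON_FN */

#define GEN_VOP2I(name, vtype)                                      \
static void name(void *vd, const void *vn, int imm, size_t oprsz)   \
{                                                                   \
    vtype *d = vd;                                                  \
    const vtype *n = vn;                                            \
    for (size_t i = 0; i < oprsz / sizeof(vtype); i++) {            \
        d[i] = OP(n[i], imm);                                       \
    }                                                               \
}

GEN_VOP2I(shli_b, uint8_t)
GEN_VOP2I(shli_h, uint16_t)

int main(void)
{
    uint8_t n8[4] = { 1, 2, 3, 4 }, d8[4];
    uint16_t n16[2] = { 0x0102, 0x0304 }, d16[2];

    shli_b(d8, n8, 3, sizeof(n8));
    shli_h(d16, n16, 4, sizeof(n16));
    printf("%u %u %u %u\n", d8[0], d8[1], d8[2], d8[3]);
    printf("%#x %#x\n", d16[0], d16[1]);
    return 0;
}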
@@ -271,22 +284,26 @@ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_GVEC_VOP2_ENV(neon_uqshl_b, uint8_t)
+NEON_GVEC_VOP2i_ENV(neon_uqshli_b, uint8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_GVEC_VOP2_ENV(neon_uqshl_h, uint16_t)
+NEON_GVEC_VOP2i_ENV(neon_uqshli_h, uint16_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc))
NEON_GVEC_VOP2_ENV(neon_uqshl_s, uint32_t)
+NEON_GVEC_VOP2i_ENV(neon_uqshli_s, uint32_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_d(src1, (int8_t)src2, false, env->vfp.qc))
NEON_GVEC_VOP2_ENV(neon_uqshl_d, uint64_t)
+NEON_GVEC_VOP2i_ENV(neon_uqshli_d, uint64_t)
#undef NEON_FN
uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -303,22 +320,26 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_GVEC_VOP2_ENV(neon_sqshl_b, int8_t)
+NEON_GVEC_VOP2i_ENV(neon_sqshli_b, int8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_GVEC_VOP2_ENV(neon_sqshl_h, int16_t)
+NEON_GVEC_VOP2i_ENV(neon_sqshli_h, int16_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc))
NEON_GVEC_VOP2_ENV(neon_sqshl_s, int32_t)
+NEON_GVEC_VOP2i_ENV(neon_sqshli_s, int32_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_d(src1, (int8_t)src2, false, env->vfp.qc))
NEON_GVEC_VOP2_ENV(neon_sqshl_d, int64_t)
+NEON_GVEC_VOP2i_ENV(neon_sqshli_d, int64_t)
#undef NEON_FN
uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -334,11 +355,13 @@ uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshlu_s8, neon_s8, 4)
+NEON_GVEC_VOP2i_ENV(neon_sqshlui_b, int8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshlu_s16, neon_s16, 2)
+NEON_GVEC_VOP2i_ENV(neon_sqshlui_h, int16_t)
#undef NEON_FN
uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -352,6 +375,16 @@ uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
}
#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc))
+NEON_GVEC_VOP2i_ENV(neon_sqshlui_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_d(src1, (int8_t)src2, false, env->vfp.qc))
+NEON_GVEC_VOP2i_ENV(neon_sqshlui_d, int64_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_GVEC_VOP2_ENV(neon_uqrshl_b, uint8_t)
@@ -492,27 +525,6 @@ uint32_t HELPER(neon_cls_s32)(uint32_t x)
return count - 1;
}
-/* Bit count. */
-uint32_t HELPER(neon_cnt_u8)(uint32_t x)
-{
- x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
- x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
- x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
- return x;
-}
-
-/* Reverse bits in each 8 bit word */
-uint32_t HELPER(neon_rbit_u8)(uint32_t x)
-{
- x = ((x & 0xf0f0f0f0) >> 4)
- | ((x & 0x0f0f0f0f) << 4);
- x = ((x & 0x88888888) >> 3)
- | ((x & 0x44444444) >> 1)
- | ((x & 0x22222222) << 1)
- | ((x & 0x11111111) << 3);
- return x;
-}
-
#define NEON_QDMULH16(dest, src1, src2, round) do { \
uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
@@ -565,13 +577,15 @@ NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
-uint32_t HELPER(neon_narrow_u8)(uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_u8)(uint64_t x)
{
return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
| ((x >> 24) & 0xff000000u);
}
-uint32_t HELPER(neon_narrow_u16)(uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_u16)(uint64_t x)
{
return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}
@@ -602,7 +616,8 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
-uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
uint16_t s;
uint8_t d;
@@ -629,7 +644,8 @@ uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
uint16_t s;
uint8_t d;
@@ -652,7 +668,8 @@ uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
int16_t s;
uint8_t d;
@@ -675,7 +692,8 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
uint32_t high;
uint32_t low;
@@ -695,10 +713,11 @@ uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
high = 0xffff;
SET_QC();
}
- return low | (high << 16);
+ return deposit32(low, 16, 16, high);
}
-uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
uint32_t high;
uint32_t low;
@@ -712,10 +731,11 @@ uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
high = 0xffff;
SET_QC();
}
- return low | (high << 16);
+ return deposit32(low, 16, 16, high);
}
-uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
int32_t low;
int32_t high;
@@ -729,10 +749,11 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
high = (high >> 31) ^ 0x7fff;
SET_QC();
}
- return (uint16_t)low | (high << 16);
+ return deposit32(low, 16, 16, high);
}
-uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
if (x & 0x8000000000000000ull) {
SET_QC();
@@ -745,7 +766,8 @@ uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
return x;
}
-uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
if (x > 0xffffffffu) {
SET_QC();
@@ -754,13 +776,14 @@ uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
return x;
}
-uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
+/* Only the low 32 bits of the output are significant. */
+uint64_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
if ((int64_t)x != (int32_t)x) {
SET_QC();
- return ((int64_t)x >> 63) ^ 0x7fffffff;
+ return (uint32_t)((int64_t)x >> 63) ^ 0x7fffffff;
}
- return x;
+ return (uint32_t)x;
}
uint64_t HELPER(neon_widen_u8)(uint32_t x)
@@ -803,62 +826,47 @@ uint64_t HELPER(neon_widen_s16)(uint32_t x)
return ((uint32_t)(int16_t)x) | (high << 32);
}
-uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
-{
- uint64_t mask;
- mask = (a ^ b) & 0x8000800080008000ull;
- a &= ~0x8000800080008000ull;
- b &= ~0x8000800080008000ull;
- return (a + b) ^ mask;
-}
-
-uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
-{
- uint64_t mask;
- mask = (a ^ b) & 0x8000000080000000ull;
- a &= ~0x8000000080000000ull;
- b &= ~0x8000000080000000ull;
- return (a + b) ^ mask;
-}
-
-uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
-{
- uint64_t tmp;
- uint64_t tmp2;
+/* Pairwise long add: add pairs of adjacent elements into
+ * double-width elements in the result (eg _s8 is an 8x8->16 op)
+ */
+uint64_t HELPER(neon_addlp_s8)(uint64_t a)
+{
+ uint64_t nsignmask = 0x0080008000800080ULL;
+ uint64_t wsignmask = 0x8000800080008000ULL;
+ uint64_t elementmask = 0x00ff00ff00ff00ffULL;
+ uint64_t tmp1, tmp2;
+ uint64_t res, signres;
+
+ /* Extract odd elements, sign extend each to a 16 bit field */
+ tmp1 = a & elementmask;
+ tmp1 ^= nsignmask;
+ tmp1 |= wsignmask;
+ tmp1 = (tmp1 - nsignmask) ^ wsignmask;
+ /* Ditto for the even elements */
+ tmp2 = (a >> 8) & elementmask;
+ tmp2 ^= nsignmask;
+ tmp2 |= wsignmask;
+ tmp2 = (tmp2 - nsignmask) ^ wsignmask;
+
+    /* calculate the result by summing bits 0..14, 16..30, etc,
+     * and then adjusting the sign bits 15, 31, etc manually.
+     * This ensures the addition can't overflow the 16-bit field.
+ */
+ signres = (tmp1 ^ tmp2) & wsignmask;
+ res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
+ res ^= signres;
- tmp = a & 0x0000ffff0000ffffull;
- tmp += (a >> 16) & 0x0000ffff0000ffffull;
- tmp2 = b & 0xffff0000ffff0000ull;
- tmp2 += (b << 16) & 0xffff0000ffff0000ull;
- return ( tmp & 0xffff)
- | ((tmp >> 16) & 0xffff0000ull)
- | ((tmp2 << 16) & 0xffff00000000ull)
- | ( tmp2 & 0xffff000000000000ull);
+ return res;
}
-uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
+uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
- uint32_t low = a + (a >> 32);
- uint32_t high = b + (b >> 32);
- return low + ((uint64_t)high << 32);
-}
+ int32_t reslo, reshi;
-uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
-{
- uint64_t mask;
- mask = (a ^ ~b) & 0x8000800080008000ull;
- a |= 0x8000800080008000ull;
- b &= ~0x8000800080008000ull;
- return (a - b) ^ mask;
-}
+ reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
+ reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
-uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
-{
- uint64_t mask;
- mask = (a ^ ~b) & 0x8000000080000000ull;
- a |= 0x8000000080000000ull;
- b &= ~0x8000000080000000ull;
- return (a - b) ^ mask;
+ return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}
uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
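The neon_addlp_s8 implementation added above performs four signed 8+8->16 pairwise additions using only 64-bit mask arithmetic, handling the lane sign bits separately so no lane can overflow into its neighbour. A standalone cross-check of that trick against a naive per-lane loop; the test vectors are arbitrary illustrations.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-parallel version: same masks and steps as neon_addlp_s8 above. */
static uint64_t addlp_s8_bitwise(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2, res, signres;

    /* Sign-extend the even-numbered bytes into 16-bit lanes. */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the odd-numbered bytes. */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* Add the lanes while keeping carries away from the lane sign bits. */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    return res ^ signres;
}

/* Naive reference: four independent int8 + int8 -> int16 lane sums. */
static uint64_t addlp_s8_reference(uint64_t a)
{
    uint64_t res = 0;

    for (int lane = 0; lane < 4; lane++) {
        int8_t lo = (int8_t)(a >> (16 * lane));
        int8_t hi = (int8_t)(a >> (16 * lane + 8));
        uint16_t sum = (uint16_t)(int16_t)(lo + hi);

        res |= (uint64_t)sum << (16 * lane);
    }
    return res;
}

int main(void)
{
    static const uint64_t tests[] = {
        0, 0xffffffffffffffffULL, 0x7f807f807f807f80ULL,
        0x0102030405060708ULL, 0x8080808080808080ULL,
    };

    for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        assert(addlp_s8_bitwise(tests[i]) == addlp_s8_reference(tests[i]));
    }
    printf("pairwise long add cross-check passed\n");
    return 0;
}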
@@ -1172,51 +1180,44 @@ uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
* Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
* Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
*/
-uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}
-uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float32_le(make_float32(b), make_float32(a), fpst);
}
-uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
return -float32_lt(make_float32(b), make_float32(a), fpst);
}
-uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
return -float32_le(f1, f0, fpst);
}
-uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
return -float32_lt(f1, f0, fpst);
}
-uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
+uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float64 f0 = float64_abs(make_float64(a));
float64 f1 = float64_abs(make_float64(b));
return -float64_le(f1, f0, fpst);
}
-uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
+uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, float_status *fpst)
{
- float_status *fpst = fpstp;
float64 f0 = float64_abs(make_float64(a));
float64 f1 = float64_abs(make_float64(b));
return -float64_lt(f1, f0, fpst);
diff --git a/target/arm/op_addsub.h b/target/arm/tcg/op_addsub.c.inc
index ca4a189..ca4a189 100644
--- a/target/arm/op_addsub.h
+++ b/target/arm/tcg/op_addsub.c.inc
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index c083e5c..575e566 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -20,10 +20,11 @@
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "cpregs.h"
#define SIGNBIT (uint32_t)0x80000000
@@ -313,15 +314,19 @@ void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
}
#ifndef CONFIG_USER_ONLY
-/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
+/*
+ * Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
* The function returns the target EL (1-3) if the instruction is to be trapped;
* otherwise it returns 0 indicating it is not trapped.
+ * For a trap, *excp is updated with the EXCP_* trap type to use.
*/
-static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
+static inline int check_wfx_trap(CPUARMState *env, bool is_wfe, uint32_t *excp)
{
int cur_el = arm_current_el(env);
uint64_t mask;
+ *excp = EXCP_UDEF;
+
if (arm_feature(env, ARM_FEATURE_M)) {
/* M profile cores can never trap WFI/WFE. */
return 0;
@@ -331,18 +336,9 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
* WFx instructions being trapped to EL1. These trap bits don't exist in v7.
*/
if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
- int target_el;
-
mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
- if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
- /* Secure EL0 and Secure PL1 is at EL3 */
- target_el = 3;
- } else {
- target_el = 1;
- }
-
- if (!(env->cp15.sctlr_el[target_el] & mask)) {
- return target_el;
+ if (!(arm_sctlr(env, cur_el) & mask)) {
+ return exception_target_el(env);
}
}
@@ -358,9 +354,12 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
}
/* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
- if (cur_el < 3) {
+ if (arm_feature(env, ARM_FEATURE_V8) && !arm_is_el3_or_mon(env)) {
mask = (is_wfe) ? SCR_TWE : SCR_TWI;
if (env->cp15.scr_el3 & mask) {
+ if (!arm_el_is_aa64(env, 3)) {
+ *excp = EXCP_MON_TRAP;
+ }
return 3;
}
}
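The check_wfx_trap() rework above adds an *excp out-parameter because a WFx trap routed to an AArch32 EL3 has no syndrome register and must be reported as a Monitor trap rather than EXCP_UDEF. A minimal standalone sketch of just that selection; the enum values and the helper name are illustrative, not the QEMU definitions.

#include <stdbool.h>
#include <stdio.h>

enum { EXCP_UDEF = 1, EXCP_MON_TRAP = 2 };      /* illustrative values */

static int pick_wfx_excp(int target_el, bool el3_is_aa64)
{
    if (target_el == 3 && !el3_is_aa64) {
        /* AArch32 EL3 has no syndrome register: report a Monitor trap. */
        return EXCP_MON_TRAP;
    }
    return EXCP_UDEF;
}

int main(void)
{
    printf("trap to EL3, AArch32 EL3: excp %d\n", pick_wfx_excp(3, false));
    printf("trap to EL3, AArch64 EL3: excp %d\n", pick_wfx_excp(3, true));
    printf("trap to EL2:              excp %d\n", pick_wfx_excp(2, true));
    return 0;
}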
@@ -383,7 +382,8 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
return;
#else
CPUState *cs = env_cpu(env);
- int target_el = check_wfx_trap(env, false);
+ uint32_t excp;
+ int target_el = check_wfx_trap(env, false, &excp);
if (cpu_has_work(cs)) {
/* Don't bother to go into our "low power state" if
@@ -399,7 +399,7 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
env->regs[15] -= insn_len;
}
- raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
+ raise_exception(env, excp, syn_wfx(1, 0xe, 0, insn_len == 2),
target_el);
}
@@ -424,10 +424,17 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
#else
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
- int target_el = check_wfx_trap(env, false);
+ uint32_t excp;
+ int target_el = check_wfx_trap(env, false, &excp);
/* The WFIT should time out when CNTVCT_EL0 >= the specified value. */
uint64_t cntval = gt_get_countervalue(env);
- uint64_t offset = gt_virt_cnt_offset(env);
+ /*
+ * We want the value that we would get if we read CNTVCT_EL0 from
+ * the current exception level, so the direct_access offset, not
+ * the indirect_access one. Compare the pseudocode LocalTimeoutEvent(),
+ * which calls VirtualCounterTimer().
+ */
+ uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
uint64_t cntvct = cntval - offset;
uint64_t nexttick;
@@ -441,8 +448,7 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
if (target_el) {
env->pc -= 4;
- raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, false),
- target_el);
+ raise_exception(env, excp, syn_wfx(1, 0xe, 0, false), target_el);
}
if (uadd64_overflow(timeout, offset, &nexttick)) {
@@ -758,12 +764,13 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
CPAccessResult res = CP_ACCESS_OK;
int target_el;
+ uint32_t excp;
assert(ri != NULL);
if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
&& extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
- res = CP_ACCESS_TRAP;
+ res = CP_ACCESS_UNDEFINED;
goto fail;
}
@@ -780,7 +787,7 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
* the other trap takes priority. So we take the "check HSTR_EL2" path
* for all of those cases.)
*/
- if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) &&
+ if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) < 2) &&
arm_current_el(env) == 0) {
goto fail;
}
@@ -817,6 +824,7 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
bool rev = FIELD_EX32(ri->fgt, FGT, REV);
+ bool nxs = FIELD_EX32(ri->fgt, FGT, NXS);
bool trapbit;
if (ri->fgt & FGT_EXEC) {
@@ -830,7 +838,15 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
trapword = env->cp15.fgt_write[idx];
}
- trapbit = extract64(trapword, bitpos, 1);
+ if (nxs && (arm_hcrx_el2_eff(env) & HCRX_FGTNXS)) {
+ /*
+ * If HCRX_EL2.FGTnXS is 1 then the fine-grained trap for
+ * TLBI maintenance insns does *not* apply to the nXS variant.
+ */
+ trapbit = 0;
+ } else {
+ trapbit = extract64(trapword, bitpos, 1);
+ }
if (trapbit != rev) {
res = CP_ACCESS_TRAP_EL2;
goto fail;
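The hunk above makes the fine-grained trap bit a no-op for the nXS variants of the TLBI maintenance instructions when HCRX_EL2.FGTnXS is set. A standalone sketch of that check; extract64() is re-implemented locally and the HCRX_FGTNXS bit position is invented for illustration, so only the shape of the logic matches the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCRX_FGTNXS (1ULL << 4)                 /* invented bit position */

static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

static bool fgt_trap_bit(uint64_t trapword, int bitpos, bool insn_is_nxs,
                         uint64_t hcrx_el2)
{
    if (insn_is_nxs && (hcrx_el2 & HCRX_FGTNXS)) {
        /* FGTnXS set: the fine-grained trap does not apply to nXS insns. */
        return false;
    }
    return extract64(trapword, bitpos, 1);
}

int main(void)
{
    uint64_t trapword = 1ULL << 7;              /* pretend bit 7 is the trap */

    printf("plain insn:         %d\n", fgt_trap_bit(trapword, 7, false, 0));
    printf("nXS insn, FGTnXS=1: %d\n", fgt_trap_bit(trapword, 7, true, HCRX_FGTNXS));
    printf("nXS insn, FGTnXS=0: %d\n", fgt_trap_bit(trapword, 7, true, 0));
    return 0;
}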
@@ -842,12 +858,25 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
}
fail:
- switch (res & ~CP_ACCESS_EL_MASK) {
- case CP_ACCESS_TRAP:
+ excp = EXCP_UDEF;
+ switch (res) {
+ /* CP_ACCESS_TRAP* traps are always direct to a specified EL */
+ case CP_ACCESS_TRAP_EL3:
+ /*
+     * If EL3 is AArch32 then there's no syndrome register; the cases
+     * where we would raise a SystemAccessTrap to AArch64 EL3 all raise
+     * a Monitor trap exception instead. (Because there's no visible
+ * syndrome it doesn't matter what we pass to raise_exception().)
+ */
+ if (!arm_el_is_aa64(env, 3)) {
+ excp = EXCP_MON_TRAP;
+ }
break;
- case CP_ACCESS_TRAP_UNCATEGORIZED:
- /* Only CP_ACCESS_TRAP traps are direct to a specified EL */
- assert((res & CP_ACCESS_EL_MASK) == 0);
+ case CP_ACCESS_TRAP_EL2:
+ case CP_ACCESS_TRAP_EL1:
+ break;
+ case CP_ACCESS_UNDEFINED:
+ /* CP_ACCESS_UNDEFINED is never direct to a specified EL */
if (cpu_isar_feature(aa64_ids, cpu) && isread &&
arm_cpreg_in_idspace(ri)) {
/*
@@ -867,6 +896,9 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
case 0:
target_el = exception_target_el(env);
break;
+ case 1:
+ assert(arm_current_el(env) < 2);
+ break;
case 2:
assert(arm_current_el(env) != 3);
assert(arm_is_el2_enabled(env));
@@ -875,11 +907,10 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
assert(arm_feature(env, ARM_FEATURE_EL3));
break;
default:
- /* No "direct" traps to EL1 */
g_assert_not_reached();
}
- raise_exception(env, EXCP_UDEF, syndrome, target_el);
+ raise_exception(env, excp, syndrome, target_el);
}
const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
@@ -912,7 +943,19 @@ void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
/* See arm_sctlr(), but we also need the sctlr el. */
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
+ int target_el;
+
+ switch (mmu_idx) {
+ case ARMMMUIdx_E20_0:
+ target_el = 2;
+ break;
+ case ARMMMUIdx_E30_0:
+ target_el = 3;
+ break;
+ default:
+ target_el = 1;
+ break;
+ }
/*
* The bit is not valid unless the target el is aa64, but since the
@@ -1179,7 +1222,7 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
}
}
-void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
+void HELPER(probe_access)(CPUARMState *env, vaddr ptr,
uint32_t access_type, uint32_t mmu_idx,
uint32_t size)
{
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index c4b1430..c591c30 100644
--- a/target/arm/tcg/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
@@ -21,8 +21,7 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/xxhash.h"
diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c
index 51d2ca3..cabed43 100644
--- a/target/arm/tcg/psci.c
+++ b/target/arm/tcg/psci.c
@@ -21,7 +21,7 @@
#include "exec/helper-proto.h"
#include "kvm-consts.h"
#include "qemu/main-loop.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "internals.h"
#include "arm-powerctl.h"
#include "target/arm/multiprocessing.h"
diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c
index 5a6dd76..de0c6e5 100644
--- a/target/arm/tcg/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
@@ -22,8 +22,8 @@
#include "internals.h"
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
@@ -517,6 +517,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
clr_fn(za, 0, reg_off);
}
+ set_helper_retaddr(ra);
+
while (reg_off <= reg_last) {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -529,6 +531,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
} while (reg_off <= reg_last && (reg_off & 63));
}
+ clear_helper_retaddr();
+
/*
* Use the slow path to manage the cross-page misalignment.
* But we know this is RAM and cannot trap.
@@ -543,6 +547,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
reg_last = info.reg_off_last[1];
host = info.page[1].host;
+ set_helper_retaddr(ra);
+
do {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -554,6 +560,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
reg_off += esize;
} while (reg_off & 63);
} while (reg_off <= reg_last);
+
+ clear_helper_retaddr();
}
}
@@ -701,6 +709,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
reg_last = info.reg_off_last[0];
host = info.page[0].host;
+ set_helper_retaddr(ra);
+
while (reg_off <= reg_last) {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -711,6 +721,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
} while (reg_off <= reg_last && (reg_off & 63));
}
+ clear_helper_retaddr();
+
/*
* Use the slow path to manage the cross-page misalignment.
* But we know this is RAM and cannot trap.
@@ -725,6 +737,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
reg_last = info.reg_off_last[1];
host = info.page[1].host;
+ set_helper_retaddr(ra);
+
do {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -734,6 +748,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
reg_off += 1 << esz;
} while (reg_off & 63);
} while (reg_off <= reg_last);
+
+ clear_helper_retaddr();
}
}
@@ -888,7 +904,7 @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
}
void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, void *vst, uint32_t desc)
+ void *vpm, float_status *fpst_in, uint32_t desc)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
uint32_t neg = simd_data(desc) << 31;
@@ -900,7 +916,7 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
* update the cumulative fp exception status. It also produces
* default nans.
*/
- fpst = *(float_status *)vst;
+ fpst = *fpst_in;
set_default_nan_mode(true, &fpst);
for (row = 0; row < oprsz; ) {
@@ -930,13 +946,13 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
}
void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, void *vst, uint32_t desc)
+ void *vpm, float_status *fpst_in, uint32_t desc)
{
intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
uint64_t neg = (uint64_t)simd_data(desc) << 63;
uint64_t *za = vza, *zn = vzn, *zm = vzm;
uint8_t *pn = vpn, *pm = vpm;
- float_status fpst = *(float_status *)vst;
+ float_status fpst = *fpst_in;
set_default_nan_mode(true, &fpst);
@@ -976,12 +992,23 @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
}
static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
- float_status *s_std, float_status *s_odd)
+ float_status *s_f16, float_status *s_std,
+ float_status *s_odd)
{
- float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
- float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
- float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
- float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
+ /*
+ * We need three different float_status for different parts of this
+ * operation:
+ * - the input conversion of the float16 values must use the
+ * f16-specific float_status, so that the FPCR.FZ16 control is applied
+ * - operations on float32 including the final accumulation must use
+ * the normal float_status, so that FPCR.FZ is applied
+     * - the multiply uses a pre-set-up copy of s_std which is set to
+     *   round-to-odd (see below)
+ */
+ float64 e1r = float16_to_float64(e1 & 0xffff, true, s_f16);
+ float64 e1c = float16_to_float64(e1 >> 16, true, s_f16);
+ float64 e2r = float16_to_float64(e2 & 0xffff, true, s_f16);
+ float64 e2c = float16_to_float64(e2 >> 16, true, s_f16);
float64 t64;
float32 t32;
@@ -1003,20 +1030,23 @@ static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
}
void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, void *vst, uint32_t desc)
+ void *vpm, CPUARMState *env, uint32_t desc)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
uint32_t neg = simd_data(desc) * 0x80008000u;
uint16_t *pn = vpn, *pm = vpm;
- float_status fpst_odd, fpst_std;
+ float_status fpst_odd, fpst_std, fpst_f16;
/*
- * Make a copy of float_status because this operation does not
- * update the cumulative fp exception status. It also produces
- * default nans. Make a second copy with round-to-odd -- see above.
+ * Make copies of the fp status fields we use, because this operation
+ * does not update the cumulative fp exception status. It also
+ * produces default NaNs. We also need a second copy of fp_status with
+ * round-to-odd -- see above.
*/
- fpst_std = *(float_status *)vst;
+ fpst_f16 = env->vfp.fp_status[FPST_A64_F16];
+ fpst_std = env->vfp.fp_status[FPST_A64];
set_default_nan_mode(true, &fpst_std);
+ set_default_nan_mode(true, &fpst_f16);
fpst_odd = fpst_std;
set_float_rounding_mode(float_round_to_odd, &fpst_odd);
@@ -1036,7 +1066,8 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
uint32_t m = *(uint32_t *)(vzm + H1_4(col));
m = f16mop_adj_pair(m, pcol, 0);
- *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
+ *a = f16_dotadd(*a, n, m,
+ &fpst_f16, &fpst_std, &fpst_odd);
}
col += 4;
pcol >>= 4;
@@ -1048,38 +1079,68 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
}
}
-void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, uint32_t desc)
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
+ void *vpn, void *vpm, CPUARMState *env, uint32_t desc)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
uint32_t neg = simd_data(desc) * 0x80008000u;
uint16_t *pn = vpn, *pm = vpm;
+ float_status fpst, fpst_odd;
- for (row = 0; row < oprsz; ) {
- uint16_t prow = pn[H2(row >> 4)];
- do {
- void *vza_row = vza + tile_vslice_offset(row);
- uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (row = 0; row < oprsz; ) {
+ uint16_t prow = pn[H2(row >> 4)];
+ do {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
- n = f16mop_adj_pair(n, prow, neg);
+ n = f16mop_adj_pair(n, prow, neg);
- for (col = 0; col < oprsz; ) {
- uint16_t pcol = pm[H2(col >> 4)];
- do {
- if (prow & pcol & 0b0101) {
- uint32_t *a = vza_row + H1_4(col);
- uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+ for (col = 0; col < oprsz; ) {
+ uint16_t pcol = pm[H2(col >> 4)];
+ do {
+ if (prow & pcol & 0b0101) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
- m = f16mop_adj_pair(m, pcol, 0);
- *a = bfdotadd(*a, n, m);
- }
- col += 4;
- pcol >>= 4;
- } while (col & 15);
- }
- row += 4;
- prow >>= 4;
- } while (row & 15);
+ m = f16mop_adj_pair(m, pcol, 0);
+ *a = bfdotadd_ebf(*a, n, m, &fpst, &fpst_odd);
+ }
+ col += 4;
+ pcol >>= 4;
+ } while (col & 15);
+ }
+ row += 4;
+ prow >>= 4;
+ } while (row & 15);
+ }
+ } else {
+ for (row = 0; row < oprsz; ) {
+ uint16_t prow = pn[H2(row >> 4)];
+ do {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+ n = f16mop_adj_pair(n, prow, neg);
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pcol = pm[H2(col >> 4)];
+ do {
+ if (prow & pcol & 0b0101) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+ m = f16mop_adj_pair(m, pcol, 0);
+ *a = bfdotadd(*a, n, m, &fpst);
+ }
+ col += 4;
+ pcol >>= 4;
+ } while (col & 15);
+ }
+ row += 4;
+ prow >>= 4;
+ } while (row & 15);
+ }
}
}
@@ -1146,10 +1207,10 @@ static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
uint64_t sum = 0; \
/* Apply P to N as a mask, making the inactive elements 0. */ \
n &= expand_pred_h(p); \
- sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
- sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
- sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
- sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
+ sum += (int64_t)(NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
+ sum += (int64_t)(NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
+ sum += (int64_t)(NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
+ sum += (int64_t)(NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
return neg ? a - sum : a + sum; \
}
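The (int64_t) casts added to the dot-product macro above matter because both 16-bit multiplicands promote to int under the usual arithmetic conversions, so for the unsigned instantiations a product such as 65535 * 65535 overflows 32-bit signed arithmetic, which is undefined behaviour. A plain-C illustration of the widening, using no QEMU types:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t n = UINT16_MAX, m = UINT16_MAX;

    /*
     * Both uint16_t operands promote to (signed) int, so without a cast
     * the multiply is done in 32-bit signed arithmetic.  Doing the maths
     * in 64 bits shows the true result does not fit in an int.
     */
    int64_t product = (int64_t)n * m;
    printf("65535 * 65535 = %lld, INT_MAX = %d\n",
           (long long)product, INT_MAX);
    printf("fits in int?    %s\n", product <= INT_MAX ? "yes" : "no");

    /* The shape used by the fixed macro: widen before multiplying. */
    uint64_t sum = 0;
    sum += (int64_t)(uint16_t)n * (uint16_t)m;
    printf("widened sum     = %llu\n", (unsigned long long)sum);
    return 0;
}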
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index dd49e67..a2c363a 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -20,15 +20,22 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "vec_internal.h"
#include "sve_ldst_internal.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/probe.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
/* Return a value for NZCV as per the ARM PredTest pseudofunction.
@@ -730,7 +737,7 @@ DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
- void *status, uint32_t desc) \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i, opr_sz = simd_oprsz(desc); \
for (i = 0; i < opr_sz; ) { \
@@ -876,12 +883,28 @@ DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS)
DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS)
DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS)
+#define DO_AH_FABS_H(N) (float16_is_any_nan(N) ? (N) : DO_FABS(N))
+#define DO_AH_FABS_S(N) (float32_is_any_nan(N) ? (N) : DO_FABS(N))
+#define DO_AH_FABS_D(N) (float64_is_any_nan(N) ? (N) : DO_FABS(N))
+
+DO_ZPZ(sve_ah_fabs_h, uint16_t, H1_2, DO_AH_FABS_H)
+DO_ZPZ(sve_ah_fabs_s, uint32_t, H1_4, DO_AH_FABS_S)
+DO_ZPZ_D(sve_ah_fabs_d, uint64_t, DO_AH_FABS_D)
+
#define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1))
DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG)
DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG)
DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG)
+#define DO_AH_FNEG_H(N) (float16_is_any_nan(N) ? (N) : DO_FNEG(N))
+#define DO_AH_FNEG_S(N) (float32_is_any_nan(N) ? (N) : DO_FNEG(N))
+#define DO_AH_FNEG_D(N) (float64_is_any_nan(N) ? (N) : DO_FNEG(N))
+
+DO_ZPZ(sve_ah_fneg_h, uint16_t, H1_2, DO_AH_FNEG_H)
+DO_ZPZ(sve_ah_fneg_s, uint32_t, H1_4, DO_AH_FNEG_S)
+DO_ZPZ_D(sve_ah_fneg_d, uint64_t, DO_AH_FNEG_D)
+
#define DO_NOT(N) (~N)
DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT)
@@ -2536,6 +2559,7 @@ void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc)
void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc) / 2;
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1);
uint16_t *d = vd, *n = vn, *m = vm;
for (i = 0; i < opr_sz; i += 1) {
uint16_t nn = n[i];
@@ -2543,13 +2567,17 @@ void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc)
if (mm & 1) {
nn = float16_one;
}
- d[i] = nn ^ (mm & 2) << 14;
+ if (mm & 2) {
+ nn = float16_maybe_ah_chs(nn, fpcr_ah);
+ }
+ d[i] = nn;
}
}
void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t *d = vd, *n = vn, *m = vm;
for (i = 0; i < opr_sz; i += 1) {
uint32_t nn = n[i];
@@ -2557,13 +2585,17 @@ void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc)
if (mm & 1) {
nn = float32_one;
}
- d[i] = nn ^ (mm & 2) << 30;
+ if (mm & 2) {
+ nn = float32_maybe_ah_chs(nn, fpcr_ah);
+ }
+ d[i] = nn;
}
}
void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1);
uint64_t *d = vd, *n = vn, *m = vm;
for (i = 0; i < opr_sz; i += 1) {
uint64_t nn = n[i];
@@ -2571,7 +2603,10 @@ void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc)
if (mm & 1) {
nn = float64_one;
}
- d[i] = nn ^ (mm & 2) << 62;
+ if (mm & 2) {
+ nn = float64_maybe_ah_chs(nn, fpcr_ah);
+ }
+ d[i] = nn;
}
}
@@ -4187,10 +4222,10 @@ static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \
uintptr_t half = n / 2; \
TYPE lo = NAME##_reduce(data, status, half); \
TYPE hi = NAME##_reduce(data + half, status, half); \
- return TYPE##_##FUNC(lo, hi, status); \
+ return FUNC(lo, hi, status); \
} \
} \
-uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \
+uint64_t HELPER(NAME)(void *vn, void *vg, float_status *s, uint32_t desc) \
{ \
uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \
TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \
@@ -4205,34 +4240,45 @@ uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \
for (; i < maxsz; i += sizeof(TYPE)) { \
*(TYPE *)((void *)data + i) = IDENT; \
} \
- return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \
+ return NAME##_reduce(data, s, maxsz / sizeof(TYPE)); \
}
-DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero)
-DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero)
-DO_REDUCE(sve_faddv_d, float64, H1_8, add, float64_zero)
+DO_REDUCE(sve_faddv_h, float16, H1_2, float16_add, float16_zero)
+DO_REDUCE(sve_faddv_s, float32, H1_4, float32_add, float32_zero)
+DO_REDUCE(sve_faddv_d, float64, H1_8, float64_add, float64_zero)
/* Identity is floatN_default_nan, without the function call. */
-DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00)
-DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000)
-DO_REDUCE(sve_fminnmv_d, float64, H1_8, minnum, 0x7FF8000000000000ULL)
+DO_REDUCE(sve_fminnmv_h, float16, H1_2, float16_minnum, 0x7E00)
+DO_REDUCE(sve_fminnmv_s, float32, H1_4, float32_minnum, 0x7FC00000)
+DO_REDUCE(sve_fminnmv_d, float64, H1_8, float64_minnum, 0x7FF8000000000000ULL)
+
+DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, float16_maxnum, 0x7E00)
+DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, float32_maxnum, 0x7FC00000)
+DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, float64_maxnum, 0x7FF8000000000000ULL)
-DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00)
-DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000)
-DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, maxnum, 0x7FF8000000000000ULL)
+DO_REDUCE(sve_fminv_h, float16, H1_2, float16_min, float16_infinity)
+DO_REDUCE(sve_fminv_s, float32, H1_4, float32_min, float32_infinity)
+DO_REDUCE(sve_fminv_d, float64, H1_8, float64_min, float64_infinity)
-DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity)
-DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity)
-DO_REDUCE(sve_fminv_d, float64, H1_8, min, float64_infinity)
+DO_REDUCE(sve_fmaxv_h, float16, H1_2, float16_max, float16_chs(float16_infinity))
+DO_REDUCE(sve_fmaxv_s, float32, H1_4, float32_max, float32_chs(float32_infinity))
+DO_REDUCE(sve_fmaxv_d, float64, H1_8, float64_max, float64_chs(float64_infinity))
-DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity))
-DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity))
-DO_REDUCE(sve_fmaxv_d, float64, H1_8, max, float64_chs(float64_infinity))
+DO_REDUCE(sve_ah_fminv_h, float16, H1_2, helper_vfp_ah_minh, float16_infinity)
+DO_REDUCE(sve_ah_fminv_s, float32, H1_4, helper_vfp_ah_mins, float32_infinity)
+DO_REDUCE(sve_ah_fminv_d, float64, H1_8, helper_vfp_ah_mind, float64_infinity)
+
+DO_REDUCE(sve_ah_fmaxv_h, float16, H1_2, helper_vfp_ah_maxh,
+ float16_chs(float16_infinity))
+DO_REDUCE(sve_ah_fmaxv_s, float32, H1_4, helper_vfp_ah_maxs,
+ float32_chs(float32_infinity))
+DO_REDUCE(sve_ah_fmaxv_d, float64, H1_8, helper_vfp_ah_maxd,
+ float64_chs(float64_infinity))
#undef DO_REDUCE
uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg,
- void *status, uint32_t desc)
+ float_status *status, uint32_t desc)
{
intptr_t i = 0, opr_sz = simd_oprsz(desc);
float16 result = nn;
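The DO_REDUCE macro above pads the operand out to the maximum vector length with an identity value and then reduces by recursive halving, which gives the pairwise evaluation order the SVE horizontal reductions need. A plain-C sketch of the same shape; host float and + stand in for the softfloat types and the FUNC argument, and the base case is assumed to return the single remaining element.

#include <stddef.h>
#include <stdio.h>

/*
 * Recursive halving, the same shape as the NAME##_reduce helper that
 * DO_REDUCE generates: n must be a power of two, and padding slots hold
 * the identity value for the operation (0 for an add reduction).
 */
static float reduce_add(const float *data, size_t n)
{
    if (n == 1) {
        return data[0];
    } else {
        size_t half = n / 2;
        float lo = reduce_add(data, half);
        float hi = reduce_add(data + half, half);

        return lo + hi;     /* DO_REDUCE plugs FUNC in here */
    }
}

int main(void)
{
    /* Four live elements padded out to eight with the identity. */
    float data[8] = { 1.0f, 2.0f, 3.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f };

    printf("sum = %g\n", reduce_add(data, 8));
    return 0;
}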
@@ -4252,7 +4298,7 @@ uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg,
}
uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg,
- void *status, uint32_t desc)
+ float_status *status, uint32_t desc)
{
intptr_t i = 0, opr_sz = simd_oprsz(desc);
float32 result = nn;
@@ -4272,7 +4318,7 @@ uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg,
}
uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg,
- void *status, uint32_t desc)
+ float_status *status, uint32_t desc)
{
intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8;
uint64_t *m = vm;
@@ -4292,7 +4338,7 @@ uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg,
*/
#define DO_ZPZZ_FP(NAME, TYPE, H, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
- void *status, uint32_t desc) \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc); \
uint64_t *g = vg; \
@@ -4333,6 +4379,14 @@ DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max)
DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max)
DO_ZPZZ_FP(sve_fmax_d, uint64_t, H1_8, float64_max)
+DO_ZPZZ_FP(sve_ah_fmin_h, uint16_t, H1_2, helper_vfp_ah_minh)
+DO_ZPZZ_FP(sve_ah_fmin_s, uint32_t, H1_4, helper_vfp_ah_mins)
+DO_ZPZZ_FP(sve_ah_fmin_d, uint64_t, H1_8, helper_vfp_ah_mind)
+
+DO_ZPZZ_FP(sve_ah_fmax_h, uint16_t, H1_2, helper_vfp_ah_maxh)
+DO_ZPZZ_FP(sve_ah_fmax_s, uint32_t, H1_4, helper_vfp_ah_maxs)
+DO_ZPZZ_FP(sve_ah_fmax_d, uint64_t, H1_8, helper_vfp_ah_maxd)
+
DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum)
DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum)
DO_ZPZZ_FP(sve_fminnum_d, uint64_t, H1_8, float64_minnum)
@@ -4356,9 +4410,31 @@ static inline float64 abd_d(float64 a, float64 b, float_status *s)
return float64_abs(float64_sub(a, b, s));
}
+/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */
+static float16 ah_abd_h(float16 op1, float16 op2, float_status *stat)
+{
+ float16 r = float16_sub(op1, op2, stat);
+ return float16_is_any_nan(r) ? r : float16_abs(r);
+}
+
+static float32 ah_abd_s(float32 op1, float32 op2, float_status *stat)
+{
+ float32 r = float32_sub(op1, op2, stat);
+ return float32_is_any_nan(r) ? r : float32_abs(r);
+}
+
+static float64 ah_abd_d(float64 op1, float64 op2, float_status *stat)
+{
+ float64 r = float64_sub(op1, op2, stat);
+ return float64_is_any_nan(r) ? r : float64_abs(r);
+}
+
DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h)
DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s)
DO_ZPZZ_FP(sve_fabd_d, uint64_t, H1_8, abd_d)
+DO_ZPZZ_FP(sve_ah_fabd_h, uint16_t, H1_2, ah_abd_h)
+DO_ZPZZ_FP(sve_ah_fabd_s, uint32_t, H1_4, ah_abd_s)
+DO_ZPZZ_FP(sve_ah_fabd_d, uint64_t, H1_8, ah_abd_d)
static inline float64 scalbn_d(float64 a, int64_t b, float_status *s)
{
@@ -4381,7 +4457,7 @@ DO_ZPZZ_FP(sve_fmulx_d, uint64_t, H1_8, helper_vfp_mulxd)
*/
#define DO_ZPZS_FP(NAME, TYPE, H, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \
- void *status, uint32_t desc) \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc); \
uint64_t *g = vg; \
@@ -4445,11 +4521,20 @@ DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min)
DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min)
DO_ZPZS_FP(sve_fmins_d, float64, H1_8, float64_min)
+DO_ZPZS_FP(sve_ah_fmaxs_h, float16, H1_2, helper_vfp_ah_maxh)
+DO_ZPZS_FP(sve_ah_fmaxs_s, float32, H1_4, helper_vfp_ah_maxs)
+DO_ZPZS_FP(sve_ah_fmaxs_d, float64, H1_8, helper_vfp_ah_maxd)
+
+DO_ZPZS_FP(sve_ah_fmins_h, float16, H1_2, helper_vfp_ah_minh)
+DO_ZPZS_FP(sve_ah_fmins_s, float32, H1_4, helper_vfp_ah_mins)
+DO_ZPZS_FP(sve_ah_fmins_d, float64, H1_8, helper_vfp_ah_mind)
+
/* Fully general two-operand expander, controlled by a predicate,
* With the extra float_status parameter.
*/
#define DO_ZPZ_FP(NAME, TYPE, H, OP) \
-void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc); \
uint64_t *g = vg; \
@@ -4654,7 +4739,7 @@ static int16_t do_float16_logb_as_int(float16 a, float_status *s)
return -15 - clz32(frac);
}
/* flush to zero */
- float_raise(float_flag_input_denormal, s);
+ float_raise(float_flag_input_denormal_flushed, s);
}
} else if (unlikely(exp == 0x1f)) {
if (frac == 0) {
@@ -4682,7 +4767,7 @@ static int32_t do_float32_logb_as_int(float32 a, float_status *s)
return -127 - clz32(frac);
}
/* flush to zero */
- float_raise(float_flag_input_denormal, s);
+ float_raise(float_flag_input_denormal_flushed, s);
}
} else if (unlikely(exp == 0xff)) {
if (frac == 0) {
@@ -4710,7 +4795,7 @@ static int64_t do_float64_logb_as_int(float64 a, float_status *s)
return -1023 - clz64(frac);
}
/* flush to zero */
- float_raise(float_flag_input_denormal, s);
+ float_raise(float_flag_input_denormal_flushed, s);
}
} else if (unlikely(exp == 0x7ff)) {
if (frac == 0) {
@@ -4733,7 +4818,7 @@ DO_ZPZ_FP(flogb_d, float64, H1_8, do_float64_logb_as_int)
static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
float_status *status, uint32_t desc,
- uint16_t neg1, uint16_t neg3)
+ uint16_t neg1, uint16_t neg3, int flags)
{
intptr_t i = simd_oprsz(desc);
uint64_t *g = vg;
@@ -4748,7 +4833,7 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1;
e2 = *(uint16_t *)(vm + H1_2(i));
e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3;
- r = float16_muladd(e1, e2, e3, 0, status);
+ r = float16_muladd(e1, e2, e3, flags, status);
*(uint16_t *)(vd + H1_2(i)) = r;
}
} while (i & 63);
@@ -4756,32 +4841,53 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
}
void HELPER(sve_fmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0);
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, 0);
}
void HELPER(sve_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0);
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0, 0);
}
void HELPER(sve_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000);
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000, 0);
}
void HELPER(sve_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000, 0);
+}
+
+void HELPER(sve_ah_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product);
+}
+
+void HELPER(sve_ah_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000);
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product | float_muladd_negate_c);
+}
+
+void HELPER(sve_ah_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_c);
}
static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg,
float_status *status, uint32_t desc,
- uint32_t neg1, uint32_t neg3)
+ uint32_t neg1, uint32_t neg3, int flags)
{
intptr_t i = simd_oprsz(desc);
uint64_t *g = vg;
@@ -4796,7 +4902,7 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg,
e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1;
e2 = *(uint32_t *)(vm + H1_4(i));
e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3;
- r = float32_muladd(e1, e2, e3, 0, status);
+ r = float32_muladd(e1, e2, e3, flags, status);
*(uint32_t *)(vd + H1_4(i)) = r;
}
} while (i & 63);
@@ -4804,32 +4910,53 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg,
}
void HELPER(sve_fmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0);
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, 0);
}
void HELPER(sve_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0);
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0, 0);
}
void HELPER(sve_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000);
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000, 0);
}
void HELPER(sve_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000, 0);
+}
+
+void HELPER(sve_ah_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product);
+}
+
+void HELPER(sve_ah_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product | float_muladd_negate_c);
+}
+
+void HELPER(sve_ah_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000);
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_c);
}
static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg,
float_status *status, uint32_t desc,
- uint64_t neg1, uint64_t neg3)
+ uint64_t neg1, uint64_t neg3, int flags)
{
intptr_t i = simd_oprsz(desc);
uint64_t *g = vg;
@@ -4844,7 +4971,7 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg,
e1 = *(uint64_t *)(vn + i) ^ neg1;
e2 = *(uint64_t *)(vm + i);
e3 = *(uint64_t *)(va + i) ^ neg3;
- r = float64_muladd(e1, e2, e3, 0, status);
+ r = float64_muladd(e1, e2, e3, flags, status);
*(uint64_t *)(vd + i) = r;
}
} while (i & 63);
@@ -4852,27 +4979,48 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg,
}
void HELPER(sve_fmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0);
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, 0);
}
void HELPER(sve_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0);
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0, 0);
}
void HELPER(sve_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN);
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN, 0);
}
void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN, 0);
+}
+
+void HELPER(sve_ah_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product);
+}
+
+void HELPER(sve_ah_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_product | float_muladd_negate_c);
+}
+
+void HELPER(sve_ah_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, float_status *status, uint32_t desc)
{
- do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN);
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0,
+ float_muladd_negate_c);
}
/* Two operand floating-point comparison controlled by a predicate.
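The sve_ah_* FMLA variants above implement FPCR.AH semantics by passing float_muladd_negate_product / float_muladd_negate_c into the fused multiply-add rather than XORing a sign bit into an operand, so a NaN input is never modified. A small host-FP illustration of why the placement of the negation matters; this uses C99 fma() and host IEEE NaNs, not QEMU softfloat.

#include <math.h>
#include <stdio.h>

int main(void)
{
    /*
     * For ordinary inputs it does not matter whether the negation is
     * applied to an operand or folded into the multiply-add itself.
     */
    double a = 3.0, b = 4.0, c = 5.0;
    printf("fma(-a, b, c) = %g\n", fma(-a, b, c));
    printf("fma(a, -b, c) = %g\n", fma(a, -b, c));

    /*
     * For a NaN operand the two approaches differ: flipping the operand's
     * sign bit (what the neg1/neg3 XOR does) also flips the sign of the
     * NaN, which is what FPCR.AH wants to avoid.  On an IEEE 754 host,
     * unary minus negates the sign bit even for NaNs.
     */
    double n = NAN;
    printf("signbit(n)  = %d\n", signbit(n) != 0);
    printf("signbit(-n) = %d\n", signbit(-n) != 0);
    return 0;
}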
@@ -4882,7 +5030,7 @@ void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
*/
#define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
- void *status, uint32_t desc) \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
uint64_t *d = vd, *g = vg; \
@@ -4944,7 +5092,7 @@ DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT)
*/
#define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vg, \
- void *status, uint32_t desc) \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
uint64_t *d = vd, *g = vg; \
@@ -4982,27 +5130,37 @@ DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE)
/* FP Trig Multiply-Add. */
-void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm,
+ float_status *s, uint32_t desc)
{
static const float16 coeff[16] = {
0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16);
- intptr_t x = simd_data(desc);
+ intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
float16 *d = vd, *n = vn, *m = vm;
+
for (i = 0; i < opr_sz; i++) {
float16 mm = m[i];
intptr_t xx = x;
+ int flags = 0;
+
if (float16_is_neg(mm)) {
- mm = float16_abs(mm);
+ if (fpcr_ah) {
+ flags = float_muladd_negate_product;
+ } else {
+ mm = float16_abs(mm);
+ }
xx += 8;
}
- d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs);
+ d[i] = float16_muladd(n[i], mm, coeff[xx], flags, s);
}
}
-void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm,
+ float_status *s, uint32_t desc)
{
static const float32 coeff[16] = {
0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9,
@@ -5011,20 +5169,29 @@ void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
0x37cd37cc, 0x00000000, 0x00000000, 0x00000000,
};
intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32);
- intptr_t x = simd_data(desc);
+ intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
float32 *d = vd, *n = vn, *m = vm;
+
for (i = 0; i < opr_sz; i++) {
float32 mm = m[i];
intptr_t xx = x;
+ int flags = 0;
+
if (float32_is_neg(mm)) {
- mm = float32_abs(mm);
+ if (fpcr_ah) {
+ flags = float_muladd_negate_product;
+ } else {
+ mm = float32_abs(mm);
+ }
xx += 8;
}
- d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs);
+ d[i] = float32_muladd(n[i], mm, coeff[xx], flags, s);
}
}
-void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm,
+ float_status *s, uint32_t desc)
{
static const float64 coeff[16] = {
0x3ff0000000000000ull, 0xbfc5555555555543ull,
@@ -5037,16 +5204,24 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull,
};
intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64);
- intptr_t x = simd_data(desc);
+ intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
float64 *d = vd, *n = vn, *m = vm;
+
for (i = 0; i < opr_sz; i++) {
float64 mm = m[i];
intptr_t xx = x;
+ int flags = 0;
+
if (float64_is_neg(mm)) {
- mm = float64_abs(mm);
+ if (fpcr_ah) {
+ flags = float_muladd_negate_product;
+ } else {
+ mm = float64_abs(mm);
+ }
xx += 8;
}
- d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs);
+ d[i] = float64_muladd(n[i], mm, coeff[xx], flags, s);
}
}
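The reworked sve_ftmad_* helpers above no longer read the coefficient index with simd_data(); the descriptor's data field now carries two packed values, the 3-bit index x and the FPCR.AH flag, retrieved with extract32() at SIMD_DATA_SHIFT and SIMD_DATA_SHIFT + 3. A standalone sketch of that packing and unpacking; extract32()/deposit32() are re-implemented locally and the SIMD_DATA_SHIFT value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define SIMD_DATA_SHIFT 10                      /* invented for illustration */

static uint32_t deposit32(uint32_t value, int start, int length, uint32_t field)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((field << start) & mask);
}

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t x = 5, fpcr_ah = 1, desc = 0;

    /* Pack the fields the way the translator would build desc... */
    desc = deposit32(desc, SIMD_DATA_SHIFT, 3, x);
    desc = deposit32(desc, SIMD_DATA_SHIFT + 3, 1, fpcr_ah);

    /* ...and unpack them the way the helper does. */
    printf("x       = %u\n", extract32(desc, SIMD_DATA_SHIFT, 3));
    printf("fpcr_ah = %u\n", extract32(desc, SIMD_DATA_SHIFT + 3, 1));
    return 0;
}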
@@ -5055,12 +5230,12 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
*/
void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg,
- void *vs, uint32_t desc)
+ float_status *s, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
uint64_t *g = vg;
- float16 neg_imag = float16_set_sign(0, simd_data(desc));
- float16 neg_real = float16_chs(neg_imag);
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5072,27 +5247,33 @@ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg,
i -= 2 * sizeof(float16);
e0 = *(float16 *)(vn + H1_2(i));
- e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real;
+ e1 = *(float16 *)(vm + H1_2(j));
e2 = *(float16 *)(vn + H1_2(j));
- e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag;
+ e3 = *(float16 *)(vm + H1_2(i));
+
+ if (rot) {
+ e3 = float16_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float16_maybe_ah_chs(e1, fpcr_ah);
+ }
if (likely((pg >> (i & 63)) & 1)) {
- *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, vs);
+ *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, s);
}
if (likely((pg >> (j & 63)) & 1)) {
- *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, vs);
+ *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, s);
}
} while (i & 63);
} while (i != 0);
}
void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg,
- void *vs, uint32_t desc)
+ float_status *s, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
uint64_t *g = vg;
- float32 neg_imag = float32_set_sign(0, simd_data(desc));
- float32 neg_real = float32_chs(neg_imag);
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5104,27 +5285,33 @@ void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg,
i -= 2 * sizeof(float32);
e0 = *(float32 *)(vn + H1_2(i));
- e1 = *(float32 *)(vm + H1_2(j)) ^ neg_real;
+ e1 = *(float32 *)(vm + H1_2(j));
e2 = *(float32 *)(vn + H1_2(j));
- e3 = *(float32 *)(vm + H1_2(i)) ^ neg_imag;
+ e3 = *(float32 *)(vm + H1_2(i));
+
+ if (rot) {
+ e3 = float32_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float32_maybe_ah_chs(e1, fpcr_ah);
+ }
if (likely((pg >> (i & 63)) & 1)) {
- *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, vs);
+ *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, s);
}
if (likely((pg >> (j & 63)) & 1)) {
- *(float32 *)(vd + H1_2(j)) = float32_add(e2, e3, vs);
+ *(float32 *)(vd + H1_2(j)) = float32_add(e2, e3, s);
}
} while (i & 63);
} while (i != 0);
}
void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
- void *vs, uint32_t desc)
+ float_status *s, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
uint64_t *g = vg;
- float64 neg_imag = float64_set_sign(0, simd_data(desc));
- float64 neg_real = float64_chs(neg_imag);
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5136,15 +5323,21 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
i -= 2 * sizeof(float64);
e0 = *(float64 *)(vn + H1_2(i));
- e1 = *(float64 *)(vm + H1_2(j)) ^ neg_real;
+ e1 = *(float64 *)(vm + H1_2(j));
e2 = *(float64 *)(vn + H1_2(j));
- e3 = *(float64 *)(vm + H1_2(i)) ^ neg_imag;
+ e3 = *(float64 *)(vm + H1_2(i));
+
+ if (rot) {
+ e3 = float64_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float64_maybe_ah_chs(e1, fpcr_ah);
+ }
if (likely((pg >> (i & 63)) & 1)) {
- *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, vs);
+ *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, s);
}
if (likely((pg >> (j & 63)) & 1)) {
- *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, vs);
+ *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, s);
}
} while (i & 63);
} while (i != 0);
@@ -5155,16 +5348,21 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
*/
void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
- unsigned rot = simd_data(desc);
- bool flip = rot & 1;
- float16 neg_imag, neg_real;
+ bool flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float16 negx_imag, negx_real;
uint64_t *g = vg;
- neg_imag = float16_set_sign(0, (rot & 2) != 0);
- neg_real = float16_set_sign(0, rot == 1 || rot == 2);
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 15;
+ negx_imag = (negf_imag & ~fpcr_ah) << 15;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
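+ /*
+ * Worked example: flip=1, negf_imag=0 gives negf_real=1. With AH=0,
+ * negx_real = 1 << 15 = 0x8000 and e1 below has its sign flipped by
+ * the XOR; with AH=1, negx_real stays 0 and the negation is instead
+ * requested from float16_muladd() via float_muladd_negate_product.
+ */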
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5181,18 +5379,18 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
mi = *(float16 *)(vm + H1_2(j));
e2 = (flip ? ni : nr);
- e1 = (flip ? mi : mr) ^ neg_real;
+ e1 = (flip ? mi : mr) ^ negx_real;
e4 = e2;
- e3 = (flip ? mr : mi) ^ neg_imag;
+ e3 = (flip ? mr : mi) ^ negx_imag;
if (likely((pg >> (i & 63)) & 1)) {
d = *(float16 *)(va + H1_2(i));
- d = float16_muladd(e2, e1, d, 0, status);
+ d = float16_muladd(e2, e1, d, negf_real, status);
*(float16 *)(vd + H1_2(i)) = d;
}
if (likely((pg >> (j & 63)) & 1)) {
d = *(float16 *)(va + H1_2(j));
- d = float16_muladd(e4, e3, d, 0, status);
+ d = float16_muladd(e4, e3, d, negf_imag, status);
*(float16 *)(vd + H1_2(j)) = d;
}
} while (i & 63);
@@ -5200,16 +5398,21 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
}
void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
- unsigned rot = simd_data(desc);
- bool flip = rot & 1;
- float32 neg_imag, neg_real;
+ bool flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float32 negx_imag, negx_real;
uint64_t *g = vg;
- neg_imag = float32_set_sign(0, (rot & 2) != 0);
- neg_real = float32_set_sign(0, rot == 1 || rot == 2);
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 31;
+ negx_imag = (negf_imag & ~fpcr_ah) << 31;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5226,18 +5429,18 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
mi = *(float32 *)(vm + H1_2(j));
e2 = (flip ? ni : nr);
- e1 = (flip ? mi : mr) ^ neg_real;
+ e1 = (flip ? mi : mr) ^ negx_real;
e4 = e2;
- e3 = (flip ? mr : mi) ^ neg_imag;
+ e3 = (flip ? mr : mi) ^ negx_imag;
if (likely((pg >> (i & 63)) & 1)) {
d = *(float32 *)(va + H1_2(i));
- d = float32_muladd(e2, e1, d, 0, status);
+ d = float32_muladd(e2, e1, d, negf_real, status);
*(float32 *)(vd + H1_2(i)) = d;
}
if (likely((pg >> (j & 63)) & 1)) {
d = *(float32 *)(va + H1_2(j));
- d = float32_muladd(e4, e3, d, 0, status);
+ d = float32_muladd(e4, e3, d, negf_imag, status);
*(float32 *)(vd + H1_2(j)) = d;
}
} while (i & 63);
@@ -5245,16 +5448,21 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
}
void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
- void *vg, void *status, uint32_t desc)
+ void *vg, float_status *status, uint32_t desc)
{
intptr_t j, i = simd_oprsz(desc);
- unsigned rot = simd_data(desc);
- bool flip = rot & 1;
- float64 neg_imag, neg_real;
+ bool flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float64 negx_imag, negx_real;
uint64_t *g = vg;
- neg_imag = float64_set_sign(0, (rot & 2) != 0);
- neg_real = float64_set_sign(0, rot == 1 || rot == 2);
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63;
+ negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
do {
uint64_t pg = g[(i - 1) >> 6];
@@ -5271,18 +5479,18 @@ void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
mi = *(float64 *)(vm + H1_2(j));
e2 = (flip ? ni : nr);
- e1 = (flip ? mi : mr) ^ neg_real;
+ e1 = (flip ? mi : mr) ^ negx_real;
e4 = e2;
- e3 = (flip ? mr : mi) ^ neg_imag;
+ e3 = (flip ? mr : mi) ^ negx_imag;
if (likely((pg >> (i & 63)) & 1)) {
d = *(float64 *)(va + H1_2(i));
- d = float64_muladd(e2, e1, d, 0, status);
+ d = float64_muladd(e2, e1, d, negf_real, status);
*(float64 *)(vd + H1_2(i)) = d;
}
if (likely((pg >> (j & 63)) & 1)) {
d = *(float64 *)(va + H1_2(j));
- d = float64_muladd(e4, e3, d, 0, status);
+ d = float64_muladd(e4, e3, d, negf_imag, status);
*(float64 *)(vd + H1_2(j)) = d;
}
} while (i & 63);
@@ -5738,6 +5946,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
reg_last = info.reg_off_last[0];
host = info.page[0].host;
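+ /*
+ * Publish the return address around the direct host accesses so that
+ * a SEGV taken here is attributed to the guest access; see the
+ * comment above the first-fault loads below.
+ */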
+ set_helper_retaddr(retaddr);
+
while (reg_off <= reg_last) {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -5752,6 +5962,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
} while (reg_off <= reg_last && (reg_off & 63));
}
+ clear_helper_retaddr();
+
/*
* Use the slow path to manage the cross-page misalignment.
* But we know this is RAM and cannot trap.
@@ -5771,6 +5983,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
reg_last = info.reg_off_last[1];
host = info.page[1].host;
+ set_helper_retaddr(retaddr);
+
do {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -5784,6 +5998,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
mem_off += N << msz;
} while (reg_off & 63);
} while (reg_off <= reg_last);
+
+ clear_helper_retaddr();
}
}
@@ -5934,15 +6150,11 @@ DO_LDN_2(4, dd, MO_64)
/*
* Load contiguous data, first-fault and no-fault.
*
- * For user-only, one could argue that we should hold the mmap_lock during
- * the operation so that there is no race between page_check_range and the
- * load operation. However, unmapping pages out from under a running thread
- * is extraordinarily unlikely. This theoretical race condition also affects
- * linux-user/ in its get_user/put_user macros.
- *
- * TODO: Construct some helpers, written in assembly, that interact with
- * host_signal_handler to produce memory ops which can properly report errors
- * without racing.
+ * For user-only, we control the race between page_check_range and
+ * another thread's munmap by using set/clear_helper_retaddr. Any
+ * SEGV that occurs between those markers is assumed to be because
+ * the guest page vanished. Keep that block as small as possible
+ * so that unrelated QEMU bugs are not blamed on the guest.
*/
/* Fault on byte I. All bits in FFR from I are cleared. The vector
@@ -6093,6 +6305,8 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
reg_last = info.reg_off_last[0];
host = info.page[0].host;
+ set_helper_retaddr(retaddr);
+
do {
uint64_t pg = *(uint64_t *)(vg + (reg_off >> 3));
do {
@@ -6101,9 +6315,11 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
(cpu_watchpoint_address_matches
(env_cpu(env), addr + mem_off, 1 << msz)
& BP_MEM_READ)) {
+ clear_helper_retaddr();
goto do_fault;
}
if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
+ clear_helper_retaddr();
goto do_fault;
}
host_fn(vd, reg_off, host + mem_off);
@@ -6113,6 +6329,8 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
} while (reg_off <= reg_last && (reg_off & 63));
} while (reg_off <= reg_last);
+ clear_helper_retaddr();
+
/*
* MemSingleNF is allowed to fail for any reason. We have special
* code above to handle the first element crossing a page boundary.
@@ -6307,9 +6525,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
flags = info.page[0].flags | info.page[1].flags;
if (unlikely(flags != 0)) {
-#ifdef CONFIG_USER_ONLY
- g_assert_not_reached();
-#else
/*
* At least one page includes MMIO.
* Any bus operation can fail with cpu_transaction_failed,
@@ -6340,7 +6555,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
} while (reg_off & 63);
} while (reg_off <= reg_last);
return;
-#endif
}
mem_off = info.mem_off_first[0];
@@ -6348,6 +6562,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
reg_last = info.reg_off_last[0];
host = info.page[0].host;
+ set_helper_retaddr(retaddr);
+
while (reg_off <= reg_last) {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -6362,6 +6578,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
} while (reg_off <= reg_last && (reg_off & 63));
}
+ clear_helper_retaddr();
+
/*
* Use the slow path to manage the cross-page misalignment.
* But we know this is RAM and cannot trap.
@@ -6381,6 +6599,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
reg_last = info.reg_off_last[1];
host = info.page[1].host;
+ set_helper_retaddr(retaddr);
+
do {
uint64_t pg = vg[reg_off >> 6];
do {
@@ -6394,6 +6614,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
mem_off += N << msz;
} while (reg_off & 63);
} while (reg_off <= reg_last);
+
+ clear_helper_retaddr();
}
}
@@ -6560,7 +6782,9 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
if (unlikely(info.flags & TLB_MMIO)) {
tlb_fn(env, &scratch, reg_off, addr, retaddr);
} else {
+ set_helper_retaddr(retaddr);
host_fn(&scratch, reg_off, info.host);
+ clear_helper_retaddr();
}
} else {
/* Element crosses the page boundary. */
@@ -6782,7 +7006,9 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
goto fault;
}
+ set_helper_retaddr(retaddr);
host_fn(vd, reg_off, info.host);
+ clear_helper_retaddr();
}
reg_off += esize;
} while (reg_off & 63);
@@ -6986,7 +7212,9 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
do {
void *h = host[i];
if (likely(h != NULL)) {
+ set_helper_retaddr(retaddr);
host_fn(vd, reg_off, h);
+ clear_helper_retaddr();
} else if ((vg[reg_off >> 6] >> (reg_off & 63)) & 1) {
target_ulong addr = base + (off_fn(vm, reg_off) << scale);
tlb_fn(env, vd, reg_off, addr, retaddr);
@@ -7369,7 +7597,7 @@ void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
}
void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va,
- void *status, uint32_t desc)
+ float_status *status, uint32_t desc)
{
intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4);
@@ -7407,7 +7635,7 @@ void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va,
}
void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
- void *status, uint32_t desc)
+ float_status *status, uint32_t desc)
{
intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4);
@@ -7443,7 +7671,8 @@ void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
}
#define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
-void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc); \
uint64_t *g = vg; \
@@ -7464,7 +7693,8 @@ DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, H1_8, H1_4, float64_to_float32)
#define DO_FCVTLT(NAME, TYPEW, TYPEN, HW, HN, OP) \
-void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, \
+ float_status *status, uint32_t desc) \
{ \
intptr_t i = simd_oprsz(desc); \
uint64_t *g = vg; \
diff --git a/target/arm/tcg/sve_ldst_internal.h b/target/arm/tcg/sve_ldst_internal.h
index 4f159ec..f2243da 100644
--- a/target/arm/tcg/sve_ldst_internal.h
+++ b/target/arm/tcg/sve_ldst_internal.h
@@ -20,7 +20,7 @@
#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H
#define TARGET_ARM_SVE_LDST_INTERNAL_H
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
/*
* Load one element into @vd + @reg_off from @host.
diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c
new file mode 100644
index 0000000..95c26c6
--- /dev/null
+++ b/target/arm/tcg/tlb-insns.c
@@ -0,0 +1,1306 @@
+/*
+ * Helpers for TLBI insns
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
+#include "cpu.h"
+#include "internals.h"
+#include "cpu-features.h"
+#include "cpregs.h"
+
+/* Check for traps from EL1 due to HCR_EL2.TTLB. */
+static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
+static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 &&
+ (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
+static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 &&
+ (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+/*
+ * Non-IS variants of TLB operations are upgraded to
+ * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
+ * force broadcast of these operations.
+ */
+static bool tlb_force_broadcast(CPUARMState *env)
+{
+ return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
+}
+
+static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate all (TLBIALL) */
+ CPUState *cs = env_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_all_cpus_synced(cs);
+ } else {
+ tlb_flush(cs);
+ }
+}
+
+static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
+ CPUState *cs = env_cpu(env);
+
+ value &= TARGET_PAGE_MASK;
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_all_cpus_synced(cs, value);
+ } else {
+ tlb_flush_page(cs, value);
+ }
+}
+
+static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by ASID (TLBIASID) */
+ CPUState *cs = env_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_all_cpus_synced(cs);
+ } else {
+ tlb_flush(cs);
+ }
+}
+
+static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
+ CPUState *cs = env_cpu(env);
+
+ value &= TARGET_PAGE_MASK;
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_all_cpus_synced(cs, value);
+ } else {
+ tlb_flush_page(cs, value);
+ }
+}
+
+static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
+}
+
+static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_E2);
+}
+
+static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
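+ /* Bits [27:0] of the register value hold IPA[39:12]. */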
+ uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
+static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
+static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
+}
+
+static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
+}
+
+static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
+}
+
+static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
+}
+
+/*
+ * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
+ * Page D4-1736 (DDI0487A.b)
+ */
+
+static int vae1_tlbmask(CPUARMState *env)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ uint16_t mask;
+
+ assert(arm_feature(env, ARM_FEATURE_AARCH64));
+
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ mask = ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0;
+ } else {
+ /* This is AArch64 only, so we don't need to touch the EL30_x TLBs */
+ mask = ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
+ }
+ return mask;
+}
+
+static int vae2_tlbmask(CPUARMState *env)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ uint16_t mask;
+
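+ /* With HCR_EL2.E2H set, EL2 uses the EL2&0 translation regime. */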
+ if (hcr & HCR_E2H) {
+ mask = ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0;
+ } else {
+ mask = ARMMMUIdxBit_E2;
+ }
+ return mask;
+}
+
+/* Return 56 if TBI is enabled, 64 otherwise. */
+static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint64_t addr)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx);
+ int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ int select = extract64(addr, 55, 1);
+
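+ /* With TBI the top byte [63:56] is ignored, so only 56 bits match. */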
+ return (tbi >> select) & 1 ? 56 : 64;
+}
+
+static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ ARMMMUIdx mmu_idx;
+
+ assert(arm_feature(env, ARM_FEATURE_AARCH64));
+
+ /* Only the regime of the mmu_idx below is significant. */
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ mmu_idx = ARMMMUIdx_E20_0;
+ } else {
+ mmu_idx = ARMMMUIdx_E10_0;
+ }
+
+ return tlbbits_for_regime(env, mmu_idx, addr);
+}
+
+static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ ARMMMUIdx mmu_idx;
+
+ /*
+ * Only the regime of the mmu_idx below is significant.
+ * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
+ * only has one.
+ */
+ if (hcr & HCR_E2H) {
+ mmu_idx = ARMMMUIdx_E20_2;
+ } else {
+ mmu_idx = ARMMMUIdx_E2;
+ }
+
+ return tlbbits_for_regime(env, mmu_idx, addr);
+}
+
+static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+ } else {
+ tlb_flush_by_mmuidx(cs, mask);
+ }
+}
+
+static int e2_tlbmask(CPUARMState *env)
+{
+ return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2);
+}
+
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = alle1_tlbmask(env);
+
+ tlb_flush_by_mmuidx(cs, mask);
+}
+
+static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = e2_tlbmask(env);
+
+ tlb_flush_by_mmuidx(cs, mask);
+}
+
+static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
+}
+
+static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = alle1_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = e2_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
+}
+
+static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA, EL2
+ * Currently handles both VAE2 and VALE2, since we don't support
+ * flush-last-level-only.
+ */
+ CPUState *cs = env_cpu(env);
+ int mask = vae2_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae2_tlbbits(env, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
+}
+
+static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA, EL3
+ * Currently handles both VAE3 and VALE3, since we don't support
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
+}
+
+static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae1_tlbbits(env, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+}
+
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae1_tlbbits(env, pageaddr);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+ } else {
+ tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
+ }
+}
+
+static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae2_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae2_tlbbits(env, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+}
+
+static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_E3, bits);
+}
+
+static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
+{
+ /*
+ * The MSB of value is the NS field, which only applies if SEL2
+ * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
+ */
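+ /* @value is signed, so "value >= 0" below tests that bit 63 (NS) is 0. */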
+ return (value >= 0
+ && cpu_isar_feature(aa64_sel2, env_archcpu(env))
+ && arm_is_secure_below_el3(env)
+ ? ARMMMUIdxBit_Stage2_S
+ : ARMMMUIdxBit_Stage2);
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = ipas2e1_tlbmask(env, value);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+ }
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = ipas2e1_tlbmask(env, value);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+}
+
+static const ARMCPRegInfo tlbi_not_v7_cp_reginfo[] = {
+ /*
+ * MMU TLB control. Note that the wildcarding means we cover not just
+ * the unified TLB ops but also the dside/iside/inner-shareable variants.
+ */
+ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
+ .type = ARM_CP_NO_RAW },
+};
+
+static const ARMCPRegInfo tlbi_v7_cp_reginfo[] = {
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_write },
+};
+
+static const ARMCPRegInfo tlbi_v7mp_cp_reginfo[] = {
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbiall_is_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbimva_is_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbiasid_is_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbimvaa_is_write },
+};
+
+static const ARMCPRegInfo tlbi_v8_cp_reginfo[] = {
+ /* AArch32 TLB invalidate last level of translation table walk */
+ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbimva_is_write },
+ { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
+ .writefn = tlbimvaa_is_write },
+ { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_write },
+ { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVALHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBIIPAS2",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_hyp_write },
+ { .name = "TLBIIPAS2IS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2is_hyp_write },
+ { .name = "TLBIIPAS2L",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_hyp_write },
+ { .name = "TLBIIPAS2LIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2is_hyp_write },
+ /* AArch64 TLBI operations */
+ { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVMALLE1IS,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAE1IS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIASIDE1IS,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAAE1IS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVALE1IS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAALE1IS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVMALLE1,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAE1,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIASIDE1,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAAE1,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVALE1,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAALE1,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1_write },
+ { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1is_write },
+};
+
+static const ARMCPRegInfo tlbi_el2_cp_reginfo[] = {
+ { .name = "TLBIALLNSNH",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_write },
+ { .name = "TLBIALLNSNHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_is_write },
+ { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_write },
+ { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_is_write },
+ { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_alle2_write },
+ { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2is_write },
+};
+
+static const ARMCPRegInfo tlbi_el3_cp_reginfo[] = {
+ { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle3_write },
+ { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3_write },
+ { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3_write },
+};
+
+typedef struct {
+ uint64_t base;
+ uint64_t length;
+} TLBIRange;
+
+static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
+{
+ /*
+ * Note that the TLBI range TG field encoding differs from both
+ * TG0 and TG1 encodings.
+ */
+ switch (tg) {
+ case 1:
+ return Gran4K;
+ case 2:
+ return Gran16K;
+ case 3:
+ return Gran64K;
+ default:
+ return GranInvalid;
+ }
+}
+
+static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
+ uint64_t value)
+{
+ unsigned int page_size_granule, page_shift, num, scale, exponent;
+ /* Extract one bit to represent the va selector in use. */
+ uint64_t select = sextract64(value, 36, 1);
+ ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
+ TLBIRange ret = { };
+ ARMGranuleSize gran;
+
+ page_size_granule = extract64(value, 46, 2);
+ gran = tlbi_range_tg_to_gran_size(page_size_granule);
+
+ /* The granule encoded in value must match the granule in use. */
+ if (gran != param.gran) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
+ page_size_granule);
+ return ret;
+ }
+
+ page_shift = arm_granule_bits(gran);
+ num = extract64(value, 39, 5);
+ scale = extract64(value, 44, 2);
+ exponent = (5 * scale) + 1;
+
+ ret.length = (num + 1) << (exponent + page_shift);
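+ /*
+ * For example, with a 4K granule (page_shift == 12), NUM == 3 and
+ * SCALE == 1: exponent == 6, so length = 4 << 18 = 1MiB.
+ */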
+
+ if (param.select) {
+ ret.base = sextract64(value, 0, 37);
+ } else {
+ ret.base = extract64(value, 0, 37);
+ }
+ if (param.ds) {
+ /*
+ * With DS=1, BaseADDR is always shifted left by 16 so that it can
+ * address all 52 VA bits. The input address is necessarily aligned
+ * on a 64k boundary regardless of the translation granule.
+ */
+ page_shift = 16;
+ }
+ ret.base <<= page_shift;
+
+ return ret;
+}
+
+static void do_rvae_write(CPUARMState *env, uint64_t value,
+ int idxmap, bool synced)
+{
+ ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
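+ /*
+ * Decode the range and the TBI bits using the lowest mmu_idx in
+ * @idxmap as representative.
+ */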
+ TLBIRange range;
+ int bits;
+
+ range = tlbi_aa64_get_range(env, one_idx, value);
+ bits = tlbbits_for_regime(env, one_idx, range.base);
+
+ if (synced) {
+ tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
+ range.base,
+ range.length,
+ idxmap,
+ bits);
+ } else {
+ tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
+ range.length, idxmap, bits);
+ }
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL1&0.
+ * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+ * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+ * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+ * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+ * shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env), true);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL2.
+ * Currently handles all of RVAE2 and RVALE2,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable, EL2.
+ * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env), true);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3.
+ * Currently handles all of RVAE3 and RVALE3,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3, Inner/Outer Shareable.
+ * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer specific flushes.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
+}
+
+static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
+}
+
+static const ARMCPRegInfo tlbirange_reginfo[] = {
+ { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAE1IS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAAE1IS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVALE1IS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlbis,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAALE1IS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAE1OS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAAE1OS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVALE1OS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAALE1OS,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAE1,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAAE1,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVALE1,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlb,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIRVAALE1,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ripas2e1is_write },
+ { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ripas2e1is_write },
+ { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ripas2e1_write },
+ { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_ripas2e1_write },
+ { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3_write },
+ { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_rvae3_write },
+};
+
+static const ARMCPRegInfo tlbios_reginfo[] = {
+ { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVMALLE1OS,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
+ .fgt = FGT_TLBIVAE1OS,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIASIDE1OS,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAAE1OS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVALE1OS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlbos,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .fgt = FGT_TLBIVAALE1OS,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL2_W,
+ .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS },
+ { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS },
+ { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS },
+ { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS },
+ { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS,
+ .writefn = tlbi_aa64_vae3is_write },
+};
+
+static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush(cs);
+}
+
+static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static const ARMCPRegInfo tlbi_rme_reginfo[] = {
+ { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paall_write },
+ { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+ /*
+ * QEMU does not have a way to invalidate by physical address, thus
+ * invalidating a range of physical addresses is accomplished by
+ * flushing all tlb entries in the outer shareable domain,
+ * just like PAALLOS.
+ */
+ { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+ { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_paallos_write },
+};
+
+void define_tlb_insn_regs(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ define_arm_cp_regs(cpu, tlbi_not_v7_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, tlbi_v7_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7MP) &&
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
+ define_arm_cp_regs(cpu, tlbi_v7mp_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, tlbi_v8_cp_reginfo);
+ }
+ /*
+ * We retain the existing logic for when to register these TLBI
+ * ops (i.e. matching the condition for el2_cp_reginfo[] in
+ * helper.c), but we will be able to simplify this later.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ define_arm_cp_regs(cpu, tlbi_el2_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_arm_cp_regs(cpu, tlbi_el3_cp_reginfo);
+ }
+ if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+ define_arm_cp_regs(cpu, tlbirange_reginfo);
+ }
+ if (cpu_isar_feature(aa64_tlbios, cpu)) {
+ define_arm_cp_regs(cpu, tlbios_reginfo);
+ }
+ if (cpu_isar_feature(aa64_rme, cpu)) {
+ define_arm_cp_regs(cpu, tlbi_rme_reginfo);
+ }
+}
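
The tables above identify each TLBI operation purely by its (opc0, opc1, CRn, CRm, opc2) system-instruction encoding, plus an access level and a write handler; define_tlb_insn_regs() then registers whole tables according to which architecture features the CPU implements. Below is a minimal standalone sketch of that name-to-encoding mapping, using three EL3 outer-shareable entries copied from tlbios_reginfo[] above; the struct, the linear lookup and main() are hypothetical illustrations only, not QEMU's actual cpreg registration or lookup path.

    #include <stddef.h>
    #include <stdio.h>

    /* Cut-down stand-in for ARMCPRegInfo: just the name and the
     * (opc0, opc1, CRn, CRm, opc2) encoding taken from the table above. */
    typedef struct {
        const char *name;
        int opc0, opc1, crn, crm, opc2;
    } TlbiEncoding;

    static const TlbiEncoding el3_os_tlbi[] = {
        { "TLBI_ALLE3OS", 1, 6, 8, 1, 0 },
        { "TLBI_VAE3OS",  1, 6, 8, 1, 1 },
        { "TLBI_VALE3OS", 1, 6, 8, 1, 5 },
    };

    /* Illustrative linear lookup by encoding; it only shows how the five
     * fields uniquely identify one TLBI instruction. */
    static const char *tlbi_name(int opc0, int opc1, int crn, int crm, int opc2)
    {
        for (size_t i = 0; i < sizeof(el3_os_tlbi) / sizeof(el3_os_tlbi[0]); i++) {
            const TlbiEncoding *e = &el3_os_tlbi[i];
            if (e->opc0 == opc0 && e->opc1 == opc1 &&
                e->crn == crn && e->crm == crm && e->opc2 == opc2) {
                return e->name;
            }
        }
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", tlbi_name(1, 6, 8, 1, 1)); /* prints TLBI_VAE3OS */
        return 0;
    }
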
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 885bf4e..23c72a9 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -9,9 +9,9 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/*
* Returns true if the stage 1 translation regime is using LPAE format page
@@ -277,7 +277,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
-void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+void helper_exception_pc_alignment(CPUARMState *env, vaddr pc)
{
ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
int target_el = exception_target_el(env);
@@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra)
{
ARMCPU *cpu = ARM_CPU(cs);
GetPhysAddrResult res = {};
ARMMMUFaultInfo local_fi, *fi;
- int ret;
/*
* Allow S1_ptw_translate to see any fault generated here.
@@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
/*
- * Walk the page table and (if the mapping exists) add the page
- * to the TLB. On success, return true. Otherwise, if probing,
- * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
- * register format, and signal the fault.
+ * Per R_XCHFJ, alignment fault not due to memory type has
+ * highest precedence. Otherwise, walk the page table and
+ * collect the page description.
*/
- ret = get_phys_addr(&cpu->env, address, access_type,
- core_to_arm_mmu_idx(&cpu->env, mmu_idx),
- &res, fi);
- if (likely(!ret)) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
- res.f.phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
-
+ if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
+ fi->type = ARMFault_Alignment;
+ } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &res, fi)) {
res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
res.f.extra.arm.shareability = res.cacheattrs.shareability;
-
- tlb_set_page_full(cs, mmu_idx, address, &res.f);
+ *out = res.f;
return true;
- } else if (probe) {
+ }
+ if (probe) {
return false;
- } else {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
+
+ /* Now we have a real cpu fault. */
+ cpu_restore_state(cs, ra);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
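
A note on the new ordering in arm_cpu_tlb_fill_align() above: the alignment test runs before the page-table walk, so an alignment fault takes precedence over a translation fault (per R_XCHFJ). The mask expression is ordinary power-of-two arithmetic; a minimal standalone sketch follows (plain C, hypothetical helper name, not the QEMU API).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for memop_alignment_bits(): log2 of the required
     * alignment, e.g. 2 for a naturally aligned 32-bit access. */
    static bool is_misaligned(uint64_t address, unsigned align_bits)
    {
        /* Same shape as the test in arm_cpu_tlb_fill_align():
         * the low align_bits bits of the address must all be zero. */
        return (address & ((1ull << align_bits) - 1)) != 0;
    }

    int main(void)
    {
        printf("%d\n", is_misaligned(0x1000, 2)); /* 0: 4-byte aligned */
        printf("%d\n", is_misaligned(0x1002, 2)); /* 1: misaligned 32-bit access */
        return 0;
    }
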
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 148be28..ac80f57 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -17,8 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
-
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
@@ -75,17 +74,6 @@ static int scale_by_log2_tag_granule(DisasContext *s, int x)
#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"
-/* Table based decoder typedefs - used when the relevant bits for decode
- * are too awkwardly scattered across the instruction (eg SIMD).
- */
-typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
-
-typedef struct AArch64DecodeTable {
- uint32_t pattern;
- uint32_t mask;
- AArch64DecodeFn *disas_fn;
-} AArch64DecodeTable;
-
/* initialize TCG globals. */
void a64_translate_init(void)
{
@@ -294,7 +282,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
@@ -326,7 +314,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
@@ -628,7 +616,16 @@ static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
return v;
}
-/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
+static void clear_vec(DisasContext *s, int rd)
+{
+ unsigned ofs = fp_reg_offset(s, rd, MO_64);
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_dup_imm(MO_64, ofs, vsz, vsz, 0);
+}
+
+/*
+ * Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
* If SVE is not enabled, then there are only 128 bits in the vector.
*/
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
@@ -656,6 +653,68 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
write_fp_dreg(s, reg, tmp);
}
+/*
+ * Write a double result to 128 bit vector register reg, honouring FPCR.NEP:
+ * - if FPCR.NEP == 0, clear the high elements of reg
+ * - if FPCR.NEP == 1, set the high elements of reg from mergereg
+ * (i.e. merge the result with those high elements)
+ * In either case, SVE register bits above 128 are zeroed (per R_WKYLB).
+ */
+static void write_fp_dreg_merging(DisasContext *s, int reg, int mergereg,
+ TCGv_i64 v)
+{
+ if (!s->fpcr_nep) {
+ write_fp_dreg(s, reg, v);
+ return;
+ }
+
+ /*
+ * Move from mergereg to reg; this sets the high elements and
+ * clears the bits above 128 as a side effect.
+ */
+ tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg),
+ vec_full_reg_offset(s, mergereg),
+ 16, vec_full_reg_size(s));
+ tcg_gen_st_i64(v, tcg_env, vec_full_reg_offset(s, reg));
+}
+
+/*
+ * Write a single-prec result, but only clear the higher elements
+ * of the destination register if FPCR.NEP is 0; otherwise preserve them.
+ */
+static void write_fp_sreg_merging(DisasContext *s, int reg, int mergereg,
+ TCGv_i32 v)
+{
+ if (!s->fpcr_nep) {
+ write_fp_sreg(s, reg, v);
+ return;
+ }
+
+ tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg),
+ vec_full_reg_offset(s, mergereg),
+ 16, vec_full_reg_size(s));
+ tcg_gen_st_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
+}
+
+/*
+ * Write a half-prec result, but only clear the higher elements
+ * of the destination register if FPCR.NEP is 0; otherwise preserve them.
+ * The caller must ensure that the top 16 bits of v are zero.
+ */
+static void write_fp_hreg_merging(DisasContext *s, int reg, int mergereg,
+ TCGv_i32 v)
+{
+ if (!s->fpcr_nep) {
+ write_fp_sreg(s, reg, v);
+ return;
+ }
+
+ tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg),
+ vec_full_reg_offset(s, mergereg),
+ 16, vec_full_reg_size(s));
+ tcg_gen_st16_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
+}
+
/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
GVecGen2Fn *gvec_fn, int vece)
@@ -714,10 +773,10 @@ static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
* an out-of-line helper.
*/
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
- int rm, bool is_fp16, int data,
+ int rm, ARMFPStatusFlavour fpsttype, int data,
gen_helper_gvec_3_ptr *fn)
{
- TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ TCGv_ptr fpst = fpstatus_ptr(fpsttype);
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
vec_full_reg_offset(s, rm), fpst,
@@ -736,14 +795,31 @@ static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
}
/*
+ * Expand a 4-operand operation using an out-of-line helper that takes
+ * a pointer to the CPU env.
+ */
+static void gen_gvec_op4_env(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, int data,
+ gen_helper_gvec_4_ptr *fn)
+{
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ tcg_env,
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
+
+/*
* Expand a 4-operand + fpstatus pointer + simd data value operation using
* an out-of-line helper.
*/
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
- int rm, int ra, bool is_fp16, int data,
+ int rm, int ra, ARMFPStatusFlavour fpsttype,
+ int data,
gen_helper_gvec_4_ptr *fn)
{
- TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ TCGv_ptr fpst = fpstatus_ptr(fpsttype);
tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
vec_full_reg_offset(s, rm),
@@ -751,6 +827,111 @@ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
+/*
+ * When FPCR.AH == 1, NEG and ABS do not flip the sign bit of a NaN.
+ * These functions implement
+ * d = floatN_is_any_nan(s) ? s : floatN_chs(s)
+ * which for float32 is
+ * d = ((s & ~(1 << 31)) > 0x7f800000UL) ? s : (s ^ (1 << 31))
+ * and similarly for the other float sizes.
+ */
+static void gen_vfp_ah_negh(TCGv_i32 d, TCGv_i32 s)
+{
+ TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32();
+
+ gen_vfp_negh(chs_s, s);
+ gen_vfp_absh(abs_s, s);
+ tcg_gen_movcond_i32(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i32(0x7c00),
+ s, chs_s);
+}
+
+static void gen_vfp_ah_negs(TCGv_i32 d, TCGv_i32 s)
+{
+ TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32();
+
+ gen_vfp_negs(chs_s, s);
+ gen_vfp_abss(abs_s, s);
+ tcg_gen_movcond_i32(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i32(0x7f800000UL),
+ s, chs_s);
+}
+
+static void gen_vfp_ah_negd(TCGv_i64 d, TCGv_i64 s)
+{
+ TCGv_i64 abs_s = tcg_temp_new_i64(), chs_s = tcg_temp_new_i64();
+
+ gen_vfp_negd(chs_s, s);
+ gen_vfp_absd(abs_s, s);
+ tcg_gen_movcond_i64(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i64(0x7ff0000000000000ULL),
+ s, chs_s);
+}
+
+/*
+ * These functions implement
+ * d = floatN_is_any_nan(s) ? s : floatN_abs(s)
+ * which for float32 is
+ * d = ((s & ~(1 << 31)) > 0x7f800000UL) ? s : (s & ~(1 << 31))
+ * and similarly for the other float sizes.
+ */
+static void gen_vfp_ah_absh(TCGv_i32 d, TCGv_i32 s)
+{
+ TCGv_i32 abs_s = tcg_temp_new_i32();
+
+ gen_vfp_absh(abs_s, s);
+ tcg_gen_movcond_i32(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i32(0x7c00),
+ s, abs_s);
+}
+
+static void gen_vfp_ah_abss(TCGv_i32 d, TCGv_i32 s)
+{
+ TCGv_i32 abs_s = tcg_temp_new_i32();
+
+ gen_vfp_abss(abs_s, s);
+ tcg_gen_movcond_i32(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i32(0x7f800000UL),
+ s, abs_s);
+}
+
+static void gen_vfp_ah_absd(TCGv_i64 d, TCGv_i64 s)
+{
+ TCGv_i64 abs_s = tcg_temp_new_i64();
+
+ gen_vfp_absd(abs_s, s);
+ tcg_gen_movcond_i64(TCG_COND_GTU, d,
+ abs_s, tcg_constant_i64(0x7ff0000000000000ULL),
+ s, abs_s);
+}
+
+static void gen_vfp_maybe_ah_negh(DisasContext *dc, TCGv_i32 d, TCGv_i32 s)
+{
+ if (dc->fpcr_ah) {
+ gen_vfp_ah_negh(d, s);
+ } else {
+ gen_vfp_negh(d, s);
+ }
+}
+
+static void gen_vfp_maybe_ah_negs(DisasContext *dc, TCGv_i32 d, TCGv_i32 s)
+{
+ if (dc->fpcr_ah) {
+ gen_vfp_ah_negs(d, s);
+ } else {
+ gen_vfp_negs(d, s);
+ }
+}
+
+static void gen_vfp_maybe_ah_negd(DisasContext *dc, TCGv_i64 d, TCGv_i64 s)
+{
+ if (dc->fpcr_ah) {
+ gen_vfp_ah_negd(d, s);
+ } else {
+ gen_vfp_negd(d, s);
+ }
+}
+
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
* than the 32 bit equivalent.
*/
@@ -894,11 +1075,9 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i64 cf_64 = tcg_temp_new_i64();
TCGv_i64 vf_64 = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
- tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
- tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
+ tcg_gen_addcio_i64(result, cf_64, t0, t1, cf_64);
tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);
@@ -912,12 +1091,10 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 t0_32 = tcg_temp_new_i32();
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp = tcg_temp_new_i32();
- TCGv_i32 zero = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(t0_32, t0);
tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0_32, t1_32, cpu_CF);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
@@ -1199,14 +1376,14 @@ static bool fp_access_check_only(DisasContext *s)
{
if (s->fp_excp_el) {
assert(!s->fp_access_checked);
- s->fp_access_checked = true;
+ s->fp_access_checked = -1;
gen_exception_insn_el(s, 0, EXCP_UDEF,
syn_fp_access_trap(1, 0xe, false, 0),
s->fp_excp_el);
return false;
}
- s->fp_access_checked = true;
+ s->fp_access_checked = 1;
return true;
}
@@ -1224,6 +1401,49 @@ static bool fp_access_check(DisasContext *s)
}
/*
+ * Return <0 for non-supported element sizes, with MO_16 controlled by
+ * FEAT_FP16; return 0 for fp disabled; otherwise return >0 for success.
+ */
+static int fp_access_check_scalar_hsd(DisasContext *s, MemOp esz)
+{
+ switch (esz) {
+ case MO_64:
+ case MO_32:
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return fp_access_check(s);
+}
+
+/* Likewise, but vector MO_64 must have two elements. */
+static int fp_access_check_vector_hsd(DisasContext *s, bool is_q, MemOp esz)
+{
+ switch (esz) {
+ case MO_64:
+ if (!is_q) {
+ return -1;
+ }
+ break;
+ case MO_32:
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return fp_access_check(s);
+}
+
+/*
* Check that SVE access is enabled. If it is, return true.
* If not, emit code to generate an appropriate exception and return false.
* This function corresponds to CheckSVEEnabled().
@@ -1231,23 +1451,23 @@ static bool fp_access_check(DisasContext *s)
bool sve_access_check(DisasContext *s)
{
if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+ bool ret;
+
assert(dc_isar_feature(aa64_sme, s));
- if (!sme_sm_enabled_check(s)) {
- goto fail_exit;
- }
- } else if (s->sve_excp_el) {
+ ret = sme_sm_enabled_check(s);
+ s->sve_access_checked = (ret ? 1 : -1);
+ return ret;
+ }
+ if (s->sve_excp_el) {
+ /* Assert that we only raise one exception per instruction. */
+ assert(!s->sve_access_checked);
gen_exception_insn_el(s, 0, EXCP_UDEF,
syn_sve_access_trap(), s->sve_excp_el);
- goto fail_exit;
+ s->sve_access_checked = -1;
+ return false;
}
- s->sve_access_checked = true;
+ s->sve_access_checked = 1;
return fp_access_check(s);
-
- fail_exit:
- /* Assert that we only raise one exception per instruction. */
- assert(!s->sve_access_checked);
- s->sve_access_checked = true;
- return false;
}
/*
@@ -1275,8 +1495,9 @@ bool sme_enabled_check(DisasContext *s)
* sme_excp_el by itself for cpregs access checks.
*/
if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
- s->fp_access_checked = true;
- return sme_access_check(s);
+ bool ret = sme_access_check(s);
+ s->fp_access_checked = (ret ? 1 : -1);
+ return ret;
}
return fp_access_check_only(s);
}
@@ -1398,31 +1619,6 @@ static inline void gen_check_sp_alignment(DisasContext *s)
}
/*
- * This provides a simple table based table lookup decoder. It is
- * intended to be used when the relevant bits for decode are too
- * awkwardly placed and switch/if based logic would be confusing and
- * deeply nested. Since it's a linear search through the table, tables
- * should be kept small.
- *
- * It returns the first handler where insn & mask == pattern, or
- * NULL if there is no match.
- * The table is terminated by an empty mask (i.e. 0)
- */
-static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
- uint32_t insn)
-{
- const AArch64DecodeTable *tptr = table;
-
- while (tptr->mask) {
- if ((insn & tptr->mask) == tptr->pattern) {
- return tptr->disas_fn;
- }
- tptr++;
- }
- return NULL;
-}
-
-/*
* The instruction disassembly implemented here matches
* the instruction encoding classifications in chapter C4
* of the ARM Architecture Reference Manual (DDI0487B_a);
@@ -1507,7 +1703,14 @@ static void set_btype_for_br(DisasContext *s, int rn)
{
if (dc_isar_feature(aa64_bti, s)) {
/* BR to {x16,x17} or !guard -> 1, else 3. */
- set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
+ if (rn == 16 || rn == 17) {
+ set_btype(s, 1);
+ } else {
+ TCGv_i64 pc = tcg_temp_new_i64();
+ gen_pc_plus_diff(s, pc, 0);
+ gen_helper_guarded_page_br(tcg_env, pc);
+ s->btype = -1;
+ }
}
}
@@ -1521,8 +1724,8 @@ static void set_btype_for_blr(DisasContext *s)
static bool trans_BR(DisasContext *s, arg_r *a)
{
- gen_a64_set_pc(s, cpu_reg(s, a->rn));
set_btype_for_br(s, a->rn);
+ gen_a64_set_pc(s, cpu_reg(s, a->rn));
s->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1581,8 +1784,8 @@ static bool trans_BRAZ(DisasContext *s, arg_braz *a)
}
dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
- gen_a64_set_pc(s, dst);
set_btype_for_br(s, a->rn);
+ gen_a64_set_pc(s, dst);
s->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1936,6 +2139,15 @@ static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
return true;
}
+static bool trans_DSB_nXS(DisasContext *s, arg_DSB_nXS *a)
+{
+ if (!dc_isar_feature(aa64_xs, s)) {
+ return false;
+ }
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+ return true;
+}
+
static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
/*
@@ -4657,6 +4869,88 @@ static bool trans_EXTR(DisasContext *s, arg_extract *a)
return true;
}
+static bool trans_TBL_TBX(DisasContext *s, arg_TBL_TBX *a)
+{
+ if (fp_access_check(s)) {
+ int len = (a->len + 1) * 16;
+
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rm), tcg_env,
+ a->q ? 16 : 8, vec_full_reg_size(s),
+ (len << 6) | (a->tbx << 5) | a->rn,
+ gen_helper_simd_tblx);
+ }
+ return true;
+}
+
+typedef int simd_permute_idx_fn(int i, int part, int elements);
+
+static bool do_simd_permute(DisasContext *s, arg_qrrr_e *a,
+ simd_permute_idx_fn *fn, int part)
+{
+ MemOp esz = a->esz;
+ int datasize = a->q ? 16 : 8;
+ int elements = datasize >> esz;
+ TCGv_i64 tcg_res[2], tcg_ele;
+
+ if (esz == MO_64 && !a->q) {
+ return false;
+ }
+ if (!fp_access_check(s)) {
+ return true;
+ }
+
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = a->q ? tcg_temp_new_i64() : NULL;
+ tcg_ele = tcg_temp_new_i64();
+
+ for (int i = 0; i < elements; i++) {
+ int o, w, idx;
+
+ idx = fn(i, part, elements);
+ read_vec_element(s, tcg_ele, (idx & elements ? a->rm : a->rn),
+ idx & (elements - 1), esz);
+
+ w = (i << (esz + 3)) / 64;
+ o = (i << (esz + 3)) % 64;
+ if (o == 0) {
+ tcg_gen_mov_i64(tcg_res[w], tcg_ele);
+ } else {
+ tcg_gen_deposit_i64(tcg_res[w], tcg_res[w], tcg_ele, o, 8 << esz);
+ }
+ }
+
+ for (int i = a->q; i >= 0; --i) {
+ write_vec_element(s, tcg_res[i], a->rd, i, MO_64);
+ }
+ clear_vec_high(s, a->q, a->rd);
+ return true;
+}
+
+static int permute_load_uzp(int i, int part, int elements)
+{
+ return 2 * i + part;
+}
+
+TRANS(UZP1, do_simd_permute, a, permute_load_uzp, 0)
+TRANS(UZP2, do_simd_permute, a, permute_load_uzp, 1)
+
+static int permute_load_trn(int i, int part, int elements)
+{
+ return (i & 1) * elements + (i & ~1) + part;
+}
+
+TRANS(TRN1, do_simd_permute, a, permute_load_trn, 0)
+TRANS(TRN2, do_simd_permute, a, permute_load_trn, 1)
+
+static int permute_load_zip(int i, int part, int elements)
+{
+ return (i & 1) * elements + ((part * elements + i) >> 1);
+}
+
+TRANS(ZIP1, do_simd_permute, a, permute_load_zip, 0)
+TRANS(ZIP2, do_simd_permute, a, permute_load_zip, 1)
+
/*
* Cryptographic AES, SHA, SHA512
*/
@@ -4703,7 +4997,6 @@ static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a)
TCGv_i32 tcg_op2 = tcg_temp_new_i32();
TCGv_i32 tcg_op3 = tcg_temp_new_i32();
TCGv_i32 tcg_res = tcg_temp_new_i32();
- unsigned vsz, dofs;
read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32);
read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32);
@@ -4715,9 +5008,7 @@ static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a)
tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
/* Clear the whole register first, then store bits [127:96]. */
- vsz = vec_full_reg_size(s);
- dofs = vec_full_reg_offset(s, a->rd);
- tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
+ clear_vec(s, a->rd);
write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32);
}
return true;
@@ -4898,23 +5189,25 @@ typedef struct FPScalar {
void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
} FPScalar;
-static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f)
+static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a,
+ const FPScalar *f, int mergereg,
+ ARMFPStatusFlavour fpsttype)
{
switch (a->esz) {
case MO_64:
if (fp_access_check(s)) {
TCGv_i64 t0 = read_fp_dreg(s, a->rn);
TCGv_i64 t1 = read_fp_dreg(s, a->rm);
- f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
- write_fp_dreg(s, a->rd, t0);
+ f->gen_d(t0, t0, t1, fpstatus_ptr(fpsttype));
+ write_fp_dreg_merging(s, a->rd, mergereg, t0);
}
break;
case MO_32:
if (fp_access_check(s)) {
TCGv_i32 t0 = read_fp_sreg(s, a->rn);
TCGv_i32 t1 = read_fp_sreg(s, a->rm);
- f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
- write_fp_sreg(s, a->rd, t0);
+ f->gen_s(t0, t0, t1, fpstatus_ptr(fpsttype));
+ write_fp_sreg_merging(s, a->rd, mergereg, t0);
}
break;
case MO_16:
@@ -4924,8 +5217,8 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f)
if (fp_access_check(s)) {
TCGv_i32 t0 = read_fp_hreg(s, a->rn);
TCGv_i32 t1 = read_fp_hreg(s, a->rm);
- f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
- write_fp_sreg(s, a->rd, t0);
+ f->gen_h(t0, t0, t1, fpstatus_ptr(fpsttype));
+ write_fp_hreg_merging(s, a->rd, mergereg, t0);
}
break;
default:
@@ -4934,68 +5227,103 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f)
return true;
}
+static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f,
+ int mergereg)
+{
+ return do_fp3_scalar_with_fpsttype(s, a, f, mergereg,
+ a->esz == MO_16 ?
+ FPST_A64_F16 : FPST_A64);
+}
+
+static bool do_fp3_scalar_ah_2fn(DisasContext *s, arg_rrr_e *a,
+ const FPScalar *fnormal, const FPScalar *fah,
+ int mergereg)
+{
+ return do_fp3_scalar_with_fpsttype(s, a, s->fpcr_ah ? fah : fnormal,
+ mergereg, select_ah_fpst(s, a->esz));
+}
+
+/* Some insns need to call different helpers when FPCR.AH == 1 */
+static bool do_fp3_scalar_2fn(DisasContext *s, arg_rrr_e *a,
+ const FPScalar *fnormal,
+ const FPScalar *fah,
+ int mergereg)
+{
+ return do_fp3_scalar(s, a, s->fpcr_ah ? fah : fnormal, mergereg);
+}
+
static const FPScalar f_scalar_fadd = {
gen_helper_vfp_addh,
gen_helper_vfp_adds,
gen_helper_vfp_addd,
};
-TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd)
+TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd, a->rn)
static const FPScalar f_scalar_fsub = {
gen_helper_vfp_subh,
gen_helper_vfp_subs,
gen_helper_vfp_subd,
};
-TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub)
+TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub, a->rn)
static const FPScalar f_scalar_fdiv = {
gen_helper_vfp_divh,
gen_helper_vfp_divs,
gen_helper_vfp_divd,
};
-TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv)
+TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv, a->rn)
static const FPScalar f_scalar_fmul = {
gen_helper_vfp_mulh,
gen_helper_vfp_muls,
gen_helper_vfp_muld,
};
-TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul)
+TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul, a->rn)
static const FPScalar f_scalar_fmax = {
- gen_helper_advsimd_maxh,
+ gen_helper_vfp_maxh,
gen_helper_vfp_maxs,
gen_helper_vfp_maxd,
};
-TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax)
+static const FPScalar f_scalar_fmax_ah = {
+ gen_helper_vfp_ah_maxh,
+ gen_helper_vfp_ah_maxs,
+ gen_helper_vfp_ah_maxd,
+};
+TRANS(FMAX_s, do_fp3_scalar_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah, a->rn)
static const FPScalar f_scalar_fmin = {
- gen_helper_advsimd_minh,
+ gen_helper_vfp_minh,
gen_helper_vfp_mins,
gen_helper_vfp_mind,
};
-TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin)
+static const FPScalar f_scalar_fmin_ah = {
+ gen_helper_vfp_ah_minh,
+ gen_helper_vfp_ah_mins,
+ gen_helper_vfp_ah_mind,
+};
+TRANS(FMIN_s, do_fp3_scalar_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah, a->rn)
static const FPScalar f_scalar_fmaxnm = {
- gen_helper_advsimd_maxnumh,
+ gen_helper_vfp_maxnumh,
gen_helper_vfp_maxnums,
gen_helper_vfp_maxnumd,
};
-TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm)
+TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm, a->rn)
static const FPScalar f_scalar_fminnm = {
- gen_helper_advsimd_minnumh,
+ gen_helper_vfp_minnumh,
gen_helper_vfp_minnums,
gen_helper_vfp_minnumd,
};
-TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm)
+TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm, a->rn)
static const FPScalar f_scalar_fmulx = {
gen_helper_advsimd_mulxh,
gen_helper_vfp_mulxs,
gen_helper_vfp_mulxd,
};
-TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx)
+TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx, a->rn)
static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
@@ -5015,47 +5343,70 @@ static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
gen_vfp_negd(d, d);
}
+static void gen_fnmul_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_mulh(d, n, m, s);
+ gen_vfp_ah_negh(d, d);
+}
+
+static void gen_fnmul_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_muls(d, n, m, s);
+ gen_vfp_ah_negs(d, d);
+}
+
+static void gen_fnmul_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
+{
+ gen_helper_vfp_muld(d, n, m, s);
+ gen_vfp_ah_negd(d, d);
+}
+
static const FPScalar f_scalar_fnmul = {
gen_fnmul_h,
gen_fnmul_s,
gen_fnmul_d,
};
-TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul)
+static const FPScalar f_scalar_ah_fnmul = {
+ gen_fnmul_ah_h,
+ gen_fnmul_ah_s,
+ gen_fnmul_ah_d,
+};
+TRANS(FNMUL_s, do_fp3_scalar_2fn, a, &f_scalar_fnmul, &f_scalar_ah_fnmul, a->rn)
static const FPScalar f_scalar_fcmeq = {
gen_helper_advsimd_ceq_f16,
gen_helper_neon_ceq_f32,
gen_helper_neon_ceq_f64,
};
-TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq)
+TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq, a->rm)
static const FPScalar f_scalar_fcmge = {
gen_helper_advsimd_cge_f16,
gen_helper_neon_cge_f32,
gen_helper_neon_cge_f64,
};
-TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge)
+TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge, a->rm)
static const FPScalar f_scalar_fcmgt = {
gen_helper_advsimd_cgt_f16,
gen_helper_neon_cgt_f32,
gen_helper_neon_cgt_f64,
};
-TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt)
+TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt, a->rm)
static const FPScalar f_scalar_facge = {
gen_helper_advsimd_acge_f16,
gen_helper_neon_acge_f32,
gen_helper_neon_acge_f64,
};
-TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge)
+TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge, a->rm)
static const FPScalar f_scalar_facgt = {
gen_helper_advsimd_acgt_f16,
gen_helper_neon_acgt_f32,
gen_helper_neon_acgt_f64,
};
-TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt)
+TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt, a->rm)
static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
@@ -5075,26 +5426,116 @@ static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
gen_vfp_absd(d, d);
}
+static void gen_fabd_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subh(d, n, m, s);
+ gen_vfp_ah_absh(d, d);
+}
+
+static void gen_fabd_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subs(d, n, m, s);
+ gen_vfp_ah_abss(d, d);
+}
+
+static void gen_fabd_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subd(d, n, m, s);
+ gen_vfp_ah_absd(d, d);
+}
+
static const FPScalar f_scalar_fabd = {
gen_fabd_h,
gen_fabd_s,
gen_fabd_d,
};
-TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd)
+static const FPScalar f_scalar_ah_fabd = {
+ gen_fabd_ah_h,
+ gen_fabd_ah_s,
+ gen_fabd_ah_d,
+};
+TRANS(FABD_s, do_fp3_scalar_2fn, a, &f_scalar_fabd, &f_scalar_ah_fabd, a->rn)
static const FPScalar f_scalar_frecps = {
gen_helper_recpsf_f16,
gen_helper_recpsf_f32,
gen_helper_recpsf_f64,
};
-TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps)
+static const FPScalar f_scalar_ah_frecps = {
+ gen_helper_recpsf_ah_f16,
+ gen_helper_recpsf_ah_f32,
+ gen_helper_recpsf_ah_f64,
+};
+TRANS(FRECPS_s, do_fp3_scalar_ah_2fn, a,
+ &f_scalar_frecps, &f_scalar_ah_frecps, a->rn)
static const FPScalar f_scalar_frsqrts = {
gen_helper_rsqrtsf_f16,
gen_helper_rsqrtsf_f32,
gen_helper_rsqrtsf_f64,
};
-TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts)
+static const FPScalar f_scalar_ah_frsqrts = {
+ gen_helper_rsqrtsf_ah_f16,
+ gen_helper_rsqrtsf_ah_f32,
+ gen_helper_rsqrtsf_ah_f64,
+};
+TRANS(FRSQRTS_s, do_fp3_scalar_ah_2fn, a,
+ &f_scalar_frsqrts, &f_scalar_ah_frsqrts, a->rn)
+
+static bool do_fcmp0_s(DisasContext *s, arg_rr_e *a,
+ const FPScalar *f, bool swap)
+{
+ switch (a->esz) {
+ case MO_64:
+ if (fp_access_check(s)) {
+ TCGv_i64 t0 = read_fp_dreg(s, a->rn);
+ TCGv_i64 t1 = tcg_constant_i64(0);
+ if (swap) {
+ f->gen_d(t0, t1, t0, fpstatus_ptr(FPST_A64));
+ } else {
+ f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64));
+ }
+ write_fp_dreg(s, a->rd, t0);
+ }
+ break;
+ case MO_32:
+ if (fp_access_check(s)) {
+ TCGv_i32 t0 = read_fp_sreg(s, a->rn);
+ TCGv_i32 t1 = tcg_constant_i32(0);
+ if (swap) {
+ f->gen_s(t0, t1, t0, fpstatus_ptr(FPST_A64));
+ } else {
+ f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64));
+ }
+ write_fp_sreg(s, a->rd, t0);
+ }
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 t0 = read_fp_hreg(s, a->rn);
+ TCGv_i32 t1 = tcg_constant_i32(0);
+ if (swap) {
+ f->gen_h(t0, t1, t0, fpstatus_ptr(FPST_A64_F16));
+ } else {
+ f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16));
+ }
+ write_fp_sreg(s, a->rd, t0);
+ }
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+TRANS(FCMEQ0_s, do_fcmp0_s, a, &f_scalar_fcmeq, false)
+TRANS(FCMGT0_s, do_fcmp0_s, a, &f_scalar_fcmgt, false)
+TRANS(FCMGE0_s, do_fcmp0_s, a, &f_scalar_fcmge, false)
+TRANS(FCMLT0_s, do_fcmp0_s, a, &f_scalar_fcmgt, true)
+TRANS(FCMLE0_s, do_fcmp0_s, a, &f_scalar_fcmge, true)
static bool do_satacc_s(DisasContext *s, arg_rrr_e *a,
MemOp sgn_n, MemOp sgn_m,
@@ -5290,34 +5731,46 @@ TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)
-static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data,
- gen_helper_gvec_3_ptr * const fns[3])
+static bool do_fp3_vector_with_fpsttype(DisasContext *s, arg_qrrr_e *a,
+ int data,
+ gen_helper_gvec_3_ptr * const fns[3],
+ ARMFPStatusFlavour fpsttype)
{
MemOp esz = a->esz;
+ int check = fp_access_check_vector_hsd(s, a->q, esz);
- switch (esz) {
- case MO_64:
- if (!a->q) {
- return false;
- }
- break;
- case MO_32:
- break;
- case MO_16:
- if (!dc_isar_feature(aa64_fp16, s)) {
- return false;
- }
- break;
- default:
- return false;
- }
- if (fp_access_check(s)) {
- gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
- esz == MO_16, data, fns[esz - 1]);
+ if (check <= 0) {
+ return check == 0;
}
+
+ gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, fpsttype,
+ data, fns[esz - 1]);
return true;
}
+static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data,
+ gen_helper_gvec_3_ptr * const fns[3])
+{
+ return do_fp3_vector_with_fpsttype(s, a, data, fns,
+ a->esz == MO_16 ?
+ FPST_A64_F16 : FPST_A64);
+}
+
+static bool do_fp3_vector_2fn(DisasContext *s, arg_qrrr_e *a, int data,
+ gen_helper_gvec_3_ptr * const fnormal[3],
+ gen_helper_gvec_3_ptr * const fah[3])
+{
+ return do_fp3_vector(s, a, data, s->fpcr_ah ? fah : fnormal);
+}
+
+static bool do_fp3_vector_ah_2fn(DisasContext *s, arg_qrrr_e *a, int data,
+ gen_helper_gvec_3_ptr * const fnormal[3],
+ gen_helper_gvec_3_ptr * const fah[3])
+{
+ return do_fp3_vector_with_fpsttype(s, a, data, s->fpcr_ah ? fah : fnormal,
+ select_ah_fpst(s, a->esz));
+}
+
static gen_helper_gvec_3_ptr * const f_vector_fadd[3] = {
gen_helper_gvec_fadd_h,
gen_helper_gvec_fadd_s,
@@ -5351,14 +5804,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmax[3] = {
gen_helper_gvec_fmax_s,
gen_helper_gvec_fmax_d,
};
-TRANS(FMAX_v, do_fp3_vector, a, 0, f_vector_fmax)
+static gen_helper_gvec_3_ptr * const f_vector_fmax_ah[3] = {
+ gen_helper_gvec_ah_fmax_h,
+ gen_helper_gvec_ah_fmax_s,
+ gen_helper_gvec_ah_fmax_d,
+};
+TRANS(FMAX_v, do_fp3_vector_2fn, a, 0, f_vector_fmax, f_vector_fmax_ah)
static gen_helper_gvec_3_ptr * const f_vector_fmin[3] = {
gen_helper_gvec_fmin_h,
gen_helper_gvec_fmin_s,
gen_helper_gvec_fmin_d,
};
-TRANS(FMIN_v, do_fp3_vector, a, 0, f_vector_fmin)
+static gen_helper_gvec_3_ptr * const f_vector_fmin_ah[3] = {
+ gen_helper_gvec_ah_fmin_h,
+ gen_helper_gvec_ah_fmin_s,
+ gen_helper_gvec_ah_fmin_d,
+};
+TRANS(FMIN_v, do_fp3_vector_2fn, a, 0, f_vector_fmin, f_vector_fmin_ah)
static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[3] = {
gen_helper_gvec_fmaxnum_h,
@@ -5393,7 +5856,12 @@ static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = {
gen_helper_gvec_vfms_s,
gen_helper_gvec_vfms_d,
};
-TRANS(FMLS_v, do_fp3_vector, a, 0, f_vector_fmls)
+static gen_helper_gvec_3_ptr * const f_vector_fmls_ah[3] = {
+ gen_helper_gvec_ah_vfms_h,
+ gen_helper_gvec_ah_vfms_s,
+ gen_helper_gvec_ah_vfms_d,
+};
+TRANS(FMLS_v, do_fp3_vector_2fn, a, 0, f_vector_fmls, f_vector_fmls_ah)
static gen_helper_gvec_3_ptr * const f_vector_fcmeq[3] = {
gen_helper_gvec_fceq_h,
@@ -5435,21 +5903,36 @@ static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = {
gen_helper_gvec_fabd_s,
gen_helper_gvec_fabd_d,
};
-TRANS(FABD_v, do_fp3_vector, a, 0, f_vector_fabd)
+static gen_helper_gvec_3_ptr * const f_vector_ah_fabd[3] = {
+ gen_helper_gvec_ah_fabd_h,
+ gen_helper_gvec_ah_fabd_s,
+ gen_helper_gvec_ah_fabd_d,
+};
+TRANS(FABD_v, do_fp3_vector_2fn, a, 0, f_vector_fabd, f_vector_ah_fabd)
static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = {
gen_helper_gvec_recps_h,
gen_helper_gvec_recps_s,
gen_helper_gvec_recps_d,
};
-TRANS(FRECPS_v, do_fp3_vector, a, 0, f_vector_frecps)
+static gen_helper_gvec_3_ptr * const f_vector_ah_frecps[3] = {
+ gen_helper_gvec_ah_recps_h,
+ gen_helper_gvec_ah_recps_s,
+ gen_helper_gvec_ah_recps_d,
+};
+TRANS(FRECPS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frecps, f_vector_ah_frecps)
static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = {
gen_helper_gvec_rsqrts_h,
gen_helper_gvec_rsqrts_s,
gen_helper_gvec_rsqrts_d,
};
-TRANS(FRSQRTS_v, do_fp3_vector, a, 0, f_vector_frsqrts)
+static gen_helper_gvec_3_ptr * const f_vector_ah_frsqrts[3] = {
+ gen_helper_gvec_ah_rsqrts_h,
+ gen_helper_gvec_ah_rsqrts_s,
+ gen_helper_gvec_ah_rsqrts_d,
+};
+TRANS(FRSQRTS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frsqrts, f_vector_ah_frsqrts)
static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = {
gen_helper_gvec_faddp_h,
@@ -5463,14 +5946,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmaxp[3] = {
gen_helper_gvec_fmaxp_s,
gen_helper_gvec_fmaxp_d,
};
-TRANS(FMAXP_v, do_fp3_vector, a, 0, f_vector_fmaxp)
+static gen_helper_gvec_3_ptr * const f_vector_ah_fmaxp[3] = {
+ gen_helper_gvec_ah_fmaxp_h,
+ gen_helper_gvec_ah_fmaxp_s,
+ gen_helper_gvec_ah_fmaxp_d,
+};
+TRANS(FMAXP_v, do_fp3_vector_2fn, a, 0, f_vector_fmaxp, f_vector_ah_fmaxp)
static gen_helper_gvec_3_ptr * const f_vector_fminp[3] = {
gen_helper_gvec_fminp_h,
gen_helper_gvec_fminp_s,
gen_helper_gvec_fminp_d,
};
-TRANS(FMINP_v, do_fp3_vector, a, 0, f_vector_fminp)
+static gen_helper_gvec_3_ptr * const f_vector_ah_fminp[3] = {
+ gen_helper_gvec_ah_fminp_h,
+ gen_helper_gvec_ah_fminp_s,
+ gen_helper_gvec_ah_fminp_d,
+};
+TRANS(FMINP_v, do_fp3_vector_2fn, a, 0, f_vector_fminp, f_vector_ah_fminp)
static gen_helper_gvec_3_ptr * const f_vector_fmaxnmp[3] = {
gen_helper_gvec_fmaxnump_h,
@@ -5601,11 +6094,20 @@ static bool do_dot_vector(DisasContext *s, arg_qrrr_e *a,
return true;
}
+static bool do_dot_vector_env(DisasContext *s, arg_qrrr_e *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (fp_access_check(s)) {
+ gen_gvec_op4_env(s, a->q, a->rd, a->rn, a->rm, a->rd, 0, fn);
+ }
+ return true;
+}
+
TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_b)
TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_b)
TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_b)
-TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector, a, gen_helper_gvec_bfdot)
-TRANS_FEAT(BFMMLA, aa64_bf16, do_dot_vector, a, gen_helper_gvec_bfmmla)
+TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfdot)
+TRANS_FEAT(BFMMLA, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfmmla)
TRANS_FEAT(SMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_smmla_b)
TRANS_FEAT(UMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_ummla_b)
TRANS_FEAT(USMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usmmla_b)
@@ -5617,7 +6119,8 @@ static bool trans_BFMLAL_v(DisasContext *s, arg_qrrr_e *a)
}
if (fp_access_check(s)) {
/* Q bit selects BFMLALB vs BFMLALT. */
- gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, false, a->q,
+ gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd,
+ s->fpcr_ah ? FPST_AH : FPST_A64, a->q,
gen_helper_gvec_bfmlal);
}
return true;
@@ -5628,39 +6131,36 @@ static gen_helper_gvec_3_ptr * const f_vector_fcadd[3] = {
gen_helper_gvec_fcadds,
gen_helper_gvec_fcaddd,
};
-TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0, f_vector_fcadd)
-TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1, f_vector_fcadd)
+/*
+ * Encode FPCR.AH into the data so the helper knows whether the
+ * negations it does should avoid flipping the sign bit on a NaN
+ */
+TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0 | (s->fpcr_ah << 1),
+ f_vector_fcadd)
+TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1 | (s->fpcr_ah << 1),
+ f_vector_fcadd)
static bool trans_FCMLA_v(DisasContext *s, arg_FCMLA_v *a)
{
- gen_helper_gvec_4_ptr *fn;
+ static gen_helper_gvec_4_ptr * const fn[] = {
+ [MO_16] = gen_helper_gvec_fcmlah,
+ [MO_32] = gen_helper_gvec_fcmlas,
+ [MO_64] = gen_helper_gvec_fcmlad,
+ };
+ int check;
if (!dc_isar_feature(aa64_fcma, s)) {
return false;
}
- switch (a->esz) {
- case MO_64:
- if (!a->q) {
- return false;
- }
- fn = gen_helper_gvec_fcmlad;
- break;
- case MO_32:
- fn = gen_helper_gvec_fcmlas;
- break;
- case MO_16:
- if (!dc_isar_feature(aa64_fp16, s)) {
- return false;
- }
- fn = gen_helper_gvec_fcmlah;
- break;
- default:
- return false;
- }
- if (fp_access_check(s)) {
- gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
- a->esz == MO_16, a->rot, fn);
+
+ check = fp_access_check_vector_hsd(s, a->q, a->esz);
+ if (check <= 0) {
+ return check == 0;
}
+
+ gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64,
+ a->rot | (s->fpcr_ah << 2), fn[a->esz]);
return true;
}
@@ -6028,8 +6528,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
TCGv_i64 t1 = tcg_temp_new_i64();
read_vec_element(s, t1, a->rm, a->idx, MO_64);
- f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
- write_fp_dreg(s, a->rd, t0);
+ f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64));
+ write_fp_dreg_merging(s, a->rd, a->rn, t0);
}
break;
case MO_32:
@@ -6038,8 +6538,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
TCGv_i32 t1 = tcg_temp_new_i32();
read_vec_element_i32(s, t1, a->rm, a->idx, MO_32);
- f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
- write_fp_sreg(s, a->rd, t0);
+ f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64));
+ write_fp_sreg_merging(s, a->rd, a->rn, t0);
}
break;
case MO_16:
@@ -6051,8 +6551,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
TCGv_i32 t1 = tcg_temp_new_i32();
read_vec_element_i32(s, t1, a->rm, a->idx, MO_16);
- f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
- write_fp_sreg(s, a->rd, t0);
+ f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16));
+ write_fp_hreg_merging(s, a->rd, a->rn, t0);
}
break;
default:
@@ -6075,10 +6575,10 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
read_vec_element(s, t2, a->rm, a->idx, MO_64);
if (neg) {
- gen_vfp_negd(t1, t1);
+ gen_vfp_maybe_ah_negd(s, t1, t1);
}
- gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
- write_fp_dreg(s, a->rd, t0);
+ gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_A64));
+ write_fp_dreg_merging(s, a->rd, a->rd, t0);
}
break;
case MO_32:
@@ -6089,10 +6589,10 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
read_vec_element_i32(s, t2, a->rm, a->idx, MO_32);
if (neg) {
- gen_vfp_negs(t1, t1);
+ gen_vfp_maybe_ah_negs(s, t1, t1);
}
- gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
- write_fp_sreg(s, a->rd, t0);
+ gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_A64));
+ write_fp_sreg_merging(s, a->rd, a->rd, t0);
}
break;
case MO_16:
@@ -6106,11 +6606,11 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
read_vec_element_i32(s, t2, a->rm, a->idx, MO_16);
if (neg) {
- gen_vfp_negh(t1, t1);
+ gen_vfp_maybe_ah_negh(s, t1, t1);
}
gen_helper_advsimd_muladdh(t0, t1, t2, t0,
- fpstatus_ptr(FPST_FPCR_F16));
- write_fp_sreg(s, a->rd, t0);
+ fpstatus_ptr(FPST_A64_F16));
+ write_fp_hreg_merging(s, a->rd, a->rd, t0);
}
break;
default:
@@ -6173,7 +6673,6 @@ static bool do_scalar_muladd_widening_idx(DisasContext *s, arg_rrx_e *a,
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- unsigned vsz, dofs;
if (acc) {
read_vec_element(s, t0, a->rd, 0, a->esz + 1);
@@ -6183,9 +6682,7 @@ static bool do_scalar_muladd_widening_idx(DisasContext *s, arg_rrx_e *a,
fn(t0, t1, t2);
/* Clear the whole register first, then store scalar. */
- vsz = vec_full_reg_size(s);
- dofs = vec_full_reg_offset(s, a->rd);
- tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
+ clear_vec(s, a->rd);
write_vec_element(s, t0, a->rd, 0, a->esz + 1);
}
return true;
@@ -6202,27 +6699,15 @@ static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
gen_helper_gvec_3_ptr * const fns[3])
{
MemOp esz = a->esz;
+ int check = fp_access_check_vector_hsd(s, a->q, esz);
- switch (esz) {
- case MO_64:
- if (!a->q) {
- return false;
- }
- break;
- case MO_32:
- break;
- case MO_16:
- if (!dc_isar_feature(aa64_fp16, s)) {
- return false;
- }
- break;
- default:
- g_assert_not_reached();
- }
- if (fp_access_check(s)) {
- gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
- esz == MO_16, a->idx, fns[esz - 1]);
+ if (check <= 0) {
+ return check == 0;
}
+
+ gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
+ esz == MO_16 ? FPST_A64_F16 : FPST_A64,
+ a->idx, fns[esz - 1]);
return true;
}
@@ -6242,34 +6727,27 @@ TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx)
static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg)
{
- static gen_helper_gvec_4_ptr * const fns[3] = {
- gen_helper_gvec_fmla_idx_h,
- gen_helper_gvec_fmla_idx_s,
- gen_helper_gvec_fmla_idx_d,
+ static gen_helper_gvec_4_ptr * const fns[3][3] = {
+ { gen_helper_gvec_fmla_idx_h,
+ gen_helper_gvec_fmla_idx_s,
+ gen_helper_gvec_fmla_idx_d },
+ { gen_helper_gvec_fmls_idx_h,
+ gen_helper_gvec_fmls_idx_s,
+ gen_helper_gvec_fmls_idx_d },
+ { gen_helper_gvec_ah_fmls_idx_h,
+ gen_helper_gvec_ah_fmls_idx_s,
+ gen_helper_gvec_ah_fmls_idx_d },
};
MemOp esz = a->esz;
+ int check = fp_access_check_vector_hsd(s, a->q, esz);
- switch (esz) {
- case MO_64:
- if (!a->q) {
- return false;
- }
- break;
- case MO_32:
- break;
- case MO_16:
- if (!dc_isar_feature(aa64_fp16, s)) {
- return false;
- }
- break;
- default:
- g_assert_not_reached();
- }
- if (fp_access_check(s)) {
- gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
- esz == MO_16, (a->idx << 1) | neg,
- fns[esz - 1]);
+ if (check <= 0) {
+ return check == 0;
}
+
+ gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
+ esz == MO_16 ? FPST_A64_F16 : FPST_A64,
+ a->idx, fns[neg ? 1 + s->fpcr_ah : 0][esz - 1]);
return true;
}
@@ -6378,13 +6856,22 @@ static bool do_dot_vector_idx(DisasContext *s, arg_qrrx_e *a,
return true;
}
+static bool do_dot_vector_idx_env(DisasContext *s, arg_qrrx_e *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (fp_access_check(s)) {
+ gen_gvec_op4_env(s, a->q, a->rd, a->rn, a->rm, a->rd, a->idx, fn);
+ }
+ return true;
+}
+
TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_b)
TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_b)
TRANS_FEAT(SUDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
gen_helper_gvec_sudot_idx_b)
TRANS_FEAT(USDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
gen_helper_gvec_usdot_idx_b)
-TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx, a,
+TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx_env, a,
gen_helper_gvec_bfdot_idx)
static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a)
@@ -6394,7 +6881,8 @@ static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a)
}
if (fp_access_check(s)) {
/* Q bit selects BFMLALB vs BFMLALT. */
- gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, 0,
+ gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd,
+ s->fpcr_ah ? FPST_AH : FPST_A64,
(a->idx << 1) | a->q,
gen_helper_gvec_bfmlal_idx);
}
@@ -6423,7 +6911,8 @@ static bool trans_FCMLA_vi(DisasContext *s, arg_FCMLA_vi *a)
}
if (fp_access_check(s)) {
gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
- a->esz == MO_16, (a->idx << 2) | a->rot, fn);
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64,
+ (s->fpcr_ah << 4) | (a->idx << 2) | a->rot, fn);
}
return true;
}
@@ -6442,7 +6931,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
read_vec_element(s, t0, a->rn, 0, MO_64);
read_vec_element(s, t1, a->rn, 1, MO_64);
- f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
+ f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64));
write_fp_dreg(s, a->rd, t0);
}
break;
@@ -6453,7 +6942,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
read_vec_element_i32(s, t0, a->rn, 0, MO_32);
read_vec_element_i32(s, t1, a->rn, 1, MO_32);
- f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
+ f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64));
write_fp_sreg(s, a->rd, t0);
}
break;
@@ -6467,7 +6956,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
read_vec_element_i32(s, t0, a->rn, 0, MO_16);
read_vec_element_i32(s, t1, a->rn, 1, MO_16);
- f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
+ f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16));
write_fp_sreg(s, a->rd, t0);
}
break;
@@ -6477,9 +6966,16 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
return true;
}
+static bool do_fp3_scalar_pair_2fn(DisasContext *s, arg_rr_e *a,
+ const FPScalar *fnormal,
+ const FPScalar *fah)
+{
+ return do_fp3_scalar_pair(s, a, s->fpcr_ah ? fah : fnormal);
+}
+
TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd)
-TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax)
-TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin)
+TRANS(FMAXP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah)
+TRANS(FMINP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah)
TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm)
TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm)
@@ -6505,22 +7001,10 @@ static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
{
TCGv_i64 t_true, t_false;
DisasCompare64 c;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- switch (a->esz) {
- case MO_32:
- case MO_64:
- break;
- case MO_16:
- if (!dc_isar_feature(aa64_fp16, s)) {
- return false;
- }
- break;
- default:
- return false;
- }
-
- if (!fp_access_check(s)) {
- return true;
+ if (check <= 0) {
+ return check == 0;
}
/* Zero extend sreg & hreg inputs to 64 bits now. */
@@ -6542,6 +7026,54 @@ static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
}
/*
+ * Advanced SIMD Extract
+ */
+
+static bool trans_EXT_d(DisasContext *s, arg_EXT_d *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 lo = read_fp_dreg(s, a->rn);
+ if (a->imm != 0) {
+ TCGv_i64 hi = read_fp_dreg(s, a->rm);
+ tcg_gen_extract2_i64(lo, lo, hi, a->imm * 8);
+ }
+ write_fp_dreg(s, a->rd, lo);
+ }
+ return true;
+}
+
+static bool trans_EXT_q(DisasContext *s, arg_EXT_q *a)
+{
+ TCGv_i64 lo, hi;
+ int pos = (a->imm & 7) * 8;
+ int elt = a->imm >> 3;
+
+ if (!fp_access_check(s)) {
+ return true;
+ }
+
+ lo = tcg_temp_new_i64();
+ hi = tcg_temp_new_i64();
+
+ read_vec_element(s, lo, a->rn, elt, MO_64);
+ elt++;
+ read_vec_element(s, hi, elt & 2 ? a->rm : a->rn, elt & 1, MO_64);
+ elt++;
+
+ if (pos != 0) {
+ TCGv_i64 hh = tcg_temp_new_i64();
+ tcg_gen_extract2_i64(lo, lo, hi, pos);
+ read_vec_element(s, hh, a->rm, elt & 1, MO_64);
+ tcg_gen_extract2_i64(hi, hi, hh, pos);
+ }
+
+ write_vec_element(s, lo, a->rd, 0, MO_64);
+ write_vec_element(s, hi, a->rd, 1, MO_64);
+ clear_vec_high(s, true, a->rd);
+ return true;
+}
+
+/*
* Floating-point data-processing (3 source)
*/
@@ -6562,14 +7094,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
TCGv_i64 ta = read_fp_dreg(s, a->ra);
if (neg_a) {
- gen_vfp_negd(ta, ta);
+ gen_vfp_maybe_ah_negd(s, ta, ta);
}
if (neg_n) {
- gen_vfp_negd(tn, tn);
+ gen_vfp_maybe_ah_negd(s, tn, tn);
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A64);
gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
- write_fp_dreg(s, a->rd, ta);
+ write_fp_dreg_merging(s, a->rd, a->ra, ta);
}
break;
@@ -6580,14 +7112,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
TCGv_i32 ta = read_fp_sreg(s, a->ra);
if (neg_a) {
- gen_vfp_negs(ta, ta);
+ gen_vfp_maybe_ah_negs(s, ta, ta);
}
if (neg_n) {
- gen_vfp_negs(tn, tn);
+ gen_vfp_maybe_ah_negs(s, tn, tn);
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A64);
gen_helper_vfp_muladds(ta, tn, tm, ta, fpst);
- write_fp_sreg(s, a->rd, ta);
+ write_fp_sreg_merging(s, a->rd, a->ra, ta);
}
break;
@@ -6601,14 +7133,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
TCGv_i32 ta = read_fp_hreg(s, a->ra);
if (neg_a) {
- gen_vfp_negh(ta, ta);
+ gen_vfp_maybe_ah_negh(s, ta, ta);
}
if (neg_n) {
- gen_vfp_negh(tn, tn);
+ gen_vfp_maybe_ah_negh(s, tn, tn);
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A64_F16);
gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst);
- write_fp_sreg(s, a->rd, ta);
+ write_fp_hreg_merging(s, a->rd, a->ra, ta);
}
break;
@@ -6623,5248 +7155,2879 @@ TRANS(FNMADD, do_fmadd, a, true, true)
TRANS(FMSUB, do_fmadd, a, false, true)
TRANS(FNMSUB, do_fmadd, a, true, false)
-/* Shift a TCGv src by TCGv shift_amount, put result in dst.
- * Note that it is the caller's responsibility to ensure that the
- * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
- * mandated semantics for out of range shifts.
- */
-static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
- enum a64_shift_type shift_type, TCGv_i64 shift_amount)
-{
- switch (shift_type) {
- case A64_SHIFT_TYPE_LSL:
- tcg_gen_shl_i64(dst, src, shift_amount);
- break;
- case A64_SHIFT_TYPE_LSR:
- tcg_gen_shr_i64(dst, src, shift_amount);
- break;
- case A64_SHIFT_TYPE_ASR:
- if (!sf) {
- tcg_gen_ext32s_i64(dst, src);
- }
- tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
- break;
- case A64_SHIFT_TYPE_ROR:
- if (sf) {
- tcg_gen_rotr_i64(dst, src, shift_amount);
- } else {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(t0, src);
- tcg_gen_extrl_i64_i32(t1, shift_amount);
- tcg_gen_rotr_i32(t0, t0, t1);
- tcg_gen_extu_i32_i64(dst, t0);
- }
- break;
- default:
- assert(FALSE); /* all shift types should be handled */
- break;
- }
-
- if (!sf) { /* zero extend final result */
- tcg_gen_ext32u_i64(dst, dst);
- }
-}
-
-/* Shift a TCGv src by immediate, put result in dst.
- * The shift amount must be in range (this should always be true as the
- * relevant instructions will UNDEF on bad shift immediates).
+/*
+ * Advanced SIMD Across Lanes
*/
-static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
- enum a64_shift_type shift_type, unsigned int shift_i)
-{
- assert(shift_i < (sf ? 64 : 32));
-
- if (shift_i == 0) {
- tcg_gen_mov_i64(dst, src);
- } else {
- shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
- }
-}
-/* Logical (shifted register)
- * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
- * +----+-----+-----------+-------+---+------+--------+------+------+
- * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
- * +----+-----+-----------+-------+---+------+--------+------+------+
- */
-static void disas_logic_reg(DisasContext *s, uint32_t insn)
+static bool do_int_reduction(DisasContext *s, arg_qrr_e *a, bool widen,
+ MemOp src_sign, NeonGenTwo64OpFn *fn)
{
- TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
- unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
-
- sf = extract32(insn, 31, 1);
- opc = extract32(insn, 29, 2);
- shift_type = extract32(insn, 22, 2);
- invert = extract32(insn, 21, 1);
- rm = extract32(insn, 16, 5);
- shift_amount = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
-
- if (!sf && (shift_amount & (1 << 5))) {
- unallocated_encoding(s);
- return;
- }
-
- tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_res, tcg_elt;
+ MemOp src_mop = a->esz | src_sign;
+ int elements = (a->q ? 16 : 8) >> a->esz;
- if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
- /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
- * register-register MOV and MVN, so it is worth special casing.
- */
- tcg_rm = cpu_reg(s, rm);
- if (invert) {
- tcg_gen_not_i64(tcg_rd, tcg_rm);
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
- }
- } else {
- if (sf) {
- tcg_gen_mov_i64(tcg_rd, tcg_rm);
- } else {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
- }
- }
- return;
+ /* Reject MO_64, and MO_32 without Q: a minimum of 4 elements. */
+ if (elements < 4) {
+ return false;
}
-
- tcg_rm = read_cpu_reg(s, rm, sf);
-
- if (shift_amount) {
- shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
+ if (!fp_access_check(s)) {
+ return true;
}
- tcg_rn = cpu_reg(s, rn);
-
- switch (opc | (invert << 2)) {
- case 0: /* AND */
- case 3: /* ANDS */
- tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 1: /* ORR */
- tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 2: /* EOR */
- tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 4: /* BIC */
- case 7: /* BICS */
- tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 5: /* ORN */
- tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 6: /* EON */
- tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- default:
- assert(FALSE);
- break;
- }
+ tcg_res = tcg_temp_new_i64();
+ tcg_elt = tcg_temp_new_i64();
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ read_vec_element(s, tcg_res, a->rn, 0, src_mop);
+ for (int i = 1; i < elements; i++) {
+ read_vec_element(s, tcg_elt, a->rn, i, src_mop);
+ fn(tcg_res, tcg_res, tcg_elt);
}
- if (opc == 3) {
- gen_logic_CC(sf, tcg_rd);
- }
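+ /* Truncate the accumulated result to the destination element size (doubled for the widening forms). */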
+ tcg_gen_ext_i64(tcg_res, tcg_res, a->esz + widen);
+ write_fp_dreg(s, a->rd, tcg_res);
+ return true;
}
+TRANS(ADDV, do_int_reduction, a, false, 0, tcg_gen_add_i64)
+TRANS(SADDLV, do_int_reduction, a, true, MO_SIGN, tcg_gen_add_i64)
+TRANS(UADDLV, do_int_reduction, a, true, 0, tcg_gen_add_i64)
+TRANS(SMAXV, do_int_reduction, a, false, MO_SIGN, tcg_gen_smax_i64)
+TRANS(UMAXV, do_int_reduction, a, false, 0, tcg_gen_umax_i64)
+TRANS(SMINV, do_int_reduction, a, false, MO_SIGN, tcg_gen_smin_i64)
+TRANS(UMINV, do_int_reduction, a, false, 0, tcg_gen_umin_i64)
+
/*
- * Add/subtract (extended register)
- *
- * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
- * +--+--+--+-----------+-----+--+-------+------+------+----+----+
- * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
- * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ * do_fp_reduction helper
*
- * sf: 0 -> 32bit, 1 -> 64bit
- * op: 0 -> add , 1 -> sub
- * S: 1 -> set flags
- * opt: 00
- * option: extension type (see DecodeRegExtend)
- * imm3: optional shift to Rm
+ * This mirrors the Reduce() pseudocode in the ARM ARM. It is
+ * important for correct NaN propagation that we do these
+ * operations in exactly the order specified by the pseudocode.
*
- * Rd = Rn + LSL(extend(Rm), amount)
+ * This is a recursive function.
*/
-static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int imm3 = extract32(insn, 10, 3);
- int option = extract32(insn, 13, 3);
- int rm = extract32(insn, 16, 5);
- int opt = extract32(insn, 22, 2);
- bool setflags = extract32(insn, 29, 1);
- bool sub_op = extract32(insn, 30, 1);
- bool sf = extract32(insn, 31, 1);
-
- TCGv_i64 tcg_rm, tcg_rn; /* temps */
- TCGv_i64 tcg_rd;
- TCGv_i64 tcg_result;
-
- if (imm3 > 4 || opt != 0) {
- unallocated_encoding(s);
- return;
- }
-
- /* non-flag setting ops may use SP */
- if (!setflags) {
- tcg_rd = cpu_reg_sp(s, rd);
+static TCGv_i32 do_reduction_op(DisasContext *s, int rn, MemOp esz,
+ int ebase, int ecount, TCGv_ptr fpst,
+ NeonGenTwoSingleOpFn *fn)
+{
+ if (ecount == 1) {
+ TCGv_i32 tcg_elem = tcg_temp_new_i32();
+ read_vec_element_i32(s, tcg_elem, rn, ebase, esz);
+ return tcg_elem;
} else {
- tcg_rd = cpu_reg(s, rd);
- }
- tcg_rn = read_cpu_reg_sp(s, rn, sf);
-
- tcg_rm = read_cpu_reg(s, rm, sf);
- ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
+ int half = ecount >> 1;
+ TCGv_i32 tcg_hi, tcg_lo, tcg_res;
- tcg_result = tcg_temp_new_i64();
+ tcg_hi = do_reduction_op(s, rn, esz, ebase + half, half, fpst, fn);
+ tcg_lo = do_reduction_op(s, rn, esz, ebase, half, fpst, fn);
+ tcg_res = tcg_temp_new_i32();
- if (!setflags) {
- if (sub_op) {
- tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
- } else {
- tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
- }
- } else {
- if (sub_op) {
- gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
- } else {
- gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
- }
+ fn(tcg_res, tcg_lo, tcg_hi, fpst);
+ return tcg_res;
}
+}
- if (sf) {
- tcg_gen_mov_i64(tcg_rd, tcg_result);
- } else {
- tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+static bool do_fp_reduction(DisasContext *s, arg_qrr_e *a,
+ NeonGenTwoSingleOpFn *fnormal,
+ NeonGenTwoSingleOpFn *fah)
+{
+ if (fp_access_check(s)) {
+ MemOp esz = a->esz;
+ int elts = (a->q ? 16 : 8) >> esz;
+ TCGv_ptr fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64);
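+ /* FPCR.AH selects the alternate-handling variant of the min/max helpers. */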
+ TCGv_i32 res = do_reduction_op(s, a->rn, esz, 0, elts, fpst,
+ s->fpcr_ah ? fah : fnormal);
+ write_fp_sreg(s, a->rd, res);
}
+ return true;
}
-/*
- * Add/subtract (shifted register)
- *
- * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
- * +--+--+--+-----------+-----+--+-------+---------+------+------+
- * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
- * +--+--+--+-----------+-----+--+-------+---------+------+------+
- *
- * sf: 0 -> 32bit, 1 -> 64bit
- * op: 0 -> add , 1 -> sub
- * S: 1 -> set flags
- * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
- * imm6: Shift amount to apply to Rm before the add/sub
- */
-static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int imm6 = extract32(insn, 10, 6);
- int rm = extract32(insn, 16, 5);
- int shift_type = extract32(insn, 22, 2);
- bool setflags = extract32(insn, 29, 1);
- bool sub_op = extract32(insn, 30, 1);
- bool sf = extract32(insn, 31, 1);
-
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
- TCGv_i64 tcg_rn, tcg_rm;
- TCGv_i64 tcg_result;
-
- if ((shift_type == 3) || (!sf && (imm6 > 31))) {
- unallocated_encoding(s);
- return;
- }
+TRANS_FEAT(FMAXNMV_h, aa64_fp16, do_fp_reduction, a,
+ gen_helper_vfp_maxnumh, gen_helper_vfp_maxnumh)
+TRANS_FEAT(FMINNMV_h, aa64_fp16, do_fp_reduction, a,
+ gen_helper_vfp_minnumh, gen_helper_vfp_minnumh)
+TRANS_FEAT(FMAXV_h, aa64_fp16, do_fp_reduction, a,
+ gen_helper_vfp_maxh, gen_helper_vfp_ah_maxh)
+TRANS_FEAT(FMINV_h, aa64_fp16, do_fp_reduction, a,
+ gen_helper_vfp_minh, gen_helper_vfp_ah_minh)
- tcg_rn = read_cpu_reg(s, rn, sf);
- tcg_rm = read_cpu_reg(s, rm, sf);
+TRANS(FMAXNMV_s, do_fp_reduction, a,
+ gen_helper_vfp_maxnums, gen_helper_vfp_maxnums)
+TRANS(FMINNMV_s, do_fp_reduction, a,
+ gen_helper_vfp_minnums, gen_helper_vfp_minnums)
+TRANS(FMAXV_s, do_fp_reduction, a, gen_helper_vfp_maxs, gen_helper_vfp_ah_maxs)
+TRANS(FMINV_s, do_fp_reduction, a, gen_helper_vfp_mins, gen_helper_vfp_ah_mins)
- shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
+/*
+ * Floating-point Immediate
+ */
- tcg_result = tcg_temp_new_i64();
+static bool trans_FMOVI_s(DisasContext *s, arg_FMOVI_s *a)
+{
+ int check = fp_access_check_scalar_hsd(s, a->esz);
+ uint64_t imm;
- if (!setflags) {
- if (sub_op) {
- tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
- } else {
- tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
- }
- } else {
- if (sub_op) {
- gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
- } else {
- gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
- }
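+ /* check < 0: unallocated size; check == 0: an FP access trap has been raised. */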
+ if (check <= 0) {
+ return check == 0;
}
- if (sf) {
- tcg_gen_mov_i64(tcg_rd, tcg_result);
- } else {
- tcg_gen_ext32u_i64(tcg_rd, tcg_result);
- }
+ imm = vfp_expand_imm(a->esz, a->imm);
+ write_fp_dreg(s, a->rd, tcg_constant_i64(imm));
+ return true;
}
-/* Data-processing (3 source)
- *
- * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
- * +--+------+-----------+------+------+----+------+------+------+
- * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
- * +--+------+-----------+------+------+----+------+------+------+
+/*
+ * Floating point compare, conditional compare
*/
-static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int ra = extract32(insn, 10, 5);
- int rm = extract32(insn, 16, 5);
- int op_id = (extract32(insn, 29, 3) << 4) |
- (extract32(insn, 21, 3) << 1) |
- extract32(insn, 15, 1);
- bool sf = extract32(insn, 31, 1);
- bool is_sub = extract32(op_id, 0, 1);
- bool is_high = extract32(op_id, 2, 1);
- bool is_signed = false;
- TCGv_i64 tcg_op1;
- TCGv_i64 tcg_op2;
- TCGv_i64 tcg_tmp;
-
- /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
- switch (op_id) {
- case 0x42: /* SMADDL */
- case 0x43: /* SMSUBL */
- case 0x44: /* SMULH */
- is_signed = true;
- break;
- case 0x0: /* MADD (32bit) */
- case 0x1: /* MSUB (32bit) */
- case 0x40: /* MADD (64bit) */
- case 0x41: /* MSUB (64bit) */
- case 0x4a: /* UMADDL */
- case 0x4b: /* UMSUBL */
- case 0x4c: /* UMULH */
- break;
- default:
- unallocated_encoding(s);
- return;
- }
- if (is_high) {
- TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
- TCGv_i64 tcg_rn = cpu_reg(s, rn);
- TCGv_i64 tcg_rm = cpu_reg(s, rm);
+static void handle_fp_compare(DisasContext *s, int size,
+ unsigned int rn, unsigned int rm,
+ bool cmp_with_zero, bool signal_all_nans)
+{
+ TCGv_i64 tcg_flags = tcg_temp_new_i64();
+ TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_A64_F16 : FPST_A64);
- if (is_signed) {
- tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ if (size == MO_64) {
+ TCGv_i64 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_dreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_constant_i64(0);
} else {
- tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ tcg_vm = read_fp_dreg(s, rm);
}
- return;
- }
-
- tcg_op1 = tcg_temp_new_i64();
- tcg_op2 = tcg_temp_new_i64();
- tcg_tmp = tcg_temp_new_i64();
-
- if (op_id < 0x42) {
- tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
- tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
- } else {
- if (is_signed) {
- tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
- tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
+ if (signal_all_nans) {
+ gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
} else {
- tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
- tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
+ gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
}
- }
-
- if (ra == 31 && !is_sub) {
- /* Special-case MADD with rA == XZR; it is the standard MUL alias */
- tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
} else {
- tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
- if (is_sub) {
- tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ TCGv_i32 tcg_vn = tcg_temp_new_i32();
+ TCGv_i32 tcg_vm = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_vn, rn, 0, size);
+ if (cmp_with_zero) {
+ tcg_gen_movi_i32(tcg_vm, 0);
} else {
- tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ read_vec_element_i32(s, tcg_vm, rm, 0, size);
}
- }
- if (!sf) {
- tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
+ switch (size) {
+ case MO_32:
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ break;
+ case MO_16:
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
-}
-/* Add/subtract (with carry)
- * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
- * +--+--+--+------------------------+------+-------------+------+-----+
- * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
- * +--+--+--+------------------------+------+-------------+------+-----+
- */
+ gen_set_nzcv(tcg_flags);
+}
-static void disas_adc_sbc(DisasContext *s, uint32_t insn)
+/* FCMP, FCMPE */
+static bool trans_FCMP(DisasContext *s, arg_FCMP *a)
{
- unsigned int sf, op, setflags, rm, rn, rd;
- TCGv_i64 tcg_y, tcg_rn, tcg_rd;
-
- sf = extract32(insn, 31, 1);
- op = extract32(insn, 30, 1);
- setflags = extract32(insn, 29, 1);
- rm = extract32(insn, 16, 5);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- tcg_rd = cpu_reg(s, rd);
- tcg_rn = cpu_reg(s, rn);
-
- if (op) {
- tcg_y = tcg_temp_new_i64();
- tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
- } else {
- tcg_y = cpu_reg(s, rm);
+ if (check <= 0) {
+ return check == 0;
}
- if (setflags) {
- gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
- } else {
- gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
- }
+ handle_fp_compare(s, a->esz, a->rn, a->rm, a->z, a->e);
+ return true;
}
-/*
- * Rotate right into flags
- * 31 30 29 21 15 10 5 4 0
- * +--+--+--+-----------------+--------+-----------+------+--+------+
- * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask |
- * +--+--+--+-----------------+--------+-----------+------+--+------+
- */
-static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
+/* FCCMP, FCCMPE */
+static bool trans_FCCMP(DisasContext *s, arg_FCCMP *a)
{
- int mask = extract32(insn, 0, 4);
- int o2 = extract32(insn, 4, 1);
- int rn = extract32(insn, 5, 5);
- int imm6 = extract32(insn, 15, 6);
- int sf_op_s = extract32(insn, 29, 3);
- TCGv_i64 tcg_rn;
- TCGv_i32 nzcv;
+ TCGLabel *label_continue = NULL;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
- unallocated_encoding(s);
- return;
+ if (check <= 0) {
+ return check == 0;
}
- tcg_rn = read_cpu_reg(s, rn, 1);
- tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
+ if (a->cond < 0x0e) { /* not always */
+ TCGLabel *label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(a->cond, label_match);
+ /* nomatch: */
+ gen_set_nzcv(tcg_constant_i64(a->nzcv << 28));
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
- nzcv = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
+ handle_fp_compare(s, a->esz, a->rn, a->rm, false, a->e);
- if (mask & 8) { /* N */
- tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
- }
- if (mask & 4) { /* Z */
- tcg_gen_not_i32(cpu_ZF, nzcv);
- tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
- }
- if (mask & 2) { /* C */
- tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
- }
- if (mask & 1) { /* V */
- tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
+ if (label_continue) {
+ gen_set_label(label_continue);
}
+ return true;
}
/*
- * Evaluate into flags
- * 31 30 29 21 15 14 10 5 4 0
- * +--+--+--+-----------------+---------+----+---------+------+--+------+
- * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask |
- * +--+--+--+-----------------+---------+----+---------+------+--+------+
+ * Advanced SIMD Modified Immediate
*/
-static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
-{
- int o3_mask = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int o2 = extract32(insn, 15, 6);
- int sz = extract32(insn, 14, 1);
- int sf_op_s = extract32(insn, 29, 3);
- TCGv_i32 tmp;
- int shift;
- if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
- !dc_isar_feature(aa64_condm_4, s)) {
- unallocated_encoding(s);
- return;
+static bool trans_FMOVI_v_h(DisasContext *s, arg_FMOVI_v_h *a)
+{
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ tcg_gen_gvec_dup_imm(MO_16, vec_full_reg_offset(s, a->rd),
+ a->q ? 16 : 8, vec_full_reg_size(s),
+ vfp_expand_imm(MO_16, a->abcdefgh));
}
- shift = sz ? 16 : 24; /* SETF16 or SETF8 */
+ return true;
+}
- tmp = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
- tcg_gen_shli_i32(cpu_NF, tmp, shift);
- tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
- tcg_gen_mov_i32(cpu_ZF, cpu_NF);
- tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
+static void gen_movi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
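+ /* GVecGen2iFn wrapper for MOVI: the source operand and element size are unused. */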
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
}
-/* Conditional compare (immediate / register)
- * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
- * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
- * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
- * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
- * [1] y [0] [0]
- */
-static void disas_cc(DisasContext *s, uint32_t insn)
+static bool trans_Vimm(DisasContext *s, arg_Vimm *a)
{
- unsigned int sf, op, y, cond, rn, nzcv, is_imm;
- TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
- TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
- DisasCompare c;
+ GVecGen2iFn *fn;
- if (!extract32(insn, 29, 1)) {
- unallocated_encoding(s);
- return;
+ /* Handle decode of cmode/op here between ORR/BIC/MOVI */
+ if ((a->cmode & 1) && a->cmode < 12) {
+ /* For op=1, the imm will be inverted, so BIC becomes AND. */
+ fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
+ } else {
+ /* There is one unallocated cmode/op combination in this space */
+ if (a->cmode == 15 && a->op == 1 && a->q == 0) {
+ return false;
+ }
+ fn = gen_movi;
}
- if (insn & (1 << 10 | 1 << 4)) {
- unallocated_encoding(s);
- return;
+
+ if (fp_access_check(s)) {
+ uint64_t imm = asimd_imm_const(a->abcdefgh, a->cmode, a->op);
+ gen_gvec_fn2i(s, a->q, a->rd, a->rd, imm, fn, MO_64);
}
- sf = extract32(insn, 31, 1);
- op = extract32(insn, 30, 1);
- is_imm = extract32(insn, 11, 1);
- y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
- cond = extract32(insn, 12, 4);
- rn = extract32(insn, 5, 5);
- nzcv = extract32(insn, 0, 4);
+ return true;
+}
- /* Set T0 = !COND. */
- tcg_t0 = tcg_temp_new_i32();
- arm_test_cc(&c, cond);
- tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
+/*
+ * Advanced SIMD Shift by Immediate
+ */
- /* Load the arguments for the new comparison. */
- if (is_imm) {
- tcg_y = tcg_temp_new_i64();
- tcg_gen_movi_i64(tcg_y, y);
- } else {
- tcg_y = cpu_reg(s, y);
+static bool do_vec_shift_imm(DisasContext *s, arg_qrri_e *a, GVecGen2iFn *fn)
+{
+ if (fp_access_check(s)) {
+ gen_gvec_fn2i(s, a->q, a->rd, a->rn, a->imm, fn, a->esz);
}
- tcg_rn = cpu_reg(s, rn);
+ return true;
+}
- /* Set the flags for the new comparison. */
- tcg_tmp = tcg_temp_new_i64();
- if (op) {
- gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
- } else {
- gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+TRANS(SSHR_v, do_vec_shift_imm, a, gen_gvec_sshr)
+TRANS(USHR_v, do_vec_shift_imm, a, gen_gvec_ushr)
+TRANS(SSRA_v, do_vec_shift_imm, a, gen_gvec_ssra)
+TRANS(USRA_v, do_vec_shift_imm, a, gen_gvec_usra)
+TRANS(SRSHR_v, do_vec_shift_imm, a, gen_gvec_srshr)
+TRANS(URSHR_v, do_vec_shift_imm, a, gen_gvec_urshr)
+TRANS(SRSRA_v, do_vec_shift_imm, a, gen_gvec_srsra)
+TRANS(URSRA_v, do_vec_shift_imm, a, gen_gvec_ursra)
+TRANS(SRI_v, do_vec_shift_imm, a, gen_gvec_sri)
+TRANS(SHL_v, do_vec_shift_imm, a, tcg_gen_gvec_shli)
+TRANS(SLI_v, do_vec_shift_imm, a, gen_gvec_sli);
+TRANS(SQSHL_vi, do_vec_shift_imm, a, gen_neon_sqshli)
+TRANS(UQSHL_vi, do_vec_shift_imm, a, gen_neon_uqshli)
+TRANS(SQSHLU_vi, do_vec_shift_imm, a, gen_neon_sqshlui)
+
+static bool do_vec_shift_imm_wide(DisasContext *s, arg_qrri_e *a, bool is_u)
+{
+ TCGv_i64 tcg_rn, tcg_rd;
+ int esz = a->esz;
+ int esize;
+
+ if (!fp_access_check(s)) {
+ return true;
}
- /* If COND was false, force the flags to #nzcv. Compute two masks
- * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
- * For tcg hosts that support ANDC, we can make do with just T1.
- * In either case, allow the tcg optimizer to delete any unused mask.
+ /*
+ * For the LL variants the store is larger than the load,
+ * so if rd == rn we would overwrite parts of our input.
+ * Load the whole input up front and extract the elements in the main loop.
*/
- tcg_t1 = tcg_temp_new_i32();
- tcg_t2 = tcg_temp_new_i32();
- tcg_gen_neg_i32(tcg_t1, tcg_t0);
- tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
+ tcg_rd = tcg_temp_new_i64();
+ tcg_rn = tcg_temp_new_i64();
+ read_vec_element(s, tcg_rn, a->rn, a->q, MO_64);
- if (nzcv & 8) { /* N */
- tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
- } else {
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
- } else {
- tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
- }
- }
- if (nzcv & 4) { /* Z */
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
- } else {
- tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
- }
- } else {
- tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
- }
- if (nzcv & 2) { /* C */
- tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
- } else {
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
- } else {
- tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
- }
- }
- if (nzcv & 1) { /* V */
- tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
- } else {
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+ esize = 8 << esz;
+ for (int i = 0, elements = 8 >> esz; i < elements; i++) {
+ if (is_u) {
+ tcg_gen_extract_i64(tcg_rd, tcg_rn, i * esize, esize);
} else {
- tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+ tcg_gen_sextract_i64(tcg_rd, tcg_rn, i * esize, esize);
}
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, a->imm);
+ write_vec_element(s, tcg_rd, a->rd, i, esz + 1);
}
+ clear_vec_high(s, true, a->rd);
+ return true;
}
-/* Conditional select
- * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
- * +----+----+---+-----------------+------+------+-----+------+------+
- * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
- * +----+----+---+-----------------+------+------+-----+------+------+
- */
-static void disas_cond_select(DisasContext *s, uint32_t insn)
+TRANS(SSHLL_v, do_vec_shift_imm_wide, a, false)
+TRANS(USHLL_v, do_vec_shift_imm_wide, a, true)
+
+static void gen_sshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
- TCGv_i64 tcg_rd, zero;
- DisasCompare64 c;
+ assert(shift >= 0 && shift <= 64);
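+ /* A shift by 64 gives the same result as 63: all copies of the sign bit. */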
+ tcg_gen_sari_i64(dst, src, MIN(shift, 63));
+}
- if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
- /* S == 1 or op2<1> == 1 */
- unallocated_encoding(s);
- return;
+static void gen_ushr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ assert(shift >= 0 && shift <= 64);
+ if (shift == 64) {
+ tcg_gen_movi_i64(dst, 0);
+ } else {
+ tcg_gen_shri_i64(dst, src, shift);
}
- sf = extract32(insn, 31, 1);
- else_inv = extract32(insn, 30, 1);
- rm = extract32(insn, 16, 5);
- cond = extract32(insn, 12, 4);
- else_inc = extract32(insn, 10, 1);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
+}
- tcg_rd = cpu_reg(s, rd);
+static void gen_ssra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ gen_sshr_d(src, src, shift);
+ tcg_gen_add_i64(dst, dst, src);
+}
- a64_test_cc(&c, cond);
- zero = tcg_constant_i64(0);
+static void gen_usra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ gen_ushr_d(src, src, shift);
+ tcg_gen_add_i64(dst, dst, src);
+}
- if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
- /* CSET & CSETM. */
- if (else_inv) {
- tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
- tcg_rd, c.value, zero);
- } else {
- tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
- tcg_rd, c.value, zero);
- }
+static void gen_srshr_bhs(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ assert(shift >= 0 && shift <= 32);
+ if (shift) {
+ TCGv_i64 rnd = tcg_constant_i64(1ull << (shift - 1));
+ tcg_gen_add_i64(dst, src, rnd);
+ tcg_gen_sari_i64(dst, dst, shift);
} else {
- TCGv_i64 t_true = cpu_reg(s, rn);
- TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
- if (else_inv && else_inc) {
- tcg_gen_neg_i64(t_false, t_false);
- } else if (else_inv) {
- tcg_gen_not_i64(t_false, t_false);
- } else if (else_inc) {
- tcg_gen_addi_i64(t_false, t_false, 1);
- }
- tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
- }
-
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ tcg_gen_mov_i64(dst, src);
}
}
-static void handle_clz(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_urshr_bhs(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- TCGv_i64 tcg_rd, tcg_rn;
- tcg_rd = cpu_reg(s, rd);
- tcg_rn = cpu_reg(s, rn);
-
- if (sf) {
- tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
+ assert(shift >= 0 && shift <= 32);
+ if (shift) {
+ TCGv_i64 rnd = tcg_constant_i64(1ull << (shift - 1));
+ tcg_gen_add_i64(dst, src, rnd);
+ tcg_gen_shri_i64(dst, dst, shift);
} else {
- TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
- tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
- tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_gen_mov_i64(dst, src);
}
}
-static void handle_cls(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_srshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- TCGv_i64 tcg_rd, tcg_rn;
- tcg_rd = cpu_reg(s, rd);
- tcg_rn = cpu_reg(s, rn);
-
- if (sf) {
- tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
+ assert(shift >= 0 && shift <= 64);
+ if (shift == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else if (shift == 64) {
+ /* Extension of sign bit (0,-1) plus sign bit (0,1) is zero. */
+ tcg_gen_movi_i64(dst, 0);
} else {
- TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
- tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
- tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ TCGv_i64 rnd = tcg_temp_new_i64();
+ tcg_gen_extract_i64(rnd, src, shift - 1, 1);
+ tcg_gen_sari_i64(dst, src, shift);
+ tcg_gen_add_i64(dst, dst, rnd);
}
}
-static void handle_rbit(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_urshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- TCGv_i64 tcg_rd, tcg_rn;
- tcg_rd = cpu_reg(s, rd);
- tcg_rn = cpu_reg(s, rn);
-
- if (sf) {
- gen_helper_rbit64(tcg_rd, tcg_rn);
+ assert(shift >= 0 && shift <= 64);
+ if (shift == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else if (shift == 64) {
+ /* Rounding adds 1 << 63, so only the carry out of bit 63 survives the shift: the result is src >> 63. */
+ tcg_gen_shri_i64(dst, src, 63);
} else {
- TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
- gen_helper_rbit(tcg_tmp32, tcg_tmp32);
- tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ TCGv_i64 rnd = tcg_temp_new_i64();
+ tcg_gen_extract_i64(rnd, src, shift - 1, 1);
+ tcg_gen_shri_i64(dst, src, shift);
+ tcg_gen_add_i64(dst, dst, rnd);
}
}
-/* REV with sf==1, opcode==3 ("REV64") */
-static void handle_rev64(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_srsra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- if (!sf) {
- unallocated_encoding(s);
- return;
- }
- tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
+ gen_srshr_d(src, src, shift);
+ tcg_gen_add_i64(dst, dst, src);
}
-/* REV with sf==0, opcode==2
- * REV32 (sf==1, opcode==2)
- */
-static void handle_rev32(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_ursra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
- TCGv_i64 tcg_rn = cpu_reg(s, rn);
+ gen_urshr_d(src, src, shift);
+ tcg_gen_add_i64(dst, dst, src);
+}
- if (sf) {
- tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
- tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
- } else {
- tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
+static void gen_sri_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ /* If shift is 64, dst is unchanged. */
+ if (shift != 64) {
+ tcg_gen_shri_i64(src, src, shift);
+ tcg_gen_deposit_i64(dst, dst, src, 0, 64 - shift);
}
}
-/* REV16 (opcode==1) */
-static void handle_rev16(DisasContext *s, unsigned int sf,
- unsigned int rn, unsigned int rd)
+static void gen_sli_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
{
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
- TCGv_i64 tcg_tmp = tcg_temp_new_i64();
- TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
- TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
-
- tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
- tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
- tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
- tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
- tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
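+ /* Shift src left and insert it, preserving the low 'shift' bits of dst. */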
+ tcg_gen_deposit_i64(dst, dst, src, shift, 64 - shift);
}
-/* Data-processing (1 source)
- * 31 30 29 28 21 20 16 15 10 9 5 4 0
- * +----+---+---+-----------------+---------+--------+------+------+
- * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
- * +----+---+---+-----------------+---------+--------+------+------+
- */
-static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
+static bool do_vec_shift_imm_narrow(DisasContext *s, arg_qrri_e *a,
+ WideShiftImmFn * const fns[3], MemOp sign)
{
- unsigned int sf, opcode, opcode2, rn, rd;
- TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_rn, tcg_rd;
+ int esz = a->esz;
+ int esize;
+ WideShiftImmFn *fn;
- if (extract32(insn, 29, 1)) {
- unallocated_encoding(s);
- return;
- }
+ tcg_debug_assert(esz >= MO_8 && esz <= MO_32);
- sf = extract32(insn, 31, 1);
- opcode = extract32(insn, 10, 6);
- opcode2 = extract32(insn, 16, 5);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
+ if (!fp_access_check(s)) {
+ return true;
+ }
-#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_gen_movi_i64(tcg_rd, 0);
- switch (MAP(sf, opcode2, opcode)) {
- case MAP(0, 0x00, 0x00): /* RBIT */
- case MAP(1, 0x00, 0x00):
- handle_rbit(s, sf, rn, rd);
- break;
- case MAP(0, 0x00, 0x01): /* REV16 */
- case MAP(1, 0x00, 0x01):
- handle_rev16(s, sf, rn, rd);
- break;
- case MAP(0, 0x00, 0x02): /* REV/REV32 */
- case MAP(1, 0x00, 0x02):
- handle_rev32(s, sf, rn, rd);
- break;
- case MAP(1, 0x00, 0x03): /* REV64 */
- handle_rev64(s, sf, rn, rd);
- break;
- case MAP(0, 0x00, 0x04): /* CLZ */
- case MAP(1, 0x00, 0x04):
- handle_clz(s, sf, rn, rd);
- break;
- case MAP(0, 0x00, 0x05): /* CLS */
- case MAP(1, 0x00, 0x05):
- handle_cls(s, sf, rn, rd);
- break;
- case MAP(1, 0x01, 0x00): /* PACIA */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x01): /* PACIB */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x02): /* PACDA */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x03): /* PACDB */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x04): /* AUTIA */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x05): /* AUTIB */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x06): /* AUTDA */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x07): /* AUTDB */
- if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
- } else if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- break;
- case MAP(1, 0x01, 0x08): /* PACIZA */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x09): /* PACIZB */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0a): /* PACDZA */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0b): /* PACDZB */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0c): /* AUTIZA */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0d): /* AUTIZB */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0e): /* AUTDZA */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x0f): /* AUTDZB */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
- }
- break;
- case MAP(1, 0x01, 0x10): /* XPACI */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
- }
- break;
- case MAP(1, 0x01, 0x11): /* XPACD */
- if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
- goto do_unallocated;
- } else if (s->pauth_active) {
- tcg_rd = cpu_reg(s, rd);
- gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
- }
- break;
- default:
- do_unallocated:
- unallocated_encoding(s);
- break;
+ fn = fns[esz];
+ esize = 8 << esz;
+ for (int i = 0, elements = 8 >> esz; i < elements; i++) {
+ read_vec_element(s, tcg_rn, a->rn, i, (esz + 1) | sign);
+ fn(tcg_rn, tcg_rn, a->imm);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, esize * i, esize);
}
-#undef MAP
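+ /* The base insn fills the low half of Rd; the '2' form (a->q set) fills the high half. */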
+ write_vec_element(s, tcg_rd, a->rd, a->q, MO_64);
+ clear_vec_high(s, a->q, a->rd);
+ return true;
}
-static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
- unsigned int rm, unsigned int rn, unsigned int rd)
+static void gen_sqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- TCGv_i64 tcg_n, tcg_m, tcg_rd;
- tcg_rd = cpu_reg(s, rd);
-
- if (!sf && is_signed) {
- tcg_n = tcg_temp_new_i64();
- tcg_m = tcg_temp_new_i64();
- tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
- tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
- } else {
- tcg_n = read_cpu_reg(s, rn, sf);
- tcg_m = read_cpu_reg(s, rm, sf);
- }
-
- if (is_signed) {
- gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
- } else {
- gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
- }
+ tcg_gen_sari_i64(d, s, i);
+ tcg_gen_ext16u_i64(d, d);
+ gen_helper_neon_narrow_sat_s8(d, tcg_env, d);
+}
- if (!sf) { /* zero extend final result */
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
- }
+static void gen_sqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ tcg_gen_sari_i64(d, s, i);
+ tcg_gen_ext32u_i64(d, d);
+ gen_helper_neon_narrow_sat_s16(d, tcg_env, d);
}
-/* LSLV, LSRV, ASRV, RORV */
-static void handle_shift_reg(DisasContext *s,
- enum a64_shift_type shift_type, unsigned int sf,
- unsigned int rm, unsigned int rn, unsigned int rd)
+static void gen_sqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- TCGv_i64 tcg_shift = tcg_temp_new_i64();
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
- TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+ gen_sshr_d(d, s, i);
+ gen_helper_neon_narrow_sat_s32(d, tcg_env, d);
+}
- tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
- shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
+static void gen_uqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ tcg_gen_shri_i64(d, s, i);
+ gen_helper_neon_narrow_sat_u8(d, tcg_env, d);
}
-/* CRC32[BHWX], CRC32C[BHWX] */
-static void handle_crc32(DisasContext *s,
- unsigned int sf, unsigned int sz, bool crc32c,
- unsigned int rm, unsigned int rn, unsigned int rd)
+static void gen_uqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- TCGv_i64 tcg_acc, tcg_val;
- TCGv_i32 tcg_bytes;
+ tcg_gen_shri_i64(d, s, i);
+ gen_helper_neon_narrow_sat_u16(d, tcg_env, d);
+}
- if (!dc_isar_feature(aa64_crc32, s)
- || (sf == 1 && sz != 3)
- || (sf == 0 && sz == 3)) {
- unallocated_encoding(s);
- return;
- }
+static void gen_uqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_ushr_d(d, s, i);
+ gen_helper_neon_narrow_sat_u32(d, tcg_env, d);
+}
- if (sz == 3) {
- tcg_val = cpu_reg(s, rm);
- } else {
- uint64_t mask;
- switch (sz) {
- case 0:
- mask = 0xFF;
- break;
- case 1:
- mask = 0xFFFF;
- break;
- case 2:
- mask = 0xFFFFFFFF;
- break;
- default:
- g_assert_not_reached();
- }
- tcg_val = tcg_temp_new_i64();
- tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
- }
+static void gen_sqshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ tcg_gen_sari_i64(d, s, i);
+ tcg_gen_ext16u_i64(d, d);
+ gen_helper_neon_unarrow_sat8(d, tcg_env, d);
+}
- tcg_acc = cpu_reg(s, rn);
- tcg_bytes = tcg_constant_i32(1 << sz);
+static void gen_sqshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ tcg_gen_sari_i64(d, s, i);
+ tcg_gen_ext32u_i64(d, d);
+ gen_helper_neon_unarrow_sat16(d, tcg_env, d);
+}
- if (crc32c) {
- gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
- } else {
- gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
- }
+static void gen_sqshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_sshr_d(d, s, i);
+ gen_helper_neon_unarrow_sat32(d, tcg_env, d);
}
-/* Data-processing (2 source)
- * 31 30 29 28 21 20 16 15 10 9 5 4 0
- * +----+---+---+-----------------+------+--------+------+------+
- * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
- * +----+---+---+-----------------+------+--------+------+------+
- */
-static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
+static void gen_sqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- unsigned int sf, rm, opcode, rn, rd, setflag;
- sf = extract32(insn, 31, 1);
- setflag = extract32(insn, 29, 1);
- rm = extract32(insn, 16, 5);
- opcode = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
+ gen_srshr_bhs(d, s, i);
+ tcg_gen_ext16u_i64(d, d);
+ gen_helper_neon_narrow_sat_s8(d, tcg_env, d);
+}
- if (setflag && opcode != 0) {
- unallocated_encoding(s);
- return;
- }
+static void gen_sqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_srshr_bhs(d, s, i);
+ tcg_gen_ext32u_i64(d, d);
+ gen_helper_neon_narrow_sat_s16(d, tcg_env, d);
+}
- switch (opcode) {
- case 0: /* SUBP(S) */
- if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
- goto do_unallocated;
- } else {
- TCGv_i64 tcg_n, tcg_m, tcg_d;
+static void gen_sqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_srshr_d(d, s, i);
+ gen_helper_neon_narrow_sat_s32(d, tcg_env, d);
+}
- tcg_n = read_cpu_reg_sp(s, rn, true);
- tcg_m = read_cpu_reg_sp(s, rm, true);
- tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
- tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
- tcg_d = cpu_reg(s, rd);
+static void gen_uqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_urshr_bhs(d, s, i);
+ gen_helper_neon_narrow_sat_u8(d, tcg_env, d);
+}
- if (setflag) {
- gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
- } else {
- tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
- }
- }
- break;
- case 2: /* UDIV */
- handle_div(s, false, sf, rm, rn, rd);
- break;
- case 3: /* SDIV */
- handle_div(s, true, sf, rm, rn, rd);
- break;
- case 4: /* IRG */
- if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
- goto do_unallocated;
- }
- if (s->ata[0]) {
- gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
- cpu_reg_sp(s, rn), cpu_reg(s, rm));
- } else {
- gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
- cpu_reg_sp(s, rn));
- }
- break;
- case 5: /* GMI */
- if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
- goto do_unallocated;
- } else {
- TCGv_i64 t = tcg_temp_new_i64();
+static void gen_uqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_urshr_bhs(d, s, i);
+ gen_helper_neon_narrow_sat_u16(d, tcg_env, d);
+}
- tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
- tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
- tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
- }
- break;
- case 8: /* LSLV */
- handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
- break;
- case 9: /* LSRV */
- handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
- break;
- case 10: /* ASRV */
- handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
- break;
- case 11: /* RORV */
- handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
- break;
- case 12: /* PACGA */
- if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- gen_helper_pacga(cpu_reg(s, rd), tcg_env,
- cpu_reg(s, rn), cpu_reg_sp(s, rm));
- break;
- case 16:
- case 17:
- case 18:
- case 19:
- case 20:
- case 21:
- case 22:
- case 23: /* CRC32 */
- {
- int sz = extract32(opcode, 0, 2);
- bool crc32c = extract32(opcode, 2, 1);
- handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
- break;
- }
- default:
- do_unallocated:
- unallocated_encoding(s);
- break;
- }
+static void gen_uqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_urshr_d(d, s, i);
+ gen_helper_neon_narrow_sat_u32(d, tcg_env, d);
}
-/*
- * Data processing - register
- * 31 30 29 28 25 21 20 16 10 0
- * +--+---+--+---+-------+-----+-------+-------+---------+
- * | |op0| |op1| 1 0 1 | op2 | | op3 | |
- * +--+---+--+---+-------+-----+-------+-------+---------+
- */
-static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
-{
- int op0 = extract32(insn, 30, 1);
- int op1 = extract32(insn, 28, 1);
- int op2 = extract32(insn, 21, 4);
- int op3 = extract32(insn, 10, 6);
-
- if (!op1) {
- if (op2 & 8) {
- if (op2 & 1) {
- /* Add/sub (extended register) */
- disas_add_sub_ext_reg(s, insn);
- } else {
- /* Add/sub (shifted register) */
- disas_add_sub_reg(s, insn);
- }
- } else {
- /* Logical (shifted register) */
- disas_logic_reg(s, insn);
- }
- return;
- }
+static void gen_sqrshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_srshr_bhs(d, s, i);
+ tcg_gen_ext16u_i64(d, d);
+ gen_helper_neon_unarrow_sat8(d, tcg_env, d);
+}
- switch (op2) {
- case 0x0:
- switch (op3) {
- case 0x00: /* Add/subtract (with carry) */
- disas_adc_sbc(s, insn);
- break;
+static void gen_sqrshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_srshr_bhs(d, s, i);
+ tcg_gen_ext32u_i64(d, d);
+ gen_helper_neon_unarrow_sat16(d, tcg_env, d);
+}
- case 0x01: /* Rotate right into flags */
- case 0x21:
- disas_rotate_right_into_flags(s, insn);
- break;
+static void gen_sqrshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_srshr_d(d, s, i);
+ gen_helper_neon_unarrow_sat32(d, tcg_env, d);
+}
- case 0x02: /* Evaluate into flags */
- case 0x12:
- case 0x22:
- case 0x32:
- disas_evaluate_into_flags(s, insn);
- break;
+static WideShiftImmFn * const shrn_fns[] = {
+ tcg_gen_shri_i64,
+ tcg_gen_shri_i64,
+ gen_ushr_d,
+};
+TRANS(SHRN_v, do_vec_shift_imm_narrow, a, shrn_fns, 0)
- default:
- goto do_unallocated;
- }
- break;
+static WideShiftImmFn * const rshrn_fns[] = {
+ gen_urshr_bhs,
+ gen_urshr_bhs,
+ gen_urshr_d,
+};
+TRANS(RSHRN_v, do_vec_shift_imm_narrow, a, rshrn_fns, 0)
- case 0x2: /* Conditional compare */
- disas_cc(s, insn); /* both imm and reg forms */
- break;
+static WideShiftImmFn * const sqshrn_fns[] = {
+ gen_sqshrn_b,
+ gen_sqshrn_h,
+ gen_sqshrn_s,
+};
+TRANS(SQSHRN_v, do_vec_shift_imm_narrow, a, sqshrn_fns, MO_SIGN)
- case 0x4: /* Conditional select */
- disas_cond_select(s, insn);
- break;
+static WideShiftImmFn * const uqshrn_fns[] = {
+ gen_uqshrn_b,
+ gen_uqshrn_h,
+ gen_uqshrn_s,
+};
+TRANS(UQSHRN_v, do_vec_shift_imm_narrow, a, uqshrn_fns, 0)
- case 0x6: /* Data-processing */
- if (op0) { /* (1 source) */
- disas_data_proc_1src(s, insn);
- } else { /* (2 source) */
- disas_data_proc_2src(s, insn);
- }
- break;
- case 0x8 ... 0xf: /* (3 source) */
- disas_data_proc_3src(s, insn);
- break;
+static WideShiftImmFn * const sqshrun_fns[] = {
+ gen_sqshrun_b,
+ gen_sqshrun_h,
+ gen_sqshrun_s,
+};
+TRANS(SQSHRUN_v, do_vec_shift_imm_narrow, a, sqshrun_fns, MO_SIGN)
- default:
- do_unallocated:
- unallocated_encoding(s);
- break;
- }
-}
+static WideShiftImmFn * const sqrshrn_fns[] = {
+ gen_sqrshrn_b,
+ gen_sqrshrn_h,
+ gen_sqrshrn_s,
+};
+TRANS(SQRSHRN_v, do_vec_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN)
-static void handle_fp_compare(DisasContext *s, int size,
- unsigned int rn, unsigned int rm,
- bool cmp_with_zero, bool signal_all_nans)
-{
- TCGv_i64 tcg_flags = tcg_temp_new_i64();
- TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+static WideShiftImmFn * const uqrshrn_fns[] = {
+ gen_uqrshrn_b,
+ gen_uqrshrn_h,
+ gen_uqrshrn_s,
+};
+TRANS(UQRSHRN_v, do_vec_shift_imm_narrow, a, uqrshrn_fns, 0)
- if (size == MO_64) {
- TCGv_i64 tcg_vn, tcg_vm;
+static WideShiftImmFn * const sqrshrun_fns[] = {
+ gen_sqrshrun_b,
+ gen_sqrshrun_h,
+ gen_sqrshrun_s,
+};
+TRANS(SQRSHRUN_v, do_vec_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN)
- tcg_vn = read_fp_dreg(s, rn);
- if (cmp_with_zero) {
- tcg_vm = tcg_constant_i64(0);
- } else {
- tcg_vm = read_fp_dreg(s, rm);
- }
- if (signal_all_nans) {
- gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- } else {
- gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- }
- } else {
- TCGv_i32 tcg_vn = tcg_temp_new_i32();
- TCGv_i32 tcg_vm = tcg_temp_new_i32();
+/*
+ * Advanced SIMD Scalar Shift by Immediate
+ */
- read_vec_element_i32(s, tcg_vn, rn, 0, size);
- if (cmp_with_zero) {
- tcg_gen_movi_i32(tcg_vm, 0);
- } else {
- read_vec_element_i32(s, tcg_vm, rm, 0, size);
- }
+static bool do_scalar_shift_imm(DisasContext *s, arg_rri_e *a,
+ WideShiftImmFn *fn, bool accumulate,
+ MemOp sign)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 rd = tcg_temp_new_i64();
+ TCGv_i64 rn = tcg_temp_new_i64();
- switch (size) {
- case MO_32:
- if (signal_all_nans) {
- gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- } else {
- gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- }
- break;
- case MO_16:
- if (signal_all_nans) {
- gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- } else {
- gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
- }
- break;
- default:
- g_assert_not_reached();
+ read_vec_element(s, rn, a->rn, 0, a->esz | sign);
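+ /* The accumulating and insert forms also read the old value of Rd. */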
+ if (accumulate) {
+ read_vec_element(s, rd, a->rd, 0, a->esz | sign);
}
+ fn(rd, rn, a->imm);
+ write_fp_dreg(s, a->rd, rd);
}
-
- gen_set_nzcv(tcg_flags);
+ return true;
}
-/* Floating point compare
- * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
- * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
- * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
- * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
- */
-static void disas_fp_compare(DisasContext *s, uint32_t insn)
-{
- unsigned int mos, type, rm, op, rn, opc, op2r;
- int size;
+TRANS(SSHR_s, do_scalar_shift_imm, a, gen_sshr_d, false, 0)
+TRANS(USHR_s, do_scalar_shift_imm, a, gen_ushr_d, false, 0)
+TRANS(SSRA_s, do_scalar_shift_imm, a, gen_ssra_d, true, 0)
+TRANS(USRA_s, do_scalar_shift_imm, a, gen_usra_d, true, 0)
+TRANS(SRSHR_s, do_scalar_shift_imm, a, gen_srshr_d, false, 0)
+TRANS(URSHR_s, do_scalar_shift_imm, a, gen_urshr_d, false, 0)
+TRANS(SRSRA_s, do_scalar_shift_imm, a, gen_srsra_d, true, 0)
+TRANS(URSRA_s, do_scalar_shift_imm, a, gen_ursra_d, true, 0)
+TRANS(SRI_s, do_scalar_shift_imm, a, gen_sri_d, true, 0)
- mos = extract32(insn, 29, 3);
- type = extract32(insn, 22, 2);
- rm = extract32(insn, 16, 5);
- op = extract32(insn, 14, 2);
- rn = extract32(insn, 5, 5);
- opc = extract32(insn, 3, 2);
- op2r = extract32(insn, 0, 3);
+TRANS(SHL_s, do_scalar_shift_imm, a, tcg_gen_shli_i64, false, 0)
+TRANS(SLI_s, do_scalar_shift_imm, a, gen_sli_d, true, 0)
- if (mos || op || op2r) {
- unallocated_encoding(s);
- return;
- }
+static void trunc_i64_env_imm(TCGv_i64 d, TCGv_i64 s, int64_t i,
+ NeonGenTwoOpEnvFn *fn)
+{
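+ /* Apply a 32-bit Neon env helper (with immediate) to the low half of a 64-bit value. */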
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t, s);
+ fn(t, tcg_env, t, tcg_constant_i32(i));
+ tcg_gen_extu_i32_i64(d, t);
+}
- switch (type) {
- case 0:
- size = MO_32;
- break;
- case 1:
- size = MO_64;
- break;
- case 3:
- size = MO_16;
- if (dc_isar_feature(aa64_fp16, s)) {
- break;
- }
- /* fallthru */
- default:
- unallocated_encoding(s);
- return;
- }
+static void gen_sqshli_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s8);
+}
- if (!fp_access_check(s)) {
- return;
- }
+static void gen_sqshli_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s16);
+}
- handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
+static void gen_sqshli_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s32);
}
-/* Floating point conditional compare
- * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
- * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
- * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
- * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
- */
-static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
+static void gen_sqshli_d(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- unsigned int mos, type, rm, cond, rn, op, nzcv;
- TCGLabel *label_continue = NULL;
- int size;
+ gen_helper_neon_qshl_s64(d, tcg_env, s, tcg_constant_i64(i));
+}
- mos = extract32(insn, 29, 3);
- type = extract32(insn, 22, 2);
- rm = extract32(insn, 16, 5);
- cond = extract32(insn, 12, 4);
- rn = extract32(insn, 5, 5);
- op = extract32(insn, 4, 1);
- nzcv = extract32(insn, 0, 4);
+static void gen_uqshli_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u8);
+}
- if (mos) {
- unallocated_encoding(s);
- return;
- }
+static void gen_uqshli_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u16);
+}
- switch (type) {
- case 0:
- size = MO_32;
- break;
- case 1:
- size = MO_64;
- break;
- case 3:
- size = MO_16;
- if (dc_isar_feature(aa64_fp16, s)) {
- break;
- }
- /* fallthru */
- default:
- unallocated_encoding(s);
- return;
- }
+static void gen_uqshli_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u32);
+}
- if (!fp_access_check(s)) {
- return;
- }
+static void gen_uqshli_d(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ gen_helper_neon_qshl_u64(d, tcg_env, s, tcg_constant_i64(i));
+}
- if (cond < 0x0e) { /* not always */
- TCGLabel *label_match = gen_new_label();
- label_continue = gen_new_label();
- arm_gen_test_cc(cond, label_match);
- /* nomatch: */
- gen_set_nzcv(tcg_constant_i64(nzcv << 28));
- tcg_gen_br(label_continue);
- gen_set_label(label_match);
- }
+static void gen_sqshlui_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s8);
+}
- handle_fp_compare(s, size, rn, rm, false, op);
+static void gen_sqshlui_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s16);
+}
- if (cond < 0x0e) {
- gen_set_label(label_continue);
- }
+static void gen_sqshlui_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+ trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s32);
}
-/* Floating-point data-processing (1 source) - half precision */
-static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
+static void gen_sqshlui_d(TCGv_i64 d, TCGv_i64 s, int64_t i)
{
- TCGv_ptr fpst = NULL;
- TCGv_i32 tcg_op = read_fp_hreg(s, rn);
- TCGv_i32 tcg_res = tcg_temp_new_i32();
+ gen_helper_neon_qshlu_s64(d, tcg_env, s, tcg_constant_i64(i));
+}
- switch (opcode) {
- case 0x0: /* FMOV */
- tcg_gen_mov_i32(tcg_res, tcg_op);
- break;
- case 0x1: /* FABS */
- gen_vfp_absh(tcg_res, tcg_op);
- break;
- case 0x2: /* FNEG */
- gen_vfp_negh(tcg_res, tcg_op);
- break;
- case 0x3: /* FSQRT */
- fpst = fpstatus_ptr(FPST_FPCR_F16);
- gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
- break;
- case 0x8: /* FRINTN */
- case 0x9: /* FRINTP */
- case 0xa: /* FRINTM */
- case 0xb: /* FRINTZ */
- case 0xc: /* FRINTA */
- {
- TCGv_i32 tcg_rmode;
+static WideShiftImmFn * const f_scalar_sqshli[] = {
+ gen_sqshli_b, gen_sqshli_h, gen_sqshli_s, gen_sqshli_d
+};
- fpst = fpstatus_ptr(FPST_FPCR_F16);
- tcg_rmode = gen_set_rmode(opcode & 7, fpst);
- gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
- gen_restore_rmode(tcg_rmode, fpst);
- break;
- }
- case 0xe: /* FRINTX */
- fpst = fpstatus_ptr(FPST_FPCR_F16);
- gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
- break;
- case 0xf: /* FRINTI */
- fpst = fpstatus_ptr(FPST_FPCR_F16);
- gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
- break;
- default:
- g_assert_not_reached();
- }
+static WideShiftImmFn * const f_scalar_uqshli[] = {
+ gen_uqshli_b, gen_uqshli_h, gen_uqshli_s, gen_uqshli_d
+};
- write_fp_sreg(s, rd, tcg_res);
-}
+static WideShiftImmFn * const f_scalar_sqshlui[] = {
+ gen_sqshlui_b, gen_sqshlui_h, gen_sqshlui_s, gen_sqshlui_d
+};
-/* Floating-point data-processing (1 source) - single precision */
-static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
-{
- void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
- TCGv_i32 tcg_op, tcg_res;
- TCGv_ptr fpst;
- int rmode = -1;
-
- tcg_op = read_fp_sreg(s, rn);
- tcg_res = tcg_temp_new_i32();
-
- switch (opcode) {
- case 0x0: /* FMOV */
- tcg_gen_mov_i32(tcg_res, tcg_op);
- goto done;
- case 0x1: /* FABS */
- gen_vfp_abss(tcg_res, tcg_op);
- goto done;
- case 0x2: /* FNEG */
- gen_vfp_negs(tcg_res, tcg_op);
- goto done;
- case 0x3: /* FSQRT */
- gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
- goto done;
- case 0x6: /* BFCVT */
- gen_fpst = gen_helper_bfcvt;
- break;
- case 0x8: /* FRINTN */
- case 0x9: /* FRINTP */
- case 0xa: /* FRINTM */
- case 0xb: /* FRINTZ */
- case 0xc: /* FRINTA */
- rmode = opcode & 7;
- gen_fpst = gen_helper_rints;
- break;
- case 0xe: /* FRINTX */
- gen_fpst = gen_helper_rints_exact;
- break;
- case 0xf: /* FRINTI */
- gen_fpst = gen_helper_rints;
- break;
- case 0x10: /* FRINT32Z */
- rmode = FPROUNDING_ZERO;
- gen_fpst = gen_helper_frint32_s;
- break;
- case 0x11: /* FRINT32X */
- gen_fpst = gen_helper_frint32_s;
- break;
- case 0x12: /* FRINT64Z */
- rmode = FPROUNDING_ZERO;
- gen_fpst = gen_helper_frint64_s;
- break;
- case 0x13: /* FRINT64X */
- gen_fpst = gen_helper_frint64_s;
- break;
- default:
- g_assert_not_reached();
- }
+/* Note that the helpers sign-extend their inputs, so don't do it here. */
+TRANS(SQSHL_si, do_scalar_shift_imm, a, f_scalar_sqshli[a->esz], false, 0)
+TRANS(UQSHL_si, do_scalar_shift_imm, a, f_scalar_uqshli[a->esz], false, 0)
+TRANS(SQSHLU_si, do_scalar_shift_imm, a, f_scalar_sqshlui[a->esz], false, 0)
- fpst = fpstatus_ptr(FPST_FPCR);
- if (rmode >= 0) {
- TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
- gen_fpst(tcg_res, tcg_op, fpst);
- gen_restore_rmode(tcg_rmode, fpst);
- } else {
- gen_fpst(tcg_res, tcg_op, fpst);
- }
+static bool do_scalar_shift_imm_narrow(DisasContext *s, arg_rri_e *a,
+ WideShiftImmFn * const fns[3],
+ MemOp sign, bool zext)
+{
+ MemOp esz = a->esz;
- done:
- write_fp_sreg(s, rd, tcg_res);
-}
+ tcg_debug_assert(esz >= MO_8 && esz <= MO_32);
-/* Floating-point data-processing (1 source) - double precision */
-static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
-{
- void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
- TCGv_i64 tcg_op, tcg_res;
- TCGv_ptr fpst;
- int rmode = -1;
+ if (fp_access_check(s)) {
+ TCGv_i64 rd = tcg_temp_new_i64();
+ TCGv_i64 rn = tcg_temp_new_i64();
- switch (opcode) {
- case 0x0: /* FMOV */
- gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
- return;
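+        /* The source element is double-width; fns[esz] shifts and narrows. */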
+ read_vec_element(s, rn, a->rn, 0, (esz + 1) | sign);
+ fns[esz](rd, rn, a->imm);
+ if (zext) {
+ tcg_gen_ext_i64(rd, rd, esz);
+ }
+ write_fp_dreg(s, a->rd, rd);
}
+ return true;
+}
- tcg_op = read_fp_dreg(s, rn);
- tcg_res = tcg_temp_new_i64();
+TRANS(SQSHRN_si, do_scalar_shift_imm_narrow, a, sqshrn_fns, MO_SIGN, true)
+TRANS(SQRSHRN_si, do_scalar_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN, true)
+TRANS(UQSHRN_si, do_scalar_shift_imm_narrow, a, uqshrn_fns, 0, false)
+TRANS(UQRSHRN_si, do_scalar_shift_imm_narrow, a, uqrshrn_fns, 0, false)
+TRANS(SQSHRUN_si, do_scalar_shift_imm_narrow, a, sqshrun_fns, MO_SIGN, false)
+TRANS(SQRSHRUN_si, do_scalar_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN, false)
- switch (opcode) {
- case 0x1: /* FABS */
- gen_vfp_absd(tcg_res, tcg_op);
- goto done;
- case 0x2: /* FNEG */
- gen_vfp_negd(tcg_res, tcg_op);
- goto done;
- case 0x3: /* FSQRT */
- gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
- goto done;
- case 0x8: /* FRINTN */
- case 0x9: /* FRINTP */
- case 0xa: /* FRINTM */
- case 0xb: /* FRINTZ */
- case 0xc: /* FRINTA */
- rmode = opcode & 7;
- gen_fpst = gen_helper_rintd;
- break;
- case 0xe: /* FRINTX */
- gen_fpst = gen_helper_rintd_exact;
- break;
- case 0xf: /* FRINTI */
- gen_fpst = gen_helper_rintd;
- break;
- case 0x10: /* FRINT32Z */
- rmode = FPROUNDING_ZERO;
- gen_fpst = gen_helper_frint32_d;
- break;
- case 0x11: /* FRINT32X */
- gen_fpst = gen_helper_frint32_d;
- break;
- case 0x12: /* FRINT64Z */
- rmode = FPROUNDING_ZERO;
- gen_fpst = gen_helper_frint64_d;
- break;
- case 0x13: /* FRINT64X */
- gen_fpst = gen_helper_frint64_d;
- break;
- default:
- g_assert_not_reached();
+static bool do_div(DisasContext *s, arg_rrr_sf *a, bool is_signed)
+{
+ TCGv_i64 tcg_n, tcg_m, tcg_rd;
+ tcg_rd = cpu_reg(s, a->rd);
+
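+    /* For 32-bit SDIV, sign-extend the inputs for the 64-bit helper. */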
+ if (!a->sf && is_signed) {
+ tcg_n = tcg_temp_new_i64();
+ tcg_m = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, a->rn));
+ tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, a->rm));
+ } else {
+ tcg_n = read_cpu_reg(s, a->rn, a->sf);
+ tcg_m = read_cpu_reg(s, a->rm, a->sf);
}
- fpst = fpstatus_ptr(FPST_FPCR);
- if (rmode >= 0) {
- TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
- gen_fpst(tcg_res, tcg_op, fpst);
- gen_restore_rmode(tcg_rmode, fpst);
+ if (is_signed) {
+ gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
} else {
- gen_fpst(tcg_res, tcg_op, fpst);
+ gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
}
- done:
- write_fp_dreg(s, rd, tcg_res);
+ if (!a->sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ return true;
}
-static void handle_fp_fcvt(DisasContext *s, int opcode,
- int rd, int rn, int dtype, int ntype)
-{
- switch (ntype) {
- case 0x0:
- {
- TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
- if (dtype == 1) {
- /* Single to double */
- TCGv_i64 tcg_rd = tcg_temp_new_i64();
- gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
- write_fp_dreg(s, rd, tcg_rd);
- } else {
- /* Single to half */
- TCGv_i32 tcg_rd = tcg_temp_new_i32();
- TCGv_i32 ahp = get_ahp_flag();
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+TRANS(SDIV, do_div, a, true)
+TRANS(UDIV, do_div, a, false)
- gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
- /* write_fp_sreg is OK here because top half of tcg_rd is zero */
- write_fp_sreg(s, rd, tcg_rd);
- }
+/* Shift a TCGv src by TCGv shift_amount, put result in dst.
+ * Note that it is the caller's responsibility to ensure that the
+ * shift amount is in range (i.e. 0..31 or 0..63) and provide the ARM
+ * mandated semantics for out of range shifts.
+ */
+static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, TCGv_i64 shift_amount)
+{
+ switch (shift_type) {
+ case A64_SHIFT_TYPE_LSL:
+ tcg_gen_shl_i64(dst, src, shift_amount);
break;
- }
- case 0x1:
- {
- TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
- TCGv_i32 tcg_rd = tcg_temp_new_i32();
- if (dtype == 0) {
- /* Double to single */
- gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
- } else {
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
- TCGv_i32 ahp = get_ahp_flag();
- /* Double to half */
- gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
- /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ case A64_SHIFT_TYPE_LSR:
+ tcg_gen_shr_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ASR:
+ if (!sf) {
+ tcg_gen_ext32s_i64(dst, src);
}
- write_fp_sreg(s, rd, tcg_rd);
+ tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
break;
- }
- case 0x3:
- {
- TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
- TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
- TCGv_i32 tcg_ahp = get_ahp_flag();
- tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
- if (dtype == 0) {
- /* Half to single */
- TCGv_i32 tcg_rd = tcg_temp_new_i32();
- gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
- write_fp_sreg(s, rd, tcg_rd);
+ case A64_SHIFT_TYPE_ROR:
+ if (sf) {
+ tcg_gen_rotr_i64(dst, src, shift_amount);
} else {
- /* Half to double */
- TCGv_i64 tcg_rd = tcg_temp_new_i64();
- gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
- write_fp_dreg(s, rd, tcg_rd);
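+            /* 32-bit ROR: rotate in 32-bit temps, then zero-extend. */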
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t0, src);
+ tcg_gen_extrl_i64_i32(t1, shift_amount);
+ tcg_gen_rotr_i32(t0, t0, t1);
+ tcg_gen_extu_i32_i64(dst, t0);
}
break;
- }
default:
- g_assert_not_reached();
+        g_assert_not_reached();
+ break;
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(dst, dst);
}
}
-/* Floating point data-processing (1 source)
- * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
- * +---+---+---+-----------+------+---+--------+-----------+------+------+
- * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
- * +---+---+---+-----------+------+---+--------+-----------+------+------+
+/* Shift a TCGv src by immediate, put result in dst.
+ * The shift amount must be in range (this should always be true as the
+ * relevant instructions will UNDEF on bad shift immediates).
*/
-static void disas_fp_1src(DisasContext *s, uint32_t insn)
+static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, unsigned int shift_i)
{
- int mos = extract32(insn, 29, 3);
- int type = extract32(insn, 22, 2);
- int opcode = extract32(insn, 15, 6);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
-
- if (mos) {
- goto do_unallocated;
- }
-
- switch (opcode) {
- case 0x4: case 0x5: case 0x7:
- {
- /* FCVT between half, single and double precision */
- int dtype = extract32(opcode, 0, 2);
- if (type == 2 || dtype == type) {
- goto do_unallocated;
- }
- if (!fp_access_check(s)) {
- return;
- }
+ assert(shift_i < (sf ? 64 : 32));
- handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
- break;
+ if (shift_i == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else {
+ shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
}
+}
- case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
- if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
- goto do_unallocated;
- }
- /* fall through */
- case 0x0 ... 0x3:
- case 0x8 ... 0xc:
- case 0xe ... 0xf:
- /* 32-to-32 and 64-to-64 ops */
- switch (type) {
- case 0:
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_1src_single(s, opcode, rd, rn);
- break;
- case 1:
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_1src_double(s, opcode, rd, rn);
- break;
- case 3:
- if (!dc_isar_feature(aa64_fp16, s)) {
- goto do_unallocated;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_1src_half(s, opcode, rd, rn);
- break;
- default:
- goto do_unallocated;
- }
- break;
-
- case 0x6:
- switch (type) {
- case 1: /* BFCVT */
- if (!dc_isar_feature(aa64_bf16, s)) {
- goto do_unallocated;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_1src_single(s, opcode, rd, rn);
- break;
- default:
- goto do_unallocated;
- }
- break;
+static bool do_shift_reg(DisasContext *s, arg_rrr_sf *a,
+ enum a64_shift_type shift_type)
+{
+ TCGv_i64 tcg_shift = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_rn = read_cpu_reg(s, a->rn, a->sf);
- default:
- do_unallocated:
- unallocated_encoding(s);
- break;
- }
+ tcg_gen_andi_i64(tcg_shift, cpu_reg(s, a->rm), a->sf ? 63 : 31);
+ shift_reg(tcg_rd, tcg_rn, a->sf, shift_type, tcg_shift);
+ return true;
}
-/* Floating point immediate
- * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
- * +---+---+---+-----------+------+---+------------+-------+------+------+
- * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
- * +---+---+---+-----------+------+---+------------+-------+------+------+
- */
-static void disas_fp_imm(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int imm5 = extract32(insn, 5, 5);
- int imm8 = extract32(insn, 13, 8);
- int type = extract32(insn, 22, 2);
- int mos = extract32(insn, 29, 3);
- uint64_t imm;
- MemOp sz;
+TRANS(LSLV, do_shift_reg, a, A64_SHIFT_TYPE_LSL)
+TRANS(LSRV, do_shift_reg, a, A64_SHIFT_TYPE_LSR)
+TRANS(ASRV, do_shift_reg, a, A64_SHIFT_TYPE_ASR)
+TRANS(RORV, do_shift_reg, a, A64_SHIFT_TYPE_ROR)
- if (mos || imm5) {
- unallocated_encoding(s);
- return;
- }
+static bool do_crc32(DisasContext *s, arg_rrr_e *a, bool crc32c)
+{
+ TCGv_i64 tcg_acc, tcg_val, tcg_rd;
+ TCGv_i32 tcg_bytes;
- switch (type) {
- case 0:
- sz = MO_32;
+ switch (a->esz) {
+ case MO_8:
+ case MO_16:
+ case MO_32:
+ tcg_val = tcg_temp_new_i64();
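+        /* Use only the low 8 << esz bits of Rm. */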
+ tcg_gen_extract_i64(tcg_val, cpu_reg(s, a->rm), 0, 8 << a->esz);
break;
- case 1:
- sz = MO_64;
+ case MO_64:
+ tcg_val = cpu_reg(s, a->rm);
break;
- case 3:
- sz = MO_16;
- if (dc_isar_feature(aa64_fp16, s)) {
- break;
- }
- /* fallthru */
default:
- unallocated_encoding(s);
- return;
+ g_assert_not_reached();
}
+ tcg_acc = cpu_reg(s, a->rn);
+ tcg_bytes = tcg_constant_i32(1 << a->esz);
+ tcg_rd = cpu_reg(s, a->rd);
- if (!fp_access_check(s)) {
- return;
+ if (crc32c) {
+ gen_helper_crc32c_64(tcg_rd, tcg_acc, tcg_val, tcg_bytes);
+ } else {
+ gen_helper_crc32_64(tcg_rd, tcg_acc, tcg_val, tcg_bytes);
}
-
- imm = vfp_expand_imm(sz, imm8);
- write_fp_dreg(s, rd, tcg_constant_i64(imm));
+ return true;
}
-/* Handle floating point <=> fixed point conversions. Note that we can
- * also deal with fp <=> integer conversions as a special case (scale == 64)
- * OPTME: consider handling that special case specially or at least skipping
- * the call to scalbn in the helpers for zero shifts.
- */
-static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
- bool itof, int rmode, int scale, int sf, int type)
-{
- bool is_signed = !(opcode & 1);
- TCGv_ptr tcg_fpstatus;
- TCGv_i32 tcg_shift, tcg_single;
- TCGv_i64 tcg_double;
-
- tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
-
- tcg_shift = tcg_constant_i32(64 - scale);
-
- if (itof) {
- TCGv_i64 tcg_int = cpu_reg(s, rn);
- if (!sf) {
- TCGv_i64 tcg_extend = tcg_temp_new_i64();
-
- if (is_signed) {
- tcg_gen_ext32s_i64(tcg_extend, tcg_int);
- } else {
- tcg_gen_ext32u_i64(tcg_extend, tcg_int);
- }
-
- tcg_int = tcg_extend;
- }
-
- switch (type) {
- case 1: /* float64 */
- tcg_double = tcg_temp_new_i64();
- if (is_signed) {
- gen_helper_vfp_sqtod(tcg_double, tcg_int,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_uqtod(tcg_double, tcg_int,
- tcg_shift, tcg_fpstatus);
- }
- write_fp_dreg(s, rd, tcg_double);
- break;
+TRANS_FEAT(CRC32, aa64_crc32, do_crc32, a, false)
+TRANS_FEAT(CRC32C, aa64_crc32, do_crc32, a, true)
- case 0: /* float32 */
- tcg_single = tcg_temp_new_i32();
- if (is_signed) {
- gen_helper_vfp_sqtos(tcg_single, tcg_int,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_uqtos(tcg_single, tcg_int,
- tcg_shift, tcg_fpstatus);
- }
- write_fp_sreg(s, rd, tcg_single);
- break;
+static bool do_subp(DisasContext *s, arg_rrr *a, bool setflag)
+{
+ TCGv_i64 tcg_n = read_cpu_reg_sp(s, a->rn, true);
+ TCGv_i64 tcg_m = read_cpu_reg_sp(s, a->rm, true);
+ TCGv_i64 tcg_d = cpu_reg(s, a->rd);
- case 3: /* float16 */
- tcg_single = tcg_temp_new_i32();
- if (is_signed) {
- gen_helper_vfp_sqtoh(tcg_single, tcg_int,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_uqtoh(tcg_single, tcg_int,
- tcg_shift, tcg_fpstatus);
- }
- write_fp_sreg(s, rd, tcg_single);
- break;
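+    /* Operands are 56-bit addresses, sign-extended from bit 55. */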
+ tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
+ tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
- default:
- g_assert_not_reached();
- }
+ if (setflag) {
+ gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
} else {
- TCGv_i64 tcg_int = cpu_reg(s, rd);
- TCGv_i32 tcg_rmode;
-
- if (extract32(opcode, 2, 1)) {
- /* There are too many rounding modes to all fit into rmode,
- * so FCVTA[US] is a special case.
- */
- rmode = FPROUNDING_TIEAWAY;
- }
-
- tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
-
- switch (type) {
- case 1: /* float64 */
- tcg_double = read_fp_dreg(s, rn);
- if (is_signed) {
- if (!sf) {
- gen_helper_vfp_tosld(tcg_int, tcg_double,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_tosqd(tcg_int, tcg_double,
- tcg_shift, tcg_fpstatus);
- }
- } else {
- if (!sf) {
- gen_helper_vfp_tould(tcg_int, tcg_double,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_touqd(tcg_int, tcg_double,
- tcg_shift, tcg_fpstatus);
- }
- }
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_int, tcg_int);
- }
- break;
+ tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
+ }
+ return true;
+}
- case 0: /* float32 */
- tcg_single = read_fp_sreg(s, rn);
- if (sf) {
- if (is_signed) {
- gen_helper_vfp_tosqs(tcg_int, tcg_single,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_touqs(tcg_int, tcg_single,
- tcg_shift, tcg_fpstatus);
- }
- } else {
- TCGv_i32 tcg_dest = tcg_temp_new_i32();
- if (is_signed) {
- gen_helper_vfp_tosls(tcg_dest, tcg_single,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_touls(tcg_dest, tcg_single,
- tcg_shift, tcg_fpstatus);
- }
- tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
- }
- break;
+TRANS_FEAT(SUBP, aa64_mte_insn_reg, do_subp, a, false)
+TRANS_FEAT(SUBPS, aa64_mte_insn_reg, do_subp, a, true)
- case 3: /* float16 */
- tcg_single = read_fp_sreg(s, rn);
- if (sf) {
- if (is_signed) {
- gen_helper_vfp_tosqh(tcg_int, tcg_single,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_touqh(tcg_int, tcg_single,
- tcg_shift, tcg_fpstatus);
- }
- } else {
- TCGv_i32 tcg_dest = tcg_temp_new_i32();
- if (is_signed) {
- gen_helper_vfp_toslh(tcg_dest, tcg_single,
- tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_toulh(tcg_dest, tcg_single,
- tcg_shift, tcg_fpstatus);
- }
- tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
- }
- break;
+static bool trans_IRG(DisasContext *s, arg_rrr *a)
+{
+ if (dc_isar_feature(aa64_mte_insn_reg, s)) {
+ TCGv_i64 tcg_rd = cpu_reg_sp(s, a->rd);
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, a->rn);
- default:
- g_assert_not_reached();
+ if (s->ata[0]) {
+ gen_helper_irg(tcg_rd, tcg_env, tcg_rn, cpu_reg(s, a->rm));
+ } else {
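+            /* Tag generation disabled: produce the address with tag 0. */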
+ gen_address_with_allocation_tag0(tcg_rd, tcg_rn);
}
-
- gen_restore_rmode(tcg_rmode, tcg_fpstatus);
+ return true;
}
+ return false;
}
-/* Floating point <-> fixed point conversions
- * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
- * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
- * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
- * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
- */
-static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int scale = extract32(insn, 10, 6);
- int opcode = extract32(insn, 16, 3);
- int rmode = extract32(insn, 19, 2);
- int type = extract32(insn, 22, 2);
- bool sbit = extract32(insn, 29, 1);
- bool sf = extract32(insn, 31, 1);
- bool itof;
-
- if (sbit || (!sf && scale < 32)) {
- unallocated_encoding(s);
- return;
- }
+static bool trans_GMI(DisasContext *s, arg_rrr *a)
+{
+ if (dc_isar_feature(aa64_mte_insn_reg, s)) {
+ TCGv_i64 t = tcg_temp_new_i64();
- switch (type) {
- case 0: /* float32 */
- case 1: /* float64 */
- break;
- case 3: /* float16 */
- if (dc_isar_feature(aa64_fp16, s)) {
- break;
- }
- /* fallthru */
- default:
- unallocated_encoding(s);
- return;
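+        /* Set the bit for the allocation tag of Xn|SP in the mask from Xm. */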
+        tcg_gen_extract_i64(t, cpu_reg_sp(s, a->rn), 56, 4);
+        tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
+        tcg_gen_or_i64(cpu_reg(s, a->rd), cpu_reg(s, a->rm), t);
+        return true;
}
+ return false;
+}
- switch ((rmode << 3) | opcode) {
- case 0x2: /* SCVTF */
- case 0x3: /* UCVTF */
- itof = true;
- break;
- case 0x18: /* FCVTZS */
- case 0x19: /* FCVTZU */
- itof = false;
- break;
- default:
- unallocated_encoding(s);
- return;
+static bool trans_PACGA(DisasContext *s, arg_rrr *a)
+{
+ if (dc_isar_feature(aa64_pauth, s)) {
+ gen_helper_pacga(cpu_reg(s, a->rd), tcg_env,
+ cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm));
+ return true;
}
+ return false;
+}
- if (!fp_access_check(s)) {
- return;
- }
+typedef void ArithOneOp(TCGv_i64, TCGv_i64);
- handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
+static bool gen_rr(DisasContext *s, int rd, int rn, ArithOneOp fn)
+{
+ fn(cpu_reg(s, rd), cpu_reg(s, rn));
+ return true;
}
-static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
+static void gen_rbit32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
{
- /* FMOV: gpr to or from float, double, or top half of quad fp reg,
- * without conversion.
- */
+ TCGv_i32 t32 = tcg_temp_new_i32();
- if (itof) {
- TCGv_i64 tcg_rn = cpu_reg(s, rn);
- TCGv_i64 tmp;
+ tcg_gen_extrl_i64_i32(t32, tcg_rn);
+ gen_helper_rbit(t32, t32);
+ tcg_gen_extu_i32_i64(tcg_rd, t32);
+}
- switch (type) {
- case 0:
- /* 32 bit */
- tmp = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(tmp, tcg_rn);
- write_fp_dreg(s, rd, tmp);
- break;
- case 1:
- /* 64 bit */
- write_fp_dreg(s, rd, tcg_rn);
- break;
- case 2:
- /* 64 bit to top half. */
- tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
- clear_vec_high(s, true, rd);
- break;
- case 3:
- /* 16 bit */
- tmp = tcg_temp_new_i64();
- tcg_gen_ext16u_i64(tmp, tcg_rn);
- write_fp_dreg(s, rd, tmp);
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
+static void gen_rev16_xx(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 mask)
+{
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
- switch (type) {
- case 0:
- /* 32 bit */
- tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
- break;
- case 1:
- /* 64 bit */
- tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
- break;
- case 2:
- /* 64 bits from top half */
- tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
- break;
- case 3:
- /* 16 bit */
- tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
- break;
- default:
- g_assert_not_reached();
- }
- }
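+    /* Swap adjacent bytes within each 16-bit lane via the even-byte mask. */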
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
+ tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
+ tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
+ tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}
-static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
+static void gen_rev16_32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
{
- TCGv_i64 t = read_fp_dreg(s, rn);
- TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
+ gen_rev16_xx(tcg_rd, tcg_rn, tcg_constant_i64(0x00ff00ff));
+}
- gen_helper_fjcvtzs(t, t, fpstatus);
+static void gen_rev16_64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+{
+ gen_rev16_xx(tcg_rd, tcg_rn, tcg_constant_i64(0x00ff00ff00ff00ffull));
+}
- tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
- tcg_gen_extrh_i64_i32(cpu_ZF, t);
- tcg_gen_movi_i32(cpu_CF, 0);
- tcg_gen_movi_i32(cpu_NF, 0);
- tcg_gen_movi_i32(cpu_VF, 0);
+static void gen_rev_32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+{
+ tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
}
-/* Floating point <-> integer conversions
- * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
- * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
- * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
- * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
- */
-static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int opcode = extract32(insn, 16, 3);
- int rmode = extract32(insn, 19, 2);
- int type = extract32(insn, 22, 2);
- bool sbit = extract32(insn, 29, 1);
- bool sf = extract32(insn, 31, 1);
- bool itof = false;
-
- if (sbit) {
- goto do_unallocated;
- }
-
- switch (opcode) {
- case 2: /* SCVTF */
- case 3: /* UCVTF */
- itof = true;
- /* fallthru */
- case 4: /* FCVTAS */
- case 5: /* FCVTAU */
- if (rmode != 0) {
- goto do_unallocated;
- }
- /* fallthru */
- case 0: /* FCVT[NPMZ]S */
- case 1: /* FCVT[NPMZ]U */
- switch (type) {
- case 0: /* float32 */
- case 1: /* float64 */
- break;
- case 3: /* float16 */
- if (!dc_isar_feature(aa64_fp16, s)) {
- goto do_unallocated;
- }
- break;
- default:
- goto do_unallocated;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
- break;
+static void gen_rev32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+{
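+    /* bswap64 + rotate by 32 byte-reverses each 32-bit half in place. */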
+ tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
+ tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
+}
- default:
- switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
- case 0b01100110: /* FMOV half <-> 32-bit int */
- case 0b01100111:
- case 0b11100110: /* FMOV half <-> 64-bit int */
- case 0b11100111:
- if (!dc_isar_feature(aa64_fp16, s)) {
- goto do_unallocated;
- }
- /* fallthru */
- case 0b00000110: /* FMOV 32-bit */
- case 0b00000111:
- case 0b10100110: /* FMOV 64-bit */
- case 0b10100111:
- case 0b11001110: /* FMOV top half of 128-bit */
- case 0b11001111:
- if (!fp_access_check(s)) {
- return;
- }
- itof = opcode & 1;
- handle_fmov(s, rd, rn, type, itof);
- break;
+TRANS(RBIT, gen_rr, a->rd, a->rn, a->sf ? gen_helper_rbit64 : gen_rbit32)
+TRANS(REV16, gen_rr, a->rd, a->rn, a->sf ? gen_rev16_64 : gen_rev16_32)
+TRANS(REV32, gen_rr, a->rd, a->rn, a->sf ? gen_rev32 : gen_rev_32)
+TRANS(REV64, gen_rr, a->rd, a->rn, tcg_gen_bswap64_i64)
- case 0b00111110: /* FJCVTZS */
- if (!dc_isar_feature(aa64_jscvt, s)) {
- goto do_unallocated;
- } else if (fp_access_check(s)) {
- handle_fjcvtzs(s, rd, rn);
- }
- break;
+static void gen_clz32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+{
+ TCGv_i32 t32 = tcg_temp_new_i32();
- default:
- do_unallocated:
- unallocated_encoding(s);
- return;
- }
- break;
- }
+ tcg_gen_extrl_i64_i32(t32, tcg_rn);
+ tcg_gen_clzi_i32(t32, t32, 32);
+ tcg_gen_extu_i32_i64(tcg_rd, t32);
}
-/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
- * 31 30 29 28 25 24 0
- * +---+---+---+---------+-----------------------------+
- * | | 0 | | 1 1 1 1 | |
- * +---+---+---+---------+-----------------------------+
- */
-static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
+static void gen_clz64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
{
- if (extract32(insn, 24, 1)) {
- unallocated_encoding(s); /* in decodetree */
- } else if (extract32(insn, 21, 1) == 0) {
- /* Floating point to fixed point conversions */
- disas_fp_fixed_conv(s, insn);
- } else {
- switch (extract32(insn, 10, 2)) {
- case 1:
- /* Floating point conditional compare */
- disas_fp_ccomp(s, insn);
- break;
- case 2:
- /* Floating point data-processing (2 source) */
- unallocated_encoding(s); /* in decodetree */
- break;
- case 3:
- /* Floating point conditional select */
- unallocated_encoding(s); /* in decodetree */
- break;
- case 0:
- switch (ctz32(extract32(insn, 12, 4))) {
- case 0: /* [15:12] == xxx1 */
- /* Floating point immediate */
- disas_fp_imm(s, insn);
- break;
- case 1: /* [15:12] == xx10 */
- /* Floating point compare */
- disas_fp_compare(s, insn);
- break;
- case 2: /* [15:12] == x100 */
- /* Floating point data-processing (1 source) */
- disas_fp_1src(s, insn);
- break;
- case 3: /* [15:12] == 1000 */
- unallocated_encoding(s);
- break;
- default: /* [15:12] == 0000 */
- /* Floating point <-> integer conversions */
- disas_fp_int_conv(s, insn);
- break;
- }
- break;
- }
- }
+ tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
}
-static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
- int pos)
+static void gen_cls32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
{
- /* Extract 64 bits from the middle of two concatenated 64 bit
- * vector register slices left:right. The extracted bits start
- * at 'pos' bits into the right (least significant) side.
- * We return the result in tcg_right, and guarantee not to
- * trash tcg_left.
- */
- TCGv_i64 tcg_tmp = tcg_temp_new_i64();
- assert(pos > 0 && pos < 64);
+ TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_shri_i64(tcg_right, tcg_right, pos);
- tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
- tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
+ tcg_gen_extrl_i64_i32(t32, tcg_rn);
+ tcg_gen_clrsb_i32(t32, t32);
+ tcg_gen_extu_i32_i64(tcg_rd, t32);
}
-/* EXT
- * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
- * +---+---+-------------+-----+---+------+---+------+---+------+------+
- * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
- * +---+---+-------------+-----+---+------+---+------+---+------+------+
- */
-static void disas_simd_ext(DisasContext *s, uint32_t insn)
-{
- int is_q = extract32(insn, 30, 1);
- int op2 = extract32(insn, 22, 2);
- int imm4 = extract32(insn, 11, 4);
- int rm = extract32(insn, 16, 5);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
- int pos = imm4 << 3;
- TCGv_i64 tcg_resl, tcg_resh;
-
- if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
- unallocated_encoding(s);
- return;
- }
+TRANS(CLZ, gen_rr, a->rd, a->rn, a->sf ? gen_clz64 : gen_clz32)
+TRANS(CLS, gen_rr, a->rd, a->rn, a->sf ? tcg_gen_clrsb_i64 : gen_cls32)
- if (!fp_access_check(s)) {
- return;
- }
-
- tcg_resh = tcg_temp_new_i64();
- tcg_resl = tcg_temp_new_i64();
+static bool gen_pacaut(DisasContext *s, arg_pacaut *a, NeonGenTwo64OpEnvFn fn)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
- /* Vd gets bits starting at pos bits into Vm:Vn. This is
- * either extracting 128 bits from a 128:128 concatenation, or
- * extracting 64 bits from a 64:64 concatenation.
- */
- if (!is_q) {
- read_vec_element(s, tcg_resl, rn, 0, MO_64);
- if (pos != 0) {
- read_vec_element(s, tcg_resh, rm, 0, MO_64);
- do_ext64(s, tcg_resh, tcg_resl, pos);
+ if (a->z) {
+ if (a->rn != 31) {
+ return false;
}
+ tcg_rn = tcg_constant_i64(0);
} else {
- TCGv_i64 tcg_hh;
- typedef struct {
- int reg;
- int elt;
- } EltPosns;
- EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
- EltPosns *elt = eltposns;
-
- if (pos >= 64) {
- elt++;
- pos -= 64;
- }
-
- read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
- elt++;
- read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
- elt++;
- if (pos != 0) {
- do_ext64(s, tcg_resh, tcg_resl, pos);
- tcg_hh = tcg_temp_new_i64();
- read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
- do_ext64(s, tcg_hh, tcg_resh, pos);
- }
+ tcg_rn = cpu_reg_sp(s, a->rn);
}
-
- write_vec_element(s, tcg_resl, rd, 0, MO_64);
- if (is_q) {
- write_vec_element(s, tcg_resh, rd, 1, MO_64);
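+    /* With pointer authentication inactive, Rd is left unchanged (NOP). */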
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, a->rd);
+ fn(tcg_rd, tcg_env, tcg_rd, tcg_rn);
}
- clear_vec_high(s, is_q, rd);
+ return true;
}
-/* TBL/TBX
- * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
- * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
- * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
- * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
- */
-static void disas_simd_tb(DisasContext *s, uint32_t insn)
-{
- int op2 = extract32(insn, 22, 2);
- int is_q = extract32(insn, 30, 1);
- int rm = extract32(insn, 16, 5);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
- int is_tbx = extract32(insn, 12, 1);
- int len = (extract32(insn, 13, 2) + 1) * 16;
+TRANS_FEAT(PACIA, aa64_pauth, gen_pacaut, a, gen_helper_pacia)
+TRANS_FEAT(PACIB, aa64_pauth, gen_pacaut, a, gen_helper_pacib)
+TRANS_FEAT(PACDA, aa64_pauth, gen_pacaut, a, gen_helper_pacda)
+TRANS_FEAT(PACDB, aa64_pauth, gen_pacaut, a, gen_helper_pacdb)
- if (op2 != 0) {
- unallocated_encoding(s);
- return;
- }
+TRANS_FEAT(AUTIA, aa64_pauth, gen_pacaut, a, gen_helper_autia)
+TRANS_FEAT(AUTIB, aa64_pauth, gen_pacaut, a, gen_helper_autib)
+TRANS_FEAT(AUTDA, aa64_pauth, gen_pacaut, a, gen_helper_autda)
+TRANS_FEAT(AUTDB, aa64_pauth, gen_pacaut, a, gen_helper_autdb)
- if (!fp_access_check(s)) {
- return;
+static bool do_xpac(DisasContext *s, int rd, NeonGenOne64OpEnvFn *fn)
+{
+ if (s->pauth_active) {
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ fn(tcg_rd, tcg_env, tcg_rd);
}
-
- tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rm), tcg_env,
- is_q ? 16 : 8, vec_full_reg_size(s),
- (len << 6) | (is_tbx << 5) | rn,
- gen_helper_simd_tblx);
+ return true;
}
-/* ZIP/UZP/TRN
- * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
- * +---+---+-------------+------+---+------+---+------------------+------+
- * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
- * +---+---+-------------+------+---+------+---+------------------+------+
- */
-static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int rm = extract32(insn, 16, 5);
- int size = extract32(insn, 22, 2);
- /* opc field bits [1:0] indicate ZIP/UZP/TRN;
- * bit 2 indicates 1 vs 2 variant of the insn.
- */
- int opcode = extract32(insn, 12, 2);
- bool part = extract32(insn, 14, 1);
- bool is_q = extract32(insn, 30, 1);
- int esize = 8 << size;
- int i;
- int datasize = is_q ? 128 : 64;
- int elements = datasize / esize;
- TCGv_i64 tcg_res[2], tcg_ele;
+TRANS_FEAT(XPACI, aa64_pauth, do_xpac, a->rd, gen_helper_xpaci)
+TRANS_FEAT(XPACD, aa64_pauth, do_xpac, a->rd, gen_helper_xpacd)
- if (opcode == 0 || (size == 3 && !is_q)) {
- unallocated_encoding(s);
- return;
+static bool do_logic_reg(DisasContext *s, arg_logic_shift *a,
+ ArithTwoOp *fn, ArithTwoOp *inv_fn, bool setflags)
+{
+ TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
+
+ if (!a->sf && (a->sa & (1 << 5))) {
+ return false;
}
- if (!fp_access_check(s)) {
- return;
+ tcg_rd = cpu_reg(s, a->rd);
+ tcg_rn = cpu_reg(s, a->rn);
+
+ tcg_rm = read_cpu_reg(s, a->rm, a->sf);
+ if (a->sa) {
+ shift_reg_imm(tcg_rm, tcg_rm, a->sf, a->st, a->sa);
}
- tcg_res[0] = tcg_temp_new_i64();
- tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
- tcg_ele = tcg_temp_new_i64();
+ (a->n ? inv_fn : fn)(tcg_rd, tcg_rn, tcg_rm);
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ if (setflags) {
+ gen_logic_CC(a->sf, tcg_rd);
+ }
+ return true;
+}
- for (i = 0; i < elements; i++) {
- int o, w;
+static bool trans_ORR_r(DisasContext *s, arg_logic_shift *a)
+{
+ /*
+     * Unshifted ORR and ORN with WZR/XZR are the standard encodings for
+     * register-register MOV and MVN, so they are worth special-casing.
+ */
+ if (a->sa == 0 && a->st == 0 && a->rn == 31) {
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_rm = cpu_reg(s, a->rm);
- switch (opcode) {
- case 1: /* UZP1/2 */
- {
- int midpoint = elements / 2;
- if (i < midpoint) {
- read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
- } else {
- read_vec_element(s, tcg_ele, rm,
- 2 * (i - midpoint) + part, size);
- }
- break;
- }
- case 2: /* TRN1/2 */
- if (i & 1) {
- read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
- } else {
- read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
+ if (a->n) {
+ tcg_gen_not_i64(tcg_rd, tcg_rm);
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
- break;
- case 3: /* ZIP1/2 */
- {
- int base = part * elements / 2;
- if (i & 1) {
- read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
+ } else {
+ if (a->sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_rm);
} else {
- read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
}
- break;
- }
- default:
- g_assert_not_reached();
- }
-
- w = (i * esize) / 64;
- o = (i * esize) % 64;
- if (o == 0) {
- tcg_gen_mov_i64(tcg_res[w], tcg_ele);
- } else {
- tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
- tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
}
+ return true;
}
- for (i = 0; i <= is_q; ++i) {
- write_vec_element(s, tcg_res[i], rd, i, MO_64);
- }
- clear_vec_high(s, is_q, rd);
+ return do_logic_reg(s, a, tcg_gen_or_i64, tcg_gen_orc_i64, false);
}
-/*
- * do_reduction_op helper
- *
- * This mirrors the Reduce() pseudocode in the ARM ARM. It is
- * important for correct NaN propagation that we do these
- * operations in exactly the order specified by the pseudocode.
- *
- * This is a recursive function, TCG temps should be freed by the
- * calling function once it is done with the values.
- */
-static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
- int esize, int size, int vmap, TCGv_ptr fpst)
+TRANS(AND_r, do_logic_reg, a, tcg_gen_and_i64, tcg_gen_andc_i64, false)
+TRANS(ANDS_r, do_logic_reg, a, tcg_gen_and_i64, tcg_gen_andc_i64, true)
+TRANS(EOR_r, do_logic_reg, a, tcg_gen_xor_i64, tcg_gen_eqv_i64, false)
+
+static bool do_addsub_ext(DisasContext *s, arg_addsub_ext *a,
+ bool sub_op, bool setflags)
{
- if (esize == size) {
- int element;
- MemOp msize = esize == 16 ? MO_16 : MO_32;
- TCGv_i32 tcg_elem;
+ TCGv_i64 tcg_rm, tcg_rn, tcg_rd, tcg_result;
- /* We should have one register left here */
- assert(ctpop8(vmap) == 1);
- element = ctz32(vmap);
- assert(element < 8);
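+    /* The extended-register forms only allow a shift amount of 0..4. */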
+ if (a->sa > 4) {
+ return false;
+ }
- tcg_elem = tcg_temp_new_i32();
- read_vec_element_i32(s, tcg_elem, rn, element, msize);
- return tcg_elem;
+ /* non-flag setting ops may use SP */
+ if (!setflags) {
+ tcg_rd = cpu_reg_sp(s, a->rd);
} else {
- int bits = size / 2;
- int shift = ctpop8(vmap) / 2;
- int vmap_lo = (vmap >> shift) & vmap;
- int vmap_hi = (vmap & ~vmap_lo);
- TCGv_i32 tcg_hi, tcg_lo, tcg_res;
-
- tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
- tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
- tcg_res = tcg_temp_new_i32();
-
- switch (fpopcode) {
- case 0x0c: /* fmaxnmv half-precision */
- gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x0f: /* fmaxv half-precision */
- gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x1c: /* fminnmv half-precision */
- gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x1f: /* fminv half-precision */
- gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x2c: /* fmaxnmv */
- gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x2f: /* fmaxv */
- gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x3c: /* fminnmv */
- gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- case 0x3f: /* fminv */
- gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
- break;
- default:
- g_assert_not_reached();
- }
- return tcg_res;
+ tcg_rd = cpu_reg(s, a->rd);
}
-}
+ tcg_rn = read_cpu_reg_sp(s, a->rn, a->sf);
-/* AdvSIMD across lanes
- * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
- * +---+---+---+-----------+------+-----------+--------+-----+------+------+
- * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
- * +---+---+---+-----------+------+-----------+--------+-----+------+------+
- */
-static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int size = extract32(insn, 22, 2);
- int opcode = extract32(insn, 12, 5);
- bool is_q = extract32(insn, 30, 1);
- bool is_u = extract32(insn, 29, 1);
- bool is_fp = false;
- bool is_min = false;
- int esize;
- int elements;
- int i;
- TCGv_i64 tcg_res, tcg_elt;
+ tcg_rm = read_cpu_reg(s, a->rm, a->sf);
+ ext_and_shift_reg(tcg_rm, tcg_rm, a->st, a->sa);
- switch (opcode) {
- case 0x1b: /* ADDV */
- if (is_u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x3: /* SADDLV, UADDLV */
- case 0xa: /* SMAXV, UMAXV */
- case 0x1a: /* SMINV, UMINV */
- if (size == 3 || (size == 2 && !is_q)) {
- unallocated_encoding(s);
- return;
+ tcg_result = tcg_temp_new_i64();
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
}
- break;
- case 0xc: /* FMAXNMV, FMINNMV */
- case 0xf: /* FMAXV, FMINV */
- /* Bit 1 of size field encodes min vs max and the actual size
- * depends on the encoding of the U bit. If not set (and FP16
- * enabled) then we do half-precision float instead of single
- * precision.
- */
- is_min = extract32(size, 1, 1);
- is_fp = true;
- if (!is_u && dc_isar_feature(aa64_fp16, s)) {
- size = 1;
- } else if (!is_u || !is_q || extract32(size, 0, 1)) {
- unallocated_encoding(s);
- return;
+ } else {
+ if (sub_op) {
+ gen_sub_CC(a->sf, tcg_result, tcg_rn, tcg_rm);
} else {
- size = 2;
+ gen_add_CC(a->sf, tcg_result, tcg_rn, tcg_rm);
}
- break;
- default:
- unallocated_encoding(s);
- return;
}
- if (!fp_access_check(s)) {
- return;
+ if (a->sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
}
+ return true;
+}
- esize = 8 << size;
- elements = (is_q ? 128 : 64) / esize;
-
- tcg_res = tcg_temp_new_i64();
- tcg_elt = tcg_temp_new_i64();
-
- /* These instructions operate across all lanes of a vector
- * to produce a single result. We can guarantee that a 64
- * bit intermediate is sufficient:
- * + for [US]ADDLV the maximum element size is 32 bits, and
- * the result type is 64 bits
- * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
- * same as the element size, which is 32 bits at most
- * For the integer operations we can choose to work at 64
- * or 32 bits and truncate at the end; for simplicity
- * we use 64 bits always. The floating point
- * ops do require 32 bit intermediates, though.
- */
- if (!is_fp) {
- read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
-
- for (i = 1; i < elements; i++) {
- read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
-
- switch (opcode) {
- case 0x03: /* SADDLV / UADDLV */
- case 0x1b: /* ADDV */
- tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
- break;
- case 0x0a: /* SMAXV / UMAXV */
- if (is_u) {
- tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
- } else {
- tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
- }
- break;
- case 0x1a: /* SMINV / UMINV */
- if (is_u) {
- tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
- } else {
- tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
- }
- break;
- default:
- g_assert_not_reached();
- }
+TRANS(ADD_ext, do_addsub_ext, a, false, false)
+TRANS(SUB_ext, do_addsub_ext, a, true, false)
+TRANS(ADDS_ext, do_addsub_ext, a, false, true)
+TRANS(SUBS_ext, do_addsub_ext, a, true, true)
- }
- } else {
- /* Floating point vector reduction ops which work across 32
- * bit (single) or 16 bit (half-precision) intermediates.
- * Note that correct NaN propagation requires that we do these
- * operations in exactly the order specified by the pseudocode.
- */
- TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- int fpopcode = opcode | is_min << 4 | is_u << 5;
- int vmap = (1 << elements) - 1;
- TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
- (is_q ? 128 : 64), vmap, fpst);
- tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
- }
+static bool do_addsub_reg(DisasContext *s, arg_addsub_shift *a,
+ bool sub_op, bool setflags)
+{
+ TCGv_i64 tcg_rd, tcg_rn, tcg_rm, tcg_result;
- /* Now truncate the result to the width required for the final output */
- if (opcode == 0x03) {
- /* SADDLV, UADDLV: result is 2*esize */
- size++;
+ if (a->st == 3 || (!a->sf && (a->sa & 32))) {
+ return false;
}
- switch (size) {
- case 0:
- tcg_gen_ext8u_i64(tcg_res, tcg_res);
- break;
- case 1:
- tcg_gen_ext16u_i64(tcg_res, tcg_res);
- break;
- case 2:
- tcg_gen_ext32u_i64(tcg_res, tcg_res);
- break;
- case 3:
- break;
- default:
- g_assert_not_reached();
- }
+ tcg_rd = cpu_reg(s, a->rd);
+ tcg_rn = read_cpu_reg(s, a->rn, a->sf);
+ tcg_rm = read_cpu_reg(s, a->rm, a->sf);
- write_fp_dreg(s, rd, tcg_res);
-}
+ shift_reg_imm(tcg_rm, tcg_rm, a->sf, a->st, a->sa);
-/* AdvSIMD modified immediate
- * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- *
- * There are a number of operations that can be carried out here:
- * MOVI - move (shifted) imm into register
- * MVNI - move inverted (shifted) imm into register
- * ORR - bitwise OR of (shifted) imm with register
- * BIC - bitwise clear of (shifted) imm with register
- * With ARMv8.2 we also have:
- * FMOV half-precision
- */
-static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int cmode = extract32(insn, 12, 4);
- int o2 = extract32(insn, 11, 1);
- uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
- bool is_neg = extract32(insn, 29, 1);
- bool is_q = extract32(insn, 30, 1);
- uint64_t imm = 0;
-
- if (o2) {
- if (cmode != 0xf || is_neg) {
- unallocated_encoding(s);
- return;
- }
- /* FMOV (vector, immediate) - half-precision */
- if (!dc_isar_feature(aa64_fp16, s)) {
- unallocated_encoding(s);
- return;
+ tcg_result = tcg_temp_new_i64();
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
}
- imm = vfp_expand_imm(MO_16, abcdefgh);
- /* now duplicate across the lanes */
- imm = dup_const(MO_16, imm);
} else {
- if (cmode == 0xf && is_neg && !is_q) {
- unallocated_encoding(s);
- return;
+ if (sub_op) {
+ gen_sub_CC(a->sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(a->sf, tcg_result, tcg_rn, tcg_rm);
}
- imm = asimd_imm_const(abcdefgh, cmode, is_neg);
- }
-
- if (!fp_access_check(s)) {
- return;
}
- if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
- /* MOVI or MVNI, with MVNI negation handled above. */
- tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
- vec_full_reg_size(s), imm);
+ if (a->sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
} else {
- /* ORR or BIC, with BIC negation to AND handled above. */
- if (is_neg) {
- gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
- } else {
- gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
- }
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
}
+ return true;
}
-/*
- * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
- *
- * This code is handles the common shifting code and is used by both
- * the vector and scalar code.
- */
-static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
- TCGv_i64 tcg_rnd, bool accumulate,
- bool is_u, int size, int shift)
-{
- bool extended_result = false;
- bool round = tcg_rnd != NULL;
- int ext_lshift = 0;
- TCGv_i64 tcg_src_hi;
-
- if (round && size == 3) {
- extended_result = true;
- ext_lshift = 64 - shift;
- tcg_src_hi = tcg_temp_new_i64();
- } else if (shift == 64) {
- if (!accumulate && is_u) {
- /* result is zero */
- tcg_gen_movi_i64(tcg_res, 0);
- return;
- }
- }
+TRANS(ADD_r, do_addsub_reg, a, false, false)
+TRANS(SUB_r, do_addsub_reg, a, true, false)
+TRANS(ADDS_r, do_addsub_reg, a, false, true)
+TRANS(SUBS_r, do_addsub_reg, a, true, true)
- /* Deal with the rounding step */
- if (round) {
- if (extended_result) {
- TCGv_i64 tcg_zero = tcg_constant_i64(0);
- if (!is_u) {
- /* take care of sign extending tcg_res */
- tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
- tcg_gen_add2_i64(tcg_src, tcg_src_hi,
- tcg_src, tcg_src_hi,
- tcg_rnd, tcg_zero);
- } else {
- tcg_gen_add2_i64(tcg_src, tcg_src_hi,
- tcg_src, tcg_zero,
- tcg_rnd, tcg_zero);
- }
- } else {
- tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
- }
+static bool do_mulh(DisasContext *s, arg_rrr *a,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 discard = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_rn = cpu_reg(s, a->rn);
+ TCGv_i64 tcg_rm = cpu_reg(s, a->rm);
+
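+    /* Low half of the product goes to 'discard', high half to Rd. */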
+ fn(discard, tcg_rd, tcg_rn, tcg_rm);
+ return true;
+}
+
+TRANS(SMULH, do_mulh, a, tcg_gen_muls2_i64)
+TRANS(UMULH, do_mulh, a, tcg_gen_mulu2_i64)
+
+static bool do_muladd(DisasContext *s, arg_rrrr *a,
+ bool sf, bool is_sub, MemOp mop)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_op1, tcg_op2;
+
+ if (mop == MO_64) {
+ tcg_op1 = cpu_reg(s, a->rn);
+ tcg_op2 = cpu_reg(s, a->rm);
+ } else {
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_gen_ext_i64(tcg_op1, cpu_reg(s, a->rn), mop);
+ tcg_gen_ext_i64(tcg_op2, cpu_reg(s, a->rm), mop);
}
- /* Now do the shift right */
- if (round && extended_result) {
- /* extended case, >64 bit precision required */
- if (ext_lshift == 0) {
- /* special case, only high bits matter */
- tcg_gen_mov_i64(tcg_src, tcg_src_hi);
- } else {
- tcg_gen_shri_i64(tcg_src, tcg_src, shift);
- tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
- tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
- }
+ if (a->ra == 31 && !is_sub) {
+ /* Special-case MADD with rA == XZR; it is the standard MUL alias */
+ tcg_gen_mul_i64(tcg_rd, tcg_op1, tcg_op2);
} else {
- if (is_u) {
- if (shift == 64) {
- /* essentially shifting in 64 zeros */
- tcg_gen_movi_i64(tcg_src, 0);
- } else {
- tcg_gen_shri_i64(tcg_src, tcg_src, shift);
- }
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_ra = cpu_reg(s, a->ra);
+
+ tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
+ if (is_sub) {
+ tcg_gen_sub_i64(tcg_rd, tcg_ra, tcg_tmp);
} else {
- if (shift == 64) {
- /* effectively extending the sign-bit */
- tcg_gen_sari_i64(tcg_src, tcg_src, 63);
- } else {
- tcg_gen_sari_i64(tcg_src, tcg_src, shift);
- }
+ tcg_gen_add_i64(tcg_rd, tcg_ra, tcg_tmp);
}
}
- if (accumulate) {
- tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
- } else {
- tcg_gen_mov_i64(tcg_res, tcg_src);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
+ return true;
}
-/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
-static void handle_scalar_simd_shri(DisasContext *s,
- bool is_u, int immh, int immb,
- int opcode, int rn, int rd)
-{
- const int size = 3;
- int immhb = immh << 3 | immb;
- int shift = 2 * (8 << size) - immhb;
- bool accumulate = false;
- bool round = false;
- bool insert = false;
- TCGv_i64 tcg_rn;
- TCGv_i64 tcg_rd;
- TCGv_i64 tcg_round;
+TRANS(MADD_w, do_muladd, a, false, false, MO_64)
+TRANS(MSUB_w, do_muladd, a, false, true, MO_64)
+TRANS(MADD_x, do_muladd, a, true, false, MO_64)
+TRANS(MSUB_x, do_muladd, a, true, true, MO_64)
- if (!extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
- }
+TRANS(SMADDL, do_muladd, a, true, false, MO_SL)
+TRANS(SMSUBL, do_muladd, a, true, true, MO_SL)
+TRANS(UMADDL, do_muladd, a, true, false, MO_UL)
+TRANS(UMSUBL, do_muladd, a, true, true, MO_UL)
- if (!fp_access_check(s)) {
- return;
- }
+static bool do_adc_sbc(DisasContext *s, arg_rrr_sf *a,
+ bool is_sub, bool setflags)
+{
+ TCGv_i64 tcg_y, tcg_rn, tcg_rd;
- switch (opcode) {
- case 0x02: /* SSRA / USRA (accumulate) */
- accumulate = true;
- break;
- case 0x04: /* SRSHR / URSHR (rounding) */
- round = true;
- break;
- case 0x06: /* SRSRA / URSRA (accum + rounding) */
- accumulate = round = true;
- break;
- case 0x08: /* SRI */
- insert = true;
- break;
- }
+ tcg_rd = cpu_reg(s, a->rd);
+ tcg_rn = cpu_reg(s, a->rn);
- if (round) {
- tcg_round = tcg_constant_i64(1ULL << (shift - 1));
+ if (is_sub) {
+ tcg_y = tcg_temp_new_i64();
+ tcg_gen_not_i64(tcg_y, cpu_reg(s, a->rm));
} else {
- tcg_round = NULL;
+ tcg_y = cpu_reg(s, a->rm);
}
- tcg_rn = read_fp_dreg(s, rn);
- tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
-
- if (insert) {
- /* shift count same as element size is valid but does nothing;
- * special case to avoid potential shift by 64.
- */
- int esize = 8 << size;
- if (shift != esize) {
- tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
- }
+ if (setflags) {
+ gen_adc_CC(a->sf, tcg_rd, tcg_rn, tcg_y);
} else {
- handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
- accumulate, is_u, size, shift);
+ gen_adc(a->sf, tcg_rd, tcg_rn, tcg_y);
}
-
- write_fp_dreg(s, rd, tcg_rd);
+ return true;
}
-/* SHL/SLI - Scalar shift left */
-static void handle_scalar_simd_shli(DisasContext *s, bool insert,
- int immh, int immb, int opcode,
- int rn, int rd)
+TRANS(ADC, do_adc_sbc, a, false, false)
+TRANS(SBC, do_adc_sbc, a, true, false)
+TRANS(ADCS, do_adc_sbc, a, false, true)
+TRANS(SBCS, do_adc_sbc, a, true, true)
+
+static bool trans_RMIF(DisasContext *s, arg_RMIF *a)
{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
+ int mask = a->mask;
TCGv_i64 tcg_rn;
- TCGv_i64 tcg_rd;
+ TCGv_i32 nzcv;
- if (!extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
+ if (!dc_isar_feature(aa64_condm_4, s)) {
+ return false;
}
- if (!fp_access_check(s)) {
- return;
+ tcg_rn = read_cpu_reg(s, a->rn, 1);
+ tcg_gen_rotri_i64(tcg_rn, tcg_rn, a->imm);
+
+ nzcv = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
+
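+    /* QEMU flag layout: NF and VF use bit 31, ZF is zero exactly when Z
+     * is set, and CF holds the carry in bit 0.
+     */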
+ if (mask & 8) { /* N */
+ tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
+ }
+ if (mask & 4) { /* Z */
+ tcg_gen_not_i32(cpu_ZF, nzcv);
+ tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
+ }
+ if (mask & 2) { /* C */
+ tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
}
+ if (mask & 1) { /* V */
+ tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
+ }
+ return true;
+}
- tcg_rn = read_fp_dreg(s, rn);
- tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+static bool do_setf(DisasContext *s, int rn, int shift)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
- if (insert) {
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
- } else {
- tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
- }
-
- write_fp_dreg(s, rd, tcg_rd);
-}
-
-/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
- * (signed/unsigned) narrowing */
-static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
- bool is_u_shift, bool is_u_narrow,
- int immh, int immb, int opcode,
- int rn, int rd)
-{
- int immhb = immh << 3 | immb;
- int size = 32 - clz32(immh) - 1;
- int esize = 8 << size;
- int shift = (2 * esize) - immhb;
- int elements = is_scalar ? 1 : (64 / esize);
- bool round = extract32(opcode, 0, 1);
- MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
- TCGv_i64 tcg_rn, tcg_rd, tcg_round;
- TCGv_i32 tcg_rd_narrowed;
- TCGv_i64 tcg_final;
-
- static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
- { gen_helper_neon_narrow_sat_s8,
- gen_helper_neon_unarrow_sat8 },
- { gen_helper_neon_narrow_sat_s16,
- gen_helper_neon_unarrow_sat16 },
- { gen_helper_neon_narrow_sat_s32,
- gen_helper_neon_unarrow_sat32 },
- { NULL, NULL },
- };
- static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
- gen_helper_neon_narrow_sat_u8,
- gen_helper_neon_narrow_sat_u16,
- gen_helper_neon_narrow_sat_u32,
- NULL
- };
- NeonGenNarrowEnvFn *narrowfn;
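+    /* N and Z are taken from the low byte/halfword of Xn; V is its sign
+     * bit XORed with the next bit up.  C is left unchanged.
+     */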
+ tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
+ tcg_gen_shli_i32(cpu_NF, tmp, shift);
+ tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
+ return true;
+}
- int i;
+TRANS_FEAT(SETF8, aa64_condm_4, do_setf, a->rn, 24)
+TRANS_FEAT(SETF16, aa64_condm_4, do_setf, a->rn, 16)
- assert(size < 4);
+/* CCMP, CCMN */
+static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
+{
+ TCGv_i32 tcg_t0 = tcg_temp_new_i32();
+ TCGv_i32 tcg_t1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_t2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn, tcg_y;
+ DisasCompare c;
+ unsigned nzcv;
+ bool has_andc;
- if (extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
- }
+ /* Set T0 = !COND. */
+ arm_test_cc(&c, a->cond);
+ tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
- if (!fp_access_check(s)) {
- return;
+ /* Load the arguments for the new comparison. */
+ if (a->imm) {
+ tcg_y = tcg_constant_i64(a->y);
+ } else {
+ tcg_y = cpu_reg(s, a->y);
}
+ tcg_rn = cpu_reg(s, a->rn);
- if (is_u_shift) {
- narrowfn = unsigned_narrow_fns[size];
+ /* Set the flags for the new comparison. */
+ if (a->op) {
+ gen_sub_CC(a->sf, tcg_tmp, tcg_rn, tcg_y);
} else {
- narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
+ gen_add_CC(a->sf, tcg_tmp, tcg_rn, tcg_y);
}
- tcg_rn = tcg_temp_new_i64();
- tcg_rd = tcg_temp_new_i64();
- tcg_rd_narrowed = tcg_temp_new_i32();
- tcg_final = tcg_temp_new_i64();
+ /*
+ * If COND was false, force the flags to #nzcv. Compute two masks
+ * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
+ * For tcg hosts that support ANDC, we can make do with just T1.
+ * In either case, allow the tcg optimizer to delete any unused mask.
+ */
+ tcg_gen_neg_i32(tcg_t1, tcg_t0);
+ tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
- if (round) {
- tcg_round = tcg_constant_i64(1ULL << (shift - 1));
+ nzcv = a->nzcv;
+ has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0);
+ if (nzcv & 8) { /* N */
+ tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ if (has_andc) {
+ tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
+ }
+ }
+ if (nzcv & 4) { /* Z */
+ if (has_andc) {
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
+ }
} else {
- tcg_round = NULL;
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
}
-
- for (i = 0; i < elements; i++) {
- read_vec_element(s, tcg_rn, rn, i, ldop);
- handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
- false, is_u_shift, size+1, shift);
- narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
- tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
- if (i == 0) {
- tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize);
+ if (nzcv & 2) { /* C */
+ tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
+ } else {
+ if (has_andc) {
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
} else {
- tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
}
}
-
- if (!is_q) {
- write_vec_element(s, tcg_final, rd, 0, MO_64);
+ if (nzcv & 1) { /* V */
+ tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
} else {
- write_vec_element(s, tcg_final, rd, 1, MO_64);
+ if (has_andc) {
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+ }
}
- clear_vec_high(s, is_q, rd);
+ return true;
}
-/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
-static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
- bool src_unsigned, bool dst_unsigned,
- int immh, int immb, int rn, int rd)
+static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
{
- int immhb = immh << 3 | immb;
- int size = 32 - clz32(immh) - 1;
- int shift = immhb - (8 << size);
- int pass;
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 zero = tcg_constant_i64(0);
+ DisasCompare64 c;
- assert(immh != 0);
- assert(!(scalar && is_q));
+ a64_test_cc(&c, a->cond);
- if (!scalar) {
- if (!is_q && extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
+ if (a->rn == 31 && a->rm == 31 && (a->else_inc ^ a->else_inv)) {
+ /* CSET & CSETM. */
+ if (a->else_inv) {
+ tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
+ tcg_rd, c.value, zero);
+ } else {
+ tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
+ tcg_rd, c.value, zero);
}
+ } else {
+ TCGv_i64 t_true = cpu_reg(s, a->rn);
+ TCGv_i64 t_false = read_cpu_reg(s, a->rm, 1);
- /* Since we use the variable-shift helpers we must
- * replicate the shift count into each element of
- * the tcg_shift value.
- */
- switch (size) {
- case 0:
- shift |= shift << 8;
- /* fall through */
- case 1:
- shift |= shift << 16;
- break;
- case 2:
- case 3:
- break;
- default:
- g_assert_not_reached();
+ if (a->else_inv && a->else_inc) {
+ tcg_gen_neg_i64(t_false, t_false);
+ } else if (a->else_inv) {
+ tcg_gen_not_i64(t_false, t_false);
+ } else if (a->else_inc) {
+ tcg_gen_addi_i64(t_false, t_false, 1);
}
+ tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
}
- if (!fp_access_check(s)) {
- return;
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
+ return true;
+}
- if (size == 3) {
- TCGv_i64 tcg_shift = tcg_constant_i64(shift);
- static NeonGenTwo64OpEnvFn * const fns[2][2] = {
- { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
- { NULL, gen_helper_neon_qshl_u64 },
- };
- NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
- int maxpass = is_q ? 2 : 1;
-
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
+typedef struct FPScalar1Int {
+ void (*gen_h)(TCGv_i32, TCGv_i32);
+ void (*gen_s)(TCGv_i32, TCGv_i32);
+ void (*gen_d)(TCGv_i64, TCGv_i64);
+} FPScalar1Int;
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
- write_vec_element(s, tcg_op, rd, pass, MO_64);
- }
- clear_vec_high(s, is_q, rd);
- } else {
- TCGv_i32 tcg_shift = tcg_constant_i32(shift);
- static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
- {
- { gen_helper_neon_qshl_s8,
- gen_helper_neon_qshl_s16,
- gen_helper_neon_qshl_s32 },
- { gen_helper_neon_qshlu_s8,
- gen_helper_neon_qshlu_s16,
- gen_helper_neon_qshlu_s32 }
- }, {
- { NULL, NULL, NULL },
- { gen_helper_neon_qshl_u8,
- gen_helper_neon_qshl_u16,
- gen_helper_neon_qshl_u32 }
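+/*
+ * Expand a 1-operand scalar FP op from an FPScalar1Int table, dispatching
+ * on element size (half-precision requires FEAT_FP16).  "merging" selects
+ * the merging writeback instead of a plain write that zeroes the rest of
+ * the vector register.
+ */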
+static bool do_fp1_scalar_int(DisasContext *s, arg_rr_e *a,
+ const FPScalar1Int *f,
+ bool merging)
+{
+ switch (a->esz) {
+ case MO_64:
+ if (fp_access_check(s)) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
+ f->gen_d(t, t);
+ if (merging) {
+ write_fp_dreg_merging(s, a->rd, a->rd, t);
+ } else {
+ write_fp_dreg(s, a->rd, t);
}
- };
- NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
- MemOp memop = scalar ? size : MO_32;
- int maxpass = scalar ? 1 : is_q ? 4 : 2;
-
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
-
- read_vec_element_i32(s, tcg_op, rn, pass, memop);
- genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
- if (scalar) {
- switch (size) {
- case 0:
- tcg_gen_ext8u_i32(tcg_op, tcg_op);
- break;
- case 1:
- tcg_gen_ext16u_i32(tcg_op, tcg_op);
- break;
- case 2:
- break;
- default:
- g_assert_not_reached();
- }
- write_fp_sreg(s, rd, tcg_op);
+ }
+ break;
+ case MO_32:
+ if (fp_access_check(s)) {
+ TCGv_i32 t = read_fp_sreg(s, a->rn);
+ f->gen_s(t, t);
+ if (merging) {
+ write_fp_sreg_merging(s, a->rd, a->rd, t);
} else {
- write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ write_fp_sreg(s, a->rd, t);
}
}
-
- if (!scalar) {
- clear_vec_high(s, is_q, rd);
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 t = read_fp_hreg(s, a->rn);
+ f->gen_h(t, t);
+ if (merging) {
+ write_fp_hreg_merging(s, a->rd, a->rd, t);
+ } else {
+ write_fp_sreg(s, a->rd, t);
+ }
}
+ break;
+ default:
+ return false;
}
+ return true;
}
-/* Common vector code for handling integer to FP conversion */
-static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
- int elements, int is_signed,
- int fracbits, int size)
+static bool do_fp1_scalar_int_2fn(DisasContext *s, arg_rr_e *a,
+ const FPScalar1Int *fnormal,
+ const FPScalar1Int *fah)
{
- TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- TCGv_i32 tcg_shift = NULL;
+ return do_fp1_scalar_int(s, a, s->fpcr_ah ? fah : fnormal, true);
+}
- MemOp mop = size | (is_signed ? MO_SIGN : 0);
- int pass;
+static const FPScalar1Int f_scalar_fmov = {
+ tcg_gen_mov_i32,
+ tcg_gen_mov_i32,
+ tcg_gen_mov_i64,
+};
+TRANS(FMOV_s, do_fp1_scalar_int, a, &f_scalar_fmov, false)
- if (fracbits || size == MO_64) {
- tcg_shift = tcg_constant_i32(fracbits);
- }
+static const FPScalar1Int f_scalar_fabs = {
+ gen_vfp_absh,
+ gen_vfp_abss,
+ gen_vfp_absd,
+};
+static const FPScalar1Int f_scalar_ah_fabs = {
+ gen_vfp_ah_absh,
+ gen_vfp_ah_abss,
+ gen_vfp_ah_absd,
+};
+TRANS(FABS_s, do_fp1_scalar_int_2fn, a, &f_scalar_fabs, &f_scalar_ah_fabs)
- if (size == MO_64) {
- TCGv_i64 tcg_int64 = tcg_temp_new_i64();
- TCGv_i64 tcg_double = tcg_temp_new_i64();
+static const FPScalar1Int f_scalar_fneg = {
+ gen_vfp_negh,
+ gen_vfp_negs,
+ gen_vfp_negd,
+};
+static const FPScalar1Int f_scalar_ah_fneg = {
+ gen_vfp_ah_negh,
+ gen_vfp_ah_negs,
+ gen_vfp_ah_negd,
+};
+TRANS(FNEG_s, do_fp1_scalar_int_2fn, a, &f_scalar_fneg, &f_scalar_ah_fneg)
- for (pass = 0; pass < elements; pass++) {
- read_vec_element(s, tcg_int64, rn, pass, mop);
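+/*
+ * As FPScalar1Int, but for 1-operand ops whose helpers take a
+ * float_status pointer.
+ */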
+typedef struct FPScalar1 {
+ void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_ptr);
+ void (*gen_s)(TCGv_i32, TCGv_i32, TCGv_ptr);
+ void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_ptr);
+} FPScalar1;
- if (is_signed) {
- gen_helper_vfp_sqtod(tcg_double, tcg_int64,
- tcg_shift, tcg_fpst);
- } else {
- gen_helper_vfp_uqtod(tcg_double, tcg_int64,
- tcg_shift, tcg_fpst);
- }
- if (elements == 1) {
- write_fp_dreg(s, rd, tcg_double);
- } else {
- write_vec_element(s, tcg_double, rd, pass, MO_64);
- }
- }
- } else {
- TCGv_i32 tcg_int32 = tcg_temp_new_i32();
- TCGv_i32 tcg_float = tcg_temp_new_i32();
-
- for (pass = 0; pass < elements; pass++) {
- read_vec_element_i32(s, tcg_int32, rn, pass, mop);
-
- switch (size) {
- case MO_32:
- if (fracbits) {
- if (is_signed) {
- gen_helper_vfp_sltos(tcg_float, tcg_int32,
- tcg_shift, tcg_fpst);
- } else {
- gen_helper_vfp_ultos(tcg_float, tcg_int32,
- tcg_shift, tcg_fpst);
- }
- } else {
- if (is_signed) {
- gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
- } else {
- gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
- }
- }
- break;
- case MO_16:
- if (fracbits) {
- if (is_signed) {
- gen_helper_vfp_sltoh(tcg_float, tcg_int32,
- tcg_shift, tcg_fpst);
- } else {
- gen_helper_vfp_ultoh(tcg_float, tcg_int32,
- tcg_shift, tcg_fpst);
- }
- } else {
- if (is_signed) {
- gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
- } else {
- gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
- }
- }
- break;
- default:
- g_assert_not_reached();
- }
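+/*
+ * Expand a 1-operand scalar FP op using the given float_status flavour.
+ * A negative rmode leaves the prevailing rounding mode in place;
+ * otherwise rmode is installed for the operation and then restored.
+ */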
+static bool do_fp1_scalar_with_fpsttype(DisasContext *s, arg_rr_e *a,
+ const FPScalar1 *f, int rmode,
+ ARMFPStatusFlavour fpsttype)
+{
+ TCGv_i32 tcg_rmode = NULL;
+ TCGv_ptr fpst;
+ TCGv_i64 t64;
+ TCGv_i32 t32;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- if (elements == 1) {
- write_fp_sreg(s, rd, tcg_float);
- } else {
- write_vec_element_i32(s, tcg_float, rd, pass, size);
- }
- }
+ if (check <= 0) {
+ return check == 0;
}
- clear_vec_high(s, elements << size == 16, rd);
-}
-
-/* UCVTF/SCVTF - Integer to FP conversion */
-static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
- bool is_q, bool is_u,
- int immh, int immb, int opcode,
- int rn, int rd)
-{
- int size, elements, fracbits;
- int immhb = immh << 3 | immb;
-
- if (immh & 8) {
- size = MO_64;
- if (!is_scalar && !is_q) {
- unallocated_encoding(s);
- return;
- }
- } else if (immh & 4) {
- size = MO_32;
- } else if (immh & 2) {
- size = MO_16;
- if (!dc_isar_feature(aa64_fp16, s)) {
- unallocated_encoding(s);
- return;
- }
- } else {
- /* immh == 0 would be a failure of the decode logic */
- g_assert(immh == 1);
- unallocated_encoding(s);
- return;
+ fpst = fpstatus_ptr(fpsttype);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, fpst);
}
- if (is_scalar) {
- elements = 1;
- } else {
- elements = (8 << is_q) >> size;
+ switch (a->esz) {
+ case MO_64:
+ t64 = read_fp_dreg(s, a->rn);
+ f->gen_d(t64, t64, fpst);
+ write_fp_dreg_merging(s, a->rd, a->rd, t64);
+ break;
+ case MO_32:
+ t32 = read_fp_sreg(s, a->rn);
+ f->gen_s(t32, t32, fpst);
+ write_fp_sreg_merging(s, a->rd, a->rd, t32);
+ break;
+ case MO_16:
+ t32 = read_fp_hreg(s, a->rn);
+ f->gen_h(t32, t32, fpst);
+ write_fp_hreg_merging(s, a->rd, a->rd, t32);
+ break;
+ default:
+ g_assert_not_reached();
}
- fracbits = (16 << size) - immhb;
- if (!fp_access_check(s)) {
- return;
+ if (rmode >= 0) {
+ gen_restore_rmode(tcg_rmode, fpst);
}
+ return true;
+}
- handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
+static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a,
+ const FPScalar1 *f, int rmode)
+{
+ return do_fp1_scalar_with_fpsttype(s, a, f, rmode,
+ a->esz == MO_16 ?
+ FPST_A64_F16 : FPST_A64);
}
-/* FCVTZS, FVCVTZU - FP to fixedpoint conversion */
-static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
- bool is_q, bool is_u,
- int immh, int immb, int rn, int rd)
+static bool do_fp1_scalar_ah(DisasContext *s, arg_rr_e *a,
+ const FPScalar1 *f, int rmode)
{
- int immhb = immh << 3 | immb;
- int pass, size, fracbits;
- TCGv_ptr tcg_fpstatus;
- TCGv_i32 tcg_rmode, tcg_shift;
+ return do_fp1_scalar_with_fpsttype(s, a, f, rmode, select_ah_fpst(s, a->esz));
+}
- if (immh & 0x8) {
- size = MO_64;
- if (!is_scalar && !is_q) {
- unallocated_encoding(s);
- return;
- }
- } else if (immh & 0x4) {
- size = MO_32;
- } else if (immh & 0x2) {
- size = MO_16;
- if (!dc_isar_feature(aa64_fp16, s)) {
- unallocated_encoding(s);
- return;
- }
- } else {
- /* Should have split out AdvSIMD modified immediate earlier. */
- assert(immh == 1);
- unallocated_encoding(s);
- return;
- }
+static const FPScalar1 f_scalar_fsqrt = {
+ gen_helper_vfp_sqrth,
+ gen_helper_vfp_sqrts,
+ gen_helper_vfp_sqrtd,
+};
+TRANS(FSQRT_s, do_fp1_scalar, a, &f_scalar_fsqrt, -1)
- if (!fp_access_check(s)) {
- return;
- }
+static const FPScalar1 f_scalar_frint = {
+ gen_helper_advsimd_rinth,
+ gen_helper_rints,
+ gen_helper_rintd,
+};
+TRANS(FRINTN_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_TIEEVEN)
+TRANS(FRINTP_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_POSINF)
+TRANS(FRINTM_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_NEGINF)
+TRANS(FRINTZ_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_ZERO)
+TRANS(FRINTA_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_TIEAWAY)
+TRANS(FRINTI_s, do_fp1_scalar, a, &f_scalar_frint, -1)
+
+static const FPScalar1 f_scalar_frintx = {
+ gen_helper_advsimd_rinth_exact,
+ gen_helper_rints_exact,
+ gen_helper_rintd_exact,
+};
+TRANS(FRINTX_s, do_fp1_scalar, a, &f_scalar_frintx, -1)
+
+static bool trans_BFCVT_s(DisasContext *s, arg_rr_e *a)
+{
+ ARMFPStatusFlavour fpsttype = s->fpcr_ah ? FPST_AH : FPST_A64;
+ TCGv_i32 t32;
+ int check;
- assert(!(is_scalar && is_q));
+ if (!dc_isar_feature(aa64_bf16, s)) {
+ return false;
+ }
- tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
- fracbits = (16 << size) - immhb;
- tcg_shift = tcg_constant_i32(fracbits);
+ check = fp_access_check_scalar_hsd(s, a->esz);
- if (size == MO_64) {
- int maxpass = is_scalar ? 1 : 2;
+ if (check <= 0) {
+ return check == 0;
+ }
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
+ t32 = read_fp_sreg(s, a->rn);
+ gen_helper_bfcvt(t32, t32, fpstatus_ptr(fpsttype));
+ write_fp_hreg_merging(s, a->rd, a->rd, t32);
+ return true;
+}
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- if (is_u) {
- gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
- } else {
- gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
- }
- write_vec_element(s, tcg_op, rd, pass, MO_64);
- }
- clear_vec_high(s, is_q, rd);
- } else {
- void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
- int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
+static const FPScalar1 f_scalar_frint32 = {
+ NULL,
+ gen_helper_frint32_s,
+ gen_helper_frint32_d,
+};
+TRANS_FEAT(FRINT32Z_s, aa64_frint, do_fp1_scalar, a,
+ &f_scalar_frint32, FPROUNDING_ZERO)
+TRANS_FEAT(FRINT32X_s, aa64_frint, do_fp1_scalar, a, &f_scalar_frint32, -1)
+
+static const FPScalar1 f_scalar_frint64 = {
+ NULL,
+ gen_helper_frint64_s,
+ gen_helper_frint64_d,
+};
+TRANS_FEAT(FRINT64Z_s, aa64_frint, do_fp1_scalar, a,
+ &f_scalar_frint64, FPROUNDING_ZERO)
+TRANS_FEAT(FRINT64X_s, aa64_frint, do_fp1_scalar, a, &f_scalar_frint64, -1)
+
+static const FPScalar1 f_scalar_frecpe = {
+ gen_helper_recpe_f16,
+ gen_helper_recpe_f32,
+ gen_helper_recpe_f64,
+};
+static const FPScalar1 f_scalar_frecpe_rpres = {
+ gen_helper_recpe_f16,
+ gen_helper_recpe_rpres_f32,
+ gen_helper_recpe_f64,
+};
+TRANS(FRECPE_s, do_fp1_scalar_ah, a,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
+ &f_scalar_frecpe_rpres : &f_scalar_frecpe, -1)
+
+static const FPScalar1 f_scalar_frecpx = {
+ gen_helper_frecpx_f16,
+ gen_helper_frecpx_f32,
+ gen_helper_frecpx_f64,
+};
+TRANS(FRECPX_s, do_fp1_scalar_ah, a, &f_scalar_frecpx, -1)
- switch (size) {
- case MO_16:
- if (is_u) {
- fn = gen_helper_vfp_touhh;
- } else {
- fn = gen_helper_vfp_toshh;
- }
- break;
- case MO_32:
- if (is_u) {
- fn = gen_helper_vfp_touls;
- } else {
- fn = gen_helper_vfp_tosls;
- }
- break;
- default:
- g_assert_not_reached();
- }
+static const FPScalar1 f_scalar_frsqrte = {
+ gen_helper_rsqrte_f16,
+ gen_helper_rsqrte_f32,
+ gen_helper_rsqrte_f64,
+};
+static const FPScalar1 f_scalar_frsqrte_rpres = {
+ gen_helper_rsqrte_f16,
+ gen_helper_rsqrte_rpres_f32,
+ gen_helper_rsqrte_f64,
+};
+TRANS(FRSQRTE_s, do_fp1_scalar_ah, a,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
+ &f_scalar_frsqrte_rpres : &f_scalar_frsqrte, -1)
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
+static bool trans_FCVT_s_ds(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, a->rn);
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
- read_vec_element_i32(s, tcg_op, rn, pass, size);
- fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
- if (is_scalar) {
- if (size == MO_16 && !is_u) {
- tcg_gen_ext16u_i32(tcg_op, tcg_op);
- }
- write_fp_sreg(s, rd, tcg_op);
- } else {
- write_vec_element_i32(s, tcg_op, rd, pass, size);
- }
- }
- if (!is_scalar) {
- clear_vec_high(s, is_q, rd);
- }
+ gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, fpst);
+ write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd);
}
-
- gen_restore_rmode(tcg_rmode, tcg_fpstatus);
+ return true;
}
-/* AdvSIMD scalar shift by immediate
- * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
- * +-----+---+-------------+------+------+--------+---+------+------+
- * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
- * +-----+---+-------------+------+------+--------+---+------+------+
- *
- * This is the scalar version so it works on a fixed sized registers
- */
-static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
+static bool trans_FCVT_s_hs(DisasContext *s, arg_rr *a)
{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int opcode = extract32(insn, 11, 5);
- int immb = extract32(insn, 16, 3);
- int immh = extract32(insn, 19, 4);
- bool is_u = extract32(insn, 29, 1);
+ if (fp_access_check(s)) {
+ TCGv_i32 tmp = read_fp_sreg(s, a->rn);
+ TCGv_i32 ahp = get_ahp_flag();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
- if (immh == 0) {
- unallocated_encoding(s);
- return;
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ /* write_fp_hreg_merging is OK here because top half of result is zero */
+ write_fp_hreg_merging(s, a->rd, a->rd, tmp);
}
+ return true;
+}
- switch (opcode) {
- case 0x08: /* SRI */
- if (!is_u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x00: /* SSHR / USHR */
- case 0x02: /* SSRA / USRA */
- case 0x04: /* SRSHR / URSHR */
- case 0x06: /* SRSRA / URSRA */
- handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
- break;
- case 0x0a: /* SHL / SLI */
- handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
- break;
- case 0x1c: /* SCVTF, UCVTF */
- handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
- opcode, rn, rd);
- break;
- case 0x10: /* SQSHRUN, SQSHRUN2 */
- case 0x11: /* SQRSHRUN, SQRSHRUN2 */
- if (!is_u) {
- unallocated_encoding(s);
- return;
- }
- handle_vec_simd_sqshrn(s, true, false, false, true,
- immh, immb, opcode, rn, rd);
- break;
- case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
- case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
- handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
- immh, immb, opcode, rn, rd);
- break;
- case 0xc: /* SQSHLU */
- if (!is_u) {
- unallocated_encoding(s);
- return;
- }
- handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
- break;
- case 0xe: /* SQSHL, UQSHL */
- handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
- break;
- case 0x1f: /* FCVTZS, FCVTZU */
- handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
- break;
- default:
- unallocated_encoding(s);
- break;
+static bool trans_FCVT_s_sd(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, a->rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
+
+ gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, fpst);
+ write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd);
}
+ return true;
}
-static void handle_2misc_64(DisasContext *s, int opcode, bool u,
- TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
- TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
+static bool trans_FCVT_s_hd(DisasContext *s, arg_rr *a)
{
- /* Handle 64->64 opcodes which are shared between the scalar and
- * vector 2-reg-misc groups. We cover every integer opcode where size == 3
- * is valid in either group and also the double-precision fp ops.
- * The caller only need provide tcg_rmode and tcg_fpstatus if the op
- * requires them.
- */
- TCGCond cond;
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, a->rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ TCGv_i32 ahp = get_ahp_flag();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
- switch (opcode) {
- case 0x4: /* CLS, CLZ */
- if (u) {
- tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
- } else {
- tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
- }
- break;
- case 0x5: /* NOT */
- /* This opcode is shared with CNT and RBIT but we have earlier
- * enforced that size == 3 if and only if this is the NOT insn.
- */
- tcg_gen_not_i64(tcg_rd, tcg_rn);
- break;
- case 0x7: /* SQABS, SQNEG */
- if (u) {
- gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
- } else {
- gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
- }
- break;
- case 0xa: /* CMLT */
- cond = TCG_COND_LT;
- do_cmop:
- /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
- tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
- break;
- case 0x8: /* CMGT, CMGE */
- cond = u ? TCG_COND_GE : TCG_COND_GT;
- goto do_cmop;
- case 0x9: /* CMEQ, CMLE */
- cond = u ? TCG_COND_LE : TCG_COND_EQ;
- goto do_cmop;
- case 0xb: /* ABS, NEG */
- if (u) {
- tcg_gen_neg_i64(tcg_rd, tcg_rn);
- } else {
- tcg_gen_abs_i64(tcg_rd, tcg_rn);
- }
- break;
- case 0x2f: /* FABS */
- gen_vfp_absd(tcg_rd, tcg_rn);
- break;
- case 0x6f: /* FNEG */
- gen_vfp_negd(tcg_rd, tcg_rn);
- break;
- case 0x7f: /* FSQRT */
- gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
- break;
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
- break;
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
- break;
- case 0x18: /* FRINTN */
- case 0x19: /* FRINTM */
- case 0x38: /* FRINTP */
- case 0x39: /* FRINTZ */
- case 0x58: /* FRINTA */
- case 0x79: /* FRINTI */
- gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
- break;
- case 0x59: /* FRINTX */
- gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
- break;
- case 0x1e: /* FRINT32Z */
- case 0x5e: /* FRINT32X */
- gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
- break;
- case 0x1f: /* FRINT64Z */
- case 0x5f: /* FRINT64X */
- gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
- break;
- default:
- g_assert_not_reached();
+ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
+ /* write_fp_hreg_merging is OK here because top half of tcg_rd is zero */
+ write_fp_hreg_merging(s, a->rd, a->rd, tcg_rd);
}
+ return true;
}
-static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
- bool is_scalar, bool is_u, bool is_q,
- int size, int rn, int rd)
+static bool trans_FCVT_s_sh(DisasContext *s, arg_rr *a)
{
- bool is_double = (size == MO_64);
- TCGv_ptr fpst;
+ if (fp_access_check(s)) {
+ TCGv_i32 tcg_rn = read_fp_hreg(s, a->rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_A64_F16);
+ TCGv_i32 tcg_ahp = get_ahp_flag();
- if (!fp_access_check(s)) {
- return;
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
+ write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd);
}
+ return true;
+}
- fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+static bool trans_FCVT_s_dh(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i32 tcg_rn = read_fp_hreg(s, a->rn);
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_A64_F16);
+ TCGv_i32 tcg_ahp = get_ahp_flag();
- if (is_double) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- TCGv_i64 tcg_zero = tcg_constant_i64(0);
- TCGv_i64 tcg_res = tcg_temp_new_i64();
- NeonGenTwoDoubleOpFn *genfn;
- bool swap = false;
- int pass;
+ gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
+ write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd);
+ }
+ return true;
+}
- switch (opcode) {
- case 0x2e: /* FCMLT (zero) */
- swap = true;
- /* fallthrough */
- case 0x2c: /* FCMGT (zero) */
- genfn = gen_helper_neon_cgt_f64;
- break;
- case 0x2d: /* FCMEQ (zero) */
- genfn = gen_helper_neon_ceq_f64;
- break;
- case 0x6d: /* FCMLE (zero) */
- swap = true;
- /* fall through */
- case 0x6c: /* FCMGE (zero) */
- genfn = gen_helper_neon_cge_f64;
- break;
- default:
- g_assert_not_reached();
- }
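+/*
+ * Convert the 64-bit integer in tcg_int to an FP value of width esz,
+ * treating "shift" as the number of fixed-point fraction bits, and
+ * write the scalar result to register rd.
+ */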
+static bool do_cvtf_scalar(DisasContext *s, MemOp esz, int rd, int shift,
+ TCGv_i64 tcg_int, bool is_signed)
+{
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_shift, tcg_single;
+ TCGv_i64 tcg_double;
- for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- if (swap) {
- genfn(tcg_res, tcg_zero, tcg_op, fpst);
- } else {
- genfn(tcg_res, tcg_op, tcg_zero, fpst);
- }
- write_vec_element(s, tcg_res, rd, pass, MO_64);
- }
+ tcg_fpstatus = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ tcg_shift = tcg_constant_i32(shift);
- clear_vec_high(s, !is_scalar, rd);
- } else {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
- TCGv_i32 tcg_zero = tcg_constant_i32(0);
- TCGv_i32 tcg_res = tcg_temp_new_i32();
- NeonGenTwoSingleOpFn *genfn;
- bool swap = false;
- int pass, maxpasses;
-
- if (size == MO_16) {
- switch (opcode) {
- case 0x2e: /* FCMLT (zero) */
- swap = true;
- /* fall through */
- case 0x2c: /* FCMGT (zero) */
- genfn = gen_helper_advsimd_cgt_f16;
- break;
- case 0x2d: /* FCMEQ (zero) */
- genfn = gen_helper_advsimd_ceq_f16;
- break;
- case 0x6d: /* FCMLE (zero) */
- swap = true;
- /* fall through */
- case 0x6c: /* FCMGE (zero) */
- genfn = gen_helper_advsimd_cge_f16;
- break;
- default:
- g_assert_not_reached();
- }
+ switch (esz) {
+ case MO_64:
+ tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int, tcg_shift, tcg_fpstatus);
} else {
- switch (opcode) {
- case 0x2e: /* FCMLT (zero) */
- swap = true;
- /* fall through */
- case 0x2c: /* FCMGT (zero) */
- genfn = gen_helper_neon_cgt_f32;
- break;
- case 0x2d: /* FCMEQ (zero) */
- genfn = gen_helper_neon_ceq_f32;
- break;
- case 0x6d: /* FCMLE (zero) */
- swap = true;
- /* fall through */
- case 0x6c: /* FCMGE (zero) */
- genfn = gen_helper_neon_cge_f32;
- break;
- default:
- g_assert_not_reached();
- }
+ gen_helper_vfp_uqtod(tcg_double, tcg_int, tcg_shift, tcg_fpstatus);
}
+ write_fp_dreg_merging(s, rd, rd, tcg_double);
+ break;
- if (is_scalar) {
- maxpasses = 1;
+ case MO_32:
+ tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int, tcg_shift, tcg_fpstatus);
} else {
- int vector_size = 8 << is_q;
- maxpasses = vector_size >> size;
+ gen_helper_vfp_uqtos(tcg_single, tcg_int, tcg_shift, tcg_fpstatus);
}
+ write_fp_sreg_merging(s, rd, rd, tcg_single);
+ break;
- for (pass = 0; pass < maxpasses; pass++) {
- read_vec_element_i32(s, tcg_op, rn, pass, size);
- if (swap) {
- genfn(tcg_res, tcg_zero, tcg_op, fpst);
- } else {
- genfn(tcg_res, tcg_op, tcg_zero, fpst);
- }
- if (is_scalar) {
- write_fp_sreg(s, rd, tcg_res);
- } else {
- write_vec_element_i32(s, tcg_res, rd, pass, size);
- }
+ case MO_16:
+ tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtoh(tcg_single, tcg_int, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtoh(tcg_single, tcg_int, tcg_shift, tcg_fpstatus);
}
+ write_fp_hreg_merging(s, rd, rd, tcg_single);
+ break;
- if (!is_scalar) {
- clear_vec_high(s, is_q, rd);
- }
+ default:
+ g_assert_not_reached();
}
+ return true;
}
-static void handle_2misc_reciprocal(DisasContext *s, int opcode,
- bool is_scalar, bool is_u, bool is_q,
- int size, int rn, int rd)
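+/*
+ * [US]CVTF from a general-purpose register; 32-bit sources are
+ * sign- or zero-extended to 64 bits before conversion.
+ */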
+static bool do_cvtf_g(DisasContext *s, arg_fcvt *a, bool is_signed)
{
- bool is_double = (size == 3);
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+ TCGv_i64 tcg_int;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- if (is_double) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- TCGv_i64 tcg_res = tcg_temp_new_i64();
- int pass;
+ if (check <= 0) {
+ return check == 0;
+ }
- for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- switch (opcode) {
- case 0x3d: /* FRECPE */
- gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
- break;
- case 0x3f: /* FRECPX */
- gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
- break;
- case 0x7d: /* FRSQRTE */
- gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
- break;
- default:
- g_assert_not_reached();
- }
- write_vec_element(s, tcg_res, rd, pass, MO_64);
- }
- clear_vec_high(s, !is_scalar, rd);
+ if (a->sf) {
+ tcg_int = cpu_reg(s, a->rn);
} else {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
- TCGv_i32 tcg_res = tcg_temp_new_i32();
- int pass, maxpasses;
-
- if (is_scalar) {
- maxpasses = 1;
+ tcg_int = read_cpu_reg(s, a->rn, true);
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_int, tcg_int);
} else {
- maxpasses = is_q ? 4 : 2;
- }
-
- for (pass = 0; pass < maxpasses; pass++) {
- read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
-
- switch (opcode) {
- case 0x3c: /* URECPE */
- gen_helper_recpe_u32(tcg_res, tcg_op);
- break;
- case 0x3d: /* FRECPE */
- gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
- break;
- case 0x3f: /* FRECPX */
- gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
- break;
- case 0x7d: /* FRSQRTE */
- gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
- break;
- default:
- g_assert_not_reached();
- }
-
- if (is_scalar) {
- write_fp_sreg(s, rd, tcg_res);
- } else {
- write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
- }
- }
- if (!is_scalar) {
- clear_vec_high(s, is_q, rd);
+ tcg_gen_ext32u_i64(tcg_int, tcg_int);
}
}
+ return do_cvtf_scalar(s, a->esz, a->rd, a->shift, tcg_int, is_signed);
}
-static void handle_2misc_narrow(DisasContext *s, bool scalar,
- int opcode, bool u, bool is_q,
- int size, int rn, int rd)
+TRANS(SCVTF_g, do_cvtf_g, a, true)
+TRANS(UCVTF_g, do_cvtf_g, a, false)
+
+/*
+ * [US]CVTF (vector), scalar version.
+ * This sounds odd, but simply means the input comes from an FP register
+ * rather than from a general-purpose register.  Input and output element
+ * sizes are always equal.
+ */
+static bool do_cvtf_f(DisasContext *s, arg_fcvt *a, bool is_signed)
{
- /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
- * in the source becomes a size element in the destination).
- */
- int pass;
- TCGv_i32 tcg_res[2];
- int destelt = is_q ? 2 : 0;
- int passes = scalar ? 1 : 2;
+ TCGv_i64 tcg_int;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- if (scalar) {
- tcg_res[1] = tcg_constant_i32(0);
+ if (check <= 0) {
+ return check == 0;
}
- for (pass = 0; pass < passes; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- NeonGenNarrowFn *genfn = NULL;
- NeonGenNarrowEnvFn *genenvfn = NULL;
+ tcg_int = tcg_temp_new_i64();
+ read_vec_element(s, tcg_int, a->rn, 0, a->esz | (is_signed ? MO_SIGN : 0));
+ return do_cvtf_scalar(s, a->esz, a->rd, a->shift, tcg_int, is_signed);
+}
- if (scalar) {
- read_vec_element(s, tcg_op, rn, pass, size + 1);
- } else {
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- }
- tcg_res[pass] = tcg_temp_new_i32();
+TRANS(SCVTF_f, do_cvtf_f, a, true)
+TRANS(UCVTF_f, do_cvtf_f, a, false)
- switch (opcode) {
- case 0x12: /* XTN, SQXTUN */
- {
- static NeonGenNarrowFn * const xtnfns[3] = {
- gen_helper_neon_narrow_u8,
- gen_helper_neon_narrow_u16,
- tcg_gen_extrl_i64_i32,
- };
- static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
- gen_helper_neon_unarrow_sat8,
- gen_helper_neon_unarrow_sat16,
- gen_helper_neon_unarrow_sat32,
- };
- if (u) {
- genenvfn = sqxtunfns[size];
- } else {
- genfn = xtnfns[size];
- }
- break;
- }
- case 0x14: /* SQXTN, UQXTN */
- {
- static NeonGenNarrowEnvFn * const fns[3][2] = {
- { gen_helper_neon_narrow_sat_s8,
- gen_helper_neon_narrow_sat_u8 },
- { gen_helper_neon_narrow_sat_s16,
- gen_helper_neon_narrow_sat_u16 },
- { gen_helper_neon_narrow_sat_s32,
- gen_helper_neon_narrow_sat_u32 },
- };
- genenvfn = fns[size][u];
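+/*
+ * FP (width esz) to integer conversion.  "out" encodes the destination
+ * width and signedness as a MemOp; results narrower than 64 bits are
+ * zero-extended into tcg_out.  "shift" gives the fixed-point fraction
+ * bits and "rmode" the rounding mode to use.
+ */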
+static void do_fcvt_scalar(DisasContext *s, MemOp out, MemOp esz,
+ TCGv_i64 tcg_out, int shift, int rn,
+ ARMFPRounding rmode)
+{
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_shift, tcg_rmode, tcg_single;
+
+ tcg_fpstatus = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ tcg_shift = tcg_constant_i32(shift);
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
+
+ switch (esz) {
+ case MO_64:
+ read_vec_element(s, tcg_out, rn, 0, MO_64);
+ switch (out) {
+ case MO_64 | MO_SIGN:
+ gen_helper_vfp_tosqd(tcg_out, tcg_out, tcg_shift, tcg_fpstatus);
break;
- }
- case 0x16: /* FCVTN, FCVTN2 */
- /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
- if (size == 2) {
- gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
- } else {
- TCGv_i32 tcg_lo = tcg_temp_new_i32();
- TCGv_i32 tcg_hi = tcg_temp_new_i32();
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
- TCGv_i32 ahp = get_ahp_flag();
-
- tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
- gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
- gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
- tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
- }
+ case MO_64:
+ gen_helper_vfp_touqd(tcg_out, tcg_out, tcg_shift, tcg_fpstatus);
break;
- case 0x36: /* BFCVTN, BFCVTN2 */
- {
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
- gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
- }
+ case MO_32 | MO_SIGN:
+ gen_helper_vfp_tosld(tcg_out, tcg_out, tcg_shift, tcg_fpstatus);
break;
- case 0x56: /* FCVTXN, FCVTXN2 */
- /* 64 bit to 32 bit float conversion
- * with von Neumann rounding (round to odd)
- */
- assert(size == 2);
- gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
+ case MO_32:
+ gen_helper_vfp_tould(tcg_out, tcg_out, tcg_shift, tcg_fpstatus);
break;
default:
g_assert_not_reached();
}
-
- if (genfn) {
- genfn(tcg_res[pass], tcg_op);
- } else if (genenvfn) {
- genenvfn(tcg_res[pass], tcg_env, tcg_op);
- }
- }
-
- for (pass = 0; pass < 2; pass++) {
- write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
- }
- clear_vec_high(s, is_q, rd);
-}
-
-/* AdvSIMD scalar two reg misc
- * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
- * +-----+---+-----------+------+-----------+--------+-----+------+------+
- * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
- * +-----+---+-----------+------+-----------+--------+-----+------+------+
- */
-static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int opcode = extract32(insn, 12, 5);
- int size = extract32(insn, 22, 2);
- bool u = extract32(insn, 29, 1);
- bool is_fcvt = false;
- int rmode;
- TCGv_i32 tcg_rmode;
- TCGv_ptr tcg_fpstatus;
-
- switch (opcode) {
- case 0x7: /* SQABS / SQNEG */
- break;
- case 0xa: /* CMLT */
- if (u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x8: /* CMGT, CMGE */
- case 0x9: /* CMEQ, CMLE */
- case 0xb: /* ABS, NEG */
- if (size != 3) {
- unallocated_encoding(s);
- return;
- }
break;
- case 0x12: /* SQXTUN */
- if (!u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x14: /* SQXTN, UQXTN */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
- return;
- case 0xc ... 0xf:
- case 0x16 ... 0x1d:
- case 0x1f:
- /* Floating point: U, size[1] and opcode indicate operation;
- * size[0] indicates single or double precision.
- */
- opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
- size = extract32(size, 0, 1) ? 3 : 2;
- switch (opcode) {
- case 0x2c: /* FCMGT (zero) */
- case 0x2d: /* FCMEQ (zero) */
- case 0x2e: /* FCMLT (zero) */
- case 0x6c: /* FCMGE (zero) */
- case 0x6d: /* FCMLE (zero) */
- handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
- return;
- case 0x1d: /* SCVTF */
- case 0x5d: /* UCVTF */
- {
- bool is_signed = (opcode == 0x1d);
- if (!fp_access_check(s)) {
- return;
- }
- handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
- return;
- }
- case 0x3d: /* FRECPE */
- case 0x3f: /* FRECPX */
- case 0x7d: /* FRSQRTE */
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
- return;
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- is_fcvt = true;
- rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+
+ case MO_32:
+ tcg_single = read_fp_sreg(s, rn);
+ switch (out) {
+ case MO_64 | MO_SIGN:
+ gen_helper_vfp_tosqs(tcg_out, tcg_single, tcg_shift, tcg_fpstatus);
break;
- case 0x1c: /* FCVTAS */
- case 0x5c: /* FCVTAU */
- /* TIEAWAY doesn't fit in the usual rounding mode encoding */
- is_fcvt = true;
- rmode = FPROUNDING_TIEAWAY;
+ case MO_64:
+ gen_helper_vfp_touqs(tcg_out, tcg_single, tcg_shift, tcg_fpstatus);
+ break;
+ case MO_32 | MO_SIGN:
+ gen_helper_vfp_tosls(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
+ break;
+ case MO_32:
+ gen_helper_vfp_touls(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
break;
- case 0x56: /* FCVTXN, FCVTXN2 */
- if (size == 2) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
- return;
default:
- unallocated_encoding(s);
- return;
+ g_assert_not_reached();
}
break;
- default:
- case 0x3: /* USQADD / SUQADD */
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
- if (is_fcvt) {
- tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
- tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
- } else {
- tcg_fpstatus = NULL;
- tcg_rmode = NULL;
- }
-
- if (size == 3) {
- TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
- TCGv_i64 tcg_rd = tcg_temp_new_i64();
-
- handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
- write_fp_dreg(s, rd, tcg_rd);
- } else {
- TCGv_i32 tcg_rn = tcg_temp_new_i32();
- TCGv_i32 tcg_rd = tcg_temp_new_i32();
-
- read_vec_element_i32(s, tcg_rn, rn, 0, size);
-
- switch (opcode) {
- case 0x7: /* SQABS, SQNEG */
- {
- NeonGenOneOpEnvFn *genfn;
- static NeonGenOneOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
- { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
- { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
- };
- genfn = fns[size][u];
- genfn(tcg_rd, tcg_env, tcg_rn);
+ case MO_16:
+ tcg_single = read_fp_hreg(s, rn);
+ switch (out) {
+ case MO_64 | MO_SIGN:
+ gen_helper_vfp_tosqh(tcg_out, tcg_single, tcg_shift, tcg_fpstatus);
break;
- }
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
- tcg_fpstatus);
+ case MO_64:
+ gen_helper_vfp_touqh(tcg_out, tcg_single, tcg_shift, tcg_fpstatus);
+ break;
+ case MO_32 | MO_SIGN:
+ gen_helper_vfp_toslh(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
break;
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
- tcg_fpstatus);
+ case MO_32:
+ gen_helper_vfp_toulh(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
+ break;
+ case MO_16 | MO_SIGN:
+ gen_helper_vfp_toshh(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
+ break;
+ case MO_16:
+ gen_helper_vfp_touhh(tcg_single, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ tcg_gen_extu_i32_i64(tcg_out, tcg_single);
break;
default:
g_assert_not_reached();
}
+ break;
- write_fp_sreg(s, rd, tcg_rd);
+ default:
+ g_assert_not_reached();
}
- if (is_fcvt) {
- gen_restore_rmode(tcg_rmode, tcg_fpstatus);
- }
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
-/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
-static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
- int immh, int immb, int opcode, int rn, int rd)
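+/*
+ * FCVT* with a general-purpose destination register; 32-bit results
+ * are zero-extended into the 64-bit register.
+ */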
+static bool do_fcvt_g(DisasContext *s, arg_fcvt *a,
+ ARMFPRounding rmode, bool is_signed)
{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = 2 * (8 << size) - immhb;
- GVecGen2iFn *gvec_fn;
+ TCGv_i64 tcg_int;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- if (extract32(immh, 3, 1) && !is_q) {
- unallocated_encoding(s);
- return;
+ if (check <= 0) {
+ return check == 0;
}
- tcg_debug_assert(size <= 3);
- if (!fp_access_check(s)) {
- return;
- }
+ tcg_int = cpu_reg(s, a->rd);
+ do_fcvt_scalar(s, (a->sf ? MO_64 : MO_32) | (is_signed ? MO_SIGN : 0),
+ a->esz, tcg_int, a->shift, a->rn, rmode);
- switch (opcode) {
- case 0x02: /* SSRA / USRA (accumulate) */
- gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
- break;
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_int, tcg_int);
+ }
+ return true;
+}
- case 0x08: /* SRI */
- gvec_fn = gen_gvec_sri;
- break;
+TRANS(FCVTNS_g, do_fcvt_g, a, FPROUNDING_TIEEVEN, true)
+TRANS(FCVTNU_g, do_fcvt_g, a, FPROUNDING_TIEEVEN, false)
+TRANS(FCVTPS_g, do_fcvt_g, a, FPROUNDING_POSINF, true)
+TRANS(FCVTPU_g, do_fcvt_g, a, FPROUNDING_POSINF, false)
+TRANS(FCVTMS_g, do_fcvt_g, a, FPROUNDING_NEGINF, true)
+TRANS(FCVTMU_g, do_fcvt_g, a, FPROUNDING_NEGINF, false)
+TRANS(FCVTZS_g, do_fcvt_g, a, FPROUNDING_ZERO, true)
+TRANS(FCVTZU_g, do_fcvt_g, a, FPROUNDING_ZERO, false)
+TRANS(FCVTAS_g, do_fcvt_g, a, FPROUNDING_TIEAWAY, true)
+TRANS(FCVTAU_g, do_fcvt_g, a, FPROUNDING_TIEAWAY, false)
- case 0x00: /* SSHR / USHR */
- if (is_u) {
- if (shift == 8 << size) {
- /* Shift count the same size as element size produces zero. */
- tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s), 0);
- return;
- }
- gvec_fn = tcg_gen_gvec_shri;
- } else {
- /* Shift count the same size as element size produces all sign. */
- if (shift == 8 << size) {
- shift -= 1;
- }
- gvec_fn = tcg_gen_gvec_sari;
- }
- break;
+/*
+ * FCVT* (vector), scalar version.
+ * This sounds odd, but simply means the output goes to an FP register
+ * rather than to a general-purpose register.  Input and output element
+ * sizes are always equal.
+ */
+static bool do_fcvt_f(DisasContext *s, arg_fcvt *a,
+ ARMFPRounding rmode, bool is_signed)
+{
+ TCGv_i64 tcg_int;
+ int check = fp_access_check_scalar_hsd(s, a->esz);
- case 0x04: /* SRSHR / URSHR (rounding) */
- gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
- break;
+ if (check <= 0) {
+ return check == 0;
+ }
- case 0x06: /* SRSRA / URSRA (accum + rounding) */
- gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
- break;
+ tcg_int = tcg_temp_new_i64();
+ do_fcvt_scalar(s, a->esz | (is_signed ? MO_SIGN : 0),
+ a->esz, tcg_int, a->shift, a->rn, rmode);
- default:
- g_assert_not_reached();
+ if (!s->fpcr_nep) {
+ clear_vec(s, a->rd);
}
-
- gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
+ write_vec_element(s, tcg_int, a->rd, 0, a->esz);
+ return true;
}
-/* SHL/SLI - Vector shift left */
-static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
- int immh, int immb, int opcode, int rn, int rd)
+TRANS(FCVTNS_f, do_fcvt_f, a, FPROUNDING_TIEEVEN, true)
+TRANS(FCVTNU_f, do_fcvt_f, a, FPROUNDING_TIEEVEN, false)
+TRANS(FCVTPS_f, do_fcvt_f, a, FPROUNDING_POSINF, true)
+TRANS(FCVTPU_f, do_fcvt_f, a, FPROUNDING_POSINF, false)
+TRANS(FCVTMS_f, do_fcvt_f, a, FPROUNDING_NEGINF, true)
+TRANS(FCVTMU_f, do_fcvt_f, a, FPROUNDING_NEGINF, false)
+TRANS(FCVTZS_f, do_fcvt_f, a, FPROUNDING_ZERO, true)
+TRANS(FCVTZU_f, do_fcvt_f, a, FPROUNDING_ZERO, false)
+TRANS(FCVTAS_f, do_fcvt_f, a, FPROUNDING_TIEAWAY, true)
+TRANS(FCVTAU_f, do_fcvt_f, a, FPROUNDING_TIEAWAY, false)
+
+static bool trans_FJCVTZS(DisasContext *s, arg_FJCVTZS *a)
{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
+ if (!dc_isar_feature(aa64_jscvt, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
+ TCGv_ptr fpstatus = fpstatus_ptr(FPST_A64);
- /* Range of size is limited by decode: immh is a non-zero 4 bit field */
- assert(size >= 0 && size <= 3);
+ gen_helper_fjcvtzs(t, t, fpstatus);
- if (extract32(immh, 3, 1) && !is_q) {
- unallocated_encoding(s);
- return;
+ tcg_gen_ext32u_i64(cpu_reg(s, a->rd), t);
+ tcg_gen_extrh_i64_i32(cpu_ZF, t);
+ tcg_gen_movi_i32(cpu_CF, 0);
+ tcg_gen_movi_i32(cpu_NF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
}
+ return true;
+}
- if (!fp_access_check(s)) {
- return;
+static bool trans_FMOV_hx(DisasContext *s, arg_rr *a)
+{
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
}
-
- if (insert) {
- gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
- } else {
- gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = cpu_reg(s, a->rn);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(tmp, tcg_rn);
+ write_fp_dreg(s, a->rd, tmp);
}
+ return true;
}
-/* USHLL/SHLL - Vector shift left with widening */
-static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
- int immh, int immb, int opcode, int rn, int rd)
+static bool trans_FMOV_sw(DisasContext *s, arg_rr *a)
{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
- int dsize = 64;
- int esize = 8 << size;
- int elements = dsize/esize;
- TCGv_i64 tcg_rn = tcg_temp_new_i64();
- TCGv_i64 tcg_rd = tcg_temp_new_i64();
- int i;
-
- if (size >= 3) {
- unallocated_encoding(s);
- return;
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = cpu_reg(s, a->rn);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(tmp, tcg_rn);
+ write_fp_dreg(s, a->rd, tmp);
}
+ return true;
+}
- if (!fp_access_check(s)) {
- return;
+static bool trans_FMOV_dx(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = cpu_reg(s, a->rn);
+ write_fp_dreg(s, a->rd, tcg_rn);
}
+ return true;
+}
- /* For the LL variants the store is larger than the load,
- * so if rd == rn we would overwrite parts of our input.
- * So load everything right now and use shifts in the main loop.
- */
- read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
+static bool trans_FMOV_ux(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rn = cpu_reg(s, a->rn);
+ tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, a->rd));
+ clear_vec_high(s, true, a->rd);
+ }
+ return true;
+}
- for (i = 0; i < elements; i++) {
- tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
- ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
- tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
- write_vec_element(s, tcg_rd, rd, i, size + 1);
+static bool trans_FMOV_xh(DisasContext *s, arg_rr *a)
+{
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_16));
}
+ return true;
}
-/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
-static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
- int immh, int immb, int opcode, int rn, int rd)
+static bool trans_FMOV_ws(DisasContext *s, arg_rr *a)
{
- int immhb = immh << 3 | immb;
- int size = 32 - clz32(immh) - 1;
- int dsize = 64;
- int esize = 8 << size;
- int elements = dsize/esize;
- int shift = (2 * esize) - immhb;
- bool round = extract32(opcode, 0, 1);
- TCGv_i64 tcg_rn, tcg_rd, tcg_final;
- TCGv_i64 tcg_round;
- int i;
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_32));
+ }
+ return true;
+}
- if (extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
+static bool trans_FMOV_xd(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_64));
}
+ return true;
+}
- if (!fp_access_check(s)) {
- return;
+static bool trans_FMOV_xu(DisasContext *s, arg_rr *a)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, a->rn));
}
+ return true;
+}
- tcg_rn = tcg_temp_new_i64();
- tcg_rd = tcg_temp_new_i64();
- tcg_final = tcg_temp_new_i64();
- read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
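+/*
+ * 1-operand saturating ops (SQABS, SQNEG): the helpers take tcg_env
+ * so that they can update QC on saturation.
+ */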
+typedef struct ENVScalar1 {
+ NeonGenOneOpEnvFn *gen_bhs[3];
+ NeonGenOne64OpEnvFn *gen_d;
+} ENVScalar1;
- if (round) {
- tcg_round = tcg_constant_i64(1ULL << (shift - 1));
- } else {
- tcg_round = NULL;
+static bool do_env_scalar1(DisasContext *s, arg_rr_e *a, const ENVScalar1 *f)
+{
+ if (!fp_access_check(s)) {
+ return true;
}
+ if (a->esz == MO_64) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
+ f->gen_d(t, tcg_env, t);
+ write_fp_dreg(s, a->rd, t);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
- for (i = 0; i < elements; i++) {
- read_vec_element(s, tcg_rn, rn, i, size+1);
- handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
- false, true, size+1, shift);
+ read_vec_element_i32(s, t, a->rn, 0, a->esz);
+ f->gen_bhs[a->esz](t, tcg_env, t);
+ write_fp_sreg(s, a->rd, t);
+ }
+ return true;
+}
- tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+static bool do_env_vector1(DisasContext *s, arg_qrr_e *a, const ENVScalar1 *f)
+{
+ if (a->esz == MO_64 && !a->q) {
+ return false;
+ }
+ if (!fp_access_check(s)) {
+ return true;
}
+ if (a->esz == MO_64) {
+ TCGv_i64 t = tcg_temp_new_i64();
- if (!is_q) {
- write_vec_element(s, tcg_final, rd, 0, MO_64);
+ for (int i = 0; i < 2; ++i) {
+ read_vec_element(s, t, a->rn, i, MO_64);
+ f->gen_d(t, tcg_env, t);
+ write_vec_element(s, t, a->rd, i, MO_64);
+ }
} else {
- write_vec_element(s, tcg_final, rd, 1, MO_64);
+ TCGv_i32 t = tcg_temp_new_i32();
+ int n = (a->q ? 16 : 8) >> a->esz;
+
+ for (int i = 0; i < n; ++i) {
+ read_vec_element_i32(s, t, a->rn, i, a->esz);
+ f->gen_bhs[a->esz](t, tcg_env, t);
+ write_vec_element_i32(s, t, a->rd, i, a->esz);
+ }
}
+ clear_vec_high(s, a->q, a->rd);
+ return true;
+}
+
+static const ENVScalar1 f_scalar_sqabs = {
+ { gen_helper_neon_qabs_s8,
+ gen_helper_neon_qabs_s16,
+ gen_helper_neon_qabs_s32 },
+ gen_helper_neon_qabs_s64,
+};
+TRANS(SQABS_s, do_env_scalar1, a, &f_scalar_sqabs)
+TRANS(SQABS_v, do_env_vector1, a, &f_scalar_sqabs)
+
+static const ENVScalar1 f_scalar_sqneg = {
+ { gen_helper_neon_qneg_s8,
+ gen_helper_neon_qneg_s16,
+ gen_helper_neon_qneg_s32 },
+ gen_helper_neon_qneg_s64,
+};
+TRANS(SQNEG_s, do_env_scalar1, a, &f_scalar_sqneg)
+TRANS(SQNEG_v, do_env_vector1, a, &f_scalar_sqneg)
- clear_vec_high(s, is_q, rd);
+static bool do_scalar1_d(DisasContext *s, arg_rr *a, ArithOneOp *f)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
+ f(t, t);
+ write_fp_dreg(s, a->rd, t);
+ }
+ return true;
}
+TRANS(ABS_s, do_scalar1_d, a, tcg_gen_abs_i64)
+TRANS(NEG_s, do_scalar1_d, a, tcg_gen_neg_i64)
-/* AdvSIMD shift by immediate
- * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
- * +---+---+---+-------------+------+------+--------+---+------+------+
- * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
- * +---+---+---+-------------+------+------+--------+---+------+------+
- */
-static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int opcode = extract32(insn, 11, 5);
- int immb = extract32(insn, 16, 3);
- int immh = extract32(insn, 19, 4);
- bool is_u = extract32(insn, 29, 1);
- bool is_q = extract32(insn, 30, 1);
-
- /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
- assert(immh != 0);
-
- switch (opcode) {
- case 0x08: /* SRI */
- if (!is_u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x00: /* SSHR / USHR */
- case 0x02: /* SSRA / USRA (accumulate) */
- case 0x04: /* SRSHR / URSHR (rounding) */
- case 0x06: /* SRSRA / URSRA (accum + rounding) */
- handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
- break;
- case 0x0a: /* SHL / SLI */
- handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
- break;
- case 0x10: /* SHRN */
- case 0x11: /* RSHRN / SQRSHRUN */
- if (is_u) {
- handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
- opcode, rn, rd);
- } else {
- handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
- }
- break;
- case 0x12: /* SQSHRN / UQSHRN */
- case 0x13: /* SQRSHRN / UQRSHRN */
- handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
- opcode, rn, rd);
- break;
- case 0x14: /* SSHLL / USHLL */
- handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
- break;
- case 0x1c: /* SCVTF / UCVTF */
- handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
- opcode, rn, rd);
- break;
- case 0xc: /* SQSHLU */
- if (!is_u) {
- unallocated_encoding(s);
- return;
- }
- handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
- break;
- case 0xe: /* SQSHL, UQSHL */
- handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
- break;
- case 0x1f: /* FCVTZS/ FCVTZU */
- handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
- return;
- default:
- unallocated_encoding(s);
- return;
+static bool do_cmop0_d(DisasContext *s, arg_rr *a, TCGCond cond)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
+ tcg_gen_negsetcond_i64(cond, t, t, tcg_constant_i64(0));
+ write_fp_dreg(s, a->rd, t);
}
+ return true;
}
-static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
- int size, int rn, int rd)
+TRANS(CMGT0_s, do_cmop0_d, a, TCG_COND_GT)
+TRANS(CMGE0_s, do_cmop0_d, a, TCG_COND_GE)
+TRANS(CMLE0_s, do_cmop0_d, a, TCG_COND_LE)
+TRANS(CMLT0_s, do_cmop0_d, a, TCG_COND_LT)
+TRANS(CMEQ0_s, do_cmop0_d, a, TCG_COND_EQ)
+
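+/*
+ * Narrowing 1-operand ops, scalar form: the source element is read at
+ * 2*esz and narrowed to esz; the rest of the destination is cleared.
+ */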
+static bool do_2misc_narrow_scalar(DisasContext *s, arg_rr_e *a,
+ ArithOneOp * const fn[3])
{
- /* Handle 2-reg-misc ops which are widening (so each size element
- * in the source becomes a 2*size element in the destination.
- * The only instruction like this is FCVTL.
- */
- int pass;
+ if (a->esz == MO_64) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i64 t = tcg_temp_new_i64();
- if (size == 3) {
- /* 32 -> 64 bit fp conversion */
- TCGv_i64 tcg_res[2];
- int srcelt = is_q ? 2 : 0;
+ read_vec_element(s, t, a->rn, 0, a->esz + 1);
+ fn[a->esz](t, t);
+ clear_vec(s, a->rd);
+ write_vec_element(s, t, a->rd, 0, a->esz);
+ }
+ return true;
+}
- for (pass = 0; pass < 2; pass++) {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
- tcg_res[pass] = tcg_temp_new_i64();
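+/* Adapt helpers that need tcg_env to the ArithOneOp (dest, src) signature. */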
+#define WRAP_ENV(NAME) \
+ static void gen_##NAME(TCGv_i64 d, TCGv_i64 n) \
+ { gen_helper_##NAME(d, tcg_env, n); }
- read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
- gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
- }
- for (pass = 0; pass < 2; pass++) {
- write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
- }
- } else {
- /* 16 -> 32 bit fp conversion */
- int srcelt = is_q ? 4 : 0;
- TCGv_i32 tcg_res[4];
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
- TCGv_i32 ahp = get_ahp_flag();
+WRAP_ENV(neon_unarrow_sat8)
+WRAP_ENV(neon_unarrow_sat16)
+WRAP_ENV(neon_unarrow_sat32)
- for (pass = 0; pass < 4; pass++) {
- tcg_res[pass] = tcg_temp_new_i32();
+static ArithOneOp * const f_scalar_sqxtun[] = {
+ gen_neon_unarrow_sat8,
+ gen_neon_unarrow_sat16,
+ gen_neon_unarrow_sat32,
+};
+TRANS(SQXTUN_s, do_2misc_narrow_scalar, a, f_scalar_sqxtun)
- read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
- gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
- fpst, ahp);
- }
- for (pass = 0; pass < 4; pass++) {
- write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
- }
+WRAP_ENV(neon_narrow_sat_s8)
+WRAP_ENV(neon_narrow_sat_s16)
+WRAP_ENV(neon_narrow_sat_s32)
+
+static ArithOneOp * const f_scalar_sqxtn[] = {
+ gen_neon_narrow_sat_s8,
+ gen_neon_narrow_sat_s16,
+ gen_neon_narrow_sat_s32,
+};
+TRANS(SQXTN_s, do_2misc_narrow_scalar, a, f_scalar_sqxtn)
+
+WRAP_ENV(neon_narrow_sat_u8)
+WRAP_ENV(neon_narrow_sat_u16)
+WRAP_ENV(neon_narrow_sat_u32)
+
+static ArithOneOp * const f_scalar_uqxtn[] = {
+ gen_neon_narrow_sat_u8,
+ gen_neon_narrow_sat_u16,
+ gen_neon_narrow_sat_u32,
+};
+TRANS(UQXTN_s, do_2misc_narrow_scalar, a, f_scalar_uqxtn)
+
+static bool trans_FCVTXN_s(DisasContext *s, arg_rr_e *a)
+{
+ if (fp_access_check(s)) {
+ /*
+ * 64 bit to 32 bit float conversion
+ * with von Neumann rounding (round to odd)
+ */
+ TCGv_i64 src = read_fp_dreg(s, a->rn);
+ TCGv_i32 dst = tcg_temp_new_i32();
+ gen_helper_fcvtx_f64_to_f32(dst, src, fpstatus_ptr(FPST_A64));
+ write_fp_sreg_merging(s, a->rd, a->rd, dst);
}
+ return true;
}
-static void handle_rev(DisasContext *s, int opcode, bool u,
- bool is_q, int size, int rn, int rd)
-{
- int op = (opcode << 1) | u;
- int opsz = op + size;
- int grp_size = 3 - opsz;
- int dsize = is_q ? 128 : 64;
- int i;
+#undef WRAP_ENV
- if (opsz >= 3) {
- unallocated_encoding(s);
- return;
+static bool do_gvec_fn2(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
+{
+ if (!a->q && a->esz == MO_64) {
+ return false;
}
-
- if (!fp_access_check(s)) {
- return;
+ if (fp_access_check(s)) {
+ gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz);
}
+ return true;
+}
- if (size == 0) {
- /* Special case bytes, use bswap op on each group of elements */
- int groups = dsize / (8 << grp_size);
-
- for (i = 0; i < groups; i++) {
- TCGv_i64 tcg_tmp = tcg_temp_new_i64();
-
- read_vec_element(s, tcg_tmp, rn, i, grp_size);
- switch (grp_size) {
- case MO_16:
- tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
- break;
- case MO_32:
- tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
- break;
- case MO_64:
- tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
- break;
- default:
- g_assert_not_reached();
- }
- write_vec_element(s, tcg_tmp, rd, i, grp_size);
- }
- clear_vec_high(s, is_q, rd);
- } else {
- int revmask = (1 << grp_size) - 1;
- int esize = 8 << size;
- int elements = dsize / esize;
- TCGv_i64 tcg_rn = tcg_temp_new_i64();
- TCGv_i64 tcg_rd[2];
+TRANS(ABS_v, do_gvec_fn2, a, tcg_gen_gvec_abs)
+TRANS(NEG_v, do_gvec_fn2, a, tcg_gen_gvec_neg)
+TRANS(NOT_v, do_gvec_fn2, a, tcg_gen_gvec_not)
+TRANS(CNT_v, do_gvec_fn2, a, gen_gvec_cnt)
+TRANS(RBIT_v, do_gvec_fn2, a, gen_gvec_rbit)
+TRANS(CMGT0_v, do_gvec_fn2, a, gen_gvec_cgt0)
+TRANS(CMGE0_v, do_gvec_fn2, a, gen_gvec_cge0)
+TRANS(CMLT0_v, do_gvec_fn2, a, gen_gvec_clt0)
+TRANS(CMLE0_v, do_gvec_fn2, a, gen_gvec_cle0)
+TRANS(CMEQ0_v, do_gvec_fn2, a, gen_gvec_ceq0)
+TRANS(REV16_v, do_gvec_fn2, a, gen_gvec_rev16)
+TRANS(REV32_v, do_gvec_fn2, a, gen_gvec_rev32)
+TRANS(URECPE_v, do_gvec_fn2, a, gen_gvec_urecpe)
+TRANS(URSQRTE_v, do_gvec_fn2, a, gen_gvec_ursqrte)
- for (i = 0; i < 2; i++) {
- tcg_rd[i] = tcg_temp_new_i64();
- tcg_gen_movi_i64(tcg_rd[i], 0);
- }
+static bool do_gvec_fn2_bhs(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
+{
+ if (a->esz == MO_64) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz);
+ }
+ return true;
+}
- for (i = 0; i < elements; i++) {
- int e_rev = (i & 0xf) ^ revmask;
- int w = (e_rev * esize) / 64;
- int o = (e_rev * esize) % 64;
+TRANS(CLS_v, do_gvec_fn2_bhs, a, gen_gvec_cls)
+TRANS(CLZ_v, do_gvec_fn2_bhs, a, gen_gvec_clz)
+TRANS(REV64_v, do_gvec_fn2_bhs, a, gen_gvec_rev64)
+TRANS(SADDLP_v, do_gvec_fn2_bhs, a, gen_gvec_saddlp)
+TRANS(UADDLP_v, do_gvec_fn2_bhs, a, gen_gvec_uaddlp)
+TRANS(SADALP_v, do_gvec_fn2_bhs, a, gen_gvec_sadalp)
+TRANS(UADALP_v, do_gvec_fn2_bhs, a, gen_gvec_uadalp)
- read_vec_element(s, tcg_rn, rn, i, size);
- tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
- }
+static bool do_2misc_narrow_vector(DisasContext *s, arg_qrr_e *a,
+ ArithOneOp * const fn[3])
+{
+ if (a->esz == MO_64) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
- for (i = 0; i < 2; i++) {
- write_vec_element(s, tcg_rd[i], rd, i, MO_64);
- }
- clear_vec_high(s, true, rd);
+ read_vec_element(s, t0, a->rn, 0, MO_64);
+ read_vec_element(s, t1, a->rn, 1, MO_64);
+ fn[a->esz](t0, t0);
+ fn[a->esz](t1, t1);
+ write_vec_element(s, t0, a->rd, a->q ? 2 : 0, MO_32);
+ write_vec_element(s, t1, a->rd, a->q ? 3 : 1, MO_32);
+ clear_vec_high(s, a->q, a->rd);
}
+ return true;
}
-static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
- bool is_q, int size, int rn, int rd)
+static ArithOneOp * const f_scalar_xtn[] = {
+ gen_helper_neon_narrow_u8,
+ gen_helper_neon_narrow_u16,
+ tcg_gen_ext32u_i64,
+};
+TRANS(XTN, do_2misc_narrow_vector, a, f_scalar_xtn)
+TRANS(SQXTUN_v, do_2misc_narrow_vector, a, f_scalar_sqxtun)
+TRANS(SQXTN_v, do_2misc_narrow_vector, a, f_scalar_sqxtn)
+TRANS(UQXTN_v, do_2misc_narrow_vector, a, f_scalar_uqxtn)
+
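+/*
+ * Narrow the two single-precision values in @n to half precision,
+ * packing the results into the low 32 bits of @d (FCVTN, S -> H).
+ */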
+static void gen_fcvtn_hs(TCGv_i64 d, TCGv_i64 n)
{
- /* Implement the pairwise operations from 2-misc:
- * SADDLP, UADDLP, SADALP, UADALP.
- * These all add pairs of elements in the input to produce a
- * double-width result element in the output (possibly accumulating).
- */
- bool accum = (opcode == 0x6);
- int maxpass = is_q ? 2 : 1;
- int pass;
- TCGv_i64 tcg_res[2];
+ TCGv_i32 tcg_lo = tcg_temp_new_i32();
+ TCGv_i32 tcg_hi = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
+ TCGv_i32 ahp = get_ahp_flag();
- if (size == 2) {
- /* 32 + 32 -> 64 op */
- MemOp memop = size + (u ? 0 : MO_SIGN);
+ tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, n);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
+ tcg_gen_deposit_i32(tcg_lo, tcg_lo, tcg_hi, 16, 16);
+ tcg_gen_extu_i32_i64(d, tcg_lo);
+}
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i64 tcg_op1 = tcg_temp_new_i64();
- TCGv_i64 tcg_op2 = tcg_temp_new_i64();
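+/* Narrow the double-precision value in @n to single precision (D -> S). */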
+static void gen_fcvtn_sd(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
- tcg_res[pass] = tcg_temp_new_i64();
+ gen_helper_vfp_fcvtsd(tmp, n, fpst);
+ tcg_gen_extu_i32_i64(d, tmp);
+}
- read_vec_element(s, tcg_op1, rn, pass * 2, memop);
- read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
- tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
- if (accum) {
- read_vec_element(s, tcg_op1, rd, pass, MO_64);
- tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
- }
- }
- } else {
- for (pass = 0; pass < maxpass; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- NeonGenOne64OpFn *genfn;
- static NeonGenOne64OpFn * const fns[2][2] = {
- { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
- { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
- };
+static void gen_fcvtxn_sd(TCGv_i64 d, TCGv_i64 n)
+{
+ /*
+     * 64-bit to 32-bit float conversion
+     * with von Neumann rounding (round to odd)
+ */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_fcvtx_f64_to_f32(tmp, n, fpstatus_ptr(FPST_A64));
+ tcg_gen_extu_i32_i64(d, tmp);
+}
- genfn = fns[size][u];
+static ArithOneOp * const f_vector_fcvtn[] = {
+ NULL,
+ gen_fcvtn_hs,
+ gen_fcvtn_sd,
+};
+static ArithOneOp * const f_scalar_fcvtxn[] = {
+ NULL,
+ NULL,
+ gen_fcvtxn_sd,
+};
+TRANS(FCVTN_v, do_2misc_narrow_vector, a, f_vector_fcvtn)
+TRANS(FCVTXN_v, do_2misc_narrow_vector, a, f_scalar_fcvtxn)
- tcg_res[pass] = tcg_temp_new_i64();
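+/* Narrow a pair of single-precision values in @n to bfloat16 (BFCVTN). */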
+static void gen_bfcvtn_hs(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_bfcvt_pair(tmp, n, fpst);
+ tcg_gen_extu_i32_i64(d, tmp);
+}
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- genfn(tcg_res[pass], tcg_op);
-
- if (accum) {
- read_vec_element(s, tcg_op, rd, pass, MO_64);
- if (size == 0) {
- gen_helper_neon_addl_u16(tcg_res[pass],
- tcg_res[pass], tcg_op);
- } else {
- gen_helper_neon_addl_u32(tcg_res[pass],
- tcg_res[pass], tcg_op);
- }
- }
- }
- }
- if (!is_q) {
- tcg_res[1] = tcg_constant_i64(0);
- }
- for (pass = 0; pass < 2; pass++) {
- write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
- }
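+/* As gen_bfcvtn_hs, but using the FPCR.AH = 1 floating-point status. */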
+static void gen_bfcvtn_ah_hs(TCGv_i64 d, TCGv_i64 n)
+{
+ TCGv_ptr fpst = fpstatus_ptr(FPST_AH);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_bfcvt_pair(tmp, n, fpst);
+ tcg_gen_extu_i32_i64(d, tmp);
}
-static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
+static ArithOneOp * const f_vector_bfcvtn[2][3] = {
+ {
+ NULL,
+ gen_bfcvtn_hs,
+ NULL,
+ }, {
+ NULL,
+ gen_bfcvtn_ah_hs,
+ NULL,
+ }
+};
+TRANS_FEAT(BFCVTN_v, aa64_bf16, do_2misc_narrow_vector, a,
+ f_vector_bfcvtn[s->fpcr_ah])
+
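+/* SHLL, SHLL2: widen each element, then shift left by the element size. */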
+static bool trans_SHLL_v(DisasContext *s, arg_qrr_e *a)
{
- /* Implement SHLL and SHLL2 */
- int pass;
- int part = is_q ? 2 : 0;
+ static NeonGenWidenFn * const widenfns[3] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ };
+ NeonGenWidenFn *widenfn;
TCGv_i64 tcg_res[2];
+ TCGv_i32 tcg_op;
+ int part, pass;
- for (pass = 0; pass < 2; pass++) {
- static NeonGenWidenFn * const widenfns[3] = {
- gen_helper_neon_widen_u8,
- gen_helper_neon_widen_u16,
- tcg_gen_extu_i32_i64,
- };
- NeonGenWidenFn *widenfn = widenfns[size];
- TCGv_i32 tcg_op = tcg_temp_new_i32();
+ if (a->esz == MO_64) {
+ return false;
+ }
+ if (!fp_access_check(s)) {
+ return true;
+ }
- read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
+ tcg_op = tcg_temp_new_i32();
+ widenfn = widenfns[a->esz];
+ part = a->q ? 2 : 0;
+
+ for (pass = 0; pass < 2; pass++) {
+ read_vec_element_i32(s, tcg_op, a->rn, part + pass, MO_32);
tcg_res[pass] = tcg_temp_new_i64();
widenfn(tcg_res[pass], tcg_op);
- tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
+ tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << a->esz);
}
for (pass = 0; pass < 2; pass++) {
- write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ write_vec_element(s, tcg_res[pass], a->rd, pass, MO_64);
}
+ return true;
}
-/* AdvSIMD two reg misc
- * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
- * +---+---+---+-----------+------+-----------+--------+-----+------+------+
- * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
- * +---+---+---+-----------+------+-----------+--------+-----+------+------+
- */
-static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
-{
- int size = extract32(insn, 22, 2);
- int opcode = extract32(insn, 12, 5);
- bool u = extract32(insn, 29, 1);
- bool is_q = extract32(insn, 30, 1);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
- bool need_fpstatus = false;
- int rmode = -1;
- TCGv_i32 tcg_rmode;
- TCGv_ptr tcg_fpstatus;
-
- switch (opcode) {
- case 0x0: /* REV64, REV32 */
- case 0x1: /* REV16 */
- handle_rev(s, opcode, u, is_q, size, rn, rd);
- return;
- case 0x5: /* CNT, NOT, RBIT */
- if (u && size == 0) {
- /* NOT */
- break;
- } else if (u && size == 1) {
- /* RBIT */
- break;
- } else if (!u && size == 0) {
- /* CNT */
- break;
- }
- unallocated_encoding(s);
- return;
- case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
- case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
-
- handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
- return;
- case 0x4: /* CLS, CLZ */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x2: /* SADDLP, UADDLP */
- case 0x6: /* SADALP, UADALP */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
- return;
- case 0x13: /* SHLL, SHLL2 */
- if (u == 0 || size == 3) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_shll(s, is_q, size, rn, rd);
- return;
- case 0xa: /* CMLT */
- if (u == 1) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x8: /* CMGT, CMGE */
- case 0x9: /* CMEQ, CMLE */
- case 0xb: /* ABS, NEG */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x7: /* SQABS, SQNEG */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0xc ... 0xf:
- case 0x16 ... 0x1f:
- {
- /* Floating point: U, size[1] and opcode indicate operation;
- * size[0] indicates single or double precision.
- */
- int is_double = extract32(size, 0, 1);
- opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
- size = is_double ? 3 : 2;
- switch (opcode) {
- case 0x2f: /* FABS */
- case 0x6f: /* FNEG */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x1d: /* SCVTF */
- case 0x5d: /* UCVTF */
- {
- bool is_signed = (opcode == 0x1d) ? true : false;
- int elements = is_double ? 2 : is_q ? 4 : 2;
- if (is_double && !is_q) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
- return;
- }
- case 0x2c: /* FCMGT (zero) */
- case 0x2d: /* FCMEQ (zero) */
- case 0x2e: /* FCMLT (zero) */
- case 0x6c: /* FCMGE (zero) */
- case 0x6d: /* FCMLE (zero) */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
- return;
- case 0x7f: /* FSQRT */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- need_fpstatus = true;
- rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x5c: /* FCVTAU */
- case 0x1c: /* FCVTAS */
- need_fpstatus = true;
- rmode = FPROUNDING_TIEAWAY;
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x3c: /* URECPE */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x3d: /* FRECPE */
- case 0x7d: /* FRSQRTE */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
- return;
- case 0x56: /* FCVTXN, FCVTXN2 */
- if (size == 2) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x16: /* FCVTN, FCVTN2 */
- /* handle_2misc_narrow does a 2*size -> size operation, but these
- * instructions encode the source size rather than dest size.
- */
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
- return;
- case 0x36: /* BFCVTN, BFCVTN2 */
- if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
- return;
- case 0x17: /* FCVTL, FCVTL2 */
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_widening(s, opcode, is_q, size, rn, rd);
- return;
- case 0x18: /* FRINTN */
- case 0x19: /* FRINTM */
- case 0x38: /* FRINTP */
- case 0x39: /* FRINTZ */
- rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
- /* fall through */
- case 0x59: /* FRINTX */
- case 0x79: /* FRINTI */
- need_fpstatus = true;
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x58: /* FRINTA */
- rmode = FPROUNDING_TIEAWAY;
- need_fpstatus = true;
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x7c: /* URSQRTE */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x1e: /* FRINT32Z */
- case 0x1f: /* FRINT64Z */
- rmode = FPROUNDING_ZERO;
- /* fall through */
- case 0x5e: /* FRINT32X */
- case 0x5f: /* FRINT64X */
- need_fpstatus = true;
- if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
- unallocated_encoding(s);
- return;
- }
- break;
- default:
- unallocated_encoding(s);
- return;
- }
- break;
- }
- default:
- case 0x3: /* SUQADD, USQADD */
- unallocated_encoding(s);
- return;
- }
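+/*
+ * Vector FABS/FNEG.  fp_access_check_vector_hsd() is expected to
+ * return a negative value for an unallocated size/Q combination and
+ * 0 when the FP access check raises an exception, so "check <= 0"
+ * covers both early-exit cases.
+ */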
+static bool do_fabs_fneg_v(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
+{
+ int check = fp_access_check_vector_hsd(s, a->q, a->esz);
- if (!fp_access_check(s)) {
- return;
+ if (check <= 0) {
+ return check == 0;
}
- if (need_fpstatus || rmode >= 0) {
- tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
- } else {
- tcg_fpstatus = NULL;
- }
- if (rmode >= 0) {
- tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
- } else {
- tcg_rmode = NULL;
- }
+ gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz);
+ return true;
+}
- switch (opcode) {
- case 0x5:
- if (u && size == 0) { /* NOT */
- gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
- return;
- }
- break;
- case 0x8: /* CMGT, CMGE */
- if (u) {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
- } else {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
- }
- return;
- case 0x9: /* CMEQ, CMLE */
- if (u) {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
- } else {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
- }
- return;
- case 0xa: /* CMLT */
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
- return;
- case 0xb:
- if (u) { /* ABS, NEG */
- gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
- } else {
- gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
- }
- return;
- }
+TRANS(FABS_v, do_fabs_fneg_v, a, gen_gvec_fabs)
+TRANS(FNEG_v, do_fabs_fneg_v, a, gen_gvec_fneg)
- if (size == 3) {
- /* All 64-bit element operations can be shared with scalar 2misc */
- int pass;
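+/*
+ * Apply a one-operand FP scalar op to each element of a vector,
+ * optionally forcing the rounding mode (@rmode < 0 means use the
+ * current FPCR rounding mode).
+ */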
+static bool do_fp1_vector(DisasContext *s, arg_qrr_e *a,
+ const FPScalar1 *f, int rmode)
+{
+ TCGv_i32 tcg_rmode = NULL;
+ TCGv_ptr fpst;
+ int check = fp_access_check_vector_hsd(s, a->q, a->esz);
- /* Coverity claims (size == 3 && !is_q) has been eliminated
- * from all paths leading to here.
- */
- tcg_debug_assert(is_q);
- for (pass = 0; pass < 2; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- TCGv_i64 tcg_res = tcg_temp_new_i64();
+ if (check <= 0) {
+ return check == 0;
+ }
- read_vec_element(s, tcg_op, rn, pass, MO_64);
+ fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, fpst);
+ }
- handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
- tcg_rmode, tcg_fpstatus);
+ if (a->esz == MO_64) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
- write_vec_element(s, tcg_res, rd, pass, MO_64);
+ for (int pass = 0; pass < 2; ++pass) {
+ read_vec_element(s, t64, a->rn, pass, MO_64);
+ f->gen_d(t64, t64, fpst);
+ write_vec_element(s, t64, a->rd, pass, MO_64);
}
} else {
- int pass;
-
- for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
- TCGv_i32 tcg_res = tcg_temp_new_i32();
-
- read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
-
- if (size == 2) {
- /* Special cases for 32 bit elements */
- switch (opcode) {
- case 0x4: /* CLS */
- if (u) {
- tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
- } else {
- tcg_gen_clrsb_i32(tcg_res, tcg_op);
- }
- break;
- case 0x7: /* SQABS, SQNEG */
- if (u) {
- gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
- } else {
- gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
- }
- break;
- case 0x2f: /* FABS */
- gen_vfp_abss(tcg_res, tcg_op);
- break;
- case 0x6f: /* FNEG */
- gen_vfp_negs(tcg_res, tcg_op);
- break;
- case 0x7f: /* FSQRT */
- gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
- break;
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- gen_helper_vfp_tosls(tcg_res, tcg_op,
- tcg_constant_i32(0), tcg_fpstatus);
- break;
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- gen_helper_vfp_touls(tcg_res, tcg_op,
- tcg_constant_i32(0), tcg_fpstatus);
- break;
- case 0x18: /* FRINTN */
- case 0x19: /* FRINTM */
- case 0x38: /* FRINTP */
- case 0x39: /* FRINTZ */
- case 0x58: /* FRINTA */
- case 0x79: /* FRINTI */
- gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x59: /* FRINTX */
- gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x7c: /* URSQRTE */
- gen_helper_rsqrte_u32(tcg_res, tcg_op);
- break;
- case 0x1e: /* FRINT32Z */
- case 0x5e: /* FRINT32X */
- gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x1f: /* FRINT64Z */
- case 0x5f: /* FRINT64X */
- gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- /* Use helpers for 8 and 16 bit elements */
- switch (opcode) {
- case 0x5: /* CNT, RBIT */
- /* For these two insns size is part of the opcode specifier
- * (handled earlier); they always operate on byte elements.
- */
- if (u) {
- gen_helper_neon_rbit_u8(tcg_res, tcg_op);
- } else {
- gen_helper_neon_cnt_u8(tcg_res, tcg_op);
- }
- break;
- case 0x7: /* SQABS, SQNEG */
- {
- NeonGenOneOpEnvFn *genfn;
- static NeonGenOneOpEnvFn * const fns[2][2] = {
- { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
- { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
- };
- genfn = fns[size][u];
- genfn(tcg_res, tcg_env, tcg_op);
- break;
- }
- case 0x4: /* CLS, CLZ */
- if (u) {
- if (size == 0) {
- gen_helper_neon_clz_u8(tcg_res, tcg_op);
- } else {
- gen_helper_neon_clz_u16(tcg_res, tcg_op);
- }
- } else {
- if (size == 0) {
- gen_helper_neon_cls_s8(tcg_res, tcg_op);
- } else {
- gen_helper_neon_cls_s16(tcg_res, tcg_op);
- }
- }
- break;
- default:
- g_assert_not_reached();
- }
- }
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_ptr)
+ = (a->esz == MO_16 ? f->gen_h : f->gen_s);
- write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ for (int pass = 0, n = (a->q ? 16 : 8) >> a->esz; pass < n; ++pass) {
+ read_vec_element_i32(s, t32, a->rn, pass, a->esz);
+ gen(t32, t32, fpst);
+ write_vec_element_i32(s, t32, a->rd, pass, a->esz);
}
}
- clear_vec_high(s, is_q, rd);
+ clear_vec_high(s, a->q, a->rd);
- if (tcg_rmode) {
- gen_restore_rmode(tcg_rmode, tcg_fpstatus);
+ if (rmode >= 0) {
+ gen_restore_rmode(tcg_rmode, fpst);
}
+ return true;
}
-/* AdvSIMD [scalar] two register miscellaneous (FP16)
- *
- * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
- * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
- * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
- * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
- * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
- * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
- *
- * This actually covers two groups where scalar access is governed by
- * bit 28. A bunch of the instructions (float to integral) only exist
- * in the vector form and are un-allocated for the scalar decode. Also
- * in the scalar decode Q is always 1.
- */
-static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
-{
- int fpop, opcode, a, u;
- int rn, rd;
- bool is_q;
- bool is_scalar;
- bool only_in_vector = false;
-
- int pass;
- TCGv_i32 tcg_rmode = NULL;
- TCGv_ptr tcg_fpstatus = NULL;
- bool need_fpst = true;
- int rmode = -1;
-
- if (!dc_isar_feature(aa64_fp16, s)) {
- unallocated_encoding(s);
- return;
- }
+TRANS(FSQRT_v, do_fp1_vector, a, &f_scalar_fsqrt, -1)
- rd = extract32(insn, 0, 5);
- rn = extract32(insn, 5, 5);
+TRANS(FRINTN_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_TIEEVEN)
+TRANS(FRINTP_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_POSINF)
+TRANS(FRINTM_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_NEGINF)
+TRANS(FRINTZ_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_ZERO)
+TRANS(FRINTA_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_TIEAWAY)
+TRANS(FRINTI_v, do_fp1_vector, a, &f_scalar_frint, -1)
+TRANS(FRINTX_v, do_fp1_vector, a, &f_scalar_frintx, -1)
- a = extract32(insn, 23, 1);
- u = extract32(insn, 29, 1);
- is_scalar = extract32(insn, 28, 1);
- is_q = extract32(insn, 30, 1);
+TRANS_FEAT(FRINT32Z_v, aa64_frint, do_fp1_vector, a,
+ &f_scalar_frint32, FPROUNDING_ZERO)
+TRANS_FEAT(FRINT32X_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint32, -1)
+TRANS_FEAT(FRINT64Z_v, aa64_frint, do_fp1_vector, a,
+ &f_scalar_frint64, FPROUNDING_ZERO)
+TRANS_FEAT(FRINT64X_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint64, -1)
- opcode = extract32(insn, 12, 5);
- fpop = deposit32(opcode, 5, 1, a);
- fpop = deposit32(fpop, 6, 1, u);
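+/*
+ * Expand a two-register gvec helper that takes an fp status pointer
+ * of the given flavour.  @fns is indexed by element size minus one,
+ * i.e. { MO_16, MO_32, MO_64 }.
+ */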
+static bool do_gvec_op2_fpst_with_fpsttype(DisasContext *s, MemOp esz,
+ bool is_q, int rd, int rn, int data,
+ gen_helper_gvec_2_ptr * const fns[3],
+ ARMFPStatusFlavour fpsttype)
+{
+ int check = fp_access_check_vector_hsd(s, is_q, esz);
+ TCGv_ptr fpst;
- switch (fpop) {
- case 0x1d: /* SCVTF */
- case 0x5d: /* UCVTF */
- {
- int elements;
+ if (check <= 0) {
+ return check == 0;
+ }
- if (is_scalar) {
- elements = 1;
- } else {
- elements = (is_q ? 8 : 4);
- }
+ fpst = fpstatus_ptr(fpsttype);
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ data, fns[esz - 1]);
+ return true;
+}
- if (!fp_access_check(s)) {
- return;
- }
- handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
- return;
- }
- break;
- case 0x2c: /* FCMGT (zero) */
- case 0x2d: /* FCMEQ (zero) */
- case 0x2e: /* FCMLT (zero) */
- case 0x6c: /* FCMGE (zero) */
- case 0x6d: /* FCMLE (zero) */
- handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
- return;
- case 0x3d: /* FRECPE */
- case 0x3f: /* FRECPX */
- break;
- case 0x18: /* FRINTN */
- only_in_vector = true;
- rmode = FPROUNDING_TIEEVEN;
- break;
- case 0x19: /* FRINTM */
- only_in_vector = true;
- rmode = FPROUNDING_NEGINF;
- break;
- case 0x38: /* FRINTP */
- only_in_vector = true;
- rmode = FPROUNDING_POSINF;
- break;
- case 0x39: /* FRINTZ */
- only_in_vector = true;
- rmode = FPROUNDING_ZERO;
- break;
- case 0x58: /* FRINTA */
- only_in_vector = true;
- rmode = FPROUNDING_TIEAWAY;
- break;
- case 0x59: /* FRINTX */
- case 0x79: /* FRINTI */
- only_in_vector = true;
- /* current rounding mode */
- break;
- case 0x1a: /* FCVTNS */
- rmode = FPROUNDING_TIEEVEN;
- break;
- case 0x1b: /* FCVTMS */
- rmode = FPROUNDING_NEGINF;
- break;
- case 0x1c: /* FCVTAS */
- rmode = FPROUNDING_TIEAWAY;
- break;
- case 0x3a: /* FCVTPS */
- rmode = FPROUNDING_POSINF;
- break;
- case 0x3b: /* FCVTZS */
- rmode = FPROUNDING_ZERO;
- break;
- case 0x5a: /* FCVTNU */
- rmode = FPROUNDING_TIEEVEN;
- break;
- case 0x5b: /* FCVTMU */
- rmode = FPROUNDING_NEGINF;
- break;
- case 0x5c: /* FCVTAU */
- rmode = FPROUNDING_TIEAWAY;
- break;
- case 0x7a: /* FCVTPU */
- rmode = FPROUNDING_POSINF;
- break;
- case 0x7b: /* FCVTZU */
- rmode = FPROUNDING_ZERO;
- break;
- case 0x2f: /* FABS */
- case 0x6f: /* FNEG */
- need_fpst = false;
- break;
- case 0x7d: /* FRSQRTE */
- case 0x7f: /* FSQRT (vector) */
- break;
- default:
- unallocated_encoding(s);
- return;
- }
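+/*
+ * Wrappers that select the fp status flavour: the plain variant uses
+ * the standard A64 status for the element size, while the _ah variant
+ * honours FPCR.AH via select_ah_fpst().
+ */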
+static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q,
+ int rd, int rn, int data,
+ gen_helper_gvec_2_ptr * const fns[3])
+{
+ return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data, fns,
+ esz == MO_16 ? FPST_A64_F16 :
+ FPST_A64);
+}
+static bool do_gvec_op2_ah_fpst(DisasContext *s, MemOp esz, bool is_q,
+ int rd, int rn, int data,
+ gen_helper_gvec_2_ptr * const fns[3])
+{
+ return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data,
+ fns, select_ah_fpst(s, esz));
+}
- /* Check additional constraints for the scalar encoding */
- if (is_scalar) {
- if (!is_q) {
- unallocated_encoding(s);
- return;
- }
- /* FRINTxx is only in the vector form */
- if (only_in_vector) {
- unallocated_encoding(s);
- return;
- }
- }
+static gen_helper_gvec_2_ptr * const f_scvtf_v[] = {
+ gen_helper_gvec_vcvt_sh,
+ gen_helper_gvec_vcvt_sf,
+ gen_helper_gvec_vcvt_sd,
+};
+TRANS(SCVTF_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, 0, f_scvtf_v)
+TRANS(SCVTF_vf, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, a->shift, f_scvtf_v)
+
+static gen_helper_gvec_2_ptr * const f_ucvtf_v[] = {
+ gen_helper_gvec_vcvt_uh,
+ gen_helper_gvec_vcvt_uf,
+ gen_helper_gvec_vcvt_ud,
+};
+TRANS(UCVTF_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, 0, f_ucvtf_v)
+TRANS(UCVTF_vf, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, a->shift, f_ucvtf_v)
+
+static gen_helper_gvec_2_ptr * const f_fcvtzs_vf[] = {
+ gen_helper_gvec_vcvt_rz_hs,
+ gen_helper_gvec_vcvt_rz_fs,
+ gen_helper_gvec_vcvt_rz_ds,
+};
+TRANS(FCVTZS_vf, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, a->shift, f_fcvtzs_vf)
- if (!fp_access_check(s)) {
- return;
- }
+static gen_helper_gvec_2_ptr * const f_fcvtzu_vf[] = {
+ gen_helper_gvec_vcvt_rz_hu,
+ gen_helper_gvec_vcvt_rz_fu,
+ gen_helper_gvec_vcvt_rz_du,
+};
+TRANS(FCVTZU_vf, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, a->shift, f_fcvtzu_vf)
- if (rmode >= 0 || need_fpst) {
- tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
- }
+static gen_helper_gvec_2_ptr * const f_fcvt_s_vi[] = {
+ gen_helper_gvec_vcvt_rm_sh,
+ gen_helper_gvec_vcvt_rm_ss,
+ gen_helper_gvec_vcvt_rm_sd,
+};
- if (rmode >= 0) {
- tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
- }
+static gen_helper_gvec_2_ptr * const f_fcvt_u_vi[] = {
+ gen_helper_gvec_vcvt_rm_uh,
+ gen_helper_gvec_vcvt_rm_us,
+ gen_helper_gvec_vcvt_rm_ud,
+};
- if (is_scalar) {
- TCGv_i32 tcg_op = read_fp_hreg(s, rn);
- TCGv_i32 tcg_res = tcg_temp_new_i32();
+TRANS(FCVTNS_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_nearest_even, f_fcvt_s_vi)
+TRANS(FCVTNU_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_nearest_even, f_fcvt_u_vi)
+TRANS(FCVTPS_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_up, f_fcvt_s_vi)
+TRANS(FCVTPU_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_up, f_fcvt_u_vi)
+TRANS(FCVTMS_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_down, f_fcvt_s_vi)
+TRANS(FCVTMU_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_down, f_fcvt_u_vi)
+TRANS(FCVTZS_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_to_zero, f_fcvt_s_vi)
+TRANS(FCVTZU_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_to_zero, f_fcvt_u_vi)
+TRANS(FCVTAS_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_ties_away, f_fcvt_s_vi)
+TRANS(FCVTAU_vi, do_gvec_op2_fpst,
+ a->esz, a->q, a->rd, a->rn, float_round_ties_away, f_fcvt_u_vi)
+
+static gen_helper_gvec_2_ptr * const f_fceq0[] = {
+ gen_helper_gvec_fceq0_h,
+ gen_helper_gvec_fceq0_s,
+ gen_helper_gvec_fceq0_d,
+};
+TRANS(FCMEQ0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fceq0)
- switch (fpop) {
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x3d: /* FRECPE */
- gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x3f: /* FRECPX */
- gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x6f: /* FNEG */
- tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
- break;
- case 0x7d: /* FRSQRTE */
- gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- default:
- g_assert_not_reached();
- }
+static gen_helper_gvec_2_ptr * const f_fcgt0[] = {
+ gen_helper_gvec_fcgt0_h,
+ gen_helper_gvec_fcgt0_s,
+ gen_helper_gvec_fcgt0_d,
+};
+TRANS(FCMGT0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcgt0)
- /* limit any sign extension going on */
- tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
- write_fp_sreg(s, rd, tcg_res);
- } else {
- for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
- TCGv_i32 tcg_op = tcg_temp_new_i32();
- TCGv_i32 tcg_res = tcg_temp_new_i32();
-
- read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
-
- switch (fpop) {
- case 0x1a: /* FCVTNS */
- case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x3a: /* FCVTPS */
- case 0x3b: /* FCVTZS */
- gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x3d: /* FRECPE */
- gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x5a: /* FCVTNU */
- case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x7a: /* FCVTPU */
- case 0x7b: /* FCVTZU */
- gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x18: /* FRINTN */
- case 0x19: /* FRINTM */
- case 0x38: /* FRINTP */
- case 0x39: /* FRINTZ */
- case 0x58: /* FRINTA */
- case 0x79: /* FRINTI */
- gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x59: /* FRINTX */
- gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x2f: /* FABS */
- tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
- break;
- case 0x6f: /* FNEG */
- tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
- break;
- case 0x7d: /* FRSQRTE */
- gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- case 0x7f: /* FSQRT */
- gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
- break;
- default:
- g_assert_not_reached();
- }
+static gen_helper_gvec_2_ptr * const f_fcge0[] = {
+ gen_helper_gvec_fcge0_h,
+ gen_helper_gvec_fcge0_s,
+ gen_helper_gvec_fcge0_d,
+};
+TRANS(FCMGE0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcge0)
- write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
- }
+static gen_helper_gvec_2_ptr * const f_fclt0[] = {
+ gen_helper_gvec_fclt0_h,
+ gen_helper_gvec_fclt0_s,
+ gen_helper_gvec_fclt0_d,
+};
+TRANS(FCMLT0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fclt0)
- clear_vec_high(s, is_q, rd);
- }
+static gen_helper_gvec_2_ptr * const f_fcle0[] = {
+ gen_helper_gvec_fcle0_h,
+ gen_helper_gvec_fcle0_s,
+ gen_helper_gvec_fcle0_d,
+};
+TRANS(FCMLE0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcle0)
- if (tcg_rmode) {
- gen_restore_rmode(tcg_rmode, tcg_fpstatus);
- }
-}
+static gen_helper_gvec_2_ptr * const f_frecpe[] = {
+ gen_helper_gvec_frecpe_h,
+ gen_helper_gvec_frecpe_s,
+ gen_helper_gvec_frecpe_d,
+};
+static gen_helper_gvec_2_ptr * const f_frecpe_rpres[] = {
+ gen_helper_gvec_frecpe_h,
+ gen_helper_gvec_frecpe_rpres_s,
+ gen_helper_gvec_frecpe_d,
+};
+TRANS(FRECPE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? f_frecpe_rpres : f_frecpe)
-/* C3.6 Data processing - SIMD, inc Crypto
- *
- * As the decode gets a little complex we are using a table based
- * approach for this part of the decode.
- */
-static const AArch64DecodeTable data_proc_simd[] = {
- /* pattern , mask , fn */
- { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
- { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
- /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
- { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
- { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
- { 0x0e000000, 0xbf208c00, disas_simd_tb },
- { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
- { 0x2e000000, 0xbf208400, disas_simd_ext },
- { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
- { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
- { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
- { 0x00000000, 0x00000000, NULL }
+static gen_helper_gvec_2_ptr * const f_frsqrte[] = {
+ gen_helper_gvec_frsqrte_h,
+ gen_helper_gvec_frsqrte_s,
+ gen_helper_gvec_frsqrte_d,
+};
+static gen_helper_gvec_2_ptr * const f_frsqrte_rpres[] = {
+ gen_helper_gvec_frsqrte_h,
+ gen_helper_gvec_frsqrte_rpres_s,
+ gen_helper_gvec_frsqrte_d,
};
+TRANS(FRSQRTE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? f_frsqrte_rpres : f_frsqrte)
-static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
+static bool trans_FCVTL_v(DisasContext *s, arg_qrr_e *a)
{
- /* Note that this is called with all non-FP cases from
- * table C3-6 so it must UNDEF for entries not specifically
- * allocated to instructions in that table.
+    /* Handle 2-reg-misc ops which are widening (so each size element
+     * in the source becomes a 2*size element in the destination).
+     * The only instruction like this is FCVTL.
*/
- AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
- if (fn) {
- fn(s, insn);
- } else {
- unallocated_encoding(s);
+ int pass;
+ TCGv_ptr fpst;
+
+ if (!fp_access_check(s)) {
+ return true;
}
-}
-/* C3.6 Data processing - SIMD and floating point */
-static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
-{
- if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
- disas_data_proc_fp(s, insn);
+ if (a->esz == MO_64) {
+ /* 32 -> 64 bit fp conversion */
+ TCGv_i64 tcg_res[2];
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ int srcelt = a->q ? 2 : 0;
+
+ fpst = fpstatus_ptr(FPST_A64);
+
+ for (pass = 0; pass < 2; pass++) {
+ tcg_res[pass] = tcg_temp_new_i64();
+ read_vec_element_i32(s, tcg_op, a->rn, srcelt + pass, MO_32);
+ gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, fpst);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], a->rd, pass, MO_64);
+ }
} else {
- /* SIMD, including crypto */
- disas_data_proc_simd(s, insn);
+ /* 16 -> 32 bit fp conversion */
+ int srcelt = a->q ? 4 : 0;
+ TCGv_i32 tcg_res[4];
+ TCGv_i32 ahp = get_ahp_flag();
+
+ fpst = fpstatus_ptr(FPST_A64_F16);
+
+ for (pass = 0; pass < 4; pass++) {
+ tcg_res[pass] = tcg_temp_new_i32();
+ read_vec_element_i32(s, tcg_res[pass], a->rn, srcelt + pass, MO_16);
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
+ fpst, ahp);
+ }
+ for (pass = 0; pass < 4; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], a->rd, pass, MO_32);
+ }
}
+ clear_vec_high(s, true, a->rd);
+ return true;
}
static bool trans_OK(DisasContext *s, arg_OK *a)
@@ -11879,37 +10042,6 @@ static bool trans_FAIL(DisasContext *s, arg_OK *a)
}
/**
- * is_guarded_page:
- * @env: The cpu environment
- * @s: The DisasContext
- *
- * Return true if the page is guarded.
- */
-static bool is_guarded_page(CPUARMState *env, DisasContext *s)
-{
- uint64_t addr = s->base.pc_first;
-#ifdef CONFIG_USER_ONLY
- return page_get_flags(addr) & PAGE_BTI;
-#else
- CPUTLBEntryFull *full;
- void *host;
- int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
- int flags;
-
- /*
- * We test this immediately after reading an insn, which means
- * that the TLB entry must be present and valid, and thus this
- * access will never raise an exception.
- */
- flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
- false, &host, &full, 0);
- assert(!(flags & TLB_INVALID_MASK));
-
- return full->extra.arm.guarded;
-#endif
-}
-
-/**
* btype_destination_ok:
* @insn: The instruction at the branch destination
* @bt: SCTLR_ELx.BT
@@ -11961,24 +10093,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
return false;
}
-/* C3.1 A64 instruction index by encoding */
-static void disas_a64_legacy(DisasContext *s, uint32_t insn)
-{
- switch (extract32(insn, 25, 4)) {
- case 0x5:
- case 0xd: /* Data processing - register */
- disas_data_proc_reg(s, insn);
- break;
- case 0x7:
- case 0xf: /* Data processing - SIMD and floating point */
- disas_data_proc_simd_fp(s, insn);
- break;
- default:
- unallocated_encoding(s);
- break;
- }
-}
-
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cpu)
{
@@ -12033,6 +10147,8 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
+ dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH);
+ dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;
@@ -12126,7 +10242,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* start of the TB.
*/
assert(s->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
s->base.is_jmp = DISAS_NORETURN;
s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
@@ -12137,8 +10253,8 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
s->insn = insn;
s->base.pc_next = pc + 4;
- s->fp_access_checked = false;
- s->sve_access_checked = false;
+ s->fp_access_checked = 0;
+ s->sve_access_checked = 0;
if (s->pstate_il) {
/*
@@ -12151,19 +10267,6 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
if (dc_isar_feature(aa64_bti, s)) {
if (s->base.num_insns == 1) {
- /*
- * At the first insn of the TB, compute s->guarded_page.
- * We delayed computing this until successfully reading
- * the first insn of the TB, above. This (mostly) ensures
- * that the softmmu tlb entry has been populated, and the
- * page table GP bit is available.
- *
- * Note that we need to compute this even if btype == 0,
- * because this value is used for BR instructions later
- * where ENV is not available.
- */
- s->guarded_page = is_guarded_page(env, s);
-
/* First insn can have btype set to non-zero. */
tcg_debug_assert(s->btype >= 0);
@@ -12172,12 +10275,13 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* priority -- below debugging exceptions but above most
* everything else. This allows us to handle this now
* instead of waiting until the insn is otherwise decoded.
+ *
+             * We can perform all the checks except the guarded-page
+             * check here; defer that one to a helper.
*/
if (s->btype != 0
- && s->guarded_page
&& !btype_destination_ok(insn, s->bt, s->btype)) {
- gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
- return;
+ gen_helper_guarded_page_check(tcg_env);
}
} else {
/* Not the first insn: btype must be 0. */
@@ -12193,7 +10297,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
if (!disas_a64(s, insn) &&
!disas_sme(s, insn) &&
!disas_sve(s, insn)) {
- disas_a64_legacy(s, insn);
+ unallocated_encoding(s);
}
/*
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 0fcf7cb..b2420f5 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -65,7 +65,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
- if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+ if (unlikely(s->fp_access_checked <= 0)) {
fprintf(stderr, "target-arm: FP access check missing for "
"instruction 0x%08x\n", s->insn);
abort();
@@ -185,6 +185,19 @@ static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
return ret;
}
+/*
+ * Return the ARMFPStatusFlavour to use based on element size and
+ * whether FPCR.AH is set.
+ */
+static inline ARMFPStatusFlavour select_ah_fpst(DisasContext *s, MemOp esz)
+{
+ if (s->fpcr_ah) {
+ return esz == MO_16 ? FPST_AH_F16 : FPST_AH;
+ } else {
+ return esz == MO_16 ? FPST_A64_F16 : FPST_A64;
+ }
+}
+
bool disas_sve(DisasContext *, uint32_t);
bool disas_sme(DisasContext *, uint32_t);
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
index 915c9e5..c4fecb8 100644
--- a/target/arm/tcg/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
@@ -148,6 +148,37 @@ static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm,
return true;
}
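+/*
+ * As do_neon_ddda, but for gvec helpers that take tcg_env; used below
+ * for the bfloat16 helpers, which now take the CPU env.
+ */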
+static bool do_neon_ddda_env(DisasContext *s, int q, int vd, int vn, int vm,
+ int data, gen_helper_gvec_4_ptr *fn_gvec)
+{
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
+ return false;
+ }
+
+ /*
+ * UNDEF accesses to odd registers for each bit of Q.
+     * Q will be 0b111 for all Q-reg instructions; other values
+     * arise only when we have mixed Q- and D-reg inputs.
+ */
+ if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ int opr_sz = q ? 16 : 8;
+ tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
+ vfp_reg_offset(1, vn),
+ vfp_reg_offset(1, vm),
+ vfp_reg_offset(1, vd),
+ tcg_env,
+ opr_sz, opr_sz, data, fn_gvec);
+ return true;
+}
+
static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm,
int data, ARMFPStatusFlavour fp_flavour,
gen_helper_gvec_4_ptr *fn_gvec_ptr)
@@ -266,8 +297,8 @@ static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
if (!dc_isar_feature(aa32_bf16, s)) {
return false;
}
- return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
- gen_helper_gvec_bfdot);
+ return do_neon_ddda_env(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_bfdot);
}
static bool trans_VFML(DisasContext *s, arg_VFML *a)
@@ -360,8 +391,8 @@ static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
if (!dc_isar_feature(aa32_bf16, s)) {
return false;
}
- return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
- gen_helper_gvec_bfdot_idx);
+ return do_neon_ddda_env(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_bfdot_idx);
}
static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
@@ -1068,144 +1099,18 @@ DO_2SH(VRSHR_S, gen_gvec_srshr)
DO_2SH(VRSHR_U, gen_gvec_urshr)
DO_2SH(VRSRA_S, gen_gvec_srsra)
DO_2SH(VRSRA_U, gen_gvec_ursra)
-
-static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
-{
- /* Signed shift out of range results in all-sign-bits */
- a->shift = MIN(a->shift, (8 << a->size) - 1);
- return do_vector_2sh(s, a, tcg_gen_gvec_sari);
-}
-
-static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
-{
- tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
-}
-
-static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
-{
- /* Shift out of range is architecturally valid and results in zero. */
- if (a->shift >= (8 << a->size)) {
- return do_vector_2sh(s, a, gen_zero_rd_2sh);
- } else {
- return do_vector_2sh(s, a, tcg_gen_gvec_shri);
- }
-}
-
-static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
- NeonGenTwo64OpEnvFn *fn)
-{
- /*
- * 2-reg-and-shift operations, size == 3 case, where the
- * function needs to be passed tcg_env.
- */
- TCGv_i64 constimm;
- int pass;
-
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
- return false;
- }
-
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vm) & 0x10)) {
- return false;
- }
-
- if ((a->vm | a->vd) & a->q) {
- return false;
- }
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- /*
- * To avoid excessive duplication of ops we implement shift
- * by immediate using the variable shift operations.
- */
- constimm = tcg_constant_i64(dup_const(a->size, a->shift));
-
- for (pass = 0; pass < a->q + 1; pass++) {
- TCGv_i64 tmp = tcg_temp_new_i64();
-
- read_neon_element64(tmp, a->vm, pass, MO_64);
- fn(tmp, tcg_env, tmp, constimm);
- write_neon_element64(tmp, a->vd, pass, MO_64);
- }
- return true;
-}
-
-static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
- NeonGenTwoOpEnvFn *fn)
-{
- /*
- * 2-reg-and-shift operations, size < 3 case, where the
- * helper needs to be passed tcg_env.
- */
- TCGv_i32 constimm, tmp;
- int pass;
-
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
- return false;
- }
-
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vm) & 0x10)) {
- return false;
- }
-
- if ((a->vm | a->vd) & a->q) {
- return false;
- }
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- /*
- * To avoid excessive duplication of ops we implement shift
- * by immediate using the variable shift operations.
- */
- constimm = tcg_constant_i32(dup_const(a->size, a->shift));
- tmp = tcg_temp_new_i32();
-
- for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
- read_neon_element32(tmp, a->vm, pass, MO_32);
- fn(tmp, tcg_env, tmp, constimm);
- write_neon_element32(tmp, a->vd, pass, MO_32);
- }
- return true;
-}
-
-#define DO_2SHIFT_ENV(INSN, FUNC) \
- static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
- { \
- return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
- } \
- static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
- { \
- static NeonGenTwoOpEnvFn * const fns[] = { \
- gen_helper_neon_##FUNC##8, \
- gen_helper_neon_##FUNC##16, \
- gen_helper_neon_##FUNC##32, \
- }; \
- assert(a->size < ARRAY_SIZE(fns)); \
- return do_2shift_env_32(s, a, fns[a->size]); \
- }
-
-DO_2SHIFT_ENV(VQSHLU, qshlu_s)
-DO_2SHIFT_ENV(VQSHL_U, qshl_u)
-DO_2SHIFT_ENV(VQSHL_S, qshl_s)
+DO_2SH(VSHR_S, gen_gvec_sshr)
+DO_2SH(VSHR_U, gen_gvec_ushr)
+DO_2SH(VQSHLU, gen_neon_sqshlui)
+DO_2SH(VQSHL_U, gen_neon_uqshli)
+DO_2SH(VQSHL_S, gen_neon_sqshli)
static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
NeonGenTwo64OpFn *shiftfn,
- NeonGenNarrowEnvFn *narrowfn)
+ NeonGenOne64OpEnvFn *narrowfn)
{
/* 2-reg-and-shift narrowing-shift operations, size == 3 case */
- TCGv_i64 constimm, rm1, rm2;
- TCGv_i32 rd;
+ TCGv_i64 constimm, rm1, rm2, rd;
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@@ -1232,7 +1137,7 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
constimm = tcg_constant_i64(-a->shift);
rm1 = tcg_temp_new_i64();
rm2 = tcg_temp_new_i64();
- rd = tcg_temp_new_i32();
+ rd = tcg_temp_new_i64();
/* Load both inputs first to avoid potential overwrite if rm == rd */
read_neon_element64(rm1, a->vm, 0, MO_64);
@@ -1240,18 +1145,18 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
shiftfn(rm1, rm1, constimm);
narrowfn(rd, tcg_env, rm1);
- write_neon_element32(rd, a->vd, 0, MO_32);
+ write_neon_element64(rd, a->vd, 0, MO_32);
shiftfn(rm2, rm2, constimm);
narrowfn(rd, tcg_env, rm2);
- write_neon_element32(rd, a->vd, 1, MO_32);
+ write_neon_element64(rd, a->vd, 1, MO_32);
return true;
}
static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
NeonGenTwoOpFn *shiftfn,
- NeonGenNarrowEnvFn *narrowfn)
+ NeonGenOne64OpEnvFn *narrowfn)
{
/* 2-reg-and-shift narrowing-shift operations, size < 3 case */
TCGv_i32 constimm, rm1, rm2, rm3, rm4;
@@ -1306,16 +1211,16 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
- narrowfn(rm1, tcg_env, rtmp);
- write_neon_element32(rm1, a->vd, 0, MO_32);
+ narrowfn(rtmp, tcg_env, rtmp);
+ write_neon_element64(rtmp, a->vd, 0, MO_32);
shiftfn(rm3, rm3, constimm);
shiftfn(rm4, rm4, constimm);
tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
- narrowfn(rm3, tcg_env, rtmp);
- write_neon_element32(rm3, a->vd, 1, MO_32);
+ narrowfn(rtmp, tcg_env, rtmp);
+ write_neon_element64(rtmp, a->vd, 1, MO_32);
return true;
}
@@ -1330,17 +1235,17 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
}
-static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+static void gen_neon_narrow_u32(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
- tcg_gen_extrl_i64_i32(dest, src);
+ tcg_gen_ext32u_i64(dest, src);
}
-static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+static void gen_neon_narrow_u16(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
gen_helper_neon_narrow_u16(dest, src);
}
-static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+static void gen_neon_narrow_u8(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
gen_helper_neon_narrow_u8(dest, src);
}
@@ -1504,13 +1409,13 @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
DO_FP_2SH(VCVT_SF, gen_helper_gvec_vcvt_sf)
DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf)
-DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_fs)
-DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_fu)
+DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_rz_fs)
+DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_rz_fu)
DO_FP_2SH(VCVT_SH, gen_helper_gvec_vcvt_sh)
DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
-DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
-DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
+DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_rz_hs)
+DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_rz_hu)
static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
GVecGen2iFn *fn)
@@ -1655,8 +1560,8 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
NULL, NULL, \
}; \
static NeonGenTwo64OpFn * const addfn[] = { \
- gen_helper_neon_##OP##l_u16, \
- gen_helper_neon_##OP##l_u32, \
+ tcg_gen_vec_##OP##16_i64, \
+ tcg_gen_vec_##OP##32_i64, \
tcg_gen_##OP##_i64, \
NULL, \
}; \
@@ -1734,8 +1639,8 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
{ \
static NeonGenTwo64OpFn * const addfn[] = { \
- gen_helper_neon_##OP##l_u16, \
- gen_helper_neon_##OP##l_u32, \
+ tcg_gen_vec_##OP##16_i64, \
+ tcg_gen_vec_##OP##32_i64, \
tcg_gen_##OP##_i64, \
NULL, \
}; \
@@ -1856,8 +1761,8 @@ static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
NULL,
};
static NeonGenTwo64OpFn * const addfn[] = {
- gen_helper_neon_addl_u16,
- gen_helper_neon_addl_u32,
+ tcg_gen_vec_add16_i64,
+ tcg_gen_vec_add32_i64,
tcg_gen_add_i64,
NULL,
};
@@ -1874,8 +1779,8 @@ static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
NULL,
};
static NeonGenTwo64OpFn * const addfn[] = {
- gen_helper_neon_addl_u16,
- gen_helper_neon_addl_u32,
+ tcg_gen_vec_add16_i64,
+ tcg_gen_vec_add32_i64,
tcg_gen_add_i64,
NULL,
};
@@ -1935,8 +1840,8 @@ static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
NULL, \
}; \
static NeonGenTwo64OpFn * const accfn[] = { \
- gen_helper_neon_##ACC##l_u16, \
- gen_helper_neon_##ACC##l_u32, \
+ tcg_gen_vec_##ACC##16_i64, \
+ tcg_gen_vec_##ACC##32_i64, \
tcg_gen_##ACC##_i64, \
NULL, \
}; \
@@ -2466,7 +2371,7 @@ static bool trans_VMULL_U_2sc(DisasContext *s, arg_2scalar *a)
}; \
static NeonGenTwo64OpFn * const accfn[] = { \
NULL, \
- gen_helper_neon_##ACC##l_u32, \
+ tcg_gen_vec_##ACC##32_i64, \
tcg_gen_##ACC##_i64, \
NULL, \
}; \
@@ -2660,204 +2565,6 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
return true;
}
-static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
-{
- int pass, half;
- TCGv_i32 tmp[2];
-
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
- return false;
- }
-
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vm) & 0x10)) {
- return false;
- }
-
- if ((a->vd | a->vm) & a->q) {
- return false;
- }
-
- if (a->size == 3) {
- return false;
- }
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- tmp[0] = tcg_temp_new_i32();
- tmp[1] = tcg_temp_new_i32();
-
- for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
- for (half = 0; half < 2; half++) {
- read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
- switch (a->size) {
- case 0:
- tcg_gen_bswap32_i32(tmp[half], tmp[half]);
- break;
- case 1:
- gen_swap_half(tmp[half], tmp[half]);
- break;
- case 2:
- break;
- default:
- g_assert_not_reached();
- }
- }
- write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
- write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
- }
- return true;
-}
-
-static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
- NeonGenWidenFn *widenfn,
- NeonGenTwo64OpFn *opfn,
- NeonGenTwo64OpFn *accfn)
-{
- /*
- * Pairwise long operations: widen both halves of the pair,
- * combine the pairs with the opfn, and then possibly accumulate
- * into the destination with the accfn.
- */
- int pass;
-
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
- return false;
- }
-
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vm) & 0x10)) {
- return false;
- }
-
- if ((a->vd | a->vm) & a->q) {
- return false;
- }
-
- if (!widenfn) {
- return false;
- }
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- for (pass = 0; pass < a->q + 1; pass++) {
- TCGv_i32 tmp;
- TCGv_i64 rm0_64, rm1_64, rd_64;
-
- rm0_64 = tcg_temp_new_i64();
- rm1_64 = tcg_temp_new_i64();
- rd_64 = tcg_temp_new_i64();
-
- tmp = tcg_temp_new_i32();
- read_neon_element32(tmp, a->vm, pass * 2, MO_32);
- widenfn(rm0_64, tmp);
- read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
- widenfn(rm1_64, tmp);
-
- opfn(rd_64, rm0_64, rm1_64);
-
- if (accfn) {
- TCGv_i64 tmp64 = tcg_temp_new_i64();
- read_neon_element64(tmp64, a->vd, pass, MO_64);
- accfn(rd_64, tmp64, rd_64);
- }
- write_neon_element64(rd_64, a->vd, pass, MO_64);
- }
- return true;
-}
-
-static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
-{
- static NeonGenWidenFn * const widenfn[] = {
- gen_helper_neon_widen_s8,
- gen_helper_neon_widen_s16,
- tcg_gen_ext_i32_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const opfn[] = {
- gen_helper_neon_paddl_u16,
- gen_helper_neon_paddl_u32,
- tcg_gen_add_i64,
- NULL,
- };
-
- return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
-}
-
-static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
-{
- static NeonGenWidenFn * const widenfn[] = {
- gen_helper_neon_widen_u8,
- gen_helper_neon_widen_u16,
- tcg_gen_extu_i32_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const opfn[] = {
- gen_helper_neon_paddl_u16,
- gen_helper_neon_paddl_u32,
- tcg_gen_add_i64,
- NULL,
- };
-
- return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
-}
-
-static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
-{
- static NeonGenWidenFn * const widenfn[] = {
- gen_helper_neon_widen_s8,
- gen_helper_neon_widen_s16,
- tcg_gen_ext_i32_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const opfn[] = {
- gen_helper_neon_paddl_u16,
- gen_helper_neon_paddl_u32,
- tcg_gen_add_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const accfn[] = {
- gen_helper_neon_addl_u16,
- gen_helper_neon_addl_u32,
- tcg_gen_add_i64,
- NULL,
- };
-
- return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
- accfn[a->size]);
-}
-
-static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
-{
- static NeonGenWidenFn * const widenfn[] = {
- gen_helper_neon_widen_u8,
- gen_helper_neon_widen_u16,
- tcg_gen_extu_i32_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const opfn[] = {
- gen_helper_neon_paddl_u16,
- gen_helper_neon_paddl_u32,
- tcg_gen_add_i64,
- NULL,
- };
- static NeonGenTwo64OpFn * const accfn[] = {
- gen_helper_neon_addl_u16,
- gen_helper_neon_addl_u32,
- tcg_gen_add_i64,
- NULL,
- };
-
- return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
- accfn[a->size]);
-}
-
typedef void ZipFn(TCGv_ptr, TCGv_ptr);
static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
@@ -2931,10 +2638,9 @@ static bool trans_VZIP(DisasContext *s, arg_2misc *a)
}
static bool do_vmovn(DisasContext *s, arg_2misc *a,
- NeonGenNarrowEnvFn *narrowfn)
+ NeonGenOne64OpEnvFn *narrowfn)
{
- TCGv_i64 rm;
- TCGv_i32 rd0, rd1;
+ TCGv_i64 rm, rd0, rd1;
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@@ -2959,22 +2665,22 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
}
rm = tcg_temp_new_i64();
- rd0 = tcg_temp_new_i32();
- rd1 = tcg_temp_new_i32();
+ rd0 = tcg_temp_new_i64();
+ rd1 = tcg_temp_new_i64();
read_neon_element64(rm, a->vm, 0, MO_64);
narrowfn(rd0, tcg_env, rm);
read_neon_element64(rm, a->vm, 1, MO_64);
narrowfn(rd1, tcg_env, rm);
- write_neon_element32(rd0, a->vd, 0, MO_32);
- write_neon_element32(rd1, a->vd, 1, MO_32);
+ write_neon_element64(rd0, a->vd, 0, MO_32);
+ write_neon_element64(rd1, a->vd, 1, MO_32);
return true;
}
#define DO_VMOVN(INSN, FUNC) \
static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
{ \
- static NeonGenNarrowEnvFn * const narrowfn[] = { \
+ static NeonGenOne64OpEnvFn * const narrowfn[] = { \
FUNC##8, \
FUNC##16, \
FUNC##32, \
@@ -3216,6 +2922,13 @@ DO_2MISC_VEC(VCGT0, gen_gvec_cgt0)
DO_2MISC_VEC(VCLE0, gen_gvec_cle0)
DO_2MISC_VEC(VCGE0, gen_gvec_cge0)
DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
+DO_2MISC_VEC(VCLS, gen_gvec_cls)
+DO_2MISC_VEC(VCLZ, gen_gvec_clz)
+DO_2MISC_VEC(VREV64, gen_gvec_rev64)
+DO_2MISC_VEC(VPADDL_S, gen_gvec_saddlp)
+DO_2MISC_VEC(VPADDL_U, gen_gvec_uaddlp)
+DO_2MISC_VEC(VPADAL_S, gen_gvec_sadalp)
+DO_2MISC_VEC(VPADAL_U, gen_gvec_uadalp)
static bool trans_VMVN(DisasContext *s, arg_2misc *a)
{
@@ -3225,6 +2938,30 @@ static bool trans_VMVN(DisasContext *s, arg_2misc *a)
return do_2misc_vec(s, a, tcg_gen_gvec_not);
}
+static bool trans_VCNT(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_2misc_vec(s, a, gen_gvec_cnt);
+}
+
+static bool trans_VREV16(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_2misc_vec(s, a, gen_gvec_rev16);
+}
+
+static bool trans_VREV32(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0 && a->size != 1) {
+ return false;
+ }
+ return do_2misc_vec(s, a, gen_gvec_rev32);
+}
+
#define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA) \
static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
uint32_t rm_ofs, uint32_t oprsz, \
@@ -3304,68 +3041,6 @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
return true;
}
-static bool trans_VREV32(DisasContext *s, arg_2misc *a)
-{
- static NeonGenOneOpFn * const fn[] = {
- tcg_gen_bswap32_i32,
- gen_swap_half,
- NULL,
- NULL,
- };
- return do_2misc(s, a, fn[a->size]);
-}
-
-static bool trans_VREV16(DisasContext *s, arg_2misc *a)
-{
- if (a->size != 0) {
- return false;
- }
- return do_2misc(s, a, gen_rev16);
-}
-
-static bool trans_VCLS(DisasContext *s, arg_2misc *a)
-{
- static NeonGenOneOpFn * const fn[] = {
- gen_helper_neon_cls_s8,
- gen_helper_neon_cls_s16,
- gen_helper_neon_cls_s32,
- NULL,
- };
- return do_2misc(s, a, fn[a->size]);
-}
-
-static void do_VCLZ_32(TCGv_i32 rd, TCGv_i32 rm)
-{
- tcg_gen_clzi_i32(rd, rm, 32);
-}
-
-static bool trans_VCLZ(DisasContext *s, arg_2misc *a)
-{
- static NeonGenOneOpFn * const fn[] = {
- gen_helper_neon_clz_u8,
- gen_helper_neon_clz_u16,
- do_VCLZ_32,
- NULL,
- };
- return do_2misc(s, a, fn[a->size]);
-}
-
-static bool trans_VCNT(DisasContext *s, arg_2misc *a)
-{
- if (a->size != 0) {
- return false;
- }
- return do_2misc(s, a, gen_helper_neon_cnt_u8);
-}
-
-static void gen_VABS_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
- uint32_t oprsz, uint32_t maxsz)
-{
- tcg_gen_gvec_andi(vece, rd_ofs, rm_ofs,
- vece == MO_16 ? 0x7fff : 0x7fffffff,
- oprsz, maxsz);
-}
-
static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
{
if (a->size == MO_16) {
@@ -3375,15 +3050,7 @@ static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
} else if (a->size != MO_32) {
return false;
}
- return do_2misc_vec(s, a, gen_VABS_F);
-}
-
-static void gen_VNEG_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
- uint32_t oprsz, uint32_t maxsz)
-{
- tcg_gen_gvec_xori(vece, rd_ofs, rm_ofs,
- vece == MO_16 ? 0x8000 : 0x80000000,
- oprsz, maxsz);
+ return do_2misc_vec(s, a, gen_gvec_fabs);
}
static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
@@ -3395,7 +3062,7 @@ static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
} else if (a->size != MO_32) {
return false;
}
- return do_2misc_vec(s, a, gen_VNEG_F);
+ return do_2misc_vec(s, a, gen_gvec_fneg);
}
static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
@@ -3403,7 +3070,7 @@ static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
if (a->size != 2) {
return false;
}
- return do_2misc(s, a, gen_helper_recpe_u32);
+ return do_2misc_vec(s, a, gen_gvec_urecpe);
}
static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
@@ -3411,7 +3078,7 @@ static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
if (a->size != 2) {
return false;
}
- return do_2misc(s, a, gen_helper_rsqrte_u32);
+ return do_2misc_vec(s, a, gen_gvec_ursqrte);
}
#define WRAP_1OP_ENV_FN(WRAPNAME, FUNC) \
@@ -3699,8 +3366,8 @@ static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
if (!dc_isar_feature(aa32_bf16, s)) {
return false;
}
- return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
- gen_helper_gvec_bfmmla);
+ return do_neon_ddda_env(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_bfmmla);
}
static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
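
The deleted gen_VABS_F/gen_VNEG_F wrappers above (their job now done by gen_gvec_fabs/gen_gvec_fneg) implement float abs and neg as plain integer masks on the IEEE sign bit. A standalone scalar sketch of that bit trick, with helper names invented here rather than taken from QEMU:

#include <assert.h>
#include <stdint.h>

/* Float abs/neg as pure integer operations on the IEEE encoding:
   clear the sign bit for abs, flip it for neg. */
static uint32_t fabs_bits32(uint32_t f) { return f & 0x7fffffff; }
static uint32_t fneg_bits32(uint32_t f) { return f ^ 0x80000000; }
static uint16_t fabs_bits16(uint16_t f) { return f & 0x7fff; }
static uint16_t fneg_bits16(uint16_t f) { return f ^ 0x8000; }

int main(void)
{
    assert(fabs_bits32(0xbf800000) == 0x3f800000);  /* -1.0f -> 1.0f */
    assert(fneg_bits32(0x3f800000) == 0xbf800000);  /*  1.0f -> -1.0f */
    assert(fabs_bits16(0xbc00) == 0x3c00);          /* -1.0 (f16) -> 1.0 */
    assert(fneg_bits16(0x7e00) == 0xfe00);          /* sign of a NaN flips too */
    return 0;
}
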
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
index 185a8a9..fcbb350 100644
--- a/target/arm/tcg/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
@@ -49,7 +49,15 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
/* Prepare a power-of-two modulo via extraction of @len bits. */
len = ctz32(streaming_vec_reg_size(s)) - esz;
- if (vertical) {
+ if (!len) {
+ /*
+ * SVL is 128 and the element size is 128. There is exactly
+ * one 128x128 tile in the ZA storage, and so we calculate
+ * (Rs + imm) MOD 1, which is always 0. We need to special case
+ * this because TCG doesn't allow deposit ops with len 0.
+ */
+ tcg_gen_movi_i32(tmp, 0);
+ } else if (vertical) {
/*
* Compute the byte offset of the index within the tile:
* (index % (svl / size)) * size
@@ -326,15 +334,35 @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
return true;
}
-TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a,
- MO_32, FPST_FPCR_F16, gen_helper_sme_fmopa_h)
+static bool do_outprod_env(DisasContext *s, arg_op *a, MemOp esz,
+ gen_helper_gvec_5_ptr *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, a->sub);
+ TCGv_ptr za, zn, zm, pn, pm;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ za = get_tile(s, esz, a->zad);
+ zn = vec_full_reg_ptr(s, a->zn);
+ zm = vec_full_reg_ptr(s, a->zm);
+ pn = pred_full_reg_ptr(s, a->pn);
+ pm = pred_full_reg_ptr(s, a->pm);
+
+ fn(za, zn, zm, pn, pm, tcg_env, tcg_constant_i32(desc));
+ return true;
+}
+
+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_env, a,
+ MO_32, gen_helper_sme_fmopa_h)
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a,
- MO_32, FPST_FPCR, gen_helper_sme_fmopa_s)
+ MO_32, FPST_A64, gen_helper_sme_fmopa_s)
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a,
- MO_64, FPST_FPCR, gen_helper_sme_fmopa_d)
+ MO_64, FPST_A64, gen_helper_sme_fmopa_d)
-/* TODO: FEAT_EBF16 */
-TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod_env, a, MO_32, gen_helper_sme_bfmopa)
TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
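
The new len == 0 special case in get_tile_rowcol() above comes down to arithmetic: reducing (Rs + imm) modulo a power of two is just keeping the low len bits, and len == 0 means MOD 1, which is always 0 — but TCG's deposit op needs a non-zero length, hence the explicit move of 0. A minimal scalar sketch of that arithmetic (names here are illustrative, not QEMU APIs):

#include <assert.h>
#include <stdint.h>

/* Reduce (rs + imm) modulo 2^len by keeping only the low 'len' bits. */
static uint32_t index_mod_pow2(uint32_t rs, uint32_t imm, unsigned len)
{
    if (len == 0) {
        /* 2^0 == 1: anything MOD 1 is 0 (the SVL == esz == 128 case). */
        return 0;
    }
    return (rs + imm) & ((1u << len) - 1);
}

int main(void)
{
    assert(index_mod_pow2(5, 3, 0) == 0);   /* exactly one 128x128 tile */
    assert(index_mod_pow2(5, 3, 2) == 0);   /* 8 MOD 4 */
    assert(index_mod_pow2(5, 2, 3) == 7);   /* 7 MOD 8 */
    return 0;
}
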
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 798ab2b..f3cf028 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -50,13 +50,27 @@ static int tszimm_esz(DisasContext *s, int x)
static int tszimm_shr(DisasContext *s, int x)
{
- return (16 << tszimm_esz(s, x)) - x;
+ /*
+ * We won't use the tszimm_shr() value if tszimm_esz() returns -1 (the
+ * trans function will check for esz < 0), so we can return any
+ * value we like from here in that case as long as we avoid UB.
+ */
+ int esz = tszimm_esz(s, x);
+ if (esz < 0) {
+ return esz;
+ }
+ return (16 << esz) - x;
}
/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
- return x - (8 << tszimm_esz(s, x));
+ /* As with tszimm_shr(), value will be unused if esz < 0 */
+ int esz = tszimm_esz(s, x);
+ if (esz < 0) {
+ return esz;
+ }
+ return x - (8 << esz);
}
/* The SH bit is in bit 8. Extract the low 8 and shift. */
@@ -123,11 +137,11 @@ static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
return true;
}
-static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
- arg_rr_esz *a, int data)
+static bool gen_gvec_fpst_ah_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
+ arg_rr_esz *a, int data)
{
return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ select_ah_fpst(s, a->esz));
}
/* Invoke an out-of-line helper on 3 Zregs. */
@@ -177,7 +191,14 @@ static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
arg_rrr_esz *a, int data)
{
return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+}
+
+static bool gen_gvec_fpst_ah_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
+ arg_rrr_esz *a, int data)
+{
+ return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
+ select_ah_fpst(s, a->esz));
}
/* Invoke an out-of-line helper on 4 Zregs. */
@@ -238,6 +259,25 @@ static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
return ret;
}
+static bool gen_gvec_env_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
+ int rd, int rn, int rm, int ra,
+ int data)
+{
+ return gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, tcg_env);
+}
+
+static bool gen_gvec_env_arg_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
+ arg_rrrr_esz *a, int data)
+{
+ return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+}
+
+static bool gen_gvec_env_arg_zzxz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
+ arg_rrxr_esz *a)
+{
+ return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
+}
+
/* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */
static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn,
int rd, int rn, int rm, int ra, int pg,
@@ -364,7 +404,7 @@ static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
arg_rprr_esz *a)
{
return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
}
/* Invoke a vector expander on two Zregs and an immediate. */
@@ -563,14 +603,8 @@ static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
TCGv_vec m, TCGv_vec k)
{
- if (TCG_TARGET_HAS_bitsel_vec) {
- tcg_gen_not_vec(vece, n, n);
- tcg_gen_bitsel_vec(vece, d, k, n, m);
- } else {
- tcg_gen_andc_vec(vece, n, k, n);
- tcg_gen_andc_vec(vece, m, m, k);
- tcg_gen_or_vec(vece, d, n, m);
- }
+ tcg_gen_not_vec(vece, n, n);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
}
static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
@@ -595,7 +629,7 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
* = | ~(m | k)
*/
tcg_gen_and_i64(n, n, k);
- if (TCG_TARGET_HAS_orc_i64) {
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
tcg_gen_or_i64(m, m, k);
tcg_gen_orc_i64(d, n, m);
} else {
@@ -607,14 +641,8 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
TCGv_vec m, TCGv_vec k)
{
- if (TCG_TARGET_HAS_bitsel_vec) {
- tcg_gen_not_vec(vece, m, m);
- tcg_gen_bitsel_vec(vece, d, k, n, m);
- } else {
- tcg_gen_and_vec(vece, n, n, k);
- tcg_gen_or_vec(vece, m, m, k);
- tcg_gen_orc_vec(vece, d, n, m);
- }
+ tcg_gen_not_vec(vece, m, m);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
}
static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
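
The gen_bsl1n_vec()/gen_bsl2n_vec() hunks above drop the non-bitsel fallback and rely on the identities BSL1N = bitsel(k, ~n, m) and BSL2N = bitsel(k, n, ~m). A small scalar model (plain C, not TCG) that checks those identities against the and/or forms the old fallback generated:

#include <assert.h>
#include <stdint.h>

/* bitsel(k, a, b): per bit, pick from 'a' where k is 1, from 'b' where k is 0. */
static uint64_t bitsel(uint64_t k, uint64_t a, uint64_t b)
{
    return (k & a) | (~k & b);
}

/* BSL1N: d = (k & ~n) | (~k & m), i.e. bitsel with the first operand inverted. */
static uint64_t bsl1n(uint64_t n, uint64_t m, uint64_t k)
{
    return bitsel(k, ~n, m);
}

/* BSL2N: d = (k & n) | (~k & ~m), i.e. bitsel with the second operand inverted. */
static uint64_t bsl2n(uint64_t n, uint64_t m, uint64_t k)
{
    return bitsel(k, n, ~m);
}

int main(void)
{
    uint64_t n = 0x0123456789abcdefull;
    uint64_t m = 0xfedcba9876543210ull;
    uint64_t k = 0x00ff00ff00ff00ffull;

    /* The expanded and/or forms that the removed fallback code computed. */
    assert(bsl1n(n, m, k) == ((k & ~n) | (m & ~k)));
    assert(bsl2n(n, m, k) == ((n & k) | ~(m | k)));
    return 0;
}
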
@@ -755,13 +783,23 @@ static gen_helper_gvec_3 * const fabs_fns[4] = {
NULL, gen_helper_sve_fabs_h,
gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
};
-TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)
+static gen_helper_gvec_3 * const fabs_ah_fns[4] = {
+ NULL, gen_helper_sve_ah_fabs_h,
+ gen_helper_sve_ah_fabs_s, gen_helper_sve_ah_fabs_d,
+};
+TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz,
+ s->fpcr_ah ? fabs_ah_fns[a->esz] : fabs_fns[a->esz], a, 0)
static gen_helper_gvec_3 * const fneg_fns[4] = {
NULL, gen_helper_sve_fneg_h,
gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
};
-TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)
+static gen_helper_gvec_3 * const fneg_ah_fns[4] = {
+ NULL, gen_helper_sve_ah_fneg_h,
+ gen_helper_sve_ah_fneg_s, gen_helper_sve_ah_fneg_d,
+};
+TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz,
+ s->fpcr_ah ? fneg_ah_fns[a->esz] : fneg_fns[a->esz], a, 0)
static gen_helper_gvec_3 * const sxtb_fns[4] = {
NULL, gen_helper_sve_sxtb_h,
@@ -1200,14 +1238,14 @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
- fexpa_fns[a->esz], a->rd, a->rn, 0)
+ fexpa_fns[a->esz], a->rd, a->rn, s->fpcr_ah)
static gen_helper_gvec_3 * const ftssel_fns[4] = {
NULL, gen_helper_sve_ftssel_h,
gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
- ftssel_fns[a->esz], a, 0)
+ ftssel_fns[a->esz], a, s->fpcr_ah)
/*
*** SVE Predicate Logical Operations Group
@@ -3486,21 +3524,24 @@ DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
*** SVE Floating Point Multiply-Add Indexed Group
*/
-static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
-{
- static gen_helper_gvec_4_ptr * const fns[4] = {
- NULL,
- gen_helper_gvec_fmla_idx_h,
- gen_helper_gvec_fmla_idx_s,
- gen_helper_gvec_fmla_idx_d,
- };
- return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
- (a->index << 1) | sub,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
-}
+static gen_helper_gvec_4_ptr * const fmla_idx_fns[4] = {
+ NULL, gen_helper_gvec_fmla_idx_h,
+ gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d
+};
+TRANS_FEAT(FMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz,
+ fmla_idx_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->index,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
-TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false)
-TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true)
+static gen_helper_gvec_4_ptr * const fmls_idx_fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_gvec_fmls_idx_h, gen_helper_gvec_ah_fmls_idx_h },
+ { gen_helper_gvec_fmls_idx_s, gen_helper_gvec_ah_fmls_idx_s },
+ { gen_helper_gvec_fmls_idx_d, gen_helper_gvec_ah_fmls_idx_d },
+};
+TRANS_FEAT(FMLS_zzxz, aa64_sve, gen_gvec_fpst_zzzz,
+ fmls_idx_fns[a->esz][s->fpcr_ah],
+ a->rd, a->rn, a->rm, a->ra, a->index,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
/*
*** SVE Floating Point Multiply Indexed Group
@@ -3512,7 +3553,7 @@ static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = {
};
TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz,
fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
/*
*** SVE Floating Point Fast Reduction Group
@@ -3545,7 +3586,7 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
- status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
fn(temp, t_zn, t_pg, status, t_desc);
@@ -3560,11 +3601,23 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
}; \
TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz])
+#define DO_VPZ_AH(NAME, name) \
+ static gen_helper_fp_reduce * const name##_fns[4] = { \
+ NULL, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
+ }; \
+ static gen_helper_fp_reduce * const name##_ah_fns[4] = { \
+ NULL, gen_helper_sve_ah_##name##_h, \
+ gen_helper_sve_ah_##name##_s, gen_helper_sve_ah_##name##_d, \
+ }; \
+ TRANS_FEAT(NAME, aa64_sve, do_reduce, a, \
+ s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz])
+
DO_VPZ(FADDV, faddv)
DO_VPZ(FMINNMV, fminnmv)
DO_VPZ(FMAXNMV, fmaxnmv)
-DO_VPZ(FMINV, fminv)
-DO_VPZ(FMAXV, fmaxv)
+DO_VPZ_AH(FMINV, fminv)
+DO_VPZ_AH(FMAXV, fmaxv)
#undef DO_VPZ
@@ -3576,13 +3629,25 @@ static gen_helper_gvec_2_ptr * const frecpe_fns[] = {
NULL, gen_helper_gvec_frecpe_h,
gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d,
};
-TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0)
+static gen_helper_gvec_2_ptr * const frecpe_rpres_fns[] = {
+ NULL, gen_helper_gvec_frecpe_h,
+ gen_helper_gvec_frecpe_rpres_s, gen_helper_gvec_frecpe_d,
+};
+TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
+ frecpe_rpres_fns[a->esz] : frecpe_fns[a->esz], a, 0)
static gen_helper_gvec_2_ptr * const frsqrte_fns[] = {
NULL, gen_helper_gvec_frsqrte_h,
gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d,
};
-TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0)
+static gen_helper_gvec_2_ptr * const frsqrte_rpres_fns[] = {
+ NULL, gen_helper_gvec_frsqrte_h,
+ gen_helper_gvec_frsqrte_rpres_s, gen_helper_gvec_frsqrte_d,
+};
+TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz,
+ s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
+ frsqrte_rpres_fns[a->esz] : frsqrte_fns[a->esz], a, 0)
/*
*** SVE Floating Point Compare with Zero Group
@@ -3597,7 +3662,7 @@ static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
if (sve_access_check(s)) {
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr status =
- fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
@@ -3632,8 +3697,9 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
};
TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
- ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ ftmad_fns[a->esz], a->rd, a->rn, a->rm,
+ a->imm | (s->fpcr_ah << 3),
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
/*
*** SVE Floating Point Accumulating Reduction Group
@@ -3666,7 +3732,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm));
tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
- t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
@@ -3686,11 +3752,23 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
}; \
TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0)
+#define DO_FP3_AH(NAME, name) \
+ static gen_helper_gvec_3_ptr * const name##_fns[4] = { \
+ NULL, gen_helper_gvec_##name##_h, \
+ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
+ }; \
+ static gen_helper_gvec_3_ptr * const name##_ah_fns[4] = { \
+ NULL, gen_helper_gvec_ah_##name##_h, \
+ gen_helper_gvec_ah_##name##_s, gen_helper_gvec_ah_##name##_d \
+ }; \
+ TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, \
+ s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], a, 0)
+
DO_FP3(FADD_zzz, fadd)
DO_FP3(FSUB_zzz, fsub)
DO_FP3(FMUL_zzz, fmul)
-DO_FP3(FRECPS, recps)
-DO_FP3(FRSQRTS, rsqrts)
+DO_FP3_AH(FRECPS, recps)
+DO_FP3_AH(FRSQRTS, rsqrts)
#undef DO_FP3
@@ -3712,14 +3790,27 @@ TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
}; \
TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a)
+#define DO_ZPZZ_AH_FP(NAME, FEAT, name, ah_name) \
+ static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \
+ NULL, gen_helper_##name##_h, \
+ gen_helper_##name##_s, gen_helper_##name##_d \
+ }; \
+ static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \
+ NULL, gen_helper_##ah_name##_h, \
+ gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \
+ }; \
+ TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \
+ s->fpcr_ah ? name##_ah_zpzz_fns[a->esz] : \
+ name##_zpzz_fns[a->esz], a)
+
DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd)
DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub)
DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul)
-DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin)
-DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax)
+DO_ZPZZ_AH_FP(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin)
+DO_ZPZZ_AH_FP(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax)
DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum)
DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum)
-DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd)
+DO_ZPZZ_AH_FP(FABD, aa64_sve, sve_fabd, sve_ah_fabd)
DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn)
DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv)
DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx)
@@ -3741,7 +3832,7 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn));
tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
- status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ status = fpstatus_ptr(is_fp16 ? FPST_A64_F16 : FPST_A64);
desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
fn(t_zd, t_zn, t_pg, scalar, status, desc);
}
@@ -3774,14 +3865,35 @@ static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \
name##_const[a->esz][a->imm], name##_fns[a->esz])
+#define DO_FP_AH_IMM(NAME, name, const0, const1) \
+ static gen_helper_sve_fp2scalar * const name##_fns[4] = { \
+ NULL, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, \
+ gen_helper_sve_##name##_d \
+ }; \
+ static gen_helper_sve_fp2scalar * const name##_ah_fns[4] = { \
+ NULL, gen_helper_sve_ah_##name##_h, \
+ gen_helper_sve_ah_##name##_s, \
+ gen_helper_sve_ah_##name##_d \
+ }; \
+ static uint64_t const name##_const[4][2] = { \
+ { -1, -1 }, \
+ { float16_##const0, float16_##const1 }, \
+ { float32_##const0, float32_##const1 }, \
+ { float64_##const0, float64_##const1 }, \
+ }; \
+ TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \
+ name##_const[a->esz][a->imm], \
+ s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz])
+
DO_FP_IMM(FADD, fadds, half, one)
DO_FP_IMM(FSUB, fsubs, half, one)
DO_FP_IMM(FMUL, fmuls, half, two)
DO_FP_IMM(FSUBR, fsubrs, half, one)
DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
DO_FP_IMM(FMINNM, fminnms, zero, one)
-DO_FP_IMM(FMAX, fmaxs, zero, one)
-DO_FP_IMM(FMIN, fmins, zero, one)
+DO_FP_AH_IMM(FMAX, fmaxs, zero, one)
+DO_FP_AH_IMM(FMIN, fmins, zero, one)
#undef DO_FP_IMM
@@ -3793,7 +3905,7 @@ static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
}
if (sve_access_check(s)) {
unsigned vsz = vec_full_reg_size(s);
- TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
vec_full_reg_offset(s, a->rm),
@@ -3825,22 +3937,28 @@ static gen_helper_gvec_4_ptr * const fcadd_fns[] = {
gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d,
};
TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz],
- a->rd, a->rn, a->rm, a->pg, a->rot,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1),
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
-#define DO_FMLA(NAME, name) \
+#define DO_FMLA(NAME, name, ah_name) \
static gen_helper_gvec_5_ptr * const name##_fns[4] = { \
NULL, gen_helper_sve_##name##_h, \
gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
}; \
- TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \
+ static gen_helper_gvec_5_ptr * const name##_ah_fns[4] = { \
+ NULL, gen_helper_sve_##ah_name##_h, \
+ gen_helper_sve_##ah_name##_s, gen_helper_sve_##ah_name##_d \
+ }; \
+ TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, \
+ s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], \
a->rd, a->rn, a->rm, a->ra, a->pg, 0, \
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
-DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
-DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
-DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
-DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
+/* We don't need an ah_fmla_zpzzz because fmla doesn't negate anything */
+DO_FMLA(FMLA_zpzzz, fmla_zpzzz, fmla_zpzzz)
+DO_FMLA(FMLS_zpzzz, fmls_zpzzz, ah_fmls_zpzzz)
+DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz, ah_fnmla_zpzzz)
+DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz, ah_fnmls_zpzzz)
#undef DO_FMLA
@@ -3849,67 +3967,68 @@ static gen_helper_gvec_5_ptr * const fcmla_fns[4] = {
gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d,
};
TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz],
- a->rd, a->rn, a->rm, a->ra, a->pg, a->rot,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a->rd, a->rn, a->rm, a->ra, a->pg, a->rot | (s->fpcr_ah << 2),
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = {
NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL
};
TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz],
a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
/*
*** SVE Floating Point Unary Operations Predicated Group
*/
TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_sh, a, 0, FPST_A64)
TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_hs, a, 0, FPST_A64_F16)
TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_bfcvt, a, 0, FPST_FPCR)
+ gen_helper_sve_bfcvt, a, 0,
+ s->fpcr_ah ? FPST_AH : FPST_A64)
TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_dh, a, 0, FPST_A64)
TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_hd, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_ds, a, 0, FPST_A64)
TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvt_sd, a, 0, FPST_A64)
TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzs_hh, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzu_hh, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzs_hs, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzu_hs, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzs_hd, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_fcvtzu_hd, a, 0, FPST_A64_F16)
TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzs_ss, a, 0, FPST_A64)
TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzu_ss, a, 0, FPST_A64)
TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzs_sd, a, 0, FPST_A64)
TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzu_sd, a, 0, FPST_A64)
TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzs_ds, a, 0, FPST_A64)
TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzu_ds, a, 0, FPST_A64)
TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzs_dd, a, 0, FPST_A64)
TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR)
+ gen_helper_sve_fcvtzu_dd, a, 0, FPST_A64)
static gen_helper_gvec_3_ptr * const frint_fns[] = {
NULL,
@@ -3918,7 +4037,7 @@ static gen_helper_gvec_3_ptr * const frint_fns[] = {
gen_helper_sve_frint_d
};
TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz],
- a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
static gen_helper_gvec_3_ptr * const frintx_fns[] = {
NULL,
@@ -3927,7 +4046,7 @@ static gen_helper_gvec_3_ptr * const frintx_fns[] = {
gen_helper_sve_frintx_d
};
TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz],
- a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
ARMFPRounding mode, gen_helper_gvec_3_ptr *fn)
@@ -3944,7 +4063,7 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
}
vsz = vec_full_reg_size(s);
- status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
tmode = gen_set_rmode(mode, status);
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
@@ -3972,48 +4091,48 @@ static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d,
};
TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz],
- a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a, 0, select_ah_fpst(s, a->esz))
static gen_helper_gvec_3_ptr * const fsqrt_fns[] = {
NULL, gen_helper_sve_fsqrt_h,
gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d,
};
TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz],
- a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_scvt_hh, a, 0, FPST_A64_F16)
TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_scvt_sh, a, 0, FPST_A64_F16)
TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_scvt_dh, a, 0, FPST_A64_F16)
TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_ss, a, 0, FPST_FPCR)
+ gen_helper_sve_scvt_ss, a, 0, FPST_A64)
TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_ds, a, 0, FPST_FPCR)
+ gen_helper_sve_scvt_ds, a, 0, FPST_A64)
TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_sd, a, 0, FPST_FPCR)
+ gen_helper_sve_scvt_sd, a, 0, FPST_A64)
TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_scvt_dd, a, 0, FPST_FPCR)
+ gen_helper_sve_scvt_dd, a, 0, FPST_A64)
TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_ucvt_hh, a, 0, FPST_A64_F16)
TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_ucvt_sh, a, 0, FPST_A64_F16)
TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16)
+ gen_helper_sve_ucvt_dh, a, 0, FPST_A64_F16)
TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR)
+ gen_helper_sve_ucvt_ss, a, 0, FPST_A64)
TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR)
+ gen_helper_sve_ucvt_ds, a, 0, FPST_A64)
TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR)
+ gen_helper_sve_ucvt_sd, a, 0, FPST_A64)
TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR)
+ gen_helper_sve_ucvt_dd, a, 0, FPST_A64)
/*
*** SVE Memory - 32-bit Gather and Unsized Contiguous Group
@@ -6048,9 +6167,9 @@ static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
if (top) {
if (shl == halfbits) {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
- tcg_gen_and_vec(vece, d, n, t);
+ tcg_gen_and_vec(vece, d, n,
+ tcg_constant_vec_matching(d, vece,
+ MAKE_64BIT_MASK(halfbits, halfbits)));
} else {
tcg_gen_sari_vec(vece, d, n, halfbits);
tcg_gen_shli_vec(vece, d, d, shl);
@@ -6105,18 +6224,18 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
if (top) {
if (shl == halfbits) {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
- tcg_gen_and_vec(vece, d, n, t);
+ tcg_gen_and_vec(vece, d, n,
+ tcg_constant_vec_matching(d, vece,
+ MAKE_64BIT_MASK(halfbits, halfbits)));
} else {
tcg_gen_shri_vec(vece, d, n, halfbits);
tcg_gen_shli_vec(vece, d, d, shl);
}
} else {
if (shl == 0) {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_and_vec(vece, d, n, t);
+ tcg_gen_and_vec(vece, d, n,
+ tcg_constant_vec_matching(d, vece,
+ MAKE_64BIT_MASK(0, halfbits)));
} else {
tcg_gen_shli_vec(vece, d, n, halfbits);
tcg_gen_shri_vec(vece, d, d, halfbits - shl);
@@ -6284,18 +6403,14 @@ static const TCGOpcode sqxtn_list[] = {
static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t mask = (1ull << halfbits) - 1;
int64_t min = -1ull << (halfbits - 1);
int64_t max = -min - 1;
- tcg_gen_dupi_vec(vece, t, min);
- tcg_gen_smax_vec(vece, d, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_smin_vec(vece, d, d, t);
- tcg_gen_dupi_vec(vece, t, mask);
- tcg_gen_and_vec(vece, d, d, t);
+ tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, min));
+ tcg_gen_smin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max));
+ tcg_gen_and_vec(vece, d, d, tcg_constant_vec_matching(d, vece, mask));
}
static const GVecGen2 sqxtnb_ops[3] = {
@@ -6316,19 +6431,15 @@ TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops)
static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t mask = (1ull << halfbits) - 1;
int64_t min = -1ull << (halfbits - 1);
int64_t max = -min - 1;
- tcg_gen_dupi_vec(vece, t, min);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min));
+ tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max));
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_dupi_vec(vece, t, mask);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n);
}
static const GVecGen2 sqxtnt_ops[3] = {
@@ -6356,12 +6467,10 @@ static const TCGOpcode uqxtn_list[] = {
static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = (1ull << halfbits) - 1;
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_umin_vec(vece, d, n, t);
+ tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max));
}
static const GVecGen2 uqxtnb_ops[3] = {
@@ -6382,14 +6491,13 @@ TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops)
static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = (1ull << halfbits) - 1;
+ TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_umin_vec(vece, n, n, maxv);
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, maxv, d, n);
}
static const GVecGen2 uqxtnt_ops[3] = {
@@ -6417,14 +6525,11 @@ static const TCGOpcode sqxtun_list[] = {
static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = (1ull << halfbits) - 1;
- tcg_gen_dupi_vec(vece, t, 0);
- tcg_gen_smax_vec(vece, d, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_umin_vec(vece, d, d, t);
+ tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, 0));
+ tcg_gen_umin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max));
}
static const GVecGen2 sqxtunb_ops[3] = {
@@ -6445,16 +6550,14 @@ TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops)
static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = (1ull << halfbits) - 1;
+ TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max);
- tcg_gen_dupi_vec(vece, t, 0);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0));
+ tcg_gen_umin_vec(vece, n, n, maxv);
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, maxv, d, n);
}
static const GVecGen2 sqxtunt_ops[3] = {
@@ -6518,13 +6621,11 @@ static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_shri_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, mask);
- tcg_gen_and_vec(vece, d, n, t);
+ tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask));
}
static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 };
@@ -6576,13 +6677,11 @@ static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_shli_vec(vece, n, n, halfbits - shr);
- tcg_gen_dupi_vec(vece, t, mask);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n);
}
static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 };
@@ -6625,14 +6724,12 @@ TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops)
static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
+ uint64_t max = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_sari_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, 0);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_umin_vec(vece, d, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0));
+ tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max));
}
static const TCGOpcode sqshrunb_vec_list[] = {
@@ -6657,16 +6754,15 @@ TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops)
static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
+ uint64_t max = MAKE_64BIT_MASK(0, halfbits);
+ TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max);
tcg_gen_sari_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, 0);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0));
+ tcg_gen_umin_vec(vece, n, n, maxv);
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, maxv, d, n);
}
static const TCGOpcode sqshrunt_vec_list[] = {
@@ -6709,18 +6805,15 @@ TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops)
static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
int64_t min = -max - 1;
+ int64_t mask = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_sari_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, min);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_smin_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_and_vec(vece, d, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min));
+ tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max));
+ tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask));
}
static const TCGOpcode sqshrnb_vec_list[] = {
@@ -6745,19 +6838,16 @@ TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops)
static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
int64_t min = -max - 1;
+ int64_t mask = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_sari_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, min);
- tcg_gen_smax_vec(vece, n, n, t);
- tcg_gen_dupi_vec(vece, t, max);
- tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min));
+ tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max));
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n);
}
static const TCGOpcode sqshrnt_vec_list[] = {
@@ -6800,12 +6890,11 @@ TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops)
static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits);
tcg_gen_shri_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_umin_vec(vece, d, n, t);
+ tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max));
}
static const TCGOpcode uqshrnb_vec_list[] = {
@@ -6830,14 +6919,14 @@ TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops)
static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
- TCGv_vec t = tcg_temp_new_vec_matching(d);
int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits);
+ TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max);
tcg_gen_shri_vec(vece, n, n, shr);
- tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
- tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_umin_vec(vece, n, n, maxv);
tcg_gen_shli_vec(vece, n, n, halfbits);
- tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_gen_bitsel_vec(vece, d, maxv, d, n);
}
static const TCGOpcode uqshrnt_vec_list[] = {
@@ -6925,10 +7014,10 @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
- 0, FPST_FPCR)
+ 0, FPST_A64)
TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
- 0, FPST_FPCR)
+ 0, FPST_A64)
static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
NULL, gen_helper_sve2_sqdmlal_zzzw_h,
@@ -7044,17 +7133,18 @@ TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
gen_gvec_rax1, a)
TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
- gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
+ gen_helper_sve2_fcvtnt_sh, a, 0, FPST_A64)
TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz,
- gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR)
+ gen_helper_sve2_fcvtnt_ds, a, 0, FPST_A64)
TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
- gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR)
+ gen_helper_sve_bfcvtnt, a, 0,
+ s->fpcr_ah ? FPST_AH : FPST_A64)
TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz,
- gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR)
+ gen_helper_sve2_fcvtlt_hs, a, 0, FPST_A64)
TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz,
- gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)
+ gen_helper_sve2_fcvtlt_sd, a, 0, FPST_A64)
TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
FPROUNDING_ODD, gen_helper_sve_fcvt_ds)
@@ -7066,7 +7156,7 @@ static gen_helper_gvec_3_ptr * const flogb_fns[] = {
gen_helper_flogb_s, gen_helper_flogb_d
};
TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz],
- a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+ a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
{
@@ -7099,18 +7189,19 @@ TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
gen_helper_gvec_ummla_b, a, 0)
-TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
+TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_env_arg_zzzz,
gen_helper_gvec_bfdot, a, 0)
-TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
+TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_env_arg_zzxz,
gen_helper_gvec_bfdot_idx, a)
-TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_env_arg_zzzz,
gen_helper_gvec_bfmmla, a, 0)
static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
- a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR);
+ a->rd, a->rn, a->rm, a->ra, sel,
+ s->fpcr_ah ? FPST_AH : FPST_A64);
}
TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false)
@@ -7120,7 +7211,8 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
{
return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
a->rd, a->rn, a->rm, a->ra,
- (a->index << 1) | sel, FPST_FPCR);
+ (a->index << 1) | sel,
+ s->fpcr_ah ? FPST_AH : FPST_A64);
}
TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
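
Several of the narrowing hunks above (gen_sqxtnb_vec() and friends) keep the same clamp-then-mask sequence and only switch the bounds over to tcg_constant_vec_matching() values. As a plain-C reference for what a single lane computes under that sequence — a sketch, not the TCG code:

#include <assert.h>
#include <stdint.h>

/* Saturate a signed value to 'halfbits' bits, then return it masked to the
   low halfbits, mirroring the smax/smin/and sequence in the patch. */
static uint64_t sqxtn_scalar(int64_t n, int halfbits)
{
    int64_t max = (1ll << (halfbits - 1)) - 1;
    int64_t min = -max - 1;

    if (n > max) {
        n = max;
    } else if (n < min) {
        n = min;
    }
    return (uint64_t)n & ((1ull << halfbits) - 1);
}

int main(void)
{
    /* Narrowing 32-bit lanes to 16 bits: halfbits == 16. */
    assert(sqxtn_scalar(0x12345, 16) == 0x7fff);    /* saturates high */
    assert(sqxtn_scalar(-0x12345, 16) == 0x8000);   /* saturates low */
    assert(sqxtn_scalar(-2, 16) == 0xfffe);         /* in range, just masked */
    return 0;
}
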
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index cd5b848..8d9d1ab 100644
--- a/target/arm/tcg/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
@@ -460,9 +460,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
} else {
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
}
tcg_rmode = gen_set_rmode(rounding, fpst);
@@ -527,9 +527,9 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
} else {
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
}
tcg_shift = tcg_constant_i32(0);
@@ -1398,7 +1398,7 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
f0 = tcg_temp_new_i32();
f1 = tcg_temp_new_i32();
fd = tcg_temp_new_i32();
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
vfp_load_reg32(f0, vn);
vfp_load_reg32(f1, vm);
@@ -1433,7 +1433,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
/*
* Do a half-precision operation. Functionally this is
* the same as do_vfp_3op_sp(), except:
- * - it uses the FPST_FPCR_F16
+ * - it uses the FPST_A32_F16
* - it doesn't need the VFP vector handling (fp16 is a
* v8 feature, and in v8 VFP vectors don't exist)
* - it does the aa32_fp16_arith feature test
@@ -1456,7 +1456,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
f0 = tcg_temp_new_i32();
f1 = tcg_temp_new_i32();
fd = tcg_temp_new_i32();
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
vfp_load_reg16(f0, vn);
vfp_load_reg16(f1, vm);
@@ -1517,7 +1517,7 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
f0 = tcg_temp_new_i64();
f1 = tcg_temp_new_i64();
fd = tcg_temp_new_i64();
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
vfp_load_reg64(f0, vn);
vfp_load_reg64(f1, vm);
@@ -2122,7 +2122,7 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
/* VFNMA, VFNMS */
gen_vfp_negh(vd, vd);
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
vfp_store_reg32(vd, a->vd);
return true;
@@ -2181,7 +2181,7 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
/* VFNMA, VFNMS */
gen_vfp_negs(vd, vd);
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
vfp_store_reg32(vd, a->vd);
return true;
@@ -2190,8 +2190,8 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
/*
- * VFNMA : fd = muladd(-fd, fn, fm)
- * VFNMS : fd = muladd(-fd, -fn, fm)
+ * VFNMA : fd = muladd(-fd, -fn, fm)
+ * VFNMS : fd = muladd(-fd, fn, fm)
* VFMA : fd = muladd( fd, fn, fm)
* VFMS : fd = muladd( fd, -fn, fm)
*
@@ -2246,7 +2246,7 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
/* VFNMA, VFNMS */
gen_vfp_negd(vd, vd);
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
vfp_store_reg64(vd, a->vd);
return true;
@@ -2262,8 +2262,8 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
#define MAKE_VFM_TRANS_FNS(PREC) \
MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
- MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
- MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
+ MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, false, true) \
+ MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, true, true)
MAKE_VFM_TRANS_FNS(hp)
MAKE_VFM_TRANS_FNS(sp)
@@ -2424,17 +2424,17 @@ DO_VFP_2OP(VNEG, dp, gen_vfp_negd, aa32_fpdp_v2)
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrth(vd, vm, tcg_env);
+ gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_A32_F16));
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrts(vd, vm, tcg_env);
+ gen_helper_vfp_sqrts(vd, vm, fpstatus_ptr(FPST_A32));
}
static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
- gen_helper_vfp_sqrtd(vd, vm, tcg_env);
+ gen_helper_vfp_sqrtd(vd, vm, fpstatus_ptr(FPST_A32));
}
DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
@@ -2565,7 +2565,7 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
/* The T bit tells us if we want the low or high 16 bits of Vm */
@@ -2599,7 +2599,7 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
/* The T bit tells us if we want the low or high 16 bits of Vm */
@@ -2623,7 +2623,7 @@ static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
@@ -2646,7 +2646,7 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
@@ -2680,7 +2680,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
vm = tcg_temp_new_i64();
@@ -2706,7 +2706,7 @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_rinth(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2727,7 +2727,7 @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_rints(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2757,7 +2757,7 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
tmp = tcg_temp_new_i64();
vfp_load_reg64(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_rintd(tmp, tmp, fpst);
vfp_store_reg64(tmp, a->vd);
return true;
@@ -2779,7 +2779,7 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rinth(tmp, tmp, fpst);
gen_restore_rmode(tcg_rmode, fpst);
@@ -2803,7 +2803,7 @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rints(tmp, tmp, fpst);
gen_restore_rmode(tcg_rmode, fpst);
@@ -2836,7 +2836,7 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
tmp = tcg_temp_new_i64();
vfp_load_reg64(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rintd(tmp, tmp, fpst);
gen_restore_rmode(tcg_rmode, fpst);
@@ -2859,7 +2859,7 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_rinth_exact(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2880,7 +2880,7 @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_rints_exact(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2910,7 +2910,7 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
tmp = tcg_temp_new_i64();
vfp_load_reg64(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
gen_helper_rintd_exact(tmp, tmp, fpst);
vfp_store_reg64(tmp, a->vd);
return true;
@@ -2937,7 +2937,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
vm = tcg_temp_new_i32();
vd = tcg_temp_new_i64();
vfp_load_reg32(vm, a->vm);
- gen_helper_vfp_fcvtds(vd, vm, tcg_env);
+ gen_helper_vfp_fcvtds(vd, vm, fpstatus_ptr(FPST_A32));
vfp_store_reg64(vd, a->vd);
return true;
}
@@ -2963,7 +2963,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
vd = tcg_temp_new_i32();
vm = tcg_temp_new_i64();
vfp_load_reg64(vm, a->vm);
- gen_helper_vfp_fcvtsd(vd, vm, tcg_env);
+ gen_helper_vfp_fcvtsd(vd, vm, fpstatus_ptr(FPST_A32));
vfp_store_reg32(vd, a->vd);
return true;
}
@@ -2983,7 +2983,7 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
vm = tcg_temp_new_i32();
vfp_load_reg32(vm, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
if (a->s) {
/* i32 -> f16 */
gen_helper_vfp_sitoh(vm, vm, fpst);
@@ -3010,7 +3010,7 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
vm = tcg_temp_new_i32();
vfp_load_reg32(vm, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
if (a->s) {
/* i32 -> f32 */
gen_helper_vfp_sitos(vm, vm, fpst);
@@ -3044,7 +3044,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
vm = tcg_temp_new_i32();
vd = tcg_temp_new_i64();
vfp_load_reg32(vm, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
if (a->s) {
/* i32 -> f64 */
gen_helper_vfp_sitod(vd, vm, fpst);
@@ -3105,7 +3105,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
vd = tcg_temp_new_i32();
vfp_load_reg32(vd, a->vd);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
shift = tcg_constant_i32(frac_bits);
/* Switch on op:U:sx bits */
@@ -3161,7 +3161,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
vd = tcg_temp_new_i32();
vfp_load_reg32(vd, a->vd);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
shift = tcg_constant_i32(frac_bits);
/* Switch on op:U:sx bits */
@@ -3223,7 +3223,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
vd = tcg_temp_new_i64();
vfp_load_reg64(vd, a->vd);
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
shift = tcg_constant_i32(frac_bits);
/* Switch on op:U:sx bits */
@@ -3273,7 +3273,7 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
vm = tcg_temp_new_i32();
vfp_load_reg16(vm, a->vm);
@@ -3307,7 +3307,7 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
vm = tcg_temp_new_i32();
vfp_load_reg32(vm, a->vm);
@@ -3347,7 +3347,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR);
+ fpst = fpstatus_ptr(FPST_A32);
vm = tcg_temp_new_i64();
vd = tcg_temp_new_i32();
vfp_load_reg64(vm, a->vm);
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index c5bc691..9962f43 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -27,6 +27,7 @@
#include "semihosting/semihost.h"
#include "cpregs.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
@@ -228,6 +229,9 @@ static inline int get_a32_user_mem_index(DisasContext *s)
*/
switch (s->mmu_idx) {
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
+ return arm_to_core_mmu_idx(ARMMMUIdx_E30_0);
case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
@@ -490,20 +494,9 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- if (TCG_TARGET_HAS_add2_i32) {
- tcg_gen_movi_i32(tmp, 0);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
- } else {
- TCGv_i64 q0 = tcg_temp_new_i64();
- TCGv_i64 q1 = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(q0, t0);
- tcg_gen_extu_i32_i64(q1, t1);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extu_i32_i64(q1, cpu_CF);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
- }
+
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF);
+
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
tcg_gen_xor_i32(tmp, t0, t1);
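
The gen_adc_CC() hunk above collapses the old add2/i64 fallback into a single tcg_gen_addcio_i32() call: add with carry-in, producing the 32-bit sum and the carry-out. A scalar model of that operation (hypothetical helper name, not a TCG API):

#include <assert.h>
#include <stdint.h>

/* Add with carry-in, returning the 32-bit sum and writing the carry-out. */
static uint32_t adc32(uint32_t t0, uint32_t t1, uint32_t carry_in,
                      uint32_t *carry_out)
{
    uint64_t wide = (uint64_t)t0 + t1 + carry_in;

    *carry_out = (uint32_t)(wide >> 32);   /* 0 or 1, becomes CF */
    return (uint32_t)wide;                 /* becomes NF (and feeds ZF/VF) */
}

int main(void)
{
    uint32_t cf;

    assert(adc32(0xffffffffu, 0, 1, &cf) == 0 && cf == 1);
    assert(adc32(0x7fffffffu, 1, 0, &cf) == 0x80000000u && cf == 0);
    return 0;
}
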
@@ -3507,7 +3500,7 @@ static int t32_expandimm_rot(DisasContext *s, int x)
/* Return the unrotated immediate from T32ExpandImm. */
static int t32_expandimm_imm(DisasContext *s, int x)
{
- int imm = extract32(x, 0, 8);
+ uint32_t imm = extract32(x, 0, 8);
switch (extract32(x, 8, 4)) {
case 0: /* XY */
@@ -4938,7 +4931,7 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
}
static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
- TCGv_i32 addr, int address_offset)
+ TCGv_i32 addr)
{
if (!a->p) {
TCGv_i32 ofs = load_reg(s, a->rm);
@@ -4951,7 +4944,6 @@ static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
} else if (!a->w) {
return;
}
- tcg_gen_addi_i32(addr, addr, address_offset);
store_reg(s, a->rn, addr);
}
@@ -4971,7 +4963,7 @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
* Perform base writeback before the loaded value to
* ensure correct behavior with overlapping index registers.
*/
- op_addr_rr_post(s, a, addr, 0);
+ op_addr_rr_post(s, a, addr);
store_reg_from_load(s, a->rt, tmp);
return true;
}
@@ -4996,14 +4988,53 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
- op_addr_rr_post(s, a, addr, 0);
+ op_addr_rr_post(s, a, addr);
return true;
}
-static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
+static void do_ldrd_load(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
{
+ /*
+ * LDRD is required to be an atomic 64-bit access if the
+ * address is 8-aligned, two atomic 32-bit accesses if
+ * it's only 4-aligned, and to give an alignment fault
+ * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN.
+ * Rt is always the word from the lower address, and Rt2 the
+ * data from the higher address, regardless of endianness.
+ * So (like gen_load_exclusive) we avoid gen_aa32_ld_i64()
+ * so we don't get its SCTLR_B check, and instead do a 64-bit access
+ * using MO_BE if appropriate and then split the two halves.
+ *
+ * For M-profile, and for A-profile before LPAE, the 64-bit
+ * atomicity is not required. We could model that using
+ * the looser MO_ATOM_IFALIGN_PAIR, but providing a higher
+ * level of atomicity than required is harmless (we would not
+ * currently generate better code for IFALIGN_PAIR here).
+ *
+ * This also gives us the correct behaviour of not updating
+ * rt if the load of rt2 faults; this is required for cases
+ * like "ldrd r2, r3, [r2]" where rt is also the base register.
+ */
int mem_idx = get_mem_index(s);
- TCGv_i32 addr, tmp;
+ MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld_i64(t64, taddr, mem_idx, opc);
+ if (s->be_data == MO_BE) {
+ tcg_gen_extr_i64_i32(tmp2, tmp, t64);
+ } else {
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ }
+ store_reg(s, rt, tmp);
+ store_reg(s, rt2, tmp2);
+}
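
[The comment above says Rt always receives the word from the lower address and Rt2 the word from the higher address, so after the single 64-bit load the halves are split according to the data endianness. A standalone plain-C sketch of that split, assuming the 64-bit value was loaded with the matching byte order:]

#include <stdint.h>

/* Split one 64-bit LDRD result into Rt (lower address) and Rt2 (higher
 * address).  With big-endian data the high half of the 64-bit value is
 * the lower-addressed word; with little-endian data it is the low half. */
static void split_ldrd_sketch(uint64_t t64, int big_endian,
                              uint32_t *rt, uint32_t *rt2)
{
    if (big_endian) {
        *rt  = (uint32_t)(t64 >> 32);
        *rt2 = (uint32_t)t64;
    } else {
        *rt  = (uint32_t)t64;
        *rt2 = (uint32_t)(t64 >> 32);
    }
}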
+
+static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
+{
+ TCGv_i32 addr;
if (!ENABLE_ARCH_5TE) {
return false;
@@ -5014,25 +5045,49 @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
}
addr = op_addr_rr_pre(s, a);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
- store_reg(s, a->rt, tmp);
-
- tcg_gen_addi_i32(addr, addr, 4);
-
- tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
- store_reg(s, a->rt + 1, tmp);
+ do_ldrd_load(s, addr, a->rt, a->rt + 1);
/* LDRD w/ base writeback is undefined if the registers overlap. */
- op_addr_rr_post(s, a, addr, -4);
+ op_addr_rr_post(s, a, addr);
return true;
}
-static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
+static void do_strd_store(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
{
+ /*
+ * STRD is required to be an atomic 64-bit access if the
+ * address is 8-aligned, two atomic 32-bit accesses if
+ * it's only 4-aligned, and to give an alignment fault
+ * if it's not 4-aligned.
+ * Rt is always the word from the lower address, and Rt2 the
+ * data from the higher address, regardless of endianness.
+ * So (like gen_store_exclusive) we avoid gen_aa32_st_i64()
+ * so we don't get its SCTLR_B check, and instead do a 64-bit access
+ * using MO_BE if appropriate, using a value constructed
+ * by putting the two halves together in the right order.
+ *
+ * As with LDRD, the 64-bit atomicity is not required for
+ * M-profile, or for A-profile before LPAE, and we provide
+ * the higher guarantee always for simplicity.
+ */
int mem_idx = get_mem_index(s);
- TCGv_i32 addr, tmp;
+ MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
+ TCGv_i32 t1 = load_reg(s, rt);
+ TCGv_i32 t2 = load_reg(s, rt2);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ if (s->be_data == MO_BE) {
+ tcg_gen_concat_i32_i64(t64, t2, t1);
+ } else {
+ tcg_gen_concat_i32_i64(t64, t1, t2);
+ }
+ tcg_gen_qemu_st_i64(t64, taddr, mem_idx, opc);
+}
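
[The MO_ALIGN_4 | MO_ATOM_SUBALIGN combination encodes the alignment and atomicity rule spelled out in the comment above. Written out explicitly as plain C, as a conceptual sketch rather than anything the softmmu path actually executes:]

#include <stdint.h>

/* The LDRD/STRD rule: an 8-aligned address gets one atomic 64-bit access,
 * a 4-aligned address gets two atomic 32-bit accesses, anything else
 * takes an alignment fault. */
enum strd_access {
    STRD_ALIGN_FAULT,
    STRD_TWO_ATOMIC_32,
    STRD_ONE_ATOMIC_64,
};

static enum strd_access classify_strd_sketch(uint64_t addr)
{
    if (addr % 4 != 0) {
        return STRD_ALIGN_FAULT;
    }
    return (addr % 8 == 0) ? STRD_ONE_ATOMIC_64 : STRD_TWO_ATOMIC_32;
}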
+
+static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
+{
+ TCGv_i32 addr;
if (!ENABLE_ARCH_5TE) {
return false;
@@ -5043,15 +5098,9 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
}
addr = op_addr_rr_pre(s, a);
- tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ do_strd_store(s, addr, a->rt, a->rt + 1);
- tcg_gen_addi_i32(addr, addr, 4);
-
- tmp = load_reg(s, a->rt + 1);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
-
- op_addr_rr_post(s, a, addr, -4);
+ op_addr_rr_post(s, a, addr);
return true;
}
@@ -5087,13 +5136,14 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
}
static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
- TCGv_i32 addr, int address_offset)
+ TCGv_i32 addr)
{
+ int address_offset = 0;
if (!a->p) {
if (a->u) {
- address_offset += a->imm;
+ address_offset = a->imm;
} else {
- address_offset -= a->imm;
+ address_offset = -a->imm;
}
} else if (!a->w) {
return;
@@ -5118,7 +5168,7 @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
* Perform base writeback before the loaded value to
* ensure correct behavior with overlapping index registers.
*/
- op_addr_ri_post(s, a, addr, 0);
+ op_addr_ri_post(s, a, addr);
store_reg_from_load(s, a->rt, tmp);
return true;
}
@@ -5143,29 +5193,20 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
- op_addr_ri_post(s, a, addr, 0);
+ op_addr_ri_post(s, a, addr);
return true;
}
static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
- int mem_idx = get_mem_index(s);
- TCGv_i32 addr, tmp;
+ TCGv_i32 addr;
addr = op_addr_ri_pre(s, a);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
- store_reg(s, a->rt, tmp);
-
- tcg_gen_addi_i32(addr, addr, 4);
-
- tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
- store_reg(s, rt2, tmp);
+ do_ldrd_load(s, addr, a->rt, rt2);
/* LDRD w/ base writeback is undefined if the registers overlap. */
- op_addr_ri_post(s, a, addr, -4);
+ op_addr_ri_post(s, a, addr);
return true;
}
@@ -5188,20 +5229,13 @@ static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
- int mem_idx = get_mem_index(s);
- TCGv_i32 addr, tmp;
+ TCGv_i32 addr;
addr = op_addr_ri_pre(s, a);
- tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
-
- tcg_gen_addi_i32(addr, addr, 4);
-
- tmp = load_reg(s, rt2);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ do_strd_store(s, addr, a->rt, rt2);
- op_addr_ri_post(s, a, addr, -4);
+ op_addr_ri_post(s, a, addr);
return true;
}
@@ -7726,7 +7760,8 @@ static bool arm_check_ss_active(DisasContext *dc)
static void arm_post_translate_insn(DisasContext *dc)
{
- if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
+ if (dc->condjmp &&
+ (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) {
if (dc->pc_save != dc->condlabel.pc_save) {
gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
}
@@ -7756,7 +7791,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* be possible after an indirect branch, at the start of the TB.
*/
assert(dc->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
dc->base.is_jmp = DISAS_NORETURN;
dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
@@ -8090,9 +8125,8 @@ static const TranslatorOps thumb_translator_ops = {
.tb_stop = arm_tr_tb_stop,
};
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index a8672c8..1bfdb0f 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -4,8 +4,8 @@
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
-#include "exec/exec-all.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/helper-gen.h"
#include "internals.h"
#include "cpu-features.h"
@@ -91,15 +91,19 @@ typedef struct DisasContext {
bool aarch64;
bool thumb;
bool lse2;
- /* Because unallocated encodings generate different exception syndrome
+ /*
+ * Because unallocated encodings generate different exception syndrome
* information from traps due to FP being disabled, we can't do a single
* "is fp access disabled" check at a high level in the decode tree.
* To help in catching bugs where the access check was forgotten in some
* code path, we set this flag when the access check is done, and assert
* that it is set at the point where we actually touch the FP regs.
+ * 0: not checked,
+ * 1: checked, access ok
+ * -1: checked, access denied
*/
- bool fp_access_checked;
- bool sve_access_checked;
+ int8_t fp_access_checked;
+ int8_t sve_access_checked;
/* ARMv8 single-step state (this is distinct from the QEMU gdbstub
* single-step support).
*/
@@ -154,6 +158,10 @@ typedef struct DisasContext {
bool nv2_mem_e20;
/* True if NV2 enabled and NV2 RAM accesses are big-endian */
bool nv2_mem_be;
+ /* True if FPCR.AH is 1 (alternate floating point handling) */
+ bool fpcr_ah;
+ /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */
+ bool fpcr_nep;
/*
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
* < 0, set by the current instruction.
@@ -163,8 +171,6 @@ typedef struct DisasContext {
uint8_t dcz_blocksize;
/* A copy of cpu->gm_blocksize. */
uint8_t gm_blocksize;
- /* True if this page is guarded. */
- bool guarded_page;
/* True if the current insn_start has been updated. */
bool insn_start_updated;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
@@ -471,6 +477,13 @@ void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_sqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_uqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_sqshlui(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ int64_t c, uint32_t opr_sz, uint32_t max_sz);
+
void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
@@ -514,6 +527,11 @@ void gen_sqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ushr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
@@ -568,6 +586,41 @@ void gen_gvec_umaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_clz(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cnt(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_rbit(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_rev16(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
+/* These exclusively manipulate the sign bit. */
+void gen_gvec_fabs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void gen_gvec_fneg(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+
+void gen_gvec_urecpe(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ursqrte(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
/*
* Forward to the isar_feature_* tests given a DisasContext pointer.
*/
@@ -593,13 +646,13 @@ typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
-typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
+typedef void NeonGenOne64OpEnvFn(TCGv_i64, TCGv_env, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
@@ -620,54 +673,18 @@ static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}
-/*
- * Enum for argument to fpstatus_ptr().
- */
-typedef enum ARMFPStatusFlavour {
- FPST_FPCR,
- FPST_FPCR_F16,
- FPST_STD,
- FPST_STD_F16,
-} ARMFPStatusFlavour;
-
/**
* fpstatus_ptr: return TCGv_ptr to the specified fp_status field
*
* We have multiple softfloat float_status fields in the Arm CPU state struct
* (see the comment in cpu.h for details). Return a TCGv_ptr which has
* been set up to point to the requested field in the CPU state struct.
- * The options are:
- *
- * FPST_FPCR
- * for non-FP16 operations controlled by the FPCR
- * FPST_FPCR_F16
- * for operations controlled by the FPCR where FPCR.FZ16 is to be used
- * FPST_STD
- * for A32/T32 Neon operations using the "standard FPSCR value"
- * FPST_STD_F16
- * as FPST_STD, but where FPCR.FZ16 is to be used
*/
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
TCGv_ptr statusptr = tcg_temp_new_ptr();
- int offset;
-
- switch (flavour) {
- case FPST_FPCR:
- offset = offsetof(CPUARMState, vfp.fp_status);
- break;
- case FPST_FPCR_F16:
- offset = offsetof(CPUARMState, vfp.fp_status_f16);
- break;
- case FPST_STD:
- offset = offsetof(CPUARMState, vfp.standard_fp_status);
- break;
- case FPST_STD_F16:
- offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
- break;
- default:
- g_assert_not_reached();
- }
+ int offset = offsetof(CPUARMState, vfp.fp_status[flavour]);
+
tcg_gen_addi_ptr(statusptr, tcg_env, offset);
return statusptr;
}
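
[The simplified fpstatus_ptr() folds the per-flavour switch into a single offsetof() on an indexed array member, so the offset is just the array base plus index times element size. A self-contained demonstration with a hypothetical struct (not CPUARMState), using constant indices:]

#include <stddef.h>
#include <stdio.h>

/* Stand-in for a CPU state struct holding an array of per-flavour
 * status fields. */
struct demo_state {
    int pad;
    double status[4];
};

int main(void)
{
    /* Each element's offset is the array's offset plus index * sizeof(element),
     * which is what the single offsetof() expression above relies on. */
    printf("status[0] at %zu\n", offsetof(struct demo_state, status[0]));
    printf("status[2] at %zu\n", offsetof(struct demo_state, status[2]));
    return 0;
}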
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 98604d1..986eaf8 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -836,6 +836,13 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
{ \
intptr_t i = 0, opr_sz = simd_oprsz(desc); \
intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \
+ /* \
+ * Special case: opr_sz == 8 from AA64/AA32 advsimd means the \
+ * first iteration might not be a full 16 byte segment. But \
+ * for vector lengths beyond that this must be SVE and we know \
+ * opr_sz is a multiple of 16, so we need not clamp segend \
+ * to opr_sz_n when we advance it at the end of the loop. \
+ */ \
intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \
intptr_t index = simd_data(desc); \
TYPED *d = vd, *a = va; \
@@ -853,7 +860,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
n[i * 4 + 2] * m2 + \
n[i * 4 + 3] * m3); \
} while (++i < segend); \
- segend = i + 4; \
+ segend = i + (16 / sizeof(TYPED)); \
} while (i < opr_sz_n); \
clear_tail(d, opr_sz, simd_maxsz(desc)); \
}
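
[End of the DO_DOT_IDX hunk. The new comment is about the loop structure: the index selects one group of the multiplicand per 16-byte segment, and only the first segment can be shorter than 16 bytes (the 8-byte AdvSIMD case). A plain-C sketch of the same iteration pattern for the 8-bit-input, 32-bit-accumulator case, assuming a little-endian host so the H* fixups can be omitted:]

#include <stdint.h>
#include <stddef.h>

/* Indexed signed dot product over 16-byte segments: within each segment the
 * 4-byte group of m selected by 'index' multiplies every 4-byte group of n.
 * Only the first segment may be shorter than 16 bytes; longer vectors are
 * multiples of 16, so segend never needs clamping after the first pass. */
static void sdot_idx_sketch(int32_t *d, const int8_t *n, const int8_t *m,
                            const int32_t *a, size_t opr_sz_bytes, int index)
{
    size_t opr_sz_n = opr_sz_bytes / sizeof(int32_t);
    size_t segend = (4 < opr_sz_n) ? 4 : opr_sz_n;   /* 4 results per 16 bytes */
    size_t i = 0;

    do {
        /* The m group is fixed for the whole segment. */
        const int8_t *mseg = m + i * 4 + index * 4;
        int32_t m0 = mseg[0], m1 = mseg[1], m2 = mseg[2], m3 = mseg[3];

        do {
            d[i] = a[i] + n[i * 4 + 0] * m0
                        + n[i * 4 + 1] * m1
                        + n[i * 4 + 2] * m2
                        + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;   /* advance to the next whole 16-byte segment */
    } while (i < opr_sz_n);
}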
@@ -866,26 +873,27 @@ DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, H8)
DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, H8)
void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float16 *d = vd;
float16 *n = vn;
float16 *m = vm;
- float_status *fpst = vfpst;
- uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
-
for (i = 0; i < opr_sz / 2; i += 2) {
float16 e0 = n[H2(i)];
- float16 e1 = m[H2(i + 1)] ^ neg_imag;
+ float16 e1 = m[H2(i + 1)];
float16 e2 = n[H2(i + 1)];
- float16 e3 = m[H2(i)] ^ neg_real;
+ float16 e3 = m[H2(i)];
+
+ if (rot) {
+ e3 = float16_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float16_maybe_ah_chs(e1, fpcr_ah);
+ }
d[H2(i)] = float16_add(e0, e1, fpst);
d[H2(i + 1)] = float16_add(e2, e3, fpst);
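
[End of the gvec_fcaddh hunk. The conditional negation now goes through float16_maybe_ah_chs(), which, judging from the *_ah_abd helpers later in this file, presumably leaves the sign of a NaN untouched when FPCR.AH is set. A standalone sketch of that behaviour on the raw binary16 encoding (f16_is_nan_sketch and the constants are illustrative, not the softfloat API):]

#include <stdint.h>
#include <stdbool.h>

static bool f16_is_nan_sketch(uint16_t h)
{
    /* exponent all ones (0x7c00) with a nonzero mantissa (0x03ff) */
    return (h & 0x7c00) == 0x7c00 && (h & 0x03ff) != 0;
}

/* With FPCR.AH = 0, flip the sign bit unconditionally; with FPCR.AH = 1,
 * a NaN input passes through with its sign unchanged. */
static uint16_t f16_maybe_ah_chs_sketch(uint16_t h, bool fpcr_ah)
{
    if (fpcr_ah && f16_is_nan_sketch(h)) {
        return h;
    }
    return h ^ 0x8000;
}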
@@ -894,26 +902,27 @@ void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
}
void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float32 *d = vd;
float32 *n = vn;
float32 *m = vm;
- float_status *fpst = vfpst;
- uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
-
for (i = 0; i < opr_sz / 4; i += 2) {
float32 e0 = n[H4(i)];
- float32 e1 = m[H4(i + 1)] ^ neg_imag;
+ float32 e1 = m[H4(i + 1)];
float32 e2 = n[H4(i + 1)];
- float32 e3 = m[H4(i)] ^ neg_real;
+ float32 e3 = m[H4(i)];
+
+ if (rot) {
+ e3 = float32_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float32_maybe_ah_chs(e1, fpcr_ah);
+ }
d[H4(i)] = float32_add(e0, e1, fpst);
d[H4(i + 1)] = float32_add(e2, e3, fpst);
@@ -922,26 +931,27 @@ void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
}
void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float64 *d = vd;
float64 *n = vn;
float64 *m = vm;
- float_status *fpst = vfpst;
- uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
- uint64_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 63;
- neg_imag <<= 63;
-
for (i = 0; i < opr_sz / 8; i += 2) {
float64 e0 = n[i];
- float64 e1 = m[i + 1] ^ neg_imag;
+ float64 e1 = m[i + 1];
float64 e2 = n[i + 1];
- float64 e3 = m[i] ^ neg_real;
+ float64 e3 = m[i];
+
+ if (rot) {
+ e3 = float64_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float64_maybe_ah_chs(e1, fpcr_ah);
+ }
d[i] = float64_add(e0, e1, fpst);
d[i + 1] = float64_add(e2, e3, fpst);
@@ -950,152 +960,167 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
}
void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float16 *d = vd, *n = vn, *m = vm, *a = va;
- float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float16 negx_imag, negx_real;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 15;
+ negx_imag = (negf_imag & ~fpcr_ah) << 15;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 2; i += 2) {
float16 e2 = n[H2(i + flip)];
- float16 e1 = m[H2(i + flip)] ^ neg_real;
+ float16 e1 = m[H2(i + flip)] ^ negx_real;
float16 e4 = e2;
- float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
+ float16 e3 = m[H2(i + 1 - flip)] ^ negx_imag;
- d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
- d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
+ d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], negf_real, fpst);
+ d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
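
[The "With AH=0, use negx; with AH=1 use negf" comment encodes one decision: AH=0 folds the negation into the operand's sign bit before the multiply-add, while AH=1 asks the fused multiply-add to negate the product, so a NaN operand's sign bits are not disturbed. The derivation of the two masks for the f16 case can be written out as a little truth table; NEGATE_PRODUCT below is a stand-in constant, not the real float_muladd_negate_product value:]

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NEGATE_PRODUCT 1   /* stand-in for float_muladd_negate_product */

/* Derive the XOR mask (negx) and the muladd flag (negf) from the "negate?"
 * bit and FPCR.AH, mirroring the fcmla helpers above. */
static void derive_neg(bool want_neg, bool fpcr_ah,
                       uint16_t *negx, int *negf)
{
    *negx = (want_neg && !fpcr_ah) ? 0x8000 : 0;
    *negf = (want_neg && fpcr_ah) ? NEGATE_PRODUCT : 0;
}

int main(void)
{
    for (int neg = 0; neg <= 1; neg++) {
        for (int ah = 0; ah <= 1; ah++) {
            uint16_t negx;
            int negf;
            derive_neg(neg, ah, &negx, &negf);
            printf("neg=%d ah=%d -> negx=0x%04x negf=%d\n", neg, ah, negx, negf);
        }
    }
    return 0;
}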
void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float16 *d = vd, *n = vn, *m = vm, *a = va;
- float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1);
+ uint32_t negf_real = flip ^ negf_imag;
intptr_t elements = opr_sz / sizeof(float16);
intptr_t eltspersegment = MIN(16 / sizeof(float16), elements);
+ float16 negx_imag, negx_real;
intptr_t i, j;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 15;
+ negx_imag = (negf_imag & ~fpcr_ah) << 15;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < elements; i += eltspersegment) {
float16 mr = m[H2(i + 2 * index + 0)];
float16 mi = m[H2(i + 2 * index + 1)];
- float16 e1 = neg_real ^ (flip ? mi : mr);
- float16 e3 = neg_imag ^ (flip ? mr : mi);
+ float16 e1 = negx_real ^ (flip ? mi : mr);
+ float16 e3 = negx_imag ^ (flip ? mr : mi);
for (j = i; j < i + eltspersegment; j += 2) {
float16 e2 = n[H2(j + flip)];
float16 e4 = e2;
- d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
- d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
+ d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], negf_real, fpst);
+ d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], negf_imag, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float32 *d = vd, *n = vn, *m = vm, *a = va;
- float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float32 negx_imag, negx_real;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 31;
+ negx_imag = (negf_imag & ~fpcr_ah) << 31;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 4; i += 2) {
float32 e2 = n[H4(i + flip)];
- float32 e1 = m[H4(i + flip)] ^ neg_real;
+ float32 e1 = m[H4(i + flip)] ^ negx_real;
float32 e4 = e2;
- float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
+ float32 e3 = m[H4(i + 1 - flip)] ^ negx_imag;
- d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
- d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
+ d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], negf_real, fpst);
+ d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float32 *d = vd, *n = vn, *m = vm, *a = va;
- float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1);
+ uint32_t negf_real = flip ^ negf_imag;
intptr_t elements = opr_sz / sizeof(float32);
intptr_t eltspersegment = MIN(16 / sizeof(float32), elements);
+ float32 negx_imag, negx_real;
intptr_t i, j;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 31;
+ negx_imag = (negf_imag & ~fpcr_ah) << 31;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < elements; i += eltspersegment) {
float32 mr = m[H4(i + 2 * index + 0)];
float32 mi = m[H4(i + 2 * index + 1)];
- float32 e1 = neg_real ^ (flip ? mi : mr);
- float32 e3 = neg_imag ^ (flip ? mr : mi);
+ float32 e1 = negx_real ^ (flip ? mi : mr);
+ float32 e3 = negx_imag ^ (flip ? mr : mi);
for (j = i; j < i + eltspersegment; j += 2) {
float32 e2 = n[H4(j + flip)];
float32 e4 = e2;
- d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
- d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
+ d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], negf_real, fpst);
+ d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], negf_imag, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
- void *vfpst, uint32_t desc)
+ float_status *fpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
float64 *d = vd, *n = vn, *m = vm, *a = va;
- float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint64_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float64 negx_real, negx_imag;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 63;
- neg_imag <<= 63;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63;
+ negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 8; i += 2) {
float64 e2 = n[i + flip];
- float64 e1 = m[i + flip] ^ neg_real;
+ float64 e1 = m[i + flip] ^ negx_real;
float64 e4 = e2;
- float64 e3 = m[i + 1 - flip] ^ neg_imag;
+ float64 e3 = m[i + 1 - flip] ^ negx_imag;
- d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
- d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
+ d[i] = float64_muladd(e2, e1, a[i], negf_real, fpst);
+ d[i + 1] = float64_muladd(e4, e3, a[i + 1], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
@@ -1180,9 +1205,8 @@ static uint64_t float64_acgt(float64 op1, float64 op2, float_status *stat)
return -float64_lt(float64_abs(op2), float64_abs(op1), stat);
}
-static int16_t vfp_tosszh(float16 x, void *fpstp)
+static int16_t vfp_tosszh(float16 x, float_status *fpst)
{
- float_status *fpst = fpstp;
if (float16_is_any_nan(x)) {
float_raise(float_flag_invalid, fpst);
return 0;
@@ -1190,9 +1214,8 @@ static int16_t vfp_tosszh(float16 x, void *fpstp)
return float16_to_int16_round_to_zero(x, fpst);
}
-static uint16_t vfp_touszh(float16 x, void *fpstp)
+static uint16_t vfp_touszh(float16 x, float_status *fpst)
{
- float_status *fpst = fpstp;
if (float16_is_any_nan(x)) {
float_raise(float_flag_invalid, fpst);
return 0;
@@ -1201,7 +1224,7 @@ static uint16_t vfp_touszh(float16 x, void *fpstp)
}
#define DO_2OP(NAME, FUNC, TYPE) \
-void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \
{ \
intptr_t i, oprsz = simd_oprsz(desc); \
TYPE *d = vd, *n = vn; \
@@ -1213,10 +1236,12 @@ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
+DO_2OP(gvec_frecpe_rpres_s, helper_recpe_rpres_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)
DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
+DO_2OP(gvec_frsqrte_rpres_s, helper_rsqrte_rpres_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)
DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
@@ -1246,8 +1271,10 @@ DO_2OP(gvec_touszh, vfp_touszh, float16)
#define DO_2OP_CMP0(FN, CMPOP, DIRN) \
WRAP_CMP0_##DIRN(FN, CMPOP, float16) \
WRAP_CMP0_##DIRN(FN, CMPOP, float32) \
+ WRAP_CMP0_##DIRN(FN, CMPOP, float64) \
DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16) \
- DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)
+ DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32) \
+ DO_2OP(gvec_f##FN##0_d, float64_##FN##0, float64)
DO_2OP_CMP0(cgt, cgt, FWD)
DO_2OP_CMP0(cge, cge, FWD)
@@ -1303,6 +1330,25 @@ static float64 float64_abd(float64 op1, float64 op2, float_status *stat)
return float64_abs(float64_sub(op1, op2, stat));
}
+/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */
+static float16 float16_ah_abd(float16 op1, float16 op2, float_status *stat)
+{
+ float16 r = float16_sub(op1, op2, stat);
+ return float16_is_any_nan(r) ? r : float16_abs(r);
+}
+
+static float32 float32_ah_abd(float32 op1, float32 op2, float_status *stat)
+{
+ float32 r = float32_sub(op1, op2, stat);
+ return float32_is_any_nan(r) ? r : float32_abs(r);
+}
+
+static float64 float64_ah_abd(float64 op1, float64 op2, float_status *stat)
+{
+ float64 r = float64_sub(op1, op2, stat);
+ return float64_is_any_nan(r) ? r : float64_abs(r);
+}
+
/*
* Reciprocal step. These are the AArch32 version which uses a
* non-fused multiply-and-subtract.
@@ -1359,7 +1405,8 @@ static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
}
#define DO_3OP(NAME, FUNC, TYPE) \
-void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, \
+ float_status *stat, uint32_t desc) \
{ \
intptr_t i, oprsz = simd_oprsz(desc); \
TYPE *d = vd, *n = vn, *m = vm; \
@@ -1389,6 +1436,10 @@ DO_3OP(gvec_fabd_h, float16_abd, float16)
DO_3OP(gvec_fabd_s, float32_abd, float32)
DO_3OP(gvec_fabd_d, float64_abd, float64)
+DO_3OP(gvec_ah_fabd_h, float16_ah_abd, float16)
+DO_3OP(gvec_ah_fabd_s, float32_ah_abd, float32)
+DO_3OP(gvec_ah_fabd_d, float64_ah_abd, float64)
+
DO_3OP(gvec_fceq_h, float16_ceq, float16)
DO_3OP(gvec_fceq_s, float32_ceq, float32)
DO_3OP(gvec_fceq_d, float64_ceq, float64)
@@ -1448,6 +1499,22 @@ DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
+DO_3OP(gvec_ah_recps_h, helper_recpsf_ah_f16, float16)
+DO_3OP(gvec_ah_recps_s, helper_recpsf_ah_f32, float32)
+DO_3OP(gvec_ah_recps_d, helper_recpsf_ah_f64, float64)
+
+DO_3OP(gvec_ah_rsqrts_h, helper_rsqrtsf_ah_f16, float16)
+DO_3OP(gvec_ah_rsqrts_s, helper_rsqrtsf_ah_f32, float32)
+DO_3OP(gvec_ah_rsqrts_d, helper_rsqrtsf_ah_f64, float64)
+
+DO_3OP(gvec_ah_fmax_h, helper_vfp_ah_maxh, float16)
+DO_3OP(gvec_ah_fmax_s, helper_vfp_ah_maxs, float32)
+DO_3OP(gvec_ah_fmax_d, helper_vfp_ah_maxd, float64)
+
+DO_3OP(gvec_ah_fmin_h, helper_vfp_ah_minh, float16)
+DO_3OP(gvec_ah_fmin_s, helper_vfp_ah_mins, float32)
+DO_3OP(gvec_ah_fmin_d, helper_vfp_ah_mind, float64)
+
#endif
#undef DO_3OP
@@ -1513,8 +1580,27 @@ static float64 float64_mulsub_f(float64 dest, float64 op1, float64 op2,
return float64_muladd(float64_chs(op1), op2, dest, 0, stat);
}
-#define DO_MULADD(NAME, FUNC, TYPE) \
-void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+static float16 float16_ah_mulsub_f(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
+static float32 float32_ah_mulsub_f(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
+static float64 float64_ah_mulsub_f(float64 dest, float64 op1, float64 op2,
+ float_status *stat)
+{
+ return float64_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
+#define DO_MULADD(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, \
+ float_status *stat, uint32_t desc) \
{ \
intptr_t i, oprsz = simd_oprsz(desc); \
TYPE *d = vd, *n = vn, *m = vm; \
@@ -1538,6 +1624,10 @@ DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64)
+DO_MULADD(gvec_ah_vfms_h, float16_ah_mulsub_f, float16)
+DO_MULADD(gvec_ah_vfms_s, float32_ah_mulsub_f, float32)
+DO_MULADD(gvec_ah_vfms_d, float64_ah_mulsub_f, float64)
+
/* For the indexed ops, SVE applies the index per 128-bit vector segment.
* For AdvSIMD, there is of course only one such vector segment.
*/
@@ -1591,7 +1681,8 @@ DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, H8)
#undef DO_MLA_IDX
#define DO_FMUL_IDX(NAME, ADD, MUL, TYPE, H) \
-void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, \
+ float_status *stat, uint32_t desc) \
{ \
intptr_t i, j, oprsz = simd_oprsz(desc); \
intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
@@ -1633,29 +1724,35 @@ DO_FMUL_IDX(gvec_fmls_nf_idx_s, float32_sub, float32_mul, float32, H4)
#undef DO_FMUL_IDX
-#define DO_FMLA_IDX(NAME, TYPE, H) \
+#define DO_FMLA_IDX(NAME, TYPE, H, NEGX, NEGF) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
- void *stat, uint32_t desc) \
+ float_status *stat, uint32_t desc) \
{ \
intptr_t i, j, oprsz = simd_oprsz(desc); \
intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
- TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \
- intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \
+ intptr_t idx = simd_data(desc); \
TYPE *d = vd, *n = vn, *m = vm, *a = va; \
- op1_neg <<= (8 * sizeof(TYPE) - 1); \
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
TYPE mm = m[H(i + idx)]; \
for (j = 0; j < segment; j++) { \
- d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \
- mm, a[i + j], 0, stat); \
+ d[i + j] = TYPE##_muladd(n[i + j] ^ NEGX, mm, \
+ a[i + j], NEGF, stat); \
} \
} \
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
-DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
-DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
-DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8)
+DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2, 0, 0)
+DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4, 0, 0)
+DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8, 0, 0)
+
+DO_FMLA_IDX(gvec_fmls_idx_h, float16, H2, INT16_MIN, 0)
+DO_FMLA_IDX(gvec_fmls_idx_s, float32, H4, INT32_MIN, 0)
+DO_FMLA_IDX(gvec_fmls_idx_d, float64, H8, INT64_MIN, 0)
+
+DO_FMLA_IDX(gvec_ah_fmls_idx_h, float16, H2, 0, float_muladd_negate_product)
+DO_FMLA_IDX(gvec_ah_fmls_idx_s, float32, H4, 0, float_muladd_negate_product)
+DO_FMLA_IDX(gvec_ah_fmls_idx_d, float64, H8, 0, float_muladd_negate_product)
#undef DO_FMLA_IDX
@@ -2028,135 +2125,171 @@ static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
* as there is not yet SVE versions that might use blocking.
*/
-static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
- uint32_t desc, bool fz16)
+static void do_fmlal(float32 *d, void *vn, void *vm,
+ CPUARMState *env, uint32_t desc,
+ ARMFPStatusFlavour fpst_idx,
+ uint64_t negx, int negf)
{
+ float_status *fpst = &env->vfp.fp_status[fpst_idx];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
intptr_t i, oprsz = simd_oprsz(desc);
- int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
int is_q = oprsz == 16;
uint64_t n_4, m_4;
- /* Pre-load all of the f16 data, avoiding overlap issues. */
- n_4 = load4_f16(vn, is_q, is_2);
+ /*
+ * Pre-load all of the f16 data, avoiding overlap issues.
+ * Negate all inputs for AH=0 FMLSL at once.
+ */
+ n_4 = load4_f16(vn, is_q, is_2) ^ negx;
m_4 = load4_f16(vm, is_q, is_2);
- /* Negate all inputs for FMLSL at once. */
- if (is_s) {
- n_4 ^= 0x8000800080008000ull;
- }
-
for (i = 0; i < oprsz / 4; i++) {
float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
- d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst);
}
clear_tail(d, oprsz, simd_maxsz(desc));
}
void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
- CPUARMState *env = venv;
- do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = is_s ? 0x8000800080008000ull : 0;
+
+ do_fmlal(vd, vn, vm, env, desc, FPST_STD, negx, 0);
}
void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
- CPUARMState *env = venv;
- do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = 0;
+ int negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000800080008000ull;
+ }
+ }
+ do_fmlal(vd, vn, vm, env, desc, FPST_A64, negx, negf);
}
void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
intptr_t i, oprsz = simd_oprsz(desc);
- uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
- CPUARMState *env = venv;
- float_status *status = &env->vfp.fp_status;
- bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+ float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
+ int negx = 0, negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000;
+ }
+ }
for (i = 0; i < oprsz; i += sizeof(float32)) {
- float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negx;
float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
float32 nn = float16_to_float32_by_bits(nn_16, fz16);
float32 mm = float16_to_float32_by_bits(mm_16, fz16);
float32 aa = *(float32 *)(va + H1_4(i));
- *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+ *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, negf, status);
}
}
-static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
- uint32_t desc, bool fz16)
+static void do_fmlal_idx(float32 *d, void *vn, void *vm,
+ CPUARMState *env, uint32_t desc,
+ ARMFPStatusFlavour fpst_idx,
+ uint64_t negx, int negf)
{
+ float_status *fpst = &env->vfp.fp_status[fpst_idx];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
intptr_t i, oprsz = simd_oprsz(desc);
- int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
int is_q = oprsz == 16;
uint64_t n_4;
float32 m_1;
- /* Pre-load all of the f16 data, avoiding overlap issues. */
- n_4 = load4_f16(vn, is_q, is_2);
-
- /* Negate all inputs for FMLSL at once. */
- if (is_s) {
- n_4 ^= 0x8000800080008000ull;
- }
-
+ /*
+ * Pre-load all of the f16 data, avoiding overlap issues.
+ * Negate all inputs for AH=0 FMLSL at once.
+ */
+ n_4 = load4_f16(vn, is_q, is_2) ^ negx;
m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);
for (i = 0; i < oprsz / 4; i++) {
float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
- d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst);
}
clear_tail(d, oprsz, simd_maxsz(desc));
}
void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
- CPUARMState *env = venv;
- do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = is_s ? 0x8000800080008000ull : 0;
+
+ do_fmlal_idx(vd, vn, vm, env, desc, FPST_STD, negx, 0);
}
void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
- CPUARMState *env = venv;
- do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = 0;
+ int negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000800080008000ull;
+ }
+ }
+ do_fmlal_idx(vd, vn, vm, env, desc, FPST_A64, negx, negf);
}
void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
- void *venv, uint32_t desc)
+ CPUARMState *env, uint32_t desc)
{
intptr_t i, j, oprsz = simd_oprsz(desc);
- uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
- CPUARMState *env = venv;
- float_status *status = &env->vfp.fp_status;
- bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+ float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
+ int negx = 0, negf = 0;
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000;
+ }
+ }
for (i = 0; i < oprsz; i += 16) {
float16 mm_16 = *(float16 *)(vm + i + idx);
float32 mm = float16_to_float32_by_bits(mm_16, fz16);
for (j = 0; j < 16; j += sizeof(float32)) {
- float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negx;
float32 nn = float16_to_float32_by_bits(nn_16, fz16);
float32 aa = *(float32 *)(va + H1_4(i + j));
*(float32 *)(vd + H1_4(i + j)) =
- float32_muladd(nn, mm, aa, 0, status);
+ float32_muladd(nn, mm, aa, negf, status);
}
}
}
@@ -2401,7 +2534,8 @@ DO_ABA(gvec_uaba_d, uint64_t)
#undef DO_ABA
#define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \
-void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, \
+ float_status *stat, uint32_t desc) \
{ \
ARMVectorReg scratch; \
intptr_t oprsz = simd_oprsz(desc); \
@@ -2439,6 +2573,16 @@ DO_3OP_PAIR(gvec_fminnump_h, float16_minnum, float16, H2)
DO_3OP_PAIR(gvec_fminnump_s, float32_minnum, float32, H4)
DO_3OP_PAIR(gvec_fminnump_d, float64_minnum, float64, )
+#ifdef TARGET_AARCH64
+DO_3OP_PAIR(gvec_ah_fmaxp_h, helper_vfp_ah_maxh, float16, H2)
+DO_3OP_PAIR(gvec_ah_fmaxp_s, helper_vfp_ah_maxs, float32, H4)
+DO_3OP_PAIR(gvec_ah_fmaxp_d, helper_vfp_ah_maxd, float64, )
+
+DO_3OP_PAIR(gvec_ah_fminp_h, helper_vfp_ah_minh, float16, H2)
+DO_3OP_PAIR(gvec_ah_fminp_s, helper_vfp_ah_mins, float32, H4)
+DO_3OP_PAIR(gvec_ah_fminp_d, helper_vfp_ah_mind, float64, )
+#endif
+
#undef DO_3OP_PAIR
#define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \
@@ -2486,7 +2630,7 @@ DO_3OP_PAIR(gvec_uminp_s, MIN, uint32_t, H4)
#undef DO_3OP_PAIR
#define DO_VCVT_FIXED(NAME, FUNC, TYPE) \
- void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \
{ \
intptr_t i, oprsz = simd_oprsz(desc); \
int shift = simd_data(desc); \
@@ -2498,21 +2642,25 @@ DO_3OP_PAIR(gvec_uminp_s, MIN, uint32_t, H4)
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
+DO_VCVT_FIXED(gvec_vcvt_sd, helper_vfp_sqtod, uint64_t)
+DO_VCVT_FIXED(gvec_vcvt_ud, helper_vfp_uqtod, uint64_t)
DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
-DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
-DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
-DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
-DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)
+
+DO_VCVT_FIXED(gvec_vcvt_rz_ds, helper_vfp_tosqd_round_to_zero, uint64_t)
+DO_VCVT_FIXED(gvec_vcvt_rz_du, helper_vfp_touqd_round_to_zero, uint64_t)
+DO_VCVT_FIXED(gvec_vcvt_rz_fs, helper_vfp_tosls_round_to_zero, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_rz_fu, helper_vfp_touls_round_to_zero, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_rz_hs, helper_vfp_toshh_round_to_zero, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_rz_hu, helper_vfp_touhh_round_to_zero, uint16_t)
#undef DO_VCVT_FIXED
#define DO_VCVT_RMODE(NAME, FUNC, TYPE) \
- void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ void HELPER(NAME)(void *vd, void *vn, float_status *fpst, uint32_t desc) \
{ \
- float_status *fpst = stat; \
intptr_t i, oprsz = simd_oprsz(desc); \
uint32_t rmode = simd_data(desc); \
uint32_t prev_rmode = get_float_rounding_mode(fpst); \
@@ -2525,6 +2673,8 @@ DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
+DO_VCVT_RMODE(gvec_vcvt_rm_sd, helper_vfp_tosqd, uint64_t)
+DO_VCVT_RMODE(gvec_vcvt_rm_ud, helper_vfp_touqd, uint64_t)
DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
@@ -2533,9 +2683,8 @@ DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)
#undef DO_VCVT_RMODE
#define DO_VRINT_RMODE(NAME, FUNC, TYPE) \
- void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ void HELPER(NAME)(void *vd, void *vn, float_status *fpst, uint32_t desc) \
{ \
- float_status *fpst = stat; \
intptr_t i, oprsz = simd_oprsz(desc); \
uint32_t rmode = simd_data(desc); \
uint32_t prev_rmode = get_float_rounding_mode(fpst); \
@@ -2554,10 +2703,9 @@ DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)
#undef DO_VRINT_RMODE
#ifdef TARGET_AARCH64
-void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
+void HELPER(simd_tblx)(void *vd, void *vm, CPUARMState *env, uint32_t desc)
{
const uint8_t *indices = vm;
- CPUARMState *env = venv;
size_t oprsz = simd_oprsz(desc);
uint32_t rn = extract32(desc, SIMD_DATA_SHIFT, 5);
bool is_tbx = extract32(desc, SIMD_DATA_SHIFT + 5, 1);
@@ -2790,44 +2938,109 @@ DO_MMLA_B(gvec_usmmla_b, do_usmmla_b)
* BFloat16 Dot Product
*/
-float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2)
+bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp)
+{
+ /*
+ * For BFDOT, BFMMLA, etc, the behaviour depends on FPCR.EBF.
+ * For EBF = 0, we ignore the FPCR bits which determine rounding
+ * mode and denormal-flushing, and we do unfused multiplies and
+ * additions with intermediate rounding of all products and sums.
+ * For EBF = 1, we honour FPCR rounding mode and denormal-flushing bits,
+ * and we perform a fused two-way sum-of-products without intermediate
+ * rounding of the products.
+ * In either case, we don't set fp exception flags.
+ *
+ * EBF is AArch64 only, so even if it's set in the FPCR it has
+ * no effect on AArch32 instructions.
+ */
+ bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF;
+
+ *statusp = env->vfp.fp_status[is_a64(env) ? FPST_A64 : FPST_A32];
+ set_default_nan_mode(true, statusp);
+
+ if (ebf) {
+ /* EBF=1 needs to do a step with round-to-odd semantics */
+ *oddstatusp = *statusp;
+ set_float_rounding_mode(float_round_to_odd, oddstatusp);
+ } else {
+ set_flush_to_zero(true, statusp);
+ set_flush_inputs_to_zero(true, statusp);
+ set_float_rounding_mode(float_round_to_odd_inf, statusp);
+ }
+ return ebf;
+}
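
[bfdotadd() below treats each 32-bit source lane as a pair of bfloat16 values and widens them to float32 by moving each half into the top 16 bits of a float32 encoding. A self-contained illustration of that widening trick, assuming the host float is IEEE binary32:]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* A bfloat16 value is the top 16 bits of a float32, so widening is just a
 * 16-bit left shift of its encoding (or a mask, for the half that is
 * already in the top bits). */
static float bf16_low_to_f32(uint32_t lane)
{
    uint32_t bits = lane << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

static float bf16_high_to_f32(uint32_t lane)
{
    uint32_t bits = lane & 0xffff0000u;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void)
{
    /* 0x3f80 is bfloat16 1.0, 0x4000 is bfloat16 2.0. */
    uint32_t lane = (0x4000u << 16) | 0x3f80u;
    printf("low=%f high=%f\n", bf16_low_to_f32(lane), bf16_high_to_f32(lane));
    return 0;
}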
+
+float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *fpst)
{
- /* FPCR is ignored for BFDOT and BFMMLA. */
- float_status bf_status = {
- .tininess_before_rounding = float_tininess_before_rounding,
- .float_rounding_mode = float_round_to_odd_inf,
- .flush_to_zero = true,
- .flush_inputs_to_zero = true,
- .default_nan_mode = true,
- };
float32 t1, t2;
/*
* Extract each BFloat16 from the element pair, and shift
* them such that they become float32.
*/
- t1 = float32_mul(e1 << 16, e2 << 16, &bf_status);
- t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, &bf_status);
- t1 = float32_add(t1, t2, &bf_status);
- t1 = float32_add(sum, t1, &bf_status);
+ t1 = float32_mul(e1 << 16, e2 << 16, fpst);
+ t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, fpst);
+ t1 = float32_add(t1, t2, fpst);
+ t1 = float32_add(sum, t1, fpst);
return t1;
}
-void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2,
+ float_status *fpst, float_status *fpst_odd)
+{
+ /*
+ * Compare f16_dotadd() in sme_helper.c, but here we have
+ * bfloat16 inputs. In particular that means that we do not
+ * want the FPCR.FZ16 flush semantics, so we use the normal
+ * float_status for the input handling here.
+ */
+ float64 e1r = float32_to_float64(e1 << 16, fpst);
+ float64 e1c = float32_to_float64(e1 & 0xffff0000u, fpst);
+ float64 e2r = float32_to_float64(e2 << 16, fpst);
+ float64 e2c = float32_to_float64(e2 & 0xffff0000u, fpst);
+ float64 t64;
+ float32 t32;
+
+ /*
+ * The ARM pseudocode function FPDot performs both multiplies
+ * and the add with a single rounding operation. Emulate this
+ * by performing the first multiply in round-to-odd, then doing
+ * the second multiply as fused multiply-add, and rounding to
+ * float32 all in one step.
+ */
+ t64 = float64_mul(e1r, e2r, fpst_odd);
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, fpst);
+
+ /* This conversion is exact, because we've already rounded. */
+ t32 = float64_to_float32(t64, fpst);
+
+ /* The final accumulation step is not fused. */
+ return float32_add(sum, t32, fpst);
+}
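
[The EBF=1 path above leans on round-to-odd: the intermediate keeps enough information that rounding again in the final step behaves like a single rounding. A minimal integer illustration of the rounding rule itself, sticky the discarded bits into the LSB, not the softfloat implementation:]

#include <stdint.h>
#include <stdio.h>

/* Round-to-odd on an integer significand: drop 'shift' low bits, and if any
 * dropped bit was set, force the result's LSB to 1 so the information that
 * something was discarded survives a later rounding. */
static uint64_t round_to_odd_sketch(uint64_t sig, unsigned shift)
{
    uint64_t dropped = sig & ((UINT64_C(1) << shift) - 1);
    uint64_t r = sig >> shift;
    if (dropped != 0) {
        r |= 1;
    }
    return r;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)round_to_odd_sketch(0xa0, 4)); /* 10 */
    printf("%llu\n", (unsigned long long)round_to_odd_sketch(0xa1, 4)); /* 11 */
    return 0;
}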
+
+void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
float32 *d = vd, *a = va;
uint32_t *n = vn, *m = vm;
+ float_status fpst, fpst_odd;
- for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = bfdotadd(a[i], n[i], m[i]);
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = bfdotadd_ebf(a[i], n[i], m[i], &fpst, &fpst_odd);
+ }
+ } else {
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = bfdotadd(a[i], n[i], m[i], &fpst);
+ }
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
- void *va, uint32_t desc)
+ void *va, CPUARMState *env, uint32_t desc)
{
intptr_t i, j, opr_sz = simd_oprsz(desc);
intptr_t index = simd_data(desc);
@@ -2835,59 +3048,106 @@ void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
intptr_t eltspersegment = MIN(16 / 4, elements);
float32 *d = vd, *a = va;
uint32_t *n = vn, *m = vm;
+ float_status fpst, fpst_odd;
- for (i = 0; i < elements; i += eltspersegment) {
- uint32_t m_idx = m[i + H4(index)];
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(index)];
- for (j = i; j < i + eltspersegment; j++) {
- d[j] = bfdotadd(a[j], n[j], m_idx);
+ for (j = i; j < i + eltspersegment; j++) {
+ d[j] = bfdotadd_ebf(a[j], n[j], m_idx, &fpst, &fpst_odd);
+ }
+ }
+ } else {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(index)];
+
+ for (j = i; j < i + eltspersegment; j++) {
+ d[j] = bfdotadd(a[j], n[j], m_idx, &fpst);
+ }
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
{
intptr_t s, opr_sz = simd_oprsz(desc);
float32 *d = vd, *a = va;
uint32_t *n = vn, *m = vm;
+ float_status fpst, fpst_odd;
- for (s = 0; s < opr_sz / 4; s += 4) {
- float32 sum00, sum01, sum10, sum11;
-
- /*
- * Process the entire segment at once, writing back the
- * results only after we've consumed all of the inputs.
- *
- * Key to indices by column:
- * i j i k j k
- */
- sum00 = a[s + H4(0 + 0)];
- sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]);
- sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]);
-
- sum01 = a[s + H4(0 + 1)];
- sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]);
- sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]);
-
- sum10 = a[s + H4(2 + 0)];
- sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]);
- sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]);
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (s = 0; s < opr_sz / 4; s += 4) {
+ float32 sum00, sum01, sum10, sum11;
- sum11 = a[s + H4(2 + 1)];
- sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]);
- sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]);
+ /*
+ * Process the entire segment at once, writing back the
+ * results only after we've consumed all of the inputs.
+ *
+ * Key to indices by column:
+ * i j i k j k
+ */
+ sum00 = a[s + H4(0 + 0)];
+ sum00 = bfdotadd_ebf(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)], &fpst, &fpst_odd);
+ sum00 = bfdotadd_ebf(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)], &fpst, &fpst_odd);
+
+ sum01 = a[s + H4(0 + 1)];
+ sum01 = bfdotadd_ebf(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)], &fpst, &fpst_odd);
+ sum01 = bfdotadd_ebf(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)], &fpst, &fpst_odd);
+
+ sum10 = a[s + H4(2 + 0)];
+ sum10 = bfdotadd_ebf(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)], &fpst, &fpst_odd);
+ sum10 = bfdotadd_ebf(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)], &fpst, &fpst_odd);
+
+ sum11 = a[s + H4(2 + 1)];
+ sum11 = bfdotadd_ebf(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)], &fpst, &fpst_odd);
+ sum11 = bfdotadd_ebf(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)], &fpst, &fpst_odd);
+
+ d[s + H4(0 + 0)] = sum00;
+ d[s + H4(0 + 1)] = sum01;
+ d[s + H4(2 + 0)] = sum10;
+ d[s + H4(2 + 1)] = sum11;
+ }
+ } else {
+ for (s = 0; s < opr_sz / 4; s += 4) {
+ float32 sum00, sum01, sum10, sum11;
- d[s + H4(0 + 0)] = sum00;
- d[s + H4(0 + 1)] = sum01;
- d[s + H4(2 + 0)] = sum10;
- d[s + H4(2 + 1)] = sum11;
+ /*
+ * Process the entire segment at once, writing back the
+ * results only after we've consumed all of the inputs.
+ *
+ * Key to indices by column:
+ * i j i k j k
+ */
+ sum00 = a[s + H4(0 + 0)];
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)], &fpst);
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)], &fpst);
+
+ sum01 = a[s + H4(0 + 1)];
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)], &fpst);
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)], &fpst);
+
+ sum10 = a[s + H4(2 + 0)];
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)], &fpst);
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)], &fpst);
+
+ sum11 = a[s + H4(2 + 1)];
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)], &fpst);
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)], &fpst);
+
+ d[s + H4(0 + 0)] = sum00;
+ d[s + H4(0 + 1)] = sum01;
+ d[s + H4(2 + 0)] = sum10;
+ d[s + H4(2 + 1)] = sum11;
+ }
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
- void *stat, uint32_t desc)
+ float_status *stat, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
intptr_t sel = simd_data(desc);
@@ -2903,7 +3163,7 @@ void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
}
void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
- void *va, void *stat, uint32_t desc)
+ void *va, float_status *stat, uint32_t desc)
{
intptr_t i, j, opr_sz = simd_oprsz(desc);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
@@ -2947,3 +3207,49 @@ DO_CLAMP(gvec_uclamp_b, uint8_t)
DO_CLAMP(gvec_uclamp_h, uint16_t)
DO_CLAMP(gvec_uclamp_s, uint32_t)
DO_CLAMP(gvec_uclamp_d, uint64_t)
+
+/* Bit count in each 8-bit word. */
+void HELPER(gvec_cnt_b)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint8_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ctpop8(n[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/* Reverse bits in each 8-bit word. */
+void HELPER(gvec_rbit_b)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = revbit64(bswap64(n[i]));
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_urecpe_s)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = helper_recpe_u32(n[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_ursqrte_s)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = helper_rsqrte_u32(n[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h
index 3ca1b94..c02f9c3 100644
--- a/target/arm/tcg/vec_internal.h
+++ b/target/arm/tcg/vec_internal.h
@@ -20,6 +20,10 @@
#ifndef TARGET_ARM_VEC_INTERNAL_H
#define TARGET_ARM_VEC_INTERNAL_H
+#include "fpu/softfloat.h"
+
+typedef struct CPUArchState CPUARMState;
+
/*
* Note that vector data is stored in host-endian 64-bit chunks,
* so addressing units smaller than that needs a host-endian fixup.
@@ -223,13 +227,79 @@ int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
* bfdotadd:
* @sum: addend
* @e1, @e2: multiplicand vectors
+ * @fpst: floating-point status to use
+ *
+ * BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum.
+ * The @e1 and @e2 operands correspond to the 32-bit source vector
+ * slots and contain two Bfloat16 values each.
+ *
+ * Corresponds to the ARM pseudocode function BFDotAdd, specialized
+ * for the FPCR.EBF == 0 case.
+ */
+float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *fpst);
+/**
+ * bfdotadd_ebf:
+ * @sum: addend
+ * @e1, @e2: multiplicand vectors
+ * @fpst: floating-point status to use
+ * @fpst_odd: floating-point status to use for round-to-odd operations
*
* BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum.
* The @e1 and @e2 operands correspond to the 32-bit source vector
* slots and contain two Bfloat16 values each.
*
- * Corresponds to the ARM pseudocode function BFDotAdd.
+ * Corresponds to the ARM pseudocode function BFDotAdd, specialized
+ * for the FPCR.EBF == 1 case.
+ */
+float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2,
+ float_status *fpst, float_status *fpst_odd);
+
+/**
+ * is_ebf:
+ * @env: CPU state
+ * @statusp: pointer to floating point status to fill in
+ * @oddstatusp: pointer to floating point status to fill in for round-to-odd
+ *
+ * Determine whether a BFDotAdd operation should use FPCR.EBF = 0
+ * or FPCR.EBF = 1 semantics. On return, *statusp and *oddstatusp have
+ * been initialized to suitable float_status arguments for use with
+ * either bfdotadd() or bfdotadd_ebf().
+ * Returns true for EBF = 1, false for EBF = 0. (The caller should use this
+ * to decide whether to call bfdotadd() or bfdotadd_ebf().)
+ */
+bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp);
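+
+/*
+ * Illustrative only (not part of this header's contract): a caller is
+ * expected to pair is_ebf() with the two dot-product routines roughly
+ * like this, which is the pattern the gvec_bfdot and gvec_bfmmla
+ * helpers follow:
+ *
+ *     float_status fpst, fpst_odd;
+ *     if (is_ebf(env, &fpst, &fpst_odd)) {
+ *         sum = bfdotadd_ebf(sum, e1, e2, &fpst, &fpst_odd);
+ *     } else {
+ *         sum = bfdotadd(sum, e1, e2, &fpst);
+ *     }
+ */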
+
+/*
+ * Negate as for FPCR.AH=1 -- do not negate NaNs.
*/
-float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2);
+static inline float16 float16_ah_chs(float16 a)
+{
+ return float16_is_any_nan(a) ? a : float16_chs(a);
+}
+
+static inline float32 float32_ah_chs(float32 a)
+{
+ return float32_is_any_nan(a) ? a : float32_chs(a);
+}
+
+static inline float64 float64_ah_chs(float64 a)
+{
+ return float64_is_any_nan(a) ? a : float64_chs(a);
+}
+
+static inline float16 float16_maybe_ah_chs(float16 a, bool fpcr_ah)
+{
+ return fpcr_ah && float16_is_any_nan(a) ? a : float16_chs(a);
+}
+
+static inline float32 float32_maybe_ah_chs(float32 a, bool fpcr_ah)
+{
+ return fpcr_ah && float32_is_any_nan(a) ? a : float32_chs(a);
+}
+
+static inline float64 float64_maybe_ah_chs(float64 a, bool fpcr_ah)
+{
+ return fpcr_ah && float64_is_any_nan(a) ? a : float64_chs(a);
+}
#endif /* TARGET_ARM_VEC_INTERNAL_H */
diff --git a/target/arm/tcg/vfp.decode b/target/arm/tcg/vfp.decode
index 5405e80..2dd87a2 100644
--- a/target/arm/tcg/vfp.decode
+++ b/target/arm/tcg/vfp.decode
@@ -141,18 +141,18 @@ VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d
VFMA_hp ---- 1110 1.10 .... .... 1001 .0. 0 .... @vfp_dnm_s
VFMS_hp ---- 1110 1.10 .... .... 1001 .1. 0 .... @vfp_dnm_s
-VFNMA_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s
-VFNMS_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s
+VFNMS_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s
+VFNMA_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s
VFMA_sp ---- 1110 1.10 .... .... 1010 .0. 0 .... @vfp_dnm_s
VFMS_sp ---- 1110 1.10 .... .... 1010 .1. 0 .... @vfp_dnm_s
-VFNMA_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s
-VFNMS_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s
+VFNMS_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s
+VFNMA_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s
VFMA_dp ---- 1110 1.10 .... .... 1011 .0.0 .... @vfp_dnm_d
VFMS_dp ---- 1110 1.10 .... .... 1011 .1.0 .... @vfp_dnm_d
-VFNMA_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d
-VFNMS_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d
+VFNMS_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d
+VFNMA_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d
VMOV_imm_hp ---- 1110 1.11 .... .... 1001 0000 .... \
vd=%vd_sp imm=%vmov_imm
diff --git a/target/arm/tcg/vfp_helper.c b/target/arm/tcg/vfp_helper.c
new file mode 100644
index 0000000..b1324c5
--- /dev/null
+++ b/target/arm/tcg/vfp_helper.c
@@ -0,0 +1,1370 @@
+/*
+ * ARM VFP floating-point operations
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "cpu-features.h"
+#include "fpu/softfloat.h"
+#include "qemu/log.h"
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
+/*
+ * Set the float_status behaviour to match the Arm defaults:
+ * * tininess-before-rounding
+ * * 2-input NaN propagation prefers SNaN over QNaN, and then
+ * operand A over operand B (see FPProcessNaNs() pseudocode)
+ * * 3-input NaN propagation prefers SNaN over QNaN, and then
+ * operand C over A over B (see FPProcessNaNs3() pseudocode,
+ * but note that for QEMU muladd is a * b + c, whereas for
+ * the pseudocode function the arguments are in the order c, a, b.
+ * * 0 * Inf + NaN returns the default NaN if the input NaN is quiet,
+ * and the input NaN if it is signalling
+ * * Default NaN has sign bit clear, msb frac bit set
+ */
+void arm_set_default_fp_behaviours(float_status *s)
+{
+ set_float_detect_tininess(float_tininess_before_rounding, s);
+ set_float_ftz_detection(float_ftz_before_rounding, s);
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);
+ set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
+ set_float_default_nan_pattern(0b01000000, s);
+}
+
+/*
+ * Set the float_status behaviour to match the FEAT_AFP
+ * FPCR.AH=1 requirements:
+ * * tininess-after-rounding
+ * * 2-input NaN propagation prefers the first NaN
+ * * 3-input NaN propagation prefers a over b over c
+ * * 0 * Inf + NaN always returns the input NaN and doesn't
+ * set Invalid for a QNaN
+ * * default NaN has sign bit set, msb frac bit set
+ */
+void arm_set_ah_fp_behaviours(float_status *s)
+{
+ set_float_detect_tininess(float_tininess_after_rounding, s);
+ set_float_ftz_detection(float_ftz_after_rounding, s);
+ set_float_2nan_prop_rule(float_2nan_prop_ab, s);
+ set_float_3nan_prop_rule(float_3nan_prop_abc, s);
+ set_float_infzeronan_rule(float_infzeronan_dnan_never |
+ float_infzeronan_suppress_invalid, s);
+ set_float_default_nan_pattern(0b11000000, s);
+}
+
+/* Convert host exception flags to vfp form. */
+static inline uint32_t vfp_exceptbits_from_host(int host_bits, bool ah)
+{
+ uint32_t target_bits = 0;
+
+ if (host_bits & float_flag_invalid) {
+ target_bits |= FPSR_IOC;
+ }
+ if (host_bits & float_flag_divbyzero) {
+ target_bits |= FPSR_DZC;
+ }
+ if (host_bits & float_flag_overflow) {
+ target_bits |= FPSR_OFC;
+ }
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal_flushed)) {
+ target_bits |= FPSR_UFC;
+ }
+ if (host_bits & float_flag_inexact) {
+ target_bits |= FPSR_IXC;
+ }
+ if (host_bits & float_flag_input_denormal_flushed) {
+ target_bits |= FPSR_IDC;
+ }
+ /*
+ * With FPCR.AH, IDC is set when an input denormal is used,
+ * and flushing an output denormal to zero sets both IXC and UFC.
+ */
+ if (ah && (host_bits & float_flag_input_denormal_used)) {
+ target_bits |= FPSR_IDC;
+ }
+ if (ah && (host_bits & float_flag_output_denormal_flushed)) {
+ target_bits |= FPSR_IXC;
+ }
+ return target_bits;
+}
+
+uint32_t vfp_get_fpsr_from_host(CPUARMState *env)
+{
+ uint32_t a32_flags = 0, a64_flags = 0;
+
+ a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A32]);
+ a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_STD]);
+ /* FZ16 does not generate an input denormal exception. */
+ a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A32_F16])
+ & ~float_flag_input_denormal_flushed);
+ a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_STD_F16])
+ & ~float_flag_input_denormal_flushed);
+
+ a64_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A64]);
+ a64_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A64_F16])
+ & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used));
+ /*
+ * We do not merge in flags from FPST_AH or FPST_AH_F16, because
+ * they are used for insns that must not set the cumulative exception bits.
+ */
+
+ /*
+ * Flushing an input denormal *only* because FPCR.FIZ == 1 does
+ * not set FPSR.IDC; if FPCR.FZ is also set then this takes
+ * precedence and IDC is set (see the FPUnpackBase pseudocode).
+ * So squash it unless (FPCR.AH == 0 && FPCR.FZ == 1).
+ * We only do this for the a64 flags because FIZ has no effect
+ * on AArch32 even if it is set.
+ */
+ if ((env->vfp.fpcr & (FPCR_FZ | FPCR_AH)) != FPCR_FZ) {
+ a64_flags &= ~float_flag_input_denormal_flushed;
+ }
+ return vfp_exceptbits_from_host(a64_flags, env->vfp.fpcr & FPCR_AH) |
+ vfp_exceptbits_from_host(a32_flags, false);
+}
+
+void vfp_clear_float_status_exc_flags(CPUARMState *env)
+{
+ /*
+ * Clear out all the exception-flag information in the float_status
+ * values. The caller should have arranged for env->vfp.fpsr to
+ * be the architecturally up-to-date exception flag information first.
+ */
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32_F16]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH]);
+ set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH_F16]);
+}
+
+static void vfp_sync_and_clear_float_status_exc_flags(CPUARMState *env)
+{
+ /*
+ * Synchronize any pending exception-flag information in the
+ * float_status values into env->vfp.fpsr, and then clear out
+ * the float_status data.
+ */
+ env->vfp.fpsr |= vfp_get_fpsr_from_host(env);
+ vfp_clear_float_status_exc_flags(env);
+}
+
+void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ uint64_t changed = env->vfp.fpcr;
+
+ changed ^= val;
+ changed &= mask;
+ if (changed & (3 << 22)) {
+ int i = (val >> 22) & 3;
+ switch (i) {
+ case FPROUNDING_TIEEVEN:
+ i = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ i = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ i = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ i = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32]);
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64]);
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]);
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]);
+ }
+ if (changed & FPCR_FZ16) {
+ bool ftz_enabled = val & FPCR_FZ16;
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]);
+ }
+ if (changed & FPCR_FZ) {
+ bool ftz_enabled = val & FPCR_FZ;
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64]);
+ /* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]);
+ }
+ if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) {
+ /*
+ * A64: Flush denormalized inputs to zero if FPCR.FIZ = 1, or
+ * both FPCR.AH = 0 and FPCR.FZ = 1.
+ */
+ bool fitz_enabled = (val & FPCR_FIZ) ||
+ (val & (FPCR_FZ | FPCR_AH)) == FPCR_FZ;
+ set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_A64]);
+ }
+ if (changed & FPCR_DN) {
+ bool dnan_enabled = val & FPCR_DN;
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32]);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64]);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32_F16]);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]);
+ }
+ if (changed & FPCR_AH) {
+ bool ah_enabled = val & FPCR_AH;
+
+ if (ah_enabled) {
+ /* Change behaviours for A64 FP operations */
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ } else {
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ }
+ }
+ /*
+ * If any bits changed that we look at in vfp_get_fpsr_from_host(),
+ * we must sync the float_status flags into vfp.fpsr now (under the
+ * old regime) before we update vfp.fpcr.
+ */
+ if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) {
+ vfp_sync_and_clear_float_status_exc_flags(env);
+ }
+}
+
+/*
+ * VFP support. We follow the convention used for VFP instructions:
+ * Single precision routines have a "s" suffix, double precision a
+ * "d" suffix.
+ */
+
+#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
+
+#define VFP_BINOP(name) \
+dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, float_status *fpst) \
+{ \
+ return float16_ ## name(a, b, fpst); \
+} \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, float_status *fpst) \
+{ \
+ return float32_ ## name(a, b, fpst); \
+} \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, float_status *fpst) \
+{ \
+ return float64_ ## name(a, b, fpst); \
+}
+VFP_BINOP(add)
+VFP_BINOP(sub)
+VFP_BINOP(mul)
+VFP_BINOP(div)
+VFP_BINOP(min)
+VFP_BINOP(max)
+VFP_BINOP(minnum)
+VFP_BINOP(maxnum)
+#undef VFP_BINOP
+
+dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, float_status *fpst)
+{
+ return float16_sqrt(a, fpst);
+}
+
+float32 VFP_HELPER(sqrt, s)(float32 a, float_status *fpst)
+{
+ return float32_sqrt(a, fpst);
+}
+
+float64 VFP_HELPER(sqrt, d)(float64 a, float_status *fpst)
+{
+ return float64_sqrt(a, fpst);
+}
+
+static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp)
+{
+ uint32_t flags;
+ switch (cmp) {
+ case float_relation_equal:
+ flags = 0x6;
+ break;
+ case float_relation_less:
+ flags = 0x8;
+ break;
+ case float_relation_greater:
+ flags = 0x2;
+ break;
+ case float_relation_unordered:
+ flags = 0x3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ env->vfp.fpsr = deposit64(env->vfp.fpsr, 28, 4, flags); /* NZCV */
+}
+
+/* XXX: check quiet/signaling case */
+#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \
+void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
+{ \
+ softfloat_to_vfp_compare(env, \
+ FLOATTYPE ## _compare_quiet(a, b, &env->vfp.fp_status[FPST])); \
+} \
+void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
+{ \
+ softfloat_to_vfp_compare(env, \
+ FLOATTYPE ## _compare(a, b, &env->vfp.fp_status[FPST])); \
+}
+DO_VFP_cmp(h, float16, dh_ctype_f16, FPST_A32_F16)
+DO_VFP_cmp(s, float32, float32, FPST_A32)
+DO_VFP_cmp(d, float64, float64, FPST_A32)
+#undef DO_VFP_cmp
+
+/* Integer to float and float to integer conversions */
+
+#define CONV_ITOF(name, ftype, fsz, sign) \
+ftype HELPER(name)(uint32_t x, float_status *fpst) \
+{ \
+ return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
+}
+
+#define CONV_FTOI(name, ftype, fsz, sign, round) \
+sign##int32_t HELPER(name)(ftype x, float_status *fpst) \
+{ \
+ if (float##fsz##_is_any_nan(x)) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ return float##fsz##_to_##sign##int32##round(x, fpst); \
+}
+
+#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
+ CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
+ CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
+ CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)
+
+FLOAT_CONVS(si, h, uint32_t, 16, )
+FLOAT_CONVS(si, s, float32, 32, )
+FLOAT_CONVS(si, d, float64, 64, )
+FLOAT_CONVS(ui, h, uint32_t, 16, u)
+FLOAT_CONVS(ui, s, float32, 32, u)
+FLOAT_CONVS(ui, d, float64, 64, u)
+
+#undef CONV_ITOF
+#undef CONV_FTOI
+#undef FLOAT_CONVS
+
+/* floating point conversion */
+float64 VFP_HELPER(fcvtd, s)(float32 x, float_status *status)
+{
+ return float32_to_float64(x, status);
+}
+
+float32 VFP_HELPER(fcvts, d)(float64 x, float_status *status)
+{
+ return float64_to_float32(x, status);
+}
+
+uint32_t HELPER(bfcvt)(float32 x, float_status *status)
+{
+ return float32_to_bfloat16(x, status);
+}
+
+uint32_t HELPER(bfcvt_pair)(uint64_t pair, float_status *status)
+{
+ bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status);
+ bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status);
+ return deposit32(lo, 16, 16, hi);
+}
+
+/*
+ * VFP3 fixed point conversion. The AArch32 versions of fix-to-float
+ * must always round-to-nearest; the AArch64 ones honour the FPSCR
+ * rounding mode. (For AArch32 Neon the standard-FPSCR is set to
+ * round-to-nearest so either helper will work.) AArch32 float-to-fix
+ * must round-to-zero.
+ */
+#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
+ float_status *fpst) \
+{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpst); }
+
+#define VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
+ ftype HELPER(vfp_##name##to##p##_round_to_nearest)(uint##isz##_t x, \
+ uint32_t shift, \
+ float_status *fpst) \
+ { \
+ ftype ret; \
+ FloatRoundMode oldmode = fpst->float_rounding_mode; \
+ fpst->float_rounding_mode = float_round_nearest_even; \
+ ret = itype##_to_##float##fsz##_scalbn(x, -shift, fpst); \
+ fpst->float_rounding_mode = oldmode; \
+ return ret; \
+ }
+
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \
+uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \
+ float_status *fpst) \
+{ \
+ if (unlikely(float##fsz##_is_any_nan(x))) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
+}
+
+#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ float_round_to_zero, _round_to_zero) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ get_float_rounding_mode(fpst), )
+
+#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ get_float_rounding_mode(fpst), )
+
+VFP_CONV_FIX(sh, d, 64, float64, 64, int16)
+VFP_CONV_FIX(sl, d, 64, float64, 64, int32)
+VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64)
+VFP_CONV_FIX(uh, d, 64, float64, 64, uint16)
+VFP_CONV_FIX(ul, d, 64, float64, 64, uint32)
+VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64)
+VFP_CONV_FIX(sh, s, 32, float32, 32, int16)
+VFP_CONV_FIX(sl, s, 32, float32, 32, int32)
+VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
+VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
+VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
+VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)
+VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16)
+VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32)
+VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64)
+VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16)
+VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32)
+VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64)
+VFP_CONV_FLOAT_FIX_ROUND(sq, d, 64, float64, 64, int64,
+ float_round_to_zero, _round_to_zero)
+VFP_CONV_FLOAT_FIX_ROUND(uq, d, 64, float64, 64, uint64,
+ float_round_to_zero, _round_to_zero)
+
+#undef VFP_CONV_FIX
+#undef VFP_CONV_FIX_FLOAT
+#undef VFP_CONV_FLOAT_FIX_ROUND
+#undef VFP_CONV_FIX_A64
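+
+/*
+ * As a concrete illustration of the macros above (helper names written
+ * out informally, not defined anywhere by hand): the invocation
+ * VFP_CONV_FIX(sl, s, 32, float32, 32, int32) generates the helpers
+ * vfp_sltos, vfp_sltos_round_to_nearest, vfp_tosls_round_to_zero and
+ * vfp_tosls, i.e. fixed-point int32 <-> float32 conversions which take
+ * the number of fractional bits as their "shift" argument.
+ */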
+
+/*
+ * Set the current fp rounding mode and return the old one.
+ * The argument is a softfloat float_round_ value.
+ */
+uint32_t HELPER(set_rmode)(uint32_t rmode, float_status *fp_status)
+{
+ uint32_t prev_rmode = get_float_rounding_mode(fp_status);
+ set_float_rounding_mode(rmode, fp_status);
+
+ return prev_rmode;
+}
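+
+/*
+ * A rough usage sketch (illustrative, not a caller in this file): the
+ * translator-generated code typically brackets an operation that needs
+ * a specific rounding mode with a save/restore pair, e.g.
+ *
+ *     uint32_t prev = helper_set_rmode(float_round_to_zero, fpst);
+ *     ... perform the operation using fpst ...
+ *     helper_set_rmode(prev, fpst);
+ */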
+
+/* Half precision conversions. */
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, float_status *fpst,
+ uint32_t ahp_mode)
+{
+ /*
+ * Squash FZ16 to 0 for the duration of the conversion: if left set,
+ * it would cause input denormals to be flushed.
+ */
+ bool save = get_flush_inputs_to_zero(fpst);
+ set_flush_inputs_to_zero(false, fpst);
+ float32 r = float16_to_float32(a, !ahp_mode, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, float_status *fpst,
+ uint32_t ahp_mode)
+{
+ /*
+ * Squash FZ16 to 0 for the duration of the conversion: if left set,
+ * it would cause output denormals to be flushed.
+ */
+ bool save = get_flush_to_zero(fpst);
+ set_flush_to_zero(false, fpst);
+ float16 r = float32_to_float16(a, !ahp_mode, fpst);
+ set_flush_to_zero(save, fpst);
+ return r;
+}
+
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, float_status *fpst,
+ uint32_t ahp_mode)
+{
+ /*
+ * Squash FZ16 to 0 for the duration of the conversion: if left set,
+ * it would cause input denormals to be flushed.
+ */
+ bool save = get_flush_inputs_to_zero(fpst);
+ set_flush_inputs_to_zero(false, fpst);
+ float64 r = float16_to_float64(a, !ahp_mode, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, float_status *fpst,
+ uint32_t ahp_mode)
+{
+ /*
+ * Squash FZ16 to 0 for the duration of the conversion: if left set,
+ * it would cause output denormals to be flushed.
+ */
+ bool save = get_flush_to_zero(fpst);
+ set_flush_to_zero(false, fpst);
+ float16 r = float64_to_float16(a, !ahp_mode, fpst);
+ set_flush_to_zero(save, fpst);
+ return r;
+}
+
+/* NEON helpers. */
+
+/*
+ * Constants 256 and 512 are used in some helpers; we avoid relying
+ * on int->float conversions at run-time.
+ */
+#define float64_256 make_float64(0x4070000000000000LL)
+#define float64_512 make_float64(0x4080000000000000LL)
+#define float16_maxnorm make_float16(0x7bff)
+#define float32_maxnorm make_float32(0x7f7fffff)
+#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
+
+/*
+ * Reciprocal functions
+ *
+ * The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM; see FPRecipEstimate()/RecipEstimate.
+ */
+
+/*
+ * See RecipEstimate()
+ *
+ * input is a 9-bit fixed point number
+ * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
+ * result range 256 .. 511 for a number from 1.0 to 511/256.
+ */
+
+static int recip_estimate(int input)
+{
+ int a, b, r;
+ assert(256 <= input && input < 512);
+ a = (input * 2) + 1;
+ b = (1 << 19) / a;
+ r = (b + 1) >> 1;
+ assert(256 <= r && r < 512);
+ return r;
+}
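+
+/*
+ * Informal worked example (not from the pseudocode): for input 341,
+ * i.e. x = 341/512 ~= 0.666, we get a = 683, b = (1 << 19) / 683 = 767
+ * and r = (767 + 1) >> 1 = 384, which represents 384/256 = 1.5 against
+ * the exact reciprocal 512/341 ~= 1.502.
+ */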
+
+/*
+ * Increased precision version:
+ * input is a 13 bit fixed point number
+ * input range 2048 .. 4095 for a number from 0.5 <= x < 1.0.
+ * result range 4096 .. 8191 for a number from 1.0 to 2.0
+ */
+static int recip_estimate_incprec(int input)
+{
+ int a, b, r;
+ assert(2048 <= input && input < 4096);
+ a = (input * 2) + 1;
+ /*
+ * The pseudocode expresses this as an operation on infinite
+ * precision reals where it calculates 2^25 / a and then looks
+ * at the error between that and the rounded-down-to-integer
+ * value to see if it should instead round up. We instead
+ * follow the same approach as the pseudocode for the 8-bit
+ * precision version, and calculate (2 * (2^25 / a)) as an
+ * integer so we can do the "add one and halve" to round it.
+ * So the 1 << 26 here is correct.
+ */
+ b = (1 << 26) / a;
+ r = (b + 1) >> 1;
+ assert(4096 <= r && r < 8192);
+ return r;
+}
+
+/*
+ * Common wrapper to call recip_estimate
+ *
+ * The parameters are exponent and 64 bit fraction (without implicit
+ * bit) where the binary point is nominally at bit 52. Returns a
+ * float64 which can then be rounded to the appropriate size by the
+ * callee.
+ */
+
+static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac,
+ bool increasedprecision)
+{
+ uint32_t scaled, estimate;
+ uint64_t result_frac;
+ int result_exp;
+
+ /* Handle sub-normals */
+ if (*exp == 0) {
+ if (extract64(frac, 51, 1) == 0) {
+ *exp = -1;
+ frac <<= 2;
+ } else {
+ frac <<= 1;
+ }
+ }
+
+ if (increasedprecision) {
+ /* scaled = UInt('1':fraction<51:41>) */
+ scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11));
+ estimate = recip_estimate_incprec(scaled);
+ } else {
+ /* scaled = UInt('1':fraction<51:44>) */
+ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
+ estimate = recip_estimate(scaled);
+ }
+
+ result_exp = exp_off - *exp;
+ if (increasedprecision) {
+ result_frac = deposit64(0, 40, 12, estimate);
+ } else {
+ result_frac = deposit64(0, 44, 8, estimate);
+ }
+ if (result_exp == 0) {
+ result_frac = deposit64(result_frac >> 1, 51, 1, 1);
+ } else if (result_exp == -1) {
+ result_frac = deposit64(result_frac >> 2, 50, 2, 1);
+ result_exp = 0;
+ }
+
+ *exp = result_exp;
+
+ return result_frac;
+}
+
+static bool round_to_inf(float_status *fpst, bool sign_bit)
+{
+ switch (fpst->float_rounding_mode) {
+ case float_round_nearest_even: /* Round to Nearest */
+ return true;
+ case float_round_up: /* Round to +Inf */
+ return !sign_bit;
+ case float_round_down: /* Round to -Inf */
+ return sign_bit;
+ case float_round_to_zero: /* Round to Zero */
+ return false;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(recpe_f16)(uint32_t input, float_status *fpst)
+{
+ float16 f16 = float16_squash_input_denormal(input, fpst);
+ uint32_t f16_val = float16_val(f16);
+ uint32_t f16_sign = float16_is_neg(f16);
+ int f16_exp = extract32(f16_val, 10, 5);
+ uint32_t f16_frac = extract32(f16_val, 0, 10);
+ uint64_t f64_frac;
+
+ if (float16_is_any_nan(f16)) {
+ float16 nan = f16;
+ if (float16_is_signaling_nan(f16, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float16_silence_nan(f16, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float16_default_nan(fpst);
+ }
+ return nan;
+ } else if (float16_is_infinity(f16)) {
+ return float16_set_sign(float16_zero, float16_is_neg(f16));
+ } else if (float16_is_zero(f16)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float16_set_sign(float16_infinity, float16_is_neg(f16));
+ } else if (float16_abs(f16) < (1 << 8)) {
+ /* Abs(value) < 2.0^-16 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f16_sign)) {
+ return float16_set_sign(float16_infinity, f16_sign);
+ } else {
+ return float16_set_sign(float16_maxnorm, f16_sign);
+ }
+ } else if (f16_exp >= 29 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float16_set_sign(float16_zero, float16_is_neg(f16));
+ }
+
+ f64_frac = call_recip_estimate(&f16_exp, 29,
+ ((uint64_t) f16_frac) << (52 - 10), false);
+
+ /* result = sign : result_exp<4:0> : fraction<51:42> */
+ f16_val = deposit32(0, 15, 1, f16_sign);
+ f16_val = deposit32(f16_val, 10, 5, f16_exp);
+ f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
+ return make_float16(f16_val);
+}
+
+/*
+ * FEAT_RPRES means the f32 FRECPE has an "increased precision" variant
+ * which is used when FPCR.AH == 1.
+ */
+static float32 do_recpe_f32(float32 input, float_status *fpst, bool rpres)
+{
+ float32 f32 = float32_squash_input_denormal(input, fpst);
+ uint32_t f32_val = float32_val(f32);
+ bool f32_sign = float32_is_neg(f32);
+ int f32_exp = extract32(f32_val, 23, 8);
+ uint32_t f32_frac = extract32(f32_val, 0, 23);
+ uint64_t f64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float32_silence_nan(f32, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan(fpst);
+ }
+ return nan;
+ } else if (float32_is_infinity(f32)) {
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_abs(f32) < (1ULL << 21)) {
+ /* Abs(value) < 2.0^-128 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f32_sign)) {
+ return float32_set_sign(float32_infinity, f32_sign);
+ } else {
+ return float32_set_sign(float32_maxnorm, f32_sign);
+ }
+ } else if (f32_exp >= 253 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ }
+
+ f64_frac = call_recip_estimate(&f32_exp, 253,
+ ((uint64_t) f32_frac) << (52 - 23), rpres);
+
+ /* result = sign : result_exp<7:0> : fraction<51:29> */
+ f32_val = deposit32(0, 31, 1, f32_sign);
+ f32_val = deposit32(f32_val, 23, 8, f32_exp);
+ f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
+ return make_float32(f32_val);
+}
+
+float32 HELPER(recpe_f32)(float32 input, float_status *fpst)
+{
+ return do_recpe_f32(input, fpst, false);
+}
+
+float32 HELPER(recpe_rpres_f32)(float32 input, float_status *fpst)
+{
+ return do_recpe_f32(input, fpst, true);
+}
+
+float64 HELPER(recpe_f64)(float64 input, float_status *fpst)
+{
+ float64 f64 = float64_squash_input_denormal(input, fpst);
+ uint64_t f64_val = float64_val(f64);
+ bool f64_sign = float64_is_neg(f64);
+ int f64_exp = extract64(f64_val, 52, 11);
+ uint64_t f64_frac = extract64(f64_val, 0, 52);
+
+ /* Deal with any special cases */
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float64_silence_nan(f64, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan(fpst);
+ }
+ return nan;
+ } else if (float64_is_infinity(f64)) {
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
+ /* Abs(value) < 2.0^-1024 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f64_sign)) {
+ return float64_set_sign(float64_infinity, f64_sign);
+ } else {
+ return float64_set_sign(float64_maxnorm, f64_sign);
+ }
+ } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ }
+
+ f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac, false);
+
+ /* result = sign : result_exp<10:0> : fraction<51:0>; */
+ f64_val = deposit64(0, 63, 1, f64_sign);
+ f64_val = deposit64(f64_val, 52, 11, f64_exp);
+ f64_val = deposit64(f64_val, 0, 52, f64_frac);
+ return make_float64(f64_val);
+}
+
+/*
+ * The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM.
+ */
+
+static int do_recip_sqrt_estimate(int a)
+{
+ int b, estimate;
+
+ assert(128 <= a && a < 512);
+ if (a < 256) {
+ a = a * 2 + 1;
+ } else {
+ a = (a >> 1) << 1;
+ a = (a + 1) * 2;
+ }
+ b = 512;
+ while (a * (b + 1) * (b + 1) < (1 << 28)) {
+ b += 1;
+ }
+ estimate = (b + 1) / 2;
+ assert(256 <= estimate && estimate < 512);
+
+ return estimate;
+}
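+
+/*
+ * Informal worked example (not from the pseudocode): for input 288,
+ * i.e. x = 288/512 = 0.5625, the scaling gives a = 578, the loop stops
+ * at b = 681, and the estimate is (681 + 1) / 2 = 341, representing
+ * 341/256 ~= 1.332 against the exact 1/sqrt(0.5625) ~= 1.333.
+ */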
+
+static int do_recip_sqrt_estimate_incprec(int a)
+{
+ /*
+ * The Arm ARM describes the 12-bit precision version of RecipSqrtEstimate
+ * in terms of an infinite-precision floating point calculation of a
+ * square root. We implement this using the same kind of pure integer
+ * algorithm as the 8-bit mantissa, to get the same bit-for-bit result.
+ */
+ int64_t b, estimate;
+
+ assert(1024 <= a && a < 4096);
+ if (a < 2048) {
+ a = a * 2 + 1;
+ } else {
+ a = (a >> 1) << 1;
+ a = (a + 1) * 2;
+ }
+ b = 8192;
+ while (a * (b + 1) * (b + 1) < (1ULL << 39)) {
+ b += 1;
+ }
+ estimate = (b + 1) / 2;
+
+ assert(4096 <= estimate && estimate < 8192);
+
+ return estimate;
+}
+
+static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac,
+ bool increasedprecision)
+{
+ int estimate;
+ uint32_t scaled;
+
+ if (*exp == 0) {
+ while (extract64(frac, 51, 1) == 0) {
+ frac = frac << 1;
+ *exp -= 1;
+ }
+ frac = extract64(frac, 0, 51) << 1;
+ }
+
+ if (increasedprecision) {
+ if (*exp & 1) {
+ /* scaled = UInt('01':fraction<51:42>) */
+ scaled = deposit32(1 << 10, 0, 10, extract64(frac, 42, 10));
+ } else {
+ /* scaled = UInt('1':fraction<51:41>) */
+ scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11));
+ }
+ estimate = do_recip_sqrt_estimate_incprec(scaled);
+ } else {
+ if (*exp & 1) {
+ /* scaled = UInt('01':fraction<51:45>) */
+ scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
+ } else {
+ /* scaled = UInt('1':fraction<51:44>) */
+ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
+ }
+ estimate = do_recip_sqrt_estimate(scaled);
+ }
+
+ *exp = (exp_off - *exp) / 2;
+ if (increasedprecision) {
+ return extract64(estimate, 0, 12) << 40;
+ } else {
+ return extract64(estimate, 0, 8) << 44;
+ }
+}
+
+uint32_t HELPER(rsqrte_f16)(uint32_t input, float_status *s)
+{
+ float16 f16 = float16_squash_input_denormal(input, s);
+ uint16_t val = float16_val(f16);
+ bool f16_sign = float16_is_neg(f16);
+ int f16_exp = extract32(val, 10, 5);
+ uint16_t f16_frac = extract32(val, 0, 10);
+ uint64_t f64_frac;
+
+ if (float16_is_any_nan(f16)) {
+ float16 nan = f16;
+ if (float16_is_signaling_nan(f16, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float16_silence_nan(f16, s);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float16_default_nan(s);
+ }
+ return nan;
+ } else if (float16_is_zero(f16)) {
+ float_raise(float_flag_divbyzero, s);
+ return float16_set_sign(float16_infinity, f16_sign);
+ } else if (f16_sign) {
+ float_raise(float_flag_invalid, s);
+ return float16_default_nan(s);
+ } else if (float16_is_infinity(f16)) {
+ return float16_zero;
+ }
+
+ /*
+ * Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent.
+ */
+
+ f64_frac = ((uint64_t) f16_frac) << (52 - 10);
+
+ f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac, false);
+
+ /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
+ val = deposit32(0, 15, 1, f16_sign);
+ val = deposit32(val, 10, 5, f16_exp);
+ val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
+ return make_float16(val);
+}
+
+/*
+ * FEAT_RPRES means the f32 FRSQRTE has an "increased precision" variant
+ * which is used when FPCR.AH == 1.
+ */
+static float32 do_rsqrte_f32(float32 input, float_status *s, bool rpres)
+{
+ float32 f32 = float32_squash_input_denormal(input, s);
+ uint32_t val = float32_val(f32);
+ uint32_t f32_sign = float32_is_neg(f32);
+ int f32_exp = extract32(val, 23, 8);
+ uint32_t f32_frac = extract32(val, 0, 23);
+ uint64_t f64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float32_silence_nan(f32, s);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float32_default_nan(s);
+ }
+ return nan;
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, s);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_is_neg(f32)) {
+ float_raise(float_flag_invalid, s);
+ return float32_default_nan(s);
+ } else if (float32_is_infinity(f32)) {
+ return float32_zero;
+ }
+
+ /*
+ * Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent.
+ */
+
+ f64_frac = ((uint64_t) f32_frac) << 29;
+
+ f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac, rpres);
+
+ /*
+ * result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15)
+ * or for increased precision
+ * result = sign : result_exp<7:0> : estimate<11:0> : Zeros(11)
+ */
+ val = deposit32(0, 31, 1, f32_sign);
+ val = deposit32(val, 23, 8, f32_exp);
+ if (rpres) {
+ val = deposit32(val, 11, 12, extract64(f64_frac, 52 - 12, 12));
+ } else {
+ val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
+ }
+ return make_float32(val);
+}
+
+float32 HELPER(rsqrte_f32)(float32 input, float_status *s)
+{
+ return do_rsqrte_f32(input, s, false);
+}
+
+float32 HELPER(rsqrte_rpres_f32)(float32 input, float_status *s)
+{
+ return do_rsqrte_f32(input, s, true);
+}
+
+float64 HELPER(rsqrte_f64)(float64 input, float_status *s)
+{
+ float64 f64 = float64_squash_input_denormal(input, s);
+ uint64_t val = float64_val(f64);
+ bool f64_sign = float64_is_neg(f64);
+ int f64_exp = extract64(val, 52, 11);
+ uint64_t f64_frac = extract64(val, 0, 52);
+
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float64_silence_nan(f64, s);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float64_default_nan(s);
+ }
+ return nan;
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, s);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if (float64_is_neg(f64)) {
+ float_raise(float_flag_invalid, s);
+ return float64_default_nan(s);
+ } else if (float64_is_infinity(f64)) {
+ return float64_zero;
+ }
+
+ f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac, false);
+
+ /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
+ val = deposit64(0, 61, 1, f64_sign);
+ val = deposit64(val, 52, 11, f64_exp);
+ val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
+ return make_float64(val);
+}
+
+uint32_t HELPER(recpe_u32)(uint32_t a)
+{
+ int input, estimate;
+
+ if ((a & 0x80000000) == 0) {
+ return 0xffffffff;
+ }
+
+ input = extract32(a, 23, 9);
+ estimate = recip_estimate(input);
+
+ return deposit32(0, (32 - 9), 9, estimate);
+}
+
+uint32_t HELPER(rsqrte_u32)(uint32_t a)
+{
+ int estimate;
+
+ if ((a & 0xc0000000) == 0) {
+ return 0xffffffff;
+ }
+
+ estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));
+
+ return deposit32(0, 23, 9, estimate);
+}
+
+/* VFPv4 fused multiply-accumulate */
+dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b,
+ dh_ctype_f16 c, float_status *fpst)
+{
+ return float16_muladd(a, b, c, 0, fpst);
+}
+
+float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c,
+ float_status *fpst)
+{
+ return float32_muladd(a, b, c, 0, fpst);
+}
+
+float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c,
+ float_status *fpst)
+{
+ return float64_muladd(a, b, c, 0, fpst);
+}
+
+/* ARMv8 round to integral */
+dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, float_status *fp_status)
+{
+ return float16_round_to_int(x, fp_status);
+}
+
+float32 HELPER(rints_exact)(float32 x, float_status *fp_status)
+{
+ return float32_round_to_int(x, fp_status);
+}
+
+float64 HELPER(rintd_exact)(float64 x, float_status *fp_status)
+{
+ return float64_round_to_int(x, fp_status);
+}
+
+dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, float_status *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float16 ret;
+
+ ret = float16_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+float32 HELPER(rints)(float32 x, float_status *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float32 ret;
+
+ ret = float32_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+float64 HELPER(rintd)(float64 x, float_status *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float64 ret;
+
+ ret = float64_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+/* Convert ARM rounding mode to softfloat */
+const FloatRoundMode arm_rmode_to_sf_map[] = {
+ [FPROUNDING_TIEEVEN] = float_round_nearest_even,
+ [FPROUNDING_POSINF] = float_round_up,
+ [FPROUNDING_NEGINF] = float_round_down,
+ [FPROUNDING_ZERO] = float_round_to_zero,
+ [FPROUNDING_TIEAWAY] = float_round_ties_away,
+ [FPROUNDING_ODD] = float_round_to_odd,
+};
+
+/*
+ * Implement float64 to int32_t conversion without saturation;
+ * the result is supplied modulo 2^32.
+ */
+uint64_t HELPER(fjcvtzs)(float64 value, float_status *status)
+{
+ uint32_t frac, e_old, e_new;
+ bool inexact;
+
+ e_old = get_float_exception_flags(status);
+ set_float_exception_flags(0, status);
+ frac = float64_to_int32_modulo(value, float_round_to_zero, status);
+ e_new = get_float_exception_flags(status);
+ set_float_exception_flags(e_old | e_new, status);
+
+ /* Normal inexact, denormal with flush-to-zero, or overflow or NaN */
+ inexact = e_new & (float_flag_inexact |
+ float_flag_input_denormal_flushed |
+ float_flag_invalid);
+
+ /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */
+ inexact |= value == float64_chs(float64_zero);
+
+ /* Pack the result and the env->ZF representation of Z together. */
+ return deposit64(frac, 32, 32, inexact);
+}
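+
+/*
+ * Two illustrative cases (informal, derived from the code above):
+ * fjcvtzs(3.0) packs 3 in the low half and 0 in the high half, so the
+ * caller reports Z set; fjcvtzs(-0.0) packs 0 in the low half but a
+ * non-zero high half, so Z reads as clear, because -0.0 is treated as
+ * inexact for the JavaScript conversion even though it is exact in
+ * IEEE terms.
+ */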
+
+uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env)
+{
+ uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status[FPST_A32]);
+ uint32_t result = pair;
+ uint32_t z = (pair >> 32) == 0;
+
+ /* Store Z, clear NCV, in FPSCR.NZCV. */
+ env->vfp.fpsr = (env->vfp.fpsr & ~FPSR_NZCV_MASK) | (z * FPSR_Z);
+
+ return result;
+}
+
+/* Round a float32 to an integer that fits in int32_t or int64_t. */
+static float32 frint_s(float32 f, float_status *fpst, int intsize)
+{
+ int old_flags = get_float_exception_flags(fpst);
+ uint32_t exp = extract32(f, 23, 8);
+
+ if (unlikely(exp == 0xff)) {
+ /* NaN or Inf. */
+ goto overflow;
+ }
+
+ /* Round and re-extract the exponent. */
+ f = float32_round_to_int(f, fpst);
+ exp = extract32(f, 23, 8);
+
+ /* Validate the range of the result. */
+ if (exp < 126 + intsize) {
+ /* abs(F) <= INT{N}_MAX */
+ return f;
+ }
+ if (exp == 126 + intsize) {
+ uint32_t sign = extract32(f, 31, 1);
+ uint32_t frac = extract32(f, 0, 23);
+ if (sign && frac == 0) {
+ /* F == INT{N}_MIN */
+ return f;
+ }
+ }
+
+ overflow:
+ /*
+ * Raise Invalid and return INT{N}_MIN as a float. Revert any
+ * inexact exception float32_round_to_int may have raised.
+ */
+ set_float_exception_flags(old_flags | float_flag_invalid, fpst);
+ return (0x100u + 126u + intsize) << 23;
+}
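+
+/*
+ * Informal note on the range check above: exp < 126 + intsize means the
+ * unbiased exponent is at most intsize - 2, so |F| < 2^(intsize-1) and
+ * the rounded integer fits. The only representable value with
+ * exp == 126 + intsize that still fits is -2^(intsize-1), i.e.
+ * INT{N}_MIN itself (sign set, fraction zero), which is also the value
+ * the overflow path encodes as a float32 in its return expression.
+ */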
+
+float32 HELPER(frint32_s)(float32 f, float_status *fpst)
+{
+ return frint_s(f, fpst, 32);
+}
+
+float32 HELPER(frint64_s)(float32 f, float_status *fpst)
+{
+ return frint_s(f, fpst, 64);
+}
+
+/* Round a float64 to an integer that fits in int32_t or int64_t. */
+static float64 frint_d(float64 f, float_status *fpst, int intsize)
+{
+ int old_flags = get_float_exception_flags(fpst);
+ uint32_t exp = extract64(f, 52, 11);
+
+ if (unlikely(exp == 0x7ff)) {
+ /* NaN or Inf. */
+ goto overflow;
+ }
+
+ /* Round and re-extract the exponent. */
+ f = float64_round_to_int(f, fpst);
+ exp = extract64(f, 52, 11);
+
+ /* Validate the range of the result. */
+ if (exp < 1022 + intsize) {
+ /* abs(F) <= INT{N}_MAX */
+ return f;
+ }
+ if (exp == 1022 + intsize) {
+ uint64_t sign = extract64(f, 63, 1);
+ uint64_t frac = extract64(f, 0, 52);
+ if (sign && frac == 0) {
+ /* F == INT{N}_MIN */
+ return f;
+ }
+ }
+
+ overflow:
+ /*
+ * Raise Invalid and return INT{N}_MIN as a float. Revert any
+ * inexact exception float64_round_to_int may have raised.
+ */
+ set_float_exception_flags(old_flags | float_flag_invalid, fpst);
+ return (uint64_t)(0x800 + 1022 + intsize) << 52;
+}
+
+float64 HELPER(frint32_d)(float64 f, float_status *fpst)
+{
+ return frint_d(f, fpst, 32);
+}
+
+float64 HELPER(frint64_d)(float64 f, float_status *fpst)
+{
+ return frint_d(f, fpst, 64);
+}
+
+void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
+{
+ uint32_t syndrome;
+
+ switch (reg) {
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
+ return;
+ }
+ break;
+ case ARM_VFP_FPSID:
+ if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
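+ /*
+ * Informal reading of the constants below (following the ISS layout
+ * for trapped MCR/MRC accesses, not spelled out in this patch):
+ * CV = 1, COND = 0xE, Opc2 = 0, Opc1 = 7, CRn = reg, Rt = rt,
+ * CRm = 0, and Direction = 1 for a read, i.e. a trapped VMRS.
+ */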
+ syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL
+ | (1 << 24) | (0xe << 20) | (7 << 14)
+ | (reg << 10) | (rt << 5) | 1);
+
+ raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+}
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+ return vfp_get_fpscr(env);
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+ vfp_set_fpscr(env, val);
+}
diff --git a/target/arm/vfp_fpscr.c b/target/arm/vfp_fpscr.c
new file mode 100644
index 0000000..92ea60e
--- /dev/null
+++ b/target/arm/vfp_fpscr.c
@@ -0,0 +1,155 @@
+/*
+ * ARM VFP floating-point: handling of FPSCR/FPCR/FPSR
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "cpu-features.h"
+
+uint32_t vfp_get_fpcr(CPUARMState *env)
+{
+ uint32_t fpcr = env->vfp.fpcr
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+
+ /*
+ * M-profile LTPSIZE is the same bits [18:16] as A-profile Len; whichever
+ * of the two is not applicable to this CPU will always be zero.
+ */
+ fpcr |= env->v7m.ltpsize << 16;
+
+ return fpcr;
+}
+
+uint32_t vfp_get_fpsr(CPUARMState *env)
+{
+ uint32_t fpsr = env->vfp.fpsr;
+ uint32_t i;
+
+ fpsr |= vfp_get_fpsr_from_host(env);
+
+ i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
+ fpsr |= i ? FPSR_QC : 0;
+ return fpsr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+ return (vfp_get_fpcr(env) & FPSCR_FPCR_MASK) |
+ (vfp_get_fpsr(env) & FPSCR_FPSR_MASK);
+}
+
+void vfp_set_fpsr(CPUARMState *env, uint32_t val)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (arm_feature(env, ARM_FEATURE_NEON) ||
+ cpu_isar_feature(aa32_mve, cpu)) {
+ /*
+ * The bit we set within vfp.qc[] is arbitrary; the array as a
+ * whole being zero/non-zero is what counts.
+ */
+ env->vfp.qc[0] = val & FPSR_QC;
+ env->vfp.qc[1] = 0;
+ env->vfp.qc[2] = 0;
+ env->vfp.qc[3] = 0;
+ }
+
+ /*
+ * NZCV lives only in env->vfp.fpsr. The cumulative exception flags
+ * IOC|DZC|OFC|UFC|IXC|IDC also live in env->vfp.fpsr, with possible
+ * extra pending exception information that hasn't yet been folded in
+ * living in the float_status values (for TCG).
+ * Since this FPSR write gives us the up to date values of the exception
+ * flags, we want to store into vfp.fpsr the NZCV and CEXC bits, zeroing
+ * anything else. We also need to clear out the float_status exception
+ * information so that the next vfp_get_fpsr does not fold in stale data.
+ */
+ val &= FPSR_NZCV_MASK | FPSR_CEXC_MASK;
+ env->vfp.fpsr = val;
+ vfp_clear_float_status_exc_flags(env);
+}
+
+static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ /*
+ * We only set FPCR bits defined by mask, and leave the others alone.
+ * We assume the mask is sensible (e.g. doesn't try to set only
+ * part of a field)
+ */
+ ARMCPU *cpu = env_archcpu(env);
+
+ /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
+ if (!cpu_isar_feature(any_fp16, cpu)) {
+ val &= ~FPCR_FZ16;
+ }
+ if (!cpu_isar_feature(aa64_afp, cpu)) {
+ val &= ~(FPCR_FIZ | FPCR_AH | FPCR_NEP);
+ }
+
+ if (!cpu_isar_feature(aa64_ebf16, cpu)) {
+ val &= ~FPCR_EBF;
+ }
+
+ vfp_set_fpcr_to_host(env, val, mask);
+
+ if (mask & (FPCR_LEN_MASK | FPCR_STRIDE_MASK)) {
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * Short-vector length and stride; on M-profile these bits
+ * are used for different purposes.
+ * We can't make this conditional be "if MVFR0.FPShVec != 0",
+ * because in v7A no-short-vector-support cores still had to
+ * allow Stride/Len to be written with the only effect that
+ * some insns are required to UNDEF if the guest sets them.
+ */
+ env->vfp.vec_len = extract32(val, 16, 3);
+ env->vfp.vec_stride = extract32(val, 20, 2);
+ } else if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT,
+ FPCR_LTPSIZE_LENGTH);
+ }
+ }
+
+ /*
+ * We don't implement trapped exception handling, so the
+ * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
+ *
+ * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode, EBF, FZ16,
+ * FIZ, AH, and NEP.
+ * Len, Stride and LTPSIZE were handled just above. Store the kept
+ * bits into vfp.fpcr, and zero any of the other FPCR bits along with
+ * the RES0 and RAZ/WI bits.
+ */
+ val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16 |
+ FPCR_EBF | FPCR_FIZ | FPCR_AH | FPCR_NEP;
+ env->vfp.fpcr &= ~mask;
+ env->vfp.fpcr |= val;
+}
+
+void vfp_set_fpcr(CPUARMState *env, uint32_t val)
+{
+ vfp_set_fpcr_masked(env, val, MAKE_64BIT_MASK(0, 32));
+}
+
+void vfp_set_fpscr(CPUARMState *env, uint32_t val)
+{
+ vfp_set_fpcr_masked(env, val, FPSCR_FPCR_MASK);
+ vfp_set_fpsr(env, val & FPSCR_FPSR_MASK);
+}
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
deleted file mode 100644
index b3698da..0000000
--- a/target/arm/vfp_helper.c
+++ /dev/null
@@ -1,1304 +0,0 @@
-/*
- * ARM VFP floating-point operations
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "internals.h"
-#include "cpu-features.h"
-#ifdef CONFIG_TCG
-#include "qemu/log.h"
-#include "fpu/softfloat.h"
-#endif
-
-/* VFP support. We follow the convention used for VFP instructions:
- Single precision routines have a "s" suffix, double precision a
- "d" suffix. */
-
-#ifdef CONFIG_TCG
-
-/* Convert host exception flags to vfp form. */
-static inline int vfp_exceptbits_from_host(int host_bits)
-{
- int target_bits = 0;
-
- if (host_bits & float_flag_invalid) {
- target_bits |= 1;
- }
- if (host_bits & float_flag_divbyzero) {
- target_bits |= 2;
- }
- if (host_bits & float_flag_overflow) {
- target_bits |= 4;
- }
- if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
- target_bits |= 8;
- }
- if (host_bits & float_flag_inexact) {
- target_bits |= 0x10;
- }
- if (host_bits & float_flag_input_denormal) {
- target_bits |= 0x80;
- }
- return target_bits;
-}
-
-/* Convert vfp exception flags to target form. */
-static inline int vfp_exceptbits_to_host(int target_bits)
-{
- int host_bits = 0;
-
- if (target_bits & 1) {
- host_bits |= float_flag_invalid;
- }
- if (target_bits & 2) {
- host_bits |= float_flag_divbyzero;
- }
- if (target_bits & 4) {
- host_bits |= float_flag_overflow;
- }
- if (target_bits & 8) {
- host_bits |= float_flag_underflow;
- }
- if (target_bits & 0x10) {
- host_bits |= float_flag_inexact;
- }
- if (target_bits & 0x80) {
- host_bits |= float_flag_input_denormal;
- }
- return host_bits;
-}
-
-static uint32_t vfp_get_fpsr_from_host(CPUARMState *env)
-{
- uint32_t i;
-
- i = get_float_exception_flags(&env->vfp.fp_status);
- i |= get_float_exception_flags(&env->vfp.standard_fp_status);
- /* FZ16 does not generate an input denormal exception. */
- i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
- & ~float_flag_input_denormal);
- i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16)
- & ~float_flag_input_denormal);
- return vfp_exceptbits_from_host(i);
-}
-
-static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val)
-{
- /*
- * The exception flags are ORed together when we read fpscr so we
- * only need to preserve the current state in one of our
- * float_status values.
- */
- int i = vfp_exceptbits_to_host(val);
- set_float_exception_flags(i, &env->vfp.fp_status);
- set_float_exception_flags(0, &env->vfp.fp_status_f16);
- set_float_exception_flags(0, &env->vfp.standard_fp_status);
- set_float_exception_flags(0, &env->vfp.standard_fp_status_f16);
-}
-
-static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
-{
- uint64_t changed = env->vfp.fpcr;
-
- changed ^= val;
- changed &= mask;
- if (changed & (3 << 22)) {
- int i = (val >> 22) & 3;
- switch (i) {
- case FPROUNDING_TIEEVEN:
- i = float_round_nearest_even;
- break;
- case FPROUNDING_POSINF:
- i = float_round_up;
- break;
- case FPROUNDING_NEGINF:
- i = float_round_down;
- break;
- case FPROUNDING_ZERO:
- i = float_round_to_zero;
- break;
- }
- set_float_rounding_mode(i, &env->vfp.fp_status);
- set_float_rounding_mode(i, &env->vfp.fp_status_f16);
- }
- if (changed & FPCR_FZ16) {
- bool ftz_enabled = val & FPCR_FZ16;
- set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
- set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
- set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
- set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
- }
- if (changed & FPCR_FZ) {
- bool ftz_enabled = val & FPCR_FZ;
- set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
- set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
- }
- if (changed & FPCR_DN) {
- bool dnan_enabled = val & FPCR_DN;
- set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
- set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
- }
-}
-
-#else
-
-static uint32_t vfp_get_fpsr_from_host(CPUARMState *env)
-{
- return 0;
-}
-
-static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val)
-{
-}
-
-static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
-{
-}
-
-#endif
-
-uint32_t vfp_get_fpcr(CPUARMState *env)
-{
- uint32_t fpcr = env->vfp.fpcr
- | (env->vfp.vec_len << 16)
- | (env->vfp.vec_stride << 20);
-
- /*
- * M-profile LTPSIZE is the same bits [18:16] as A-profile Len; whichever
- * of the two is not applicable to this CPU will always be zero.
- */
- fpcr |= env->v7m.ltpsize << 16;
-
- return fpcr;
-}
-
-uint32_t vfp_get_fpsr(CPUARMState *env)
-{
- uint32_t fpsr = env->vfp.fpsr;
- uint32_t i;
-
- fpsr |= vfp_get_fpsr_from_host(env);
-
- i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
- fpsr |= i ? FPSR_QC : 0;
- return fpsr;
-}
-
-uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
-{
- return (vfp_get_fpcr(env) & FPSCR_FPCR_MASK) |
- (vfp_get_fpsr(env) & FPSCR_FPSR_MASK);
-}
-
-uint32_t vfp_get_fpscr(CPUARMState *env)
-{
- return HELPER(vfp_get_fpscr)(env);
-}
-
-void vfp_set_fpsr(CPUARMState *env, uint32_t val)
-{
- ARMCPU *cpu = env_archcpu(env);
-
- vfp_set_fpsr_to_host(env, val);
-
- if (arm_feature(env, ARM_FEATURE_NEON) ||
- cpu_isar_feature(aa32_mve, cpu)) {
- /*
- * The bit we set within vfp.qc[] is arbitrary; the array as a
- * whole being zero/non-zero is what counts.
- */
- env->vfp.qc[0] = val & FPSR_QC;
- env->vfp.qc[1] = 0;
- env->vfp.qc[2] = 0;
- env->vfp.qc[3] = 0;
- }
-
- /*
- * The only FPSR bits we keep in vfp.fpsr are NZCV:
- * the exception flags IOC|DZC|OFC|UFC|IXC|IDC are stored in
- * fp_status, and QC is in vfp.qc[]. Store the NZCV bits there,
- * and zero any of the other FPSR bits.
- */
- val &= FPSR_NZCV_MASK;
- env->vfp.fpsr = val;
-}
-
-static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask)
-{
- /*
- * We only set FPCR bits defined by mask, and leave the others alone.
- * We assume the mask is sensible (e.g. doesn't try to set only
- * part of a field)
- */
- ARMCPU *cpu = env_archcpu(env);
-
- /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
- if (!cpu_isar_feature(any_fp16, cpu)) {
- val &= ~FPCR_FZ16;
- }
-
- vfp_set_fpcr_to_host(env, val, mask);
-
- if (mask & (FPCR_LEN_MASK | FPCR_STRIDE_MASK)) {
- if (!arm_feature(env, ARM_FEATURE_M)) {
- /*
- * Short-vector length and stride; on M-profile these bits
- * are used for different purposes.
- * We can't make this conditional be "if MVFR0.FPShVec != 0",
- * because in v7A no-short-vector-support cores still had to
- * allow Stride/Len to be written with the only effect that
- * some insns are required to UNDEF if the guest sets them.
- */
- env->vfp.vec_len = extract32(val, 16, 3);
- env->vfp.vec_stride = extract32(val, 20, 2);
- } else if (cpu_isar_feature(aa32_mve, cpu)) {
- env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT,
- FPCR_LTPSIZE_LENGTH);
- }
- }
-
- /*
- * We don't implement trapped exception handling, so the
- * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
- *
- * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode
- * and FZ16. Len, Stride and LTPSIZE we just handled. Store those bits
- * there, and zero any of the other FPCR bits and the RES0 and RAZ/WI
- * bits.
- */
- val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16;
- env->vfp.fpcr &= ~mask;
- env->vfp.fpcr |= val;
-}
-
-void vfp_set_fpcr(CPUARMState *env, uint32_t val)
-{
- vfp_set_fpcr_masked(env, val, MAKE_64BIT_MASK(0, 32));
-}
-
-void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
-{
- vfp_set_fpcr_masked(env, val, FPSCR_FPCR_MASK);
- vfp_set_fpsr(env, val & FPSCR_FPSR_MASK);
-}
-
-void vfp_set_fpscr(CPUARMState *env, uint32_t val)
-{
- HELPER(vfp_set_fpscr)(env, val);
-}
-
-#ifdef CONFIG_TCG
-
-#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
-
-#define VFP_BINOP(name) \
-dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \
-{ \
- float_status *fpst = fpstp; \
- return float16_ ## name(a, b, fpst); \
-} \
-float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
-{ \
- float_status *fpst = fpstp; \
- return float32_ ## name(a, b, fpst); \
-} \
-float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
-{ \
- float_status *fpst = fpstp; \
- return float64_ ## name(a, b, fpst); \
-}
-VFP_BINOP(add)
-VFP_BINOP(sub)
-VFP_BINOP(mul)
-VFP_BINOP(div)
-VFP_BINOP(min)
-VFP_BINOP(max)
-VFP_BINOP(minnum)
-VFP_BINOP(maxnum)
-#undef VFP_BINOP
-
-dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env)
-{
- return float16_sqrt(a, &env->vfp.fp_status_f16);
-}
-
-float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
-{
- return float32_sqrt(a, &env->vfp.fp_status);
-}
-
-float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
-{
- return float64_sqrt(a, &env->vfp.fp_status);
-}
-
-static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp)
-{
- uint32_t flags;
- switch (cmp) {
- case float_relation_equal:
- flags = 0x6;
- break;
- case float_relation_less:
- flags = 0x8;
- break;
- case float_relation_greater:
- flags = 0x2;
- break;
- case float_relation_unordered:
- flags = 0x3;
- break;
- default:
- g_assert_not_reached();
- }
- env->vfp.fpsr = deposit64(env->vfp.fpsr, 28, 4, flags); /* NZCV */
-}
-
-/* XXX: check quiet/signaling case */
-#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \
-void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
-{ \
- softfloat_to_vfp_compare(env, \
- FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \
-} \
-void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
-{ \
- softfloat_to_vfp_compare(env, \
- FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \
-}
-DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16)
-DO_VFP_cmp(s, float32, float32, fp_status)
-DO_VFP_cmp(d, float64, float64, fp_status)
-#undef DO_VFP_cmp
-
-/* Integer to float and float to integer conversions */
-
-#define CONV_ITOF(name, ftype, fsz, sign) \
-ftype HELPER(name)(uint32_t x, void *fpstp) \
-{ \
- float_status *fpst = fpstp; \
- return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
-}
-
-#define CONV_FTOI(name, ftype, fsz, sign, round) \
-sign##int32_t HELPER(name)(ftype x, void *fpstp) \
-{ \
- float_status *fpst = fpstp; \
- if (float##fsz##_is_any_nan(x)) { \
- float_raise(float_flag_invalid, fpst); \
- return 0; \
- } \
- return float##fsz##_to_##sign##int32##round(x, fpst); \
-}
-
-#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
- CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
- CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
- CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)
-
-FLOAT_CONVS(si, h, uint32_t, 16, )
-FLOAT_CONVS(si, s, float32, 32, )
-FLOAT_CONVS(si, d, float64, 64, )
-FLOAT_CONVS(ui, h, uint32_t, 16, u)
-FLOAT_CONVS(ui, s, float32, 32, u)
-FLOAT_CONVS(ui, d, float64, 64, u)
-
-#undef CONV_ITOF
-#undef CONV_FTOI
-#undef FLOAT_CONVS
-
-/* floating point conversion */
-float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
-{
- return float32_to_float64(x, &env->vfp.fp_status);
-}
-
-float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
-{
- return float64_to_float32(x, &env->vfp.fp_status);
-}
-
-uint32_t HELPER(bfcvt)(float32 x, void *status)
-{
- return float32_to_bfloat16(x, status);
-}
-
-uint32_t HELPER(bfcvt_pair)(uint64_t pair, void *status)
-{
- bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status);
- bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status);
- return deposit32(lo, 16, 16, hi);
-}
-
-/*
- * VFP3 fixed point conversion. The AArch32 versions of fix-to-float
- * must always round-to-nearest; the AArch64 ones honour the FPSCR
- * rounding mode. (For AArch32 Neon the standard-FPSCR is set to
- * round-to-nearest so either helper will work.) AArch32 float-to-fix
- * must round-to-zero.
- */
-#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
-ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
- void *fpstp) \
-{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }
-
-#define VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
- ftype HELPER(vfp_##name##to##p##_round_to_nearest)(uint##isz##_t x, \
- uint32_t shift, \
- void *fpstp) \
- { \
- ftype ret; \
- float_status *fpst = fpstp; \
- FloatRoundMode oldmode = fpst->float_rounding_mode; \
- fpst->float_rounding_mode = float_round_nearest_even; \
- ret = itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); \
- fpst->float_rounding_mode = oldmode; \
- return ret; \
- }
-
-#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \
-uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \
- void *fpst) \
-{ \
- if (unlikely(float##fsz##_is_any_nan(x))) { \
- float_raise(float_flag_invalid, fpst); \
- return 0; \
- } \
- return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
-}
-
-#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \
-VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
-VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
- float_round_to_zero, _round_to_zero) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
- get_float_rounding_mode(fpst), )
-
-#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \
-VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
- get_float_rounding_mode(fpst), )
-
-VFP_CONV_FIX(sh, d, 64, float64, 64, int16)
-VFP_CONV_FIX(sl, d, 64, float64, 64, int32)
-VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64)
-VFP_CONV_FIX(uh, d, 64, float64, 64, uint16)
-VFP_CONV_FIX(ul, d, 64, float64, 64, uint32)
-VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64)
-VFP_CONV_FIX(sh, s, 32, float32, 32, int16)
-VFP_CONV_FIX(sl, s, 32, float32, 32, int32)
-VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
-VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
-VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
-VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)
-VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16)
-VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32)
-VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64)
-VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16)
-VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32)
-VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64)
-
-#undef VFP_CONV_FIX
-#undef VFP_CONV_FIX_FLOAT
-#undef VFP_CONV_FLOAT_FIX_ROUND
-#undef VFP_CONV_FIX_A64
-
-/* Set the current fp rounding mode and return the old one.
- * The argument is a softfloat float_round_ value.
- */
-uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
-{
- float_status *fp_status = fpstp;
-
- uint32_t prev_rmode = get_float_rounding_mode(fp_status);
- set_float_rounding_mode(rmode, fp_status);
-
- return prev_rmode;
-}
-
-/* Half precision conversions. */
-float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
-{
- /* Squash FZ16 to 0 for the duration of conversion. In this case,
- * it would affect flushing input denormals.
- */
- float_status *fpst = fpstp;
- bool save = get_flush_inputs_to_zero(fpst);
- set_flush_inputs_to_zero(false, fpst);
- float32 r = float16_to_float32(a, !ahp_mode, fpst);
- set_flush_inputs_to_zero(save, fpst);
- return r;
-}
-
-uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
-{
- /* Squash FZ16 to 0 for the duration of conversion. In this case,
- * it would affect flushing output denormals.
- */
- float_status *fpst = fpstp;
- bool save = get_flush_to_zero(fpst);
- set_flush_to_zero(false, fpst);
- float16 r = float32_to_float16(a, !ahp_mode, fpst);
- set_flush_to_zero(save, fpst);
- return r;
-}
-
-float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
-{
- /* Squash FZ16 to 0 for the duration of conversion. In this case,
- * it would affect flushing input denormals.
- */
- float_status *fpst = fpstp;
- bool save = get_flush_inputs_to_zero(fpst);
- set_flush_inputs_to_zero(false, fpst);
- float64 r = float16_to_float64(a, !ahp_mode, fpst);
- set_flush_inputs_to_zero(save, fpst);
- return r;
-}
-
-uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
-{
- /* Squash FZ16 to 0 for the duration of conversion. In this case,
- * it would affect flushing output denormals.
- */
- float_status *fpst = fpstp;
- bool save = get_flush_to_zero(fpst);
- set_flush_to_zero(false, fpst);
- float16 r = float64_to_float16(a, !ahp_mode, fpst);
- set_flush_to_zero(save, fpst);
- return r;
-}
-
-/* NEON helpers. */
-
-/* Constants 256 and 512 are used in some helpers; we avoid relying on
- * int->float conversions at run-time. */
-#define float64_256 make_float64(0x4070000000000000LL)
-#define float64_512 make_float64(0x4080000000000000LL)
-#define float16_maxnorm make_float16(0x7bff)
-#define float32_maxnorm make_float32(0x7f7fffff)
-#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
-
-/* Reciprocal functions
- *
- * The algorithm that must be used to calculate the estimate
- * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
- */
-
-/* See RecipEstimate()
- *
- * input is a 9 bit fixed point number
- * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
- * result range 256 .. 511 for a number from 1.0 to 511/256.
- */
-
-static int recip_estimate(int input)
-{
- int a, b, r;
- assert(256 <= input && input < 512);
- a = (input * 2) + 1;
- b = (1 << 19) / a;
- r = (b + 1) >> 1;
- assert(256 <= r && r < 512);
- return r;
-}
-
-/*
- * Common wrapper to call recip_estimate
- *
- * The parameters are exponent and 64 bit fraction (without implicit
- * bit) where the binary point is nominally at bit 52. Returns a
- * float64 which can then be rounded to the appropriate size by the
- * callee.
- */
-
-static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
-{
- uint32_t scaled, estimate;
- uint64_t result_frac;
- int result_exp;
-
- /* Handle sub-normals */
- if (*exp == 0) {
- if (extract64(frac, 51, 1) == 0) {
- *exp = -1;
- frac <<= 2;
- } else {
- frac <<= 1;
- }
- }
-
- /* scaled = UInt('1':fraction<51:44>) */
- scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
- estimate = recip_estimate(scaled);
-
- result_exp = exp_off - *exp;
- result_frac = deposit64(0, 44, 8, estimate);
- if (result_exp == 0) {
- result_frac = deposit64(result_frac >> 1, 51, 1, 1);
- } else if (result_exp == -1) {
- result_frac = deposit64(result_frac >> 2, 50, 2, 1);
- result_exp = 0;
- }
-
- *exp = result_exp;
-
- return result_frac;
-}
-
-static bool round_to_inf(float_status *fpst, bool sign_bit)
-{
- switch (fpst->float_rounding_mode) {
- case float_round_nearest_even: /* Round to Nearest */
- return true;
- case float_round_up: /* Round to +Inf */
- return !sign_bit;
- case float_round_down: /* Round to -Inf */
- return sign_bit;
- case float_round_to_zero: /* Round to Zero */
- return false;
- default:
- g_assert_not_reached();
- }
-}
-
-uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
-{
- float_status *fpst = fpstp;
- float16 f16 = float16_squash_input_denormal(input, fpst);
- uint32_t f16_val = float16_val(f16);
- uint32_t f16_sign = float16_is_neg(f16);
- int f16_exp = extract32(f16_val, 10, 5);
- uint32_t f16_frac = extract32(f16_val, 0, 10);
- uint64_t f64_frac;
-
- if (float16_is_any_nan(f16)) {
- float16 nan = f16;
- if (float16_is_signaling_nan(f16, fpst)) {
- float_raise(float_flag_invalid, fpst);
- if (!fpst->default_nan_mode) {
- nan = float16_silence_nan(f16, fpst);
- }
- }
- if (fpst->default_nan_mode) {
- nan = float16_default_nan(fpst);
- }
- return nan;
- } else if (float16_is_infinity(f16)) {
- return float16_set_sign(float16_zero, float16_is_neg(f16));
- } else if (float16_is_zero(f16)) {
- float_raise(float_flag_divbyzero, fpst);
- return float16_set_sign(float16_infinity, float16_is_neg(f16));
- } else if (float16_abs(f16) < (1 << 8)) {
- /* Abs(value) < 2.0^-16 */
- float_raise(float_flag_overflow | float_flag_inexact, fpst);
- if (round_to_inf(fpst, f16_sign)) {
- return float16_set_sign(float16_infinity, f16_sign);
- } else {
- return float16_set_sign(float16_maxnorm, f16_sign);
- }
- } else if (f16_exp >= 29 && fpst->flush_to_zero) {
- float_raise(float_flag_underflow, fpst);
- return float16_set_sign(float16_zero, float16_is_neg(f16));
- }
-
- f64_frac = call_recip_estimate(&f16_exp, 29,
- ((uint64_t) f16_frac) << (52 - 10));
-
- /* result = sign : result_exp<4:0> : fraction<51:42> */
- f16_val = deposit32(0, 15, 1, f16_sign);
- f16_val = deposit32(f16_val, 10, 5, f16_exp);
- f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
- return make_float16(f16_val);
-}
-
-float32 HELPER(recpe_f32)(float32 input, void *fpstp)
-{
- float_status *fpst = fpstp;
- float32 f32 = float32_squash_input_denormal(input, fpst);
- uint32_t f32_val = float32_val(f32);
- bool f32_sign = float32_is_neg(f32);
- int f32_exp = extract32(f32_val, 23, 8);
- uint32_t f32_frac = extract32(f32_val, 0, 23);
- uint64_t f64_frac;
-
- if (float32_is_any_nan(f32)) {
- float32 nan = f32;
- if (float32_is_signaling_nan(f32, fpst)) {
- float_raise(float_flag_invalid, fpst);
- if (!fpst->default_nan_mode) {
- nan = float32_silence_nan(f32, fpst);
- }
- }
- if (fpst->default_nan_mode) {
- nan = float32_default_nan(fpst);
- }
- return nan;
- } else if (float32_is_infinity(f32)) {
- return float32_set_sign(float32_zero, float32_is_neg(f32));
- } else if (float32_is_zero(f32)) {
- float_raise(float_flag_divbyzero, fpst);
- return float32_set_sign(float32_infinity, float32_is_neg(f32));
- } else if (float32_abs(f32) < (1ULL << 21)) {
- /* Abs(value) < 2.0^-128 */
- float_raise(float_flag_overflow | float_flag_inexact, fpst);
- if (round_to_inf(fpst, f32_sign)) {
- return float32_set_sign(float32_infinity, f32_sign);
- } else {
- return float32_set_sign(float32_maxnorm, f32_sign);
- }
- } else if (f32_exp >= 253 && fpst->flush_to_zero) {
- float_raise(float_flag_underflow, fpst);
- return float32_set_sign(float32_zero, float32_is_neg(f32));
- }
-
- f64_frac = call_recip_estimate(&f32_exp, 253,
- ((uint64_t) f32_frac) << (52 - 23));
-
- /* result = sign : result_exp<7:0> : fraction<51:29> */
- f32_val = deposit32(0, 31, 1, f32_sign);
- f32_val = deposit32(f32_val, 23, 8, f32_exp);
- f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
- return make_float32(f32_val);
-}
-
-float64 HELPER(recpe_f64)(float64 input, void *fpstp)
-{
- float_status *fpst = fpstp;
- float64 f64 = float64_squash_input_denormal(input, fpst);
- uint64_t f64_val = float64_val(f64);
- bool f64_sign = float64_is_neg(f64);
- int f64_exp = extract64(f64_val, 52, 11);
- uint64_t f64_frac = extract64(f64_val, 0, 52);
-
- /* Deal with any special cases */
- if (float64_is_any_nan(f64)) {
- float64 nan = f64;
- if (float64_is_signaling_nan(f64, fpst)) {
- float_raise(float_flag_invalid, fpst);
- if (!fpst->default_nan_mode) {
- nan = float64_silence_nan(f64, fpst);
- }
- }
- if (fpst->default_nan_mode) {
- nan = float64_default_nan(fpst);
- }
- return nan;
- } else if (float64_is_infinity(f64)) {
- return float64_set_sign(float64_zero, float64_is_neg(f64));
- } else if (float64_is_zero(f64)) {
- float_raise(float_flag_divbyzero, fpst);
- return float64_set_sign(float64_infinity, float64_is_neg(f64));
- } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
- /* Abs(value) < 2.0^-1024 */
- float_raise(float_flag_overflow | float_flag_inexact, fpst);
- if (round_to_inf(fpst, f64_sign)) {
- return float64_set_sign(float64_infinity, f64_sign);
- } else {
- return float64_set_sign(float64_maxnorm, f64_sign);
- }
- } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
- float_raise(float_flag_underflow, fpst);
- return float64_set_sign(float64_zero, float64_is_neg(f64));
- }
-
- f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);
-
- /* result = sign : result_exp<10:0> : fraction<51:0>; */
- f64_val = deposit64(0, 63, 1, f64_sign);
- f64_val = deposit64(f64_val, 52, 11, f64_exp);
- f64_val = deposit64(f64_val, 0, 52, f64_frac);
- return make_float64(f64_val);
-}
-
-/* The algorithm that must be used to calculate the estimate
- * is specified by the ARM ARM.
- */
-
-static int do_recip_sqrt_estimate(int a)
-{
- int b, estimate;
-
- assert(128 <= a && a < 512);
- if (a < 256) {
- a = a * 2 + 1;
- } else {
- a = (a >> 1) << 1;
- a = (a + 1) * 2;
- }
- b = 512;
- while (a * (b + 1) * (b + 1) < (1 << 28)) {
- b += 1;
- }
- estimate = (b + 1) / 2;
- assert(256 <= estimate && estimate < 512);
-
- return estimate;
-}
-
-
-static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
-{
- int estimate;
- uint32_t scaled;
-
- if (*exp == 0) {
- while (extract64(frac, 51, 1) == 0) {
- frac = frac << 1;
- *exp -= 1;
- }
- frac = extract64(frac, 0, 51) << 1;
- }
-
- if (*exp & 1) {
- /* scaled = UInt('01':fraction<51:45>) */
- scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
- } else {
- /* scaled = UInt('1':fraction<51:44>) */
- scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
- }
- estimate = do_recip_sqrt_estimate(scaled);
-
- *exp = (exp_off - *exp) / 2;
- return extract64(estimate, 0, 8) << 44;
-}
-
-uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
-{
- float_status *s = fpstp;
- float16 f16 = float16_squash_input_denormal(input, s);
- uint16_t val = float16_val(f16);
- bool f16_sign = float16_is_neg(f16);
- int f16_exp = extract32(val, 10, 5);
- uint16_t f16_frac = extract32(val, 0, 10);
- uint64_t f64_frac;
-
- if (float16_is_any_nan(f16)) {
- float16 nan = f16;
- if (float16_is_signaling_nan(f16, s)) {
- float_raise(float_flag_invalid, s);
- if (!s->default_nan_mode) {
- nan = float16_silence_nan(f16, fpstp);
- }
- }
- if (s->default_nan_mode) {
- nan = float16_default_nan(s);
- }
- return nan;
- } else if (float16_is_zero(f16)) {
- float_raise(float_flag_divbyzero, s);
- return float16_set_sign(float16_infinity, f16_sign);
- } else if (f16_sign) {
- float_raise(float_flag_invalid, s);
- return float16_default_nan(s);
- } else if (float16_is_infinity(f16)) {
- return float16_zero;
- }
-
- /* Scale and normalize to a double-precision value between 0.25 and 1.0,
- * preserving the parity of the exponent. */
-
- f64_frac = ((uint64_t) f16_frac) << (52 - 10);
-
- f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);
-
- /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
- val = deposit32(0, 15, 1, f16_sign);
- val = deposit32(val, 10, 5, f16_exp);
- val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
- return make_float16(val);
-}
-
-float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
-{
- float_status *s = fpstp;
- float32 f32 = float32_squash_input_denormal(input, s);
- uint32_t val = float32_val(f32);
- uint32_t f32_sign = float32_is_neg(f32);
- int f32_exp = extract32(val, 23, 8);
- uint32_t f32_frac = extract32(val, 0, 23);
- uint64_t f64_frac;
-
- if (float32_is_any_nan(f32)) {
- float32 nan = f32;
- if (float32_is_signaling_nan(f32, s)) {
- float_raise(float_flag_invalid, s);
- if (!s->default_nan_mode) {
- nan = float32_silence_nan(f32, fpstp);
- }
- }
- if (s->default_nan_mode) {
- nan = float32_default_nan(s);
- }
- return nan;
- } else if (float32_is_zero(f32)) {
- float_raise(float_flag_divbyzero, s);
- return float32_set_sign(float32_infinity, float32_is_neg(f32));
- } else if (float32_is_neg(f32)) {
- float_raise(float_flag_invalid, s);
- return float32_default_nan(s);
- } else if (float32_is_infinity(f32)) {
- return float32_zero;
- }
-
- /* Scale and normalize to a double-precision value between 0.25 and 1.0,
- * preserving the parity of the exponent. */
-
- f64_frac = ((uint64_t) f32_frac) << 29;
-
- f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);
-
- /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */
- val = deposit32(0, 31, 1, f32_sign);
- val = deposit32(val, 23, 8, f32_exp);
- val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
- return make_float32(val);
-}
-
-float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
-{
- float_status *s = fpstp;
- float64 f64 = float64_squash_input_denormal(input, s);
- uint64_t val = float64_val(f64);
- bool f64_sign = float64_is_neg(f64);
- int f64_exp = extract64(val, 52, 11);
- uint64_t f64_frac = extract64(val, 0, 52);
-
- if (float64_is_any_nan(f64)) {
- float64 nan = f64;
- if (float64_is_signaling_nan(f64, s)) {
- float_raise(float_flag_invalid, s);
- if (!s->default_nan_mode) {
- nan = float64_silence_nan(f64, fpstp);
- }
- }
- if (s->default_nan_mode) {
- nan = float64_default_nan(s);
- }
- return nan;
- } else if (float64_is_zero(f64)) {
- float_raise(float_flag_divbyzero, s);
- return float64_set_sign(float64_infinity, float64_is_neg(f64));
- } else if (float64_is_neg(f64)) {
- float_raise(float_flag_invalid, s);
- return float64_default_nan(s);
- } else if (float64_is_infinity(f64)) {
- return float64_zero;
- }
-
- f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);
-
- /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */
- val = deposit64(0, 61, 1, f64_sign);
- val = deposit64(val, 52, 11, f64_exp);
- val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
- return make_float64(val);
-}
-
-uint32_t HELPER(recpe_u32)(uint32_t a)
-{
- int input, estimate;
-
- if ((a & 0x80000000) == 0) {
- return 0xffffffff;
- }
-
- input = extract32(a, 23, 9);
- estimate = recip_estimate(input);
-
- return deposit32(0, (32 - 9), 9, estimate);
-}
-
-uint32_t HELPER(rsqrte_u32)(uint32_t a)
-{
- int estimate;
-
- if ((a & 0xc0000000) == 0) {
- return 0xffffffff;
- }
-
- estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));
-
- return deposit32(0, 23, 9, estimate);
-}
-
-/* VFPv4 fused multiply-accumulate */
-dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b,
- dh_ctype_f16 c, void *fpstp)
-{
- float_status *fpst = fpstp;
- return float16_muladd(a, b, c, 0, fpst);
-}
-
-float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
-{
- float_status *fpst = fpstp;
- return float32_muladd(a, b, c, 0, fpst);
-}
-
-float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
-{
- float_status *fpst = fpstp;
- return float64_muladd(a, b, c, 0, fpst);
-}
-
-/* ARMv8 round to integral */
-dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, void *fp_status)
-{
- return float16_round_to_int(x, fp_status);
-}
-
-float32 HELPER(rints_exact)(float32 x, void *fp_status)
-{
- return float32_round_to_int(x, fp_status);
-}
-
-float64 HELPER(rintd_exact)(float64 x, void *fp_status)
-{
- return float64_round_to_int(x, fp_status);
-}
-
-dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, void *fp_status)
-{
- int old_flags = get_float_exception_flags(fp_status), new_flags;
- float16 ret;
-
- ret = float16_round_to_int(x, fp_status);
-
- /* Suppress any inexact exceptions the conversion produced */
- if (!(old_flags & float_flag_inexact)) {
- new_flags = get_float_exception_flags(fp_status);
- set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
- }
-
- return ret;
-}
-
-float32 HELPER(rints)(float32 x, void *fp_status)
-{
- int old_flags = get_float_exception_flags(fp_status), new_flags;
- float32 ret;
-
- ret = float32_round_to_int(x, fp_status);
-
- /* Suppress any inexact exceptions the conversion produced */
- if (!(old_flags & float_flag_inexact)) {
- new_flags = get_float_exception_flags(fp_status);
- set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
- }
-
- return ret;
-}
-
-float64 HELPER(rintd)(float64 x, void *fp_status)
-{
- int old_flags = get_float_exception_flags(fp_status), new_flags;
- float64 ret;
-
- ret = float64_round_to_int(x, fp_status);
-
- new_flags = get_float_exception_flags(fp_status);
-
- /* Suppress any inexact exceptions the conversion produced */
- if (!(old_flags & float_flag_inexact)) {
- new_flags = get_float_exception_flags(fp_status);
- set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
- }
-
- return ret;
-}
-
-/* Convert ARM rounding mode to softfloat */
-const FloatRoundMode arm_rmode_to_sf_map[] = {
- [FPROUNDING_TIEEVEN] = float_round_nearest_even,
- [FPROUNDING_POSINF] = float_round_up,
- [FPROUNDING_NEGINF] = float_round_down,
- [FPROUNDING_ZERO] = float_round_to_zero,
- [FPROUNDING_TIEAWAY] = float_round_ties_away,
- [FPROUNDING_ODD] = float_round_to_odd,
-};
-
-/*
- * Implement float64 to int32_t conversion without saturation;
- * the result is supplied modulo 2^32.
- */
-uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus)
-{
- float_status *status = vstatus;
- uint32_t frac, e_old, e_new;
- bool inexact;
-
- e_old = get_float_exception_flags(status);
- set_float_exception_flags(0, status);
- frac = float64_to_int32_modulo(value, float_round_to_zero, status);
- e_new = get_float_exception_flags(status);
- set_float_exception_flags(e_old | e_new, status);
-
- /* Normal inexact, denormal with flush-to-zero, or overflow or NaN */
- inexact = e_new & (float_flag_inexact |
- float_flag_input_denormal |
- float_flag_invalid);
-
- /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */
- inexact |= value == float64_chs(float64_zero);
-
- /* Pack the result and the env->ZF representation of Z together. */
- return deposit64(frac, 32, 32, inexact);
-}
-
-uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env)
-{
- uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status);
- uint32_t result = pair;
- uint32_t z = (pair >> 32) == 0;
-
- /* Store Z, clear NCV, in FPSCR.NZCV. */
- env->vfp.fpsr = (env->vfp.fpsr & ~FPSR_NZCV_MASK) | (z * FPSR_Z);
-
- return result;
-}
-
-/* Round a float32 to an integer that fits in int32_t or int64_t. */
-static float32 frint_s(float32 f, float_status *fpst, int intsize)
-{
- int old_flags = get_float_exception_flags(fpst);
- uint32_t exp = extract32(f, 23, 8);
-
- if (unlikely(exp == 0xff)) {
- /* NaN or Inf. */
- goto overflow;
- }
-
- /* Round and re-extract the exponent. */
- f = float32_round_to_int(f, fpst);
- exp = extract32(f, 23, 8);
-
- /* Validate the range of the result. */
- if (exp < 126 + intsize) {
- /* abs(F) <= INT{N}_MAX */
- return f;
- }
- if (exp == 126 + intsize) {
- uint32_t sign = extract32(f, 31, 1);
- uint32_t frac = extract32(f, 0, 23);
- if (sign && frac == 0) {
- /* F == INT{N}_MIN */
- return f;
- }
- }
-
- overflow:
- /*
- * Raise Invalid and return INT{N}_MIN as a float. Revert any
- * inexact exception float32_round_to_int may have raised.
- */
- set_float_exception_flags(old_flags | float_flag_invalid, fpst);
- return (0x100u + 126u + intsize) << 23;
-}
-
-float32 HELPER(frint32_s)(float32 f, void *fpst)
-{
- return frint_s(f, fpst, 32);
-}
-
-float32 HELPER(frint64_s)(float32 f, void *fpst)
-{
- return frint_s(f, fpst, 64);
-}
-
-/* Round a float64 to an integer that fits in int32_t or int64_t. */
-static float64 frint_d(float64 f, float_status *fpst, int intsize)
-{
- int old_flags = get_float_exception_flags(fpst);
- uint32_t exp = extract64(f, 52, 11);
-
- if (unlikely(exp == 0x7ff)) {
- /* NaN or Inf. */
- goto overflow;
- }
-
- /* Round and re-extract the exponent. */
- f = float64_round_to_int(f, fpst);
- exp = extract64(f, 52, 11);
-
- /* Validate the range of the result. */
- if (exp < 1022 + intsize) {
- /* abs(F) <= INT{N}_MAX */
- return f;
- }
- if (exp == 1022 + intsize) {
- uint64_t sign = extract64(f, 63, 1);
- uint64_t frac = extract64(f, 0, 52);
- if (sign && frac == 0) {
- /* F == INT{N}_MIN */
- return f;
- }
- }
-
- overflow:
- /*
- * Raise Invalid and return INT{N}_MIN as a float. Revert any
- * inexact exception float64_round_to_int may have raised.
- */
- set_float_exception_flags(old_flags | float_flag_invalid, fpst);
- return (uint64_t)(0x800 + 1022 + intsize) << 52;
-}
-
-float64 HELPER(frint32_d)(float64 f, void *fpst)
-{
- return frint_d(f, fpst, 32);
-}
-
-float64 HELPER(frint64_d)(float64 f, void *fpst)
-{
- return frint_d(f, fpst, 64);
-}
-
-void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
-{
- uint32_t syndrome;
-
- switch (reg) {
- case ARM_VFP_MVFR0:
- case ARM_VFP_MVFR1:
- case ARM_VFP_MVFR2:
- if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
- return;
- }
- break;
- case ARM_VFP_FPSID:
- if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
- return;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
- syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
- | ARM_EL_IL
- | (1 << 24) | (0xe << 20) | (7 << 14)
- | (reg << 10) | (rt << 5) | 1);
-
- raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
-}
-
-#endif
diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h
index 93c2f47..f74bfc25 100644
--- a/target/avr/cpu-param.h
+++ b/target/avr/cpu-param.h
@@ -21,17 +21,10 @@
#ifndef AVR_CPU_PARAM_H
#define AVR_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
-/*
- * TARGET_PAGE_BITS cannot be more than 8 bits because
- * 1. all IO registers occupy [0x0000 .. 0x00ff] address range, and they
- * should be implemented as a device and not memory
- * 2. SRAM starts at the address 0x0100
- */
-#define TARGET_PAGE_BITS 8
+#define TARGET_PAGE_BITS 10
#define TARGET_PHYS_ADDR_SPACE_BITS 24
#define TARGET_VIRT_ADDR_SPACE_BITS 24
-#define TCG_GUEST_DEFAULT_MO 0
+#define TARGET_INSN_START_EXTRA_WORDS 0
#endif
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index 3132842..6995de6 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -21,11 +21,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/qemu-print.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
+#include "system/address-spaces.h"
#include "cpu.h"
#include "disas/dis-asm.h"
#include "tcg/debug-assert.h"
#include "hw/qdev-properties.h"
+#include "accel/tcg/cpu-ops.h"
static void avr_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -52,6 +54,21 @@ static int avr_cpu_mmu_index(CPUState *cs, bool ifetch)
return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
}
+static TCGTBCPUState avr_get_tb_cpu_state(CPUState *cs)
+{
+ CPUAVRState *env = cpu_env(cs);
+ uint32_t flags = 0;
+
+ if (env->fullacc) {
+ flags |= TB_FLAGS_FULL_ACCESS;
+ }
+ if (env->skip) {
+ flags |= TB_FLAGS_SKIP;
+ }
+
+ return (TCGTBCPUState){ .pc = env->pc_w * 2, .flags = flags };
+}
+
static void avr_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -101,6 +118,7 @@ static void avr_cpu_reset_hold(Object *obj, ResetType type)
static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
+ info->endian = BFD_ENDIAN_LITTLE;
info->mach = bfd_arch_avr;
info->print_insn = avr_print_insn;
}
@@ -108,6 +126,8 @@ static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
static void avr_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
+ CPUAVRState *env = cpu_env(cs);
+ AVRCPU *cpu = env_archcpu(env);
AVRCPUClass *mcc = AVR_CPU_GET_CLASS(dev);
Error *local_err = NULL;
@@ -120,6 +140,19 @@ static void avr_cpu_realizefn(DeviceState *dev, Error **errp)
cpu_reset(cs);
mcc->parent_realize(dev, errp);
+
+ /*
+ * Two blocks in the low data space loop back into cpu registers.
+ */
+ memory_region_init_io(&cpu->cpu_reg1, OBJECT(cpu), &avr_cpu_reg1, env,
+ "avr-cpu-reg1", 32);
+ memory_region_add_subregion(get_system_memory(),
+ OFFSET_DATA, &cpu->cpu_reg1);
+
+ memory_region_init_io(&cpu->cpu_reg2, OBJECT(cpu), &avr_cpu_reg2, env,
+ "avr-cpu-reg2", 8);
+ memory_region_add_subregion(get_system_memory(),
+ OFFSET_DATA + 0x58, &cpu->cpu_reg2);
}
static void avr_cpu_set_int(void *opaque, int irq, int level)
@@ -149,9 +182,8 @@ static void avr_cpu_initfn(Object *obj)
sizeof(cpu->env.intsrc) * 8);
}
-static Property avr_cpu_properties[] = {
+static const Property avr_cpu_properties[] = {
DEFINE_PROP_UINT32("init-sp", AVRCPU, init_sp, 0),
- DEFINE_PROP_END_OF_LIST()
};
static ObjectClass *avr_cpu_class_by_name(const char *cpu_model)
@@ -200,22 +232,33 @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps avr_sysemu_ops = {
+ .has_work = avr_cpu_has_work,
.get_phys_page_debug = avr_cpu_get_phys_page_debug,
};
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps avr_tcg_ops = {
+ .guest_default_memory_order = 0,
+ .mttcg_supported = false,
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
+ .get_tb_cpu_state = avr_get_tb_cpu_state,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
+ .mmu_index = avr_cpu_mmu_index,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.cpu_exec_halt = avr_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.tlb_fill = avr_cpu_tlb_fill,
.do_interrupt = avr_cpu_do_interrupt,
+ /*
+ * TODO: code and data wrapping are different, but for the most part
+ * AVR only references bytes or aligned code fetches. But we use
+ * non-aligned MO_16 accesses for stack push/pop.
+ */
+ .pointer_wrap = cpu_pointer_wrap_uint32,
};
-static void avr_cpu_class_init(ObjectClass *oc, void *data)
+static void avr_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -231,8 +274,6 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = avr_cpu_class_by_name;
- cc->has_work = avr_cpu_has_work;
- cc->mmu_index = avr_cpu_mmu_index;
cc->dump_state = avr_cpu_dump_state;
cc->set_pc = avr_cpu_set_pc;
cc->get_pc = avr_cpu_get_pc;
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index 4725535..518e243 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -22,7 +22,10 @@
#define QEMU_AVR_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
+#include "system/memory.h"
#ifdef CONFIG_USER_ONLY
#error "AVR 8-bit does not support user mode"
@@ -44,8 +47,16 @@
/* Number of CPU registers */
#define NUMBER_OF_CPU_REGISTERS 32
-/* Number of IO registers accessible by ld/st/in/out */
-#define NUMBER_OF_IO_REGISTERS 64
+
+/* CPU registers mapped into i/o ports 0x38-0x3f. */
+#define REG_38_RAMPD 0
+#define REG_38_RAMPX 1
+#define REG_38_RAMPY 2
+#define REG_38_RAMPZ 3
+#define REG_38_EIDN 4
+#define REG_38_SPL 5
+#define REG_38_SPH 6
+#define REG_38_SREG 7
/*
* Offsets of AVR memory regions in host memory space.
@@ -60,8 +71,6 @@
#define OFFSET_CODE 0x00000000
/* CPU registers, IO registers, and SRAM */
#define OFFSET_DATA 0x00800000
-/* CPU registers specifically, these are mapped at the start of data */
-#define OFFSET_CPU_REGISTERS OFFSET_DATA
/*
* IO registers, including status register, stack pointer, and memory
* mapped peripherals, mapped just after CPU registers
@@ -144,6 +153,9 @@ struct ArchCPU {
CPUAVRState env;
+ MemoryRegion cpu_reg1;
+ MemoryRegion cpu_reg2;
+
/* Initial value of stack pointer */
uint32_t init_sp;
};
@@ -183,6 +195,8 @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
}
void avr_cpu_tcg_init(void);
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
int cpu_avr_exec(CPUState *cpu);
@@ -191,24 +205,6 @@ enum {
TB_FLAGS_SKIP = 2,
};
-static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- uint32_t flags = 0;
-
- *pc = env->pc_w * 2;
- *cs_base = 0;
-
- if (env->fullacc) {
- flags |= TB_FLAGS_FULL_ACCESS;
- }
- if (env->skip) {
- flags |= TB_FLAGS_SKIP;
- }
-
- *pflags = flags;
-}
-
static inline int cpu_interrupts_enabled(CPUAVRState *env)
{
return env->sregI != 0;
@@ -242,6 +238,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-#include "exec/cpu-all.h"
+extern const MemoryRegionOps avr_cpu_reg1;
+extern const MemoryRegionOps avr_cpu_reg2;
#endif /* QEMU_AVR_CPU_H */
diff --git a/target/avr/disas.c b/target/avr/disas.c
index b7689e8d..d341030 100644
--- a/target/avr/disas.c
+++ b/target/avr/disas.c
@@ -68,28 +68,35 @@ static bool decode_insn(DisasContext *ctx, uint16_t insn);
int avr_print_insn(bfd_vma addr, disassemble_info *info)
{
- DisasContext ctx;
+ DisasContext ctx = { info };
DisasContext *pctx = &ctx;
bfd_byte buffer[4];
uint16_t insn;
int status;
- ctx.info = info;
-
- status = info->read_memory_func(addr, buffer, 4, info);
+ status = info->read_memory_func(addr, buffer, 2, info);
if (status != 0) {
info->memory_error_func(status, addr, info);
return -1;
}
insn = bfd_getl16(buffer);
- ctx.next_word = bfd_getl16(buffer + 2);
- ctx.next_word_used = false;
+
+ status = info->read_memory_func(addr + 2, buffer + 2, 2, info);
+ if (status == 0) {
+ ctx.next_word = bfd_getl16(buffer + 2);
+ }
if (!decode_insn(&ctx, insn)) {
output(".db", "0x%02x, 0x%02x", buffer[0], buffer[1]);
}
- return ctx.next_word_used ? 4 : 2;
+ if (!ctx.next_word_used) {
+ return 2;
+ } else if (status == 0) {
+ return 4;
+ }
+ info->memory_error_func(status, addr + 2, info);
+ return -1;
}
diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c
index d6d3c14..aea7128 100644
--- a/target/avr/gdbstub.c
+++ b/target/avr/gdbstub.c
@@ -69,13 +69,13 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
/* SP */
if (n == 33) {
- env->sp = lduw_p(mem_buf);
+ env->sp = lduw_le_p(mem_buf);
return 2;
}
/* PC */
if (n == 34) {
- env->pc_w = ldl_p(mem_buf) / 2;
+ env->pc_w = ldl_le_p(mem_buf) / 2;
return 4;
}
diff --git a/target/avr/helper.c b/target/avr/helper.c
index 345708a..b9cd6d5 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -22,11 +22,11 @@
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ops.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
-#include "exec/cpu_ldst.h"
-#include "exec/address-spaces.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -67,6 +67,11 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+static void do_stb(CPUAVRState *env, uint32_t addr, uint8_t data, uintptr_t ra)
+{
+ cpu_stb_mmuidx_ra(env, addr, data, MMU_DATA_IDX, ra);
+}
+
void avr_cpu_do_interrupt(CPUState *cs)
{
CPUAVRState *env = cpu_env(cs);
@@ -83,14 +88,14 @@ void avr_cpu_do_interrupt(CPUState *cs)
}
if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
- cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
- cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
- cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
+ do_stb(env, env->sp--, ret, 0);
+ do_stb(env, env->sp--, ret >> 8, 0);
+ do_stb(env, env->sp--, ret >> 16, 0);
} else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
- cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
- cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
+ do_stb(env, env->sp--, ret, 0);
+ do_stb(env, env->sp--, ret >> 8, 0);
} else {
- cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ do_stb(env, env->sp--, ret, 0);
}
env->pc_w = base + vector * size;
@@ -108,7 +113,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
- int prot, page_size = TARGET_PAGE_SIZE;
+ int prot;
uint32_t paddr;
address &= TARGET_PAGE_MASK;
@@ -133,23 +138,9 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Access to memory. */
paddr = OFFSET_DATA + address;
prot = PAGE_READ | PAGE_WRITE;
- if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
- /*
- * Access to CPU registers, exit and rebuilt this TB to use
- * full access in case it touches specially handled registers
- * like SREG or SP. For probing, set page_size = 1, in order
- * to force tlb_fill to be called for the next access.
- */
- if (probe) {
- page_size = 1;
- } else {
- cpu_env(cs)->fullacc = 1;
- cpu_loop_exit_restore(cs, retaddr);
- }
- }
}
- tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, address, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
@@ -203,156 +194,129 @@ void helper_wdr(CPUAVRState *env)
}
/*
- * This function implements IN instruction
- *
- * It does the following
- * a. if an IO register belongs to CPU, its value is read and returned
- * b. otherwise io address is translated to mem address and physical memory
- * is read.
- * c. it caches the value for sake of SBI, SBIC, SBIS & CBI implementation
- *
+ * The first 32 bytes of the data space are mapped to the cpu regs.
+ * We cannot write these from normal store operations because TCG
+ * does not expect global temps to be modified -- a global may be
+ * live in a host cpu register across the store. We can however
+ * read these, as TCG does make sure the global temps are saved
+ * in case the load operation traps.
*/
-target_ulong helper_inb(CPUAVRState *env, uint32_t port)
+
+static uint64_t avr_cpu_reg1_read(void *opaque, hwaddr addr, unsigned size)
{
- target_ulong data = 0;
+ CPUAVRState *env = opaque;
- switch (port) {
- case 0x38: /* RAMPD */
- data = 0xff & (env->rampD >> 16);
- break;
- case 0x39: /* RAMPX */
- data = 0xff & (env->rampX >> 16);
- break;
- case 0x3a: /* RAMPY */
- data = 0xff & (env->rampY >> 16);
- break;
- case 0x3b: /* RAMPZ */
- data = 0xff & (env->rampZ >> 16);
- break;
- case 0x3c: /* EIND */
- data = 0xff & (env->eind >> 16);
- break;
- case 0x3d: /* SPL */
- data = env->sp & 0x00ff;
- break;
- case 0x3e: /* SPH */
- data = env->sp >> 8;
- break;
- case 0x3f: /* SREG */
- data = cpu_get_sreg(env);
- break;
- default:
- /* not a special register, pass to normal memory access */
- data = address_space_ldub(&address_space_memory,
- OFFSET_IO_REGISTERS + port,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ assert(addr < 32);
+ return env->r[addr];
+}
+
+/*
+ * The range 0x38-0x3f of the i/o space is mapped to cpu regs.
+ * As above, we cannot write these from normal store operations.
+ */
+
+static uint64_t avr_cpu_reg2_read(void *opaque, hwaddr addr, unsigned size)
+{
+ CPUAVRState *env = opaque;
+
+ switch (addr) {
+ case REG_38_RAMPD:
+ return 0xff & (env->rampD >> 16);
+ case REG_38_RAMPX:
+ return 0xff & (env->rampX >> 16);
+ case REG_38_RAMPY:
+ return 0xff & (env->rampY >> 16);
+ case REG_38_RAMPZ:
+ return 0xff & (env->rampZ >> 16);
+ case REG_38_EIDN:
+ return 0xff & (env->eind >> 16);
+ case REG_38_SPL:
+ return env->sp & 0x00ff;
+ case REG_38_SPH:
+ return 0xff & (env->sp >> 8);
+ case REG_38_SREG:
+ return cpu_get_sreg(env);
}
+ g_assert_not_reached();
+}
- return data;
+static void avr_cpu_trap_write(void *opaque, hwaddr addr,
+ uint64_t data64, unsigned size)
+{
+ CPUAVRState *env = opaque;
+ CPUState *cs = env_cpu(env);
+
+ env->fullacc = true;
+ cpu_loop_exit_restore(cs, cs->mem_io_pc);
}
+const MemoryRegionOps avr_cpu_reg1 = {
+ .read = avr_cpu_reg1_read,
+ .write = avr_cpu_trap_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 1,
+};
+
+const MemoryRegionOps avr_cpu_reg2 = {
+ .read = avr_cpu_reg2_read,
+ .write = avr_cpu_trap_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 1,
+};
+
/*
- * This function implements OUT instruction
- *
- * It does the following
- * a. if an IO register belongs to CPU, its value is written into the register
- * b. otherwise io address is translated to mem address and physical memory
- * is written.
- * c. it caches the value for sake of SBI, SBIC, SBIS & CBI implementation
- *
+ * this function implements ST instruction when there is a possibility to write
+ * into a CPU register
*/
-void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
+void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
- data &= 0x000000ff;
+ env->fullacc = false;
- switch (port) {
- case 0x38: /* RAMPD */
+ switch (addr) {
+ case 0 ... 31:
+ /* CPU registers */
+ env->r[addr] = data;
+ break;
+
+ case REG_38_RAMPD + 0x38 + NUMBER_OF_CPU_REGISTERS:
if (avr_feature(env, AVR_FEATURE_RAMPD)) {
- env->rampD = (data & 0xff) << 16;
+ env->rampD = data << 16;
}
break;
- case 0x39: /* RAMPX */
+ case REG_38_RAMPX + 0x38 + NUMBER_OF_CPU_REGISTERS:
if (avr_feature(env, AVR_FEATURE_RAMPX)) {
- env->rampX = (data & 0xff) << 16;
+ env->rampX = data << 16;
}
break;
- case 0x3a: /* RAMPY */
+ case REG_38_RAMPY + 0x38 + NUMBER_OF_CPU_REGISTERS:
if (avr_feature(env, AVR_FEATURE_RAMPY)) {
- env->rampY = (data & 0xff) << 16;
+ env->rampY = data << 16;
}
break;
- case 0x3b: /* RAMPZ */
+ case REG_38_RAMPZ + 0x38 + NUMBER_OF_CPU_REGISTERS:
if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
- env->rampZ = (data & 0xff) << 16;
+ env->rampZ = data << 16;
}
break;
- case 0x3c: /* EIDN */
- env->eind = (data & 0xff) << 16;
+ case REG_38_EIDN + 0x38 + NUMBER_OF_CPU_REGISTERS:
+ env->eind = data << 16;
break;
- case 0x3d: /* SPL */
- env->sp = (env->sp & 0xff00) | (data);
+ case REG_38_SPL + 0x38 + NUMBER_OF_CPU_REGISTERS:
+ env->sp = (env->sp & 0xff00) | data;
break;
- case 0x3e: /* SPH */
+ case REG_38_SPH + 0x38 + NUMBER_OF_CPU_REGISTERS:
if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
env->sp = (env->sp & 0x00ff) | (data << 8);
}
break;
- case 0x3f: /* SREG */
+ case REG_38_SREG + 0x38 + NUMBER_OF_CPU_REGISTERS:
cpu_set_sreg(env, data);
break;
- default:
- /* not a special register, pass to normal memory access */
- address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
- data, MEMTXATTRS_UNSPECIFIED, NULL);
- }
-}
-
-/*
- * this function implements LD instruction when there is a possibility to read
- * from a CPU register
- */
-target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
-{
- uint8_t data;
-
- env->fullacc = false;
-
- if (addr < NUMBER_OF_CPU_REGISTERS) {
- /* CPU registers */
- data = env->r[addr];
- } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
- /* IO registers */
- data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
- } else {
- /* memory */
- data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
- }
- return data;
-}
-/*
- * this function implements ST instruction when there is a possibility to write
- * into a CPU register
- */
-void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
-{
- env->fullacc = false;
-
- /* Following logic assumes this: */
- assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
- assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
- NUMBER_OF_CPU_REGISTERS);
-
- if (addr < NUMBER_OF_CPU_REGISTERS) {
- /* CPU registers */
- env->r[addr] = data;
- } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
- /* IO registers */
- helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
- } else {
- /* memory */
- address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ default:
+ do_stb(env, addr, data, GETPC());
+ break;
}
}
diff --git a/target/avr/helper.h b/target/avr/helper.h
index 4d02e64..e8d13e9 100644
--- a/target/avr/helper.h
+++ b/target/avr/helper.h
@@ -23,7 +23,4 @@ DEF_HELPER_1(debug, noreturn, env)
DEF_HELPER_1(break, noreturn, env)
DEF_HELPER_1(sleep, noreturn, env)
DEF_HELPER_1(unsupported, noreturn, env)
-DEF_HELPER_3(outb, void, env, i32, i32)
-DEF_HELPER_2(inb, tl, env, i32)
DEF_HELPER_3(fullwr, void, env, i32, i32)
-DEF_HELPER_2(fullrd, tl, env, i32)
diff --git a/target/avr/insn.decode b/target/avr/insn.decode
index 482c23a..cc30224 100644
--- a/target/avr/insn.decode
+++ b/target/avr/insn.decode
@@ -118,11 +118,8 @@ BRBC 1111 01 ....... ... @op_bit_imm
@io_rd_imm .... . .. ..... .... &rd_imm rd=%rd imm=%io_imm
@ldst_d .. . . .. . rd:5 . ... &rd_imm imm=%ldst_d_imm
-# The 16-bit immediate is completely in the next word.
-# Fields cannot be defined with no bits, so we cannot play
-# the same trick and append to a zero-bit value.
-# Defer reading the immediate until trans_{LDS,STS}.
-@ldst_s .... ... rd:5 .... imm=0
+%ldst_imm !function=next_word
+@ldst_s .... ... rd:5 .... imm=%ldst_imm
MOV 0010 11 . ..... .... @op_rd_rr
MOVW 0000 0001 .... .... &rd_rr rd=%rd_d rr=%rr_d
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 2d51892..804b0b2 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -22,12 +22,13 @@
#include "qemu/qemu-print.h"
#include "tcg/tcg.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "exec/translator.h"
+#include "exec/target_page.h"
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
@@ -193,6 +194,9 @@ static bool avr_have_feature(DisasContext *ctx, int feature)
static bool decode_insn(DisasContext *ctx, uint16_t insn);
#include "decode-insn.c.inc"
+static void gen_inb(DisasContext *ctx, TCGv data, int port);
+static void gen_outb(DisasContext *ctx, TCGv data, int port);
+
/*
* Arithmetic Instructions
*/
@@ -1292,9 +1296,8 @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a)
static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, tcg_env, port);
+ gen_inb(ctx, data, a->reg);
tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_EQ;
ctx->skip_var0 = data;
@@ -1310,9 +1313,8 @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, tcg_env, port);
+ gen_inb(ctx, data, a->reg);
tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_NE;
ctx->skip_var0 = data;
@@ -1501,11 +1503,18 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
{
- if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
- gen_helper_fullrd(data, tcg_env, addr);
- } else {
- tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
- }
+ tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
+}
+
+static void gen_inb(DisasContext *ctx, TCGv data, int port)
+{
+ gen_data_load(ctx, data, tcg_constant_i32(port + NUMBER_OF_CPU_REGISTERS));
+}
+
+static void gen_outb(DisasContext *ctx, TCGv data, int port)
+{
+ gen_helper_fullwr(tcg_env, data,
+ tcg_constant_i32(port + NUMBER_OF_CPU_REGISTERS));
}
/*
@@ -1577,7 +1586,6 @@ static bool trans_LDS(DisasContext *ctx, arg_LDS *a)
TCGv Rd = cpu_r[a->rd];
TCGv addr = tcg_temp_new_i32();
TCGv H = cpu_rampD;
- a->imm = next_word(ctx);
tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
tcg_gen_shli_tl(addr, addr, 16);
@@ -1782,7 +1790,6 @@ static bool trans_STS(DisasContext *ctx, arg_STS *a)
TCGv Rd = cpu_r[a->rd];
TCGv addr = tcg_temp_new_i32();
TCGv H = cpu_rampD;
- a->imm = next_word(ctx);
tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
tcg_gen_shli_tl(addr, addr, 16);
@@ -2127,9 +2134,8 @@ static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a)
static bool trans_IN(DisasContext *ctx, arg_IN *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_constant_i32(a->imm);
- gen_helper_inb(Rd, tcg_env, port);
+ gen_inb(ctx, Rd, a->imm);
return true;
}
@@ -2140,9 +2146,8 @@ static bool trans_IN(DisasContext *ctx, arg_IN *a)
static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_constant_i32(a->imm);
- gen_helper_outb(tcg_env, port, Rd);
+ gen_outb(ctx, Rd, a->imm);
return true;
}
@@ -2408,11 +2413,10 @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a)
static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, tcg_env, port);
+ gen_inb(ctx, data, a->reg);
tcg_gen_ori_tl(data, data, 1 << a->bit);
- gen_helper_outb(tcg_env, port, data);
+ gen_outb(ctx, data, a->reg);
return true;
}
@@ -2423,11 +2427,10 @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, tcg_env, port);
+ gen_inb(ctx, data, a->reg);
tcg_gen_andi_tl(data, data, ~(1 << a->bit));
- gen_helper_outb(tcg_env, port, data);
+ gen_outb(ctx, data, a->reg);
return true;
}
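
Editorial note: the hunks above route SBIC/SBIS/IN/OUT/SBI/CBI through the new gen_inb()/gen_outb() wrappers. gen_inb() is now a plain data-space load at address port + NUMBER_OF_CPU_REGISTERS (gen_data_load() lost its full-access helper path), while gen_outb() still goes through the fullwr helper, so SBI/CBI become load, bit-op, helper-backed store. A standalone model of that address mapping (no TCG; the 32-register offset and I/O file size are assumptions consistent with the constant names):

/* Standalone model of the AVR data-space view behind gen_inb()/gen_outb():
 * I/O port p lives at data address p + NUMBER_OF_CPU_REGISTERS.
 * Illustration only, not QEMU code. */
#include <stdint.h>
#include <stdio.h>

#define NUMBER_OF_CPU_REGISTERS 32      /* r0..r31, assumed */
#define NUMBER_OF_IO_REGISTERS  64      /* assumed */

static uint8_t data_space[NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS + 256];

static uint8_t inb(int port)            /* models gen_inb(): plain load */
{
    return data_space[port + NUMBER_OF_CPU_REGISTERS];
}

static void outb(int port, uint8_t val) /* models gen_outb(): store (helper-backed in QEMU) */
{
    data_space[port + NUMBER_OF_CPU_REGISTERS] = val;
}

static void sbi(int port, int bit)      /* SBI = load, set bit, store back */
{
    outb(port, inb(port) | (1u << bit));
}

int main(void)
{
    outb(0x05, 0x10);
    sbi(0x05, 1);
    printf("port 0x05 = 0x%02x\n", inb(0x05));   /* prints 0x12 */
    return 0;
}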
@@ -2598,7 +2601,7 @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -2794,8 +2797,8 @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
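
Editorial note: the last two hunks rename the AVR entry point from the global gen_intermediate_code() to avr_cpu_translate_code() and update the comment to match, which suggests the core now reaches the translator through a per-target translate_code hook rather than a fixed symbol; the exact registration is not shown in this excerpt. A generic standalone sketch of that callback-table dispatch pattern (all names invented for illustration, not QEMU's structures):

/* Sketch of the dispatch pattern implied by the rename: each target exports
 * its own translate_code() and the core calls it via a per-CPU ops table.
 * Names are invented; this is not QEMU's actual API. */
#include <stdio.h>

typedef struct CPU CPU;

typedef struct {
    void (*translate_code)(CPU *cpu, int *max_insns);
} TargetOps;

struct CPU {
    const char *name;
    const TargetOps *ops;
};

static void avr_translate_code(CPU *cpu, int *max_insns)
{
    printf("translating for %s, budget %d insns\n", cpu->name, *max_insns);
}

static const TargetOps avr_ops = { .translate_code = avr_translate_code };

int main(void)
{
    CPU cpu = { "avr", &avr_ops };
    int budget = 512;
    cpu.ops->translate_code(&cpu, &budget);   /* core-side dispatch */
    return 0;
}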
diff --git a/target/cris/Kconfig b/target/cris/Kconfig
deleted file mode 100644
index 3fdc309..0000000
--- a/target/cris/Kconfig
+++ /dev/null
@@ -1,2 +0,0 @@
-config CRIS
- bool
diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h
deleted file mode 100644
index b31b742..0000000
--- a/target/cris/cpu-param.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * CRIS cpu parameters for qemu.
- *
- * Copyright (c) 2007 AXIS Communications AB
- * SPDX-License-Identifier: LGPL-2.0+
- */
-
-#ifndef CRIS_CPU_PARAM_H
-#define CRIS_CPU_PARAM_H
-
-#define TARGET_LONG_BITS 32
-#define TARGET_PAGE_BITS 13
-#define TARGET_PHYS_ADDR_SPACE_BITS 32
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
-
-#endif
diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h
deleted file mode 100644
index 741ca97..0000000
--- a/target/cris/cpu-qom.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * QEMU CRIS CPU QOM header (target agnostic)
- *
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-#ifndef QEMU_CRIS_CPU_QOM_H
-#define QEMU_CRIS_CPU_QOM_H
-
-#include "hw/core/cpu.h"
-
-#define TYPE_CRIS_CPU "cris-cpu"
-
-OBJECT_DECLARE_CPU_TYPE(CRISCPU, CRISCPUClass, CRIS_CPU)
-
-#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU
-#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX)
-
-#endif
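
Editorial note: for the record, the deleted cpu-qom.h composed QOM type names by appending "-cris-cpu" to the model name. A two-assert standalone check of that expansion, with the macros reproduced verbatim:

/* The deleted type-name macros, plus a check of what they expand to. */
#include <assert.h>
#include <string.h>
#include <stdio.h>

#define TYPE_CRIS_CPU "cris-cpu"
#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU
#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX)

int main(void)
{
    assert(strcmp(CRIS_CPU_TYPE_NAME("crisv32"), "crisv32-cris-cpu") == 0);
    assert(strcmp(CRIS_CPU_TYPE_NAME("crisv10"), "crisv10-cris-cpu") == 0);
    printf("%s\n", CRIS_CPU_TYPE_NAME("crisv32"));
    return 0;
}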
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
deleted file mode 100644
index ff31ca7..0000000
--- a/target/cris/cpu.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * QEMU CRIS CPU
- *
- * Copyright (c) 2008 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/qemu-print.h"
-#include "cpu.h"
-#include "mmu.h"
-
-
-static void cris_cpu_set_pc(CPUState *cs, vaddr value)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
-
- cpu->env.pc = value;
-}
-
-static vaddr cris_cpu_get_pc(CPUState *cs)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
-
- return cpu->env.pc;
-}
-
-static void cris_restore_state_to_opc(CPUState *cs,
- const TranslationBlock *tb,
- const uint64_t *data)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
-
- cpu->env.pc = data[0];
-}
-
-static bool cris_cpu_has_work(CPUState *cs)
-{
- return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
-}
-
-static int cris_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return !!(cpu_env(cs)->pregs[PR_CCS] & U_FLAG);
-}
-
-static void cris_cpu_reset_hold(Object *obj, ResetType type)
-{
- CPUState *cs = CPU(obj);
- CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
- CPUCRISState *env = cpu_env(cs);
- uint32_t vr;
-
- if (ccc->parent_phases.hold) {
- ccc->parent_phases.hold(obj, type);
- }
-
- vr = env->pregs[PR_VR];
- memset(env, 0, offsetof(CPUCRISState, end_reset_fields));
- env->pregs[PR_VR] = vr;
-
-#if defined(CONFIG_USER_ONLY)
- /* start in user mode with interrupts enabled. */
- env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
-#else
- cris_mmu_init(env);
- env->pregs[PR_CCS] = 0;
-#endif
-}
-
-static ObjectClass *cris_cpu_class_by_name(const char *cpu_model)
-{
- ObjectClass *oc;
- char *typename;
-
-#if defined(CONFIG_USER_ONLY)
- if (strcasecmp(cpu_model, "any") == 0) {
- return object_class_by_name(CRIS_CPU_TYPE_NAME("crisv32"));
- }
-#endif
-
- typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model);
- oc = object_class_by_name(typename);
- g_free(typename);
-
- return oc;
-}
-
-static void cris_cpu_realizefn(DeviceState *dev, Error **errp)
-{
- CPUState *cs = CPU(dev);
- CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(dev);
- Error *local_err = NULL;
-
- cpu_exec_realizefn(cs, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return;
- }
-
- cpu_reset(cs);
- qemu_init_vcpu(cs);
-
- ccc->parent_realize(dev, errp);
-}
-
-#ifndef CONFIG_USER_ONLY
-static void cris_cpu_set_irq(void *opaque, int irq, int level)
-{
- CRISCPU *cpu = opaque;
- CPUState *cs = CPU(cpu);
- int type = irq == CRIS_CPU_IRQ ? CPU_INTERRUPT_HARD : CPU_INTERRUPT_NMI;
-
- if (irq == CRIS_CPU_IRQ) {
- /*
- * The PIC passes us the vector for the IRQ as the value it sends
- * over the qemu_irq line
- */
- cpu->env.interrupt_vector = level;
- }
-
- if (level) {
- cpu_interrupt(cs, type);
- } else {
- cpu_reset_interrupt(cs, type);
- }
-}
-#endif
-
-static void cris_disas_set_info(CPUState *cpu, disassemble_info *info)
-{
- if (cpu_env(cpu)->pregs[PR_VR] != 32) {
- info->mach = bfd_mach_cris_v0_v10;
- info->print_insn = print_insn_crisv10;
- } else {
- info->mach = bfd_mach_cris_v32;
- info->print_insn = print_insn_crisv32;
- }
-}
-
-static void cris_cpu_initfn(Object *obj)
-{
- CRISCPU *cpu = CRIS_CPU(obj);
- CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
- CPUCRISState *env = &cpu->env;
-
- env->pregs[PR_VR] = ccc->vr;
-
-#ifndef CONFIG_USER_ONLY
- /* IRQ and NMI lines. */
- qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2);
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-#include "hw/core/sysemu-cpu-ops.h"
-
-static const struct SysemuCPUOps cris_sysemu_ops = {
- .get_phys_page_debug = cris_cpu_get_phys_page_debug,
-};
-#endif
-
-#include "hw/core/tcg-cpu-ops.h"
-
-static const TCGCPUOps crisv10_tcg_ops = {
- .initialize = cris_initialize_crisv10_tcg,
- .restore_state_to_opc = cris_restore_state_to_opc,
-
-#ifndef CONFIG_USER_ONLY
- .tlb_fill = cris_cpu_tlb_fill,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
- .cpu_exec_halt = cris_cpu_has_work,
- .do_interrupt = crisv10_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
-};
-
-static const TCGCPUOps crisv32_tcg_ops = {
- .initialize = cris_initialize_tcg,
- .restore_state_to_opc = cris_restore_state_to_opc,
-
-#ifndef CONFIG_USER_ONLY
- .tlb_fill = cris_cpu_tlb_fill,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
- .cpu_exec_halt = cris_cpu_has_work,
- .do_interrupt = cris_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
-};
-
-static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 8;
- cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops = &crisv10_tcg_ops;
-}
-
-static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 9;
- cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops = &crisv10_tcg_ops;
-}
-
-static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 10;
- cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops = &crisv10_tcg_ops;
-}
-
-static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 11;
- cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops = &crisv10_tcg_ops;
-}
-
-static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 17;
- cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops = &crisv10_tcg_ops;
-}
-
-static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
-
- ccc->vr = 32;
- cc->tcg_ops = &crisv32_tcg_ops;
-}
-
-static void cris_cpu_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
- CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
- ResettableClass *rc = RESETTABLE_CLASS(oc);
-
- device_class_set_parent_realize(dc, cris_cpu_realizefn,
- &ccc->parent_realize);
-
- resettable_class_set_parent_phases(rc, NULL, cris_cpu_reset_hold, NULL,
- &ccc->parent_phases);
-
- cc->class_by_name = cris_cpu_class_by_name;
- cc->has_work = cris_cpu_has_work;
- cc->mmu_index = cris_cpu_mmu_index;
- cc->dump_state = cris_cpu_dump_state;
- cc->set_pc = cris_cpu_set_pc;
- cc->get_pc = cris_cpu_get_pc;
- cc->gdb_read_register = cris_cpu_gdb_read_register;
- cc->gdb_write_register = cris_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
- dc->vmsd = &vmstate_cris_cpu;
- cc->sysemu_ops = &cris_sysemu_ops;
-#endif
-
- cc->gdb_num_core_regs = 49;
- cc->gdb_stop_before_watchpoint = true;
-
- cc->disas_set_info = cris_disas_set_info;
-}
-
-#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \
- { \
- .parent = TYPE_CRIS_CPU, \
- .class_init = initfn, \
- .name = CRIS_CPU_TYPE_NAME(cpu_model), \
- }
-
-static const TypeInfo cris_cpu_model_type_infos[] = {
- {
- .name = TYPE_CRIS_CPU,
- .parent = TYPE_CPU,
- .instance_size = sizeof(CRISCPU),
- .instance_align = __alignof(CRISCPU),
- .instance_init = cris_cpu_initfn,
- .abstract = true,
- .class_size = sizeof(CRISCPUClass),
- .class_init = cris_cpu_class_init,
- },
- DEFINE_CRIS_CPU_TYPE("crisv8", crisv8_cpu_class_init),
- DEFINE_CRIS_CPU_TYPE("crisv9", crisv9_cpu_class_init),
- DEFINE_CRIS_CPU_TYPE("crisv10", crisv10_cpu_class_init),
- DEFINE_CRIS_CPU_TYPE("crisv11", crisv11_cpu_class_init),
- DEFINE_CRIS_CPU_TYPE("crisv17", crisv17_cpu_class_init),
- DEFINE_CRIS_CPU_TYPE("crisv32", crisv32_cpu_class_init),
-};
-
-DEFINE_TYPES(cris_cpu_model_type_infos)
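
Editorial note: the deleted cris_cpu_reset_hold() above used the common partial-reset idiom: save PR_VR, memset the state up to the end_reset_fields marker, then restore PR_VR, so fields placed after the marker (load_info) survive reset. A standalone model of that idiom (field names simplified; the zero-size marker struct is the GNU/Clang extension the original relies on):

/* Standalone model of the partial-reset idiom from cris_cpu_reset_hold():
 * clear everything up to end_reset_fields, but keep the version register
 * and anything placed after the marker. */
#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t regs[16];
    uint32_t vr;              /* stands in for pregs[PR_VR] */
    struct {} end_reset_fields;   /* zero-size marker (GNU/Clang extension) */
    void *load_info;          /* preserved across resets */
} State;

static void reset(State *s)
{
    uint32_t vr = s->vr;                               /* save */
    memset(s, 0, offsetof(State, end_reset_fields));   /* clear reset section */
    s->vr = vr;                                        /* restore */
}

int main(void)
{
    State s = { .regs = { [0] = 42 }, .vr = 32, .load_info = (void *)&s };
    reset(&s);
    printf("regs[0]=%u vr=%u load_info kept=%d\n",
           s.regs[0], s.vr, s.load_info != NULL);      /* 0, 32, 1 */
    return 0;
}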
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
deleted file mode 100644
index 3904e54..0000000
--- a/target/cris/cpu.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * CRIS virtual CPU header
- *
- * Copyright (c) 2007 AXIS Communications AB
- * Written by Edgar E. Iglesias
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef CRIS_CPU_H
-#define CRIS_CPU_H
-
-#include "cpu-qom.h"
-#include "exec/cpu-defs.h"
-
-#define EXCP_NMI 1
-#define EXCP_GURU 2
-#define EXCP_BUSFAULT 3
-#define EXCP_IRQ 4
-#define EXCP_BREAK 5
-
-/* CRIS-specific interrupt pending bits. */
-#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
-
-/* CRUS CPU device objects interrupt lines. */
-/* PIC passes the vector for the IRQ as the value of it sends over qemu_irq */
-#define CRIS_CPU_IRQ 0
-#define CRIS_CPU_NMI 1
-
-/* Register aliases. R0 - R15 */
-#define R_FP 8
-#define R_SP 14
-#define R_ACR 15
-
-/* Support regs, P0 - P15 */
-#define PR_BZ 0
-#define PR_VR 1
-#define PR_PID 2
-#define PR_SRS 3
-#define PR_WZ 4
-#define PR_EXS 5
-#define PR_EDA 6
-#define PR_PREFIX 6 /* On CRISv10 P6 is reserved, we use it as prefix. */
-#define PR_MOF 7
-#define PR_DZ 8
-#define PR_EBP 9
-#define PR_ERP 10
-#define PR_SRP 11
-#define PR_NRP 12
-#define PR_CCS 13
-#define PR_USP 14
-#define PRV10_BRP 14
-#define PR_SPC 15
-
-/* CPU flags. */
-#define Q_FLAG 0x80000000
-#define M_FLAG_V32 0x40000000
-#define PFIX_FLAG 0x800 /* CRISv10 Only. */
-#define F_FLAG_V10 0x400
-#define P_FLAG_V10 0x200
-#define S_FLAG 0x200
-#define R_FLAG 0x100
-#define P_FLAG 0x80
-#define M_FLAG_V10 0x80
-#define U_FLAG 0x40
-#define I_FLAG 0x20
-#define X_FLAG 0x10
-#define N_FLAG 0x08
-#define Z_FLAG 0x04
-#define V_FLAG 0x02
-#define C_FLAG 0x01
-#define ALU_FLAGS 0x1F
-
-/* Condition codes. */
-#define CC_CC 0
-#define CC_CS 1
-#define CC_NE 2
-#define CC_EQ 3
-#define CC_VC 4
-#define CC_VS 5
-#define CC_PL 6
-#define CC_MI 7
-#define CC_LS 8
-#define CC_HI 9
-#define CC_GE 10
-#define CC_LT 11
-#define CC_GT 12
-#define CC_LE 13
-#define CC_A 14
-#define CC_P 15
-
-typedef struct {
- uint32_t hi;
- uint32_t lo;
-} TLBSet;
-
-typedef struct CPUArchState {
- uint32_t regs[16];
- /* P0 - P15 are referred to as special registers in the docs. */
- uint32_t pregs[16];
-
- /* Pseudo register for the PC. Not directly accessible on CRIS. */
- uint32_t pc;
-
- /* Pseudo register for the kernel stack. */
- uint32_t ksp;
-
- /* Branch. */
- int dslot;
- int btaken;
- uint32_t btarget;
-
- /* Condition flag tracking. */
- uint32_t cc_op;
- uint32_t cc_mask;
- uint32_t cc_dest;
- uint32_t cc_src;
- uint32_t cc_result;
- /* size of the operation, 1 = byte, 2 = word, 4 = dword. */
- int cc_size;
- /* X flag at the time of cc snapshot. */
- int cc_x;
-
- /* CRIS has certain insns that lockout interrupts. */
- int locked_irq;
- int interrupt_vector;
- int fault_vector;
- int trap_vector;
-
- /* FIXME: add a check in the translator to avoid writing to support
- register sets beyond the 4th. The ISA allows up to 256! but in
- practice there is no core that implements more than 4.
-
- Support function registers are used to control units close to the
- core. Accesses do not pass down the normal hierarchy.
- */
- uint32_t sregs[4][16];
-
- /* Linear feedback shift reg in the mmu. Used to provide pseudo
- randomness for the 'hint' the mmu gives to sw for choosing valid
- sets on TLB refills. */
- uint32_t mmu_rand_lfsr;
-
- /*
- * We just store the stores to the tlbset here for later evaluation
- * when the hw needs access to them.
- *
- * One for I and another for D.
- */
- TLBSet tlbsets[2][4][16];
-
- /* Fields up to this point are cleared by a CPU reset */
- struct {} end_reset_fields;
-
- /* Members from load_info on are preserved across resets. */
- void *load_info;
-} CPUCRISState;
-
-/**
- * CRISCPU:
- * @env: #CPUCRISState
- *
- * A CRIS CPU.
- */
-struct ArchCPU {
- CPUState parent_obj;
-
- CPUCRISState env;
-};
-
-/**
- * CRISCPUClass:
- * @parent_realize: The parent class' realize handler.
- * @parent_phases: The parent class' reset phase handlers.
- * @vr: Version Register value.
- *
- * A CRIS CPU model.
- */
-struct CRISCPUClass {
- CPUClass parent_class;
-
- DeviceRealize parent_realize;
- ResettablePhases parent_phases;
-
- uint32_t vr;
-};
-
-#ifndef CONFIG_USER_ONLY
-extern const VMStateDescription vmstate_cris_cpu;
-
-void cris_cpu_do_interrupt(CPUState *cpu);
-void crisv10_cpu_do_interrupt(CPUState *cpu);
-bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
-
-bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
-hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
-#endif
-
-void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);
-
-int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
-int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
-int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-
-void cris_initialize_tcg(void);
-void cris_initialize_crisv10_tcg(void);
-
-/* Instead of computing the condition codes after each CRIS instruction,
- * QEMU just stores one operand (called CC_SRC), the result
- * (called CC_DEST) and the type of operation (called CC_OP). When the
- * condition codes are needed, the condition codes can be calculated
- * using this information. Condition codes are not generated if they
- * are only needed for conditional branches.
- */
-enum {
- CC_OP_DYNAMIC, /* Use env->cc_op */
- CC_OP_FLAGS,
- CC_OP_CMP,
- CC_OP_MOVE,
- CC_OP_ADD,
- CC_OP_ADDC,
- CC_OP_MCP,
- CC_OP_ADDU,
- CC_OP_SUB,
- CC_OP_SUBU,
- CC_OP_NEG,
- CC_OP_BTST,
- CC_OP_MULS,
- CC_OP_MULU,
- CC_OP_DSTEP,
- CC_OP_MSTEP,
- CC_OP_BOUND,
-
- CC_OP_OR,
- CC_OP_AND,
- CC_OP_XOR,
- CC_OP_LSL,
- CC_OP_LSR,
- CC_OP_ASR,
- CC_OP_LZ
-};
-
-/* CRIS uses 8k pages. */
-#define MMAP_SHIFT TARGET_PAGE_BITS
-
-#define CPU_RESOLVING_TYPE TYPE_CRIS_CPU
-
-/* MMU modes definitions */
-#define MMU_USER_IDX 1
-
-/* Support function regs. */
-#define SFR_RW_GC_CFG 0][0
-#define SFR_RW_MM_CFG env->pregs[PR_SRS]][0
-#define SFR_RW_MM_KBASE_LO env->pregs[PR_SRS]][1
-#define SFR_RW_MM_KBASE_HI env->pregs[PR_SRS]][2
-#define SFR_R_MM_CAUSE env->pregs[PR_SRS]][3
-#define SFR_RW_MM_TLB_SEL env->pregs[PR_SRS]][4
-#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5
-#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6
-
-#include "exec/cpu-all.h"
-
-static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = env->dslot |
- (env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG
- | X_FLAG | PFIX_FLAG));
-}
-
-#endif
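
Editorial note: the deleted cpu_get_tb_cpu_state() packed the per-TB flags as the delay-slot counter OR'd with a subset of CCS bits; since the lowest included CCS bit is X_FLAG (0x10) and dslot stays small, both fit in one word without colliding. A standalone illustration using the flag constants copied from the deleted header (sample values are arbitrary):

/* Packing/unpacking of the per-TB flags word from the deleted
 * cpu_get_tb_cpu_state(). Constants copied from the deleted cpu.h. */
#include <stdint.h>
#include <stdio.h>

#define PFIX_FLAG 0x800
#define S_FLAG    0x200
#define P_FLAG    0x80
#define U_FLAG    0x40
#define X_FLAG    0x10

static uint32_t tb_flags(int dslot, uint32_t ccs)
{
    return dslot | (ccs & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG));
}

int main(void)
{
    /* user mode, X set, plus a C flag that is deliberately not included */
    uint32_t f = tb_flags(1, U_FLAG | X_FLAG | 0x01);
    printf("flags=0x%x user=%d xflag=%d dslot=%d\n",
           f, !!(f & U_FLAG), !!(f & X_FLAG), (int)(f & 0xf));
    return 0;
}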
diff --git a/target/cris/crisv10-decode.h b/target/cris/crisv10-decode.h
deleted file mode 100644
index 9c531f3..0000000
--- a/target/cris/crisv10-decode.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * CRISv10 insn decoding macros.
- *
- * Copyright (c) 2010 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TARGET_CRIS_CRISV10_DECODE_H
-#define TARGET_CRIS_CRISV10_DECODE_H
-
-#define CRISV10_MODE_QIMMEDIATE 0
-#define CRISV10_MODE_REG 1
-#define CRISV10_MODE_INDIRECT 2
-#define CRISV10_MODE_AUTOINC 3
-
-/* Quick Immediate. */
-#define CRISV10_QIMM_BCC_R0 0
-#define CRISV10_QIMM_BCC_R1 1
-#define CRISV10_QIMM_BCC_R2 2
-#define CRISV10_QIMM_BCC_R3 3
-
-#define CRISV10_QIMM_BDAP_R0 4
-#define CRISV10_QIMM_BDAP_R1 5
-#define CRISV10_QIMM_BDAP_R2 6
-#define CRISV10_QIMM_BDAP_R3 7
-
-#define CRISV10_QIMM_ADDQ 8
-#define CRISV10_QIMM_MOVEQ 9
-#define CRISV10_QIMM_SUBQ 10
-#define CRISV10_QIMM_CMPQ 11
-#define CRISV10_QIMM_ANDQ 12
-#define CRISV10_QIMM_ORQ 13
-#define CRISV10_QIMM_ASHQ 14
-#define CRISV10_QIMM_LSHQ 15
-
-
-#define CRISV10_REG_ADDX 0
-#define CRISV10_REG_MOVX 1
-#define CRISV10_REG_SUBX 2
-#define CRISV10_REG_LSL 3
-#define CRISV10_REG_ADDI 4
-#define CRISV10_REG_BIAP 5
-#define CRISV10_REG_NEG 6
-#define CRISV10_REG_BOUND 7
-#define CRISV10_REG_ADD 8
-#define CRISV10_REG_MOVE_R 9
-#define CRISV10_REG_MOVE_SPR_R 9
-#define CRISV10_REG_MOVE_R_SPR 8
-#define CRISV10_REG_SUB 10
-#define CRISV10_REG_CMP 11
-#define CRISV10_REG_AND 12
-#define CRISV10_REG_OR 13
-#define CRISV10_REG_ASR 14
-#define CRISV10_REG_LSR 15
-
-#define CRISV10_REG_BTST 3
-#define CRISV10_REG_SCC 4
-#define CRISV10_REG_SETF 6
-#define CRISV10_REG_CLEARF 7
-#define CRISV10_REG_BIAP 5
-#define CRISV10_REG_ABS 10
-#define CRISV10_REG_DSTEP 11
-#define CRISV10_REG_LZ 12
-#define CRISV10_REG_NOT 13
-#define CRISV10_REG_SWAP 13
-#define CRISV10_REG_XOR 14
-#define CRISV10_REG_MSTEP 15
-
-/* Indirect, var size. */
-#define CRISV10_IND_TEST 14
-#define CRISV10_IND_MUL 4
-#define CRISV10_IND_BDAP_M 5
-#define CRISV10_IND_ADD 8
-#define CRISV10_IND_MOVE_M_R 9
-
-
-/* indirect fixed size. */
-#define CRISV10_IND_ADDX 0
-#define CRISV10_IND_MOVX 1
-#define CRISV10_IND_SUBX 2
-#define CRISV10_IND_CMPX 3
-#define CRISV10_IND_JUMP_M 4
-#define CRISV10_IND_DIP 5
-#define CRISV10_IND_JUMP_R 6
-#define CRISV17_IND_ADDC 6
-#define CRISV10_IND_BOUND 7
-#define CRISV10_IND_BCC_M 7
-#define CRISV10_IND_MOVE_M_SPR 8
-#define CRISV10_IND_MOVE_SPR_M 9
-#define CRISV10_IND_SUB 10
-#define CRISV10_IND_CMP 11
-#define CRISV10_IND_AND 12
-#define CRISV10_IND_OR 13
-#define CRISV10_IND_MOVE_R_M 15
-
-#define CRISV10_IND_MOVEM_M_R 14
-#define CRISV10_IND_MOVEM_R_M 15
-
-#endif
diff --git a/target/cris/crisv32-decode.h b/target/cris/crisv32-decode.h
deleted file mode 100644
index fa0a7f0..0000000
--- a/target/cris/crisv32-decode.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * CRIS insn decoding macros.
- *
- * Copyright (c) 2007 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef CRISV32_DECODE_H
-#define CRISV32_DECODE_H
-
-/* Convenient binary macros. */
-#define HEX__(n) 0x##n##LU
-#define B8__(x) ((x&0x0000000FLU)?1:0) \
- + ((x&0x000000F0LU)?2:0) \
- + ((x&0x00000F00LU)?4:0) \
- + ((x&0x0000F000LU)?8:0) \
- + ((x&0x000F0000LU)?16:0) \
- + ((x&0x00F00000LU)?32:0) \
- + ((x&0x0F000000LU)?64:0) \
- + ((x&0xF0000000LU)?128:0)
-#define B8(d) ((unsigned char)B8__(HEX__(d)))
-
-/* Quick imm. */
-#define DEC_BCCQ {B8(00000000), B8(11110000)}
-#define DEC_ADDOQ {B8(00010000), B8(11110000)}
-#define DEC_ADDQ {B8(00100000), B8(11111100)}
-#define DEC_MOVEQ {B8(00100100), B8(11111100)}
-#define DEC_SUBQ {B8(00101000), B8(11111100)}
-#define DEC_CMPQ {B8(00101100), B8(11111100)}
-#define DEC_ANDQ {B8(00110000), B8(11111100)}
-#define DEC_ORQ {B8(00110100), B8(11111100)}
-#define DEC_BTSTQ {B8(00111000), B8(11111110)}
-#define DEC_ASRQ {B8(00111010), B8(11111110)}
-#define DEC_LSLQ {B8(00111100), B8(11111110)}
-#define DEC_LSRQ {B8(00111110), B8(11111110)}
-
-/* Register. */
-#define DEC_MOVU_R {B8(01000100), B8(11111110)}
-#define DEC_MOVU_R {B8(01000100), B8(11111110)}
-#define DEC_MOVS_R {B8(01000110), B8(11111110)}
-#define DEC_MOVE_R {B8(01100100), B8(11111100)}
-#define DEC_MOVE_RP {B8(01100011), B8(11111111)}
-#define DEC_MOVE_PR {B8(01100111), B8(11111111)}
-#define DEC_DSTEP_R {B8(01101111), B8(11111111)}
-#define DEC_MOVE_RS {B8(10110111), B8(11111111)}
-#define DEC_MOVE_SR {B8(11110111), B8(11111111)}
-#define DEC_ADDU_R {B8(01000000), B8(11111110)}
-#define DEC_ADDS_R {B8(01000010), B8(11111110)}
-#define DEC_ADD_R {B8(01100000), B8(11111100)}
-#define DEC_ADDI_R {B8(01010000), B8(11111100)}
-#define DEC_MULS_R {B8(11010000), B8(11111100)}
-#define DEC_MULU_R {B8(10010000), B8(11111100)}
-#define DEC_ADDI_ACR {B8(01010100), B8(11111100)}
-#define DEC_NEG_R {B8(01011000), B8(11111100)}
-#define DEC_BOUND_R {B8(01011100), B8(11111100)}
-#define DEC_SUBU_R {B8(01001000), B8(11111110)}
-#define DEC_SUBS_R {B8(01001010), B8(11111110)}
-#define DEC_SUB_R {B8(01101000), B8(11111100)}
-#define DEC_CMP_R {B8(01101100), B8(11111100)}
-#define DEC_AND_R {B8(01110000), B8(11111100)}
-#define DEC_ABS_R {B8(01101011), B8(11111111)}
-#define DEC_LZ_R {B8(01110011), B8(11111111)}
-#define DEC_MCP_R {B8(01111111), B8(11111111)}
-#define DEC_SWAP_R {B8(01110111), B8(11111111)}
-#define DEC_XOR_R {B8(01111011), B8(11111111)}
-#define DEC_LSL_R {B8(01001100), B8(11111100)}
-#define DEC_LSR_R {B8(01111100), B8(11111100)}
-#define DEC_ASR_R {B8(01111000), B8(11111100)}
-#define DEC_OR_R {B8(01110100), B8(11111100)}
-#define DEC_BTST_R {B8(01001111), B8(11111111)}
-
-/* Fixed. */
-#define DEC_SETF {B8(01011011), B8(11111111)}
-#define DEC_CLEARF {B8(01011111), B8(11111111)}
-
-/* Memory. */
-#define DEC_ADDU_M {B8(10000000), B8(10111110)}
-#define DEC_ADDS_M {B8(10000010), B8(10111110)}
-#define DEC_MOVU_M {B8(10000100), B8(10111110)}
-#define DEC_MOVS_M {B8(10000110), B8(10111110)}
-#define DEC_SUBU_M {B8(10001000), B8(10111110)}
-#define DEC_SUBS_M {B8(10001010), B8(10111110)}
-#define DEC_CMPU_M {B8(10001100), B8(10111110)}
-#define DEC_CMPS_M {B8(10001110), B8(10111110)}
-#define DEC_ADDO_M {B8(10010100), B8(10111100)}
-#define DEC_BOUND_M {B8(10011100), B8(10111100)}
-#define DEC_ADD_M {B8(10100000), B8(10111100)}
-#define DEC_MOVE_MR {B8(10100100), B8(10111100)}
-#define DEC_SUB_M {B8(10101000), B8(10111100)}
-#define DEC_CMP_M {B8(10101100), B8(10111100)}
-#define DEC_AND_M {B8(10110000), B8(10111100)}
-#define DEC_OR_M {B8(10110100), B8(10111100)}
-#define DEC_TEST_M {B8(10111000), B8(10111100)}
-#define DEC_MOVE_RM {B8(10111100), B8(10111100)}
-
-#define DEC_ADDC_R {B8(01010111), B8(11111111)}
-#define DEC_ADDC_MR {B8(10011010), B8(10111111)}
-#define DEC_LAPCQ {B8(10010111), B8(11111111)}
-#define DEC_LAPC_IM {B8(11010111), B8(11111111)}
-
-#define DEC_MOVE_MP {B8(10100011), B8(10111111)}
-#define DEC_MOVE_PM {B8(10100111), B8(10111111)}
-
-#define DEC_SCC_R {B8(01010011), B8(11111111)}
-#define DEC_RFE_ETC {B8(10010011), B8(11111111)}
-#define DEC_JUMP_P {B8(10011111), B8(11111111)}
-#define DEC_BCC_IM {B8(11011111), B8(11111111)}
-#define DEC_JAS_R {B8(10011011), B8(11111111)}
-#define DEC_JASC_R {B8(10110011), B8(11111111)}
-#define DEC_JAS_IM {B8(11011011), B8(11111111)}
-#define DEC_JASC_IM {B8(11110011), B8(11111111)}
-#define DEC_BAS_IM {B8(11101011), B8(11111111)}
-#define DEC_BASC_IM {B8(11101111), B8(11111111)}
-#define DEC_MOVEM_MR {B8(10111011), B8(10111111)}
-#define DEC_MOVEM_RM {B8(10111111), B8(10111111)}
-
-#define DEC_FTAG_FIDX_D_M {B8(10101011), B8(11111111)}
-#define DEC_FTAG_FIDX_I_M {B8(11010011), B8(11111111)}
-
-#endif
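
Editorial note: the deleted "convenient binary macros" turn each hex nibble of the argument into one bit, so the opcode/mask pairs above can be written in binary. Reproduced verbatim with a check against the DEC_MOVE_R entry:

/* The deleted B8() binary macro: B8(01100100) builds the byte 0x64. */
#include <assert.h>
#include <stdio.h>

#define HEX__(n) 0x##n##LU
#define B8__(x) ((x&0x0000000FLU)?1:0) \
              + ((x&0x000000F0LU)?2:0) \
              + ((x&0x00000F00LU)?4:0) \
              + ((x&0x0000F000LU)?8:0) \
              + ((x&0x000F0000LU)?16:0) \
              + ((x&0x00F00000LU)?32:0) \
              + ((x&0x0F000000LU)?64:0) \
              + ((x&0xF0000000LU)?128:0)
#define B8(d) ((unsigned char)B8__(HEX__(d)))

int main(void)
{
    assert(B8(01100100) == 0x64);   /* DEC_MOVE_R opcode byte */
    assert(B8(11111100) == 0xfc);   /* DEC_MOVE_R mask byte */
    printf("0x%02x 0x%02x\n", B8(01100100), B8(11111100));
    return 0;
}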
diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c
deleted file mode 100644
index 9e87069..0000000
--- a/target/cris/gdbstub.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * CRIS gdb server stub
- *
- * Copyright (c) 2003-2005 Fabrice Bellard
- * Copyright (c) 2013 SUSE LINUX Products GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "gdbstub/helpers.h"
-
-int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
-{
- CPUCRISState *env = cpu_env(cs);
-
- if (n < 15) {
- return gdb_get_reg32(mem_buf, env->regs[n]);
- }
-
- if (n == 15) {
- return gdb_get_reg32(mem_buf, env->pc);
- }
-
- if (n < 32) {
- switch (n) {
- case 16:
- return gdb_get_reg8(mem_buf, env->pregs[n - 16]);
- case 17:
- return gdb_get_reg8(mem_buf, env->pregs[n - 16]);
- case 20:
- case 21:
- return gdb_get_reg16(mem_buf, env->pregs[n - 16]);
- default:
- if (n >= 23) {
- return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
- }
- break;
- }
- }
- return 0;
-}
-
-int cris_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
-{
- CPUCRISState *env = cpu_env(cs);
- uint8_t srs;
-
- srs = env->pregs[PR_SRS];
- if (n < 16) {
- return gdb_get_reg32(mem_buf, env->regs[n]);
- }
-
- if (n >= 21 && n < 32) {
- return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
- }
- if (n >= 33 && n < 49) {
- return gdb_get_reg32(mem_buf, env->sregs[srs][n - 33]);
- }
- switch (n) {
- case 16:
- return gdb_get_reg8(mem_buf, env->pregs[0]);
- case 17:
- return gdb_get_reg8(mem_buf, env->pregs[1]);
- case 18:
- return gdb_get_reg32(mem_buf, env->pregs[2]);
- case 19:
- return gdb_get_reg8(mem_buf, srs);
- case 20:
- return gdb_get_reg16(mem_buf, env->pregs[4]);
- case 32:
- return gdb_get_reg32(mem_buf, env->pc);
- }
-
- return 0;
-}
-
-int cris_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
-{
- CPUCRISState *env = cpu_env(cs);
- uint32_t tmp;
-
- if (n > 49) {
- return 0;
- }
-
- tmp = ldl_p(mem_buf);
-
- if (n < 16) {
- env->regs[n] = tmp;
- }
-
- if (n >= 21 && n < 32) {
- env->pregs[n - 16] = tmp;
- }
-
- /* FIXME: Should support function regs be writable? */
- switch (n) {
- case 16:
- return 1;
- case 17:
- return 1;
- case 18:
- env->pregs[PR_PID] = tmp;
- break;
- case 19:
- return 1;
- case 20:
- return 2;
- case 32:
- env->pc = tmp;
- break;
- }
-
- return 4;
-}
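
Editorial note: the deleted v32 read path above implies a 49-register gdb layout (matching gdb_num_core_regs = 49 in the deleted cpu.c): 0-15 general registers, 16-31 special registers (some read back as 8- or 16-bit), 32 the pseudo PC, 33-48 the currently selected bank of support-function registers. A standalone sketch of that numbering only (the write path accepts a slightly different subset):

/* Sketch of the v32 gdb register numbering from the deleted
 * cris_cpu_gdb_read_register(). Illustration, not QEMU code. */
#include <stdio.h>

static const char *cris_gdb_reg_kind(int n)
{
    if (n < 0 || n > 48) return "out of range";
    if (n < 16)  return "general register r0..r15";
    if (n == 32) return "pseudo register PC";
    if (n >= 33) return "support-function register of the selected bank";
    return "special register P0..P15";
}

int main(void)
{
    int samples[] = { 0, 14, 19, 32, 33, 48, 49 };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("gdb reg %2d: %s\n", samples[i], cris_gdb_reg_kind(samples[i]));
    }
    return 0;
}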
diff --git a/target/cris/helper.c b/target/cris/helper.c
deleted file mode 100644
index 1c3f868..0000000
--- a/target/cris/helper.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * CRIS helper routines.
- *
- * Copyright (c) 2007 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
-#include "mmu.h"
-#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
-#include "exec/helper-proto.h"
-
-
-//#define CRIS_HELPER_DEBUG
-
-
-#ifdef CRIS_HELPER_DEBUG
-#define D(x) x
-#define D_LOG(...) qemu_log(__VA_ARGS__)
-#else
-#define D(x)
-#define D_LOG(...) do { } while (0)
-#endif
-
-static void cris_shift_ccs(CPUCRISState *env)
-{
- uint32_t ccs;
- /* Apply the ccs shift. */
- ccs = env->pregs[PR_CCS];
- ccs = ((ccs & 0xc0000000) | ((ccs << 12) >> 2)) & ~0x3ff;
- env->pregs[PR_CCS] = ccs;
-}
-
-bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- CPUCRISState *env = cpu_env(cs);
- struct cris_mmu_result res;
- int prot, miss;
- target_ulong phy;
-
- miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
- access_type, mmu_idx, 0);
- if (likely(!miss)) {
- /*
- * Mask off the cache selection bit. The ETRAX busses do not
- * see the top bit.
- */
- phy = res.phy & ~0x80000000;
- prot = res.prot;
- tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
- prot, mmu_idx, TARGET_PAGE_SIZE);
- return true;
- }
-
- if (probe) {
- return false;
- }
-
- if (cs->exception_index == EXCP_BUSFAULT) {
- cpu_abort(cs, "CRIS: Illegal recursive bus fault."
- "addr=%" VADDR_PRIx " access_type=%d\n",
- address, access_type);
- }
-
- env->pregs[PR_EDA] = address;
- cs->exception_index = EXCP_BUSFAULT;
- env->fault_vector = res.bf_vec;
- if (retaddr) {
- if (cpu_restore_state(cs, retaddr)) {
- /* Evaluate flags after retranslation. */
- helper_top_evaluate_flags(env);
- }
- }
- cpu_loop_exit(cs);
-}
-
-void crisv10_cpu_do_interrupt(CPUState *cs)
-{
- CPUCRISState *env = cpu_env(cs);
- int ex_vec = -1;
-
- D_LOG("exception index=%d interrupt_req=%d\n",
- cs->exception_index,
- cs->interrupt_request);
-
- if (env->dslot) {
- /* CRISv10 never takes interrupts while in a delay-slot. */
- cpu_abort(cs, "CRIS: Interrupt on delay-slot\n");
- }
-
- assert(!(env->pregs[PR_CCS] & PFIX_FLAG));
- switch (cs->exception_index) {
- case EXCP_BREAK:
- /* These exceptions are generated by the core itself.
- ERP should point to the insn following the brk. */
- ex_vec = env->trap_vector;
- env->pregs[PRV10_BRP] = env->pc;
- break;
-
- case EXCP_NMI:
- /* NMI is hardwired to vector zero. */
- ex_vec = 0;
- env->pregs[PR_CCS] &= ~M_FLAG_V10;
- env->pregs[PRV10_BRP] = env->pc;
- break;
-
- case EXCP_BUSFAULT:
- cpu_abort(cs, "Unhandled busfault");
- break;
-
- default:
- /* The interrupt controller gives us the vector. */
- ex_vec = env->interrupt_vector;
- /* Normal interrupts are taken between
- TB's. env->pc is valid here. */
- env->pregs[PR_ERP] = env->pc;
- break;
- }
-
- if (env->pregs[PR_CCS] & U_FLAG) {
- /* Swap stack pointers. */
- env->pregs[PR_USP] = env->regs[R_SP];
- env->regs[R_SP] = env->ksp;
- }
-
- /* Now that we are in kernel mode, load the handlers address. */
- env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
- env->locked_irq = 1;
- env->pregs[PR_CCS] |= F_FLAG_V10; /* set F. */
-
- qemu_log_mask(CPU_LOG_INT, "%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
- __func__, env->pc, ex_vec,
- env->pregs[PR_CCS],
- env->pregs[PR_PID],
- env->pregs[PR_ERP]);
-}
-
-void cris_cpu_do_interrupt(CPUState *cs)
-{
- CPUCRISState *env = cpu_env(cs);
- int ex_vec = -1;
-
- D_LOG("exception index=%d interrupt_req=%d\n",
- cs->exception_index,
- cs->interrupt_request);
-
- switch (cs->exception_index) {
- case EXCP_BREAK:
- /* These exceptions are generated by the core itself.
- ERP should point to the insn following the brk. */
- ex_vec = env->trap_vector;
- env->pregs[PR_ERP] = env->pc;
- break;
-
- case EXCP_NMI:
- /* NMI is hardwired to vector zero. */
- ex_vec = 0;
- env->pregs[PR_CCS] &= ~M_FLAG_V32;
- env->pregs[PR_NRP] = env->pc;
- break;
-
- case EXCP_BUSFAULT:
- ex_vec = env->fault_vector;
- env->pregs[PR_ERP] = env->pc;
- break;
-
- default:
- /* The interrupt controller gives us the vector. */
- ex_vec = env->interrupt_vector;
- /* Normal interrupts are taken between
- TB's. env->pc is valid here. */
- env->pregs[PR_ERP] = env->pc;
- break;
- }
-
- /* Fill in the IDX field. */
- env->pregs[PR_EXS] = (ex_vec & 0xff) << 8;
-
- if (env->dslot) {
- D_LOG("excp isr=%x PC=%x ds=%d SP=%x"
- " ERP=%x pid=%x ccs=%x cc=%d %x\n",
- ex_vec, env->pc, env->dslot,
- env->regs[R_SP],
- env->pregs[PR_ERP], env->pregs[PR_PID],
- env->pregs[PR_CCS],
- env->cc_op, env->cc_mask);
- /* We loose the btarget, btaken state here so rexec the
- branch. */
- env->pregs[PR_ERP] -= env->dslot;
- /* Exception starts with dslot cleared. */
- env->dslot = 0;
- }
-
- if (env->pregs[PR_CCS] & U_FLAG) {
- /* Swap stack pointers. */
- env->pregs[PR_USP] = env->regs[R_SP];
- env->regs[R_SP] = env->ksp;
- }
-
- /* Apply the CRIS CCS shift. Clears U if set. */
- cris_shift_ccs(env);
-
- /* Now that we are in kernel mode, load the handlers address.
- This load may not fault, real hw leaves that behaviour as
- undefined. */
- env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
-
- /* Clear the excption_index to avoid spurious hw_aborts for recursive
- bus faults. */
- cs->exception_index = -1;
-
- D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
- __func__, env->pc, ex_vec,
- env->pregs[PR_CCS],
- env->pregs[PR_PID],
- env->pregs[PR_ERP]);
-}
-
-hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
- uint32_t phy = addr;
- struct cris_mmu_result res;
- int miss;
-
- miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_DATA_LOAD, 0, 1);
- /* If D TLB misses, try I TLB. */
- if (miss) {
- miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_INST_FETCH, 0, 1);
- }
-
- if (!miss) {
- phy = res.phy;
- }
- D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
- return phy;
-}
-
-bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- CPUClass *cc = CPU_GET_CLASS(cs);
- CPUCRISState *env = cpu_env(cs);
- bool ret = false;
-
- if (interrupt_request & CPU_INTERRUPT_HARD
- && (env->pregs[PR_CCS] & I_FLAG)
- && !env->locked_irq) {
- cs->exception_index = EXCP_IRQ;
- cc->tcg_ops->do_interrupt(cs);
- ret = true;
- }
- if (interrupt_request & CPU_INTERRUPT_NMI) {
- unsigned int m_flag_archval;
- if (env->pregs[PR_VR] < 32) {
- m_flag_archval = M_FLAG_V10;
- } else {
- m_flag_archval = M_FLAG_V32;
- }
- if ((env->pregs[PR_CCS] & m_flag_archval)) {
- cs->exception_index = EXCP_NMI;
- cc->tcg_ops->do_interrupt(cs);
- ret = true;
- }
- }
-
- return ret;
-}
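
Editorial note: the deleted cris_shift_ccs() above and the cris_ccs_rshift() in the deleted op_helper.c (further down in this diff) appear to treat the v32 CCS as a small stack of 10-bit flag frames: exception entry shifts the frames up by 10 and clears the live one (dropping U, so the handler runs in kernel mode), while RFE/RFN shift them back down. A standalone demo with the two formulas copied verbatim (the sample CCS value is arbitrary):

/* Push/pop of the 10-bit CCS flag frame, formulas copied from the deleted
 * helper.c (entry) and op_helper.c (return path). */
#include <stdint.h>
#include <stdio.h>

#define U_FLAG 0x40
#define I_FLAG 0x20

static uint32_t ccs_push(uint32_t ccs)   /* exception entry */
{
    return ((ccs & 0xc0000000) | ((ccs << 12) >> 2)) & ~0x3ff;
}

static uint32_t ccs_pop(uint32_t ccs)    /* rfe/rfn */
{
    return (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
}

int main(void)
{
    uint32_t ccs = U_FLAG | I_FLAG;              /* user mode, irqs enabled */
    uint32_t in_handler = ccs_push(ccs);         /* U/I cleared, frame saved above */
    uint32_t restored = ccs_pop(in_handler);     /* saved frame back in place */
    printf("before=0x%08x handler=0x%08x after=0x%08x\n",
           ccs, in_handler, restored);           /* 0x60, 0x18000, 0x60 */
    return 0;
}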
diff --git a/target/cris/helper.h b/target/cris/helper.h
deleted file mode 100644
index 3abf608..0000000
--- a/target/cris/helper.h
+++ /dev/null
@@ -1,23 +0,0 @@
-DEF_HELPER_2(raise_exception, noreturn, env, i32)
-DEF_HELPER_2(tlb_flush_pid, void, env, i32)
-DEF_HELPER_2(spc_write, void, env, i32)
-DEF_HELPER_1(rfe, void, env)
-DEF_HELPER_1(rfn, void, env)
-
-DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
-DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
-
-DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
-
-DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
-DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
-DEF_HELPER_FLAGS_5(evaluate_flags_mcp, TCG_CALL_NO_SE, i32, env,
- i32, i32, i32, i32)
-DEF_HELPER_FLAGS_5(evaluate_flags_alu_4, TCG_CALL_NO_SE, i32, env,
- i32, i32, i32, i32)
-DEF_HELPER_FLAGS_5(evaluate_flags_sub_4, TCG_CALL_NO_SE, i32, env,
- i32, i32, i32, i32)
-DEF_HELPER_FLAGS_3(evaluate_flags_move_4, TCG_CALL_NO_SE, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(evaluate_flags_move_2, TCG_CALL_NO_SE, i32, env, i32, i32)
-DEF_HELPER_1(evaluate_flags, void, env)
-DEF_HELPER_1(top_evaluate_flags, void, env)
diff --git a/target/cris/machine.c b/target/cris/machine.c
deleted file mode 100644
index 7b9bde8..0000000
--- a/target/cris/machine.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * CRIS virtual CPU state save/load support
- *
- * Copyright (c) 2012 Red Hat, Inc.
- * Written by Juan Quintela <quintela@redhat.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "migration/cpu.h"
-
-static const VMStateDescription vmstate_tlbset = {
- .name = "cpu/tlbset",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(lo, TLBSet),
- VMSTATE_UINT32(hi, TLBSet),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static const VMStateDescription vmstate_cris_env = {
- .name = "env",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16),
- VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16),
- VMSTATE_UINT32(pc, CPUCRISState),
- VMSTATE_UINT32(ksp, CPUCRISState),
- VMSTATE_INT32(dslot, CPUCRISState),
- VMSTATE_INT32(btaken, CPUCRISState),
- VMSTATE_UINT32(btarget, CPUCRISState),
- VMSTATE_UINT32(cc_op, CPUCRISState),
- VMSTATE_UINT32(cc_mask, CPUCRISState),
- VMSTATE_UINT32(cc_dest, CPUCRISState),
- VMSTATE_UINT32(cc_src, CPUCRISState),
- VMSTATE_UINT32(cc_result, CPUCRISState),
- VMSTATE_INT32(cc_size, CPUCRISState),
- VMSTATE_INT32(cc_x, CPUCRISState),
- VMSTATE_INT32(locked_irq, CPUCRISState),
- VMSTATE_INT32(interrupt_vector, CPUCRISState),
- VMSTATE_INT32(fault_vector, CPUCRISState),
- VMSTATE_INT32(trap_vector, CPUCRISState),
- VMSTATE_UINT32_ARRAY(sregs[0], CPUCRISState, 16),
- VMSTATE_UINT32_ARRAY(sregs[1], CPUCRISState, 16),
- VMSTATE_UINT32_ARRAY(sregs[2], CPUCRISState, 16),
- VMSTATE_UINT32_ARRAY(sregs[3], CPUCRISState, 16),
- VMSTATE_UINT32(mmu_rand_lfsr, CPUCRISState),
- VMSTATE_STRUCT_ARRAY(tlbsets[0][0], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[0][1], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[0][2], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[0][3], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[1][0], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[1][1], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[1][2], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_STRUCT_ARRAY(tlbsets[1][3], CPUCRISState, 16, 0,
- vmstate_tlbset, TLBSet),
- VMSTATE_END_OF_LIST()
- }
-};
-
-const VMStateDescription vmstate_cris_cpu = {
- .name = "cpu",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_CPU(),
- VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState),
- VMSTATE_END_OF_LIST()
- }
-};
diff --git a/target/cris/meson.build b/target/cris/meson.build
deleted file mode 100644
index bbfcdf7..0000000
--- a/target/cris/meson.build
+++ /dev/null
@@ -1,17 +0,0 @@
-cris_ss = ss.source_set()
-cris_ss.add(files(
- 'cpu.c',
- 'gdbstub.c',
- 'op_helper.c',
- 'translate.c',
-))
-
-cris_system_ss = ss.source_set()
-cris_system_ss.add(files(
- 'helper.c',
- 'machine.c',
- 'mmu.c',
-))
-
-target_arch += {'cris': cris_ss}
-target_system_arch += {'cris': cris_system_ss}
diff --git a/target/cris/mmu.c b/target/cris/mmu.c
deleted file mode 100644
index d51008c..0000000
--- a/target/cris/mmu.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * CRIS mmu emulation.
- *
- * Copyright (c) 2007 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/page-protection.h"
-#include "mmu.h"
-
-#ifdef DEBUG
-#define D(x) x
-#define D_LOG(...) qemu_log(__VA_ARGS__)
-#else
-#define D(x) do { } while (0)
-#define D_LOG(...) do { } while (0)
-#endif
-
-void cris_mmu_init(CPUCRISState *env)
-{
- env->mmu_rand_lfsr = 0xcccc;
-}
-
-#define SR_POLYNOM 0x8805
-static inline unsigned int compute_polynom(unsigned int sr)
-{
- unsigned int i;
- unsigned int f;
-
- f = 0;
- for (i = 0; i < 16; i++) {
- f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
- }
-
- return f;
-}
-
-static void cris_mmu_update_rand_lfsr(CPUCRISState *env)
-{
- unsigned int f;
-
- /* Update lfsr at every fault. */
- f = compute_polynom(env->mmu_rand_lfsr);
- env->mmu_rand_lfsr >>= 1;
- env->mmu_rand_lfsr |= (f << 15);
- env->mmu_rand_lfsr &= 0xffff;
-}
-
-static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
-{
- return (rw_gc_cfg & 12) != 0;
-}
-
-static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
-{
- return (1 << seg) & rw_mm_cfg;
-}
-
-static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg)
-{
- uint32_t base;
- int i;
-
- if (seg < 8) {
- base = env->sregs[SFR_RW_MM_KBASE_LO];
- } else {
- base = env->sregs[SFR_RW_MM_KBASE_HI];
- }
-
- i = seg & 7;
- base >>= i * 4;
- base &= 15;
-
- base <<= 28;
- return base;
-}
-
-/* Used by the tlb decoder. */
-#define EXTRACT_FIELD(src, start, end) \
- (((src) >> start) & ((1 << (end - start + 1)) - 1))
-
-static inline void set_field(uint32_t *dst, unsigned int val,
- unsigned int offset, unsigned int width)
-{
- uint32_t mask;
-
- mask = (1 << width) - 1;
- mask <<= offset;
- val <<= offset;
-
- val &= mask;
- *dst &= ~(mask);
- *dst |= val;
-}
-
-#ifdef DEBUG
-static void dump_tlb(CPUCRISState *env, int mmu)
-{
- int set;
- int idx;
- uint32_t hi, lo, tlb_vpn, tlb_pfn;
-
- for (set = 0; set < 4; set++) {
- for (idx = 0; idx < 16; idx++) {
- lo = env->tlbsets[mmu][set][idx].lo;
- hi = env->tlbsets[mmu][set][idx].hi;
- tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
- tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
-
- printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
- set, idx, hi, lo, tlb_vpn, tlb_pfn);
- }
- }
-}
-#endif
-
-static int cris_mmu_translate_page(struct cris_mmu_result *res,
- CPUCRISState *env, uint32_t vaddr,
- MMUAccessType access_type,
- int usermode, int debug)
-{
- unsigned int vpage;
- unsigned int idx;
- uint32_t pid, lo, hi;
- uint32_t tlb_vpn, tlb_pfn = 0;
- int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
- int cfg_v, cfg_k, cfg_w, cfg_x;
- int set, match = 0;
- uint32_t r_cause;
- uint32_t r_cfg;
- int rwcause;
- int mmu = 1; /* Data mmu is default. */
- int vect_base;
-
- r_cause = env->sregs[SFR_R_MM_CAUSE];
- r_cfg = env->sregs[SFR_RW_MM_CFG];
- pid = env->pregs[PR_PID] & 0xff;
-
- switch (access_type) {
- case MMU_INST_FETCH:
- rwcause = CRIS_MMU_ERR_EXEC;
- mmu = 0;
- break;
- case MMU_DATA_STORE:
- rwcause = CRIS_MMU_ERR_WRITE;
- break;
- default:
- case MMU_DATA_LOAD:
- rwcause = CRIS_MMU_ERR_READ;
- break;
- }
-
- /* I exception vectors 4 - 7, D 8 - 11. */
- vect_base = (mmu + 1) * 4;
-
- vpage = vaddr >> 13;
-
- /*
- * We know the index which to check on each set.
- * Scan both I and D.
- */
- idx = vpage & 15;
- for (set = 0; set < 4; set++) {
- lo = env->tlbsets[mmu][set][idx].lo;
- hi = env->tlbsets[mmu][set][idx].hi;
-
- tlb_vpn = hi >> 13;
- tlb_pid = EXTRACT_FIELD(hi, 0, 7);
- tlb_g = EXTRACT_FIELD(lo, 4, 4);
-
- D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
- mmu, set, idx, tlb_vpn, vpage, lo, hi);
- if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) {
- match = 1;
- break;
- }
- }
-
- res->bf_vec = vect_base;
- if (match) {
- cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
- cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
- cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
- cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
-
- tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
- tlb_v = EXTRACT_FIELD(lo, 3, 3);
- tlb_k = EXTRACT_FIELD(lo, 2, 2);
- tlb_w = EXTRACT_FIELD(lo, 1, 1);
- tlb_x = EXTRACT_FIELD(lo, 0, 0);
-
- /*
- * set_exception_vector(0x04, i_mmu_refill);
- * set_exception_vector(0x05, i_mmu_invalid);
- * set_exception_vector(0x06, i_mmu_access);
- * set_exception_vector(0x07, i_mmu_execute);
- * set_exception_vector(0x08, d_mmu_refill);
- * set_exception_vector(0x09, d_mmu_invalid);
- * set_exception_vector(0x0a, d_mmu_access);
- * set_exception_vector(0x0b, d_mmu_write);
- */
- if (cfg_k && tlb_k && usermode) {
- D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
- vaddr, lo, env->pc));
- match = 0;
- res->bf_vec = vect_base + 2;
- } else if (access_type == MMU_DATA_STORE && cfg_w && !tlb_w) {
- D(printf("tlb: write protected %x lo=%x pc=%x\n",
- vaddr, lo, env->pc));
- match = 0;
- /* write accesses never go through the I mmu. */
- res->bf_vec = vect_base + 3;
- } else if (access_type == MMU_INST_FETCH && cfg_x && !tlb_x) {
- D(printf("tlb: exec protected %x lo=%x pc=%x\n",
- vaddr, lo, env->pc));
- match = 0;
- res->bf_vec = vect_base + 3;
- } else if (cfg_v && !tlb_v) {
- D(printf("tlb: invalid %x\n", vaddr));
- match = 0;
- res->bf_vec = vect_base + 1;
- }
-
- res->prot = 0;
- if (match) {
- res->prot |= PAGE_READ;
- if (tlb_w) {
- res->prot |= PAGE_WRITE;
- }
- if (mmu == 0 && (cfg_x || tlb_x)) {
- res->prot |= PAGE_EXEC;
- }
- } else {
- D(dump_tlb(env, mmu));
- }
- } else {
- /* If refill, provide a randomized set. */
- set = env->mmu_rand_lfsr & 3;
- }
-
- if (!match && !debug) {
- cris_mmu_update_rand_lfsr(env);
-
- /* Compute index. */
- idx = vpage & 15;
-
- /* Update RW_MM_TLB_SEL. */
- env->sregs[SFR_RW_MM_TLB_SEL] = 0;
- set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
- set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
-
- /* Update RW_MM_CAUSE. */
- set_field(&r_cause, rwcause, 8, 2);
- set_field(&r_cause, vpage, 13, 19);
- set_field(&r_cause, pid, 0, 8);
- env->sregs[SFR_R_MM_CAUSE] = r_cause;
- D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
- }
-
- D(printf("%s access=%u mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
- " %x cause=%x sel=%x sp=%x %x %x\n",
- __func__, access_type, match, env->pc,
- vaddr, vpage,
- tlb_vpn, tlb_pfn, tlb_pid,
- pid,
- r_cause,
- env->sregs[SFR_RW_MM_TLB_SEL],
- env->regs[R_SP], env->pregs[PR_USP], env->ksp));
-
- res->phy = tlb_pfn << TARGET_PAGE_BITS;
- return !match;
-}
-
-void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
-{
- target_ulong vaddr;
- unsigned int idx;
- uint32_t lo, hi;
- uint32_t tlb_vpn;
- int tlb_pid, tlb_g, tlb_v;
- unsigned int set;
- unsigned int mmu;
-
- pid &= 0xff;
- for (mmu = 0; mmu < 2; mmu++) {
- for (set = 0; set < 4; set++) {
- for (idx = 0; idx < 16; idx++) {
- lo = env->tlbsets[mmu][set][idx].lo;
- hi = env->tlbsets[mmu][set][idx].hi;
-
- tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
- tlb_pid = EXTRACT_FIELD(hi, 0, 7);
- tlb_g = EXTRACT_FIELD(lo, 4, 4);
- tlb_v = EXTRACT_FIELD(lo, 3, 3);
-
- if (tlb_v && !tlb_g && (tlb_pid == pid)) {
- vaddr = tlb_vpn << TARGET_PAGE_BITS;
- D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr);
- tlb_flush_page(env_cpu(env), vaddr);
- }
- }
- }
- }
-}
-
-int cris_mmu_translate(struct cris_mmu_result *res,
- CPUCRISState *env, uint32_t vaddr,
- MMUAccessType access_type, int mmu_idx, int debug)
-{
- int seg;
- int miss = 0;
- int is_user = mmu_idx == MMU_USER_IDX;
- uint32_t old_srs;
-
- old_srs = env->pregs[PR_SRS];
-
- env->pregs[PR_SRS] = access_type == MMU_INST_FETCH ? 1 : 2;
-
- if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
- res->phy = vaddr;
- res->prot = PAGE_RWX;
- goto done;
- }
-
- seg = vaddr >> 28;
- if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
- uint32_t base;
-
- miss = 0;
- base = cris_mmu_translate_seg(env, seg);
- res->phy = base | (0x0fffffff & vaddr);
- res->prot = PAGE_RWX;
- } else {
- miss = cris_mmu_translate_page(res, env, vaddr, access_type,
- is_user, debug);
- }
- done:
- env->pregs[PR_SRS] = old_srs;
- return miss;
-}
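
Editorial note: the deleted mmu.c kept a 16-bit LFSR that is stepped on every MMU fault with the parity of (state & SR_POLYNOM); its low two bits choose which of the four TLB sets gets refilled. A standalone copy of that update with a short demo loop (only the loop is new; because of the final mask, only the parity of f reaches bit 15):

/* The deleted TLB-replacement LFSR from mmu.c, stepped a few times. */
#include <stdint.h>
#include <stdio.h>

#define SR_POLYNOM 0x8805

static unsigned int compute_polynom(unsigned int sr)
{
    unsigned int i, f = 0;

    for (i = 0; i < 16; i++) {
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
    }
    return f;
}

int main(void)
{
    uint32_t lfsr = 0xcccc;         /* reset value from cris_mmu_init() */

    for (int step = 0; step < 8; step++) {
        unsigned int f = compute_polynom(lfsr);
        lfsr >>= 1;
        lfsr |= (f << 15);
        lfsr &= 0xffff;             /* only the parity of f survives */
        printf("step %d: lfsr=0x%04x -> refill set %u\n", step, lfsr, lfsr & 3);
    }
    return 0;
}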
diff --git a/target/cris/mmu.h b/target/cris/mmu.h
deleted file mode 100644
index d57386e..0000000
--- a/target/cris/mmu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef TARGET_CRIS_MMU_H
-#define TARGET_CRIS_MMU_H
-
-#define CRIS_MMU_ERR_EXEC 0
-#define CRIS_MMU_ERR_READ 1
-#define CRIS_MMU_ERR_WRITE 2
-#define CRIS_MMU_ERR_FLUSH 3
-
-struct cris_mmu_result
-{
- uint32_t phy;
- int prot;
- int bf_vec;
-};
-
-void cris_mmu_init(CPUCRISState *env);
-void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid);
-int cris_mmu_translate(struct cris_mmu_result *res,
- CPUCRISState *env, uint32_t vaddr,
- MMUAccessType access_type, int mmu_idx, int debug);
-
-#endif
diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c
deleted file mode 100644
index 98a9aaf..0000000
--- a/target/cris/op_helper.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * CRIS helper routines
- *
- * Copyright (c) 2007 AXIS Communications
- * Written by Edgar E. Iglesias
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "mmu.h"
-#include "exec/helper-proto.h"
-#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
-
-//#define CRIS_OP_HELPER_DEBUG
-
-
-#ifdef CRIS_OP_HELPER_DEBUG
-#define D(x) x
-#define D_LOG(...) qemu_log(__VA_ARGS__)
-#else
-#define D(x)
-#define D_LOG(...) do { } while (0)
-#endif
-
-void helper_raise_exception(CPUCRISState *env, uint32_t index)
-{
- CPUState *cs = env_cpu(env);
-
- cs->exception_index = index;
- cpu_loop_exit(cs);
-}
-
-void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
-{
-#if !defined(CONFIG_USER_ONLY)
- pid &= 0xff;
- if (pid != (env->pregs[PR_PID] & 0xff)) {
- cris_mmu_flush_pid(env, env->pregs[PR_PID]);
- }
-#endif
-}
-
-void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
-{
-#if !defined(CONFIG_USER_ONLY)
- CPUState *cs = env_cpu(env);
-
- tlb_flush_page(cs, env->pregs[PR_SPC]);
- tlb_flush_page(cs, new_spc);
-#endif
-}
-
-/* Used by the tlb decoder. */
-#define EXTRACT_FIELD(src, start, end) \
- (((src) >> start) & ((1 << (end - start + 1)) - 1))
-
-void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
-{
- uint32_t srs;
- srs = env->pregs[PR_SRS];
- srs &= 3;
- env->sregs[srs][sreg] = env->regs[reg];
-
-#if !defined(CONFIG_USER_ONLY)
- if (srs == 1 || srs == 2) {
- if (sreg == 6) {
- /* Writes to tlb-hi write to mm_cause as a side effect. */
- env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg];
- env->sregs[SFR_R_MM_CAUSE] = env->regs[reg];
- } else if (sreg == 5) {
- uint32_t set;
- uint32_t idx;
- uint32_t lo, hi;
- uint32_t vaddr;
- int tlb_v;
-
- idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
- set >>= 4;
- set &= 3;
-
- idx &= 15;
- /* We've just made a write to tlb_lo. */
- lo = env->sregs[SFR_RW_MM_TLB_LO];
- /* Writes are done via r_mm_cause. */
- hi = env->sregs[SFR_R_MM_CAUSE];
-
- vaddr = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].hi, 13, 31);
- vaddr <<= TARGET_PAGE_BITS;
- tlb_v = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].lo, 3, 3);
- env->tlbsets[srs - 1][set][idx].lo = lo;
- env->tlbsets[srs - 1][set][idx].hi = hi;
-
- D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
- vaddr, tlb_v, env->pc);
- if (tlb_v) {
- tlb_flush_page(env_cpu(env), vaddr);
- }
- }
- }
-#endif
-}
-
-void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg)
-{
- uint32_t srs;
- env->pregs[PR_SRS] &= 3;
- srs = env->pregs[PR_SRS];
-
-#if !defined(CONFIG_USER_ONLY)
- if (srs == 1 || srs == 2) {
- uint32_t set;
- uint32_t idx;
- uint32_t lo, hi;
-
- idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
- set >>= 4;
- set &= 3;
- idx &= 15;
-
- /* Update the mirror regs. */
- hi = env->tlbsets[srs - 1][set][idx].hi;
- lo = env->tlbsets[srs - 1][set][idx].lo;
- env->sregs[SFR_RW_MM_TLB_HI] = hi;
- env->sregs[SFR_RW_MM_TLB_LO] = lo;
- }
-#endif
- env->regs[reg] = env->sregs[srs][sreg];
-}
-
-static void cris_ccs_rshift(CPUCRISState *env)
-{
- uint32_t ccs;
-
- /* Apply the ccs shift. */
- ccs = env->pregs[PR_CCS];
- ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
- if (ccs & U_FLAG) {
- /* Enter user mode. */
- env->ksp = env->regs[R_SP];
- env->regs[R_SP] = env->pregs[PR_USP];
- }
-
- env->pregs[PR_CCS] = ccs;
-}
-
-void helper_rfe(CPUCRISState *env)
-{
- int rflag = env->pregs[PR_CCS] & R_FLAG;
-
- D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
- env->pregs[PR_ERP], env->pregs[PR_PID],
- env->pregs[PR_CCS],
- env->btarget);
-
- cris_ccs_rshift(env);
-
- /* RFE sets the P_FLAG only if the R_FLAG is not set. */
- if (!rflag) {
- env->pregs[PR_CCS] |= P_FLAG;
- }
-}
-
-void helper_rfn(CPUCRISState *env)
-{
- int rflag = env->pregs[PR_CCS] & R_FLAG;
-
- D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
- env->pregs[PR_ERP], env->pregs[PR_PID],
- env->pregs[PR_CCS],
- env->btarget);
-
- cris_ccs_rshift(env);
-
- /* Set the P_FLAG only if the R_FLAG is not set. */
- if (!rflag) {
- env->pregs[PR_CCS] |= P_FLAG;
- }
-
- /* Always set the M flag. */
- env->pregs[PR_CCS] |= M_FLAG_V32;
-}
-
-uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
-{
- /* FIXME: clean this up. */
-
- /*
- * Designer's reference:
- * The N flag is set according to the selected bit in the dest reg.
- * The Z flag is set if the selected bit and all bits to the right are
- * zero.
- * The X flag is cleared.
- * Other flags are left untouched.
- * The destination reg is not affected.
- */
- unsigned int fz, sbit, bset, mask, masked_t0;
-
- sbit = t1 & 31;
- bset = !!(t0 & (1 << sbit));
- mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1;
- masked_t0 = t0 & mask;
- fz = !(masked_t0 | bset);
-
- /* Clear the X, N and Z flags. */
- ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG);
- if (env->pregs[PR_VR] < 32) {
- ccs &= ~(V_FLAG | C_FLAG);
- }
- /* Set the N and Z flags accordingly. */
- ccs |= (bset << 3) | (fz << 2);
- return ccs;
-}
-
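The N/Z rule spelled out in helper_btst's comment is easy to check on plain host integers. The following standalone sketch is not part of the tree; btst_nz and its two-bit result layout are invented for illustration, and it only mirrors the bset/fz computation above, not the CCS bit positions:

#include <stdint.h>
#include <stdio.h>

/* Returns N in bit 1 and Z in bit 0, mirroring bset/fz in the helper. */
static unsigned btst_nz(uint32_t value, uint32_t bit)
{
    unsigned sbit = bit & 31;
    unsigned n = (value >> sbit) & 1;
    uint32_t below = (sbit == 31) ? value : (value & ((1u << (sbit + 1)) - 1));
    unsigned z = (below == 0);
    return (n << 1) | z;
}

int main(void)
{
    /* bit 3 of 0x08 set -> N=1, Z=0 (prints 2); bit 7 clear but lower bits set -> 0 */
    printf("%u %u\n", btst_nz(0x08, 3), btst_nz(0x08, 7));
    /* no bits set at or below bit 5 -> Z=1, prints 1 */
    printf("%u\n", btst_nz(0x00, 5));
    return 0;
}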
-static inline uint32_t evaluate_flags_writeback(CPUCRISState *env,
- uint32_t flags, uint32_t ccs)
-{
- unsigned int x, z, mask;
-
- /* Extended arithmetic, leave the z flag alone. */
- x = env->cc_x;
- mask = env->cc_mask | X_FLAG;
- if (x) {
- z = flags & Z_FLAG;
- mask = mask & ~z;
- }
- flags &= mask;
-
- /* All insns clear the x-flag except setf and clrf. */
- ccs &= ~mask;
- ccs |= flags;
- return ccs;
-}
-
-uint32_t helper_evaluate_flags_muls(CPUCRISState *env,
- uint32_t ccs, uint32_t res, uint32_t mof)
-{
- uint32_t flags = 0;
- int64_t tmp;
- int dneg;
-
- dneg = ((int32_t)res) < 0;
-
- tmp = mof;
- tmp <<= 32;
- tmp |= res;
- if (tmp == 0) {
- flags |= Z_FLAG;
- } else if (tmp < 0) {
- flags |= N_FLAG;
- }
- if ((dneg && mof != -1) || (!dneg && mof != 0)) {
- flags |= V_FLAG;
- }
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-uint32_t helper_evaluate_flags_mulu(CPUCRISState *env,
- uint32_t ccs, uint32_t res, uint32_t mof)
-{
- uint32_t flags = 0;
- uint64_t tmp;
-
- tmp = mof;
- tmp <<= 32;
- tmp |= res;
- if (tmp == 0) {
- flags |= Z_FLAG;
- } else if (tmp >> 63) {
- flags |= N_FLAG;
- }
- if (mof) {
- flags |= V_FLAG;
- }
-
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs,
- uint32_t src, uint32_t dst, uint32_t res)
-{
- uint32_t flags = 0;
-
- src = src & 0x80000000;
- dst = dst & 0x80000000;
-
- if ((res & 0x80000000L) != 0L) {
- flags |= N_FLAG;
- if (!src && !dst) {
- flags |= V_FLAG;
- } else if (src & dst) {
- flags |= R_FLAG;
- }
- } else {
- if (res == 0L) {
- flags |= Z_FLAG;
- }
- if (src & dst) {
- flags |= V_FLAG;
- }
- if (dst | src) {
- flags |= R_FLAG;
- }
- }
-
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs,
- uint32_t src, uint32_t dst, uint32_t res)
-{
- uint32_t flags = 0;
-
- src = src & 0x80000000;
- dst = dst & 0x80000000;
-
- if ((res & 0x80000000L) != 0L) {
- flags |= N_FLAG;
- if (!src && !dst) {
- flags |= V_FLAG;
- } else if (src & dst) {
- flags |= C_FLAG;
- }
- } else {
- if (res == 0L) {
- flags |= Z_FLAG;
- }
- if (src & dst) {
- flags |= V_FLAG;
- }
- if (dst | src) {
- flags |= C_FLAG;
- }
- }
-
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
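helper_evaluate_flags_alu_4 above derives C and V for a 32-bit addition from the three sign bits alone. A standalone sketch, not QEMU code (add_flags and its NZVC packing are invented), applying the same sign-bit rules and cross-checking the carry against 64-bit arithmetic:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* N/Z/V/C for a 32-bit add, derived from the sign bits only, the same way
 * as the helper above.  Bit layout (N=8, Z=4, V=2, C=1) is arbitrary and
 * does not try to match the CCS register. */
static unsigned add_flags(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    unsigned sa = a >> 31, sb = b >> 31, sr = res >> 31;
    unsigned n = sr;
    unsigned z = (res == 0);
    unsigned v = (sa == sb) && (sr != sa);
    unsigned c = (sa & sb) | ((sa | sb) & !sr);
    return (n << 3) | (z << 2) | (v << 1) | c;
}

int main(void)
{
    uint32_t cases[][2] = {
        { 0xffffffffu, 1u }, { 0x7fffffffu, 1u },
        { 0x80000000u, 0x80000000u }, { 3u, 4u },
    };
    for (unsigned i = 0; i < 4; i++) {
        uint32_t a = cases[i][0], b = cases[i][1];
        unsigned f = add_flags(a, b);
        /* Cross-check the carry bit against 64-bit arithmetic. */
        assert((f & 1) == (unsigned)(((uint64_t)a + b) >> 32));
        printf("%08" PRIx32 " + %08" PRIx32 " -> NZVC = %u%u%u%u\n",
               a, b, (f >> 3) & 1, (f >> 2) & 1, (f >> 1) & 1, f & 1);
    }
    return 0;
}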
-uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs,
- uint32_t src, uint32_t dst, uint32_t res)
-{
- uint32_t flags = 0;
-
- src = (~src) & 0x80000000;
- dst = dst & 0x80000000;
-
- if ((res & 0x80000000L) != 0L) {
- flags |= N_FLAG;
- if (!src && !dst) {
- flags |= V_FLAG;
- } else if (src & dst) {
- flags |= C_FLAG;
- }
- } else {
- if (res == 0L) {
- flags |= Z_FLAG;
- }
- if (src & dst) {
- flags |= V_FLAG;
- }
- if (dst | src) {
- flags |= C_FLAG;
- }
- }
-
- flags ^= C_FLAG;
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-uint32_t helper_evaluate_flags_move_4(CPUCRISState *env,
- uint32_t ccs, uint32_t res)
-{
- uint32_t flags = 0;
-
- if ((int32_t)res < 0) {
- flags |= N_FLAG;
- } else if (res == 0L) {
- flags |= Z_FLAG;
- }
-
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-uint32_t helper_evaluate_flags_move_2(CPUCRISState *env,
- uint32_t ccs, uint32_t res)
-{
- uint32_t flags = 0;
-
- if ((int16_t)res < 0L) {
- flags |= N_FLAG;
- } else if (res == 0) {
- flags |= Z_FLAG;
- }
-
- return evaluate_flags_writeback(env, flags, ccs);
-}
-
-/*
- * TODO: This is expensive. We could split things up and only evaluate part of
- * CCR on a need to know basis. For now, we simply re-evaluate everything.
- */
-void helper_evaluate_flags(CPUCRISState *env)
-{
- uint32_t src, dst, res;
- uint32_t flags = 0;
-
- src = env->cc_src;
- dst = env->cc_dest;
- res = env->cc_result;
-
- if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
- src = ~src;
- }
-
- /*
- * Now, evaluate the flags. This stuff is based on
- * Per Zander's CRISv10 simulator.
- */
- switch (env->cc_size) {
- case 1:
- if ((res & 0x80L) != 0L) {
- flags |= N_FLAG;
- if (((src & 0x80L) == 0L) && ((dst & 0x80L) == 0L)) {
- flags |= V_FLAG;
- } else if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
- flags |= C_FLAG;
- }
- } else {
- if ((res & 0xFFL) == 0L) {
- flags |= Z_FLAG;
- }
- if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
- flags |= V_FLAG;
- }
- if ((dst & 0x80L) != 0L || (src & 0x80L) != 0L) {
- flags |= C_FLAG;
- }
- }
- break;
- case 2:
- if ((res & 0x8000L) != 0L) {
- flags |= N_FLAG;
- if (((src & 0x8000L) == 0L) && ((dst & 0x8000L) == 0L)) {
- flags |= V_FLAG;
- } else if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
- flags |= C_FLAG;
- }
- } else {
- if ((res & 0xFFFFL) == 0L) {
- flags |= Z_FLAG;
- }
- if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
- flags |= V_FLAG;
- }
- if ((dst & 0x8000L) != 0L || (src & 0x8000L) != 0L) {
- flags |= C_FLAG;
- }
- }
- break;
- case 4:
- if ((res & 0x80000000L) != 0L) {
- flags |= N_FLAG;
- if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) {
- flags |= V_FLAG;
- } else if (((src & 0x80000000L) != 0L) &&
- ((dst & 0x80000000L) != 0L)) {
- flags |= C_FLAG;
- }
- } else {
- if (res == 0L) {
- flags |= Z_FLAG;
- }
- if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) {
- flags |= V_FLAG;
- }
- if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) {
- flags |= C_FLAG;
- }
- }
- break;
- default:
- break;
- }
-
- if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
- flags ^= C_FLAG;
- }
-
- env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
- env->pregs[PR_CCS]);
-}
-
-void helper_top_evaluate_flags(CPUCRISState *env)
-{
- switch (env->cc_op) {
- case CC_OP_MCP:
- env->pregs[PR_CCS]
- = helper_evaluate_flags_mcp(env, env->pregs[PR_CCS],
- env->cc_src, env->cc_dest,
- env->cc_result);
- break;
- case CC_OP_MULS:
- env->pregs[PR_CCS]
- = helper_evaluate_flags_muls(env, env->pregs[PR_CCS],
- env->cc_result, env->pregs[PR_MOF]);
- break;
- case CC_OP_MULU:
- env->pregs[PR_CCS]
- = helper_evaluate_flags_mulu(env, env->pregs[PR_CCS],
- env->cc_result, env->pregs[PR_MOF]);
- break;
- case CC_OP_MOVE:
- case CC_OP_AND:
- case CC_OP_OR:
- case CC_OP_XOR:
- case CC_OP_ASR:
- case CC_OP_LSR:
- case CC_OP_LSL:
- switch (env->cc_size) {
- case 4:
- env->pregs[PR_CCS] =
- helper_evaluate_flags_move_4(env,
- env->pregs[PR_CCS],
- env->cc_result);
- break;
- case 2:
- env->pregs[PR_CCS] =
- helper_evaluate_flags_move_2(env,
- env->pregs[PR_CCS],
- env->cc_result);
- break;
- default:
- helper_evaluate_flags(env);
- break;
- }
- break;
- case CC_OP_FLAGS:
- /* live. */
- break;
- case CC_OP_SUB:
- case CC_OP_CMP:
- if (env->cc_size == 4) {
- env->pregs[PR_CCS] =
- helper_evaluate_flags_sub_4(env,
- env->pregs[PR_CCS],
- env->cc_src, env->cc_dest,
- env->cc_result);
- } else {
- helper_evaluate_flags(env);
- }
- break;
- default:
- switch (env->cc_size) {
- case 4:
- env->pregs[PR_CCS] =
- helper_evaluate_flags_alu_4(env,
- env->pregs[PR_CCS],
- env->cc_src, env->cc_dest,
- env->cc_result);
- break;
- default:
- helper_evaluate_flags(env);
- break;
- }
- break;
- }
-}
diff --git a/target/cris/opcode-cris.h b/target/cris/opcode-cris.h
deleted file mode 100644
index 40509c8..0000000
--- a/target/cris/opcode-cris.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/* cris.h -- Header file for CRIS opcode and register tables.
- Copyright (C) 2000, 2001, 2004 Free Software Foundation, Inc.
- Contributed by Axis Communications AB, Lund, Sweden.
- Originally written for GAS 1.38.1 by Mikael Asker.
- Updated, BFDized and GNUified by Hans-Peter Nilsson.
-
-This file is part of GAS, GDB and the GNU binutils.
-
-GAS, GDB, and GNU binutils is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2, or (at your
-option) any later version.
-
-GAS, GDB, and GNU binutils are distributed in the hope that they will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, see <http://www.gnu.org/licenses/>. */
-
-#ifndef TARGET_CRIS_OPCODE_CRIS_H
-#define TARGET_CRIS_OPCODE_CRIS_H
-
-#if !defined(__STDC__) && !defined(const)
-#define const
-#endif
-
-
-/* Registers. */
-#define MAX_REG (15)
-#define CRIS_REG_SP (14)
-#define CRIS_REG_PC (15)
-
-/* CPU version control of disassembly and assembly of instructions.
- May affect how the instruction is assembled, at least the size of
- immediate operands. */
-enum cris_insn_version_usage
-{
- /* Any version. */
- cris_ver_version_all=0,
-
- /* Indeterminate (intended for disassembly only, or obsolete). */
- cris_ver_warning,
-
- /* Only for v0..3 (Etrax 1..4). */
- cris_ver_v0_3,
-
- /* Only for v3 or higher (ETRAX 4 and beyond). */
- cris_ver_v3p,
-
- /* Only for v8 (Etrax 100). */
- cris_ver_v8,
-
- /* Only for v8 or higher (ETRAX 100, ETRAX 100 LX). */
- cris_ver_v8p,
-
- /* Only for v0..10. FIXME: Not sure what to do with this. */
- cris_ver_sim_v0_10,
-
- /* Only for v0..10. */
- cris_ver_v0_10,
-
- /* Only for v3..10. (ETRAX 4, ETRAX 100 and ETRAX 100 LX). */
- cris_ver_v3_10,
-
- /* Only for v8..10 (ETRAX 100 and ETRAX 100 LX). */
- cris_ver_v8_10,
-
- /* Only for v10 (ETRAX 100 LX) and same series. */
- cris_ver_v10,
-
- /* Only for v10 (ETRAX 100 LX) and same series. */
- cris_ver_v10p,
-
- /* Only for v32 or higher (codename GUINNESS).
- Of course some or all of these may change to cris_ver_v32p if/when
- there's a new revision. */
- cris_ver_v32p
-};
-
-
-/* Special registers. */
-struct cris_spec_reg
-{
- const char *const name;
- unsigned int number;
-
- /* The size of the register. */
- unsigned int reg_size;
-
- /* What CPU version the special register of that name is implemented
- in. If cris_ver_warning, emit an unimplemented-warning. */
- enum cris_insn_version_usage applicable_version;
-
- /* There might be a specific warning for using a special register
- here. */
- const char *const warning;
-};
-extern const struct cris_spec_reg cris_spec_regs[];
-
-
-/* Support registers (kind of special too, but not named as such). */
-struct cris_support_reg
-{
- const char *const name;
- unsigned int number;
-};
-extern const struct cris_support_reg cris_support_regs[];
-
-/* Opcode-dependent constants. */
-#define AUTOINCR_BIT (0x04)
-
-/* Prefixes. */
-#define BDAP_QUICK_OPCODE (0x0100)
-#define BDAP_QUICK_Z_BITS (0x0e00)
-
-#define BIAP_OPCODE (0x0540)
-#define BIAP_Z_BITS (0x0a80)
-
-#define DIP_OPCODE (0x0970)
-#define DIP_Z_BITS (0xf280)
-
-#define BDAP_INDIR_LOW (0x40)
-#define BDAP_INDIR_LOW_Z (0x80)
-#define BDAP_INDIR_HIGH (0x09)
-#define BDAP_INDIR_HIGH_Z (0x02)
-
-#define BDAP_INDIR_OPCODE (BDAP_INDIR_HIGH * 0x0100 + BDAP_INDIR_LOW)
-#define BDAP_INDIR_Z_BITS (BDAP_INDIR_HIGH_Z * 0x100 + BDAP_INDIR_LOW_Z)
-#define BDAP_PC_LOW (BDAP_INDIR_LOW + CRIS_REG_PC)
-#define BDAP_INCR_HIGH (BDAP_INDIR_HIGH + AUTOINCR_BIT)
-
-/* No prefix must have this code for its "match" bits in the
- opcode-table. "BCC .+2" will do nicely. */
-#define NO_CRIS_PREFIX 0
-
-/* Definitions for condition codes. */
-#define CC_CC 0x0
-#define CC_HS 0x0
-#define CC_CS 0x1
-#define CC_LO 0x1
-#define CC_NE 0x2
-#define CC_EQ 0x3
-#define CC_VC 0x4
-#define CC_VS 0x5
-#define CC_PL 0x6
-#define CC_MI 0x7
-#define CC_LS 0x8
-#define CC_HI 0x9
-#define CC_GE 0xA
-#define CC_LT 0xB
-#define CC_GT 0xC
-#define CC_LE 0xD
-#define CC_A 0xE
-#define CC_EXT 0xF
-
-/* A table of strings "cc", "cs"... indexed with condition code
- values as above. */
-extern const char *const cris_cc_strings[];
-
-/* Bcc quick. */
-#define BRANCH_QUICK_LOW (0)
-#define BRANCH_QUICK_HIGH (0)
-#define BRANCH_QUICK_OPCODE (BRANCH_QUICK_HIGH * 0x0100 + BRANCH_QUICK_LOW)
-#define BRANCH_QUICK_Z_BITS (0x0F00)
-
-/* BA quick. */
-#define BA_QUICK_HIGH (BRANCH_QUICK_HIGH + CC_A * 0x10)
-#define BA_QUICK_OPCODE (BA_QUICK_HIGH * 0x100 + BRANCH_QUICK_LOW)
-
-/* Bcc [PC+]. */
-#define BRANCH_PC_LOW (0xFF)
-#define BRANCH_INCR_HIGH (0x0D)
-#define BA_PC_INCR_OPCODE \
- ((BRANCH_INCR_HIGH + CC_A * 0x10) * 0x0100 + BRANCH_PC_LOW)
-
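The prefix and branch opcodes in this header are all composed from byte-sized pieces. A small standalone program, not from the tree, that copies a few of the macros above and prints the concrete 16-bit values they collapse to:

#include <stdio.h>

#define CC_A                0xE
#define BRANCH_QUICK_LOW    (0)
#define BRANCH_QUICK_HIGH   (0)
#define BA_QUICK_HIGH       (BRANCH_QUICK_HIGH + CC_A * 0x10)
#define BA_QUICK_OPCODE     (BA_QUICK_HIGH * 0x100 + BRANCH_QUICK_LOW)
#define BRANCH_PC_LOW       (0xFF)
#define BRANCH_INCR_HIGH    (0x0D)
#define BA_PC_INCR_OPCODE \
    ((BRANCH_INCR_HIGH + CC_A * 0x10) * 0x0100 + BRANCH_PC_LOW)

int main(void)
{
    printf("BA_QUICK_OPCODE   = 0x%04X\n", BA_QUICK_OPCODE);   /* 0xE000 */
    printf("BA_PC_INCR_OPCODE = 0x%04X\n", BA_PC_INCR_OPCODE); /* 0xEDFF */
    return 0;
}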
-/* Jump. */
-/* Note that old versions generated special register 8 (in high bits)
- and not-that-old versions recognized it as a jump-instruction.
- That opcode now belongs to JUMPU. */
-#define JUMP_INDIR_OPCODE (0x0930)
-#define JUMP_INDIR_Z_BITS (0xf2c0)
-#define JUMP_PC_INCR_OPCODE \
- (JUMP_INDIR_OPCODE + AUTOINCR_BIT * 0x0100 + CRIS_REG_PC)
-
-#define MOVE_M_TO_PREG_OPCODE 0x0a30
-#define MOVE_M_TO_PREG_ZBITS 0x01c0
-
-/* BDAP.D N,PC. */
-#define MOVE_PC_INCR_OPCODE_PREFIX \
- (((BDAP_INCR_HIGH | (CRIS_REG_PC << 4)) << 8) | BDAP_PC_LOW | (2 << 4))
-#define MOVE_PC_INCR_OPCODE_SUFFIX \
- (MOVE_M_TO_PREG_OPCODE | CRIS_REG_PC | (AUTOINCR_BIT << 8))
-
-#define JUMP_PC_INCR_OPCODE_V32 (0x0DBF)
-
-/* BA DWORD (V32). */
-#define BA_DWORD_OPCODE (0x0EBF)
-
-/* Nop. */
-#define NOP_OPCODE (0x050F)
-#define NOP_Z_BITS (0xFFFF ^ NOP_OPCODE)
-
-#define NOP_OPCODE_V32 (0x05B0)
-#define NOP_Z_BITS_V32 (0xFFFF ^ NOP_OPCODE_V32)
-
-/* For the compatibility mode, let's use "MOVE R0,P0". Doesn't affect
- registers or flags. Unfortunately shuts off interrupts for one cycle
- for < v32, but there doesn't seem to be any alternative without that
- effect. */
-#define NOP_OPCODE_COMMON (0x630)
-#define NOP_OPCODE_ZBITS_COMMON (0xffff & ~NOP_OPCODE_COMMON)
-
-/* LAPC.D */
-#define LAPC_DWORD_OPCODE (0x0D7F)
-#define LAPC_DWORD_Z_BITS (0x0fff & ~LAPC_DWORD_OPCODE)
-
-/* Structure of an opcode table entry. */
-enum cris_imm_oprnd_size_type
-{
- /* No size is applicable. */
- SIZE_NONE,
-
- /* Always 32 bits. */
- SIZE_FIX_32,
-
- /* Indicated by size of special register. */
- SIZE_SPEC_REG,
-
- /* Indicated by size field, signed. */
- SIZE_FIELD_SIGNED,
-
- /* Indicated by size field, unsigned. */
- SIZE_FIELD_UNSIGNED,
-
- /* Indicated by size field, no sign implied. */
- SIZE_FIELD
-};
-
-/* For GDB. FIXME: Is this the best way to handle opcode
- interpretation? */
-enum cris_op_type
-{
- cris_not_implemented_op = 0,
- cris_abs_op,
- cris_addi_op,
- cris_asr_op,
- cris_asrq_op,
- cris_ax_ei_setf_op,
- cris_bdap_prefix,
- cris_biap_prefix,
- cris_break_op,
- cris_btst_nop_op,
- cris_clearf_di_op,
- cris_dip_prefix,
- cris_dstep_logshift_mstep_neg_not_op,
- cris_eight_bit_offset_branch_op,
- cris_move_mem_to_reg_movem_op,
- cris_move_reg_to_mem_movem_op,
- cris_move_to_preg_op,
- cris_muls_op,
- cris_mulu_op,
- cris_none_reg_mode_add_sub_cmp_and_or_move_op,
- cris_none_reg_mode_clear_test_op,
- cris_none_reg_mode_jump_op,
- cris_none_reg_mode_move_from_preg_op,
- cris_quick_mode_add_sub_op,
- cris_quick_mode_and_cmp_move_or_op,
- cris_quick_mode_bdap_prefix,
- cris_reg_mode_add_sub_cmp_and_or_move_op,
- cris_reg_mode_clear_op,
- cris_reg_mode_jump_op,
- cris_reg_mode_move_from_preg_op,
- cris_reg_mode_test_op,
- cris_scc_op,
- cris_sixteen_bit_offset_branch_op,
- cris_three_operand_add_sub_cmp_and_or_op,
- cris_three_operand_bound_op,
- cris_two_operand_bound_op,
- cris_xor_op
-};
-
-struct cris_opcode
-{
- /* The name of the insn. */
- const char *name;
-
- /* Bits that must be 1 for a match. */
- unsigned int match;
-
- /* Bits that must be 0 for a match. */
- unsigned int lose;
-
- /* See the table in "opcodes/cris-opc.c". */
- const char *args;
-
- /* Nonzero if this is a delayed branch instruction. */
- char delayed;
-
- /* Size of immediate operands. */
- enum cris_imm_oprnd_size_type imm_oprnd_size;
-
- /* Indicates which version this insn was first implemented in. */
- enum cris_insn_version_usage applicable_version;
-
- /* What kind of operation this is. */
- enum cris_op_type op;
-};
-extern const struct cris_opcode cris_opcodes[];
-
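Per the field comments above, an instruction word matches a table entry when every "match" bit is set and every "lose" bit is clear. A standalone sketch, not from the tree (opcode_matches is invented), using the NOP encoding defined earlier in this header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NOP_OPCODE (0x050F)
#define NOP_Z_BITS (0xFFFF ^ NOP_OPCODE)

/* "match" bits must all be 1, "lose" bits must all be 0. */
static int opcode_matches(uint16_t insn, uint16_t match, uint16_t lose)
{
    return (insn & match) == match && (insn & lose) == 0;
}

int main(void)
{
    assert(opcode_matches(NOP_OPCODE, NOP_OPCODE, NOP_Z_BITS));
    /* Setting a forbidden bit breaks the match. */
    assert(!opcode_matches(NOP_OPCODE | 0x1000, NOP_OPCODE, NOP_Z_BITS));
    printf("ok\n");
    return 0;
}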
-
-/* These macros are for the target-specific flags in disassemble_info
- used at disassembly. */
-
-/* This insn accesses memory. This flag is more trustworthy than
- checking insn_type for "dis_dref" which does not work for
- e.g. "JSR [foo]". */
-#define CRIS_DIS_FLAG_MEMREF (1 << 0)
-
-/* The "target" field holds a register number. */
-#define CRIS_DIS_FLAG_MEM_TARGET_IS_REG (1 << 1)
-
-/* The "target2" field holds a register number; add it to "target". */
-#define CRIS_DIS_FLAG_MEM_TARGET2_IS_REG (1 << 2)
-
-/* Yet another add-on: the register in "target2" must be multiplied
- by 2 before adding to "target". */
-#define CRIS_DIS_FLAG_MEM_TARGET2_MULT2 (1 << 3)
-
-/* Yet another add-on: the register in "target2" must be multiplied
- by 4 (mutually exclusive with .._MULT2). */
-#define CRIS_DIS_FLAG_MEM_TARGET2_MULT4 (1 << 4)
-
-/* The register in "target2" is an indirect memory reference (of the
- register there), add to "target". Assumed size is dword (mutually
- exclusive with .._MULT[24]). */
-#define CRIS_DIS_FLAG_MEM_TARGET2_MEM (1 << 5)
-
-/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "byte";
- sign-extended before adding to "target". */
-#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE (1 << 6)
-
-/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "word";
- sign-extended before adding to "target". */
-#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD (1 << 7)
-
-#endif /* TARGET_CRIS_OPCODE_CRIS_H */
-
-/*
- * Local variables:
- * eval: (c-set-style "gnu")
- * indent-tabs-mode: t
- * End:
- */
diff --git a/target/cris/translate.c b/target/cris/translate.c
deleted file mode 100644
index a30c67e..0000000
--- a/target/cris/translate.c
+++ /dev/null
@@ -1,3252 +0,0 @@
-/*
- * CRIS emulation for qemu: main translation routines.
- *
- * Copyright (c) 2008 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * FIXME:
- * The condition code translation is in need of attention.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "tcg/tcg-op.h"
-#include "exec/helper-proto.h"
-#include "mmu.h"
-#include "exec/translator.h"
-#include "crisv32-decode.h"
-#include "qemu/qemu-print.h"
-#include "exec/helper-gen.h"
-#include "exec/log.h"
-
-#define HELPER_H "helper.h"
-#include "exec/helper-info.c.inc"
-#undef HELPER_H
-
-
-#define DISAS_CRIS 0
-#if DISAS_CRIS
-# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
-#else
-# define LOG_DIS(...) do { } while (0)
-#endif
-
-#define D(x)
-#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
-#define BUG_ON(x) ({if (x) BUG();})
-
-/*
- * Target-specific is_jmp field values
- */
-/* Only pc was modified dynamically */
-#define DISAS_JUMP DISAS_TARGET_0
-/* Cpu state was modified dynamically, including pc */
-#define DISAS_UPDATE DISAS_TARGET_1
-/* Cpu state was modified dynamically, excluding pc -- use npc */
-#define DISAS_UPDATE_NEXT DISAS_TARGET_2
-/* PC update for delayed branch, see cpustate_changed otherwise */
-#define DISAS_DBRANCH DISAS_TARGET_3
-
-/* Used by the decoder. */
-#define EXTRACT_FIELD(src, start, end) \
- (((src) >> start) & ((1 << (end - start + 1)) - 1))
-
-#define CC_MASK_NZ 0xc
-#define CC_MASK_NZV 0xe
-#define CC_MASK_NZVC 0xf
-#define CC_MASK_RNZV 0x10e
-
-static TCGv cpu_R[16];
-static TCGv cpu_PR[16];
-static TCGv cc_x;
-static TCGv cc_src;
-static TCGv cc_dest;
-static TCGv cc_result;
-static TCGv cc_op;
-static TCGv cc_size;
-static TCGv cc_mask;
-
-static TCGv env_btaken;
-static TCGv env_btarget;
-static TCGv env_pc;
-
-/* This is the state at translation time. */
-typedef struct DisasContext {
- DisasContextBase base;
-
- CRISCPU *cpu;
- target_ulong pc, ppc;
- int mem_index;
-
- /* Decoder. */
- unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc);
- uint32_t ir;
- uint32_t opcode;
- unsigned int op1;
- unsigned int op2;
- unsigned int zsize, zzsize;
- unsigned int mode;
- unsigned int postinc;
-
- unsigned int size;
- unsigned int src;
- unsigned int dst;
- unsigned int cond;
-
- int update_cc;
- int cc_op;
- int cc_size;
- uint32_t cc_mask;
-
- int cc_size_uptodate; /* -1 invalid or last written value. */
-
- int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up-to-date. */
- int flags_uptodate; /* Whether or not $ccs is up-to-date. */
- int flags_x;
-
- int clear_x; /* Clear x after this insn? */
- int clear_prefix; /* Clear prefix after this insn? */
- int clear_locked_irq; /* Clear the irq lockout. */
- int cpustate_changed;
- unsigned int tb_flags; /* tb dependent flags. */
-
-#define JMP_NOJMP 0
-#define JMP_DIRECT 1
-#define JMP_DIRECT_CC 2
-#define JMP_INDIRECT 3
- int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
- uint32_t jmp_pc;
-
- int delayed_branch;
-} DisasContext;
-
-static void gen_BUG(DisasContext *dc, const char *file, int line)
-{
- cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc);
-}
-
-static const char * const regnames_v32[] =
-{
- "$r0", "$r1", "$r2", "$r3",
- "$r4", "$r5", "$r6", "$r7",
- "$r8", "$r9", "$r10", "$r11",
- "$r12", "$r13", "$sp", "$acr",
-};
-
-static const char * const pregnames_v32[] =
-{
- "$bz", "$vr", "$pid", "$srs",
- "$wz", "$exs", "$eda", "$mof",
- "$dz", "$ebp", "$erp", "$srp",
- "$nrp", "$ccs", "$usp", "$spc",
-};
-
-/* We need this table to handle preg-moves with implicit width. */
-static const int preg_sizes[] = {
- 1, /* bz. */
- 1, /* vr. */
- 4, /* pid. */
- 1, /* srs. */
- 2, /* wz. */
- 4, 4, 4,
- 4, 4, 4, 4,
- 4, 4, 4, 4,
-};
-
-#define t_gen_mov_TN_env(tn, member) \
- tcg_gen_ld_tl(tn, tcg_env, offsetof(CPUCRISState, member))
-#define t_gen_mov_env_TN(member, tn) \
- tcg_gen_st_tl(tn, tcg_env, offsetof(CPUCRISState, member))
-#define t_gen_movi_env_TN(member, c) \
- t_gen_mov_env_TN(member, tcg_constant_tl(c))
-
-static inline void t_gen_mov_TN_preg(TCGv tn, int r)
-{
- assert(r >= 0 && r <= 15);
- if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
- tcg_gen_movi_tl(tn, 0);
- } else if (r == PR_VR) {
- tcg_gen_movi_tl(tn, 32);
- } else {
- tcg_gen_mov_tl(tn, cpu_PR[r]);
- }
-}
-static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
-{
- assert(r >= 0 && r <= 15);
- if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
- return;
- } else if (r == PR_SRS) {
- tcg_gen_andi_tl(cpu_PR[r], tn, 3);
- } else {
- if (r == PR_PID) {
- gen_helper_tlb_flush_pid(tcg_env, tn);
- }
- if (dc->tb_flags & S_FLAG && r == PR_SPC) {
- gen_helper_spc_write(tcg_env, tn);
- } else if (r == PR_CCS) {
- dc->cpustate_changed = 1;
- }
- tcg_gen_mov_tl(cpu_PR[r], tn);
- }
-}
-
-/* Sign extend at translation time. */
-static int sign_extend(unsigned int val, unsigned int width)
-{
- int sval;
-
- /* LSL. */
- val <<= 31 - width;
- sval = val;
- /* ASR. */
- sval >>= 31 - width;
- return sval;
-}
-
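The shift pair above relies on the host compiler implementing arithmetic right shift of signed ints, which is the common case. A standalone copy, not from the tree, with a few sample values to show where the sign bit comes from:

#include <stdio.h>

static int sign_extend(unsigned int val, unsigned int width)
{
    int sval;
    val <<= 31 - width;   /* place bit 'width' at bit 31 */
    sval = val;
    sval >>= 31 - width;  /* arithmetic shift copies the sign back down */
    return sval;
}

int main(void)
{
    printf("%d\n", sign_extend(0x1FF, 8));  /* 9-bit 0x1FF -> -1 */
    printf("%d\n", sign_extend(0x0FF, 8));  /* bit 8 clear  -> 255 */
    printf("%d\n", sign_extend(0x20, 5));   /* 6-bit 0x20   -> -32 */
    return 0;
}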
-static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
- unsigned int size, bool sign)
-{
- int r;
-
- switch (size) {
- case 4:
- r = translator_ldl(env, &dc->base, addr);
- break;
- case 2:
- r = translator_lduw(env, &dc->base, addr);
- if (sign) {
- r = (int16_t)r;
- }
- break;
- case 1:
- r = translator_ldub(env, &dc->base, addr);
- if (sign) {
- r = (int8_t)r;
- }
- break;
- default:
- g_assert_not_reached();
- }
- return r;
-}
-
-static void cris_lock_irq(DisasContext *dc)
-{
- dc->clear_locked_irq = 0;
- t_gen_movi_env_TN(locked_irq, 1);
-}
-
-static inline void t_gen_raise_exception(uint32_t index)
-{
- gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
-}
-
-static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
-{
- TCGv t0, t_31;
-
- t0 = tcg_temp_new();
- t_31 = tcg_constant_tl(31);
- tcg_gen_shl_tl(d, a, b);
-
- tcg_gen_sub_tl(t0, t_31, b);
- tcg_gen_sar_tl(t0, t0, t_31);
- tcg_gen_and_tl(t0, t0, d);
- tcg_gen_xor_tl(d, d, t0);
-}
-
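t_gen_lsl has to produce 0 when the (already 6-bit) shift count is 32 or more: the sign of 31 - b, smeared across the word by the arithmetic shift, becomes an all-ones mask, and XORing d with (d & mask) clears it. A standalone sketch, not from the tree (lsl_0_63 is invented), showing just the intended semantics; it uses a branch instead of the mask trick because plain C may not shift a 32-bit value by 32 or more:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Shift left with counts 0..63; counts of 32 or more yield 0. */
static uint32_t lsl_0_63(uint32_t a, uint32_t b)
{
    b &= 63;
    return b >= 32 ? 0 : a << b;
}

int main(void)
{
    assert(lsl_0_63(0x1, 31) == 0x80000000u);
    assert(lsl_0_63(0x1, 32) == 0);
    assert(lsl_0_63(0xff, 40) == 0);
    printf("ok\n");
    return 0;
}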
-static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
-{
- TCGv t0, t_31;
-
- t0 = tcg_temp_new();
- t_31 = tcg_temp_new();
- tcg_gen_shr_tl(d, a, b);
-
- tcg_gen_movi_tl(t_31, 31);
- tcg_gen_sub_tl(t0, t_31, b);
- tcg_gen_sar_tl(t0, t0, t_31);
- tcg_gen_and_tl(t0, t0, d);
- tcg_gen_xor_tl(d, d, t0);
-}
-
-static void t_gen_asr(TCGv d, TCGv a, TCGv b)
-{
- TCGv t0, t_31;
-
- t0 = tcg_temp_new();
- t_31 = tcg_temp_new();
- tcg_gen_sar_tl(d, a, b);
-
- tcg_gen_movi_tl(t_31, 31);
- tcg_gen_sub_tl(t0, t_31, b);
- tcg_gen_sar_tl(t0, t0, t_31);
- tcg_gen_or_tl(d, d, t0);
-}
-
-static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
-{
- TCGv t = tcg_temp_new();
-
- /*
- * d <<= 1
- * if (d >= s)
- * d -= s;
- */
- tcg_gen_shli_tl(d, a, 1);
- tcg_gen_sub_tl(t, d, b);
- tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d);
-}
-
-static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
-{
- TCGv t;
-
- /*
- * d <<= 1
- * if (n)
- * d += s;
- */
- t = tcg_temp_new();
- tcg_gen_shli_tl(d, a, 1);
- tcg_gen_shli_tl(t, ccs, 31 - 3);
- tcg_gen_sari_tl(t, t, 31);
- tcg_gen_and_tl(t, t, b);
- tcg_gen_add_tl(d, d, t);
-}
-
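The pseudo-code comments in the dstep/mstep generators above translate directly into plain C. A standalone sketch, not from the tree (the host-side dstep/mstep helpers are invented for illustration): dstep is one iteration of restoring division, mstep one iteration of shift-and-add multiplication gated on the N flag:

#include <stdint.h>
#include <stdio.h>

static uint32_t dstep(uint32_t d, uint32_t s)
{
    d <<= 1;
    if (d >= s) {        /* unsigned compare, like TCG_COND_GEU above */
        d -= s;
    }
    return d;
}

static uint32_t mstep(uint32_t d, uint32_t s, int n_flag)
{
    d <<= 1;
    if (n_flag) {
        d += s;
    }
    return d;
}

int main(void)
{
    printf("dstep(100, 7)  = %u\n", dstep(100, 7));   /* 200 >= 7 -> 193 */
    printf("dstep(3, 7)    = %u\n", dstep(3, 7));     /* 6 < 7   -> 6 */
    printf("mstep(5, 9, 1) = %u\n", mstep(5, 9, 1));  /* 10 + 9  -> 19 */
    return 0;
}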
-/* Extended arithmetic on CRIS. */
-static inline void t_gen_add_flag(TCGv d, int flag)
-{
- TCGv c;
-
- c = tcg_temp_new();
- t_gen_mov_TN_preg(c, PR_CCS);
- /* Propagate carry into d. */
- tcg_gen_andi_tl(c, c, 1 << flag);
- if (flag) {
- tcg_gen_shri_tl(c, c, flag);
- }
- tcg_gen_add_tl(d, d, c);
-}
-
-static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
-{
- if (dc->flags_x) {
- TCGv c = tcg_temp_new();
-
- t_gen_mov_TN_preg(c, PR_CCS);
- /* C flag is already at bit 0. */
- tcg_gen_andi_tl(c, c, C_FLAG);
- tcg_gen_add_tl(d, d, c);
- }
-}
-
-static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
-{
- if (dc->flags_x) {
- TCGv c = tcg_temp_new();
-
- t_gen_mov_TN_preg(c, PR_CCS);
- /* C flag is already at bit 0. */
- tcg_gen_andi_tl(c, c, C_FLAG);
- tcg_gen_sub_tl(d, d, c);
- }
-}
-
-/* Swap the two bytes within each half word of the s operand.
- T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
-static inline void t_gen_swapb(TCGv d, TCGv s)
-{
- TCGv t, org_s;
-
- t = tcg_temp_new();
- org_s = tcg_temp_new();
-
- /* d and s may refer to the same object. */
- tcg_gen_mov_tl(org_s, s);
- tcg_gen_shli_tl(t, org_s, 8);
- tcg_gen_andi_tl(d, t, 0xff00ff00);
- tcg_gen_shri_tl(t, org_s, 8);
- tcg_gen_andi_tl(t, t, 0x00ff00ff);
- tcg_gen_or_tl(d, d, t);
-}
-
-/* Swap the halfwords of the s operand. */
-static inline void t_gen_swapw(TCGv d, TCGv s)
-{
- TCGv t;
- /* d and s may refer to the same object. */
- t = tcg_temp_new();
- tcg_gen_mov_tl(t, s);
- tcg_gen_shli_tl(d, t, 16);
- tcg_gen_shri_tl(t, t, 16);
- tcg_gen_or_tl(d, d, t);
-}
-
-/*
- * Reverse the bits within each byte.
- *
- * T0 = ((T0 << 7) & 0x80808080)
- * | ((T0 << 5) & 0x40404040)
- * | ((T0 << 3) & 0x20202020)
- * | ((T0 << 1) & 0x10101010)
- * | ((T0 >> 1) & 0x08080808)
- * | ((T0 >> 3) & 0x04040404)
- * | ((T0 >> 5) & 0x02020202)
- * | ((T0 >> 7) & 0x01010101);
- */
-static void t_gen_swapr(TCGv d, TCGv s)
-{
- static const struct {
- int shift; /* LSL when positive, LSR when negative. */
- uint32_t mask;
- } bitrev[] = {
- {7, 0x80808080},
- {5, 0x40404040},
- {3, 0x20202020},
- {1, 0x10101010},
- {-1, 0x08080808},
- {-3, 0x04040404},
- {-5, 0x02020202},
- {-7, 0x01010101}
- };
- int i;
- TCGv t, org_s;
-
- /* d and s may refer to the same object. */
- t = tcg_temp_new();
- org_s = tcg_temp_new();
- tcg_gen_mov_tl(org_s, s);
-
- tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
- tcg_gen_andi_tl(d, t, bitrev[0].mask);
- for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
- if (bitrev[i].shift >= 0) {
- tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
- } else {
- tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
- }
- tcg_gen_andi_tl(t, t, bitrev[i].mask);
- tcg_gen_or_tl(d, d, t);
- }
-}
-
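The shift/mask identity in the comment above t_gen_swapr can be verified on the host. A standalone sketch, not from the tree (both function names are invented), comparing the mask form against a naive per-bit loop:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reverse the bits within each byte using the masks from the comment. */
static uint32_t swapr_masks(uint32_t x)
{
    return ((x << 7) & 0x80808080u) | ((x << 5) & 0x40404040u)
         | ((x << 3) & 0x20202020u) | ((x << 1) & 0x10101010u)
         | ((x >> 1) & 0x08080808u) | ((x >> 3) & 0x04040404u)
         | ((x >> 5) & 0x02020202u) | ((x >> 7) & 0x01010101u);
}

/* Reference: move bit 'bit' of each byte to bit 7 - 'bit'. */
static uint32_t swapr_naive(uint32_t x)
{
    uint32_t r = 0;
    for (int byte = 0; byte < 4; byte++) {
        for (int bit = 0; bit < 8; bit++) {
            if (x & (1u << (byte * 8 + bit))) {
                r |= 1u << (byte * 8 + (7 - bit));
            }
        }
    }
    return r;
}

int main(void)
{
    uint32_t samples[] = { 0x12345678u, 0x01020408u, 0xdeadbeefu, 0 };
    for (int i = 0; i < 4; i++) {
        assert(swapr_masks(samples[i]) == swapr_naive(samples[i]));
    }
    printf("swapr(0x12345678) = 0x%08x\n", (unsigned)swapr_masks(0x12345678u));
    return 0;
}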
-static bool use_goto_tb(DisasContext *dc, target_ulong dest)
-{
- return translator_use_goto_tb(&dc->base, dest);
-}
-
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
-{
- if (use_goto_tb(dc, dest)) {
- tcg_gen_goto_tb(n);
- tcg_gen_movi_tl(env_pc, dest);
- tcg_gen_exit_tb(dc->base.tb, n);
- } else {
- tcg_gen_movi_tl(env_pc, dest);
- tcg_gen_lookup_and_goto_ptr();
- }
-}
-
-static inline void cris_clear_x_flag(DisasContext *dc)
-{
- if (dc->flags_x) {
- dc->flags_uptodate = 0;
- }
- dc->flags_x = 0;
-}
-
-static void cris_flush_cc_state(DisasContext *dc)
-{
- if (dc->cc_size_uptodate != dc->cc_size) {
- tcg_gen_movi_tl(cc_size, dc->cc_size);
- dc->cc_size_uptodate = dc->cc_size;
- }
- tcg_gen_movi_tl(cc_op, dc->cc_op);
- tcg_gen_movi_tl(cc_mask, dc->cc_mask);
-}
-
-static void cris_evaluate_flags(DisasContext *dc)
-{
- if (dc->flags_uptodate) {
- return;
- }
-
- cris_flush_cc_state(dc);
-
- switch (dc->cc_op) {
- case CC_OP_MCP:
- gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], tcg_env,
- cpu_PR[PR_CCS], cc_src,
- cc_dest, cc_result);
- break;
- case CC_OP_MULS:
- gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], tcg_env,
- cpu_PR[PR_CCS], cc_result,
- cpu_PR[PR_MOF]);
- break;
- case CC_OP_MULU:
- gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], tcg_env,
- cpu_PR[PR_CCS], cc_result,
- cpu_PR[PR_MOF]);
- break;
- case CC_OP_MOVE:
- case CC_OP_AND:
- case CC_OP_OR:
- case CC_OP_XOR:
- case CC_OP_ASR:
- case CC_OP_LSR:
- case CC_OP_LSL:
- switch (dc->cc_size) {
- case 4:
- gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
- tcg_env, cpu_PR[PR_CCS], cc_result);
- break;
- case 2:
- gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
- tcg_env, cpu_PR[PR_CCS], cc_result);
- break;
- default:
- gen_helper_evaluate_flags(tcg_env);
- break;
- }
- break;
- case CC_OP_FLAGS:
- /* live. */
- break;
- case CC_OP_SUB:
- case CC_OP_CMP:
- if (dc->cc_size == 4) {
- gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], tcg_env,
- cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
- } else {
- gen_helper_evaluate_flags(tcg_env);
- }
-
- break;
- default:
- switch (dc->cc_size) {
- case 4:
- gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], tcg_env,
- cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
- break;
- default:
- gen_helper_evaluate_flags(tcg_env);
- break;
- }
- break;
- }
-
- if (dc->flags_x) {
- tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
- } else if (dc->cc_op == CC_OP_FLAGS) {
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
- }
- dc->flags_uptodate = 1;
-}
-
-static void cris_cc_mask(DisasContext *dc, unsigned int mask)
-{
- uint32_t ovl;
-
- if (!mask) {
- dc->update_cc = 0;
- return;
- }
-
- /* Check if we need to evaluate the condition codes due to
- CC overlaying. */
- ovl = (dc->cc_mask ^ mask) & ~mask;
- if (ovl) {
- /* TODO: optimize this case. It triggers all the time. */
- cris_evaluate_flags(dc);
- }
- dc->cc_mask = mask;
- dc->update_cc = 1;
-}
-
-static void cris_update_cc_op(DisasContext *dc, int op, int size)
-{
- dc->cc_op = op;
- dc->cc_size = size;
- dc->flags_uptodate = 0;
-}
-
-static inline void cris_update_cc_x(DisasContext *dc)
-{
- /* Save the x flag state at the time of the cc snapshot. */
- if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
- return;
- }
- tcg_gen_movi_tl(cc_x, dc->flags_x);
- dc->cc_x_uptodate = 2 | dc->flags_x;
-}
-
-/* Update cc prior to executing ALU op. Needs source operands untouched. */
-static void cris_pre_alu_update_cc(DisasContext *dc, int op,
- TCGv dst, TCGv src, int size)
-{
- if (dc->update_cc) {
- cris_update_cc_op(dc, op, size);
- tcg_gen_mov_tl(cc_src, src);
-
- if (op != CC_OP_MOVE
- && op != CC_OP_AND
- && op != CC_OP_OR
- && op != CC_OP_XOR
- && op != CC_OP_ASR
- && op != CC_OP_LSR
- && op != CC_OP_LSL) {
- tcg_gen_mov_tl(cc_dest, dst);
- }
-
- cris_update_cc_x(dc);
- }
-}
-
-/* Update cc after executing ALU op. needs the result. */
-static inline void cris_update_result(DisasContext *dc, TCGv res)
-{
- if (dc->update_cc) {
- tcg_gen_mov_tl(cc_result, res);
- }
-}
-
-/* Emit the ALU operation; the result is masked down to the operand size. */
-static void cris_alu_op_exec(DisasContext *dc, int op,
- TCGv dst, TCGv a, TCGv b, int size)
-{
- /* Emit the ALU insns. */
- switch (op) {
- case CC_OP_ADD:
- tcg_gen_add_tl(dst, a, b);
- /* Extended arithmetic. */
- t_gen_addx_carry(dc, dst);
- break;
- case CC_OP_ADDC:
- tcg_gen_add_tl(dst, a, b);
- t_gen_add_flag(dst, 0); /* C_FLAG. */
- break;
- case CC_OP_MCP:
- tcg_gen_add_tl(dst, a, b);
- t_gen_add_flag(dst, 8); /* R_FLAG. */
- break;
- case CC_OP_SUB:
- tcg_gen_sub_tl(dst, a, b);
- /* Extended arithmetic. */
- t_gen_subx_carry(dc, dst);
- break;
- case CC_OP_MOVE:
- tcg_gen_mov_tl(dst, b);
- break;
- case CC_OP_OR:
- tcg_gen_or_tl(dst, a, b);
- break;
- case CC_OP_AND:
- tcg_gen_and_tl(dst, a, b);
- break;
- case CC_OP_XOR:
- tcg_gen_xor_tl(dst, a, b);
- break;
- case CC_OP_LSL:
- t_gen_lsl(dst, a, b);
- break;
- case CC_OP_LSR:
- t_gen_lsr(dst, a, b);
- break;
- case CC_OP_ASR:
- t_gen_asr(dst, a, b);
- break;
- case CC_OP_NEG:
- tcg_gen_neg_tl(dst, b);
- /* Extended arithmetic. */
- t_gen_subx_carry(dc, dst);
- break;
- case CC_OP_LZ:
- tcg_gen_clzi_tl(dst, b, TARGET_LONG_BITS);
- break;
- case CC_OP_MULS:
- tcg_gen_muls2_tl(dst, cpu_PR[PR_MOF], a, b);
- break;
- case CC_OP_MULU:
- tcg_gen_mulu2_tl(dst, cpu_PR[PR_MOF], a, b);
- break;
- case CC_OP_DSTEP:
- t_gen_cris_dstep(dst, a, b);
- break;
- case CC_OP_MSTEP:
- t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
- break;
- case CC_OP_BOUND:
- tcg_gen_movcond_tl(TCG_COND_LEU, dst, a, b, a, b);
- break;
- case CC_OP_CMP:
- tcg_gen_sub_tl(dst, a, b);
- /* Extended arithmetic. */
- t_gen_subx_carry(dc, dst);
- break;
- default:
- qemu_log_mask(LOG_GUEST_ERROR, "illegal ALU op.\n");
- BUG();
- break;
- }
-
- if (size == 1) {
- tcg_gen_andi_tl(dst, dst, 0xff);
- } else if (size == 2) {
- tcg_gen_andi_tl(dst, dst, 0xffff);
- }
-}
-
-static void cris_alu(DisasContext *dc, int op,
- TCGv d, TCGv op_a, TCGv op_b, int size)
-{
- TCGv tmp;
- int writeback;
-
- writeback = 1;
-
- if (op == CC_OP_CMP) {
- tmp = tcg_temp_new();
- writeback = 0;
- } else if (size == 4) {
- tmp = d;
- writeback = 0;
- } else {
- tmp = tcg_temp_new();
- }
-
-
- cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
- cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
- cris_update_result(dc, tmp);
-
- /* Writeback. */
- if (writeback) {
- if (size == 1) {
- tcg_gen_andi_tl(d, d, ~0xff);
- } else {
- tcg_gen_andi_tl(d, d, ~0xffff);
- }
- tcg_gen_or_tl(d, d, tmp);
- }
-}
-
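The writeback branch above implements CRIS's sub-word register semantics: a .b or .w result replaces only the low 8 or 16 bits of the destination register and leaves the upper bits untouched. A standalone sketch, not from the tree (writeback is an invented name), of that merge:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Merge a size-1 or size-2 result into the low bits of a 32-bit register. */
static uint32_t writeback(uint32_t reg, uint32_t result, int size)
{
    uint32_t keep = (size == 1) ? ~0xffu : ~0xffffu;
    uint32_t low  = (size == 1) ? (result & 0xffu) : (result & 0xffffu);
    return (reg & keep) | low;
}

int main(void)
{
    /* A byte-sized add that wraps: only the low byte changes. */
    assert(writeback(0xAABBCCFFu, 0xFFu + 0x01u, 1) == 0xAABBCC00u);
    /* A word-sized move keeps the high halfword. */
    assert(writeback(0xDEADBEEFu, 0x1234u, 2) == 0xDEAD1234u);
    printf("ok\n");
    return 0;
}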
-static int arith_cc(DisasContext *dc)
-{
- if (dc->update_cc) {
- switch (dc->cc_op) {
- case CC_OP_ADDC: return 1;
- case CC_OP_ADD: return 1;
- case CC_OP_SUB: return 1;
- case CC_OP_DSTEP: return 1;
- case CC_OP_LSL: return 1;
- case CC_OP_LSR: return 1;
- case CC_OP_ASR: return 1;
- case CC_OP_CMP: return 1;
- case CC_OP_NEG: return 1;
- case CC_OP_OR: return 1;
- case CC_OP_AND: return 1;
- case CC_OP_XOR: return 1;
- case CC_OP_MULU: return 1;
- case CC_OP_MULS: return 1;
- default:
- return 0;
- }
- }
- return 0;
-}
-
-static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
-{
- int arith_opt, move_opt;
-
- /* TODO: optimize more condition codes. */
-
- /*
- * If the flags are live, we have to look into the bits of CCS.
- * Otherwise, if we just did an arithmetic operation we try to
- * evaluate the condition code faster.
- *
- * When this function is done, T0 should be non-zero if the condition
- * code is true.
- */
- arith_opt = arith_cc(dc) && !dc->flags_uptodate;
- move_opt = (dc->cc_op == CC_OP_MOVE);
- switch (cond) {
- case CC_EQ:
- if ((arith_opt || move_opt)
- && dc->cc_x_uptodate != (2 | X_FLAG)) {
- tcg_gen_setcondi_tl(TCG_COND_EQ, cc, cc_result, 0);
- } else {
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc,
- cpu_PR[PR_CCS], Z_FLAG);
- }
- break;
- case CC_NE:
- if ((arith_opt || move_opt)
- && dc->cc_x_uptodate != (2 | X_FLAG)) {
- tcg_gen_mov_tl(cc, cc_result);
- } else {
- cris_evaluate_flags(dc);
- tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
- Z_FLAG);
- tcg_gen_andi_tl(cc, cc, Z_FLAG);
- }
- break;
- case CC_CS:
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
- break;
- case CC_CC:
- cris_evaluate_flags(dc);
- tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
- tcg_gen_andi_tl(cc, cc, C_FLAG);
- break;
- case CC_VS:
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
- break;
- case CC_VC:
- cris_evaluate_flags(dc);
- tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
- V_FLAG);
- tcg_gen_andi_tl(cc, cc, V_FLAG);
- break;
- case CC_PL:
- if (arith_opt || move_opt) {
- int bits = 31;
-
- if (dc->cc_size == 1) {
- bits = 7;
- } else if (dc->cc_size == 2) {
- bits = 15;
- }
-
- tcg_gen_shri_tl(cc, cc_result, bits);
- tcg_gen_xori_tl(cc, cc, 1);
- } else {
- cris_evaluate_flags(dc);
- tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
- N_FLAG);
- tcg_gen_andi_tl(cc, cc, N_FLAG);
- }
- break;
- case CC_MI:
- if (arith_opt || move_opt) {
- int bits = 31;
-
- if (dc->cc_size == 1) {
- bits = 7;
- } else if (dc->cc_size == 2) {
- bits = 15;
- }
-
- tcg_gen_shri_tl(cc, cc_result, bits);
- tcg_gen_andi_tl(cc, cc, 1);
- } else {
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
- N_FLAG);
- }
- break;
- case CC_LS:
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
- C_FLAG | Z_FLAG);
- break;
- case CC_HI:
- cris_evaluate_flags(dc);
- {
- TCGv tmp;
-
- tmp = tcg_temp_new();
- tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
- C_FLAG | Z_FLAG);
- /* Overlay the C flag on top of the Z. */
- tcg_gen_shli_tl(cc, tmp, 2);
- tcg_gen_and_tl(cc, tmp, cc);
- tcg_gen_andi_tl(cc, cc, Z_FLAG);
- }
- break;
- case CC_GE:
- cris_evaluate_flags(dc);
- /* Overlay the V flag on top of the N. */
- tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
- tcg_gen_xor_tl(cc,
- cpu_PR[PR_CCS], cc);
- tcg_gen_andi_tl(cc, cc, N_FLAG);
- tcg_gen_xori_tl(cc, cc, N_FLAG);
- break;
- case CC_LT:
- cris_evaluate_flags(dc);
- /* Overlay the V flag on top of the N. */
- tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
- tcg_gen_xor_tl(cc,
- cpu_PR[PR_CCS], cc);
- tcg_gen_andi_tl(cc, cc, N_FLAG);
- break;
- case CC_GT:
- cris_evaluate_flags(dc);
- {
- TCGv n, z;
-
- n = tcg_temp_new();
- z = tcg_temp_new();
-
- /* To avoid a shift we overlay everything on
- the V flag. */
- tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
- tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
- /* invert Z. */
- tcg_gen_xori_tl(z, z, 2);
-
- tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
- tcg_gen_xori_tl(n, n, 2);
- tcg_gen_and_tl(cc, z, n);
- tcg_gen_andi_tl(cc, cc, 2);
- }
- break;
- case CC_LE:
- cris_evaluate_flags(dc);
- {
- TCGv n, z;
-
- n = tcg_temp_new();
- z = tcg_temp_new();
-
- /* To avoid a shift we overlay everything on
- the V flag. */
- tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
- tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
-
- tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
- tcg_gen_or_tl(cc, z, n);
- tcg_gen_andi_tl(cc, cc, 2);
- }
- break;
- case CC_P:
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
- break;
- case CC_A:
- tcg_gen_movi_tl(cc, 1);
- break;
- default:
- BUG();
- break;
- };
-}
-
-static void cris_store_direct_jmp(DisasContext *dc)
-{
- /* Store the direct jmp state into the cpu-state. */
- if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
- if (dc->jmp == JMP_DIRECT) {
- tcg_gen_movi_tl(env_btaken, 1);
- }
- tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
- dc->jmp = JMP_INDIRECT;
- }
-}
-
-static void cris_prepare_cc_branch (DisasContext *dc,
- int offset, int cond)
-{
- /* This helps us re-schedule the micro-code for insns in delay slots
- ahead of the actual jump. */
- dc->delayed_branch = 2;
- dc->jmp = JMP_DIRECT_CC;
- dc->jmp_pc = dc->pc + offset;
-
- gen_tst_cc(dc, env_btaken, cond);
- tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
-}
-
-
-/* Prepare a jump, e.g. when the dest is in a live reg. Direct should be set
- when the dest addr is constant, to allow TB chaining. */
-static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
-{
- /* This helps us re-schedule the micro-code for insns in delay slots
- ahead of the actual jump. */
- dc->delayed_branch = 2;
- dc->jmp = type;
- if (type == JMP_INDIRECT) {
- tcg_gen_movi_tl(env_btaken, 1);
- }
-}
-
-static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
-{
- /* If we get a fault in a delay slot we must keep the jmp state in
- the cpu-state to be able to re-execute the jmp. */
- if (dc->delayed_branch == 1) {
- cris_store_direct_jmp(dc);
- }
-
- tcg_gen_qemu_ld_i64(dst, addr, dc->mem_index, MO_TEUQ);
-}
-
-static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
- unsigned int size, int sign)
-{
- /* If we get a fault in a delay slot we must keep the jmp state in
- the cpu-state to be able to re-execute the jmp. */
- if (dc->delayed_branch == 1) {
- cris_store_direct_jmp(dc);
- }
-
- tcg_gen_qemu_ld_tl(dst, addr, dc->mem_index,
- MO_TE + ctz32(size) + (sign ? MO_SIGN : 0));
-}
-
-static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
- unsigned int size)
-{
- /* If we get a fault in a delay slot we must keep the jmp state in
- the cpu-state to be able to re-execute the jmp. */
- if (dc->delayed_branch == 1) {
- cris_store_direct_jmp(dc);
- }
-
-
- /* Conditional writes. We only support the kind where X and P are known
- at translation time. */
- if (dc->flags_x && (dc->tb_flags & P_FLAG)) {
- dc->postinc = 0;
- cris_evaluate_flags(dc);
- tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
- return;
- }
-
- tcg_gen_qemu_st_tl(val, addr, dc->mem_index, MO_TE + ctz32(size));
-
- if (dc->flags_x) {
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
- }
-}
-
-static inline void t_gen_sext(TCGv d, TCGv s, int size)
-{
- if (size == 1) {
- tcg_gen_ext8s_i32(d, s);
- } else if (size == 2) {
- tcg_gen_ext16s_i32(d, s);
- } else {
- tcg_gen_mov_tl(d, s);
- }
-}
-
-static inline void t_gen_zext(TCGv d, TCGv s, int size)
-{
- if (size == 1) {
- tcg_gen_ext8u_i32(d, s);
- } else if (size == 2) {
- tcg_gen_ext16u_i32(d, s);
- } else {
- tcg_gen_mov_tl(d, s);
- }
-}
-
-#if DISAS_CRIS
-static char memsize_char(int size)
-{
- switch (size) {
- case 1: return 'b';
- case 2: return 'w';
- case 4: return 'd';
- default:
- return 'x';
- }
-}
-#endif
-
-static inline unsigned int memsize_z(DisasContext *dc)
-{
- return dc->zsize + 1;
-}
-
-static inline unsigned int memsize_zz(DisasContext *dc)
-{
- switch (dc->zzsize) {
- case 0: return 1;
- case 1: return 2;
- default:
- return 4;
- }
-}
-
-static inline void do_postinc (DisasContext *dc, int size)
-{
- if (dc->postinc) {
- tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
- }
-}
-
-static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
- int size, int s_ext, TCGv dst)
-{
- if (s_ext) {
- t_gen_sext(dst, cpu_R[rs], size);
- } else {
- t_gen_zext(dst, cpu_R[rs], size);
- }
-}
-
-/* Prepare T0 and T1 for a register alu operation.
- s_ext decides if the operand1 should be sign-extended or zero-extended when
- needed. */
-static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
- int size, int s_ext, TCGv dst, TCGv src)
-{
- dec_prep_move_r(dc, rs, rd, size, s_ext, src);
-
- if (s_ext) {
- t_gen_sext(dst, cpu_R[rd], size);
- } else {
- t_gen_zext(dst, cpu_R[rd], size);
- }
-}
-
-static int dec_prep_move_m(CPUCRISState *env, DisasContext *dc,
- int s_ext, int memsize, TCGv dst)
-{
- unsigned int rs;
- uint32_t imm;
- int is_imm;
- int insn_len = 2;
-
- rs = dc->op1;
- is_imm = rs == 15 && dc->postinc;
-
- /* Load [$rs] onto T1. */
- if (is_imm) {
- insn_len = 2 + memsize;
- if (memsize == 1) {
- insn_len++;
- }
-
- imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
- tcg_gen_movi_tl(dst, imm);
- dc->postinc = 0;
- } else {
- cris_flush_cc_state(dc);
- gen_load(dc, dst, cpu_R[rs], memsize, 0);
- if (s_ext) {
- t_gen_sext(dst, dst, memsize);
- } else {
- t_gen_zext(dst, dst, memsize);
- }
- }
- return insn_len;
-}
-
-/* Prepare T0 and T1 for a memory + alu operation.
- s_ext decides if the operand1 should be sign-extended or zero-extended when
- needed. */
-static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
- int s_ext, int memsize, TCGv dst, TCGv src)
-{
- int insn_len;
-
- insn_len = dec_prep_move_m(env, dc, s_ext, memsize, src);
- tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
- return insn_len;
-}
-
-#if DISAS_CRIS
-static const char *cc_name(int cc)
-{
- static const char * const cc_names[16] = {
- "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
- "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
- };
- assert(cc < 16);
- return cc_names[cc];
-}
-#endif
-
-/* Start of insn decoders. */
-
-static int dec_bccq(CPUCRISState *env, DisasContext *dc)
-{
- int32_t offset;
- int sign;
- uint32_t cond = dc->op2;
-
- offset = EXTRACT_FIELD(dc->ir, 1, 7);
- sign = EXTRACT_FIELD(dc->ir, 0, 0);
-
- offset *= 2;
- offset |= sign << 8;
- offset = sign_extend(offset, 8);
-
- LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
-
- /* op2 holds the condition-code. */
- cris_cc_mask(dc, 0);
- cris_prepare_cc_branch(dc, offset, cond);
- return 2;
-}
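dec_bccq assembles a signed 9-bit displacement: bits 7..1 of the instruction word give the halfword count, bit 0 supplies the sign at bit 8, and the result is sign-extended. A standalone sketch, not from the tree (bccq_offset and the sample words are invented; sign_extend mirrors the helper earlier in this file), of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))

static int sign_extend(unsigned int val, unsigned int width)
{
    int sval;
    val <<= 31 - width;
    sval = val;
    return sval >> (31 - width);
}

static int bccq_offset(uint16_t ir)
{
    int offset = EXTRACT_FIELD(ir, 1, 7);   /* 7-bit magnitude */
    int sign = EXTRACT_FIELD(ir, 0, 0);     /* goes to bit 8 */
    offset *= 2;
    offset |= sign << 8;
    return sign_extend(offset, 8);
}

int main(void)
{
    printf("%d\n", bccq_offset(0x0000)); /* bits 7..1 = 0, sign = 0 ->  0 */
    printf("%d\n", bccq_offset(0x0002)); /* bits 7..1 = 1, sign = 0 -> +2 */
    printf("%d\n", bccq_offset(0x00FF)); /* bits 7..1 = 0x7F, sign = 1 -> -2 */
    return 0;
}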
-static int dec_addoq(CPUCRISState *env, DisasContext *dc)
-{
- int32_t imm;
-
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
- imm = sign_extend(dc->op1, 7);
-
- LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
- cris_cc_mask(dc, 0);
- /* Fetch register operand. */
- tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
-
- return 2;
-}
-static int dec_addq(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c;
- LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
-
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
-
- c = tcg_constant_tl(dc->op1);
- cris_alu(dc, CC_OP_ADD,
- cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
- return 2;
-}
-static int dec_moveq(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
-
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
- imm = sign_extend(dc->op1, 5);
- LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
-
- tcg_gen_movi_tl(cpu_R[dc->op2], imm);
- return 2;
-}
-static int dec_subq(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c;
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
-
- LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(dc->op1);
- cris_alu(dc, CC_OP_SUB,
- cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
- return 2;
-}
-static int dec_cmpq(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
- TCGv c;
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
- imm = sign_extend(dc->op1, 5);
-
- LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZVC);
-
- c = tcg_constant_tl(imm);
- cris_alu(dc, CC_OP_CMP,
- cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
- return 2;
-}
-static int dec_andq(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
- TCGv c;
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
- imm = sign_extend(dc->op1, 5);
-
- LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
-
- c = tcg_constant_tl(imm);
- cris_alu(dc, CC_OP_AND,
- cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
- return 2;
-}
-static int dec_orq(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
- TCGv c;
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
- imm = sign_extend(dc->op1, 5);
- LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
-
- c = tcg_constant_tl(imm);
- cris_alu(dc, CC_OP_OR,
- cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
- return 2;
-}
-static int dec_btstq(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c;
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
- LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_constant_tl(dc->op1);
- cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
- c, cpu_PR[PR_CCS]);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- dc->flags_uptodate = 1;
- return 2;
-}
-static int dec_asrq(CPUCRISState *env, DisasContext *dc)
-{
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
- LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
-
- tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2],
- cpu_R[dc->op2], cpu_R[dc->op2], 4);
- return 2;
-}
-static int dec_lslq(CPUCRISState *env, DisasContext *dc)
-{
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
- LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
-
- tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
-
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2],
- cpu_R[dc->op2], cpu_R[dc->op2], 4);
- return 2;
-}
-static int dec_lsrq(CPUCRISState *env, DisasContext *dc)
-{
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
- LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
-
- tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2],
- cpu_R[dc->op2], cpu_R[dc->op2], 4);
- return 2;
-}
-
-static int dec_move_r(CPUCRISState *env, DisasContext *dc)
-{
- int size = memsize_zz(dc);
-
- LOG_DIS("move.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- if (size == 4) {
- dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_update_cc_op(dc, CC_OP_MOVE, 4);
- cris_update_cc_x(dc);
- cris_update_result(dc, cpu_R[dc->op2]);
- } else {
- TCGv t0;
-
- t0 = tcg_temp_new();
- dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2],
- cpu_R[dc->op2], t0, size);
- }
- return 2;
-}
-
-static int dec_scc_r(CPUCRISState *env, DisasContext *dc)
-{
- int cond = dc->op2;
-
- LOG_DIS("s%s $r%u\n",
- cc_name(cond), dc->op1);
-
- gen_tst_cc(dc, cpu_R[dc->op1], cond);
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->op1], cpu_R[dc->op1], 0);
-
- cris_cc_mask(dc, 0);
- return 2;
-}
-
-static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
-{
- if (size == 4) {
- t[0] = cpu_R[dc->op2];
- t[1] = cpu_R[dc->op1];
- } else {
- t[0] = tcg_temp_new();
- t[1] = tcg_temp_new();
- }
-}
-
-static int dec_and_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("and.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
-
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
- cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_lz_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- LOG_DIS("lz $r%u, $r%u\n",
- dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
- t0 = tcg_temp_new();
- dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
- cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-static int dec_lsl_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("lsl.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
- tcg_gen_andi_tl(t[1], t[1], 63);
- cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_lsr_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("lsr.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
- tcg_gen_andi_tl(t[1], t[1], 63);
- cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_asr_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("asr.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
- tcg_gen_andi_tl(t[1], t[1], 63);
- cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_muls_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("muls.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZV);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
-
- cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
- return 2;
-}
-
-static int dec_mulu_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
-
- LOG_DIS("mulu.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZV);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
-
- cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
- return 2;
-}
-
-
-static int dec_dstep_r(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_DSTEP,
- cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
- return 2;
-}
-
-static int dec_xor_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("xor.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- BUG_ON(size != 4); /* xor is dword. */
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
-
- cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
- return 2;
-}
-
-static int dec_bound_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv l0;
- int size = memsize_zz(dc);
- LOG_DIS("bound.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
- l0 = tcg_temp_new();
- dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
- cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
- return 2;
-}
-
-static int dec_cmp_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("cmp.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
-
- cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_abs_r(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("abs $r%u, $r%u\n",
- dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
-
- tcg_gen_abs_tl(cpu_R[dc->op2], cpu_R[dc->op1]);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
- return 2;
-}
-
-static int dec_add_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("add.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
-
- cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("addc $r%u, $r%u\n",
- dc->op1, dc->op2);
- cris_evaluate_flags(dc);
-
- /* Set for this insn. */
- dc->flags_x = X_FLAG;
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_ADDC,
- cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
- return 2;
-}
-
-static int dec_mcp_r(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("mcp $p%u, $r%u\n",
- dc->op2, dc->op1);
- cris_evaluate_flags(dc);
- cris_cc_mask(dc, CC_MASK_RNZV);
- cris_alu(dc, CC_OP_MCP,
- cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
- return 2;
-}
-
-#if DISAS_CRIS
-static char * swapmode_name(int mode, char *modename) {
- int i = 0;
- if (mode & 8) {
- modename[i++] = 'n';
- }
- if (mode & 4) {
- modename[i++] = 'w';
- }
- if (mode & 2) {
- modename[i++] = 'b';
- }
- if (mode & 1) {
- modename[i++] = 'r';
- }
- modename[i++] = 0;
- return modename;
-}
-#endif
-
-static int dec_swap_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
-#if DISAS_CRIS
- char modename[4];
-#endif
- LOG_DIS("swap%s $r%u\n",
- swapmode_name(dc->op2, modename), dc->op1);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- t0 = tcg_temp_new();
- tcg_gen_mov_tl(t0, cpu_R[dc->op1]);
- if (dc->op2 & 8) {
- tcg_gen_not_tl(t0, t0);
- }
- if (dc->op2 & 4) {
- t_gen_swapw(t0, t0);
- }
- if (dc->op2 & 2) {
- t_gen_swapb(t0, t0);
- }
- if (dc->op2 & 1) {
- t_gen_swapr(t0, t0);
- }
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
- return 2;
-}
-
-static int dec_or_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("or.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
- cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_addi_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- LOG_DIS("addi.%c $r%u, $r%u\n",
- memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
- cris_cc_mask(dc, 0);
- t0 = tcg_temp_new();
- tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
- tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
- return 2;
-}
-
-static int dec_addi_acr(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
- memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
- cris_cc_mask(dc, 0);
- t0 = tcg_temp_new();
- tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
- tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
- return 2;
-}
-
-static int dec_neg_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("neg.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
-
- cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-static int dec_btst_r(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("btst $r%u, $r%u\n",
- dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
- cpu_R[dc->op1], cpu_PR[PR_CCS]);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
- cpu_R[dc->op2], cpu_R[dc->op2], 4);
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- dc->flags_uptodate = 1;
- return 2;
-}
-
-static int dec_sub_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int size = memsize_zz(dc);
- LOG_DIS("sub.%c $r%u, $r%u\n",
- memsize_char(size), dc->op1, dc->op2);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu_alloc_temps(dc, size, t);
- dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
- cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
- return 2;
-}
-
-/* Zero extension. From size to dword. */
-static int dec_movu_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("movu.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- t0 = tcg_temp_new();
- dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-/* Sign extension. From size to dword. */
-static int dec_movs_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("movs.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZ);
- t0 = tcg_temp_new();
- /* Size can only be qi or hi. */
- t_gen_sext(t0, cpu_R[dc->op1], size);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
- return 2;
-}
-
- /* Zero extension. From size to dword. */
-static int dec_addu_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("addu.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t0 = tcg_temp_new();
- /* Size can only be qi or hi. */
- t_gen_zext(t0, cpu_R[dc->op1], size);
- cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-/* Sign extension. From size to dword. */
-static int dec_adds_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("adds.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t0 = tcg_temp_new();
- /* Size can only be qi or hi. */
- t_gen_sext(t0, cpu_R[dc->op1], size);
- cris_alu(dc, CC_OP_ADD,
- cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-/* Zero extension. From size to dword. */
-static int dec_subu_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("subu.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t0 = tcg_temp_new();
- /* Size can only be qi or hi. */
- t_gen_zext(t0, cpu_R[dc->op1], size);
- cris_alu(dc, CC_OP_SUB,
- cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-/* Sign extension. From size to dword. */
-static int dec_subs_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int size = memsize_z(dc);
- LOG_DIS("subs.%c $r%u, $r%u\n",
- memsize_char(size),
- dc->op1, dc->op2);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t0 = tcg_temp_new();
- /* Size can only be qi or hi. */
- t_gen_sext(t0, cpu_R[dc->op1], size);
- cris_alu(dc, CC_OP_SUB,
- cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
- return 2;
-}
-
-static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t flags;
- int set = (~dc->opcode >> 2) & 1;
-
-
- flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
- | EXTRACT_FIELD(dc->ir, 0, 3);
- if (set && flags == 0) {
- LOG_DIS("nop\n");
- return 2;
- } else if (!set && (flags & 0x20)) {
- LOG_DIS("di\n");
- } else {
- LOG_DIS("%sf %x\n", set ? "set" : "clr", flags);
- }
-
- /* User space is not allowed to touch these. Silently ignore. */
- if (dc->tb_flags & U_FLAG) {
- flags &= ~(S_FLAG | I_FLAG | U_FLAG);
- }
-
- if (flags & X_FLAG) {
- if (set) {
- dc->flags_x = X_FLAG;
- } else {
- dc->flags_x = 0;
- }
- }
-
- /* Break the TB if any of the SPI flags change. */
- if (flags & (P_FLAG | S_FLAG)) {
- tcg_gen_movi_tl(env_pc, dc->pc + 2);
- dc->base.is_jmp = DISAS_UPDATE;
- dc->cpustate_changed = 1;
- }
-
- /* For the I flag, only act on posedge. */
- if ((flags & I_FLAG)) {
- tcg_gen_movi_tl(env_pc, dc->pc + 2);
- dc->base.is_jmp = DISAS_UPDATE;
- dc->cpustate_changed = 1;
- }
-
-
- /* Simply decode the flags. */
- cris_evaluate_flags(dc);
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- cris_update_cc_x(dc);
- tcg_gen_movi_tl(cc_op, dc->cc_op);
-
- if (set) {
- if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
- /* Enter user mode. */
- t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
- tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
- dc->cpustate_changed = 1;
- }
- tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
- } else {
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
- }
-
- dc->flags_uptodate = 1;
- dc->clear_x = 0;
- return 2;
-}
-
-static int dec_move_rs(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c2, c1;
- LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
- c1 = tcg_constant_tl(dc->op1);
- c2 = tcg_constant_tl(dc->op2);
- cris_cc_mask(dc, 0);
- gen_helper_movl_sreg_reg(tcg_env, c2, c1);
- return 2;
-}
-static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c2, c1;
- LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
- c1 = tcg_constant_tl(dc->op1);
- c2 = tcg_constant_tl(dc->op2);
- cris_cc_mask(dc, 0);
- gen_helper_movl_reg_sreg(tcg_env, c1, c2);
- return 2;
-}
-
-static int dec_move_rp(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
- cris_cc_mask(dc, 0);
-
- t[0] = tcg_temp_new();
- if (dc->op2 == PR_CCS) {
- cris_evaluate_flags(dc);
- tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
- if (dc->tb_flags & U_FLAG) {
- t[1] = tcg_temp_new();
- /* User space is not allowed to touch all flags. */
- tcg_gen_andi_tl(t[0], t[0], 0x39f);
- tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
- tcg_gen_or_tl(t[0], t[1], t[0]);
- }
- } else {
- tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
- }
-
- t_gen_mov_preg_TN(dc, dc->op2, t[0]);
- if (dc->op2 == PR_CCS) {
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- dc->flags_uptodate = 1;
- }
- return 2;
-}
-static int dec_move_pr(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
- cris_cc_mask(dc, 0);
-
- if (dc->op2 == PR_CCS) {
- cris_evaluate_flags(dc);
- }
-
- if (dc->op2 == PR_DZ) {
- tcg_gen_movi_tl(cpu_R[dc->op1], 0);
- } else {
- t0 = tcg_temp_new();
- t_gen_mov_TN_preg(t0, dc->op2);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op1], cpu_R[dc->op1], t0,
- preg_sizes[dc->op2]);
- }
- return 2;
-}
-
-static int dec_move_mr(CPUCRISState *env, DisasContext *dc)
-{
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("move.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- if (memsize == 4) {
- insn_len = dec_prep_move_m(env, dc, 0, 4, cpu_R[dc->op2]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_update_cc_op(dc, CC_OP_MOVE, 4);
- cris_update_cc_x(dc);
- cris_update_result(dc, cpu_R[dc->op2]);
- } else {
- TCGv t0;
-
- t0 = tcg_temp_new();
- insn_len = dec_prep_move_m(env, dc, 0, memsize, t0);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
- }
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static inline void cris_alu_m_alloc_temps(TCGv *t)
-{
- t[0] = tcg_temp_new();
- t[1] = tcg_temp_new();
-}
-
-static int dec_movs_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("movs.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- /* sign extend. */
- insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_addu_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("addu.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- /* zero extend. */
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_ADD,
- cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_adds_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("adds.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- /* sign extend. */
- insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_subu_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("subu.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- /* zero extend. */
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_subs_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("subs.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- /* sign extend. */
- insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_movu_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
-
- LOG_DIS("movu.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_cmps_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_z(dc);
- int insn_len;
- LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_CMP,
- cpu_R[dc->op2], cpu_R[dc->op2], t[1],
- memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_cmp_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_CMP,
- cpu_R[dc->op2], cpu_R[dc->op2], t[1],
- memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_test_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2], c;
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("test.%c [$r%u%s] op2=%x\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_evaluate_flags(dc);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
-
- c = tcg_constant_tl(0);
- cris_alu(dc, CC_OP_CMP,
- cpu_R[dc->op2], t[1], c, memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_and_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("and.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_add_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("add.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_ADD,
- cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_addo_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("add.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
- cris_cc_mask(dc, 0);
- cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_bound_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv l[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("bound.%c [$r%u%s, $r%u\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- l[0] = tcg_temp_new();
- l[1] = tcg_temp_new();
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int insn_len = 2;
- LOG_DIS("addc [$r%u%s, $r%u\n",
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_evaluate_flags(dc);
-
- /* Set for this insn. */
- dc->flags_x = X_FLAG;
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, 4, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
- do_postinc(dc, 4);
- return insn_len;
-}
-
-static int dec_sub_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2, dc->ir, dc->zzsize);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_or_m(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len;
- LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
- memsize_char(memsize),
- dc->op1, dc->postinc ? "+]" : "]",
- dc->op2, dc->pc);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_OR,
- cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_move_mp(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t[2];
- int memsize = memsize_zz(dc);
- int insn_len = 2;
-
- LOG_DIS("move.%c [$r%u%s, $p%u\n",
- memsize_char(memsize),
- dc->op1,
- dc->postinc ? "+]" : "]",
- dc->op2);
-
- cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
- cris_cc_mask(dc, 0);
- if (dc->op2 == PR_CCS) {
- cris_evaluate_flags(dc);
- if (dc->tb_flags & U_FLAG) {
- /* User space is not allowed to touch all flags. */
- tcg_gen_andi_tl(t[1], t[1], 0x39f);
- tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
- tcg_gen_or_tl(t[1], t[0], t[1]);
- }
- }
-
- t_gen_mov_preg_TN(dc, dc->op2, t[1]);
-
- do_postinc(dc, memsize);
- return insn_len;
-}
-
-static int dec_move_pm(CPUCRISState *env, DisasContext *dc)
-{
- TCGv t0;
- int memsize;
-
- memsize = preg_sizes[dc->op2];
-
- LOG_DIS("move.%c $p%u, [$r%u%s\n",
- memsize_char(memsize),
- dc->op2, dc->op1, dc->postinc ? "+]" : "]");
-
- /* prepare store. Address in T0, value in T1. */
- if (dc->op2 == PR_CCS) {
- cris_evaluate_flags(dc);
- }
- t0 = tcg_temp_new();
- t_gen_mov_TN_preg(t0, dc->op2);
- cris_flush_cc_state(dc);
- gen_store(dc, cpu_R[dc->op1], t0, memsize);
-
- cris_cc_mask(dc, 0);
- if (dc->postinc) {
- tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
- }
- return 2;
-}
-
-static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
-{
- TCGv_i64 tmp[16];
- TCGv tmp32;
- TCGv addr;
- int i;
- int nr = dc->op2 + 1;
-
- LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
- dc->postinc ? "+]" : "]", dc->op2);
-
- addr = tcg_temp_new();
- /* There are probably better ways of doing this. */
- cris_flush_cc_state(dc);
- for (i = 0; i < (nr >> 1); i++) {
- tmp[i] = tcg_temp_new_i64();
- tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
- gen_load64(dc, tmp[i], addr);
- }
- if (nr & 1) {
- tmp32 = tcg_temp_new_i32();
- tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
- gen_load(dc, tmp32, addr, 4, 0);
- } else {
- tmp32 = NULL;
- }
-
- for (i = 0; i < (nr >> 1); i++) {
- tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
- tcg_gen_shri_i64(tmp[i], tmp[i], 32);
- tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
- }
- if (nr & 1) {
- tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
- }
-
- /* writeback the updated pointer value. */
- if (dc->postinc) {
- tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
- }
-
- /* gen_load might want to evaluate the previous insn's flags. */
- cris_cc_mask(dc, 0);
- return 2;
-}
-
-static int dec_movem_rm(CPUCRISState *env, DisasContext *dc)
-{
- TCGv tmp;
- TCGv addr;
- int i;
-
- LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
- dc->postinc ? "+]" : "]");
-
- cris_flush_cc_state(dc);
-
- tmp = tcg_temp_new();
- addr = tcg_temp_new();
- tcg_gen_movi_tl(tmp, 4);
- tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
- for (i = 0; i <= dc->op2; i++) {
- /* Displace addr. */
- /* Perform the store. */
- gen_store(dc, addr, cpu_R[i], 4);
- tcg_gen_add_tl(addr, addr, tmp);
- }
- if (dc->postinc) {
- tcg_gen_mov_tl(cpu_R[dc->op1], addr);
- }
- cris_cc_mask(dc, 0);
- return 2;
-}
-
-static int dec_move_rm(CPUCRISState *env, DisasContext *dc)
-{
- int memsize;
-
- memsize = memsize_zz(dc);
-
- LOG_DIS("move.%c $r%u, [$r%u]\n",
- memsize_char(memsize), dc->op2, dc->op1);
-
- /* prepare store. */
- cris_flush_cc_state(dc);
- gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
-
- if (dc->postinc) {
- tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
- }
- cris_cc_mask(dc, 0);
- return 2;
-}
-
-static int dec_lapcq(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("lapcq %x, $r%u\n",
- dc->pc + dc->op1*2, dc->op2);
- cris_cc_mask(dc, 0);
- tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
- return 2;
-}
-
-static int dec_lapc_im(CPUCRISState *env, DisasContext *dc)
-{
- unsigned int rd;
- int32_t imm;
- int32_t pc;
-
- rd = dc->op2;
-
- cris_cc_mask(dc, 0);
- imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
- LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
-
- pc = dc->pc;
- pc += imm;
- tcg_gen_movi_tl(cpu_R[rd], pc);
- return 6;
-}
-
-/* Jump to special reg. */
-static int dec_jump_p(CPUCRISState *env, DisasContext *dc)
-{
- LOG_DIS("jump $p%u\n", dc->op2);
-
- if (dc->op2 == PR_CCS) {
- cris_evaluate_flags(dc);
- }
- t_gen_mov_TN_preg(env_btarget, dc->op2);
- /* rete will often have low bit set to indicate delayslot. */
- tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
- cris_cc_mask(dc, 0);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- return 2;
-}
-
-/* Jump and save. */
-static int dec_jas_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c;
- LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
- cris_cc_mask(dc, 0);
- /* Store the return address in Pd. */
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
- if (dc->op2 > 15) {
- abort();
- }
- c = tcg_constant_tl(dc->pc + 4);
- t_gen_mov_preg_TN(dc, dc->op2, c);
-
- cris_prepare_jmp(dc, JMP_INDIRECT);
- return 2;
-}
-
-static int dec_jas_im(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
- TCGv c;
-
- imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
-
- LOG_DIS("jas 0x%x\n", imm);
- cris_cc_mask(dc, 0);
- c = tcg_constant_tl(dc->pc + 8);
- /* Store the return address in Pd. */
- t_gen_mov_preg_TN(dc, dc->op2, c);
-
- dc->jmp_pc = imm;
- cris_prepare_jmp(dc, JMP_DIRECT);
- return 6;
-}
-
-static int dec_jasc_im(CPUCRISState *env, DisasContext *dc)
-{
- uint32_t imm;
- TCGv c;
-
- imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
-
- LOG_DIS("jasc 0x%x\n", imm);
- cris_cc_mask(dc, 0);
- c = tcg_constant_tl(dc->pc + 8 + 4);
- /* Store the return address in Pd. */
- t_gen_mov_preg_TN(dc, dc->op2, c);
-
- dc->jmp_pc = imm;
- cris_prepare_jmp(dc, JMP_DIRECT);
- return 6;
-}
-
-static int dec_jasc_r(CPUCRISState *env, DisasContext *dc)
-{
- TCGv c;
- LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
- cris_cc_mask(dc, 0);
- /* Store the return address in Pd. */
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
- c = tcg_constant_tl(dc->pc + 4 + 4);
- t_gen_mov_preg_TN(dc, dc->op2, c);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- return 2;
-}
-
-static int dec_bcc_im(CPUCRISState *env, DisasContext *dc)
-{
- int32_t offset;
- uint32_t cond = dc->op2;
-
- offset = cris_fetch(env, dc, dc->pc + 2, 2, 1);
-
- LOG_DIS("b%s %d pc=%x dst=%x\n",
- cc_name(cond), offset,
- dc->pc, dc->pc + offset);
-
- cris_cc_mask(dc, 0);
- /* op2 holds the condition-code. */
- cris_prepare_cc_branch(dc, offset, cond);
- return 4;
-}
-
-static int dec_bas_im(CPUCRISState *env, DisasContext *dc)
-{
- int32_t simm;
- TCGv c;
-
- simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
-
- LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
- cris_cc_mask(dc, 0);
- c = tcg_constant_tl(dc->pc + 8);
- /* Store the return address in Pd. */
- t_gen_mov_preg_TN(dc, dc->op2, c);
-
- dc->jmp_pc = dc->pc + simm;
- cris_prepare_jmp(dc, JMP_DIRECT);
- return 6;
-}
-
-static int dec_basc_im(CPUCRISState *env, DisasContext *dc)
-{
- int32_t simm;
- TCGv c;
- simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
-
- LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
- cris_cc_mask(dc, 0);
- c = tcg_constant_tl(dc->pc + 12);
- /* Store the return address in Pd. */
- t_gen_mov_preg_TN(dc, dc->op2, c);
-
- dc->jmp_pc = dc->pc + simm;
- cris_prepare_jmp(dc, JMP_DIRECT);
- return 6;
-}
-
-static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
-{
- cris_cc_mask(dc, 0);
-
- if (dc->op2 == 15) {
- tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
- -offsetof(CRISCPU, env) + offsetof(CPUState, halted));
- tcg_gen_movi_tl(env_pc, dc->pc + 2);
- t_gen_raise_exception(EXCP_HLT);
- dc->base.is_jmp = DISAS_NORETURN;
- return 2;
- }
-
- switch (dc->op2 & 7) {
- case 2:
- /* rfe. */
- LOG_DIS("rfe\n");
- cris_evaluate_flags(dc);
- gen_helper_rfe(tcg_env);
- dc->base.is_jmp = DISAS_UPDATE;
- dc->cpustate_changed = true;
- break;
- case 5:
- /* rfn. */
- LOG_DIS("rfn\n");
- cris_evaluate_flags(dc);
- gen_helper_rfn(tcg_env);
- dc->base.is_jmp = DISAS_UPDATE;
- dc->cpustate_changed = true;
- break;
- case 6:
- LOG_DIS("break %d\n", dc->op1);
- cris_evaluate_flags(dc);
- /* break. */
- tcg_gen_movi_tl(env_pc, dc->pc + 2);
-
- /* Breaks start at 16 in the exception vector. */
- t_gen_movi_env_TN(trap_vector, dc->op1 + 16);
- t_gen_raise_exception(EXCP_BREAK);
- dc->base.is_jmp = DISAS_NORETURN;
- break;
- default:
- printf("op2=%x\n", dc->op2);
- BUG();
- break;
-
- }
- return 2;
-}
-
-static int dec_ftag_fidx_d_m(CPUCRISState *env, DisasContext *dc)
-{
- return 2;
-}
-
-static int dec_ftag_fidx_i_m(CPUCRISState *env, DisasContext *dc)
-{
- return 2;
-}
-
-static int dec_null(CPUCRISState *env, DisasContext *dc)
-{
- printf("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
- dc->pc, dc->opcode, dc->op1, dc->op2);
- fflush(NULL);
- BUG();
- return 2;
-}
-
-static const struct decoder_info {
- struct {
- uint32_t bits;
- uint32_t mask;
- };
- int (*dec)(CPUCRISState *env, DisasContext *dc);
-} decinfo[] = {
- /* Order matters here. */
- {DEC_MOVEQ, dec_moveq},
- {DEC_BTSTQ, dec_btstq},
- {DEC_CMPQ, dec_cmpq},
- {DEC_ADDOQ, dec_addoq},
- {DEC_ADDQ, dec_addq},
- {DEC_SUBQ, dec_subq},
- {DEC_ANDQ, dec_andq},
- {DEC_ORQ, dec_orq},
- {DEC_ASRQ, dec_asrq},
- {DEC_LSLQ, dec_lslq},
- {DEC_LSRQ, dec_lsrq},
- {DEC_BCCQ, dec_bccq},
-
- {DEC_BCC_IM, dec_bcc_im},
- {DEC_JAS_IM, dec_jas_im},
- {DEC_JAS_R, dec_jas_r},
- {DEC_JASC_IM, dec_jasc_im},
- {DEC_JASC_R, dec_jasc_r},
- {DEC_BAS_IM, dec_bas_im},
- {DEC_BASC_IM, dec_basc_im},
- {DEC_JUMP_P, dec_jump_p},
- {DEC_LAPC_IM, dec_lapc_im},
- {DEC_LAPCQ, dec_lapcq},
-
- {DEC_RFE_ETC, dec_rfe_etc},
- {DEC_ADDC_MR, dec_addc_mr},
-
- {DEC_MOVE_MP, dec_move_mp},
- {DEC_MOVE_PM, dec_move_pm},
- {DEC_MOVEM_MR, dec_movem_mr},
- {DEC_MOVEM_RM, dec_movem_rm},
- {DEC_MOVE_PR, dec_move_pr},
- {DEC_SCC_R, dec_scc_r},
- {DEC_SETF, dec_setclrf},
- {DEC_CLEARF, dec_setclrf},
-
- {DEC_MOVE_SR, dec_move_sr},
- {DEC_MOVE_RP, dec_move_rp},
- {DEC_SWAP_R, dec_swap_r},
- {DEC_ABS_R, dec_abs_r},
- {DEC_LZ_R, dec_lz_r},
- {DEC_MOVE_RS, dec_move_rs},
- {DEC_BTST_R, dec_btst_r},
- {DEC_ADDC_R, dec_addc_r},
-
- {DEC_DSTEP_R, dec_dstep_r},
- {DEC_XOR_R, dec_xor_r},
- {DEC_MCP_R, dec_mcp_r},
- {DEC_CMP_R, dec_cmp_r},
-
- {DEC_ADDI_R, dec_addi_r},
- {DEC_ADDI_ACR, dec_addi_acr},
-
- {DEC_ADD_R, dec_add_r},
- {DEC_SUB_R, dec_sub_r},
-
- {DEC_ADDU_R, dec_addu_r},
- {DEC_ADDS_R, dec_adds_r},
- {DEC_SUBU_R, dec_subu_r},
- {DEC_SUBS_R, dec_subs_r},
- {DEC_LSL_R, dec_lsl_r},
-
- {DEC_AND_R, dec_and_r},
- {DEC_OR_R, dec_or_r},
- {DEC_BOUND_R, dec_bound_r},
- {DEC_ASR_R, dec_asr_r},
- {DEC_LSR_R, dec_lsr_r},
-
- {DEC_MOVU_R, dec_movu_r},
- {DEC_MOVS_R, dec_movs_r},
- {DEC_NEG_R, dec_neg_r},
- {DEC_MOVE_R, dec_move_r},
-
- {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
- {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
-
- {DEC_MULS_R, dec_muls_r},
- {DEC_MULU_R, dec_mulu_r},
-
- {DEC_ADDU_M, dec_addu_m},
- {DEC_ADDS_M, dec_adds_m},
- {DEC_SUBU_M, dec_subu_m},
- {DEC_SUBS_M, dec_subs_m},
-
- {DEC_CMPU_M, dec_cmpu_m},
- {DEC_CMPS_M, dec_cmps_m},
- {DEC_MOVU_M, dec_movu_m},
- {DEC_MOVS_M, dec_movs_m},
-
- {DEC_CMP_M, dec_cmp_m},
- {DEC_ADDO_M, dec_addo_m},
- {DEC_BOUND_M, dec_bound_m},
- {DEC_ADD_M, dec_add_m},
- {DEC_SUB_M, dec_sub_m},
- {DEC_AND_M, dec_and_m},
- {DEC_OR_M, dec_or_m},
- {DEC_MOVE_RM, dec_move_rm},
- {DEC_TEST_M, dec_test_m},
- {DEC_MOVE_MR, dec_move_mr},
-
- {{0, 0}, dec_null}
-};
-
-static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
-{
- int insn_len = 2;
- int i;
-
- /* Load a halfword onto the instruction register. */
- dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
-
- /* Now decode it. */
- dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
- dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
- dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
- dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
- dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
- dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
-
- /* Look the insn up in the decoder table. */
- for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
- if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
- insn_len = decinfo[i].dec(env, dc);
- break;
- }
- }
-
-#if !defined(CONFIG_USER_ONLY)
- /* Single-stepping ? */
- if (dc->tb_flags & S_FLAG) {
- TCGLabel *l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
- /* We treat SPC as a break with an odd trap vector. */
- cris_evaluate_flags(dc);
- t_gen_movi_env_TN(trap_vector, 3);
- tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
- tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
- t_gen_raise_exception(EXCP_BREAK);
- gen_set_label(l1);
- }
-#endif
- return insn_len;
-}
-
-#include "translate_v10.c.inc"
-
-/*
- * Delay slots on QEMU/CRIS.
- *
- * If an exception hits on a delayslot, the core will let ERP (the Exception
- * Return Pointer) point to the branch (the previous) insn and set the lsb
- * to give SW a hint that the exception actually hit on the dslot.
- *
- * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
- * the core and any jmp to an odd address will mask off that lsb. It is
- * simply there to let sw know there was an exception on a dslot.
- *
- * When the software returns from an exception, the branch will re-execute.
- * On QEMU care needs to be taken when a branch+delayslot sequence is broken
- * and the branch and delayslot don't share pages.
- *
- * The TB containing the branch insn will set up env->btarget and evaluate
- * env->btaken. When the translation loop exits we will note that the branch
- * sequence is broken and let env->dslot be the size of the branch insn (those
- * vary in length).
- *
- * The TB containing the delayslot will have the PC of its real insn (i.e. no
- * lsb set). It will also expect to have env->dslot set up with the size of the
- * delay slot so that env->pc - env->dslot points to the branch insn. This TB
- * will execute the dslot and take the branch, either to btarget or just one
- * insn ahead.
- *
- * When exceptions occur, we check for env->dslot in do_interrupt to detect
- * broken branch sequences and set up $erp accordingly (i.e. let it point to the
- * branch and set lsb). Then env->dslot gets cleared so that the exception
- * handler can enter. When returning from exceptions (jump $erp) the lsb gets
- * masked off and we will reexecute the branch insn.
- *
- */
-
-static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUCRISState *env = cpu_env(cs);
- uint32_t tb_flags = dc->base.tb->flags;
- uint32_t pc_start;
-
- if (env->pregs[PR_VR] == 32) {
- dc->decoder = crisv32_decoder;
- dc->clear_locked_irq = 0;
- } else {
- dc->decoder = crisv10_decoder;
- dc->clear_locked_irq = 1;
- }
-
- /*
- * Odd PC indicates that the branch is re-executing due to an exception in
- * the delayslot, like in real hw.
- */
- pc_start = dc->base.pc_first & ~1;
- dc->base.pc_first = pc_start;
- dc->base.pc_next = pc_start;
-
- dc->cpu = env_archcpu(env);
- dc->ppc = pc_start;
- dc->pc = pc_start;
- dc->mem_index = cpu_mmu_index(cs, false);
- dc->flags_uptodate = 1;
- dc->flags_x = tb_flags & X_FLAG;
- dc->cc_x_uptodate = 0;
- dc->cc_mask = 0;
- dc->update_cc = 0;
- dc->clear_prefix = 0;
- dc->cpustate_changed = 0;
-
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- dc->cc_size_uptodate = -1;
-
- /* Decode TB flags. */
- dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG);
- dc->delayed_branch = !!(tb_flags & 7);
- if (dc->delayed_branch) {
- dc->jmp = JMP_INDIRECT;
- } else {
- dc->jmp = JMP_NOJMP;
- }
-}
-
-static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu)
-{
-}
-
-static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
-}
-
-static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
- unsigned int insn_len;
-
- /* Pretty disas. */
- LOG_DIS("%8.8x:\t", dc->pc);
-
- dc->clear_x = 1;
-
- insn_len = dc->decoder(cpu_env(cs), dc);
- dc->ppc = dc->pc;
- dc->pc += insn_len;
- dc->base.pc_next += insn_len;
-
- if (dc->base.is_jmp == DISAS_NORETURN) {
- return;
- }
-
- if (dc->clear_x) {
- cris_clear_x_flag(dc);
- }
-
- /*
- * All branches are delayed branches, handled immediately below.
- * We don't expect to see odd combinations of exit conditions.
- */
- assert(dc->base.is_jmp == DISAS_NEXT || dc->cpustate_changed);
-
- if (dc->delayed_branch && --dc->delayed_branch == 0) {
- dc->base.is_jmp = DISAS_DBRANCH;
- return;
- }
-
- if (dc->base.is_jmp != DISAS_NEXT) {
- return;
- }
-
- /* Force an update if the per-tb cpu state has changed. */
- if (dc->cpustate_changed) {
- dc->base.is_jmp = DISAS_UPDATE_NEXT;
- return;
- }
-
- /*
- * FIXME: Only the first insn in the TB should cross a page boundary.
- * If we can detect the length of the next insn easily, we should.
- * In the meantime, simply stop when we do cross.
- */
- if ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) {
- dc->base.is_jmp = DISAS_TOO_MANY;
- }
-}
-
-static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
- DisasJumpType is_jmp = dc->base.is_jmp;
- target_ulong npc = dc->pc;
-
- if (is_jmp == DISAS_NORETURN) {
- /* If we have a broken branch+delayslot sequence, it's too late. */
- assert(dc->delayed_branch != 1);
- return;
- }
-
- if (dc->clear_locked_irq) {
- t_gen_movi_env_TN(locked_irq, 0);
- }
-
- /* Broken branch+delayslot sequence. */
- if (dc->delayed_branch == 1) {
- /* Set env->dslot to the size of the branch insn. */
- t_gen_movi_env_TN(dslot, dc->pc - dc->ppc);
- cris_store_direct_jmp(dc);
- }
-
- cris_evaluate_flags(dc);
-
- /* Evaluate delayed branch destination and fold to another is_jmp case. */
- if (is_jmp == DISAS_DBRANCH) {
- if (dc->base.tb->flags & 7) {
- t_gen_movi_env_TN(dslot, 0);
- }
-
- switch (dc->jmp) {
- case JMP_DIRECT:
- npc = dc->jmp_pc;
- is_jmp = dc->cpustate_changed ? DISAS_UPDATE_NEXT : DISAS_TOO_MANY;
- break;
-
- case JMP_DIRECT_CC:
- /*
- * Use a conditional branch if either taken or not-taken path
- * can use goto_tb. If neither can, then treat it as indirect.
- */
- if (likely(!dc->cpustate_changed)
- && (use_goto_tb(dc, dc->jmp_pc) || use_goto_tb(dc, npc))) {
- TCGLabel *not_taken = gen_new_label();
-
- tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, not_taken);
- gen_goto_tb(dc, 1, dc->jmp_pc);
- gen_set_label(not_taken);
-
- /* not-taken case handled below. */
- is_jmp = DISAS_TOO_MANY;
- break;
- }
- tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
- /* fall through */
-
- case JMP_INDIRECT:
- tcg_gen_movcond_tl(TCG_COND_NE, env_pc,
- env_btaken, tcg_constant_tl(0),
- env_btarget, tcg_constant_tl(npc));
- is_jmp = dc->cpustate_changed ? DISAS_UPDATE : DISAS_JUMP;
-
- /*
- * We have now consumed btaken and btarget. Hint to the
- * tcg compiler that the writeback to env may be dropped.
- */
- tcg_gen_discard_tl(env_btaken);
- tcg_gen_discard_tl(env_btarget);
- break;
-
- default:
- g_assert_not_reached();
- }
- }
-
- switch (is_jmp) {
- case DISAS_TOO_MANY:
- gen_goto_tb(dc, 0, npc);
- break;
- case DISAS_UPDATE_NEXT:
- tcg_gen_movi_tl(env_pc, npc);
- /* fall through */
- case DISAS_JUMP:
- tcg_gen_lookup_and_goto_ptr();
- break;
- case DISAS_UPDATE:
- /* Indicate that interrupts must be re-evaluated before the next TB. */
- tcg_gen_exit_tb(NULL, 0);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static const TranslatorOps cris_tr_ops = {
- .init_disas_context = cris_tr_init_disas_context,
- .tb_start = cris_tr_tb_start,
- .insn_start = cris_tr_insn_start,
- .translate_insn = cris_tr_translate_insn,
- .tb_stop = cris_tr_tb_stop,
-};
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
-{
- DisasContext dc;
- translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
-}
-
-void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- CPUCRISState *env = cpu_env(cs);
- const char * const *regnames;
- const char * const *pregnames;
- int i;
-
- if (!env) {
- return;
- }
- if (env->pregs[PR_VR] < 32) {
- pregnames = pregnames_v10;
- regnames = regnames_v10;
- } else {
- pregnames = pregnames_v32;
- regnames = regnames_v32;
- }
-
- qemu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
- "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
- env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
- env->cc_op,
- env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
-
-
- for (i = 0; i < 16; i++) {
- qemu_fprintf(f, "%s=%8.8x ", regnames[i], env->regs[i]);
- if ((i + 1) % 4 == 0) {
- qemu_fprintf(f, "\n");
- }
- }
- qemu_fprintf(f, "\nspecial regs:\n");
- for (i = 0; i < 16; i++) {
- qemu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
- if ((i + 1) % 4 == 0) {
- qemu_fprintf(f, "\n");
- }
- }
- if (env->pregs[PR_VR] >= 32) {
- uint32_t srs = env->pregs[PR_SRS];
- qemu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
- if (srs < ARRAY_SIZE(env->sregs)) {
- for (i = 0; i < 16; i++) {
- qemu_fprintf(f, "s%2.2d=%8.8x ",
- i, env->sregs[srs][i]);
- if ((i + 1) % 4 == 0) {
- qemu_fprintf(f, "\n");
- }
- }
- }
- }
- qemu_fprintf(f, "\n\n");
-
-}
-
-void cris_initialize_tcg(void)
-{
- int i;
-
- cc_x = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_x), "cc_x");
- cc_src = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_src), "cc_src");
- cc_dest = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_dest),
- "cc_dest");
- cc_result = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_result),
- "cc_result");
- cc_op = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_op), "cc_op");
- cc_size = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_size),
- "cc_size");
- cc_mask = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_mask),
- "cc_mask");
-
- env_pc = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, pc),
- "pc");
- env_btarget = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, btarget),
- "btarget");
- env_btaken = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, btaken),
- "btaken");
- for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, regs[i]),
- regnames_v32[i]);
- }
- for (i = 0; i < 16; i++) {
- cpu_PR[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, pregs[i]),
- pregnames_v32[i]);
- }
-}
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
deleted file mode 100644
index c15ff47..0000000
--- a/target/cris/translate_v10.c.inc
+++ /dev/null
@@ -1,1262 +0,0 @@
-/*
- * CRISv10 emulation for qemu: main translation routines.
- *
- * Copyright (c) 2010 AXIS Communications AB
- * Written by Edgar E. Iglesias.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "crisv10-decode.h"
-
-static const char * const regnames_v10[] =
-{
- "$r0", "$r1", "$r2", "$r3",
- "$r4", "$r5", "$r6", "$r7",
- "$r8", "$r9", "$r10", "$r11",
- "$r12", "$r13", "$sp", "$pc",
-};
-
-static const char * const pregnames_v10[] =
-{
- "$bz", "$vr", "$p2", "$p3",
- "$wz", "$ccr", "$p6-prefix", "$mof",
- "$dz", "$ibr", "$irp", "$srp",
- "$bar", "$dccr", "$brp", "$usp",
-};
-
-/* We need this table to handle preg-moves with implicit width. */
-static const int preg_sizes_v10[] = {
- 1, /* bz. */
- 1, /* vr. */
- 1, /* pid. */
- 1, /* srs. */
- 2, /* wz. */
- 2, 2, 4,
- 4, 4, 4, 4,
- 4, 4, 4, 4,
-};
-
-static inline int dec10_size(unsigned int size)
-{
- size++;
- if (size == 3)
- size++;
- return size;
-}
-
-static inline void cris_illegal_insn(DisasContext *dc)
-{
- qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc);
- t_gen_raise_exception(EXCP_BREAK);
- dc->base.is_jmp = DISAS_NORETURN;
-}
-
-static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
- unsigned int size, int mem_index)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv taddr = tcg_temp_new();
- TCGv tval = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- dc->postinc = 0;
- cris_evaluate_flags(dc);
-
- tcg_gen_mov_tl(taddr, addr);
- tcg_gen_mov_tl(tval, val);
-
- /* Store only if F flag isn't set */
- tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
-
- tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE);
-
- gen_set_label(l1);
- tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
- tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
-}
-
-static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
- unsigned int size)
-{
- /* If we get a fault on a delayslot we must keep the jmp state in
- the cpu-state to be able to re-execute the jmp. */
- if (dc->delayed_branch == 1) {
- cris_store_direct_jmp(dc);
- }
-
- /* Conditional writes. */
- if (dc->flags_x) {
- gen_store_v10_conditional(dc, addr, val, size, dc->mem_index);
- return;
- }
-
- tcg_gen_qemu_st_tl(val, addr, dc->mem_index, ctz32(size) | MO_TE);
-}
-
-
-/* Prefix flag and register are used to handle the more complex
- addressing modes. */
-static void cris_set_prefix(DisasContext *dc)
-{
- dc->clear_prefix = 0;
- dc->tb_flags |= PFIX_FLAG;
- tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG);
-
- /* prefix insns don't clear the x flag. */
- dc->clear_x = 0;
- cris_lock_irq(dc);
-}
-
-static void crisv10_prepare_memaddr(DisasContext *dc,
- TCGv addr, unsigned int size)
-{
- if (dc->tb_flags & PFIX_FLAG) {
- tcg_gen_mov_tl(addr, cpu_PR[PR_PREFIX]);
- } else {
- tcg_gen_mov_tl(addr, cpu_R[dc->src]);
- }
-}
-
-static unsigned int crisv10_post_memaddr(DisasContext *dc, unsigned int size)
-{
- unsigned int insn_len = 0;
-
- if (dc->tb_flags & PFIX_FLAG) {
- if (dc->mode == CRISV10_MODE_AUTOINC) {
- tcg_gen_mov_tl(cpu_R[dc->src], cpu_PR[PR_PREFIX]);
- }
- } else {
- if (dc->mode == CRISV10_MODE_AUTOINC) {
- if (dc->src == 15) {
- insn_len += size & ~1;
- } else {
- tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], size);
- }
- }
- }
- return insn_len;
-}
-
-static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc,
- int s_ext, int memsize, TCGv dst)
-{
- unsigned int rs;
- uint32_t imm;
- int is_imm;
- int insn_len = 0;
-
- rs = dc->src;
- is_imm = rs == 15 && !(dc->tb_flags & PFIX_FLAG);
- LOG_DIS("rs=%d rd=%d is_imm=%d mode=%d pfix=%d\n",
- rs, dc->dst, is_imm, dc->mode, dc->tb_flags & PFIX_FLAG);
-
- /* Load [$rs] onto T1. */
- if (is_imm) {
- imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
-
- tcg_gen_movi_tl(dst, imm);
-
- if (dc->mode == CRISV10_MODE_AUTOINC) {
- insn_len += memsize;
- if (memsize == 1)
- insn_len++;
- tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len);
- }
- } else {
- TCGv addr;
-
- addr = tcg_temp_new();
- cris_flush_cc_state(dc);
- crisv10_prepare_memaddr(dc, addr, memsize);
- gen_load(dc, dst, addr, memsize, 0);
- if (s_ext)
- t_gen_sext(dst, dst, memsize);
- else
- t_gen_zext(dst, dst, memsize);
- insn_len += crisv10_post_memaddr(dc, memsize);
- }
-
- if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) {
- dc->dst = dc->src;
- }
- return insn_len;
-}
-
-static unsigned int dec10_quick_imm(DisasContext *dc)
-{
- int32_t imm, simm;
- int op;
- TCGv c;
-
- /* sign extend. */
- imm = dc->ir & ((1 << 6) - 1);
- simm = (int8_t) (imm << 2);
- simm >>= 2;
- switch (dc->opcode) {
- case CRISV10_QIMM_BDAP_R0:
- case CRISV10_QIMM_BDAP_R1:
- case CRISV10_QIMM_BDAP_R2:
- case CRISV10_QIMM_BDAP_R3:
- simm = (int8_t)dc->ir;
- LOG_DIS("bdap %d $r%d\n", simm, dc->dst);
- LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
- dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
- cris_set_prefix(dc);
- if (dc->dst == 15) {
- tcg_gen_movi_tl(cpu_PR[PR_PREFIX], dc->pc + 2 + simm);
- } else {
- tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
- }
- break;
-
- case CRISV10_QIMM_MOVEQ:
- LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(simm);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_CMPQ:
- LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(simm);
- cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_ADDQ:
- LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(imm);
- cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_ANDQ:
- LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(simm);
- cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_ASHQ:
- LOG_DIS("ashq %d, $r%d\n", simm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- op = imm & (1 << 5);
- imm &= 0x1f;
- c = tcg_constant_tl(imm);
- if (op) {
- cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- } else {
- /* BTST */
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
- c, cpu_PR[PR_CCS]);
- }
- break;
- case CRISV10_QIMM_LSHQ:
- LOG_DIS("lshq %d, $r%d\n", simm, dc->dst);
-
- op = CC_OP_LSL;
- if (imm & (1 << 5)) {
- op = CC_OP_LSR;
- }
- imm &= 0x1f;
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(imm);
- cris_alu(dc, op, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_SUBQ:
- LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(imm);
- cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
- case CRISV10_QIMM_ORQ:
- LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_constant_tl(simm);
- cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
- cpu_R[dc->dst], c, 4);
- break;
-
- case CRISV10_QIMM_BCC_R0:
- case CRISV10_QIMM_BCC_R1:
- case CRISV10_QIMM_BCC_R2:
- case CRISV10_QIMM_BCC_R3:
- imm = dc->ir & 0xff;
- /* bit 0 is a sign bit. */
- if (imm & 1) {
- imm |= 0xffffff00; /* sign extend. */
- imm &= ~1; /* get rid of the sign bit. */
- }
- imm += 2;
- LOG_DIS("b%s %d\n", cc_name(dc->cond), imm);
-
- cris_cc_mask(dc, 0);
- cris_prepare_cc_branch(dc, imm, dc->cond);
- break;
-
- default:
- LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
- dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
- cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n");
- break;
- }
- return 2;
-}
-
-static unsigned int dec10_setclrf(DisasContext *dc)
-{
- uint32_t flags;
- unsigned int set = ~dc->opcode & 1;
-
- flags = EXTRACT_FIELD(dc->ir, 0, 3)
- | (EXTRACT_FIELD(dc->ir, 12, 15) << 4);
- LOG_DIS("%s set=%d flags=%x\n", __func__, set, flags);
-
-
- if (flags & X_FLAG) {
- if (set)
- dc->flags_x = X_FLAG;
- else
- dc->flags_x = 0;
- }
-
- cris_evaluate_flags (dc);
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- cris_update_cc_x(dc);
- tcg_gen_movi_tl(cc_op, dc->cc_op);
-
- if (set) {
- tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
- } else {
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS],
- ~(flags|F_FLAG_V10|P_FLAG_V10));
- }
-
- dc->flags_uptodate = 1;
- dc->clear_x = 0;
- cris_lock_irq(dc);
- return 2;
-}
-
-static inline void dec10_reg_prep_sext(DisasContext *dc, int size, int sext,
- TCGv dd, TCGv ds, TCGv sd, TCGv ss)
-{
- if (sext) {
- t_gen_sext(dd, sd, size);
- t_gen_sext(ds, ss, size);
- } else {
- t_gen_zext(dd, sd, size);
- t_gen_zext(ds, ss, size);
- }
-}
-
-static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext)
-{
- TCGv t[2];
-
- t[0] = tcg_temp_new();
- t[1] = tcg_temp_new();
- dec10_reg_prep_sext(dc, size, sext,
- t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
-
- if (op == CC_OP_LSL || op == CC_OP_LSR || op == CC_OP_ASR) {
- tcg_gen_andi_tl(t[1], t[1], 63);
- }
-
- assert(dc->dst != 15);
- cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size);
-}
-
-static void dec10_reg_bound(DisasContext *dc, int size)
-{
- TCGv t;
-
- t = tcg_temp_new();
- t_gen_zext(t, cpu_R[dc->src], size);
- cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
-}
-
-static void dec10_reg_mul(DisasContext *dc, int size, int sext)
-{
- int op = sext ? CC_OP_MULS : CC_OP_MULU;
- TCGv t[2];
-
- t[0] = tcg_temp_new();
- t[1] = tcg_temp_new();
- dec10_reg_prep_sext(dc, size, sext,
- t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
-
- cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4);
-}
-
-
-static void dec10_reg_movs(DisasContext *dc)
-{
- int size = (dc->size & 1) + 1;
- TCGv t;
-
- LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
-
- t = tcg_temp_new();
- if (dc->ir & 32)
- t_gen_sext(t, cpu_R[dc->src], size);
- else
- t_gen_zext(t, cpu_R[dc->src], size);
-
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
-}
-
-static void dec10_reg_alux(DisasContext *dc, int op)
-{
- int size = (dc->size & 1) + 1;
- TCGv t;
-
- LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
-
- t = tcg_temp_new();
- if (dc->ir & 32)
- t_gen_sext(t, cpu_R[dc->src], size);
- else
- t_gen_zext(t, cpu_R[dc->src], size);
-
- cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
-}
-
-static void dec10_reg_mov_pr(DisasContext *dc)
-{
- LOG_DIS("move p%d r%d sz=%d\n", dc->dst, dc->src, preg_sizes_v10[dc->dst]);
- cris_lock_irq(dc);
- if (dc->src == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_PR[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- return;
- }
- if (dc->dst == PR_CCS) {
- cris_evaluate_flags(dc);
- }
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src],
- cpu_R[dc->src], cpu_PR[dc->dst], preg_sizes_v10[dc->dst]);
-}
-
-static void dec10_reg_abs(DisasContext *dc)
-{
- TCGv t0;
-
- LOG_DIS("abs $r%u, $r%u\n", dc->src, dc->dst);
-
- assert(dc->dst != 15);
- t0 = tcg_temp_new();
- tcg_gen_sari_tl(t0, cpu_R[dc->src], 31);
- tcg_gen_xor_tl(cpu_R[dc->dst], cpu_R[dc->src], t0);
- tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0);
-
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4);
-}
-
-static void dec10_reg_swap(DisasContext *dc)
-{
- TCGv t0;
-
- LOG_DIS("not $r%d, $r%d\n", dc->src, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t0 = tcg_temp_new();
- tcg_gen_mov_tl(t0, cpu_R[dc->src]);
- if (dc->dst & 8)
- tcg_gen_not_tl(t0, t0);
- if (dc->dst & 4)
- t_gen_swapw(t0, t0);
- if (dc->dst & 2)
- t_gen_swapb(t0, t0);
- if (dc->dst & 1)
- t_gen_swapr(t0, t0);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4);
-}
-
-static void dec10_reg_scc(DisasContext *dc)
-{
- int cond = dc->dst;
-
- LOG_DIS("s%s $r%u\n", cc_name(cond), dc->src);
-
- gen_tst_cc(dc, cpu_R[dc->src], cond);
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->src], cpu_R[dc->src], 0);
-
- cris_cc_mask(dc, 0);
-}
-
-static unsigned int dec10_reg(DisasContext *dc)
-{
- TCGv t;
- unsigned int insn_len = 2;
- unsigned int size = dec10_size(dc->size);
- unsigned int tmp;
-
- if (dc->size != 3) {
- switch (dc->opcode) {
- case CRISV10_REG_MOVE_R:
- LOG_DIS("move.%d $r%d, $r%d\n", dc->size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_MOVE, size, 0);
- if (dc->dst == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- }
- break;
- case CRISV10_REG_MOVX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_movs(dc);
- break;
- case CRISV10_REG_ADDX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alux(dc, CC_OP_ADD);
- break;
- case CRISV10_REG_SUBX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alux(dc, CC_OP_SUB);
- break;
- case CRISV10_REG_ADD:
- LOG_DIS("add $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_ADD, size, 0);
- break;
- case CRISV10_REG_SUB:
- LOG_DIS("sub $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_SUB, size, 0);
- break;
- case CRISV10_REG_CMP:
- LOG_DIS("cmp $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_CMP, size, 0);
- break;
- case CRISV10_REG_BOUND:
- LOG_DIS("bound $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_bound(dc, size);
- break;
- case CRISV10_REG_AND:
- LOG_DIS("and $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_AND, size, 0);
- break;
- case CRISV10_REG_ADDI:
- if (dc->src == 15) {
- /* nop. */
- return 2;
- }
- t = tcg_temp_new();
- LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size);
- tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3);
- tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t);
- break;
- case CRISV10_REG_LSL:
- LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_LSL, size, 0);
- break;
- case CRISV10_REG_LSR:
- LOG_DIS("lsr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_LSR, size, 0);
- break;
- case CRISV10_REG_ASR:
- LOG_DIS("asr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_ASR, size, 1);
- break;
- case CRISV10_REG_OR:
- LOG_DIS("or $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_OR, size, 0);
- break;
- case CRISV10_REG_NEG:
- LOG_DIS("neg $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_NEG, size, 0);
- break;
- case CRISV10_REG_BIAP:
- LOG_DIS("BIAP pc=%x reg %d r%d r%d size=%d\n", dc->pc,
- dc->opcode, dc->src, dc->dst, size);
- switch (size) {
- case 4: tmp = 2; break;
- case 2: tmp = 1; break;
- case 1: tmp = 0; break;
- default:
- cpu_abort(CPU(dc->cpu), "Unhandled BIAP");
- break;
- }
-
- t = tcg_temp_new();
- tcg_gen_shli_tl(t, cpu_R[dc->dst], tmp);
- if (dc->src == 15) {
- tcg_gen_addi_tl(cpu_PR[PR_PREFIX], t, ((dc->pc +2)| 1) + 1);
- } else {
- tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t);
- }
- cris_set_prefix(dc);
- break;
-
- default:
- LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
- dc->opcode, dc->src, dc->dst);
- cpu_abort(CPU(dc->cpu), "Unhandled opcode");
- break;
- }
- } else {
- switch (dc->opcode) {
- case CRISV10_REG_MOVX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_movs(dc);
- break;
- case CRISV10_REG_ADDX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alux(dc, CC_OP_ADD);
- break;
- case CRISV10_REG_SUBX:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alux(dc, CC_OP_SUB);
- break;
- case CRISV10_REG_MOVE_SPR_R:
- cris_evaluate_flags(dc);
- cris_cc_mask(dc, 0);
- dec10_reg_mov_pr(dc);
- break;
- case CRISV10_REG_MOVE_R_SPR:
- LOG_DIS("move r%d p%d\n", dc->src, dc->dst);
- cris_evaluate_flags(dc);
- if (dc->src != 11) /* fast for srp. */
- dc->cpustate_changed = 1;
- t_gen_mov_preg_TN(dc, dc->dst, cpu_R[dc->src]);
- break;
- case CRISV10_REG_SETF:
- case CRISV10_REG_CLEARF:
- dec10_setclrf(dc);
- break;
- case CRISV10_REG_SWAP:
- dec10_reg_swap(dc);
- break;
- case CRISV10_REG_ABS:
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_abs(dc);
- break;
- case CRISV10_REG_LZ:
- LOG_DIS("lz $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_LZ, 4, 0);
- break;
- case CRISV10_REG_XOR:
- LOG_DIS("xor $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_alu(dc, CC_OP_XOR, 4, 0);
- break;
- case CRISV10_REG_BTST:
- LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
- cpu_R[dc->src], cpu_PR[PR_CCS]);
- break;
- case CRISV10_REG_DSTEP:
- LOG_DIS("dstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_DSTEP, cpu_R[dc->dst],
- cpu_R[dc->dst], cpu_R[dc->src], 4);
- break;
- case CRISV10_REG_MSTEP:
- LOG_DIS("mstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
- cris_evaluate_flags(dc);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu(dc, CC_OP_MSTEP, cpu_R[dc->dst],
- cpu_R[dc->dst], cpu_R[dc->src], 4);
- break;
- case CRISV10_REG_SCC:
- dec10_reg_scc(dc);
- break;
- default:
- LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
- dc->opcode, dc->src, dc->dst);
- cpu_abort(CPU(dc->cpu), "Unhandled opcode");
- break;
- }
- }
- return insn_len;
-}
-
-static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc,
- unsigned int size)
-{
- unsigned int insn_len = 2;
- TCGv t;
-
- LOG_DIS("%s: move.%d [$r%d], $r%d\n", __func__,
- size, dc->src, dc->dst);
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- t = tcg_temp_new();
- insn_len += dec10_prep_move_m(env, dc, 0, size, t);
- cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, size);
- if (dc->dst == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- }
-
- return insn_len;
-}
-
-static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size)
-{
- unsigned int insn_len = 2;
- TCGv addr;
-
- LOG_DIS("move.%d $r%d, [$r%d]\n", dc->size, dc->src, dc->dst);
- addr = tcg_temp_new();
- crisv10_prepare_memaddr(dc, addr, size);
- gen_store_v10(dc, addr, cpu_R[dc->dst], size);
- insn_len += crisv10_post_memaddr(dc, size);
-
- return insn_len;
-}
-
-static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc)
-{
- unsigned int insn_len = 2, rd = dc->dst;
- TCGv t;
-
- LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
- cris_lock_irq(dc);
-
- t = tcg_temp_new();
- insn_len += dec10_prep_move_m(env, dc, 0, 4, t);
- if (rd == 15) {
- tcg_gen_mov_tl(env_btarget, t);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- } else {
- tcg_gen_mov_tl(cpu_PR[rd], t);
- dc->cpustate_changed = 1;
- }
- return insn_len;
-}
-
-static unsigned int dec10_ind_move_pr_m(DisasContext *dc)
-{
- unsigned int insn_len = 2, size = preg_sizes_v10[dc->dst];
- TCGv addr, t0;
-
- LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
-
- addr = tcg_temp_new();
- crisv10_prepare_memaddr(dc, addr, size);
- if (dc->dst == PR_CCS) {
- t0 = tcg_temp_new();
- cris_evaluate_flags(dc);
- tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG);
- gen_store_v10(dc, addr, t0, size);
- } else {
- gen_store_v10(dc, addr, cpu_PR[dc->dst], size);
- }
- insn_len += crisv10_post_memaddr(dc, size);
- cris_lock_irq(dc);
-
- return insn_len;
-}
-
-static void dec10_movem_r_m(DisasContext *dc)
-{
- int i, pfix = dc->tb_flags & PFIX_FLAG;
- TCGv addr, t0;
-
- LOG_DIS("%s r%d, [r%d] pi=%d ir=%x\n", __func__,
- dc->dst, dc->src, dc->postinc, dc->ir);
-
- addr = tcg_temp_new();
- t0 = tcg_temp_new();
- crisv10_prepare_memaddr(dc, addr, 4);
- tcg_gen_mov_tl(t0, addr);
- for (i = dc->dst; i >= 0; i--) {
- if ((pfix && dc->mode == CRISV10_MODE_AUTOINC) && dc->src == i) {
- gen_store_v10(dc, addr, t0, 4);
- } else {
- gen_store_v10(dc, addr, cpu_R[i], 4);
- }
- tcg_gen_addi_tl(addr, addr, 4);
- }
-
- if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
- tcg_gen_mov_tl(cpu_R[dc->src], t0);
- }
-
- if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
- tcg_gen_mov_tl(cpu_R[dc->src], addr);
- }
-}
-
-static void dec10_movem_m_r(DisasContext *dc)
-{
- int i, pfix = dc->tb_flags & PFIX_FLAG;
- TCGv addr, t0;
-
- LOG_DIS("%s [r%d], r%d pi=%d ir=%x\n", __func__,
- dc->src, dc->dst, dc->postinc, dc->ir);
-
- addr = tcg_temp_new();
- t0 = tcg_temp_new();
- crisv10_prepare_memaddr(dc, addr, 4);
- tcg_gen_mov_tl(t0, addr);
- for (i = dc->dst; i >= 0; i--) {
- gen_load(dc, cpu_R[i], addr, 4, 0);
- tcg_gen_addi_tl(addr, addr, 4);
- }
-
- if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
- tcg_gen_mov_tl(cpu_R[dc->src], t0);
- }
-
- if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
- tcg_gen_mov_tl(cpu_R[dc->src], addr);
- }
-}
-
-static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc,
- int op, unsigned int size)
-{
- int insn_len = 0;
- int rd = dc->dst;
- TCGv t[2];
-
- cris_alu_m_alloc_temps(t);
- insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
- cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t[0], size);
- if (dc->dst == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- return insn_len;
- }
- return insn_len;
-}
-
-static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc,
- unsigned int size)
-{
- int insn_len = 0;
- int rd = dc->dst;
- TCGv t;
-
- t = tcg_temp_new();
- insn_len += dec10_prep_move_m(env, dc, 0, size, t);
- cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4);
- if (dc->dst == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- }
-
- return insn_len;
-}
-
-static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op)
-{
- unsigned int size = (dc->size & 1) ? 2 : 1;
- unsigned int sx = !!(dc->size & 2);
- int insn_len = 2;
- int rd = dc->dst;
- TCGv t;
-
- LOG_DIS("addx size=%d sx=%d op=%d %d\n", size, sx, dc->src, dc->dst);
-
- t = tcg_temp_new();
-
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_prep_move_m(env, dc, sx, size, t);
- cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4);
- if (dc->dst == 15) {
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch = 1;
- }
-
- return insn_len;
-}
-
-static int dec10_dip(CPUCRISState *env, DisasContext *dc)
-{
- int insn_len = 2;
- uint32_t imm;
-
- LOG_DIS("dip pc=%x opcode=%d r%d r%d\n",
- dc->pc, dc->opcode, dc->src, dc->dst);
- if (dc->src == 15) {
- imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
- tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm);
- if (dc->postinc) {
- insn_len += 4;
- }
- tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len - 2);
- } else {
- gen_load(dc, cpu_PR[PR_PREFIX], cpu_R[dc->src], 4, 0);
- if (dc->postinc)
- tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], 4);
- }
-
- cris_set_prefix(dc);
- return insn_len;
-}
-
-static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
-{
- int insn_len = 2;
- int rd = dc->dst;
-
- LOG_DIS("bdap_m pc=%x opcode=%d r%d r%d sz=%d\n",
- dc->pc, dc->opcode, dc->src, dc->dst, size);
-
- assert(dc->dst != 15);
-#if 0
- /* 8bit embedded offset? */
- if (!dc->postinc && (dc->ir & (1 << 11))) {
- int simm = dc->ir & 0xff;
-
- /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */
- /* sign extended. */
- simm = (int8_t)simm;
-
- tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
-
- cris_set_prefix(dc);
- return insn_len;
- }
-#endif
- /* Now the rest of the modes are truly indirect. */
- insn_len += dec10_prep_move_m(env, dc, 1, size, cpu_PR[PR_PREFIX]);
- tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]);
- cris_set_prefix(dc);
- return insn_len;
-}
-
-static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
-{
- unsigned int insn_len = 2;
- unsigned int size = dec10_size(dc->size);
- uint32_t imm;
- int32_t simm;
- TCGv t[2], c;
-
- if (dc->size != 3) {
- switch (dc->opcode) {
- case CRISV10_IND_MOVE_M_R:
- return dec10_ind_move_m_r(env, dc, size);
- case CRISV10_IND_MOVE_R_M:
- return dec10_ind_move_r_m(dc, size);
- case CRISV10_IND_CMP:
- LOG_DIS("cmp size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_CMP, size);
- break;
- case CRISV10_IND_TEST:
- LOG_DIS("test size=%d op=%d %d\n", size, dc->src, dc->dst);
-
- cris_evaluate_flags(dc);
- cris_cc_mask(dc, CC_MASK_NZVC);
- cris_alu_m_alloc_temps(t);
- insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
- c = tcg_constant_tl(0);
- cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
- t[0], c, size);
- break;
- case CRISV10_IND_ADD:
- LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_ADD, size);
- break;
- case CRISV10_IND_SUB:
- LOG_DIS("sub size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_SUB, size);
- break;
- case CRISV10_IND_BOUND:
- LOG_DIS("bound size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_bound(env, dc, size);
- break;
- case CRISV10_IND_AND:
- LOG_DIS("and size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_AND, size);
- break;
- case CRISV10_IND_OR:
- LOG_DIS("or size=%d op=%d %d\n", size, dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_OR, size);
- break;
- case CRISV10_IND_MOVX:
- insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
- break;
- case CRISV10_IND_ADDX:
- insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
- break;
- case CRISV10_IND_SUBX:
- insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
- break;
- case CRISV10_IND_CMPX:
- insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
- break;
- case CRISV10_IND_MUL:
- /* This is a reg insn coded in the mem indir space. */
- LOG_DIS("mul pc=%x opcode=%d\n", dc->pc, dc->opcode);
- cris_cc_mask(dc, CC_MASK_NZVC);
- dec10_reg_mul(dc, size, dc->ir & (1 << 10));
- break;
- case CRISV10_IND_BDAP_M:
- insn_len = dec10_bdap_m(env, dc, size);
- break;
- default:
- /*
- * ADDC for v17:
- *
- * Instruction format: ADDC [Rs],Rd
- *
- * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
- * |Destination(Rd)| 1 0 0 1 1 0 1 0 | Source(Rs)|
- * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+--+--+
- *
- * Instruction format: ADDC [Rs+],Rd
- *
- * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
- * |Destination(Rd)| 1 1 0 1 1 0 1 0 | Source(Rs)|
- * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
- */
- if (dc->opcode == CRISV17_IND_ADDC && dc->size == 2 &&
- env->pregs[PR_VR] == 17) {
- LOG_DIS("addc op=%d %d\n", dc->src, dc->dst);
- cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(env, dc, CC_OP_ADDC, size);
- break;
- }
-
- LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
- dc->pc, size, dc->opcode, dc->src, dc->dst);
- cpu_abort(CPU(dc->cpu), "Unhandled opcode");
- break;
- }
- return insn_len;
- }
-
- switch (dc->opcode) {
- case CRISV10_IND_MOVE_M_SPR:
- insn_len = dec10_ind_move_m_pr(env, dc);
- break;
- case CRISV10_IND_MOVE_SPR_M:
- insn_len = dec10_ind_move_pr_m(dc);
- break;
- case CRISV10_IND_JUMP_M:
- if (dc->src == 15) {
- LOG_DIS("jump.%d %d r%d r%d direct\n", size,
- dc->opcode, dc->src, dc->dst);
- imm = cris_fetch(env, dc, dc->pc + 2, size, 0);
- if (dc->mode == CRISV10_MODE_AUTOINC) {
- insn_len += size;
- }
- c = tcg_constant_tl(dc->pc + insn_len);
- t_gen_mov_preg_TN(dc, dc->dst, c);
- dc->jmp_pc = imm;
- cris_prepare_jmp(dc, JMP_DIRECT);
- dc->delayed_branch--; /* v10 has no dslot here. */
- } else {
- if (dc->dst == 14) {
- LOG_DIS("break %d\n", dc->src);
- cris_evaluate_flags(dc);
- tcg_gen_movi_tl(env_pc, dc->pc + 2);
- c = tcg_constant_tl(dc->src + 2);
- t_gen_mov_env_TN(trap_vector, c);
- t_gen_raise_exception(EXCP_BREAK);
- dc->base.is_jmp = DISAS_NORETURN;
- return insn_len;
- }
- LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
- dc->opcode, dc->src, dc->dst);
- t[0] = tcg_temp_new();
- c = tcg_constant_tl(dc->pc + insn_len);
- t_gen_mov_preg_TN(dc, dc->dst, c);
- crisv10_prepare_memaddr(dc, t[0], size);
- gen_load(dc, env_btarget, t[0], 4, 0);
- insn_len += crisv10_post_memaddr(dc, size);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch--; /* v10 has no dslot here. */
- }
- break;
-
- case CRISV10_IND_MOVEM_R_M:
- LOG_DIS("movem_r_m pc=%x opcode=%d r%d r%d\n",
- dc->pc, dc->opcode, dc->dst, dc->src);
- dec10_movem_r_m(dc);
- break;
- case CRISV10_IND_MOVEM_M_R:
- LOG_DIS("movem_m_r pc=%x opcode=%d\n", dc->pc, dc->opcode);
- dec10_movem_m_r(dc);
- break;
- case CRISV10_IND_JUMP_R:
- LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n",
- dc->pc, dc->opcode, dc->dst, dc->src);
- tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]);
- c = tcg_constant_tl(dc->pc + insn_len);
- t_gen_mov_preg_TN(dc, dc->dst, c);
- cris_prepare_jmp(dc, JMP_INDIRECT);
- dc->delayed_branch--; /* v10 has no dslot here. */
- break;
- case CRISV10_IND_MOVX:
- insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
- break;
- case CRISV10_IND_ADDX:
- insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
- break;
- case CRISV10_IND_SUBX:
- insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
- break;
- case CRISV10_IND_CMPX:
- insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
- break;
- case CRISV10_IND_DIP:
- insn_len = dec10_dip(env, dc);
- break;
- case CRISV10_IND_BCC_M:
-
- cris_cc_mask(dc, 0);
- simm = cris_fetch(env, dc, dc->pc + 2, 2, 1);
- simm += 4;
-
- LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
- cris_prepare_cc_branch(dc, simm, dc->cond);
- insn_len = 4;
- break;
- default:
- LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode);
- cpu_abort(CPU(dc->cpu), "Unhandled opcode");
- break;
- }
-
- return insn_len;
-}
-
-static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
-{
- unsigned int insn_len = 2;
-
- /* Load a halfword onto the instruction register. */
- dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
-
- /* Now decode it. */
- dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9);
- dc->mode = EXTRACT_FIELD(dc->ir, 10, 11);
- dc->src = EXTRACT_FIELD(dc->ir, 0, 3);
- dc->size = EXTRACT_FIELD(dc->ir, 4, 5);
- dc->cond = dc->dst = EXTRACT_FIELD(dc->ir, 12, 15);
- dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
-
- dc->clear_prefix = 1;
-
- /* FIXME: What if this insn isn't 2 in length?? */
- if (dc->src == 15 || dc->dst == 15)
- tcg_gen_movi_tl(cpu_R[15], dc->pc + 2);
-
- switch (dc->mode) {
- case CRISV10_MODE_QIMMEDIATE:
- insn_len = dec10_quick_imm(dc);
- break;
- case CRISV10_MODE_REG:
- insn_len = dec10_reg(dc);
- break;
- case CRISV10_MODE_AUTOINC:
- case CRISV10_MODE_INDIRECT:
- insn_len = dec10_ind(env, dc);
- break;
- }
-
- if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) {
- dc->tb_flags &= ~PFIX_FLAG;
- tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG);
- if (dc->tb_flags != dc->base.tb->flags) {
- dc->cpustate_changed = 1;
- }
- }
-
- /* CRISv10 locks out interrupts on dslots. */
- if (dc->delayed_branch == 2) {
- cris_lock_irq(dc);
- }
- return insn_len;
-}
-
-void cris_initialize_crisv10_tcg(void)
-{
- int i;
-
- cc_x = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_x), "cc_x");
- cc_src = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_src), "cc_src");
- cc_dest = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_dest),
- "cc_dest");
- cc_result = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_result),
- "cc_result");
- cc_op = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_op), "cc_op");
- cc_size = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_size),
- "cc_size");
- cc_mask = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, cc_mask),
- "cc_mask");
-
- env_pc = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, pc),
- "pc");
- env_btarget = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, btarget),
- "btarget");
- env_btaken = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, btaken),
- "btaken");
- for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, regs[i]),
- regnames_v10[i]);
- }
- for (i = 0; i < 16; i++) {
- cpu_PR[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUCRISState, pregs[i]),
- pregnames_v10[i]);
- }
-}
diff --git a/target/hexagon/README b/target/hexagon/README
index 7ffd517..ca617e3 100644
--- a/target/hexagon/README
+++ b/target/hexagon/README
@@ -282,10 +282,6 @@ For Hexagon Vector eXtensions (HVX), the following fields are used
*** Debugging ***
-You can turn on a lot of debugging by changing the HEX_DEBUG macro to 1 in
-internal.h. This will stream a lot of information as it generates TCG and
-executes the code.
-
To track down nasty issues with Hexagon->TCG generation, we compare the
execution results with actual hardware running on a Hexagon Linux target.
Run qemu with the "-d cpu" option. Then, we can diff the results and figure
@@ -305,8 +301,3 @@ Here are some handy places to set breakpoints
The helper function for each instruction is named helper_<TAG>, so here's
an example that will set a breakpoint at the start
br helper_A2_add
- If you have the HEX_DEBUG macro set, the following will be useful
- At the start of execution of a packet for a given PC
- br helper_debug_start_packet if env->gpr[41] == 0xdeadbeef
- At the end of execution of a packet for a given PC
- br helper_debug_commit_end if this_PC == 0xdeadbeef
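
The README above describes the debugging workflow as: run QEMU with "-d cpu", capture the same run on Hexagon hardware, then diff the two register dumps. As a side note, that comparison step can be scripted; the sketch below is a hypothetical helper, not part of this patch, and the log file arguments are placeholders.

#!/usr/bin/env python3
# Hypothetical helper (not part of this patch): diff a QEMU "-d cpu" log
# against a register trace captured on Hexagon hardware and show where
# the two executions diverge.
import difflib
import sys

def main():
    qemu_log, hw_log = sys.argv[1], sys.argv[2]
    with open(qemu_log) as f:
        qemu_lines = f.readlines()
    with open(hw_log) as f:
        hw_lines = f.readlines()
    # unified_diff() yields nothing when the two traces match exactly.
    for line in difflib.unified_diff(hw_lines, qemu_lines,
                                     fromfile=hw_log, tofile=qemu_log, n=1):
        sys.stdout.write(line)

if __name__ == "__main__":
    main()
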
diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h
index 71b4a9b..635d509e7 100644
--- a/target/hexagon/cpu-param.h
+++ b/target/hexagon/cpu-param.h
@@ -19,9 +19,10 @@
#define HEXAGON_CPU_PARAM_H
#define TARGET_PAGE_BITS 16 /* 64K pages */
-#define TARGET_LONG_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_INSN_START_EXTRA_WORDS 0
+
#endif
diff --git a/target/hexagon/cpu-qom.h b/target/hexagon/cpu-qom.h
index da92fe7..0b149bd 100644
--- a/target/hexagon/cpu-qom.h
+++ b/target/hexagon/cpu-qom.h
@@ -16,6 +16,7 @@
#define HEXAGON_CPU_TYPE_SUFFIX "-" TYPE_HEXAGON_CPU
#define HEXAGON_CPU_TYPE_NAME(name) (name HEXAGON_CPU_TYPE_SUFFIX)
+#define TYPE_HEXAGON_CPU_V66 HEXAGON_CPU_TYPE_NAME("v66")
#define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67")
#define TYPE_HEXAGON_CPU_V68 HEXAGON_CPU_TYPE_NAME("v68")
#define TYPE_HEXAGON_CPU_V69 HEXAGON_CPU_TYPE_NAME("v69")
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index 64cc05c..a5a0417 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -19,13 +19,15 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "internal.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
#include "exec/gdbstub.h"
+#include "accel/tcg/cpu-ops.h"
+static void hexagon_v66_cpu_init(Object *obj) { }
static void hexagon_v67_cpu_init(Object *obj) { }
static void hexagon_v68_cpu_init(Object *obj) { }
static void hexagon_v69_cpu_init(Object *obj) { }
@@ -47,13 +49,12 @@ static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model)
return oc;
}
-static Property hexagon_lldb_compat_property =
- DEFINE_PROP_BOOL("lldb-compat", HexagonCPU, lldb_compat, false);
-static Property hexagon_lldb_stack_adjust_property =
- DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust,
- 0, qdev_prop_uint32, target_ulong);
-static Property hexagon_short_circuit_property =
- DEFINE_PROP_BOOL("short-circuit", HexagonCPU, short_circuit, true);
+static const Property hexagon_cpu_properties[] = {
+ DEFINE_PROP_BOOL("lldb-compat", HexagonCPU, lldb_compat, false),
+ DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust, 0,
+ qdev_prop_uint32, target_ulong),
+ DEFINE_PROP_BOOL("short-circuit", HexagonCPU, short_circuit, true),
+};
const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
@@ -254,6 +255,22 @@ static vaddr hexagon_cpu_get_pc(CPUState *cs)
return cpu_env(cs)->gpr[HEX_REG_PC];
}
+static TCGTBCPUState hexagon_get_tb_cpu_state(CPUState *cs)
+{
+ CPUHexagonState *env = cpu_env(cs);
+ vaddr pc = env->gpr[HEX_REG_PC];
+ uint32_t hex_flags = 0;
+
+ if (pc == env->gpr[HEX_REG_SA0]) {
+ hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
+ }
+ if (pc & PCALIGN_MASK) {
+ hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
+ }
+
+ return (TCGTBCPUState){ .pc = pc, .flags = hex_flags };
+}
+
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -261,11 +278,6 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
cpu_env(cs)->gpr[HEX_REG_PC] = tb->pc;
}
-static bool hexagon_cpu_has_work(CPUState *cs)
-{
- return true;
-}
-
static void hexagon_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
@@ -285,11 +297,14 @@ static void hexagon_cpu_reset_hold(Object *obj, ResetType type)
set_default_nan_mode(1, &env->fp_status);
set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
+ /* Default NaN value: sign bit set, all frac bits set */
+ set_float_default_nan_pattern(0b11111111, &env->fp_status);
}
static void hexagon_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
info->print_insn = print_insn_hexagon;
+ info->endian = BFD_ENDIAN_LITTLE;
}
static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
@@ -314,22 +329,28 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
mcc->parent_realize(dev, errp);
}
-static void hexagon_cpu_init(Object *obj)
+static int hexagon_cpu_mmu_index(CPUState *cs, bool ifetch)
{
- qdev_property_add_static(DEVICE(obj), &hexagon_lldb_compat_property);
- qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property);
- qdev_property_add_static(DEVICE(obj), &hexagon_short_circuit_property);
+ return MMU_USER_IDX;
}
-#include "hw/core/tcg-cpu-ops.h"
+static void hexagon_cpu_init(Object *obj)
+{
+}
static const TCGCPUOps hexagon_tcg_ops = {
+ /* MTTCG not yet supported: require strict ordering */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = false,
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
+ .get_tb_cpu_state = hexagon_get_tb_cpu_state,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
+ .mmu_index = hexagon_cpu_mmu_index,
};
-static void hexagon_cpu_class_init(ObjectClass *c, void *data)
+static void hexagon_cpu_class_init(ObjectClass *c, const void *data)
{
HexagonCPUClass *mcc = HEXAGON_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
@@ -339,11 +360,11 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
device_class_set_parent_realize(dc, hexagon_cpu_realize,
&mcc->parent_realize);
+ device_class_set_props(dc, hexagon_cpu_properties);
resettable_class_set_parent_phases(rc, NULL, hexagon_cpu_reset_hold, NULL,
&mcc->parent_phases);
cc->class_by_name = hexagon_cpu_class_by_name;
- cc->has_work = hexagon_cpu_has_work;
cc->dump_state = hexagon_dump_state;
cc->set_pc = hexagon_cpu_set_pc;
cc->get_pc = hexagon_cpu_get_pc;
@@ -373,6 +394,7 @@ static const TypeInfo hexagon_cpu_type_infos[] = {
.class_size = sizeof(HexagonCPUClass),
.class_init = hexagon_cpu_class_init,
},
+ DEFINE_CPU(TYPE_HEXAGON_CPU_V66, hexagon_v66_cpu_init),
DEFINE_CPU(TYPE_HEXAGON_CPU_V67, hexagon_v67_cpu_init),
DEFINE_CPU(TYPE_HEXAGON_CPU_V68, hexagon_v68_cpu_init),
DEFINE_CPU(TYPE_HEXAGON_CPU_V69, hexagon_v69_cpu_init),
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index 764f3c3..43a854f 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -21,11 +21,16 @@
#include "fpu/softfloat-types.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "hex_regs.h"
#include "mmvec/mmvec.h"
#include "hw/registerfields.h"
+#ifndef CONFIG_USER_ONLY
+#error "Hexagon does not support system emulation"
+#endif
+
#define NUM_PREGS 4
#define TOTAL_PER_THREAD_REGS 64
@@ -79,12 +84,6 @@ typedef struct CPUArchState {
uint8_t slot_cancelled;
target_ulong new_value_usr;
- /*
- * Only used when HEX_DEBUG is on, but unconditionally included
- * to reduce recompile time when turning HEX_DEBUG on/off.
- */
- target_ulong reg_written[TOTAL_PER_THREAD_REGS];
-
MemLog mem_log_stores[STORES_MAX];
float_status fp_status;
@@ -138,25 +137,10 @@ G_NORETURN void hexagon_raise_exception_err(CPUHexagonState *env,
uint32_t exception,
uintptr_t pc);
-static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- uint32_t hex_flags = 0;
- *pc = env->gpr[HEX_REG_PC];
- *cs_base = 0;
- if (*pc == env->gpr[HEX_REG_SA0]) {
- hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
- }
- *flags = hex_flags;
- if (*pc & PCALIGN_MASK) {
- hexagon_raise_exception_err(env, HEX_EXCP_PC_NOT_ALIGNED, 0);
- }
-}
-
typedef HexagonCPU ArchCPU;
void hexagon_translate_init(void);
-
-#include "exec/cpu-all.h"
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#endif /* HEXAGON_CPU_H */
diff --git a/target/hexagon/cpu_bits.h b/target/hexagon/cpu_bits.h
index 4279281..ff596e2 100644
--- a/target/hexagon/cpu_bits.h
+++ b/target/hexagon/cpu_bits.h
@@ -23,14 +23,21 @@
#define PCALIGN 4
#define PCALIGN_MASK (PCALIGN - 1)
-#define HEX_EXCP_FETCH_NO_UPAGE 0x012
-#define HEX_EXCP_INVALID_PACKET 0x015
-#define HEX_EXCP_INVALID_OPCODE 0x015
-#define HEX_EXCP_PC_NOT_ALIGNED 0x01e
-#define HEX_EXCP_PRIV_NO_UREAD 0x024
-#define HEX_EXCP_PRIV_NO_UWRITE 0x025
-
-#define HEX_EXCP_TRAP0 0x172
+enum hex_event {
+ HEX_EVENT_NONE = -1,
+ HEX_EVENT_TRAP0 = 0x008,
+};
+
+enum hex_cause {
+ HEX_CAUSE_NONE = -1,
+ HEX_CAUSE_TRAP0 = 0x172,
+ HEX_CAUSE_FETCH_NO_UPAGE = 0x012,
+ HEX_CAUSE_INVALID_PACKET = 0x015,
+ HEX_CAUSE_INVALID_OPCODE = 0x015,
+ HEX_CAUSE_PC_NOT_ALIGNED = 0x01e,
+ HEX_CAUSE_PRIV_NO_UREAD = 0x024,
+ HEX_CAUSE_PRIV_NO_UWRITE = 0x025,
+};
#define PACKET_WORDS_MAX 4
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index 05a56d8..c557141 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -43,112 +43,51 @@
#define WAY_BIG_EXP 4096
-typedef union {
- double f;
- uint64_t i;
- struct {
- uint64_t mant:52;
- uint64_t exp:11;
- uint64_t sign:1;
- };
-} Double;
-
-typedef union {
- float f;
- uint32_t i;
- struct {
- uint32_t mant:23;
- uint32_t exp:8;
- uint32_t sign:1;
- };
-} Float;
-
static uint64_t float64_getmant(float64 f64)
{
- Double a = { .i = f64 };
+ uint64_t mant = extract64(f64, 0, 52);
if (float64_is_normal(f64)) {
- return a.mant | 1ULL << 52;
+ return mant | 1ULL << 52;
}
if (float64_is_zero(f64)) {
return 0;
}
if (float64_is_denormal(f64)) {
- return a.mant;
+ return mant;
}
return ~0ULL;
}
int32_t float64_getexp(float64 f64)
{
- Double a = { .i = f64 };
+ int exp = extract64(f64, 52, 11);
if (float64_is_normal(f64)) {
- return a.exp;
+ return exp;
}
if (float64_is_denormal(f64)) {
- return a.exp + 1;
+ return exp + 1;
}
return -1;
}
-static uint64_t float32_getmant(float32 f32)
-{
- Float a = { .i = f32 };
- if (float32_is_normal(f32)) {
- return a.mant | 1ULL << 23;
- }
- if (float32_is_zero(f32)) {
- return 0;
- }
- if (float32_is_denormal(f32)) {
- return a.mant;
- }
- return ~0ULL;
-}
-
int32_t float32_getexp(float32 f32)
{
- Float a = { .i = f32 };
+ int exp = float32_getexp_raw(f32);
if (float32_is_normal(f32)) {
- return a.exp;
+ return exp;
}
if (float32_is_denormal(f32)) {
- return a.exp + 1;
+ return exp + 1;
}
return -1;
}
-static uint32_t int128_getw0(Int128 x)
-{
- return int128_getlo(x);
-}
-
-static uint32_t int128_getw1(Int128 x)
-{
- return int128_getlo(x) >> 32;
-}
-
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
{
- Int128 a, b;
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
-
- a = int128_make64(ai);
- b = int128_make64(bi);
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
-
- pp1s = pp1a + pp1b;
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
- pp2 += (1ULL << 32);
- }
- uint64_t ret_low = pp0 + (pp1s << 32);
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
- pp2 += 1;
- }
+ uint64_t l, h;
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
+ mulu64(&l, &h, ai, bi);
+ return int128_make128(l, h);
}
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
@@ -369,298 +308,129 @@ float32 infinite_float32(uint8_t sign)
}
/* Return a maximum finite value with the requested sign */
-static float32 maxfinite_float32(uint8_t sign)
+static float64 accum_round_float64(Accum a, float_status *fp_status)
{
- if (sign) {
- return make_float32(SF_MINUS_MAXF);
- } else {
- return make_float32(SF_MAXF);
- }
-}
-
-/* Return a zero value with requested sign */
-static float32 zero_float32(uint8_t sign)
-{
- if (sign) {
- return make_float32(0x80000000);
- } else {
- return float32_zero;
+ uint64_t ret;
+
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
+ && ((a.guard | a.round | a.sticky) == 0)) {
+ /* result zero */
+ switch (fp_status->float_rounding_mode) {
+ case float_round_down:
+ return zero_float64(1);
+ default:
+ return zero_float64(0);
+ }
}
-}
-
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
-{ \
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
- && ((a.guard | a.round | a.sticky) == 0)) { \
- /* result zero */ \
- switch (fp_status->float_rounding_mode) { \
- case float_round_down: \
- return zero_##SUFFIX(1); \
- default: \
- return zero_##SUFFIX(0); \
- } \
- } \
- /* Normalize right */ \
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
- /* So we need to normalize right while the high word is non-zero and \
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
- while ((int128_gethi(a.mant) != 0) || \
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
- a = accum_norm_right(a, 1); \
- } \
- /* \
- * OK, now normalize left \
- * We want to normalize left until we have a leading one in bit 24 \
- * Theoretically, we only need to shift a maximum of one to the left if we \
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
- * should be 0 \
- */ \
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
- a = accum_norm_left(a); \
- } \
- /* \
- * OK, now we might need to denormalize because of potential underflow. \
- * We need to do this before rounding, and rounding might make us normal \
- * again \
- */ \
- while (a.exp <= 0) { \
- a = accum_norm_right(a, 1 - a.exp); \
- /* \
- * Do we have underflow? \
- * That's when we get an inexact answer because we ran out of bits \
- * in a denormal. \
- */ \
- if (a.guard || a.round || a.sticky) { \
- float_raise(float_flag_underflow, fp_status); \
- } \
- } \
- /* OK, we're relatively canonical... now we need to round */ \
- if (a.guard || a.round || a.sticky) { \
- float_raise(float_flag_inexact, fp_status); \
- switch (fp_status->float_rounding_mode) { \
- case float_round_to_zero: \
- /* Chop and we're done */ \
- break; \
- case float_round_up: \
- if (a.sign == 0) { \
- a.mant = int128_add(a.mant, int128_one()); \
- } \
- break; \
- case float_round_down: \
- if (a.sign != 0) { \
- a.mant = int128_add(a.mant, int128_one()); \
- } \
- break; \
- default: \
- if (a.round || a.sticky) { \
- /* round up if guard is 1, down if guard is zero */ \
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
- } else if (a.guard) { \
- /* exactly .5, round up if odd */ \
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
- } \
- break; \
- } \
- } \
- /* \
- * OK, now we might have carried all the way up. \
- * So we might need to shr once \
- * at least we know that the lsb should be zero if we rounded and \
- * got a carry out... \
- */ \
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
- a = accum_norm_right(a, 1); \
- } \
- /* Overflow? */ \
- if (a.exp >= INF_EXP) { \
- /* Yep, inf result */ \
- float_raise(float_flag_overflow, fp_status); \
- float_raise(float_flag_inexact, fp_status); \
- switch (fp_status->float_rounding_mode) { \
- case float_round_to_zero: \
- return maxfinite_##SUFFIX(a.sign); \
- case float_round_up: \
- if (a.sign == 0) { \
- return infinite_##SUFFIX(a.sign); \
- } else { \
- return maxfinite_##SUFFIX(a.sign); \
- } \
- case float_round_down: \
- if (a.sign != 0) { \
- return infinite_##SUFFIX(a.sign); \
- } else { \
- return maxfinite_##SUFFIX(a.sign); \
- } \
- default: \
- return infinite_##SUFFIX(a.sign); \
- } \
- } \
- /* Underflow? */ \
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
- /* Leading one means: No, we're normal. So, we should be done... */ \
- INTERNAL_TYPE ret; \
- ret.i = 0; \
- ret.sign = a.sign; \
- ret.exp = a.exp; \
- ret.mant = int128_getlo(a.mant); \
- return ret.i; \
- } \
- assert(a.exp == 1); \
- INTERNAL_TYPE ret; \
- ret.i = 0; \
- ret.sign = a.sign; \
- ret.exp = 0; \
- ret.mant = int128_getlo(a.mant); \
- return ret.i; \
-}
-
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
-
-static bool is_inf_prod(float64 a, float64 b)
-{
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
-}
-
-static float64 special_fma(float64 a, float64 b, float64 c,
- float_status *fp_status)
-{
- float64 ret = make_float64(0);
-
/*
- * If A multiplied by B is an exact infinity and C is also an infinity
- * but with the opposite sign, FMA returns NaN and raises invalid.
+ * Normalize right
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
+ * So we need to normalize right while the high word is non-zero and
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
*/
- uint8_t a_sign = float64_is_neg(a);
- uint8_t b_sign = float64_is_neg(b);
- uint8_t c_sign = float64_is_neg(c);
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
- if ((a_sign ^ b_sign) != c_sign) {
- ret = make_float64(DF_NAN);
- float_raise(float_flag_invalid, fp_status);
- return ret;
- }
- }
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
- (float64_is_zero(a) && float64_is_infinity(b))) {
- ret = make_float64(DF_NAN);
- float_raise(float_flag_invalid, fp_status);
- return ret;
+ while ((int128_gethi(a.mant) != 0) ||
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
+ a = accum_norm_right(a, 1);
}
/*
- * If none of the above checks are true and C is a NaN,
- * a NaN shall be returned
- * If A or B are NaN, a NAN shall be returned.
+ * OK, now normalize left
+ * We want to normalize left until we have a leading one in bit 24
+ * Theoretically, we only need to shift a maximum of one to the left if we
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
+ * should be 0
*/
- if (float64_is_any_nan(a) ||
- float64_is_any_nan(b) ||
- float64_is_any_nan(c)) {
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- ret = make_float64(DF_NAN);
- return ret;
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
+ a = accum_norm_left(a);
}
/*
- * We have checked for adding opposite-signed infinities.
- * Other infinities return infinity with the correct sign
+ * OK, now we might need to denormalize because of potential underflow.
+ * We need to do this before rounding, and rounding might make us normal
+ * again
*/
- if (float64_is_infinity(c)) {
- ret = infinite_float64(c_sign);
- return ret;
+ while (a.exp <= 0) {
+ a = accum_norm_right(a, 1 - a.exp);
+ /*
+ * Do we have underflow?
+ * That's when we get an inexact answer because we ran out of bits
+ * in a denormal.
+ */
+ if (a.guard || a.round || a.sticky) {
+ float_raise(float_flag_underflow, fp_status);
+ }
}
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
- ret = infinite_float64(a_sign ^ b_sign);
- return ret;
+ /* OK, we're relatively canonical... now we need to round */
+ if (a.guard || a.round || a.sticky) {
+ float_raise(float_flag_inexact, fp_status);
+ switch (fp_status->float_rounding_mode) {
+ case float_round_to_zero:
+ /* Chop and we're done */
+ break;
+ case float_round_up:
+ if (a.sign == 0) {
+ a.mant = int128_add(a.mant, int128_one());
+ }
+ break;
+ case float_round_down:
+ if (a.sign != 0) {
+ a.mant = int128_add(a.mant, int128_one());
+ }
+ break;
+ default:
+ if (a.round || a.sticky) {
+ /* round up if guard is 1, down if guard is zero */
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
+ } else if (a.guard) {
+ /* exactly .5, round up if odd */
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
+ }
+ break;
+ }
}
- g_assert_not_reached();
-}
-
-static float32 special_fmaf(float32 a, float32 b, float32 c,
- float_status *fp_status)
-{
- float64 aa, bb, cc;
- aa = float32_to_float64(a, fp_status);
- bb = float32_to_float64(b, fp_status);
- cc = float32_to_float64(c, fp_status);
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
-}
-
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
- float_status *fp_status)
-{
- Accum prod;
- Accum acc;
- Accum result;
- accum_init(&prod);
- accum_init(&acc);
- accum_init(&result);
-
- uint8_t a_sign = float32_is_neg(a);
- uint8_t b_sign = float32_is_neg(b);
- uint8_t c_sign = float32_is_neg(c);
- if (float32_is_infinity(a) ||
- float32_is_infinity(b) ||
- float32_is_infinity(c)) {
- return special_fmaf(a, b, c, fp_status);
- }
- if (float32_is_any_nan(a) ||
- float32_is_any_nan(b) ||
- float32_is_any_nan(c)) {
- return special_fmaf(a, b, c, fp_status);
- }
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
- float32 tmp = float32_mul(a, b, fp_status);
- tmp = float32_add(tmp, c, fp_status);
- return tmp;
- }
-
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
-
/*
- * Note: extracting the mantissa into an int is multiplying by
- * 2**23, so adjust here
+ * OK, now we might have carried all the way up.
+ * So we might need to shr once
+ * at least we know that the lsb should be zero if we rounded and
+ * got a carry out...
*/
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
- prod.sign = a_sign ^ b_sign;
- if (float32_is_zero(a) || float32_is_zero(b)) {
- prod.exp = -2 * WAY_BIG_EXP;
- }
- if ((scale > 0) && float32_is_denormal(c)) {
- acc.mant = int128_mul_6464(0, 0);
- acc.exp = -WAY_BIG_EXP;
- acc.sign = c_sign;
- acc.sticky = 1;
- result = accum_add(prod, acc);
- } else if (!float32_is_zero(c)) {
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
- acc.exp = float32_getexp(c);
- acc.sign = c_sign;
- result = accum_add(prod, acc);
- } else {
- result = prod;
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
+ a = accum_norm_right(a, 1);
+ }
+ /* Overflow? */
+ if (a.exp >= DF_INF_EXP) {
+ /* Yep, inf result */
+ float_raise(float_flag_overflow, fp_status);
+ float_raise(float_flag_inexact, fp_status);
+ switch (fp_status->float_rounding_mode) {
+ case float_round_to_zero:
+ return maxfinite_float64(a.sign);
+ case float_round_up:
+ if (a.sign == 0) {
+ return infinite_float64(a.sign);
+ } else {
+ return maxfinite_float64(a.sign);
+ }
+ case float_round_down:
+ if (a.sign != 0) {
+ return infinite_float64(a.sign);
+ } else {
+ return maxfinite_float64(a.sign);
+ }
+ default:
+ return infinite_float64(a.sign);
+ }
}
- result.exp += scale;
- return accum_round_float32(result, fp_status);
-}
-
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
-{
- if (float32_is_zero(a) || float32_is_zero(b)) {
- return float32_mul(a, b, fp_status);
+ /* Underflow? */
+ ret = int128_getlo(a.mant);
+ if (ret & (1ULL << DF_MANTBITS)) {
+ /* Leading one means: No, we're normal. So, we should be done... */
+ ret = deposit64(ret, 52, 11, a.exp);
+ } else {
+ assert(a.exp == 1);
+ ret = deposit64(ret, 52, 11, 0);
}
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
+ ret = deposit64(ret, 63, 1, a.sign);
+ return ret;
}
float64 internal_mpyhh(float64 a, float64 b,
@@ -685,7 +455,7 @@ float64 internal_mpyhh(float64 a, float64 b,
float64_is_infinity(b)) {
return float64_mul(a, b, fp_status);
}
- x.mant = int128_mul_6464(accumulated, 1);
+ x.mant = int128_make64(accumulated);
x.sticky = sticky;
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
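
The rounding decision inside accum_round_float64() above can be read independently of the 128-bit accumulator plumbing. The sketch below is a simplified model only, assuming the mantissa is already normalized and the guard/round/sticky values are single bits; it is not code added by this patch.

def round_increment(mant, guard, rnd, sticky, sign, mode):
    """Decide whether the mantissa is incremented, mirroring the switch
    on float_rounding_mode in accum_round_float64()."""
    if not (guard or rnd or sticky):
        return 0                     # exact result, nothing to round
    if mode == "to_zero":
        return 0                     # chop
    if mode == "up":
        return 0 if sign else 1      # toward +inf
    if mode == "down":
        return 1 if sign else 0      # toward -inf
    # default: round to nearest, ties to even
    if rnd or sticky:
        return guard                 # clearly above or below the halfway point
    return mant & 1                  # exactly halfway: round up only if odd
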
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
index 91591d6..fed054b 100644
--- a/target/hexagon/fma_emu.h
+++ b/target/hexagon/fma_emu.h
@@ -30,9 +30,6 @@ static inline uint32_t float32_getexp_raw(float32 f32)
}
int32_t float32_getexp(float32 f32);
float32 infinite_float32(uint8_t sign);
-float32 internal_fmafx(float32 a, float32 b, float32 c,
- int scale, float_status *fp_status);
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
float64 internal_mpyhh(float64 a, float64 b,
unsigned long long int accumulated,
float_status *fp_status);
diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c
index 502c698..12d6b3b 100644
--- a/target/hexagon/gdbstub.c
+++ b/target/hexagon/gdbstub.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,6 +36,14 @@ int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_regl(mem_buf, env->gpr[n]);
}
+ n -= TOTAL_PER_THREAD_REGS;
+
+ if (n < NUM_PREGS) {
+ return gdb_get_reg8(mem_buf, env->pred[n]);
+ }
+
+ n -= NUM_PREGS;
+
g_assert_not_reached();
}
@@ -44,7 +52,7 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUHexagonState *env = cpu_env(cs);
if (n == HEX_REG_P3_0_ALIASED) {
- uint32_t p3_0 = ldtul_p(mem_buf);
+ uint32_t p3_0 = ldl_le_p(mem_buf);
for (int i = 0; i < NUM_PREGS; i++) {
env->pred[i] = extract32(p3_0, i * 8, 8);
}
@@ -52,10 +60,19 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
if (n < TOTAL_PER_THREAD_REGS) {
- env->gpr[n] = ldtul_p(mem_buf);
+ env->gpr[n] = ldl_le_p(mem_buf);
return sizeof(target_ulong);
}
+ n -= TOTAL_PER_THREAD_REGS;
+
+ if (n < NUM_PREGS) {
+ env->pred[n] = ldl_le_p(mem_buf) & 0xff;
+ return sizeof(uint8_t);
+ }
+
+ n -= NUM_PREGS;
+
g_assert_not_reached();
}
@@ -100,7 +117,7 @@ static int gdb_put_vreg(CPUHexagonState *env, uint8_t *mem_buf, int n)
{
int i;
for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) {
- env->VRegs[n].uw[i] = ldtul_p(mem_buf);
+ env->VRegs[n].uw[i] = ldl_le_p(mem_buf);
mem_buf += 4;
}
return MAX_VEC_SIZE_BYTES;
@@ -110,7 +127,7 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n)
{
int i;
for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) {
- env->QRegs[n].uw[i] = ldtul_p(mem_buf);
+ env->QRegs[n].uw[i] = ldl_le_p(mem_buf);
mem_buf += 4;
}
return MAX_VEC_SIZE_BYTES / 8;
diff --git a/target/hexagon/gen_analyze_funcs.py b/target/hexagon/gen_analyze_funcs.py
index 54bac19..3ac7cc2 100755
--- a/target/hexagon/gen_analyze_funcs.py
+++ b/target/hexagon/gen_analyze_funcs.py
@@ -78,11 +78,13 @@ def gen_analyze_func(f, tag, regs, imms):
def main():
- hex_common.read_common_files()
+ args = hex_common.parse_common_args(
+ "Emit functions analyzing register accesses"
+ )
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- with open(sys.argv[-1], "w") as f:
+ with open(args.out, "w") as f:
f.write("#ifndef HEXAGON_ANALYZE_FUNCS_C_INC\n")
f.write("#define HEXAGON_ANALYZE_FUNCS_C_INC\n\n")
diff --git a/target/hexagon/gen_decodetree.py b/target/hexagon/gen_decodetree.py
index a4fcd62..ce703af 100755
--- a/target/hexagon/gen_decodetree.py
+++ b/target/hexagon/gen_decodetree.py
@@ -24,6 +24,7 @@ import sys
import textwrap
import iset
import hex_common
+import argparse
encs = {
tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", "")))
@@ -191,8 +192,18 @@ def gen_decodetree_file(f, class_to_decode):
f.write(f"{tag}\t{enc_str} @{tag}\n")
+def main():
+ parser = argparse.ArgumentParser(
+ description="Emit opaque macro calls with instruction semantics"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("class_to_decode", help="instruction class to decode")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+
+ hex_common.read_semantics_file(args.semantics)
+ with open(args.out, "w") as f:
+ gen_decodetree_file(f, args.class_to_decode)
+
if __name__ == "__main__":
- hex_common.read_semantics_file(sys.argv[1])
- class_to_decode = sys.argv[2]
- with open(sys.argv[3], "w") as f:
- gen_decodetree_file(f, class_to_decode)
+ main()
diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py
index e9685bf..c1f806a 100755
--- a/target/hexagon/gen_helper_funcs.py
+++ b/target/hexagon/gen_helper_funcs.py
@@ -102,12 +102,13 @@ def gen_helper_function(f, tag, tagregs, tagimms):
def main():
- hex_common.read_common_files()
+ args = hex_common.parse_common_args(
+ "Emit helper function definitions for each instruction"
+ )
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- output_file = sys.argv[-1]
- with open(output_file, "w") as f:
+ with open(args.out, "w") as f:
for tag in hex_common.tags:
## Skip the priv instructions
if "A_PRIV" in hex_common.attribdict[tag]:
diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py
index fd2bfd0..77f8e0a 100755
--- a/target/hexagon/gen_helper_protos.py
+++ b/target/hexagon/gen_helper_protos.py
@@ -52,12 +52,13 @@ def gen_helper_prototype(f, tag, tagregs, tagimms):
def main():
- hex_common.read_common_files()
+ args = hex_common.parse_common_args(
+ "Emit helper function prototypes for each instruction"
+ )
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- output_file = sys.argv[-1]
- with open(output_file, "w") as f:
+ with open(args.out, "w") as f:
for tag in hex_common.tags:
## Skip the priv instructions
if "A_PRIV" in hex_common.attribdict[tag]:
diff --git a/target/hexagon/gen_idef_parser_funcs.py b/target/hexagon/gen_idef_parser_funcs.py
index eb494ab..2f6e826 100644
--- a/target/hexagon/gen_idef_parser_funcs.py
+++ b/target/hexagon/gen_idef_parser_funcs.py
@@ -20,6 +20,7 @@
import sys
import re
import string
+import argparse
from io import StringIO
import hex_common
@@ -43,14 +44,20 @@ import hex_common
## them are inputs ("in" prefix), while some others are outputs.
##
def main():
- hex_common.read_semantics_file(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ "Emit instruction implementations that can be fed to idef-parser"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
hex_common.calculate_attribs()
hex_common.init_registers()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- with open(sys.argv[-1], "w") as f:
- f.write('#include "macros.inc"\n\n')
+ with open(args.out, "w") as f:
+ f.write('#include "macros.h.inc"\n\n')
for tag in hex_common.tags:
## Skip the priv instructions
diff --git a/target/hexagon/gen_op_attribs.py b/target/hexagon/gen_op_attribs.py
index 9944822..bbbb02d 100755
--- a/target/hexagon/gen_op_attribs.py
+++ b/target/hexagon/gen_op_attribs.py
@@ -21,16 +21,23 @@ import sys
import re
import string
import hex_common
+import argparse
def main():
- hex_common.read_semantics_file(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ "Emit opaque macro calls containing instruction attributes"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
hex_common.calculate_attribs()
##
## Generate all the attributes associated with each instruction
##
- with open(sys.argv[-1], "w") as f:
+ with open(args.out, "w") as f:
for tag in hex_common.tags:
f.write(
f"OP_ATTRIB({tag},ATTRIBS("
diff --git a/target/hexagon/gen_opcodes_def.py b/target/hexagon/gen_opcodes_def.py
index 536f0eb..94a19ff 100755
--- a/target/hexagon/gen_opcodes_def.py
+++ b/target/hexagon/gen_opcodes_def.py
@@ -21,15 +21,22 @@ import sys
import re
import string
import hex_common
+import argparse
def main():
- hex_common.read_semantics_file(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ description="Emit opaque macro calls with instruction names"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
##
## Generate a list of all the opcodes
##
- with open(sys.argv[-1], "w") as f:
+ with open(args.out, "w") as f:
for tag in hex_common.tags:
f.write(f"OPCODE({tag}),\n")
diff --git a/target/hexagon/gen_printinsn.py b/target/hexagon/gen_printinsn.py
index 8bf4d09..d5f9699 100755
--- a/target/hexagon/gen_printinsn.py
+++ b/target/hexagon/gen_printinsn.py
@@ -21,6 +21,7 @@ import sys
import re
import string
import hex_common
+import argparse
##
@@ -96,11 +97,17 @@ def spacify(s):
def main():
- hex_common.read_semantics_file(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ "Emit opaque macro calls with information for printing string representations of instructions"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
immext_casere = re.compile(r"IMMEXT\(([A-Za-z])")
- with open(sys.argv[-1], "w") as f:
+ with open(args.out, "w") as f:
for tag in hex_common.tags:
if not hex_common.behdict[tag]:
continue
diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h
index 3fc1f4e..8a3b801 100644
--- a/target/hexagon/gen_tcg.h
+++ b/target/hexagon/gen_tcg.h
@@ -1365,7 +1365,7 @@
do { \
uiV = uiV; \
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \
- TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \
+ TCGv excp = tcg_constant_tl(HEX_EVENT_TRAP0); \
gen_helper_raise_exception(tcg_env, excp); \
} while (0)
#endif
diff --git a/target/hexagon/gen_tcg_func_table.py b/target/hexagon/gen_tcg_func_table.py
index 978ac18..299a39b 100755
--- a/target/hexagon/gen_tcg_func_table.py
+++ b/target/hexagon/gen_tcg_func_table.py
@@ -21,15 +21,22 @@ import sys
import re
import string
import hex_common
+import argparse
def main():
- hex_common.read_semantics_file(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ "Emit opaque macro calls with instruction semantics"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- with open(sys.argv[-1], "w") as f:
+ with open(args.out, "w") as f:
f.write("#ifndef HEXAGON_FUNC_TABLE_H\n")
f.write("#define HEXAGON_FUNC_TABLE_H\n\n")
diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py
index 05aa0a7..c2ba91d 100755
--- a/target/hexagon/gen_tcg_funcs.py
+++ b/target/hexagon/gen_tcg_funcs.py
@@ -108,15 +108,16 @@ def gen_def_tcg_func(f, tag, tagregs, tagimms):
def main():
- is_idef_parser_enabled = hex_common.read_common_files()
+ args = hex_common.parse_common_args(
+ "Emit functions calling generated code implementing instruction semantics (helpers, idef-parser)"
+ )
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
- output_file = sys.argv[-1]
- with open(output_file, "w") as f:
+ with open(args.out, "w") as f:
f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
f.write("#define HEXAGON_TCG_FUNCS_H\n\n")
- if is_idef_parser_enabled:
+ if args.idef_parser:
f.write('#include "idef-generated-emitter.h.inc"\n\n')
for tag in hex_common.tags:
diff --git a/target/hexagon/gen_trans_funcs.py b/target/hexagon/gen_trans_funcs.py
index 30f0c73..45da1b7 100755
--- a/target/hexagon/gen_trans_funcs.py
+++ b/target/hexagon/gen_trans_funcs.py
@@ -24,6 +24,7 @@ import sys
import textwrap
import iset
import hex_common
+import argparse
encs = {
tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", "")))
@@ -136,8 +137,19 @@ def gen_trans_funcs(f):
"""))
-if __name__ == "__main__":
- hex_common.read_semantics_file(sys.argv[1])
+def main():
+ parser = argparse.ArgumentParser(
+ description="Emit trans_*() functions to be called by " \
+ "instruction decoder"
+ )
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("out", help="output file")
+ args = parser.parse_args()
+ hex_common.read_semantics_file(args.semantics)
hex_common.init_registers()
- with open(sys.argv[2], "w") as f:
+ with open(args.out, "w") as f:
gen_trans_funcs(f)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index dbae6c5..08fc541 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -100,10 +100,6 @@ void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val)
gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
tcg_gen_mov_tl(get_result_gpr(ctx, rnum), val);
- if (HEX_DEBUG) {
- /* Do this so HELPER(debug_commit_end) will know */
- tcg_gen_movi_tl(hex_reg_written[rnum], 1);
- }
}
static void gen_log_reg_write_pair(DisasContext *ctx, int rnum, TCGv_i64 val)
@@ -151,9 +147,6 @@ void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
} else {
tcg_gen_and_tl(pred, pred, base_val);
}
- if (HEX_DEBUG) {
- tcg_gen_ori_tl(ctx->pred_written, ctx->pred_written, 1 << pnum);
- }
set_bit(pnum, ctx->pregs_written);
}
@@ -336,14 +329,14 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
- tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
+ tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_LE | MO_UL);
tcg_gen_mov_tl(hex_llsc_addr, vaddr);
tcg_gen_mov_tl(hex_llsc_val, dest);
}
static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
- tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_LE | MO_UQ);
tcg_gen_mov_tl(hex_llsc_addr, vaddr);
tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}
@@ -763,7 +756,7 @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
{
Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */
CHECK_NOSHUF(EA, 8);
- tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_LE | MO_UQ);
}
#ifndef CONFIG_HEXAGON_IDEF_PARSER
@@ -1237,7 +1230,7 @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
}
for (int i = 0; i < sizeof(MMVector) / 8; i++) {
- tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_LE | MO_UQ);
tcg_gen_addi_tl(src, src, 8);
tcg_gen_st_i64(tmp, tcg_env, dstoff + i * 8);
}
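
The MO_TEUL/MO_TEUQ forms above are simply MO_TE | MO_UL and MO_TE | MO_UQ, and Hexagon is a little-endian-only target, so MO_TE resolves to MO_LE there; spelling the flags out explicitly does not change behaviour. A minimal sketch of the resulting call shape, assuming the usual Hexagon DisasContext and QEMU's TCG headers; the same respelling recurs below in macros.h, translate.c and the idef-parser output.

    /* Explicit little-endian 64-bit unsigned load, as the patch now spells it. */
    static void sketch_load_u64(DisasContext *ctx, TCGv_i64 dest, TCGv vaddr)
    {
        tcg_gen_qemu_ld_i64(dest, vaddr, ctx->mem_idx, MO_LE | MO_UQ);
    }
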
diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h
index fa0ebaf..f8baa59 100644
--- a/target/hexagon/helper.h
+++ b/target/hexagon/helper.h
@@ -19,9 +19,6 @@
#include "helper_protos_generated.h.inc"
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_RETURN, noreturn, env, i32)
-DEF_HELPER_1(debug_start_packet, void, env)
-DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int)
-DEF_HELPER_FLAGS_5(debug_commit_end, TCG_CALL_NO_WG, void, env, i32, int, int, int)
DEF_HELPER_2(commit_store, void, env, int)
DEF_HELPER_3(gather_store, void, env, i32, int)
DEF_HELPER_1(commit_hvx_stores, void, env)
diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py
index 15ed498..758e5fd 100755
--- a/target/hexagon/hex_common.py
+++ b/target/hexagon/hex_common.py
@@ -21,6 +21,7 @@ import sys
import re
import string
import textwrap
+import argparse
behdict = {} # tag ->behavior
semdict = {} # tag -> semantics
@@ -1181,22 +1182,20 @@ def helper_args(tag, regs, imms):
return args
-def read_common_files():
- read_semantics_file(sys.argv[1])
- read_overrides_file(sys.argv[2])
- read_overrides_file(sys.argv[3])
- ## Whether or not idef-parser is enabled is
- ## determined by the number of arguments to
- ## this script:
- ##
- ## 4 args. -> not enabled,
- ## 5 args. -> idef-parser enabled.
- ##
- ## The 5:th arg. then holds a list of the successfully
- ## parsed instructions.
- is_idef_parser_enabled = len(sys.argv) > 5
- if is_idef_parser_enabled:
- read_idef_parser_enabled_file(sys.argv[4])
+def parse_common_args(desc):
+ parser = argparse.ArgumentParser(desc)
+ parser.add_argument("semantics", help="semantics file")
+ parser.add_argument("overrides", help="overrides file")
+ parser.add_argument("overrides_vec", help="vector overrides file")
+ parser.add_argument("out", help="output file")
+ parser.add_argument("--idef-parser",
+ help="file of instructions translated by idef-parser")
+ args = parser.parse_args()
+ read_semantics_file(args.semantics)
+ read_overrides_file(args.overrides)
+ read_overrides_file(args.overrides_vec)
+ if args.idef_parser:
+ read_idef_parser_enabled_file(args.idef_parser)
calculate_attribs()
init_registers()
- return is_idef_parser_enabled
+ return args
diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst
index d0aa343..7199177 100644
--- a/target/hexagon/idef-parser/README.rst
+++ b/target/hexagon/idef-parser/README.rst
@@ -138,7 +138,7 @@ we obtain the pseudo code
with macros such as ``fJUMPR`` intact.
The second step is to expand macros into a form suitable for our parser.
-These macros are defined in ``idef-parser/macros.inc`` and the step is
+These macros are defined in ``idef-parser/macros.h.inc`` and the step is
carried out by the ``prepare`` script which runs the C preprocessor on
``idef_parser_input.h.inc`` to produce
``idef_parser_input.preprocessed.h.inc``.
@@ -266,7 +266,7 @@ in plain C is defined as
#define fABS(A) (((A) < 0) ? (-(A)) : (A))
and returns the absolute value of the argument ``A``. This macro is not included
-in ``idef-parser/macros.inc`` and as such is not expanded and kept as a "call"
+in ``idef-parser/macros.h.inc`` and as such is not expanded and kept as a "call"
``fABS(...)``. The reason is that ``fABS`` is easier to match and map to
``tcg_gen_abs_<width>``, compared to the full ternary expression above. Loads of
macros in ``macros.h`` are kept unexpanded to aid in parsing, as seen in the
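
As a rough illustration of why leaving ``fABS`` unexpanded helps (a hypothetical sketch, not code taken from the generator): the parser can lower the intact macro call to a single TCG op instead of having to pattern-match the expanded ternary.

    /* Sketch: one op for the intact fABS(RsV) "call" ... */
    static void sketch_gen_fabs(TCGv_i32 RdV, TCGv_i32 RsV)
    {
        tcg_gen_abs_i32(RdV, RsV);
        /* ... rather than recognising the expanded form of the macro:
         *     RdV = ((RsV) < 0) ? (-(RsV)) : (RsV);
         */
    }
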
diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y
index 9ffb9f9..c6f17c6 100644
--- a/target/hexagon/idef-parser/idef-parser.y
+++ b/target/hexagon/idef-parser/idef-parser.y
@@ -800,7 +800,6 @@ rvalue : FAIL
lvalue : FAIL
{
- @1.last_column = @1.last_column;
yyassert(c, &@1, false, "Encountered a FAIL token as lvalue.\n");
}
| REG
diff --git a/target/hexagon/idef-parser/macros.inc b/target/hexagon/idef-parser/macros.h.inc
index 94975d9..94975d9 100644
--- a/target/hexagon/idef-parser/macros.inc
+++ b/target/hexagon/idef-parser/macros.h.inc
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index a7dcd85..542af8d 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -1761,7 +1761,7 @@ void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
if (signedness == SIGNED) {
OUT(c, locp, " | MO_SIGN");
}
- OUT(c, locp, " | MO_TE);\n");
+ OUT(c, locp, " | MO_LE);\n");
}
void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea,
diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h
index beb08cb..32e96f0 100644
--- a/target/hexagon/internal.h
+++ b/target/hexagon/internal.h
@@ -20,17 +20,6 @@
#include "qemu/log.h"
-/*
- * Change HEX_DEBUG to 1 to turn on debugging output
- */
-#define HEX_DEBUG 0
-#define HEX_DEBUG_LOG(...) \
- do { \
- if (HEX_DEBUG) { \
- qemu_log(__VA_ARGS__); \
- } \
- } while (0)
-
int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int hexagon_hvx_gdb_read_register(CPUState *env, GByteArray *mem_buf, int n);
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index ee3d4c8..9ba9be4 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -21,6 +21,7 @@
#include "cpu.h"
#include "hex_regs.h"
#include "reg_fields.h"
+#include "accel/tcg/getpc.h"
#define GET_FIELD(FIELD, REGIN) \
fEXTRACTU_BITS(REGIN, reg_field_info[FIELD].width, \
@@ -115,27 +116,27 @@
#define MEM_LOAD2s(DST, VA) \
do { \
CHECK_NOSHUF(VA, 2); \
- tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \
+ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_SW); \
} while (0)
#define MEM_LOAD2u(DST, VA) \
do { \
CHECK_NOSHUF(VA, 2); \
- tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \
+ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_UW); \
} while (0)
#define MEM_LOAD4s(DST, VA) \
do { \
CHECK_NOSHUF(VA, 4); \
- tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \
+ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_SL); \
} while (0)
#define MEM_LOAD4u(DST, VA) \
do { \
CHECK_NOSHUF(VA, 4); \
- tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \
+ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_UL); \
} while (0)
#define MEM_LOAD8u(DST, VA) \
do { \
CHECK_NOSHUF(VA, 8); \
- tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \
+ tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_LE | MO_UQ); \
} while (0)
#define MEM_STORE1_FUNC(X) \
diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
index b0b253a..bb4ebaa 100644
--- a/target/hexagon/meson.build
+++ b/target/hexagon/meson.build
@@ -284,7 +284,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
'idef_parser_input.preprocessed.h.inc',
output: 'idef_parser_input.preprocessed.h.inc',
input: idef_parser_input_generated,
- depend_files: [idef_parser_dir / 'macros.inc'],
+ depend_files: [idef_parser_dir / 'macros.h.inc'],
command: [idef_parser_dir / 'prepare', '@INPUT@', '-I' + idef_parser_dir, '-o', '@OUTPUT@'],
)
@@ -300,7 +300,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
arguments: ['@INPUT@', '--defines=@OUTPUT1@', '--output=@OUTPUT0@']
)
- glib_dep = dependency('glib-2.0', native: true)
+ glib_dep = dependency('glib-2.0', native: true, static: false)
idef_parser = executable(
'idef-parser',
@@ -346,7 +346,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
# Setup input and dependencies for the next step, this depends on whether or
# not idef-parser is enabled
helper_dep = [semantics_generated, idef_generated_tcg_c, idef_generated_tcg]
- helper_in = [semantics_generated, gen_tcg_h, gen_tcg_hvx_h, idef_generated_list]
+ helper_in = [semantics_generated, gen_tcg_h, gen_tcg_hvx_h, '--idef-parser', idef_generated_list]
else
# Setup input and dependencies for the next step, this depends on whether or
# not idef-parser is enabled
diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
index 1ceb945..c7840fb 100644
--- a/target/hexagon/mmvec/macros.h
+++ b/target/hexagon/mmvec/macros.h
@@ -21,28 +21,30 @@
#include "qemu/host-utils.h"
#include "arch.h"
#include "mmvec/system_ext_mmvec.h"
+#include "accel/tcg/getpc.h"
+#include "accel/tcg/probe.h"
#ifndef QEMU_GENERATE
-#define VdV (*(MMVector *)(VdV_void))
-#define VsV (*(MMVector *)(VsV_void))
-#define VuV (*(MMVector *)(VuV_void))
-#define VvV (*(MMVector *)(VvV_void))
-#define VwV (*(MMVector *)(VwV_void))
-#define VxV (*(MMVector *)(VxV_void))
-#define VyV (*(MMVector *)(VyV_void))
+#define VdV (*(MMVector *restrict)(VdV_void))
+#define VsV (*(MMVector *restrict)(VsV_void))
+#define VuV (*(MMVector *restrict)(VuV_void))
+#define VvV (*(MMVector *restrict)(VvV_void))
+#define VwV (*(MMVector *restrict)(VwV_void))
+#define VxV (*(MMVector *restrict)(VxV_void))
+#define VyV (*(MMVector *restrict)(VyV_void))
-#define VddV (*(MMVectorPair *)(VddV_void))
-#define VuuV (*(MMVectorPair *)(VuuV_void))
-#define VvvV (*(MMVectorPair *)(VvvV_void))
-#define VxxV (*(MMVectorPair *)(VxxV_void))
+#define VddV (*(MMVectorPair *restrict)(VddV_void))
+#define VuuV (*(MMVectorPair *restrict)(VuuV_void))
+#define VvvV (*(MMVectorPair *restrict)(VvvV_void))
+#define VxxV (*(MMVectorPair *restrict)(VxxV_void))
-#define QeV (*(MMQReg *)(QeV_void))
-#define QdV (*(MMQReg *)(QdV_void))
-#define QsV (*(MMQReg *)(QsV_void))
-#define QtV (*(MMQReg *)(QtV_void))
-#define QuV (*(MMQReg *)(QuV_void))
-#define QvV (*(MMQReg *)(QvV_void))
-#define QxV (*(MMQReg *)(QxV_void))
+#define QeV (*(MMQReg *restrict)(QeV_void))
+#define QdV (*(MMQReg *restrict)(QdV_void))
+#define QsV (*(MMQReg *restrict)(QsV_void))
+#define QtV (*(MMQReg *restrict)(QtV_void))
+#define QuV (*(MMQReg *restrict)(QuV_void))
+#define QvV (*(MMQReg *restrict)(QvV_void))
+#define QxV (*(MMQReg *restrict)(QxV_void))
#endif
#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index ae5a605..444799d 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,8 +17,8 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "cpu.h"
@@ -54,9 +54,6 @@ G_NORETURN void HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp)
void log_store32(CPUHexagonState *env, target_ulong addr,
target_ulong val, int width, int slot)
{
- HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx
- ", %" PRId32 " [0x08%" PRIx32 "])\n",
- width, addr, val, val);
env->mem_log_stores[slot].va = addr;
env->mem_log_stores[slot].width = width;
env->mem_log_stores[slot].data32 = val;
@@ -65,35 +62,11 @@ void log_store32(CPUHexagonState *env, target_ulong addr,
void log_store64(CPUHexagonState *env, target_ulong addr,
int64_t val, int width, int slot)
{
- HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx
- ", %" PRId64 " [0x016%" PRIx64 "])\n",
- width, addr, val, val);
env->mem_log_stores[slot].va = addr;
env->mem_log_stores[slot].width = width;
env->mem_log_stores[slot].data64 = val;
}
-/* Handy place to set a breakpoint */
-void HELPER(debug_start_packet)(CPUHexagonState *env)
-{
- HEX_DEBUG_LOG("Start packet: pc = 0x" TARGET_FMT_lx "\n",
- env->gpr[HEX_REG_PC]);
-
- for (int i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
- env->reg_written[i] = 0;
- }
-}
-
-/* Checks for bookkeeping errors between disassembly context and runtime */
-void HELPER(debug_check_store_width)(CPUHexagonState *env, int slot, int check)
-{
- if (env->mem_log_stores[slot].width != check) {
- HEX_DEBUG_LOG("ERROR: %d != %d\n",
- env->mem_log_stores[slot].width, check);
- g_assert_not_reached();
- }
-}
-
static void commit_store(CPUHexagonState *env, int slot_num, uintptr_t ra)
{
uint8_t width = env->mem_log_stores[slot_num].width;
@@ -173,91 +146,6 @@ void HELPER(commit_hvx_stores)(CPUHexagonState *env)
}
}
-static void print_store(CPUHexagonState *env, int slot)
-{
- if (!(env->slot_cancelled & (1 << slot))) {
- uint8_t width = env->mem_log_stores[slot].width;
- if (width == 1) {
- uint32_t data = env->mem_log_stores[slot].data32 & 0xff;
- HEX_DEBUG_LOG("\tmemb[0x" TARGET_FMT_lx "] = %" PRId32
- " (0x%02" PRIx32 ")\n",
- env->mem_log_stores[slot].va, data, data);
- } else if (width == 2) {
- uint32_t data = env->mem_log_stores[slot].data32 & 0xffff;
- HEX_DEBUG_LOG("\tmemh[0x" TARGET_FMT_lx "] = %" PRId32
- " (0x%04" PRIx32 ")\n",
- env->mem_log_stores[slot].va, data, data);
- } else if (width == 4) {
- uint32_t data = env->mem_log_stores[slot].data32;
- HEX_DEBUG_LOG("\tmemw[0x" TARGET_FMT_lx "] = %" PRId32
- " (0x%08" PRIx32 ")\n",
- env->mem_log_stores[slot].va, data, data);
- } else if (width == 8) {
- HEX_DEBUG_LOG("\tmemd[0x" TARGET_FMT_lx "] = %" PRId64
- " (0x%016" PRIx64 ")\n",
- env->mem_log_stores[slot].va,
- env->mem_log_stores[slot].data64,
- env->mem_log_stores[slot].data64);
- } else {
- HEX_DEBUG_LOG("\tBad store width %d\n", width);
- g_assert_not_reached();
- }
- }
-}
-
-/* This function is a handy place to set a breakpoint */
-void HELPER(debug_commit_end)(CPUHexagonState *env, uint32_t this_PC,
- int pred_written, int has_st0, int has_st1)
-{
- bool reg_printed = false;
- bool pred_printed = false;
- int i;
-
- HEX_DEBUG_LOG("Packet committed: pc = 0x" TARGET_FMT_lx "\n", this_PC);
- HEX_DEBUG_LOG("slot_cancelled = %d\n", env->slot_cancelled);
-
- for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
- if (env->reg_written[i]) {
- if (!reg_printed) {
- HEX_DEBUG_LOG("Regs written\n");
- reg_printed = true;
- }
- HEX_DEBUG_LOG("\tr%d = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")\n",
- i, env->gpr[i], env->gpr[i]);
- }
- }
-
- for (i = 0; i < NUM_PREGS; i++) {
- if (pred_written & (1 << i)) {
- if (!pred_printed) {
- HEX_DEBUG_LOG("Predicates written\n");
- pred_printed = true;
- }
- HEX_DEBUG_LOG("\tp%d = 0x" TARGET_FMT_lx "\n",
- i, env->pred[i]);
- }
- }
-
- if (has_st0 || has_st1) {
- HEX_DEBUG_LOG("Stores\n");
- if (has_st0) {
- print_store(env, 0);
- }
- if (has_st1) {
- print_store(env, 1);
- }
- }
-
- HEX_DEBUG_LOG("Next PC = " TARGET_FMT_lx "\n", env->gpr[HEX_REG_PC]);
- HEX_DEBUG_LOG("Exec counters: pkt = " TARGET_FMT_lx
- ", insn = " TARGET_FMT_lx
- ", hvx = " TARGET_FMT_lx "\n",
- env->gpr[HEX_REG_QEMU_PKT_CNT],
- env->gpr[HEX_REG_QEMU_INSN_CNT],
- env->gpr[HEX_REG_QEMU_HVX_CNT]);
-
-}
-
int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS)
{
uint32_t K_const = extract32(M, 24, 4);
@@ -683,7 +571,7 @@ uint32_t HELPER(conv_sf2uw)(CPUHexagonState *env, float32 RsV)
uint32_t RdV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) {
float_raise(float_flag_invalid, &env->fp_status);
RdV = 0;
} else {
@@ -713,7 +601,7 @@ uint64_t HELPER(conv_sf2ud)(CPUHexagonState *env, float32 RsV)
uint64_t RddV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) {
float_raise(float_flag_invalid, &env->fp_status);
RddV = 0;
} else {
@@ -743,7 +631,7 @@ uint32_t HELPER(conv_df2uw)(CPUHexagonState *env, float64 RssV)
uint32_t RdV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) {
float_raise(float_flag_invalid, &env->fp_status);
RdV = 0;
} else {
@@ -773,7 +661,7 @@ uint64_t HELPER(conv_df2ud)(CPUHexagonState *env, float64 RssV)
uint64_t RddV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) {
float_raise(float_flag_invalid, &env->fp_status);
RddV = 0;
} else {
@@ -803,7 +691,7 @@ uint32_t HELPER(conv_sf2uw_chop)(CPUHexagonState *env, float32 RsV)
uint32_t RdV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) {
float_raise(float_flag_invalid, &env->fp_status);
RdV = 0;
} else {
@@ -833,7 +721,7 @@ uint64_t HELPER(conv_sf2ud_chop)(CPUHexagonState *env, float32 RsV)
uint64_t RddV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) {
float_raise(float_flag_invalid, &env->fp_status);
RddV = 0;
} else {
@@ -863,7 +751,7 @@ uint32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV)
uint32_t RdV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) {
float_raise(float_flag_invalid, &env->fp_status);
RdV = 0;
} else {
@@ -893,7 +781,7 @@ uint64_t HELPER(conv_df2ud_chop)(CPUHexagonState *env, float64 RssV)
uint64_t RddV;
arch_fpop_start(env);
/* Hexagon checks the sign before rounding */
- if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) {
float_raise(float_flag_invalid, &env->fp_status);
RddV = 0;
} else {
@@ -1157,7 +1045,7 @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
{
float32 RdV;
arch_fpop_start(env);
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
arch_fpop_end(env);
return RdV;
}
@@ -1166,41 +1054,18 @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
float32 RsV, float32 RtV)
{
arch_fpop_start(env);
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
arch_fpop_end(env);
return RxV;
}
-static bool is_zero_prod(float32 a, float32 b)
-{
- return ((float32_is_zero(a) && is_finite(b)) ||
- (float32_is_zero(b) && is_finite(a)));
-}
-
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
-{
- float32 ret = dst;
- if (float32_is_any_nan(x)) {
- if (extract32(x, 22, 1) == 0) {
- float_raise(float_flag_invalid, fp_status);
- }
- ret = make_float32(0xffffffff); /* nan */
- }
- return ret;
-}
-
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
float32 RsV, float32 RtV, float32 PuV)
{
- size4s_t tmp;
arch_fpop_start(env);
- RxV = check_nan(RxV, RxV, &env->fp_status);
- RxV = check_nan(RxV, RsV, &env->fp_status);
- RxV = check_nan(RxV, RtV, &env->fp_status);
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
- RxV = tmp;
- }
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
+ float_muladd_suppress_add_product_zero,
+ &env->fp_status);
arch_fpop_end(env);
return RxV;
}
@@ -1208,86 +1073,50 @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
float32 RsV, float32 RtV)
{
- float32 neg_RsV;
arch_fpop_start(env);
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
+ &env->fp_status);
arch_fpop_end(env);
return RxV;
}
-static bool is_inf_prod(int32_t a, int32_t b)
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV, int negate)
{
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
-}
-
-float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
- float32 RsV, float32 RtV)
-{
- bool infinp;
- bool infminusinf;
- float32 tmp;
+ int flags;
arch_fpop_start(env);
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
- infminusinf = float32_is_infinity(RxV) &&
- is_inf_prod(RsV, RtV) &&
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
- infinp = float32_is_infinity(RxV) ||
- float32_is_infinity(RtV) ||
- float32_is_infinity(RsV);
- RxV = check_nan(RxV, RxV, &env->fp_status);
- RxV = check_nan(RxV, RsV, &env->fp_status);
- RxV = check_nan(RxV, RtV, &env->fp_status);
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
- RxV = tmp;
- }
- set_float_exception_flags(0, &env->fp_status);
- if (float32_is_infinity(RxV) && !infinp) {
- RxV = RxV - 1;
- }
- if (infminusinf) {
- RxV = 0;
+
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
+ RxV = float32_muladd(RsV, RtV, RxV,
+ negate | float_muladd_suppress_add_product_zero,
+ &env->fp_status);
+
+ flags = get_float_exception_flags(&env->fp_status);
+ if (flags) {
+ /* Flags are suppressed by this instruction. */
+ set_float_exception_flags(0, &env->fp_status);
+
+ /* Return 0 for Inf - Inf. */
+ if (flags & float_flag_invalid_isi) {
+ RxV = 0;
+ }
}
+
arch_fpop_end(env);
return RxV;
}
-float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
+float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
float32 RsV, float32 RtV)
{
- bool infinp;
- bool infminusinf;
- float32 tmp;
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
+}
- arch_fpop_start(env);
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
- infminusinf = float32_is_infinity(RxV) &&
- is_inf_prod(RsV, RtV) &&
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
- infinp = float32_is_infinity(RxV) ||
- float32_is_infinity(RtV) ||
- float32_is_infinity(RsV);
- RxV = check_nan(RxV, RxV, &env->fp_status);
- RxV = check_nan(RxV, RsV, &env->fp_status);
- RxV = check_nan(RxV, RtV, &env->fp_status);
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
- RxV = tmp;
- }
- set_float_exception_flags(0, &env->fp_status);
- if (float32_is_infinity(RxV) && !infinp) {
- RxV = RxV - 1;
- }
- if (infminusinf) {
- RxV = 0;
- }
- arch_fpop_end(env);
- return RxV;
+float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV)
+{
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
}
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
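
The rewritten sffms/sffma helpers lean on softfloat flags for the special cases the removed code open-coded: float_muladd_negate_product replaces the manual sign flip, float_muladd_suppress_add_product_zero keeps the addend when the product is a true zero, and float_round_nearest_even_max together with the float_flag_invalid_isi check covers the saturate-on-overflow and Inf minus Inf handling. A minimal sketch of the negate-product equivalence, assuming fpu/softfloat.h (NaN sign details aside):

    /* Both forms compute RxV - RsV * RtV with a single rounding step. */
    static float32 fms_by_hand(float32 RxV, float32 RsV, float32 RtV,
                               float_status *fs)
    {
        float32 neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
        return float32_muladd(neg_RsV, RtV, RxV, 0, fs);
    }

    static float32 fms_by_flag(float32 RxV, float32 RsV, float32 RtV,
                               float_status *fs)
    {
        return float32_muladd(RsV, RtV, RxV, float_muladd_negate_product, fs);
    }
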
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 4b1bee3..02fd40c 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -23,7 +23,7 @@
#include "exec/helper-gen.h"
#include "exec/helper-proto.h"
#include "exec/translation-block.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
#include "internal.h"
#include "attribs.h"
@@ -50,7 +50,6 @@ TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
TCGv hex_pred[NUM_PREGS];
TCGv hex_slot_cancelled;
TCGv hex_new_value_usr;
-TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
TCGv hex_store_addr[STORES_MAX];
TCGv hex_store_width[STORES_MAX];
TCGv hex_store_val32[STORES_MAX];
@@ -195,21 +194,6 @@ static void gen_exception_end_tb(DisasContext *ctx, int excp)
}
-#define PACKET_BUFFER_LEN 1028
-static void print_pkt(Packet *pkt)
-{
- GString *buf = g_string_sized_new(PACKET_BUFFER_LEN);
- snprint_a_pkt_debug(buf, pkt);
- HEX_DEBUG_LOG("%s", buf->str);
- g_string_free(buf, true);
-}
-#define HEX_DEBUG_PRINT_PKT(pkt) \
- do { \
- if (HEX_DEBUG) { \
- print_pkt(pkt); \
- } \
- } while (0)
-
static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
uint32_t words[])
{
@@ -235,14 +219,6 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
g_assert(ctx->base.num_insns == 1);
}
- HEX_DEBUG_LOG("decode_packet: pc = 0x%" VADDR_PRIx "\n",
- ctx->base.pc_next);
- HEX_DEBUG_LOG(" words = { ");
- for (int i = 0; i < nwords; i++) {
- HEX_DEBUG_LOG("0x%x, ", words[i]);
- }
- HEX_DEBUG_LOG("}\n");
-
return nwords;
}
@@ -465,11 +441,6 @@ static void gen_start_packet(DisasContext *ctx)
*/
bitmap_zero(ctx->pregs_written, NUM_PREGS);
- if (HEX_DEBUG) {
- /* Handy place to set a breakpoint before the packet executes */
- gen_helper_debug_start_packet(tcg_env);
- }
-
/* Initialize the runtime state for packet semantics */
if (need_slot_cancelled(pkt)) {
tcg_gen_movi_tl(hex_slot_cancelled, 0);
@@ -484,10 +455,6 @@ static void gen_start_packet(DisasContext *ctx)
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], next_PC);
}
}
- if (HEX_DEBUG) {
- ctx->pred_written = tcg_temp_new();
- tcg_gen_movi_tl(ctx->pred_written, 0);
- }
/* Preload the predicated registers into get_result_gpr(ctx, i) */
if (ctx->need_commit &&
@@ -591,7 +558,7 @@ static void gen_insn(DisasContext *ctx)
ctx->insn->generate(ctx);
mark_store_width(ctx);
} else {
- gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE);
+ gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_OPCODE);
}
}
@@ -635,15 +602,6 @@ static void gen_pred_writes(DisasContext *ctx)
}
}
-static void gen_check_store_width(DisasContext *ctx, int slot_num)
-{
- if (HEX_DEBUG) {
- TCGv slot = tcg_constant_tl(slot_num);
- TCGv check = tcg_constant_tl(ctx->store_width[slot_num]);
- gen_helper_debug_check_store_width(tcg_env, slot, check);
- }
-}
-
static bool slot_is_predicated(Packet *pkt, int slot_num)
{
for (int i = 0; i < pkt->num_insns; i++) {
@@ -691,28 +649,24 @@ void process_store(DisasContext *ctx, int slot_num)
*/
switch (ctx->store_width[slot_num]) {
case 1:
- gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num],
ctx->mem_idx, MO_UB);
break;
case 2:
- gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num],
- ctx->mem_idx, MO_TEUW);
+ ctx->mem_idx, MO_LE | MO_UW);
break;
case 4:
- gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num],
- ctx->mem_idx, MO_TEUL);
+ ctx->mem_idx, MO_LE | MO_UL);
break;
case 8:
- gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st_i64(hex_store_val64[slot_num],
hex_store_addr[slot_num],
- ctx->mem_idx, MO_TEUQ);
+ ctx->mem_idx, MO_LE | MO_UQ);
break;
default:
{
@@ -937,16 +891,6 @@ static void gen_commit_packet(DisasContext *ctx)
gen_commit_hvx(ctx);
}
update_exec_counters(ctx);
- if (HEX_DEBUG) {
- TCGv has_st0 =
- tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa);
- TCGv has_st1 =
- tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);
-
- /* Handy place to set a breakpoint at the end of execution */
- gen_helper_debug_commit_end(tcg_env, tcg_constant_tl(ctx->pkt->pc),
- ctx->pred_written, has_st0, has_st1);
- }
if (pkt->vhist_insn != NULL) {
ctx->pre_commit = false;
@@ -968,14 +912,13 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
nwords = read_packet_words(env, ctx, words);
if (!nwords) {
- gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
+ gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_PACKET);
return;
}
ctx->pkt = &pkt;
if (decode_packet(ctx, nwords, words, &pkt, false) > 0) {
pkt.pc = ctx->base.pc_next;
- HEX_DEBUG_PRINT_PKT(&pkt);
gen_start_packet(ctx);
for (i = 0; i < pkt.num_insns; i++) {
ctx->insn = &pkt.insn[i];
@@ -984,7 +927,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
gen_commit_packet(ctx);
ctx->base.pc_next += pkt.encod_pkt_size_in_bytes;
} else {
- gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
+ gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_PACKET);
}
}
@@ -1083,8 +1026,8 @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
@@ -1093,7 +1036,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
}
#define NAME_LEN 64
-static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
static char store_addr_names[STORES_MAX][NAME_LEN];
static char store_width_names[STORES_MAX][NAME_LEN];
static char store_val32_names[STORES_MAX][NAME_LEN];
@@ -1112,14 +1054,6 @@ void hexagon_translate_init(void)
hex_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, gpr[i]),
hexagon_regnames[i]);
-
- if (HEX_DEBUG) {
- snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
- hexagon_regnames[i]);
- hex_reg_written[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUHexagonState, reg_written[i]),
- reg_written_names[i]);
- }
}
hex_new_value_usr = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, new_value_usr), "new_value_usr");
diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h
index 00cc2bc..d251e22 100644
--- a/target/hexagon/translate.h
+++ b/target/hexagon/translate.h
@@ -73,7 +73,6 @@ typedef struct DisasContext {
bool has_hvx_overlap;
TCGv new_value[TOTAL_PER_THREAD_REGS];
TCGv new_pred_value[NUM_PREGS];
- TCGv pred_written;
TCGv branch_taken;
TCGv dczero_addr;
} DisasContext;
@@ -271,7 +270,6 @@ extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
extern TCGv hex_pred[NUM_PREGS];
extern TCGv hex_slot_cancelled;
extern TCGv hex_new_value_usr;
-extern TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
extern TCGv hex_store_addr[STORES_MAX];
extern TCGv hex_store_width[STORES_MAX];
extern TCGv hex_store_val32[STORES_MAX];
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index 473d489..9bf7ac7 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -2,14 +2,12 @@
* PA-RISC cpu parameters for qemu.
*
* Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef HPPA_CPU_PARAM_H
#define HPPA_CPU_PARAM_H
-#define TARGET_LONG_BITS 64
-
#if defined(CONFIG_USER_ONLY) && defined(TARGET_ABI32)
# define TARGET_PHYS_ADDR_SPACE_BITS 32
# define TARGET_VIRT_ADDR_SPACE_BITS 32
@@ -21,12 +19,6 @@
#define TARGET_PAGE_BITS 12
-/* PA-RISC 1.x processors have a strong memory model. */
-/*
- * ??? While we do not yet implement PA-RISC 2.0, those processors have
- * a weak memory model, but with TLB bits that force ordering on a per-page
- * basis. It's probably easier to fall back to a strong memory model.
- */
-#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
+#define TARGET_INSN_START_EXTRA_WORDS 2
#endif
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 7cf2e2f..2477772 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -24,9 +24,12 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
+#include "hw/hppa/hppa_hardware.h"
+#include "accel/tcg/cpu-ops.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -43,15 +46,17 @@ static vaddr hppa_cpu_get_pc(CPUState *cs)
{
CPUHPPAState *env = cpu_env(cs);
- return hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
- env->iaoq_f & -4);
+ return hppa_form_gva_mask(env->gva_offset_mask,
+ (env->psw & PSW_C ? env->iasq_f : 0),
+ env->iaoq_f & -4);
}
-void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *pcsbase, uint32_t *pflags)
+static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs)
{
+ CPUHPPAState *env = cpu_env(cs);
uint32_t flags = 0;
uint64_t cs_base = 0;
+ vaddr pc;
/*
* TB lookup assumes that PC contains the complete virtual address.
@@ -59,7 +64,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
* incomplete virtual address. This also means that we must separate
* out current cpu privilege from the low bits of IAOQ_F.
*/
- *pc = hppa_cpu_get_pc(env_cpu(env));
+ pc = hppa_cpu_get_pc(env_cpu(env));
flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
/*
@@ -89,10 +94,13 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
& (env->sr[4] == env->sr[7])) {
flags |= TB_FLAG_SR_SAME;
}
+ if ((env->psw & PSW_W) &&
+ (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)) {
+ flags |= TB_FLAG_SPHASH;
+ }
#endif
- *pcsbase = cs_base;
- *pflags = flags;
+ return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
@@ -124,10 +132,12 @@ static void hppa_restore_state_to_opc(CPUState *cs,
env->psw_n = 0;
}
+#ifndef CONFIG_USER_ONLY
static bool hppa_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
+#endif /* !CONFIG_USER_ONLY */
static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -143,6 +153,7 @@ static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
{
info->mach = bfd_mach_hppa20;
+ info->endian = BFD_ENDIAN_BIG;
info->print_insn = print_insn_hppa;
}
@@ -194,13 +205,33 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
static void hppa_cpu_initfn(Object *obj)
{
+ CPUHPPAState *env = cpu_env(CPU(obj));
+
+ env->is_pa20 = !!object_dynamic_cast(obj, TYPE_HPPA64_CPU);
+}
+
+static void hppa_cpu_reset_hold(Object *obj, ResetType type)
+{
+ HPPACPUClass *scc = HPPA_CPU_GET_CLASS(obj);
CPUState *cs = CPU(obj);
HPPACPU *cpu = HPPA_CPU(obj);
CPUHPPAState *env = &cpu->env;
+ if (scc->parent_phases.hold) {
+ scc->parent_phases.hold(obj, type);
+ }
cs->exception_index = -1;
+ cs->halted = 0;
+ cpu_set_pc(cs, 0xf0000004);
+
+ memset(env, 0, offsetof(CPUHPPAState, end_reset_fields));
+
cpu_hppa_loaded_fr0(env);
- cpu_hppa_put_psw(env, PSW_W);
+
+ /* 64-bit machines start with space-register hashing enabled in %dr2 */
+ env->dr[2] = hppa_is_pa20(env) ? HPPA64_DIAG_SPHASH_ENABLE : 0;
+
+ cpu_hppa_put_psw(env, PSW_M);
}
static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
@@ -214,39 +245,54 @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps hppa_sysemu_ops = {
+ .has_work = hppa_cpu_has_work,
.get_phys_page_debug = hppa_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps hppa_tcg_ops = {
+ /* PA-RISC 1.x processors have a strong memory model. */
+ /*
+ * ??? While we do not yet implement PA-RISC 2.0, those processors have
+ * a weak memory model, but with TLB bits that force ordering on a per-page
+ * basis. It's probably easier to fall back to a strong memory model.
+ */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = true,
+
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
+ .get_tb_cpu_state = hppa_get_tb_cpu_state,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,
+ .mmu_index = hppa_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
- .tlb_fill = hppa_cpu_tlb_fill,
+ .tlb_fill_align = hppa_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.cpu_exec_halt = hppa_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
.do_transaction_failed = hppa_cpu_do_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};
-static void hppa_cpu_class_init(ObjectClass *oc, void *data)
+static void hppa_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
+ ResettableClass *rc = RESETTABLE_CLASS(oc);
device_class_set_parent_realize(dc, hppa_cpu_realizefn,
&acc->parent_realize);
+ resettable_class_set_parent_phases(rc, NULL, hppa_cpu_reset_hold, NULL,
+ &acc->parent_phases);
+
cc->class_by_name = hppa_cpu_class_by_name;
- cc->has_work = hppa_cpu_has_work;
- cc->mmu_index = hppa_cpu_mmu_index;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
cc->get_pc = hppa_cpu_get_pc;
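
The memory-ordering constant that used to be a cpu-param.h define now travels with the other per-target TCG hooks: guest_default_memory_order and mttcg_supported are ordinary TCGCPUOps fields, alongside mmu_index, get_tb_cpu_state and the callbacks moved out of CPUClass. A partial sketch of the resulting ops table, using only fields visible in the hunk above, not a complete list:

    static const TCGCPUOps sketch_tcg_ops = {
        /* PA-RISC 1.x has a strong memory model. */
        .guest_default_memory_order = TCG_MO_ALL,
        .mttcg_supported = true,
        .initialize = hppa_translate_init,
        .translate_code = hppa_translate_code,
        .get_tb_cpu_state = hppa_get_tb_cpu_state,
        .mmu_index = hppa_cpu_mmu_index,
    };
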
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 2bcb3b6..11d59d1 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -21,7 +21,10 @@
#define HPPA_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
+#include "system/memory.h"
#include "qemu/cpu-float.h"
#include "qemu/interval-tree.h"
#include "hw/registerfields.h"
@@ -45,8 +48,6 @@
#define PRIV_KERNEL 0
#define PRIV_USER 3
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
/* No need to flush MMU_ABS*_IDX */
#define HPPA_MMU_FLUSH_MASK \
(1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \
@@ -211,7 +212,7 @@ typedef struct CPUArchState {
uint32_t psw; /* All psw bits except the following: */
uint32_t psw_xb; /* X and B, in their normal positions */
target_ulong psw_n; /* boolean */
- target_long psw_v; /* in most significant bit */
+ target_long psw_v; /* in bit 31 */
/* Splitting the carry-borrow field into the MSB and "the rest", allows
* for "the rest" to be deleted when it is unused, but the MSB is in use.
@@ -223,6 +224,7 @@ typedef struct CPUArchState {
target_ulong psw_cb; /* in least significant bit of next nibble */
target_ulong psw_cb_msb; /* boolean */
+ uint64_t gva_offset_mask; /* cached address mask based on PSW and %dr2 */
uint64_t iasq_f;
uint64_t iasq_b;
@@ -232,6 +234,7 @@ typedef struct CPUArchState {
target_ulong cr[32]; /* control registers */
target_ulong cr_back[2]; /* back of cr17/cr18 */
target_ulong shadow[7]; /* shadow registers */
+ target_ulong dr[32]; /* diagnose registers */
/*
* During unwind of a memory insn, the base register of the address.
@@ -263,6 +266,15 @@ typedef struct CPUArchState {
IntervalTreeRoot tlb_root;
HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ bool is_pa20;
+
+ target_ulong kernel_entry; /* Linux kernel was loaded here */
+ target_ulong cmdline_or_bootorder;
+ target_ulong initrd_base, initrd_end;
} CPUHPPAState;
/**
@@ -281,7 +293,7 @@ struct ArchCPU {
/**
* HPPACPUClass:
* @parent_realize: The parent class' realize handler.
- * @parent_reset: The parent class' reset handler.
+ * @parent_phases: The parent class' reset phase handlers.
*
* An HPPA CPU model.
*/
@@ -289,14 +301,12 @@ struct HPPACPUClass {
CPUClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
+ ResettablePhases parent_phases;
};
-#include "exec/cpu-all.h"
-
-static inline bool hppa_is_pa20(CPUHPPAState *env)
+static inline bool hppa_is_pa20(const CPUHPPAState *env)
{
- return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL;
+ return env->is_pa20;
}
static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
@@ -305,30 +315,25 @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
}
void hppa_translate_init(void);
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-static inline uint64_t gva_offset_mask(target_ulong psw)
-{
- return (psw & PSW_W
- ? MAKE_64BIT_MASK(0, 62)
- : MAKE_64BIT_MASK(0, 32));
-}
-
-static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc,
- target_ulong off)
+static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask,
+ uint64_t spc, target_ulong off)
{
#ifdef CONFIG_USER_ONLY
- return off;
+ return off & gva_offset_mask;
#else
- return spc | (off & gva_offset_mask(psw));
+ return spc | (off & gva_offset_mask);
#endif
}
static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
target_ulong off)
{
- return hppa_form_gva_psw(env->psw, spc, off);
+ return hppa_form_gva_mask(env->gva_offset_mask, spc, off);
}
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr);
@@ -342,14 +347,13 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
#define TB_FLAG_SR_SAME PSW_I
#define TB_FLAG_PRIV_SHIFT 8
#define TB_FLAG_UNALIGN 0x400
+#define TB_FLAG_SPHASH 0x800
#define CS_BASE_DIFFPAGE (1 << 12)
#define CS_BASE_DIFFSPACE (1 << 13)
-void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
+void update_gva_offset_mask(CPUHPPAState *env);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);
#ifdef CONFIG_USER_ONLY
@@ -365,13 +369,13 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
void hppa_ptlbe(CPUHPPAState *env);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled);
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot);
+ int type, MemOp mop, hwaddr *pphys, int *pprot);
void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
@@ -383,6 +387,4 @@ void hppa_cpu_alarm_timer(void *);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
-#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-
#endif /* HPPA_CPU_H */
diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index deaed2b..4535320 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -49,6 +48,36 @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
d = FIELD_EX32(shadow, FPSR, D);
set_flush_to_zero(d, &env->fp_status);
set_flush_inputs_to_zero(d, &env->fp_status);
+
+ /*
+ * TODO: we only need to do this at CPU reset, but currently
+     * HPPA does not implement a CPU reset method at all...
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status);
+ /*
+ * TODO: The HPPA architecture reference only documents its NaN
+ * propagation rule for 2-operand operations. Testing on real hardware
+ * might be necessary to confirm whether this order for muladd is correct.
+ * Not preferring the SNaN is almost certainly incorrect as it diverges
+ * from the documented rules for 2-operand operations.
+ */
+ set_float_3nan_prop_rule(float_3nan_prop_abc, &env->fp_status);
+ /* For inf * 0 + NaN, return the input NaN */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+ /* Default NaN: sign bit clear, msb-1 frac bit set */
+ set_float_default_nan_pattern(0b00100000, &env->fp_status);
+ set_snan_bit_is_one(true, &env->fp_status);
+ /*
+ * "PA-RISC 2.0 Architecture" says it is IMPDEF whether the flushing
+ * enabled by FPSR.D happens before or after rounding. We pick "before"
+ * for consistency with tininess detection.
+ */
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
+ /*
+ * TODO: "PA-RISC 2.0 Architecture" chapter 10 says that we should
+ * detect tininess before rounding, but we don't set that here so we
+ * get the default tininess after rounding.
+ */
}
void cpu_hppa_loaded_fr0(CPUHPPAState *env)
@@ -65,7 +94,8 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
{
uint32_t soft_exp = get_float_exception_flags(&env->fp_status);
uint32_t hard_exp = 0;
- uint32_t shadow = env->fr0_shadow;
+ uint32_t shadow = env->fr0_shadow & 0x3ffffff;
+ uint32_t fr1 = 0;
if (likely(soft_exp == 0)) {
env->fr[0] = (uint64_t)shadow << 32;
@@ -78,9 +108,22 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, R_FPSR_ENA_O_MASK);
hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, R_FPSR_ENA_Z_MASK);
hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, R_FPSR_ENA_V_MASK);
- shadow |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
+ if (hard_exp & shadow) {
+ shadow = FIELD_DP32(shadow, FPSR, T, 1);
+ /* fill exception register #1, which is lower 32-bits of fr[0] */
+#if !defined(CONFIG_USER_ONLY)
+ if (hard_exp & (R_FPSR_ENA_O_MASK | R_FPSR_ENA_U_MASK)) {
+ /* over- and underflow both set overflow flag only */
+ fr1 = FIELD_DP32(fr1, FPSR, C, 1);
+ fr1 = FIELD_DP32(fr1, FPSR, FLG_O, 1);
+ } else
+#endif
+ {
+ fr1 |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
+ }
+ }
env->fr0_shadow = shadow;
- env->fr[0] = (uint64_t)shadow << 32;
+ env->fr[0] = (uint64_t)shadow << 32 | fr1;
if (hard_exp & shadow) {
hppa_dynamic_excp(env, EXCP_ASSIST, ra);
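
When an enabled exception fires, the trap path now sets FPSR.T and fills exception register #1, the low half of fr[0]; in system mode, overflow and underflow both report through the O flag, and int_helper.c later ORs the trapping opcode into the same register on EXCP_ASSIST. A small sketch of the fr[0] packing this relies on (hypothetical helper, not part of the patch):

    /* fr[0]: bits 63..32 hold the FPSR shadow (with T set on a trap),
     * bits 31..0 hold exception register #1. */
    static uint64_t sketch_pack_fr0(uint32_t fpsr_shadow, uint32_t excp_reg1)
    {
        return (uint64_t)fpsr_shadow << 32 | excp_reg1;
    }
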
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index b79ddd8..d7f8495 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -21,9 +21,9 @@
#include "qemu/log.h"
#include "cpu.h"
#include "fpu/softfloat.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
+#include "hw/hppa/hppa_hardware.h"
target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
{
@@ -53,12 +53,28 @@ target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
}
psw |= env->psw_n * PSW_N;
- psw |= (env->psw_v < 0) * PSW_V;
+ psw |= ((env->psw_v >> 31) & 1) * PSW_V;
psw |= env->psw | env->psw_xb;
return psw;
}
+void update_gva_offset_mask(CPUHPPAState *env)
+{
+ uint64_t gom;
+
+ if (env->psw & PSW_W) {
+ gom = (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)
+ ? MAKE_64BIT_MASK(0, 62) &
+ ~((uint64_t)HPPA64_PDC_CACHE_RET_SPID_VAL << 48)
+ : MAKE_64BIT_MASK(0, 62);
+ } else {
+ gom = MAKE_64BIT_MASK(0, 32);
+ }
+
+ env->gva_offset_mask = gom;
+}
+
void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
{
uint64_t reserved;
@@ -98,6 +114,8 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
cb |= ((psw >> 9) & 1) << 8;
cb |= ((psw >> 8) & 1) << 4;
env->psw_cb = cb;
+
+ update_gva_offset_mask(env);
}
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
@@ -133,9 +151,11 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n"
"IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n",
env->iasq_f >> 32, w, m & env->iaoq_f,
- hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f),
+ hppa_form_gva_mask(env->gva_offset_mask, env->iasq_f,
+ env->iaoq_f),
env->iasq_b >> 32, w, m & env->iaoq_b,
- hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b));
+ hppa_form_gva_mask(env->gva_offset_mask, env->iasq_b,
+ env->iaoq_b));
psw_c[0] = (psw & PSW_W ? 'W' : '-');
psw_c[1] = (psw & PSW_E ? 'E' : '-');
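
update_gva_offset_mask() keeps the new cached env->gva_offset_mask coherent: cpu_hppa_put_psw() recomputes it on every PSW write, and the new helper lets the mtdiag path refresh it when %dr2 changes. A worked sketch of the three values it can take, assuming MAKE_64BIT_MASK from qemu/bitops.h and the HPPA64_* constants from hw/hppa/hppa_hardware.h used above:

    static uint64_t sketch_gva_offset_mask(bool wide, bool sphash)
    {
        if (!wide) {
            /* Narrow mode (PSW_W clear): 32-bit offsets. */
            return MAKE_64BIT_MASK(0, 32);          /* 0x00000000ffffffff */
        }
        if (!sphash) {
            /* Wide mode, %dr2 hashing disabled: 62-bit offsets. */
            return MAKE_64BIT_MASK(0, 62);          /* 0x3fffffffffffffff */
        }
        /* Wide mode with hashing enabled: the bits selected by
         * HPPA64_PDC_CACHE_RET_SPID_VAL, shifted to bit 48, are carved out
         * of the offset so they can carry the space-register hash. */
        return MAKE_64BIT_MASK(0, 62) &
               ~((uint64_t)HPPA64_PDC_CACHE_RET_SPID_VAL << 48);
    }
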
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index de41192..8369855 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -99,6 +99,7 @@ DEF_HELPER_FLAGS_2(ptlb_l, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl)
DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(update_gva_offset_mask, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_1(diag_btlb, void, env)
DEF_HELPER_1(diag_console_output, void, env)
#endif
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
index 71074a6..4eaac75 100644
--- a/target/hppa/insns.decode
+++ b/target/hppa/insns.decode
@@ -644,10 +644,12 @@ xmpyu 001110 ..... ..... 010 .0111 .00 t:5 r1=%ra64 r2=%rb64
# For 32-bit PA-7300LC (PCX-L2)
diag_getshadowregs_pa1 000101 00 0000 0000 0001 1010 0000 0000
diag_putshadowregs_pa1 000101 00 0000 0000 0001 1010 0100 0000
+ diag_mfdiag 000101 dr:5 rt:5 0000 0110 0000 0000
+ diag_mtdiag 000101 dr:5 r1:5 0001 0110 0000 0000
# For 64-bit PA8700 (PCX-W2)
- diag_getshadowregs_pa2 000101 00 0111 1000 0001 1000 0100 0000
- diag_putshadowregs_pa2 000101 00 0111 0000 0001 1000 0100 0000
+ diag_mfdiag 000101 dr:5 0 0000 0000 1000 101 rt:5
+ diag_mtdiag 000101 dr:5 r1:5 0001 1000 0100 0000
]
diag_unimp 000101 i:26
}
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 391f32f..191ae19 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -94,11 +94,12 @@ void hppa_cpu_do_interrupt(CPUState *cs)
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
int i = cs->exception_index;
- uint64_t old_psw;
+ uint64_t old_psw, old_gva_offset_mask;
/* As documented in pa2.0 -- interruption handling. */
/* step 1 */
env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env);
+ old_gva_offset_mask = env->gva_offset_mask;
/* step 2 -- Note PSW_W is masked out again for pa1.x */
cpu_hppa_put_psw(env,
@@ -112,9 +113,9 @@ void hppa_cpu_do_interrupt(CPUState *cs)
*/
if (old_psw & PSW_C) {
env->cr[CR_IIASQ] =
- hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
+ hppa_form_gva_mask(old_gva_offset_mask, env->iasq_f, env->iaoq_f) >> 32;
env->cr_back[0] =
- hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
+ hppa_form_gva_mask(old_gva_offset_mask, env->iasq_b, env->iaoq_b) >> 32;
} else {
env->cr[CR_IIASQ] = 0;
env->cr_back[0] = 0;
@@ -165,9 +166,10 @@ void hppa_cpu_do_interrupt(CPUState *cs)
if (old_psw & PSW_C) {
int prot, t;
- vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr);
+ vaddr = hppa_form_gva_mask(old_gva_offset_mask,
+ env->iasq_f, vaddr);
t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
- 0, &paddr, &prot);
+ 0, 0, &paddr, &prot);
if (t >= 0) {
/* We can't re-load the instruction. */
env->cr[CR_IIR] = 0;
@@ -175,6 +177,10 @@ void hppa_cpu_do_interrupt(CPUState *cs)
}
}
env->cr[CR_IIR] = ldl_phys(cs->as, paddr);
+ if (i == EXCP_ASSIST) {
+ /* stuff insn code into bits of FP exception register #1 */
+ env->fr[0] |= (env->cr[CR_IIR] & 0x03ffffff);
+ }
}
break;
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
index 211bfcf..13e5551 100644
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -198,6 +198,7 @@ static const VMStateField vmstate_env_fields[] = {
VMSTATE_UINT64(iasq_b, CPUHPPAState),
VMSTATE_UINT32(fr0_shadow, CPUHPPAState),
+ VMSTATE_UINT64_ARRAY(dr, CPUHPPAState, 32),
VMSTATE_END_OF_LIST()
};
@@ -208,14 +209,14 @@ static const VMStateDescription * const vmstate_env_subsections[] = {
static const VMStateDescription vmstate_env = {
.name = "env",
- .version_id = 3,
- .minimum_version_id = 3,
+ .version_id = 4,
+ .minimum_version_id = 4,
.fields = vmstate_env_fields,
.subsections = vmstate_env_subsections,
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, HPPACPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState),
VMSTATE_END_OF_LIST()
};
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index b984f73..9bdd0a6 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -20,8 +20,11 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
@@ -197,7 +200,7 @@ static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
}
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot)
+ int type, MemOp mop, hwaddr *pphys, int *pprot)
{
hwaddr phys;
int prot, r_prot, w_prot, x_prot, priv;
@@ -221,7 +224,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
g_assert_not_reached();
}
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- goto egress;
+ goto egress_align;
}
/* Find a valid tlb entry that matches the virtual address. */
@@ -267,6 +270,12 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
goto egress;
}
+ if (unlikely(!(prot & type))) {
+ /* Not allowed -- Inst/Data Memory Access Rights Fault. */
+ ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
+ goto egress;
+ }
+
/* access_id == 0 means public page and no check is performed */
if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
int access_prot = (hppa_is_pa20(env)
@@ -281,14 +290,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
prot &= access_prot;
}
- if (unlikely(!(prot & type))) {
- /* Not allowed -- Inst/Data Memory Access Rights Fault. */
- ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
- goto egress;
- }
-
/*
- * In priority order, check for conditions which raise faults.
+ * In reverse priority order, check for conditions which raise faults.
* Remove PROT bits that cover the condition we want to check,
* so that the resulting PROT will force a re-check of the
* architectural TLB entry for the next access.
@@ -299,13 +302,15 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
/* The T bit is set -- Page Reference Fault. */
ret = EXCP_PAGE_REF;
}
- } else if (!ent->d) {
+ }
+ if (unlikely(!ent->d)) {
prot &= PAGE_READ | PAGE_EXEC;
if (type & PAGE_WRITE) {
/* The D bit is not set -- TLB Dirty Bit Fault. */
ret = EXCP_TLB_DIRTY;
}
- } else if (unlikely(ent->b)) {
+ }
+ if (unlikely(ent->b)) {
prot &= PAGE_READ | PAGE_EXEC;
if (type & PAGE_WRITE) {
/*
@@ -321,6 +326,11 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
}
+ egress_align:
+ if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
+ ret = EXCP_UNALIGN;
+ }
+
egress:
*pphys = phys;
*pprot = prot;
@@ -340,7 +350,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
- excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
+ excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
&phys, &prot);
/* Since we're translating for debugging, the only error that is a
@@ -417,12 +427,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
}
}
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
- MMUAccessType type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra)
{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
+ CPUHPPAState *env = cpu_env(cs);
int prot, excp, a_prot;
hwaddr phys;
@@ -438,7 +447,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
break;
}
- excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
+ &phys, &prot);
if (unlikely(excp >= 0)) {
if (probe) {
return false;
@@ -446,7 +456,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
- raise_exception_with_ior(env, excp, retaddr, addr,
+ raise_exception_with_ior(env, excp, ra, addr,
MMU_IDX_MMU_DISABLED(mmu_idx));
}
@@ -460,8 +470,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
* the large page protection mask. We do not require this,
* because we record the large page here in the hppa tlb.
*/
- tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ memset(out, 0, sizeof(*out));
+ out->phys_addr = phys;
+ out->prot = prot;
+ out->attrs = MEMTXATTRS_UNSPECIFIED;
+ out->lg_page_size = TARGET_PAGE_BITS;
+
return true;
}
@@ -678,7 +692,7 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
hwaddr phys;
int prot, excp;
- excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+ excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
&phys, &prot);
if (excp >= 0) {
if (excp == EXCP_DTLB_MISS) {
@@ -813,3 +827,8 @@ uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
}
return iaoq_f;
}
+
+void HELPER(update_gva_offset_mask)(CPUHPPAState *env)
+{
+ update_gva_offset_mask(env);
+}
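/*
 * Illustrative sketch, not QEMU code: how the "alignment bits" value derived
 * from a MemOp turns into the mask test added to hppa_get_physical_address()
 * above.  It assumes the alignment is expressed as log2(alignment in bytes),
 * which is what memop_alignment_bits() yields; everything else here
 * (names, main) is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool access_is_aligned(uint64_t addr, unsigned align_bits)
{
    /* align_bits == 3 -> mask 0x7 -> the access must be 8-byte aligned */
    return (addr & ((1u << align_bits) - 1)) == 0;
}

int main(void)
{
    printf("%d\n", access_is_aligned(0x1000, 3)); /* 1: aligned */
    printf("%d\n", access_is_aligned(0x1004, 3)); /* 0: would raise EXCP_UNALIGN above */
    return 0;
}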
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 7f79196..0458378 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -20,11 +20,14 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "qemu/timer.h"
#include "trace.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
@@ -334,7 +337,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
}
mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
- excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
if (excp >= 0) {
cpu_restore_state(env_cpu(env), GETPC());
hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c
index 9b43b55..6e65fad 100644
--- a/target/hppa/sys_helper.c
+++ b/target/hppa/sys_helper.c
@@ -20,11 +20,10 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "chardev/char-fe.h"
void HELPER(write_interval_timer)(CPUHPPAState *env, target_ulong val)
@@ -73,7 +72,7 @@ target_ulong HELPER(swap_system_mask)(CPUHPPAState *env, target_ulong nsm)
* machines set the Q bit from 0 to 1 without an exception,
* so let this go without comment.
*/
- env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM);
+ cpu_hppa_put_psw(env, (psw & ~PSW_SM) | (nsm & PSW_SM));
return psw & PSW_SM;
}
@@ -88,7 +87,7 @@ void HELPER(rfi)(CPUHPPAState *env)
* To recreate the space identifier, remove the offset bits.
* For pa1.x, the mask reduces to no change to space.
*/
- mask = gva_offset_mask(env->psw);
+ mask = env->gva_offset_mask;
env->iaoq_f = env->cr[CR_IIAOQ];
env->iaoq_b = env->cr_back[1];
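/*
 * Sketch of the caching pattern behind env->gva_offset_mask above (toy types
 * and bit positions, not the QEMU definitions): rather than recomputing the
 * offset mask from the PSW on every use, the derived value is stored in the
 * CPU state and refreshed whenever its inputs change (PSW.W here; the real
 * code also folds in %dr2 on PA2.0).
 */
#include <stdint.h>

#define PSW_W (1u << 3)                 /* hypothetical bit position */

typedef struct ToyHPPAState {
    uint32_t psw;
    uint64_t gva_offset_mask;           /* cached, derived from psw */
} ToyHPPAState;

static void toy_update_gva_offset_mask(ToyHPPAState *env)
{
    /* illustrative widths: wide offsets with PSW.W set, 32-bit otherwise */
    env->gva_offset_mask = (env->psw & PSW_W) ? UINT64_MAX >> 2
                                              : 0xffffffffull;
}

static void toy_put_psw(ToyHPPAState *env, uint32_t psw)
{
    env->psw = psw;
    toy_update_gva_offset_mask(env);    /* keep the cached mask coherent */
}

int main(void)
{
    ToyHPPAState env = { 0 };

    toy_put_psw(&env, PSW_W);
    return (env.gva_offset_mask == (UINT64_MAX >> 2)) ? 0 : 1;
}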
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 51c1762..7a81cfc 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -20,13 +20,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#define HELPER_H "helper.h"
@@ -72,6 +73,7 @@ typedef struct DisasContext {
/* IAOQ_Front at entry to TB. */
uint64_t iaoq_first;
+ uint64_t gva_offset_mask;
DisasCond null_cond;
TCGLabel *null_lab;
@@ -1206,10 +1208,10 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
cb_msb = tcg_temp_new_i64();
cb = tcg_temp_new_i64();
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
if (is_c) {
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
- get_psw_carry(ctx, d), ctx->zero);
+ tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
+ } else {
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
}
tcg_gen_xor_i64(cb, in1, in2);
tcg_gen_xor_i64(cb, cb, dest);
@@ -1305,9 +1307,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
if (is_b) {
/* DEST,C = IN1 + ~IN2 + C. */
tcg_gen_not_i64(cb, in2);
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
- get_psw_carry(ctx, d), ctx->zero);
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
+ tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
tcg_gen_xor_i64(cb, cb, in1);
tcg_gen_xor_i64(cb, cb, dest);
} else {
@@ -1576,7 +1576,7 @@ static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
*pofs = ofs;
*pgva = addr = tcg_temp_new_i64();
tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
- gva_offset_mask(ctx->tb_flags));
+ ctx->gva_offset_mask);
#ifndef CONFIG_USER_ONLY
if (!is_phys) {
tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
@@ -3005,9 +3005,7 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
tcg_gen_xor_i64(add2, in2, addc);
tcg_gen_andi_i64(addc, addc, 1);
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
- addc, ctx->zero);
+ tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);
/* Write back the result register. */
save_gpr(ctx, a->t, dest);
@@ -3550,8 +3548,7 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
TCGv_i64 cb = tcg_temp_new_i64();
TCGv_i64 cb_msb = tcg_temp_new_i64();
- tcg_gen_movi_i64(cb_msb, 0);
- tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
tcg_gen_xor_i64(cb, in1, in2);
tcg_gen_xor_i64(cb, cb, dest);
cb_cond = get_carry(ctx, d, cb, cb_msb);
@@ -4592,19 +4589,37 @@ static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
return !ctx->is_pa20 && do_getshadowregs(ctx);
}
-static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
+static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
- return ctx->is_pa20 && do_getshadowregs(ctx);
+ return !ctx->is_pa20 && do_putshadowregs(ctx);
}
-static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
+static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
{
- return !ctx->is_pa20 && do_putshadowregs(ctx);
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+ TCGv_i64 dest = dest_gpr(ctx, a->rt);
+ tcg_gen_ld_i64(dest, tcg_env,
+ offsetof(CPUHPPAState, dr[a->dr]));
+ save_gpr(ctx, a->rt, dest);
+ return nullify_end(ctx);
}
-static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
+static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
{
- return ctx->is_pa20 && do_putshadowregs(ctx);
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+ tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
+ offsetof(CPUHPPAState, dr[a->dr]));
+#ifndef CONFIG_USER_ONLY
+ if (ctx->is_pa20 && (a->dr == 2)) {
+ /* Update gva_offset_mask from the new value of %dr2 */
+ gen_helper_update_gva_offset_mask(tcg_env);
+ /* Exit to capture the new value for the next TB. */
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ }
+#endif
+ return nullify_end(ctx);
}
static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
@@ -4624,6 +4639,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->tb_flags = ctx->base.tb->flags;
ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
+ ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;
#ifdef CONFIG_USER_ONLY
ctx->privilege = PRIV_USER;
@@ -4868,8 +4884,8 @@ static const TranslatorOps hppa_tr_ops = {
#endif
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
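/*
 * Plain-C model of the tcg_gen_addcio_i64() conversions above (not TCG
 * code): a single add with carry-in and carry-out replaces the previous
 * pair of tcg_gen_add2_i64() calls.  Uses the GCC/Clang __int128 extension;
 * carry_in is assumed to be 0 or 1.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* dest = a + b + carry_in; *carry_out = carry out of the 64-bit addition */
static uint64_t addcio64(uint64_t a, uint64_t b, uint64_t carry_in,
                         uint64_t *carry_out)
{
    unsigned __int128 sum = (unsigned __int128)a + b + (carry_in & 1);

    *carry_out = (uint64_t)(sum >> 64);
    return (uint64_t)sum;
}

int main(void)
{
    uint64_t co, d = addcio64(UINT64_MAX, 1, 1, &co);

    printf("%016" PRIx64 " carry=%" PRIu64 "\n", d, co); /* ...0001 carry=1 */
    return 0;
}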
diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c
index c290910..16e47c4 100644
--- a/target/i386/arch_dump.c
+++ b/target/i386/arch_dump.c
@@ -13,9 +13,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
#include "elf.h"
-#include "sysemu/memory_mapping.h"
+#include "system/memory_mapping.h"
#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
((DIV_ROUND_UP((hdr_size), 4) \
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index d1ff659..a2398c2 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -13,7 +13,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "sysemu/memory_mapping.h"
+#include "system/memory_mapping.h"
+#include "system/memory.h"
/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, AddressSpace *as,
diff --git a/target/i386/confidential-guest.c b/target/i386/confidential-guest.c
index b372784..cfb71bf 100644
--- a/target/i386/confidential-guest.c
+++ b/target/i386/confidential-guest.c
@@ -20,7 +20,7 @@ OBJECT_DEFINE_ABSTRACT_TYPE(X86ConfidentialGuest,
X86_CONFIDENTIAL_GUEST,
CONFIDENTIAL_GUEST_SUPPORT)
-static void x86_confidential_guest_class_init(ObjectClass *oc, void *data)
+static void x86_confidential_guest_class_init(ObjectClass *oc, const void *data)
{
}
diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h
index 7342d28..48b88db 100644
--- a/target/i386/confidential-guest.h
+++ b/target/i386/confidential-guest.h
@@ -14,7 +14,7 @@
#include "qom/object.h"
-#include "exec/confidential-guest-support.h"
+#include "system/confidential-guest-support.h"
#define TYPE_X86_CONFIDENTIAL_GUEST "x86-confidential-guest"
@@ -39,14 +39,16 @@ struct X86ConfidentialGuestClass {
/* <public> */
int (*kvm_type)(X86ConfidentialGuest *cg);
- uint32_t (*mask_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
- int reg, uint32_t value);
+ void (*cpu_instance_init)(X86ConfidentialGuest *cg, CPUState *cpu);
+ uint32_t (*adjust_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature,
+ uint32_t index, int reg, uint32_t value);
+ int (*check_features)(X86ConfidentialGuest *cg, CPUState *cs);
};
/**
* x86_confidential_guest_kvm_type:
*
- * Calls #X86ConfidentialGuestClass.unplug callback of @plug_handler.
+ * Calls #X86ConfidentialGuestClass.kvm_type() callback.
*/
static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg)
{
@@ -59,25 +61,47 @@ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg)
}
}
+static inline void x86_confidential_guest_cpu_instance_init(X86ConfidentialGuest *cg,
+ CPUState *cpu)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->cpu_instance_init) {
+ klass->cpu_instance_init(cg, cpu);
+ }
+}
+
/**
- * x86_confidential_guest_mask_cpuid_features:
+ * x86_confidential_guest_adjust_cpuid_features:
*
- * Removes unsupported features from a confidential guest's CPUID values, returns
- * the value with the bits removed. The bits removed should be those that KVM
- * provides independent of host-supported CPUID features, but are not supported by
- * the confidential computing firmware.
+ * Adjust the supported features in a confidential guest's CPUID values and
+ * return the adjusted value.  Bits may be removed when the confidential
+ * computing firmware does not support them, or added when the firmware
+ * forcibly exposes them to the guest.
*/
-static inline int x86_confidential_guest_mask_cpuid_features(X86ConfidentialGuest *cg,
+static inline int x86_confidential_guest_adjust_cpuid_features(X86ConfidentialGuest *cg,
uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
- if (klass->mask_cpuid_features) {
- return klass->mask_cpuid_features(cg, feature, index, reg, value);
+ if (klass->adjust_cpuid_features) {
+ return klass->adjust_cpuid_features(cg, feature, index, reg, value);
} else {
return value;
}
}
+static inline int x86_confidential_guest_check_features(X86ConfidentialGuest *cg,
+ CPUState *cs)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->check_features) {
+ return klass->check_features(cg, cs);
+ }
+
+ return 0;
+}
+
#endif
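/*
 * Sketch of the optional-hook pattern used by the new class methods above
 * (hypothetical names, plain C rather than QOM): each wrapper calls the
 * hook when the class installed one and otherwise falls back to a neutral
 * default -- return the value unchanged, or report success.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct ToyGuestClass {
    uint32_t (*adjust_cpuid_features)(uint32_t feature, uint32_t index,
                                      int reg, uint32_t value);
    int (*check_features)(void *cs);
} ToyGuestClass;

static uint32_t toy_adjust_cpuid_features(const ToyGuestClass *k,
                                          uint32_t feature, uint32_t index,
                                          int reg, uint32_t value)
{
    /* no hook installed: leave the CPUID register value untouched */
    return k->adjust_cpuid_features
           ? k->adjust_cpuid_features(feature, index, reg, value) : value;
}

static int toy_check_features(const ToyGuestClass *k, void *cs)
{
    /* no hook installed: nothing to validate, report success */
    return k->check_features ? k->check_features(cs) : 0;
}

int main(void)
{
    ToyGuestClass base = { 0 };         /* no hooks: defaults apply */

    return (toy_adjust_cpuid_features(&base, 7, 0, 0, 0x42) == 0x42 &&
            toy_check_features(&base, NULL) == 0) ? 0 : 1;
}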
diff --git a/target/i386/cpu-apic.c b/target/i386/cpu-apic.c
index d397ec94..242a05f 100644
--- a/target/i386/cpu-apic.c
+++ b/target/i386/cpu-apic.c
@@ -7,14 +7,14 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm.h"
-#include "sysemu/xen.h"
-#include "exec/address-spaces.h"
+#include "system/hw_accel.h"
+#include "system/kvm.h"
+#include "system/xen.h"
+#include "system/address-spaces.h"
#include "hw/qdev-properties.h"
#include "hw/i386/apic_internal.h"
#include "cpu-internal.h"
diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c
index 3bb8e44..a72ed93 100644
--- a/target/i386/cpu-dump.c
+++ b/target/i386/cpu-dump.c
@@ -27,7 +27,7 @@
/***********************************************************/
/* x86 debug */
-static const char *cc_op_str[CC_OP_NB] = {
+static const char * const cc_op_str[] = {
[CC_OP_DYNAMIC] = "DYNAMIC",
[CC_OP_EFLAGS] = "EFLAGS",
@@ -91,7 +91,6 @@ static const char *cc_op_str[CC_OP_NB] = {
[CC_OP_BMILGQ] = "BMILGQ",
[CC_OP_POPCNT] = "POPCNT",
- [CC_OP_CLR] = "CLR",
};
static void
@@ -347,7 +346,6 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int eflags, i, nb;
- char cc_op_name[32];
static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
eflags = cpu_compute_eflags(env);
@@ -456,10 +454,16 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
env->dr[6], env->dr[7]);
}
if (flags & CPU_DUMP_CCOP) {
- if ((unsigned)env->cc_op < CC_OP_NB)
- snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
- else
- snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
+ const char *cc_op_name = NULL;
+ char cc_op_buf[32];
+
+ if ((unsigned)env->cc_op < ARRAY_SIZE(cc_op_str)) {
+ cc_op_name = cc_op_str[env->cc_op];
+ }
+ if (cc_op_name == NULL) {
+ snprintf(cc_op_buf, sizeof(cc_op_buf), "[%d]", env->cc_op);
+ cc_op_name = cc_op_buf;
+ }
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n",
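/*
 * Self-contained sketch of the lookup-with-fallback pattern adopted above
 * for cc_op names (toy table and values): index a sparse name table only
 * when the index is in range and the slot is non-NULL, otherwise format a
 * numeric fallback into a local buffer.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const names[] = {
    [0] = "DYNAMIC",
    [1] = "EFLAGS",
    /* gaps stay NULL */
    [4] = "POPCNT",
};

static void print_name(int idx)
{
    const char *name = NULL;
    char buf[32];

    if ((unsigned)idx < ARRAY_SIZE(names)) {
        name = names[idx];
    }
    if (name == NULL) {
        snprintf(buf, sizeof(buf), "[%d]", idx);
        name = buf;
    }
    printf("%s\n", name);
}

int main(void)
{
    print_name(1);      /* EFLAGS */
    print_name(3);      /* [3]: in range but unnamed */
    print_name(99);     /* [99]: out of range */
    return 0;
}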
diff --git a/target/i386/cpu-internal.h b/target/i386/cpu-internal.h
index 9baac5c..37c61a1 100644
--- a/target/i386/cpu-internal.h
+++ b/target/i386/cpu-internal.h
@@ -1,5 +1,5 @@
/*
- * i386 CPU internal definitions to be shared between cpu.c and cpu-sysemu.c
+ * i386 CPU internal definitions to be shared between cpu.c and cpu-system.c
*
* Copyright (c) 2003 Fabrice Bellard
*
diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h
index 5e15335..ebb844b 100644
--- a/target/i386/cpu-param.h
+++ b/target/i386/cpu-param.h
@@ -2,14 +2,13 @@
* i386 cpu parameters for qemu.
*
* Copyright (c) 2003 Fabrice Bellard
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef I386_CPU_PARAM_H
#define I386_CPU_PARAM_H
#ifdef TARGET_X86_64
-# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 52
/*
* ??? This is really 48 bits, sign-extended, but the only thing
@@ -18,13 +17,11 @@
*/
# define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
-# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-/* The x86 has a strong memory model with some store-after-load re-ordering */
-#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+#define TARGET_INSN_START_EXTRA_WORDS 1
#endif
diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c
deleted file mode 100644
index 227ac02..0000000
--- a/target/i386/cpu-sysemu.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * i386 CPUID, CPU class, definitions, models: sysemu-only code
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "qapi/error.h"
-#include "qapi/qapi-visit-run-state.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qobject-input-visitor.h"
-#include "qom/qom-qobject.h"
-#include "qapi/qapi-commands-machine-target.h"
-
-#include "cpu-internal.h"
-
-/* Return a QDict containing keys for all properties that can be included
- * in static expansion of CPU models. All properties set by x86_cpu_load_model()
- * must be included in the dictionary.
- */
-static QDict *x86_cpu_static_props(void)
-{
- FeatureWord w;
- int i;
- static const char *props[] = {
- "min-level",
- "min-xlevel",
- "family",
- "model",
- "stepping",
- "model-id",
- "vendor",
- "lmce",
- NULL,
- };
- static QDict *d;
-
- if (d) {
- return d;
- }
-
- d = qdict_new();
- for (i = 0; props[i]; i++) {
- qdict_put_null(d, props[i]);
- }
-
- for (w = 0; w < FEATURE_WORDS; w++) {
- FeatureWordInfo *fi = &feature_word_info[w];
- int bit;
- for (bit = 0; bit < 64; bit++) {
- if (!fi->feat_names[bit]) {
- continue;
- }
- qdict_put_null(d, fi->feat_names[bit]);
- }
- }
-
- return d;
-}
-
-/* Add an entry to @props dict, with the value for property. */
-static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
-{
- QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
- &error_abort);
-
- qdict_put_obj(props, prop, value);
-}
-
-/* Convert CPU model data from X86CPU object to a property dictionary
- * that can recreate exactly the same CPU model.
- */
-static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
-{
- QDict *sprops = x86_cpu_static_props();
- const QDictEntry *e;
-
- for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
- const char *prop = qdict_entry_key(e);
- x86_cpu_expand_prop(cpu, props, prop);
- }
-}
-
-/* Convert CPU model data from X86CPU object to a property dictionary
- * that can recreate exactly the same CPU model, including every
- * writable QOM property.
- */
-static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
-{
- ObjectPropertyIterator iter;
- ObjectProperty *prop;
-
- object_property_iter_init(&iter, OBJECT(cpu));
- while ((prop = object_property_iter_next(&iter))) {
- /* skip read-only or write-only properties */
- if (!prop->get || !prop->set) {
- continue;
- }
-
- /* "hotplugged" is the only property that is configurable
- * on the command-line but will be set differently on CPUs
- * created using "-cpu ... -smp ..." and by CPUs created
- * on the fly by x86_cpu_from_model() for querying. Skip it.
- */
- if (!strcmp(prop->name, "hotplugged")) {
- continue;
- }
- x86_cpu_expand_prop(cpu, props, prop->name);
- }
-}
-
-static void object_apply_props(Object *obj, QObject *props,
- const char *props_arg_name, Error **errp)
-{
- Visitor *visitor;
- QDict *qdict;
- const QDictEntry *prop;
-
- visitor = qobject_input_visitor_new(props);
- if (!visit_start_struct(visitor, props_arg_name, NULL, 0, errp)) {
- visit_free(visitor);
- return;
- }
-
- qdict = qobject_to(QDict, props);
- for (prop = qdict_first(qdict); prop; prop = qdict_next(qdict, prop)) {
- if (!object_property_set(obj, qdict_entry_key(prop),
- visitor, errp)) {
- goto out;
- }
- }
-
- visit_check_struct(visitor, errp);
-out:
- visit_end_struct(visitor, NULL);
- visit_free(visitor);
-}
-
-/* Create X86CPU object according to model+props specification */
-static X86CPU *x86_cpu_from_model(const char *model, QObject *props,
- const char *props_arg_name, Error **errp)
-{
- X86CPU *xc = NULL;
- X86CPUClass *xcc;
- Error *err = NULL;
-
- xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
- if (xcc == NULL) {
- error_setg(&err, "CPU model '%s' not found", model);
- goto out;
- }
-
- xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
- if (props) {
- object_apply_props(OBJECT(xc), props, props_arg_name, &err);
- if (err) {
- goto out;
- }
- }
-
- x86_cpu_expand_features(xc, &err);
- if (err) {
- goto out;
- }
-
-out:
- if (err) {
- error_propagate(errp, err);
- object_unref(OBJECT(xc));
- xc = NULL;
- }
- return xc;
-}
-
-CpuModelExpansionInfo *
-qmp_query_cpu_model_expansion(CpuModelExpansionType type,
- CpuModelInfo *model,
- Error **errp)
-{
- X86CPU *xc = NULL;
- Error *err = NULL;
- CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
- QDict *props = NULL;
- const char *base_name;
-
- xc = x86_cpu_from_model(model->name, model->props, "model.props", &err);
- if (err) {
- goto out;
- }
-
- props = qdict_new();
- ret->model = g_new0(CpuModelInfo, 1);
- ret->model->props = QOBJECT(props);
-
- switch (type) {
- case CPU_MODEL_EXPANSION_TYPE_STATIC:
- /* Static expansion will be based on "base" only */
- base_name = "base";
- x86_cpu_to_dict(xc, props);
- break;
- case CPU_MODEL_EXPANSION_TYPE_FULL:
- /* As we don't return every single property, full expansion needs
- * to keep the original model name+props, and add extra
- * properties on top of that.
- */
- base_name = model->name;
- x86_cpu_to_dict_full(xc, props);
- break;
- default:
- error_setg(&err, "Unsupported expansion type");
- goto out;
- }
-
- x86_cpu_to_dict(xc, props);
-
- ret->model->name = g_strdup(base_name);
-
-out:
- object_unref(OBJECT(xc));
- if (err) {
- error_propagate(errp, err);
- qapi_free_CpuModelExpansionInfo(ret);
- ret = NULL;
- }
- return ret;
-}
-
-void cpu_clear_apic_feature(CPUX86State *env)
-{
- env->features[FEAT_1_EDX] &= ~CPUID_APIC;
-}
-
-void cpu_set_apic_feature(CPUX86State *env)
-{
- env->features[FEAT_1_EDX] |= CPUID_APIC;
-}
-
-bool cpu_has_x2apic_feature(CPUX86State *env)
-{
- return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
-}
-
-bool cpu_is_bsp(X86CPU *cpu)
-{
- return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
-}
-
-/* TODO: remove me, when reset over QOM tree is implemented */
-void x86_cpu_machine_reset_cb(void *opaque)
-{
- X86CPU *cpu = opaque;
- cpu_reset(CPU(cpu));
-}
-
-GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- GuestPanicInformation *panic_info = NULL;
-
- if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) {
- panic_info = g_new0(GuestPanicInformation, 1);
-
- panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
-
- assert(HV_CRASH_PARAMS >= 5);
- panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
- panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
- panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
- panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
- panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
- }
-
- return panic_info;
-}
-void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- CPUState *cs = CPU(obj);
- GuestPanicInformation *panic_info;
-
- if (!cs->crash_occurred) {
- error_setg(errp, "No crash occurred");
- return;
- }
-
- panic_info = x86_cpu_get_crash_info(cs);
- if (panic_info == NULL) {
- error_setg(errp, "No crash information");
- return;
- }
-
- visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
- errp);
- qapi_free_GuestPanicInformation(panic_info);
-}
diff --git a/target/i386/cpu-system.c b/target/i386/cpu-system.c
new file mode 100644
index 0000000..b1494aa
--- /dev/null
+++ b/target/i386/cpu-system.c
@@ -0,0 +1,322 @@
+/*
+ * i386 CPUID, CPU class, definitions, models: system-only code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-run-state.h"
+#include "qobject/qdict.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qom/qom-qobject.h"
+#include "qapi/qapi-commands-machine.h"
+
+#include "cpu-internal.h"
+
+/* Return a QDict containing keys for all properties that can be included
+ * in static expansion of CPU models. All properties set by x86_cpu_load_model()
+ * must be included in the dictionary.
+ */
+static QDict *x86_cpu_static_props(void)
+{
+ FeatureWord w;
+ int i;
+ static const char *props[] = {
+ "min-level",
+ "min-xlevel",
+ "family",
+ "model",
+ "stepping",
+ "model-id",
+ "vendor",
+ "lmce",
+ NULL,
+ };
+ static QDict *d;
+
+ if (d) {
+ return d;
+ }
+
+ d = qdict_new();
+ for (i = 0; props[i]; i++) {
+ qdict_put_null(d, props[i]);
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ FeatureWordInfo *fi = &feature_word_info[w];
+ int bit;
+ for (bit = 0; bit < 64; bit++) {
+ if (!fi->feat_names[bit]) {
+ continue;
+ }
+ qdict_put_null(d, fi->feat_names[bit]);
+ }
+ }
+
+ return d;
+}
+
+/* Add an entry to @props dict, with the value for property. */
+static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
+{
+ QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
+ &error_abort);
+
+ qdict_put_obj(props, prop, value);
+}
+
+/* Convert CPU model data from X86CPU object to a property dictionary
+ * that can recreate exactly the same CPU model.
+ */
+static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
+{
+ QDict *sprops = x86_cpu_static_props();
+ const QDictEntry *e;
+
+ for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
+ const char *prop = qdict_entry_key(e);
+ x86_cpu_expand_prop(cpu, props, prop);
+ }
+}
+
+/* Convert CPU model data from X86CPU object to a property dictionary
+ * that can recreate exactly the same CPU model, including every
+ * writable QOM property.
+ */
+static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
+{
+ ObjectPropertyIterator iter;
+ ObjectProperty *prop;
+
+ object_property_iter_init(&iter, OBJECT(cpu));
+ while ((prop = object_property_iter_next(&iter))) {
+ /* skip read-only or write-only properties */
+ if (!prop->get || !prop->set) {
+ continue;
+ }
+
+ /* "hotplugged" is the only property that is configurable
+ * on the command-line but will be set differently on CPUs
+ * created using "-cpu ... -smp ..." and by CPUs created
+ * on the fly by x86_cpu_from_model() for querying. Skip it.
+ */
+ if (!strcmp(prop->name, "hotplugged")) {
+ continue;
+ }
+ x86_cpu_expand_prop(cpu, props, prop->name);
+ }
+}
+
+static void object_apply_props(Object *obj, QObject *props,
+ const char *props_arg_name, Error **errp)
+{
+ Visitor *visitor;
+ QDict *qdict;
+ const QDictEntry *prop;
+
+ visitor = qobject_input_visitor_new(props);
+ if (!visit_start_struct(visitor, props_arg_name, NULL, 0, errp)) {
+ visit_free(visitor);
+ return;
+ }
+
+ qdict = qobject_to(QDict, props);
+ for (prop = qdict_first(qdict); prop; prop = qdict_next(qdict, prop)) {
+ if (!object_property_set(obj, qdict_entry_key(prop),
+ visitor, errp)) {
+ goto out;
+ }
+ }
+
+ visit_check_struct(visitor, errp);
+out:
+ visit_end_struct(visitor, NULL);
+ visit_free(visitor);
+}
+
+/* Create X86CPU object according to model+props specification */
+static X86CPU *x86_cpu_from_model(const char *model, QObject *props,
+ const char *props_arg_name, Error **errp)
+{
+ X86CPU *xc = NULL;
+ X86CPUClass *xcc;
+ Error *err = NULL;
+
+ xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
+ if (xcc == NULL) {
+ error_setg(&err, "CPU model '%s' not found", model);
+ goto out;
+ }
+
+ xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
+ if (props) {
+ object_apply_props(OBJECT(xc), props, props_arg_name, &err);
+ if (err) {
+ goto out;
+ }
+ }
+
+ x86_cpu_expand_features(xc, &err);
+ if (err) {
+ goto out;
+ }
+
+out:
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(OBJECT(xc));
+ xc = NULL;
+ }
+ return xc;
+}
+
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ X86CPU *xc = NULL;
+ Error *err = NULL;
+ CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
+ QDict *props = NULL;
+ const char *base_name;
+
+ xc = x86_cpu_from_model(model->name, model->props, "model.props", &err);
+ if (err) {
+ goto out;
+ }
+
+ props = qdict_new();
+ ret->model = g_new0(CpuModelInfo, 1);
+ ret->model->props = QOBJECT(props);
+
+ switch (type) {
+ case CPU_MODEL_EXPANSION_TYPE_STATIC:
+ /* Static expansion will be based on "base" only */
+ base_name = "base";
+ x86_cpu_to_dict(xc, props);
+ break;
+ case CPU_MODEL_EXPANSION_TYPE_FULL:
+ /* As we don't return every single property, full expansion needs
+ * to keep the original model name+props, and add extra
+ * properties on top of that.
+ */
+ base_name = model->name;
+ x86_cpu_to_dict_full(xc, props);
+ break;
+ default:
+ error_setg(&err, "Unsupported expansion type");
+ goto out;
+ }
+
+ x86_cpu_to_dict(xc, props);
+
+ ret->model->name = g_strdup(base_name);
+
+out:
+ object_unref(OBJECT(xc));
+ if (err) {
+ error_propagate(errp, err);
+ qapi_free_CpuModelExpansionInfo(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
+void cpu_clear_apic_feature(CPUX86State *env)
+{
+ env->features[FEAT_1_EDX] &= ~CPUID_APIC;
+}
+
+void cpu_set_apic_feature(CPUX86State *env)
+{
+ env->features[FEAT_1_EDX] |= CPUID_APIC;
+}
+
+bool cpu_has_x2apic_feature(CPUX86State *env)
+{
+ return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
+}
+
+bool cpu_is_bsp(X86CPU *cpu)
+{
+ return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
+}
+
+/* TODO: remove me, when reset over QOM tree is implemented */
+void x86_cpu_machine_reset_cb(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ cpu_reset(CPU(cpu));
+}
+
+GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ GuestPanicInformation *panic_info = NULL;
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) {
+ panic_info = g_new0(GuestPanicInformation, 1);
+
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
+
+ assert(HV_CRASH_PARAMS >= 5);
+ panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
+ panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
+ panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
+ panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
+ panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
+ }
+
+ return panic_info;
+}
+void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CPUState *cs = CPU(obj);
+ GuestPanicInformation *panic_info;
+
+ if (!cs->crash_occurred) {
+ error_setg(errp, "No crash occurred");
+ return;
+ }
+
+ panic_info = x86_cpu_get_crash_info(cs);
+ if (panic_info == NULL) {
+ error_setg(errp, "No crash information");
+ return;
+ }
+
+ visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
+ errp);
+ qapi_free_GuestPanicInformation(panic_info);
+}
+
+uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ uint64_t val;
+
+ val = x86_threads_per_pkg(&env->topo_info); /* thread count, bits 15..0 */
+ val |= x86_cores_per_pkg(&env->topo_info) << 16; /* core count, bits 31..16 */
+
+ return val;
+}
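/*
 * Sketch of the MSR value layout produced by
 * cpu_x86_get_msr_core_thread_count() above: thread count in bits 15..0,
 * core count in bits 31..16 (toy values, plain C).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_core_thread_count(uint32_t threads_per_pkg,
                                       uint32_t cores_per_pkg)
{
    return (uint64_t)threads_per_pkg | ((uint64_t)cores_per_pkg << 16);
}

int main(void)
{
    uint64_t val = pack_core_thread_count(16, 8);   /* 8 cores, 2 threads each */

    printf("threads=%" PRIu64 " cores=%" PRIu64 "\n",
           val & 0xffff, (val >> 16) & 0xffff);
    return 0;
}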
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 4688d14..0d35e95 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -24,29 +24,35 @@
#include "qemu/hw-version.h"
#include "cpu.h"
#include "tcg/helper-tcg.h"
-#include "sysemu/hvf.h"
+#include "exec/translation-block.h"
+#include "system/hvf.h"
#include "hvf/hvf-i386.h"
#include "kvm/kvm_i386.h"
#include "sev.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qapi-visit-machine.h"
-#include "qapi/qmp/qerror.h"
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
+#include "exec/watchpoint.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu/reset.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "exec/address-spaces.h"
+#include "confidential-guest.h"
+#include "system/reset.h"
+#include "qapi/qapi-commands-machine.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/i386/sgx-epc.h"
#endif
+#include "tcg/tcg-cpu.h"
#include "disas/capstone.h"
#include "cpu-internal.h"
static void x86_cpu_realizefn(DeviceState *dev, Error **errp);
+static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
/* Helpers for building CPUID[2] descriptors: */
@@ -236,23 +242,26 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
0 /* Invalid value */)
static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel share_level)
+ enum CpuTopologyLevel share_level)
{
uint32_t num_ids = 0;
switch (share_level) {
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
num_ids = 1 << apicid_core_offset(topo_info);
break;
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
+ num_ids = 1 << apicid_module_offset(topo_info);
+ break;
+ case CPU_TOPOLOGY_LEVEL_DIE:
num_ids = 1 << apicid_die_offset(topo_info);
break;
- case CPU_TOPO_LEVEL_PACKAGE:
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
num_ids = 1 << apicid_pkg_offset(topo_info);
break;
default:
/*
- * Currently there is no use case for SMT and MODULE, so use
+ * Currently there is no use case for THREAD, so use
* assert directly to facilitate debugging.
*/
g_assert_not_reached();
@@ -301,21 +310,19 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache,
}
static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel topo_level)
+ enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return 1;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return topo_info->threads_per_core;
- case CPU_TOPO_LEVEL_MODULE:
- return topo_info->threads_per_core * topo_info->cores_per_module;
- case CPU_TOPO_LEVEL_DIE:
- return topo_info->threads_per_core * topo_info->cores_per_module *
- topo_info->modules_per_die;
- case CPU_TOPO_LEVEL_PACKAGE:
- return topo_info->threads_per_core * topo_info->cores_per_module *
- topo_info->modules_per_die * topo_info->dies_per_pkg;
+ case CPU_TOPOLOGY_LEVEL_MODULE:
+ return x86_threads_per_module(topo_info);
+ case CPU_TOPOLOGY_LEVEL_DIE:
+ return x86_threads_per_die(topo_info);
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
+ return x86_threads_per_pkg(topo_info);
default:
g_assert_not_reached();
}
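/*
 * Sketch of what the x86_threads_per_*() helpers referenced above compute
 * (toy struct, names assumed from the diff): the thread count at each
 * topology level is the product of the per-level factors beneath it, as
 * the removed open-coded multiplications show.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct ToyTopoInfo {
    uint32_t dies_per_pkg;
    uint32_t modules_per_die;
    uint32_t cores_per_module;
    uint32_t threads_per_core;
} ToyTopoInfo;

static uint32_t threads_per_module(const ToyTopoInfo *t)
{
    return t->threads_per_core * t->cores_per_module;
}

static uint32_t threads_per_die(const ToyTopoInfo *t)
{
    return threads_per_module(t) * t->modules_per_die;
}

static uint32_t threads_per_pkg(const ToyTopoInfo *t)
{
    return threads_per_die(t) * t->dies_per_pkg;
}

int main(void)
{
    ToyTopoInfo t = { .dies_per_pkg = 2, .modules_per_die = 2,
                      .cores_per_module = 4, .threads_per_core = 2 };

    printf("%u %u %u\n", threads_per_module(&t), threads_per_die(&t),
           threads_per_pkg(&t));       /* 8 16 32 */
    return 0;
}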
@@ -323,18 +330,18 @@ static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info,
}
static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel topo_level)
+ enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return 0;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return apicid_core_offset(topo_info);
- case CPU_TOPO_LEVEL_MODULE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
return apicid_module_offset(topo_info);
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
return apicid_die_offset(topo_info);
- case CPU_TOPO_LEVEL_PACKAGE:
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
return apicid_pkg_offset(topo_info);
default:
g_assert_not_reached();
@@ -342,18 +349,18 @@ static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info,
return 0;
}
-static uint32_t cpuid1f_topo_type(enum CPUTopoLevel topo_level)
+static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_INVALID:
+ case CPU_TOPOLOGY_LEVEL_INVALID:
return CPUID_1F_ECX_TOPO_LEVEL_INVALID;
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return CPUID_1F_ECX_TOPO_LEVEL_SMT;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return CPUID_1F_ECX_TOPO_LEVEL_CORE;
- case CPU_TOPO_LEVEL_MODULE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
return CPUID_1F_ECX_TOPO_LEVEL_MODULE;
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
return CPUID_1F_ECX_TOPO_LEVEL_DIE;
default:
/* Other types are not supported in QEMU. */
@@ -368,38 +375,41 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count,
uint32_t *ecx, uint32_t *edx)
{
X86CPU *cpu = env_archcpu(env);
- unsigned long level, next_level;
+ unsigned long level, base_level, next_level;
uint32_t num_threads_next_level, offset_next_level;
- assert(count + 1 < CPU_TOPO_LEVEL_MAX);
+ assert(count <= CPU_TOPOLOGY_LEVEL_SOCKET);
/*
* Find the No.(count + 1) topology level in avail_cpu_topo bitmap.
- * The search starts from bit 1 (CPU_TOPO_LEVEL_INVALID + 1).
+ * The search starts from bit 0 (CPU_TOPOLOGY_LEVEL_THREAD).
*/
- level = CPU_TOPO_LEVEL_INVALID;
+ level = CPU_TOPOLOGY_LEVEL_THREAD;
+ base_level = level;
for (int i = 0; i <= count; i++) {
level = find_next_bit(env->avail_cpu_topo,
- CPU_TOPO_LEVEL_PACKAGE,
- level + 1);
+ CPU_TOPOLOGY_LEVEL_SOCKET,
+ base_level);
/*
* CPUID[0x1f] doesn't explicitly encode the package level,
* and it just encodes the invalid level (all fields are 0)
* into the last subleaf of 0x1f.
*/
- if (level == CPU_TOPO_LEVEL_PACKAGE) {
- level = CPU_TOPO_LEVEL_INVALID;
+ if (level == CPU_TOPOLOGY_LEVEL_SOCKET) {
+ level = CPU_TOPOLOGY_LEVEL_INVALID;
break;
}
+ /* Search the next level. */
+ base_level = level + 1;
}
- if (level == CPU_TOPO_LEVEL_INVALID) {
+ if (level == CPU_TOPOLOGY_LEVEL_INVALID) {
num_threads_next_level = 0;
offset_next_level = 0;
} else {
next_level = find_next_bit(env->avail_cpu_topo,
- CPU_TOPO_LEVEL_PACKAGE,
+ CPU_TOPOLOGY_LEVEL_SOCKET,
level + 1);
num_threads_next_level = num_threads_by_topo_level(topo_info,
next_level);
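/*
 * Sketch of the bitmap walk in encode_topo_cpuid1f() above (toy
 * find_next_bit over one word; the real code uses QEMU's bitmap API):
 * subleaf N of CPUID.1F reports the (N+1)-th level present in the
 * availability bitmap, and reaching the socket level is encoded as the
 * final "invalid" subleaf.
 */
#include <stdio.h>

enum { LVL_THREAD, LVL_CORE, LVL_MODULE, LVL_DIE, LVL_SOCKET };

static int find_next_set(unsigned long bits, int limit, int start)
{
    for (int i = start; i < limit; i++) {
        if (bits & (1ul << i)) {
            return i;
        }
    }
    return limit;               /* like find_next_bit(): limit == not found */
}

int main(void)
{
    /* thread and core levels available; no module or die level */
    unsigned long avail = (1ul << LVL_THREAD) | (1ul << LVL_CORE);
    int base = LVL_THREAD;

    for (int subleaf = 0; ; subleaf++) {
        int level = find_next_set(avail, LVL_SOCKET, base);

        if (level == LVL_SOCKET) {      /* package: reported as invalid */
            printf("subleaf %d: invalid\n", subleaf);
            break;
        }
        printf("subleaf %d: level %d\n", subleaf, level);
        base = level + 1;               /* search the next level */
    }
    return 0;
}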
@@ -575,7 +585,7 @@ static CPUCacheInfo legacy_l1d_cache = {
.sets = 64,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
@@ -590,7 +600,7 @@ static CPUCacheInfo legacy_l1d_cache_amd = {
.partitions = 1,
.lines_per_tag = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* L1 instruction cache: */
@@ -604,7 +614,7 @@ static CPUCacheInfo legacy_l1i_cache = {
.sets = 64,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
@@ -619,7 +629,7 @@ static CPUCacheInfo legacy_l1i_cache_amd = {
.partitions = 1,
.lines_per_tag = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* Level 2 unified cache: */
@@ -633,7 +643,7 @@ static CPUCacheInfo legacy_l2_cache = {
.sets = 4096,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
@@ -643,7 +653,7 @@ static CPUCacheInfo legacy_l2_cache_cpuid2 = {
.size = 2 * MiB,
.line_size = 64,
.associativity = 8,
- .share_level = CPU_TOPO_LEVEL_INVALID,
+ .share_level = CPU_TOPOLOGY_LEVEL_INVALID,
};
@@ -657,7 +667,7 @@ static CPUCacheInfo legacy_l2_cache_amd = {
.associativity = 16,
.sets = 512,
.partitions = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* Level 3 unified cache: */
@@ -673,7 +683,7 @@ static CPUCacheInfo legacy_l3_cache = {
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
};
/* TLB definitions: */
@@ -767,11 +777,12 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE | \
+ CPUID_HT)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
- CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_TM, CPUID_PBE */
/*
* Kernel-only features that can be shown to usermode programs even if
@@ -839,7 +850,8 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \
- CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES)
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES | \
+ CPUID_EXT3_CMP_LEG)
#define TCG_EXT4_FEATURES 0
@@ -888,6 +900,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \
CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD)
+#define TCG_7_1_ECX_FEATURES 0
#define TCG_7_1_EDX_FEATURES 0
#define TCG_7_2_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
@@ -899,6 +912,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_SGX_12_0_EAX_FEATURES 0
#define TCG_SGX_12_0_EBX_FEATURES 0
#define TCG_SGX_12_1_EAX_FEATURES 0
+#define TCG_24_0_EBX_FEATURES 0
#if defined CONFIG_USER_ONLY
#define CPUID_8000_0008_EBX_KERNEL_FEATURES (CPUID_8000_0008_EBX_IBPB | \
@@ -912,6 +926,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES)
+#if defined CONFIG_USER_ONLY
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS
+#else
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0
+#endif
+
+#define TCG_8000_0021_EAX_FEATURES ( \
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \
+ CPUID_8000_0021_EAX_KERNEL_FEATURES)
+
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
@@ -1054,9 +1079,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
"fsgsbase", "tsc-adjust", "sgx", "bmi1",
- "hle", "avx2", NULL, "smep",
+ "hle", "avx2", "fdp-excptn-only", "smep",
"bmi2", "erms", "invpcid", "rtm",
- NULL, NULL, "mpx", NULL,
+ NULL, "zero-fcs-fds", "mpx", NULL,
"avx512f", "avx512dq", "rdseed", "adx",
"smap", "avx512ifma", "pcommit", "clflushopt",
"clwb", "intel-pt", "avx512pf", "avx512er",
@@ -1110,7 +1135,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_7_1_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- NULL, NULL, NULL, NULL,
+ "sha512", "sm3", "sm4", NULL,
"avx-vnni", "avx512-bf16", NULL, "cmpccxadd",
NULL, NULL, "fzrm", "fsrs",
"fsrc", NULL, NULL, NULL,
@@ -1126,6 +1151,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_1_EAX_FEATURES,
},
+ [FEAT_7_1_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "msr-imm", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_7_1_ECX_FEATURES,
+ },
[FEAT_7_1_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1133,7 +1177,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"avx-vnni-int8", "avx-ne-convert", NULL, NULL,
"amx-complex", NULL, "avx-vnni-int16", NULL,
NULL, NULL, "prefetchiti", NULL,
- NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "avx10",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -1148,8 +1192,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_7_2_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- NULL, NULL, NULL, NULL,
- NULL, "mcdt-no", NULL, NULL,
+ "intel-psfd", "ipred-ctrl", "rrsba-ctrl", "ddpd-u",
+ "bhi-ctrl", "mcdt-no", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -1164,6 +1208,20 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_2_EDX_FEATURES,
},
+ [FEAT_24_0_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ [16] = "avx10-128",
+ [17] = "avx10-256",
+ [18] = "avx10-512",
+ },
+ .cpuid = {
+ .eax = 0x24,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EBX,
+ },
+ .tcg_features = TCG_24_0_EBX_FEATURES,
+ },
[FEAT_8000_0007_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1215,16 +1273,38 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_8000_0021_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- "no-nested-data-bp", NULL, "lfence-always-serializing", NULL,
+ "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL,
NULL, NULL, "null-sel-clr-base", NULL,
"auto-ibrs", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
+ "prefetchi", NULL, NULL, NULL,
+ "eraps", NULL, NULL, "sbpb",
+ "ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
+ },
+ .cpuid = { .eax = 0x80000021, .reg = R_EAX, },
+ .tcg_features = TCG_8000_0021_EAX_FEATURES,
+ .unmigratable_flags = 0,
+ },
+ [FEAT_8000_0021_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .cpuid = { .eax = 0x80000021, .reg = R_EBX, },
+ .tcg_features = 0,
+ .unmigratable_flags = 0,
+ },
+ [FEAT_8000_0022_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "perfmon-v2", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
- .cpuid = { .eax = 0x80000021, .reg = R_EAX, },
+ .cpuid = { .eax = 0x80000022, .reg = R_EAX, },
.tcg_features = 0,
.unmigratable_flags = 0,
},
@@ -1297,7 +1377,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.needs_ecx = true, .ecx = 0,
.reg = R_EAX,
},
- .tcg_features = ~0U,
+ .tcg_features = XSTATE_FP_MASK | XSTATE_SSE_MASK |
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+ XSTATE_PKRU_MASK,
.migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
@@ -1310,7 +1392,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.needs_ecx = true, .ecx = 0,
.reg = R_EDX,
},
- .tcg_features = ~0U,
+ .tcg_features = 0U,
},
/*Below are MSR exposed features*/
[FEAT_ARCH_CAPABILITIES] = {
@@ -1321,9 +1403,17 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"taa-no", NULL, NULL, NULL,
NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no",
NULL, "fb-clear", NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "bhi-no", NULL, NULL, NULL,
"pbrsb-no", NULL, "gds-no", "rfds-no",
"rfds-clear", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "its-no", NULL,
},
.msr = {
.index = MSR_IA32_ARCH_CAPABILITIES,
@@ -1435,7 +1525,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"vmx-exit-save-efer", "vmx-exit-load-efer",
"vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
- NULL, "vmx-exit-load-pkrs", NULL, NULL,
+ NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls",
},
.msr = {
.index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
@@ -1450,7 +1540,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, "vmx-entry-ia32e-mode", NULL, NULL,
NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
"vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
- NULL, NULL, "vmx-entry-load-pkrs", NULL,
+ NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
@@ -1608,14 +1698,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
};
-typedef struct FeatureMask {
- FeatureWord index;
- uint64_t mask;
-} FeatureMask;
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg)
+{
+ FeatureWordInfo *wi;
+ FeatureWord w;
-typedef struct FeatureDep {
- FeatureMask from, to;
-} FeatureDep;
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature &&
+ (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) &&
+ wi->cpuid.reg == reg) {
+ return true;
+ }
+ }
+ return false;
+}
static FeatureDep feature_dependencies[] = {
{
@@ -1727,8 +1824,36 @@ static FeatureDep feature_dependencies[] = {
.to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
},
{
- .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS },
- .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
+ .to = { FEAT_7_0_ECX, CPUID_7_0_ECX_SGX_LC },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
+ .to = { FEAT_SGX_12_0_EAX, ~0ull },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
+ .to = { FEAT_SGX_12_0_EBX, ~0ull },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
+ .to = { FEAT_SGX_12_1_EAX, ~0ull },
+ },
+ {
+ .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_128 },
+ .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 },
+ },
+ {
+ .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 },
+ .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_512 },
+ },
+ {
+ .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_VL_MASK },
+ .to = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ },
+ {
+ .from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ .to = { FEAT_24_0_EBX, ~0ull },
},
};
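/*
 * Hypothetical sketch (the code that consumes feature_dependencies[] is not
 * part of this hunk): a from/to table like the one above is typically
 * applied by clearing the dependent "to" bits whenever the required "from"
 * bits are not all present in the requested feature set.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct ToyFeatureMask { int word; uint64_t mask; } ToyFeatureMask;
typedef struct ToyFeatureDep  { ToyFeatureMask from, to; } ToyFeatureDep;

static void toy_apply_deps(uint64_t *words, const ToyFeatureDep *deps, int n)
{
    for (int i = 0; i < n; i++) {
        uint64_t need = deps[i].from.mask;

        if ((words[deps[i].from.word] & need) != need) {
            words[deps[i].to.word] &= ~deps[i].to.mask;   /* drop dependents */
        }
    }
}

int main(void)
{
    enum { W_A, W_B, W_MAX };
    uint64_t words[W_MAX] = { [W_A] = 0, [W_B] = 0xff };
    const ToyFeatureDep deps[] = {
        { .from = { W_A, 1u << 3 }, .to = { W_B, ~0ull } },
    };

    toy_apply_deps(words, deps, 1);
    printf("%llx\n", (unsigned long long)words[W_B]);     /* 0: cleared */
    return 0;
}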
@@ -1753,9 +1878,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-/* CPUID feature bits available in XSS */
-#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
-
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
@@ -1849,9 +1971,10 @@ static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu)
* Returns the set of feature flags that are supported and migratable by
* QEMU, for a given FeatureWord.
*/
-static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
+static uint64_t x86_cpu_get_migratable_flags(X86CPU *cpu, FeatureWord w)
{
FeatureWordInfo *wi = &feature_word_info[w];
+ CPUX86State *env = &cpu->env;
uint64_t r = 0;
int i;
@@ -1865,6 +1988,12 @@ static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
r |= f;
}
}
+
+ /* when tsc-khz is set explicitly, invtsc is migratable */
+ if ((w == FEAT_8000_0007_EDX) && env->user_tsc_khz) {
+ r |= CPUID_APM_INVTSC;
+ }
+
return r;
}
@@ -1943,6 +2072,7 @@ typedef struct X86CPUDefinition {
int family;
int model;
int stepping;
+ uint8_t avx10_version;
FeatureWordArray features;
const char *model_id;
const CPUCaches *const cache_info;
@@ -2001,7 +2131,7 @@ static const CPUCaches epyc_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2014,7 +2144,7 @@ static const CPUCaches epyc_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2025,7 +2155,7 @@ static const CPUCaches epyc_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2039,7 +2169,7 @@ static const CPUCaches epyc_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2055,7 +2185,7 @@ static CPUCaches epyc_v4_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2068,7 +2198,7 @@ static CPUCaches epyc_v4_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2079,7 +2209,7 @@ static CPUCaches epyc_v4_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2093,7 +2223,61 @@ static CPUCaches epyc_v4_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static CPUCaches epyc_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .line_size = 64,
+ .associativity = 4,
+ .partitions = 1,
+ .sets = 256,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 8 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 8192,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2109,7 +2293,7 @@ static const CPUCaches epyc_rome_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2122,7 +2306,7 @@ static const CPUCaches epyc_rome_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2133,7 +2317,7 @@ static const CPUCaches epyc_rome_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2147,7 +2331,7 @@ static const CPUCaches epyc_rome_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2163,7 +2347,7 @@ static const CPUCaches epyc_rome_v3_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2176,7 +2360,7 @@ static const CPUCaches epyc_rome_v3_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2187,7 +2371,7 @@ static const CPUCaches epyc_rome_v3_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2201,7 +2385,61 @@ static const CPUCaches epyc_rome_v3_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_rome_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 16384,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2217,7 +2455,7 @@ static const CPUCaches epyc_milan_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2230,7 +2468,7 @@ static const CPUCaches epyc_milan_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2241,7 +2479,7 @@ static const CPUCaches epyc_milan_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2255,7 +2493,7 @@ static const CPUCaches epyc_milan_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2271,7 +2509,7 @@ static const CPUCaches epyc_milan_v2_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2284,7 +2522,7 @@ static const CPUCaches epyc_milan_v2_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2295,7 +2533,7 @@ static const CPUCaches epyc_milan_v2_cache_info = {
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2309,7 +2547,61 @@ static const CPUCaches epyc_milan_v2_cache_info = {
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_milan_v3_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -2325,7 +2617,7 @@ static const CPUCaches epyc_genoa_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
@@ -2338,7 +2630,59 @@ static const CPUCaches epyc_genoa_cache_info = {
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 2048,
+ .lines_per_tag = 1,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_genoa_v2_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2349,7 +2693,9 @@ static const CPUCaches epyc_genoa_cache_info = {
.partitions = 1,
.sets = 2048,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
@@ -2361,9 +2707,63 @@ static const CPUCaches epyc_genoa_cache_info = {
.sets = 32768,
.lines_per_tag = 1,
.self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_turin_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 48 * KiB,
+ .line_size = 64,
+ .associativity = 12,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
.inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
@@ -3607,6 +4007,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
{
.version = 4,
+ .note = "IBRS, EPT switching, no TSX",
.props = (PropValue[]) {
{ "vmx-eptp-switching", "on" },
{ /* end of list */ }
@@ -3741,7 +4142,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
},
{ .version = 4,
- .note = "ARCH_CAPABILITIES, no TSX",
+ .note = "ARCH_CAPABILITIES, EPT switching, no TSX",
.props = (PropValue[]) {
{ "vmx-eptp-switching", "on" },
{ /* end of list */ }
@@ -4322,6 +4723,23 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel Xeon Processor (GraniteRapids)",
.versions = (X86CPUVersionDefinition[]) {
{ .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "ss", "on" },
+ { "tsc-adjust", "on" },
+ { "cldemote", "on" },
+ { "movdiri", "on" },
+ { "movdir64b", "on" },
+ { "avx10", "on" },
+ { "avx10-128", "on" },
+ { "avx10-256", "on" },
+ { "avx10-512", "on" },
+ { "avx10-version", "1" },
+ { "stepping", "1" },
+ { /* end of list */ }
+ }
+ },
{ /* end of list */ },
},
},
@@ -4448,6 +4866,160 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel Xeon Processor (SierraForest)",
.versions = (X86CPUVersionDefinition[]) {
{ .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "ss", "on" },
+ { "tsc-adjust", "on" },
+ { "cldemote", "on" },
+ { "movdiri", "on" },
+ { "movdir64b", "on" },
+ { "gds-no", "on" },
+ { "rfds-no", "on" },
+ { "lam", "on" },
+ { "intel-psfd", "on"},
+ { "ipred-ctrl", "on"},
+ { "rrsba-ctrl", "on"},
+ { "bhi-ctrl", "on"},
+ { "stepping", "3" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ },
+ },
+ },
+ {
+ .name = "ClearwaterForest",
+ .level = 0x23,
+ .xlevel = 0x80000008,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 221,
+ .stepping = 0,
+ /*
+ * please keep the ascending order so that we can have a clear view of
+ * bit position of each feature.
+ */
+ .features[FEAT_1_EDX] =
+ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2 | CPUID_SS,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
+ CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES |
+ CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_WBNOINVD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_TSC_ADJUST |
+ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_SHA_NI,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT |
+ CPUID_7_0_ECX_CLDEMOTE | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE |
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ .features[FEAT_ARCH_CAPABILITIES] =
+ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
+ MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
+ MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO |
+ MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO |
+ MSR_ARCH_CAP_BHI_NO | MSR_ARCH_CAP_PBRSB_NO |
+ MSR_ARCH_CAP_GDS_NO | MSR_ARCH_CAP_RFDS_NO,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_SHA512 | CPUID_7_1_EAX_SM3 | CPUID_7_1_EAX_SM4 |
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD |
+ CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA |
+ CPUID_7_1_EAX_LAM,
+ .features[FEAT_7_1_EDX] =
+ CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT |
+ CPUID_7_1_EDX_AVX_VNNI_INT16 | CPUID_7_1_EDX_PREFETCHITI,
+ .features[FEAT_7_2_EDX] =
+ CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
+ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_DDPD_U |
+ CPUID_7_2_EDX_BHI_CTRL | CPUID_7_2_EDX_MCDT_NO,
+ .features[FEAT_VMX_BASIC] =
+ MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] =
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] =
+ MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 |
+ MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB |
+ MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT |
+ MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] =
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] =
+ VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING |
+ VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER |
+ VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] =
+ VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING |
+ VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
+ VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
+ VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC |
+ VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING |
+ VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING |
+ VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML |
+ VMX_SECONDARY_EXEC_XSAVES,
+ .features[FEAT_VMX_VMFUNC] =
+ MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .model_id = "Intel Xeon Processor (ClearwaterForest)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
{ /* end of list */ },
},
},
@@ -4952,6 +5524,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_v4_cache_info
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5090,6 +5681,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
},
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Rome-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_rome_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5165,6 +5775,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_milan_v2_cache_info
},
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Milan-v3 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_milan_v3_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5204,7 +5833,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
.features[FEAT_8000_0021_EAX] =
- CPUID_8000_0021_EAX_No_NESTED_DATA_BP |
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
CPUID_8000_0021_EAX_AUTO_IBRS,
@@ -5239,6 +5868,250 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.xlevel = 0x80000022,
.model_id = "AMD EPYC-Genoa Processor",
.cache_info = &epyc_genoa_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "fs-gs-base-ns", "on" },
+ { "perfmon-v2", "on" },
+ { "model-id",
+ "AMD EPYC-Genoa-v2 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_genoa_v2_cache_info
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "YongFeng",
+ .level = 0x1F,
+ .vendor = CPUID_VENDOR_ZHAOXIN1,
+ .family = 7,
+ .model = 11,
+ .stepping = 3,
+ /* missing: CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_ACPI | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
+ CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
+ CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
+ CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87,
+ /*
+ * missing: CPUID_EXT_OSXSAVE, CPUID_EXT_XTPR, CPUID_EXT_TM2,
+ * CPUID_EXT_EST, CPUID_EXT_SMX, CPUID_EXT_VMX
+ */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER |
+ CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_X2APIC |
+ CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_PCID |
+ CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_BMI2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_FSGSBASE,
+ /* missing: CPUID_7_0_ECX_OSPKE */
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_UMIP,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+ .features[FEAT_8000_0007_EDX] = CPUID_APM_INVTSC,
+ /*
+ * TODO: When the Linux kernel introduces other existing definitions
+ * for this leaf, remember to update the definitions here.
+ */
+ .features[FEAT_C000_0001_EDX] =
+ CPUID_C000_0001_EDX_PMM_EN | CPUID_C000_0001_EDX_PMM |
+ CPUID_C000_0001_EDX_PHE_EN | CPUID_C000_0001_EDX_PHE |
+ CPUID_C000_0001_EDX_ACE2 |
+ CPUID_C000_0001_EDX_XCRYPT_EN | CPUID_C000_0001_EDX_XCRYPT |
+ CPUID_C000_0001_EDX_XSTORE_EN | CPUID_C000_0001_EDX_XSTORE,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_ARCH_CAPABILITIES] =
+ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY |
+ MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO |
+ MSR_ARCH_CAP_SSB_NO,
+ .features[FEAT_VMX_PROCBASED_CTLS] =
+ VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_INVLPG_EXITING |
+ VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING |
+ VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR3_LOAD_EXITING |
+ VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING |
+ VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ /*
+ * missing: VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING,
+ * VMX_SECONDARY_EXEC_TSC_SCALING
+ */
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC |
+ VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING |
+ VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING |
+ VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC |
+ VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_PINBASED_CTLS] =
+ VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING |
+ VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER |
+ VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ /* missing: VMX_VM_ENTRY_SMM, VMX_VM_ENTRY_DEACT_DUAL_MONITOR */
+ .features[FEAT_VMX_ENTRY_CTLS] =
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ /*
+ * missing: MSR_VMX_MISC_ACTIVITY_SHUTDOWN,
+ * MSR_VMX_MISC_ACTIVITY_WAIT_SIPI
+ */
+ .features[FEAT_VMX_MISC] =
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_VMWRITE_VMEXIT,
+ /* missing: MSR_VMX_EPT_UC */
+ .features[FEAT_VMX_EPT_VPID_CAPS] =
+ MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 |
+ MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB |
+ MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID |
+ MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_BASIC] =
+ MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Zhaoxin YongFeng Processor",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .note = "with the correct model number",
+ .props = (PropValue[]) {
+ { "model", "0x5b" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "EPYC-Turin",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 26,
+ .model = 0,
+ .stepping = 0,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 |
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0007_EBX] =
+ CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
+ CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
+ CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
+ .features[FEAT_8000_0021_EAX] =
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
+ CPUID_8000_0021_EAX_FS_GS_BASE_NS |
+ CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
+ CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI |
+ CPUID_8000_0021_EAX_SBPB | CPUID_8000_0021_EAX_IBPB_BRTYPE |
+ CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO,
+ .features[FEAT_8000_0022_EAX] =
+ CPUID_8000_0022_EAX_PERFMON_V2,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE |
+ CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID |
+ CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD |
+ CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF |
+ CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK,
+ .xlevel = 0x80000022,
+ .model_id = "AMD EPYC-Turin Processor",
+ .cache_info = &epyc_turin_cache_info,
},
};
@@ -5283,10 +6156,9 @@ static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
return v;
}
-static Property max_x86_cpu_properties[] = {
+static const Property max_x86_cpu_properties[] = {
DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
- DEFINE_PROP_END_OF_LIST()
};
static void max_x86_cpu_realize(DeviceState *dev, Error **errp)
@@ -5308,7 +6180,7 @@ static void max_x86_cpu_realize(DeviceState *dev, Error **errp)
x86_cpu_realizefn(dev, errp);
}
-static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
+static void max_x86_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
X86CPUClass *xcc = X86_CPU_CLASS(oc);
@@ -5350,7 +6222,7 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
-static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
+static char *feature_word_description(FeatureWordInfo *f)
{
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
@@ -5359,11 +6231,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
const char *reg = get_register_name_32(f->cpuid.reg);
assert(reg);
- return g_strdup_printf("CPUID.%02XH:%s",
- f->cpuid.eax, reg);
+ if (!f->cpuid.needs_ecx) {
+ return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg);
+ } else {
+ return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s",
+ f->cpuid.eax, f->cpuid.ecx, reg);
+ }
}
case MSR_FEATURE_WORD:
- return g_strdup_printf("MSR(%02XH)",
+ return g_strdup_printf("MSR(%02Xh)",
f->msr.index);
}
@@ -5383,12 +6259,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu)
return false;
}
-static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
- const char *verbose_prefix)
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *f = &feature_word_info[w];
int i;
+ g_autofree char *feat_word_str = feature_word_description(f);
if (!cpu->force_features) {
env->features[w] &= ~mask;
@@ -5401,7 +6278,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
for (i = 0; i < 64; ++i) {
if ((1ULL << i) & mask) {
- g_autofree char *feat_word_str = feature_word_description(f, i);
+ warn_report("%s: %s%s%s [bit %d]",
+ verbose_prefix,
+ feat_word_str,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
+
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ if (!cpu->force_features) {
+ env->features[w] |= mask;
+ }
+
+ cpu->forced_on_features[w] |= mask;
+
+ if (!verbose_prefix) {
+ return;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ if ((1ULL << i) & mask) {
+ g_autofree char *feat_word_str = feature_word_description(f);
warn_report("%s: %s%s%s [bit %d]",
verbose_prefix,
feat_word_str,
@@ -5417,13 +6322,13 @@ static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- int64_t value;
+ uint64_t value;
value = (env->cpuid_version >> 8) & 0xf;
if (value == 0xf) {
value += (env->cpuid_version >> 20) & 0xff;
}
- visit_type_int(v, name, &value, errp);
+ visit_type_uint64(v, name, &value, errp);
}
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
@@ -5432,16 +6337,15 @@ static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- const int64_t min = 0;
- const int64_t max = 0xff + 0xf;
- int64_t value;
+ const uint64_t max = 0xff + 0xf;
+ uint64_t value;
- if (!visit_type_int(v, name, &value, errp)) {
+ if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
- if (value < min || value > max) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
- name ? name : "null", value, min, max);
+ if (value > max) {
+ error_setg(errp, "parameter '%s' can be at most %" PRIu64,
+ name ? name : "null", max);
return;
}
@@ -5459,11 +6363,11 @@ static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- int64_t value;
+ uint64_t value;
value = (env->cpuid_version >> 4) & 0xf;
value |= ((env->cpuid_version >> 16) & 0xf) << 4;
- visit_type_int(v, name, &value, errp);
+ visit_type_uint64(v, name, &value, errp);
}
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
@@ -5472,16 +6376,15 @@ static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- const int64_t min = 0;
- const int64_t max = 0xff;
- int64_t value;
+ const uint64_t max = 0xff;
+ uint64_t value;
- if (!visit_type_int(v, name, &value, errp)) {
+ if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
- if (value < min || value > max) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
- name ? name : "null", value, min, max);
+ if (value > max) {
+ error_setg(errp, "parameter '%s' can be at most %" PRIu64,
+ name ? name : "null", max);
return;
}
@@ -5495,10 +6398,10 @@ static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- int64_t value;
+ uint64_t value;
value = env->cpuid_version & 0xf;
- visit_type_int(v, name, &value, errp);
+ visit_type_uint64(v, name, &value, errp);
}
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
@@ -5507,16 +6410,15 @@ static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
- const int64_t min = 0;
- const int64_t max = 0xf;
- int64_t value;
+ const uint64_t max = 0xf;
+ uint64_t value;
- if (!visit_type_int(v, name, &value, errp)) {
+ if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
- if (value < min || value > max) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
- name ? name : "null", value, min, max);
+ if (value > max) {
+ error_setg(errp, "parameter '%s' can be at most %" PRIu64,
+ name ? name : "null", max);
return;
}
@@ -5610,16 +6512,15 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
- const int64_t min = 0;
const int64_t max = INT64_MAX;
int64_t value;
if (!visit_type_int(v, name, &value, errp)) {
return;
}
- if (value < min || value > max) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
- name ? name : "null", value, min, max);
+ if (value < 0 || value > max) {
+ error_setg(errp, "parameter '%s' can be at most %" PRId64,
+ name ? name : "null", max);
return;
}
@@ -5798,7 +6699,7 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features,
}
}
-static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
+static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose);
/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
@@ -5849,7 +6750,7 @@ static void listflags(GList *features)
}
/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
-static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
ObjectClass *class_a = (ObjectClass *)a;
ObjectClass *class_b = (ObjectClass *)b;
@@ -5870,7 +6771,7 @@ static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
static GSList *get_sorted_cpu_model_list(void)
{
GSList *list = object_class_get_list(TYPE_X86_CPU, false);
- list = g_slist_sort(list, x86_cpu_list_compare);
+ list = g_slist_sort_with_data(list, x86_cpu_list_compare, NULL);
return list;
}
@@ -5916,7 +6817,7 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data)
desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
}
if (!desc) {
- desc = g_strdup_printf("%s", model_id);
+ desc = g_strdup(model_id);
}
if (cc->model && cc->model->cpudef->deprecation_note) {
@@ -5927,8 +6828,13 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data)
qemu_printf(" %-20s %s\n", name, desc);
}
+static gint strcmp_wrap(gconstpointer a, gconstpointer b, gpointer d)
+{
+ return strcmp(a, b);
+}
+
/* list available CPU models and flags */
-void x86_cpu_list(void)
+static void x86_cpu_list(void)
{
int i, j;
GSList *list;
@@ -5949,7 +6855,7 @@ void x86_cpu_list(void)
}
}
- names = g_list_sort(names, (GCompareFunc)strcmp);
+ names = g_list_sort_with_data(names, strcmp_wrap, NULL);
qemu_printf("\nRecognized CPUID flags:\n");
listflags(names);
@@ -6039,7 +6945,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
{
FeatureWordInfo *wi = &feature_word_info[w];
uint64_t r = 0;
- uint32_t unavail = 0;
+ uint64_t unavail = 0;
if (kvm_enabled()) {
switch (wi->type) {
@@ -6087,13 +6993,28 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
}
break;
+ case FEAT_7_0_EBX:
+#ifndef CONFIG_USER_ONLY
+ if (!check_sgx_support()) {
+ unavail = CPUID_7_0_EBX_SGX;
+ }
+#endif
+ break;
+ case FEAT_7_0_ECX:
+#ifndef CONFIG_USER_ONLY
+ if (!check_sgx_support()) {
+ unavail = CPUID_7_0_ECX_SGX_LC;
+ }
+#endif
+ break;
+
default:
break;
}
r &= ~unavail;
if (cpu && cpu->migratable) {
- r &= x86_cpu_get_migratable_flags(w);
+ r &= x86_cpu_get_migratable_flags(cpu, w);
}
return r;
}
@@ -6171,7 +7092,7 @@ void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
* Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
*/
-static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
+static void x86_cpu_apply_version_props(X86CPU *cpu, const X86CPUModel *model)
{
const X86CPUVersionDefinition *vdef;
X86CPUVersion version = x86_cpu_model_resolve_version(model);
@@ -6200,7 +7121,7 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
}
static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu,
- X86CPUModel *model)
+ const X86CPUModel *model)
{
const X86CPUVersionDefinition *vdef;
X86CPUVersion version = x86_cpu_model_resolve_version(model);
@@ -6228,7 +7149,7 @@ static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu,
* Load data from X86CPUDefinition into a X86CPU object.
* Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
*/
-static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
+static void x86_cpu_load_model(X86CPU *cpu, const X86CPUModel *model)
{
const X86CPUDefinition *def = model->cpudef;
CPUX86State *env = &cpu->env;
@@ -6274,6 +7195,9 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
*/
object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);
+ object_property_set_uint(OBJECT(cpu), "avx10-version", def->avx10_version,
+ &error_abort);
+
x86_cpu_apply_version_props(cpu, model);
/*
@@ -6293,9 +7217,9 @@ static const gchar *x86_gdb_arch_name(CPUState *cs)
#endif
}
-static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, const void *data)
{
- X86CPUModel *model = data;
+ const X86CPUModel *model = data;
X86CPUClass *xcc = X86_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -6314,7 +7238,7 @@ static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
.class_data = model,
};
- type_register(&ti);
+ type_register_static(&ti);
}
@@ -6382,18 +7306,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
CPUState *cs = env_cpu(env);
uint32_t limit;
uint32_t signature[3];
- X86CPUTopoInfo topo_info;
- uint32_t cores_per_pkg;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
uint32_t threads_per_pkg;
- topo_info.dies_per_pkg = env->nr_dies;
- topo_info.modules_per_die = env->nr_modules;
- topo_info.cores_per_module = cs->nr_cores / env->nr_dies / env->nr_modules;
- topo_info.threads_per_core = cs->nr_threads;
-
- cores_per_pkg = topo_info.cores_per_module * topo_info.modules_per_die *
- topo_info.dies_per_pkg;
- threads_per_pkg = cores_per_pkg * topo_info.threads_per_core;
+ threads_per_pkg = x86_threads_per_pkg(topo_info);
/* Calculate & apply limits for different index ranges */
if (index >= 0xC0000000) {
@@ -6432,10 +7348,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = env->features[FEAT_1_EDX];
if (threads_per_pkg > 1) {
*ebx |= threads_per_pkg << 16;
- *edx |= CPUID_HT;
- }
- if (!cpu->enable_pmu) {
- *ecx &= ~CPUID_EXT_PDCM;
}
break;
case 2:
@@ -6470,13 +7382,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
*eax &= ~0xFC000000;
- *eax |= max_core_ids_in_package(&topo_info) << 26;
+ *eax |= max_core_ids_in_package(topo_info) << 26;
if (host_vcpus_per_cache > threads_per_pkg) {
*eax &= ~0x3FFC000;
/* Share the cache at package level. */
- *eax |= max_thread_ids_for_cache(&topo_info,
- CPU_TOPO_LEVEL_PACKAGE) << 14;
+ *eax |= max_thread_ids_for_cache(topo_info,
+ CPU_TOPOLOGY_LEVEL_SOCKET) << 14;
}
}
} else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
@@ -6487,7 +7399,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
@@ -6495,7 +7407,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 1: /* L1 icache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
@@ -6503,13 +7415,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 2: /* L2 cache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
if (cpu->enable_l3_cache) {
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
}
@@ -6537,8 +7449,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
case 7:
/* Structured Extended Feature Flags Enumeration Leaf */
if (count == 0) {
- uint32_t eax_0_unused, ebx_0, ecx_0, edx_0_unused;
-
/* Maximum ECX value for sub-leaves */
*eax = env->cpuid_level_func7;
*ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
@@ -6547,28 +7457,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx |= CPUID_7_0_ECX_OSPKE;
}
*edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
-
- /*
- * SGX cannot be emulated in software. If hardware does not
- * support enabling SGX and/or SGX flexible launch control,
- * then we need to update the VM's CPUID values accordingly.
- */
- x86_cpu_get_supported_cpuid(0x7, 0,
- &eax_0_unused, &ebx_0,
- &ecx_0, &edx_0_unused);
- if ((*ebx & CPUID_7_0_EBX_SGX) && !(ebx_0 & CPUID_7_0_EBX_SGX)) {
- *ebx &= ~CPUID_7_0_EBX_SGX;
- }
-
- if ((*ecx & CPUID_7_0_ECX_SGX_LC)
- && (!(*ebx & CPUID_7_0_EBX_SGX) || !(ecx_0 & CPUID_7_0_ECX_SGX_LC))) {
- *ecx &= ~CPUID_7_0_ECX_SGX_LC;
- }
} else if (count == 1) {
*eax = env->features[FEAT_7_1_EAX];
+ *ecx = env->features[FEAT_7_1_ECX];
*edx = env->features[FEAT_7_1_EDX];
*ebx = 0;
- *ecx = 0;
} else if (count == 2) {
*edx = env->features[FEAT_7_2_EDX];
*eax = 0;
@@ -6611,12 +7504,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0:
- *eax = apicid_core_offset(&topo_info);
- *ebx = topo_info.threads_per_core;
+ *eax = apicid_core_offset(topo_info);
+ *ebx = topo_info->threads_per_core;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_SMT << 8;
break;
case 1:
- *eax = apicid_pkg_offset(&topo_info);
+ *eax = apicid_pkg_offset(topo_info);
*ebx = threads_per_pkg;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_CORE << 8;
break;
@@ -6637,12 +7530,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x1F:
/* V2 Extended Topology Enumeration Leaf */
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(cpu)) {
*eax = *ebx = *ecx = *edx = 0;
break;
}
- encode_topo_cpuid1f(env, count, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx);
break;
case 0xD: {
/* Processor Extended State */
@@ -6821,6 +7714,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
}
+ case 0x24: {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && count == 0) {
+ *ebx = env->features[FEAT_24_0_EBX] | env->avx10_version;
+ }
+ break;
+ }
case 0x40000000:
/*
* CPUID code in kvm_arch_init_vcpu() ignores stuff
@@ -6857,17 +7760,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = env->features[FEAT_8000_0001_ECX];
*edx = env->features[FEAT_8000_0001_EDX];
- /* The Linux kernel checks for the CMPLegacy bit and
- * discards multiple thread information if it is set.
- * So don't set it here for Intel to make Linux guests happy.
- */
- if (threads_per_pkg > 1) {
- if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
- env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
- env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
- *ecx |= 1 << 1; /* CmpLegacy bit */
- }
- }
if (tcg_enabled() && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 &&
!(env->hflags & HF_LMA_MASK)) {
*edx &= ~CPUID_EXT2_SYSCALL;
@@ -6935,7 +7827,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
* thread ID within a package".
* Bits 7:0 is "The number of threads in the package is NC+1"
*/
- *ecx = (apicid_pkg_offset(&topo_info) << 12) |
+ *ecx = (apicid_pkg_offset(topo_info) << 12) |
(threads_per_pkg - 1);
} else {
*ecx = 0;
@@ -6964,19 +7856,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
default: /* end of info */
*eax = *ebx = *ecx = *edx = 0;
@@ -6988,7 +7880,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x8000001E:
if (cpu->core_id <= 255) {
- encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid8000001e(cpu, topo_info, eax, ebx, ecx, edx);
} else {
*eax = 0;
*ebx = 0;
@@ -6996,6 +7888,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = 0;
}
break;
+ case 0x80000022:
+ *eax = *ebx = *ecx = *edx = 0;
+ /* AMD Extended Performance Monitoring and Debug */
+ if (kvm_enabled() && cpu->enable_pmu &&
+ (env->features[FEAT_8000_0022_EAX] & CPUID_8000_0022_EAX_PERFMON_V2)) {
+ *eax |= CPUID_8000_0022_EAX_PERFMON_V2;
+ *ebx |= kvm_arch_get_supported_cpuid(cs->kvm_state, index, count,
+ R_EBX) & 0xf;
+ }
+ break;
case 0xC0000000:
*eax = env->cpuid_xlevel2;
*ebx = 0;
@@ -7029,8 +7931,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
case 0x80000021:
+ *eax = *ebx = *ecx = *edx = 0;
*eax = env->features[FEAT_8000_0021_EAX];
- *ebx = *ecx = *edx = 0;
+ *ebx = env->features[FEAT_8000_0021_EBX];
break;
default:
/* reserved values: zero */
@@ -7053,6 +7956,23 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env)
#endif
}
+static bool cpuid_has_xsave_feature(CPUX86State *env, const ExtSaveArea *esa)
+{
+ if (!esa->size) {
+ return false;
+ }
+
+ if (env->features[esa->feature] & esa->bits) {
+ return true;
+ }
+ if (esa->feature == FEAT_7_0_EBX && esa->bits == CPUID_7_0_EBX_AVX512F
+ && (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) {
+ return true;
+ }
+
+ return false;
+}
+
static void x86_cpu_reset_hold(Object *obj, ResetType type)
{
CPUState *cs = CPU(obj);
@@ -7069,6 +7989,10 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
+ if (tcg_enabled()) {
+ cpu_init_fp_statuses(env);
+ }
+
env->old_exception = -1;
/* init to reset state */
@@ -7161,7 +8085,7 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) {
continue;
}
- if (env->features[esa->feature] & esa->bits) {
+ if (cpuid_has_xsave_feature(env, esa)) {
xcr0 |= 1ull << i;
}
}
@@ -7299,7 +8223,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
mask = 0;
for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
- if (env->features[esa->feature] & esa->bits) {
+ if (cpuid_has_xsave_feature(env, esa)) {
mask |= (1ULL << i);
}
}
@@ -7392,6 +8316,33 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
~env->user_features[w] &
~feature_word_info[w].no_autoenable_flags;
}
+
+ if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && !env->avx10_version) {
+ uint32_t eax, ebx, ecx, edx;
+ x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx);
+ env->avx10_version = ebx & 0xff;
+ }
+ }
+
+ if (x86_threads_per_pkg(&env->topo_info) > 1) {
+ env->features[FEAT_1_EDX] |= CPUID_HT;
+
+ /*
+ * The Linux kernel checks for the CMPLegacy bit and
+ * discards multiple thread information if it is set.
+ * So don't set it here for Intel (and other processors
+ * following Intel's behavior) to make Linux guests happy.
+ */
+ if (!IS_INTEL_CPU(env) && !IS_ZHAOXIN_CPU(env)) {
+ env->features[FEAT_8000_0001_ECX] |= CPUID_EXT3_CMP_LEG;
+ }
+ }
+
+ if (!cpu->enable_pmu) {
+ mark_unavailable_features(cpu, FEAT_1_ECX,
+ env->user_features[FEAT_1_ECX] & CPUID_EXT_PDCM,
+ "This feature is not available due to PMU being disabled");
+ env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
}
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
@@ -7422,6 +8373,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
@@ -7450,11 +8402,16 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* cpu->vendor_cpuid_only has been unset for compatibility with older
* machine types.
*/
- if (x86_has_extended_topo(env->avail_cpu_topo) &&
+ if (x86_has_cpuid_0x1f(cpu) &&
(IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
}
+ /* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */
+ if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24);
+ }
+
/* SVM requires CPUID[0x8000000A] */
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
@@ -7498,13 +8455,17 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* Finishes initialization of CPUID data, filters CPU feature
* words based on host availability of each feature.
*
- * Returns: 0 if all flags are supported by the host, non-zero otherwise.
+ * Returns: true if any flag is not supported by the host, false otherwise.
*/
-static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
+static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose)
{
CPUX86State *env = &cpu->env;
FeatureWord w;
const char *prefix = NULL;
+ bool have_filtered_features;
+
+ uint32_t eax_0, ebx_0, ecx_0, edx_0;
+ uint32_t eax_1, ebx_1, ecx_1, edx_1;
if (verbose) {
prefix = accel_uses_host_cpuid()
@@ -7526,13 +8487,10 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
*/
if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
kvm_enabled()) {
- uint32_t eax_0, ebx_0, ecx_0, edx_0_unused;
- uint32_t eax_1, ebx_1, ecx_1_unused, edx_1_unused;
-
x86_cpu_get_supported_cpuid(0x14, 0,
- &eax_0, &ebx_0, &ecx_0, &edx_0_unused);
+ &eax_0, &ebx_0, &ecx_0, &edx_0);
x86_cpu_get_supported_cpuid(0x14, 1,
- &eax_1, &ebx_1, &ecx_1_unused, &edx_1_unused);
+ &eax_1, &ebx_1, &ecx_1, &edx_1);
if (!eax_0 ||
((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
@@ -7552,6 +8510,30 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
}
}
+
+ have_filtered_features = x86_cpu_have_filtered_features(cpu);
+
+ if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
+ x86_cpu_get_supported_cpuid(0x24, 0,
+ &eax_0, &ebx_0, &ecx_0, &edx_0);
+ uint8_t version = ebx_0 & 0xff;
+
+ if (version < env->avx10_version) {
+ if (prefix) {
+ warn_report("%s: avx10.%d. Adjust to avx10.%d",
+ prefix, env->avx10_version, version);
+ }
+ env->avx10_version = version;
+ have_filtered_features = true;
+ }
+ } else if (env->avx10_version) {
+ if (prefix) {
+ warn_report("%s: avx10.%d.", prefix, env->avx10_version);
+ }
+ have_filtered_features = true;
+ }
+
+ return have_filtered_features;
}
static void x86_cpu_hyperv_realize(X86CPU *cpu)
@@ -7583,6 +8565,64 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu)
cpu->hyperv_limits[2] = 0;
}
+#ifndef CONFIG_USER_ONLY
+static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu,
+ Error **errp)
+{
+ CPUX86State *env = &cpu->env;
+ CpuTopologyLevel level;
+
+ level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D);
+ if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
+ env->cache_info_cpuid4.l1d_cache->share_level = level;
+ env->cache_info_amd.l1d_cache->share_level = level;
+ } else {
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
+ env->cache_info_cpuid4.l1d_cache->share_level);
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
+ env->cache_info_amd.l1d_cache->share_level);
+ }
+
+ level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I);
+ if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
+ env->cache_info_cpuid4.l1i_cache->share_level = level;
+ env->cache_info_amd.l1i_cache->share_level = level;
+ } else {
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
+ env->cache_info_cpuid4.l1i_cache->share_level);
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
+ env->cache_info_amd.l1i_cache->share_level);
+ }
+
+ level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
+ if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
+ env->cache_info_cpuid4.l2_cache->share_level = level;
+ env->cache_info_amd.l2_cache->share_level = level;
+ } else {
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
+ env->cache_info_cpuid4.l2_cache->share_level);
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
+ env->cache_info_amd.l2_cache->share_level);
+ }
+
+ level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3);
+ if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
+ env->cache_info_cpuid4.l3_cache->share_level = level;
+ env->cache_info_amd.l3_cache->share_level = level;
+ } else {
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
+ env->cache_info_cpuid4.l3_cache->share_level);
+ machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
+ env->cache_info_amd.l3_cache->share_level);
+ }
+
+ if (!machine_check_smp_cache(ms, errp)) {
+ return false;
+ }
+ return true;
+}
+#endif
+
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -7649,14 +8689,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
}
}
- x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
-
- if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
- error_setg(&local_err,
- accel_uses_host_cpuid() ?
+ if (x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid)) {
+ if (cpu->enforce_cpuid) {
+ error_setg(&local_err,
+ accel_uses_host_cpuid() ?
"Host doesn't support requested features" :
"TCG doesn't support requested features");
- goto out;
+ goto out;
+ }
}
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
@@ -7718,6 +8758,21 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
*/
cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
+ /*
+ * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
+ * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
+ * based on inputs (sockets,cores,threads), it is still better to give
+ * users a warning.
+ */
+ if (IS_AMD_CPU(env) &&
+ !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
+ env->topo_info.threads_per_core > 1) {
+ warn_report_once("This family of AMD CPU doesn't support "
+ "hyperthreading(%d). Please configure -smp "
+ "options properly or try enabling topoext "
+ "feature.", env->topo_info.threads_per_core);
+ }
+
/* For 64bit systems think about the number of physical bits to present.
* ideally this should be the same as the host; anything other than matching
* the host can cause incorrect guest behaviour.
@@ -7807,6 +8862,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
MachineState *ms = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+ if (mc->smp_props.has_caches) {
+ if (!x86_cpu_update_smp_cache_topo(ms, cpu, errp)) {
+ return;
+ }
+ }
+
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
@@ -7819,26 +8882,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
mce_init(cpu);
+ x86_cpu_gdb_init(cs);
qemu_init_vcpu(cs);
- /*
- * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
- * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
- * based on inputs (sockets,cores,threads), it is still better to give
- * users a warning.
- *
- * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
- * cs->nr_threads hasn't be populated yet and the checking is incorrect.
- */
- if (IS_AMD_CPU(env) &&
- !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
- cs->nr_threads > 1) {
- warn_report_once("This family of AMD CPU doesn't support "
- "hyperthreading(%d). Please configure -smp "
- "options properly or try enabling topoext "
- "feature.", cs->nr_threads);
- }
-
#ifndef CONFIG_USER_ONLY
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
@@ -7970,20 +9016,46 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
static void x86_cpu_post_initfn(Object *obj)
{
+ static bool first = true;
+ uint64_t supported_xcr0;
+ int i;
+
+ if (first) {
+ first = false;
+
+ supported_xcr0 =
+ ((uint64_t) x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32) |
+ x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_LO);
+
+ for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+ ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+ if (!(supported_xcr0 & (1 << i))) {
+ esa->size = 0;
+ }
+ }
+ }
+
accel_cpu_instance_init(CPU(obj));
+
+#ifndef CONFIG_USER_ONLY
+ if (current_machine && current_machine->cgs) {
+ x86_confidential_guest_cpu_instance_init(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj)));
+ }
+#endif
}
static void x86_cpu_init_default_topo(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- env->nr_modules = 1;
- env->nr_dies = 1;
+ env->topo_info = (X86CPUTopoInfo) {1, 1, 1, 1};
- /* SMT, core and package levels are set by default. */
- set_bit(CPU_TOPO_LEVEL_SMT, env->avail_cpu_topo);
- set_bit(CPU_TOPO_LEVEL_CORE, env->avail_cpu_topo);
- set_bit(CPU_TOPO_LEVEL_PACKAGE, env->avail_cpu_topo);
+ /* thread, core and socket levels are set by default. */
+ set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_SOCKET, env->avail_cpu_topo);
}
static void x86_cpu_initfn(Object *obj)
@@ -8073,16 +9145,15 @@ static vaddr x86_cpu_get_pc(CPUState *cs)
return cpu->env.eip + cpu->env.segs[R_CS].base;
}
+#if !defined(CONFIG_USER_ONLY)
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_POLL) {
return CPU_INTERRUPT_POLL;
}
-#endif
if (interrupt_request & CPU_INTERRUPT_SIPI) {
return CPU_INTERRUPT_SIPI;
}
@@ -8103,14 +9174,12 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
(env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
return CPU_INTERRUPT_HARD;
-#if !defined(CONFIG_USER_ONLY)
} else if (env->hflags2 & HF2_VGIF_MASK) {
if((interrupt_request & CPU_INTERRUPT_VIRQ) &&
(env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
return CPU_INTERRUPT_VIRQ;
}
-#endif
}
}
@@ -8121,45 +9190,14 @@ static bool x86_cpu_has_work(CPUState *cs)
{
return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}
-
-int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
-{
- int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
- int mmu_index_base =
- pl == 3 ? MMU_USER64_IDX :
- !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
- (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
-
- return mmu_index_base + mmu_index_32;
-}
-
-static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- CPUX86State *env = cpu_env(cs);
- return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
-}
-
-static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
-{
- int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
- int mmu_index_base =
- !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
- (pl < 3 && (env->eflags & AC_MASK)
- ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
-
- return mmu_index_base + mmu_index_32;
-}
-
-int cpu_mmu_index_kernel(CPUX86State *env)
-{
- return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
-}
+#endif /* !CONFIG_USER_ONLY */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ info->endian = BFD_ENDIAN_LITTLE;
info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
: env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
: bfd_mach_i386_i8086);
@@ -8214,7 +9252,7 @@ void x86_update_hflags(CPUX86State *env)
env->hflags = hflags;
}
-static Property x86_cpu_properties[] = {
+static const Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
/* apic_id = 0 by default for *-user, see commit 9886e834 */
DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
@@ -8279,8 +9317,10 @@ static Property x86_cpu_properties[] = {
HYPERV_FEAT_TLBFLUSH_DIRECT, 0),
DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
+#ifdef CONFIG_SYNDBG
DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features,
HYPERV_FEAT_SYNDBG, 0),
+#endif
DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false),
@@ -8312,6 +9352,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_UINT8("avx10-version", X86CPU, env.avx10_version, 0),
DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
@@ -8352,13 +9393,13 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
true),
DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true),
- DEFINE_PROP_END_OF_LIST()
};
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps i386_sysemu_ops = {
+ .has_work = x86_cpu_has_work,
.get_memory_mapping = x86_cpu_get_memory_mapping,
.get_paging_enabled = x86_cpu_get_paging_enabled,
.get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
@@ -8372,7 +9413,7 @@ static const struct SysemuCPUOps i386_sysemu_ops = {
};
#endif
-static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_common_class_init(ObjectClass *oc, const void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -8391,9 +9432,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
cc->class_by_name = x86_cpu_class_by_name;
+ cc->list_cpus = x86_cpu_list;
cc->parse_features = x86_cpu_parse_featurestr;
- cc->has_work = x86_cpu_has_work;
- cc->mmu_index = x86_cpu_mmu_index;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->get_pc = x86_cpu_get_pc;
@@ -8404,6 +9444,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &i386_sysemu_ops;
#endif /* !CONFIG_USER_ONLY */
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &x86_tcg_ops;
+#endif /* CONFIG_TCG */
cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
@@ -8470,7 +9513,7 @@ static const TypeInfo x86_cpu_type_info = {
};
/* "base" CPU model, used by query-cpu-model-expansion */
-static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_base_class_init(ObjectClass *oc, const void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 1e121ac..51e1013 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -20,23 +20,21 @@
#ifndef I386_CPU_H
#define I386_CPU_H
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
+#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
#include "qemu/timer.h"
+#include "standard-headers/asm-x86/kvm_para.h"
#define XEN_NR_VIRQS 24
-#define KVM_HAVE_MCE_INJECTION 1
-
-/* support for self modifying code even if the modified instruction is
- close to the modifying instruction */
-#define TARGET_HAS_PRECISE_SMC
-
#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
@@ -267,12 +265,6 @@ typedef enum X86Seg {
#define CR4_FRED_MASK 0
#endif
-#ifdef TARGET_X86_64
-#define CR4_FRED_MASK (1ULL << 32)
-#else
-#define CR4_FRED_MASK 0
-#endif
-
#define CR4_RESERVED_MASK \
(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
| CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
@@ -351,6 +343,7 @@ typedef enum X86Seg {
#define PG_MODE_PKE (1 << 17)
#define PG_MODE_PKS (1 << 18)
#define PG_MODE_SMEP (1 << 19)
+#define PG_MODE_PG (1 << 20)
#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
@@ -414,6 +407,10 @@ typedef enum X86Seg {
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
+#define MSR_RAPL_POWER_UNIT 0x00000606
+#define MSR_PKG_POWER_LIMIT 0x00000610
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_POWER_INFO 0x00000614
#define MSR_ARCH_LBR_CTL 0x000014ce
#define MSR_ARCH_LBR_DEPTH 0x000014cf
#define MSR_ARCH_LBR_FROM_0 0x00001500
@@ -535,6 +532,8 @@ typedef enum X86Seg {
#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL
+#define MSR_K7_HWCR 0xc0010015
+
#define MSR_VM_HSAVE_PA 0xc0010117
#define MSR_IA32_XFD 0x000001c4
@@ -585,6 +584,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PT_BIT 8
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
@@ -598,6 +598,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
@@ -620,6 +621,11 @@ typedef enum X86Seg {
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
+#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK)
+
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -634,6 +640,8 @@ typedef enum FeatureWord {
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
+ FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
+ FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
@@ -660,11 +668,22 @@ typedef enum FeatureWord {
FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
+ FEAT_7_1_ECX, /* CPUID[EAX=7,ECX=1].ECX */
FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
+ FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
FEATURE_WORDS,
} FeatureWord;
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
@@ -822,6 +841,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EBX_HLE (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2 (1U << 5)
+/* FPU data pointer updated only on x87 exceptions */
+#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1U << 6)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
@@ -832,6 +853,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EBX_INVPCID (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM (1U << 11)
+/* Zero out FPU CS and FPU DS */
+#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX (1U << 14)
/* AVX-512 Foundation */
@@ -893,6 +916,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* KeyLocker */
+#define CPUID_7_0_ECX_KeyLocker (1U << 23)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
@@ -914,6 +939,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+ /* "md_clear" VERW clears CPU buffers */
+#define CPUID_7_0_EDX_MD_CLEAR (1U << 10)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
@@ -941,10 +968,18 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Speculative Store Bypass Disable */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
+/* SHA512 Instruction */
+#define CPUID_7_1_EAX_SHA512 (1U << 0)
+/* SM3 Instruction */
+#define CPUID_7_1_EAX_SM3 (1U << 1)
+/* SM4 Instruction */
+#define CPUID_7_1_EAX_SM4 (1U << 2)
/* AVX VNNI Instruction */
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* Linear address space separation */
+#define CPUID_7_1_EAX_LASS (1U << 6)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
@@ -953,6 +988,12 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EAX_FSRS (1U << 11)
/* Fast Short REP CMPS/SCAS */
#define CPUID_7_1_EAX_FSRC (1U << 12)
+/* Flexible return and event delivery (FRED) */
+#define CPUID_7_1_EAX_FRED (1U << 17)
+/* Load into IA32_KERNEL_GS_BASE (LKGS) */
+#define CPUID_7_1_EAX_LKGS (1U << 18)
+/* Non-Serializing Write to Model Specific Register (WRMSRNS) */
+#define CPUID_7_1_EAX_WRMSRNS (1U << 19)
/* Support Tile Computational Operations on FP16 Numbers */
#define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
/* Support for VPMADD52[H,L]UQ */
@@ -960,20 +1001,32 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Linear Address Masking */
#define CPUID_7_1_EAX_LAM (1U << 26)
+/* The immediate form of MSR access instructions */
+#define CPUID_7_1_ECX_MSR_IMM (1U << 5)
+
/* Support for VPDPB[SU,UU,SS]D[,S] */
#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
/* AVX NE CONVERT Instructions */
#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5)
/* AMX COMPLEX Instructions */
#define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
+/* AVX-VNNI-INT16 Instructions */
+#define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10)
/* PREFETCHIT0/1 Instructions */
#define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
-/* Flexible return and event delivery (FRED) */
-#define CPUID_7_1_EAX_FRED (1U << 17)
-/* Load into IA32_KERNEL_GS_BASE (LKGS) */
-#define CPUID_7_1_EAX_LKGS (1U << 18)
-/* Non-Serializing Write to Model Specific Register (WRMSRNS) */
-#define CPUID_7_1_EAX_WRMSRNS (1U << 19)
+/* Support for Advanced Vector Extensions 10 */
+#define CPUID_7_1_EDX_AVX10 (1U << 19)
+
+/* Indicates that bit 7 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_PSFD (1U << 0)
+/* Indicates that bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */
+#define CPUID_7_2_EDX_IPRED_CTRL (1U << 1)
+/* Indicates that bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */
+#define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2)
+/* Indicates that bit 8 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_DDPD_U (1U << 3)
+/* Indicates that bit 10 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_BHI_CTRL (1U << 4)
/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
#define CPUID_7_2_EDX_MCDT_NO (1U << 5)
@@ -984,10 +1037,43 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP (1U << 31)
+/* AVX10 128-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_128 (1U << 16)
+/* AVX10 256-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_256 (1U << 17)
+/* AVX10 512-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_512 (1U << 18)
+/* AVX10 vector length support mask */
+#define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \
+ CPUID_24_0_EBX_AVX10_256 | \
+ CPUID_24_0_EBX_AVX10_512)
+
/* RAS Features */
#define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
#define CPUID_8000_0007_EBX_SUCCOR (1U << 1)
+/* (Old) KVM paravirtualized clocksource */
+#define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE)
+/* (New) KVM specific paravirtualized clocksource */
+#define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2)
+/* KVM asynchronous page fault */
+#define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF)
+/* KVM stolen (when guest vCPU is not running) time accounting */
+#define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME)
+/* KVM paravirtualized end-of-interrupt signaling */
+#define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI)
+/* KVM paravirtualized spinlocks support */
+#define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT)
+/* KVM host-side polling on HLT control from the guest */
+#define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL)
+/* KVM interrupt-based asynchronous page fault */
+#define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT)
+/* KVM 'Extended Destination ID' support for external interrupts */
+#define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID)
+
+/* Hint to KVM that vCPUs expect to never be preempted for an unlimited time */
+#define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME)
+
/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
/* Always save/restore FP error pointers */
@@ -1010,24 +1096,69 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
/* Processor ignores nested data breakpoints */
-#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0)
+#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
+/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
+#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
-#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
+#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
-#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+/* Indicates support for IC prefetch */
+#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20)
+/* Enhanced Return Address Predictor Security */
+#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
+/* Selective Branch Predictor Barrier */
+#define CPUID_8000_0021_EAX_SBPB (1U << 27)
+/* IBPB includes branch type prediction flushing */
+#define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28)
+/* Not vulnerable to Speculative Return Stack Overflow */
+#define CPUID_8000_0021_EAX_SRSO_NO (1U << 29)
+/* Not vulnerable to SRSO at the user-kernel boundary */
+#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30)
+
+/*
+ * Return Address Predictor size. RapSize x 8 is the minimum number of
+ * CALL instructions software needs to execute to flush the RAP.
+ */
+#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)
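(For scale: (8U << 16) encodes a RapSize of 8 starting at EBX bit 16, so by the rule above a guest would need to execute at least 8 x 8 = 64 CALL instructions to flush the RAP.)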
+
+/* Performance Monitoring Version 2 */
+#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)
+#define CPUID_XSAVE_XFD (1U << 4)
#define CPUID_6_EAX_ARAT (1U << 2)
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)
+/* "rng" RNG present (xstore) */
+#define CPUID_C000_0001_EDX_XSTORE (1U << 2)
+/* "rng_en" RNG enabled */
+#define CPUID_C000_0001_EDX_XSTORE_EN (1U << 3)
+/* "ace" on-CPU crypto (xcrypt) */
+#define CPUID_C000_0001_EDX_XCRYPT (1U << 6)
+/* "ace_en" on-CPU crypto enabled */
+#define CPUID_C000_0001_EDX_XCRYPT_EN (1U << 7)
+/* Advanced Cryptography Engine v2 */
+#define CPUID_C000_0001_EDX_ACE2 (1U << 8)
+/* ACE v2 enabled */
+#define CPUID_C000_0001_EDX_ACE2_EN (1U << 9)
+/* PadLock Hash Engine */
+#define CPUID_C000_0001_EDX_PHE (1U << 10)
+/* PHE enabled */
+#define CPUID_C000_0001_EDX_PHE_EN (1U << 11)
+/* PadLock Montgomery Multiplier */
+#define CPUID_C000_0001_EDX_PMM (1U << 12)
+/* PMM enabled */
+#define CPUID_C000_0001_EDX_PMM_EN (1U << 13)
+
#define CPUID_VENDOR_SZ 12
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
@@ -1040,7 +1171,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"
-#define CPUID_VENDOR_VIA "CentaurHauls"
+#define CPUID_VENDOR_ZHAOXIN1_1 0x746E6543 /* "Cent" */
+#define CPUID_VENDOR_ZHAOXIN1_2 0x48727561 /* "aurH" */
+#define CPUID_VENDOR_ZHAOXIN1_3 0x736C7561 /* "auls" */
+
+#define CPUID_VENDOR_ZHAOXIN2_1 0x68532020 /* " Sh" */
+#define CPUID_VENDOR_ZHAOXIN2_2 0x68676E61 /* "angh" */
+#define CPUID_VENDOR_ZHAOXIN2_3 0x20206961 /* "ai " */
+
+#define CPUID_VENDOR_ZHAOXIN1 "CentaurHauls"
+#define CPUID_VENDOR_ZHAOXIN2 " Shanghai "
#define CPUID_VENDOR_HYGON "HygonGenuine"
@@ -1050,6 +1190,15 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+#define IS_ZHAOXIN1_CPU(env) \
+ ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3)
+#define IS_ZHAOXIN2_CPU(env) \
+ ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3)
+#define IS_ZHAOXIN_CPU(env) (IS_ZHAOXIN1_CPU(env) || IS_ZHAOXIN2_CPU(env))
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
@@ -1080,7 +1229,10 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define MSR_ARCH_CAP_FBSDP_NO (1U << 14)
#define MSR_ARCH_CAP_PSDP_NO (1U << 15)
#define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
+#define MSR_ARCH_CAP_BHI_NO (1U << 20)
#define MSR_ARCH_CAP_PBRSB_NO (1U << 24)
+#define MSR_ARCH_CAP_GDS_NO (1U << 26)
+#define MSR_ARCH_CAP_RFDS_NO (1U << 27)
#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
@@ -1188,6 +1340,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
#define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
+#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000
#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
@@ -1273,14 +1426,14 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
* are only needed for conditional branches.
*/
typedef enum {
- CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
- CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
- CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
- CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */
- CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
- CC_OP_CLR, /* Z and P set, all other flags clear. */
-
- CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
+ CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */
+ CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */
+ CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+ /* Low 2 bits = MemOp constant for the size */
+#define CC_OP_FIRST_BWLQ CC_OP_MULB
+ CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
CC_OP_MULL,
CC_OP_MULQ,
@@ -1335,6 +1488,11 @@ typedef enum {
CC_OP_BMILGL,
CC_OP_BMILGQ,
+ CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */
+ CC_OP_BLSIW,
+ CC_OP_BLSIL,
+ CC_OP_BLSIQ,
+
/*
* Note that only CC_OP_POPCNT (i.e. the one with MO_TL size)
* is used or implemented, because the translation needs
@@ -1345,10 +1503,24 @@ typedef enum {
CC_OP_POPCNTL__,
CC_OP_POPCNTQ__,
CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
+#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__
- CC_OP_NB,
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
} CCOp;
-QEMU_BUILD_BUG_ON(CC_OP_NB >= 128);
+
+/* See X86DecodedInsn.cc_op, using int8_t. */
+QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
+
+static inline MemOp cc_op_size(CCOp op)
+{
+ MemOp size = op & 3;
+
+ QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
+ assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
+ assert(size <= MO_TL);
+
+ return size;
+}
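Worked example of the encoding above: CC_OP_MULB is pinned at 4 and every B/W/L/Q group is four consecutive enumerators, so the low two bits of any op in [CC_OP_FIRST_BWLQ, CC_OP_LAST_BWLQ] are the MemOp size directly, e.g. CC_OP_MULB & 3 == 0 (MO_8) and CC_OP_MULQ & 3 == 3 (MO_64); the QEMU_BUILD_BUG_ON enforces the required alignment of the first entry.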
typedef struct SegmentCache {
uint32_t selector;
@@ -1466,8 +1638,6 @@ typedef struct {
#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
#define NB_OPMASK_REGS 8
/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
@@ -1603,12 +1773,6 @@ typedef enum TPRAccess {
/* Cache information data structures: */
-enum CacheType {
- DATA_CACHE,
- INSTRUCTION_CACHE,
- UNIFIED_CACHE
-};
-
typedef struct CPUCacheInfo {
enum CacheType type;
uint8_t level;
@@ -1656,7 +1820,7 @@ typedef struct CPUCacheInfo {
* Used to encode CPUID[4].EAX[bits 25:14] or
* CPUID[0x8000001D].EAX[bits 25:14].
*/
- enum CPUTopoLevel share_level;
+ CpuTopologyLevel share_level;
} CPUCacheInfo;
@@ -1667,11 +1831,6 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
-typedef struct HVFX86LazyFlags {
- target_ulong result;
- target_ulong auxbits;
-} HVFX86LazyFlags;
-
typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@@ -1850,6 +2009,9 @@ typedef struct CPUArchState {
uint64_t msr_lbr_depth;
LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+ /* AMD MSRC001_0015 Hardware Configuration */
+ uint64_t msr_hwcr;
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -1880,6 +2042,10 @@ typedef struct CPUArchState {
uintptr_t retaddr;
+ /* RAPL MSR */
+ uint64_t msr_rapl_power_unit;
+ uint64_t msr_pkg_energy_status;
+
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
@@ -1901,6 +2067,8 @@ typedef struct CPUArchState {
uint32_t cpuid_vendor3;
uint32_t cpuid_version;
FeatureWordArray features;
+ /* AVX10 version */
+ uint8_t avx10_version;
/* Features that were explicitly enabled/disabled */
FeatureWordArray user_features;
uint32_t cpuid_model[12];
@@ -1955,8 +2123,7 @@ typedef struct CPUArchState {
QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
- HVFX86LazyFlags hvf_lflags;
- void *hvf_mmio_buf;
+ void *emu_mmio_buf;
#endif
uint64_t mcg_cap;
@@ -1975,14 +2142,10 @@ typedef struct CPUArchState {
TPRAccess tpr_access_type;
- /* Number of dies within this CPU package. */
- unsigned nr_dies;
-
- /* Number of modules within one die. */
- unsigned nr_modules;
+ X86CPUTopoInfo topo_info;
/* Bitmap of available CPU topology levels for this CPU. */
- DECLARE_BITMAP(avail_cpu_topo, CPU_TOPO_LEVEL_MAX);
+ DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX);
} CPUX86State;
struct kvm_msrs;
@@ -2055,6 +2218,9 @@ struct ArchCPU {
/* Features that were filtered out because of missing host capabilities */
FeatureWordArray filtered_features;
+ /* Features that are forced enabled by underlying hypervisor, e.g., TDX */
+ FeatureWordArray forced_on_features;
+
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
* bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
@@ -2102,6 +2268,9 @@ struct ArchCPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Force to enable cpuid 0x1f */
+ bool force_cpuid_0x1f;
+
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
@@ -2178,7 +2347,7 @@ struct X86CPUClass {
* CPU definition, automatically loaded by instance_init if not NULL.
* Should be eventually replaced by subclass-specific property defaults.
*/
- X86CPUModel *model;
+ const X86CPUModel *model;
bool host_cpuid_required;
int ordering;
@@ -2200,8 +2369,6 @@ struct X86CPUClass {
extern const VMStateDescription vmstate_x86_cpu;
#endif
-int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
-
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
@@ -2218,11 +2385,13 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void x86_cpu_gdb_init(CPUState *cs);
-void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
#ifndef CONFIG_USER_ONLY
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
+
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
int cpu_get_pic_interrupt(CPUX86State *s);
@@ -2319,6 +2488,8 @@ static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
cs->halted = 0;
}
+uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu);
+
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
target_ulong *base, unsigned int *limit,
unsigned int *flags);
@@ -2360,6 +2531,17 @@ void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg);
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+
+static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
+{
+ return cpu->force_cpuid_0x1f ||
+ x86_has_extended_topo(cpu->env.avail_cpu_topo);
+}
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
@@ -2409,8 +2591,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif
-#define cpu_list x86_cpu_list
-
/* MMU modes definitions */
#define MMU_KSMAP64_IDX 0
#define MMU_KSMAP32_IDX 1
@@ -2445,35 +2625,17 @@ static inline bool is_mmu_index_32(int mmu_index)
return mmu_index & 1;
}
-int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
-int cpu_mmu_index_kernel(CPUX86State *env);
-
#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)
-#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
- if (env->hflags & HF_CS64_MASK) {
- *cs_base = 0;
- *pc = env->eip;
- } else {
- *cs_base = env->segs[R_CS].base;
- *pc = (uint32_t)(*cs_base + env->eip);
- }
-}
-
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
@@ -2544,6 +2706,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
int get_pg_mode(CPUX86State *env);
/* fpu_helper.c */
+
+/* Set all non-runtime-variable float_status fields to x86 handling */
+void cpu_init_fp_statuses(CPUX86State *env);
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
void update_mxcsr_from_sse_status(CPUX86State *env);
@@ -2688,4 +2853,29 @@ static inline bool ctl_has_irq(CPUX86State *env)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif
+/* majority(NOT a, b, c) = (a ^ b) ? b : c */
+#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
+
+/*
+ * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y)
+ *
+ * If two corresponding bits in x and y are the same, that's the carry
+ * independent of the value (x+y)^x^y. Hence x^y can be replaced with
+ * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y)
+ */
+#define ADD_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(result, op1, op2)
+
+/*
+ * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
+ * = majority(NOT x, y, (x - y) ^ x ^ y)
+ *
+ * Note that the carry out is actually a borrow, i.e. it is inverted.
+ * If two corresponding bits in x and y are different, the value of the
+ * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced
+ * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
+ */
+#define SUB_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(op1, op2, result)
+
#endif /* I386_CPU_H */
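The carry/borrow identities above are easy to sanity-check; a minimal standalone program (repeating the three macros so it builds on its own) compares them against a naive per-bit ripple computation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same definitions as in the header above, repeated so this compiles alone */
#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
#define ADD_COUT_VEC(op1, op2, result) MAJ_INV1(result, op1, op2)
#define SUB_COUT_VEC(op1, op2, result) MAJ_INV1(op1, op2, result)

int main(void)
{
    uint8_t x = 0xA5, y = 0x3C;
    uint8_t add = x + y, sub = x - y;
    uint8_t cout = ADD_COUT_VEC(x, y, add);   /* per-bit carry out of x + y  */
    uint8_t bout = SUB_COUT_VEC(x, y, sub);   /* per-bit borrow out of x - y */

    for (int i = 0; i < 8; i++) {
        int c = 0, b = 0;
        /* ripple through bits 0..i to get the carry/borrow out of bit i */
        for (int j = 0; j <= i; j++) {
            int xb = (x >> j) & 1, yb = (y >> j) & 1;
            c = (xb + yb + c) >> 1;
            b = (xb - yb - b) < 0;
        }
        assert(((cout >> i) & 1) == c);
        assert(((bout >> i) & 1) == b);
    }
    printf("ADD_COUT_VEC/SUB_COUT_VEC match the ripple computation\n");
    return 0;
}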
diff --git a/target/i386/emulate/meson.build b/target/i386/emulate/meson.build
new file mode 100644
index 0000000..4edd4f4
--- /dev/null
+++ b/target/i386/emulate/meson.build
@@ -0,0 +1,5 @@
+i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
+ 'x86_decode.c',
+ 'x86_emu.c',
+ 'x86_flags.c',
+))
diff --git a/target/i386/emulate/panic.h b/target/i386/emulate/panic.h
new file mode 100644
index 0000000..71c2487
--- /dev/null
+++ b/target/i386/emulate/panic.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef X86_EMU_PANIC_H
+#define X86_EMU_PANIC_H
+
+#define VM_PANIC(x) {\
+ printf("%s\n", x); \
+ abort(); \
+}
+
+#define VM_PANIC_ON(x) {\
+ if (x) { \
+ printf("%s\n", #x); \
+ abort(); \
+ } \
+}
+
+#define VM_PANIC_EX(...) {\
+ printf(__VA_ARGS__); \
+ abort(); \
+}
+
+#define VM_PANIC_ON_EX(x, ...) {\
+ if (x) { \
+ printf(__VA_ARGS__); \
+ abort(); \
+ } \
+}
+
+#endif
diff --git a/target/i386/emulate/x86.h b/target/i386/emulate/x86.h
new file mode 100644
index 0000000..73edccf
--- /dev/null
+++ b/target/i386/emulate/x86.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Veertu Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_EMU_DEFS_H
+#define X86_EMU_DEFS_H
+
+typedef struct x86_register {
+ union {
+ struct {
+ uint64_t rrx; /* full 64 bit */
+ };
+ struct {
+ uint32_t erx; /* low 32 bit part */
+ uint32_t hi32_unused1;
+ };
+ struct {
+ uint16_t rx; /* low 16 bit part */
+ uint16_t hi16_unused1;
+ uint32_t hi32_unused2;
+ };
+ struct {
+ uint8_t lx; /* low 8 bit part */
+ uint8_t hx; /* high 8 bit */
+ uint16_t hi16_unused2;
+ uint32_t hi32_unused3;
+ };
+ };
+} __attribute__ ((__packed__)) x86_register;
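The overlapping structs give the usual x86 sub-register views (RAX/EAX/AX/AL/AH and friends) of a single 64-bit value. A simplified, self-contained sketch of the same aliasing, assuming a little-endian host and C11 anonymous members:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for x86_register; same idea, fewer padding fields */
typedef union {
    uint64_t rrx;                 /* full 64-bit register           */
    uint32_t erx;                 /* low 32 bits                    */
    uint16_t rx;                  /* low 16 bits                    */
    struct { uint8_t lx, hx; };   /* low byte and next byte (AL/AH) */
} reg_view;

int main(void)
{
    reg_view r = { .rrx = 0x1122334455667788ULL };
    /* on a little-endian host this prints: 55667788 7788 88 77 */
    printf("%x %x %x %x\n", (unsigned)r.erx, (unsigned)r.rx,
           (unsigned)r.lx, (unsigned)r.hx);
    return 0;
}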
+
+/* 16 bit Task State Segment */
+typedef struct x86_tss_segment16 {
+ uint16_t link;
+ uint16_t sp0;
+ uint16_t ss0;
+ uint32_t sp1;
+ uint16_t ss1;
+ uint32_t sp2;
+ uint16_t ss2;
+ uint16_t ip;
+ uint16_t flags;
+ uint16_t ax;
+ uint16_t cx;
+ uint16_t dx;
+ uint16_t bx;
+ uint16_t sp;
+ uint16_t bp;
+ uint16_t si;
+ uint16_t di;
+ uint16_t es;
+ uint16_t cs;
+ uint16_t ss;
+ uint16_t ds;
+ uint16_t ldtr;
+} __attribute__((packed)) x86_tss_segment16;
+
+/* 32 bit Task State Segment */
+typedef struct x86_tss_segment32 {
+ uint32_t prev_tss;
+ uint32_t esp0;
+ uint32_t ss0;
+ uint32_t esp1;
+ uint32_t ss1;
+ uint32_t esp2;
+ uint32_t ss2;
+ uint32_t cr3;
+ uint32_t eip;
+ uint32_t eflags;
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t es;
+ uint32_t cs;
+ uint32_t ss;
+ uint32_t ds;
+ uint32_t fs;
+ uint32_t gs;
+ uint32_t ldt;
+ uint16_t trap;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment32;
+
+/* 64 bit Task State Segment */
+typedef struct x86_tss_segment64 {
+ uint32_t unused;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t unused1;
+ uint64_t ist1;
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t unused2;
+ uint16_t unused3;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment64;
+
+/* segment descriptors */
+typedef struct x86_segment_descriptor {
+ uint64_t limit0:16;
+ uint64_t base0:16;
+ uint64_t base1:8;
+ uint64_t type:4;
+ uint64_t s:1;
+ uint64_t dpl:2;
+ uint64_t p:1;
+ uint64_t limit1:4;
+ uint64_t avl:1;
+ uint64_t l:1;
+ uint64_t db:1;
+ uint64_t g:1;
+ uint64_t base2:8;
+} __attribute__ ((__packed__)) x86_segment_descriptor;
+
+static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
+{
+ return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
+}
+
+static inline void x86_set_segment_base(x86_segment_descriptor *desc,
+ uint32_t base)
+{
+ desc->base2 = base >> 24;
+ desc->base1 = (base >> 16) & 0xff;
+ desc->base0 = base & 0xffff;
+}
+
+static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
+{
+ uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
+ if (desc->g) {
+ return (limit << 12) | 0xfff;
+ }
+ return limit;
+}
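For example, a descriptor with g set and a raw 20-bit limit of 0xFFFFF expands to (0xFFFFF << 12) | 0xfff = 0xffffffff, the byte-granular limit of a flat 4 GiB segment; with g clear the 20-bit value is the byte limit itself.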
+
+static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
+ uint32_t limit)
+{
+ desc->limit0 = limit & 0xffff;
+ desc->limit1 = limit >> 16;
+}
+
+typedef struct x86_call_gate {
+ uint64_t offset0:16;
+ uint64_t selector:16;
+ uint64_t param_count:4;
+ uint64_t reserved:3;
+ uint64_t type:4;
+ uint64_t dpl:1;
+ uint64_t p:1;
+ uint64_t offset1:16;
+} __attribute__ ((__packed__)) x86_call_gate;
+
+static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
+{
+ return (uint32_t)((gate->offset1 << 16) | gate->offset0);
+}
+
+#define GDT_SEL 0
+#define LDT_SEL 1
+
+typedef struct x86_segment_selector {
+ union {
+ uint16_t sel;
+ struct {
+ uint16_t rpl:2;
+ uint16_t ti:1;
+ uint16_t index:13;
+ };
+ };
+} __attribute__ ((__packed__)) x86_segment_selector;
+
+/* useful register access macros */
+#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
+
+#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
+#define RAX(cpu) RRX(cpu, R_EAX)
+#define RCX(cpu) RRX(cpu, R_ECX)
+#define RDX(cpu) RRX(cpu, R_EDX)
+#define RBX(cpu) RRX(cpu, R_EBX)
+#define RSP(cpu) RRX(cpu, R_ESP)
+#define RBP(cpu) RRX(cpu, R_EBP)
+#define RSI(cpu) RRX(cpu, R_ESI)
+#define RDI(cpu) RRX(cpu, R_EDI)
+#define R8(cpu) RRX(cpu, R_R8)
+#define R9(cpu) RRX(cpu, R_R9)
+#define R10(cpu) RRX(cpu, R_R10)
+#define R11(cpu) RRX(cpu, R_R11)
+#define R12(cpu) RRX(cpu, R_R12)
+#define R13(cpu) RRX(cpu, R_R13)
+#define R14(cpu) RRX(cpu, R_R14)
+#define R15(cpu) RRX(cpu, R_R15)
+
+#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
+#define EAX(cpu) ERX(cpu, R_EAX)
+#define ECX(cpu) ERX(cpu, R_ECX)
+#define EDX(cpu) ERX(cpu, R_EDX)
+#define EBX(cpu) ERX(cpu, R_EBX)
+#define ESP(cpu) ERX(cpu, R_ESP)
+#define EBP(cpu) ERX(cpu, R_EBP)
+#define ESI(cpu) ERX(cpu, R_ESI)
+#define EDI(cpu) ERX(cpu, R_EDI)
+
+#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
+#define AX(cpu) RX(cpu, R_EAX)
+#define CX(cpu) RX(cpu, R_ECX)
+#define DX(cpu) RX(cpu, R_EDX)
+#define BP(cpu) RX(cpu, R_EBP)
+#define SP(cpu) RX(cpu, R_ESP)
+#define BX(cpu) RX(cpu, R_EBX)
+#define SI(cpu) RX(cpu, R_ESI)
+#define DI(cpu) RX(cpu, R_EDI)
+
+#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
+#define AL(cpu) RL(cpu, R_EAX)
+#define CL(cpu) RL(cpu, R_ECX)
+#define DL(cpu) RL(cpu, R_EDX)
+#define BL(cpu) RL(cpu, R_EBX)
+
+#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
+#define AH(cpu) RH(cpu, R_EAX)
+#define CH(cpu) RH(cpu, R_ECX)
+#define DH(cpu) RH(cpu, R_EDX)
+#define BH(cpu) RH(cpu, R_EBX)
+
+/* deal with GDT/LDT descriptors in memory */
+bool x86_read_segment_descriptor(CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x86_segment_selector sel);
+bool x86_write_segment_descriptor(CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x86_segment_selector sel);
+
+bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate);
+
+/* helpers */
+bool x86_is_protected(CPUState *cpu);
+bool x86_is_real(CPUState *cpu);
+bool x86_is_v8086(CPUState *cpu);
+bool x86_is_long_mode(CPUState *cpu);
+bool x86_is_long64_mode(CPUState *cpu);
+bool x86_is_paging_mode(CPUState *cpu);
+bool x86_is_pae_enabled(CPUState *cpu);
+
+enum X86Seg;
+target_ulong linear_addr(CPUState *cpu, target_ulong addr, enum X86Seg seg);
+target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size,
+ enum X86Seg seg);
+target_ulong linear_rip(CPUState *cpu, target_ulong rip);
+
+static inline uint64_t rdtscp(void)
+{
+ uint64_t tsc;
+ __asm__ __volatile__("rdtscp; " /* serializing read of tsc */
+ "shl $32,%%rdx; " /* shift higher 32 bits stored in rdx up */
+ "or %%rdx,%%rax" /* and or onto rax */
+ : "=a"(tsc) /* output to tsc variable */
+ :
+ : "%rcx", "%rdx"); /* rcx and rdx are clobbered */
+
+ return tsc;
+}
+
+#endif
diff --git a/target/i386/emulate/x86_decode.c b/target/i386/emulate/x86_decode.c
new file mode 100644
index 0000000..2eca398
--- /dev/null
+++ b/target/i386/emulate/x86_decode.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "panic.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+
+#define OPCODE_ESCAPE 0xf
+
+static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
+{
+ printf(TARGET_FMT_lx ": failed to decode instruction ", env->eip);
+ for (int i = 0; i < decode->opcode_len; i++) {
+ printf("%x ", decode->opcode[i]);
+ }
+ printf("\n");
+ VM_PANIC("decoder failed\n");
+}
+
+uint64_t sign(uint64_t val, int size)
+{
+ switch (size) {
+ case 1:
+ val = (int8_t)val;
+ break;
+ case 2:
+ val = (int16_t)val;
+ break;
+ case 4:
+ val = (int32_t)val;
+ break;
+ case 8:
+ val = (int64_t)val;
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ return val;
+}
+
+static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
+ int size)
+{
+ uint64_t val = 0;
+
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
+ emul_ops->read_mem(env_cpu(env), &val, va, size);
+ decode->len += size;
+
+ return val;
+}
+
+static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint8_t)decode_bytes(env, decode, 1);
+}
+
+static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint16_t)decode_bytes(env, decode, 2);
+}
+
+static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint32_t)decode_bytes(env, decode, 4);
+}
+
+static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode)
+{
+ return decode_bytes(env, decode, 8);
+}
+
+static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_RM;
+}
+
+static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = decode->modrm.reg;
+ op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
+ decode->operand_size);
+}
+
+static void decode_rax(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_EAX;
+ /* Since reg is always AX, REX prefix has no impact. */
+ op->regptr = get_reg_ref(env, op->reg, false, 0,
+ decode->operand_size);
+}
+
+static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *var, int size)
+{
+ var->type = X86_VAR_IMMEDIATE;
+ var->size = size;
+ switch (size) {
+ case 1:
+ var->val = decode_byte(env, decode);
+ break;
+ case 2:
+ var->val = decode_word(env, decode);
+ break;
+ case 4:
+ var->val = decode_dword(env, decode);
+ break;
+ case 8:
+ var->val = decode_qword(env, decode);
+ break;
+ default:
+ VM_PANIC_EX("bad size %d\n", size);
+ }
+}
+
+static void decode_imm8(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->val = sign(op->val, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 2);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+
+static void decode_imm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (8 == decode->operand_size) {
+ decode_immediate(env, decode, op, 4);
+ op->val = sign(op->val, decode->operand_size);
+ } else {
+ decode_immediate(env, decode, op, decode->operand_size);
+ }
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, decode->operand_size);
+ op->val = sign(op->val, decode->operand_size);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_1(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 1;
+}
+
+static void decode_imm_0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 0;
+}
+
+
+static void decode_pushseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xe:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x16:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1e:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x06:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa0:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa8:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_popseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xf:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x17:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1f:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x07:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa1:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa9:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x40;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x48;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
+{
+ if (!decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_INC;
+ } else if (1 == decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_DEC;
+ }
+}
+
+static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x50;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x58;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->displacement = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement_size = decode->operand_size;
+}
+
+static void decode_farjmp(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement = decode_word(env, decode);
+}
+
+static void decode_addgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAR
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_f7group(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV
+ };
+ decode->cmd = group[decode->modrm.reg];
+ decode_modrm_rm(env, decode, &decode->op[0]);
+
+ switch (decode->modrm.reg) {
+ case 0:
+ case 1:
+ decode_imm(env, decode, &decode->op[1]);
+ break;
+ case 2:
+ break;
+ case 3:
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x90;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb8;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_OFFSET;
+ op->addr = decode_bytes(env, decode, decode->addressing_size);
+}
+
+static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb0;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_ECX;
+ op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
+ decode->operand_size);
+}
+
+struct decode_tbl {
+ uint8_t opcode;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool is_modrm;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_op3)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op3);
+ void (*decode_op4)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op4);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+};
+
+struct decode_x87_tbl {
+ uint8_t opcode;
+ uint8_t modrm_reg;
+ uint8_t modrm_mod;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool rev;
+ bool pop;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+};
+
+struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL,
+ decode_invalid};
+
+struct decode_tbl _decode_tbl1[256];
+struct decode_tbl _decode_tbl2[256];
+struct decode_x87_tbl _decode_tbl3[256];
+
+static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode)
+{
+ struct decode_x87_tbl *decoder;
+
+ decode->is_fpu = true;
+ int mode = decode->modrm.mod == 3 ? 1 : 0;
+ int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) |
+ decode->modrm.reg;
+
+ decoder = &_decode_tbl3[index];
+
+ decode->cmd = decoder->cmd;
+ if (decoder->operand_size) {
+ decode->operand_size = decoder->operand_size;
+ }
+ decode->fpop_stack = decoder->pop;
+ decode->frev = decoder->rev;
+
+ if (decoder->decode_op1) {
+ decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (decoder->decode_op2) {
+ decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (decoder->decode_postfix) {
+ decoder->decode_postfix(env, decode);
+ }
+
+ VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n",
+ decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg,
+ decoder->modrm_mod);
+}
+
+static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_INVLPG
+ };
+ decode->cmd = group[decode->modrm.reg];
+ if (0xf9 == decode->modrm.modrm) {
+ decode->opcode[decode->len++] = decode->modrm.modrm;
+ decode->cmd = X86_DECODE_CMD_RDTSCP;
+ }
+}
+
+static void decode_btgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BTC
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_x87_general(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+}
+
+static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_FLOATP;
+}
+
+static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_INTP;
+}
+
+static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_BYTEP;
+}
+
+static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = 0;
+}
+
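+/* Like decode_x87_modrm_st0(), but the operand is ST(i) from modrm bits 2:0. */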
+static void decode_decode_x87_modrm_st0(CPUX86State *env,
+ struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = decode->modrm.modrm & 7;
+}
+
+static void decode_aegroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+ switch (decode->modrm.reg) {
+ case 0:
+ decode->cmd = X86_DECODE_CMD_FXSAVE;
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ break;
+ case 1:
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ decode->cmd = X86_DECODE_CMD_FXRSTOR;
+ break;
+ case 5:
+ if (decode->modrm.modrm == 0xe8) {
+ decode->cmd = X86_DECODE_CMD_LFENCE;
+ } else {
+ VM_PANIC("xrstor");
+ }
+ break;
+ case 6:
+ VM_PANIC_ON(decode->modrm.modrm != 0xf0);
+ decode->cmd = X86_DECODE_CMD_MFENCE;
+ break;
+ case 7:
+ if (decode->modrm.modrm == 0xf8) {
+ decode->cmd = X86_DECODE_CMD_SFENCE;
+ } else {
+ decode->cmd = X86_DECODE_CMD_CLFLUSH;
+ }
+ break;
+ default:
+ VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg);
+ break;
+ }
+}
+
+static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[1] - 0xc8;
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ /* FCHS */
+ decode->cmd = X86_DECODE_CMD_FCHS;
+ break;
+ case 0xe1:
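+ /* FABS */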
+ decode->cmd = X86_DECODE_CMD_FABS;
+ break;
+ case 0xe4:
+ VM_PANIC("FTST");
+ break;
+ case 0xe5:
+ /* FXAM */
+ decode->cmd = X86_DECODE_CMD_FXAM;
+ break;
+ default:
+ VM_PANIC("FLDENV");
+ break;
+ }
+}
+
+static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe1:
+ VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe2:
+ VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe3:
+ decode->cmd = X86_DECODE_CMD_FNINIT;
+ break;
+ case 0xe4:
+ decode->cmd = X86_DECODE_CMD_FNSETPM;
+ break;
+ default:
+ VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ }
+}
+
+struct decode_tbl _1op_inst[] = {
+ {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL},
+ {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL},
+ {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL},
+ {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL},
+ {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL,
+ NULL},
+ {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL,
+ NULL},
+ {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL, NULL, NULL, NULL,
+ decode_pushseg},
+ {0x7, X86_DECODE_CMD_POP_SEG, 0, false, NULL, NULL, NULL, NULL,
+ decode_popseg},
+ {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL},
+ {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL},
+ {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL},
+ {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL},
+ {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg},
+ {0xf, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg},
+
+ {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg},
+ {0x17, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg},
+
+ {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL},
+ {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg},
+ {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg},
+
+ {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x2f, X86_DECODE_CMD_DAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL},
+ {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0x3f, X86_DECODE_CMD_AAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0x40, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x41, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x42, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x43, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x44, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x45, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x46, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+ {0x47, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup},
+
+ {0x48, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x49, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4a, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4b, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4c, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4d, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4e, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+ {0x4f, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup},
+
+ {0x50, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x51, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x52, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x53, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x54, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x55, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x56, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+ {0x57, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup},
+
+ {0x58, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x59, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5a, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5b, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5c, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5d, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5e, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+ {0x5f, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup},
+
+ {0x60, X86_DECODE_CMD_PUSHA, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x61, X86_DECODE_CMD_POPA, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm,
+ NULL, NULL, NULL, NULL},
+ {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL},
+ {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg,
+ decode_modrm_rm, decode_imm, NULL, NULL},
+ {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm,
+ decode_imm8_signed, NULL, NULL},
+
+ {0x6c, X86_DECODE_CMD_INS, 1, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x6d, X86_DECODE_CMD_INS, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x6e, X86_DECODE_CMD_OUTS, 1, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x6f, X86_DECODE_CMD_OUTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0x70, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x71, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x72, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x73, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x74, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x75, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x76, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x77, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x78, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x79, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7a, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7b, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7c, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7d, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7e, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x7f, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+
+ {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup},
+ {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, decode_addgroup},
+ {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup},
+ {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed,
+ NULL, NULL, decode_addgroup},
+ {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL},
+ {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL},
+ {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+
+ {0x90, X86_DECODE_CMD_NOP, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+ {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup},
+
+ {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+
+ {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL,
+ NULL, NULL, NULL, decode_farjmp},
+
+ {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL,
+ NULL, NULL, NULL},*/
+ {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+
+ {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL},
+ {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL},
+ {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL},
+ {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL},
+
+ {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+
+ {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+ {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL},
+
+ {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+ {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8},
+
+ {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xba, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+ {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup},
+
+ {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup},
+ {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup},
+
+ {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16,
+ NULL, NULL, NULL, NULL},
+ {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL,
+ NULL, NULL, NULL, NULL},
+
+ {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+
+ {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, NULL},
+ {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, NULL},
+
+ {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8,
+ NULL, NULL, NULL},
+ {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL,
+ NULL, NULL, NULL},
+ {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL,
+ NULL, NULL, NULL},
+ {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL,
+ NULL, NULL, NULL},
+ {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL,
+ NULL, NULL, NULL},
+ /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL,
+ NULL, NULL, NULL},*/
+
+ {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup},
+ {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup},
+ {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup},
+ {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup},
+
+ {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+ {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+
+ {0xd7, X86_DECODE_CMD_XLAT, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xda, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xde, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+ {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins},
+
+ {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL},
+ {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL},
+ {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL},
+
+ {0xe3, X86_DECODE_CMD_JCXZ, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+
+ {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+ {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+ {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+ {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL},
+ {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL},
+ {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL},
+ {0xea, X86_DECODE_CMD_JMP_FAR, 0, false,
+ NULL, NULL, NULL, NULL, decode_farjmp},
+ {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL},
+ {0xec, X86_DECODE_CMD_IN, 1, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xed, X86_DECODE_CMD_IN, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xee, X86_DECODE_CMD_OUT, 1, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xef, X86_DECODE_CMD_OUT, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0xf4, X86_DECODE_CMD_HLT, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0xf5, X86_DECODE_CMD_CMC, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0xf6, X86_DECODE_CMD_INVL, 1, true,
+ NULL, NULL, NULL, NULL, decode_f7group},
+ {0xf7, X86_DECODE_CMD_INVL, 0, true,
+ NULL, NULL, NULL, NULL, decode_f7group},
+
+ {0xf8, X86_DECODE_CMD_CLC, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xf9, X86_DECODE_CMD_STC, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+
+ {0xfa, X86_DECODE_CMD_CLI, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xfb, X86_DECODE_CMD_STI, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xfc, X86_DECODE_CMD_CLD, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xfd, X86_DECODE_CMD_STD, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_incgroup2},
+ {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_ffgroup},
+};
+
+struct decode_tbl _2op_inst[] = {
+ {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_sldtgroup},
+ {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_lidtgroup},
+ {0x6, X86_DECODE_CMD_CLTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x9, X86_DECODE_CMD_WBINVD, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x18, X86_DECODE_CMD_PREFETCH, 0, true,
+ NULL, NULL, NULL, NULL, decode_x87_general},
+ {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL},
+ {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL},
+ {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL},
+ {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL},
+ {0x30, X86_DECODE_CMD_WRMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x31, X86_DECODE_CMD_RDTSC, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x32, X86_DECODE_CMD_RDMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0x77, X86_DECODE_CMD_EMMS, 0, false,
+ NULL, NULL, NULL, NULL, decode_x87_general},
+ {0x82, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x83, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x84, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x85, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x86, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x87, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x88, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x89, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8a, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8b, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8c, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8d, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8e, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x8f, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx},
+ {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+ {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+
+ {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+
+ {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg},
+ {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg},
+ {0xa2, X86_DECODE_CMD_CPUID, 0, false,
+ NULL, NULL, NULL, NULL, NULL},
+ {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL},
+ {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL},
+ {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg},
+ {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg},
+ {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL},
+ {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL},
+
+ {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_aegroup},
+
+ {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_btgroup},
+ {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+ {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+ {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL},
+
+ {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL},
+
+ {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL},
+
+ {0xc8, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xc9, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xca, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xcb, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xcc, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xcd, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xce, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+ {0xcf, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap},
+};
+
+struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,
+ NULL, decode_invalid};
+
+struct decode_x87_tbl _x87_inst[] = {
+ {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL},
+ {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+ {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL},
+ {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+ {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+ {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+ {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+ {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL},
+
+ {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, decode_d9_4},
+ {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+ {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL},
+ {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+
+ {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+ {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+
+ {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+ {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL},
+ {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+ {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
+ {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+ {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL},
+ {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+ {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
+ {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+ {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
+ {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL},
+
+ {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL},
+ {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,
+ decode_db_4},
+ {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
+ {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_floatp, NULL, NULL},
+
+ {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+ {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+ {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+ {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+ {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+ {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL},
+
+ {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, NULL, NULL},
+ {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_floatp, NULL, NULL},
+ {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+ {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+ {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL},
+
+ {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+ {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+ {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+ {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+ {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+ {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL},
+
+ {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true,
+ decode_x87_modrm_bytep, NULL, NULL},
+ {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
+ {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_intp, NULL, NULL},
+ {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_intp, NULL, NULL},
+};
+
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ target_ulong ptr = 0;
+ X86Seg seg = R_DS;
+
+ if (!decode->modrm.mod && 6 == decode->modrm.rm) {
+ ptr = decode->displacement;
+ goto calc_addr;
+ }
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ switch (decode->modrm.rm) {
+ case 0:
+ ptr += BX(env) + SI(env);
+ break;
+ case 1:
+ ptr += BX(env) + DI(env);
+ break;
+ case 2:
+ ptr += BP(env) + SI(env);
+ seg = R_SS;
+ break;
+ case 3:
+ ptr += BP(env) + DI(env);
+ seg = R_SS;
+ break;
+ case 4:
+ ptr += SI(env);
+ break;
+ case 5:
+ ptr += DI(env);
+ break;
+ case 6:
+ ptr += BP(env);
+ seg = R_SS;
+ break;
+ case 7:
+ ptr += BX(env);
+ break;
+ }
+calc_addr:
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->addr = (uint16_t)ptr;
+ } else {
+ op->addr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
+ }
+}
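
In 16-bit addressing the base comes from fixed register pairs selected by modrm.rm, with the BP-based forms defaulting to SS, as the switch above records. A standalone reminder of that mapping (illustrative, not part of the patch):

#include <stdio.h>

/* rm -> registers summed for 16-bit ModRM, matching calc_modrm_operand16(). */
static const char *modrm16_base[8] = {
    "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx"
};

int main(void)
{
    for (int rm = 0; rm < 8; rm++) {
        printf("rm=%d -> %s\n", rm, modrm16_base[rm]);
    }
    return 0;
}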
+
+void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
+{
+ void *ptr = NULL;
+
+ if (is_extended) {
+ reg |= R_R8;
+ }
+
+ switch (size) {
+ case 1:
+ if (is_extended || reg < 4 || rex_present) {
+ ptr = &RL(env, reg);
+ } else {
+ ptr = &RH(env, reg - 4);
+ }
+ break;
+ default:
+ ptr = &RRX(env, reg);
+ break;
+ }
+ return ptr;
+}
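
For 1-byte operands the register number alone is ambiguous: without a REX prefix, encodings 4-7 name AH/CH/DH/BH, while any REX prefix (or an extended register) remaps them to SPL/BPL/SIL/DIL, which is what the reg < 4 || rex_present test above captures. A standalone illustration of that mapping (register names here are illustrative, not QEMU accessors):

#include <stdbool.h>
#include <stdio.h>

static const char *byte_reg_name(int reg, bool rex_present)
{
    static const char *low[]  = {"al", "cl", "dl", "bl",
                                 "spl", "bpl", "sil", "dil"};
    static const char *high[] = {"ah", "ch", "dh", "bh"};

    if (!rex_present && reg >= 4 && reg < 8) {
        return high[reg - 4];      /* legacy encoding: AH..BH */
    }
    return low[reg & 7];           /* with REX: SPL..DIL instead */
}

int main(void)
{
    printf("%s %s\n", byte_reg_name(4, false), byte_reg_name(4, true));
    return 0;
}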
+
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
+{
+ target_ulong val = 0;
+ memcpy(&val,
+ get_reg_ref(env, reg, rex_present, is_extended, size),
+ size);
+ return val;
+}
+
+static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode,
+ X86Seg *sel)
+{
+ target_ulong base = 0;
+ target_ulong scaled_index = 0;
+ int addr_size = decode->addressing_size;
+ int base_reg = decode->sib.base;
+ int index_reg = decode->sib.index;
+
+ *sel = R_DS;
+
+ if (decode->modrm.mod || base_reg != R_EBP) {
+ if (decode->rex.b) {
+ base_reg |= R_R8;
+ }
+ if (base_reg == R_ESP || base_reg == R_EBP) {
+ *sel = R_SS;
+ }
+ base = get_reg_val(env, decode->sib.base, decode->rex.rex,
+ decode->rex.b, addr_size);
+ }
+
+ if (decode->rex.x) {
+ index_reg |= R_R8;
+ }
+
+ if (index_reg != R_ESP) {
+ scaled_index = get_reg_val(env, index_reg, decode->rex.rex,
+ decode->rex.x, addr_size) <<
+ decode->sib.scale;
+ }
+ return base + scaled_index;
+}
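
get_sib_val() implements the usual SIB rule: the effective address is the base plus the index shifted by the scale, with index 4 meaning "no index" and base 5 under mod 0 meaning "no base, disp32 only". A hedged standalone sketch of the complete arithmetic; note the displacement itself is folded in by the calc_modrm_operand32/64 callers, not by get_sib_val() (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative SIB arithmetic: ea = base + (index << scale) + disp.
 * The special cases (index 4 = none, base 5 with mod 0 = disp32 only)
 * mirror the checks in get_sib_val() above. */
static uint64_t sib_ea(uint64_t base, uint64_t index, unsigned scale,
                       int64_t disp, int index_reg, int base_reg, int mod)
{
    uint64_t ea = disp;

    if (mod != 0 || base_reg != 5) {   /* base register participates */
        ea += base;
    }
    if (index_reg != 4) {              /* index register participates */
        ea += index << scale;
    }
    return ea;
}

int main(void)
{
    /* e.g. [rbx + rcx*4 + 0x10] with rbx=0x1000, rcx=3 -> 0x101c */
    printf("0x%llx\n",
           (unsigned long long)sib_ea(0x1000, 3, 2, 0x10, 1, 3, 2));
    return 0;
}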
+
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ target_ulong ptr = 0;
+ int addr_size = decode->addressing_size;
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == decode->modrm.rm) {
+ ptr += get_sib_val(env, decode, &seg);
+ } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
+ if (x86_is_long_mode(env_cpu(env))) {
+ ptr += env->eip + decode->len;
+ } else {
+ ptr = decode->displacement;
+ }
+ } else {
+ if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) {
+ seg = R_SS;
+ }
+ ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex,
+ decode->rex.b, addr_size);
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->addr = (uint32_t)ptr;
+ } else {
+ op->addr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
+ }
+}
+
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ int32_t offset = 0;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+ target_ulong ptr;
+ int src = decode->modrm.rm;
+
+ if (decode->displacement_size) {
+ offset = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == rm) {
+ ptr = get_sib_val(env, decode, &seg) + offset;
+ } else if (0 == mod && 5 == rm) {
+ ptr = env->eip + decode->len + (int32_t) offset;
+ } else {
+ ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) +
+ (int64_t) offset;
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->addr = ptr;
+ } else {
+ op->addr = decode_linear_addr(env, decode, ptr, seg);
+ }
+}
+
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (3 == decode->modrm.mod) {
+ op->reg = decode->modrm.reg;
+ op->type = X86_VAR_REG;
+ op->regptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ return;
+ }
+
+ switch (decode->addressing_size) {
+ case 2:
+ calc_modrm_operand16(env, decode, op);
+ break;
+ case 4:
+ calc_modrm_operand32(env, decode, op);
+ break;
+ case 8:
+ calc_modrm_operand64(env, decode, op);
+ break;
+ default:
+ VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size);
+ break;
+ }
+}
+
+static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
+{
+ while (1) {
+ /*
+ * REX prefix must come after legacy prefixes.
+ * REX before legacy is ignored.
+ * Clear rex to simulate this.
+ */
+ uint8_t byte = decode_byte(env, decode);
+ switch (byte) {
+ case PREFIX_LOCK:
+ decode->lock = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_REPN:
+ case PREFIX_REP:
+ decode->rep = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_CS_SEG_OVERRIDE:
+ case PREFIX_SS_SEG_OVERRIDE:
+ case PREFIX_DS_SEG_OVERRIDE:
+ case PREFIX_ES_SEG_OVERRIDE:
+ case PREFIX_FS_SEG_OVERRIDE:
+ case PREFIX_GS_SEG_OVERRIDE:
+ decode->segment_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_OP_SIZE_OVERRIDE:
+ decode->op_size_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_ADDR_SIZE_OVERRIDE:
+ decode->addr_size_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_REX ... (PREFIX_REX + 0xf):
+ if (x86_is_long_mode(env_cpu(env))) {
+ decode->rex.rex = byte;
+ break;
+ }
+ /* fall through when not in long mode */
+ default:
+ decode->len--;
+ return;
+ }
+ }
+}
+
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->addressing_size = -1;
+ if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ } else if (!x86_is_long_mode(env_cpu(env))) {
+ /* protected */
+ x86_segment_descriptor cs;
+ emul_ops->read_segment_descriptor(env_cpu(env), &cs, R_CS);
+ /* check db */
+ if (cs.db) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 2;
+ } else {
+ decode->addressing_size = 4;
+ }
+ } else {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 8;
+ }
+ }
+}
+
+void set_operand_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->operand_size = -1;
+ if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ } else if (!x86_is_long_mode(env_cpu(env))) {
+ /* protected */
+ x86_segment_descriptor cs;
+ emul_ops->read_segment_descriptor(env_cpu(env), &cs, R_CS);
+ /* check db */
+ if (cs.db) {
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+ } else {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+
+ if (decode->rex.w) {
+ decode->operand_size = 8;
+ }
+ }
+}
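
The operand-size rules above reduce to: the default comes from the CPU mode and CS.D, the 0x66 prefix toggles between 16 and 32 bits, and REX.W forces 64 bits in long mode. A hedged standalone condensation of that decision (illustrative; real mode and v8086 behave like a 16-bit default code segment here):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors set_operand_size() above: mode/CS.D pick the default,
 * 0x66 flips 16<->32, REX.W wins in long mode. */
static int effective_operand_size(bool long_mode, bool cs_db,
                                  bool op_size_override, bool rex_w)
{
    int size;

    if (long_mode) {
        size = op_size_override ? 2 : 4;
        if (rex_w) {
            size = 8;
        }
    } else if (cs_db) {
        size = op_size_override ? 2 : 4;
    } else {
        size = op_size_override ? 4 : 2;
    }
    return size;
}

int main(void)
{
    printf("%d %d\n",
           effective_operand_size(true, true, false, true),   /* 8 */
           effective_operand_size(false, true, true, false)); /* 2 */
    return 0;
}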
+
+static void decode_sib(CPUX86State *env, struct x86_decode *decode)
+{
+ if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&
+ (decode->addressing_size != 2)) {
+ decode->sib.sib = decode_byte(env, decode);
+ decode->sib_present = true;
+ }
+}
+
+/* 16-bit modrm */
+int disp16_tbl[4][8] = {
+ {0, 0, 0, 0, 0, 0, 2, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+/* 32/64-bit modrm */
+int disp32_tbl[4][8] = {
+ {0, 0, 0, 0, -1, 4, 0, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {4, 4, 4, 4, 4, 4, 4, 4},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+static inline void decode_displacement(CPUX86State *env,
+ struct x86_decode *decode)
+{
+ int addressing_size = decode->addressing_size;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+
+ decode->displacement_size = 0;
+ switch (addressing_size) {
+ case 2:
+ decode->displacement_size = disp16_tbl[mod][rm];
+ if (decode->displacement_size) {
+ decode->displacement = (uint16_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ case 4:
+ case 8:
+ if (-1 == disp32_tbl[mod][rm]) {
+ if (5 == decode->sib.base) {
+ decode->displacement_size = 4;
+ }
+ } else {
+ decode->displacement_size = disp32_tbl[mod][rm];
+ }
+
+ if (decode->displacement_size) {
+ decode->displacement = (uint32_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ }
+}
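
The displacement tables are indexed by modrm.mod (rows) and modrm.rm (columns); the -1 sentinel marks the SIB case, where a 4-byte displacement is present only when the SIB base field is 5. A hedged standalone sketch of the 32/64-bit rule (illustrative only):

#include <stdio.h>

/* Displacement size for 32/64-bit ModRM, mirroring disp32_tbl plus the
 * SIB base == 5 special case handled in decode_displacement() above. */
static int disp_size32(int mod, int rm, int sib_base)
{
    static const int tbl[4][8] = {
        {0, 0, 0, 0, -1, 4, 0, 0},
        {1, 1, 1, 1,  1, 1, 1, 1},
        {4, 4, 4, 4,  4, 4, 4, 4},
        {0, 0, 0, 0,  0, 0, 0, 0},
    };
    int size = tbl[mod][rm];

    if (size == -1) {                 /* rm == 4: size depends on SIB base */
        size = (sib_base == 5) ? 4 : 0;
    }
    return size;
}

int main(void)
{
    printf("%d %d\n", disp_size32(0, 5, 0), disp_size32(0, 4, 5));
    return 0;
}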
+
+static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->modrm.modrm = decode_byte(env, decode);
+ decode->is_modrm = true;
+
+ decode_sib(env, decode);
+ decode_displacement(env, decode);
+}
+
+static inline void decode_opcode_general(CPUX86State *env,
+ struct x86_decode *decode,
+ uint8_t opcode,
+ struct decode_tbl *inst_decoder)
+{
+ decode->cmd = inst_decoder->cmd;
+ if (inst_decoder->operand_size) {
+ decode->operand_size = inst_decoder->operand_size;
+ }
+
+ if (inst_decoder->is_modrm) {
+ decode_modrm(env, decode);
+ }
+ if (inst_decoder->decode_op1) {
+ inst_decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (inst_decoder->decode_op2) {
+ inst_decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (inst_decoder->decode_op3) {
+ inst_decoder->decode_op3(env, decode, &decode->op[2]);
+ }
+ if (inst_decoder->decode_op4) {
+ inst_decoder->decode_op4(env, decode, &decode->op[3]);
+ }
+ if (inst_decoder->decode_postfix) {
+ inst_decoder->decode_postfix(env, decode);
+ }
+}
+
+static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
+static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
+static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t opcode;
+
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ if (opcode != OPCODE_ESCAPE) {
+ decode_opcode_1(env, decode, opcode);
+ } else {
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ decode_opcode_2(env, decode, opcode);
+ }
+}
+
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
+{
+ memset(decode, 0, sizeof(*decode));
+ decode_prefix(env, decode);
+ set_addressing_size(env, decode);
+ set_operand_size(env, decode);
+
+ decode_opcodes(env, decode);
+
+ return decode->len;
+}
+
+void init_decoder(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) {
+ memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
+ memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {
+ memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));
+ }
+ for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {
+ _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) {
+ _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {
+ int index = ((_x87_inst[i].opcode & 0xf) << 4) |
+ ((_x87_inst[i].modrm_mod & 1) << 3) |
+ _x87_inst[i].modrm_reg;
+ _decode_tbl3[index] = _x87_inst[i];
+ }
+}
+
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd)
+{
+ static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG",
+ "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT",
+ "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD",
+ "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST",
+ "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR",
+ "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG",
+ "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN",
+ "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW",
+ "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR",
+ "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR",
+ "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", "DIV", "IDIV", "MUL",
+ "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS",
+ "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT",
+ "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA",
+ "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT",
+ "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES",
+ "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT",
+ "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW",
+ "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV",
+ "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE",
+ "PREFETCH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW",
+ "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"};
+ return cmds[cmd];
+}
+
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, X86Seg seg)
+{
+ switch (decode->segment_override) {
+ case PREFIX_CS_SEG_OVERRIDE:
+ seg = R_CS;
+ break;
+ case PREFIX_SS_SEG_OVERRIDE:
+ seg = R_SS;
+ break;
+ case PREFIX_DS_SEG_OVERRIDE:
+ seg = R_DS;
+ break;
+ case PREFIX_ES_SEG_OVERRIDE:
+ seg = R_ES;
+ break;
+ case PREFIX_FS_SEG_OVERRIDE:
+ seg = R_FS;
+ break;
+ case PREFIX_GS_SEG_OVERRIDE:
+ seg = R_GS;
+ break;
+ default:
+ break;
+ }
+ return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg);
+}
diff --git a/target/i386/emulate/x86_decode.h b/target/i386/emulate/x86_decode.h
new file mode 100644
index 0000000..927645a
--- /dev/null
+++ b/target/i386/emulate/x86_decode.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_EMU_DECODE_H
+#define X86_EMU_DECODE_H
+
+#include "cpu.h"
+#include "x86.h"
+
+typedef enum x86_prefix {
+ /* group 1 */
+ PREFIX_LOCK = 0xf0,
+ PREFIX_REPN = 0xf2,
+ PREFIX_REP = 0xf3,
+ /* group 2 */
+ PREFIX_CS_SEG_OVERRIDE = 0x2e,
+ PREFIX_SS_SEG_OVERRIDE = 0x36,
+ PREFIX_DS_SEG_OVERRIDE = 0x3e,
+ PREFIX_ES_SEG_OVERRIDE = 0x26,
+ PREFIX_FS_SEG_OVERRIDE = 0x64,
+ PREFIX_GS_SEG_OVERRIDE = 0x65,
+ /* group 3 */
+ PREFIX_OP_SIZE_OVERRIDE = 0x66,
+ /* group 4 */
+ PREFIX_ADDR_SIZE_OVERRIDE = 0x67,
+
+ PREFIX_REX = 0x40,
+} x86_prefix;
+
+enum x86_decode_cmd {
+ X86_DECODE_CMD_INVL = 0,
+
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_PUSH_SEG,
+ X86_DECODE_CMD_POP,
+ X86_DECODE_CMD_POP_SEG,
+ X86_DECODE_CMD_MOV,
+ X86_DECODE_CMD_MOVSX,
+ X86_DECODE_CMD_MOVZX,
+ X86_DECODE_CMD_CALL_NEAR,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR,
+ X86_DECODE_RET_NEAR,
+ X86_DECODE_RET_FAR,
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP,
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_JMP_NEAR,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_LEA,
+ X86_DECODE_CMD_JXX,
+ X86_DECODE_CMD_JCXZ,
+ X86_DECODE_CMD_SETXX,
+ X86_DECODE_CMD_MOV_TO_SEG,
+ X86_DECODE_CMD_MOV_FROM_SEG,
+ X86_DECODE_CMD_CLI,
+ X86_DECODE_CMD_STI,
+ X86_DECODE_CMD_CLD,
+ X86_DECODE_CMD_STD,
+ X86_DECODE_CMD_STC,
+ X86_DECODE_CMD_CLC,
+ X86_DECODE_CMD_OUT,
+ X86_DECODE_CMD_IN,
+ X86_DECODE_CMD_INS,
+ X86_DECODE_CMD_OUTS,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_RDTSCP,
+ X86_DECODE_CMD_INVLPG,
+ X86_DECODE_CMD_MOV_TO_CR,
+ X86_DECODE_CMD_MOV_FROM_CR,
+ X86_DECODE_CMD_MOV_TO_DR,
+ X86_DECODE_CMD_MOV_FROM_DR,
+ X86_DECODE_CMD_PUSHF,
+ X86_DECODE_CMD_POPF,
+ X86_DECODE_CMD_CPUID,
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHRD,
+ X86_DECODE_CMD_SHLD,
+ X86_DECODE_CMD_SAR,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_3,
+ X86_DECODE_CMD_IMUL_2,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_MOVS,
+ X86_DECODE_CMD_CMPS,
+ X86_DECODE_CMD_SCAS,
+ X86_DECODE_CMD_LODS,
+ X86_DECODE_CMD_STOS,
+ X86_DECODE_CMD_BSWAP,
+ X86_DECODE_CMD_XCHG,
+ X86_DECODE_CMD_RDTSC,
+ X86_DECODE_CMD_RDMSR,
+ X86_DECODE_CMD_WRMSR,
+ X86_DECODE_CMD_ENTER,
+ X86_DECODE_CMD_LEAVE,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTC,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BSF,
+ X86_DECODE_CMD_BSR,
+ X86_DECODE_CMD_IRET,
+ X86_DECODE_CMD_INT,
+ X86_DECODE_CMD_POPA,
+ X86_DECODE_CMD_PUSHA,
+ X86_DECODE_CMD_CWD,
+ X86_DECODE_CMD_CBW,
+ X86_DECODE_CMD_DAS,
+ X86_DECODE_CMD_AAD,
+ X86_DECODE_CMD_AAM,
+ X86_DECODE_CMD_AAS,
+ X86_DECODE_CMD_LOOP,
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_SAHF,
+ X86_DECODE_CMD_LAHF,
+ X86_DECODE_CMD_WBINVD,
+ X86_DECODE_CMD_LDS,
+ X86_DECODE_CMD_LSS,
+ X86_DECODE_CMD_LES,
+ X86_DECODE_XMD_LGS,
+ X86_DECODE_CMD_LFS,
+ X86_DECODE_CMD_CMC,
+ X86_DECODE_CMD_XLAT,
+ X86_DECODE_CMD_NOP,
+ X86_DECODE_CMD_CMOV,
+ X86_DECODE_CMD_CLTS,
+ X86_DECODE_CMD_XADD,
+ X86_DECODE_CMD_HLT,
+ X86_DECODE_CMD_CMPXCHG8B,
+ X86_DECODE_CMD_CMPXCHG,
+ X86_DECODE_CMD_POPCNT,
+
+ X86_DECODE_CMD_FNINIT,
+ X86_DECODE_CMD_FLD,
+ X86_DECODE_CMD_FLDxx,
+ X86_DECODE_CMD_FNSTCW,
+ X86_DECODE_CMD_FNSTSW,
+ X86_DECODE_CMD_FNSETPM,
+ X86_DECODE_CMD_FSAVE,
+ X86_DECODE_CMD_FRSTOR,
+ X86_DECODE_CMD_FXSAVE,
+ X86_DECODE_CMD_FXRSTOR,
+ X86_DECODE_CMD_FDIV,
+ X86_DECODE_CMD_FMUL,
+ X86_DECODE_CMD_FSUB,
+ X86_DECODE_CMD_FADD,
+ X86_DECODE_CMD_EMMS,
+ X86_DECODE_CMD_MFENCE,
+ X86_DECODE_CMD_SFENCE,
+ X86_DECODE_CMD_LFENCE,
+ X86_DECODE_CMD_PREFETCH,
+ X86_DECODE_CMD_CLFLUSH,
+ X86_DECODE_CMD_FST,
+ X86_DECODE_CMD_FABS,
+ X86_DECODE_CMD_FUCOM,
+ X86_DECODE_CMD_FUCOMI,
+ X86_DECODE_CMD_FLDCW,
+ X86_DECODE_CMD_FXCH,
+ X86_DECODE_CMD_FCHS,
+ X86_DECODE_CMD_FCMOV,
+ X86_DECODE_CMD_FRNDINT,
+ X86_DECODE_CMD_FXAM,
+
+ X86_DECODE_CMD_LAST,
+};
+
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
+
+typedef struct x86_modrm {
+ union {
+ uint8_t modrm;
+ struct {
+ uint8_t rm:3;
+ uint8_t reg:3;
+ uint8_t mod:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_modrm;
+
+typedef struct x86_sib {
+ union {
+ uint8_t sib;
+ struct {
+ uint8_t base:3;
+ uint8_t index:3;
+ uint8_t scale:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_sib;
+
+typedef struct x86_rex {
+ union {
+ uint8_t rex;
+ struct {
+ uint8_t b:1;
+ uint8_t x:1;
+ uint8_t r:1;
+ uint8_t w:1;
+ uint8_t unused:4;
+ };
+ };
+} __attribute__ ((__packed__)) x86_rex;
+
+typedef enum x86_var_type {
+ X86_VAR_IMMEDIATE,
+ X86_VAR_OFFSET,
+ X86_VAR_REG,
+ X86_VAR_RM,
+
+ /* for floating point computations */
+ X87_VAR_REG,
+ X87_VAR_FLOATP,
+ X87_VAR_INTP,
+ X87_VAR_BYTEP,
+} x86_var_type;
+
+typedef struct x86_decode_op {
+ enum x86_var_type type;
+ int size;
+
+ int reg;
+ target_ulong val;
+
+ union {
+ target_ulong addr;
+ void *regptr;
+ };
+} x86_decode_op;
+
+typedef struct x86_decode {
+ int len;
+ uint8_t opcode[4];
+ uint8_t opcode_len;
+ enum x86_decode_cmd cmd;
+ int addressing_size;
+ int operand_size;
+ int lock;
+ int rep;
+ int op_size_override;
+ int addr_size_override;
+ int segment_override;
+ int control_change_inst;
+ bool fwait;
+ bool fpop_stack;
+ bool frev;
+
+ uint32_t displacement;
+ uint8_t displacement_size;
+ struct x86_rex rex;
+ bool is_modrm;
+ bool sib_present;
+ struct x86_sib sib;
+ struct x86_modrm modrm;
+ struct x86_decode_op op[4];
+ bool is_fpu;
+} x86_decode;
+
+uint64_t sign(uint64_t val, int size);
+
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
+
+void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, enum X86Seg seg);
+
+void init_decoder(void);
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
+void set_operand_size(CPUX86State *env, struct x86_decode *decode);
+
+#endif
diff --git a/target/i386/emulate/x86_emu.c b/target/i386/emulate/x86_emu.c
new file mode 100644
index 0000000..db7a7f7
--- /dev/null
+++ b/target/i386/emulate/x86_emu.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
+/////////////////////////////////////////////////////////////////////////
+
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "x86_decode.h"
+#include "x86.h"
+#include "x86_emu.h"
+#include "x86_flags.h"
+
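+/*
+ * Fetch both operands, apply "cmd" at the current operand size, optionally
+ * write the result back to operand 0, and update the lazy flags through the
+ * size-suffixed FLAGS_FUNC helper.
+ */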
+#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
+{ \
+ fetch_operands(env, decode, 2, true, true, false); \
+ switch (decode->operand_size) { \
+ case 1: \
+ { \
+ uint8_t v1 = (uint8_t)decode->op[0].val; \
+ uint8_t v2 = (uint8_t)decode->op[1].val; \
+ uint8_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, &decode->op[0], diff, 1); \
+ } \
+ FLAGS_FUNC##8(env, v1, v2, diff); \
+ break; \
+ } \
+ case 2: \
+ { \
+ uint16_t v1 = (uint16_t)decode->op[0].val; \
+ uint16_t v2 = (uint16_t)decode->op[1].val; \
+ uint16_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, &decode->op[0], diff, 2); \
+ } \
+ FLAGS_FUNC##16(env, v1, v2, diff); \
+ break; \
+ } \
+ case 4: \
+ { \
+ uint32_t v1 = (uint32_t)decode->op[0].val; \
+ uint32_t v2 = (uint32_t)decode->op[1].val; \
+ uint32_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, &decode->op[0], diff, 4); \
+ } \
+ FLAGS_FUNC##32(env, v1, v2, diff); \
+ break; \
+ } \
+ default: \
+ VM_PANIC("bad size\n"); \
+ } \
+} \
+
+target_ulong read_reg(CPUX86State *env, int reg, int size)
+{
+ switch (size) {
+ case 1:
+ return x86_reg(env, reg)->lx;
+ case 2:
+ return x86_reg(env, reg)->rx;
+ case 4:
+ return x86_reg(env, reg)->erx;
+ case 8:
+ return x86_reg(env, reg)->rrx;
+ default:
+ abort();
+ }
+ return 0;
+}
+
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ x86_reg(env, reg)->lx = val;
+ break;
+ case 2:
+ x86_reg(env, reg)->rx = val;
+ break;
+ case 4:
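+ /* a 32-bit write zero-extends into the full 64-bit register */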
+ x86_reg(env, reg)->rrx = (uint32_t)val;
+ break;
+ case 8:
+ x86_reg(env, reg)->rrx = val;
+ break;
+ default:
+ abort();
+ }
+}
+
+target_ulong read_val_from_reg(void *reg_ptr, int size)
+{
+ target_ulong val;
+
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)reg_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)reg_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)reg_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)reg_ptr;
+ break;
+ default:
+ abort();
+ }
+ return val;
+}
+
+void write_val_to_reg(void *reg_ptr, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ *(uint8_t *)reg_ptr = val;
+ break;
+ case 2:
+ *(uint16_t *)reg_ptr = val;
+ break;
+ case 4:
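+ /* as in write_reg(), a 32-bit store zero-extends the 64-bit register */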
+ *(uint64_t *)reg_ptr = (uint32_t)val;
+ break;
+ case 8:
+ *(uint64_t *)reg_ptr = val;
+ break;
+ default:
+ abort();
+ }
+}
+
+static void write_val_to_mem(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
+{
+ emul_ops->write_mem(env_cpu(env), &val, ptr, size);
+}
+
+void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size)
+{
+ if (decode->type == X86_VAR_REG) {
+ write_val_to_reg(decode->regptr, val, size);
+ } else {
+ write_val_to_mem(env, decode->addr, val, size);
+ }
+}
+
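+/*
+ * Read guest memory through emul_ops->read_mem into the per-vCPU bounce
+ * buffer and return a pointer to it.
+ */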
+uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
+{
+ emul_ops->read_mem(env_cpu(env), env->emu_mmio_buf, ptr, bytes);
+ return env->emu_mmio_buf;
+}
+
+
+static target_ulong read_val_from_mem(CPUX86State *env, target_long ptr, int size)
+{
+ target_ulong val;
+ uint8_t *mmio_ptr;
+
+ mmio_ptr = read_mmio(env, ptr, size);
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)mmio_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)mmio_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)mmio_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)mmio_ptr;
+ break;
+ default:
+ VM_PANIC("bad size\n");
+ break;
+ }
+ return val;
+}
+
+target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size)
+{
+ if (decode->type == X86_VAR_REG) {
+ return read_val_from_reg(decode->regptr, size);
+ } else {
+ return read_val_from_mem(env, decode->addr, size);
+ }
+}
+
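+/*
+ * Resolve the addresses of the first "n" operands (ModRM and absolute
+ * offset forms) and, when the corresponding val_opN flag is set, also load
+ * their current values.
+ */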
+static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
+ int n, bool val_op0, bool val_op1, bool val_op2)
+{
+ int i;
+ bool calc_val[3] = {val_op0, val_op1, val_op2};
+
+ for (i = 0; i < n; i++) {
+ switch (decode->op[i].type) {
+ case X86_VAR_IMMEDIATE:
+ break;
+ case X86_VAR_REG:
+ VM_PANIC_ON(!decode->op[i].regptr);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_from_reg(decode->op[i].regptr,
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_RM:
+ calc_modrm_operand(env, decode, &decode->op[i]);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, &decode->op[i],
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_OFFSET:
+ decode->op[i].addr = decode_linear_addr(env, decode,
+ decode->op[i].addr,
+ R_DS);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, &decode->op[i],
+ decode->operand_size);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void exec_mov(CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, false, true, false);
+ write_val_ext(env, &decode->op[0], decode->op[1].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_add(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ env->eip += decode->len;
+}
+
+static void exec_or(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_adc(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
+ env->eip += decode->len;
+}
+
+static void exec_sbb(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_and(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_sub(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_xor(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_neg(CPUX86State *env, struct x86_decode *decode)
+{
+ /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
+ int32_t val;
+ fetch_operands(env, decode, 2, true, true, false);
+
+ val = 0 - sign(decode->op[1].val, decode->operand_size);
+ write_val_ext(env, &decode->op[1], val, decode->operand_size);
+
+ if (4 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
+ } else if (2 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
+ } else if (1 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
+ } else {
+ VM_PANIC("bad op size\n");
+ }
+
+ /*lflags_to_rflags(env);*/
+ env->eip += decode->len;
+}
+
+static void exec_cmp(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ env->eip += decode->len;
+}
+
+static void exec_inc(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
+
+ env->eip += decode->len;
+}
+
+static void exec_dec(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_tst(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
+ env->eip += decode->len;
+}
+
+static void exec_not(CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 1, true, false, false);
+
+ write_val_ext(env, &decode->op[0], ~decode->op[0].val,
+ decode->operand_size);
+ env->eip += decode->len;
+}
+
+void exec_movzx(CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 1, false, false, false);
+
+ if (0xb6 == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = read_val_ext(env, &decode->op[1], src_op_size);
+ write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_out(CPUX86State *env, struct x86_decode *decode)
+{
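+ /* as used here, handle_io() direction 1 writes to the port, 0 reads */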
+ switch (decode->opcode[0]) {
+ case 0xe6:
+ emul_ops->handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
+ break;
+ case 0xe7:
+ emul_ops->handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
+ decode->operand_size, 1);
+ break;
+ case 0xee:
+ emul_ops->handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
+ break;
+ case 0xef:
+ emul_ops->handle_io(env_cpu(env), DX(env), &RAX(env), 1,
+ decode->operand_size, 1);
+ break;
+ default:
+ VM_PANIC("Bad out opcode\n");
+ break;
+ }
+ env->eip += decode->len;
+}
+
+static void exec_in(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong val = 0;
+ switch (decode->opcode[0]) {
+ case 0xe4:
+ emul_ops->handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
+ break;
+ case 0xe5:
+ emul_ops->handle_io(env_cpu(env), decode->op[0].val, &val, 0,
+ decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+ break;
+ case 0xec:
+ emul_ops->handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
+ break;
+ case 0xed:
+ emul_ops->handle_io(env_cpu(env), DX(env), &val, 0,
+ decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+
+ break;
+ default:
+ VM_PANIC("Bad in opcode\n");
+ break;
+ }
+
+ env->eip += decode->len;
+}
+
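+/*
+ * Advance the given index register (RSI or RDI) by the operand size,
+ * stepping backwards when the direction flag is set.
+ */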
+static inline void string_increment_reg(CPUX86State *env, int reg,
+ struct x86_decode *decode)
+{
+ target_ulong val = read_reg(env, reg, decode->addressing_size);
+ if (env->eflags & DF_MASK) {
+ val -= decode->operand_size;
+ } else {
+ val += decode->operand_size;
+ }
+ write_reg(env, reg, val, decode->addressing_size);
+}
+
+static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
+ void (*func)(CPUX86State *env,
+ struct x86_decode *ins), int rep)
+{
+ target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
+ while (rcx--) {
+ func(env, decode);
+ write_reg(env, R_ECX, rcx, decode->addressing_size);
+ if ((PREFIX_REP == rep) && !env->cc_dst) {
+ break;
+ }
+ if ((PREFIX_REPN == rep) && env->cc_dst) {
+ break;
+ }
+ }
+}
+
+static void exec_ins_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ emul_ops->handle_io(env_cpu(env), DX(env), env->emu_mmio_buf, 0,
+ decode->operand_size, 1);
+ emul_ops->write_mem(env_cpu(env), env->emu_mmio_buf, addr,
+ decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_ins(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_ins_single, 0);
+ } else {
+ exec_ins_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_outs_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+
+ emul_ops->read_mem(env_cpu(env), env->emu_mmio_buf, addr,
+ decode->operand_size);
+ emul_ops->handle_io(env_cpu(env), DX(env), env->emu_mmio_buf, 1,
+ decode->operand_size, 1);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_outs(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_outs_single, 0);
+ } else {
+ exec_outs_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+ target_ulong val;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ val = read_val_from_mem(env, src_addr, decode->operand_size);
+ write_val_to_mem(env, dst_addr, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_movs(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_movs_single, 0);
+ } else {
+ exec_movs_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = read_val_from_mem(env, src_addr, decode->operand_size);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = read_val_from_mem(env, dst_addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_cmps(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_cmps_single, decode->rep);
+ } else {
+ exec_cmps_single(env, decode);
+ }
+ env->eip += decode->len;
+}
+
+
+static void exec_stos_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val;
+
+ addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+ val = read_reg(env, R_EAX, decode->operand_size);
+ emul_ops->write_mem(env_cpu(env), &val, addr, decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+
+static void exec_stos(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_stos_single, 0);
+ } else {
+ exec_stos_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_scas_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+
+ addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ emul_ops->read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_scas(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = R_EAX;
+ if (decode->rep) {
+ string_rep(env, decode, exec_scas_single, decode->rep);
+ } else {
+ exec_scas_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_lods_single(CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val = 0;
+
+ addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ emul_ops->read_mem(env_cpu(env), &val, addr, decode->operand_size);
+ write_reg(env, R_EAX, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_lods(CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_lods_single, 0);
+ } else {
+ exec_lods_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code)
+{
+ env->exception_nr = exception_index;
+ env->error_code = error_code;
+ env->has_error_code = true;
+ env->exception_injected = 1;
+}
+
+static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode)
+{
+ emul_ops->simulate_rdmsr(env_cpu(env));
+ env->eip += decode->len;
+}
+
+static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode)
+{
+ emul_ops->simulate_wrmsr(env_cpu(env));
+ env->eip += decode->len;
+}
+
+/*
+ * flag:
+ * 0 - bt, 1 - btc, 2 - bts, 3 - btr
+ */
+static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
+{
+ int32_t displacement;
+ uint8_t index;
+ bool cf;
+ int mask = (4 == decode->operand_size) ? 0x1f : 0xf;
+
+ VM_PANIC_ON(decode->rex.rex);
+
+ fetch_operands(env, decode, 2, false, true, false);
+ index = decode->op[1].val & mask;
+
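+ /*
+ * For memory operands the upper bits of the bit offset displace the
+ * effective address in operand-size units; "index" selects the bit
+ * inside the addressed word.
+ */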
+ if (decode->op[0].type != X86_VAR_REG) {
+ if (4 == decode->operand_size) {
+ displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
+ decode->op[0].addr += 4 * displacement;
+ } else if (2 == decode->operand_size) {
+ displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
+ decode->op[0].addr += 2 * displacement;
+ } else {
+ VM_PANIC("bt 64bit\n");
+ }
+ }
+ decode->op[0].val = read_val_ext(env, &decode->op[0],
+ decode->operand_size);
+ cf = (decode->op[0].val >> index) & 0x01;
+
+ switch (flag) {
+ case 0:
+ set_CF(env, cf);
+ return;
+ case 1:
+ decode->op[0].val ^= (1u << index);
+ break;
+ case 2:
+ decode->op[0].val |= (1u << index);
+ break;
+ case 3:
+ decode->op[0].val &= ~(1u << index);
+ break;
+ }
+ write_val_ext(env, &decode->op[0], decode->op[0].val,
+ decode->operand_size);
+ set_CF(env, cf);
+}
+
+static void exec_bt(CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 0);
+ env->eip += decode->len;
+}
+
+static void exec_btc(CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 1);
+ env->eip += decode->len;
+}
+
+static void exec_btr(CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 3);
+ env->eip += decode->len;
+}
+
+static void exec_bts(CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 2);
+ env->eip += decode->len;
+}
+
+void exec_shl(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+
+ count = decode->op[1].val;
+ count &= 0x1f; /* count is masked to 5 bits*/
+ if (!count) {
+ goto exit;
+ }
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t res = 0;
+ if (count <= 8) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (8 - count)) & 0x1;
+ of = cf ^ (res >> 7);
+ }
+
+ write_val_ext(env, &decode->op[0], res, 1);
+ SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res = 0;
+
+ /* from bochs */
+ if (count <= 16) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ }
+
+ write_val_ext(env, &decode->op[0], res, 2);
+ SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res = decode->op[0].val << count;
+
+ write_val_ext(env, &decode->op[0], res, 4);
+ SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
+ cf = (decode->op[0].val >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ default:
+ abort();
+ }
+
+exit:
+ /* lflags_to_rflags(env); */
+ env->eip += decode->len;
+}
+
+void exec_movsx(CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 2, false, false, false);
+
+ if (0xbe == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = sign(read_val_ext(env, &decode->op[1], src_op_size),
+ src_op_size);
+
+ write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
+
+ env->eip += decode->len;
+}
+
+void exec_ror(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit6, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
+ bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ } else {
+ count &= 0x7; /* use only bottom 3 bits */
+ res = ((uint8_t)decode->op[0].val >> count) |
+ ((uint8_t)decode->op[0].val << (8 - count));
+ write_val_ext(env, &decode->op[0], res, 1);
+ bit6 = (res >> 6) & 1;
+ bit7 = (res >> 7) & 1;
+ /* set eflags: ROR count affects the following flags: C, O */
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit14, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
+ bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ } else {
+ count &= 0x0f; /* use only 4 LSB's */
+ res = ((uint16_t)decode->op[0].val >> count) |
+ ((uint16_t)decode->op[0].val << (16 - count));
+ write_val_ext(env, &decode->op[0], res, 2);
+
+ bit14 = (res >> 14) & 1;
+ bit15 = (res >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit31, bit30;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val >> count) |
+ ((uint32_t)decode->op[0].val << (32 - count));
+ write_val_ext(env, &decode->op[0], res, 4);
+
+ bit31 = (res >> 31) & 1;
+ bit30 = (res >> 30) & 1;
+ /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
+ }
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+void exec_rol(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit0, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit0 = ((uint8_t)decode->op[0].val & 1);
+ bit7 = ((uint8_t)decode->op[0].val >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ } else {
+ count &= 0x7; /* use only lowest 3 bits */
+ res = ((uint8_t)decode->op[0].val << count) |
+ ((uint8_t)decode->op[0].val >> (8 - count));
+
+ write_val_ext(env, &decode->op[0], res, 1);
+ /* set eflags:
+ * ROL count affects the following flags: C, O
+ */
+ bit0 = (res & 1);
+ bit7 = (res >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit0, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit0 = ((uint16_t)decode->op[0].val & 0x1);
+ bit15 = ((uint16_t)decode->op[0].val >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ } else {
+ count &= 0x0f; /* only use bottom 4 bits */
+ res = ((uint16_t)decode->op[0].val << count) |
+ ((uint16_t)decode->op[0].val >> (16 - count));
+
+ write_val_ext(env, &decode->op[0], res, 2);
+ bit0 = (res & 0x1);
+ bit15 = (res >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit0, bit31;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val << count) |
+ ((uint32_t)decode->op[0].val >> (32 - count));
+
+ write_val_ext(env, &decode->op[0], res, 4);
+ bit0 = (res & 0x1);
+ bit31 = (res >> 31);
+ /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
+ }
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+
+void exec_rcl(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+ count %= 9;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_8 << 1) | get_CF(env);
+ } else {
+ res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
+ (op1_8 >> (9 - count));
+ }
+
+ write_val_ext(env, &decode->op[0], res, 1);
+
+ cf = (op1_8 >> (8 - count)) & 0x01;
+ of = cf ^ (res >> 7); /* of = cf ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res;
+ uint16_t op1_16 = decode->op[0].val;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_16 << 1) | get_CF(env);
+ } else if (count == 16) {
+ res = (get_CF(env) << 15) | (op1_16 >> 1);
+ } else { /* 2..15 */
+ res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
+ (op1_16 >> (17 - count));
+ }
+
+ write_val_ext(env, &decode->op[0], res, 2);
+
+ cf = (op1_16 >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 << 1) | get_CF(env);
+ } else {
+ res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
+ (op1_32 >> (33 - count));
+ }
+
+ write_val_ext(env, &decode->op[0], res, 4);
+
+ cf = (op1_32 >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+void exec_rcr(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+
+ count %= 9;
+ if (!count) {
+ break;
+ }
+ res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
+ (op1_8 << (9 - count));
+
+ write_val_ext(env, &decode->op[0], res, 1);
+
+ cf = (op1_8 >> (count - 1)) & 0x1;
+ of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t op1_16 = decode->op[0].val;
+ uint16_t res;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+ res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
+ (op1_16 << (17 - count));
+
+ write_val_ext(env, &decode->op[0], res, 2);
+
+ cf = (op1_16 >> (count - 1)) & 0x1;
+ of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
+ result14 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 >> 1) | (get_CF(env) << 31);
+ } else {
+ res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
+ (op1_32 << (33 - count));
+ }
+
+ write_val_ext(env, &decode->op[0], res, 4);
+
+ cf = (op1_32 >> (count - 1)) & 0x1;
+ of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, true, true, false);
+
+ write_val_ext(env, &decode->op[0], decode->op[1].val,
+ decode->operand_size);
+ write_val_ext(env, &decode->op[1], decode->op[0].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ write_val_ext(env, &decode->op[1], decode->op[0].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static struct cmd_handler {
+ enum x86_decode_cmd cmd;
+ void (*handler)(CPUX86State *env, struct x86_decode *ins);
+} handlers[] = {
+ {X86_DECODE_CMD_INVL, NULL,},
+ {X86_DECODE_CMD_MOV, exec_mov},
+ {X86_DECODE_CMD_ADD, exec_add},
+ {X86_DECODE_CMD_OR, exec_or},
+ {X86_DECODE_CMD_ADC, exec_adc},
+ {X86_DECODE_CMD_SBB, exec_sbb},
+ {X86_DECODE_CMD_AND, exec_and},
+ {X86_DECODE_CMD_SUB, exec_sub},
+ {X86_DECODE_CMD_NEG, exec_neg},
+ {X86_DECODE_CMD_XOR, exec_xor},
+ {X86_DECODE_CMD_CMP, exec_cmp},
+ {X86_DECODE_CMD_INC, exec_inc},
+ {X86_DECODE_CMD_DEC, exec_dec},
+ {X86_DECODE_CMD_TST, exec_tst},
+ {X86_DECODE_CMD_NOT, exec_not},
+ {X86_DECODE_CMD_MOVZX, exec_movzx},
+ {X86_DECODE_CMD_OUT, exec_out},
+ {X86_DECODE_CMD_IN, exec_in},
+ {X86_DECODE_CMD_INS, exec_ins},
+ {X86_DECODE_CMD_OUTS, exec_outs},
+ {X86_DECODE_CMD_RDMSR, exec_rdmsr},
+ {X86_DECODE_CMD_WRMSR, exec_wrmsr},
+ {X86_DECODE_CMD_BT, exec_bt},
+ {X86_DECODE_CMD_BTR, exec_btr},
+ {X86_DECODE_CMD_BTC, exec_btc},
+ {X86_DECODE_CMD_BTS, exec_bts},
+ {X86_DECODE_CMD_SHL, exec_shl},
+ {X86_DECODE_CMD_ROL, exec_rol},
+ {X86_DECODE_CMD_ROR, exec_ror},
+ {X86_DECODE_CMD_RCR, exec_rcr},
+ {X86_DECODE_CMD_RCL, exec_rcl},
+ /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
+ {X86_DECODE_CMD_MOVS, exec_movs},
+ {X86_DECODE_CMD_CMPS, exec_cmps},
+ {X86_DECODE_CMD_STOS, exec_stos},
+ {X86_DECODE_CMD_SCAS, exec_scas},
+ {X86_DECODE_CMD_LODS, exec_lods},
+ {X86_DECODE_CMD_MOVSX, exec_movsx},
+ {X86_DECODE_CMD_XCHG, exec_xchg},
+ {X86_DECODE_CMD_XADD, exec_xadd},
+};
+
+static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
+
+const struct x86_emul_ops *emul_ops;
+
+static void init_cmd_handler(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ _cmd_handler[handlers[i].cmd] = handlers[i];
+ }
+}
+
+bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
+{
+ if (!_cmd_handler[ins->cmd].handler) {
+ printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x) \n", env->eip,
+ ins->cmd, ins->opcode[0],
+ ins->opcode_len > 1 ? ins->opcode[1] : 0);
+ env->eip += ins->len;
+ return true;
+ }
+
+ _cmd_handler[ins->cmd].handler(env, ins);
+ return true;
+}
+
+void init_emu(const struct x86_emul_ops *o)
+{
+ emul_ops = o;
+ init_cmd_handler();
+}
diff --git a/target/i386/emulate/x86_emu.h b/target/i386/emulate/x86_emu.h
new file mode 100644
index 0000000..a1a9612
--- /dev/null
+++ b/target/i386/emulate/x86_emu.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_EMU_H
+#define X86_EMU_H
+
+#include "x86.h"
+#include "x86_decode.h"
+#include "cpu.h"
+
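+/*
+ * Callbacks supplied by the embedding accelerator for guest memory access,
+ * segment descriptor lookup, port I/O and MSR emulation.
+ */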
+struct x86_emul_ops {
+ void (*read_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes);
+ void (*write_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes);
+ void (*read_segment_descriptor)(CPUState *cpu, struct x86_segment_descriptor *desc,
+ enum X86Seg seg);
+ void (*handle_io)(CPUState *cpu, uint16_t port, void *data, int direction,
+ int size, int count);
+ void (*simulate_rdmsr)(CPUState *cs);
+ void (*simulate_wrmsr)(CPUState *cs);
+};
+
+extern const struct x86_emul_ops *emul_ops;
+
+void init_emu(const struct x86_emul_ops *ops);
+bool exec_instruction(CPUX86State *env, struct x86_decode *ins);
+void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code);
+
+target_ulong read_reg(CPUX86State *env, int reg, int size);
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
+target_ulong read_val_from_reg(void *reg_ptr, int size);
+void write_val_to_reg(void *reg_ptr, target_ulong val, int size);
+void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size);
+uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes);
+target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size);
+
+void exec_movzx(CPUX86State *env, struct x86_decode *decode);
+void exec_shl(CPUX86State *env, struct x86_decode *decode);
+void exec_movsx(CPUX86State *env, struct x86_decode *decode);
+void exec_ror(CPUX86State *env, struct x86_decode *decode);
+void exec_rol(CPUX86State *env, struct x86_decode *decode);
+void exec_rcl(CPUX86State *env, struct x86_decode *decode);
+void exec_rcr(CPUX86State *env, struct x86_decode *decode);
+#endif
diff --git a/target/i386/emulate/x86_flags.c b/target/i386/emulate/x86_flags.c
new file mode 100644
index 0000000..6592193
--- /dev/null
+++ b/target/i386/emulate/x86_flags.c
@@ -0,0 +1,273 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
+/////////////////////////////////////////////////////////////////////////
+/*
+ * flags functions
+ */
+
+#include "qemu/osdep.h"
+
+#include "panic.h"
+#include "cpu.h"
+#include "x86_flags.h"
+#include "x86.h"
+
+
+/*
+ * The algorithms here are similar to those in Bochs. After an ALU
+ * operation, CC_DST can be used to compute ZF, SF and PF, whereas
+ * CC_SRC is used to compute AF, CF and OF. In reality, SF and PF are the
+ * XOR of the value computed from CC_DST and the value found in bits 7 and 2
+ * of CC_SRC; this way the same logic can be used to compute the flags
+ * both before and after an ALU operation.
+ *
+ * Compared to the TCG CC_OP codes, this avoids conditionals when converting
+ * to and from the RFLAGS representation.
+ */
+
+#define LF_SIGN_BIT (TARGET_LONG_BITS - 1)
+
+#define LF_BIT_PD (2) /* lazy Parity Delta, same bit as PF */
+#define LF_BIT_AF (3) /* lazy Adjust flag */
+#define LF_BIT_SD (7) /* lazy Sign Flag Delta, same bit as SF */
+#define LF_BIT_CF (TARGET_LONG_BITS - 1) /* lazy Carry Flag */
+#define LF_BIT_PO (TARGET_LONG_BITS - 2) /* lazy Partial Overflow = CF ^ OF */
+
+#define LF_MASK_PD ((target_ulong)0x01 << LF_BIT_PD)
+#define LF_MASK_AF ((target_ulong)0x01 << LF_BIT_AF)
+#define LF_MASK_SD ((target_ulong)0x01 << LF_BIT_SD)
+#define LF_MASK_CF ((target_ulong)0x01 << LF_BIT_CF)
+#define LF_MASK_PO ((target_ulong)0x01 << LF_BIT_PO)
+
+/* ******************* */
+/* OSZAPC */
+/* ******************* */
+
+/* use carries to fill in AF, PO and CF, while ensuring PD and SD are clear.
+ * for full-word operations just clear PD and SD; for smaller operand
+ * sizes only keep AF in the low byte and shift the carries left to
+ * place PO and CF in the top two bits.
+ */
+#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
+ env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong temp = (lf_carries); \
+ if ((size) == TARGET_LONG_BITS) { \
+ temp = temp & ~(LF_MASK_PD | LF_MASK_SD); \
+ } else { \
+ temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
+ } \
+ env->cc_src = temp; \
+}
+
+/* carries, result */
+#define SET_FLAGS_OSZAPC_8(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAPC_16(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAPC_32(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(32, carries, result)
+
+/* ******************* */
+/* OSZAP */
+/* ******************* */
+/* same as setting OSZAPC, but preserve CF and flip PO if the old value of CF
+ * did not match the high bit of lf_carries. */
+#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
+ env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong temp = (lf_carries); \
+ if ((size) == TARGET_LONG_BITS) { \
+ temp = (temp & ~(LF_MASK_PD | LF_MASK_SD)); \
+ } else { \
+ temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
+ } \
+ target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0; \
+ env->cc_src = temp ^ (cf_changed * (LF_MASK_PO | LF_MASK_CF)); \
+}
+
+/* carries, result */
+#define SET_FLAGS_OSZAP_8(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAP_16(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAP_32(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(32, carries, result)
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf)
+{
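+ /*
+ * Store the new CF in the top bit of cc_src and CF ^ OF just below it,
+ * the layout consumed by get_CF() and get_OF().
+ */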
+ env->cc_src &= ~(LF_MASK_PO | LF_MASK_CF);
+ env->cc_src |= (-(target_ulong)new_cf << LF_BIT_PO);
+ env->cc_src ^= ((target_ulong)new_of << LF_BIT_PO);
+}
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(0, diff);
+}
+
+static inline uint32_t get_PF(CPUX86State *env)
+{
+ return ((parity8(env->cc_dst) - 1) ^ env->cc_src) & CC_P;
+}
+
+static inline uint32_t get_OF(CPUX86State *env)
+{
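+ /*
+ * After the shift CF sits in bit 11 and CF ^ OF in bit 10; adding
+ * CC_O / 2 carries into bit 11 exactly when they differ, leaving OF in
+ * the CC_O position.
+ */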
+ return ((env->cc_src >> (LF_BIT_CF - 11)) + CC_O / 2) & CC_O;
+}
+
+bool get_CF(CPUX86State *env)
+{
+ return ((target_long)env->cc_src) < 0;
+}
+
+void set_CF(CPUX86State *env, bool val)
+{
+ /* If CF changes, flip PO and CF */
+ target_ulong temp = -(target_ulong)val;
+ target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0;
+ env->cc_src ^= cf_changed * (LF_MASK_PO | LF_MASK_CF);
+}
+
+static inline uint32_t get_ZF(CPUX86State *env)
+{
+ return env->cc_dst ? 0 : CC_Z;
+}
+
+static inline uint32_t get_SF(CPUX86State *env)
+{
+ return ((env->cc_dst >> (LF_SIGN_BIT - LF_BIT_SD)) ^
+ env->cc_src) & CC_S;
+}
+
+void lflags_to_rflags(CPUX86State *env)
+{
+ env->eflags &= ~(CC_C|CC_P|CC_A|CC_Z|CC_S|CC_O);
+ /* rotate left by one to move carry-out bits into CF and AF */
+ env->eflags |= (
+ (env->cc_src << 1) |
+ (env->cc_src >> (TARGET_LONG_BITS - 1))) & (CC_C | CC_A);
+ env->eflags |= get_SF(env);
+ env->eflags |= get_PF(env);
+ env->eflags |= get_ZF(env);
+ env->eflags |= get_OF(env);
+}
+
+void rflags_to_lflags(CPUX86State *env)
+{
+ target_ulong cf_af, cf_xor_of;
+
+ /* Leave the low byte zero so that parity is always even... */
+ env->cc_dst = !(env->eflags & CC_Z) << 8;
+
+ /* ... and therefore cc_src always uses opposite polarity. */
+ env->cc_src = CC_P;
+ env->cc_src ^= env->eflags & (CC_S | CC_P);
+
+ /* rotate right by one to move CF and AF into the carry-out positions */
+ cf_af = env->eflags & (CC_C | CC_A);
+ env->cc_src |= ((cf_af >> 1) | (cf_af << (TARGET_LONG_BITS - 1)));
+
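+ /*
+ * Adding CC_O - CC_C makes the sum overflow into the CC_O bit exactly
+ * when CF and OF differ, recovering CF ^ OF without a conditional.
+ */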
+ cf_xor_of = ((env->eflags & (CC_C | CC_O)) + (CC_O - CC_C)) & CC_O;
+ env->cc_src |= -cf_xor_of & LF_MASK_PO;
+}
diff --git a/target/i386/emulate/x86_flags.h b/target/i386/emulate/x86_flags.h
new file mode 100644
index 0000000..a395c83
--- /dev/null
+++ b/target/i386/emulate/x86_flags.h
@@ -0,0 +1,71 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
+/////////////////////////////////////////////////////////////////////////
+/*
+ * x86 eflags functions
+ */
+
+#ifndef X86_EMU_FLAGS_H
+#define X86_EMU_FLAGS_H
+
+#include "cpu.h"
+void lflags_to_rflags(CPUX86State *env);
+void rflags_to_lflags(CPUX86State *env);
+
+bool get_CF(CPUX86State *env);
+void set_CF(CPUX86State *env, bool val);
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf);
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+#endif /* X86_EMU_FLAGS_H */
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index 4acf485..04c49e8 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -18,8 +18,13 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "accel/tcg/vcpu-state.h"
#include "cpu.h"
+#include "exec/gdbstub.h"
#include "gdbstub/helpers.h"
+#ifdef CONFIG_LINUX_USER
+#include "linux-user/qemu.h"
+#endif
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
@@ -96,6 +101,19 @@ static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
return 4;
}
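+/*
+ * Report a register to gdb at the target's natural width, masking the
+ * value to 32 bits when not running in a 64-bit code segment.
+ */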
+static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val)
+{
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, val);
+ } else {
+ return gdb_get_reg64(mem_buf, val & 0xffffffffUL);
+ }
+ } else {
+ return gdb_get_reg32(mem_buf, val);
+ }
+}
+
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
@@ -137,15 +155,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
} else {
switch (n) {
case IDX_IP_REG:
- if (TARGET_LONG_BITS == 64) {
- if (env->hflags & HF_CS64_MASK) {
- return gdb_get_reg64(mem_buf, env->eip);
- } else {
- return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
- }
- } else {
- return gdb_get_reg32(mem_buf, env->eip);
- }
+ return gdb_get_reg(env, mem_buf, env->eip);
case IDX_FLAGS_REG:
return gdb_get_reg32(mem_buf, env->eflags);
@@ -248,6 +258,21 @@ static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
return 4;
}
+static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val)
+{
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ *val = ldq_p(mem_buf);
+ } else {
+ *val = ldq_p(mem_buf) & 0xffffffffUL;
+ }
+ return 8;
+ } else {
+ *val = (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+}
+
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
@@ -288,18 +313,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else {
switch (n) {
case IDX_IP_REG:
- if (TARGET_LONG_BITS == 64) {
- if (env->hflags & HF_CS64_MASK) {
- env->eip = ldq_p(mem_buf);
- } else {
- env->eip = ldq_p(mem_buf) & 0xffffffffUL;
- }
- return 8;
- } else {
- env->eip &= ~0xffffffffUL;
- env->eip |= (uint32_t)ldl_p(mem_buf);
- return 4;
- }
+ return gdb_write_reg(env, mem_buf, &env->eip);
case IDX_FLAGS_REG:
env->eflags = ldl_p(mem_buf);
return 4;
@@ -397,3 +411,49 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
/* Unrecognised register. */
return 0;
}
+
+#ifdef CONFIG_LINUX_USER
+
+#define IDX_ORIG_AX 0
+
+static int x86_cpu_gdb_read_linux_register(CPUState *cs, GByteArray *mem_buf,
+ int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ switch (n) {
+ case IDX_ORIG_AX:
+ return gdb_get_reg(env, mem_buf, get_task_state(cs)->orig_ax);
+ }
+ return 0;
+}
+
+static int x86_cpu_gdb_write_linux_register(CPUState *cs, uint8_t *mem_buf,
+ int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ switch (n) {
+ case IDX_ORIG_AX:
+ return gdb_write_reg(env, mem_buf, &get_task_state(cs)->orig_ax);
+ }
+ return 0;
+}
+
+#endif
+
+void x86_cpu_gdb_init(CPUState *cs)
+{
+#ifdef CONFIG_LINUX_USER
+ gdb_register_coprocessor(cs, x86_cpu_gdb_read_linux_register,
+ x86_cpu_gdb_write_linux_register,
+#ifdef TARGET_X86_64
+ gdb_find_static_feature("i386-64bit-linux.xml"),
+#else
+ gdb_find_static_feature("i386-32bit-linux.xml"),
+#endif
+ 0);
+#endif
+}
diff --git a/target/i386/helper.c b/target/i386/helper.c
index 01a268a..e0aaed3 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -20,10 +20,13 @@
#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
-#include "exec/exec-all.h"
-#include "sysemu/runstate.h"
+#include "exec/cputlb.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
+#include "system/runstate.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
+#include "system/memory.h"
#include "monitor/monitor.h"
#include "kvm/kvm_i386.h"
#endif
@@ -523,7 +526,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
CPUState *cs = env_cpu(env);
if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
diff --git a/target/i386/helper.h b/target/i386/helper.h
index eeb8df5..3f67098 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -1,5 +1,6 @@
DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+DEF_HELPER_FLAGS_3(cc_compute_nz, TCG_CALL_NO_RWG_SE, tl, tl, tl, int)
DEF_HELPER_3(write_eflags, void, env, tl, i32)
DEF_HELPER_1(read_eflags, tl, env)
diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c
index 8b8bf5a..7512567 100644
--- a/target/i386/host-cpu.c
+++ b/target/i386/host-cpu.c
@@ -12,10 +12,10 @@
#include "host-cpu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Note: Only safe for use on x86(-64) hosts */
-static uint32_t host_cpu_phys_bits(void)
+uint32_t host_cpu_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
@@ -42,7 +42,7 @@ static uint32_t host_cpu_phys_bits(void)
return host_phys_bits;
}
-static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
+static void host_cpu_adjust_phys_bits(X86CPU *cpu)
{
uint32_t host_phys_bits = host_cpu_phys_bits();
uint32_t phys_bits = cpu->phys_bits;
@@ -66,7 +66,7 @@ static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
}
}
- return phys_bits;
+ cpu->phys_bits = phys_bits;
}
bool host_cpu_realizefn(CPUState *cs, Error **errp)
@@ -75,17 +75,7 @@ bool host_cpu_realizefn(CPUState *cs, Error **errp)
CPUX86State *env = &cpu->env;
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
- uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu);
-
- if (phys_bits &&
- (phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
- phys_bits < 32)) {
- error_setg(errp, "phys-bits should be between 32 and %u "
- " (but is %u)",
- TARGET_PHYS_ADDR_SPACE_BITS, phys_bits);
- return false;
- }
- cpu->phys_bits = phys_bits;
+ host_cpu_adjust_phys_bits(cpu);
}
return true;
}
@@ -119,9 +109,13 @@ void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
uint32_t eax, ebx, ecx, edx;
- host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
+ host_cpuid(0x0, 0, NULL, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+ if (!family && !model && !stepping) {
+ return;
+ }
+
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
if (family) {
*family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
@@ -139,11 +133,9 @@ void host_cpu_instance_init(X86CPU *cpu)
X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
if (xcc->model) {
- uint32_t ebx = 0, ecx = 0, edx = 0;
char vendor[CPUID_VENDOR_SZ + 1];
- host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
- x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+ host_cpu_vendor_fms(vendor, NULL, NULL, NULL);
object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
}
}
@@ -169,7 +161,7 @@ void host_cpu_max_instance_init(X86CPU *cpu)
&error_abort);
}
-static void host_cpu_class_init(ObjectClass *oc, void *data)
+static void host_cpu_class_init(ObjectClass *oc, const void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h
index 6a9bc91..b97ec01 100644
--- a/target/i386/host-cpu.h
+++ b/target/i386/host-cpu.h
@@ -10,6 +10,7 @@
#ifndef HOST_CPU_H
#define HOST_CPU_H
+uint32_t host_cpu_phys_bits(void);
void host_cpu_instance_init(X86CPU *cpu);
void host_cpu_max_instance_init(X86CPU *cpu);
bool host_cpu_realizefn(CPUState *cs, Error **errp);
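/*
 * Illustrative sketch only: roughly the information the now-exported
 * host_cpu_phys_bits() reports, shown here as a standalone program using
 * GCC/Clang's <cpuid.h>.  CPUID leaf 0x80000008 returns the physical
 * address width in EAX[7:0] when the leaf exists; the fallback value of 36
 * is an assumption for this sketch, not QEMU's exact policy.
 */
#include <cpuid.h>
#include <stdio.h>

static unsigned demo_host_phys_bits(void)
{
    unsigned eax, ebx, ecx, edx;

    if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) && eax >= 0x80000008) {
        __get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
        return eax & 0xff;          /* EAX[7:0]: physical address bits */
    }
    return 36;                      /* assumed fallback for this sketch */
}

int main(void)
{
    printf("host physical address bits: %u\n", demo_host_phys_bits());
    return 0;
}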
diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c
index ac617f1..dfdda70 100644
--- a/target/i386/hvf/hvf-cpu.c
+++ b/target/i386/hvf/hvf-cpu.c
@@ -11,10 +11,10 @@
#include "cpu.h"
#include "host-cpu.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
-#include "sysemu/hvf.h"
-#include "hw/core/accel-cpu.h"
+#include "system/hvf.h"
+#include "accel/accel-cpu-target.h"
#include "hvf-i386.h"
static void hvf_cpu_max_instance_init(X86CPU *cpu)
@@ -74,7 +74,7 @@ static void hvf_cpu_instance_init(CPUState *cs)
hvf_cpu_xsave_init();
}
-static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void hvf_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
index e99c02c..8c42ae6 100644
--- a/target/i386/hvf/hvf-i386.h
+++ b/target/i386/hvf/hvf-i386.h
@@ -18,7 +18,9 @@
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, int reg);
-void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
+void hvf_handle_io(CPUState *, uint16_t, void *, int, int, int);
+void hvf_simulate_rdmsr(CPUState *cpu);
+void hvf_simulate_wrmsr(CPUState *cpu);
/* Host specific functions */
int hvf_inject_interrupt(CPUArchState *env, int vector);
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index c9c64e2..99e37a3 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -52,18 +52,19 @@
#include "qapi/error.h"
#include "migration/blocker.h"
-#include "sysemu/hvf.h"
-#include "sysemu/hvf_int.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
+#include "system/hvf.h"
+#include "system/hvf_int.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
-#include "x86.h"
+#include "emulate/x86.h"
#include "x86_descr.h"
+#include "emulate/x86_flags.h"
#include "x86_mmu.h"
-#include "x86_decode.h"
-#include "x86_emu.h"
+#include "emulate/x86_decode.h"
+#include "emulate/x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"
@@ -75,6 +76,7 @@
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"
+#include "exec/target_page.h"
static Error *invtsc_mig_blocker;
@@ -103,7 +105,7 @@ static void update_apic_tpr(CPUState *cpu)
#define VECTORING_INFO_VECTOR_MASK 0xff
-void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
+void hvf_handle_io(CPUState *env, uint16_t port, void *buffer,
int direction, int size, int count)
{
int i;
@@ -167,7 +169,7 @@ void hvf_arch_vcpu_destroy(CPUState *cpu)
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
- g_free(env->hvf_mmio_buf);
+ g_free(env->emu_mmio_buf);
}
static void init_tsc_freq(CPUX86State *env)
@@ -223,6 +225,38 @@ int hvf_arch_init(void)
return 0;
}
+hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
+{
+ return hv_vm_create(HV_VM_DEFAULT);
+}
+
+static void hvf_read_segment_descriptor(CPUState *s, struct x86_segment_descriptor *desc,
+ X86Seg seg)
+{
+ struct vmx_segment vmx_segment;
+ vmx_read_segment_descriptor(s, &vmx_segment, seg);
+ vmx_segment_to_x86_descriptor(s, &vmx_segment, desc);
+}
+
+static void hvf_read_mem(CPUState *cpu, void *data, target_ulong gva, int bytes)
+{
+ vmx_read_mem(cpu, data, gva, bytes);
+}
+
+static void hvf_write_mem(CPUState *cpu, void *data, target_ulong gva, int bytes)
+{
+ vmx_write_mem(cpu, gva, data, bytes);
+}
+
+static const struct x86_emul_ops hvf_x86_emul_ops = {
+ .read_mem = hvf_read_mem,
+ .write_mem = hvf_write_mem,
+ .read_segment_descriptor = hvf_read_segment_descriptor,
+ .handle_io = hvf_handle_io,
+ .simulate_rdmsr = hvf_simulate_rdmsr,
+ .simulate_wrmsr = hvf_simulate_wrmsr,
+};
+
int hvf_arch_init_vcpu(CPUState *cpu)
{
X86CPU *x86cpu = X86_CPU(cpu);
@@ -231,11 +265,13 @@ int hvf_arch_init_vcpu(CPUState *cpu)
int r;
uint64_t reqCap;
- init_emu();
+ init_emu(&hvf_x86_emul_ops);
init_decoder();
- hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
- env->hvf_mmio_buf = g_new(char, 4096);
+ if (hvf_state->hvf_caps == NULL) {
+ hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
+ }
+ env->emu_mmio_buf = g_new(char, 4096);
if (x86cpu->vmware_cpuid_freq) {
init_tsc_freq(env);
@@ -427,6 +463,264 @@ static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
}
+void hvf_load_regs(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ int i = 0;
+ RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
+ RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
+ RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
+ RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
+ RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
+ RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
+ RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
+ RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
+ for (i = 8; i < 16; i++) {
+ RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
+ }
+
+ env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
+ rflags_to_lflags(env);
+ env->eip = rreg(cs->accel->fd, HV_X86_RIP);
+}
+
+void hvf_store_regs(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ int i = 0;
+ wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
+ wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
+ wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
+ wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
+ wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
+ wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
+ wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
+ wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
+ for (i = 8; i < 16; i++) {
+ wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
+ }
+
+ lflags_to_rflags(env);
+ wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
+ macvm_set_rip(cs, env->eip);
+}
+
+void hvf_simulate_rdmsr(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t msr = ECX(env);
+ uint64_t val = 0;
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(cpu->apic_state);
+ break;
+ case MSR_APIC_START ... MSR_APIC_END: {
+ int ret;
+ int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+ ret = apic_msr_read(index, &val);
+ if (ret < 0) {
+ x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+ }
+
+ break;
+ }
+ case MSR_IA32_UCODE_REV:
+ val = cpu->ucode_rev;
+ break;
+ case MSR_EFER:
+ val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
+ break;
+ case MSR_FSBASE:
+ val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
+ break;
+ case MSR_GSBASE:
+ val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
+ break;
+ case MSR_KERNELGSBASE:
+ val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_CORE_THREAD_COUNT:
+ val = cpu_x86_get_msr_core_thread_count(cpu);
+ break;
+ default:
+ /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
+ val = 0;
+ break;
+ }
+
+ RAX(env) = (uint32_t)val;
+ RDX(env) = (uint32_t)(val >> 32);
+}
+
+void hvf_simulate_wrmsr(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t msr = ECX(env);
+ uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ break;
+ case MSR_IA32_APICBASE: {
+ int r;
+
+ r = cpu_set_apic_base(cpu->apic_state, data);
+ if (r < 0) {
+ x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+ }
+
+ break;
+ }
+ case MSR_APIC_START ... MSR_APIC_END: {
+ int ret;
+ int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+ ret = apic_msr_write(index, data);
+ if (ret < 0) {
+ x86_emul_raise_exception(env, EXCP0D_GPF, 0);
+ }
+
+ break;
+ }
+ case MSR_FSBASE:
+ wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
+ break;
+ case MSR_GSBASE:
+ wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
+ break;
+ case MSR_KERNELGSBASE:
+ wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_EFER:
+ /*printf("new efer %llx\n", EFER(cs));*/
+ wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
+ if (data & MSR_EFER_NXE) {
+ hv_vcpu_invalidate_tlb(cs->accel->fd);
+ }
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = data;
+ break;
+ default:
+ break;
+ }
+
+ /* Related to support known hypervisor interface */
+ /* if (g_hypervisor_iface)
+ g_hypervisor_iface->wrmsr_handler(cs, msr, data);
+
+ printf("write msr %llx\n", RCX(cs));*/
+}
+
int hvf_vcpu_exec(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -510,10 +804,10 @@ int hvf_vcpu_exec(CPUState *cpu)
if (ept_emulation_fault(slot, gpa, exit_qual)) {
struct x86_decode decode;
- load_regs(cpu);
+ hvf_load_regs(cpu);
decode_instruction(env, &decode);
exec_instruction(env, &decode);
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
}
break;
@@ -528,8 +822,8 @@ int hvf_vcpu_exec(CPUState *cpu)
if (!string && in) {
uint64_t val = 0;
- load_regs(cpu);
- hvf_handle_io(env, port, &val, 0, size, 1);
+ hvf_load_regs(cpu);
+ hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
if (size == 1) {
AL(env) = val;
} else if (size == 2) {
@@ -540,21 +834,21 @@ int hvf_vcpu_exec(CPUState *cpu)
RAX(env) = (uint64_t)val;
}
env->eip += ins_len;
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
} else if (!string && !in) {
RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
- hvf_handle_io(env, port, &RAX(env), 1, size, 1);
+ hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1);
macvm_set_rip(cpu, rip + ins_len);
break;
}
struct x86_decode decode;
- load_regs(cpu);
+ hvf_load_regs(cpu);
decode_instruction(env, &decode);
assert(ins_len == decode.len);
exec_instruction(env, &decode);
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
}
@@ -579,8 +873,6 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
}
case EXIT_REASON_XSETBV: {
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUX86State *env = &x86_cpu->env;
uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);
@@ -609,21 +901,21 @@ int hvf_vcpu_exec(CPUState *cpu)
case EXIT_REASON_RDMSR:
case EXIT_REASON_WRMSR:
{
- load_regs(cpu);
+ hvf_load_regs(cpu);
if (exit_reason == EXIT_REASON_RDMSR) {
- simulate_rdmsr(env);
+ hvf_simulate_rdmsr(cpu);
} else {
- simulate_wrmsr(env);
+ hvf_simulate_wrmsr(cpu);
}
env->eip += ins_len;
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
}
case EXIT_REASON_CR_ACCESS: {
int cr;
int reg;
- load_regs(cpu);
+ hvf_load_regs(cpu);
cr = exit_qual & 15;
reg = (exit_qual >> 8) & 15;
@@ -637,7 +929,6 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
}
case 8: {
- X86CPU *x86_cpu = X86_CPU(cpu);
if (exit_qual & 0x10) {
RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
} else {
@@ -652,16 +943,16 @@ int hvf_vcpu_exec(CPUState *cpu)
abort();
}
env->eip += ins_len;
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
}
case EXIT_REASON_APIC_ACCESS: { /* TODO */
struct x86_decode decode;
- load_regs(cpu);
+ hvf_load_regs(cpu);
decode_instruction(env, &decode);
exec_instruction(env, &decode);
- store_regs(cpu);
+ hvf_store_regs(cpu);
break;
}
case EXIT_REASON_TPR: {
@@ -670,7 +961,7 @@ int hvf_vcpu_exec(CPUState *cpu)
}
case EXIT_REASON_TASK_SWITCH: {
uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
- x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+ x86_segment_selector sel = {.sel = exit_qual & 0xffff};
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
& VMCS_INTR_T_MASK);
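/*
 * Illustrative sketch only: the EDX:EAX split used by hvf_simulate_rdmsr()
 * and hvf_simulate_wrmsr() in the hunks above.  RDMSR returns the 64-bit MSR
 * value with the high half in EDX and the low half in EAX; WRMSR reassembles
 * it the same way.  Standalone toy with local names.
 */
#include <assert.h>
#include <stdint.h>

static void demo_rdmsr_result(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;           /* low 32 bits  -> EAX */
    *edx = (uint32_t)(val >> 32);   /* high 32 bits -> EDX */
}

static uint64_t demo_wrmsr_data(uint32_t eax, uint32_t edx)
{
    return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
    uint32_t eax, edx;

    demo_rdmsr_result(0x123456789abcdef0ULL, &eax, &edx);
    assert(demo_wrmsr_data(eax, edx) == 0x123456789abcdef0ULL);
    return 0;
}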
diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build
index 05c3c8c..519d190 100644
--- a/target/i386/hvf/meson.build
+++ b/target/i386/hvf/meson.build
@@ -2,10 +2,7 @@ i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
'hvf.c',
'x86.c',
'x86_cpuid.c',
- 'x86_decode.c',
'x86_descr.c',
- 'x86_emu.c',
- 'x86_flags.c',
'x86_mmu.c',
'x86_task.c',
'x86hvf.c',
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
index 3954ef8..26d6029 100644
--- a/target/i386/hvf/vmx.h
+++ b/target/i386/hvf/vmx.h
@@ -29,11 +29,12 @@
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
-#include "x86.h"
-#include "sysemu/hvf.h"
-#include "sysemu/hvf_int.h"
+#include "emulate/x86.h"
+#include "system/hvf.h"
+#include "system/hvf_int.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
index 80e3613..5c75ec9 100644
--- a/target/i386/hvf/x86.c
+++ b/target/i386/hvf/x86.c
@@ -19,8 +19,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "x86_decode.h"
-#include "x86_emu.h"
+#include "emulate/x86_decode.h"
+#include "emulate/x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
@@ -48,7 +48,7 @@
bool x86_read_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel)
+ x86_segment_selector sel)
{
target_ulong base;
uint32_t limit;
@@ -78,7 +78,7 @@ bool x86_read_segment_descriptor(CPUState *cpu,
bool x86_write_segment_descriptor(CPUState *cpu,
struct x86_segment_descriptor *desc,
- x68_segment_selector sel)
+ x86_segment_selector sel)
{
target_ulong base;
uint32_t limit;
diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h
deleted file mode 100644
index 3570f29..0000000
--- a/target/i386/hvf/x86.h
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Copyright (C) 2016 Veertu Inc,
- * Copyright (C) 2017 Veertu Inc,
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HVF_X86_H
-#define HVF_X86_H
-
-typedef struct x86_register {
- union {
- struct {
- uint64_t rrx; /* full 64 bit */
- };
- struct {
- uint32_t erx; /* low 32 bit part */
- uint32_t hi32_unused1;
- };
- struct {
- uint16_t rx; /* low 16 bit part */
- uint16_t hi16_unused1;
- uint32_t hi32_unused2;
- };
- struct {
- uint8_t lx; /* low 8 bit part */
- uint8_t hx; /* high 8 bit */
- uint16_t hi16_unused2;
- uint32_t hi32_unused3;
- };
- };
-} __attribute__ ((__packed__)) x86_register;
-
-/* 16 bit Task State Segment */
-typedef struct x86_tss_segment16 {
- uint16_t link;
- uint16_t sp0;
- uint16_t ss0;
- uint32_t sp1;
- uint16_t ss1;
- uint32_t sp2;
- uint16_t ss2;
- uint16_t ip;
- uint16_t flags;
- uint16_t ax;
- uint16_t cx;
- uint16_t dx;
- uint16_t bx;
- uint16_t sp;
- uint16_t bp;
- uint16_t si;
- uint16_t di;
- uint16_t es;
- uint16_t cs;
- uint16_t ss;
- uint16_t ds;
- uint16_t ldtr;
-} __attribute__((packed)) x86_tss_segment16;
-
-/* 32 bit Task State Segment */
-typedef struct x86_tss_segment32 {
- uint32_t prev_tss;
- uint32_t esp0;
- uint32_t ss0;
- uint32_t esp1;
- uint32_t ss1;
- uint32_t esp2;
- uint32_t ss2;
- uint32_t cr3;
- uint32_t eip;
- uint32_t eflags;
- uint32_t eax;
- uint32_t ecx;
- uint32_t edx;
- uint32_t ebx;
- uint32_t esp;
- uint32_t ebp;
- uint32_t esi;
- uint32_t edi;
- uint32_t es;
- uint32_t cs;
- uint32_t ss;
- uint32_t ds;
- uint32_t fs;
- uint32_t gs;
- uint32_t ldt;
- uint16_t trap;
- uint16_t iomap_base;
-} __attribute__ ((__packed__)) x86_tss_segment32;
-
-/* 64 bit Task State Segment */
-typedef struct x86_tss_segment64 {
- uint32_t unused;
- uint64_t rsp0;
- uint64_t rsp1;
- uint64_t rsp2;
- uint64_t unused1;
- uint64_t ist1;
- uint64_t ist2;
- uint64_t ist3;
- uint64_t ist4;
- uint64_t ist5;
- uint64_t ist6;
- uint64_t ist7;
- uint64_t unused2;
- uint16_t unused3;
- uint16_t iomap_base;
-} __attribute__ ((__packed__)) x86_tss_segment64;
-
-/* segment descriptors */
-typedef struct x86_segment_descriptor {
- uint64_t limit0:16;
- uint64_t base0:16;
- uint64_t base1:8;
- uint64_t type:4;
- uint64_t s:1;
- uint64_t dpl:2;
- uint64_t p:1;
- uint64_t limit1:4;
- uint64_t avl:1;
- uint64_t l:1;
- uint64_t db:1;
- uint64_t g:1;
- uint64_t base2:8;
-} __attribute__ ((__packed__)) x86_segment_descriptor;
-
-static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
-{
- return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
-}
-
-static inline void x86_set_segment_base(x86_segment_descriptor *desc,
- uint32_t base)
-{
- desc->base2 = base >> 24;
- desc->base1 = (base >> 16) & 0xff;
- desc->base0 = base & 0xffff;
-}
-
-static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
-{
- uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
- if (desc->g) {
- return (limit << 12) | 0xfff;
- }
- return limit;
-}
-
-static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
- uint32_t limit)
-{
- desc->limit0 = limit & 0xffff;
- desc->limit1 = limit >> 16;
-}
-
-typedef struct x86_call_gate {
- uint64_t offset0:16;
- uint64_t selector:16;
- uint64_t param_count:4;
- uint64_t reserved:3;
- uint64_t type:4;
- uint64_t dpl:1;
- uint64_t p:1;
- uint64_t offset1:16;
-} __attribute__ ((__packed__)) x86_call_gate;
-
-static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
-{
- return (uint32_t)((gate->offset1 << 16) | gate->offset0);
-}
-
-#define GDT_SEL 0
-#define LDT_SEL 1
-
-typedef struct x68_segment_selector {
- union {
- uint16_t sel;
- struct {
- uint16_t rpl:2;
- uint16_t ti:1;
- uint16_t index:13;
- };
- };
-} __attribute__ ((__packed__)) x68_segment_selector;
-
-/* useful register access macros */
-#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
-
-#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
-#define RAX(cpu) RRX(cpu, R_EAX)
-#define RCX(cpu) RRX(cpu, R_ECX)
-#define RDX(cpu) RRX(cpu, R_EDX)
-#define RBX(cpu) RRX(cpu, R_EBX)
-#define RSP(cpu) RRX(cpu, R_ESP)
-#define RBP(cpu) RRX(cpu, R_EBP)
-#define RSI(cpu) RRX(cpu, R_ESI)
-#define RDI(cpu) RRX(cpu, R_EDI)
-#define R8(cpu) RRX(cpu, R_R8)
-#define R9(cpu) RRX(cpu, R_R9)
-#define R10(cpu) RRX(cpu, R_R10)
-#define R11(cpu) RRX(cpu, R_R11)
-#define R12(cpu) RRX(cpu, R_R12)
-#define R13(cpu) RRX(cpu, R_R13)
-#define R14(cpu) RRX(cpu, R_R14)
-#define R15(cpu) RRX(cpu, R_R15)
-
-#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
-#define EAX(cpu) ERX(cpu, R_EAX)
-#define ECX(cpu) ERX(cpu, R_ECX)
-#define EDX(cpu) ERX(cpu, R_EDX)
-#define EBX(cpu) ERX(cpu, R_EBX)
-#define ESP(cpu) ERX(cpu, R_ESP)
-#define EBP(cpu) ERX(cpu, R_EBP)
-#define ESI(cpu) ERX(cpu, R_ESI)
-#define EDI(cpu) ERX(cpu, R_EDI)
-
-#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
-#define AX(cpu) RX(cpu, R_EAX)
-#define CX(cpu) RX(cpu, R_ECX)
-#define DX(cpu) RX(cpu, R_EDX)
-#define BP(cpu) RX(cpu, R_EBP)
-#define SP(cpu) RX(cpu, R_ESP)
-#define BX(cpu) RX(cpu, R_EBX)
-#define SI(cpu) RX(cpu, R_ESI)
-#define DI(cpu) RX(cpu, R_EDI)
-
-#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
-#define AL(cpu) RL(cpu, R_EAX)
-#define CL(cpu) RL(cpu, R_ECX)
-#define DL(cpu) RL(cpu, R_EDX)
-#define BL(cpu) RL(cpu, R_EBX)
-
-#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
-#define AH(cpu) RH(cpu, R_EAX)
-#define CH(cpu) RH(cpu, R_ECX)
-#define DH(cpu) RH(cpu, R_EDX)
-#define BH(cpu) RH(cpu, R_EBX)
-
-/* deal with GDT/LDT descriptors in memory */
-bool x86_read_segment_descriptor(CPUState *cpu,
- struct x86_segment_descriptor *desc,
- x68_segment_selector sel);
-bool x86_write_segment_descriptor(CPUState *cpu,
- struct x86_segment_descriptor *desc,
- x68_segment_selector sel);
-
-bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
- int gate);
-
-/* helpers */
-bool x86_is_protected(CPUState *cpu);
-bool x86_is_real(CPUState *cpu);
-bool x86_is_v8086(CPUState *cpu);
-bool x86_is_long_mode(CPUState *cpu);
-bool x86_is_long64_mode(CPUState *cpu);
-bool x86_is_paging_mode(CPUState *cpu);
-bool x86_is_pae_enabled(CPUState *cpu);
-
-enum X86Seg;
-target_ulong linear_addr(CPUState *cpu, target_ulong addr, enum X86Seg seg);
-target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size,
- enum X86Seg seg);
-target_ulong linear_rip(CPUState *cpu, target_ulong rip);
-
-static inline uint64_t rdtscp(void)
-{
- uint64_t tsc;
- __asm__ __volatile__("rdtscp; " /* serializing read of tsc */
- "shl $32,%%rdx; " /* shift higher 32 bits stored in rdx up */
- "or %%rdx,%%rax" /* and or onto rax */
- : "=a"(tsc) /* output to tsc variable */
- :
- : "%rcx", "%rdx"); /* rcx and rdx are clobbered */
-
- return tsc;
-}
-
-#endif
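/*
 * Illustrative sketch only: the base/limit packing used by the
 * x86_segment_descriptor helpers that this patch moves out of
 * target/i386/hvf/x86.h (into the shared emulate/ headers).  The descriptor
 * splits the 32-bit base across three fields and the 20-bit limit across
 * two; with the granularity bit set, the limit is in 4 KiB units.
 */
#include <assert.h>
#include <stdint.h>

struct demo_segment_descriptor {
    uint64_t limit0:16;
    uint64_t base0:16;
    uint64_t base1:8;
    uint64_t type:4, s:1, dpl:2, p:1;
    uint64_t limit1:4, avl:1, l:1, db:1, g:1;
    uint64_t base2:8;
};

static uint32_t demo_segment_base(const struct demo_segment_descriptor *d)
{
    return (uint32_t)((d->base2 << 24) | (d->base1 << 16) | d->base0);
}

static uint32_t demo_segment_limit(const struct demo_segment_descriptor *d)
{
    uint32_t limit = (uint32_t)((d->limit1 << 16) | d->limit0);
    return d->g ? (limit << 12) | 0xfff : limit;    /* 4 KiB granularity */
}

int main(void)
{
    /* A flat 4 GiB segment: base 0, limit 0xfffff, granularity set. */
    struct demo_segment_descriptor d = { .limit0 = 0xffff, .limit1 = 0xf, .g = 1 };

    assert(demo_segment_base(&d) == 0);
    assert(demo_segment_limit(&d) == 0xffffffffu);
    return 0;
}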
diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c
index e56cd84..0798a0c 100644
--- a/target/i386/hvf/x86_cpuid.c
+++ b/target/i386/hvf/x86_cpuid.c
@@ -21,28 +21,38 @@
*/
#include "qemu/osdep.h"
+#include "qemu/cpuid.h"
+#include "host/cpuinfo.h"
#include "cpu.h"
-#include "x86.h"
+#include "emulate/x86.h"
#include "vmx.h"
-#include "sysemu/hvf.h"
+#include "system/hvf.h"
#include "hvf-i386.h"
-static bool xgetbv(uint32_t cpuid_ecx, uint32_t idx, uint64_t *xcr)
+static bool cached_xcr0;
+static uint64_t supported_xcr0;
+
+static void cache_host_xcr0(void)
{
- uint32_t xcrl, xcrh;
+ if (cached_xcr0) {
+ return;
+ }
- if (cpuid_ecx & CPUID_EXT_OSXSAVE) {
- /*
- * The xgetbv instruction is not available to older versions of
- * the assembler, so we encode the instruction manually.
- */
- asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (idx));
+ if (cpuinfo & CPUINFO_OSXSAVE) {
+ uint64_t host_xcr0 = xgetbv_low(0);
- *xcr = (((uint64_t)xcrh) << 32) | xcrl;
- return true;
+ /* Only show xcr0 bits corresponding to usable features. */
+ supported_xcr0 = host_xcr0 & (XSTATE_FP_MASK |
+ XSTATE_SSE_MASK | XSTATE_YMM_MASK |
+ XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
+ XSTATE_Hi16_ZMM_MASK);
+ if ((supported_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK)) !=
+ (XSTATE_FP_MASK | XSTATE_SSE_MASK)) {
+ supported_xcr0 = 0;
+ }
}
- return false;
+ cached_xcr0 = true;
}
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
@@ -51,6 +61,7 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
uint64_t cap;
uint32_t eax, ebx, ecx, edx;
+ cache_host_xcr0();
host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);
switch (func) {
@@ -62,11 +73,12 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_HT;
ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
- CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_X2APIC |
+ (supported_xcr0 ? CPUID_EXT_XSAVE : 0) |
CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
ecx |= CPUID_EXT_HYPERVISOR;
break;
@@ -107,16 +119,14 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
eax = 0;
break;
case 0xD:
+ if (!supported_xcr0 || idx >= 63 ||
+ (idx > 1 && !(supported_xcr0 & (UINT64_C(1) << idx)))) {
+ eax = ebx = ecx = edx = 0;
+ break;
+ }
+
if (idx == 0) {
- uint64_t host_xcr0;
- if (xgetbv(ecx, 0, &host_xcr0)) {
- uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK |
- XSTATE_SSE_MASK | XSTATE_YMM_MASK |
- XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
- XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
- XSTATE_Hi16_ZMM_MASK);
- eax &= supp_xcr0;
- }
+ eax = supported_xcr0;
} else if (idx == 1) {
hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
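/*
 * Illustrative sketch only: the sanity check applied by the new
 * cache_host_xcr0() above.  The host XCR0 value is masked down to the XSAVE
 * components the code is willing to expose, and if the mandatory x87-FP and
 * SSE bits are not both present the whole mask is discarded.  Bit positions
 * follow the architectural XCR0 layout; all names here are local.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_XSTATE_FP   (1ULL << 0)    /* x87 state */
#define DEMO_XSTATE_SSE  (1ULL << 1)    /* SSE state */
#define DEMO_XSTATE_YMM  (1ULL << 2)    /* AVX state */

static uint64_t demo_supported_xcr0(uint64_t host_xcr0, uint64_t expose_mask)
{
    uint64_t xcr0 = host_xcr0 & expose_mask;

    if ((xcr0 & (DEMO_XSTATE_FP | DEMO_XSTATE_SSE)) !=
        (DEMO_XSTATE_FP | DEMO_XSTATE_SSE)) {
        return 0;   /* x87/SSE state is mandatory for usable XSAVE */
    }
    return xcr0;
}

int main(void)
{
    uint64_t mask = DEMO_XSTATE_FP | DEMO_XSTATE_SSE | DEMO_XSTATE_YMM;

    printf("host 0x7 -> %#llx\n", (unsigned long long)demo_supported_xcr0(0x7, mask));
    printf("host 0x4 -> %#llx\n", (unsigned long long)demo_supported_xcr0(0x4, mask));
    return 0;
}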
diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
deleted file mode 100644
index a4a28f1..0000000
--- a/target/i386/hvf/x86_decode.c
+++ /dev/null
@@ -1,2196 +0,0 @@
-/*
- * Copyright (C) 2016 Veertu Inc,
- * Copyright (C) 2017 Google Inc,
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-
-#include "panic.h"
-#include "x86_decode.h"
-#include "vmx.h"
-#include "x86_mmu.h"
-#include "x86_descr.h"
-
-#define OPCODE_ESCAPE 0xf
-
-static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
-{
- printf("%llx: failed to decode instruction ", env->eip);
- for (int i = 0; i < decode->opcode_len; i++) {
- printf("%x ", decode->opcode[i]);
- }
- printf("\n");
- VM_PANIC("decoder failed\n");
-}
-
-uint64_t sign(uint64_t val, int size)
-{
- switch (size) {
- case 1:
- val = (int8_t)val;
- break;
- case 2:
- val = (int16_t)val;
- break;
- case 4:
- val = (int32_t)val;
- break;
- case 8:
- val = (int64_t)val;
- break;
- default:
- VM_PANIC_EX("%s invalid size %d\n", __func__, size);
- break;
- }
- return val;
-}
-
-static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
- int size)
-{
- target_ulong val = 0;
-
- switch (size) {
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- VM_PANIC_EX("%s invalid size %d\n", __func__, size);
- break;
- }
- target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
- vmx_read_mem(env_cpu(env), &val, va, size);
- decode->len += size;
-
- return val;
-}
-
-static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode)
-{
- return (uint8_t)decode_bytes(env, decode, 1);
-}
-
-static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode)
-{
- return (uint16_t)decode_bytes(env, decode, 2);
-}
-
-static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode)
-{
- return (uint32_t)decode_bytes(env, decode, 4);
-}
-
-static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode)
-{
- return decode_bytes(env, decode, 8);
-}
-
-static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_RM;
-}
-
-static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_REG;
- op->reg = decode->modrm.reg;
- op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
- decode->operand_size);
-}
-
-static void decode_rax(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_REG;
- op->reg = R_EAX;
- /* Since reg is always AX, REX prefix has no impact. */
- op->ptr = get_reg_ref(env, op->reg, false, 0,
- decode->operand_size);
-}
-
-static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *var, int size)
-{
- var->type = X86_VAR_IMMEDIATE;
- var->size = size;
- switch (size) {
- case 1:
- var->val = decode_byte(env, decode);
- break;
- case 2:
- var->val = decode_word(env, decode);
- break;
- case 4:
- var->val = decode_dword(env, decode);
- break;
- case 8:
- var->val = decode_qword(env, decode);
- break;
- default:
- VM_PANIC_EX("bad size %d\n", size);
- }
-}
-
-static void decode_imm8(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- decode_immediate(env, decode, op, 1);
- op->type = X86_VAR_IMMEDIATE;
-}
-
-static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- decode_immediate(env, decode, op, 1);
- op->val = sign(op->val, 1);
- op->type = X86_VAR_IMMEDIATE;
-}
-
-static void decode_imm16(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- decode_immediate(env, decode, op, 2);
- op->type = X86_VAR_IMMEDIATE;
-}
-
-
-static void decode_imm(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- if (8 == decode->operand_size) {
- decode_immediate(env, decode, op, 4);
- op->val = sign(op->val, decode->operand_size);
- } else {
- decode_immediate(env, decode, op, decode->operand_size);
- }
- op->type = X86_VAR_IMMEDIATE;
-}
-
-static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- decode_immediate(env, decode, op, decode->operand_size);
- op->val = sign(op->val, decode->operand_size);
- op->type = X86_VAR_IMMEDIATE;
-}
-
-static void decode_imm_1(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_IMMEDIATE;
- op->val = 1;
-}
-
-static void decode_imm_0(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_IMMEDIATE;
- op->val = 0;
-}
-
-
-static void decode_pushseg(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
-
- decode->op[0].type = X86_VAR_REG;
- switch (op) {
- case 0xe:
- decode->op[0].reg = R_CS;
- break;
- case 0x16:
- decode->op[0].reg = R_SS;
- break;
- case 0x1e:
- decode->op[0].reg = R_DS;
- break;
- case 0x06:
- decode->op[0].reg = R_ES;
- break;
- case 0xa0:
- decode->op[0].reg = R_FS;
- break;
- case 0xa8:
- decode->op[0].reg = R_GS;
- break;
- }
-}
-
-static void decode_popseg(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
-
- decode->op[0].type = X86_VAR_REG;
- switch (op) {
- case 0xf:
- decode->op[0].reg = R_CS;
- break;
- case 0x17:
- decode->op[0].reg = R_SS;
- break;
- case 0x1f:
- decode->op[0].reg = R_DS;
- break;
- case 0x07:
- decode->op[0].reg = R_ES;
- break;
- case 0xa1:
- decode->op[0].reg = R_FS;
- break;
- case 0xa9:
- decode->op[0].reg = R_GS;
- break;
- }
-}
-
-static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0x40;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0x48;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
-{
- if (!decode->modrm.reg) {
- decode->cmd = X86_DECODE_CMD_INC;
- } else if (1 == decode->modrm.reg) {
- decode->cmd = X86_DECODE_CMD_DEC;
- }
-}
-
-static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0x50;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0x58;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
-{
- decode->displacement = decode_bytes(env, decode, decode->operand_size);
- decode->displacement_size = decode->operand_size;
-}
-
-static void decode_farjmp(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_IMMEDIATE;
- decode->op[0].val = decode_bytes(env, decode, decode->operand_size);
- decode->displacement = decode_word(env, decode);
-}
-
-static void decode_addgroup(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_ADD,
- X86_DECODE_CMD_OR,
- X86_DECODE_CMD_ADC,
- X86_DECODE_CMD_SBB,
- X86_DECODE_CMD_AND,
- X86_DECODE_CMD_SUB,
- X86_DECODE_CMD_XOR,
- X86_DECODE_CMD_CMP
- };
- decode->cmd = group[decode->modrm.reg];
-}
-
-static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_ROL,
- X86_DECODE_CMD_ROR,
- X86_DECODE_CMD_RCL,
- X86_DECODE_CMD_RCR,
- X86_DECODE_CMD_SHL,
- X86_DECODE_CMD_SHR,
- X86_DECODE_CMD_SHL,
- X86_DECODE_CMD_SAR
- };
- decode->cmd = group[decode->modrm.reg];
-}
-
-static void decode_f7group(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_TST,
- X86_DECODE_CMD_TST,
- X86_DECODE_CMD_NOT,
- X86_DECODE_CMD_NEG,
- X86_DECODE_CMD_MUL,
- X86_DECODE_CMD_IMUL_1,
- X86_DECODE_CMD_DIV,
- X86_DECODE_CMD_IDIV
- };
- decode->cmd = group[decode->modrm.reg];
- decode_modrm_rm(env, decode, &decode->op[0]);
-
- switch (decode->modrm.reg) {
- case 0:
- case 1:
- decode_imm(env, decode, &decode->op[1]);
- break;
- case 2:
- break;
- case 3:
- decode->op[1].type = X86_VAR_IMMEDIATE;
- decode->op[1].val = 0;
- break;
- default:
- break;
- }
-}
-
-static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0x90;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0xb8;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
- decode_immediate(env, decode, &decode->op[1], decode->operand_size);
-}
-
-static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_OFFSET;
- op->ptr = decode_bytes(env, decode, decode->addressing_size);
-}
-
-static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[0] - 0xb0;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
- decode_immediate(env, decode, &decode->op[1], decode->operand_size);
-}
-
-static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X86_VAR_REG;
- op->reg = R_ECX;
- op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
- decode->operand_size);
-}
-
-struct decode_tbl {
- uint8_t opcode;
- enum x86_decode_cmd cmd;
- uint8_t operand_size;
- bool is_modrm;
- void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op1);
- void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op2);
- void (*decode_op3)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op3);
- void (*decode_op4)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op4);
- void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
- uint32_t flags_mask;
-};
-
-struct decode_x87_tbl {
- uint8_t opcode;
- uint8_t modrm_reg;
- uint8_t modrm_mod;
- enum x86_decode_cmd cmd;
- uint8_t operand_size;
- bool rev;
- bool pop;
- void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op1);
- void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op2);
- void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
- uint32_t flags_mask;
-};
-
-struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL,
- decode_invalid};
-
-struct decode_tbl _decode_tbl1[256];
-struct decode_tbl _decode_tbl2[256];
-struct decode_x87_tbl _decode_tbl3[256];
-
-static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode)
-{
- struct decode_x87_tbl *decoder;
-
- decode->is_fpu = true;
- int mode = decode->modrm.mod == 3 ? 1 : 0;
- int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) |
- decode->modrm.reg;
-
- decoder = &_decode_tbl3[index];
-
- decode->cmd = decoder->cmd;
- if (decoder->operand_size) {
- decode->operand_size = decoder->operand_size;
- }
- decode->flags_mask = decoder->flags_mask;
- decode->fpop_stack = decoder->pop;
- decode->frev = decoder->rev;
-
- if (decoder->decode_op1) {
- decoder->decode_op1(env, decode, &decode->op[0]);
- }
- if (decoder->decode_op2) {
- decoder->decode_op2(env, decode, &decode->op[1]);
- }
- if (decoder->decode_postfix) {
- decoder->decode_postfix(env, decode);
- }
-
- VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n",
- decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg,
- decoder->modrm_mod);
-}
-
-static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_INC,
- X86_DECODE_CMD_DEC,
- X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
- X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
- X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
- X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
- X86_DECODE_CMD_PUSH,
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_INVL
- };
- decode->cmd = group[decode->modrm.reg];
- if (decode->modrm.reg > 2) {
- decode->flags_mask = 0;
- }
-}
-
-static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode)
-{
-
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_SLDT,
- X86_DECODE_CMD_STR,
- X86_DECODE_CMD_LLDT,
- X86_DECODE_CMD_LTR,
- X86_DECODE_CMD_VERR,
- X86_DECODE_CMD_VERW,
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_INVL
- };
- decode->cmd = group[decode->modrm.reg];
-}
-
-static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_SGDT,
- X86_DECODE_CMD_SIDT,
- X86_DECODE_CMD_LGDT,
- X86_DECODE_CMD_LIDT,
- X86_DECODE_CMD_SMSW,
- X86_DECODE_CMD_LMSW,
- X86_DECODE_CMD_LMSW,
- X86_DECODE_CMD_INVLPG
- };
- decode->cmd = group[decode->modrm.reg];
- if (0xf9 == decode->modrm.modrm) {
- decode->opcode[decode->len++] = decode->modrm.modrm;
- decode->cmd = X86_DECODE_CMD_RDTSCP;
- }
-}
-
-static void decode_btgroup(CPUX86State *env, struct x86_decode *decode)
-{
- enum x86_decode_cmd group[] = {
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_INVL,
- X86_DECODE_CMD_BT,
- X86_DECODE_CMD_BTS,
- X86_DECODE_CMD_BTR,
- X86_DECODE_CMD_BTC
- };
- decode->cmd = group[decode->modrm.reg];
-}
-
-static void decode_x87_general(CPUX86State *env, struct x86_decode *decode)
-{
- decode->is_fpu = true;
-}
-
-static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X87_VAR_FLOATP;
-}
-
-static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X87_VAR_INTP;
-}
-
-static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X87_VAR_BYTEP;
-}
-
-static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X87_VAR_REG;
- op->reg = 0;
-}
-
-static void decode_decode_x87_modrm_st0(CPUX86State *env,
- struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- op->type = X87_VAR_REG;
- op->reg = decode->modrm.modrm & 7;
-}
-
-
-static void decode_aegroup(CPUX86State *env, struct x86_decode *decode)
-{
- decode->is_fpu = true;
- switch (decode->modrm.reg) {
- case 0:
- decode->cmd = X86_DECODE_CMD_FXSAVE;
- decode_x87_modrm_bytep(env, decode, &decode->op[0]);
- break;
- case 1:
- decode_x87_modrm_bytep(env, decode, &decode->op[0]);
- decode->cmd = X86_DECODE_CMD_FXRSTOR;
- break;
- case 5:
- if (decode->modrm.modrm == 0xe8) {
- decode->cmd = X86_DECODE_CMD_LFENCE;
- } else {
- VM_PANIC("xrstor");
- }
- break;
- case 6:
- VM_PANIC_ON(decode->modrm.modrm != 0xf0);
- decode->cmd = X86_DECODE_CMD_MFENCE;
- break;
- case 7:
- if (decode->modrm.modrm == 0xf8) {
- decode->cmd = X86_DECODE_CMD_SFENCE;
- } else {
- decode->cmd = X86_DECODE_CMD_CLFLUSH;
- }
- break;
- default:
- VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg);
- break;
- }
-}
-
-static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = decode->opcode[1] - 0xc8;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
-}
-
-static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
-{
- switch (decode->modrm.modrm) {
- case 0xe0:
- /* FCHS */
- decode->cmd = X86_DECODE_CMD_FCHS;
- break;
- case 0xe1:
- decode->cmd = X86_DECODE_CMD_FABS;
- break;
- case 0xe4:
- VM_PANIC("FTST");
- break;
- case 0xe5:
- /* FXAM */
- decode->cmd = X86_DECODE_CMD_FXAM;
- break;
- default:
- VM_PANIC("FLDENV");
- break;
- }
-}
-
-static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
-{
- switch (decode->modrm.modrm) {
- case 0xe0:
- VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0],
- decode->modrm.modrm);
- break;
- case 0xe1:
- VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0],
- decode->modrm.modrm);
- break;
- case 0xe2:
- VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0],
- decode->modrm.modrm);
- break;
- case 0xe3:
- decode->cmd = X86_DECODE_CMD_FNINIT;
- break;
- case 0xe4:
- decode->cmd = X86_DECODE_CMD_FNSETPM;
- break;
- default:
- VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0],
- decode->modrm.modrm);
- break;
- }
-}
-
-
-#define RFLAGS_MASK_NONE 0
-#define RFLAGS_MASK_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
-#define RFLAGS_MASK_LAHF (CC_S | CC_Z | CC_A | CC_P | CC_C)
-#define RFLAGS_MASK_CF (CC_C)
-#define RFLAGS_MASK_IF (IF_MASK)
-#define RFLAGS_MASK_TF (TF_MASK)
-#define RFLAGS_MASK_DF (DF_MASK)
-#define RFLAGS_MASK_ZF (CC_Z)
-
-struct decode_tbl _1op_inst[] = {
- {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL,
- NULL, RFLAGS_MASK_OSZAPC},
- {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL,
- NULL, RFLAGS_MASK_OSZAPC},
- {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL,
- decode_pushseg, RFLAGS_MASK_NONE},
- {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL,
- decode_popseg, RFLAGS_MASK_NONE},
- {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
- NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
- NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
- {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false,
- NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
-
- {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
- NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
- {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false,
- NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
-
- {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
- NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
- {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false,
- NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
-
- {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x2f, X86_DECODE_CMD_DAS, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x3f, X86_DECODE_CMD_AAS, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x40, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x41, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x42, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x43, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x44, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x45, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x46, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
- {0x47, X86_DECODE_CMD_INC, 0, false,
- NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
-
- {0x48, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x49, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4a, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4b, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4c, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4d, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4e, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
- {0x4f, X86_DECODE_CMD_DEC, 0, false,
- NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
-
- {0x50, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x51, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x52, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x53, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x54, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x55, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x56, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
- {0x57, X86_DECODE_CMD_PUSH, 0, false,
- NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
-
- {0x58, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x59, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5a, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5b, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5c, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5d, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5e, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
- {0x5f, X86_DECODE_CMD_POP, 0, false,
- NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
-
- {0x60, X86_DECODE_CMD_PUSHA, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x61, X86_DECODE_CMD_POPA, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg,
- decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm,
- decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0x6c, X86_DECODE_CMD_INS, 1, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x6d, X86_DECODE_CMD_INS, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x6e, X86_DECODE_CMD_OUTS, 1, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x6f, X86_DECODE_CMD_OUTS, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0x70, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x71, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x72, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x73, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x74, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x75, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x76, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x77, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x78, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x79, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7a, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7b, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7c, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7d, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7e, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x7f, X86_DECODE_CMD_JXX, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
-
- {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
- {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm,
- NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
- {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
- {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed,
- NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
- {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm,
- decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg,
- decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0x90, X86_DECODE_CMD_NOP, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
- {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
- NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
-
- {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL,
- NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
-
- {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_POPF},*/
- {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_LAHF},
-
- {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
- {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL,
- NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
-
- {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xba, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
- {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL,
- NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
-
- {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
- {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
-
- {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL,
- NULL, NULL, NULL, RFLAGS_MASK_IRET},*/
-
- {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
- {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
- {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
- {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx,
- NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
-
- {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xd7, X86_DECODE_CMD_XLAT, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xda, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xde, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
- {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL,
- NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
-
- {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xe3, X86_DECODE_CMD_JCXZ, 1, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
-
- {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xea, X86_DECODE_CMD_JMP_FAR, 0, false,
- NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
- {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xec, X86_DECODE_CMD_IN, 1, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xed, X86_DECODE_CMD_IN, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xee, X86_DECODE_CMD_OUT, 1, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xef, X86_DECODE_CMD_OUT, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xf4, X86_DECODE_CMD_HLT, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xf5, X86_DECODE_CMD_CMC, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
-
- {0xf6, X86_DECODE_CMD_INVL, 1, true,
- NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
- {0xf7, X86_DECODE_CMD_INVL, 0, true,
- NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
-
- {0xf8, X86_DECODE_CMD_CLC, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
- {0xf9, X86_DECODE_CMD_STC, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
-
- {0xfa, X86_DECODE_CMD_CLI, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
- {0xfb, X86_DECODE_CMD_STI, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
- {0xfc, X86_DECODE_CMD_CLD, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
- {0xfd, X86_DECODE_CMD_STD, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
- {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},
- {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},
-};
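
Read against decode_opcode_general() and init_decoder() further down in this
file, the positional fields of each table row appear to be: opcode byte,
emulated command, forced operand size (0 keeps the size chosen from the
prefixes), whether a ModRM byte follows, up to four operand decoders, an
optional postfix decoder, and the set of RFLAGS bits the instruction affects.
Annotated on the 0x31 (XOR r/m, reg) row above, as a reading aid only:

    {0x31,                               /* opcode byte                      */
     X86_DECODE_CMD_XOR,                 /* command to emulate               */
     0,                                  /* keep the prefix-selected size    */
     true,                               /* a ModRM byte follows             */
     decode_modrm_rm, decode_modrm_reg,  /* op[0] = r/m (dst), op[1] = reg   */
     NULL, NULL, NULL,                   /* no op[2]/op[3], no postfix       */
     RFLAGS_MASK_OSZAPC},                /* flags the instruction rewrites   */
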
-
-struct decode_tbl _2op_inst[] = {
- {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},
- {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},
- {0x6, X86_DECODE_CMD_CLTS, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},
- {0x9, X86_DECODE_CMD_WBINVD, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x18, X86_DECODE_CMD_PREFETCH, 0, true,
- NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
- {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm,
- decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm,
- decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg,
- decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg,
- decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x30, X86_DECODE_CMD_WRMSR, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x31, X86_DECODE_CMD_RDTSC, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x32, X86_DECODE_CMD_RDMSR, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x77, X86_DECODE_CMD_EMMS, 0, false,
- NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
- {0x82, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x83, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x84, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x85, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x86, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x87, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x88, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x89, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8a, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8b, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8c, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8d, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8e, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x8f, X86_DECODE_CMD_JXX, 0, false,
- NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
- {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
- NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
- {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false,
- NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
- {0xa2, X86_DECODE_CMD_CPUID, 0, false,
- NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_CF},
- {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
- decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
- decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
- NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
- {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false,
- NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
- {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_CF},
- {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
- decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
- decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},
-
- {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_NONE},
- {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
- NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},
- {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
- {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg,
- NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
-
- {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm,
- NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},
-
- {0xc8, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xc9, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xca, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xcb, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xcc, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xcd, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xce, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
- {0xcf, X86_DECODE_CMD_BSWAP, 0, false,
- NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
-};
-
-struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,
- NULL, decode_invalid, 0};
-
-struct decode_x87_tbl _x87_inst[] = {
- {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
- decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0,
- decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
-
- {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false,
- decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},
- {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
- {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
- {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
- decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
- {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,
- decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
- {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
- {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
- decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
-
- {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
- decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,
- decode_db_4, RFLAGS_MASK_NONE},
- {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
- {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
- {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
-
- {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
- decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true,
- decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
-
- {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
- {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false,
- decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
-
- {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true,
- decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true,
- decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
- {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
- {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true,
- decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
-};
-
-void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- target_ulong ptr = 0;
- X86Seg seg = R_DS;
-
- if (!decode->modrm.mod && 6 == decode->modrm.rm) {
- ptr = decode->displacement;
- goto calc_addr;
- }
-
- if (decode->displacement_size) {
- ptr = sign(decode->displacement, decode->displacement_size);
- }
-
- switch (decode->modrm.rm) {
- case 0:
- ptr += BX(env) + SI(env);
- break;
- case 1:
- ptr += BX(env) + DI(env);
- break;
- case 2:
- ptr += BP(env) + SI(env);
- seg = R_SS;
- break;
- case 3:
- ptr += BP(env) + DI(env);
- seg = R_SS;
- break;
- case 4:
- ptr += SI(env);
- break;
- case 5:
- ptr += DI(env);
- break;
- case 6:
- ptr += BP(env);
- seg = R_SS;
- break;
- case 7:
- ptr += BX(env);
- break;
- }
-calc_addr:
- if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = (uint16_t)ptr;
- } else {
- op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
- }
-}
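
As a rough standalone sketch of the 16-bit effective-address computation
above, with made-up register and displacement values and none of the QEMU
types (illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t bx = 0x1000, bp = 0x2000, si = 0x0010, di = 0x0020;
        const uint16_t base[8] = {
            bx + si, bx + di, bp + si, bp + di,   /* rm 0..3 */
            si,      di,      bp,      bx         /* rm 4..7 */
        };
        int mod = 1, rm = 2;        /* e.g. [bp+si+disp8] */
        int16_t disp = -4;          /* sign-extended displacement */

        /* mod == 0, rm == 6 is the special disp16-only form */
        uint16_t ea = (mod == 0 && rm == 6) ? (uint16_t)disp
                                            : (uint16_t)(base[rm] + disp);

        /* bp-based forms default to SS, everything else to DS */
        int uses_ss = (rm == 2 || rm == 3 || (rm == 6 && mod != 0));
        printf("ea = 0x%04x, default segment = %s\n",
               (unsigned)ea, uses_ss ? "SS" : "DS");
        return 0;
    }
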
-
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
- int is_extended, int size)
-{
- target_ulong ptr = 0;
-
- if (is_extended) {
- reg |= R_R8;
- }
-
- switch (size) {
- case 1:
- if (is_extended || reg < 4 || rex_present) {
- ptr = (target_ulong)&RL(env, reg);
- } else {
- ptr = (target_ulong)&RH(env, reg - 4);
- }
- break;
- default:
- ptr = (target_ulong)&RRX(env, reg);
- break;
- }
- return ptr;
-}
-
-target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
- int is_extended, int size)
-{
- target_ulong val = 0;
- memcpy(&val,
- (void *)get_reg_ref(env, reg, rex_present, is_extended, size),
- size);
- return val;
-}
-
-static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode,
- X86Seg *sel)
-{
- target_ulong base = 0;
- target_ulong scaled_index = 0;
- int addr_size = decode->addressing_size;
- int base_reg = decode->sib.base;
- int index_reg = decode->sib.index;
-
- *sel = R_DS;
-
- if (decode->modrm.mod || base_reg != R_EBP) {
- if (decode->rex.b) {
- base_reg |= R_R8;
- }
- if (base_reg == R_ESP || base_reg == R_EBP) {
- *sel = R_SS;
- }
- base = get_reg_val(env, decode->sib.base, decode->rex.rex,
- decode->rex.b, addr_size);
- }
-
- if (decode->rex.x) {
- index_reg |= R_R8;
- }
-
- if (index_reg != R_ESP) {
- scaled_index = get_reg_val(env, index_reg, decode->rex.rex,
- decode->rex.x, addr_size) <<
- decode->sib.scale;
- }
- return base + scaled_index;
-}
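
A similar standalone sketch of the SIB contribution computed by get_sib_val()
above, again with arbitrary 32-bit register values and no REX prefix
(illustration only; the real code also folds in the REX.B/REX.X extensions):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t regs[8] = {            /* eax, ecx, edx, ebx, esp, ebp, esi, edi */
            0x1111, 0x2222, 0x3333, 0x4444, 0x8000, 0x9000, 0x0100, 0x0200
        };
        int scale = 2, index = 6, base = 1, mod = 1;   /* e.g. [ecx + esi*4 + disp8] */

        uint32_t ea = 0;
        if (mod != 0 || base != 5) {    /* base dropped only for mod == 0, base == 5 */
            ea += regs[base];
        }
        if (index != 4) {               /* index 4 (the ESP slot) means "no index" */
            ea += regs[index] << scale;
        }
        printf("sib contribution = 0x%x\n", (unsigned)ea);
        return 0;
    }
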
-
-void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- X86Seg seg = R_DS;
- target_ulong ptr = 0;
- int addr_size = decode->addressing_size;
-
- if (decode->displacement_size) {
- ptr = sign(decode->displacement, decode->displacement_size);
- }
-
- if (4 == decode->modrm.rm) {
- ptr += get_sib_val(env, decode, &seg);
- } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
- if (x86_is_long_mode(env_cpu(env))) {
- ptr += env->eip + decode->len;
- } else {
- ptr = decode->displacement;
- }
- } else {
- if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) {
- seg = R_SS;
- }
- ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex,
- decode->rex.b, addr_size);
- }
-
- if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = (uint32_t)ptr;
- } else {
- op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
- }
-}
-
-void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- X86Seg seg = R_DS;
- int32_t offset = 0;
- int mod = decode->modrm.mod;
- int rm = decode->modrm.rm;
- target_ulong ptr;
- int src = decode->modrm.rm;
-
- if (decode->displacement_size) {
- offset = sign(decode->displacement, decode->displacement_size);
- }
-
- if (4 == rm) {
- ptr = get_sib_val(env, decode, &seg) + offset;
- } else if (0 == mod && 5 == rm) {
- ptr = env->eip + decode->len + (int32_t) offset;
- } else {
- ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) +
- (int64_t) offset;
- }
-
- if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = ptr;
- } else {
- op->ptr = decode_linear_addr(env, decode, ptr, seg);
- }
-}
-
-
-void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op)
-{
- if (3 == decode->modrm.mod) {
- op->reg = decode->modrm.reg;
- op->type = X86_VAR_REG;
- op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
- decode->rex.b, decode->operand_size);
- return;
- }
-
- switch (decode->addressing_size) {
- case 2:
- calc_modrm_operand16(env, decode, op);
- break;
- case 4:
- calc_modrm_operand32(env, decode, op);
- break;
- case 8:
- calc_modrm_operand64(env, decode, op);
- break;
- default:
- VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size);
- break;
- }
-}
-
-static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
-{
- while (1) {
- /*
- * A REX prefix takes effect only when it is the last prefix before the
- * opcode; any REX seen before a legacy prefix is ignored, so each legacy
- * prefix found here clears the recorded rex value.
- */
- uint8_t byte = decode_byte(env, decode);
- switch (byte) {
- case PREFIX_LOCK:
- decode->lock = byte;
- decode->rex.rex = 0;
- break;
- case PREFIX_REPN:
- case PREFIX_REP:
- decode->rep = byte;
- decode->rex.rex = 0;
- break;
- case PREFIX_CS_SEG_OVERRIDE:
- case PREFIX_SS_SEG_OVERRIDE:
- case PREFIX_DS_SEG_OVERRIDE:
- case PREFIX_ES_SEG_OVERRIDE:
- case PREFIX_FS_SEG_OVERRIDE:
- case PREFIX_GS_SEG_OVERRIDE:
- decode->segment_override = byte;
- decode->rex.rex = 0;
- break;
- case PREFIX_OP_SIZE_OVERRIDE:
- decode->op_size_override = byte;
- decode->rex.rex = 0;
- break;
- case PREFIX_ADDR_SIZE_OVERRIDE:
- decode->addr_size_override = byte;
- decode->rex.rex = 0;
- break;
- case PREFIX_REX ... (PREFIX_REX + 0xf):
- if (x86_is_long_mode(env_cpu(env))) {
- decode->rex.rex = byte;
- break;
- }
- /* fall through when not in long mode */
- default:
- decode->len--;
- return;
- }
- }
-}
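
For example, given the byte stream 0x2e 0x66 0x31 ..., the loop above records
a CS segment override and an operand-size override, then reads 0x31, which
matches no prefix case, so decode->len is stepped back by one and the same
byte is re-read later as the opcode. A stray REX byte ahead of either legacy
prefix would have its recorded value cleared again by them, which is exactly
the behaviour the comment in the loop describes.
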
-
-void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
-{
- decode->addressing_size = -1;
- if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
- if (decode->addr_size_override) {
- decode->addressing_size = 4;
- } else {
- decode->addressing_size = 2;
- }
- } else if (!x86_is_long_mode(env_cpu(env))) {
- /* protected */
- struct vmx_segment cs;
- vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
- /* check the D/B bit of CS (bit 14 of the VMX access rights) */
- if ((cs.ar >> 14) & 1) {
- if (decode->addr_size_override) {
- decode->addressing_size = 2;
- } else {
- decode->addressing_size = 4;
- }
- } else {
- if (decode->addr_size_override) {
- decode->addressing_size = 4;
- } else {
- decode->addressing_size = 2;
- }
- }
- } else {
- /* long */
- if (decode->addr_size_override) {
- decode->addressing_size = 4;
- } else {
- decode->addressing_size = 8;
- }
- }
-}
-
-void set_operand_size(CPUX86State *env, struct x86_decode *decode)
-{
- decode->operand_size = -1;
- if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
- if (decode->op_size_override) {
- decode->operand_size = 4;
- } else {
- decode->operand_size = 2;
- }
- } else if (!x86_is_long_mode(env_cpu(env))) {
- /* protected */
- struct vmx_segment cs;
- vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
- /* check the D/B bit of CS (bit 14 of the VMX access rights) */
- if ((cs.ar >> 14) & 1) {
- if (decode->op_size_override) {
- decode->operand_size = 2;
- } else {
- decode->operand_size = 4;
- }
- } else {
- if (decode->op_size_override) {
- decode->operand_size = 4;
- } else {
- decode->operand_size = 2;
- }
- }
- } else {
- /* long */
- if (decode->op_size_override) {
- decode->operand_size = 2;
- } else {
- decode->operand_size = 4;
- }
-
- if (decode->rex.w) {
- decode->operand_size = 8;
- }
- }
-}
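
Taken together, the two helpers above boil down to: real and v8086 mode
default to 16-bit addresses and operands, with the 0x67/0x66 prefixes
selecting 32-bit; protected mode takes its defaults from the D/B bit of CS and
the prefixes select the opposite size; long mode defaults to 64-bit addresses
and 32-bit operands, 0x67 selects 32-bit addresses, 0x66 selects 16-bit
operands, and REX.W forces 64-bit operands regardless of 0x66.
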
-
-static void decode_sib(CPUX86State *env, struct x86_decode *decode)
-{
- if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&
- (decode->addressing_size != 2)) {
- decode->sib.sib = decode_byte(env, decode);
- decode->sib_present = true;
- }
-}
-
-/* 16-bit ModRM: displacement size in bytes, indexed by [mod][rm] */
-int disp16_tbl[4][8] = {
- {0, 0, 0, 0, 0, 0, 2, 0},
- {1, 1, 1, 1, 1, 1, 1, 1},
- {2, 2, 2, 2, 2, 2, 2, 2},
- {0, 0, 0, 0, 0, 0, 0, 0}
-};
-
-/* 32/64-bit ModRM: displacement size by [mod][rm]; -1 defers to the SIB base */
-int disp32_tbl[4][8] = {
- {0, 0, 0, 0, -1, 4, 0, 0},
- {1, 1, 1, 1, 1, 1, 1, 1},
- {4, 4, 4, 4, 4, 4, 4, 4},
- {0, 0, 0, 0, 0, 0, 0, 0}
-};
-
-static inline void decode_displacement(CPUX86State *env, struct x86_decode *decode)
-{
- int addressing_size = decode->addressing_size;
- int mod = decode->modrm.mod;
- int rm = decode->modrm.rm;
-
- decode->displacement_size = 0;
- switch (addressing_size) {
- case 2:
- decode->displacement_size = disp16_tbl[mod][rm];
- if (decode->displacement_size) {
- decode->displacement = (uint16_t)decode_bytes(env, decode,
- decode->displacement_size);
- }
- break;
- case 4:
- case 8:
- if (-1 == disp32_tbl[mod][rm]) {
- if (5 == decode->sib.base) {
- decode->displacement_size = 4;
- }
- } else {
- decode->displacement_size = disp32_tbl[mod][rm];
- }
-
- if (decode->displacement_size) {
- decode->displacement = (uint32_t)decode_bytes(env, decode,
- decode->displacement_size);
- }
- break;
- }
-}
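
For instance, with 32-bit addressing, mod == 1 always fetches a one-byte
displacement and mod == 2 a four-byte one; the -1 entry (mod == 0, rm == 4)
defers the decision to the SIB byte already consumed by decode_sib(), and only
a SIB base of 5 then pulls in a four-byte displacement. The 16-bit table's only
special case is instead mod == 0, rm == 6, which fetches a plain disp16.
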
-
-static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode)
-{
- decode->modrm.modrm = decode_byte(env, decode);
- decode->is_modrm = true;
-
- decode_sib(env, decode);
- decode_displacement(env, decode);
-}
-
-static inline void decode_opcode_general(CPUX86State *env,
- struct x86_decode *decode,
- uint8_t opcode,
- struct decode_tbl *inst_decoder)
-{
- decode->cmd = inst_decoder->cmd;
- if (inst_decoder->operand_size) {
- decode->operand_size = inst_decoder->operand_size;
- }
- decode->flags_mask = inst_decoder->flags_mask;
-
- if (inst_decoder->is_modrm) {
- decode_modrm(env, decode);
- }
- if (inst_decoder->decode_op1) {
- inst_decoder->decode_op1(env, decode, &decode->op[0]);
- }
- if (inst_decoder->decode_op2) {
- inst_decoder->decode_op2(env, decode, &decode->op[1]);
- }
- if (inst_decoder->decode_op3) {
- inst_decoder->decode_op3(env, decode, &decode->op[2]);
- }
- if (inst_decoder->decode_op4) {
- inst_decoder->decode_op4(env, decode, &decode->op[3]);
- }
- if (inst_decoder->decode_postfix) {
- inst_decoder->decode_postfix(env, decode);
- }
-}
-
-static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode,
- uint8_t opcode)
-{
- struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];
- decode_opcode_general(env, decode, opcode, inst_decoder);
-}
-
-
-static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode,
- uint8_t opcode)
-{
- struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];
- decode_opcode_general(env, decode, opcode, inst_decoder);
-}
-
-static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t opcode;
-
- opcode = decode_byte(env, decode);
- decode->opcode[decode->opcode_len++] = opcode;
- if (opcode != OPCODE_ESCAPE) {
- decode_opcode_1(env, decode, opcode);
- } else {
- opcode = decode_byte(env, decode);
- decode->opcode[decode->opcode_len++] = opcode;
- decode_opcode_2(env, decode, opcode);
- }
-}
-
-uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
-{
- memset(decode, 0, sizeof(*decode));
- decode_prefix(env, decode);
- set_addressing_size(env, decode);
- set_operand_size(env, decode);
-
- decode_opcodes(env, decode);
-
- return decode->len;
-}
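
A minimal caller, sketched with hypothetical surrounding code (the emulator's
real call sites may differ):

    struct x86_decode decode;
    uint32_t ins_len = decode_instruction(env, &decode); /* fills decode, returns length in bytes */
    /* ...dispatch on decode.cmd / decode.op[] to emulate the instruction... */
    env->eip += ins_len;   /* assumption: the caller advances RIP past the decoded bytes */
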
-
-void init_decoder(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) {
- memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));
- }
- for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
- memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));
- }
- for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {
- memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));
- }
- for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {
- _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];
- }
- for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) {
- _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];
- }
- for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {
- int index = ((_x87_inst[i].opcode & 0xf) << 4) |
- ((_x87_inst[i].modrm_mod & 1) << 3) |
- _x87_inst[i].modrm_reg;
- _decode_tbl3[index] = _x87_inst[i];
- }
-}
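
The x87 rows are re-indexed so that lookup needs no search: for the 0xd8 FADD
rows above, the register form {0xd8, 0, 3, ...} lands at
((0xd8 & 0xf) << 4) | ((3 & 1) << 3) | 0 = 0x88 and the memory form
{0xd8, 0, 0, ...} at 0x80, i.e. each opcode owns a 16-slot group with its
mod == 0 (memory-operand) rows in the low half and its mod == 3 (register)
rows in the high half.
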
-
-
-const char *decode_cmd_to_string(enum x86_decode_cmd cmd)
-{
- static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG",
- "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT",
- "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD",
- "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST",
- "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR",
- "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG",
- "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN",
- "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW",
- "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR",
- "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR",
- "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", "DIV", "IDIV", "MUL",
- "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS",
- "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT",
- "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA",
- "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT",
- "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES",
- "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT",
- "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW",
- "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV",
- "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE",
- "PREFETCH", "CLFLUSH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW",
- "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"};
- return cmds[cmd];
-}
-
-target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
- target_ulong addr, X86Seg seg)
-{
- switch (decode->segment_override) {
- case PREFIX_CS_SEG_OVERRIDE:
- seg = R_CS;
- break;
- case PREFIX_SS_SEG_OVERRIDE:
- seg = R_SS;
- break;
- case PREFIX_DS_SEG_OVERRIDE:
- seg = R_DS;
- break;
- case PREFIX_ES_SEG_OVERRIDE:
- seg = R_ES;
- break;
- case PREFIX_FS_SEG_OVERRIDE:
- seg = R_FS;
- break;
- case PREFIX_GS_SEG_OVERRIDE:
- seg = R_GS;
- break;
- default:
- break;
- }
- return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg);
-}
diff --git a/target/i386/hvf/x86_decode.h b/target/i386/hvf/x86_decode.h
deleted file mode 100644
index a2d7a2a..0000000
--- a/target/i386/hvf/x86_decode.h
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Copyright (C) 2016 Veertu Inc,
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HVF_X86_DECODE_H
-#define HVF_X86_DECODE_H
-
-#include "cpu.h"
-#include "x86.h"
-
-typedef enum x86_prefix {
- /* group 1 */
- PREFIX_LOCK = 0xf0,
- PREFIX_REPN = 0xf2,
- PREFIX_REP = 0xf3,
- /* group 2 */
- PREFIX_CS_SEG_OVERRIDE = 0x2e,
- PREFIX_SS_SEG_OVERRIDE = 0x36,
- PREFIX_DS_SEG_OVERRIDE = 0x3e,
- PREFIX_ES_SEG_OVERRIDE = 0x26,
- PREFIX_FS_SEG_OVERRIDE = 0x64,
- PREFIX_GS_SEG_OVERRIDE = 0x65,
- /* group 3 */
- PREFIX_OP_SIZE_OVERRIDE = 0x66,
- /* group 4 */
- PREFIX_ADDR_SIZE_OVERRIDE = 0x67,
-
- PREFIX_REX = 0x40,
-} x86_prefix;
-
-enum x86_decode_cmd {
- X86_DECODE_CMD_INVL = 0,
-
- X86_DECODE_CMD_PUSH,
- X86_DECODE_CMD_PUSH_SEG,
- X86_DECODE_CMD_POP,
- X86_DECODE_CMD_POP_SEG,
- X86_DECODE_CMD_MOV,
- X86_DECODE_CMD_MOVSX,
- X86_DECODE_CMD_MOVZX,
- X86_DECODE_CMD_CALL_NEAR,
- X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
- X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
- X86_DECODE_CMD_CALL_FAR,
- X86_DECODE_RET_NEAR,
- X86_DECODE_RET_FAR,
- X86_DECODE_CMD_ADD,
- X86_DECODE_CMD_OR,
- X86_DECODE_CMD_ADC,
- X86_DECODE_CMD_SBB,
- X86_DECODE_CMD_AND,
- X86_DECODE_CMD_SUB,
- X86_DECODE_CMD_XOR,
- X86_DECODE_CMD_CMP,
- X86_DECODE_CMD_INC,
- X86_DECODE_CMD_DEC,
- X86_DECODE_CMD_TST,
- X86_DECODE_CMD_NOT,
- X86_DECODE_CMD_NEG,
- X86_DECODE_CMD_JMP_NEAR,
- X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
- X86_DECODE_CMD_JMP_FAR,
- X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
- X86_DECODE_CMD_LEA,
- X86_DECODE_CMD_JXX,
- X86_DECODE_CMD_JCXZ,
- X86_DECODE_CMD_SETXX,
- X86_DECODE_CMD_MOV_TO_SEG,
- X86_DECODE_CMD_MOV_FROM_SEG,
- X86_DECODE_CMD_CLI,
- X86_DECODE_CMD_STI,
- X86_DECODE_CMD_CLD,
- X86_DECODE_CMD_STD,
- X86_DECODE_CMD_STC,
- X86_DECODE_CMD_CLC,
- X86_DECODE_CMD_OUT,
- X86_DECODE_CMD_IN,
- X86_DECODE_CMD_INS,
- X86_DECODE_CMD_OUTS,
- X86_DECODE_CMD_LIDT,
- X86_DECODE_CMD_SIDT,
- X86_DECODE_CMD_LGDT,
- X86_DECODE_CMD_SGDT,
- X86_DECODE_CMD_SMSW,
- X86_DECODE_CMD_LMSW,
- X86_DECODE_CMD_RDTSCP,
- X86_DECODE_CMD_INVLPG,
- X86_DECODE_CMD_MOV_TO_CR,
- X86_DECODE_CMD_MOV_FROM_CR,
- X86_DECODE_CMD_MOV_TO_DR,
- X86_DECODE_CMD_MOV_FROM_DR,
- X86_DECODE_CMD_PUSHF,
- X86_DECODE_CMD_POPF,
- X86_DECODE_CMD_CPUID,
- X86_DECODE_CMD_ROL,
- X86_DECODE_CMD_ROR,
- X86_DECODE_CMD_RCL,
- X86_DECODE_CMD_RCR,
- X86_DECODE_CMD_SHL,
- X86_DECODE_CMD_SAL,
- X86_DECODE_CMD_SHR,
- X86_DECODE_CMD_SHRD,
- X86_DECODE_CMD_SHLD,
- X86_DECODE_CMD_SAR,
- X86_DECODE_CMD_DIV,
- X86_DECODE_CMD_IDIV,
- X86_DECODE_CMD_MUL,
- X86_DECODE_CMD_IMUL_3,
- X86_DECODE_CMD_IMUL_2,
- X86_DECODE_CMD_IMUL_1,
- X86_DECODE_CMD_MOVS,
- X86_DECODE_CMD_CMPS,
- X86_DECODE_CMD_SCAS,
- X86_DECODE_CMD_LODS,
- X86_DECODE_CMD_STOS,
- X86_DECODE_CMD_BSWAP,
- X86_DECODE_CMD_XCHG,
- X86_DECODE_CMD_RDTSC,
- X86_DECODE_CMD_RDMSR,
- X86_DECODE_CMD_WRMSR,
- X86_DECODE_CMD_ENTER,
- X86_DECODE_CMD_LEAVE,
- X86_DECODE_CMD_BT,
- X86_DECODE_CMD_BTS,
- X86_DECODE_CMD_BTC,
- X86_DECODE_CMD_BTR,
- X86_DECODE_CMD_BSF,
- X86_DECODE_CMD_BSR,
- X86_DECODE_CMD_IRET,
- X86_DECODE_CMD_INT,
- X86_DECODE_CMD_POPA,
- X86_DECODE_CMD_PUSHA,
- X86_DECODE_CMD_CWD,
- X86_DECODE_CMD_CBW,
- X86_DECODE_CMD_DAS,
- X86_DECODE_CMD_AAD,
- X86_DECODE_CMD_AAM,
- X86_DECODE_CMD_AAS,
- X86_DECODE_CMD_LOOP,
- X86_DECODE_CMD_SLDT,
- X86_DECODE_CMD_STR,
- X86_DECODE_CMD_LLDT,
- X86_DECODE_CMD_LTR,
- X86_DECODE_CMD_VERR,
- X86_DECODE_CMD_VERW,
- X86_DECODE_CMD_SAHF,
- X86_DECODE_CMD_LAHF,
- X86_DECODE_CMD_WBINVD,
- X86_DECODE_CMD_LDS,
- X86_DECODE_CMD_LSS,
- X86_DECODE_CMD_LES,
- X86_DECODE_XMD_LGS,
- X86_DECODE_CMD_LFS,
- X86_DECODE_CMD_CMC,
- X86_DECODE_CMD_XLAT,
- X86_DECODE_CMD_NOP,
- X86_DECODE_CMD_CMOV,
- X86_DECODE_CMD_CLTS,
- X86_DECODE_CMD_XADD,
- X86_DECODE_CMD_HLT,
- X86_DECODE_CMD_CMPXCHG8B,
- X86_DECODE_CMD_CMPXCHG,
- X86_DECODE_CMD_POPCNT,
-
- X86_DECODE_CMD_FNINIT,
- X86_DECODE_CMD_FLD,
- X86_DECODE_CMD_FLDxx,
- X86_DECODE_CMD_FNSTCW,
- X86_DECODE_CMD_FNSTSW,
- X86_DECODE_CMD_FNSETPM,
- X86_DECODE_CMD_FSAVE,
- X86_DECODE_CMD_FRSTOR,
- X86_DECODE_CMD_FXSAVE,
- X86_DECODE_CMD_FXRSTOR,
- X86_DECODE_CMD_FDIV,
- X86_DECODE_CMD_FMUL,
- X86_DECODE_CMD_FSUB,
- X86_DECODE_CMD_FADD,
- X86_DECODE_CMD_EMMS,
- X86_DECODE_CMD_MFENCE,
- X86_DECODE_CMD_SFENCE,
- X86_DECODE_CMD_LFENCE,
- X86_DECODE_CMD_PREFETCH,
- X86_DECODE_CMD_CLFLUSH,
- X86_DECODE_CMD_FST,
- X86_DECODE_CMD_FABS,
- X86_DECODE_CMD_FUCOM,
- X86_DECODE_CMD_FUCOMI,
- X86_DECODE_CMD_FLDCW,
- X86_DECODE_CMD_FXCH,
- X86_DECODE_CMD_FCHS,
- X86_DECODE_CMD_FCMOV,
- X86_DECODE_CMD_FRNDINT,
- X86_DECODE_CMD_FXAM,
-
- X86_DECODE_CMD_LAST,
-};
-
-const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
-
-typedef struct x86_modrm {
- union {
- uint8_t modrm;
- struct {
- uint8_t rm:3;
- uint8_t reg:3;
- uint8_t mod:2;
- };
- };
-} __attribute__ ((__packed__)) x86_modrm;
-
-typedef struct x86_sib {
- union {
- uint8_t sib;
- struct {
- uint8_t base:3;
- uint8_t index:3;
- uint8_t scale:2;
- };
- };
-} __attribute__ ((__packed__)) x86_sib;
-
-typedef struct x86_rex {
- union {
- uint8_t rex;
- struct {
- uint8_t b:1;
- uint8_t x:1;
- uint8_t r:1;
- uint8_t w:1;
- uint8_t unused:4;
- };
- };
-} __attribute__ ((__packed__)) x86_rex;
-
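The packed structs above rely on the compiler allocating bit-fields from the least-significant bit upward, as GCC and Clang do on x86 hosts. A minimal stand-alone sketch of the equivalent shift-and-mask decode of a ModR/M byte (illustrative only, not part of the deleted header):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t modrm = 0x94;              /* 10 010 100 in binary          */
        uint8_t mod = (modrm >> 6) & 0x3;  /* 2 -> 32-bit displacement form */
        uint8_t reg = (modrm >> 3) & 0x7;  /* 2 -> EDX, or an opcode ext    */
        uint8_t rm  =  modrm       & 0x7;  /* 4 -> a SIB byte follows       */

        printf("mod=%u reg=%u rm=%u\n", mod, reg, rm);
        return 0;
    }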
-typedef enum x86_var_type {
- X86_VAR_IMMEDIATE,
- X86_VAR_OFFSET,
- X86_VAR_REG,
- X86_VAR_RM,
-
- /* for floating point computations */
- X87_VAR_REG,
- X87_VAR_FLOATP,
- X87_VAR_INTP,
- X87_VAR_BYTEP,
-} x86_var_type;
-
-typedef struct x86_decode_op {
- enum x86_var_type type;
- int size;
-
- int reg;
- target_ulong val;
-
- target_ulong ptr;
-} x86_decode_op;
-
-typedef struct x86_decode {
- int len;
- uint8_t opcode[4];
- uint8_t opcode_len;
- enum x86_decode_cmd cmd;
- int addressing_size;
- int operand_size;
- int lock;
- int rep;
- int op_size_override;
- int addr_size_override;
- int segment_override;
- int control_change_inst;
- bool fwait;
- bool fpop_stack;
- bool frev;
-
- uint32_t displacement;
- uint8_t displacement_size;
- struct x86_rex rex;
- bool is_modrm;
- bool sib_present;
- struct x86_sib sib;
- struct x86_modrm modrm;
- struct x86_decode_op op[4];
- bool is_fpu;
- uint32_t flags_mask;
-
-} x86_decode;
-
-uint64_t sign(uint64_t val, int size);
-
-uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
-
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
- int is_extended, int size);
-target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
- int is_extended, int size);
-void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op);
-target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
- target_ulong addr, enum X86Seg seg);
-
-void init_decoder(void);
-void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op);
-void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op);
-void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
- struct x86_decode_op *op);
-void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
-void set_operand_size(CPUX86State *env, struct x86_decode *decode);
-
-#endif
diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c
index f33836d..7b599c9 100644
--- a/target/i386/hvf/x86_descr.c
+++ b/target/i386/hvf/x86_descr.c
@@ -60,14 +60,14 @@ uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);
}
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
{
- x68_segment_selector sel;
+ x86_segment_selector sel;
sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);
return sel;
}
-void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg)
+void vmx_write_segment_selector(CPUState *cpu, x86_segment_selector selector, X86Seg seg)
{
wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel);
}
@@ -90,7 +90,7 @@ void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Se
wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar);
}
-void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector,
+void x86_segment_descriptor_to_vmx(CPUState *cpu, x86_segment_selector selector,
struct x86_segment_descriptor *desc,
struct vmx_segment *vmx_desc)
{
diff --git a/target/i386/hvf/x86_descr.h b/target/i386/hvf/x86_descr.h
index 9f06014..24af494 100644
--- a/target/i386/hvf/x86_descr.h
+++ b/target/i386/hvf/x86_descr.h
@@ -19,7 +19,7 @@
#ifndef HVF_X86_DESCR_H
#define HVF_X86_DESCR_H
-#include "x86.h"
+#include "emulate/x86.h"
typedef struct vmx_segment {
uint16_t sel;
@@ -34,10 +34,10 @@ void vmx_read_segment_descriptor(CPUState *cpu,
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
enum X86Seg seg);
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu,
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu,
enum X86Seg seg);
void vmx_write_segment_selector(CPUState *cpu,
- x68_segment_selector selector,
+ x86_segment_selector selector,
enum X86Seg seg);
uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg);
@@ -45,7 +45,7 @@ void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg,
uint64_t base);
void x86_segment_descriptor_to_vmx(CPUState *cpu,
- x68_segment_selector selector,
+ x86_segment_selector selector,
struct x86_segment_descriptor *desc,
struct vmx_segment *vmx_desc);
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
deleted file mode 100644
index 38c782b..0000000
--- a/target/i386/hvf/x86_emu.c
+++ /dev/null
@@ -1,1487 +0,0 @@
-/*
- * Copyright (C) 2016 Veertu Inc,
- * Copyright (C) 2017 Google Inc,
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/////////////////////////////////////////////////////////////////////////
-//
-// Copyright (C) 2001-2012 The Bochs Project
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
-/////////////////////////////////////////////////////////////////////////
-
-#include "qemu/osdep.h"
-#include "panic.h"
-#include "x86_decode.h"
-#include "x86.h"
-#include "x86_emu.h"
-#include "x86_mmu.h"
-#include "x86_flags.h"
-#include "vmcs.h"
-#include "vmx.h"
-
-void hvf_handle_io(CPUState *cs, uint16_t port, void *data,
- int direction, int size, uint32_t count);
-
-#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
-{ \
- fetch_operands(env, decode, 2, true, true, false); \
- switch (decode->operand_size) { \
- case 1: \
- { \
- uint8_t v1 = (uint8_t)decode->op[0].val; \
- uint8_t v2 = (uint8_t)decode->op[1].val; \
- uint8_t diff = v1 cmd v2; \
- if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 1); \
- } \
- FLAGS_FUNC##8(env, v1, v2, diff); \
- break; \
- } \
- case 2: \
- { \
- uint16_t v1 = (uint16_t)decode->op[0].val; \
- uint16_t v2 = (uint16_t)decode->op[1].val; \
- uint16_t diff = v1 cmd v2; \
- if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 2); \
- } \
- FLAGS_FUNC##16(env, v1, v2, diff); \
- break; \
- } \
- case 4: \
- { \
- uint32_t v1 = (uint32_t)decode->op[0].val; \
- uint32_t v2 = (uint32_t)decode->op[1].val; \
- uint32_t diff = v1 cmd v2; \
- if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 4); \
- } \
- FLAGS_FUNC##32(env, v1, v2, diff); \
- break; \
- } \
- default: \
- VM_PANIC("bad size\n"); \
- } \
-} \
-
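EXEC_2OP_FLAGS_CMD works by splicing the operator passed as cmd between the two fetched operands and token-pasting the operand size onto FLAGS_FUNC to pick the matching flag helper (SET_FLAGS_OSZAPC_ADD8/16/32 and friends). A minimal stand-alone demo of that token-pasting pattern, with every name invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    static void demo_flags8(uint8_t a, uint8_t b, uint8_t r)
    {
        printf("8-bit: %u %u -> %u\n", a, b, r);   /* stand-in flag helper */
    }

    /* FLAGS_FUNC##8 pastes the size onto the helper name; "cmd" is spliced
     * in as the operator, mirroring the macro above. */
    #define DEMO_2OP(cmd, FLAGS_FUNC)      \
        do {                               \
            uint8_t v1 = 7, v2 = 5;        \
            uint8_t res = v1 cmd v2;       \
            FLAGS_FUNC##8(v1, v2, res);    \
        } while (0)

    int main(void)
    {
        DEMO_2OP(+, demo_flags);           /* calls demo_flags8(7, 5, 12) */
        return 0;
    }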
-target_ulong read_reg(CPUX86State *env, int reg, int size)
-{
- switch (size) {
- case 1:
- return x86_reg(env, reg)->lx;
- case 2:
- return x86_reg(env, reg)->rx;
- case 4:
- return x86_reg(env, reg)->erx;
- case 8:
- return x86_reg(env, reg)->rrx;
- default:
- abort();
- }
- return 0;
-}
-
-void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
-{
- switch (size) {
- case 1:
- x86_reg(env, reg)->lx = val;
- break;
- case 2:
- x86_reg(env, reg)->rx = val;
- break;
- case 4:
- x86_reg(env, reg)->rrx = (uint32_t)val;
- break;
- case 8:
- x86_reg(env, reg)->rrx = val;
- break;
- default:
- abort();
- }
-}
-
-target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
-{
- target_ulong val;
-
- switch (size) {
- case 1:
- val = *(uint8_t *)reg_ptr;
- break;
- case 2:
- val = *(uint16_t *)reg_ptr;
- break;
- case 4:
- val = *(uint32_t *)reg_ptr;
- break;
- case 8:
- val = *(uint64_t *)reg_ptr;
- break;
- default:
- abort();
- }
- return val;
-}
-
-void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
-{
- switch (size) {
- case 1:
- *(uint8_t *)reg_ptr = val;
- break;
- case 2:
- *(uint16_t *)reg_ptr = val;
- break;
- case 4:
- *(uint64_t *)reg_ptr = (uint32_t)val;
- break;
- case 8:
- *(uint64_t *)reg_ptr = val;
- break;
- default:
- abort();
- }
-}
-
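The 4-byte cases above intentionally store a truncated value into the full 64-bit slot: on x86-64 a 32-bit register write zero-extends to 64 bits, while 8- and 16-bit writes leave the upper bits untouched. A stand-alone sketch of that behaviour; the union mirrors the lx/rx/erx/rrx layout and assumes a little-endian host, as the emulator does:

    #include <stdint.h>
    #include <stdio.h>

    union demo_reg { uint64_t rrx; uint32_t erx; uint16_t rx; uint8_t lx; };

    int main(void)
    {
        union demo_reg r = { .rrx = 0xFFFFFFFFFFFFFFFFllu };

        r.rx = 0x1234;                    /* 16-bit write keeps upper bits */
        printf("0x%016llx\n", (unsigned long long)r.rrx);

        r.rrx = 0xFFFFFFFFFFFFFFFFllu;
        r.rrx = (uint32_t)0x12345678;     /* 32-bit write zero-extends     */
        printf("0x%016llx\n", (unsigned long long)r.rrx);
        return 0;
    }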
-static bool is_host_reg(CPUX86State *env, target_ulong ptr)
-{
- return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
-}
-
-void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
-{
- if (is_host_reg(env, ptr)) {
- write_val_to_reg(ptr, val, size);
- return;
- }
- vmx_write_mem(env_cpu(env), ptr, &val, size);
-}
-
-uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
-{
- vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
- return env->hvf_mmio_buf;
-}
-
-
-target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
-{
- target_ulong val;
- uint8_t *mmio_ptr;
-
- if (is_host_reg(env, ptr)) {
- return read_val_from_reg(ptr, size);
- }
-
- mmio_ptr = read_mmio(env, ptr, size);
- switch (size) {
- case 1:
- val = *(uint8_t *)mmio_ptr;
- break;
- case 2:
- val = *(uint16_t *)mmio_ptr;
- break;
- case 4:
- val = *(uint32_t *)mmio_ptr;
- break;
- case 8:
- val = *(uint64_t *)mmio_ptr;
- break;
- default:
- VM_PANIC("bad size\n");
- break;
- }
- return val;
-}
-
-static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
- int n, bool val_op0, bool val_op1, bool val_op2)
-{
- int i;
- bool calc_val[3] = {val_op0, val_op1, val_op2};
-
- for (i = 0; i < n; i++) {
- switch (decode->op[i].type) {
- case X86_VAR_IMMEDIATE:
- break;
- case X86_VAR_REG:
- VM_PANIC_ON(!decode->op[i].ptr);
- if (calc_val[i]) {
- decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
- decode->operand_size);
- }
- break;
- case X86_VAR_RM:
- calc_modrm_operand(env, decode, &decode->op[i]);
- if (calc_val[i]) {
- decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
- decode->operand_size);
- }
- break;
- case X86_VAR_OFFSET:
- decode->op[i].ptr = decode_linear_addr(env, decode,
- decode->op[i].ptr,
- R_DS);
- if (calc_val[i]) {
- decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
- decode->operand_size);
- }
- break;
- default:
- break;
- }
- }
-}
-
-static void exec_mov(CPUX86State *env, struct x86_decode *decode)
-{
- fetch_operands(env, decode, 2, false, true, false);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
- decode->operand_size);
-
- env->eip += decode->len;
-}
-
-static void exec_add(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
- env->eip += decode->len;
-}
-
-static void exec_or(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
- env->eip += decode->len;
-}
-
-static void exec_adc(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
- env->eip += decode->len;
-}
-
-static void exec_sbb(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
- env->eip += decode->len;
-}
-
-static void exec_and(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
- env->eip += decode->len;
-}
-
-static void exec_sub(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
- env->eip += decode->len;
-}
-
-static void exec_xor(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
- env->eip += decode->len;
-}
-
-static void exec_neg(CPUX86State *env, struct x86_decode *decode)
-{
- /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
- int32_t val;
- fetch_operands(env, decode, 2, true, true, false);
-
- val = 0 - sign(decode->op[1].val, decode->operand_size);
- write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
-
- if (4 == decode->operand_size) {
- SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
- } else if (2 == decode->operand_size) {
- SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
- } else if (1 == decode->operand_size) {
- SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
- } else {
- VM_PANIC("bad op size\n");
- }
-
- /*lflags_to_rflags(env);*/
- env->eip += decode->len;
-}
-
-static void exec_cmp(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
- env->eip += decode->len;
-}
-
-static void exec_inc(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[1].type = X86_VAR_IMMEDIATE;
- decode->op[1].val = 0;
-
- EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
-
- env->eip += decode->len;
-}
-
-static void exec_dec(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[1].type = X86_VAR_IMMEDIATE;
- decode->op[1].val = 0;
-
- EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
- env->eip += decode->len;
-}
-
-static void exec_tst(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
- env->eip += decode->len;
-}
-
-static void exec_not(CPUX86State *env, struct x86_decode *decode)
-{
- fetch_operands(env, decode, 1, true, false, false);
-
- write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
- decode->operand_size);
- env->eip += decode->len;
-}
-
-void exec_movzx(CPUX86State *env, struct x86_decode *decode)
-{
- int src_op_size;
- int op_size = decode->operand_size;
-
- fetch_operands(env, decode, 1, false, false, false);
-
- if (0xb6 == decode->opcode[1]) {
- src_op_size = 1;
- } else {
- src_op_size = 2;
- }
- decode->operand_size = src_op_size;
- calc_modrm_operand(env, decode, &decode->op[1]);
- decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
-
- env->eip += decode->len;
-}
-
-static void exec_out(CPUX86State *env, struct x86_decode *decode)
-{
- switch (decode->opcode[0]) {
- case 0xe6:
- hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
- break;
- case 0xe7:
- hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
- decode->operand_size, 1);
- break;
- case 0xee:
- hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
- break;
- case 0xef:
- hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
- decode->operand_size, 1);
- break;
- default:
- VM_PANIC("Bad out opcode\n");
- break;
- }
- env->eip += decode->len;
-}
-
-static void exec_in(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong val = 0;
- switch (decode->opcode[0]) {
- case 0xe4:
- hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
- break;
- case 0xe5:
- hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
- decode->operand_size, 1);
- if (decode->operand_size == 2) {
- AX(env) = val;
- } else {
- RAX(env) = (uint32_t)val;
- }
- break;
- case 0xec:
- hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
- break;
- case 0xed:
- hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
- if (decode->operand_size == 2) {
- AX(env) = val;
- } else {
- RAX(env) = (uint32_t)val;
- }
-
- break;
- default:
- VM_PANIC("Bad in opcode\n");
- break;
- }
-
- env->eip += decode->len;
-}
-
-static inline void string_increment_reg(CPUX86State *env, int reg,
- struct x86_decode *decode)
-{
- target_ulong val = read_reg(env, reg, decode->addressing_size);
- if (env->eflags & DF_MASK) {
- val -= decode->operand_size;
- } else {
- val += decode->operand_size;
- }
- write_reg(env, reg, val, decode->addressing_size);
-}
-
-static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
- void (*func)(CPUX86State *env,
- struct x86_decode *ins), int rep)
-{
- target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
- while (rcx--) {
- func(env, decode);
- write_reg(env, R_ECX, rcx, decode->addressing_size);
- if ((PREFIX_REP == rep) && !get_ZF(env)) {
- break;
- }
- if ((PREFIX_REPN == rep) && get_ZF(env)) {
- break;
- }
- }
-}
-
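string_rep() above drives every REP-prefixed string instruction: RCX is decremented once per iteration, and for CMPS/SCAS the loop additionally stops when ZF disagrees with the REPE/REPNE prefix. A plain-C sketch of that termination rule, with no QEMU state and names invented for the demo:

    #include <stdint.h>
    #include <stdio.h>

    static size_t repe_cmpsb(const uint8_t *s, const uint8_t *d, size_t count)
    {
        size_t left = count;
        while (left) {
            left--;                        /* count consumed even on mismatch */
            if (*s++ != *d++) {            /* ZF clear -> REPE stops          */
                break;
            }
        }
        return left;                       /* value that would remain in RCX  */
    }

    int main(void)
    {
        const uint8_t a[] = "abcdef", b[] = "abcxef";
        printf("rcx left: %zu\n", repe_cmpsb(a, b, 6));   /* prints 3 */
        return 0;
    }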
-static void exec_ins_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
- decode->addressing_size, R_ES);
-
- hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
- decode->operand_size, 1);
- vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
- decode->operand_size);
-
- string_increment_reg(env, R_EDI, decode);
-}
-
-static void exec_ins(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_ins_single, 0);
- } else {
- exec_ins_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-static void exec_outs_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
-
- vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
- decode->operand_size);
- hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
- decode->operand_size, 1);
-
- string_increment_reg(env, R_ESI, decode);
-}
-
-static void exec_outs(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_outs_single, 0);
- } else {
- exec_outs_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong src_addr;
- target_ulong dst_addr;
- target_ulong val;
-
- src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
- dst_addr = linear_addr_size(env_cpu(env), RDI(env),
- decode->addressing_size, R_ES);
-
- val = read_val_ext(env, src_addr, decode->operand_size);
- write_val_ext(env, dst_addr, val, decode->operand_size);
-
- string_increment_reg(env, R_ESI, decode);
- string_increment_reg(env, R_EDI, decode);
-}
-
-static void exec_movs(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_movs_single, 0);
- } else {
- exec_movs_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong src_addr;
- target_ulong dst_addr;
-
- src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
- dst_addr = linear_addr_size(env_cpu(env), RDI(env),
- decode->addressing_size, R_ES);
-
- decode->op[0].type = X86_VAR_IMMEDIATE;
- decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
- decode->op[1].type = X86_VAR_IMMEDIATE;
- decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
-
- EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
-
- string_increment_reg(env, R_ESI, decode);
- string_increment_reg(env, R_EDI, decode);
-}
-
-static void exec_cmps(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_cmps_single, decode->rep);
- } else {
- exec_cmps_single(env, decode);
- }
- env->eip += decode->len;
-}
-
-
-static void exec_stos_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong addr;
- target_ulong val;
-
- addr = linear_addr_size(env_cpu(env), RDI(env),
- decode->addressing_size, R_ES);
- val = read_reg(env, R_EAX, decode->operand_size);
- vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);
-
- string_increment_reg(env, R_EDI, decode);
-}
-
-
-static void exec_stos(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_stos_single, 0);
- } else {
- exec_stos_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-static void exec_scas_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong addr;
-
- addr = linear_addr_size(env_cpu(env), RDI(env),
- decode->addressing_size, R_ES);
- decode->op[1].type = X86_VAR_IMMEDIATE;
- vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);
-
- EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
- string_increment_reg(env, R_EDI, decode);
-}
-
-static void exec_scas(CPUX86State *env, struct x86_decode *decode)
-{
- decode->op[0].type = X86_VAR_REG;
- decode->op[0].reg = R_EAX;
- if (decode->rep) {
- string_rep(env, decode, exec_scas_single, decode->rep);
- } else {
- exec_scas_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-static void exec_lods_single(CPUX86State *env, struct x86_decode *decode)
-{
- target_ulong addr;
- target_ulong val = 0;
-
- addr = decode_linear_addr(env, decode, RSI(env), R_DS);
- vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
- write_reg(env, R_EAX, val, decode->operand_size);
-
- string_increment_reg(env, R_ESI, decode);
-}
-
-static void exec_lods(CPUX86State *env, struct x86_decode *decode)
-{
- if (decode->rep) {
- string_rep(env, decode, exec_lods_single, 0);
- } else {
- exec_lods_single(env, decode);
- }
-
- env->eip += decode->len;
-}
-
-void simulate_rdmsr(CPUX86State *env)
-{
- X86CPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
- uint32_t msr = ECX(env);
- uint64_t val = 0;
-
- switch (msr) {
- case MSR_IA32_TSC:
- val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
- break;
- case MSR_IA32_APICBASE:
- val = cpu_get_apic_base(cpu->apic_state);
- break;
- case MSR_IA32_UCODE_REV:
- val = cpu->ucode_rev;
- break;
- case MSR_EFER:
- val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
- break;
- case MSR_FSBASE:
- val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
- break;
- case MSR_GSBASE:
- val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
- break;
- case MSR_KERNELGSBASE:
- val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
- break;
- case MSR_STAR:
- abort();
- break;
- case MSR_LSTAR:
- abort();
- break;
- case MSR_CSTAR:
- abort();
- break;
- case MSR_IA32_MISC_ENABLE:
- val = env->msr_ia32_misc_enable;
- break;
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
- break;
- case MSR_MTRRfix64K_00000:
- val = env->mtrr_fixed[0];
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
- break;
- case MSR_MTRRdefType:
- val = env->mtrr_deftype;
- break;
- case MSR_CORE_THREAD_COUNT:
- val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
- val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
- break;
- default:
- /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
- val = 0;
- break;
- }
-
- RAX(env) = (uint32_t)val;
- RDX(env) = (uint32_t)(val >> 32);
-}
-
-static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode)
-{
- simulate_rdmsr(env);
- env->eip += decode->len;
-}
-
-void simulate_wrmsr(CPUX86State *env)
-{
- X86CPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
- uint32_t msr = ECX(env);
- uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
-
- switch (msr) {
- case MSR_IA32_TSC:
- break;
- case MSR_IA32_APICBASE:
- cpu_set_apic_base(cpu->apic_state, data);
- break;
- case MSR_FSBASE:
- wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
- break;
- case MSR_GSBASE:
- wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
- break;
- case MSR_KERNELGSBASE:
- wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
- break;
- case MSR_STAR:
- abort();
- break;
- case MSR_LSTAR:
- abort();
- break;
- case MSR_CSTAR:
- abort();
- break;
- case MSR_EFER:
- /*printf("new efer %llx\n", EFER(cs));*/
- wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
- if (data & MSR_EFER_NXE) {
- hv_vcpu_invalidate_tlb(cs->accel->fd);
- }
- break;
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
- break;
- case MSR_MTRRfix64K_00000:
- env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
- break;
- case MSR_MTRRdefType:
- env->mtrr_deftype = data;
- break;
- default:
- break;
- }
-
- /* Related to support known hypervisor interface */
- /* if (g_hypervisor_iface)
- g_hypervisor_iface->wrmsr_handler(cs, msr, data);
-
- printf("write msr %llx\n", RCX(cs));*/
-}
-
-static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode)
-{
- simulate_wrmsr(env);
- env->eip += decode->len;
-}
-
-/*
- * flag:
- * 0 - bt, 1 - btc, 2 - bts, 3 - btr
- */
-static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
-{
- int32_t displacement;
- uint8_t index;
- bool cf;
- int mask = (4 == decode->operand_size) ? 0x1f : 0xf;
-
- VM_PANIC_ON(decode->rex.rex);
-
- fetch_operands(env, decode, 2, false, true, false);
- index = decode->op[1].val & mask;
-
- if (decode->op[0].type != X86_VAR_REG) {
- if (4 == decode->operand_size) {
- displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
- decode->op[0].ptr += 4 * displacement;
- } else if (2 == decode->operand_size) {
- displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
- decode->op[0].ptr += 2 * displacement;
- } else {
- VM_PANIC("bt 64bit\n");
- }
- }
- decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
- decode->operand_size);
- cf = (decode->op[0].val >> index) & 0x01;
-
- switch (flag) {
- case 0:
- set_CF(env, cf);
- return;
- case 1:
- decode->op[0].val ^= (1u << index);
- break;
- case 2:
- decode->op[0].val |= (1u << index);
- break;
- case 3:
- decode->op[0].val &= ~(1u << index);
- break;
- }
- write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
- decode->operand_size);
- set_CF(env, cf);
-}
-
-static void exec_bt(CPUX86State *env, struct x86_decode *decode)
-{
- do_bt(env, decode, 0);
- env->eip += decode->len;
-}
-
-static void exec_btc(CPUX86State *env, struct x86_decode *decode)
-{
- do_bt(env, decode, 1);
- env->eip += decode->len;
-}
-
-static void exec_btr(CPUX86State *env, struct x86_decode *decode)
-{
- do_bt(env, decode, 3);
- env->eip += decode->len;
-}
-
-static void exec_bts(CPUX86State *env, struct x86_decode *decode)
-{
- do_bt(env, decode, 2);
- env->eip += decode->len;
-}
-
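For a memory operand, do_bt() above first splits the bit offset into a whole-word displacement that is added to the effective address and a bit index inside that word; exec_bt/bts/btr/btc only differ in what they do with the selected bit. A worked example of the 32-bit split (plain C, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t bit_offset = 37;                    /* op[1].val             */
        int32_t disp  = (bit_offset & ~0x1f) / 32;  /* dwords to step over   */
        uint8_t index = bit_offset & 0x1f;          /* bit within that dword */

        /* prints "advance ptr by 4 bytes, test bit 5" */
        printf("advance ptr by %d bytes, test bit %u\n", 4 * disp, index);
        return 0;
    }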
-void exec_shl(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t count;
- int of = 0, cf = 0;
-
- fetch_operands(env, decode, 2, true, true, false);
-
- count = decode->op[1].val;
- count &= 0x1f; /* count is masked to 5 bits*/
- if (!count) {
- goto exit;
- }
-
- switch (decode->operand_size) {
- case 1:
- {
- uint8_t res = 0;
- if (count <= 8) {
- res = (decode->op[0].val << count);
- cf = (decode->op[0].val >> (8 - count)) & 0x1;
- of = cf ^ (res >> 7);
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 1);
- SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 2:
- {
- uint16_t res = 0;
-
- /* from bochs */
- if (count <= 16) {
- res = (decode->op[0].val << count);
- cf = (decode->op[0].val >> (16 - count)) & 0x1;
- of = cf ^ (res >> 15); /* of = cf ^ result15 */
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 2);
- SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 4:
- {
- uint32_t res = decode->op[0].val << count;
-
- write_val_ext(env, decode->op[0].ptr, res, 4);
- SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
- cf = (decode->op[0].val >> (32 - count)) & 0x1;
- of = cf ^ (res >> 31); /* of = cf ^ result31 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- default:
- abort();
- }
-
-exit:
- /* lflags_to_rflags(env); */
- env->eip += decode->len;
-}
-
-void exec_movsx(CPUX86State *env, struct x86_decode *decode)
-{
- int src_op_size;
- int op_size = decode->operand_size;
-
- fetch_operands(env, decode, 2, false, false, false);
-
- if (0xbe == decode->opcode[1]) {
- src_op_size = 1;
- } else {
- src_op_size = 2;
- }
-
- decode->operand_size = src_op_size;
- calc_modrm_operand(env, decode, &decode->op[1]);
- decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
- src_op_size);
-
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
-
- env->eip += decode->len;
-}
-
-void exec_ror(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t count;
-
- fetch_operands(env, decode, 2, true, true, false);
- count = decode->op[1].val;
-
- switch (decode->operand_size) {
- case 1:
- {
- uint32_t bit6, bit7;
- uint8_t res;
-
- if ((count & 0x07) == 0) {
- if (count & 0x18) {
- bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
- bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
- SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
- }
- } else {
- count &= 0x7; /* use only bottom 3 bits */
- res = ((uint8_t)decode->op[0].val >> count) |
- ((uint8_t)decode->op[0].val << (8 - count));
- write_val_ext(env, decode->op[0].ptr, res, 1);
- bit6 = (res >> 6) & 1;
- bit7 = (res >> 7) & 1;
- /* set eflags: ROR count affects the following flags: C, O */
- SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
- }
- break;
- }
- case 2:
- {
- uint32_t bit14, bit15;
- uint16_t res;
-
- if ((count & 0x0f) == 0) {
- if (count & 0x10) {
- bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
- bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
- /* of = result14 ^ result15 */
- SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
- }
- } else {
- count &= 0x0f; /* use only 4 LSB's */
- res = ((uint16_t)decode->op[0].val >> count) |
- ((uint16_t)decode->op[0].val << (16 - count));
- write_val_ext(env, decode->op[0].ptr, res, 2);
-
- bit14 = (res >> 14) & 1;
- bit15 = (res >> 15) & 1;
- /* of = result14 ^ result15 */
- SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
- }
- break;
- }
- case 4:
- {
- uint32_t bit31, bit30;
- uint32_t res;
-
- count &= 0x1f;
- if (count) {
- res = ((uint32_t)decode->op[0].val >> count) |
- ((uint32_t)decode->op[0].val << (32 - count));
- write_val_ext(env, decode->op[0].ptr, res, 4);
-
- bit31 = (res >> 31) & 1;
- bit30 = (res >> 30) & 1;
- /* of = result30 ^ result31 */
- SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
- }
- break;
- }
- }
- env->eip += decode->len;
-}
-
-void exec_rol(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t count;
-
- fetch_operands(env, decode, 2, true, true, false);
- count = decode->op[1].val;
-
- switch (decode->operand_size) {
- case 1:
- {
- uint32_t bit0, bit7;
- uint8_t res;
-
- if ((count & 0x07) == 0) {
- if (count & 0x18) {
- bit0 = ((uint8_t)decode->op[0].val & 1);
- bit7 = ((uint8_t)decode->op[0].val >> 7);
- SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
- }
- } else {
- count &= 0x7; /* use only lowest 3 bits */
- res = ((uint8_t)decode->op[0].val << count) |
- ((uint8_t)decode->op[0].val >> (8 - count));
-
- write_val_ext(env, decode->op[0].ptr, res, 1);
- /* set eflags:
- * ROL count affects the following flags: C, O
- */
- bit0 = (res & 1);
- bit7 = (res >> 7);
- SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
- }
- break;
- }
- case 2:
- {
- uint32_t bit0, bit15;
- uint16_t res;
-
- if ((count & 0x0f) == 0) {
- if (count & 0x10) {
- bit0 = ((uint16_t)decode->op[0].val & 0x1);
- bit15 = ((uint16_t)decode->op[0].val >> 15);
- /* of = cf ^ result15 */
- SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
- }
- } else {
- count &= 0x0f; /* only use bottom 4 bits */
- res = ((uint16_t)decode->op[0].val << count) |
- ((uint16_t)decode->op[0].val >> (16 - count));
-
- write_val_ext(env, decode->op[0].ptr, res, 2);
- bit0 = (res & 0x1);
- bit15 = (res >> 15);
- /* of = cf ^ result15 */
- SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
- }
- break;
- }
- case 4:
- {
- uint32_t bit0, bit31;
- uint32_t res;
-
- count &= 0x1f;
- if (count) {
- res = ((uint32_t)decode->op[0].val << count) |
- ((uint32_t)decode->op[0].val >> (32 - count));
-
- write_val_ext(env, decode->op[0].ptr, res, 4);
- bit0 = (res & 0x1);
- bit31 = (res >> 31);
- /* of = cf ^ result31 */
- SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
- }
- break;
- }
- }
- env->eip += decode->len;
-}
-
-
-void exec_rcl(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t count;
- int of = 0, cf = 0;
-
- fetch_operands(env, decode, 2, true, true, false);
- count = decode->op[1].val & 0x1f;
-
- switch (decode->operand_size) {
- case 1:
- {
- uint8_t op1_8 = decode->op[0].val;
- uint8_t res;
- count %= 9;
- if (!count) {
- break;
- }
-
- if (1 == count) {
- res = (op1_8 << 1) | get_CF(env);
- } else {
- res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
- (op1_8 >> (9 - count));
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 1);
-
- cf = (op1_8 >> (8 - count)) & 0x01;
- of = cf ^ (res >> 7); /* of = cf ^ result7 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 2:
- {
- uint16_t res;
- uint16_t op1_16 = decode->op[0].val;
-
- count %= 17;
- if (!count) {
- break;
- }
-
- if (1 == count) {
- res = (op1_16 << 1) | get_CF(env);
- } else if (count == 16) {
- res = (get_CF(env) << 15) | (op1_16 >> 1);
- } else { /* 2..15 */
- res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
- (op1_16 >> (17 - count));
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 2);
-
- cf = (op1_16 >> (16 - count)) & 0x1;
- of = cf ^ (res >> 15); /* of = cf ^ result15 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 4:
- {
- uint32_t res;
- uint32_t op1_32 = decode->op[0].val;
-
- if (!count) {
- break;
- }
-
- if (1 == count) {
- res = (op1_32 << 1) | get_CF(env);
- } else {
- res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
- (op1_32 >> (33 - count));
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 4);
-
- cf = (op1_32 >> (32 - count)) & 0x1;
- of = cf ^ (res >> 31); /* of = cf ^ result31 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- }
- env->eip += decode->len;
-}
-
-void exec_rcr(CPUX86State *env, struct x86_decode *decode)
-{
- uint8_t count;
- int of = 0, cf = 0;
-
- fetch_operands(env, decode, 2, true, true, false);
- count = decode->op[1].val & 0x1f;
-
- switch (decode->operand_size) {
- case 1:
- {
- uint8_t op1_8 = decode->op[0].val;
- uint8_t res;
-
- count %= 9;
- if (!count) {
- break;
- }
- res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
- (op1_8 << (9 - count));
-
- write_val_ext(env, decode->op[0].ptr, res, 1);
-
- cf = (op1_8 >> (count - 1)) & 0x1;
- of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 2:
- {
- uint16_t op1_16 = decode->op[0].val;
- uint16_t res;
-
- count %= 17;
- if (!count) {
- break;
- }
- res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
- (op1_16 << (17 - count));
-
- write_val_ext(env, decode->op[0].ptr, res, 2);
-
- cf = (op1_16 >> (count - 1)) & 0x1;
- of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
- result14 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- case 4:
- {
- uint32_t res;
- uint32_t op1_32 = decode->op[0].val;
-
- if (!count) {
- break;
- }
-
- if (1 == count) {
- res = (op1_32 >> 1) | (get_CF(env) << 31);
- } else {
- res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
- (op1_32 << (33 - count));
- }
-
- write_val_ext(env, decode->op[0].ptr, res, 4);
-
- cf = (op1_32 >> (count - 1)) & 0x1;
- of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
- SET_FLAGS_OxxxxC(env, of, cf);
- break;
- }
- }
- env->eip += decode->len;
-}
-
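exec_rcl()/exec_rcr() rotate the operand together with CF as one (size+1)-bit quantity, which is why the counts are reduced modulo 9 and 17 for the 8- and 16-bit cases. A worked 8-bit RCR-by-1 in plain C (no QEMU flag state involved):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t val = 0xB4;                         /* 1011 0100         */
        int cf = 1;                                 /* incoming carry    */

        int new_cf  = val & 1;                      /* bit rotated out   */
        uint8_t res = (val >> 1) | (cf << 7);       /* carry rotated in  */

        printf("res=0x%02X cf=%d\n", res, new_cf);  /* res=0xDA cf=0     */
        return 0;
    }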
-static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
-{
- fetch_operands(env, decode, 2, true, true, false);
-
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
- decode->operand_size);
- write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
- decode->operand_size);
-
- env->eip += decode->len;
-}
-
-static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
-{
- EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
- write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
- decode->operand_size);
-
- env->eip += decode->len;
-}
-
-static struct cmd_handler {
- enum x86_decode_cmd cmd;
- void (*handler)(CPUX86State *env, struct x86_decode *ins);
-} handlers[] = {
- {X86_DECODE_CMD_INVL, NULL,},
- {X86_DECODE_CMD_MOV, exec_mov},
- {X86_DECODE_CMD_ADD, exec_add},
- {X86_DECODE_CMD_OR, exec_or},
- {X86_DECODE_CMD_ADC, exec_adc},
- {X86_DECODE_CMD_SBB, exec_sbb},
- {X86_DECODE_CMD_AND, exec_and},
- {X86_DECODE_CMD_SUB, exec_sub},
- {X86_DECODE_CMD_NEG, exec_neg},
- {X86_DECODE_CMD_XOR, exec_xor},
- {X86_DECODE_CMD_CMP, exec_cmp},
- {X86_DECODE_CMD_INC, exec_inc},
- {X86_DECODE_CMD_DEC, exec_dec},
- {X86_DECODE_CMD_TST, exec_tst},
- {X86_DECODE_CMD_NOT, exec_not},
- {X86_DECODE_CMD_MOVZX, exec_movzx},
- {X86_DECODE_CMD_OUT, exec_out},
- {X86_DECODE_CMD_IN, exec_in},
- {X86_DECODE_CMD_INS, exec_ins},
- {X86_DECODE_CMD_OUTS, exec_outs},
- {X86_DECODE_CMD_RDMSR, exec_rdmsr},
- {X86_DECODE_CMD_WRMSR, exec_wrmsr},
- {X86_DECODE_CMD_BT, exec_bt},
- {X86_DECODE_CMD_BTR, exec_btr},
- {X86_DECODE_CMD_BTC, exec_btc},
- {X86_DECODE_CMD_BTS, exec_bts},
- {X86_DECODE_CMD_SHL, exec_shl},
- {X86_DECODE_CMD_ROL, exec_rol},
- {X86_DECODE_CMD_ROR, exec_ror},
- {X86_DECODE_CMD_RCR, exec_rcr},
- {X86_DECODE_CMD_RCL, exec_rcl},
- /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
- {X86_DECODE_CMD_MOVS, exec_movs},
- {X86_DECODE_CMD_CMPS, exec_cmps},
- {X86_DECODE_CMD_STOS, exec_stos},
- {X86_DECODE_CMD_SCAS, exec_scas},
- {X86_DECODE_CMD_LODS, exec_lods},
- {X86_DECODE_CMD_MOVSX, exec_movsx},
- {X86_DECODE_CMD_XCHG, exec_xchg},
- {X86_DECODE_CMD_XADD, exec_xadd},
-};
-
-static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
-
-static void init_cmd_handler(void)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- _cmd_handler[handlers[i].cmd] = handlers[i];
- }
-}
-
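The handlers[] list is deliberately compact; init_cmd_handler() scatters it into _cmd_handler[], a sparse array indexed by command id, so exec_instruction() can dispatch in constant time. A minimal stand-alone sketch of the same pattern, with every name invented for the demo:

    #include <stdio.h>

    enum demo_cmd { DEMO_INVL = 0, DEMO_FOO, DEMO_BAR, DEMO_LAST };

    static void do_foo(void) { puts("foo"); }
    static void do_bar(void) { puts("bar"); }

    static struct { enum demo_cmd cmd; void (*fn)(void); } demo_list[] = {
        { DEMO_FOO, do_foo },
        { DEMO_BAR, do_bar },
    };

    static void (*demo_table[DEMO_LAST])(void);

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(demo_list) / sizeof(demo_list[0]); i++) {
            demo_table[demo_list[i].cmd] = demo_list[i].fn;   /* scatter */
        }
        demo_table[DEMO_BAR]();   /* prints "bar" */
        return 0;
    }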
-void load_regs(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- int i = 0;
- RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
- RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
- RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
- RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
- RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
- RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
- RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
- RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
- for (i = 8; i < 16; i++) {
- RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
- }
-
- env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
- rflags_to_lflags(env);
- env->eip = rreg(cs->accel->fd, HV_X86_RIP);
-}
-
-void store_regs(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- int i = 0;
- wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
- wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
- wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
- wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
- wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
- wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
- wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
- wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
- for (i = 8; i < 16; i++) {
- wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
- }
-
- lflags_to_rflags(env);
- wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
- macvm_set_rip(cs, env->eip);
-}
-
-bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
-{
- /*if (hvf_vcpu_id(cs))
- printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip,
- decode_cmd_to_string(ins->cmd));*/
-
- if (!_cmd_handler[ins->cmd].handler) {
- printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
- ins->cmd, ins->opcode[0],
- ins->opcode_len > 1 ? ins->opcode[1] : 0);
- env->eip += ins->len;
- return true;
- }
-
- _cmd_handler[ins->cmd].handler(env, ins);
- return true;
-}
-
-void init_emu(void)
-{
- init_cmd_handler();
-}
diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h
deleted file mode 100644
index 8bd9760..0000000
--- a/target/i386/hvf/x86_emu.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2016 Veertu Inc,
- * Copyright (C) 2017 Google Inc,
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef X86_EMU_H
-#define X86_EMU_H
-
-#include "x86.h"
-#include "x86_decode.h"
-#include "cpu.h"
-
-void init_emu(void);
-bool exec_instruction(CPUX86State *env, struct x86_decode *ins);
-
-void load_regs(CPUState *cpu);
-void store_regs(CPUState *cpu);
-
-void simulate_rdmsr(CPUX86State *env);
-void simulate_wrmsr(CPUX86State *env);
-
-target_ulong read_reg(CPUX86State *env, int reg, int size);
-void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
-target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
-void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
-void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size);
-uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes);
-target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size);
-
-void exec_movzx(CPUX86State *env, struct x86_decode *decode);
-void exec_shl(CPUX86State *env, struct x86_decode *decode);
-void exec_movsx(CPUX86State *env, struct x86_decode *decode);
-void exec_ror(CPUX86State *env, struct x86_decode *decode);
-void exec_rol(CPUX86State *env, struct x86_decode *decode);
-void exec_rcl(CPUX86State *env, struct x86_decode *decode);
-void exec_rcr(CPUX86State *env, struct x86_decode *decode);
-#endif
diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c
deleted file mode 100644
index 03d6de5..0000000
--- a/target/i386/hvf/x86_flags.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/////////////////////////////////////////////////////////////////////////
-//
-// Copyright (C) 2001-2012 The Bochs Project
-// Copyright (C) 2017 Google Inc.
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
-/////////////////////////////////////////////////////////////////////////
-/*
- * flags functions
- */
-
-#include "qemu/osdep.h"
-
-#include "panic.h"
-#include "cpu.h"
-#include "x86_flags.h"
-#include "x86.h"
-
-
-/* this is basically bocsh code */
-
-#define LF_SIGN_BIT 31
-
-#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
-#define LF_BIT_AF (3) /* lazy Adjust flag */
-#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
-#define LF_BIT_CF (31) /* lazy Carry Flag */
-#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
-
-#define LF_MASK_SD (0x01 << LF_BIT_SD)
-#define LF_MASK_AF (0x01 << LF_BIT_AF)
-#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
-#define LF_MASK_CF (0x01 << LF_BIT_CF)
-#define LF_MASK_PO (0x01 << LF_BIT_PO)
-
-#define ADD_COUT_VEC(op1, op2, result) \
- (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
-
-#define SUB_COUT_VEC(op1, op2, result) \
- (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))
-
-#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
- ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
-
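ADD_COUT_VEC/SUB_COUT_VEC build a carry-out vector in which bit i is the carry out of bit i, so for an N-bit operation CF is bit N-1 and CF^OF is bit N-2; that is what the SET_FLAGS_* macros below stash in auxbits. A worked 8-bit example (stand-alone, not part of the deleted file):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t op1 = 0x7F, op2 = 0x01;
        uint8_t res = op1 + op2;                                /* 0x80 */
        uint8_t vec = (op1 & op2) | ((op1 | op2) & (uint8_t)~res);

        int cf = (vec >> 7) & 1;               /* 0: no unsigned overflow */
        int of = cf ^ ((vec >> 6) & 1);        /* 1: signed overflow      */

        printf("vec=0x%02X CF=%d OF=%d\n", vec, cf, of);
        return 0;
    }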
-/* ******************* */
-/* OSZAPC */
-/* ******************* */
-
-/* size, carries, result */
-#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
- target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
- (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
- env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
- if ((size) == 32) { \
- temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
- } else if ((size) == 16) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
- } else if ((size) == 8) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
- } else { \
- VM_PANIC("unimplemented"); \
- } \
- env->hvf_lflags.auxbits = (target_ulong)(uint32_t)temp; \
-}
-
-/* carries, result */
-#define SET_FLAGS_OSZAPC_8(carries, result) \
- SET_FLAGS_OSZAPC_SIZE(8, carries, result)
-#define SET_FLAGS_OSZAPC_16(carries, result) \
- SET_FLAGS_OSZAPC_SIZE(16, carries, result)
-#define SET_FLAGS_OSZAPC_32(carries, result) \
- SET_FLAGS_OSZAPC_SIZE(32, carries, result)
-
-/* ******************* */
-/* OSZAP */
-/* ******************* */
-/* size, carries, result */
-#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
- target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
- (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
- if ((size) == 32) { \
- temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
- } else if ((size) == 16) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
- } else if ((size) == 8) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
- } else { \
- VM_PANIC("unimplemented"); \
- } \
- env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
- target_ulong delta_c = (env->hvf_lflags.auxbits ^ temp) & LF_MASK_CF; \
- delta_c ^= (delta_c >> 1); \
- env->hvf_lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
-}
-
-/* carries, result */
-#define SET_FLAGS_OSZAP_8(carries, result) \
- SET_FLAGS_OSZAP_SIZE(8, carries, result)
-#define SET_FLAGS_OSZAP_16(carries, result) \
- SET_FLAGS_OSZAP_SIZE(16, carries, result)
-#define SET_FLAGS_OSZAP_32(carries, result) \
- SET_FLAGS_OSZAP_SIZE(32, carries, result)
-
-void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
-{
- uint32_t temp_po = new_of ^ new_cf;
- env->hvf_lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
- env->hvf_lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);
-}
-
-void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff)
-{
- SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff)
-{
- SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff)
-{
- SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff)
-{
- SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff)
-{
- SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff)
-{
- SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff)
-{
- SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff)
-{
- SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff)
-{
- SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff)
-{
- SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff)
-{
- SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff)
-{
- SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff);
-}
-
-
-void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff)
-{
- SET_FLAGS_OSZAPC_32(0, diff);
-}
-
-void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff)
-{
- SET_FLAGS_OSZAPC_16(0, diff);
-}
-
-void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff)
-{
- SET_FLAGS_OSZAPC_8(0, diff);
-}
-
-bool get_PF(CPUX86State *env)
-{
- uint32_t temp = (255 & env->hvf_lflags.result);
- temp = temp ^ (255 & (env->hvf_lflags.auxbits >> LF_BIT_PDB));
- temp = (temp ^ (temp >> 4)) & 0x0F;
- return (0x9669U >> temp) & 1;
-}
-
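get_PF() folds the result byte down to a nibble with one XOR-shift and then indexes 0x9669, which packs a 16-entry even-parity table into a single constant. A stand-alone demo of the trick, ignoring the Parity Delta Byte (it is zero unless PF was explicitly overridden):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t b = 0xB5;                   /* 1011 0101: five set bits   */
        uint32_t t = b;
        t = (t ^ (t >> 4)) & 0x0F;          /* fold both nibbles together */
        int pf = (0x9669U >> t) & 1;        /* 1 means an even bit count  */

        printf("PF=%d\n", pf);              /* prints PF=0                */
        return 0;
    }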
-void set_PF(CPUX86State *env, bool val)
-{
- uint32_t temp = (255 & env->hvf_lflags.result) ^ (!val);
- env->hvf_lflags.auxbits &= ~(LF_MASK_PDB);
- env->hvf_lflags.auxbits |= (temp << LF_BIT_PDB);
-}
-
-bool get_OF(CPUX86State *env)
-{
- return ((env->hvf_lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
-}
-
-bool get_CF(CPUX86State *env)
-{
- return (env->hvf_lflags.auxbits >> LF_BIT_CF) & 1;
-}
-
-void set_OF(CPUX86State *env, bool val)
-{
- bool old_cf = get_CF(env);
- SET_FLAGS_OxxxxC(env, val, old_cf);
-}
-
-void set_CF(CPUX86State *env, bool val)
-{
- bool old_of = get_OF(env);
- SET_FLAGS_OxxxxC(env, old_of, val);
-}
-
-bool get_AF(CPUX86State *env)
-{
- return (env->hvf_lflags.auxbits >> LF_BIT_AF) & 1;
-}
-
-void set_AF(CPUX86State *env, bool val)
-{
- env->hvf_lflags.auxbits &= ~(LF_MASK_AF);
- env->hvf_lflags.auxbits |= val << LF_BIT_AF;
-}
-
-bool get_ZF(CPUX86State *env)
-{
- return !env->hvf_lflags.result;
-}
-
-void set_ZF(CPUX86State *env, bool val)
-{
- if (val) {
- env->hvf_lflags.auxbits ^=
- (((env->hvf_lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
- /* merge the parity bits into the Parity Delta Byte */
- uint32_t temp_pdb = (255 & env->hvf_lflags.result);
- env->hvf_lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
- /* now zero the .result value */
- env->hvf_lflags.result = 0;
- } else {
- env->hvf_lflags.result |= (1 << 8);
- }
-}
-
-bool get_SF(CPUX86State *env)
-{
- return ((env->hvf_lflags.result >> LF_SIGN_BIT) ^
- (env->hvf_lflags.auxbits >> LF_BIT_SD)) & 1;
-}
-
-void set_SF(CPUX86State *env, bool val)
-{
- bool temp_sf = get_SF(env);
- env->hvf_lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
-}
-
-void lflags_to_rflags(CPUX86State *env)
-{
- env->eflags |= get_CF(env) ? CC_C : 0;
- env->eflags |= get_PF(env) ? CC_P : 0;
- env->eflags |= get_AF(env) ? CC_A : 0;
- env->eflags |= get_ZF(env) ? CC_Z : 0;
- env->eflags |= get_SF(env) ? CC_S : 0;
- env->eflags |= get_OF(env) ? CC_O : 0;
-}
-
-void rflags_to_lflags(CPUX86State *env)
-{
- env->hvf_lflags.auxbits = env->hvf_lflags.result = 0;
- set_OF(env, env->eflags & CC_O);
- set_SF(env, env->eflags & CC_S);
- set_ZF(env, env->eflags & CC_Z);
- set_AF(env, env->eflags & CC_A);
- set_PF(env, env->eflags & CC_P);
- set_CF(env, env->eflags & CC_C);
-}
diff --git a/target/i386/hvf/x86_flags.h b/target/i386/hvf/x86_flags.h
deleted file mode 100644
index 75c2a7f..0000000
--- a/target/i386/hvf/x86_flags.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/////////////////////////////////////////////////////////////////////////
-//
-// Copyright (C) 2001-2012 The Bochs Project
-// Copyright (C) 2017 Google Inc.
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
-/////////////////////////////////////////////////////////////////////////
-/*
- * x86 eflags functions
- */
-
-#ifndef X86_FLAGS_H
-#define X86_FLAGS_H
-
-#include "cpu.h"
-void lflags_to_rflags(CPUX86State *env);
-void rflags_to_lflags(CPUX86State *env);
-
-bool get_PF(CPUX86State *env);
-void set_PF(CPUX86State *env, bool val);
-bool get_CF(CPUX86State *env);
-void set_CF(CPUX86State *env, bool val);
-bool get_AF(CPUX86State *env);
-void set_AF(CPUX86State *env, bool val);
-bool get_ZF(CPUX86State *env);
-void set_ZF(CPUX86State *env, bool val);
-bool get_SF(CPUX86State *env);
-void set_SF(CPUX86State *env, bool val);
-bool get_OF(CPUX86State *env);
-void set_OF(CPUX86State *env, bool val);
-
-void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
-
-void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff);
-void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff);
-void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff);
-
-void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff);
-void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff);
-void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff);
-
-void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff);
-void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff);
-void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff);
-
-void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff);
-void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff);
-void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff);
-
-void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
- uint32_t diff);
-void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
- uint16_t diff);
-void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
- uint8_t diff);
-
-#endif /* X86_FLAGS_H */
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
index 649074a..afc5c17 100644
--- a/target/i386/hvf/x86_mmu.c
+++ b/target/i386/hvf/x86_mmu.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "panic.h"
#include "cpu.h"
-#include "x86.h"
+#include "emulate/x86.h"
#include "x86_mmu.h"
#include "vmcs.h"
#include "vmx.h"
@@ -38,6 +38,7 @@
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
+#define PAE_PTE_SUPER_PAGE_MASK ((-1llu << (30)) & ((1llu << 52) - 1))
struct gpt_translation {
target_ulong gva;
@@ -96,7 +97,7 @@ static bool get_pt_entry(CPUState *cpu, struct gpt_translation *pt,
/* test page table entry */
static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt,
- int level, bool *is_large, bool pae)
+ int level, int *largeness, bool pae)
{
uint64_t pte = pt->pte[level];
@@ -118,9 +119,9 @@ static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt,
goto exit;
}
- if (1 == level && pte_large_page(pte)) {
+ if (level && pte_large_page(pte)) {
pt->err_code |= MMU_PAGE_PT;
- *is_large = true;
+ *largeness = level;
}
if (!level) {
pt->err_code |= MMU_PAGE_PT;
@@ -152,9 +153,18 @@ static inline uint64_t pse_pte_to_page(uint64_t pte)
return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
}
-static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
+static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae,
+ int largeness)
{
- VM_PANIC_ON(!pte_large_page(pt->pte[1]))
+ VM_PANIC_ON(!pte_large_page(pt->pte[largeness]))
+
+ /* 1 GiB large page */
+ if (pae && largeness == 2) {
+ return (pt->pte[2] & PAE_PTE_SUPER_PAGE_MASK) | (pt->gva & 0x3fffffff);
+ }
+
+ VM_PANIC_ON(largeness != 1)
+
/* 2Mb large page */
if (pae) {
return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
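For illustration, a minimal sketch (hypothetical PTE and GVA values; the mask definition is copied from the hunk above) of the guest-physical address computed for the new 1 GiB PAE mapping case:

    #include <stdint.h>
    #include <stdio.h>

    #define PAE_PTE_SUPER_PAGE_MASK ((-1llu << (30)) & ((1llu << 52) - 1))

    int main(void)
    {
        uint64_t pte2 = 0x0000000080000083llu; /* hypothetical PDPTE with the PS bit set */
        uint64_t gva  = 0x00000000c1234567llu; /* hypothetical guest-virtual address */

        /* Mirrors large_page_gpa() for largeness == 2: frame bits 51..30 | low 30 bits of the GVA */
        uint64_t gpa = (pte2 & PAE_PTE_SUPER_PAGE_MASK) | (gva & 0x3fffffff);

        printf("gpa = 0x%llx\n", (unsigned long long)gpa); /* prints gpa = 0x81234567 */
        return 0;
    }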
@@ -170,7 +180,7 @@ static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code,
struct gpt_translation *pt, bool pae)
{
int top_level, level;
- bool is_large = false;
+ int largeness = 0;
target_ulong cr3 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR3);
uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
@@ -186,19 +196,19 @@ static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code,
for (level = top_level; level > 0; level--) {
get_pt_entry(cpu, pt, level, pae);
- if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
+ if (!test_pt_entry(cpu, pt, level - 1, &largeness, pae)) {
return false;
}
- if (is_large) {
+ if (largeness) {
break;
}
}
- if (!is_large) {
+ if (!largeness) {
pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
} else {
- pt->gpa = large_page_gpa(pt, pae);
+ pt->gpa = large_page_gpa(pt, pae, largeness);
}
return true;
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
index f09bfbd..bdf8b51 100644
--- a/target/i386/hvf/x86_task.c
+++ b/target/i386/hvf/x86_task.c
@@ -10,15 +10,15 @@
#include "panic.h"
#include "qemu/error-report.h"
-#include "sysemu/hvf.h"
+#include "system/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
-#include "x86.h"
+#include "emulate/x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
-#include "x86_decode.h"
-#include "x86_emu.h"
+#include "emulate/x86_decode.h"
+#include "emulate/x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"
@@ -76,16 +76,16 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
RSI(env) = tss->esi;
RDI(env) = tss->edi;
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ldt}}, R_LDTR);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->es}}, R_ES);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->cs}}, R_CS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ss}}, R_SS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ds}}, R_DS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->fs}}, R_FS);
+ vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->gs}}, R_GS);
}
-static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
+static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segment_selector old_tss_sel,
uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
struct x86_tss_segment32 tss_seg;
@@ -108,7 +108,7 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme
return 0;
}
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
@@ -119,11 +119,10 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
return;
}
- load_regs(cpu);
+ hvf_load_regs(cpu);
struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
- int ret;
- x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
+ x86_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
uint32_t desc_limit;
struct x86_call_gate task_gate_desc;
@@ -138,10 +137,10 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
if (reason == TSR_IDT_GATE && gate_valid) {
int dpl;
- ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
+ x86_read_call_gate(cpu, &task_gate_desc, gate);
dpl = task_gate_desc.dpl;
- x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
+ x86_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
if (tss_sel.rpl > dpl || cs.rpl > dpl)
;//DPRINTF("emulate_gp");
}
@@ -167,18 +166,19 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
}
- if (next_tss_desc.type & 8)
- ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
- else
+ if (next_tss_desc.type & 8) {
+ task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+ } else {
//ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
VM_PANIC("task_switch_16");
+ }
macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) |
CR0_TS_MASK);
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
- store_regs(cpu);
+ hvf_store_regs(cpu);
hv_vcpu_invalidate_tlb(cpu->accel->fd);
}
diff --git a/target/i386/hvf/x86_task.h b/target/i386/hvf/x86_task.h
index 4eaa61a..b9afac6 100644
--- a/target/i386/hvf/x86_task.h
+++ b/target/i386/hvf/x86_task.h
@@ -15,6 +15,6 @@
#ifndef HVF_X86_TASK_H
#define HVF_X86_TASK_H
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel,
int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 1569f86..2057314 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -24,8 +24,8 @@
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
-#include "x86_decode.h"
-#include "sysemu/hw_accel.h"
+#include "emulate/x86_decode.h"
+#include "system/hw_accel.h"
#include "hw/i386/apic_internal.h"
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
index 423a89b..8c46ce8 100644
--- a/target/i386/hvf/x86hvf.h
+++ b/target/i386/hvf/x86hvf.h
@@ -31,4 +31,7 @@ void hvf_get_xsave(CPUState *cs);
void hvf_get_msrs(CPUState *cs);
void vmx_clear_int_window_exiting(CPUState *cs);
void vmx_update_tpr(CPUState *cs);
+
+void hvf_load_regs(CPUState *cpu);
+void hvf_store_regs(CPUState *cpu);
#endif
diff --git a/target/i386/kvm/hyperv-proto.h b/target/i386/kvm/hyperv-proto.h
index 464fbf0..a9f056f 100644
--- a/target/i386/kvm/hyperv-proto.h
+++ b/target/i386/kvm/hyperv-proto.h
@@ -152,18 +152,6 @@
#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
/*
- * Hyper-V Synthetic debug options MSR
- */
-#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1
-#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2
-#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3
-#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4
-#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5
-#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF
-
-#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2)
-
-/*
* Guest crash notification MSRs
*/
#define HV_X64_MSR_CRASH_P0 0x40000100
diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c
index 3263dcf..5836f53 100644
--- a/target/i386/kvm/hyperv-stub.c
+++ b/target/i386/kvm/hyperv-stub.c
@@ -56,3 +56,8 @@ void hyperv_x86_synic_update(X86CPU *cpu)
void hyperv_x86_set_vmbus_recommended_features_enabled(void)
{
}
+
+uint64_t hyperv_syndbg_query_options(void)
+{
+ return 0;
+}
diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c
index b94f12a..9865120 100644
--- a/target/i386/kvm/hyperv.c
+++ b/target/i386/kvm/hyperv.c
@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+#include "exec/target_page.h"
#include "hyperv.h"
#include "hw/hyperv/hyperv.h"
#include "hyperv-proto.h"
@@ -80,6 +81,7 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
* necessary because memory hierarchy is being changed
*/
async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
+ cpu_exit(CPU(cpu));
return EXCP_INTERRUPT;
case KVM_EXIT_HYPERV_HCALL: {
diff --git a/target/i386/kvm/hyperv.h b/target/i386/kvm/hyperv.h
index e3982c8..e45a451 100644
--- a/target/i386/kvm/hyperv.h
+++ b/target/i386/kvm/hyperv.h
@@ -15,7 +15,7 @@
#define TARGET_I386_HYPERV_H
#include "cpu.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/hyperv/hyperv.h"
#ifdef CONFIG_KVM
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index 6bf8dcf..16bde4d 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -11,11 +11,11 @@
#include "cpu.h"
#include "host-cpu.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "hw/boards.h"
#include "kvm_i386.h"
-#include "hw/core/accel-cpu.h"
+#include "accel/accel-cpu-target.h"
static void kvm_set_guest_phys_bits(CPUState *cs)
{
@@ -143,10 +143,6 @@ static void kvm_cpu_xsave_init(void)
if (!esa->size) {
continue;
}
- if ((x86_cpu_get_supported_feature_word(NULL, esa->feature) & esa->bits)
- != esa->bits) {
- continue;
- }
host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx);
if (eax != 0) {
assert(esa->size == eax);
@@ -227,7 +223,7 @@ static void kvm_cpu_instance_init(CPUState *cs)
kvm_cpu_xsave_init();
}
-static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index becca2e..234878c 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -16,9 +16,12 @@
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
+#include <math.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -27,13 +30,15 @@
#include "cpu.h"
#include "host-cpu.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm_int.h"
-#include "sysemu/runstate.h"
+#include "vmsr_energy.h"
+#include "system/system.h"
+#include "system/hw_accel.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
+#include "tdx.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"
@@ -63,6 +68,7 @@
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
+#include "exec/target_page.h"
#include "trace.h"
#include CONFIG_DEVICES
@@ -77,18 +83,35 @@
do { } while (0)
#endif
+/*
+ * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
+ * In order to use vm86 mode, an EPT identity map and a TSS are needed.
+ * Since these must be part of guest physical memory, we need to allocate
+ * them, both by setting their start addresses in the kernel and by
+ * creating a corresponding e820 entry. We need 4 pages before the BIOS,
+ * so this value allows up to 16M BIOSes.
+ */
+#define KVM_IDENTITY_BASE 0xfeffc000
+
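As a quick sanity check of the layout described in the comment above, a minimal sketch derived only from the values used in this patch (KVM_IDENTITY_BASE, the +0x1000 TSS base and the 0x4000 e820 reservation set later in kvm_arch_init()):

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_IDENTITY_BASE 0xfeffc000u

    int main(void)
    {
        uint64_t tss_base     = KVM_IDENTITY_BASE + 0x1000;  /* TSS starts one page after the identity map */
        uint64_t reserved_end = KVM_IDENTITY_BASE + 0x4000;  /* 4 pages reserved via the e820 entry */
        uint64_t bios_room    = (1ull << 32) - reserved_end; /* space left below 4 GiB for the BIOS */

        printf("TSS base     = 0x%llx\n", (unsigned long long)tss_base);
        printf("reserved end = 0x%llx\n", (unsigned long long)reserved_end);
        printf("BIOS room    = %llu MiB\n", (unsigned long long)(bios_room >> 20)); /* 16 MiB */
        return 0;
    }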
/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS 1
#define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)
-#define MSR_KVM_WALL_CLOCK 0x11
-#define MSR_KVM_SYSTEM_TIME 0x12
-
/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
* 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
+typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
+typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
+typedef struct {
+ uint32_t msr;
+ QEMURDMSRHandler *rdmsr;
+ QEMUWRMSRHandler *wrmsr;
+} KVMMSRHandlers;
+
static void kvm_init_msrs(X86CPU *cpu);
+static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
+ QEMUWRMSRHandler *wrmsr);
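For orientation, a short sketch of how these typedefs are meant to be used. The MSR number and handler below are purely hypothetical; the real registration added by this patch is kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, kvm_rdmsr_core_thread_count, NULL) further down.

    /* Hypothetical read-only MSR handled entirely in userspace. */
    static bool example_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
    {
        *val = 0;       /* value the guest reads back */
        return true;    /* tell KVM the access was handled */
    }

    /* ...later, typically from kvm_arch_init():
     *     ret = kvm_filter_msr(s, 0x12345678, example_rdmsr, NULL);
     */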
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_INFO(SET_TSS_ADDR),
@@ -141,6 +164,7 @@ static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
+static bool has_msr_hwcr;
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
@@ -169,6 +193,7 @@ static const char *vm_type_name[] = {
[KVM_X86_SEV_VM] = "SEV",
[KVM_X86_SEV_ES_VM] = "SEV-ES",
[KVM_X86_SNP_VM] = "SEV-SNP",
+ [KVM_X86_TDX_VM] = "TDX",
};
bool kvm_is_vm_type_supported(int type)
@@ -303,7 +328,7 @@ void kvm_synchronize_all_tsc(void)
{
CPUState *cpu;
- if (kvm_enabled()) {
+ if (kvm_enabled() && !is_tdx_vm()) {
CPU_FOREACH(cpu) {
run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
}
@@ -369,7 +394,7 @@ static bool host_tsx_broken(void)
/* Returns the value for a specific register on the cpuid entry
*/
-static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
uint32_t ret = 0;
switch (reg) {
@@ -391,9 +416,9 @@ static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
/* Find matching entry for function/index on kvm_cpuid2 struct
*/
-static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
- uint32_t function,
- uint32_t index)
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
{
int i;
for (i = 0; i < cpuid->nent; ++i) {
@@ -539,17 +564,17 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
* be enabled without the in-kernel irqchip
*/
if (!kvm_irqchip_in_kernel()) {
- ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
+ ret &= ~CPUID_KVM_PV_UNHALT;
}
if (kvm_irqchip_is_split()) {
- ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
+ ret |= CPUID_KVM_MSI_EXT_DEST_ID;
}
} else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
- ret |= 1U << KVM_HINTS_REALTIME;
+ ret |= CPUID_KVM_HINTS_REALTIME;
}
if (current_machine->cgs) {
- ret = x86_confidential_guest_mask_cpuid_features(
+ ret = x86_confidential_guest_adjust_cpuid_features(
X86_CONFIDENTIAL_GUEST(current_machine->cgs),
function, index, reg, ret);
}
@@ -845,6 +870,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
int r, cur_freq;
bool set_ioctl = false;
+ /*
+ * The TSC of a TD vcpu is immutable: it cannot be set/changed via the
+ * vcpu-scope VM_SET_TSC_KHZ, but can only be initialized via the VM-scope
+ * VM_SET_TSC_KHZ before the KVM_TDX_INIT_VM ioctl in tdx_pre_create_vcpu()
+ */
+ if (is_tdx_vm()) {
+ return 0;
+ }
+
if (!env->tsc_khz) {
return 0;
}
@@ -909,6 +943,7 @@ static struct {
uint32_t bits;
} flags[2];
uint64_t dependencies;
+ bool skip_passthrough;
} kvm_hyperv_properties[] = {
[HYPERV_FEAT_RELAXED] = {
.desc = "relaxed timing (hv-relaxed)",
@@ -1031,16 +1066,15 @@ static struct {
.bits = HV_DEPRECATING_AEOI_RECOMMENDED}
}
},
-#ifdef CONFIG_SYNDBG
[HYPERV_FEAT_SYNDBG] = {
.desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
.flags = {
{.func = HV_CPUID_FEATURES, .reg = R_EDX,
.bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
},
- .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
+ .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED),
+ .skip_passthrough = true,
},
-#endif
[HYPERV_FEAT_MSR_BITMAP] = {
.desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
.flags = {
@@ -1292,6 +1326,13 @@ static bool hyperv_feature_supported(CPUState *cs, int feature)
uint32_t func, bits;
int i, reg;
+ /*
+ * kvm_hyperv_properties needs to define at least one CPUID flag which
+ * must be used to detect the feature; otherwise it is hard to say
+ * whether it is supported or not.
+ */
+ assert(kvm_hyperv_properties[feature].flags[0].func);
+
for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
func = kvm_hyperv_properties[feature].flags[i].func;
@@ -1441,7 +1482,8 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
* hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
*/
for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
- if (hyperv_feature_supported(cs, feat)) {
+ if (hyperv_feature_supported(cs, feat) &&
+ !kvm_hyperv_properties[feat].skip_passthrough) {
cpu->hyperv_features |= BIT(feat);
}
}
@@ -1748,8 +1790,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
static Error *invtsc_mig_blocker;
-#define KVM_MAX_CPUID_ENTRIES 100
-
static void kvm_init_xsave(CPUX86State *env)
{
if (has_xsave2) {
@@ -1792,9 +1832,8 @@ static void kvm_init_nested_state(CPUX86State *env)
}
}
-static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
- struct kvm_cpuid_entry2 *entries,
- uint32_t cpuid_i)
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i)
{
uint32_t limit, i, j;
uint32_t unused;
@@ -1814,10 +1853,12 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
int times;
c->function = i;
- c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
- KVM_CPUID_FLAG_STATE_READ_NEXT;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
times = c->eax & 0xff;
+ if (times > 1) {
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+ KVM_CPUID_FLAG_STATE_READ_NEXT;
+ }
for (j = 1; j < times; ++j) {
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
@@ -1831,7 +1872,7 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
break;
}
case 0x1f:
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(env_archcpu(env))) {
cpuid_i--;
break;
}
@@ -1840,10 +1881,6 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
case 0xb:
case 0xd:
for (j = 0; ; j++) {
- if (i == 0xd && j == 64) {
- break;
- }
-
c->function = i;
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
c->index = j;
@@ -1859,7 +1896,12 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
break;
}
if (i == 0xd && c->eax == 0) {
- continue;
+ if (j < 63) {
+ continue;
+ } else {
+ cpuid_i--;
+ break;
+ }
}
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
goto full;
@@ -1887,7 +1929,8 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
case 0x7:
case 0x14:
case 0x1d:
- case 0x1e: {
+ case 0x1e:
+ case 0x24: {
uint32_t times;
c->function = i;
@@ -2017,6 +2060,15 @@ full:
abort();
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ if (is_tdx_vm()) {
+ return tdx_pre_create_vcpu(cpu, errp);
+ }
+
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
@@ -2041,6 +2093,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
int r;
Error *local_err = NULL;
+ if (current_machine->cgs) {
+ r = x86_confidential_guest_check_features(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
memset(&cpuid_data, 0, sizeof(cpuid_data));
cpuid_i = 0;
@@ -2378,6 +2438,21 @@ void kvm_arch_after_reset_vcpu(X86CPU *cpu)
}
}
+void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd)
+{
+ g_autofree struct kvm_msrs *msrs = NULL;
+
+ msrs = g_malloc0(sizeof(*msrs) + sizeof(msrs->entries[0]));
+ msrs->entries[0].index = MSR_IA32_TSC;
+ msrs->entries[0].data = 1; /* match the value in x86_cpu_reset() */
+ msrs->nmsrs++;
+
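+ /* KVM_SET_MSRS returns the number of MSRs actually set, hence the != 1 check. */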
+ if (ioctl(kvm_fd, KVM_SET_MSRS, msrs) != 1) {
+ warn_report("parked vCPU %lu TSC reset failed: %d",
+ vcpu_id, errno);
+ }
+}
+
void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -2550,6 +2625,8 @@ static int kvm_get_supported_msrs(KVMState *s)
case MSR_IA32_PKRS:
has_msr_pkrs = true;
break;
+ case MSR_K7_HWCR:
+ has_msr_hwcr = true;
}
}
}
@@ -2559,13 +2636,58 @@ static int kvm_get_supported_msrs(KVMState *s)
return ret;
}
-static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
+static bool kvm_rdmsr_core_thread_count(X86CPU *cpu,
+ uint32_t msr,
uint64_t *val)
{
+ *val = cpu_x86_get_msr_core_thread_count(cpu);
+
+ return true;
+}
+
+static bool kvm_rdmsr_rapl_power_unit(X86CPU *cpu,
+ uint32_t msr,
+ uint64_t *val)
+{
+
+ CPUState *cs = CPU(cpu);
+
+ *val = cs->kvm_state->msr_energy.msr_unit;
+
+ return true;
+}
+
+static bool kvm_rdmsr_pkg_power_limit(X86CPU *cpu,
+ uint32_t msr,
+ uint64_t *val)
+{
+
CPUState *cs = CPU(cpu);
- *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
- *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
+ *val = cs->kvm_state->msr_energy.msr_limit;
+
+ return true;
+}
+
+static bool kvm_rdmsr_pkg_power_info(X86CPU *cpu,
+ uint32_t msr,
+ uint64_t *val)
+{
+
+ CPUState *cs = CPU(cpu);
+
+ *val = cs->kvm_state->msr_energy.msr_info;
+
+ return true;
+}
+
+static bool kvm_rdmsr_pkg_energy_status(X86CPU *cpu,
+ uint32_t msr,
+ uint64_t *val)
+{
+
+ CPUState *cs = CPU(cpu);
+ *val = cs->kvm_state->msr_energy.msr_value[cs->cpu_index];
return true;
}
@@ -2604,63 +2726,535 @@ static void register_smram_listener(Notifier *n, void *unused)
&smram_address_space, 1, "kvm-smram");
}
-int kvm_arch_get_default_type(MachineState *ms)
+static void *kvm_msr_energy_thread(void *data)
{
- return 0;
+ KVMState *s = data;
+ struct KVMMsrEnergy *vmsr = &s->msr_energy;
+
+ g_autofree vmsr_package_energy_stat *pkg_stat = NULL;
+ g_autofree vmsr_thread_stat *thd_stat = NULL;
+ g_autofree CPUState *cpu = NULL;
+ g_autofree unsigned int *vpkgs_energy_stat = NULL;
+ unsigned int num_threads = 0;
+
+ X86CPUTopoIDs topo_ids;
+
+ rcu_register_thread();
+
+ /* Allocate memory for each package energy status */
+ pkg_stat = g_new0(vmsr_package_energy_stat, vmsr->host_topo.maxpkgs);
+
+ /* Allocate memory for thread stats */
+ thd_stat = g_new0(vmsr_thread_stat, 1);
+
+ /* Allocate memory for holding virtual package energy counter */
+ vpkgs_energy_stat = g_new0(unsigned int, vmsr->guest_vsockets);
+
+ /* Populate the max ticks of each package */
+ for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
+ /*
+ * Max number of ticks per package:
+ * sample time in seconds * ticks per second * cores per package
+ * e.g. a 1-second sample at 100 ticks/second/CPU with 12 CPUs per package gives 1200 ticks
+ */
+ vmsr->host_topo.maxticks[i] = (MSR_ENERGY_THREAD_SLEEP_US / 1000000)
+ * sysconf(_SC_CLK_TCK)
+ * vmsr->host_topo.pkg_cpu_count[i];
+ }
+
+ while (true) {
+ /* Get the ids of all QEMU threads */
+ g_autofree pid_t *thread_ids
+ = vmsr_get_thread_ids(vmsr->pid, &num_threads);
+
+ if (thread_ids == NULL) {
+ goto clean;
+ }
+
+ thd_stat = g_renew(vmsr_thread_stat, thd_stat, num_threads);
+ /* Unlike g_new0, a g_renew0 function doesn't exist yet, so zero manually */
+ memset(thd_stat, 0, num_threads * sizeof(vmsr_thread_stat));
+
+ /* Populate all the thread stats */
+ for (int i = 0; i < num_threads; i++) {
+ thd_stat[i].utime = g_new0(unsigned long long, 2);
+ thd_stat[i].stime = g_new0(unsigned long long, 2);
+ thd_stat[i].thread_id = thread_ids[i];
+ vmsr_read_thread_stat(vmsr->pid,
+ thd_stat[i].thread_id,
+ &thd_stat[i].utime[0],
+ &thd_stat[i].stime[0],
+ &thd_stat[i].cpu_id);
+ thd_stat[i].pkg_id =
+ vmsr_get_physical_package_id(thd_stat[i].cpu_id);
+ }
+
+ /* Retrieve each package's power plane energy counter */
+ for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
+ for (int j = 0; j < num_threads; j++) {
+ /*
+ * Use the first thread we found that ran on the CPU
+ * of the package to read the package's energy counter
+ */
+ if (thd_stat[j].pkg_id == i) {
+ pkg_stat[i].e_start =
+ vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
+ thd_stat[j].cpu_id,
+ thd_stat[j].thread_id,
+ s->msr_energy.sioc);
+ break;
+ }
+ }
+ }
+
+ /* Sleep a short period while the other threads are working */
+ usleep(MSR_ENERGY_THREAD_SLEEP_US);
+
+ /*
+ * Retrieve each package's power plane energy counter again
+ * and calculate the per-package delta
+ */
+ for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
+ for (int j = 0; j < num_threads; j++) {
+ /*
+ * Use the first thread we found that ran on the CPU
+ * of the package to read the package's energy counter
+ */
+ if (thd_stat[j].pkg_id == i) {
+ pkg_stat[i].e_end =
+ vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
+ thd_stat[j].cpu_id,
+ thd_stat[j].thread_id,
+ s->msr_energy.sioc);
+ /*
+ * Guard against the case where the VM was migrated
+ * during the sleep period, or any other case where
+ * the energy counter might be lower after
+ * the sleep period.
+ */
+ if (pkg_stat[i].e_end > pkg_stat[i].e_start) {
+ pkg_stat[i].e_delta =
+ pkg_stat[i].e_end - pkg_stat[i].e_start;
+ } else {
+ pkg_stat[i].e_delta = 0;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Delta of ticks spent by each thread between the two samples */
+ for (int i = 0; i < num_threads; i++) {
+ vmsr_read_thread_stat(vmsr->pid,
+ thd_stat[i].thread_id,
+ &thd_stat[i].utime[1],
+ &thd_stat[i].stime[1],
+ &thd_stat[i].cpu_id);
+
+ if (vmsr->pid < 0) {
+ /*
+ * We don't count dead threads,
+ * i.e. threads that existed before the sleep
+ * but no longer do.
+ */
+ thd_stat[i].delta_ticks = 0;
+ } else {
+ vmsr_delta_ticks(thd_stat, i);
+ }
+ }
+
+ /*
+ * Identify the vcpu threads
+ * Calculate the number of vcpus per package
+ */
+ CPU_FOREACH(cpu) {
+ for (int i = 0; i < num_threads; i++) {
+ if (cpu->thread_id == thd_stat[i].thread_id) {
+ thd_stat[i].is_vcpu = true;
+ thd_stat[i].vcpu_id = cpu->cpu_index;
+ pkg_stat[thd_stat[i].pkg_id].nb_vcpu++;
+ thd_stat[i].acpi_id = kvm_arch_vcpu_id(cpu);
+ break;
+ }
+ }
+ }
+
+ /* Retrieve the virtual package number of each vCPU */
+ for (int i = 0; i < vmsr->guest_cpu_list->len; i++) {
+ for (int j = 0; j < num_threads; j++) {
+ if ((thd_stat[j].acpi_id ==
+ vmsr->guest_cpu_list->cpus[i].arch_id)
+ && (thd_stat[j].is_vcpu == true)) {
+ x86_topo_ids_from_apicid(thd_stat[j].acpi_id,
+ &vmsr->guest_topo_info, &topo_ids);
+ thd_stat[j].vpkg_id = topo_ids.pkg_id;
+ }
+ }
+ }
+
+ /* Calculate the total energy of all non-vCPU threads */
+ for (int i = 0; i < num_threads; i++) {
+ if ((thd_stat[i].is_vcpu != true) &&
+ (thd_stat[i].delta_ticks > 0)) {
+ double temp;
+ temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
+ thd_stat[i].delta_ticks,
+ vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
+ pkg_stat[thd_stat[i].pkg_id].e_ratio
+ += (uint64_t)lround(temp);
+ }
+ }
+
+ /* Calculate the ratio per non-vCPU thread of each package */
+ for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
+ if (pkg_stat[i].nb_vcpu > 0) {
+ pkg_stat[i].e_ratio = pkg_stat[i].e_ratio / pkg_stat[i].nb_vcpu;
+ }
+ }
+
+ /*
+ * Calculate the energy for each Package:
+ * Energy Package = sum of the energy of each vCPU that belongs to the package
+ */
+ for (int i = 0; i < num_threads; i++) {
+ if ((thd_stat[i].is_vcpu == true) && \
+ (thd_stat[i].delta_ticks > 0)) {
+ double temp;
+ temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
+ thd_stat[i].delta_ticks,
+ vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
+ vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
+ (uint64_t)lround(temp);
+ vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
+ pkg_stat[thd_stat[i].pkg_id].e_ratio;
+ }
+ }
+
+ /*
+ * Finally populate the vmsr register of each vCPU with the total
+ * package value to emulate real hardware, where each CPU returns the
+ * value of the package it belongs to.
+ */
+ for (int i = 0; i < num_threads; i++) {
+ if ((thd_stat[i].is_vcpu == true) && \
+ (thd_stat[i].delta_ticks > 0)) {
+ vmsr->msr_value[thd_stat[i].vcpu_id] = \
+ vpkgs_energy_stat[thd_stat[i].vpkg_id];
+ }
+ }
+
+ /* Free the per-thread buffers before they are zeroed on the next iteration */
+ for (int i = 0; i < num_threads; i++) {
+ g_free(thd_stat[i].utime);
+ g_free(thd_stat[i].stime);
+ }
+ }
+
+clean:
+ rcu_unregister_thread();
+ return NULL;
}
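A condensed sketch of the attribution step performed above. It assumes (this is an assumption about vmsr_get_ratio(), not a quote of it) that a thread's share of a package's energy delta is proportional to the ticks it consumed out of the package's maximum ticks for the sampling window; the helper name is hypothetical.

    #include <math.h>
    #include <stdint.h>

    /* Hypothetical helper: proportionally attribute pkg_energy_delta to one thread. */
    static uint64_t attribute_energy(uint64_t pkg_energy_delta,
                                     unsigned long long thread_ticks,
                                     unsigned int pkg_max_ticks)
    {
        if (pkg_max_ticks == 0) {
            return 0;
        }
        double share = (double)thread_ticks / (double)pkg_max_ticks;
        return (uint64_t)llround((double)pkg_energy_delta * share);
    }

Each vCPU's reported value is then the sum of such shares over the vCPUs of its virtual package, plus an equal slice of the non-vCPU overhead, as computed in the loops above.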
-int kvm_arch_init(MachineState *ms, KVMState *s)
+static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms)
{
- uint64_t identity_base = 0xfffbc000;
- uint64_t shadow_mem;
- int ret;
- struct utsname utsname;
- Error *local_err = NULL;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ struct KVMMsrEnergy *r = &s->msr_energy;
/*
- * Initialize SEV context, if required
- *
- * If no memory encryption is requested (ms->cgs == NULL) this is
- * a no-op.
- *
- * It's also a no-op if a non-SEV confidential guest support
- * mechanism is selected. SEV is the only mechanism available to
- * select on x86 at present, so this doesn't arise, but if new
- * mechanisms are supported in future (e.g. TDX), they'll need
- * their own initialization either here or elsewhere.
+ * Sanity check
+ * 1. The host CPU must be an Intel CPU
+ * 2. RAPL must be enabled on the host
*/
- if (ms->cgs) {
- ret = confidential_guest_kvm_init(ms->cgs, &local_err);
- if (ret < 0) {
- error_report_err(local_err);
- return ret;
+ if (!is_host_cpu_intel()) {
+ error_report("The RAPL feature can only be enabled on hosts "
+ "with Intel CPU models");
+ return -1;
+ }
+
+ if (!is_rapl_enabled()) {
+ return -1;
+ }
+
+ /* Retrieve the virtual topology */
+ vmsr_init_topo_info(&r->guest_topo_info, ms);
+
+ /* Retrieve the number of vcpus */
+ r->guest_vcpus = ms->smp.cpus;
+
+ /* Retrieve the number of virtual sockets */
+ r->guest_vsockets = ms->smp.sockets;
+
+ /* Allocate register memory (MSR_PKG_STATUS) for each vcpu */
+ r->msr_value = g_new0(uint64_t, r->guest_vcpus);
+
+ /* Retrieve the CPUArchIdList */
+ r->guest_cpu_list = mc->possible_cpu_arch_ids(ms);
+
+ /* Max number of cpus on the Host */
+ r->host_topo.maxcpus = vmsr_get_maxcpus();
+ if (r->host_topo.maxcpus == 0) {
+ error_report("host max cpus = 0");
+ return -1;
+ }
+
+ /* Max number of packages on the host */
+ r->host_topo.maxpkgs = vmsr_get_max_physical_package(r->host_topo.maxcpus);
+ if (r->host_topo.maxpkgs == 0) {
+ error_report("host max pkgs = 0");
+ return -1;
+ }
+
+ /* Allocate memory for each package on the host */
+ r->host_topo.pkg_cpu_count = g_new0(unsigned int, r->host_topo.maxpkgs);
+ r->host_topo.maxticks = g_new0(unsigned int, r->host_topo.maxpkgs);
+
+ vmsr_count_cpus_per_package(r->host_topo.pkg_cpu_count,
+ r->host_topo.maxpkgs);
+ for (int i = 0; i < r->host_topo.maxpkgs; i++) {
+ if (r->host_topo.pkg_cpu_count[i] == 0) {
+ error_report("cpu per packages = 0 on package_%d", i);
+ return -1;
}
}
- has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
- has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
+ /* Get QEMU PID */
+ r->pid = getpid();
- hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
+ /* Compute the socket path if necessary */
+ if (s->msr_energy.socket_path == NULL) {
+ s->msr_energy.socket_path = vmsr_compute_default_paths();
+ }
+
+ /* Open socket with vmsr helper */
+ s->msr_energy.sioc = vmsr_open_socket(s->msr_energy.socket_path);
+ if (s->msr_energy.sioc == NULL) {
+ error_report("vmsr socket opening failed");
+ return -1;
+ }
+
+ /* Those MSR values should not change */
+ r->msr_unit = vmsr_read_msr(MSR_RAPL_POWER_UNIT, 0, r->pid,
+ s->msr_energy.sioc);
+ r->msr_limit = vmsr_read_msr(MSR_PKG_POWER_LIMIT, 0, r->pid,
+ s->msr_energy.sioc);
+ r->msr_info = vmsr_read_msr(MSR_PKG_POWER_INFO, 0, r->pid,
+ s->msr_energy.sioc);
+ if (r->msr_unit == 0 || r->msr_limit == 0 || r->msr_info == 0) {
+ error_report("can't read any virtual msr");
+ return -1;
+ }
+
+ qemu_thread_create(&r->msr_thr, "kvm-msr",
+ kvm_msr_energy_thread,
+ s, QEMU_THREAD_JOINABLE);
+ return 0;
+}
+
+int kvm_arch_get_default_type(MachineState *ms)
+{
+ return 0;
+}
+
+static int kvm_vm_enable_exception_payload(KVMState *s)
+{
+ int ret = 0;
has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
if (has_exception_payload) {
ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
if (ret < 0) {
error_report("kvm: Failed to enable exception payload cap: %s",
strerror(-ret));
- return ret;
}
}
- has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
+ return ret;
+}
+
+static int kvm_vm_enable_triple_fault_event(KVMState *s)
+{
+ int ret = 0;
+ has_triple_fault_event = \
+ kvm_check_extension(s,
+ KVM_CAP_X86_TRIPLE_FAULT_EVENT);
if (has_triple_fault_event) {
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
if (ret < 0) {
error_report("kvm: Failed to enable triple fault event cap: %s",
strerror(-ret));
+ }
+ }
+ return ret;
+}
+
+static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base)
+{
+ return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+}
+
+static int kvm_vm_set_nr_mmu_pages(KVMState *s)
+{
+ uint64_t shadow_mem;
+ int ret = 0;
+ shadow_mem = object_property_get_int(OBJECT(s),
+ "kvm-shadow-mem",
+ &error_abort);
+ if (shadow_mem != -1) {
+ shadow_mem /= 4096;
+ ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+ }
+ return ret;
+}
+
+static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base)
+{
+ return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base);
+}
+
+static int kvm_vm_enable_disable_exits(KVMState *s)
+{
+ int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
+
+ if (disable_exits) {
+ disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
+ KVM_X86_DISABLE_EXITS_HLT |
+ KVM_X86_DISABLE_EXITS_PAUSE |
+ KVM_X86_DISABLE_EXITS_CSTATE);
+ }
+
+ return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
+ disable_exits);
+}
+
+static int kvm_vm_enable_bus_lock_exit(KVMState *s)
+{
+ int ret = 0;
+ ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
+ if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
+ error_report("kvm: bus lock detection unsupported");
+ return -ENOTSUP;
+ }
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
+ KVM_BUS_LOCK_DETECTION_EXIT);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable bus lock detection cap: %s",
+ strerror(-ret));
+ }
+
+ return ret;
+}
+
+static int kvm_vm_enable_notify_vmexit(KVMState *s)
+{
+ int ret = 0;
+ if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) {
+ uint64_t notify_window_flags =
+ ((uint64_t)s->notify_window << 32) |
+ KVM_X86_NOTIFY_VMEXIT_ENABLED |
+ KVM_X86_NOTIFY_VMEXIT_USER;
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
+ notify_window_flags);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable notify vmexit cap: %s",
+ strerror(-ret));
+ }
+ }
+ return ret;
+}
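A small worked example of the capability argument assembled above. The window value is hypothetical; the two flag bits are bit 0 and bit 1 as defined in <linux/kvm.h>.

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
    #define KVM_X86_NOTIFY_VMEXIT_USER    (1ULL << 1)

    int main(void)
    {
        uint32_t notify_window = 10000;   /* hypothetical window, in TSC cycles */
        uint64_t arg = ((uint64_t)notify_window << 32) |
                       KVM_X86_NOTIFY_VMEXIT_ENABLED |
                       KVM_X86_NOTIFY_VMEXIT_USER;

        printf("cap arg = 0x%016llx\n", (unsigned long long)arg); /* 0x0000271000000003 */
        return 0;
    }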
+
+static int kvm_vm_enable_userspace_msr(KVMState *s)
+{
+ int ret;
+
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
+ KVM_MSR_EXIT_REASON_FILTER);
+ if (ret < 0) {
+ error_report("Could not enable user space MSRs: %s",
+ strerror(-ret));
+ exit(1);
+ }
+
+ ret = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
+ kvm_rdmsr_core_thread_count, NULL);
+ if (ret < 0) {
+ error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
+ strerror(-ret));
+ exit(1);
+ }
+
+ return 0;
+}
+
+static int kvm_vm_enable_energy_msrs(KVMState *s)
+{
+ int ret;
+
+ if (s->msr_energy.enable == true) {
+ ret = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT,
+ kvm_rdmsr_rapl_power_unit, NULL);
+ if (ret < 0) {
+ error_report("Could not install MSR_RAPL_POWER_UNIT handler: %s",
+ strerror(-ret));
+ return ret;
+ }
+
+ ret = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT,
+ kvm_rdmsr_pkg_power_limit, NULL);
+ if (ret < 0) {
+ error_report("Could not install MSR_PKG_POWER_LIMIT handler: %s",
+ strerror(-ret));
+ return ret;
+ }
+
+ ret = kvm_filter_msr(s, MSR_PKG_POWER_INFO,
+ kvm_rdmsr_pkg_power_info, NULL);
+ if (ret < 0) {
+ error_report("Could not install MSR_PKG_POWER_INFO handler: %s",
+ strerror(-ret));
+ return ret;
+ }
+ ret = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS,
+ kvm_rdmsr_pkg_energy_status, NULL);
+ if (ret < 0) {
+ error_report("Could not install MSR_PKG_ENERGY_STATUS handler: %s",
+ strerror(-ret));
return ret;
}
}
+ return 0;
+}
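Usage note (the option spellings below are an assumption based on the RAPL/vmsr helper documentation, not something shown in this diff): the path above is expected to be activated with something like

    -accel kvm,rapl=true,rapl-helper-socket=/path/to/sock

with a privileged qemu-vmsr-helper process listening on that socket, so that vmsr_read_msr() can obtain the host RAPL counters on behalf of an unprivileged QEMU.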
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ int ret;
+ struct utsname utsname;
+ Error *local_err = NULL;
+
+ /*
+ * Initialize confidential guest (SEV/TDX) context, if required
+ */
+ if (ms->cgs) {
+ ret = confidential_guest_kvm_init(ms->cgs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+ has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
+
+ hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
+
+ ret = kvm_vm_enable_exception_payload(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_vm_enable_triple_fault_event(s);
+ if (ret < 0) {
+ return ret;
+ }
if (s->xen_version) {
#ifdef CONFIG_XEN_EMU
@@ -2685,41 +3279,31 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return ret;
}
- kvm_get_supported_feature_msrs(s);
+ ret = kvm_get_supported_feature_msrs(s);
+ if (ret < 0) {
+ return ret;
+ }
uname(&utsname);
lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
- /*
- * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
- * In order to use vm86 mode, an EPT identity map and a TSS are needed.
- * Since these must be part of guest physical memory, we need to allocate
- * them, both by setting their start addresses in the kernel and by
- * creating a corresponding e820 entry. We need 4 pages before the BIOS,
- * so this value allows up to 16M BIOSes.
- */
- identity_base = 0xfeffc000;
- ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+ ret = kvm_vm_set_identity_map_addr(s, KVM_IDENTITY_BASE);
if (ret < 0) {
return ret;
}
/* Set TSS base one page after EPT identity map. */
- ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
+ ret = kvm_vm_set_tss_addr(s, KVM_IDENTITY_BASE + 0x1000);
if (ret < 0) {
return ret;
}
/* Tell fw_cfg to notify the BIOS to reserve the range. */
- e820_add_entry(identity_base, 0x4000, E820_RESERVED);
+ e820_add_entry(KVM_IDENTITY_BASE, 0x4000, E820_RESERVED);
- shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
- if (shadow_mem != -1) {
- shadow_mem /= 4096;
- ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
- if (ret < 0) {
- return ret;
- }
+ ret = kvm_vm_set_nr_mmu_pages(s);
+ if (ret < 0) {
+ return ret;
}
if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
@@ -2730,23 +3314,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
if (enable_cpu_pm) {
- int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
-/* Work around for kernel header with a typo. TODO: fix header and drop. */
-#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
-#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
-#endif
- if (disable_exits) {
- disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
- KVM_X86_DISABLE_EXITS_HLT |
- KVM_X86_DISABLE_EXITS_PAUSE |
- KVM_X86_DISABLE_EXITS_CSTATE);
- }
-
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
- disable_exits);
+ ret = kvm_vm_enable_disable_exits(s);
if (ret < 0) {
error_report("kvm: guest stopping CPU not supported: %s",
strerror(-ret));
+ return ret;
}
}
@@ -2754,16 +3326,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
X86MachineState *x86ms = X86_MACHINE(ms);
if (x86ms->bus_lock_ratelimit > 0) {
- ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
- if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
- error_report("kvm: bus lock detection unsupported");
- return -ENOTSUP;
- }
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
- KVM_BUS_LOCK_DETECTION_EXIT);
+ ret = kvm_vm_enable_bus_lock_exit(s);
if (ret < 0) {
- error_report("kvm: Failed to enable bus lock detection cap: %s",
- strerror(-ret));
return ret;
}
ratelimit_init(&bus_lock_ratelimit_ctrl);
@@ -2772,37 +3336,30 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
- if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
- kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
- uint64_t notify_window_flags =
- ((uint64_t)s->notify_window << 32) |
- KVM_X86_NOTIFY_VMEXIT_ENABLED |
- KVM_X86_NOTIFY_VMEXIT_USER;
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
- notify_window_flags);
- if (ret < 0) {
- error_report("kvm: Failed to enable notify vmexit cap: %s",
- strerror(-ret));
- return ret;
- }
+ if (kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
+ ret = kvm_vm_enable_notify_vmexit(s);
+ if (ret < 0) {
+ return ret;
+ }
}
- if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
- bool r;
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
- KVM_MSR_EXIT_REASON_FILTER);
- if (ret) {
- error_report("Could not enable user space MSRs: %s",
- strerror(-ret));
- exit(1);
+ if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
+ ret = kvm_vm_enable_userspace_msr(s);
+ if (ret < 0) {
+ return ret;
}
- r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
- kvm_rdmsr_core_thread_count, NULL);
- if (!r) {
- error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
- strerror(-ret));
- exit(1);
+ if (s->msr_energy.enable == true) {
+ ret = kvm_vm_enable_energy_msrs(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_msr_energy_thread_init(s, ms);
+ if (ret < 0) {
+ error_report("kvm : error RAPL feature requirement not met");
+ return ret;
+ }
}
}
@@ -3265,7 +3822,14 @@ static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
CR4_VMXE_MASK);
- if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
+ if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) {
+ /* FRED injected-event data (0x2052). */
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52);
+ } else if (f[FEAT_VMX_EXIT_CTLS] &
+ VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) {
+ /* Secondary VM-exit controls (0x2044). */
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44);
+ } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
/* TSC multiplier (0x2032). */
kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
} else {
@@ -3308,32 +3872,34 @@ static void kvm_init_msrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
kvm_msr_buf_reset(cpu);
- if (has_msr_arch_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
- env->features[FEAT_ARCH_CAPABILITIES]);
- }
- if (has_msr_core_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
- env->features[FEAT_CORE_CAPABILITY]);
- }
+ if (!is_tdx_vm()) {
+ if (has_msr_arch_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
+ env->features[FEAT_ARCH_CAPABILITIES]);
+ }
+
+ if (has_msr_core_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+ env->features[FEAT_CORE_CAPABILITY]);
+ }
- if (has_msr_perf_capabs && cpu->enable_pmu) {
- kvm_msr_entry_add_perf(cpu, env->features);
+ if (has_msr_perf_capabs && cpu->enable_pmu) {
+ kvm_msr_entry_add_perf(cpu, env->features);
+ }
+
+ /*
+ * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
+ * all kernels with MSR features should have them.
+ */
+ if (kvm_feature_msrs && cpu_has_vmx(env)) {
+ kvm_msr_entry_add_vmx(cpu, env->features);
+ }
}
if (has_msr_ucode_rev) {
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
}
-
- /*
- * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
- * all kernels with MSR features should have them.
- */
- if (kvm_feature_msrs && cpu_has_vmx(env)) {
- kvm_msr_entry_add_vmx(cpu, env->features);
- }
-
assert(kvm_buf_set_msrs(cpu) == 0);
}
@@ -3395,6 +3961,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_virt_ssbd) {
kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
}
+ if (has_msr_hwcr) {
+ kvm_msr_entry_add(cpu, MSR_K7_HWCR, env->msr_hwcr);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
@@ -3422,22 +3991,24 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
*/
if (level >= KVM_PUT_RESET_STATE) {
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
- kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
- kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ }
+ if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
}
@@ -3495,13 +4066,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
env->msr_hv_tsc_emulation_status);
}
-#ifdef CONFIG_SYNDBG
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
has_msr_hv_syndbg_options) {
kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
hyperv_syndbg_query_options());
}
-#endif
}
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
@@ -3673,7 +4242,8 @@ static int kvm_get_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
void *xsave = env->xsave_buf;
- int type, ret;
+ unsigned long type;
+ int ret;
type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
@@ -3878,6 +4448,9 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
env->tsc_valid = !runstate_is_running();
}
+ if (has_msr_hwcr) {
+ kvm_msr_entry_add(cpu, MSR_K7_HWCR, 0);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
@@ -3898,21 +4471,23 @@ static int kvm_get_msrs(X86CPU *cpu)
}
}
#endif
- kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
- kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
+ }
+ if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
- if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+ if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
}
if (has_architectural_pmu_version > 0) {
@@ -4397,6 +4972,9 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
break;
+ case MSR_K7_HWCR:
+ env->msr_hwcr = msrs[i].data;
+ break;
}
}
@@ -4688,7 +5266,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
return ret;
}
-int kvm_arch_put_registers(CPUState *cpu, int level)
+int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp)
{
X86CPU *x86_cpu = X86_CPU(cpu);
int ret;
@@ -4703,6 +5281,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set feature control MSR");
return ret;
}
}
@@ -4710,12 +5289,14 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
/* must be before kvm_put_nested_state so that EFER.SVME is set */
ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set special registers");
return ret;
}
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_nested_state(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set nested state");
return ret;
}
}
@@ -4733,6 +5314,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
ret = kvm_put_xen_state(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set Xen state");
return ret;
}
}
@@ -4740,43 +5322,51 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set general purpose registers");
return ret;
}
ret = kvm_put_xsave(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set XSAVE");
return ret;
}
ret = kvm_put_xcrs(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set XCRs");
return ret;
}
ret = kvm_put_msrs(x86_cpu, level);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set MSRs");
return ret;
}
ret = kvm_put_vcpu_events(x86_cpu, level);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set vCPU events");
return ret;
}
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_mp_state(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set MP state");
return ret;
}
}
ret = kvm_put_tscdeadline_msr(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR");
return ret;
}
ret = kvm_put_debugregs(x86_cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to set debug registers");
return ret;
}
return 0;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
X86CPU *cpu = X86_CPU(cs);
int ret;
@@ -4785,6 +5375,7 @@ int kvm_arch_get_registers(CPUState *cs)
ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get vCPU events");
goto out;
}
/*
@@ -4793,44 +5384,54 @@ int kvm_arch_get_registers(CPUState *cs)
*/
ret = kvm_get_mp_state(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get MP state");
goto out;
}
ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get general purpose registers");
goto out;
}
ret = kvm_get_xsave(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get XSAVE");
goto out;
}
ret = kvm_get_xcrs(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get XCRs");
goto out;
}
ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get special registers");
goto out;
}
ret = kvm_get_msrs(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get MSRs");
goto out;
}
ret = kvm_get_apic(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get APIC");
goto out;
}
ret = kvm_get_debugregs(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get debug registers");
goto out;
}
ret = kvm_get_nested_state(cpu);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get nested state");
goto out;
}
#ifdef CONFIG_XEN_EMU
if (xen_mode == XEN_EMULATE) {
ret = kvm_get_xen_state(cs);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get Xen state");
goto out;
}
}
@@ -5261,15 +5862,16 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
}
}
-static bool kvm_install_msr_filters(KVMState *s)
+static int kvm_install_msr_filters(KVMState *s)
{
uint64_t zero = 0;
struct kvm_msr_filter filter = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
- int r, i, j = 0;
+ int i, j = 0;
- for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(msr_handlers) != ARRAY_SIZE(filter.ranges));
+ for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
KVMMSRHandlers *handler = &msr_handlers[i];
if (handler->msr) {
struct kvm_msr_filter_range *range = &filter.ranges[j++];
@@ -5291,18 +5893,13 @@ static bool kvm_install_msr_filters(KVMState *s)
}
}
- r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
- if (r) {
- return false;
- }
-
- return true;
+ return kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
}
-bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
- QEMUWRMSRHandler *wrmsr)
+static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
+ QEMUWRMSRHandler *wrmsr)
{
- int i;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
if (!msr_handlers[i].msr) {
@@ -5312,16 +5909,17 @@ bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
.wrmsr = wrmsr,
};
- if (!kvm_install_msr_filters(s)) {
+ ret = kvm_install_msr_filters(s);
+ if (ret) {
msr_handlers[i] = (KVMMSRHandlers) { };
- return false;
+ return ret;
}
- return true;
+ return 0;
}
}
- return false;
+ return -EINVAL;
}
static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
@@ -5341,7 +5939,7 @@ static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
}
}
- assert(false);
+ g_assert_not_reached();
}
static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
@@ -5360,7 +5958,7 @@ static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
}
}
- assert(false);
+ g_assert_not_reached();
}
static bool has_sgx_provisioning;
@@ -5420,9 +6018,11 @@ static bool host_supports_vmx(void)
* because private/shared page tracking is already provided through other
* means, these 2 use-cases should be treated as being mutually-exclusive.
*/
-static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
+static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run)
{
+ struct kvm_pre_fault_memory mem;
uint64_t gpa, size, attributes;
+ int ret;
if (!machine_require_guest_memfd(current_machine))
return -EINVAL;
@@ -5433,13 +6033,32 @@ static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);
- return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+ ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+ if (ret || !kvm_pre_fault_memory_supported) {
+ return ret;
+ }
+
+ /*
+ * Opportunistically pre-fault memory in. Failures are ignored so that any
+ * errors in faulting in the memory will get captured in the KVM page
+ * fault path when the guest first accesses the page.
+ */
+ memset(&mem, 0, sizeof(mem));
+ mem.gpa = gpa;
+ mem.size = size;
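+ /*
+ * KVM_PRE_FAULT_MEMORY advances mem.gpa and shrinks mem.size as it makes
+ * progress, so the loop below terminates once the whole range is mapped
+ * (or breaks out early on error).
+ */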
+ while (mem.size) {
+ if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) {
+ break;
+ }
+ }
+
+ return 0;
}
-static int kvm_handle_hypercall(struct kvm_run *run)
+static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run)
{
if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
- return kvm_handle_hc_map_gpa_range(run);
+ return kvm_handle_hc_map_gpa_range(cpu, run);
return -EINVAL;
}
@@ -5539,7 +6158,32 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
#endif
case KVM_EXIT_HYPERCALL:
- ret = kvm_handle_hypercall(run);
+ ret = kvm_handle_hypercall(cpu, run);
+ break;
+ case KVM_EXIT_SYSTEM_EVENT:
+ switch (run->system_event.type) {
+ case KVM_SYSTEM_EVENT_TDX_FATAL:
+ ret = tdx_handle_report_fatal_error(cpu, run);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ break;
+ case KVM_EXIT_TDX:
+ /*
+ * run->tdx is already set up for the case where userspace
+ * does not handle the TDVMCALL.
+ */
+ switch (run->tdx.nr) {
+ case TDVMCALL_GET_QUOTE:
+ tdx_handle_get_quote(cpu, run);
+ break;
+ case TDVMCALL_GET_TD_VM_CALL_INFO:
+ tdx_handle_get_tdvmcall_info(cpu, run);
+ break;
+ }
+ ret = 0;
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
@@ -5613,7 +6257,7 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
return address;
}
env = &X86_CPU(first_cpu)->env;
- if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
+ if (!(env->features[FEAT_KVM] & CPUID_KVM_MSI_EXT_DEST_ID)) {
return address;
}
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 34fc607..5f83e88 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -11,10 +11,11 @@
#ifndef QEMU_KVM_I386_H
#define QEMU_KVM_I386_H
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
-#ifdef CONFIG_KVM
+#define KVM_MAX_CPUID_ENTRIES 100
+/* always false if !CONFIG_KVM */
#define kvm_pit_in_kernel() \
(kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_pic_in_kernel() \
@@ -22,14 +23,6 @@
#define kvm_ioapic_in_kernel() \
(kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
-#else
-
-#define kvm_pit_in_kernel() 0
-#define kvm_pic_in_kernel() 0
-#define kvm_ioapic_in_kernel() 0
-
-#endif /* CONFIG_KVM */
-
bool kvm_has_smm(void);
bool kvm_enable_x2apic(void);
bool kvm_hv_vpindex_settable(void);
@@ -51,6 +44,13 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+
+typedef struct KvmCpuidInfo {
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+} KvmCpuidInfo;
+
bool kvm_is_vm_type_supported(int type);
bool kvm_has_adjust_clock_stable(void);
bool kvm_has_exception_payload(void);
@@ -66,17 +66,12 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
void kvm_update_msi_routes_all(void *private, bool global,
uint32_t index, uint32_t mask);
-typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
-typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
-typedef struct kvm_msr_handlers {
- uint32_t msr;
- QEMURDMSRHandler *rdmsr;
- QEMUWRMSRHandler *wrmsr;
-} KVMMSRHandlers;
-
-bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
- QEMUWRMSRHandler *wrmsr);
-
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index);
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg);
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i);
#endif /* CONFIG_KVM */
void kvm_pc_setup_irq_routing(bool pci_enabled);
diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build
index e785098..2675bf8 100644
--- a/target/i386/kvm/meson.build
+++ b/target/i386/kvm/meson.build
@@ -3,10 +3,13 @@ i386_kvm_ss = ss.source_set()
i386_kvm_ss.add(files(
'kvm.c',
'kvm-cpu.c',
+ 'vmsr_energy.c',
))
i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c'))
+i386_kvm_ss.add(when: 'CONFIG_TDX', if_true: files('tdx.c', 'tdx-quote-generator.c'), if_false: files('tdx-stub.c'))
+
i386_system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))
i386_system_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
diff --git a/target/i386/kvm/tdx-quote-generator.c b/target/i386/kvm/tdx-quote-generator.c
new file mode 100644
index 0000000..f59715f
--- /dev/null
+++ b/target/i386/kvm/tdx-quote-generator.c
@@ -0,0 +1,300 @@
+/*
+ * QEMU TDX Quote Generation Support
+ *
+ * Copyright (c) 2025 Intel Corporation
+ *
+ * Author:
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-sockets.h"
+
+#include "tdx-quote-generator.h"
+
+#define QGS_MSG_LIB_MAJOR_VER 1
+#define QGS_MSG_LIB_MINOR_VER 1
+
+typedef enum _qgs_msg_type_t {
+ GET_QUOTE_REQ = 0,
+ GET_QUOTE_RESP = 1,
+ GET_COLLATERAL_REQ = 2,
+ GET_COLLATERAL_RESP = 3,
+ GET_PLATFORM_INFO_REQ = 4,
+ GET_PLATFORM_INFO_RESP = 5,
+ QGS_MSG_TYPE_MAX
+} qgs_msg_type_t;
+
+typedef struct _qgs_msg_header_t {
+ uint16_t major_version;
+ uint16_t minor_version;
+ uint32_t type;
+ uint32_t size; // size of the whole message, including this header, in bytes
+ uint32_t error_code; // used in response only
+} qgs_msg_header_t;
+
+typedef struct _qgs_msg_get_quote_req_t {
+ qgs_msg_header_t header; // header.type = GET_QUOTE_REQ
+ uint32_t report_size; // cannot be 0
+ uint32_t id_list_size; // length of id_list, in bytes, can be 0
+} qgs_msg_get_quote_req_t;
+
+typedef struct _qgs_msg_get_quote_resp_s {
+ qgs_msg_header_t header; // header.type = GET_QUOTE_RESP
+ uint32_t selected_id_size; // can be 0 in case only one id is sent in request
+ uint32_t quote_size; // length of quote_data, in bytes
+ uint8_t id_quote[]; // selected id followed by quote
+} qgs_msg_get_quote_resp_t;
+
+#define HEADER_SIZE 4
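+
+/*
+ * Every QGS message is framed by a 4-byte big-endian length prelude covering
+ * the qgs_msg_* payload that follows (the prelude itself is not counted).
+ * For example, a 1060-byte (0x424) payload is framed as 0x00 0x00 0x04 0x24.
+ */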
+
+static uint32_t decode_header(const char *buf, size_t len) {
+ if (len < HEADER_SIZE) {
+ return 0;
+ }
+ uint32_t msg_size = 0;
+ for (uint32_t i = 0; i < HEADER_SIZE; ++i) {
+ msg_size = msg_size * 256 + (buf[i] & 0xFF);
+ }
+ return msg_size;
+}
+
+static void encode_header(char *buf, size_t len, uint32_t size) {
+ assert(len >= HEADER_SIZE);
+ buf[0] = ((size >> 24) & 0xFF);
+ buf[1] = ((size >> 16) & 0xFF);
+ buf[2] = ((size >> 8) & 0xFF);
+ buf[3] = (size & 0xFF);
+}
+
+static void tdx_generate_quote_cleanup(TdxGenerateQuoteTask *task)
+{
+ timer_del(&task->timer);
+
+ g_source_remove(task->watch);
+ qio_channel_close(QIO_CHANNEL(task->sioc), NULL);
+ object_unref(OBJECT(task->sioc));
+
+ task->completion(task);
+}
+
+static gboolean tdx_get_quote_read(QIOChannel *ioc, GIOCondition condition,
+ gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_channel_read(ioc, task->receive_buf + task->receive_buf_received,
+ task->payload_len - task->receive_buf_received, &err);
+ if (ret < 0) {
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ return G_SOURCE_CONTINUE;
+ } else {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+
+ if (ret == 0) {
+ error_report("End of file before reply received");
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ task->receive_buf_received += ret;
+ if (task->receive_buf_received >= HEADER_SIZE) {
+ uint32_t len = decode_header(task->receive_buf,
+ task->receive_buf_received);
+ if (len == 0 ||
+ len > (task->payload_len - HEADER_SIZE)) {
+ error_report("Message len %u must be non-zero & less than %zu",
+ len, (task->payload_len - HEADER_SIZE));
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ /* Now we know the size, shrink to fit */
+ task->payload_len = HEADER_SIZE + len;
+ task->receive_buf = g_renew(char,
+ task->receive_buf,
+ task->payload_len);
+ }
+
+ if (task->receive_buf_received >= (sizeof(qgs_msg_header_t) + HEADER_SIZE)) {
+ qgs_msg_header_t *hdr = (qgs_msg_header_t *)(task->receive_buf + HEADER_SIZE);
+ if (hdr->major_version != QGS_MSG_LIB_MAJOR_VER ||
+ hdr->minor_version != QGS_MSG_LIB_MINOR_VER) {
+ error_report("Invalid QGS message header version %d.%d",
+ hdr->major_version,
+ hdr->minor_version);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->type != GET_QUOTE_RESP) {
+ error_report("Invalid QGS message type %d",
+ hdr->type);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->size > (task->payload_len - HEADER_SIZE)) {
+ error_report("QGS message size %d exceeds payload capacity %zu",
+ hdr->size, task->payload_len);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->error_code != 0) {
+ error_report("QGS message error code %d",
+ hdr->error_code);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+ if (task->receive_buf_received >= (sizeof(qgs_msg_get_quote_resp_t) + HEADER_SIZE)) {
+ qgs_msg_get_quote_resp_t *msg = (qgs_msg_get_quote_resp_t *)(task->receive_buf + HEADER_SIZE);
+ if (msg->selected_id_size != 0) {
+ error_report("QGS message selected ID was %d not 0",
+ msg->selected_id_size);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ if ((task->payload_len - HEADER_SIZE - sizeof(qgs_msg_get_quote_resp_t)) !=
+ msg->quote_size) {
+ error_report("QGS quote size %d should be %zu",
+ msg->quote_size,
+ (task->payload_len - HEADER_SIZE - sizeof(qgs_msg_get_quote_resp_t)));
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+
+ if (task->receive_buf_received == task->payload_len) {
+ size_t strip = HEADER_SIZE + sizeof(qgs_msg_get_quote_resp_t);
+ memmove(task->receive_buf,
+ task->receive_buf + strip,
+ task->receive_buf_received - strip);
+ task->receive_buf_received -= strip;
+ task->status_code = TDX_VP_GET_QUOTE_SUCCESS;
+ goto end;
+ }
+
+ return G_SOURCE_CONTINUE;
+
+end:
+ tdx_generate_quote_cleanup(task);
+ return G_SOURCE_REMOVE;
+}
+
+static gboolean tdx_send_report(QIOChannel *ioc, GIOCondition condition,
+ gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_channel_write(ioc, task->send_data + task->send_data_sent,
+ task->send_data_size - task->send_data_sent, &err);
+ if (ret < 0) {
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ ret = 0;
+ } else {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ tdx_generate_quote_cleanup(task);
+ goto end;
+ }
+ }
+ task->send_data_sent += ret;
+
+ if (task->send_data_sent == task->send_data_size) {
+ task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_IN,
+ tdx_get_quote_read, task, NULL);
+ goto end;
+ }
+
+ return G_SOURCE_CONTINUE;
+
+end:
+ return G_SOURCE_REMOVE;
+}
+
+static void tdx_quote_generator_connected(QIOTask *qio_task, gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_task_propagate_error(qio_task, &err);
+ if (ret) {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_QGS_UNAVAILABLE;
+ tdx_generate_quote_cleanup(task);
+ return;
+ }
+
+ task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_OUT,
+ tdx_send_report, task, NULL);
+}
+
+#define TRANSACTION_TIMEOUT 30000
+
+static void getquote_expired(void *opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ tdx_generate_quote_cleanup(task);
+}
+
+static void setup_get_quote_timer(TdxGenerateQuoteTask *task)
+{
+ int64_t time;
+
+ timer_init_ms(&task->timer, QEMU_CLOCK_VIRTUAL, getquote_expired, task);
+ time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+ timer_mod(&task->timer, time + TRANSACTION_TIMEOUT);
+}
+
+void tdx_generate_quote(TdxGenerateQuoteTask *task,
+ SocketAddress *qg_sock_addr)
+{
+ QIOChannelSocket *sioc;
+ qgs_msg_get_quote_req_t msg;
+
+ /* Prepare a QGS message prelude */
+ msg.header.major_version = QGS_MSG_LIB_MAJOR_VER;
+ msg.header.minor_version = QGS_MSG_LIB_MINOR_VER;
+ msg.header.type = GET_QUOTE_REQ;
+ msg.header.size = sizeof(msg) + task->send_data_size;
+ msg.header.error_code = 0;
+ msg.report_size = task->send_data_size;
+ msg.id_list_size = 0;
+
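+ /*
+ * Resulting request layout in send_data:
+ * [0, 4) 4-byte big-endian length of everything that follows
+ * [4, 4 + sizeof(msg)) qgs_msg_get_quote_req_t
+ * [4 + sizeof(msg), ...) the report blob supplied by the guest
+ */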
+ /* Make room to add the QGS message prelude */
+ task->send_data = g_renew(char,
+ task->send_data,
+ task->send_data_size + sizeof(msg) + HEADER_SIZE);
+ memmove(task->send_data + sizeof(msg) + HEADER_SIZE,
+ task->send_data,
+ task->send_data_size);
+ memcpy(task->send_data + HEADER_SIZE,
+ &msg,
+ sizeof(msg));
+ encode_header(task->send_data, HEADER_SIZE, task->send_data_size + sizeof(msg));
+ task->send_data_size += sizeof(msg) + HEADER_SIZE;
+
+ sioc = qio_channel_socket_new();
+ task->sioc = sioc;
+
+ setup_get_quote_timer(task);
+
+ qio_channel_socket_connect_async(sioc, qg_sock_addr,
+ tdx_quote_generator_connected, task,
+ NULL, NULL);
+}
diff --git a/target/i386/kvm/tdx-quote-generator.h b/target/i386/kvm/tdx-quote-generator.h
new file mode 100644
index 0000000..3bd9b8e
--- /dev/null
+++ b/target/i386/kvm/tdx-quote-generator.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef QEMU_I386_TDX_QUOTE_GENERATOR_H
+#define QEMU_I386_TDX_QUOTE_GENERATOR_H
+
+#include "qom/object_interfaces.h"
+#include "io/channel-socket.h"
+#include "exec/hwaddr.h"
+
+#define TDX_GET_QUOTE_STRUCTURE_VERSION 1ULL
+
+#define TDX_VP_GET_QUOTE_SUCCESS 0ULL
+#define TDX_VP_GET_QUOTE_IN_FLIGHT (-1ULL)
+#define TDX_VP_GET_QUOTE_ERROR 0x8000000000000000ULL
+#define TDX_VP_GET_QUOTE_QGS_UNAVAILABLE 0x8000000000000001ULL
+
+/* Limit to avoid resource starvation. */
+#define TDX_GET_QUOTE_MAX_BUF_LEN (128 * 1024)
+#define TDX_MAX_GET_QUOTE_REQUEST 16
+
+#define TDX_GET_QUOTE_HDR_SIZE 24
+
+/* Format of pages shared with guest. */
+struct tdx_get_quote_header {
+ /* Format version: must be 1 in little endian. */
+ uint64_t structure_version;
+
+ /*
+ * GetQuote status code in little endian:
+ * Guest must set error_code to 0 to avoid information leak.
+ * QEMU sets this before interrupting the guest.
+ */
+ uint64_t error_code;
+
+ /*
+ * in-message size in little endian: the message follows this header.
+ * The in-message will be sent to QGS.
+ */
+ uint32_t in_len;
+
+ /*
+ * out-message size in little endian:
+ * On request, out_len must be zero to avoid information leak.
+ * On return, the message size from QGS. QEMU overwrites this field.
+ * The message follows this header. The in-message is overwritten.
+ */
+ uint32_t out_len;
+
+ /*
+ * Message buffer follows.
+ * Guest sets the message that will be sent to QGS. If out_len > in_len, the
+ * guest should zero the remaining buffer to avoid an information leak.
+ * QEMU overwrites this buffer with the message returned from QGS.
+ */
+};
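+
+/*
+ * Byte layout of the shared GetQuote buffer (all fields little endian):
+ * offset 0: structure_version (8 bytes)
+ * offset 8: error_code (8 bytes)
+ * offset 16: in_len (4 bytes)
+ * offset 20: out_len (4 bytes)
+ * offset 24 (TDX_GET_QUOTE_HDR_SIZE): message data
+ */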
+
+typedef struct TdxGenerateQuoteTask {
+ hwaddr buf_gpa;
+ hwaddr payload_gpa;
+ uint64_t payload_len;
+
+ char *send_data;
+ uint64_t send_data_size;
+ uint64_t send_data_sent;
+
+ char *receive_buf;
+ uint64_t receive_buf_received;
+
+ uint64_t status_code;
+ struct tdx_get_quote_header hdr;
+
+ QIOChannelSocket *sioc;
+ guint watch;
+ QEMUTimer timer;
+
+ void (*completion)(struct TdxGenerateQuoteTask *task);
+ void *opaque;
+} TdxGenerateQuoteTask;
+
+void tdx_generate_quote(TdxGenerateQuoteTask *task, SocketAddress *qg_sock_addr);
+
+#endif /* QEMU_I386_TDX_QUOTE_GENERATOR_H */
diff --git a/target/i386/kvm/tdx-stub.c b/target/i386/kvm/tdx-stub.c
new file mode 100644
index 0000000..76fee49
--- /dev/null
+++ b/target/i386/kvm/tdx-stub.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+
+#include "tdx.h"
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return -EINVAL;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return -EINVAL;
+}
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ return -EINVAL;
+}
+
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run)
+{
+}
+
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run)
+{
+}
diff --git a/target/i386/kvm/tdx.c b/target/i386/kvm/tdx.c
new file mode 100644
index 0000000..e809e4b
--- /dev/null
+++ b/target/i386/kvm/tdx.c
@@ -0,0 +1,1487 @@
+/*
+ * QEMU TDX support
+ *
+ * Copyright (c) 2025 Intel Corporation
+ *
+ * Author:
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/base64.h"
+#include "qemu/mmap-alloc.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-sockets.h"
+#include "qom/object_interfaces.h"
+#include "crypto/hash.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/ramblock.h"
+#include "system/address-spaces.h"
+
+#include <linux/kvm_para.h>
+
+#include "cpu.h"
+#include "cpu-internal.h"
+#include "host-cpu.h"
+#include "hw/i386/e820_memory_layout.h"
+#include "hw/i386/tdvf.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/tdvf-hob.h"
+#include "kvm_i386.h"
+#include "tdx.h"
+#include "tdx-quote-generator.h"
+
+#include "standard-headers/asm-x86/kvm_para.h"
+
+#define TDX_MIN_TSC_FREQUENCY_KHZ (100 * 1000)
+#define TDX_MAX_TSC_FREQUENCY_KHZ (10 * 1000 * 1000)
+
+#define TDX_TD_ATTRIBUTES_DEBUG BIT_ULL(0)
+#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE BIT_ULL(28)
+#define TDX_TD_ATTRIBUTES_PKS BIT_ULL(30)
+#define TDX_TD_ATTRIBUTES_PERFMON BIT_ULL(63)
+
+#define TDX_SUPPORTED_TD_ATTRS (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\
+ TDX_TD_ATTRIBUTES_PKS | \
+ TDX_TD_ATTRIBUTES_PERFMON)
+
+#define TDX_SUPPORTED_KVM_FEATURES ((1U << KVM_FEATURE_NOP_IO_DELAY) | \
+ (1U << KVM_FEATURE_PV_UNHALT) | \
+ (1U << KVM_FEATURE_PV_TLB_FLUSH) | \
+ (1U << KVM_FEATURE_PV_SEND_IPI) | \
+ (1U << KVM_FEATURE_POLL_CONTROL) | \
+ (1U << KVM_FEATURE_PV_SCHED_YIELD) | \
+ (1U << KVM_FEATURE_MSI_EXT_DEST_ID))
+
+static TdxGuest *tdx_guest;
+
+static struct kvm_tdx_capabilities *tdx_caps;
+static struct kvm_cpuid2 *tdx_supported_cpuid;
+
+/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
+bool is_tdx_vm(void)
+{
+ return !!tdx_guest;
+}
+
+enum tdx_ioctl_level {
+ TDX_VM_IOCTL,
+ TDX_VCPU_IOCTL,
+};
+
+static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
+ int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ struct kvm_tdx_cmd tdx_cmd = {};
+ int r;
+
+ const char *tdx_ioctl_name[] = {
+ [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
+ [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
+ [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
+ [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
+ [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
+ [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
+ };
+
+ tdx_cmd.id = cmd_id;
+ tdx_cmd.flags = flags;
+ tdx_cmd.data = (__u64)(unsigned long)data;
+
+ switch (level) {
+ case TDX_VM_IOCTL:
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ case TDX_VCPU_IOCTL:
+ r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ default:
+ error_setg(errp, "Invalid tdx_ioctl_level %d", level);
+ return -EINVAL;
+ }
+
+ if (r < 0) {
+ error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
+ tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
+ }
+ return r;
+}
+
+static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
+}
+
+static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
+ void *data, Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
+}
+
+static int get_tdx_capabilities(Error **errp)
+{
+ struct kvm_tdx_capabilities *caps;
+ /* 1st generation of TDX reports 6 cpuid configs */
+ int nr_cpuid_configs = 6;
+ size_t size;
+ int r;
+
+ do {
+ Error *local_err = NULL;
+ size = sizeof(struct kvm_tdx_capabilities) +
+ nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
+ caps = g_malloc0(size);
+ caps->cpuid.nent = nr_cpuid_configs;
+
+ r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
+ if (r == -E2BIG) {
+ g_free(caps);
+ nr_cpuid_configs *= 2;
+ if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
+ error_report("KVM TDX seems broken: the number of CPUID entries"
+ " in kvm_tdx_capabilities exceeds the limit of %d",
+ KVM_MAX_CPUID_ENTRIES);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ error_free(local_err);
+ } else if (r < 0) {
+ g_free(caps);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ } while (r == -E2BIG);
+
+ tdx_caps = caps;
+
+ return 0;
+}
+
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
+{
+ assert(!tdx_guest->tdvf_mr);
+ tdx_guest->tdvf_mr = tdvf_mr;
+}
+
+static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
+{
+ TdxFirmwareEntry *entry;
+
+ for_each_tdx_fw_entry(&tdx->tdvf, entry) {
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
+ return entry;
+ }
+ }
+ error_report("TDVF metadata doesn't specify TD_HOB location.");
+ exit(1);
+}
+
+static void tdx_add_ram_entry(uint64_t address, uint64_t length,
+ enum TdxRamType type)
+{
+ uint32_t nr_entries = tdx_guest->nr_ram_entries;
+ tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
+ nr_entries + 1);
+
+ tdx_guest->ram_entries[nr_entries].address = address;
+ tdx_guest->ram_entries[nr_entries].length = length;
+ tdx_guest->ram_entries[nr_entries].type = type;
+ tdx_guest->nr_ram_entries++;
+}
+
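+/*
+ * Mark [address, address + length) as accepted. The range must be fully
+ * covered by a single RAM entry; that entry is switched to TDX_RAM_ADDED and
+ * any remaining head/tail pieces are re-added as TDX_RAM_UNACCEPTED.
+ */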
+static int tdx_accept_ram_range(uint64_t address, uint64_t length)
+{
+ uint64_t head_start, tail_start, head_length, tail_length;
+ uint64_t tmp_address, tmp_length;
+ TdxRamEntry *e;
+ int i = 0;
+
+ do {
+ if (i == tdx_guest->nr_ram_entries) {
+ return -1;
+ }
+
+ e = &tdx_guest->ram_entries[i++];
+ } while (address + length <= e->address || address >= e->address + e->length);
+
+ /*
+ * The to-be-accepted ram range must be fully contained by one
+ * RAM entry.
+ */
+ if (e->address > address ||
+ e->address + e->length < address + length) {
+ return -1;
+ }
+
+ if (e->type == TDX_RAM_ADDED) {
+ return 0;
+ }
+
+ tmp_address = e->address;
+ tmp_length = e->length;
+
+ e->address = address;
+ e->length = length;
+ e->type = TDX_RAM_ADDED;
+
+ head_length = address - tmp_address;
+ if (head_length > 0) {
+ head_start = tmp_address;
+ tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
+ }
+
+ tail_start = address + length;
+ if (tail_start < tmp_address + tmp_length) {
+ tail_length = tmp_address + tmp_length - tail_start;
+ tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
+ }
+
+ return 0;
+}
+
+static int tdx_ram_entry_compare(const void *lhs_, const void* rhs_)
+{
+ const TdxRamEntry *lhs = lhs_;
+ const TdxRamEntry *rhs = rhs_;
+
+ if (lhs->address == rhs->address) {
+ return 0;
+ }
+ if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
+ return 1;
+ }
+ return -1;
+}
+
+static void tdx_init_ram_entries(void)
+{
+ unsigned i, j, nr_e820_entries;
+
+ nr_e820_entries = e820_get_table(NULL);
+ tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);
+
+ for (i = 0, j = 0; i < nr_e820_entries; i++) {
+ uint64_t addr, len;
+
+ if (e820_get_entry(i, E820_RAM, &addr, &len)) {
+ tdx_guest->ram_entries[j].address = addr;
+ tdx_guest->ram_entries[j].length = len;
+ tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
+ j++;
+ }
+ }
+ tdx_guest->nr_ram_entries = j;
+}
+
+static void tdx_post_init_vcpus(void)
+{
+ TdxFirmwareEntry *hob;
+ CPUState *cpu;
+
+ hob = tdx_get_hob_entry(tdx_guest);
+ CPU_FOREACH(cpu) {
+ tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)(uintptr_t)hob->address,
+ &error_fatal);
+ }
+}
+
+static void tdx_finalize_vm(Notifier *notifier, void *unused)
+{
+ TdxFirmware *tdvf = &tdx_guest->tdvf;
+ TdxFirmwareEntry *entry;
+ RAMBlock *ram_block;
+ Error *local_err = NULL;
+ int r;
+
+ tdx_init_ram_entries();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ switch (entry->type) {
+ case TDVF_SECTION_TYPE_BFV:
+ case TDVF_SECTION_TYPE_CFV:
+ entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
+ break;
+ case TDVF_SECTION_TYPE_TD_HOB:
+ case TDVF_SECTION_TYPE_TEMP_MEM:
+ entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
+ qemu_real_host_page_size(), 0, 0);
+ if (entry->mem_ptr == MAP_FAILED) {
+ error_report("Failed to mmap memory for TDVF section %d",
+ entry->type);
+ exit(1);
+ }
+ if (tdx_accept_ram_range(entry->address, entry->size)) {
+ error_report("Failed to accept memory for TDVF section %d",
+ entry->type);
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ exit(1);
+ }
+ break;
+ default:
+ error_report("Unsupported TDVF section %d", entry->type);
+ exit(1);
+ }
+ }
+
+ qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
+ sizeof(TdxRamEntry), &tdx_ram_entry_compare);
+
+ tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));
+
+ tdx_post_init_vcpus();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ struct kvm_tdx_init_mem_region region;
+ uint32_t flags;
+
+ region = (struct kvm_tdx_init_mem_region) {
+ .source_addr = (uintptr_t)entry->mem_ptr,
+ .gpa = entry->address,
+ .nr_pages = entry->size >> 12,
+ };
+
+ flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
+ KVM_TDX_MEASURE_MEMORY_REGION : 0;
+
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
+ &region, &local_err);
+ } while (r == -EAGAIN || r == -EINTR);
+ if (r < 0) {
+ error_report_err(local_err);
+ exit(1);
+ }
+
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
+ entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ entry->mem_ptr = NULL;
+ }
+ }
+
+ /*
+ * The TDVF image has been copied into the private region above via
+ * KVM_MEMORY_MAPPING, so the original copy is no longer needed.
+ */
+ ram_block = tdx_guest->tdvf_mr->ram_block;
+ ram_block_discard_range(ram_block, 0, ram_block->max_length);
+
+ tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
+ CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
+}
+
+static Notifier tdx_machine_done_notify = {
+ .notify = tdx_finalize_vm,
+};
+
+/*
+ * Some CPUID bits change from fixed1 to configurable bits when the TDX module
+ * supports TDX_FEATURES0.VE_REDUCTION, e.g., MCA/MCE/MTRR/CORE_CAPABILITY.
+ *
+ * To make QEMU work with all versions of the TDX module, keep a bit in
+ * tdx_fixed1_bits if it is fixed1 in any version, even when it is not fixed1
+ * in the latest version. Otherwise, with an older TDX module, QEMU might
+ * treat such a fixed1 bit as unsupported.
+ *
+ * For newer TDX modules it does no harm to keep these bits in tdx_fixed1_bits
+ * even though they have become configurable, because tdx_fixed1_bits is only
+ * used to set up the supported bits.
+ */
+KvmCpuidInfo tdx_fixed1_bits = {
+ .cpuid.nent = 8,
+ .entries[0] = {
+ .function = 0x1,
+ .index = 0,
+ .ecx = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_DTES64 |
+ CPUID_EXT_DSCPL | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 |
+ CPUID_EXT_PDCM | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_RDRAND | CPUID_EXT_HYPERVISOR,
+ .edx = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_CLFLUSH | CPUID_DTS | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2,
+ },
+ .entries[1] = {
+ .function = 0x6,
+ .index = 0,
+ .eax = CPUID_6_EAX_ARAT,
+ },
+ .entries[2] = {
+ .function = 0x7,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .ebx = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_FDP_EXCPTN_ONLY |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_ZERO_FCS_FDS | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI,
+ .ecx = CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .edx = CPUID_7_0_EDX_MD_CLEAR | CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_CORE_CAPABILITY |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ },
+ .entries[3] = {
+ .function = 0x7,
+ .index = 2,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .edx = CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
+ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_BHI_CTRL,
+ },
+ .entries[4] = {
+ .function = 0xD,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .eax = XSTATE_FP_MASK | XSTATE_SSE_MASK,
+ },
+ .entries[5] = {
+ .function = 0xD,
+ .index = 1,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .eax = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC|
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ },
+ .entries[6] = {
+ .function = 0x80000001,
+ .index = 0,
+ .ecx = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
+ /*
+ * Strictly speaking, SYSCALL is not a fixed1 bit since it depends on
+ * the CPU being in 64-bit mode. But here fixed1 serves to describe the
+ * supported bits for TDX, and in that sense SYSCALL is always
+ * supported.
+ */
+ .edx = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ },
+ .entries[7] = {
+ .function = 0x80000007,
+ .index = 0,
+ .edx = CPUID_APM_INVTSC,
+ },
+};
+
+typedef struct TdxAttrsMap {
+ uint32_t attr_index;
+ uint32_t cpuid_leaf;
+ uint32_t cpuid_subleaf;
+ int cpuid_reg;
+ uint32_t feat_mask;
+} TdxAttrsMap;
+
+static TdxAttrsMap tdx_attrs_maps[] = {
+ {.attr_index = 27,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 1,
+ .cpuid_reg = R_EAX,
+ .feat_mask = CPUID_7_1_EAX_LASS,},
+
+ {.attr_index = 30,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_PKS,},
+
+ {.attr_index = 31,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_KeyLocker,},
+};
+
+typedef struct TdxXFAMDep {
+ int xfam_bit;
+ FeatureMask feat_mask;
+} TdxXFAMDep;
+
+/*
+ * Note, only the CPUID bits whose virtualization type is "XFAM & Native" are
+ * defined here.
+ *
+ * Bits whose virtualization type is "XFAM & Configured & Native" are reported
+ * as configurable bits, and they are not supported unless they appear in the
+ * configurable bits list from KVM, even if the corresponding XFAM bit is
+ * supported.
+ */
+TdxXFAMDep tdx_xfam_deps[] = {
+ { XSTATE_YMM_BIT, { FEAT_1_ECX, CPUID_EXT_FMA }},
+ { XSTATE_YMM_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX2 }},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_AVX512_VBMI}},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AVX512_FP16}},
+ { XSTATE_PT_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT}},
+ { XSTATE_PKRU_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU}},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_BF16 }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_INT8 }},
+};
+
+static struct kvm_cpuid_entry2 *find_in_supported_entry(uint32_t function,
+ uint32_t index)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = cpuid_find_entry(tdx_supported_cpuid, function, index);
+ if (!e) {
+ if (tdx_supported_cpuid->nent >= KVM_MAX_CPUID_ENTRIES) {
+ error_report("tdx_supported_cpuid requires more than %d entries",
+ KVM_MAX_CPUID_ENTRIES);
+ exit(1);
+ }
+ e = &tdx_supported_cpuid->entries[tdx_supported_cpuid->nent++];
+ e->function = function;
+ e->index = index;
+ }
+
+ return e;
+}
+
+static void tdx_add_supported_cpuid_by_fixed1_bits(void)
+{
+ struct kvm_cpuid_entry2 *e, *e1;
+ int i;
+
+ for (i = 0; i < tdx_fixed1_bits.cpuid.nent; i++) {
+ e = &tdx_fixed1_bits.entries[i];
+
+ e1 = find_in_supported_entry(e->function, e->index);
+ e1->eax |= e->eax;
+ e1->ebx |= e->ebx;
+ e1->ecx |= e->ecx;
+ e1->edx |= e->edx;
+ }
+}
+
+static void tdx_add_supported_cpuid_by_attrs(void)
+{
+ struct kvm_cpuid_entry2 *e;
+ TdxAttrsMap *map;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tdx_attrs_maps); i++) {
+ map = &tdx_attrs_maps[i];
+ if (!((1ULL << map->attr_index) & tdx_caps->supported_attrs)) {
+ continue;
+ }
+
+ e = find_in_supported_entry(map->cpuid_leaf, map->cpuid_subleaf);
+
+ switch(map->cpuid_reg) {
+ case R_EAX:
+ e->eax |= map->feat_mask;
+ break;
+ case R_EBX:
+ e->ebx |= map->feat_mask;
+ break;
+ case R_ECX:
+ e->ecx |= map->feat_mask;
+ break;
+ case R_EDX:
+ e->edx |= map->feat_mask;
+ break;
+ }
+ }
+}
+
+static void tdx_add_supported_cpuid_by_xfam(void)
+{
+ struct kvm_cpuid_entry2 *e;
+ int i;
+
+ const TdxXFAMDep *xfam_dep;
+ const FeatureWordInfo *f;
+ for (i = 0; i < ARRAY_SIZE(tdx_xfam_deps); i++) {
+ xfam_dep = &tdx_xfam_deps[i];
+ if (!((1ULL << xfam_dep->xfam_bit) & tdx_caps->supported_xfam)) {
+ continue;
+ }
+
+ f = &feature_word_info[xfam_dep->feat_mask.index];
+ if (f->type != CPUID_FEATURE_WORD) {
+ continue;
+ }
+
+ e = find_in_supported_entry(f->cpuid.eax, f->cpuid.ecx);
+ switch(f->cpuid.reg) {
+ case R_EAX:
+ e->eax |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EBX:
+ e->ebx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_ECX:
+ e->ecx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EDX:
+ e->edx |= xfam_dep->feat_mask.mask;
+ break;
+ }
+ }
+
+ e = find_in_supported_entry(0xd, 0);
+ e->eax |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK) >> 32;
+
+ e = find_in_supported_entry(0xd, 1);
+ /*
+ * Mark XFD as always supported for TDX; it is cleared at the end by
+ * tdx_adjust_cpuid_features() if XFD is unavailable on the hardware,
+ * because in that case the original data has it as 0.
+ */
+ e->eax |= CPUID_XSAVE_XFD;
+ e->ecx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK) >> 32;
+}
+
+static void tdx_add_supported_kvm_features(void)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = find_in_supported_entry(0x40000001, 0);
+ e->eax = TDX_SUPPORTED_KVM_FEATURES;
+}
+
+static void tdx_setup_supported_cpuid(void)
+{
+ if (tdx_supported_cpuid) {
+ return;
+ }
+
+ tdx_supported_cpuid = g_malloc0(sizeof(*tdx_supported_cpuid) +
+ KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));
+
+ memcpy(tdx_supported_cpuid->entries, tdx_caps->cpuid.entries,
+ tdx_caps->cpuid.nent * sizeof(struct kvm_cpuid_entry2));
+ tdx_supported_cpuid->nent = tdx_caps->cpuid.nent;
+
+ tdx_add_supported_cpuid_by_fixed1_bits();
+ tdx_add_supported_cpuid_by_attrs();
+ tdx_add_supported_cpuid_by_xfam();
+
+ tdx_add_supported_kvm_features();
+}
+
+static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ X86MachineState *x86ms = X86_MACHINE(ms);
+ TdxGuest *tdx = TDX_GUEST(cgs);
+ int r = 0;
+
+ kvm_mark_guest_state_protected();
+
+ if (x86ms->smm == ON_OFF_AUTO_AUTO) {
+ x86ms->smm = ON_OFF_AUTO_OFF;
+ } else if (x86ms->smm == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support SMM");
+ return -EINVAL;
+ }
+
+ if (x86ms->pic == ON_OFF_AUTO_AUTO) {
+ x86ms->pic = ON_OFF_AUTO_OFF;
+ } else if (x86ms->pic == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support PIC");
+ return -EINVAL;
+ }
+
+ if (kvm_state->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
+ kvm_state->kernel_irqchip_split = ON_OFF_AUTO_ON;
+ } else if (kvm_state->kernel_irqchip_split != ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM requires kernel_irqchip to be split");
+ return -EINVAL;
+ }
+
+ if (!tdx_caps) {
+ r = get_tdx_capabilities(errp);
+ if (r) {
+ return r;
+ }
+ }
+
+ tdx_setup_supported_cpuid();
+
+ /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
+ if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Set kvm_readonly_mem_allowed to false, because TDX only supports readonly
+ * memory for shared memory but not for private memory. Besides, whether a
+ * memslot is private or shared is not determined by QEMU.
+ *
+ * Thus, just mark readonly memory not supported for simplicity.
+ */
+ kvm_readonly_mem_allowed = false;
+
+ qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);
+
+ tdx_guest = tdx;
+ return 0;
+}
+
+static int tdx_kvm_type(X86ConfidentialGuest *cg)
+{
+ /* Do the object check */
+ TDX_GUEST(cg);
+
+ return KVM_X86_TDX_VM;
+}
+
+static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu)
+{
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+ X86CPU *x86cpu = X86_CPU(cpu);
+
+ if (xcc->model) {
+ error_report("Named cpu model is not supported for TDX yet!");
+ exit(1);
+ }
+
+ object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort);
+
+ /* invtsc is fixed1 for TD guest */
+ object_property_set_bool(OBJECT(cpu), "invtsc", true, &error_abort);
+
+ x86cpu->force_cpuid_0x1f = true;
+}
+
+static uint32_t tdx_adjust_cpuid_features(X86ConfidentialGuest *cg,
+ uint32_t feature, uint32_t index,
+ int reg, uint32_t value)
+{
+ struct kvm_cpuid_entry2 *e;
+
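+ /* Force TDX fixed1 bits on, then clamp feature words to what TDX supports. */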
+ e = cpuid_find_entry(&tdx_fixed1_bits.cpuid, feature, index);
+ if (e) {
+ value |= cpuid_entry_get_reg(e, reg);
+ }
+
+ if (is_feature_word_cpuid(feature, index, reg)) {
+ e = cpuid_find_entry(tdx_supported_cpuid, feature, index);
+ if (e) {
+ value &= cpuid_entry_get_reg(e, reg);
+ }
+ }
+
+ return value;
+}
+
+static struct kvm_cpuid2 *tdx_fetch_cpuid(CPUState *cpu, int *ret)
+{
+ struct kvm_cpuid2 *fetch_cpuid;
+ int size = KVM_MAX_CPUID_ENTRIES;
+ Error *local_err = NULL;
+ int r;
+
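+ /*
+ * KVM_TDX_GET_CPUID fails with -E2BIG when the buffer is too small and is
+ * expected to update nent to the required count; grow and retry then.
+ */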
+ do {
+ error_free(local_err);
+ local_err = NULL;
+
+ fetch_cpuid = g_malloc0(sizeof(*fetch_cpuid) +
+ sizeof(struct kvm_cpuid_entry2) * size);
+ fetch_cpuid->nent = size;
+ r = tdx_vcpu_ioctl(cpu, KVM_TDX_GET_CPUID, 0, fetch_cpuid, &local_err);
+ if (r == -E2BIG) {
+ size = fetch_cpuid->nent;
+ g_free(fetch_cpuid);
+ }
+ } while (r == -E2BIG);
+
+ if (r < 0) {
+ error_report_err(local_err);
+ *ret = r;
+ return NULL;
+ }
+
+ return fetch_cpuid;
+}
+
+static int tdx_check_features(X86ConfidentialGuest *cg, CPUState *cs)
+{
+ uint64_t actual, requested, unavailable, forced_on;
+ g_autofree struct kvm_cpuid2 *fetch_cpuid;
+ const char *forced_on_prefix = NULL;
+ const char *unav_prefix = NULL;
+ struct kvm_cpuid_entry2 *entry;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *wi;
+ FeatureWord w;
+ bool mismatch = false;
+ int r;
+
+ fetch_cpuid = tdx_fetch_cpuid(cs, &r);
+ if (!fetch_cpuid) {
+ return r;
+ }
+
+ if (cpu->check_cpuid || cpu->enforce_cpuid) {
+ unav_prefix = "TDX doesn't support requested feature";
+ forced_on_prefix = "TDX forcibly sets the feature";
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ actual = 0;
+
+ switch (wi->type) {
+ case CPUID_FEATURE_WORD:
+ entry = cpuid_find_entry(fetch_cpuid, wi->cpuid.eax, wi->cpuid.ecx);
+ if (!entry) {
+ /*
+ * If KVM doesn't report the leaf, it means it's totally
+ * configurable by QEMU.
+ */
+ continue;
+ }
+
+ actual = cpuid_entry_get_reg(entry, wi->cpuid.reg);
+ break;
+ case MSR_FEATURE_WORD:
+ /*
+ * TODO:
+ * validate MSR features when KVM has an interface to report them.
+ */
+ continue;
+ }
+
+ /* Fixup for special cases */
+ switch (w) {
+ case FEAT_8000_0001_EDX:
+ /*
+ * Intel enumerates the SYSCALL bit as 1 only when the processor is in
+ * 64-bit mode, and the vCPU is not in 64-bit mode before it starts running.
+ */
+ actual |= CPUID_EXT2_SYSCALL;
+ break;
+ default:
+ break;
+ }
+
+ requested = env->features[w];
+ unavailable = requested & ~actual;
+ mark_unavailable_features(cpu, w, unavailable, unav_prefix);
+ if (unavailable) {
+ mismatch = true;
+ }
+
+ forced_on = actual & ~requested;
+ mark_forced_on_features(cpu, w, forced_on, forced_on_prefix);
+ if (forced_on) {
+ mismatch = true;
+ }
+ }
+
+ if (cpu->enforce_cpuid && mismatch) {
+ return -EINVAL;
+ }
+
+ if (cpu->phys_bits != host_cpu_phys_bits()) {
+ error_report("TDX requires guest CPU physical bits (%u) "
+ "to match host CPU physical bits (%u)",
+ cpu->phys_bits, host_cpu_phys_bits());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
+{
+ if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
+ error_setg(errp, "Invalid attributes 0x%"PRIx64" for TDX VM "
+ "(KVM supported: 0x%"PRIx64")", tdx->attributes,
+ (uint64_t)tdx_caps->supported_attrs);
+ return -1;
+ }
+
+ if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
+ error_setg(errp, "TD attribute bits unsupported by QEMU were "
+ "requested: 0x%"PRIx64" (QEMU supported: 0x%"PRIx64")",
+ tdx->attributes, (uint64_t)TDX_SUPPORTED_TD_ATTRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+
+ tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
+ TDX_TD_ATTRIBUTES_PKS : 0;
+ tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;
+
+ return tdx_validate_attributes(tdx_guest, errp);
+}
+
+static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+ uint64_t xfam;
+
+ xfam = env->features[FEAT_XSAVE_XCR0_LO] |
+ env->features[FEAT_XSAVE_XCR0_HI] |
+ env->features[FEAT_XSAVE_XSS_LO] |
+ env->features[FEAT_XSAVE_XSS_HI];
+
+ if (xfam & ~tdx_caps->supported_xfam) {
+ error_setg(errp, "Invalid XFAM 0x%"PRIx64" for TDX VM (supported: 0x%"PRIx64")",
+ xfam, (uint64_t)tdx_caps->supported_xfam);
+ return -1;
+ }
+
+ tdx_guest->xfam = xfam;
+ return 0;
+}
+
+static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
+{
+ int i, dest_cnt = 0;
+ struct kvm_cpuid_entry2 *src, *dest, *conf;
+
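+ /*
+ * Keep only the leaves that TDX reports as directly configurable and mask
+ * each register down to the configurable bits. Entries are compacted in
+ * place, so cpuids->nent shrinks accordingly.
+ */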
+ for (i = 0; i < cpuids->nent; i++) {
+ src = cpuids->entries + i;
+ conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
+ if (!conf) {
+ continue;
+ }
+ dest = cpuids->entries + dest_cnt;
+
+ dest->function = src->function;
+ dest->index = src->index;
+ dest->flags = src->flags;
+ dest->eax = src->eax & conf->eax;
+ dest->ebx = src->ebx & conf->ebx;
+ dest->ecx = src->ecx & conf->ecx;
+ dest->edx = src->edx & conf->edx;
+
+ dest_cnt++;
+ }
+ cpuids->nent = dest_cnt;
+}
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
+ Error *local_err = NULL;
+ size_t data_len;
+ int retry = 10000;
+ int r = 0;
+
+ QEMU_LOCK_GUARD(&tdx_guest->lock);
+ if (tdx_guest->initialized) {
+ return r;
+ }
+
+ init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
+ sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
+ error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
+ return -EOPNOTSUPP;
+ }
+
+ r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
+ 0, TDX_APIC_BUS_CYCLES_NS);
+ if (r < 0) {
+ error_setg_errno(errp, -r,
+ "Unable to set core crystal clock frequency to 25MHz");
+ return r;
+ }
+
+ if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
+ env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
+ error_setg(errp, "Invalid TSC %"PRId64" kHz, cpu_frequency must be "
+ "between [%d, %d] kHz", env->tsc_khz,
+ TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
+ return -EINVAL;
+ }
+
+ if (env->tsc_khz % (25 * 1000)) {
+ error_setg(errp, "Invalid TSC %"PRId64" kHz, it must be a multiple of 25 MHz",
+ env->tsc_khz);
+ return -EINVAL;
+ }
+
+ /* It's safe even if env->tsc_khz is 0; KVM uses the host's tsc_khz in that case. */
+ r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
+ if (r < 0) {
+ error_setg_errno(errp, -r, "Unable to set TSC frequency to %"PRId64" kHz",
+ env->tsc_khz);
+ return r;
+ }
+
+ if (tdx_guest->mrconfigid) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
+ strlen(tdx_guest->mrconfigid), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX 'mrconfigid' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrconfigid, data, data_len);
+ }
+
+ if (tdx_guest->mrowner) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
+ strlen(tdx_guest->mrowner), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX 'mrowner' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrowner, data, data_len);
+ }
+
+ if (tdx_guest->mrownerconfig) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
+ strlen(tdx_guest->mrownerconfig), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX 'mrownerconfig' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrownerconfig, data, data_len);
+ }
+
+ r = setup_td_guest_attributes(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ r = setup_td_xfam(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
+ tdx_filter_cpuid(&init_vm->cpuid);
+
+ init_vm->attributes = tdx_guest->attributes;
+ init_vm->xfam = tdx_guest->xfam;
+
+ /*
+ * KVM_TDX_INIT_VM returns -EAGAIN when the KVM-side SEAMCALL(TDH_MNG_CREATE)
+ * gets TDX_RND_NO_ENTROPY because random number generation (e.g., RDRAND or
+ * RDSEED) is busy.
+ *
+ * Retry in that case.
+ */
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
+ } while (r == -EAGAIN && --retry);
+
+ if (r < 0) {
+ if (!retry) {
+ error_append_hint(&local_err, "The hardware RNG (Random Number "
+ "Generator) is kept busy (via RDRAND/RDSEED), possibly "
+ "maliciously, so KVM_TDX_INIT_VM keeps failing due to lack "
+ "of entropy.\n");
+ }
+ error_propagate(errp, local_err);
+ return r;
+ }
+
+ tdx_guest->initialized = true;
+
+ return 0;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
+}
+
+static void tdx_get_quote_completion(TdxGenerateQuoteTask *task)
+{
+ TdxGuest *tdx = task->opaque;
+ int ret;
+
+ /* Maintain the number of in-flight requests. */
+ qemu_mutex_lock(&tdx->lock);
+ tdx->num--;
+ qemu_mutex_unlock(&tdx->lock);
+
+ if (task->status_code == TDX_VP_GET_QUOTE_SUCCESS) {
+ ret = address_space_write(&address_space_memory, task->payload_gpa,
+ MEMTXATTRS_UNSPECIFIED, task->receive_buf,
+ task->receive_buf_received);
+ if (ret != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to write quote data.");
+ } else {
+ task->hdr.out_len = cpu_to_le32(task->receive_buf_received);
+ }
+ }
+ task->hdr.error_code = cpu_to_le64(task->status_code);
+
+ /* Publish the response contents before marking this request completed. */
+ smp_wmb();
+ ret = address_space_write(&address_space_memory, task->buf_gpa,
+ MEMTXATTRS_UNSPECIFIED, &task->hdr,
+ TDX_GET_QUOTE_HDR_SIZE);
+ if (ret != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to update GetQuote header.");
+ }
+
+ g_free(task->send_data);
+ g_free(task->receive_buf);
+ g_free(task);
+ object_unref(tdx);
+}
+
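+/*
+ * Handle TDVMCALL<GetQuote>: validate the shared buffer passed by the guest,
+ * mark it in-flight, and start an asynchronous exchange with the Quote
+ * Generation Service; tdx_get_quote_completion() writes the result back.
+ */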
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run)
+{
+ TdxGenerateQuoteTask *task;
+ struct tdx_get_quote_header hdr;
+ hwaddr buf_gpa = run->tdx.get_quote.gpa;
+ uint64_t buf_len = run->tdx.get_quote.size;
+
+ QEMU_BUILD_BUG_ON(sizeof(struct tdx_get_quote_header) != TDX_GET_QUOTE_HDR_SIZE);
+
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_INVALID_OPERAND;
+
+ if (buf_len == 0) {
+ return;
+ }
+
+ if (!QEMU_IS_ALIGNED(buf_gpa, 4096) || !QEMU_IS_ALIGNED(buf_len, 4096)) {
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_ALIGN_ERROR;
+ return;
+ }
+
+ if (address_space_read(&address_space_memory, buf_gpa, MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to read GetQuote header.");
+ return;
+ }
+
+ if (le64_to_cpu(hdr.structure_version) != TDX_GET_QUOTE_STRUCTURE_VERSION) {
+ return;
+ }
+
+ /* Safeguard check only, to reject an overly large buffer size. */
+ if (buf_len > TDX_GET_QUOTE_MAX_BUF_LEN ||
+ le32_to_cpu(hdr.in_len) > buf_len - TDX_GET_QUOTE_HDR_SIZE) {
+ return;
+ }
+
+ if (!tdx_guest->qg_sock_addr) {
+ hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_QGS_UNAVAILABLE);
+ if (address_space_write(&address_space_memory, buf_gpa,
+ MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ error_report("TDX: failed to update GetQuote header.");
+ return;
+ }
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+ return;
+ }
+
+ qemu_mutex_lock(&tdx_guest->lock);
+ if (tdx_guest->num >= TDX_MAX_GET_QUOTE_REQUEST) {
+ qemu_mutex_unlock(&tdx_guest->lock);
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_RETRY;
+ return;
+ }
+ tdx_guest->num++;
+ qemu_mutex_unlock(&tdx_guest->lock);
+
+ task = g_new(TdxGenerateQuoteTask, 1);
+ task->buf_gpa = buf_gpa;
+ task->payload_gpa = buf_gpa + TDX_GET_QUOTE_HDR_SIZE;
+ task->payload_len = buf_len - TDX_GET_QUOTE_HDR_SIZE;
+ task->hdr = hdr;
+ task->completion = tdx_get_quote_completion;
+
+ task->send_data_size = le32_to_cpu(hdr.in_len);
+ task->send_data = g_malloc(task->send_data_size);
+ task->send_data_sent = 0;
+
+ if (address_space_read(&address_space_memory, task->payload_gpa,
+ MEMTXATTRS_UNSPECIFIED, task->send_data,
+ task->send_data_size) != MEMTX_OK) {
+ goto out_free;
+ }
+
+ /* Mark the buffer in-flight. */
+ hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_IN_FLIGHT);
+ if (address_space_write(&address_space_memory, buf_gpa,
+ MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ goto out_free;
+ }
+
+ task->receive_buf = g_malloc0(task->payload_len);
+ task->receive_buf_received = 0;
+ task->opaque = tdx_guest;
+
+ object_ref(tdx_guest);
+ tdx_generate_quote(task, tdx_guest->qg_sock_addr);
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+ return;
+
+out_free:
+ g_free(task->send_data);
+ g_free(task);
+}
+
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run)
+{
+ if (run->tdx.get_tdvmcall_info.leaf != 1) {
+ return;
+ }
+
+ run->tdx.get_tdvmcall_info.r11 = TDG_VP_VMCALL_SUBFUNC_GET_QUOTE;
+ run->tdx.get_tdvmcall_info.r12 = 0;
+ run->tdx.get_tdvmcall_info.r13 = 0;
+ run->tdx.get_tdvmcall_info.r14 = 0;
+}
+
+static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code,
+ char *message, uint64_t gpa)
+{
+ GuestPanicInformation *panic_info;
+
+ panic_info = g_new0(GuestPanicInformation, 1);
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX;
+ panic_info->u.tdx.error_code = (uint32_t) error_code;
+ panic_info->u.tdx.message = message;
+ panic_info->u.tdx.gpa = gpa;
+
+ qemu_system_guest_panicked(panic_info);
+}
+
+/*
+ * Only 8 registers can contain a valid ASCII byte stream to form the fatal
+ * message, and their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX
+ */
+#define TDX_FATAL_MESSAGE_MAX 64
+
+#define TDX_REPORT_FATAL_ERROR_GPA_VALID BIT_ULL(63)
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ uint64_t error_code = run->system_event.data[R_R12];
+ uint64_t reg_mask = run->system_event.data[R_ECX];
+ char *message = NULL;
+ uint64_t *tmp;
+ uint64_t gpa = -1ull;
+
+ if (error_code & 0xffff) {
+ error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%"PRIx64,
+ error_code);
+ return -1;
+ }
+
+ if (reg_mask) {
+ message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1);
+ tmp = (uint64_t *)message;
+
+#define COPY_REG(REG) \
+ do { \
+ if (reg_mask & BIT_ULL(REG)) { \
+ *(tmp++) = run->system_event.data[REG]; \
+ } \
+ } while (0)
+
+ COPY_REG(R_R14);
+ COPY_REG(R_R15);
+ COPY_REG(R_EBX);
+ COPY_REG(R_EDI);
+ COPY_REG(R_ESI);
+ COPY_REG(R_R8);
+ COPY_REG(R_R9);
+ COPY_REG(R_EDX);
+ *((char *)tmp) = '\0';
+ }
+#undef COPY_REG
+
+ if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) {
+ gpa = run->system_event.data[R_R13];
+ }
+
+ tdx_panicked_on_fatal_error(cpu, error_code, message, gpa);
+
+ return -1;
+}
+
+static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
+}
+
+static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ if (value) {
+ tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ } else {
+ tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ }
+}
+
+static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrconfigid);
+}
+
+static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrconfigid);
+ tdx->mrconfigid = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrowner);
+}
+
+static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrowner);
+ tdx->mrowner = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrownerconfig);
+}
+
+static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrownerconfig);
+ tdx->mrownerconfig = g_strdup(value);
+}
+
+static void tdx_guest_get_qgs(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ if (!tdx->qg_sock_addr) {
+ error_setg(errp, "quote-generation-socket is not set");
+ return;
+ }
+ visit_type_SocketAddress(v, name, &tdx->qg_sock_addr, errp);
+}
+
+static void tdx_guest_set_qgs(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+ SocketAddress *sock = NULL;
+
+ if (!visit_type_SocketAddress(v, name, &sock, errp)) {
+ return;
+ }
+
+ if (tdx->qg_sock_addr) {
+ qapi_free_SocketAddress(tdx->qg_sock_addr);
+ }
+
+ tdx->qg_sock_addr = sock;
+}
+
+/* tdx guest */
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
+ tdx_guest,
+ TDX_GUEST,
+ X86_CONFIDENTIAL_GUEST,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+static void tdx_guest_init(Object *obj)
+{
+ ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ qemu_mutex_init(&tdx->lock);
+
+ cgs->require_guest_memfd = true;
+ tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+
+ object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_bool(obj, "sept-ve-disable",
+ tdx_guest_get_sept_ve_disable,
+ tdx_guest_set_sept_ve_disable);
+ object_property_add_str(obj, "mrconfigid",
+ tdx_guest_get_mrconfigid,
+ tdx_guest_set_mrconfigid);
+ object_property_add_str(obj, "mrowner",
+ tdx_guest_get_mrowner, tdx_guest_set_mrowner);
+ object_property_add_str(obj, "mrownerconfig",
+ tdx_guest_get_mrownerconfig,
+ tdx_guest_set_mrownerconfig);
+
+ object_property_add(obj, "quote-generation-socket", "SocketAddress",
+ tdx_guest_get_qgs,
+ tdx_guest_set_qgs,
+ NULL, NULL);
+}
+
+static void tdx_guest_finalize(Object *obj)
+{
+}
+
+static void tdx_guest_class_init(ObjectClass *oc, const void *data)
+{
+ ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
+ X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
+
+ klass->kvm_init = tdx_kvm_init;
+ x86_klass->kvm_type = tdx_kvm_type;
+ x86_klass->cpu_instance_init = tdx_cpu_instance_init;
+ x86_klass->adjust_cpuid_features = tdx_adjust_cpuid_features;
+ x86_klass->check_features = tdx_check_features;
+}
diff --git a/target/i386/kvm/tdx.h b/target/i386/kvm/tdx.h
new file mode 100644
index 0000000..35a09c1
--- /dev/null
+++ b/target/i386/kvm/tdx.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef QEMU_I386_TDX_H
+#define QEMU_I386_TDX_H
+
+#ifndef CONFIG_USER_ONLY
+#include CONFIG_DEVICES /* CONFIG_TDX */
+#endif
+
+#include "confidential-guest.h"
+#include "cpu.h"
+#include "hw/i386/tdvf.h"
+
+#include "tdx-quote-generator.h"
+
+#define TYPE_TDX_GUEST "tdx-guest"
+#define TDX_GUEST(obj) OBJECT_CHECK(TdxGuest, (obj), TYPE_TDX_GUEST)
+
+typedef struct TdxGuestClass {
+ X86ConfidentialGuestClass parent_class;
+} TdxGuestClass;
+
+/* TDX requires the APIC bus frequency to be 25 MHz */
+#define TDX_APIC_BUS_CYCLES_NS 40
+
+#define TDVMCALL_GET_TD_VM_CALL_INFO 0x10000
+#define TDVMCALL_GET_QUOTE 0x10002
+
+#define TDG_VP_VMCALL_SUCCESS 0x0000000000000000ULL
+#define TDG_VP_VMCALL_RETRY 0x0000000000000001ULL
+#define TDG_VP_VMCALL_INVALID_OPERAND 0x8000000000000000ULL
+#define TDG_VP_VMCALL_GPA_INUSE 0x8000000000000001ULL
+#define TDG_VP_VMCALL_ALIGN_ERROR 0x8000000000000002ULL
+
+#define TDG_VP_VMCALL_SUBFUNC_GET_QUOTE 0x0000000000000001ULL
+
+enum TdxRamType {
+ TDX_RAM_UNACCEPTED,
+ TDX_RAM_ADDED,
+};
+
+typedef struct TdxRamEntry {
+ uint64_t address;
+ uint64_t length;
+ enum TdxRamType type;
+} TdxRamEntry;
+
+typedef struct TdxGuest {
+ X86ConfidentialGuest parent_obj;
+
+ QemuMutex lock;
+
+ bool initialized;
+ uint64_t attributes; /* TD attributes */
+ uint64_t xfam;
+ char *mrconfigid; /* base64 encoded sha384 digest */
+ char *mrowner; /* base64 encoded sha384 digest */
+ char *mrownerconfig; /* base64 encoded sha384 digest */
+
+ MemoryRegion *tdvf_mr;
+ TdxFirmware tdvf;
+
+ uint32_t nr_ram_entries;
+ TdxRamEntry *ram_entries;
+
+ /* GetQuote */
+ SocketAddress *qg_sock_addr;
+ int num;
+} TdxGuest;
+
+#ifdef CONFIG_TDX
+bool is_tdx_vm(void);
+#else
+#define is_tdx_vm() 0
+#endif /* CONFIG_TDX */
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp);
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr);
+int tdx_parse_tdvf(void *flash_ptr, int size);
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run);
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run);
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run);
+
+#endif /* QEMU_I386_TDX_H */
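A minimal hypothetical caller sketch (not part of the patch) illustrating why the CONFIG_TDX fallback above matters: common x86 code can test is_tdx_vm() unconditionally, and when CONFIG_TDX is off the macro is the constant 0, so the TDX branch is compiled away.

/* Hypothetical caller, not part of the patch. */
static void example_register_tdvf(MemoryRegion *tdvf_mr)
{
    if (!is_tdx_vm()) {
        return;     /* constant-folded away when CONFIG_TDX is off */
    }
    tdx_set_tdvf_region(tdvf_mr);
}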
diff --git a/target/i386/kvm/vmsr_energy.c b/target/i386/kvm/vmsr_energy.c
new file mode 100644
index 0000000..d6aad52
--- /dev/null
+++ b/target/i386/kvm/vmsr_energy.c
@@ -0,0 +1,344 @@
+/*
+ * QEMU KVM support -- x86 virtual RAPL msr
+ *
+ * Copyright 2024 Red Hat, Inc.
+ *
+ * Author:
+ * Anthony Harivel <aharivel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "vmsr_energy.h"
+#include "io/channel.h"
+#include "io/channel-socket.h"
+#include "hw/boards.h"
+#include "cpu.h"
+#include "host-cpu.h"
+
+char *vmsr_compute_default_paths(void)
+{
+ g_autofree char *state = qemu_get_local_state_dir();
+
+ return g_build_filename(state, "run", "qemu-vmsr-helper.sock", NULL);
+}
+
+bool is_host_cpu_intel(void)
+{
+ char vendor[CPUID_VENDOR_SZ + 1];
+
+ host_cpu_vendor_fms(vendor, NULL, NULL, NULL);
+
+ return g_str_equal(vendor, CPUID_VENDOR_INTEL);
+}
+
+int is_rapl_enabled(void)
+{
+ const char *path = "/sys/class/powercap/intel-rapl/enabled";
+ FILE *file = fopen(path, "r");
+ int value = 0;
+
+ if (file != NULL) {
+ if (fscanf(file, "%d", &value) != 1) {
+ error_report("INTEL RAPL not enabled");
+ }
+ fclose(file);
+ } else {
+ error_report("Error opening %s", path);
+ }
+
+ return value;
+}
+
+QIOChannelSocket *vmsr_open_socket(const char *path)
+{
+ g_autofree char *socket_path = NULL;
+
+ socket_path = g_strdup(path);
+
+ SocketAddress saddr = {
+ .type = SOCKET_ADDRESS_TYPE_UNIX,
+ .u.q_unix.path = socket_path
+ };
+
+ QIOChannelSocket *sioc = qio_channel_socket_new();
+ Error *local_err = NULL;
+
+ qio_channel_set_name(QIO_CHANNEL(sioc), "vmsr-helper");
+ qio_channel_socket_connect_sync(sioc,
+ &saddr,
+ &local_err);
+ if (local_err) {
+ /* Close socket. */
+ qio_channel_close(QIO_CHANNEL(sioc), NULL);
+ object_unref(OBJECT(sioc));
+ sioc = NULL;
+ goto out;
+ }
+
+ qio_channel_set_delay(QIO_CHANNEL(sioc), false);
+out:
+ return sioc;
+}
+
+uint64_t vmsr_read_msr(uint32_t reg, uint32_t cpu_id, uint32_t tid,
+ QIOChannelSocket *sioc)
+{
+ uint64_t data = 0;
+ int r = 0;
+ Error *local_err = NULL;
+ uint32_t buffer[3];
+ /*
+ * Send the required arguments:
+ * 1. RAPL MSR register to read
+ * 2. On which CPU ID
+ * 3. From which vCPU (Thread ID)
+ */
+ buffer[0] = reg;
+ buffer[1] = cpu_id;
+ buffer[2] = tid;
+
+ r = qio_channel_write_all(QIO_CHANNEL(sioc),
+ (char *)buffer, sizeof(buffer),
+ &local_err);
+ if (r < 0) {
+ goto out_close;
+ }
+
+ r = qio_channel_read(QIO_CHANNEL(sioc),
+ (char *)&data, sizeof(data),
+ &local_err);
+ if (r < 0) {
+ data = 0;
+ goto out_close;
+ }
+
+out_close:
+ return data;
+}
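The request framing above is just three uint32_t values (register, CPU id, thread id) answered by a single uint64_t. A hypothetical caller sketch (example_read_pkg_energy and the 0x611 register number are assumptions, not part of the patch), assuming the privileged qemu-vmsr-helper is listening on the default socket:

/* Hypothetical caller, not part of the patch.  0x611 is assumed to be
 * MSR_PKG_ENERGY_STATUS, the Intel RAPL package energy counter. */
static uint64_t example_read_pkg_energy(uint32_t cpu_id, uint32_t tid)
{
    g_autofree char *sock_path = vmsr_compute_default_paths();
    QIOChannelSocket *sioc = vmsr_open_socket(sock_path);
    uint64_t raw = 0;

    if (sioc) {
        raw = vmsr_read_msr(0x611, cpu_id, tid, sioc);
        object_unref(OBJECT(sioc));
    }
    return raw;
}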
+
+/* Retrieve the max number of physical packages */
+unsigned int vmsr_get_max_physical_package(unsigned int max_cpus)
+{
+ const char *dir = "/sys/devices/system/cpu/";
+ const char *topo_path = "topology/physical_package_id";
+ g_autofree int *uniquePackages = g_new0(int, max_cpus);
+ unsigned int packageCount = 0;
+ FILE *file = NULL;
+
+ for (int i = 0; i < max_cpus; i++) {
+ g_autofree char *filePath = NULL;
+ g_autofree char *cpuid = g_strdup_printf("cpu%d", i);
+
+ filePath = g_build_filename(dir, cpuid, topo_path, NULL);
+
+ file = fopen(filePath, "r");
+
+ if (file == NULL) {
+ error_report("Error opening physical_package_id file");
+ return 0;
+ }
+
+ char packageId[10];
+ if (fgets(packageId, sizeof(packageId), file) == NULL) {
+ packageCount = 0;
+ }
+
+ fclose(file);
+
+ int currentPackageId = atoi(packageId);
+
+ bool isUnique = true;
+ for (int j = 0; j < packageCount; j++) {
+ if (uniquePackages[j] == currentPackageId) {
+ isUnique = false;
+ break;
+ }
+ }
+
+ if (isUnique) {
+ uniquePackages[packageCount] = currentPackageId;
+ packageCount++;
+
+ if (packageCount >= max_cpus) {
+ break;
+ }
+ }
+ }
+
+ return (packageCount == 0) ? 1 : packageCount;
+}
+
+/* Retrieve the max number of physical CPUs on the host */
+unsigned int vmsr_get_maxcpus(void)
+{
+ GDir *dir;
+ const gchar *entry_name;
+ unsigned int cpu_count = 0;
+ const char *path = "/sys/devices/system/cpu/";
+
+ dir = g_dir_open(path, 0, NULL);
+ if (dir == NULL) {
+ error_report("Unable to open cpu directory");
+ return -1;
+ }
+
+ while ((entry_name = g_dir_read_name(dir)) != NULL) {
+ if (g_ascii_strncasecmp(entry_name, "cpu", 3) == 0 &&
+ isdigit(entry_name[3])) {
+ cpu_count++;
+ }
+ }
+
+ g_dir_close(dir);
+
+ return cpu_count;
+}
+
+/* Count the number of physical CPUs on each package */
+unsigned int vmsr_count_cpus_per_package(unsigned int *package_count,
+ unsigned int max_pkgs)
+{
+ g_autofree char *file_contents = NULL;
+ g_autofree char *path = NULL;
+ g_autofree char *path_name = NULL;
+ gsize length;
+
+ /* Iterate over cpus and count cpus in each package */
+ for (int cpu_id = 0; ; cpu_id++) {
+ path_name = g_strdup_printf("/sys/devices/system/cpu/cpu%d/"
+ "topology/physical_package_id", cpu_id);
+
+ path = g_build_filename(path_name, NULL);
+
+ if (!g_file_get_contents(path, &file_contents, &length, NULL)) {
+ break; /* No more cpus */
+ }
+
+ /* Get the physical package ID for this CPU */
+ int package_id = atoi(file_contents);
+
+ /* Check if the package ID is within the known number of packages */
+ if (package_id >= 0 && package_id < max_pkgs) {
+ /* If yes, count the cpu for this package*/
+ package_count[package_id]++;
+ }
+ }
+
+ return 0;
+}
+
+/* Get the physical package id from a given cpu id */
+int vmsr_get_physical_package_id(int cpu_id)
+{
+ g_autofree char *file_contents = NULL;
+ g_autofree char *file_path = NULL;
+ int package_id = -1;
+ gsize length;
+
+ file_path = g_strdup_printf("/sys/devices/system/cpu/cpu%d"
+ "/topology/physical_package_id", cpu_id);
+
+ if (!g_file_get_contents(file_path, &file_contents, &length, NULL)) {
+ goto out;
+ }
+
+ package_id = atoi(file_contents);
+
+out:
+ return package_id;
+}
+
+/* Read the scheduled time for a given thread of a given pid */
+void vmsr_read_thread_stat(pid_t pid,
+ unsigned int thread_id,
+ unsigned long long *utime,
+ unsigned long long *stime,
+ unsigned int *cpu_id)
+{
+ g_autofree char *path = NULL;
+ g_autofree char *path_name = NULL;
+
+ path_name = g_strdup_printf("/proc/%u/task/%d/stat", pid, thread_id);
+
+ path = g_build_filename(path_name, NULL);
+
+ FILE *file = fopen(path, "r");
+ if (file == NULL) {
+ error_report("Error opening %s", path_name);
+ return;
+ }
+
+ if (fscanf(file, "%*d (%*[^)]) %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u"
+ " %llu %llu %*d %*d %*d %*d %*d %*d %*u %*u %*d %*u %*u"
+ " %*u %*u %*u %*u %*u %*u %*u %*u %*u %*d %*u %*u %u",
+ utime, stime, cpu_id) != 3)
+ {
+ fclose(file);
+ error_report("Error fscanf did not report the right amount of items");
+ return;
+ }
+
+ fclose(file);
+}
+
+/* Read QEMU stat task folder to retrieve all QEMU threads ID */
+pid_t *vmsr_get_thread_ids(pid_t pid, unsigned int *num_threads)
+{
+ g_autofree char *task_path = g_strdup_printf("%d/task", pid);
+ g_autofree char *path = g_build_filename("/proc", task_path, NULL);
+
+ DIR *dir = opendir(path);
+ if (dir == NULL) {
+ error_report("Error opening %s", path);
+ return NULL;
+ }
+
+ pid_t *thread_ids = NULL;
+ unsigned int thread_count = 0;
+
+ g_autofree struct dirent *ent = NULL;
+ while ((ent = readdir(dir)) != NULL) {
+ if (ent->d_name[0] == '.') {
+ continue;
+ }
+ pid_t tid = atoi(ent->d_name);
+ if (pid != tid) {
+ thread_ids = g_renew(pid_t, thread_ids, (thread_count + 1));
+ thread_ids[thread_count] = tid;
+ thread_count++;
+ }
+ }
+
+ closedir(dir);
+
+ *num_threads = thread_count;
+ return thread_ids;
+}
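A hypothetical sketch (not part of the patch) of how the two /proc helpers combine, enumerating QEMU's threads and then sampling each one's scheduling statistics:

/* Hypothetical sketch, not part of the patch. */
static void example_sample_threads(pid_t pid)
{
    unsigned int nthreads = 0;
    g_autofree pid_t *tids = vmsr_get_thread_ids(pid, &nthreads);

    for (unsigned int i = 0; tids && i < nthreads; i++) {
        unsigned long long utime = 0, stime = 0;
        unsigned int cpu = 0;

        vmsr_read_thread_stat(pid, tids[i], &utime, &stime, &cpu);
        /* utime + stime is the tick count later fed to vmsr_delta_ticks() */
    }
}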
+
+void vmsr_delta_ticks(vmsr_thread_stat *thd_stat, int i)
+{
+ thd_stat[i].delta_ticks = (thd_stat[i].utime[1] + thd_stat[i].stime[1])
+ - (thd_stat[i].utime[0] + thd_stat[i].stime[0]);
+}
+
+double vmsr_get_ratio(uint64_t e_delta,
+ unsigned long long delta_ticks,
+ unsigned int maxticks)
+{
+ return (e_delta / 100.0) * ((100.0 / maxticks) * delta_ticks);
+}
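Algebraically the expression above reduces to e_delta * delta_ticks / maxticks: the thread is charged a share of the package energy delta proportional to its share of the available scheduler ticks. As a worked example with hypothetical numbers, a thread scheduled for 250 of 1000 possible ticks during an energy delta of 50 units is charged (50 / 100) * ((100 / 1000) * 250) = 12.5 units.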
+
+void vmsr_init_topo_info(X86CPUTopoInfo *topo_info,
+ const MachineState *ms)
+{
+ topo_info->dies_per_pkg = ms->smp.dies;
+ topo_info->modules_per_die = ms->smp.modules;
+ topo_info->cores_per_module = ms->smp.cores;
+ topo_info->threads_per_core = ms->smp.threads;
+}
+
diff --git a/target/i386/kvm/vmsr_energy.h b/target/i386/kvm/vmsr_energy.h
new file mode 100644
index 0000000..16cc1f4
--- /dev/null
+++ b/target/i386/kvm/vmsr_energy.h
@@ -0,0 +1,99 @@
+/*
+ * QEMU KVM support -- x86 virtual energy-related MSR.
+ *
+ * Copyright 2024 Red Hat, Inc.
+ *
+ * Author:
+ * Anthony Harivel <aharivel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef VMSR_ENERGY_H
+#define VMSR_ENERGY_H
+
+#include <stdint.h>
+#include "qemu/osdep.h"
+#include "io/channel-socket.h"
+#include "hw/i386/topology.h"
+
+/*
+ * Interval, in microseconds, between two samples of the
+ * energy-related MSRs
+ */
+#define MSR_ENERGY_THREAD_SLEEP_US 1000000.0
+
+/*
+ * Thread statistic
+ * @ thread_id: TID (thread ID)
+ * @ is_vcpu: true if TID is vCPU thread
+ * @ cpu_id: CPU number last executed on
+ * @ pkg_id: package number of the CPU
+ * @ vcpu_id: vCPU ID
+ * @ vpkg_id: virtual package number
+ * @ acpi_id: APIC id of the vCPU
+ * @ utime: number of clock ticks the thread
+ * has been scheduled in User mode
+ * @ stime: number of clock ticks the thread
+ * has been scheduled in System mode
+ * @ delta_ticks: delta of utime+stime between
+ * the two samples (before/after sleep)
+ */
+struct vmsr_thread_stat {
+ unsigned int thread_id;
+ bool is_vcpu;
+ unsigned int cpu_id;
+ unsigned int pkg_id;
+ unsigned int vpkg_id;
+ unsigned int vcpu_id;
+ unsigned long acpi_id;
+ unsigned long long *utime;
+ unsigned long long *stime;
+ unsigned long long delta_ticks;
+};
+
+/*
+ * Package statistic
+ * @ e_start: package energy counter before the sleep
+ * @ e_end: package energy counter after the sleep
+ * @ e_delta: delta of package energy counter
+ * @ e_ratio: stores the energy ratio of non-vCPU threads
+ * @ nb_vcpu: number of vCPU running on this package
+ */
+struct vmsr_package_energy_stat {
+ uint64_t e_start;
+ uint64_t e_end;
+ uint64_t e_delta;
+ uint64_t e_ratio;
+ unsigned int nb_vcpu;
+};
+
+typedef struct vmsr_thread_stat vmsr_thread_stat;
+typedef struct vmsr_package_energy_stat vmsr_package_energy_stat;
+
+char *vmsr_compute_default_paths(void);
+void vmsr_read_thread_stat(pid_t pid,
+ unsigned int thread_id,
+ unsigned long long *utime,
+ unsigned long long *stime,
+ unsigned int *cpu_id);
+
+QIOChannelSocket *vmsr_open_socket(const char *path);
+uint64_t vmsr_read_msr(uint32_t reg, uint32_t cpu_id,
+ uint32_t tid, QIOChannelSocket *sioc);
+void vmsr_delta_ticks(vmsr_thread_stat *thd_stat, int i);
+unsigned int vmsr_get_maxcpus(void);
+unsigned int vmsr_get_max_physical_package(unsigned int max_cpus);
+unsigned int vmsr_count_cpus_per_package(unsigned int *package_count,
+ unsigned int max_pkgs);
+int vmsr_get_physical_package_id(int cpu_id);
+pid_t *vmsr_get_thread_ids(pid_t pid, unsigned int *num_threads);
+double vmsr_get_ratio(uint64_t e_delta,
+ unsigned long long delta_ticks,
+ unsigned int maxticks);
+void vmsr_init_topo_info(X86CPUTopoInfo *topo_info, const MachineState *ms);
+bool is_host_cpu_intel(void);
+int is_rapl_enabled(void);
+#endif /* VMSR_ENERGY_H */
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 2f89dc6..284c5ef 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -13,14 +13,15 @@
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
+#include "exec/target_page.h"
#include "hw/xen/xen.h"
-#include "sysemu/kvm_int.h"
-#include "sysemu/kvm_xen.h"
+#include "system/kvm_int.h"
+#include "system/kvm_xen.h"
#include "kvm/kvm_i386.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "xen-emu.h"
#include "trace.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/pci/msi.h"
#include "hw/i386/apic-msidef.h"
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 39f8294..dd2dac1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1,16 +1,16 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "kvm/hyperv.h"
#include "hw/i386/x86.h"
#include "kvm/kvm_i386.h"
#include "hw/xen/xen.h"
-
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_xen.h"
-#include "sysemu/tcg.h"
+#include "exec/watchpoint.h"
+#include "system/kvm.h"
+#include "system/kvm_xen.h"
+#include "system/tcg.h"
#include "qemu/error-report.h"
@@ -1060,9 +1060,8 @@ static bool tsc_khz_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
- MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
- X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
- return env->tsc_khz && x86mc->save_tsc_khz;
+
+ return env->tsc_khz;
}
static const VMStateDescription vmstate_tsc_khz = {
@@ -1543,6 +1542,25 @@ static const VMStateDescription vmstate_msr_xfd = {
}
};
+static bool msr_hwcr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hwcr != 0;
+}
+
+static const VMStateDescription vmstate_msr_hwcr = {
+ .name = "cpu/msr_hwcr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = msr_hwcr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hwcr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
#ifdef TARGET_X86_64
static bool intel_fred_msrs_needed(void *opaque)
{
@@ -1773,6 +1791,7 @@ const VMStateDescription vmstate_x86_cpu = {
&vmstate_msr_intel_sgx,
&vmstate_pdptrs,
&vmstate_msr_xfd,
+ &vmstate_msr_hwcr,
#ifdef TARGET_X86_64
&vmstate_msr_fred,
&vmstate_amx_xtile,
diff --git a/target/i386/meson.build b/target/i386/meson.build
index 0751179..c1aacea 100644
--- a/target/i386/meson.build
+++ b/target/i386/meson.build
@@ -19,9 +19,10 @@ i386_system_ss.add(files(
'machine.c',
'monitor.c',
'cpu-apic.c',
- 'cpu-sysemu.c',
+ 'cpu-system.c',
))
-i386_system_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-sysemu-stub.c'))
+i386_system_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'),
+ if_false: files('sev-system-stub.c'))
i386_user_ss = ss.source_set()
@@ -30,6 +31,7 @@ subdir('whpx')
subdir('nvmm')
subdir('hvf')
subdir('tcg')
+subdir('emulate')
target_arch += {'i386': i386_ss}
target_system_arch += {'i386': i386_system_ss}
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 2d766b2..3c9b6ca 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -27,9 +27,8 @@
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
/* Perform linear address sign extension */
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 0ba3120..2144307 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -8,12 +8,13 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/kvm_int.h"
+#include "system/kvm_int.h"
#include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
-#include "sysemu/nvmm.h"
+#include "system/nvmm.h"
#include "nvmm-accel-ops.h"
static void *qemu_nvmm_cpu_thread_fn(void *arg)
@@ -80,7 +81,7 @@ static void nvmm_kick_vcpu_thread(CPUState *cpu)
cpus_kick_thread(cpu);
}
-static void nvmm_accel_ops_class_init(ObjectClass *oc, void *data)
+static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/target/i386/nvmm/nvmm-accel-ops.h b/target/i386/nvmm/nvmm-accel-ops.h
index 7c5461b..931bb5c 100644
--- a/target/i386/nvmm/nvmm-accel-ops.h
+++ b/target/i386/nvmm/nvmm-accel-ops.h
@@ -10,7 +10,7 @@
#ifndef TARGET_I386_NVMM_ACCEL_OPS_H
#define TARGET_I386_NVMM_ACCEL_OPS_H
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
int nvmm_init_vcpu(CPUState *cpu);
int nvmm_vcpu_exec(CPUState *cpu);
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 65768ac..f1c6120 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -9,12 +9,12 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/address-spaces.h"
-#include "exec/ioport.h"
+#include "system/address-spaces.h"
+#include "system/ioport.h"
#include "qemu/accel.h"
-#include "sysemu/nvmm.h"
-#include "sysemu/cpus.h"
-#include "sysemu/runstate.h"
+#include "system/nvmm.h"
+#include "system/cpus.h"
+#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
@@ -1200,7 +1200,7 @@ nvmm_enabled(void)
}
static void
-nvmm_accel_class_init(ObjectClass *oc, void *data)
+nvmm_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "NVMM";
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index f0aa189..a2e4d48 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -842,7 +842,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
for (i = 0; i < 2 << SHIFT; i++) {
d->ZMM_S(i) = float32_div(float32_one,
@@ -855,7 +855,7 @@ void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
#if SHIFT == 1
void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
d->ZMM_S(0) = float32_div(float32_one,
float32_sqrt(s->ZMM_S(0), &env->sse_status),
@@ -869,7 +869,7 @@ void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
for (i = 0; i < 2 << SHIFT; i++) {
d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status);
@@ -880,7 +880,7 @@ void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
#if SHIFT == 1
void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
for (i = 1; i < 2 << SHIFT; i++) {
@@ -1714,7 +1714,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1738,7 +1738,7 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1763,7 +1763,7 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1788,7 +1788,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
diff --git a/target/i386/sev-sysemu-stub.c b/target/i386/sev-sysemu-stub.c
deleted file mode 100644
index d5bf886..0000000
--- a/target/i386/sev-sysemu-stub.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * QEMU SEV system stub
- *
- * Copyright Advanced Micro Devices 2018
- *
- * Authors:
- * Brijesh Singh <brijesh.singh@amd.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "monitor/monitor.h"
-#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
-#include "qapi/error.h"
-#include "sev.h"
-
-SevInfo *qmp_query_sev(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevCapability *qmp_query_sev_capabilities(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret,
- bool has_gpa, uint64_t gpa, Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
-}
-
-int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
-{
- g_assert_not_reached();
-}
-
-void sev_es_set_reset_vector(CPUState *cpu)
-{
-}
-
-int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
-{
- g_assert_not_reached();
-}
-
-SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
- Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-void hmp_info_sev(Monitor *mon, const QDict *qdict)
-{
- monitor_printf(mon, "SEV is not available in this QEMU\n");
-}
-
-void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
-{
-}
diff --git a/target/i386/sev-system-stub.c b/target/i386/sev-system-stub.c
new file mode 100644
index 0000000..7c5c02a
--- /dev/null
+++ b/target/i386/sev-system-stub.c
@@ -0,0 +1,41 @@
+/*
+ * QEMU SEV system stub
+ *
+ * Copyright Advanced Micro Devices 2018
+ *
+ * Authors:
+ * Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "qapi/error.h"
+#include "sev.h"
+
+int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void sev_es_set_reset_vector(CPUState *cpu)
+{
+}
+
+int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
+{
+ g_assert_not_reached();
+}
+
+void hmp_info_sev(Monitor *mon, const QDict *qdict)
+{
+ monitor_printf(mon, "SEV is not available in this QEMU\n");
+}
+
+void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
+{
+}
diff --git a/target/i386/sev.c b/target/i386/sev.c
index a1157c0..1a12f06 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -26,20 +26,21 @@
#include "qemu/uuid.h"
#include "qemu/error-report.h"
#include "crypto/hash.h"
-#include "sysemu/kvm.h"
+#include "exec/target_page.h"
+#include "system/kvm.h"
#include "kvm/kvm_i386.h"
#include "sev.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "trace.h"
#include "migration/blocker.h"
#include "qom/object.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-misc-i386.h"
#include "confidential-guest.h"
#include "hw/i386/pc.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "qemu/queue.h"
OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON)
@@ -211,14 +212,6 @@ static const char *const sev_fw_errlist[] = {
#define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
-/* <linux/kvm.h> doesn't expose this, so re-use the max from kvm.c */
-#define KVM_MAX_CPUID_ENTRIES 100
-
-typedef struct KvmCpuidInfo {
- struct kvm_cpuid2 cpuid;
- struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
-} KvmCpuidInfo;
-
#define SNP_CPUID_FUNCTION_MAXCOUNT 64
#define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
@@ -946,7 +939,7 @@ out:
}
static uint32_t
-sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
+sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
switch (feature) {
@@ -1883,7 +1876,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
* be used.
*/
hashp = cmdline_hash;
- if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data,
+ if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->cmdline_data,
ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
return false;
}
@@ -1894,7 +1887,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
* -initrd, an empty buffer will be used (ctx->initrd_size == 0).
*/
hashp = initrd_hash;
- if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data,
+ if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->initrd_data,
ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
return false;
}
@@ -1906,7 +1899,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
{ .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
{ .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
};
- if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov),
+ if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, iov, ARRAY_SIZE(iov),
&hashp, &hash_len, errp) < 0) {
return false;
}
@@ -2045,7 +2038,7 @@ static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
}
static void
-sev_common_class_init(ObjectClass *oc, void *data)
+sev_common_class_init(ObjectClass *oc, const void *data)
{
ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
@@ -2088,7 +2081,7 @@ static const TypeInfo sev_common_info = {
.class_size = sizeof(SevCommonStateClass),
.class_init = sev_common_class_init,
.abstract = true,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
@@ -2140,7 +2133,7 @@ static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
}
static void
-sev_guest_class_init(ObjectClass *oc, void *data)
+sev_guest_class_init(ObjectClass *oc, const void *data)
{
SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
@@ -2394,7 +2387,7 @@ sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
}
static void
-sev_snp_guest_class_init(ObjectClass *oc, void *data)
+sev_snp_guest_class_init(ObjectClass *oc, const void *data)
{
SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
@@ -2404,7 +2397,7 @@ sev_snp_guest_class_init(ObjectClass *oc, void *data)
klass->launch_finish = sev_snp_launch_finish;
klass->launch_update_data = sev_snp_launch_update_data;
klass->kvm_init = sev_snp_kvm_init;
- x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
+ x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
x86_klass->kvm_type = sev_snp_kvm_type;
object_class_property_add(oc, "policy", "uint64",
@@ -2422,7 +2415,7 @@ sev_snp_guest_class_init(ObjectClass *oc, void *data)
object_class_property_add_bool(oc, "author-key-enabled",
sev_snp_guest_get_author_key_enabled,
sev_snp_guest_set_author_key_enabled);
- object_class_property_add_bool(oc, "vcek-required",
+ object_class_property_add_bool(oc, "vcek-disabled",
sev_snp_guest_get_vcek_disabled,
sev_snp_guest_set_vcek_disabled);
object_class_property_add_str(oc, "host-data",
diff --git a/target/i386/sev.h b/target/i386/sev.h
index 858005a..373669e 100644
--- a/target/i386/sev.h
+++ b/target/i386/sev.h
@@ -18,7 +18,17 @@
#include CONFIG_DEVICES /* CONFIG_SEV */
#endif
-#include "exec/confidential-guest-support.h"
+#if !defined(CONFIG_SEV) || defined(CONFIG_USER_ONLY)
+#define sev_enabled() 0
+#define sev_es_enabled() 0
+#define sev_snp_enabled() 0
+#else
+bool sev_enabled(void);
+bool sev_es_enabled(void);
+bool sev_snp_enabled(void);
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
#define TYPE_SEV_COMMON "sev-common"
#define TYPE_SEV_GUEST "sev-guest"
@@ -45,18 +55,6 @@ typedef struct SevKernelLoaderContext {
size_t cmdline_size;
} SevKernelLoaderContext;
-#ifdef CONFIG_SEV
-bool sev_enabled(void);
-bool sev_es_enabled(void);
-bool sev_snp_enabled(void);
-#else
-#define sev_enabled() 0
-#define sev_es_enabled() 0
-#define sev_snp_enabled() 0
-#endif
-
-uint32_t sev_get_cbit_position(void);
-uint32_t sev_get_reduced_phys_bits(void);
bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp);
int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp);
@@ -68,4 +66,9 @@ void sev_es_set_reset_vector(CPUState *cpu);
void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size);
+#endif /* !CONFIG_USER_ONLY */
+
+uint32_t sev_get_cbit_position(void);
+uint32_t sev_get_reduced_phys_bits(void);
+
#endif
diff --git a/target/i386/tcg/access.c b/target/i386/tcg/access.c
index 56a1181..97e3f0e 100644
--- a/target/i386/tcg/access.c
+++ b/target/i386/tcg/access.c
@@ -3,8 +3,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
+#include "exec/target_page.h"
#include "access.h"
@@ -58,6 +59,11 @@ static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
assert(addr >= ac->vaddr);
+ /* No haddr means probe_access wants to force slow path */
+ if (!ac->haddr1) {
+ return NULL;
+ }
+
#ifdef CONFIG_USER_ONLY
assert(offset <= ac->size1 - len);
return ac->haddr1 + offset;
@@ -78,17 +84,11 @@ static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
#endif
}
-#ifdef CONFIG_USER_ONLY
-# define test_ptr(p) true
-#else
-# define test_ptr(p) likely(p)
-#endif
-
uint8_t access_ldb(X86Access *ac, vaddr addr)
{
void *p = access_ptr(ac, addr, sizeof(uint8_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
return ldub_p(p);
}
return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
@@ -98,7 +98,7 @@ uint16_t access_ldw(X86Access *ac, vaddr addr)
{
void *p = access_ptr(ac, addr, sizeof(uint16_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
return lduw_le_p(p);
}
return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
@@ -108,7 +108,7 @@ uint32_t access_ldl(X86Access *ac, vaddr addr)
{
void *p = access_ptr(ac, addr, sizeof(uint32_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
return ldl_le_p(p);
}
return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
@@ -118,7 +118,7 @@ uint64_t access_ldq(X86Access *ac, vaddr addr)
{
void *p = access_ptr(ac, addr, sizeof(uint64_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
return ldq_le_p(p);
}
return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
@@ -128,7 +128,7 @@ void access_stb(X86Access *ac, vaddr addr, uint8_t val)
{
void *p = access_ptr(ac, addr, sizeof(uint8_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
stb_p(p, val);
} else {
cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
@@ -139,7 +139,7 @@ void access_stw(X86Access *ac, vaddr addr, uint16_t val)
{
void *p = access_ptr(ac, addr, sizeof(uint16_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
stw_le_p(p, val);
} else {
cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
@@ -150,7 +150,7 @@ void access_stl(X86Access *ac, vaddr addr, uint32_t val)
{
void *p = access_ptr(ac, addr, sizeof(uint32_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
stl_le_p(p, val);
} else {
cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
@@ -161,7 +161,7 @@ void access_stq(X86Access *ac, vaddr addr, uint64_t val)
{
void *p = access_ptr(ac, addr, sizeof(uint64_t));
- if (test_ptr(p)) {
+ if (likely(p)) {
stq_le_p(p, val);
} else {
cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c
index 301ed95..f1940b4 100644
--- a/target/i386/tcg/cc_helper.c
+++ b/target/i386/tcg/cc_helper.c
@@ -22,41 +22,6 @@
#include "exec/helper-proto.h"
#include "helper-tcg.h"
-const uint8_t parity_table[256] = {
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
-};
-
#define SHIFT 0
#include "cc_helper_template.h.inc"
#undef SHIFT
@@ -95,6 +60,19 @@ static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
+target_ulong helper_cc_compute_nz(target_ulong dst, target_ulong src1,
+ int op)
+{
+ if (CC_OP_HAS_EFLAGS(op)) {
+ return ~src1 & CC_Z;
+ } else {
+ MemOp size = cc_op_size(op);
+ target_ulong mask = MAKE_64BIT_MASK(0, 8 << size);
+
+ return dst & mask;
+ }
+}
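In both branches the helper returns a value that is nonzero exactly when ZF would be clear: for EFLAGS-style cc_ops it isolates the inverted Z bit, otherwise it truncates the stored result to the operation's width and lets the caller test it against zero.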
+
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
target_ulong src2, int op)
{
@@ -104,8 +82,6 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
case CC_OP_EFLAGS:
return src1;
- case CC_OP_CLR:
- return CC_Z | CC_P;
case CC_OP_POPCNT:
return dst ? 0 : CC_Z;
@@ -186,6 +162,13 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
case CC_OP_BMILGL:
return compute_all_bmilgl(dst, src1);
+ case CC_OP_BLSIB:
+ return compute_all_blsib(dst, src1);
+ case CC_OP_BLSIW:
+ return compute_all_blsiw(dst, src1);
+ case CC_OP_BLSIL:
+ return compute_all_blsil(dst, src1);
+
case CC_OP_ADCX:
return compute_all_adcx(dst, src1, src2);
case CC_OP_ADOX:
@@ -216,6 +199,8 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
return compute_all_sarq(dst, src1);
case CC_OP_BMILGQ:
return compute_all_bmilgq(dst, src1);
+ case CC_OP_BLSIQ:
+ return compute_all_blsiq(dst, src1);
#endif
}
}
@@ -234,7 +219,6 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
- case CC_OP_CLR:
case CC_OP_POPCNT:
return 0;
@@ -308,6 +292,13 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
case CC_OP_BMILGL:
return compute_c_bmilgl(dst, src1);
+ case CC_OP_BLSIB:
+ return compute_c_blsib(dst, src1);
+ case CC_OP_BLSIW:
+ return compute_c_blsiw(dst, src1);
+ case CC_OP_BLSIL:
+ return compute_c_blsil(dst, src1);
+
#ifdef TARGET_X86_64
case CC_OP_ADDQ:
return compute_c_addq(dst, src1);
@@ -321,6 +312,8 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
return compute_c_shlq(dst, src1);
case CC_OP_BMILGQ:
return compute_c_bmilgq(dst, src1);
+ case CC_OP_BLSIQ:
+ return compute_c_blsiq(dst, src1);
#endif
}
}
diff --git a/target/i386/tcg/cc_helper_template.h.inc b/target/i386/tcg/cc_helper_template.h.inc
index bb611fe..d8fd976 100644
--- a/target/i386/tcg/cc_helper_template.h.inc
+++ b/target/i386/tcg/cc_helper_template.h.inc
@@ -22,12 +22,17 @@
#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
+#define WIDER_TYPE uint32_t
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
+#define WIDER_TYPE uint32_t
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_TYPE uint32_t
+#if HOST_LONG_BITS >= 64
+#define WIDER_TYPE uint64_t
+#endif
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_TYPE uint64_t
@@ -39,18 +44,32 @@
/* dynamic flags computation */
-static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_cout, SUFFIX)(DATA_TYPE dst, DATA_TYPE carries)
{
- int cf, pf, af, zf, sf, of;
- DATA_TYPE src2 = dst - src1;
+ uint32_t af_cf, pf, zf, sf, of;
- cf = dst < src1;
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & CC_A;
+ /* PF, ZF, SF computed from result. */
+ pf = compute_pf(dst);
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
- of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+
+ /*
+ * AF, CF, OF computed from carry out vector. To compute AF and CF, rotate it
+ * left by one so cout(DATA_BITS - 1) is in bit 0 and cout(3) in bit 4.
+ *
+ * To compute OF, place the highest two carry bits into OF and the bit
+ * immediately to the right of it; then, adding CC_O / 2 XORs them.
+ */
+ af_cf = ((carries << 1) | (carries >> (DATA_BITS - 1))) & (CC_A | CC_C);
+ of = (lshift(carries, 12 - DATA_BITS) + CC_O / 2) & CC_O;
+ return pf + zf + sf + af_cf + of;
+}
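A standalone numeric check of the rotate/add trick described in the comment (purely illustrative, not part of the patch): for the 8-bit add 0x7f + 0x01 = 0x80 the per-bit carry-out vector is 0x7f (a carry out of every bit except bit 7), so AF must end up set, CF clear and OF set.

/* Hypothetical standalone check, not part of the patch. */
#include <assert.h>
#include <stdint.h>

#define CC_C 0x0001
#define CC_A 0x0010
#define CC_O 0x0800

int main(void)
{
    uint32_t carries = 0x7f;   /* carry-out vector of 0x7f + 0x01 */
    uint32_t af_cf = ((carries << 1) | (carries >> 7)) & (CC_A | CC_C);
    uint32_t of = ((carries << 4) + CC_O / 2) & CC_O;   /* 12 - DATA_BITS = 4 */

    assert(af_cf == CC_A);     /* half-carry set, no carry out of bit 7 */
    assert(of == CC_O);        /* carry into bit 7 != carry out: overflow */
    return 0;
}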
+
+static uint32_t glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ DATA_TYPE src2 = dst - src1;
+ DATA_TYPE carries = ADD_COUT_VEC(src1, src2, dst);
+ return glue(compute_all_cout, SUFFIX)(dst, carries);
}
static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@@ -58,39 +77,31 @@ static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
return dst < src1;
}
-static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+static uint32_t glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
DATA_TYPE src3)
{
- int cf, pf, af, zf, sf, of;
DATA_TYPE src2 = dst - src1 - src3;
-
- cf = (src3 ? dst <= src1 : dst < src1);
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & 0x10;
- zf = (dst == 0) << 6;
- sf = lshift(dst, 8 - DATA_BITS) & 0x80;
- of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+ DATA_TYPE carries = ADD_COUT_VEC(src1, src2, dst);
+ return glue(compute_all_cout, SUFFIX)(dst, carries);
}
static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
DATA_TYPE src3)
{
+#ifdef WIDER_TYPE
+ WIDER_TYPE src13 = (WIDER_TYPE) src1 + (WIDER_TYPE) src3;
+
+ return dst < src13;
+#else
return src3 ? dst <= src1 : dst < src1;
+#endif
}
-static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+static uint32_t glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
{
- int cf, pf, af, zf, sf, of;
DATA_TYPE src1 = dst + src2;
-
- cf = src1 < src2;
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & CC_A;
- zf = (dst == 0) * CC_Z;
- sf = lshift(dst, 8 - DATA_BITS) & CC_S;
- of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+ DATA_TYPE carries = SUB_COUT_VEC(src1, src2, dst);
+ return glue(compute_all_cout, SUFFIX)(dst, carries);
}
static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
@@ -100,86 +111,80 @@ static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
return src1 < src2;
}
-static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+static uint32_t glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
DATA_TYPE src3)
{
- int cf, pf, af, zf, sf, of;
DATA_TYPE src1 = dst + src2 + src3;
-
- cf = (src3 ? src1 <= src2 : src1 < src2);
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & 0x10;
- zf = (dst == 0) << 6;
- sf = lshift(dst, 8 - DATA_BITS) & 0x80;
- of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+ DATA_TYPE carries = SUB_COUT_VEC(src1, src2, dst);
+ return glue(compute_all_cout, SUFFIX)(dst, carries);
}
static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
DATA_TYPE src3)
{
+#ifdef WIDER_TYPE
+ WIDER_TYPE src23 = (WIDER_TYPE) src2 + (WIDER_TYPE) src3;
+ DATA_TYPE src1 = dst + src23;
+
+ return src1 < src23;
+#else
DATA_TYPE src1 = dst + src2 + src3;
return (src3 ? src1 <= src2 : src1 < src2);
+#endif
}
-static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
+ uint32_t cf, pf, af, zf, sf, of;
cf = 0;
- pf = parity_table[(uint8_t)dst];
+ pf = compute_pf(dst);
af = 0;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = 0;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
-static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
- DATA_TYPE src2;
+ uint32_t cf, pf, af, zf, sf, of;
cf = src1;
- src1 = dst - 1;
- src2 = 1;
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & CC_A;
+ pf = compute_pf(dst);
+ af = (dst ^ (dst - 1)) & CC_A; /* bits 0..3 are all clear */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = (dst == SIGN_MASK) * CC_O;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
-static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
- DATA_TYPE src2;
+ uint32_t cf, pf, af, zf, sf, of;
cf = src1;
- src1 = dst + 1;
- src2 = 1;
- pf = parity_table[(uint8_t)dst];
- af = (dst ^ src1 ^ src2) & CC_A;
+ pf = compute_pf(dst);
+ af = (dst ^ (dst + 1)) & CC_A; /* bits 0..3 are all set */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = (dst == SIGN_MASK - 1) * CC_O;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
-static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
+ uint32_t cf, pf, af, zf, sf, of;
cf = (src1 >> (DATA_BITS - 1)) & CC_C;
- pf = parity_table[(uint8_t)dst];
+ pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
/* of is defined iff shift count == 1 */
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@@ -187,39 +192,39 @@ static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
return (src1 >> (DATA_BITS - 1)) & CC_C;
}
-static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
+ uint32_t cf, pf, af, zf, sf, of;
cf = src1 & 1;
- pf = parity_table[(uint8_t)dst];
+ pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
/* of is defined iff shift count == 1 */
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
CF are modified and it is slower to do that. Note as well that we
don't truncate SRC1 for computing carry to DATA_TYPE. */
-static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
+static uint32_t glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
{
- int cf, pf, af, zf, sf, of;
+ uint32_t cf, pf, af, zf, sf, of;
cf = (src1 != 0);
- pf = parity_table[(uint8_t)dst];
+ pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = cf * CC_O;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
-static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+static uint32_t glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
- int cf, pf, af, zf, sf, of;
+ uint32_t cf, pf, af, zf, sf, of;
cf = (src1 == 0);
pf = 0; /* undefined */
@@ -227,7 +232,7 @@ static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = 0;
- return cf | pf | af | zf | sf | of;
+ return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@@ -235,8 +240,26 @@ static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
return src1 == 0;
}
+static int glue(compute_all_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ uint32_t cf, pf, af, zf, sf, of;
+
+ cf = (src1 != 0);
+ pf = 0; /* undefined */
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = 0;
+ return cf + pf + af + zf + sf + of;
+}
+
+static int glue(compute_c_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return src1 != 0;
+}
+
#undef DATA_BITS
#undef SIGN_MASK
#undef DATA_TYPE
-#undef DATA_MASK
#undef SUFFIX
+#undef WIDER_TYPE
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index d2da1d3..55216e0 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -129,6 +129,37 @@
*
* (^) these are the two cases in which Intel and AMD disagree on the
* primary exception class
+ *
+ * Instructions still in translate.c
+ * ---------------------------------
+ * Generation of TCG opcodes for almost all instructions is in emit.c.inc;
+ * this file interprets the prefixes and opcode bytes down to individual
+ * instruction mnemonics. There is only a handful of opcodes still using
+ * a switch statement to decode modrm bits 3-5 and prefixes after decoding
+ * is complete; these are relics of the older x86 decoder and their code
+ * generation is performed in translate.c.
+ *
+ * These unconverted opcodes also perform their own effective address
+ * generation using the gen_lea_modrm() function.
+ *
+ * There is nothing particularly complicated about them; simply, they don't
+ * need any nasty hacks in the decoder, and they shouldn't get in the way
+ * of the implementation of new x86 instructions, so they are left alone
+ * for the time being.
+ *
+ * x87:
+ * 0xD8 - 0xDF
+ *
+ * privileged/system:
+ * 0x0F 0x00 group 6 (SLDT, STR, LLDT, LTR, VERR, VERW)
+ * 0x0F 0x01 group 7 (SGDT, SIDT, LGDT, LIDT, SMSW, LMSW, INVLPG,
+ * MONITOR, MWAIT, CLAC, STAC, XGETBV, XSETBV,
+ * SWAPGS, RDTSCP)
+ * 0x0F 0xC7 (reg operand) group 9 (RDRAND, RDSEED, RDPID)
+ *
+ * MPX:
+ * 0x0F 0x1A BNDLDX, BNDMOV, BNDCL, BNDCU
+ * 0x0F 0x1B BNDSTX, BNDMOV, BNDMK, BNDCN
*/
#define X86_OP_NONE { 0 },
@@ -205,6 +236,7 @@
#define sextT0 .special = X86_SPECIAL_SExtT0,
#define zextT0 .special = X86_SPECIAL_ZExtT0,
#define op0_Mw .special = X86_SPECIAL_Op0_Mw,
+#define btEvGv .special = X86_SPECIAL_BitTest,
#define vex1 .vex_class = 1,
#define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar,
@@ -269,6 +301,43 @@ static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEnt
}
}
+static void decode_group8(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+ static const X86GenFunc group8_gen[8] = {
+ NULL, NULL, NULL, NULL,
+ gen_BT, gen_BTS, gen_BTR, gen_BTC,
+ };
+ int op = (get_modrm(s, env) >> 3) & 7;
+ entry->gen = group8_gen[op];
+ if (op == 4) {
+ /* prevent writeback and LOCK for BT */
+ entry->op1 = entry->op0;
+ entry->op0 = X86_TYPE_None;
+ entry->s0 = X86_SIZE_None;
+ } else {
+ entry->special = X86_SPECIAL_HasLock;
+ }
+}
+
+static void decode_group9(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+ static const X86OpEntry group9_reg =
+ X86_OP_ENTRY0(multi0F); /* unconverted */
+ static const X86OpEntry cmpxchg8b =
+ X86_OP_ENTRY1(CMPXCHG8B, M,q, lock p_00 cpuid(CX8));
+ static const X86OpEntry cmpxchg16b =
+ X86_OP_ENTRY1(CMPXCHG16B, M,dq, lock p_00 cpuid(CX16));
+
+ int modrm = get_modrm(s, env);
+ int op = (modrm >> 3) & 7;
+
+ if ((modrm >> 6) == 3) {
+ *entry = group9_reg;
+ } else if (op == 1) {
+ *entry = REX_W(s) ? cmpxchg16b : cmpxchg8b;
+ }
+}
+
static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
static const X86OpEntry group15_reg[8] = {
@@ -276,9 +345,9 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
[1] = X86_OP_ENTRYw(RDxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3),
[2] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
[3] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
- [5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE2) p_00),
+ [5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE) p_00),
[6] = X86_OP_ENTRY0(MFENCE, cpuid(SSE2) p_00),
- [7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE2) p_00),
+ [7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE) p_00),
};
static const X86OpEntry group15_mem[8] = {
@@ -1073,6 +1142,8 @@ static void decode_MOV_CR_DR(DisasContext *s, CPUX86State *env, X86OpEntry *entr
}
static const X86OpEntry opcodes_0F[256] = {
+ [0x00] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted */
+ [0x01] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted */
[0x02] = X86_OP_ENTRYwr(LAR, G,v, E,w, chk(prot)),
[0x03] = X86_OP_ENTRYwr(LSL, G,v, E,w, chk(prot)),
[0x05] = X86_OP_ENTRY0(SYSCALL, chk(o64_intel)),
@@ -1162,12 +1233,14 @@ static const X86OpEntry opcodes_0F[256] = {
[0xa0] = X86_OP_ENTRYr(PUSH, FS, w),
[0xa1] = X86_OP_ENTRYw(POP, FS, w),
[0xa2] = X86_OP_ENTRY0(CPUID),
+ [0xa3] = X86_OP_ENTRYrr(BT, E,v, G,v, btEvGv),
[0xa4] = X86_OP_ENTRY4(SHLD, E,v, 2op,v, G,v),
[0xa5] = X86_OP_ENTRY3(SHLD, E,v, 2op,v, G,v),
[0xb0] = X86_OP_ENTRY2(CMPXCHG,E,b, G,b, lock),
[0xb1] = X86_OP_ENTRY2(CMPXCHG,E,v, G,v, lock),
[0xb2] = X86_OP_ENTRY3(LSS, G,v, EM,p, None, None),
+ [0xb3] = X86_OP_ENTRY2(BTR, E,v, G,v, btEvGv),
[0xb4] = X86_OP_ENTRY3(LFS, G,v, EM,p, None, None),
[0xb5] = X86_OP_ENTRY3(LGS, G,v, EM,p, None, None),
[0xb6] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, zextT0), /* MOVZX */
@@ -1180,6 +1253,7 @@ static const X86OpEntry opcodes_0F[256] = {
[0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66),
[0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66),
[0xc6] = X86_OP_ENTRY4(VSHUF, V,x, H,x, W,x, vex4 p_00_66),
+ [0xc7] = X86_OP_GROUP0(group9),
[0xd0] = X86_OP_ENTRY3(VADDSUB, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2),
[0xd1] = X86_OP_ENTRY3(PSRLW_r, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
@@ -1222,6 +1296,8 @@ static const X86OpEntry opcodes_0F[256] = {
[0x18] = X86_OP_ENTRY1(NOP, nop,v), /* prefetch/reserved NOP */
[0x19] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */
+ [0x1a] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted MPX */
+ [0x1b] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted MPX */
[0x1c] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */
[0x1d] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */
[0x1e] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */
@@ -1294,6 +1370,7 @@ static const X86OpEntry opcodes_0F[256] = {
[0xa8] = X86_OP_ENTRYr(PUSH, GS, w),
[0xa9] = X86_OP_ENTRYw(POP, GS, w),
[0xaa] = X86_OP_ENTRY0(RSM, chk(smm) svm(RSM)),
+ [0xab] = X86_OP_ENTRY2(BTS, E,v, G,v, btEvGv),
[0xac] = X86_OP_ENTRY4(SHRD, E,v, 2op,v, G,v),
[0xad] = X86_OP_ENTRY3(SHRD, E,v, 2op,v, G,v),
[0xae] = X86_OP_GROUP0(group15),
@@ -1306,6 +1383,8 @@ static const X86OpEntry opcodes_0F[256] = {
[0xb8] = X86_OP_GROUP0(0FB8),
/* decoded as modrm, which is visible as a difference between page fault and #UD */
[0xb9] = X86_OP_ENTRYr(UD, nop,v), /* UD1 */
+ [0xba] = X86_OP_GROUP2(group8, E,v, I,b),
+ [0xbb] = X86_OP_ENTRY2(BTC, E,v, G,v, btEvGv),
[0xbc] = X86_OP_GROUP0(0FBC),
[0xbd] = X86_OP_GROUP0(0FBD),
[0xbe] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, sextT0), /* MOVSX */
@@ -1627,9 +1706,9 @@ static const X86OpEntry opcodes_root[256] = {
[0xE2] = X86_OP_ENTRYr(LOOP, J,b), /* implicit: CX with aflag size */
[0xE3] = X86_OP_ENTRYr(JCXZ, J,b), /* implicit: CX with aflag size */
[0xE4] = X86_OP_ENTRYwr(IN, 0,b, I_unsigned,b), /* AL */
- [0xE5] = X86_OP_ENTRYwr(IN, 0,v, I_unsigned,b), /* AX/EAX */
+ [0xE5] = X86_OP_ENTRYwr(IN, 0,z, I_unsigned,b), /* AX/EAX */
[0xE6] = X86_OP_ENTRYrr(OUT, 0,b, I_unsigned,b), /* AL */
- [0xE7] = X86_OP_ENTRYrr(OUT, 0,v, I_unsigned,b), /* AX/EAX */
+ [0xE7] = X86_OP_ENTRYrr(OUT, 0,z, I_unsigned,b), /* AX/EAX */
[0xF1] = X86_OP_ENTRY0(INT1, svm(ICEBP)),
[0xF4] = X86_OP_ENTRY0(HLT, chk(cpl0) svm(HLT)),
@@ -1756,14 +1835,27 @@ static const X86OpEntry opcodes_root[256] = {
[0xCE] = X86_OP_ENTRY0(INTO),
[0xCF] = X86_OP_ENTRY0(IRET, chk(vm86_iopl) svm(IRET)),
+ /*
+ * x87 is nolea because it needs the address without segment base,
+ * in order to store it in fdp.
+ */
+ [0xD8] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xD9] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDA] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDB] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDC] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDD] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDE] = X86_OP_ENTRY1(x87, nop,v, nolea),
+ [0xDF] = X86_OP_ENTRY1(x87, nop,v, nolea),
+
[0xE8] = X86_OP_ENTRYr(CALL, J,z_f64),
[0xE9] = X86_OP_ENTRYr(JMP, J,z_f64),
[0xEA] = X86_OP_ENTRYrr(JMPF, I_unsigned,p, I_unsigned,w, chk(i64)),
[0xEB] = X86_OP_ENTRYr(JMP, J,b),
[0xEC] = X86_OP_ENTRYwr(IN, 0,b, 2,w), /* AL, DX */
- [0xED] = X86_OP_ENTRYwr(IN, 0,v, 2,w), /* AX/EAX, DX */
+ [0xED] = X86_OP_ENTRYwr(IN, 0,z, 2,w), /* AX/EAX, DX */
[0xEE] = X86_OP_ENTRYrr(OUT, 0,b, 2,w), /* DX, AL */
- [0xEF] = X86_OP_ENTRYrr(OUT, 0,v, 2,w), /* DX, AX/EAX */
+ [0xEF] = X86_OP_ENTRYrr(OUT, 0,z, 2,w), /* DX, AX/EAX */
[0xF8] = X86_OP_ENTRY0(CLC),
[0xF9] = X86_OP_ENTRY0(STC),
@@ -1799,19 +1891,20 @@ static void decode_root(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
}
-static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
- X86DecodedOp *op, X86OpType type)
+static int decode_modrm(DisasContext *s, CPUX86State *env,
+ X86DecodedInsn *decode, X86DecodedOp *op)
{
int modrm = get_modrm(s, env);
if ((modrm >> 6) == 3) {
op->n = (modrm & 7);
- if (type != X86_TYPE_Q && type != X86_TYPE_N) {
+ if (op->unit != X86_OP_MMX) {
op->n |= REX_B(s);
}
} else {
op->has_ea = true;
op->n = -1;
- decode->mem = gen_lea_modrm_0(env, s, get_modrm(s, env));
+ decode->mem = gen_lea_modrm_0(env, s, modrm,
+ decode->e.vex_class == 12);
}
return modrm;
}
@@ -1978,7 +2071,10 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
op->unit = X86_OP_SSE;
}
get_reg:
- op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s);
+ op->n = ((get_modrm(s, env) >> 3) & 7);
+ if (op->unit != X86_OP_MMX) {
+ op->n |= REX_R(s);
+ }
break;
case X86_TYPE_E: /* ALU modrm operand */
@@ -2036,7 +2132,7 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
/* fall through */
case X86_TYPE_nop: /* modrm operand decoded but not fetched */
get_modrm:
- decode_modrm(s, env, decode, op, type);
+ decode_modrm(s, env, decode, op);
break;
case X86_TYPE_O: /* Absolute address encoded in the instruction */
@@ -2199,8 +2295,12 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
return (s->cpuid_features & CPUID_CMOV);
case X86_FEAT_CLFLUSH:
return (s->cpuid_features & CPUID_CLFLUSH);
+ case X86_FEAT_CX8:
+ return (s->cpuid_features & CPUID_CX8);
case X86_FEAT_FXSR:
return (s->cpuid_features & CPUID_FXSR);
+ case X86_FEAT_CX16:
+ return (s->cpuid_ext_features & CPUID_EXT_CX16);
case X86_FEAT_F16C:
return (s->cpuid_ext_features & CPUID_EXT_F16C);
case X86_FEAT_FMA:
@@ -2424,6 +2524,7 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
CPUX86State *env = cpu_env(cpu);
X86DecodedInsn decode;
X86DecodeFunc decode_func = decode_root;
+ bool accept_lock = false;
uint8_t cc_live, b;
s->pc = s->base.pc_next;
@@ -2441,7 +2542,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
s->has_modrm = false;
s->prefix = 0;
- next_byte:
+ next_byte:;
+#ifdef TARGET_X86_64
+ /* clear any REX prefix followed by other prefixes. */
+ int rex;
+ rex = -1;
+ next_byte_rex:
+#endif
b = x86_ldub_code(env, s);
/* Collect prefixes. */
@@ -2484,13 +2591,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
- /* REX prefix */
- s->prefix |= PREFIX_REX;
- s->vex_w = (b >> 3) & 1;
- s->rex_r = (b & 0x4) << 1;
- s->rex_x = (b & 0x2) << 2;
- s->rex_b = (b & 0x1) << 3;
- goto next_byte;
+ /*
+ * REX prefix; ignored unless it is the last prefix, so
+ * for now just stash it
+ */
+ rex = b;
+ goto next_byte_rex;
}
break;
#endif
@@ -2517,10 +2623,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ
- | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
+ | PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
+ if (rex != -1) {
+ goto illegal_op;
+ }
s->rex_r = (~vex2 >> 4) & 8;
#endif
if (b == 0xc5) {
@@ -2560,6 +2669,16 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* Post-process prefixes. */
if (CODE64(s)) {
+#ifdef TARGET_X86_64
+ if (rex != -1) {
+ s->prefix |= PREFIX_REX;
+ s->vex_w = (rex >> 3) & 1;
+ s->rex_r = (rex & 0x4) << 1;
+ s->rex_x = (rex & 0x2) << 2;
+ s->rex_b = (rex & 0x1) << 3;
+ }
+#endif
+
/*
* In 64-bit mode, the default data size is 32-bit. Select 64-bit
* data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
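As an aside, the stash-and-apply logic above can be modelled by a small standalone routine; the helper name and byte sequences below are illustrative only (64-bit mode assumed), not part of the patch:

    #include <stdint.h>

    /* Toy model of the "REX only counts if it is the last prefix" rule. */
    static int effective_rex(const uint8_t *insn, int len)
    {
        int rex = -1;
        for (int i = 0; i < len; i++) {
            uint8_t b = insn[i];
            if ((b & 0xf0) == 0x40) {
                rex = b;                         /* stash the REX byte */
            } else if (b == 0x66 || b == 0x67 || b == 0xf0 ||
                       b == 0xf2 || b == 0xf3 || b == 0x2e ||
                       b == 0x36 || b == 0x3e || b == 0x26 ||
                       b == 0x64 || b == 0x65) {
                rex = -1;                        /* other prefixes discard it */
            } else {
                return rex;                      /* opcode byte reached */
            }
        }
        return -1;
    }

With this model, 66 48 89 C0 keeps REX.W (a 64-bit mov rax, rax), while 48 66 89 C0 drops it and decodes as a 16-bit mov ax, ax, matching the rex != -1 check in the post-processing block.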
@@ -2583,34 +2702,6 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
}
}
- /* Go back to old decoder for unconverted opcodes. */
- if (!(s->prefix & PREFIX_VEX)) {
- if ((b & ~7) == 0xd8) {
- if (!disas_insn_x87(s, cpu, b)) {
- goto unknown_op;
- }
- return;
- }
-
- if (b == 0x0f) {
- b = x86_ldub_code(env, s);
- switch (b) {
- case 0x00 ... 0x01: /* mostly privileged instructions */
- case 0x1a ... 0x1b: /* MPX */
- case 0xa3: /* bt */
- case 0xab: /* bts */
- case 0xb3: /* btr */
- case 0xba ... 0xbb: /* grp8, btc */
- case 0xc7: /* grp9 */
- disas_insn_old(s, cpu, b + 0x100);
- return;
- default:
- decode_func = do_decode_0F;
- break;
- }
- }
- }
-
memset(&decode, 0, sizeof(decode));
decode.cc_op = -1;
decode.b = b;
@@ -2662,9 +2753,10 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
if (decode.op[0].has_ea) {
s->prefix |= PREFIX_LOCK;
}
- decode.e.special = X86_SPECIAL_HasLock;
/* fallthrough */
case X86_SPECIAL_HasLock:
+ case X86_SPECIAL_BitTest:
+ accept_lock = decode.op[0].has_ea;
break;
case X86_SPECIAL_Op0_Rd:
@@ -2706,10 +2798,8 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
break;
}
- if (s->prefix & PREFIX_LOCK) {
- if (decode.e.special != X86_SPECIAL_HasLock || !decode.op[0].has_ea) {
- goto illegal_op;
- }
+ if ((s->prefix & PREFIX_LOCK) && !accept_lock) {
+ goto illegal_op;
}
if (!validate_vex(s, &decode)) {
@@ -2755,9 +2845,10 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
if (decode.e.special != X86_SPECIAL_NoLoadEA &&
(decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea)) {
- gen_load_ea(s, &decode.mem, decode.e.vex_class == 12);
+ gen_lea_modrm(s, &decode);
}
if (s->prefix & PREFIX_LOCK) {
+ assert(decode.op[0].has_ea && !decode.op[2].has_ea);
gen_load(s, &decode, 2, s->T1);
decode.e.gen(s, &decode);
} else {
@@ -2792,7 +2883,7 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_mov_i32(cpu_cc_op, decode.cc_op_dynamic);
}
set_cc_op(s, decode.cc_op);
- cc_live = cc_op_live[decode.cc_op];
+ cc_live = cc_op_live(decode.cc_op);
} else {
cc_live = 0;
}
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index f9bf9a6..7f23d37 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -114,6 +114,8 @@ typedef enum X86CPUIDFeature {
X86_FEAT_CLWB,
X86_FEAT_CMOV,
X86_FEAT_CMPCCXADD,
+ X86_FEAT_CX8,
+ X86_FEAT_CX16,
X86_FEAT_F16C,
X86_FEAT_FMA,
X86_FEAT_FSGSBASE,
@@ -190,6 +192,9 @@ typedef enum X86InsnSpecial {
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
+ /* Like HasLock, but operand 2 also provides a bit displacement into memory. */
+ X86_SPECIAL_BitTest,
+
/* Do not load effective address in s->A0 */
X86_SPECIAL_NoLoadEA,
@@ -261,12 +266,13 @@ typedef enum X86VEXSpecial {
typedef struct X86OpEntry X86OpEntry;
typedef struct X86DecodedInsn X86DecodedInsn;
+struct DisasContext;
/* Decode function for multibyte opcodes. */
-typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b);
+typedef void (*X86DecodeFunc)(struct DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b);
/* Code generation function. */
-typedef void (*X86GenFunc)(DisasContext *s, X86DecodedInsn *decode);
+typedef void (*X86GenFunc)(struct DisasContext *s, X86DecodedInsn *decode);
struct X86OpEntry {
/* Based on the is_decode flags. */
@@ -313,6 +319,14 @@ typedef struct X86DecodedOp {
};
} X86DecodedOp;
+typedef struct AddressParts {
+ int def_seg;
+ int base;
+ int index;
+ int scale;
+ target_long disp;
+} AddressParts;
+
struct X86DecodedInsn {
X86OpEntry e;
X86DecodedOp op[3];
@@ -330,3 +344,4 @@ struct X86DecodedInsn {
uint8_t b;
};
+static void gen_lea_modrm(struct DisasContext *s, X86DecodedInsn *decode);
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 016dce8..1a7fab93 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -19,22 +19,13 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * Sometimes, knowing what the backend has can produce better code.
- * The exact opcode to check depends on 32- vs. 64-bit.
- */
-#ifdef TARGET_X86_64
-#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64
-#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i64_valid
-#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i64_valid
-#else
-#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32
-#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i32_valid
-#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i32_valid
-#endif
-
+#define MMX_OFFSET(reg) \
+ ({ assert((reg) >= 0 && (reg) <= 7); \
+ offsetof(CPUX86State, fpregs[reg].mmx); })
-#define ZMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg])
+#define ZMM_OFFSET(reg) \
+ ({ assert((reg) >= 0 && (reg) <= 15); \
+ offsetof(CPUX86State, xmm_regs[reg]); })
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
@@ -73,9 +64,26 @@ static void gen_NM_exception(DisasContext *s)
gen_exception(s, EXCP07_PREX);
}
-static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib)
+static void gen_lea_modrm(DisasContext *s, X86DecodedInsn *decode)
{
- TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib);
+ AddressParts *mem = &decode->mem;
+ TCGv ea;
+
+ ea = gen_lea_modrm_1(s, *mem, decode->e.vex_class == 12);
+ if (decode->e.special == X86_SPECIAL_BitTest) {
+ MemOp ot = decode->op[1].ot;
+ int poslen = 8 << ot;
+ int opn = decode->op[2].n;
+ TCGv ofs = tcg_temp_new();
+
+ /* Extract the memory displacement from operand 2 (the bit offset). */
+ assert(decode->op[2].unit == X86_OP_INT && decode->op[2].ot != MO_8);
+ tcg_gen_sextract_tl(ofs, cpu_regs[opn], 3, poslen - 3);
+ tcg_gen_andi_tl(ofs, ofs, -1 << ot);
+ tcg_gen_add_tl(s->A0, ea, ofs);
+ ea = s->A0;
+ }
+
gen_lea_v_seg(s, ea, mem->def_seg, s->override);
}
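To read the BitTest address computation above in isolation: the upper bits of the bit-offset register contribute a signed byte displacement aligned down to the operand size, while the low bits later pick the bit inside the addressed word (see gen_bt_mask later in emit.c.inc). A minimal sketch of that arithmetic, with an illustrative helper name:

    #include <stdint.h>

    /* ot is the MemOp size: 1 = 16-bit, 2 = 32-bit, 3 = 64-bit. */
    static int64_t bt_mem_displacement(int64_t bitoff, int ot)
    {
        int poslen = 8 << ot;
        /* sextract(bitoff, 3, poslen - 3): signed byte index of the bit */
        int64_t byteoff = (int64_t)((uint64_t)bitoff << (64 - poslen))
                          >> (64 - poslen + 3);
        /* andi with -1 << ot: align down to the operand size */
        return byteoff & (-1LL << ot);
    }

For a 32-bit BT with bit offset 100 this gives a displacement of 12, and bit 100 & 31 = 4 of the dword at that address is the one tested.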
@@ -168,7 +176,7 @@ static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
static void compute_mmx_offset(X86DecodedOp *op)
{
if (!op->has_ea) {
- op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot);
+ op->offset = MMX_OFFSET(op->n) + mmx_offset(op->ot);
} else {
op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
}
@@ -264,24 +272,25 @@ static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
gen_op_ld_v(s, op->ot, v, s->A0);
}
- } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) {
- if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
- tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8);
- } else {
- tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8);
- }
-
} else if (op->ot < MO_TL && v == s->T0 &&
(decode->e.special == X86_SPECIAL_SExtT0 ||
decode->e.special == X86_SPECIAL_ZExtT0)) {
- if (decode->e.special == X86_SPECIAL_SExtT0) {
- tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN);
+ if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) {
+ if (decode->e.special == X86_SPECIAL_SExtT0) {
+ tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8);
+ } else {
+ tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8);
+ }
} else {
- tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot);
+ if (decode->e.special == X86_SPECIAL_SExtT0) {
+ tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN);
+ } else {
+ tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot);
+ }
}
} else {
- tcg_gen_mov_tl(v, cpu_regs[op->n]);
+ gen_op_mov_v_reg(s, op->ot, v, op->n);
}
break;
case X86_OP_IMM:
@@ -333,7 +342,7 @@ static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv
break;
case X86_OP_SEG:
/* Note that gen_movl_seg takes care of interrupt shadow and TF. */
- gen_movl_seg(s, op->n, s->T0);
+ gen_movl_seg(s, op->n, v, op->n == R_SS);
break;
case X86_OP_INT:
if (op->has_ea) {
@@ -407,6 +416,32 @@ static void prepare_update3_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op,
decode->cc_op = op;
}
+/* Set up decode->cc_* to modify CF while keeping other flags unchanged. */
+static void prepare_update_cf(X86DecodedInsn *decode, DisasContext *s, TCGv cf)
+{
+ switch (s->cc_op) {
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ decode->cc_src2 = cpu_cc_src2;
+ decode->cc_src = cpu_cc_src;
+ decode->cc_op = CC_OP_ADCOX;
+ break;
+
+ case CC_OP_EFLAGS:
+ case CC_OP_ADCX:
+ decode->cc_src = cpu_cc_src;
+ decode->cc_op = CC_OP_ADCX;
+ break;
+
+ default:
+ decode->cc_src = tcg_temp_new();
+ gen_mov_eflags(s, decode->cc_src);
+ decode->cc_op = CC_OP_ADCX;
+ break;
+ }
+ decode->cc_dst = cf;
+}
+
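prepare_update_cf() leans on the ADCX/ADOX cc_ops: at flag-materialisation time they are expected to take every flag except CF from cc_src and the new CF from cc_dst. A sketch of that folding, assuming it matches the cc_helper.c formula:

    static uint32_t adcx_fold_eflags(uint32_t cc_src, uint32_t new_cf)
    {
        return (cc_src & ~CC_C) | (new_cf ? CC_C : 0);
    }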
static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
{
MemOp ot = decode->op[0].ot;
@@ -1125,11 +1160,28 @@ static void gen_AAS(DisasContext *s, X86DecodedInsn *decode)
assume_cc_op(s, CC_OP_EFLAGS);
}
+static void gen_ADD(DisasContext *s, X86DecodedInsn *decode);
static void gen_ADC(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[1].ot;
- TCGv c_in = tcg_temp_new();
+ TCGv c_in;
+
+ /*
+ * Try to avoid CC_OP_ADC by transforming as follows:
+ * CC_ADC: src1 = dst + c_in, src2 = 0, src3 = c_in
+ * CC_ADD: src1 = dst + c_in, src2 = c_in (no src3)
+ *
+ * In general src2 vs. src3 matters when computing AF and OF, but not here:
+ * - AF is bit 4 of dst^src1^src2, which is bit 4 of dst^src1 in both cases
+ * - OF is a function of the two MSBs, and the MSB of src2 is zero in both cases
+ */
+ if (decode->e.op2 == X86_TYPE_I && decode->immediate == 0) {
+ gen_compute_eflags_c(s, s->T1);
+ gen_ADD(s, decode);
+ return;
+ }
+ c_in = tcg_temp_new();
gen_compute_eflags_c(s, c_in);
if (s->prefix & PREFIX_LOCK) {
tcg_gen_add_tl(s->T0, c_in, s->T1);
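The early return above exploits the identity that adc with an immediate of zero reduces to adding the carry bit; a one-line illustration (the helper name is made up):

    static uint32_t adc_with_zero_imm(uint32_t dst, int cf)
    {
        return dst + (uint32_t)cf;    /* dst + 0 + CF */
    }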
@@ -1299,7 +1351,7 @@ static void gen_BLSI(DisasContext *s, X86DecodedInsn *decode)
/* input in T1, which is ready for prepare_update2_cc */
tcg_gen_neg_tl(s->T0, s->T1);
tcg_gen_and_tl(s->T0, s->T0, s->T1);
- prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
+ prepare_update2_cc(decode, s, CC_OP_BLSIB + ot);
}
static void gen_BLSMSK(DisasContext *s, X86DecodedInsn *decode)
@@ -1385,6 +1437,121 @@ static void gen_BSWAP(DisasContext *s, X86DecodedInsn *decode)
tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_OZ);
}
+static TCGv gen_bt_mask(DisasContext *s, X86DecodedInsn *decode)
+{
+ MemOp ot = decode->op[1].ot;
+ TCGv mask = tcg_temp_new();
+
+ tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1);
+ tcg_gen_shl_tl(mask, tcg_constant_tl(1), s->T1);
+ return mask;
+}
+
+/* Expects truncated bit index in COUNT, 1 << COUNT in MASK. */
+static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src,
+ TCGv count, TCGv mask)
+{
+ TCGv cf;
+
+ /*
+ * C is the result of the test, Z is unchanged, and the others
+ * are all undefined.
+ */
+ if (s->cc_op == CC_OP_DYNAMIC || CC_OP_HAS_EFLAGS(s->cc_op)) {
+ /* Generate EFLAGS and replace the C bit. */
+ cf = tcg_temp_new();
+ tcg_gen_setcond_tl(TCG_COND_TSTNE, cf, src, mask);
+ prepare_update_cf(decode, s, cf);
+ } else {
+ /*
+ * Z was going to be computed from the non-zero status of CC_DST.
+ * We can get that same Z value (and the new C value) by leaving
+ * CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
+ * same width.
+ */
+ decode->cc_src = tcg_temp_new();
+ decode->cc_dst = cpu_cc_dst;
+ decode->cc_op = CC_OP_SARB + cc_op_size(s->cc_op);
+ tcg_gen_shr_tl(decode->cc_src, src, count);
+ }
+}
+
+static void gen_BT(DisasContext *s, X86DecodedInsn *decode)
+{
+ TCGv count = s->T1;
+ TCGv mask;
+
+ /*
+ * Try to ensure that the rhs of the TSTNE condition is a constant (and a
+ * power of two), as that is more readily available on most TCG backends.
+ *
+ * For an immediate bit number, gen_bt_mask()'s output is already a constant;
+ * for a register bit number, shift the source right and check bit 0.
+ */
+ if (decode->e.op2 == X86_TYPE_I) {
+ mask = gen_bt_mask(s, decode);
+ } else {
+ MemOp ot = decode->op[1].ot;
+
+ tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1);
+ tcg_gen_shr_tl(s->T0, s->T0, s->T1);
+
+ count = tcg_constant_tl(0);
+ mask = tcg_constant_tl(1);
+ }
+ gen_bt_flags(s, decode, s->T0, count, mask);
+}
+
+static void gen_BTC(DisasContext *s, X86DecodedInsn *decode)
+{
+ MemOp ot = decode->op[0].ot;
+ TCGv old = tcg_temp_new();
+ TCGv mask = gen_bt_mask(s, decode);
+
+ if (s->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_fetch_xor_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_mov_tl(old, s->T0);
+ tcg_gen_xor_tl(s->T0, s->T0, mask);
+ }
+
+ gen_bt_flags(s, decode, old, s->T1, mask);
+}
+
+static void gen_BTR(DisasContext *s, X86DecodedInsn *decode)
+{
+ MemOp ot = decode->op[0].ot;
+ TCGv old = tcg_temp_new();
+ TCGv mask = gen_bt_mask(s, decode);
+
+ if (s->prefix & PREFIX_LOCK) {
+ TCGv maskc = tcg_temp_new();
+ tcg_gen_not_tl(maskc, mask);
+ tcg_gen_atomic_fetch_and_tl(old, s->A0, maskc, s->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_mov_tl(old, s->T0);
+ tcg_gen_andc_tl(s->T0, s->T0, mask);
+ }
+
+ gen_bt_flags(s, decode, old, s->T1, mask);
+}
+
+static void gen_BTS(DisasContext *s, X86DecodedInsn *decode)
+{
+ MemOp ot = decode->op[0].ot;
+ TCGv old = tcg_temp_new();
+ TCGv mask = gen_bt_mask(s, decode);
+
+ if (s->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_fetch_or_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_mov_tl(old, s->T0);
+ tcg_gen_or_tl(s->T0, s->T0, mask);
+ }
+
+ gen_bt_flags(s, decode, old, s->T1, mask);
+}
+
static void gen_BZHI(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[0].ot;
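For the register-destination forms, the BT/BTC/BTR/BTS helpers above reduce to masking with a single bit; the memory forms additionally apply the displacement computed in gen_lea_modrm. A plain-C sketch of the generated semantics (illustrative only):

    #include <stdint.h>

    /* width is 16, 32 or 64; returns the new CF. */
    static int bt_model(uint64_t *dst, unsigned bitnum, int width, char op)
    {
        uint64_t mask = 1ULL << (bitnum & (width - 1));   /* gen_bt_mask() */
        int cf = (*dst & mask) != 0;                      /* gen_bt_flags() */

        switch (op) {
        case 'c': *dst ^= mask;  break;   /* BTC */
        case 'r': *dst &= ~mask; break;   /* BTR */
        case 's': *dst |= mask;  break;   /* BTS */
        default:  break;                  /* BT: no write-back */
        }
        return cf;
    }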
@@ -1470,7 +1637,7 @@ static void gen_CMC(DisasContext *s, X86DecodedInsn *decode)
static void gen_CMOVcc(DisasContext *s, X86DecodedInsn *decode)
{
- gen_cmovcc1(s, decode->b & 0xf, s->T0, s->T1);
+ gen_cmovcc(s, decode->b & 0xf, s->T0, s->T1);
}
static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode)
@@ -1533,22 +1700,22 @@ static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode)
switch (jcc_op) {
case JCC_O:
/* (src1 ^ src2) & (src1 ^ dst). newv is only used here for a moment */
+ cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(0);
tcg_gen_xor_tl(newv, s->cc_srcT, s->T0);
- tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv);
- tcg_gen_and_tl(s->tmp0, s->tmp0, newv);
- tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot);
- cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
+ tcg_gen_xor_tl(cmp_lhs, s->cc_srcT, cmpv);
+ tcg_gen_and_tl(cmp_lhs, cmp_lhs, newv);
+ tcg_gen_sextract_tl(cmp_lhs, cmp_lhs, 0, 8 << ot);
break;
case JCC_P:
- tcg_gen_ext8u_tl(s->tmp0, s->T0);
- tcg_gen_ctpop_tl(s->tmp0, s->tmp0);
- cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(1);
+ cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(1);
+ tcg_gen_ext8u_tl(cmp_lhs, s->T0);
+ tcg_gen_ctpop_tl(cmp_lhs, cmp_lhs);
break;
case JCC_S:
- tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot);
- cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
+ cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(0);
+ tcg_gen_sextract_tl(cmp_lhs, s->T0, 0, 8 << ot);
break;
default:
@@ -1579,11 +1746,7 @@ static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode)
static void gen_CMPS(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[2].ot;
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz_nz(s, ot, gen_cmps);
- } else {
- gen_cmps(s, ot);
- }
+ gen_repz_nz(s, ot, gen_cmps);
}
static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode)
@@ -1637,6 +1800,102 @@ static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode)
decode->cc_op = CC_OP_SUBB + ot;
}
+static void gen_CMPXCHG16B(DisasContext *s, X86DecodedInsn *decode)
+{
+#ifdef TARGET_X86_64
+ MemOp mop = MO_LE | MO_128 | MO_ALIGN;
+ TCGv_i64 t0, t1;
+ TCGv_i128 cmp, val;
+
+ cmp = tcg_temp_new_i128();
+ val = tcg_temp_new_i128();
+ tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
+ tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
+
+ /* Only require atomic with LOCK; non-parallel handled in generator. */
+ if (s->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
+ } else {
+ tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
+ }
+
+ tcg_gen_extr_i128_i64(s->T0, s->T1, val);
+
+ /* Determine success after the fact. */
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
+ tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ /* Update Z. */
+ gen_compute_eflags(s);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
+ tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
+
+ /*
+ * Extract the result values for the register pair. We may do this
+ * unconditionally, because on success (Z=1), the old value matches
+ * the previous value in RDX:RAX.
+ */
+ tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
+ tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
+#else
+ abort();
+#endif
+}
+
+static void gen_CMPXCHG8B(DisasContext *s, X86DecodedInsn *decode)
+{
+ TCGv_i64 cmp, val, old;
+ TCGv Z;
+
+ cmp = tcg_temp_new_i64();
+ val = tcg_temp_new_i64();
+ old = tcg_temp_new_i64();
+
+ /* Construct the comparison values from the register pair. */
+ tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
+ tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
+
+ /* Only require atomic with LOCK; non-parallel handled in generator. */
+ if (s->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_LEUQ);
+ } else {
+ tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
+ s->mem_index, MO_LEUQ);
+ }
+
+ /* Compute the required value of Z. */
+ tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
+ Z = tcg_temp_new();
+ tcg_gen_trunc_i64_tl(Z, cmp);
+
+ /*
+ * Extract the result values for the register pair.
+ * For 32-bit, we may do this unconditionally, because on success (Z=1),
+ * the old value matches the previous value in EDX:EAX. For x86_64,
+ * the store must be conditional, because we must leave the source
+ * registers unchanged on success, and zero-extend the writeback
+ * on failure (Z=0).
+ */
+ if (TARGET_LONG_BITS == 32) {
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
+ } else {
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_extr_i64_tl(s->T0, s->T1, old);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
+ s->T0, cpu_regs[R_EAX]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
+ s->T1, cpu_regs[R_EDX]);
+ }
+
+ /* Update Z. */
+ gen_compute_eflags(s);
+ tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
+}
+
static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode)
{
gen_update_cc_op(s);
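For reference, the architectural behaviour implemented by gen_CMPXCHG8B above fits in a few lines of plain C; this sketch ignores LOCK/atomicity and the 64-bit zero-extension subtleties discussed in the comments:

    #include <stdint.h>

    static int cmpxchg8b_model(uint64_t *m64, uint32_t *eax, uint32_t *edx,
                               uint32_t ebx, uint32_t ecx)
    {
        uint64_t cmp = ((uint64_t)*edx << 32) | *eax;
        uint64_t val = ((uint64_t)ecx << 32) | ebx;
        uint64_t old = *m64;

        if (old == cmp) {
            *m64 = val;                    /* success: ZF = 1 */
            return 1;
        }
        *eax = (uint32_t)old;              /* failure: load EDX:EAX, ZF = 0 */
        *edx = (uint32_t)(old >> 32);
        return 0;
    }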
@@ -1647,9 +1906,10 @@ static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode)
static void gen_CRC32(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[2].ot;
+ TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ gen_helper_crc32(s->T0, tmp, s->T1, tcg_constant_i32(8 << ot));
}
static void gen_CVTPI2Px(DisasContext *s, X86DecodedInsn *decode)
@@ -1978,11 +2238,7 @@ static void gen_INS(DisasContext *s, X86DecodedInsn *decode)
}
translator_io_start(&s->base);
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz(s, ot, gen_ins);
- } else {
- gen_ins(s, ot);
- }
+ gen_repz(s, ot, gen_ins);
}
static void gen_INSERTQ_i(DisasContext *s, X86DecodedInsn *decode)
@@ -2037,8 +2293,11 @@ static void gen_IRET(DisasContext *s, X86DecodedInsn *decode)
static void gen_Jcc(DisasContext *s, X86DecodedInsn *decode)
{
+ TCGLabel *taken = gen_new_label();
+
gen_bnd_jmp(s);
- gen_jcc(s, decode->b & 0xf, decode->immediate);
+ gen_jcc(s, decode->b & 0xf, taken);
+ gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
}
static void gen_JCXZ(DisasContext *s, X86DecodedInsn *decode)
@@ -2108,8 +2367,10 @@ static void gen_LAR(DisasContext *s, X86DecodedInsn *decode)
static void gen_LDMXCSR(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ gen_helper_ldmxcsr(tcg_env, tmp);
}
static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
@@ -2121,7 +2382,7 @@ static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
gen_op_ld_v(s, MO_16, s->T1, s->A0);
/* load the segment here to handle exceptions properly */
- gen_movl_seg(s, seg, s->T1);
+ gen_movl_seg(s, seg, s->T1, false);
}
static void gen_LDS(DisasContext *s, X86DecodedInsn *decode)
@@ -2163,11 +2424,7 @@ static void gen_LGS(DisasContext *s, X86DecodedInsn *decode)
static void gen_LODS(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[1].ot;
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz(s, ot, gen_lods);
- } else {
- gen_lods(s, ot);
- }
+ gen_repz(s, ot, gen_lods);
}
static void gen_LOOP(DisasContext *s, X86DecodedInsn *decode)
@@ -2188,7 +2445,7 @@ static void gen_LOOPE(DisasContext *s, X86DecodedInsn *decode)
gen_update_cc_op(s);
gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
gen_op_jz_ecx(s, not_taken);
- gen_jcc1(s, (JCC_Z << 1), taken); /* jz taken */
+ gen_jcc(s, (JCC_Z << 1), taken); /* jz taken */
gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
}
@@ -2200,7 +2457,7 @@ static void gen_LOOPNE(DisasContext *s, X86DecodedInsn *decode)
gen_update_cc_op(s);
gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
gen_op_jz_ecx(s, not_taken);
- gen_jcc1(s, (JCC_Z << 1) | 1, taken); /* jnz taken */
+ gen_jcc(s, (JCC_Z << 1) | 1, taken); /* jnz taken */
gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
}
@@ -2326,11 +2583,13 @@ static void gen_MOVDQ(DisasContext *s, X86DecodedInsn *decode)
static void gen_MOVMSK(DisasContext *s, X86DecodedInsn *decode)
{
typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn;
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
fn = s->prefix & PREFIX_DATA ? pd : ps;
- fn(s->tmp2_i32, tcg_env, OP_PTR2);
- tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ fn(tmp, tcg_env, OP_PTR2);
+ tcg_gen_extu_i32_tl(s->T0, tmp);
}
static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
@@ -2365,11 +2624,7 @@ static void gen_MOVq_dq(DisasContext *s, X86DecodedInsn *decode)
static void gen_MOVS(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[2].ot;
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz(s, ot, gen_movs);
- } else {
- gen_movs(s, ot);
- }
+ gen_repz(s, ot, gen_movs);
}
static void gen_MUL(DisasContext *s, X86DecodedInsn *decode)
@@ -2431,13 +2686,17 @@ static void gen_MULX(DisasContext *s, X86DecodedInsn *decode)
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
- tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
- s->tmp2_i32, s->tmp3_i32);
- tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
- tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
- break;
+ {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, s->T0);
+ tcg_gen_trunc_tl_i32(t1, s->T1);
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], t0);
+ tcg_gen_extu_i32_tl(s->T0, t1);
+ break;
+ }
case MO_64:
#endif
@@ -2531,11 +2790,7 @@ static void gen_OUTS(DisasContext *s, X86DecodedInsn *decode)
}
translator_io_start(&s->base);
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz(s, ot, gen_outs);
- } else {
- gen_outs(s, ot);
- }
+ gen_repz(s, ot, gen_outs);
}
static void gen_PALIGNR(DisasContext *s, X86DecodedInsn *decode)
@@ -2758,7 +3013,7 @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
while (vec_len > 8) {
vec_len -= 8;
- if (TCG_TARGET_HAS_extract2_tl) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) {
/*
* Load the next byte of the result into the high byte of T.
* TCG does a similar expansion of deposit to shl+extract2; by
@@ -3107,7 +3362,8 @@ static bool gen_eflags_adcox(DisasContext *s, X86DecodedInsn *decode, bool want_
* bit, we might as well fish CF out of EFLAGS and save a shift.
*/
if (want_carry && (!need_flags || s->cc_op == CC_OP_SHLB + MO_TL)) {
- tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << (s->cc_op - CC_OP_SHLB)) - 1);
+ MemOp size = cc_op_size(s->cc_op);
+ tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << size) - 1);
got_cf = true;
}
gen_mov_eflags(s, decode->cc_src);
@@ -3211,7 +3467,7 @@ static void gen_RCL(DisasContext *s, X86DecodedInsn *decode)
}
/* Compute high part, including incoming carry. */
- if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
+ if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) {
/* high = (T0 << 1) | cin */
TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
@@ -3263,7 +3519,7 @@ static void gen_RCR(DisasContext *s, X86DecodedInsn *decode)
}
/* Save incoming carry into high, it will be shifted later. */
- if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
+ if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) {
TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
} else {
@@ -3484,10 +3740,14 @@ static void gen_RORX(DisasContext *s, X86DecodedInsn *decode)
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b);
- tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
- break;
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ tcg_gen_rotri_i32(tmp, tmp, b);
+ tcg_gen_extu_i32_tl(s->T0, tmp);
+ break;
+ }
case MO_64:
#endif
@@ -3537,13 +3797,13 @@ static void gen_shift_dynamic_flags(DisasContext *s, X86DecodedInsn *decode, TCG
decode->cc_op_dynamic = tcg_temp_new_i32();
assert(decode->cc_dst == s->T0);
- if (cc_op_live[s->cc_op] & USES_CC_DST) {
+ if (cc_op_live(s->cc_op) & USES_CC_DST) {
decode->cc_dst = tcg_temp_new();
tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
cpu_cc_dst, s->T0);
}
- if (cc_op_live[s->cc_op] & USES_CC_SRC) {
+ if (cc_op_live(s->cc_op) & USES_CC_SRC) {
tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src, count, tcg_constant_tl(0),
cpu_cc_src, decode->cc_src);
}
@@ -3590,22 +3850,64 @@ static void gen_SARX(DisasContext *s, X86DecodedInsn *decode)
tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}
+static void gen_SUB(DisasContext *s, X86DecodedInsn *decode);
static void gen_SBB(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[0].ot;
- TCGv c_in = tcg_temp_new();
+ TCGv c_in;
+
+ /*
+ * Try to avoid CC_OP_SBB by transforming as follows:
+ * CC_SBB: src1 = dst + c_in, src2 = 0, src3 = c_in
+ * CC_SUB: src1 = dst + c_in, src2 = c_in (no src3)
+ *
+ * In general src2 vs. src3 matters when computing AF and OF, but not here:
+ * - AF is bit 4 of dst^src1^src2, which is bit 4 of dst^src1 in both cases
+ * - OF is a function of the two MSBs, and the MSB of src2 is zero in both cases
+ */
+ if (decode->e.op2 == X86_TYPE_I && decode->immediate == 0) {
+ gen_compute_eflags_c(s, s->T1);
+ gen_SUB(s, decode);
+ return;
+ }
+ c_in = tcg_temp_new();
gen_compute_eflags_c(s, c_in);
+
+ /*
+ * Here the change is as follows:
+ * CC_SBB: src1 = T0, src2 = T0, src3 = c_in
+ * CC_SUB: src1 = 0, src2 = c_in (no src3)
+ *
+ * The difference also does not matter:
+ * - AF is bit 4 of dst^src1^src2, but bit 4 of src1^src2 is zero in both cases,
+ * so AF comes straight from dst (in fact it is c_in)
+ * - for OF, src1 and src2 have the same sign in both cases, meaning there
+ * can be no overflow
+ */
+ if (decode->e.op2 != X86_TYPE_I && !decode->op[0].has_ea && decode->op[0].n == decode->op[2].n) {
+ if (s->cc_op == CC_OP_DYNAMIC) {
+ tcg_gen_neg_tl(s->T0, c_in);
+ } else {
+ /*
+ * Do not negate c_in because it will often be dead and only the
+ * instruction generated by negsetcond will survive.
+ */
+ gen_neg_setcc(s, JCC_B << 1, s->T0);
+ }
+ tcg_gen_movi_tl(s->cc_srcT, 0);
+ decode->cc_src = c_in;
+ decode->cc_dst = s->T0;
+ decode->cc_op = CC_OP_SUBB + ot;
+ return;
+ }
+
if (s->prefix & PREFIX_LOCK) {
tcg_gen_add_tl(s->T0, s->T1, c_in);
tcg_gen_neg_tl(s->T0, s->T0);
tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
s->mem_index, ot | MO_LE);
} else {
- /*
- * TODO: SBB reg, reg could use gen_prepare_eflags_c followed by
- * negsetcond, and CC_OP_SUBB as the cc_op.
- */
tcg_gen_sub_tl(s->T0, s->T0, s->T1);
tcg_gen_sub_tl(s->T0, s->T0, c_in);
}
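The reg == reg special case above covers the classic "sbb r, r" idiom, which materialises the carry flag as 0 or -1; in plain C:

    static uint32_t sbb_same_reg(int cf)
    {
        return 0u - (uint32_t)cf;    /* 0 if CF was clear, 0xffffffff if set */
    }

That is exactly what the neg/negsetcond paths emit, with CC_OP_SUBB + ot describing the resulting flags.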
@@ -3615,16 +3917,12 @@ static void gen_SBB(DisasContext *s, X86DecodedInsn *decode)
static void gen_SCAS(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[2].ot;
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz_nz(s, ot, gen_scas);
- } else {
- gen_scas(s, ot);
- }
+ gen_repz_nz(s, ot, gen_scas);
}
static void gen_SETcc(DisasContext *s, X86DecodedInsn *decode)
{
- gen_setcc1(s, decode->b & 0xf, s->T0);
+ gen_setcc(s, decode->b & 0xf, s->T0);
}
static void gen_SFENCE(DisasContext *s, X86DecodedInsn *decode)
@@ -3720,8 +4018,7 @@ static void gen_SHLD(DisasContext *s, X86DecodedInsn *decode)
}
decode->cc_dst = s->T0;
- decode->cc_src = s->tmp0;
- gen_shiftd_rm_T1(s, ot, false, count);
+ decode->cc_src = gen_shiftd_rm_T1(s, ot, false, count);
if (can_be_zero) {
gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot);
} else {
@@ -3773,8 +4070,7 @@ static void gen_SHRD(DisasContext *s, X86DecodedInsn *decode)
}
decode->cc_dst = s->T0;
- decode->cc_src = s->tmp0;
- gen_shiftd_rm_T1(s, ot, true, count);
+ decode->cc_src = gen_shiftd_rm_T1(s, ot, true, count);
if (can_be_zero) {
gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
} else {
@@ -3825,11 +4121,7 @@ static void gen_STMXCSR(DisasContext *s, X86DecodedInsn *decode)
static void gen_STOS(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[1].ot;
- if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
- gen_repz(s, ot, gen_stos);
- } else {
- gen_stos(s, ot);
- }
+ gen_repz(s, ot, gen_stos);
}
static void gen_SUB(DisasContext *s, X86DecodedInsn *decode)
@@ -4045,7 +4337,7 @@ static void gen_VCVTSI2Sx(DisasContext *s, X86DecodedInsn *decode)
}
return;
}
- in = s->tmp2_i32;
+ in = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(in, s->T1);
#else
in = s->T1;
@@ -4075,7 +4367,7 @@ static inline void gen_VCVTtSx2SI(DisasContext *s, X86DecodedInsn *decode,
return;
}
- out = s->tmp2_i32;
+ out = tcg_temp_new_i32();
#else
out = s->T0;
#endif
@@ -4127,7 +4419,7 @@ static void gen_VEXTRACTPS(DisasContext *s, X86DecodedInsn *decode)
gen_pextr(s, decode, MO_32);
}
-static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
+static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode, TCGv_i32 tmp)
{
int val = decode->immediate;
int dest_word = (val >> 4) & 3;
@@ -4144,7 +4436,7 @@ static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
}
if (new_mask != (val & 15)) {
- tcg_gen_st_i32(s->tmp2_i32, tcg_env,
+ tcg_gen_st_i32(tmp, tcg_env,
vector_elem_offset(&decode->op[0], MO_32, dest_word));
}
@@ -4163,15 +4455,19 @@ static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
static void gen_VINSERTPS_r(DisasContext *s, X86DecodedInsn *decode)
{
int val = decode->immediate;
- tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(tmp, tcg_env,
vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3));
- gen_vinsertps(s, decode);
+ gen_vinsertps(s, decode, tmp);
}
static void gen_VINSERTPS_m(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
- gen_vinsertps(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL);
+ gen_vinsertps(s, decode, tmp);
}
static void gen_VINSERTx128(DisasContext *s, X86DecodedInsn *decode)
@@ -4292,25 +4588,29 @@ static void gen_VMOVSD_ld(DisasContext *s, X86DecodedInsn *decode)
static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
- tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
}
static void gen_VMOVSS_ld(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL);
tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
- tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
}
static void gen_VMOVSS_st(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
- tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_qemu_st_i32(tmp, s->A0, s->mem_index, MO_LEUL);
}
static void gen_VPMASKMOV_st(DisasContext *s, X86DecodedInsn *decode)
@@ -4477,7 +4777,8 @@ static void gen_XOR(DisasContext *s, X86DecodedInsn *decode)
decode->op[2].unit == X86_OP_INT &&
decode->op[1].n == decode->op[2].n) {
tcg_gen_movi_tl(s->T0, 0);
- decode->cc_op = CC_OP_CLR;
+ decode->cc_op = CC_OP_EFLAGS;
+ decode->cc_src = tcg_constant_tl(CC_Z | CC_P);
} else {
MemOp ot = decode->op[1].ot;
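The zero idiom above now records concrete flag values instead of CC_OP_CLR: a logical op with a zero result has ZF and PF set (eight zero bits are even parity), SF clear, and CF = OF = 0, which is exactly CC_Z | CC_P. A small model of that flag computation (uses the GCC/Clang __builtin_parity; names are illustrative):

    /* Flags of a logical operation with result r. */
    static uint32_t logic_eflags(uint32_t r)
    {
        uint32_t f = 0;

        if (r == 0)                      f |= CC_Z;
        if (!__builtin_parity(r & 0xff)) f |= CC_P;
        if (r & 0x80000000u)             f |= CC_S;
        return f;                        /* CF = OF = 0 for logical ops */
    }

logic_eflags(0) == CC_Z | CC_P, the constant loaded into cc_src above.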
diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
index 72387aa..6fb8036 100644
--- a/target/i386/tcg/excp_helper.c
+++ b/target/i386/tcg/excp_helper.c
@@ -19,9 +19,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/log.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"
diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index e1b850f..b3b2382 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -21,8 +21,8 @@
#include <math.h>
#include "cpu.h"
#include "tcg-cpu.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "fpu/softfloat-macros.h"
@@ -135,16 +135,79 @@ static void fpu_set_exception(CPUX86State *env, int mask)
}
}
-static inline uint8_t save_exception_flags(CPUX86State *env)
+void cpu_init_fp_statuses(CPUX86State *env)
{
- uint8_t old_flags = get_float_exception_flags(&env->fp_status);
+ /*
+ * Initialise the non-runtime-varying fields of the various
+ * float_status words to x86 behaviour. This must be called at
+ * CPU reset because the float_status words are in the
+ * "zeroed on reset" portion of the CPU state struct.
+ * Fields in float_status that vary under guest control are set
+ * via the codepath for setting that register, e.g. cpu_set_fpuc().
+ */
+ /*
+ * Use x87 NaN propagation rules:
+ * SNaN + QNaN => return the QNaN
+ * two SNaNs => return the one with the larger significand, silenced
+ * two QNaNs => return the one with the larger significand
+ * SNaN and a non-NaN => return the SNaN, silenced
+ * QNaN and a non-NaN => return the QNaN
+ *
+ * If we get down to comparing significands and they are the same,
+ * return the NaN with the positive sign bit (if any).
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /*
+ * TODO: These are incorrect: the x86 Software Developer's Manual vol 1
+ * section 4.8.3.5 "Operating on SNaNs and QNaNs" says that the
+ * "larger significand" behaviour is only used for x87 FPU operations.
+ * For SSE the required behaviour is to always return the first NaN,
+ * which is float_2nan_prop_ab.
+ *
+ * mmx_status is used only for the AMD 3DNow! instructions, which
+ * are documented in the "3DNow! Technology Manual" as not supporting
+ * NaNs or infinities as inputs. The result of passing two NaNs is
+ * documented as "undefined", so we can do what we choose.
+ * (Strictly there is some behaviour we don't implement correctly
+ * for these "unsupported" NaN and Inf values, like "NaN * 0 == 0".)
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->mmx_status);
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->sse_status);
+ /*
+ * Only SSE has multiply-add instructions. In the SDM Section 14.5.2
+ * "Fused-Multiply-ADD (FMA) Numeric Behavior" the NaN handling is
+ * specified -- for 0 * inf + NaN the input NaN is selected, and if
+ * there are multiple input NaNs they are selected in the order a, b, c.
+ * We also do not raise Invalid for the 0 * inf + (Q)NaN case.
+ */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never |
+ float_infzeronan_suppress_invalid,
+ &env->sse_status);
+ set_float_3nan_prop_rule(float_3nan_prop_abc, &env->sse_status);
+ /* Default NaN: sign bit set, most significant frac bit set */
+ set_float_default_nan_pattern(0b11000000, &env->fp_status);
+ set_float_default_nan_pattern(0b11000000, &env->mmx_status);
+ set_float_default_nan_pattern(0b11000000, &env->sse_status);
+ /*
+ * x86 does flush-to-zero detection after rounding (the SDM
+ * section 10.2.3.3 on the FTZ bit of MXCSR says that we flush
+ * when we detect underflow, which x86 does after rounding).
+ */
+ set_float_ftz_detection(float_ftz_after_rounding, &env->fp_status);
+ set_float_ftz_detection(float_ftz_after_rounding, &env->mmx_status);
+ set_float_ftz_detection(float_ftz_after_rounding, &env->sse_status);
+}
+
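To make the 0b11000000 default-NaN pattern concrete for one format: read as binary32 it is sign bit set, quiet bit set, remaining fraction bits zero — the x86 "QNaN floating-point indefinite". A small illustration (not part of the patch):

    #include <stdint.h>

    static uint32_t x86_default_qnan32(void)
    {
        return (1u << 31)         /* sign bit set */
             | (0xffu << 23)      /* exponent all ones */
             | (1u << 22);        /* quiet bit; rest of the fraction is zero */
    }
    /* x86_default_qnan32() == 0xffc00000 */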
+static inline int save_exception_flags(CPUX86State *env)
+{
+ int old_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
return old_flags;
}
-static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
+static void merge_exception_flags(CPUX86State *env, int old_flags)
{
- uint8_t new_flags = get_float_exception_flags(&env->fp_status);
+ int new_flags = get_float_exception_flags(&env->fp_status);
float_raise(old_flags, &env->fp_status);
fpu_set_exception(env,
((new_flags & float_flag_invalid ? FPUS_IE : 0) |
@@ -152,12 +215,12 @@ static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
(new_flags & float_flag_overflow ? FPUS_OE : 0) |
(new_flags & float_flag_underflow ? FPUS_UE : 0) |
(new_flags & float_flag_inexact ? FPUS_PE : 0) |
- (new_flags & float_flag_input_denormal ? FPUS_DE : 0)));
+ (new_flags & float_flag_input_denormal_used ? FPUS_DE : 0)));
}
static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
floatx80 ret = floatx80_div(a, b, &env->fp_status);
merge_exception_flags(env, old_flags);
return ret;
@@ -177,7 +240,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
void helper_flds_FT0(CPUX86State *env, uint32_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@@ -190,7 +253,7 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val)
void helper_fldl_FT0(CPUX86State *env, uint64_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@@ -208,7 +271,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val)
void helper_flds_ST0(CPUX86State *env, uint32_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int new_fpstt;
union {
float32 f;
@@ -225,7 +288,7 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val)
void helper_fldl_ST0(CPUX86State *env, uint64_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int new_fpstt;
union {
float64 f;
@@ -275,7 +338,7 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val)
uint32_t helper_fsts_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@@ -288,7 +351,7 @@ uint32_t helper_fsts_ST0(CPUX86State *env)
uint64_t helper_fstl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@@ -301,7 +364,7 @@ uint64_t helper_fstl_ST0(CPUX86State *env)
int32_t helper_fist_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
@@ -315,7 +378,7 @@ int32_t helper_fist_ST0(CPUX86State *env)
int32_t helper_fistl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
@@ -328,7 +391,7 @@ int32_t helper_fistl_ST0(CPUX86State *env)
int64_t helper_fistll_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int64_t val;
val = floatx80_to_int64(ST0, &env->fp_status);
@@ -341,7 +404,7 @@ int64_t helper_fistll_ST0(CPUX86State *env)
int32_t helper_fistt_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -355,7 +418,7 @@ int32_t helper_fistt_ST0(CPUX86State *env)
int32_t helper_fisttl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -368,7 +431,7 @@ int32_t helper_fisttl_ST0(CPUX86State *env)
int64_t helper_fisttll_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int64_t val;
val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
@@ -464,7 +527,7 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
void helper_fcom_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare(ST0, FT0, &env->fp_status);
@@ -474,7 +537,7 @@ void helper_fcom_ST0_FT0(CPUX86State *env)
void helper_fucom_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
@@ -486,7 +549,7 @@ static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
void helper_fcomi_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -499,7 +562,7 @@ void helper_fcomi_ST0_FT0(CPUX86State *env)
void helper_fucomi_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -512,28 +575,28 @@ void helper_fucomi_ST0_FT0(CPUX86State *env)
void helper_fadd_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_add(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
@@ -552,28 +615,28 @@ void helper_fdivr_ST0_FT0(CPUX86State *env)
void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
merge_exception_flags(env, old_flags);
}
@@ -798,7 +861,7 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int v;
target_ulong mem_ref, mem_end;
int64_t val;
@@ -1073,12 +1136,12 @@ static const struct f2xm1_data f2xm1_table[65] = {
void helper_f2xm1(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t sig = extractFloatx80Frac(ST0);
int32_t exp = extractFloatx80Exp(ST0);
bool sign = extractFloatx80Sign(ST0);
- if (floatx80_invalid_encoding(ST0)) {
+ if (floatx80_invalid_encoding(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST0)) {
@@ -1306,7 +1369,7 @@ static const struct fpatan_data fpatan_table[9] = {
void helper_fpatan(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -1320,8 +1383,8 @@ void helper_fpatan(CPUX86State *env)
} else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_silence_nan(ST1, &env->fp_status);
- } else if (floatx80_invalid_encoding(ST0) ||
- floatx80_invalid_encoding(ST1)) {
+ } else if (floatx80_invalid_encoding(ST0, &env->fp_status) ||
+ floatx80_invalid_encoding(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST0)) {
@@ -1330,7 +1393,8 @@ void helper_fpatan(CPUX86State *env)
/* Pass this NaN through. */
} else if (floatx80_is_zero(ST1) && !arg0_sign) {
/* Pass this zero through. */
- } else if (((floatx80_is_infinity(ST0) && !floatx80_is_infinity(ST1)) ||
+ } else if (((floatx80_is_infinity(ST0, &env->fp_status) &&
+ !floatx80_is_infinity(ST1, &env->fp_status)) ||
arg0_exp - arg1_exp >= 80) &&
!arg0_sign) {
/*
@@ -1379,8 +1443,8 @@ void helper_fpatan(CPUX86State *env)
rexp = pi_exp;
rsig0 = pi_sig_high;
rsig1 = pi_sig_low;
- } else if (floatx80_is_infinity(ST1)) {
- if (floatx80_is_infinity(ST0)) {
+ } else if (floatx80_is_infinity(ST1, &env->fp_status)) {
+ if (floatx80_is_infinity(ST0, &env->fp_status)) {
if (arg0_sign) {
rexp = pi_34_exp;
rsig0 = pi_34_sig_high;
@@ -1399,7 +1463,8 @@ void helper_fpatan(CPUX86State *env)
rexp = pi_2_exp;
rsig0 = pi_2_sig_high;
rsig1 = pi_2_sig_low;
- } else if (floatx80_is_infinity(ST0) || arg0_exp - arg1_exp >= 80) {
+ } else if (floatx80_is_infinity(ST0, &env->fp_status) ||
+ arg0_exp - arg1_exp >= 80) {
/* ST0 is negative. */
rexp = pi_exp;
rsig0 = pi_sig_high;
@@ -1743,7 +1808,7 @@ void helper_fpatan(CPUX86State *env)
void helper_fxtract(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
CPU_LDoubleU temp;
temp.d = ST0;
@@ -1754,7 +1819,7 @@ void helper_fxtract(CPUX86State *env)
&env->fp_status);
fpush(env);
ST0 = temp.d;
- } else if (floatx80_invalid_encoding(ST0)) {
+ } else if (floatx80_invalid_encoding(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
fpush(env);
@@ -1766,10 +1831,10 @@ void helper_fxtract(CPUX86State *env)
}
fpush(env);
ST0 = ST1;
- } else if (floatx80_is_infinity(ST0)) {
+ } else if (floatx80_is_infinity(ST0, &env->fp_status)) {
fpush(env);
ST0 = ST1;
- ST1 = floatx80_infinity;
+ ST1 = floatx80_default_inf(0, &env->fp_status);
} else {
int expdif;
@@ -1777,7 +1842,7 @@ void helper_fxtract(CPUX86State *env)
int shift = clz64(temp.l.lower);
temp.l.lower <<= shift;
expdif = 1 - EXPBIAS - shift;
- float_raise(float_flag_input_denormal, &env->fp_status);
+ float_raise(float_flag_input_denormal_flushed, &env->fp_status);
} else {
expdif = EXPD(temp) - EXPBIAS;
}
@@ -1792,7 +1857,7 @@ void helper_fxtract(CPUX86State *env)
static void helper_fprem_common(CPUX86State *env, bool mod)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t quotient;
CPU_LDoubleU temp0, temp1;
int exp0, exp1, expdiff;
@@ -1805,7 +1870,8 @@ static void helper_fprem_common(CPUX86State *env, bool mod)
env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
if (floatx80_is_zero(ST0) || floatx80_is_zero(ST1) ||
exp0 == 0x7fff || exp1 == 0x7fff ||
- floatx80_invalid_encoding(ST0) || floatx80_invalid_encoding(ST1)) {
+ floatx80_invalid_encoding(ST0, &env->fp_status) ||
+ floatx80_invalid_encoding(ST1, &env->fp_status)) {
ST0 = floatx80_modrem(ST0, ST1, mod, &quotient, &env->fp_status);
} else {
if (exp0 == 0) {
@@ -1987,7 +2053,7 @@ static void helper_fyl2x_common(CPUX86State *env, floatx80 arg, int32_t *exp,
void helper_fyl2xp1(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2001,8 +2067,8 @@ void helper_fyl2xp1(CPUX86State *env)
} else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_silence_nan(ST1, &env->fp_status);
- } else if (floatx80_invalid_encoding(ST0) ||
- floatx80_invalid_encoding(ST1)) {
+ } else if (floatx80_invalid_encoding(ST0, &env->fp_status) ||
+ floatx80_invalid_encoding(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST0)) {
@@ -2085,7 +2151,7 @@ void helper_fyl2xp1(CPUX86State *env)
void helper_fyl2x(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2099,8 +2165,8 @@ void helper_fyl2x(CPUX86State *env)
} else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_silence_nan(ST1, &env->fp_status);
- } else if (floatx80_invalid_encoding(ST0) ||
- floatx80_invalid_encoding(ST1)) {
+ } else if (floatx80_invalid_encoding(ST0, &env->fp_status) ||
+ floatx80_invalid_encoding(ST1, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST0)) {
@@ -2110,7 +2176,7 @@ void helper_fyl2x(CPUX86State *env)
} else if (arg0_sign && !floatx80_is_zero(ST0)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_default_nan(&env->fp_status);
- } else if (floatx80_is_infinity(ST1)) {
+ } else if (floatx80_is_infinity(ST1, &env->fp_status)) {
FloatRelation cmp = floatx80_compare(ST0, floatx80_one,
&env->fp_status);
switch (cmp) {
@@ -2125,7 +2191,7 @@ void helper_fyl2x(CPUX86State *env)
ST1 = floatx80_default_nan(&env->fp_status);
break;
}
- } else if (floatx80_is_infinity(ST0)) {
+ } else if (floatx80_is_infinity(ST0, &env->fp_status)) {
if (floatx80_is_zero(ST1)) {
float_raise(float_flag_invalid, &env->fp_status);
ST1 = floatx80_default_nan(&env->fp_status);
@@ -2232,7 +2298,7 @@ void helper_fyl2x(CPUX86State *env)
void helper_fsqrt(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
if (floatx80_is_neg(ST0)) {
env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x400;
@@ -2258,15 +2324,16 @@ void helper_fsincos(CPUX86State *env)
void helper_frndint(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_round_to_int(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fscale(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
- if (floatx80_invalid_encoding(ST1) || floatx80_invalid_encoding(ST0)) {
+ int old_flags = save_exception_flags(env);
+ if (floatx80_invalid_encoding(ST1, &env->fp_status) ||
+ floatx80_invalid_encoding(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST1)) {
@@ -2278,11 +2345,11 @@ void helper_fscale(CPUX86State *env)
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_silence_nan(ST0, &env->fp_status);
}
- } else if (floatx80_is_infinity(ST1) &&
- !floatx80_invalid_encoding(ST0) &&
+ } else if (floatx80_is_infinity(ST1, &env->fp_status) &&
+ !floatx80_invalid_encoding(ST0, &env->fp_status) &&
!floatx80_is_any_nan(ST0)) {
if (floatx80_is_neg(ST1)) {
- if (floatx80_is_infinity(ST0)) {
+ if (floatx80_is_infinity(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else {
@@ -2295,15 +2362,14 @@ void helper_fscale(CPUX86State *env)
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else {
- ST0 = (floatx80_is_neg(ST0) ?
- floatx80_chs(floatx80_infinity) :
- floatx80_infinity);
+ ST0 = floatx80_default_inf(floatx80_is_neg(ST0),
+ &env->fp_status);
}
}
} else {
int n;
FloatX80RoundPrec save = env->fp_status.floatx80_rounding_precision;
- uint8_t save_flags = get_float_exception_flags(&env->fp_status);
+ int save_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
set_float_exception_flags(save_flags, &env->fp_status);
@@ -3188,6 +3254,7 @@ void update_mxcsr_status(CPUX86State *env)
/* Set exception flags. */
set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
+ (mxcsr & FPUS_DE ? float_flag_input_denormal_used : 0) |
(mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
(mxcsr & FPUS_OE ? float_flag_overflow : 0) |
(mxcsr & FPUS_UE ? float_flag_underflow : 0) |
@@ -3203,20 +3270,14 @@ void update_mxcsr_status(CPUX86State *env)
void update_mxcsr_from_sse_status(CPUX86State *env)
{
- uint8_t flags = get_float_exception_flags(&env->sse_status);
- /*
- * The MXCSR denormal flag has opposite semantics to
- * float_flag_input_denormal (the softfloat code sets that flag
- * only when flushing input denormals to zero, but SSE sets it
- * only when not flushing them to zero), so is not converted
- * here.
- */
+ int flags = get_float_exception_flags(&env->sse_status);
env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
+ (flags & float_flag_input_denormal_used ? FPUS_DE : 0) |
(flags & float_flag_divbyzero ? FPUS_ZE : 0) |
(flags & float_flag_overflow ? FPUS_OE : 0) |
(flags & float_flag_underflow ? FPUS_UE : 0) |
(flags & float_flag_inexact ? FPUS_PE : 0) |
- (flags & float_flag_output_denormal ? FPUS_UE | FPUS_PE :
+ (flags & float_flag_output_denormal_flushed ? FPUS_UE | FPUS_PE :
0));
}
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 15d6c6f..be011b0 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -20,7 +20,7 @@
#ifndef I386_HELPER_TCG_H
#define I386_HELPER_TCG_H
-#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16
@@ -58,6 +58,8 @@ static inline target_long lshift(target_long x, int n)
/* translate.c */
void tcg_x86_init(void);
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
/* excp_helper.c */
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
@@ -87,12 +89,15 @@ G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
#endif
/* cc_helper.c */
-extern const uint8_t parity_table[256];
+static inline unsigned int compute_pf(uint8_t x)
+{
+ return !parity8(x) * CC_P;
+}
/* misc_helper.c */
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
-/* sysemu/svm_helper.c */
+/* system/svm_helper.c */
#ifndef CONFIG_USER_ONLY
G_NORETURN void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
uint64_t exit_info_1, uintptr_t retaddr);
@@ -110,7 +115,7 @@ int exception_has_error_code(int intno);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
-/* sysemu/bpt_helper.c */
+/* system/bpt_helper.c */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
/*
diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c
index e1f9240..46741d9 100644
--- a/target/i386/tcg/int_helper.c
+++ b/target/i386/tcg/int_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
@@ -237,7 +236,7 @@ void helper_daa(CPUX86State *env)
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
/* well, speed is not an issue here, so we compute the flags by hand */
eflags |= (al == 0) << 6; /* zf */
- eflags |= parity_table[al]; /* pf */
+ eflags |= compute_pf(al);
eflags |= (al & 0x80); /* sf */
CC_SRC = eflags;
CC_OP = CC_OP_EFLAGS;
@@ -269,7 +268,7 @@ void helper_das(CPUX86State *env)
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
/* well, speed is not an issue here, so we compute the flags by hand */
eflags |= (al == 0) << 6; /* zf */
- eflags |= parity_table[al]; /* pf */
+ eflags |= compute_pf(al);
eflags |= (al & 0x80); /* sf */
CC_SRC = eflags;
CC_OP = CC_OP_EFLAGS;
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
index 3ef84e9..9e7c2d8 100644
--- a/target/i386/tcg/mem_helper.c
+++ b/target/i386/tcg/mem_helper.c
@@ -20,8 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"
diff --git a/target/i386/tcg/meson.build b/target/i386/tcg/meson.build
index 1105b35..c57e661 100644
--- a/target/i386/tcg/meson.build
+++ b/target/i386/tcg/meson.build
@@ -12,5 +12,5 @@ i386_ss.add(when: 'CONFIG_TCG', if_true: files(
'tcg-cpu.c',
'translate.c'), if_false: files('tcg-stub.c'))
-subdir('sysemu')
+subdir('system')
subdir('user')
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
index ed4cda8..2b5f092 100644
--- a/target/i386/tcg/misc_helper.c
+++ b/target/i386/tcg/misc_helper.c
@@ -21,7 +21,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "helper-tcg.h"
/*
diff --git a/target/i386/tcg/mpx_helper.c b/target/i386/tcg/mpx_helper.c
index 22423eed..fa8abcc 100644
--- a/target/i386/tcg/mpx_helper.c
+++ b/target/i386/tcg/mpx_helper.c
@@ -20,8 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "exec/target_page.h"
#include "helper-tcg.h"
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index aac092a..071f3fb 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -22,12 +22,13 @@
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"
+#include "tcg-cpu.h"
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask) \
@@ -94,7 +95,7 @@ static uint32_t popl(StackAccess *sa)
int get_pg_mode(CPUX86State *env)
{
- int pg_mode = 0;
+ int pg_mode = PG_MODE_PG;
if (!(env->cr[0] & CR0_PG_MASK)) {
return 0;
}
@@ -128,6 +129,22 @@ int get_pg_mode(CPUX86State *env)
return pg_mode;
}
+static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
+{
+ int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
+ int mmu_index_base =
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ (pl < 3 && (env->eflags & AC_MASK)
+ ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
+
+ return mmu_index_base + mmu_index_32;
+}
+
+int cpu_mmu_index_kernel(CPUX86State *env)
+{
+ return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
+}
+
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
uint32_t *e2_ptr, int selector,
@@ -309,10 +326,10 @@ static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
-/* return 0 if switching to a 16-bit selector */
-static int switch_tss_ra(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip, uintptr_t retaddr)
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ uint32_t error_code, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
target_ulong tss_base;
@@ -378,7 +395,7 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
/* X86Access avoids memory exceptions during the task switch */
mmu_index = cpu_mmu_index_kernel(env);
- access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
+ access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
MMU_DATA_STORE, mmu_index, retaddr);
if (source == SWITCH_TSS_CALL) {
@@ -386,7 +403,8 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
probe_access(env, tss_base, 2, MMU_DATA_STORE,
mmu_index, retaddr);
}
- access_prepare_mmu(&new, env, tss_base, tss_limit,
+ /* While true tss_limit may be larger, we don't access the iopb here. */
+ access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
MMU_DATA_LOAD, mmu_index, retaddr);
/* save the current state in the old TSS */
@@ -455,10 +473,6 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
new_segs[R_GS] = 0;
new_trap = 0;
}
- /* XXX: avoid a compiler warning, see
- http://support.amd.com/us/Processor_TechDocs/24593.pdf
- chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
- (void)new_trap;
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
@@ -581,14 +595,43 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
- return type >> 3;
+
+ if (has_error_code) {
+ int cpl = env->hflags & HF_CPL_MASK;
+ StackAccess sa;
+
+ /* push the error code */
+ sa.env = env;
+ sa.ra = retaddr;
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
+ sa.sp = env->regs[R_ESP];
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ sa.sp_mask = 0xffffffff;
+ } else {
+ sa.sp_mask = 0xffff;
+ }
+ sa.ss_base = env->segs[R_SS].base;
+ if (type & 8) {
+ pushl(&sa, error_code);
+ } else {
+ pushw(&sa, error_code);
+ }
+ SET_ESP(sa.sp, sa.sp_mask);
+ }
+
+ if (new_trap) {
+ env->dr[6] |= DR6_BT;
+ raise_exception_ra(env, EXCP01_DB, retaddr);
+ }
}
-static int switch_tss(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ int error_code)
{
- return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
+ has_error_code, error_code, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
@@ -694,7 +737,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
sa.env = env;
sa.ra = 0;
- sa.mmu_index = cpu_mmu_index_kernel(env);
if (type == 5) {
/* task gate */
@@ -702,23 +744,8 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
- if (has_error_code) {
- /* push the error code */
- if (env->segs[R_SS].flags & DESC_B_MASK) {
- sa.sp_mask = 0xffffffff;
- } else {
- sa.sp_mask = 0xffff;
- }
- sa.sp = env->regs[R_ESP];
- sa.ss_base = env->segs[R_SS].base;
- if (shift) {
- pushl(&sa, error_code);
- } else {
- pushw(&sa, error_code);
- }
- SET_ESP(sa.sp, sa.sp_mask);
- }
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
+ has_error_code, error_code);
return;
}
@@ -749,6 +776,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (e2 & DESC_C_MASK) {
dpl = cpl;
}
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
if (dpl < cpl) {
/* to inner privilege */
uint32_t esp;
@@ -926,7 +954,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
target_ulong ptr;
int type, dpl, selector, cpl, ist;
int has_error_code, new_stack;
- uint32_t e1, e2, e3, ss, eflags;
+ uint32_t e1, e2, e3, eflags;
target_ulong old_eip, offset;
bool set_rf;
StackAccess sa;
@@ -1000,14 +1028,13 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
sa.env = env;
sa.ra = 0;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
sa.sp_mask = -1;
sa.ss_base = 0;
if (dpl < cpl || ist != 0) {
/* to inner privilege */
new_stack = 1;
sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
- ss = 0;
} else {
/* to same privilege */
if (env->eflags & VM_MASK) {
@@ -1040,7 +1067,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
if (new_stack) {
- ss = 0 | dpl;
+ uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
}
env->regs[R_ESP] = sa.sp;
@@ -1135,7 +1162,7 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
sa.sp = env->regs[R_ESP];
sa.sp_mask = 0xffff;
sa.ss_base = env->segs[R_SS].base;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, 0);
if (is_int) {
old_eip = next_eip;
@@ -1514,7 +1541,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
+ false, 0, GETPC());
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1599,7 +1627,7 @@ void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
sa.sp = env->regs[R_ESP];
sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
sa.ss_base = env->segs[R_SS].base;
- sa.mmu_index = cpu_mmu_index_kernel(env);
+ sa.mmu_index = x86_mmu_index_pl(env, 0);
if (shift) {
pushl(&sa, env->segs[R_CS].selector);
@@ -1639,9 +1667,9 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
sa.env = env;
sa.ra = GETPC();
- sa.mmu_index = cpu_mmu_index_kernel(env);
if (e2 & DESC_S_MASK) {
+ /* "normal" far call, no stack switch possible */
if (!(e2 & DESC_CS_MASK)) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
@@ -1665,6 +1693,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
if (shift == 2) {
@@ -1725,7 +1754,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
+ false, 0, GETPC());
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1792,6 +1822,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
+ sa.mmu_index = x86_mmu_index_pl(env, dpl);
#ifdef TARGET_X86_64
if (shift == 2) {
ss = dpl; /* SS = NULL selector with RPL = new CPL */
@@ -1870,6 +1901,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
new_stack = 1;
} else {
/* to same privilege */
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
sa.sp = env->regs[R_ESP];
sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
sa.ss_base = env->segs[R_SS].base;
@@ -2234,7 +2266,8 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
if (type != 3) {
raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
- switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
+ false, 0, GETPC());
} else {
helper_ret_protected(env, shift, 1, 0, GETPC());
}
diff --git a/target/i386/tcg/seg_helper.h b/target/i386/tcg/seg_helper.h
index ebf1035..ea98e1a 100644
--- a/target/i386/tcg/seg_helper.h
+++ b/target/i386/tcg/seg_helper.h
@@ -20,6 +20,8 @@
#ifndef SEG_HELPER_H
#define SEG_HELPER_H
+#include "cpu.h"
+
//#define DEBUG_PCALL
#ifdef DEBUG_PCALL
@@ -31,12 +33,12 @@
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
+int cpu_mmu_index_kernel(CPUX86State *env);
+
/*
* TODO: Convert callers to compute cpu_mmu_index_kernel once
* and use *_mmuidx_ra directly.
*/
-#define cpu_ldub_kernel_ra(e, p, r) \
- cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
@@ -44,8 +46,6 @@
#define cpu_ldq_kernel_ra(e, p, r) \
cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
-#define cpu_stb_kernel_ra(e, p, v, r) \
- cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
@@ -53,12 +53,10 @@
#define cpu_stq_kernel_ra(e, p, v, r) \
cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
-#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
-#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
diff --git a/target/i386/tcg/sysemu/bpt_helper.c b/target/i386/tcg/sysemu/bpt_helper.c
deleted file mode 100644
index b29acf4..0000000
--- a/target/i386/tcg/sysemu/bpt_helper.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * i386 breakpoint helpers - sysemu code
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
-#include "tcg/helper-tcg.h"
-
-
-static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
-{
- return (dr7 >> (index * 2)) & 1;
-}
-
-static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
-{
- return (dr7 >> (index * 2)) & 2;
-
-}
-static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
-{
- return hw_global_breakpoint_enabled(dr7, index) ||
- hw_local_breakpoint_enabled(dr7, index);
-}
-
-static inline int hw_breakpoint_type(unsigned long dr7, int index)
-{
- return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
-}
-
-static inline int hw_breakpoint_len(unsigned long dr7, int index)
-{
- int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
- return (len == 2) ? 8 : len + 1;
-}
-
-static int hw_breakpoint_insert(CPUX86State *env, int index)
-{
- CPUState *cs = env_cpu(env);
- target_ulong dr7 = env->dr[7];
- target_ulong drN = env->dr[index];
- int err = 0;
-
- switch (hw_breakpoint_type(dr7, index)) {
- case DR7_TYPE_BP_INST:
- if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_breakpoint_insert(cs, drN, BP_CPU,
- &env->cpu_breakpoint[index]);
- }
- break;
-
- case DR7_TYPE_IO_RW:
- /* Notice when we should enable calls to bpt_io. */
- return hw_breakpoint_enabled(env->dr[7], index)
- ? HF_IOBPT_MASK : 0;
-
- case DR7_TYPE_DATA_WR:
- if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_WRITE,
- &env->cpu_watchpoint[index]);
- }
- break;
-
- case DR7_TYPE_DATA_RW:
- if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_ACCESS,
- &env->cpu_watchpoint[index]);
- }
- break;
- }
- if (err) {
- env->cpu_breakpoint[index] = NULL;
- }
- return 0;
-}
-
-static void hw_breakpoint_remove(CPUX86State *env, int index)
-{
- CPUState *cs = env_cpu(env);
-
- switch (hw_breakpoint_type(env->dr[7], index)) {
- case DR7_TYPE_BP_INST:
- if (env->cpu_breakpoint[index]) {
- cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
- env->cpu_breakpoint[index] = NULL;
- }
- break;
-
- case DR7_TYPE_DATA_WR:
- case DR7_TYPE_DATA_RW:
- if (env->cpu_watchpoint[index]) {
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
- env->cpu_watchpoint[index] = NULL;
- }
- break;
-
- case DR7_TYPE_IO_RW:
- /* HF_IOBPT_MASK cleared elsewhere. */
- break;
- }
-}
-
-void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
-{
- target_ulong old_dr7 = env->dr[7];
- int iobpt = 0;
- int i;
-
- new_dr7 |= DR7_FIXED_1;
-
- /* If nothing is changing except the global/local enable bits,
- then we can make the change more efficient. */
- if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
- /* Fold the global and local enable bits together into the
- global fields, then xor to show which registers have
- changed collective enable state. */
- int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
-
- for (i = 0; i < DR7_MAX_BP; i++) {
- if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
- hw_breakpoint_remove(env, i);
- }
- }
- env->dr[7] = new_dr7;
- for (i = 0; i < DR7_MAX_BP; i++) {
- if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
- iobpt |= hw_breakpoint_insert(env, i);
- } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
- && hw_breakpoint_enabled(new_dr7, i)) {
- iobpt |= HF_IOBPT_MASK;
- }
- }
- } else {
- for (i = 0; i < DR7_MAX_BP; i++) {
- hw_breakpoint_remove(env, i);
- }
- env->dr[7] = new_dr7;
- for (i = 0; i < DR7_MAX_BP; i++) {
- iobpt |= hw_breakpoint_insert(env, i);
- }
- }
-
- env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
-}
-
-bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
-{
- target_ulong dr6;
- int reg;
- bool hit_enabled = false;
-
- dr6 = env->dr[6] & ~0xf;
- for (reg = 0; reg < DR7_MAX_BP; reg++) {
- bool bp_match = false;
- bool wp_match = false;
-
- switch (hw_breakpoint_type(env->dr[7], reg)) {
- case DR7_TYPE_BP_INST:
- if (env->dr[reg] == env->eip) {
- bp_match = true;
- }
- break;
- case DR7_TYPE_DATA_WR:
- case DR7_TYPE_DATA_RW:
- if (env->cpu_watchpoint[reg] &&
- env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
- wp_match = true;
- }
- break;
- case DR7_TYPE_IO_RW:
- break;
- }
- if (bp_match || wp_match) {
- dr6 |= 1 << reg;
- if (hw_breakpoint_enabled(env->dr[7], reg)) {
- hit_enabled = true;
- }
- }
- }
-
- if (hit_enabled || force_dr6_update) {
- env->dr[6] = dr6;
- }
-
- return hit_enabled;
-}
-
-void breakpoint_handler(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- if (cs->watchpoint_hit) {
- if (cs->watchpoint_hit->flags & BP_CPU) {
- cs->watchpoint_hit = NULL;
- if (check_hw_breakpoints(env, false)) {
- /*
- * FIXME: #DB should be delayed by one instruction if
- * INHIBIT_IRQ is set (STI cannot trigger a watchpoint).
- * The delayed #DB should also fuse with one generated
- * by ICEBP (aka INT1).
- */
- raise_exception(env, EXCP01_DB);
- } else {
- cpu_loop_exit_noexc(cs);
- }
- }
- } else {
- if (cpu_breakpoint_test(cs, env->eip, BP_CPU)) {
- check_hw_breakpoints(env, true);
- raise_exception(env, EXCP01_DB);
- }
- }
-}
-
-target_ulong helper_get_dr(CPUX86State *env, int reg)
-{
- if (reg >= 4 && reg < 6) {
- if (env->cr[4] & CR4_DE_MASK) {
- raise_exception_ra(env, EXCP06_ILLOP, GETPC());
- } else {
- reg += 2;
- }
- }
-
- if (env->dr[7] & DR7_GD) {
- env->dr[7] &= ~DR7_GD;
- env->dr[6] |= DR6_BD;
- raise_exception_ra(env, EXCP01_DB, GETPC());
- }
-
- return env->dr[reg];
-}
-
-void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
-{
- if (reg >= 4 && reg < 6) {
- if (env->cr[4] & CR4_DE_MASK) {
- raise_exception_ra(env, EXCP06_ILLOP, GETPC());
- } else {
- reg += 2;
- }
- }
-
- if (env->dr[7] & DR7_GD) {
- env->dr[7] &= ~DR7_GD;
- env->dr[6] |= DR6_BD;
- raise_exception_ra(env, EXCP01_DB, GETPC());
- }
-
- if (reg < 4) {
- if (hw_breakpoint_enabled(env->dr[7], reg)
- && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
- hw_breakpoint_remove(env, reg);
- env->dr[reg] = t0;
- hw_breakpoint_insert(env, reg);
- } else {
- env->dr[reg] = t0;
- }
- } else {
- if (t0 & DR_RESERVED_MASK) {
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
- }
- if (reg == 6) {
- env->dr[6] = t0 | DR6_FIXED_1;
- } else {
- cpu_x86_update_dr7(env, t0);
- }
- }
-}
-
-/* Check if Port I/O is trapped by a breakpoint. */
-void helper_bpt_io(CPUX86State *env, uint32_t port,
- uint32_t size, target_ulong next_eip)
-{
- target_ulong dr7 = env->dr[7];
- int i, hit = 0;
-
- for (i = 0; i < DR7_MAX_BP; ++i) {
- if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
- && hw_breakpoint_enabled(dr7, i)) {
- int bpt_len = hw_breakpoint_len(dr7, i);
- if (port + size - 1 >= env->dr[i]
- && port <= env->dr[i] + bpt_len - 1) {
- hit |= 1 << i;
- }
- }
- }
-
- if (hit) {
- env->dr[6] = (env->dr[6] & ~0xf) | hit;
- env->eip = next_eip;
- raise_exception(env, EXCP01_DB);
- }
-}
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
deleted file mode 100644
index 8fb05b1..0000000
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ /dev/null
@@ -1,644 +0,0 @@
-/*
- * x86 exception helpers - sysemu code
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
-#include "exec/page-protection.h"
-#include "tcg/helper-tcg.h"
-
-typedef struct TranslateParams {
- target_ulong addr;
- target_ulong cr3;
- int pg_mode;
- int mmu_idx;
- int ptw_idx;
- MMUAccessType access_type;
-} TranslateParams;
-
-typedef struct TranslateResult {
- hwaddr paddr;
- int prot;
- int page_size;
-} TranslateResult;
-
-typedef enum TranslateFaultStage2 {
- S2_NONE,
- S2_GPA,
- S2_GPT,
-} TranslateFaultStage2;
-
-typedef struct TranslateFault {
- int exception_index;
- int error_code;
- target_ulong cr2;
- TranslateFaultStage2 stage2;
-} TranslateFault;
-
-typedef struct PTETranslate {
- CPUX86State *env;
- TranslateFault *err;
- int ptw_idx;
- void *haddr;
- hwaddr gaddr;
-} PTETranslate;
-
-static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
-{
- CPUTLBEntryFull *full;
- int flags;
-
- inout->gaddr = addr;
- flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
- inout->ptw_idx, true, &inout->haddr, &full, ra);
-
- if (unlikely(flags & TLB_INVALID_MASK)) {
- TranslateFault *err = inout->err;
-
- assert(inout->ptw_idx == MMU_NESTED_IDX);
- *err = (TranslateFault){
- .error_code = inout->env->error_code,
- .cr2 = addr,
- .stage2 = S2_GPT,
- };
- return false;
- }
- return true;
-}
-
-static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
-{
- if (likely(in->haddr)) {
- return ldl_p(in->haddr);
- }
- return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
-}
-
-static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
-{
- if (likely(in->haddr)) {
- return ldq_p(in->haddr);
- }
- return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
-}
-
-/*
- * Note that we can use a 32-bit cmpxchg for all page table entries,
- * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and
- * PG_DIRTY_MASK are all in the low 32 bits.
- */
-static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new)
-{
- uint32_t cmp;
-
- /* Does x86 really perform a rmw cycle on mmio for ptw? */
- start_exclusive();
- cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0);
- if (cmp == old) {
- cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0);
- }
- end_exclusive();
- return cmp == old;
-}
-
-static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
-{
- if (set & ~old) {
- uint32_t new = old | set;
- if (likely(in->haddr)) {
- old = cpu_to_le32(old);
- new = cpu_to_le32(new);
- return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old;
- }
- return ptw_setl_slow(in, old, new);
- }
- return true;
-}
-
-static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
- TranslateResult *out, TranslateFault *err,
- uint64_t ra)
-{
- const target_ulong addr = in->addr;
- const int pg_mode = in->pg_mode;
- const bool is_user = is_mmu_index_user(in->mmu_idx);
- const MMUAccessType access_type = in->access_type;
- uint64_t ptep, pte, rsvd_mask;
- PTETranslate pte_trans = {
- .env = env,
- .err = err,
- .ptw_idx = in->ptw_idx,
- };
- hwaddr pte_addr, paddr;
- uint32_t pkr;
- int page_size;
- int error_code;
-
- restart_all:
- rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
- rsvd_mask &= PG_ADDRESS_MASK;
- if (!(pg_mode & PG_MODE_NXE)) {
- rsvd_mask |= PG_NX_MASK;
- }
-
- if (pg_mode & PG_MODE_PAE) {
-#ifdef TARGET_X86_64
- if (pg_mode & PG_MODE_LMA) {
- if (pg_mode & PG_MODE_LA57) {
- /*
- * Page table level 5
- */
- pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- restart_5:
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & (rsvd_mask | PG_PSE_MASK)) {
- goto do_fault_rsvd;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_5;
- }
- ptep = pte ^ PG_NX_MASK;
- } else {
- pte = in->cr3;
- ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
- }
-
- /*
- * Page table level 4
- */
- pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- restart_4:
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & (rsvd_mask | PG_PSE_MASK)) {
- goto do_fault_rsvd;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_4;
- }
- ptep &= pte ^ PG_NX_MASK;
-
- /*
- * Page table level 3
- */
- pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- restart_3_lma:
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & rsvd_mask) {
- goto do_fault_rsvd;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_3_lma;
- }
- ptep &= pte ^ PG_NX_MASK;
- if (pte & PG_PSE_MASK) {
- /* 1 GB page */
- page_size = 1024 * 1024 * 1024;
- goto do_check_protect;
- }
- } else
-#endif
- {
- /*
- * Page table level 3
- */
- pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- rsvd_mask |= PG_HI_USER_MASK;
- restart_3_nolma:
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & (rsvd_mask | PG_NX_MASK)) {
- goto do_fault_rsvd;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_3_nolma;
- }
- ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
- }
-
- /*
- * Page table level 2
- */
- pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- restart_2_pae:
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & rsvd_mask) {
- goto do_fault_rsvd;
- }
- if (pte & PG_PSE_MASK) {
- /* 2 MB page */
- page_size = 2048 * 1024;
- ptep &= pte ^ PG_NX_MASK;
- goto do_check_protect;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_2_pae;
- }
- ptep &= pte ^ PG_NX_MASK;
-
- /*
- * Page table level 1
- */
- pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- pte = ptw_ldq(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- if (pte & rsvd_mask) {
- goto do_fault_rsvd;
- }
- /* combine pde and pte nx, user and rw protections */
- ptep &= pte ^ PG_NX_MASK;
- page_size = 4096;
- } else {
- /*
- * Page table level 2
- */
- pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- restart_2_nopae:
- pte = ptw_ldl(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- ptep = pte | PG_NX_MASK;
-
- /* if PSE bit is set, then we use a 4MB page */
- if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
- page_size = 4096 * 1024;
- /*
- * Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
- * Leave bits 20-13 in place for setting accessed/dirty bits below.
- */
- pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13));
- rsvd_mask = 0x200000;
- goto do_check_protect_pse36;
- }
- if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
- goto restart_2_nopae;
- }
-
- /*
- * Page table level 1
- */
- pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
- if (!ptw_translate(&pte_trans, pte_addr, ra)) {
- return false;
- }
- pte = ptw_ldl(&pte_trans, ra);
- if (!(pte & PG_PRESENT_MASK)) {
- goto do_fault;
- }
- /* combine pde and pte user and rw protections */
- ptep &= pte | PG_NX_MASK;
- page_size = 4096;
- rsvd_mask = 0;
- }
-
-do_check_protect:
- rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
-do_check_protect_pse36:
- if (pte & rsvd_mask) {
- goto do_fault_rsvd;
- }
- ptep ^= PG_NX_MASK;
-
- /* can the page can be put in the TLB? prot will tell us */
- if (is_user && !(ptep & PG_USER_MASK)) {
- goto do_fault_protect;
- }
-
- int prot = 0;
- if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
- prot |= PAGE_READ;
- if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
- prot |= PAGE_WRITE;
- }
- }
- if (!(ptep & PG_NX_MASK) &&
- (is_user ||
- !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
- prot |= PAGE_EXEC;
- }
-
- if (ptep & PG_USER_MASK) {
- pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
- } else {
- pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
- }
- if (pkr) {
- uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
- uint32_t pkr_ad = (pkr >> pk * 2) & 1;
- uint32_t pkr_wd = (pkr >> pk * 2) & 2;
- uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-
- if (pkr_ad) {
- pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
- } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
- pkr_prot &= ~PAGE_WRITE;
- }
- if ((pkr_prot & (1 << access_type)) == 0) {
- goto do_fault_pk_protect;
- }
- prot &= pkr_prot;
- }
-
- if ((prot & (1 << access_type)) == 0) {
- goto do_fault_protect;
- }
-
- /* yes, it can! */
- {
- uint32_t set = PG_ACCESSED_MASK;
- if (access_type == MMU_DATA_STORE) {
- set |= PG_DIRTY_MASK;
- } else if (!(pte & PG_DIRTY_MASK)) {
- /*
- * Only set write access if already dirty...
- * otherwise wait for dirty access.
- */
- prot &= ~PAGE_WRITE;
- }
- if (!ptw_setl(&pte_trans, pte, set)) {
- /*
- * We can arrive here from any of 3 levels and 2 formats.
- * The only safe thing is to restart the entire lookup.
- */
- goto restart_all;
- }
- }
-
- /* merge offset within page */
- paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
-
- /*
- * Note that NPT is walked (for both paging structures and final guest
- * addresses) using the address with the A20 bit set.
- */
- if (in->ptw_idx == MMU_NESTED_IDX) {
- CPUTLBEntryFull *full;
- int flags, nested_page_size;
-
- flags = probe_access_full(env, paddr, 0, access_type,
- MMU_NESTED_IDX, true,
- &pte_trans.haddr, &full, 0);
- if (unlikely(flags & TLB_INVALID_MASK)) {
- *err = (TranslateFault){
- .error_code = env->error_code,
- .cr2 = paddr,
- .stage2 = S2_GPA,
- };
- return false;
- }
-
- /* Merge stage1 & stage2 protection bits. */
- prot &= full->prot;
-
- /* Re-verify resulting protection. */
- if ((prot & (1 << access_type)) == 0) {
- goto do_fault_protect;
- }
-
- /* Merge stage1 & stage2 addresses to final physical address. */
- nested_page_size = 1 << full->lg_page_size;
- paddr = (full->phys_addr & ~(nested_page_size - 1))
- | (paddr & (nested_page_size - 1));
-
- /*
- * Use the larger of stage1 & stage2 page sizes, so that
- * invalidation works.
- */
- if (nested_page_size > page_size) {
- page_size = nested_page_size;
- }
- }
-
- out->paddr = paddr & x86_get_a20_mask(env);
- out->prot = prot;
- out->page_size = page_size;
- return true;
-
- do_fault_rsvd:
- error_code = PG_ERROR_RSVD_MASK;
- goto do_fault_cont;
- do_fault_protect:
- error_code = PG_ERROR_P_MASK;
- goto do_fault_cont;
- do_fault_pk_protect:
- assert(access_type != MMU_INST_FETCH);
- error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK;
- goto do_fault_cont;
- do_fault:
- error_code = 0;
- do_fault_cont:
- if (is_user) {
- error_code |= PG_ERROR_U_MASK;
- }
- switch (access_type) {
- case MMU_DATA_LOAD:
- break;
- case MMU_DATA_STORE:
- error_code |= PG_ERROR_W_MASK;
- break;
- case MMU_INST_FETCH:
- if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) {
- error_code |= PG_ERROR_I_D_MASK;
- }
- break;
- }
- *err = (TranslateFault){
- .exception_index = EXCP0E_PAGE,
- .error_code = error_code,
- .cr2 = addr,
- };
- return false;
-}
-
-static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
- uintptr_t retaddr)
-{
- uint64_t exit_info_1 = err->error_code;
-
- switch (err->stage2) {
- case S2_GPT:
- exit_info_1 |= SVM_NPTEXIT_GPT;
- break;
- case S2_GPA:
- exit_info_1 |= SVM_NPTEXIT_GPA;
- break;
- default:
- g_assert_not_reached();
- }
-
- x86_stq_phys(env_cpu(env),
- env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
- err->cr2);
- cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
-}
-
-static bool get_physical_address(CPUX86State *env, vaddr addr,
- MMUAccessType access_type, int mmu_idx,
- TranslateResult *out, TranslateFault *err,
- uint64_t ra)
-{
- TranslateParams in;
- bool use_stage2 = env->hflags2 & HF2_NPT_MASK;
-
- in.addr = addr;
- in.access_type = access_type;
-
- switch (mmu_idx) {
- case MMU_PHYS_IDX:
- break;
-
- case MMU_NESTED_IDX:
- if (likely(use_stage2)) {
- in.cr3 = env->nested_cr3;
- in.pg_mode = env->nested_pg_mode;
- in.mmu_idx =
- env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
- in.ptw_idx = MMU_PHYS_IDX;
-
- if (!mmu_translate(env, &in, out, err, ra)) {
- err->stage2 = S2_GPA;
- return false;
- }
- return true;
- }
- break;
-
- default:
- if (is_mmu_index_32(mmu_idx)) {
- addr = (uint32_t)addr;
- }
-
- if (likely(env->cr[0] & CR0_PG_MASK)) {
- in.cr3 = env->cr[3];
- in.mmu_idx = mmu_idx;
- in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;
- in.pg_mode = get_pg_mode(env);
-
- if (in.pg_mode & PG_MODE_LMA) {
- /* test virtual address sign extension */
- int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
- int64_t sext = (int64_t)addr >> shift;
- if (sext != 0 && sext != -1) {
- *err = (TranslateFault){
- .exception_index = EXCP0D_GPF,
- .cr2 = addr,
- };
- return false;
- }
- }
- return mmu_translate(env, &in, out, err, ra);
- }
- break;
- }
-
- /* No translation needed. */
- out->paddr = addr & x86_get_a20_mask(env);
- out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- out->page_size = TARGET_PAGE_SIZE;
- return true;
-}
-
-bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- CPUX86State *env = cpu_env(cs);
- TranslateResult out;
- TranslateFault err;
-
- if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err,
- retaddr)) {
- /*
- * Even if 4MB pages, we map only one 4KB page in the cache to
- * avoid filling it too fast.
- */
- assert(out.prot & (1 << access_type));
- tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
- out.paddr & TARGET_PAGE_MASK,
- cpu_get_mem_attrs(env),
- out.prot, mmu_idx, out.page_size);
- return true;
- }
-
- if (probe) {
- /* This will be used if recursing for stage2 translation. */
- env->error_code = err.error_code;
- return false;
- }
-
- if (err.stage2 != S2_NONE) {
- raise_stage2(env, &err, retaddr);
- }
-
- if (env->intercept_exceptions & (1 << err.exception_index)) {
- /* cr2 is not modified in case of exceptions */
- x86_stq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb, control.exit_info_2),
- err.cr2);
- } else {
- env->cr[2] = err.cr2;
- }
- raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr);
-}
-
-G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- X86CPU *cpu = X86_CPU(cs);
- handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
-}
diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c
deleted file mode 100644
index e0305ba..0000000
--- a/target/i386/tcg/sysemu/fpu_helper.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (sysemu code)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/main-loop.h"
-#include "cpu.h"
-#include "hw/irq.h"
-
-static qemu_irq ferr_irq;
-
-void x86_register_ferr_irq(qemu_irq irq)
-{
- ferr_irq = irq;
-}
-
-void fpu_check_raise_ferr_irq(CPUX86State *env)
-{
- if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
- bql_lock();
- qemu_irq_raise(ferr_irq);
- bql_unlock();
- return;
- }
-}
-
-void cpu_clear_ignne(void)
-{
- CPUX86State *env = &X86_CPU(first_cpu)->env;
- env->hflags2 &= ~HF2_IGNNE_MASK;
-}
-
-void cpu_set_ignne(void)
-{
- CPUX86State *env = &X86_CPU(first_cpu)->env;
-
- assert(bql_locked());
-
- env->hflags2 |= HF2_IGNNE_MASK;
- /*
- * We get here in response to a write to port F0h. The chipset should
- * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is
- * cleared, because FERR# and FP_IRQ are two separate pins on real
- * hardware. However, we don't model FERR# as a qemu_irq, so we just
- * do directly what the chipset would do, i.e. deassert FP_IRQ.
- */
- qemu_irq_lower(ferr_irq);
-}
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
deleted file mode 100644
index 094aa56..0000000
--- a/target/i386/tcg/sysemu/misc_helper.c
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * x86 misc helpers - sysemu code
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/main-loop.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/address-spaces.h"
-#include "exec/exec-all.h"
-#include "tcg/helper-tcg.h"
-#include "hw/i386/apic.h"
-
-void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
-{
- address_space_stb(&address_space_io, port, data,
- cpu_get_mem_attrs(env), NULL);
-}
-
-target_ulong helper_inb(CPUX86State *env, uint32_t port)
-{
- return address_space_ldub(&address_space_io, port,
- cpu_get_mem_attrs(env), NULL);
-}
-
-void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
-{
- address_space_stw(&address_space_io, port, data,
- cpu_get_mem_attrs(env), NULL);
-}
-
-target_ulong helper_inw(CPUX86State *env, uint32_t port)
-{
- return address_space_lduw(&address_space_io, port,
- cpu_get_mem_attrs(env), NULL);
-}
-
-void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
-{
- address_space_stl(&address_space_io, port, data,
- cpu_get_mem_attrs(env), NULL);
-}
-
-target_ulong helper_inl(CPUX86State *env, uint32_t port)
-{
- return address_space_ldl(&address_space_io, port,
- cpu_get_mem_attrs(env), NULL);
-}
-
-target_ulong helper_read_cr8(CPUX86State *env)
-{
- if (!(env->hflags2 & HF2_VINTR_MASK)) {
- return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
- } else {
- return env->int_ctl & V_TPR_MASK;
- }
-}
-
-void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
-{
- switch (reg) {
- case 0:
- /*
- * If we reach this point, the CR0 write intercept is disabled.
- * But we could still exit if the hypervisor has requested the selective
- * intercept for bits other than TS and MP
- */
- if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
- ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
- cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
- }
- cpu_x86_update_cr0(env, t0);
- break;
- case 3:
- if ((env->efer & MSR_EFER_LMA) &&
- (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- if (!(env->efer & MSR_EFER_LMA)) {
- t0 &= 0xffffffffUL;
- }
- cpu_x86_update_cr3(env, t0);
- break;
- case 4:
- if (t0 & cr4_reserved_bits(env)) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
- (env->hflags & HF_CS64_MASK)) {
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
- }
- cpu_x86_update_cr4(env, t0);
- break;
- case 8:
- if (!(env->hflags2 & HF2_VINTR_MASK)) {
- bql_lock();
- cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
- bql_unlock();
- }
- env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
-
- CPUState *cs = env_cpu(env);
- if (ctl_has_irq(env)) {
- cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
- }
- break;
- default:
- env->cr[reg] = t0;
- break;
- }
-}
-
-void helper_wrmsr(CPUX86State *env)
-{
- uint64_t val;
- CPUState *cs = env_cpu(env);
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
-
- val = ((uint32_t)env->regs[R_EAX]) |
- ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
-
- switch ((uint32_t)env->regs[R_ECX]) {
- case MSR_IA32_SYSENTER_CS:
- env->sysenter_cs = val & 0xffff;
- break;
- case MSR_IA32_SYSENTER_ESP:
- env->sysenter_esp = val;
- break;
- case MSR_IA32_SYSENTER_EIP:
- env->sysenter_eip = val;
- break;
- case MSR_IA32_APICBASE: {
- int ret;
-
- if (val & MSR_IA32_APICBASE_RESERVED) {
- goto error;
- }
-
- ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val);
- if (ret < 0) {
- goto error;
- }
- break;
- }
- case MSR_EFER:
- {
- uint64_t update_mask;
-
- update_mask = 0;
- if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
- update_mask |= MSR_EFER_SCE;
- }
- if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
- update_mask |= MSR_EFER_LME;
- }
- if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
- update_mask |= MSR_EFER_FFXSR;
- }
- if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
- update_mask |= MSR_EFER_NXE;
- }
- if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
- update_mask |= MSR_EFER_SVME;
- }
- if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
- update_mask |= MSR_EFER_FFXSR;
- }
- cpu_load_efer(env, (env->efer & ~update_mask) |
- (val & update_mask));
- }
- break;
- case MSR_STAR:
- env->star = val;
- break;
- case MSR_PAT:
- env->pat = val;
- break;
- case MSR_IA32_PKRS:
- if (val & 0xFFFFFFFF00000000ull) {
- goto error;
- }
- env->pkrs = val;
- tlb_flush(cs);
- break;
- case MSR_VM_HSAVE_PA:
- if (val & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
- goto error;
- }
- env->vm_hsave = val;
- break;
-#ifdef TARGET_X86_64
- case MSR_LSTAR:
- env->lstar = val;
- break;
- case MSR_CSTAR:
- env->cstar = val;
- break;
- case MSR_FMASK:
- env->fmask = val;
- break;
- case MSR_FSBASE:
- env->segs[R_FS].base = val;
- break;
- case MSR_GSBASE:
- env->segs[R_GS].base = val;
- break;
- case MSR_KERNELGSBASE:
- env->kernelgsbase = val;
- break;
-#endif
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- env->mtrr_var[((uint32_t)env->regs[R_ECX] -
- MSR_MTRRphysBase(0)) / 2].base = val;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- env->mtrr_var[((uint32_t)env->regs[R_ECX] -
- MSR_MTRRphysMask(0)) / 2].mask = val;
- break;
- case MSR_MTRRfix64K_00000:
- env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
- MSR_MTRRfix64K_00000] = val;
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
- MSR_MTRRfix16K_80000 + 1] = val;
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
- MSR_MTRRfix4K_C0000 + 3] = val;
- break;
- case MSR_MTRRdefType:
- env->mtrr_deftype = val;
- break;
- case MSR_MCG_STATUS:
- env->mcg_status = val;
- break;
- case MSR_MCG_CTL:
- if ((env->mcg_cap & MCG_CTL_P)
- && (val == 0 || val == ~(uint64_t)0)) {
- env->mcg_ctl = val;
- }
- break;
- case MSR_TSC_AUX:
- env->tsc_aux = val;
- break;
- case MSR_IA32_MISC_ENABLE:
- env->msr_ia32_misc_enable = val;
- break;
- case MSR_IA32_BNDCFGS:
- /* FIXME: #GP if reserved bits are set. */
- /* FIXME: Extend highest implemented bit of linear address. */
- env->msr_bndcfgs = val;
- cpu_sync_bndcs_hflags(env);
- break;
- case MSR_APIC_START ... MSR_APIC_END: {
- int ret;
- int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
-
- bql_lock();
- ret = apic_msr_write(index, val);
- bql_unlock();
- if (ret < 0) {
- goto error;
- }
-
- break;
- }
- default:
- if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
- && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
- (4 * env->mcg_cap & 0xff)) {
- uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
- if ((offset & 0x3) != 0
- || (val == 0 || val == ~(uint64_t)0)) {
- env->mce_banks[offset] = val;
- }
- break;
- }
- /* XXX: exception? */
- break;
- }
- return;
-error:
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
-}
-
-void helper_rdmsr(CPUX86State *env)
-{
- X86CPU *x86_cpu = env_archcpu(env);
- uint64_t val;
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
-
- switch ((uint32_t)env->regs[R_ECX]) {
- case MSR_IA32_SYSENTER_CS:
- val = env->sysenter_cs;
- break;
- case MSR_IA32_SYSENTER_ESP:
- val = env->sysenter_esp;
- break;
- case MSR_IA32_SYSENTER_EIP:
- val = env->sysenter_eip;
- break;
- case MSR_IA32_APICBASE:
- val = cpu_get_apic_base(env_archcpu(env)->apic_state);
- break;
- case MSR_EFER:
- val = env->efer;
- break;
- case MSR_STAR:
- val = env->star;
- break;
- case MSR_PAT:
- val = env->pat;
- break;
- case MSR_IA32_PKRS:
- val = env->pkrs;
- break;
- case MSR_VM_HSAVE_PA:
- val = env->vm_hsave;
- break;
- case MSR_IA32_PERF_STATUS:
- /* tsc_increment_by_tick */
- val = 1000ULL;
- /* CPU multiplier */
- val |= (((uint64_t)4ULL) << 40);
- break;
-#ifdef TARGET_X86_64
- case MSR_LSTAR:
- val = env->lstar;
- break;
- case MSR_CSTAR:
- val = env->cstar;
- break;
- case MSR_FMASK:
- val = env->fmask;
- break;
- case MSR_FSBASE:
- val = env->segs[R_FS].base;
- break;
- case MSR_GSBASE:
- val = env->segs[R_GS].base;
- break;
- case MSR_KERNELGSBASE:
- val = env->kernelgsbase;
- break;
- case MSR_TSC_AUX:
- val = env->tsc_aux;
- break;
-#endif
- case MSR_SMI_COUNT:
- val = env->msr_smi_count;
- break;
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
- MSR_MTRRphysBase(0)) / 2].base;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
- MSR_MTRRphysMask(0)) / 2].mask;
- break;
- case MSR_MTRRfix64K_00000:
- val = env->mtrr_fixed[0];
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
- MSR_MTRRfix16K_80000 + 1];
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
- MSR_MTRRfix4K_C0000 + 3];
- break;
- case MSR_MTRRdefType:
- val = env->mtrr_deftype;
- break;
- case MSR_MTRRcap:
- if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
- val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
- MSR_MTRRcap_WC_SUPPORTED;
- } else {
- /* XXX: exception? */
- val = 0;
- }
- break;
- case MSR_MCG_CAP:
- val = env->mcg_cap;
- break;
- case MSR_MCG_CTL:
- if (env->mcg_cap & MCG_CTL_P) {
- val = env->mcg_ctl;
- } else {
- val = 0;
- }
- break;
- case MSR_MCG_STATUS:
- val = env->mcg_status;
- break;
- case MSR_IA32_MISC_ENABLE:
- val = env->msr_ia32_misc_enable;
- break;
- case MSR_IA32_BNDCFGS:
- val = env->msr_bndcfgs;
- break;
- case MSR_IA32_UCODE_REV:
- val = x86_cpu->ucode_rev;
- break;
- case MSR_CORE_THREAD_COUNT: {
- CPUState *cs = CPU(x86_cpu);
- val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16);
- break;
- }
- case MSR_APIC_START ... MSR_APIC_END: {
- int ret;
- int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
-
- bql_lock();
- ret = apic_msr_read(index, &val);
- bql_unlock();
- if (ret < 0) {
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
- }
-
- break;
- }
- default:
- if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
- && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
- (4 * env->mcg_cap & 0xff)) {
- uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
- val = env->mce_banks[offset];
- break;
- }
- /* XXX: exception? */
- val = 0;
- break;
- }
- env->regs[R_EAX] = (uint32_t)(val);
- env->regs[R_EDX] = (uint32_t)(val >> 32);
-}
-
-void helper_flush_page(CPUX86State *env, target_ulong addr)
-{
- tlb_flush_page(env_cpu(env), addr);
-}
-
-G_NORETURN void helper_hlt(CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
-
- do_end_instruction(env);
- cs->halted = 1;
- cs->exception_index = EXCP_HLT;
- cpu_loop_exit(cs);
-}
-
-void helper_monitor(CPUX86State *env, target_ulong ptr)
-{
- if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
- }
- /* XXX: store address? */
- cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
-}
-
-G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
-{
- CPUState *cs = env_cpu(env);
-
- if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
- }
- cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
- env->eip += next_eip_addend;
-
- /* XXX: not complete but not completely erroneous */
- if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
- helper_pause(env);
- } else {
- helper_hlt(env);
- }
-}
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
deleted file mode 100644
index 05174a7..0000000
--- a/target/i386/tcg/sysemu/seg_helper.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * x86 segmentation related helpers: (sysemu-only code)
- * TSS, interrupts, system calls, jumps and call/task gates, descriptors
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "qemu/main-loop.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "tcg/helper-tcg.h"
-#include "../seg_helper.h"
-
-void helper_syscall(CPUX86State *env, int next_eip_addend)
-{
- int selector;
-
- if (!(env->efer & MSR_EFER_SCE)) {
- raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
- }
- selector = (env->star >> 32) & 0xffff;
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- int code64;
-
- env->regs[R_ECX] = env->eip + next_eip_addend;
- env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
-
- code64 = env->hflags & HF_CS64_MASK;
-
- env->eflags &= ~(env->fmask | RF_MASK);
- cpu_load_eflags(env, env->eflags, 0);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
- DESC_L_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_W_MASK | DESC_A_MASK);
- if (code64) {
- env->eip = env->lstar;
- } else {
- env->eip = env->cstar;
- }
- } else
-#endif
- {
- env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
-
- env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_W_MASK | DESC_A_MASK);
- env->eip = (uint32_t)env->star;
- }
-}
-
-void handle_even_inj(CPUX86State *env, int intno, int is_int,
- int error_code, int is_hw, int rm)
-{
- CPUState *cs = env_cpu(env);
- uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.event_inj));
-
- if (!(event_inj & SVM_EVTINJ_VALID)) {
- int type;
-
- if (is_int) {
- type = SVM_EVTINJ_TYPE_SOFT;
- } else {
- type = SVM_EVTINJ_TYPE_EXEPT;
- }
- event_inj = intno | type | SVM_EVTINJ_VALID;
- if (!rm && exception_has_error_code(intno)) {
- event_inj |= SVM_EVTINJ_VALID_ERR;
- x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.event_inj_err),
- error_code);
- }
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
- event_inj);
- }
-}
-
-void x86_cpu_do_interrupt(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- if (cs->exception_index == EXCP_VMEXIT) {
- assert(env->old_exception == -1);
- do_vmexit(env);
- } else {
- do_interrupt_all(cpu, cs->exception_index,
- env->exception_is_int,
- env->error_code,
- env->exception_next_eip, 0);
- /* successfully delivered */
- env->old_exception = -1;
- }
-}
-
-bool x86_cpu_exec_halt(CPUState *cpu)
-{
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUX86State *env = &x86_cpu->env;
-
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
- bql_lock();
- apic_poll_irq(x86_cpu->apic_state);
- cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
- bql_unlock();
- }
-
- if (!cpu_has_work(cpu)) {
- return false;
- }
-
- /* Complete HLT instruction. */
- if (env->eflags & TF_MASK) {
- env->dr[6] |= DR6_BS;
- do_interrupt_all(x86_cpu, EXCP01_DB, 0, 0, env->eip, 0);
- }
- return true;
-}
-
-bool x86_need_replay_interrupt(int interrupt_request)
-{
- /*
- * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
- * "real" interrupt event later. It does not need to be recorded for
- * replay purposes.
- */
- return !(interrupt_request & CPU_INTERRUPT_POLL);
-}
-
-bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- int intno;
-
- interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
- if (!interrupt_request) {
- return false;
- }
-
- /* Don't process multiple interrupt requests in a single call.
- * This is required to make icount-driven execution deterministic.
- */
- switch (interrupt_request) {
- case CPU_INTERRUPT_POLL:
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
- apic_poll_irq(cpu->apic_state);
- break;
- case CPU_INTERRUPT_SIPI:
- do_cpu_sipi(cpu);
- break;
- case CPU_INTERRUPT_SMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
- do_smm_enter(cpu);
- break;
- case CPU_INTERRUPT_NMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
- env->hflags2 |= HF2_NMI_MASK;
- do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
- break;
- case CPU_INTERRUPT_MCE:
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
- do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
- break;
- case CPU_INTERRUPT_HARD:
- cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
- cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_VIRQ);
- intno = cpu_get_pic_interrupt(env);
- qemu_log_mask(CPU_LOG_INT,
- "Servicing hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- break;
- case CPU_INTERRUPT_VIRQ:
- cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
- intno = x86_ldl_phys(cs, env->vm_vmcb
- + offsetof(struct vmcb, control.int_vector));
- qemu_log_mask(CPU_LOG_INT,
- "Servicing virtual hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- env->int_ctl &= ~V_IRQ_MASK;
- break;
- }
-
- /* Ensure that no TB jump will be modified as the program flow was changed. */
- return true;
-}
-
-/* check if Port I/O is allowed in TSS */
-void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
-{
- uintptr_t retaddr = GETPC();
- uint32_t io_offset, val, mask;
-
- /* TSS must be a valid 32 bit one */
- if (!(env->tr.flags & DESC_P_MASK) ||
- ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
- env->tr.limit < 103) {
- goto fail;
- }
- io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
- io_offset += (addr >> 3);
- /* Note: the check needs two bytes */
- if ((io_offset + 1) > env->tr.limit) {
- goto fail;
- }
- val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
- val >>= (addr & 7);
- mask = (1 << size) - 1;
- /* all bits must be zero to allow the I/O */
- if ((val & mask) != 0) {
- fail:
- raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
- }
-}
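
Note: helper_check_io() above implements the architectural I/O permission
bitmap walk: the 16-bit bitmap base is read from TSS offset 0x66, two bytes
are fetched so the check can straddle a byte boundary, and the access is
allowed only if all 'size' bits starting at the port's bit are clear. The
same bit test over a plain byte array (hypothetical helper, no TSS limit
checks):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Return true if ports [port, port + size) are permitted by the I/O
     * permission bitmap 'iopb' (a clear bit means the port is allowed),
     * mirroring the 16-bit load and shift in helper_check_io().
     */
    static bool io_allowed(const uint8_t *iopb, unsigned port, unsigned size)
    {
        unsigned byte = port >> 3;
        uint16_t val = iopb[byte] | (iopb[byte + 1] << 8);
        uint16_t mask = (1u << size) - 1;

        return ((val >> (port & 7)) & mask) == 0;
    }
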
diff --git a/target/i386/tcg/sysemu/smm_helper.c b/target/i386/tcg/sysemu/smm_helper.c
deleted file mode 100644
index a45b565..0000000
--- a/target/i386/tcg/sysemu/smm_helper.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * x86 SMM helpers (sysemu-only)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "exec/log.h"
-#include "tcg/helper-tcg.h"
-
-
-/* SMM support */
-
-#ifdef TARGET_X86_64
-#define SMM_REVISION_ID 0x00020064
-#else
-#define SMM_REVISION_ID 0x00020000
-#endif
-
-void do_smm_enter(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
- CPUState *cs = CPU(cpu);
- target_ulong sm_state;
- SegmentCache *dt;
- int i, offset;
-
- qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
- log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
-
- env->msr_smi_count++;
- env->hflags |= HF_SMM_MASK;
- if (env->hflags2 & HF2_NMI_MASK) {
- env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
- } else {
- env->hflags2 |= HF2_NMI_MASK;
- }
-
- sm_state = env->smbase + 0x8000;
-
-#ifdef TARGET_X86_64
- for (i = 0; i < 6; i++) {
- dt = &env->segs[i];
- offset = 0x7e00 + i * 16;
- x86_stw_phys(cs, sm_state + offset, dt->selector);
- x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
- x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
- x86_stq_phys(cs, sm_state + offset + 8, dt->base);
- }
-
- x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
- x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
-
- x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
- x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
- x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
- x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
-
- x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
- x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
-
- x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
- x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
- x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
- x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
-
- /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
- is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
- 7EA0-7ED7 as "reserved". What's this, and what's really
- supposed to happen? */
- x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
-
- x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
- x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
- x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
- x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
- x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
- x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
- x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
- x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
- for (i = 8; i < 16; i++) {
- x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
- }
- x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
- x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
- x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
- x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
-
- x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
- x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
- x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
-
- x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
- x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
-#else
- x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
- x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
- x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
- x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
- x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
- x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
- x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
- x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
- x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
- x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
- x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
- x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
- x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
- x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
-
- x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
- x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
- x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
- x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-
- x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
- x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
- x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
- x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-
- x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
- x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
-
- x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
- x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
-
- for (i = 0; i < 6; i++) {
- dt = &env->segs[i];
- if (i < 3) {
- offset = 0x7f84 + i * 12;
- } else {
- offset = 0x7f2c + (i - 3) * 12;
- }
- x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
- x86_stl_phys(cs, sm_state + offset + 8, dt->base);
- x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
- x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
- }
- x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
-
- x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
- x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
-#endif
- /* init SMM cpu state */
-
-#ifdef TARGET_X86_64
- cpu_load_efer(env, 0);
-#endif
- cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
- DF_MASK));
- env->eip = 0x00008000;
- cpu_x86_update_cr0(env,
- env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
- CR0_PG_MASK));
- cpu_x86_update_cr4(env, 0);
- env->dr[7] = 0x00000400;
-
- cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
- 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
- DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
- DESC_G_MASK | DESC_A_MASK);
-}
-
-void helper_rsm(CPUX86State *env)
-{
- X86CPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
- target_ulong sm_state;
- int i, offset;
- uint32_t val;
-
- sm_state = env->smbase + 0x8000;
-#ifdef TARGET_X86_64
- cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
-
- env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
- env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
-
- env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
- env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
- env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
- env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
-
- env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
- env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
-
- env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
- env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
- env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
- env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
-
- env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
- env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
- env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
- env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
- env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
- env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
- env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
- env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
- for (i = 8; i < 16; i++) {
- env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
- }
- env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
- cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
- env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
-
- cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
- cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
- cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
-
- for (i = 0; i < 6; i++) {
- offset = 0x7e00 + i * 16;
- cpu_x86_load_seg_cache(env, i,
- x86_lduw_phys(cs, sm_state + offset),
- x86_ldq_phys(cs, sm_state + offset + 8),
- x86_ldl_phys(cs, sm_state + offset + 4),
- (x86_lduw_phys(cs, sm_state + offset + 2) &
- 0xf0ff) << 8);
- }
-
- val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
- if (val & 0x20000) {
- env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
- }
-#else
- cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
- cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
- cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
- env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
- env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
- env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
- env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
- env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
- env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
- env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
- env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
- env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
- env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
-
- env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
- env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
- env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
- env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
-
- env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
- env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
- env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
- env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
-
- env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
- env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
-
- env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
- env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
-
- for (i = 0; i < 6; i++) {
- if (i < 3) {
- offset = 0x7f84 + i * 12;
- } else {
- offset = 0x7f2c + (i - 3) * 12;
- }
- cpu_x86_load_seg_cache(env, i,
- x86_ldl_phys(cs,
- sm_state + 0x7fa8 + i * 4) & 0xffff,
- x86_ldl_phys(cs, sm_state + offset + 8),
- x86_ldl_phys(cs, sm_state + offset + 4),
- (x86_ldl_phys(cs,
- sm_state + offset) & 0xf0ff) << 8);
- }
- cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
-
- val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
- if (val & 0x20000) {
- env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
- }
-#endif
- if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
- env->hflags2 &= ~HF2_NMI_MASK;
- }
- env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
- env->hflags &= ~HF_SMM_MASK;
-
- qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
- log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
-}
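
Note: both the SMM entry and RSM paths above funnel the hidden segment
attributes through the save-area format as (flags >> 8) & 0xf0ff on the way
out and (val & 0xf0ff) << 8 on the way back, i.e. only descriptor attribute
bits 8..15 and 20..23 survive the round trip. A small self-checking sketch
of that transform:

    #include <assert.h>
    #include <stdint.h>

    /* Pack hidden segment attributes into the SMM save-area field: bits
     * 8..15 and 20..23 of 'flags' land in bits 0..7 and 12..15. */
    static uint16_t seg_flags_to_smm(uint32_t flags)
    {
        return (flags >> 8) & 0xf0ff;
    }

    /* Inverse transform, as used by helper_rsm(). */
    static uint32_t smm_to_seg_flags(uint16_t attrib)
    {
        return (uint32_t)(attrib & 0xf0ff) << 8;
    }

    int main(void)
    {
        /* Present DPL-0 code segment with G and L set. */
        uint32_t flags = (0x9b << 8) | (0xa << 20);

        assert(smm_to_seg_flags(seg_flags_to_smm(flags)) == flags);
        return 0;
    }
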
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
deleted file mode 100644
index 9db8ad6..0000000
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ /dev/null
@@ -1,926 +0,0 @@
-/*
- * x86 SVM helpers (sysemu only)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
-#include "tcg/helper-tcg.h"
-
-/* Secure Virtual Machine helpers */
-
-static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
- const SegmentCache *sc)
-{
- cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
- sc->selector, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
- sc->base, mmu_idx, 0);
- cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
- sc->limit, mmu_idx, 0);
- cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
- ((sc->flags >> 8) & 0xff)
- | ((sc->flags >> 12) & 0x0f00),
- mmu_idx, 0);
-}
-
-/*
- * VMRUN and VMLOAD canonicalizes (i.e., sign-extend to bit 63) all base
- * addresses in the segment registers that have been loaded.
- */
-static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
-{
- uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
- *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
-}
-
-static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
- SegmentCache *sc)
-{
- unsigned int flags;
-
- sc->selector =
- cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
- mmu_idx, 0);
- sc->base =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
- mmu_idx, 0);
- sc->limit =
- cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
- mmu_idx, 0);
- flags =
- cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
- mmu_idx, 0);
- sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
-
- svm_canonicalization(env, &sc->base);
-}
-
-static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
- hwaddr addr, int seg_reg)
-{
- SegmentCache sc;
-
- svm_load_seg(env, mmu_idx, addr, &sc);
- cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
- sc.base, sc.limit, sc.flags);
-}
-
-static inline bool is_efer_invalid_state (CPUX86State *env)
-{
- if (!(env->efer & MSR_EFER_SVME)) {
- return true;
- }
-
- if (env->efer & MSR_EFER_RESERVED) {
- return true;
- }
-
- if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
- !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
- return true;
- }
-
- if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
- && !(env->cr[4] & CR4_PAE_MASK)) {
- return true;
- }
-
- if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
- && !(env->cr[0] & CR0_PE_MASK)) {
- return true;
- }
-
- if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
- && (env->cr[4] & CR4_PAE_MASK)
- && (env->segs[R_CS].flags & DESC_L_MASK)
- && (env->segs[R_CS].flags & DESC_B_MASK)) {
- return true;
- }
-
- return false;
-}
-
-static inline bool virtual_gif_enabled(CPUX86State *env)
-{
- if (likely(env->hflags & HF_GUEST_MASK)) {
- return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
- && (env->int_ctl & V_GIF_ENABLED_MASK);
- }
- return false;
-}
-
-static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
-{
- uint64_t lbr_ctl;
-
- if (likely(env->hflags & HF_GUEST_MASK)) {
- if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
- cpu_vmexit(env, exit_code, 0, retaddr);
- }
-
- lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
- control.lbr_ctl));
- return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
- && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
-
- }
-
- return false;
-}
-
-static inline bool virtual_gif_set(CPUX86State *env)
-{
- return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
-}
-
-void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
-{
- CPUState *cs = env_cpu(env);
- X86CPU *cpu = env_archcpu(env);
- target_ulong addr;
- uint64_t nested_ctl;
- uint32_t event_inj;
- uint32_t asid;
- uint64_t new_cr0;
- uint64_t new_cr3;
- uint64_t new_cr4;
- uint64_t new_dr6;
- uint64_t new_dr7;
-
- if (aflag == 2) {
- addr = env->regs[R_EAX];
- } else {
- addr = (uint32_t)env->regs[R_EAX];
- }
-
- /* Exceptions are checked before the intercept. */
- if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
- }
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
-
- env->vm_vmcb = addr;
-
- /* save the current CPU state in the hsave page */
- x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
- env->gdt.base);
- x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
- env->gdt.limit);
-
- x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
- env->idt.base);
- x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
- env->idt.limit);
-
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
-
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.rflags),
- cpu_compute_eflags(env));
-
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.es),
- &env->segs[R_ES]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.cs),
- &env->segs[R_CS]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.ss),
- &env->segs[R_SS]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.ds),
- &env->segs[R_DS]);
-
- x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
- env->eip + next_eip_addend);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
- x86_stq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
-
- /* load the interception bitmaps so we do not need to access the
- vmcb in svm mode */
- env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.intercept));
- env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.intercept_cr_read));
- env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.intercept_cr_write));
- env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.intercept_dr_read));
- env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.intercept_dr_write));
- env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.intercept_exceptions
- ));
-
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
- if (x86_ldl_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb, control.int_state)) &
- SVM_INTERRUPT_SHADOW_MASK) {
- env->hflags |= HF_INHIBIT_IRQ_MASK;
- }
-
- nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.nested_ctl));
- asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.asid));
-
- uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.msrpm_base_pa));
- uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb, control.iopm_base_pa));
-
- if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
-
- if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
-
- env->nested_pg_mode = 0;
-
- if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- if (asid == 0) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
-
- if (nested_ctl & SVM_NPT_ENABLED) {
- env->nested_cr3 = x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb,
- control.nested_cr3));
- env->hflags2 |= HF2_NPT_MASK;
-
- env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
-
- tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
- }
-
- /* enable intercepts */
- env->hflags |= HF_GUEST_MASK;
-
- env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb, control.tsc_offset));
-
- new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
- if (new_cr0 & SVM_CR0_RESERVED_MASK) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
- if ((env->efer & MSR_EFER_LMA) &&
- (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
- if (new_cr4 & cr4_reserved_bits(env)) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- /* clear exit_info_2 so we behave like the real hardware */
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
-
- cpu_x86_update_cr0(env, new_cr0);
- cpu_x86_update_cr4(env, new_cr4);
- cpu_x86_update_cr3(env, new_cr3);
- env->cr[2] = x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.cr2));
- env->int_ctl = x86_ldl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
- env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
- if (env->int_ctl & V_INTR_MASKING_MASK) {
- env->hflags2 |= HF2_VINTR_MASK;
- if (env->eflags & IF_MASK) {
- env->hflags2 |= HF2_HIF_MASK;
- }
- }
-
- cpu_load_efer(env,
- x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.efer)));
- env->eflags = 0;
- cpu_load_eflags(env, x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb,
- save.rflags)),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
-
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
- svm_load_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
- svm_load_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);
-
- env->eip = x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.rip));
-
- env->regs[R_ESP] = x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.rsp));
- env->regs[R_EAX] = x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.rax));
-
- new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7));
- new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6));
-
-#ifdef TARGET_X86_64
- if (new_dr7 & DR_RESERVED_MASK) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- if (new_dr6 & DR_RESERVED_MASK) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
-#endif
-
- cpu_x86_update_dr7(env, new_dr7);
- env->dr[6] = new_dr6;
-
- if (is_efer_invalid_state(env)) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
-
- switch (x86_ldub_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
- case TLB_CONTROL_DO_NOTHING:
- break;
- case TLB_CONTROL_FLUSH_ALL_ASID:
- /* FIXME: this is not 100% correct but should work for now */
- tlb_flush(cs);
- break;
- }
-
- env->hflags2 |= HF2_GIF_MASK;
-
- if (ctl_has_irq(env)) {
- cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
- }
-
- if (virtual_gif_set(env)) {
- env->hflags2 |= HF2_VGIF_MASK;
- }
-
- /* maybe we need to inject an event */
- event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.event_inj));
- if (event_inj & SVM_EVTINJ_VALID) {
- uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
- uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
- uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.event_inj_err));
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
- /* FIXME: need to implement valid_err */
- switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
- case SVM_EVTINJ_TYPE_INTR:
- cs->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
- /* XXX: is it always correct? */
- do_interrupt_x86_hardirq(env, vector, 1);
- break;
- case SVM_EVTINJ_TYPE_NMI:
- cs->exception_index = EXCP02_NMI;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = env->eip;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
- cpu_loop_exit(cs);
- break;
- case SVM_EVTINJ_TYPE_EXEPT:
- if (vector == EXCP02_NMI || vector >= 31) {
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- }
- cs->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
- cpu_loop_exit(cs);
- break;
- case SVM_EVTINJ_TYPE_SOFT:
- cs->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 1;
- env->exception_next_eip = env->eip;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
- cpu_loop_exit(cs);
- break;
- default:
- cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
- break;
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
- env->error_code);
- }
-}
-
-void helper_vmmcall(CPUX86State *env)
-{
- cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
- raise_exception(env, EXCP06_ILLOP);
-}
-
-void helper_vmload(CPUX86State *env, int aflag)
-{
- int mmu_idx = MMU_PHYS_IDX;
- target_ulong addr;
-
- if (aflag == 2) {
- addr = env->regs[R_EAX];
- } else {
- addr = (uint32_t)env->regs[R_EAX];
- }
-
- /* Exceptions are checked before the intercept. */
- if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
- }
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
-
- if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
- mmu_idx = MMU_NESTED_IDX;
- }
-
- svm_load_seg_cache(env, mmu_idx,
- addr + offsetof(struct vmcb, save.fs), R_FS);
- svm_load_seg_cache(env, mmu_idx,
- addr + offsetof(struct vmcb, save.gs), R_GS);
- svm_load_seg(env, mmu_idx,
- addr + offsetof(struct vmcb, save.tr), &env->tr);
- svm_load_seg(env, mmu_idx,
- addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
-
-#ifdef TARGET_X86_64
- env->kernelgsbase =
- cpu_ldq_mmuidx_ra(env,
- addr + offsetof(struct vmcb, save.kernel_gs_base),
- mmu_idx, 0);
- env->lstar =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
- mmu_idx, 0);
- env->cstar =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
- mmu_idx, 0);
- env->fmask =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
- mmu_idx, 0);
- svm_canonicalization(env, &env->kernelgsbase);
-#endif
- env->star =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
- mmu_idx, 0);
- env->sysenter_cs =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
- mmu_idx, 0);
- env->sysenter_esp =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
- mmu_idx, 0);
- env->sysenter_eip =
- cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
- mmu_idx, 0);
-}
-
-void helper_vmsave(CPUX86State *env, int aflag)
-{
- int mmu_idx = MMU_PHYS_IDX;
- target_ulong addr;
-
- if (aflag == 2) {
- addr = env->regs[R_EAX];
- } else {
- addr = (uint32_t)env->regs[R_EAX];
- }
-
- /* Exceptions are checked before the intercept. */
- if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
- raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
- }
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
-
- if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
- mmu_idx = MMU_NESTED_IDX;
- }
-
- svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
- &env->segs[R_FS]);
- svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
- &env->segs[R_GS]);
- svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
- &env->tr);
- svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
- &env->ldt);
-
-#ifdef TARGET_X86_64
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
- env->kernelgsbase, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
- env->lstar, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
- env->cstar, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
- env->fmask, mmu_idx, 0);
-#endif
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
- env->star, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
- env->sysenter_cs, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
- env->sysenter_esp, mmu_idx, 0);
- cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
- env->sysenter_eip, mmu_idx, 0);
-}
-
-void helper_stgi(CPUX86State *env)
-{
- cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
-
- if (virtual_gif_enabled(env)) {
- env->int_ctl |= V_GIF_MASK;
- env->hflags2 |= HF2_VGIF_MASK;
- } else {
- env->hflags2 |= HF2_GIF_MASK;
- }
-}
-
-void helper_clgi(CPUX86State *env)
-{
- cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
-
- if (virtual_gif_enabled(env)) {
- env->int_ctl &= ~V_GIF_MASK;
- env->hflags2 &= ~HF2_VGIF_MASK;
- } else {
- env->hflags2 &= ~HF2_GIF_MASK;
- }
-}
-
-bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
-{
- switch (type) {
- case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
- if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
- return true;
- }
- break;
- case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
- if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
- return true;
- }
- break;
- case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
- if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
- return true;
- }
- break;
- case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
- if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
- return true;
- }
- break;
- case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
- if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
- return true;
- }
- break;
- default:
- if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
- return true;
- }
- break;
- }
- return false;
-}
-
-void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
- uint64_t param, uintptr_t retaddr)
-{
- CPUState *cs = env_cpu(env);
-
- if (likely(!(env->hflags & HF_GUEST_MASK))) {
- return;
- }
-
- if (!cpu_svm_has_intercept(env, type)) {
- return;
- }
-
- if (type == SVM_EXIT_MSR) {
- /* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb,
- control.msrpm_base_pa));
- uint32_t t0, t1;
-
- switch ((uint32_t)env->regs[R_ECX]) {
- case 0 ... 0x1fff:
- t0 = (env->regs[R_ECX] * 2) % 8;
- t1 = (env->regs[R_ECX] * 2) / 8;
- break;
- case 0xc0000000 ... 0xc0001fff:
- t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- case 0xc0010000 ... 0xc0011fff:
- t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- default:
- cpu_vmexit(env, type, param, retaddr);
- t0 = 0;
- t1 = 0;
- break;
- }
- if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
- cpu_vmexit(env, type, param, retaddr);
- }
- return;
- }
-
- cpu_vmexit(env, type, param, retaddr);
-}
-
-void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
-{
- cpu_svm_check_intercept_param(env, type, 0, GETPC());
-}
-
-void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
- uint32_t next_eip_addend)
-{
- CPUState *cs = env_cpu(env);
-
- if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
- /* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
- offsetof(struct vmcb, control.iopm_base_pa));
- uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
-
- if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
- /* next env->eip */
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
- env->eip + next_eip_addend);
- cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
- }
- }
-}
-
-void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
- uintptr_t retaddr)
-{
- CPUState *cs = env_cpu(env);
-
- cpu_restore_state(cs, retaddr);
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
- PRIx64 ", " TARGET_FMT_lx ")!\n",
- exit_code, exit_info_1,
- x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.exit_info_2)),
- env->eip);
-
- cs->exception_index = EXCP_VMEXIT;
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
- exit_code);
-
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.exit_info_1), exit_info_1),
-
- /* remove any pending exception */
- env->old_exception = -1;
- cpu_loop_exit(cs);
-}
-
-void do_vmexit(CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
-
- if (env->hflags & HF_INHIBIT_IRQ_MASK) {
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.int_state),
- SVM_INTERRUPT_SHADOW_MASK);
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
- } else {
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
- }
- env->hflags2 &= ~HF2_NPT_MASK;
- tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
-
- /* Save the VM state in the vmcb */
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.es),
- &env->segs[R_ES]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.cs),
- &env->segs[R_CS]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.ss),
- &env->segs[R_SS]);
- svm_save_seg(env, MMU_PHYS_IDX,
- env->vm_vmcb + offsetof(struct vmcb, save.ds),
- &env->segs[R_DS]);
-
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
- env->gdt.base);
- x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
- env->gdt.limit);
-
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
- env->idt.base);
- x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
- env->idt.limit);
-
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
-
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
- cpu_compute_eflags(env));
- x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
- env->eip);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
- x86_stq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
- x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
- env->hflags & HF_CPL_MASK);
-
- /* Reload the host state from vm_hsave */
- env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
- env->hflags &= ~HF_GUEST_MASK;
- env->intercept = 0;
- env->intercept_exceptions = 0;
-
- /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- env->int_ctl = 0;
-
- /* Clears the TSC_OFFSET inside the processor. */
- env->tsc_offset = 0;
-
- env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
- save.gdtr.base));
- env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
- save.gdtr.limit));
-
- env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
- save.idtr.base));
- env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
- save.idtr.limit));
-
- cpu_x86_update_cr0(env, x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb,
- save.cr0)) |
- CR0_PE_MASK);
- cpu_x86_update_cr4(env, x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb,
- save.cr4)));
-
- /*
- * Resets the current ASID register to zero (host ASID; TLB flush).
- *
- * If the host is in PAE mode, the processor reloads the host's PDPEs
- * from the page table indicated the host's CR3. FIXME: If the PDPEs
- * contain illegal state, the processor causes a shutdown (QEMU does
- * not implement PDPTRs).
- */
- cpu_x86_update_cr3(env, x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb,
- save.cr3)));
- /* we need to set the efer after the crs so the hidden flags get
- set properly */
- cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
- save.efer)));
-
- /* Completion of the VMRUN instruction clears the host EFLAGS.RF bit. */
- env->eflags = 0;
- cpu_load_eflags(env, x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb,
- save.rflags)),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
- RF_MASK | VM_MASK));
-
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
- svm_load_seg_cache(env, MMU_PHYS_IDX,
- env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);
-
- env->eip = x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.rip));
- env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
- offsetof(struct vmcb, save.rsp));
- env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
- offsetof(struct vmcb, save.rax));
-
- env->dr[6] = x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.dr6));
-
- /* Disables all breakpoints in the host DR7 register. */
- cpu_x86_update_dr7(env,
- x86_ldq_phys(cs,
- env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff);
-
- /* other setups */
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
- x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.event_inj)));
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
- x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- control.event_inj_err)));
- x86_stl_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
-
- env->hflags2 &= ~HF2_GIF_MASK;
- env->hflags2 &= ~HF2_VGIF_MASK;
-
-
- /* FIXME: Checks the reloaded host state for consistency. */
-
- /*
- * EFLAGS.TF causes a #DB trap after the VMRUN completes on the host
- * side (i.e., after the #VMEXIT from the guest). Since we're running
- * in the main loop, call do_interrupt_all directly.
- */
- if ((env->eflags & TF_MASK) != 0) {
- env->dr[6] |= DR6_BS;
- do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0);
- }
-}
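
Note: the SVM_EXIT_MSR branch of cpu_svm_check_intercept_param() above maps
the MSR number into the MSR permission map, 2 KiB per architected MSR range,
two bits per MSR (read intercept, then write intercept). A standalone sketch
of that index computation (anything outside the three ranges is treated as
intercepted, matching the unconditional #VMEXIT above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Locate the byte and bit position of an MSR inside the permission
     * bitmap, mirroring the three ranges handled above. */
    static bool msrpm_locate(uint32_t msr, uint32_t *byte, uint32_t *shift)
    {
        uint32_t bitpos;

        if (msr <= 0x1fff) {
            bitpos = msr * 2;
        } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
            bitpos = (8192 + msr - 0xc0000000) * 2;
        } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
            bitpos = (16384 + msr - 0xc0010000) * 2;
        } else {
            return false; /* outside the architected ranges */
        }

        *byte = bitpos / 8;
        *shift = bitpos % 8;
        return true;
    }

    /* 'write' selects the second of the two per-MSR bits, like 'param'. */
    static bool msr_intercepted(const uint8_t *msrpm, uint32_t msr, bool write)
    {
        uint32_t byte, shift;

        if (!msrpm_locate(msr, &byte, &shift)) {
            return true;
        }
        return msrpm[byte] & ((1u << write) << shift);
    }
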
diff --git a/target/i386/tcg/sysemu/tcg-cpu.c b/target/i386/tcg/sysemu/tcg-cpu.c
deleted file mode 100644
index c223c0f..0000000
--- a/target/i386/tcg/sysemu/tcg-cpu.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * i386 TCG cpu class initialization functions specific to sysemu
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "tcg/helper-tcg.h"
-
-#include "sysemu/sysemu.h"
-#include "qemu/units.h"
-#include "exec/address-spaces.h"
-
-#include "tcg/tcg-cpu.h"
-
-static void tcg_cpu_machine_done(Notifier *n, void *unused)
-{
- X86CPU *cpu = container_of(n, X86CPU, machine_done);
- MemoryRegion *smram =
- (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
-
- if (smram) {
- cpu->smram = g_new(MemoryRegion, 1);
- memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
- smram, 0, 4 * GiB);
- memory_region_set_enabled(cpu->smram, true);
- memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
- cpu->smram, 1);
- }
-}
-
-bool tcg_cpu_realizefn(CPUState *cs, Error **errp)
-{
- X86CPU *cpu = X86_CPU(cs);
-
- /*
- * The realize order is important, since x86_cpu_realize() checks if
- * nothing else has been set by the user (or by accelerators) in
- * cpu->ucode_rev and cpu->phys_bits, and the memory regions
- * initialized here are needed for the vcpu initialization.
- *
- * realize order:
- * tcg_cpu -> host_cpu -> x86_cpu
- */
- cpu->cpu_as_mem = g_new(MemoryRegion, 1);
- cpu->cpu_as_root = g_new(MemoryRegion, 1);
-
- /* Outer container... */
- memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
- memory_region_set_enabled(cpu->cpu_as_root, true);
-
- /*
- * ... with two regions inside: normal system memory with low
- * priority, and...
- */
- memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
- get_system_memory(), 0, ~0ull);
- memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
- memory_region_set_enabled(cpu->cpu_as_mem, true);
-
- cs->num_ases = 2;
- cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
- cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
-
- /* ... SMRAM with higher priority, linked from /machine/smram. */
- cpu->machine_done.notify = tcg_cpu_machine_done;
- qemu_add_machine_init_done_notifier(&cpu->machine_done);
- return true;
-}
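
Note: tcg_cpu_realizefn() above builds the SMM address space from two
overlapping subregions: the system-memory alias at priority 0 and, once the
machine-done notifier runs, the SMRAM alias at priority 1, so SMRAM wins
wherever both are enabled. A toy model of that resolution rule (this is not
QEMU's MemoryRegion API, just the priority/enabled lookup it relies on):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct region {
        const char *name;
        uint64_t base, size;
        int priority;
        bool enabled;
    };

    /* Pick the highest-priority enabled region containing 'addr'. */
    static const struct region *resolve(const struct region *r, size_t n,
                                        uint64_t addr)
    {
        const struct region *best = NULL;

        for (size_t i = 0; i < n; i++) {
            if (!r[i].enabled || addr < r[i].base
                || addr - r[i].base >= r[i].size) {
                continue;
            }
            if (!best || r[i].priority > best->priority) {
                best = &r[i];
            }
        }
        return best;
    }
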
diff --git a/target/i386/tcg/system/bpt_helper.c b/target/i386/tcg/system/bpt_helper.c
new file mode 100644
index 0000000..aebb5ca
--- /dev/null
+++ b/target/i386/tcg/system/bpt_helper.c
@@ -0,0 +1,316 @@
+/*
+ * i386 breakpoint helpers - system code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/watchpoint.h"
+#include "tcg/helper-tcg.h"
+
+
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return (dr7 >> (index * 2)) & 2;
+}
+
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return hw_global_breakpoint_enabled(dr7, index) ||
+ hw_local_breakpoint_enabled(dr7, index);
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+ return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
+ int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
+ return (len == 2) ? 8 : len + 1;
+}
+
+static int hw_breakpoint_insert(CPUX86State *env, int index)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong dr7 = env->dr[7];
+ target_ulong drN = env->dr[index];
+ int err = 0;
+
+ switch (hw_breakpoint_type(dr7, index)) {
+ case DR7_TYPE_BP_INST:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_breakpoint_insert(cs, drN, BP_CPU,
+ &env->cpu_breakpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* Notice when we should enable calls to bpt_io. */
+ return hw_breakpoint_enabled(env->dr[7], index)
+ ? HF_IOBPT_MASK : 0;
+
+ case DR7_TYPE_DATA_WR:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_DATA_RW:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+ }
+ if (err) {
+ env->cpu_breakpoint[index] = NULL;
+ }
+ return 0;
+}
+
+static void hw_breakpoint_remove(CPUX86State *env, int index)
+{
+ CPUState *cs = env_cpu(env);
+
+ switch (hw_breakpoint_type(env->dr[7], index)) {
+ case DR7_TYPE_BP_INST:
+ if (env->cpu_breakpoint[index]) {
+ cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[index]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
+ env->cpu_watchpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* HF_IOBPT_MASK cleared elsewhere. */
+ break;
+ }
+}
+
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
+{
+ target_ulong old_dr7 = env->dr[7];
+ int iobpt = 0;
+ int i;
+
+ new_dr7 |= DR7_FIXED_1;
+
+ /* If nothing is changing except the global/local enable bits,
+ then we can make the change more efficient. */
+ if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
+ /* Fold the global and local enable bits together into the
+ global fields, then xor to show which registers have
+ changed collective enable state. */
+ int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
+
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
+ hw_breakpoint_remove(env, i);
+ }
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= HF_IOBPT_MASK;
+ }
+ }
+ } else {
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ hw_breakpoint_remove(env, i);
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ }
+ }
+
+ env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
+}
+
+bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
+{
+ target_ulong dr6;
+ int reg;
+ bool hit_enabled = false;
+
+ dr6 = env->dr[6] & ~0xf;
+ for (reg = 0; reg < DR7_MAX_BP; reg++) {
+ bool bp_match = false;
+ bool wp_match = false;
+
+ switch (hw_breakpoint_type(env->dr[7], reg)) {
+ case DR7_TYPE_BP_INST:
+ if (env->dr[reg] == env->eip) {
+ bp_match = true;
+ }
+ break;
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[reg] &&
+ env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+ wp_match = true;
+ }
+ break;
+ case DR7_TYPE_IO_RW:
+ break;
+ }
+ if (bp_match || wp_match) {
+ dr6 |= 1 << reg;
+ if (hw_breakpoint_enabled(env->dr[7], reg)) {
+ hit_enabled = true;
+ }
+ }
+ }
+
+ if (hit_enabled || force_dr6_update) {
+ env->dr[6] = dr6;
+ }
+
+ return hit_enabled;
+}
+
+void breakpoint_handler(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_hw_breakpoints(env, false)) {
+ /*
+ * FIXME: #DB should be delayed by one instruction if
+ * INHIBIT_IRQ is set (STI cannot trigger a watchpoint).
+ * The delayed #DB should also fuse with one generated
+ * by ICEBP (aka INT1).
+ */
+ raise_exception(env, EXCP01_DB);
+ } else {
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+ } else {
+ if (cpu_breakpoint_test(cs, env->eip, BP_CPU)) {
+ check_hw_breakpoints(env, true);
+ raise_exception(env, EXCP01_DB);
+ }
+ }
+}
+
+target_ulong helper_get_dr(CPUX86State *env, int reg)
+{
+ if (reg >= 4 && reg < 6) {
+ if (env->cr[4] & CR4_DE_MASK) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ } else {
+ reg += 2;
+ }
+ }
+
+ if (env->dr[7] & DR7_GD) {
+ env->dr[7] &= ~DR7_GD;
+ env->dr[6] |= DR6_BD;
+ raise_exception_ra(env, EXCP01_DB, GETPC());
+ }
+
+ return env->dr[reg];
+}
+
+void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
+{
+ if (reg >= 4 && reg < 6) {
+ if (env->cr[4] & CR4_DE_MASK) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ } else {
+ reg += 2;
+ }
+ }
+
+ if (env->dr[7] & DR7_GD) {
+ env->dr[7] &= ~DR7_GD;
+ env->dr[6] |= DR6_BD;
+ raise_exception_ra(env, EXCP01_DB, GETPC());
+ }
+
+ if (reg < 4) {
+ if (hw_breakpoint_enabled(env->dr[7], reg)
+ && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
+ hw_breakpoint_remove(env, reg);
+ env->dr[reg] = t0;
+ hw_breakpoint_insert(env, reg);
+ } else {
+ env->dr[reg] = t0;
+ }
+ } else {
+ if (t0 & DR_RESERVED_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (reg == 6) {
+ env->dr[6] = t0 | DR6_FIXED_1;
+ } else {
+ cpu_x86_update_dr7(env, t0);
+ }
+ }
+}
+
+/* Check if Port I/O is trapped by a breakpoint. */
+void helper_bpt_io(CPUX86State *env, uint32_t port,
+ uint32_t size, target_ulong next_eip)
+{
+ target_ulong dr7 = env->dr[7];
+ int i, hit = 0;
+
+ for (i = 0; i < DR7_MAX_BP; ++i) {
+ if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(dr7, i)) {
+ int bpt_len = hw_breakpoint_len(dr7, i);
+ if (port + size - 1 >= env->dr[i]
+ && port <= env->dr[i] + bpt_len - 1) {
+ hit |= 1 << i;
+ }
+ }
+ }
+
+ if (hit) {
+ env->dr[6] = (env->dr[6] & ~0xf) | hit;
+ env->eip = next_eip;
+ raise_exception(env, EXCP01_DB);
+ }
+}
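The fast path in cpu_x86_update_dr7() above relies on a small bit trick: OR-ing DR7 with itself shifted left by one folds each local enable bit (bit 2i) into the global enable bit above it (bit 2i+1), so XOR-ing the folded old and new values flags, per breakpoint, whether its overall enabled state changed. A minimal standalone sketch of that trick follows; it is illustrative only, and the DR7_MAX_BP constant and sample DR7 values are stand-ins rather than QEMU code.

/*
 * Standalone sketch of the DR7 enable-bit folding used above
 * (illustrative only; the constants mirror, but are not, QEMU's).
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DR7_MAX_BP 4

/* Breakpoint i is enabled if either its local (bit 2i) or global
 * (bit 2i+1) enable bit is set. */
static bool bp_enabled(uint32_t dr7, int i)
{
    return (dr7 >> (i * 2)) & 3;
}

int main(void)
{
    uint32_t old_dr7 = 0x01;   /* BP0 local-enabled                      */
    uint32_t new_dr7 = 0x0a;   /* BP0 global-enabled, BP1 global-enabled */

    /* OR each local bit into the global bit above it, so odd bit 2i+1
     * becomes "BP i enabled at all"; XOR old vs new to find which
     * breakpoints changed that collective state. */
    uint32_t mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;

    for (int i = 0; i < DR7_MAX_BP; i++) {
        if (mod & (2 << i * 2)) {
            printf("BP%d: enable state changed, now %s\n",
                   i, bp_enabled(new_dr7, i) ? "on" : "off");
        } else {
            printf("BP%d: enable state unchanged\n", i);
        }
    }
    return 0;
}

With old_dr7 = 0x01 and new_dr7 = 0x0a, only BP1 reports a change: BP0 merely moves from a local to a global enable, which the folded comparison deliberately ignores.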
diff --git a/target/i386/tcg/system/excp_helper.c b/target/i386/tcg/system/excp_helper.c
new file mode 100644
index 0000000..c162621
--- /dev/null
+++ b/target/i386/tcg/system/excp_helper.c
@@ -0,0 +1,662 @@
+/*
+ * x86 exception helpers - system code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
+#include "exec/cputlb.h"
+#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "exec/tswap.h"
+#include "tcg/helper-tcg.h"
+
+typedef struct TranslateParams {
+ target_ulong addr;
+ target_ulong cr3;
+ int pg_mode;
+ int mmu_idx;
+ int ptw_idx;
+ MMUAccessType access_type;
+} TranslateParams;
+
+typedef struct TranslateResult {
+ hwaddr paddr;
+ int prot;
+ int page_size;
+} TranslateResult;
+
+typedef enum TranslateFaultStage2 {
+ S2_NONE,
+ S2_GPA,
+ S2_GPT,
+} TranslateFaultStage2;
+
+typedef struct TranslateFault {
+ int exception_index;
+ int error_code;
+ target_ulong cr2;
+ TranslateFaultStage2 stage2;
+} TranslateFault;
+
+typedef struct PTETranslate {
+ CPUX86State *env;
+ TranslateFault *err;
+ int ptw_idx;
+ void *haddr;
+ hwaddr gaddr;
+} PTETranslate;
+
+static bool ptw_translate(PTETranslate *inout, hwaddr addr)
+{
+ int flags;
+
+ inout->gaddr = addr;
+ flags = probe_access_full_mmu(inout->env, addr, 0, MMU_DATA_STORE,
+ inout->ptw_idx, &inout->haddr, NULL);
+
+ if (unlikely(flags & TLB_INVALID_MASK)) {
+ TranslateFault *err = inout->err;
+
+ assert(inout->ptw_idx == MMU_NESTED_IDX);
+ *err = (TranslateFault){
+ .error_code = inout->env->error_code,
+ .cr2 = addr,
+ .stage2 = S2_GPT,
+ };
+ return false;
+ }
+ return true;
+}
+
+static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
+{
+ if (likely(in->haddr)) {
+ return ldl_p(in->haddr);
+ }
+ return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
+}
+
+static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
+{
+ if (likely(in->haddr)) {
+ return ldq_p(in->haddr);
+ }
+ return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
+}
+
+/*
+ * Note that we can use a 32-bit cmpxchg for all page table entries,
+ * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and
+ * PG_DIRTY_MASK are all in the low 32 bits.
+ */
+static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new)
+{
+ uint32_t cmp;
+
+ CPUState *cpu = env_cpu(in->env);
+ /* We are in cpu_exec, and start_exclusive can't be called directly. */
+ g_assert(cpu->running);
+ cpu_exec_end(cpu);
+ /* Does x86 really perform an RMW cycle on MMIO for a page-table walk? */
+ start_exclusive();
+ cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0);
+ if (cmp == old) {
+ cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0);
+ }
+ end_exclusive();
+ cpu_exec_start(cpu);
+ return cmp == old;
+}
+
+static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
+{
+ if (set & ~old) {
+ uint32_t new = old | set;
+ if (likely(in->haddr)) {
+ old = cpu_to_le32(old);
+ new = cpu_to_le32(new);
+ return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old;
+ }
+ return ptw_setl_slow(in, old, new);
+ }
+ return true;
+}
+
+static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
+ TranslateResult *out, TranslateFault *err,
+ uint64_t ra)
+{
+ const target_ulong addr = in->addr;
+ const int pg_mode = in->pg_mode;
+ const bool is_user = is_mmu_index_user(in->mmu_idx);
+ const MMUAccessType access_type = in->access_type;
+ uint64_t ptep, pte, rsvd_mask;
+ PTETranslate pte_trans = {
+ .env = env,
+ .err = err,
+ .ptw_idx = in->ptw_idx,
+ };
+ hwaddr pte_addr, paddr;
+ uint32_t pkr;
+ int page_size;
+ int error_code;
+ int prot;
+
+ restart_all:
+ rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
+ rsvd_mask &= PG_ADDRESS_MASK;
+ if (!(pg_mode & PG_MODE_NXE)) {
+ rsvd_mask |= PG_NX_MASK;
+ }
+
+ if (pg_mode & PG_MODE_PAE) {
+#ifdef TARGET_X86_64
+ if (pg_mode & PG_MODE_LMA) {
+ if (pg_mode & PG_MODE_LA57) {
+ /*
+ * Page table level 5
+ */
+ pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ restart_5:
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & (rsvd_mask | PG_PSE_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_5;
+ }
+ ptep = pte ^ PG_NX_MASK;
+ } else {
+ pte = in->cr3;
+ ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+ }
+
+ /*
+ * Page table level 4
+ */
+ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ restart_4:
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & (rsvd_mask | PG_PSE_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_4;
+ }
+ ptep &= pte ^ PG_NX_MASK;
+
+ /*
+ * Page table level 3
+ */
+ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ restart_3_lma:
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_3_lma;
+ }
+ ptep &= pte ^ PG_NX_MASK;
+ if (pte & PG_PSE_MASK) {
+ /* 1 GB page */
+ page_size = 1024 * 1024 * 1024;
+ goto do_check_protect;
+ }
+ } else
+#endif
+ {
+ /*
+ * Page table level 3
+ */
+ pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ rsvd_mask |= PG_HI_USER_MASK;
+ restart_3_nolma:
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & (rsvd_mask | PG_NX_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_3_nolma;
+ }
+ ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+ }
+
+ /*
+ * Page table level 2
+ */
+ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ restart_2_pae:
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ if (pte & PG_PSE_MASK) {
+ /* 2 MB page */
+ page_size = 2048 * 1024;
+ ptep &= pte ^ PG_NX_MASK;
+ goto do_check_protect;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_2_pae;
+ }
+ ptep &= pte ^ PG_NX_MASK;
+
+ /*
+ * Page table level 1
+ */
+ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ pte = ptw_ldq(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ /* combine pde and pte nx, user and rw protections */
+ ptep &= pte ^ PG_NX_MASK;
+ page_size = 4096;
+ } else if (pg_mode & PG_MODE_PG) {
+ /*
+ * Page table level 2
+ */
+ pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ restart_2_nopae:
+ pte = ptw_ldl(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ ptep = pte | PG_NX_MASK;
+
+ /* if PSE bit is set, then we use a 4MB page */
+ if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
+ page_size = 4096 * 1024;
+ /*
+ * Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
+ * Leave bits 20-13 in place for setting accessed/dirty bits below.
+ */
+ pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13));
+ rsvd_mask = 0x200000;
+ goto do_check_protect_pse36;
+ }
+ if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
+ goto restart_2_nopae;
+ }
+
+ /*
+ * Page table level 1
+ */
+ pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
+ if (!ptw_translate(&pte_trans, pte_addr)) {
+ return false;
+ }
+ pte = ptw_ldl(&pte_trans, ra);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ /* combine pde and pte user and rw protections */
+ ptep &= pte | PG_NX_MASK;
+ page_size = 4096;
+ rsvd_mask = 0;
+ } else {
+ /*
+ * No paging (real mode), let's tentatively resolve the address as 1:1
+ * here, but conditionally still perform an NPT walk on it later.
+ */
+ page_size = 0x40000000;
+ paddr = in->addr;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ goto stage2;
+ }
+
+do_check_protect:
+ rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
+do_check_protect_pse36:
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep ^= PG_NX_MASK;
+
+ /* can the page be put in the TLB? prot will tell us */
+ if (is_user && !(ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+
+ prot = 0;
+ if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
+ prot |= PAGE_READ;
+ if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ if (!(ptep & PG_NX_MASK) &&
+ (is_user ||
+ !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
+ prot |= PAGE_EXEC;
+ }
+
+ if (ptep & PG_USER_MASK) {
+ pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
+ } else {
+ pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
+ }
+ if (pkr) {
+ uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+ uint32_t pkr_ad = (pkr >> pk * 2) & 1;
+ uint32_t pkr_wd = (pkr >> pk * 2) & 2;
+ uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ if (pkr_ad) {
+ pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
+ } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
+ pkr_prot &= ~PAGE_WRITE;
+ }
+ if ((pkr_prot & (1 << access_type)) == 0) {
+ goto do_fault_pk_protect;
+ }
+ prot &= pkr_prot;
+ }
+
+ if ((prot & (1 << access_type)) == 0) {
+ goto do_fault_protect;
+ }
+
+ /* yes, it can! */
+ {
+ uint32_t set = PG_ACCESSED_MASK;
+ if (access_type == MMU_DATA_STORE) {
+ set |= PG_DIRTY_MASK;
+ } else if (!(pte & PG_DIRTY_MASK)) {
+ /*
+ * Only set write access if already dirty...
+ * otherwise wait for dirty access.
+ */
+ prot &= ~PAGE_WRITE;
+ }
+ if (!ptw_setl(&pte_trans, pte, set)) {
+ /*
+ * We can arrive here from any of 3 levels and 2 formats.
+ * The only safe thing is to restart the entire lookup.
+ */
+ goto restart_all;
+ }
+ }
+
+ /* merge offset within page */
+ paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
+ stage2:
+
+ /*
+ * Note that NPT is walked (for both paging structures and final guest
+ * addresses) using the address with the A20 bit set.
+ */
+ if (in->ptw_idx == MMU_NESTED_IDX) {
+ CPUTLBEntryFull *full;
+ int flags, nested_page_size;
+
+ flags = probe_access_full_mmu(env, paddr, 0, access_type,
+ MMU_NESTED_IDX, &pte_trans.haddr, &full);
+ if (unlikely(flags & TLB_INVALID_MASK)) {
+ *err = (TranslateFault){
+ .error_code = env->error_code,
+ .cr2 = paddr,
+ .stage2 = S2_GPA,
+ };
+ return false;
+ }
+
+ /* Merge stage1 & stage2 protection bits. */
+ prot &= full->prot;
+
+ /* Re-verify resulting protection. */
+ if ((prot & (1 << access_type)) == 0) {
+ goto do_fault_protect;
+ }
+
+ /* Merge stage1 & stage2 addresses to final physical address. */
+ nested_page_size = 1 << full->lg_page_size;
+ paddr = (full->phys_addr & ~(nested_page_size - 1))
+ | (paddr & (nested_page_size - 1));
+
+ /*
+ * Use the larger of stage1 & stage2 page sizes, so that
+ * invalidation works.
+ */
+ if (nested_page_size > page_size) {
+ page_size = nested_page_size;
+ }
+ }
+
+ out->paddr = paddr & x86_get_a20_mask(env);
+ out->prot = prot;
+ out->page_size = page_size;
+ return true;
+
+ do_fault_rsvd:
+ error_code = PG_ERROR_RSVD_MASK;
+ goto do_fault_cont;
+ do_fault_protect:
+ error_code = PG_ERROR_P_MASK;
+ goto do_fault_cont;
+ do_fault_pk_protect:
+ assert(access_type != MMU_INST_FETCH);
+ error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK;
+ goto do_fault_cont;
+ do_fault:
+ error_code = 0;
+ do_fault_cont:
+ if (is_user) {
+ error_code |= PG_ERROR_U_MASK;
+ }
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ break;
+ case MMU_DATA_STORE:
+ error_code |= PG_ERROR_W_MASK;
+ break;
+ case MMU_INST_FETCH:
+ if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) {
+ error_code |= PG_ERROR_I_D_MASK;
+ }
+ break;
+ }
+ *err = (TranslateFault){
+ .exception_index = EXCP0E_PAGE,
+ .error_code = error_code,
+ .cr2 = addr,
+ };
+ return false;
+}
+
+static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
+ uintptr_t retaddr)
+{
+ uint64_t exit_info_1 = err->error_code;
+
+ switch (err->stage2) {
+ case S2_GPT:
+ exit_info_1 |= SVM_NPTEXIT_GPT;
+ break;
+ case S2_GPA:
+ exit_info_1 |= SVM_NPTEXIT_GPA;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ x86_stq_phys(env_cpu(env),
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ err->cr2);
+ cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
+}
+
+static bool get_physical_address(CPUX86State *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ TranslateResult *out, TranslateFault *err,
+ uint64_t ra)
+{
+ TranslateParams in;
+ bool use_stage2 = env->hflags2 & HF2_NPT_MASK;
+
+ in.addr = addr;
+ in.access_type = access_type;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ break;
+
+ case MMU_NESTED_IDX:
+ if (likely(use_stage2)) {
+ in.cr3 = env->nested_cr3;
+ in.pg_mode = env->nested_pg_mode;
+ in.mmu_idx =
+ env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
+ in.ptw_idx = MMU_PHYS_IDX;
+
+ if (!mmu_translate(env, &in, out, err, ra)) {
+ err->stage2 = S2_GPA;
+ return false;
+ }
+ return true;
+ }
+ break;
+
+ default:
+ if (is_mmu_index_32(mmu_idx)) {
+ addr = (uint32_t)addr;
+ }
+
+ if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) {
+ in.cr3 = env->cr[3];
+ in.mmu_idx = mmu_idx;
+ in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;
+ in.pg_mode = get_pg_mode(env);
+
+ if (in.pg_mode & PG_MODE_LMA) {
+ /* test virtual address sign extension */
+ int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
+ int64_t sext = (int64_t)addr >> shift;
+ if (sext != 0 && sext != -1) {
+ *err = (TranslateFault){
+ .exception_index = EXCP0D_GPF,
+ .cr2 = addr,
+ };
+ return false;
+ }
+ }
+ return mmu_translate(env, &in, out, err, ra);
+ }
+ break;
+ }
+
+ /* No translation needed. */
+ out->paddr = addr & x86_get_a20_mask(env);
+ out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ out->page_size = TARGET_PAGE_SIZE;
+ return true;
+}
+
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ CPUX86State *env = cpu_env(cs);
+ TranslateResult out;
+ TranslateFault err;
+
+ if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err,
+ retaddr)) {
+ /*
+ * Even if 4MB pages, we map only one 4KB page in the cache to
+ * avoid filling it too fast.
+ */
+ assert(out.prot & (1 << access_type));
+ tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
+ out.paddr & TARGET_PAGE_MASK,
+ cpu_get_mem_attrs(env),
+ out.prot, mmu_idx, out.page_size);
+ return true;
+ }
+
+ if (probe) {
+ /* This will be used if recursing for stage2 translation. */
+ env->error_code = err.error_code;
+ return false;
+ }
+
+ if (err.stage2 != S2_NONE) {
+ raise_stage2(env, &err, retaddr);
+ }
+
+ if (env->intercept_exceptions & (1 << err.exception_index)) {
+ /* cr2 is not modified in case of exceptions */
+ x86_stq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.exit_info_2),
+ err.cr2);
+ } else {
+ env->cr[2] = err.cr2;
+ }
+ raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr);
+}
+
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
+}
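Before walking the page tables in long mode, get_physical_address() above rejects non-canonical linear addresses by arithmetic-shifting the address right by 47 (or by 56 with LA57) and requiring the result to be all zeros or all ones. A standalone sketch of that test, with made-up sample addresses (illustrative only, not QEMU code):

/*
 * Sketch of the canonical-address test performed in
 * get_physical_address() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A 64-bit linear address is canonical when the bits above the
 * implemented width (47 for 4-level paging, 56 with LA57) are copies
 * of the top implemented bit, i.e. the arithmetic shift yields
 * all-zeros or all-ones. */
static bool is_canonical(uint64_t addr, bool la57)
{
    int shift = la57 ? 56 : 47;
    int64_t sext = (int64_t)addr >> shift;
    return sext == 0 || sext == -1;
}

int main(void)
{
    printf("%d\n", is_canonical(0x00007fffffffffffULL, false)); /* 1 */
    printf("%d\n", is_canonical(0xffff800000000000ULL, false)); /* 1 */
    printf("%d\n", is_canonical(0x0000800000000000ULL, false)); /* 0: non-canonical -> #GP */
    return 0;
}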
diff --git a/target/i386/tcg/system/fpu_helper.c b/target/i386/tcg/system/fpu_helper.c
new file mode 100644
index 0000000..0b4fa18
--- /dev/null
+++ b/target/i386/tcg/system/fpu_helper.c
@@ -0,0 +1,63 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (system code)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "hw/irq.h"
+
+static qemu_irq ferr_irq;
+
+void x86_register_ferr_irq(qemu_irq irq)
+{
+ ferr_irq = irq;
+}
+
+void fpu_check_raise_ferr_irq(CPUX86State *env)
+{
+ if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
+ bql_lock();
+ qemu_irq_raise(ferr_irq);
+ bql_unlock();
+ return;
+ }
+}
+
+void cpu_clear_ignne(void)
+{
+ CPUX86State *env = &X86_CPU(first_cpu)->env;
+ env->hflags2 &= ~HF2_IGNNE_MASK;
+}
+
+void cpu_set_ignne(void)
+{
+ CPUX86State *env = &X86_CPU(first_cpu)->env;
+
+ assert(bql_locked());
+
+ env->hflags2 |= HF2_IGNNE_MASK;
+ /*
+ * We get here in response to a write to port F0h. The chipset should
+ * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is
+ * cleared, because FERR# and FP_IRQ are two separate pins on real
+ * hardware. However, we don't model FERR# as a qemu_irq, so we just
+ * do directly what the chipset would do, i.e. deassert FP_IRQ.
+ */
+ qemu_irq_lower(ferr_irq);
+}
diff --git a/target/i386/tcg/sysemu/meson.build b/target/i386/tcg/system/meson.build
index f9ac254..f9ac254 100644
--- a/target/i386/tcg/sysemu/meson.build
+++ b/target/i386/tcg/system/meson.build
diff --git a/target/i386/tcg/system/misc_helper.c b/target/i386/tcg/system/misc_helper.c
new file mode 100644
index 0000000..9c3f5cc
--- /dev/null
+++ b/target/i386/tcg/system/misc_helper.c
@@ -0,0 +1,544 @@
+/*
+ * x86 misc helpers - system code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+#include "exec/cputlb.h"
+#include "tcg/helper-tcg.h"
+#include "hw/i386/apic.h"
+
+void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stb(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inb(CPUX86State *env, uint32_t port)
+{
+ return address_space_ldub(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stw(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inw(CPUX86State *env, uint32_t port)
+{
+ return address_space_lduw(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stl(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inl(CPUX86State *env, uint32_t port)
+{
+ return address_space_ldl(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_read_cr8(CPUX86State *env)
+{
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
+ } else {
+ return env->int_ctl & V_TPR_MASK;
+ }
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+ switch (reg) {
+ case 0:
+ /*
+ * If we reach this point, the CR0 write intercept is disabled.
+ * But we could still exit if the hypervisor has requested the selective
+ * intercept for bits other than TS and MP
+ */
+ if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
+ ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
+ cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
+ }
+ cpu_x86_update_cr0(env, t0);
+ break;
+ case 3:
+ if ((env->efer & MSR_EFER_LMA) &&
+ (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (!(env->efer & MSR_EFER_LMA)) {
+ t0 &= 0xffffffffUL;
+ }
+ cpu_x86_update_cr3(env, t0);
+ break;
+ case 4:
+ if (t0 & cr4_reserved_bits(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
+ (env->hflags & HF_CS64_MASK)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_x86_update_cr4(env, t0);
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ bql_lock();
+ cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
+ bql_unlock();
+ }
+ env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
+
+ CPUState *cs = env_cpu(env);
+ if (ctl_has_irq(env)) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ }
+ break;
+ default:
+ env->cr[reg] = t0;
+ break;
+ }
+}
+
+void helper_wrmsr(CPUX86State *env)
+{
+ uint64_t val;
+ CPUState *cs = env_cpu(env);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
+
+ val = ((uint32_t)env->regs[R_EAX]) |
+ ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = val & 0xffff;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = val;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = val;
+ break;
+ case MSR_IA32_APICBASE: {
+ int ret;
+
+ if (val & MSR_IA32_APICBASE_RESERVED) {
+ goto error;
+ }
+
+ ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val);
+ if (ret < 0) {
+ goto error;
+ }
+ break;
+ }
+ case MSR_EFER:
+ {
+ uint64_t update_mask;
+
+ update_mask = 0;
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
+ update_mask |= MSR_EFER_SCE;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ update_mask |= MSR_EFER_LME;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
+ update_mask |= MSR_EFER_NXE;
+ }
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ update_mask |= MSR_EFER_SVME;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ cpu_load_efer(env, (env->efer & ~update_mask) |
+ (val & update_mask));
+ }
+ break;
+ case MSR_STAR:
+ env->star = val;
+ break;
+ case MSR_PAT:
+ env->pat = val;
+ break;
+ case MSR_IA32_PKRS:
+ if (val & 0xFFFFFFFF00000000ull) {
+ goto error;
+ }
+ env->pkrs = val;
+ tlb_flush(cs);
+ break;
+ case MSR_VM_HSAVE_PA:
+ if (val & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ goto error;
+ }
+ env->vm_hsave = val;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ env->lstar = val;
+ break;
+ case MSR_CSTAR:
+ env->cstar = val;
+ break;
+ case MSR_FMASK:
+ env->fmask = val;
+ break;
+ case MSR_FSBASE:
+ env->segs[R_FS].base = val;
+ break;
+ case MSR_GSBASE:
+ env->segs[R_GS].base = val;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = val;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base = val;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask = val;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix64K_00000] = val;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1] = val;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3] = val;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = val;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = val;
+ break;
+ case MSR_MCG_CTL:
+ if ((env->mcg_cap & MCG_CTL_P)
+ && (val == 0 || val == ~(uint64_t)0)) {
+ env->mcg_ctl = val;
+ }
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = val;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = val;
+ break;
+ case MSR_IA32_BNDCFGS:
+ /* FIXME: #GP if reserved bits are set. */
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->msr_bndcfgs = val;
+ cpu_sync_bndcs_hflags(env);
+ break;
+ case MSR_APIC_START ... MSR_APIC_END: {
+ int ret;
+ int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+ bql_lock();
+ ret = apic_msr_write(index, val);
+ bql_unlock();
+ if (ret < 0) {
+ goto error;
+ }
+
+ break;
+ }
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ if ((offset & 0x3) != 0
+ || (val == 0 || val == ~(uint64_t)0)) {
+ env->mce_banks[offset] = val;
+ }
+ break;
+ }
+ /* XXX: exception? */
+ break;
+ }
+ return;
+error:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+ X86CPU *x86_cpu = env_archcpu(env);
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ val = env->sysenter_cs;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ val = env->sysenter_esp;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ val = env->sysenter_eip;
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(env_archcpu(env)->apic_state);
+ break;
+ case MSR_EFER:
+ val = env->efer;
+ break;
+ case MSR_STAR:
+ val = env->star;
+ break;
+ case MSR_PAT:
+ val = env->pat;
+ break;
+ case MSR_IA32_PKRS:
+ val = env->pkrs;
+ break;
+ case MSR_VM_HSAVE_PA:
+ val = env->vm_hsave;
+ break;
+ case MSR_IA32_PERF_STATUS:
+ /* tsc_increment_by_tick */
+ val = 1000ULL;
+ /* CPU multiplier */
+ val |= (((uint64_t)4ULL) << 40);
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ val = env->lstar;
+ break;
+ case MSR_CSTAR:
+ val = env->cstar;
+ break;
+ case MSR_FMASK:
+ val = env->fmask;
+ break;
+ case MSR_FSBASE:
+ val = env->segs[R_FS].base;
+ break;
+ case MSR_GSBASE:
+ val = env->segs[R_GS].base;
+ break;
+ case MSR_KERNELGSBASE:
+ val = env->kernelgsbase;
+ break;
+ case MSR_TSC_AUX:
+ val = env->tsc_aux;
+ break;
+#endif
+ case MSR_SMI_COUNT:
+ val = env->msr_smi_count;
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_MTRRcap:
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
+ MSR_MTRRcap_WC_SUPPORTED;
+ } else {
+ /* XXX: exception? */
+ val = 0;
+ }
+ break;
+ case MSR_MCG_CAP:
+ val = env->mcg_cap;
+ break;
+ case MSR_MCG_CTL:
+ if (env->mcg_cap & MCG_CTL_P) {
+ val = env->mcg_ctl;
+ } else {
+ val = 0;
+ }
+ break;
+ case MSR_MCG_STATUS:
+ val = env->mcg_status;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_IA32_BNDCFGS:
+ val = env->msr_bndcfgs;
+ break;
+ case MSR_IA32_UCODE_REV:
+ val = x86_cpu->ucode_rev;
+ break;
+ case MSR_CORE_THREAD_COUNT: {
+ val = cpu_x86_get_msr_core_thread_count(x86_cpu);
+ break;
+ }
+ case MSR_APIC_START ... MSR_APIC_END: {
+ int ret;
+ int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
+
+ bql_lock();
+ ret = apic_msr_read(index, &val);
+ bql_unlock();
+ if (ret < 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ break;
+ }
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ val = env->mce_banks[offset];
+ break;
+ }
+ /* XXX: exception? */
+ val = 0;
+ break;
+ }
+ env->regs[R_EAX] = (uint32_t)(val);
+ env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+
+void helper_flush_page(CPUX86State *env, target_ulong addr)
+{
+ tlb_flush_page(env_cpu(env), addr);
+}
+
+G_NORETURN void helper_hlt(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ do_end_instruction(env);
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void helper_monitor(CPUX86State *env, target_ulong ptr)
+{
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ /* XXX: store address? */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
+}
+
+G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ /* XXX: not complete but not completely erroneous */
+ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
+ helper_pause(env);
+ } else {
+ helper_hlt(env);
+ }
+}
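helper_wrmsr() and helper_rdmsr() above move 64-bit MSR values through the EDX:EAX register pair, and index the variable-range MTRRs by exploiting the fact that the physBase/physMask MSRs interleave. A small sketch of both calculations; the 0x200 constant stands in for MSR_MTRRphysBase(0) and the sample register values are arbitrary.

/*
 * Sketch of the EDX:EAX packing used by helper_wrmsr()/helper_rdmsr()
 * above and of the variable-MTRR index math (illustrative only).
 */
#include <assert.h>
#include <stdint.h>

#define MTRR_PHYS_BASE0 0x200u   /* stand-in for MSR_MTRRphysBase(0) */

int main(void)
{
    /* WRMSR: the 64-bit value arrives split across EDX:EAX. */
    uint32_t eax = 0x89abcdef, edx = 0x01234567;
    uint64_t val = (uint64_t)eax | ((uint64_t)edx << 32);
    assert(val == 0x0123456789abcdefULL);

    /* RDMSR: the same value is handed back as two 32-bit halves. */
    assert((uint32_t)val == eax && (uint32_t)(val >> 32) == edx);

    /* MSR_MTRRphysBase(n) = base + 2n and MSR_MTRRphysMask(n) = base + 2n + 1,
     * so dividing the offset by two recovers the register index n. */
    uint32_t ecx = MTRR_PHYS_BASE0 + 5;          /* MSR_MTRRphysMask(2) */
    assert((ecx - MTRR_PHYS_BASE0) / 2 == 2);
    return 0;
}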
diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c
new file mode 100644
index 0000000..d4ea890
--- /dev/null
+++ b/target/i386/tcg/system/seg_helper.c
@@ -0,0 +1,253 @@
+/*
+ * x86 segmentation related helpers: (system-only code)
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "tcg/helper-tcg.h"
+#include "../seg_helper.h"
+
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+ int selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ selector = (env->star >> 32) & 0xffff;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ int code64;
+
+ env->regs[R_ECX] = env->eip + next_eip_addend;
+ env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
+
+ code64 = env->hflags & HF_CS64_MASK;
+
+ env->eflags &= ~(env->fmask | RF_MASK);
+ cpu_load_eflags(env, env->eflags, 0);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ if (code64) {
+ env->eip = env->lstar;
+ } else {
+ env->eip = env->cstar;
+ }
+ } else
+#endif
+ {
+ env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
+
+ env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->star;
+ }
+}
+
+void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+
+ if (!(event_inj & SVM_EVTINJ_VALID)) {
+ int type;
+
+ if (is_int) {
+ type = SVM_EVTINJ_TYPE_SOFT;
+ } else {
+ type = SVM_EVTINJ_TYPE_EXEPT;
+ }
+ event_inj = intno | type | SVM_EVTINJ_VALID;
+ if (!rm && exception_has_error_code(intno)) {
+ event_inj |= SVM_EVTINJ_VALID_ERR;
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err),
+ error_code);
+ }
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj);
+ }
+}
+
+void x86_cpu_do_interrupt(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->exception_index == EXCP_VMEXIT) {
+ assert(env->old_exception == -1);
+ do_vmexit(env);
+ } else {
+ do_interrupt_all(cpu, cs->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip, 0);
+ /* successfully delivered */
+ env->old_exception = -1;
+ }
+}
+
+bool x86_cpu_exec_halt(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ bql_lock();
+ apic_poll_irq(x86_cpu->apic_state);
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
+ bql_unlock();
+ }
+
+ if (!cpu_has_work(cpu)) {
+ return false;
+ }
+
+ /* Complete HLT instruction. */
+ if (env->eflags & TF_MASK) {
+ env->dr[6] |= DR6_BS;
+ do_interrupt_all(x86_cpu, EXCP01_DB, 0, 0, env->eip, 0);
+ }
+ return true;
+}
+
+bool x86_need_replay_interrupt(int interrupt_request)
+{
+ /*
+ * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
+ * "real" interrupt event later. It does not need to be recorded for
+ * replay purposes.
+ */
+ return !(interrupt_request & CPU_INTERRUPT_POLL);
+}
+
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int intno;
+
+ interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+ if (!interrupt_request) {
+ return false;
+ }
+
+ /* Don't process multiple interrupt requests in a single call.
+ * This is required to make icount-driven execution deterministic.
+ */
+ switch (interrupt_request) {
+ case CPU_INTERRUPT_POLL:
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ break;
+ case CPU_INTERRUPT_SIPI:
+ do_cpu_sipi(cpu);
+ break;
+ case CPU_INTERRUPT_SMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ break;
+ case CPU_INTERRUPT_NMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ break;
+ case CPU_INTERRUPT_MCE:
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ break;
+ case CPU_INTERRUPT_HARD:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_INT,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ break;
+ case CPU_INTERRUPT_VIRQ:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+ intno = x86_ldl_phys(cs, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_INT,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl &= ~V_IRQ_MASK;
+ break;
+ }
+
+ /* Ensure that no TB jump will be modified as the program flow was changed. */
+ return true;
+}
+
+/* Check if Port I/O is allowed in the TSS. */
+void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
+{
+ uintptr_t retaddr = GETPC();
+ uint32_t io_offset, val, mask;
+
+ /* TSS must be a valid 32 bit one */
+ if (!(env->tr.flags & DESC_P_MASK) ||
+ ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+ env->tr.limit < 103) {
+ goto fail;
+ }
+ io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
+ io_offset += (addr >> 3);
+ /* Note: the check needs two bytes */
+ if ((io_offset + 1) > env->tr.limit) {
+ goto fail;
+ }
+ val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
+ val >>= (addr & 7);
+ mask = (1 << size) - 1;
+ /* all bits must be zero to allow the I/O */
+ if ((val & mask) != 0) {
+ fail:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+}
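helper_check_io() above implements the TSS I/O permission bitmap check: it locates the bitmap via the 16-bit I/O map base at offset 0x66 of the TSS, then reads two bytes so that a 1-, 2- or 4-byte port access can be validated with a single shift and mask. A standalone sketch of the same bit test over a plain byte array (illustrative only, no guest memory involved):

/*
 * Sketch of the TSS I/O-permission-bitmap test in helper_check_io()
 * above, over a plain byte array instead of guest memory.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Each bit of the bitmap covers one port; a port range is allowed only
 * if every covering bit is zero.  Reading 16 bits starting at the byte
 * containing the first port lets one check up to 8 consecutive ports
 * (1/2/4-byte accesses) with a single load, exactly as the helper does. */
static bool io_allowed(const uint8_t *bitmap, uint32_t port, uint32_t size)
{
    uint32_t word = bitmap[port >> 3] | ((uint32_t)bitmap[(port >> 3) + 1] << 8);
    uint32_t mask = (1u << size) - 1;
    return ((word >> (port & 7)) & mask) == 0;
}

int main(void)
{
    uint8_t bitmap[16] = { 0 };
    bitmap[0x60 >> 3] |= 1 << (0x60 & 7);        /* deny port 0x60      */

    printf("%d\n", io_allowed(bitmap, 0x60, 1)); /* 0: denied           */
    printf("%d\n", io_allowed(bitmap, 0x61, 1)); /* 1: allowed          */
    printf("%d\n", io_allowed(bitmap, 0x5f, 2)); /* 0: range hits 0x60  */
    return 0;
}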
diff --git a/target/i386/tcg/system/smm_helper.c b/target/i386/tcg/system/smm_helper.c
new file mode 100644
index 0000000..251eb78
--- /dev/null
+++ b/target/i386/tcg/system/smm_helper.c
@@ -0,0 +1,319 @@
+/*
+ * x86 SMM helpers (system-only)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+#include "tcg/helper-tcg.h"
+
+
+/* SMM support */
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void do_smm_enter(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ target_ulong sm_state;
+ SegmentCache *dt;
+ int i, offset;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+
+ env->msr_smi_count++;
+ env->hflags |= HF_SMM_MASK;
+ if (env->hflags2 & HF2_NMI_MASK) {
+ env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+ } else {
+ env->hflags2 |= HF2_NMI_MASK;
+ }
+
+ sm_state = env->smbase + 0x8000;
+
+#ifdef TARGET_X86_64
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ offset = 0x7e00 + i * 16;
+ x86_stw_phys(cs, sm_state + offset, dt->selector);
+ x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stq_phys(cs, sm_state + offset + 8, dt->base);
+ }
+
+ x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
+ x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
+ x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
+ x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
+ x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+ /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+ is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
+ 7EA0-7ED7 as "reserved". What's this, and what's really
+ supposed to happen? */
+ x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
+
+ x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
+ x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
+ x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
+ x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
+ x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
+ x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
+ x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
+ x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
+ for (i = 8; i < 16; i++) {
+ x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ }
+ x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
+ x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
+#else
+ x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
+ x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
+ x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
+ x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
+ x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
+ x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
+ x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
+ x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
+ x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
+ x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
+ x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
+ x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
+ x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
+ x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
+
+ x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
+
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
+ x86_stl_phys(cs, sm_state + offset + 8, dt->base);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ }
+ x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
+#endif
+ /* init SMM cpu state */
+
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, 0);
+#endif
+ cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
+ DF_MASK));
+ env->eip = 0x00008000;
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+ CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+
+ cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+ 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+}
+
+void helper_rsm(CPUX86State *env)
+{
+ X86CPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ target_ulong sm_state;
+ int i, offset;
+ uint32_t val;
+
+ sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
+
+ env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
+
+ env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
+ env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
+ env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
+
+ env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
+ env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
+ env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
+ env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
+ env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
+ env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
+ for (i = 8; i < 16; i++) {
+ env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
+ }
+ env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
+
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
+
+ for (i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ x86_lduw_phys(cs, sm_state + offset),
+ x86_ldq_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_lduw_phys(cs, sm_state + offset + 2) &
+ 0xf0ff) << 8);
+ }
+
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
+ }
+#else
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
+ env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
+ env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
+ env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
+ env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+
+ env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
+ env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
+ env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
+
+ env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
+
+ for (i = 0; i < 6; i++) {
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ cpu_x86_load_seg_cache(env, i,
+ x86_ldl_phys(cs,
+ sm_state + 0x7fa8 + i * 4) & 0xffff,
+ x86_ldl_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_ldl_phys(cs,
+ sm_state + offset) & 0xf0ff) << 8);
+ }
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
+
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
+ }
+#endif
+ if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+ env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+ env->hflags &= ~HF_SMM_MASK;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+}
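The 64-bit save/restore code above addresses the general-purpose registers with a single formula, sm_state + 0x7ff8 - 8 * i where sm_state = smbase + 0x8000, and only relocates SMBASE when the revision ID advertises that capability. A brief sketch of both details; the 0x30000 value is the architectural reset default for SMBASE and is used here only as an example.

/*
 * Sketch of the 64-bit SMRAM state-save offsets used in do_smm_enter()
 * and helper_rsm() above (illustrative; register indices follow the
 * R_EAX..R_EDI, R8..R15 order of env->regs[]).
 */
#include <assert.h>
#include <stdint.h>

/* GPR i is saved at smbase + 0x8000 + 0x7ff8 - 8*i, so RAX sits at the
 * top of the save area and R15 at the bottom of the GPR block. */
static uint64_t gpr_save_addr(uint64_t smbase, int i)
{
    return smbase + 0x8000 + 0x7ff8 - 8 * (uint64_t)i;
}

int main(void)
{
    uint64_t smbase = 0x30000;                                      /* reset default SMBASE */
    assert(gpr_save_addr(smbase, 0) == 0x30000 + 0x8000 + 0x7ff8);  /* RAX */
    assert(gpr_save_addr(smbase, 15) == 0x30000 + 0x8000 + 0x7f80); /* R15 */

    /* RSM only reloads SMBASE when bit 17 of the revision ID advertises
     * SMBASE relocation, which the 0x0002xxxx IDs above do. */
    uint32_t revision_id = 0x00020064;
    assert(revision_id & 0x20000);
    return 0;
}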
diff --git a/target/i386/tcg/system/svm_helper.c b/target/i386/tcg/system/svm_helper.c
new file mode 100644
index 0000000..b27049b
--- /dev/null
+++ b/target/i386/tcg/system/svm_helper.c
@@ -0,0 +1,926 @@
+/*
+ * x86 SVM helpers (system only)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "tcg/helper-tcg.h"
+
+/* Secure Virtual Machine helpers */
+
+static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
+ const SegmentCache *sc)
+{
+ cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
+ sc->selector, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
+ sc->base, mmu_idx, 0);
+ cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
+ sc->limit, mmu_idx, 0);
+ cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
+ ((sc->flags >> 8) & 0xff)
+ | ((sc->flags >> 12) & 0x0f00),
+ mmu_idx, 0);
+}
+
+/*
+ * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
+ * addresses in the segment registers that have been loaded.
+ */
+static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
+{
+ uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
+ *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
+}
+
+static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
+ SegmentCache *sc)
+{
+ unsigned int flags;
+
+ sc->selector =
+ cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
+ mmu_idx, 0);
+ sc->base =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
+ mmu_idx, 0);
+ sc->limit =
+ cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
+ mmu_idx, 0);
+ flags =
+ cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
+ mmu_idx, 0);
+ sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+
+ svm_canonicalization(env, &sc->base);
+}
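svm_save_seg() and svm_load_seg() above convert between QEMU's descriptor-style segment flags and the packed 12-bit attrib field of a vmcb_seg. The conversion only drops limit and reserved bits, so it round-trips; a standalone sketch follows, with an assumed 64-bit code-segment encoding as the example (illustrative only, not QEMU code).

/*
 * Sketch of the vmcb_seg attrib packing used by svm_save_seg() /
 * svm_load_seg() above.
 */
#include <assert.h>
#include <stdint.h>

/* QEMU keeps segment attributes in descriptor layout: access byte in
 * flags[15:8], the G/D/L/AVL nibble in flags[23:20].  The VMCB attrib
 * field packs the same twelve bits contiguously into attrib[11:0]. */
static uint16_t flags_to_attrib(uint32_t flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static uint32_t attrib_to_flags(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}

int main(void)
{
    /* 64-bit code segment: present, non-system, type 0xb, L, G. */
    uint32_t flags = (1u << 15) | (1u << 12) | (0xbu << 8)   /* P, S, type */
                   | (1u << 21) | (1u << 23);                /* L, G       */
    uint16_t attrib = flags_to_attrib(flags);

    /* The packing only drops limit/reserved bits, so it round-trips. */
    assert(attrib_to_flags(attrib) == flags);
    return 0;
}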
+
+static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
+ hwaddr addr, int seg_reg)
+{
+ SegmentCache sc;
+
+ svm_load_seg(env, mmu_idx, addr, &sc);
+ cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
+ sc.base, sc.limit, sc.flags);
+}
+
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+ if (!(env->efer & MSR_EFER_SVME)) {
+ return true;
+ }
+
+ if (env->efer & MSR_EFER_RESERVED) {
+ return true;
+ }
+
+ if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+ !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[4] & CR4_PAE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[0] & CR0_PE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && (env->cr[4] & CR4_PAE_MASK)
+ && (env->segs[R_CS].flags & DESC_L_MASK)
+ && (env->segs[R_CS].flags & DESC_B_MASK)) {
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool virtual_gif_enabled(CPUX86State *env)
+{
+ if (likely(env->hflags & HF_GUEST_MASK)) {
+ return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
+ && (env->int_ctl & V_GIF_ENABLED_MASK);
+ }
+ return false;
+}
+
+static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
+{
+ uint64_t lbr_ctl;
+
+ if (likely(env->hflags & HF_GUEST_MASK)) {
+ if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
+ cpu_vmexit(env, exit_code, 0, retaddr);
+ }
+
+ lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
+ control.lbr_ctl));
+ return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
+ && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
+
+ }
+
+ return false;
+}
+
+static inline bool virtual_gif_set(CPUX86State *env)
+{
+ return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
+}
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+ X86CPU *cpu = env_archcpu(env);
+ target_ulong addr;
+ uint64_t nested_ctl;
+ uint32_t event_inj;
+ uint32_t asid;
+ uint64_t new_cr0;
+ uint64_t new_cr3;
+ uint64_t new_cr4;
+ uint64_t new_dr6;
+ uint64_t new_dr7;
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ /* Exceptions are checked before the intercept. */
+ if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
+
+ env->vm_vmcb = addr;
+
+ /* save the current CPU state in the hsave page */
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
+ env->eip + next_eip_addend);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+
+ /*
+ * Load the interception bitmaps so we do not need to access the
+ * vmcb in svm mode.
+ */
+ env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.intercept));
+ env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_read));
+ env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_write));
+ env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_read));
+ env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_write));
+ env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_exceptions
+ ));
+
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ if (x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.int_state)) &
+ SVM_INTERRUPT_SHADOW_MASK) {
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+ }
+
+ nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.nested_ctl));
+ asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.asid));
+
+ uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+
+ if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ env->nested_pg_mode = 0;
+
+ if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (asid == 0) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ if (nested_ctl & SVM_NPT_ENABLED) {
+ env->nested_cr3 = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ control.nested_cr3));
+ env->hflags2 |= HF2_NPT_MASK;
+
+ env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
+
+ tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
+ }
+
+ /* enable intercepts */
+ env->hflags |= HF_GUEST_MASK;
+
+ env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.tsc_offset));
+
+ new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
+ if (new_cr0 & SVM_CR0_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
+ if ((env->efer & MSR_EFER_LMA) &&
+ (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
+ if (new_cr4 & cr4_reserved_bits(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ /* clear exit_info_2 so we behave like the real hardware */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+ cpu_x86_update_cr0(env, new_cr0);
+ cpu_x86_update_cr4(env, new_cr4);
+ cpu_x86_update_cr3(env, new_cr3);
+ env->cr[2] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+ env->int_ctl = x86_ldl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ if (env->int_ctl & V_INTR_MASKING_MASK) {
+ env->hflags2 |= HF2_VINTR_MASK;
+ if (env->eflags & IF_MASK) {
+ env->hflags2 |= HF2_HIF_MASK;
+ }
+ }
+
+ cpu_load_efer(env,
+ x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
+ svm_load_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
+ svm_load_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rip));
+
+ env->regs[R_ESP] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax));
+
+ new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+ new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+
+#ifdef TARGET_X86_64
+ if (new_dr7 & DR_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (new_dr6 & DR_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+#endif
+
+ cpu_x86_update_dr7(env, new_dr7);
+ env->dr[6] = new_dr6;
+
+ if (is_efer_invalid_state(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ switch (x86_ldub_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+ case TLB_CONTROL_DO_NOTHING:
+ break;
+ case TLB_CONTROL_FLUSH_ALL_ASID:
+ /* FIXME: this is not 100% correct but should work for now */
+ tlb_flush(cs);
+ break;
+ }
+
+ env->hflags2 |= HF2_GIF_MASK;
+
+ if (ctl_has_irq(env)) {
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ }
+
+ if (virtual_gif_set(env)) {
+ env->hflags2 |= HF2_VGIF_MASK;
+ }
+
+ /* maybe we need to inject an event */
+ event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+ if (event_inj & SVM_EVTINJ_VALID) {
+ uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+ uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+ uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj_err));
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
+ /* FIXME: need to implement valid_err */
+ switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+ case SVM_EVTINJ_TYPE_INTR:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
+ /* XXX: is it always correct? */
+ do_interrupt_x86_hardirq(env, vector, 1);
+ break;
+ case SVM_EVTINJ_TYPE_NMI:
+ cs->exception_index = EXCP02_NMI;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_EXEPT:
+ if (vector == EXCP02_NMI || vector >= 31) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_SOFT:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 1;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
+ cpu_loop_exit(cs);
+ break;
+ default:
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ break;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
+ env->error_code);
+ }
+}
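The #GP check near the top of helper_vmrun() (and repeated in helper_vmload()/helper_vmsave() below) rejects a VMCB address that is not page aligned or does not fit in the implemented physical address width. A standalone sketch, assuming 40 physical address bits purely for the illustration; vmcb_addr_faults() is a name invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vmcb_addr_faults(uint64_t addr, unsigned phys_bits)
{
    /* Low 12 bits must be clear (page aligned), high bits must be zero. */
    return (addr & (0xfff | ((~0ULL) << phys_bits))) != 0;
}

int main(void)
{
    printf("%d\n", vmcb_addr_faults(0x12345000ull, 40));     /* 0: accepted */
    printf("%d\n", vmcb_addr_faults(0x12345800ull, 40));     /* 1: not page aligned */
    printf("%d\n", vmcb_addr_faults(0x100000000000ull, 40)); /* 1: beyond phys_bits */
    return 0;
}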
+
+void helper_vmmcall(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+ int mmu_idx = MMU_PHYS_IDX;
+ target_ulong addr;
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ /* Exceptions are checked before the intercept. */
+ if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
+
+ if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
+ mmu_idx = MMU_NESTED_IDX;
+ }
+
+ svm_load_seg_cache(env, mmu_idx,
+ addr + offsetof(struct vmcb, save.fs), R_FS);
+ svm_load_seg_cache(env, mmu_idx,
+ addr + offsetof(struct vmcb, save.gs), R_GS);
+ svm_load_seg(env, mmu_idx,
+ addr + offsetof(struct vmcb, save.tr), &env->tr);
+ svm_load_seg(env, mmu_idx,
+ addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
+
+#ifdef TARGET_X86_64
+ env->kernelgsbase =
+ cpu_ldq_mmuidx_ra(env,
+ addr + offsetof(struct vmcb, save.kernel_gs_base),
+ mmu_idx, 0);
+ env->lstar =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
+ mmu_idx, 0);
+ env->cstar =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
+ mmu_idx, 0);
+ env->fmask =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
+ mmu_idx, 0);
+ svm_canonicalization(env, &env->kernelgsbase);
+#endif
+ env->star =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
+ mmu_idx, 0);
+ env->sysenter_cs =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
+ mmu_idx, 0);
+ env->sysenter_esp =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
+ mmu_idx, 0);
+ env->sysenter_eip =
+ cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
+ mmu_idx, 0);
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+ int mmu_idx = MMU_PHYS_IDX;
+ target_ulong addr;
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ /* Exceptions are checked before the intercept. */
+ if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
+
+ if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
+ mmu_idx = MMU_NESTED_IDX;
+ }
+
+ svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
+ &env->segs[R_FS]);
+ svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
+ &env->segs[R_GS]);
+ svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
+ &env->tr);
+ svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
+ &env->ldt);
+
+#ifdef TARGET_X86_64
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
+ env->kernelgsbase, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
+ env->lstar, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
+ env->cstar, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
+ env->fmask, mmu_idx, 0);
+#endif
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
+ env->star, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
+ env->sysenter_cs, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
+ env->sysenter_esp, mmu_idx, 0);
+ cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
+ env->sysenter_eip, mmu_idx, 0);
+}
+
+void helper_stgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
+
+ if (virtual_gif_enabled(env)) {
+ env->int_ctl |= V_GIF_MASK;
+ env->hflags2 |= HF2_VGIF_MASK;
+ } else {
+ env->hflags2 |= HF2_GIF_MASK;
+ }
+}
+
+void helper_clgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
+
+ if (virtual_gif_enabled(env)) {
+ env->int_ctl &= ~V_GIF_MASK;
+ env->hflags2 &= ~HF2_VGIF_MASK;
+ } else {
+ env->hflags2 &= ~HF2_GIF_MASK;
+ }
+}
+
+bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
+{
+ switch (type) {
+ case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+ if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+ if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+ if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+ if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+ if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+ return true;
+ }
+ break;
+ default:
+ if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param, uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (likely(!(env->hflags & HF_GUEST_MASK))) {
+ return;
+ }
+
+ if (!cpu_svm_has_intercept(env, type)) {
+ return;
+ }
+
+ if (type == SVM_EXIT_MSR) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint32_t t0, t1;
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case 0 ... 0x1fff:
+ t0 = (env->regs[R_ECX] * 2) % 8;
+ t1 = (env->regs[R_ECX] * 2) / 8;
+ break;
+ case 0xc0000000 ... 0xc0001fff:
+ t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ case 0xc0010000 ... 0xc0011fff:
+ t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ default:
+ cpu_vmexit(env, type, param, retaddr);
+ t0 = 0;
+ t1 = 0;
+ break;
+ }
+ if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
+ cpu_vmexit(env, type, param, retaddr);
+ }
+ return;
+ }
+
+ cpu_vmexit(env, type, param, retaddr);
+}
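A worked example of the MSR permission bitmap lookup above: each MSR occupies two adjacent bits (read first, then write), and the three architectural MSR ranges land in different parts of the bitmap. The `(1 << param) << t0` test suggests param is 0 for reads and 1 for writes; that assumption, and all names below, are local to this sketch.

#include <stdint.h>
#include <stdio.h>

/* Compute the MSRPM byte offset and the bit number tested above. */
static void msrpm_index(uint32_t msr, int write,
                        uint32_t *byte_offset, uint32_t *bit)
{
    uint32_t t0, t1;

    if (msr <= 0x1fff) {
        t0 = (msr * 2) % 8;
        t1 = (msr * 2) / 8;
    } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
        t0 = (8192 + msr - 0xc0000000) * 2;
        t1 = t0 / 8;
        t0 %= 8;
    } else {                      /* assume 0xc0010000 ... 0xc0011fff */
        t0 = (16384 + msr - 0xc0010000) * 2;
        t1 = t0 / 8;
        t0 %= 8;
    }
    *byte_offset = t1;
    *bit = t0 + write;
}

int main(void)
{
    uint32_t off, bit;

    msrpm_index(0xc0000080 /* EFER */, 1 /* write */, &off, &bit);
    printf("EFER write: byte %u, bit %u\n", off, bit);   /* byte 2080, bit 1 */
    return 0;
}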
+
+void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
+{
+ cpu_svm_check_intercept_param(env, type, 0, GETPC());
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+ uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
+
+ if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
+ /* next env->eip */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ env->eip + next_eip_addend);
+ cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
+ }
+ }
+}
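helper_svm_check_io() builds a run of bits because the I/O permission bitmap carries one bit per port, so a multi-byte access must test adjacent bits; the access size comes from bits 4..6 of the exit-info parameter. A standalone sketch of the lookup, taking the size in bytes directly; every name here is local to the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Load 16 bits at port / 8 so a bit run crossing a byte boundary is seen,
 * which is what the x86_lduw_phys() above achieves. */
static bool iopm_intercepted(const uint8_t *iopm, uint16_t port, unsigned size_bytes)
{
    uint16_t mask = (1u << size_bytes) - 1;
    uint16_t window = iopm[port / 8] | ((uint16_t)iopm[port / 8 + 1] << 8);

    return (window & (mask << (port & 7))) != 0;
}

int main(void)
{
    static uint8_t iopm[8192 + 2];

    iopm[0x3f8 / 8] = 0x01;                              /* intercept port 0x3f8 */
    printf("%d\n", iopm_intercepted(iopm, 0x3f8, 1));    /* 1 */
    printf("%d\n", iopm_intercepted(iopm, 0x3f9, 1));    /* 0 */
    printf("%d\n", iopm_intercepted(iopm, 0x3f7, 2));    /* 1: word access overlaps */
    return 0;
}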
+
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
+ uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cpu_restore_state(cs, retaddr);
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
+ PRIx64 ", " TARGET_FMT_lx ")!\n",
+ exit_code, exit_info_1,
+ x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_2)),
+ env->eip);
+
+ cs->exception_index = EXCP_VMEXIT;
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+ exit_code);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_1), exit_info_1);
+
+ /* remove any pending exception */
+ env->old_exception = -1;
+ cpu_loop_exit(cs);
+}
+
+void do_vmexit(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+ SVM_INTERRUPT_SHADOW_MASK);
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ } else {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+ }
+ env->hflags2 &= ~HF2_NPT_MASK;
+ tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
+
+ /* Save the VM state in the vmcb */
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, MMU_PHYS_IDX,
+ env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
+ env->eip);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+ env->hflags & HF_CPL_MASK);
+
+ /* Reload the host state from vm_hsave */
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ env->hflags &= ~HF_GUEST_MASK;
+ env->intercept = 0;
+ env->intercept_exceptions = 0;
+
+ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl = 0;
+
+ /* Clears the TSC_OFFSET inside the processor. */
+ env->tsc_offset = 0;
+
+ env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr0)) |
+ CR0_PE_MASK);
+ cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr4)));
+
+ /*
+ * Resets the current ASID register to zero (host ASID; TLB flush).
+ *
+ * If the host is in PAE mode, the processor reloads the host's PDPEs
+ * from the page table indicated by the host's CR3. FIXME: If the PDPEs
+ * contain illegal state, the processor causes a shutdown (QEMU does
+ * not implement PDPTRs).
+ */
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr3)));
+ /*
+ * We need to set EFER after the CRs so that the hidden flags get
+ * set properly.
+ */
+ cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.efer)));
+
+ /* Completion of the VMRUN instruction clears the host EFLAGS.RF bit. */
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+ RF_MASK | VM_MASK));
+
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
+ svm_load_seg_cache(env, MMU_PHYS_IDX,
+ env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rip));
+ env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rax));
+
+ env->dr[6] = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6));
+
+ /* Disables all breakpoints in the host DR7 register. */
+ cpu_x86_update_dr7(env,
+ x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff);
+
+ /* Forward any pending event injection into exit_int_info and clear it. */
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+
+ env->hflags2 &= ~HF2_GIF_MASK;
+ env->hflags2 &= ~HF2_VGIF_MASK;
+
+ /* FIXME: Checks the reloaded host state for consistency. */
+
+ /*
+ * EFLAGS.TF causes a #DB trap after the VMRUN completes on the host
+ * side (i.e., after the #VMEXIT from the guest). Since we're running
+ * in the main loop, call do_interrupt_all directly.
+ */
+ if ((env->eflags & TF_MASK) != 0) {
+ env->dr[6] |= DR6_BS;
+ do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0);
+ }
+}
diff --git a/target/i386/tcg/system/tcg-cpu.c b/target/i386/tcg/system/tcg-cpu.c
new file mode 100644
index 0000000..0538a4f
--- /dev/null
+++ b/target/i386/tcg/system/tcg-cpu.c
@@ -0,0 +1,84 @@
+/*
+ * i386 TCG cpu class initialization functions specific to system emulation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/helper-tcg.h"
+
+#include "system/system.h"
+#include "qemu/units.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
+
+#include "tcg/tcg-cpu.h"
+
+static void tcg_cpu_machine_done(Notifier *n, void *unused)
+{
+ X86CPU *cpu = container_of(n, X86CPU, machine_done);
+ MemoryRegion *smram =
+ (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+ if (smram) {
+ cpu->smram = g_new(MemoryRegion, 1);
+ memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
+ smram, 0, 4 * GiB);
+ memory_region_set_enabled(cpu->smram, true);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
+ cpu->smram, 1);
+ }
+}
+
+bool tcg_cpu_realizefn(CPUState *cs, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ /*
+ * The realize order is important, since x86_cpu_realize() checks if
+ * nothing else has been set by the user (or by accelerators) in
+ * cpu->ucode_rev and cpu->phys_bits, and the memory regions
+ * initialized here are needed for the vcpu initialization.
+ *
+ * realize order:
+ * tcg_cpu -> host_cpu -> x86_cpu
+ */
+ cpu->cpu_as_mem = g_new(MemoryRegion, 1);
+ cpu->cpu_as_root = g_new(MemoryRegion, 1);
+
+ /* Outer container... */
+ memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
+ memory_region_set_enabled(cpu->cpu_as_root, true);
+
+ /*
+ * ... with two regions inside: normal system memory with low
+ * priority, and...
+ */
+ memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
+ get_system_memory(), 0, ~0ull);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
+ memory_region_set_enabled(cpu->cpu_as_mem, true);
+
+ cs->num_ases = 2;
+ cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
+ cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
+
+ /* ... SMRAM with higher priority, linked from /machine/smram. */
+ cpu->machine_done.notify = tcg_cpu_machine_done;
+ qemu_add_machine_init_done_notifier(&cpu->machine_done);
+ return true;
+}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index cca19cd..6f5dc06 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -21,8 +21,10 @@
#include "cpu.h"
#include "helper-tcg.h"
#include "qemu/accel.h"
-#include "hw/core/accel-cpu.h"
-
+#include "accel/accel-cpu-target.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg-cpu.h"
/* Frob eflags into and out of the CPU temporary format. */
@@ -46,6 +48,25 @@ static void x86_cpu_exec_exit(CPUState *cs)
env->eflags = cpu_compute_eflags(env);
}
+static TCGTBCPUState x86_get_tb_cpu_state(CPUState *cs)
+{
+ CPUX86State *env = cpu_env(cs);
+ uint32_t flags, cs_base;
+ vaddr pc;
+
+ flags = env->hflags |
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+ if (env->hflags & HF_CS64_MASK) {
+ cs_base = 0;
+ pc = env->eip;
+ } else {
+ cs_base = env->segs[R_CS].base;
+ pc = (uint32_t)(cs_base + env->eip);
+ }
+
+ return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
+}
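A tiny standalone illustration of the non-64-bit branch above: the linear PC is CS.base + EIP, truncated to 32 bits. The reset-vector values used here are just an example input, not taken from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cs_base = 0xffff0000;   /* e.g. the reset CS base */
    uint64_t eip = 0xfff0;           /* reset offset */

    /* Outside 64-bit code segments the PC is CS.base + EIP, kept to 32 bits. */
    printf("pc = 0x%08" PRIx32 "\n", (uint32_t)(cs_base + eip));   /* 0xfffffff0 */
    return 0;
}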
+
static void x86_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -93,6 +114,23 @@ static void x86_restore_state_to_opc(CPUState *cs,
}
}
+int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
+{
+ int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
+ int mmu_index_base =
+ pl == 3 ? MMU_USER64_IDX :
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
+
+ return mmu_index_base + mmu_index_32;
+}
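A plain-C sketch of the decision x86_mmu_index_pl() makes before adding the 32-bit offset. The function and parameter names below are local stand-ins (the real code reads HF_SMAP_MASK from hflags and AC_MASK from eflags), and the actual MMU index constants are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Which protection regime an access falls into, mirroring mmu_index_base. */
static const char *mmu_class(unsigned pl, bool smap_enabled, bool eflags_ac)
{
    if (pl == 3) {
        return "user";
    }
    /* SMAP checks are skipped when SMAP is off or EFLAGS.AC overrides it. */
    return (!smap_enabled || eflags_ac) ? "kernel, no SMAP checks"
                                        : "kernel, SMAP checks";
}

int main(void)
{
    printf("%s\n", mmu_class(3, true, false));
    printf("%s\n", mmu_class(0, true, false));
    printf("%s\n", mmu_class(0, true, true));
    return 0;
}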
+
+static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ CPUX86State *env = cpu_env(cs);
+ return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
+}
+
#ifndef CONFIG_USER_ONLY
static bool x86_debug_check_breakpoint(CPUState *cs)
{
@@ -102,14 +140,36 @@ static bool x86_debug_check_breakpoint(CPUState *cs)
/* RF disables all architectural breakpoints. */
return !(env->eflags & RF_MASK);
}
-#endif
-#include "hw/core/tcg-cpu-ops.h"
+static void x86_cpu_exec_reset(CPUState *cs)
+{
+ CPUArchState *env = cpu_env(cs);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
+ do_cpu_init(env_archcpu(env));
+ cs->exception_index = EXCP_HALTED;
+}
+
+static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return cpu_env(cs)->hflags & HF_CS64_MASK ? result : (uint32_t)result;
+}
+#endif
-static const TCGCPUOps x86_tcg_ops = {
+const TCGCPUOps x86_tcg_ops = {
+ .mttcg_supported = true,
+ .precise_smc = true,
+ /*
+ * The x86 has a strong memory model with some store-after-load reordering.
+ */
+ .guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD,
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
+ .get_tb_cpu_state = x86_get_tb_cpu_state,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
+ .mmu_index = x86_cpu_mmu_index,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
#ifdef CONFIG_USER_ONLY
@@ -118,9 +178,11 @@ static const TCGCPUOps x86_tcg_ops = {
.record_sigbus = x86_cpu_record_sigbus,
#else
.tlb_fill = x86_cpu_tlb_fill,
+ .pointer_wrap = x86_pointer_wrap,
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_halt = x86_cpu_exec_halt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
+ .cpu_exec_reset = x86_cpu_exec_reset,
.do_unaligned_access = x86_cpu_do_unaligned_access,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
@@ -128,17 +190,6 @@ static const TCGCPUOps x86_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
-static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
-{
- /* for x86, all cpus use the same set of operations */
- cc->tcg_ops = &x86_tcg_ops;
-}
-
-static void x86_tcg_cpu_class_init(CPUClass *cc)
-{
- cc->init_accel_cpu = x86_tcg_cpu_init_ops;
-}
-
static void x86_tcg_cpu_xsave_init(void)
{
#define XO(bit, field) \
@@ -179,7 +230,7 @@ static void x86_tcg_cpu_instance_init(CPUState *cs)
x86_tcg_cpu_xsave_init();
}
-static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
@@ -187,7 +238,6 @@ static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* CONFIG_USER_ONLY */
- acc->cpu_class_init = x86_tcg_cpu_class_init;
acc->cpu_instance_init = x86_tcg_cpu_instance_init;
}
static const TypeInfo x86_tcg_cpu_accel_type_info = {
diff --git a/target/i386/tcg/tcg-cpu.h b/target/i386/tcg/tcg-cpu.h
index 53a8494..85bcd61 100644
--- a/target/i386/tcg/tcg-cpu.h
+++ b/target/i386/tcg/tcg-cpu.h
@@ -19,6 +19,8 @@
#ifndef TCG_CPU_H
#define TCG_CPU_H
+#include "cpu.h"
+
#define XSAVE_FCW_FSW_OFFSET 0x000
#define XSAVE_FTW_FOP_OFFSET 0x004
#define XSAVE_CWD_RIP_OFFSET 0x008
@@ -76,6 +78,10 @@ QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFF
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);
+extern const TCGCPUOps x86_tcg_ops;
+
bool tcg_cpu_realizefn(CPUState *cs, Error **errp);
+int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
+
#endif /* TCG_CPU_H */
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 95bad55..0cb87d0 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -20,15 +20,18 @@
#include "qemu/host-utils.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
+#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"
+#include "decode-new.h"
#include "exec/log.h"
@@ -111,7 +114,6 @@ typedef struct DisasContext {
#endif
bool vex_w; /* used by AVX even on 32-bit processors */
bool jmp_opt; /* use direct block chaining for direct jumps */
- bool repz_opt; /* optimize jumps within repz instructions */
bool cc_op_dirty;
CCOp cc_op; /* current CC operation */
@@ -133,10 +135,7 @@ typedef struct DisasContext {
TCGv T1;
/* TCG local register indexes (only used inside old micro ops) */
- TCGv tmp0;
- TCGv tmp4;
TCGv_i32 tmp2_i32;
- TCGv_i32 tmp3_i32;
TCGv_i64 tmp1_i64;
sigjmp_buf jmpbuf;
@@ -227,7 +226,7 @@ typedef struct DisasContext {
#endif
/*
- * Many sysemu-only helpers are not reachable for user-only.
+ * Many system-only helpers are not reachable for user-only.
* Define stub generators here, so that we need not either sprinkle
* ifdefs through the translator, nor provide the helper function.
*/
@@ -290,7 +289,7 @@ enum {
};
/* Bit set if the global variable is live after setting CC_OP to X. */
-static const uint8_t cc_op_live[CC_OP_NB] = {
+static const uint8_t cc_op_live_[] = {
[CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
[CC_OP_EFLAGS] = USES_CC_SRC,
[CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
@@ -304,13 +303,28 @@ static const uint8_t cc_op_live[CC_OP_NB] = {
[CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
[CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
[CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
[CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
[CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
[CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
- [CC_OP_CLR] = 0,
[CC_OP_POPCNT] = USES_CC_DST,
};
+static uint8_t cc_op_live(CCOp op)
+{
+ uint8_t result;
+ assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
+
+ /*
+ * Check that the array is fully populated. A zero entry would correspond
+ * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
+ * as well.
+ */
+ result = cc_op_live_[op];
+ assert(result);
+ return result;
+}
+
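For context, the liveness bits returned by cc_op_live() are used just below in set_cc_op_1() to discard TCG globals that the new CC_OP no longer needs. A toy standalone version of that computation, with locally defined stand-in bit values.

#include <stdio.h>

/* Local stand-ins for the USES_CC_* bits; the values are illustrative only. */
#define USES_CC_DST  1
#define USES_CC_SRC  2
#define USES_CC_SRC2 4

int main(void)
{
    int old_live = USES_CC_DST | USES_CC_SRC;   /* e.g. an ADD-family CC_OP */
    int new_live = USES_CC_SRC;                 /* e.g. CC_OP_EFLAGS */
    int dead = old_live & ~new_live;            /* same idea as set_cc_op_1() */

    printf("can discard: dst=%d src=%d src2=%d\n",
           !!(dead & USES_CC_DST), !!(dead & USES_CC_SRC),
           !!(dead & USES_CC_SRC2));
    return 0;
}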
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
int dead;
@@ -320,7 +334,7 @@ static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
}
/* Discard CC computation that will no longer be used. */
- dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
+ dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
if (dead & USES_CC_DST) {
tcg_gen_discard_tl(cpu_cc_dst);
}
@@ -469,7 +483,7 @@ static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
- tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
+ tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
} else {
tcg_gen_mov_tl(t0, cpu_regs[reg]);
}
@@ -489,17 +503,24 @@ static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
s->pc_save = -1;
}
-static inline
-void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
+static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
- tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
- gen_op_mov_reg_v(s, size, reg, s->tmp0);
+ /* Using cpu_regs[reg] does not work for xH registers. */
+ assert(size >= MO_16);
+ if (size == MO_16) {
+ TCGv temp = tcg_temp_new();
+ tcg_gen_add_tl(temp, cpu_regs[reg], val);
+ gen_op_mov_reg_v(s, size, reg, temp);
+ } else {
+ tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val);
+ tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size);
+ }
}
-static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
+static inline
+void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
- tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
- gen_op_mov_reg_v(s, size, reg, s->tmp0);
+ gen_op_add_reg(s, size, reg, tcg_constant_tl(val));
}
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
@@ -672,14 +693,6 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}
-static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
-{
- TCGv dshift = tcg_temp_new();
- tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
- tcg_gen_shli_tl(dshift, dshift, ot);
- return dshift;
-};
-
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
if (size == MO_TL) {
@@ -692,11 +705,6 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
return dst;
}
-static void gen_exts(MemOp ot, TCGv reg)
-{
- gen_ext_tl(reg, reg, ot, true);
-}
-
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
@@ -714,6 +722,46 @@ static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
gen_op_j_ecx(s, TCG_COND_NE, label1);
}
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+ if ((s->flags & mask) == 0) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ori_i32(t, t, mask);
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
+ s->flags |= mask;
+ }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+ if (s->flags & mask) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
+ tcg_gen_andi_i32(t, t, ~mask);
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
+ s->flags &= ~mask;
+ }
+}
+
+static void gen_set_eflags(DisasContext *s, target_ulong mask)
+{
+ TCGv t = tcg_temp_new();
+
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
+ tcg_gen_ori_tl(t, t, mask);
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
+}
+
+static void gen_reset_eflags(DisasContext *s, target_ulong mask)
+{
+ TCGv t = tcg_temp_new();
+
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
+ tcg_gen_andi_tl(t, t, ~mask);
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
+}
+
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
switch (ot) {
@@ -781,16 +829,13 @@ static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
#endif
}
-static void gen_movs(DisasContext *s, MemOp ot)
+static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift)
{
- TCGv dshift;
-
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
gen_string_movl_A0_EDI(s);
gen_op_st_v(s, ot, s->T0, s->A0);
- dshift = gen_compute_Dshift(s, ot);
gen_op_add_reg(s, s->aflag, R_ESI, dshift);
gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
@@ -806,17 +851,13 @@ static void gen_mov_eflags(DisasContext *s, TCGv reg)
tcg_gen_mov_tl(reg, cpu_cc_src);
return;
}
- if (s->cc_op == CC_OP_CLR) {
- tcg_gen_movi_tl(reg, CC_Z | CC_P);
- return;
- }
dst = cpu_cc_dst;
src1 = cpu_cc_src;
src2 = cpu_cc_src2;
/* Take care to not read values that are not live. */
- live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
+ live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
if (dead) {
TCGv zero = tcg_constant_tl(0);
@@ -865,6 +906,18 @@ static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
}
}
+static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
+{
+ if (size == MO_TL) {
+ return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
+ .reg = src };
+ } else {
+ return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
+ .imm = MAKE_64BIT_MASK(0, 8 << size),
+ .reg = src };
+ }
+}
+
/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
@@ -874,21 +927,20 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
case CC_OP_SUBB ... CC_OP_SUBQ:
/* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_SUBB;
- gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
- gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
+ tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
+ tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
case CC_OP_ADDB ... CC_OP_ADDQ:
/* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
- size = s->cc_op - CC_OP_ADDB;
- gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
- gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
+ size = cc_op_size(s->cc_op);
+ tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
+ tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
.reg2 = cpu_cc_src, .use_reg2 = true };
case CC_OP_LOGICB ... CC_OP_LOGICQ:
- case CC_OP_CLR:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
@@ -899,7 +951,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
case CC_OP_SHLB ... CC_OP_SHLQ:
/* (CC_SRC >> (DATA_BITS - 1)) & 1 */
- size = s->cc_op - CC_OP_SHLB;
+ size = cc_op_size(s->cc_op);
return gen_prepare_sign_nz(cpu_cc_src, size);
case CC_OP_MULB ... CC_OP_MULQ:
@@ -907,9 +959,12 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
.reg = cpu_cc_src };
case CC_OP_BMILGB ... CC_OP_BMILGQ:
- size = s->cc_op - CC_OP_BMILGB;
- gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
- return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
+ size = cc_op_size(s->cc_op);
+ return gen_prepare_val_nz(cpu_cc_src, size, true);
+
+ case CC_OP_BLSIB ... CC_OP_BLSIQ:
+ size = cc_op_size(s->cc_op);
+ return gen_prepare_val_nz(cpu_cc_src, size, false);
case CC_OP_ADCX:
case CC_OP_ADCOX:
@@ -957,14 +1012,10 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
.imm = CC_S };
- case CC_OP_CLR:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
default:
- {
- MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
- return gen_prepare_sign_nz(cpu_cc_dst, size);
- }
+ return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
}
}
@@ -976,7 +1027,7 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
.no_setcond = true };
- case CC_OP_CLR:
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
case CC_OP_MULB ... CC_OP_MULQ:
@@ -992,26 +1043,25 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
switch (s->cc_op) {
- case CC_OP_DYNAMIC:
- gen_compute_eflags(s);
- /* FALLTHRU */
case CC_OP_EFLAGS:
case CC_OP_ADCX:
case CC_OP_ADOX:
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
.imm = CC_Z };
- case CC_OP_CLR:
- return (CCPrepare) { .cond = TCG_COND_ALWAYS };
+ case CC_OP_DYNAMIC:
+ gen_update_cc_op(s);
+ if (!reg) {
+ reg = tcg_temp_new();
+ }
+ gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
default:
{
- MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
- if (size == MO_TL) {
- return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
- } else {
- return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
- .imm = (1ull << (8 << size)) - 1 };
- }
+ MemOp size = cc_op_size(s->cc_op);
+ return gen_prepare_val_nz(cpu_cc_dst, size, true);
}
}
}
@@ -1031,11 +1081,11 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
switch (s->cc_op) {
case CC_OP_SUBB ... CC_OP_SUBQ:
/* We optimize relational operators for the cmp/jcc case. */
- size = s->cc_op - CC_OP_SUBB;
+ size = cc_op_size(s->cc_op);
switch (jcc_op) {
case JCC_BE:
- gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
- gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
+ tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
+ tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
break;
@@ -1045,8 +1095,8 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
case JCC_LE:
cond = TCG_COND_LE;
fast_jcc_l:
- gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
- gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
+ tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
+ tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
break;
@@ -1056,6 +1106,28 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
}
break;
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
+ /* Mostly used for test+jump */
+ size = s->cc_op - CC_OP_LOGICB;
+ switch (jcc_op) {
+ case JCC_BE:
+ /* CF = 0, becomes jz/je */
+ jcc_op = JCC_Z;
+ goto slow_jcc;
+ case JCC_L:
+ /* OF = 0, becomes js/jns */
+ jcc_op = JCC_S;
+ goto slow_jcc;
+ case JCC_LE:
+ /* SF or ZF, becomes signed <= 0 */
+ tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
+ cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
+ break;
+ default:
+ goto slow_jcc;
+ }
+ break;
+
default:
slow_jcc:
/* This actually generates good code for JC, JZ and JS. */
@@ -1109,7 +1181,27 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
return cc;
}
-static void gen_setcc1(DisasContext *s, int b, TCGv reg)
+static void gen_neg_setcc(DisasContext *s, int b, TCGv reg)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, reg);
+
+ if (cc.no_setcond) {
+ if (cc.cond == TCG_COND_EQ) {
+ tcg_gen_addi_tl(reg, cc.reg, -1);
+ } else {
+ tcg_gen_neg_tl(reg, cc.reg);
+ }
+ return;
+ }
+
+ if (cc.use_reg2) {
+ tcg_gen_negsetcond_tl(cc.cond, reg, cc.reg, cc.reg2);
+ } else {
+ tcg_gen_negsetcondi_tl(cc.cond, reg, cc.reg, cc.imm);
+ }
+}
+
+static void gen_setcc(DisasContext *s, int b, TCGv reg)
{
CCPrepare cc = gen_prepare_cc(s, b, reg);
@@ -1131,12 +1223,12 @@ static void gen_setcc1(DisasContext *s, int b, TCGv reg)
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
- gen_setcc1(s, JCC_B << 1, reg);
+ gen_setcc(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
value 'b'. In the fast case, T0 is guaranteed not to be used. */
-static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
+static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, NULL);
@@ -1151,10 +1243,15 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
value 'b'. In the fast case, T0 is guaranteed not to be used.
One or both of the branches will call gen_jmp_rel, so ensure
cc_op is clean. */
-static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
+static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, NULL);
+ /*
+ * Note that this must be _after_ gen_prepare_cc, because it can change
+ * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
+ * it's cheaper to just compute the flags)!
+ */
gen_update_cc_op(s);
if (cc.use_reg2) {
tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
@@ -1163,39 +1260,22 @@ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
}
}
-/* XXX: does not work with gdbstub "ice" single step - not a
- serious problem. The caller can jump to the returned label
- to stop the REP but, if the flags have changed, it has to call
- gen_update_cc_op before doing so. */
-static TCGLabel *gen_jz_ecx_string(DisasContext *s)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
-
- gen_update_cc_op(s);
- gen_op_jnz_ecx(s, l1);
- gen_set_label(l2);
- gen_jmp_rel_csize(s, 0, 1);
- gen_set_label(l1);
- return l2;
-}
-
-static void gen_stos(DisasContext *s, MemOp ot)
+static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift)
{
gen_string_movl_A0_EDI(s);
gen_op_st_v(s, ot, s->T0, s->A0);
- gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
+ gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
-static void gen_lods(DisasContext *s, MemOp ot)
+static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift)
{
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
- gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
+ gen_op_add_reg(s, s->aflag, R_ESI, dshift);
}
-static void gen_scas(DisasContext *s, MemOp ot)
+static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift)
{
gen_string_movl_A0_EDI(s);
gen_op_ld_v(s, ot, s->T1, s->A0);
@@ -1204,13 +1284,11 @@ static void gen_scas(DisasContext *s, MemOp ot)
tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
set_cc_op(s, CC_OP_SUBB + ot);
- gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
+ gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
-static void gen_cmps(DisasContext *s, MemOp ot)
+static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift)
{
- TCGv dshift;
-
gen_string_movl_A0_EDI(s);
gen_op_ld_v(s, ot, s->T1, s->A0);
gen_string_movl_A0_ESI(s);
@@ -1220,7 +1298,6 @@ static void gen_cmps(DisasContext *s, MemOp ot)
tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
set_cc_op(s, CC_OP_SUBB + ot);
- dshift = gen_compute_Dshift(s, ot);
gen_op_add_reg(s, s->aflag, R_ESI, dshift);
gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
@@ -1239,71 +1316,183 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
}
}
-static void gen_ins(DisasContext *s, MemOp ot)
+static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
{
+ TCGv_i32 port = tcg_temp_new_i32();
+
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
tcg_gen_movi_tl(s->T0, 0);
gen_op_st_v(s, ot, s->T0, s->A0);
- tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
- tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
- gen_helper_in_func(ot, s->T0, s->tmp2_i32);
+ tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(port, port, 0xffff);
+ gen_helper_in_func(ot, s->T0, port);
gen_op_st_v(s, ot, s->T0, s->A0);
- gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
- gen_bpt_io(s, s->tmp2_i32, ot);
+ gen_op_add_reg(s, s->aflag, R_EDI, dshift);
+ gen_bpt_io(s, port, ot);
}
-static void gen_outs(DisasContext *s, MemOp ot)
+static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
{
+ TCGv_i32 port = tcg_temp_new_i32();
+ TCGv_i32 value = tcg_temp_new_i32();
+
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
- tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
- tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
- tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
- gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
- gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
- gen_bpt_io(s, s->tmp2_i32, ot);
+ tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(port, port, 0xffff);
+ tcg_gen_trunc_tl_i32(value, s->T0);
+ gen_helper_out_func(ot, port, value);
+ gen_op_add_reg(s, s->aflag, R_ESI, dshift);
+ gen_bpt_io(s, port, ot);
}
-/* Generate jumps to current or next instruction */
-static void gen_repz(DisasContext *s, MemOp ot,
- void (*fn)(DisasContext *s, MemOp ot))
+#define REP_MAX 65535
+
+static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift,
+ void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
+ bool is_repz_nz)
{
- TCGLabel *l2;
- l2 = gen_jz_ecx_string(s);
- fn(s, ot);
- gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
+ TCGLabel *last = gen_new_label();
+ TCGLabel *loop = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
+ target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag);
+ TCGv cx_next = tcg_temp_new();
+
+ /*
+ * Check if we must translate a single iteration only. Normally, HF_RF_MASK
+ * would also limit translation blocks to one instruction, so that gen_eob
+ * can reset the flag; here however RF is set throughout the repetition, so
+ * we can plow through until CX/ECX/RCX is zero.
+ */
+ bool can_loop =
+ (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP))
+ && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
+ bool had_rf = s->flags & HF_RF_MASK;
+
/*
- * A loop would cause two single step exceptions if ECX = 1
- * before rep string_insn
+ * Even if EFLAGS.RF was set on entry (such as if we're on the second or
+ * later iteration and an exception or interrupt happened), force gen_eob()
+ * not to clear the flag. We do that ourselves after the last iteration.
*/
- if (s->repz_opt) {
- gen_op_jz_ecx(s, l2);
+ s->flags &= ~HF_RF_MASK;
+
+ /*
+ * For CMPS/SCAS, the CC_OP after a memory fault could come from either
+ * the previous instruction or the string instruction; but because we
+ * arrange to keep CC_OP up to date all the time, just mark the whole
+ * insn as CC_OP_DYNAMIC.
+ *
+ * It's not a problem to do this even for instructions that do not
+ * modify the flags, so do it unconditionally.
+ */
+ gen_update_cc_op(s);
+ tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);
+
+ /* Any iteration at all? */
+ tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done);
+
+ /*
+ * From now on we operate on the value of CX/ECX/RCX that will be written
+ * back, which is stored in cx_next. There can be no carry, so we can zero
+ * extend here if needed and not do any expensive deposit operations later.
+ */
+ tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1);
+#ifdef TARGET_X86_64
+ if (s->aflag == MO_32) {
+ tcg_gen_ext32u_tl(cx_next, cx_next);
+ cx_mask = ~0;
}
- gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
-}
+#endif
-static void gen_repz_nz(DisasContext *s, MemOp ot,
- void (*fn)(DisasContext *s, MemOp ot))
-{
- TCGLabel *l2;
- int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
+ /*
+ * The last iteration is handled outside the loop, so that cx_next
+ * can never underflow.
+ */
+ if (can_loop) {
+ tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
+ }
- l2 = gen_jz_ecx_string(s);
- fn(s, ot);
- gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
- gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
- if (s->repz_opt) {
- gen_op_jz_ecx(s, l2);
+ gen_set_label(loop);
+ fn(s, ot, dshift);
+ tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
+ gen_update_cc_op(s);
+
+ /* Leave if REP condition fails. */
+ if (is_repz_nz) {
+ int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
+ gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
+ /* gen_prepare_eflags_z never changes cc_op. */
+ assert(!s->cc_op_dirty);
}
+
+ if (can_loop) {
+ tcg_gen_subi_tl(cx_next, cx_next, 1);
+ tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop);
+ tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
+ }
+
/*
- * Only one iteration is done at a time, so the translation
- * block ends unconditionally after this instruction and there
- * is no control flow junction - no need to set CC_OP_DYNAMIC.
+ * Traps or interrupts set RF_MASK if they happen after any iteration
+ * but the last. Set it here before giving the main loop a chance to
+ * execute. (For faults, seg_helper.c sets the flag as usual).
*/
+ if (!had_rf) {
+ gen_set_eflags(s, RF_MASK);
+ }
+
+ /* Go to the main loop but reenter the same instruction. */
gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
+
+ if (can_loop) {
+ /*
+ * The last iteration needs no conditional jump, even if is_repz_nz,
+ * because the repeats are ending anyway.
+ */
+ gen_set_label(last);
+ set_cc_op(s, CC_OP_DYNAMIC);
+ fn(s, ot, dshift);
+ tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
+ gen_update_cc_op(s);
+ }
+
+ /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition. */
+ gen_set_label(done);
+ set_cc_op(s, CC_OP_DYNAMIC);
+ if (had_rf) {
+ gen_reset_eflags(s, RF_MASK);
+ }
+ gen_jmp_rel_csize(s, 0, 1);
+}
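The control flow emitted by do_gen_rep() is easier to follow as ordinary C. This is a rough standalone model for a plain REP prefix (no REPZ/REPNZ termination test) with can_loop true; rep_chunk() represents one pass through the generated translation block, and every name in it is local to the sketch.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REP_MAX 65535

/*
 * Returns true when the generated code would leave the translation block and
 * re-enter the same instruction (so the main loop can service interrupts),
 * false when it falls through to the next instruction.
 */
static bool rep_chunk(uint64_t *ecx, uint64_t cx_mask,
                      void (*one_iteration)(void *), void *opaque)
{
    uint64_t cx_next;

    if ((*ecx & cx_mask) == 0) {
        return false;                          /* no iteration at all */
    }
    cx_next = (*ecx - 1) & cx_mask;

    while (cx_next != 0) {
        one_iteration(opaque);
        *ecx = cx_next;
        cx_next = (cx_next - 1) & cx_mask;
        if ((cx_next & REP_MAX) == 0 && cx_next != 0) {
            return true;                       /* cap reached, restart the insn */
        }
    }
    one_iteration(opaque);                     /* the special-cased last iteration */
    *ecx = cx_next;
    return false;
}

static void count_one(void *opaque)
{
    ++*(uint64_t *)opaque;
}

int main(void)
{
    uint64_t ecx = 0x20000, done = 0;
    int chunks = 1;

    while (rep_chunk(&ecx, 0xffffffffull, count_one, &done)) {
        chunks++;
    }
    /* Expect 2 chunks and 131072 iterations for an initial count of 0x20000. */
    printf("%d chunks, %" PRIu64 " iterations\n", chunks, done);
    return 0;
}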
+
+static void do_gen_string(DisasContext *s, MemOp ot,
+ void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
+ bool is_repz_nz)
+{
+ TCGv dshift = tcg_temp_new();
+ tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
+ tcg_gen_shli_tl(dshift, dshift, ot);
+
+ if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ do_gen_rep(s, ot, dshift, fn, is_repz_nz);
+ } else {
+ fn(s, ot, dshift);
+ }
+}
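
do_gen_string() computes dshift once per instruction: CPUX86State.df holds the
direction flag as +1 or -1 (hence the sign-extending ld32s load), so shifting
it left by the MemOp size yields the signed per-element pointer step that the
individual string generators receive. A quick check of the arithmetic
(illustration only, not code from the patch):

        DF = 0:  df = +1, ot = MO_32  ->  dshift = +4
        DF = 1:  df = -1, ot = MO_16  ->  dshift = (-1) << 1 = -2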
+
+static void gen_repz(DisasContext *s, MemOp ot,
+ void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
+{
+ do_gen_string(s, ot, fn, false);
+}
+
+static void gen_repz_nz(DisasContext *s, MemOp ot,
+ void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
+{
+ do_gen_string(s, ot, fn, true);
}
static void gen_helper_fp_arith_ST0_FT0(int op)
@@ -1394,10 +1583,13 @@ static bool check_cpl0(DisasContext *s)
}
/* XXX: add faster immediate case */
-static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
+static TCGv gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
bool is_right, TCGv count)
{
target_ulong mask = (ot == MO_64 ? 63 : 31);
+ TCGv cc_src = tcg_temp_new();
+ TCGv tmp = tcg_temp_new();
+ TCGv hishift;
switch (ot) {
case MO_16:
@@ -1405,9 +1597,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
portion by constructing it as a 32-bit value. */
if (is_right) {
- tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
+ tcg_gen_deposit_tl(tmp, s->T0, s->T1, 16, 16);
tcg_gen_mov_tl(s->T1, s->T0);
- tcg_gen_mov_tl(s->T0, s->tmp0);
+ tcg_gen_mov_tl(s->T0, tmp);
} else {
tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
}
@@ -1418,47 +1610,52 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
case MO_32:
#ifdef TARGET_X86_64
/* Concatenate the two 32-bit values and use a 64-bit shift. */
- tcg_gen_subi_tl(s->tmp0, count, 1);
+ tcg_gen_subi_tl(tmp, count, 1);
if (is_right) {
tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
- tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shr_i64(cc_src, s->T0, tmp);
tcg_gen_shr_i64(s->T0, s->T0, count);
} else {
tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
- tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shl_i64(cc_src, s->T0, tmp);
tcg_gen_shl_i64(s->T0, s->T0, count);
- tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
+ tcg_gen_shri_i64(cc_src, cc_src, 32);
tcg_gen_shri_i64(s->T0, s->T0, 32);
}
break;
#endif
default:
- tcg_gen_subi_tl(s->tmp0, count, 1);
+ hishift = tcg_temp_new();
+ tcg_gen_subi_tl(tmp, count, 1);
if (is_right) {
- tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shr_tl(cc_src, s->T0, tmp);
- tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
+ /* mask + 1 - count = mask - tmp = mask ^ tmp */
+ tcg_gen_xori_tl(hishift, tmp, mask);
tcg_gen_shr_tl(s->T0, s->T0, count);
- tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
+ tcg_gen_shl_tl(s->T1, s->T1, hishift);
} else {
- tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shl_tl(cc_src, s->T0, tmp);
+
+ /* mask + 1 - count = mask - tmp = mask ^ tmp */
+ tcg_gen_xori_tl(hishift, tmp, mask);
+ tcg_gen_shl_tl(s->T0, s->T0, count);
+ tcg_gen_shr_tl(s->T1, s->T1, hishift);
+
if (ot == MO_16) {
/* Only needed if count > 16, for Intel behaviour. */
- tcg_gen_subfi_tl(s->tmp4, 33, count);
- tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
- tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
+ tcg_gen_shri_tl(tmp, s->T1, 1);
+ tcg_gen_or_tl(cc_src, cc_src, tmp);
}
-
- tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
- tcg_gen_shl_tl(s->T0, s->T0, count);
- tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
}
- tcg_gen_movi_tl(s->tmp4, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
- s->tmp4, s->T1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s->T1,
+ count, tcg_constant_tl(0),
+ tcg_constant_tl(0), s->T1);
tcg_gen_or_tl(s->T0, s->T0, s->T1);
break;
}
+
+ return cc_src;
}
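
The `mask + 1 - count = mask - tmp = mask ^ tmp` trick above works because mask
is all-ones (31 or 63) and tmp = count - 1 stays within [0, mask] for any count
in [1, mask]; the subtraction therefore never borrows and can be replaced by an
immediate XOR (the count == 0 case is discarded by the final movcond). A worked
example for ot == MO_64:

        mask = 63, count = 17, tmp = 16
        mask + 1 - count = 64 - 17             = 47
        mask ^ tmp       = 0b111111 ^ 0b010000 = 0b101111 = 47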
#define X86_MAX_INSN_LENGTH 15
@@ -1469,7 +1666,7 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
/* This is a subsequent insn that crosses a page boundary. */
if (s->base.num_insns > 1 &&
- !is_same_page(&s->base, s->pc + num_bytes - 1)) {
+ !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
siglongjmp(s->jmpbuf, 2);
}
@@ -1514,16 +1711,8 @@ static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
/* Decompose an address. */
-typedef struct AddressParts {
- int def_seg;
- int base;
- int index;
- int scale;
- target_long disp;
-} AddressParts;
-
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
- int modrm)
+ int modrm, bool is_vsib)
{
int def_seg, base, index, scale, mod, rm;
target_long disp;
@@ -1552,7 +1741,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
int code = x86_ldub_code(env, s);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
- if (index == 4) {
+ if (index == 4 && !is_vsib) {
index = -1; /* no index */
}
base = (code & 7) | REX_B(s);
@@ -1680,37 +1869,27 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
return ea;
}
-static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
-{
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
- TCGv ea = gen_lea_modrm_1(s, a, false);
- gen_lea_v_seg(s, ea, a.def_seg, s->override);
-}
-
-static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
-{
- (void)gen_lea_modrm_0(env, s, modrm);
-}
-
/* Used for BNDCL, BNDCU, BNDCN. */
-static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
TCGCond cond, TCGv_i64 bndv)
{
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
- TCGv ea = gen_lea_modrm_1(s, a, false);
+ TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
+ tcg_gen_extu_tl_i64(t64, ea);
if (!CODE64(s)) {
- tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
+ tcg_gen_ext32u_i64(t64, t64);
}
- tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
- tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
- gen_helper_bndck(tcg_env, s->tmp2_i32);
+ tcg_gen_setcond_i64(cond, t64, t64, bndv);
+ tcg_gen_extrl_i64_i32(t32, t64);
+ gen_helper_bndck(tcg_env, t32);
}
/* generate modrm load of memory or register. */
-static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
+static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
{
+ int modrm = s->modrm;
int mod, rm;
mod = (modrm >> 6) & 3;
@@ -1718,14 +1897,15 @@ static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
if (mod == 3) {
gen_op_mov_v_reg(s, ot, s->T0, rm);
} else {
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
gen_op_ld_v(s, ot, s->T0, s->A0);
}
}
/* generate modrm store of memory or register. */
-static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
+static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
{
+ int modrm = s->modrm;
int mod, rm;
mod = (modrm >> 6) & 3;
@@ -1733,7 +1913,7 @@ static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
if (mod == 3) {
gen_op_mov_reg_v(s, ot, rm, s->T0);
} else {
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
gen_op_st_v(s, ot, s->T0, s->A0);
}
}
@@ -1823,15 +2003,7 @@ static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
gen_jmp_rel(s, s->dflag, diff, 0);
}
-static void gen_jcc(DisasContext *s, int b, int diff)
-{
- TCGLabel *l1 = gen_new_label();
-
- gen_jcc1(s, b, l1);
- gen_conditional_jump_labels(s, diff, NULL, l1);
-}
-
-static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
+static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
{
CCPrepare cc = gen_prepare_cc(s, b, NULL);
@@ -1853,25 +2025,39 @@ static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
/* move SRC to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
+static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
{
if (PE(s) && !VM86(s)) {
- tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
- gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
- /* abort translation because the addseg value may change or
- because ss32 may change. For R_SS, translation must always
- stop as a special handling must be done to disable hardware
- interrupts for the next instruction */
- if (seg_reg == R_SS) {
- s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
- } else if (CODE32(s) && seg_reg < R_FS) {
+ TCGv_i32 sel = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(sel, src);
+ gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
+
+ /*
+ * For moves to SS, the SS32 flag may change. For CODE32 only, changes
+ * to SS, DS and ES may change the ADDSEG flags.
+ */
+ if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
s->base.is_jmp = DISAS_EOB_NEXT;
}
} else {
gen_op_movl_seg_real(s, seg_reg, src);
- if (seg_reg == R_SS) {
- s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
- }
+ }
+
+ /*
+     * For MOV or POP to SS (but not LSS), translation must always
+     * stop, because special handling is needed to disable hardware
+     * interrupts for the next instruction.
+ *
+ * This is the last instruction, so it's okay to overwrite
+ * HF_TF_MASK; the next TB will start with the flag set.
+ *
+ * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
+ * might have been set above.
+ */
+ if (inhibit_irq) {
+ s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
+ s->flags &= ~HF_TF_MASK;
}
}
@@ -2009,14 +2195,17 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
level &= 31;
if (level != 0) {
int i;
+ if (level > 1) {
+ TCGv fp = tcg_temp_new();
- /* Copy level-1 pointers from the previous frame. */
- for (i = 1; i < level; ++i) {
- gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
- gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
+ /* Copy level-1 pointers from the previous frame. */
+ for (i = 1; i < level; ++i) {
+ gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
+ gen_op_ld_v(s, d_ot, fp, s->A0);
- gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
- gen_op_st_v(s, d_ot, s->tmp0, s->A0);
+ gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
+ gen_op_st_v(s, d_ot, fp, s->A0);
+ }
}
/* Push the current FrameTemp as the last level. */
@@ -2079,46 +2268,6 @@ static void gen_interrupt(DisasContext *s, uint8_t intno)
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_set_hflag(DisasContext *s, uint32_t mask)
-{
- if ((s->flags & mask) == 0) {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
- tcg_gen_ori_i32(t, t, mask);
- tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
- s->flags |= mask;
- }
-}
-
-static void gen_reset_hflag(DisasContext *s, uint32_t mask)
-{
- if (s->flags & mask) {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
- tcg_gen_andi_i32(t, t, ~mask);
- tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
- s->flags &= ~mask;
- }
-}
-
-static void gen_set_eflags(DisasContext *s, target_ulong mask)
-{
- TCGv t = tcg_temp_new();
-
- tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
- tcg_gen_ori_tl(t, t, mask);
- tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
-}
-
-static void gen_reset_eflags(DisasContext *s, target_ulong mask)
-{
- TCGv t = tcg_temp_new();
-
- tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
- tcg_gen_andi_tl(t, t, ~mask);
- tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
-}
-
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
@@ -2153,13 +2302,13 @@ gen_eob(DisasContext *s, int mode)
gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
}
- if (s->base.tb->flags & HF_RF_MASK) {
+ if (s->flags & HF_RF_MASK) {
gen_reset_eflags(s, RF_MASK);
}
if (mode == DISAS_EOB_RECHECK_TF) {
gen_helper_rechecking_single_step(tcg_env);
tcg_gen_exit_tb(NULL, 0);
- } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
+ } else if (s->flags & HF_TF_MASK) {
gen_helper_single_step(tcg_env);
} else if (mode == DISAS_JUMP &&
/* give irqs a chance to happen */
@@ -2202,7 +2351,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
* no extra masking to apply (data16 branch in code32, see above),
* then we have also proven that the addition does not wrap.
*/
- if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
+ if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
use_goto_tb = false;
}
@@ -2279,10 +2428,11 @@ static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
int mem_index = s->mem_index;
TCGv_i128 t0 = tcg_temp_new_i128();
TCGv_i128 t1 = tcg_temp_new_i128();
+ TCGv a0_hi = tcg_temp_new();
tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
- tcg_gen_addi_tl(s->tmp0, s->A0, 16);
- tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
+ tcg_gen_addi_tl(a0_hi, s->A0, 16);
+ tcg_gen_qemu_ld_i128(t1, a0_hi, mem_index, mop);
tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
@@ -2293,137 +2443,41 @@ static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
int mem_index = s->mem_index;
TCGv_i128 t = tcg_temp_new_i128();
+ TCGv a0_hi = tcg_temp_new();
tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
- tcg_gen_addi_tl(s->tmp0, s->A0, 16);
+ tcg_gen_addi_tl(a0_hi, s->A0, 16);
tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
- tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
+ tcg_gen_qemu_st_i128(t, a0_hi, mem_index, mop);
}
-static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
-{
- TCGv_i64 cmp, val, old;
- TCGv Z;
-
- gen_lea_modrm(env, s, modrm);
-
- cmp = tcg_temp_new_i64();
- val = tcg_temp_new_i64();
- old = tcg_temp_new_i64();
-
- /* Construct the comparison values from the register pair. */
- tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
- tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
-
- /* Only require atomic with LOCK; non-parallel handled in generator. */
- if (s->prefix & PREFIX_LOCK) {
- tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
- } else {
- tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
- s->mem_index, MO_TEUQ);
- }
-
- /* Set tmp0 to match the required value of Z. */
- tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
- Z = tcg_temp_new();
- tcg_gen_trunc_i64_tl(Z, cmp);
-
- /*
- * Extract the result values for the register pair.
- * For 32-bit, we may do this unconditionally, because on success (Z=1),
- * the old value matches the previous value in EDX:EAX. For x86_64,
- * the store must be conditional, because we must leave the source
- * registers unchanged on success, and zero-extend the writeback
- * on failure (Z=0).
- */
- if (TARGET_LONG_BITS == 32) {
- tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
- } else {
- TCGv zero = tcg_constant_tl(0);
-
- tcg_gen_extr_i64_tl(s->T0, s->T1, old);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
- s->T0, cpu_regs[R_EAX]);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
- s->T1, cpu_regs[R_EDX]);
- }
-
- /* Update Z. */
- gen_compute_eflags(s);
- tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
-}
-
-#ifdef TARGET_X86_64
-static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
-{
- MemOp mop = MO_TE | MO_128 | MO_ALIGN;
- TCGv_i64 t0, t1;
- TCGv_i128 cmp, val;
-
- gen_lea_modrm(env, s, modrm);
-
- cmp = tcg_temp_new_i128();
- val = tcg_temp_new_i128();
- tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
- tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
-
- /* Only require atomic with LOCK; non-parallel handled in generator. */
- if (s->prefix & PREFIX_LOCK) {
- tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
- } else {
- tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
- }
-
- tcg_gen_extr_i128_i64(s->T0, s->T1, val);
-
- /* Determine success after the fact. */
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
- tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
- tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
- tcg_gen_or_i64(t0, t0, t1);
-
- /* Update Z. */
- gen_compute_eflags(s);
- tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
- tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
-
- /*
- * Extract the result values for the register pair. We may do this
- * unconditionally, because on success (Z=1), the old value matches
- * the previous value in RDX:RAX.
- */
- tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
- tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
-}
-#endif
+#include "emit.c.inc"
-static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
+static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
{
- CPUX86State *env = cpu_env(cpu);
bool update_fip = true;
- int modrm, mod, rm, op;
+ int b = decode->b;
+ int modrm = s->modrm;
+ int mod, rm, op;
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
gen_exception(s, EXCP07_PREX);
- return true;
+ return;
}
- modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
- TCGv ea = gen_lea_modrm_1(s, a, false);
+ TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
TCGv last_addr = tcg_temp_new();
bool update_fdp = true;
tcg_gen_mov_tl(last_addr, ea);
- gen_lea_v_seg(s, ea, a.def_seg, s->override);
+ gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
switch (op) {
case 0x00 ... 0x07: /* fxxxs */
@@ -2613,11 +2667,11 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_helper_fpop(tcg_env);
break;
default:
- return false;
+ goto illegal_op;
}
if (update_fdp) {
- int last_seg = s->override >= 0 ? s->override : a.def_seg;
+ int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
offsetof(CPUX86State,
@@ -2654,7 +2708,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
update_fip = false;
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x0c: /* grp d9/4 */
@@ -2673,7 +2727,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_helper_fxam_ST0(tcg_env);
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x0d: /* grp d9/5 */
@@ -2708,7 +2762,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_helper_fldz_ST0(tcg_env);
break;
default:
- return false;
+ goto illegal_op;
}
}
break;
@@ -2810,7 +2864,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_helper_fpop(tcg_env);
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x1c:
@@ -2830,7 +2884,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x1d: /* fucomi */
@@ -2882,7 +2936,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_helper_fpop(tcg_env);
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
@@ -2897,7 +2951,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
break;
default:
- return false;
+ goto illegal_op;
}
break;
case 0x3d: /* fucomip */
@@ -2937,14 +2991,14 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
}
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
- gen_jcc1_noeob(s, op1, l1);
+ gen_jcc_noeob(s, op1, l1);
gen_helper_fmov_ST0_STN(tcg_env,
tcg_constant_i32(opreg));
gen_set_label(l1);
}
break;
default:
- return false;
+ goto illegal_op;
}
}
@@ -2956,49 +3010,29 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
tcg_gen_st_tl(eip_cur_tl(s),
tcg_env, offsetof(CPUX86State, fpip));
}
- return true;
+ return;
illegal_op:
gen_illegal_opcode(s);
- return true;
}
-static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
+static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
{
- CPUX86State *env = cpu_env(cpu);
int prefixes = s->prefix;
MemOp dflag = s->dflag;
+ int b = decode->b + 0x100;
+ int modrm = s->modrm;
MemOp ot;
- int modrm, reg, rm, mod, op, val;
+ int reg, rm, mod, op;
/* now check op code */
switch (b) {
- case 0x1c7: /* cmpxchg8b */
- modrm = x86_ldub_code(env, s);
+ case 0x1c7: /* RDSEED, RDPID with f3 prefix */
mod = (modrm >> 6) & 3;
switch ((modrm >> 3) & 7) {
- case 1: /* CMPXCHG8, CMPXCHG16 */
- if (mod == 3) {
- goto illegal_op;
- }
-#ifdef TARGET_X86_64
- if (dflag == MO_64) {
- if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
- goto illegal_op;
- }
- gen_cmpxchg16b(s, env, modrm);
- break;
- }
-#endif
- if (!(s->cpuid_features & CPUID_CX8)) {
- goto illegal_op;
- }
- gen_cmpxchg8b(s, env, modrm);
- break;
-
- case 7: /* RDSEED, RDPID with f3 prefix */
+ case 7:
if (mod != 3 ||
- (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
+ (s->prefix & PREFIX_REPNZ)) {
goto illegal_op;
}
if (s->prefix & PREFIX_REPZ) {
@@ -3018,7 +3052,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
case 6: /* RDRAND */
if (mod != 3 ||
- (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
+ (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
!(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
goto illegal_op;
}
@@ -3035,148 +3069,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
break;
- /************************/
- /* bit operations */
- case 0x1ba: /* bt/bts/btr/btc Gv, im */
- ot = dflag;
- modrm = x86_ldub_code(env, s);
- op = (modrm >> 3) & 7;
- mod = (modrm >> 6) & 3;
- rm = (modrm & 7) | REX_B(s);
- if (mod != 3) {
- s->rip_offset = 1;
- gen_lea_modrm(env, s, modrm);
- if (!(s->prefix & PREFIX_LOCK)) {
- gen_op_ld_v(s, ot, s->T0, s->A0);
- }
- } else {
- gen_op_mov_v_reg(s, ot, s->T0, rm);
- }
- /* load shift */
- val = x86_ldub_code(env, s);
- tcg_gen_movi_tl(s->T1, val);
- if (op < 4)
- goto unknown_op;
- op -= 4;
- goto bt_op;
- case 0x1a3: /* bt Gv, Ev */
- op = 0;
- goto do_btx;
- case 0x1ab: /* bts */
- op = 1;
- goto do_btx;
- case 0x1b3: /* btr */
- op = 2;
- goto do_btx;
- case 0x1bb: /* btc */
- op = 3;
- do_btx:
- ot = dflag;
- modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | REX_R(s);
- mod = (modrm >> 6) & 3;
- rm = (modrm & 7) | REX_B(s);
- gen_op_mov_v_reg(s, MO_32, s->T1, reg);
- if (mod != 3) {
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
- /* specific case: we need to add a displacement */
- gen_exts(ot, s->T1);
- tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
- tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
- tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
- gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
- if (!(s->prefix & PREFIX_LOCK)) {
- gen_op_ld_v(s, ot, s->T0, s->A0);
- }
- } else {
- gen_op_mov_v_reg(s, ot, s->T0, rm);
- }
- bt_op:
- tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
- tcg_gen_movi_tl(s->tmp0, 1);
- tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
- if (s->prefix & PREFIX_LOCK) {
- switch (op) {
- case 0: /* bt */
- /* Needs no atomic ops; we suppressed the normal
- memory load for LOCK above so do it now. */
- gen_op_ld_v(s, ot, s->T0, s->A0);
- break;
- case 1: /* bts */
- tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
- s->mem_index, ot | MO_LE);
- break;
- case 2: /* btr */
- tcg_gen_not_tl(s->tmp0, s->tmp0);
- tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
- s->mem_index, ot | MO_LE);
- break;
- default:
- case 3: /* btc */
- tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
- s->mem_index, ot | MO_LE);
- break;
- }
- tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
- } else {
- tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
- switch (op) {
- case 0: /* bt */
- /* Data already loaded; nothing to do. */
- break;
- case 1: /* bts */
- tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
- break;
- case 2: /* btr */
- tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
- break;
- default:
- case 3: /* btc */
- tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
- break;
- }
- if (op != 0) {
- if (mod != 3) {
- gen_op_st_v(s, ot, s->T0, s->A0);
- } else {
- gen_op_mov_reg_v(s, ot, rm, s->T0);
- }
- }
- }
-
- /* Delay all CC updates until after the store above. Note that
- C is the result of the test, Z is unchanged, and the others
- are all undefined. */
- switch (s->cc_op) {
- case CC_OP_MULB ... CC_OP_MULQ:
- case CC_OP_ADDB ... CC_OP_ADDQ:
- case CC_OP_ADCB ... CC_OP_ADCQ:
- case CC_OP_SUBB ... CC_OP_SUBQ:
- case CC_OP_SBBB ... CC_OP_SBBQ:
- case CC_OP_LOGICB ... CC_OP_LOGICQ:
- case CC_OP_INCB ... CC_OP_INCQ:
- case CC_OP_DECB ... CC_OP_DECQ:
- case CC_OP_SHLB ... CC_OP_SHLQ:
- case CC_OP_SARB ... CC_OP_SARQ:
- case CC_OP_BMILGB ... CC_OP_BMILGQ:
- case CC_OP_POPCNT:
- /* Z was going to be computed from the non-zero status of CC_DST.
- We can get that same Z value (and the new C value) by leaving
- CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
- same width. */
- tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
- set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
- break;
- default:
- /* Otherwise, generate EFLAGS and replace the C bit. */
- gen_compute_eflags(s);
- tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
- ctz32(CC_C), 1);
- break;
- }
- break;
case 0x100:
- modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -3190,14 +3083,14 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
tcg_gen_ld32u_tl(s->T0, tcg_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
- gen_st_modrm(env, s, modrm, ot);
+ gen_st_modrm(s, decode, ot);
break;
case 2: /* lldt */
if (!PE(s) || VM86(s))
goto illegal_op;
if (check_cpl0(s)) {
gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
- gen_ld_modrm(env, s, modrm, MO_16);
+ gen_ld_modrm(s, decode, MO_16);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_lldt(tcg_env, s->tmp2_i32);
}
@@ -3212,14 +3105,14 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
tcg_gen_ld32u_tl(s->T0, tcg_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
- gen_st_modrm(env, s, modrm, ot);
+ gen_st_modrm(s, decode, ot);
break;
case 3: /* ltr */
if (!PE(s) || VM86(s))
goto illegal_op;
if (check_cpl0(s)) {
gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
- gen_ld_modrm(env, s, modrm, MO_16);
+ gen_ld_modrm(s, decode, MO_16);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_ltr(tcg_env, s->tmp2_i32);
}
@@ -3228,7 +3121,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
case 5: /* verw */
if (!PE(s) || VM86(s))
goto illegal_op;
- gen_ld_modrm(env, s, modrm, MO_16);
+ gen_ld_modrm(s, decode, MO_16);
gen_update_cc_op(s);
if (op == 4) {
gen_helper_verr(tcg_env, s->T0);
@@ -3238,19 +3131,18 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
assume_cc_op(s, CC_OP_EFLAGS);
break;
default:
- goto unknown_op;
+ goto illegal_op;
}
break;
case 0x101:
- modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
break;
}
gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
tcg_gen_ld32u_tl(s->T0,
tcg_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_v(s, MO_16, s->T0, s->A0);
@@ -3306,7 +3198,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
}
gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, s->T0, s->A0);
gen_add_A0_im(s, 2);
@@ -3320,8 +3212,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
case 0xd0: /* xgetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
- || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
- | PREFIX_REPZ | PREFIX_REPNZ))) {
+ || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
@@ -3331,8 +3222,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
case 0xd1: /* xsetbv */
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
- || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
- | PREFIX_REPZ | PREFIX_REPNZ))) {
+ || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
@@ -3456,7 +3346,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
}
gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
gen_op_ld_v(s, MO_16, s->T1, s->A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
@@ -3472,7 +3362,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
}
gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
gen_op_ld_v(s, MO_16, s->T1, s->A0);
gen_add_A0_im(s, 2);
gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
@@ -3496,11 +3386,10 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
*/
mod = (modrm >> 6) & 3;
ot = (mod != 3 ? MO_16 : s->dflag);
- gen_st_modrm(env, s, modrm, ot);
+ gen_st_modrm(s, decode, ot);
break;
case 0xee: /* rdpkru */
- if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
- | PREFIX_REPZ | PREFIX_REPNZ)) {
+ if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
goto illegal_op;
}
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
@@ -3508,8 +3397,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
break;
case 0xef: /* wrpkru */
- if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
- | PREFIX_REPZ | PREFIX_REPNZ)) {
+ if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
goto illegal_op;
}
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
@@ -3523,7 +3411,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
}
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
- gen_ld_modrm(env, s, modrm, MO_16);
+ gen_ld_modrm(s, decode, MO_16);
/*
* Only the 4 lower bits of CR0 are modified.
* PE cannot be set to zero if already set to one.
@@ -3541,7 +3429,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
}
gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
gen_helper_flush_page(tcg_env, s->A0);
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -3574,33 +3462,30 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
break;
default:
- goto unknown_op;
+ goto illegal_op;
}
break;
case 0x11a:
- modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | REX_R(s);
if (prefixes & PREFIX_REPZ) {
/* bndcl */
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
- gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+ gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
} else if (prefixes & PREFIX_REPNZ) {
/* bndcu */
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
TCGv_i64 notu = tcg_temp_new_i64();
tcg_gen_not_i64(notu, cpu_bndu[reg]);
- gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+ gen_bndck(s, decode, TCG_COND_GTU, notu);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- from reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
@@ -3608,7 +3493,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
- if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ if (reg2 >= 4) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
@@ -3616,7 +3501,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
}
} else {
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUQ);
@@ -3635,9 +3520,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
} else if (mod != 3) {
/* bndldx */
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ AddressParts a = decode->mem;
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
@@ -3665,21 +3549,18 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
gen_set_hflag(s, HF_MPX_IU_MASK);
}
}
- gen_nop_modrm(env, s, modrm);
break;
case 0x11b:
- modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | REX_R(s);
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
/* bndmk */
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ AddressParts a = decode->mem;
if (a.base >= 0) {
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
if (!CODE64(s)) {
@@ -3692,7 +3573,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
/* rip-relative generates #ud */
goto illegal_op;
}
- tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
+ tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
if (!CODE64(s)) {
tcg_gen_ext32u_tl(s->A0, s->A0);
}
@@ -3703,11 +3584,10 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
} else if (prefixes & PREFIX_REPNZ) {
/* bndcn */
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16) {
goto illegal_op;
}
- gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+ gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
} else if (prefixes & PREFIX_DATA) {
/* bndmov -- to reg/mem */
if (reg >= 4 || s->aflag == MO_16) {
@@ -3715,7 +3595,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
if (mod == 3) {
int reg2 = (modrm & 7) | REX_B(s);
- if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ if (reg2 >= 4) {
goto illegal_op;
}
if (s->flags & HF_MPX_IU_MASK) {
@@ -3723,7 +3603,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
}
} else {
- gen_lea_modrm(env, s, modrm);
+ gen_lea_modrm(s, decode);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUQ);
@@ -3740,9 +3620,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
} else if (mod != 3) {
/* bndstx */
- AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ AddressParts a = decode->mem;
if (reg >= 4
- || (prefixes & PREFIX_LOCK)
|| s->aflag == MO_16
|| a.base < -1) {
goto illegal_op;
@@ -3767,7 +3646,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
}
}
}
- gen_nop_modrm(env, s, modrm);
break;
default:
g_assert_not_reached();
@@ -3775,13 +3653,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
return;
illegal_op:
gen_illegal_opcode(s);
- return;
- unknown_op:
- gen_unknown_opcode(env, s);
}
-#include "decode-new.h"
-#include "emit.c.inc"
#include "decode-new.c.inc"
void tcg_x86_init(void)
@@ -3917,30 +3790,13 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
(flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
- /*
- * If jmp_opt, we want to handle each string instruction individually.
- * For icount also disable repz optimization so that each iteration
- * is accounted separately.
- *
- * FIXME: this is messy; it makes REP string instructions a lot less
- * efficient than they should be and it gets in the way of correct
- * handling of RF (interrupts or traps arriving after any iteration
- * of a repeated string instruction but the last should set RF to 1).
- * Perhaps it would be more efficient if REP string instructions were
- * always at the beginning of the TB, or even their own TB? That
- * would even allow accounting up to 64k iterations at once for icount.
- */
- dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
dc->T0 = tcg_temp_new();
dc->T1 = tcg_temp_new();
dc->A0 = tcg_temp_new();
- dc->tmp0 = tcg_temp_new();
dc->tmp1_i64 = tcg_temp_new_i64();
dc->tmp2_i32 = tcg_temp_new_i32();
- dc->tmp3_i32 = tcg_temp_new_i32();
- dc->tmp4 = tcg_temp_new();
dc->cc_srcT = tcg_temp_new();
}
@@ -3989,15 +3845,9 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
case 2:
/* Restore state that may affect the next instruction. */
dc->pc = dc->base.pc_next;
- /*
- * TODO: These save/restore can be removed after the table-based
- * decoder is complete; we will be decoding the insn completely
- * before any code generation that might affect these variables.
- */
- dc->cc_op_dirty = orig_cc_op_dirty;
- dc->cc_op = orig_cc_op;
- dc->pc_save = orig_pc_save;
- /* END TODO */
+ assert(dc->cc_op_dirty == orig_cc_op_dirty);
+ assert(dc->cc_op == orig_cc_op);
+ assert(dc->pc_save == orig_pc_save);
dc->base.num_insns--;
tcg_remove_ops_after(dc->prev_insn_end);
dc->base.insn_start = dc->prev_insn_start;
@@ -4022,7 +3872,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* chance to happen.
*/
dc->base.is_jmp = DISAS_EOB_NEXT;
- } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
+ } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -4073,9 +3923,8 @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
index b3bdb78..98fab4cb 100644
--- a/target/i386/tcg/user/excp_helper.c
+++ b/target/i386/tcg/user/excp_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"
void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
diff --git a/target/i386/tcg/user/seg_helper.c b/target/i386/tcg/user/seg_helper.c
index c45f2ac..263f599 100644
--- a/target/i386/tcg/user/seg_helper.c
+++ b/target/i386/tcg/user/seg_helper.c
@@ -21,8 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "tcg/helper-tcg.h"
#include "tcg/seg_helper.h"
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index 1a2b4e1..b8bebe4 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -9,12 +9,13 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/kvm_int.h"
+#include "system/kvm_int.h"
#include "qemu/main-loop.h"
-#include "sysemu/cpus.h"
+#include "system/accel-ops.h"
+#include "system/cpus.h"
#include "qemu/guest-random.h"
-#include "sysemu/whpx.h"
+#include "system/whpx.h"
#include "whpx-internal.h"
#include "whpx-accel-ops.h"
@@ -82,7 +83,7 @@ static bool whpx_vcpu_thread_is_idle(CPUState *cpu)
return !whpx_apic_in_platform();
}
-static void whpx_accel_ops_class_init(ObjectClass *oc, void *data)
+static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/target/i386/whpx/whpx-accel-ops.h b/target/i386/whpx/whpx-accel-ops.h
index 7a1bb1a..e6cf155 100644
--- a/target/i386/whpx/whpx-accel-ops.h
+++ b/target/i386/whpx/whpx-accel-ops.h
@@ -10,7 +10,7 @@
#ifndef TARGET_I386_WHPX_ACCEL_OPS_H
#define TARGET_I386_WHPX_ACCEL_OPS_H
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
int whpx_init_vcpu(CPUState *cpu);
int whpx_vcpu_exec(CPUState *cpu);
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index a6674a8..cf6d3e4 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -10,13 +10,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/address-spaces.h"
-#include "exec/ioport.h"
+#include "system/address-spaces.h"
+#include "system/ioport.h"
#include "gdbstub/helpers.h"
#include "qemu/accel.h"
-#include "sysemu/whpx.h"
-#include "sysemu/cpus.h"
-#include "sysemu/runstate.h"
+#include "system/whpx.h"
+#include "system/cpus.h"
+#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"
#include "hw/intc/ioapic.h"
@@ -549,8 +549,6 @@ static void whpx_set_registers(CPUState *cpu, int level)
error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
hr);
}
-
- return;
}
static int whpx_get_tsc(CPUState *cpu)
@@ -771,8 +769,6 @@ static void whpx_get_registers(CPUState *cpu)
}
x86_update_hflags(env);
-
- return;
}
static HRESULT CALLBACK whpx_emu_ioport_callback(
@@ -1570,8 +1566,6 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
" hr=%08lx", hr);
}
}
-
- return;
}
static void whpx_vcpu_post_run(CPUState *cpu)
@@ -1595,8 +1589,6 @@ static void whpx_vcpu_post_run(CPUState *cpu)
vcpu->interruptable =
!vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
-
- return;
}
static void whpx_vcpu_process_async_events(CPUState *cpu)
@@ -1634,8 +1626,6 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
env->tpr_access_type);
}
-
- return;
}
static int whpx_vcpu_run(CPUState *cpu)
@@ -2280,7 +2270,6 @@ void whpx_destroy_vcpu(CPUState *cpu)
whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
g_free(cpu->accel);
- return;
}
void whpx_vcpu_kick(CPUState *cpu)
@@ -2709,7 +2698,7 @@ bool whpx_apic_in_platform(void) {
return whpx_global.apic_in_platform;
}
-static void whpx_accel_class_init(ObjectClass *oc, void *data)
+static void whpx_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "WHPX";
diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c
index 7e14ded..e1ef6d4 100644
--- a/target/i386/whpx/whpx-apic.c
+++ b/target/i386/whpx/whpx-apic.c
@@ -16,8 +16,8 @@
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/pci/msi.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/whpx.h"
+#include "system/hw_accel.h"
+#include "system/whpx.h"
#include "whpx-internal.h"
struct whpx_lapic_state {
@@ -231,7 +231,7 @@ static void whpx_apic_mem_write(void *opaque, hwaddr addr,
static const MemoryRegionOps whpx_apic_io_ops = {
.read = whpx_apic_mem_read,
.write = whpx_apic_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
+ .endianness = DEVICE_LITTLE_ENDIAN,
};
static void whpx_apic_reset(APICCommonState *s)
@@ -252,7 +252,7 @@ static void whpx_apic_realize(DeviceState *dev, Error **errp)
msi_nonbroken = true;
}
-static void whpx_apic_class_init(ObjectClass *klass, void *data)
+static void whpx_apic_class_init(ObjectClass *klass, const void *data)
{
APICCommonClass *k = APIC_COMMON_CLASS(klass);
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index 996e9f3..24ab7be 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -5,6 +5,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/tswap.h"
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
diff --git a/target/loongarch/arch_dump.c b/target/loongarch/arch_dump.c
new file mode 100644
index 0000000..2b0955a
--- /dev/null
+++ b/target/loongarch/arch_dump.c
@@ -0,0 +1,163 @@
+/*
+ * Support for writing ELF notes for LoongArch architectures
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "system/dump.h"
+#include "internals.h"
+
+/* struct user_pt_regs from arch/loongarch/include/uapi/asm/ptrace.h */
+struct loongarch_user_regs {
+ uint64_t gpr[32];
+ uint64_t pad1[1];
+ /* Special CSR registers. */
+ uint64_t csr_era;
+ uint64_t csr_badv;
+ uint64_t pad2[10];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_user_regs) != 360);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct loongarch_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ /*
+ * 76 == offsetof(struct elf_prstatus, pr_reg) -
+ * offsetof(struct elf_prstatus, pr_ppid)
+ */
+ char pad2[76];
+ struct loongarch_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_elf_prstatus) != 480);
+
+/* struct user_fp_state from arch/loongarch/include/uapi/asm/ptrace.h */
+struct loongarch_fpu_struct {
+ uint64_t fpr[32];
+ uint64_t fcc;
+ unsigned int fcsr;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_fpu_struct) != 268);
+
+struct loongarch_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ union {
+ struct loongarch_elf_prstatus prstatus;
+ struct loongarch_fpu_struct fpu;
+ };
+} QEMU_PACKED;
+
+#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus)
+#define LOONGARCH_PRSTATUS_NOTE_SIZE \
+ (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_elf_prstatus))
+#define LOONGARCH_PRFPREG_NOTE_SIZE \
+ (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_fpu_struct))
+
+static void loongarch_note_init(struct loongarch_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f,
+ CPULoongArchState *env, int cpuid,
+ DumpState *s)
+{
+ struct loongarch_note note;
+ int ret, i;
+
+ loongarch_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu));
+ note.fpu.fcsr = cpu_to_dump64(s, env->fcsr0);
+ note.fpu.fcc = cpu_to_dump64(s, read_fcc(env));
+
+ for (i = 0; i < 32; ++i) {
+ note.fpu.fpr[i] = cpu_to_dump64(s, env->fpr[i].vreg.UD[0]);
+ }
+
+ ret = f(&note, LOONGARCH_PRFPREG_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, DumpState *s)
+{
+ struct loongarch_note note;
+ CPULoongArchState *env = &LOONGARCH_CPU(cs)->env;
+ int ret, i;
+
+ loongarch_note_init(&note, s, "CORE", 5, NT_PRSTATUS,
+ sizeof(note.prstatus));
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+ for (i = 0; i < 32; ++i) {
+ note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->gpr[i]);
+ }
+ note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA);
+ note.prstatus.pr_reg.csr_badv = cpu_to_dump64(s, env->CSR_BADV);
+ ret = f(&note, LOONGARCH_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = loongarch_write_elf64_fprpreg(f, env, cpuid, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return ret;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ info->d_machine = EM_LOONGARCH;
+ info->d_endian = ELFDATA2LSB;
+ info->d_class = ELFCLASS64;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ size_t note_size = 0;
+
+ if (class == ELFCLASS64) {
+ note_size = LOONGARCH_PRSTATUS_NOTE_SIZE + LOONGARCH_PRFPREG_NOTE_SIZE;
+ }
+
+ return note_size * nr_cpus;
+}
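
The QEMU_BUILD_BUG_ON() assertions in the new file follow directly from the
packed layouts (arithmetic shown for reference, not part of the patch):

        loongarch_user_regs:    32*8 + 8 + 8 + 8 + 10*8   = 360 bytes
        loongarch_elf_prstatus: 32 + 4 + 76 + 360 + 4 + 4 = 480 bytes
        loongarch_fpu_struct:   32*8 + 8 + 4              = 268 bytes

Each note adds LOONGARCH_NOTE_HEADER_SIZE on top of its payload: the 12-byte
Elf64_Nhdr plus the 8-byte padded "CORE" name, i.e. 20 bytes. So
cpu_get_note_size() reports (20 + 480) + (20 + 268) = 788 bytes per vCPU for
ELFCLASS64 dumps.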
diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h
index db5ad1c..58cc45a 100644
--- a/target/loongarch/cpu-param.h
+++ b/target/loongarch/cpu-param.h
@@ -8,12 +8,11 @@
#ifndef LOONGARCH_CPU_PARAM_H
#define LOONGARCH_CPU_PARAM_H
-#define TARGET_LONG_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 48
#define TARGET_VIRT_ADDR_SPACE_BITS 48
#define TARGET_PAGE_BITS 12
-#define TCG_GUEST_DEFAULT_MO (0)
+#define TARGET_INSN_START_EXTRA_WORDS 0
#endif
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index 5e85b9d..abad84c 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -10,26 +10,29 @@
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
-#include "sysemu/qtest.h"
-#include "sysemu/tcg.h"
-#include "sysemu/kvm.h"
+#include "system/qtest.h"
+#include "system/tcg.h"
+#include "system/kvm.h"
#include "kvm/kvm_loongarch.h"
-#include "exec/exec-all.h"
+#include "hw/qdev-properties.h"
+#include "exec/translation-block.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
-#include "cpu-csr.h"
+#include "csr.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu/reset.h"
+#include "system/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#endif
+#include "tcg/tcg_loongarch.h"
const char * const regnames[32] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
@@ -331,8 +334,28 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
+}
#endif
+static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
+ flags |= is_va32(env) * HW_FLAGS_VA32;
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
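
In loongarch_get_tb_cpu_state() the FIELD_EX64() calls extract single-bit
fields, so each multiplication by an HW_FLAGS_* constant is a branchless
conditional OR; the FPE term, for instance, is equivalent to (sketch only):

        if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE)) {
            flags |= HW_FLAGS_EUEN_FPE;
        }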
+
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -348,11 +371,9 @@ static void loongarch_restore_state_to_opc(CPUState *cs,
}
#endif /* CONFIG_TCG */
+#ifndef CONFIG_USER_ONLY
static bool loongarch_cpu_has_work(CPUState *cs)
{
-#ifdef CONFIG_USER_ONLY
- return true;
-#else
bool has_work = false;
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -361,8 +382,8 @@ static bool loongarch_cpu_has_work(CPUState *cs)
}
return has_work;
-#endif
}
+#endif /* !CONFIG_USER_ONLY */
static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -374,10 +395,38 @@ static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
return MMU_DA_IDX;
}
+static void loongarch_la464_init_csr(Object *obj)
+{
+#ifndef CONFIG_USER_ONLY
+ static bool initialized;
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+ CPULoongArchState *env = &cpu->env;
+ int i, num;
+
+ if (!initialized) {
+ initialized = true;
+ num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM);
+ for (i = num; i < 16; i++) {
+ set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED);
+ }
+ set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRCTL, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED);
+ set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED);
+ }
+#endif
+}
+
static void loongarch_la464_initfn(Object *obj)
{
LoongArchCPU *cpu = LOONGARCH_CPU(obj);
CPULoongArchState *env = &cpu->env;
+ uint32_t data = 0, field;
int i;
for (i = 0; i < 21; i++) {
@@ -387,18 +436,23 @@ static void loongarch_la464_initfn(Object *obj)
cpu->dtb_compatible = "loongarch,Loongson-3A5000";
env->cpucfg[0] = 0x14c010; /* PRID */
- uint32_t data = 0;
data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
- data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
+ if (kvm_enabled()) {
+ /* GPA address width of VM is 47, field value is 47 - 1 */
+ field = 0x2e;
+ } else {
+ field = 0x2f; /* 48 bit - 1 */
+ }
+ data = FIELD_DP32(data, CPUCFG1, PALEN, field);
data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
data = FIELD_DP32(data, CPUCFG1, UAL, 1);
data = FIELD_DP32(data, CPUCFG1, RI, 1);
data = FIELD_DP32(data, CPUCFG1, EP, 1);
data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
data = FIELD_DP32(data, CPUCFG1, HP, 1);
- data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
+ data = FIELD_DP32(data, CPUCFG1, CRC, 1);
env->cpucfg[1] = data;
data = 0;
@@ -469,6 +523,7 @@ static void loongarch_la464_initfn(Object *obj)
env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);
+ loongarch_la464_init_csr(obj);
loongarch_cpu_post_init(obj);
}
@@ -476,7 +531,7 @@ static void loongarch_la132_initfn(Object *obj)
{
LoongArchCPU *cpu = LOONGARCH_CPU(obj);
CPULoongArchState *env = &cpu->env;
-
+ uint32_t data = 0;
int i;
for (i = 0; i < 21; i++) {
@@ -486,7 +541,6 @@ static void loongarch_la132_initfn(Object *obj)
cpu->dtb_compatible = "loongarch,Loongson-1C103";
env->cpucfg[0] = 0x148042; /* PRID */
- uint32_t data = 0;
data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
@@ -497,7 +551,7 @@ static void loongarch_la132_initfn(Object *obj)
data = FIELD_DP32(data, CPUCFG1, EP, 0);
data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
data = FIELD_DP32(data, CPUCFG1, HP, 1);
- data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
+ data = FIELD_DP32(data, CPUCFG1, CRC, 1);
env->cpucfg[1] = data;
}
@@ -509,6 +563,7 @@ static void loongarch_max_initfn(Object *obj)
static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
{
+ uint8_t tlb_ps;
CPUState *cs = CPU(obj);
LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
CPULoongArchState *env = cpu_env(cs);
@@ -549,7 +604,25 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
env->CSR_TID = cs->cpu_index;
-
+ /*
+ * Workaround for edk2-stable202408: the CSR PGD register is set only
+ * if its value is zero for the boot CPU, which causes a reboot issue.
+ *
+ * Clear the TLB-related CSR registers here.
+ */
+ env->CSR_PGDH = 0;
+ env->CSR_PGDL = 0;
+ env->CSR_PWCH = 0;
+ env->CSR_EENTRY = 0;
+ env->CSR_TLBRENTRY = 0;
+ env->CSR_MERRENTRY = 0;
+ /* set CSR_PWCL.PTBASE and CSR_STLBPS.PS bits from CSR_PRCFG2 */
+ if (env->CSR_PRCFG2 == 0) {
+ env->CSR_PRCFG2 = 0x3fffff000;
+ }
+ tlb_ps = ctz32(env->CSR_PRCFG2);
+ env->CSR_STLBPS = FIELD_DP64(env->CSR_STLBPS, CSR_STLBPS, PS, tlb_ps);
+ env->CSR_PWCL = FIELD_DP64(env->CSR_PWCL, CSR_PWCL, PTBASE, tlb_ps);
for (n = 0; n < 4; n++) {
env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
@@ -563,7 +636,7 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
memset(env->tlb, 0, sizeof(env->tlb));
#endif
if (kvm_enabled()) {
- kvm_arch_reset_vcpu(env);
+ kvm_arch_reset_vcpu(cs);
}
#endif
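
In the reset hunk above, CSR_PRCFG2 appears to be used as a bitmap of the
supported TLB page-size exponents, so ctz32() selects the smallest supported
page size, which is then seeded into CSR_STLBPS.PS and CSR_PWCL.PTBASE. With
the default value written when the register is still zero (a worked example
under that reading):

        CSR_PRCFG2 = 0x3fffff000         /* bits 12..33 set */
        tlb_ps = ctz32(CSR_PRCFG2) = 12  /* 4 KiB smallest page size */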
@@ -575,6 +648,7 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
+ info->endian = BFD_ENDIAN_LITTLE;
info->print_insn = print_insn_loongarch;
}
@@ -592,70 +666,111 @@ static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
loongarch_cpu_register_gdb_regs_for_features(cs);
- cpu_reset(cs);
qemu_init_vcpu(cs);
+ cpu_reset(cs);
lacc->parent_realize(dev, errp);
}
-static bool loongarch_get_lsx(Object *obj, Error **errp)
+static void loongarch_cpu_unrealizefn(DeviceState *dev)
{
- LoongArchCPU *cpu = LOONGARCH_CPU(obj);
- bool ret;
+ LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
- if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
- ret = true;
- } else {
- ret = false;
- }
- return ret;
+#ifndef CONFIG_USER_ONLY
+ cpu_remove_sync(CPU(dev));
+#endif
+
+ lacc->parent_unrealize(dev);
+}
+
+static bool loongarch_get_lsx(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF;
}
static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+ uint32_t val;
+
+ cpu->lsx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+ if (cpu->lsx == ON_OFF_AUTO_OFF) {
+        if (cpu->lasx == ON_OFF_AUTO_ON) {
+            error_setg(errp, "Failed to disable LSX since LASX is enabled");
+            return;
+        }
+        cpu->lasx = ON_OFF_AUTO_OFF;
+    }
+
+ if (kvm_enabled()) {
+        /* KVM feature detection is done in kvm_arch_init_vcpu() */
+ return;
+ }
- if (value) {
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
+ /* LSX feature detection in TCG mode */
+ val = cpu->env.cpucfg[2];
+ if (cpu->lsx == ON_OFF_AUTO_ON) {
+ if (FIELD_EX32(val, CPUCFG2, LSX) == 0) {
+ error_setg(errp, "Failed to enable LSX in TCG mode");
+ return;
+ }
} else {
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
+ cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0);
+ val = cpu->env.cpucfg[2];
}
+
+ cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value);
}
static bool loongarch_get_lasx(Object *obj, Error **errp)
{
- LoongArchCPU *cpu = LOONGARCH_CPU(obj);
- bool ret;
-
- if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
- ret = true;
- } else {
- ret = false;
- }
- return ret;
+ return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF;
}
static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+ uint32_t val;
- if (value) {
- if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
- }
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
- } else {
- cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
+ cpu->lasx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+ if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) {
+        error_setg(errp, "Failed to enable LASX since LSX is disabled");
+ return;
}
+
+ if (kvm_enabled()) {
+        /* KVM feature detection is done in kvm_arch_init_vcpu() */
+ return;
+ }
+
+ /* LASX feature detection in TCG mode */
+ val = cpu->env.cpucfg[2];
+ if (cpu->lasx == ON_OFF_AUTO_ON) {
+ if (FIELD_EX32(val, CPUCFG2, LASX) == 0) {
+ error_setg(errp, "Failed to enable LASX in TCG mode");
+ return;
+ }
+ }
+
+ cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value);
}
void loongarch_cpu_post_init(Object *obj)
{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->lbt = ON_OFF_AUTO_OFF;
+ cpu->pmu = ON_OFF_AUTO_OFF;
+ cpu->lsx = ON_OFF_AUTO_AUTO;
+ cpu->lasx = ON_OFF_AUTO_AUTO;
object_property_add_bool(obj, "lsx", loongarch_get_lsx,
loongarch_set_lsx);
object_property_add_bool(obj, "lasx", loongarch_get_lasx,
loongarch_set_lasx);
+    /* LBT is enabled only in KVM mode; it is not supported in TCG mode */
+ if (kvm_enabled()) {
+ kvm_loongarch_cpu_post_init(cpu);
+ }
}
static void loongarch_cpu_init(Object *obj)
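The "lsx" and "lasx" accessors registered in loongarch_cpu_post_init() are ordinary QOM bool properties, so they can be driven from the command line (e.g. "-cpu la464,lasx=off") or programmatically before the vCPU is realized. A minimal sketch of the programmatic path, assuming an already-created LoongArchCPU object (the helper name is illustrative, not part of this patch):

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "cpu.h"

/* Illustrative only: keep LSX but turn LASX off on one vCPU. */
static void example_configure_simd(LoongArchCPU *cpu)
{
    Object *obj = OBJECT(cpu);

    /* The setters above reject invalid combinations, e.g. LASX without LSX. */
    object_property_set_bool(obj, "lasx", false, &error_fatal);
    object_property_set_bool(obj, "lsx", true, &error_fatal);
}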
@@ -685,7 +800,55 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
return oc;
}
-void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+static void loongarch_cpu_dump_csr(CPUState *cs, FILE *f)
+{
+#ifndef CONFIG_USER_ONLY
+ CPULoongArchState *env = cpu_env(cs);
+ CSRInfo *csr_info;
+ int64_t *addr;
+ int i, j, len, col = 0;
+
+ qemu_fprintf(f, "\n");
+
+    /* Dump all generic CSR registers */
+ for (i = 0; i < LOONGARCH_CSR_DBG; i++) {
+ csr_info = get_csr(i);
+ if (!csr_info || (csr_info->flags & CSRFL_UNUSED)) {
+ if (i == (col + 3)) {
+ qemu_fprintf(f, "\n");
+ }
+
+ continue;
+ }
+
+ if ((i > (col + 3)) || (i == col)) {
+ col = i & ~3;
+ qemu_fprintf(f, " CSR%03d:", col);
+ }
+
+ addr = (void *)env + csr_info->offset;
+ qemu_fprintf(f, " %s ", csr_info->name);
+ len = strlen(csr_info->name);
+ for (; len < 6; len++) {
+ qemu_fprintf(f, " ");
+ }
+
+ qemu_fprintf(f, "%" PRIx64, *addr);
+ j = find_last_bit((void *)addr, BITS_PER_LONG) & (BITS_PER_LONG - 1);
+ len += j / 4 + 1;
+ for (; len < 22; len++) {
+ qemu_fprintf(f, " ");
+ }
+
+ if (i == (col + 3)) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "\n");
+#endif
+}
+
+static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
CPULoongArchState *env = cpu_env(cs);
int i;
@@ -704,22 +867,8 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
- qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
- qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
- qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
- qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
- qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
- qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
- qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
- qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
- qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
- " PRCFG3=%016" PRIx64 "\n",
- env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
- qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
- qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
- qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);
- qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG);
- qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL);
+ /* csr */
+ loongarch_cpu_dump_csr(cs, f);
/* fpr */
if (flags & CPU_DUMP_FPU) {
@@ -733,17 +882,23 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps loongarch_tcg_ops = {
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
+ .get_tb_cpu_state = loongarch_get_tb_cpu_state,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,
+ .mmu_index = loongarch_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = loongarch_cpu_tlb_fill,
+ .pointer_wrap = loongarch_pointer_wrap,
.cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
.cpu_exec_halt = loongarch_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = loongarch_cpu_do_interrupt,
.do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
@@ -754,6 +909,8 @@ static const TCGCPUOps loongarch_tcg_ops = {
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps loongarch_sysemu_ops = {
+ .has_work = loongarch_cpu_has_work,
+ .write_elf64_note = loongarch_cpu_write_elf64_note,
.get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};
@@ -765,21 +922,29 @@ static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
}
#endif
-static void loongarch_cpu_class_init(ObjectClass *c, void *data)
+static const Property loongarch_cpu_properties[] = {
+ DEFINE_PROP_INT32("socket-id", LoongArchCPU, socket_id, 0),
+ DEFINE_PROP_INT32("core-id", LoongArchCPU, core_id, 0),
+ DEFINE_PROP_INT32("thread-id", LoongArchCPU, thread_id, 0),
+ DEFINE_PROP_INT32("node-id", LoongArchCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
+};
+
+static void loongarch_cpu_class_init(ObjectClass *c, const void *data)
{
LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
DeviceClass *dc = DEVICE_CLASS(c);
ResettableClass *rc = RESETTABLE_CLASS(c);
+ device_class_set_props(dc, loongarch_cpu_properties);
device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
&lacc->parent_realize);
+ device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn,
+ &lacc->parent_unrealize);
resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
&lacc->parent_phases);
cc->class_by_name = loongarch_cpu_class_by_name;
- cc->has_work = loongarch_cpu_has_work;
- cc->mmu_index = loongarch_cpu_mmu_index;
cc->dump_state = loongarch_cpu_dump_state;
cc->set_pc = loongarch_cpu_set_pc;
cc->get_pc = loongarch_cpu_get_pc;
@@ -796,6 +961,7 @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data)
#ifdef CONFIG_TCG
cc->tcg_ops = &loongarch_tcg_ops;
#endif
+ dc->user_creatable = true;
}
static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
@@ -803,7 +969,7 @@ static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
return "loongarch32";
}
-static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
+static void loongarch32_cpu_class_init(ObjectClass *c, const void *data)
{
CPUClass *cc = CPU_CLASS(c);
@@ -816,7 +982,7 @@ static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
return "loongarch64";
}
-static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
+static void loongarch64_cpu_class_init(ObjectClass *c, const void *data)
{
CPUClass *cc = CPU_CLASS(c);
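The socket-id/core-id/thread-id/node-id entries added to loongarch_cpu_properties are plain qdev int32 properties, so a board model can assign the topology before the CPU device is realized. A minimal sketch (the values and helper name are arbitrary, purely for illustration):

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "cpu.h"

/* Illustrative only: place one vCPU at socket 0, core 2, thread 1. */
static void example_set_topology(LoongArchCPU *cpu)
{
    DeviceState *dev = DEVICE(cpu);

    qdev_prop_set_int32(dev, "socket-id", 0);
    qdev_prop_set_int32(dev, "core-id", 2);
    qdev_prop_set_int32(dev, "thread-id", 1);
    /* node-id keeps its CPU_UNSET_NUMA_NODE_ID default unless set here. */
}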
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index 6c41faf..9538e8d 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -9,12 +9,14 @@
#define LOONGARCH_CPU_H
#include "qemu/int128.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "fpu/softfloat-types.h"
#include "hw/registerfields.h"
#include "qemu/timer.h"
#ifndef CONFIG_USER_ONLY
-#include "exec/memory.h"
+#include "system/memory.h"
#endif
#include "cpu-csr.h"
#include "cpu-qom.h"
@@ -129,7 +131,7 @@ FIELD(CPUCFG1, RI, 21, 1)
FIELD(CPUCFG1, EP, 22, 1)
FIELD(CPUCFG1, RPLV, 23, 1)
FIELD(CPUCFG1, HP, 24, 1)
-FIELD(CPUCFG1, IOCSR_BRD, 25, 1)
+FIELD(CPUCFG1, CRC, 25, 1)
FIELD(CPUCFG1, MSG_INT, 26, 1)
/* cpucfg[1].arch */
@@ -153,6 +155,7 @@ FIELD(CPUCFG2, LLFTP_VER, 15, 3)
FIELD(CPUCFG2, LBT_X86, 18, 1)
FIELD(CPUCFG2, LBT_ARM, 19, 1)
FIELD(CPUCFG2, LBT_MIPS, 20, 1)
+FIELD(CPUCFG2, LBT_ALL, 18, 3)
FIELD(CPUCFG2, LSPW, 21, 1)
FIELD(CPUCFG2, LAM, 22, 1)
@@ -281,6 +284,26 @@ struct LoongArchTLB {
typedef struct LoongArchTLB LoongArchTLB;
#endif
+enum loongarch_features {
+ LOONGARCH_FEATURE_LSX,
+ LOONGARCH_FEATURE_LASX,
+ LOONGARCH_FEATURE_LBT, /* loongson binary translation extension */
+ LOONGARCH_FEATURE_PMU,
+ LOONGARCH_FEATURE_PV_IPI,
+ LOONGARCH_FEATURE_STEALTIME,
+};
+
+typedef struct LoongArchBT {
+ /* scratch registers */
+ uint64_t scr0;
+ uint64_t scr1;
+ uint64_t scr2;
+ uint64_t scr3;
+ /* loongarch eflags */
+ uint32_t eflags;
+ uint32_t ftop;
+} lbt_t;
+
typedef struct CPUArchState {
uint64_t gpr[32];
uint64_t pc;
@@ -288,8 +311,10 @@ typedef struct CPUArchState {
fpr_t fpr[32];
bool cf[8];
uint32_t fcsr0;
+ lbt_t lbt;
uint32_t cpucfg[21];
+ uint32_t pv_features;
/* LoongArch CSRs */
uint64_t CSR_CRMD;
@@ -346,6 +371,9 @@ typedef struct CPUArchState {
uint64_t CSR_DBG;
uint64_t CSR_DERA;
uint64_t CSR_DSAVE;
+ struct {
+ uint64_t guest_addr;
+ } stealtime;
#ifdef CONFIG_TCG
float_status fp_status;
@@ -362,13 +390,17 @@ typedef struct CPUArchState {
bool load_elf;
uint64_t elf_address;
uint32_t mp_state;
- /* Store ipistate to access from this struct */
- DeviceState *ipistate;
struct loongarch_boot_info *boot_info;
#endif
} CPULoongArchState;
+typedef struct LoongArchCPUTopo {
+ int32_t socket_id; /* socket-id of this VCPU */
+ int32_t core_id; /* core-id of this VCPU */
+ int32_t thread_id; /* thread-id of this VCPU */
+} LoongArchCPUTopo;
+
/**
* LoongArchCPU:
* @env: #CPULoongArchState
@@ -381,11 +413,22 @@ struct ArchCPU {
CPULoongArchState env;
QEMUTimer timer;
uint32_t phy_id;
+ OnOffAuto lbt;
+ OnOffAuto pmu;
+ OnOffAuto lsx;
+ OnOffAuto lasx;
+ OnOffAuto kvm_pv_ipi;
+ OnOffAuto kvm_steal_time;
+ int32_t socket_id; /* socket-id of this CPU */
+ int32_t core_id; /* core-id of this CPU */
+ int32_t thread_id; /* thread-id of this CPU */
+ int32_t node_id; /* NUMA node of this CPU */
/* 'compatible' string for this CPU for Linux device trees */
const char *dtb_compatible;
/* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */
uint64_t kvm_state_counter;
+ VMChangeStateEntry *vmsentry;
};
/**
@@ -399,6 +442,7 @@ struct LoongArchCPUClass {
CPUClass parent_class;
DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
ResettablePhases parent_phases;
};
@@ -448,22 +492,17 @@ static inline void set_pc(CPULoongArchState *env, uint64_t value)
#define HW_FLAGS_VA32 0x20
#define HW_FLAGS_EUEN_ASXE 0x40
-static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
- *flags |= is_va32(env) * HW_FLAGS_VA32;
-}
-
-#include "exec/cpu-all.h"
-
#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU
void loongarch_cpu_post_init(Object *obj);
+#ifdef CONFIG_KVM
+void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu);
+#else
+static inline void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu)
+{
+}
+#endif
+void kvm_loongarch_init_irq_routing(void);
+
#endif /* LOONGARCH_CPU_H */
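The new CPUCFG2.LBT_ALL field (bits 18-20) overlaps the three single-bit LBT_X86/LBT_ARM/LBT_MIPS fields, so one extraction is enough to test whether any binary-translation flavour is advertised. A minimal sketch using the same registerfields helpers the header already relies on (the function name is illustrative):

#include "qemu/osdep.h"
#include "hw/registerfields.h"
#include "cpu.h"

/* Illustrative only: true if cpucfg[2] advertises any LBT flavour. */
static bool example_has_any_lbt(const CPULoongArchState *env)
{
    /* LBT_ALL covers LBT_X86 | LBT_ARM | LBT_MIPS in one 3-bit field. */
    return FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 0;
}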
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 580362a..e172b11 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -7,162 +7,143 @@
*/
#include "qemu/osdep.h"
+#include "system/tcg.h"
#include "cpu.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/target_page.h"
#include "internals.h"
#include "cpu-csr.h"
+#include "tcg/tcg_loongarch.h"
-#ifdef CONFIG_TCG
-static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
- int *prot, target_ulong address,
- int access_type, int index, int mmu_idx)
+void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
+ uint64_t *dir_width, target_ulong level)
{
- LoongArchTLB *tlb = &env->tlb[index];
- uint64_t plv = mmu_idx;
- uint64_t tlb_entry, tlb_ppn;
- uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
-
- if (index >= LOONGARCH_STLB) {
- tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
- } else {
- tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
+ switch (level) {
+ case 1:
+ *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
+ *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
+ break;
+ case 2:
+ *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
+ *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
+ break;
+ case 3:
+ *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
+ *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
+ break;
+ case 4:
+ *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
+ *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
+ break;
+ default:
+ /* level may be zero for ldpte */
+ *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
+ *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
+ break;
}
- n = (address >> tlb_ps) & 0x1;/* Odd or even */
+}
- tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
- tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
- tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
- tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
- if (is_la64(env)) {
- tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
- tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
- tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
- tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
+static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
+ int *prot, target_ulong address)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong index, phys;
+ uint64_t dir_base, dir_width;
+ uint64_t base;
+ int level;
+
+ if ((address >> 63) & 0x1) {
+ base = env->CSR_PGDH;
} else {
- tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
- tlb_nx = 0;
- tlb_nr = 0;
- tlb_rplv = 0;
+ base = env->CSR_PGDL;
}
+ base &= TARGET_PHYS_MASK;
- /* Remove sw bit between bit12 -- bit PS*/
- tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1));
+ for (level = 4; level > 0; level--) {
+ get_dir_base_width(env, &dir_base, &dir_width, level);
- /* Check access rights */
- if (!tlb_v) {
- return TLBRET_INVALID;
- }
+ if (dir_width == 0) {
+ continue;
+ }
- if (access_type == MMU_INST_FETCH && tlb_nx) {
- return TLBRET_XI;
+ /* get next level page directory */
+ index = (address >> dir_base) & ((1 << dir_width) - 1);
+ phys = base | index << 3;
+ base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
+ if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+ /* base is a huge pte */
+ break;
+ }
}
- if (access_type == MMU_DATA_LOAD && tlb_nr) {
- return TLBRET_RI;
+ /* pte */
+ if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+ /* Huge Page. base is pte */
+ base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
+ base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
+ if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
+ base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
+ base = FIELD_DP64(base, TLBENTRY, G, 1);
+ }
+ } else {
+ /* Normal Page. base points to pte */
+ get_dir_base_width(env, &dir_base, &dir_width, 0);
+ index = (address >> dir_base) & ((1 << dir_width) - 1);
+ phys = base | index << 3;
+ base = ldq_phys(cs->as, phys);
}
- if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
- ((tlb_rplv == 1) && (plv != tlb_plv))) {
- return TLBRET_PE;
- }
+ /* TODO: check plv and other bits? */
- if ((access_type == MMU_DATA_STORE) && !tlb_d) {
- return TLBRET_DIRTY;
+ /* base is pte, in normal pte format */
+ if (!FIELD_EX64(base, TLBENTRY, V)) {
+ return TLBRET_NOMATCH;
}
- *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
- (address & MAKE_64BIT_MASK(0, tlb_ps));
- *prot = PAGE_READ;
- if (tlb_d) {
- *prot |= PAGE_WRITE;
- }
- if (!tlb_nx) {
- *prot |= PAGE_EXEC;
+ if (!FIELD_EX64(base, TLBENTRY, D)) {
+ *prot = PAGE_READ;
+ } else {
+ *prot = PAGE_READ | PAGE_WRITE;
}
- return TLBRET_MATCH;
-}
-/*
- * One tlb entry holds an adjacent odd/even pair, the vpn is the
- * content of the virtual page number divided by 2. So the
- * compare vpn is bit[47:15] for 16KiB page. while the vppn
- * field in tlb entry contains bit[47:13], so need adjust.
- * virt_vpn = vaddr[47:13]
- */
-bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
- int *index)
-{
- LoongArchTLB *tlb;
- uint16_t csr_asid, tlb_asid, stlb_idx;
- uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
- int i, compare_shift;
- uint64_t vpn, tlb_vppn;
-
- csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
- stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
- vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
- stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
- compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
-
- /* Search STLB */
- for (i = 0; i < 8; ++i) {
- tlb = &env->tlb[i * 256 + stlb_idx];
- tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
- if (tlb_e) {
- tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
- tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
- tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
-
- if ((tlb_g == 1 || tlb_asid == csr_asid) &&
- (vpn == (tlb_vppn >> compare_shift))) {
- *index = i * 256 + stlb_idx;
- return true;
- }
- }
- }
+ /* get TARGET_PAGE_SIZE aligned physical address */
+ base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
+ /* mask RPLV, NX, NR bits */
+ base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
+ base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
+ base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
+ /* mask other attribute bits */
+ *physical = base & TARGET_PAGE_MASK;
- /* Search MTLB */
- for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
- tlb = &env->tlb[i];
- tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
- if (tlb_e) {
- tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
- tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
- tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
- tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
- compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
- vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
- if ((tlb_g == 1 || tlb_asid == csr_asid) &&
- (vpn == (tlb_vppn >> compare_shift))) {
- *index = i;
- return true;
- }
- }
- }
- return false;
+ return 0;
}
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
int *prot, target_ulong address,
- MMUAccessType access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx,
+ int is_debug)
{
- int index, match;
+ int ret;
+
+ if (tcg_enabled()) {
+ ret = loongarch_get_addr_from_tlb(env, physical, prot, address,
+ access_type, mmu_idx);
+ if (ret != TLBRET_NOMATCH) {
+ return ret;
+ }
+ }
- match = loongarch_tlb_search(env, address, &index);
- if (match) {
- return loongarch_map_tlb_entry(env, physical, prot,
- address, access_type, index, mmu_idx);
+ if (is_debug) {
+ /*
+         * For debugger memory access, we want to perform the mapping
+         * whenever a legal mapping exists, even if it is not yet in the
+         * TLB. Return 0 if there is a valid mapping, non-zero otherwise.
+ */
+ return loongarch_page_table_walker(env, physical, prot, address);
}
return TLBRET_NOMATCH;
}
-#else
-static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
- int *prot, target_ulong address,
- MMUAccessType access_type, int mmu_idx)
-{
- return TLBRET_NOMATCH;
-}
-#endif
static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
target_ulong dmw)
@@ -178,7 +159,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
int get_physical_address(CPULoongArchState *env, hwaddr *physical,
int *prot, target_ulong address,
- MMUAccessType access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx, int is_debug)
{
int user_mode = mmu_idx == MMU_USER_IDX;
int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
@@ -222,7 +203,7 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical,
/* Mapped address */
return loongarch_map_address(env, physical, prot, address,
- access_type, mmu_idx);
+ access_type, mmu_idx, is_debug);
}
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -232,7 +213,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int prot;
if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
- cpu_mmu_index(cs, false)) != 0) {
+ cpu_mmu_index(cs, false), 1) != 0) {
return -1;
}
return phys_addr;
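At every level the new page table walker derives the directory index the same way: shift the virtual address right by the level's base and mask with the level's width. A standalone sketch of just that arithmetic (the base/width values below are hypothetical, chosen only to show the computation, not taken from any particular CSR_PWCL/CSR_PWCH configuration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the walker's per-level index computation. */
static uint64_t dir_index(uint64_t address, unsigned dir_base, unsigned dir_width)
{
    return (address >> dir_base) & ((UINT64_C(1) << dir_width) - 1);
}

int main(void)
{
    uint64_t va = 0x120034568ULL;   /* arbitrary virtual address */

    /* Hypothetical 16KiB-page layout: 14-bit page offset, 11-bit directories. */
    printf("pte index:  %" PRIu64 "\n", dir_index(va, 14, 11));
    printf("dir1 index: %" PRIu64 "\n", dir_index(va, 25, 11));
    printf("dir3 index: %" PRIu64 "\n", dir_index(va, 36, 11));
    return 0;
}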
diff --git a/target/loongarch/csr.c b/target/loongarch/csr.c
new file mode 100644
index 0000000..7ea0a30
--- /dev/null
+++ b/target/loongarch/csr.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
+ */
+#include <stddef.h>
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "csr.h"
+
+#define CSR_OFF_FUNCS(NAME, FL, RD, WR) \
+ [LOONGARCH_CSR_##NAME] = { \
+ .name = (stringify(NAME)), \
+ .offset = offsetof(CPULoongArchState, CSR_##NAME), \
+ .flags = FL, .readfn = RD, .writefn = WR \
+ }
+
+#define CSR_OFF_ARRAY(NAME, N) \
+ [LOONGARCH_CSR_##NAME(N)] = { \
+ .name = (stringify(NAME##N)), \
+ .offset = offsetof(CPULoongArchState, CSR_##NAME[N]), \
+ .flags = 0, .readfn = NULL, .writefn = NULL \
+ }
+
+#define CSR_OFF_FLAGS(NAME, FL) CSR_OFF_FUNCS(NAME, FL, NULL, NULL)
+#define CSR_OFF(NAME) CSR_OFF_FLAGS(NAME, 0)
+
+static CSRInfo csr_info[] = {
+ CSR_OFF_FLAGS(CRMD, CSRFL_EXITTB),
+ CSR_OFF(PRMD),
+ CSR_OFF_FLAGS(EUEN, CSRFL_EXITTB),
+ CSR_OFF_FLAGS(MISC, CSRFL_READONLY),
+ CSR_OFF(ECFG),
+ CSR_OFF_FLAGS(ESTAT, CSRFL_EXITTB),
+ CSR_OFF(ERA),
+ CSR_OFF(BADV),
+ CSR_OFF_FLAGS(BADI, CSRFL_READONLY),
+ CSR_OFF(EENTRY),
+ CSR_OFF(TLBIDX),
+ CSR_OFF(TLBEHI),
+ CSR_OFF(TLBELO0),
+ CSR_OFF(TLBELO1),
+ CSR_OFF_FLAGS(ASID, CSRFL_EXITTB),
+ CSR_OFF(PGDL),
+ CSR_OFF(PGDH),
+ CSR_OFF_FLAGS(PGD, CSRFL_READONLY),
+ CSR_OFF(PWCL),
+ CSR_OFF(PWCH),
+ CSR_OFF(STLBPS),
+ CSR_OFF(RVACFG),
+ CSR_OFF_FLAGS(CPUID, CSRFL_READONLY),
+ CSR_OFF_FLAGS(PRCFG1, CSRFL_READONLY),
+ CSR_OFF_FLAGS(PRCFG2, CSRFL_READONLY),
+ CSR_OFF_FLAGS(PRCFG3, CSRFL_READONLY),
+ CSR_OFF_ARRAY(SAVE, 0),
+ CSR_OFF_ARRAY(SAVE, 1),
+ CSR_OFF_ARRAY(SAVE, 2),
+ CSR_OFF_ARRAY(SAVE, 3),
+ CSR_OFF_ARRAY(SAVE, 4),
+ CSR_OFF_ARRAY(SAVE, 5),
+ CSR_OFF_ARRAY(SAVE, 6),
+ CSR_OFF_ARRAY(SAVE, 7),
+ CSR_OFF_ARRAY(SAVE, 8),
+ CSR_OFF_ARRAY(SAVE, 9),
+ CSR_OFF_ARRAY(SAVE, 10),
+ CSR_OFF_ARRAY(SAVE, 11),
+ CSR_OFF_ARRAY(SAVE, 12),
+ CSR_OFF_ARRAY(SAVE, 13),
+ CSR_OFF_ARRAY(SAVE, 14),
+ CSR_OFF_ARRAY(SAVE, 15),
+ CSR_OFF(TID),
+ CSR_OFF_FLAGS(TCFG, CSRFL_IO),
+ CSR_OFF_FLAGS(TVAL, CSRFL_READONLY | CSRFL_IO),
+ CSR_OFF(CNTC),
+ CSR_OFF_FLAGS(TICLR, CSRFL_IO),
+ CSR_OFF(LLBCTL),
+ CSR_OFF(IMPCTL1),
+ CSR_OFF(IMPCTL2),
+ CSR_OFF(TLBRENTRY),
+ CSR_OFF(TLBRBADV),
+ CSR_OFF(TLBRERA),
+ CSR_OFF(TLBRSAVE),
+ CSR_OFF(TLBRELO0),
+ CSR_OFF(TLBRELO1),
+ CSR_OFF(TLBREHI),
+ CSR_OFF(TLBRPRMD),
+ CSR_OFF(MERRCTL),
+ CSR_OFF(MERRINFO1),
+ CSR_OFF(MERRINFO2),
+ CSR_OFF(MERRENTRY),
+ CSR_OFF(MERRERA),
+ CSR_OFF(MERRSAVE),
+ CSR_OFF(CTAG),
+ CSR_OFF_ARRAY(DMW, 0),
+ CSR_OFF_ARRAY(DMW, 1),
+ CSR_OFF_ARRAY(DMW, 2),
+ CSR_OFF_ARRAY(DMW, 3),
+ CSR_OFF(DBG),
+ CSR_OFF(DERA),
+ CSR_OFF(DSAVE),
+};
+
+CSRInfo *get_csr(unsigned int csr_num)
+{
+ CSRInfo *csr;
+
+ if (csr_num >= ARRAY_SIZE(csr_info)) {
+ return NULL;
+ }
+
+ csr = &csr_info[csr_num];
+ if (csr->offset == 0) {
+ return NULL;
+ }
+
+ return csr;
+}
+
+bool set_csr_flag(unsigned int csr_num, int flag)
+{
+ CSRInfo *csr;
+
+ csr = get_csr(csr_num);
+ if (!csr) {
+ return false;
+ }
+
+ csr->flags |= flag;
+ return true;
+}
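With the table in place, any CSR can be read generically through its CSRInfo offset, which is exactly how the new loongarch_cpu_dump_csr() walks the register file. A minimal read helper as a sketch (the function name is illustrative; it assumes csr_num indexes a 64-bit field of CPULoongArchState, as every entry in csr_info does):

#include "qemu/osdep.h"
#include "cpu.h"
#include "csr.h"

/* Illustrative only: fetch a CSR value via the CSRInfo table. */
static uint64_t example_read_csr(CPULoongArchState *env, unsigned int csr_num)
{
    CSRInfo *csr = get_csr(csr_num);

    if (!csr || (csr->flags & CSRFL_UNUSED)) {
        return 0;   /* unknown or unused CSR */
    }
    /* csr->offset is the byte offset of the field inside CPULoongArchState. */
    return *(uint64_t *)((char *)env + csr->offset);
}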
diff --git a/target/loongarch/csr.h b/target/loongarch/csr.h
new file mode 100644
index 0000000..81a656b
--- /dev/null
+++ b/target/loongarch/csr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
+ */
+
+#ifndef TARGET_LOONGARCH_CSR_H
+#define TARGET_LOONGARCH_CSR_H
+
+#include "cpu-csr.h"
+
+typedef void (*GenCSRFunc)(void);
+enum {
+ CSRFL_READONLY = (1 << 0),
+ CSRFL_EXITTB = (1 << 1),
+ CSRFL_IO = (1 << 2),
+ CSRFL_UNUSED = (1 << 3),
+};
+
+typedef struct {
+ const char *name;
+ int offset;
+ int flags;
+ GenCSRFunc readfn;
+ GenCSRFunc writefn;
+} CSRInfo;
+
+CSRInfo *get_csr(unsigned int csr_num);
+bool set_csr_flag(unsigned int csr_num, int flag);
+#endif /* TARGET_LOONGARCH_CSR_H */
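set_csr_flag() lets code outside csr.c adjust a CSR's flags after the static table has been built, for instance to hide registers from the CSR dump by marking them CSRFL_UNUSED. A minimal sketch (the choice of CSRs and the helper name are arbitrary, for illustration only):

#include "qemu/osdep.h"
#include "cpu.h"
#include "csr.h"

/* Illustrative only: tag the debug CSRs as unused so dumps skip them. */
static void example_hide_debug_csrs(void)
{
    set_csr_flag(LOONGARCH_CSR_DBG, CSRFL_UNUSED);
    set_csr_flag(LOONGARCH_CSR_DERA, CSRFL_UNUSED);
    set_csr_flag(LOONGARCH_CSR_DSAVE, CSRFL_UNUSED);
}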
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index 7ca245e..471eda2 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -34,26 +34,28 @@ void write_fcc(CPULoongArchState *env, uint64_t val)
int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
CPULoongArchState *env = cpu_env(cs);
- uint64_t val;
-
- if (0 <= n && n < 32) {
- val = env->gpr[n];
- } else if (n == 32) {
- /* orig_a0 */
- val = 0;
- } else if (n == 33) {
- val = env->pc;
- } else if (n == 34) {
- val = env->CSR_BADV;
- }
if (0 <= n && n <= 34) {
+ uint64_t val;
+
+ if (n < 32) {
+ val = env->gpr[n];
+ } else if (n == 32) {
+ /* orig_a0 */
+ val = 0;
+ } else if (n == 33) {
+ val = env->pc;
+ } else /* if (n == 34) */ {
+ val = env->CSR_BADV;
+ }
+
if (is_la64(env)) {
return gdb_get_reg64(mem_buf, val);
} else {
return gdb_get_reg32(mem_buf, val);
}
}
+
return 0;
}
@@ -61,23 +63,24 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPULoongArchState *env = cpu_env(cs);
target_ulong tmp;
- int read_length;
int length = 0;
+ if (n < 0 || n > 34) {
+ return 0;
+ }
+
if (is_la64(env)) {
- tmp = ldq_p(mem_buf);
- read_length = 8;
+ tmp = ldq_le_p(mem_buf);
+ length = 8;
} else {
- tmp = ldl_p(mem_buf);
- read_length = 4;
+ tmp = ldl_le_p(mem_buf);
+ length = 4;
}
if (0 <= n && n < 32) {
env->gpr[n] = tmp;
- length = read_length;
} else if (n == 33) {
set_pc(env, tmp);
- length = read_length;
}
return length;
}
@@ -104,13 +107,13 @@ static int loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
if (0 <= n && n < 32) {
- env->fpr[n].vreg.D(0) = ldq_p(mem_buf);
+ env->fpr[n].vreg.D(0) = ldq_le_p(mem_buf);
length = 8;
} else if (32 <= n && n < 40) {
env->cf[n - 32] = ldub_p(mem_buf);
length = 1;
} else if (n == 40) {
- env->fcsr0 = ldl_p(mem_buf);
+ env->fcsr0 = ldl_le_p(mem_buf);
length = 4;
}
return length;
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index b3b64a0..99981ab 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -1,720 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
*/
-DEF_HELPER_2(raise_exception, noreturn, env, i32)
-
-DEF_HELPER_FLAGS_1(bitrev_w, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(bitrev_d, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
-
-DEF_HELPER_FLAGS_3(asrtle_d, TCG_CALL_NO_WG, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(asrtgt_d, TCG_CALL_NO_WG, void, env, tl, tl)
-
-DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-DEF_HELPER_FLAGS_2(cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl)
-
-/* Floating-point helper */
-DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmaxa_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmaxa_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmina_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fmina_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-
-DEF_HELPER_FLAGS_5(fmuladd_s, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32)
-DEF_HELPER_FLAGS_5(fmuladd_d, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32)
-
-DEF_HELPER_FLAGS_3(fscaleb_s, TCG_CALL_NO_WG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(fscaleb_d, TCG_CALL_NO_WG, i64, env, i64, i64)
-
-DEF_HELPER_FLAGS_2(flogb_s, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(flogb_d, TCG_CALL_NO_WG, i64, env, i64)
-
-DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(frsqrt_s, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(frsqrt_d, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(frecip_s, TCG_CALL_NO_WG, i64, env, i64)
-DEF_HELPER_FLAGS_2(frecip_d, TCG_CALL_NO_WG, i64, env, i64)
-
-DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, i64, env, i64)
-DEF_HELPER_FLAGS_2(fclass_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
-
-/* fcmp.cXXX.s */
-DEF_HELPER_4(fcmp_c_s, i64, env, i64, i64, i32)
-/* fcmp.sXXX.s */
-DEF_HELPER_4(fcmp_s_s, i64, env, i64, i64, i32)
-/* fcmp.cXXX.d */
-DEF_HELPER_4(fcmp_c_d, i64, env, i64, i64, i32)
-/* fcmp.sXXX.d */
-DEF_HELPER_4(fcmp_s_d, i64, env, i64, i64, i32)
-
-DEF_HELPER_2(fcvt_d_s, i64, env, i64)
-DEF_HELPER_2(fcvt_s_d, i64, env, i64)
-DEF_HELPER_2(ffint_d_w, i64, env, i64)
-DEF_HELPER_2(ffint_d_l, i64, env, i64)
-DEF_HELPER_2(ffint_s_w, i64, env, i64)
-DEF_HELPER_2(ffint_s_l, i64, env, i64)
-DEF_HELPER_2(ftintrm_l_s, i64, env, i64)
-DEF_HELPER_2(ftintrm_l_d, i64, env, i64)
-DEF_HELPER_2(ftintrm_w_s, i64, env, i64)
-DEF_HELPER_2(ftintrm_w_d, i64, env, i64)
-DEF_HELPER_2(ftintrp_l_s, i64, env, i64)
-DEF_HELPER_2(ftintrp_l_d, i64, env, i64)
-DEF_HELPER_2(ftintrp_w_s, i64, env, i64)
-DEF_HELPER_2(ftintrp_w_d, i64, env, i64)
-DEF_HELPER_2(ftintrz_l_s, i64, env, i64)
-DEF_HELPER_2(ftintrz_l_d, i64, env, i64)
-DEF_HELPER_2(ftintrz_w_s, i64, env, i64)
-DEF_HELPER_2(ftintrz_w_d, i64, env, i64)
-DEF_HELPER_2(ftintrne_l_s, i64, env, i64)
-DEF_HELPER_2(ftintrne_l_d, i64, env, i64)
-DEF_HELPER_2(ftintrne_w_s, i64, env, i64)
-DEF_HELPER_2(ftintrne_w_d, i64, env, i64)
-DEF_HELPER_2(ftint_l_s, i64, env, i64)
-DEF_HELPER_2(ftint_l_d, i64, env, i64)
-DEF_HELPER_2(ftint_w_s, i64, env, i64)
-DEF_HELPER_2(ftint_w_d, i64, env, i64)
-DEF_HELPER_2(frint_s, i64, env, i64)
-DEF_HELPER_2(frint_d, i64, env, i64)
-
-DEF_HELPER_FLAGS_1(set_rounding_mode, TCG_CALL_NO_RWG, void, env)
-
-DEF_HELPER_1(rdtime_d, i64, env)
-
-#ifndef CONFIG_USER_ONLY
-/* CSRs helper */
-DEF_HELPER_1(csrrd_pgd, i64, env)
-DEF_HELPER_1(csrrd_cpuid, i64, env)
-DEF_HELPER_1(csrrd_tval, i64, env)
-DEF_HELPER_2(csrwr_estat, i64, env, tl)
-DEF_HELPER_2(csrwr_asid, i64, env, tl)
-DEF_HELPER_2(csrwr_tcfg, i64, env, tl)
-DEF_HELPER_2(csrwr_ticlr, i64, env, tl)
-DEF_HELPER_2(iocsrrd_b, i64, env, tl)
-DEF_HELPER_2(iocsrrd_h, i64, env, tl)
-DEF_HELPER_2(iocsrrd_w, i64, env, tl)
-DEF_HELPER_2(iocsrrd_d, i64, env, tl)
-DEF_HELPER_3(iocsrwr_b, void, env, tl, tl)
-DEF_HELPER_3(iocsrwr_h, void, env, tl, tl)
-DEF_HELPER_3(iocsrwr_w, void, env, tl, tl)
-DEF_HELPER_3(iocsrwr_d, void, env, tl, tl)
-
-/* TLB helper */
-DEF_HELPER_1(tlbwr, void, env)
-DEF_HELPER_1(tlbfill, void, env)
-DEF_HELPER_1(tlbsrch, void, env)
-DEF_HELPER_1(tlbrd, void, env)
-DEF_HELPER_1(tlbclr, void, env)
-DEF_HELPER_1(tlbflush, void, env)
-DEF_HELPER_1(invtlb_all, void, env)
-DEF_HELPER_2(invtlb_all_g, void, env, i32)
-DEF_HELPER_2(invtlb_all_asid, void, env, tl)
-DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl)
-DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl)
-
-DEF_HELPER_4(lddir, tl, env, tl, tl, i32)
-DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
-DEF_HELPER_1(ertn, void, env)
-DEF_HELPER_1(idle, void, env)
-#endif
-
-/* LoongArch LSX */
-DEF_HELPER_FLAGS_4(vhaddw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhaddw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vhsubw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vdiv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vdiv_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vmod_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsat_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_3(vexth_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vexth_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(vext2xv_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_w_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_d_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_d_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_wu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_du_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_du_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vext2xv_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(vmskltz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vmskltz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vmskltz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vmskltz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vmskgez_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vmsknz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vsllwil_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsllwil_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsllwil_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_3(vextl_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsllwil_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsllwil_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsllwil_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_3(vextl_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsrlr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vsrar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrar_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrari_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrari_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrari_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrari_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vsrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vsrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vsrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vsrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vsrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vssrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrln_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrln_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrln_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssran_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vssrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrani_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vssrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrlrn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrlrn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrlrn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vssrarn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vssrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrlrni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vssrarni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_3(vclo_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclo_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclo_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclo_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vclz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(vpcnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vpcnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vpcnt_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(vpcnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vfrstp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vfrstp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vfrstpi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vfrstpi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_5(vfadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_6(vfmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfnmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfnmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfnmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(vfnmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_5(vfmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_5(vfmaxa_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmaxa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmina_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfmina_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vflogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vflogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vfclass_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfclass_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vfsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrecip_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrecip_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vfcvtl_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfcvth_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfcvtl_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfcvth_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfcvt_h_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vfcvt_s_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vfrintrne_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrne_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrz_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrz_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrm_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrintrm_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrint_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vfrint_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vftintrne_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrne_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrz_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrz_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrp_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrp_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrm_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrm_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftint_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftint_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrz_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrz_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftint_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftint_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vftintrne_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vftintrz_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vftintrp_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vftintrm_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vftint_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrnel_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrneh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrzl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrzh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrpl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrph_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrml_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintrmh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftintl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vftinth_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vffint_s_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vffint_d_l, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vffint_s_wu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vffint_d_lu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vffintl_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(vffinth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(vffint_s_l, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_6(vfcmp_c_s, void, env, i32, i32, i32, i32, i32)
-DEF_HELPER_6(vfcmp_s_s, void, env, i32, i32, i32, i32, i32)
-DEF_HELPER_6(vfcmp_c_d, void, env, i32, i32, i32, i32, i32)
-DEF_HELPER_6(vfcmp_s_d, void, env, i32, i32, i32, i32, i32)
-
-DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_4(vsetanyeqz_b, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetanyeqz_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetanyeqz_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetanyeqz_d, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetallnez_b, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetallnez_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetallnez_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsetallnez_d, void, env, i32, i32, i32)
-
-DEF_HELPER_FLAGS_4(xvinsve0_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(xvinsve0_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(xvpickve_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(xvpickve_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vpackev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpackod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vpickev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpickod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(vilvl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvl_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vilvh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(vshuf_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vshuf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vshuf_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vshuf_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vshuf4i_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vshuf4i_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vshuf4i_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vshuf4i_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vperm_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(vpermi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vpermi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vpermi_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-
-DEF_HELPER_FLAGS_4(vextrins_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vextrins_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vextrins_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
-DEF_HELPER_FLAGS_4(vextrins_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+#include "tcg/helper.h"
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index 944153b..a7384b0 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -17,8 +17,8 @@
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
void loongarch_translate_init(void);
-
-void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void G_NORETURN do_raise_exception(CPULoongArchState *env,
uint32_t exception,
@@ -43,6 +43,8 @@ enum {
TLBRET_PE = 7,
};
+bool check_ps(CPULoongArchState *ent, uint8_t ps);
+
extern const VMStateDescription vmstate_loongarch_cpu;
void loongarch_cpu_set_irq(void *opaque, int irq, int level);
@@ -52,18 +54,13 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu);
uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu);
void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu,
uint64_t value);
-bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
- int *index);
int get_physical_address(CPULoongArchState *env, hwaddr *physical,
int *prot, target_ulong address,
- MMUAccessType access_type, int mmu_idx);
+ MMUAccessType access_type, int mmu_idx, int is_debug);
+void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
+ uint64_t *dir_width, target_ulong level);
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
-#ifdef CONFIG_TCG
-bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
-#endif
#endif /* !CONFIG_USER_ONLY */
uint64_t read_fcc(CPULoongArchState *env);
@@ -72,5 +69,7 @@ void write_fcc(CPULoongArchState *env, uint64_t val);
int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n);
int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n);
void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs);
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *s);
#endif
diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
index e1be6a6..e5ea2db 100644
--- a/target/loongarch/kvm/kvm.c
+++ b/target/loongarch/kvm/kvm.c
@@ -8,21 +8,23 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>
-
+#include "asm-loongarch/kvm_para.h"
+#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
+#include "hw/loongarch/virt.h"
#include "qemu/log.h"
#include "hw/loader.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "cpu-csr.h"
#include "kvm_loongarch.h"
#include "trace.h"
@@ -33,6 +35,82 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
+static int kvm_get_stealtime(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ int err;
+ struct kvm_device_attr attr = {
+ .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
+ .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
+ .addr = (uint64_t)&env->stealtime.guest_addr,
+ };
+
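+    /* Hosts without the PVTIME attribute are silently treated as a no-op */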
+ err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
+ if (err) {
+ return 0;
+ }
+
+ err = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, attr);
+ if (err) {
+ error_report("PVTIME: KVM_GET_DEVICE_ATTR: %s", strerror(errno));
+ return err;
+ }
+
+ return 0;
+}
+
+static int kvm_set_stealtime(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ int err;
+ struct kvm_device_attr attr = {
+ .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
+ .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
+ .addr = (uint64_t)&env->stealtime.guest_addr,
+ };
+
+ err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
+ if (err) {
+ return 0;
+ }
+
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
+ if (err) {
+ error_report("PVTIME: KVM_SET_DEVICE_ATTR %s with gpa "TARGET_FMT_lx,
+ strerror(errno), env->stealtime.guest_addr);
+ return err;
+ }
+
+ return 0;
+}
+
+static int kvm_set_pv_features(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ int err;
+ uint64_t val;
+ struct kvm_device_attr attr = {
+ .group = KVM_LOONGARCH_VCPU_CPUCFG,
+ .attr = CPUCFG_KVM_FEATURE,
+ .addr = (uint64_t)&val,
+ };
+
+ err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
+ if (err) {
+ return 0;
+ }
+
+ val = env->pv_features;
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
+ if (err) {
+        error_report("Failed to set PV feature "TARGET_FMT_lx " with error %s",
+ val, strerror(errno));
+ return err;
+ }
+
+ return 0;
+}
+
static int kvm_loongarch_get_regs_core(CPUState *cs)
{
int ret = 0;
@@ -476,9 +554,71 @@ static int kvm_loongarch_put_regs_fp(CPUState *cs)
return ret;
}
-void kvm_arch_reset_vcpu(CPULoongArchState *env)
+static int kvm_loongarch_put_lbt(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ uint64_t val;
+ int ret;
+
+    /* Check whether the VM supports LBT first */
+ if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
+ return 0;
+ }
+
+    /* Set the six LBT registers: scr0-scr3, eflags and ftop */
+ ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
+ ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
+ ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
+ ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
+ /*
+     * Note that KVM_REG_LOONGARCH_LBT_FTOP is defined as 64-bit even though
+     * lbt.ftop is 32-bit; the same applies to KVM_REG_LOONGARCH_LBT_EFLAGS.
+ */
+ val = env->lbt.eflags;
+ ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
+ val = env->lbt.ftop;
+ ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);
+
+ return ret;
+}
+
+static int kvm_loongarch_get_lbt(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ uint64_t val;
+ int ret;
+
+    /* Check whether the VM supports LBT first */
+ if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
+ return 0;
+ }
+
+    /* Get the six LBT registers: scr0-scr3, eflags and ftop */
+ ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
+ ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
+ ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
+ ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
+ ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
+ env->lbt.eflags = (uint32_t)val;
+ ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);
+ env->lbt.ftop = (uint32_t)val;
+
+ return ret;
+}
+
+void kvm_arch_reset_vcpu(CPUState *cs)
{
+ CPULoongArchState *env = cpu_env(cs);
+ int ret = 0;
+ uint64_t unused = 0;
+
env->mp_state = KVM_MP_STATE_RUNNABLE;
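+    /*
+     * Writing the KVM_REG_LOONGARCH_VCPU_RESET pseudo register asks KVM to
+     * reset the in-kernel vCPU state; the value written is unused.
+     */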
+ ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, &unused);
+ if (ret) {
+ error_report("Failed to set KVM_REG_LOONGARCH_VCPU_RESET: %s",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
}
static int kvm_loongarch_get_mpstate(CPUState *cs)
@@ -585,7 +725,7 @@ static int kvm_loongarch_put_cpucfg(CPUState *cs)
return ret;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
int ret;
@@ -609,13 +749,24 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
+ ret = kvm_loongarch_get_lbt(cs);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_get_stealtime(cs);
+ if (ret) {
+ return ret;
+ }
+
ret = kvm_loongarch_get_mpstate(cs);
return ret;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
int ret;
+ static int once;
ret = kvm_loongarch_put_regs_core(cs);
if (ret) {
@@ -637,6 +788,30 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ ret = kvm_loongarch_put_lbt(cs);
+ if (ret) {
+ return ret;
+ }
+
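+    /* PV features only need to be set once; 'once' is shared across vCPUs */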
+ if (!once) {
+ ret = kvm_set_pv_features(cs);
+ if (ret) {
+ return ret;
+ }
+ once = 1;
+ }
+
+ if (level >= KVM_PUT_FULL_STATE) {
+ /*
+         * Only KVM_PUT_FULL_STATE is required here; the KVM kernel clears
+         * guest_addr on KVM_PUT_RESET_STATE.
+ */
+ ret = kvm_set_stealtime(cs);
+ if (ret) {
+ return ret;
+ }
+ }
+
ret = kvm_loongarch_put_mpstate(cs);
return ret;
}
@@ -663,21 +838,374 @@ static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
}
}
+static bool kvm_feature_supported(CPUState *cs, enum loongarch_features feature)
+{
+ int ret;
+ struct kvm_device_attr attr;
+ uint64_t val;
+
+ switch (feature) {
+ case LOONGARCH_FEATURE_LSX:
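+        /* Prefer the per-VM feature attribute when the kernel provides it */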
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_LSX;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ if (ret == 0) {
+ return true;
+ }
+
+        /* Fall back to the old kernel detection interface */
+ val = 0;
+ attr.group = KVM_LOONGARCH_VCPU_CPUCFG;
+ /* Cpucfg2 */
+ attr.attr = 2;
+ attr.addr = (uint64_t)&val;
+ ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
+ if (!ret) {
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
+ if (ret) {
+ return false;
+ }
+
+ ret = FIELD_EX32((uint32_t)val, CPUCFG2, LSX);
+ return (ret != 0);
+ }
+ return false;
+
+ case LOONGARCH_FEATURE_LASX:
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_LASX;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ if (ret == 0) {
+ return true;
+ }
+
+        /* Fall back to the old kernel detection interface */
+ val = 0;
+ attr.group = KVM_LOONGARCH_VCPU_CPUCFG;
+ /* Cpucfg2 */
+ attr.attr = 2;
+ attr.addr = (uint64_t)&val;
+ ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
+ if (!ret) {
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
+ if (ret) {
+ return false;
+ }
+
+ ret = FIELD_EX32((uint32_t)val, CPUCFG2, LASX);
+ return (ret != 0);
+ }
+ return false;
+
+ case LOONGARCH_FEATURE_LBT:
+ /*
+         * Report support only if all of the LBT features are available:
+ * KVM_LOONGARCH_VM_FEAT_X86BT
+ * KVM_LOONGARCH_VM_FEAT_ARMBT
+ * KVM_LOONGARCH_VM_FEAT_MIPSBT
+ */
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_X86BT;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ attr.attr = KVM_LOONGARCH_VM_FEAT_ARMBT;
+ ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ attr.attr = KVM_LOONGARCH_VM_FEAT_MIPSBT;
+ ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ return (ret == 0);
+
+ case LOONGARCH_FEATURE_PMU:
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_PMU;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ return (ret == 0);
+
+ case LOONGARCH_FEATURE_PV_IPI:
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_PV_IPI;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ return (ret == 0);
+
+ case LOONGARCH_FEATURE_STEALTIME:
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
+ attr.attr = KVM_LOONGARCH_VM_FEAT_PV_STEALTIME;
+ ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
+ return (ret == 0);
+
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static int kvm_cpu_check_lsx(CPUState *cs, Error **errp)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ bool kvm_supported;
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LSX);
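+    /* Default to LSX disabled; enable it below when requested or detected */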
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 0);
+ if (cpu->lsx == ON_OFF_AUTO_ON) {
+ if (kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1);
+ } else {
+ error_setg(errp, "'lsx' feature not supported by KVM on this host");
+ return -ENOTSUP;
+ }
+ } else if ((cpu->lsx == ON_OFF_AUTO_AUTO) && kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1);
+ }
+
+ return 0;
+}
+
+static int kvm_cpu_check_lasx(CPUState *cs, Error **errp)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ bool kvm_supported;
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LASX);
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 0);
+ if (cpu->lasx == ON_OFF_AUTO_ON) {
+ if (kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1);
+ } else {
+ error_setg(errp, "'lasx' feature not supported by KVM on host");
+ return -ENOTSUP;
+ }
+ } else if ((cpu->lasx == ON_OFF_AUTO_AUTO) && kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1);
+ }
+
+ return 0;
+}
+
+static int kvm_cpu_check_lbt(CPUState *cs, Error **errp)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ bool kvm_supported;
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LBT);
+ if (cpu->lbt == ON_OFF_AUTO_ON) {
+ if (kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
+ } else {
+ error_setg(errp, "'lbt' feature not supported by KVM on this host");
+ return -ENOTSUP;
+ }
+ } else if ((cpu->lbt == ON_OFF_AUTO_AUTO) && kvm_supported) {
+ env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
+ }
+
+ return 0;
+}
+
+static int kvm_cpu_check_pmu(CPUState *cs, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ CPULoongArchState *env = cpu_env(cs);
+ bool kvm_supported;
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PMU);
+ if (cpu->pmu == ON_OFF_AUTO_ON) {
+ if (!kvm_supported) {
+ error_setg(errp, "'pmu' feature not supported by KVM on the host");
+ return -ENOTSUP;
+ }
+ } else if (cpu->pmu != ON_OFF_AUTO_AUTO) {
+ /* disable pmu if ON_OFF_AUTO_OFF is set */
+ kvm_supported = false;
+ }
+
+ if (kvm_supported) {
+ env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMP, 1);
+ env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMNUM, 3);
+ env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMBITS, 63);
+ env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, UPM, 1);
+ }
+ return 0;
+}
+
+static int kvm_cpu_check_pv_features(CPUState *cs, Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ CPULoongArchState *env = cpu_env(cs);
+ bool kvm_supported;
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PV_IPI);
+ if (cpu->kvm_pv_ipi == ON_OFF_AUTO_ON) {
+ if (!kvm_supported) {
+ error_setg(errp, "'pv_ipi' feature not supported by KVM host");
+ return -ENOTSUP;
+ }
+ } else if (cpu->kvm_pv_ipi != ON_OFF_AUTO_AUTO) {
+ kvm_supported = false;
+ }
+
+ if (kvm_supported) {
+ env->pv_features |= BIT(KVM_FEATURE_IPI);
+ }
+
+ kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_STEALTIME);
+ if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
+ if (!kvm_supported) {
+ error_setg(errp, "'kvm stealtime' feature not supported by KVM host");
+ return -ENOTSUP;
+ }
+ } else if (cpu->kvm_steal_time != ON_OFF_AUTO_AUTO) {
+ kvm_supported = false;
+ }
+
+ if (kvm_supported) {
+ env->pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+ }
+
+ if (object_dynamic_cast(OBJECT(ms), TYPE_LOONGARCH_VIRT_MACHINE)) {
+ LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms);
+
+ if (virt_is_veiointc_enabled(lvms)) {
+ env->pv_features |= BIT(KVM_FEATURE_VIRT_EXTIOI);
+ }
+ }
+ return 0;
+}
+
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
uint64_t val;
+ int ret;
+ Error *local_err = NULL;
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
- qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);
+ cpu->vmsentry = qemu_add_vm_change_state_handler(
+ kvm_loongarch_vm_stage_change, cs);
if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
brk_insn = val;
}
+ ret = kvm_cpu_check_lsx(cs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ ret = kvm_cpu_check_lasx(cs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ ret = kvm_cpu_check_lbt(cs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ ret = kvm_cpu_check_pmu(cs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ ret = kvm_cpu_check_pv_features(cs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
return 0;
}
+static bool loongarch_get_lbt(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->lbt != ON_OFF_AUTO_OFF;
+}
+
+static void loongarch_set_lbt(Object *obj, bool value, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->lbt = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+}
+
+static bool loongarch_get_pmu(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->pmu != ON_OFF_AUTO_OFF;
+}
+
+static void loongarch_set_pmu(Object *obj, bool value, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->pmu = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+}
+
+static bool kvm_pv_ipi_get(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->kvm_pv_ipi != ON_OFF_AUTO_OFF;
+}
+
+static void kvm_pv_ipi_set(Object *obj, bool value, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->kvm_pv_ipi = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+}
+
+static bool kvm_steal_time_get(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
+}
+
+static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+}
+
+void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu)
+{
+ cpu->lbt = ON_OFF_AUTO_AUTO;
+ object_property_add_bool(OBJECT(cpu), "lbt", loongarch_get_lbt,
+ loongarch_set_lbt);
+ object_property_set_description(OBJECT(cpu), "lbt",
+                                    "Set off to disable Binary Translation.");
+
+ cpu->pmu = ON_OFF_AUTO_AUTO;
+ object_property_add_bool(OBJECT(cpu), "pmu", loongarch_get_pmu,
+ loongarch_set_pmu);
+ object_property_set_description(OBJECT(cpu), "pmu",
+ "Set off to disable performance monitor unit.");
+
+ cpu->kvm_pv_ipi = ON_OFF_AUTO_AUTO;
+ object_property_add_bool(OBJECT(cpu), "kvm-pv-ipi", kvm_pv_ipi_get,
+ kvm_pv_ipi_set);
+ object_property_set_description(OBJECT(cpu), "kvm-pv-ipi",
+ "Set off to disable KVM paravirt IPI.");
+
+ cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
+ object_property_add_bool(OBJECT(cpu), "kvm-steal-time", kvm_steal_time_get,
+ kvm_steal_time_set);
+ object_property_set_description(OBJECT(cpu), "kvm-steal-time",
+ "Set off to disable KVM steal time.");
+}
+
int kvm_arch_destroy_vcpu(CPUState *cs)
{
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+
+ qemu_del_vm_change_state_handler(cpu->vmsentry);
return 0;
}
@@ -712,6 +1240,22 @@ void kvm_arch_init_irq_routing(KVMState *s)
{
}
+void kvm_loongarch_init_irq_routing(void)
+{
+ int i;
+
+ kvm_async_interrupts_allowed = true;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+ if (kvm_has_gsi_routing()) {
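+        /* Install an identity mapping from GSI numbers to irqchip pins */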
+ for (i = 0; i < KVM_IRQCHIP_NUM_PINS; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+ kvm_irqchip_commit_routes(kvm_state);
+ }
+}
+
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
@@ -725,7 +1269,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int kvm_arch_irqchip_create(KVMState *s)
{
- return 0;
+ if (kvm_kernel_irqchip_split()) {
+ error_report("kernel_irqchip=split is not supported on LoongArch");
+ exit(1);
+ }
+
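+    /* In-kernel irqchip support depends on the KVM device control API */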
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
diff --git a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h
index d945b6b..1051a34 100644
--- a/target/loongarch/kvm/kvm_loongarch.h
+++ b/target/loongarch/kvm/kvm_loongarch.h
@@ -11,6 +11,6 @@
#define QEMU_KVM_LOONGARCH_H
int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level);
-void kvm_arch_reset_vcpu(CPULoongArchState *env);
+void kvm_arch_reset_vcpu(CPUState *cs);
#endif
diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c
index 8721a5e..f5f1cd0 100644
--- a/target/loongarch/loongarch-qmp-cmds.c
+++ b/target/loongarch/loongarch-qmp-cmds.c
@@ -8,9 +8,9 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qom/qom-qobject.h"
@@ -40,7 +40,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
}
static const char *cpu_model_advertised_features[] = {
- "lsx", "lasx", NULL
+ "lsx", "lasx", "lbt", "pmu", "kvm-pv-ipi", "kvm-steal-time", NULL
};
CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c
index 08a7fa5..4e70f5c 100644
--- a/target/loongarch/machine.c
+++ b/target/loongarch/machine.c
@@ -8,7 +8,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "migration/cpu.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "vec.h"
static const VMStateDescription vmstate_fpu_reg = {
@@ -110,6 +110,29 @@ static const VMStateDescription vmstate_lasx = {
},
};
+static bool lbt_needed(void *opaque)
+{
+ LoongArchCPU *cpu = opaque;
+
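+    /* Only migrate LBT state when a binary translation extension is enabled */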
+ return !!FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, LBT_ALL);
+}
+
+static const VMStateDescription vmstate_lbt = {
+ .name = "cpu/lbt",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .needed = lbt_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(env.lbt.scr0, LoongArchCPU),
+ VMSTATE_UINT64(env.lbt.scr1, LoongArchCPU),
+ VMSTATE_UINT64(env.lbt.scr2, LoongArchCPU),
+ VMSTATE_UINT64(env.lbt.scr3, LoongArchCPU),
+ VMSTATE_UINT32(env.lbt.eflags, LoongArchCPU),
+ VMSTATE_UINT32(env.lbt.ftop, LoongArchCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static bool tlb_needed(void *opaque)
{
@@ -145,8 +168,8 @@ static const VMStateDescription vmstate_tlb = {
/* LoongArch CPU state */
const VMStateDescription vmstate_loongarch_cpu = {
.name = "cpu",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32),
VMSTATE_UINTTL(env.pc, LoongArchCPU),
@@ -209,6 +232,8 @@ const VMStateDescription vmstate_loongarch_cpu = {
VMSTATE_UINT64(env.CSR_DSAVE, LoongArchCPU),
VMSTATE_UINT64(kvm_state_counter, LoongArchCPU),
+ /* PV steal time */
+ VMSTATE_UINT64(env.stealtime.guest_addr, LoongArchCPU),
VMSTATE_END_OF_LIST()
},
@@ -219,6 +244,7 @@ const VMStateDescription vmstate_loongarch_cpu = {
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
&vmstate_tlb,
#endif
+ &vmstate_lbt,
NULL
}
};
diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build
index e002e9a..20bd3e2 100644
--- a/target/loongarch/meson.build
+++ b/target/loongarch/meson.build
@@ -8,7 +8,9 @@ loongarch_ss.add(files(
loongarch_system_ss = ss.source_set()
loongarch_system_ss.add(files(
+ 'arch_dump.c',
'cpu_helper.c',
+ 'csr.c',
'loongarch-qmp-cmds.c',
'machine.c',
))
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
index 15f94ca..2942d7f 100644
--- a/target/loongarch/tcg/csr_helper.c
+++ b/target/loongarch/tcg/csr_helper.c
@@ -6,16 +6,33 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internals.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
#include "hw/irq.h"
#include "cpu-csr.h"
+target_ulong helper_csrwr_stlbps(CPULoongArchState *env, target_ulong val)
+{
+ int64_t old_v = env->CSR_STLBPS;
+
+ /*
+     * The real hardware only supports a minimum tlb_ps of 12;
+     * tlb_ps = 0 may cause undefined behavior.
+ */
+ uint8_t tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
+ if (!check_ps(env, tlb_ps)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+                      "Attempted to set ps %d\n", tlb_ps);
+ }
+ return old_v;
+}
+
target_ulong helper_csrrd_pgd(CPULoongArchState *env)
{
int64_t v;
@@ -95,3 +112,27 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val)
}
return old_v;
}
+
+target_ulong helper_csrwr_pwcl(CPULoongArchState *env, target_ulong val)
+{
+ uint8_t shift, ptbase;
+ int64_t old_v = env->CSR_PWCL;
+
+ /*
+     * The real hardware only supports a 64-bit PTE width for now; 128-bit or
+     * other widths are treated as illegal.
+ */
+ shift = FIELD_EX64(val, CSR_PWCL, PTEWIDTH);
+ ptbase = FIELD_EX64(val, CSR_PWCL, PTBASE);
+ if (shift) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+                      "Attempted to set PTE width to %d bits\n", 64 << shift);
+ val = FIELD_DP64(val, CSR_PWCL, PTEWIDTH, 0);
+ }
+ if (!check_ps(env, ptbase)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+                      "Attempted to set ptbase to 2^%d\n", ptbase);
+ }
+    env->CSR_PWCL = val;
+ return old_v;
+}
diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index f6753c5..fc9c64c 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -8,8 +8,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
#include "internals.h"
@@ -31,6 +30,15 @@ void restore_fp_status(CPULoongArchState *env)
set_float_rounding_mode(ieee_rm[(env->fcsr0 >> FCSR0_RM) & 0x3],
&env->fp_status);
set_flush_to_zero(0, &env->fp_status);
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status);
+ /*
+ * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
+ * case sets InvalidOp and returns the input value 'c'
+ */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, &env->fp_status);
+ /* Default NaN: sign bit clear, msb frac bit set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
}
int ieee_ex_to_loongarch(int xcpt)
@@ -352,8 +360,7 @@ uint64_t helper_fclass_s(CPULoongArchState *env, uint64_t fj)
} else if (float32_is_zero_or_denormal(f)) {
return sign ? 1 << 4 : 1 << 8;
} else if (float32_is_any_nan(f)) {
- float_status s = { }; /* for snan_bit_is_one */
- return float32_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0;
+ return float32_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0;
} else {
return sign ? 1 << 3 : 1 << 7;
}
@@ -371,8 +378,7 @@ uint64_t helper_fclass_d(CPULoongArchState *env, uint64_t fj)
} else if (float64_is_zero_or_denormal(f)) {
return sign ? 1 << 4 : 1 << 8;
} else if (float64_is_any_nan(f)) {
- float_status s = { }; /* for snan_bit_is_one */
- return float64_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0;
+ return float64_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0;
} else {
return sign ? 1 << 3 : 1 << 7;
}
diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h
new file mode 100644
index 0000000..1d5cb01
--- /dev/null
+++ b/target/loongarch/tcg/helper.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+
+DEF_HELPER_FLAGS_1(bitrev_w, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(bitrev_d, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+
+DEF_HELPER_FLAGS_3(asrtle_d, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(asrtgt_d, TCG_CALL_NO_WG, void, env, tl, tl)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl)
+
+/* Floating-point helper */
+DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxa_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxa_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmina_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmina_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_5(fmuladd_s, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_5(fmuladd_d, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32)
+
+DEF_HELPER_FLAGS_3(fscaleb_s, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fscaleb_d, TCG_CALL_NO_WG, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(flogb_s, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(flogb_d, TCG_CALL_NO_WG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(frsqrt_s, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(frsqrt_d, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(frecip_s, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(frecip_d, TCG_CALL_NO_WG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(fclass_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+/* fcmp.cXXX.s */
+DEF_HELPER_4(fcmp_c_s, i64, env, i64, i64, i32)
+/* fcmp.sXXX.s */
+DEF_HELPER_4(fcmp_s_s, i64, env, i64, i64, i32)
+/* fcmp.cXXX.d */
+DEF_HELPER_4(fcmp_c_d, i64, env, i64, i64, i32)
+/* fcmp.sXXX.d */
+DEF_HELPER_4(fcmp_s_d, i64, env, i64, i64, i32)
+
+DEF_HELPER_2(fcvt_d_s, i64, env, i64)
+DEF_HELPER_2(fcvt_s_d, i64, env, i64)
+DEF_HELPER_2(ffint_d_w, i64, env, i64)
+DEF_HELPER_2(ffint_d_l, i64, env, i64)
+DEF_HELPER_2(ffint_s_w, i64, env, i64)
+DEF_HELPER_2(ffint_s_l, i64, env, i64)
+DEF_HELPER_2(ftintrm_l_s, i64, env, i64)
+DEF_HELPER_2(ftintrm_l_d, i64, env, i64)
+DEF_HELPER_2(ftintrm_w_s, i64, env, i64)
+DEF_HELPER_2(ftintrm_w_d, i64, env, i64)
+DEF_HELPER_2(ftintrp_l_s, i64, env, i64)
+DEF_HELPER_2(ftintrp_l_d, i64, env, i64)
+DEF_HELPER_2(ftintrp_w_s, i64, env, i64)
+DEF_HELPER_2(ftintrp_w_d, i64, env, i64)
+DEF_HELPER_2(ftintrz_l_s, i64, env, i64)
+DEF_HELPER_2(ftintrz_l_d, i64, env, i64)
+DEF_HELPER_2(ftintrz_w_s, i64, env, i64)
+DEF_HELPER_2(ftintrz_w_d, i64, env, i64)
+DEF_HELPER_2(ftintrne_l_s, i64, env, i64)
+DEF_HELPER_2(ftintrne_l_d, i64, env, i64)
+DEF_HELPER_2(ftintrne_w_s, i64, env, i64)
+DEF_HELPER_2(ftintrne_w_d, i64, env, i64)
+DEF_HELPER_2(ftint_l_s, i64, env, i64)
+DEF_HELPER_2(ftint_l_d, i64, env, i64)
+DEF_HELPER_2(ftint_w_s, i64, env, i64)
+DEF_HELPER_2(ftint_w_d, i64, env, i64)
+DEF_HELPER_2(frint_s, i64, env, i64)
+DEF_HELPER_2(frint_d, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(set_rounding_mode, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_1(rdtime_d, i64, env)
+
+#ifndef CONFIG_USER_ONLY
+/* CSRs helper */
+DEF_HELPER_1(csrrd_pgd, i64, env)
+DEF_HELPER_1(csrrd_cpuid, i64, env)
+DEF_HELPER_1(csrrd_tval, i64, env)
+DEF_HELPER_2(csrwr_stlbps, i64, env, tl)
+DEF_HELPER_2(csrwr_estat, i64, env, tl)
+DEF_HELPER_2(csrwr_asid, i64, env, tl)
+DEF_HELPER_2(csrwr_tcfg, i64, env, tl)
+DEF_HELPER_2(csrwr_ticlr, i64, env, tl)
+DEF_HELPER_2(csrwr_pwcl, i64, env, tl)
+DEF_HELPER_2(iocsrrd_b, i64, env, tl)
+DEF_HELPER_2(iocsrrd_h, i64, env, tl)
+DEF_HELPER_2(iocsrrd_w, i64, env, tl)
+DEF_HELPER_2(iocsrrd_d, i64, env, tl)
+DEF_HELPER_3(iocsrwr_b, void, env, tl, tl)
+DEF_HELPER_3(iocsrwr_h, void, env, tl, tl)
+DEF_HELPER_3(iocsrwr_w, void, env, tl, tl)
+DEF_HELPER_3(iocsrwr_d, void, env, tl, tl)
+
+/* TLB helper */
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbfill, void, env)
+DEF_HELPER_1(tlbsrch, void, env)
+DEF_HELPER_1(tlbrd, void, env)
+DEF_HELPER_1(tlbclr, void, env)
+DEF_HELPER_1(tlbflush, void, env)
+DEF_HELPER_1(invtlb_all, void, env)
+DEF_HELPER_2(invtlb_all_g, void, env, i32)
+DEF_HELPER_2(invtlb_all_asid, void, env, tl)
+DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl)
+DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl)
+
+DEF_HELPER_4(lddir, tl, env, tl, tl, i32)
+DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
+DEF_HELPER_1(ertn, void, env)
+DEF_HELPER_1(idle, void, env)
+#endif
+
+/* LoongArch LSX */
+DEF_HELPER_FLAGS_4(vhaddw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhaddw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vhsubw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vdiv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vdiv_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmod_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsat_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_3(vexth_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vexth_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(vext2xv_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_w_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_d_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_d_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_wu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_du_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_du_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vext2xv_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(vmskltz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vmskltz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vmskltz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vmskltz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vmskgez_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vmsknz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vsllwil_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsllwil_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsllwil_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_3(vextl_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsllwil_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsllwil_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsllwil_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_3(vextl_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsrlr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vsrar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrar_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrari_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrari_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrari_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrari_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vsrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vsrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vssrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrln_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrln_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrln_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssran_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vssrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrani_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vssrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrlrn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrlrn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrlrn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vssrarn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vssrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrlrni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vssrarni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_3(vclo_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclo_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclo_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclo_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vclz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(vpcnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vpcnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vpcnt_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vpcnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vfrstp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vfrstp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vfrstpi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vfrstpi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_5(vfadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(vfmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfnmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfnmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfnmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(vfnmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_5(vfmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_5(vfmaxa_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmaxa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmina_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfmina_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vflogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vflogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vfclass_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfclass_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vfsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrecip_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrecip_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vfcvtl_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfcvth_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfcvtl_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfcvth_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfcvt_h_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vfcvt_s_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vfrintrne_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrne_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrz_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrz_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrm_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrintrm_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrint_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vfrint_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vftintrne_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrne_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrz_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrz_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrp_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrp_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrm_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrm_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftint_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftint_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrz_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrz_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftint_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftint_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vftintrne_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vftintrz_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vftintrp_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vftintrm_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vftint_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrnel_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrneh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrzl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrzh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrpl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrph_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrml_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintrmh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftintl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vftinth_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vffint_s_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vffint_d_l, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vffint_s_wu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vffint_d_lu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vffintl_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vffinth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(vffint_s_l, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_6(vfcmp_c_s, void, env, i32, i32, i32, i32, i32)
+DEF_HELPER_6(vfcmp_s_s, void, env, i32, i32, i32, i32, i32)
+DEF_HELPER_6(vfcmp_c_d, void, env, i32, i32, i32, i32, i32)
+DEF_HELPER_6(vfcmp_s_d, void, env, i32, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_4(vsetanyeqz_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetanyeqz_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetanyeqz_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetanyeqz_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetallnez_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetallnez_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetallnez_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsetallnez_d, void, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(xvinsve0_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(xvinsve0_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(xvpickve_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(xvpickve_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vpackev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpackod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vpickev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpickod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vilvl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvl_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vilvh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(vshuf_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vshuf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vshuf_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vshuf_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vshuf4i_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vshuf4i_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vshuf4i_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vshuf4i_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vperm_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vpermi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vpermi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vpermi_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vextrins_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vextrins_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vextrins_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vextrins_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
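For orientation: each DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) line above declares a helper_name() entry point with the listed argument types (ptr maps to void *, i32 to uint32_t; the trailing i32 is the gvec descriptor). A rough sketch of what one of the declared helpers could look like on the C side, assuming the VReg/B() accessors and simd_oprsz() pattern used elsewhere in target/loongarch — illustrative only, not code from this patch:

void helper_vadda_b(void *vd, void *vj, void *vk, uint32_t desc)
{
    /* Sketch: element-wise |Vj| + |Vk|, per the vadda.b semantics. */
    VReg *Vd = (VReg *)vd, *Vj = (VReg *)vj, *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);       /* 16 bytes for LSX, 32 for LASX */

    for (int i = 0; i < oprsz; i++) {
        Vd->B(i) = abs(Vj->B(i)) + abs(Vk->B(i));
    }
}
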
diff --git a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
index 974bc2a..3d70d75 100644
--- a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
@@ -56,7 +56,7 @@ static bool gen_am(DisasContext *ctx, arg_rrr *a,
if (a->rd != 0 && (a->rj == a->rd || a->rk == a->rd)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Warning: source register overlaps destination register"
- "in atomic insn at pc=0x" TARGET_FMT_lx "\n",
+ "in atomic insn at pc=0x%" VADDR_PRIx "\n",
ctx->base.pc_next - 4);
return false;
}
diff --git a/target/loongarch/tcg/insn_trans/trans_branch.c.inc b/target/loongarch/tcg/insn_trans/trans_branch.c.inc
index 221e515..f94c1f3 100644
--- a/target/loongarch/tcg/insn_trans/trans_branch.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_branch.c.inc
@@ -80,5 +80,5 @@ TRANS(bltu, ALL, gen_rr_bc, TCG_COND_LTU)
TRANS(bgeu, ALL, gen_rr_bc, TCG_COND_GEU)
TRANS(beqz, ALL, gen_rz_bc, TCG_COND_EQ)
TRANS(bnez, ALL, gen_rz_bc, TCG_COND_NE)
-TRANS(bceqz, 64, gen_cz_bc, TCG_COND_EQ)
-TRANS(bcnez, 64, gen_cz_bc, TCG_COND_NE)
+TRANS(bceqz, FP, gen_cz_bc, TCG_COND_EQ)
+TRANS(bcnez, FP, gen_cz_bc, TCG_COND_NE)
diff --git a/target/loongarch/tcg/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc
index cfa361f..eda3d6e 100644
--- a/target/loongarch/tcg/insn_trans/trans_extra.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_extra.c.inc
@@ -97,11 +97,11 @@ static bool gen_crc(DisasContext *ctx, arg_rrr *a,
return true;
}
-TRANS(crc_w_b_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(1))
-TRANS(crc_w_h_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(2))
-TRANS(crc_w_w_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(4))
-TRANS(crc_w_d_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(8))
-TRANS(crcc_w_b_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(1))
-TRANS(crcc_w_h_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(2))
-TRANS(crcc_w_w_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(4))
-TRANS(crcc_w_d_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(8))
+TRANS(crc_w_b_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(1))
+TRANS(crc_w_h_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(2))
+TRANS(crc_w_w_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(4))
+TRANS(crc_w_d_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(8))
+TRANS(crcc_w_b_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(1))
+TRANS(crcc_w_h_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(2))
+TRANS(crcc_w_w_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(4))
+TRANS(crcc_w_d_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(8))
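These hunks change only the availability gate: the second TRANS() argument names an avail_* predicate, so the CRC instructions are now guarded by the new avail_CRC() check (added to target/loongarch/translate.h further down in this diff) rather than the LA64-only "64" gate. Roughly how that gate expands — a sketch of the translator's TRANS() convention, not text from this patch:

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { return avail_##AVAIL(ctx) && FUNC(ctx, a, ## __VA_ARGS__); }

/* So TRANS(crc_w_b_w, CRC, gen_crc, ...) only decodes when avail_CRC(ctx) is true,
 * i.e. when CPUCFG1.CRC is set for the vCPU. */
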
diff --git a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
index 3babf69..6a2c030 100644
--- a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
@@ -4,10 +4,15 @@
*/
/* bit0(signaling/quiet) bit1(lt) bit2(eq) bit3(un) bit4(neq) */
-static uint32_t get_fcmp_flags(int cond)
+static uint32_t get_fcmp_flags(DisasContext *ctx, int cond)
{
uint32_t flags = 0;
+ /* check cond: valid values are 0-8, 10 and 12 */
+ if ((cond > 8) && (cond != 10) && (cond != 12)) {
+ return -1;
+ }
+
if (cond & 0x1) {
flags |= FCMP_LT;
}
@@ -26,9 +31,14 @@ static uint32_t get_fcmp_flags(int cond)
static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
{
TCGv var, src1, src2;
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!avail_FP_SP(ctx)) {
return false;
}
@@ -39,8 +49,6 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
src1 = get_fpr(ctx, a->fj);
src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_s : gen_helper_fcmp_c_s);
- flags = get_fcmp_flags(a->fcond >> 1);
-
fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
@@ -50,9 +58,14 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
{
TCGv var, src1, src2;
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!avail_FP_DP(ctx)) {
return false;
}
@@ -63,8 +76,6 @@ static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
src1 = get_fpr(ctx, a->fj);
src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d);
- flags = get_fcmp_flags(a->fcond >> 1);
-
fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
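Net effect of this change: get_fcmp_flags() now returns -1 for any reserved fcond encoding (cond outside 0-8, 10 and 12), and both trans_fcmp_cond_s() and trans_fcmp_cond_d() raise EXCCODE_INE in that case instead of translating the comparison. A small standalone illustration of the validity rule (plain C, nothing QEMU-specific assumed):

#include <stdio.h>

static int fcmp_cond_is_valid(int cond)     /* cond is fcond >> 1 */
{
    return !((cond > 8) && (cond != 10) && (cond != 12));
}

int main(void)
{
    for (int cond = 0; cond < 16; cond++) {
        printf("cond %2d: %s\n", cond,
               fcmp_cond_is_valid(cond) ? "valid" : "reserved -> EXCCODE_INE");
    }
    return 0;
}
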
diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
index 7e4ec93..ecbfe23 100644
--- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
@@ -5,7 +5,7 @@
* LoongArch translation routines for the privileged instructions.
*/
-#include "cpu-csr.h"
+#include "csr.h"
#ifdef CONFIG_USER_ONLY
@@ -45,112 +45,6 @@ GEN_FALSE_TRANS(idle)
typedef void (*GenCSRRead)(TCGv dest, TCGv_ptr env);
typedef void (*GenCSRWrite)(TCGv dest, TCGv_ptr env, TCGv src);
-typedef struct {
- int offset;
- int flags;
- GenCSRRead readfn;
- GenCSRWrite writefn;
-} CSRInfo;
-
-enum {
- CSRFL_READONLY = (1 << 0),
- CSRFL_EXITTB = (1 << 1),
- CSRFL_IO = (1 << 2),
-};
-
-#define CSR_OFF_FUNCS(NAME, FL, RD, WR) \
- [LOONGARCH_CSR_##NAME] = { \
- .offset = offsetof(CPULoongArchState, CSR_##NAME), \
- .flags = FL, .readfn = RD, .writefn = WR \
- }
-
-#define CSR_OFF_ARRAY(NAME, N) \
- [LOONGARCH_CSR_##NAME(N)] = { \
- .offset = offsetof(CPULoongArchState, CSR_##NAME[N]), \
- .flags = 0, .readfn = NULL, .writefn = NULL \
- }
-
-#define CSR_OFF_FLAGS(NAME, FL) \
- CSR_OFF_FUNCS(NAME, FL, NULL, NULL)
-
-#define CSR_OFF(NAME) \
- CSR_OFF_FLAGS(NAME, 0)
-
-static const CSRInfo csr_info[] = {
- CSR_OFF_FLAGS(CRMD, CSRFL_EXITTB),
- CSR_OFF(PRMD),
- CSR_OFF_FLAGS(EUEN, CSRFL_EXITTB),
- CSR_OFF_FLAGS(MISC, CSRFL_READONLY),
- CSR_OFF(ECFG),
- CSR_OFF_FUNCS(ESTAT, CSRFL_EXITTB, NULL, gen_helper_csrwr_estat),
- CSR_OFF(ERA),
- CSR_OFF(BADV),
- CSR_OFF_FLAGS(BADI, CSRFL_READONLY),
- CSR_OFF(EENTRY),
- CSR_OFF(TLBIDX),
- CSR_OFF(TLBEHI),
- CSR_OFF(TLBELO0),
- CSR_OFF(TLBELO1),
- CSR_OFF_FUNCS(ASID, CSRFL_EXITTB, NULL, gen_helper_csrwr_asid),
- CSR_OFF(PGDL),
- CSR_OFF(PGDH),
- CSR_OFF_FUNCS(PGD, CSRFL_READONLY, gen_helper_csrrd_pgd, NULL),
- CSR_OFF(PWCL),
- CSR_OFF(PWCH),
- CSR_OFF(STLBPS),
- CSR_OFF(RVACFG),
- CSR_OFF_FUNCS(CPUID, CSRFL_READONLY, gen_helper_csrrd_cpuid, NULL),
- CSR_OFF_FLAGS(PRCFG1, CSRFL_READONLY),
- CSR_OFF_FLAGS(PRCFG2, CSRFL_READONLY),
- CSR_OFF_FLAGS(PRCFG3, CSRFL_READONLY),
- CSR_OFF_ARRAY(SAVE, 0),
- CSR_OFF_ARRAY(SAVE, 1),
- CSR_OFF_ARRAY(SAVE, 2),
- CSR_OFF_ARRAY(SAVE, 3),
- CSR_OFF_ARRAY(SAVE, 4),
- CSR_OFF_ARRAY(SAVE, 5),
- CSR_OFF_ARRAY(SAVE, 6),
- CSR_OFF_ARRAY(SAVE, 7),
- CSR_OFF_ARRAY(SAVE, 8),
- CSR_OFF_ARRAY(SAVE, 9),
- CSR_OFF_ARRAY(SAVE, 10),
- CSR_OFF_ARRAY(SAVE, 11),
- CSR_OFF_ARRAY(SAVE, 12),
- CSR_OFF_ARRAY(SAVE, 13),
- CSR_OFF_ARRAY(SAVE, 14),
- CSR_OFF_ARRAY(SAVE, 15),
- CSR_OFF(TID),
- CSR_OFF_FUNCS(TCFG, CSRFL_IO, NULL, gen_helper_csrwr_tcfg),
- CSR_OFF_FUNCS(TVAL, CSRFL_READONLY | CSRFL_IO, gen_helper_csrrd_tval, NULL),
- CSR_OFF(CNTC),
- CSR_OFF_FUNCS(TICLR, CSRFL_IO, NULL, gen_helper_csrwr_ticlr),
- CSR_OFF(LLBCTL),
- CSR_OFF(IMPCTL1),
- CSR_OFF(IMPCTL2),
- CSR_OFF(TLBRENTRY),
- CSR_OFF(TLBRBADV),
- CSR_OFF(TLBRERA),
- CSR_OFF(TLBRSAVE),
- CSR_OFF(TLBRELO0),
- CSR_OFF(TLBRELO1),
- CSR_OFF(TLBREHI),
- CSR_OFF(TLBRPRMD),
- CSR_OFF(MERRCTL),
- CSR_OFF(MERRINFO1),
- CSR_OFF(MERRINFO2),
- CSR_OFF(MERRENTRY),
- CSR_OFF(MERRERA),
- CSR_OFF(MERRSAVE),
- CSR_OFF(CTAG),
- CSR_OFF_ARRAY(DMW, 0),
- CSR_OFF_ARRAY(DMW, 1),
- CSR_OFF_ARRAY(DMW, 2),
- CSR_OFF_ARRAY(DMW, 3),
- CSR_OFF(DBG),
- CSR_OFF(DERA),
- CSR_OFF(DSAVE),
-};
-
static bool check_plv(DisasContext *ctx)
{
if (ctx->plv == MMU_PLV_USER) {
@@ -160,20 +54,38 @@ static bool check_plv(DisasContext *ctx)
return false;
}
-static const CSRInfo *get_csr(unsigned csr_num)
+static bool set_csr_trans_func(unsigned int csr_num, GenCSRRead readfn,
+ GenCSRWrite writefn)
{
- const CSRInfo *csr;
+ CSRInfo *csr;
- if (csr_num >= ARRAY_SIZE(csr_info)) {
- return NULL;
- }
- csr = &csr_info[csr_num];
- if (csr->offset == 0) {
- return NULL;
+ csr = get_csr(csr_num);
+ if (!csr) {
+ return false;
}
- return csr;
+
+ csr->readfn = (GenCSRFunc)readfn;
+ csr->writefn = (GenCSRFunc)writefn;
+ return true;
}
+#define SET_CSR_FUNC(NAME, read, write) \
+ set_csr_trans_func(LOONGARCH_CSR_##NAME, read, write)
+
+void loongarch_csr_translate_init(void)
+{
+ SET_CSR_FUNC(STLBPS, NULL, gen_helper_csrwr_stlbps);
+ SET_CSR_FUNC(ESTAT, NULL, gen_helper_csrwr_estat);
+ SET_CSR_FUNC(ASID, NULL, gen_helper_csrwr_asid);
+ SET_CSR_FUNC(PGD, gen_helper_csrrd_pgd, NULL);
+ SET_CSR_FUNC(PWCL, NULL, gen_helper_csrwr_pwcl);
+ SET_CSR_FUNC(CPUID, gen_helper_csrrd_cpuid, NULL);
+ SET_CSR_FUNC(TCFG, NULL, gen_helper_csrwr_tcfg);
+ SET_CSR_FUNC(TVAL, gen_helper_csrrd_tval, NULL);
+ SET_CSR_FUNC(TICLR, NULL, gen_helper_csrwr_ticlr);
+}
+#undef SET_CSR_FUNC
+
static bool check_csr_flags(DisasContext *ctx, const CSRInfo *csr, bool write)
{
if ((csr->flags & CSRFL_READONLY) && write) {
@@ -191,6 +103,7 @@ static bool trans_csrrd(DisasContext *ctx, arg_csrrd *a)
{
TCGv dest;
const CSRInfo *csr;
+ GenCSRRead readfn;
if (check_plv(ctx)) {
return false;
@@ -202,8 +115,9 @@ static bool trans_csrrd(DisasContext *ctx, arg_csrrd *a)
} else {
check_csr_flags(ctx, csr, false);
dest = gpr_dst(ctx, a->rd, EXT_NONE);
- if (csr->readfn) {
- csr->readfn(dest, tcg_env);
+ readfn = (GenCSRRead)csr->readfn;
+ if (readfn) {
+ readfn(dest, tcg_env);
} else {
tcg_gen_ld_tl(dest, tcg_env, csr->offset);
}
@@ -216,6 +130,7 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a)
{
TCGv dest, src1;
const CSRInfo *csr;
+ GenCSRWrite writefn;
if (check_plv(ctx)) {
return false;
@@ -231,9 +146,10 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a)
return false;
}
src1 = gpr_src(ctx, a->rd, EXT_NONE);
- if (csr->writefn) {
+ writefn = (GenCSRWrite)csr->writefn;
+ if (writefn) {
dest = gpr_dst(ctx, a->rd, EXT_NONE);
- csr->writefn(dest, tcg_env, src1);
+ writefn(dest, tcg_env, src1);
} else {
dest = tcg_temp_new();
tcg_gen_ld_tl(dest, tcg_env, csr->offset);
@@ -247,6 +163,7 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
{
TCGv src1, mask, oldv, newv, temp;
const CSRInfo *csr;
+ GenCSRWrite writefn;
if (check_plv(ctx)) {
return false;
@@ -277,8 +194,9 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
tcg_gen_andc_tl(temp, oldv, mask);
tcg_gen_or_tl(newv, newv, temp);
- if (csr->writefn) {
- csr->writefn(oldv, tcg_env, newv);
+ writefn = (GenCSRWrite)csr->writefn;
+ if (writefn) {
+ writefn(oldv, tcg_env, newv);
} else {
tcg_gen_st_tl(newv, tcg_env, csr->offset);
}
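With this change the static CSRInfo table and its TCG read/write callbacks leave the translator: trans_privileged.c.inc now fetches entries from the shared table (the include switches from cpu-csr.h to csr.h) via get_csr(), and installs its TCG-specific readfn/writefn pointers once at start-up through loongarch_csr_translate_init(). Only the call sites are visible in these hunks, so the following is an assumed sketch of the shape csr.h presumably exposes, inferred from the GenCSRFunc casts and the offset/flags uses above; names beyond those visible here are guesses:

/* Assumed sketch -- not part of this patch. */
typedef void (*GenCSRFunc)(void);   /* generic pointer, cast to GenCSRRead/GenCSRWrite per use */

typedef struct CSRInfo {
    int offset;                     /* offsetof(CPULoongArchState, CSR_xxx) */
    int flags;                      /* CSRFL_READONLY / CSRFL_EXITTB / CSRFL_IO */
    GenCSRFunc readfn;
    GenCSRFunc writefn;
} CSRInfo;

CSRInfo *get_csr(unsigned int csr_num);   /* NULL for unknown/unimplemented CSR numbers */
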
diff --git a/target/loongarch/tcg/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc
index 92b1d22..7873002 100644
--- a/target/loongarch/tcg/insn_trans/trans_vec.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_vec.c.inc
@@ -3465,7 +3465,7 @@ TRANS(xvmsknz_b, LASX, gen_xx, gen_helper_vmsknz_b)
static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
{
int mode;
- uint64_t data, t;
+ uint64_t data = 0, t;
/*
* imm bit [11:8] is mode, mode value is 0-12.
@@ -3480,7 +3480,7 @@ static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
break;
case 1:
/* data: {2{16'0, imm[7:0], 8'0}} */
- data = (t << 24) | (t << 8);
+ data = (t << 40) | (t << 8);
break;
case 2:
/* data: {2{8'0, imm[7:0], 16'0}} */
@@ -3570,17 +3570,26 @@ static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
}
break;
default:
- generate_exception(ctx, EXCCODE_INE);
g_assert_not_reached();
}
return data;
}
+static bool check_valid_vldi_mode(arg_vldi *a)
+{
+ return extract32(a->imm, 8, 4) <= 12;
+}
+
static bool gen_vldi(DisasContext *ctx, arg_vldi *a, uint32_t oprsz)
{
int sel, vece;
uint64_t value;
+ if (!check_valid_vldi_mode(a)) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, oprsz)) {
return true;
}
@@ -4655,19 +4664,23 @@ TRANS(xvslti_du, LASX, do_xcmpi, MO_64, TCG_COND_LTU)
static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
{
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
TCGv_i32 vd = tcg_constant_i32(a->vd);
TCGv_i32 vj = tcg_constant_i32(a->vj);
TCGv_i32 vk = tcg_constant_i32(a->vk);
TCGv_i32 oprsz = tcg_constant_i32(sz);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, sz)) {
return true;
}
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
- flags = get_fcmp_flags(a->fcond >> 1);
fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
@@ -4675,19 +4688,23 @@ static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
static bool do_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
{
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
TCGv_i32 vd = tcg_constant_i32(a->vd);
TCGv_i32 vj = tcg_constant_i32(a->vj);
TCGv_i32 vk = tcg_constant_i32(a->vk);
TCGv_i32 oprsz = tcg_constant_i32(sz);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, sz)) {
return true;
}
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
- flags = get_fcmp_flags(a->fcond >> 1);
fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
@@ -5126,7 +5143,7 @@ static bool do_vbsrl_v(DisasContext *ctx, arg_vv_i *a, uint32_t oprsz)
{
int i, ofs;
- if (!check_vec(ctx, 32)) {
+ if (!check_vec(ctx, oprsz)) {
return true;
}
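The mode 1 correction above is easiest to verify with a concrete immediate: {2{16'0, imm[7:0], 8'0}} means two copies of a 32-bit word whose bits [15:8] hold imm[7:0], so imm lands at shifts 8 and 40 of the 64-bit result, not 8 and 24. A standalone check (plain C, not QEMU code):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint64_t t = 0xab;                      /* imm[7:0] */
    uint64_t wrong = (t << 24) | (t << 8);  /* 0x00000000ab00ab00: both copies in the low word */
    uint64_t fixed = (t << 40) | (t << 8);  /* 0x0000ab000000ab00: two copies of 0x0000ab00 */

    printf("wrong: 0x%016" PRIx64 "\nfixed: 0x%016" PRIx64 "\n", wrong, fixed);
    return 0;
}
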
diff --git a/target/loongarch/tcg/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c
index b6916f5..c155f48 100644
--- a/target/loongarch/tcg/iocsr_helper.c
+++ b/target/loongarch/tcg/iocsr_helper.c
@@ -9,8 +9,7 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#define GET_MEMTXATTRS(cas) \
((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index})
diff --git a/target/loongarch/tcg/op_helper.c b/target/loongarch/tcg/op_helper.c
index fe79c62..16ac0d4 100644
--- a/target/loongarch/tcg/op_helper.c
+++ b/target/loongarch/tcg/op_helper.c
@@ -10,11 +10,10 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "internals.h"
#include "qemu/crc32c.h"
-#include <zlib.h>
+#include <zlib.h> /* for crc32 */
#include "cpu-csr.h"
/* Exceptions helpers */
diff --git a/target/loongarch/tcg/tcg_loongarch.h b/target/loongarch/tcg/tcg_loongarch.h
new file mode 100644
index 0000000..fd4e116
--- /dev/null
+++ b/target/loongarch/tcg/tcg_loongarch.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch TCG interface
+ *
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
+ */
+#ifndef TARGET_LOONGARCH_TCG_LOONGARCH_H
+#define TARGET_LOONGARCH_TCG_LOONGARCH_H
+#include "cpu.h"
+
+void loongarch_csr_translate_init(void);
+
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type, int mmu_idx);
+
+#endif /* TARGET_LOONGARCH_TCG_LOONGARCH_H */
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index d6331f9..dc48b0f 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -12,38 +12,20 @@
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
-#include "exec/cpu_ldst.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"
+#include "tcg/tcg_loongarch.h"
-static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
- uint64_t *dir_width, target_ulong level)
+bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
- switch (level) {
- case 1:
- *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
- *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
- break;
- case 2:
- *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
- *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
- break;
- case 3:
- *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
- *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
- break;
- case 4:
- *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
- *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
- break;
- default:
- /* level may be zero for ldpte */
- *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
- *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
- break;
+ if (tlb_ps >= 64) {
+ return false;
}
+ return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
@@ -123,7 +105,11 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
+ uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (!tlb_e) {
+ return;
+ }
if (index >= LOONGARCH_STLB) {
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
} else {
@@ -187,8 +173,10 @@ static void fill_tlb_entry(CPULoongArchState *env, int index)
lo1 = env->CSR_TLBELO1;
}
- if (csr_ps == 0) {
- qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
+ /* check csr_ps */
+ if (!check_ps(env, csr_ps)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
+ return;
}
/* Only MTLB has the ps fields */
@@ -214,6 +202,66 @@ static uint32_t get_random_tlb(uint32_t low, uint32_t high)
return val % (high - low + 1) + low;
}
+/*
+ * One TLB entry holds an adjacent odd/even page pair, so the VPN used
+ * for comparison is the virtual page number divided by 2. For a 16KiB
+ * page the compare VPN is bit[47:15], while the VPPN field in the TLB
+ * entry contains bit[47:13], so an adjustment is needed:
+ * virt_vpn = vaddr[47:13]
+ */
+static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
+ int *index)
+{
+ LoongArchTLB *tlb;
+ uint16_t csr_asid, tlb_asid, stlb_idx;
+ uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
+ int i, compare_shift;
+ uint64_t vpn, tlb_vppn;
+
+ csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
+ stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
+ vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
+ stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
+ compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
+
+ /* Search STLB */
+ for (i = 0; i < 8; ++i) {
+ tlb = &env->tlb[i * 256 + stlb_idx];
+ tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (tlb_e) {
+ tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
+ tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
+ tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
+
+ if ((tlb_g == 1 || tlb_asid == csr_asid) &&
+ (vpn == (tlb_vppn >> compare_shift))) {
+ *index = i * 256 + stlb_idx;
+ return true;
+ }
+ }
+ }
+
+ /* Search MTLB */
+ for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
+ tlb = &env->tlb[i];
+ tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (tlb_e) {
+ tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
+ tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
+ tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
+ tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
+ compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
+ vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
+ if ((tlb_g == 1 || tlb_asid == csr_asid) &&
+ (vpn == (tlb_vppn >> compare_shift))) {
+ *index = i;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
void helper_tlbsrch(CPULoongArchState *env)
{
int index, match;
@@ -298,7 +346,16 @@ void helper_tlbfill(CPULoongArchState *env)
pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
}
+ if (!check_ps(env, pagesize)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
+ return;
+ }
+
stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
+ if (!check_ps(env, stlb_ps)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
+ return;
+ }
if (pagesize == stlb_ps) {
/* Only write into STLB bits [47:13] */
@@ -427,7 +484,11 @@ void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
uint64_t vpn, tlb_vppn;
uint8_t tlb_ps, compare_shift;
+ uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (!tlb_e) {
+ continue;
+ }
if (i >= LOONGARCH_STLB) {
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
} else {
@@ -456,7 +517,11 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
uint64_t vpn, tlb_vppn;
uint8_t tlb_ps, compare_shift;
+ uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (!tlb_e) {
+ continue;
+ }
if (i >= LOONGARCH_STLB) {
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
} else {
@@ -485,7 +550,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Data access */
ret = get_physical_address(env, &physical, &prot, address,
- access_type, mmu_idx);
+ access_type, mmu_idx, 0);
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
@@ -511,8 +576,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
target_ulong level, uint32_t mem_idx)
{
CPUState *cs = env_cpu(env);
- target_ulong badvaddr, index, phys, ret;
- int shift;
+ target_ulong badvaddr, index, phys;
uint64_t dir_base, dir_width;
if (unlikely((level == 0) || (level > 4))) {
@@ -525,6 +589,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
if (unlikely(level == 4)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Attempted use of level 4 huge page\n");
+ return base;
}
if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
@@ -536,16 +601,10 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
badvaddr = env->CSR_TLBRBADV;
base = base & TARGET_PHYS_MASK;
-
- /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
- shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
- shift = (shift + 1) * 3;
-
get_dir_base_width(env, &dir_base, &dir_width, level);
index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
- phys = base | index << shift;
- ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
- return ret;
+ phys = base | index << 3;
+ return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
@@ -553,7 +612,6 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
{
CPUState *cs = env_cpu(env);
target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
- int shift;
uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
uint64_t dir_base, dir_width;
@@ -594,16 +652,12 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
tmp0 += MAKE_64BIT_MASK(ps, 1);
}
} else {
- /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
- shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
- shift = (shift + 1) * 3;
badv = env->CSR_TLBRBADV;
ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
ptindex = ptindex & ~0x1; /* clear bit 0 */
- ptoffset0 = ptindex << shift;
- ptoffset1 = (ptindex + 1) << shift;
-
+ ptoffset0 = ptindex << 3;
+ ptoffset1 = (ptindex + 1) << 3;
phys = base | (odd ? ptoffset1 : ptoffset0);
tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
ps = ptbase;
@@ -616,3 +670,87 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
}
env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}
+
+static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ int access_type, int index, int mmu_idx)
+{
+ LoongArchTLB *tlb = &env->tlb[index];
+ uint64_t plv = mmu_idx;
+ uint64_t tlb_entry, tlb_ppn;
+ uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
+
+ if (index >= LOONGARCH_STLB) {
+ tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
+ } else {
+ tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
+ }
+ n = (address >> tlb_ps) & 0x1; /* Odd or even */
+
+ tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
+ tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
+ tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
+ tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
+ if (is_la64(env)) {
+ tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
+ tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
+ tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
+ tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
+ } else {
+ tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
+ tlb_nx = 0;
+ tlb_nr = 0;
+ tlb_rplv = 0;
+ }
+
+ /* Remove sw bits between bit 12 and bit PS */
+ tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));
+
+ /* Check access rights */
+ if (!tlb_v) {
+ return TLBRET_INVALID;
+ }
+
+ if (access_type == MMU_INST_FETCH && tlb_nx) {
+ return TLBRET_XI;
+ }
+
+ if (access_type == MMU_DATA_LOAD && tlb_nr) {
+ return TLBRET_RI;
+ }
+
+ if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
+ ((tlb_rplv == 1) && (plv != tlb_plv))) {
+ return TLBRET_PE;
+ }
+
+ if ((access_type == MMU_DATA_STORE) && !tlb_d) {
+ return TLBRET_DIRTY;
+ }
+
+ *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
+ (address & MAKE_64BIT_MASK(0, tlb_ps));
+ *prot = PAGE_READ;
+ if (tlb_d) {
+ *prot |= PAGE_WRITE;
+ }
+ if (!tlb_nx) {
+ *prot |= PAGE_EXEC;
+ }
+ return TLBRET_MATCH;
+}
+
+int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ int index, match;
+
+ match = loongarch_tlb_search(env, address, &index);
+ if (match) {
+ return loongarch_map_tlb_entry(env, physical, prot,
+ address, access_type, index, mmu_idx);
+ }
+
+ return TLBRET_NOMATCH;
+}
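To make the STLB arithmetic in loongarch_tlb_search() concrete: each entry maps an even/odd page pair, so with the default 16KiB page size (stlb_ps = 14) the compare VPN drops the low 15 bits of the address, the low 8 bits of that VPN select one of the 256 STLB sets (8 ways each, hence the i * 256 + stlb_idx indexing), and compare_shift = 14 + 1 - 13 = 2 aligns the stored VPPN (vaddr[47:13]) with the compare VPN (vaddr[47:15]). A standalone illustration, assuming the 16KiB case and an address already within the virtual mask (plain C, not QEMU code):

#include <stdio.h>
#include <stdint.h>

#define VPPN_SHIFT 13                       /* stand-in for R_TLB_MISC_VPPN_SHIFT: vaddr[47:13] */

int main(void)
{
    uint64_t vaddr = 0x0000123456789abcULL;
    int stlb_ps = 14;                                   /* 16KiB pages */
    uint64_t vpn = vaddr >> (stlb_ps + 1);              /* vaddr[47:15] */
    unsigned stlb_idx = vpn & 0xff;                     /* one of 256 STLB sets */
    int compare_shift = stlb_ps + 1 - VPPN_SHIFT;       /* = 2 */
    uint64_t tlb_vppn = vaddr >> VPPN_SHIFT;            /* what the fill path stored */

    /* The lookup hits when vpn == (tlb_vppn >> compare_shift). */
    printf("vpn=0x%llx set=%u match=%d\n",
           (unsigned long long)vpn, stlb_idx, vpn == (tlb_vppn >> compare_shift));
    return 0;
}
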
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index 1fca4af..53a0b4c 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -11,11 +11,13 @@
#include "tcg/tcg-op-gvec.h"
#include "exec/translation-block.h"
#include "exec/translator.h"
+#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"
#include "fpu/softfloat.h"
+#include "tcg_loongarch.h"
#include "translate.h"
#include "internals.h"
#include "vec.h"
@@ -288,7 +290,7 @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
if (!decode(ctx, ctx->opcode)) {
qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. "
- TARGET_FMT_lx ": 0x%x\n",
+ "0x%" VADDR_PRIx ": 0x%x\n",
ctx->base.pc_next, ctx->opcode);
generate_exception(ctx, EXCCODE_INE);
}
@@ -333,8 +335,8 @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
@@ -358,4 +360,8 @@ void loongarch_translate_init(void)
offsetof(CPULoongArchState, lladdr), "lladdr");
cpu_llval = tcg_global_mem_new(tcg_env,
offsetof(CPULoongArchState, llval), "llval");
+
+#ifndef CONFIG_USER_ONLY
+ loongarch_csr_translate_init();
+#endif
}
diff --git a/target/loongarch/tcg/vec_helper.c b/target/loongarch/tcg/vec_helper.c
index 3faf52c..a270998 100644
--- a/target/loongarch/tcg/vec_helper.c
+++ b/target/loongarch/tcg/vec_helper.c
@@ -7,7 +7,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h
index 195f535..018dc5e 100644
--- a/target/loongarch/translate.h
+++ b/target/loongarch/translate.h
@@ -25,6 +25,7 @@
#define avail_LSX(C) (FIELD_EX32((C)->cpucfg2, CPUCFG2, LSX))
#define avail_LASX(C) (FIELD_EX32((C)->cpucfg2, CPUCFG2, LASX))
#define avail_IOCSR(C) (FIELD_EX32((C)->cpucfg1, CPUCFG1, IOCSR))
+#define avail_CRC(C) (FIELD_EX32((C)->cpucfg1, CPUCFG1, CRC))
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
diff --git a/target/m68k/Kconfig b/target/m68k/Kconfig
index 9eae714..23aae24 100644
--- a/target/m68k/Kconfig
+++ b/target/m68k/Kconfig
@@ -1,3 +1,3 @@
config M68K
bool
- select SEMIHOSTING
+ imply SEMIHOSTING if TCG
diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h
index 39dcbce..256a2b5 100644
--- a/target/m68k/cpu-param.h
+++ b/target/m68k/cpu-param.h
@@ -2,13 +2,12 @@
* m68k cpu parameters for qemu.
*
* Copyright (c) 2005-2007 CodeSourcery
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef M68K_CPU_PARAM_H
#define M68K_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
/*
* Coldfire Linux uses 8k pages
* and m68k linux uses 4k pages
@@ -18,4 +17,6 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 1d49f4c..6a09db3 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -23,6 +23,8 @@
#include "cpu.h"
#include "migration/vmstate.h"
#include "fpu/softfloat.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -38,6 +40,24 @@ static vaddr m68k_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState m68k_get_tb_cpu_state(CPUState *cs)
+{
+ CPUM68KState *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
+ if (env->sr & SR_S) {
+ flags |= TB_FLAGS_MSR_S;
+ flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
+ flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
+ }
+ if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) {
+ flags |= TB_FLAGS_TRACE;
+ }
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void m68k_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
@@ -51,10 +71,12 @@ static void m68k_restore_state_to_opc(CPUState *cs,
}
}
+#ifndef CONFIG_USER_ONLY
static bool m68k_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
+#endif /* !CONFIG_USER_ONLY */
static int m68k_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -76,7 +98,7 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
CPUState *cs = CPU(obj);
M68kCPUClass *mcc = M68K_CPU_GET_CLASS(obj);
CPUM68KState *env = cpu_env(cs);
- floatx80 nan = floatx80_default_nan(NULL);
+ floatx80 nan;
int i;
if (mcc->parent_phases.hold) {
@@ -89,6 +111,61 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
#else
cpu_m68k_set_sr(env, SR_S | SR_I);
#endif
+ /*
+ * M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL
+ * 3.4 FLOATING-POINT INSTRUCTION DETAILS
+ * If either operand, but not both operands, of an operation is a
+ * nonsignaling NaN, then that NaN is returned as the result. If both
+ * operands are nonsignaling NaNs, then the destination operand
+ * nonsignaling NaN is returned as the result.
+ * If either operand to an operation is a signaling NaN (SNaN), then the
+ * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit
+ * is set in the FPCR ENABLE byte, then the exception is taken and the
+ * destination is not modified. If the SNaN exception enable bit is not
+ * set, setting the SNaN bit in the operand to a one converts the SNaN to
+ * a nonsignaling NaN. The operation then continues as described in the
+ * preceding paragraph for nonsignaling NaNs.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
+ /* Default NaN: sign bit clear, all frac bits set */
+ set_float_default_nan_pattern(0b01111111, &env->fp_status);
+ /*
+ * m68k-specific floatx80 behaviour:
+ * * default Infinity values have a zero Integer bit
+ * * input Infinities may have the Integer bit either 0 or 1
+ * * pseudo-denormals supported for input and output
+ * * don't raise Invalid for pseudo-NaN/pseudo-Inf/Unnormal
+ *
+ * With m68k, the explicit integer bit can be zero in the case of:
+ * - zeros (exp == 0, mantissa == 0)
+ * - denormalized numbers (exp == 0, mantissa != 0)
+ * - unnormalized numbers (exp != 0, exp < 0x7FFF)
+ * - infinities (exp == 0x7FFF, mantissa == 0)
+ * - not-a-numbers (exp == 0x7FFF, mantissa != 0)
+ *
+ * For infinities and NaNs, the explicit integer bit can be either one or
+ * zero.
+ *
+ * The IEEE 754 standard does not define a zero integer bit. Such a number
+ * is an unnormalized number. Hardware does not directly support
+ * denormalized and unnormalized numbers, but implicitly supports them by
+ * trapping them as unimplemented data types, allowing efficient conversion
+ * in software.
+ *
+ * See "M68000 FAMILY PROGRAMMER’S REFERENCE MANUAL",
+ * "1.6 FLOATING-POINT DATA TYPES"
+ *
+ * Note though that QEMU's fp emulation does directly handle both
+ * denormal and unnormal values, and does not trap to guest software.
+ */
+ set_floatx80_behaviour(floatx80_default_inf_int_bit_is_zero |
+ floatx80_pseudo_inf_valid |
+ floatx80_pseudo_nan_valid |
+ floatx80_unnormal_valid |
+ floatx80_pseudo_denormal_valid,
+ &env->fp_status);
+
+ nan = floatx80_default_nan(&env->fp_status);
for (i = 0; i < 8; i++) {
env->fregs[i].d = nan;
}
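The manual excerpt above boils down to the "a then b" rule that the set_float_2nan_prop_rule(float_2nan_prop_ab, ...) call requests: with two quiet-NaN operands the destination (first) operand's NaN is returned, otherwise whichever operand is the NaN wins. A rough standalone illustration on host doubles (link with -lm); this is not QEMU's floatx80 path, and pick_2nan_ab is an invented name.

#include <math.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: the behaviour requested by
 * set_float_2nan_prop_rule(float_2nan_prop_ab, ...) above, expressed on
 * host doubles rather than QEMU's FPReg/floatx80 types.  The first
 * argument plays the role of the destination operand; the function is
 * only meaningful when at least one operand is a NaN.
 */
static double pick_2nan_ab(double a, double b)
{
    if (isnan(a)) {
        return a;   /* "a then b": the destination NaN wins when both are NaNs */
    }
    return b;       /* otherwise the NaN, if any, is in b */
}

int main(void)
{
    double a = nan("1"), b = nan("2");  /* two quiet NaNs, distinct payloads */
    double r = pick_2nan_ab(a, b);

    /* On common libcs the payloads differ, so this prints "a". */
    printf("winner: %s\n", memcmp(&r, &a, sizeof(r)) == 0 ? "a" : "b");
    return 0;
}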
@@ -102,6 +179,7 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
info->print_insn = print_insn_m68k;
+ info->endian = BFD_ENDIAN_BIG;
info->mach = 0;
}
@@ -523,26 +601,34 @@ static const VMStateDescription vmstate_m68k_cpu = {
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps m68k_sysemu_ops = {
+ .has_work = m68k_cpu_has_work,
.get_phys_page_debug = m68k_cpu_get_phys_page_debug,
};
#endif /* !CONFIG_USER_ONLY */
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps m68k_tcg_ops = {
+ /* MTTCG not yet supported: require strict ordering */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = false,
+
.initialize = m68k_tcg_init,
+ .translate_code = m68k_translate_code,
+ .get_tb_cpu_state = m68k_get_tb_cpu_state,
.restore_state_to_opc = m68k_restore_state_to_opc,
+ .mmu_index = m68k_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = m68k_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.cpu_exec_halt = m68k_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};
-static void m68k_cpu_class_init(ObjectClass *c, void *data)
+static void m68k_cpu_class_init(ObjectClass *c, const void *data)
{
M68kCPUClass *mcc = M68K_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
@@ -555,8 +641,6 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
&mcc->parent_phases);
cc->class_by_name = m68k_cpu_class_by_name;
- cc->has_work = m68k_cpu_has_work;
- cc->mmu_index = m68k_cpu_mmu_index;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
cc->get_pc = m68k_cpu_get_pc;
@@ -571,7 +655,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->tcg_ops = &m68k_tcg_ops;
}
-static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data)
+static void m68k_cpu_class_init_cf_core(ObjectClass *c, const void *data)
{
CPUClass *cc = CPU_CLASS(c);
@@ -586,7 +670,7 @@ static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data)
.class_init = m68k_cpu_class_init_cf_core \
}
-static void m68k_cpu_class_init_m68k_core(ObjectClass *c, void *data)
+static void m68k_cpu_class_init_m68k_core(ObjectClass *c, const void *data)
{
CPUClass *cc = CPU_CLASS(c);
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index b5bbeed..d9db6a4 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -21,7 +21,9 @@
#ifndef M68K_CPU_H
#define M68K_CPU_H
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
#include "cpu-qom.h"
@@ -76,8 +78,6 @@
#define M68K_MAX_TTR 2
#define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index]
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
typedef CPU_LDoubleU FPReg;
typedef struct CPUArchState {
@@ -193,6 +193,8 @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void m68k_tcg_init(void);
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void m68k_cpu_init_gdb(M68kCPU *cpu);
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
@@ -592,8 +594,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
MemTxResult response, uintptr_t retaddr);
#endif
-#include "exec/cpu-all.h"
-
/* TB flags */
#define TB_FLAGS_MACSR 0x0f
#define TB_FLAGS_MSR_S_BIT 13
@@ -605,22 +605,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
#define TB_FLAGS_TRACE 16
#define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)
-static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
- if (env->sr & SR_S) {
- *flags |= TB_FLAGS_MSR_S;
- *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
- *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
- }
- if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) {
- *flags |= TB_FLAGS_TRACE;
- }
-}
-
void dump_mmu(CPUM68KState *env);
#endif
diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
index 8314791..5601286 100644
--- a/target/m68k/fpu_helper.c
+++ b/target/m68k/fpu_helper.c
@@ -21,8 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "softfloat.h"
/*
@@ -175,7 +174,7 @@ static int cpu_m68k_exceptbits_from_host(int host_bits)
if (host_bits & float_flag_overflow) {
target_bits |= 0x40;
}
- if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal_flushed)) {
target_bits |= 0x20;
}
if (host_bits & float_flag_divbyzero) {
@@ -455,7 +454,7 @@ void HELPER(ftst)(CPUM68KState *env, FPReg *val)
if (floatx80_is_any_nan(val->d)) {
cc |= FPSR_CC_A;
- } else if (floatx80_is_infinity(val->d)) {
+ } else if (floatx80_is_infinity(val->d, &env->fp_status)) {
cc |= FPSR_CC_I;
} else if (floatx80_is_zero(val->d)) {
cc |= FPSR_CC_Z;
@@ -615,14 +614,13 @@ void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
fp_rem = floatx80_rem(val1->d, val0->d, &env->fp_status);
if (!floatx80_is_any_nan(fp_rem)) {
- float_status fp_status = { };
+ /* Use a local temporary fp_status to set a different rounding mode */
+ float_status fp_status = env->fp_status;
uint32_t quotient;
int sign;
/* Calculate quotient directly using round to nearest mode */
set_float_rounding_mode(float_round_nearest_even, &fp_status);
- set_floatx80_rounding_precision(
- get_floatx80_rounding_precision(&env->fp_status), &fp_status);
fp_quot.d = floatx80_div(val1->d, val0->d, &fp_status);
sign = extractFloatx80Sign(fp_quot.d);
diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c
index 15547e2..136159f 100644
--- a/target/m68k/gdbstub.c
+++ b/target/m68k/gdbstub.c
@@ -52,7 +52,7 @@ int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUM68KState *env = cpu_env(cs);
uint32_t tmp;
- tmp = ldl_p(mem_buf);
+ tmp = ldl_be_p(mem_buf);
if (n < 8) {
/* D0-D7 */
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 7967ad1..15f110f 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -20,10 +20,12 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
+#include "system/memory.h"
#include "gdbstub/helpers.h"
#include "fpu/softfloat.h"
#include "qemu/qemu-print.h"
@@ -36,7 +38,8 @@ static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n)
CPUM68KState *env = &cpu->env;
if (n < 8) {
- float_status s;
+ /* Use scratch float_status so any exceptions don't change CPU state */
+ float_status s = env->fp_status;
return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s));
}
switch (n) {
@@ -56,16 +59,17 @@ static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
CPUM68KState *env = &cpu->env;
if (n < 8) {
- float_status s;
- env->fregs[n].d = float64_to_floatx80(ldq_p(mem_buf), &s);
+ /* Use scratch float_status so any exceptions don't change CPU state */
+ float_status s = env->fp_status;
+ env->fregs[n].d = float64_to_floatx80(ldq_be_p(mem_buf), &s);
return 8;
}
switch (n) {
case 8: /* fpcontrol */
- cpu_m68k_set_fpcr(env, ldl_p(mem_buf));
+ cpu_m68k_set_fpcr(env, ldl_be_p(mem_buf));
return 4;
case 9: /* fpstatus */
- env->fpsr = ldl_p(mem_buf);
+ env->fpsr = ldl_be_p(mem_buf);
return 4;
case 10: /* fpiar, not implemented */
return 4;
@@ -107,10 +111,10 @@ static int m68k_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
}
switch (n) {
case 8: /* fpcontrol */
- cpu_m68k_set_fpcr(env, ldl_p(mem_buf));
+ cpu_m68k_set_fpcr(env, ldl_be_p(mem_buf));
return 4;
case 9: /* fpstatus */
- cpu_m68k_set_fpsr(env, ldl_p(mem_buf));
+ cpu_m68k_set_fpsr(env, ldl_be_p(mem_buf));
return 4;
case 10: /* fpiar, not implemented */
return 4;
@@ -287,7 +291,6 @@ void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
/* Invalid control registers will generate an exception. */
raise_exception_ra(env, EXCP_ILLEGAL, 0);
- return;
}
uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg)
@@ -479,7 +482,6 @@ static void print_address_zone(uint32_t logical, uint32_t physical,
static void dump_address_map(CPUM68KState *env, uint32_t root_pointer)
{
- int i, j, k;
int tic_size, tic_shift;
uint32_t tib_mask;
uint32_t tia, tib, tic;
@@ -502,19 +504,19 @@ static void dump_address_map(CPUM68KState *env, uint32_t root_pointer)
tic_shift = 12;
tib_mask = M68K_4K_PAGE_MASK;
}
- for (i = 0; i < M68K_ROOT_POINTER_ENTRIES; i++) {
+ for (unsigned i = 0; i < M68K_ROOT_POINTER_ENTRIES; i++) {
tia = address_space_ldl(cs->as, M68K_POINTER_BASE(root_pointer) + i * 4,
MEMTXATTRS_UNSPECIFIED, &txres);
if (txres != MEMTX_OK || !M68K_UDT_VALID(tia)) {
continue;
}
- for (j = 0; j < M68K_ROOT_POINTER_ENTRIES; j++) {
+ for (unsigned j = 0; j < M68K_ROOT_POINTER_ENTRIES; j++) {
tib = address_space_ldl(cs->as, M68K_POINTER_BASE(tia) + j * 4,
MEMTXATTRS_UNSPECIFIED, &txres);
if (txres != MEMTX_OK || !M68K_UDT_VALID(tib)) {
continue;
}
- for (k = 0; k < tic_size; k++) {
+ for (unsigned k = 0; k < tic_size; k++) {
tic = address_space_ldl(cs->as, (tib & tib_mask) + k * 4,
MEMTXATTRS_UNSPECIFIED, &txres);
if (txres != MEMTX_OK || !M68K_PDT_VALID(tic)) {
diff --git a/target/m68k/meson.build b/target/m68k/meson.build
index 8d3f9ce..4d213da 100644
--- a/target/m68k/meson.build
+++ b/target/m68k/meson.build
@@ -11,9 +11,12 @@ m68k_ss.add(files(
m68k_system_ss = ss.source_set()
m68k_system_ss.add(files(
- 'm68k-semi.c',
'monitor.c'
))
+m68k_system_ss.add(when: ['CONFIG_SEMIHOSTING'],
+ if_true: files('m68k-semi.c'),
+ if_false: files('semihosting-stub.c')
+)
target_arch += {'m68k': m68k_ss}
target_system_arch += {'m68k': m68k_system_ss}
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 15bad5d..f29ae12 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -20,8 +20,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "semihosting/semihost.h"
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/m68k/semihosting-stub.c b/target/m68k/semihosting-stub.c
new file mode 100644
index 0000000..dbe669c
--- /dev/null
+++ b/target/m68k/semihosting-stub.c
@@ -0,0 +1,18 @@
+/*
+ * m68k/ColdFire semihosting stub
+ *
+ * Copyright (c) 2024 Linaro Ltd.
+ *
+ * Authors:
+ * Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+void do_m68k_semihosting(CPUM68KState *env, int nr)
+{
+ g_assert_not_reached();
+}
diff --git a/target/m68k/softfloat.c b/target/m68k/softfloat.c
index 02dcc03..d1f150e 100644
--- a/target/m68k/softfloat.c
+++ b/target/m68k/softfloat.c
@@ -142,8 +142,7 @@ floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status)
if ((uint64_t) (aSig << 1)) {
return propagateFloatx80NaN(a, b, status);
}
- return packFloatx80(aSign, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(aSign, status);
}
if (aExp == 0) {
if (aSig == 0) {
@@ -245,7 +244,7 @@ floatx80 floatx80_lognp1(floatx80 a, float_status *status)
float_raise(float_flag_invalid, status);
return floatx80_default_nan(status);
}
- return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
@@ -255,8 +254,7 @@ floatx80 floatx80_lognp1(floatx80 a, float_status *status)
if (aSign && aExp >= one_exp) {
if (aExp == one_exp && aSig == one_sig) {
float_raise(float_flag_divbyzero, status);
- return packFloatx80(aSign, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(aSign, status);
}
float_raise(float_flag_invalid, status);
return floatx80_default_nan(status);
@@ -442,8 +440,7 @@ floatx80 floatx80_logn(floatx80 a, float_status *status)
propagateFloatx80NaNOneArg(a, status);
}
if (aSign == 0) {
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
}
@@ -452,8 +449,7 @@ floatx80 floatx80_logn(floatx80 a, float_status *status)
if (aExp == 0) {
if (aSig == 0) { /* zero */
float_raise(float_flag_divbyzero, status);
- return packFloatx80(1, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(1, status);
}
if ((aSig & one_sig) == 0) { /* denormal */
normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
@@ -610,15 +606,13 @@ floatx80 floatx80_log10(floatx80 a, float_status *status)
propagateFloatx80NaNOneArg(a, status);
}
if (aSign == 0) {
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
}
if (aExp == 0 && aSig == 0) {
float_raise(float_flag_divbyzero, status);
- return packFloatx80(1, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(1, status);
}
if (aSign) {
@@ -668,16 +662,14 @@ floatx80 floatx80_log2(floatx80 a, float_status *status)
propagateFloatx80NaNOneArg(a, status);
}
if (aSign == 0) {
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
}
if (aExp == 0) {
if (aSig == 0) {
float_raise(float_flag_divbyzero, status);
- return packFloatx80(1, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(1, status);
}
normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
}
@@ -740,8 +732,7 @@ floatx80 floatx80_etox(floatx80 a, float_status *status)
if (aSign) {
return packFloatx80(0, 0, 0);
}
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
@@ -924,8 +915,7 @@ floatx80 floatx80_twotox(floatx80 a, float_status *status)
if (aSign) {
return packFloatx80(0, 0, 0);
}
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
@@ -1075,8 +1065,7 @@ floatx80 floatx80_tentox(floatx80 a, float_status *status)
if (aSign) {
return packFloatx80(0, 0, 0);
}
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
@@ -2260,8 +2249,7 @@ floatx80 floatx80_atanh(floatx80 a, float_status *status)
if (compact >= 0x3FFF8000) { /* |X| >= 1 */
if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
float_raise(float_flag_divbyzero, status);
- return packFloatx80(aSign, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(aSign, status);
} else { /* |X| > 1 */
float_raise(float_flag_invalid, status);
return floatx80_default_nan(status);
@@ -2320,8 +2308,7 @@ floatx80 floatx80_etoxm1(floatx80 a, float_status *status)
if (aSign) {
return packFloatx80(aSign, one_exp, one_sig);
}
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
@@ -2687,8 +2674,7 @@ floatx80 floatx80_sinh(floatx80 a, float_status *status)
if ((uint64_t) (aSig << 1)) {
return propagateFloatx80NaNOneArg(a, status);
}
- return packFloatx80(aSign, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(aSign, status);
}
if (aExp == 0 && aSig == 0) {
@@ -2774,8 +2760,7 @@ floatx80 floatx80_cosh(floatx80 a, float_status *status)
if ((uint64_t) (aSig << 1)) {
return propagateFloatx80NaNOneArg(a, status);
}
- return packFloatx80(0, floatx80_infinity.high,
- floatx80_infinity.low);
+ return floatx80_default_inf(0, status);
}
if (aExp == 0 && aSig == 0) {
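The packFloatx80(sign, floatx80_infinity.high, floatx80_infinity.low) calls replaced throughout this file now go through floatx80_default_inf(), which honours the floatx80_default_inf_int_bit_is_zero behaviour configured in m68k_cpu_reset_hold(). As a reminder of what a "zero Integer bit" means for the 80-bit format, here is an illustrative encoding of the two +inf variants; struct fx80 and the helper names are invented for this sketch and it is not QEMU's floatx80 type.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: the two 80-bit +infinity encodings relevant to the
 * floatx80_default_inf() calls above.  "se" is the combined sign/exponent
 * word and "sig" the explicit 64-bit significand whose most significant
 * bit is the Integer bit.
 */
struct fx80 {
    uint16_t se;
    uint64_t sig;
};

static struct fx80 inf_integer_bit_set(int sign)    /* x87-style infinity */
{
    return (struct fx80){ .se = (uint16_t)((sign << 15) | 0x7fff),
                          .sig = 0x8000000000000000ull };
}

static struct fx80 inf_integer_bit_clear(int sign)  /* m68k default infinity */
{
    return (struct fx80){ .se = (uint16_t)((sign << 15) | 0x7fff), .sig = 0 };
}

int main(void)
{
    struct fx80 a = inf_integer_bit_set(0);
    struct fx80 b = inf_integer_bit_clear(0);

    printf("integer bit set:   se=%04x sig=%016llx\n",
           a.se, (unsigned long long)a.sig);
    printf("integer bit clear: se=%04x sig=%016llx\n",
           b.se, (unsigned long long)b.sig);
    return 0;
}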
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 445966f..97afceb 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -20,7 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
@@ -720,7 +721,9 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
}
/* fallthru */
case 2: /* Indirect register */
- return get_areg(s, reg0);
+ tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, get_areg(s, reg0));
+ return tmp;
case 4: /* Indirect predecrement. */
if (opsize == OS_UNSIZED) {
return NULL_QREG;
@@ -747,20 +750,23 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
switch (reg0) {
case 0: /* Absolute short. */
offset = (int16_t)read_im16(env, s);
- return tcg_constant_i32(offset);
+ break;
case 1: /* Absolute long. */
offset = read_im32(env, s);
- return tcg_constant_i32(offset);
+ break;
case 2: /* pc displacement */
offset = s->pc;
offset += (int16_t)read_im16(env, s);
- return tcg_constant_i32(offset);
+ break;
case 3: /* pc index+displacement. */
return gen_lea_indexed(env, s, NULL_QREG);
case 4: /* Immediate. */
default:
return NULL_QREG;
}
+ tmp = tcg_temp_new();
+ tcg_gen_movi_i32(tmp, offset);
+ return tmp;
}
/* Should never happen. */
return NULL_QREG;
@@ -6112,8 +6118,8 @@ static const TranslatorOps m68k_tr_ops = {
.tb_stop = m68k_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/meson.build b/target/meson.build
index 1c2e6f2..b29598e 100644
--- a/target/meson.build
+++ b/target/meson.build
@@ -1,7 +1,6 @@
subdir('alpha')
subdir('arm')
subdir('avr')
-subdir('cris')
subdir('hexagon')
subdir('hppa')
subdir('i386')
diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h
index e530fea..e0a3794 100644
--- a/target/microblaze/cpu-param.h
+++ b/target/microblaze/cpu-param.h
@@ -2,7 +2,7 @@
* MicroBlaze cpu parameters for qemu.
*
* Copyright (c) 2009 Edgar E. Iglesias
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef MICROBLAZE_CPU_PARAM_H
@@ -17,11 +17,9 @@
* of address space.
*/
#ifdef CONFIG_USER_ONLY
-#define TARGET_LONG_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#else
-#define TARGET_LONG_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
#endif
@@ -29,7 +27,6 @@
/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */
#define TARGET_PAGE_BITS 12
-/* MicroBlaze is always in-order. */
-#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
+#define TARGET_INSN_START_EXTRA_WORDS 1
#endif
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 135947e..ee0a869 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -27,10 +27,11 @@
#include "cpu.h"
#include "qemu/module.h"
#include "hw/qdev-properties.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/gdbstub.h"
+#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static const struct {
@@ -94,6 +95,17 @@ static vaddr mb_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState mb_get_tb_cpu_state(CPUState *cs)
+{
+ CPUMBState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK),
+ .cs_base = (env->iflags & IMM_FLAG ? env->imm : 0),
+ };
+}
+
static void mb_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -114,10 +126,12 @@ static void mb_restore_state_to_opc(CPUState *cs,
cpu->env.iflags = data[1];
}
+#ifndef CONFIG_USER_ONLY
static bool mb_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
+#endif /* !CONFIG_USER_ONLY */
static int mb_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -201,6 +215,15 @@ static void mb_cpu_reset_hold(Object *obj, ResetType type)
env->pc = cpu->cfg.base_vectors;
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ /*
+ * TODO: this is probably not the correct NaN propagation rule for
+ * this architecture.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /* Default NaN: sign bit set, most significant frac bit set */
+ set_float_default_nan_pattern(0b11000000, &env->fp_status);
+
#if defined(CONFIG_USER_ONLY)
/* start in user mode with interrupts enabled. */
mb_cpu_write_msr(env, MSR_EE | MSR_IE | MSR_VM | MSR_UM);
@@ -214,6 +237,8 @@ static void mb_disas_set_info(CPUState *cpu, disassemble_info *info)
{
info->mach = bfd_arch_microblaze;
info->print_insn = print_insn_microblaze;
+ info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG
+ : BFD_ENDIAN_LITTLE;
}
static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -238,6 +263,11 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
return;
}
+ gdb_register_coprocessor(cs, mb_cpu_gdb_read_stack_protect,
+ mb_cpu_gdb_write_stack_protect,
+ gdb_find_static_feature("microblaze-stack-protect.xml"),
+ 0);
+
qemu_init_vcpu(cs);
version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION;
@@ -310,27 +340,24 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
static void mb_cpu_initfn(Object *obj)
{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
- CPUMBState *env = &cpu->env;
-
- gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect,
- mb_cpu_gdb_write_stack_protect,
- gdb_find_static_feature("microblaze-stack-protect.xml"),
- 0);
-
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
-
#ifndef CONFIG_USER_ONLY
/* Inbound IRQ and FIR lines */
- qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dp, "ns_axi_dp", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ip, "ns_axi_ip", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dc, "ns_axi_dc", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ic, "ns_axi_ic", 1);
+ qdev_init_gpio_in(DEVICE(obj), microblaze_cpu_set_irq, 2);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dp, "ns_axi_dp", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ip, "ns_axi_ip", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dc, "ns_axi_dc", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ic, "ns_axi_ic", 1);
#endif
+
+ /* The restricted 'endianness' property is the equivalent of 'little-endian' */
+ object_property_add_alias(obj, "little-endian", obj, "endianness");
}
-static Property mb_properties[] = {
+static const Property mb_properties[] = {
+ /*
+ * The following properties are used by the Xilinx DTS conversion tool;
+ * do not rename them.
+ */
DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0),
DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot,
false),
@@ -387,7 +414,9 @@ static Property mb_properties[] = {
DEFINE_PROP_UINT8("pvr", MicroBlazeCPU, cfg.pvr, C_PVR_FULL),
DEFINE_PROP_UINT8("pvr-user1", MicroBlazeCPU, cfg.pvr_user1, 0),
DEFINE_PROP_UINT32("pvr-user2", MicroBlazeCPU, cfg.pvr_user2, 0),
- DEFINE_PROP_END_OF_LIST(),
+ /*
+ * End of the properties reserved by the Xilinx DTS conversion tool.
+ */
};
static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
@@ -399,28 +428,36 @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps mb_sysemu_ops = {
+ .has_work = mb_cpu_has_work,
.get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps mb_tcg_ops = {
+ /* MicroBlaze is always in-order. */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = true,
+
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
+ .get_tb_cpu_state = mb_get_tb_cpu_state,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,
+ .mmu_index = mb_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = mb_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.cpu_exec_halt = mb_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
-static void mb_cpu_class_init(ObjectClass *oc, void *data)
+static void mb_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -433,8 +470,6 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
&mcc->parent_phases);
cc->class_by_name = mb_cpu_class_by_name;
- cc->has_work = mb_cpu_has_work;
- cc->mmu_index = mb_cpu_mmu_index;
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
cc->get_pc = mb_cpu_get_pc;
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 3e5a3e5..3ce28b3 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -21,8 +21,10 @@
#define MICROBLAZE_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+#include "exec/cpu-interrupt.h"
typedef struct CPUArchState CPUMBState;
#if !defined(CONFIG_USER_ONLY)
@@ -231,8 +233,6 @@ typedef struct CPUArchState CPUMBState;
#define STREAM_CONTROL (1 << 3)
#define STREAM_NONBLOCK (1 << 4)
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
/* use-non-secure property masks */
#define USE_NON_SECURE_M_AXI_DP_MASK 0x1
#define USE_NON_SECURE_M_AXI_IP_MASK 0x2
@@ -248,7 +248,7 @@ struct CPUArchState {
uint32_t pc;
uint32_t msr; /* All bits of MSR except MSR[C] and MSR[CC] */
uint32_t msr_c; /* MSR[C], in low bit; other bits must be 0 */
- target_ulong ear;
+ uint64_t ear;
uint32_t esr;
uint32_t fsr;
uint32_t btr;
@@ -398,6 +398,8 @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
}
void mb_tcg_init(void);
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
@@ -407,17 +409,14 @@ void mb_tcg_init(void);
#define MMU_USER_IDX 2
/* See NB_MMU_MODES in cpu-defs.h. */
-#include "exec/cpu-all.h"
-
/* Ensure there is no overlap between the two masks. */
QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);
-static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
+static inline bool mb_cpu_is_big_endian(CPUState *cs)
{
- *pc = env->pc;
- *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
- *cs_base = (*flags & IMM_FLAG ? env->imm : 0);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+ return !cpu->cfg.endi;
}
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c
index 09d74e1..d493681 100644
--- a/target/microblaze/gdbstub.c
+++ b/target/microblaze/gdbstub.c
@@ -110,14 +110,9 @@ int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *mem_buf, int n)
int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
- CPUClass *cc = CPU_GET_CLASS(cs);
CPUMBState *env = cpu_env(cs);
uint32_t tmp;
- if (n > cc->gdb_num_core_regs) {
- return 0;
- }
-
tmp = ldl_p(mem_buf);
switch (n) {
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 5d3259c..ef0e2f9 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -20,12 +20,57 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+
+G_NORETURN
+static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ uint32_t esr, iflags;
+
+ /* Recover the pc and iflags from the corresponding insn_start. */
+ cpu_restore_state(cs, retaddr);
+ iflags = env->iflags;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n",
+ addr, env->pc, iflags);
+
+ esr = ESR_EC_UNALIGNED_DATA;
+ if (likely(iflags & ESR_ESS_FLAG)) {
+ esr |= iflags & ESR_ESS_MASK;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+ }
+
+ env->ear = addr;
+ env->esr = esr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit(cs);
+}
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ mb_unaligned_access_internal(cs, addr, retaddr);
+}
#ifndef CONFIG_USER_ONLY
+
+void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr)
+{
+ mb_unaligned_access_internal(env_cpu(env), addr, GETPC());
+}
+
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
MMUAccessType access_type)
{
@@ -267,31 +312,3 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- uint32_t esr, iflags;
-
- /* Recover the pc and iflags from the corresponding insn_start. */
- cpu_restore_state(cs, retaddr);
- iflags = cpu->env.iflags;
-
- qemu_log_mask(CPU_LOG_INT,
- "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
- (target_ulong)addr, cpu->env.pc, iflags);
-
- esr = ESR_EC_UNALIGNED_DATA;
- if (likely(iflags & ESR_ESS_FLAG)) {
- esr |= iflags & ESR_ESS_MASK;
- } else {
- qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
- }
-
- cpu->env.ear = addr;
- cpu->env.esr = esr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit(cs);
-}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index f740835..ef4fad9 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -20,12 +20,22 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
-#endif
-
DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
-
DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64)
+DEF_HELPER_FLAGS_2(lbuea, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_3(sbea, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+#endif
diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c
index 51705e4..a4cf38d 100644
--- a/target/microblaze/machine.c
+++ b/target/microblaze/machine.c
@@ -93,7 +93,7 @@ static const VMStateDescription vmstate_env = {
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, MicroBlazeCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState),
VMSTATE_END_OF_LIST()
};
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 2423ac6..8703ff5 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -21,8 +21,10 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
static unsigned int tlb_decode_size(unsigned int f)
{
@@ -170,7 +172,8 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
}
done:
qemu_log_mask(CPU_LOG_MMU,
- "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ "MMU vaddr=0x" TARGET_FMT_lx
+ " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
}
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index f637803..b8365b3 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -23,8 +23,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
@@ -383,6 +382,8 @@ void helper_stackprot(CPUMBState *env, target_ulong addr)
}
#if !defined(CONFIG_USER_ONLY)
+#include "system/memory.h"
+
/* Writes/reads to the MMU's special regs end up here. */
uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
{
@@ -394,38 +395,90 @@ void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
mmu_write(env, ext, rn, v);
}
+static void mb_transaction_failed_internal(CPUState *cs, hwaddr physaddr,
+ uint64_t addr, unsigned size,
+ MMUAccessType access_type,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ MicroBlazeCPU *cpu = env_archcpu(env);
+ const char *access_name = "INVALID";
+ bool take = env->msr & MSR_EE;
+ uint32_t esr = ESR_EC_DATA_BUS;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ access_name = "INST_FETCH";
+ esr = ESR_EC_INSN_BUS;
+ take &= cpu->cfg.iopb_bus_exception;
+ break;
+ case MMU_DATA_LOAD:
+ access_name = "DATA_LOAD";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ case MMU_DATA_STORE:
+ access_name = "DATA_STORE";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Transaction failed: addr 0x%" PRIx64
+ "physaddr 0x" HWADDR_FMT_plx " size %d access-type %s (%s)\n",
+ addr, physaddr, size, access_name,
+ take ? "TAKEN" : "DROPPED");
+
+ if (take) {
+ env->esr = esr;
+ env->ear = addr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+
void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr)
{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- CPUMBState *env = &cpu->env;
+ mb_transaction_failed_internal(cs, physaddr, addr, size,
+ access_type, retaddr);
+}
- qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
- " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n",
- addr, physaddr, size,
- access_type == MMU_INST_FETCH ? "INST_FETCH" :
- (access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
+#define LD_EA(NAME, TYPE, FUNC) \
+uint32_t HELPER(NAME)(CPUMBState *env, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ TYPE ret = FUNC(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_LOAD, GETPC()); \
+ } \
+ return ret; \
+}
- if (!(env->msr & MSR_EE)) {
- return;
- }
+LD_EA(lbuea, uint8_t, address_space_ldub)
+LD_EA(lhuea_be, uint16_t, address_space_lduw_be)
+LD_EA(lhuea_le, uint16_t, address_space_lduw_le)
+LD_EA(lwea_be, uint32_t, address_space_ldl_be)
+LD_EA(lwea_le, uint32_t, address_space_ldl_le)
+
+#define ST_EA(NAME, TYPE, FUNC) \
+void HELPER(NAME)(CPUMBState *env, uint32_t data, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ FUNC(cs->as, ea, data, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_STORE, GETPC()); \
+ } \
+}
- if (access_type == MMU_INST_FETCH) {
- if (!cpu->cfg.iopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_INSN_BUS;
- } else {
- if (!cpu->cfg.dopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_DATA_BUS;
- }
+ST_EA(sbea, uint8_t, address_space_stb)
+ST_EA(shea_be, uint16_t, address_space_stw_be)
+ST_EA(shea_le, uint16_t, address_space_stw_le)
+ST_EA(swea_be, uint32_t, address_space_stl_be)
+ST_EA(swea_le, uint32_t, address_space_stl_le)
- env->ear = addr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit_restore(cs, retaddr);
-}
#endif
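The LD_EA()/ST_EA() macros above stamp out one bus-access helper per width and endianness. As a worked example, this is what LD_EA(lbuea, uint8_t, address_space_ldub) expands to (reformatted); it relies on the surrounding op_helper.c context for its headers and GETPC() rather than being a standalone program.

uint32_t HELPER(lbuea)(CPUMBState *env, uint64_t ea)
{
    CPUState *cs = env_cpu(env);
    MemTxResult txres;
    uint8_t ret = address_space_ldub(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres);

    if (unlikely(txres != MEMTX_OK)) {
        mb_transaction_failed_internal(cs, ea, ea, sizeof(uint8_t),
                                       MMU_DATA_LOAD, GETPC());
    }
    return ret;
}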
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 4beaf69..5098a1d 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -20,12 +20,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "qemu/qemu-print.h"
#include "exec/log.h"
@@ -62,9 +63,6 @@ typedef struct DisasContext {
DisasContextBase base;
const MicroBlazeCPUConfig *cfg;
- TCGv_i32 r0;
- bool r0_set;
-
/* Decoder. */
uint32_t ext_imm;
unsigned int tb_flags;
@@ -178,14 +176,7 @@ static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (!dc->r0_set) {
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- tcg_gen_movi_i32(dc->r0, 0);
- dc->r0_set = true;
- }
- return dc->r0;
+ return tcg_constant_i32(0);
}
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
@@ -193,10 +184,7 @@ static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- return dc->r0;
+ return tcg_temp_new_i32();
}
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
@@ -309,11 +297,7 @@ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- TCGv_i32 zero = tcg_constant_i32(0);
- TCGv_i32 tmp = tcg_temp_new_i32();
-
- tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+ tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}
/* Input carry, but no output carry. */
@@ -542,12 +526,10 @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- TCGv_i32 zero = tcg_constant_i32(0);
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_not_i32(tmp, ina);
- tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+ tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}
/* No input or output carry. */
@@ -624,19 +606,18 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && rb) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
- tcg_gen_extu_i32_tl(ret, tmp);
+ ret = tcg_temp_new_i32();
+ tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
} else if (ra) {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+ ret = cpu_R[ra];
} else if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ ret = cpu_R[rb];
} else {
- tcg_gen_movi_tl(ret, 0);
+ ret = tcg_constant_i32(0);
}
if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
@@ -647,15 +628,16 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
- if (ra) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
- tcg_gen_extu_i32_tl(ret, tmp);
+ if (ra && imm) {
+ ret = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ret, cpu_R[ra], imm);
+ } else if (ra) {
+ ret = cpu_R[ra];
} else {
- tcg_gen_movi_tl(ret, (uint32_t)imm);
+ ret = tcg_constant_i32(imm);
}
if (ra == 1 && dc->cfg->stackprot) {
@@ -665,23 +647,23 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
}
#ifndef CONFIG_USER_ONLY
-static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
int addr_size = dc->cfg->addr_size;
- TCGv ret = tcg_temp_new();
+ TCGv_i64 ret = tcg_temp_new_i64();
if (addr_size == 32 || ra == 0) {
if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
} else {
- tcg_gen_movi_tl(ret, 0);
+ return tcg_constant_i64(0);
}
} else {
if (rb) {
tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
} else {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
- tcg_gen_shli_tl(ret, ret, 32);
+ tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
+ tcg_gen_shli_i64(ret, ret, 32);
}
if (addr_size < 64) {
/* Mask off out of range bits. */
@@ -705,13 +687,34 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
+
+static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
+ int rd, MemOp size, bool store)
+{
+ if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
+ TCGLabel *over = gen_new_label();
+
+ record_unaligned_ess(dc, rd, size, store);
+
+ tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
+ gen_helper_unaligned_access(tcg_env, ea);
+ gen_set_label(over);
+ }
+}
#endif
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ return dc->cfg->endi ? MO_LE : MO_BE;
+}
+
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
int mem_index, bool rev)
{
MemOp size = mop & MO_SIZE;
+ mop |= mo_endian(dc);
+
/*
* When doing reverse accesses we need to do two things.
*
@@ -763,10 +766,11 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -779,13 +783,13 @@ static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+ return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+ return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
@@ -794,29 +798,32 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
- return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+ return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+ return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+ return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
static bool trans_lwea(DisasContext *dc, arg_typea *arg)
@@ -825,17 +832,20 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
- return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+ return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
@@ -845,7 +855,8 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg)
/* lwx does not throw unaligned access errors, so force alignment */
tcg_gen_andi_tl(addr, addr, ~3);
- tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
+ tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
+ mo_endian(dc) | MO_UL);
tcg_gen_mov_tl(cpu_res_addr, addr);
if (arg->rd) {
@@ -862,6 +873,8 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
{
MemOp size = mop & MO_SIZE;
+ mop |= mo_endian(dc);
+
/*
* When doing reverse accesses we need to do two things.
*
@@ -913,10 +926,11 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -929,13 +943,13 @@ static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+ return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+ return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
static bool trans_shea(DisasContext *dc, arg_typea *arg)
@@ -944,29 +958,32 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
- return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+ return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+ return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+ return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
static bool trans_swea(DisasContext *dc, arg_typea *arg)
@@ -975,17 +992,20 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
- return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+ return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swx(DisasContext *dc, arg_typea *arg)
@@ -1014,7 +1034,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg)
tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
reg_for_write(dc, arg->rd),
- dc->mem_index, MO_TEUL);
+ dc->mem_index, mo_endian(dc) | MO_UL);
tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
@@ -1602,8 +1622,6 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
dc->cfg = &cpu->cfg;
dc->tb_flags = dc->base.tb->flags;
dc->ext_imm = dc->base.tb->cs_base;
- dc->r0 = NULL;
- dc->r0_set = false;
dc->mem_index = cpu_mmu_index(cs, false);
dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
dc->jmp_dest = -1;
@@ -1636,16 +1654,12 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
dc->tb_flags_to_set = 0;
- ir = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next);
+ ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
+ mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
if (!decode(dc, ir)) {
trap_illegal(dc, true);
}
- if (dc->r0) {
- dc->r0 = NULL;
- dc->r0_set = false;
- }
-
/* Discard the imm global when its contents cannot be used. */
if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
tcg_gen_discard_i32(cpu_imm);
@@ -1778,8 +1792,8 @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
@@ -1823,7 +1837,7 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
- "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
+ "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
env->esr, env->fsr, env->btr, env->edr,
env->ear, env->slr, env->shr);
diff --git a/target/mips/Kconfig b/target/mips/Kconfig
index eb19c94..876048b 100644
--- a/target/mips/Kconfig
+++ b/target/mips/Kconfig
@@ -1,6 +1,6 @@
config MIPS
bool
- select SEMIHOSTING
+ imply SEMIHOSTING if TCG
config MIPS64
bool
diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc
index fbf787d..922fc39 100644
--- a/target/mips/cpu-defs.c.inc
+++ b/target/mips/cpu-defs.c.inc
@@ -314,7 +314,7 @@ const mips_def_t mips_defs[] =
(0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13),
.SEGBITS = 32,
.PABITS = 32,
- .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP,
.mmu_type = MMU_TYPE_R4000,
},
{
@@ -478,14 +478,15 @@ const mips_def_t mips_defs[] =
(2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
(0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
.CP0_Config2 = MIPS_CONFIG2,
- .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) |
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) |
(2 << CP0C3_ISA) | (1 << CP0C3_ULRI) |
(1 << CP0C3_RXI) | (1U << CP0C3_M),
.CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) |
(3 << CP0C4_IE) | (1U << CP0C4_M),
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB),
- .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) |
- (1 << CP0C5_UFE),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_SBRI),
.CP0_LLAddr_rw_bitmask = 0,
.CP0_LLAddr_shift = 0,
.SYNCI_Step = 32,
@@ -499,6 +500,7 @@ const mips_def_t mips_defs[] =
(1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
.CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
.CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .MSAIR = 0x03 << MSAIR_ProcID,
.SEGBITS = 32,
.PABITS = 32,
.insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS,
@@ -541,7 +543,7 @@ const mips_def_t mips_defs[] =
.SEGBITS = 32,
.PABITS = 32,
.insn_flags = CPU_MIPS32R6 | ISA_NANOMIPS32 |
- ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | ASE_MT,
+ ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3,
.mmu_type = MMU_TYPE_R4000,
},
#if defined(TARGET_MIPS64)
@@ -661,7 +663,7 @@ const mips_def_t mips_defs[] =
.CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 40,
.PABITS = 36,
- .insn_flags = CPU_MIPS64R1 | ASE_MIPS3D,
+ .insn_flags = CPU_MIPS64R1,
.mmu_type = MMU_TYPE_R4000,
},
{
@@ -690,7 +692,7 @@ const mips_def_t mips_defs[] =
.CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 42,
.PABITS = 36,
- .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D,
+ .insn_flags = CPU_MIPS64R2,
.mmu_type = MMU_TYPE_R4000,
},
{
diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h
index 6f6ac16..58f4508 100644
--- a/target/mips/cpu-param.h
+++ b/target/mips/cpu-param.h
@@ -1,17 +1,12 @@
/*
* MIPS cpu parameters for qemu.
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef MIPS_CPU_PARAM_H
#define MIPS_CPU_PARAM_H
-#ifdef TARGET_MIPS64
-# define TARGET_LONG_BITS 64
-#else
-# define TARGET_LONG_BITS 32
-#endif
#ifdef TARGET_ABI_MIPSN64
#define TARGET_PHYS_ADDR_SPACE_BITS 48
#define TARGET_VIRT_ADDR_SPACE_BITS 48
@@ -23,13 +18,8 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#endif
-#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_BITS 12
-#else
-#define TARGET_PAGE_BITS_VARY
-#define TARGET_PAGE_BITS_MIN 12
-#endif
-#define TCG_GUEST_DEFAULT_MO (0)
+#define TARGET_INSN_START_EXTRA_WORDS 2
#endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 89655b1..1f6c41f 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -27,13 +27,14 @@
#include "internal.h"
#include "kvm_mips.h"
#include "qemu/module.h"
-#include "sysemu/kvm.h"
-#include "sysemu/qtest.h"
-#include "exec/exec-all.h"
+#include "system/kvm.h"
+#include "system/qtest.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-clock.h"
-#include "semihosting/semihost.h"
#include "fpu_helper.h"
+#ifndef CONFIG_USER_ONLY
+#include "semihosting/semihost.h"
+#endif
const char regnames[32][3] = {
"r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
@@ -132,6 +133,7 @@ static vaddr mips_cpu_get_pc(CPUState *cs)
return cpu->env.active_tc.PC;
}
+#if !defined(CONFIG_USER_ONLY)
static bool mips_cpu_has_work(CPUState *cs)
{
CPUMIPSState *env = cpu_env(cs);
@@ -177,11 +179,7 @@ static bool mips_cpu_has_work(CPUState *cs)
}
return has_work;
}
-
-static int mips_cpu_mmu_index(CPUState *cs, bool ifunc)
-{
- return mips_env_mmu_index(cpu_env(cs));
-}
+#endif /* !CONFIG_USER_ONLY */
#include "cpu-defs.c.inc"
@@ -200,10 +198,8 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type)
/* Reset registers to their default values */
env->CP0_PRid = env->cpu_model->CP0_PRid;
- env->CP0_Config0 = env->cpu_model->CP0_Config0;
-#if TARGET_BIG_ENDIAN
- env->CP0_Config0 |= (1 << CP0C0_BE);
-#endif
+ env->CP0_Config0 = deposit32(env->cpu_model->CP0_Config0,
+ CP0C0_BE, 1, cpu->is_big_endian);
env->CP0_Config1 = env->cpu_model->CP0_Config1;
env->CP0_Config2 = env->cpu_model->CP0_Config2;
env->CP0_Config3 = env->cpu_model->CP0_Config3;
@@ -409,18 +405,17 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type)
}
msa_reset(env);
+ fp_reset(env);
compute_hflags(env);
- restore_fp_status(env);
restore_pamask(env);
cs->exception_index = EXCP_NONE;
+#ifndef CONFIG_USER_ONLY
if (semihosting_get_argc()) {
/* UHI interface can be used to obtain argc and argv */
env->active_tc.gpr[4] = -1;
}
-
-#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_mips_reset_vcpu(cpu);
}
@@ -430,13 +425,13 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type)
static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
if (!(cpu_env(s)->insn_flags & ISA_NANOMIPS32)) {
-#if TARGET_BIG_ENDIAN
- info->print_insn = print_insn_big_mips;
-#else
- info->print_insn = print_insn_little_mips;
-#endif
+ info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG
+ : BFD_ENDIAN_LITTLE;
+ info->print_insn = TARGET_BIG_ENDIAN ? print_insn_big_mips
+ : print_insn_little_mips;
} else {
info->print_insn = print_insn_nanomips;
+ info->endian = BFD_ENDIAN_LITTLE;
}
}
@@ -536,26 +531,60 @@ static ObjectClass *mips_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps mips_sysemu_ops = {
+ .has_work = mips_cpu_has_work,
.get_phys_page_debug = mips_cpu_get_phys_page_debug,
.legacy_vmsd = &vmstate_mips_cpu,
};
#endif
+static const Property mips_cpu_properties[] = {
+ DEFINE_PROP_BOOL("big-endian", MIPSCPU, is_big_endian, TARGET_BIG_ENDIAN),
+};
+
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
-/*
- * NB: cannot be const, as some elements are changed for specific
- * mips hardware (see hw/mips/jazz.c).
- */
+#include "accel/tcg/cpu-ops.h"
+
+static int mips_cpu_mmu_index(CPUState *cs, bool ifunc)
+{
+ return mips_env_mmu_index(cpu_env(cs));
+}
+
+static TCGTBCPUState mips_get_tb_cpu_state(CPUState *cs)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->active_tc.PC,
+ .flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
+ MIPS_HFLAG_HWRENA_ULR),
+ };
+}
+
+#ifndef CONFIG_USER_ONLY
+static vaddr mips_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return cpu_env(cs)->hflags & MIPS_HFLAG_AWRAP ? (int32_t)result : result;
+}
+#endif
+
static const TCGCPUOps mips_tcg_ops = {
+ .mttcg_supported = TARGET_LONG_BITS == 32,
+ .guest_default_memory_order = 0,
+
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
+ .get_tb_cpu_state = mips_get_tb_cpu_state,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,
+ .mmu_index = mips_cpu_mmu_index,
#if !defined(CONFIG_USER_ONLY)
.tlb_fill = mips_cpu_tlb_fill,
+ .pointer_wrap = mips_pointer_wrap,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.cpu_exec_halt = mips_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,
@@ -564,21 +593,20 @@ static const TCGCPUOps mips_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void mips_cpu_class_init(ObjectClass *c, void *data)
+static void mips_cpu_class_init(ObjectClass *c, const void *data)
{
MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
DeviceClass *dc = DEVICE_CLASS(c);
ResettableClass *rc = RESETTABLE_CLASS(c);
+ device_class_set_props(dc, mips_cpu_properties);
device_class_set_parent_realize(dc, mips_cpu_realizefn,
&mcc->parent_realize);
resettable_class_set_parent_phases(rc, NULL, mips_cpu_reset_hold, NULL,
&mcc->parent_phases);
cc->class_by_name = mips_cpu_class_by_name;
- cc->has_work = mips_cpu_has_work;
- cc->mmu_index = mips_cpu_mmu_index;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->get_pc = mips_cpu_get_pc;
@@ -606,7 +634,7 @@ static const TypeInfo mips_cpu_type_info = {
.class_init = mips_cpu_class_init,
};
-static void mips_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+static void mips_cpu_cpudef_class_init(ObjectClass *oc, const void *data)
{
MIPSCPUClass *mcc = MIPS_CPU_CLASS(oc);
mcc->cpu_def = data;
@@ -619,10 +647,10 @@ static void mips_register_cpudef_type(const struct mips_def_t *def)
.name = typename,
.parent = TYPE_MIPS_CPU,
.class_init = mips_cpu_cpudef_class_init,
- .class_data = (void *)def,
+ .class_data = def,
};
- type_register(&ti);
+ type_register_static(&ti);
g_free(typename);
}
@@ -639,12 +667,15 @@ static void mips_cpu_register_types(void)
type_init(mips_cpu_register_types)
/* Could be used by generic CPU object */
-MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk)
+MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk,
+ bool is_big_endian)
{
DeviceState *cpu;
- cpu = DEVICE(object_new(cpu_type));
+ cpu = qdev_new(cpu_type);
qdev_connect_clock_in(cpu, "clk-in", cpu_refclk);
+ object_property_set_bool(OBJECT(cpu), "big-endian", is_big_endian,
+ &error_abort);
qdev_realize(cpu, NULL, &error_abort);
return MIPS_CPU(cpu);
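
The mips_pointer_wrap() hook added above confines computed addresses to the 32-bit compatibility segments when MIPS_HFLAG_AWRAP is set, by sign-extending the low 32 bits of the result. A standalone sketch of what that cast does — wrap32() is illustrative only, not a QEMU function:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Models the "(int32_t)result" in mips_pointer_wrap(): keep the low
     * 32 bits and sign-extend them back to a 64-bit virtual address. */
    static uint64_t wrap32(uint64_t result)
    {
        return (uint64_t)(int64_t)(int32_t)result;
    }

    int main(void)
    {
        /* A carry out of bit 31 is folded back by the sign extension. */
        printf("%016" PRIx64 "\n", wrap32(0x0000000190000000ULL)); /* ffffffff90000000 */
        printf("%016" PRIx64 "\n", wrap32(0x0000000012345678ULL)); /* 0000000012345678 */
        return 0;
    }
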
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index 3e906a1..5cd4c6c 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -2,9 +2,11 @@
#define MIPS_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#ifndef CONFIG_USER_ONLY
-#include "exec/memory.h"
+#include "system/memory.h"
#endif
#include "fpu/softfloat-types.h"
#include "hw/clock.h"
@@ -98,8 +100,6 @@ struct CPUMIPSFPUContext {
#define FP_UNIMPLEMENTED 32
};
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
int32_t CP0_MVPControl;
@@ -530,7 +530,6 @@ typedef struct CPUArchState {
CPUMIPSFPUContext active_fpu;
uint32_t current_tc;
- uint32_t current_fpu;
uint32_t SEGBITS;
uint32_t PABITS;
@@ -1209,6 +1208,9 @@ struct ArchCPU {
Clock *clock;
Clock *count_div; /* Divider for CP0_Count clock */
+
+ /* Properties */
+ bool is_big_endian;
};
/**
@@ -1254,8 +1256,6 @@ static inline int mips_env_mmu_index(CPUMIPSState *env)
return hflags_mmu_index(env->hflags);
}
-#include "exec/cpu-all.h"
-
/* Exceptions */
enum {
EXCP_NONE = -1,
@@ -1316,6 +1316,12 @@ bool cpu_type_supports_cps_smp(const char *cpu_type);
bool cpu_supports_isa(const CPUMIPSState *env, uint64_t isa_mask);
bool cpu_type_supports_isa(const char *cpu_type, uint64_t isa);
+/* Check presence of MIPS-3D ASE */
+static inline bool ase_3d_available(const CPUMIPSState *env)
+{
+ return env->active_fpu.fcr0 & (1 << FCR0_3D);
+}
+
/* Check presence of MSA implementation */
static inline bool ase_msa_available(CPUMIPSState *env)
{
@@ -1360,25 +1366,18 @@ void cpu_mips_clock_init(MIPSCPU *cpu);
/* helper.c */
target_ulong exception_resume_pc(CPUMIPSState *env);
-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->active_tc.PC;
- *cs_base = 0;
- *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
- MIPS_HFLAG_HWRENA_ULR);
-}
-
/**
* mips_cpu_create_with_clock:
* @typename: a MIPS CPU type.
* @cpu_refclk: this cpu input clock (an output clock of another device)
+ * @is_big_endian: whether this CPU is configured in big endianness
*
* Instantiates a MIPS CPU, set the input clock of the CPU to @cpu_refclk,
* then realizes the CPU.
*
* Returns: A #CPUState or %NULL if an error occurred.
*/
-MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk);
+MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk,
+ bool is_big_endian);
#endif /* MIPS_CPU_H */
diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h
index ad1116e..08fb409 100644
--- a/target/mips/fpu_helper.h
+++ b/target/mips/fpu_helper.h
@@ -28,6 +28,8 @@ static inline void restore_flush_mode(CPUMIPSState *env)
static inline void restore_snan_bit_mode(CPUMIPSState *env)
{
bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008);
+ FloatInfZeroNaNRule izn_rule;
+ Float3NaNPropRule nan3_rule;
/*
* With nan2008, SNaNs are silenced in the usual way.
@@ -35,6 +37,24 @@ static inline void restore_snan_bit_mode(CPUMIPSState *env)
*/
set_snan_bit_is_one(!nan2008, &env->active_fpu.fp_status);
set_default_nan_mode(!nan2008, &env->active_fpu.fp_status);
+ /*
+ * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
+ * case sets InvalidOp and returns the default NaN.
+ * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
+ * case sets InvalidOp and returns the input value 'c'.
+ */
+ izn_rule = nan2008 ? float_infzeronan_dnan_never : float_infzeronan_dnan_always;
+ set_float_infzeronan_rule(izn_rule, &env->active_fpu.fp_status);
+ nan3_rule = nan2008 ? float_3nan_prop_s_cab : float_3nan_prop_s_abc;
+ set_float_3nan_prop_rule(nan3_rule, &env->active_fpu.fp_status);
+ /*
+ * With nan2008, the default NaN value has the sign bit clear and the
+ * frac msb set; with the older mode, the sign bit is clear, and all
+ * frac bits except the msb are set.
+ */
+ set_float_default_nan_pattern(nan2008 ? 0b01000000 : 0b00111111,
+ &env->active_fpu.fp_status);
+
}
static inline void restore_fp_status(CPUMIPSState *env)
@@ -44,6 +64,34 @@ static inline void restore_fp_status(CPUMIPSState *env)
restore_snan_bit_mode(env);
}
+static inline void fp_reset(CPUMIPSState *env)
+{
+ restore_fp_status(env);
+
+ /*
+ * According to MIPS specifications, if one of the two operands is
+ * a sNaN, a new qNaN has to be generated. This is done in
+ * floatXX_silence_nan(). For qNaN inputs the specification
+ * says: "When possible, this QNaN result is one of the operand QNaN
+ * values." In practice it seems that most implementations choose
+ * the first operand if both operands are qNaN. In short this gives
+ * the following rules:
+ * 1. A if it is signaling
+ * 2. B if it is signaling
+ * 3. A (quiet)
+ * 4. B (quiet)
+ * A signaling NaN is always silenced before returning it.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab,
+ &env->active_fpu.fp_status);
+ /*
+ * TODO: the spec doesn't say clearly whether FTZ happens before
+ * or after rounding for normal FPU operations.
+ */
+ set_float_ftz_detection(float_ftz_before_rounding,
+ &env->active_fpu.fp_status);
+}
+
/* MSA */
enum CPUMIPSMSADataFormat {
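
The fp_reset() comment above enumerates the operand-selection order that float_2nan_prop_s_ab encodes for two-operand NaN propagation. Purely as an illustration of those four rules — pick2nan_s_ab() and its boolean arguments are hypothetical helpers, not part of QEMU's softfloat API — the choice reduces to:

    #include <stdbool.h>

    /* Sketch of the ordering described in the comment above: prefer a
     * signaling NaN, scanning operand A then B; otherwise take the first
     * quiet NaN in the same order.  The chosen NaN is still silenced by the
     * caller before being returned. */
    typedef enum { PICK_A, PICK_B } NaNPick;

    static NaNPick pick2nan_s_ab(bool a_is_snan, bool b_is_snan, bool a_is_qnan)
    {
        if (a_is_snan) {               /* rule 1: A if it is signaling */
            return PICK_A;
        }
        if (b_is_snan) {               /* rule 2: B if it is signaling */
            return PICK_B;
        }
        return a_is_qnan ? PICK_A      /* rule 3: quiet A */
                         : PICK_B;     /* rule 4: quiet B */
    }
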
diff --git a/target/mips/helper.h b/target/mips/helper.h
index 0f8462f..7e40041 100644
--- a/target/mips/helper.h
+++ b/target/mips/helper.h
@@ -594,7 +594,7 @@ DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env)
DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env)
#ifndef CONFIG_USER_ONLY
-#include "tcg/sysemu_helper.h.inc"
+#include "tcg/system_helper.h.inc"
#endif /* !CONFIG_USER_ONLY */
#include "tcg/msa_helper.h.inc"
diff --git a/target/mips/internal.h b/target/mips/internal.h
index a9a22ea..28eb289 100644
--- a/target/mips/internal.h
+++ b/target/mips/internal.h
@@ -162,8 +162,6 @@ void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
extern const VMStateDescription vmstate_mips_cpu;
-#endif /* !CONFIG_USER_ONLY */
-
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
return (env->CP0_Status & (1 << CP0St_IE)) &&
@@ -206,6 +204,8 @@ static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
return r;
}
+#endif /* !CONFIG_USER_ONLY */
+
void msa_reset(CPUMIPSState *env);
/* cp0_timer.c */
@@ -225,6 +225,16 @@ static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value)
}
}
+static inline bool mips_env_is_bigendian(CPUMIPSState *env)
+{
+ return extract32(env->CP0_Config0, CP0C0_BE, 1);
+}
+
+static inline MemOp mo_endian_env(CPUMIPSState *env)
+{
+ return mips_env_is_bigendian(env) ? MO_BE : MO_LE;
+}
+
static inline void restore_pamask(CPUMIPSState *env)
{
if (env->hflags & MIPS_HFLAG_ELPA) {
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index a631ab5..ec53acb 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -18,9 +18,9 @@
#include "internal.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
-#include "sysemu/runstate.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
#include "kvm_mips.h"
#include "hw/boards.h"
#include "fpu_helper.h"
@@ -61,6 +61,11 @@ int kvm_arch_irqchip_create(KVMState *s)
return 0;
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
CPUMIPSState *env = cpu_env(cs);
@@ -1172,7 +1177,7 @@ static int kvm_mips_get_cp0_registers(CPUState *cs)
return ret;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
CPUMIPSState *env = cpu_env(cs);
struct kvm_regs regs;
@@ -1207,7 +1212,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
CPUMIPSState *env = cpu_env(cs);
int ret = 0;
diff --git a/target/mips/meson.build b/target/mips/meson.build
index a26d1e1..247979a 100644
--- a/target/mips/meson.build
+++ b/target/mips/meson.build
@@ -9,7 +9,7 @@ mips_ss.add(files(
))
if have_system
- subdir('sysemu')
+ subdir('system')
endif
if 'CONFIG_TCG' in config_all_accel
diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h
index a6cebe0..9d4d292 100644
--- a/target/mips/mips-defs.h
+++ b/target/mips/mips-defs.h
@@ -26,12 +26,10 @@
* bits 24-39: MIPS ASEs
*/
#define ASE_MIPS16 0x0000000001000000ULL
-#define ASE_MIPS3D 0x0000000002000000ULL
#define ASE_MDMX 0x0000000004000000ULL
#define ASE_DSP 0x0000000008000000ULL
#define ASE_DSP_R2 0x0000000010000000ULL
#define ASE_DSP_R3 0x0000000020000000ULL
-#define ASE_MT 0x0000000040000000ULL
#define ASE_SMARTMIPS 0x0000000080000000ULL
#define ASE_MICROMIPS 0x0000000100000000ULL
/*
diff --git a/target/mips/msa.c b/target/mips/msa.c
index 61f1a9a..32c6acb 100644
--- a/target/mips/msa.c
+++ b/target/mips/msa.c
@@ -48,6 +48,35 @@ void msa_reset(CPUMIPSState *env)
/* tininess detected after rounding.*/
set_float_detect_tininess(float_tininess_after_rounding,
&env->active_tc.msa_fp_status);
+ /*
+ * MSACSR.FS detects tiny results to flush to zero before rounding
+ * (per "MIPS Architecture for Programmers Volume IV-j: The MIPS64 SIMD
+ * Architecture Module, Revision 1.1" section 3.5.4), even though it
+ * detects tininess after rounding for underflow purposes (section 3.4.2
+ * table 3.3).
+ */
+ set_float_ftz_detection(float_ftz_before_rounding,
+ &env->active_tc.msa_fp_status);
+
+ /*
+ * According to MIPS specifications, if one of the two operands is
+ * a sNaN, a new qNaN has to be generated. This is done in
+ * floatXX_silence_nan(). For qNaN inputs the specification
+ * says: "When possible, this QNaN result is one of the operand QNaN
+ * values." In practice it seems that most implementations choose
+ * the first operand if both operands are qNaN. In short this gives
+ * the following rules:
+ * 1. A if it is signaling
+ * 2. B if it is signaling
+ * 3. A (quiet)
+ * 4. B (quiet)
+ * A signaling NaN is always silenced before returning it.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab,
+ &env->active_tc.msa_fp_status);
+
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab,
+ &env->active_tc.msa_fp_status);
/* clear float_status exception flags */
set_float_exception_flags(0, &env->active_tc.msa_fp_status);
@@ -57,4 +86,11 @@ void msa_reset(CPUMIPSState *env)
/* set proper signaling bit meaning ("1" means "quiet") */
set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
+
+ /* Inf * 0 + NaN returns the input NaN */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never,
+ &env->active_tc.msa_fp_status);
+ /* Default NaN: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000,
+ &env->active_tc.msa_fp_status);
}
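
msa_reset() above additionally installs float_3nan_prop_s_cab for fused operations. Assuming the enum name follows the same convention as the two-operand rule just documented — prefer a signaling NaN, scanning the operands in the order c, a, b, then fall back to the first quiet NaN in that order — the selection amounts to the sketch below; pick3nan_s_cab() and its flag arrays are illustrative, not QEMU API:

    #include <stdbool.h>

    /* Hedged sketch of a "prefer sNaN, operand order c, a, b" pick, matching
     * the assumed meaning of float_3nan_prop_s_cab; not the softfloat code. */
    typedef enum { OP_A, OP_B, OP_C } Operand;

    static Operand pick3nan_s_cab(const bool snan[3], const bool qnan[3])
    {
        static const Operand order[3] = { OP_C, OP_A, OP_B };

        for (int i = 0; i < 3; i++) {          /* signaling NaNs first */
            if (snan[order[i]]) {
                return order[i];
            }
        }
        for (int i = 0; i < 3; i++) {          /* then quiet NaNs */
            if (qnan[order[i]]) {
                return order[i];
            }
        }
        return OP_A;   /* unreachable if at least one operand is a NaN */
    }
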
diff --git a/target/mips/sysemu/cp0.c b/target/mips/sysemu/cp0.c
deleted file mode 100644
index bae37f5..0000000
--- a/target/mips/sysemu/cp0.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * QEMU MIPS CPU
- *
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "internal.h"
-#include "exec/exec-all.h"
-
-/* Called for updates to CP0_Status. */
-void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
-{
- int32_t tcstatus, *tcst;
- uint32_t v = cpu->CP0_Status;
- uint32_t cu, mx, asid, ksu;
- uint32_t mask = ((1 << CP0TCSt_TCU3)
- | (1 << CP0TCSt_TCU2)
- | (1 << CP0TCSt_TCU1)
- | (1 << CP0TCSt_TCU0)
- | (1 << CP0TCSt_TMX)
- | (3 << CP0TCSt_TKSU)
- | (0xff << CP0TCSt_TASID));
-
- cu = (v >> CP0St_CU0) & 0xf;
- mx = (v >> CP0St_MX) & 0x1;
- ksu = (v >> CP0St_KSU) & 0x3;
- asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
-
- tcstatus = cu << CP0TCSt_TCU0;
- tcstatus |= mx << CP0TCSt_TMX;
- tcstatus |= ksu << CP0TCSt_TKSU;
- tcstatus |= asid;
-
- if (tc == cpu->current_tc) {
- tcst = &cpu->active_tc.CP0_TCStatus;
- } else {
- tcst = &cpu->tcs[tc].CP0_TCStatus;
- }
-
- *tcst &= ~mask;
- *tcst |= tcstatus;
- compute_hflags(cpu);
-}
-
-void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
-{
- uint32_t mask = env->CP0_Status_rw_bitmask;
- target_ulong old = env->CP0_Status;
-
- if (env->insn_flags & ISA_MIPS_R6) {
- bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
-#if defined(TARGET_MIPS64)
- uint32_t ksux = (1 << CP0St_KX) & val;
- ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
- ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
- val = (val & ~(7 << CP0St_UX)) | ksux;
-#endif
- if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
- mask &= ~(3 << CP0St_KSU);
- }
- mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
- }
-
- env->CP0_Status = (old & ~mask) | (val & mask);
-#if defined(TARGET_MIPS64)
- if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
- /* Access to at least one of the 64-bit segments has been disabled */
- tlb_flush(env_cpu(env));
- }
-#endif
- if (ase_mt_available(env)) {
- sync_c0_status(env, env, env->current_tc);
- } else {
- compute_hflags(env);
- }
-}
-
-void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
-{
- uint32_t mask = 0x00C00300;
- uint32_t old = env->CP0_Cause;
- int i;
-
- if (env->insn_flags & ISA_MIPS_R2) {
- mask |= 1 << CP0Ca_DC;
- }
- if (env->insn_flags & ISA_MIPS_R6) {
- mask &= ~((1 << CP0Ca_WP) & val);
- }
-
- env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
-
- if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
- if (env->CP0_Cause & (1 << CP0Ca_DC)) {
- cpu_mips_stop_count(env);
- } else {
- cpu_mips_start_count(env);
- }
- }
-
- /* Set/reset software interrupts */
- for (i = 0 ; i < 2 ; i++) {
- if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
- cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
- }
- }
-}
diff --git a/target/mips/sysemu/cp0_timer.c b/target/mips/sysemu/cp0_timer.c
deleted file mode 100644
index 62de502..0000000
--- a/target/mips/sysemu/cp0_timer.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * QEMU MIPS timer support
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/irq.h"
-#include "qemu/timer.h"
-#include "sysemu/kvm.h"
-#include "internal.h"
-
-/* MIPS R4K timer */
-static uint32_t cpu_mips_get_count_val(CPUMIPSState *env)
-{
- int64_t now_ns;
- now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- return env->CP0_Count +
- (uint32_t)clock_ns_to_ticks(env->count_clock, now_ns);
-}
-
-static void cpu_mips_timer_update(CPUMIPSState *env)
-{
- uint64_t now_ns, next_ns;
- uint32_t wait;
-
- now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- wait = env->CP0_Compare - cpu_mips_get_count_val(env);
- /* Clamp interval to overflow if virtual time had not progressed */
- if (!wait) {
- wait = UINT32_MAX;
- }
- next_ns = now_ns + clock_ticks_to_ns(env->count_clock, wait);
- timer_mod(env->timer, next_ns);
-}
-
-/* Expire the timer. */
-static void cpu_mips_timer_expire(CPUMIPSState *env)
-{
- cpu_mips_timer_update(env);
- if (env->insn_flags & ISA_MIPS_R2) {
- env->CP0_Cause |= 1 << CP0Ca_TI;
- }
- qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
-}
-
-uint32_t cpu_mips_get_count(CPUMIPSState *env)
-{
- if (env->CP0_Cause & (1 << CP0Ca_DC)) {
- return env->CP0_Count;
- } else {
- uint64_t now_ns;
-
- now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- if (timer_pending(env->timer)
- && timer_expired(env->timer, now_ns)) {
- /* The timer has already expired. */
- cpu_mips_timer_expire(env);
- }
-
- return cpu_mips_get_count_val(env);
- }
-}
-
-void cpu_mips_store_count(CPUMIPSState *env, uint32_t count)
-{
- /*
- * This gets called from cpu_state_reset(), potentially before timer init.
- * So env->timer may be NULL, which is also the case with KVM enabled so
- * treat timer as disabled in that case.
- */
- if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) {
- env->CP0_Count = count;
- } else {
- /* Store new count register */
- env->CP0_Count = count - (uint32_t)clock_ns_to_ticks(env->count_clock,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
- /* Update timer timer */
- cpu_mips_timer_update(env);
- }
-}
-
-void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value)
-{
- env->CP0_Compare = value;
- if (!(env->CP0_Cause & (1 << CP0Ca_DC))) {
- cpu_mips_timer_update(env);
- }
- if (env->insn_flags & ISA_MIPS_R2) {
- env->CP0_Cause &= ~(1 << CP0Ca_TI);
- }
- qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
-}
-
-void cpu_mips_start_count(CPUMIPSState *env)
-{
- cpu_mips_store_count(env, env->CP0_Count);
-}
-
-void cpu_mips_stop_count(CPUMIPSState *env)
-{
- /* Store the current value */
- env->CP0_Count += (uint32_t)clock_ns_to_ticks(env->count_clock,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
-}
-
-static void mips_timer_cb(void *opaque)
-{
- CPUMIPSState *env;
-
- env = opaque;
-
- if (env->CP0_Cause & (1 << CP0Ca_DC)) {
- return;
- }
-
- cpu_mips_timer_expire(env);
-}
-
-void cpu_mips_clock_init(MIPSCPU *cpu)
-{
- CPUMIPSState *env = &cpu->env;
-
- /*
- * If we're in KVM mode, don't create the periodic timer, that is handled in
- * kernel.
- */
- if (!kvm_enabled()) {
- env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env);
- }
-}
diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c
deleted file mode 100644
index 213fd63..0000000
--- a/target/mips/sysemu/machine.c
+++ /dev/null
@@ -1,333 +0,0 @@
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "internal.h"
-#include "migration/cpu.h"
-#include "fpu_helper.h"
-
-static int cpu_post_load(void *opaque, int version_id)
-{
- MIPSCPU *cpu = opaque;
- CPUMIPSState *env = &cpu->env;
-
- restore_fp_status(env);
- restore_msa_fp_status(env);
- compute_hflags(env);
- restore_pamask(env);
-
- return 0;
-}
-
-/* FPU state */
-
-static int get_fpr(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field)
-{
- int i;
- fpr_t *v = pv;
- /* Restore entire MSA vector register */
- for (i = 0; i < MSA_WRLEN / 64; i++) {
- qemu_get_sbe64s(f, &v->wr.d[i]);
- }
- return 0;
-}
-
-static int put_fpr(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field, JSONWriter *vmdesc)
-{
- int i;
- fpr_t *v = pv;
- /* Save entire MSA vector register */
- for (i = 0; i < MSA_WRLEN / 64; i++) {
- qemu_put_sbe64s(f, &v->wr.d[i]);
- }
-
- return 0;
-}
-
-static const VMStateInfo vmstate_info_fpr = {
- .name = "fpr",
- .get = get_fpr,
- .put = put_fpr,
-};
-
-#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
-
-#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
- VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
-
-static const VMStateField vmstate_fpu_fields[] = {
- VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32),
- VMSTATE_UINT32(fcr0, CPUMIPSFPUContext),
- VMSTATE_UINT32(fcr31, CPUMIPSFPUContext),
- VMSTATE_END_OF_LIST()
-};
-
-static const VMStateDescription vmstate_fpu = {
- .name = "cpu/fpu",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = vmstate_fpu_fields
-};
-
-static const VMStateDescription vmstate_inactive_fpu = {
- .name = "cpu/inactive_fpu",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = vmstate_fpu_fields
-};
-
-/* TC state */
-
-static const VMStateField vmstate_tc_fields[] = {
- VMSTATE_UINTTL_ARRAY(gpr, TCState, 32),
-#if defined(TARGET_MIPS64)
- VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32),
-#endif /* TARGET_MIPS64 */
- VMSTATE_UINTTL(PC, TCState),
- VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC),
- VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC),
- VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC),
- VMSTATE_UINTTL(DSPControl, TCState),
- VMSTATE_INT32(CP0_TCStatus, TCState),
- VMSTATE_INT32(CP0_TCBind, TCState),
- VMSTATE_UINTTL(CP0_TCHalt, TCState),
- VMSTATE_UINTTL(CP0_TCContext, TCState),
- VMSTATE_UINTTL(CP0_TCSchedule, TCState),
- VMSTATE_UINTTL(CP0_TCScheFBack, TCState),
- VMSTATE_INT32(CP0_Debug_tcstatus, TCState),
- VMSTATE_UINTTL(CP0_UserLocal, TCState),
- VMSTATE_INT32(msacsr, TCState),
- VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1),
- VMSTATE_UINTTL(mxu_cr, TCState),
- VMSTATE_END_OF_LIST()
-};
-
-static const VMStateDescription vmstate_tc = {
- .name = "cpu/tc",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = vmstate_tc_fields
-};
-
-static const VMStateDescription vmstate_inactive_tc = {
- .name = "cpu/inactive_tc",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = vmstate_tc_fields
-};
-
-/* MVP state */
-
-static const VMStateDescription vmstate_mvp = {
- .name = "cpu/mvp",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext),
- VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext),
- VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext),
- VMSTATE_END_OF_LIST()
- }
-};
-
-/* TLB state */
-
-static int get_tlb(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field)
-{
- r4k_tlb_t *v = pv;
- uint16_t flags;
-
- qemu_get_betls(f, &v->VPN);
- qemu_get_be32s(f, &v->PageMask);
- qemu_get_be16s(f, &v->ASID);
- qemu_get_be16s(f, &flags);
- v->G = (flags >> 10) & 1;
- v->C0 = (flags >> 7) & 3;
- v->C1 = (flags >> 4) & 3;
- v->V0 = (flags >> 3) & 1;
- v->V1 = (flags >> 2) & 1;
- v->D0 = (flags >> 1) & 1;
- v->D1 = (flags >> 0) & 1;
- v->EHINV = (flags >> 15) & 1;
- v->RI1 = (flags >> 14) & 1;
- v->RI0 = (flags >> 13) & 1;
- v->XI1 = (flags >> 12) & 1;
- v->XI0 = (flags >> 11) & 1;
- qemu_get_be64s(f, &v->PFN[0]);
- qemu_get_be64s(f, &v->PFN[1]);
-
- return 0;
-}
-
-static int put_tlb(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field, JSONWriter *vmdesc)
-{
- r4k_tlb_t *v = pv;
-
- uint16_t asid = v->ASID;
- uint16_t flags = ((v->EHINV << 15) |
- (v->RI1 << 14) |
- (v->RI0 << 13) |
- (v->XI1 << 12) |
- (v->XI0 << 11) |
- (v->G << 10) |
- (v->C0 << 7) |
- (v->C1 << 4) |
- (v->V0 << 3) |
- (v->V1 << 2) |
- (v->D0 << 1) |
- (v->D1 << 0));
-
- qemu_put_betls(f, &v->VPN);
- qemu_put_be32s(f, &v->PageMask);
- qemu_put_be16s(f, &asid);
- qemu_put_be16s(f, &flags);
- qemu_put_be64s(f, &v->PFN[0]);
- qemu_put_be64s(f, &v->PFN[1]);
-
- return 0;
-}
-
-static const VMStateInfo vmstate_info_tlb = {
- .name = "tlb_entry",
- .get = get_tlb,
- .put = put_tlb,
-};
-
-#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t)
-
-#define VMSTATE_TLB_ARRAY(_f, _s, _n) \
- VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
-
-static const VMStateDescription vmstate_tlb = {
- .name = "cpu/tlb",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext),
- VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext),
- VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX),
- VMSTATE_END_OF_LIST()
- }
-};
-
-/* MIPS CPU state */
-
-const VMStateDescription vmstate_mips_cpu = {
- .name = "cpu",
- .version_id = 21,
- .minimum_version_id = 21,
- .post_load = cpu_post_load,
- .fields = (const VMStateField[]) {
- /* Active TC */
- VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState),
-
- /* Active FPU */
- VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu,
- CPUMIPSFPUContext),
-
- /* MVP */
- VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp,
- CPUMIPSMVPContext),
-
- /* TLB */
- VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb,
- CPUMIPSTLBContext),
-
- /* CPU metastate */
- VMSTATE_UINT32(env.current_tc, MIPSCPU),
- VMSTATE_UINT32(env.current_fpu, MIPSCPU),
- VMSTATE_INT32(env.error_code, MIPSCPU),
- VMSTATE_UINTTL(env.btarget, MIPSCPU),
- VMSTATE_UINTTL(env.bcond, MIPSCPU),
-
- /* Remaining CP0 registers */
- VMSTATE_INT32(env.CP0_Index, MIPSCPU),
- VMSTATE_INT32(env.CP0_VPControl, MIPSCPU),
- VMSTATE_INT32(env.CP0_Random, MIPSCPU),
- VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU),
- VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU),
- VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU),
- VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU),
- VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU),
- VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU),
- VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
- VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU),
- VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
- VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU),
- VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
- VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU),
- VMSTATE_INT32(env.CP0_HWREna, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
- VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
- VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
- VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU),
- VMSTATE_INT32(env.CP0_Count, MIPSCPU),
- VMSTATE_UNUSED(sizeof(uint32_t)), /* was CP0_SAARI */
- VMSTATE_UNUSED(2 * sizeof(uint64_t)), /* was CP0_SAAR[2] */
- VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU),
- VMSTATE_INT32(env.CP0_Compare, MIPSCPU),
- VMSTATE_INT32(env.CP0_Status, MIPSCPU),
- VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU),
- VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU),
- VMSTATE_INT32(env.CP0_Cause, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU),
- VMSTATE_INT32(env.CP0_PRid, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config0, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config1, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config2, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config3, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config4, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config5, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config6, MIPSCPU),
- VMSTATE_INT32(env.CP0_Config7, MIPSCPU),
- VMSTATE_UINT64(env.CP0_LLAddr, MIPSCPU),
- VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX),
- VMSTATE_INT32(env.CP0_MAARI, MIPSCPU),
- VMSTATE_UINTTL(env.lladdr, MIPSCPU),
- VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8),
- VMSTATE_UINT64_ARRAY(env.CP0_WatchHi, MIPSCPU, 8),
- VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU),
- VMSTATE_INT32(env.CP0_Framemask, MIPSCPU),
- VMSTATE_INT32(env.CP0_Debug, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU),
- VMSTATE_INT32(env.CP0_Performance0, MIPSCPU),
- VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU),
- VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU),
- VMSTATE_INT32(env.CP0_DataLo, MIPSCPU),
- VMSTATE_INT32(env.CP0_TagHi, MIPSCPU),
- VMSTATE_INT32(env.CP0_DataHi, MIPSCPU),
- VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU),
- VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU),
- VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM),
-
- /* Inactive TC */
- VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1,
- vmstate_inactive_tc, TCState),
- VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1,
- vmstate_inactive_fpu, CPUMIPSFPUContext),
-
- VMSTATE_END_OF_LIST()
- },
-};
diff --git a/target/mips/sysemu/mips-qmp-cmds.c b/target/mips/sysemu/mips-qmp-cmds.c
deleted file mode 100644
index 7340ac7..0000000
--- a/target/mips/sysemu/mips-qmp-cmds.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * QEMU MIPS CPU (monitor definitions)
- *
- * SPDX-FileCopyrightText: 2012 SUSE LINUX Products GmbH
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-#include "qemu/osdep.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "cpu.h"
-
-static void mips_cpu_add_definition(gpointer data, gpointer user_data)
-{
- ObjectClass *oc = data;
- CpuDefinitionInfoList **cpu_list = user_data;
- CpuDefinitionInfo *info;
- const char *typename;
-
- typename = object_class_get_name(oc);
- info = g_malloc0(sizeof(*info));
- info->name = cpu_model_from_type(typename);
- info->q_typename = g_strdup(typename);
-
- QAPI_LIST_PREPEND(*cpu_list, info);
-}
-
-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
-{
- CpuDefinitionInfoList *cpu_list = NULL;
- GSList *list;
-
- list = object_class_get_list(TYPE_MIPS_CPU, false);
- g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
- g_slist_free(list);
-
- return cpu_list;
-}
diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c
deleted file mode 100644
index 505781d..0000000
--- a/target/mips/sysemu/physaddr.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * MIPS TLB (Translation lookaside buffer) helpers.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/page-protection.h"
-#include "../internal.h"
-
-static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
-{
- /*
- * Interpret access control mode and mmu_idx.
- * AdE? TLB?
- * AM K S U E K S U E
- * UK 0 0 1 1 0 0 - - 0
- * MK 1 0 1 1 0 1 - - !eu
- * MSK 2 0 0 1 0 1 1 - !eu
- * MUSK 3 0 0 0 0 1 1 1 !eu
- * MUSUK 4 0 0 0 0 0 1 1 0
- * USK 5 0 0 1 0 0 0 - 0
- * - 6 - - - - - - - -
- * UUSK 7 0 0 0 0 0 0 0 0
- */
- int32_t adetlb_mask;
-
- switch (mmu_idx) {
- case 3: /* ERL */
- /* If EU is set, always unmapped */
- if (eu) {
- return 0;
- }
- /* fall through */
- case MIPS_HFLAG_KM:
- /* Never AdE, TLB mapped if AM={1,2,3} */
- adetlb_mask = 0x70000000;
- goto check_tlb;
-
- case MIPS_HFLAG_SM:
- /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
- adetlb_mask = 0xc0380000;
- goto check_ade;
-
- case MIPS_HFLAG_UM:
- /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
- adetlb_mask = 0xe4180000;
- /* fall through */
- check_ade:
- /* does this AM cause AdE in current execution mode */
- if ((adetlb_mask << am) < 0) {
- return TLBRET_BADADDR;
- }
- adetlb_mask <<= 8;
- /* fall through */
- check_tlb:
- /* is this AM mapped in current execution mode */
- return ((adetlb_mask << am) < 0);
- default:
- g_assert_not_reached();
- };
-}
-
-static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
- int *prot, target_ulong real_address,
- MMUAccessType access_type, int mmu_idx,
- unsigned int am, bool eu,
- target_ulong segmask,
- hwaddr physical_base)
-{
- int mapped = is_seg_am_mapped(am, eu, mmu_idx);
-
- if (mapped < 0) {
- /* is_seg_am_mapped can report TLBRET_BADADDR */
- return mapped;
- } else if (mapped) {
- /* The segment is TLB mapped */
- return env->tlb->map_address(env, physical, prot, real_address,
- access_type);
- } else {
- /* The segment is unmapped */
- *physical = physical_base | (real_address & segmask);
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return TLBRET_MATCH;
- }
-}
-
-static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
- int *prot, target_ulong real_address,
- MMUAccessType access_type, int mmu_idx,
- uint16_t segctl, target_ulong segmask)
-{
- unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
- bool eu = (segctl >> CP0SC_EU) & 1;
- hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
-
- return get_seg_physical_address(env, physical, prot, real_address,
- access_type, mmu_idx, am, eu, segmask,
- pa & ~(hwaddr)segmask);
-}
-
-int get_physical_address(CPUMIPSState *env, hwaddr *physical,
- int *prot, target_ulong real_address,
- MMUAccessType access_type, int mmu_idx)
-{
- /* User mode can only access useg/xuseg */
-#if defined(TARGET_MIPS64)
- int user_mode = mmu_idx == MIPS_HFLAG_UM;
- int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
- int kernel_mode = !user_mode && !supervisor_mode;
- int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
- int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
- int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
-#endif
- int ret = TLBRET_MATCH;
- /* effective address (modified for KVM T&E kernel segments) */
- target_ulong address = real_address;
-
- if (address <= USEG_LIMIT) {
- /* useg */
- uint16_t segctl;
-
- if (address >= 0x40000000UL) {
- segctl = env->CP0_SegCtl2;
- } else {
- segctl = env->CP0_SegCtl2 >> 16;
- }
- ret = get_segctl_physical_address(env, physical, prot,
- real_address, access_type,
- mmu_idx, segctl, 0x3FFFFFFF);
-#if defined(TARGET_MIPS64)
- } else if (address < 0x4000000000000000ULL) {
- /* xuseg */
- if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
- ret = env->tlb->map_address(env, physical, prot,
- real_address, access_type);
- } else {
- ret = TLBRET_BADADDR;
- }
- } else if (address < 0x8000000000000000ULL) {
- /* xsseg */
- if ((supervisor_mode || kernel_mode) &&
- SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
- ret = env->tlb->map_address(env, physical, prot,
- real_address, access_type);
- } else {
- ret = TLBRET_BADADDR;
- }
- } else if (address < 0xC000000000000000ULL) {
- /* xkphys */
- if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
- /* KX/SX/UX bit to check for each xkphys EVA access mode */
- static const uint8_t am_ksux[8] = {
- [CP0SC_AM_UK] = (1u << CP0St_KX),
- [CP0SC_AM_MK] = (1u << CP0St_KX),
- [CP0SC_AM_MSK] = (1u << CP0St_SX),
- [CP0SC_AM_MUSK] = (1u << CP0St_UX),
- [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
- [CP0SC_AM_USK] = (1u << CP0St_SX),
- [6] = (1u << CP0St_KX),
- [CP0SC_AM_UUSK] = (1u << CP0St_UX),
- };
- unsigned int am = CP0SC_AM_UK;
- unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;
-
- if (xr & (1 << ((address >> 59) & 0x7))) {
- am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
- }
- /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
- if (env->CP0_Status & am_ksux[am]) {
- ret = get_seg_physical_address(env, physical, prot,
- real_address, access_type,
- mmu_idx, am, false, env->PAMask,
- 0);
- } else {
- ret = TLBRET_BADADDR;
- }
- } else {
- ret = TLBRET_BADADDR;
- }
- } else if (address < 0xFFFFFFFF80000000ULL) {
- /* xkseg */
- if (kernel_mode && KX &&
- address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
- ret = env->tlb->map_address(env, physical, prot,
- real_address, access_type);
- } else {
- ret = TLBRET_BADADDR;
- }
-#endif
- } else if (address < KSEG1_BASE) {
- /* kseg0 */
- ret = get_segctl_physical_address(env, physical, prot, real_address,
- access_type, mmu_idx,
- env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
- } else if (address < KSEG2_BASE) {
- /* kseg1 */
- ret = get_segctl_physical_address(env, physical, prot, real_address,
- access_type, mmu_idx,
- env->CP0_SegCtl1, 0x1FFFFFFF);
- } else if (address < KSEG3_BASE) {
- /* sseg (kseg2) */
- ret = get_segctl_physical_address(env, physical, prot, real_address,
- access_type, mmu_idx,
- env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
- } else {
- /*
- * kseg3
- * XXX: debug segment is not emulated
- */
- ret = get_segctl_physical_address(env, physical, prot, real_address,
- access_type, mmu_idx,
- env->CP0_SegCtl0, 0x1FFFFFFF);
- }
- return ret;
-}
-
-hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
-{
- CPUMIPSState *env = cpu_env(cs);
- hwaddr phys_addr;
- int prot;
-
- if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
- mips_env_mmu_index(env)) != 0) {
- return -1;
- }
- return phys_addr;
-}
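
is_seg_am_mapped(), removed above as part of the sysemu/ reorganization, packs its per-access-mode booleans into 32-bit masks: the entry for AM value n sits at bit (31 - n), so "(adetlb_mask << am) < 0" reads it through the sign bit, and "adetlb_mask <<= 8" switches from the AdE byte to the TLB-mapped byte for the second test. A standalone restatement of that trick — am_bit_set() is illustrative, not QEMU code:

    #include <stdbool.h>
    #include <stdint.h>

    /* The table entry for access mode 'am' (0..7) lives at bit (31 - am) of
     * the mask; the original code tests it by shifting left and checking the
     * sign, whereas this sketch masks the top bit explicitly. */
    static bool am_bit_set(uint32_t mask, unsigned am)
    {
        return (mask << am) & UINT32_C(0x80000000);
    }

    /* Example: the user-mode AdE mask 0xe4180000 flags AM = {0, 1, 2, 5},
     * matching the "AdE if AM={0,1,2,5}" comment in the removed code:
     *   am_bit_set(0xe4180000, am) for am = 0..7  ->  1 1 1 0 0 1 0 0   */
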
diff --git a/target/mips/sysemu/addr.c b/target/mips/system/addr.c
index 4f025be..4f025be 100644
--- a/target/mips/sysemu/addr.c
+++ b/target/mips/system/addr.c
diff --git a/target/mips/system/cp0.c b/target/mips/system/cp0.c
new file mode 100644
index 0000000..ff7d3db
--- /dev/null
+++ b/target/mips/system/cp0.c
@@ -0,0 +1,123 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/cputlb.h"
+
+/* Called for updates to CP0_Status. */
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
+{
+ int32_t tcstatus, *tcst;
+ uint32_t v = cpu->CP0_Status;
+ uint32_t cu, mx, asid, ksu;
+ uint32_t mask = ((1 << CP0TCSt_TCU3)
+ | (1 << CP0TCSt_TCU2)
+ | (1 << CP0TCSt_TCU1)
+ | (1 << CP0TCSt_TCU0)
+ | (1 << CP0TCSt_TMX)
+ | (3 << CP0TCSt_TKSU)
+ | (0xff << CP0TCSt_TASID));
+
+ cu = (v >> CP0St_CU0) & 0xf;
+ mx = (v >> CP0St_MX) & 0x1;
+ ksu = (v >> CP0St_KSU) & 0x3;
+ asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+
+ tcstatus = cu << CP0TCSt_TCU0;
+ tcstatus |= mx << CP0TCSt_TMX;
+ tcstatus |= ksu << CP0TCSt_TKSU;
+ tcstatus |= asid;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~mask;
+ *tcst |= tcstatus;
+ compute_hflags(cpu);
+}
+
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
+{
+ uint32_t mask = env->CP0_Status_rw_bitmask;
+ target_ulong old = env->CP0_Status;
+
+ if (env->insn_flags & ISA_MIPS_R6) {
+ bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
+#if defined(TARGET_MIPS64)
+ uint32_t ksux = (1 << CP0St_KX) & val;
+ ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
+ ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
+ val = (val & ~(7 << CP0St_UX)) | ksux;
+#endif
+ if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
+ mask &= ~(3 << CP0St_KSU);
+ }
+ mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
+ }
+
+ env->CP0_Status = (old & ~mask) | (val & mask);
+#if defined(TARGET_MIPS64)
+ if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
+ /* Access to at least one of the 64-bit segments has been disabled */
+ tlb_flush(env_cpu(env));
+ }
+#endif
+ if (ase_mt_available(env)) {
+ sync_c0_status(env, env, env->current_tc);
+ } else {
+ compute_hflags(env);
+ }
+}
+
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
+{
+ uint32_t mask = 0x00C00300;
+ uint32_t old = env->CP0_Cause;
+ int i;
+
+ if (env->insn_flags & ISA_MIPS_R2) {
+ mask |= 1 << CP0Ca_DC;
+ }
+ if (env->insn_flags & ISA_MIPS_R6) {
+ mask &= ~((1 << CP0Ca_WP) & val);
+ }
+
+ env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
+
+ if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ cpu_mips_stop_count(env);
+ } else {
+ cpu_mips_start_count(env);
+ }
+ }
+
+ /* Set/reset software interrupts */
+ for (i = 0 ; i < 2 ; i++) {
+ if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
+ cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
+ }
+ }
+}
diff --git a/target/mips/system/cp0_timer.c b/target/mips/system/cp0_timer.c
new file mode 100644
index 0000000..ca16945
--- /dev/null
+++ b/target/mips/system/cp0_timer.c
@@ -0,0 +1,147 @@
+/*
+ * QEMU MIPS timer support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "qemu/timer.h"
+#include "system/kvm.h"
+#include "internal.h"
+
+/* MIPS R4K timer */
+static uint32_t cpu_mips_get_count_val(CPUMIPSState *env)
+{
+ int64_t now_ns;
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ return env->CP0_Count +
+ (uint32_t)clock_ns_to_ticks(env->count_clock, now_ns);
+}
+
+static void cpu_mips_timer_update(CPUMIPSState *env)
+{
+ uint64_t now_ns, next_ns;
+ uint32_t wait;
+
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ wait = env->CP0_Compare - cpu_mips_get_count_val(env);
+ /* Clamp interval to overflow if virtual time had not progressed */
+ if (!wait) {
+ wait = UINT32_MAX;
+ }
+ next_ns = now_ns + clock_ticks_to_ns(env->count_clock, wait);
+ timer_mod(env->timer, next_ns);
+}
+
+/* Expire the timer. */
+static void cpu_mips_timer_expire(CPUMIPSState *env)
+{
+ cpu_mips_timer_update(env);
+ if (env->insn_flags & ISA_MIPS_R2) {
+ env->CP0_Cause |= 1 << CP0Ca_TI;
+ }
+ qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
+}
+
+uint32_t cpu_mips_get_count(CPUMIPSState *env)
+{
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ return env->CP0_Count;
+ } else {
+ uint64_t now_ns;
+
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ if (timer_pending(env->timer)
+ && timer_expired(env->timer, now_ns)) {
+ /* The timer has already expired. */
+ cpu_mips_timer_expire(env);
+ }
+
+ return cpu_mips_get_count_val(env);
+ }
+}
+
+void cpu_mips_store_count(CPUMIPSState *env, uint32_t count)
+{
+ /*
+ * This gets called from cpu_state_reset(), potentially before timer init.
+ * So env->timer may be NULL, which is also the case with KVM enabled, so
+ * treat the timer as disabled in that case.
+ */
+ if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) {
+ env->CP0_Count = count;
+ } else {
+ /* Store new count register */
+ env->CP0_Count = count - (uint32_t)clock_ns_to_ticks(env->count_clock,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ /* Update the timer */
+ cpu_mips_timer_update(env);
+ }
+}
+
+void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value)
+{
+ env->CP0_Compare = value;
+ if (!(env->CP0_Cause & (1 << CP0Ca_DC))) {
+ cpu_mips_timer_update(env);
+ }
+ if (env->insn_flags & ISA_MIPS_R2) {
+ env->CP0_Cause &= ~(1 << CP0Ca_TI);
+ }
+ qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
+}
+
+void cpu_mips_start_count(CPUMIPSState *env)
+{
+ cpu_mips_store_count(env, env->CP0_Count);
+}
+
+void cpu_mips_stop_count(CPUMIPSState *env)
+{
+ /* Store the current value */
+ env->CP0_Count += (uint32_t)clock_ns_to_ticks(env->count_clock,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+static void mips_timer_cb(void *opaque)
+{
+ CPUMIPSState *env;
+
+ env = opaque;
+
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ return;
+ }
+
+ cpu_mips_timer_expire(env);
+}
+
+void cpu_mips_clock_init(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ /*
+ * If we're in KVM mode, don't create the periodic timer; that is handled
+ * in the kernel.
+ */
+ if (!kvm_enabled()) {
+ env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env);
+ }
+}
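
The Count/Compare arithmetic in cpu_mips_timer_update() works entirely in unsigned 32-bit wraparound: Compare - Count is the number of ticks left even when Count is about to wrap, and a zero delta is treated as (nearly) a full 2^32-tick period. A minimal host-side sketch of that deadline computation, in plain C with an assumed count-clock frequency (the real code converts ticks to nanoseconds with clock_ticks_to_ns()):

    #include <stdint.h>
    #include <stdio.h>

    #define COUNT_HZ 100000000u          /* assumed 100 MHz count clock */

    /* Ticks to wait until Count == Compare, using unsigned wraparound. */
    static uint32_t ticks_until_match(uint32_t count, uint32_t compare)
    {
        uint32_t wait = compare - count; /* wraps modulo 2^32, as above */
        return wait ? wait : UINT32_MAX; /* zero delta: wait a full period */
    }

    int main(void)
    {
        uint32_t count = 0xFFFFFFF0u, compare = 0x10u;
        uint32_t wait = ticks_until_match(count, compare);   /* 0x20 ticks */
        uint64_t next_ns = (uint64_t)wait * 1000000000u / COUNT_HZ;

        printf("wait=%u ticks, deadline in %llu ns\n",
               wait, (unsigned long long)next_ns);
        return 0;
    }
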
diff --git a/target/mips/system/machine.c b/target/mips/system/machine.c
new file mode 100644
index 0000000..8af11fd
--- /dev/null
+++ b/target/mips/system/machine.c
@@ -0,0 +1,336 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "migration/cpu.h"
+#include "fpu_helper.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ MIPSCPU *cpu = opaque;
+ CPUMIPSState *env = &cpu->env;
+
+ restore_fp_status(env);
+ restore_msa_fp_status(env);
+ compute_hflags(env);
+ restore_pamask(env);
+
+ return 0;
+}
+
+/* FPU state */
+
+static int get_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Restore entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN / 64; i++) {
+ qemu_get_sbe64s(f, &v->wr.d[i]);
+ }
+ return 0;
+}
+
+static int put_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Save entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN / 64; i++) {
+ qemu_put_sbe64s(f, &v->wr.d[i]);
+ }
+
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_fpr = {
+ .name = "fpr",
+ .get = get_fpr,
+ .put = put_fpr,
+};
+
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
+
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+static const VMStateField vmstate_fpu_fields[] = {
+ VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32),
+ VMSTATE_UINT32(fcr0, CPUMIPSFPUContext),
+ VMSTATE_UINT32(fcr31, CPUMIPSFPUContext),
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+static const VMStateDescription vmstate_inactive_fpu = {
+ .name = "cpu/inactive_fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+/* TC state */
+
+static const VMStateField vmstate_tc_fields[] = {
+ VMSTATE_UINTTL_ARRAY(gpr, TCState, 32),
+#if defined(TARGET_MIPS64)
+ VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32),
+#endif /* TARGET_MIPS64 */
+ VMSTATE_UINTTL(PC, TCState),
+ VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL(DSPControl, TCState),
+ VMSTATE_INT32(CP0_TCStatus, TCState),
+ VMSTATE_INT32(CP0_TCBind, TCState),
+ VMSTATE_UINTTL(CP0_TCHalt, TCState),
+ VMSTATE_UINTTL(CP0_TCContext, TCState),
+ VMSTATE_UINTTL(CP0_TCSchedule, TCState),
+ VMSTATE_UINTTL(CP0_TCScheFBack, TCState),
+ VMSTATE_INT32(CP0_Debug_tcstatus, TCState),
+ VMSTATE_UINTTL(CP0_UserLocal, TCState),
+ VMSTATE_INT32(msacsr, TCState),
+ VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1),
+ VMSTATE_UINTTL(mxu_cr, TCState),
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_tc = {
+ .name = "cpu/tc",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = vmstate_tc_fields
+};
+
+static const VMStateDescription vmstate_inactive_tc = {
+ .name = "cpu/inactive_tc",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = vmstate_tc_fields
+};
+
+/* MVP state */
+
+static const VMStateDescription vmstate_mvp = {
+ .name = "cpu/mvp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* TLB state */
+
+static int get_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ r4k_tlb_t *v = pv;
+ uint16_t flags;
+
+ qemu_get_betls(f, &v->VPN);
+ qemu_get_be32s(f, &v->PageMask);
+ qemu_get_be16s(f, &v->ASID);
+ qemu_get_be32s(f, &v->MMID);
+ qemu_get_be16s(f, &flags);
+ v->G = (flags >> 10) & 1;
+ v->C0 = (flags >> 7) & 3;
+ v->C1 = (flags >> 4) & 3;
+ v->V0 = (flags >> 3) & 1;
+ v->V1 = (flags >> 2) & 1;
+ v->D0 = (flags >> 1) & 1;
+ v->D1 = (flags >> 0) & 1;
+ v->EHINV = (flags >> 15) & 1;
+ v->RI1 = (flags >> 14) & 1;
+ v->RI0 = (flags >> 13) & 1;
+ v->XI1 = (flags >> 12) & 1;
+ v->XI0 = (flags >> 11) & 1;
+ qemu_get_be64s(f, &v->PFN[0]);
+ qemu_get_be64s(f, &v->PFN[1]);
+
+ return 0;
+}
+
+static int put_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ r4k_tlb_t *v = pv;
+
+ uint16_t asid = v->ASID;
+ uint32_t mmid = v->MMID;
+ uint16_t flags = ((v->EHINV << 15) |
+ (v->RI1 << 14) |
+ (v->RI0 << 13) |
+ (v->XI1 << 12) |
+ (v->XI0 << 11) |
+ (v->G << 10) |
+ (v->C0 << 7) |
+ (v->C1 << 4) |
+ (v->V0 << 3) |
+ (v->V1 << 2) |
+ (v->D0 << 1) |
+ (v->D1 << 0));
+
+ qemu_put_betls(f, &v->VPN);
+ qemu_put_be32s(f, &v->PageMask);
+ qemu_put_be16s(f, &asid);
+ qemu_put_be32s(f, &mmid);
+ qemu_put_be16s(f, &flags);
+ qemu_put_be64s(f, &v->PFN[0]);
+ qemu_put_be64s(f, &v->PFN[1]);
+
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_tlb = {
+ .name = "tlb_entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t)
+
+#define VMSTATE_TLB_ARRAY(_f, _s, _n) \
+ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
+
+static const VMStateDescription vmstate_tlb = {
+ .name = "cpu/tlb",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext),
+ VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext),
+ VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* MIPS CPU state */
+
+const VMStateDescription vmstate_mips_cpu = {
+ .name = "cpu",
+ .version_id = 21,
+ .minimum_version_id = 21,
+ .post_load = cpu_post_load,
+ .fields = (const VMStateField[]) {
+ /* Active TC */
+ VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState),
+
+ /* Active FPU */
+ VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu,
+ CPUMIPSFPUContext),
+
+ /* MVP */
+ VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp,
+ CPUMIPSMVPContext),
+
+ /* TLB */
+ VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb,
+ CPUMIPSTLBContext),
+
+ /* CPU metastate */
+ VMSTATE_UINT32(env.current_tc, MIPSCPU),
+ VMSTATE_UNUSED(sizeof(uint32_t)), /* was current_fpu */
+ VMSTATE_INT32(env.error_code, MIPSCPU),
+ VMSTATE_UINTTL(env.btarget, MIPSCPU),
+ VMSTATE_UINTTL(env.bcond, MIPSCPU),
+
+ /* Remaining CP0 registers */
+ VMSTATE_INT32(env.CP0_Index, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPControl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Random, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
+ VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU),
+ VMSTATE_INT32(env.CP0_HWREna, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Count, MIPSCPU),
+ VMSTATE_UNUSED(sizeof(uint32_t)), /* was CP0_SAARI */
+ VMSTATE_UNUSED(2 * sizeof(uint64_t)), /* was CP0_SAAR[2] */
+ VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Compare, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Status, MIPSCPU),
+ VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Cause, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PRid, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config4, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config5, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config6, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config7, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_LLAddr, MIPSCPU),
+ VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX),
+ VMSTATE_INT32(env.CP0_MAARI, MIPSCPU),
+ VMSTATE_UINTTL(env.lladdr, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.CP0_WatchHi, MIPSCPU, 8),
+ VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Framemask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Debug, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Performance0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_TagHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataHi, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM),
+
+ /* Inactive TC */
+ VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1,
+ vmstate_inactive_tc, TCState),
+ VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1,
+ vmstate_inactive_fpu, CPUMIPSFPUContext),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
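
put_tlb() flattens the individual TLB entry flag bits into one 16-bit word before writing it big-endian, and get_tlb() recovers them with the shifts shown above. The packing itself, as a standalone sketch (the struct is a stand-in for r4k_tlb_t):

    #include <stdint.h>
    #include <stdio.h>

    /* Flag layout used by put_tlb()/get_tlb(): bit numbers within the
     * 16-bit migration word. Plain ints stand in for the bitfields. */
    struct tlb_flags {
        unsigned EHINV, RI1, RI0, XI1, XI0, G, C0, C1, V0, V1, D0, D1;
    };

    static uint16_t pack_tlb_flags(const struct tlb_flags *v)
    {
        return (v->EHINV << 15) | (v->RI1 << 14) | (v->RI0 << 13) |
               (v->XI1  << 12) | (v->XI0 << 11) | (v->G   << 10) |
               (v->C0   <<  7) | (v->C1  <<  4) |
               (v->V0   <<  3) | (v->V1  <<  2) |
               (v->D0   <<  1) | (v->D1  <<  0);
    }

    int main(void)
    {
        struct tlb_flags f = { .G = 1, .V0 = 1, .D0 = 1, .C0 = 3, .C1 = 3 };
        printf("flags word: %#06x\n", pack_tlb_flags(&f));   /* 0x05ba */
        return 0;
    }
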
diff --git a/target/mips/sysemu/meson.build b/target/mips/system/meson.build
index 498cf28..498cf28 100644
--- a/target/mips/sysemu/meson.build
+++ b/target/mips/system/meson.build
diff --git a/target/mips/system/mips-qmp-cmds.c b/target/mips/system/mips-qmp-cmds.c
new file mode 100644
index 0000000..d98d662
--- /dev/null
+++ b/target/mips/system/mips-qmp-cmds.c
@@ -0,0 +1,48 @@
+/*
+ * QEMU MIPS CPU (monitor definitions)
+ *
+ * SPDX-FileCopyrightText: 2012 SUSE LINUX Products GmbH
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
+#include "cpu.h"
+
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
+static void mips_cpu_add_definition(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfo *info;
+ const char *typename;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = cpu_model_from_type(typename);
+ info->q_typename = g_strdup(typename);
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_MIPS_CPU, false);
+ g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
+ g_slist_free(list);
+
+ return cpu_list;
+}
diff --git a/target/mips/system/physaddr.c b/target/mips/system/physaddr.c
new file mode 100644
index 0000000..b8e1a5a
--- /dev/null
+++ b/target/mips/system/physaddr.c
@@ -0,0 +1,242 @@
+/*
+ * MIPS TLB (Translation lookaside buffer) helpers.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/page-protection.h"
+#include "../internal.h"
+
+static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
+{
+ /*
+ * Interpret access control mode and mmu_idx.
+ *              AdE?          TLB?
+ *        AM    K  S  U  E    K  S  U  E
+ * UK      0    0  1  1  0    0  -  -  0
+ * MK      1    0  1  1  0    1  -  -  !eu
+ * MSK     2    0  0  1  0    1  1  -  !eu
+ * MUSK    3    0  0  0  0    1  1  1  !eu
+ * MUSUK   4    0  0  0  0    0  1  1  0
+ * USK     5    0  0  1  0    0  0  -  0
+ * -       6    -  -  -  -    -  -  -  -
+ * UUSK    7    0  0  0  0    0  0  0  0
+ */
+ int32_t adetlb_mask;
+
+ switch (mmu_idx) {
+ case 3: /* ERL */
+ /* If EU is set, always unmapped */
+ if (eu) {
+ return 0;
+ }
+ /* fall through */
+ case MIPS_HFLAG_KM:
+ /* Never AdE, TLB mapped if AM={1,2,3} */
+ adetlb_mask = 0x70000000;
+ goto check_tlb;
+
+ case MIPS_HFLAG_SM:
+ /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
+ adetlb_mask = 0xc0380000;
+ goto check_ade;
+
+ case MIPS_HFLAG_UM:
+ /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
+ adetlb_mask = 0xe4180000;
+ /* fall through */
+ check_ade:
+ /* does this AM cause AdE in current execution mode */
+ if ((adetlb_mask << am) < 0) {
+ return TLBRET_BADADDR;
+ }
+ adetlb_mask <<= 8;
+ /* fall through */
+ check_tlb:
+ /* is this AM mapped in current execution mode */
+ return ((adetlb_mask << am) < 0);
+ default:
+ g_assert_not_reached();
+ };
+}
+
+static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx,
+ unsigned int am, bool eu,
+ target_ulong segmask,
+ hwaddr physical_base)
+{
+ int mapped = is_seg_am_mapped(am, eu, mmu_idx);
+
+ if (mapped < 0) {
+ /* is_seg_am_mapped can report TLBRET_BADADDR */
+ return mapped;
+ } else if (mapped) {
+ /* The segment is TLB mapped */
+ return env->tlb->map_address(env, physical, prot, real_address,
+ access_type);
+ } else {
+ /* The segment is unmapped */
+ *physical = physical_base | (real_address & segmask);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+ }
+}
+
+static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx,
+ uint16_t segctl, target_ulong segmask)
+{
+ unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
+ bool eu = (segctl >> CP0SC_EU) & 1;
+ hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
+
+ return get_seg_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx, am, eu, segmask,
+ pa & ~(hwaddr)segmask);
+}
+
+int get_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ /* User mode can only access useg/xuseg */
+#if defined(TARGET_MIPS64)
+ int user_mode = mmu_idx == MIPS_HFLAG_UM;
+ int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
+ int kernel_mode = !user_mode && !supervisor_mode;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+#endif
+ int ret = TLBRET_MATCH;
+ /* effective address (modified for KVM T&E kernel segments) */
+ target_ulong address = real_address;
+
+ if (address <= USEG_LIMIT) {
+ /* useg */
+ uint16_t segctl;
+
+ if (address >= 0x40000000UL) {
+ segctl = env->CP0_SegCtl2;
+ } else {
+ segctl = env->CP0_SegCtl2 >> 16;
+ }
+ ret = get_segctl_physical_address(env, physical, prot,
+ real_address, access_type,
+ mmu_idx, segctl, 0x3FFFFFFF);
+#if defined(TARGET_MIPS64)
+ } else if (address < 0x4000000000000000ULL) {
+ /* xuseg */
+ if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0x8000000000000000ULL) {
+ /* xsseg */
+ if ((supervisor_mode || kernel_mode) &&
+ SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xC000000000000000ULL) {
+ /* xkphys */
+ if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
+ /* KX/SX/UX bit to check for each xkphys EVA access mode */
+ static const uint8_t am_ksux[8] = {
+ [CP0SC_AM_UK] = (1u << CP0St_KX),
+ [CP0SC_AM_MK] = (1u << CP0St_KX),
+ [CP0SC_AM_MSK] = (1u << CP0St_SX),
+ [CP0SC_AM_MUSK] = (1u << CP0St_UX),
+ [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
+ [CP0SC_AM_USK] = (1u << CP0St_SX),
+ [6] = (1u << CP0St_KX),
+ [CP0SC_AM_UUSK] = (1u << CP0St_UX),
+ };
+ unsigned int am = CP0SC_AM_UK;
+ unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;
+
+ if (xr & (1 << ((address >> 59) & 0x7))) {
+ am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
+ }
+ /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
+ if (env->CP0_Status & am_ksux[am]) {
+ ret = get_seg_physical_address(env, physical, prot,
+ real_address, access_type,
+ mmu_idx, am, false, env->PAMask,
+ 0);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xFFFFFFFF80000000ULL) {
+ /* xkseg */
+ if (kernel_mode && KX &&
+ address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+#endif
+ } else if (address < KSEG1_BASE) {
+ /* kseg0 */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
+ } else if (address < KSEG2_BASE) {
+ /* kseg1 */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl1, 0x1FFFFFFF);
+ } else if (address < KSEG3_BASE) {
+ /* sseg (kseg2) */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
+ } else {
+ /*
+ * kseg3
+ * XXX: debug segment is not emulated
+ */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl0, 0x1FFFFFFF);
+ }
+ return ret;
+}
+
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ CPUMIPSState *env = cpu_env(cs);
+ hwaddr phys_addr;
+ int prot;
+
+ if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
+ mips_env_mmu_index(env)) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
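
is_seg_am_mapped() folds the two per-access-mode tables above into one 32-bit mask per CPU mode: the top byte holds one bit per AM value for "raises an address error", the next byte (after adetlb_mask <<= 8) one bit per AM for "is TLB mapped", and shifting the mask left by AM moves the relevant bit into the sign position. Unmapped segments then translate by masking off the segment offset and OR-ing in the segment's physical base. A sketch of both ideas, with the physical base taken as zero:

    #include <stdint.h>
    #include <stdio.h>

    /* Each access mode AM (0..7) owns one bit of a per-mode mask, MSB first,
     * so "shift left by AM and look at the top bit" answers the table above.
     * (is_seg_am_mapped() does the same via a signed sign-bit test.) */
    static int am_bit_set(uint32_t mask, unsigned am)
    {
        return (mask << am) >> 31;
    }

    int main(void)
    {
        /* User mode: AdE for AM in {0,1,2,5}, the 0xe4180000 mask above. */
        uint32_t ade_mask = 0xe4180000u;

        for (unsigned am = 0; am < 8; am++) {
            printf("AM=%u -> %s\n", am,
                   am_bit_set(ade_mask, am) ? "address error" : "no error");
        }

        /* An unmapped segment translates by masking: a kseg0/kseg1-style
         * 512MB window uses segmask 0x1FFFFFFF, as in the kseg0 case above. */
        uint32_t vaddr = 0x80001234u;
        printf("phys = 0x%x\n", vaddr & 0x1FFFFFFF);        /* 0x1234 */
        return 0;
    }
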
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
index 4886d08..d32bceb 100644
--- a/target/mips/tcg/exception.c
+++ b/target/mips/tcg/exception.c
@@ -23,7 +23,7 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
target_ulong exception_resume_pc(CPUMIPSState *env)
{
diff --git a/target/mips/tcg/fpu_helper.c b/target/mips/tcg/fpu_helper.c
index 45d593d..36af980 100644
--- a/target/mips/tcg/fpu_helper.c
+++ b/target/mips/tcg/fpu_helper.c
@@ -24,7 +24,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "fpu_helper.h"
diff --git a/target/mips/tcg/godson2.decode b/target/mips/tcg/godson2.decode
new file mode 100644
index 0000000..25b396b
--- /dev/null
+++ b/target/mips/tcg/godson2.decode
@@ -0,0 +1,27 @@
+# Godson2 64-bit Integer instructions
+#
+# Copyright (C) 2021 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference:
+# Godson-2E Software Manual
+# (Document Number: godson2e-user-manual-V0.6)
+#
+
+&muldiv rs rt rd
+
+@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv
+
+MULTu_G 011111 ..... ..... ..... 00000 01100- @rs_rt_rd
+DMULTu_G 011111 ..... ..... ..... 00000 01110- @rs_rt_rd
+
+DIV_G 011111 ..... ..... ..... 00000 011010 @rs_rt_rd
+DIVU_G 011111 ..... ..... ..... 00000 011011 @rs_rt_rd
+DDIV_G 011111 ..... ..... ..... 00000 011110 @rs_rt_rd
+DDIVU_G 011111 ..... ..... ..... 00000 011111 @rs_rt_rd
+
+MOD_G 011111 ..... ..... ..... 00000 100010 @rs_rt_rd
+MODU_G 011111 ..... ..... ..... 00000 100011 @rs_rt_rd
+DMOD_G 011111 ..... ..... ..... 00000 100110 @rs_rt_rd
+DMODU_G 011111 ..... ..... ..... 00000 100111 @rs_rt_rd
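
The &muldiv argument set and the @rs_rt_rd format place rs, rt and rd in the usual MIPS R-type fields; decodetree turns each pattern line into a match on the fixed opcode bits plus these field extractions, then calls the matching trans_* function (see loong_translate.c further down). A hand-written sketch of what that field extraction amounts to:

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout from "@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ......":
     * rs in bits 25:21, rt in 20:16, rd in 15:11. */
    typedef struct {
        int rs, rt, rd;
    } arg_muldiv;

    static unsigned extract_field(uint32_t insn, unsigned pos, unsigned len)
    {
        return (insn >> pos) & ((1u << len) - 1);
    }

    static void decode_rs_rt_rd(arg_muldiv *a, uint32_t insn)
    {
        a->rs = extract_field(insn, 21, 5);
        a->rt = extract_field(insn, 16, 5);
        a->rd = extract_field(insn, 11, 5);
    }

    int main(void)
    {
        /* A DIV_G rd=$3, rs=$4, rt=$5 encoding built from the pattern
         * "011111 rs rt rd 00000 011010" above. */
        uint32_t insn = (0x1Fu << 26) | (4u << 21) | (5u << 16)
                      | (3u << 11) | 0x1A;
        arg_muldiv a;

        decode_rs_rt_rd(&a, insn);
        printf("rs=%d rt=%d rd=%d\n", a.rs, a.rt, a.rd); /* rs=4 rt=5 rd=3 */
        return 0;
    }
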
diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c
index 97056d0..10319bf 100644
--- a/target/mips/tcg/ldst_helper.c
+++ b/target/mips/tcg/ldst_helper.c
@@ -23,8 +23,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/memop.h"
#include "internal.h"
@@ -53,11 +52,6 @@ HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif /* !CONFIG_USER_ONLY */
-static inline bool cpu_is_bigendian(CPUMIPSState *env)
-{
- return extract32(env->CP0_Config0, CP0C0_BE, 1);
-}
-
static inline target_ulong get_lmask(CPUMIPSState *env,
target_ulong value, unsigned bits)
{
@@ -65,7 +59,7 @@ static inline target_ulong get_lmask(CPUMIPSState *env,
value &= mask;
- if (!cpu_is_bigendian(env)) {
+ if (!mips_env_is_bigendian(env)) {
value ^= mask;
}
@@ -76,7 +70,7 @@ void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 32);
- int dir = cpu_is_bigendian(env) ? 1 : -1;
+ int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
@@ -100,7 +94,7 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 32);
- int dir = cpu_is_bigendian(env) ? 1 : -1;
+ int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
@@ -130,7 +124,7 @@ void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 64);
- int dir = cpu_is_bigendian(env) ? 1 : -1;
+ int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
@@ -174,7 +168,7 @@ void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
target_ulong lmask = get_lmask(env, arg2, 64);
- int dir = cpu_is_bigendian(env) ? 1 : -1;
+ int dir = mips_env_is_bigendian(env) ? 1 : -1;
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
diff --git a/target/mips/tcg/loong-ext.decode b/target/mips/tcg/loong-ext.decode
new file mode 100644
index 0000000..b43979d
--- /dev/null
+++ b/target/mips/tcg/loong-ext.decode
@@ -0,0 +1,28 @@
+# Loongson 64-bit Extension instructions
+#
+# Copyright (C) 2021 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference:
+# STLS2F01 User Manual
+# Appendix A: new integer instructions
+# (Document Number: UM0447)
+#
+
+&muldiv rs rt rd !extern
+
+@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv
+
+MULTu_G 011100 ..... ..... ..... 00000 0100-0 @rs_rt_rd
+DMULTu_G 011100 ..... ..... ..... 00000 0100-1 @rs_rt_rd
+
+DIV_G 011100 ..... ..... ..... 00000 010100 @rs_rt_rd
+DDIV_G 011100 ..... ..... ..... 00000 010101 @rs_rt_rd
+DIVU_G 011100 ..... ..... ..... 00000 010110 @rs_rt_rd
+DDIVU_G 011100 ..... ..... ..... 00000 010111 @rs_rt_rd
+
+MOD_G 011100 ..... ..... ..... 00000 011100 @rs_rt_rd
+DMOD_G 011100 ..... ..... ..... 00000 011101 @rs_rt_rd
+MODU_G 011100 ..... ..... ..... 00000 011110 @rs_rt_rd
+DMODU_G 011100 ..... ..... ..... 00000 011111 @rs_rt_rd
diff --git a/target/mips/tcg/loong_translate.c b/target/mips/tcg/loong_translate.c
new file mode 100644
index 0000000..7d74cc3
--- /dev/null
+++ b/target/mips/tcg/loong_translate.c
@@ -0,0 +1,271 @@
+/*
+ * MIPS Loongson 64-bit translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
+ * Copyright (c) 2021 Philippe Mathieu-Daudé
+ *
+ * This code is licensed under the GNU GPLv2 and later.
+ */
+
+#include "qemu/osdep.h"
+#include "translate.h"
+
+/* Include the auto-generated decoder. */
+#include "decode-godson2.c.inc"
+#include "decode-loong-ext.c.inc"
+
+/*
+ * Word or double-word fixed-point instructions.
+ * ---------------------------------------------
+ *
+ * These fixed-point multiplies and divisions write their single
+ * result into a general-purpose register rather than into HI/LO.
+ */
+
+static bool gen_lext_DIV_G(DisasContext *s, int rd, int rs, int rt,
+ bool is_double)
+{
+ TCGv t0, t1;
+ TCGLabel *l1, *l2, *l3;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ l3 = gen_new_label();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (!is_double) {
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ }
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ if (!is_double) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+ gen_set_label(l3);
+
+ return true;
+}
+
+static bool trans_DIV_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, false);
+}
+
+static bool trans_DDIV_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, true);
+}
+
+static bool gen_lext_DIVU_G(DisasContext *s, int rd, int rs, int rt,
+ bool is_double)
+{
+ TCGv t0, t1;
+ TCGLabel *l1, *l2;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (!is_double) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ }
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ if (!is_double) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+ gen_set_label(l2);
+
+ return true;
+}
+
+static bool trans_DIVU_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, false);
+}
+
+static bool trans_DDIVU_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, true);
+}
+
+static bool gen_lext_MOD_G(DisasContext *s, int rd, int rs, int rt,
+ bool is_double)
+{
+ TCGv t0, t1;
+ TCGLabel *l1, *l2, *l3;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ l3 = gen_new_label();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (!is_double) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ }
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ if (!is_double) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+ gen_set_label(l3);
+
+ return true;
+}
+
+static bool trans_MOD_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, false);
+}
+
+static bool trans_DMOD_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, true);
+}
+
+static bool gen_lext_MODU_G(DisasContext *s, int rd, int rs, int rt,
+ bool is_double)
+{
+ TCGv t0, t1;
+ TCGLabel *l1, *l2;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (!is_double) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ }
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ if (!is_double) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+ gen_set_label(l2);
+
+ return true;
+}
+
+static bool trans_MODU_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, false);
+}
+
+static bool trans_DMODU_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, true);
+}
+
+static bool gen_lext_MULT_G(DisasContext *s, int rd, int rs, int rt,
+ bool is_double)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ if (!is_double) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+
+ return true;
+}
+
+static bool trans_MULTu_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, false);
+}
+
+static bool trans_DMULTu_G(DisasContext *s, arg_muldiv *a)
+{
+ return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, true);
+}
+
+bool decode_ext_loongson(DisasContext *ctx, uint32_t insn)
+{
+ if (!decode_64bit_enabled(ctx)) {
+ return false;
+ }
+ if ((ctx->insn_flags & INSN_LOONGSON2E) && decode_godson2(ctx, ctx->opcode)) {
+ return true;
+ }
+ if ((ctx->insn_flags & ASE_LEXT) && decode_loong_ext(ctx, ctx->opcode)) {
+ return true;
+ }
+ return false;
+}
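
gen_lext_DIV_G() above branches around the two special cases of the Loongson divide: a zero divisor produces 0, and the INT_MIN / -1 (or LLONG_MIN / -1) overflow produces the dividend, with the quotient written straight to rd. The same semantics as a plain C helper, handy as a reference while reading the TCG label structure:

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar equivalent of the TCG emitted by gen_lext_DIV_G(is_double=false). */
    static int32_t div_g_w(int32_t rs, int32_t rt)
    {
        if (rt == 0) {
            return 0;                   /* divide by zero: result is 0 */
        }
        if (rs == INT32_MIN && rt == -1) {
            return rs;                  /* overflow case: result is rs */
        }
        return rs / rt;
    }

    int main(void)
    {
        printf("%d\n", div_g_w(7, 0));              /* 0 */
        printf("%d\n", div_g_w(INT32_MIN, -1));     /* -2147483648 */
        printf("%d\n", div_g_w(-7, 2));             /* -3 */
        return 0;
    }
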
diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build
index ea7fb58..fff9cd6 100644
--- a/target/mips/tcg/meson.build
+++ b/target/mips/tcg/meson.build
@@ -5,6 +5,8 @@ gen = [
decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'),
decodetree.process('octeon.decode', extra_args: '--decode=decode_ext_octeon'),
decodetree.process('lcsr.decode', extra_args: '--decode=decode_ase_lcsr'),
+ decodetree.process('godson2.decode', extra_args: ['--static-decode=decode_godson2']),
+ decodetree.process('loong-ext.decode', extra_args: ['--static-decode=decode_loong_ext']),
]
mips_ss.add(gen)
@@ -28,10 +30,11 @@ mips_ss.add(when: 'TARGET_MIPS64', if_true: files(
'tx79_translate.c',
'octeon_translate.c',
'lcsr_translate.c',
+ 'loong_translate.c',
), if_false: files(
'mxu_translate.c',
))
if have_system
- subdir('sysemu')
+ subdir('system')
endif
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 7510831..c479bec 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -977,23 +977,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
gen_reserved_instruction(ctx);
return;
}
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd);
- tcg_gen_movi_tl(t1, 4);
- gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
+ gen_op_addr_addi(ctx, t0, t0, 4);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd + 1);
break;
case SWP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
- tcg_gen_movi_tl(t1, 4);
- gen_op_addr_add(ctx, t0, t0, t1);
+ gen_op_addr_addi(ctx, t0, t0, 4);
gen_load_gpr(t1, rd + 1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#ifdef TARGET_MIPS64
@@ -1002,23 +1000,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
gen_reserved_instruction(ctx);
return;
}
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd);
- tcg_gen_movi_tl(t1, 8);
- gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ gen_op_addr_addi(ctx, t0, t0, 8);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rd + 1);
break;
case SDP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
- tcg_gen_movi_tl(t1, 8);
- gen_op_addr_add(ctx, t0, t0, t1);
+ gen_op_addr_addi(ctx, t0, t0, 8);
gen_load_gpr(t1, rd + 1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@@ -2488,7 +2484,10 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
mips32_op = OPC_BC1TANY4;
do_cp1mips3d:
check_cop1x(ctx);
- check_insn(ctx, ASE_MIPS3D);
+ if (!ase_3d_available(env)) {
+ gen_reserved_instruction(ctx);
+ break;
+ }
/* Fall through */
do_cp1branch:
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
@@ -2572,13 +2571,13 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
gen_st(ctx, mips32_op, rt, rs, offset);
break;
case SC:
- gen_st_cond(ctx, rt, rs, offset, MO_TESL, false);
+ gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, false);
break;
#if defined(TARGET_MIPS64)
case SCD:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false);
+ gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_UQ, false);
break;
#endif
case LD_EVA:
@@ -2659,7 +2658,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
mips32_op = OPC_SHE;
goto do_st_lr;
case SCE:
- gen_st_cond(ctx, rt, rs, offset, MO_TESL, true);
+ gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, true);
break;
case SWE:
mips32_op = OPC_SWE;
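
These microMIPS hunks, like the MIPS16e, MXU and nanoMIPS ones below, replace the build-time target-endian memops (MO_TESL, MO_TEUL, MO_TEUQ) with mo_endian(ctx) combined with the size/sign bits, so the byte order follows the DisasContext at translation time. The helper itself is not part of these hunks; presumably it reduces to something like the following sketch, where the types and values are stand-ins and only the selection logic is the point:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the QEMU MemOp bits used here; values are illustrative. */
    typedef enum { MO_LE = 0x00, MO_BE = 0x08, MO_SL = 0x06 } MemOp;

    typedef struct { bool big_endian; } DisasContext;

    static bool disas_is_bigendian(const DisasContext *ctx)
    {
        return ctx->big_endian;
    }

    /* Assumed shape of the helper behind "mo_endian(ctx) | MO_SL" above. */
    static MemOp mo_endian(const DisasContext *ctx)
    {
        return disas_is_bigendian(ctx) ? MO_BE : MO_LE;
    }

    int main(void)
    {
        DisasContext le = { .big_endian = false }, be = { .big_endian = true };

        printf("LE vcpu: %#x, BE vcpu: %#x\n",
               mo_endian(&le) | MO_SL, mo_endian(&be) | MO_SL);
        return 0;
    }
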
diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc
index 5cffe0e..97da345 100644
--- a/target/mips/tcg/mips16e_translate.c.inc
+++ b/target/mips/tcg/mips16e_translate.c.inc
@@ -122,11 +122,21 @@ enum {
static int xlat(int r)
{
- static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+ static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
return map[r];
}
+static void decr_and_store(DisasContext *ctx, unsigned regidx, TCGv t0)
+{
+ TCGv t1 = tcg_temp_new();
+
+ gen_op_addr_addi(ctx, t0, t0, -4);
+ gen_load_gpr(t1, regidx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
+ ctx->default_tcg_memop_mask);
+}
+
static void gen_mips16_save(DisasContext *ctx,
int xsregs, int aregs,
int do_ra, int do_s0, int do_s1,
@@ -134,7 +144,6 @@ static void gen_mips16_save(DisasContext *ctx,
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
int args, astatic;
switch (aregs) {
@@ -172,70 +181,62 @@ static void gen_mips16_save(DisasContext *ctx,
case 4:
gen_base_offset_addr(ctx, t0, 29, 12);
gen_load_gpr(t1, 7);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 3:
gen_base_offset_addr(ctx, t0, 29, 8);
gen_load_gpr(t1, 6);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 2:
gen_base_offset_addr(ctx, t0, 29, 4);
gen_load_gpr(t1, 5);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
/* Fall through */
case 1:
gen_base_offset_addr(ctx, t0, 29, 0);
gen_load_gpr(t1, 4);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
}
gen_load_gpr(t0, 29);
-#define DECR_AND_STORE(reg) do { \
- tcg_gen_movi_tl(t2, -4); \
- gen_op_addr_add(ctx, t0, t0, t2); \
- gen_load_gpr(t1, reg); \
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | \
- ctx->default_tcg_memop_mask); \
- } while (0)
-
if (do_ra) {
- DECR_AND_STORE(31);
+ decr_and_store(ctx, 31, t0);
}
switch (xsregs) {
case 7:
- DECR_AND_STORE(30);
+ decr_and_store(ctx, 30, t0);
/* Fall through */
case 6:
- DECR_AND_STORE(23);
+ decr_and_store(ctx, 23, t0);
/* Fall through */
case 5:
- DECR_AND_STORE(22);
+ decr_and_store(ctx, 22, t0);
/* Fall through */
case 4:
- DECR_AND_STORE(21);
+ decr_and_store(ctx, 21, t0);
/* Fall through */
case 3:
- DECR_AND_STORE(20);
+ decr_and_store(ctx, 20, t0);
/* Fall through */
case 2:
- DECR_AND_STORE(19);
+ decr_and_store(ctx, 19, t0);
/* Fall through */
case 1:
- DECR_AND_STORE(18);
+ decr_and_store(ctx, 18, t0);
}
if (do_s1) {
- DECR_AND_STORE(17);
+ decr_and_store(ctx, 17, t0);
}
if (do_s0) {
- DECR_AND_STORE(16);
+ decr_and_store(ctx, 16, t0);
}
switch (aregs) {
@@ -270,21 +271,31 @@ static void gen_mips16_save(DisasContext *ctx,
}
if (astatic > 0) {
- DECR_AND_STORE(7);
+ decr_and_store(ctx, 7, t0);
if (astatic > 1) {
- DECR_AND_STORE(6);
+ decr_and_store(ctx, 6, t0);
if (astatic > 2) {
- DECR_AND_STORE(5);
+ decr_and_store(ctx, 5, t0);
if (astatic > 3) {
- DECR_AND_STORE(4);
+ decr_and_store(ctx, 4, t0);
}
}
}
}
-#undef DECR_AND_STORE
- tcg_gen_movi_tl(t2, -framesize);
- gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize);
+}
+
+static void decr_and_load(DisasContext *ctx, unsigned regidx, TCGv t0)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t2, -4);
+ gen_op_addr_add(ctx, t0, t0, t2);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TE | MO_SL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t1, regidx);
}
static void gen_mips16_restore(DisasContext *ctx,
@@ -294,52 +305,41 @@ static void gen_mips16_restore(DisasContext *ctx,
{
int astatic;
TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
-
- tcg_gen_movi_tl(t2, framesize);
- gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);
-#define DECR_AND_LOAD(reg) do { \
- tcg_gen_movi_tl(t2, -4); \
- gen_op_addr_add(ctx, t0, t0, t2); \
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \
- ctx->default_tcg_memop_mask); \
- gen_store_gpr(t1, reg); \
- } while (0)
+ gen_op_addr_addi(ctx, t0, cpu_gpr[29], framesize);
if (do_ra) {
- DECR_AND_LOAD(31);
+ decr_and_load(ctx, 31, t0);
}
switch (xsregs) {
case 7:
- DECR_AND_LOAD(30);
+ decr_and_load(ctx, 30, t0);
/* Fall through */
case 6:
- DECR_AND_LOAD(23);
+ decr_and_load(ctx, 23, t0);
/* Fall through */
case 5:
- DECR_AND_LOAD(22);
+ decr_and_load(ctx, 22, t0);
/* Fall through */
case 4:
- DECR_AND_LOAD(21);
+ decr_and_load(ctx, 21, t0);
/* Fall through */
case 3:
- DECR_AND_LOAD(20);
+ decr_and_load(ctx, 20, t0);
/* Fall through */
case 2:
- DECR_AND_LOAD(19);
+ decr_and_load(ctx, 19, t0);
/* Fall through */
case 1:
- DECR_AND_LOAD(18);
+ decr_and_load(ctx, 18, t0);
}
if (do_s1) {
- DECR_AND_LOAD(17);
+ decr_and_load(ctx, 17, t0);
}
if (do_s0) {
- DECR_AND_LOAD(16);
+ decr_and_load(ctx, 16, t0);
}
switch (aregs) {
@@ -374,21 +374,19 @@ static void gen_mips16_restore(DisasContext *ctx,
}
if (astatic > 0) {
- DECR_AND_LOAD(7);
+ decr_and_load(ctx, 7, t0);
if (astatic > 1) {
- DECR_AND_LOAD(6);
+ decr_and_load(ctx, 6, t0);
if (astatic > 2) {
- DECR_AND_LOAD(5);
+ decr_and_load(ctx, 5, t0);
if (astatic > 3) {
- DECR_AND_LOAD(4);
+ decr_and_load(ctx, 4, t0);
}
}
}
}
-#undef DECR_AND_LOAD
- tcg_gen_movi_tl(t2, framesize);
- gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], framesize);
}
#if defined(TARGET_MIPS64)
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index d218176..f554b3d 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -21,10 +21,11 @@
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "exec/memop.h"
+#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "fpu_helper.h"
@@ -5577,7 +5578,7 @@ static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
int64_t q_min = DF_MIN_INT(df);
int64_t q_max = DF_MAX_INT(df);
- int64_t r_bit = 1 << (DF_BITS(df) - 2);
+ int64_t r_bit = 1LL << (DF_BITS(df) - 2);
if (arg1 == q_min && arg2 == q_min) {
return q_max;
@@ -5685,7 +5686,7 @@ static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
int64_t q_max = DF_MAX_INT(df);
int64_t q_min = DF_MIN_INT(df);
- int64_t r_bit = 1 << (DF_BITS(df) - 2);
+ int64_t r_bit = 1LL << (DF_BITS(df) - 2);
q_prod = arg1 * arg2;
q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);
@@ -5700,7 +5701,7 @@ static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
int64_t q_max = DF_MAX_INT(df);
int64_t q_min = DF_MIN_INT(df);
- int64_t r_bit = 1 << (DF_BITS(df) - 2);
+ int64_t r_bit = 1LL << (DF_BITS(df) - 2);
q_prod = arg1 * arg2;
q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);
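
The r_bit hunks above widen the rounding-bit constant: 1 << (DF_BITS(df) - 2) is evaluated in 32-bit int arithmetic and cannot represent the rounding bit once DF_BITS(df) reaches 64, while 1LL << (DF_BITS(df) - 2) keeps the whole computation in 64 bits. A quick illustration of the widened constant:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int df_bits = 64;               /* DF_BITS(df) for a doubleword format */

        /* 1 << 62 would overflow a 32-bit int (undefined behaviour), so the
         * rounding bit must be built in 64-bit arithmetic from the start. */
        int64_t r_bit = 1LL << (df_bits - 2);

        printf("r_bit = %#llx\n",
               (unsigned long long)r_bit);          /* 0x4000000000000000 */
        return 0;
    }
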
@@ -6231,7 +6232,7 @@ static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
/* Set Inexact (I) when flushing inputs to zero */
- if ((ieee_exception_flags & float_flag_input_denormal) &&
+ if ((ieee_exception_flags & float_flag_input_denormal_flushed) &&
(env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
if (action & CLEAR_IS_INEXACT) {
mips_exception_flags &= ~FP_INEXACT;
@@ -6241,7 +6242,7 @@ static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
}
/* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
- if ((ieee_exception_flags & float_flag_output_denormal) &&
+ if ((ieee_exception_flags & float_flag_output_denormal_flushed) &&
(env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
mips_exception_flags |= FP_INEXACT;
if (action & CLEAR_FS_UNDERFLOW) {
@@ -8211,15 +8212,6 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
-#if !defined(CONFIG_USER_ONLY)
-#define MEMOP_IDX(DF) \
- MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
- mips_env_mmu_index(env));
-#else
-#define MEMOP_IDX(DF)
-#endif
-
-#if TARGET_BIG_ENDIAN
static inline uint64_t bswap16x4(uint64_t x)
{
uint64_t m = 0x00ff00ff00ff00ffull;
@@ -8230,7 +8222,6 @@ static inline uint64_t bswap32x2(uint64_t x)
{
return ror64(bswap64(x), 32);
}
-#endif
void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
target_ulong addr)
@@ -8259,10 +8250,10 @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
*/
d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
-#if TARGET_BIG_ENDIAN
- d0 = bswap16x4(d0);
- d1 = bswap16x4(d1);
-#endif
+ if (mips_env_is_bigendian(env)) {
+ d0 = bswap16x4(d0);
+ d1 = bswap16x4(d1);
+ }
pwd->d[0] = d0;
pwd->d[1] = d1;
}
@@ -8280,10 +8271,10 @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
*/
d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
-#if TARGET_BIG_ENDIAN
- d0 = bswap32x2(d0);
- d1 = bswap32x2(d1);
-#endif
+ if (mips_env_is_bigendian(env)) {
+ d0 = bswap32x2(d0);
+ d1 = bswap32x2(d1);
+ }
pwd->d[0] = d0;
pwd->d[1] = d1;
}
@@ -8346,10 +8337,10 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
/* Store 8 bytes at a time. See helper_msa_ld_h. */
d0 = pwd->d[0];
d1 = pwd->d[1];
-#if TARGET_BIG_ENDIAN
- d0 = bswap16x4(d0);
- d1 = bswap16x4(d1);
-#endif
+ if (mips_env_is_bigendian(env)) {
+ d0 = bswap16x4(d0);
+ d1 = bswap16x4(d1);
+ }
cpu_stq_le_data_ra(env, addr + 0, d0, ra);
cpu_stq_le_data_ra(env, addr + 8, d1, ra);
}
@@ -8367,10 +8358,10 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
/* Store 8 bytes at a time. See helper_msa_ld_w. */
d0 = pwd->d[0];
d1 = pwd->d[1];
-#if TARGET_BIG_ENDIAN
- d0 = bswap32x2(d0);
- d1 = bswap32x2(d1);
-#endif
+ if (mips_env_is_bigendian(env)) {
+ d0 = bswap32x2(d0);
+ d1 = bswap32x2(d1);
+ }
cpu_stq_le_data_ra(env, addr + 0, d0, ra);
cpu_stq_le_data_ra(env, addr + 8, d1, ra);
}
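
With the #if TARGET_BIG_ENDIAN blocks gone, the MSA load/store helpers always fetch or store the two 64-bit halves little-endian and byte-swap them at run time when mips_env_is_bigendian(env) says so. The lane swap is the usual mask-and-shift trick on the 0x00ff00ff00ff00ff pattern shown above; a standalone sketch (the body of bswap16x4() is reconstructed here, so treat it as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Swap the bytes inside each of the four 16-bit lanes of a 64-bit value.
     * Reconstructed from the 0x00ff00ff00ff00ff mask above. */
    static uint64_t bswap16x4(uint64_t x)
    {
        const uint64_t m = 0x00ff00ff00ff00ffull;
        return ((x & m) << 8) | ((x >> 8) & m);
    }

    int main(void)
    {
        uint64_t d0 = 0x1122334455667788ull;

        /* Each 16-bit lane is swapped independently: 0x1122 -> 0x2211, ... */
        printf("%016llx\n",
               (unsigned long long)bswap16x4(d0));  /* 2211443366558877 */
        return 0;
    }
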
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index c517258..35ebb03 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -1533,7 +1533,7 @@ static void gen_mxu_s32ldxx(DisasContext *ctx, bool reversed, bool postinc)
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
- (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
gen_store_mxu_gpr(t1, XRa);
@@ -1569,7 +1569,7 @@ static void gen_mxu_s32stxx(DisasContext *ctx, bool reversed, bool postinc)
gen_load_mxu_gpr(t1, XRa);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
- (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
if (postinc) {
@@ -1605,7 +1605,7 @@ static void gen_mxu_s32ldxvx(DisasContext *ctx, bool reversed,
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
- (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
gen_store_mxu_gpr(t1, XRa);
@@ -1675,7 +1675,7 @@ static void gen_mxu_s32stxvx(DisasContext *ctx, bool reversed,
gen_load_mxu_gpr(t1, XRa);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
- (MO_TESL ^ (reversed ? MO_BSWAP : 0)) |
+ MO_SL | mo_endian_rev(ctx, reversed) |
ctx->default_tcg_memop_mask);
if (postinc) {
@@ -4803,19 +4803,19 @@ static void decode_opc_mxu__pool17(DisasContext *ctx)
switch (opcode) {
case OPC_MXU_LXW:
- gen_mxu_lxx(ctx, strd2, MO_TE | MO_UL);
+ gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UL);
break;
case OPC_MXU_LXB:
- gen_mxu_lxx(ctx, strd2, MO_TE | MO_SB);
+ gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SB);
break;
case OPC_MXU_LXH:
- gen_mxu_lxx(ctx, strd2, MO_TE | MO_SW);
+ gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SW);
break;
case OPC_MXU_LXBU:
- gen_mxu_lxx(ctx, strd2, MO_TE | MO_UB);
+ gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UB);
break;
case OPC_MXU_LXHU:
- gen_mxu_lxx(ctx, strd2, MO_TE | MO_UW);
+ gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UW);
break;
default:
MIPS_INVAL("decode_opc_mxu");
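
The MXU indexed load/store paths previously flipped the target-endian memop with an explicit XOR against MO_BSWAP for the "reversed" instruction variants; they now ask mo_endian_rev(ctx, reversed) for the final byte order. Presumably that helper just XORs the context endianness with the reversed flag, along the lines of the following sketch (an assumption, mirroring the mo_endian() sketch earlier):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins as in the earlier mo_endian() sketch; values illustrative. */
    typedef enum { MO_LE = 0x00, MO_BE = 0x08, MO_SL = 0x06 } MemOp;
    typedef struct { bool big_endian; } DisasContext;

    /* Assumed shape of mo_endian_rev(): the reversed MXU variants simply
     * invert whichever byte order the vCPU is currently using. */
    static MemOp mo_endian_rev(const DisasContext *ctx, bool reversed)
    {
        return (ctx->big_endian ^ reversed) ? MO_BE : MO_LE;
    }

    int main(void)
    {
        DisasContext le = { .big_endian = false };

        printf("normal: %#x, reversed: %#x\n",
               MO_SL | mo_endian_rev(&le, false),
               MO_SL | mo_endian_rev(&le, true));
        return 0;
    }
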
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index b4b746d..1e27414 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -998,8 +998,9 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
TCGv tmp2 = tcg_temp_new();
gen_base_offset_addr(ctx, taddr, base, offset);
- tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN);
- if (cpu_is_bigendian(ctx)) {
+ tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx,
+ mo_endian(ctx) | MO_UQ | MO_ALIGN);
+ if (disas_is_bigendian(ctx)) {
tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
} else {
tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
@@ -1031,7 +1032,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
gen_load_gpr(tmp1, reg1);
gen_load_gpr(tmp2, reg2);
- if (cpu_is_bigendian(ctx)) {
+ if (disas_is_bigendian(ctx)) {
tcg_gen_concat_tl_i64(tval, tmp2, tmp1);
} else {
tcg_gen_concat_tl_i64(tval, tmp1, tmp2);
@@ -1052,8 +1053,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
tcg_gen_movi_tl(cpu_gpr[reg1], 0);
}
gen_set_label(lab_done);
- tcg_gen_movi_tl(lladdr, -1);
- tcg_gen_st_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr));
+ tcg_gen_st_tl(tcg_constant_tl(-1), tcg_env, offsetof(CPUMIPSState, lladdr));
}
static void gen_adjust_sp(DisasContext *ctx, int u)
@@ -1075,7 +1075,7 @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count,
gen_base_offset_addr(ctx, va, 29, this_offset);
gen_load_gpr(t0, this_rt);
tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx,
- (MO_TEUL | ctx->default_tcg_memop_mask));
+ mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask);
counter++;
}
@@ -1095,8 +1095,8 @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
int this_offset = u - ((counter + 1) << 2);
gen_base_offset_addr(ctx, va, 29, this_offset);
- tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL |
- ctx->default_tcg_memop_mask);
+ tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx,
+ mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, this_rt);
counter++;
@@ -1543,7 +1543,6 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
{
int16_t imm;
TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
TCGv v0_t = tcg_temp_new();
gen_load_gpr(v0_t, v1);
@@ -1570,12 +1569,10 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
check_dsp(ctx);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_MTHLIP:
- tcg_gen_movi_tl(t0, v2 >> 3);
- gen_helper_mthlip(t0, v0_t, tcg_env);
+ gen_helper_mthlip(tcg_constant_tl(v2 >> 3), v0_t, tcg_env);
break;
case NM_SHILOV:
- tcg_gen_movi_tl(t0, v2 >> 3);
- gen_helper_shilo(t0, v0_t, tcg_env);
+ gen_helper_shilo(tcg_constant_tl(v2 >> 3), v0_t, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1587,39 +1584,34 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
imm = extract32(ctx->opcode, 14, 7);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_RDDSP:
- tcg_gen_movi_tl(t0, imm);
- gen_helper_rddsp(t0, t0, tcg_env);
+ gen_helper_rddsp(t0, tcg_constant_tl(imm), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_WRDSP:
gen_load_gpr(t0, ret);
- tcg_gen_movi_tl(t1, imm);
- gen_helper_wrdsp(t0, t1, tcg_env);
+ gen_helper_wrdsp(t0, tcg_constant_tl(imm), tcg_env);
break;
case NM_EXTP:
- tcg_gen_movi_tl(t0, v2 >> 3);
- tcg_gen_movi_tl(t1, v1);
- gen_helper_extp(t0, t0, t1, tcg_env);
+ gen_helper_extp(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTPDP:
- tcg_gen_movi_tl(t0, v2 >> 3);
- tcg_gen_movi_tl(t1, v1);
- gen_helper_extpdp(t0, t0, t1, tcg_env);
+ gen_helper_extpdp(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
}
break;
case NM_POOL32AXF_1_4:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, v2 >> 2);
switch (extract32(ctx->opcode, 12, 1)) {
case NM_SHLL_QB:
- gen_helper_shll_qb(t0, t0, v0_t, tcg_env);
+ gen_helper_shll_qb(t0, tcg_constant_tl(v2 >> 2), v0_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_SHRL_QB:
- gen_helper_shrl_qb(t0, t0, v0_t);
+ gen_helper_shrl_qb(t0, tcg_constant_tl(v2 >> 2), v0_t);
gen_store_gpr(t0, ret);
break;
}
@@ -1630,23 +1622,25 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_POOL32AXF_1_7:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, v2 >> 3);
- tcg_gen_movi_tl(t1, v1);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_EXTR_W:
- gen_helper_extr_w(t0, t0, t1, tcg_env);
+ gen_helper_extr_w(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_R_W:
- gen_helper_extr_r_w(t0, t0, t1, tcg_env);
+ gen_helper_extr_r_w(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_RS_W:
- gen_helper_extr_rs_w(t0, t0, t1, tcg_env);
+ gen_helper_extr_rs_w(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_S_H:
- gen_helper_extr_s_h(t0, t0, t1, tcg_env);
+ gen_helper_extr_s_h(t0, tcg_constant_tl(v2 >> 3),
+ tcg_constant_tl(v1), tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1848,8 +1842,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
case NM_EXTRV_W:
check_dsp(ctx);
gen_load_gpr(v1_t, rs);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_w(t0, t0, v1_t, tcg_env);
+ gen_helper_extr_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1903,8 +1896,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_R_W:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_r_w(t0, t0, v1_t, tcg_env);
+ gen_helper_extr_r_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
default:
@@ -1923,8 +1915,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTPV:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extp(t0, t0, v1_t, tcg_env);
+ gen_helper_extp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUB:
@@ -1947,8 +1938,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_RS_W:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_rs_w(t0, t0, v1_t, tcg_env);
+ gen_helper_extr_rs_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1964,8 +1954,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTPDPV:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extpdp(t0, t0, v1_t, tcg_env);
+ gen_helper_extpdp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUBU:
@@ -1990,8 +1979,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
break;
case NM_EXTRV_S_H:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_s_h(t0, t0, v1_t, tcg_env);
+ gen_helper_extr_s_h(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -2149,24 +2137,22 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
switch (opc) {
case NM_SHRA_R_QB:
check_dsp_r2(ctx);
- tcg_gen_movi_tl(t0, rd >> 2);
switch (extract32(ctx->opcode, 12, 1)) {
case 0:
/* NM_SHRA_QB */
- gen_helper_shra_qb(t0, t0, rs_t);
+ gen_helper_shra_qb(t0, tcg_constant_tl(rd >> 2), rs_t);
gen_store_gpr(t0, rt);
break;
case 1:
/* NM_SHRA_R_QB */
- gen_helper_shra_r_qb(t0, t0, rs_t);
+ gen_helper_shra_r_qb(t0, tcg_constant_tl(rd >> 2), rs_t);
gen_store_gpr(t0, rt);
break;
}
break;
case NM_SHRL_PH:
check_dsp_r2(ctx);
- tcg_gen_movi_tl(t0, rd >> 1);
- gen_helper_shrl_ph(t0, t0, rs_t);
+ gen_helper_shrl_ph(t0, tcg_constant_tl(rd >> 1), rs_t);
gen_store_gpr(t0, rt);
break;
case NM_REPL_QB:
@@ -2180,8 +2166,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
(uint32_t)imm << 8 |
(uint32_t)imm;
result = (int32_t)result;
- tcg_gen_movi_tl(t0, result);
- gen_store_gpr(t0, rt);
+ gen_store_gpr(tcg_constant_tl(result), rt);
}
break;
default:
@@ -2302,10 +2287,9 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
{
TCGCond cond = TCG_COND_ALWAYS;
TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
+ TCGv timm = tcg_constant_tl(imm);
gen_load_gpr(t0, rt);
- tcg_gen_movi_tl(t1, imm);
ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
/* Load needed operands and calculate btarget */
@@ -2334,7 +2318,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
} else {
tcg_gen_shri_tl(t0, t0, imm);
tcg_gen_andi_tl(t0, t0, 1);
- tcg_gen_movi_tl(t1, 0);
+ timm = tcg_constant_tl(0);
if (opc == NM_BBEQZC) {
cond = TCG_COND_EQ;
} else {
@@ -2389,7 +2373,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
/* Conditional compact branch */
TCGLabel *fs = gen_new_label();
- tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs);
+ tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, timm, fs);
gen_goto_tb(ctx, 1, ctx->btarget);
gen_set_label(fs);
@@ -2403,7 +2387,6 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
int rt)
{
TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
/* load rs */
gen_load_gpr(t0, rs);
@@ -2415,8 +2398,7 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
/* calculate btarget */
tcg_gen_shli_tl(t0, t0, 1);
- tcg_gen_movi_tl(t1, ctx->base.pc_next + 4);
- gen_op_addr_add(ctx, btarget, t1, t0);
+ gen_op_addr_add(ctx, btarget, tcg_constant_tl(ctx->base.pc_next + 4), t0);
/* branch completion */
clear_branch_hflags(ctx);
@@ -2469,11 +2451,9 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
} else {
/* OPC_JIC, OPC_JIALC */
TCGv tbase = tcg_temp_new();
- TCGv toffset = tcg_temp_new();
gen_load_gpr(tbase, rt);
- tcg_gen_movi_tl(toffset, offset);
- gen_op_addr_add(ctx, btarget, tbase, toffset);
+ gen_op_addr_addi(ctx, btarget, tbase, offset);
}
break;
default:
@@ -2647,13 +2627,13 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
case NM_LHX:
/*case NM_LHXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
- MO_TESW | ctx->default_tcg_memop_mask);
+ mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_LWX:
/*case NM_LWXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
- MO_TESL | ctx->default_tcg_memop_mask);
+ mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_LBUX:
@@ -2663,7 +2643,7 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
case NM_LHUX:
/*case NM_LHUXS:*/
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
- MO_TEUW | ctx->default_tcg_memop_mask);
+ mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rd);
break;
case NM_SBX:
@@ -2676,14 +2656,14 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
check_nms(ctx);
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
- MO_TEUW | ctx->default_tcg_memop_mask);
+ mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask);
break;
case NM_SWX:
/*case NM_SWXS:*/
check_nms(ctx);
gen_load_gpr(t1, rd);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
- MO_TEUL | ctx->default_tcg_memop_mask);
+ mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask);
break;
case NM_LWC1X:
/*case NM_LWC1XS:*/
@@ -3445,13 +3425,10 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
case NM_SHILO:
check_dsp(ctx);
{
- TCGv tv0 = tcg_temp_new();
- TCGv tv1 = tcg_temp_new();
int16_t imm = extract32(ctx->opcode, 16, 7);
- tcg_gen_movi_tl(tv0, rd >> 3);
- tcg_gen_movi_tl(tv1, imm);
- gen_helper_shilo(tv0, tv1, tcg_env);
+ gen_helper_shilo(tcg_constant_tl(rd >> 3),
+ tcg_constant_tl(imm), tcg_env);
}
break;
case NM_MULEQ_S_W_PHL:
@@ -3506,8 +3483,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
break;
case NM_SHRA_R_W:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd);
- gen_helper_shra_r_w(v1_t, t0, v1_t);
+ gen_helper_shra_r_w(v1_t, tcg_constant_tl(rd), v1_t);
gen_store_gpr(v1_t, rt);
break;
case NM_SHRA_R_PH:
@@ -3547,8 +3523,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
break;
case NM_SHLL_S_W:
check_dsp(ctx);
- tcg_gen_movi_tl(t0, rd);
- gen_helper_shll_s_w(v1_t, t0, v1_t, tcg_env);
+ gen_helper_shll_s_w(v1_t, tcg_constant_tl(rd), v1_t, tcg_env);
gen_store_gpr(v1_t, rt);
break;
case NM_REPL_PH:
@@ -3729,32 +3704,29 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
case NM_LWPC48:
check_nms(ctx);
if (rt != 0) {
- TCGv t0;
- t0 = tcg_temp_new();
-
target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
addr_off);
- tcg_gen_movi_tl(t0, addr);
- tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx,
- MO_TESL | ctx->default_tcg_memop_mask);
+ tcg_gen_qemu_ld_tl(cpu_gpr[rt], tcg_constant_tl(addr),
+ ctx->mem_idx,
+ mo_endian(ctx) | MO_SL
+ | ctx->default_tcg_memop_mask);
}
break;
case NM_SWPC48:
check_nms(ctx);
{
- TCGv t0, t1;
- t0 = tcg_temp_new();
+ TCGv t1;
t1 = tcg_temp_new();
target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
addr_off);
- tcg_gen_movi_tl(t0, addr);
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
- MO_TEUL | ctx->default_tcg_memop_mask);
+ tcg_gen_qemu_st_tl(t1, tcg_constant_tl(addr), ctx->mem_idx,
+ mo_endian(ctx) | MO_UL
+ | ctx->default_tcg_memop_mask);
}
break;
default:
@@ -4132,14 +4104,14 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
switch (extract32(ctx->opcode, 11, 4)) {
case NM_UALH:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
- MO_UNALN);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ mo_endian(ctx) | MO_SW | MO_UNALN);
gen_store_gpr(t0, rt);
break;
case NM_UASH:
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
- MO_UNALN);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ mo_endian(ctx) | MO_UW | MO_UNALN);
break;
}
}
@@ -4161,7 +4133,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
case NM_P_SC:
switch (ctx->opcode & 0x03) {
case NM_SC:
- gen_st_cond(ctx, rt, rs, s, MO_TESL, false);
+ gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL,
+ false);
break;
case NM_SCWP:
check_xnp(ctx);
@@ -4274,7 +4247,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
check_xnp(ctx);
check_eva(ctx);
check_cp0_enabled(ctx);
- gen_st_cond(ctx, rt, rs, s, MO_TESL, true);
+ gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL,
+ true);
break;
case NM_SCWPE:
check_xnp(ctx);
@@ -4317,7 +4291,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
switch (extract32(ctx->opcode, 11, 1)) {
case NM_LWM:
tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx,
- memop | MO_TESL);
+ memop | mo_endian(ctx) | MO_SL);
gen_store_gpr(t1, this_rt);
if ((this_rt == rs) &&
(counter != (count - 1))) {
@@ -4328,7 +4302,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
this_rt = (rt == 0) ? 0 : this_rt;
gen_load_gpr(t1, this_rt);
tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx,
- memop | MO_TEUL);
+ memop | mo_endian(ctx) | MO_UL);
break;
}
counter++;
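The nanoMIPS hunks above apply two mechanical conversions throughout. A minimal sketch of both idioms follows (not taken from the patch itself; gen_helper_foo, dst, addr and imm are placeholders for whatever a given hunk touches):

    /* Before: the immediate is first materialized into a scratch temporary. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, imm);
    gen_helper_foo(dst, t0, tcg_env);

    /* After: the immediate is passed as a constant TCGv, so the temporary
     * and the extra mov op disappear. */
    gen_helper_foo(dst, tcg_constant_tl(imm), tcg_env);

    /* Loads and stores stop using the fixed target-endian MO_TESW/MO_TESL/
     * MO_TEUW/MO_TEUL macros and compose the endianness per DisasContext: */
    tcg_gen_qemu_ld_tl(dst, addr, ctx->mem_idx,
                       mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);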
diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c
index e25c4cb..d9eb437 100644
--- a/target/mips/tcg/octeon_translate.c
+++ b/target/mips/tcg/octeon_translate.c
@@ -18,8 +18,8 @@ static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a)
TCGv p;
if (ctx->hflags & MIPS_HFLAG_BMASK) {
- LOG_DISAS("Branch in delay / forbidden slot at PC 0x"
- TARGET_FMT_lx "\n", ctx->base.pc_next);
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x%" VADDR_PRIx "\n",
+ ctx->base.pc_next);
generate_exception_end(ctx, EXCP_RI);
return true;
}
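In current QEMU the pc_next field of DisasContextBase is a vaddr rather than a target_ulong, which is presumably why the octeon hunk above switches from TARGET_FMT_lx to the matching VADDR_PRIx format. The idiom, with an illustrative local variable:

    vaddr pc = ctx->base.pc_next;
    LOG_DISAS("Branch in delay / forbidden slot at PC 0x%" VADDR_PRIx "\n", pc);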
diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c
index 65403f1..b906d10 100644
--- a/target/mips/tcg/op_helper.c
+++ b/target/mips/tcg/op_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/memop.h"
#include "fpu_helper.h"
diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c
deleted file mode 100644
index 79a5c83..0000000
--- a/target/mips/tcg/sysemu/cp0_helper.c
+++ /dev/null
@@ -1,1644 +0,0 @@
-/*
- * Helpers for emulation of CP0-related MIPS instructions.
- *
- * Copyright (C) 2004-2005 Jocelyn Mayer
- * Copyright (C) 2020 Wave Computing, Inc.
- * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "qemu/main-loop.h"
-#include "cpu.h"
-#include "internal.h"
-#include "qemu/host-utils.h"
-#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-
-
-/* SMP helpers. */
-static bool mips_vpe_is_wfi(MIPSCPU *c)
-{
- CPUState *cpu = CPU(c);
- CPUMIPSState *env = &c->env;
-
- /*
- * If the VPE is halted but otherwise active, it means it's waiting for
- * an interrupt.
- */
- return cpu->halted && mips_vpe_active(env);
-}
-
-static bool mips_vp_is_wfi(MIPSCPU *c)
-{
- CPUState *cpu = CPU(c);
- CPUMIPSState *env = &c->env;
-
- return cpu->halted && mips_vp_active(env);
-}
-
-static inline void mips_vpe_wake(MIPSCPU *c)
-{
- /*
- * Don't set ->halted = 0 directly, let it be done via cpu_has_work
- * because there might be other conditions that state that c should
- * be sleeping.
- */
- bql_lock();
- cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
- bql_unlock();
-}
-
-static inline void mips_vpe_sleep(MIPSCPU *cpu)
-{
- CPUState *cs = CPU(cpu);
-
- /*
- * The VPE was shut off, really go to bed.
- * Reset any old _WAKE requests.
- */
- cs->halted = 1;
- cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
-}
-
-static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
-{
- CPUMIPSState *c = &cpu->env;
-
- /* FIXME: TC reschedule. */
- if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
- mips_vpe_wake(cpu);
- }
-}
-
-static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
-{
- CPUMIPSState *c = &cpu->env;
-
- /* FIXME: TC reschedule. */
- if (!mips_vpe_active(c)) {
- mips_vpe_sleep(cpu);
- }
-}
-
-/**
- * mips_cpu_map_tc:
- * @env: CPU from which mapping is performed.
- * @tc: Should point to an int with the value of the global TC index.
- *
- * This function will transform @tc into a local index within the
- * returned #CPUMIPSState.
- */
-
-/*
- * FIXME: This code assumes that all VPEs have the same number of TCs,
- * which depends on runtime setup. Can probably be fixed by
- * walking the list of CPUMIPSStates.
- */
-static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
-{
- MIPSCPU *cpu;
- CPUState *cs;
- CPUState *other_cs;
- int vpe_idx;
- int tc_idx = *tc;
-
- if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
- /* Not allowed to address other CPUs. */
- *tc = env->current_tc;
- return env;
- }
-
- cs = env_cpu(env);
- vpe_idx = tc_idx / cs->nr_threads;
- *tc = tc_idx % cs->nr_threads;
- other_cs = qemu_get_cpu(vpe_idx);
- if (other_cs == NULL) {
- return env;
- }
- cpu = MIPS_CPU(other_cs);
- return &cpu->env;
-}
-
-/*
- * The per VPE CP0_Status register shares some fields with the per TC
- * CP0_TCStatus registers. These fields are wired to the same registers,
- * so changes to either of them should be reflected on both registers.
- *
- * Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
- *
- * These helper calls synchronize the regs for a given cpu.
- */
-
-/*
- * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
- * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
- * int tc);
- */
-
-/* Called for updates to CP0_TCStatus. */
-static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
- target_ulong v)
-{
- uint32_t status;
- uint32_t tcu, tmx, tasid, tksu;
- uint32_t mask = ((1U << CP0St_CU3)
- | (1 << CP0St_CU2)
- | (1 << CP0St_CU1)
- | (1 << CP0St_CU0)
- | (1 << CP0St_MX)
- | (3 << CP0St_KSU));
-
- tcu = (v >> CP0TCSt_TCU0) & 0xf;
- tmx = (v >> CP0TCSt_TMX) & 0x1;
- tasid = v & cpu->CP0_EntryHi_ASID_mask;
- tksu = (v >> CP0TCSt_TKSU) & 0x3;
-
- status = tcu << CP0St_CU0;
- status |= tmx << CP0St_MX;
- status |= tksu << CP0St_KSU;
-
- cpu->CP0_Status &= ~mask;
- cpu->CP0_Status |= status;
-
- /* Sync the TASID with EntryHi. */
- cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
- cpu->CP0_EntryHi |= tasid;
-
- compute_hflags(cpu);
-}
-
-/* Called for updates to CP0_EntryHi. */
-static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
-{
- int32_t *tcst;
- uint32_t asid, v = cpu->CP0_EntryHi;
-
- asid = v & cpu->CP0_EntryHi_ASID_mask;
-
- if (tc == cpu->current_tc) {
- tcst = &cpu->active_tc.CP0_TCStatus;
- } else {
- tcst = &cpu->tcs[tc].CP0_TCStatus;
- }
-
- *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
- *tcst |= asid;
-}
-
-/* XXX: do not use a global */
-uint32_t cpu_mips_get_random(CPUMIPSState *env)
-{
- static uint32_t seed = 1;
- static uint32_t prev_idx;
- uint32_t idx;
- uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;
-
- if (nb_rand_tlb == 1) {
- return env->tlb->nb_tlb - 1;
- }
-
- /* Don't return same value twice, so get another value */
- do {
- /*
- * Use a simple algorithm of Linear Congruential Generator
- * from ISO/IEC 9899 standard.
- */
- seed = 1103515245 * seed + 12345;
- idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
- } while (idx == prev_idx);
- prev_idx = idx;
- return idx;
-}
-
-/* CP0 helpers */
-target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
-{
- return env->mvp->CP0_MVPControl;
-}
-
-target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
-{
- return env->mvp->CP0_MVPConf0;
-}
-
-target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
-{
- return env->mvp->CP0_MVPConf1;
-}
-
-target_ulong helper_mfc0_random(CPUMIPSState *env)
-{
- return (int32_t)cpu_mips_get_random(env);
-}
-
-target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCStatus;
-}
-
-target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCStatus;
- } else {
- return other->tcs[other_tc].CP0_TCStatus;
- }
-}
-
-target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCBind;
-}
-
-target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCBind;
- } else {
- return other->tcs[other_tc].CP0_TCBind;
- }
-}
-
-target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
-{
- return env->active_tc.PC;
-}
-
-target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.PC;
- } else {
- return other->tcs[other_tc].PC;
- }
-}
-
-target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCHalt;
-}
-
-target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCHalt;
- } else {
- return other->tcs[other_tc].CP0_TCHalt;
- }
-}
-
-target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCContext;
-}
-
-target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCContext;
- } else {
- return other->tcs[other_tc].CP0_TCContext;
- }
-}
-
-target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCSchedule;
-}
-
-target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCSchedule;
- } else {
- return other->tcs[other_tc].CP0_TCSchedule;
- }
-}
-
-target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCScheFBack;
-}
-
-target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.CP0_TCScheFBack;
- } else {
- return other->tcs[other_tc].CP0_TCScheFBack;
- }
-}
-
-target_ulong helper_mfc0_count(CPUMIPSState *env)
-{
- return (int32_t)cpu_mips_get_count(env);
-}
-
-target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_EntryHi;
-}
-
-target_ulong helper_mftc0_cause(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_Cause;
-}
-
-target_ulong helper_mftc0_status(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_Status;
-}
-
-target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
-{
- return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
-}
-
-target_ulong helper_mfc0_maar(CPUMIPSState *env)
-{
- return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
-}
-
-target_ulong helper_mfhc0_maar(CPUMIPSState *env)
-{
- return env->CP0_MAAR[env->CP0_MAARI] >> 32;
-}
-
-target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
-{
- return (int32_t)env->CP0_WatchLo[sel];
-}
-
-target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
-{
- return (int32_t) env->CP0_WatchHi[sel];
-}
-
-target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
-{
- return env->CP0_WatchHi[sel] >> 32;
-}
-
-target_ulong helper_mfc0_debug(CPUMIPSState *env)
-{
- target_ulong t0 = env->CP0_Debug;
- if (env->hflags & MIPS_HFLAG_DM) {
- t0 |= 1 << CP0DB_DM;
- }
-
- return t0;
-}
-
-target_ulong helper_mftc0_debug(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- int32_t tcstatus;
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- tcstatus = other->active_tc.CP0_Debug_tcstatus;
- } else {
- tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
- }
-
- /* XXX: Might be wrong, check with EJTAG spec. */
- return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
- (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
-}
-
-#if defined(TARGET_MIPS64)
-target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
-{
- return env->active_tc.PC;
-}
-
-target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCHalt;
-}
-
-target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCContext;
-}
-
-target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCSchedule;
-}
-
-target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
-{
- return env->active_tc.CP0_TCScheFBack;
-}
-
-target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
-{
- return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
-}
-
-target_ulong helper_dmfc0_maar(CPUMIPSState *env)
-{
- return env->CP0_MAAR[env->CP0_MAARI];
-}
-
-target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
-{
- return env->CP0_WatchLo[sel];
-}
-
-target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
-{
- return env->CP0_WatchHi[sel];
-}
-
-#endif /* TARGET_MIPS64 */
-
-void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t index_p = env->CP0_Index & 0x80000000;
- uint32_t tlb_index = arg1 & 0x7fffffff;
- if (tlb_index < env->tlb->nb_tlb) {
- if (env->insn_flags & ISA_MIPS_R6) {
- index_p |= arg1 & 0x80000000;
- }
- env->CP0_Index = index_p | tlb_index;
- }
-}
-
-void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
- mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
- (1 << CP0MVPCo_EVP);
- }
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
- mask |= (1 << CP0MVPCo_STLB);
- }
- newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
-
- /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
-
- env->mvp->CP0_MVPControl = newval;
-}
-
-void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask;
- uint32_t newval;
-
- mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
- (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
- newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
-
- /*
- * Yield scheduler intercept not implemented.
- * Gating storage scheduler intercept not implemented.
- */
-
- /* TODO: Enable/disable TCs. */
-
- env->CP0_VPEControl = newval;
-}
-
-void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
- uint32_t mask;
- uint32_t newval;
-
- mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
- (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
- newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
-
- /* TODO: Enable/disable TCs. */
-
- other->CP0_VPEControl = newval;
-}
-
-target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
- /* FIXME: Mask away return zero on read bits. */
- return other->CP0_VPEControl;
-}
-
-target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_VPEConf0;
-}
-
-void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
- mask |= (0xff << CP0VPEC0_XTC);
- }
- mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
- }
- newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
-
- /* TODO: TC exclusive handling due to ERL/EXL. */
-
- env->CP0_VPEConf0 = newval;
-}
-
-void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
- uint32_t mask = 0;
- uint32_t newval;
-
- mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
- newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
-
- /* TODO: TC exclusive handling due to ERL/EXL. */
- other->CP0_VPEConf0 = newval;
-}
-
-void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
- mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
- (0xff << CP0VPEC1_NCP1);
- newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
-
- /* UDI not implemented. */
- /* CP2 not implemented. */
-
- /* TODO: Handle FPU (CP1) binding. */
-
- env->CP0_VPEConf1 = newval;
-}
-
-void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
-{
- /* Yield qualifier inputs not implemented. */
- env->CP0_YQMask = 0x00000000;
-}
-
-void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_VPEOpt = arg1 & 0x0000ffff;
-}
-
-#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
-
-void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
-{
- /* 1k pages not implemented */
- target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
- env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
- | (rxi << (CP0EnLo_XI - 30));
-}
-
-#if defined(TARGET_MIPS64)
-#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
-
-void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
-{
- uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
- env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
-}
-#endif
-
-void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = env->CP0_TCStatus_rw_bitmask;
- uint32_t newval;
-
- newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
-
- env->active_tc.CP0_TCStatus = newval;
- sync_c0_tcstatus(env, env->current_tc, newval);
-}
-
-void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_TCStatus = arg1;
- } else {
- other->tcs[other_tc].CP0_TCStatus = arg1;
- }
- sync_c0_tcstatus(other, other_tc, arg1);
-}
-
-void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = (1 << CP0TCBd_TBE);
- uint32_t newval;
-
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
- mask |= (1 << CP0TCBd_CurVPE);
- }
- newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
- env->active_tc.CP0_TCBind = newval;
-}
-
-void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t mask = (1 << CP0TCBd_TBE);
- uint32_t newval;
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
- mask |= (1 << CP0TCBd_CurVPE);
- }
- if (other_tc == other->current_tc) {
- newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
- other->active_tc.CP0_TCBind = newval;
- } else {
- newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
- other->tcs[other_tc].CP0_TCBind = newval;
- }
-}
-
-void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
-{
- env->active_tc.PC = arg1;
- env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
- env->CP0_LLAddr = 0;
- env->lladdr = 0;
- /* MIPS16 not implemented. */
-}
-
-void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.PC = arg1;
- other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
- other->CP0_LLAddr = 0;
- other->lladdr = 0;
- /* MIPS16 not implemented. */
- } else {
- other->tcs[other_tc].PC = arg1;
- other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
- other->CP0_LLAddr = 0;
- other->lladdr = 0;
- /* MIPS16 not implemented. */
- }
-}
-
-void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
-{
- MIPSCPU *cpu = env_archcpu(env);
-
- env->active_tc.CP0_TCHalt = arg1 & 0x1;
-
- /* TODO: Halt TC / Restart (if allocated+active) TC. */
- if (env->active_tc.CP0_TCHalt & 1) {
- mips_tc_sleep(cpu, env->current_tc);
- } else {
- mips_tc_wake(cpu, env->current_tc);
- }
-}
-
-void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
- MIPSCPU *other_cpu = env_archcpu(other);
-
- /* TODO: Halt TC / Restart (if allocated+active) TC. */
-
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_TCHalt = arg1;
- } else {
- other->tcs[other_tc].CP0_TCHalt = arg1;
- }
-
- if (arg1 & 1) {
- mips_tc_sleep(other_cpu, other_tc);
- } else {
- mips_tc_wake(other_cpu, other_tc);
- }
-}
-
-void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
-{
- env->active_tc.CP0_TCContext = arg1;
-}
-
-void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_TCContext = arg1;
- } else {
- other->tcs[other_tc].CP0_TCContext = arg1;
- }
-}
-
-void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
-{
- env->active_tc.CP0_TCSchedule = arg1;
-}
-
-void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_TCSchedule = arg1;
- } else {
- other->tcs[other_tc].CP0_TCSchedule = arg1;
- }
-}
-
-void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
-{
- env->active_tc.CP0_TCScheFBack = arg1;
-}
-
-void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_TCScheFBack = arg1;
- } else {
- other->tcs[other_tc].CP0_TCScheFBack = arg1;
- }
-}
-
-void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
-{
- /* 1k pages not implemented */
- target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
- env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
- | (rxi << (CP0EnLo_XI - 30));
-}
-
-#if defined(TARGET_MIPS64)
-void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
-{
- uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
- env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
-}
-#endif
-
-void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
-}
-
-void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
-{
- int32_t old;
- old = env->CP0_MemoryMapID;
- env->CP0_MemoryMapID = (int32_t) arg1;
- /* If the MemoryMapID changes, flush qemu's TLB. */
- if (old != env->CP0_MemoryMapID) {
- cpu_mips_tlb_flush(env);
- }
-}
-
-void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
-{
- uint32_t mask;
- int maskbits;
-
- /* Don't care MASKX as we don't support 1KB page */
- mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
- maskbits = cto32(mask);
-
- /* Ensure no more set bit after first zero */
- if ((mask >> maskbits) != 0) {
- goto invalid;
- }
- /* We don't support VTLB entry smaller than target page */
- if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) {
- goto invalid;
- }
- env->CP0_PageMask = mask << CP0PM_MASK;
-
- return;
-
-invalid:
- /* When invalid, set to default target page size. */
- mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN);
- env->CP0_PageMask = mask << CP0PM_MASK;
-}
-
-void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
-{
- update_pagemask(env, arg1, &env->CP0_PageMask);
-}
-
-void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
-{
- /* SmartMIPS not implemented */
- /* 1k pages not implemented */
- env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
- (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
- compute_hflags(env);
- restore_pamask(env);
-}
-
-void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
-{
- CPUState *cs = env_cpu(env);
-
- env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
- tlb_flush(cs);
-}
-
-void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
-{
- CPUState *cs = env_cpu(env);
-
- env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
- tlb_flush(cs);
-}
-
-void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
-{
- CPUState *cs = env_cpu(env);
-
- env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
- tlb_flush(cs);
-}
-
-void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
-{
-#if defined(TARGET_MIPS64)
- uint64_t mask = 0x3F3FFFFFFFULL;
- uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
- uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
-
- if ((env->insn_flags & ISA_MIPS_R6)) {
- if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
- mask &= ~(0x3FULL << CP0PF_BDI);
- }
- if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
- mask &= ~(0x3FULL << CP0PF_GDI);
- }
- if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
- mask &= ~(0x3FULL << CP0PF_UDI);
- }
- if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
- mask &= ~(0x3FULL << CP0PF_MDI);
- }
- if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
- mask &= ~(0x3FULL << CP0PF_PTI);
- }
- }
- env->CP0_PWField = arg1 & mask;
-
- if ((new_ptei >= 32) ||
- ((env->insn_flags & ISA_MIPS_R6) &&
- (new_ptei == 0 || new_ptei == 1))) {
- env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
- (old_ptei << CP0PF_PTEI);
- }
-#else
- uint32_t mask = 0x3FFFFFFF;
- uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
- uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
-
- if ((env->insn_flags & ISA_MIPS_R6)) {
- if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
- mask &= ~(0x3F << CP0PF_GDW);
- }
- if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
- mask &= ~(0x3F << CP0PF_UDW);
- }
- if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
- mask &= ~(0x3F << CP0PF_MDW);
- }
- if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
- mask &= ~(0x3F << CP0PF_PTW);
- }
- }
- env->CP0_PWField = arg1 & mask;
-
- if ((new_ptew >= 32) ||
- ((env->insn_flags & ISA_MIPS_R6) &&
- (new_ptew == 0 || new_ptew == 1))) {
- env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
- (old_ptew << CP0PF_PTEW);
- }
-#endif
-}
-
-void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
-{
-#if defined(TARGET_MIPS64)
- env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
-#else
- env->CP0_PWSize = arg1 & 0x3FFFFFFF;
-#endif
-}
-
-void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
-{
- if (env->insn_flags & ISA_MIPS_R6) {
- if (arg1 < env->tlb->nb_tlb) {
- env->CP0_Wired = arg1;
- }
- } else {
- env->CP0_Wired = arg1 % env->tlb->nb_tlb;
- }
-}
-
-void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
-{
-#if defined(TARGET_MIPS64)
- /* PWEn = 0. Hardware page table walking is not implemented. */
- env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
-#else
- env->CP0_PWCtl = (arg1 & 0x800000FF);
-#endif
-}
-
-void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
-}
-
-void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
-}
-
-void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
-}
-
-void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
-}
-
-void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
-}
-
-void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = 0x0000000F;
-
- if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
- (env->insn_flags & ISA_MIPS_R6)) {
- mask |= (1 << 4);
- }
- if (env->insn_flags & ISA_MIPS_R6) {
- mask |= (1 << 5);
- }
- if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
- mask |= (1 << 29);
-
- if (arg1 & (1 << 29)) {
- env->hflags |= MIPS_HFLAG_HWRENA_ULR;
- } else {
- env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
- }
- }
-
- env->CP0_HWREna = arg1 & mask;
-}
-
-void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
-{
- cpu_mips_store_count(env, arg1);
-}
-
-void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
-{
- target_ulong old, val, mask;
- mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
- if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
- mask |= 1 << CP0EnHi_EHINV;
- }
-
- /* 1k pages not implemented */
-#if defined(TARGET_MIPS64)
- if (env->insn_flags & ISA_MIPS_R6) {
- int entryhi_r = extract64(arg1, 62, 2);
- int config0_at = extract32(env->CP0_Config0, 13, 2);
- bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
- if ((entryhi_r == 2) ||
- (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
- /* skip EntryHi.R field if new value is reserved */
- mask &= ~(0x3ull << 62);
- }
- }
- mask &= env->SEGMask;
-#endif
- old = env->CP0_EntryHi;
- val = (arg1 & mask) | (old & ~mask);
- env->CP0_EntryHi = val;
- if (ase_mt_available(env)) {
- sync_c0_entryhi(env, env->current_tc);
- }
- /* If the ASID changes, flush qemu's TLB. */
- if ((old & env->CP0_EntryHi_ASID_mask) !=
- (val & env->CP0_EntryHi_ASID_mask)) {
- tlb_flush(env_cpu(env));
- }
-}
-
-void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- other->CP0_EntryHi = arg1;
- sync_c0_entryhi(other, other_tc);
-}
-
-void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
-{
- cpu_mips_store_compare(env, arg1);
-}
-
-void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t val, old;
-
- old = env->CP0_Status;
- cpu_mips_store_status(env, arg1);
- val = env->CP0_Status;
-
- if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
- old, old & env->CP0_Cause & CP0Ca_IP_mask,
- val, val & env->CP0_Cause & CP0Ca_IP_mask,
- env->CP0_Cause);
- switch (mips_env_mmu_index(env)) {
- case 3:
- qemu_log(", ERL\n");
- break;
- case MIPS_HFLAG_UM:
- qemu_log(", UM\n");
- break;
- case MIPS_HFLAG_SM:
- qemu_log(", SM\n");
- break;
- case MIPS_HFLAG_KM:
- qemu_log("\n");
- break;
- default:
- cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
- break;
- }
- }
-}
-
-void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
- sync_c0_status(env, other, other_tc);
-}
-
-void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
-}
-
-void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
-{
- uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
- env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
-}
-
-void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
-{
- cpu_mips_store_cause(env, arg1);
-}
-
-void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- cpu_mips_store_cause(other, arg1);
-}
-
-target_ulong helper_mftc0_epc(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_EPC;
-}
-
-target_ulong helper_mftc0_ebase(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- return other->CP0_EBase;
-}
-
-void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
-{
- target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
- if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
- mask |= ~0x3FFFFFFF;
- }
- env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
-}
-
-void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
- target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
- if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
- mask |= ~0x3FFFFFFF;
- }
- other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
-}
-
-target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- switch (idx) {
- case 0: return other->CP0_Config0;
- case 1: return other->CP0_Config1;
- case 2: return other->CP0_Config2;
- case 3: return other->CP0_Config3;
- /* 4 and 5 are reserved. */
- case 6: return other->CP0_Config6;
- case 7: return other->CP0_Config7;
- default:
- break;
- }
- return 0;
-}
-
-void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
-}
-
-void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
-{
- /* tertiary/secondary caches not implemented */
- env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
-}
-
-void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
-{
- if (env->insn_flags & ASE_MICROMIPS) {
- env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
- (arg1 & (1 << CP0C3_ISA_ON_EXC));
- }
-}
-
-void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
- (arg1 & env->CP0_Config4_rw_bitmask);
-}
-
-void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
- (arg1 & env->CP0_Config5_rw_bitmask);
- env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
- 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
- compute_hflags(env);
-}
-
-void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
-{
- target_long mask = env->CP0_LLAddr_rw_bitmask;
- arg1 = arg1 << env->CP0_LLAddr_shift;
- env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
-}
-
-#define MTC0_MAAR_MASK(env) \
- ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
-
-void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
-}
-
-void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_MAAR[env->CP0_MAARI] =
- (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
- (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
-}
-
-void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
-{
- int index = arg1 & 0x3f;
- if (index == 0x3f) {
- /*
- * Software may write all ones to INDEX to determine the
- * maximum value supported.
- */
- env->CP0_MAARI = MIPS_MAAR_MAX - 1;
- } else if (index < MIPS_MAAR_MAX) {
- env->CP0_MAARI = index;
- }
- /*
- * Other than the all ones, if the value written is not supported,
- * then INDEX is unchanged from its previous value.
- */
-}
-
-void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- /*
- * Watch exceptions for instructions, data loads, data stores
- * not implemented.
- */
- env->CP0_WatchLo[sel] = (arg1 & ~0x7);
-}
-
-void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
- uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
- if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
- mask |= 0xFFFFFFFF00000000ULL; /* MMID */
- }
- env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
- env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
-}
-
-void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
- (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
-}
-
-void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
-{
- target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
- env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
-}
-
-void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Framemask = arg1; /* XXX */
-}
-
-void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
- if (arg1 & (1 << CP0DB_DM)) {
- env->hflags |= MIPS_HFLAG_DM;
- } else {
- env->hflags &= ~MIPS_HFLAG_DM;
- }
-}
-
-void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- /* XXX: Might be wrong, check with EJTAG spec. */
- if (other_tc == other->current_tc) {
- other->active_tc.CP0_Debug_tcstatus = val;
- } else {
- other->tcs[other_tc].CP0_Debug_tcstatus = val;
- }
- other->CP0_Debug = (other->CP0_Debug &
- ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
- (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
-}
-
-void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_Performance0 = arg1 & 0x000007ff;
-}
-
-void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
-{
- int32_t wst = arg1 & (1 << CP0EC_WST);
- int32_t spr = arg1 & (1 << CP0EC_SPR);
- int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
-
- env->CP0_ErrCtl = wst | spr | itc;
-
- if (itc && !wst && !spr) {
- env->hflags |= MIPS_HFLAG_ITC_CACHE;
- } else {
- env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
- }
-}
-
-void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
-{
- if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
- /*
- * If CACHE instruction is configured for ITC tags then make all
- * CP0.TagLo bits writable. The actual write to ITC Configuration
- * Tag will take care of the read-only bits.
- */
- env->CP0_TagLo = arg1;
- } else {
- env->CP0_TagLo = arg1 & 0xFFFFFCF6;
- }
-}
-
-void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_DataLo = arg1; /* XXX */
-}
-
-void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_TagHi = arg1; /* XXX */
-}
-
-void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
-{
- env->CP0_DataHi = arg1; /* XXX */
-}
-
-/* MIPS MT functions */
-target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.gpr[sel];
- } else {
- return other->tcs[other_tc].gpr[sel];
- }
-}
-
-target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.LO[sel];
- } else {
- return other->tcs[other_tc].LO[sel];
- }
-}
-
-target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.HI[sel];
- } else {
- return other->tcs[other_tc].HI[sel];
- }
-}
-
-target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.ACX[sel];
- } else {
- return other->tcs[other_tc].ACX[sel];
- }
-}
-
-target_ulong helper_mftdsp(CPUMIPSState *env)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- return other->active_tc.DSPControl;
- } else {
- return other->tcs[other_tc].DSPControl;
- }
-}
-
-void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.gpr[sel] = arg1;
- } else {
- other->tcs[other_tc].gpr[sel] = arg1;
- }
-}
-
-void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.LO[sel] = arg1;
- } else {
- other->tcs[other_tc].LO[sel] = arg1;
- }
-}
-
-void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.HI[sel] = arg1;
- } else {
- other->tcs[other_tc].HI[sel] = arg1;
- }
-}
-
-void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.ACX[sel] = arg1;
- } else {
- other->tcs[other_tc].ACX[sel] = arg1;
- }
-}
-
-void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
- if (other_tc == other->current_tc) {
- other->active_tc.DSPControl = arg1;
- } else {
- other->tcs[other_tc].DSPControl = arg1;
- }
-}
-
-/* MIPS MT functions */
-target_ulong helper_dmt(void)
-{
- /* TODO */
- return 0;
-}
-
-target_ulong helper_emt(void)
-{
- /* TODO */
- return 0;
-}
-
-target_ulong helper_dvpe(CPUMIPSState *env)
-{
- CPUState *other_cs = first_cpu;
- target_ulong prev = env->mvp->CP0_MVPControl;
-
- CPU_FOREACH(other_cs) {
- MIPSCPU *other_cpu = MIPS_CPU(other_cs);
- /* Turn off all VPEs except the one executing the dvpe. */
- if (&other_cpu->env != env) {
- other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
- mips_vpe_sleep(other_cpu);
- }
- }
- return prev;
-}
-
-target_ulong helper_evpe(CPUMIPSState *env)
-{
- CPUState *other_cs = first_cpu;
- target_ulong prev = env->mvp->CP0_MVPControl;
-
- CPU_FOREACH(other_cs) {
- MIPSCPU *other_cpu = MIPS_CPU(other_cs);
-
- if (&other_cpu->env != env
- /* If the VPE is WFI, don't disturb its sleep. */
- && !mips_vpe_is_wfi(other_cpu)) {
- /* Enable the VPE. */
- other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
- mips_vpe_wake(other_cpu); /* And wake it up. */
- }
- }
- return prev;
-}
-
-/* R6 Multi-threading */
-target_ulong helper_dvp(CPUMIPSState *env)
-{
- CPUState *other_cs = first_cpu;
- target_ulong prev = env->CP0_VPControl;
-
- if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
- CPU_FOREACH(other_cs) {
- MIPSCPU *other_cpu = MIPS_CPU(other_cs);
- /* Turn off all VPs except the one executing the dvp. */
- if (&other_cpu->env != env) {
- mips_vpe_sleep(other_cpu);
- }
- }
- env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
- }
- return prev;
-}
-
-target_ulong helper_evp(CPUMIPSState *env)
-{
- CPUState *other_cs = first_cpu;
- target_ulong prev = env->CP0_VPControl;
-
- if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
- CPU_FOREACH(other_cs) {
- MIPSCPU *other_cpu = MIPS_CPU(other_cs);
- if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
- /*
- * If the VP is WFI, don't disturb its sleep.
- * Otherwise, wake it up.
- */
- mips_vpe_wake(other_cpu);
- }
- }
- env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
- }
- return prev;
-}
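The mips_cpu_map_tc() comment above explains that a global TC index is split into a VPE index and a VPE-local TC index; a quick worked example, assuming two hardware threads per VPE (cs->nr_threads == 2):

    int tc_idx  = 5;            /* global TC index from CP0_VPEControl.TargTC */
    int vpe_idx = tc_idx / 2;   /* = 2, i.e. the third VPE                    */
    int local   = tc_idx % 2;   /* = 1, i.e. that VPE's second TC             */
    /* mips_cpu_map_tc() then returns the env of qemu_get_cpu(vpe_idx), or the
     * caller's own env if no such CPU exists, and rewrites *tc to the local
     * index. */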
diff --git a/target/mips/tcg/sysemu/meson.build b/target/mips/tcg/sysemu/meson.build
deleted file mode 100644
index ec665a4..0000000
--- a/target/mips/tcg/sysemu/meson.build
+++ /dev/null
@@ -1,10 +0,0 @@
-mips_system_ss.add(files(
- 'cp0_helper.c',
- 'mips-semi.c',
- 'special_helper.c',
- 'tlb_helper.c',
-))
-
-mips_system_ss.add(when: 'TARGET_MIPS64', if_true: files(
- 'lcsr_helper.c',
-))
diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c
deleted file mode 100644
index 5ba06e9..0000000
--- a/target/mips/tcg/sysemu/mips-semi.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Unified Hosting Interface syscalls.
- *
- * Copyright (c) 2015 Imagination Technologies
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "qemu/log.h"
-#include "gdbstub/syscalls.h"
-#include "gdbstub/helpers.h"
-#include "semihosting/uaccess.h"
-#include "semihosting/semihost.h"
-#include "semihosting/console.h"
-#include "semihosting/syscalls.h"
-#include "internal.h"
-
-typedef enum UHIOp {
- UHI_exit = 1,
- UHI_open = 2,
- UHI_close = 3,
- UHI_read = 4,
- UHI_write = 5,
- UHI_lseek = 6,
- UHI_unlink = 7,
- UHI_fstat = 8,
- UHI_argc = 9,
- UHI_argnlen = 10,
- UHI_argn = 11,
- UHI_plog = 13,
- UHI_assert = 14,
- UHI_pread = 19,
- UHI_pwrite = 20,
- UHI_link = 22
-} UHIOp;
-
-typedef struct UHIStat {
- int16_t uhi_st_dev;
- uint16_t uhi_st_ino;
- uint32_t uhi_st_mode;
- uint16_t uhi_st_nlink;
- uint16_t uhi_st_uid;
- uint16_t uhi_st_gid;
- int16_t uhi_st_rdev;
- uint64_t uhi_st_size;
- uint64_t uhi_st_atime;
- uint64_t uhi_st_spare1;
- uint64_t uhi_st_mtime;
- uint64_t uhi_st_spare2;
- uint64_t uhi_st_ctime;
- uint64_t uhi_st_spare3;
- uint64_t uhi_st_blksize;
- uint64_t uhi_st_blocks;
- uint64_t uhi_st_spare4[2];
-} UHIStat;
-
-enum UHIOpenFlags {
- UHIOpen_RDONLY = 0x0,
- UHIOpen_WRONLY = 0x1,
- UHIOpen_RDWR = 0x2,
- UHIOpen_APPEND = 0x8,
- UHIOpen_CREAT = 0x200,
- UHIOpen_TRUNC = 0x400,
- UHIOpen_EXCL = 0x800
-};
-
-enum UHIErrno {
- UHI_EACCESS = 13,
- UHI_EAGAIN = 11,
- UHI_EBADF = 9,
- UHI_EBADMSG = 77,
- UHI_EBUSY = 16,
- UHI_ECONNRESET = 104,
- UHI_EEXIST = 17,
- UHI_EFBIG = 27,
- UHI_EINTR = 4,
- UHI_EINVAL = 22,
- UHI_EIO = 5,
- UHI_EISDIR = 21,
- UHI_ELOOP = 92,
- UHI_EMFILE = 24,
- UHI_EMLINK = 31,
- UHI_ENAMETOOLONG = 91,
- UHI_ENETDOWN = 115,
- UHI_ENETUNREACH = 114,
- UHI_ENFILE = 23,
- UHI_ENOBUFS = 105,
- UHI_ENOENT = 2,
- UHI_ENOMEM = 12,
- UHI_ENOSPC = 28,
- UHI_ENOSR = 63,
- UHI_ENOTCONN = 128,
- UHI_ENOTDIR = 20,
- UHI_ENXIO = 6,
- UHI_EOVERFLOW = 139,
- UHI_EPERM = 1,
- UHI_EPIPE = 32,
- UHI_ERANGE = 34,
- UHI_EROFS = 30,
- UHI_ESPIPE = 29,
- UHI_ETIMEDOUT = 116,
- UHI_ETXTBSY = 26,
- UHI_EWOULDBLOCK = 11,
- UHI_EXDEV = 18,
-};
-
-static void report_fault(CPUMIPSState *env)
-{
- int op = env->active_tc.gpr[25];
- error_report("Fault during UHI operation %d", op);
- abort();
-}
-
-static void uhi_cb(CPUState *cs, uint64_t ret, int err)
-{
- CPUMIPSState *env = cpu_env(cs);
-
-#define E(N) case E##N: err = UHI_E##N; break
-
- switch (err) {
- case 0:
- break;
- E(PERM);
- E(NOENT);
- E(INTR);
- E(BADF);
- E(BUSY);
- E(EXIST);
- E(NOTDIR);
- E(ISDIR);
- E(INVAL);
- E(NFILE);
- E(MFILE);
- E(FBIG);
- E(NOSPC);
- E(SPIPE);
- E(ROFS);
- E(NAMETOOLONG);
- default:
- err = UHI_EINVAL;
- break;
- case EFAULT:
- report_fault(env);
- }
-
-#undef E
-
- env->active_tc.gpr[2] = ret;
- env->active_tc.gpr[3] = err;
-}
-
-static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err)
-{
- QEMU_BUILD_BUG_ON(sizeof(UHIStat) < sizeof(struct gdb_stat));
-
- if (!err) {
- CPUMIPSState *env = cpu_env(cs);
- target_ulong addr = env->active_tc.gpr[5];
- UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1);
- struct gdb_stat s;
-
- if (!dst) {
- report_fault(env);
- }
-
- memcpy(&s, dst, sizeof(struct gdb_stat));
- memset(dst, 0, sizeof(UHIStat));
-
- dst->uhi_st_dev = tswap16(be32_to_cpu(s.gdb_st_dev));
- dst->uhi_st_ino = tswap16(be32_to_cpu(s.gdb_st_ino));
- dst->uhi_st_mode = tswap32(be32_to_cpu(s.gdb_st_mode));
- dst->uhi_st_nlink = tswap16(be32_to_cpu(s.gdb_st_nlink));
- dst->uhi_st_uid = tswap16(be32_to_cpu(s.gdb_st_uid));
- dst->uhi_st_gid = tswap16(be32_to_cpu(s.gdb_st_gid));
- dst->uhi_st_rdev = tswap16(be32_to_cpu(s.gdb_st_rdev));
- dst->uhi_st_size = tswap64(be64_to_cpu(s.gdb_st_size));
- dst->uhi_st_atime = tswap64(be32_to_cpu(s.gdb_st_atime));
- dst->uhi_st_mtime = tswap64(be32_to_cpu(s.gdb_st_mtime));
- dst->uhi_st_ctime = tswap64(be32_to_cpu(s.gdb_st_ctime));
- dst->uhi_st_blksize = tswap64(be64_to_cpu(s.gdb_st_blksize));
- dst->uhi_st_blocks = tswap64(be64_to_cpu(s.gdb_st_blocks));
-
- unlock_user(dst, addr, sizeof(UHIStat));
- }
-
- uhi_cb(cs, ret, err);
-}
-
-void mips_semihosting(CPUMIPSState *env)
-{
- CPUState *cs = env_cpu(env);
- target_ulong *gpr = env->active_tc.gpr;
- const UHIOp op = gpr[25];
- char *p;
-
- switch (op) {
- case UHI_exit:
- gdb_exit(gpr[4]);
- exit(gpr[4]);
-
- case UHI_open:
- {
- target_ulong fname = gpr[4];
- int ret = -1;
-
- p = lock_user_string(fname);
- if (!p) {
- report_fault(env);
- }
- if (!strcmp("/dev/stdin", p)) {
- ret = 0;
- } else if (!strcmp("/dev/stdout", p)) {
- ret = 1;
- } else if (!strcmp("/dev/stderr", p)) {
- ret = 2;
- }
- unlock_user(p, fname, 0);
-
- /* FIXME: reusing a guest fd doesn't seem correct. */
- if (ret >= 0) {
- gpr[2] = ret;
- break;
- }
-
- semihost_sys_open(cs, uhi_cb, fname, 0, gpr[5], gpr[6]);
- }
- break;
-
- case UHI_close:
- semihost_sys_close(cs, uhi_cb, gpr[4]);
- break;
- case UHI_read:
- semihost_sys_read(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
- break;
- case UHI_write:
- semihost_sys_write(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
- break;
- case UHI_lseek:
- semihost_sys_lseek(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
- break;
- case UHI_unlink:
- semihost_sys_remove(cs, uhi_cb, gpr[4], 0);
- break;
- case UHI_fstat:
- semihost_sys_fstat(cs, uhi_fstat_cb, gpr[4], gpr[5]);
- break;
-
- case UHI_argc:
- gpr[2] = semihosting_get_argc();
- break;
- case UHI_argnlen:
- {
- const char *s = semihosting_get_arg(gpr[4]);
- gpr[2] = s ? strlen(s) : -1;
- }
- break;
- case UHI_argn:
- {
- const char *s = semihosting_get_arg(gpr[4]);
- target_ulong addr;
- size_t len;
-
- if (!s) {
- gpr[2] = -1;
- break;
- }
- len = strlen(s) + 1;
- addr = gpr[5];
- p = lock_user(VERIFY_WRITE, addr, len, 0);
- if (!p) {
- report_fault(env);
- }
- memcpy(p, s, len);
- unlock_user(p, addr, len);
- gpr[2] = 0;
- }
- break;
-
- case UHI_plog:
- {
- target_ulong addr = gpr[4];
- ssize_t len = target_strlen(addr);
- GString *str;
- char *pct_d;
-
- if (len < 0) {
- report_fault(env);
- }
- p = lock_user(VERIFY_READ, addr, len, 1);
- if (!p) {
- report_fault(env);
- }
-
- pct_d = strstr(p, "%d");
- if (!pct_d) {
- unlock_user(p, addr, 0);
- semihost_sys_write(cs, uhi_cb, 2, addr, len);
- break;
- }
-
- str = g_string_new_len(p, pct_d - p);
- g_string_append_printf(str, "%d%s", (int)gpr[5], pct_d + 2);
- unlock_user(p, addr, 0);
-
- /*
- * When we're using gdb, we need a guest address, so
- * drop the string onto the stack below the stack pointer.
- */
- if (use_gdb_syscalls()) {
- addr = gpr[29] - str->len;
- p = lock_user(VERIFY_WRITE, addr, str->len, 0);
- if (!p) {
- report_fault(env);
- }
- memcpy(p, str->str, str->len);
- unlock_user(p, addr, str->len);
- semihost_sys_write(cs, uhi_cb, 2, addr, str->len);
- } else {
- gpr[2] = qemu_semihosting_console_write(str->str, str->len);
- }
- g_string_free(str, true);
- }
- break;
-
- case UHI_assert:
- {
- const char *msg, *file;
-
- msg = lock_user_string(gpr[4]);
- if (!msg) {
- msg = "<EFAULT>";
- }
- file = lock_user_string(gpr[5]);
- if (!file) {
- file = "<EFAULT>";
- }
-
- error_report("UHI assertion \"%s\": file \"%s\", line %d",
- msg, file, (int)gpr[6]);
- abort();
- }
-
- default:
- error_report("Unknown UHI operation %d", op);
- abort();
- }
- return;
-}
diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c
deleted file mode 100644
index 9ce5e2c..0000000
--- a/target/mips/tcg/sysemu/special_helper.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * QEMU MIPS emulation: Special opcode helpers
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "qemu/osdep.h"
-#include "qemu/log.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "internal.h"
-
-/* Specials */
-target_ulong helper_di(CPUMIPSState *env)
-{
- target_ulong t0 = env->CP0_Status;
-
- env->CP0_Status = t0 & ~(1 << CP0St_IE);
- return t0;
-}
-
-target_ulong helper_ei(CPUMIPSState *env)
-{
- target_ulong t0 = env->CP0_Status;
-
- env->CP0_Status = t0 | (1 << CP0St_IE);
- return t0;
-}
-
-static void debug_pre_eret(CPUMIPSState *env)
-{
- if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
- env->active_tc.PC, env->CP0_EPC);
- if (env->CP0_Status & (1 << CP0St_ERL)) {
- qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
- }
- if (env->hflags & MIPS_HFLAG_DM) {
- qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
- }
- qemu_log("\n");
- }
-}
-
-static void debug_post_eret(CPUMIPSState *env)
-{
- if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
- env->active_tc.PC, env->CP0_EPC);
- if (env->CP0_Status & (1 << CP0St_ERL)) {
- qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
- }
- if (env->hflags & MIPS_HFLAG_DM) {
- qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
- }
- switch (mips_env_mmu_index(env)) {
- case 3:
- qemu_log(", ERL\n");
- break;
- case MIPS_HFLAG_UM:
- qemu_log(", UM\n");
- break;
- case MIPS_HFLAG_SM:
- qemu_log(", SM\n");
- break;
- case MIPS_HFLAG_KM:
- qemu_log("\n");
- break;
- default:
- cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
- break;
- }
- }
-}
-
-bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
-{
- CPUMIPSState *env = cpu_env(cs);
-
- if ((env->hflags & MIPS_HFLAG_BMASK) != 0
- && !tcg_cflags_has(cs, CF_PCREL) && env->active_tc.PC != tb->pc) {
- env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
- env->hflags &= ~MIPS_HFLAG_BMASK;
- return true;
- }
- return false;
-}
-
-static inline void exception_return(CPUMIPSState *env)
-{
- debug_pre_eret(env);
- if (env->CP0_Status & (1 << CP0St_ERL)) {
- mips_env_set_pc(env, env->CP0_ErrorEPC);
- env->CP0_Status &= ~(1 << CP0St_ERL);
- } else {
- mips_env_set_pc(env, env->CP0_EPC);
- env->CP0_Status &= ~(1 << CP0St_EXL);
- }
- compute_hflags(env);
- debug_post_eret(env);
-}
-
-void helper_eret(CPUMIPSState *env)
-{
- exception_return(env);
- env->CP0_LLAddr = 1;
- env->lladdr = 1;
-}
-
-void helper_eretnc(CPUMIPSState *env)
-{
- exception_return(env);
-}
-
-void helper_deret(CPUMIPSState *env)
-{
- debug_pre_eret(env);
-
- env->hflags &= ~MIPS_HFLAG_DM;
- compute_hflags(env);
-
- mips_env_set_pc(env, env->CP0_DEPC);
-
- debug_post_eret(env);
-}
-
-void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
-{
- static const char *const type_name[] = {
- "Primary Instruction",
- "Primary Data or Unified Primary",
- "Tertiary",
- "Secondary"
- };
- uint32_t cache_type = extract32(op, 0, 2);
- uint32_t cache_operation = extract32(op, 2, 3);
- target_ulong index = addr & 0x1fffffff;
-
- switch (cache_operation) {
- case 0b010: /* Index Store Tag */
- memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
- MO_64, MEMTXATTRS_UNSPECIFIED);
- break;
- case 0b001: /* Index Load Tag */
- memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
- MO_64, MEMTXATTRS_UNSPECIFIED);
- break;
- case 0b000: /* Index Invalidate */
- case 0b100: /* Hit Invalidate */
- case 0b110: /* Hit Writeback */
- /* no-op */
- break;
- default:
- qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
- cache_operation, type_name[cache_type]);
- break;
- }
-}
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
deleted file mode 100644
index 3ba6d36..0000000
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ /dev/null
@@ -1,1420 +0,0 @@
-/*
- * MIPS TLB (Translation lookaside buffer) helpers.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "qemu/bitops.h"
-
-#include "cpu.h"
-#include "internal.h"
-#include "exec/exec-all.h"
-#include "exec/page-protection.h"
-#include "exec/cpu_ldst.h"
-#include "exec/log.h"
-#include "exec/helper-proto.h"
-
-/* TLB management */
-static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
-{
- /* Discard entries from env->tlb[first] onwards. */
- while (env->tlb->tlb_in_use > first) {
- r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
- }
-}
-
-static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
-{
-#if defined(TARGET_MIPS64)
- return extract64(entrylo, 6, 54);
-#else
- return extract64(entrylo, 6, 24) | /* PFN */
- (extract64(entrylo, 32, 32) << 24); /* PFNX */
-#endif
-}
-
-static void r4k_fill_tlb(CPUMIPSState *env, int idx)
-{
- r4k_tlb_t *tlb;
- uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
-
- /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
- tlb = &env->tlb->mmu.r4k.tlb[idx];
- if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
- tlb->EHINV = 1;
- return;
- }
- tlb->EHINV = 0;
- tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
-#if defined(TARGET_MIPS64)
- tlb->VPN &= env->SEGMask;
-#endif
- tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- tlb->MMID = env->CP0_MemoryMapID;
- tlb->PageMask = env->CP0_PageMask;
- tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
- tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
- tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
- tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
- tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
- tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
- tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
- tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
- tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
- tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
- tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
- tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
- tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
-}
-
-static void r4k_helper_tlbinv(CPUMIPSState *env)
-{
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- uint32_t tlb_mmid;
- r4k_tlb_t *tlb;
- int idx;
-
- MMID = mi ? MMID : (uint32_t) ASID;
- for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
- tlb = &env->tlb->mmu.r4k.tlb[idx];
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- if (!tlb->G && tlb_mmid == MMID) {
- tlb->EHINV = 1;
- }
- }
- cpu_mips_tlb_flush(env);
-}
-
-static void r4k_helper_tlbinvf(CPUMIPSState *env)
-{
- int idx;
-
- for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
- env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
- }
- cpu_mips_tlb_flush(env);
-}
-
-static void r4k_helper_tlbwi(CPUMIPSState *env)
-{
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- target_ulong VPN;
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- uint32_t tlb_mmid;
- bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
- r4k_tlb_t *tlb;
- int idx;
-
- MMID = mi ? MMID : (uint32_t) ASID;
-
- idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
- tlb = &env->tlb->mmu.r4k.tlb[idx];
- VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
-#if defined(TARGET_MIPS64)
- VPN &= env->SEGMask;
-#endif
- EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
- G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
- V0 = (env->CP0_EntryLo0 & 2) != 0;
- D0 = (env->CP0_EntryLo0 & 4) != 0;
-    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
-    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
-    V1 = (env->CP0_EntryLo1 & 2) != 0;
-    D1 = (env->CP0_EntryLo1 & 4) != 0;
-    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
-    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
-
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- /*
- * Discard cached TLB entries, unless tlbwi is just upgrading access
- * permissions on the current entry.
- */
- if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
- (!tlb->EHINV && EHINV) ||
- (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
- (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
- (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
- (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
- r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
- }
-
- r4k_invalidate_tlb(env, idx, 0);
- r4k_fill_tlb(env, idx);
-}
-
-static void r4k_helper_tlbwr(CPUMIPSState *env)
-{
- int r = cpu_mips_get_random(env);
-
- r4k_invalidate_tlb(env, r, 1);
- r4k_fill_tlb(env, r);
-}
-
-static void r4k_helper_tlbp(CPUMIPSState *env)
-{
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- r4k_tlb_t *tlb;
- target_ulong mask;
- target_ulong tag;
- target_ulong VPN;
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- uint32_t tlb_mmid;
- int i;
-
- MMID = mi ? MMID : (uint32_t) ASID;
- for (i = 0; i < env->tlb->nb_tlb; i++) {
- tlb = &env->tlb->mmu.r4k.tlb[i];
- /* 1k pages are not supported. */
- mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
- tag = env->CP0_EntryHi & ~mask;
- VPN = tlb->VPN & ~mask;
-#if defined(TARGET_MIPS64)
- tag &= env->SEGMask;
-#endif
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- /* Check ASID/MMID, virtual page number & size */
- if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
- /* TLB match */
- env->CP0_Index = i;
- break;
- }
- }
- if (i == env->tlb->nb_tlb) {
- /* No match. Discard any shadow entries, if any of them match. */
- for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
- tlb = &env->tlb->mmu.r4k.tlb[i];
- /* 1k pages are not supported. */
- mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
- tag = env->CP0_EntryHi & ~mask;
- VPN = tlb->VPN & ~mask;
-#if defined(TARGET_MIPS64)
- tag &= env->SEGMask;
-#endif
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- /* Check ASID/MMID, virtual page number & size */
- if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
- r4k_mips_tlb_flush_extra(env, i);
- break;
- }
- }
-
- env->CP0_Index |= 0x80000000;
- }
-}
-
-static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
-{
-#if defined(TARGET_MIPS64)
- return tlb_pfn << 6;
-#else
- return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
- (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
-#endif
-}
-
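Illustrative aside, not part of the patch: get_tlb_pfn_from_entrylo() and get_entrylo_pfn_from_tlb() above are inverses over the 32-bit EntryLo layout (PFN in bits 6..29, PFNX in bits 32..63). A minimal standalone round-trip sketch with a made-up frame number:

    #include <assert.h>
    #include <stdint.h>

    /* Same packing as the non-MIPS64 branches above. */
    static uint64_t pfn_from_entrylo(uint64_t lo)
    {
        return ((lo >> 6) & 0xffffffULL) |            /* PFN  */
               (((lo >> 32) & 0xffffffffULL) << 24);  /* PFNX */
    }

    static uint64_t entrylo_from_pfn(uint64_t pfn)
    {
        return ((pfn & 0xffffffULL) << 6) |           /* PFN  */
               (((pfn >> 24) & 0xffffffffULL) << 32); /* PFNX */
    }

    int main(void)
    {
        uint64_t pfn = 0x123456789aULL;   /* arbitrary 37-bit frame number */
        assert(pfn_from_entrylo(entrylo_from_pfn(pfn)) == pfn);
        return 0;
    }
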
-static void r4k_helper_tlbr(CPUMIPSState *env)
-{
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- uint32_t tlb_mmid;
- r4k_tlb_t *tlb;
- int idx;
-
- MMID = mi ? MMID : (uint32_t) ASID;
- idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
- tlb = &env->tlb->mmu.r4k.tlb[idx];
-
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- /* If this will change the current ASID/MMID, flush qemu's TLB. */
- if (MMID != tlb_mmid) {
- cpu_mips_tlb_flush(env);
- }
-
- r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
-
- if (tlb->EHINV) {
- env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
- env->CP0_PageMask = 0;
- env->CP0_EntryLo0 = 0;
- env->CP0_EntryLo1 = 0;
- } else {
- env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
- env->CP0_MemoryMapID = tlb->MMID;
- env->CP0_PageMask = tlb->PageMask;
- env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
- ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
- ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
- get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
- env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
- ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
- ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
- get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
- }
-}
-
-void helper_tlbwi(CPUMIPSState *env)
-{
- env->tlb->helper_tlbwi(env);
-}
-
-void helper_tlbwr(CPUMIPSState *env)
-{
- env->tlb->helper_tlbwr(env);
-}
-
-void helper_tlbp(CPUMIPSState *env)
-{
- env->tlb->helper_tlbp(env);
-}
-
-void helper_tlbr(CPUMIPSState *env)
-{
- env->tlb->helper_tlbr(env);
-}
-
-void helper_tlbinv(CPUMIPSState *env)
-{
- env->tlb->helper_tlbinv(env);
-}
-
-void helper_tlbinvf(CPUMIPSState *env)
-{
- env->tlb->helper_tlbinvf(env);
-}
-
-static void global_invalidate_tlb(CPUMIPSState *env,
- uint32_t invMsgVPN2,
- uint8_t invMsgR,
- uint32_t invMsgMMid,
- bool invAll,
- bool invVAMMid,
- bool invMMid,
- bool invVA)
-{
-
- int idx;
- r4k_tlb_t *tlb;
- bool VAMatch;
- bool MMidMatch;
-
- for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
- tlb = &env->tlb->mmu.r4k.tlb[idx];
- VAMatch =
- (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
-#ifdef TARGET_MIPS64
- &&
- (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
-#endif
- );
- MMidMatch = tlb->MMID == invMsgMMid;
- if ((invAll && (idx > env->CP0_Wired)) ||
- (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
- (VAMatch && invVA) ||
- (MMidMatch && !(tlb->G) && invMMid)) {
- tlb->EHINV = 1;
- }
- }
- cpu_mips_tlb_flush(env);
-}
-
-void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
-{
- bool invAll = type == 0;
- bool invVA = type == 1;
- bool invMMid = type == 2;
- bool invVAMMid = type == 3;
- uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
- uint8_t invMsgR = 0;
- uint32_t invMsgMMid = env->CP0_MemoryMapID;
- CPUState *other_cs = first_cpu;
-
-#ifdef TARGET_MIPS64
- invMsgR = extract64(arg, 62, 2);
-#endif
-
- CPU_FOREACH(other_cs) {
- MIPSCPU *other_cpu = MIPS_CPU(other_cs);
- global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
- invAll, invVAMMid, invMMid, invVA);
- }
-}
-
-/* no MMU emulation */
-static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
- target_ulong address, MMUAccessType access_type)
-{
- *physical = address;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return TLBRET_MATCH;
-}
-
-/* fixed mapping MMU emulation */
-static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
- int *prot, target_ulong address,
- MMUAccessType access_type)
-{
- if (address <= (int32_t)0x7FFFFFFFUL) {
- if (!(env->CP0_Status & (1 << CP0St_ERL))) {
- *physical = address + 0x40000000UL;
- } else {
- *physical = address;
- }
- } else if (address <= (int32_t)0xBFFFFFFFUL) {
- *physical = address & 0x1FFFFFFF;
- } else {
- *physical = address;
- }
-
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return TLBRET_MATCH;
-}
-
-/* MIPS32/MIPS64 R4000-style MMU emulation */
-static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
- target_ulong address, MMUAccessType access_type)
-{
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- uint32_t tlb_mmid;
- int i;
-
- MMID = mi ? MMID : (uint32_t) ASID;
-
- for (i = 0; i < env->tlb->tlb_in_use; i++) {
- r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
- /* 1k pages are not supported. */
- target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
- target_ulong tag = address & ~mask;
- target_ulong VPN = tlb->VPN & ~mask;
-#if defined(TARGET_MIPS64)
- tag &= env->SEGMask;
-#endif
-
- /* Check ASID/MMID, virtual page number & size */
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
- /* TLB match */
- int n = !!(address & mask & ~(mask >> 1));
- /* Check access rights */
- if (!(n ? tlb->V1 : tlb->V0)) {
- return TLBRET_INVALID;
- }
- if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
- return TLBRET_XI;
- }
- if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
- return TLBRET_RI;
- }
- if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
- *physical = tlb->PFN[n] | (address & (mask >> 1));
- *prot = PAGE_READ;
- if (n ? tlb->D1 : tlb->D0) {
- *prot |= PAGE_WRITE;
- }
- if (!(n ? tlb->XI1 : tlb->XI0)) {
- *prot |= PAGE_EXEC;
- }
- return TLBRET_MATCH;
- }
- return TLBRET_DIRTY;
- }
- }
- return TLBRET_NOMATCH;
-}
-
-static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
-{
- env->tlb->nb_tlb = 1;
- env->tlb->map_address = &no_mmu_map_address;
-}
-
-static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
-{
- env->tlb->nb_tlb = 1;
- env->tlb->map_address = &fixed_mmu_map_address;
-}
-
-static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
-{
- env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
- env->tlb->map_address = &r4k_map_address;
- env->tlb->helper_tlbwi = r4k_helper_tlbwi;
- env->tlb->helper_tlbwr = r4k_helper_tlbwr;
- env->tlb->helper_tlbp = r4k_helper_tlbp;
- env->tlb->helper_tlbr = r4k_helper_tlbr;
- env->tlb->helper_tlbinv = r4k_helper_tlbinv;
- env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
-}
-
-void mmu_init(CPUMIPSState *env, const mips_def_t *def)
-{
- env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
-
- switch (def->mmu_type) {
- case MMU_TYPE_NONE:
- no_mmu_init(env, def);
- break;
- case MMU_TYPE_R4000:
- r4k_mmu_init(env, def);
- break;
- case MMU_TYPE_FMT:
- fixed_mmu_init(env, def);
- break;
- case MMU_TYPE_R3000:
- case MMU_TYPE_R6000:
- case MMU_TYPE_R8000:
- default:
- cpu_abort(env_cpu(env), "MMU type not supported\n");
- }
-}
-
-void cpu_mips_tlb_flush(CPUMIPSState *env)
-{
- /* Flush qemu's TLB and discard all shadowed entries. */
- tlb_flush(env_cpu(env));
- env->tlb->tlb_in_use = env->tlb->nb_tlb;
-}
-
-static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
- MMUAccessType access_type, int tlb_error)
-{
- CPUState *cs = env_cpu(env);
- int exception = 0, error_code = 0;
-
- if (access_type == MMU_INST_FETCH) {
- error_code |= EXCP_INST_NOTAVAIL;
- }
-
- switch (tlb_error) {
- default:
- case TLBRET_BADADDR:
- /* Reference to kernel address from user mode or supervisor mode */
- /* Reference to supervisor address from user mode */
- if (access_type == MMU_DATA_STORE) {
- exception = EXCP_AdES;
- } else {
- exception = EXCP_AdEL;
- }
- break;
- case TLBRET_NOMATCH:
- /* No TLB match for a mapped address */
- if (access_type == MMU_DATA_STORE) {
- exception = EXCP_TLBS;
- } else {
- exception = EXCP_TLBL;
- }
- error_code |= EXCP_TLB_NOMATCH;
- break;
- case TLBRET_INVALID:
- /* TLB match with no valid bit */
- if (access_type == MMU_DATA_STORE) {
- exception = EXCP_TLBS;
- } else {
- exception = EXCP_TLBL;
- }
- break;
- case TLBRET_DIRTY:
- /* TLB match but 'D' bit is cleared */
- exception = EXCP_LTLBL;
- break;
- case TLBRET_XI:
- /* Execute-Inhibit Exception */
- if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
- exception = EXCP_TLBXI;
- } else {
- exception = EXCP_TLBL;
- }
- break;
- case TLBRET_RI:
- /* Read-Inhibit Exception */
- if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
- exception = EXCP_TLBRI;
- } else {
- exception = EXCP_TLBL;
- }
- break;
- }
- /* Raise exception */
- if (!(env->hflags & MIPS_HFLAG_DM)) {
- env->CP0_BadVAddr = address;
- }
- env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
- ((address >> 9) & 0x007ffff0);
- env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
- (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
- (address & (TARGET_PAGE_MASK << 1));
-#if defined(TARGET_MIPS64)
- env->CP0_EntryHi &= env->SEGMask;
- env->CP0_XContext =
- (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
- (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */
- (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */
-#endif
- cs->exception_index = exception;
- env->error_code = error_code;
-}
-
-#if !defined(TARGET_MIPS64)
-
-/*
- * Perform hardware page table walk
- *
- * Memory accesses are performed using the KERNEL privilege level.
- * Synchronous exceptions detected on memory accesses cause a silent exit
- * from page table walking, resulting in a TLB or XTLB Refill exception.
- *
- * Implementations are not required to support page table walk memory
- * accesses from mapped memory regions. When an unsupported access is
- * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
- * exception.
- *
- * Note that if an exception is caused by AddressTranslation or LoadMemory
- * functions, the exception is not taken, a silent exit is taken,
- * resulting in a TLB or XTLB Refill exception.
- */
-
-static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
- uint64_t *pte)
-{
- if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
- return false;
- }
- if (entry_size == 64) {
- *pte = cpu_ldq_code(env, vaddr);
- } else {
- *pte = cpu_ldl_code(env, vaddr);
- }
- return true;
-}
-
-static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
- int entry_size, int ptei)
-{
- uint64_t result = entry;
- uint64_t rixi;
- if (ptei > entry_size) {
- ptei -= 32;
- }
- result >>= (ptei - 2);
- rixi = result & 3;
- result >>= 2;
- result |= rixi << CP0EnLo_XI;
- return result;
-}
-
-static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
- int directory_index, bool *huge_page, bool *hgpg_directory_hit,
- uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
- unsigned directory_shift, unsigned leaf_shift, int ptw_mmu_idx)
-{
- int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
- int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
- int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
- int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
- uint32_t direntry_size = 1 << (directory_shift + 3);
- uint32_t leafentry_size = 1 << (leaf_shift + 3);
- uint64_t entry;
- uint64_t paddr;
- int prot;
- uint64_t lsb = 0;
- uint64_t w = 0;
-
- if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
- ptw_mmu_idx) != TLBRET_MATCH) {
- /* wrong base address */
- return 0;
- }
- if (!get_pte(env, *vaddr, direntry_size, &entry)) {
- return 0;
- }
-
- if ((entry & (1 << psn)) && hugepg) {
- *huge_page = true;
- *hgpg_directory_hit = true;
- entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
- w = directory_index - 1;
- if (directory_index & 0x1) {
- /* Generate adjacent page from same PTE for odd TLB page */
- lsb = BIT_ULL(w) >> 6;
- *pw_entrylo0 = entry & ~lsb; /* even page */
- *pw_entrylo1 = entry | lsb; /* odd page */
- } else if (dph) {
- int oddpagebit = 1 << leaf_shift;
- uint64_t vaddr2 = *vaddr ^ oddpagebit;
- if (*vaddr & oddpagebit) {
- *pw_entrylo1 = entry;
- } else {
- *pw_entrylo0 = entry;
- }
- if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
- ptw_mmu_idx) != TLBRET_MATCH) {
- return 0;
- }
- if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
- return 0;
- }
- entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
- if (*vaddr & oddpagebit) {
- *pw_entrylo0 = entry;
- } else {
- *pw_entrylo1 = entry;
- }
- } else {
- return 0;
- }
- return 1;
- } else {
- *vaddr = entry;
- return 2;
- }
-}
-
-static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
- int ptw_mmu_idx)
-{
- int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
- int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
- int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
- int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
- int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
-
- /* Initial values */
- bool huge_page = false;
- bool hgpg_bdhit = false;
- bool hgpg_gdhit = false;
- bool hgpg_udhit = false;
- bool hgpg_mdhit = false;
-
- int32_t pw_pagemask = 0;
- target_ulong pw_entryhi = 0;
- uint64_t pw_entrylo0 = 0;
- uint64_t pw_entrylo1 = 0;
-
- /* Native pointer size */
-    /* For the 32-bit architectures, this bit is fixed to 0. */
- int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
-
- /* Indices from PWField */
- int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
- int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
- int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
- int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
- int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
-
- /* Indices computed from faulting address */
- int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
- int uindex = (address >> pf_udw) & ((1 << udw) - 1);
- int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
- int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
-
- /* Other HTW configs */
- int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
- unsigned directory_shift, leaf_shift;
-
- /* Offsets into tables */
- unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;
- uint32_t leafentry_size;
-
- /* Starting address - Page Table Base */
- uint64_t vaddr = env->CP0_PWBase;
-
- uint64_t dir_entry;
- uint64_t paddr;
- int prot;
- int m;
-
- if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
- /* walker is unimplemented */
- return false;
- }
- if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
- /* walker is disabled */
- return false;
- }
- if (!(gdw > 0 || udw > 0 || mdw > 0)) {
- /* no structure to walk */
- return false;
- }
- if (ptew > 1) {
- return false;
- }
-
- /* HTW Shift values (depend on entry size) */
- directory_shift = (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
- leaf_shift = (ptew == 1) ? native_shift + 1 : native_shift;
-
- goffset = gindex << directory_shift;
- uoffset = uindex << directory_shift;
- moffset = mindex << directory_shift;
- ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
- ptoffset1 = ptoffset0 | (1 << (leaf_shift));
-
- leafentry_size = 1 << (leaf_shift + 3);
-
- /* Global Directory */
- if (gdw > 0) {
- vaddr |= goffset;
- switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
- &pw_entrylo0, &pw_entrylo1,
- directory_shift, leaf_shift, ptw_mmu_idx))
- {
- case 0:
- return false;
- case 1:
- goto refill;
- case 2:
- default:
- break;
- }
- }
-
- /* Upper directory */
- if (udw > 0) {
- vaddr |= uoffset;
- switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
- &pw_entrylo0, &pw_entrylo1,
- directory_shift, leaf_shift, ptw_mmu_idx))
- {
- case 0:
- return false;
- case 1:
- goto refill;
- case 2:
- default:
- break;
- }
- }
-
- /* Middle directory */
- if (mdw > 0) {
- vaddr |= moffset;
- switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
- &pw_entrylo0, &pw_entrylo1,
- directory_shift, leaf_shift, ptw_mmu_idx))
- {
- case 0:
- return false;
- case 1:
- goto refill;
- case 2:
- default:
- break;
- }
- }
-
- /* Leaf Level Page Table - First half of PTE pair */
- vaddr |= ptoffset0;
- if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
- ptw_mmu_idx) != TLBRET_MATCH) {
- return false;
- }
- if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
- return false;
- }
- dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
- pw_entrylo0 = dir_entry;
-
- /* Leaf Level Page Table - Second half of PTE pair */
- vaddr |= ptoffset1;
- if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
- ptw_mmu_idx) != TLBRET_MATCH) {
- return false;
- }
- if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
- return false;
- }
- dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
- pw_entrylo1 = dir_entry;
-
-refill:
-
- m = (1 << pf_ptw) - 1;
-
- if (huge_page) {
- switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
- hgpg_mdhit)
- {
- case 4:
- m = (1 << pf_gdw) - 1;
- if (pf_gdw & 1) {
- m >>= 1;
- }
- break;
- case 2:
- m = (1 << pf_udw) - 1;
- if (pf_udw & 1) {
- m >>= 1;
- }
- break;
- case 1:
- m = (1 << pf_mdw) - 1;
- if (pf_mdw & 1) {
- m >>= 1;
- }
- break;
- }
- }
- pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
- update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
- pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
- {
- target_ulong tmp_entryhi = env->CP0_EntryHi;
- int32_t tmp_pagemask = env->CP0_PageMask;
- uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
- uint64_t tmp_entrylo1 = env->CP0_EntryLo1;
-
- env->CP0_EntryHi = pw_entryhi;
- env->CP0_PageMask = pw_pagemask;
- env->CP0_EntryLo0 = pw_entrylo0;
- env->CP0_EntryLo1 = pw_entrylo1;
-
- /*
- * The hardware page walker inserts a page into the TLB in a manner
- * identical to a TLBWR instruction as executed by the software refill
- * handler.
- */
- r4k_helper_tlbwr(env);
-
- env->CP0_EntryHi = tmp_entryhi;
- env->CP0_PageMask = tmp_pagemask;
- env->CP0_EntryLo0 = tmp_entrylo0;
- env->CP0_EntryLo1 = tmp_entrylo1;
- }
- return true;
-}
-#endif
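For orientation (outside the patch itself): page_table_walk_refill() above derives its table indices purely from shift-and-mask arithmetic on the faulting address, using the widths from CP0_PWSize and the bit positions from CP0_PWField. A standalone sketch with made-up field values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical walker layout (illustrative values only): one
         * global-directory level of 2^9 entries indexed by VA bits 22..30,
         * a PTE level of 2^9 entries indexed by bits 13..21, and 32-bit
         * entries (native_shift == 2, no huge-page widening). */
        unsigned pf_gdw = 22, gdw = 9;
        unsigned pf_ptw = 13, ptw = 9;
        unsigned directory_shift = 2, leaf_shift = 2;

        uint64_t address = 0x7f4d2000;    /* made-up faulting address */
        unsigned gindex  = (address >> pf_gdw) & ((1u << gdw) - 1);
        unsigned ptindex = (address >> pf_ptw) & ((1u << ptw) - 1);

        /* Byte offsets into the directory and the PTE-pair table, mirroring
         * the goffset/ptoffset0/ptoffset1 computation above. */
        uint64_t goffset   = (uint64_t)gindex << directory_shift;
        uint64_t ptoffset0 = (uint64_t)(ptindex >> 1) << (leaf_shift + 1);
        uint64_t ptoffset1 = ptoffset0 | (1u << leaf_shift);

        printf("gindex=%u ptindex=%u goffset=%#llx pt0=%#llx pt1=%#llx\n",
               gindex, ptindex, (unsigned long long)goffset,
               (unsigned long long)ptoffset0, (unsigned long long)ptoffset1);
        return 0;
    }
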
-
-bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- CPUMIPSState *env = cpu_env(cs);
- hwaddr physical;
- int prot;
- int ret = TLBRET_BADADDR;
-
- /* data access */
- /* XXX: put correct access by using cpu_restore_state() correctly */
- ret = get_physical_address(env, &physical, &prot, address,
- access_type, mmu_idx);
- switch (ret) {
- case TLBRET_MATCH:
- qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
- " prot %d\n", __func__, address, physical, prot);
- break;
- default:
- qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
- ret);
- break;
- }
- if (ret == TLBRET_MATCH) {
- tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
- return true;
- }
-#if !defined(TARGET_MIPS64)
- if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
- /*
- * Memory reads during hardware page table walking are performed
- * as if they were kernel-mode load instructions.
- */
- int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ?
- MMU_ERL_IDX : MMU_KERNEL_IDX);
-
- if (page_table_walk_refill(env, address, ptw_mmu_idx)) {
- ret = get_physical_address(env, &physical, &prot, address,
- access_type, mmu_idx);
- if (ret == TLBRET_MATCH) {
- tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
- return true;
- }
- }
- }
-#endif
- if (probe) {
- return false;
- }
-
- raise_mmu_exception(env, address, access_type, ret);
- do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
-}
-
-hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
- MMUAccessType access_type, uintptr_t retaddr)
-{
- hwaddr physical;
- int prot;
- int ret = 0;
- CPUState *cs = env_cpu(env);
-
- /* data access */
- ret = get_physical_address(env, &physical, &prot, address, access_type,
- mips_env_mmu_index(env));
- if (ret == TLBRET_MATCH) {
- return physical;
- }
-
- raise_mmu_exception(env, address, access_type, ret);
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-static void set_hflags_for_handler(CPUMIPSState *env)
-{
- /* Exception handlers are entered in 32-bit mode. */
- env->hflags &= ~(MIPS_HFLAG_M16);
- /* ...except that microMIPS lets you choose. */
- if (env->insn_flags & ASE_MICROMIPS) {
- env->hflags |= (!!(env->CP0_Config3 &
- (1 << CP0C3_ISA_ON_EXC))
- << MIPS_HFLAG_M16_SHIFT);
- }
-}
-
-static inline void set_badinstr_registers(CPUMIPSState *env)
-{
- if (env->insn_flags & ISA_NANOMIPS32) {
- if (env->CP0_Config3 & (1 << CP0C3_BI)) {
- uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
- if ((instr & 0x10000000) == 0) {
- instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
- }
- env->CP0_BadInstr = instr;
-
- if ((instr & 0xFC000000) == 0x60000000) {
- instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
- env->CP0_BadInstrX = instr;
- }
- }
- return;
- }
-
- if (env->hflags & MIPS_HFLAG_M16) {
- /* TODO: add BadInstr support for microMIPS */
- return;
- }
- if (env->CP0_Config3 & (1 << CP0C3_BI)) {
- env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
- }
- if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
- (env->hflags & MIPS_HFLAG_BMASK)) {
- env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
- }
-}
-
-void mips_cpu_do_interrupt(CPUState *cs)
-{
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
- bool update_badinstr = 0;
- target_ulong offset;
- int cause = -1;
-
- if (qemu_loglevel_mask(CPU_LOG_INT)
- && cs->exception_index != EXCP_EXT_INTERRUPT) {
- qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
- " %s exception\n",
- __func__, env->active_tc.PC, env->CP0_EPC,
- mips_exception_name(cs->exception_index));
- }
- if (cs->exception_index == EXCP_EXT_INTERRUPT &&
- (env->hflags & MIPS_HFLAG_DM)) {
- cs->exception_index = EXCP_DINT;
- }
- offset = 0x180;
- switch (cs->exception_index) {
- case EXCP_SEMIHOST:
- cs->exception_index = EXCP_NONE;
- mips_semihosting(env);
- env->active_tc.PC += env->error_code;
- return;
- case EXCP_DSS:
- env->CP0_Debug |= 1 << CP0DB_DSS;
- /*
- * Debug single step cannot be raised inside a delay slot and
- * resume will always occur on the next instruction
- * (but we assume the pc has always been updated during
- * code translation).
- */
- env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
- goto enter_debug_mode;
- case EXCP_DINT:
- env->CP0_Debug |= 1 << CP0DB_DINT;
- goto set_DEPC;
- case EXCP_DIB:
- env->CP0_Debug |= 1 << CP0DB_DIB;
- goto set_DEPC;
- case EXCP_DBp:
- env->CP0_Debug |= 1 << CP0DB_DBp;
- /* Setup DExcCode - SDBBP instruction */
- env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
- (9 << CP0DB_DEC);
- goto set_DEPC;
- case EXCP_DDBS:
- env->CP0_Debug |= 1 << CP0DB_DDBS;
- goto set_DEPC;
- case EXCP_DDBL:
- env->CP0_Debug |= 1 << CP0DB_DDBL;
- set_DEPC:
- env->CP0_DEPC = exception_resume_pc(env);
- env->hflags &= ~MIPS_HFLAG_BMASK;
- enter_debug_mode:
- if (env->insn_flags & ISA_MIPS3) {
- env->hflags |= MIPS_HFLAG_64;
- if (!(env->insn_flags & ISA_MIPS_R6) ||
- env->CP0_Status & (1 << CP0St_KX)) {
- env->hflags &= ~MIPS_HFLAG_AWRAP;
- }
- }
- env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
- env->hflags &= ~(MIPS_HFLAG_KSU);
- /* EJTAG probe trap enable is not implemented... */
- if (!(env->CP0_Status & (1 << CP0St_EXL))) {
- env->CP0_Cause &= ~(1U << CP0Ca_BD);
- }
- env->active_tc.PC = env->exception_base + 0x480;
- set_hflags_for_handler(env);
- break;
- case EXCP_RESET:
- cpu_reset(CPU(cpu));
- break;
- case EXCP_SRESET:
- env->CP0_Status |= (1 << CP0St_SR);
- memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
- goto set_error_EPC;
- case EXCP_NMI:
- env->CP0_Status |= (1 << CP0St_NMI);
- set_error_EPC:
- env->CP0_ErrorEPC = exception_resume_pc(env);
- env->hflags &= ~MIPS_HFLAG_BMASK;
- env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
- if (env->insn_flags & ISA_MIPS3) {
- env->hflags |= MIPS_HFLAG_64;
- if (!(env->insn_flags & ISA_MIPS_R6) ||
- env->CP0_Status & (1 << CP0St_KX)) {
- env->hflags &= ~MIPS_HFLAG_AWRAP;
- }
- }
- env->hflags |= MIPS_HFLAG_CP0;
- env->hflags &= ~(MIPS_HFLAG_KSU);
- if (!(env->CP0_Status & (1 << CP0St_EXL))) {
- env->CP0_Cause &= ~(1U << CP0Ca_BD);
- }
- env->active_tc.PC = env->exception_base;
- set_hflags_for_handler(env);
- break;
- case EXCP_EXT_INTERRUPT:
- cause = 0;
- if (env->CP0_Cause & (1 << CP0Ca_IV)) {
- uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
-
- if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
- offset = 0x200;
- } else {
- uint32_t vector = 0;
- uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
-
- if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
- /*
- * For VEIC mode, the external interrupt controller feeds
- * the vector through the CP0Cause IP lines.
- */
- vector = pending;
- } else {
- /*
- * Vectored Interrupts
- * Mask with Status.IM7-IM0 to get enabled interrupts.
- */
- pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
- /* Find the highest-priority interrupt. */
- while (pending >>= 1) {
- vector++;
- }
- }
- offset = 0x200 + (vector * (spacing << 5));
- }
- }
- goto set_EPC;
- case EXCP_LTLBL:
- cause = 1;
- update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
- goto set_EPC;
- case EXCP_TLBL:
- cause = 2;
- update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
- if ((env->error_code & EXCP_TLB_NOMATCH) &&
- !(env->CP0_Status & (1 << CP0St_EXL))) {
-#if defined(TARGET_MIPS64)
- int R = env->CP0_BadVAddr >> 62;
- int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
- int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
-
- if ((R != 0 || UX) && (R != 3 || KX) &&
- (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
- offset = 0x080;
- } else {
-#endif
- offset = 0x000;
-#if defined(TARGET_MIPS64)
- }
-#endif
- }
- goto set_EPC;
- case EXCP_TLBS:
- cause = 3;
- update_badinstr = 1;
- if ((env->error_code & EXCP_TLB_NOMATCH) &&
- !(env->CP0_Status & (1 << CP0St_EXL))) {
-#if defined(TARGET_MIPS64)
- int R = env->CP0_BadVAddr >> 62;
- int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
- int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
-
- if ((R != 0 || UX) && (R != 3 || KX) &&
- (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
- offset = 0x080;
- } else {
-#endif
- offset = 0x000;
-#if defined(TARGET_MIPS64)
- }
-#endif
- }
- goto set_EPC;
- case EXCP_AdEL:
- cause = 4;
- update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
- goto set_EPC;
- case EXCP_AdES:
- cause = 5;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_IBE:
- cause = 6;
- goto set_EPC;
- case EXCP_DBE:
- cause = 7;
- goto set_EPC;
- case EXCP_SYSCALL:
- cause = 8;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_BREAK:
- cause = 9;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_RI:
- cause = 10;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_CpU:
- cause = 11;
- update_badinstr = 1;
- env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
- (env->error_code << CP0Ca_CE);
- goto set_EPC;
- case EXCP_OVERFLOW:
- cause = 12;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_TRAP:
- cause = 13;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_MSAFPE:
- cause = 14;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_FPE:
- cause = 15;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_C2E:
- cause = 18;
- goto set_EPC;
- case EXCP_TLBRI:
- cause = 19;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_TLBXI:
- cause = 20;
- goto set_EPC;
- case EXCP_MSADIS:
- cause = 21;
- update_badinstr = 1;
- goto set_EPC;
- case EXCP_MDMX:
- cause = 22;
- goto set_EPC;
- case EXCP_DWATCH:
- cause = 23;
- /* XXX: TODO: manage deferred watch exceptions */
- goto set_EPC;
- case EXCP_MCHECK:
- cause = 24;
- goto set_EPC;
- case EXCP_THREAD:
- cause = 25;
- goto set_EPC;
- case EXCP_DSPDIS:
- cause = 26;
- goto set_EPC;
- case EXCP_CACHE:
- cause = 30;
- offset = 0x100;
- set_EPC:
- if (!(env->CP0_Status & (1 << CP0St_EXL))) {
- env->CP0_EPC = exception_resume_pc(env);
- if (update_badinstr) {
- set_badinstr_registers(env);
- }
- if (env->hflags & MIPS_HFLAG_BMASK) {
- env->CP0_Cause |= (1U << CP0Ca_BD);
- } else {
- env->CP0_Cause &= ~(1U << CP0Ca_BD);
- }
- env->CP0_Status |= (1 << CP0St_EXL);
- if (env->insn_flags & ISA_MIPS3) {
- env->hflags |= MIPS_HFLAG_64;
- if (!(env->insn_flags & ISA_MIPS_R6) ||
- env->CP0_Status & (1 << CP0St_KX)) {
- env->hflags &= ~MIPS_HFLAG_AWRAP;
- }
- }
- env->hflags |= MIPS_HFLAG_CP0;
- env->hflags &= ~(MIPS_HFLAG_KSU);
- }
- env->hflags &= ~MIPS_HFLAG_BMASK;
- if (env->CP0_Status & (1 << CP0St_BEV)) {
- env->active_tc.PC = env->exception_base + 0x200;
- } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
- env->CP0_Config5 & (1 << CP0C5_CV))) {
- /* Force KSeg1 for cache errors */
- env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
- } else {
- env->active_tc.PC = env->CP0_EBase & ~0xfff;
- }
-
- env->active_tc.PC += offset;
- set_hflags_for_handler(env);
- env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
- (cause << CP0Ca_EC);
- break;
- default:
- abort();
- }
- if (qemu_loglevel_mask(CPU_LOG_INT)
- && cs->exception_index != EXCP_EXT_INTERRUPT) {
- qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
- " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
- __func__, env->active_tc.PC, env->CP0_EPC, cause,
- env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
- env->CP0_DEPC);
- }
- cs->exception_index = EXCP_NONE;
-}
-
-bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- CPUMIPSState *env = cpu_env(cs);
-
- if (cpu_mips_hw_interrupts_enabled(env) &&
- cpu_mips_hw_interrupts_pending(env)) {
- /* Raise it */
- cs->exception_index = EXCP_EXT_INTERRUPT;
- env->error_code = 0;
- mips_cpu_do_interrupt(cs);
- return true;
- }
- }
- return false;
-}
-
-void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
-{
- CPUState *cs = env_cpu(env);
- r4k_tlb_t *tlb;
- target_ulong addr;
- target_ulong end;
- uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
- uint32_t MMID = env->CP0_MemoryMapID;
- bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
- uint32_t tlb_mmid;
- target_ulong mask;
-
- MMID = mi ? MMID : (uint32_t) ASID;
-
- tlb = &env->tlb->mmu.r4k.tlb[idx];
- /*
- * The qemu TLB is flushed when the ASID/MMID changes, so no need to
- * flush these entries again.
- */
- tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
- if (tlb->G == 0 && tlb_mmid != MMID) {
- return;
- }
-
- if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
- /*
- * For tlbwr, we can shadow the discarded entry into
- * a new (fake) TLB entry, as long as the guest can not
- * tell that it's there.
- */
- env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
- env->tlb->tlb_in_use++;
- return;
- }
-
- /* 1k pages are not supported. */
- mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
- if (tlb->V0) {
- addr = tlb->VPN & ~mask;
-#if defined(TARGET_MIPS64)
- if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
- addr |= 0x3FFFFF0000000000ULL;
- }
-#endif
- end = addr | (mask >> 1);
- while (addr < end) {
- tlb_flush_page(cs, addr);
- addr += TARGET_PAGE_SIZE;
- }
- }
- if (tlb->V1) {
- addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
-#if defined(TARGET_MIPS64)
- if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
- addr |= 0x3FFFFF0000000000ULL;
- }
-#endif
- end = addr | mask;
- while (addr - 1 < end) {
- tlb_flush_page(cs, addr);
- addr += TARGET_PAGE_SIZE;
- }
- }
-}
diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc
deleted file mode 100644
index 1861d53..0000000
--- a/target/mips/tcg/sysemu_helper.h.inc
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * QEMU MIPS sysemu helpers
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- * Copyright (c) 2006 Marius Groeger (FPU operations)
- * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
- * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-/* CP0 helpers */
-DEF_HELPER_1(mfc0_mvpcontrol, tl, env)
-DEF_HELPER_1(mfc0_mvpconf0, tl, env)
-DEF_HELPER_1(mfc0_mvpconf1, tl, env)
-DEF_HELPER_1(mftc0_vpecontrol, tl, env)
-DEF_HELPER_1(mftc0_vpeconf0, tl, env)
-DEF_HELPER_1(mfc0_random, tl, env)
-DEF_HELPER_1(mfc0_tcstatus, tl, env)
-DEF_HELPER_1(mftc0_tcstatus, tl, env)
-DEF_HELPER_1(mfc0_tcbind, tl, env)
-DEF_HELPER_1(mftc0_tcbind, tl, env)
-DEF_HELPER_1(mfc0_tcrestart, tl, env)
-DEF_HELPER_1(mftc0_tcrestart, tl, env)
-DEF_HELPER_1(mfc0_tchalt, tl, env)
-DEF_HELPER_1(mftc0_tchalt, tl, env)
-DEF_HELPER_1(mfc0_tccontext, tl, env)
-DEF_HELPER_1(mftc0_tccontext, tl, env)
-DEF_HELPER_1(mfc0_tcschedule, tl, env)
-DEF_HELPER_1(mftc0_tcschedule, tl, env)
-DEF_HELPER_1(mfc0_tcschefback, tl, env)
-DEF_HELPER_1(mftc0_tcschefback, tl, env)
-DEF_HELPER_1(mfc0_count, tl, env)
-DEF_HELPER_1(mftc0_entryhi, tl, env)
-DEF_HELPER_1(mftc0_status, tl, env)
-DEF_HELPER_1(mftc0_cause, tl, env)
-DEF_HELPER_1(mftc0_epc, tl, env)
-DEF_HELPER_1(mftc0_ebase, tl, env)
-DEF_HELPER_2(mftc0_configx, tl, env, tl)
-DEF_HELPER_1(mfc0_lladdr, tl, env)
-DEF_HELPER_1(mfc0_maar, tl, env)
-DEF_HELPER_1(mfhc0_maar, tl, env)
-DEF_HELPER_2(mfc0_watchlo, tl, env, i32)
-DEF_HELPER_2(mfc0_watchhi, tl, env, i32)
-DEF_HELPER_2(mfhc0_watchhi, tl, env, i32)
-DEF_HELPER_1(mfc0_debug, tl, env)
-DEF_HELPER_1(mftc0_debug, tl, env)
-#ifdef TARGET_MIPS64
-DEF_HELPER_1(dmfc0_tcrestart, tl, env)
-DEF_HELPER_1(dmfc0_tchalt, tl, env)
-DEF_HELPER_1(dmfc0_tccontext, tl, env)
-DEF_HELPER_1(dmfc0_tcschedule, tl, env)
-DEF_HELPER_1(dmfc0_tcschefback, tl, env)
-DEF_HELPER_1(dmfc0_lladdr, tl, env)
-DEF_HELPER_1(dmfc0_maar, tl, env)
-DEF_HELPER_2(dmfc0_watchlo, tl, env, i32)
-DEF_HELPER_2(dmfc0_watchhi, tl, env, i32)
-#endif /* TARGET_MIPS64 */
-
-DEF_HELPER_2(mtc0_index, void, env, tl)
-DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl)
-DEF_HELPER_2(mtc0_vpecontrol, void, env, tl)
-DEF_HELPER_2(mttc0_vpecontrol, void, env, tl)
-DEF_HELPER_2(mtc0_vpeconf0, void, env, tl)
-DEF_HELPER_2(mttc0_vpeconf0, void, env, tl)
-DEF_HELPER_2(mtc0_vpeconf1, void, env, tl)
-DEF_HELPER_2(mtc0_yqmask, void, env, tl)
-DEF_HELPER_2(mtc0_vpeopt, void, env, tl)
-DEF_HELPER_2(mtc0_entrylo0, void, env, tl)
-DEF_HELPER_2(mtc0_tcstatus, void, env, tl)
-DEF_HELPER_2(mttc0_tcstatus, void, env, tl)
-DEF_HELPER_2(mtc0_tcbind, void, env, tl)
-DEF_HELPER_2(mttc0_tcbind, void, env, tl)
-DEF_HELPER_2(mtc0_tcrestart, void, env, tl)
-DEF_HELPER_2(mttc0_tcrestart, void, env, tl)
-DEF_HELPER_2(mtc0_tchalt, void, env, tl)
-DEF_HELPER_2(mttc0_tchalt, void, env, tl)
-DEF_HELPER_2(mtc0_tccontext, void, env, tl)
-DEF_HELPER_2(mttc0_tccontext, void, env, tl)
-DEF_HELPER_2(mtc0_tcschedule, void, env, tl)
-DEF_HELPER_2(mttc0_tcschedule, void, env, tl)
-DEF_HELPER_2(mtc0_tcschefback, void, env, tl)
-DEF_HELPER_2(mttc0_tcschefback, void, env, tl)
-DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
-DEF_HELPER_2(mtc0_context, void, env, tl)
-DEF_HELPER_2(mtc0_memorymapid, void, env, tl)
-DEF_HELPER_2(mtc0_pagemask, void, env, tl)
-DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
-DEF_HELPER_2(mtc0_segctl0, void, env, tl)
-DEF_HELPER_2(mtc0_segctl1, void, env, tl)
-DEF_HELPER_2(mtc0_segctl2, void, env, tl)
-DEF_HELPER_2(mtc0_pwfield, void, env, tl)
-DEF_HELPER_2(mtc0_pwsize, void, env, tl)
-DEF_HELPER_2(mtc0_wired, void, env, tl)
-DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
-DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
-DEF_HELPER_2(mtc0_srsconf2, void, env, tl)
-DEF_HELPER_2(mtc0_srsconf3, void, env, tl)
-DEF_HELPER_2(mtc0_srsconf4, void, env, tl)
-DEF_HELPER_2(mtc0_hwrena, void, env, tl)
-DEF_HELPER_2(mtc0_pwctl, void, env, tl)
-DEF_HELPER_2(mtc0_count, void, env, tl)
-DEF_HELPER_2(mtc0_entryhi, void, env, tl)
-DEF_HELPER_2(mttc0_entryhi, void, env, tl)
-DEF_HELPER_2(mtc0_compare, void, env, tl)
-DEF_HELPER_2(mtc0_status, void, env, tl)
-DEF_HELPER_2(mttc0_status, void, env, tl)
-DEF_HELPER_2(mtc0_intctl, void, env, tl)
-DEF_HELPER_2(mtc0_srsctl, void, env, tl)
-DEF_HELPER_2(mtc0_cause, void, env, tl)
-DEF_HELPER_2(mttc0_cause, void, env, tl)
-DEF_HELPER_2(mtc0_ebase, void, env, tl)
-DEF_HELPER_2(mttc0_ebase, void, env, tl)
-DEF_HELPER_2(mtc0_config0, void, env, tl)
-DEF_HELPER_2(mtc0_config2, void, env, tl)
-DEF_HELPER_2(mtc0_config3, void, env, tl)
-DEF_HELPER_2(mtc0_config4, void, env, tl)
-DEF_HELPER_2(mtc0_config5, void, env, tl)
-DEF_HELPER_2(mtc0_lladdr, void, env, tl)
-DEF_HELPER_2(mtc0_maar, void, env, tl)
-DEF_HELPER_2(mthc0_maar, void, env, tl)
-DEF_HELPER_2(mtc0_maari, void, env, tl)
-DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32)
-DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32)
-DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32)
-DEF_HELPER_2(mtc0_xcontext, void, env, tl)
-DEF_HELPER_2(mtc0_framemask, void, env, tl)
-DEF_HELPER_2(mtc0_debug, void, env, tl)
-DEF_HELPER_2(mttc0_debug, void, env, tl)
-DEF_HELPER_2(mtc0_performance0, void, env, tl)
-DEF_HELPER_2(mtc0_errctl, void, env, tl)
-DEF_HELPER_2(mtc0_taglo, void, env, tl)
-DEF_HELPER_2(mtc0_datalo, void, env, tl)
-DEF_HELPER_2(mtc0_taghi, void, env, tl)
-DEF_HELPER_2(mtc0_datahi, void, env, tl)
-
-#if defined(TARGET_MIPS64)
-DEF_HELPER_2(dmtc0_entrylo0, void, env, i64)
-DEF_HELPER_2(dmtc0_entrylo1, void, env, i64)
-#endif
-
-/* MIPS MT functions */
-DEF_HELPER_2(mftgpr, tl, env, i32)
-DEF_HELPER_2(mftlo, tl, env, i32)
-DEF_HELPER_2(mfthi, tl, env, i32)
-DEF_HELPER_2(mftacx, tl, env, i32)
-DEF_HELPER_1(mftdsp, tl, env)
-DEF_HELPER_3(mttgpr, void, env, tl, i32)
-DEF_HELPER_3(mttlo, void, env, tl, i32)
-DEF_HELPER_3(mtthi, void, env, tl, i32)
-DEF_HELPER_3(mttacx, void, env, tl, i32)
-DEF_HELPER_2(mttdsp, void, env, tl)
-DEF_HELPER_0(dmt, tl)
-DEF_HELPER_0(emt, tl)
-DEF_HELPER_1(dvpe, tl, env)
-DEF_HELPER_1(evpe, tl, env)
-
-/* R6 Multi-threading */
-DEF_HELPER_1(dvp, tl, env)
-DEF_HELPER_1(evp, tl, env)
-
-/* TLB */
-DEF_HELPER_1(tlbwi, void, env)
-DEF_HELPER_1(tlbwr, void, env)
-DEF_HELPER_1(tlbp, void, env)
-DEF_HELPER_1(tlbr, void, env)
-DEF_HELPER_1(tlbinv, void, env)
-DEF_HELPER_1(tlbinvf, void, env)
-DEF_HELPER_3(ginvt, void, env, tl, i32)
-
-/* Special */
-DEF_HELPER_1(di, tl, env)
-DEF_HELPER_1(ei, tl, env)
-DEF_HELPER_1(eret, void, env)
-DEF_HELPER_1(eretnc, void, env)
-DEF_HELPER_1(deret, void, env)
-DEF_HELPER_3(cache, void, env, tl, i32)
-
-#ifdef TARGET_MIPS64
-/* Loongson CSR */
-DEF_HELPER_2(lcsr_rdcsr, i64, env, tl)
-DEF_HELPER_2(lcsr_drdcsr, i64, env, tl)
-DEF_HELPER_3(lcsr_wrcsr, void, env, tl, tl)
-DEF_HELPER_3(lcsr_dwrcsr, void, env, tl, tl)
-#endif
diff --git a/target/mips/tcg/system/cp0_helper.c b/target/mips/tcg/system/cp0_helper.c
new file mode 100644
index 0000000..101b1e6
--- /dev/null
+++ b/target/mips/tcg/system/cp0_helper.c
@@ -0,0 +1,1633 @@
+/*
+ * Helpers for emulation of CP0-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ * Copyright (C) 2020 Wave Computing, Inc.
+ * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
+
+
+/* SMP helpers. */
+static bool mips_vpe_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ /*
+ * If the VPE is halted but otherwise active, it means it's waiting for
+     * an interrupt.
+ */
+ return cpu->halted && mips_vpe_active(env);
+}
+
+static bool mips_vp_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ return cpu->halted && mips_vp_active(env);
+}
+
+static inline void mips_vpe_wake(MIPSCPU *c)
+{
+ /*
+ * Don't set ->halted = 0 directly, let it be done via cpu_has_work
+ * because there might be other conditions that state that c should
+ * be sleeping.
+ */
+ bql_lock();
+ cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
+ bql_unlock();
+}
+
+static inline void mips_vpe_sleep(MIPSCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * The VPE was shut off, really go to bed.
+ * Reset any old _WAKE requests.
+ */
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+}
+
+static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
+ mips_vpe_wake(cpu);
+ }
+}
+
+static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (!mips_vpe_active(c)) {
+ mips_vpe_sleep(cpu);
+ }
+}
+
+/**
+ * mips_cpu_map_tc:
+ * @env: CPU from which mapping is performed.
+ * @tc: Should point to an int with the value of the global TC index.
+ *
+ * This function will transform @tc into a local index within the
+ * returned #CPUMIPSState.
+ */
+
+/*
+ * FIXME: This code assumes that all VPEs have the same number of TCs,
+ * which depends on runtime setup. Can probably be fixed by
+ * walking the list of CPUMIPSStates.
+ */
+static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
+{
+ MIPSCPU *cpu;
+ CPUState *cs;
+ CPUState *other_cs;
+ int vpe_idx;
+ int tc_idx = *tc;
+
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
+ /* Not allowed to address other CPUs. */
+ *tc = env->current_tc;
+ return env;
+ }
+
+ cs = env_cpu(env);
+ vpe_idx = tc_idx / cs->nr_threads;
+ *tc = tc_idx % cs->nr_threads;
+ other_cs = qemu_get_cpu(vpe_idx);
+ if (other_cs == NULL) {
+ return env;
+ }
+ cpu = MIPS_CPU(other_cs);
+ return &cpu->env;
+}
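+
The global-TC-to-VPE mapping in mips_cpu_map_tc() above is plain division and remainder by the per-VPE thread count; a tiny standalone illustration with a made-up topology (2 TCs per VPE):

    #include <stdio.h>

    int main(void)
    {
        const int nr_threads = 2;   /* hypothetical: 2 TCs per VPE */

        for (int global_tc = 0; global_tc < 4; global_tc++) {
            int vpe_idx  = global_tc / nr_threads;   /* which CPU/VPE */
            int local_tc = global_tc % nr_threads;   /* TC index inside it */
            printf("global TC %d -> VPE %d, local TC %d\n",
                   global_tc, vpe_idx, local_tc);
        }
        return 0;
    }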
+
+/*
+ * The per VPE CP0_Status register shares some fields with the per TC
+ * CP0_TCStatus registers. These fields are wired to the same registers,
+ * so changes to either of them should be reflected on both registers.
+ *
+ * Also, EntryHi shares the bottom 8-bit ASID with TCStatus.
+ *
+ * These helpers synchronize the registers for a given CPU.
+ */
+
+/*
+ * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
+ * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
+ * int tc);
+ */
+
+/* Called for updates to CP0_TCStatus. */
+static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
+ target_ulong v)
+{
+ uint32_t status;
+ uint32_t tcu, tmx, tasid, tksu;
+ uint32_t mask = ((1U << CP0St_CU3)
+ | (1 << CP0St_CU2)
+ | (1 << CP0St_CU1)
+ | (1 << CP0St_CU0)
+ | (1 << CP0St_MX)
+ | (3 << CP0St_KSU));
+
+ tcu = (v >> CP0TCSt_TCU0) & 0xf;
+ tmx = (v >> CP0TCSt_TMX) & 0x1;
+ tasid = v & cpu->CP0_EntryHi_ASID_mask;
+ tksu = (v >> CP0TCSt_TKSU) & 0x3;
+
+ status = tcu << CP0St_CU0;
+ status |= tmx << CP0St_MX;
+ status |= tksu << CP0St_KSU;
+
+ cpu->CP0_Status &= ~mask;
+ cpu->CP0_Status |= status;
+
+ /* Sync the TASID with EntryHi. */
+ cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
+ cpu->CP0_EntryHi |= tasid;
+
+ compute_hflags(cpu);
+}
+
+/* Called for updates to CP0_EntryHi. */
+static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
+{
+ int32_t *tcst;
+ uint32_t asid, v = cpu->CP0_EntryHi;
+
+ asid = v & cpu->CP0_EntryHi_ASID_mask;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
+ *tcst |= asid;
+}
+
+/* XXX: do not use a global */
+uint32_t cpu_mips_get_random(CPUMIPSState *env)
+{
+ static uint32_t seed = 1;
+ static uint32_t prev_idx;
+ uint32_t idx;
+ uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;
+
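+ /* Only one non-wired entry: it is necessarily the last TLB entry. */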
+ if (nb_rand_tlb == 1) {
+ return env->tlb->nb_tlb - 1;
+ }
+
+ /* Don't return the same value twice in a row, so pick another one. */
+ do {
+ /*
+ * Use the simple linear congruential generator from the
+ * ISO/IEC 9899 standard.
+ */
+ seed = 1103515245 * seed + 12345;
+ idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
+ } while (idx == prev_idx);
+ prev_idx = idx;
+ return idx;
+}
+
+/* CP0 helpers */
+target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPControl;
+}
+
+target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf0;
+}
+
+target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf1;
+}
+
+target_ulong helper_mfc0_random(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_random(env);
+}
+
+target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCStatus;
+}
+
+target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCStatus;
+ } else {
+ return other->tcs[other_tc].CP0_TCStatus;
+ }
+}
+
+target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCBind;
+}
+
+target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCBind;
+ } else {
+ return other->tcs[other_tc].CP0_TCBind;
+ }
+}
+
+target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.PC;
+ } else {
+ return other->tcs[other_tc].PC;
+ }
+}
+
+target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCHalt;
+ } else {
+ return other->tcs[other_tc].CP0_TCHalt;
+ }
+}
+
+target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCContext;
+ } else {
+ return other->tcs[other_tc].CP0_TCContext;
+ }
+}
+
+target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCSchedule;
+ } else {
+ return other->tcs[other_tc].CP0_TCSchedule;
+ }
+}
+
+target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCScheFBack;
+ } else {
+ return other->tcs[other_tc].CP0_TCScheFBack;
+ }
+}
+
+target_ulong helper_mfc0_count(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_count(env);
+}
+
+target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EntryHi;
+}
+
+target_ulong helper_mftc0_cause(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Cause;
+}
+
+target_ulong helper_mftc0_status(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Status;
+}
+
+target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
+{
+ return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
+}
+
+target_ulong helper_mfc0_maar(CPUMIPSState *env)
+{
+ return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_mfhc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI] >> 32;
+}
+
+target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t)env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t) env->CP0_WatchHi[sel];
+}
+
+target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel] >> 32;
+}
+
+target_ulong helper_mfc0_debug(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Debug;
+ if (env->hflags & MIPS_HFLAG_DM) {
+ t0 |= 1 << CP0DB_DM;
+ }
+
+ return t0;
+}
+
+target_ulong helper_mftc0_debug(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ int32_t tcstatus;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ tcstatus = other->active_tc.CP0_Debug_tcstatus;
+ } else {
+ tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
+ }
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
+{
+ return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
+}
+
+target_ulong helper_dmfc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel];
+}
+
+#endif /* TARGET_MIPS64 */
+
+void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t index_p = env->CP0_Index & 0x80000000;
+ uint32_t tlb_index = arg1 & 0x7fffffff;
+ if (tlb_index < env->tlb->nb_tlb) {
+ if (env->insn_flags & ISA_MIPS_R6) {
+ index_p |= arg1 & 0x80000000;
+ }
+ env->CP0_Index = index_p | tlb_index;
+ }
+}
+
+void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
+ (1 << CP0MVPCo_EVP);
+ }
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0MVPCo_STLB);
+ }
+ newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
+
+ env->mvp->CP0_MVPControl = newval;
+}
+
+void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /*
+ * Yield scheduler intercept not implemented.
+ * Gating storage scheduler intercept not implemented.
+ */
+
+ /* TODO: Enable/disable TCs. */
+
+ env->CP0_VPEControl = newval;
+}
+
+void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable TCs. */
+
+ other->CP0_VPEControl = newval;
+}
+
+target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ /* FIXME: Mask away the bits that return zero on read. */
+ return other->CP0_VPEControl;
+}
+
+target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_VPEConf0;
+}
+
+void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
+ mask |= (0xff << CP0VPEC0_XTC);
+ }
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ }
+ newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+
+ env->CP0_VPEConf0 = newval;
+}
+
+void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+ other->CP0_VPEConf0 = newval;
+}
+
+void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
+ (0xff << CP0VPEC1_NCP1);
+ }
+ newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
+
+ /* UDI not implemented. */
+ /* CP2 not implemented. */
+
+ /* TODO: Handle FPU (CP1) binding. */
+
+ env->CP0_VPEConf1 = newval;
+}
+
+void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
+{
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_YQMask = 0x00000000;
+}
+
+void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_VPEOpt = arg1 & 0x0000ffff;
+}
+
+#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
+
+void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
+
+void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = env->CP0_TCStatus_rw_bitmask;
+ uint32_t newval;
+
+ newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
+
+ env->active_tc.CP0_TCStatus = newval;
+ sync_c0_tcstatus(env, env->current_tc, newval);
+}
+
+void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCStatus = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCStatus = arg1;
+ }
+ sync_c0_tcstatus(other, other_tc, arg1);
+}
+
+void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ env->active_tc.CP0_TCBind = newval;
+}
+
+void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ if (other_tc == other->current_tc) {
+ newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ other->active_tc.CP0_TCBind = newval;
+ } else {
+ newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
+ other->tcs[other_tc].CP0_TCBind = newval;
+ }
+}
+
+void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.PC = arg1;
+ env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ env->CP0_LLAddr = 0;
+ env->lladdr = 0;
+ /* MIPS16 not implemented. */
+}
+
+void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.PC = arg1;
+ other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ } else {
+ other->tcs[other_tc].PC = arg1;
+ other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ }
+}
+
+void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ MIPSCPU *cpu = env_archcpu(env);
+
+ env->active_tc.CP0_TCHalt = arg1 & 0x1;
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+ if (env->active_tc.CP0_TCHalt & 1) {
+ mips_tc_sleep(cpu, env->current_tc);
+ } else {
+ mips_tc_wake(cpu, env->current_tc);
+ }
+}
+
+void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ MIPSCPU *other_cpu = env_archcpu(other);
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCHalt = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCHalt = arg1;
+ }
+
+ if (arg1 & 1) {
+ mips_tc_sleep(other_cpu, other_tc);
+ } else {
+ mips_tc_wake(other_cpu, other_tc);
+ }
+}
+
+void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCContext = arg1;
+}
+
+void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCContext = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCContext = arg1;
+ }
+}
+
+void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCSchedule = arg1;
+}
+
+void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCSchedule = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCSchedule = arg1;
+ }
+}
+
+void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCScheFBack = arg1;
+}
+
+void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCScheFBack = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCScheFBack = arg1;
+ }
+}
+
+void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
+}
+
+void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t old;
+ old = env->CP0_MemoryMapID;
+ env->CP0_MemoryMapID = (int32_t) arg1;
+ /* If the MemoryMapID changes, flush qemu's TLB. */
+ if (old != env->CP0_MemoryMapID) {
+ cpu_mips_tlb_flush(env);
+ }
+}
+
+uint32_t compute_pagemask(uint32_t val)
+{
+ /* Ignore MASKX, as 1KB pages are not supported. */
+ uint32_t mask = extract32(val, CP0PM_MASK, 16);
+ int maskbits = cto32(mask);
+
+ /* Ensure there is no set bit after the first zero, and that maskbits is even. */
+ if ((mask >> maskbits) == 0 && maskbits % 2 == 0) {
+ return mask << CP0PM_MASK;
+ } else {
+ /* When invalid, set to default target page size. */
+ return 0;
+ }
+}
+
+void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_PageMask = compute_pagemask(arg1);
+}
+
+void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
+{
+ /* SmartMIPS not implemented */
+ /* 1k pages not implemented */
+ env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
+ (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
+ compute_hflags(env);
+ restore_pamask(env);
+}
+
+void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ uint64_t mask = 0x3F3FFFFFFFULL;
+ uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
+ uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
+
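+ /* Release 6 does not accept field values below 12; such a field is written as 0. */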
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_BDI);
+ }
+ if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_GDI);
+ }
+ if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_UDI);
+ }
+ if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_MDI);
+ }
+ if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_PTI);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptei >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptei == 0 || new_ptei == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
+ (old_ptei << CP0PF_PTEI);
+ }
+#else
+ uint32_t mask = 0x3FFFFFFF;
+ uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
+
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_GDW);
+ }
+ if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_UDW);
+ }
+ if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_MDW);
+ }
+ if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_PTW);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptew >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptew == 0 || new_ptew == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
+ (old_ptew << CP0PF_PTEW);
+ }
+#endif
+}
+
+void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
+#else
+ env->CP0_PWSize = arg1 & 0x3FFFFFFF;
+#endif
+}
+
+void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
+{
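+ /* Release 6 ignores out-of-range values; earlier ISAs wrap the value modulo the TLB size. */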
+ if (env->insn_flags & ISA_MIPS_R6) {
+ if (arg1 < env->tlb->nb_tlb) {
+ env->CP0_Wired = arg1;
+ }
+ } else {
+ env->CP0_Wired = arg1 % env->tlb->nb_tlb;
+ }
+}
+
+void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ /* PWEn = 0. Hardware page table walking is not implemented. */
+ env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
+#else
+ env->CP0_PWCtl = (arg1 & 0x800000FF);
+#endif
+}
+
+void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
+}
+
+void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
+}
+
+void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
+}
+
+void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
+}
+
+void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
+}
+
+void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0x0000000F;
+
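+ /* The low four HWREna bits are always writable; additional bits depend on the CPU configuration. */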
+ if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
+ (env->insn_flags & ISA_MIPS_R6)) {
+ mask |= (1 << 4);
+ }
+ if (env->insn_flags & ISA_MIPS_R6) {
+ mask |= (1 << 5);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
+ mask |= (1 << 29);
+
+ if (arg1 & (1 << 29)) {
+ env->hflags |= MIPS_HFLAG_HWRENA_ULR;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
+ }
+ }
+
+ env->CP0_HWREna = arg1 & mask;
+}
+
+void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_count(env, arg1);
+}
+
+void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong old, val, mask;
+ mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
+ if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
+ mask |= 1 << CP0EnHi_EHINV;
+ }
+
+ /* 1k pages not implemented */
+#if defined(TARGET_MIPS64)
+ if (env->insn_flags & ISA_MIPS_R6) {
+ int entryhi_r = extract64(arg1, 62, 2);
+ int config0_at = extract32(env->CP0_Config0, 13, 2);
+ bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
+ if ((entryhi_r == 2) ||
+ (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
+ /* skip EntryHi.R field if new value is reserved */
+ mask &= ~(0x3ull << 62);
+ }
+ }
+ mask &= env->SEGMask;
+#endif
+ old = env->CP0_EntryHi;
+ val = (arg1 & mask) | (old & ~mask);
+ env->CP0_EntryHi = val;
+ if (ase_mt_available(env)) {
+ sync_c0_entryhi(env, env->current_tc);
+ }
+ /* If the ASID changes, flush qemu's TLB. */
+ if ((old & env->CP0_EntryHi_ASID_mask) !=
+ (val & env->CP0_EntryHi_ASID_mask)) {
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_EntryHi = arg1;
+ sync_c0_entryhi(other, other_tc);
+}
+
+void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_compare(env, arg1);
+}
+
+void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t val, old;
+
+ old = env->CP0_Status;
+ cpu_mips_store_status(env, arg1);
+ val = env->CP0_Status;
+
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
+ old, old & env->CP0_Cause & CP0Ca_IP_mask,
+ val, val & env->CP0_Cause & CP0Ca_IP_mask,
+ env->CP0_Cause);
+ switch (mips_env_mmu_index(env)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
+ sync_c0_status(env, other, other_tc);
+}
+
+void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
+}
+
+void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
+ env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
+}
+
+void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_cause(env, arg1);
+}
+
+void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ cpu_mips_store_cause(other, arg1);
+}
+
+target_ulong helper_mftc0_epc(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EPC;
+}
+
+target_ulong helper_mftc0_ebase(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EBase;
+}
+
+void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ switch (idx) {
+ case 0: return other->CP0_Config0;
+ case 1: return other->CP0_Config1;
+ case 2: return other->CP0_Config2;
+ case 3: return other->CP0_Config3;
+ /* 4 and 5 are reserved. */
+ case 6: return other->CP0_Config6;
+ case 7: return other->CP0_Config7;
+ default:
+ break;
+ }
+ return 0;
+}
+
+void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
+}
+
+void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
+{
+ /* tertiary/secondary caches not implemented */
+ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
+}
+
+void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
+ (arg1 & (1 << CP0C3_ISA_ON_EXC));
+ }
+}
+
+void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
+ (arg1 & env->CP0_Config4_rw_bitmask);
+}
+
+void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
+ (arg1 & env->CP0_Config5_rw_bitmask);
+ env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
+ 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
+ compute_hflags(env);
+}
+
+void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
+{
+ target_long mask = env->CP0_LLAddr_rw_bitmask;
+ arg1 = arg1 << env->CP0_LLAddr_shift;
+ env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
+}
+
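+/* Writable MAAR bits: bit 63, the physical-address field and the two low control bits. */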
+#define MTC0_MAAR_MASK(env) \
+ ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
+
+void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
+}
+
+void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] =
+ (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
+ (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
+{
+ int index = arg1 & 0x3f;
+ if (index == 0x3f) {
+ /*
+ * Software may write all ones to INDEX to determine the
+ * maximum value supported.
+ */
+ env->CP0_MAARI = MIPS_MAAR_MAX - 1;
+ } else if (index < MIPS_MAAR_MAX) {
+ env->CP0_MAARI = index;
+ }
+ /*
+ * Apart from the all-ones case, if the value written is not
+ * supported, INDEX keeps its previous value.
+ */
+}
+
+void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ /*
+ * Watch exceptions for instruction fetches, data loads and
+ * data stores are not implemented.
+ */
+ env->CP0_WatchLo[sel] = (arg1 & ~0x7);
+}
+
+void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
+ uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
+ if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
+ mask |= 0xFFFFFFFF00000000ULL; /* MMID */
+ }
+ env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
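+ /* The low three bits are write-one-to-clear. */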
+ env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
+}
+
+void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
+ (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
+ env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
+}
+
+void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Framemask = arg1; /* XXX */
+}
+
+void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
+ if (arg1 & (1 << CP0DB_DM)) {
+ env->hflags |= MIPS_HFLAG_DM;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_DM;
+ }
+}
+
+void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_Debug_tcstatus = val;
+ } else {
+ other->tcs[other_tc].CP0_Debug_tcstatus = val;
+ }
+ other->CP0_Debug = (other->CP0_Debug &
+ ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Performance0 = arg1 & 0x000007ff;
+}
+
+void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t wst = arg1 & (1 << CP0EC_WST);
+ int32_t spr = arg1 & (1 << CP0EC_SPR);
+ int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
+
+ env->CP0_ErrCtl = wst | spr | itc;
+
+ if (itc && !wst && !spr) {
+ env->hflags |= MIPS_HFLAG_ITC_CACHE;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
+ }
+}
+
+void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
+ /*
+ * If CACHE instruction is configured for ITC tags then make all
+ * CP0.TagLo bits writable. The actual write to ITC Configuration
+ * Tag will take care of the read-only bits.
+ */
+ env->CP0_TagLo = arg1;
+ } else {
+ env->CP0_TagLo = arg1 & 0xFFFFFCF6;
+ }
+}
+
+void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataLo = arg1; /* XXX */
+}
+
+void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_TagHi = arg1; /* XXX */
+}
+
+void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataHi = arg1; /* XXX */
+}
+
+/* MIPS MT functions */
+target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.gpr[sel];
+ } else {
+ return other->tcs[other_tc].gpr[sel];
+ }
+}
+
+target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.LO[sel];
+ } else {
+ return other->tcs[other_tc].LO[sel];
+ }
+}
+
+target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.HI[sel];
+ } else {
+ return other->tcs[other_tc].HI[sel];
+ }
+}
+
+target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.ACX[sel];
+ } else {
+ return other->tcs[other_tc].ACX[sel];
+ }
+}
+
+target_ulong helper_mftdsp(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.DSPControl;
+ } else {
+ return other->tcs[other_tc].DSPControl;
+ }
+}
+
+void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.gpr[sel] = arg1;
+ } else {
+ other->tcs[other_tc].gpr[sel] = arg1;
+ }
+}
+
+void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.LO[sel] = arg1;
+ } else {
+ other->tcs[other_tc].LO[sel] = arg1;
+ }
+}
+
+void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.HI[sel] = arg1;
+ } else {
+ other->tcs[other_tc].HI[sel] = arg1;
+ }
+}
+
+void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.ACX[sel] = arg1;
+ } else {
+ other->tcs[other_tc].ACX[sel] = arg1;
+ }
+}
+
+void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.DSPControl = arg1;
+ } else {
+ other->tcs[other_tc].DSPControl = arg1;
+ }
+}
+
+/* MIPS MT functions */
+target_ulong helper_dmt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_emt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_dvpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPEs except the one executing the dvpe. */
+ if (&other_cpu->env != env) {
+ other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ return prev;
+}
+
+target_ulong helper_evpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+
+ if (&other_cpu->env != env
+ /* If the VPE is WFI, don't disturb its sleep. */
+ && !mips_vpe_is_wfi(other_cpu)) {
+ /* Enable the VPE. */
+ other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ mips_vpe_wake(other_cpu); /* And wake it up. */
+ }
+ }
+ return prev;
+}
+
+/* R6 Multi-threading */
+target_ulong helper_dvp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPs except the one executing the dvp. */
+ if (&other_cpu->env != env) {
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
+
+target_ulong helper_evp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
+ /*
+ * If the VP is WFI, don't disturb its sleep.
+ * Otherwise, wake it up.
+ */
+ mips_vpe_wake(other_cpu);
+ }
+ }
+ env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
diff --git a/target/mips/tcg/sysemu/lcsr_helper.c b/target/mips/tcg/system/lcsr_helper.c
index 25e0357..25e0357 100644
--- a/target/mips/tcg/sysemu/lcsr_helper.c
+++ b/target/mips/tcg/system/lcsr_helper.c
diff --git a/target/mips/tcg/system/meson.build b/target/mips/tcg/system/meson.build
new file mode 100644
index 0000000..911341a
--- /dev/null
+++ b/target/mips/tcg/system/meson.build
@@ -0,0 +1,12 @@
+mips_system_ss.add(files(
+ 'cp0_helper.c',
+ 'special_helper.c',
+ 'tlb_helper.c',
+))
+mips_system_ss.add(when: ['CONFIG_SEMIHOSTING'],
+ if_true: files('mips-semi.c'),
+ if_false: files('semihosting-stub.c')
+)
+mips_system_ss.add(when: 'TARGET_MIPS64', if_true: files(
+ 'lcsr_helper.c',
+))
diff --git a/target/mips/tcg/system/mips-semi.c b/target/mips/tcg/system/mips-semi.c
new file mode 100644
index 0000000..e822a42
--- /dev/null
+++ b/target/mips/tcg/system/mips-semi.c
@@ -0,0 +1,377 @@
+/*
+ * Unified Hosting Interface syscalls.
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/helpers.h"
+#include "semihosting/uaccess.h"
+#include "semihosting/semihost.h"
+#include "semihosting/console.h"
+#include "semihosting/syscalls.h"
+#include "internal.h"
+
+typedef enum UHIOp {
+ UHI_exit = 1,
+ UHI_open = 2,
+ UHI_close = 3,
+ UHI_read = 4,
+ UHI_write = 5,
+ UHI_lseek = 6,
+ UHI_unlink = 7,
+ UHI_fstat = 8,
+ UHI_argc = 9,
+ UHI_argnlen = 10,
+ UHI_argn = 11,
+ UHI_plog = 13,
+ UHI_assert = 14,
+ UHI_pread = 19,
+ UHI_pwrite = 20,
+ UHI_link = 22
+} UHIOp;
+
+typedef struct UHIStat {
+ int16_t uhi_st_dev;
+ uint16_t uhi_st_ino;
+ uint32_t uhi_st_mode;
+ uint16_t uhi_st_nlink;
+ uint16_t uhi_st_uid;
+ uint16_t uhi_st_gid;
+ int16_t uhi_st_rdev;
+ uint64_t uhi_st_size;
+ uint64_t uhi_st_atime;
+ uint64_t uhi_st_spare1;
+ uint64_t uhi_st_mtime;
+ uint64_t uhi_st_spare2;
+ uint64_t uhi_st_ctime;
+ uint64_t uhi_st_spare3;
+ uint64_t uhi_st_blksize;
+ uint64_t uhi_st_blocks;
+ uint64_t uhi_st_spare4[2];
+} UHIStat;
+
+enum UHIOpenFlags {
+ UHIOpen_RDONLY = 0x0,
+ UHIOpen_WRONLY = 0x1,
+ UHIOpen_RDWR = 0x2,
+ UHIOpen_APPEND = 0x8,
+ UHIOpen_CREAT = 0x200,
+ UHIOpen_TRUNC = 0x400,
+ UHIOpen_EXCL = 0x800
+};
+
+enum UHIErrno {
+ UHI_EACCESS = 13,
+ UHI_EAGAIN = 11,
+ UHI_EBADF = 9,
+ UHI_EBADMSG = 77,
+ UHI_EBUSY = 16,
+ UHI_ECONNRESET = 104,
+ UHI_EEXIST = 17,
+ UHI_EFBIG = 27,
+ UHI_EINTR = 4,
+ UHI_EINVAL = 22,
+ UHI_EIO = 5,
+ UHI_EISDIR = 21,
+ UHI_ELOOP = 92,
+ UHI_EMFILE = 24,
+ UHI_EMLINK = 31,
+ UHI_ENAMETOOLONG = 91,
+ UHI_ENETDOWN = 115,
+ UHI_ENETUNREACH = 114,
+ UHI_ENFILE = 23,
+ UHI_ENOBUFS = 105,
+ UHI_ENOENT = 2,
+ UHI_ENOMEM = 12,
+ UHI_ENOSPC = 28,
+ UHI_ENOSR = 63,
+ UHI_ENOTCONN = 128,
+ UHI_ENOTDIR = 20,
+ UHI_ENXIO = 6,
+ UHI_EOVERFLOW = 139,
+ UHI_EPERM = 1,
+ UHI_EPIPE = 32,
+ UHI_ERANGE = 34,
+ UHI_EROFS = 30,
+ UHI_ESPIPE = 29,
+ UHI_ETIMEDOUT = 116,
+ UHI_ETXTBSY = 26,
+ UHI_EWOULDBLOCK = 11,
+ UHI_EXDEV = 18,
+};
+
+static void report_fault(CPUMIPSState *env)
+{
+ int op = env->active_tc.gpr[25];
+ error_report("Fault during UHI operation %d", op);
+ abort();
+}
+
+static void uhi_cb(CPUState *cs, uint64_t ret, int err)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
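+/* Translate host errno values into their UHI counterparts. */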
+#define E(N) case E##N: err = UHI_E##N; break
+
+ switch (err) {
+ case 0:
+ break;
+ E(PERM);
+ E(NOENT);
+ E(INTR);
+ E(BADF);
+ E(BUSY);
+ E(EXIST);
+ E(NOTDIR);
+ E(ISDIR);
+ E(INVAL);
+ E(NFILE);
+ E(MFILE);
+ E(FBIG);
+ E(NOSPC);
+ E(SPIPE);
+ E(ROFS);
+ E(NAMETOOLONG);
+ default:
+ err = UHI_EINVAL;
+ break;
+ case EFAULT:
+ report_fault(env);
+ }
+
+#undef E
+
+ env->active_tc.gpr[2] = ret;
+ env->active_tc.gpr[3] = err;
+}
+
+static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err)
+{
+ QEMU_BUILD_BUG_ON(sizeof(UHIStat) < sizeof(struct gdb_stat));
+
+ if (!err) {
+ CPUMIPSState *env = cpu_env(cs);
+ bool swap_needed = HOST_BIG_ENDIAN != mips_env_is_bigendian(env);
+ target_ulong addr = env->active_tc.gpr[5];
+ UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1);
+ struct gdb_stat s;
+
+ if (!dst) {
+ report_fault(env);
+ }
+
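+ /* gdb_stat fields are big-endian: convert to host order first, then to guest order if it differs. */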
+ memcpy(&s, dst, sizeof(struct gdb_stat));
+ memset(dst, 0, sizeof(UHIStat));
+
+ dst->uhi_st_dev = be32_to_cpu(s.gdb_st_dev);
+ dst->uhi_st_ino = be32_to_cpu(s.gdb_st_ino);
+ dst->uhi_st_mode = be32_to_cpu(s.gdb_st_mode);
+ dst->uhi_st_nlink = be32_to_cpu(s.gdb_st_nlink);
+ dst->uhi_st_uid = be32_to_cpu(s.gdb_st_uid);
+ dst->uhi_st_gid = be32_to_cpu(s.gdb_st_gid);
+ dst->uhi_st_rdev = be32_to_cpu(s.gdb_st_rdev);
+ dst->uhi_st_size = be64_to_cpu(s.gdb_st_size);
+ dst->uhi_st_atime = be32_to_cpu(s.gdb_st_atime);
+ dst->uhi_st_mtime = be32_to_cpu(s.gdb_st_mtime);
+ dst->uhi_st_ctime = be32_to_cpu(s.gdb_st_ctime);
+ dst->uhi_st_blksize = be64_to_cpu(s.gdb_st_blksize);
+ dst->uhi_st_blocks = be64_to_cpu(s.gdb_st_blocks);
+
+ if (swap_needed) {
+ dst->uhi_st_dev = bswap16(dst->uhi_st_dev);
+ dst->uhi_st_ino = bswap16(dst->uhi_st_ino);
+ dst->uhi_st_mode = bswap32(dst->uhi_st_mode);
+ dst->uhi_st_nlink = bswap16(dst->uhi_st_nlink);
+ dst->uhi_st_uid = bswap16(dst->uhi_st_uid);
+ dst->uhi_st_gid = bswap16(dst->uhi_st_gid);
+ dst->uhi_st_rdev = bswap16(dst->uhi_st_rdev);
+ dst->uhi_st_size = bswap64(dst->uhi_st_size);
+ dst->uhi_st_atime = bswap64(dst->uhi_st_atime);
+ dst->uhi_st_mtime = bswap64(dst->uhi_st_mtime);
+ dst->uhi_st_ctime = bswap64(dst->uhi_st_ctime);
+ dst->uhi_st_blksize = bswap64(dst->uhi_st_blksize);
+ dst->uhi_st_blocks = bswap64(dst->uhi_st_blocks);
+ }
+
+ unlock_user(dst, addr, sizeof(UHIStat));
+ }
+
+ uhi_cb(cs, ret, err);
+}
+
+void mips_semihosting(CPUMIPSState *env)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong *gpr = env->active_tc.gpr;
+ const UHIOp op = gpr[25];
+ char *p;
+
+ switch (op) {
+ case UHI_exit:
+ gdb_exit(gpr[4]);
+ exit(gpr[4]);
+
+ case UHI_open:
+ {
+ target_ulong fname = gpr[4];
+ int ret = -1;
+
+ p = lock_user_string(fname);
+ if (!p) {
+ report_fault(env);
+ }
+ if (!strcmp("/dev/stdin", p)) {
+ ret = 0;
+ } else if (!strcmp("/dev/stdout", p)) {
+ ret = 1;
+ } else if (!strcmp("/dev/stderr", p)) {
+ ret = 2;
+ }
+ unlock_user(p, fname, 0);
+
+ /* FIXME: reusing a guest fd doesn't seem correct. */
+ if (ret >= 0) {
+ gpr[2] = ret;
+ break;
+ }
+
+ semihost_sys_open(cs, uhi_cb, fname, 0, gpr[5], gpr[6]);
+ }
+ break;
+
+ case UHI_close:
+ semihost_sys_close(cs, uhi_cb, gpr[4]);
+ break;
+ case UHI_read:
+ semihost_sys_read(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_write:
+ semihost_sys_write(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_lseek:
+ semihost_sys_lseek(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_unlink:
+ semihost_sys_remove(cs, uhi_cb, gpr[4], 0);
+ break;
+ case UHI_fstat:
+ semihost_sys_fstat(cs, uhi_fstat_cb, gpr[4], gpr[5]);
+ break;
+
+ case UHI_argc:
+ gpr[2] = semihosting_get_argc();
+ break;
+ case UHI_argnlen:
+ {
+ const char *s = semihosting_get_arg(gpr[4]);
+ gpr[2] = s ? strlen(s) : -1;
+ }
+ break;
+ case UHI_argn:
+ {
+ const char *s = semihosting_get_arg(gpr[4]);
+ target_ulong addr;
+ size_t len;
+
+ if (!s) {
+ gpr[2] = -1;
+ break;
+ }
+ len = strlen(s) + 1;
+ addr = gpr[5];
+ p = lock_user(VERIFY_WRITE, addr, len, 0);
+ if (!p) {
+ report_fault(env);
+ }
+ memcpy(p, s, len);
+ unlock_user(p, addr, len);
+ gpr[2] = 0;
+ }
+ break;
+
+ case UHI_plog:
+ {
+ target_ulong addr = gpr[4];
+ ssize_t len = target_strlen(addr);
+ GString *str;
+ char *pct_d;
+
+ if (len < 0) {
+ report_fault(env);
+ }
+ p = lock_user(VERIFY_READ, addr, len, 1);
+ if (!p) {
+ report_fault(env);
+ }
+
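+ /* plog expands a single optional "%d" in the format string using gpr[5]. */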
+ pct_d = strstr(p, "%d");
+ if (!pct_d) {
+ unlock_user(p, addr, 0);
+ semihost_sys_write(cs, uhi_cb, 2, addr, len);
+ break;
+ }
+
+ str = g_string_new_len(p, pct_d - p);
+ g_string_append_printf(str, "%d%s", (int)gpr[5], pct_d + 2);
+ unlock_user(p, addr, 0);
+
+ /*
+ * When we're using gdb, we need a guest address, so
+ * drop the string onto the stack below the stack pointer.
+ */
+ if (use_gdb_syscalls()) {
+ addr = gpr[29] - str->len;
+ p = lock_user(VERIFY_WRITE, addr, str->len, 0);
+ if (!p) {
+ report_fault(env);
+ }
+ memcpy(p, str->str, str->len);
+ unlock_user(p, addr, str->len);
+ semihost_sys_write(cs, uhi_cb, 2, addr, str->len);
+ } else {
+ gpr[2] = qemu_semihosting_console_write(str->str, str->len);
+ }
+ g_string_free(str, true);
+ }
+ break;
+
+ case UHI_assert:
+ {
+ const char *msg, *file;
+
+ msg = lock_user_string(gpr[4]);
+ if (!msg) {
+ msg = "<EFAULT>";
+ }
+ file = lock_user_string(gpr[5]);
+ if (!file) {
+ file = "<EFAULT>";
+ }
+
+ error_report("UHI assertion \"%s\": file \"%s\", line %d",
+ msg, file, (int)gpr[6]);
+ abort();
+ }
+
+ default:
+ error_report("Unknown UHI operation %d", op);
+ abort();
+ }
+}
diff --git a/target/mips/tcg/system/semihosting-stub.c b/target/mips/tcg/system/semihosting-stub.c
new file mode 100644
index 0000000..bb1f7aa
--- /dev/null
+++ b/target/mips/tcg/system/semihosting-stub.c
@@ -0,0 +1,16 @@
+/*
+ * MIPS semihosting stub
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (c) 2024 Linaro Ltd.
+ * Authors:
+ * Philippe Mathieu-DaudƩ
+ */
+
+#include "qemu/osdep.h"
+#include "internal.h"
+
+void mips_semihosting(CPUMIPSState *env)
+{
+ g_assert_not_reached();
+}
diff --git a/target/mips/tcg/system/special_helper.c b/target/mips/tcg/system/special_helper.c
new file mode 100644
index 0000000..b54cbe8
--- /dev/null
+++ b/target/mips/tcg/system/special_helper.c
@@ -0,0 +1,173 @@
+/*
+ * QEMU MIPS emulation: Special opcode helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "internal.h"
+
+/* Specials */
+target_ulong helper_di(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 & ~(1 << CP0St_IE);
+ return t0;
+}
+
+target_ulong helper_ei(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 | (1 << CP0St_IE);
+ return t0;
+}
+
+static void debug_pre_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ qemu_log("\n");
+ }
+}
+
+static void debug_post_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ switch (mips_env_mmu_index(env)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
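+ /* If execution stopped in a delay slot, rewind PC to the branch so it is replayed. */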
+ if ((env->hflags & MIPS_HFLAG_BMASK) != 0
+ && !tcg_cflags_has(cs, CF_PCREL) && env->active_tc.PC != tb->pc) {
+ env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ return true;
+ }
+ return false;
+}
+
+static inline void exception_return(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
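+ /* ERL takes precedence over EXL: return to ErrorEPC if ERL is set, otherwise to EPC. */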
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ mips_env_set_pc(env, env->CP0_ErrorEPC);
+ env->CP0_Status &= ~(1 << CP0St_ERL);
+ } else {
+ mips_env_set_pc(env, env->CP0_EPC);
+ env->CP0_Status &= ~(1 << CP0St_EXL);
+ }
+ compute_hflags(env);
+ debug_post_eret(env);
+}
+
+void helper_eret(CPUMIPSState *env)
+{
+ exception_return(env);
+ env->CP0_LLAddr = 1;
+ env->lladdr = 1;
+}
+
+void helper_eretnc(CPUMIPSState *env)
+{
+ exception_return(env);
+}
+
+void helper_deret(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+
+ env->hflags &= ~MIPS_HFLAG_DM;
+ compute_hflags(env);
+
+ mips_env_set_pc(env, env->CP0_DEPC);
+
+ debug_post_eret(env);
+}
+
+void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
+{
+ static const char *const type_name[] = {
+ "Primary Instruction",
+ "Primary Data or Unified Primary",
+ "Tertiary",
+ "Secondary"
+ };
+ uint32_t cache_type = extract32(op, 0, 2);
+ uint32_t cache_operation = extract32(op, 2, 3);
+ target_ulong index = addr & 0x1fffffff;
+
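+ /* Only the ITC tag is modelled here: index tag load/store access the ITC device, invalidates are no-ops. */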
+ switch (cache_operation) {
+ case 0b010: /* Index Store Tag */
+ memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b001: /* Index Load Tag */
+ memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b000: /* Index Invalidate */
+ case 0b100: /* Hit Invalidate */
+ case 0b110: /* Hit Writeback */
+ /* no-op */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
+ cache_operation, type_name[cache_type]);
+ break;
+ }
+}
diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c
new file mode 100644
index 0000000..eccaf36
--- /dev/null
+++ b/target/mips/tcg/system/tlb_helper.c
@@ -0,0 +1,1422 @@
+/*
+ * MIPS TLB (Translation lookaside buffer) helpers.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+
+#include "cpu.h"
+#include "internal.h"
+#include "exec/cputlb.h"
+#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+/* TLB management */
+static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
+{
+ /* Discard entries from env->tlb[first] onwards. */
+ while (env->tlb->tlb_in_use > first) {
+ r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
+ }
+}
+
+static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
+{
+#if defined(TARGET_MIPS64)
+ return extract64(entrylo, 6, 54);
+#else
+ return extract64(entrylo, 6, 24) | /* PFN */
+ (extract64(entrylo, 32, 32) << 24); /* PFNX */
+#endif
+}
+
+static void r4k_fill_tlb(CPUMIPSState *env, int idx)
+{
+ r4k_tlb_t *tlb;
+ uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
+
+ /* XXX: detect conflicting TLBs and raise an MCHECK exception when needed */
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
+ tlb->EHINV = 1;
+ return;
+ }
+ tlb->EHINV = 0;
+ tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ tlb->VPN &= env->SEGMask;
+#endif
+ tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ tlb->MMID = env->CP0_MemoryMapID;
+ tlb->PageMask = env->CP0_PageMask;
+ tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
+ tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
+ tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
+ tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
+ tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
+ tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
+ tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
+ tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+ tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
+}
+
+static void r4k_helper_tlbinv(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (!tlb->G && tlb_mmid == MMID) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbinvf(CPUMIPSState *env)
+{
+ int idx;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbwi(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
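+ /* Ignore the probe-failure bit and wrap the index to the TLB size. */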
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ VPN &= env->SEGMask;
+#endif
+ EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
+ G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ V0 = (env->CP0_EntryLo0 & 2) != 0;
+ D0 = (env->CP0_EntryLo0 & 4) != 0;
+ XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ V1 = (env->CP0_EntryLo1 & 2) != 0;
+ D1 = (env->CP0_EntryLo1 & 4) != 0;
+ XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /*
+ * Discard cached TLB entries, unless tlbwi is just upgrading access
+ * permissions on the current entry.
+ */
+ if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
+ (!tlb->EHINV && EHINV) ||
+ (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
+ (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
+ (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
+ (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+ }
+
+ r4k_invalidate_tlb(env, idx, 0);
+ r4k_fill_tlb(env, idx);
+}
+
+static void r4k_helper_tlbwr(CPUMIPSState *env)
+{
+ int r = cpu_mips_get_random(env);
+
+ r4k_invalidate_tlb(env, r, 1);
+ r4k_fill_tlb(env, r);
+}
+
+static void r4k_helper_tlbp(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ r4k_tlb_t *tlb;
+ target_ulong mask;
+ target_ulong tag;
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ env->CP0_Index = i;
+ break;
+ }
+ }
+ if (i == env->tlb->nb_tlb) {
+ /* No match. Discard shadow entries if one of them matches. */
+ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
+ r4k_mips_tlb_flush_extra(env, i);
+ break;
+ }
+ }
+
+ env->CP0_Index |= 0x80000000;
+ }
+}
+
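+/*
+ * Inverse of get_tlb_pfn_from_entrylo(): place a stored TLB PFN back into
+ * the EntryLo layout (PFN at bit 6, plus the PFNX extension on 32-bit
+ * targets).
+ */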
+static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
+{
+#if defined(TARGET_MIPS64)
+ return tlb_pfn << 6;
+#else
+ return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
+ (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
+#endif
+}
+
+static void r4k_helper_tlbr(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* If this will change the current ASID/MMID, flush qemu's TLB. */
+ if (MMID != tlb_mmid) {
+ cpu_mips_tlb_flush(env);
+ }
+
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+
+ if (tlb->EHINV) {
+ env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
+ env->CP0_PageMask = 0;
+ env->CP0_EntryLo0 = 0;
+ env->CP0_EntryLo1 = 0;
+ } else {
+ env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
+ env->CP0_MemoryMapID = tlb->MMID;
+ env->CP0_PageMask = tlb->PageMask;
+ env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
+ ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
+ env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
+ ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
+ }
+}
+
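+/*
+ * Architectural TLB instruction helpers: these dispatch through the
+ * function pointers installed for the configured MMU model by mmu_init().
+ */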
+void helper_tlbwi(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwi(env);
+}
+
+void helper_tlbwr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwr(env);
+}
+
+void helper_tlbp(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbp(env);
+}
+
+void helper_tlbr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbr(env);
+}
+
+void helper_tlbinv(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinv(env);
+}
+
+void helper_tlbinvf(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinvf(env);
+}
+
+static void global_invalidate_tlb(CPUMIPSState *env,
+ uint32_t invMsgVPN2,
+ uint8_t invMsgR,
+ uint32_t invMsgMMid,
+ bool invAll,
+ bool invVAMMid,
+ bool invMMid,
+ bool invVA)
+{
+ int idx;
+ r4k_tlb_t *tlb;
+ bool VAMatch;
+ bool MMidMatch;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VAMatch =
+ (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
+#ifdef TARGET_MIPS64
+ &&
+ (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
+#endif
+ );
+ MMidMatch = tlb->MMID == invMsgMMid;
+ if ((invAll && (idx > env->CP0_Wired)) ||
+ (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
+ (VAMatch && invVA) ||
+ (MMidMatch && !(tlb->G) && invMMid)) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
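+/*
+ * GINVT: global TLB invalidate.  'type' selects the operation:
+ * 0 = invalidate all, 1 = by VA, 2 = by MMID, 3 = by VA and MMID.
+ */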
+void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
+{
+ bool invAll = type == 0;
+ bool invVA = type == 1;
+ bool invMMid = type == 2;
+ bool invVAMMid = type == 3;
+ uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
+ uint8_t invMsgR = 0;
+ uint32_t invMsgMMid = env->CP0_MemoryMapID;
+ CPUState *other_cs = first_cpu;
+
+#ifdef TARGET_MIPS64
+ invMsgR = extract64(arg, 62, 2);
+#endif
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
+ invAll, invVAMMid, invMMid, invVA);
+ }
+}
+
+/* no MMU emulation */
+static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* fixed mapping MMU emulation */
+static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
+{
+ if (address <= (int32_t)0x7FFFFFFFUL) {
+ if (!(env->CP0_Status & (1 << CP0St_ERL))) {
+ *physical = address + 0x40000000UL;
+ } else {
+ *physical = address;
+ }
+ } else if (address <= (int32_t)0xBFFFFFFFUL) {
+ *physical = address & 0x1FFFFFFF;
+ } else {
+ *physical = address;
+ }
+
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* MIPS32/MIPS64 R4000-style MMU emulation */
+static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ for (i = 0; i < env->tlb->tlb_in_use; i++) {
+ r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ target_ulong tag = address & ~mask;
+ target_ulong VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+
+ /* Check ASID/MMID, virtual page number & size */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ int n = !!(address & mask & ~(mask >> 1));
+ /* Check access rights */
+ if (!(n ? tlb->V1 : tlb->V0)) {
+ return TLBRET_INVALID;
+ }
+ if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+ return TLBRET_XI;
+ }
+ if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+ return TLBRET_RI;
+ }
+ if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
+ *physical = tlb->PFN[n] | (address & (mask >> 1));
+ *prot = PAGE_READ;
+ if (n ? tlb->D1 : tlb->D0) {
+ *prot |= PAGE_WRITE;
+ }
+ if (!(n ? tlb->XI1 : tlb->XI0)) {
+ *prot |= PAGE_EXEC;
+ }
+ return TLBRET_MATCH;
+ }
+ return TLBRET_DIRTY;
+ }
+ }
+ return TLBRET_NOMATCH;
+}
+
+static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &no_mmu_map_address;
+}
+
+static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &fixed_mmu_map_address;
+}
+
+static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
+ env->tlb->map_address = &r4k_map_address;
+ env->tlb->helper_tlbwi = r4k_helper_tlbwi;
+ env->tlb->helper_tlbwr = r4k_helper_tlbwr;
+ env->tlb->helper_tlbp = r4k_helper_tlbp;
+ env->tlb->helper_tlbr = r4k_helper_tlbr;
+ env->tlb->helper_tlbinv = r4k_helper_tlbinv;
+ env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
+}
+
+void mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
+
+ switch (def->mmu_type) {
+ case MMU_TYPE_NONE:
+ no_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R4000:
+ r4k_mmu_init(env, def);
+ break;
+ case MMU_TYPE_FMT:
+ fixed_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R3000:
+ case MMU_TYPE_R6000:
+ case MMU_TYPE_R8000:
+ default:
+ cpu_abort(env_cpu(env), "MMU type not supported\n");
+ }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env)
+{
+ /* Flush qemu's TLB and discard all shadowed entries. */
+ tlb_flush(env_cpu(env));
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+
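+/*
+ * Map a TLBRET_* result onto the corresponding MIPS exception and update
+ * the CP0 fault-reporting registers (BadVAddr, Context, EntryHi and, on
+ * 64-bit targets, XContext).
+ */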
+static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, int tlb_error)
+{
+ CPUState *cs = env_cpu(env);
+ int exception = 0, error_code = 0;
+
+ if (access_type == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+
+ switch (tlb_error) {
+ default:
+ case TLBRET_BADADDR:
+ /*
+ * Reference to kernel address from user mode or supervisor mode,
+ * or reference to supervisor address from user mode.
+ */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_AdES;
+ } else {
+ exception = EXCP_AdEL;
+ }
+ break;
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ error_code |= EXCP_TLB_NOMATCH;
+ break;
+ case TLBRET_INVALID:
+ /* TLB match with no valid bit */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_DIRTY:
+ /* TLB match but 'D' bit is cleared */
+ exception = EXCP_LTLBL;
+ break;
+ case TLBRET_XI:
+ /* Execute-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBXI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_RI:
+ /* Read-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBRI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ }
+ /* Raise exception */
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = address;
+ }
+ env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
+ ((address >> 9) & 0x007ffff0);
+ env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
+ (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
+ (address & (TARGET_PAGE_MASK << 1));
+#if defined(TARGET_MIPS64)
+ env->CP0_EntryHi &= env->SEGMask;
+ env->CP0_XContext =
+ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
+ (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */
+ (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */
+#endif
+ cs->exception_index = exception;
+ env->error_code = error_code;
+}
+
+#if !defined(TARGET_MIPS64)
+
+/*
+ * Perform hardware page table walk
+ *
+ * Memory accesses are performed using the KERNEL privilege level.
+ * Synchronous exceptions detected on memory accesses cause a silent exit
+ * from page table walking, resulting in a TLB or XTLB Refill exception.
+ *
+ * Implementations are not required to support page table walk memory
+ * accesses from mapped memory regions. When an unsupported access is
+ * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
+ * exception.
+ *
+ * Note that if an exception is raised by the AddressTranslation or
+ * LoadMemory functions, it is not taken; a silent exit occurs instead,
+ * again resulting in a TLB or XTLB Refill exception.
+ */
+
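+/*
+ * Load one PTE of size 'op' from 'vaddr' using the page-walker MMU index.
+ * Returns false if the address is not naturally aligned for the access.
+ */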
+static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op,
+ uint64_t *pte, unsigned ptw_mmu_idx)
+{
+ MemOpIdx oi;
+
+ if ((vaddr & (memop_size(op) - 1)) != 0) {
+ return false;
+ }
+
+ oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx);
+ if (op == MO_64) {
+ *pte = cpu_ldq_mmu(env, vaddr, oi, 0);
+ } else {
+ *pte = cpu_ldl_mmu(env, vaddr, oi, 0);
+ }
+
+ return true;
+}
+
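+/*
+ * Convert a software PTE into EntryLo layout: the two RI/XI bits located
+ * just below bit 'ptei' are moved up to the CP0EnLo_XI/RI positions and
+ * the remaining bits are shifted down into place.
+ */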
+static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
+ MemOp op, int ptei)
+{
+ unsigned entry_size = memop_size(op) << 3;
+ uint64_t result = entry;
+ uint64_t rixi;
+ if (ptei > entry_size) {
+ ptei -= 32;
+ }
+ result >>= (ptei - 2);
+ rixi = result & 3;
+ result >>= 2;
+ result |= rixi << CP0EnLo_XI;
+ return result;
+}
+
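+/*
+ * Walk one directory level.  Returns 0 to abort the walk, 1 when a huge
+ * page leaf has been found (pw_entrylo0/1 filled in), or 2 when *vaddr
+ * has been updated to point at the next level to walk.
+ */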
+static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
+ int directory_index, bool *huge_page, bool *hgpg_directory_hit,
+ uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
+ MemOp directory_mop, MemOp leaf_mop, int ptw_mmu_idx)
+{
+ int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
+ int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ uint64_t entry;
+ uint64_t paddr;
+ int prot;
+ uint64_t lsb = 0;
+ uint64_t w = 0;
+
+ if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ /* wrong base address */
+ return 0;
+ }
+ if (!get_pte(env, *vaddr, directory_mop, &entry, ptw_mmu_idx)) {
+ return 0;
+ }
+
+ if ((entry & (1 << psn)) && hugepg) {
+ *huge_page = true;
+ *hgpg_directory_hit = true;
+ entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
+ w = directory_index - 1;
+ if (directory_index & 0x1) {
+ /* Generate adjacent page from same PTE for odd TLB page */
+ lsb = BIT_ULL(w) >> 6;
+ *pw_entrylo0 = entry & ~lsb; /* even page */
+ *pw_entrylo1 = entry | lsb; /* odd page */
+ } else if (dph) {
+ int oddpagebit = 1 << leaf_mop;
+ uint64_t vaddr2 = *vaddr ^ oddpagebit;
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo1 = entry;
+ } else {
+ *pw_entrylo0 = entry;
+ }
+ if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return 0;
+ }
+ if (!get_pte(env, vaddr2, leaf_mop, &entry, ptw_mmu_idx)) {
+ return 0;
+ }
+ entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo0 = entry;
+ } else {
+ *pw_entrylo1 = entry;
+ }
+ } else {
+ return 0;
+ }
+ return 1;
+ } else {
+ *vaddr = entry;
+ return 2;
+ }
+}
+
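+/*
+ * Perform a hardware page table walk for 'address' and, on success, write
+ * the resulting PTE pair into the TLB exactly as a TLBWR would.  Returns
+ * false if the walker is disabled, unconfigured, or the walk fails.
+ */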
+static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
+ int ptw_mmu_idx)
+{
+ int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
+ int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
+ int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
+ int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
+ int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
+
+ /* Initial values */
+ bool huge_page = false;
+ bool hgpg_bdhit = false;
+ bool hgpg_gdhit = false;
+ bool hgpg_udhit = false;
+ bool hgpg_mdhit = false;
+
+ int32_t pw_pagemask = 0;
+ target_ulong pw_entryhi = 0;
+ uint64_t pw_entrylo0 = 0;
+ uint64_t pw_entrylo1 = 0;
+
+ /*
+ * Native pointer size.
+ * For the 32-bit architectures, this bit is fixed to 0.
+ */
+ MemOp native_op = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? MO_32 : MO_64;
+
+ /* Indices from PWField */
+ int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
+ int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
+ int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
+ int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+
+ /* Indices computed from faulting address */
+ int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
+ int uindex = (address >> pf_udw) & ((1 << udw) - 1);
+ int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
+ int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
+
+ /* Other HTW configs */
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+ MemOp directory_mop, leaf_mop;
+
+ /* Offsets into tables */
+ unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;
+
+ /* Starting address - Page Table Base */
+ uint64_t vaddr = env->CP0_PWBase;
+
+ uint64_t dir_entry;
+ uint64_t paddr;
+ int prot;
+ int m;
+
+ if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
+ /* walker is unimplemented */
+ return false;
+ }
+ if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
+ /* walker is disabled */
+ return false;
+ }
+ if (!(gdw > 0 || udw > 0 || mdw > 0)) {
+ /* no structure to walk */
+ return false;
+ }
+ if (ptew > 1) {
+ return false;
+ }
+
+ /* HTW Shift values (depend on entry size) */
+ directory_mop = (hugepg && (ptew == 1)) ? native_op + 1 : native_op;
+ leaf_mop = (ptew == 1) ? native_op + 1 : native_op;
+
+ goffset = gindex << directory_mop;
+ uoffset = uindex << directory_mop;
+ moffset = mindex << directory_mop;
+ ptoffset0 = (ptindex >> 1) << (leaf_mop + 1);
+ ptoffset1 = ptoffset0 | (1 << (leaf_mop));
+
+ /* Global Directory */
+ if (gdw > 0) {
+ vaddr |= goffset;
+ switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Upper directory */
+ if (udw > 0) {
+ vaddr |= uoffset;
+ switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Middle directory */
+ if (mdw > 0) {
+ vaddr |= moffset;
+ switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Leaf Level Page Table - First half of PTE pair */
+ vaddr |= ptoffset0;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
+ pw_entrylo0 = dir_entry;
+
+ /* Leaf Level Page Table - Second half of PTE pair */
+ vaddr |= ptoffset1;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
+ pw_entrylo1 = dir_entry;
+
+refill:
+
+ m = (1 << pf_ptw) - 1;
+
+ if (huge_page) {
+ switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
+ hgpg_mdhit)
+ {
+ case 4:
+ m = (1 << pf_gdw) - 1;
+ if (pf_gdw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 2:
+ m = (1 << pf_udw) - 1;
+ if (pf_udw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 1:
+ m = (1 << pf_mdw) - 1;
+ if (pf_mdw & 1) {
+ m >>= 1;
+ }
+ break;
+ }
+ }
+ pw_pagemask = m >> TARGET_PAGE_BITS;
+ pw_pagemask = compute_pagemask(pw_pagemask << CP0PM_MASK);
+ pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
+ {
+ target_ulong tmp_entryhi = env->CP0_EntryHi;
+ int32_t tmp_pagemask = env->CP0_PageMask;
+ uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
+ uint64_t tmp_entrylo1 = env->CP0_EntryLo1;
+
+ env->CP0_EntryHi = pw_entryhi;
+ env->CP0_PageMask = pw_pagemask;
+ env->CP0_EntryLo0 = pw_entrylo0;
+ env->CP0_EntryLo1 = pw_entrylo1;
+
+ /*
+ * The hardware page walker inserts a page into the TLB in a manner
+ * identical to a TLBWR instruction as executed by the software refill
+ * handler.
+ */
+ r4k_helper_tlbwr(env);
+
+ env->CP0_EntryHi = tmp_entryhi;
+ env->CP0_PageMask = tmp_pagemask;
+ env->CP0_EntryLo0 = tmp_entrylo0;
+ env->CP0_EntryLo1 = tmp_entrylo1;
+ }
+ return true;
+}
+#endif
+
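+/*
+ * QEMU TLB fill hook: translate 'address' and install the mapping into
+ * QEMU's TLB; on a TLB miss, retry after a hardware page table walk when
+ * the walker is available; otherwise raise the architectural MMU exception
+ * (unless probing).
+ */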
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ CPUMIPSState *env = cpu_env(cs);
+ hwaddr physical;
+ int prot;
+ int ret = TLBRET_BADADDR;
+
+ /* data access */
+ /* XXX: determine the correct access type by using cpu_restore_state() */
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ switch (ret) {
+ case TLBRET_MATCH:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
+ " prot %d\n", __func__, address, physical, prot);
+ break;
+ default:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
+ ret);
+ break;
+ }
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+#if !defined(TARGET_MIPS64)
+ if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
+ /*
+ * Memory reads during hardware page table walking are performed
+ * as if they were kernel-mode load instructions.
+ */
+ int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ?
+ MMU_ERL_IDX : MMU_KERNEL_IDX);
+
+ if (page_table_walk_refill(env, address, ptw_mmu_idx)) {
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ }
+ }
+#endif
+ if (probe) {
+ return false;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
+}
+
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ hwaddr physical;
+ int prot;
+ int ret = 0;
+ CPUState *cs = env_cpu(env);
+
+ /* data access */
+ ret = get_physical_address(env, &physical, &prot, address, access_type,
+ mips_env_mmu_index(env));
+ if (ret == TLBRET_MATCH) {
+ return physical;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+static void set_hflags_for_handler(CPUMIPSState *env)
+{
+ /* Exception handlers are entered in 32-bit mode. */
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ /* ...except that microMIPS lets you choose. */
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->hflags |= (!!(env->CP0_Config3 &
+ (1 << CP0C3_ISA_ON_EXC))
+ << MIPS_HFLAG_M16_SHIFT);
+ }
+}
+
+static inline void set_badinstr_registers(CPUMIPSState *env)
+{
+ if (env->insn_flags & ISA_NANOMIPS32) {
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
+ if ((instr & 0x10000000) == 0) {
+ instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
+ }
+ env->CP0_BadInstr = instr;
+
+ if ((instr & 0xFC000000) == 0x60000000) {
+ instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
+ env->CP0_BadInstrX = instr;
+ }
+ }
+ return;
+ }
+
+ if (env->hflags & MIPS_HFLAG_M16) {
+ /* TODO: add BadInstr support for microMIPS */
+ return;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
+ }
+ if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
+ (env->hflags & MIPS_HFLAG_BMASK)) {
+ env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
+ }
+}
+
+void mips_cpu_do_interrupt(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool update_badinstr = false;
+ target_ulong offset;
+ int cause = -1;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
+ " %s exception\n",
+ __func__, env->active_tc.PC, env->CP0_EPC,
+ mips_exception_name(cs->exception_index));
+ }
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
+ offset = 0x180;
+ switch (cs->exception_index) {
+ case EXCP_SEMIHOST:
+ cs->exception_index = EXCP_NONE;
+ mips_semihosting(env);
+ env->active_tc.PC += env->error_code;
+ return;
+ case EXCP_DSS:
+ env->CP0_Debug |= 1 << CP0DB_DSS;
+ /*
+ * Debug single step cannot be raised inside a delay slot and
+ * resume will always occur on the next instruction
+ * (but we assume the pc has always been updated during
+ * code translation).
+ */
+ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
+ goto enter_debug_mode;
+ case EXCP_DINT:
+ env->CP0_Debug |= 1 << CP0DB_DINT;
+ goto set_DEPC;
+ case EXCP_DIB:
+ env->CP0_Debug |= 1 << CP0DB_DIB;
+ goto set_DEPC;
+ case EXCP_DBp:
+ env->CP0_Debug |= 1 << CP0DB_DBp;
+ /* Set up DExcCode - SDBBP instruction */
+ env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
+ (9 << CP0DB_DEC);
+ goto set_DEPC;
+ case EXCP_DDBS:
+ env->CP0_Debug |= 1 << CP0DB_DDBS;
+ goto set_DEPC;
+ case EXCP_DDBL:
+ env->CP0_Debug |= 1 << CP0DB_DDBL;
+ set_DEPC:
+ env->CP0_DEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ enter_debug_mode:
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ /* EJTAG probe trap enable is not implemented... */
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base + 0x480;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_RESET:
+ cpu_reset(CPU(cpu));
+ break;
+ case EXCP_SRESET:
+ env->CP0_Status |= (1 << CP0St_SR);
+ memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
+ goto set_error_EPC;
+ case EXCP_NMI:
+ env->CP0_Status |= (1 << CP0St_NMI);
+ set_error_EPC:
+ env->CP0_ErrorEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_EXT_INTERRUPT:
+ cause = 0;
+ if (env->CP0_Cause & (1 << CP0Ca_IV)) {
+ uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
+
+ if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
+ offset = 0x200;
+ } else {
+ uint32_t vector = 0;
+ uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+ /*
+ * For VEIC mode, the external interrupt controller feeds
+ * the vector through the CP0Cause IP lines.
+ */
+ vector = pending;
+ } else {
+ /*
+ * Vectored Interrupts
+ * Mask with Status.IM7-IM0 to get enabled interrupts.
+ */
+ pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
+ /* Find the highest-priority interrupt. */
+ while (pending >>= 1) {
+ vector++;
+ }
+ }
+ offset = 0x200 + (vector * (spacing << 5));
+ }
+ }
+ goto set_EPC;
+ case EXCP_LTLBL:
+ cause = 1;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_TLBL:
+ cause = 2;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_TLBS:
+ cause = 3;
+ update_badinstr = 1;
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_AdEL:
+ cause = 4;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_AdES:
+ cause = 5;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_IBE:
+ cause = 6;
+ goto set_EPC;
+ case EXCP_DBE:
+ cause = 7;
+ goto set_EPC;
+ case EXCP_SYSCALL:
+ cause = 8;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_BREAK:
+ cause = 9;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_RI:
+ cause = 10;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_CpU:
+ cause = 11;
+ update_badinstr = 1;
+ env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
+ (env->error_code << CP0Ca_CE);
+ goto set_EPC;
+ case EXCP_OVERFLOW:
+ cause = 12;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TRAP:
+ cause = 13;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MSAFPE:
+ cause = 14;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_FPE:
+ cause = 15;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_C2E:
+ cause = 18;
+ goto set_EPC;
+ case EXCP_TLBRI:
+ cause = 19;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TLBXI:
+ cause = 20;
+ goto set_EPC;
+ case EXCP_MSADIS:
+ cause = 21;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MDMX:
+ cause = 22;
+ goto set_EPC;
+ case EXCP_DWATCH:
+ cause = 23;
+ /* XXX: TODO: manage deferred watch exceptions */
+ goto set_EPC;
+ case EXCP_MCHECK:
+ cause = 24;
+ goto set_EPC;
+ case EXCP_THREAD:
+ cause = 25;
+ goto set_EPC;
+ case EXCP_DSPDIS:
+ cause = 26;
+ goto set_EPC;
+ case EXCP_CACHE:
+ cause = 30;
+ offset = 0x100;
+ set_EPC:
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_EPC = exception_resume_pc(env);
+ if (update_badinstr) {
+ set_badinstr_registers(env);
+ }
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ env->CP0_Cause |= (1U << CP0Ca_BD);
+ } else {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->CP0_Status |= (1 << CP0St_EXL);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ }
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ if (env->CP0_Status & (1 << CP0St_BEV)) {
+ env->active_tc.PC = env->exception_base + 0x200;
+ } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
+ env->CP0_Config5 & (1 << CP0C5_CV))) {
+ /* Force KSeg1 for cache errors */
+ env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
+ } else {
+ env->active_tc.PC = env->CP0_EBase & ~0xfff;
+ }
+
+ env->active_tc.PC += offset;
+ set_hflags_for_handler(env);
+ env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
+ (cause << CP0Ca_EC);
+ break;
+ default:
+ abort();
+ }
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
+ " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
+ __func__, env->active_tc.PC, env->CP0_EPC, cause,
+ env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
+ env->CP0_DEPC);
+ }
+ cs->exception_index = EXCP_NONE;
+}
+
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ CPUMIPSState *env = cpu_env(cs);
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
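+/*
+ * Drop QEMU's cached translations for TLB entry 'idx'.  With 'use_extra',
+ * the replaced entry may instead be kept as a hidden shadow entry so that
+ * a tlbwr replacement stays invisible to the guest.
+ */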
+void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
+{
+ CPUState *cs = env_cpu(env);
+ r4k_tlb_t *tlb;
+ target_ulong addr;
+ target_ulong end;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ target_ulong mask;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /*
+ * The qemu TLB is flushed when the ASID/MMID changes, so no need to
+ * flush these entries again.
+ */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (tlb->G == 0 && tlb_mmid != MMID) {
+ return;
+ }
+
+ if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
+ /*
+ * For tlbwr, we can shadow the discarded entry into
+ * a new (fake) TLB entry, as long as the guest cannot
+ * tell that it's there.
+ */
+ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
+ env->tlb->tlb_in_use++;
+ return;
+ }
+
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ if (tlb->V0) {
+ addr = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | (mask >> 1);
+ while (addr < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+ if (tlb->V1) {
+ addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | mask;
+ while (addr - 1 < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+}
diff --git a/target/mips/tcg/system_helper.h.inc b/target/mips/tcg/system_helper.h.inc
new file mode 100644
index 0000000..eaac5e2
--- /dev/null
+++ b/target/mips/tcg/system_helper.h.inc
@@ -0,0 +1,185 @@
+/*
+ * QEMU MIPS TCG system helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+/* CP0 helpers */
+DEF_HELPER_1(mfc0_mvpcontrol, tl, env)
+DEF_HELPER_1(mfc0_mvpconf0, tl, env)
+DEF_HELPER_1(mfc0_mvpconf1, tl, env)
+DEF_HELPER_1(mftc0_vpecontrol, tl, env)
+DEF_HELPER_1(mftc0_vpeconf0, tl, env)
+DEF_HELPER_1(mfc0_random, tl, env)
+DEF_HELPER_1(mfc0_tcstatus, tl, env)
+DEF_HELPER_1(mftc0_tcstatus, tl, env)
+DEF_HELPER_1(mfc0_tcbind, tl, env)
+DEF_HELPER_1(mftc0_tcbind, tl, env)
+DEF_HELPER_1(mfc0_tcrestart, tl, env)
+DEF_HELPER_1(mftc0_tcrestart, tl, env)
+DEF_HELPER_1(mfc0_tchalt, tl, env)
+DEF_HELPER_1(mftc0_tchalt, tl, env)
+DEF_HELPER_1(mfc0_tccontext, tl, env)
+DEF_HELPER_1(mftc0_tccontext, tl, env)
+DEF_HELPER_1(mfc0_tcschedule, tl, env)
+DEF_HELPER_1(mftc0_tcschedule, tl, env)
+DEF_HELPER_1(mfc0_tcschefback, tl, env)
+DEF_HELPER_1(mftc0_tcschefback, tl, env)
+DEF_HELPER_1(mfc0_count, tl, env)
+DEF_HELPER_1(mftc0_entryhi, tl, env)
+DEF_HELPER_1(mftc0_status, tl, env)
+DEF_HELPER_1(mftc0_cause, tl, env)
+DEF_HELPER_1(mftc0_epc, tl, env)
+DEF_HELPER_1(mftc0_ebase, tl, env)
+DEF_HELPER_2(mftc0_configx, tl, env, tl)
+DEF_HELPER_1(mfc0_lladdr, tl, env)
+DEF_HELPER_1(mfc0_maar, tl, env)
+DEF_HELPER_1(mfhc0_maar, tl, env)
+DEF_HELPER_2(mfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(mfc0_watchhi, tl, env, i32)
+DEF_HELPER_2(mfhc0_watchhi, tl, env, i32)
+DEF_HELPER_1(mfc0_debug, tl, env)
+DEF_HELPER_1(mftc0_debug, tl, env)
+#ifdef TARGET_MIPS64
+DEF_HELPER_1(dmfc0_tcrestart, tl, env)
+DEF_HELPER_1(dmfc0_tchalt, tl, env)
+DEF_HELPER_1(dmfc0_tccontext, tl, env)
+DEF_HELPER_1(dmfc0_tcschedule, tl, env)
+DEF_HELPER_1(dmfc0_tcschefback, tl, env)
+DEF_HELPER_1(dmfc0_lladdr, tl, env)
+DEF_HELPER_1(dmfc0_maar, tl, env)
+DEF_HELPER_2(dmfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(dmfc0_watchhi, tl, env, i32)
+#endif /* TARGET_MIPS64 */
+
+DEF_HELPER_2(mtc0_index, void, env, tl)
+DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mttc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mttc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf1, void, env, tl)
+DEF_HELPER_2(mtc0_yqmask, void, env, tl)
+DEF_HELPER_2(mtc0_vpeopt, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo0, void, env, tl)
+DEF_HELPER_2(mtc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mttc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mtc0_tcbind, void, env, tl)
+DEF_HELPER_2(mttc0_tcbind, void, env, tl)
+DEF_HELPER_2(mtc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mttc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mtc0_tchalt, void, env, tl)
+DEF_HELPER_2(mttc0_tchalt, void, env, tl)
+DEF_HELPER_2(mtc0_tccontext, void, env, tl)
+DEF_HELPER_2(mttc0_tccontext, void, env, tl)
+DEF_HELPER_2(mtc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mttc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mtc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mttc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
+DEF_HELPER_2(mtc0_context, void, env, tl)
+DEF_HELPER_2(mtc0_memorymapid, void, env, tl)
+DEF_HELPER_2(mtc0_pagemask, void, env, tl)
+DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
+DEF_HELPER_2(mtc0_segctl0, void, env, tl)
+DEF_HELPER_2(mtc0_segctl1, void, env, tl)
+DEF_HELPER_2(mtc0_segctl2, void, env, tl)
+DEF_HELPER_2(mtc0_pwfield, void, env, tl)
+DEF_HELPER_2(mtc0_pwsize, void, env, tl)
+DEF_HELPER_2(mtc0_wired, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf2, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf3, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf4, void, env, tl)
+DEF_HELPER_2(mtc0_hwrena, void, env, tl)
+DEF_HELPER_2(mtc0_pwctl, void, env, tl)
+DEF_HELPER_2(mtc0_count, void, env, tl)
+DEF_HELPER_2(mtc0_entryhi, void, env, tl)
+DEF_HELPER_2(mttc0_entryhi, void, env, tl)
+DEF_HELPER_2(mtc0_compare, void, env, tl)
+DEF_HELPER_2(mtc0_status, void, env, tl)
+DEF_HELPER_2(mttc0_status, void, env, tl)
+DEF_HELPER_2(mtc0_intctl, void, env, tl)
+DEF_HELPER_2(mtc0_srsctl, void, env, tl)
+DEF_HELPER_2(mtc0_cause, void, env, tl)
+DEF_HELPER_2(mttc0_cause, void, env, tl)
+DEF_HELPER_2(mtc0_ebase, void, env, tl)
+DEF_HELPER_2(mttc0_ebase, void, env, tl)
+DEF_HELPER_2(mtc0_config0, void, env, tl)
+DEF_HELPER_2(mtc0_config2, void, env, tl)
+DEF_HELPER_2(mtc0_config3, void, env, tl)
+DEF_HELPER_2(mtc0_config4, void, env, tl)
+DEF_HELPER_2(mtc0_config5, void, env, tl)
+DEF_HELPER_2(mtc0_lladdr, void, env, tl)
+DEF_HELPER_2(mtc0_maar, void, env, tl)
+DEF_HELPER_2(mthc0_maar, void, env, tl)
+DEF_HELPER_2(mtc0_maari, void, env, tl)
+DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32)
+DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32)
+DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32)
+DEF_HELPER_2(mtc0_xcontext, void, env, tl)
+DEF_HELPER_2(mtc0_framemask, void, env, tl)
+DEF_HELPER_2(mtc0_debug, void, env, tl)
+DEF_HELPER_2(mttc0_debug, void, env, tl)
+DEF_HELPER_2(mtc0_performance0, void, env, tl)
+DEF_HELPER_2(mtc0_errctl, void, env, tl)
+DEF_HELPER_2(mtc0_taglo, void, env, tl)
+DEF_HELPER_2(mtc0_datalo, void, env, tl)
+DEF_HELPER_2(mtc0_taghi, void, env, tl)
+DEF_HELPER_2(mtc0_datahi, void, env, tl)
+
+#if defined(TARGET_MIPS64)
+DEF_HELPER_2(dmtc0_entrylo0, void, env, i64)
+DEF_HELPER_2(dmtc0_entrylo1, void, env, i64)
+#endif
+
+/* MIPS MT functions */
+DEF_HELPER_2(mftgpr, tl, env, i32)
+DEF_HELPER_2(mftlo, tl, env, i32)
+DEF_HELPER_2(mfthi, tl, env, i32)
+DEF_HELPER_2(mftacx, tl, env, i32)
+DEF_HELPER_1(mftdsp, tl, env)
+DEF_HELPER_3(mttgpr, void, env, tl, i32)
+DEF_HELPER_3(mttlo, void, env, tl, i32)
+DEF_HELPER_3(mtthi, void, env, tl, i32)
+DEF_HELPER_3(mttacx, void, env, tl, i32)
+DEF_HELPER_2(mttdsp, void, env, tl)
+DEF_HELPER_0(dmt, tl)
+DEF_HELPER_0(emt, tl)
+DEF_HELPER_1(dvpe, tl, env)
+DEF_HELPER_1(evpe, tl, env)
+
+/* R6 Multi-threading */
+DEF_HELPER_1(dvp, tl, env)
+DEF_HELPER_1(evp, tl, env)
+
+/* TLB */
+DEF_HELPER_1(tlbwi, void, env)
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbp, void, env)
+DEF_HELPER_1(tlbr, void, env)
+DEF_HELPER_1(tlbinv, void, env)
+DEF_HELPER_1(tlbinvf, void, env)
+DEF_HELPER_3(ginvt, void, env, tl, i32)
+
+/* Special */
+DEF_HELPER_1(di, tl, env)
+DEF_HELPER_1(ei, tl, env)
+DEF_HELPER_1(eret, void, env)
+DEF_HELPER_1(eretnc, void, env)
+DEF_HELPER_1(deret, void, env)
+DEF_HELPER_3(cache, void, env, tl, i32)
+
+#ifdef TARGET_MIPS64
+/* Loongson CSR */
+DEF_HELPER_2(lcsr_rdcsr, i64, env, tl)
+DEF_HELPER_2(lcsr_drdcsr, i64, env, tl)
+DEF_HELPER_3(lcsr_wrcsr, void, env, tl, tl)
+DEF_HELPER_3(lcsr_dwrcsr, void, env, tl, tl)
+#endif
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index aef032c..950e6af 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -16,6 +16,8 @@
#include "cpu.h"
void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
@@ -45,7 +47,7 @@ bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mmu_init(CPUMIPSState *env, const mips_def_t *def);
-void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
+uint32_t compute_pagemask(uint32_t val);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
uint32_t cpu_mips_get_random(CPUMIPSState *env);
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 333469b..8658315 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -27,6 +27,7 @@
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "semihosting/semihost.h"
#include "trace.h"
#include "fpu_helper.h"
@@ -37,7 +38,7 @@
/*
- * Many sysemu-only helpers are not reachable for user-only.
+ * Many system-only helpers are not reachable for user-only.
* Define stub generators here, so that we need not either sprinkle
* ifdefs through the translator, nor provide the helper function.
*/
@@ -327,19 +328,6 @@ enum {
OPC_MUL = 0x02 | OPC_SPECIAL2,
OPC_MSUB = 0x04 | OPC_SPECIAL2,
OPC_MSUBU = 0x05 | OPC_SPECIAL2,
- /* Loongson 2F */
- OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2,
- OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2,
- OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2,
- OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2,
- OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2,
- OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2,
- OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2,
- OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2,
- OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2,
- OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2,
- OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2,
- OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2,
/* Misc */
OPC_CLZ = 0x20 | OPC_SPECIAL2,
OPC_CLO = 0x21 | OPC_SPECIAL2,
@@ -368,20 +356,6 @@ enum {
OPC_RDHWR = 0x3B | OPC_SPECIAL3,
OPC_GINV = 0x3D | OPC_SPECIAL3,
- /* Loongson 2E */
- OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3,
- OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3,
- OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3,
- OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3,
- OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3,
- OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3,
- OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3,
- OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3,
- OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3,
- OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3,
- OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3,
- OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3,
-
/* MIPS DSP Load */
OPC_LX_DSP = 0x0A | OPC_SPECIAL3,
/* MIPS DSP Arithmetic */
@@ -389,16 +363,14 @@ enum {
OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3,
OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3,
OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3,
- /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */
- /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3,
OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3,
OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3,
/* MIPS DSP GPR-Based Shift Sub-class */
OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3,
OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3,
/* MIPS DSP Multiply Sub-class insns */
- /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */
- /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3,
OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3,
OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3,
/* DSP Bit/Manipulation Sub-class */
@@ -556,7 +528,6 @@ enum {
OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP,
};
-#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E
#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
/* MIPS DSP Arithmetic Sub-class */
@@ -1456,8 +1427,7 @@ void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
#endif
}
-static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base,
- target_long ofs)
+void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs)
{
tcg_gen_addi_tl(ret, base, ofs);
@@ -1646,13 +1616,18 @@ static inline void check_ps(DisasContext *ctx)
check_cp1_64bitmode(ctx);
}
+bool decode_64bit_enabled(DisasContext *ctx)
+{
+ return ctx->hflags & MIPS_HFLAG_64;
+}
+
/*
* This code generates a "reserved instruction" exception if cpu is not
* 64-bit or 64-bit instructions are not enabled.
*/
void check_mips_64(DisasContext *ctx)
{
- if (unlikely((TARGET_LONG_BITS != 64) || !(ctx->hflags & MIPS_HFLAG_64))) {
+ if (unlikely((TARGET_LONG_BITS != 64) || !decode_64bit_enabled(ctx))) {
gen_reserved_instruction(ctx);
}
}
@@ -1957,16 +1932,16 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
tcg_gen_st_tl(ret, tcg_env, offsetof(CPUMIPSState, llval)); \
}
#else
-#define OP_LD_ATOMIC(insn, fname) \
+#define OP_LD_ATOMIC(insn, ignored_memop) \
static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
DisasContext *ctx) \
{ \
gen_helper_##insn(ret, tcg_env, arg1, tcg_constant_i32(mem_idx)); \
}
#endif
-OP_LD_ATOMIC(ll, MO_TESL);
+OP_LD_ATOMIC(ll, mo_endian(ctx) | MO_SL);
#if defined(TARGET_MIPS64)
-OP_LD_ATOMIC(lld, MO_TEUQ);
+OP_LD_ATOMIC(lld, mo_endian(ctx) | MO_UQ);
#endif
#undef OP_LD_ATOMIC
@@ -2010,7 +1985,7 @@ static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr,
*/
tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
tcg_gen_andi_tl(t1, addr, sizem1);
- if (!cpu_is_bigendian(ctx)) {
+ if (!disas_is_bigendian(ctx)) {
tcg_gen_xori_tl(t1, t1, sizem1);
}
tcg_gen_shli_tl(t1, t1, 3);
@@ -2037,7 +2012,7 @@ static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr,
*/
tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
tcg_gen_andi_tl(t1, addr, sizem1);
- if (cpu_is_bigendian(ctx)) {
+ if (disas_is_bigendian(ctx)) {
tcg_gen_xori_tl(t1, t1, sizem1);
}
tcg_gen_shli_tl(t1, t1, 3);
@@ -2073,12 +2048,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_LWU:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
case OPC_LD:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -2090,33 +2065,33 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LDL:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t1, rt);
break;
case OPC_LDR:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t1, rt);
break;
case OPC_LDPC:
t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, rt);
break;
#endif
case OPC_LWPC:
t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t0, rt);
break;
case OPC_LWE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LW:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -2124,7 +2099,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LH:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -2132,7 +2107,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LHU:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -2156,7 +2131,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LWL:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL);
+ gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_ext32s_tl(t1, t1);
gen_store_gpr(t1, rt);
break;
@@ -2166,7 +2141,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
case OPC_LWR:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL);
+ gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_ext32s_tl(t1, t1);
gen_store_gpr(t1, rt);
break;
@@ -2194,7 +2169,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SD:
- tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
case OPC_SDL:
@@ -2208,14 +2183,14 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_SW:
- tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
case OPC_SHE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_SH:
- tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
break;
case OPC_SBE:
@@ -2253,8 +2228,7 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
/* compare the address against that of the preceding LL */
gen_base_offset_addr(ctx, addr, base, offset);
tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
- tcg_gen_movi_tl(t0, 0);
- gen_store_gpr(t0, rt);
+ gen_store_gpr(tcg_constant_tl(0), rt);
tcg_gen_br(done);
gen_set_label(l1);
@@ -2281,7 +2255,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
case OPC_LWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, ft);
}
@@ -2290,14 +2264,14 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, ft);
- tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
}
break;
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
}
@@ -2306,7 +2280,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, ft);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
}
break;
@@ -2987,14 +2961,14 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
case R6_OPC_LWPC:
offset = sextract32(ctx->opcode << 2, 0, 21);
addr = addr_add(ctx, pc, offset);
- gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL);
+ gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_SL);
break;
#if defined(TARGET_MIPS64)
case OPC_LWUPC:
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 2, 0, 21);
addr = addr_add(ctx, pc, offset);
- gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL);
+ gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UL);
break;
#endif
default:
@@ -3021,7 +2995,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 3, 0, 21);
addr = addr_add(ctx, (pc & ~0x7), offset);
- gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ);
+ gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
break;
#endif
default:
@@ -3060,8 +3034,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
@@ -3077,30 +3050,27 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
break;
case R6_OPC_DIVU:
{
- TCGv t2 = tcg_constant_tl(0);
- TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
+ tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
break;
case R6_OPC_MODU:
{
- TCGv t2 = tcg_constant_tl(0);
- TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
+ tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
}
@@ -3155,8 +3125,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
}
break;
@@ -3169,24 +3138,21 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
}
break;
case R6_OPC_DDIVU:
{
- TCGv t2 = tcg_constant_tl(0);
- TCGv t3 = tcg_constant_tl(1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
+ tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
}
break;
case R6_OPC_DMODU:
{
- TCGv t2 = tcg_constant_tl(0);
- TCGv t3 = tcg_constant_tl(1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
+ tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
}
break;
@@ -3239,8 +3205,7 @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt)
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[1], t0, t1);
tcg_gen_rem_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
@@ -3295,8 +3260,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
@@ -3348,17 +3312,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
tcg_gen_and_tl(t2, t2, t3);
tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
tcg_gen_or_tl(t2, t2, t3);
- tcg_gen_movi_tl(t3, 0);
- tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
}
break;
case OPC_DDIVU:
{
- TCGv t2 = tcg_constant_tl(0);
- TCGv t3 = tcg_constant_tl(1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1,
+ tcg_constant_tl(0), tcg_constant_tl(1), t1);
tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
}
@@ -3600,184 +3562,6 @@ static void gen_cl(DisasContext *ctx, uint32_t opc,
}
}
-/* Godson integer instructions */
-static void gen_loongson_integer(DisasContext *ctx, uint32_t opc,
- int rd, int rs, int rt)
-{
- TCGv t0, t1;
-
- if (rd == 0) {
- /* Treat as NOP. */
- return;
- }
-
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
- gen_load_gpr(t0, rs);
- gen_load_gpr(t1, rt);
-
- switch (opc) {
- case OPC_MULT_G_2E:
- case OPC_MULT_G_2F:
- tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- break;
- case OPC_MULTU_G_2E:
- case OPC_MULTU_G_2F:
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- break;
- case OPC_DIV_G_2E:
- case OPC_DIV_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- tcg_gen_ext32s_tl(t0, t0);
- tcg_gen_ext32s_tl(t1, t1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l3);
- gen_set_label(l1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
- tcg_gen_mov_tl(cpu_gpr[rd], t0);
- tcg_gen_br(l3);
- gen_set_label(l2);
- tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- gen_set_label(l3);
- }
- break;
- case OPC_DIVU_G_2E:
- case OPC_DIVU_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- gen_set_label(l2);
- }
- break;
- case OPC_MOD_G_2E:
- case OPC_MOD_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l3);
- gen_set_label(l2);
- tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- gen_set_label(l3);
- }
- break;
- case OPC_MODU_G_2E:
- case OPC_MODU_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
- tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- gen_set_label(l2);
- }
- break;
-#if defined(TARGET_MIPS64)
- case OPC_DMULT_G_2E:
- case OPC_DMULT_G_2F:
- tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
- break;
- case OPC_DMULTU_G_2E:
- case OPC_DMULTU_G_2F:
- tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
- break;
- case OPC_DDIV_G_2E:
- case OPC_DDIV_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l3);
- gen_set_label(l1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
- tcg_gen_mov_tl(cpu_gpr[rd], t0);
- tcg_gen_br(l3);
- gen_set_label(l2);
- tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
- gen_set_label(l3);
- }
- break;
- case OPC_DDIVU_G_2E:
- case OPC_DDIVU_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
- gen_set_label(l2);
- }
- break;
- case OPC_DMOD_G_2E:
- case OPC_DMOD_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l3);
- gen_set_label(l2);
- tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
- gen_set_label(l3);
- }
- break;
- case OPC_DMODU_G_2E:
- case OPC_DMODU_G_2F:
- {
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rd], 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
- gen_set_label(l2);
- }
- break;
-#endif
- }
-}
-
/* Loongson multimedia instructions */
static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt)
{
@@ -4160,10 +3944,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
case OPC_GSLQ:
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
@@ -4172,10 +3956,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
check_cp1_enabled(ctx);
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
@@ -4184,11 +3968,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_gpr(t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
case OPC_GSSQC1:
@@ -4196,11 +3980,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_fpr64(ctx, t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@@ -4213,7 +3997,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_load_fpr32(ctx, fp0, rt);
t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
break;
@@ -4224,7 +4008,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_load_fpr32(ctx, fp0, rt);
t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
break;
@@ -4234,7 +4018,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
- gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, t1, rt);
break;
case OPC_GSLDRC1:
@@ -4242,7 +4026,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
- gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, t1, rt);
break;
#endif
@@ -4360,7 +4144,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
gen_store_gpr(t0, rt);
break;
case OPC_GSLHX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -4369,7 +4153,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -4379,7 +4163,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -4390,7 +4174,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
fp0 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, rt);
break;
@@ -4400,7 +4184,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t0, rt);
break;
@@ -4413,34 +4197,34 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
case OPC_GSSHX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UW |
ctx->default_tcg_memop_mask);
break;
case OPC_GSSWX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
case OPC_GSSWXC1:
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
- tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
ctx->default_tcg_memop_mask);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDXC1:
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
+ tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
ctx->default_tcg_memop_mask);
break;
#endif
@@ -5329,17 +5113,17 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpcontrol(arg, tcg_env);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpconf0(arg, tcg_env);
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpconf1(arg, tcg_env);
register_name = "MVPConf1";
break;
@@ -5360,37 +5144,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask));
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
register_name = "VPEOpt";
break;
@@ -5417,37 +5201,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcstatus(arg, tcg_env);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcbind(arg, tcg_env);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcrestart(arg, tcg_env);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tchalt(arg, tcg_env);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tccontext(arg, tcg_env);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcschedule(arg, tcg_env);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcschefback(arg, tcg_env);
register_name = "TCScheFBack";
break;
@@ -6086,17 +5870,17 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_mvpcontrol(tcg_env, arg);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
/* ignored */
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
/* ignored */
register_name = "MVPConf1";
break;
@@ -6116,39 +5900,39 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpecontrol(tcg_env, arg);
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeconf0(tcg_env, arg);
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeconf1(tcg_env, arg);
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_yqmask(tcg_env, arg);
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeopt(tcg_env, arg);
register_name = "VPEOpt";
break;
@@ -6163,37 +5947,37 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcstatus(tcg_env, arg);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcbind(tcg_env, arg);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcrestart(tcg_env, arg);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tchalt(tcg_env, arg);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tccontext(tcg_env, arg);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcschedule(tcg_env, arg);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcschefback(tcg_env, arg);
register_name = "TCScheFBack";
break;
@@ -6836,17 +6620,17 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpcontrol(arg, tcg_env);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpconf0(arg, tcg_env);
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_mvpconf1(arg, tcg_env);
register_name = "MVPConf1";
break;
@@ -6867,40 +6651,40 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_YQMask));
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
register_name = "VPEOpt";
break;
@@ -6916,37 +6700,37 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcstatus(arg, tcg_env);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mfc0_tcbind(arg, tcg_env);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_dmfc0_tcrestart(arg, tcg_env);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_dmfc0_tchalt(arg, tcg_env);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_dmfc0_tccontext(arg, tcg_env);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_dmfc0_tcschedule(arg, tcg_env);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_dmfc0_tcschefback(arg, tcg_env);
register_name = "TCScheFBack";
break;
@@ -7553,17 +7337,17 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_mvpcontrol(tcg_env, arg);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
/* ignored */
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
/* ignored */
register_name = "MVPConf1";
break;
@@ -7583,39 +7367,39 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpecontrol(tcg_env, arg);
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeconf0(tcg_env, arg);
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeconf1(tcg_env, arg);
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_yqmask(tcg_env, arg);
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_vpeopt(tcg_env, arg);
register_name = "VPEOpt";
break;
@@ -7630,37 +7414,37 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcstatus(tcg_env, arg);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcbind(tcg_env, arg);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcrestart(tcg_env, arg);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tchalt(tcg_env, arg);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tccontext(tcg_env, arg);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcschedule(tcg_env, arg);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
- CP0_CHECK(ctx->insn_flags & ASE_MT);
+ CP0_CHECK(disas_mt_available(ctx));
gen_helper_mtc0_tcschefback(tcg_env, arg);
register_name = "TCScheFBack";
break;
@@ -10779,7 +10563,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fd);
}
@@ -10789,7 +10573,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
check_cp1_registers(ctx, fd);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10799,7 +10583,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10808,7 +10592,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
}
break;
case OPC_SDXC1:
@@ -10817,7 +10601,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
}
break;
case OPC_SUXC1:
@@ -10826,7 +10610,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
}
break;
}
@@ -10856,7 +10640,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
- if (cpu_is_bigendian(ctx)) {
+ if (disas_is_bigendian(ctx)) {
gen_load_fpr32(ctx, fp, fs);
gen_load_fpr32h(ctx, fph, ft);
gen_store_fpr32h(ctx, fp, fd);
@@ -11265,10 +11049,9 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
} else {
/* OPC_JIC, OPC_JIALC */
TCGv tbase = tcg_temp_new();
- TCGv toffset = tcg_constant_tl(offset);
gen_load_gpr(tbase, rt);
- gen_op_addr_add(ctx, btarget, tbase, toffset);
+ gen_op_addr_addi(ctx, btarget, tbase, offset);
}
break;
default:
@@ -11428,20 +11211,18 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
void gen_addiupc(DisasContext *ctx, int rx, int imm,
int is_64_bit, int extended)
{
- TCGv t0;
+ target_ulong npc;
if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
gen_reserved_instruction(ctx);
return;
}
- t0 = tcg_temp_new();
-
- tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
- tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+ npc = pc_relative_pc(ctx) + imm;
if (!is_64_bit) {
- tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ npc = (int32_t)npc;
}
+ tcg_gen_movi_tl(cpu_gpr[rx], npc);
}
static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
@@ -11476,7 +11257,7 @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd)
gen_op_addr_add(ctx, t0, t1, t0);
}
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t1, rd);
}
@@ -11567,16 +11348,16 @@ static void gen_mips_lx(DisasContext *ctx, uint32_t opc,
gen_store_gpr(t0, rd);
break;
case OPC_LHX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW);
gen_store_gpr(t0, rd);
break;
case OPC_LWX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
gen_store_gpr(t0, rd);
break;
#if defined(TARGET_MIPS64)
case OPC_LDX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, rd);
break;
#endif
@@ -11601,8 +11382,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2,
gen_load_gpr(v2_t, v2);
switch (op1) {
- /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */
- case OPC_MULT_G_2E:
+ case OPC_ADDUH_QB_DSP:
check_dsp_r2(ctx);
switch (op2) {
case OPC_ADDUH_QB:
@@ -12285,11 +12065,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2,
gen_load_gpr(v2_t, v2);
switch (op1) {
- /*
- * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
- * the same mask and op1.
- */
- case OPC_MULT_G_2E:
+ case OPC_MUL_PH_DSP:
check_dsp_r2(ctx);
switch (op2) {
case OPC_MUL_PH:
@@ -13641,15 +13417,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
case OPC_MUL:
gen_arith(ctx, op1, rd, rs, rt);
break;
- case OPC_DIV_G_2F:
- case OPC_DIVU_G_2F:
- case OPC_MULT_G_2F:
- case OPC_MULTU_G_2F:
- case OPC_MOD_G_2F:
- case OPC_MODU_G_2F:
- check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT);
- gen_loongson_integer(ctx, op1, rd, rs, rt);
- break;
case OPC_CLO:
case OPC_CLZ:
check_insn(ctx, ISA_MIPS_R1);
@@ -13674,15 +13441,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
check_mips_64(ctx);
gen_cl(ctx, op1, rd, rs);
break;
- case OPC_DMULT_G_2F:
- case OPC_DMULTU_G_2F:
- case OPC_DDIV_G_2F:
- case OPC_DDIVU_G_2F:
- case OPC_DMOD_G_2F:
- case OPC_DMODU_G_2F:
- check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT);
- gen_loongson_integer(ctx, op1, rd, rs, rt);
- break;
#endif
default: /* Invalid */
MIPS_INVAL("special2_legacy");
@@ -13719,7 +13477,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
}
break;
case R6_OPC_SC:
- gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+ gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
break;
case R6_OPC_LL:
gen_ld(ctx, op1, rt, rs, imm);
@@ -13765,7 +13523,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
#endif
#if defined(TARGET_MIPS64)
case R6_OPC_SCD:
- gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
+ gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
break;
case R6_OPC_LLD:
gen_ld(ctx, op1, rt, rs, imm);
@@ -13815,17 +13573,12 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
op1 = MASK_SPECIAL3(ctx->opcode);
switch (op1) {
- case OPC_DIV_G_2E:
- case OPC_DIVU_G_2E:
- case OPC_MOD_G_2E:
- case OPC_MODU_G_2E:
- case OPC_MULT_G_2E:
- case OPC_MULTU_G_2E:
+ case OPC_MUL_PH_DSP:
/*
- * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
+ * OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
* the same mask and op1.
*/
- if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) {
+ if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MUL_PH_DSP)) {
op2 = MASK_ADDUH_QB(ctx->opcode);
switch (op2) {
case OPC_ADDUH_QB:
@@ -13853,8 +13606,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
gen_reserved_instruction(ctx);
break;
}
- } else if (ctx->insn_flags & INSN_LOONGSON2E) {
- gen_loongson_integer(ctx, op1, rd, rs, rt);
} else {
gen_reserved_instruction(ctx);
}
@@ -14083,15 +13834,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
}
break;
#if defined(TARGET_MIPS64)
- case OPC_DDIV_G_2E:
- case OPC_DDIVU_G_2E:
- case OPC_DMULT_G_2E:
- case OPC_DMULTU_G_2E:
- case OPC_DMOD_G_2E:
- case OPC_DMODU_G_2E:
- check_insn(ctx, INSN_LOONGSON2E);
- gen_loongson_integer(ctx, op1, rd, rs, rt);
- break;
case OPC_ABSQ_S_QH_DSP:
op2 = MASK_ABSQ_S_QH(ctx->opcode);
switch (op2) {
@@ -14448,7 +14190,7 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
return;
case OPC_SCE:
check_cp0_enabled(ctx);
- gen_st_cond(ctx, rt, rs, imm, MO_TESL, true);
+ gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, true);
return;
case OPC_CACHEE:
check_eva(ctx);
@@ -14912,7 +14654,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
if (ctx->insn_flags & INSN_R5900) {
check_insn_opc_user_only(ctx, INSN_R5900);
}
- gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+ gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
break;
case OPC_CACHE:
check_cp0_enabled(ctx);
@@ -14969,7 +14711,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
} else {
/* OPC_BC1ANY2 */
check_cop1x(ctx);
- check_insn(ctx, ASE_MIPS3D);
+ if (!ase_3d_available(env)) {
+ return false;
+ }
gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
(rt >> 2) & 0x7, imm << 2);
}
@@ -14984,7 +14728,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
check_cp1_enabled(ctx);
check_insn_opc_removed(ctx, ISA_MIPS_R6);
check_cop1x(ctx);
- check_insn(ctx, ASE_MIPS3D);
+ if (!ase_3d_available(env)) {
+ return false;
+ }
/* fall through */
case OPC_BC1:
check_cp1_enabled(ctx);
@@ -15191,7 +14937,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
check_insn_opc_user_only(ctx, INSN_R5900);
}
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
+ gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
break;
case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
if (ctx->insn_flags & ISA_MIPS_R6) {
@@ -15284,6 +15030,9 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
if (cpu_supports_isa(env, INSN_VR54XX) && decode_ext_vr54xx(ctx, ctx->opcode)) {
return;
}
+ if (TARGET_LONG_BITS == 64 && decode_ext_loongson(ctx, ctx->opcode)) {
+ return;
+ }
#if defined(TARGET_MIPS64)
if (ase_lcsr_available(env) && decode_ase_lcsr(ctx, ctx->opcode)) {
return;
@@ -15362,7 +15111,8 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
* hardware does (e.g. if a delay slot instruction faults, the
* reported PC is the PC of the branch).
*/
- if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ if ((tb_cflags(ctx->base.tb) & CF_SINGLE_STEP) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
ctx->base.max_insns = 2;
}
@@ -15445,7 +15195,7 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
* together with its delay slot.
*/
if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
- && !ctx->base.singlestep_enabled) {
+ && !(tb_cflags(ctx->base.tb) & CF_SINGLE_STEP)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -15482,8 +15232,8 @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
index 2b6646b..1bf153d 100644
--- a/target/mips/tcg/translate.h
+++ b/target/mips/tcg/translate.h
@@ -176,6 +176,7 @@ void gen_addiupc(DisasContext *ctx, int rx, int imm,
* Address Computation and Large Constant Instructions
*/
void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1);
+void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs);
bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
@@ -216,10 +217,13 @@ void msa_translate_init(void);
void mxu_translate_init(void);
bool decode_ase_mxu(DisasContext *ctx, uint32_t insn);
+bool decode_64bit_enabled(DisasContext *ctx);
+
/* decodetree generated */
bool decode_isa_rel6(DisasContext *ctx, uint32_t insn);
bool decode_ase_msa(DisasContext *ctx, uint32_t insn);
bool decode_ext_txx9(DisasContext *ctx, uint32_t insn);
+bool decode_ext_loongson(DisasContext *ctx, uint32_t insn);
#if defined(TARGET_MIPS64)
bool decode_ase_lcsr(DisasContext *ctx, uint32_t insn);
bool decode_ext_tx79(DisasContext *ctx, uint32_t insn);
@@ -227,6 +231,11 @@ bool decode_ext_octeon(DisasContext *ctx, uint32_t insn);
#endif
bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn);
+static inline bool disas_mt_available(DisasContext *ctx)
+{
+ return ctx->CP0_Config3 & (1 << CP0C3_MT);
+}
+
/*
* Helpers for implementing sets of trans_* functions.
* Defer the implementation of NAME to FUNC, with optional extra arguments.
@@ -235,9 +244,19 @@ bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn);
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ return FUNC(ctx, a, __VA_ARGS__); }
-static inline bool cpu_is_bigendian(DisasContext *ctx)
+static inline bool disas_is_bigendian(DisasContext *ctx)
{
return extract32(ctx->CP0_Config0, CP0C0_BE, 1);
}
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ return disas_is_bigendian(dc) ? MO_BE : MO_LE;
+}
+
+static inline MemOp mo_endian_rev(DisasContext *dc, bool reversed)
+{
+ return disas_is_bigendian(dc) ^ reversed ? MO_BE : MO_LE;
+}
+
#endif
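
    A minimal sketch of how the new helpers relate to the MO_TE* operands they
    replace throughout this patch (assuming the usual MO_* definitions from
    include/exec/memop.h, where MO_TEUQ == MO_TE | MO_UQ and MO_TE is the
    build-time target endianness; this sketch is not part of the patch itself):

        /* Old: endianness fixed at build time for the whole target. */
        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
                           MO_TEUQ | ctx->default_tcg_memop_mask);

        /* New: endianness chosen per DisasContext from CP0.Config0.BE,
         * producing MO_BEUQ or MO_LEUQ as appropriate. */
        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx,
                           mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask);

        /* mo_endian_rev(ctx, true) gives the opposite byte order, for
         * reversed-endian load/store variants. */
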
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
index dd6fb8a..ae3f5e1 100644
--- a/target/mips/tcg/tx79_translate.c
+++ b/target/mips/tcg/tx79_translate.c
@@ -340,12 +340,12 @@ static bool trans_LQ(DisasContext *ctx, arg_i *a)
tcg_gen_andi_tl(addr, addr, ~0xf);
/* Lower half */
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr(t0, a->rt);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
gen_store_gpr_hi(t0, a->rt);
return true;
}
@@ -364,12 +364,12 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a)
/* Lower half */
gen_load_gpr(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
return true;
}
diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h
index fbfc0f5..b4f57bb 100644
--- a/target/openrisc/cpu-param.h
+++ b/target/openrisc/cpu-param.h
@@ -2,17 +2,16 @@
* OpenRISC cpu parameters for qemu.
*
* Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef OPENRISC_CPU_PARAM_H
#define OPENRISC_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define TCG_GUEST_DEFAULT_MO (0)
+#define TARGET_INSN_START_EXTRA_WORDS 1
#endif
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 6ec54ad..dfbb2df 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -21,8 +21,9 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
@@ -40,6 +41,18 @@ static vaddr openrisc_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState openrisc_get_tb_cpu_state(CPUState *cs)
+{
+ CPUOpenRISCState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = ((env->dflag ? TB_FLAGS_DFLAG : 0)
+ | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
+ | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE))),
+ };
+}
+
static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -62,11 +75,13 @@ static void openrisc_restore_state_to_opc(CPUState *cs,
}
}
+#ifndef CONFIG_USER_ONLY
static bool openrisc_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_TIMER);
}
+#endif /* !CONFIG_USER_ONLY */
static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -82,6 +97,7 @@ static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch)
static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
+ info->endian = BFD_ENDIAN_BIG;
info->print_insn = print_insn_or1k;
}
@@ -105,6 +121,14 @@ static void openrisc_cpu_reset_hold(Object *obj, ResetType type)
set_float_detect_tininess(float_tininess_before_rounding,
&cpu->env.fp_status);
+ /*
+ * TODO: this is probably not the correct NaN propagation rule for
+ * this architecture.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &cpu->env.fp_status);
+
+ /* Default NaN: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &cpu->env.fp_status);
#ifndef CONFIG_USER_ONLY
cpu->env.picmr = 0x00000000;
@@ -156,6 +180,10 @@ static void openrisc_cpu_realizefn(DeviceState *dev, Error **errp)
qemu_init_vcpu(cs);
cpu_reset(cs);
+#ifndef CONFIG_USER_ONLY
+ cpu_openrisc_clock_init(OPENRISC_CPU(dev));
+#endif
+
occ->parent_realize(dev, errp);
}
@@ -219,26 +247,33 @@ static void openrisc_any_initfn(Object *obj)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps openrisc_sysemu_ops = {
+ .has_work = openrisc_cpu_has_work,
.get_phys_page_debug = openrisc_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps openrisc_tcg_ops = {
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
+ .get_tb_cpu_state = openrisc_get_tb_cpu_state,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,
+ .mmu_index = openrisc_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = openrisc_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.cpu_exec_halt = openrisc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
-static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
+static void openrisc_cpu_class_init(ObjectClass *oc, const void *data)
{
OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(occ);
@@ -251,8 +286,6 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
&occ->parent_phases);
cc->class_by_name = openrisc_cpu_class_by_name;
- cc->has_work = openrisc_cpu_has_work;
- cc->mmu_index = openrisc_cpu_mmu_index;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
cc->get_pc = openrisc_cpu_get_pc;
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index c9fe9ae..f4bcf00 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -21,7 +21,9 @@
#define OPENRISC_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "fpu/softfloat-types.h"
/**
@@ -38,8 +40,6 @@ struct OpenRISCCPUClass {
ResettablePhases parent_phases;
};
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
enum {
MMU_NOMMU_IDX = 0,
MMU_SUPERVISOR_IDX = 1,
@@ -301,6 +301,8 @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#ifndef CONFIG_USER_ONLY
@@ -330,8 +332,6 @@ void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
#define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU
-#include "exec/cpu-all.h"
-
#define TB_FLAGS_SM SR_SM
#define TB_FLAGS_DME SR_DME
#define TB_FLAGS_IME SR_IME
@@ -349,16 +349,6 @@ static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val)
env->shadow_gpr[0][i] = val;
}
-static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = (env->dflag ? TB_FLAGS_DFLAG : 0)
- | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
- | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE));
-}
-
static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env)
{
return (env->sr
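
    The removed cpu_get_tb_cpu_state() inline above and the new
    openrisc_get_tb_cpu_state() hook in cpu.c compute the same TB key; a sketch of
    the correspondence (field names exactly as in the two hunks):

        TCGTBCPUState s = openrisc_get_tb_cpu_state(cs);
        /* s.pc      == env->pc
         * s.flags   == (env->dflag ? TB_FLAGS_DFLAG : 0)
         *            | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
         *            | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE))
         * s.cs_base == 0, left zero by the designated initializer,
         *              matching the old "*cs_base = 0". */
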
diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c
index 8699c3d..e213be3 100644
--- a/target/openrisc/exception.c
+++ b/target/openrisc/exception.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exception.h"
G_NORETURN void raise_exception(OpenRISCCPU *cpu, uint32_t excp)
diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c
index 1f5be4b..c2c9d13 100644
--- a/target/openrisc/exception_helper.c
+++ b/target/openrisc/exception_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exception.h"
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
index 8b81d2f..dba9972 100644
--- a/target/openrisc/fpu_helper.c
+++ b/target/openrisc/fpu_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c
index c2a77d5..45bba80 100644
--- a/target/openrisc/gdbstub.c
+++ b/target/openrisc/gdbstub.c
@@ -47,14 +47,9 @@ int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
- CPUClass *cc = CPU_GET_CLASS(cs);
CPUOpenRISCState *env = cpu_env(cs);
uint32_t tmp;
- if (n > cc->gdb_num_core_regs) {
- return 0;
- }
-
tmp = ldl_p(mem_buf);
if (n < 32) {
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index b3b5b40..4868230 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#ifndef CONFIG_USER_ONLY
diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c
index ab4ea88..1553ebc 100644
--- a/target/openrisc/interrupt_helper.c
+++ b/target/openrisc/interrupt_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
void HELPER(rfe)(CPUOpenRISCState *env)
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index 3574e57..081c706 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -136,7 +136,7 @@ const VMStateDescription vmstate_openrisc_cpu = {
.minimum_version_id = 1,
.post_load = cpu_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, OpenRISCCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState),
VMSTATE_END_OF_LIST()
}
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index c632d52..acea50c 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -21,8 +21,9 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "hw/loader.h"
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index 77567af..d96b41a 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -20,7 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "exception.h"
#ifndef CONFIG_USER_ONLY
@@ -217,7 +218,7 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
MachineState *ms = MACHINE(qdev_get_machine());
CPUState *cs = env_cpu(env);
int idx;
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index ca56684..5ab3bc7 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -20,13 +20,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/translator.h"
-
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -219,8 +220,7 @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
TCGv t0 = tcg_temp_new();
TCGv res = tcg_temp_new();
- tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
- tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
+ tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
tcg_gen_xor_tl(t0, res, srcb);
tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
@@ -1645,8 +1645,8 @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c
index a831565..80ac6c3 100644
--- a/target/ppc/arch_dump.c
+++ b/target/ppc/arch_dump.c
@@ -15,8 +15,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
-#include "sysemu/dump.h"
-#include "sysemu/kvm.h"
+#include "system/dump.h"
+#include "system/kvm.h"
#ifdef TARGET_PPC64
#define ELFCLASS ELFCLASS64
@@ -47,9 +47,14 @@ struct PPCUserRegStruct {
} QEMU_PACKED;
struct PPCElfPrstatus {
- char pad1[112];
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
struct PPCUserRegStruct pr_reg;
- char pad2[40];
+ char pad3[40]; /* 40 == sizeof(struct elf_prstatus) -
+ offsetof(struct elf_prstatus, pr_reg) -
+ sizeof(struct user_pt_regs) */
} QEMU_PACKED;
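
    The new padding splits the old 112-byte prefix without moving pr_reg; a
    compile-time sanity check one could add locally (a sketch, using the offsets
    implied by the comments above):

        /* 32 (pad1) + 4 (pid) + 76 (pad2) == 112, the old pad1[] size, so
         * pr_reg stays at the same offset; only pr_pid is now populated. */
        QEMU_BUILD_BUG_ON(offsetof(struct PPCElfPrstatus, pid) != 32);
        QEMU_BUILD_BUG_ON(offsetof(struct PPCElfPrstatus, pr_reg) != 112);
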
@@ -96,7 +101,7 @@ typedef struct NoteFuncArg {
DumpState *state;
} NoteFuncArg;
-static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
+static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu, int id)
{
int i;
reg_t cr;
@@ -109,6 +114,7 @@ static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
prstatus = &note->contents.prstatus;
memset(prstatus, 0, sizeof(*prstatus));
+ prstatus->pid = cpu_to_dump32(s, id);
reg = &prstatus->pr_reg;
for (i = 0; i < 32; i++) {
@@ -127,7 +133,7 @@ static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
reg->ccr = cpu_to_dump_reg(s, cr);
}
-static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu, int id)
{
int i;
struct PPCElfFpregset *fpregset;
@@ -146,7 +152,7 @@ static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
fpregset->fpscr = cpu_to_dump_reg(s, cpu->env.fpscr);
}
-static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu, int id)
{
int i;
struct PPCElfVmxregset *vmxregset;
@@ -178,7 +184,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
vmxregset->vscr.u32[3] = cpu_to_dump32(s, ppc_get_vscr(&cpu->env));
}
-static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu, int id)
{
int i;
struct PPCElfVsxregset *vsxregset;
@@ -195,7 +201,7 @@ static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
}
}
-static void ppc_write_elf_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+static void ppc_write_elf_speregset(NoteFuncArg *arg, PowerPCCPU *cpu, int id)
{
struct PPCElfSperegset *speregset;
Note *note = &arg->note;
@@ -211,7 +217,7 @@ static void ppc_write_elf_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
static const struct NoteFuncDescStruct {
int contents_size;
- void (*note_contents_func)(NoteFuncArg *arg, PowerPCCPU *cpu);
+ void (*note_contents_func)(NoteFuncArg *arg, PowerPCCPU *cpu, int id);
} note_func[] = {
{sizeof_field(Note, contents.prstatus), ppc_write_elf_prstatus},
{sizeof_field(Note, contents.fpregset), ppc_write_elf_fpregset},
@@ -282,7 +288,7 @@ static int ppc_write_all_elf_notes(const char *note_name,
arg.note.hdr.n_descsz = cpu_to_dump32(s, nf->contents_size);
strncpy(arg.note.name, note_name, sizeof(arg.note.name));
- (*nf->note_contents_func)(&arg, cpu);
+ (*nf->note_contents_func)(&arg, cpu, id);
note_size =
sizeof(arg.note) - sizeof(arg.note.contents) + nf->contents_size;
diff --git a/target/ppc/compat.c b/target/ppc/compat.c
index ebef2cc..55de3bd 100644
--- a/target/ppc/compat.c
+++ b/target/ppc/compat.c
@@ -18,10 +18,10 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm.h"
+#include "system/hw_accel.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
@@ -100,6 +100,13 @@ static const CompatInfo compat_table[] = {
.pcr_level = PCR_COMPAT_3_10,
.max_vthreads = 8,
},
+ { /* POWER11, ISA3.10 */
+ .name = "power11",
+ .pvr = CPU_POWERPC_LOGICAL_3_10_P11,
+ .pcr = PCR_COMPAT_3_10,
+ .pcr_level = PCR_COMPAT_3_10,
+ .max_vthreads = 8,
+ },
};
static const CompatInfo *compat_by_pvr(uint32_t pvr)
@@ -132,6 +139,10 @@ static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr,
/* Outside specified range */
return false;
}
+ if (compat->pvr > pcc->spapr_logical_pvr) {
+ /* Older CPU cannot support a newer processor's compat mode */
+ return false;
+ }
if (!(pcc->pcr_supported & compat->pcr_level)) {
/* Not supported by this CPU */
return false;
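
    A small worked example of the new ordering check, using the logical PVR values
    added in cpu-models.h further down (and assuming a POWER10 class advertises
    spapr_logical_pvr == CPU_POWERPC_LOGICAL_3_10, which is an assumption of this
    sketch rather than something stated by the patch):

        /* compat->pvr > pcc->spapr_logical_pvr rejects newer compat modes:
         *   power10 + compat=power11: 0x0F000007 > 0x0F000006 -> rejected
         *   power11 + compat=power11: 0x0F000007 > 0x0F000007 is false -> accepted
         *   power11 + compat=power10: 0x0F000006 > 0x0F000007 is false -> accepted */
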
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index f2301b4..ea86ea2 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -35,7 +35,7 @@
#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \
static void \
glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \
- (ObjectClass *oc, void *data) \
+ (ObjectClass *oc, const void *data) \
{ \
DeviceClass *dc = DEVICE_CLASS(oc); \
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \
@@ -734,6 +734,8 @@
"POWER9 v2.2")
POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10,
"POWER10 v2.0")
+ POWERPC_DEF("power11_v2.0", CPU_POWERPC_POWER11_DD20, POWER11,
+ "POWER11_v2.0")
#endif /* defined (TARGET_PPC64) */
/***************************************************************************/
@@ -909,6 +911,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "power8nvl", "power8nvl_v1.0" },
{ "power9", "power9_v2.2" },
{ "power10", "power10_v2.0" },
+ { "power11", "power11_v2.0" },
#endif
/* Generic PowerPCs */
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index 0229ef3..72ad31b 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -354,6 +354,8 @@ enum {
CPU_POWERPC_POWER10_BASE = 0x00800000,
CPU_POWERPC_POWER10_DD1 = 0x00801100,
CPU_POWERPC_POWER10_DD20 = 0x00801200,
+ CPU_POWERPC_POWER11_BASE = 0x00820000,
+ CPU_POWERPC_POWER11_DD20 = 0x00821200,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
@@ -391,6 +393,7 @@ enum {
CPU_POWERPC_LOGICAL_2_07 = 0x0F000004,
CPU_POWERPC_LOGICAL_3_00 = 0x0F000005,
CPU_POWERPC_LOGICAL_3_10 = 0x0F000006,
+ CPU_POWERPC_LOGICAL_3_10_P11 = 0x0F000007,
};
/* System version register (used on MPC 8xxx) */
diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h
index 77c5ed9..e4ed908 100644
--- a/target/ppc/cpu-param.h
+++ b/target/ppc/cpu-param.h
@@ -2,14 +2,13 @@
* PowerPC cpu parameters for qemu.
*
* Copyright (c) 2007 Jocelyn Mayer
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef PPC_CPU_PARAM_H
#define PPC_CPU_PARAM_H
#ifdef TARGET_PPC64
-# define TARGET_LONG_BITS 64
/*
* Note that the official physical address space bits is 62-M where M
* is implementation dependent. I've not looked up M for the set of
@@ -27,7 +26,6 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 64
# endif
#else
-# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
@@ -35,11 +33,10 @@
#ifdef CONFIG_USER_ONLY
/* Allow user-only to vary page size from 4k */
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 12
#else
# define TARGET_PAGE_BITS 12
#endif
-#define TCG_GUEST_DEFAULT_MO 0
+#define TARGET_INSN_START_EXTRA_WORDS 0
#endif
diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
index e3ad8e0..4d8faad 100644
--- a/target/ppc/cpu.c
+++ b/target/ppc/cpu.c
@@ -22,10 +22,11 @@
#include "cpu-models.h"
#include "cpu-qom.h"
#include "exec/log.h"
+#include "exec/watchpoint.h"
#include "fpu/softfloat-helpers.h"
#include "mmu-hash64.h"
#include "helper_regs.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
target_ulong cpu_read_xer(const CPUPPCState *env)
{
@@ -130,11 +131,13 @@ void ppc_store_ciabr(CPUPPCState *env, target_ulong val)
ppc_update_ciabr(env);
}
-void ppc_update_daw0(CPUPPCState *env)
+void ppc_update_daw(CPUPPCState *env, int rid)
{
CPUState *cs = env_cpu(env);
- target_ulong deaw = env->spr[SPR_DAWR0] & PPC_BITMASK(0, 60);
- uint32_t dawrx = env->spr[SPR_DAWRX0];
+ int spr_dawr = rid ? SPR_DAWR1 : SPR_DAWR0;
+ int spr_dawrx = rid ? SPR_DAWRX1 : SPR_DAWRX0;
+ target_ulong deaw = env->spr[spr_dawr] & PPC_BITMASK(0, 60);
+ uint32_t dawrx = env->spr[spr_dawrx];
int mrd = extract32(dawrx, PPC_BIT_NR(48), 54 - 48);
bool dw = extract32(dawrx, PPC_BIT_NR(57), 1);
bool dr = extract32(dawrx, PPC_BIT_NR(58), 1);
@@ -144,9 +147,9 @@ void ppc_update_daw0(CPUPPCState *env)
vaddr len;
int flags;
- if (env->dawr0_watchpoint) {
- cpu_watchpoint_remove_by_ref(cs, env->dawr0_watchpoint);
- env->dawr0_watchpoint = NULL;
+ if (env->dawr_watchpoint[rid]) {
+ cpu_watchpoint_remove_by_ref(cs, env->dawr_watchpoint[rid]);
+ env->dawr_watchpoint[rid] = NULL;
}
if (!dr && !dw) {
@@ -166,28 +169,45 @@ void ppc_update_daw0(CPUPPCState *env)
flags |= BP_MEM_WRITE;
}
- cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr0_watchpoint);
+ cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr_watchpoint[rid]);
}
void ppc_store_dawr0(CPUPPCState *env, target_ulong val)
{
env->spr[SPR_DAWR0] = val;
- ppc_update_daw0(env);
+ ppc_update_daw(env, 0);
}
-void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
+static void ppc_store_dawrx(CPUPPCState *env, uint32_t val, int rid)
{
int hrammc = extract32(val, PPC_BIT_NR(56), 1);
if (hrammc) {
/* This might be done with a second watchpoint at the xor of DEAW[0] */
- qemu_log_mask(LOG_UNIMP, "%s: DAWRX0[HRAMMC] is unimplemented\n",
- __func__);
+ qemu_log_mask(LOG_UNIMP, "%s: DAWRX%d[HRAMMC] is unimplemented\n",
+ __func__, rid);
}
- env->spr[SPR_DAWRX0] = val;
- ppc_update_daw0(env);
+ env->spr[rid ? SPR_DAWRX1 : SPR_DAWRX0] = val;
+ ppc_update_daw(env, rid);
+}
+
+void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
+{
+ ppc_store_dawrx(env, val, 0);
+}
+
+void ppc_store_dawr1(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_DAWR1] = val;
+ ppc_update_daw(env, 1);
+}
+
+void ppc_store_dawrx1(CPUPPCState *env, uint32_t val)
+{
+ ppc_store_dawrx(env, val, 1);
}
+
#endif
#endif
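
The cpu.c hunk above folds the DAWR0-only update path into a single helper indexed by rid, with one watchpoint slot per register pair. A minimal standalone sketch of that selection pattern follows; the QEMU SPR table and watchpoint plumbing are omitted, and the demo_update_daw() name is invented for illustration.

#include <stdio.h>

/* rid == 0 selects the DAWR0/DAWRX0 pair and watchpoint slot 0,
 * rid == 1 selects DAWR1/DAWRX1 and slot 1 -- mirroring ppc_update_daw(). */
static void demo_update_daw(int rid)
{
    const char *dawr  = rid ? "DAWR1"  : "DAWR0";
    const char *dawrx = rid ? "DAWRX1" : "DAWRX0";
    printf("recompute watchpoint[%d] from %s/%s\n", rid, dawr, dawrx);
}

int main(void)
{
    demo_update_daw(0);   /* path taken by ppc_store_dawr0()/ppc_store_dawrx0() */
    demo_update_daw(1);   /* path taken by ppc_store_dawr1()/ppc_store_dawrx1() */
    return 0;
}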
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 2015e60..6b90543 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -22,7 +22,9 @@
#include "qemu/int128.h"
#include "qemu/cpu-float.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "cpu-qom.h"
#include "qom/object.h"
#include "hw/registerfields.h"
@@ -40,6 +42,7 @@
#define PPC_BIT_NR(bit) (63 - (bit))
#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
+#define PPC_BIT32_NR(bit) (31 - (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
@@ -215,6 +218,8 @@ typedef enum powerpc_excp_t {
POWERPC_EXCP_POWER9,
/* POWER10 exception model */
POWERPC_EXCP_POWER10,
+ /* POWER11 exception model */
+ POWERPC_EXCP_POWER11,
} powerpc_excp_t;
/*****************************************************************************/
@@ -634,8 +639,8 @@ FIELD(MSR, LE, MSR_LE, 1)
#define PSSCR_EC PPC_BIT(43) /* Exit Criterion */
/* HFSCR bits */
-#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */
-#define HFSCR_BHRB PPC_BIT(59) /* BHRB Instructions */
+#define HFSCR_MSGP PPC_BIT_NR(53) /* Privileged Message Send Facilities */
+#define HFSCR_BHRB PPC_BIT_NR(59) /* BHRB Instructions */
#define HFSCR_IC_MSGP 0xA
#define DBCR0_ICMP (1 << 27)
@@ -1197,21 +1202,6 @@ DEXCR_ASPECT(NPHIE, 5)
DEXCR_ASPECT(PHIE, 6)
/*****************************************************************************/
-/* PowerNV ChipTOD and TimeBase State Machine */
-struct pnv_tod_tbst {
- int tb_ready_for_tod; /* core TB ready to receive TOD from chiptod */
- int tod_sent_to_tb; /* chiptod sent TOD to the core TB */
-
- /*
- * "Timers" for async TBST events are simulated by mfTFAC because TFAC
- * is polled for such events. These are just used to ensure firmware
- * performs the polling at least a few times.
- */
- int tb_state_timer;
- int tb_sync_pulse_timer;
-};
-
-/*****************************************************************************/
/* The whole PowerPC CPU context */
/*
@@ -1262,15 +1252,17 @@ struct CPUArchState {
/* when a memory exception occurs, the access type is stored here */
int access_type;
+ /* For SMT processors */
+ bool has_smt_siblings;
+ int core_index;
+ int chip_index;
+
#if !defined(CONFIG_USER_ONLY)
/* MMU context, only relevant for full system emulation */
#if defined(TARGET_PPC64)
ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
struct CPUBreakpoint *ciabr_breakpoint;
- struct CPUWatchpoint *dawr0_watchpoint;
-
- /* POWER CPU regs/state */
- target_ulong scratch[8]; /* SCRATCH registers (shared across core) */
+ struct CPUWatchpoint *dawr_watchpoint[2];
#endif
target_ulong sr[32]; /* segment registers */
uint32_t nb_BATs; /* number of BATs */
@@ -1291,12 +1283,6 @@ struct CPUArchState {
uint32_t tlb_need_flush; /* Delayed flush needed */
#define TLB_NEED_LOCAL_FLUSH 0x1
#define TLB_NEED_GLOBAL_FLUSH 0x2
-
-#if defined(TARGET_PPC64)
- /* PowerNV chiptod / timebase facility state. */
- /* Would be nice to put these into PnvCore */
- struct pnv_tod_tbst pnv_tod_tbst;
-#endif
#endif
/* Other registers */
@@ -1372,6 +1358,18 @@ struct CPUArchState {
* special way (such as routing some resume causes to 0x100, i.e. sreset).
*/
bool resume_as_sreset;
+
+ /*
+ * On powernv, quiesced means the CPU has been stopped using PC direct
+ * control xscom registers.
+ *
+ * On spapr, quiesced means it is in the "RTAS stopped" state.
+ *
+ * The core halted/stopped variables aren't sufficient for this, because
+ * they can be changed with various side-band operations like qmp cont,
+ * powersave interrupts, etc.
+ */
+ bool quiesced;
#endif
/* These resources are used only in TCG */
@@ -1426,12 +1424,12 @@ struct CPUArchState {
uint64_t pmu_base_time;
};
-#define _CORE_ID(cs) \
- (POWERPC_CPU(cs)->env.spr_cb[SPR_PIR].default_value & ~(cs->nr_threads - 1))
-
#define THREAD_SIBLING_FOREACH(cs, cs_sibling) \
CPU_FOREACH(cs_sibling) \
- if (_CORE_ID(cs) == _CORE_ID(cs_sibling))
+ if ((POWERPC_CPU(cs)->env.chip_index == \
+ POWERPC_CPU(cs_sibling)->env.chip_index) && \
+ (POWERPC_CPU(cs)->env.core_index == \
+ POWERPC_CPU(cs_sibling)->env.core_index))
#define SET_FIT_PERIOD(a_, b_, c_, d_) \
do { \
@@ -1476,16 +1474,6 @@ struct ArchCPU {
/* Those resources are used only during code translation */
/* opcode handlers */
opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
-
- /* Fields related to migration compatibility hacks */
- bool pre_2_8_migration;
- target_ulong mig_msr_mask;
- uint64_t mig_insns_flags;
- uint64_t mig_insns_flags2;
- uint32_t mig_nb_BATs;
- bool pre_2_10_migration;
- bool pre_3_0_migration;
- int32_t mig_slb_nr;
};
/**
@@ -1504,6 +1492,7 @@ struct PowerPCCPUClass {
void (*parent_parse_features)(const char *type, char *str, Error **errp);
uint32_t pvr;
+ uint32_t spapr_logical_pvr;
/*
* If @best is false, match if pcc is in the family of pvr
* Else match only if pcc is the best match for pvr in this family.
@@ -1535,6 +1524,17 @@ struct PowerPCCPUClass {
int (*check_attn)(CPUPPCState *env);
};
+static inline bool ppc_cpu_core_single_threaded(CPUState *cs)
+{
+ return !POWERPC_CPU(cs)->env.has_smt_siblings;
+}
+
+static inline bool ppc_cpu_lpar_single_threaded(CPUState *cs)
+{
+ return !(POWERPC_CPU(cs)->env.flags & POWERPC_FLAG_SMT_1LPAR) ||
+ ppc_cpu_core_single_threaded(cs);
+}
+
ObjectClass *ppc_cpu_class_by_name(const char *name);
PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
@@ -1594,20 +1594,22 @@ extern const VMStateDescription vmstate_ppc_cpu;
/*****************************************************************************/
void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
void ppc_update_ciabr(CPUPPCState *env);
void ppc_store_ciabr(CPUPPCState *env, target_ulong value);
-void ppc_update_daw0(CPUPPCState *env);
+void ppc_update_daw(CPUPPCState *env, int rid);
void ppc_store_dawr0(CPUPPCState *env, target_ulong value);
void ppc_store_dawrx0(CPUPPCState *env, uint32_t value);
+void ppc_store_dawr1(CPUPPCState *env, target_ulong value);
+void ppc_store_dawrx1(CPUPPCState *env, uint32_t value);
#endif /* !defined(CONFIG_USER_ONLY) */
void ppc_store_msr(CPUPPCState *env, target_ulong value);
-void ppc_cpu_list(void);
-
/* Time-base and decrementer management */
uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
@@ -1669,8 +1671,6 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
-#define cpu_list ppc_cpu_list
-
/* MMU modes definitions */
#define MMU_USER_IDX 0
static inline int ppc_env_mmu_index(CPUPPCState *env, bool ifetch)
@@ -1700,8 +1700,6 @@ void ppc_compat_add_property(Object *obj, const char *name,
uint32_t *compat_pvr, const char *basedesc);
#endif /* defined(TARGET_PPC64) */
-#include "exec/cpu-all.h"
-
/*****************************************************************************/
/* CRF definitions */
#define CRF_LT_BIT 3
@@ -2102,6 +2100,7 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define SPR_VTB (0x351)
#define SPR_LDBAR (0x352)
#define SPR_MMCRC (0x353)
+#define SPR_PMSR (0x355)
#define SPR_PSSCR (0x357)
#define SPR_440_INV0 (0x370)
#define SPR_440_INV1 (0x371)
@@ -2109,8 +2108,10 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define SPR_440_INV2 (0x372)
#define SPR_TRIG2 (0x372)
#define SPR_440_INV3 (0x373)
+#define SPR_PMCR (0x374)
#define SPR_440_ITV0 (0x374)
#define SPR_440_ITV1 (0x375)
+#define SPR_RWMR (0x375)
#define SPR_440_ITV2 (0x376)
#define SPR_440_ITV3 (0x377)
#define SPR_440_CCR1 (0x378)
@@ -2750,24 +2751,6 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer);
*/
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
-#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-#else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->nip;
- *cs_base = 0;
- *flags = env->hflags;
-}
-#endif
-
-G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception);
-G_NORETURN void raise_exception_ra(CPUPPCState *env, uint32_t exception,
- uintptr_t raddr);
-G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code);
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
uint32_t error_code, uintptr_t raddr);
@@ -3051,7 +3034,8 @@ static inline int check_attn_none(CPUPPCState *env)
#define POWERPC_FAMILY(_name) \
static void \
- glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
+ glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, \
+ const void *); \
\
static const TypeInfo \
glue(glue(ppc_, _name), _cpu_family_type_info) = { \
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index cdada79..a0e77f2 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -22,9 +22,9 @@
#include "qemu/osdep.h"
#include "disas/dis-asm.h"
#include "gdbstub/helpers.h"
-#include "sysemu/cpus.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/tcg.h"
+#include "system/cpus.h"
+#include "system/hw_accel.h"
+#include "system/tcg.h"
#include "cpu-models.h"
#include "mmu-hash32.h"
#include "mmu-hash64.h"
@@ -32,7 +32,7 @@
#include "qemu/module.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qapi/visitor.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
@@ -40,18 +40,18 @@
#include "qemu/cutils.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
-
+#include "exec/watchpoint.h"
#include "helper_regs.h"
#include "internal.h"
#include "spr_common.h"
#include "power8-pmu.h"
-
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "hw/intc/intc.h"
#include "kvm_ppc.h"
#endif
+#include "cpu_init.h"
/* #define PPC_DEBUG_SPR */
/* #define USE_APPLE_GDB */
@@ -921,6 +921,18 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
#endif
}
+static void register_atb_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_ATBL, "ATBL",
+ &spr_read_atbl, SPR_NOACCESS,
+ &spr_read_atbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ATBU, "ATBU",
+ &spr_read_atbu, SPR_NOACCESS,
+ &spr_read_atbu, SPR_NOACCESS,
+ 0x00000000);
+}
+
/* SPR specific to PowerPC 440 implementation */
static void register_440_sprs(CPUPPCState *env)
{
@@ -2154,7 +2166,7 @@ static void init_proc_405(CPUPPCState *env)
SET_WDT_PERIOD(16, 20, 24, 28);
}
-POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(405)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2222,7 +2234,7 @@ static void init_proc_440EP(CPUPPCState *env)
SET_WDT_PERIOD(20, 24, 28, 32);
}
-POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(440EP)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2261,7 +2273,7 @@ POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data)
POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
}
-POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(460EX)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2316,7 +2328,7 @@ static void init_proc_440GP(CPUPPCState *env)
SET_WDT_PERIOD(20, 24, 28, 32);
}
-POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(440GP)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2386,7 +2398,7 @@ static void init_proc_440x5(CPUPPCState *env)
SET_WDT_PERIOD(20, 24, 28, 32);
}
-POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(440x5)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2422,7 +2434,7 @@ POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data)
POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
}
-POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2471,7 +2483,7 @@ static void init_proc_MPC5xx(CPUPPCState *env)
/* XXX: TODO: allocate internal IRQ controller */
}
-POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2514,7 +2526,7 @@ static void init_proc_MPC8xx(CPUPPCState *env)
/* XXX: TODO: allocate internal IRQ controller */
}
-POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2565,7 +2577,7 @@ static void init_proc_G2(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(G2)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(G2)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2604,7 +2616,7 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data)
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
-POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(G2LE)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2731,14 +2743,6 @@ static void init_proc_e200(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000); /* TOFIX */
- spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
init_tlbs_emb(env);
init_excp_e200(env, 0xFFFF0000UL);
@@ -2747,7 +2751,7 @@ static void init_proc_e200(CPUPPCState *env)
/* XXX: TODO: allocate internal IRQ controller */
}
-POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e200)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -2910,6 +2914,11 @@ static void init_proc_e500(CPUPPCState *env, int version)
register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg);
register_usprgh_sprs(env);
+ if (version != fsl_e500v1) {
+ /* e500v1 has no support for alternate timebase */
+ register_atb_sprs(env);
+ }
+
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3035,7 +3044,7 @@ static void init_proc_e500v1(CPUPPCState *env)
init_proc_e500(env, fsl_e500v1);
}
-POWERPC_FAMILY(e500v1)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e500v1)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3079,7 +3088,7 @@ static void init_proc_e500v2(CPUPPCState *env)
init_proc_e500(env, fsl_e500v2);
}
-POWERPC_FAMILY(e500v2)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e500v2)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3123,7 +3132,7 @@ static void init_proc_e500mc(CPUPPCState *env)
init_proc_e500(env, fsl_e500mc);
}
-POWERPC_FAMILY(e500mc)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e500mc)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3170,7 +3179,7 @@ static void init_proc_e5500(CPUPPCState *env)
init_proc_e500(env, fsl_e5500);
}
-POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e5500)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3219,7 +3228,7 @@ static void init_proc_e6500(CPUPPCState *env)
init_proc_e500(env, fsl_e6500);
}
-POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e6500)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3282,7 +3291,7 @@ static void init_proc_603(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(603)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3322,7 +3331,7 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
-POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(603E)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3368,7 +3377,7 @@ static void init_proc_e300(CPUPPCState *env)
register_e300_sprs(env);
}
-POWERPC_FAMILY(e300)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e300)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3424,7 +3433,7 @@ static void init_proc_604(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(604)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(604)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3470,7 +3479,7 @@ static void init_proc_604E(CPUPPCState *env)
register_604e_sprs(env);
}
-POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(604E)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3527,7 +3536,7 @@ static void init_proc_740(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(740)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(740)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3593,7 +3602,7 @@ static void init_proc_750(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(750)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(750)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3740,7 +3749,7 @@ static void init_proc_750cl(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(750cl)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3848,7 +3857,7 @@ static void init_proc_750cx(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(750cx)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3921,7 +3930,7 @@ static void init_proc_750fx(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(750fx)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -3994,7 +4003,7 @@ static void init_proc_750gx(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(750gx)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4054,7 +4063,7 @@ static void init_proc_745(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(745)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(745)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4100,7 +4109,7 @@ static void init_proc_755(CPUPPCState *env)
register_755_sprs(env);
}
-POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(755)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4167,7 +4176,7 @@ static void init_proc_7400(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7400)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7400)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4247,7 +4256,7 @@ static void init_proc_7410(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7410)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7410)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4348,7 +4357,7 @@ static void init_proc_7440(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7440)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4471,7 +4480,7 @@ static void init_proc_7450(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7450)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4601,7 +4610,7 @@ static void init_proc_7445(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7445)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4733,7 +4742,7 @@ static void init_proc_7455(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7455)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -4885,7 +4894,7 @@ static void init_proc_7457(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(7457)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -5020,7 +5029,7 @@ static void init_proc_e600(CPUPPCState *env)
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(e600)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e600)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -5171,6 +5180,20 @@ static void register_book3s_207_dbg_sprs(CPUPPCState *env)
KVM_REG_PPC_CIABR, 0x00000000);
}
+static void register_book3s_310_dbg_sprs(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_DAWR1, "DAWR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_dawr1,
+ KVM_REG_PPC_DAWR1, 0x00000000);
+ spr_register_kvm_hv(env, SPR_DAWRX1, "DAWRX1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_dawrx1,
+ KVM_REG_PPC_DAWRX1, 0x00000000);
+}
+
static void register_970_dbg_sprs(CPUPPCState *env)
{
/* Breakpoints */
@@ -5760,16 +5783,6 @@ static void register_power_common_book4_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_core_write_generic,
0x00000000);
- spr_register_hv(env, SPR_POWER_SPRC, "SPRC",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_sprc,
- 0x00000000);
- spr_register_hv(env, SPR_POWER_SPRD, "SPRD",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_sprd, &spr_write_sprd,
- 0x00000000);
#endif
}
@@ -5782,6 +5795,23 @@ static void register_power9_book4_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_WORT, 0);
+ spr_register_hv(env, SPR_RWMR, "RWMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ /* SPRC/SPRD exist in earlier CPUs but are only tested on POWER9/10 */

+ spr_register_hv(env, SPR_POWER_SPRC, "SPRC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sprc,
+ 0x00000000);
+ spr_register_hv(env, SPR_POWER_SPRD, "SPRD",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_sprd, &spr_write_sprd,
+ 0x00000000);
#endif
}
@@ -5873,22 +5903,22 @@ static void register_power10_hash_sprs(CPUPPCState *env)
((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand);
g_rand_free(rand);
#endif
- spr_register(env, SPR_HASHKEYR, "HASHKEYR",
+ spr_register_kvm(env, SPR_HASHKEYR, "HASHKEYR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
- hashkeyr_initial_value);
- spr_register_hv(env, SPR_HASHPKEYR, "HASHPKEYR",
+ KVM_REG_PPC_HASHKEYR, hashkeyr_initial_value);
+ spr_register_kvm_hv(env, SPR_HASHPKEYR, "HASHPKEYR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
- hashpkeyr_initial_value);
+ KVM_REG_PPC_HASHPKEYR, hashpkeyr_initial_value);
}
static void register_power10_dexcr_sprs(CPUPPCState *env)
{
- spr_register(env, SPR_DEXCR, "DEXCR",
+ spr_register_kvm(env, SPR_DEXCR, "DEXCR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DEXCR,
0);
spr_register(env, SPR_UDEXCR, "UDEXCR",
@@ -5964,7 +5994,7 @@ static void init_proc_970(CPUPPCState *env)
ppc970_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(970)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -6039,7 +6069,7 @@ static void init_proc_power5plus(CPUPPCState *env)
ppc970_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(POWER5P)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -6145,13 +6175,14 @@ static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
return true;
}
-POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(POWER7)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER7";
dc->desc = "POWER7";
+ pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_06_PLUS;
pcc->pvr_match = ppc_pvr_match_power7;
pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@@ -6308,13 +6339,14 @@ static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
return true;
}
-POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(POWER8)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
+ pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_07;
pcc->pvr_match = ppc_pvr_match_power8;
pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@@ -6406,7 +6438,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = {
#endif /* CONFIG_USER_ONLY */
#define POWER9_BHRB_ENTRIES_LOG2 5
-static void init_proc_POWER9(CPUPPCState *env)
+static void register_power9_common_sprs(CPUPPCState *env)
{
/* Common Registers */
init_proc_book3s_common(env);
@@ -6425,7 +6457,6 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
- register_HEIR32_spr(env);
register_power6_dbg_sprs(env);
register_power7_common_sprs(env);
register_power8_tce_address_control_sprs(env);
@@ -6443,16 +6474,32 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
- /* POWER9 Specific registers */
- spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
- spr_read_generic, spr_write_generic,
- KVM_REG_PPC_TIDR, 0);
-
/* FIXME: Filter fields properly based on privilege level */
spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
spr_read_generic, spr_write_generic,
KVM_REG_PPC_PSSCR, 0);
+ spr_register_hv(env, SPR_PMSR, "PMSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_pmsr, SPR_NOACCESS,
+ 0);
+ spr_register_hv(env, SPR_PMCR, "PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pmcr,
+ PPC_BIT(63)); /* Version 1 (POWER9/10) */
+
+}
+
+static void init_proc_POWER9(CPUPPCState *env)
+{
+ register_power9_common_sprs(env);
+ register_HEIR32_spr(env);
+ /* POWER9 Specific registers */
+ spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
+ spr_read_generic, spr_write_generic,
+ KVM_REG_PPC_TIDR, 0);
/* env variables */
env->dcache_line_size = 128;
env->icache_line_size = 128;
@@ -6501,66 +6548,24 @@ static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
return false;
}
-POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(POWER9)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER9";
dc->desc = "POWER9";
+ pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_00;
pcc->pvr_match = ppc_pvr_match_power9;
- pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
- pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 |
- PCR_COMPAT_2_05;
+ pcc->pcr_mask = PPC_PCR_MASK_POWER9;
+ pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER9;
pcc->init_proc = init_proc_POWER9;
pcc->check_pow = check_pow_nocheck;
pcc->check_attn = check_attn_hid0_power9;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
- PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
- PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
- PPC_FLOAT_FRSQRTES |
- PPC_FLOAT_STFIWX |
- PPC_FLOAT_EXT |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
- PPC_SEGMENT_64B | PPC_SLBI |
- PPC_POPCNTB | PPC_POPCNTWD |
- PPC_CILDST;
- pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
- PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
- PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
- PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
- PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
- PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC |
- PPC2_BCDA_ISA206;
- pcc->msr_mask = (1ull << MSR_SF) |
- (1ull << MSR_HV) |
- (1ull << MSR_TM) |
- (1ull << MSR_VR) |
- (1ull << MSR_VSX) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_DE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_PMM) |
- (1ull << MSR_RI) |
- (1ull << MSR_LE);
- pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
- (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
- LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
- (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
- LPCR_DEE | LPCR_OEE))
- | LPCR_MER | LPCR_GTSE | LPCR_TC |
- LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
+ pcc->insns_flags = PPC_INSNS_FLAGS_POWER9;
+ pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER9;
+ pcc->msr_mask = PPC_MSR_MASK_POWER9;
+ pcc->lpcr_mask = PPC_LPCR_MASK_POWER9;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
#if !defined(CONFIG_USER_ONLY)
@@ -6573,10 +6578,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
pcc->excp_model = POWERPC_EXCP_POWER9;
pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
pcc->bfd_mach = bfd_mach_ppc64;
- pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
- POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
- POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
- POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV;
+ pcc->flags = POWERPC_FLAGS_POWER9;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@@ -6603,50 +6605,13 @@ static struct ppc_radix_page_info POWER10_radix_page_info = {
#define POWER10_BHRB_ENTRIES_LOG2 5
static void init_proc_POWER10(CPUPPCState *env)
{
- /* Common Registers */
- init_proc_book3s_common(env);
- register_book3s_207_dbg_sprs(env);
-
- /* Common TCG PMU */
- init_tcg_pmu_power8(env);
-
- /* POWER8 Specific Registers */
- register_book3s_ids_sprs(env);
- register_amr_sprs(env);
- register_iamr_sprs(env);
- register_book3s_purr_sprs(env);
- register_power5p_common_sprs(env);
- register_power5p_lpar_sprs(env);
- register_power5p_ear_sprs(env);
- register_power5p_tb_sprs(env);
- register_power6_common_sprs(env);
+ register_power9_common_sprs(env);
register_HEIR64_spr(env);
- register_power6_dbg_sprs(env);
- register_power7_common_sprs(env);
- register_power8_tce_address_control_sprs(env);
- register_power8_ids_sprs(env);
- register_power8_ebb_sprs(env);
- register_power8_fscr_sprs(env);
- register_power8_pmu_sup_sprs(env);
- register_power8_pmu_user_sprs(env);
- register_power8_tm_sprs(env);
- register_power8_pspb_sprs(env);
- register_power8_dpdes_sprs(env);
- register_vtb_sprs(env);
- register_power8_ic_sprs(env);
- register_power9_book4_sprs(env);
- register_power8_rpr_sprs(env);
- register_power9_mmu_sprs(env);
+ register_book3s_310_dbg_sprs(env);
register_power10_hash_sprs(env);
register_power10_dexcr_sprs(env);
register_power10_pmu_sup_sprs(env);
register_power10_pmu_user_sprs(env);
-
- /* FIXME: Filter fields properly based on privilege level */
- spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
- spr_read_generic, spr_write_generic,
- KVM_REG_PPC_PSSCR, 0);
-
/* env variables */
env->dcache_line_size = 128;
env->icache_line_size = 128;
@@ -6681,68 +6646,24 @@ static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
return false;
}
-POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(POWER10)(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER10";
dc->desc = "POWER10";
+ pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10;
pcc->pvr_match = ppc_pvr_match_power10;
- pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 |
- PCR_COMPAT_3_00;
- pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
- PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->pcr_mask = PPC_PCR_MASK_POWER10;
+ pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER10;
pcc->init_proc = init_proc_POWER10;
pcc->check_pow = check_pow_nocheck;
pcc->check_attn = check_attn_hid0_power9;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
- PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
- PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
- PPC_FLOAT_FRSQRTES |
- PPC_FLOAT_STFIWX |
- PPC_FLOAT_EXT |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
- PPC_SEGMENT_64B | PPC_SLBI |
- PPC_POPCNTB | PPC_POPCNTWD |
- PPC_CILDST;
- pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
- PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
- PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
- PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
- PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
- PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 |
- PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206;
- pcc->msr_mask = (1ull << MSR_SF) |
- (1ull << MSR_HV) |
- (1ull << MSR_VR) |
- (1ull << MSR_VSX) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_DE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_PMM) |
- (1ull << MSR_RI) |
- (1ull << MSR_LE);
- pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
- (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
- LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
- (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
- LPCR_DEE | LPCR_OEE))
- | LPCR_MER | LPCR_GTSE | LPCR_TC |
- LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
- /* DD2 adds an extra HAIL bit */
- pcc->lpcr_mask |= LPCR_HAIL;
+ pcc->insns_flags = PPC_INSNS_FLAGS_POWER10;
+ pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER10;
+ pcc->msr_mask = PPC_MSR_MASK_POWER10;
+ pcc->lpcr_mask = PPC_LPCR_MASK_POWER10;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
@@ -6755,11 +6676,67 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
pcc->excp_model = POWERPC_EXCP_POWER10;
pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
pcc->bfd_mach = bfd_mach_ppc64;
- pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
- POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
- POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
- POWERPC_FLAG_VSX | POWERPC_FLAG_SCV |
- POWERPC_FLAG_BHRB;
+ pcc->flags = POWERPC_FLAGS_POWER10;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+static void init_proc_POWER11(CPUPPCState *env)
+{
+ init_proc_POWER10(env);
+}
+
+static bool ppc_pvr_match_power11(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
+{
+ uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
+ uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
+
+ if (!best && (base == CPU_POWERPC_POWER11_BASE)) {
+ return true;
+ }
+
+ if (base != pcc_base) {
+ return false;
+ }
+
+ if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) {
+ return true;
+ }
+
+ return false;
+}
+
+POWERPC_FAMILY(POWER11)(ObjectClass *oc, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER11";
+ dc->desc = "POWER11";
+ pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10_P11;
+ pcc->pvr_match = ppc_pvr_match_power11;
+ pcc->pcr_mask = PPC_PCR_MASK_POWER11;
+ pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER11;
+ pcc->init_proc = init_proc_POWER11;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->check_attn = check_attn_hid0_power9;
+ pcc->insns_flags = PPC_INSNS_FLAGS_POWER11;
+ pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER11;
+ pcc->msr_mask = PPC_MSR_MASK_POWER11;
+ pcc->lpcr_mask = PPC_LPCR_MASK_POWER11;
+
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
+ pcc->mmu_model = POWERPC_MMU_3_00;
+#if !defined(CONFIG_USER_ONLY)
+ /* segment page size remains the same */
+ pcc->hash64_opts = &ppc_hash64_opts_POWER7;
+ pcc->radix_page_info = &POWER10_radix_page_info;
+ pcc->lrg_decr_bits = 56;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER11;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAGS_POWER11;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@@ -6785,7 +6762,8 @@ void cpu_ppc_set_1lpar(PowerPCCPU *cpu)
/*
* pseries SMT means "LPAR per core" mode, e.g., msgsndp is usable
- * between threads.
+ * between threads. powernv be in either mode, and it mostly affects
+ * supervisor visible registers and instructions.
*/
if (env->flags & POWERPC_FLAG_SMT) {
env->flags |= POWERPC_FLAG_SMT_1LPAR;
@@ -6975,7 +6953,7 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
pcc->parent_realize(dev, errp);
- if (env_cpu(env)->nr_threads > 1) {
+ if (!ppc_cpu_core_single_threaded(cs)) {
env->flags |= POWERPC_FLAG_SMT;
}
@@ -7103,7 +7081,7 @@ ObjectClass *ppc_cpu_class_by_name(const char *name)
if (strcmp(name, "max") == 0) {
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
if (mc) {
- return object_class_by_name(mc->default_cpu_type);
+ return object_class_by_name(machine_class_default_cpu_type(mc));
}
}
#endif
@@ -7136,7 +7114,7 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
}
/* Sort by PVR, ordering special case "host" last. */
-static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
ObjectClass *oc_a = (ObjectClass *)a;
ObjectClass *oc_b = (ObjectClass *)b;
@@ -7198,13 +7176,13 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
g_free(name);
}
-void ppc_cpu_list(void)
+static void ppc_cpu_list(void)
{
GSList *list;
qemu_printf("Available CPUs:\n");
list = object_class_get_list(TYPE_POWERPC_CPU, false);
- list = g_slist_sort(list, ppc_cpu_list_compare);
+ list = g_slist_sort_with_data(list, ppc_cpu_list_compare, NULL);
g_slist_foreach(list, ppc_cpu_list_entry, NULL);
g_slist_free(list);
@@ -7237,17 +7215,19 @@ static void ppc_restore_state_to_opc(CPUState *cs,
cpu->env.nip = data[0];
}
-#endif /* CONFIG_TCG */
-static bool ppc_cpu_has_work(CPUState *cs)
+static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
- return cs->interrupt_request & CPU_INTERRUPT_HARD;
+ return ppc_env_mmu_index(cpu_env(cs), ifetch);
}
+#endif /* CONFIG_TCG */
-static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
+#ifndef CONFIG_USER_ONLY
+static bool ppc_cpu_has_work(CPUState *cs)
{
- return ppc_env_mmu_index(cpu_env(cs), ifetch);
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
+#endif /* !CONFIG_USER_ONLY */
static void ppc_cpu_reset_hold(Object *obj, ResetType type)
{
@@ -7324,6 +7304,36 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type)
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,
&env->fp_status);
+ /* Similarly for flush-to-zero */
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
+
+ /*
+ * PowerPC propagation rules:
+ * 1. A if it is an sNaN or qNaN
+ * 2. B if it is an sNaN or qNaN
+ * A signaling NaN is always silenced before returning it.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
+ set_float_2nan_prop_rule(float_2nan_prop_ab, &env->vec_status);
+ /*
+ * NaN propagation for fused multiply-add:
+ * if fRA is a NaN return it; otherwise if fRB is a NaN return it;
+ * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + fRB
+ * whereas QEMU labels the operands as (a * b) + c.
+ */
+ set_float_3nan_prop_rule(float_3nan_prop_acb, &env->fp_status);
+ set_float_3nan_prop_rule(float_3nan_prop_acb, &env->vec_status);
+ /*
+ * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
+ * to return an input NaN if we have one (ie c) rather than generating
+ * a default NaN
+ */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->vec_status);
+
+ /* Default NaN: sign bit clear, set frac msb */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ set_float_default_nan_pattern(0b01000000, &env->vec_status);
for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
ppc_spr_t *spr = &env->spr_cb[i];
@@ -7376,6 +7386,12 @@ static void ppc_cpu_exec_exit(CPUState *cs)
cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu);
}
}
+
+static vaddr ppc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return (cpu_env(cs)->hflags >> HFLAGS_64) & 1 ? result : (uint32_t)result;
+}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
@@ -7433,6 +7449,8 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
if ((env->hflags >> MSR_LE) & 1) {
info->endian = BFD_ENDIAN_LITTLE;
+ } else {
+ info->endian = BFD_ENDIAN_BIG;
}
info->mach = env->bfd_mach;
if (!env->bfd_mach) {
@@ -7449,19 +7467,11 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
#endif
}
-static Property ppc_cpu_properties[] = {
- DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
- DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration,
- false),
- DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration,
- false),
- DEFINE_PROP_END_OF_LIST(),
-};
-
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps ppc_sysemu_ops = {
+ .has_work = ppc_cpu_has_work,
.get_phys_page_debug = ppc_cpu_get_phys_page_debug,
.write_elf32_note = ppc32_cpu_write_elf32_note,
.write_elf64_note = ppc64_cpu_write_elf64_note,
@@ -7471,18 +7481,25 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
static const TCGCPUOps ppc_tcg_ops = {
+ .mttcg_supported = TARGET_LONG_BITS == 64,
+ .guest_default_memory_order = 0,
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
+ .get_tb_cpu_state = ppc_get_tb_cpu_state,
.restore_state_to_opc = ppc_restore_state_to_opc,
+ .mmu_index = ppc_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = ppc_cpu_record_sigsegv,
#else
.tlb_fill = ppc_cpu_tlb_fill,
+ .pointer_wrap = ppc_pointer_wrap,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.cpu_exec_halt = ppc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
@@ -7495,7 +7512,7 @@ static const TCGCPUOps ppc_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void ppc_cpu_class_init(ObjectClass *oc, void *data)
+static void ppc_cpu_class_init(ObjectClass *oc, const void *data)
{
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -7507,14 +7524,12 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
device_class_set_parent_unrealize(dc, ppc_cpu_unrealize,
&pcc->parent_unrealize);
pcc->pvr_match = ppc_pvr_match_default;
- device_class_set_props(dc, ppc_cpu_properties);
resettable_class_set_parent_phases(rc, NULL, ppc_cpu_reset_hold, NULL,
&pcc->parent_phases);
cc->class_by_name = ppc_cpu_class_by_name;
- cc->has_work = ppc_cpu_has_work;
- cc->mmu_index = ppc_cpu_mmu_index;
+ cc->list_cpus = ppc_cpu_list;
cc->dump_state = ppc_cpu_dump_state;
cc->set_pc = ppc_cpu_set_pc;
cc->get_pc = ppc_cpu_get_pc;
@@ -7563,7 +7578,7 @@ static const TypeInfo ppc_cpu_type_info = {
.class_size = sizeof(PowerPCCPUClass),
.class_init = ppc_cpu_class_init,
#ifndef CONFIG_USER_ONLY
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_INTERRUPT_STATS_PROVIDER },
{ }
},
diff --git a/target/ppc/cpu_init.h b/target/ppc/cpu_init.h
new file mode 100644
index 0000000..f8fd6ff
--- /dev/null
+++ b/target/ppc/cpu_init.h
@@ -0,0 +1,91 @@
+#ifndef TARGET_PPC_CPU_INIT_H
+#define TARGET_PPC_CPU_INIT_H
+
+#define PPC_INSNS_FLAGS_POWER9 \
+ (PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | \
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | \
+ PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | \
+ PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \
+ PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | \
+ PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | \
+ PPC_CILDST)
+
+#define PPC_INSNS_FLAGS_POWER10 PPC_INSNS_FLAGS_POWER9
+#define PPC_INSNS_FLAGS_POWER11 PPC_INSNS_FLAGS_POWER10
+
+#define PPC_INSNS_FLAGS2_POWER_COMMON \
+ (PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | \
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
+ PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | \
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | \
+ PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_ISA300 | PPC2_PRCNTL | \
+ PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206)
+
+#define PPC_INSNS_FLAGS2_POWER9 \
+ (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_TM)
+#define PPC_INSNS_FLAGS2_POWER10 \
+ (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_ISA310)
+#define PPC_INSNS_FLAGS2_POWER11 PPC_INSNS_FLAGS2_POWER10
+
+#define PPC_MSR_MASK_POWER_COMMON \
+ ((1ull << MSR_SF) | \
+ (1ull << MSR_HV) | \
+ (1ull << MSR_VR) | \
+ (1ull << MSR_VSX) | \
+ (1ull << MSR_EE) | \
+ (1ull << MSR_PR) | \
+ (1ull << MSR_FP) | \
+ (1ull << MSR_ME) | \
+ (1ull << MSR_FE0) | \
+ (1ull << MSR_SE) | \
+ (1ull << MSR_DE) | \
+ (1ull << MSR_FE1) | \
+ (1ull << MSR_IR) | \
+ (1ull << MSR_DR) | \
+ (1ull << MSR_PMM) | \
+ (1ull << MSR_RI) | \
+ (1ull << MSR_LE))
+
+#define PPC_MSR_MASK_POWER9 \
+ (PPC_MSR_MASK_POWER_COMMON | (1ull << MSR_TM))
+#define PPC_MSR_MASK_POWER10 \
+ PPC_MSR_MASK_POWER_COMMON
+#define PPC_MSR_MASK_POWER11 PPC_MSR_MASK_POWER10
+
+#define PPC_PCR_MASK_POWER9 \
+ (PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07)
+#define PPC_PCR_MASK_POWER10 \
+ (PPC_PCR_MASK_POWER9 | PCR_COMPAT_3_00)
+#define PPC_PCR_MASK_POWER11 PPC_PCR_MASK_POWER10
+
+#define PPC_PCR_SUPPORTED_POWER9 \
+ (PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05)
+#define PPC_PCR_SUPPORTED_POWER10 \
+ (PPC_PCR_SUPPORTED_POWER9 | PCR_COMPAT_3_10)
+#define PPC_PCR_SUPPORTED_POWER11 PPC_PCR_SUPPORTED_POWER10
+
+#define PPC_LPCR_MASK_POWER9 \
+ (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | \
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | \
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | \
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | \
+ LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | \
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE)
+/* DD2 adds an extra HAIL bit */
+#define PPC_LPCR_MASK_POWER10 \
+ (PPC_LPCR_MASK_POWER9 | LPCR_HAIL)
+#define PPC_LPCR_MASK_POWER11 PPC_LPCR_MASK_POWER10
+
+#define POWERPC_FLAGS_POWER_COMMON \
+ (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | \
+ POWERPC_FLAG_VSX | POWERPC_FLAG_SCV)
+
+#define POWERPC_FLAGS_POWER9 \
+ (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_TM)
+#define POWERPC_FLAGS_POWER10 \
+ (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_BHRB)
+#define POWERPC_FLAGS_POWER11 POWERPC_FLAGS_POWER10
+
+#endif /* TARGET_PPC_CPU_INIT_H */
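
The new header defines each generation's masks by extending the previous one, so POWER11 is expressed purely as reuse of the POWER10 definitions. The sketch below demonstrates only that composition pattern; it is not part of the diff, and the DEMO_* bit values are invented stand-ins for the real PCR_COMPAT_* flags.

#include <assert.h>

#define DEMO_PCR_COMPAT_2_05   (1u << 0)
#define DEMO_PCR_COMPAT_2_06   (1u << 1)
#define DEMO_PCR_COMPAT_2_07   (1u << 2)
#define DEMO_PCR_COMPAT_3_00   (1u << 3)
#define DEMO_PCR_COMPAT_3_10   (1u << 4)

/* Same layering as cpu_init.h: each generation adds bits to the last,
 * and POWER11 adds nothing beyond POWER10. */
#define DEMO_PCR_SUPPORTED_POWER9 \
    (DEMO_PCR_COMPAT_3_00 | DEMO_PCR_COMPAT_2_07 | \
     DEMO_PCR_COMPAT_2_06 | DEMO_PCR_COMPAT_2_05)
#define DEMO_PCR_SUPPORTED_POWER10 \
    (DEMO_PCR_SUPPORTED_POWER9 | DEMO_PCR_COMPAT_3_10)
#define DEMO_PCR_SUPPORTED_POWER11 DEMO_PCR_SUPPORTED_POWER10

int main(void)
{
    /* POWER10 strictly extends POWER9; POWER11 is identical to POWER10. */
    assert((DEMO_PCR_SUPPORTED_POWER10 & DEMO_PCR_SUPPORTED_POWER9)
           == DEMO_PCR_SUPPORTED_POWER9);
    assert(DEMO_PCR_SUPPORTED_POWER11 == DEMO_PCR_SUPPORTED_POWER10);
    return 0;
}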
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
index 5967ea0..ecc3f79 100644
--- a/target/ppc/dfp_helper.c
+++ b/target/ppc/dfp_helper.c
@@ -249,7 +249,7 @@ static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
fprf = 0x05;
break;
default:
- assert(0); /* should never get here */
+ g_assert_not_reached();
}
dfp->env->fpscr &= ~FP_FPRF;
dfp->env->fpscr |= (fprf << FPSCR_FPRF);
@@ -1243,7 +1243,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
} else if (decNumberIsQNaN(&dfp.b)) { \
vt.VsrD(1) = -2; \
} else { \
- assert(0); \
+ g_assert_not_reached(); \
} \
set_dfp64(t, &vt); \
} else { \
@@ -1252,7 +1252,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
} else if ((size) == 128) { \
vt.VsrD(1) = dfp.b.exponent + 6176; \
} else { \
- assert(0); \
+ g_assert_not_reached(); \
} \
set_dfp64(t, &vt); \
} \
@@ -1300,7 +1300,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
raw_inf = 0x1e000; \
bias = 6176; \
} else { \
- assert(0); \
+ g_assert_not_reached(); \
} \
\
if (unlikely((exp < 0) || (exp > max_exp))) { \
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 0cd5426..1efdc40 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -19,22 +19,17 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/memory.h"
+#include "system/tcg.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "trace.h"
-#ifdef CONFIG_TCG
-#include "sysemu/tcg.h"
-#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#endif
-
/*****************************************************************************/
/* Exception processing */
#ifndef CONFIG_USER_ONLY
@@ -136,27 +131,6 @@ static void dump_hcall(CPUPPCState *env)
env->nip);
}
-#ifdef CONFIG_TCG
-/* Return true iff byteswap is needed to load instruction */
-static inline bool insn_need_byteswap(CPUArchState *env)
-{
- /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
- return !!(env->msr & ((target_ulong)1 << MSR_LE));
-}
-
-static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
-{
- uint32_t insn = cpu_ldl_code(env, addr);
-
- if (insn_need_byteswap(env)) {
- insn = bswap32(insn);
- }
-
- return insn;
-}
-
-#endif
-
static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
{
const char *es;
@@ -324,10 +298,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
}
ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
- if (ail == 0) {
- return;
- }
- if (ail == 1) {
+ if (ail == 0 || ail == 1) {
/* AIL=1 is reserved, treat it like AIL=0 */
return;
}
@@ -351,10 +322,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
} else {
ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
}
- if (ail == 0) {
- return;
- }
- if (ail == 1 || ail == 2) {
+ if (ail == 0 || ail == 1 || ail == 2) {
/* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
return;
}
@@ -426,57 +394,14 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
env->reserve_addr = -1;
}
-#ifdef CONFIG_TCG
-/*
- * This stops the machine and logs CPU state without killing QEMU (like
- * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
- * so the machine can still be debugged.
- */
-static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
-{
- CPUState *cs = env_cpu(env);
- FILE *f;
-
- f = qemu_log_trylock();
- if (f) {
- fprintf(f, "Entering checkstop state: %s\n", reason);
- cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- qemu_log_unlock(f);
- }
-
- /*
- * This stops the machine and logs CPU state without killing QEMU
- * (like cpu_abort()) so the machine can still be debugged (because
- * it is often a guest error).
- */
- qemu_system_guest_panicked(NULL);
- cpu_loop_exit_noexc(cs);
-}
-
-#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
-void helper_attn(CPUPPCState *env)
-{
- /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
- if ((*env->check_attn)(env)) {
- powerpc_checkstop(env, "host executed attn");
- } else {
- raise_exception_err(env, POWERPC_EXCP_HV_EMU,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
- }
-}
-#endif
-#endif /* CONFIG_TCG */
-
static void powerpc_mcheck_checkstop(CPUPPCState *env)
{
/* KVM guests always have MSR[ME] enabled */
-#ifdef CONFIG_TCG
if (FIELD_EX64(env->msr, MSR, ME)) {
return;
}
-
+ assert(tcg_enabled());
powerpc_checkstop(env, "machine check with MSR[ME]=0");
-#endif
}
static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
@@ -1626,7 +1551,7 @@ static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
}
#endif /* TARGET_PPC64 */
-static void powerpc_excp(PowerPCCPU *cpu, int excp)
+void powerpc_excp(PowerPCCPU *cpu, int excp)
{
CPUPPCState *env = &cpu->env;
@@ -1661,6 +1586,7 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_POWER8:
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
+ case POWERPC_EXCP_POWER11:
powerpc_excp_books(cpu, excp);
break;
default:
@@ -1682,51 +1608,54 @@ void ppc_cpu_do_interrupt(CPUState *cs)
PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
-static int p7_interrupt_powersave(CPUPPCState *env)
+static int p7_interrupt_powersave(uint32_t pending_interrupts,
+ target_ulong lpcr)
{
- if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
+ if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (lpcr & LPCR_P7_PECE0)) {
return PPC_INTERRUPT_EXT;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
+ if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (lpcr & LPCR_P7_PECE1)) {
return PPC_INTERRUPT_DECR;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
+ (lpcr & LPCR_P7_PECE2)) {
return PPC_INTERRUPT_MCK;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
+ (lpcr & LPCR_P7_PECE2)) {
return PPC_INTERRUPT_HMI;
}
- if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
-static int p7_next_unmasked_interrupt(CPUPPCState *env)
+static int p7_next_unmasked_interrupt(CPUPPCState *env,
+ uint32_t pending_interrupts,
+ target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
/* Ignore MSR[EE] when coming out of some power management states */
bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
- assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
+ assert((pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
if (cs->halted) {
/* LPCR[PECE] controls which interrupts can exit power-saving mode */
- return p7_interrupt_powersave(env);
+ return p7_interrupt_powersave(pending_interrupts, lpcr);
}
/* Machine check exception */
- if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
@@ -1736,9 +1665,9 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
}
/* External interrupt can ignore MSR:EE under some circumstances */
- if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
- bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(lpcr & LPCR_LPES0);
+ bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -1748,10 +1677,10 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
- if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
}
@@ -1764,39 +1693,42 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
-static int p8_interrupt_powersave(CPUPPCState *env)
+static int p8_interrupt_powersave(uint32_t pending_interrupts,
+ target_ulong lpcr)
{
- if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
+ if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (lpcr & LPCR_P8_PECE2)) {
return PPC_INTERRUPT_EXT;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
+ if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (lpcr & LPCR_P8_PECE3)) {
return PPC_INTERRUPT_DECR;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
+ (lpcr & LPCR_P8_PECE4)) {
return PPC_INTERRUPT_MCK;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
+ (lpcr & LPCR_P8_PECE4)) {
return PPC_INTERRUPT_HMI;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
+ if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
+ (lpcr & LPCR_P8_PECE0)) {
return PPC_INTERRUPT_DOORBELL;
}
- if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
+ if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
+ (lpcr & LPCR_P8_PECE1)) {
return PPC_INTERRUPT_HDOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
-static int p8_next_unmasked_interrupt(CPUPPCState *env)
+static int p8_next_unmasked_interrupt(CPUPPCState *env,
+ uint32_t pending_interrupts,
+ target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
@@ -1807,18 +1739,18 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
if (cs->halted) {
/* LPCR[PECE] controls which interrupts can exit power-saving mode */
- return p8_interrupt_powersave(env);
+ return p8_interrupt_powersave(pending_interrupts, lpcr);
}
/* Machine check exception */
- if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
- bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ bool hdice = !!(lpcr & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@@ -1826,9 +1758,9 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
}
/* External interrupt can ignore MSR:EE under some circumstances */
- if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
- bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(lpcr & LPCR_LPES0);
+ bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -1838,20 +1770,20 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
- if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* EBB exception */
- if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@@ -1871,60 +1803,65 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
-static int p9_interrupt_powersave(CPUPPCState *env)
+static int p9_interrupt_powersave(CPUPPCState *env,
+ uint32_t pending_interrupts,
+ target_ulong lpcr)
{
+
/* External Exception */
- if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
- (env->spr[SPR_LPCR] & LPCR_EEE)) {
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (lpcr & LPCR_EEE)) {
+ bool heic = !!(lpcr & LPCR_HEIC);
if (!heic || !FIELD_EX64_HV(env->msr) ||
FIELD_EX64(env->msr, MSR, PR)) {
return PPC_INTERRUPT_EXT;
}
}
/* Decrementer Exception */
- if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
- (env->spr[SPR_LPCR] & LPCR_DEE)) {
+ if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (lpcr & LPCR_DEE)) {
return PPC_INTERRUPT_DECR;
}
/* Machine Check or Hypervisor Maintenance Exception */
- if (env->spr[SPR_LPCR] & LPCR_OEE) {
- if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ if (lpcr & LPCR_OEE) {
+ if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
- if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
+ if (pending_interrupts & PPC_INTERRUPT_HMI) {
return PPC_INTERRUPT_HMI;
}
}
/* Privileged Doorbell Exception */
- if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
- (env->spr[SPR_LPCR] & LPCR_PDEE)) {
+ if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
+ (lpcr & LPCR_PDEE)) {
return PPC_INTERRUPT_DOORBELL;
}
/* Hypervisor Doorbell Exception */
- if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
- (env->spr[SPR_LPCR] & LPCR_HDEE)) {
+ if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
+ (lpcr & LPCR_HDEE)) {
return PPC_INTERRUPT_HDOORBELL;
}
/* Hypervisor virtualization exception */
- if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
- (env->spr[SPR_LPCR] & LPCR_HVEE)) {
+ if ((pending_interrupts & PPC_INTERRUPT_HVIRT) &&
+ (lpcr & LPCR_HVEE)) {
return PPC_INTERRUPT_HVIRT;
}
- if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
-static int p9_next_unmasked_interrupt(CPUPPCState *env)
+static int p9_next_unmasked_interrupt(CPUPPCState *env,
+ uint32_t pending_interrupts,
+ target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
/* Ignore MSR[EE] when coming out of some power management states */
bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
- assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
+ assert((pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
if (cs->halted) {
if (env->spr[SPR_PSSCR] & PSSCR_EC) {
@@ -1932,7 +1869,7 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
* When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
* wakeup the processor
*/
- return p9_interrupt_powersave(env);
+ return p9_interrupt_powersave(env, pending_interrupts, lpcr);
} else {
/*
* When it's clear, any system-caused exception exits power-saving
@@ -1943,14 +1880,14 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
/* Machine check exception */
- if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
- bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ bool hdice = !!(lpcr & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@@ -1958,18 +1895,18 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
/* Hypervisor virtualization interrupt */
- if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
+ if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
/* LPCR will be clear when not supported so this will work */
- bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
+ bool hvice = !!(lpcr & LPCR_HVICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
return PPC_INTERRUPT_HVIRT;
}
}
/* External interrupt can ignore MSR:EE under some circumstances */
- if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
- bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(lpcr & LPCR_LPES0);
+ bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -1979,20 +1916,20 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
- if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* EBB exception */
- if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@@ -2010,27 +1947,35 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
static int ppc_next_unmasked_interrupt(CPUPPCState *env)
{
+ uint32_t pending_interrupts = env->pending_interrupts;
+ target_ulong lpcr = env->spr[SPR_LPCR];
+ bool async_deliver;
+
+ if (unlikely(env->quiesced)) {
+ return 0;
+ }
+
#ifdef TARGET_PPC64
switch (env->excp_model) {
case POWERPC_EXCP_POWER7:
- return p7_next_unmasked_interrupt(env);
+ return p7_next_unmasked_interrupt(env, pending_interrupts, lpcr);
case POWERPC_EXCP_POWER8:
- return p8_next_unmasked_interrupt(env);
+ return p8_next_unmasked_interrupt(env, pending_interrupts, lpcr);
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
- return p9_next_unmasked_interrupt(env);
+ case POWERPC_EXCP_POWER11:
+ return p9_next_unmasked_interrupt(env, pending_interrupts, lpcr);
default:
break;
}
#endif
- bool async_deliver;
/* External reset */
- if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
/* Machine check exception */
- if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
#if 0 /* TODO */
@@ -2049,9 +1994,9 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
/* Hypervisor decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
- bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ bool hdice = !!(lpcr & LPCR_HDICE);
if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@@ -2059,18 +2004,18 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
}
/* Hypervisor virtualization interrupt */
- if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
+ if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
/* LPCR will be clear when not supported so this will work */
- bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
+ bool hvice = !!(lpcr & LPCR_HVICE);
if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
return PPC_INTERRUPT_HVIRT;
}
}
/* External interrupt can ignore MSR:EE under some circumstances */
- if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
- bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(lpcr & LPCR_LPES0);
+ bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -2080,45 +2025,45 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
}
if (FIELD_EX64(env->msr, MSR, CE)) {
/* External critical interrupt */
- if (env->pending_interrupts & PPC_INTERRUPT_CEXT) {
+ if (pending_interrupts & PPC_INTERRUPT_CEXT) {
return PPC_INTERRUPT_CEXT;
}
}
if (async_deliver != 0) {
/* Watchdog timer on embedded PowerPC */
- if (env->pending_interrupts & PPC_INTERRUPT_WDT) {
+ if (pending_interrupts & PPC_INTERRUPT_WDT) {
return PPC_INTERRUPT_WDT;
}
- if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
return PPC_INTERRUPT_CDOORBELL;
}
/* Fixed interval timer on embedded PowerPC */
- if (env->pending_interrupts & PPC_INTERRUPT_FIT) {
+ if (pending_interrupts & PPC_INTERRUPT_FIT) {
return PPC_INTERRUPT_FIT;
}
/* Programmable interval timer on embedded PowerPC */
- if (env->pending_interrupts & PPC_INTERRUPT_PIT) {
+ if (pending_interrupts & PPC_INTERRUPT_PIT) {
return PPC_INTERRUPT_PIT;
}
/* Decrementer exception */
- if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
- if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
- if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* Thermal interrupt */
- if (env->pending_interrupts & PPC_INTERRUPT_THERM) {
+ if (pending_interrupts & PPC_INTERRUPT_THERM) {
return PPC_INTERRUPT_THERM;
}
/* EBB exception */
- if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@@ -2187,7 +2132,6 @@ static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_PERFM:
- env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case 0:
@@ -2238,7 +2182,9 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_DOORBELL:
- env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ if (!env->resume_as_sreset) {
+ env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ }
if (is_book3s_arch2x(env)) {
powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
} else {
@@ -2246,11 +2192,12 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
}
break;
case PPC_INTERRUPT_HDOORBELL:
- env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ if (!env->resume_as_sreset) {
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ }
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
- env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_EBB: /* EBB exception */
@@ -2303,6 +2250,7 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
/* HDEC clears on delivery */
+ /* XXX: should not see an HDEC if resume_as_sreset. assert? */
env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
powerpc_excp(cpu, POWERPC_EXCP_HDECR);
break;
@@ -2322,15 +2270,18 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_DOORBELL:
- env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ if (!env->resume_as_sreset) {
+ env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ }
powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
break;
case PPC_INTERRUPT_HDOORBELL:
- env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ if (!env->resume_as_sreset) {
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ }
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
- env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_EBB: /* EBB exception */
@@ -2372,6 +2323,7 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
return p8_deliver_interrupt(env, interrupt);
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
+ case POWERPC_EXCP_POWER11:
return p9_deliver_interrupt(env, interrupt);
default:
break;
@@ -2444,7 +2396,6 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
- env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_THERM: /* Thermal interrupt */
@@ -2479,10 +2430,16 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
}
}
+/*
+ * A system reset is not delivered via the normal interrupt path, so halted
+ * must be cleared here to resume a CPU that was halted. Possibly this should
+ * move over to using PPC_INTERRUPT_RESET rather than async_run_on_cpu.
+ */
void ppc_cpu_do_system_reset(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
+ cs->halted = 0;
powerpc_excp(cpu, POWERPC_EXCP_RESET);
}
@@ -2504,6 +2461,7 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
/* Anything for nested required here? MSR[HV] bit? */
+ cs->halted = 0;
powerpc_set_excp_state(cpu, vector, msr);
}
@@ -2529,762 +2487,3 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-/*****************************************************************************/
-/* Exceptions processing helpers */
-
-void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
- uint32_t error_code, uintptr_t raddr)
-{
- CPUState *cs = env_cpu(env);
-
- cs->exception_index = exception;
- env->error_code = error_code;
- cpu_loop_exit_restore(cs, raddr);
-}
-
-void raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code)
-{
- raise_exception_err_ra(env, exception, error_code, 0);
-}
-
-void raise_exception(CPUPPCState *env, uint32_t exception)
-{
- raise_exception_err_ra(env, exception, 0, 0);
-}
-
-void raise_exception_ra(CPUPPCState *env, uint32_t exception,
- uintptr_t raddr)
-{
- raise_exception_err_ra(env, exception, 0, raddr);
-}
-
-#ifdef CONFIG_TCG
-void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code)
-{
- raise_exception_err_ra(env, exception, error_code, 0);
-}
-
-void helper_raise_exception(CPUPPCState *env, uint32_t exception)
-{
- raise_exception_err_ra(env, exception, 0, 0);
-}
-
-#ifndef CONFIG_USER_ONLY
-void helper_store_msr(CPUPPCState *env, target_ulong val)
-{
- uint32_t excp = hreg_store_msr(env, val, 0);
-
- if (excp != 0) {
- cpu_interrupt_exittb(env_cpu(env));
- raise_exception(env, excp);
- }
-}
-
-void helper_ppc_maybe_interrupt(CPUPPCState *env)
-{
- ppc_maybe_interrupt(env);
-}
-
-#ifdef TARGET_PPC64
-void helper_scv(CPUPPCState *env, uint32_t lev)
-{
- if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
- raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
- } else {
- raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
- }
-}
-
-void helper_pminsn(CPUPPCState *env, uint32_t insn)
-{
- CPUState *cs = env_cpu(env);
-
- cs->halted = 1;
-
- /* Condition for waking up at 0x100 */
- env->resume_as_sreset = (insn != PPC_PM_STOP) ||
- (env->spr[SPR_PSSCR] & PSSCR_EC);
-
- /* HDECR is not to wake from PM state, it may have already fired */
- if (env->resume_as_sreset) {
- PowerPCCPU *cpu = env_archcpu(env);
- ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
- }
-
- ppc_maybe_interrupt(env);
-}
-#endif /* TARGET_PPC64 */
-
-static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
-{
- /* MSR:POW cannot be set by any form of rfi */
- msr &= ~(1ULL << MSR_POW);
-
- /* MSR:TGPR cannot be set by any form of rfi */
- if (env->flags & POWERPC_FLAG_TGPR)
- msr &= ~(1ULL << MSR_TGPR);
-
-#ifdef TARGET_PPC64
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- nip = (uint32_t)nip;
- }
-#else
- nip = (uint32_t)nip;
-#endif
- /* XXX: beware: this is false if VLE is supported */
- env->nip = nip & ~((target_ulong)0x00000003);
- hreg_store_msr(env, msr, 1);
- trace_ppc_excp_rfi(env->nip, env->msr);
- /*
- * No need to raise an exception here, as rfi is always the last
- * insn of a TB
- */
- cpu_interrupt_exittb(env_cpu(env));
- /* Reset the reservation */
- env->reserve_addr = -1;
-
- /* Context synchronizing: check if TCG TLB needs flush */
- check_tlb_flush(env, false);
-}
-
-void helper_rfi(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
-}
-
-#ifdef TARGET_PPC64
-void helper_rfid(CPUPPCState *env)
-{
- /*
- * The architecture defines a number of rules for which bits can
- * change but in practice, we handle this in hreg_store_msr()
- * which will be called by do_rfi(), so there is no need to filter
- * here
- */
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
-}
-
-void helper_rfscv(CPUPPCState *env)
-{
- do_rfi(env, env->lr, env->ctr);
-}
-
-void helper_hrfid(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
-}
-
-void helper_rfebb(CPUPPCState *env, target_ulong s)
-{
- target_ulong msr = env->msr;
-
- /*
- * Handling of BESCR bits 32:33 according to PowerISA v3.1:
- *
- * "If BESCR 32:33 != 0b00 the instruction is treated as if
- * the instruction form were invalid."
- */
- if (env->spr[SPR_BESCR] & BESCR_INVALID) {
- raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
- }
-
- env->nip = env->spr[SPR_EBBRR];
-
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- env->nip = (uint32_t)env->spr[SPR_EBBRR];
- }
-
- if (s) {
- env->spr[SPR_BESCR] |= BESCR_GE;
- } else {
- env->spr[SPR_BESCR] &= ~BESCR_GE;
- }
-}
-
-/*
- * Triggers or queues an 'ebb_excp' EBB exception. All checks
- * but FSCR, HFSCR and msr_pr must be done beforehand.
- *
- * PowerISA v3.1 isn't clear about whether an EBB should be
- * postponed or cancelled if the EBB facility is unavailable.
- * Our assumption here is that the EBB is cancelled if both
- * FSCR and HFSCR EBB facilities aren't available.
- */
-static void do_ebb(CPUPPCState *env, int ebb_excp)
-{
- PowerPCCPU *cpu = env_archcpu(env);
-
- /*
- * FSCR_EBB and FSCR_IC_EBB are the same bits used with
- * HFSCR.
- */
- helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
- helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
-
- if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
- env->spr[SPR_BESCR] |= BESCR_PMEO;
- } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
- env->spr[SPR_BESCR] |= BESCR_EEO;
- }
-
- if (FIELD_EX64(env->msr, MSR, PR)) {
- powerpc_excp(cpu, ebb_excp);
- } else {
- ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
- }
-}
-
-void raise_ebb_perfm_exception(CPUPPCState *env)
-{
- bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
- env->spr[SPR_BESCR] & BESCR_PME &&
- env->spr[SPR_BESCR] & BESCR_GE;
-
- if (!perfm_ebb_enabled) {
- return;
- }
-
- do_ebb(env, POWERPC_EXCP_PERFM_EBB);
-}
-#endif /* TARGET_PPC64 */
-
-/*****************************************************************************/
-/* Embedded PowerPC specific helpers */
-void helper_40x_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
-}
-
-void helper_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
-}
-
-void helper_rfdi(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
-}
-
-void helper_rfmci(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
-}
-#endif /* !CONFIG_USER_ONLY */
-
-void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
- uint32_t flags)
-{
- if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
- ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
- ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
- ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
- ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
-}
-
-#ifdef TARGET_PPC64
-void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
- uint32_t flags)
-{
- if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
- ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
- ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
- ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
- ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
-}
-#endif /* TARGET_PPC64 */
-
-static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
-{
- const uint16_t c = 0xfffc;
- const uint64_t z0 = 0xfa2561cdf44ac398ULL;
- uint16_t z = 0, temp;
- uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
-
- for (int i = 3; i >= 0; i--) {
- k[i] = key & 0xffff;
- key >>= 16;
- }
- xleft[0] = x & 0xffff;
- xright[0] = (x >> 16) & 0xffff;
-
- for (int i = 0; i < 28; i++) {
- z = (z0 >> (63 - i)) & 1;
- temp = ror16(k[i + 3], 3) ^ k[i + 1];
- k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
- }
-
- for (int i = 0; i < 8; i++) {
- eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
- eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
- eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
- eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
- }
-
- for (int i = 0; i < 32; i++) {
- fxleft[i] = (rol16(xleft[i], 1) &
- rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
- xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
- xright[i + 1] = xleft[i];
- }
-
- return (((uint32_t)xright[32]) << 16) | xleft[32];
-}
-
-static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
-{
- uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
- uint64_t stage1_h, stage1_l;
-
- for (int i = 0; i < 4; i++) {
- stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
- stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
- stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
- stage0_l |= (ra & 0xff) << (8 * 2 * i);
- rb >>= 8;
- ra >>= 8;
- }
-
- stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
- stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
- stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
- stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
-
- return stage1_h ^ stage1_l;
-}
-
-static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
- target_ulong rb, uint64_t key, bool store)
-{
- uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
-
- if (store) {
- cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
- } else {
- loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
- if (loaded_hash != calculated_hash) {
- raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP, GETPC());
- }
- }
-}
-
-#include "qemu/guest-random.h"
-
-#ifdef TARGET_PPC64
-#define HELPER_HASH(op, key, store, dexcr_aspect) \
-void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
- target_ulong rb) \
-{ \
- if (env->msr & R_MSR_PR_MASK) { \
- if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
- env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
- return; \
- } else if (!(env->msr & R_MSR_HV_MASK)) { \
- if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
- env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
- return; \
- } else if (!(env->msr & R_MSR_S_MASK)) { \
- if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
- return; \
- } \
- \
- do_hash(env, ea, ra, rb, key, store); \
-}
-#else
-#define HELPER_HASH(op, key, store, dexcr_aspect) \
-void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
- target_ulong rb) \
-{ \
- do_hash(env, ea, ra, rb, key, store); \
-}
-#endif /* TARGET_PPC64 */
-
-HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
-HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
-HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
-HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
-
-#ifndef CONFIG_USER_ONLY
-/* Embedded.Processor Control */
-static int dbell2irq(target_ulong rb)
-{
- int msg = rb & DBELL_TYPE_MASK;
- int irq = -1;
-
- switch (msg) {
- case DBELL_TYPE_DBELL:
- irq = PPC_INTERRUPT_DOORBELL;
- break;
- case DBELL_TYPE_DBELL_CRIT:
- irq = PPC_INTERRUPT_CDOORBELL;
- break;
- case DBELL_TYPE_G_DBELL:
- case DBELL_TYPE_G_DBELL_CRIT:
- case DBELL_TYPE_G_DBELL_MC:
- /* XXX implement */
- default:
- break;
- }
-
- return irq;
-}
-
-void helper_msgclr(CPUPPCState *env, target_ulong rb)
-{
- int irq = dbell2irq(rb);
-
- if (irq < 0) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), irq, 0);
-}
-
-void helper_msgsnd(target_ulong rb)
-{
- int irq = dbell2irq(rb);
- int pir = rb & DBELL_PIRTAG_MASK;
- CPUState *cs;
-
- if (irq < 0) {
- return;
- }
-
- bql_lock();
- CPU_FOREACH(cs) {
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *cenv = &cpu->env;
-
- if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
- ppc_set_irq(cpu, irq, 1);
- }
- }
- bql_unlock();
-}
-
-/* Server Processor Control */
-
-static bool dbell_type_server(target_ulong rb)
-{
- /*
- * A Directed Hypervisor Doorbell message is sent only if the
- * message type is 5. All other types are reserved and the
- * instruction is a no-op
- */
- return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
-}
-
-static inline bool dbell_bcast_core(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
-}
-
-static inline bool dbell_bcast_subproc(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
-}
-
-void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
-{
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
-}
-
-void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
-{
- int pir = rb & DBELL_PROCIDTAG_MASK;
- bool brdcast = false;
- CPUState *cs, *ccs;
- PowerPCCPU *cpu;
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- cpu = ppc_get_vcpu_by_pir(pir);
- if (!cpu) {
- return;
- }
- cs = CPU(cpu);
-
- if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
- (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
- brdcast = true;
- }
-
- if (cs->nr_threads == 1 || !brdcast) {
- ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
- return;
- }
-
- /*
- * Why is bql needed for walking CPU list? Answer seems to be because ppc
- * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
- * so could this be removed?
- */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
- }
- bql_unlock();
-}
-
-#ifdef TARGET_PPC64
-void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
-{
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
-}
-
-/*
- * sends a message to another thread on the same
- * multi-threaded processor
- */
-void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
-{
- CPUState *cs = env_cpu(env);
- PowerPCCPU *cpu = env_archcpu(env);
- CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- int ttir = rb & PPC_BITMASK(57, 63);
-
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
-
- if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
- nr_threads = 1; /* msgsndp behaves as 1-thread in LPAR-per-thread mode*/
- }
-
- if (!dbell_type_server(rb) || ttir >= nr_threads) {
- return;
- }
-
- if (nr_threads == 1) {
- ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1);
- return;
- }
-
- /* Does iothread need to be locked for walking CPU list? */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- PowerPCCPU *ccpu = POWERPC_CPU(ccs);
- uint32_t thread_id = ppc_cpu_tir(ccpu);
-
- if (ttir == thread_id) {
- ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
- bql_unlock();
- return;
- }
- }
-
- g_assert_not_reached();
-}
-#endif /* TARGET_PPC64 */
-
-/* Single-step tracing */
-void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
-{
- uint32_t error_code = 0;
- if (env->insns_flags2 & PPC2_ISA207S) {
- /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
- env->spr[SPR_POWER_SIAR] = prev_ip;
- error_code = PPC_BIT(33);
- }
- raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
-}
-
-void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- CPUPPCState *env = cpu_env(cs);
- uint32_t insn;
-
- /* Restore state and reload the insn we executed, for filling in DSISR. */
- cpu_restore_state(cs, retaddr);
- insn = ppc_ldl_code(env, env->nip);
-
- switch (env->mmu_model) {
- case POWERPC_MMU_SOFT_4xx:
- env->spr[SPR_40x_DEAR] = vaddr;
- break;
- case POWERPC_MMU_BOOKE:
- case POWERPC_MMU_BOOKE206:
- env->spr[SPR_BOOKE_DEAR] = vaddr;
- break;
- default:
- env->spr[SPR_DAR] = vaddr;
- break;
- }
-
- cs->exception_index = POWERPC_EXCP_ALIGN;
- env->error_code = insn & 0x03FF0000;
- cpu_loop_exit(cs);
-}
-
-void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
- vaddr vaddr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr)
-{
- CPUPPCState *env = cpu_env(cs);
-
- switch (env->excp_model) {
-#if defined(TARGET_PPC64)
- case POWERPC_EXCP_POWER8:
- case POWERPC_EXCP_POWER9:
- case POWERPC_EXCP_POWER10:
- /*
- * Machine check codes can be found in processor User Manual or
- * Linux or skiboot source.
- */
- if (access_type == MMU_DATA_LOAD) {
- env->spr[SPR_DAR] = vaddr;
- env->spr[SPR_DSISR] = PPC_BIT(57);
- env->error_code = PPC_BIT(42);
-
- } else if (access_type == MMU_DATA_STORE) {
- /*
- * MCE for stores in POWER is asynchronous so hardware does
- * not set DAR, but QEMU can do better.
- */
- env->spr[SPR_DAR] = vaddr;
- env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
- env->error_code |= PPC_BIT(42);
-
- } else { /* Fetch */
- /*
- * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
- * the instruction, so that must always be clear for fetches.
- */
- env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
- }
- break;
-#endif
- default:
- /*
- * TODO: Check behaviour for other CPUs, for now do nothing.
- * Could add a basic MCE even if real hardware ignores.
- */
- return;
- }
-
- cs->exception_index = POWERPC_EXCP_MCHECK;
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-void ppc_cpu_debug_excp_handler(CPUState *cs)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- if (cs->watchpoint_hit) {
- if (cs->watchpoint_hit->flags & BP_CPU) {
- env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
- env->spr[SPR_DSISR] = PPC_BIT(41);
- cs->watchpoint_hit = NULL;
- raise_exception(env, POWERPC_EXCP_DSI);
- }
- cs->watchpoint_hit = NULL;
- } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
- raise_exception_err(env, POWERPC_EXCP_TRACE,
- PPC_BIT(33) | PPC_BIT(43));
- }
- }
-#endif
-}
-
-bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- target_ulong priv;
-
- priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
- switch (priv) {
- case 0x1: /* problem */
- return env->msr & ((target_ulong)1 << MSR_PR);
- case 0x2: /* supervisor */
- return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
- !(env->msr & ((target_ulong)1 << MSR_HV)));
- case 0x3: /* hypervisor */
- return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
- (env->msr & ((target_ulong)1 << MSR_HV)));
- default:
- g_assert_not_reached();
- }
- }
-#endif
-
- return false;
-}
-
-bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
-{
-#if defined(TARGET_PPC64)
- CPUPPCState *env = cpu_env(cs);
-
- if (env->insns_flags2 & PPC2_ISA207S) {
- if (wp == env->dawr0_watchpoint) {
- uint32_t dawrx = env->spr[SPR_DAWRX0];
- bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
- bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
- bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
- bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
- bool pr = extract32(dawrx, PPC_BIT_NR(62), 1);
-
- if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
- return false;
- } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
- return false;
- } else if (!sv) {
- return false;
- }
-
- if (!wti) {
- if (env->msr & ((target_ulong)1 << MSR_DR)) {
- if (!wt) {
- return false;
- }
- } else {
- if (wt) {
- return false;
- }
- }
- }
-
- return true;
- }
- }
-#endif
-
- return false;
-}
-
-#endif /* !CONFIG_USER_ONLY */
-#endif /* CONFIG_TCG */
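Not part of the patch: the p7/p8/p9 next-unmasked-interrupt changes above all follow one pattern -- take a snapshot of the pending bitmap and LPCR once, then test bits in a fixed priority order and return the first deliverable one. A toy, self-contained version of that priority walk (bit names and priorities are invented, not QEMU's):

#include <stdint.h>
#include <stdio.h>

/* Invented pending-interrupt bits, listed from highest to lowest priority. */
enum {
    IRQ_MCK  = 1u << 0,
    IRQ_HDEC = 1u << 1,
    IRQ_EXT  = 1u << 2,
    IRQ_DEC  = 1u << 3,
};

/*
 * Return the highest-priority interrupt that is both pending and enabled,
 * working on snapshots of the two words rather than re-reading live state.
 */
static unsigned next_unmasked(uint32_t pending, uint32_t enabled)
{
    static const unsigned priority[] = { IRQ_MCK, IRQ_HDEC, IRQ_EXT, IRQ_DEC };
    size_t i;

    for (i = 0; i < sizeof(priority) / sizeof(priority[0]); i++) {
        if (pending & enabled & priority[i]) {
            return priority[i];
        }
    }
    return 0; /* nothing deliverable */
}

int main(void)
{
    /* DEC and EXT pending, but only DEC enabled: DEC (0x8) is delivered. */
    printf("%#x\n", next_unmasked(IRQ_DEC | IRQ_EXT, IRQ_DEC));
    return 0;
}

Passing snapshots down, as the refactored helpers now do, means each decision is made against a single consistent read of the registers.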
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 51bce99..07b782f 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
@@ -155,8 +154,7 @@ void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
} else if (tp##_is_infinity(arg)) { \
fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF; \
} else { \
- float_status dummy = { }; /* snan_bit_is_one = 0 */ \
- if (tp##_is_signaling_nan(arg, &dummy)) { \
+ if (tp##_is_signaling_nan(arg, &env->fp_status)) { \
fprf = 0x00 << FPSCR_FPRF; \
} else { \
fprf = 0x11 << FPSCR_FPRF; \
@@ -1599,14 +1597,14 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
do_float_check_status(env, sfifprf, GETPC()); \
}
-VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
-VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
-VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
-VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
-VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
-VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
-VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
-VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(XSADDDP, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(XSADDSP, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(XVADDDP, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(XVADDSP, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(XSSUBDP, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(XSSUBSP, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(XVSUBDP, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(XVSUBSP, sub, 4, float32, VsrW(i), 0, 0)
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
@@ -1676,10 +1674,10 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
do_float_check_status(env, sfifprf, GETPC()); \
}
-VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
-VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
-VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
-VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
+VSX_MUL(XSMULDP, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(XSMULSP, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(XVMULDP, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(XVMULSP, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
@@ -1750,10 +1748,10 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
do_float_check_status(env, sfifprf, GETPC()); \
}
-VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
-VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
-VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
-VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
+VSX_DIV(XSDIVDP, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(XSDIVSP, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(XVDIVDP, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(XVDIVSP, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
@@ -2383,12 +2381,12 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
do_float_check_status(env, false, GETPC()); \
}
-VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
-VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
-VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
-VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
-VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
-VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(XSMAXDP, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(XVMAXDP, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(XVMAXSP, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(XSMINDP, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(XVMINDP, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(XVMINSP, minnum, 4, float32, VsrW(i))
#define VSX_MAX_MINC(name, max, tp, fld) \
void helper_##name(CPUPPCState *env, \
@@ -2527,14 +2525,14 @@ uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
return crf6; \
}
-VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
-VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
-VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
-VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
-VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
-VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
-VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
-VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
+VSX_CMP(XVCMPEQDP, 2, float64, VsrD(i), eq, 0, 1)
+VSX_CMP(XVCMPGEDP, 2, float64, VsrD(i), le, 1, 1)
+VSX_CMP(XVCMPGTDP, 2, float64, VsrD(i), lt, 1, 1)
+VSX_CMP(XVCMPNEDP, 2, float64, VsrD(i), eq, 0, 0)
+VSX_CMP(XVCMPEQSP, 4, float32, VsrW(i), eq, 0, 1)
+VSX_CMP(XVCMPGESP, 4, float32, VsrW(i), le, 1, 1)
+VSX_CMP(XVCMPGTSP, 4, float32, VsrW(i), lt, 1, 1)
+VSX_CMP(XVCMPNESP, 4, float32, VsrW(i), eq, 0, 0)
/*
* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 76b8f25..ca414f2 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -28,6 +28,8 @@ DEF_HELPER_2(store_pcr, void, env, tl)
DEF_HELPER_2(store_ciabr, void, env, tl)
DEF_HELPER_2(store_dawr0, void, env, tl)
DEF_HELPER_2(store_dawrx0, void, env, tl)
+DEF_HELPER_2(store_dawr1, void, env, tl)
+DEF_HELPER_2(store_dawrx1, void, env, tl)
DEF_HELPER_2(store_mmcr0, void, env, tl)
DEF_HELPER_2(store_mmcr1, void, env, tl)
DEF_HELPER_2(store_mmcrA, void, env, tl)
@@ -46,8 +48,10 @@ DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_4(lsw, void, env, tl, i32, i32)
DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32)
-DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, i32)
-DEF_HELPER_FLAGS_3(dcbzep, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, int)
+#ifdef TARGET_PPC64
+DEF_HELPER_FLAGS_2(dcbzl, TCG_CALL_NO_WG, void, env, tl)
+#endif
DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_2(icbiep, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
@@ -201,18 +205,18 @@ DEF_HELPER_FLAGS_3(vsro, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(vsrv, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(vslv, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VPRTYBQ, TCG_CALL_NO_RWG, void, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsubsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsubshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsubsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vaddubs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vadduhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDSBS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDSHS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDSWS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBSBS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBSHS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBSWS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDUBS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDUHS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VADDUWS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBUBS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBUHS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(VSUBUWS, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_3(VADDUQM, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_4(VADDECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
DEF_HELPER_FLAGS_4(VADDEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
@@ -275,10 +279,10 @@ DEF_HELPER_3(STVEBX, void, env, avr, tl)
DEF_HELPER_3(STVEHX, void, env, avr, tl)
DEF_HELPER_3(STVEWX, void, env, avr, tl)
#if defined(TARGET_PPC64)
-DEF_HELPER_4(lxvl, void, env, tl, vsr, tl)
-DEF_HELPER_4(lxvll, void, env, tl, vsr, tl)
-DEF_HELPER_4(stxvl, void, env, tl, vsr, tl)
-DEF_HELPER_4(stxvll, void, env, tl, vsr, tl)
+DEF_HELPER_4(LXVL, void, env, tl, vsr, tl)
+DEF_HELPER_4(LXVLL, void, env, tl, vsr, tl)
+DEF_HELPER_4(STXVL, void, env, tl, vsr, tl)
+DEF_HELPER_4(STXVLL, void, env, tl, vsr, tl)
#endif
DEF_HELPER_4(vsumsws, void, env, avr, avr, avr)
DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr)
@@ -362,12 +366,12 @@ DEF_HELPER_FLAGS_4(bcdsr, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(bcdtrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(bcdutrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
-DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSADDDP, void, env, vsr, vsr, vsr)
DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSSUBDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMULDP, void, env, vsr, vsr, vsr)
DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSDIVDP, void, env, vsr, vsr, vsr)
DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr)
DEF_HELPER_3(xsredp, void, env, vsr, vsr)
DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
@@ -390,8 +394,8 @@ DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
-DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMAXDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMINDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMAXCDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMINCDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMAXJDP, void, env, vsr, vsr, vsr)
@@ -437,10 +441,10 @@ DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr)
DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSADDSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSSUBSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMULSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSDIVSP, void, env, vsr, vsr, vsr)
DEF_HELPER_3(xsresp, void, env, vsr, vsr)
DEF_HELPER_2(xsrsp, i64, env, i64)
DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
@@ -459,10 +463,10 @@ DEF_HELPER_5(XSNMADDQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBQP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBQPO, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVADDDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVSUBDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMULDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVDIVDP, void, env, vsr, vsr, vsr)
DEF_HELPER_3(xvredp, void, env, vsr, vsr)
DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr)
DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr)
@@ -472,12 +476,12 @@ DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMAXDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMINDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPEQDP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPGEDP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPGTDP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPNEDP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr)
DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr)
@@ -493,10 +497,10 @@ DEF_HELPER_3(xvrdpim, void, env, vsr, vsr)
DEF_HELPER_3(xvrdpip, void, env, vsr, vsr)
DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr)
-DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVADDSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVSUBSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMULSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVDIVSP, void, env, vsr, vsr, vsr)
DEF_HELPER_3(xvresp, void, env, vsr, vsr)
DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr)
DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr)
@@ -506,12 +510,12 @@ DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
-DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMAXSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XVMINSP, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPEQSP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPGESP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPGTSP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(XVCMPNESP, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
@@ -731,6 +735,8 @@ DEF_HELPER_2(store_tfmr, void, env, tl)
DEF_HELPER_FLAGS_2(store_sprc, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(load_sprd, TCG_CALL_NO_RWG_SE, tl, env)
DEF_HELPER_FLAGS_2(store_sprd, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_pmsr, TCG_CALL_NO_RWG_SE, tl, env)
+DEF_HELPER_FLAGS_2(store_pmcr, TCG_CALL_NO_RWG, void, env, tl)
#endif
DEF_HELPER_2(store_sdr1, void, env, tl)
DEF_HELPER_2(store_pidr, void, env, tl)
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 02076e9..7e57268 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -20,13 +20,15 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/main-loop.h"
-#include "exec/exec-all.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "exec/cputlb.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "helper_regs.h"
#include "power8-pmu.h"
#include "cpu-models.h"
#include "spr_common.h"
+#include "accel/tcg/cpu-ops.h"
+#include "internal.h"
/* Swap temporary saved registers with GPRs */
void hreg_swap_gpr_tgpr(CPUPPCState *env)
@@ -83,15 +85,16 @@ static bool hreg_check_bhrb_enable(CPUPPCState *env)
static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
{
uint32_t hflags = 0;
-
#if defined(TARGET_PPC64)
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+
+ if (mmcr0 & MMCR0_PMCC0) {
hflags |= 1 << HFLAGS_PMCC0;
}
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
+ if (mmcr0 & MMCR0_PMCC1) {
hflags |= 1 << HFLAGS_PMCC1;
}
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
+ if (mmcr0 & MMCR0_PMCjCE) {
hflags |= 1 << HFLAGS_PMCJCE;
}
if (hreg_check_bhrb_enable(env)) {
@@ -101,9 +104,9 @@ static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
#ifndef CONFIG_USER_ONLY
if (env->pmc_ins_cnt) {
hflags |= 1 << HFLAGS_INSN_CNT;
- }
- if (env->pmc_ins_cnt & 0x1e) {
- hflags |= 1 << HFLAGS_PMC_OTHER;
+ if (env->pmc_ins_cnt & 0x1e) {
+ hflags |= 1 << HFLAGS_PMC_OTHER;
+ }
}
#endif
#endif
@@ -143,10 +146,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
if (ppc_flags & POWERPC_FLAG_DE) {
target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
- if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) {
+ if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(msr, MSR, DE)) {
hflags |= 1 << HFLAGS_SE;
}
- if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) {
+ if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(msr, MSR, DE)) {
hflags |= 1 << HFLAGS_BE;
}
} else {
@@ -254,26 +257,23 @@ void hreg_update_pmu_hflags(CPUPPCState *env)
env->hflags |= hreg_compute_pmu_hflags_value(env);
}
-#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
+TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs)
{
+ CPUPPCState *env = cpu_env(cs);
uint32_t hflags_current = env->hflags;
- uint32_t hflags_rebuilt;
- *pc = env->nip;
- *cs_base = 0;
- *flags = hflags_current;
-
- hflags_rebuilt = hreg_compute_hflags_value(env);
+#ifdef CONFIG_DEBUG_TCG
+ uint32_t hflags_rebuilt = hreg_compute_hflags_value(env);
if (unlikely(hflags_current != hflags_rebuilt)) {
cpu_abort(env_cpu(env),
"TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
hflags_current, hflags_rebuilt);
}
-}
#endif
+ return (TCGTBCPUState){ .pc = env->nip, .flags = hflags_current };
+}
+
void cpu_interrupt_exittb(CPUState *cs)
{
/*
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index 8196c13..b928c2c 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -20,6 +20,8 @@
#ifndef HELPER_REGS_H
#define HELPER_REGS_H
+#include "target/ppc/cpu.h"
+
void hreg_swap_gpr_tgpr(CPUPPCState *env);
void hreg_compute_hflags(CPUPPCState *env);
void hreg_update_pmu_hflags(CPUPPCState *env);
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index ee33141..e53fd28 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -241,6 +241,9 @@
&XX3 xt xa xb
@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
+&XX3_rc xt xa xb rc:bool
+@XX3_rc ...... ..... ..... ..... rc:1 ....... ... &XX3_rc xt=%xx_xt xa=%xx_xa xb=%xx_xb
+
# 32 bit GER instructions have all mask bits considered 1
&MMIRR_XX3 xa xb xt pmsk xmsk ymsk
%xx_at 23:3
@@ -832,6 +835,14 @@ VADDCUW 000100 ..... ..... ..... 00110000000 @VX
VADDCUQ 000100 ..... ..... ..... 00101000000 @VX
VADDUQM 000100 ..... ..... ..... 00100000000 @VX
+VADDSBS 000100 ..... ..... ..... 01100000000 @VX
+VADDSHS 000100 ..... ..... ..... 01101000000 @VX
+VADDSWS 000100 ..... ..... ..... 01110000000 @VX
+
+VADDUBS 000100 ..... ..... ..... 01000000000 @VX
+VADDUHS 000100 ..... ..... ..... 01001000000 @VX
+VADDUWS 000100 ..... ..... ..... 01010000000 @VX
+
VADDEUQM 000100 ..... ..... ..... ..... 111100 @VA
VADDECUQ 000100 ..... ..... ..... ..... 111101 @VA
@@ -839,6 +850,14 @@ VSUBCUW 000100 ..... ..... ..... 10110000000 @VX
VSUBCUQ 000100 ..... ..... ..... 10101000000 @VX
VSUBUQM 000100 ..... ..... ..... 10100000000 @VX
+VSUBSBS 000100 ..... ..... ..... 11100000000 @VX
+VSUBSHS 000100 ..... ..... ..... 11101000000 @VX
+VSUBSWS 000100 ..... ..... ..... 11110000000 @VX
+
+VSUBUBS 000100 ..... ..... ..... 11000000000 @VX
+VSUBUHS 000100 ..... ..... ..... 11001000000 @VX
+VSUBUWS 000100 ..... ..... ..... 11010000000 @VX
+
VSUBECUQ 000100 ..... ..... ..... ..... 111111 @VA
VSUBEUQM 000100 ..... ..... ..... ..... 111110 @VA
@@ -977,6 +996,35 @@ STXVRHX 011111 ..... ..... ..... 0010101101 . @X_TSX
STXVRWX 011111 ..... ..... ..... 0011001101 . @X_TSX
STXVRDX 011111 ..... ..... ..... 0011101101 . @X_TSX
+LXSDX 011111 ..... ..... ..... 1001001100 . @X_TSX
+LXSIWAX 011111 ..... ..... ..... 0001001100 . @X_TSX
+LXSIBZX 011111 ..... ..... ..... 1100001101 . @X_TSX
+LXSIHZX 011111 ..... ..... ..... 1100101101 . @X_TSX
+LXSIWZX 011111 ..... ..... ..... 0000001100 . @X_TSX
+LXSSPX 011111 ..... ..... ..... 1000001100 . @X_TSX
+
+STXSDX 011111 ..... ..... ..... 1011001100 . @X_TSX
+STXSIBX 011111 ..... ..... ..... 1110001101 . @X_TSX
+STXSIHX 011111 ..... ..... ..... 1110101101 . @X_TSX
+STXSIWX 011111 ..... ..... ..... 0010001100 . @X_TSX
+STXSSPX 011111 ..... ..... ..... 1010001100 . @X_TSX
+
+LXVB16X 011111 ..... ..... ..... 1101101100 . @X_TSX
+LXVD2X 011111 ..... ..... ..... 1101001100 . @X_TSX
+LXVH8X 011111 ..... ..... ..... 1100101100 . @X_TSX
+LXVW4X 011111 ..... ..... ..... 1100001100 . @X_TSX
+LXVDSX 011111 ..... ..... ..... 0101001100 . @X_TSX
+LXVWSX 011111 ..... ..... ..... 0101101100 . @X_TSX
+LXVL 011111 ..... ..... ..... 0100001101 . @X_TSX
+LXVLL 011111 ..... ..... ..... 0100101101 . @X_TSX
+
+STXVB16X 011111 ..... ..... ..... 1111101100 . @X_TSX
+STXVD2X 011111 ..... ..... ..... 1111001100 . @X_TSX
+STXVH8X 011111 ..... ..... ..... 1110101100 . @X_TSX
+STXVW4X 011111 ..... ..... ..... 1110001100 . @X_TSX
+STXVL 011111 ..... ..... ..... 0110001101 . @X_TSX
+STXVLL 011111 ..... ..... ..... 0110101101 . @X_TSX
+
## VSX Vector Binary Floating-Point Sign Manipulation Instructions
XVABSDP 111100 ..... 00000 ..... 111011001 .. @XX2
@@ -988,6 +1036,28 @@ XVNEGSP 111100 ..... 00000 ..... 110111001 .. @XX2
XVCPSGNDP 111100 ..... ..... ..... 11110000 ... @XX3
XVCPSGNSP 111100 ..... ..... ..... 11010000 ... @XX3
+## VSX Binary Floating-Point Arithmetic Instructions
+
+XSADDSP 111100 ..... ..... ..... 00000000 ... @XX3
+XSSUBSP 111100 ..... ..... ..... 00001000 ... @XX3
+XSMULSP 111100 ..... ..... ..... 00010000 ... @XX3
+XSDIVSP 111100 ..... ..... ..... 00011000 ... @XX3
+
+XSADDDP 111100 ..... ..... ..... 00100000 ... @XX3
+XSSUBDP 111100 ..... ..... ..... 00101000 ... @XX3
+XSMULDP 111100 ..... ..... ..... 00110000 ... @XX3
+XSDIVDP 111100 ..... ..... ..... 00111000 ... @XX3
+
+XVADDSP 111100 ..... ..... ..... 01000000 ... @XX3
+XVSUBSP 111100 ..... ..... ..... 01001000 ... @XX3
+XVMULSP 111100 ..... ..... ..... 01010000 ... @XX3
+XVDIVSP 111100 ..... ..... ..... 01011000 ... @XX3
+
+XVADDDP 111100 ..... ..... ..... 01100000 ... @XX3
+XVSUBDP 111100 ..... ..... ..... 01101000 ... @XX3
+XVMULDP 111100 ..... ..... ..... 01110000 ... @XX3
+XVDIVDP 111100 ..... ..... ..... 01111000 ... @XX3
+
## VSX Scalar Multiply-Add Instructions
XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3
@@ -1057,6 +1127,23 @@ XSCMPEQQP 111111 ..... ..... ..... 0001000100 - @X
XSCMPGEQP 111111 ..... ..... ..... 0011000100 - @X
XSCMPGTQP 111111 ..... ..... ..... 0011100100 - @X
+XVCMPEQSP 111100 ..... ..... ..... . 1000011 ... @XX3_rc
+XVCMPGTSP 111100 ..... ..... ..... . 1001011 ... @XX3_rc
+XVCMPGESP 111100 ..... ..... ..... . 1010011 ... @XX3_rc
+XVCMPNESP 111100 ..... ..... ..... . 1011011 ... @XX3_rc
+XVCMPEQDP 111100 ..... ..... ..... . 1100011 ... @XX3_rc
+XVCMPGTDP 111100 ..... ..... ..... . 1101011 ... @XX3_rc
+XVCMPGEDP 111100 ..... ..... ..... . 1110011 ... @XX3_rc
+XVCMPNEDP 111100 ..... ..... ..... . 1111011 ... @XX3_rc
+
+XSMAXDP 111100 ..... ..... ..... 10100000 ... @XX3
+XSMINDP 111100 ..... ..... ..... 10101000 ... @XX3
+
+XVMAXSP 111100 ..... ..... ..... 11000000 ... @XX3
+XVMINSP 111100 ..... ..... ..... 11001000 ... @XX3
+XVMAXDP 111100 ..... ..... ..... 11100000 ... @XX3
+XVMINDP 111100 ..... ..... ..... 11101000 ... @XX3
+
## VSX Binary Floating-Point Convert Instructions
XSCVQPDP 111111 ..... 10100 ..... 1101000100 . @X_tb_rc
@@ -1092,6 +1179,17 @@ XXMFACC 011111 ... -- 00000 ----- 0010110001 - @X_a
XXMTACC 011111 ... -- 00001 ----- 0010110001 - @X_a
XXSETACCZ 011111 ... -- 00011 ----- 0010110001 - @X_a
+## VSX Vector Logical instructions
+
+XXLAND 111100 ..... ..... ..... 10000010 ... @XX3
+XXLANDC 111100 ..... ..... ..... 10001010 ... @XX3
+XXLOR 111100 ..... ..... ..... 10010010 ... @XX3
+XXLXOR 111100 ..... ..... ..... 10011010 ... @XX3
+XXLNOR 111100 ..... ..... ..... 10100010 ... @XX3
+XXLEQV 111100 ..... ..... ..... 10111010 ... @XX3
+XXLNAND 111100 ..... ..... ..... 10110010 ... @XX3
+XXLORC 111100 ..... ..... ..... 10101010 ... @XX3
+
## VSX GER instruction
XVI4GER8 111011 ... -- ..... ..... 00100011 ..- @XX3_at xa=%xx_xa
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 2c6b633..ef4b2e7 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -541,7 +541,7 @@ VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
}
#define VARITHSAT_DO(name, op, optype, cvt, element) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \
+ void helper_V##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \
ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
{ \
int sat = 0; \
@@ -555,17 +555,17 @@ VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
} \
}
#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
- VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
- VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
+ VARITHSAT_DO(ADDS##suffix##S, +, optype, cvt, element) \
+ VARITHSAT_DO(SUBS##suffix##S, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
- VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
- VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
-VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
-VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
-VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
-VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
-VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
-VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
+ VARITHSAT_DO(ADDU##suffix##S, +, optype, cvt, element) \
+ VARITHSAT_DO(SUBU##suffix##S, -, optype, cvt, element)
+VARITHSAT_SIGNED(B, s8, int16_t, cvtshsb)
+VARITHSAT_SIGNED(H, s16, int32_t, cvtswsh)
+VARITHSAT_SIGNED(W, s32, int64_t, cvtsdsw)
+VARITHSAT_UNSIGNED(B, u8, uint16_t, cvtshub)
+VARITHSAT_UNSIGNED(H, u16, uint32_t, cvtswuh)
+VARITHSAT_UNSIGNED(W, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 20fb2ec..7723350 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -21,6 +21,7 @@
#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "exec/page-protection.h"
+#include "accel/tcg/tb-cpu-state.h"
/* PM instructions */
typedef enum {
@@ -268,6 +269,8 @@ static inline void pte_invalidate(target_ulong *pte0)
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr);
+
#ifdef CONFIG_USER_ONLY
void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
MMUAccessType access_type,
@@ -287,7 +290,11 @@ void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
void ppc_cpu_debug_excp_handler(CPUState *cs);
bool ppc_cpu_debug_check_breakpoint(CPUState *cs);
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
-#endif
+
+G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason);
+void powerpc_excp(PowerPCCPU *cpu, int excp);
+
+#endif /* !CONFIG_USER_ONLY */
FIELD(GER_MSK, XMSK, 0, 4)
FIELD(GER_MSK, YMSK, 4, 4)
@@ -302,4 +309,6 @@ static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk)
return msk;
}
+TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs);
+
#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 2c39322..0156580 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -26,10 +26,10 @@
#include "cpu.h"
#include "cpu-models.h"
#include "qemu/timer.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
#include "kvm_ppc.h"
-#include "sysemu/cpus.h"
-#include "sysemu/device_tree.h"
+#include "system/cpus.h"
+#include "system/device_tree.h"
#include "mmu-hash64.h"
#include "hw/ppc/spapr.h"
@@ -37,17 +37,19 @@
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "migration/qemu-file-types.h"
-#include "sysemu/watchdog.h"
+#include "system/watchdog.h"
#include "trace.h"
#include "gdbstub/enums.h"
#include "exec/memattrs.h"
-#include "exec/ram_addr.h"
-#include "sysemu/hostmem.h"
+#include "system/ram_addr.h"
+#include "system/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "elf.h"
-#include "sysemu/kvm_int.h"
+#include "system/kvm_int.h"
+#include "system/kvm.h"
+#include "accel/accel-cpu-target.h"
#include CONFIG_DEVICES
@@ -90,6 +92,7 @@ static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
static int cap_ail_mode_3;
+static int cap_dawr1;
#ifdef CONFIG_PSERIES
static int cap_papr;
@@ -150,6 +153,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
cap_large_decr = kvmppc_get_dec_bits();
cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
+ cap_dawr1 = kvm_vm_check_extension(s, KVM_CAP_PPC_DAWR1);
/*
* Note: setting it to false because there is not such capability
* in KVM at this moment.
@@ -475,6 +479,11 @@ static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
}
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -898,7 +907,7 @@ int kvmppc_put_books_sregs(PowerPCCPU *cpu)
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
@@ -1203,7 +1212,7 @@ static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
return 0;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
@@ -1328,7 +1337,6 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
- return;
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
@@ -2112,6 +2120,16 @@ int kvmppc_set_fwnmi(PowerPCCPU *cpu)
return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
}
+bool kvmppc_has_cap_dawr1(void)
+{
+ return !!cap_dawr1;
+}
+
+int kvmppc_set_cap_dawr1(int enable)
+{
+ return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_DAWR1, 0, enable);
+}
+
int kvmppc_smt_threads(void)
{
return cap_ppc_smt ? cap_ppc_smt : 1;
@@ -2346,7 +2364,31 @@ static void alter_insns(uint64_t *word, uint64_t flags, bool on)
}
}
-static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
+static bool kvmppc_cpu_realize(CPUState *cs, Error **errp)
+{
+ int ret;
+ const char *vcpu_str = (cs->parent_obj.hotplugged == true) ?
+ "hotplug" : "create";
+ cs->cpu_index = cpu_get_free_index();
+
+ POWERPC_CPU(cs)->vcpu_id = cs->cpu_index;
+
+ /* create and park to fail gracefully in case vcpu hotplug fails */
+ ret = kvm_create_and_park_vcpu(cs);
+ if (ret) {
+ /*
+         * This causes QEMU to terminate if initial CPU creation
+         * fails, but only the CPU hotplug to fail if the error
+         * happens there.
+ */
+ error_setg(errp, "%s: vcpu %s failed with %d",
+ __func__, vcpu_str, ret);
+ return false;
+ }
+ return true;
+}
+
+static void kvmppc_host_cpu_class_init(ObjectClass *oc, const void *data)
{
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
@@ -2607,7 +2649,7 @@ static int kvm_ppc_register_host_cpu_type(void)
return -1;
}
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
- type_register(&type_info);
+ type_register_static(&type_info);
/* override TCG default cpu type with 'host' cpu model */
object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
false, NULL);
@@ -2966,3 +3008,23 @@ void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
+
+static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
+{
+ AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
+
+ acc->cpu_target_realize = kvmppc_cpu_realize;
+}
+
+static const TypeInfo kvm_cpu_accel_type_info = {
+ .name = ACCEL_CPU_NAME("kvm"),
+
+ .parent = TYPE_ACCEL_CPU,
+ .class_init = kvm_cpu_accel_class_init,
+ .abstract = true,
+};
+static void kvm_cpu_accel_register_types(void)
+{
+ type_register_static(&kvm_cpu_accel_type_info);
+}
+type_init(kvm_cpu_accel_register_types);
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 1975fb5..a1d9ce9 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -9,7 +9,7 @@
#ifndef KVM_PPC_H
#define KVM_PPC_H
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "exec/hwaddr.h"
#include "cpu.h"
@@ -68,6 +68,8 @@ bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
+bool kvmppc_has_cap_dawr1(void);
+int kvmppc_set_cap_dawr1(int enable);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
@@ -219,7 +221,6 @@ static inline int kvmppc_smt_threads(void)
static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
- return;
}
static inline int kvmppc_set_smt_threads(int smt)
@@ -257,7 +258,6 @@ static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
unsigned int online)
{
- return;
}
static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
@@ -377,6 +377,16 @@ static inline bool kvmppc_has_cap_xive(void)
return false;
}
+static inline bool kvmppc_has_cap_dawr1(void)
+{
+ return false;
+}
+
+static inline int kvmppc_set_cap_dawr1(int enable)
+{
+ abort();
+}
+
static inline int kvmppc_get_cap_safe_cache(void)
{
return 0;
@@ -444,7 +454,6 @@ static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
static inline void kvmppc_check_papr_resize_hpt(Error **errp)
{
- return;
}
static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 731dd8d..d72e5ec 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -1,15 +1,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"
#include "power8-pmu.h"
-#include "sysemu/replay.h"
+#include "system/replay.h"
static void post_load_update_msr(CPUPPCState *env)
{
@@ -118,43 +117,11 @@ static const VMStateInfo vmstate_info_vsr = {
#define VMSTATE_VSR_ARRAY(_f, _s, _n) \
VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
-static bool cpu_pre_2_8_migration(void *opaque, int version_id)
-{
- PowerPCCPU *cpu = opaque;
-
- return cpu->pre_2_8_migration;
-}
-
-#if defined(TARGET_PPC64)
-static bool cpu_pre_3_0_migration(void *opaque, int version_id)
-{
- PowerPCCPU *cpu = opaque;
-
- return cpu->pre_3_0_migration;
-}
-#endif
-
static int cpu_pre_save(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUPPCState *env = &cpu->env;
int i;
- uint64_t insns_compat_mask =
- PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
- | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
- | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
- | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
- | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
- | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
- | PPC_64B | PPC_64BX | PPC_ALTIVEC
- | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
- uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
- | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
- | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
- | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
- | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
- | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM
- | PPC2_MEM_LWSYNC;
env->spr[SPR_LR] = env->lr;
env->spr[SPR_CTR] = env->ctr;
@@ -177,35 +144,6 @@ static int cpu_pre_save(void *opaque)
env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
}
- /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
- if (cpu->pre_2_8_migration) {
- /*
- * Mask out bits that got added to msr_mask since the versions
- * which stupidly included it in the migration stream.
- */
- target_ulong metamask = 0
-#if defined(TARGET_PPC64)
- | (1ULL << MSR_TS0)
- | (1ULL << MSR_TS1)
-#endif
- ;
- cpu->mig_msr_mask = env->msr_mask & ~metamask;
- cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
- /*
- * CPU models supported by old machines all have
- * PPC_MEM_TLBIE, so we set it unconditionally to allow
- * backward migration from a POWER9 host to a POWER8 host.
- */
- cpu->mig_insns_flags |= PPC_MEM_TLBIE;
- cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
- cpu->mig_nb_BATs = env->nb_BATs;
- }
- if (cpu->pre_3_0_migration) {
- if (cpu->hash64_opts) {
- cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
- }
- }
-
/* Used to retain migration compatibility for pre 6.0 for 601 machines. */
env->hflags_compat_nmsr = 0;
@@ -325,7 +263,8 @@ static int cpu_post_load(void *opaque, int version_id)
/* Re-set breaks based on regs */
#if defined(TARGET_PPC64)
ppc_update_ciabr(env);
- ppc_update_daw0(env);
+ ppc_update_daw(env, 0);
+ ppc_update_daw(env, 1);
#endif
/*
* TCG needs to re-start the decrementer timer and/or raise the
@@ -549,12 +488,11 @@ static int slb_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_slb = {
.name = "cpu/slb",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.needed = slb_needed,
.post_load = slb_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
VMSTATE_END_OF_LIST()
}
@@ -621,7 +559,7 @@ static bool tlbemb_needed(void *opaque)
}
static const VMStateDescription vmstate_tlbemb = {
- .name = "cpu/tlb6xx",
+ .name = "cpu/tlbemb",
.version_id = 1,
.minimum_version_id = 1,
.needed = tlbemb_needed,
@@ -676,7 +614,7 @@ static bool compat_needed(void *opaque)
PowerPCCPU *cpu = opaque;
assert(!(cpu->compat_pvr && !cpu->vhyp));
- return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
+ return cpu->compat_pvr != 0;
}
static const VMStateDescription vmstate_compat = {
@@ -760,12 +698,6 @@ const VMStateDescription vmstate_ppc_cpu = {
/* Backward compatible internal state */
VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),
- /* Sanity checking */
- VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
- VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
- cpu_pre_2_8_migration),
- VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index f88155a..6ab71a6 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -19,11 +19,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
#include "internal.h"
#include "qemu/atomic128.h"
@@ -271,51 +273,59 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
}
static void dcbz_common(CPUPPCState *env, target_ulong addr,
- uint32_t opcode, bool epid, uintptr_t retaddr)
+ int mmu_idx, int dcbz_size, uintptr_t retaddr)
{
- target_ulong mask, dcbz_size = env->dcache_line_size;
- uint32_t i;
+ target_ulong mask = ~(target_ulong)(dcbz_size - 1);
void *haddr;
- int mmu_idx = epid ? PPC_TLB_EPID_STORE : ppc_env_mmu_index(env, false);
-
-#if defined(TARGET_PPC64)
- /* Check for dcbz vs dcbzl on 970 */
- if (env->excp_model == POWERPC_EXCP_970 &&
- !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
- dcbz_size = 32;
- }
-#endif
/* Align address */
- mask = ~(dcbz_size - 1);
addr &= mask;
/* Check reservation */
- if ((env->reserve_addr & mask) == addr) {
+ if (unlikely((env->reserve_addr & mask) == addr)) {
env->reserve_addr = (target_ulong)-1ULL;
}
/* Try fast path translate */
+#ifdef CONFIG_USER_ONLY
+ haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
+#else
haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
- if (haddr) {
- memset(haddr, 0, dcbz_size);
- } else {
+ if (unlikely(!haddr)) {
/* Slow path */
- for (i = 0; i < dcbz_size; i += 8) {
+ for (int i = 0; i < dcbz_size; i += 8) {
cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
}
+ return;
}
+#endif
+
+ set_helper_retaddr(retaddr);
+ memset(haddr, 0, dcbz_size);
+ clear_helper_retaddr();
}
-void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
+void helper_dcbz(CPUPPCState *env, target_ulong addr, int mmu_idx)
{
- dcbz_common(env, addr, opcode, false, GETPC());
+ dcbz_common(env, addr, mmu_idx, env->dcache_line_size, GETPC());
}
-void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
+#ifdef TARGET_PPC64
+void helper_dcbzl(CPUPPCState *env, target_ulong addr)
{
- dcbz_common(env, addr, opcode, true, GETPC());
+ int dcbz_size = env->dcache_line_size;
+
+ /*
+ * The translator checked for POWERPC_EXCP_970.
+ * All that's left is to check HID5.
+ */
+ if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
+ dcbz_size = 32;
+ }
+
+ dcbz_common(env, addr, ppc_env_mmu_index(env, false), dcbz_size, GETPC());
}
+#endif
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
@@ -467,8 +477,8 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
*xt = t; \
}
-VSX_LXVL(lxvl, 0)
-VSX_LXVL(lxvll, 1)
+VSX_LXVL(LXVL, 0)
+VSX_LXVL(LXVLL, 1)
#undef VSX_LXVL
#define VSX_STXVL(name, lj) \
@@ -496,8 +506,8 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
} \
}
-VSX_STXVL(stxvl, 0)
-VSX_STXVL(stxvll, 1)
+VSX_STXVL(STXVL, 0)
+VSX_STXVL(STXVLL, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */
diff --git a/target/ppc/meson.build b/target/ppc/meson.build
index db3b7a0..8eed1fa 100644
--- a/target/ppc/meson.build
+++ b/target/ppc/meson.build
@@ -14,6 +14,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files(
'int_helper.c',
'mem_helper.c',
'misc_helper.c',
+ 'tcg-excp_helper.c',
'timebase_helper.c',
'translate.c',
'power8-pmu.c',
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index fa47be2..e7d9462 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
@@ -48,9 +48,8 @@ void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1) {
+ if (ppc_cpu_core_single_threaded(cs)) {
env->spr[sprn] = val;
return;
}
@@ -195,7 +194,7 @@ void helper_store_ptcr(CPUPPCState *env, target_ulong val)
return;
}
- if (cs->nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
env->spr[SPR_PTCR] = val;
tlb_flush(cs);
} else {
@@ -234,6 +233,16 @@ void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
ppc_store_dawrx0(env, value);
}
+void helper_store_dawr1(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawr1(env, value);
+}
+
+void helper_store_dawrx1(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawrx1(env, value);
+}
+
/*
* DPDES register is shared. Each bit reflects the state of the
* doorbell interrupt of a thread of the same core.
@@ -242,16 +251,12 @@ target_ulong helper_load_dpdes(CPUPPCState *env)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
target_ulong dpdes = 0;
helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);
- if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
- nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
- }
-
- if (nr_threads == 1) {
+ /* DPDES behaves as 1-thread in LPAR-per-thread mode */
+ if (ppc_cpu_lpar_single_threaded(cs)) {
if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
dpdes = 1;
}
@@ -278,21 +283,11 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
PowerPCCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);
- if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
- nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
- }
-
- if (val & ~(nr_threads - 1)) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value "
- TARGET_FMT_lx"\n", val);
- val &= (nr_threads - 1); /* Ignore the invalid bits */
- }
-
- if (nr_threads == 1) {
+ /* DPDES behaves as 1-thread in LPAR-per-thread mode */
+ if (ppc_cpu_lpar_single_threaded(cs)) {
ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1);
return;
}
@@ -303,11 +298,18 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
uint32_t thread_id = ppc_cpu_tir(ccpu);
- ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
+ ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
}
bql_unlock();
}
+/*
+ * qemu-user breaks with pnv headers, so they go under ifdefs for now.
+ * A cleanup may be to move powernv-specific registers and helpers into
+ * target/ppc/pnv_helper.c
+ */
+#include "hw/ppc/pnv_core.h"
+
/* Indirect SCOM (SPRC/SPRD) access to SCRATCH0-7 are implemented. */
void helper_store_sprc(CPUPPCState *env, target_ulong val)
{
@@ -321,11 +323,39 @@ void helper_store_sprc(CPUPPCState *env, target_ulong val)
target_ulong helper_load_sprd(CPUPPCState *env)
{
+ /*
+ * SPRD is a HV-only register for Power CPUs, so this will only be
+ * accessed by powernv machines.
+ */
+ PowerPCCPU *cpu = env_archcpu(env);
+ PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
target_ulong sprc = env->spr[SPR_POWER_SPRC];
- switch (sprc & 0x3c0) {
- case 0: /* SCRATCH0-7 */
- return env->scratch[(sprc >> 3) & 0x7];
+ if (pc->big_core) {
+ pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1);
+ }
+
+ switch (sprc & 0x3e0) {
+ case 0: /* SCRATCH0-3 */
+ case 1: /* SCRATCH4-7 */
+ return pc->scratch[(sprc >> 3) & 0x7];
+
+ case 0x1e0: /* core thread state */
+ if (env->excp_model == POWERPC_EXCP_POWER9) {
+ /*
+             * Only implemented for POWER9 because skiboot uses it to check
+             * big-core mode. Other bits are unimplemented, so we would
+             * prefer to get an "unimplemented" message on POWER10 if it
+             * were used anywhere.
+ */
+ if (pc->big_core) {
+ return PPC_BIT(63);
+ } else {
+ return 0;
+ }
+ }
+ /* fallthru */
+
default:
qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x"
TARGET_FMT_lx"\n", sprc);
@@ -334,45 +364,88 @@ target_ulong helper_load_sprd(CPUPPCState *env)
return 0;
}
-static void do_store_scratch(CPUPPCState *env, int nr, target_ulong val)
+void helper_store_sprd(CPUPPCState *env, target_ulong val)
+{
+ target_ulong sprc = env->spr[SPR_POWER_SPRC];
+ PowerPCCPU *cpu = env_archcpu(env);
+ PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
+ int nr;
+
+ if (pc->big_core) {
+ pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1);
+ }
+
+ switch (sprc & 0x3e0) {
+ case 0: /* SCRATCH0-3 */
+ case 1: /* SCRATCH4-7 */
+ /*
+ * Log stores to SCRATCH, because some firmware uses these for
+ * debugging and logging, but they would normally be read by the BMC,
+ * which is not implemented in QEMU yet. This gives a way to get at the
+ * information. Could also dump these upon checkstop.
+ */
+ nr = (sprc >> 3) & 0x7;
+ pc->scratch[nr] = val;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "mtSPRD: Unimplemented SPRC:0x"
+ TARGET_FMT_lx"\n", sprc);
+ break;
+ }
+}
+
+target_ulong helper_load_pmsr(CPUPPCState *env)
+{
+ target_ulong lowerps = extract64(env->spr[SPR_PMCR], PPC_BIT_NR(15), 8);
+ target_ulong val = 0;
+
+    val |= PPC_BIT(63); /* version 0x1 (POWER9/10) */
+ /* Pmin = 0 */
+ /* XXX: POWER9 should be 3 */
+ val |= 4ULL << PPC_BIT_NR(31); /* Pmax */
+ val |= lowerps << PPC_BIT_NR(15); /* Local actual Pstate */
+ val |= lowerps << PPC_BIT_NR(7); /* Global actual Pstate */
+
+ return val;
+}
+
+static void ppc_set_pmcr(PowerPCCPU *cpu, target_ulong val)
{
+ cpu->env.spr[SPR_PMCR] = val;
+}
+
+void helper_store_pmcr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- /*
- * Log stores to SCRATCH, because some firmware uses these for debugging
- * and logging, but they would normally be read by the BMC, which is
- * not implemented in QEMU yet. This gives a way to get at the information.
- * Could also dump these upon checkstop.
- */
- qemu_log("SPRD write 0x" TARGET_FMT_lx " to SCRATCH%d\n", val, nr);
+ /* Leave version field unchanged (0x1) */
+ val &= ~PPC_BITMASK(60, 63);
+ val |= PPC_BIT(63);
+
+ val &= ~PPC_BITMASK(0, 7); /* UpperPS ignored */
+ if (val & PPC_BITMASK(16, 59)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Non-zero PMCR reserved bits "
+ TARGET_FMT_lx"\n", val);
+ val &= ~PPC_BITMASK(16, 59);
+ }
- if (nr_threads == 1) {
- env->scratch[nr] = val;
+    /* PMCR behaves as 1-thread in LPAR-per-thread mode */
+ if (ppc_cpu_lpar_single_threaded(cs)) {
+ ppc_set_pmcr(cpu, val);
return;
}
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
THREAD_SIBLING_FOREACH(cs, ccs) {
- CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
- cenv->scratch[nr] = val;
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ ppc_set_pmcr(ccpu, val);
}
+ bql_unlock();
}
-void helper_store_sprd(CPUPPCState *env, target_ulong val)
-{
- target_ulong sprc = env->spr[SPR_POWER_SPRC];
-
- switch (sprc & 0x3c0) {
- case 0: /* SCRATCH0-7 */
- do_store_scratch(env, (sprc >> 3) & 0x7, val);
- break;
- default:
- qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x"
- TARGET_FMT_lx"\n", sprc);
- break;
- }
-}
#endif /* defined(TARGET_PPC64) */
void helper_store_pidr(CPUPPCState *env, target_ulong val)
diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c
index c8f69b3..3865556 100644
--- a/target/ppc/mmu-book3s-v3.c
+++ b/target/ppc/mmu-book3s-v3.c
@@ -18,10 +18,10 @@
*/
#include "qemu/osdep.h"
+#include "system/memory.h"
#include "cpu.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
-#include "mmu-radix64.h"
bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry)
{
diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h
index f3f7993..be66e26 100644
--- a/target/ppc/mmu-book3s-v3.h
+++ b/target/ppc/mmu-book3s-v3.h
@@ -20,9 +20,6 @@
#ifndef PPC_MMU_BOOK3S_V3_H
#define PPC_MMU_BOOK3S_V3_H
-#include "mmu-hash64.h"
-#include "mmu-books.h"
-
#ifndef CONFIG_USER_ONLY
/*
@@ -83,46 +80,6 @@ static inline bool ppc64_v3_radix(PowerPCCPU *cpu)
return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR);
}
-static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
-{
- uint64_t base;
-
- if (cpu->vhyp) {
- return 0;
- }
- if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
- ppc_v3_pate_t pate;
-
- if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
- return 0;
- }
- base = pate.dw0;
- } else {
- base = cpu->env.spr[SPR_SDR1];
- }
- return base & SDR_64_HTABORG;
-}
-
-static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
-{
- uint64_t base;
-
- if (cpu->vhyp) {
- return cpu->vhyp_class->hpt_mask(cpu->vhyp);
- }
- if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
- ppc_v3_pate_t pate;
-
- if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
- return 0;
- }
- base = pate.dw0;
- } else {
- base = cpu->env.spr[SPR_SDR1];
- }
- return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
-}
-
#endif /* TARGET_PPC64 */
#endif /* CONFIG_USER_ONLY */
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index d5f2057..8b980a5 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
-#include "sysemu/kvm.h"
+#include "exec/target_page.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
@@ -37,17 +37,6 @@
# define LOG_BATS(...) do { } while (0)
#endif
-static int ppc_hash32_pte_prot(int mmu_idx,
- target_ulong sr, ppc_hash_pte32_t pte)
-{
- unsigned pp, key;
-
- key = !!(mmuidx_pr(mmu_idx) ? (sr & SR32_KP) : (sr & SR32_KS));
- pp = pte.pte1 & HPTE32_R_PP;
-
- return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
-}
-
static target_ulong hash32_bat_size(int mmu_idx,
target_ulong batu, target_ulong batl)
{
@@ -59,22 +48,6 @@ static target_ulong hash32_bat_size(int mmu_idx,
return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}
-static int hash32_bat_prot(PowerPCCPU *cpu,
- target_ulong batu, target_ulong batl)
-{
- int pp, prot;
-
- prot = 0;
- pp = batl & BATL32_PP;
- if (pp != 0) {
- prot = PAGE_READ | PAGE_EXEC;
- if (pp == 0x2) {
- prot |= PAGE_WRITE;
- }
- }
- return prot;
-}
-
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
MMUAccessType access_type, int *prot,
int mmu_idx)
@@ -106,7 +79,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
hwaddr raddr = (batl & mask) | (ea & ~mask);
- *prot = hash32_bat_prot(cpu, batu, batl);
+ *prot = ppc_hash32_bat_prot(batu, batl);
return raddr & TARGET_PAGE_MASK;
}
@@ -145,7 +118,6 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- int key = !!(mmuidx_pr(mmu_idx) ? (sr & SR32_KP) : (sr & SR32_KS));
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
@@ -206,7 +178,11 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
cpu_abort(cs, "ERROR: insn should not need address translation\n");
}
- *prot = key ? PAGE_READ | PAGE_WRITE : PAGE_READ;
+ if (ppc_hash32_key(mmuidx_pr(mmu_idx), sr)) {
+ *prot = PAGE_READ | PAGE_WRITE;
+ } else {
+ *prot = PAGE_READ;
+ }
if (check_prot_access_type(*prot, access_type)) {
*raddr = eaddr;
return true;
@@ -225,13 +201,6 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
return false;
}
-hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
-{
- target_ulong mask = ppc_hash32_hpt_mask(cpu);
-
- return (hash * HASH_PTEG_SIZE_32) & mask;
-}
-
static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
bool secondary, target_ulong ptem,
ppc_hash_pte32_t *pte)
@@ -322,15 +291,6 @@ static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
return pte_offset;
}
-static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
- target_ulong eaddr)
-{
- hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
- hwaddr mask = ~TARGET_PAGE_MASK;
-
- return (rpn & ~mask) | (eaddr & mask);
-}
-
bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
bool guest_visible)
@@ -338,10 +298,10 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
target_ulong sr;
- hwaddr pte_offset;
+ hwaddr pte_offset, raddr;
ppc_hash_pte32_t pte;
+ bool key;
int prot;
- hwaddr raddr;
/* There are no hash32 large pages. */
*psizep = TARGET_PAGE_BITS;
@@ -423,8 +383,8 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
"found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
/* 7. Check access permissions */
-
- prot = ppc_hash32_pte_prot(mmu_idx, sr, pte);
+ key = ppc_hash32_key(mmuidx_pr(mmu_idx), sr);
+ prot = ppc_hash32_prot(key, pte.pte1 & HPTE32_R_PP, sr & SR32_NX);
if (!check_prot_access_type(prot, access_type)) {
/* Access right violation */
@@ -464,11 +424,12 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
*/
prot &= ~PAGE_WRITE;
}
- }
+ }
+ *protp = prot;
/* 9. Determine the real address from the PTE */
-
- *raddrp = ppc_hash32_pte_raddr(sr, pte, eaddr);
- *protp = prot;
+ *raddrp = pte.pte1 & HPTE32_R_RPN;
+ *raddrp &= TARGET_PAGE_MASK;
+ *raddrp |= eaddr & ~TARGET_PAGE_MASK;
return true;
}
diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h
index f0ce695..04c23ea 100644
--- a/target/ppc/mmu-hash32.h
+++ b/target/ppc/mmu-hash32.h
@@ -3,7 +3,8 @@
#ifndef CONFIG_USER_ONLY
-hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
+#include "system/memory.h"
+
bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
bool guest_visible);
@@ -102,48 +103,63 @@ static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
stl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
}
-static inline int ppc_hash32_pp_prot(bool key, int pp, bool nx)
+static inline hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
+{
+ return (hash * HASH_PTEG_SIZE_32) & ppc_hash32_hpt_mask(cpu);
+}
+
+static inline bool ppc_hash32_key(bool pr, target_ulong sr)
+{
+ return pr ? (sr & SR32_KP) : (sr & SR32_KS);
+}
+
+static inline int ppc_hash32_prot(bool key, int pp, bool nx)
{
int prot;
- if (key == 0) {
+ if (key) {
switch (pp) {
case 0x0:
- case 0x1:
- case 0x2:
- prot = PAGE_READ | PAGE_WRITE;
+ prot = 0;
break;
-
+ case 0x1:
case 0x3:
prot = PAGE_READ;
break;
-
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
default:
- abort();
+ g_assert_not_reached();
}
} else {
switch (pp) {
case 0x0:
- prot = 0;
- break;
-
case 0x1:
- case 0x3:
- prot = PAGE_READ;
- break;
-
case 0x2:
prot = PAGE_READ | PAGE_WRITE;
break;
-
+ case 0x3:
+ prot = PAGE_READ;
+ break;
default:
- abort();
+ g_assert_not_reached();
}
}
- if (nx == 0) {
- prot |= PAGE_EXEC;
- }
+ return nx ? prot : prot | PAGE_EXEC;
+}
+static inline int ppc_hash32_bat_prot(target_ulong batu, target_ulong batl)
+{
+ int prot = 0;
+ int pp = batl & BATL32_PP;
+
+ if (pp) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2) {
+ prot |= PAGE_WRITE;
+ }
+ }
return prot;
}
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index cbc8efa..dd33755 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -20,17 +20,18 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
+#include "system/memory.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
+#include "mmu-books.h"
#include "helper_regs.h"
#ifdef CONFIG_TCG
@@ -508,6 +509,46 @@ static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
return prot;
}
+static hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
+{
+ uint64_t base;
+
+ if (cpu->vhyp) {
+ return 0;
+ }
+ if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+ ppc_v3_pate_t pate;
+
+ if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+ return 0;
+ }
+ base = pate.dw0;
+ } else {
+ base = cpu->env.spr[SPR_SDR1];
+ }
+ return base & SDR_64_HTABORG;
+}
+
+static hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
+{
+ uint64_t base;
+
+ if (cpu->vhyp) {
+ return cpu->vhyp_class->hpt_mask(cpu->vhyp);
+ }
+ if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+ ppc_v3_pate_t pate;
+
+ if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+ return 0;
+ }
+ base = pate.dw0;
+ } else {
+ base = cpu->env.spr[SPR_SDR1];
+ }
+ return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
+}
+
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
hwaddr ptex, int n)
{
@@ -545,6 +586,15 @@ void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
false, n * HASH_PTE_SIZE_64);
}
+bool ppc_hash64_valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
+{
+ /* hash value/pteg group index is normalized by HPT mask */
+ if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
+ return false;
+ }
+ return true;
+}
+
static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
uint64_t pte0, uint64_t pte1)
{
@@ -943,6 +993,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
int exec_prot, pp_prot, amr_prot, prot;
int need_prot;
hwaddr raddr;
+ bool vrma = false;
/*
* Note on LPCR usage: 970 uses HID4, but our special variant of
@@ -972,6 +1023,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
}
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
+ vrma = true;
slb = &vrma_slbe;
if (build_vrma_slbe(cpu, slb) != 0) {
/* Invalid VRMA setup, machine check */
@@ -1086,7 +1138,12 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
- amr_prot = ppc_hash64_amr_prot(cpu, pte);
+ if (vrma) {
+ /* VRMA does not check keys */
+ amr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ } else {
+ amr_prot = ppc_hash64_amr_prot(cpu, pte);
+ }
prot = exec_prot & pp_prot & amr_prot;
need_prot = check_prot_access_type(PAGE_RWX, access_type);
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
index de653fc..b8fb12a 100644
--- a/target/ppc/mmu-hash64.h
+++ b/target/ppc/mmu-hash64.h
@@ -1,6 +1,8 @@
#ifndef MMU_HASH64_H
#define MMU_HASH64_H
+#include "exec/tswap.h"
+
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_PPC64
@@ -120,6 +122,7 @@ const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
hwaddr ptex, int n);
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n);
+bool ppc_hash64_valid_ptex(PowerPCCPU *cpu, target_ulong ptex);
static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu,
const ppc_hash_pte64_t *hptes, int i)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 5a02e49..33ac341 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -19,15 +19,47 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
+#include "system/memory.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"
+#include "mmu-books.h"
+
+/* Radix Partition Table Entry Fields */
+#define PATE1_R_PRTB 0x0FFFFFFFFFFFF000
+#define PATE1_R_PRTS 0x000000000000001F
+
+/* Radix Process Table Entry Fields */
+#define PRTBE_R_GET_RTS(rts) \
+ ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
+#define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00
+#define PRTBE_R_RPDS 0x000000000000001F
+
+/* Radix Page Directory/Table Entry Fields */
+#define R_PTE_VALID 0x8000000000000000
+#define R_PTE_LEAF 0x4000000000000000
+#define R_PTE_SW0 0x2000000000000000
+#define R_PTE_RPN 0x01FFFFFFFFFFF000
+#define R_PTE_SW1 0x0000000000000E00
+#define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
+#define R_PTE_R 0x0000000000000100
+#define R_PTE_C 0x0000000000000080
+#define R_PTE_ATT 0x0000000000000030
+#define R_PTE_ATT_NORMAL 0x0000000000000000
+#define R_PTE_ATT_SAO 0x0000000000000010
+#define R_PTE_ATT_NI_IO 0x0000000000000020
+#define R_PTE_ATT_TOLERANT_IO 0x0000000000000030
+#define R_PTE_EAA_PRIV 0x0000000000000008
+#define R_PTE_EAA_R 0x0000000000000004
+#define R_PTE_EAA_RW 0x0000000000000002
+#define R_PTE_EAA_X 0x0000000000000001
+#define R_PDE_NLB PRTBE_R_RPDB
+#define R_PDE_NLS PRTBE_R_RPDS
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
vaddr eaddr,
@@ -180,6 +212,24 @@ static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
}
}
+static int ppc_radix64_get_prot_eaa(uint64_t pte)
+{
+ return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
+ (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
+ (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
+}
+
+static int ppc_radix64_get_prot_amr(const PowerPCCPU *cpu)
+{
+ const CPUPPCState *env = &cpu->env;
+ int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */
+ int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */
+
+ return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
+ (amr & 0x1 ? 0 : PAGE_READ) |
+ (iamr & 0x1 ? 0 : PAGE_EXEC);
+}
+
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
uint64_t pte, int *fault_cause, int *prot,
int mmu_idx, bool partition_scoped)
@@ -521,6 +571,20 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
prtbe0 = ldq_phys(cs->as, h_raddr);
}
+ /*
+     * Some Linux kernels use a zero process table entry in PID!=0 for kernel
+ * without userspace in order to fault on NULL dereference, because using
+ * PIDR=0 for the kernel causes the Q0 page table to be used to translate
+ * Q3 as well. Check for that case here to avoid the invalid configuration
+ * message.
+ */
+ if (unlikely(!prtbe0)) {
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+ }
+ return 1;
+ }
+
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
*g_page_size = PRTBE_R_GET_RTS(prtbe0);
base_addr = prtbe0 & PRTBE_R_RPDB;
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
index c5c04a1..6620b3d 100644
--- a/target/ppc/mmu-radix64.h
+++ b/target/ppc/mmu-radix64.h
@@ -3,7 +3,7 @@
#ifndef CONFIG_USER_ONLY
-#include "exec/page-protection.h"
+#ifdef TARGET_PPC64
/* Radix Quadrants */
#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF
@@ -14,61 +14,10 @@
#define R_EADDR_QUADRANT2 0x8000000000000000
#define R_EADDR_QUADRANT3 0xC000000000000000
-/* Radix Partition Table Entry Fields */
-#define PATE1_R_PRTB 0x0FFFFFFFFFFFF000
-#define PATE1_R_PRTS 0x000000000000001F
-
-/* Radix Process Table Entry Fields */
-#define PRTBE_R_GET_RTS(rts) \
- ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
-#define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00
-#define PRTBE_R_RPDS 0x000000000000001F
-
-/* Radix Page Directory/Table Entry Fields */
-#define R_PTE_VALID 0x8000000000000000
-#define R_PTE_LEAF 0x4000000000000000
-#define R_PTE_SW0 0x2000000000000000
-#define R_PTE_RPN 0x01FFFFFFFFFFF000
-#define R_PTE_SW1 0x0000000000000E00
-#define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
-#define R_PTE_R 0x0000000000000100
-#define R_PTE_C 0x0000000000000080
-#define R_PTE_ATT 0x0000000000000030
-#define R_PTE_ATT_NORMAL 0x0000000000000000
-#define R_PTE_ATT_SAO 0x0000000000000010
-#define R_PTE_ATT_NI_IO 0x0000000000000020
-#define R_PTE_ATT_TOLERANT_IO 0x0000000000000030
-#define R_PTE_EAA_PRIV 0x0000000000000008
-#define R_PTE_EAA_R 0x0000000000000004
-#define R_PTE_EAA_RW 0x0000000000000002
-#define R_PTE_EAA_X 0x0000000000000001
-#define R_PDE_NLB PRTBE_R_RPDB
-#define R_PDE_NLS PRTBE_R_RPDS
-
-#ifdef TARGET_PPC64
-
bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
bool guest_visible);
-static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
-{
- return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
- (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
- (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
-}
-
-static inline int ppc_radix64_get_prot_amr(const PowerPCCPU *cpu)
-{
- const CPUPPCState *env = &cpu->env;
- int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */
- int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */
-
- return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
- (amr & 0x1 ? 0 : PAGE_READ) |
- (iamr & 0x1 ? 0 : PAGE_EXEC);
-}
-
#endif /* TARGET_PPC64 */
#endif /* CONFIG_USER_ONLY */
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index e254269..52d4861 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -20,12 +20,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
@@ -37,17 +37,6 @@
/* #define DUMP_PAGE_TABLES */
-/* Context used internally during MMU translations */
-typedef struct {
- hwaddr raddr; /* Real address */
- hwaddr eaddr; /* Effective address */
- int prot; /* Protection bits */
- hwaddr hash[2]; /* Pagetable hash values */
- target_ulong ptem; /* Virtual segment ID | API */
- int key; /* Access key */
- int nx; /* Non-execute area */
-} mmu_ctx_t;
-
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = env_archcpu(env);
@@ -94,86 +83,23 @@ int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
return nr;
}
-static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
- target_ulong pte1, int h,
- MMUAccessType access_type)
-{
- target_ulong ptem, mmask;
- int ret, pteh, ptev, pp;
-
- ret = -1;
- /* Check validity and table match */
- ptev = pte_is_valid(pte0);
- pteh = (pte0 >> 6) & 1;
- if (ptev && h == pteh) {
- /* Check vsid & api */
- ptem = pte0 & PTE_PTEM_MASK;
- mmask = PTE_CHECK_MASK;
- pp = pte1 & 0x00000003;
- if (ptem == ctx->ptem) {
- if (ctx->raddr != (hwaddr)-1ULL) {
- /* all matches should have equal RPN, WIMG & PP */
- if ((ctx->raddr & mmask) != (pte1 & mmask)) {
- qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
- return -3;
- }
- }
- /* Keep the matching PTE information */
- ctx->raddr = pte1;
- ctx->prot = ppc_hash32_pp_prot(ctx->key, pp, ctx->nx);
- if (check_prot_access_type(ctx->prot, access_type)) {
- /* Access granted */
- qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
- ret = 0;
- } else {
- /* Access right violation */
- qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
- ret = -2;
- }
- }
- }
-
- return ret;
-}
-
-static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
- int ret, MMUAccessType access_type)
-{
- int store = 0;
-
- /* Update page flags */
- if (!(*pte1p & 0x00000100)) {
- /* Update accessed flag */
- *pte1p |= 0x00000100;
- store = 1;
- }
- if (!(*pte1p & 0x00000080)) {
- if (access_type == MMU_DATA_STORE && ret == 0) {
- /* Update changed flag */
- *pte1p |= 0x00000080;
- store = 1;
- } else {
- /* Force page fault for first write access */
- ctx->prot &= ~PAGE_WRITE;
- }
- }
-
- return store;
-}
-
/* Software driven TLB helpers */
-static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
- target_ulong eaddr, MMUAccessType access_type)
+static int ppc6xx_tlb_check(CPUPPCState *env, hwaddr *raddr, int *prot,
+ target_ulong eaddr, MMUAccessType access_type,
+ target_ulong ptem, bool key, bool nx)
{
ppc6xx_tlb_t *tlb;
- int nr, best, way;
- int ret;
+ target_ulong *pte1p;
+ int nr, best, way, ret;
+ bool is_code = (access_type == MMU_INST_FETCH);
+ /* Initialize real address with an invalid value */
+ *raddr = (hwaddr)-1ULL;
best = -1;
ret = -1; /* No TLB found */
for (way = 0; way < env->nb_ways; way++) {
- nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
+ nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
tlb = &env->tlb.tlb6[nr];
/* This test "emulates" the PTE index match for hardware TLBs */
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
@@ -191,37 +117,51 @@ static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
tlb->EPN, eaddr, tlb->pte1,
access_type == MMU_DATA_STORE ? 'S' : 'L',
access_type == MMU_INST_FETCH ? 'I' : 'D');
- switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
- 0, access_type)) {
- case -2:
- /* Access violation */
- ret = -2;
- best = nr;
- break;
- case -1: /* No match */
- case -3: /* TLB inconsistency */
- default:
- break;
- case 0:
- /* access granted */
- /*
- * XXX: we should go on looping to check all TLBs
- * consistency but we can speed-up the whole thing as
- * the result would be undefined if TLBs are not
- * consistent.
- */
+ /* Check validity and table match */
+ if (!pte_is_valid(tlb->pte0) || ((tlb->pte0 >> 6) & 1) != 0 ||
+ (tlb->pte0 & PTE_PTEM_MASK) != ptem) {
+ continue;
+ }
+ /* all matches should have equal RPN, WIMG & PP */
+ if (*raddr != (hwaddr)-1ULL &&
+ (*raddr & PTE_CHECK_MASK) != (tlb->pte1 & PTE_CHECK_MASK)) {
+ qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
+ /* TLB inconsistency */
+ continue;
+ }
+ /* Keep the matching PTE information */
+ best = nr;
+ *raddr = tlb->pte1;
+ *prot = ppc_hash32_prot(key, tlb->pte1 & HPTE32_R_PP, nx);
+ if (check_prot_access_type(*prot, access_type)) {
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
ret = 0;
- best = nr;
- goto done;
+ break;
+ } else {
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ ret = -2;
}
}
if (best != -1) {
-done:
qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
" prot=%01x ret=%d\n",
- ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ *raddr & TARGET_PAGE_MASK, *prot, ret);
/* Update page flags */
- pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
+ pte1p = &env->tlb.tlb6[best].pte1;
+ *pte1p |= 0x00000100; /* Update accessed flag */
+ if (!(*pte1p & 0x00000080)) {
+ if (access_type == MMU_DATA_STORE && ret == 0) {
+ /* Update changed flag */
+ *pte1p |= 0x00000080;
+ } else {
+ /* Force page fault for first write access */
+ *prot &= ~PAGE_WRITE;
+ }
+ }
+ }
+ if (ret == -1) {
+ int r = is_code ? SPR_ICMP : SPR_DCMP;
+ env->spr[r] = ptem;
}
#if defined(DUMP_PAGE_TABLES)
if (qemu_loglevel_mask(CPU_LOG_MMU)) {
@@ -247,44 +187,17 @@ done:
return ret;
}
-/* Perform BAT hit & translation */
-static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
- int *validp, int *protp, target_ulong *BATu,
- target_ulong *BATl)
-{
- target_ulong bl;
- int pp, valid, prot;
-
- bl = (*BATu & 0x00001FFC) << 15;
- valid = 0;
- prot = 0;
- if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
- (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
- valid = 1;
- pp = *BATl & 0x00000003;
- if (pp != 0) {
- prot = PAGE_READ | PAGE_EXEC;
- if (pp == 0x2) {
- prot |= PAGE_WRITE;
- }
- }
- }
- *blp = bl;
- *validp = valid;
- *protp = prot;
-}
-
-static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
- target_ulong virtual, MMUAccessType access_type)
+static int get_bat_6xx_tlb(CPUPPCState *env, hwaddr *raddr, int *prot,
+ target_ulong eaddr, MMUAccessType access_type,
+ bool pr)
{
target_ulong *BATlt, *BATut, *BATu, *BATl;
target_ulong BEPIl, BEPIu, bl;
- int i, valid, prot;
- int ret = -1;
+ int i, ret = -1;
bool ifetch = access_type == MMU_INST_FETCH;
qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
- ifetch ? 'I' : 'D', virtual);
+ ifetch ? 'I' : 'D', eaddr);
if (ifetch) {
BATlt = env->IBAT[1];
BATut = env->IBAT[0];
@@ -295,27 +208,26 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
for (i = 0; i < env->nb_BATs; i++) {
BATu = &BATut[i];
BATl = &BATlt[i];
- BEPIu = *BATu & 0xF0000000;
- BEPIl = *BATu & 0x0FFE0000;
- bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
+ BEPIu = *BATu & BATU32_BEPIU;
+ BEPIl = *BATu & BATU32_BEPIL;
qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
- ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
- if ((virtual & 0xF0000000) == BEPIu &&
- ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
- /* BAT matches */
- if (valid != 0) {
+ ifetch ? 'I' : 'D', i, eaddr, *BATu, *BATl);
+ bl = (*BATu & BATU32_BL) << 15;
+ if ((!pr && (*BATu & BATU32_VS)) || (pr && (*BATu & BATU32_VP))) {
+ if ((eaddr & BATU32_BEPIU) == BEPIu &&
+ ((eaddr & BATU32_BEPIL) & ~bl) == BEPIl) {
/* Get physical address */
- ctx->raddr = (*BATl & 0xF0000000) |
- ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
- (virtual & 0x0001F000);
+ *raddr = (*BATl & BATU32_BEPIU) |
+ ((eaddr & BATU32_BEPIL & bl) | (*BATl & BATU32_BEPIL)) |
+ (eaddr & 0x0001F000);
/* Compute access rights */
- ctx->prot = prot;
- if (check_prot_access_type(ctx->prot, access_type)) {
+ *prot = ppc_hash32_bat_prot(*BATu, *BATl);
+ if (check_prot_access_type(*prot, access_type)) {
qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
- " prot=%c%c\n", i, ctx->raddr,
- ctx->prot & PAGE_READ ? 'R' : '-',
- ctx->prot & PAGE_WRITE ? 'W' : '-');
+ " prot=%c%c\n", i, *raddr,
+ *prot & PAGE_READ ? 'R' : '-',
+ *prot & PAGE_WRITE ? 'W' : '-');
ret = 0;
} else {
ret = -2;
@@ -327,18 +239,18 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
if (ret < 0) {
if (qemu_log_enabled()) {
qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
- TARGET_FMT_lx ":\n", virtual);
+ TARGET_FMT_lx ":\n", eaddr);
for (i = 0; i < 4; i++) {
BATu = &BATut[i];
BATl = &BATlt[i];
- BEPIu = *BATu & 0xF0000000;
- BEPIl = *BATu & 0x0FFE0000;
- bl = (*BATu & 0x00001FFC) << 15;
+ BEPIu = *BATu & BATU32_BEPIU;
+ BEPIl = *BATu & BATU32_BEPIL;
+ bl = (*BATu & BATU32_BL) << 15;
qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " "
TARGET_FMT_lx "\n", __func__, ifetch ? 'I' : 'D',
- i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl);
+ i, eaddr, *BATu, *BATl, BEPIu, BEPIl, bl);
}
}
}
@@ -346,32 +258,30 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
return ret;
}
-static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
- target_ulong eaddr,
+static int mmu6xx_get_physical_address(CPUPPCState *env, hwaddr *raddr,
+ int *prot, target_ulong eaddr,
+ hwaddr *hashp, bool *keyp,
MMUAccessType access_type, int type)
{
PowerPCCPU *cpu = env_archcpu(env);
hwaddr hash;
- target_ulong vsid, sr, pgidx;
- int ds, target_page_bits;
- bool pr;
+ target_ulong vsid, sr, pgidx, ptem;
+ bool key, ds, nx;
+ bool pr = FIELD_EX64(env->msr, MSR, PR);
/* First try to find a BAT entry if there are any */
- if (env->nb_BATs && get_bat_6xx_tlb(env, ctx, eaddr, access_type) == 0) {
+ if (env->nb_BATs &&
+ get_bat_6xx_tlb(env, raddr, prot, eaddr, access_type, pr) == 0) {
return 0;
}
/* Perform segment based translation when no BATs matched */
- pr = FIELD_EX64(env->msr, MSR, PR);
- ctx->eaddr = eaddr;
-
sr = env->sr[eaddr >> 28];
- ctx->key = (((sr & 0x20000000) && pr) ||
- ((sr & 0x40000000) && !pr)) ? 1 : 0;
- ds = sr & 0x80000000 ? 1 : 0;
- ctx->nx = sr & 0x10000000 ? 1 : 0;
- vsid = sr & 0x00FFFFFF;
- target_page_bits = TARGET_PAGE_BITS;
+ key = ppc_hash32_key(pr, sr);
+ *keyp = key;
+ ds = sr & SR32_T;
+ nx = sr & SR32_NX;
+ vsid = sr & SR32_VSID;
qemu_log_mask(CPU_LOG_MMU,
"Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
" nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
@@ -380,15 +290,15 @@ static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
(int)FIELD_EX64(env->msr, MSR, IR),
(int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
access_type == MMU_DATA_STORE, type);
- pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
+ pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
hash = vsid ^ pgidx;
- ctx->ptem = (vsid << 7) | (pgidx >> 10);
+ ptem = (vsid << 7) | (pgidx >> 10); /* Virtual segment ID | API */
qemu_log_mask(CPU_LOG_MMU, "pte segment: key=%d ds %d nx %d vsid "
- TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid);
+ TARGET_FMT_lx "\n", key, ds, nx, vsid);
if (!ds) {
/* Check if instruction fetch is allowed, if needed */
- if (type == ACCESS_CODE && ctx->nx) {
+ if (type == ACCESS_CODE && nx) {
qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
return -3;
}
@@ -396,13 +306,11 @@ static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx " htab_mask "
HWADDR_FMT_plx " hash " HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
- ctx->hash[0] = hash;
- ctx->hash[1] = ~hash;
+ *hashp = hash;
- /* Initialize real address with an invalid value */
- ctx->raddr = (hwaddr)-1ULL;
/* Software TLB search */
- return ppc6xx_tlb_check(env, ctx, eaddr, access_type);
+ return ppc6xx_tlb_check(env, raddr, prot, eaddr,
+ access_type, ptem, key, nx);
}
/* Direct-store segment : absolutely *BUGGY* for now */
@@ -411,15 +319,6 @@ static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
case ACCESS_INT:
/* Integer load/store : only access allowed */
break;
- case ACCESS_CODE:
- /* No code fetch is allowed in direct-store areas */
- return -4;
- case ACCESS_FLOAT:
- /* Floating point load/store */
- return -4;
- case ACCESS_RES:
- /* lwarx, ldarx or srwcx. */
- return -4;
case ACCESS_CACHE:
/*
* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
@@ -427,19 +326,17 @@ static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
* Should make the instruction a no-op. As it already is a
* no-op, it's quite easy :-)
*/
- ctx->raddr = eaddr;
+ *raddr = eaddr;
return 0;
- case ACCESS_EXT:
- /* eciwx or ecowx */
- return -4;
- default:
- qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need address"
- " translation\n");
+ case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */
+ case ACCESS_FLOAT: /* Floating point load/store */
+ case ACCESS_RES: /* lwarx, ldarx or srwcx. */
+ case ACCESS_EXT: /* eciwx or ecowx */
return -4;
}
- if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
- (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
- ctx->raddr = eaddr;
+ if ((access_type == MMU_DATA_STORE || !key) &&
+ (access_type == MMU_DATA_LOAD || key)) {
+ *raddr = eaddr;
return 2;
}
return -2;
@@ -589,9 +486,9 @@ static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
for (i = 0; i < env->nb_BATs; i++) {
BATu = &BATut[i];
BATl = &BATlt[i];
- BEPIu = *BATu & 0xF0000000;
- BEPIl = *BATu & 0x0FFE0000;
- bl = (*BATu & 0x00001FFC) << 15;
+ BEPIu = *BATu & BATU32_BEPIU;
+ BEPIl = *BATu & BATU32_BEPIL;
+ bl = (*BATu & BATU32_BL) << 15;
qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
TARGET_FMT_lx " " TARGET_FMT_lx "\n",
@@ -777,9 +674,9 @@ static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- mmu_ctx_t ctx;
- int type;
- int ret;
+ hwaddr hash = 0; /* init to 0 to avoid a maybe-uninitialized warning */
+ bool key;
+ int type, ret;
if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) {
return true;
@@ -795,13 +692,9 @@ static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
type = ACCESS_INT;
}
- ctx.prot = 0;
- ctx.hash[0] = 0;
- ctx.hash[1] = 0;
- ret = mmu6xx_get_physical_address(env, &ctx, eaddr, access_type, type);
+ ret = mmu6xx_get_physical_address(env, raddrp, protp, eaddr, &hash, &key,
+ access_type, type);
if (ret == 0) {
- *raddrp = ctx.raddr;
- *protp = ctx.prot;
*psizep = TARGET_PAGE_BITS;
return true;
} else if (!guest_visible) {
@@ -816,7 +709,7 @@ static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
cs->exception_index = POWERPC_EXCP_IFTLB;
env->error_code = 1 << 18;
env->spr[SPR_IMISS] = eaddr;
- env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
+ env->spr[SPR_ICMP] |= 0x80000000;
goto tlb_miss;
case -2:
/* Access rights violation */
@@ -847,13 +740,13 @@ static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr,
env->error_code = 0;
}
env->spr[SPR_DMISS] = eaddr;
- env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
+ env->spr[SPR_DCMP] |= 0x80000000;
tlb_miss:
- env->error_code |= ctx.key << 19;
+ env->error_code |= key << 19;
env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
- get_pteg_offset32(cpu, ctx.hash[0]);
+ get_pteg_offset32(cpu, hash);
env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
- get_pteg_offset32(cpu, ctx.hash[1]);
+ get_pteg_offset32(cpu, ~hash);
break;
case -2:
/* Access rights violation */
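
Before moving on to the next file: the mmu_common.c rework above passes the primary hash and the "ptem" (virtual segment ID | API) through explicit parameters instead of the deleted mmu_ctx_t, and the TLB-miss path now derives SPR_HASH1/SPR_HASH2 from hash and ~hash. A small standalone sketch of how those two values are computed from an effective address and its segment register, following the arithmetic visible in the hunks; the constants are local stand-ins mirroring the ones used there, and this is illustration only, not QEMU code.

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_MASK_256M 0xF0000000u   /* 256 MiB segments (local stand-in) */
#define TARGET_PAGE_BITS  12            /* 4 KiB pages (local stand-in) */
#define SR32_VSID         0x00FFFFFFu   /* VSID field of a segment register */

/* Mirrors the computation in mmu6xx_get_physical_address(). */
static void hash32_hash_and_ptem(uint32_t eaddr, uint32_t sr,
                                 uint32_t *hash, uint32_t *ptem)
{
    uint32_t vsid  = sr & SR32_VSID;
    uint32_t pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;

    *hash = vsid ^ pgidx;                /* primary hash; secondary is ~hash */
    *ptem = (vsid << 7) | (pgidx >> 10); /* virtual segment ID | API */
}

int main(void)
{
    uint32_t hash, ptem;

    hash32_hash_and_ptem(0x1234f000u, 0x00abcdefu, &hash, &ptem);
    printf("hash=0x%08x ~hash=0x%08x ptem=0x%08x\n", hash, ~hash, ptem);
    return 0;
}
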
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index b0a0676..ac60705 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -20,12 +20,13 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
@@ -35,7 +36,7 @@
#include "mmu-radix64.h"
#include "mmu-booke.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
/* #define FLUSH_ALL_TLBS */
@@ -316,7 +317,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
default:
/* Should never reach here with other MMU models */
- assert(0);
+ g_assert_not_reached();
}
#else
ppc_tlb_invalidate_all(env);
diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c
index db9ee8e..2a7a5b4 100644
--- a/target/ppc/power8-pmu.c
+++ b/target/ppc/power8-pmu.c
@@ -13,7 +13,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c
index a25d86a..7022564 100644
--- a/target/ppc/ppc-qmp-cmds.c
+++ b/target/ppc/ppc-qmp-cmds.c
@@ -28,7 +28,8 @@
#include "qemu/ctype.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu-models.h"
#include "cpu-qom.h"
@@ -175,6 +176,15 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
return -EINVAL;
}
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
diff --git a/target/ppc/spr_common.h b/target/ppc/spr_common.h
index 01aff44..84c910c 100644
--- a/target/ppc/spr_common.h
+++ b/target/ppc/spr_common.h
@@ -165,6 +165,8 @@ void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn);
void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn);
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
@@ -204,6 +206,8 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn);
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn);
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn);
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn);
void spr_read_ppr32(DisasContext *ctx, int sprn, int gprn);
void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn);
diff --git a/target/ppc/tcg-excp_helper.c b/target/ppc/tcg-excp_helper.c
new file mode 100644
index 0000000..f835be5
--- /dev/null
+++ b/target/ppc/tcg-excp_helper.c
@@ -0,0 +1,851 @@
+/*
+ * PowerPC exception emulation helpers for QEMU (TCG specific)
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "target/ppc/cpu.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "exec/helper-proto.h"
+#include "system/runstate.h"
+
+#include "helper_regs.h"
+#include "hw/ppc/ppc.h"
+#include "internal.h"
+#include "cpu.h"
+#include "trace.h"
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+
+#ifdef TARGET_PPC64
+void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+#endif /* TARGET_PPC64 */
+
+static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
+{
+ const uint16_t c = 0xfffc;
+ const uint64_t z0 = 0xfa2561cdf44ac398ULL;
+ uint16_t z = 0, temp;
+ uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
+
+ for (int i = 3; i >= 0; i--) {
+ k[i] = key & 0xffff;
+ key >>= 16;
+ }
+ xleft[0] = x & 0xffff;
+ xright[0] = (x >> 16) & 0xffff;
+
+ for (int i = 0; i < 28; i++) {
+ z = (z0 >> (63 - i)) & 1;
+ temp = ror16(k[i + 3], 3) ^ k[i + 1];
+ k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
+ }
+
+ for (int i = 0; i < 8; i++) {
+ eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
+ eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
+ eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
+ eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
+ }
+
+ for (int i = 0; i < 32; i++) {
+ fxleft[i] = (rol16(xleft[i], 1) &
+ rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
+ xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
+ xright[i + 1] = xleft[i];
+ }
+
+ return (((uint32_t)xright[32]) << 16) | xleft[32];
+}
+
+static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
+{
+ uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
+ uint64_t stage1_h, stage1_l;
+
+ for (int i = 0; i < 4; i++) {
+ stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
+ stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
+ stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
+ stage0_l |= (ra & 0xff) << (8 * 2 * i);
+ rb >>= 8;
+ ra >>= 8;
+ }
+
+ stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
+ stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
+ stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
+ stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
+
+ return stage1_h ^ stage1_l;
+}
+
+static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
+ target_ulong rb, uint64_t key, bool store)
+{
+ uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
+
+ if (store) {
+ cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
+ } else {
+ loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
+ if (loaded_hash != calculated_hash) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+ }
+}
+
+#include "qemu/guest-random.h"
+
+#ifdef TARGET_PPC64
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ if (env->msr & R_MSR_PR_MASK) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_HV_MASK)) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_S_MASK)) { \
+ if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
+ return; \
+ } \
+ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#else
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#endif /* TARGET_PPC64 */
+
+HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
+HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
+HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
+HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
+
+#ifndef CONFIG_USER_ONLY
+
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUPPCState *env = cpu_env(cs);
+ uint32_t insn;
+
+ /* Restore state and reload the insn we executed, for filling in DSISR. */
+ cpu_restore_state(cs, retaddr);
+ insn = ppc_ldl_code(env, env->nip);
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_4xx:
+ env->spr[SPR_40x_DEAR] = vaddr;
+ break;
+ case POWERPC_MMU_BOOKE:
+ case POWERPC_MMU_BOOKE206:
+ env->spr[SPR_BOOKE_DEAR] = vaddr;
+ break;
+ default:
+ env->spr[SPR_DAR] = vaddr;
+ break;
+ }
+
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = insn & 0x03FF0000;
+ cpu_loop_exit(cs);
+}
+
+void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr vaddr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ CPUPPCState *env = cpu_env(cs);
+
+ switch (env->excp_model) {
+#if defined(TARGET_PPC64)
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ case POWERPC_EXCP_POWER11:
+ /*
+ * Machine check codes can be found in processor User Manual or
+ * Linux or skiboot source.
+ */
+ if (access_type == MMU_DATA_LOAD) {
+ env->spr[SPR_DAR] = vaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(57);
+ env->error_code = PPC_BIT(42);
+
+ } else if (access_type == MMU_DATA_STORE) {
+ /*
+ * MCE for stores in POWER is asynchronous so hardware does
+ * not set DAR, but QEMU can do better.
+ */
+ env->spr[SPR_DAR] = vaddr;
+ env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
+ env->error_code |= PPC_BIT(42);
+
+ } else { /* Fetch */
+ /*
+ * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
+ * the instruction, so that must always be clear for fetches.
+ */
+ env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
+ }
+ break;
+#endif
+ default:
+ /*
+ * TODO: Check behaviour for other CPUs; for now, do nothing.
+ * Could add a basic MCE even if real hardware ignores it.
+ */
+ return;
+ }
+
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void ppc_cpu_debug_excp_handler(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(41);
+ cs->watchpoint_hit = NULL;
+ raise_exception(env, POWERPC_EXCP_DSI);
+ }
+ cs->watchpoint_hit = NULL;
+ } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
+ raise_exception_err(env, POWERPC_EXCP_TRACE,
+ PPC_BIT(33) | PPC_BIT(43));
+ }
+ }
+#endif
+}
+
+bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ target_ulong priv;
+
+ priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
+ switch (priv) {
+ case 0x1: /* problem */
+ return env->msr & ((target_ulong)1 << MSR_PR);
+ case 0x2: /* supervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ !(env->msr & ((target_ulong)1 << MSR_HV)));
+ case 0x3: /* hypervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ (env->msr & ((target_ulong)1 << MSR_HV)));
+ default:
+ g_assert_not_reached();
+ }
+ }
+#endif
+
+ return false;
+}
+
+bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+ bool wt, wti, hv, sv, pr;
+ uint32_t dawrx;
+
+ if ((env->insns_flags2 & PPC2_ISA207S) &&
+ (wp == env->dawr_watchpoint[0])) {
+ dawrx = env->spr[SPR_DAWRX0];
+ } else if ((env->insns_flags2 & PPC2_ISA310) &&
+ (wp == env->dawr_watchpoint[1])) {
+ dawrx = env->spr[SPR_DAWRX1];
+ } else {
+ return false;
+ }
+
+ wt = extract32(dawrx, PPC_BIT_NR(59), 1);
+ wti = extract32(dawrx, PPC_BIT_NR(60), 1);
+ hv = extract32(dawrx, PPC_BIT_NR(61), 1);
+ sv = extract32(dawrx, PPC_BIT_NR(62), 1);
+ pr = extract32(dawrx, PPC_BIT_NR(62), 1);
+
+ if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
+ return false;
+ } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
+ return false;
+ } else if (!sv) {
+ return false;
+ }
+
+ if (!wti) {
+ if (env->msr & ((target_ulong)1 << MSR_DR)) {
+ return wt;
+ } else {
+ return !wt;
+ }
+ }
+
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * This stops the machine and logs CPU state without killing QEMU (like
+ * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
+ * so the machine can still be debugged.
+ */
+G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
+{
+ CPUState *cs = env_cpu(env);
+ FILE *f;
+
+ f = qemu_log_trylock();
+ if (f) {
+ fprintf(f, "Entering checkstop state: %s\n", reason);
+ cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+ qemu_log_unlock(f);
+ }
+
+ /*
+ * This stops the machine and logs CPU state without killing QEMU
+ * (like cpu_abort()) so the machine can still be debugged (because
+ * it is often a guest error).
+ */
+ qemu_system_guest_panicked(NULL);
+ cpu_loop_exit_noexc(cs);
+}
+
+/* Return true iff byteswap is needed to load instruction */
+static inline bool insn_need_byteswap(CPUArchState *env)
+{
+ /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
+ return !!(env->msr & ((target_ulong)1 << MSR_LE));
+}
+
+uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
+{
+ uint32_t insn = cpu_ldl_code(env, addr);
+
+ if (insn_need_byteswap(env)) {
+ insn = bswap32(insn);
+ }
+
+ return insn;
+}
+
+#if defined(TARGET_PPC64)
+void helper_attn(CPUPPCState *env)
+{
+ /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
+ if ((*env->check_attn)(env)) {
+ powerpc_checkstop(env, "host executed attn");
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_HV_EMU,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+}
+
+void helper_scv(CPUPPCState *env, uint32_t lev)
+{
+ if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
+ raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
+ }
+}
+
+void helper_pminsn(CPUPPCState *env, uint32_t insn)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+
+ /* Condition for waking up at 0x100 */
+ env->resume_as_sreset = (insn != PPC_PM_STOP) ||
+ (env->spr[SPR_PSSCR] & PSSCR_EC);
+
+ /* HDECR must not wake the CPU from PM state; it may have already fired */
+ if (env->resume_as_sreset) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+ }
+
+ ppc_maybe_interrupt(env);
+}
+
+#endif /* TARGET_PPC64 */
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+ uint32_t excp = hreg_store_msr(env, val, 0);
+
+ if (excp != 0) {
+ cpu_interrupt_exittb(env_cpu(env));
+ raise_exception(env, excp);
+ }
+}
+
+void helper_ppc_maybe_interrupt(CPUPPCState *env)
+{
+ ppc_maybe_interrupt(env);
+}
+
+static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+ /* MSR:POW cannot be set by any form of rfi */
+ msr &= ~(1ULL << MSR_POW);
+
+ /* MSR:TGPR cannot be set by any form of rfi */
+ if (env->flags & POWERPC_FLAG_TGPR) {
+ msr &= ~(1ULL << MSR_TGPR);
+ }
+
+#ifdef TARGET_PPC64
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ nip = (uint32_t)nip;
+ }
+#else
+ nip = (uint32_t)nip;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+ trace_ppc_excp_rfi(env->nip, env->msr);
+ /*
+ * No need to raise an exception here, as rfi is always the last
+ * insn of a TB
+ */
+ cpu_interrupt_exittb(env_cpu(env));
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /* Context synchronizing: check if TCG TLB needs flush */
+ check_tlb_flush(env, false);
+}
+
+void helper_rfi(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#ifdef TARGET_PPC64
+void helper_rfid(CPUPPCState *env)
+{
+ /*
+ * The architecture defines a number of rules for which bits can
+ * change but in practice, we handle this in hreg_store_msr()
+ * which will be called by do_rfi(), so there is no need to filter
+ * here
+ */
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_rfscv(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+
+void helper_rfebb(CPUPPCState *env, target_ulong s)
+{
+ target_ulong msr = env->msr;
+
+ /*
+ * Handling of BESCR bits 32:33 according to PowerISA v3.1:
+ *
+ * "If BESCR 32:33 != 0b00 the instruction is treated as if
+ * the instruction form were invalid."
+ */
+ if (env->spr[SPR_BESCR] & BESCR_INVALID) {
+ raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+
+ env->nip = env->spr[SPR_EBBRR];
+
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ env->nip = (uint32_t)env->spr[SPR_EBBRR];
+ }
+
+ if (s) {
+ env->spr[SPR_BESCR] |= BESCR_GE;
+ } else {
+ env->spr[SPR_BESCR] &= ~BESCR_GE;
+ }
+}
+
+/*
+ * Triggers or queues an 'ebb_excp' EBB exception. All checks
+ * but FSCR, HFSCR and msr_pr must be done beforehand.
+ *
+ * PowerISA v3.1 isn't clear about whether an EBB should be
+ * postponed or cancelled if the EBB facility is unavailable.
+ * Our assumption here is that the EBB is cancelled if both
+ * FSCR and HFSCR EBB facilities aren't available.
+ */
+static void do_ebb(CPUPPCState *env, int ebb_excp)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ /*
+ * FSCR_EBB and FSCR_IC_EBB are the same bits used with
+ * HFSCR.
+ */
+ helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
+ helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
+
+ if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_PMEO;
+ } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_EEO;
+ }
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ powerpc_excp(cpu, ebb_excp);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
+ }
+}
+
+void raise_ebb_perfm_exception(CPUPPCState *env)
+{
+ bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
+ env->spr[SPR_BESCR] & BESCR_PME &&
+ env->spr[SPR_BESCR] & BESCR_GE;
+
+ if (!perfm_ebb_enabled) {
+ return;
+ }
+
+ do_ebb(env, POWERPC_EXCP_PERFM_EBB);
+}
+#endif /* TARGET_PPC64 */
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+
+/* Embedded.Processor Control */
+static int dbell2irq(target_ulong rb)
+{
+ int msg = rb & DBELL_TYPE_MASK;
+ int irq = -1;
+
+ switch (msg) {
+ case DBELL_TYPE_DBELL:
+ irq = PPC_INTERRUPT_DOORBELL;
+ break;
+ case DBELL_TYPE_DBELL_CRIT:
+ irq = PPC_INTERRUPT_CDOORBELL;
+ break;
+ case DBELL_TYPE_G_DBELL:
+ case DBELL_TYPE_G_DBELL_CRIT:
+ case DBELL_TYPE_G_DBELL_MC:
+ /* XXX implement */
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+void helper_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+
+ if (irq < 0) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), irq, 0);
+}
+
+void helper_msgsnd(target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+ int pir = rb & DBELL_PIRTAG_MASK;
+ CPUState *cs;
+
+ if (irq < 0) {
+ return;
+ }
+
+ bql_lock();
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ }
+ bql_unlock();
+}
+
+/* Server Processor Control */
+
+static bool dbell_type_server(target_ulong rb)
+{
+ /*
+ * A Directed Hypervisor Doorbell message is sent only if the
+ * message type is 5. All other types are reserved and the
+ * instruction is a no-op
+ */
+ return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
+}
+
+static inline bool dbell_bcast_core(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
+}
+
+static inline bool dbell_bcast_subproc(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
+}
+
+/*
+ * Send an interrupt to a thread in the same core as env.
+ */
+static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ if (ppc_cpu_lpar_single_threaded(cs)) {
+ if (target_tir == 0) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ } else {
+ CPUState *ccs;
+
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ if (target_tir == ppc_cpu_tir(ccpu)) {
+ ppc_set_irq(ccpu, irq, 1);
+ break;
+ }
+ }
+ bql_unlock();
+ }
+}
+
+void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
+}
+
+void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
+{
+ int pir = rb & DBELL_PROCIDTAG_MASK;
+ bool brdcast = false;
+ CPUState *cs, *ccs;
+ PowerPCCPU *cpu;
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
+ if (!(env->insns_flags2 & PPC2_ISA300)) {
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
+ return;
+ }
+
+ /* POWER9 and later msgsnd is a global (targets any thread) */
+ cpu = ppc_get_vcpu_by_pir(pir);
+ if (!cpu) {
+ return;
+ }
+ cs = CPU(cpu);
+
+ if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
+ (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
+ brdcast = true;
+ }
+
+ if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
+ return;
+ }
+
+ /*
+ * Why is bql needed for walking CPU list? Answer seems to be because ppc
+ * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
+ * so could this be removed?
+ */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
+ }
+ bql_unlock();
+}
+
+#ifdef TARGET_PPC64
+void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
+}
+
+/*
+ * sends a message to another thread on the same
+ * multi-threaded processor
+ */
+void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
+}
+#endif /* TARGET_PPC64 */
+
+/* Single-step tracing */
+void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
+{
+ uint32_t error_code = 0;
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
+ env->spr[SPR_POWER_SIAR] = prev_ip;
+ error_code = PPC_BIT(33);
+ }
+ raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
+}
+#endif /* !CONFIG_USER_ONLY */
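
A side note on the tw/td helpers near the top of this new file: the trap condition is written as a doubly negated likely() around the five TO-field tests. Written positively in plain C (illustration only, not QEMU code), the 32-bit variant amounts to the following.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * True when a 32-bit trap (tw) should fire: each TO bit selects one
 * comparison, and the trap is taken if any selected comparison holds.
 */
static bool tw_should_trap(uint32_t a, uint32_t b, uint32_t to)
{
    return ((int32_t)a < (int32_t)b && (to & 0x10)) ||
           ((int32_t)a > (int32_t)b && (to & 0x08)) ||
           (a == b                  && (to & 0x04)) ||
           (a < b                   && (to & 0x02)) ||
           (a > b                   && (to & 0x01));
}

int main(void)
{
    assert(tw_should_trap(1, 2, 0x10));            /* signed a < b */
    assert(!tw_should_trap(2, 1, 0x10));
    assert(tw_should_trap(0xffffffffu, 1, 0x01));  /* unsigned a > b */
    assert(!tw_should_trap(0xffffffffu, 1, 0x08)); /* signed a is -1, not > 1 */
    return 0;
}
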
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
index 39d3974..7209b41 100644
--- a/target/ppc/timebase_helper.c
+++ b/target/ppc/timebase_helper.c
@@ -20,7 +20,6 @@
#include "cpu.h"
#include "hw/ppc/ppc.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
@@ -62,9 +61,8 @@ void helper_store_purr(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_purr(env, val);
return;
}
@@ -81,9 +79,8 @@ void helper_store_tbl(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_tbl(env, val);
return;
}
@@ -98,9 +95,8 @@ void helper_store_tbu(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_tbu(env, val);
return;
}
@@ -140,9 +136,8 @@ void helper_store_hdecr(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_hdecr(env, val);
return;
}
@@ -157,9 +152,8 @@ void helper_store_vtb(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_vtb(env, val);
return;
}
@@ -174,9 +168,8 @@ void helper_store_tbu40(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
CPUState *ccs;
- uint32_t nr_threads = cs->nr_threads;
- if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ if (ppc_cpu_lpar_single_threaded(cs)) {
cpu_ppc_store_tbu40(env, val);
return;
}
@@ -217,7 +210,14 @@ void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
store_booke_tsr(env, val);
}
-#if defined(TARGET_PPC64)
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+/*
+ * qemu-user breaks with pnv headers, so they go under ifdefs for now.
+ * A cleanup may be to move PowerNV-specific registers and helpers into
+ * target/ppc/pnv_helper.c
+ */
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_chip.h"
/*
* POWER processor Timebase Facility
*/
@@ -287,7 +287,7 @@ static void write_tfmr(CPUPPCState *env, target_ulong val)
{
CPUState *cs = env_cpu(env);
- if (cs->nr_threads == 1) {
+ if (ppc_cpu_core_single_threaded(cs)) {
env->spr[SPR_TFMR] = val;
} else {
CPUState *ccs;
@@ -298,8 +298,25 @@ static void write_tfmr(CPUPPCState *env, target_ulong val)
}
}
+static PnvCoreTODState *cpu_get_tbst(PowerPCCPU *cpu)
+{
+ PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
+
+ if (pc->big_core && pc->tod_state.big_core_quirk) {
+ /* Must operate on the even small core */
+ int core_id = CPU_CORE(pc)->core_id;
+ if (core_id & 1) {
+ pc = pc->chip->cores[core_id & ~1];
+ }
+ }
+
+ return &pc->tod_state;
+}
+
static void tb_state_machine_step(CPUPPCState *env)
{
+ PowerPCCPU *cpu = env_archcpu(env);
+ PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
uint64_t tfmr = env->spr[SPR_TFMR];
unsigned int tbst = tfmr_get_tb_state(tfmr);
@@ -307,15 +324,15 @@ static void tb_state_machine_step(CPUPPCState *env)
return;
}
- if (env->pnv_tod_tbst.tb_sync_pulse_timer) {
- env->pnv_tod_tbst.tb_sync_pulse_timer--;
+ if (tod_state->tb_sync_pulse_timer) {
+ tod_state->tb_sync_pulse_timer--;
} else {
tfmr |= TFMR_TB_SYNC_OCCURED;
write_tfmr(env, tfmr);
}
- if (env->pnv_tod_tbst.tb_state_timer) {
- env->pnv_tod_tbst.tb_state_timer--;
+ if (tod_state->tb_state_timer) {
+ tod_state->tb_state_timer--;
return;
}
@@ -332,20 +349,20 @@ static void tb_state_machine_step(CPUPPCState *env)
} else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
if (tbst == TBST_SYNC_WAIT) {
tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD);
- env->pnv_tod_tbst.tb_state_timer = 3;
+ tod_state->tb_state_timer = 3;
} else if (tbst == TBST_GET_TOD) {
- if (env->pnv_tod_tbst.tod_sent_to_tb) {
+ if (tod_state->tod_sent_to_tb) {
tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING);
tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
- env->pnv_tod_tbst.tb_ready_for_tod = 0;
- env->pnv_tod_tbst.tod_sent_to_tb = 0;
+ tod_state->tb_ready_for_tod = 0;
+ tod_state->tod_sent_to_tb = 0;
}
} else {
qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
"state machine in invalid state 0x%x\n", tbst);
tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
- env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ tod_state->tb_ready_for_tod = 0;
}
}
@@ -361,6 +378,8 @@ target_ulong helper_load_tfmr(CPUPPCState *env)
void helper_store_tfmr(CPUPPCState *env, target_ulong val)
{
+ PowerPCCPU *cpu = env_archcpu(env);
+ PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
uint64_t tfmr = env->spr[SPR_TFMR];
uint64_t clear_on_write;
unsigned int tbst = tfmr_get_tb_state(tfmr);
@@ -384,14 +403,7 @@ void helper_store_tfmr(CPUPPCState *env, target_ulong val)
* after the second mfspr.
*/
tfmr &= ~TFMR_TB_SYNC_OCCURED;
- env->pnv_tod_tbst.tb_sync_pulse_timer = 1;
-
- if (ppc_cpu_tir(env_archcpu(env)) != 0 &&
- (val & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB))) {
- qemu_log_mask(LOG_UNIMP, "TFMR timebase state machine can only be "
- "driven by thread 0\n");
- goto out;
- }
+ tod_state->tb_sync_pulse_timer = 1;
if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) ==
(TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) {
@@ -399,7 +411,7 @@ void helper_store_tfmr(CPUPPCState *env, target_ulong val)
"MOVE_CHIP_TOD_TO_TB both set\n");
tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
- env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ tod_state->tb_ready_for_tod = 0;
goto out;
}
@@ -413,8 +425,8 @@ void helper_store_tfmr(CPUPPCState *env, target_ulong val)
tfmr &= ~TFMR_LOAD_TOD_MOD;
tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */
- env->pnv_tod_tbst.tb_ready_for_tod = 0;
- env->pnv_tod_tbst.tod_sent_to_tb = 0;
+ tod_state->tb_ready_for_tod = 0;
+ tod_state->tod_sent_to_tb = 0;
goto out;
}
@@ -427,19 +439,19 @@ void helper_store_tfmr(CPUPPCState *env, target_ulong val)
if (tfmr & TFMR_LOAD_TOD_MOD) {
/* Wait for an arbitrary 3 mfspr until the next state transition. */
- env->pnv_tod_tbst.tb_state_timer = 3;
+ tod_state->tb_state_timer = 3;
} else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
if (tbst == TBST_NOT_SET) {
tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT);
- env->pnv_tod_tbst.tb_ready_for_tod = 1;
- env->pnv_tod_tbst.tb_state_timer = 3; /* arbitrary */
+ tod_state->tb_ready_for_tod = 1;
+ tod_state->tb_state_timer = 3; /* arbitrary */
} else {
qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
"not in TB not set state 0x%x\n",
tbst);
tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
- env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ tod_state->tb_ready_for_tod = 0;
}
}
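
The timebase_helper.c hunks above mechanically replace the repeated "nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)" test with a single ppc_cpu_lpar_single_threaded(cs) call. Assuming the new helper simply encapsulates that same condition (which is what the one-for-one substitution suggests; its definition is not part of this excerpt), the predicate it stands for looks like this; the flag value is a local stand-in.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FLAG_SMT_1LPAR (1u << 0)  /* local stand-in for POWERPC_FLAG_SMT_1LPAR */

/*
 * True when a timebase/PURR/VTB store only affects the current vCPU:
 * either the core runs a single thread, or threads are not grouped
 * into one LPAR, so no sibling broadcast is needed.
 */
static bool lpar_single_threaded(unsigned nr_threads, uint32_t flags)
{
    return nr_threads == 1 || !(flags & FLAG_SMT_1LPAR);
}

int main(void)
{
    assert(lpar_single_threaded(1, FLAG_SMT_1LPAR));   /* single thread */
    assert(lpar_single_threaded(8, 0));                /* no 1LPAR grouping */
    assert(!lpar_single_threaded(8, FLAG_SMT_1LPAR));  /* must broadcast */
    return 0;
}
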
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 0bc16d7..27f90c3 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
@@ -30,6 +30,7 @@
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
#include "spr_common.h"
@@ -178,6 +179,7 @@ struct DisasContext {
/* Translation flags */
MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
+ powerpc_excp_t excp_model;
bool sf_mode;
bool has_cfar;
bool has_bhrb;
@@ -635,6 +637,18 @@ void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
translator_io_start(&ctx->base);
gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
}
+
+void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawr1(tcg_env, cpu_gpr[gprn]);
+}
+
+void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawrx1(tcg_env, cpu_gpr[gprn]);
+}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
/* CTR */
@@ -1324,6 +1338,22 @@ void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
translator_io_start(&ctx->base);
gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
+
+void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_load_pmsr(cpu_gpr[gprn], tcg_env);
+}
+
+void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_pmcr(tcg_env, cpu_gpr[gprn]);
+}
+
#endif /* !defined(CONFIG_USER_ONLY) */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
@@ -1587,16 +1617,13 @@ static opc_handler_t invalid_handler = {
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_movi_tl(t0, CRF_EQ);
- tcg_gen_movi_tl(t1, CRF_LT);
tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
- t0, arg0, arg1, t1, t0);
- tcg_gen_movi_tl(t1, CRF_GT);
+ t0, arg0, arg1,
+ tcg_constant_tl(CRF_LT), tcg_constant_tl(CRF_EQ));
tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
- t0, arg0, arg1, t1, t0);
+ t0, arg0, arg1, tcg_constant_tl(CRF_GT), t0);
tcg_gen_trunc_tl_i32(t, t0);
tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
@@ -1718,11 +1745,10 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_mov_tl(ca32, ca);
}
} else {
- TCGv zero = tcg_constant_tl(0);
if (add_ca) {
- tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
- tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
+ tcg_gen_addcio_tl(t0, ca, arg1, arg2, ca);
} else {
+ TCGv zero = tcg_constant_tl(0);
tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
}
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
@@ -1822,7 +1848,7 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret,
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- if (unlikely(Rc(ctx->opcode) != 0)) {
+ if (unlikely(compute_rc0)) {
gen_set_Rc0(ctx, ret);
}
}
@@ -1921,11 +1947,9 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
}
} else if (add_ca) {
- TCGv zero, inv1 = tcg_temp_new();
+ TCGv inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
- zero = tcg_constant_tl(0);
- tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
- tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
+ tcg_gen_addcio_tl(t0, cpu_ca, arg2, inv1, cpu_ca);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
} else {
tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
@@ -2542,6 +2566,7 @@ static inline void gen_align_no_le(DisasContext *ctx)
(ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
+/* EA <- {(ra == 0) ? 0 : GPR[ra]} + displ */
static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
{
TCGv ea = tcg_temp_new();
@@ -2556,6 +2581,22 @@ static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
return ea;
}
+#if defined(TARGET_PPC64)
+/* EA <- (ra == 0) ? 0 : GPR[ra] */
+static TCGv do_ea_calc_ra(DisasContext *ctx, int ra)
+{
+ TCGv EA = tcg_temp_new();
+ if (!ra) {
+ tcg_gen_movi_tl(EA, 0);
+ } else if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[ra]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[ra]);
+ }
+ return EA;
+}
+#endif
+
/*** Integer load ***/
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
@@ -2956,8 +2997,8 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
/* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
- tcg_gen_movi_tl(u, 1 << (memop_size(memop) * 8 - 1));
- tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
+ tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t,
+ tcg_constant_tl(1 << (memop_size(memop) * 8 - 1)));
}
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
@@ -3583,7 +3624,6 @@ static void pmu_count_insns(DisasContext *ctx)
#else
static void pmu_count_insns(DisasContext *ctx)
{
- return;
}
#endif /* #if defined(TARGET_PPC64) */
@@ -4445,27 +4485,29 @@ static void gen_dcblc(DisasContext *ctx)
/* dcbz */
static void gen_dcbz(DisasContext *ctx)
{
- TCGv tcgv_addr;
- TCGv_i32 tcgv_op;
+ TCGv tcgv_addr = tcg_temp_new();
gen_set_access_type(ctx, ACCESS_CACHE);
- tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbz(tcg_env, tcgv_addr, tcgv_op);
+
+#ifdef TARGET_PPC64
+ if (ctx->excp_model == POWERPC_EXCP_970 && !(ctx->opcode & 0x00200000)) {
+ gen_helper_dcbzl(tcg_env, tcgv_addr);
+ return;
+ }
+#endif
+
+ gen_helper_dcbz(tcg_env, tcgv_addr, tcg_constant_i32(ctx->mem_idx));
}
/* dcbzep */
static void gen_dcbzep(DisasContext *ctx)
{
- TCGv tcgv_addr;
- TCGv_i32 tcgv_op;
+ TCGv tcgv_addr = tcg_temp_new();
gen_set_access_type(ctx, ACCESS_CACHE);
- tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbzep(tcg_env, tcgv_addr, tcgv_op);
+ gen_helper_dcbz(tcg_env, tcgv_addr, tcg_constant_i32(PPC_TLB_EPID_STORE));
}
/* dst / dstt */
@@ -5538,16 +5580,6 @@ static inline void set_fpr(int regno, TCGv_i64 src)
tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
}
-static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
-{
- tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
-}
-
-static inline void set_avr64(int regno, TCGv_i64 src, bool high)
-{
- tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
-}
-
/*
* Helpers for decodetree used by !function for decoding arguments.
*/
@@ -6416,8 +6448,6 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
opc_handler_t **table, *handler;
uint32_t inval;
- ctx->opcode = insn;
-
LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
ctx->le_mode ? "little" : "big");
@@ -6486,6 +6516,7 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
ctx->flags = env->flags;
#if defined(TARGET_PPC64)
+ ctx->excp_model = env->excp_model;
ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
ctx->has_bhrb = !!(env->flags & POWERPC_FLAG_BHRB);
@@ -6550,6 +6581,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next = pc += 4;
if (!is_prefix_insn(ctx, insn)) {
+ ctx->opcode = insn;
ok = (decode_insn32(ctx, insn) ||
decode_legacy(cpu, ctx, insn));
} else if ((pc & 63) == 0) {
@@ -6661,8 +6693,8 @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
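
One of the translate.c cleanups above, gen_op_cmp(), replaces two movi+movcond pairs with tcg_constant_tl() operands while keeping the same result. In plain C, the value the two movconds select for a signed compare is shown below; the CRF_* values are local stand-ins laid out as the architectural 4-bit CR field (LT, GT, EQ, SO from msb to lsb), and the summary-overflow bit is presumably OR-ed in just below the lines visible in the hunk.

#include <assert.h>
#include <stdint.h>

/* 4-bit CR field layout, most significant bit first: LT GT EQ SO. */
#define CRF_LT 8
#define CRF_GT 4
#define CRF_EQ 2
#define CRF_SO 1

/* What the two movconds in gen_op_cmp() compute for a signed compare. */
static uint32_t crf_signed_cmp(int64_t a, int64_t b, int so)
{
    uint32_t crf = (a < b) ? CRF_LT : CRF_EQ;  /* first movcond */
    if (a > b) {                               /* second movcond */
        crf = CRF_GT;
    }
    /* SO folded in afterwards (assumption: done just past the hunk). */
    return crf | (so ? CRF_SO : 0);
}

int main(void)
{
    assert(crf_signed_cmp(-1, 1, 0) == CRF_LT);
    assert(crf_signed_cmp(5, 5, 1) == (CRF_EQ | CRF_SO));
    assert(crf_signed_cmp(7, 2, 0) == CRF_GT);
    return 0;
}
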
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 8084af7..92d6e8c 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -14,25 +14,39 @@ static inline TCGv_ptr gen_avr_ptr(int reg)
return r;
}
+static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
+{
+ tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
+}
+
+static inline void set_avr64(int regno, TCGv_i64 src, bool high)
+{
+ tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
+}
+
+static inline void get_avr_full(TCGv_i128 dst, int regno)
+{
+ tcg_gen_ld_i128(dst, tcg_env, avr_full_offset(regno));
+}
+
+static inline void set_avr_full(int regno, TCGv_i128 src)
+{
+ tcg_gen_st_i128(src, tcg_env, avr_full_offset(regno));
+}
+
static bool trans_LVX(DisasContext *ctx, arg_X *a)
{
TCGv EA;
- TCGv_i64 avr;
+ TCGv_i128 avr;
REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
REQUIRE_VECTOR(ctx);
gen_set_access_type(ctx, ACCESS_INT);
- avr = tcg_temp_new_i64();
+ avr = tcg_temp_new_i128();
EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
tcg_gen_andi_tl(EA, EA, ~0xf);
- /*
- * We only need to swap high and low halves. gen_qemu_ld64_i64
- * does necessary 64-bit byteswap already.
- */
- gen_qemu_ld64_i64(ctx, avr, EA);
- set_avr64(a->rt, avr, !ctx->le_mode);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64_i64(ctx, avr, EA);
- set_avr64(a->rt, avr, ctx->le_mode);
+ tcg_gen_qemu_ld_i128(avr, EA, ctx->mem_idx,
+ DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR));
+ set_avr_full(a->rt, avr);
return true;
}
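
The trans_LVX() change above replaces two byte-swapped 64-bit loads with a single 128-bit tcg_gen_qemu_ld_i128(), and keeps the lvx rule that the low four bits of the effective address are ignored. A tiny host-side sketch of that alignment rule (illustration only; it does not model the guest byte order handled by DEF_MEMOP()):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* lvx ignores the low 4 bits of the computed effective address. */
static uintptr_t lvx_effective_addr(uintptr_t ra_plus_rb)
{
    return ra_plus_rb & ~(uintptr_t)0xf;
}

int main(void)
{
    _Alignas(16) uint8_t mem[32];
    uint8_t vr[16];

    for (int i = 0; i < 32; i++) {
        mem[i] = (uint8_t)i;
    }
    /* An access at offset 0x13 is aligned down to 0x10 before the load. */
    uintptr_t ea = lvx_effective_addr((uintptr_t)mem + 0x13);
    assert(ea == (uintptr_t)mem + 0x10);

    /* One 16-byte copy, standing in for the single i128 load. */
    memcpy(vr, (const void *)ea, 16);
    assert(vr[0] == 0x10 && vr[15] == 0x1f);
    return 0;
}
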
@@ -46,22 +60,16 @@ static bool trans_LVXL(DisasContext *ctx, arg_LVXL *a)
static bool trans_STVX(DisasContext *ctx, arg_STVX *a)
{
TCGv EA;
- TCGv_i64 avr;
+ TCGv_i128 avr;
REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
REQUIRE_VECTOR(ctx);
gen_set_access_type(ctx, ACCESS_INT);
- avr = tcg_temp_new_i64();
+ avr = tcg_temp_new_i128();
EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
tcg_gen_andi_tl(EA, EA, ~0xf);
- /*
- * We only need to swap high and low halves. gen_qemu_st64_i64
- * does necessary 64-bit byteswap already.
- */
- get_avr64(avr, a->rt, !ctx->le_mode);
- gen_qemu_st64_i64(ctx, avr, EA);
- tcg_gen_addi_tl(EA, EA, 8);
- get_avr64(avr, a->rt, ctx->le_mode);
- gen_qemu_st64_i64(ctx, avr, EA);
+ get_avr_full(avr, a->rt);
+ tcg_gen_qemu_st_i128(avr, EA, ctx->mem_idx,
+ DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR));
return true;
}
@@ -986,8 +994,8 @@ static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
{
TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
- REQUIRE_VECTOR(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
ah = tcg_temp_new_i64();
al = tcg_temp_new_i64();
@@ -1047,58 +1055,6 @@ TRANS(VRLQ, do_vector_rotl_quad, false, false)
TRANS(VRLQNM, do_vector_rotl_quad, true, false)
TRANS(VRLQMI, do_vector_rotl_quad, false, true)
-#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \
-static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \
- TCGv_vec sat, TCGv_vec a, \
- TCGv_vec b) \
-{ \
- TCGv_vec x = tcg_temp_new_vec_matching(t); \
- glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b); \
- glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \
- tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \
- tcg_gen_or_vec(VECE, sat, sat, x); \
-} \
-static void glue(gen_, NAME)(DisasContext *ctx) \
-{ \
- static const TCGOpcode vecop_list[] = { \
- glue(glue(INDEX_op_, NORM), _vec), \
- glue(glue(INDEX_op_, SAT), _vec), \
- INDEX_op_cmp_vec, 0 \
- }; \
- static const GVecGen4 g = { \
- .fniv = glue(glue(gen_, NAME), _vec), \
- .fno = glue(gen_helper_, NAME), \
- .opt_opc = vecop_list, \
- .write_aofs = true, \
- .vece = VECE, \
- }; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)), \
- offsetof(CPUPPCState, vscr_sat), \
- avr_full_offset(rA(ctx->opcode)), \
- avr_full_offset(rB(ctx->opcode)), \
- 16, 16, &g); \
-}
-
-GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
-GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \
- vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
-GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
-GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
- vmul10euq, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
-GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
-GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
-GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
-GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
-GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
-GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
-GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
-GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
-GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
@@ -2641,26 +2597,14 @@ static void gen_xpnd04_2(DisasContext *ctx)
}
}
-
-GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
- xpnd04_2, PPC_NONE, PPC2_ISA300)
-
GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
- bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
- bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
- bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
bcdus, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
- bcdtrunc, PPC_NONE, PPC2_ISA300)
static void gen_vsbox(DisasContext *ctx)
{
@@ -2937,6 +2881,180 @@ static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
TRANS(VADDCUW, do_vx_vaddsubcuw, 1)
+/* Integer Add/Sub Saturate Instructions */
+static inline void do_vadd_vsub_sat
+(
+ unsigned vece, TCGv_vec t, TCGv_vec qc, TCGv_vec a, TCGv_vec b,
+ void (*norm_op)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec),
+ void (*sat_op)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
+ norm_op(vece, x, a, b);
+ sat_op(vece, t, a, b);
+ tcg_gen_xor_vec(vece, x, x, t);
+ tcg_gen_or_vec(vece, qc, qc, x);
+}
+
+static void gen_vadd_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_usadd_vec);
+}
+
+static void gen_vadd_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_ssadd_vec);
+}
+
+static void gen_vsub_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_ussub_vec);
+}
+
+static void gen_vsub_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_sssub_vec);
+}
+
+/*
+ * Signed/Unsigned add/sub helper ops for byte/halfword/word
+ * GVecGen4 struct variants.
+ */
+static const TCGOpcode vecop_list_sub_u[] = {
+ INDEX_op_sub_vec, INDEX_op_ussub_vec, 0
+};
+static const TCGOpcode vecop_list_sub_s[] = {
+ INDEX_op_sub_vec, INDEX_op_sssub_vec, 0
+};
+static const TCGOpcode vecop_list_add_u[] = {
+ INDEX_op_add_vec, INDEX_op_usadd_vec, 0
+};
+static const TCGOpcode vecop_list_add_s[] = {
+ INDEX_op_add_vec, INDEX_op_ssadd_vec, 0
+};
+
+static const GVecGen4 op_vsububs = {
+ .fniv = gen_vsub_sat_u,
+ .fno = gen_helper_VSUBUBS,
+ .opt_opc = vecop_list_sub_u,
+ .write_aofs = true,
+ .vece = MO_8
+};
+
+static const GVecGen4 op_vaddubs = {
+ .fniv = gen_vadd_sat_u,
+ .fno = gen_helper_VADDUBS,
+ .opt_opc = vecop_list_add_u,
+ .write_aofs = true,
+ .vece = MO_8
+};
+
+static const GVecGen4 op_vsubuhs = {
+ .fniv = gen_vsub_sat_u,
+ .fno = gen_helper_VSUBUHS,
+ .opt_opc = vecop_list_sub_u,
+ .write_aofs = true,
+ .vece = MO_16
+};
+
+static const GVecGen4 op_vadduhs = {
+ .fniv = gen_vadd_sat_u,
+ .fno = gen_helper_VADDUHS,
+ .opt_opc = vecop_list_add_u,
+ .write_aofs = true,
+ .vece = MO_16
+};
+
+static const GVecGen4 op_vsubuws = {
+ .fniv = gen_vsub_sat_u,
+ .fno = gen_helper_VSUBUWS,
+ .opt_opc = vecop_list_sub_u,
+ .write_aofs = true,
+ .vece = MO_32
+};
+
+static const GVecGen4 op_vadduws = {
+ .fniv = gen_vadd_sat_u,
+ .fno = gen_helper_VADDUWS,
+ .opt_opc = vecop_list_add_u,
+ .write_aofs = true,
+ .vece = MO_32
+};
+
+static const GVecGen4 op_vsubsbs = {
+ .fniv = gen_vsub_sat_s,
+ .fno = gen_helper_VSUBSBS,
+ .opt_opc = vecop_list_sub_s,
+ .write_aofs = true,
+ .vece = MO_8
+};
+
+static const GVecGen4 op_vaddsbs = {
+ .fniv = gen_vadd_sat_s,
+ .fno = gen_helper_VADDSBS,
+ .opt_opc = vecop_list_add_s,
+ .write_aofs = true,
+ .vece = MO_8
+};
+
+static const GVecGen4 op_vsubshs = {
+ .fniv = gen_vsub_sat_s,
+ .fno = gen_helper_VSUBSHS,
+ .opt_opc = vecop_list_sub_s,
+ .write_aofs = true,
+ .vece = MO_16
+};
+
+static const GVecGen4 op_vaddshs = {
+ .fniv = gen_vadd_sat_s,
+ .fno = gen_helper_VADDSHS,
+ .opt_opc = vecop_list_add_s,
+ .write_aofs = true,
+ .vece = MO_16
+};
+
+static const GVecGen4 op_vsubsws = {
+ .fniv = gen_vsub_sat_s,
+ .fno = gen_helper_VSUBSWS,
+ .opt_opc = vecop_list_sub_s,
+ .write_aofs = true,
+ .vece = MO_32
+};
+
+static const GVecGen4 op_vaddsws = {
+ .fniv = gen_vadd_sat_s,
+ .fno = gen_helper_VADDSWS,
+ .opt_opc = vecop_list_add_s,
+ .write_aofs = true,
+ .vece = MO_32
+};
+
+static bool do_vx_vadd_vsub_sat(DisasContext *ctx, arg_VX *a, const GVecGen4 *op)
+{
+ REQUIRE_VECTOR(ctx);
+ tcg_gen_gvec_4(avr_full_offset(a->vrt), offsetof(CPUPPCState, vscr_sat),
+ avr_full_offset(a->vra), avr_full_offset(a->vrb),
+ 16, 16, op);
+
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VSUBUBS, do_vx_vadd_vsub_sat, &op_vsububs)
+TRANS_FLAGS(ALTIVEC, VSUBUHS, do_vx_vadd_vsub_sat, &op_vsubuhs)
+TRANS_FLAGS(ALTIVEC, VSUBUWS, do_vx_vadd_vsub_sat, &op_vsubuws)
+TRANS_FLAGS(ALTIVEC, VSUBSBS, do_vx_vadd_vsub_sat, &op_vsubsbs)
+TRANS_FLAGS(ALTIVEC, VSUBSHS, do_vx_vadd_vsub_sat, &op_vsubshs)
+TRANS_FLAGS(ALTIVEC, VSUBSWS, do_vx_vadd_vsub_sat, &op_vsubsws)
+TRANS_FLAGS(ALTIVEC, VADDUBS, do_vx_vadd_vsub_sat, &op_vaddubs)
+TRANS_FLAGS(ALTIVEC, VADDUHS, do_vx_vadd_vsub_sat, &op_vadduhs)
+TRANS_FLAGS(ALTIVEC, VADDUWS, do_vx_vadd_vsub_sat, &op_vadduws)
+TRANS_FLAGS(ALTIVEC, VADDSBS, do_vx_vadd_vsub_sat, &op_vaddsbs)
+TRANS_FLAGS(ALTIVEC, VADDSHS, do_vx_vadd_vsub_sat, &op_vaddshs)
+TRANS_FLAGS(ALTIVEC, VADDSWS, do_vx_vadd_vsub_sat, &op_vaddsws)
+
static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc
index 7bb11b0..e28958a 100644
--- a/target/ppc/translate/vmx-ops.c.inc
+++ b/target/ppc/translate/vmx-ops.c.inc
@@ -54,18 +54,13 @@ GEN_VXFORM(vsro, 6, 17),
GEN_VXFORM(xpnd04_1, 0, 22),
GEN_VXFORM_300(bcdsr, 0, 23),
GEN_VXFORM_300(bcdsr, 0, 31),
-GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vadduws, 0, 10),
-GEN_VXFORM(vaddsbs, 0, 12),
-GEN_VXFORM_DUAL(vaddshs, bcdcpsgn, 0, 13, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vaddsws, 0, 14),
-GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vsubuws, 0, 26),
-GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300),
-GEN_VXFORM(vsubshs, 0, 29),
-GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_300_EXT(vmul10uq, 0, 8, 0x0000F800),
+GEN_VXFORM_300(vmul10euq, 0, 9),
+GEN_VXFORM_300(bcdcpsgn, 0, 13),
+GEN_VXFORM_207(bcdadd, 0, 24),
+GEN_VXFORM_207(bcdsub, 0, 25),
+GEN_VXFORM_300(bcdtrunc, 0, 28),
+GEN_VXFORM_300(xpnd04_2, 0, 30),
GEN_VXFORM_300(bcdtrunc, 0, 20),
GEN_VXFORM_300(bcdutrunc, 0, 21),
GEN_VXFORM(vsl, 2, 7),
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 0266f09..00ad57c 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -10,6 +10,16 @@ static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}
+static inline void get_vsr_full(TCGv_i128 dst, int reg)
+{
+ tcg_gen_ld_i128(dst, tcg_env, vsr_full_offset(reg));
+}
+
+static inline void set_vsr_full(int reg, TCGv_i128 src)
+{
+ tcg_gen_st_i128(src, tcg_env, vsr_full_offset(reg));
+}
+
static inline TCGv_ptr gen_vsr_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
@@ -24,66 +34,59 @@ static inline TCGv_ptr gen_acc_ptr(int reg)
return r;
}
-#define VSX_LOAD_SCALAR(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- t0 = tcg_temp_new_i64(); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##operation(ctx, t0, EA); \
- set_cpu_vsr(xT(ctx->opcode), t0, true); \
- /* NOTE: cpu_vsrl is undefined */ \
+static bool do_lxs(DisasContext *ctx, arg_X *a,
+ void (*op)(DisasContext *, TCGv_i64, TCGv))
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ REQUIRE_VSX(ctx);
+ t0 = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+ op(ctx, t0, EA);
+ set_cpu_vsr(a->rt, t0, true);
+ /* NOTE: cpu_vsrl is undefined */
+ return true;
}
-VSX_LOAD_SCALAR(lxsdx, ld64_i64)
-VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
-VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
-VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
-VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
-VSX_LOAD_SCALAR(lxsspx, ld32fs)
+TRANS_FLAGS2(VSX, LXSDX, do_lxs, gen_qemu_ld64_i64);
+TRANS_FLAGS2(VSX207, LXSIWAX, do_lxs, gen_qemu_ld32s_i64);
+TRANS_FLAGS2(ISA300, LXSIBZX, do_lxs, gen_qemu_ld8u_i64);
+TRANS_FLAGS2(ISA300, LXSIHZX, do_lxs, gen_qemu_ld16u_i64);
+TRANS_FLAGS2(VSX207, LXSIWZX, do_lxs, gen_qemu_ld32u_i64);
+TRANS_FLAGS2(VSX207, LXSSPX, do_lxs, gen_qemu_ld32fs);
-static void gen_lxvd2x(DisasContext *ctx)
+static bool trans_LXVD2X(DisasContext *ctx, arg_LXVD2X *a)
{
TCGv EA;
TCGv_i64 t0;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
gen_qemu_ld64_i64(ctx, t0, EA);
- set_cpu_vsr(xT(ctx->opcode), t0, true);
+ set_cpu_vsr(a->rt, t0, true);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64_i64(ctx, t0, EA);
- set_cpu_vsr(xT(ctx->opcode), t0, false);
+ set_cpu_vsr(a->rt, t0, false);
+ return true;
}
-static void gen_lxvw4x(DisasContext *ctx)
+static bool trans_LXVW4X(DisasContext *ctx, arg_LXVW4X *a)
{
TCGv EA;
- TCGv_i64 xth;
- TCGv_i64 xtl;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+ TCGv_i64 xth, xtl;
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
xth = tcg_temp_new_i64();
xtl = tcg_temp_new_i64();
-
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
-
- gen_addr_reg_index(ctx, EA);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
if (ctx->le_mode) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -100,55 +103,45 @@ static void gen_lxvw4x(DisasContext *ctx)
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
}
- set_cpu_vsr(xT(ctx->opcode), xth, true);
- set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ set_cpu_vsr(a->rt, xth, true);
+ set_cpu_vsr(a->rt, xtl, false);
+ return true;
}
-static void gen_lxvwsx(DisasContext *ctx)
+static bool trans_LXVWSX(DisasContext *ctx, arg_LXVWSX *a)
{
TCGv EA;
TCGv_i32 data;
- if (xT(ctx->opcode) < 32) {
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ if (a->rt < 32) {
+ REQUIRE_VSX(ctx);
} else {
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
+ REQUIRE_VECTOR(ctx);
}
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
-
- gen_addr_reg_index(ctx, EA);
-
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
data = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
- tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+ tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(a->rt), 16, 16, data);
+ return true;
}
-static void gen_lxvdsx(DisasContext *ctx)
+static bool trans_LXVDSX(DisasContext *ctx, arg_LXVDSX *a)
{
TCGv EA;
TCGv_i64 data;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
-
- gen_addr_reg_index(ctx, EA);
-
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
data = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
- tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+ tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(a->rt), 16, 16, data);
+ return true;
}
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
@@ -187,145 +180,166 @@ static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
}
-static void gen_lxvh8x(DisasContext *ctx)
+static bool trans_LXVH8X(DisasContext *ctx, arg_LXVH8X *a)
{
TCGv EA;
- TCGv_i64 xth;
- TCGv_i64 xtl;
+ TCGv_i64 xth, xtl;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
xth = tcg_temp_new_i64();
xtl = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
-
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
if (ctx->le_mode) {
gen_bswap16x8(xth, xtl, xth, xtl);
}
- set_cpu_vsr(xT(ctx->opcode), xth, true);
- set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ set_cpu_vsr(a->rt, xth, true);
+ set_cpu_vsr(a->rt, xtl, false);
+ return true;
}
-static void gen_lxvb16x(DisasContext *ctx)
+static bool trans_LXVB16X(DisasContext *ctx, arg_LXVB16X *a)
{
TCGv EA;
- TCGv_i64 xth;
- TCGv_i64 xtl;
+ TCGv_i128 data;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ data = tcg_temp_new_i128();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+ tcg_gen_qemu_ld_i128(data, EA, ctx->mem_idx,
+ MO_BE | MO_128 | MO_ATOM_IFALIGN_PAIR);
+ set_vsr_full(a->rt, data);
+ return true;
+}
+
+#if defined(TARGET_PPC64)
+static bool do_ld_st_vl(DisasContext *ctx, arg_X *a,
+ void (*helper)(TCGv_ptr, TCGv, TCGv_ptr, TCGv))
+{
+ TCGv EA;
+ TCGv_ptr xt;
+ if (a->rt < 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
}
- xth = tcg_temp_new_i64();
- xtl = tcg_temp_new_i64();
+ xt = gen_vsr_ptr(a->rt);
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
- tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
- set_cpu_vsr(xT(ctx->opcode), xth, true);
- set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ EA = do_ea_calc_ra(ctx, a->ra);
+ helper(tcg_env, EA, xt, cpu_gpr[a->rb]);
+ return true;
}
+#endif
-#ifdef TARGET_PPC64
-#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_ptr xt; \
- \
- if (xT(ctx->opcode) < 32) { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- EA = tcg_temp_new(); \
- xt = gen_vsr_ptr(xT(ctx->opcode)); \
- gen_set_access_type(ctx, ACCESS_INT); \
- gen_addr_register(ctx, EA); \
- gen_helper_##name(tcg_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
-}
-
-VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
-VSX_VECTOR_LOAD_STORE_LENGTH(lxvll)
-VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
-VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
+static bool trans_LXVL(DisasContext *ctx, arg_LXVL *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+#if defined(TARGET_PPC64)
+ return do_ld_st_vl(ctx, a, gen_helper_LXVL);
+#else
+ qemu_build_not_reached();
#endif
+ return true;
+}
-#define VSX_STORE_SCALAR(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- t0 = tcg_temp_new_i64(); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- get_cpu_vsr(t0, xS(ctx->opcode), true); \
- gen_qemu_##operation(ctx, t0, EA); \
+static bool trans_LXVLL(DisasContext *ctx, arg_LXVLL *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+#if defined(TARGET_PPC64)
+ return do_ld_st_vl(ctx, a, gen_helper_LXVLL);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
}
-VSX_STORE_SCALAR(stxsdx, st64_i64)
+static bool trans_STXVL(DisasContext *ctx, arg_STXVL *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+#if defined(TARGET_PPC64)
+ return do_ld_st_vl(ctx, a, gen_helper_STXVL);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
-VSX_STORE_SCALAR(stxsibx, st8_i64)
-VSX_STORE_SCALAR(stxsihx, st16_i64)
-VSX_STORE_SCALAR(stxsiwx, st32_i64)
-VSX_STORE_SCALAR(stxsspx, st32fs)
+static bool trans_STXVLL(DisasContext *ctx, arg_STXVLL *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+#if defined(TARGET_PPC64)
+ return do_ld_st_vl(ctx, a, gen_helper_STXVLL);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
-static void gen_stxvd2x(DisasContext *ctx)
+static bool do_stxs(DisasContext *ctx, arg_X *a,
+ void (*op)(DisasContext *, TCGv_i64, TCGv))
{
TCGv EA;
TCGv_i64 t0;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+ REQUIRE_VSX(ctx);
t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- get_cpu_vsr(t0, xS(ctx->opcode), true);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+ get_cpu_vsr(t0, a->rt, true);
+ op(ctx, t0, EA);
+ return true;
+}
+
+TRANS_FLAGS2(VSX, STXSDX, do_stxs, gen_qemu_st64_i64);
+TRANS_FLAGS2(ISA300, STXSIBX, do_stxs, gen_qemu_st8_i64);
+TRANS_FLAGS2(ISA300, STXSIHX, do_stxs, gen_qemu_st16_i64);
+TRANS_FLAGS2(VSX207, STXSIWX, do_stxs, gen_qemu_st32_i64);
+TRANS_FLAGS2(VSX207, STXSSPX, do_stxs, gen_qemu_st32fs);
+
+static bool trans_STXVD2X(DisasContext *ctx, arg_STXVD2X *a)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
+ t0 = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+ get_cpu_vsr(t0, a->rt, true);
gen_qemu_st64_i64(ctx, t0, EA);
tcg_gen_addi_tl(EA, EA, 8);
- get_cpu_vsr(t0, xS(ctx->opcode), false);
+ get_cpu_vsr(t0, a->rt, false);
gen_qemu_st64_i64(ctx, t0, EA);
+ return true;
}
-static void gen_stxvw4x(DisasContext *ctx)
+static bool trans_STXVW4X(DisasContext *ctx, arg_STXVW4X *a)
{
TCGv EA;
- TCGv_i64 xsh;
- TCGv_i64 xsl;
+ TCGv_i64 xsh, xsl;
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
- get_cpu_vsr(xsh, xS(ctx->opcode), true);
- get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ get_cpu_vsr(xsh, a->rt, true);
+ get_cpu_vsr(xsl, a->rt, false);
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
if (ctx->le_mode) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -342,25 +356,23 @@ static void gen_stxvw4x(DisasContext *ctx)
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
+ return true;
}
-static void gen_stxvh8x(DisasContext *ctx)
+static bool trans_STXVH8X(DisasContext *ctx, arg_STXVH8X *a)
{
TCGv EA;
- TCGv_i64 xsh;
- TCGv_i64 xsl;
+ TCGv_i64 xsh, xsl;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
- get_cpu_vsr(xsh, xS(ctx->opcode), true);
- get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ get_cpu_vsr(xsh, a->rt, true);
+ get_cpu_vsr(xsl, a->rt, false);
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
if (ctx->le_mode) {
TCGv_i64 outh = tcg_temp_new_i64();
TCGv_i64 outl = tcg_temp_new_i64();
@@ -374,28 +386,24 @@ static void gen_stxvh8x(DisasContext *ctx)
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
+ return true;
}
-static void gen_stxvb16x(DisasContext *ctx)
+static bool trans_STXVB16X(DisasContext *ctx, arg_STXVB16X *a)
{
TCGv EA;
- TCGv_i64 xsh;
- TCGv_i64 xsl;
+ TCGv_i128 data;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- xsh = tcg_temp_new_i64();
- xsl = tcg_temp_new_i64();
- get_cpu_vsr(xsh, xS(ctx->opcode), true);
- get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ data = tcg_temp_new_i128();
gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
- tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
+ EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+ get_vsr_full(data, a->rt);
+ tcg_gen_qemu_st_i128(data, EA, ctx->mem_idx,
+ MO_BE | MO_128 | MO_ATOM_IFALIGN_PAIR);
+ return true;
}
static void gen_mfvsrwz(DisasContext *ctx)
@@ -788,34 +796,28 @@ static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)
-#define VSX_CMP(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 ignored; \
- TCGv_ptr xt, xa, xb; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xt = gen_vsr_ptr(xT(ctx->opcode)); \
- xa = gen_vsr_ptr(xA(ctx->opcode)); \
- xb = gen_vsr_ptr(xB(ctx->opcode)); \
- if ((ctx->opcode >> (31 - 21)) & 1) { \
- gen_helper_##name(cpu_crf[6], tcg_env, xt, xa, xb); \
- } else { \
- ignored = tcg_temp_new_i32(); \
- gen_helper_##name(ignored, tcg_env, xt, xa, xb); \
- } \
+static bool do_cmp(DisasContext *ctx, arg_XX3_rc *a,
+ void (*helper)(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_i32 dest;
+ TCGv_ptr xt, xa, xb;
+ REQUIRE_VSX(ctx);
+ xt = gen_vsr_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+ dest = a->rc ? cpu_crf[6] : tcg_temp_new_i32();
+ helper(dest, tcg_env, xt, xa, xb);
+ return true;
}
-VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
-VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
-VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
-VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
-VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
-VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
-VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
-VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+TRANS_FLAGS2(VSX, XVCMPEQSP, do_cmp, gen_helper_XVCMPEQSP);
+TRANS_FLAGS2(VSX, XVCMPGTSP, do_cmp, gen_helper_XVCMPGTSP);
+TRANS_FLAGS2(VSX, XVCMPGESP, do_cmp, gen_helper_XVCMPGESP);
+TRANS_FLAGS2(ISA300, XVCMPNESP, do_cmp, gen_helper_XVCMPNESP);
+TRANS_FLAGS2(VSX, XVCMPEQDP, do_cmp, gen_helper_XVCMPEQDP);
+TRANS_FLAGS2(VSX, XVCMPGTDP, do_cmp, gen_helper_XVCMPGTDP);
+TRANS_FLAGS2(VSX, XVCMPGEDP, do_cmp, gen_helper_XVCMPGEDP);
+TRANS_FLAGS2(ISA300, XVCMPNEDP, do_cmp, gen_helper_XVCMPNEDP);
static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
{
@@ -864,20 +866,6 @@ static void gen_##name(DisasContext *ctx) \
gen_helper_##name(tcg_env, opc); \
}
-#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr xt, xa, xb; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xt = gen_vsr_ptr(xT(ctx->opcode)); \
- xa = gen_vsr_ptr(xA(ctx->opcode)); \
- xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(tcg_env, xt, xa, xb); \
-}
-
#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -983,12 +971,8 @@ static void gen_##name(DisasContext *ctx) \
set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}
-GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
@@ -1001,8 +985,6 @@ GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
@@ -1233,27 +1215,17 @@ GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
@@ -1269,17 +1241,11 @@ GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
@@ -1609,26 +1575,24 @@ static void gen_xxbrw(DisasContext *ctx)
set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
-#define VSX_LOGICAL(name, vece, tcg_op) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- tcg_op(vece, vsr_full_offset(xT(ctx->opcode)), \
- vsr_full_offset(xA(ctx->opcode)), \
- vsr_full_offset(xB(ctx->opcode)), 16, 16); \
- }
+static bool do_logical_op(DisasContext *ctx, arg_XX3 *a, unsigned vece,
+ void (*helper)(unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t))
+{
+ REQUIRE_VSX(ctx);
+ helper(vece, vsr_full_offset(a->xt),
+ vsr_full_offset(a->xa),
+ vsr_full_offset(a->xb), 16, 16);
+ return true;
+}
-VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
-VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
-VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
-VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
-VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
-VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
-VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
-VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
+TRANS_FLAGS2(VSX, XXLAND, do_logical_op, MO_64, tcg_gen_gvec_and);
+TRANS_FLAGS2(VSX, XXLANDC, do_logical_op, MO_64, tcg_gen_gvec_andc);
+TRANS_FLAGS2(VSX, XXLOR, do_logical_op, MO_64, tcg_gen_gvec_or);
+TRANS_FLAGS2(VSX, XXLXOR, do_logical_op, MO_64, tcg_gen_gvec_xor);
+TRANS_FLAGS2(VSX, XXLNOR, do_logical_op, MO_64, tcg_gen_gvec_nor);
+TRANS_FLAGS2(VSX207, XXLEQV, do_logical_op, MO_64, tcg_gen_gvec_eqv);
+TRANS_FLAGS2(VSX207, XXLNAND, do_logical_op, MO_64, tcg_gen_gvec_nand);
+TRANS_FLAGS2(VSX207, XXLORC, do_logical_op, MO_64, tcg_gen_gvec_orc);
#define VSX_XXMRG(name, high) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -2215,13 +2179,13 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
int rt, bool store, bool paired)
{
TCGv ea;
- TCGv_i64 xt;
+ TCGv_i128 data;
MemOp mop;
int rt1, rt2;
- xt = tcg_temp_new_i64();
+ data = tcg_temp_new_i128();
- mop = DEF_MEMOP(MO_UQ);
+ mop = DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR);
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, ra, displ);
@@ -2235,32 +2199,20 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
}
if (store) {
- get_cpu_vsr(xt, rt1, !ctx->le_mode);
- tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
- gen_addr_add(ctx, ea, ea, 8);
- get_cpu_vsr(xt, rt1, ctx->le_mode);
- tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ get_vsr_full(data, rt1);
+ tcg_gen_qemu_st_i128(data, ea, ctx->mem_idx, mop);
if (paired) {
- gen_addr_add(ctx, ea, ea, 8);
- get_cpu_vsr(xt, rt2, !ctx->le_mode);
- tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
- gen_addr_add(ctx, ea, ea, 8);
- get_cpu_vsr(xt, rt2, ctx->le_mode);
- tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ gen_addr_add(ctx, ea, ea, 16);
+ get_vsr_full(data, rt2);
+ tcg_gen_qemu_st_i128(data, ea, ctx->mem_idx, mop);
}
} else {
- tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
- set_cpu_vsr(rt1, xt, !ctx->le_mode);
- gen_addr_add(ctx, ea, ea, 8);
- tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
- set_cpu_vsr(rt1, xt, ctx->le_mode);
+ tcg_gen_qemu_ld_i128(data, ea, ctx->mem_idx, mop);
+ set_vsr_full(rt1, data);
if (paired) {
- gen_addr_add(ctx, ea, ea, 8);
- tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
- set_cpu_vsr(rt2, xt, !ctx->le_mode);
- gen_addr_add(ctx, ea, ea, 8);
- tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
- set_cpu_vsr(rt2, xt, ctx->le_mode);
+ gen_addr_add(ctx, ea, ea, 16);
+ tcg_gen_qemu_ld_i128(data, ea, ctx->mem_idx, mop);
+ set_vsr_full(rt2, data);
}
}
return true;
@@ -2292,7 +2244,7 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
{
- if (paired || a->rt >= 32) {
+ if (paired || a->rt < 32) {
REQUIRE_VSX(ctx);
} else {
REQUIRE_VECTOR(ctx);
@@ -2712,8 +2664,6 @@ static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
TCGv_ptr xt, xa, xb;
-
- REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
xt = gen_vsr_ptr(a->xt);
@@ -2724,13 +2674,40 @@ static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
return true;
}
-TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
-TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
-TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
-TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
-TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
-TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
-TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)
+TRANS_FLAGS2(ISA300, XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
+TRANS_FLAGS2(ISA300, XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
+TRANS_FLAGS2(ISA300, XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
+TRANS_FLAGS2(ISA300, XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
+TRANS_FLAGS2(ISA300, XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
+TRANS_FLAGS2(ISA300, XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
+TRANS_FLAGS2(ISA300, XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)
+
+TRANS_FLAGS2(VSX207, XSADDSP, do_helper_XX3, gen_helper_XSADDSP)
+TRANS_FLAGS2(VSX207, XSSUBSP, do_helper_XX3, gen_helper_XSSUBSP)
+TRANS_FLAGS2(VSX207, XSMULSP, do_helper_XX3, gen_helper_XSMULSP)
+TRANS_FLAGS2(VSX207, XSDIVSP, do_helper_XX3, gen_helper_XSDIVSP)
+
+TRANS_FLAGS2(VSX, XSADDDP, do_helper_XX3, gen_helper_XSADDDP)
+TRANS_FLAGS2(VSX, XSSUBDP, do_helper_XX3, gen_helper_XSSUBDP)
+TRANS_FLAGS2(VSX, XSMULDP, do_helper_XX3, gen_helper_XSMULDP)
+TRANS_FLAGS2(VSX, XSDIVDP, do_helper_XX3, gen_helper_XSDIVDP)
+
+TRANS_FLAGS2(VSX, XVADDSP, do_helper_XX3, gen_helper_XVADDSP)
+TRANS_FLAGS2(VSX, XVSUBSP, do_helper_XX3, gen_helper_XVSUBSP)
+TRANS_FLAGS2(VSX, XVMULSP, do_helper_XX3, gen_helper_XVMULSP)
+TRANS_FLAGS2(VSX, XVDIVSP, do_helper_XX3, gen_helper_XVDIVSP)
+
+TRANS_FLAGS2(VSX, XVADDDP, do_helper_XX3, gen_helper_XVADDDP)
+TRANS_FLAGS2(VSX, XVSUBDP, do_helper_XX3, gen_helper_XVSUBDP)
+TRANS_FLAGS2(VSX, XVMULDP, do_helper_XX3, gen_helper_XVMULDP)
+TRANS_FLAGS2(VSX, XVDIVDP, do_helper_XX3, gen_helper_XVDIVDP)
+
+TRANS_FLAGS2(VSX, XSMAXDP, do_helper_XX3, gen_helper_XSMAXDP)
+TRANS_FLAGS2(VSX, XSMINDP, do_helper_XX3, gen_helper_XSMINDP)
+TRANS_FLAGS2(VSX, XVMAXSP, do_helper_XX3, gen_helper_XVMAXSP)
+TRANS_FLAGS2(VSX, XVMINSP, do_helper_XX3, gen_helper_XVMINSP)
+TRANS_FLAGS2(VSX, XVMAXDP, do_helper_XX3, gen_helper_XVMAXDP)
+TRANS_FLAGS2(VSX, XVMINDP, do_helper_XX3, gen_helper_XVMINDP)
static bool do_helper_X(arg_X *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
@@ -2910,4 +2887,3 @@ TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN)
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
-#undef VSX_LOGICAL
diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
index a3ba094..e553b5b 100644
--- a/target/ppc/translate/vsx-ops.c.inc
+++ b/target/ppc/translate/vsx-ops.c.inc
@@ -1,34 +1,3 @@
-GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxvwsx, 0x1F, 0x0C, 0x0B, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
-#if defined(TARGET_PPC64)
-GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300),
-#endif
-
-GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
-#if defined(TARGET_PPC64)
-GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300),
-#endif
-
GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207),
@@ -74,16 +43,6 @@ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
-#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2)
-
#define GEN_XX3FORM_DM(name, opc2, opc3) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
@@ -153,12 +112,8 @@ GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300),
GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300),
GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300),
-GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0),
-GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX),
GEN_VSX_XFORM_300(xsmulqp, 0x04, 0x01, 0x0),
-GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX),
GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX),
GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
@@ -170,8 +125,6 @@ GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
-GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
-GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
@@ -191,10 +144,6 @@ GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX),
GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX),
GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207),
-GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207),
-GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207),
GEN_VSX_XFORM_300(xsdivqp, 0x04, 0x11, 0x0),
GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
@@ -203,10 +152,6 @@ GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
-GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX),
GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX),
GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
@@ -220,12 +165,6 @@ GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX),
-GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
-GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300),
GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX),
GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX),
GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX),
@@ -241,10 +180,6 @@ GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX),
GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX),
GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX),
-GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX),
GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX),
GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
@@ -258,12 +193,6 @@ GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX),
GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX),
-GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
-GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300),
GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX),
GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX),
GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX),
@@ -285,17 +214,6 @@ GEN_XX2FORM_EO(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300),
GEN_XX2FORM_EO(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300),
GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300),
-#define VSX_LOGICAL(name, opc2, opc3, fl2) \
-GEN_XX3FORM(name, opc2, opc3, fl2)
-
-VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
-VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
-VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
-VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
-VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
-VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207),
-VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
-VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
index a4d07a0..ae210eb 100644
--- a/target/ppc/user_only_helper.c
+++ b/target/ppc/user_only_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "internal.h"
void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address,
diff --git a/target/riscv/Kconfig b/target/riscv/Kconfig
index 5f30df2..11bc09b 100644
--- a/target/riscv/Kconfig
+++ b/target/riscv/Kconfig
@@ -1,9 +1,9 @@
config RISCV32
bool
- select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting()
+ select ARM_COMPATIBLE_SEMIHOSTING if TCG
select DEVICE_TREE # needed by boot.c
config RISCV64
bool
- select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting()
+ select ARM_COMPATIBLE_SEMIHOSTING if TCG
select DEVICE_TREE # needed by boot.c
diff --git a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c
index 434c8a3..12b6879 100644
--- a/target/riscv/arch_dump.c
+++ b/target/riscv/arch_dump.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
/* struct user_regs_struct from arch/riscv/include/uapi/asm/ptrace.h */
struct riscv64_user_regs {
diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
index b99c4a3..e9c8d7f 100644
--- a/target/riscv/bitmanip_helper.c
+++ b/target/riscv/bitmanip_helper.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
+#include "exec/target_long.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
index 1fbd649..cfdc67c 100644
--- a/target/riscv/cpu-param.h
+++ b/target/riscv/cpu-param.h
@@ -2,22 +2,28 @@
* RISC-V cpu parameters for qemu.
*
* Copyright (c) 2017-2018 SiFive, Inc.
- * SPDX-License-Identifier: GPL-2.0+
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef RISCV_CPU_PARAM_H
#define RISCV_CPU_PARAM_H
#if defined(TARGET_RISCV64)
-# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
#elif defined(TARGET_RISCV32)
-# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
#endif
#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
+
+/*
+ * RISC-V-specific extra insn start words:
+ * 1: Original instruction opcode
+ * 2: more information about instruction
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
/*
* The current MMU Modes are:
* - U mode 0b000
@@ -28,6 +34,4 @@
* - M mode HLV/HLVX/HSV 0b111
*/
-#define TCG_GUEST_DEFAULT_MO 0
-
#endif
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
index 3670cfe..1ee05eb 100644
--- a/target/riscv/cpu-qom.h
+++ b/target/riscv/cpu-qom.h
@@ -29,8 +29,8 @@
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
-#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max")
+#define TYPE_RISCV_CPU_MAX32 RISCV_CPU_TYPE_NAME("max32")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
@@ -40,15 +40,21 @@
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
+#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
+#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
+#define TYPE_RISCV_CPU_SIFIVE_E RISCV_CPU_TYPE_NAME("sifive-e")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U RISCV_CPU_TYPE_NAME("sifive-u")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906")
#define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1")
+#define TYPE_RISCV_CPU_TT_ASCALON RISCV_CPU_TYPE_NAME("tt-ascalon")
+#define TYPE_RISCV_CPU_XIANGSHAN_NANHU RISCV_CPU_TYPE_NAME("xiangshan-nanhu")
#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host")
OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index a90808a..629ac37 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -24,7 +24,6 @@
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
@@ -32,9 +31,9 @@
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/device_tree.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"
@@ -42,7 +41,7 @@
/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
- RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
+ RVC, RVS, RVU, RVH, RVG, RVB, 0};
/*
* From vector_helper.c
@@ -74,6 +73,13 @@ bool riscv_cpu_option_set(const char *optname)
return g_hash_table_contains(general_user_opts, optname);
}
+static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
+{
+#define BOOL_FIELD(x) dest->x |= src->x;
+#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
+#include "cpu_cfg_fields.h.inc"
+}
+
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
@@ -105,7 +111,9 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
- ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
+ ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
+ ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
+ ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
@@ -115,7 +123,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
- ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
+ ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
@@ -181,22 +189,47 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
+ ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
+ ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+ ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
+ ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
+ ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
+ ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
+ ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
+ ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
+ ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
+ ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
+ ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
+ ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
+ ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
+ ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
+ ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
+ ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
+ ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
@@ -210,7 +243,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
@@ -283,7 +316,7 @@ static const char * const riscv_excp_names[] = {
"load_page_fault",
"reserved",
"store_page_fault",
- "reserved",
+ "double_trap",
"reserved",
"reserved",
"reserved",
@@ -330,7 +363,7 @@ void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
- return 16 << mcc->misa_mxl_max;
+ return 16 << mcc->def->misa_mxl_max;
}
#ifndef CONFIG_USER_ONLY
@@ -363,7 +396,7 @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
g_assert_not_reached();
}
-uint8_t satp_mode_max_from_map(uint32_t map)
+static uint8_t satp_mode_max_from_map(uint32_t map)
{
/*
* 'map = 0' will make us return (31 - 32), which C will
@@ -407,17 +440,23 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
g_assert_not_reached();
}
-static void set_satp_mode_max_supported(RISCVCPU *cpu,
- uint8_t satp_mode)
+static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
{
- bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
+ bool rv32 = riscv_cpu_is_32bit(cpu);
const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+ int satp_mode = cpu->cfg.max_satp_mode;
+ if (satp_mode == -1) {
+ return false;
+ }
+
+ *supported = 0;
for (int i = 0; i <= satp_mode; ++i) {
if (valid_vm[i]) {
- cpu->cfg.satp_mode.supported |= (1 << i);
+ *supported |= (1 << i);
}
}
+ return true;
}
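For reference, a minimal standalone sketch (not part of the patch) of how a supported-modes bitmap falls out of a single max_satp_mode index. It mirrors the loop in get_satp_mode_supported() above; the demo_valid_vm table and DEMO_VM_* values are made up stand-ins for the real valid_vm_1_10_* arrays.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_VM_MBARE 0
#define DEMO_VM_SV39  8
#define DEMO_VM_SV48  9
#define DEMO_VM_SV57  10

static const bool demo_valid_vm[16] = {
    [DEMO_VM_MBARE] = true,
    [DEMO_VM_SV39]  = true,
    [DEMO_VM_SV48]  = true,
    [DEMO_VM_SV57]  = true,
};

/* Same shape as get_satp_mode_supported(): one bit per valid mode. */
static uint16_t demo_supported_map(int max_satp_mode)
{
    uint16_t supported = 0;

    if (max_satp_mode < 0) {
        return 0; /* CPU leaves the choice to the user */
    }
    for (int i = 0; i <= max_satp_mode; ++i) {
        if (demo_valid_vm[i]) {
            supported |= 1u << i;
        }
    }
    return supported;
}

int main(void)
{
    /* SV48 max: bits for MBARE, SV39 and SV48 end up set. */
    printf("supported = 0x%04x\n", (unsigned)demo_supported_map(DEMO_VM_SV48));
    return 0;
}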
/* Set the satp mode to the max supported */
@@ -426,310 +465,26 @@ static void set_satp_mode_default_map(RISCVCPU *cpu)
/*
* Bare CPUs do not default to the max available.
* Users must set a valid satp_mode in the command
- * line.
+ * line. Otherwise, leave the existing max_satp_mode
+ * in place.
*/
if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
warn_report("No satp mode set. Defaulting to 'bare'");
- cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
- return;
+ cpu->cfg.max_satp_mode = VM_1_10_MBARE;
}
-
- cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
-}
-#endif
-
-static void riscv_any_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj),
- riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
- VM_1_10_SV32 : VM_1_10_SV57);
-#endif
-
- env->priv_ver = PRIV_VERSION_LATEST;
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void riscv_max_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
-#ifdef TARGET_RISCV32
- set_satp_mode_max_supported(cpu, VM_1_10_SV32);
-#else
- set_satp_mode_max_supported(cpu, VM_1_10_SV57);
-#endif
-#endif
-}
-
-#if defined(TARGET_RISCV64)
-static void rv64_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-#endif
}
-
-static void rv64_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_thead_c906_cpu_init(Object *obj)
+static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_11_0;
-
- cpu->cfg.ext_zfa = true;
- cpu->cfg.ext_zfh = true;
- cpu->cfg.mmu = true;
- cpu->cfg.ext_xtheadba = true;
- cpu->cfg.ext_xtheadbb = true;
- cpu->cfg.ext_xtheadbs = true;
- cpu->cfg.ext_xtheadcmo = true;
- cpu->cfg.ext_xtheadcondmov = true;
- cpu->cfg.ext_xtheadfmemidx = true;
- cpu->cfg.ext_xtheadmac = true;
- cpu->cfg.ext_xtheadmemidx = true;
- cpu->cfg.ext_xtheadmempair = true;
- cpu->cfg.ext_xtheadsync = true;
-
- cpu->cfg.mvendorid = THEAD_VENDOR_ID;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV39);
- th_register_custom_csrs(cpu);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.pmp = true;
-}
-
-static void rv64_veyron_v1_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
- env->priv_ver = PRIV_VERSION_1_12_0;
-
- /* Enable ISA extensions */
- cpu->cfg.mmu = true;
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_zicbom = true;
- cpu->cfg.cbom_blocksize = 64;
- cpu->cfg.cboz_blocksize = 64;
- cpu->cfg.ext_zicboz = true;
- cpu->cfg.ext_smaia = true;
- cpu->cfg.ext_ssaia = true;
- cpu->cfg.ext_sscofpmf = true;
- cpu->cfg.ext_sstc = true;
- cpu->cfg.ext_svinval = true;
- cpu->cfg.ext_svnapot = true;
- cpu->cfg.ext_svpbmt = true;
- cpu->cfg.ext_smstateen = true;
- cpu->cfg.ext_zba = true;
- cpu->cfg.ext_zbb = true;
- cpu->cfg.ext_zbc = true;
- cpu->cfg.ext_zbs = true;
- cpu->cfg.ext_XVentanaCondOps = true;
-
- cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
- cpu->cfg.marchid = VEYRON_V1_MARCHID;
- cpu->cfg.mimpid = VEYRON_V1_MIMPID;
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV48);
-#endif
-}
-
-#ifdef CONFIG_TCG
-static void rv128_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- if (qemu_tcg_mttcg_enabled()) {
- /* Missing 128-bit aligned atomics */
- error_report("128-bit RISC-V currently does not work with Multi "
- "Threaded TCG. Please use: -accel tcg,thread=single");
- exit(EXIT_FAILURE);
+ for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
+ int csrno = csr_list[i].csrno;
+ const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
+ if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
+ riscv_set_csr_ops(csrno, csr_ops);
+ }
}
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-#endif
-}
-#endif /* CONFIG_TCG */
-
-static void rv64i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv64e_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
-}
-
-#else /* !TARGET_RISCV64 */
-
-static void rv32_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-}
-
-static void rv32_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32_ibex_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_12_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_smepmp = true;
-}
-
-static void rv32_imafcu_nommu_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv32e_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
}
#endif
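A self-contained sketch of the pattern riscv_register_custom_csrs() uses: walk a sentinel-terminated table and register an entry only when its optional predicate passes. All types and names here (DemoCSR, demo_*) are invented for illustration and are not QEMU APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    int csrno;
    const char *name;                  /* NULL name terminates the table */
    bool (*insertion_test)(void *ctx); /* optional; NULL means "always" */
} DemoCSR;

static bool demo_only_if_flag(void *ctx)
{
    return *(bool *)ctx;
}

static void demo_register(const DemoCSR *list, void *ctx)
{
    for (size_t i = 0; list[i].name; i++) {
        if (!list[i].insertion_test || list[i].insertion_test(ctx)) {
            printf("registering CSR 0x%03x (%s)\n",
                   (unsigned)list[i].csrno, list[i].name);
        }
    }
}

int main(void)
{
    bool flag = false;
    const DemoCSR table[] = {
        { 0x7c0, "vendor_csr_a", NULL },
        { 0x7c1, "vendor_csr_b", demo_only_if_flag }, /* skipped: flag off */
        { 0 }, /* sentinel */
    };

    demo_register(table, &flag);
    return 0;
}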
@@ -805,13 +560,6 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
CSR_MSCRATCH,
CSR_SSCRATCH,
CSR_SATP,
- CSR_MMTE,
- CSR_UPMBASE,
- CSR_UPMMASK,
- CSR_SPMBASE,
- CSR_SPMMASK,
- CSR_MPMBASE,
- CSR_MPMMASK,
};
for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
@@ -839,6 +587,12 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
if (flags & CPU_DUMP_FPU) {
+ target_ulong val = 0;
+ RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
+ if (res == RISCV_EXCP_NONE) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
+ csr_ops[CSR_FCSR].name, val);
+ }
for (i = 0; i < 32; i++) {
qemu_fprintf(f, " %-8s %016" PRIx64,
riscv_fpr_regnames[i], env->fpr[i]);
@@ -908,9 +662,9 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
return env->pc;
}
+#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
-#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
/*
@@ -920,15 +674,8 @@ bool riscv_cpu_has_work(CPUState *cs)
return riscv_cpu_all_pending(env) != 0 ||
riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
-#else
- return true;
-#endif
-}
-
-static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return riscv_env_mmu_index(cpu_env(cs), ifetch);
}
+#endif /* !CONFIG_USER_ONLY */
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
@@ -945,7 +692,7 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
mcc->parent_phases.hold(obj, type);
}
#ifndef CONFIG_USER_ONLY
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
if (env->misa_mxl > MXL_RV32) {
@@ -965,6 +712,9 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_UXL, env->misa_mxl);
}
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
+ }
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
@@ -991,8 +741,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
i++;
}
- /* mmte is supposed to have pm.current hardwired to 1 */
- env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
@@ -1012,18 +760,35 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
pmp_unlock_entries(env);
+#else
+ env->priv = PRV_U;
+ env->senvcfg = 0;
+ env->menvcfg = 0;
#endif
+
+ /* on reset elp is clear */
+ env->elp = false;
+ /* on reset ssp is set to 0 */
+ env->ssp = 0;
+
env->xl = riscv_cpu_mxl(env);
- riscv_cpu_update_mask(env);
cs->exception_index = RISCV_EXCP_NONE;
env->load_res = -1;
set_default_nan_mode(1, &env->fp_status);
+ /* Default NaN value: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ env->vill = true;
#ifndef CONFIG_USER_ONLY
if (cpu->cfg.debug) {
riscv_trigger_reset_hold(env);
}
+ if (cpu->cfg.ext_smrnmi) {
+ env->rnmip = 0;
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ }
+
if (kvm_enabled()) {
kvm_riscv_reset_vcpu(cpu);
}
@@ -1036,6 +801,15 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
CPURISCVState *env = &cpu->env;
info->target_info = &cpu->cfg;
+ /*
+ * A couple of bits in MSTATUS set the endianness:
+ * - MSTATUS_UBE (User-mode),
+ * - MSTATUS_SBE (Supervisor-mode),
+ * - MSTATUS_MBE (Machine-mode)
+ * but we don't implement that yet.
+ */
+ info->endian = BFD_ENDIAN_LITTLE;
+
switch (env->xl) {
case MXL_RV32:
info->print_insn = print_insn_riscv32;
@@ -1055,18 +829,16 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
bool rv32 = riscv_cpu_is_32bit(cpu);
- uint8_t satp_mode_map_max, satp_mode_supported_max;
+ uint16_t supported;
+ uint8_t satp_mode_map_max;
- /* The CPU wants the OS to decide which satp mode to use */
- if (cpu->cfg.satp_mode.supported == 0) {
+ if (!get_satp_mode_supported(cpu, &supported)) {
+ /* The CPU wants the hypervisor to decide which satp mode to allow */
return;
}
- satp_mode_supported_max =
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
-
- if (cpu->cfg.satp_mode.map == 0) {
- if (cpu->cfg.satp_mode.init == 0) {
+ if (cpu->satp_modes.map == 0) {
+ if (cpu->satp_modes.init == 0) {
/* If unset by the user, we fallback to the default satp mode. */
set_satp_mode_default_map(cpu);
} else {
@@ -1076,27 +848,27 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
* valid_vm_1_10_32/64.
*/
for (int i = 1; i < 16; ++i) {
- if ((cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if ((cpu->satp_modes.init & (1 << i)) &&
+ supported & (1 << i)) {
for (int j = i - 1; j >= 0; --j) {
- if (cpu->cfg.satp_mode.supported & (1 << j)) {
- cpu->cfg.satp_mode.map |= (1 << j);
- break;
+ if (supported & (1 << j)) {
+ cpu->cfg.max_satp_mode = j;
+ return;
}
}
- break;
}
}
}
+ return;
}
- satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
+ satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);
/* Make sure the user asked for a supported configuration (HW and qemu) */
- if (satp_mode_map_max > satp_mode_supported_max) {
+ if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
error_setg(errp, "satp_mode %s is higher than hw max capability %s",
satp_mode_str(satp_mode_map_max, rv32),
- satp_mode_str(satp_mode_supported_max, rv32));
+ satp_mode_str(cpu->cfg.max_satp_mode, rv32));
return;
}
@@ -1106,9 +878,9 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
*/
if (!rv32) {
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
- (cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if (!(cpu->satp_modes.map & (1 << i)) &&
+ (cpu->satp_modes.init & (1 << i)) &&
+ (supported & (1 << i))) {
error_setg(errp, "cannot disable %s satp mode if %s "
"is enabled", satp_mode_str(i, false),
satp_mode_str(satp_mode_map_max, false));
@@ -1117,12 +889,7 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
}
}
- /* Finally expand the map so that all valid modes are set */
- for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (cpu->cfg.satp_mode.supported & (1 << i)) {
- cpu->cfg.satp_mode.map |= (1 << i);
- }
- }
+ cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif
@@ -1161,11 +928,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
Error *local_err = NULL;
- if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
- warn_report("The 'any' CPU is deprecated and will be "
- "removed in the future.");
- }
-
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -1205,11 +967,11 @@ bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
- value = satp_map->map & (1 << satp);
+ value = satp_modes->map & (1 << satp);
visit_type_bool(v, name, &value, errp);
}
@@ -1217,7 +979,7 @@ static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
@@ -1225,8 +987,8 @@ static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
return;
}
- satp_map->map = deposit32(satp_map->map, satp, 1, value);
- satp_map->init |= 1 << satp;
+ satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
+ satp_modes->init |= 1 << satp;
}
void riscv_add_satp_mode_properties(Object *obj)
@@ -1235,16 +997,16 @@ void riscv_add_satp_mode_properties(Object *obj)
if (cpu->env.misa_mxl == MXL_RV32) {
object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
} else {
object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
}
}
@@ -1309,16 +1071,16 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
g_assert_not_reached();
}
}
-#endif /* CONFIG_USER_ONLY */
-static bool riscv_cpu_is_dynamic(Object *cpu_obj)
+static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
- return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
+ riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
+#endif /* CONFIG_USER_ONLY */
-static void riscv_cpu_post_init(Object *obj)
+static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
- accel_cpu_instance_init(CPU(obj));
+ return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
static void riscv_cpu_init(Object *obj)
@@ -1327,11 +1089,13 @@ static void riscv_cpu_init(Object *obj)
RISCVCPU *cpu = RISCV_CPU(obj);
CPURISCVState *env = &cpu->env;
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
+ qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
+ "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */
general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
@@ -1343,8 +1107,8 @@ static void riscv_cpu_init(Object *obj)
* for all CPUs. Each accelerator will decide what to do when
* users disable them.
*/
- RISCV_CPU(obj)->cfg.ext_zicntr = true;
- RISCV_CPU(obj)->cfg.ext_zihpm = true;
+ RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
+ RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;
/* Default values for non-bool cpu properties */
cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
@@ -1354,34 +1118,28 @@ static void riscv_cpu_init(Object *obj)
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
cpu->env.vext_ver = VEXT_VERSION_1_00_0;
-}
-
-static void riscv_bare_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
+ cpu->cfg.max_satp_mode = -1;
- /*
- * Bare CPUs do not inherit the timer and performance
- * counters from the parent class (see riscv_cpu_init()
- * for info on why the parent enables them).
- *
- * Users have to explicitly enable these counters for
- * bare CPUs.
- */
- cpu->cfg.ext_zicntr = false;
- cpu->cfg.ext_zihpm = false;
+ if (mcc->def->profile) {
+ mcc->def->profile->enabled = true;
+ }
- /* Set to QEMU's first supported priv version */
- cpu->env.priv_ver = PRIV_VERSION_1_10_0;
+ env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
+ riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);
- /*
- * Support all available satp_mode settings. The default
- * value will be set to MBARE if the user doesn't set
- * satp_mode manually (see set_satp_mode_default()).
- */
+ if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.priv_ver = mcc->def->priv_spec;
+ }
+ if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.vext_ver = mcc->def->vext_spec;
+ }
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV64);
+ if (mcc->def->custom_csrs) {
+ riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
+ }
#endif
+
+ accel_cpu_instance_init(CPU(obj));
}
typedef struct misa_ext_info {
@@ -1406,7 +1164,6 @@ static const MISAExtInfo misa_ext_info_arr[] = {
MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
MISA_EXT_INFO(RVU, "u", "User-level instructions"),
MISA_EXT_INFO(RVH, "h", "Hypervisor"),
- MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
MISA_EXT_INFO(RVV, "v", "Vector operations"),
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
@@ -1417,7 +1174,7 @@ static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
CPUClass *cc = CPU_CLASS(mcc);
/* Validate that MISA_MXL is set properly. */
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
#ifdef TARGET_RISCV64
case MXL_RV64:
case MXL_RV128:
@@ -1473,7 +1230,15 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
+ MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
+ MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
+ MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
+ MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
+ MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
+ MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
+ MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
+ MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
@@ -1499,16 +1264,25 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
+ MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
+ MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
+ MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
+ MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
+ MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
+ MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
+ MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
+ MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
+ MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
@@ -1570,7 +1344,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
@@ -1587,12 +1361,14 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
- DEFINE_PROP_END_OF_LIST(),
+ MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),
+
+ { },
};
/*
@@ -1604,8 +1380,11 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
*/
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
+ MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
+ MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
+ MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
/* Deprecated entries marked for future removal */
@@ -1622,7 +1401,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
@@ -1677,7 +1456,8 @@ static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmu_num = {
- .name = "pmu-num",
+ .type = "int8",
+ .description = "pmu-num",
.get = prop_pmu_num_get,
.set = prop_pmu_num_set,
};
@@ -1718,7 +1498,8 @@ static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmu_mask = {
- .name = "pmu-mask",
+ .type = "int8",
+ .description = "pmu-mask",
.get = prop_pmu_mask_get,
.set = prop_pmu_mask_set,
};
@@ -1749,7 +1530,8 @@ static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mmu = {
- .name = "mmu",
+ .type = "bool",
+ .description = "mmu",
.get = prop_mmu_get,
.set = prop_mmu_set,
};
@@ -1780,7 +1562,8 @@ static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmp = {
- .name = "pmp",
+ .type = "bool",
+ .description = "pmp",
.get = prop_pmp_get,
.set = prop_pmp_set,
};
@@ -1854,7 +1637,9 @@ static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_priv_spec = {
- .name = "priv_spec",
+ .type = "str",
+ .description = "priv_spec",
+ /* FIXME enum? */
.get = prop_priv_spec_get,
.set = prop_priv_spec_set,
};
@@ -1885,7 +1670,9 @@ static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_vext_spec = {
- .name = "vext_spec",
+ .type = "str",
+ .description = "vext_spec",
+ /* FIXME enum? */
.get = prop_vext_spec_get,
.set = prop_vext_spec_set,
};
@@ -1894,6 +1681,7 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
RISCVCPU *cpu = RISCV_CPU(obj);
+ uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
uint16_t value;
if (!visit_type_uint16(v, name, &value, errp)) {
@@ -1905,10 +1693,10 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
return;
}
- if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
+ if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
cpu_set_prop_err(cpu, name, errp);
error_append_hint(errp, "Current '%s' val: %u\n",
- name, cpu->cfg.vlenb << 3);
+ name, cpu_vlen);
return;
}
@@ -1925,7 +1713,8 @@ static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_vlen = {
- .name = "vlen",
+ .type = "uint16",
+ .description = "vlen",
.get = prop_vlen_get,
.set = prop_vlen_set,
};
@@ -1965,7 +1754,8 @@ static void prop_elen_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_elen = {
- .name = "elen",
+ .type = "uint16",
+ .description = "elen",
.get = prop_elen_get,
.set = prop_elen_set,
};
@@ -2000,7 +1790,8 @@ static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cbom_blksize = {
- .name = "cbom_blocksize",
+ .type = "uint16",
+ .description = "cbom_blocksize",
.get = prop_cbom_blksize_get,
.set = prop_cbom_blksize_set,
};
@@ -2035,7 +1826,8 @@ static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cbop_blksize = {
- .name = "cbop_blocksize",
+ .type = "uint16",
+ .description = "cbop_blocksize",
.get = prop_cbop_blksize_get,
.set = prop_cbop_blksize_set,
};
@@ -2070,7 +1862,8 @@ static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cboz_blksize = {
- .name = "cboz_blocksize",
+ .type = "uint16",
+ .description = "cboz_blocksize",
.get = prop_cboz_blksize_get,
.set = prop_cboz_blksize_set,
};
@@ -2105,7 +1898,8 @@ static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mvendorid = {
- .name = "mvendorid",
+ .type = "uint32",
+ .description = "mvendorid",
.get = prop_mvendorid_get,
.set = prop_mvendorid_set,
};
@@ -2140,7 +1934,8 @@ static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mimpid = {
- .name = "mimpid",
+ .type = "uint64",
+ .description = "mimpid",
.get = prop_mimpid_get,
.set = prop_mimpid_set,
};
@@ -2196,7 +1991,8 @@ static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_marchid = {
- .name = "marchid",
+ .type = "uint64",
+ .description = "marchid",
.get = prop_marchid_get,
.set = prop_marchid_set,
};
@@ -2208,9 +2004,10 @@ static const PropertyInfo prop_marchid = {
* doesn't need to be manually enabled by the profile.
*/
static RISCVCPUProfile RVA22U64 = {
- .parent = NULL,
+ .u_parent = NULL,
+ .s_parent = NULL,
.name = "rva22u64",
- .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
.ext_offsets = {
@@ -2240,7 +2037,8 @@ static RISCVCPUProfile RVA22U64 = {
* The remaining features/extensions comes from RVA22U64.
*/
static RISCVCPUProfile RVA22S64 = {
- .parent = &RVA22U64,
+ .u_parent = &RVA22U64,
+ .s_parent = NULL,
.name = "rva22s64",
.misa_ext = RVS,
.priv_spec = PRIV_VERSION_1_12_0,
@@ -2254,9 +2052,65 @@ static RISCVCPUProfile RVA22S64 = {
}
};
+/*
+ * All mandatory extensions from RVA22U64 are present
+ * in RVA23U64 so set RVA22 as a parent. We need to
+ * declare just the newly added mandatory extensions.
+ */
+static RISCVCPUProfile RVA23U64 = {
+ .u_parent = &RVA22U64,
+ .s_parent = NULL,
+ .name = "rva23u64",
+ .misa_ext = RVV,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
+ .ext_offsets = {
+ CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
+ CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
+ CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
+ CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
+ CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
+ CPU_CFG_OFFSET(ext_supm),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
+/*
+ * As with RVA23U64, RVA23S64 also defines 'named features'.
+ *
+ * Cache related features that we consider enabled since we don't
+ * implement cache: Ssccptr
+ *
+ * Other named features that we already implement: Sstvecd, Sstvala,
+ * Sscounterenw, Ssu64xl
+ *
+ * The remaining features/extensions come from RVA23U64 and RVA22S64.
+ */
+static RISCVCPUProfile RVA23S64 = {
+ .u_parent = &RVA23U64,
+ .s_parent = &RVA22S64,
+ .name = "rva23s64",
+ .misa_ext = RVS,
+ .priv_spec = PRIV_VERSION_1_13_0,
+ .satp_mode = VM_1_10_SV39,
+ .ext_offsets = {
+ /* New in RVA23S64 */
+ CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
+ CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
+
+ /* Named features: Sha */
+ CPU_CFG_OFFSET(ext_sha),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
RISCVCPUProfile *riscv_profiles[] = {
&RVA22U64,
&RVA22S64,
+ &RVA23U64,
+ &RVA23S64,
NULL,
};
@@ -2635,6 +2489,54 @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
},
};
+static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_ssccfg),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
+ CPU_CFG_OFFSET(ext_smcdeleg),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_supm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_sspm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_smctr),
+ .implied_misa_exts = RVS,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_sscsrind),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_ssctr),
+ .implied_misa_exts = RVS,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_sscsrind),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
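A rule of this shape means: if the triggering extension is enabled, turn on every extension it lists. The real evaluation lives outside this hunk, so the following is only a sketch of the data-driven idea, with invented Demo* names and a boolean config array standing in for the offset-based lookup.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_RULE_END (-1)

typedef struct {
    int ext;            /* extension that triggers the rule */
    const int *implied; /* DEMO_RULE_END-terminated list */
} DemoImpliedRule;

static void demo_apply_rule(bool *cfg, const DemoImpliedRule *rule)
{
    if (!cfg[rule->ext]) {
        return; /* rule only fires if the triggering extension is on */
    }
    for (int i = 0; rule->implied[i] != DEMO_RULE_END; i++) {
        cfg[rule->implied[i]] = true;
    }
}

int main(void)
{
    enum { EXT_SSCCFG, EXT_SMCSRIND, EXT_SSCSRIND, EXT_SMCDELEG, EXT_COUNT };
    bool cfg[EXT_COUNT] = { [EXT_SSCCFG] = true };
    const int implied[] = { EXT_SMCSRIND, EXT_SSCSRIND, EXT_SMCDELEG,
                            DEMO_RULE_END };
    const DemoImpliedRule rule = { EXT_SSCCFG, implied };

    /* ssccfg on -> smcsrind, sscsrind and smcdeleg get switched on too */
    demo_apply_rule(cfg, &rule);
    printf("smcsrind=%d sscsrind=%d smcdeleg=%d\n",
           cfg[EXT_SMCSRIND], cfg[EXT_SSCSRIND], cfg[EXT_SMCDELEG]);
    return 0;
}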
+
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
&RVM_IMPLIED, &RVV_IMPLIED, NULL
@@ -2652,11 +2554,12 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
&ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
- &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
+ &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
+ &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
NULL
};
-static Property riscv_cpu_properties[] = {
+static const Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
{.name = "pmu-mask", .info = &prop_pmu_mask},
@@ -2675,43 +2578,31 @@ static Property riscv_cpu_properties[] = {
{.name = "cbop_blocksize", .info = &prop_cbop_blksize},
{.name = "cboz_blocksize", .info = &prop_cboz_blksize},
- {.name = "mvendorid", .info = &prop_mvendorid},
- {.name = "mimpid", .info = &prop_mimpid},
- {.name = "marchid", .info = &prop_marchid},
+ {.name = "mvendorid", .info = &prop_mvendorid},
+ {.name = "mimpid", .info = &prop_mimpid},
+ {.name = "marchid", .info = &prop_marchid},
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
+ DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
+ DEFAULT_RNMI_IRQVEC),
+ DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
+ DEFAULT_RNMI_EXCPVEC),
#endif
DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
+ DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
/*
* write_misa() is marked as experimental for now so mark
* it with -x and default to 'false'.
*/
DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
- DEFINE_PROP_END_OF_LIST(),
};
-#if defined(TARGET_RISCV64)
-static void rva22u64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22U64.enabled = true;
-}
-
-static void rva22s64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22S64.enabled = true;
-}
-#endif
-
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@@ -2739,6 +2630,7 @@ static int64_t riscv_get_arch_id(CPUState *cs)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps riscv_sysemu_ops = {
+ .has_work = riscv_cpu_has_work,
.get_phys_page_debug = riscv_cpu_get_phys_page_debug,
.write_elf64_note = riscv_cpu_write_elf64_note,
.write_elf32_note = riscv_cpu_write_elf32_note,
@@ -2746,7 +2638,7 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
};
#endif
-static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
+static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
@@ -2760,8 +2652,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
&mcc->parent_phases);
cc->class_by_name = riscv_cpu_class_by_name;
- cc->has_work = riscv_cpu_has_work;
- cc->mmu_index = riscv_cpu_mmu_index;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
cc->get_pc = riscv_cpu_get_pc;
@@ -2774,16 +2664,94 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
cc->get_arch_id = riscv_get_arch_id;
#endif
cc->gdb_arch_name = riscv_gdb_arch_name;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &riscv_tcg_ops;
+#endif /* CONFIG_TCG */
device_class_set_props(dc, riscv_cpu_properties);
}
-static void riscv_cpu_class_init(ObjectClass *c, void *data)
+static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent)
+{
+ RISCVCPUProfile *curr;
+ if (!parent) {
+ return true;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->u_parent;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->s_parent;
+ }
+
+ return false;
+}
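The check above accepts a candidate if the requested parent is reachable through either its u_parent chain or its s_parent chain. A standalone sketch with invented Demo* names, wired up with the same profile relationships declared earlier in this patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct DemoProfile {
    const char *name;
    const struct DemoProfile *u_parent;
    const struct DemoProfile *s_parent;
} DemoProfile;

static bool demo_extends(const DemoProfile *trial, const DemoProfile *parent)
{
    if (!parent) {
        return true;
    }
    for (const DemoProfile *p = trial; p; p = p->u_parent) {
        if (p == parent) {
            return true;
        }
    }
    for (const DemoProfile *p = trial; p; p = p->s_parent) {
        if (p == parent) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    DemoProfile a22u = { "rva22u64", NULL,  NULL  };
    DemoProfile a22s = { "rva22s64", &a22u, NULL  };
    DemoProfile a23u = { "rva23u64", &a22u, NULL  };
    DemoProfile a23s = { "rva23s64", &a23u, &a22s };

    printf("%d\n", demo_extends(&a23s, &a22u)); /* 1: via u_parent chain */
    printf("%d\n", demo_extends(&a23s, &a22s)); /* 1: via s_parent chain */
    printf("%d\n", demo_extends(&a22s, &a23u)); /* 0: not an ancestor */
    return 0;
}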
+
+static void riscv_cpu_class_base_init(ObjectClass *c, const void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+ RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c));
- mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
- riscv_cpu_validate_misa_mxl(mcc);
+ if (pcc->def) {
+ mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def));
+ } else {
+ mcc->def = g_new0(RISCVCPUDef, 1);
+ }
+
+ if (data) {
+ const RISCVCPUDef *def = data;
+ mcc->def->bare |= def->bare;
+ if (def->profile) {
+ assert(profile_extends(def->profile, mcc->def->profile));
+ assert(mcc->def->bare);
+ mcc->def->profile = def->profile;
+ }
+ if (def->misa_mxl_max) {
+ assert(def->misa_mxl_max <= MXL_RV128);
+ mcc->def->misa_mxl_max = def->misa_mxl_max;
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Hack to simplify CPU class hierarchies that include both 32- and
+ * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models.
+ */
+ if (mcc->def->misa_mxl_max == MXL_RV32 &&
+ !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) {
+ mcc->def->cfg.max_satp_mode = VM_1_10_SV32;
+ }
+#endif
+ }
+ if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->priv_spec <= PRIV_VERSION_LATEST);
+ mcc->def->priv_spec = def->priv_spec;
+ }
+ if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->vext_spec != 0);
+ mcc->def->vext_spec = def->vext_spec;
+ }
+ mcc->def->misa_ext |= def->misa_ext;
+
+ riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg);
+
+ if (def->custom_csrs) {
+ assert(!mcc->def->custom_csrs);
+ mcc->def->custom_csrs = def->custom_csrs;
+ }
+ }
+
+ if (!object_class_is_abstract(c)) {
+ riscv_cpu_validate_misa_mxl(mcc);
+ }
}
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
@@ -2878,50 +2846,34 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
}
#endif
-#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .abstract = true, \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_DYNAMIC_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_VENDOR_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
-
-#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
-
-#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
+#define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \
+ DEFINE_RISCV_CPU(type_name, parent_type_name, \
+ .profile = &(profile_))
static const TypeInfo riscv_cpu_type_infos[] = {
{
@@ -2930,53 +2882,310 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.instance_size = sizeof(RISCVCPU),
.instance_align = __alignof(RISCVCPU),
.instance_init = riscv_cpu_init,
- .instance_post_init = riscv_cpu_post_init,
.abstract = true,
.class_size = sizeof(RISCVCPUClass),
.class_init = riscv_cpu_common_class_init,
+ .class_base_init = riscv_cpu_class_base_init,
},
- {
- .name = TYPE_RISCV_DYNAMIC_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_VENDOR_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_BARE_CPU,
- .parent = TYPE_RISCV_CPU,
- .instance_init = riscv_bare_cpu_init,
- .abstract = true,
- },
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+ .priv_spec = PRIV_VERSION_LATEST,
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU),
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU,
+ /*
+ * Bare CPUs do not inherit the timer and performance
+ * counters from the parent class (see riscv_cpu_init()
+ * for info on why the parent enables them).
+ *
+ * Users have to explicitly enable these counters for
+ * bare CPUs.
+ */
+ .bare = true,
+
+ /* Set to QEMU's first supported priv version */
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ /*
+ * Support all available satp_mode settings. By default
+ * only MBARE will be available if the user doesn't enable
+ * a mode manually (see riscv_cpu_satp_mode_finalize()).
+ */
+#ifdef TARGET_RISCV32
+ .cfg.max_satp_mode = VM_1_10_SV32,
+#else
+ .cfg.max_satp_mode = VM_1_10_SV57,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU,
#if defined(TARGET_RISCV32)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
+ .misa_mxl_max = MXL_RV32,
+ .cfg.max_satp_mode = VM_1_10_SV32,
#elif defined(TARGET_RISCV64)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
-#ifdef CONFIG_TCG
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
+ .misa_mxl_max = MXL_RV64,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+#endif
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.mmu = true,
+ .cfg.pmp = true
+ ),
+
+#if defined(TARGET_RISCV32) || \
+ (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI | RVM | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_smepmp = true,
+
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVF, /* IMAFCU */
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVE
+ ),
+#endif
+
+#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
+#endif
+
+#if defined(TARGET_RISCV64)
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV64
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_11_0,
+
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfh = true,
+ .cfg.mmu = true,
+ .cfg.ext_xtheadba = true,
+ .cfg.ext_xtheadbb = true,
+ .cfg.ext_xtheadbs = true,
+ .cfg.ext_xtheadcmo = true,
+ .cfg.ext_xtheadcondmov = true,
+ .cfg.ext_xtheadfmemidx = true,
+ .cfg.ext_xtheadmac = true,
+ .cfg.ext_xtheadmemidx = true,
+ .cfg.ext_xtheadmempair = true,
+ .cfg.ext_xtheadsync = true,
+ .cfg.pmp = true,
+
+ .cfg.mvendorid = THEAD_VENDOR_ID,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+#ifndef CONFIG_USER_ONLY
+ .custom_csrs = th_csr_list,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV,
+ .priv_spec = PRIV_VERSION_1_13_0,
+ .vext_spec = VEXT_VERSION_1_00_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.vlenb = 256 >> 3,
+ .cfg.elen = 64,
+ .cfg.rvv_ma_all_1s = true,
+ .cfg.rvv_ta_all_1s = true,
+ .cfg.misa_w = true,
+ .cfg.pmp = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cbop_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zic64b = true,
+ .cfg.ext_zicbom = true,
+ .cfg.ext_zicbop = true,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_zicntr = true,
+ .cfg.ext_zicond = true,
+ .cfg.ext_zicsr = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zihintntl = true,
+ .cfg.ext_zihintpause = true,
+ .cfg.ext_zihpm = true,
+ .cfg.ext_zimop = true,
+ .cfg.ext_zawrs = true,
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfbfmin = true,
+ .cfg.ext_zfh = true,
+ .cfg.ext_zfhmin = true,
+ .cfg.ext_zcb = true,
+ .cfg.ext_zcmop = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_zkt = true,
+ .cfg.ext_zvbb = true,
+ .cfg.ext_zvbc = true,
+ .cfg.ext_zvfbfmin = true,
+ .cfg.ext_zvfbfwma = true,
+ .cfg.ext_zvfh = true,
+ .cfg.ext_zvfhmin = true,
+ .cfg.ext_zvkng = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svade = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_zicbom = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_XVentanaCondOps = true,
+
+ .cfg.mvendorid = VEYRON_V1_MVENDORID,
+ .cfg.marchid = VEYRON_V1_MARCHID,
+ .cfg.mimpid = VEYRON_V1_MIMPID,
+
+ .cfg.max_satp_mode = VM_1_10_SV48,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVB | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbkb = true,
+ .cfg.ext_zbkc = true,
+ .cfg.ext_zbkx = true,
+ .cfg.ext_zknd = true,
+ .cfg.ext_zkne = true,
+ .cfg.ext_zknh = true,
+ .cfg.ext_zksed = true,
+ .cfg.ext_zksh = true,
+ .cfg.ext_svinval = true,
+
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ ),
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV128,
+ ),
#endif /* CONFIG_TCG */
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVE
+ ),
+
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64),
#endif /* TARGET_RISCV64 */
};
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 1619c3a..229ade9 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -23,7 +23,9 @@
#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
@@ -44,10 +46,9 @@ typedef struct CPUArchState CPURISCVState;
#endif
/*
- * RISC-V-specific extra insn start words:
- * 1: Original instruction opcode
+ * b0: Whether an instruction always raises a store AMO or not.
*/
-#define TARGET_INSN_START_EXTRA_WORDS 1
+#define RISCV_UW2_ALWAYS_STORE_AMO 1
#define RV(x) ((target_ulong)1 << (x - 'A'))
@@ -66,7 +67,6 @@ typedef struct CPUArchState CPURISCVState;
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
-#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
@@ -75,9 +75,11 @@ const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
+#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
typedef struct riscv_cpu_profile {
- struct riscv_cpu_profile *parent;
+ struct riscv_cpu_profile *u_parent;
+ struct riscv_cpu_profile *s_parent;
const char *name;
uint32_t misa_ext;
bool enabled;
@@ -124,6 +126,14 @@ typedef enum {
EXT_STATUS_DIRTY,
} RISCVExtStatus;
+/* Enum holds PMM field values for Zjpm v1.0 extension */
+typedef enum {
+ PMM_FIELD_DISABLED = 0,
+ PMM_FIELD_RESERVED = 1,
+ PMM_FIELD_PMLEN7 = 2,
+ PMM_FIELD_PMLEN16 = 3,
+} RISCVPmPmm;
+
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
/*
@@ -230,12 +240,24 @@ struct CPUArchState {
target_ulong jvt;
+ /* elp state for zicfilp extension */
+ bool elp;
+ /* shadow stack register for zicfiss extension */
+ target_ulong ssp;
+ /* env placeholder for extra word 2 during unwind */
+ target_ulong excp_uw2;
+ /* sw check code for sw check exception */
+ target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
uint32_t elf_flags;
#endif
-#ifndef CONFIG_USER_ONLY
target_ulong priv;
+ /* CSRs for execution environment configuration */
+ uint64_t menvcfg;
+ target_ulong senvcfg;
+
+#ifndef CONFIG_USER_ONLY
/* This contains QEMU specific information about the virt state. */
bool virt_enabled;
target_ulong geilen;
@@ -288,6 +310,15 @@ struct CPUArchState {
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
+ uint64_t mctrctl;
+ uint32_t sctrdepth;
+ uint32_t sctrstatus;
+ uint64_t vsctrctl;
+
+ uint64_t ctr_src[16 << SCTRDEPTH_MAX];
+ uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
+ uint64_t ctr_data[16 << SCTRDEPTH_MAX];
+
/* Machine and Supervisor interrupt priorities */
uint8_t miprio[64];
uint8_t siprio[64];
@@ -368,6 +399,7 @@ struct CPUArchState {
uint32_t scounteren;
uint32_t mcounteren;
+ uint32_t scountinhibit;
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
@@ -434,27 +466,11 @@ struct CPUArchState {
/* True if in debugger mode. */
bool debugger;
- /*
- * CSRs for PointerMasking extension
- */
- target_ulong mmte;
- target_ulong mpmmask;
- target_ulong mpmbase;
- target_ulong spmmask;
- target_ulong spmbase;
- target_ulong upmmask;
- target_ulong upmbase;
-
- /* CSRs for execution environment configuration */
- uint64_t menvcfg;
uint64_t mstateen[SMSTATEEN_MAX_COUNT];
uint64_t hstateen[SMSTATEEN_MAX_COUNT];
uint64_t sstateen[SMSTATEEN_MAX_COUNT];
- target_ulong senvcfg;
uint64_t henvcfg;
#endif
- target_ulong cur_pmmask;
- target_ulong cur_pmbase;
/* Fields from here on are preserved across CPU reset. */
QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
@@ -472,9 +488,31 @@ struct CPUArchState {
uint64_t kvm_timer_state;
uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
+
+ /* RNMI */
+ target_ulong mnscratch;
+ target_ulong mnepc;
+ target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
+ target_ulong mnstatus;
+ target_ulong rnmip;
+ uint64_t rnmi_irqvec;
+ uint64_t rnmi_excpvec;
};
/*
+ * map is a 16-bit bitmap: the most significant set bit in map is the maximum
+ * satp mode that is supported. It may be chosen by the user and must respect
+ * what qemu implements (valid_1_10_32/64) and what the hw is capable of
+ * (supported bitmap below).
+ *
+ * init is a 16-bit bitmap used to make sure the user selected a correct
+ * configuration as per the specification.
+ */
+typedef struct {
+ uint16_t map, init;
+} RISCVSATPModes;
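A sketch of how the map/init pair gets updated when one per-mode property is toggled, in the spirit of cpu_riscv_set_satp(). demo_deposit32() is a local stand-in for QEMU's deposit32(), and the bit numbers are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint16_t map, init;
} DemoSATPModes;

/* Stand-in for deposit32(): write 'length' bits of fieldval at 'start'. */
static uint32_t demo_deposit32(uint32_t value, int start, int length,
                               uint32_t fieldval)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

static void demo_set_mode(DemoSATPModes *m, int mode_bit, bool enable)
{
    m->map = demo_deposit32(m->map, mode_bit, 1, enable);
    m->init |= 1u << mode_bit; /* remember that the user touched this mode */
}

int main(void)
{
    DemoSATPModes m = { 0, 0 };

    demo_set_mode(&m, 8, true);   /* e.g. enable "sv39" (bit 8 in this sketch) */
    demo_set_mode(&m, 10, false); /* explicitly disable "sv57" */
    printf("map=0x%04x init=0x%04x\n", (unsigned)m.map, (unsigned)m.init);
    return 0;
}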
+
+/*
* RISCVCPU:
* @env: #CPURISCVState
*
@@ -490,6 +528,7 @@ struct ArchCPU {
/* Configuration Settings */
RISCVCPUConfig cfg;
+ RISCVSATPModes satp_modes;
QEMUTimer *pmu_timer;
/* A bitmask of Available programmable counters */
@@ -499,6 +538,19 @@ struct ArchCPU {
const GPtrArray *decoders;
};
+typedef struct RISCVCSR RISCVCSR;
+
+typedef struct RISCVCPUDef {
+ RISCVMXL misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUProfile *profile;
+ uint32_t misa_ext;
+ int priv_spec;
+ int32_t vext_spec;
+ RISCVCPUConfig cfg;
+ bool bare;
+ const RISCVCSR *custom_csrs;
+} RISCVCPUDef;
+
/**
* RISCVCPUClass:
* @parent_realize: The parent class' realize handler.
@@ -511,7 +563,7 @@ struct RISCVCPUClass {
DeviceRealize parent_realize;
ResettablePhases parent_phases;
- uint32_t misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUDef *def;
};
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
@@ -544,6 +596,9 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
+bool cpu_get_fcfien(CPURISCVState *env);
+bool cpu_get_bcfien(CPURISCVState *env);
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
@@ -568,6 +623,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
uint64_t value);
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
@@ -585,15 +641,21 @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
+ enum CTRType type, target_ulong prev_priv, bool prev_virt);
+void riscv_ctr_clear(CPURISCVState *env);
+
void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc);
+ RISCVException exception,
+ uintptr_t pc);
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
-#include "exec/cpu-all.h"
-
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
@@ -608,14 +670,22 @@ FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
-FIELD(TB_FLAGS, VTA, 20, 1)
-FIELD(TB_FLAGS, VMA, 21, 1)
+FIELD(TB_FLAGS, VTA, 18, 1)
+FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
-FIELD(TB_FLAGS, ITRIGGER, 22, 1)
+FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
-FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
-FIELD(TB_FLAGS, PRIV, 24, 2)
-FIELD(TB_FLAGS, AXL, 26, 2)
+FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
+FIELD(TB_FLAGS, PRIV, 22, 2)
+FIELD(TB_FLAGS, AXL, 24, 2)
+/* zicfilp needs a TB flag to track indirect branches */
+FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
+FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
+/* zicfiss needs a TB flag so that the correct TB is located based on tb flags */
+FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
+/* If pointer masking should be applied and address sign extended */
+FIELD(TB_FLAGS, PM_PMM, 29, 2)
+FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
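Editor's note: a brief sketch of how the renumbered pointer-masking fields are packed and read back with QEMU's hw/registerfields.h helpers (FIELD_DP32/FIELD_EX32); the wrapper below is illustrative only, not code from the patch:

    static inline uint32_t example_pack_pm_tb_flags(uint32_t flags,
                                                    RISCVPmPmm pmm, bool signext)
    {
        /* PM_PMM occupies bits 29..30, PM_SIGNEXTEND bit 31 */
        flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, pmm);
        return FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, signext);
    }
    /* In the translator: RISCVPmPmm pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM); */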
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@@ -709,11 +779,26 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
#ifdef CONFIG_USER_ONLY
return env->misa_mxl;
#else
- return get_field(env->mstatus, MSTATUS64_SXL);
+ if (env->misa_mxl != MXL_RV32) {
+ return get_field(env->mstatus, MSTATUS64_SXL);
+ }
#endif
+ return MXL_RV32;
}
#endif
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
+ target_long priv_ver,
+ uint32_t misa_ext)
+{
+ /* In priv spec version 1.12 or newer, C always implies Zca */
+ if (priv_ver >= PRIV_VERSION_1_12_0) {
+ return cfg->ext_zca;
+ } else {
+ return misa_ext & RVC;
+ }
+}
+
/*
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul
@@ -745,17 +830,19 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
return vlen >> (vsew + 3 - lmul);
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
-void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
+
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
+
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask);
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -764,13 +851,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
target_ulong val)
{
- riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
+ riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
target_ulong val = 0;
- riscv_csrrw(env, csrno, &val, 0, 0);
+ riscv_csrrw(env, csrno, &val, 0, 0, 0);
return val;
}
@@ -779,7 +866,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
- target_ulong new_value);
+ target_ulong new_value,
+ uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -788,8 +876,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask);
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra);
typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
Int128 *ret_value);
@@ -808,6 +896,12 @@ typedef struct {
uint32_t min_priv_ver;
} riscv_csr_operations;
+struct RISCVCSR {
+ int csrno;
+ bool (*insertion_test)(RISCVCPU *cpu);
+ riscv_csr_operations csr_ops;
+};
+
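Editor's note: a hedged sketch of what a vendor CSR table attached through RISCVCPUDef.custom_csrs could look like, in the spirit of th_csr_list further below; the CSR number, names, callbacks and zero terminator are assumptions, not part of the patch:

    static RISCVException example_csr_read_zero(CPURISCVState *env, int csrno,
                                                target_ulong *val)
    {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    static bool example_csr_present(RISCVCPU *cpu)
    {
        /* register the CSR only when the relevant vendor extension is enabled */
        return cpu->cfg.ext_XVentanaCondOps;
    }

    static const RISCVCSR example_csr_list[] = {
        { .csrno = 0x7c0,    /* hypothetical vendor-defined CSR number */
          .insertion_test = example_csr_present,
          .csr_ops = { .name = "xexample", .read = example_csr_read_zero } },
        { }                  /* terminator (assumed convention) */
    };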
/* CSR function table constants */
enum {
CSR_TABLE_SIZE = 0x1000
@@ -862,18 +956,17 @@ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
target_ulong riscv_new_csr_seed(target_ulong new_value,
target_ulong write_mask);
-uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
-/* Implemented in th_csr.c */
-void th_register_custom_csrs(RISCVCPU *cpu);
+/* In th_csr.c */
+extern const RISCVCSR th_csr_list[];
const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 32b068f..a30317c 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -34,6 +34,9 @@
/* Control and Status Registers */
+/* zicfiss user ssp csr */
+#define CSR_SSP 0x011
+
/* User Trap Setup */
#define CSR_USTATUS 0x000
#define CSR_UIE 0x004
@@ -170,6 +173,13 @@
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
+/* Machine Indirect Register Alias */
+#define CSR_MIREG2 0x352
+#define CSR_MIREG3 0x353
+#define CSR_MIREG4 0x355
+#define CSR_MIREG5 0x356
+#define CSR_MIREG6 0x357
+
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
#define CSR_MTOPI 0xfb0
@@ -200,6 +210,9 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
+/* Supervisor Counter Delegation */
+#define CSR_SCOUNTINHIBIT 0x120
+
/* Supervisor Trap Handling */
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
@@ -219,6 +232,13 @@
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
+/* Supervisor Indirect Register Alias */
+#define CSR_SIREG2 0x152
+#define CSR_SIREG3 0x153
+#define CSR_SIREG4 0x155
+#define CSR_SIREG5 0x156
+#define CSR_SIREG6 0x157
+
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
#define CSR_STOPI 0xdb0
@@ -227,6 +247,17 @@
#define CSR_SIEH 0x114
#define CSR_SIPH 0x154
+/* Machine-Level Control transfer records CSRs */
+#define CSR_MCTRCTL 0x34e
+
+/* Supervisor-Level Control transfer records CSRs */
+#define CSR_SCTRCTL 0x14e
+#define CSR_SCTRSTATUS 0x14f
+#define CSR_SCTRDEPTH 0x15f
+
+/* VS-Level Control transfer records CSRs */
+#define CSR_VSCTRCTL 0x24e
+
/* Hypervisor CSRs */
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
@@ -285,6 +316,13 @@
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
+/* Virtual Supervisor Indirect Register Alias */
+#define CSR_VSIREG2 0x252
+#define CSR_VSIREG3 0x253
+#define CSR_VSIREG4 0x255
+#define CSR_VSIREG5 0x256
+#define CSR_VSIREG6 0x257
+
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
#define CSR_VSTOPI 0xeb0
@@ -317,6 +355,7 @@
#define SMSTATEEN0_CS (1ULL << 0)
#define SMSTATEEN0_FCSR (1ULL << 1)
#define SMSTATEEN0_JVT (1ULL << 2)
+#define SMSTATEEN0_CTR (1ULL << 54)
#define SMSTATEEN0_P1P13 (1ULL << 56)
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
#define SMSTATEEN0_IMSIC (1ULL << 58)
@@ -350,6 +389,12 @@
#define CSR_PMPADDR14 0x3be
#define CSR_PMPADDR15 0x3bf
+/* RNMI */
+#define CSR_MNSCRATCH 0x740
+#define CSR_MNEPC 0x741
+#define CSR_MNCAUSE 0x742
+#define CSR_MNSTATUS 0x744
+
/* Debug/Trace Registers (shared with Debug Mode) */
#define CSR_TSELECT 0x7a0
#define CSR_TDATA1 0x7a1
@@ -494,37 +539,6 @@
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
-/*
- * User PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_UMTE 0x4c0
-#define CSR_UPMMASK 0x4c1
-#define CSR_UPMBASE 0x4c2
-
-/*
- * Machine PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_MMTE 0x3c0
-#define CSR_MPMMASK 0x3c1
-#define CSR_MPMBASE 0x3c2
-
-/*
- * Supervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_SMTE 0x1c0
-#define CSR_SPMMASK 0x1c1
-#define CSR_SPMBASE 0x1c2
-
-/*
- * Hypervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_VSMTE 0x2c0
-#define CSR_VSPMMASK 0x2c1
-#define CSR_VSPMBASE 0x2c2
#define CSR_SCOUNTOVF 0xda0
/* Crypto Extension */
@@ -552,8 +566,12 @@
#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */
#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
+#define MSTATUS_SPELP 0x00800000 /* zicfilp */
+#define MSTATUS_SDT 0x01000000
+#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
#define MSTATUS_GVA 0x4000000000ULL
#define MSTATUS_MPV 0x8000000000ULL
+#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
@@ -582,6 +600,8 @@ typedef enum {
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
+#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */
+#define SSTATUS_SDT MSTATUS_SDT
#define SSTATUS64_UXL 0x0000000300000000ULL
@@ -598,7 +618,9 @@ typedef enum {
#define HSTATUS_VTVM 0x00100000
#define HSTATUS_VTW 0x00200000
#define HSTATUS_VTSR 0x00400000
+#define HSTATUS_HUKTE 0x01000000
#define HSTATUS_VSXL 0x300000000
+#define HSTATUS_HUPMM 0x3000000000000
#define HSTATUS32_WPRI 0xFF8FF87E
#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
@@ -627,6 +649,12 @@ typedef enum {
#define SATP64_ASID 0x0FFFF00000000000ULL
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
+/* RNMI mnstatus CSR mask */
+#define MNSTATUS_NMIE 0x00000008
+#define MNSTATUS_MNPV 0x00000080
+#define MNSTATUS_MNPELP 0x00000200
+#define MNSTATUS_MNPP 0x00001800
+
/* VM modes (satp.mode) privileged ISA 1.10 */
#define VM_1_10_MBARE 0
#define VM_1_10_SV32 1
@@ -662,6 +690,12 @@ typedef enum {
/* Default Reset Vector address */
#define DEFAULT_RSTVEC 0x1000
+/* Default RNMI Interrupt Vector address */
+#define DEFAULT_RNMI_IRQVEC 0x0
+
+/* Default RNMI Exception Vector address */
+#define DEFAULT_RNMI_EXCPVEC 0x0
+
/* Exception causes */
typedef enum RISCVException {
RISCV_EXCP_NONE = -1, /* sentinel value */
@@ -680,6 +714,7 @@ typedef enum RISCVException {
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
+ RISCV_EXCP_DOUBLE_TRAP = 0x10,
RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
@@ -689,6 +724,11 @@ typedef enum RISCVException {
RISCV_EXCP_SEMIHOST = 0x3f,
} RISCVException;
+/* zicfilp: a landing pad violation results in a sw check exception with tval = 2 */
+#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2
+/* zicfiss: a shadow stack violation results in a sw check exception with tval = 3 */
+#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3
+
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
@@ -711,6 +751,9 @@ typedef enum RISCVException {
/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
+/* RNMI causes */
+#define RNMI_MAX 16
+
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
@@ -747,39 +790,49 @@ typedef enum RISCVException {
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
-/* General PointerMasking CSR bits */
-#define PM_ENABLE 0x00000001ULL
-#define PM_CURRENT 0x00000002ULL
-#define PM_INSN 0x00000004ULL
-
/* Execution environment configuration bits */
#define MENVCFG_FIOM BIT(0)
+#define MENVCFG_LPE BIT(2) /* zicfilp */
+#define MENVCFG_SSE BIT(3) /* zicfiss */
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
+#define MENVCFG_PMM (3ULL << 32)
+#define MENVCFG_DTE (1ULL << 59)
+#define MENVCFG_CDE (1ULL << 60)
#define MENVCFG_ADUE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
+#define MENVCFGH_DTE BIT(27)
#define MENVCFGH_ADUE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
#define SENVCFG_FIOM MENVCFG_FIOM
+#define SENVCFG_LPE MENVCFG_LPE
+#define SENVCFG_SSE MENVCFG_SSE
#define SENVCFG_CBIE MENVCFG_CBIE
#define SENVCFG_CBCFE MENVCFG_CBCFE
#define SENVCFG_CBZE MENVCFG_CBZE
+#define SENVCFG_UKTE BIT(8)
+#define SENVCFG_PMM MENVCFG_PMM
#define HENVCFG_FIOM MENVCFG_FIOM
+#define HENVCFG_LPE MENVCFG_LPE
+#define HENVCFG_SSE MENVCFG_SSE
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
+#define HENVCFG_PMM MENVCFG_PMM
+#define HENVCFG_DTE MENVCFG_DTE
#define HENVCFG_ADUE MENVCFG_ADUE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE
/* For RV32 */
+#define HENVCFGH_DTE MENVCFGH_DTE
#define HENVCFGH_ADUE MENVCFGH_ADUE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
@@ -835,6 +888,88 @@ typedef enum RISCVException {
#define UMTE_U_PM_INSN U_PM_INSN
#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
+/* CTR control register common fields */
+#define XCTRCTL_U BIT_ULL(0)
+#define XCTRCTL_S BIT_ULL(1)
+#define XCTRCTL_RASEMU BIT_ULL(7)
+#define XCTRCTL_STE BIT_ULL(8)
+#define XCTRCTL_BPFRZ BIT_ULL(11)
+#define XCTRCTL_LCOFIFRZ BIT_ULL(12)
+#define XCTRCTL_EXCINH BIT_ULL(33)
+#define XCTRCTL_INTRINH BIT_ULL(34)
+#define XCTRCTL_TRETINH BIT_ULL(35)
+#define XCTRCTL_NTBREN BIT_ULL(36)
+#define XCTRCTL_TKBRINH BIT_ULL(37)
+#define XCTRCTL_INDCALLINH BIT_ULL(40)
+#define XCTRCTL_DIRCALLINH BIT_ULL(41)
+#define XCTRCTL_INDJMPINH BIT_ULL(42)
+#define XCTRCTL_DIRJMPINH BIT_ULL(43)
+#define XCTRCTL_CORSWAPINH BIT_ULL(44)
+#define XCTRCTL_RETINH BIT_ULL(45)
+#define XCTRCTL_INDLJMPINH BIT_ULL(46)
+#define XCTRCTL_DIRLJMPINH BIT_ULL(47)
+
+#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \
+ XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \
+ XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \
+ XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \
+ XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \
+ XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \
+ XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH)
+
+#define XCTRCTL_INH_START 32U
+
+/* CTR mctrctl bits */
+#define MCTRCTL_M BIT_ULL(2)
+#define MCTRCTL_MTE BIT_ULL(9)
+
+#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE)
+#define SCTRCTL_MASK XCTRCTL_MASK
+#define VSCTRCTL_MASK XCTRCTL_MASK
+
+/* sctrstatus CSR bits. */
+#define SCTRSTATUS_WRPTR_MASK 0xFF
+#define SCTRSTATUS_FROZEN BIT(31)
+#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN)
+
+/* sctrdepth CSR bits. */
+#define SCTRDEPTH_MASK 0x7
+#define SCTRDEPTH_MIN 0U /* 16 Entries. */
+#define SCTRDEPTH_MAX 4U /* 256 Entries. */
+
+#define CTR_ENTRIES_FIRST 0x200
+#define CTR_ENTRIES_LAST 0x2ff
+
+#define CTRSOURCE_VALID BIT(0)
+#define CTRTARGET_MISP BIT(0)
+
+#define CTRDATA_TYPE_MASK 0xF
+#define CTRDATA_CCV BIT(15)
+#define CTRDATA_CCM_MASK 0xFFF0000
+#define CTRDATA_CCE_MASK 0xF0000000
+
+#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \
+ CTRDATA_CCM_MASK | CTRDATA_CCE_MASK)
+
+typedef enum CTRType {
+ CTRDATA_TYPE_NONE = 0,
+ CTRDATA_TYPE_EXCEPTION = 1,
+ CTRDATA_TYPE_INTERRUPT = 2,
+ CTRDATA_TYPE_EXCEP_INT_RET = 3,
+ CTRDATA_TYPE_NONTAKEN_BRANCH = 4,
+ CTRDATA_TYPE_TAKEN_BRANCH = 5,
+ CTRDATA_TYPE_RESERVED_0 = 6,
+ CTRDATA_TYPE_RESERVED_1 = 7,
+ CTRDATA_TYPE_INDIRECT_CALL = 8,
+ CTRDATA_TYPE_DIRECT_CALL = 9,
+ CTRDATA_TYPE_INDIRECT_JUMP = 10,
+ CTRDATA_TYPE_DIRECT_JUMP = 11,
+ CTRDATA_TYPE_CO_ROUTINE_SWAP = 12,
+ CTRDATA_TYPE_RETURN = 13,
+ CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14,
+ CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15,
+} CTRType;
+
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
@@ -846,10 +981,15 @@ typedef enum RISCVException {
#define ISELECT_IMSIC_EIE63 0xff
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
-#define ISELECT_MASK 0x1ff
+#define ISELECT_MASK_AIA 0x1ff
+
+/* [M|S|VS]ISELECT value for Indirect CSR Access Extension */
+#define ISELECT_CD_FIRST 0x40
+#define ISELECT_CD_LAST 0x5f
+#define ISELECT_MASK_SXCSRIND 0xfff
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
-#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
+#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1)
/* IMSIC bits (AIA) */
#define IMSIC_TOPEI_IID_SHIFT 16
@@ -938,15 +1078,27 @@ typedef enum RISCVException {
MHPMEVENTH_BIT_VSINH | \
MHPMEVENTH_BIT_VUINH)
-#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
-#define MHPMEVENT_IDX_MASK 0xFFFFF
-#define MHPMEVENT_SSCOF_RESVD 16
+#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56)
+#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK)
+
+/* RISC-V-specific interrupt pending bits. */
+#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
/* JVT CSR bits */
#define JVT_MODE 0x3F
#define JVT_BASE (~0x3F)
/* Debug Sdtrig CSR masks */
+#define TEXTRA32_MHVALUE 0xFC000000
+#define TEXTRA32_MHSELECT 0x03800000
+#define TEXTRA32_SBYTEMASK 0x000C0000
+#define TEXTRA32_SVALUE 0x0003FFFC
+#define TEXTRA32_SSELECT 0x00000003
+#define TEXTRA64_MHVALUE 0xFFF8000000000000ULL
+#define TEXTRA64_MHSELECT 0x0007000000000000ULL
+#define TEXTRA64_SBYTEMASK 0x000000F000000000ULL
+#define TEXTRA64_SVALUE 0x00000003FFFFFFFCULL
+#define TEXTRA64_SSELECT 0x0000000000000003ULL
#define MCONTEXT32 0x0000003F
#define MCONTEXT64 0x0000000000001FFFULL
#define MCONTEXT32_HCONTEXT 0x0000007F
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 8b272fb..aa28dc8 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -21,160 +21,10 @@
#ifndef RISCV_CPU_CFG_H
#define RISCV_CPU_CFG_H
-/*
- * map is a 16-bit bitmap: the most significant set bit in map is the maximum
- * satp mode that is supported. It may be chosen by the user and must respect
- * what qemu implements (valid_1_10_32/64) and what the hw is capable of
- * (supported bitmap below).
- *
- * init is a 16-bit bitmap used to make sure the user selected a correct
- * configuration as per the specification.
- *
- * supported is a 16-bit bitmap used to reflect the hw capabilities.
- */
-typedef struct {
- uint16_t map, init, supported;
-} RISCVSATPMap;
-
struct RISCVCPUConfig {
- bool ext_zba;
- bool ext_zbb;
- bool ext_zbc;
- bool ext_zbkb;
- bool ext_zbkc;
- bool ext_zbkx;
- bool ext_zbs;
- bool ext_zca;
- bool ext_zcb;
- bool ext_zcd;
- bool ext_zce;
- bool ext_zcf;
- bool ext_zcmp;
- bool ext_zcmt;
- bool ext_zk;
- bool ext_zkn;
- bool ext_zknd;
- bool ext_zkne;
- bool ext_zknh;
- bool ext_zkr;
- bool ext_zks;
- bool ext_zksed;
- bool ext_zksh;
- bool ext_zkt;
- bool ext_zifencei;
- bool ext_zicntr;
- bool ext_zicsr;
- bool ext_zicbom;
- bool ext_zicbop;
- bool ext_zicboz;
- bool ext_zicond;
- bool ext_zihintntl;
- bool ext_zihintpause;
- bool ext_zihpm;
- bool ext_zimop;
- bool ext_zcmop;
- bool ext_ztso;
- bool ext_smstateen;
- bool ext_sstc;
- bool ext_smcntrpmf;
- bool ext_svadu;
- bool ext_svinval;
- bool ext_svnapot;
- bool ext_svpbmt;
- bool ext_zdinx;
- bool ext_zaamo;
- bool ext_zacas;
- bool ext_zama16b;
- bool ext_zabha;
- bool ext_zalrsc;
- bool ext_zawrs;
- bool ext_zfa;
- bool ext_zfbfmin;
- bool ext_zfh;
- bool ext_zfhmin;
- bool ext_zfinx;
- bool ext_zhinx;
- bool ext_zhinxmin;
- bool ext_zve32f;
- bool ext_zve32x;
- bool ext_zve64f;
- bool ext_zve64d;
- bool ext_zve64x;
- bool ext_zvbb;
- bool ext_zvbc;
- bool ext_zvkb;
- bool ext_zvkg;
- bool ext_zvkned;
- bool ext_zvknha;
- bool ext_zvknhb;
- bool ext_zvksed;
- bool ext_zvksh;
- bool ext_zvkt;
- bool ext_zvkn;
- bool ext_zvknc;
- bool ext_zvkng;
- bool ext_zvks;
- bool ext_zvksc;
- bool ext_zvksg;
- bool ext_zmmul;
- bool ext_zvfbfmin;
- bool ext_zvfbfwma;
- bool ext_zvfh;
- bool ext_zvfhmin;
- bool ext_smaia;
- bool ext_ssaia;
- bool ext_sscofpmf;
- bool ext_smepmp;
- bool rvv_ta_all_1s;
- bool rvv_ma_all_1s;
-
- uint32_t mvendorid;
- uint64_t marchid;
- uint64_t mimpid;
-
- /* Named features */
- bool ext_svade;
- bool ext_zic64b;
-
- /*
- * Always 'true' booleans for named features
- * TCG always implement/can't be user disabled,
- * based on spec version.
- */
- bool has_priv_1_13;
- bool has_priv_1_12;
- bool has_priv_1_11;
-
- /* Vendor-specific custom extensions */
- bool ext_xtheadba;
- bool ext_xtheadbb;
- bool ext_xtheadbs;
- bool ext_xtheadcmo;
- bool ext_xtheadcondmov;
- bool ext_xtheadfmemidx;
- bool ext_xtheadfmv;
- bool ext_xtheadmac;
- bool ext_xtheadmemidx;
- bool ext_xtheadmempair;
- bool ext_xtheadsync;
- bool ext_XVentanaCondOps;
-
- uint32_t pmu_mask;
- uint16_t vlenb;
- uint16_t elen;
- uint16_t cbom_blocksize;
- uint16_t cbop_blocksize;
- uint16_t cboz_blocksize;
- bool mmu;
- bool pmp;
- bool debug;
- bool misa_w;
-
- bool short_isa_string;
-
-#ifndef CONFIG_USER_ONLY
- RISCVSATPMap satp_mode;
-#endif
+#define BOOL_FIELD(x) bool x;
+#define TYPED_FIELD(type, x, default) type x;
+#include "cpu_cfg_fields.h.inc"
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
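Editor's note: the BOOL_FIELD/TYPED_FIELD construct above is a classic x-macro: each consumer re-includes the field list with its own macro definitions, and the #undefs at the end of cpu_cfg_fields.h.inc make repeated inclusion safe. A minimal sketch (the function is hypothetical, not from the patch):

    static void example_reset_cfg_to_defaults(RISCVCPUConfig *cfg)
    {
    #define BOOL_FIELD(x)                 cfg->x = false;
    #define TYPED_FIELD(type, x, default) cfg->x = default;
    #include "cpu_cfg_fields.h.inc"
    }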
diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
new file mode 100644
index 0000000..59f134a
--- /dev/null
+++ b/target/riscv/cpu_cfg_fields.h.inc
@@ -0,0 +1,170 @@
+/*
+ * Required definitions before including this file:
+ *
+ * #define BOOL_FIELD(x)
+ * #define TYPED_FIELD(type, x, default)
+ */
+
+BOOL_FIELD(ext_zba)
+BOOL_FIELD(ext_zbb)
+BOOL_FIELD(ext_zbc)
+BOOL_FIELD(ext_zbkb)
+BOOL_FIELD(ext_zbkc)
+BOOL_FIELD(ext_zbkx)
+BOOL_FIELD(ext_zbs)
+BOOL_FIELD(ext_zca)
+BOOL_FIELD(ext_zcb)
+BOOL_FIELD(ext_zcd)
+BOOL_FIELD(ext_zce)
+BOOL_FIELD(ext_zcf)
+BOOL_FIELD(ext_zcmp)
+BOOL_FIELD(ext_zcmt)
+BOOL_FIELD(ext_zk)
+BOOL_FIELD(ext_zkn)
+BOOL_FIELD(ext_zknd)
+BOOL_FIELD(ext_zkne)
+BOOL_FIELD(ext_zknh)
+BOOL_FIELD(ext_zkr)
+BOOL_FIELD(ext_zks)
+BOOL_FIELD(ext_zksed)
+BOOL_FIELD(ext_zksh)
+BOOL_FIELD(ext_zkt)
+BOOL_FIELD(ext_zifencei)
+BOOL_FIELD(ext_zicntr)
+BOOL_FIELD(ext_zicsr)
+BOOL_FIELD(ext_zicbom)
+BOOL_FIELD(ext_zicbop)
+BOOL_FIELD(ext_zicboz)
+BOOL_FIELD(ext_zicfilp)
+BOOL_FIELD(ext_zicfiss)
+BOOL_FIELD(ext_zicond)
+BOOL_FIELD(ext_zihintntl)
+BOOL_FIELD(ext_zihintpause)
+BOOL_FIELD(ext_zihpm)
+BOOL_FIELD(ext_zimop)
+BOOL_FIELD(ext_zcmop)
+BOOL_FIELD(ext_ztso)
+BOOL_FIELD(ext_smstateen)
+BOOL_FIELD(ext_sstc)
+BOOL_FIELD(ext_smcdeleg)
+BOOL_FIELD(ext_ssccfg)
+BOOL_FIELD(ext_smcntrpmf)
+BOOL_FIELD(ext_smcsrind)
+BOOL_FIELD(ext_sscsrind)
+BOOL_FIELD(ext_ssdbltrp)
+BOOL_FIELD(ext_smdbltrp)
+BOOL_FIELD(ext_svadu)
+BOOL_FIELD(ext_svinval)
+BOOL_FIELD(ext_svnapot)
+BOOL_FIELD(ext_svpbmt)
+BOOL_FIELD(ext_svvptc)
+BOOL_FIELD(ext_svukte)
+BOOL_FIELD(ext_zdinx)
+BOOL_FIELD(ext_zaamo)
+BOOL_FIELD(ext_zacas)
+BOOL_FIELD(ext_zama16b)
+BOOL_FIELD(ext_zabha)
+BOOL_FIELD(ext_zalrsc)
+BOOL_FIELD(ext_zawrs)
+BOOL_FIELD(ext_zfa)
+BOOL_FIELD(ext_zfbfmin)
+BOOL_FIELD(ext_zfh)
+BOOL_FIELD(ext_zfhmin)
+BOOL_FIELD(ext_zfinx)
+BOOL_FIELD(ext_zhinx)
+BOOL_FIELD(ext_zhinxmin)
+BOOL_FIELD(ext_zve32f)
+BOOL_FIELD(ext_zve32x)
+BOOL_FIELD(ext_zve64f)
+BOOL_FIELD(ext_zve64d)
+BOOL_FIELD(ext_zve64x)
+BOOL_FIELD(ext_zvbb)
+BOOL_FIELD(ext_zvbc)
+BOOL_FIELD(ext_zvkb)
+BOOL_FIELD(ext_zvkg)
+BOOL_FIELD(ext_zvkned)
+BOOL_FIELD(ext_zvknha)
+BOOL_FIELD(ext_zvknhb)
+BOOL_FIELD(ext_zvksed)
+BOOL_FIELD(ext_zvksh)
+BOOL_FIELD(ext_zvkt)
+BOOL_FIELD(ext_zvkn)
+BOOL_FIELD(ext_zvknc)
+BOOL_FIELD(ext_zvkng)
+BOOL_FIELD(ext_zvks)
+BOOL_FIELD(ext_zvksc)
+BOOL_FIELD(ext_zvksg)
+BOOL_FIELD(ext_zmmul)
+BOOL_FIELD(ext_zvfbfmin)
+BOOL_FIELD(ext_zvfbfwma)
+BOOL_FIELD(ext_zvfh)
+BOOL_FIELD(ext_zvfhmin)
+BOOL_FIELD(ext_smaia)
+BOOL_FIELD(ext_ssaia)
+BOOL_FIELD(ext_smctr)
+BOOL_FIELD(ext_ssctr)
+BOOL_FIELD(ext_sscofpmf)
+BOOL_FIELD(ext_smepmp)
+BOOL_FIELD(ext_smrnmi)
+BOOL_FIELD(ext_ssnpm)
+BOOL_FIELD(ext_smnpm)
+BOOL_FIELD(ext_smmpm)
+BOOL_FIELD(ext_sspm)
+BOOL_FIELD(ext_supm)
+BOOL_FIELD(rvv_ta_all_1s)
+BOOL_FIELD(rvv_ma_all_1s)
+BOOL_FIELD(rvv_vl_half_avl)
+/* Named features */
+BOOL_FIELD(ext_svade)
+BOOL_FIELD(ext_zic64b)
+BOOL_FIELD(ext_ssstateen)
+BOOL_FIELD(ext_sha)
+
+/*
+ * Always-'true' booleans for named features that TCG always implements
+ * and that can't be user-disabled, based on the spec version.
+ */
+BOOL_FIELD(has_priv_1_13)
+BOOL_FIELD(has_priv_1_12)
+BOOL_FIELD(has_priv_1_11)
+
+/* Always enabled for TCG if has_priv_1_11 */
+BOOL_FIELD(ext_ziccrse)
+
+/* Vendor-specific custom extensions */
+BOOL_FIELD(ext_xtheadba)
+BOOL_FIELD(ext_xtheadbb)
+BOOL_FIELD(ext_xtheadbs)
+BOOL_FIELD(ext_xtheadcmo)
+BOOL_FIELD(ext_xtheadcondmov)
+BOOL_FIELD(ext_xtheadfmemidx)
+BOOL_FIELD(ext_xtheadfmv)
+BOOL_FIELD(ext_xtheadmac)
+BOOL_FIELD(ext_xtheadmemidx)
+BOOL_FIELD(ext_xtheadmempair)
+BOOL_FIELD(ext_xtheadsync)
+BOOL_FIELD(ext_XVentanaCondOps)
+
+BOOL_FIELD(mmu)
+BOOL_FIELD(pmp)
+BOOL_FIELD(debug)
+BOOL_FIELD(misa_w)
+
+BOOL_FIELD(short_isa_string)
+
+TYPED_FIELD(uint32_t, mvendorid, 0)
+TYPED_FIELD(uint64_t, marchid, 0)
+TYPED_FIELD(uint64_t, mimpid, 0)
+
+TYPED_FIELD(uint32_t, pmu_mask, 0)
+TYPED_FIELD(uint16_t, vlenb, 0)
+TYPED_FIELD(uint16_t, elen, 0)
+TYPED_FIELD(uint16_t, cbom_blocksize, 0)
+TYPED_FIELD(uint16_t, cbop_blocksize, 0)
+TYPED_FIELD(uint16_t, cboz_blocksize, 0)
+
+TYPED_FIELD(int8_t, max_satp_mode, -1)
+
+#undef BOOL_FIELD
+#undef TYPED_FIELD
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 395a1d9..2ed69d7 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -23,16 +23,19 @@
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
+#include "accel/tcg/cpu-ops.h"
#include "trace.h"
#include "semihosting/common-semi.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "cpu_bits.h"
#include "debug.h"
-#include "tcg/oversized-guest.h"
+#include "pmp.h"
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{
@@ -63,134 +66,169 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+bool cpu_get_fcfien(CPURISCVState *env)
{
- RISCVCPU *cpu = env_archcpu(env);
- RISCVExtStatus fs, vs;
- uint32_t flags = 0;
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfilp) {
+ return false;
+ }
- *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
- *cs_base = 0;
+ switch (env->priv) {
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ return env->senvcfg & SENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+ case PRV_M:
+ return env->mseccfg & MSECCFG_MLPE;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
- if (cpu->cfg.ext_zve32x) {
+bool cpu_get_bcfien(CPURISCVState *env)
+{
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfiss) {
+ return false;
+ }
+
+ switch (env->priv) {
+ case PRV_U:
/*
- * If env->vl equals to VLMAX, we can use generic vector operation
- * expanders (GVEC) to accerlate the vector operations.
- * However, as LMUL could be a fractional number. The maximum
- * vector size can be operated might be less than 8 bytes,
- * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
- * only when maxsz >= 8 bytes.
+ * If S is not implemented then the shadow stack for U can't be turned on.
+ * This is checked in `riscv_cpu_validate_set_extensions`, so there is no
+ * need to check or assert it here.
*/
-
- /* lmul encoded as in DisasContext::lmul */
- int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
- uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
- uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
- uint32_t maxsz = vlmax << vsew;
- bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
- (maxsz >= 8);
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
- flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
- flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
- FIELD_EX64(env->vtype, VTYPE, VLMUL));
- flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
- flags = FIELD_DP32(flags, TB_FLAGS, VTA,
- FIELD_EX64(env->vtype, VTYPE, VTA));
- flags = FIELD_DP32(flags, TB_FLAGS, VMA,
- FIELD_EX64(env->vtype, VTYPE, VMA));
- flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
- } else {
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ return env->senvcfg & SENVCFG_SSE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_SSE;
+ }
+ return env->menvcfg & MENVCFG_SSE;
+ case PRV_M: /* M-mode shadow stack is always off */
+ return false;
+#endif
+ default:
+ g_assert_not_reached();
}
+}
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
+{
#ifdef CONFIG_USER_ONLY
- fs = EXT_STATUS_DIRTY;
- vs = EXT_STATUS_DIRTY;
+ return false;
#else
- flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
-
- flags |= riscv_env_mmu_index(env, 0);
- fs = get_field(env->mstatus, MSTATUS_FS);
- vs = get_field(env->mstatus, MSTATUS_VS);
-
- if (env->virt_enabled) {
- flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
- /*
- * Merge DISABLED and !DIRTY states using MIN.
- * We will set both fields when dirtying.
- */
- fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
- vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ if (virt) {
+ return (env->henvcfg & HENVCFG_DTE) != 0;
+ } else {
+ return (env->menvcfg & MENVCFG_DTE) != 0;
}
+#endif
+}
- /* With Zfinx, floating point is enabled/disabled by Smstateen. */
- if (!riscv_has_ext(env, RVF)) {
- fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
- ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
- }
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int priv_mode = cpu_address_mode(env);
- if (cpu->cfg.debug && !icount_enabled()) {
- flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ if (get_field(env->mstatus, MSTATUS_MPRV) &&
+ get_field(env->mstatus, MSTATUS_MXR)) {
+ return PMM_FIELD_DISABLED;
}
-#endif
- flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
- flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
- flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
- flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- if (env->cur_pmmask != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
- }
- if (env->cur_pmbase != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
+ /* Get current PMM field */
+ switch (priv_mode) {
+ case PRV_M:
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return get_field(env->mseccfg, MSECCFG_PMM);
+ }
+ break;
+ case PRV_S:
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ if (get_field(env->mstatus, MSTATUS_MPV)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ if (riscv_cpu_cfg(env)->ext_ssnpm) {
+ return get_field(env->senvcfg, SENVCFG_PMM);
+ }
+ } else {
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
-
- *pflags = flags;
+ return PMM_FIELD_DISABLED;
+#else
+ return PMM_FIELD_DISABLED;
+#endif
}
-void riscv_cpu_update_mask(CPURISCVState *env)
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
{
- target_ulong mask = 0, base = 0;
- RISCVMXL xl = env->xl;
- /*
- * TODO: Current RVJ spec does not specify
- * how the extension interacts with XLEN.
- */
#ifndef CONFIG_USER_ONLY
- int mode = cpu_address_mode(env);
- xl = cpu_get_xl(env, mode);
- if (riscv_has_ext(env, RVJ)) {
- switch (mode) {
- case PRV_M:
- if (env->mmte & M_PM_ENABLE) {
- mask = env->mpmmask;
- base = env->mpmbase;
- }
- break;
- case PRV_S:
- if (env->mmte & S_PM_ENABLE) {
- mask = env->spmmask;
- base = env->spmbase;
- }
- break;
- case PRV_U:
- if (env->mmte & U_PM_ENABLE) {
- mask = env->upmmask;
- base = env->upmbase;
- }
- break;
- default:
- g_assert_not_reached();
+ int priv_mode = cpu_address_mode(env);
+
+ if (priv_mode == PRV_U) {
+ return get_field(env->hstatus, HSTATUS_HUPMM);
+ } else {
+ if (get_field(env->hstatus, HSTATUS_SPVP)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->senvcfg, SENVCFG_PMM);
}
}
+#else
+ return PMM_FIELD_DISABLED;
#endif
- if (xl == MXL_RV32) {
- env->cur_pmmask = mask & UINT32_MAX;
- env->cur_pmbase = base & UINT32_MAX;
+}
+
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int satp_mode = 0;
+ int priv_mode = cpu_address_mode(env);
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ satp_mode = get_field(env->satp, SATP32_MODE);
} else {
- env->cur_pmmask = mask;
- env->cur_pmbase = base;
+ satp_mode = get_field(env->satp, SATP64_MODE);
+ }
+
+ return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
+#else
+ return false;
+#endif
+}
+
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
+{
+ switch (pmm) {
+ case PMM_FIELD_DISABLED:
+ return 0;
+ case PMM_FIELD_PMLEN7:
+ return 7;
+ case PMM_FIELD_PMLEN16:
+ return 16;
+ default:
+ g_assert_not_reached();
}
}
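Editor's note: riscv_pm_get_pmlen() only reports how many high address bits get masked; the sketch below shows one plausible way those bits could be applied to an RV64 address, with and without sign extension (QEMU's real code would more likely use sextract64()). A minimal sketch, illustrative only:

    static inline uint64_t example_apply_pmm(uint64_t addr, RISCVPmPmm pmm,
                                             bool signext)
    {
        uint32_t pmlen = riscv_pm_get_pmlen(pmm);   /* 0, 7 or 16 */

        if (pmlen == 0) {
            return addr;
        }
        if (signext) {
            /* shift the masked bits out, then arithmetic-shift them back in */
            return (uint64_t)((int64_t)(addr << pmlen) >> pmlen);
        }
        /* zero-extend: clear the top pmlen bits */
        return addr & (UINT64_MAX >> pmlen);
    }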
@@ -434,6 +472,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
uint64_t vsbits, irq_delegated;
int virq;
+ /* Priority: RNMI > Other interrupt. */
+ if (riscv_cpu_cfg(env)->ext_smrnmi) {
+ /* If mnstatus.NMIE == 0, all interrupts are disabled. */
+ if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->rnmip) {
+ return ctz64(env->rnmip); /* since non-zero */
+ }
+ }
+
/* Determine interrupt enable state of all privilege modes */
if (env->virt_enabled) {
mie = 1;
@@ -496,7 +546,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
+ uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
+
+ if (interrupt_request & mask) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
@@ -546,8 +598,21 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
}
bool current_virt = env->virt_enabled;
+ /*
+ * If the zicfilp extension is available and henvcfg.LPE = 1,
+ * then apply the SPELP mask on mstatus
+ */
+ if (env_archcpu(env)->cfg.ext_zicfilp &&
+ get_field(env->henvcfg, HENVCFG_LPE)) {
+ mstatus_mask |= SSTATUS_SPELP;
+ }
+
g_assert(riscv_has_ext(env, RVH));
+ if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
+ mstatus_mask |= MSTATUS_SDT;
+ }
+
if (current_virt) {
/* Current V=1 and we are about to change to V=0 */
env->vsstatus = env->mstatus & mstatus_mask;
@@ -619,6 +684,30 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
+{
+ CPURISCVState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ bool release_lock = false;
+
+ if (!bql_locked()) {
+ release_lock = true;
+ bql_lock();
+ }
+
+ if (level) {
+ env->rnmip |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
+ } else {
+ env->rnmip &= ~(1 << irq);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
+ }
+
+ if (release_lock) {
+ bql_unlock();
+ }
+}
+
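Editor's note: a hypothetical board-side IRQ handler showing how riscv_cpu_set_rnmi() is expected to be driven; the handler name and wiring are assumptions, only the helper itself comes from the patch:

    static void example_rnmi_gpio_handler(void *opaque, int n, int level)
    {
        RISCVCPU *cpu = RISCV_CPU(opaque);

        /* n selects one of the RNMI_MAX lines; level follows the wire state */
        riscv_cpu_set_rnmi(cpu, n, level != 0);
    }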
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -691,6 +780,254 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
+static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
+ bool virt)
+{
+ uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
+
+ assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
+
+ if (ctl & freeze_mask) {
+ env->sctrstatus |= SCTRSTATUS_FROZEN;
+ }
+}
+
+void riscv_ctr_clear(CPURISCVState *env)
+{
+ memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
+ memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
+ memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
+}
+
+static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return MCTRCTL_M;
+ case PRV_S:
+ if (virt) {
+ return XCTRCTL_S;
+ }
+ return XCTRCTL_S;
+ case PRV_U:
+ if (virt) {
+ return XCTRCTL_U;
+ }
+ return XCTRCTL_U;
+ }
+
+ g_assert_not_reached();
+}
+
+static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
+ bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return env->mctrctl;
+ case PRV_S:
+ case PRV_U:
+ if (virt) {
+ return env->vsctrctl;
+ }
+ return env->mctrctl;
+ }
+
+ g_assert_not_reached();
+}
+
+/*
+ * This function assumes that the source privilege and the target privilege are
+ * not the same and that the source privilege is lower than the target
+ * privilege. This includes the virtual state as well.
+ */
+static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
+ bool src_virt)
+{
+ target_long tgt_prv = env->priv;
+ bool res = true;
+
+ /*
+ * VS and U mode are the same in terms of the xTE bits required to record an
+ * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
+ * Requirements. This changes VS to U to simplify the logic a bit.
+ */
+ if (src_virt && src_prv == PRV_S) {
+ src_prv = PRV_U;
+ } else if (env->virt_enabled && tgt_prv == PRV_S) {
+ tgt_prv = PRV_U;
+ }
+
+ /* VU mode is an outlier here. */
+ if (src_virt && src_prv == PRV_U) {
+ res &= !!(env->vsctrctl & XCTRCTL_STE);
+ }
+
+ switch (src_prv) {
+ case PRV_U:
+ if (tgt_prv == PRV_U) {
+ break;
+ }
+ res &= !!(env->mctrctl & XCTRCTL_STE);
+ /* fall-through */
+ case PRV_S:
+ if (tgt_prv == PRV_S) {
+ break;
+ }
+ res &= !!(env->mctrctl & MCTRCTL_MTE);
+ /* fall-through */
+ case PRV_M:
+ break;
+ }
+
+ return res;
+}
+
+/*
+ * Special cases for traps and trap returns:
+ *
+ * 1- Traps, and trap returns, between enabled modes are recorded as normal.
+ * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
+ * enabled mode back to an inhibited mode, are partially recorded. In such
+ * cases, the PC from the inhibited mode (source PC for traps, and target PC
+ * for trap returns) is 0.
+ *
+ * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
+ * Traps from an enabled mode to an inhibited mode, known as external traps,
+ * receive special handling.
+ * By default external traps are not recorded, but a handshake mechanism exists
+ * to allow partial recording. Software running in the target mode of the trap
+ * can opt-in to allowing CTR to record traps into that mode even when the mode
+ * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
+ * respectively, to opt in. When an external trap occurs and xTE=1, where x is
+ * the target privilege mode of the trap, CTR records the trap. In such
+ * cases, the target PC is 0.
+ */
+/*
+ * CTR arrays are implemented as circular buffers and a new entry is stored at
+ * sctrstatus.WRPTR, but they are presented to software as moving circular
+ * buffers. This means software gets the illusion that whenever a new entry
+ * is added the whole buffer is moved by one place and the new entry is added at
+ * the start, keeping the new entry at idx 0 with the older ones following.
+ *
+ * Depth = 16.
+ *
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
+ *
+ * When a new entry is added:
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
+ *
+ * entry here denotes the logical entry number that software can access
+ * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
+ * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
+ * buffer[7]. Here is how we convert entry to buffer idx.
+ *
+ * entry = isel - CTR_ENTRIES_FIRST;
+ * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
+ */
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
+ enum CTRType type, target_ulong src_priv, bool src_virt)
+{
+ bool tgt_virt = env->virt_enabled;
+ uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
+ uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
+ uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
+ uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
+ uint64_t depth, head;
+ bool ext_trap = false;
+
+ /*
+ * Return immediately if both target and src recording are disabled or if
+ * CTR is in the frozen state.
+ */
+ if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
+ env->sctrstatus & SCTRSTATUS_FROZEN) {
+ return;
+ }
+
+ /*
+ * With RAS emulation enabled, only indirect calls, direct calls, function
+ * returns and co-routine swap types are allowed.
+ */
+ if (tgt_ctrl & XCTRCTL_RASEMU &&
+ type != CTRDATA_TYPE_INDIRECT_CALL &&
+ type != CTRDATA_TYPE_DIRECT_CALL &&
+ type != CTRDATA_TYPE_RETURN &&
+ type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ return;
+ }
+
+ if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
+ /* Case 2 for traps. */
+ if (!(src_ctrl & src_mask)) {
+ src = 0;
+ } else if (!(tgt_ctrl & tgt_mask)) {
+ /* Check if target priv-mode has allowed external trap recording. */
+ if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
+ return;
+ }
+
+ ext_trap = true;
+ dst = 0;
+ }
+ } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
+ /*
+ * Case 3 for trap returns. Trap returns from inhibited mode are not
+ * recorded.
+ */
+ if (!(src_ctrl & src_mask)) {
+ return;
+ }
+
+ /* Case 2 for trap returns. */
+ if (!(tgt_ctrl & tgt_mask)) {
+ dst = 0;
+ }
+ }
+
+ /* Ignore filters in case of RASEMU mode or External trap. */
+ if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
+ /*
+ * Check if the specific type is inhibited. Not taken branch filter is
+ * an enable bit and needs to be checked separatly.
+ */
+ bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
+ if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
+ (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
+ return;
+ }
+ }
+
+ head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+
+ depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
+ head = (head - 1) & (depth - 1);
+
+ env->ctr_src[head] &= ~CTRSOURCE_VALID;
+ env->sctrstatus =
+ set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+ return;
+ }
+
+ /* In case of Co-routine SWAP we overwrite latest entry. */
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ head = (head - 1) & (depth - 1);
+ }
+
+ env->ctr_src[head] = src | CTRSOURCE_VALID;
+ env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
+ env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
+
+ head = (head + 1) & (depth - 1);
+
+ env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+}
+
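Editor's note: the entry-to-index conversion documented above is easy to sanity-check in isolation; a standalone sketch against the depth-16 example (after the new entry is added, WRPTR = 9), not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t example_ctr_entry_to_idx(uint32_t wrptr, uint32_t entry,
                                             uint32_t depth)
    {
        /* depth is a power of two, so the subtraction wraps inside the buffer */
        return (wrptr - entry - 1) & (depth - 1);
    }

    int main(void)
    {
        assert(example_ctr_entry_to_idx(9, 0, 16) == 8);   /* xiselect 0x200 -> buffer[8] */
        assert(example_ctr_entry_to_idx(9, 1, 16) == 7);   /* xiselect 0x201 -> buffer[7] */
        assert(example_ctr_entry_to_idx(9, 15, 16) == 9);  /* oldest entry -> buffer[9]   */
        return 0;
    }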
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
@@ -706,7 +1043,6 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
/*
* Clear the load reservation - otherwise a reservation placed in one
@@ -777,6 +1113,55 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
return TRANSLATE_SUCCESS;
}
+/* Returns 'true' if a svukte address check is needed */
+static bool do_svukte_check(CPURISCVState *env, bool first_stage,
+ int mode, bool virt)
+{
+ /* Svukte extension depends on Sv39. */
+ if (!(env_archcpu(env)->cfg.ext_svukte ||
+ !first_stage ||
+ VM_1_10_SV39 != get_field(env->satp, SATP64_MODE))) {
+ return false;
+ }
+
+ /*
+ * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
+ * executing HLV/HLVX/HSV in U-mode.
+ * For other cases, check senvcfg.UKTE.
+ */
+ if (env->priv == PRV_U && !env->virt_enabled && virt) {
+ if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
+ return false;
+ }
+ } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
+ return false;
+ }
+
+ /*
+ * Svukte extension is qualified only in U or VU-mode.
+ *
+ * Effective mode can be switched to U or VU-mode by:
+ * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
+ * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
+ * - U-mode.
+ * - VU-mode.
+ * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
+ */
+ if (mode != PRV_U) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
+{
+ /* svukte extension excludes RV32 */
+ uint32_t sxlen = 32 * riscv_cpu_sxl(env);
+ uint64_t high_bit = addr & (1UL << (sxlen - 1));
+ return !high_bit;
+}
+
/*
* get_physical_address - get the physical address for this virtual address
*
@@ -804,7 +1189,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *fault_pte_addr,
int access_type, int mmu_idx,
bool first_stage, bool two_stage,
- bool is_debug)
+ bool is_debug, bool is_probe)
{
/*
* NOTE: the env->pc value visible here will not be
@@ -814,10 +1199,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxResult res;
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmuidx_priv(mmu_idx);
+ bool virt = mmuidx_2stage(mmu_idx);
bool use_background = false;
hwaddr ppn;
int napot_bits = 0;
target_ulong napot_mask;
+ bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+ bool sstack_page = false;
+
+ if (do_svukte_check(env, first_stage, mode, virt) &&
+ !check_svukte_addr(env, addr)) {
+ return TRANSLATE_FAIL;
+ }
/*
* Check if we should use the background registers for the two
@@ -890,12 +1283,14 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
CPUState *cs = env_cpu(env);
int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ int sxlen = 16 << riscv_cpu_sxl(env);
+ int sxlen_bytes = sxlen / 8;
if (first_stage == true) {
target_ulong mask, masked_msbs;
- if (TARGET_LONG_BITS > (va_bits - 1)) {
- mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ if (sxlen > (va_bits - 1)) {
+ mask = (1L << (sxlen - (va_bits - 1))) - 1;
} else {
mask = 0;
}
@@ -925,9 +1320,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
hwaddr pte_addr;
int i;
-#if !TCG_OVERSIZED_GUEST
-restart:
-#endif
+ restart:
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
target_ulong idx;
if (i == 0) {
@@ -948,7 +1341,7 @@ restart:
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
base, NULL, MMU_DATA_LOAD,
MMUIdx_U, false, true,
- is_debug);
+ is_debug, false);
if (vbase_ret != TRANSLATE_SUCCESS) {
if (fault_pte_addr) {
@@ -964,7 +1357,7 @@ restart:
int pmp_prot;
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
- sizeof(target_ulong),
+ sxlen_bytes,
MMU_DATA_LOAD, PRV_S);
if (pmp_ret != TRANSLATE_SUCCESS) {
return TRANSLATE_PMP_FAIL;
@@ -984,14 +1377,27 @@ restart:
ppn = pte >> PTE_PPN_SHIFT;
} else {
if (pte & PTE_RESERVED) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
+ /* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
+ /* Reserved without Svnapot extension */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
+ "and Svnapot extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
@@ -1002,14 +1408,19 @@ restart:
/* Invalid PTE */
return TRANSLATE_FAIL;
}
+
if (pte & (PTE_R | PTE_W | PTE_X)) {
goto leaf;
}
- /* Inner PTE, continue walking */
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
+ /* D, A, and U bits are reserved in non-leaf/inner PTEs */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ /* Inner PTE, continue walking */
base = ppn << PGSHIFT;
}
@@ -1019,28 +1430,57 @@ restart:
leaf:
if (ppn & ((1ULL << ptshift) - 1)) {
/* Misaligned PPN */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
/* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
/* Check for reserved combinations of RWX flags. */
- switch (pte & (PTE_R | PTE_W | PTE_X)) {
- case PTE_W:
+ switch (rwx) {
case PTE_W | PTE_X:
return TRANSLATE_FAIL;
+ case PTE_W:
+ /* if bcfi is enabled, PTE_W alone is not reserved and marks a shadow stack page */
+ if (cpu_get_bcfien(env) && first_stage) {
+ sstack_page = true;
+ /*
+ * If this is a shadow-stack MMU index, reads and writes are allowed;
+ * otherwise, if this is not a probe, only reads are allowed.
+ */
+ rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
+ break;
+ }
+ return TRANSLATE_FAIL;
+ case PTE_R:
+ /*
+ * no matter what the `access_type` is, shadow stack accesses to read-only
+ * memory are always store page faults. During unwind, loads will be
+ * promoted to store faults.
+ */
+ if (is_sstack_idx) {
+ return TRANSLATE_FAIL;
+ }
+ break;
}
int prot = 0;
- if (pte & PTE_R) {
+ if (rwx & PTE_R) {
prot |= PAGE_READ;
}
- if (pte & PTE_W) {
+ if (rwx & PTE_W) {
prot |= PAGE_WRITE;
}
- if (pte & PTE_X) {
+ if (rwx & PTE_X) {
bool mxr = false;
/*
@@ -1084,8 +1524,11 @@ restart:
}
if (!((prot >> access_type) & 1)) {
- /* Access check failed */
- return TRANSLATE_FAIL;
+ /*
+ * Access check failed. Access-check failures for shadow stack pages
+ * are reported as access faults.
+ */
+ return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
@@ -1116,24 +1559,23 @@ restart:
* it is no longer valid and we must re-walk the page table.
*/
MemoryRegion *mr;
- hwaddr l = sizeof(target_ulong), addr1;
+ hwaddr l = sxlen_bytes, addr1;
mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
false, MEMTXATTRS_UNSPECIFIED);
if (memory_region_is_ram(mr)) {
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
- /*
- * MTTCG is not enabled on oversized TCG guests so
- * page table updates do not need to be atomic
- */
- *pte_pa = pte = updated_pte;
-#else
- target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+ target_ulong old_pte;
+ if (riscv_cpu_sxl(env) == MXL_RV32) {
+ old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte),
+ cpu_to_le32(updated_pte));
+ old_pte = le32_to_cpu(old_pte);
+ } else {
+ old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte),
+ cpu_to_le64(updated_pte));
+ old_pte = le64_to_cpu(old_pte);
+ }
if (old_pte != pte) {
goto restart;
}
pte = updated_pte;
-#endif
} else {
/*
* Misconfigured PTE in ROM (AD bits are not preset) or
@@ -1223,13 +1665,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
- true, env->virt_enabled, true)) {
+ true, env->virt_enabled, true, false)) {
return -1;
}
if (env->virt_enabled) {
if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
- 0, MMUIdx_U, false, true, true)) {
+ 0, MMUIdx_U, false, true, true, false)) {
return -1;
}
}
@@ -1272,9 +1714,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ /* shadow stack misaligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ /* shadow stack misaligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -1323,7 +1773,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
int ret = TRANSLATE_FAIL;
int mode = mmuidx_priv(mmu_idx);
/* default TLB page size */
- target_ulong tlb_size = TARGET_PAGE_SIZE;
+ hwaddr tlb_size = TARGET_PAGE_SIZE;
env->guest_phys_fault_addr = 0;
@@ -1335,7 +1785,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Two stage lookup */
ret = get_physical_address(env, &pa, &prot, address,
&env->guest_phys_fault_addr, access_type,
- mmu_idx, true, true, false);
+ mmu_idx, true, true, false, probe);
/*
* A G-stage exception may be triggered during two state lookup.
@@ -1358,7 +1808,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
access_type, MMUIdx_U, false, true,
- false);
+ false, probe);
qemu_log_mask(CPU_LOG_MMU,
"%s 2nd-stage address=%" VADDR_PRIx
@@ -1375,7 +1825,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1395,7 +1845,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else {
/* Single stage lookup */
ret = get_physical_address(env, &pa, &prot, address, NULL,
- access_type, mmu_idx, true, false, false);
+ access_type, mmu_idx, true, false, false,
+ probe);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
@@ -1409,7 +1860,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1427,6 +1878,23 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
+ int wp_access = 0;
+
+ if (access_type == MMU_DATA_LOAD) {
+ wp_access |= BP_MEM_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ wp_access |= BP_MEM_WRITE;
+ }
+
+ /*
+ * If a watchpoint isn't found for 'address' this will
+ * be a no-op and we'll resume the mmu_exception path.
+ * Otherwise we'll throw a debug exception and execution
+ * will continue elsewhere.
+ */
+ cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
+ wp_access, retaddr);
+
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error, two_stage_lookup,
two_stage_indirect_error);
@@ -1641,6 +2109,40 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env,
return xinsn;
}
+static target_ulong promote_load_fault(target_ulong orig_cause)
+{
+ switch (orig_cause) {
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_PAGE_FAULT:
+ return RISCV_EXCP_STORE_PAGE_FAULT;
+ }
+
+ /* if no promotion, return original cause */
+ return orig_cause;
+}
+
+static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
+{
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
+ env->mncause = cause;
+ env->mnepc = env->pc;
+ env->pc = env->rnmi_irqvec;
+
+ if (cpu_get_fcfien(env)) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
+ }
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_mode(env, PRV_M, false);
+}
+
/*
* Handle Traps
*
@@ -1653,7 +2155,10 @@ void riscv_cpu_do_interrupt(CPUState *cs)
CPURISCVState *env = &cpu->env;
bool virt = env->virt_enabled;
bool write_gva = false;
+ bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
+ bool vsmode_exc;
uint64_t s;
+ int mode;
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@@ -1662,22 +2167,38 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
uint64_t deleg = async ? env->mideleg : env->medeleg;
- bool s_injected = env->mvip & (1 << cause) & env->mvien &&
- !(env->mip & (1 << cause));
- bool vs_injected = env->hvip & (1 << cause) & env->hvien &&
- !(env->mip & (1 << cause));
+ bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
+ !(env->mip & (1ULL << cause));
+ bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
+ !(env->mip & (1ULL << cause));
+ bool smode_double_trap = false;
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ const bool prev_virt = env->virt_enabled;
+ const target_ulong prev_priv = env->priv;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
+ target_ulong src;
+ int sxlen = 0;
+ int mxlen = 16 << riscv_cpu_mxl(env);
+ bool nnmi_excep = false;
+
+ if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
+ riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
+ env->virt_enabled);
+ return;
+ }
if (!async) {
/* set tval to badaddr for traps with address information */
switch (cause) {
+#ifdef CONFIG_TCG
case RISCV_EXCP_SEMIHOST:
do_common_semihosting(cs);
env->pc += 4;
return;
+#endif
case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_ADDR_MIS:
@@ -1686,6 +2207,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
+ if (always_storeamo) {
+ cause = promote_load_fault(cause);
+ }
write_gva = env->two_stage_lookup;
tval = env->badaddr;
if (env->two_stage_indirect_lookup) {
@@ -1727,6 +2251,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cs->watchpoint_hit = NULL;
}
break;
+ case RISCV_EXCP_SW_CHECK:
+ tval = env->sw_check_code;
+ break;
default:
break;
}
@@ -1755,14 +2282,44 @@ void riscv_cpu_do_interrupt(CPUState *cs)
__func__, env->mhartid, async, cause, env->pc, tval,
riscv_cpu_get_trap_name(cause, async));
- if (env->priv <= PRV_S && cause < 64 &&
- (((deleg >> cause) & 1) || s_injected || vs_injected)) {
- /* handle the trap in S-mode */
+ mode = env->priv <= PRV_S && cause < 64 &&
+ (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
+
+ vsmode_exc = env->virt_enabled && cause < 64 &&
+ (((hdeleg >> cause) & 1) || vs_injected);
+
+ /*
+ * Check double trap condition only if already in S-mode and targeting
+ * S-mode
+ */
+ if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
+ bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
+ bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
+ /* In VS or HS */
if (riscv_has_ext(env, RVH)) {
- uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ if (vsmode_exc) {
+ /* VS -> VS, use henvcfg instead of menvcfg */
+ dte = (env->henvcfg & HENVCFG_DTE) != 0;
+ } else if (env->virt_enabled) {
+ /* VS -> HS, use mstatus_hs */
+ sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
+ }
+ }
+ smode_double_trap = dte && sdt;
+ if (smode_double_trap) {
+ mode = PRV_M;
+ }
+ }
+
+ if (mode == PRV_S) {
+ /* handle the trap in S-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
+ }
- if (env->virt_enabled &&
- (((hdeleg >> cause) & 1) || vs_injected)) {
+ if (riscv_has_ext(env, RVH)) {
+ if (vsmode_exc) {
/* Trap to VS mode */
/*
* See if we need to adjust cause. Yes if its VS mode interrupt
@@ -1795,8 +2352,12 @@ void riscv_cpu_do_interrupt(CPUState *cs)
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
+ if (riscv_env_smode_dbltrp_enabled(env, virt)) {
+ s = set_field(s, MSTATUS_SDT, 1);
+ }
env->mstatus = s;
- env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
+ sxlen = 16 << riscv_cpu_sxl(env);
+ env->scause = cause | ((target_ulong)async << (sxlen - 1));
env->sepc = env->pc;
env->stval = tval;
env->htval = htval;
@@ -1804,8 +2365,28 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S, virt);
+
+ src = env->sepc;
} else {
+ /*
+ * If the hart encounters an exception while executing in M-mode
+ * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
+
/* handle the trap in M-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ if (nnmi_excep) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
+ env->elp);
+ } else {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
+ }
+ }
+
if (riscv_has_ext(env, RVH)) {
if (env->virt_enabled) {
riscv_cpu_swap_hypervisor_regs(env);
@@ -1821,23 +2402,78 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trapping to M mode, virt is disabled */
virt = false;
}
+ /*
+ * If the hart encounters an exception while executing in M-mode,
+ * with the mnstatus.NMIE bit clear, the program counter is set to
+ * the RNMI exception trap handler address.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
+ if (cpu->cfg.ext_smdbltrp) {
+ if (env->mstatus & MSTATUS_MDT) {
+ assert(env->priv == PRV_M);
+ if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
+ cpu_abort(CPU(cpu), "M-mode double trap\n");
+ } else {
+ riscv_do_nmi(env, cause, false);
+ return;
+ }
+ }
+
+ s = set_field(s, MSTATUS_MDT, 1);
+ }
env->mstatus = s;
- env->mcause = cause | ~(((target_ulong)-1) >> async);
+ env->mcause = cause | ((target_ulong)async << (mxlen - 1));
+ if (smode_double_trap) {
+ env->mtval2 = env->mcause;
+ env->mcause = RISCV_EXCP_DOUBLE_TRAP;
+ } else {
+ env->mtval2 = mtval2;
+ }
env->mepc = env->pc;
env->mtval = tval;
- env->mtval2 = mtval2;
env->mtinst = tinst;
- env->pc = (env->mtvec >> 2 << 2) +
- ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+
+ /*
+ * For RNMI exception, program counter is set to the RNMI exception
+ * trap handler address.
+ */
+ if (nnmi_excep) {
+ env->pc = env->rnmi_excpvec;
+ } else {
+ env->pc = (env->mtvec >> 2 << 2) +
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+ }
riscv_cpu_set_mode(env, PRV_M, virt);
+ src = env->mepc;
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ if (async && cause == IRQ_PMU_OVF) {
+ riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
+ } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
+ riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
+ }
+
+ riscv_ctr_add_entry(env, src, env->pc,
+ async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
+ prev_priv, prev_virt);
}
/*
+ * Interrupt/exception/trap delivery is an asynchronous event and, as per
+ * the zicfilp spec, the CPU should clear the ELP state. No harm in
+ * clearing it unconditionally.
+ */
+ env->elp = false;
+
+ /*
* NOTE: it is not necessary to yield load reservations here. It is only
* necessary for an SC from "another hart" to cause a load reservation
* to be yielded. Refer to the memory consistency model section of the
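Aside (not part of the patch): the trap code above now builds scause/mcause by shifting the interrupt flag into the most significant bit of the effective XLEN (sxlen for scause, mxlen for mcause). A minimal standalone sketch of that arithmetic, with an illustrative helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: place the interrupt flag in the MSB of the
     * effective XLEN. For xlen = 64 and an asynchronous cause 5
     * (supervisor timer interrupt) this yields 0x8000000000000005;
     * for xlen = 32 it yields 0x80000005. */
    static uint64_t encode_xcause(uint64_t cause, bool async, int xlen)
    {
        return cause | ((uint64_t)async << (xlen - 1));
    }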
diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h
index 02afad6..e6927ff 100644
--- a/target/riscv/cpu_user.h
+++ b/target/riscv/cpu_user.h
@@ -15,5 +15,6 @@
#define xA6 16
#define xA7 17 /* syscall number for RVI ABI */
#define xT0 5 /* syscall number for RVE ABI */
+#define xT2 7
#endif
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index bb084e0..a0fb54b 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index ea35603..fb14972 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -24,11 +24,15 @@
#include "tcg/tcg-cpu.h"
#include "pmu.h"
#include "time_helper.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/tb-flush.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
+#include "accel/tcg/getpc.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
+#include "tcg/insn-start-words.h"
+#include "internals.h"
+#include <stdbool.h>
/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
@@ -36,7 +40,7 @@ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
*ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
{
csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
@@ -184,6 +188,30 @@ static RISCVException zcmt(CPURISCVState *env, int csrno)
return RISCV_EXCP_NONE;
}
+static RISCVException cfi_ss(CPURISCVState *env, int csrno)
+{
+ if (!env_archcpu(env)->cfg.ext_zicfiss) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ /* If ext implemented, M-mode always has access to SSP CSR */
+ if (env->priv == PRV_M) {
+ return RISCV_EXCP_NONE;
+ }
+
+ /* if bcfi not active for current env, access to csr is illegal */
+ if (!cpu_get_bcfien(env)) {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+#endif
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
#if !defined(CONFIG_USER_ONLY)
static RISCVException mctr(CPURISCVState *env, int csrno)
{
@@ -286,6 +314,24 @@ static RISCVException aia_any32(CPURISCVState *env, int csrno)
return any32(env, csrno);
}
+static RISCVException csrind_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return any(env, csrno);
+}
+
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -306,22 +352,89 @@ static RISCVException smode32(CPURISCVState *env, int csrno)
static RISCVException aia_smode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ if (csrno == CSR_STOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode(env, csrno);
}
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode32(env, csrno);
}
+static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return smode(env, csrno);
+}
+
+static bool csrind_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
+}
+
+static bool aia_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
+}
+
+static bool csrind_or_aia_extensions_present(CPURISCVState *env)
+{
+ return csrind_extensions_present(env) || aia_extensions_present(env);
+}
+
+static RISCVException csrind_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
static RISCVException hmode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVH)) {
@@ -341,6 +454,24 @@ static RISCVException hmode32(CPURISCVState *env, int csrno)
}
+static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException umode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVU)) {
@@ -512,27 +643,82 @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
return hmode(env, csrno);
}
-/* Checks if PointerMasking registers could be accessed */
-static RISCVException pointer_masking(CPURISCVState *env, int csrno)
+/*
+ * M-mode:
+ * Without ext_smctr raise illegal inst excep.
+ * Otherwise everything is accessible to m-mode.
+ *
+ * S-mode:
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
+ * Otherwise everything other than mctrctl is accessible.
+ *
+ * VS-mode:
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
+ * Without hstateen.ctr raise virtual illegal inst excep.
+ * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
+ * Always raise illegal instruction exception for sctrdepth.
+ */
+static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
{
- /* Check if j-ext is present */
- if (riscv_has_ext(env, RVJ)) {
+ /* Check if smctr-ext is present */
+ if (riscv_cpu_cfg(env)->ext_smctr) {
return RISCV_EXCP_NONE;
}
+
return RISCV_EXCP_ILLEGAL_INST;
}
+static RISCVException ctr_smode(CPURISCVState *env, int csrno)
+{
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
+
+ if (!cfg->ext_smctr && !cfg->ext_ssctr) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
+ if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return ret;
+}
+
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
- return hmode(env, csrno);
+ if (csrno == CSR_VSTOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ return hmode(env, csrno);
}
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
{
+ int ret;
+
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -540,6 +726,15 @@ static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
return hmode32(env, csrno);
}
+static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException pmp(CPURISCVState *env, int csrno)
{
if (riscv_cpu_cfg(env)->pmp) {
@@ -566,6 +761,9 @@ static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
if (riscv_cpu_cfg(env)->ext_zkr) {
return RISCV_EXCP_NONE;
}
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return RISCV_EXCP_NONE;
+ }
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -578,6 +776,17 @@ static RISCVException debug(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
+
+static RISCVException rnmi(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (cpu->cfg.ext_smrnmi) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
#endif
static RISCVException seed(CPURISCVState *env, int csrno)
@@ -622,6 +831,21 @@ static RISCVException seed(CPURISCVState *env, int csrno)
#endif
}
+/* zicfiss CSR_SSP read and write */
+static RISCVException read_ssp(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->ssp;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_ssp(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ env->ssp = val;
+ return RISCV_EXCP_NONE;
+}
+
/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
target_ulong *val)
@@ -631,7 +855,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno,
}
static RISCVException write_fflags(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -650,7 +874,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno,
}
static RISCVException write_frm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -670,7 +894,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -722,7 +946,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
}
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -734,17 +958,17 @@ static RISCVException write_vxrm(CPURISCVState *env, int csrno,
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->vxsat;
+ *val = env->vxsat & BIT(0);
return RISCV_EXCP_NONE;
}
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
- env->vxsat = val;
+ env->vxsat = val & BIT(0);
return RISCV_EXCP_NONE;
}
@@ -756,7 +980,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
}
static RISCVException write_vstart(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -777,7 +1001,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -835,7 +1059,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -864,7 +1088,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MCYCLECFGH_BIT_MINH);
@@ -889,7 +1113,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -916,7 +1140,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MINSTRETCFGH_BIT_MINH);
@@ -943,7 +1167,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
@@ -981,7 +1205,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val;
@@ -1072,10 +1296,9 @@ done:
return result;
}
-static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
@@ -1100,10 +1323,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLEH;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
@@ -1125,6 +1347,22 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ int ctr_idx = csrno - CSR_MCYCLE;
+
+ return riscv_pmu_write_ctr(env, val, ctr_idx);
+}
+
+static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ int ctr_idx = csrno - CSR_MCYCLEH;
+
+ return riscv_pmu_write_ctrh(env, val, ctr_idx);
+}
+
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
@@ -1190,6 +1428,167 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
+static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, false, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctr(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, true, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctrh(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevt_val = new_val;
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmevent_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENT_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENT_BIT_MINH;
+ mhpmevt_val = (new_val & wr_mask) |
+ (env->mhpmevent_val[evt_index] & ~wr_mask);
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevt_val = mhpmevt_val |
+ ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
+ }
+ env->mhpmevent_val[evt_index] = mhpmevt_val;
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevth_val;
+ uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmeventh_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENTH_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENTH_BIT_MINH;
+ env->mhpmeventh_val[evt_index] =
+ (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
+ mhpmevth_val = env->mhpmeventh_val[evt_index];
+ mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ switch (cfg_index) {
+ case 0: /* CYCLECFG */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFG_BIT_MINH;
+ env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
+ } else {
+ *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH;
+ }
+ break;
+ case 2: /* INSTRETCFG */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFG_BIT_MINH;
+ env->minstretcfg = (new_val & wr_mask) |
+ (env->minstretcfg & ~wr_mask);
+ } else {
+ *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ switch (cfg_index) {
+ case 0: /* CYCLECFGH */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFGH_BIT_MINH;
+ env->mcyclecfgh = (new_val & wr_mask) |
+ (env->mcyclecfgh & ~wr_mask);
+ } else {
+ *val = env->mcyclecfgh;
+ }
+ break;
+ case 2: /* INSTRETCFGH */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFGH_BIT_MINH;
+ env->minstretcfgh = (new_val & wr_mask) |
+ (env->minstretcfgh & ~wr_mask);
+ } else {
+ *val = env->minstretcfgh;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -1199,6 +1598,14 @@ static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *mhpm_evt_val;
uint64_t of_bit_mask;
+ /* Virtualize scountovf for counter delegation */
+ if (riscv_cpu_cfg(env)->ext_sscofpmf &&
+ riscv_cpu_cfg(env)->ext_ssccfg &&
+ get_field(env->menvcfg, MENVCFG_CDE) &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpm_evt_val = env->mhpmeventh_val;
of_bit_mask = MHPMEVENTH_BIT_OF;
@@ -1260,7 +1667,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (riscv_cpu_mxl(env) == MXL_RV32) {
env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
@@ -1275,7 +1682,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
@@ -1309,13 +1716,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmp(env, csrno, val);
+ return write_vstimecmp(env, csrno, val, ra);
}
if (riscv_cpu_mxl(env) == MXL_RV32) {
@@ -1330,13 +1737,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmph(env, csrno, val);
+ return write_vstimecmph(env, csrno, val, ra);
}
env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
@@ -1377,6 +1784,7 @@ static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
(1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_SW_CHECK)) | \
(1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
(1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
@@ -1440,7 +1848,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno,
}
static RISCVException write_ignore(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -1504,8 +1912,13 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno,
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
- uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
- return get_field(mode_supported, (1 << vm));
+ bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
+ RISCVCPU *cpu = env_archcpu(env);
+ int satp_mode_supported_max = cpu->cfg.max_satp_mode;
+ const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+
+ assert(satp_mode_supported_max >= 0);
+ return vm <= satp_mode_supported_max && valid_vm[vm];
}
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
@@ -1561,7 +1974,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
}
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mstatus = env->mstatus;
uint64_t mask = 0;
@@ -1589,6 +2002,20 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mask |= MSTATUS_VS;
}
+ if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
+ mask |= MSTATUS_SDT;
+ if ((val & MSTATUS_SDT) != 0) {
+ val &= ~MSTATUS_SIE;
+ }
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((val & MSTATUS_MDT) != 0) {
+ val &= ~MSTATUS_MIE;
+ }
+ }
+
if (xl != MXL_RV32 || env->debugger) {
if (riscv_has_ext(env, RVH)) {
mask |= MSTATUS_MPV | MSTATUS_GVA;
@@ -1598,6 +2025,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
}
}
+ /* If cfi lp extension is available, then apply cfi lp mask */
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
+ }
+
mstatus = (mstatus & ~mask) | (val & mask);
env->mstatus = mstatus;
@@ -1610,7 +2042,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
env->xl = cpu_recompute_xl(env);
}
- riscv_cpu_update_mask(env);
return RISCV_EXCP_NONE;
}
@@ -1622,11 +2053,17 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno,
}
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t valh = (uint64_t)val << 32;
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((valh & MSTATUS_MDT) != 0) {
+ mask |= MSTATUS_MIE;
+ }
+ }
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
return RISCV_EXCP_NONE;
@@ -1669,8 +2106,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
+{
+ uint64_t data[INSN_START_WORDS];
+
+ /* Outside of a running cpu, env contains the next pc. */
+ if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
+ return env->pc;
+ }
+
+ /* Within unwind data, [0] is pc and [1] is the opcode. */
+ return data[0] + insn_len(data[1]);
+}
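Aside (not part of the patch): get_next_pc() adds insn_len() of the unwound opcode to the unwound pc. A standalone sketch of the length rule this relies on, assuming only the standard 16-bit (RVC) and 32-bit encodings are in play:

    #include <stdint.h>

    /* Illustrative only: standard RISC-V length encoding -- an opcode whose
     * two low bits are both set is a 32-bit instruction, anything else is a
     * 16-bit compressed (RVC) instruction. */
    static inline int riscv_insn_length(uint16_t first_halfword)
    {
        return (first_halfword & 0x3) == 0x3 ? 4 : 2;
    }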
+
static RISCVException write_misa(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
uint32_t orig_misa_ext = env->misa_ext;
@@ -1684,11 +2134,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
/* Mask extensions that are not supported by this hart */
val &= env->misa_ext_mask;
- /*
- * Suppress 'C' if next instruction is not aligned
- * TODO: this should check next_pc
- */
- if ((val & RVC) && (GETPC() & ~3) != 0) {
+ /* Suppress 'C' if next instruction is not aligned. */
+ if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
val &= ~RVC;
}
@@ -1734,7 +2181,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
return RISCV_EXCP_NONE;
@@ -1928,14 +2375,41 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
};
}
+static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
+{
+ if (!env->virt_enabled) {
+ return csrno;
+ }
+
+ switch (csrno) {
+ case CSR_SISELECT:
+ return CSR_VSISELECT;
+ case CSR_SIREG:
+ case CSR_SIREG2:
+ case CSR_SIREG3:
+ case CSR_SIREG4:
+ case CSR_SIREG5:
+ case CSR_SIREG6:
+ return CSR_VSIREG + (csrno - CSR_SIREG);
+ default:
+ return csrno;
+ };
+}
+
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
target_ulong *iselect;
+ int ret;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
/* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ csrno = csrind_xlate_vs_csrno(env, csrno);
/* Find the iselect CSR based on CSR number */
switch (csrno) {
@@ -1956,7 +2430,12 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
*val = *iselect;
}
- wr_mask &= ISELECT_MASK;
+ if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask &= ISELECT_MASK_SXCSRIND;
+ } else {
+ wr_mask &= ISELECT_MASK_AIA;
+ }
+
if (wr_mask) {
*iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
}
@@ -1964,6 +2443,24 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static bool xiselect_aia_range(target_ulong isel)
+{
+ return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
+ (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
+}
+
+static bool xiselect_cd_range(target_ulong isel)
+{
+ return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
+}
+
+static bool xiselect_ctr_range(int csrno, target_ulong isel)
+{
+ /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
+ return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
+ csrno < CSR_MIREG;
+}
+
static int rmw_iprio(target_ulong xlen,
target_ulong iselect, uint8_t *iprio,
target_ulong *val, target_ulong new_val,
@@ -2009,45 +2506,162 @@ static int rmw_iprio(target_ulong xlen,
return 0;
}
-static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
- target_ulong *val, target_ulong new_val,
- target_ulong wr_mask)
+static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
{
- bool virt, isel_reserved;
- uint8_t *iprio;
- int ret = -EINVAL;
- target_ulong priv, isel, vgein;
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * TOS H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
- /* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
- /* Decode register details from CSR number */
- virt = false;
- isel_reserved = false;
+ if (val) {
+ *val = env->ctr_src[idx];
+ }
+
+ env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
+
+ return 0;
+}
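Aside (not part of the patch): the entry-to-slot mapping described in the circular-buffer comment above is plain modular arithmetic; a standalone sketch with an illustrative helper name:

    #include <stdint.h>

    /* Illustrative only: map a logical CTR entry number (0 == most recent)
     * to a physical slot, given a power-of-two depth and the current write
     * pointer (SCTRSTATUS.WRPTR). With depth 16 and wrptr 7, entry 0 maps
     * to slot 6 and entry 0xF maps to slot 7, matching the table above. */
    static inline uint64_t ctr_entry_to_slot(uint64_t entry, uint64_t depth,
                                             uint64_t wrptr)
    {
        return (wrptr - entry - 1) & (depth - 1);
    }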
+
+static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * head H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
+
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
+
+ if (val) {
+ *val = env->ctr_dst[idx];
+ }
+
+ env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
+
+ return 0;
+}
+
+static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * head H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t mask = wr_mask & CTRDATA_MASK;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
+
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
+
+ if (val) {
+ *val = env->ctr_data[idx];
+ }
+
+ env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
+
+ return 0;
+}
+
+static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ bool virt = false, isel_reserved = false;
+ int ret = -EINVAL;
+ uint8_t *iprio;
+ target_ulong priv, vgein;
+
+ /* VS-mode CSR number passed in has already been translated */
switch (csrno) {
case CSR_MIREG:
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
+ goto done;
+ }
iprio = env->miprio;
- isel = env->miselect;
priv = PRV_M;
break;
case CSR_SIREG:
- if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
+ if (!riscv_cpu_cfg(env)->ext_ssaia ||
+ (env->priv == PRV_S && env->mvien & MIP_SEIP &&
env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
- env->siselect <= ISELECT_IMSIC_EIE63) {
+ env->siselect <= ISELECT_IMSIC_EIE63)) {
goto done;
}
iprio = env->siprio;
- isel = env->siselect;
priv = PRV_S;
break;
case CSR_VSIREG:
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ goto done;
+ }
iprio = env->hviprio;
- isel = env->vsiselect;
priv = PRV_S;
virt = true;
break;
default:
- goto done;
+ goto done;
};
/* Find the selected guest interrupt file */
@@ -2078,13 +2692,224 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
}
done:
+ /*
+ * If AIA is not enabled, illegal instruction exception is always
+ * returned regardless of whether we are in VS-mode or not
+ */
if (ret) {
return (env->virt_enabled && virt && !isel_reserved) ?
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
}
+
return RISCV_EXCP_NONE;
}
+static int rmw_xireg_cd(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ int ctr_index = isel - ISELECT_CD_FIRST;
+ int isel_hpm_start = ISELECT_CD_FIRST + 3;
+
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+ /* Invalid siselect value for reserved */
+ if (ctr_index == 1) {
+ goto done;
+ }
+
+ /* sireg4 and sireg5 provide access to RV32-only CSRs */
+ if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
+ (riscv_cpu_mxl(env) != MXL_RV32)) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+ /* Check Sscofpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
+ (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
+ goto done;
+ }
+
+ /* Check smcntrpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
+ (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
+ (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
+ goto done;
+ }
+
+ if (!get_field(env->mcounteren, BIT(ctr_index)) ||
+ !get_field(env->menvcfg, MENVCFG_CDE)) {
+ goto done;
+ }
+
+ switch (csrno) {
+ case CSR_SIREG:
+ ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG4:
+ ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG2:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ case CSR_SIREG5:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ default:
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
+static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
+ return -EINVAL;
+ }
+
+ if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
+ return rmw_ctrsource(env, isel, val, new_val, wr_mask);
+ } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
+ return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
+ } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
+ return rmw_ctrdata(env, isel, val, new_val, wr_mask);
+ } else if (val) {
+ *val = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
+ *
+ * This is a generic interface for all xireg CSRs. Apart from AIA, all other
+ * extensions using csrind should be implemented here.
+ */
+static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ bool virt = csrno == CSR_VSIREG ? true : false;
+ int ret = -EINVAL;
+
+ if (xiselect_cd_range(isel)) {
+ ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
+ } else if (xiselect_ctr_range(csrno, isel)) {
+ ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
+ } else {
+ /*
+ * As per the specification, access to an unimplemented region is undefined,
+ * but the recommendation is to raise an illegal instruction exception.
+ */
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (ret) {
+ return (env->virt_enabled && virt) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
+ if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
+ csrno != CSR_MIREG4 - 1) {
+ isel = env->miselect;
+ } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
+ csrno != CSR_SIREG4 - 1) {
+ isel = env->siselect;
+ } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
+ csrno != CSR_VSIREG4 - 1) {
+ isel = env->vsiselect;
+ } else {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+}
+
+static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
+ /* Decode register details from CSR number */
+ switch (csrno) {
+ case CSR_MIREG:
+ isel = env->miselect;
+ break;
+ case CSR_SIREG:
+ isel = env->siselect;
+ break;
+ case CSR_VSIREG:
+ isel = env->vsiselect;
+ break;
+ default:
+ goto done;
+ };
+
+ /*
+ * Use the xiselect range to determine actual op on xireg.
+ *
+ * Since we only checked the existence of AIA or Indirect Access in the
+ * predicate, we should check the existence of the exact extension when
+ * we get to a specific range and return illegal instruction exception even
+ * in VS-mode.
+ */
+ if (xiselect_aia_range(isel)) {
+ return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
+ } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
+ riscv_cpu_cfg(env)->ext_sscsrind) {
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+ }
+
+done:
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
@@ -2151,7 +2976,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno,
}
static RISCVException write_mtvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -2170,7 +2995,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int cidx;
PMUCTRState *counter;
@@ -2236,6 +3061,20 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ /* S-mode can only access the bits delegated by M-mode */
+ *val = env->mcountinhibit & env->mcounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
+}
+
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -2244,7 +3083,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -2278,7 +3117,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mscratch = val;
return RISCV_EXCP_NONE;
@@ -2292,7 +3131,7 @@ static RISCVException read_mepc(CPURISCVState *env, int csrno,
}
static RISCVException write_mepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mepc = val;
return RISCV_EXCP_NONE;
@@ -2306,7 +3145,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno,
}
static RISCVException write_mcause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mcause = val;
return RISCV_EXCP_NONE;
@@ -2320,7 +3159,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval = val;
return RISCV_EXCP_NONE;
@@ -2334,20 +3173,42 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
- uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
+ uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
+ MENVCFG_CBZE | MENVCFG_CDE;
if (riscv_cpu_mxl(env) == MXL_RV64) {
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= MENVCFG_LPE;
+ }
+
+ if (env_archcpu(env)->cfg.ext_zicfiss) {
+ mask |= MENVCFG_SSE;
+ }
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_smnpm &&
+ get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MENVCFG_PMM;
+ }
+
+ if ((val & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
-
- return RISCV_EXCP_NONE;
+ return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
@@ -2357,18 +3218,25 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
uint64_t valh = (uint64_t)val << 32;
- env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
+ if ((valh & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
- return RISCV_EXCP_NONE;
+ env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
+ return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
@@ -2386,16 +3254,37 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= SENVCFG_PMM;
+ }
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SENVCFG_LPE;
+ }
+
+ /* SSE in a higher mode must be ON for SSE in the next lower mode to be ON */
+ if (env_archcpu(env)->cfg.ext_zicfiss &&
+ get_field(env->menvcfg, MENVCFG_SSE) &&
+ (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
+ mask |= SENVCFG_SSE;
+ }
+
+ if (env_archcpu(env)->cfg.ext_svukte) {
+ mask |= SENVCFG_UKTE;
+ }
+
env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
return RISCV_EXCP_NONE;
}
@@ -2414,14 +3303,15 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
* henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
* henvcfg.adue is read_only 0 when menvcfg.adue = 0
+ * henvcfg.dte is read_only 0 when menvcfg.dte = 0
*/
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg);
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg);
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
RISCVException ret;
@@ -2432,10 +3322,30 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
}
if (riscv_cpu_mxl(env) == MXL_RV64) {
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE);
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= HENVCFG_LPE;
+ }
+
+ /* H can light up SSE for VS only if HS had it from menvcfg */
+ if (env_archcpu(env)->cfg.ext_zicfiss &&
+ get_field(env->menvcfg, MENVCFG_SSE)) {
+ mask |= HENVCFG_SSE;
+ }
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= HENVCFG_PMM;
+ }
}
- env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
+ env->henvcfg = val & mask;
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
@@ -2450,16 +3360,16 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
return ret;
}
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg)) >> 32;
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg)) >> 32;
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
- HENVCFG_ADUE);
+ HENVCFG_ADUE | HENVCFG_DTE);
uint64_t valh = (uint64_t)val << 32;
RISCVException ret;
@@ -2467,8 +3377,10 @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
if (ret != RISCV_EXCP_NONE) {
return ret;
}
-
- env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
+ env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
@@ -2492,7 +3404,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
if (!riscv_has_ext(env, RVF)) {
@@ -2503,11 +3415,28 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
+ if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information is with MachineState and we can't
+ * figure that out in csr.c. Just enable if Smaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_smaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_mstateen(env, csrno, wr_mask, new_val);
}
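write_mstateen0() above only ever widens the writable-bit mask, one extension at a time, before handing the value to the common write_mstateen() helper. A standalone sketch of that pattern, with made-up feature flags and bit assignments rather than the real SMSTATEEN0_* constants:

#include <stdint.h>
#include <stdio.h>

/* Made-up mstateen0 bits, illustration only. */
#define EN_STATEEN  (1ULL << 63)
#define EN_HSENVCFG (1ULL << 62)
#define EN_SVSLCT   (1ULL << 60)
#define EN_AIA      (1ULL << 59)
#define EN_IMSIC    (1ULL << 58)
#define EN_CTR      (1ULL << 54)

struct cfg { int smaia, smcsrind, ssctr; };

static uint64_t mstateen0_wr_mask(const struct cfg *c)
{
    uint64_t mask = EN_STATEEN | EN_HSENVCFG;

    if (c->smaia || c->smcsrind) {
        mask |= EN_SVSLCT;            /* indirect CSR select */
    }
    if (c->smaia) {
        mask |= EN_AIA | EN_IMSIC;    /* AIA state, incl. IMSIC */
    }
    if (c->ssctr) {
        mask |= EN_CTR;               /* control transfer records */
    }
    return mask;
}

int main(void)
{
    struct cfg c = { .smaia = 1, .smcsrind = 0, .ssctr = 1 };
    printf("wr_mask = %#llx\n", (unsigned long long)mstateen0_wr_mask(&c));
    return 0;
}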
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2534,7 +3463,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2542,11 +3471,15 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_mstateenh(env, csrno, wr_mask, new_val);
}
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2575,7 +3508,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2583,11 +3516,28 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_FCSR;
}
+ if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information is with MachineState and we can't
+ * figure that out in csr.c. Just enable if Ssaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_ssaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_hstateen(env, csrno, wr_mask, new_val);
}
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2618,15 +3568,19 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_hstateenh(env, csrno, wr_mask, new_val);
}
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2665,7 +3619,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2677,7 +3631,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2896,6 +3850,13 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
if (env->xl != MXL_RV32 || env->debugger) {
mask |= SSTATUS64_UXL;
}
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
*val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
return RISCV_EXCP_NONE;
@@ -2908,13 +3869,20 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
if (env->xl != MXL_RV32 || env->debugger) {
mask |= SSTATUS64_UXL;
}
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
/* TODO: Use SXL not MXL. */
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
return RISCV_EXCP_NONE;
}
static RISCVException write_sstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong mask = (sstatus_v1_10_mask);
@@ -2923,8 +3891,15 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
mask |= SSTATUS64_UXL;
}
}
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
- return write_mstatus(env, CSR_MSTATUS, newval);
+ return write_mstatus(env, CSR_MSTATUS, newval, ra);
}
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
@@ -3076,7 +4051,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno,
}
static RISCVException write_stvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -3095,7 +4070,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -3129,7 +4104,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sscratch = val;
return RISCV_EXCP_NONE;
@@ -3143,7 +4118,7 @@ static RISCVException read_sepc(CPURISCVState *env, int csrno,
}
static RISCVException write_sepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sepc = val;
return RISCV_EXCP_NONE;
@@ -3157,7 +4132,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno,
}
static RISCVException write_scause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->scause = val;
return RISCV_EXCP_NONE;
@@ -3171,7 +4146,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno,
}
static RISCVException write_stval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->stval = val;
return RISCV_EXCP_NONE;
@@ -3311,7 +4286,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
}
static RISCVException write_satp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
@@ -3321,6 +4296,86 @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint64_t mask = wr_mask & SCTRDEPTH_MASK;
+
+ if (ret_val) {
+ *ret_val = env->sctrdepth;
+ }
+
+ env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
+
+ /* Correct depth. */
+ if (mask) {
+ uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
+
+ if (depth > SCTRDEPTH_MAX) {
+ depth = SCTRDEPTH_MAX;
+ env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
+ }
+
+ /* Update sctrstatus.WRPTR with a legal value */
+ depth = 16ULL << depth;
+ env->sctrstatus =
+ env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint32_t mask = wr_mask & SCTRSTATUS_MASK;
+
+ if (ret_val) {
+ *ret_val = env->sctrstatus;
+ }
+
+ env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
+
+ /* Update sctrstatus.WRPTR with a legal value */
+ env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
+
+ return RISCV_EXCP_NONE;
+}
+
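rmw_sctrdepth() and rmw_sctrstatus() above clamp the CTR depth encoding and then force sctrstatus.WRPTR into range for a ring of 16 << depth entries. The following standalone sketch shows only that legalization arithmetic, with invented field layouts standing in for QEMU's SCTRDEPTH_*/SCTRSTATUS_* constants:

#include <stdint.h>
#include <stdio.h>

/* Invented layouts, for illustration only. */
#define SCTRDEPTH_FIELD_MASK  0x7u   /* depth encoded in the low 3 bits */
#define SCTRDEPTH_MAX         4u     /* deepest legal encoding: 16 << 4 entries */
#define SCTRSTATUS_WRPTR_MASK 0xffu  /* write pointer lives in the low byte */

static uint32_t legalize_wrptr(uint32_t sctrdepth, uint32_t sctrstatus)
{
    uint32_t depth = sctrdepth & SCTRDEPTH_FIELD_MASK;

    if (depth > SCTRDEPTH_MAX) {
        depth = SCTRDEPTH_MAX;       /* clamp, as rmw_sctrdepth() does */
    }

    /* The ring has 16 << depth entries; WRPTR must stay below that. */
    uint32_t entries = 16u << depth;
    return sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (entries - 1));
}

int main(void)
{
    /* WRPTR = 0xf3 is out of range for a 64-entry (depth = 2) ring. */
    printf("legalized sctrstatus = %#x\n", (unsigned)legalize_wrptr(2, 0xf3));
    return 0;
}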
+static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint64_t csr_mask, mask = wr_mask;
+ uint64_t *ctl_ptr = &env->mctrctl;
+
+ if (csrno == CSR_MCTRCTL) {
+ csr_mask = MCTRCTL_MASK;
+ } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
+ csr_mask = SCTRCTL_MASK;
+ } else {
+ /*
+ * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
+ * or csrno == CSR_VSCTRCTL.
+ */
+ csr_mask = VSCTRCTL_MASK;
+ ctl_ptr = &env->vsctrctl;
+ }
+
+ mask &= csr_mask;
+
+ if (ret_val) {
+ *ret_val = *ctl_ptr & csr_mask;
+ }
+
+ *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
+
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -3453,9 +4508,20 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
- env->hstatus = val;
+ uint64_t mask = (target_ulong)-1;
+ if (!env_archcpu(env)->cfg.ext_svukte) {
+ mask &= ~HSTATUS_HUKTE;
+ }
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (!env_archcpu(env)->cfg.ext_ssnpm ||
+ riscv_cpu_mxl(env) != MXL_RV64 ||
+ get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
+ mask &= ~HSTATUS_HUPMM;
+ }
+ env->hstatus = (env->hstatus & ~mask) | (val & mask);
+
if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
qemu_log_mask(LOG_UNIMP,
"QEMU does not support mixed HSXLEN options.");
@@ -3474,7 +4540,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hedeleg = val & vs_delegable_excps;
return RISCV_EXCP_NONE;
@@ -3495,7 +4561,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
}
static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVException ret;
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
@@ -3758,7 +4824,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -3778,7 +4844,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno,
}
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* Only GEILEN:1 bits implemented and BIT0 is never implemented */
val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
@@ -3797,7 +4863,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno,
}
static RISCVException write_htval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->htval = val;
return RISCV_EXCP_NONE;
@@ -3811,7 +4877,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno,
}
static RISCVException write_htinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -3833,7 +4899,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
}
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hgatp = legalize_xatp(env, env->hgatp, val);
return RISCV_EXCP_NONE;
@@ -3851,7 +4917,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -3883,7 +4949,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -3907,7 +4973,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno,
}
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hvictl = val & HVICTL_VALID_MASK;
return RISCV_EXCP_NONE;
@@ -3972,7 +5038,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 0, env->hviprio, val);
}
@@ -3984,7 +5050,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 4, env->hviprio, val);
}
@@ -3996,7 +5062,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 8, env->hviprio, val);
}
@@ -4008,7 +5074,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 12, env->hviprio, val);
}
@@ -4022,12 +5088,19 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if ((val & VSSTATUS64_UXL) == 0) {
mask &= ~VSSTATUS64_UXL;
}
+ if ((env->henvcfg & HENVCFG_DTE)) {
+ if ((val & SSTATUS_SDT) != 0) {
+ val &= ~SSTATUS_SIE;
+ }
+ } else {
+ val &= ~SSTATUS_SDT;
+ }
env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
return RISCV_EXCP_NONE;
}
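write_vsstatus() above applies the Ssdbltrp rule that vsstatus.SDT is only writable while henvcfg.DTE is set, and that writing SDT=1 also clears SIE. A minimal sketch of that legalization, with placeholder bit values rather than the real SSTATUS_*/HENVCFG_* encodings:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, illustration only. */
#define ST_SIE (1ULL << 1)
#define ST_SDT (1ULL << 24)
#define HE_DTE (1ULL << 59)

static uint64_t legalize_vsstatus_write(uint64_t val, uint64_t henvcfg)
{
    if (henvcfg & HE_DTE) {
        if (val & ST_SDT) {
            val &= ~ST_SIE;   /* setting SDT forcibly clears SIE */
        }
    } else {
        val &= ~ST_SDT;       /* SDT is read-only zero without DTE */
    }
    return val;
}

int main(void)
{
    uint64_t v = legalize_vsstatus_write(ST_SDT | ST_SIE, HE_DTE);
    printf("SDT=%d SIE=%d\n", !!(v & ST_SDT), !!(v & ST_SIE));
    return 0;
}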
@@ -4040,7 +5113,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
}
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -4059,7 +5132,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsscratch = val;
return RISCV_EXCP_NONE;
@@ -4073,7 +5146,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno,
}
static RISCVException write_vsepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsepc = val;
return RISCV_EXCP_NONE;
@@ -4087,7 +5160,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno,
}
static RISCVException write_vscause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vscause = val;
return RISCV_EXCP_NONE;
@@ -4101,7 +5174,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno,
}
static RISCVException write_vstval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstval = val;
return RISCV_EXCP_NONE;
@@ -4115,7 +5188,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
}
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsatp = legalize_xatp(env, env->vsatp, val);
return RISCV_EXCP_NONE;
@@ -4129,7 +5202,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval2 = val;
return RISCV_EXCP_NONE;
@@ -4143,7 +5216,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno,
}
static RISCVException write_mtinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtinst = val;
return RISCV_EXCP_NONE;
@@ -4158,7 +5231,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
mseccfg_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -4174,7 +5247,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
@@ -4190,7 +5263,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
return RISCV_EXCP_NONE;
@@ -4204,7 +5277,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno,
}
static RISCVException write_tselect(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
tselect_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -4228,7 +5301,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno,
}
static RISCVException write_tdata(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!tdata_available(env, csrno - CSR_TDATA1)) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -4253,7 +5326,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno,
}
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
int32_t mask;
@@ -4270,299 +5343,71 @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-/*
- * Functions to access Pointer Masking feature registers
- * We have to check if current priv lvl could modify
- * csr in given mode
- */
-static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
-{
- int csr_priv = get_field(csrno, 0x300);
- int pm_current;
-
- if (env->debugger) {
- return false;
- }
- /*
- * If priv lvls differ that means we're accessing csr from higher priv lvl,
- * so allow the access
- */
- if (env->priv != csr_priv) {
- return false;
- }
- switch (env->priv) {
- case PRV_M:
- pm_current = get_field(env->mmte, M_PM_CURRENT);
- break;
- case PRV_S:
- pm_current = get_field(env->mmte, S_PM_CURRENT);
- break;
- case PRV_U:
- pm_current = get_field(env->mmte, U_PM_CURRENT);
- break;
- default:
- g_assert_not_reached();
- }
- /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
- return !pm_current;
-}
-
-static RISCVException read_mmte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & MMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_mmte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
- target_ulong wpri_val = val & MMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
- /* for machine mode pm.current is hardwired to 1 */
- wpri_val |= MMTE_M_PM_CURRENT;
-
- /* hardwiring pm.instruction bit to 0, since it's not supported yet */
- wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
- env->mmte = wpri_val | EXT_STATUS_DIRTY;
- riscv_cpu_update_mask(env);
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_smte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & SMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_smte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & SMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~SMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_umte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & UMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_umte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & UMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~UMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mpmmask;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
-
- env->mpmmask = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmmask = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_spmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->spmmask;
+ *val = env->mnscratch;
return RISCV_EXCP_NONE;
}
-static RISCVException write_spmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmmask = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnscratch = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_upmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->upmmask;
+ *val = env->mnepc;
return RISCV_EXCP_NONE;
}
-static RISCVException write_upmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnepc(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->upmmask = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnepc = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
+static RISCVException read_mncause(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->mpmbase;
+ *val = env->mncause;
return RISCV_EXCP_NONE;
}
-static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mncause(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- env->mpmbase = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmbase = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mncause = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_spmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->spmbase;
+ *val = env->mnstatus;
return RISCV_EXCP_NONE;
}
-static RISCVException write_spmbase(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
+ target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmbase = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
+ if (riscv_has_ext(env, RVH)) {
+ /* Flush tlb on mnstatus fields that affect VM. */
+ if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
+ tlb_flush(env_cpu(env));
}
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_upmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->upmbase;
- return RISCV_EXCP_NONE;
-}
-static RISCVException write_upmbase(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
+ mask |= MNSTATUS_MNPV;
}
- env->upmbase = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ /* mnstatus.mnie can only be cleared by hardware. */
+ env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
return RISCV_EXCP_NONE;
}
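write_mnstatus() above lets software set NMIE but never clear it, and flushes the TLB only when MNPV actually changes with RVH present. A standalone sketch of the set-only behaviour of NMIE, with invented bit positions in place of the real MNSTATUS_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Invented mnstatus layout, illustration only. */
#define NMIE (1u << 3)
#define MNPP (3u << 11)

static uint32_t mnstatus_write(uint32_t old, uint32_t val)
{
    uint32_t mask = NMIE | MNPP;
    /* Keep a set NMIE; only hardware clears it, on RNMI entry. */
    return (old & NMIE) | (val & mask);
}

int main(void)
{
    uint32_t s = mnstatus_write(0, NMIE);   /* software sets NMIE */
    s = mnstatus_write(s, 0);               /* attempt to clear it */
    printf("NMIE still set: %d\n", !!(s & NMIE));
    return 0;
}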
@@ -4688,7 +5533,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
- target_ulong write_mask)
+ target_ulong write_mask,
+ uintptr_t ra)
{
RISCVException ret;
target_ulong old_value = 0;
@@ -4718,7 +5564,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
if (write_mask) {
new_value = (old_value & ~write_mask) | (new_value & write_mask);
if (csr_ops[csrno].write) {
- ret = csr_ops[csrno].write(env, csrno, new_value);
+ ret = csr_ops[csrno].write(env, csrno, new_value, ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@@ -4741,25 +5587,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
+ return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra)
{
RISCVException ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value,
- Int128 write_mask)
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
Int128 old_value;
@@ -4781,7 +5627,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
}
} else if (csr_ops[csrno].write) {
/* avoids having to write wrappers for all registers */
- ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+ ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@@ -4808,7 +5654,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value,
- int128_zero(), int128_zero());
+ int128_zero(), int128_zero(), 0);
}
/*
@@ -4819,9 +5665,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
* accesses
*/
target_ulong old_value;
- ret = riscv_csrrw_do64(env, csrno, &old_value,
- (target_ulong)0,
- (target_ulong)0);
+ ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -4829,8 +5673,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
}
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask)
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
@@ -4840,7 +5684,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
}
if (csr_ops[csrno].read128) {
- return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do128(env, csrno, ret_value,
+ new_value, write_mask, ra);
}
/*
@@ -4853,7 +5698,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
int128_getlo(new_value),
- int128_getlo(write_mask));
+ int128_getlo(write_mask), ra);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -4876,7 +5721,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
if (!write_mask) {
ret = riscv_csrr(env, csrno, ret_value);
} else {
- ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
+ ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
}
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
@@ -4892,7 +5737,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno,
}
static RISCVException write_jvt(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->jvt = val;
return RISCV_EXCP_NONE;
@@ -4934,6 +5779,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/* Zcmt Extension */
[CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
+ /* zicfiss Extension, shadow stack register */
+ [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
+
#if !defined(CONFIG_USER_ONLY)
/* Machine Timers and Counters */
[CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
@@ -4981,8 +5829,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
- [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
+ [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
+ rmw_xiselect },
+ [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
+ rmw_xireg },
+
+ /* Machine Indirect Register Alias */
+ [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Machine-Level Interrupts (AIA) */
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
@@ -5070,6 +5932,21 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_sstateen_1_3,
.min_priv_ver = PRIV_VERSION_1_12_0 },
+ /* RNMI */
+ [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
+ /* Supervisor Counter Delegation */
+ [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
+ read_scountinhibit, write_scountinhibit,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
/* Supervisor Trap Setup */
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
NULL, read_sstatus_i128 },
@@ -5100,8 +5977,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_SATP] = { "satp", satp, read_satp, write_satp },
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
- [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
+ [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
+ rmw_xireg },
+
+ /* Supervisor Indirect Register Alias */
+ [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Supervisor-Level Interrupts (AIA) */
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
@@ -5164,7 +6055,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
+ [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
.min_priv_ver = PRIV_VERSION_1_12_0 },
@@ -5180,9 +6071,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/*
* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
*/
- [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
- rmw_xiselect },
- [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
+ [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xireg },
+
+ /* Virtual Supervisor Indirect Alias */
+ [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* VS-Level Interrupts (H-extension with AIA) */
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
@@ -5232,24 +6136,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
- /* User Pointer Masking */
- [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
- [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
- write_upmmask },
- [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
- write_upmbase },
- /* Machine Pointer Masking */
- [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
- [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
- write_mpmmask },
- [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
- write_mpmbase },
- /* Supervisor Pointer Masking */
- [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
- [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
- write_spmmask },
- [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
- write_spmbase },
+ [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
+ [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
+ [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
+ [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
+ [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
/* Performance Counters */
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index 0b5099f..5664466 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -28,9 +28,10 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/watchpoint.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
/*
* The following M-mode trigger CSRs are implemented:
@@ -217,6 +218,66 @@ static inline void warn_always_zero_bit(target_ulong val, target_ulong mask,
}
}
+static target_ulong textra_validate(CPURISCVState *env, target_ulong tdata3)
+{
+ target_ulong mhvalue, mhselect;
+ target_ulong mhselect_new;
+ target_ulong textra;
+ const uint32_t mhselect_no_rvh[8] = { 0, 0, 0, 0, 4, 4, 4, 4 };
+
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ mhvalue = get_field(tdata3, TEXTRA32_MHVALUE);
+ mhselect = get_field(tdata3, TEXTRA32_MHSELECT);
+ /* Validate unimplemented (always zero) bits */
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SBYTEMASK,
+ "sbytemask");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SVALUE,
+ "svalue");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SSELECT,
+ "sselect");
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mhvalue = get_field(tdata3, TEXTRA64_MHVALUE);
+ mhselect = get_field(tdata3, TEXTRA64_MHSELECT);
+ /* Validate unimplemented (always zero) bits */
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SBYTEMASK,
+ "sbytemask");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SVALUE,
+ "svalue");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SSELECT,
+ "sselect");
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Validate mhselect. */
+ mhselect_new = mhselect_no_rvh[mhselect];
+ if (mhselect != mhselect_new) {
+ qemu_log_mask(LOG_UNIMP, "mhselect only supports 0 or 4 for now\n");
+ }
+
+ /* Write legal values into textra */
+ textra = 0;
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ textra = set_field(textra, TEXTRA32_MHVALUE, mhvalue);
+ textra = set_field(textra, TEXTRA32_MHSELECT, mhselect_new);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ textra = set_field(textra, TEXTRA64_MHVALUE, mhvalue);
+ textra = set_field(textra, TEXTRA64_MHSELECT, mhselect_new);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return textra;
+}
+
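textra_validate() above extracts mhvalue/mhselect from a tdata3 write, warns about the always-zero fields, and maps every mhselect encoding onto the two values supported without RVH (0 and 4) through a small lookup table. A standalone sketch of that legalization step, assuming a simplified 64-bit textra layout that differs from the real TEXTRA64_* fields:

#include <stdint.h>
#include <stdio.h>

/* Simplified layout, illustration only: mhselect in bits [2:0],
 * mhvalue in bits [15:3]. */
static uint64_t legalize_textra(uint64_t tdata3)
{
    static const uint32_t mhselect_no_rvh[8] = { 0, 0, 0, 0, 4, 4, 4, 4 };
    uint64_t mhselect = tdata3 & 0x7;
    uint64_t mhvalue  = (tdata3 >> 3) & 0x1fff;

    mhselect = mhselect_no_rvh[mhselect];   /* only 0 and 4 are supported */
    return (mhvalue << 3) | mhselect;
}

int main(void)
{
    /* mhselect = 5 (an H-extension encoding) falls back to 4. */
    printf("textra = %#llx\n",
           (unsigned long long)legalize_textra((0x42ULL << 3) | 5));
    return 0;
}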
static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index)
{
trigger_action_t action = get_trigger_action(env, trigger_index);
@@ -304,11 +365,54 @@ static bool trigger_priv_match(CPURISCVState *env, trigger_type_t type,
return false;
}
+static bool trigger_textra_match(CPURISCVState *env, trigger_type_t type,
+ int trigger_index)
+{
+ target_ulong textra = env->tdata3[trigger_index];
+ target_ulong mhvalue, mhselect;
+
+ if (type < TRIGGER_TYPE_AD_MATCH || type > TRIGGER_TYPE_AD_MATCH6) {
+ /* textra checking is only applicable when type is 2, 3, 4, 5, or 6 */
+ return true;
+ }
+
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ mhvalue = get_field(textra, TEXTRA32_MHVALUE);
+ mhselect = get_field(textra, TEXTRA32_MHSELECT);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mhvalue = get_field(textra, TEXTRA64_MHVALUE);
+ mhselect = get_field(textra, TEXTRA64_MHSELECT);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Check mhvalue and mhselect. */
+ switch (mhselect) {
+ case MHSELECT_IGNORE:
+ break;
+ case MHSELECT_MCONTEXT:
+ /* Match if the low bits of mcontext/hcontext equal mhvalue. */
+ if (mhvalue != env->mcontext) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
/* Common matching conditions for all types of the triggers. */
static bool trigger_common_match(CPURISCVState *env, trigger_type_t type,
int trigger_index)
{
- return trigger_priv_match(env, type, trigger_index);
+ return trigger_priv_match(env, type, trigger_index) &&
+ trigger_textra_match(env, type, trigger_index);
}
/* type 2 trigger */
@@ -375,7 +479,7 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
bool enabled = type2_breakpoint_enabled(ctrl);
CPUState *cs = env_cpu(env);
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
- uint32_t size;
+ uint32_t size, def_size;
if (!enabled) {
return;
@@ -398,7 +502,9 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
cpu_watchpoint_insert(cs, addr, size, flags,
&env->cpu_watchpoint[index]);
} else {
- cpu_watchpoint_insert(cs, addr, 8, flags,
+ def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 8 : 4;
+
+ cpu_watchpoint_insert(cs, addr, def_size, flags,
&env->cpu_watchpoint[index]);
}
}
@@ -441,14 +547,11 @@ static void type2_reg_write(CPURISCVState *env, target_ulong index,
}
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for type 2 trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
/* type 6 trigger */
@@ -558,14 +661,11 @@ static void type6_reg_write(CPURISCVState *env, target_ulong index,
}
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for type 6 trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
/* icount trigger type */
@@ -741,14 +841,11 @@ static void itrigger_reg_write(CPURISCVState *env, target_ulong index,
"tdata2 is not supported for icount trigger\n");
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for icount trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
static int itrigger_get_adjust_count(CPURISCVState *env)
diff --git a/target/riscv/debug.h b/target/riscv/debug.h
index c347863..f76b8f9 100644
--- a/target/riscv/debug.h
+++ b/target/riscv/debug.h
@@ -131,6 +131,9 @@ enum {
#define ITRIGGER_VU BIT(25)
#define ITRIGGER_VS BIT(26)
+#define MHSELECT_IGNORE 0
+#define MHSELECT_MCONTEXT 4
+
bool tdata_available(CPURISCVState *env, int tdata_index);
target_ulong tselect_csr_read(CPURISCVState *env);
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 91b1a56..706bdfa 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index c07df97..1934f91 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -62,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return 0;
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
return gdb_get_reg32(mem_buf, tmp);
case MXL_RV64:
@@ -82,7 +82,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
target_ulong tmp;
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
tmp = (int32_t)ldl_p(mem_buf);
length = 4;
@@ -213,7 +213,10 @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- return gdb_get_regl(buf, env->priv);
+ /* Per RISC-V debug spec v1.0.0 rc4 */
+ target_ulong vbit = (env->virt_enabled) ? BIT(2) : 0;
+
+ return gdb_get_regl(buf, env->priv | vbit);
#endif
}
return 0;
@@ -226,10 +229,22 @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- env->priv = ldtul_p(mem_buf) & 0x3;
- if (env->priv == PRV_RESERVED) {
- env->priv = PRV_S;
+ target_ulong new_priv = ldtul_p(mem_buf) & 0x3;
+ bool new_virt = 0;
+
+ if (new_priv == PRV_RESERVED) {
+ new_priv = PRV_S;
+ }
+
+ if (new_priv != PRV_M) {
+ new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2;
}
+
+ if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, new_priv, new_virt);
#endif
return sizeof(target_ulong);
}
@@ -344,7 +359,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs),
0);
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 451261c..85d73e4 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -131,10 +131,13 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(sret, tl, env)
DEF_HELPER_1(mret, tl, env)
+DEF_HELPER_1(mnret, tl, env)
+DEF_HELPER_1(ctr_clear, void, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wrs_nto, void, env)
DEF_HELPER_1(tlb_flush, void, env)
DEF_HELPER_1(tlb_flush_all, void, env)
+DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl)
/* Native Debug */
DEF_HELPER_1(itrigger_match, void, env)
#endif
diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode
index 3953bcf..bf893d1 100644
--- a/target/riscv/insn16.decode
+++ b/target/riscv/insn16.decode
@@ -140,6 +140,10 @@ sw 110 ... ... .. ... 00 @cs_w
addi 000 . ..... ..... 01 @ci
addi 010 . ..... ..... 01 @c_li
{
+ # c.sspush x1 carved out of zcmops
+ sspush 011 0 00001 00000 01 &r2_s rs2=1 rs1=0
+ # c.sspopchk x5 carved out of zcmops
+ sspopchk 011 0 00101 00000 01 &r2 rs1=5 rd=0
c_mop_n 011 0 0 n:3 1 00000 01
illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0
addi 011 . 00010 ..... 01 @c_addi16sp
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index c45b8fa..cd23b1f 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -114,16 +114,22 @@
# *** Privileged Instructions ***
ecall 000000000000 00000 000 00000 1110011
ebreak 000000000001 00000 000 00000 1110011
+sctrclr 000100000100 00000 000 00000 1110011
uret 0000000 00010 00000 000 00000 1110011
sret 0001000 00010 00000 000 00000 1110011
mret 0011000 00010 00000 000 00000 1110011
wfi 0001000 00101 00000 000 00000 1110011
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
+
+# *** NMI ***
+mnret 0111000 00010 00000 000 00000 1110011
# *** RV32I Base Instruction Set ***
lui .................... ..... 0110111 @u
-auipc .................... ..... 0010111 @u
+{
+ lpad label:20 00000 0010111
+ auipc .................... ..... 0010111 @u
+}
jal .................... ..... 1101111 @j
jalr ............ ..... 000 ..... 1100111 @i
beq ....... ..... ..... 000 ..... 1100011 @b
@@ -243,6 +249,7 @@ remud 0000001 ..... ..... 111 ..... 1111011 @r
lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st
+ssamoswap_w 01001 . . ..... ..... 010 ..... 0101111 @atom_st
amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st
amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st
amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st
@@ -256,6 +263,7 @@ amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st
lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld
sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st
amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st
+ssamoswap_d 01001 . . ..... ..... 011 ..... 0101111 @atom_st
amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st
amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st
amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st
@@ -695,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
-vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
-vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
-vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
-vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
-vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
-vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
-vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r
-vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
+vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r
+vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r
+vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r
+vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r
+vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r
+vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r
+vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r
+vmxnor_mm 011111 1 ..... ..... 010 ..... 1010111 @r
vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm
vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm
vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm
@@ -724,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
-vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
+vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r
vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd
vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd
@@ -1019,8 +1027,23 @@ amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st
# *** Zimop may-be-operation extension ***
-mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
-mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
+{
+ # zicfiss instructions carved out of mop.r
+ [
+ ssrdp 1100110 11100 00000 100 rd:5 1110011
+ sspopchk 1100110 11100 00001 100 00000 1110011 &r2 rs1=1 rd=0
+ sspopchk 1100110 11100 00101 100 00000 1110011 &r2 rs1=5 rd=0
+ ]
+ mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
+}
+{
+ # zicfiss instruction carved out of mop.rr
+ [
+ sspush 1100111 00001 00000 100 00000 1110011 &r2_s rs2=1 rs1=0
+ sspush 1100111 00101 00000 100 00000 1110011 &r2_s rs2=5 rs1=0
+ ]
+ mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
+}
# *** Zabhb Standard Extension ***
amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index bc5263a..8a62b4c 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -18,6 +18,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define REQUIRE_SMRNMI(ctx) do { \
+ if (!ctx->cfg_ptr->ext_smrnmi) { \
+ return false; \
+ } \
+} while (0)
+
static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
{
/* always generates U-level ECALL, fixed in do_interrupt handler */
@@ -69,6 +75,17 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
return true;
}
+static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a)
+{
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_helper_ctr_clear(tcg_env);
+ return true;
+ }
+#endif
+ return false;
+}
+
static bool trans_uret(DisasContext *ctx, arg_uret *a)
{
return false;
@@ -78,8 +95,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
{
#ifndef CONFIG_USER_ONLY
if (has_ext(ctx, RVS)) {
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
+ gen_update_pc(ctx, 0);
gen_helper_sret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@@ -95,8 +113,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
static bool trans_mret(DisasContext *ctx, arg_mret *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
+ gen_update_pc(ctx, 0);
gen_helper_mret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@@ -106,10 +125,24 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
#endif
}
+static bool trans_mnret(DisasContext *ctx, arg_mnret *a)
+{
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_SMRNMI(ctx);
+ decode_save_opc(ctx, 0);
+ gen_helper_mnret(cpu_pc, tcg_env);
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+#else
+ return false;
+#endif
+}
+
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_update_pc(ctx, ctx->cur_insn_len);
gen_helper_wfi(tcg_env);
return true;
@@ -121,14 +154,9 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_tlb_flush(tcg_env);
return true;
#endif
return false;
}
-
-static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
-{
- return false;
-}
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 39bbf60..9cf3ae8 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -34,7 +34,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1;
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
if (a->rl) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
@@ -61,7 +61,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
index 0a9cd1e..066dc36 100644
--- a/target/riscv/insn_trans/trans_rvbf16.c.inc
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
- vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+ vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
@@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
- vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+ vext_check_ds(ctx, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm(ctx, RISCV_FRM_DYN);
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index 1f5fac6..30883ea 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -47,11 +47,21 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ /*
+ * FLD and FSD are only guaranteed to execute atomically if the effective
+ * address is naturally aligned and XLEN≄64. Also, zama16b applies to
+ * loads and stores of no more than MXLEN bits defined in the F, D, and
+ * Q extensions.
+ */
+ if (get_xl_max(ctx) == MXL_RV32) {
+ memop |= MO_ATOM_NONE;
+ } else if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
+ } else {
+ memop |= MO_ATOM_IFALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
@@ -67,11 +77,15 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (get_xl_max(ctx) == MXL_RV32) {
+ memop |= MO_ATOM_NONE;
+ } else if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
+ } else {
+ memop |= MO_ATOM_IFALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
return true;
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
index f771aa1..ed73afe 100644
--- a/target/riscv/insn_trans/trans_rvf.c.inc
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -48,11 +48,11 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
dest = cpu_fpr[a->rd];
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
@@ -70,11 +70,11 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
return true;
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
index aa9d41c..03c6694 100644
--- a/target/riscv/insn_trans/trans_rvh.c.inc
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
@@ -44,7 +44,7 @@ static bool do_hlv(DisasContext *ctx, arg_r2 *a,
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
func(dest, tcg_env, addr);
gen_set_gpr(ctx, a->rd, dest);
return true;
@@ -56,7 +56,7 @@ static bool do_hsv(DisasContext *ctx, arg_r2_s *a,
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
func(tcg_env, addr, data);
return true;
}
@@ -147,7 +147,7 @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
{
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
@@ -158,7 +158,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a)
{
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index 98e3806..b9c7160 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -36,6 +36,49 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)
return true;
}
+static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
+{
+ /*
+ * fcfi_lp_expected can be set only if fcfi was enabled.
+ * Translate further only if fcfi_lp_expected is set.
+ * lpad comes from NOP space anyway, so return true if
+ * fcfi_lp_expected is false.
+ */
+ if (!ctx->fcfi_lp_expected) {
+ return true;
+ }
+
+ ctx->fcfi_lp_expected = false;
+ if ((ctx->base.pc_next) & 0x3) {
+ /*
+ * Misaligned: according to the spec we should raise a sw-check exception.
+ */
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ return true;
+ }
+
+ /* per spec, label check performed only when embedded label non-zero */
+ if (a->label != 0) {
+ TCGLabel *skip = gen_new_label();
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ gen_set_label(skip);
+ }
+
+ tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
+ offsetof(CPURISCVState, elp));
+
+ return true;
+}
+
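
For reference, the label check in trans_lpad above extracts bits [31:12] of x7 (t2) and compares them against the 20-bit label embedded in the lpad encoding. A minimal standalone sketch of that arithmetic, in plain C and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: mirrors the 20-bit label comparison done by lpad. */
static int lpad_label_matches(uint64_t x7, uint32_t label)
{
    uint32_t expected = (uint32_t)((x7 >> 12) & 0xfffff);  /* bits [31:12] */
    return expected == (label & 0xfffff);
}

int main(void)
{
    uint64_t x7 = 0xabcde000;  /* label 0xabcde placed at bits [31:12] */
    printf("%d\n", lpad_label_matches(x7, 0xabcde));  /* 1: check passes */
    printf("%d\n", lpad_label_matches(x7, 0x12345));  /* 0: sw-check exception */
    return 0;
}
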
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
TCGv target_pc = dest_gpr(ctx, a->rd);
@@ -50,6 +93,51 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
return true;
}
+#ifndef CONFIG_USER_ONLY
+/*
+ * Indirect calls
+ * - jalr x1, rs where rs != x5;
+ * - jalr x5, rs where rs != x1;
+ * - c.jalr rs1 where rs1 != x5;
+ *
+ * Indirect jumps
+ * - jalr x0, rs where rs != x1 and rs != x5;
+ * - c.jr rs1 where rs1 != x1 and rs1 != x5.
+ *
+ * Returns
+ * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
+ * - c.jr rs1 where rs1 == x1 or rs1 == x5.
+ *
+ * Co-routine swap
+ * - jalr x1, x5;
+ * - jalr x5, x1;
+ * - c.jalr x5.
+ *
+ * Other indirect jumps
+ * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
+ */
+static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
+{
+ TCGv src = tcg_temp_new();
+ TCGv type;
+
+ if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
+ } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
+ } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
+ } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
+ } else {
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
+ }
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+}
+#endif
+
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
TCGLabel *misaligned = NULL;
@@ -63,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
tcg_gen_ext32s_tl(target_pc, target_pc);
}
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
TCGv t0 = tcg_temp_new();
misaligned = gen_new_label();
@@ -74,7 +164,25 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, a->rd, succ_pc);
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_ctr_jalr(ctx, a, target_pc);
+ }
+#endif
+
tcg_gen_mov_tl(cpu_pc, target_pc);
+ if (ctx->fcfi_enabled) {
+ /*
+ * Returns from functions (i.e. rs1 == xRA || rs1 == xT0) are not
+ * tracked. Zicfilp also introduces software-guarded branches, which are
+ * not tracked either; rs1 == xT2 is a software-guarded branch.
+ */
+ if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
+ tcg_gen_st8_tl(tcg_constant_tl(1),
+ tcg_env, offsetof(CPURISCVState, elp));
+ }
+ }
+
lookup_and_goto_ptr(ctx);
if (misaligned) {
@@ -176,18 +284,44 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
} else {
tcg_gen_brcond_tl(cond, src1, src2, l);
}
+
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+ }
+#endif
+
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
ctx->pc_save = orig_pc_save;
gen_set_label(l); /* branch taken */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext) &&
(a->imm & 0x3)) {
/* misaligned */
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, a->imm);
gen_exception_inst_addr_mis(ctx, target_pc);
} else {
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_pc_plus_diff(dest, ctx, a->imm);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+ }
+#endif
gen_goto_tb(ctx, 0, a->imm);
}
ctx->pc_save = -1;
@@ -268,10 +402,10 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
bool out;
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
if (get_xl(ctx) == MXL_RV128) {
out = gen_load_i128(ctx, a, memop);
} else {
@@ -369,10 +503,10 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
if (get_xl(ctx) == MXL_RV128) {
return gen_store_i128(ctx, a, memop);
} else {
@@ -834,7 +968,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
static bool do_csr_post(DisasContext *ctx)
{
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
/* We may have changed important cpu state -- exit to main loop. */
gen_update_pc(ctx, ctx->cur_insn_len);
exit_tb(ctx);
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 3a3896b..2b6077a 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
}
}
-/* Destination vector register group cannot overlap source mask register. */
-static bool require_vm(int vm, int vd)
+/*
+ * Source and destination vector register groups cannot overlap source mask
+ * register:
+ *
+ * A vector register cannot be used to provide source operands with more than
+ * one EEW for a single instruction. A mask register source is considered to
+ * have EEW=1 for this constraint. An encoding that would result in the same
+ * vector register being read with two or more different EEWs, including when
+ * the vector register appears at different positions within two or more vector
+ * register groups, is reserved.
+ * (Section 5.2)
+ *
+ * A destination vector register group can overlap a source vector
+ * register group only if one of the following holds:
+ * 1. The destination EEW equals the source EEW.
+ * 2. The destination EEW is smaller than the source EEW and the overlap
+ * is in the lowest-numbered part of the source register group.
+ * 3. The destination EEW is greater than the source EEW, the source EMUL
+ * is at least 1, and the overlap is in the highest-numbered part of
+ * the destination register group.
+ * For the purpose of determining register group overlap constraints, mask
+ * elements have EEW=1.
+ * (Section 5.2)
+ */
+static bool require_vm(int vm, int v)
{
- return (vm != 0 || vd != 0);
+ return (vm != 0 || v != 0);
}
static bool require_nf(int vd, int nf, int lmul)
@@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
return ret;
}
+/*
+ * Check whether a vector register is used to provide source operands with
+ * more than one EEW for the vector instruction.
+ * Returns true if the instruction has valid encoding
+ * Returns false if encoding violates the mismatched input EEWs constraint
+ */
+static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
+ int vs2, uint8_t eew_vs2, int vm)
+{
+ bool is_valid = true;
+ int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
+ int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
+
+ /* When vm is 0, the vs1 & vs2 (EEW!=1) groups can't overlap v0 (EEW=1) */
+ if ((vs1 != -1 && !require_vm(vm, vs1)) ||
+ (vs2 != -1 && !require_vm(vm, vs2))) {
+ is_valid = false;
+ }
+
+ /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
+ if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
+ is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
+ vs2, 1 << MAX(emul_vs2, 0))) {
+ is_valid = false;
+ }
+
+ return is_valid;
+}
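
To make the EEW/EMUL arithmetic above concrete, here is a small self-contained sketch, plain C and independent of QEMU, of how an EMUL is derived in log2 form from EEW, SEW and LMUL, and how two register groups are tested for overlap; the helper names are illustrative only:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* log2 values: eew/sew in 0..3 for 8..64 bits, lmul in -3..3 for 1/8..8 */
static int emul_log2(int eew, int sew, int lmul)
{
    return eew - sew + lmul;
}

/* Two register groups [v, v+nregs) overlap if their ranges intersect. */
static int groups_overlap(int va, int na, int vb, int nb)
{
    return va + na > vb && vb + nb > va;
}

int main(void)
{
    /*
     * SEW=16 (log2 1), LMUL=1 (log2 0): a widening op reads a source at
     * EEW=16 and writes the destination at EEW=32, so the destination
     * group spans twice as many registers as the source group.
     */
    int emul_src = emul_log2(1, 1, 0);   /* 0 -> 1 register  */
    int emul_dst = emul_log2(2, 1, 0);   /* 1 -> 2 registers */

    printf("src regs=%d dst regs=%d overlap(v2,v2)=%d\n",
           1 << MAX(emul_src, 0), 1 << MAX(emul_dst, 0),
           groups_overlap(2, 1 << MAX(emul_dst, 0),
                          2, 1 << MAX(emul_src, 0)));
    return 0;
}
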
+
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
return require_vm(vm, vd) &&
require_align(vd, s->lmul) &&
- require_align(vs, s->lmul);
+ require_align(vs, s->lmul) &&
+ vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
}
/*
@@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ss(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul);
}
@@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
require_align(vs, s->lmul) &&
require_noover(vd, s->lmul + 1, vs, s->lmul);
}
@@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
require_align(vs, s->lmul + 1);
}
@@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul) &&
require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
@@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs1, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs2, s->lmul + 1);
}
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
- bool ret = vext_narrow_check_common(s, vd, vs, vm);
+ bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
if (vd != vs) {
ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
}
@@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_sd(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs1, s->lmul);
}
@@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
{
bool ret = require_align(vs2, s->lmul) &&
require_align(vd, s->lmul) &&
- require_vm(vm, vd);
+ require_vm(vm, vd) &&
+ vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
+
if (is_over) {
ret &= (vd != vs2);
}
@@ -770,6 +832,7 @@ static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
/* Mask destination register are always tail-agnostic */
data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
+ data = FIELD_DP32(data, VDATA, VM, 1);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
@@ -787,6 +850,7 @@ static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
/* EMUL = 1, NFIELDS = 1 */
data = FIELD_DP32(data, VDATA, LMUL, 0);
data = FIELD_DP32(data, VDATA, NF, 1);
+ data = FIELD_DP32(data, VDATA, VM, 1);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}
@@ -800,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
- *** stride load and store
+ * MAXSZ returns the maximum vector size that can be operated on, in bytes,
+ * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
+ * to accelerate vector operations.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+ int max_sz = s->cfg_ptr->vlenb << 3;
+ return max_sz >> (3 - s->lmul);
+}
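
As a quick sanity check of the MAXSZ arithmetic (s->lmul holds log2(LMUL) in the range -3..3), the result is vlenb * LMUL bytes. A standalone computation, assuming VLEN = 128 bits (vlenb = 16):

#include <stdio.h>

/* Standalone mirror of MAXSZ: vlenb is VLEN/8, lmul is log2(LMUL) in -3..3. */
static unsigned maxsz(unsigned vlenb, int lmul)
{
    unsigned max_sz = vlenb << 3;   /* VLEN in bits */
    return max_sz >> (3 - lmul);    /* == vlenb * LMUL, in bytes */
}

int main(void)
{
    printf("%u\n", maxsz(16, 0));   /* LMUL=1   -> 16 bytes  */
    printf("%u\n", maxsz(16, 3));   /* LMUL=8   -> 128 bytes */
    printf("%u\n", maxsz(16, -3));  /* LMUL=1/8 -> 2 bytes   */
    return 0;
}
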
+
+static inline uint32_t get_log2(uint32_t a)
+{
+ uint32_t i = 0;
+ for (; a > 0;) {
+ a >>= 1;
+ i++;
+ }
+ return i;
+}
+
+typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
+
+/*
+ * Simulate the strided load/store main loop:
+ *
+ * for (i = env->vstart; i < env->vl; env->vstart = ++i) {
+ * k = 0;
+ * while (k < nf) {
+ * if (!vm && !vext_elem_mask(v0, i)) {
+ * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ * (i + k * max_elems + 1) * esz);
+ * k++;
+ * continue;
+ * }
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ * k++;
+ * }
+ * }
+ */
+static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
+ uint32_t rs2, uint32_t vm, uint32_t nf,
+ gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
+ bool is_load)
+{
+ TCGv addr = tcg_temp_new();
+ TCGv base = get_gpr(s, rs1, EXT_NONE);
+ TCGv stride = get_gpr(s, rs2, EXT_NONE);
+
+ TCGv i = tcg_temp_new();
+ TCGv i_esz = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv k_esz = tcg_temp_new();
+ TCGv k_max = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv mask_offs = tcg_temp_new();
+ TCGv mask_offs_64 = tcg_temp_new();
+ TCGv mask_elem = tcg_temp_new();
+ TCGv mask_offs_rem = tcg_temp_new();
+ TCGv vreg = tcg_temp_new();
+ TCGv dest_offs = tcg_temp_new();
+ TCGv stride_offs = tcg_temp_new();
+
+ uint32_t max_elems = MAXSZ(s) >> s->sew;
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_k = gen_new_label();
+ TCGLabel *inc_k = gen_new_label();
+ TCGLabel *end_k = gen_new_label();
+
+ MemOp atomicity = MO_ATOM_NONE;
+ if (s->sew == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+
+ mark_vs_dirty(s);
+
+ tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
+
+ /* Start of outer loop. */
+ tcg_gen_mov_tl(i, cpu_vstart);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
+ tcg_gen_shli_tl(i_esz, i, s->sew);
+ /* Start of inner loop. */
+ tcg_gen_movi_tl(k, 0);
+ gen_set_label(start_k);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
+ /*
+ * If we are in the mask-agnostic regime and the operation is masked, we
+ * set the inactive elements to 1.
+ */
+ if (!vm && s->vma) {
+ TCGLabel *active_element = gen_new_label();
+ /* (i + k * max_elems) * esz */
+ tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
+ tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
+
+ /*
+ * Check whether the i-th bit of the mask is 0 or 1.
+ *
+ * static inline int vext_elem_mask(void *v0, int index)
+ * {
+ * int idx = index / 64;
+ * int pos = index % 64;
+ * return (((uint64_t *)v0)[idx] >> pos) & 1;
+ * }
+ */
+ tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
+ tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
+ tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
+ tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
+ tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
+ tcg_gen_andi_tl(mask_elem, mask_elem, 1);
+ tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
+ active_element);
+ /*
+ * Set masked-off elements in the destination vector register to 1s.
+ * Store instructions simply skip this step, as memory ops access memory
+ * only for active elements.
+ */
+ if (is_load) {
+ tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
+ tcg_gen_add_tl(mask_offs, mask_offs, dest);
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
+ }
+ tcg_gen_br(inc_k);
+ gen_set_label(active_element);
+ }
+ /*
+ * The element is active, calculate the address with stride:
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ */
+ tcg_gen_mul_tl(stride_offs, stride, i);
+ tcg_gen_shli_tl(k_esz, k, s->sew);
+ tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
+ tcg_gen_add_tl(addr, base, stride_offs);
+ /* Calculate the offset in the dst/src vector register. */
+ tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
+ tcg_gen_add_tl(dest_offs, i, k_max);
+ tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
+ tcg_gen_add_tl(dest_offs, dest_offs, dest);
+ if (is_load) {
+ tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ } else {
+ ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ }
+ /*
+ * We don't execute the load/store above if the element was inactive.
+ * We jump instead directly to incrementing k and continuing the loop.
+ */
+ if (!vm && s->vma) {
+ gen_set_label(inc_k);
+ }
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start_k);
+ /* End of the inner loop. */
+ gen_set_label(end_k);
+
+ tcg_gen_addi_tl(i, i, 1);
+ tcg_gen_mov_tl(cpu_vstart, i);
+ tcg_gen_br(start);
+
+ /* End of the outer loop. */
+ gen_set_label(end);
+
+ return;
+}
+
+
+/*
+ * Set the tail bytes of the strided loads/stores to 1:
+ *
+ * for (k = 0; k < nf; ++k) {
+ * cnt = (k * max_elems + vl) * esz;
+ * tot = (k * max_elems + max_elems) * esz;
+ * for (i = cnt; i < tot; i += esz) {
+ * store_1s(-1, vd[vl+i]);
+ * }
+ * }
*/
-typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
- TCGv, TCGv_env, TCGv_i32);
+static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
+ gen_tl_ldst *st_fn)
+{
+ TCGv i = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv tail_cnt = tcg_temp_new();
+ TCGv tail_tot = tcg_temp_new();
+ TCGv tail_addr = tcg_temp_new();
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_i = gen_new_label();
+ TCGLabel *end_i = gen_new_label();
+
+ uint32_t max_elems_b = MAXSZ(s);
+ uint32_t esz = 1 << s->sew;
+
+ /* Start of the outer loop. */
+ tcg_gen_movi_tl(k, 0);
+ tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
+ tcg_gen_movi_tl(tail_tot, max_elems_b);
+ tcg_gen_add_tl(tail_addr, dest, tail_cnt);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
+ /* Start of the inner loop. */
+ tcg_gen_mov_tl(i, tail_cnt);
+ gen_set_label(start_i);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
+ /* store_1s(-1, vd[vl+i]); */
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
+ tcg_gen_addi_tl(tail_addr, tail_addr, esz);
+ tcg_gen_addi_tl(i, i, esz);
+ tcg_gen_br(start_i);
+ /* End of the inner loop. */
+ gen_set_label(end_i);
+ /* Update the counts */
+ tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start);
+ /* End of the outer loop. */
+ gen_set_label(end);
+
+ return;
+}
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
- uint32_t data, gen_helper_ldst_stride *fn,
- DisasContext *s)
+ uint32_t data, DisasContext *s, bool is_load)
{
- TCGv_ptr dest, mask;
- TCGv base, stride;
- TCGv_i32 desc;
+ if (!s->vstart_eq_zero) {
+ return false;
+ }
- dest = tcg_temp_new_ptr();
- mask = tcg_temp_new_ptr();
- base = get_gpr(s, rs1, EXT_NONE);
- stride = get_gpr(s, rs2, EXT_NONE);
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
+ TCGv dest = tcg_temp_new();
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
+ uint32_t nf = FIELD_EX32(data, VDATA, NF);
+ uint32_t vm = FIELD_EX32(data, VDATA, VM);
+
+ /* Destination register and mask register */
+ tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
+
+ /*
+ * Select the appropriate load/store to retrieve data from the vector
+ * register given a specific sew.
+ */
+ static gen_tl_ldst * const ld_fns[4] = {
+ tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
+ tcg_gen_ld32u_tl, tcg_gen_ld_tl
+ };
+
+ static gen_tl_ldst * const st_fns[4] = {
+ tcg_gen_st8_tl, tcg_gen_st16_tl,
+ tcg_gen_st32_tl, tcg_gen_st_tl
+ };
+
+ gen_tl_ldst *ld_fn = ld_fns[s->sew];
+ gen_tl_ldst *st_fn = st_fns[s->sew];
+
+ if (ld_fn == NULL || st_fn == NULL) {
+ return false;
+ }
mark_vs_dirty(s);
- fn(dest, mask, base, stride, tcg_env, desc);
+ gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
+
+ tcg_gen_movi_tl(cpu_vstart, 0);
+
+ /*
+ * Set the tail bytes to 1 if tail agnostic:
+ */
+ if (s->vta != 0 && is_load) {
+ gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
+ }
finalize_rvv_inst(s);
return true;
@@ -834,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- gen_helper_vlse8_v, gen_helper_vlse16_v,
- gen_helper_vlse32_v, gen_helper_vlse64_v
- };
-
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -851,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -869,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- /* masked stride store */
- gen_helper_vsse8_v, gen_helper_vsse16_v,
- gen_helper_vsse32_v, gen_helper_vsse64_v
- };
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -979,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
+ vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@@ -1031,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
+ vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
+ vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@@ -1098,24 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
- gen_helper_ldst_whole *fn,
- DisasContext *s)
+ uint32_t log2_esz, gen_helper_ldst_whole *fn,
+ DisasContext *s, bool is_load)
{
- TCGv_ptr dest;
- TCGv base;
- TCGv_i32 desc;
-
- uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
- dest = tcg_temp_new_ptr();
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
-
- base = get_gpr(s, rs1, EXT_NONE);
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
-
mark_vs_dirty(s);
- fn(dest, base, tcg_env, desc);
+ /*
+ * Load/store multiple bytes per iteration.
+ * When possible do this atomically.
+ * Update vstart with the number of processed elements.
+ * Use the helper function if either:
+ * - vstart is not 0.
+ * - the target has 32 bit registers and we are loading/storing 64 bit long
+ * elements. This is to ensure that we process every element with a single
+ * memory instruction.
+ */
+
+ bool use_helper_fn = !(s->vstart_eq_zero) ||
+ (TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
+
+ if (!use_helper_fn) {
+ TCGv addr = tcg_temp_new();
+ uint32_t size = s->cfg_ptr->vlenb * nf;
+ TCGv_i64 t8 = tcg_temp_new_i64();
+ TCGv_i32 t4 = tcg_temp_new_i32();
+ MemOp atomicity = MO_ATOM_NONE;
+ if (log2_esz == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+ if (TCG_TARGET_REG_BITS == 64) {
+ for (int i = 0; i < size; i += 8) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ }
+ if (i == size - 8) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
+ }
+ }
+ } else {
+ for (int i = 0; i < size; i += 4) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ }
+ if (i == size - 4) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
+ }
+ }
+ }
+ } else {
+ TCGv_ptr dest;
+ TCGv base;
+ TCGv_i32 desc;
+ uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
+ data = FIELD_DP32(data, VDATA, VM, 1);
+ dest = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
+ s->cfg_ptr->vlenb, data));
+ base = get_gpr(s, rs1, EXT_NONE);
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ fn(dest, base, tcg_env, desc);
+ }
finalize_rvv_inst(s);
return true;
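
The vstart bookkeeping in the inline path above advances by 8 >> log2_esz (or 4 >> log2_esz) elements per memory access and resets to zero on the final access. A small standalone computation of that stepping, assuming a vl8re64.v on a VLEN = 128 CPU:

#include <stdio.h>

int main(void)
{
    /* One vl8re64.v with VLEN=128: nf=8, vlenb=16 -> 128 bytes total. */
    unsigned vlenb = 16, nf = 8, size = vlenb * nf;
    unsigned log2_esz = 3;                    /* EEW=64 -> 8-byte elements */
    unsigned elems_per_step = 8 >> log2_esz;  /* one element per 64-bit op */
    unsigned vstart = 0;

    for (unsigned i = 0; i < size; i += 8) {
        /* mirror the inline path: reset on the last access, else advance */
        vstart = (i == size - 8) ? 0 : vstart + elems_per_step;
    }
    printf("steps=%u, final vstart=%u\n", size / 8, vstart);  /* 16, 0 */
    return 0;
}
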
@@ -1125,58 +1487,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
* load and store whole register instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 7.9)
*/
-#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \
-static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
-{ \
- if (require_rvv(s) && \
- QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
- return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \
- gen_helper_##NAME, s); \
- } \
- return false; \
-}
-
-GEN_LDST_WHOLE_TRANS(vl1re8_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
-GEN_LDST_WHOLE_TRANS(vl2re8_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
-GEN_LDST_WHOLE_TRANS(vl4re8_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
-GEN_LDST_WHOLE_TRANS(vl8re8_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)
+#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \
+static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
+{ \
+ if (require_rvv(s) && \
+ QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
+ return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \
+ gen_helper_##NAME, s, IS_LOAD); \
+ } \
+ return false; \
+}
+
+GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true)
/*
* The vector whole register store instructions are encoded similar to
* unmasked unit-stride store of elements with EEW=8.
*/
-GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
-GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
-GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
-GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
+GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false)
+GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false)
+GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false)
+GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
/*
*** Vector Integer Arithmetic Instructions
*/
-/*
- * MAXSZ returns the maximum vector size can be operated in bytes,
- * which is used in GVEC IR when vl_eq_vlmax flag is set to true
- * to accelerate vector operation.
- */
-static inline uint32_t MAXSZ(DisasContext *s)
-{
- int max_sz = s->cfg_ptr->vlenb * 8;
- return max_sz >> (3 - s->lmul);
-}
-
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
@@ -1472,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+/* OPIVV with overwrite and WIDEN */
+static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
gen_helper_gvec_4_ptr *fn,
bool (*checkfn)(DisasContext *, arg_rmrr *))
@@ -1519,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
@@ -1990,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
/* Vector Widening Integer Multiply-Add Instructions */
-GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check)
/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
@@ -2337,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@@ -2376,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVF with WIDEN */
-#define GEN_OPFVF_WIDEN_TRANS(NAME) \
+#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opfvf_widen_check(s, a)) { \
+ if (CHECK(s, a)) { \
uint32_t data = 0; \
static gen_helper_opfvf *const fns[2] = { \
gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
@@ -2396,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
-GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
@@ -2479,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
@@ -2500,14 +2890,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
-GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
+GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check)
/* Vector Floating-Point Square-Root Instruction */
@@ -3172,7 +3562,6 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -3257,7 +3646,6 @@ static void store_element(TCGv_i64 val, TCGv_ptr base,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -3425,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, s->lmul) &&
require_align(a->rs2, s->lmul) &&
@@ -3437,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
int8_t emul = MO_16 - s->sew + s->lmul;
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) &&
(emul >= -3 && emul <= 3) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, emul) &&
@@ -3456,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul) &&
(a->rd != a->rs2) &&
@@ -3599,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul - div) &&
require_vm(a->vm, a->rd) &&
- require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
+ require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm);
+
return ret;
}
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index ae1f401..27bf3f0 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -249,7 +249,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -322,7 +322,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -389,7 +389,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -440,7 +440,7 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
/* save opcode for unwinding in case we throw an exception */
- decode_save_opc(s);
+ decode_save_opc(s, 0);
egs = tcg_constant_i32(ZVKNH_EGS);
gen_helper_egs_check(egs, tcg_env);
}
@@ -471,7 +471,7 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
/* save opcode for unwinding in case we throw an exception */
- decode_save_opc(s);
+ decode_save_opc(s, 0);
egs = tcg_constant_i32(ZVKNH_EGS);
gen_helper_egs_check(egs, tcg_env);
}
diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc
index fcced99..15e688a 100644
--- a/target/riscv/insn_trans/trans_rvzacas.c.inc
+++ b/target/riscv/insn_trans/trans_rvzacas.c.inc
@@ -76,7 +76,7 @@ static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr_pair(ctx, a->rd, dest);
@@ -121,7 +121,7 @@ static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a)
tcg_gen_concat_i64_i128(src2, src2l, src2h);
tcg_gen_concat_i64_i128(dest, destl, desth);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx,
(MO_ALIGN | MO_TEUO));
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
index cd234ad..c77c2b9 100644
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
@@ -203,6 +203,14 @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
if (ret) {
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
+ TCGv src = tcg_temp_new();
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type);
+ }
+#endif
tcg_gen_mov_tl(cpu_pc, ret_addr);
tcg_gen_lookup_and_goto_ptr();
ctx->base.is_jmp = DISAS_NORETURN;
@@ -309,6 +317,19 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
gen_set_gpr(ctx, xRA, succ_pc);
}
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ if (a->index >= 32) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
+ } else {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
+ }
+ }
+#endif
+
+
tcg_gen_mov_tl(cpu_pc, addr);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 1eb458b..bece48e 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -48,7 +48,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = tcg_temp_new();
@@ -71,7 +71,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = tcg_temp_new();
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
new file mode 100644
index 0000000..b0096ad
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
@@ -0,0 +1,131 @@
+/*
+ * RISC-V translation routines for the Control-Flow Integrity Extension
+ *
+ * Copyright (c) 2024 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZICFISS(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zicfiss) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
+{
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv addr = tcg_temp_new();
+ TCGLabel *skip = gen_new_label();
+ uint32_t tmp = (get_xl(ctx) == MXL_RV64) ? 8 : 4;
+ TCGv data = tcg_temp_new();
+ tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ tcg_gen_qemu_ld_tl(data, addr, SS_MMU_INDEX(ctx),
+ mxl_memop(ctx) | MO_ALIGN);
+ TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ gen_set_label(skip);
+ tcg_gen_addi_tl(addr, addr, tmp);
+ tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+
+ return true;
+}
+
+static bool trans_sspush(DisasContext *ctx, arg_sspush *a)
+{
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv addr = tcg_temp_new();
+ int tmp = (get_xl(ctx) == MXL_RV64) ? -8 : -4;
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+ tcg_gen_addi_tl(addr, addr, tmp);
+ tcg_gen_qemu_st_tl(data, addr, SS_MMU_INDEX(ctx),
+ mxl_memop(ctx) | MO_ALIGN);
+ tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+
+ return true;
+}
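
For context, sspush decrements the shadow-stack pointer by XLEN/8 and stores rs2 there, while sspopchk loads from ssp, raises a software-check exception on mismatch, and advances ssp only on a match. A tiny software model of that pairing, assuming RV64; this is an illustration, not the QEMU helpers:

#include <stdint.h>
#include <assert.h>

static uint64_t shadow[64];
static uint64_t *ssp = &shadow[64];   /* shadow stack grows downward */

static void model_sspush(uint64_t ra)
{
    ssp--;                            /* ssp -= XLEN/8 (8 bytes on RV64) */
    *ssp = ra;
}

static int model_sspopchk(uint64_t ra)  /* 0 on mismatch (sw-check case) */
{
    if (*ssp != ra) {
        return 0;                     /* would raise RISCV_EXCP_SW_CHECK */
    }
    ssp++;                            /* ssp += XLEN/8 only on a match */
    return 1;
}

int main(void)
{
    model_sspush(0x80001234);
    assert(model_sspopchk(0x80001234) == 1);   /* matching return address */
    model_sspush(0x80001234);
    assert(model_sspopchk(0xdeadbeef) == 0);   /* mismatch detected */
    return 0;
}
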
+
+static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
+{
+ if (!ctx->bcfi_enabled || a->rd == 0) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ tcg_gen_ld_tl(dest, tcg_env, offsetof(CPURISCVState, ssp));
+ gen_set_gpr(ctx, a->rd, dest);
+
+ return true;
+}
+
+static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
+{
+ REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ src1 = get_address(ctx, a->rs1, 0);
+
+ tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
+ (MO_ALIGN | MO_TESL));
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ src1 = get_address(ctx, a->rs1, 0);
+
+ tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
+ (MO_ALIGN | MO_TESQ));
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc
index 0f692a1..a06c3b2 100644
--- a/target/riscv/insn_trans/trans_svinval.c.inc
+++ b/target/riscv/insn_trans/trans_svinval.c.inc
@@ -28,7 +28,7 @@ static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a)
/* Do the same as sfence.vma currently */
REQUIRE_EXT(ctx, RVS);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_tlb_flush(tcg_env);
return true;
#endif
@@ -57,7 +57,7 @@ static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a)
/* Do the same as hfence.vvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
@@ -70,7 +70,7 @@ static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a)
/* Do the same as hfence.gvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 0ac17bc..4570bd5 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -19,7 +19,10 @@
#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H
+#include "exec/cpu-common.h"
#include "hw/registerfields.h"
+#include "fpu/softfloat-types.h"
+#include "target/riscv/cpu_bits.h"
/*
* The current MMU Modes are:
@@ -30,12 +33,15 @@
* - U+2STAGE 0b100
* - S+2STAGE 0b101
* - S+SUM+2STAGE 0b110
+ * - Shadow stack+U 0b1000
+ * - Shadow stack+S 0b1001
*/
#define MMUIdx_U 0
#define MMUIdx_S 1
#define MMUIdx_S_SUM 2
#define MMUIdx_M 3
#define MMU_2STAGE_BIT (1 << 2)
+#define MMU_IDX_SS_WRITE (1 << 3)
static inline int mmuidx_priv(int mmu_idx)
{
@@ -136,7 +142,68 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
}
}
-/* Our implementation of CPUClass::has_work */
+#ifndef CONFIG_USER_ONLY
+/* Our implementation of SysemuCPUOps::has_work */
bool riscv_cpu_has_work(CPUState *cs);
+#endif
+
+/* Zjpm addr masking routine */
+static inline target_ulong adjust_addr_body(CPURISCVState *env,
+ target_ulong addr,
+ bool is_virt_addr)
+{
+ RISCVPmPmm pmm = PMM_FIELD_DISABLED;
+ uint32_t pmlen = 0;
+ bool signext = false;
+
+ /* do nothing for rv32 mode */
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ return addr;
+ }
+
+ /* get the pmm field depending on whether addr is a virtual address */
+ if (is_virt_addr) {
+ pmm = riscv_pm_get_virt_pmm(env);
+ } else {
+ pmm = riscv_pm_get_pmm(env);
+ }
+
+ /* if pointer masking is disabled, return original addr */
+ if (pmm == PMM_FIELD_DISABLED) {
+ return addr;
+ }
+
+ if (!is_virt_addr) {
+ signext = riscv_cpu_virt_mem_enabled(env);
+ }
+ pmlen = riscv_pm_get_pmlen(pmm);
+ addr = addr << pmlen;
+
+ /* sign/zero extend masked address by N-1 bit */
+ if (signext) {
+ addr = (target_long)addr >> pmlen;
+ } else {
+ addr = addr >> pmlen;
+ }
+
+ return addr;
+}
+
+static inline target_ulong adjust_addr(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, false);
+}
+
+static inline target_ulong adjust_addr_virt(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, true);
+}
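
The masking above works by shifting the top pmlen bits out and refilling them either arithmetically (sign extension) or logically (zero extension). A self-contained illustration, assuming a 64-bit address and pmlen = 16:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t mask_addr(uint64_t addr, unsigned pmlen, int signext)
{
    addr <<= pmlen;                    /* drop the top pmlen (tag) bits */
    return signext ? (uint64_t)((int64_t)addr >> pmlen) : addr >> pmlen;
}

int main(void)
{
    uint64_t tagged = 0xab00ffff12345678ULL;   /* tag in the top 16 bits */

    /* zero-extend: tag cleared */
    printf("%" PRIx64 "\n", mask_addr(tagged, 16, 0)); /* 0000ffff12345678 */
    /* sign-extend: bit 47 of the untagged address is replicated upward */
    printf("%" PRIx64 "\n", mask_addr(tagged, 16, 1)); /* ffffffff12345678 */
    return 0;
}
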
+
+static inline int insn_len(uint16_t first_word)
+{
+ return (first_word & 3) == 3 ? 4 : 2;
+}
#endif
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index f6e3156..e1a04be 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -27,15 +27,15 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
#include "cpu.h"
#include "trace.h"
-#include "hw/core/accel-cpu.h"
+#include "accel/accel-cpu-target.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
@@ -45,7 +45,7 @@
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/misc.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/riscv/numa.h"
#define PR_RISCV_V_SET_CONTROL 69
@@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level)
static bool cap_has_mp_state;
-static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
- uint64_t idx)
-{
- uint64_t id = KVM_REG_RISCV | type | idx;
+#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
+ type | idx)
- switch (riscv_cpu_mxl(env)) {
- case MXL_RV32:
- id |= KVM_REG_SIZE_U32;
- break;
- case MXL_RV64:
- id |= KVM_REG_SIZE_U64;
- break;
- default:
- g_assert_not_reached();
- }
- return id;
-}
+#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
+ type | idx)
-static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
-}
-
-static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
-}
+#if defined(TARGET_RISCV64)
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
+#else
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
+#endif
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
@@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
return kvm_encode_reg_size_id(id, size_b);
}
-#define RISCV_CORE_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
+#define RISCV_CORE_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
-#define RISCV_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
+#define RISCV_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
KVM_REG_RISCV_CSR_REG(name))
-#define RISCV_CONFIG_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
+#define RISCV_CONFIG_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
KVM_REG_RISCV_CONFIG_REG(name))
-#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
+#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
KVM_REG_RISCV_TIMER_REG(name))
-#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)
+#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
-#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)
+#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
-#define RISCV_VECTOR_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
+#define RISCV_VECTOR_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
KVM_REG_RISCV_VECTOR_CSR_REG(name))
-#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
-#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
@@ -167,6 +135,7 @@ typedef struct KVMCPUConfig {
const char *description;
target_ulong offset;
uint64_t kvm_reg_id;
+ uint32_t prop_size;
bool user_set;
bool supported;
} KVMCPUConfig;
@@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
/* If we're here we're going to disable the MISA bit */
reg = 0;
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
@@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
}
}
+#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
+ {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
+ .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
+ .kvm_reg_id = reg_id}
+
+static KVMCPUConfig kvm_csr_cfgs[] = {
+ KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
+ KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
+ KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
+ KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
+ KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
+ KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
+ KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
+ KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
+ KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
+ KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
+ KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
+};
+
+static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ return (void *)&cpu->env + csr_cfg->offset;
+}
+
+static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val32;
+}
+
+static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val64;
+}
+
+static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint32_t val)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val32 = val;
+}
+
+static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint64_t val)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val64 = val;
+}
+
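A rough standalone sketch (hypothetical struct and field names, not the QEMU types) of the offset/prop_size dispatch the accessors above implement: each table entry records offsetof() and sizeof() of its env field, and the getter picks the access width at run time:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct env { uint64_t satp; uint32_t scounteren; };
struct cfg { size_t offset; size_t prop_size; };

static uint64_t get_csr(struct env *e, const struct cfg *c)
{
    void *p = (char *)e + c->offset;        /* same pointer trick as above */
    if (c->prop_size == sizeof(uint32_t)) {
        return *(uint32_t *)p;
    }
    assert(c->prop_size == sizeof(uint64_t));
    return *(uint64_t *)p;
}

int main(void)
{
    struct env e = { .satp = 0x1234, .scounteren = 7 };
    struct cfg satp = { offsetof(struct env, satp), sizeof(e.satp) };
    struct cfg scnt = { offsetof(struct env, scounteren), sizeof(e.scounteren) };
    printf("%llu %llu\n", (unsigned long long)get_csr(&e, &satp),
           (unsigned long long)get_csr(&e, &scnt));
    return 0;
}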
#define KVM_EXT_CFG(_name, _prop, _reg_id) \
{.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
.kvm_reg_id = _reg_id}
@@ -274,6 +293,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+ KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
@@ -281,7 +301,11 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
+ KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
+ KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
+ KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
+ KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
@@ -292,6 +316,10 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
+ KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
+ KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
+ KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
+ KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
@@ -312,12 +340,18 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
+ KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
+ KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
+ KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
+ KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
+ KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
+ KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
};
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
@@ -419,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = {
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
uint64_t id, reg;
int i, ret;
@@ -430,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
continue;
}
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, &reg);
@@ -555,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
target_ulong reg;
CPURISCVState *env = &RISCV_CPU(cs)->env;
- ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
env->pc = reg;
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, &reg);
if (ret) {
return ret;
@@ -581,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
CPURISCVState *env = &RISCV_CPU(cs)->env;
reg = env->pc;
- ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, &reg);
if (ret) {
@@ -600,38 +633,79 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
- KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
+ } else {
+ g_assert_not_reached();
+ }
+ }
return 0;
}
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
- KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
+ } else {
+ g_assert_not_reached();
+ }
+
+ ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
return 0;
}
+static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
+{
+ env->mstatus = 0;
+ env->mie = 0;
+ env->stvec = 0;
+ env->sscratch = 0;
+ env->sepc = 0;
+ env->scause = 0;
+ env->stval = 0;
+ env->mip = 0;
+ env->satp = 0;
+ env->scounteren = 0;
+ env->senvcfg = 0;
+}
+
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
int ret = 0;
@@ -751,11 +825,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
uint64_t reg;
- KVM_RISCV_GET_TIMER(cs, frequency, reg);
+ KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
return reg;
}
@@ -772,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs)
return 0;
}
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
env->vstart = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
env->vl = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
env->vtype = reg;
if (kvm_v_vlenb.supported) {
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
if (ret) {
return ret;
}
@@ -829,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs)
}
reg = env->vstart;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
reg = env->vl;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
reg = env->vtype;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
if (kvm_v_vlenb.supported) {
reg = cpu->cfg.vlenb;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
for (int i = 0; i < 32; i++) {
/*
@@ -927,25 +1001,24 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, mvendorid);
+ reg.id = RISCV_CONFIG_REG(mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, marchid);
+ reg.id = RISCV_CONFIG_REG(marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, mimpid);
+ reg.id = RISCV_CONFIG_REG(mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@@ -960,7 +1033,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, isa);
+ reg.id = RISCV_CONFIG_REG(isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -977,11 +1050,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
KVMCPUConfig *cbomz_cfg)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -995,7 +1067,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
uint64_t val;
int i, ret;
@@ -1003,7 +1074,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -1033,6 +1104,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
}
}
+static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
+{
+ uint64_t val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+ struct kvm_one_reg reg;
+
+ reg.id = csr_cfg->kvm_reg_id;
+ reg.addr = (uint64_t)&val;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ if (errno == EINVAL) {
+ csr_cfg->supported = false;
+ } else {
+ error_report("Unable to read KVM CSR %s: %s",
+ csr_cfg->name, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ csr_cfg->supported = true;
+ }
+ }
+}
+
static int uint64_cmp(const void *a, const void *b)
{
uint64_t val1 = *(const uint64_t *)a;
@@ -1050,7 +1147,6 @@ static int uint64_cmp(const void *a, const void *b)
}
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
- KVMScratchCPU *kvmcpu,
struct kvm_reg_list *reglist)
{
struct kvm_reg_list *reg_search;
@@ -1090,12 +1186,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
}
}
-static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
{
+ struct kvm_reg_list *reg_search;
+ uint64_t reg_id;
+
+ for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ reg_id = csr_cfg->kvm_reg_id;
+ reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
+ sizeof(uint64_t), uint64_cmp);
+ if (!reg_search) {
+ continue;
+ }
+
+ csr_cfg->supported = true;
+ }
+}
+
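A small standalone illustration of the lookup pattern used by kvm_riscv_read_csr_cfg() above: the ids returned by KVM_GET_REG_LIST are sorted once (the qsort happens outside this hunk) and each id of interest is then probed with bsearch() and the same uint64 comparator; the values below are made up, not real KVM register ids:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int uint64_cmp(const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    uint64_t regs[] = { 0x30, 0x10, 0x20 };  /* pretend KVM_GET_REG_LIST output */
    uint64_t want = 0x20;

    qsort(regs, 3, sizeof(uint64_t), uint64_cmp);
    printf("supported: %s\n",
           bsearch(&want, regs, 3, sizeof(uint64_t), uint64_cmp) ? "yes" : "no");
    return 0;
}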
+static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ g_autofree struct kvm_reg_list *reglist = NULL;
KVMCPUConfig *multi_ext_cfg;
struct kvm_one_reg reg;
struct kvm_reg_list rl_struct;
- struct kvm_reg_list *reglist;
uint64_t val, reg_id, *reg_search;
int i, ret;
@@ -1107,7 +1222,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
* (EINVAL). Use read_legacy() in this case.
*/
if (errno == EINVAL) {
- return kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_csr_cfg_legacy(kvmcpu);
+ return;
} else if (errno != E2BIG) {
/*
* E2BIG is an expected error message for the API since we
@@ -1136,7 +1253,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
- reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
+ reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
@@ -1169,7 +1286,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
}
- kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist);
+ kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
+ kvm_riscv_read_csr_cfg(reglist);
}
static void riscv_init_kvm_registers(Object *cpu_obj)
@@ -1183,7 +1301,7 @@ static void riscv_init_kvm_registers(Object *cpu_obj)
kvm_riscv_init_machine_ids(cpu, &kvmcpu);
kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
- kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);
+ kvm_riscv_init_cfg(cpu, &kvmcpu);
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
@@ -1192,7 +1310,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
int ret = 0;
@@ -1237,7 +1355,7 @@ int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
return 0;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
int ret = 0;
@@ -1315,12 +1433,11 @@ void kvm_arch_init_irq_routing(KVMState *s)
static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
target_ulong reg;
uint64_t id;
int ret;
- id = RISCV_CONFIG_REG(env, mvendorid);
+ id = RISCV_CONFIG_REG(mvendorid);
/*
* cfg.mvendorid is an uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@@ -1332,13 +1449,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
return ret;
}
- id = RISCV_CONFIG_REG(env, marchid);
+ id = RISCV_CONFIG_REG(marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
- id = RISCV_CONFIG_REG(env, mimpid);
+ id = RISCV_CONFIG_REG(mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@@ -1355,6 +1472,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret = 0;
@@ -1401,11 +1523,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int kvm_arch_irqchip_create(KVMState *s)
{
- if (kvm_kernel_irqchip_split()) {
- error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
- exit(1);
- }
-
/*
* We can create the VAIA using the newer device control API.
*/
@@ -1601,23 +1718,14 @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
CPURISCVState *env = &cpu->env;
int i;
- if (!kvm_enabled()) {
- return;
- }
for (i = 0; i < 32; i++) {
env->gpr[i] = 0;
}
env->pc = cpu->env.kernel_addr;
env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
env->gpr[11] = cpu->env.fdt_addr; /* a1 */
- env->satp = 0;
- env->mie = 0;
- env->stvec = 0;
- env->sscratch = 0;
- env->sepc = 0;
- env->scause = 0;
- env->stval = 0;
- env->mip = 0;
+
+ kvm_riscv_reset_regs_csr(env);
}
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
@@ -1676,9 +1784,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc)
object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
riscv_set_kvm_aia);
object_class_property_set_description(oc, "riscv-aia",
- "Set KVM AIA mode. Valid values are "
- "emul, hwaccel, and auto. Default "
- "is auto.");
+ "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
+ "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
+ "if the host supports it");
object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
"auto");
}
@@ -1695,6 +1803,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
uint64_t max_hart_per_socket = 0;
uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
uint64_t socket_bits, hart_bits, guest_bits;
+ uint64_t max_group_id;
aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
@@ -1710,28 +1819,46 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
error_report("KVM AIA: failed to get current KVM AIA mode");
exit(1);
}
- qemu_log("KVM AIA: default mode is %s\n",
- kvm_aia_mode_str(default_aia_mode));
if (default_aia_mode != aia_mode) {
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_MODE,
&aia_mode, true, NULL);
- if (ret < 0)
- warn_report("KVM AIA: failed to set KVM AIA mode");
- else
- qemu_log("KVM AIA: set current mode to %s\n",
- kvm_aia_mode_str(aia_mode));
- }
+ if (ret < 0) {
+ warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
+ "default host mode '%s'",
+ kvm_aia_mode_str(aia_mode),
+ kvm_aia_mode_str(default_aia_mode));
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
- KVM_DEV_RISCV_AIA_CONFIG_SRCS,
- &aia_irq_num, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set number of input irq lines");
- exit(1);
+ /* failed to change AIA mode, use default */
+ aia_mode = default_aia_mode;
+ }
}
+ /*
+ * Skip APLIC creation in KVM if we're running split mode.
+ * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
+ * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
+ * since KVM won't be using it.
+ */
+ if (!kvm_kernel_irqchip_split()) {
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
+ KVM_DEV_RISCV_AIA_CONFIG_SRCS,
+ &aia_irq_num, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set number of input irq lines");
+ exit(1);
+ }
+
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
+ KVM_DEV_RISCV_AIA_ADDR_APLIC,
+ &aplic_base, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set the base address of APLIC");
+ exit(1);
+ }
+ }
+
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_IDS,
&aia_msi_num, true, NULL);
@@ -1742,7 +1869,8 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
if (socket_count > 1) {
- socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
+ max_group_id = socket_count - 1;
+ socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
&socket_bits, true, NULL);
@@ -1770,14 +1898,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
exit(1);
}
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
- KVM_DEV_RISCV_AIA_ADDR_APLIC,
- &aplic_base, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set the base address of APLIC");
- exit(1);
- }
-
for (socket = 0; socket < socket_count; socket++) {
socket_imsic_base = imsic_base + socket * (1U << group_shift);
hart_count = riscv_socket_hart_count(machine, socket);
@@ -1890,7 +2010,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicbom &&
riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cbom_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1909,7 +2029,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicboz &&
riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cboz_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1950,7 +2070,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
-static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
@@ -1971,22 +2091,25 @@ static void kvm_cpu_accel_register_types(void)
}
type_init(kvm_cpu_accel_register_types);
-static void riscv_host_cpu_class_init(ObjectClass *c, void *data)
-{
- RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
-
-#if defined(TARGET_RISCV32)
- mcc->misa_mxl_max = MXL_RV32;
-#elif defined(TARGET_RISCV64)
- mcc->misa_mxl_max = MXL_RV64;
-#endif
-}
-
static const TypeInfo riscv_kvm_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU_HOST,
.parent = TYPE_RISCV_CPU,
- .class_init = riscv_host_cpu_class_init,
+#if defined(TARGET_RISCV32)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV32,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#elif defined(TARGET_RISCV64)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV64,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#endif
}
};
diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h
index 5851898..b2bcd10 100644
--- a/target/riscv/kvm/kvm_riscv.h
+++ b/target/riscv/kvm/kvm_riscv.h
@@ -19,6 +19,8 @@
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
+#include "target/riscv/cpu-qom.h"
+
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
@@ -28,6 +30,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs);
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu);
#endif
diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c
index ec14aaa..7d9b83b 100644
--- a/target/riscv/m128_helper.c
+++ b/target/riscv/m128_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
target_ulong HELPER(divu_i128)(CPURISCVState *env,
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 492c2c6..c97e9ce 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -19,9 +19,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "migration/cpu.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "debug.h"
static bool pmp_needed(void *opaque)
@@ -152,25 +152,15 @@ static const VMStateDescription vmstate_vector = {
static bool pointermasking_needed(void *opaque)
{
- RISCVCPU *cpu = opaque;
- CPURISCVState *env = &cpu->env;
-
- return riscv_has_ext(env, RVJ);
+ return false;
}
static const VMStateDescription vmstate_pointermasking = {
.name = "cpu/pointer_masking",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.needed = pointermasking_needed,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL(env.mmte, RISCVCPU),
- VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
- VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
- VMSTATE_UINTTL(env.spmmask, RISCVCPU),
- VMSTATE_UINTTL(env.spmbase, RISCVCPU),
- VMSTATE_UINTTL(env.upmmask, RISCVCPU),
- VMSTATE_UINTTL(env.upmbase, RISCVCPU),
VMSTATE_END_OF_LIST()
}
@@ -180,7 +170,7 @@ static bool rv128_needed(void *opaque)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);
- return mcc->misa_mxl_max == MXL_RV128;
+ return mcc->def->misa_mxl_max == MXL_RV128;
}
static const VMStateDescription vmstate_rv128 = {
@@ -266,7 +256,6 @@ static int riscv_cpu_post_load(void *opaque, int version_id)
CPURISCVState *env = &cpu->env;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
return 0;
}
@@ -311,6 +300,30 @@ static const VMStateDescription vmstate_envcfg = {
}
};
+static bool ctr_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
+}
+
+static const VMStateDescription vmstate_ctr = {
+ .name = "cpu/ctr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ctr_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(env.mctrctl, RISCVCPU),
+ VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
+ VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
+ VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
+ VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool pmu_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
@@ -350,6 +363,42 @@ static const VMStateDescription vmstate_jvt = {
}
};
+static bool elp_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_zicfilp;
+}
+
+static const VMStateDescription vmstate_elp = {
+ .name = "cpu/elp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = elp_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(env.elp, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool ssp_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_zicfiss;
+}
+
+static const VMStateDescription vmstate_ssp = {
+ .name = "cpu/ssp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ssp_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINTTL(env.ssp, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
.version_id = 10,
@@ -398,6 +447,7 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINT32(env.scounteren, RISCVCPU),
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
+ VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
vmstate_pmu_ctr_state, PMUCTRState),
@@ -422,6 +472,9 @@ const VMStateDescription vmstate_riscv_cpu = {
&vmstate_debug,
&vmstate_smstateen,
&vmstate_jvt,
+ &vmstate_elp,
+ &vmstate_ssp,
+ &vmstate_ctr,
NULL
}
};
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index f5b1ffe..100005e 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -184,7 +184,6 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env)
break;
default:
g_assert_not_reached();
- break;
}
/* calculate virtual address bits */
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 25a5263..557807b 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -21,15 +21,24 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
+#include "trace.h"
/* Exceptions processing helpers */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc)
+ RISCVException exception,
+ uintptr_t pc)
{
CPUState *cs = env_cpu(env);
+
+ trace_riscv_exception(exception,
+ riscv_cpu_get_trap_name(exception, false),
+ env->pc);
+
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -62,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
- RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -73,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
target_ulong src, target_ulong write_mask)
{
target_ulong val = 0;
- RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);
+ RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -99,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
{
RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
int128_make128(srcl, srch),
- UINT128_MAX);
+ UINT128_MAX, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -107,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
}
target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
- target_ulong srcl, target_ulong srch,
- target_ulong maskl, target_ulong maskh)
+ target_ulong srcl, target_ulong srch,
+ target_ulong maskl, target_ulong maskh)
{
Int128 rv = int128_zero();
RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
int128_make128(srcl, srch),
- int128_make128(maskl, maskh));
+ int128_make128(maskl, maskh),
+ GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -263,13 +273,17 @@ target_ulong helper_sret(CPURISCVState *env)
{
uint64_t mstatus;
target_ulong prev_priv, prev_virt = env->virt_enabled;
+ const target_ulong src_priv = env->priv;
+ const bool src_virt = env->virt_enabled;
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
target_ulong retpc = env->sepc;
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
@@ -287,6 +301,21 @@ target_ulong helper_sret(CPURISCVState *env)
get_field(mstatus, MSTATUS_SPIE));
mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
+ prev_priv == PRV_U;
+ /* Returning to VU from HS, vsstatus.sdt = 0 */
+ if (!env->virt_enabled && prev_vu) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -297,7 +326,6 @@ target_ulong helper_sret(CPURISCVState *env)
target_ulong hstatus = env->hstatus;
prev_virt = get_field(hstatus, HSTATUS_SPV);
-
hstatus = set_field(hstatus, HSTATUS_SPV, 0);
env->hstatus = hstatus;
@@ -309,27 +337,65 @@ target_ulong helper_sret(CPURISCVState *env)
riscv_cpu_set_mode(env, prev_priv, prev_virt);
+ /*
+ * If forward cfi is enabled for the new priv, restore the elp
+ * status and clear spelp in mstatus.
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mstatus, MSTATUS_SPELP);
+ }
+ env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
+ src_priv, src_virt);
+ }
+
return retpc;
}
-target_ulong helper_mret(CPURISCVState *env)
+static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
+ target_ulong prev_priv)
{
if (!(env->priv >= PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
- target_ulong retpc = env->mepc;
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
- uint64_t mstatus = env->mstatus;
- target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
-
if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
+}
+static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
+ target_ulong prev_priv,
+ target_ulong prev_virt)
+{
+ /* If returning to U, VS or VU, sstatus.sdt = 0 */
+ if (prev_priv == PRV_U || (prev_virt &&
+ (prev_priv == PRV_S || prev_priv == PRV_U))) {
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ /* If returning to VU, vsstatus.sdt = 0 */
+ if (prev_virt && prev_priv == PRV_U) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+
+ return mstatus;
+}
+
+target_ulong helper_mret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mepc;
+ uint64_t mstatus = env->mstatus;
+ target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
(prev_priv != PRV_M);
@@ -339,6 +405,12 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPP,
riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -349,10 +421,106 @@ target_ulong helper_mret(CPURISCVState *env)
}
riscv_cpu_set_mode(env, prev_priv, prev_virt);
+ /*
+ * If forward cfi is enabled for the new priv, restore the elp
+ * status and clear mpelp in mstatus.
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mstatus, MSTATUS_MPELP);
+ }
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
+ PRV_M, false);
+ }
+
+ return retpc;
+}
+
+target_ulong helper_mnret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mnepc;
+ target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
+ target_ulong prev_virt;
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
+
+ prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
+ (prev_priv != PRV_M);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);
+
+ /*
+ * If MNRET changes the privilege mode to a mode
+ * less privileged than M, it also sets mstatus.MPRV to 0.
+ */
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
+ }
+ }
+
+ if (riscv_has_ext(env, RVH) && prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
+
+ /*
+ * If forward cfi is enabled for the new priv, restore the elp
+ * status and clear mnpelp in mnstatus.
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
+ }
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);
return retpc;
}
+void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
+ target_ulong dest, target_ulong type)
+{
+ riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
+ env->priv, env->virt_enabled);
+}
+
+void helper_ctr_clear(CPURISCVState *env)
+{
+ /*
+ * It's safe to call smstateen_acc_ok() for umode access regardless of the
+ * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit
+ * is zero, smstateen_acc_ok() will return the correct exception code and
+ * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that
+ * scenario the U-mode check below will handle that case.
+ */
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ if (env->priv == PRV_U) {
+ /*
+ * One corner case is when sctrclr is executed from VU-mode and
+ * mstateen.CTR = 0, in which case we are supposed to raise
+ * RISCV_EXCP_ILLEGAL_INST. This case is already handled in
+ * smstateen_acc_ok().
+ */
+ uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
+ RISCV_EXCP_ILLEGAL_INST;
+ riscv_raise_exception(env, excep, GETPC());
+ }
+
+ riscv_ctr_clear(env);
+}
+
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
@@ -455,7 +623,7 @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- return cpu_ldb_mmu(env, addr, oi, ra);
+ return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
@@ -464,7 +632,7 @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
+ return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
@@ -473,7 +641,7 @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
+ return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
@@ -482,7 +650,7 @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
+ return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -491,7 +659,7 @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- cpu_stb_mmu(env, addr, val, oi, ra);
+ cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -500,7 +668,7 @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
+ cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -509,7 +677,7 @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
+ cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -518,7 +686,7 @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
+ cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
/*
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 9eea397..5af295e 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -24,14 +24,24 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
/*
+ * Convert the PMP permissions to match the truth table in the Smepmp spec.
+ */
+static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
+{
+ return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
+ (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
+}
+
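A standalone worked example of the conversion above, assuming the architectural pmpcfg bit layout (R = bit 0, W = bit 1, X = bit 2, L = bit 7): the result packs L|R|W|X into the 4-bit row index of the Smepmp truth table that pmp_is_invalid_smepmp_cfg() below switches on:

#include <stdint.h>
#include <stdio.h>

#define PMP_R 0x01
#define PMP_W 0x02
#define PMP_X 0x04
#define PMP_L 0x80

static uint8_t smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_L) >> 4) | ((cfg & PMP_R) << 2) |
           (cfg & PMP_W) | ((cfg & PMP_X) >> 2);
}

int main(void)
{
    printf("%u\n", smepmp_operation(PMP_L | PMP_R | PMP_W)); /* 14: allowed */
    printf("%u\n", smepmp_operation(PMP_L | PMP_X));         /* 9: rejected when MML is set */
    return 0;
}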
+/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
static inline uint8_t pmp_get_a_field(uint8_t cfg)
@@ -45,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- return 0;
- }
-
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
}
- /* Top PMP has no 'next' to check */
- if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+ return 0;
+}
+
+/*
+ * Check whether a PMP is locked for writing or not.
+ * (i.e. has LOCK flag and mseccfg.RLB is unset)
+ */
+static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
+{
+ return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
+}
+
+/*
+ * Check whether `val` is an invalid Smepmp config value
+ */
+static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
+{
+ /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
+ if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
return 0;
}
- return 0;
+ /*
+ * Adding a rule with executable privileges that either is M-mode-only
+ * or a locked Shared-Region is not possible
+ */
+ switch (pmp_get_smepmp_operation(val)) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 12:
+ case 14:
+ case 15:
+ return 0;
+ case 9:
+ case 10:
+ case 11:
+ case 13:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
}
/*
@@ -90,45 +137,18 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- bool locked = true;
-
- if (riscv_cpu_cfg(env)->ext_smepmp) {
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- locked = false;
- }
-
- /* mseccfg.MML is not set */
- if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
-
- /* mseccfg.MML is set */
- if (MSECCFG_MML_ISSET(env)) {
- /* not adding execute bit */
- if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
- locked = false;
- }
- /* shared region and not adding X bit */
- if ((val & PMP_LOCK) != PMP_LOCK &&
- (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
- locked = false;
- }
- }
- } else {
- if (!pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
+ if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
+ /* no change */
+ return false;
}
- if (locked) {
- qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
- } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
- /* If !mseccfg.MML then ignore writes with encoding RW=01 */
- if ((val & PMP_WRITE) && !(val & PMP_READ) &&
- !MSECCFG_MML_ISSET(env)) {
- return false;
- }
+ if (pmp_is_readonly(env, pmp_index)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - read only\n");
+ } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - invalid\n");
+ } else {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
return true;
@@ -326,7 +346,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
*/
pmp_size = -(addr | TARGET_PAGE_MASK);
} else {
- pmp_size = sizeof(target_ulong);
+ pmp_size = 2 << riscv_cpu_mxl(env);
}
} else {
pmp_size = size;
@@ -352,16 +372,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
- /*
- * Convert the PMP permissions to match the truth table in the
- * Smepmp spec.
- */
- const uint8_t smepmp_operation =
- ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
- (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
-
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
* If the PMP entry is not off and the address is in range,
@@ -380,6 +390,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
/*
* If mseccfg.MML Bit set, do the enhanced pmp priv check
*/
+ const uint8_t smepmp_operation =
+ pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
+
if (mode == PRV_M) {
switch (smepmp_operation) {
case 0:
@@ -516,6 +529,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
bool is_next_cfg_tor = false;
if (addr_index < MAX_RISCV_PMPS) {
+ if (env->pmp_state.pmp[addr_index].addr_reg == val) {
+ /* no change */
+ return;
+ }
+
/*
* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next).
@@ -524,25 +542,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
- if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
+ if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ "ignoring pmpaddr write - pmpcfg+1 read only\n");
return;
}
}
- if (!pmp_is_locked(env, addr_index)) {
- if (env->pmp_state.pmp[addr_index].addr_reg != val) {
- env->pmp_state.pmp[addr_index].addr_reg = val;
- pmp_update_rule_addr(env, addr_index);
- if (is_next_cfg_tor) {
- pmp_update_rule_addr(env, addr_index + 1);
- }
- tlb_flush(env_cpu(env));
+ if (!pmp_is_readonly(env, addr_index)) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule_addr(env, addr_index);
+ if (is_next_cfg_tor) {
+ pmp_update_rule_addr(env, addr_index + 1);
}
+ tlb_flush(env_cpu(env));
} else {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - locked\n");
+ "ignoring pmpaddr write - read only\n");
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -575,6 +591,13 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
int i;
+ uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (riscv_cpu_cfg(env)->ext_smmpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MSECCFG_PMM;
+ }
trace_mseccfg_csr_write(env->mhartid, val);
@@ -590,12 +613,18 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
if (riscv_cpu_cfg(env)->ext_smepmp) {
/* Sticky bits */
- val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
- if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
+ val |= (env->mseccfg & mask);
+ if ((val ^ env->mseccfg) & mask) {
tlb_flush(env_cpu(env));
}
} else {
- val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
+ mask |= MSECCFG_RLB;
+ val &= ~(mask);
+ }
+
+ /* M-mode forward cfi to be enabled if cfi extension is implemented */
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ val |= (val & MSECCFG_MLPE);
}
env->mseccfg = val;
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index f5c10ce..271cf24 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -44,7 +44,9 @@ typedef enum {
MSECCFG_MMWP = 1 << 1,
MSECCFG_RLB = 1 << 2,
MSECCFG_USEED = 1 << 8,
- MSECCFG_SSEED = 1 << 9
+ MSECCFG_SSEED = 1 << 9,
+ MSECCFG_MLPE = 1 << 10,
+ MSECCFG_PMM = 3ULL << 32,
} mseccfg_field_t;
typedef struct {
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 3cc0b36..a68809e 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -22,8 +22,8 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/device_tree.h"
+#include "exec/icount.h"
+#include "system/device_tree.h"
#define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */
@@ -204,6 +204,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env,
}
if (env->virt_enabled) {
+ g_assert(env->priv <= PRV_S);
counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
@@ -212,6 +213,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env,
}
if (new_virt) {
+ g_assert(newpriv <= PRV_S);
snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
@@ -242,6 +244,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
}
if (env->virt_enabled) {
+ g_assert(env->priv <= PRV_S);
counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
@@ -250,6 +253,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
}
if (new_virt) {
+ g_assert(newpriv <= PRV_S);
snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
@@ -386,7 +390,7 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
* Expected mhpmevent value is zero for reset case. Remove the current
* mapping.
*/
- if (!value) {
+ if (!(value & MHPMEVENT_IDX_MASK)) {
g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
pmu_remove_event_map,
GUINT_TO_POINTER(ctr_idx));
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index d363dc3..8ba8aa0 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -25,14 +25,14 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "cpu-qom.h"
#include "cpu.h"
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index b8814ab..55fd9e5 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -18,9 +18,10 @@
*/
#include "qemu/osdep.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
@@ -29,11 +30,13 @@
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
-#include "hw/core/accel-cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/accel-cpu-target.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
+#include "system/tcg.h"
+#include "exec/icount.h"
#endif
/* Hash that stores user set extensions */
@@ -90,6 +93,108 @@ static const char *cpu_priv_ver_to_str(int priv_ver)
return priv_spec_str;
}
+static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return riscv_env_mmu_index(cpu_env(cs), ifetch);
+}
+
+static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
+{
+ CPURISCVState *env = cpu_env(cs);
+ RISCVCPU *cpu = env_archcpu(env);
+ RISCVExtStatus fs, vs;
+ uint32_t flags = 0;
+ bool pm_signext = riscv_cpu_virt_mem_enabled(env);
+
+ if (cpu->cfg.ext_zve32x) {
+ /*
+ * If env->vl is equal to VLMAX, we can use the generic vector
+ * operation expanders (GVEC) to accelerate the vector operations.
+ * However, as LMUL can be a fractional number, the maximum vector
+ * size that can be operated on might be less than 8 bytes, which is
+ * not supported by GVEC. So we set the vl_eq_vlmax flag to true only
+ * when maxsz >= 8 bytes.
+ */
+
+ /* lmul encoded as in DisasContext::lmul */
+ int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
+ uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
+ uint32_t maxsz = vlmax << vsew;
+ bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
+ (maxsz >= 8);
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
+ flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
+ flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+ FIELD_EX64(env->vtype, VTYPE, VLMUL));
+ flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+ flags = FIELD_DP32(flags, TB_FLAGS, VTA,
+ FIELD_EX64(env->vtype, VTYPE, VTA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VMA,
+ FIELD_EX64(env->vtype, VTYPE, VMA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
+ } else {
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ }
+
+ if (cpu_get_fcfien(env)) {
+ /*
+ * For Forward CFI, only the expectation of a lpad at
+ * the start of the block is tracked via env->elp. env->elp
+ * is turned on during jalr translation.
+ */
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
+ }
+
+ if (cpu_get_bcfien(env)) {
+ flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ fs = EXT_STATUS_DIRTY;
+ vs = EXT_STATUS_DIRTY;
+#else
+ flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
+
+ flags |= riscv_env_mmu_index(env, 0);
+ fs = get_field(env->mstatus, MSTATUS_FS);
+ vs = get_field(env->mstatus, MSTATUS_VS);
+
+ if (env->virt_enabled) {
+ flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
+ /*
+ * Merge DISABLED and !DIRTY states using MIN.
+ * We will set both fields when dirtying.
+ */
+ fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
+ vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ }
+
+ /* With Zfinx, floating point is enabled/disabled by Smstateen. */
+ if (!riscv_has_ext(env, RVF)) {
+ fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
+ ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
+ }
+
+ if (cpu->cfg.debug && !icount_enabled()) {
+ flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ }
+#endif
+
+ flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
+ flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
+
+ return (TCGTBCPUState){
+ .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
+ .flags = flags
+ };
+}
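The vl_eq_vlmax condition above is easier to see with concrete numbers. A minimal sketch, assuming VLEN = 128 bits (vlenb = 16) and SEW = 32 (vsew = 2); gvec_eligible is a hypothetical helper that only mirrors the shifts used in riscv_get_tb_cpu_state, with vlmax derived from the usual VLMAX = LMUL * VLEN / SEW rule:

#include <stdbool.h>
#include <stdint.h>

/* maxsz is the byte size of one whole vector group of vlmax elements. */
static bool gvec_eligible(uint32_t vlenb, uint32_t vsew, int8_t lmul,
                          uint32_t vstart, uint32_t vl)
{
    uint32_t vlmax = (lmul < 0 ? vlenb >> -lmul : vlenb << lmul) >> vsew;
    uint32_t maxsz = vlmax << vsew;

    return vstart == 0 && vl == vlmax && maxsz >= 8;
}

/*
 * With vlenb = 16 and vsew = 2 (SEW = 32):
 *   lmul =  0 (LMUL = 1)   -> vlmax = 4, maxsz = 16 bytes: GVEC may be used
 *   lmul = -2 (LMUL = 1/4) -> vlmax = 1, maxsz =  4 bytes: GVEC is rejected
 */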
+
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -129,17 +234,51 @@ static void riscv_restore_state_to_opc(CPUState *cs,
env->pc = pc;
}
env->bins = data[1];
+ env->excp_uw2 = data[2];
}
-static const TCGCPUOps riscv_tcg_ops = {
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ CPURISCVState *env = cpu_env(cs);
+ uint32_t pm_len;
+ bool pm_signext;
+
+ if (cpu_address_xl(env) == MXL_RV32) {
+ return (uint32_t)result;
+ }
+
+ pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+ if (pm_len == 0) {
+ return result;
+ }
+
+ pm_signext = riscv_cpu_virt_mem_enabled(env);
+ if (pm_signext) {
+ return sextract64(result, 0, 64 - pm_len);
+ }
+ return extract64(result, 0, 64 - pm_len);
+}
+#endif
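For reference, the wrap rule above can be restated as a standalone sketch. wrap_addr below is a hypothetical helper, assuming only that pm_len counts the high tag bits discarded by pointer masking and that sign- versus zero-extension follows the same pm_signext choice as in riscv_pointer_wrap; on RV32 the function simply truncates to 32 bits before any of this applies.

#include <stdbool.h>
#include <stdint.h>

static uint64_t wrap_addr(uint64_t addr, unsigned pm_len, bool sign_extend)
{
    if (pm_len == 0) {
        return addr;                              /* pointer masking disabled */
    }

    unsigned keep = 64 - pm_len;                  /* low bits that survive */
    uint64_t low = addr & ((1ULL << keep) - 1);   /* drop the tag bits */

    if (sign_extend && (low & (1ULL << (keep - 1)))) {
        low |= ~((1ULL << keep) - 1);             /* replicate bit keep-1 upward */
    }
    return low;
}

This is the same result as extract64(result, 0, 64 - pm_len) and sextract64(result, 0, 64 - pm_len) in the function above.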
+
+const TCGCPUOps riscv_tcg_ops = {
+ .mttcg_supported = true,
+ .guest_default_memory_order = 0,
+
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
+ .get_tb_cpu_state = riscv_get_tb_cpu_state,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,
+ .mmu_index = riscv_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = riscv_cpu_tlb_fill,
+ .pointer_wrap = riscv_pointer_wrap,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.cpu_exec_halt = riscv_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
@@ -203,10 +342,20 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
* All other named features are already enabled
* in riscv_tcg_cpu_instance_init().
*/
- if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
+ switch (feat_offset) {
+ case CPU_CFG_OFFSET(ext_zic64b):
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
+ break;
+ case CPU_CFG_OFFSET(ext_sha):
+ if (!cpu_misa_ext_is_user_set(RVH)) {
+ riscv_cpu_write_misa_bit(cpu, RVH, true);
+ }
+ /* fallthrough */
+ case CPU_CFG_OFFSET(ext_ssstateen):
+ cpu->cfg.ext_smstateen = true;
+ break;
}
}
@@ -303,6 +452,15 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
}
isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
+
+ /*
+ * Do not show user warnings for named features that users
+ * can't enable/disable on the command line. See commit
+ * 68c9e54bea for more info.
+ */
+ if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
+ continue;
+ }
#ifndef CONFIG_USER_ONLY
warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
" because privilege spec version does not match",
@@ -330,11 +488,16 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.has_priv_1_13 = true;
}
- /* zic64b is 1.12 or later */
cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
cpu->cfg.cbop_blocksize == 64 &&
- cpu->cfg.cboz_blocksize == 64 &&
- cpu->cfg.has_priv_1_12;
+ cpu->cfg.cboz_blocksize == 64;
+
+ cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
+
+ cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
+ cpu->cfg.ext_ssstateen;
+
+ cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -554,7 +717,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
+ if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;
}
@@ -618,11 +781,55 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
cpu->cfg.ext_zihpm = false;
}
+ if (cpu->cfg.ext_zicfiss) {
+ if (!cpu->cfg.ext_zicsr) {
+ error_setg(errp, "zicfiss extension requires zicsr extension");
+ return;
+ }
+ if (!riscv_has_ext(env, RVA)) {
+ error_setg(errp, "zicfiss extension requires A extension");
+ return;
+ }
+ if (!riscv_has_ext(env, RVS)) {
+ error_setg(errp, "zicfiss extension requires S");
+ return;
+ }
+ if (!cpu->cfg.ext_zimop) {
+ error_setg(errp, "zicfiss extension requires zimop extension");
+ return;
+ }
+ if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
+ error_setg(errp, "zicfiss with zca requires zcmop extension");
+ return;
+ }
+ }
+
if (!cpu->cfg.ext_zihpm) {
cpu->cfg.pmu_mask = 0;
cpu->pmu_avail_ctrs = 0;
}
+ if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
+ error_setg(errp, "zicfilp extension requires zicsr extension");
+ return;
+ }
+
+ if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
+ error_setg(errp, "svukte is not supported for RV32");
+ return;
+ }
+
+ if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
+ (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
+ if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
+ cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
+ error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
+ return;
+ }
+ cpu->cfg.ext_smctr = false;
+ cpu->cfg.ext_ssctr = false;
+ }
+
/*
* Disable isa extensions based on priv spec after we
* validated and set everything we need.
@@ -635,8 +842,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
RISCVCPUProfile *profile,
bool send_warn)
{
- int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
+ int satp_max = cpu->cfg.max_satp_mode;
+ assert(satp_max >= 0);
if (profile->satp_mode > satp_max) {
if (send_warn) {
bool is_32bit = riscv_cpu_is_32bit(cpu);
@@ -655,13 +863,29 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
}
#endif
+static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
+ RISCVCPUProfile *profile,
+ RISCVCPUProfile *parent)
+{
+ const char *parent_name;
+ bool parent_enabled;
+
+ if (!profile->enabled || !parent) {
+ return;
+ }
+
+ parent_name = parent->name;
+ parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
+ profile->enabled = parent_enabled;
+}
+
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
RISCVCPUProfile *profile)
{
CPURISCVState *env = &cpu->env;
const char *warn_msg = "Profile %s mandates disabled extension %s";
bool send_warn = profile->user_set && profile->enabled;
- bool parent_enabled, profile_impl = true;
+ bool profile_impl = true;
int i;
#ifndef CONFIG_USER_ONLY
@@ -672,7 +896,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
#endif
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
- profile->priv_spec != env->priv_ver) {
+ profile->priv_spec > env->priv_ver) {
profile_impl = false;
if (send_warn) {
@@ -715,12 +939,8 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
profile->enabled = profile_impl;
- if (profile->parent != NULL) {
- parent_enabled = object_property_get_bool(OBJECT(cpu),
- profile->parent->name,
- NULL);
- profile->enabled = profile->enabled && parent_enabled;
- }
+ riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
+ riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
@@ -778,11 +998,18 @@ static void cpu_enable_implied_rule(RISCVCPU *cpu,
if (!enabled) {
/* Enable the implied MISAs. */
if (rule->implied_misa_exts) {
- riscv_cpu_set_misa_ext(env,
- env->misa_ext | rule->implied_misa_exts);
-
for (i = 0; misa_bits[i] != 0; i++) {
if (rule->implied_misa_exts & misa_bits[i]) {
+ /*
+ * If the user disabled the misa_bit, do not re-enable it
+ * and do not apply any implied rules related to it.
+ */
+ if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
+ !(env->misa_ext & misa_bits[i])) {
+ continue;
+ }
+
+ riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
ir = g_hash_table_lookup(misa_ext_implied_rules,
GUINT_TO_POINTER(misa_bits[i]));
@@ -825,7 +1052,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
}
@@ -834,7 +1061,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
@@ -898,6 +1125,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
+#ifndef CONFIG_USER_ONLY
+ if (cpu->cfg.pmu_mask) {
+ riscv_pmu_init(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (cpu->cfg.ext_sscofpmf) {
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ riscv_pmu_timer_cb, cpu);
+ }
+ }
+#endif
}
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
@@ -944,8 +1185,17 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
}
#ifndef CONFIG_USER_ONLY
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+
+ if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
+ /* Missing 128-bit aligned atomics */
+ error_setg(errp,
+ "128-bit RISC-V currently does not work with Multi "
+ "Threaded TCG. Please use: -accel tcg,thread=single");
+ return false;
+ }
+
CPURISCVState *env = &cpu->env;
- Error *local_err = NULL;
tcg_cflags_set(CPU(cs), CF_PCREL);
@@ -953,19 +1203,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
riscv_timer_init(cpu);
}
- if (cpu->cfg.pmu_mask) {
- riscv_pmu_init(cpu, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return false;
- }
-
- if (cpu->cfg.ext_sscofpmf) {
- cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- riscv_pmu_timer_cb, cpu);
- }
- }
-
/* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
if (riscv_has_ext(env, RVH)) {
env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
@@ -1050,7 +1287,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
MISA_CFG(RVS, true),
MISA_CFG(RVU, true),
MISA_CFG(RVH, true),
- MISA_CFG(RVJ, false),
MISA_CFG(RVV, false),
MISA_CFG(RVG, false),
MISA_CFG(RVB, false),
@@ -1117,8 +1353,13 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
profile->user_set = true;
profile->enabled = value;
- if (profile->parent != NULL) {
- object_property_set_bool(obj, profile->parent->name,
+ if (profile->u_parent != NULL) {
+ object_property_set_bool(obj, profile->u_parent->name,
+ profile->enabled, NULL);
+ }
+
+ if (profile->s_parent != NULL) {
+ object_property_set_bool(obj, profile->s_parent->name,
profile->enabled, NULL);
}
@@ -1337,8 +1578,8 @@ static void riscv_init_max_cpu_extensions(Object *obj)
CPURISCVState *env = &cpu->env;
const RISCVCPUMultiExtConfig *prop;
- /* Enable RVG, RVJ and RVV that are disabled by default */
- riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
+ /* Enable RVG and RVV that are disabled by default */
+ riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
isa_ext_update_enabled(cpu, prop->offset, true);
@@ -1366,6 +1607,23 @@ static void riscv_init_max_cpu_extensions(Object *obj)
if (env->misa_mxl != MXL_RV32) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
}
+
+ /*
+ * TODO: ext_smrnmi requires OpenSBI changes that our current
+ * image does not have. Disable it for now.
+ */
+ if (cpu->cfg.ext_smrnmi) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
+ }
+
+ /*
+ * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
+ * to avoid generating a double trap. OpenSBI does not currently
+ * support it, so disable it for now.
+ */
+ if (cpu->cfg.ext_smdbltrp) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
+ }
}
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
@@ -1396,24 +1654,10 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
}
}
-static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
-{
- /*
- * All cpus use the same set of operations.
- */
- cc->tcg_ops = &riscv_tcg_ops;
-}
-
-static void riscv_tcg_cpu_class_init(CPUClass *cc)
-{
- cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
-}
-
-static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
- acc->cpu_class_init = riscv_tcg_cpu_class_init;
acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
acc->cpu_target_realize = riscv_tcg_cpu_realize;
}
diff --git a/target/riscv/tcg/tcg-cpu.h b/target/riscv/tcg/tcg-cpu.h
index ce94253..a23716a 100644
--- a/target/riscv/tcg/tcg-cpu.h
+++ b/target/riscv/tcg/tcg-cpu.h
@@ -26,6 +26,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
bool riscv_cpu_tcg_compatible(RISCVCPU *cpu);
+extern const TCGCPUOps riscv_tcg_ops;
+
struct DisasContext;
struct RISCVCPUConfig;
typedef struct RISCVDecoder {
diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c
index 6c970d4..49eb7bb 100644
--- a/target/riscv/th_csr.c
+++ b/target/riscv/th_csr.c
@@ -27,12 +27,6 @@
#define TH_SXSTATUS_MAEE BIT(21)
#define TH_SXSTATUS_THEADISAEE BIT(22)
-typedef struct {
- int csrno;
- int (*insertion_test)(RISCVCPU *cpu);
- riscv_csr_operations csr_ops;
-} riscv_csr;
-
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -42,13 +36,9 @@ static RISCVException smode(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
-static int test_thead_mvendorid(RISCVCPU *cpu)
+static bool test_thead_mvendorid(RISCVCPU *cpu)
{
- if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) {
- return -1;
- }
-
- return 0;
+ return cpu->cfg.mvendorid == THEAD_VENDOR_ID;
}
static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
@@ -59,21 +49,11 @@ static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static riscv_csr th_csr_list[] = {
+const RISCVCSR th_csr_list[] = {
{
.csrno = CSR_TH_SXSTATUS,
.insertion_test = test_thead_mvendorid,
.csr_ops = { "th.sxstatus", smode, read_th_sxstatus }
- }
+ },
+ { }
};
-
-void th_register_custom_csrs(RISCVCPU *cpu)
-{
- for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) {
- int csrno = th_csr_list[i].csrno;
- riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops;
- if (!th_csr_list[i].insertion_test(cpu)) {
- riscv_set_csr_ops(csrno, csr_ops);
- }
- }
-}
diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c
index 8d245be..bc0d9a0 100644
--- a/target/riscv/time_helper.c
+++ b/target/riscv/time_helper.c
@@ -92,6 +92,7 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer,
* equals UINT64_MAX.
*/
if (timecmp == UINT64_MAX) {
+ timer_del(timer);
return;
}
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
index 49ec4d3..93837f8 100644
--- a/target/riscv/trace-events
+++ b/target/riscv/trace-events
@@ -9,3 +9,6 @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %"
mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
+
+# op_helper.c
+riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64""
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index acba90f..d7a6de0 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -20,11 +20,11 @@
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-
+#include "exec/target_page.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#include "semihosting/semihost.h"
@@ -41,9 +41,6 @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
-/* globals for PM CSRs */
-static TCGv pm_mask;
-static TCGv pm_base;
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
@@ -105,9 +102,9 @@ typedef struct DisasContext {
bool vl_eq_vlmax;
CPUState *cs;
TCGv zero;
- /* PointerMasking extension */
- bool pm_mask_enabled;
- bool pm_base_enabled;
+ /* actual address width */
+ uint8_t addr_xl;
+ bool addr_signed;
/* Ztso */
bool ztso;
/* Use icount trigger for native debug */
@@ -116,6 +113,11 @@ typedef struct DisasContext {
bool frm_valid;
bool insn_start_updated;
const GPtrArray *decoders;
+ /* zicfilp extension: whether forward CFI is enabled and an lpad is expected */
+ bool fcfi_enabled;
+ bool fcfi_lp_expected;
+ /* zicfiss extension: whether the shadow stack was enabled during TB generation */
+ bool bcfi_enabled;
} DisasContext;
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -139,6 +141,8 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
#define get_address_xl(ctx) ((ctx)->address_xl)
#endif
+#define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE)
+
/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
@@ -204,11 +208,12 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
-static void decode_save_opc(DisasContext *ctx)
+static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2)
{
assert(!ctx->insn_start_updated);
ctx->insn_start_updated = true;
tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
+ tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2);
}
static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
@@ -236,7 +241,7 @@ static void gen_update_pc(DisasContext *ctx, target_long diff)
ctx->pc_save = ctx->base.pc_next + diff;
}
-static void generate_exception(DisasContext *ctx, int excp)
+static void generate_exception(DisasContext *ctx, RISCVException excp)
{
gen_update_pc(ctx, 0);
gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
@@ -555,12 +560,54 @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
}
}
+#ifndef CONFIG_USER_ONLY
+/*
+ * Direct calls
+ * - jal x1;
+ * - jal x5;
+ * - c.jal;
+ * - cm.jalt.
+ *
+ * Direct jumps
+ * - jal x0;
+ * - c.j;
+ * - cm.jt.
+ *
+ * Other direct jumps
+ * - jal rd where rd != x1, rd != x5 and rd != x0.
+ */
+static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm)
+{
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+ TCGv type;
+
+ /*
+ * If rd is the x1 or x5 link register, treat this as a direct call;
+ * otherwise it is a direct jump.
+ */
+ if (rd == 1 || rd == 5) {
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
+ } else if (rd == 0) {
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
+ } else {
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP);
+ }
+
+ gen_pc_plus_diff(dest, ctx, imm);
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+}
+#endif
+
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
TCGv succ_pc = dest_gpr(ctx, rd);
/* check misaligned: */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
if ((imm & 0x3) != 0) {
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, imm);
@@ -569,6 +616,12 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
}
}
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_ctr_jal(ctx, rd, imm);
+ }
+#endif
+
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, rd, succ_pc);
@@ -583,13 +636,10 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_addi_tl(addr, src1, imm);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_address_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
return addr;
@@ -602,14 +652,12 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_add_tl(addr, src1, offs);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
+
return addr;
}
@@ -694,7 +742,7 @@ static void gen_set_rm(DisasContext *ctx, int rm)
}
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
}
@@ -707,7 +755,7 @@ static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
ctx->frm_valid = true;
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
}
@@ -1091,7 +1139,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
mop |= MO_ALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
src1 = get_address(ctx, a->rs1, 0);
func(dest, src1, src2, ctx->mem_idx, mop);
@@ -1105,7 +1153,7 @@ static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
@@ -1121,6 +1169,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
return translator_ldl(env, &ctx->base, pc);
}
+#define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE)
+
/* Include insn module translation function */
#include "insn_trans/trans_rvi.c.inc"
#include "insn_trans/trans_rvm.c.inc"
@@ -1151,6 +1201,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "decode-insn16.c.inc"
#include "insn_trans/trans_rvzce.c.inc"
#include "insn_trans/trans_rvzcmop.c.inc"
+#include "insn_trans/trans_rvzicfiss.c.inc"
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"
@@ -1158,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* The specification allows for longer insns, but not supported by qemu. */
#define MAX_INSN_LEN 4
-static inline int insn_len(uint16_t first_word)
-{
- return (first_word & 3) == 3 ? 4 : 2;
-}
-
const RISCVDecoder decoder_table[] = {
{ always_true_p, decode_insn32 },
{ has_xthead_p, decode_xthead},
@@ -1230,14 +1276,23 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
- ctx->misa_mxl_max = mcc->misa_mxl_max;
+ ctx->misa_mxl_max = mcc->def->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
- ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
- ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
+ if (get_xl(ctx) == MXL_RV32) {
+ ctx->addr_xl = 32;
+ ctx->addr_signed = false;
+ } else {
+ int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM);
+ ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm);
+ ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND);
+ }
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
+ ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
+ ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED);
+ ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED);
ctx->zero = tcg_constant_tl(0);
ctx->virt_inst_excp = false;
ctx->decoders = cpu->decoders;
@@ -1256,7 +1311,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
pc_next &= ~TARGET_PAGE_MASK;
}
- tcg_gen_insn_start(pc_next, 0);
+ tcg_gen_insn_start(pc_next, 0, 0);
ctx->insn_start_updated = false;
}
@@ -1270,9 +1325,27 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
decode_opc(env, ctx, opcode16);
ctx->base.pc_next += ctx->cur_insn_len;
+ /*
+ * If 'fcfi_lp_expected' is still true after processing the instruction,
+ * then we did not see an 'lpad' instruction, and must raise an exception.
+ * Insert code to raise the exception at the start of the insn; any other
+ * code the insn may have emitted will be deleted as dead code following
+ * the noreturn exception.
+ */
+ if (ctx->fcfi_lp_expected) {
+ /* Emit after insn_start, i.e. before the op following insn_start. */
+ tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ tcg_ctx->emit_before_op = NULL;
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+
/* Only the first insn within a TB is allowed to cross a page boundary. */
if (ctx->base.is_jmp == DISAS_NEXT) {
- if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
+ if (ctx->itrigger || !translator_is_same_page(&ctx->base, ctx->base.pc_next)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
} else {
unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
@@ -1282,7 +1355,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
translator_lduw(env, &ctx->base, ctx->base.pc_next);
int len = insn_len(next_insn);
- if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
+ if (!translator_is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -1313,8 +1386,8 @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
@@ -1353,9 +1426,4 @@ void riscv_translate_init(void)
"load_res");
load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
"load_val");
- /* Assign PM CSRs to tcg globals */
- pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
- "pmmask");
- pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
- "pmbase");
}
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index f7423df..9a0d9b4 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -26,7 +26,6 @@
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "vector_internals.h"
@@ -222,7 +221,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -248,7 +247,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -309,7 +308,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 10 || uimm == 0) {
@@ -357,7 +356,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 14 || uimm < 2) {
@@ -465,7 +464,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
if (sew == MO_32) {
@@ -582,7 +581,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -602,7 +601,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -622,7 +621,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -642,7 +641,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -676,7 +675,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t *vs1 = vs1_vptr;
uint32_t *vs2 = vs2_vptr;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
uint32_t w[24];
@@ -777,7 +776,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t *vs2 = vs2_vptr;
uint32_t v1[8], v2[8], v3[8];
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
for (int k = 0; k < 8; k++) {
@@ -802,7 +801,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
@@ -841,7 +840,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
@@ -879,7 +878,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -937,7 +936,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -973,7 +972,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1b4d5a8..5dc1c10 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -21,10 +21,13 @@
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
+#include "exec/target_page.h"
+#include "exec/tswap.h"
#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
@@ -75,6 +78,8 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
if (s1 <= vlmax) {
vl = s1;
+ } else if (s1 < 2 * vlmax && cpu->cfg.rvv_vl_half_avl) {
+ vl = (s1 + 1) >> 1;
} else {
vl = vlmax;
}
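The new rvv_vl_half_avl branch picks one of the choices the vector spec permits when VLMAX < AVL < 2 * VLMAX, namely vl = ceil(AVL / 2). A quick numeric check, assuming vlmax = 4 (for instance VLEN = 128, SEW = 32, LMUL = 1):

/*
 * s1 (AVL) = 6: vlmax < 6 < 2 * vlmax and rvv_vl_half_avl is set,
 *   so vl = (6 + 1) >> 1 = 3 instead of clamping to vlmax = 4.
 * s1 = 9: 9 >= 2 * vlmax, so the final else still yields vl = vlmax = 4.
 */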
@@ -103,11 +108,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
-static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
-{
- return (addr & ~env->cur_pmmask) | env->cur_pmbase;
-}
-
/*
* This function checks watchpoint before real load operation.
*
@@ -117,25 +117,42 @@ static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
* It will trigger an exception if there is no mapping in TLB
* and page table walk can't fill the TLB entry. Then the guest
* software can return here after process the exception or never return.
+ *
+ * This function can also be used when direct access to the probe flags is
+ * needed. If a pointer to a flags operand is provided, the function calls
+ * probe_access_flags instead, using nonfault and updating host and flags.
*/
-static void probe_pages(CPURISCVState *env, target_ulong addr,
- target_ulong len, uintptr_t ra,
- MMUAccessType access_type)
+static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len,
+ uintptr_t ra, MMUAccessType access_type, int mmu_index,
+ void **host, int *flags, bool nonfault)
{
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
target_ulong curlen = MIN(pagelen, len);
- int mmu_index = riscv_env_mmu_index(env, false);
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault, host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
+
if (len > curlen) {
addr += curlen;
curlen = len - curlen;
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault,
+ host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
}
}
+
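A short usage sketch of the two calling conventions described in the comment, mirroring the callers later in this patch; env, addr, len, ra and mmu_index are assumed to be in scope:

void *host;
int flags;

/* Faulting probe (flags == NULL): any failure raises the guest exception here. */
probe_pages(env, addr, len, ra, MMU_DATA_LOAD, mmu_index, &host, NULL, false);

/*
 * Non-faulting probe: callers then test flags, e.g. flags == 0 for the
 * host fast path, or flags & ~TLB_WATCHPOINT to detect unmapped/MMIO pages.
 */
probe_pages(env, addr, len, ra, MMU_DATA_LOAD, mmu_index, &host, &flags, true);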
static inline void vext_set_elem_mask(void *v0, int index,
uint8_t value)
{
@@ -146,34 +163,90 @@ static inline void vext_set_elem_mask(void *v0, int index,
}
/* elements operations for load and store */
-typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
- uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_tlb(CPURISCVState *env, abi_ptr addr,
+ uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_host(void *vd, uint32_t idx, void *host);
+
+#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = (ETYPE)LDSUF##_p(host); \
+}
-#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE *cur = ((ETYPE *)vd + H(idx)); \
- *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
-} \
-
-GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
-
-#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE data = *((ETYPE *)vd + H(idx)); \
- cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+GEN_VEXT_LD_ELEM(lde_b, uint8_t, H1, ldub)
+GEN_VEXT_LD_ELEM(lde_h, uint16_t, H2, lduw)
+GEN_VEXT_LD_ELEM(lde_w, uint32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, uint64_t, H8, ldq)
+
+#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ STSUF##_p(host, data); \
+}
+
+GEN_VEXT_ST_ELEM(ste_b, uint8_t, H1, stb)
+GEN_VEXT_ST_ELEM(ste_h, uint16_t, H2, stw)
+GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
+GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
+
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
+ void *vd, uint32_t evl, target_ulong addr,
+ uint32_t reg_start, uintptr_t ra, uint32_t esz,
+ bool is_load)
+{
+ uint32_t i;
+ for (i = env->vstart; i < evl; env->vstart = ++i, addr += esz) {
+ ldst_tlb(env, adjust_addr(env, addr), i, vd, ra);
+ }
}
-GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb)
-GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
-GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
-GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
+ void *vd, uint32_t evl, uint32_t reg_start, void *host,
+ uint32_t esz, bool is_load)
+{
+#if HOST_BIG_ENDIAN
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+#else
+ if (esz == 1) {
+ uint32_t byte_offset = reg_start * esz;
+ uint32_t size = (evl - reg_start) * esz;
+
+ if (is_load) {
+ memcpy(vd + byte_offset, host, size);
+ } else {
+ memcpy(host, vd + byte_offset, size);
+ }
+ } else {
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+ }
+#endif
+}
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
uint32_t desc, uint32_t nf,
@@ -196,11 +269,10 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
* stride: access vector element from strided memory
*/
static void
-vext_ldst_stride(void *vd, void *v0, target_ulong base,
- target_ulong stride, CPURISCVState *env,
- uint32_t desc, uint32_t vm,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
+ CPURISCVState *env, uint32_t desc, uint32_t vm,
+ vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz,
+ uintptr_t ra)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
@@ -208,7 +280,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
k = 0;
@@ -240,10 +312,10 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
-GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
-GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
-GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
+GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b_tlb)
+GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h_tlb)
+GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w_tlb)
+GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d_tlb)
#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -255,39 +327,137 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
-GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
-GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
-GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
+GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b_tlb)
+GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h_tlb)
+GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w_tlb)
+GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d_tlb)
/*
* unit-stride: access elements stored contiguously in memory
*/
/* unmasked unit-stride load and store operation */
-static void
+static inline QEMU_ALWAYS_INLINE void
+vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
+ uint32_t elems, uint32_t nf, uint32_t max_elems,
+ uint32_t log2_esz, bool is_load, int mmu_index,
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uintptr_t ra)
+{
+ void *host;
+ int i, k, flags;
+ uint32_t esz = 1 << log2_esz;
+ uint32_t size = (elems * nf) << log2_esz;
+ uint32_t evl = env->vstart + elems;
+ MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags,
+ true);
+
+ if (flags == 0) {
+ if (nf == 1) {
+ vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
+ host, esz, is_load);
+ } else {
+ for (i = env->vstart; i < evl; ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_host(vd, i + k * max_elems, host);
+ host += esz;
+ k++;
+ }
+ }
+ }
+ env->vstart += elems;
+ } else {
+ if (nf == 1) {
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
+ ra, esz, is_load);
+ } else {
+ /* load bytes from guest memory */
+ for (i = env->vstart; i < evl; env->vstart = ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ addr += esz;
+ k++;
+ }
+ }
+ }
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl,
- uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uint32_t evl, uintptr_t ra, bool is_load)
{
- uint32_t i, k;
+ uint32_t k;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, evl);
- /* load bytes from guest memory */
- for (i = env->vstart; i < evl; env->vstart = ++i) {
- k = 0;
- while (k < nf) {
- target_ulong addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * For data sizes <= 6 bytes, we get better performance by simply
+ * calling vext_continuous_ldst_tlb.
+ */
+ if (nf == 1 && (evl << log2_esz) <= 6) {
+ addr = base + (env->vstart << log2_esz);
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
+ esz, is_load);
+
+ env->vstart = 0;
+ vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
+ return;
+ }
+#endif
+
+ /* Calculate the page range of first page */
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
+ }
+
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
}
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- env->vstart = 0;
+ env->vstart = 0;
vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
}
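The first-page/second-page split above is mostly address arithmetic; a worked example, assuming 4 KiB target pages:

/*
 * addr = 0x1000ff0 leaves page_split = -(addr | TARGET_PAGE_MASK) = 16
 * bytes before the page boundary.
 *   nf = 1, esz = 8 -> msize = 8,  elems = 2, 16 % 8 == 0: no straddler,
 *   and the remaining elements are handled wholesale in the second page.
 *   nf = 3, esz = 4 -> msize = 12, elems = 1, 16 % 12 != 0: the next
 *   element group straddles the boundary, so its nf parts go through
 *   ldst_tlb one by one before the second page is processed.
 */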
@@ -296,47 +466,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
* stride, stride = NF * sizeof (ETYPE)
*/
-#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-} \
- \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_us(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ LOAD_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
+} \
+ \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_us(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), true); \
}
-GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
-GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
-GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
-GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+GEN_VEXT_LD_US(vle8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_US(vle16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_US(vle32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_US(vle64_v, int64_t, lde_d_tlb, lde_d_host)
-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ STORE_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- vext_ldst_us(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+ vext_ldst_us(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), false); \
}
-GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
-GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
-GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
-GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
+GEN_VEXT_ST_US(vse8_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_US(vse16_v, int16_t, ste_h_tlb, ste_h_host)
+GEN_VEXT_ST_US(vse32_v, int32_t, ste_w_tlb, ste_w_host)
+GEN_VEXT_ST_US(vse64_v, int64_t, ste_d_tlb, ste_d_host)
/*
* unit stride mask load and store, EEW = 1
@@ -346,8 +516,8 @@ void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, lde_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, lde_b_tlb, lde_b_host,
+ 0, evl, GETPC(), true);
}
void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
@@ -355,8 +525,8 @@ void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, ste_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, ste_b_tlb, ste_b_host,
+ 0, evl, GETPC(), false);
}
/*
@@ -381,7 +551,7 @@ static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
void *vs2, CPURISCVState *env, uint32_t desc,
vext_get_index_addr get_index_addr,
- vext_ldst_elem_fn *ldst_elem,
+ vext_ldst_elem_fn_tlb *ldst_elem,
uint32_t log2_esz, uintptr_t ra)
{
uint32_t i, k;
@@ -391,7 +561,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
/* load bytes from guest memory */
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
@@ -422,22 +592,22 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
LOAD_FN, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
-GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h)
-GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w)
-GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d)
-GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b)
-GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
-GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
-GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
-GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b)
-GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
-GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
-GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
-GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b)
-GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
-GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
-GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
+GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d_tlb)
#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -448,76 +618,101 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
GETPC()); \
}
-GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b)
-GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h)
-GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w)
-GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d)
-GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b)
-GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
-GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
-GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
-GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b)
-GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
-GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
-GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
-GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b)
-GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
-GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
-GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
+GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d_tlb)
/*
 * unit-stride fault-only-first load instructions
*/
static inline void
-vext_ldff(void *vd, void *v0, target_ulong base,
- CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
+ uint32_t desc, vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, uintptr_t ra)
{
- void *host;
uint32_t i, k, vl = 0;
uint32_t nf = vext_nf(desc);
uint32_t vm = vext_vm(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
uint32_t vma = vext_vma(desc);
- target_ulong addr, offset, remain;
+ target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
+ int flags, probe_flags;
+ void *host;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
- /* probe every access */
- for (i = env->vstart; i < env->vl; i++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- addr = adjust_addr(env, base + i * (nf << log2_esz));
- if (i == 0) {
- probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
- } else {
- /* if it triggers an exception, no need to check watchpoint */
- remain = nf << log2_esz;
- while (remain > 0) {
- offset = -(addr | TARGET_PAGE_MASK);
- host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_index);
- if (host) {
-#ifdef CONFIG_USER_ONLY
- if (!page_check_range(addr, offset, PAGE_READ)) {
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= env->vl)) {
+ elems = env->vl - env->vstart;
+ }
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host,
+ &flags, true);
+
+ /* If we are crossing a page, also check the second page. */
+ if (env->vl > elems) {
+ addr_probe = addr + (elems << log2_esz);
+ probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD,
+ mmu_index, &host, &probe_flags, true);
+ flags |= probe_flags;
+ }
+
+ if (flags & ~TLB_WATCHPOINT) {
+ /* probe every access */
+ for (i = env->vstart; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ continue;
+ }
+ addr_i = adjust_addr(env, base + i * (nf << log2_esz));
+ if (i == 0) {
+ /* Allow fault on first element. */
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
+ mmu_index, &host, NULL, false);
+ } else {
+ remain = nf << log2_esz;
+ while (remain > 0) {
+ offset = -(addr_i | TARGET_PAGE_MASK);
+
+ /* Probe nonfault on subsequent elements. */
+ probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
+ mmu_index, &host, &flags, true);
+
+ /*
+ * Stop if invalid (unmapped) or mmio (transaction may
+ * fail). Do not stop if watchpoint, as the spec says that
+ * first-fault should continue to access the same
+ * elements regardless of any watchpoint.
+ */
+ if (flags & ~TLB_WATCHPOINT) {
vl = i;
goto ProbeSuccess;
}
-#else
- probe_pages(env, addr, offset, ra, MMU_DATA_LOAD);
-#endif
- } else {
- vl = i;
- goto ProbeSuccess;
- }
- if (remain <= offset) {
- break;
+ if (remain <= offset) {
+ break;
+ }
+ remain -= offset;
+ addr_i = adjust_addr(env, addr_i + offset);
}
- remain -= offset;
- addr = adjust_addr(env, addr + offset);
}
}
}
@@ -526,19 +721,54 @@ ProbeSuccess:
if (vl != 0) {
env->vl = vl;
}
- for (i = env->vstart; i < env->vl; i++) {
- k = 0;
- while (k < nf) {
- if (!vm && !vext_elem_mask(v0, i)) {
- /* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
- (i + k * max_elems + 1) * esz);
- k++;
- continue;
+
+ if (env->vstart < env->vl) {
+ if (vm) {
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < env->vl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
+ }
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = env->vl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+ } else {
+ for (i = env->vstart; i < env->vl; i++) {
+ k = 0;
+ while (k < nf) {
+ if (!vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
+ addr = base + ((i * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ k++;
+ }
}
- addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
}
}
env->vstart = 0;
@@ -546,18 +776,18 @@ ProbeSuccess:
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}
-#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldff(vd, v0, base, env, desc, LOAD_FN_TLB, \
+ LOAD_FN_HOST, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b)
-GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
-GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
-GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
+GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb, lde_d_host)
#define DO_SWAP(N, M) (M)
#define DO_AND(N, M) (N & M)
@@ -572,81 +802,93 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
/*
* load and store whole register instructions
*/
-static void
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uintptr_t ra, bool is_load)
{
- uint32_t i, k, off, pos;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
uint32_t max_elems = vlenb >> log2_esz;
+ uint32_t evl = nf * max_elems;
+ uint32_t esz = 1 << log2_esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- if (env->vstart >= ((vlenb * nf) >> log2_esz)) {
- env->vstart = 0;
- return;
+ /* Calculate the page range of first page */
+ addr = base + (env->vstart << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / esz;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
}
- k = env->vstart / max_elems;
- off = env->vstart % max_elems;
-
- if (off) {
- /* load/store rest of elements of current segment pointed by vstart */
- for (pos = off; pos < max_elems; pos++, env->vstart++) {
- target_ulong addr = base + ((pos + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd,
- ra);
- }
- k++;
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- /* load/store elements for rest of segments */
- for (; k < nf; k++) {
- for (i = 0; i < max_elems; i++, env->vstart++) {
- target_ulong addr = base + ((i + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % esz)) {
+ addr = base + (env->vstart << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), env->vstart, vd, ra);
+ env->vstart++;
}
+
+ addr = base + (env->vstart << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
env->vstart = 0;
}
-#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d)
-
-#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
+#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), true); \
+}
+
+GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb, lde_d_host)
+
+#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), false); \
+}
+
+GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb, ste_b_host)
/*
* Vector Integer Arithmetic Instructions
@@ -891,7 +1133,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -925,7 +1167,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -962,7 +1204,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1002,7 +1244,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1100,7 +1342,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1149,7 +1391,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1213,7 +1455,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1280,7 +1522,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1829,7 +2071,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1855,7 +2097,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
@@ -1880,7 +2122,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
@@ -1906,7 +2148,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1953,8 +2195,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -1978,6 +2218,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vv_rm_1(vd, v0, vs1, vs2,
@@ -2080,8 +2322,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -2105,6 +2345,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vx_rm_1(vd, v0, s1, vs2,
@@ -2879,7 +3121,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -2924,7 +3166,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3512,7 +3754,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
if (vl == 0) { \
return; \
@@ -4035,7 +4277,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -4077,7 +4319,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4272,7 +4514,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4440,6 +4682,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4447,7 +4691,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4526,6 +4772,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4533,7 +4781,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2, &env->fp_status); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4598,7 +4848,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
int a, b; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
a = vext_elem_mask(vs1, i); \
@@ -4688,6 +4938,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
int i;
bool first_mask_bit = false;
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -4760,6 +5012,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
uint32_t sum = 0; \
int i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
@@ -4793,7 +5047,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
uint32_t vma = vext_vma(desc); \
int i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4830,7 +5084,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong offset = s1, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MAX(env->vstart, offset); \
for (i = i_min; i < vl; i++) { \
@@ -4865,7 +5119,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong i_max, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
i_max = MAX(i_min, env->vstart); \
@@ -4879,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
for (i = i_max; i < vl; ++i) { \
- if (vm || vext_elem_mask(v0, i)) { \
- *((ETYPE *)vd + H(i)) = 0; \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ continue; \
} \
+ *((ETYPE *)vd + H(i)) = 0; \
} \
\
env->vstart = 0; \
@@ -4909,7 +5165,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4960,7 +5216,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5037,7 +5293,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint64_t index; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5082,7 +5338,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint64_t index = s1; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5118,6 +5374,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vext_elem_mask(vs1, i)) { \
continue; \
@@ -5127,7 +5385,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, num * esz, total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5178,7 +5436,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
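
The reworked unit-stride, fault-only-first, and whole-register paths above all split a vector access at the page boundary with page_split = -(addr | TARGET_PAGE_MASK) and then derive how many whole elements (or segments) still fit in the first page. A minimal standalone sketch of that arithmetic, assuming 4 KiB target pages and an arbitrary base address; PAGE_MASK and the example values are illustrative stand-ins, not QEMU definitions:

/*
 * Illustrative sketch only: how many whole segments of size `msize`
 * fit between `addr` and the end of its page.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                              /* assumption: 4 KiB pages */
#define PAGE_MASK ((uint64_t)-1 << PAGE_BITS)     /* stand-in for TARGET_PAGE_MASK */

int main(void)
{
    uint64_t addr = 0x80000ff0;      /* made-up base, 16 bytes before a page end */
    uint64_t msize = 8;              /* bytes per segment: nf * esz */

    /* Bytes remaining in the current page, same expression as the helpers. */
    uint64_t page_split = -(addr | PAGE_MASK);
    /* Whole segments that can be accessed before crossing the page. */
    uint64_t elems = page_split / msize;

    printf("page_split=%" PRIu64 " elems=%" PRIu64 "\n", page_split, elems);
    return 0;
}

With the values above this prints page_split=16 elems=2, i.e. two segments are handled on the first page and the remainder (plus any segment that straddles the boundary) is handled by the second-page path.
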
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 05b2d01..b490b1d 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
@@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 9e1e15b..8eee7e5 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -20,15 +20,16 @@
#define TARGET_RISCV_VECTOR_INTERNALS_H
#include "qemu/bitops.h"
+#include "hw/registerfields.h"
#include "cpu.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
- if (env->vstart >= env->vl) { \
- env->vstart = 0; \
- return; \
- } \
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
+ if (env->vstart >= vl) { \
+ env->vstart = 0; \
+ return; \
+ } \
} while (0)
static inline uint32_t vext_nf(uint32_t desc)
@@ -158,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
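
The macro change above moves the choice of effective element count to the caller: ordinary helpers keep passing env->vl, while the whole-register path can pass evl = nf * max_elems. A minimal sketch of that contract, using a simplified environment struct that models only vstart; the struct, helper, and values are illustrative, not QEMU code:

#include <stdint.h>

struct toy_env {
    uint32_t vstart;
};

/* Same shape as the macro above, against the toy environment. */
#define VSTART_CHECK_EARLY_EXIT(env, vl) do {   \
    if ((env)->vstart >= (vl)) {                \
        (env)->vstart = 0;                      \
        return;                                 \
    }                                           \
} while (0)

/* A helper passes whatever its effective length is (env->vl, evl, ...). */
static void toy_helper(struct toy_env *env, uint32_t vl)
{
    VSTART_CHECK_EARLY_EXIT(env, vl);   /* nothing to do: reset vstart, return */

    for (uint32_t i = env->vstart; i < vl; i++) {
        /* per-element work would go here */
    }
    env->vstart = 0;
}

int main(void)
{
    struct toy_env env = { .vstart = 4 };
    toy_helper(&env, 4);    /* vstart >= vl: early exit, vstart reset to 0 */
    toy_helper(&env, 8);    /* processes elements 0..7 */
    return (int)env.vstart; /* 0 */
}
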
diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c
index b433bda..55221f5 100644
--- a/target/riscv/zce_helper.c
+++ b/target/riscv/zce_helper.c
@@ -18,9 +18,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
target_ulong HELPER(cm_jalt)(CPURISCVState *env, uint32_t index)
{
diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h
index 521d669..84934f3 100644
--- a/target/rx/cpu-param.h
+++ b/target/rx/cpu-param.h
@@ -19,10 +19,11 @@
#ifndef RX_CPU_PARAM_H
#define RX_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_INSN_START_EXTRA_WORDS 0
+
#endif
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 36d2a6f..c6dd5d6 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -21,11 +21,14 @@
#include "qapi/error.h"
#include "cpu.h"
#include "migration/vmstate.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "hw/loader.h"
#include "fpu/softfloat.h"
#include "tcg/debug-assert.h"
+#include "accel/tcg/cpu-ops.h"
static void rx_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -41,6 +44,17 @@ static vaddr rx_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState rx_get_tb_cpu_state(CPUState *cs)
+{
+ CPURXState *env = cpu_env(cs);
+ uint32_t flags = 0;
+
+ flags = FIELD_DP32(flags, PSW, PM, env->psw_pm);
+ flags = FIELD_DP32(flags, PSW, U, env->psw_u);
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void rx_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -65,7 +79,7 @@ static bool rx_cpu_has_work(CPUState *cs)
(CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
}
-static int riscv_cpu_mmu_index(CPUState *cs, bool ifunc)
+static int rx_cpu_mmu_index(CPUState *cs, bool ifunc)
{
return 0;
}
@@ -93,6 +107,23 @@ static void rx_cpu_reset_hold(Object *obj, ResetType type)
env->fpsw = 0;
set_flush_to_zero(1, &env->fp_status);
set_flush_inputs_to_zero(1, &env->fp_status);
+ /*
+ * TODO: this is not the correct NaN propagation rule for this
+ * architecture. The "RX Family User's Manual: Software" table 1.6
+ * defines the propagation rules as "prefer SNaN over QNaN;
+ * then prefer dest over source", which is float_2nan_prop_s_ab.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /* Default NaN value: sign bit clear, set frac msb */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ /*
+ * TODO: "RX Family RXv1 Instruction Set Architecture" is not 100% clear
+ * on whether flush-to-zero should happen before or after rounding, but
+ * section 1.3.2 says that it happens when underflow is detected, and
+ * implies that underflow is detected after rounding. So this may not
+ * be the correct setting.
+ */
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
}
static ObjectClass *rx_cpu_class_by_name(const char *cpu_model)
@@ -150,6 +181,7 @@ static void rx_cpu_set_irq(void *opaque, int no, int request)
static void rx_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
+ info->endian = BFD_ENDIAN_LITTLE;
info->mach = bfd_mach_rx;
info->print_insn = print_insn_rx;
}
@@ -174,30 +206,34 @@ static void rx_cpu_init(Object *obj)
qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
}
-#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps rx_sysemu_ops = {
+ .has_work = rx_cpu_has_work,
.get_phys_page_debug = rx_cpu_get_phys_page_debug,
};
-#endif
-
-#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps rx_tcg_ops = {
+ /* MTTCG not yet supported: require strict ordering */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = false,
+
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
+ .get_tb_cpu_state = rx_get_tb_cpu_state,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
+ .mmu_index = rx_cpu_mmu_index,
.tlb_fill = rx_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
-#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.cpu_exec_halt = rx_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = rx_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
};
-static void rx_cpu_class_init(ObjectClass *klass, void *data)
+static void rx_cpu_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CPUClass *cc = CPU_CLASS(klass);
@@ -210,15 +246,11 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
&rcc->parent_phases);
cc->class_by_name = rx_cpu_class_by_name;
- cc->has_work = rx_cpu_has_work;
- cc->mmu_index = riscv_cpu_mmu_index;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
cc->get_pc = rx_cpu_get_pc;
-#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &rx_sysemu_ops;
-#endif
cc->gdb_read_register = rx_cpu_gdb_read_register;
cc->gdb_write_register = rx_cpu_gdb_write_register;
cc->disas_set_info = rx_cpu_disas_set_info;
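
rx_get_tb_cpu_state() above packs the PSW.PM and PSW.U bits into the TB flags word via FIELD_DP32. A small sketch of that packing, using a local deposit32() that approximates the bit-deposit helper FIELD_DP32 relies on; the bit positions below are placeholders, not the real RX PSW layout:

#include <stdint.h>
#include <stdio.h>

/* Deposit `len` bits of `val` into `dst` starting at bit `start`. */
static uint32_t deposit32(uint32_t dst, int start, int len, uint32_t val)
{
    uint32_t mask = (len >= 32 ? 0xffffffffu : ((1u << len) - 1u)) << start;
    return (dst & ~mask) | ((val << start) & mask);
}

int main(void)
{
    uint32_t psw_pm = 1, psw_u = 0;   /* example CPU state */
    uint32_t flags = 0;

    flags = deposit32(flags, 20, 1, psw_pm);   /* placeholder PM bit position */
    flags = deposit32(flags, 17, 1, psw_u);    /* placeholder U bit position */

    printf("tb flags = 0x%08x\n", flags);
    return 0;
}

The resulting flags word is what the translator hashes alongside pc to pick a translation block, so any PSW bit that changes code generation must be folded in here.
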
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index c53593d..ba5761b 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -23,9 +23,15 @@
#include "hw/registerfields.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
+#ifdef CONFIG_USER_ONLY
+#error "RX does not support user mode emulation"
+#endif
+
/* PSW define */
REG32(PSW, 0)
FIELD(PSW, C, 0, 1)
@@ -129,35 +135,24 @@ struct RXCPUClass {
#define CPU_RESOLVING_TYPE TYPE_RX_CPU
const char *rx_crname(uint8_t cr);
-#ifndef CONFIG_USER_ONLY
void rx_cpu_do_interrupt(CPUState *cpu);
bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req);
hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
-#endif /* !CONFIG_USER_ONLY */
void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
-#include "exec/cpu-all.h"
-
#define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_FIR CPU_INTERRUPT_TGT_INT_1
#define RX_CPU_IRQ 0
#define RX_CPU_FIR 1
-static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = FIELD_DP32(0, PSW, PM, env->psw_pm);
- *flags = FIELD_DP32(*flags, PSW, U, env->psw_u);
-}
-
static inline uint32_t rx_cpu_pack_psw(CPURXState *env)
{
uint32_t psw = 0;
diff --git a/target/rx/helper.c b/target/rx/helper.c
index 80912e8..0640ab3 100644
--- a/target/rx/helper.c
+++ b/target/rx/helper.c
@@ -20,7 +20,7 @@
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/log.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "hw/irq.h"
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
@@ -40,8 +40,6 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
env->psw_c = FIELD_EX32(psw, PSW, C);
}
-#ifndef CONFIG_USER_ONLY
-
#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
void rx_cpu_do_interrupt(CPUState *cs)
{
@@ -90,7 +88,7 @@ void rx_cpu_do_interrupt(CPUState *cs)
cpu_stl_data(env, env->isp, env->pc);
if (vec < 0x100) {
- env->pc = cpu_ldl_data(env, 0xffffffc0 + vec * 4);
+ env->pc = cpu_ldl_data(env, 0xffffff80 + vec * 4);
} else {
env->pc = cpu_ldl_data(env, env->intb + (vec & 0xff) * 4);
}
@@ -146,5 +144,3 @@ hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
return addr;
}
-
-#endif /* !CONFIG_USER_ONLY */
diff --git a/target/rx/helper.h b/target/rx/helper.h
index ebb4739..8cc38b0 100644
--- a/target/rx/helper.h
+++ b/target/rx/helper.h
@@ -4,27 +4,27 @@ DEF_HELPER_1(raise_privilege_violation, noreturn, env)
DEF_HELPER_1(wait, noreturn, env)
DEF_HELPER_2(rxint, noreturn, env, i32)
DEF_HELPER_1(rxbrk, noreturn, env)
-DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_3(fsub, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_WG, void, env, f32, f32)
-DEF_HELPER_FLAGS_2(ftoi, TCG_CALL_NO_WG, i32, env, f32)
-DEF_HELPER_FLAGS_2(round, TCG_CALL_NO_WG, i32, env, f32)
-DEF_HELPER_FLAGS_2(itof, TCG_CALL_NO_WG, f32, env, i32)
+DEF_HELPER_3(fadd, f32, env, f32, f32)
+DEF_HELPER_3(fsub, f32, env, f32, f32)
+DEF_HELPER_3(fmul, f32, env, f32, f32)
+DEF_HELPER_3(fdiv, f32, env, f32, f32)
+DEF_HELPER_3(fcmp, void, env, f32, f32)
+DEF_HELPER_2(ftoi, i32, env, f32)
+DEF_HELPER_2(round, i32, env, f32)
+DEF_HELPER_2(itof, f32, env, i32)
DEF_HELPER_2(set_fpsw, void, env, i32)
-DEF_HELPER_FLAGS_2(racw, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(set_psw_rte, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(set_psw, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_2(racw, void, env, i32)
+DEF_HELPER_2(set_psw_rte, void, env, i32)
+DEF_HELPER_2(set_psw, void, env, i32)
DEF_HELPER_1(pack_psw, i32, env)
-DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_1(scmpu, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_3(div, i32, env, i32, i32)
+DEF_HELPER_3(divu, i32, env, i32, i32)
+DEF_HELPER_1(scmpu, void, env)
DEF_HELPER_1(smovu, void, env)
DEF_HELPER_1(smovf, void, env)
DEF_HELPER_1(smovb, void, env)
DEF_HELPER_2(sstr, void, env, i32)
-DEF_HELPER_FLAGS_2(swhile, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(suntil, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(rmpa, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_2(swhile, void, env, i32)
+DEF_HELPER_2(suntil, void, env, i32)
+DEF_HELPER_2(rmpa, void, env, i32)
DEF_HELPER_1(satr, void, env)
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
index 691a12b..2b190a4 100644
--- a/target/rx/op_helper.c
+++ b/target/rx/op_helper.c
@@ -19,9 +19,8 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
#include "tcg/debug-assert.h"
@@ -99,8 +98,8 @@ static void update_fpsw(CPURXState *env, float32 ret, uintptr_t retaddr)
if (xcpt & float_flag_inexact) {
SET_FPSW(X);
}
- if ((xcpt & (float_flag_input_denormal
- | float_flag_output_denormal))
+ if ((xcpt & (float_flag_input_denormal_flushed
+ | float_flag_output_denormal_flushed))
&& !FIELD_EX32(env->fpsw, FPSW, DN)) {
env->fpsw = FIELD_DP32(env->fpsw, FPSW, CE, 1);
}
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 9b81cf2..19a9584 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -20,11 +20,11 @@
#include "qemu/bswap.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#define HELPER_H "helper.h"
@@ -85,7 +85,8 @@ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
static uint32_t li(DisasContext *ctx, int sz)
{
- int32_t tmp, addr;
+ target_ulong addr;
+ uint32_t tmp;
CPURXState *env = ctx->env;
addr = ctx->base.pc_next;
@@ -2256,8 +2257,8 @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
index 029d91d..2c26e99 100644
--- a/target/s390x/arch_dump.c
+++ b/target/s390x/arch_dump.c
@@ -16,7 +16,7 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "elf.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
#include "kvm/kvm_s390x.h"
#include "target/s390x/kvm/pv.h"
diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c
index 69cc9f7..869d3a4 100644
--- a/target/s390x/cpu-dump.c
+++ b/target/s390x/cpu-dump.c
@@ -23,7 +23,7 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "qemu/qemu-print.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h
index 11d23b6..abfae3b 100644
--- a/target/s390x/cpu-param.h
+++ b/target/s390x/cpu-param.h
@@ -2,21 +2,16 @@
* S/390 cpu parameters for qemu.
*
* Copyright (c) 2009 Ulrich Hecht
- * SPDX-License-Identifier: GPL-2.0+
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef S390_CPU_PARAM_H
#define S390_CPU_PARAM_H
-#define TARGET_LONG_BITS 64
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
-/*
- * The z/Architecture has a strong memory model with some
- * store-after-load re-ordering.
- */
-#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+#define TARGET_INSN_START_EXTRA_WORDS 2
#endif
diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
deleted file mode 100644
index 1cd30c1..0000000
--- a/target/s390x/cpu-sysemu.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * QEMU S/390 CPU - System Emulation-only code
- *
- * Copyright (c) 2009 Ulrich Hecht
- * Copyright (c) 2011 Alexander Graf
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- * Copyright (c) 2012 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qapi/error.h"
-#include "cpu.h"
-#include "s390x-internal.h"
-#include "kvm/kvm_s390x.h"
-#include "sysemu/kvm.h"
-#include "sysemu/reset.h"
-#include "qemu/timer.h"
-#include "trace.h"
-#include "qapi/qapi-visit-run-state.h"
-#include "sysemu/hw_accel.h"
-
-#include "target/s390x/kvm/pv.h"
-#include "hw/boards.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/tcg.h"
-#include "hw/core/sysemu-cpu-ops.h"
-
-/* S390CPUClass::load_normal() */
-static void s390_cpu_load_normal(CPUState *s)
-{
- S390CPU *cpu = S390_CPU(s);
- uint64_t spsw;
-
- if (!s390_is_pv()) {
- spsw = ldq_phys(s->as, 0);
- cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL;
- /*
- * Invert short psw indication, so SIE will report a specification
- * exception if it was not set.
- */
- cpu->env.psw.mask ^= PSW_MASK_SHORTPSW;
- cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR;
- } else {
- /*
- * Firmware requires us to set the load state before we set
- * the cpu to operating on protected guests.
- */
- s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu);
- }
- s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
-}
-
-void s390_cpu_machine_reset_cb(void *opaque)
-{
- S390CPU *cpu = opaque;
-
- run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
-}
-
-static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs)
-{
- GuestPanicInformation *panic_info;
- S390CPU *cpu = S390_CPU(cs);
-
- cpu_synchronize_state(cs);
- panic_info = g_new0(GuestPanicInformation, 1);
-
- panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390;
- panic_info->u.s390.core = cpu->env.core_id;
- panic_info->u.s390.psw_mask = cpu->env.psw.mask;
- panic_info->u.s390.psw_addr = cpu->env.psw.addr;
- panic_info->u.s390.reason = cpu->env.crash_reason;
-
- return panic_info;
-}
-
-static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v,
- const char *name, void *opaque,
- Error **errp)
-{
- CPUState *cs = CPU(obj);
- GuestPanicInformation *panic_info;
-
- if (!cs->crash_occurred) {
- error_setg(errp, "No crash occurred");
- return;
- }
-
- panic_info = s390_cpu_get_crash_info(cs);
-
- visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
- errp);
- qapi_free_GuestPanicInformation(panic_info);
-}
-
-void s390_cpu_init_sysemu(Object *obj)
-{
- CPUState *cs = CPU(obj);
- S390CPU *cpu = S390_CPU(obj);
-
- cs->start_powered_off = true;
- object_property_add(obj, "crash-information", "GuestPanicInformation",
- s390_cpu_get_crash_info_qom, NULL, NULL, NULL);
- cpu->env.tod_timer =
- timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
- cpu->env.cpu_timer =
- timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
- s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
-}
-
-bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp)
-{
- S390CPU *cpu = S390_CPU(dev);
- MachineState *ms = MACHINE(qdev_get_machine());
- unsigned int max_cpus = ms->smp.max_cpus;
-
- if (cpu->env.core_id >= max_cpus) {
- error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
- ", maximum core-id: %d", cpu->env.core_id,
- max_cpus - 1);
- return false;
- }
-
- if (cpu_exists(cpu->env.core_id)) {
- error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
- ", it already exists", cpu->env.core_id);
- return false;
- }
-
- /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */
- CPU(cpu)->cpu_index = cpu->env.core_id;
- return true;
-}
-
-void s390_cpu_finalize(Object *obj)
-{
- S390CPU *cpu = S390_CPU(obj);
-
- timer_free(cpu->env.tod_timer);
- timer_free(cpu->env.cpu_timer);
-
- qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu);
- g_free(cpu->irqstate);
-}
-
-static const struct SysemuCPUOps s390_sysemu_ops = {
- .get_phys_page_debug = s390_cpu_get_phys_page_debug,
- .get_crash_info = s390_cpu_get_crash_info,
- .write_elf64_note = s390_cpu_write_elf64_note,
- .legacy_vmsd = &vmstate_s390_cpu,
-};
-
-void s390_cpu_class_init_sysemu(CPUClass *cc)
-{
- S390CPUClass *scc = S390_CPU_CLASS(cc);
-
- scc->load_normal = s390_cpu_load_normal;
- cc->sysemu_ops = &s390_sysemu_ops;
-}
-
-static bool disabled_wait(CPUState *cpu)
-{
- return cpu->halted && !(S390_CPU(cpu)->env.psw.mask &
- (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
-}
-
-static unsigned s390_count_running_cpus(void)
-{
- CPUState *cpu;
- int nr_running = 0;
-
- CPU_FOREACH(cpu) {
- uint8_t state = S390_CPU(cpu)->env.cpu_state;
- if (state == S390_CPU_STATE_OPERATING ||
- state == S390_CPU_STATE_LOAD) {
- if (!disabled_wait(cpu)) {
- nr_running++;
- }
- }
- }
-
- return nr_running;
-}
-
-unsigned int s390_cpu_halt(S390CPU *cpu)
-{
- CPUState *cs = CPU(cpu);
- trace_cpu_halt(cs->cpu_index);
-
- if (!cs->halted) {
- cs->halted = 1;
- cs->exception_index = EXCP_HLT;
- }
-
- return s390_count_running_cpus();
-}
-
-void s390_cpu_unhalt(S390CPU *cpu)
-{
- CPUState *cs = CPU(cpu);
- trace_cpu_unhalt(cs->cpu_index);
-
- if (cs->halted) {
- cs->halted = 0;
- cs->exception_index = -1;
- }
-}
-
-unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
- {
- trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state);
-
- switch (cpu_state) {
- case S390_CPU_STATE_STOPPED:
- case S390_CPU_STATE_CHECK_STOP:
- /* halt the cpu for common infrastructure */
- s390_cpu_halt(cpu);
- break;
- case S390_CPU_STATE_OPERATING:
- case S390_CPU_STATE_LOAD:
- /*
- * Starting a CPU with a PSW WAIT bit set:
- * KVM: handles this internally and triggers another WAIT exit.
- * TCG: will actually try to continue to run. Don't unhalt, will
- * be done when the CPU actually has work (an interrupt).
- */
- if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) {
- s390_cpu_unhalt(cpu);
- }
- break;
- default:
- error_report("Requested CPU state is not a valid S390 CPU state: %u",
- cpu_state);
- exit(1);
- }
- if (kvm_enabled() && cpu->env.cpu_state != cpu_state) {
- kvm_s390_set_cpu_state(cpu, cpu_state);
- }
- cpu->env.cpu_state = cpu_state;
-
- return s390_count_running_cpus();
-}
-
-int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
-{
- if (kvm_enabled()) {
- return kvm_s390_set_mem_limit(new_limit, hw_limit);
- }
- return 0;
-}
-
-void s390_set_max_pagesize(uint64_t pagesize, Error **errp)
-{
- if (kvm_enabled()) {
- kvm_s390_set_max_pagesize(pagesize, errp);
- }
-}
-
-void s390_cmma_reset(void)
-{
- if (kvm_enabled()) {
- kvm_s390_cmma_reset();
- }
-}
-
-int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
- int vq, bool assign)
-{
- if (kvm_enabled()) {
- return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
- } else {
- return 0;
- }
-}
-
-void s390_crypto_reset(void)
-{
- if (kvm_enabled()) {
- kvm_s390_crypto_reset();
- }
-}
-
-void s390_enable_css_support(S390CPU *cpu)
-{
- if (kvm_enabled()) {
- kvm_s390_enable_css_support(cpu);
- }
-}
-
-void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg)
-{
- if (kvm_enabled()) {
- kvm_s390_set_diag318(cs, arg.host_ulong);
- }
-}
-
-void s390_cpu_topology_set_changed(bool changed)
-{
- int ret;
-
- if (kvm_enabled()) {
- ret = kvm_s390_topology_set_mtcr(changed);
- if (ret) {
- error_report("Failed to set Modified Topology Change Report: %s",
- strerror(-ret));
- }
- }
-}
diff --git a/target/s390x/cpu-system.c b/target/s390x/cpu-system.c
new file mode 100644
index 0000000..9b380e3
--- /dev/null
+++ b/target/s390x/cpu-system.c
@@ -0,0 +1,325 @@
+/*
+ * QEMU S/390 CPU - System-only code
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "system/kvm.h"
+#include "system/reset.h"
+#include "qemu/timer.h"
+#include "trace.h"
+#include "qapi/qapi-visit-run-state.h"
+#include "system/hw_accel.h"
+
+#include "target/s390x/kvm/pv.h"
+#include "hw/boards.h"
+#include "system/system.h"
+#include "system/tcg.h"
+#include "hw/core/sysemu-cpu-ops.h"
+
+bool s390_cpu_has_work(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ /* STOPPED cpus can never wake up */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD &&
+ s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
+ return false;
+ }
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+
+ return s390_cpu_has_int(cpu);
+}
+
+/* S390CPUClass::load_normal() */
+static void s390_cpu_load_normal(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ uint64_t spsw;
+
+ if (!s390_is_pv()) {
+ spsw = ldq_phys(s->as, 0);
+ cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL;
+ /*
+ * Invert short psw indication, so SIE will report a specification
+ * exception if it was not set.
+ */
+ cpu->env.psw.mask ^= PSW_MASK_SHORTPSW;
+ cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR;
+ } else {
+ /*
+ * Firmware requires us to set the load state before we set
+ * the cpu to operating on protected guests.
+ */
+ s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu);
+ }
+ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+}
+
+void s390_cpu_machine_reset_cb(void *opaque)
+{
+ S390CPU *cpu = opaque;
+
+ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+}
+
+static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs)
+{
+ GuestPanicInformation *panic_info;
+ S390CPU *cpu = S390_CPU(cs);
+
+ cpu_synchronize_state(cs);
+ panic_info = g_new0(GuestPanicInformation, 1);
+
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390;
+ panic_info->u.s390.core = cpu->env.core_id;
+ panic_info->u.s390.psw_mask = cpu->env.psw.mask;
+ panic_info->u.s390.psw_addr = cpu->env.psw.addr;
+ panic_info->u.s390.reason = cpu->env.crash_reason;
+
+ return panic_info;
+}
+
+static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CPUState *cs = CPU(obj);
+ GuestPanicInformation *panic_info;
+
+ if (!cs->crash_occurred) {
+ error_setg(errp, "No crash occurred");
+ return;
+ }
+
+ panic_info = s390_cpu_get_crash_info(cs);
+
+ visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
+ errp);
+ qapi_free_GuestPanicInformation(panic_info);
+}
+
+void s390_cpu_system_init(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ S390CPU *cpu = S390_CPU(obj);
+
+ cs->start_powered_off = true;
+ object_property_add(obj, "crash-information", "GuestPanicInformation",
+ s390_cpu_get_crash_info_qom, NULL, NULL, NULL);
+ cpu->env.tod_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
+ cpu->env.cpu_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
+ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+}
+
+bool s390_cpu_system_realize(DeviceState *dev, Error **errp)
+{
+ S390CPU *cpu = S390_CPU(dev);
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int max_cpus = ms->smp.max_cpus;
+
+ if (cpu->env.core_id >= max_cpus) {
+ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
+ ", maximum core-id: %d", cpu->env.core_id,
+ max_cpus - 1);
+ return false;
+ }
+
+ if (cpu_exists(cpu->env.core_id)) {
+ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
+ ", it already exists", cpu->env.core_id);
+ return false;
+ }
+
+ /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */
+ CPU(cpu)->cpu_index = cpu->env.core_id;
+ return true;
+}
+
+void s390_cpu_finalize(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ timer_free(cpu->env.tod_timer);
+ timer_free(cpu->env.cpu_timer);
+
+ qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu);
+ g_free(cpu->irqstate);
+}
+
+static const struct SysemuCPUOps s390_sysemu_ops = {
+ .has_work = s390_cpu_has_work,
+ .get_phys_page_debug = s390_cpu_get_phys_page_debug,
+ .get_crash_info = s390_cpu_get_crash_info,
+ .write_elf64_note = s390_cpu_write_elf64_note,
+ .legacy_vmsd = &vmstate_s390_cpu,
+};
+
+void s390_cpu_system_class_init(CPUClass *cc)
+{
+ S390CPUClass *scc = S390_CPU_CLASS(cc);
+
+ scc->load_normal = s390_cpu_load_normal;
+ cc->sysemu_ops = &s390_sysemu_ops;
+}
+
+static bool disabled_wait(CPUState *cpu)
+{
+ return cpu->halted && !(S390_CPU(cpu)->env.psw.mask &
+ (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
+}
+
+static unsigned s390_count_running_cpus(void)
+{
+ CPUState *cpu;
+ int nr_running = 0;
+
+ CPU_FOREACH(cpu) {
+ uint8_t state = S390_CPU(cpu)->env.cpu_state;
+ if (state == S390_CPU_STATE_OPERATING ||
+ state == S390_CPU_STATE_LOAD) {
+ if (!disabled_wait(cpu)) {
+ nr_running++;
+ }
+ }
+ }
+
+ return nr_running;
+}
+
+unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_halt(cs->cpu_index);
+
+ if (!cs->halted) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+
+ return s390_count_running_cpus();
+}
+
+void s390_cpu_unhalt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_unhalt(cs->cpu_index);
+
+ if (cs->halted) {
+ cs->halted = 0;
+ cs->exception_index = -1;
+ }
+}
+
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+ {
+ trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state);
+
+ switch (cpu_state) {
+ case S390_CPU_STATE_STOPPED:
+ case S390_CPU_STATE_CHECK_STOP:
+ /* halt the cpu for common infrastructure */
+ s390_cpu_halt(cpu);
+ break;
+ case S390_CPU_STATE_OPERATING:
+ case S390_CPU_STATE_LOAD:
+ /*
+ * Starting a CPU with a PSW WAIT bit set:
+ * KVM: handles this internally and triggers another WAIT exit.
+ * TCG: will actually try to continue to run. Don't unhalt, will
+ * be done when the CPU actually has work (an interrupt).
+ */
+ if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) {
+ s390_cpu_unhalt(cpu);
+ }
+ break;
+ default:
+ error_report("Requested CPU state is not a valid S390 CPU state: %u",
+ cpu_state);
+ exit(1);
+ }
+ if (kvm_enabled() && cpu->env.cpu_state != cpu_state) {
+ kvm_s390_set_cpu_state(cpu, cpu_state);
+ }
+ cpu->env.cpu_state = cpu_state;
+
+ return s390_count_running_cpus();
+}
+
+void s390_cmma_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_cmma_reset();
+ }
+}
+
+int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
+ int vq, bool assign)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
+ } else {
+ return 0;
+ }
+}
+
+void s390_crypto_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_crypto_reset();
+ }
+}
+
+void s390_enable_css_support(S390CPU *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_s390_enable_css_support(cpu);
+ }
+}
+
+void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg)
+{
+ if (kvm_enabled()) {
+ kvm_s390_set_diag318(cs, arg.host_ulong);
+ }
+}
+
+void s390_cpu_topology_set_changed(bool changed)
+{
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_topology_set_mtcr(changed);
+ if (ret) {
+ error_report("Failed to set Modified Topology Change Report: %s",
+ strerror(-ret));
+ }
+ }
+}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 0fbfcd3..f05ce31 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -25,18 +25,19 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "kvm/kvm_s390x.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "qemu/module.h"
#include "trace.h"
#include "qapi/qapi-types-machine.h"
-#include "sysemu/hw_accel.h"
+#include "system/hw_accel.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
+#include "hw/resettable.h"
#include "fpu/softfloat-helpers.h"
#include "disas/capstone.h"
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu/reset.h"
+#include "system/reset.h"
#endif
#include "hw/s390x/cpu-topology.h"
@@ -125,28 +126,6 @@ static vaddr s390_cpu_get_pc(CPUState *cs)
return cpu->env.psw.addr;
}
-static bool s390_cpu_has_work(CPUState *cs)
-{
- S390CPU *cpu = S390_CPU(cs);
-
- /* STOPPED cpus can never wake up */
- if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD &&
- s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
- return false;
- }
-
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
- return false;
- }
-
- return s390_cpu_has_int(cpu);
-}
-
-static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return s390x_env_mmu_index(cpu_env(cs), ifetch);
-}
-
static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
{
S390CPU *s390_cpu = S390_CPU(cpu);
@@ -162,23 +141,25 @@ static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
#endif
}
-/* S390CPUClass::reset() */
-static void s390_cpu_reset(CPUState *s, cpu_reset_type type)
+/* S390CPUClass Resettable reset_hold phase method */
+static void s390_cpu_reset_hold(Object *obj, ResetType type)
{
- S390CPU *cpu = S390_CPU(s);
+ S390CPU *cpu = S390_CPU(obj);
S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
CPUS390XState *env = &cpu->env;
- DeviceState *dev = DEVICE(s);
- scc->parent_reset(dev);
+ if (scc->parent_phases.hold) {
+ scc->parent_phases.hold(obj, type);
+ }
cpu->env.sigp_order = 0;
s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
switch (type) {
- case S390_CPU_RESET_CLEAR:
+ default:
+ /* RESET_TYPE_COLD: power on or "clear" reset */
memset(env, 0, offsetof(CPUS390XState, start_initial_reset_fields));
/* fall through */
- case S390_CPU_RESET_INITIAL:
+ case RESET_TYPE_S390_CPU_INITIAL:
/* initial reset does not clear everything! */
memset(&env->start_initial_reset_fields, 0,
offsetof(CPUS390XState, start_normal_reset_fields) -
@@ -202,8 +183,14 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type)
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,
&env->fpu_status);
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fpu_status);
+ set_float_3nan_prop_rule(float_3nan_prop_s_abc, &env->fpu_status);
+ set_float_infzeronan_rule(float_infzeronan_dnan_always,
+ &env->fpu_status);
+ /* Default NaN value: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &env->fpu_status);
/* fall through */
- case S390_CPU_RESET_NORMAL:
+ case RESET_TYPE_S390_CPU_NORMAL:
env->psw.mask &= ~PSW_MASK_RI;
memset(&env->start_normal_reset_fields, 0,
offsetof(CPUS390XState, end_reset_fields) -
@@ -212,20 +199,18 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type)
env->pfault_token = -1UL;
env->bpbc = false;
break;
- default:
- g_assert_not_reached();
}
/* Reset state inside the kernel that we cannot access yet from QEMU. */
if (kvm_enabled()) {
switch (type) {
- case S390_CPU_RESET_CLEAR:
+ default:
kvm_s390_reset_vcpu_clear(cpu);
break;
- case S390_CPU_RESET_INITIAL:
+ case RESET_TYPE_S390_CPU_INITIAL:
kvm_s390_reset_vcpu_initial(cpu);
break;
- case S390_CPU_RESET_NORMAL:
+ case RESET_TYPE_S390_CPU_NORMAL:
kvm_s390_reset_vcpu_normal(cpu);
break;
}
@@ -236,6 +221,7 @@ static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
info->mach = bfd_mach_s390_64;
info->cap_arch = CS_ARCH_SYSZ;
+ info->endian = BFD_ENDIAN_BIG;
info->cap_insn_unit = 2;
info->cap_insn_split = 6;
}
@@ -253,7 +239,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
}
#if !defined(CONFIG_USER_ONLY)
- if (!s390_cpu_realize_sysemu(dev, &err)) {
+ if (!s390_cpu_system_realize(dev, &err)) {
goto out;
}
#endif
@@ -293,7 +279,7 @@ static void s390_cpu_initfn(Object *obj)
cs->exception_index = EXCP_HLT;
#if !defined(CONFIG_USER_ONLY)
- s390_cpu_init_sysemu(obj);
+ s390_cpu_system_init(obj);
#endif
}
@@ -302,8 +288,8 @@ static const gchar *s390_gdb_arch_name(CPUState *cs)
return "s390:64-bit";
}
-static Property s390x_cpu_properties[] = {
-#if !defined(CONFIG_USER_ONLY)
+#ifndef CONFIG_USER_ONLY
+static const Property s390x_cpu_properties[] = {
DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0),
DEFINE_PROP_INT32("socket-id", S390CPU, env.socket_id, -1),
DEFINE_PROP_INT32("book-id", S390CPU, env.book_id, -1),
@@ -311,22 +297,21 @@ static Property s390x_cpu_properties[] = {
DEFINE_PROP_BOOL("dedicated", S390CPU, env.dedicated, false),
DEFINE_PROP_CPUS390ENTITLEMENT("entitlement", S390CPU, env.entitlement,
S390_CPU_ENTITLEMENT_AUTO),
-#endif
- DEFINE_PROP_END_OF_LIST()
};
+#endif
-static void s390_cpu_reset_full(DeviceState *dev)
+#ifdef CONFIG_TCG
+#include "accel/tcg/cpu-ops.h"
+#include "tcg/tcg_s390x.h"
+
+static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch)
{
- CPUState *s = CPU(dev);
- return s390_cpu_reset(s, S390_CPU_RESET_CLEAR);
+ return s390x_env_mmu_index(cpu_env(cs), ifetch);
}
-#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
-
-void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+static TCGTBCPUState s390x_get_tb_cpu_state(CPUState *cs)
{
+ CPUS390XState *env = cpu_env(cs);
uint32_t flags;
if (env->psw.addr & 1) {
@@ -338,9 +323,6 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0);
}
- *pc = env->psw.addr;
- *cs_base = env->ex_value;
-
flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
if (env->psw.mask & PSW_MASK_PER) {
flags |= env->cregs[9] & (FLAG_MASK_PER_BRANCH |
@@ -357,20 +339,46 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
if (env->cregs[0] & CR0_VECTOR) {
flags |= FLAG_MASK_VECTOR;
}
- *pflags = flags;
+
+ return (TCGTBCPUState){
+ .pc = env->psw.addr,
+ .flags = flags,
+ .cs_base = env->ex_value,
+ };
}
+#ifndef CONFIG_USER_ONLY
+static vaddr s390_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return wrap_address(cpu_env(cs), result);
+}
+#endif
+
static const TCGCPUOps s390_tcg_ops = {
+ .mttcg_supported = true,
+ .precise_smc = true,
+ /*
+ * The z/Architecture has a strong memory model with some
+ * store-after-load re-ordering.
+ */
+ .guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD,
+
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
+ .get_tb_cpu_state = s390x_get_tb_cpu_state,
.restore_state_to_opc = s390x_restore_state_to_opc,
+ .mmu_index = s390x_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = s390_cpu_record_sigsegv,
.record_sigbus = s390_cpu_record_sigbus,
#else
.tlb_fill = s390_cpu_tlb_fill,
+ .pointer_wrap = s390_pointer_wrap,
.cpu_exec_interrupt = s390_cpu_exec_interrupt,
.cpu_exec_halt = s390_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = s390_cpu_do_interrupt,
.debug_excp_handler = s390x_cpu_debug_excp_handler,
.do_unaligned_access = s390x_cpu_do_unaligned_access,
@@ -378,23 +386,22 @@ static const TCGCPUOps s390_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void s390_cpu_class_init(ObjectClass *oc, void *data)
+static void s390_cpu_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *scc = S390_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(scc);
DeviceClass *dc = DEVICE_CLASS(oc);
+ ResettableClass *rc = RESETTABLE_CLASS(oc);
device_class_set_parent_realize(dc, s390_cpu_realizefn,
&scc->parent_realize);
- device_class_set_props(dc, s390x_cpu_properties);
dc->user_creatable = true;
- device_class_set_parent_reset(dc, s390_cpu_reset_full, &scc->parent_reset);
+ resettable_class_set_parent_phases(rc, NULL, s390_cpu_reset_hold, NULL,
+ &scc->parent_phases);
- scc->reset = s390_cpu_reset;
- cc->class_by_name = s390_cpu_class_by_name,
- cc->has_work = s390_cpu_has_work;
- cc->mmu_index = s390x_cpu_mmu_index;
+ cc->class_by_name = s390_cpu_class_by_name;
+ cc->list_cpus = s390_cpu_list;
cc->dump_state = s390_cpu_dump_state;
cc->query_cpu_fast = s390_query_cpu_fast;
cc->set_pc = s390_cpu_set_pc;
@@ -402,7 +409,8 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
- s390_cpu_class_init_sysemu(cc);
+ device_class_set_props(dc, s390x_cpu_properties);
+ s390_cpu_system_class_init(cc);
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
cc->gdb_core_xml_file = "s390x-core64.xml";
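
The hunks above convert the s390x CPU from the legacy DeviceClass reset hook to the three-phase Resettable interface. A minimal sketch of that hold-phase chaining pattern, assuming the usual QEMU headers are in scope; the MyCPU/my_cpu_* names are placeholders for illustration, not QEMU identifiers:

/* Sketch only: "MyCPU" is a placeholder type following the pattern above. */
static void my_cpu_reset_hold(Object *obj, ResetType type)
{
    MyCPUClass *mcc = MY_CPU_GET_CLASS(obj);

    /* chain to the parent's hold phase first, as s390_cpu_reset_hold() does */
    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
    /* ...clear device state here, switching on "type" as needed... */
}

static void my_cpu_class_init(ObjectClass *oc, const void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(oc);
    MyCPUClass *mcc = MY_CPU_CLASS(oc);

    /* register only a hold phase; enter/exit remain the parent's */
    resettable_class_set_parent_phases(rc, NULL, my_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);
}
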
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index d6b75ad..aa931cb 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -27,16 +27,14 @@
#include "cpu-qom.h"
#include "cpu_models.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
#include "qapi/qapi-types-machine-common.h"
#define ELF_MACHINE_UNAME "S390X"
-#define TARGET_HAS_PRECISE_SMC
-
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
#define MMU_USER_IDX 0
#define S390_MAX_CPUS 248
@@ -133,7 +131,7 @@ typedef struct CPUArchState {
int32_t book_id;
int32_t drawer_id;
bool dedicated;
- CpuS390Entitlement entitlement; /* Used only for vertical polarization */
+ S390CpuEntitlement entitlement; /* Used only for vertical polarization */
uint64_t cpuid;
#endif
@@ -177,19 +175,11 @@ struct ArchCPU {
uint32_t irqstate_saved_size;
};
-typedef enum cpu_reset_type {
- S390_CPU_RESET_NORMAL,
- S390_CPU_RESET_INITIAL,
- S390_CPU_RESET_CLEAR,
-} cpu_reset_type;
-
/**
* S390CPUClass:
* @parent_realize: The parent class' realize handler.
- * @parent_reset: The parent class' reset handler.
+ * @parent_phases: The parent class' reset phase handlers.
* @load_normal: Performs a load normal.
- * @cpu_reset: Performs a CPU reset.
- * @initial_cpu_reset: Performs an initial CPU reset.
*
* An S/390 CPU model.
*/
@@ -203,9 +193,8 @@ struct S390CPUClass {
const char *desc;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
+ ResettablePhases parent_phases;
void (*load_normal)(CPUState *cpu);
- void (*reset)(CPUState *cpu, cpu_reset_type type);
};
#ifndef CONFIG_USER_ONLY
@@ -422,15 +411,6 @@ static inline int s390x_env_mmu_index(CPUS390XState *env, bool ifetch)
#endif
}
-#ifdef CONFIG_TCG
-
-#include "tcg/tcg_s390x.h"
-
-void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
-#endif /* CONFIG_TCG */
-
/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH 0x80000000
#define PER_CR9_EVENT_IFETCH 0x40000000
@@ -872,16 +852,12 @@ static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
- S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
-
- scc->reset(cs, S390_CPU_RESET_NORMAL);
+ resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_NORMAL);
}
static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg)
{
- S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
-
- scc->reset(cs, S390_CPU_RESET_INITIAL);
+ resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_INITIAL);
}
static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg)
@@ -894,8 +870,6 @@ static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg)
/* cpu.c */
void s390_crypto_reset(void);
-int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit);
-void s390_set_max_pagesize(uint64_t pagesize, Error **errp);
void s390_cmma_reset(void);
void s390_enable_css_support(S390CPU *cpu);
void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg);
@@ -915,13 +889,6 @@ static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
}
-/* cpu_models.c */
-void s390_cpu_list(void);
-#define cpu_list s390_cpu_list
-void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
- const S390FeatInit feat_init);
-
-
/* helper.c */
#define CPU_RESOLVING_TYPE TYPE_S390_CPU
@@ -961,6 +928,4 @@ uint64_t s390_cpu_get_psw_mask(CPUS390XState *env);
/* outside of target/s390x/ */
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
-#include "exec/cpu-all.h"
-
#endif
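
With the per-class reset() callback removed, the s390_do_cpu_reset() and s390_do_cpu_initial_reset() helpers above now call resettable_reset() with the target-specific RESET_TYPE_S390_CPU_* values. A hedged sketch of how such a helper is typically driven, using the existing run_on_cpu() API so the reset executes in the target vCPU's context (the wrapper function name is illustrative only):

/* Sketch: queue an initial CPU reset onto a vCPU via run_on_cpu(). */
static void request_initial_cpu_reset(CPUState *cs)
{
    /* replaces the former scc->reset(cs, S390_CPU_RESET_INITIAL) call chain */
    run_on_cpu(cs, s390_do_cpu_initial_reset, RUN_ON_CPU_NULL);
}
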
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index cb4e2b8..4b5be67 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -93,6 +93,7 @@ void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
case S390_FEAT_TYPE_KDSA:
case S390_FEAT_TYPE_SORTL:
case S390_FEAT_TYPE_DFLTCC:
+ case S390_FEAT_TYPE_PFCR:
set_be_bit(0, data); /* query is always available */
break;
default:
@@ -239,8 +240,10 @@ void s390_get_deprecated_features(S390FeatBitmap features)
/* indexed by feature group number for easy lookup */
static S390FeatGroupDef s390_feature_groups[] = {
FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"),
+ FEAT_GROUP_INIT("plo_ext", PLO_EXT, "PLO-extension facility"),
FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"),
FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"),
+ FEAT_GROUP_INIT("gen17ptff", GEN17_PTFF, "PTFF enhancements introduced with gen17"),
FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"),
FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"),
FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"),
@@ -252,9 +255,17 @@ static S390FeatGroupDef s390_feature_groups[] = {
FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"),
FEAT_GROUP_INIT("msa9", MSA_EXT_9, "Message-security-assist-extension 9 facility"),
FEAT_GROUP_INIT("msa9_pckmo", MSA_EXT_9_PCKMO, "Message-security-assist-extension 9 PCKMO subfunctions"),
+ FEAT_GROUP_INIT("msa10", MSA_EXT_10, "Message-security-assist-extension 10 facility"),
+ FEAT_GROUP_INIT("msa10_pckmo", MSA_EXT_10_PCKMO, "Message-security-assist-extension 10 PCKMO subfunctions"),
+ FEAT_GROUP_INIT("msa11", MSA_EXT_11, "Message-security-assist-extension 11 facility"),
+ FEAT_GROUP_INIT("msa11_pckmo", MSA_EXT_11_PCKMO, "Message-security-assist-extension 11 PCKMO subfunctions"),
+ FEAT_GROUP_INIT("msa12", MSA_EXT_12, "Message-security-assist-extension 12 facility"),
+ FEAT_GROUP_INIT("msa13", MSA_EXT_13, "Message-security-assist-extension 13 facility"),
+ FEAT_GROUP_INIT("msa13_pckmo", MSA_EXT_13_PCKMO, "Message-security-assist-extension 13 PCKMO subfunctions"),
FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"),
FEAT_GROUP_INIT("esort", ENH_SORT, "Enhanced-sort facility"),
FEAT_GROUP_INIT("deflate", DEFLATE_CONVERSION, "Deflate-conversion facility"),
+ FEAT_GROUP_INIT("ccf", CONCURRENT_FUNCTIONS, "Concurrent-functions facility"),
};
const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group)
diff --git a/target/s390x/cpu_features.h b/target/s390x/cpu_features.h
index 661a8cd..5635839 100644
--- a/target/s390x/cpu_features.h
+++ b/target/s390x/cpu_features.h
@@ -44,6 +44,7 @@ typedef enum {
S390_FEAT_TYPE_SORTL,
S390_FEAT_TYPE_DFLTCC,
S390_FEAT_TYPE_UV_FEAT_GUEST,
+ S390_FEAT_TYPE_PFCR,
} S390FeatType;
/* Definition of a CPU feature */
diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc
index c53ac13..e23e603 100644
--- a/target/s390x/cpu_features_def.h.inc
+++ b/target/s390x/cpu_features_def.h.inc
@@ -90,6 +90,10 @@ DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2")
DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility")
DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed")
DEF_FEAT(BPB, "bpb", STFL, 82, "Branch prediction blocking")
+DEF_FEAT(MISC_INSTRUCTION_EXT4, "minste4", STFL, 84, "Miscellaneous-Instruction-Extensions Facility 4")
+DEF_FEAT(SIF, "sif", STFL, 85, "Sequential-instruction-fetching facility")
+DEF_FEAT(MSA_EXT_12, "msa12-base", STFL, 86, "Message-security-assist-extension-12 facility (excluding subfunctions)")
+DEF_FEAT(PLO_EXT, "plo-ext", STFL, 87, "PLO-extension facility")
DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility")
DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility")
DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2")
@@ -110,11 +114,15 @@ DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9
DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility")
DEF_FEAT(UNPACK, "unpack", STFL, 161, "Unpack facility")
DEF_FEAT(NNPA, "nnpa", STFL, 165, "NNPA facility")
+DEF_FEAT(INEFF_NC_TX, "ineff_nc_tx", STFL, 170, "Ineffective-nonconstrained-transaction facility")
DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH2, "vxpdeh2", STFL, 192, "Vector-Packed-Decimal-Enhancement facility 2")
DEF_FEAT(BEAR_ENH, "beareh", STFL, 193, "BEAR-enhancement facility")
DEF_FEAT(RDP, "rdp", STFL, 194, "Reset-DAT-protection facility")
DEF_FEAT(PAI, "pai", STFL, 196, "Processor-Activity-Instrumentation facility")
DEF_FEAT(PAIE, "paie", STFL, 197, "Processor-Activity-Instrumentation extension-1")
+DEF_FEAT(VECTOR_ENH3, "vxeh3", STFL, 198, "Vector Enhancements facility 3")
+DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH3, "vxpdeh3", STFL, 199, "Vector-Packed-Decimal-Enhancement facility 3")
+DEF_FEAT(CCF_BASE, "ccf-base", STFL, 201, "Concurrent-Functions facility")
/* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility")
@@ -151,28 +159,66 @@ DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed")
/* Features exposed via the PLO instruction. */
DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)")
DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)")
-DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)")
+DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (64 bit in general registers)")
DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)")
DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)")
DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)")
-DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (64 bit in general registers)")
DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)")
DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)")
DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)")
-DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (64 bit in general registers)")
DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)")
DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)")
DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)")
-DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)")
+DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (64 bit in general registers)")
DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)")
DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)")
DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)")
-DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)")
+DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (64 bit in general registers)")
DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)")
DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)")
DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)")
-DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)")
+DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (64 bit in general registers)")
DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)")
+DEF_FEAT(PLO_CLO, "plo-clo", PLO, 24, "PLO Compare and load (256 bit in parameter list)")
+DEF_FEAT(PLO_CSO, "plo-cso", PLO, 25, "PLO Compare and swap (256 bit in parameter list)")
+DEF_FEAT(PLO_DCSO, "plo-dcso", PLO, 26, "PLO Double compare and swap (256 bit in parameter list)")
+DEF_FEAT(PLO_CSSTO, "plo-cssto", PLO, 27, "PLO Compare and swap and store (256 bit in parameter list)")
+DEF_FEAT(PLO_CSDSTO, "plo-csdsto", PLO, 28, "PLO Compare and swap and double store (256 bit in parameter list)")
+DEF_FEAT(PLO_CSTSTO, "plo-cststo", PLO, 29, "PLO Compare and swap and triple store (256 bit in parameter list)")
+DEF_FEAT(PLO_TCS, "plo-tcs", PLO, 30, "Triple compare and swap (32 bit in parameter list)")
+DEF_FEAT(PLO_TCSG, "plo-tcsg", PLO, 31, "Triple compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_TCSX, "plo-tcsx", PLO, 32, "Triple compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_TCSO, "plo-tcso", PLO, 33, "Triple compare and swap (256 bit in parameter list)")
+DEF_FEAT(PLO_QCS, "plo-qcs", PLO, 34, "Quadruple compare and swap (32 bit in parameter list)")
+DEF_FEAT(PLO_QCSG, "plo-qcsg", PLO, 35, "Quadruple compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_QCSX, "plo-qcsx", PLO, 36, "Quadruple compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_QCSO, "plo-qcso", PLO, 37, "Quadruple compare and swap (256 bit in parameter list)")
+DEF_FEAT(PLO_LO, "plo-lo", PLO, 38, "Load (256 bit in parameter list)")
+DEF_FEAT(PLO_DLX, "plo-dlx", PLO, 39, "Double load (128 bit in parameter list)")
+DEF_FEAT(PLO_DLO, "plo-dlo", PLO, 40, "Double load (256 bit in parameter list)")
+DEF_FEAT(PLO_TL, "plo-tl", PLO, 41, "Triple load (32 bit in parameter list)")
+DEF_FEAT(PLO_TLG, "plo-tlg", PLO, 42, "Triple load (64 bit in parameter list)")
+DEF_FEAT(PLO_TLX, "plo-tlx", PLO, 43, "Triple load (128 bit in parameter list)")
+DEF_FEAT(PLO_TLO, "plo-tlo", PLO, 44, "Triple load (256 bit in parameter list)")
+DEF_FEAT(PLO_QL, "plo-ql", PLO, 45, "Quadruple load (32 bit in parameter list)")
+DEF_FEAT(PLO_QLG, "plo-qlg", PLO, 46, "Quadruple load (64 bit in parameter list)")
+DEF_FEAT(PLO_QLX, "plo-qlx", PLO, 47, "Quadruple load (128 bit in parameter list)")
+DEF_FEAT(PLO_QLO, "plo-qlo", PLO, 48, "Quadruple load (256 bit in parameter list)")
+DEF_FEAT(PLO_STO, "plo-sto", PLO, 49, "Store (256 bit in parameter list)")
+DEF_FEAT(PLO_DST, "plo-dst", PLO, 50, "Double store (32 bit in parameter list)")
+DEF_FEAT(PLO_DSTG, "plo-dstg", PLO, 51, "Double store (64 bit in parameter list)")
+DEF_FEAT(PLO_DSTX, "plo-dstx", PLO, 52, "Double store (128 bit in parameter list)")
+DEF_FEAT(PLO_DSTO, "plo-dsto", PLO, 53, "Double store (256 bit in parameter list)")
+DEF_FEAT(PLO_TST, "plo-tst", PLO, 54, "Triple store (32 bit in parameter list)")
+DEF_FEAT(PLO_TSTG, "plo-tstg", PLO, 55, "Triple store (64 bit in parameter list)")
+DEF_FEAT(PLO_TSTX, "plo-tstx", PLO, 56, "Triple store (128 bit in parameter list)")
+DEF_FEAT(PLO_TSTO, "plo-tsto", PLO, 57, "Triple store (256 bit in parameter list)")
+DEF_FEAT(PLO_QST, "plo-qst", PLO, 58, "Quadruple store (32 bit in parameter list)")
+DEF_FEAT(PLO_QSTG, "plo-qstg", PLO, 59, "Quadruple store (64 bit in parameter list)")
+DEF_FEAT(PLO_QSTX, "plo-qstx", PLO, 60, "Quadruple store (128 bit in parameter list)")
+DEF_FEAT(PLO_QSTO, "plo-qsto", PLO, 61, "Quadruple store (256 bit in parameter list)")
/* Features exposed via the PTFF instruction. */
DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset")
@@ -180,6 +226,7 @@ DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information")
DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock")
DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information")
DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User")
+DEF_FEAT(PTFF_QTSE, "ptff-qtse", PTFF, 6, "PTFF Query Time-Stamp Event")
DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended")
DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended")
DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset")
@@ -200,6 +247,15 @@ DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256")
DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128")
DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192")
DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256")
+DEF_FEAT(KMAC_HMAC_SHA_224, "kmac-hmac-sha-224", KMAC, 112, "KMAC HMAC-SHA-224")
+DEF_FEAT(KMAC_HMAC_SHA_256, "kmac-hmac-sha-256", KMAC, 113, "KMAC HMAC-SHA-256")
+DEF_FEAT(KMAC_HMAC_SHA_384, "kmac-hmac-sha-384", KMAC, 114, "KMAC HMAC-SHA-384")
+DEF_FEAT(KMAC_HMAC_SHA_512, "kmac-hmac-sha-512", KMAC, 115, "KMAC HMAC-SHA-512")
+DEF_FEAT(KMAC_HMAC_ESHA_224, "kmac-hmac-esha-224", KMAC, 120, "KMAC HMAC-Encrypted-SHA-224")
+DEF_FEAT(KMAC_HMAC_ESHA_256, "kmac-hmac-esha-256", KMAC, 121, "KMAC HMAC-Encrypted-SHA-256")
+DEF_FEAT(KMAC_HMAC_ESHA_384, "kmac-hmac-esha-384", KMAC, 122, "KMAC HMAC-Encrypted-SHA-384")
+DEF_FEAT(KMAC_HMAC_ESHA_512, "kmac-hmac-esha-512", KMAC, 123, "KMAC HMAC-Encrypted-SHA-512")
+DEF_FEAT(KMAC_QAI, "kmac-qai", KMAC, 127, "KMAC Query-Authentication-Information")
/* Features exposed via the KMC instruction. */
DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA")
@@ -233,6 +289,11 @@ DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128")
DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256")
DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128")
DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256")
+DEF_FEAT(KM_FULL_XTS_AES_128, "km-full-xts-aes-128", KM, 82, "KM Full-XTS-AES-128")
+DEF_FEAT(KM_FULL_XTS_AES_256, "km-full-xts-aes-256", KM, 84, "KM Full-XTS-AES-256")
+DEF_FEAT(KM_FULL_XTS_EAES_128, "km-full-xts-eaes-128", KM, 90, "KM Full-XTS-Encrypted-AES-128")
+DEF_FEAT(KM_FULL_XTS_EAES_256, "km-full-xts-eaes-256", KM, 92, "KM Full-XTS-Encrypted-AES-256")
+DEF_FEAT(KM_QAI, "km-qai", KM, 127, "KM Query-Authentication-Information")
/* Features exposed via the KIMD instruction. */
DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1")
@@ -245,6 +306,7 @@ DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512")
DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128")
DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256")
DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH")
+DEF_FEAT(KIMD_QAI, "kimd-qai", KIMD, 127, "KIMD Query-Authentication-Information")
/* Features exposed via the KLMD instruction. */
DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1")
@@ -256,6 +318,7 @@ DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384")
DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512")
DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128")
DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256")
+DEF_FEAT(KLMD_QAI, "klmd-qai", KLMD, 127, "KLMD Query-Authentication-Information")
/* Features exposed via the PCKMO instruction. */
DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key")
@@ -264,11 +327,16 @@ DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192
DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key")
DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key")
DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key")
+DEF_FEAT(PCKMO_AES_XTS_128_DK, "pckmo-aes-xts-128-dk", PCKMO, 21, "PCKMO Encrypt-AES-XTS-128-Double-Key")
+DEF_FEAT(PCKMO_AES_XTS_256_DK, "pckmo-aes-xts-256-dk", PCKMO, 22, "PCKMO Encrypt-AES-XTS-256-Double-Key")
DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key")
DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key")
DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key")
DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key")
DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key")
+DEF_FEAT(PCKMO_HMAC_512, "pckmo-hmac-512", PCKMO, 118, "PCKMO Encrypt-HMAC-512-Key")
+DEF_FEAT(PCKMO_HMAC_1024, "pckmo-hmac-1024", PCKMO, 122, "PCKMO Encrypt-HMAC-1024-Key")
+DEF_FEAT(PCKMO_QAI, "pckmo-qai", PCKMO, 127, "PCKMO Query-Authentication-Information")
/* Features exposed via the KMCTR instruction. */
DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA")
@@ -283,6 +351,7 @@ DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256")
DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128")
DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192")
DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256")
+DEF_FEAT(KMCTR_QAI, "kmctr-qai", KMCTR, 127, "KMCTR Query-Authentication-Information")
/* Features exposed via the KMF instruction. */
DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA")
@@ -297,6 +366,7 @@ DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256")
DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128")
DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192")
DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256")
+DEF_FEAT(KMF_QAI, "kmf-qai", KMF, 127, "KMF Query-Authentication-Information")
/* Features exposed via the KMO instruction. */
DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA")
@@ -311,6 +381,7 @@ DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256")
DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128")
DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192")
DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256")
+DEF_FEAT(KMO_QAI, "kmo-qai", KMO, 127, "KMO Query-Authentication-Information")
/* Features exposed via the PCC instruction. */
DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA")
@@ -336,11 +407,13 @@ DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scala
DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448")
DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519")
DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448")
+DEF_FEAT(PCC_QAI, "pcc-qai", PCC, 127, "PCC Query-Authentication-Information")
/* Features exposed via the PPNO/PRNO instruction. */
DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG")
DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio")
DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG")
+DEF_FEAT(PRNO_QAI, "prno-qai", PPNO, 127, "PRNO Query-Authentication-Information")
/* Features exposed via the KMA instruction. */
DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128")
@@ -349,6 +422,7 @@ DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256")
DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128")
DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192")
DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256")
+DEF_FEAT(KMA_QAI, "kma-qai", KMA, 127, "KMA Query-Authentication-Information")
/* Features exposed via the KDSA instruction. */
DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256")
@@ -366,6 +440,7 @@ DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdD
DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448")
DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519")
DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448")
+DEF_FEAT(KDSA_QAI, "kdsa-qai", KDSA, 127, "KDSA Query-Authentication-Information")
/* Features exposed via the SORTL instruction. */
DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR")
@@ -383,3 +458,10 @@ DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block"
/* Features exposed via the UV-CALL instruction */
DEF_FEAT(UV_FEAT_AP, "appv", UV_FEAT_GUEST, 4, "AP instructions installed for secure guests")
DEF_FEAT(UV_FEAT_AP_INTR, "appvi", UV_FEAT_GUEST, 5, "AP instructions interruption support for secure guests")
+
+/* Features exposed via the PFCR instruction (concurrent-functions facility). */
+DEF_FEAT(PFCR_QAF, "pfcr-qaf", PFCR, 0, "PFCR Query-Available-Functions")
+DEF_FEAT(PFCR_CSDST, "pfcr-csdst", PFCR, 1, "PFCR Compare-and-Swap-and-Double-Store (32)")
+DEF_FEAT(PFCR_CSDSTG, "pfcr-csdstg", PFCR, 2, "PFCR Compare-and-Swap-and-Double-Store (64)")
+DEF_FEAT(PFCR_CSTST, "pfcr-cstst", PFCR, 3, "PFCR Compare-and-Swap-and-Triple-Store (32)")
+DEF_FEAT(PFCR_CSTSTG, "pfcr-cststg", PFCR, 4, "PFCR Compare-and-Swap-and-Triple-Store (64)")
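
The bit numbers in these DEF_FEAT() entries are MSB-0 positions within the instruction's query block, which is why s390_fill_feat_block() above can mark "query" support with set_be_bit(0, data). A stand-in helper with the same big-endian bit semantics, for illustration only (not the QEMU implementation):

#include <stdint.h>

/* MSB-0 bit numbering: bit 0 is the most significant bit of byte 0. */
static void set_msb0_bit(unsigned int bit, uint8_t *block)
{
    block[bit / 8] |= 0x80u >> (bit % 8);
}

/* e.g. PFCR bit 0 ("pfcr-qaf") sets 0x80 in byte 0 of the PFCR query block */
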
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index a27f4b6..954a7a9 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -14,8 +14,8 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "kvm/kvm_s390x.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
@@ -23,7 +23,7 @@
#include "qemu/hw-version.h"
#include "qemu/qemu-print.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "target/s390x/kvm/pv.h"
#include CONFIG_DEVICES
#endif
@@ -94,6 +94,8 @@ static S390CPUDef s390_cpu_defs[] = {
CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"),
CPUDEF_INIT(0x3931, 16, 1, 47, 0x08000000U, "gen16a", "IBM 3931 GA1"),
CPUDEF_INIT(0x3932, 16, 1, 47, 0x08000000U, "gen16b", "IBM 3932 GA1"),
+ CPUDEF_INIT(0x9175, 17, 1, 47, 0x08000000U, "gen17a", "IBM 9175 GA1"),
+ CPUDEF_INIT(0x9176, 17, 1, 47, 0x08000000U, "gen17b", "IBM 9176 GA1"),
};
#define QEMU_MAX_CPU_TYPE 0x8561
@@ -371,7 +373,7 @@ static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data)
g_free(name);
}
-static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
const S390CPUClass *cc_a = S390_CPU_CLASS((ObjectClass *)a);
const S390CPUClass *cc_b = S390_CPU_CLASS((ObjectClass *)b);
@@ -413,7 +415,7 @@ void s390_cpu_list(void)
qemu_printf("Available CPUs:\n");
list = object_class_get_list(TYPE_S390_CPU, false);
- list = g_slist_sort(list, s390_cpu_list_compare);
+ list = g_slist_sort_with_data(list, s390_cpu_list_compare, NULL);
g_slist_foreach(list, s390_print_cpu_model_list_entry, NULL);
g_slist_free(list);
@@ -457,7 +459,10 @@ static void check_consistency(const S390CPUModel *model)
{ S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR },
{ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, S390_FEAT_VECTOR_PACKED_DECIMAL },
{ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH },
+ { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2 },
{ S390_FEAT_VECTOR_ENH, S390_FEAT_VECTOR },
+ { S390_FEAT_VECTOR_ENH2, S390_FEAT_VECTOR_ENH },
+ { S390_FEAT_VECTOR_ENH3, S390_FEAT_VECTOR_ENH2 },
{ S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2 },
{ S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP },
{ S390_FEAT_CMM_NT, S390_FEAT_CMM },
@@ -477,6 +482,18 @@ static void check_consistency(const S390CPUModel *model)
{ S390_FEAT_KLMD_SHA3_512, S390_FEAT_MSA },
{ S390_FEAT_KLMD_SHAKE_128, S390_FEAT_MSA },
{ S390_FEAT_KLMD_SHAKE_256, S390_FEAT_MSA },
+ { S390_FEAT_KMAC_HMAC_SHA_224, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_SHA_256, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_SHA_384, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_SHA_512, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_ESHA_224, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_ESHA_256, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_ESHA_384, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KMAC_HMAC_ESHA_512, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_KM_FULL_XTS_AES_128, S390_FEAT_MSA_EXT_4 },
+ { S390_FEAT_KM_FULL_XTS_AES_256, S390_FEAT_MSA_EXT_4 },
+ { S390_FEAT_KM_FULL_XTS_EAES_128, S390_FEAT_MSA_EXT_4 },
+ { S390_FEAT_KM_FULL_XTS_EAES_256, S390_FEAT_MSA_EXT_4 },
{ S390_FEAT_PRNO_TRNG_QRTCR, S390_FEAT_MSA_EXT_5 },
{ S390_FEAT_PRNO_TRNG, S390_FEAT_MSA_EXT_5 },
{ S390_FEAT_SIE_KSS, S390_FEAT_SIE_F2 },
@@ -492,6 +509,50 @@ static void check_consistency(const S390CPUModel *model)
{ S390_FEAT_RDP, S390_FEAT_LOCAL_TLB_CLEARING },
{ S390_FEAT_UV_FEAT_AP, S390_FEAT_AP },
{ S390_FEAT_UV_FEAT_AP_INTR, S390_FEAT_UV_FEAT_AP },
+ { S390_FEAT_PFCR_QAF, S390_FEAT_CCF_BASE },
+ { S390_FEAT_PFCR_CSDST, S390_FEAT_CCF_BASE },
+ { S390_FEAT_PFCR_CSDSTG, S390_FEAT_CCF_BASE },
+ { S390_FEAT_PFCR_CSTST, S390_FEAT_CCF_BASE },
+ { S390_FEAT_PFCR_CSTSTG, S390_FEAT_CCF_BASE },
+ { S390_FEAT_INEFF_NC_TX, S390_FEAT_TRANSACTIONAL_EXE },
+ { S390_FEAT_PLO_CLO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_CSO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DCSO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_CSSTO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_CSDSTO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_CSTSTO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TCS, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TCSG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TCSX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TCSO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QCS, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QCSG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QCSX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QCSO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_LO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DLX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DLO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TL, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TLG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TLX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TLO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QL, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QLG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QLX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QLO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_STO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DST, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DSTG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DSTX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_DSTO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TST, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TSTG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TSTX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_TSTO, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QST, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QSTG, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QSTX, S390_FEAT_PLO_EXT },
+ { S390_FEAT_PLO_QSTO, S390_FEAT_PLO_EXT },
};
int i;
@@ -517,7 +578,6 @@ static void check_compat_model_failed(Error **errp,
error_setg(errp, "%s. Maximum supported model in the current configuration: \'%s\'",
msg, max_model->def->name);
error_append_hint(errp, "Consider a different accelerator, try \"-accel help\"\n");
- return;
}
static bool check_compatibility(const S390CPUModel *max_model,
@@ -859,7 +919,7 @@ void s390_cpu_model_class_register_props(ObjectClass *oc)
}
#ifdef CONFIG_KVM
-static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data)
+static void s390_host_cpu_model_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *xcc = S390_CPU_CLASS(oc);
@@ -868,7 +928,7 @@ static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data)
}
#endif
-static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data)
+static void s390_base_cpu_model_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *xcc = S390_CPU_CLASS(oc);
@@ -879,7 +939,7 @@ static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data)
xcc->desc = xcc->cpu_def->desc;
}
-static void s390_cpu_model_class_init(ObjectClass *oc, void *data)
+static void s390_cpu_model_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *xcc = S390_CPU_CLASS(oc);
@@ -889,7 +949,7 @@ static void s390_cpu_model_class_init(ObjectClass *oc, void *data)
xcc->desc = xcc->cpu_def->desc;
}
-static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data)
+static void s390_qemu_cpu_model_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *xcc = S390_CPU_CLASS(oc);
@@ -898,7 +958,7 @@ static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data)
qemu_hw_version());
}
-static void s390_max_cpu_model_class_init(ObjectClass *oc, void *data)
+static void s390_max_cpu_model_class_init(ObjectClass *oc, const void *data)
{
S390CPUClass *xcc = S390_CPU_CLASS(oc);
@@ -1012,7 +1072,7 @@ static void register_types(void)
.instance_init = s390_cpu_model_initfn,
.instance_finalize = s390_cpu_model_finalize,
.class_init = s390_base_cpu_model_class_init,
- .class_data = (void *) &s390_cpu_defs[i],
+ .class_data = &s390_cpu_defs[i],
};
char *name = s390_cpu_type_name(s390_cpu_defs[i].name);
TypeInfo ti = {
@@ -1021,7 +1081,7 @@ static void register_types(void)
.instance_init = s390_cpu_model_initfn,
.instance_finalize = s390_cpu_model_finalize,
.class_init = s390_cpu_model_class_init,
- .class_data = (void *) &s390_cpu_defs[i],
+ .class_data = &s390_cpu_defs[i],
};
type_register_static(&ti_base);
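
The new entries extend check_consistency()'s dependency table: each pair says the first feature requires the second (for example, every new "plo-*" subfunction depends on the PLO-extension facility). A hedged sketch of the kind of loop such a table drives, with placeholder reporting rather than the exact QEMU code:

/* Sketch: flag features whose prerequisite facility is not in the model. */
static void check_feature_deps(const S390CPUModel *model,
                               const int dep[][2], size_t ndeps)
{
    for (size_t i = 0; i < ndeps; i++) {
        if (test_bit(dep[i][0], model->features) &&
            !test_bit(dep[i][1], model->features)) {
            warn_report("feature %d requires feature %d",
                        dep[i][0], dep[i][1]);
        }
    }
}
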
diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h
index 71d4bc2..f701bc0 100644
--- a/target/s390x/cpu_models.h
+++ b/target/s390x/cpu_models.h
@@ -113,6 +113,9 @@ static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model)
}
S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
S390FeatBitmap features);
+void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ const S390FeatInit feat_init);
+void s390_cpu_list(void);
bool kvm_s390_cpu_models_supported(void);
bool kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp);
diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c
deleted file mode 100644
index 977fbc6..0000000
--- a/target/s390x/cpu_models_sysemu.c
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * CPU models for s390x - System Emulation-only
- *
- * Copyright 2016 IBM Corp.
- *
- * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "s390x-internal.h"
-#include "kvm/kvm_s390x.h"
-#include "sysemu/kvm.h"
-#include "qapi/error.h"
-#include "qapi/visitor.h"
-#include "qapi/qobject-input-visitor.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qapi-commands-machine-target.h"
-
-static void list_add_feat(const char *name, void *opaque);
-
-static void check_unavailable_features(const S390CPUModel *max_model,
- const S390CPUModel *model,
- strList **unavailable)
-{
- S390FeatBitmap missing;
-
- /* check general model compatibility */
- if (max_model->def->gen < model->def->gen ||
- (max_model->def->gen == model->def->gen &&
- max_model->def->ec_ga < model->def->ec_ga)) {
- list_add_feat("type", unavailable);
- }
-
- /* detect missing features if any to properly report them */
- bitmap_andnot(missing, model->features, max_model->features,
- S390_FEAT_MAX);
- if (!bitmap_empty(missing, S390_FEAT_MAX)) {
- s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat);
- }
-}
-
-struct CpuDefinitionInfoListData {
- CpuDefinitionInfoList *list;
- S390CPUModel *model;
-};
-
-static void create_cpu_model_list(ObjectClass *klass, void *opaque)
-{
- struct CpuDefinitionInfoListData *cpu_list_data = opaque;
- CpuDefinitionInfoList **cpu_list = &cpu_list_data->list;
- CpuDefinitionInfo *info;
- char *name = g_strdup(object_class_get_name(klass));
- S390CPUClass *scc = S390_CPU_CLASS(klass);
-
- /* strip off the -s390x-cpu */
- g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
- info = g_new0(CpuDefinitionInfo, 1);
- info->name = name;
- info->has_migration_safe = true;
- info->migration_safe = scc->is_migration_safe;
- info->q_static = scc->is_static;
- info->q_typename = g_strdup(object_class_get_name(klass));
- /* check for unavailable features */
- if (cpu_list_data->model) {
- Object *obj;
- S390CPU *sc;
- obj = object_new_with_class(klass);
- sc = S390_CPU(obj);
- if (sc->model) {
- info->has_unavailable_features = true;
- check_unavailable_features(cpu_list_data->model, sc->model,
- &info->unavailable_features);
- }
- object_unref(obj);
- }
-
- QAPI_LIST_PREPEND(*cpu_list, info);
-}
-
-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
-{
- struct CpuDefinitionInfoListData list_data = {
- .list = NULL,
- };
-
- list_data.model = get_max_cpu_model(NULL);
-
- object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false,
- &list_data);
-
- return list_data.list;
-}
-
-static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info,
- const char *info_arg_name, Error **errp)
-{
- Error *err = NULL;
- const QDict *qdict;
- const QDictEntry *e;
- Visitor *visitor;
- ObjectClass *oc;
- S390CPU *cpu;
- Object *obj;
-
- oc = cpu_class_by_name(TYPE_S390_CPU, info->name);
- if (!oc) {
- error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name);
- return;
- }
- if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) {
- error_setg(errp, "The CPU definition '%s' requires KVM", info->name);
- return;
- }
- obj = object_new_with_class(oc);
- cpu = S390_CPU(obj);
-
- if (!cpu->model) {
- error_setg(errp, "Details about the host CPU model are not available, "
- "it cannot be used.");
- object_unref(obj);
- return;
- }
-
- if (info->props) {
- g_autofree const char *props_name = g_strdup_printf("%s.props",
- info_arg_name);
-
- visitor = qobject_input_visitor_new(info->props);
- if (!visit_start_struct(visitor, props_name, NULL, 0, errp)) {
- visit_free(visitor);
- object_unref(obj);
- return;
- }
- qdict = qobject_to(QDict, info->props);
- for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
- if (!object_property_set(obj, e->key, visitor, &err)) {
- break;
- }
- }
- if (!err) {
- visit_check_struct(visitor, &err);
- }
- visit_end_struct(visitor, NULL);
- visit_free(visitor);
- if (err) {
- error_propagate(errp, err);
- object_unref(obj);
- return;
- }
- }
-
- /* copy the model and throw the cpu away */
- memcpy(model, cpu->model, sizeof(*model));
- object_unref(obj);
-}
-
-static void qdict_add_disabled_feat(const char *name, void *opaque)
-{
- qdict_put_bool(opaque, name, false);
-}
-
-static void qdict_add_enabled_feat(const char *name, void *opaque)
-{
- qdict_put_bool(opaque, name, true);
-}
-
-/* convert S390CPUDef into a static CpuModelInfo */
-static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
- bool delta_changes)
-{
- QDict *qdict = qdict_new();
- S390FeatBitmap bitmap;
-
- /* always fallback to the static base model */
- info->name = g_strdup_printf("%s-base", model->def->name);
-
- if (delta_changes) {
- /* features deleted from the base feature set */
- bitmap_andnot(bitmap, model->def->base_feat, model->features,
- S390_FEAT_MAX);
- if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
- }
-
- /* features added to the base feature set */
- bitmap_andnot(bitmap, model->features, model->def->base_feat,
- S390_FEAT_MAX);
- if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
- }
- } else {
- /* expand all features */
- s390_feat_bitmap_to_ascii(model->features, qdict,
- qdict_add_enabled_feat);
- bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
- }
-
- if (!qdict_size(qdict)) {
- qobject_unref(qdict);
- } else {
- info->props = QOBJECT(qdict);
- }
-
- /* features flagged as deprecated */
- bitmap_zero(bitmap, S390_FEAT_MAX);
- s390_get_deprecated_features(bitmap);
-
- bitmap_and(bitmap, bitmap, model->def->full_feat, S390_FEAT_MAX);
- s390_feat_bitmap_to_ascii(bitmap, &info->deprecated_props, list_add_feat);
- info->has_deprecated_props = !!info->deprecated_props;
-}
-
-CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
- CpuModelInfo *model,
- Error **errp)
-{
- Error *err = NULL;
- CpuModelExpansionInfo *expansion_info = NULL;
- S390CPUModel s390_model;
- bool delta_changes = false;
-
- /* convert it to our internal representation */
- cpu_model_from_info(&s390_model, model, "model", &err);
- if (err) {
- error_propagate(errp, err);
- return NULL;
- }
-
- if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) {
- delta_changes = true;
- } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
- error_setg(errp, "The requested expansion type is not supported.");
- return NULL;
- }
-
- /* convert it back to a static representation */
- expansion_info = g_new0(CpuModelExpansionInfo, 1);
- expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
- cpu_info_from_model(expansion_info->model, &s390_model, delta_changes);
- return expansion_info;
-}
-
-static void list_add_feat(const char *name, void *opaque)
-{
- strList **last = (strList **) opaque;
-
- QAPI_LIST_PREPEND(*last, g_strdup(name));
-}
-
-CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa,
- CpuModelInfo *infob,
- Error **errp)
-{
- Error *err = NULL;
- CpuModelCompareResult feat_result, gen_result;
- CpuModelCompareInfo *compare_info;
- S390FeatBitmap missing, added;
- S390CPUModel modela, modelb;
-
- /* convert both models to our internal representation */
- cpu_model_from_info(&modela, infoa, "modela", &err);
- if (err) {
- error_propagate(errp, err);
- return NULL;
- }
- cpu_model_from_info(&modelb, infob, "modelb", &err);
- if (err) {
- error_propagate(errp, err);
- return NULL;
- }
- compare_info = g_new0(CpuModelCompareInfo, 1);
-
- /* check the cpu generation and ga level */
- if (modela.def->gen == modelb.def->gen) {
- if (modela.def->ec_ga == modelb.def->ec_ga) {
- /* ec and corresponding bc are identical */
- gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
- } else if (modela.def->ec_ga < modelb.def->ec_ga) {
- gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
- } else {
- gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
- }
- } else if (modela.def->gen < modelb.def->gen) {
- gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
- } else {
- gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
- }
- if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
- /* both models cannot be made identical */
- list_add_feat("type", &compare_info->responsible_properties);
- }
-
- /* check the feature set */
- if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) {
- feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
- } else {
- bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX);
- s390_feat_bitmap_to_ascii(missing,
- &compare_info->responsible_properties,
- list_add_feat);
- bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX);
- s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties,
- list_add_feat);
- if (bitmap_empty(missing, S390_FEAT_MAX)) {
- feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
- } else if (bitmap_empty(added, S390_FEAT_MAX)) {
- feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
- } else {
- feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
- }
- }
-
- /* combine the results */
- if (gen_result == feat_result) {
- compare_info->result = gen_result;
- } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
- compare_info->result = gen_result;
- } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
- compare_info->result = feat_result;
- } else {
- compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
- }
- return compare_info;
-}
-
-CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa,
- CpuModelInfo *infob,
- Error **errp)
-{
- Error *err = NULL;
- CpuModelBaselineInfo *baseline_info;
- S390CPUModel modela, modelb, model;
- uint16_t cpu_type;
- uint8_t max_gen_ga;
- uint8_t max_gen;
-
- /* convert both models to our internal representation */
- cpu_model_from_info(&modela, infoa, "modela", &err);
- if (err) {
- error_propagate(errp, err);
- return NULL;
- }
-
- cpu_model_from_info(&modelb, infob, "modelb", &err);
- if (err) {
- error_propagate(errp, err);
- return NULL;
- }
-
- /* features both models support */
- bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX);
-
- /* detect the maximum model not regarding features */
- if (modela.def->gen == modelb.def->gen) {
- if (modela.def->type == modelb.def->type) {
- cpu_type = modela.def->type;
- } else {
- cpu_type = 0;
- }
- max_gen = modela.def->gen;
- max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga);
- } else if (modela.def->gen > modelb.def->gen) {
- cpu_type = modelb.def->type;
- max_gen = modelb.def->gen;
- max_gen_ga = modelb.def->ec_ga;
- } else {
- cpu_type = modela.def->type;
- max_gen = modela.def->gen;
- max_gen_ga = modela.def->ec_ga;
- }
-
- model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga,
- model.features);
-
- /* models without early base features (esan3) are bad */
- if (!model.def) {
- error_setg(errp, "No compatible CPU model could be created as"
- " important base features are disabled");
- return NULL;
- }
-
- /* strip off features not part of the max model */
- bitmap_and(model.features, model.features, model.def->full_feat,
- S390_FEAT_MAX);
-
- baseline_info = g_new0(CpuModelBaselineInfo, 1);
- baseline_info->model = g_malloc0(sizeof(*baseline_info->model));
- cpu_info_from_model(baseline_info->model, &model, true);
- return baseline_info;
-}
-
-void apply_cpu_model(const S390CPUModel *model, Error **errp)
-{
- static S390CPUModel applied_model;
- static bool applied;
-
- /*
- * We have the same model for all VCPUs. KVM can only be configured before
- * any VCPUs are defined in KVM.
- */
- if (applied) {
- if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) {
- error_setg(errp, "Mixed CPU models are not supported on s390x.");
- }
- return;
- }
-
- if (kvm_enabled()) {
- if (!kvm_s390_apply_cpu_model(model, errp)) {
- return;
- }
- }
-
- applied = true;
- if (model) {
- applied_model = *model;
- }
-}
diff --git a/target/s390x/cpu_models_system.c b/target/s390x/cpu_models_system.c
new file mode 100644
index 0000000..9d84faa
--- /dev/null
+++ b/target/s390x/cpu_models_system.c
@@ -0,0 +1,433 @@
+/*
+ * CPU models for s390x - System-only
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "system/kvm.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qobject/qdict.h"
+#include "qapi/qapi-commands-machine.h"
+
+static void list_add_feat(const char *name, void *opaque);
+
+static void check_unavailable_features(const S390CPUModel *max_model,
+ const S390CPUModel *model,
+ strList **unavailable)
+{
+ S390FeatBitmap missing;
+
+ /* check general model compatibility */
+ if (max_model->def->gen < model->def->gen ||
+ (max_model->def->gen == model->def->gen &&
+ max_model->def->ec_ga < model->def->ec_ga)) {
+ list_add_feat("type", unavailable);
+ }
+
+ /* detect missing features if any to properly report them */
+ bitmap_andnot(missing, model->features, max_model->features,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(missing, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat);
+ }
+}
+
+struct CpuDefinitionInfoListData {
+ CpuDefinitionInfoList *list;
+ S390CPUModel *model;
+};
+
+static void create_cpu_model_list(ObjectClass *klass, void *opaque)
+{
+ struct CpuDefinitionInfoListData *cpu_list_data = opaque;
+ CpuDefinitionInfoList **cpu_list = &cpu_list_data->list;
+ CpuDefinitionInfo *info;
+ char *name = g_strdup(object_class_get_name(klass));
+ S390CPUClass *scc = S390_CPU_CLASS(klass);
+
+ /* strip off the -s390x-cpu */
+ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
+ info = g_new0(CpuDefinitionInfo, 1);
+ info->name = name;
+ info->has_migration_safe = true;
+ info->migration_safe = scc->is_migration_safe;
+ info->q_static = scc->is_static;
+ info->q_typename = g_strdup(object_class_get_name(klass));
+ /* check for unavailable features */
+ if (cpu_list_data->model) {
+ Object *obj;
+ S390CPU *sc;
+ obj = object_new_with_class(klass);
+ sc = S390_CPU(obj);
+ if (sc->model) {
+ info->has_unavailable_features = true;
+ check_unavailable_features(cpu_list_data->model, sc->model,
+ &info->unavailable_features);
+ }
+ object_unref(obj);
+ }
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ struct CpuDefinitionInfoListData list_data = {
+ .list = NULL,
+ };
+
+ list_data.model = get_max_cpu_model(NULL);
+
+ object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false,
+ &list_data);
+
+ return list_data.list;
+}
+
+static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info,
+ const char *info_arg_name, Error **errp)
+{
+ Error *err = NULL;
+ const QDict *qdict;
+ const QDictEntry *e;
+ Visitor *visitor;
+ ObjectClass *oc;
+ S390CPU *cpu;
+ Object *obj;
+
+ oc = cpu_class_by_name(TYPE_S390_CPU, info->name);
+ if (!oc) {
+ error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name);
+ return;
+ }
+ if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) {
+ error_setg(errp, "The CPU definition '%s' requires KVM", info->name);
+ return;
+ }
+ obj = object_new_with_class(oc);
+ cpu = S390_CPU(obj);
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "it cannot be used.");
+ object_unref(obj);
+ return;
+ }
+
+ if (info->props) {
+ g_autofree const char *props_name = g_strdup_printf("%s.props",
+ info_arg_name);
+
+ visitor = qobject_input_visitor_new(info->props);
+ if (!visit_start_struct(visitor, props_name, NULL, 0, errp)) {
+ visit_free(visitor);
+ object_unref(obj);
+ return;
+ }
+ qdict = qobject_to(QDict, info->props);
+ for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
+ if (!object_property_set(obj, e->key, visitor, &err)) {
+ break;
+ }
+ }
+ if (!err) {
+ visit_check_struct(visitor, &err);
+ }
+ visit_end_struct(visitor, NULL);
+ visit_free(visitor);
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(obj);
+ return;
+ }
+ }
+
+ /* copy the model and throw the cpu away */
+ memcpy(model, cpu->model, sizeof(*model));
+ object_unref(obj);
+}
+
+static void qdict_add_disabled_feat(const char *name, void *opaque)
+{
+ qdict_put_bool(opaque, name, false);
+}
+
+static void qdict_add_enabled_feat(const char *name, void *opaque)
+{
+ qdict_put_bool(opaque, name, true);
+}
+
+/* convert S390CPUDef into a static CpuModelInfo */
+static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
+ bool delta_changes)
+{
+ QDict *qdict = qdict_new();
+ S390FeatBitmap bitmap;
+
+ /* always fallback to the static base model */
+ info->name = g_strdup_printf("%s-base", model->def->name);
+
+ if (delta_changes) {
+ /* features deleted from the base feature set */
+ bitmap_andnot(bitmap, model->def->base_feat, model->features,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ /* features added to the base feature set */
+ bitmap_andnot(bitmap, model->features, model->def->base_feat,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
+ }
+ } else {
+ /* expand all features */
+ s390_feat_bitmap_to_ascii(model->features, qdict,
+ qdict_add_enabled_feat);
+ bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ if (!qdict_size(qdict)) {
+ qobject_unref(qdict);
+ } else {
+ info->props = QOBJECT(qdict);
+ }
+}
+
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelExpansionInfo *expansion_info = NULL;
+ S390CPUModel s390_model;
+ bool delta_changes = false;
+ S390FeatBitmap deprecated_feats;
+
+ /* convert it to our internal representation */
+ cpu_model_from_info(&s390_model, model, "model", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) {
+ delta_changes = true;
+ } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
+ error_setg(errp, "The requested expansion type is not supported.");
+ return NULL;
+ }
+
+ /* convert it back to a static representation */
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
+ cpu_info_from_model(expansion_info->model, &s390_model, delta_changes);
+
+ /* populate list of deprecated features */
+ bitmap_zero(deprecated_feats, S390_FEAT_MAX);
+ s390_get_deprecated_features(deprecated_feats);
+
+ if (delta_changes) {
+ /*
+ * Only populate deprecated features that are a
+ * subset of the features enabled on the CPU model.
+ */
+ bitmap_and(deprecated_feats, deprecated_feats,
+ s390_model.features, S390_FEAT_MAX);
+ }
+
+ s390_feat_bitmap_to_ascii(deprecated_feats,
+ &expansion_info->deprecated_props, list_add_feat);
+ return expansion_info;
+}
+
+static void list_add_feat(const char *name, void *opaque)
+{
+ strList **last = (strList **) opaque;
+
+ QAPI_LIST_PREPEND(*last, g_strdup(name));
+}
+
+CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelCompareResult feat_result, gen_result;
+ CpuModelCompareInfo *compare_info;
+ S390FeatBitmap missing, added;
+ S390CPUModel modela, modelb;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, "modela", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+ cpu_model_from_info(&modelb, infob, "modelb", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+ compare_info = g_new0(CpuModelCompareInfo, 1);
+
+ /* check the cpu generation and ga level */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->ec_ga == modelb.def->ec_ga) {
+ /* ec and corresponding bc are identical */
+ gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else if (modela.def->ec_ga < modelb.def->ec_ga) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ } else if (modela.def->gen < modelb.def->gen) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ /* both models cannot be made identical */
+ list_add_feat("type", &compare_info->responsible_properties);
+ }
+
+ /* check the feature set */
+ if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else {
+ bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(missing,
+ &compare_info->responsible_properties,
+ list_add_feat);
+ bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties,
+ list_add_feat);
+ if (bitmap_empty(missing, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else if (bitmap_empty(added, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ } else {
+ feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ }
+
+ /* combine the results */
+ if (gen_result == feat_result) {
+ compare_info->result = gen_result;
+ } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = gen_result;
+ } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = feat_result;
+ } else {
+ compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ return compare_info;
+}
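The final combination step above can be summarised as: the generation result and the feature result must agree, or one of them must be IDENTICAL; any other disagreement makes the models incompatible. A small standalone restatement of that rule (a sketch, not QEMU API):

    enum cmp { IDENTICAL, SUBSET, SUPERSET, INCOMPATIBLE };

    static enum cmp combine(enum cmp gen, enum cmp feat)
    {
        if (gen == feat) {
            return gen;              /* both agree */
        }
        if (feat == IDENTICAL) {
            return gen;              /* only the generation differs */
        }
        if (gen == IDENTICAL) {
            return feat;             /* only the feature set differs */
        }
        return INCOMPATIBLE;         /* e.g. SUBSET vs. SUPERSET */
    }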
+
+CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelBaselineInfo *baseline_info;
+ S390CPUModel modela, modelb, model;
+ uint16_t cpu_type;
+ uint8_t max_gen_ga;
+ uint8_t max_gen;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, "modela", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ cpu_model_from_info(&modelb, infob, "modelb", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ /* features both models support */
+ bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX);
+
+ /* detect the maximum model, ignoring features */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->type == modelb.def->type) {
+ cpu_type = modela.def->type;
+ } else {
+ cpu_type = 0;
+ }
+ max_gen = modela.def->gen;
+ max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga);
+ } else if (modela.def->gen > modelb.def->gen) {
+ cpu_type = modelb.def->type;
+ max_gen = modelb.def->gen;
+ max_gen_ga = modelb.def->ec_ga;
+ } else {
+ cpu_type = modela.def->type;
+ max_gen = modela.def->gen;
+ max_gen_ga = modela.def->ec_ga;
+ }
+
+ model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga,
+ model.features);
+
+ /* models without early base features (esan3) are bad */
+ if (!model.def) {
+ error_setg(errp, "No compatible CPU model could be created as"
+ " important base features are disabled");
+ return NULL;
+ }
+
+ /* strip off features not part of the max model */
+ bitmap_and(model.features, model.features, model.def->full_feat,
+ S390_FEAT_MAX);
+
+ baseline_info = g_new0(CpuModelBaselineInfo, 1);
+ baseline_info->model = g_malloc0(sizeof(*baseline_info->model));
+ cpu_info_from_model(baseline_info->model, &model, true);
+ return baseline_info;
+}
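Worked example of the maximum-model selection above, with hypothetical generation/GA numbers that do not correspond to real machine data:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint8_t gen_a = 16, ga_a = 2;   /* hypothetical model A */
        uint8_t gen_b = 16, ga_b = 1;   /* hypothetical model B */
        uint8_t max_gen, max_gen_ga;

        if (gen_a == gen_b) {
            max_gen = gen_a;
            max_gen_ga = MIN(ga_a, ga_b);   /* same generation: older GA wins */
        } else if (gen_a > gen_b) {
            max_gen = gen_b;
            max_gen_ga = ga_b;              /* otherwise the older generation */
        } else {
            max_gen = gen_a;
            max_gen_ga = ga_a;
        }
        printf("baseline: gen %u GA%u\n", max_gen, max_gen_ga); /* gen 16 GA1 */
        return 0;
    }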
+
+void apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+ static S390CPUModel applied_model;
+ static bool applied;
+
+ /*
+ * We have the same model for all VCPUs. KVM can only be configured before
+ * any VCPUs are defined in KVM.
+ */
+ if (applied) {
+ if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) {
+ error_setg(errp, "Mixed CPU models are not supported on s390x.");
+ }
+ return;
+ }
+
+ if (kvm_enabled()) {
+ if (!kvm_s390_apply_cpu_model(model, errp)) {
+ return;
+ }
+ }
+
+ applied = true;
+ if (model) {
+ applied_model = *model;
+ }
+}
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
index 27ffd48..da44b01 100644
--- a/target/s390x/diag.c
+++ b/target/s390x/diag.c
@@ -16,10 +16,10 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "hw/watchdog/wdt_diag288.h"
-#include "sysemu/cpus.h"
+#include "system/cpus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/s390-virtio-ccw.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "kvm/kvm_s390x.h"
#include "target/s390x/kvm/pv.h"
#include "qemu/error-report.h"
@@ -133,7 +133,14 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
valid = subcode == DIAG308_PV_SET ? iplb_valid_pv(iplb) : iplb_valid(iplb);
if (!valid) {
- env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ if (subcode == DIAG308_SET && iplb->pbt == S390_IPL_TYPE_QEMU_SCSI) {
+ s390_rebuild_iplb(iplb->devno, iplb);
+ s390_ipl_update_diag308(iplb);
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
+ } else {
+ env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ }
+
goto out;
}
diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
index a9f4eb9..6bca376 100644
--- a/target/s390x/gdbstub.c
+++ b/target/s390x/gdbstub.c
@@ -21,12 +21,12 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/gdbstub.h"
#include "gdbstub/helpers.h"
#include "qemu/bitops.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/tcg.h"
+#include "system/hw_accel.h"
+#include "system/tcg.h"
int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
@@ -46,7 +46,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPUS390XState *env = cpu_env(cs);
- target_ulong tmpl = ldtul_p(mem_buf);
+ target_ulong tmpl = ldq_be_p(mem_buf);
switch (n) {
case S390_PSWM_REGNUM:
@@ -88,7 +88,7 @@ static int cpu_write_ac_reg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_A0_REGNUM ... S390_A15_REGNUM:
- env->aregs[n] = ldl_p(mem_buf);
+ env->aregs[n] = ldl_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 4;
default:
@@ -123,10 +123,10 @@ static int cpu_write_fp_reg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_FPC_REGNUM:
- env->fpc = ldl_p(mem_buf);
+ env->fpc = ldl_be_p(mem_buf);
return 4;
case S390_F0_REGNUM ... S390_F15_REGNUM:
- *get_freg(env, n - S390_F0_REGNUM) = ldtul_p(mem_buf);
+ *get_freg(env, n - S390_F0_REGNUM) = ldq_be_p(mem_buf);
return 8;
default:
return 0;
@@ -167,11 +167,11 @@ static int cpu_write_vreg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_V0L_REGNUM ... S390_V15L_REGNUM:
- env->vregs[n][1] = ldtul_p(mem_buf + 8);
+ env->vregs[n][1] = ldq_be_p(mem_buf + 8);
return 8;
case S390_V16_REGNUM ... S390_V31_REGNUM:
- env->vregs[n][0] = ldtul_p(mem_buf);
- env->vregs[n][1] = ldtul_p(mem_buf + 8);
+ env->vregs[n][0] = ldq_be_p(mem_buf);
+ env->vregs[n][1] = ldq_be_p(mem_buf + 8);
return 16;
default:
return 0;
@@ -203,7 +203,7 @@ static int cpu_write_c_reg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_C0_REGNUM ... S390_C15_REGNUM:
- env->cregs[n] = ldtul_p(mem_buf);
+ env->cregs[n] = ldq_be_p(mem_buf);
if (tcg_enabled()) {
tlb_flush(env_cpu(env));
}
@@ -246,19 +246,19 @@ static int cpu_write_virt_reg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_VIRT_CKC_REGNUM:
- env->ckc = ldtul_p(mem_buf);
+ env->ckc = ldq_be_p(mem_buf);
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_CPUTM_REGNUM:
- env->cputm = ldtul_p(mem_buf);
+ env->cputm = ldq_be_p(mem_buf);
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_BEA_REGNUM:
- env->gbea = ldtul_p(mem_buf);
+ env->gbea = ldq_be_p(mem_buf);
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_PREFIX_REGNUM:
- env->psa = ldtul_p(mem_buf);
+ env->psa = ldq_be_p(mem_buf);
cpu_synchronize_post_init(cs);
return 8;
default:
@@ -298,19 +298,19 @@ static int cpu_write_virt_kvm_reg(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case S390_VIRT_KVM_PP_REGNUM:
- env->pp = ldtul_p(mem_buf);
+ env->pp = ldq_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
case S390_VIRT_KVM_PFT_REGNUM:
- env->pfault_token = ldtul_p(mem_buf);
+ env->pfault_token = ldq_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
case S390_VIRT_KVM_PFS_REGNUM:
- env->pfault_select = ldtul_p(mem_buf);
+ env->pfault_select = ldq_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
case S390_VIRT_KVM_PFC_REGNUM:
- env->pfault_compare = ldtul_p(mem_buf);
+ env->pfault_compare = ldq_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
default:
@@ -338,7 +338,7 @@ static int cpu_write_gs_reg(CPUState *cs, uint8_t *mem_buf, int n)
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- env->gscb[n] = ldtul_p(mem_buf);
+ env->gscb[n] = ldq_be_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
}
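The ldtul_p()-to-ldq_be_p()/ldl_be_p() changes in this file appear to be mechanical: gdb transfers s390x registers big-endian and target_ulong is 64 bits here, so the old and new spellings load the same value, the new one just makes width and byte order explicit. A simplified stand-in for the big-endian load (assumption: this is not QEMU's actual implementation, only the same observable behaviour):

    #include <stdint.h>
    #include <stdio.h>

    /* Load a 64-bit big-endian value from a possibly unaligned buffer,
     * independent of host endianness. */
    static uint64_t ldq_be(const void *p)
    {
        const uint8_t *b = p;
        uint64_t v = 0;

        for (int i = 0; i < 8; i++) {
            v = (v << 8) | b[i];
        }
        return v;
    }

    int main(void)
    {
        uint8_t mem_buf[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };

        printf("0x%llx\n", (unsigned long long)ldq_be(mem_buf)); /* 0x102 */
        return 0;
    }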
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 2b2bfc3..8218e64 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -46,6 +46,47 @@
S390_FEAT_PLO_CSTSTGR, \
S390_FEAT_PLO_CSTSTX
+#define S390_FEAT_GROUP_PLO_EXT \
+ S390_FEAT_PLO_EXT, \
+ S390_FEAT_PLO_CLO, \
+ S390_FEAT_PLO_CSO, \
+ S390_FEAT_PLO_DCSO, \
+ S390_FEAT_PLO_CSSTO, \
+ S390_FEAT_PLO_CSDSTO, \
+ S390_FEAT_PLO_CSTSTO, \
+ S390_FEAT_PLO_TCS, \
+ S390_FEAT_PLO_TCSG, \
+ S390_FEAT_PLO_TCSX, \
+ S390_FEAT_PLO_TCSO, \
+ S390_FEAT_PLO_QCS, \
+ S390_FEAT_PLO_QCSG, \
+ S390_FEAT_PLO_QCSX, \
+ S390_FEAT_PLO_QCSO, \
+ S390_FEAT_PLO_LO, \
+ S390_FEAT_PLO_DLX, \
+ S390_FEAT_PLO_DLO, \
+ S390_FEAT_PLO_TL, \
+ S390_FEAT_PLO_TLG, \
+ S390_FEAT_PLO_TLX, \
+ S390_FEAT_PLO_TLO, \
+ S390_FEAT_PLO_QL, \
+ S390_FEAT_PLO_QLG, \
+ S390_FEAT_PLO_QLX, \
+ S390_FEAT_PLO_QLO, \
+ S390_FEAT_PLO_STO, \
+ S390_FEAT_PLO_DST, \
+ S390_FEAT_PLO_DSTG, \
+ S390_FEAT_PLO_DSTX, \
+ S390_FEAT_PLO_DSTO, \
+ S390_FEAT_PLO_TST, \
+ S390_FEAT_PLO_TSTG, \
+ S390_FEAT_PLO_TSTX, \
+ S390_FEAT_PLO_TSTO, \
+ S390_FEAT_PLO_QST, \
+ S390_FEAT_PLO_QSTG, \
+ S390_FEAT_PLO_QSTX, \
+ S390_FEAT_PLO_QSTO
+
#define S390_FEAT_GROUP_TOD_CLOCK_STEERING \
S390_FEAT_TOD_CLOCK_STEERING, \
S390_FEAT_PTFF_QTO, \
@@ -64,6 +105,9 @@
S390_FEAT_PTFF_STOE, \
S390_FEAT_PTFF_STOUE
+#define S390_FEAT_GROUP_GEN17_PTFF \
+ S390_FEAT_PTFF_QTSE
+
#define S390_FEAT_GROUP_MSA \
S390_FEAT_MSA, \
S390_FEAT_KMAC_DEA, \
@@ -246,6 +290,49 @@
S390_FEAT_PCKMO_ECC_ED25519, \
S390_FEAT_PCKMO_ECC_ED448
+#define S390_FEAT_GROUP_MSA_EXT_10 \
+ S390_FEAT_KM_FULL_XTS_AES_128, \
+ S390_FEAT_KM_FULL_XTS_AES_256, \
+ S390_FEAT_KM_FULL_XTS_EAES_128, \
+ S390_FEAT_KM_FULL_XTS_EAES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_10_PCKMO \
+ S390_FEAT_PCKMO_AES_XTS_128_DK, \
+ S390_FEAT_PCKMO_AES_XTS_256_DK
+
+#define S390_FEAT_GROUP_MSA_EXT_11 \
+ S390_FEAT_KMAC_HMAC_SHA_224, \
+ S390_FEAT_KMAC_HMAC_SHA_256, \
+ S390_FEAT_KMAC_HMAC_SHA_384, \
+ S390_FEAT_KMAC_HMAC_SHA_512, \
+ S390_FEAT_KMAC_HMAC_ESHA_224, \
+ S390_FEAT_KMAC_HMAC_ESHA_256, \
+ S390_FEAT_KMAC_HMAC_ESHA_384, \
+ S390_FEAT_KMAC_HMAC_ESHA_512
+
+#define S390_FEAT_GROUP_MSA_EXT_11_PCKMO \
+ S390_FEAT_PCKMO_HMAC_512, \
+ S390_FEAT_PCKMO_HMAC_1024
+
+#define S390_FEAT_GROUP_MSA_EXT_12 \
+ S390_FEAT_MSA_EXT_12
+
+#define S390_FEAT_GROUP_MSA_EXT_13 \
+ S390_FEAT_KDSA_QAI, \
+ S390_FEAT_KIMD_QAI, \
+ S390_FEAT_KLMD_QAI, \
+ S390_FEAT_KMAC_QAI, \
+ S390_FEAT_KMA_QAI, \
+ S390_FEAT_KMCTR_QAI, \
+ S390_FEAT_KMF_QAI, \
+ S390_FEAT_KMO_QAI, \
+ S390_FEAT_KM_QAI, \
+ S390_FEAT_PCC_QAI, \
+ S390_FEAT_PRNO_QAI
+
+#define S390_FEAT_GROUP_MSA_EXT_13_PCKMO \
+ S390_FEAT_PCKMO_QAI
+
#define S390_FEAT_GROUP_ENH_SORT \
S390_FEAT_ESORT_BASE, \
S390_FEAT_SORTL_SFLR, \
@@ -262,10 +349,21 @@
S390_FEAT_DEFLATE_XPND, \
S390_FEAT_DEFLATE_F0
+#define S390_FEAT_GROUP_CONCURRENT_FUNCTIONS \
+ S390_FEAT_CCF_BASE, \
+ S390_FEAT_PFCR_QAF, \
+ S390_FEAT_PFCR_CSDST, \
+ S390_FEAT_PFCR_CSDSTG, \
+ S390_FEAT_PFCR_CSTST, \
+ S390_FEAT_PFCR_CSTSTG
+
/* cpu feature groups */
static uint16_t group_PLO[] = {
S390_FEAT_GROUP_PLO,
};
+static uint16_t group_PLO_EXT[] = {
+ S390_FEAT_GROUP_PLO_EXT,
+};
static uint16_t group_TOD_CLOCK_STEERING[] = {
S390_FEAT_GROUP_TOD_CLOCK_STEERING,
};
@@ -275,6 +373,11 @@ static uint16_t group_GEN13_PTFF[] = {
static uint16_t group_MULTIPLE_EPOCH_PTFF[] = {
S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
};
+
+static uint16_t group_GEN17_PTFF[] = {
+ S390_FEAT_GROUP_GEN17_PTFF,
+};
+
static uint16_t group_MSA[] = {
S390_FEAT_GROUP_MSA,
};
@@ -307,10 +410,38 @@ static uint16_t group_MSA_EXT_9[] = {
S390_FEAT_GROUP_MSA_EXT_9,
};
+static uint16_t group_MSA_EXT_10[] = {
+ S390_FEAT_GROUP_MSA_EXT_10,
+};
+
+static uint16_t group_MSA_EXT_11[] = {
+ S390_FEAT_GROUP_MSA_EXT_11,
+};
+
+static uint16_t group_MSA_EXT_12[] = {
+ S390_FEAT_GROUP_MSA_EXT_12,
+};
+
+static uint16_t group_MSA_EXT_13[] = {
+ S390_FEAT_GROUP_MSA_EXT_13,
+};
+
static uint16_t group_MSA_EXT_9_PCKMO[] = {
S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
};
+static uint16_t group_MSA_EXT_10_PCKMO[] = {
+ S390_FEAT_GROUP_MSA_EXT_10_PCKMO,
+};
+
+static uint16_t group_MSA_EXT_11_PCKMO[] = {
+ S390_FEAT_GROUP_MSA_EXT_11_PCKMO,
+};
+
+static uint16_t group_MSA_EXT_13_PCKMO[] = {
+ S390_FEAT_GROUP_MSA_EXT_13_PCKMO,
+};
+
static uint16_t group_ENH_SORT[] = {
S390_FEAT_GROUP_ENH_SORT,
};
@@ -319,6 +450,10 @@ static uint16_t group_DEFLATE_CONVERSION[] = {
S390_FEAT_GROUP_DEFLATE_CONVERSION,
};
+static uint16_t group_CONCURRENT_FUNCTIONS[] = {
+ S390_FEAT_GROUP_CONCURRENT_FUNCTIONS,
+};
+
/* Base features (in order of release)
* Only non-hypervisor managed features belong here.
* Base feature sets are static meaning they do not change in future QEMU
@@ -426,6 +561,13 @@ static uint16_t base_GEN15_GA1[] = {
#define base_GEN16_GA1 EmptyFeat
+static uint16_t base_GEN17_GA1[] = {
+ S390_FEAT_MISC_INSTRUCTION_EXT4,
+ S390_FEAT_SIF,
+ S390_FEAT_GROUP_MSA_EXT_12,
+ S390_FEAT_GROUP_PLO_EXT,
+};
+
/* Full features (in order of release)
* Automatically includes corresponding base features.
* Full features are all features this hardware supports even if kvm/QEMU do not
@@ -580,6 +722,20 @@ static uint16_t full_GEN16_GA1[] = {
S390_FEAT_UV_FEAT_AP_INTR,
};
+static uint16_t full_GEN17_GA1[] = {
+ S390_FEAT_VECTOR_ENH3,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3,
+ S390_FEAT_INEFF_NC_TX,
+ S390_FEAT_GROUP_GEN17_PTFF,
+ S390_FEAT_GROUP_MSA_EXT_10,
+ S390_FEAT_GROUP_MSA_EXT_10_PCKMO,
+ S390_FEAT_GROUP_MSA_EXT_11,
+ S390_FEAT_GROUP_MSA_EXT_11_PCKMO,
+ S390_FEAT_GROUP_MSA_EXT_13,
+ S390_FEAT_GROUP_MSA_EXT_13_PCKMO,
+ S390_FEAT_GROUP_CONCURRENT_FUNCTIONS,
+};
+
/* Default features (in order of release)
* Automatically includes corresponding base features.
@@ -675,15 +831,24 @@ static uint16_t default_GEN16_GA1[] = {
S390_FEAT_PAIE,
};
+static uint16_t default_GEN17_GA1[] = {
+ S390_FEAT_VECTOR_ENH3,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3,
+ S390_FEAT_GROUP_MSA_EXT_10,
+ S390_FEAT_GROUP_MSA_EXT_10_PCKMO,
+ S390_FEAT_GROUP_MSA_EXT_11,
+ S390_FEAT_GROUP_MSA_EXT_11_PCKMO,
+ S390_FEAT_GROUP_MSA_EXT_13,
+ S390_FEAT_GROUP_MSA_EXT_13_PCKMO,
+};
+
/* QEMU (CPU model) features */
-static uint16_t qemu_V2_11[] = {
+static uint16_t qemu_MIN[] = {
+ /* Features supported by the default CPU of the oldest machine type */
S390_FEAT_GROUP_PLO,
S390_FEAT_ESAN3,
S390_FEAT_ZARCH,
-};
-
-static uint16_t qemu_V3_1[] = {
S390_FEAT_DAT_ENH,
S390_FEAT_IDTE_SEGMENT,
S390_FEAT_STFLE,
@@ -713,18 +878,12 @@ static uint16_t qemu_V3_1[] = {
S390_FEAT_ADAPTER_INT_SUPPRESSION,
S390_FEAT_MSA_EXT_3,
S390_FEAT_MSA_EXT_4,
-};
-
-static uint16_t qemu_V4_0[] = {
/*
* Only BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER not
* implemented yet).
*/
S390_FEAT_FLOATING_POINT_EXT,
S390_FEAT_ZPCI,
-};
-
-static uint16_t qemu_V4_1[] = {
S390_FEAT_STFLE_53,
S390_FEAT_VECTOR,
};
@@ -823,6 +982,7 @@ static CpuFeatDefSpec CpuFeatDef[] = {
CPU_FEAT_INITIALIZER(GEN14_GA2),
CPU_FEAT_INITIALIZER(GEN15_GA1),
CPU_FEAT_INITIALIZER(GEN16_GA1),
+ CPU_FEAT_INITIALIZER(GEN17_GA1),
};
#define FEAT_GROUP_INITIALIZER(_name) \
@@ -845,8 +1005,10 @@ typedef struct {
*******************************/
static FeatGroupDefSpec FeatGroupDef[] = {
FEAT_GROUP_INITIALIZER(PLO),
+ FEAT_GROUP_INITIALIZER(PLO_EXT),
FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING),
FEAT_GROUP_INITIALIZER(GEN13_PTFF),
+ FEAT_GROUP_INITIALIZER(GEN17_PTFF),
FEAT_GROUP_INITIALIZER(MSA),
FEAT_GROUP_INITIALIZER(MSA_EXT_1),
FEAT_GROUP_INITIALIZER(MSA_EXT_2),
@@ -858,9 +1020,17 @@ static FeatGroupDefSpec FeatGroupDef[] = {
FEAT_GROUP_INITIALIZER(MSA_EXT_8),
FEAT_GROUP_INITIALIZER(MSA_EXT_9),
FEAT_GROUP_INITIALIZER(MSA_EXT_9_PCKMO),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_10),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_10_PCKMO),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_11),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_11_PCKMO),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_12),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_13),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_13_PCKMO),
FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF),
FEAT_GROUP_INITIALIZER(ENH_SORT),
FEAT_GROUP_INITIALIZER(DEFLATE_CONVERSION),
+ FEAT_GROUP_INITIALIZER(CONCURRENT_FUNCTIONS),
};
#define QEMU_FEAT_INITIALIZER(_name) \
@@ -875,10 +1045,7 @@ static FeatGroupDefSpec FeatGroupDef[] = {
* QEMU (CPU model) features
*******************************/
static FeatGroupDefSpec QemuFeatDef[] = {
- QEMU_FEAT_INITIALIZER(V2_11),
- QEMU_FEAT_INITIALIZER(V3_1),
- QEMU_FEAT_INITIALIZER(V4_0),
- QEMU_FEAT_INITIALIZER(V4_1),
+ QEMU_FEAT_INITIALIZER(MIN),
QEMU_FEAT_INITIALIZER(V6_0),
QEMU_FEAT_INITIALIZER(V6_2),
QEMU_FEAT_INITIALIZER(V7_0),
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 00d5d40..3c57c32 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -1,5 +1,5 @@
/*
- * S/390 helpers - sysemu only
+ * S/390 helpers - system only
*
* Copyright (c) 2009 Ulrich Hecht
* Copyright (c) 2011 Alexander Graf
@@ -25,8 +25,10 @@
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
#include "target/s390x/kvm/pv.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
+#include "exec/target_page.h"
+#include "exec/watchpoint.h"
void s390x_tod_timer(void *opaque)
{
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
index 5195f06..1dca835 100644
--- a/target/s390x/interrupt.c
+++ b/target/s390x/interrupt.c
@@ -11,9 +11,8 @@
#include "cpu.h"
#include "kvm/kvm_s390x.h"
#include "s390x-internal.h"
-#include "exec/exec-all.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "hw/s390x/ioinst.h"
#include "tcg/tcg_s390x.h"
#if !defined(CONFIG_USER_ONLY)
@@ -30,6 +29,7 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code)
/* env->int_pgm_ilen is already set, or will be set during unwinding */
}
+#if !defined(CONFIG_USER_ONLY)
void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
{
if (kvm_enabled()) {
@@ -41,7 +41,6 @@ void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
}
}
-#if !defined(CONFIG_USER_ONLY)
void cpu_inject_clock_comparator(S390CPU *cpu)
{
CPUS390XState *env = &cpu->env;
@@ -225,11 +224,9 @@ bool s390_cpu_has_stop_int(S390CPU *cpu)
return env->pending_int & INTERRUPT_STOP;
}
-#endif
bool s390_cpu_has_int(S390CPU *cpu)
{
-#ifndef CONFIG_USER_ONLY
if (!tcg_enabled()) {
return false;
}
@@ -238,7 +235,5 @@ bool s390_cpu_has_int(S390CPU *cpu)
s390_cpu_has_io_int(cpu) ||
s390_cpu_has_restart_int(cpu) ||
s390_cpu_has_stop_int(cpu);
-#else
- return false;
-#endif
}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
index bbe45a4..2320dd4 100644
--- a/target/s390x/ioinst.c
+++ b/target/s390x/ioinst.c
@@ -12,11 +12,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "s390x-internal.h"
#include "hw/s390x/ioinst.h"
#include "trace.h"
#include "hw/s390x/s390-pci-bus.h"
#include "target/s390x/kvm/pv.h"
+#include "hw/s390x/ap-bridge.h"
/* All I/O instructions but chsc use the s format */
static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb,
@@ -573,13 +575,19 @@ out:
static int chsc_sei_nt0_get_event(void *res)
{
- /* no events yet */
+ if (s390_has_feat(S390_FEAT_AP)) {
+ return ap_chsc_sei_nt0_get_event(res);
+ }
+
return 1;
}
static int chsc_sei_nt0_have_event(void)
{
- /* no events yet */
+ if (s390_has_feat(S390_FEAT_AP)) {
+ return ap_chsc_sei_nt0_have_event();
+ }
+
return 0;
}
@@ -603,7 +611,7 @@ static int chsc_sei_nt2_have_event(void)
#define CHSC_SEI_NT2 (1ULL << 61)
static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res)
{
- uint64_t selection_mask = ldq_p(&req->param1);
+ uint64_t selection_mask = ldq_be_p(&req->param1);
uint8_t *res_flags = (uint8_t *)res->data;
int have_event = 0;
int have_more = 0;
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index 94181d9..67d9a19 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -27,7 +27,7 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
-#include "sysemu/kvm_int.h"
+#include "system/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -36,12 +36,12 @@
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
-#include "sysemu/device_tree.h"
+#include "system/system.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
+#include "system/device_tree.h"
#include "gdbstub/enums.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
@@ -49,8 +49,9 @@
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
-#include "hw/s390x/s390-virtio-hcall.h"
+#include "hw/s390x/s390-hypercall.h"
#include "target/s390x/kvm/pv.h"
+#include CONFIG_DEVICES
#define kvm_vm_check_mem_attr(s, attr) \
kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
@@ -297,12 +298,6 @@ void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
return;
}
- if (!hpage_1m_allowed()) {
- error_setg(errp, "This QEMU machine does not support huge page "
- "mappings");
- return;
- }
-
if (pagesize != 1 * MiB) {
error_setg(errp, "Memory backing with 2G pages was specified, "
"but KVM does not support this memory backing");
@@ -373,13 +368,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
- if (ri_allowed()) {
- if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
- cap_ri = 1;
- }
- }
- if (cpu_model_allowed()) {
- kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
+ if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
+ cap_ri = 1;
}
/*
@@ -388,7 +379,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
* support is considered necessary, we only try to enable this for
* newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
*/
- if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
+ if (kvm_kernel_irqchip_allowed() &&
kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
}
@@ -407,6 +398,11 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return cpu->cpu_index;
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
@@ -472,7 +468,7 @@ static int can_sync_regs(CPUState *cs, int regs)
#define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
KVM_SYNC_CRS | KVM_SYNC_PREFIX)
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
CPUS390XState *env = cpu_env(cs);
struct kvm_fpu fpu = {};
@@ -598,7 +594,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return 0;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
CPUS390XState *env = cpu_env(cs);
struct kvm_fpu fpu;
@@ -1491,20 +1487,6 @@ static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
return r;
}
-static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
-{
- CPUS390XState *env = &cpu->env;
- int ret;
-
- ret = s390_virtio_hypercall(env);
- if (ret == -EINVAL) {
- kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
- return 0;
- }
-
- return ret;
-}
-
static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
uint64_t r1, r3;
@@ -1600,9 +1582,11 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
case DIAG_SET_CONTROL_PROGRAM_CODES:
handle_diag_318(cpu, run);
break;
+#ifdef CONFIG_S390_CCW_VIRTIO
case DIAG_KVM_HYPERCALL:
- r = handle_hypercall(cpu, run);
+ handle_diag_500(cpu, RA_IGNORED);
break;
+#endif /* CONFIG_S390_CCW_VIRTIO */
case DIAG_KVM_BREAKPOINT:
r = handle_sw_breakpoint(cpu, run);
break;
@@ -2195,6 +2179,9 @@ static int query_cpu_subfunc(S390FeatBitmap features)
if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
}
+ if (test_bit(S390_FEAT_CCF_BASE, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
+ }
return 0;
}
@@ -2248,6 +2235,9 @@ static int configure_cpu_subfunc(const S390FeatBitmap features)
if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
}
+ if (test_bit(S390_FEAT_CCF_BASE, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
+ }
return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
@@ -2359,10 +2349,6 @@ static int configure_cpu_feat(const S390FeatBitmap features)
bool kvm_s390_cpu_models_supported(void)
{
- if (!cpu_model_allowed()) {
- /* compatibility machines interfere with the cpu model */
- return false;
- }
return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
KVM_S390_VM_CPU_MACHINE) &&
kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
diff --git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c
index dde836d..2bc916a 100644
--- a/target/s390x/kvm/pv.c
+++ b/target/s390x/kvm/pv.c
@@ -16,10 +16,10 @@
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
-#include "sysemu/cpus.h"
+#include "system/kvm.h"
+#include "system/cpus.h"
#include "qom/object_interfaces.h"
-#include "exec/confidential-guest-support.h"
+#include "system/confidential-guest-support.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/sclp.h"
#include "target/s390x/kvm/kvm_s390x.h"
@@ -30,7 +30,7 @@ static struct kvm_s390_pv_info_vm info_vm;
static struct kvm_s390_pv_info_dump info_dump;
static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
- int *pvrc)
+ struct S390PVResponse *pv_resp)
{
struct kvm_pv_cmd pv_cmd = {
.cmd = cmd,
@@ -47,8 +47,10 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
"IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
rc);
}
- if (pvrc) {
- *pvrc = pv_cmd.rc;
+ if (pv_resp) {
+ pv_resp->cmd = cmd;
+ pv_resp->rc = pv_cmd.rc;
+ pv_resp->rrc = pv_cmd.rrc;
}
return rc;
}
@@ -57,16 +59,15 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
* This macro lets us pass the command as a string to the function so
* we can print it on an error.
*/
-#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL)
-#define s390_pv_cmd_pvrc(cmd, data, pvrc) __s390_pv_cmd(cmd, #cmd, data, pvrc)
-#define s390_pv_cmd_exit(cmd, data) \
-{ \
- int rc; \
- \
- rc = __s390_pv_cmd(cmd, #cmd, data, NULL); \
- if (rc) { \
- exit(1); \
- } \
+#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL)
+#define s390_pv_cmd_pv_resp(cmd, data, pv_resp) \
+ __s390_pv_cmd(cmd, #cmd, data, pv_resp)
+
+static void s390_pv_cmd_exit(uint32_t cmd, void *data)
+{
+ if (s390_pv_cmd(cmd, data)) {
+ exit(1);
+ }
}
int s390_pv_query_info(void)
@@ -133,7 +134,7 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
* If the feature is not present or if the VM is not larger than 2 GiB,
 * KVM_PV_ASYNC_CLEANUP_PREPARE will fail; no point in attempting it.
*/
- if ((MACHINE(ms)->maxram_size <= 2 * GiB) ||
+ if (s390_get_memory_limit(ms) <= 2 * GiB ||
!kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) {
return false;
}
@@ -147,18 +148,20 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
return true;
}
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp)
+#define UV_RC_SSC_INVAL_HOSTKEY 0x0108
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp, Error **errp)
{
- int ret, pvrc;
+ int ret;
struct kvm_s390_pv_sec_parm args = {
.origin = origin,
.length = length,
};
- ret = s390_pv_cmd_pvrc(KVM_PV_SET_SEC_PARMS, &args, &pvrc);
+ ret = s390_pv_cmd_pv_resp(KVM_PV_SET_SEC_PARMS, &args, pv_resp);
if (ret) {
error_setg(errp, "Failed to set secure execution parameters");
- if (pvrc == 0x108) {
+ if (pv_resp->rc == UV_RC_SSC_INVAL_HOSTKEY) {
error_append_hint(errp, "Please check whether the image is "
"correctly encrypted for this host\n");
}
@@ -170,7 +173,8 @@ int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp)
/*
* Called for each component in the SE type IPL parameter block 0.
*/
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
+int s390_pv_unpack(uint64_t addr, uint64_t size,
+ uint64_t tweak, struct S390PVResponse *pv_resp)
{
struct kvm_s390_pv_unp args = {
.addr = addr,
@@ -178,7 +182,7 @@ int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
.tweak = tweak,
};
- return s390_pv_cmd(KVM_PV_UNPACK, &args);
+ return s390_pv_cmd_pv_resp(KVM_PV_UNPACK, &args, pv_resp);
}
void s390_pv_prep_reset(void)
@@ -186,9 +190,9 @@ void s390_pv_prep_reset(void)
s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL);
}
-int s390_pv_verify(void)
+int s390_pv_verify(struct S390PVResponse *pv_resp)
{
- return s390_pv_cmd(KVM_PV_VERIFY, NULL);
+ return s390_pv_cmd_pv_resp(KVM_PV_VERIFY, NULL, pv_resp);
}
void s390_pv_unshare(void)
@@ -196,13 +200,29 @@ void s390_pv_unshare(void)
s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL);
}
-void s390_pv_inject_reset_error(CPUState *cs)
+void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp)
{
int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4;
CPUS390XState *env = &S390_CPU(cs)->env;
+ union {
+ struct {
+ uint16_t pv_cmd;
+ uint16_t pv_rrc;
+ uint16_t pv_rc;
+ uint16_t diag_rc;
+ };
+ uint64_t regs;
+ } resp = {
+ .pv_cmd = pv_resp.cmd,
+ .pv_rrc = pv_resp.rrc,
+ .pv_rc = pv_resp.rc,
+ .diag_rc = DIAG_308_RC_INVAL_FOR_PV
+ };
+
/* Report that we are unable to enter protected mode */
- env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
+ env->regs[r1 + 1] = resp.regs;
}
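The union above packs the ultravisor response (cmd, rrc, rc) together with the diag 308 return code into the single 64-bit register the guest reads. A standalone sketch of the same packing; field placement inside the 64-bit value follows the host struct layout exactly as in the helper, and the sample values are made up:

    #include <stdint.h>
    #include <stdio.h>

    union resp {
        struct {
            uint16_t pv_cmd;
            uint16_t pv_rrc;
            uint16_t pv_rc;
            uint16_t diag_rc;
        };
        uint64_t regs;
    };

    int main(void)
    {
        /* made-up response codes, for illustration only */
        union resp r = { .pv_cmd = 0x10, .pv_rrc = 0x1, .pv_rc = 0x108,
                         .diag_rc = 0x0302 };

        printf("regs = 0x%016llx\n", (unsigned long long)r.regs);
        return 0;
    }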
uint64_t kvm_s390_pv_dmp_get_size_cpu(void)
@@ -367,7 +387,7 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest,
{ TYPE_USER_CREATABLE },
{ NULL })
-static void s390_pv_guest_class_init(ObjectClass *oc, void *data)
+static void s390_pv_guest_class_init(ObjectClass *oc, const void *data)
{
ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h
index 4b40817..94e885e 100644
--- a/target/s390x/kvm/pv.h
+++ b/target/s390x/kvm/pv.h
@@ -13,9 +13,15 @@
#define HW_S390_PV_H
#include "qapi/error.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "hw/s390x/s390-virtio-ccw.h"
+struct S390PVResponse {
+ uint16_t cmd;
+ uint16_t rrc;
+ uint16_t rc;
+};
+
#ifdef CONFIG_KVM
#include "cpu.h"
@@ -42,12 +48,15 @@ int s390_pv_query_info(void);
int s390_pv_vm_enable(void);
void s390_pv_vm_disable(void);
bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms);
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp);
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp, Error **errp);
+int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak,
+ struct S390PVResponse *pv_resp);
void s390_pv_prep_reset(void);
-int s390_pv_verify(void);
+int s390_pv_verify(struct S390PVResponse *pv_resp);
void s390_pv_unshare(void);
-void s390_pv_inject_reset_error(CPUState *cs);
+void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp);
uint64_t kvm_s390_pv_dmp_get_size_cpu(void);
uint64_t kvm_s390_pv_dmp_get_size_mem_state(void);
uint64_t kvm_s390_pv_dmp_get_size_completion_data(void);
@@ -63,12 +72,15 @@ static inline int s390_pv_vm_enable(void) { return 0; }
static inline void s390_pv_vm_disable(void) {}
static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; }
static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp,
Error **errp) { return 0; }
-static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
+static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak,
+ struct S390PVResponse *pv_resp) { return 0; }
static inline void s390_pv_prep_reset(void) {}
-static inline int s390_pv_verify(void) { return 0; }
+static inline int s390_pv_verify(struct S390PVResponse *pv_resp) { return 0; }
static inline void s390_pv_unshare(void) {}
-static inline void s390_pv_inject_reset_error(CPUState *cs) {};
+static inline void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp) {};
static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; }
static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; }
static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; }
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
index a125ebc..3bea610 100644
--- a/target/s390x/machine.c
+++ b/target/s390x/machine.c
@@ -20,8 +20,8 @@
#include "kvm/kvm_s390x.h"
#include "migration/vmstate.h"
#include "tcg/tcg_s390x.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
static int cpu_post_load(void *opaque, int version_id)
{
diff --git a/target/s390x/meson.build b/target/s390x/meson.build
index 02ca43d..3b34ae0 100644
--- a/target/s390x/meson.build
+++ b/target/s390x/meson.build
@@ -27,8 +27,8 @@ s390x_system_ss.add(files(
'machine.c',
'mmu_helper.c',
'sigp.c',
- 'cpu-sysemu.c',
- 'cpu_models_sysemu.c',
+ 'cpu-system.c',
+ 'cpu_models_system.c',
))
s390x_user_ss = ss.source_set()
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 6c59d0d..00946e9 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -17,14 +17,14 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "kvm/kvm_s390x.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
-#include "exec/exec-all.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "hw/hw.h"
#include "hw/s390x/storage-keys.h"
#include "hw/boards.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index 825252d..a4ba622 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -240,11 +240,12 @@ uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
#ifndef CONFIG_USER_ONLY
unsigned int s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
-void s390_cpu_init_sysemu(Object *obj);
-bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp);
+void s390_cpu_system_init(Object *obj);
+bool s390_cpu_system_realize(DeviceState *dev, Error **errp);
void s390_cpu_finalize(Object *obj);
-void s390_cpu_class_init_sysemu(CPUClass *cc);
+void s390_cpu_system_class_init(CPUClass *cc);
void s390_cpu_machine_reset_cb(void *opaque);
+bool s390_cpu_has_work(CPUState *cs);
#else
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
@@ -341,6 +342,7 @@ void cpu_unmap_lowcore(LowCore *lowcore);
/* interrupt.c */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code);
+#ifndef CONFIG_USER_ONLY
void cpu_inject_clock_comparator(S390CPU *cpu);
void cpu_inject_cpu_timer(S390CPU *cpu);
void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
@@ -353,9 +355,11 @@ bool s390_cpu_has_restart_int(S390CPU *cpu);
bool s390_cpu_has_stop_int(S390CPU *cpu);
void cpu_inject_restart(S390CPU *cpu);
void cpu_inject_stop(S390CPU *cpu);
+#endif /* CONFIG_USER_ONLY */
/* ioinst.c */
+#ifndef CONFIG_USER_ONLY
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
@@ -373,6 +377,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+#endif /* CONFIG_USER_ONLY */
/* mem_helper.c */
@@ -399,6 +404,8 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
/* translate.c */
void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void s390x_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data);
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
index ad0ad61..5e95c497 100644
--- a/target/s390x/sigp.c
+++ b/target/s390x/sigp.c
@@ -12,11 +12,11 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "hw/boards.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/runstate.h"
-#include "exec/address-spaces.h"
-#include "exec/exec-all.h"
-#include "sysemu/tcg.h"
+#include "system/hw_accel.h"
+#include "system/runstate.h"
+#include "system/address-spaces.h"
+#include "exec/cputlb.h"
+#include "system/tcg.h"
#include "trace.h"
#include "qapi/qapi-types-machine.h"
@@ -251,24 +251,20 @@ static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
- S390CPU *cpu = S390_CPU(cs);
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
SigpInfo *si = arg.host_ptr;
cpu_synchronize_state(cs);
- scc->reset(cs, S390_CPU_RESET_INITIAL);
+ resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_INITIAL);
cpu_synchronize_post_reset(cs);
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
- S390CPU *cpu = S390_CPU(cs);
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
SigpInfo *si = arg.host_ptr;
cpu_synchronize_state(cs);
- scc->reset(cs, S390_CPU_RESET_NORMAL);
+ resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_NORMAL);
cpu_synchronize_post_reset(cs);
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c
index b36f8cd..6595ac7 100644
--- a/target/s390x/tcg/cc_helper.c
+++ b/target/s390x/tcg/cc_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index 93aabd2..4447bb6 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -17,8 +17,7 @@
#include "s390x-internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
static uint64_t R(uint64_t x, int c)
{
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index 4c0b692..e4c75d0 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -22,12 +22,14 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
+#include "exec/watchpoint.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c
index d8bd574..1ba4371 100644
--- a/target/s390x/tcg/fpu_helper.c
+++ b/target/s390x/tcg/fpu_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -780,7 +779,7 @@ uint32_t HELPER(kxb)(CPUS390XState *env, Int128 a, Int128 b)
uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
{
- float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status);
+ float32 ret = float32_muladd(f3, f2, f1, 0, &env->fpu_status);
handle_exceptions(env, false, GETPC());
return ret;
}
@@ -789,7 +788,7 @@ uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
{
- float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status);
+ float64 ret = float64_muladd(f3, f2, f1, 0, &env->fpu_status);
handle_exceptions(env, false, GETPC());
return ret;
}
@@ -798,7 +797,7 @@ uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1,
uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
{
- float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c,
+ float32 ret = float32_muladd(f3, f2, f1, float_muladd_negate_c,
&env->fpu_status);
handle_exceptions(env, false, GETPC());
return ret;
@@ -808,7 +807,7 @@ uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1,
uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
{
- float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c,
+ float64 ret = float64_muladd(f3, f2, f1, float_muladd_negate_c,
&env->fpu_status);
handle_exceptions(env, false, GETPC());
return ret;
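Swapping f2/f3 here (and v2/v3 in the vector variants later in this series) does not change the numeric result, since the product is commutative; it only changes which operand softfloat treats as the first factor, which matters for corner cases such as NaN selection, presumably the motivation for the change. A quick check of the fused semantics in plain C; fmaf() computes x*y + z in a single rounding, like MULTIPLY AND ADD:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        float f1 = 1.0f, f2 = 3.0f, f3 = 4.0f;

        /* product order is irrelevant for the finite-value result */
        printf("%f %f\n", fmaf(f3, f2, f1), fmaf(f2, f3, f1)); /* 13.0 13.0 */
        return 0;
    }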
diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc
index e7d61cd..ec730ee 100644
--- a/target/s390x/tcg/insn-data.h.inc
+++ b/target/s390x/tcg/insn-data.h.inc
@@ -1012,7 +1012,7 @@
D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM)
D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC)
D(0xb929, KMA, RRF_b, MSA8, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMA)
- D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO)
+ E(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO, IF_IO)
D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD)
D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD)
diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c
index 2af970f..fbda396 100644
--- a/target/s390x/tcg/int_helper.c
+++ b/target/s390x/tcg/int_helper.c
@@ -22,10 +22,9 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 6cdbc34..a03609a 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -24,14 +24,21 @@
#include "s390x-internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/cpu-common.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
-#include "exec/cpu_ldst.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
-#if !defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
+#include "user/page-protection.h"
+#else
#include "hw/s390x/storage-keys.h"
#include "hw/boards.h"
#endif
@@ -146,7 +153,7 @@ static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
int mmu_idx, bool nonfault,
void **phost, uintptr_t ra)
{
- int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx,
+ int flags = probe_access_flags(env, addr, size, access_type, mmu_idx,
nonfault, phost, ra);
if (unlikely(flags & TLB_INVALID_MASK)) {
@@ -225,10 +232,7 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
uint8_t byte, uint16_t size, int mmu_idx,
uintptr_t ra)
{
-#ifdef CONFIG_USER_ONLY
- memset(haddr, byte, size);
-#else
- if (likely(haddr)) {
+ if (user_or_likely(haddr)) {
memset(haddr, byte, size);
} else {
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
@@ -236,20 +240,19 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
}
}
-#endif
}
static void access_memset(CPUS390XState *env, S390Access *desta,
uint8_t byte, uintptr_t ra)
{
-
+ set_helper_retaddr(ra);
do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
desta->mmu_idx, ra);
- if (likely(!desta->size2)) {
- return;
+ if (unlikely(desta->size2)) {
+ do_access_memset(env, desta->vaddr2, desta->haddr2, byte,
+ desta->size2, desta->mmu_idx, ra);
}
- do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
- desta->mmu_idx, ra);
+ clear_helper_retaddr();
}
static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
@@ -300,41 +303,39 @@ static void access_memmove(CPUS390XState *env, S390Access *desta,
S390Access *srca, uintptr_t ra)
{
int len = desta->size1 + desta->size2;
- int diff;
assert(len == srca->size1 + srca->size2);
 /* Fall back to slow access in case we don't have access to all host pages */
- if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
- !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
- int i;
-
- for (i = 0; i < len; i++) {
- uint8_t byte = access_get_byte(env, srca, i, ra);
-
- access_set_byte(env, desta, i, byte, ra);
- }
- return;
- }
-
- diff = desta->size1 - srca->size1;
- if (likely(diff == 0)) {
- memmove(desta->haddr1, srca->haddr1, srca->size1);
- if (unlikely(srca->size2)) {
- memmove(desta->haddr2, srca->haddr2, srca->size2);
- }
- } else if (diff > 0) {
- memmove(desta->haddr1, srca->haddr1, srca->size1);
- memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
- if (likely(desta->size2)) {
- memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
+ if (user_or_likely(desta->haddr1 &&
+ srca->haddr1 &&
+ (!desta->size2 || desta->haddr2) &&
+ (!srca->size2 || srca->haddr2))) {
+ int diff = desta->size1 - srca->size1;
+
+ if (likely(diff == 0)) {
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ if (unlikely(srca->size2)) {
+ memmove(desta->haddr2, srca->haddr2, srca->size2);
+ }
+ } else if (diff > 0) {
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
+ if (likely(desta->size2)) {
+ memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
+ }
+ } else {
+ diff = -diff;
+ memmove(desta->haddr1, srca->haddr1, desta->size1);
+ memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
+ if (likely(srca->size2)) {
+ memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
+ }
}
} else {
- diff = -diff;
- memmove(desta->haddr1, srca->haddr1, desta->size1);
- memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
- if (likely(srca->size2)) {
- memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
+ for (int i = 0; i < len; i++) {
+ uint8_t byte = access_get_byte(env, srca, i, ra);
+ access_set_byte(env, desta, i, byte, ra);
}
}
}
@@ -372,6 +373,8 @@ static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ set_helper_retaddr(ra);
+
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) &
access_get_byte(env, &srca2, i, ra);
@@ -379,6 +382,8 @@ static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
c |= x;
access_set_byte(env, &desta, i, x, ra);
}
+
+ clear_helper_retaddr();
return c != 0;
}
@@ -413,6 +418,7 @@ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
return 0;
}
+ set_helper_retaddr(ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
access_get_byte(env, &srca2, i, ra);
@@ -420,6 +426,7 @@ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
c |= x;
access_set_byte(env, &desta, i, x, ra);
}
+ clear_helper_retaddr();
return c != 0;
}
@@ -447,6 +454,8 @@ static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ set_helper_retaddr(ra);
+
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) |
access_get_byte(env, &srca2, i, ra);
@@ -454,6 +463,8 @@ static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
c |= x;
access_set_byte(env, &desta, i, x, ra);
}
+
+ clear_helper_retaddr();
return c != 0;
}
@@ -490,11 +501,13 @@ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
} else if (!is_destructive_overlap(env, dest, src, l)) {
access_memmove(env, &desta, &srca, ra);
} else {
+ set_helper_retaddr(ra);
for (i = 0; i < l; i++) {
uint8_t byte = access_get_byte(env, &srca, i, ra);
access_set_byte(env, &desta, i, byte, ra);
}
+ clear_helper_retaddr();
}
return env->cc_op;
@@ -520,10 +533,12 @@ void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src)
access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ set_helper_retaddr(ra);
for (i = l - 1; i >= 0; i--) {
uint8_t byte = access_get_byte(env, &srca, i, ra);
access_set_byte(env, &desta, i, byte, ra);
}
+ clear_helper_retaddr();
}
/* move inverse */
@@ -540,11 +555,13 @@ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
src = wrap_address(env, src - l + 1);
access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+
+ set_helper_retaddr(ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);
-
access_set_byte(env, &desta, i, x, ra);
}
+ clear_helper_retaddr();
}
/* move numerics */
@@ -561,12 +578,15 @@ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+
+ set_helper_retaddr(ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
(access_get_byte(env, &srca2, i, ra) & 0xf0);
access_set_byte(env, &desta, i, x, ra);
}
+ clear_helper_retaddr();
}
/* move with offset */
@@ -586,6 +606,8 @@ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
/* Handle rightmost byte */
byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
+
+ set_helper_retaddr(ra);
byte_src = access_get_byte(env, &srca, len_src - 1, ra);
byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);
@@ -601,6 +623,7 @@ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
byte_dest |= byte_src << 4;
access_set_byte(env, &desta, i, byte_dest, ra);
}
+ clear_helper_retaddr();
}
/* move zones */
@@ -617,12 +640,15 @@ void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+
+ set_helper_retaddr(ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
(access_get_byte(env, &srca2, i, ra) & 0x0f);
access_set_byte(env, &desta, i, x, ra);
}
+ clear_helper_retaddr();
}
/* compare unsigned byte arrays */
@@ -967,15 +993,19 @@ uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
*/
access_prepare(&srca, env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
access_prepare(&desta, env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+
+ set_helper_retaddr(ra);
for (i = 0; i < len; i++) {
const uint8_t v = access_get_byte(env, &srca, i, ra);
access_set_byte(env, &desta, i, v, ra);
if (v == c) {
+ clear_helper_retaddr();
set_address_zero(env, r1, d + i);
return 1;
}
}
+ clear_helper_retaddr();
set_address_zero(env, r1, d + len);
set_address_zero(env, r2, s + len);
return 3;
@@ -1066,6 +1096,7 @@ static inline uint32_t do_mvcl(CPUS390XState *env,
*dest = wrap_address(env, *dest + len);
} else {
access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ set_helper_retaddr(ra);
/* The remaining length selects the padding byte. */
for (i = 0; i < len; (*destlen)--, i++) {
@@ -1075,6 +1106,7 @@ static inline uint32_t do_mvcl(CPUS390XState *env,
access_set_byte(env, &desta, i, pad >> 8, ra);
}
}
+ clear_helper_retaddr();
*dest = wrap_address(env, *dest + len);
}
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
index 303f86d..f7101be 100644
--- a/target/s390x/tcg/misc_helper.c
+++ b/target/s390x/tcg/misc_helper.c
@@ -26,23 +26,25 @@
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "exec/target_page.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"
#if !defined(CONFIG_USER_ONLY)
-#include "sysemu/cpus.h"
-#include "sysemu/sysemu.h"
+#include "system/cpus.h"
+#include "system/system.h"
#include "hw/s390x/ebcdic.h"
-#include "hw/s390x/s390-virtio-hcall.h"
+#include "hw/s390x/s390-hypercall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
+#include CONFIG_DEVICES
#endif
/* #define DEBUG_HELPER */
@@ -116,12 +118,15 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
uint64_t r;
switch (num) {
+#ifdef CONFIG_S390_CCW_VIRTIO
case 0x500:
- /* KVM hypercall */
+ /* QEMU/KVM hypercall */
bql_lock();
- r = s390_virtio_hypercall(env);
+ handle_diag_500(env_archcpu(env), GETPC());
bql_unlock();
+ r = 0;
break;
+#endif /* CONFIG_S390_CCW_VIRTIO */
case 0x44:
/* yield */
r = 0;
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index c81e035..c7e8574 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -31,7 +31,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
@@ -40,6 +39,7 @@
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
@@ -392,7 +392,6 @@ static int get_mem_index(DisasContext *s)
return MMU_HOME_IDX;
default:
g_assert_not_reached();
- break;
}
#endif
}
@@ -1250,11 +1249,7 @@ static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
compute_carry(s);
-
- TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
- tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
-
+ tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src);
return DISAS_NEXT;
}
@@ -6423,8 +6418,8 @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
dc->base.is_jmp = translate_one(env, dc);
if (dc->base.is_jmp == DISAS_NEXT) {
if (dc->ex_value ||
- !is_same_page(dcbase, dc->base.pc_next) ||
- !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
+ !translator_is_same_page(dcbase, dc->base.pc_next) ||
+ !translator_is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -6481,8 +6476,8 @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
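
op_addc64 above, like the sh4 and sparc hunks later in this patch, folds the old two-step tcg_gen_add2 carry chain into the single tcg_gen_addcio op, which produces out = in1 + in2 + carry_in together with the carry out. A sketch of the equivalence, reusing the operand roles from the removed s390x code (out/in1/in2/cc_src are the same values as o->out, o->in1, o->in2 and cc_src there):

    /* Old: thread the carry through cc_src with two double-word adds. */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(out, cc_src, in1, zero, cc_src, zero);
    tcg_gen_add2_i64(out, cc_src, out, cc_src, in2, zero);

    /* New: one op, same value and carry out. */
    tcg_gen_addcio_i64(out, cc_src, in1, in2, cc_src);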
diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c
index 75cf605..744f800 100644
--- a/target/s390x/tcg/vec_fpu_helper.c
+++ b/target/s390x/tcg/vec_fpu_helper.c
@@ -15,7 +15,6 @@
#include "vec.h"
#include "tcg_s390x.h"
#include "tcg/tcg-gvec-desc.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -621,8 +620,8 @@ static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
int i;
for (i = 0; i < 4; i++) {
- const float32 a = s390_vec_read_float32(v2, i);
- const float32 b = s390_vec_read_float32(v3, i);
+ const float32 a = s390_vec_read_float32(v3, i);
+ const float32 b = s390_vec_read_float32(v2, i);
const float32 c = s390_vec_read_float32(v4, i);
float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status);
@@ -645,8 +644,8 @@ static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
int i;
for (i = 0; i < 2; i++) {
- const float64 a = s390_vec_read_float64(v2, i);
- const float64 b = s390_vec_read_float64(v3, i);
+ const float64 a = s390_vec_read_float64(v3, i);
+ const float64 b = s390_vec_read_float64(v2, i);
const float64 c = s390_vec_read_float64(v4, i);
const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status);
@@ -664,8 +663,8 @@ static void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
const S390Vector *v4, CPUS390XState *env, bool s, int flags,
uintptr_t retaddr)
{
- const float128 a = s390_vec_read_float128(v2);
- const float128 b = s390_vec_read_float128(v3);
+ const float128 a = s390_vec_read_float128(v3);
+ const float128 b = s390_vec_read_float128(v2);
const float128 c = s390_vec_read_float128(v4);
uint8_t vxc, vec_exc = 0;
float128 ret;
diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c
index dafc4c3..46ec4a9 100644
--- a/target/s390x/tcg/vec_helper.c
+++ b/target/s390x/tcg/vec_helper.c
@@ -16,8 +16,7 @@
#include "tcg/tcg.h"
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3,
uint32_t desc)
diff --git a/target/s390x/trace-events b/target/s390x/trace-events
index d371ef7..ef3120d 100644
--- a/target/s390x/trace-events
+++ b/target/s390x/trace-events
@@ -6,7 +6,7 @@ ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x.
ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)"
ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command 0x%04x, len 0x%04x"
-# cpu-sysemu.c
+# cpu-system.c
cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
cpu_halt(int cpu_index) "halting cpu %d"
cpu_unhalt(int cpu_index) "unhalting cpu %d"
diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h
index a7cdb7e..f328715 100644
--- a/target/sh4/cpu-param.h
+++ b/target/sh4/cpu-param.h
@@ -2,13 +2,12 @@
* SH4 cpu parameters for qemu.
*
* Copyright (c) 2005 Samuel Tardieu
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef SH4_CPU_PARAM_H
#define SH4_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 12 /* 4k */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#ifdef CONFIG_USER_ONLY
@@ -17,4 +16,6 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
#endif
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 8f07261..4f561e8 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -24,8 +24,9 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "migration/vmstate.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static void superh_cpu_set_pc(CPUState *cs, vaddr value)
@@ -42,6 +43,29 @@ static vaddr superh_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState superh_get_tb_cpu_state(CPUState *cs)
+{
+ CPUSH4State *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = env->flags
+ | (env->fpscr & TB_FLAG_FPSCR_MASK)
+ | (env->sr & TB_FLAG_SR_MASK)
+ | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+#ifdef CONFIG_USER_ONLY
+ flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus;
+#endif
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+#ifdef CONFIG_USER_ONLY
+ /* For a gUSA region, notice the end of the region. */
+ .cs_base = flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0,
+#endif
+ };
+}
+
static void superh_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -81,12 +105,12 @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
}
return false;
}
-#endif
static bool superh_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
+#endif /* !CONFIG_USER_ONLY */
static int sh4_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -127,10 +151,23 @@ static void superh_cpu_reset_hold(Object *obj, ResetType type)
set_flush_to_zero(1, &env->fp_status);
#endif
set_default_nan_mode(1, &env->fp_status);
+ set_snan_bit_is_one(true, &env->fp_status);
+ /* sign bit clear, set all frac bits other than msb */
+ set_float_default_nan_pattern(0b00111111, &env->fp_status);
+ /*
+ * TODO: "SH-4 CPU Core Architecture ADCS 7182230F" doesn't say whether
+ * it detects tininess before or after rounding. Section 6.4 is clear
+ * that flush-to-zero happens when the result underflows, though, so
+ * either this should be "detect ftz after rounding" or else we should
+ * be setting "detect tininess before rounding".
+ */
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
}
static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
+ info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG
+ : BFD_ENDIAN_LITTLE;
info->mach = bfd_mach_sh4;
info->print_insn = print_insn_sh;
}
@@ -163,7 +200,7 @@ static void sh7750r_cpu_initfn(Object *obj)
env->features = SH_FEATURE_BCR3_AND_BCR4;
}
-static void sh7750r_class_init(ObjectClass *oc, void *data)
+static void sh7750r_class_init(ObjectClass *oc, const void *data)
{
SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
@@ -180,7 +217,7 @@ static void sh7751r_cpu_initfn(Object *obj)
env->features = SH_FEATURE_BCR3_AND_BCR4;
}
-static void sh7751r_class_init(ObjectClass *oc, void *data)
+static void sh7751r_class_init(ObjectClass *oc, const void *data)
{
SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
@@ -197,7 +234,7 @@ static void sh7785_cpu_initfn(Object *obj)
env->features = SH_FEATURE_SH4A;
}
-static void sh7785_class_init(ObjectClass *oc, void *data)
+static void sh7785_class_init(ObjectClass *oc, const void *data)
{
SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
@@ -240,28 +277,36 @@ static const VMStateDescription vmstate_sh_cpu = {
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps sh4_sysemu_ops = {
+ .has_work = superh_cpu_has_work,
.get_phys_page_debug = superh_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps superh_tcg_ops = {
+ /* MTTCG not yet supported: require strict ordering */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = false,
+
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
+ .get_tb_cpu_state = superh_get_tb_cpu_state,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,
+ .mmu_index = sh4_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = superh_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.cpu_exec_halt = superh_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
.io_recompile_replay_branch = superh_io_recompile_replay_branch,
#endif /* !CONFIG_USER_ONLY */
};
-static void superh_cpu_class_init(ObjectClass *oc, void *data)
+static void superh_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -275,8 +320,6 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
&scc->parent_phases);
cc->class_by_name = superh_cpu_class_by_name;
- cc->has_work = superh_cpu_has_work;
- cc->mmu_index = sh4_cpu_mmu_index;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
cc->get_pc = superh_cpu_get_pc;
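
The sh4 hunks above show the shape every target in this series converges on: has_work moves to SysemuCPUOps, mmu_index / get_tb_cpu_state / translate_code move into TCGCPUOps, and the memory-ordering declaration moves out of cpu-param.h. A minimal sketch for a hypothetical target "foo", using only fields that appear in the hunk above (all foo_* names are placeholders):

    static const TCGCPUOps foo_tcg_ops = {
        /* memory model is declared here now, not via TCG_GUEST_DEFAULT_MO */
        .guest_default_memory_order = TCG_MO_ALL,
        .mttcg_supported = false,

        .initialize = foo_translate_init,
        .translate_code = foo_translate_code,      /* was gen_intermediate_code() */
        .get_tb_cpu_state = foo_get_tb_cpu_state,  /* was cpu_get_tb_cpu_state() */
        .synchronize_from_tb = foo_synchronize_from_tb,
        .restore_state_to_opc = foo_restore_state_to_opc,
        .mmu_index = foo_mmu_index,                /* was CPUClass::mmu_index */
    #ifndef CONFIG_USER_ONLY
        .tlb_fill = foo_tlb_fill,
        .pointer_wrap = cpu_pointer_wrap_notreached,
        .cpu_exec_interrupt = foo_exec_interrupt,
        .cpu_exec_halt = foo_has_work,
        .cpu_exec_reset = cpu_reset,
        .do_interrupt = foo_do_interrupt,
    #endif
    };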
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index d928bcf..c41ab70 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -21,7 +21,9 @@
#define SH4_CPU_H
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
/* CPU Subtypes */
@@ -125,8 +127,6 @@ typedef struct tlb_t {
#define UTLB_SIZE 64
#define ITLB_SIZE 4
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
enum sh_features {
SH_FEATURE_SH4A = 1,
SH_FEATURE_BCR3_AND_BCR4 = 2,
@@ -248,6 +248,8 @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
uintptr_t retaddr);
void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#if !defined(CONFIG_USER_ONLY)
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
@@ -284,8 +286,6 @@ void cpu_load_tlb(CPUSH4State * env);
/* MMU modes definitions */
#define MMU_USER_IDX 1
-#include "exec/cpu-all.h"
-
/* MMU control register */
#define MMUCR 0x1F000010
#define MMUCR_AT (1<<0)
@@ -380,19 +380,4 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
}
-static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- /* For a gUSA region, notice the end of the region. */
- *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0;
- *flags = env->flags
- | (env->fpscr & TB_FLAG_FPSCR_MASK)
- | (env->sr & TB_FLAG_SR_MASK)
- | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
-#ifdef CONFIG_USER_ONLY
- *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
-#endif
-}
-
#endif /* SH4_CPU_H */
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 6702910..fb7642b 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -20,13 +20,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/sh4/sh_intc.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#endif
#define MMU_OK 0
@@ -187,7 +188,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
static void update_itlb_use(CPUSH4State * env, int itlbnb)
{
- uint8_t or_mask = 0, and_mask = (uint8_t) - 1;
+ uint32_t or_mask = 0, and_mask = 0xff;
switch (itlbnb) {
case 0:
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index 99394b7..557b1bf 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -19,8 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
#ifndef CONFIG_USER_ONLY
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 53b0921..70fd13a 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -19,11 +19,12 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
+#include "exec/translation-block.h"
#include "exec/translator.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"
@@ -53,7 +54,7 @@ typedef struct DisasContext {
#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif
/* Target-specific values for ctx->base.is_jmp. */
@@ -693,14 +694,8 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
return;
case 0x300e: /* addc Rm,Rn */
- {
- TCGv t0, t1;
- t0 = tcg_constant_tl(0);
- t1 = tcg_temp_new();
- tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
- tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
- REG(B11_8), t0, t1, cpu_sr_t);
- }
+ tcg_gen_addcio_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), REG(B7_4), cpu_sr_t);
return;
case 0x300f: /* addv Rm,Rn */
{
@@ -1791,7 +1786,6 @@ static void _decode_opc(DisasContext * ctx)
gen_helper_raise_fpu_disable(tcg_env);
}
ctx->base.is_jmp = DISAS_NORETURN;
- return;
}
static void decode_opc(DisasContext * ctx)
@@ -1939,16 +1933,16 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
NEXT_INSN;
switch (ctx->opcode & 0xf00f) {
case 0x300c: /* add Rm,Rn */
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
goto do_reg_op;
case 0x2009: /* and Rm,Rn */
- op_opc = INDEX_op_and_i32;
+ op_opc = INDEX_op_and;
goto do_reg_op;
case 0x200a: /* xor Rm,Rn */
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
goto do_reg_op;
case 0x200b: /* or Rm,Rn */
- op_opc = INDEX_op_or_i32;
+ op_opc = INDEX_op_or;
do_reg_op:
/* The operation register should be as expected, and the
other input cannot depend on the load. */
@@ -1975,7 +1969,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
goto fail;
}
op_dst = B11_8;
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
op_arg = tcg_constant_i32(-1);
break;
@@ -1983,7 +1977,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (op_dst != B11_8 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
op_arg = tcg_constant_i32(B7_0s);
break;
@@ -1994,7 +1988,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32; /* placeholder */
+ op_opc = INDEX_op_setcond; /* placeholder */
op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
op_arg = REG(op_src);
@@ -2029,7 +2023,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32;
+ op_opc = INDEX_op_setcond;
op_arg = tcg_constant_i32(0);
NEXT_INSN;
@@ -2086,7 +2080,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
ctx->memidx, ld_mop);
break;
- case INDEX_op_add_i32:
+ case INDEX_op_add:
if (op_dst != st_src) {
goto fail;
}
@@ -2104,7 +2098,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_and_i32:
+ case INDEX_op_and:
if (op_dst != st_src) {
goto fail;
}
@@ -2118,7 +2112,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_or_i32:
+ case INDEX_op_or:
if (op_dst != st_src) {
goto fail;
}
@@ -2132,7 +2126,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_xor_i32:
+ case INDEX_op_xor:
if (op_dst != st_src) {
goto fail;
}
@@ -2146,7 +2140,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_setcond_i32:
+ case INDEX_op_setcond:
if (st_src == ld_dst) {
goto fail;
}
@@ -2317,8 +2311,8 @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
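
The UNALIGN() change above is behavioural for system mode: the MemOp attached to guest loads/stores now carries MO_ALIGN rather than 0 (no alignment constraint), so unaligned addresses fault through superh_cpu_do_unaligned_access. A sketch of how the macro is typically folded into an emitted load — the wrapper name and operands are illustrative, not code from this file:

    /* Illustrative: how UNALIGN() is merged into the MemOp of a guest load. */
    static void gen_load32(DisasContext *ctx, TCGv_i32 val, TCGv addr)
    {
        /* In system mode this is now MO_TESL | MO_ALIGN, so an unaligned
           address raises the alignment exception instead of being accepted. */
        tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_TESL | UNALIGN(ctx));
    }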
diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h
index 82293fb..45eea9d 100644
--- a/target/sparc/cpu-param.h
+++ b/target/sparc/cpu-param.h
@@ -1,14 +1,13 @@
/*
* Sparc cpu parameters for qemu.
*
- * SPDX-License-Identifier: LGPL-2.0+
+ * SPDX-License-Identifier: LGPL-2.0-or-later
*/
#ifndef SPARC_CPU_PARAM_H
#define SPARC_CPU_PARAM_H
#ifdef TARGET_SPARC64
-# define TARGET_LONG_BITS 64
# define TARGET_PAGE_BITS 13 /* 8k */
# define TARGET_PHYS_ADDR_SPACE_BITS 41
# ifdef TARGET_ABI32
@@ -17,33 +16,11 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 44
# endif
#else
-# define TARGET_LONG_BITS 32
# define TARGET_PAGE_BITS 12 /* 4k */
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-/*
- * From Oracle SPARC Architecture 2015:
- *
- * Compatibility notes: The PSO memory model described in SPARC V8 and
- * SPARC V9 compatibility architecture specifications was never implemented
- * in a SPARC V9 implementation and is not included in the Oracle SPARC
- * Architecture specification.
- *
- * The RMO memory model described in the SPARC V9 specification was
- * implemented in some non-Sun SPARC V9 implementations, but is not
- * directly supported in Oracle SPARC Architecture 2015 implementations.
- *
- * Therefore always use TSO in QEMU.
- *
- * D.5 Specification of Partial Store Order (PSO)
- * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore.
- *
- * D.6 Specification of Total Store Order (TSO)
- * ... PSO with the additional requirement that all [stores] are followed
- * by an implied MEMBAR #StoreStore.
- */
-#define TCG_GUEST_DEFAULT_MO (TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST)
+#define TARGET_INSN_START_EXTRA_WORDS 1
#endif
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 54cb269..ed7701b 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -22,10 +22,13 @@
#include "cpu.h"
#include "qemu/module.h"
#include "qemu/qemu-print.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/translation-block.h"
#include "hw/qdev-properties.h"
#include "qapi/visitor.h"
#include "tcg/tcg.h"
+#include "fpu/softfloat.h"
+#include "target/sparc/translate.h"
//#define DEBUG_FEATURES
@@ -76,6 +79,7 @@ static void sparc_cpu_reset_hold(Object *obj, ResetType type)
env->npc = env->pc + 4;
#endif
env->cache_control = 0;
+ cpu_put_fsr(env, 0);
}
#ifndef CONFIG_USER_ONLY
@@ -102,6 +106,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
info->print_insn = print_insn_sparc;
+ info->endian = BFD_ENDIAN_BIG;
#ifdef TARGET_SPARC64
info->mach = bfd_mach_sparc_v9b;
#endif
@@ -574,7 +579,7 @@ static void print_features(uint32_t features, const char *prefix)
}
}
-void sparc_cpu_list(void)
+static void sparc_cpu_list(void)
{
unsigned int i;
@@ -711,11 +716,77 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
cpu->env.npc = tb->cs_base;
}
+static TCGTBCPUState sparc_get_tb_cpu_state(CPUState *cs)
+{
+ CPUSPARCState *env = cpu_env(cs);
+ uint32_t flags = cpu_mmu_index(cs, false);
+
+#ifndef CONFIG_USER_ONLY
+ if (cpu_supervisor_mode(env)) {
+ flags |= TB_FLAG_SUPER;
+ }
+#endif
+#ifdef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+ if (cpu_hypervisor_mode(env)) {
+ flags |= TB_FLAG_HYPER;
+ }
+#endif
+ if (env->pstate & PS_AM) {
+ flags |= TB_FLAG_AM_ENABLED;
+ }
+ if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+ flags |= env->asi << TB_FLAG_ASI_SHIFT;
+#else
+ if (env->psref) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+#ifndef CONFIG_USER_ONLY
+ if (env->fsr_qne) {
+ flags |= TB_FLAG_FSR_QNE;
+ }
+#endif /* !CONFIG_USER_ONLY */
+#endif /* TARGET_SPARC64 */
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+ .cs_base = env->npc,
+ };
+}
+
+static void sparc_restore_state_to_opc(CPUState *cs,
+ const TranslationBlock *tb,
+ const uint64_t *data)
+{
+ CPUSPARCState *env = cpu_env(cs);
+ target_ulong pc = data[0];
+ target_ulong npc = data[1];
+
+ env->pc = pc;
+ if (npc == DYNAMIC_PC) {
+ /* dynamic NPC: already stored */
+ } else if (npc & JUMP_PC) {
+ /* jump PC: use 'cond' and the jump targets of the translation */
+ if (env->cond) {
+ env->npc = npc & ~3;
+ } else {
+ env->npc = pc + 4;
+ }
+ } else {
+ env->npc = npc;
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_has_work(CPUState *cs)
{
return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_interrupts_enabled(cpu_env(cs));
}
+#endif /* !CONFIG_USER_ONLY */
static int sparc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -805,7 +876,19 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
env->version |= env->def.maxtl << 8;
env->version |= env->def.nwindows - 1;
#endif
- cpu_put_fsr(env, 0);
+
+ /*
+ * Prefer SNaN over QNaN, order B then A. It's OK to do this in realize
+ * rather than reset, because fp_status is after 'end_reset_fields' in
+ * the CPU state struct so it won't get zeroed on reset.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ba, &env->fp_status);
+ /* For fused-multiply add, prefer SNaN over QNaN, then C->B->A */
+ set_float_3nan_prop_rule(float_3nan_prop_s_cba, &env->fp_status);
+ /* For inf * 0 + NaN, return the input NaN */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+ /* Default NaN value: sign bit clear, all frac bits set */
+ set_float_default_nan_pattern(0b01111111, &env->fp_status);
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
@@ -860,14 +943,15 @@ static void sparc_set_nwindows(Object *obj, Visitor *v, const char *name,
cpu->env.def.nwindows = value;
}
-static PropertyInfo qdev_prop_nwindows = {
- .name = "int",
+static const PropertyInfo qdev_prop_nwindows = {
+ .type = "int",
+ .description = "Number of register windows",
.get = sparc_get_nwindows,
.set = sparc_set_nwindows,
};
/* This must match feature_name[]. */
-static Property sparc_cpu_properties[] = {
+static const Property sparc_cpu_properties[] = {
DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features,
CPU_FEATURE_BIT_FLOAT128, false),
#ifdef TARGET_SPARC64
@@ -903,30 +987,71 @@ static Property sparc_cpu_properties[] = {
DEFINE_PROP_UINT32("mmu-version", SPARCCPU, env.def.mmu_version, 0),
DEFINE_PROP("nwindows", SPARCCPU, env.def.nwindows,
qdev_prop_nwindows, uint32_t),
- DEFINE_PROP_END_OF_LIST()
};
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps sparc_sysemu_ops = {
+ .has_work = sparc_cpu_has_work,
.get_phys_page_debug = sparc_cpu_get_phys_page_debug,
.legacy_vmsd = &vmstate_sparc_cpu,
};
#endif
#ifdef CONFIG_TCG
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
+
+#ifndef CONFIG_USER_ONLY
+static vaddr sparc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+#ifdef TARGET_SPARC64
+ return cpu_env(cs)->pstate & PS_AM ? (uint32_t)result : result;
+#else
+ return (uint32_t)result;
+#endif
+}
+#endif
static const TCGCPUOps sparc_tcg_ops = {
+ /*
+ * From Oracle SPARC Architecture 2015:
+ *
+ * Compatibility notes: The PSO memory model described in SPARC V8 and
+ * SPARC V9 compatibility architecture specifications was never
+ * implemented in a SPARC V9 implementation and is not included in the
+ * Oracle SPARC Architecture specification.
+ *
+ * The RMO memory model described in the SPARC V9 specification was
+ * implemented in some non-Sun SPARC V9 implementations, but is not
+ * directly supported in Oracle SPARC Architecture 2015 implementations.
+ *
+ * Therefore always use TSO in QEMU.
+ *
+ * D.5 Specification of Partial Store Order (PSO)
+ * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore.
+ *
+ * D.6 Specification of Total Store Order (TSO)
+ * ... PSO with the additional requirement that all [stores] are followed
+ * by an implied MEMBAR #StoreStore.
+ */
+ .guest_default_memory_order = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST,
+ .mttcg_supported = true,
+
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
+ .get_tb_cpu_state = sparc_get_tb_cpu_state,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,
+ .mmu_index = sparc_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = sparc_cpu_tlb_fill,
+ .pointer_wrap = sparc_pointer_wrap,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.cpu_exec_halt = sparc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
@@ -934,7 +1059,7 @@ static const TCGCPUOps sparc_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void sparc_cpu_class_init(ObjectClass *oc, void *data)
+static void sparc_cpu_class_init(ObjectClass *oc, const void *data)
{
SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -949,9 +1074,8 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
&scc->parent_phases);
cc->class_by_name = sparc_cpu_class_by_name;
+ cc->list_cpus = sparc_cpu_list;
cc->parse_features = sparc_cpu_parse_features;
- cc->has_work = sparc_cpu_has_work;
- cc->mmu_index = sparc_cpu_mmu_index;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
@@ -984,7 +1108,7 @@ static const TypeInfo sparc_cpu_type_info = {
.class_init = sparc_cpu_class_init,
};
-static void sparc_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+static void sparc_cpu_cpudef_class_init(ObjectClass *oc, const void *data)
{
SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
scc->cpu_def = data;
@@ -997,10 +1121,10 @@ static void sparc_register_cpudef_type(const struct sparc_def_t *def)
.name = typename,
.parent = TYPE_SPARC_CPU,
.class_init = sparc_cpu_cpudef_class_init,
- .class_data = (void *)def,
+ .class_data = def,
};
- type_register(&ti);
+ type_register_static(&ti);
g_free(typename);
}
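
The NaN-propagation calls added to sparc_cpu_realizefn encode, per the comments in that hunk, "prefer SNaN over QNaN, order B then A" for two-operand ops and "SNaN first, then C->B->A" for fused multiply-add. Read as a truth table (an informal reading of the rule names, not softfloat documentation):

    /*
     * float_2nan_prop_s_ba, when both operands frA and frB are NaN:
     *   frA = SNaN, frB = QNaN  ->  propagate frA (the signaling NaN wins)
     *   frA = QNaN, frB = SNaN  ->  propagate frB (the signaling NaN wins)
     *   both SNaN or both QNaN  ->  propagate frB (B preferred over A)
     * float_3nan_prop_s_cba applies the same idea to a * b + c: pick the
     * first signaling NaN scanning c, b, a, else the first quiet NaN in
     * that order.
     */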
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index dfd9512..31cb3d9 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -3,7 +3,9 @@
#include "qemu/bswap.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
#if !defined(TARGET_SPARC64)
@@ -184,6 +186,8 @@ enum {
#define FSR_FTT_SEQ_ERROR (4ULL << 14)
#define FSR_FTT_INVAL_FPR (6ULL << 14)
+#define FSR_QNE (1ULL << 13)
+
#define FSR_FCC0_SHIFT 10
#define FSR_FCC1_SHIFT 32
#define FSR_FCC2_SHIFT 34
@@ -219,7 +223,6 @@ typedef struct trap_state {
uint32_t tt;
} trap_state;
#endif
-#define TARGET_INSN_START_EXTRA_WORDS 1
typedef struct sparc_def_t {
const char *name;
@@ -438,6 +441,26 @@ struct CPUArchState {
uint32_t fsr_cexc_ftt; /* cexc, ftt */
uint32_t fcc[TARGET_FCCREGS]; /* fcc* */
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ /*
+ * Single-element FPU fault queue, with address and insn,
+ * packaged into the double-word with which it is stored.
+ */
+ uint32_t fsr_qne; /* qne */
+ union {
+ uint64_t d;
+ struct {
+#if HOST_BIG_ENDIAN
+ uint32_t addr;
+ uint32_t insn;
+#else
+ uint32_t insn;
+ uint32_t addr;
+#endif
+ } s;
+ } fq;
+#endif
+
CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */
uint32_t cwp; /* index of current register window (extracted
from PSR) */
@@ -552,7 +575,7 @@ struct SPARCCPUClass {
DeviceRealize parent_realize;
ResettablePhases parent_phases;
- sparc_def_t *cpu_def;
+ const sparc_def_t *cpu_def;
};
#ifndef CONFIG_USER_ONLY
@@ -572,7 +595,6 @@ G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t);
/* cpu_init.c */
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
-void sparc_cpu_list(void);
/* mmu_helper.c */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
@@ -582,15 +604,13 @@ void dump_mmu(CPUSPARCState *env);
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
- uint8_t *buf, int len, bool is_write);
+ uint8_t *buf, size_t len, bool is_write);
#endif
-
/* translate.c */
void sparc_tcg_init(void);
-void sparc_restore_state_to_opc(CPUState *cs,
- const TranslationBlock *tb,
- const uint64_t *data);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
/* fop_helper.c */
target_ulong cpu_get_fsr(CPUSPARCState *);
@@ -645,8 +665,6 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
#define CPU_RESOLVING_TYPE TYPE_SPARC_CPU
-#define cpu_list sparc_cpu_list
-
/* MMU modes definitions */
#if defined (TARGET_SPARC64)
#define MMU_USER_IDX 0
@@ -707,8 +725,6 @@ static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
#endif
}
-#include "exec/cpu-all.h"
-
#ifdef TARGET_SPARC64
/* sun4u.c */
void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
@@ -722,41 +738,9 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_AM_ENABLED (1 << 5)
#define TB_FLAG_SUPER (1 << 6)
#define TB_FLAG_HYPER (1 << 7)
+#define TB_FLAG_FSR_QNE (1 << 8)
#define TB_FLAG_ASI_SHIFT 24
-static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- uint32_t flags;
- *pc = env->pc;
- *cs_base = env->npc;
- flags = cpu_mmu_index(env_cpu(env), false);
-#ifndef CONFIG_USER_ONLY
- if (cpu_supervisor_mode(env)) {
- flags |= TB_FLAG_SUPER;
- }
-#endif
-#ifdef TARGET_SPARC64
-#ifndef CONFIG_USER_ONLY
- if (cpu_hypervisor_mode(env)) {
- flags |= TB_FLAG_HYPER;
- }
-#endif
- if (env->pstate & PS_AM) {
- flags |= TB_FLAG_AM_ENABLED;
- }
- if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) {
- flags |= TB_FLAG_FPU_ENABLED;
- }
- flags |= env->asi << TB_FLAG_ASI_SHIFT;
-#else
- if (env->psref) {
- flags |= TB_FLAG_FPU_ENABLED;
- }
-#endif
- *pflags = flags;
-}
-
static inline bool tb_fpu_enabled(int tb_flags)
{
#if defined(CONFIG_USER_ONLY)
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index 0b30665..29fd166 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -344,17 +343,17 @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
}
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
- float32 s2, float32 s3, uint32_t op)
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
{
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
check_ieee_exceptions(env, GETPC());
return ret;
}
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
- float64 s2, float64 s3, uint32_t op)
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
{
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
check_ieee_exceptions(env, GETPC());
return ret;
}
@@ -446,7 +445,6 @@ static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra)
case float_relation_greater:
return 2;
case float_relation_unordered:
- env->fsr |= FSR_NVA;
return 3;
}
g_assert_not_reached();
@@ -490,14 +488,17 @@ uint32_t helper_fcmpeq(CPUSPARCState *env, Int128 src1, Int128 src2)
return finish_fcmp(env, r, GETPC());
}
-uint32_t helper_flcmps(float32 src1, float32 src2)
+uint32_t helper_flcmps(CPUSPARCState *env, float32 src1, float32 src2)
{
/*
* FLCMP never raises an exception nor modifies any FSR fields.
* Perform the comparison with a dummy fp environment.
*/
- float_status discard = { };
- FloatRelation r = float32_compare_quiet(src1, src2, &discard);
+ float_status discard = env->fp_status;
+ FloatRelation r;
+
+ set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard);
+ r = float32_compare_quiet(src1, src2, &discard);
switch (r) {
case float_relation_equal:
@@ -515,10 +516,13 @@ uint32_t helper_flcmps(float32 src1, float32 src2)
g_assert_not_reached();
}
-uint32_t helper_flcmpd(float64 src1, float64 src2)
+uint32_t helper_flcmpd(CPUSPARCState *env, float64 src1, float64 src2)
{
- float_status discard = { };
- FloatRelation r = float64_compare_quiet(src1, src2, &discard);
+ float_status discard = env->fp_status;
+ FloatRelation r;
+
+ set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard);
+ r = float64_compare_quiet(src1, src2, &discard);
switch (r) {
case float_relation_equal:
@@ -545,6 +549,8 @@ target_ulong cpu_get_fsr(CPUSPARCState *env)
fsr |= (uint64_t)env->fcc[1] << FSR_FCC1_SHIFT;
fsr |= (uint64_t)env->fcc[2] << FSR_FCC2_SHIFT;
fsr |= (uint64_t)env->fcc[3] << FSR_FCC3_SHIFT;
+#elif !defined(CONFIG_USER_ONLY)
+ fsr |= env->fsr_qne;
#endif
/* VER is kept completely separate until re-assembly. */
@@ -591,6 +597,8 @@ void cpu_put_fsr(CPUSPARCState *env, target_ulong fsr)
env->fcc[1] = extract64(fsr, FSR_FCC1_SHIFT, 2);
env->fcc[2] = extract64(fsr, FSR_FCC2_SHIFT, 2);
env->fcc[3] = extract64(fsr, FSR_FCC3_SHIFT, 2);
+#elif !defined(CONFIG_USER_ONLY)
+ env->fsr_qne = fsr & FSR_QNE;
#endif
set_fsr_nonsplit(env, fsr);
diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
index ec0036e..134617f 100644
--- a/target/sparc/gdbstub.c
+++ b/target/sparc/gdbstub.c
@@ -79,8 +79,13 @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
}
}
if (n < 80) {
- /* f32-f62 (double width, even numbers only) */
- return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+ /* f32-f62 (16 double width registers, even register numbers only)
+ * n == 64: f32 : env->fpr[16]
+ * n == 65: f34 : env->fpr[17]
+ * etc...
+ * n == 79: f62 : env->fpr[31]
+ */
+ return gdb_get_reg64(mem_buf, env->fpr[(n - 64) + 16].ll);
}
switch (n) {
case 80:
@@ -173,8 +178,13 @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
return 4;
} else if (n < 80) {
- /* f32-f62 (double width, even numbers only) */
- env->fpr[(n - 32) / 2].ll = tmp;
+ /* f32-f62 (16 double width registers, even register numbers only)
+ * n == 64: f32 : env->fpr[16]
+ * n == 65: f34 : env->fpr[17]
+ * etc...
+ * n == 79: f62 : env->fpr[31]
+ */
+ env->fpr[(n - 64) + 16].ll = tmp;
} else {
switch (n) {
case 80:
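
The corrected mapping is: gdb register numbers 64..79 name the even-numbered double-width registers f32..f62, and env->fpr[] holds one double per slot, so the index is (n - 64) + 16; the old (n - 32) / 2 only reached fpr[16..23] and aliased adjacent gdb registers onto the same slot. Worked out:

    /* n is the gdb register number, 64 <= n < 80 */
    int fpr_index = (n - 64) + 16;      /* fpr[16] .. fpr[31]   */
    int arch_reg  = 32 + 2 * (n - 64);  /* f32, f34, ..., f62   */
    /* e.g. n = 70: arch_reg = 44, fpr[22] holds f44 as a 64-bit value */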
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index 7846ddd..9163b9d 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
index 134e519..3a7f7dc 100644
--- a/target/sparc/helper.h
+++ b/target/sparc/helper.h
@@ -51,15 +51,15 @@ DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, i32, env, f64, f64)
DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, i32, env, f64, f64)
DEF_HELPER_FLAGS_3(fcmpq, TCG_CALL_NO_WG, i32, env, i128, i128)
DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_WG, i32, env, i128, i128)
-DEF_HELPER_FLAGS_2(flcmps, TCG_CALL_NO_RWG_SE, i32, f32, f32)
-DEF_HELPER_FLAGS_2(flcmpd, TCG_CALL_NO_RWG_SE, i32, f64, f64)
+DEF_HELPER_FLAGS_3(flcmps, TCG_CALL_NO_RWG_SE, i32, env, f32, f32)
+DEF_HELPER_FLAGS_3(flcmpd, TCG_CALL_NO_RWG_SE, i32, env, f64, f64)
DEF_HELPER_2(raise_exception, noreturn, env, int)
DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
@@ -72,7 +72,7 @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
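
fmadds/fmaddd gain an s32 scale argument here because the helpers now call float32_muladd_scalbn()/float64_muladd_scalbn(), i.e. they compute (s1 * s2 + s3) * 2^sc with a single rounding. The VIS FHADD/FHSUB/FNHADD patterns later in this patch obtain their halving by passing sc = -1 instead of float_muladd_halve_result. A sketch of the intended identity — the wrapper is hypothetical, not a helper that exists in the tree:

    /* Sketch: halving via the scale argument instead of a muladd flag. */
    static float64 fhaddd_like(CPUS390XState_unused, CPUSPARCState *env,
                               float64 s1, float64 s2);
    static float64 fhaddd_like(CPUSPARCState *env, float64 s1, float64 s2)
    {
        /* (1.0 * s1 + s2) * 2^-1, rounded once. */
        return float64_muladd_scalbn(float64_one, s1, s2, -1, 0,
                                     &env->fp_status);
    }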
diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
index fbcb4f7..9e39d23 100644
--- a/target/sparc/insns.decode
+++ b/target/sparc/insns.decode
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: LGPL-2.0+
+# SPDX-License-Identifier: LGPL-2.0-or-later
#
# Sparc instruction decode definitions.
# Copyright (c) 2023 Richard Henderson <rth@twiddle.net>
@@ -96,7 +96,10 @@ CALL 01 i:s30
RDTICK 10 rd:5 101000 00100 0 0000000000000
RDPC 10 rd:5 101000 00101 0 0000000000000
RDFPRS 10 rd:5 101000 00110 0 0000000000000
- RDASR17 10 rd:5 101000 10001 0 0000000000000
+ {
+ RDASR17 10 rd:5 101000 10001 0 0000000000000
+ RDPIC 10 rd:5 101000 10001 0 0000000000000
+ }
RDGSR 10 rd:5 101000 10011 0 0000000000000
RDSOFTINT 10 rd:5 101000 10110 0 0000000000000
RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000
@@ -114,6 +117,8 @@ CALL 01 i:s30
WRCCR 10 00010 110000 ..... . ............. @n_r_ri
WRASI 10 00011 110000 ..... . ............. @n_r_ri
WRFPRS 10 00110 110000 ..... . ............. @n_r_ri
+ WRPCR 10 10000 110000 01000 0 0000000000000
+ WRPIC 10 10001 110000 01000 0 0000000000000
{
WRGSR 10 10011 110000 ..... . ............. @n_r_ri
WRPOWERDOWN 10 10011 110000 ..... . ............. @n_r_ri
@@ -321,12 +326,12 @@ FdMULq 10 ..... 110100 ..... 0 0110 1110 ..... @q_d_d
FNHADDs 10 ..... 110100 ..... 0 0111 0001 ..... @r_r_r
FNHADDd 10 ..... 110100 ..... 0 0111 0010 ..... @d_d_d
FNsMULd 10 ..... 110100 ..... 0 0111 1001 ..... @d_r_r
-FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @r_r2
-FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @r_d2
-FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @r_q2
-FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_r2
-FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_r2
-FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_r2
+FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @d_r2
+FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @d_d2
+FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @d_q2
+FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_d2
+FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_d2
+FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_d2
FiTOs 10 ..... 110100 00000 0 1100 0100 ..... @r_r2
FdTOs 10 ..... 110100 00000 0 1100 0110 ..... @r_d2
FqTOs 10 ..... 110100 00000 0 1100 0111 ..... @r_q2
@@ -644,8 +649,8 @@ STF 11 ..... 100100 ..... . ............. @r_r_ri_na
STFSR 11 00000 100101 ..... . ............. @n_r_ri
STXFSR 11 00001 100101 ..... . ............. @n_r_ri
{
- STQF 11 ..... 100110 ..... . ............. @q_r_ri_na
- STDFQ 11 ----- 100110 ----- - -------------
+ STQF 11 ..... 100110 ..... . ............. @q_r_ri_na # v9
+ STDFQ 11 ..... 100110 ..... . ............. @r_r_ri # v7,v8
}
STDF 11 ..... 100111 ..... . ............. @d_r_ri_na
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
index 6b7d65b..39db4ff 100644
--- a/target/sparc/int32_helper.c
+++ b/target/sparc/int32_helper.c
@@ -21,9 +21,9 @@
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
-#include "sysemu/runstate.h"
-
+#include "system/runstate.h"
static const char * const excp_names[0x80] = {
[TT_TFAULT] = "Instruction Access Fault",
@@ -116,22 +116,9 @@ void sparc_cpu_do_interrupt(CPUState *cs)
qemu_log("%6d: %s (v=%02x)\n", count, name, intno);
log_cpu_state(cs, 0);
-#if 0
- {
- int i;
- uint8_t *ptr;
-
- qemu_log(" code=");
- ptr = (uint8_t *)env->pc;
- for (i = 0; i < 16; i++) {
- qemu_log(" %02x", ldub(ptr + i));
- }
- qemu_log("\n");
- }
-#endif
count++;
}
-#if !defined(CONFIG_USER_ONLY)
+#ifndef CONFIG_USER_ONLY
if (env->psret == 0) {
if (cs->exception_index == 0x80 &&
env->def.features & CPU_FEATURE_TA0_SHUTDOWN) {
@@ -143,6 +130,29 @@ void sparc_cpu_do_interrupt(CPUState *cs)
}
return;
}
+ if (intno == TT_FP_EXCP) {
+ /*
+ * The sparc32 fpu has three states related to exception handling.
+ * The FPop that signals an exception transitions from fp_execute
+ * to fp_exception_pending. A subsequent FPop transitions from
+ * fp_exception_pending to fp_exception, which forces the trap.
+ *
+ * If the queue is not empty, this trap is due to execution of an
+ * illegal FPop while in fp_exception state. Here we are to
+ * re-enter fp_exception_pending state without queuing the insn.
+ *
+ * We do not model the fp_exception_pending state, but instead
+ * skip directly to fp_exception state. We advance pc/npc to
+ * mimic delayed trap delivery as if by the subsequent insn.
+ */
+ if (!env->fsr_qne) {
+ env->fsr_qne = FSR_QNE;
+ env->fq.s.addr = env->pc;
+ env->fq.s.insn = cpu_ldl_code(env, env->pc);
+ }
+ env->pc = env->npc;
+ env->npc = env->npc + 4;
+ }
#endif
env->psret = 0;
cwp = cpu_cwp_dec(env, env->cwp - 1);
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index 2d48e98..2c63eb9 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -19,12 +19,18 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "qemu/range.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
-#include "exec/cpu_ldst.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "system/memory.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
#include "asi.h"
//#define DEBUG_MMU
@@ -240,9 +246,7 @@ static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
if (new_ctx == ctx) {
uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
- if (new_vaddr == vaddr
- || (new_vaddr < vaddr + size
- && vaddr < new_vaddr + new_size)) {
+ if (ranges_overlap(new_vaddr, new_size, vaddr, size)) {
DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
new_vaddr);
replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
@@ -597,6 +601,9 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case 0x0C: /* Leon3 Data Cache config */
if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
ret = leon3_cache_control_ld(env, addr, size);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
+ " address, size: %d\n", addr, size);
}
break;
case 0x01c00a00: /* MXCC control register */
@@ -813,6 +820,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
case 0x0C: /* Leon3 Data Cache config */
if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
leon3_cache_control_st(env, addr, val, size);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
+ " address, size: %d\n", addr, size);
}
break;
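
The replace_tlb_1bit_lru hunk swaps a hand-rolled interval test for ranges_overlap() from qemu/range.h (now included at the top of the file). For non-zero lengths, and ignoring wrap-around at the top of the address space, the two are equivalent and the explicit new_vaddr == vaddr case is subsumed:

    /* ranges_overlap(new_vaddr, new_size, vaddr, size), for non-zero sizes, */
    /* is the classic half-open interval test:                              */
    bool overlap = new_vaddr < vaddr + size && vaddr < new_vaddr + new_size;
    /* which already covers the new_vaddr == vaddr case removed above.      */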
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
index 48e0cf2..4dd75af 100644
--- a/target/sparc/machine.c
+++ b/target/sparc/machine.c
@@ -1,6 +1,5 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "migration/cpu.h"
@@ -143,6 +142,24 @@ static const VMStateInfo vmstate_xcc = {
.get = get_xcc,
.put = put_xcc,
};
+#else
+static bool fq_needed(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ return cpu->env.fsr_qne;
+}
+
+static const VMStateDescription vmstate_fq = {
+ .name = "cpu/fq",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fq_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(env.fq.s.addr, SPARCCPU),
+ VMSTATE_UINT32(env.fq.s.insn, SPARCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
#endif
static int cpu_pre_save(void *opaque)
@@ -265,4 +282,11 @@ const VMStateDescription vmstate_sparc_cpu = {
#endif
VMSTATE_END_OF_LIST()
},
+#ifndef TARGET_SPARC64
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_fq,
+ NULL
+ },
+#endif
+
};
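
The fq subsection above, the union added to cpu.h and the STDFQ store in translate.c fit together: the HOST_BIG_ENDIAN-dependent member order always places the faulting address in the most significant half of fq.d, so the single big-endian (MO_TEUQ) 64-bit store emits address then instruction into guest memory regardless of host byte order, and migration only ships the two 32-bit fields when fsr_qne is set. A standalone host-side illustration (not QEMU code; HOST_BIG_ENDIAN is treated as an ordinary macro here):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the cpu.h union; HOST_BIG_ENDIAN selects the member order. */
    union fq {
        uint64_t d;
        struct {
    #if defined(HOST_BIG_ENDIAN) && HOST_BIG_ENDIAN
            uint32_t addr, insn;
    #else
            uint32_t insn, addr;
    #endif
        } s;
    };

    int main(void)
    {
        union fq q = { .s = { .addr = 0x1000, .insn = 0x81c3e008 } };
        /* On either host, q.d == 0x0000100081c3e008, so a big-endian
           64-bit store writes the address bytes first, then the insn. */
        printf("%016llx\n", (unsigned long long)q.d);
        return 0;
    }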
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 9ff0602..217580a 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -20,8 +20,12 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "system/memory.h"
#include "qemu/qemu-print.h"
#include "trace.h"
@@ -389,7 +393,7 @@ void dump_mmu(CPUSPARCState *env)
* that the sparc ABI is followed.
*/
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
- uint8_t *buf, int len, bool is_write)
+ uint8_t *buf, size_t len, bool is_write)
{
CPUSPARCState *env = cpu_env(cs);
target_ulong addr = address;
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 1136390..b922e53 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -22,14 +22,16 @@
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"
+#include "target/sparc/translate.h"
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
@@ -101,13 +103,6 @@
# define MAXTL_MASK 0
#endif
-/* Dynamic PC, must exit to main loop. */
-#define DYNAMIC_PC 1
-/* Dynamic PC, one of two values according to jump_pc[T2]. */
-#define JUMP_PC 2
-/* Dynamic PC, may lookup next TB. */
-#define DYNAMIC_PC_LOOKUP 3
-
#define DISAS_EXIT DISAS_TARGET_0
/* global register indexes */
@@ -185,6 +180,8 @@ typedef struct DisasContext {
bool supervisor;
#ifdef TARGET_SPARC64
bool hypervisor;
+#else
+ bool fsr_qne;
#endif
#endif
@@ -398,8 +395,7 @@ static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
TCGv z = tcg_constant_tl(0);
if (cin) {
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
+ tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin);
} else {
tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
}
@@ -1362,93 +1358,109 @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
+ TCGv_i32 z = tcg_constant_i32(0);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
+ TCGv_i32 z = tcg_constant_i32(0);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_c;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_c;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_c | float_muladd_negate_result;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
+ float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_c | float_muladd_negate_result;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
+ float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_result;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_result;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(0);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(0);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_negate_c | float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_negate_c | float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_negate_result | float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_negate_result | float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
@@ -1463,15 +1475,48 @@ static void gen_op_fpexception_im(DisasContext *dc, int ftt)
gen_exception(dc, TT_FP_EXCP);
}
-static int gen_trap_ifnofpu(DisasContext *dc)
+static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
if (!dc->fpu_enabled) {
gen_exception(dc, TT_NFPU_INSN);
- return 1;
+ return true;
}
#endif
- return 0;
+ return false;
+}
+
+static bool gen_trap_iffpexception(DisasContext *dc)
+{
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ /*
+ * There are 3 states for the sparc32 fpu:
+ * Normally the fpu is in fp_execute, and all insns are allowed.
+ * When an exception is signaled, it moves to fp_exception_pending state.
+ * Upon seeing the next FPop, the fpu moves to fp_exception state,
+ * populates the FQ, and generates an fp_exception trap.
+ * The fpu remains in fp_exception state until FQ becomes empty
+ * after execution of a STDFQ instruction. While the fpu is in
+ * fp_exception state, any FPop, fp load or fp branch insn will
+ * return to fp_exception_pending state, set FSR.FTT to sequence_error,
+ * and the insn will not be entered into the FQ.
+ *
+ * In QEMU, we do not model the fp_exception_pending state and
+ * instead populate FQ and raise the exception immediately.
+ * But we can still honor fp_exception state by noticing when
+ * the FQ is not empty.
+ */
+ if (dc->fsr_qne) {
+ gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+ return true;
+ }
+#endif
+ return false;
+}
+
+static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
+{
+ return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}
/* asi moves */
@@ -2641,7 +2686,7 @@ static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
DisasCompare cmp;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
gen_fcompare(&cmp, a->cc, a->cond);
@@ -2836,6 +2881,14 @@ static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
+static TCGv do_rdpic(DisasContext *dc, TCGv dst)
+{
+ return tcg_constant_tl(0);
+}
+
+TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
+
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
gen_helper_rdccr(dst, tcg_env);
@@ -3269,6 +3322,17 @@ static void do_wrfprs(DisasContext *dc, TCGv src)
TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
+static bool do_priv_nop(DisasContext *dc, bool priv)
+{
+ if (!priv) {
+ return raise_priv(dc);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
+TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
+
static void do_wrgsr(DisasContext *dc, TCGv src)
{
gen_trap_ifnofpu(dc);
@@ -4480,7 +4544,7 @@ static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
if (addr == NULL) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (sz == MO_128 && gen_trap_float128(dc)) {
@@ -4508,6 +4572,7 @@ static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
if (addr == NULL) {
return false;
}
+ /* Store insns are ok in fp_exception_pending state. */
if (gen_trap_ifnofpu(dc)) {
return true;
}
@@ -4521,7 +4586,7 @@ static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
-TRANS(STQF, ALL, do_st_fpr, a, MO_128)
+TRANS(STQF, 64, do_st_fpr, a, MO_128)
TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
@@ -4529,17 +4594,41 @@ TRANS(STQFA, 64, do_st_fpr, a, MO_128)
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
+ TCGv addr;
+
if (!avail_32(dc)) {
return false;
}
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
if (!supervisor(dc)) {
return raise_priv(dc);
}
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
if (gen_trap_ifnofpu(dc)) {
return true;
}
- gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
- return true;
+ if (!dc->fsr_qne) {
+ gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+ return true;
+ }
+
+ /* Store the single element from the queue. */
+ TCGv_i64 fq = tcg_temp_new_i64();
+ tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
+ tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);
+
+ /* Mark the queue empty, transitioning to fp_execute state. */
+ tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
+ offsetof(CPUSPARCState, fsr_qne));
+ dc->fsr_qne = 0;
+
+ return advance_pc(dc);
+#else
+ qemu_build_not_reached();
+#endif
}
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
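At run time the code generated by trans_STDFQ() above boils down to the following, shown with a stand-in struct for the two fields it touches (fq.d and fsr_qne). This is a paraphrase for clarity, not the actual translator output.

    #include <stdint.h>

    /* Stand-in for the fields used above; illustrative only. */
    struct sparc32_fq_state {
        uint64_t fq_d;       /* the single queued address/instruction pair */
        uint32_t fsr_qne;    /* FSR.qne: queue not empty */
    };

    static void stdfq_effect(struct sparc32_fq_state *s, uint64_t *dest)
    {
        if (!s->fsr_qne) {
            /* Queue empty: FSR.FTT = sequence_error, fp_exception trap. */
            return;
        }
        *dest = s->fq_d;     /* MO_TEUQ | MO_ALIGN_4 store in the real code */
        s->fsr_qne = 0;      /* queue now empty: back to fp_execute state */
    }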
@@ -4550,7 +4639,7 @@ static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
if (addr == NULL) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4574,7 +4663,7 @@ static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
if (addr == NULL) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4611,6 +4700,7 @@ static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
if (addr == NULL) {
return false;
}
+ /* Store insns are ok in fp_exception_pending state. */
if (gen_trap_ifnofpu(dc)) {
return true;
}
@@ -4653,7 +4743,7 @@ static bool do_ff(DisasContext *dc, arg_r_r *a,
{
TCGv_i32 tmp;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4694,7 +4784,7 @@ static bool do_env_ff(DisasContext *dc, arg_r_r *a,
{
TCGv_i32 tmp;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4714,7 +4804,7 @@ static bool do_env_fd(DisasContext *dc, arg_r_r *a,
TCGv_i32 dst;
TCGv_i64 src;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4734,7 +4824,7 @@ static bool do_dd(DisasContext *dc, arg_r_r *a,
{
TCGv_i64 dst, src;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4756,7 +4846,7 @@ static bool do_env_dd(DisasContext *dc, arg_r_r *a,
{
TCGv_i64 dst, src;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4796,7 +4886,7 @@ static bool do_env_df(DisasContext *dc, arg_r_r *a,
TCGv_i64 dst;
TCGv_i32 src;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4839,7 +4929,7 @@ static bool do_env_qq(DisasContext *dc, arg_r_r *a,
{
TCGv_i128 t;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -4860,7 +4950,7 @@ static bool do_env_fq(DisasContext *dc, arg_r_r *a,
TCGv_i128 src;
TCGv_i32 dst;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -4883,7 +4973,7 @@ static bool do_env_dq(DisasContext *dc, arg_r_r *a,
TCGv_i128 src;
TCGv_i64 dst;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -4906,7 +4996,7 @@ static bool do_env_qf(DisasContext *dc, arg_r_r *a,
TCGv_i32 src;
TCGv_i128 dst;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -4929,10 +5019,7 @@ static bool do_env_qd(DisasContext *dc, arg_r_r *a,
TCGv_i64 src;
TCGv_i128 dst;
- if (gen_trap_ifnofpu(dc)) {
- return true;
- }
- if (gen_trap_float128(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -4989,7 +5076,7 @@ static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
{
TCGv_i32 src1, src2;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -5198,7 +5285,7 @@ static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
{
TCGv_i64 dst, src1, src2;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -5222,7 +5309,7 @@ static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
TCGv_i64 dst;
TCGv_i32 src1, src2;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
@@ -5331,7 +5418,7 @@ static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
{
TCGv_i128 src1, src2;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -5355,7 +5442,7 @@ static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
TCGv_i64 src1, src2;
TCGv_i128 dst;
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -5445,7 +5532,7 @@ static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
if (avail_32(dc) && a->cc != 0) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -5469,7 +5556,7 @@ static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
if (avail_32(dc) && a->cc != 0) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
@@ -5493,7 +5580,7 @@ static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
if (avail_32(dc) && a->cc != 0) {
return false;
}
- if (gen_trap_ifnofpu(dc)) {
+ if (gen_trap_if_nofpu_fpexception(dc)) {
return true;
}
if (gen_trap_float128(dc)) {
@@ -5526,7 +5613,7 @@ static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
src1 = gen_load_fpr_F(dc, a->rs1);
src2 = gen_load_fpr_F(dc, a->rs2);
- gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
+ gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
return advance_pc(dc);
}
@@ -5543,7 +5630,7 @@ static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
src1 = gen_load_fpr_D(dc, a->rs1);
src2 = gen_load_fpr_D(dc, a->rs2);
- gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
+ gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
return advance_pc(dc);
}
@@ -5596,13 +5683,15 @@ static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
+# ifdef TARGET_SPARC64
+ dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
+# else
+ dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
+# endif
#endif
#ifdef TARGET_SPARC64
dc->fprs_dirty = 0;
dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
-#ifndef CONFIG_USER_ONLY
- dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
-#endif
#endif
/*
* if we reach a page boundary, we stop generation so that the
@@ -5748,8 +5837,8 @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
@@ -5821,26 +5910,3 @@ void sparc_tcg_init(void)
gregnames[i]);
}
}
-
-void sparc_restore_state_to_opc(CPUState *cs,
- const TranslationBlock *tb,
- const uint64_t *data)
-{
- CPUSPARCState *env = cpu_env(cs);
- target_ulong pc = data[0];
- target_ulong npc = data[1];
-
- env->pc = pc;
- if (npc == DYNAMIC_PC) {
- /* dynamic NPC: already stored */
- } else if (npc & JUMP_PC) {
- /* jump PC: use 'cond' and the jump targets of the translation */
- if (env->cond) {
- env->npc = npc & ~3;
- } else {
- env->npc = pc + 4;
- }
- } else {
- env->npc = npc;
- }
-}
diff --git a/target/sparc/translate.h b/target/sparc/translate.h
new file mode 100644
index 0000000..a46fa4f
--- /dev/null
+++ b/target/sparc/translate.h
@@ -0,0 +1,17 @@
+/*
+ * QEMU translation definitions for SPARC
+ *
+ * Copyright (c) 2024 Linaro, Ltd
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef SPARC_TRANSLATION_H
+#define SPARC_TRANSLATION_H
+
+/* Dynamic PC, must exit to main loop. */
+#define DYNAMIC_PC 1
+/* Dynamic PC, one of two values according to jump_pc[T2]. */
+#define JUMP_PC 2
+/* Dynamic PC, may lookup next TB. */
+#define DYNAMIC_PC_LOOKUP 3
+
+#endif
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
index b53fc9c..9ad9d01 100644
--- a/target/sparc/win_helper.c
+++ b/target/sparc/win_helper.c
@@ -20,33 +20,22 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "trace.h"
-static inline void memcpy32(target_ulong *dst, const target_ulong *src)
-{
- dst[0] = src[0];
- dst[1] = src[1];
- dst[2] = src[2];
- dst[3] = src[3];
- dst[4] = src[4];
- dst[5] = src[5];
- dst[6] = src[6];
- dst[7] = src[7];
-}
-
void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
{
/* put the modified wrap registers at their proper location */
if (env->cwp == env->nwindows - 1) {
- memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+ memcpy(env->regbase, env->regbase + env->nwindows * 16,
+ sizeof(env->gregs));
}
env->cwp = new_cwp;
/* put the wrap registers at their temporary location */
if (new_cwp == env->nwindows - 1) {
- memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+ memcpy(env->regbase + env->nwindows * 16, env->regbase,
+ sizeof(env->gregs));
}
env->regwptr = env->regbase + (new_cwp * 16);
}
@@ -361,8 +350,8 @@ void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl)
dst = get_gl_gregset(env, env->gl);
if (src != dst) {
- memcpy32(dst, env->gregs);
- memcpy32(env->gregs, src);
+ memcpy(dst, env->gregs, sizeof(env->gregs));
+ memcpy(env->gregs, src, sizeof(env->gregs));
}
}
@@ -393,8 +382,8 @@ void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
/* Switch global register bank */
src = get_gregset(env, new_pstate_regs);
dst = get_gregset(env, pstate_regs);
- memcpy32(dst, env->gregs);
- memcpy32(env->gregs, src);
+ memcpy(dst, env->gregs, sizeof(env->gregs));
+ memcpy(env->gregs, src, sizeof(env->gregs));
} else {
trace_win_helper_no_switch_pstate(new_pstate_regs);
}
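The removed memcpy32() copied exactly eight registers, and the replacement depends on sizeof(env->gregs) covering that same span. A self-contained check of the equivalence, using a stand-in register type since target_ulong varies by build:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint32_t reg_t;              /* stand-in for sparc32 target_ulong */

    struct regs { reg_t gregs[8]; };     /* mirrors the eight global registers */

    static void copy_globals(reg_t *dst, const reg_t *src)
    {
        /* Same effect as the old memcpy32(): copy eight registers. */
        memcpy(dst, src, sizeof(((struct regs *)0)->gregs));
    }

    int main(void)
    {
        _Static_assert(sizeof(((struct regs *)0)->gregs) == 8 * sizeof(reg_t),
                       "gregs spans exactly eight registers");
        reg_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8}, b[8] = {0};
        copy_globals(b, a);
        assert(b[7] == 8);
        return 0;
    }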
diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h
index e29d551..eb33a67 100644
--- a/target/tricore/cpu-param.h
+++ b/target/tricore/cpu-param.h
@@ -8,9 +8,10 @@
#ifndef TRICORE_CPU_PARAM_H
#define TRICORE_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 14
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_INSN_START_EXTRA_WORDS 0
+
#endif
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 1a26171..4f035b6 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -20,9 +20,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "qemu/error-report.h"
#include "tcg/debug-assert.h"
+#include "accel/tcg/cpu-ops.h"
static inline void set_feature(CPUTriCoreState *env, int feature)
{
@@ -44,6 +45,16 @@ static vaddr tricore_cpu_get_pc(CPUState *cs)
return cpu_env(cs)->PC;
}
+static TCGTBCPUState tricore_get_tb_cpu_state(CPUState *cs)
+{
+ CPUTriCoreState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->PC,
+ .flags = FIELD_DP32(0, TB_FLAGS, PRIV, extract32(env->PSW, 10, 2)),
+ };
+}
+
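tricore_get_tb_cpu_state() packs the two PSW privilege bits into the TB flags via FIELD_DP32(). Given FIELD(TB_FLAGS, PRIV, 0, 2) from cpu.h, the packing reduces to the shift/mask arithmetic below; this is a plain-C restatement, not the QEMU macros.

    #include <stdint.h>

    /* PRIV occupies bits [1:0] of the TB flags (FIELD(TB_FLAGS, PRIV, 0, 2)). */
    static uint32_t pack_tb_flags(uint32_t psw)
    {
        /* extract32(env->PSW, 10, 2): the two privilege bits of PSW */
        uint32_t priv = (psw >> 10) & 0x3;
        /* FIELD_DP32(0, TB_FLAGS, PRIV, priv): deposit into bits [1:0] */
        return priv << 0;
    }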
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -164,21 +175,28 @@ static bool tricore_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps tricore_sysemu_ops = {
+ .has_work = tricore_cpu_has_work,
.get_phys_page_debug = tricore_cpu_get_phys_page_debug,
};
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps tricore_tcg_ops = {
+ /* MTTCG not yet supported: require strict ordering */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = false,
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
+ .get_tb_cpu_state = tricore_get_tb_cpu_state,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
+ .mmu_index = tricore_cpu_mmu_index,
.tlb_fill = tricore_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = tricore_cpu_exec_interrupt,
.cpu_exec_halt = tricore_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
};
-static void tricore_cpu_class_init(ObjectClass *c, void *data)
+static void tricore_cpu_class_init(ObjectClass *c, const void *data)
{
TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
@@ -191,8 +209,6 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
resettable_class_set_parent_phases(rc, NULL, tricore_cpu_reset_hold, NULL,
&mcc->parent_phases);
cc->class_by_name = tricore_cpu_class_by_name;
- cc->has_work = tricore_cpu_has_work;
- cc->mmu_index = tricore_cpu_mmu_index;
cc->gdb_read_register = tricore_cpu_gdb_read_register;
cc->gdb_write_register = tricore_cpu_gdb_write_register;
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index 220af69..82085fb 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -22,10 +22,15 @@
#include "cpu-qom.h"
#include "hw/registerfields.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "tricore-defs.h"
+#ifdef CONFIG_USER_ONLY
+#error "TriCore does not support user mode emulation"
+#endif
+
typedef struct CPUArchState {
/* GPR Register */
uint32_t gpr_a[16];
@@ -246,24 +251,12 @@ void fpu_set_state(CPUTriCoreState *env);
#define MMU_USER_IDX 2
-#include "exec/cpu-all.h"
-
FIELD(TB_FLAGS, PRIV, 0, 2)
void cpu_state_reset(CPUTriCoreState *s);
void tricore_tcg_init(void);
-
-static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- uint32_t new_flags = 0;
- *pc = env->PC;
- *cs_base = 0;
-
- new_flags |= FIELD_DP32(new_flags, TB_FLAGS, PRIV,
- extract32(env->PSW, 10, 2));
- *flags = new_flags;
-}
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c
index 5d38aea..1b72dcc 100644
--- a/target/tricore/fpu_helper.c
+++ b/target/tricore/fpu_helper.c
@@ -43,7 +43,7 @@ static inline uint8_t f_get_excp_flags(CPUTriCoreState *env)
& (float_flag_invalid
| float_flag_overflow
| float_flag_underflow
- | float_flag_output_denormal
+ | float_flag_output_denormal_flushed
| float_flag_divbyzero
| float_flag_inexact);
}
@@ -99,7 +99,7 @@ static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags)
some_excp = 1;
}
- if (flags & float_flag_underflow || flags & float_flag_output_denormal) {
+ if (flags & float_flag_underflow || flags & float_flag_output_denormal_flushed) {
env->FPU_FU = 1 << 31;
some_excp = 1;
}
@@ -109,7 +109,7 @@ static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags)
some_excp = 1;
}
- if (flags & float_flag_inexact || flags & float_flag_output_denormal) {
+ if (flags & float_flag_inexact || flags & float_flag_output_denormal_flushed) {
env->PSW |= 1 << 26;
some_excp = 1;
}
diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c
index 29a7005..0b73b12 100644
--- a/target/tricore/gdbstub.c
+++ b/target/tricore/gdbstub.c
@@ -124,7 +124,7 @@ int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUTriCoreState *env = cpu_env(cs);
uint32_t tmp;
- tmp = ldl_p(mem_buf);
+ tmp = ldl_le_p(mem_buf);
if (n < 16) { /* data registers */
env->gpr_d[n] = tmp;
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index 7014255..e4c53d4 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -19,8 +19,10 @@
#include "qemu/log.h"
#include "hw/registerfields.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
#include "fpu/softfloat-helpers.h"
#include "qemu/qemu-print.h"
@@ -116,7 +118,10 @@ void fpu_set_state(CPUTriCoreState *env)
set_flush_inputs_to_zero(1, &env->fp_status);
set_flush_to_zero(1, &env->fp_status);
set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
+ set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
set_default_nan_mode(1, &env->fp_status);
+ /* Default NaN pattern: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
}
uint32_t psw_read(CPUTriCoreState *env)
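The 0b01000000 argument to set_float_default_nan_pattern() encodes "sign bit clear, frac msb set", per the new comment. Assuming the pattern byte is the sign bit followed by the top seven fraction bits, which matches that comment, the resulting float32 default NaN is the familiar 0x7fc00000:

    #include <stdint.h>

    /* Assumed interpretation: bit 7 = sign, bits 6..0 = top fraction bits. */
    static uint32_t default_nan_f32(uint8_t pattern)
    {
        uint32_t sign = (pattern >> 7) & 1;
        uint32_t frac_top = pattern & 0x7f;
        return (sign << 31) | (0xffu << 23) | (frac_top << 16);
    }

    /* default_nan_f32(0b01000000) == 0x7fc00000, the usual quiet NaN. */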
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index ba9c444..9910c13 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -18,8 +18,7 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include <zlib.h> /* for crc32 */
@@ -1505,8 +1504,8 @@ uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
{
- int32_t ret;
- int32_t i, msk;
+ uint32_t ret, msk;
+ int32_t i;
ret = 0;
msk = 0xff;
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index a46a03e..3d0e7a1 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -20,9 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "qemu/qemu-print.h"
#include "exec/helper-proto.h"
@@ -30,6 +29,8 @@
#include "tricore-opcodes.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "exec/log.h"
#define HELPER_H "helper.h"
@@ -1344,15 +1345,11 @@ static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
- TCGv carry = tcg_temp_new_i32();
- TCGv t0 = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
- tcg_gen_movi_tl(t0, 0);
- tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
/* Addition, carry and set C/V/SV bits */
- tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
- tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+ tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
/* calc V bit */
tcg_gen_xor_tl(cpu_PSW_V, result, r1);
tcg_gen_xor_tl(t0, r1, r2);
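gen_addc_CC() now folds the two chained add2 ops into a single add with carry-in and carry-out. In plain C, the value and carry that the removed sequence computed (treating any nonzero PSW_C word as a set carry, exactly as the old setcond did) are:

    #include <stdint.h>

    /* result = r1 + r2 + carry_in; carry_out is the 33rd bit of the sum. */
    static uint32_t addcio32(uint32_t r1, uint32_t r2,
                             uint32_t carry_in, uint32_t *carry_out)
    {
        uint64_t wide = (uint64_t)r1 + r2 + (carry_in != 0);
        *carry_out = (uint32_t)(wide >> 32);
        return (uint32_t)wide;
    }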
@@ -2732,8 +2729,7 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_movi_tl(mask, 1);
- tcg_gen_shl_tl(mask, mask, width);
+ tcg_gen_shl_tl(mask, tcg_constant_tl(1), width);
tcg_gen_subi_tl(mask, mask, 1);
tcg_gen_shl_tl(mask, mask, pos);
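gen_insert() now feeds a constant 1 straight into the shift instead of a movi+shl pair; the mask it builds is unchanged, ((1 << width) - 1) << pos. A generic C bit-field insert using that mask, with the operand roles chosen purely for illustration rather than as a claim about which TriCore register maps to which argument:

    #include <stdint.h>

    /* Assumes 0 < width < 32 so the shifts are well defined. */
    static uint32_t insert_field(uint32_t base, uint32_t value,
                                 uint32_t pos, uint32_t width)
    {
        uint32_t mask = ((1u << width) - 1u) << pos;
        return (base & ~mask) | ((value << pos) & mask);
    }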
@@ -3980,7 +3976,7 @@ static void decode_bit_andacc(DisasContext *ctx)
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
break;
case OPC2_32_BIT_AND_NOR_T:
- if (TCG_TARGET_HAS_andc_i32) {
+ if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
} else {
@@ -4113,7 +4109,7 @@ static void decode_bit_orand(DisasContext *ctx)
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
break;
case OPC2_32_BIT_OR_NOR_T:
- if (TCG_TARGET_HAS_orc_i32) {
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) {
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
} else {
@@ -8460,9 +8456,8 @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/Kconfig b/target/xtensa/Kconfig
index 5e46049..e8c2598 100644
--- a/target/xtensa/Kconfig
+++ b/target/xtensa/Kconfig
@@ -1,3 +1,3 @@
config XTENSA
bool
- select SEMIHOSTING
+ imply SEMIHOSTING if TCG
diff --git a/target/xtensa/core-dc232b/gdb-config.c.inc b/target/xtensa/core-dc232b/gdb-config.c.inc
index d871686..8c88cae 100644
--- a/target/xtensa/core-dc232b/gdb-config.c.inc
+++ b/target/xtensa/core-dc232b/gdb-config.c.inc
@@ -15,9 +15,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc,
0, 0, 0, 0, 0, 0)
diff --git a/target/xtensa/core-dc232b/xtensa-modules.c.inc b/target/xtensa/core-dc232b/xtensa-modules.c.inc
index 164df3b..bb9ebd2 100644
--- a/target/xtensa/core-dc232b/xtensa-modules.c.inc
+++ b/target/xtensa/core-dc232b/xtensa-modules.c.inc
@@ -14,9 +14,8 @@
General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
#include "qemu/osdep.h"
#include "xtensa-isa.h"
diff --git a/target/xtensa/core-fsf/xtensa-modules.c.inc b/target/xtensa/core-fsf/xtensa-modules.c.inc
index c32683f..531f5e2 100644
--- a/target/xtensa/core-fsf/xtensa-modules.c.inc
+++ b/target/xtensa/core-fsf/xtensa-modules.c.inc
@@ -14,9 +14,8 @@
General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
#include "qemu/osdep.h"
#include "xtensa-isa.h"
diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h
index 0000725..7a0c22c 100644
--- a/target/xtensa/cpu-param.h
+++ b/target/xtensa/cpu-param.h
@@ -8,7 +8,6 @@
#ifndef XTENSA_CPU_PARAM_H
#define XTENSA_CPU_PARAM_H
-#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#ifdef CONFIG_USER_ONLY
@@ -17,7 +16,6 @@
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-/* Xtensa processors have a weak memory model */
-#define TCG_GUEST_DEFAULT_MO (0)
+#define TARGET_INSN_START_EXTRA_WORDS 0
#endif
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index a08c7a0..ea9b6df 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -35,8 +35,9 @@
#include "qemu/module.h"
#include "migration/vmstate.h"
#include "hw/qdev-clock.h"
+#include "accel/tcg/cpu-ops.h"
#ifndef CONFIG_USER_ONLY
-#include "exec/memory.h"
+#include "system/memory.h"
#endif
@@ -54,6 +55,80 @@ static vaddr xtensa_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
+{
+ CPUXtensaState *env = cpu_env(cs);
+ uint32_t flags = 0;
+ target_ulong cs_base = 0;
+
+ flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ flags |= XTENSA_TBFLAG_EXCM;
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
+ target_ulong lend_dist =
+ env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
+
+ /*
+ * 0 in the csbase_lend field means that there may not be a loopback
+ * for any instruction that starts inside this page. Any other value
+ * means that an instruction that ends at this offset from the page
+ * start may loop back and will need loopback code to be generated.
+ *
+ * lend_dist is 0 when LEND points to the start of the page, but
+ * no instruction that starts inside this page may end at offset 0,
+ * so it's still correct.
+ *
+ * When an instruction ends at a page boundary it may only start in
+ * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
+ * for the TB that contains this instruction.
+ */
+ if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
+ target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+
+ cs_base = lend_dist;
+ if (lbeg_off < 256) {
+ cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ }
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
+ (env->sregs[LITBASE] & 1)) {
+ flags |= XTENSA_TBFLAG_LITBASE;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ flags |= XTENSA_TBFLAG_DEBUG;
+ }
+ if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+ flags |= XTENSA_TBFLAG_ICOUNT;
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
+ flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
+ (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t w = ctz32(windowstart | 0x8);
+
+ flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE;
+ flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT,
+ PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT;
+ } else {
+ flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
+ }
+ if (env->yield_needed) {
+ flags |= XTENSA_TBFLAG_YIELD;
+ }
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+ .cs_base = cs_base,
+ };
+}
+
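The LEND/LBEG handling moved into xtensa_get_tb_cpu_state() packs two values into cs_base: the distance from the page start to LEND in the low bits and, when it fits in a byte, LEND - LBEG shifted to XTENSA_CSBASE_LBEG_OFF_SHIFT (16). A numeric sketch of that packing, using a 4 KiB page purely as an example:

    #include <stdint.h>

    #define PAGE_BITS       12      /* example value only */
    #define LBEG_OFF_SHIFT  16      /* XTENSA_CSBASE_LBEG_OFF_SHIFT */

    static uint32_t encode_cs_base(uint32_t pc, uint32_t lend, uint32_t lbeg,
                                   uint32_t max_insn_size)
    {
        uint32_t lend_dist = lend - (pc & -(1u << PAGE_BITS));
        uint32_t cs_base = 0;

        if (lend_dist < (1u << PAGE_BITS) + max_insn_size) {
            uint32_t lbeg_off = lend - lbeg;

            cs_base = lend_dist;
            if (lbeg_off < 256) {
                cs_base |= lbeg_off << LBEG_OFF_SHIFT;
            }
        }
        return cs_base;
    }

    /*
     * Example: pc = 0x60000123, LEND = 0x60000140, LBEG = 0x60000130,
     * max_insn_size = 3  ->  lend_dist = 0x140, lbeg_off = 0x10,
     * cs_base = 0x00100140.
     */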
static void xtensa_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
@@ -63,16 +138,14 @@ static void xtensa_restore_state_to_opc(CPUState *cs,
cpu->env.pc = data[0];
}
+#ifndef CONFIG_USER_ONLY
static bool xtensa_cpu_has_work(CPUState *cs)
{
-#ifndef CONFIG_USER_ONLY
- XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = cpu_env(cs);
- return !cpu->env.runstall && cpu->env.pending_irq_level;
-#else
- return true;
-#endif
+ return !env->runstall && env->pending_irq_level;
}
+#endif /* !CONFIG_USER_ONLY */
static int xtensa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -133,8 +206,12 @@ static void xtensa_cpu_reset_hold(Object *obj, ResetType type)
reset_mmu(env);
cs->halted = env->runstall;
#endif
+ /* For inf * 0 + NaN, return the input NaN */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
set_no_signaling_nans(!dfpu, &env->fp_status);
- set_use_first_nan(!dfpu, &env->fp_status);
+ /* Default NaN value: sign bit clear, set frac msb */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ xtensa_use_first_nan(env, !dfpu);
}
static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model)
@@ -155,6 +232,8 @@ static void xtensa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
info->private_data = cpu->env.config->isa;
info->print_insn = print_insn_xtensa;
+ info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG
+ : BFD_ENDIAN_LITTLE;
}
static void xtensa_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -204,7 +283,7 @@ XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk)
{
DeviceState *cpu;
- cpu = DEVICE(object_new(cpu_type));
+ cpu = qdev_new(cpu_type);
qdev_connect_clock_in(cpu, "clk-in", cpu_refclk);
qdev_realize(cpu, NULL, &error_abort);
@@ -220,21 +299,29 @@ static const VMStateDescription vmstate_xtensa_cpu = {
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps xtensa_sysemu_ops = {
+ .has_work = xtensa_cpu_has_work,
.get_phys_page_debug = xtensa_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps xtensa_tcg_ops = {
+ /* Xtensa processors have a weak memory model */
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
+ .get_tb_cpu_state = xtensa_get_tb_cpu_state,
.restore_state_to_opc = xtensa_restore_state_to_opc,
+ .mmu_index = xtensa_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = xtensa_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.cpu_exec_halt = xtensa_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
@@ -242,7 +329,7 @@ static const TCGCPUOps xtensa_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
-static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
+static void xtensa_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -256,8 +343,6 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
&xcc->parent_phases);
cc->class_by_name = xtensa_cpu_class_by_name;
- cc->has_work = xtensa_cpu_has_work;
- cc->mmu_index = xtensa_cpu_mmu_index;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
cc->get_pc = xtensa_cpu_get_pc;
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 9f2341d..74122eb 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -30,7 +30,9 @@
#include "cpu-qom.h"
#include "qemu/cpu-float.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "hw/clock.h"
#include "xtensa-isa.h"
@@ -490,7 +492,7 @@ typedef struct XtensaConfig {
} XtensaConfig;
typedef struct XtensaConfigList {
- const XtensaConfig *config;
+ XtensaConfig *config;
struct XtensaConfigList *next;
} XtensaConfigList;
@@ -617,6 +619,8 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
void xtensa_collect_sr_names(const XtensaConfig *config);
void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
void xtensa_breakpoint_handler(CPUState *cs);
void xtensa_register_core(XtensaConfigList *node);
@@ -729,77 +733,13 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
#define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000
#define XTENSA_CSBASE_LBEG_OFF_SHIFT 16
-#include "exec/cpu-all.h"
-
-static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = 0;
- *flags |= xtensa_get_ring(env);
- if (env->sregs[PS] & PS_EXCM) {
- *flags |= XTENSA_TBFLAG_EXCM;
- } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
- target_ulong lend_dist =
- env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
-
- /*
- * 0 in the csbase_lend field means that there may not be a loopback
- * for any instruction that starts inside this page. Any other value
- * means that an instruction that ends at this offset from the page
- * start may loop back and will need loopback code to be generated.
- *
- * lend_dist is 0 when LEND points to the start of the page, but
- * no instruction that starts inside this page may end at offset 0,
- * so it's still correct.
- *
- * When an instruction ends at a page boundary it may only start in
- * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
- * for the TB that contains this instruction.
- */
- if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
- target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
-
- *cs_base = lend_dist;
- if (lbeg_off < 256) {
- *cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
- }
- }
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
- (env->sregs[LITBASE] & 1)) {
- *flags |= XTENSA_TBFLAG_LITBASE;
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
- if (xtensa_get_cintlevel(env) < env->config->debug_level) {
- *flags |= XTENSA_TBFLAG_DEBUG;
- }
- if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
- *flags |= XTENSA_TBFLAG_ICOUNT;
- }
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
- *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
- (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
- uint32_t windowstart = xtensa_replicate_windowstart(env) >>
- (env->sregs[WINDOW_BASE] + 1);
- uint32_t w = ctz32(windowstart | 0x8);
-
- *flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE;
- *flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT,
- PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT;
- } else {
- *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
- }
- if (env->yield_needed) {
- *flags |= XTENSA_TBFLAG_YIELD;
- }
-}
-
XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type,
Clock *cpu_refclk);
+/*
+ * Set the NaN propagation rule for future FPU operations:
+ * use_first is true to pick the first NaN as the result if both
+ * inputs are NaNs, false to pick the second.
+ */
+void xtensa_use_first_nan(CPUXtensaState *env, bool use_first);
#endif
diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c
index 5546c82..3b91f7c 100644
--- a/target/xtensa/dbg_helper.c
+++ b/target/xtensa/dbg_helper.c
@@ -30,8 +30,8 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
-#include "exec/address-spaces.h"
+#include "exec/watchpoint.h"
+#include "system/address-spaces.h"
void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index 0514c2c..b611c9b 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -32,7 +32,6 @@
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/atomic.h"
-#include "exec/exec-all.h"
void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
@@ -171,7 +170,7 @@ static void handle_interrupt(CPUXtensaState *env)
if (level > 1) {
/* env->config->nlevel check should have ensured this */
- assert(level < sizeof(env->config->interrupt_vector));
+ assert(level < ARRAY_SIZE(env->config->interrupt_vector));
env->sregs[EPC1 + level - 1] = env->pc;
env->sregs[EPS2 + level - 2] = env->sregs[PS];
diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
index 381e83d..5358060 100644
--- a/target/xtensa/fpu_helper.c
+++ b/target/xtensa/fpu_helper.c
@@ -30,7 +30,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "fpu/softfloat.h"
enum {
@@ -57,6 +56,14 @@ static const struct {
{ XTENSA_FP_V, float_flag_invalid, },
};
+void xtensa_use_first_nan(CPUXtensaState *env, bool use_first)
+{
+ set_float_2nan_prop_rule(use_first ? float_2nan_prop_ab : float_2nan_prop_ba,
+ &env->fp_status);
+ set_float_3nan_prop_rule(use_first ? float_3nan_prop_abc : float_3nan_prop_cba,
+ &env->fp_status);
+}
+
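xtensa_use_first_nan() selects whether the first or the second operand supplies the result when both inputs are NaN. Its effect can be pictured as the following decision; this is an illustration of the configured rule, not softfloat internals, and it ignores signaling-NaN quietening.

    #include <math.h>
    #include <stdbool.h>

    static double propagate_2nan(double a, double b, bool use_first)
    {
        if (isnan(a) && isnan(b)) {
            /* prop_ab keeps the first operand, prop_ba keeps the second. */
            return use_first ? a : b;
        }
        /* With only one NaN input, that one is propagated regardless. */
        return isnan(a) ? a : b;
    }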
void HELPER(wur_fpu2k_fcr)(CPUXtensaState *env, uint32_t v)
{
static const int rounding_mode[] = {
@@ -171,87 +178,87 @@ float32 HELPER(fpu2k_msub_s)(CPUXtensaState *env,
float64 HELPER(add_d)(CPUXtensaState *env, float64 a, float64 b)
{
- set_use_first_nan(true, &env->fp_status);
+ xtensa_use_first_nan(env, true);
return float64_add(a, b, &env->fp_status);
}
float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_add(a, b, &env->fp_status);
}
float64 HELPER(sub_d)(CPUXtensaState *env, float64 a, float64 b)
{
- set_use_first_nan(true, &env->fp_status);
+ xtensa_use_first_nan(env, true);
return float64_sub(a, b, &env->fp_status);
}
float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_sub(a, b, &env->fp_status);
}
float64 HELPER(mul_d)(CPUXtensaState *env, float64 a, float64 b)
{
- set_use_first_nan(true, &env->fp_status);
+ xtensa_use_first_nan(env, true);
return float64_mul(a, b, &env->fp_status);
}
float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_mul(a, b, &env->fp_status);
}
float64 HELPER(madd_d)(CPUXtensaState *env, float64 a, float64 b, float64 c)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float64_muladd(b, c, a, 0, &env->fp_status);
}
float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_muladd(b, c, a, 0, &env->fp_status);
}
float64 HELPER(msub_d)(CPUXtensaState *env, float64 a, float64 b, float64 c)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float64_muladd(b, c, a, float_muladd_negate_product,
&env->fp_status);
}
float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_muladd(b, c, a, float_muladd_negate_product,
&env->fp_status);
}
float64 HELPER(mkdadj_d)(CPUXtensaState *env, float64 a, float64 b)
{
- set_use_first_nan(true, &env->fp_status);
+ xtensa_use_first_nan(env, true);
return float64_div(b, a, &env->fp_status);
}
float32 HELPER(mkdadj_s)(CPUXtensaState *env, float32 a, float32 b)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_div(b, a, &env->fp_status);
}
float64 HELPER(mksadj_d)(CPUXtensaState *env, float64 v)
{
- set_use_first_nan(true, &env->fp_status);
+ xtensa_use_first_nan(env, true);
return float64_sqrt(v, &env->fp_status);
}
float32 HELPER(mksadj_s)(CPUXtensaState *env, float32 v)
{
- set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ xtensa_use_first_nan(env, env->config->use_first_nan);
return float32_sqrt(v, &env->fp_status);
}
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index ca214b9..2d93b45 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -28,7 +28,8 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
@@ -169,13 +170,12 @@ static void xtensa_finalize_config(XtensaConfig *config)
}
}
-static void xtensa_core_class_init(ObjectClass *oc, void *data)
+static void xtensa_core_class_init(ObjectClass *oc, const void *data)
{
CPUClass *cc = CPU_CLASS(oc);
XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
- XtensaConfig *config = data;
+ const XtensaConfig *config = data;
- xtensa_finalize_config(config);
xcc->config = config;
/*
@@ -192,13 +192,15 @@ void xtensa_register_core(XtensaConfigList *node)
TypeInfo type = {
.parent = TYPE_XTENSA_CPU,
.class_init = xtensa_core_class_init,
- .class_data = (void *)node->config,
+ .class_data = node->config,
};
+ xtensa_finalize_config(node->config);
+
node->next = xtensa_cores;
xtensa_cores = node;
type.name = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node->config->name);
- type_register(&type);
+ type_register_static(&type);
g_free((gpointer)type.name);
}
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
index 997b21d..71330fc 100644
--- a/target/xtensa/mmu_helper.c
+++ b/target/xtensa/mmu_helper.c
@@ -32,8 +32,12 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
@@ -991,7 +995,7 @@ uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
unsigned nhits;
- unsigned segment = XTENSA_MPU_PROBE_B;
+ unsigned segment;
unsigned bg_segment;
nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
@@ -1005,7 +1009,7 @@ uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
xtensa_mpu_lookup(env->config->mpu_bg,
env->config->n_mpu_bg_segments,
v, &bg_segment);
- return env->config->mpu_bg[bg_segment].attr | segment;
+ return env->config->mpu_bg[bg_segment].attr | XTENSA_MPU_PROBE_B;
}
}
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index 028d4e0..fc47eba 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -30,7 +30,7 @@
#include "exec/helper-proto.h"
#include "exec/page-protection.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
+#include "system/memory.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 75b7bfd..34ae2f4 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -31,17 +31,18 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
-#include "semihosting/semihost.h"
#include "exec/translator.h"
-
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-
#include "exec/log.h"
+#ifndef CONFIG_USER_ONLY
+#include "semihosting/semihost.h"
+#endif
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
@@ -521,7 +522,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
mop |= MO_ALIGN;
}
if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
- tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+ tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
}
return mop;
}
@@ -1227,8 +1228,8 @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
@@ -1393,11 +1394,11 @@ static void translate_bbi(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
TCGv_i32 tmp = tcg_temp_new_i32();
-#if TARGET_BIG_ENDIAN
- tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
-#else
- tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
-#endif
+ if (TARGET_BIG_ENDIAN) {
+ tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
+ } else {
+ tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
+ }
gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
}
@@ -2240,17 +2241,15 @@ static uint32_t test_exceptions_simcall(DisasContext *dc,
const OpcodeArg arg[],
const uint32_t par[])
{
- bool is_semi = semihosting_enabled(dc->cring != 0);
-#ifdef CONFIG_USER_ONLY
- bool ill = true;
-#else
- /* Between RE.2 and RE.3 simcall opcode's become nop for the hardware. */
- bool ill = dc->config->hw_version <= 250002 && !is_semi;
-#endif
- if (ill || !is_semi) {
- qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
+#ifndef CONFIG_USER_ONLY
+ if (semihosting_enabled(dc->cring != 0)) {
+ return 0;
}
- return ill ? XTENSA_OP_ILL : 0;
+#endif
+ qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
+
+ /* Between RE.2 and RE.3 the simcall opcode becomes a nop for the hardware. */
+ return dc->config->hw_version <= 250002 ? XTENSA_OP_ILL : 0;
}
static void translate_simcall(DisasContext *dc, const OpcodeArg arg[],
diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c
index ec9ff44..4b25f8f 100644
--- a/target/xtensa/win_helper.c
+++ b/target/xtensa/win_helper.c
@@ -30,7 +30,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
static void copy_window_from_phys(CPUXtensaState *env,
uint32_t window, uint32_t phys, uint32_t n)
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
index fa21b7e..636f421 100644
--- a/target/xtensa/xtensa-semi.c
+++ b/target/xtensa/xtensa-semi.c
@@ -29,7 +29,9 @@
#include "cpu.h"
#include "chardev/char-fe.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
#include "semihosting/semihost.h"
+#include "semihosting/uaccess.h"
#include "qapi/error.h"
#include "qemu/log.h"
@@ -323,15 +325,12 @@ void HELPER(simcall)(CPUXtensaState *env)
uint32_t fd = regs[3];
uint32_t rq = regs[4];
uint32_t target_tv = regs[5];
- uint32_t target_tvv[2];
struct timeval tv = {0};
if (target_tv) {
- cpu_memory_rw_debug(cs, target_tv,
- (uint8_t *)target_tvv, sizeof(target_tvv), 0);
- tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
- tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
+ get_user_u32(tv.tv_sec, target_tv);
+ get_user_u32(tv.tv_usec, target_tv + 4);
}
if (fd < 3 && sim_console) {
if ((fd == 1 || fd == 2) && rq == SELECT_ONE_WRITE) {
@@ -387,11 +386,8 @@ void HELPER(simcall)(CPUXtensaState *env)
const char *str = semihosting_get_arg(i);
int str_size = strlen(str) + 1;
- argptr = tswap32(regs[3] + str_offset);
-
- cpu_memory_rw_debug(cs,
- regs[3] + i * sizeof(uint32_t),
- (uint8_t *)&argptr, sizeof(argptr), 1);
+ put_user_u32(regs[3] + str_offset,
+ regs[3] + i * sizeof(uint32_t));
cpu_memory_rw_debug(cs,
regs[3] + str_offset,
(uint8_t *)str, str_size, 1);
diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index 44fcc12..d0622e6 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -11,20 +11,22 @@
*/
C_O0_I1(r)
C_O0_I2(r, rC)
-C_O0_I2(rZ, r)
+C_O0_I2(rz, r)
C_O0_I2(w, r)
-C_O0_I3(rZ, rZ, r)
+C_O0_I3(rz, rz, r)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I1(w, wr)
-C_O1_I2(r, 0, rZ)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
+C_O1_I2(r, rZ, rA)
+C_O1_I2(r, rz, rMZ)
+C_O1_I2(r, rz, rz)
C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
@@ -32,6 +34,5 @@ C_O1_I2(w, w, wN)
C_O1_I2(w, w, wO)
C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, r, rC, rZ, rZ)
+C_O1_I4(r, r, rC, rz, rz)
C_O2_I1(r, r, r)
-C_O2_I4(r, r, rZ, rZ, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
new file mode 100644
index 0000000..69e83ef
--- /dev/null
+++ b/tcg/aarch64/tcg-target-has.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#include "host/cpuinfo.h"
+
+#define have_lse (cpuinfo & CPUINFO_LSE)
+#define have_lse2 (cpuinfo & CPUINFO_LSE2)
+
+/* optional instructions */
+#define TCG_TARGET_HAS_extr_i64_i32 0
+
+/*
+ * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
+ * which requires writable pages. We must defer to the helper for user-only,
+ * but in system mode all ram is writable for the host.
+ */
+#ifdef CONFIG_USER_ONLY
+#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2
+#else
+#define TCG_TARGET_HAS_qemu_ldst_i128 1
+#endif
+
+#define TCG_TARGET_HAS_tst 1
+
+#define TCG_TARGET_HAS_v64 1
+#define TCG_TARGET_HAS_v128 1
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 1
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 1
+#define TCG_TARGET_HAS_cmpsel_vec 0
+#define TCG_TARGET_HAS_tst_vec 1
+
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
+#define TCG_TARGET_sextract_valid(type, ofs, len) 1
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
+
+#endif
diff --git a/tcg/aarch64/tcg-target-mo.h b/tcg/aarch64/tcg-target-mo.h
new file mode 100644
index 0000000..e8e8923
--- /dev/null
+++ b/tcg/aarch64/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/aarch64/tcg-target-opc.h.inc b/tcg/aarch64/tcg-target-opc.h.inc
new file mode 100644
index 0000000..5382315
--- /dev/null
+++ b/tcg/aarch64/tcg-target-opc.h.inc
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2019 Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+
+DEF(aa64_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(aa64_sli_vec, 1, 2, 1, TCG_OPF_VECTOR)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index ffa8a3e5..3b088b7 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -10,10 +10,21 @@
* See the COPYING file in the top-level directory for details.
*/
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
#include "qemu/bitops.h"
+/* Used for function call generation. */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#ifdef CONFIG_DARWIN
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+#else
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
+#endif
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
+
/* We're going to re-use TCGType in setting of the SF bit, which controls
the size of the operation performed. If we know the values match, it
makes things much cleaner. */
@@ -497,7 +508,9 @@ typedef enum {
/* Add/subtract with carry instructions. */
I3503_ADC = 0x1a000000,
+ I3503_ADCS = 0x3a000000,
I3503_SBC = 0x5a000000,
+ I3503_SBCS = 0x7a000000,
/* Conditional select instructions. */
I3506_CSEL = 0x1a800000,
@@ -1336,70 +1349,37 @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}
-static inline void tcg_out_shl(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int m)
-{
- int bits = ext ? 64 : 32;
- int max = bits - 1;
- tcg_out_ubfm(s, ext, rd, rn, (bits - m) & max, (max - m) & max);
-}
-
-static inline void tcg_out_shr(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int m)
-{
- int max = ext ? 63 : 31;
- tcg_out_ubfm(s, ext, rd, rn, m & max, max);
-}
-
-static inline void tcg_out_sar(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int m)
-{
- int max = ext ? 63 : 31;
- tcg_out_sbfm(s, ext, rd, rn, m & max, max);
-}
-
-static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int m)
-{
- int max = ext ? 63 : 31;
- tcg_out_extr(s, ext, rd, rn, rn, m & max);
-}
-
-static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int m)
+static void tgen_cmp(TCGContext *s, TCGType ext, TCGCond cond,
+ TCGReg a, TCGReg b)
{
- int max = ext ? 63 : 31;
- tcg_out_extr(s, ext, rd, rn, rn, -m & max);
+ if (is_tst_cond(cond)) {
+ tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
+ } else {
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
+ }
}
-static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
- TCGReg rn, unsigned lsb, unsigned width)
+static void tgen_cmpi(TCGContext *s, TCGType ext, TCGCond cond,
+ TCGReg a, tcg_target_long b)
{
- unsigned size = ext ? 64 : 32;
- unsigned a = (size - lsb) & (size - 1);
- unsigned b = width - 1;
- tcg_out_bfm(s, ext, rd, rn, a, b);
+ if (is_tst_cond(cond)) {
+ tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b);
+ } else if (b >= 0) {
+ tcg_debug_assert(is_aimm(b));
+ tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
+ } else {
+ tcg_debug_assert(is_aimm(-b));
+ tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
+ }
}
static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
tcg_target_long b, bool const_b)
{
- if (is_tst_cond(cond)) {
- if (!const_b) {
- tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
- } else {
- tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b);
- }
+ if (const_b) {
+ tgen_cmpi(s, ext, cond, a, b);
} else {
- if (!const_b) {
- tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
- } else if (b >= 0) {
- tcg_debug_assert(is_aimm(b));
- tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
- } else {
- tcg_debug_assert(is_aimm(-b));
- tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
- }
+ tgen_cmp(s, ext, cond, a, b);
}
}
@@ -1427,7 +1407,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
tcg_out_call_int(s, target);
}
-static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
if (!l->has_value) {
tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
@@ -1437,8 +1417,16 @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
}
}
-static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
- TCGArg b, bool b_const, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a, TCGReg b, TCGLabel *l)
+{
+ tgen_cmp(s, type, c, a, b);
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
+ tcg_out_insn(s, 3202, B_C, c, 0);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType ext, TCGCond c,
+ TCGReg a, tcg_target_long b, TCGLabel *l)
{
int tbit = -1;
bool need_cmp = true;
@@ -1447,14 +1435,14 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
case TCG_COND_EQ:
case TCG_COND_NE:
/* cmp xN,0; b.ne L -> cbnz xN,L */
- if (b_const && b == 0) {
+ if (b == 0) {
need_cmp = false;
}
break;
case TCG_COND_LT:
case TCG_COND_GE:
/* cmp xN,0; b.mi L -> tbnz xN,63,L */
- if (b_const && b == 0) {
+ if (b == 0) {
c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ);
tbit = ext ? 63 : 31;
need_cmp = false;
@@ -1463,14 +1451,14 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
case TCG_COND_TSTEQ:
case TCG_COND_TSTNE:
/* tst xN,0xffffffff; b.ne L -> cbnz wN,L */
- if (b_const && b == UINT32_MAX) {
+ if (b == UINT32_MAX) {
c = tcg_tst_eqne_cond(c);
ext = TCG_TYPE_I32;
need_cmp = false;
break;
}
/* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
- if (b_const && is_power_of_2(b)) {
+ if (is_power_of_2(b)) {
tbit = ctz64(b);
need_cmp = false;
}
@@ -1480,7 +1468,7 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
}
if (need_cmp) {
- tcg_out_cmp(s, ext, c, a, b, b_const);
+ tgen_cmpi(s, ext, c, a, b);
tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
tcg_out_insn(s, 3202, B_C, c, 0);
return;
@@ -1513,6 +1501,12 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
}
}
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
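tgen_brcondi() keeps the existing compare-and-branch peepholes, now keyed only on the immediate since the register case has its own entry point. The cases it recognises can be summarised as a small decision function; this is a restatement of the comments above, not the emitter itself.

    #include <stdbool.h>
    #include <stdint.h>

    enum br_kind { BR_CBZ_CBNZ, BR_TBZ_TBNZ, BR_CMP_BCOND };

    static enum br_kind classify(bool cond_is_eq_ne, bool cond_is_lt_ge,
                                 bool cond_is_tst, uint64_t b)
    {
        if (cond_is_eq_ne && b == 0) {
            return BR_CBZ_CBNZ;             /* cmp xN,0; b.ne L -> cbnz xN,L */
        }
        if (cond_is_lt_ge && b == 0) {
            return BR_TBZ_TBNZ;             /* test the sign bit (63 or 31) */
        }
        if (cond_is_tst && b == UINT32_MAX) {
            return BR_CBZ_CBNZ;             /* tst xN,0xffffffff -> cbnz wN */
        }
        if (cond_is_tst && b != 0 && !(b & (b - 1))) {
            return BR_TBZ_TBNZ;             /* single bit -> tbnz xN,ctz(b),L */
        }
        return BR_CMP_BCOND;                /* general cmp/tst + b.cond */
    }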
static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
TCGReg rd, TCGReg rn)
{
@@ -1581,67 +1575,7 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}
-static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
- TCGReg rn, int64_t aimm)
-{
- if (aimm >= 0) {
- tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
- } else {
- tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
- }
-}
-
-static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
- TCGReg rh, TCGReg al, TCGReg ah,
- tcg_target_long bl, tcg_target_long bh,
- bool const_bl, bool const_bh, bool sub)
-{
- TCGReg orig_rl = rl;
- AArch64Insn insn;
-
- if (rl == ah || (!const_bh && rl == bh)) {
- rl = TCG_REG_TMP0;
- }
-
- if (const_bl) {
- if (bl < 0) {
- bl = -bl;
- insn = sub ? I3401_ADDSI : I3401_SUBSI;
- } else {
- insn = sub ? I3401_SUBSI : I3401_ADDSI;
- }
-
- if (unlikely(al == TCG_REG_XZR)) {
- /* ??? We want to allow al to be zero for the benefit of
- negation via subtraction. However, that leaves open the
- possibility of adding 0+const in the low part, and the
- immediate add instructions encode XSP not XZR. Don't try
- anything more elaborate here than loading another zero. */
- al = TCG_REG_TMP0;
- tcg_out_movi(s, ext, al, 0);
- }
- tcg_out_insn_3401(s, insn, ext, rl, al, bl);
- } else {
- tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
- }
-
- insn = I3503_ADC;
- if (const_bh) {
- /* Note that the only two constants we support are 0 and -1, and
- that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */
- if ((bh != 0) ^ sub) {
- insn = I3503_SBC;
- }
- bh = TCG_REG_XZR;
- } else if (sub) {
- insn = I3503_SBC;
- }
- tcg_out_insn_3503(s, insn, ext, rh, ah, bh);
-
- tcg_out_mov(s, ext, orig_rl, rl);
-}
-
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
static const uint32_t sync[] = {
[0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST,
@@ -1653,37 +1587,6 @@ static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}
-static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
- TCGReg a0, TCGArg b, bool const_b, bool is_ctz)
-{
- TCGReg a1 = a0;
- if (is_ctz) {
- a1 = TCG_REG_TMP0;
- tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
- }
- if (const_b && b == (ext ? 64 : 32)) {
- tcg_out_insn(s, 3507, CLZ, ext, d, a1);
- } else {
- AArch64Insn sel = I3506_CSEL;
-
- tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1);
- tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
-
- if (const_b) {
- if (b == -1) {
- b = TCG_REG_XZR;
- sel = I3506_CSINV;
- } else if (b == 0) {
- b = TCG_REG_XZR;
- } else {
- tcg_out_movi(s, ext, d, b);
- b = d;
- }
- }
- tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP0, b, TCG_COND_NE);
- }
-}
-
typedef struct {
TCGReg base;
TCGReg index;
@@ -1758,16 +1661,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
unsigned s_mask = (1u << s_bits) - 1;
unsigned mem_index = get_mmuidx(oi);
TCGReg addr_adj;
- TCGType mask_type;
uint64_t compare_mask;
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
-
- mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
- ? TCG_TYPE_I64 : TCG_TYPE_I32);
+ ldst->addr_reg = addr_reg;
/* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -1776,9 +1675,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tlb_mask_table_ofs(s, mem_index), 1, 0);
/* Extract the TLB index from the address into X0. */
- tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
+ tcg_out_insn(s, 3502S, AND_LSR, TCG_TYPE_I64,
TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
/* Add the tlb_table pointer, forming the CPUTLBEntry address. */
tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
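[Aside, not part of the patch: a hedged sketch of the address arithmetic the AND_LSR + ADD pair above performs. The cached mask is assumed to already include the entry-size scaling, so shifting the guest address right by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS and masking gives a byte offset into the TLB table; the function name and parameters below are invented for illustration.]

    #include <stdint.h>

    /* Illustrative model of the fast-path TLB entry address computation. */
    static uintptr_t tlb_entry_address(uintptr_t table, uint64_t mask,
                                       uint64_t addr,
                                       unsigned page_bits, unsigned entry_bits)
    {
        /* mask == (n_entries - 1) << entry_bits, so this is a byte offset. */
        uint64_t offset = (addr >> (page_bits - entry_bits)) & mask;
        return table + offset;
    }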
@@ -1804,7 +1703,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_insn(s, 3401, ADDI, addr_type,
addr_adj, addr_reg, s_mask - a_mask);
}
- compare_mask = (uint64_t)s->page_mask | a_mask;
+ compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
/* Store the page mask part of the address into TMP2. */
tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
@@ -1826,7 +1725,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
/* tst addr, #mask */
tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
@@ -1903,8 +1802,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType data_type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -1919,8 +1818,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType data_type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -1935,6 +1839,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -2037,6 +1946,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_O2_I1(r, r, r),
+ .out = tgen_qemu_ld2,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(rz, rz, r),
+ .out = tgen_qemu_st2,
+};
+
static const tcg_insn_unit *tb_ret_addr;
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
@@ -2083,6 +2014,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
tcg_out_bti(s, BTI_J);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_insn(s, 3207, BR, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -2104,413 +2040,859 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- /* 99% of the time, we can signal the use of extension registers
- by looking to see if the opcode handles 64-bit data. */
- TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;
+ tcg_out_insn(s, 3502, ADD, type, a0, a1, a2);
+}
- /* Hoist the loads of the most common arguments. */
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
- int c2 = const_args[2];
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, ADDI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, SUBI, type, a0, a1, -a2);
+ }
+}
- /* Some operands are defined with "rZ" constraint, a register or
- the zero register. These need not actually test args[I] == 0. */
-#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rA),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_insn(s, 3207, BR, a0);
- break;
+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, ADDS, type, a0, a1, a2);
+}
- case INDEX_op_br:
- tcg_out_goto_label(s, arg_label(a0));
- break;
+static void tgen_addco_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, ADDSI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, SUBSI, type, a0, a1, -a2);
+ }
+}
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
- break;
- case INDEX_op_ld8s_i32:
- tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0);
- break;
- case INDEX_op_ld8s_i64:
- tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1);
- break;
- case INDEX_op_ld16s_i32:
- tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1);
- break;
- case INDEX_op_ld16s_i64:
- tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, r, rA),
+ .out_rrr = tgen_addco,
+ .out_rri = tgen_addco_imm,
+};
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1);
+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, ADC, type, a0, a1, a2);
+}
+
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /*
+ * Note that the only two constants we support are 0 and -1, and
+ * that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa.
+ */
+ if (a2) {
+ tcg_out_insn(s, 3503, SBC, type, a0, a1, TCG_REG_XZR);
+ } else {
+ tcg_out_insn(s, 3503, ADC, type, a0, a1, TCG_REG_XZR);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_addci_rrr,
+ .out_rri = tgen_addci_rri,
+};
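[Aside, not part of the patch: a minimal C check of the identity that tgen_addci_rri relies on. SBC computes rn + ~rm + C, so with rm == XZR it evaluates rn + ~0 + C == rn + (-1) + C, i.e. "adc with immediate -1" can be emitted as "sbc with xzr". The helper names are invented for the sketch.]

    #include <assert.h>
    #include <stdint.h>

    static uint64_t adc(uint64_t rn, uint64_t rm, unsigned carry)
    {
        return rn + rm + carry;
    }

    static uint64_t sbc(uint64_t rn, uint64_t rm, unsigned carry)
    {
        return rn + ~rm + carry;
    }

    int main(void)
    {
        for (unsigned carry = 0; carry <= 1; carry++) {
            /* adc rd, rn, #-1  ==  sbc rd, rn, xzr */
            assert(adc(0x123456789abcdef0ull, -1ull, carry) ==
                   sbc(0x123456789abcdef0ull, 0, carry));
        }
        return 0;
    }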
+
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, ADCS, type, a0, a1, a2);
+}
+
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* Use SBCS w/0 for ADCS w/-1 -- see above. */
+ if (a2) {
+ tcg_out_insn(s, 3503, SBCS, type, a0, a1, TCG_REG_XZR);
+ } else {
+ tcg_out_insn(s, 3503, ADCS, type, a0, a1, TCG_REG_XZR);
+ }
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_addcio,
+ .out_rri = tgen_addcio_imm,
+};
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out_insn(s, 3502, SUBS, TCG_TYPE_I32,
+ TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
+}
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, AND, type, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_logicali(s, I3404_ANDI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rL),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, BIC, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
+ tcg_out_insn(s, 3507, CLZ, type, TCG_REG_TMP0, a1);
+ tcg_out_insn(s, 3506, CSEL, type, a0, TCG_REG_TMP0, a2, TCG_COND_NE);
+}
+
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ tcg_out_insn(s, 3507, CLZ, type, a0, a1);
+ return;
+ }
+
+ tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
+ tcg_out_insn(s, 3507, CLZ, type, a0, a1);
+
+ switch (a2) {
+ case -1:
+ tcg_out_insn(s, 3506, CSINV, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2);
+ case 0:
+ tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
+ default:
+ tcg_out_movi(s, type, TCG_REG_TMP0, a2);
+ tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_TMP0, TCG_COND_NE);
break;
+ }
+}
- case INDEX_op_add_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_O1_I2(r, r, rAL),
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
- case INDEX_op_sub_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, -a2);
- } else {
- tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_neg_i64:
- case INDEX_op_neg_i32:
- tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
+ tgen_clz(s, type, a0, TCG_REG_TMP0, a2);
+}
- case INDEX_op_and_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
- }
- break;
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
+ tgen_clzi(s, type, a0, TCG_REG_TMP0, a2);
+}
- case INDEX_op_andc_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_andc_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
- } else {
- tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_O1_I2(r, r, rAL),
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
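[Aside, not part of the patch: a reference model of the composition used by tgen_ctz/tgen_ctzi. Count-trailing-zeros is bit reversal followed by count-leading-zeros, with the extra operand supplying the result for a zero input (the CSEL path in tgen_clz). Helper names are invented.]

    #include <stdint.h>

    static uint64_t bit_reverse64(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 64; i++) {
            r = (r << 1) | ((x >> i) & 1);   /* models the RBIT instruction */
        }
        return r;
    }

    static uint64_t clz64(uint64_t x, uint64_t val_if_zero)
    {
        return x ? (uint64_t)__builtin_clzll(x) : val_if_zero;
    }

    static uint64_t ctz64(uint64_t x, uint64_t val_if_zero)
    {
        return clz64(bit_reverse64(x), val_if_zero);   /* RBIT then CLZ */
    }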
- case INDEX_op_or_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
- }
- break;
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, SDIV, type, a0, a1, a2);
+}
- case INDEX_op_orc_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_orc_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
- } else {
- tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
- case INDEX_op_xor_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_xor_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_eqv_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_eqv_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
- } else {
- tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
- }
- break;
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, UDIV, type, a0, a1, a2);
+}
- case INDEX_op_not_i64:
- case INDEX_op_not_i32:
- tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
- break;
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
- case INDEX_op_mul_i64:
- case INDEX_op_mul_i32:
- tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
- break;
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_div_i64:
- case INDEX_op_div_i32:
- tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
- break;
- case INDEX_op_divu_i64:
- case INDEX_op_divu_i32:
- tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
- break;
+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, EON, type, a0, a1, a2);
+}
- case INDEX_op_rem_i64:
- case INDEX_op_rem_i32:
- tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2);
- tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
- break;
- case INDEX_op_remu_i64:
- case INDEX_op_remu_i32:
- tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2);
- tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
- break;
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
- case INDEX_op_shl_i64:
- case INDEX_op_shl_i32:
- if (c2) {
- tcg_out_shl(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
- }
- break;
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_ubfm(s, TCG_TYPE_I64, a0, a1, 32, 63);
+}
- case INDEX_op_shr_i64:
- case INDEX_op_shr_i32:
- if (c2) {
- tcg_out_shr(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
- case INDEX_op_sar_i64:
- case INDEX_op_sar_i32:
- if (c2) {
- tcg_out_sar(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
- }
- break;
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3509, MADD, type, a0, a1, a2, TCG_REG_XZR);
+}
- case INDEX_op_rotr_i64:
- case INDEX_op_rotr_i32:
- if (c2) {
- tcg_out_rotr(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
- case INDEX_op_rotl_i64:
- case INDEX_op_rotl_i32:
- if (c2) {
- tcg_out_rotl(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP0, TCG_REG_XZR, a2);
- tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP0);
- }
- break;
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_clz_i64:
- case INDEX_op_clz_i32:
- tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
- break;
- case INDEX_op_ctz_i64:
- case INDEX_op_ctz_i32:
- tcg_out_cltz(s, ext, a0, a1, a2, c2, true);
- break;
+static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
- case INDEX_op_brcond_i32:
- a1 = (int32_t)a1;
- /* FALLTHRU */
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
+}
- case INDEX_op_setcond_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_setcond_i64:
- tcg_out_cmp(s, ext, args[3], a1, a2, c2);
- /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
- tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
- TCG_REG_XZR, tcg_invert_cond(args[3]));
- break;
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_mulsh,
+};
- case INDEX_op_negsetcond_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_negsetcond_i64:
- tcg_out_cmp(s, ext, args[3], a1, a2, c2);
- /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
- tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
- TCG_REG_XZR, tcg_invert_cond(args[3]));
- break;
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_movcond_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_movcond_i64:
- tcg_out_cmp(s, ext, args[5], a1, a2, c2);
- tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
- break;
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, ext);
- break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
- break;
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_muluh,
+};
- case INDEX_op_bswap64_i64:
- tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
- break;
- case INDEX_op_bswap32_i64:
- tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- }
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
- break;
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap16_i32:
- tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- /* Output must be sign-extended. */
- tcg_out_ext16s(s, ext, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- /* Output must be zero-extended, but input isn't. */
- tcg_out_ext16u(s, a0, a0);
- }
- break;
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_deposit_i64:
- case INDEX_op_deposit_i32:
- tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
- break;
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_extract_i64:
- case INDEX_op_extract_i32:
- tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
- break;
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, ORR, type, a0, a1, a2);
+}
- case INDEX_op_sextract_i64:
- case INDEX_op_sextract_i32:
- tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
- break;
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_logicali(s, I3404_ORRI, type, a0, a1, a2);
+}
- case INDEX_op_extract2_i64:
- case INDEX_op_extract2_i32:
- tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
- break;
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rL),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
- case INDEX_op_add2_i32:
- tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
- (int32_t)args[4], args[5], const_args[4],
- const_args[5], false);
- break;
- case INDEX_op_add2_i64:
- tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
- args[5], const_args[4], const_args[5], false);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
- (int32_t)args[4], args[5], const_args[4],
- const_args[5], true);
- break;
- case INDEX_op_sub2_i64:
- tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
- args[5], const_args[4], const_args[5], true);
- break;
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, ORN, type, a0, a1, a2);
+}
- case INDEX_op_muluh_i64:
- tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
- break;
- case INDEX_op_mulsh_i64:
- tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, SDIV, type, TCG_REG_TMP0, a1, a2);
+ tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rems,
+};
+
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, UDIV, type, TCG_REG_TMP0, a1, a2);
+ tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
+}
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_remu,
+};
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, RORV, type, a0, a1, a2);
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_extr(s, type, a0, a1, a1, a2 & max);
+}
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
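[Aside, not part of the patch: a sketch of why tgen_rotri can use a single EXTR. Extracting 64 bits from the concatenation rn:rm starting at bit 'lsb', with both sources equal, is exactly a rotate right.]

    #include <stdint.h>

    /* Model of EXTR: take 64 bits of rn:rm starting at bit position lsb. */
    static uint64_t extr64(uint64_t rn, uint64_t rm, unsigned lsb)
    {
        lsb &= 63;
        return lsb ? (rm >> lsb) | (rn << (64 - lsb)) : rm;
    }

    static uint64_t ror64(uint64_t x, unsigned sh)
    {
        return extr64(x, x, sh);   /* extr rd, rn, rn, #sh */
    }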
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, ASRV, type, a0, a1, a2);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_sbfm(s, type, a0, a1, a2 & max, max);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, LSLV, type, a0, a1, a2);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_ubfm(s, type, a0, a1, -a2 & max, ~a2 & max);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
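[Aside, not part of the patch: the encoding used by tgen_shli. LSL-by-immediate is an alias of UBFM with immr = (-shift) mod width and imms = width - 1 - shift, which is what the '-a2 & max' and '~a2 & max' arguments compute.]

    /* For a width-bit LSL by sh (0 < sh < width): */
    static unsigned lsl_immr(unsigned sh, unsigned width)
    {
        return -sh & (width - 1);          /* e.g. 64-bit lsl #3 -> immr = 61 */
    }

    static unsigned lsl_imms(unsigned sh, unsigned width)
    {
        return ~sh & (width - 1);          /* e.g. 64-bit lsl #3 -> imms = 60 */
    }
    /* So "lsl x0, x1, #3" assembles as "ubfm x0, x1, #61, #60". */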
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3508, LSRV, type, a0, a1, a2);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_ubfm(s, type, a0, a1, a2 & max, max);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, SUB, type, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, SUBS, type, a0, a1, a2);
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, SUBSI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, ADDSI, type, a0, a1, -a2);
+ }
+}
+
+static void tgen_subbo_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, a2);
+}
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, TCG_REG_XZR);
+ return;
}
-#undef REG0
+ /*
+ * We want to allow a1 to be zero for the benefit of negation via
+ * subtraction. However, that leaves open the possibility of
+ * adding 0 +/- const, and the immediate add/sub instructions
+ * encode XSP not XZR. Since we have 0 - non-zero, borrow is
+ * always set.
+ */
+ tcg_out_movi(s, type, a0, -a2);
+ tcg_out_set_borrow(s);
}
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, rZ, rA),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
+};
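[Aside, not part of the patch: a sanity check on the comment in tgen_subbo_rii. With a zero minuend and a non-zero constant subtrahend, the result is just the negated constant and an unsigned borrow always occurs, so loading -a2 and forcing the borrow state reproduces what a real SUBS would have left behind. The model below is illustrative only.]

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of SUBS: result plus borrow-out (AArch64 reports carry == !borrow). */
    static uint64_t subs64(uint64_t a, uint64_t b, bool *borrow)
    {
        *borrow = a < b;
        return a - b;
    }
    /* subs64(0, c, &bo) with c != 0 yields -c with bo == true, always. */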
+
+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, SBC, type, a0, a1, a2);
+}
+
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addci_rri(s, type, a0, a1, ~a2);
+}
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, SBCS, type, a0, a1, a2);
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addcio_imm(s, type, a0, a1, ~a2);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ tcg_out_insn(s, 3502, ADDS, TCG_TYPE_I32,
+ TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, EOR, type, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_logicali(s, I3404_EORI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rL),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ /* Output must be sign-extended. */
+ tcg_out_ext16s(s, type, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ /* Output must be zero-extended, but input isn't. */
+ tcg_out_ext16u(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_XZR, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_orc(s, type, a0, TCG_REG_XZR, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_cset(TCGContext *s, TCGCond cond, TCGReg ret)
+{
+ /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
+ tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, ret, TCG_REG_XZR,
+ TCG_REG_XZR, tcg_invert_cond(cond));
+}
+
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_cmp(s, type, cond, a1, a2);
+ tgen_cset(s, cond, a0);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_cmpi(s, type, cond, a1, a2);
+ tgen_cset(s, cond, a0);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_csetm(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret)
+{
+ /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
+ tcg_out_insn(s, 3506, CSINV, ext, ret, TCG_REG_XZR,
+ TCG_REG_XZR, tcg_invert_cond(cond));
+}
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_cmp(s, type, cond, a1, a2);
+ tgen_csetm(s, type, cond, a0);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_cmpi(s, type, cond, a1, a2);
+ tgen_csetm(s, type, cond, a0);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
+ tcg_out_cmp(s, type, cond, c1, c2, const_c2);
+ tcg_out_insn(s, 3506, CSEL, type, ret, vt, vf, cond);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rz, rz),
+ .out = tgen_movcond,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+
+ /*
+ * Since we can't support "0Z" as a constraint, we allow a1 in
+ * any register. Fix things up as if a matching constraint.
+ */
+ if (a0 != a1) {
+ if (a0 == a2) {
+ tcg_out_mov(s, type, TCG_REG_TMP0, a2);
+ a2 = TCG_REG_TMP0;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ }
+ tcg_out_bfm(s, type, a0, a2, -ofs & mask, len - 1);
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len));
+}
+
+static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2,
+ unsigned ofs, unsigned len)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_ubfm(s, type, a0, a2, -ofs & max, len - 1);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, rZ, rZ),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+ .out_rzr = tgen_depositz,
+};
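[Aside, not part of the patch: a reference model of the deposit semantics that tgen_deposit implements with BFM; parameter names are mine. Because BFM merges into its destination, the destination must start out holding a1, hence the register shuffle above.]

    #include <stdint.h>

    /* Insert the low 'len' bits of 'field' into 'base' at bit offset 'ofs'. */
    static uint64_t deposit_bits(uint64_t base, unsigned ofs, unsigned len,
                                 uint64_t field)
    {
        uint64_t mask = (len < 64 ? (1ull << len) - 1 : ~0ull) << ofs;
        return (base & ~mask) | ((field << ofs) & mask);
    }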
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
+ tcg_out_logicali(s, I3404_ANDI, type, a0, a1, mask);
+ } else {
+ tcg_out_ubfm(s, type, a0, a1, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_out_sbfm(s, type, a0, a1, ofs, ofs + len - 1);
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
+ TCGReg a1, TCGReg a2, unsigned shr)
+{
+ tcg_out_extr(s, type, a0, a2, a1, shr);
+}
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_O1_I2(r, rz, rz),
+ .out_rrr = tgen_extract2,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_LDRB, dest, base, offset, 0);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSBW : I3312_LDRSBX;
+ tcg_out_ldst(s, insn, dest, base, offset, 0);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_LDRH, dest, base, offset, 1);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSHW : I3312_LDRSHX;
+ tcg_out_ldst(s, insn, dest, base, offset, 1);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_LDRW, dest, base, offset, 2);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_LDRSWX, dest, base, offset, 2);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_STRB, data, base, offset, 0);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_STRH, data, base, offset, 1);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -2951,157 +3333,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
}
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(rZ, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, r, rA);
-
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rC);
-
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i32:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i32:
- case INDEX_op_remu_i64:
- case INDEX_op_muluh_i64:
- case INDEX_op_mulsh_i64:
- return C_O1_I2(r, r, r);
-
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rL);
-
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, rAL);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
-
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rZ, rZ);
-
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- return C_O2_I1(r, r, r);
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I2(rZ, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- return C_O0_I3(rZ, rZ, r);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rZ);
-
- case INDEX_op_extract2_i32:
- case INDEX_op_extract2_i64:
- return C_O1_I2(r, rZ, rZ);
-
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
-
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
case INDEX_op_mul_vec:
@@ -3147,7 +3382,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I2(w, 0, w);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 8bd9e6a..3f3df51 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -13,8 +13,6 @@
#ifndef AARCH64_TCG_TARGET_H
#define AARCH64_TCG_TARGET_H
-#include "host/cpuinfo.h"
-
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
@@ -47,130 +45,8 @@ typedef enum {
TCG_AREG0 = TCG_REG_X19,
} TCGReg;
-#define TCG_TARGET_NB_REGS 64
-
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_SP
-#define TCG_TARGET_STACK_ALIGN 16
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#ifdef CONFIG_DARWIN
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-#else
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
-#endif
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-
-#define have_lse (cpuinfo & CPUINFO_LSE)
-#define have_lse2 (cpuinfo & CPUINFO_LSE2)
+#define TCG_REG_ZERO TCG_REG_XZR
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_eqv_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 1
-#define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 1
-#define TCG_TARGET_HAS_extract2_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 1
-#define TCG_TARGET_HAS_mulsh_i64 1
-
-/*
- * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
- * which requires writable pages. We must defer to the helper for user-only,
- * but in system mode all ram is writable for the host.
- */
-#ifdef CONFIG_USER_ONLY
-#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2
-#else
-#define TCG_TARGET_HAS_qemu_ldst_i128 1
-#endif
-
-#define TCG_TARGET_HAS_tst 1
-
-#define TCG_TARGET_HAS_v64 1
-#define TCG_TARGET_HAS_v128 1
-#define TCG_TARGET_HAS_v256 0
-
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec 1
-#define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 0
-#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec 1
-#define TCG_TARGET_HAS_abs_vec 1
-#define TCG_TARGET_HAS_roti_vec 0
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
-#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_sat_vec 1
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 1
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 1
-
-#define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
+#define TCG_TARGET_NB_REGS 64
#endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/aarch64/tcg-target.opc.h b/tcg/aarch64/tcg-target.opc.h
deleted file mode 100644
index bce30ac..0000000
--- a/tcg/aarch64/tcg-target.opc.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2019 Linaro
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * (at your option) any later version.
- *
- * See the COPYING file in the top-level directory for details.
- *
- * Target-specific opcodes for host vector expansion. These will be
- * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
- * consider these to be UNSPEC with names.
- */
-
-DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
-DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)
diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
index 229ae25..16b1193 100644
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -30,6 +30,9 @@ C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rIN)
C_O1_I2(r, r, ri)
+C_O1_I2(r, rI, r)
+C_O1_I2(r, rI, rIK)
+C_O1_I2(r, rI, rIN)
C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
@@ -42,5 +45,3 @@ C_O1_I4(r, r, rIN, rIK, 0)
C_O2_I1(e, p, q)
C_O2_I2(e, p, q, q)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, r, r, rIN, rIK)
-C_O2_I4(r, r, rI, rI, rIN, rIK)
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
new file mode 100644
index 0000000..3bbbde5
--- /dev/null
+++ b/tcg/arm/tcg-target-has.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2008 Andrzej Zaborowski
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+extern int arm_arch;
+
+#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+#define use_idiv_instructions 1
+#else
+extern bool use_idiv_instructions;
+#endif
+#ifdef __ARM_NEON__
+#define use_neon_instructions 1
+#else
+extern bool use_neon_instructions;
+#endif
+
+/* optional instructions */
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 1
+
+#define TCG_TARGET_HAS_v64 use_neon_instructions
+#define TCG_TARGET_HAS_v128 use_neon_instructions
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 1
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 1
+#define TCG_TARGET_HAS_cmpsel_vec 0
+#define TCG_TARGET_HAS_tst_vec 1
+
+static inline bool
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (use_armv7_instructions) {
+ return true; /* SBFX or UBFX */
+ }
+ switch (len) {
+ case 8: /* SXTB or UXTB */
+ case 16: /* SXTH or UXTH */
+ return (ofs % 8) == 0;
+ }
+ return false;
+}
+
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
+#define TCG_TARGET_sextract_valid tcg_target_extract_valid
+#define TCG_TARGET_deposit_valid(type, ofs, len) use_armv7_instructions
+
+#endif
diff --git a/tcg/arm/tcg-target-mo.h b/tcg/arm/tcg-target-mo.h
new file mode 100644
index 0000000..12542df
--- /dev/null
+++ b/tcg/arm/tcg-target-mo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2008 Andrzej Zaborowski
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/arm/tcg-target-opc.h.inc b/tcg/arm/tcg-target-opc.h.inc
new file mode 100644
index 0000000..70394e0
--- /dev/null
+++ b/tcg/arm/tcg-target-opc.h.inc
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2019 Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+
+DEF(arm_sli_vec, 1, 2, 1, TCG_OPF_VECTOR)
+DEF(arm_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(arm_ushl_vec, 1, 2, 0, TCG_OPF_VECTOR)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 3de5f50..836894b 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -23,8 +23,6 @@
*/
#include "elf.h"
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
int arm_arch = __ARM_ARCH;
@@ -35,6 +33,14 @@ bool use_idiv_instructions;
bool use_neon_instructions;
#endif
+/* Used for function call generation. */
+#define TCG_TARGET_STACK_ALIGN 8
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
+
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
@@ -172,6 +178,8 @@ typedef enum {
INSN_DMB_ISH = 0xf57ff05b,
INSN_DMB_MCR = 0xee070fba,
+ INSN_MSRI_CPSR = 0x0360f000,
+
/* Architected nop introduced in v6k. */
/* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
also Just So Happened to do nothing on pre-v6k so that we
@@ -670,14 +678,8 @@ static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}
-static void __attribute__((unused))
-tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
-{
- tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
-}
-
-static void __attribute__((unused))
-tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
+static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
@@ -874,22 +876,39 @@ static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
* Emit either the reg,imm or reg,reg form of a data-processing insn.
* rhs must satisfy the "rIK" constraint.
*/
+static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(~rhs);
+ opc = opinv;
+ }
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
if (rhs_is_const) {
- int imm12 = encode_imm(rhs);
- if (imm12 < 0) {
- imm12 = encode_imm_nofail(~rhs);
- opc = opinv;
- }
- tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+ tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
+static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(-rhs);
+ opc = opneg;
+ }
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
@@ -898,52 +917,12 @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
* rhs must satisfy the "rIN" constraint.
*/
if (rhs_is_const) {
- int imm12 = encode_imm(rhs);
- if (imm12 < 0) {
- imm12 = encode_imm_nofail(-rhs);
- opc = opneg;
- }
- tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+ tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGReg rn, TCGReg rm)
-{
- /* mul */
- tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
-}
-
-static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
-{
- /* umull */
- tcg_out32(s, (cond << 28) | 0x00800090 |
- (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
-}
-
-static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
-{
- /* smull */
- tcg_out32(s, (cond << 28) | 0x00c00090 |
- (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
-}
-
-static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, TCGReg rm)
-{
- tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
-}
-
-static void tcg_out_udiv(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, TCGReg rm)
-{
- tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
-}
-
static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
/* sxtb */
@@ -992,56 +971,98 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
g_assert_not_reached();
}
-static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int flags)
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
{
- if (flags & TCG_BSWAP_OS) {
- /* revsh */
- tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
+ /* bfi/bfc */
+ tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a1
+ | (ofs << 7) | ((ofs + len - 1) << 16));
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ /* bfi becomes bfc with rn == 15. */
+ tgen_deposit(s, type, a0, a1, 15, ofs, len);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rZ),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
+ unsigned ofs, unsigned len)
+{
+ /* According to gcc, AND can be faster. */
+ if (ofs == 0 && len <= 8) {
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn,
+ encode_imm_nofail((1 << len) - 1));
return;
}
- /* rev16 */
- tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
- if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ if (use_armv7_instructions) {
+ /* ubfx */
+ tcg_out32(s, 0x07e00050 | (COND_AL << 28) | (rd << 12) | rn
+ | (ofs << 7) | ((len - 1) << 16));
+ return;
+ }
+
+ assert(ofs % 8 == 0);
+ switch (len) {
+ case 8:
+ /* uxtb */
+ tcg_out32(s, 0x06ef0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
+ break;
+ case 16:
/* uxth */
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
+ tcg_out32(s, 0x06ff0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
+ break;
+ default:
+ g_assert_not_reached();
}
}
-static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
-{
- /* rev */
- tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
-}
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
-static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGArg a1, int ofs, int len, bool const_a1)
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
+ unsigned ofs, unsigned len)
{
- if (const_a1) {
- /* bfi becomes bfc with rn == 15. */
- a1 = 15;
+ if (use_armv7_instructions) {
+ /* sbfx */
+ tcg_out32(s, 0x07a00050 | (COND_AL << 28) | (rd << 12) | rn
+ | (ofs << 7) | ((len - 1) << 16));
+ return;
}
- /* bfi/bfc */
- tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
- | (ofs << 7) | ((ofs + len - 1) << 16));
-}
-static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGReg rn, int ofs, int len)
-{
- /* ubfx */
- tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
- | (ofs << 7) | ((len - 1) << 16));
+ assert(ofs % 8 == 0);
+ switch (len) {
+ case 8:
+ /* sxtb */
+ tcg_out32(s, 0x06af0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
+ break;
+ case 16:
+ /* sxth */
+ tcg_out32(s, 0x06bf0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
-static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGReg rn, int ofs, int len)
-{
- /* sbfx */
- tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
- | (ofs << 7) | ((len - 1) << 16));
-}
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
TCGReg rd, TCGReg rn, int32_t offset)
@@ -1063,66 +1084,6 @@ static void tcg_out_st32(TCGContext *s, ARMCond cond,
tcg_out_st32_12(s, cond, rd, rn, offset);
}
-static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld16u_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld16s_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_st16(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_st16_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld8_12(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld8s_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_st8(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_st8_12(s, cond, rd, rn, offset);
-}
-
/*
* The _goto case is normally between TBs within the same code buffer, and
* with the code buffer limited to 16MB we wouldn't need the long case.
@@ -1182,7 +1143,12 @@ static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
}
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_goto_label(s, COND_AL, l);
+}
+
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
if (use_armv7_instructions) {
tcg_out32(s, INSN_DMB_ISH);
@@ -1191,44 +1157,53 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
}
}
-static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
- TCGArg b, int b_const)
+static TCGCond tgen_cmp(TCGContext *s, TCGCond cond, TCGReg a, TCGReg b)
+{
+ if (is_tst_cond(cond)) {
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
+ return tcg_tst_eqne_cond(cond);
+ }
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, a, b, SHIFT_IMM_LSL(0));
+ return cond;
+}
+
+static TCGCond tgen_cmpi(TCGContext *s, TCGCond cond, TCGReg a, TCGArg b)
{
+ int imm12;
+
if (!is_tst_cond(cond)) {
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
+ tcg_out_dat_IN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b);
return cond;
}
- cond = tcg_tst_eqne_cond(cond);
- if (b_const) {
- int imm12 = encode_imm(b);
-
- /*
- * The compare constraints allow rIN, but TST does not support N.
- * Be prepared to load the constant into a scratch register.
- */
- if (imm12 >= 0) {
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
- return cond;
- }
+ /*
+ * The compare constraints allow rIN, but TST does not support N.
+ * Be prepared to load the constant into a scratch register.
+ */
+ imm12 = encode_imm(b);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
+ } else {
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
- b = TCG_REG_TMP;
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0,
+ a, TCG_REG_TMP, SHIFT_IMM_LSL(0));
}
- tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
- return cond;
+ return tcg_tst_eqne_cond(cond);
}
-static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
- const int *const_args)
+static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
+ TCGArg b, int b_const)
{
- TCGReg al = args[0];
- TCGReg ah = args[1];
- TCGArg bl = args[2];
- TCGArg bh = args[3];
- TCGCond cond = args[4];
- int const_bl = const_args[2];
- int const_bh = const_args[3];
+ if (b_const) {
+ return tgen_cmpi(s, cond, a, b);
+ } else {
+ return tgen_cmp(s, cond, a, b);
+ }
+}
+static TCGCond tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh)
+{
switch (cond) {
case TCG_COND_EQ:
case TCG_COND_NE:
@@ -1407,8 +1382,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
#define MIN_TLB_MASK_TABLE_OFS -256
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
@@ -1417,14 +1391,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (tcg_use_softmmu) {
*h = (HostAddress){
.cond = COND_AL,
- .base = addrlo,
+ .base = addr,
.index = TCG_REG_R1,
.index_scratch = true,
};
} else {
*h = (HostAddress){
.cond = COND_AL,
- .base = addrlo,
+ .base = addr,
.index = guest_base ? TCG_REG_GUEST_BASE : -1,
.index_scratch = false,
};
@@ -1444,8 +1418,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -1453,30 +1426,19 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
/* Extract the tlb index from the address into R0. */
- tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
- SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
+ SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
/*
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
- * Load the tlb comparator into R2/R3 and the fast path addend into R1.
+ * Load the tlb comparator into R2 and the fast path addend into R1.
*/
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
if (cmp_off == 0) {
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
- TCG_REG_R1, TCG_REG_R0);
- } else {
- tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
- TCG_REG_R1, TCG_REG_R0);
- }
+ tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
} else {
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
- } else {
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
- }
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
}
/* Load the tlb addend. */
@@ -1495,14 +1457,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* This leaves the least significant alignment bits unchanged, and of
* course must be zero.
*/
- t_addr = addrlo;
+ t_addr = addr;
if (a_mask < s_mask) {
t_addr = TCG_REG_R0;
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
- addrlo, s_mask - a_mask);
+ addr, s_mask - a_mask);
}
- if (use_armv7_instructions && s->page_bits <= 16) {
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
+ if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
t_addr, TCG_REG_TMP, 0);
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
@@ -1510,29 +1472,24 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
} else {
if (a_mask) {
tcg_debug_assert(a_mask <= 0xff);
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
}
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
- SHIFT_IMM_LSR(s->page_bits));
+ SHIFT_IMM_LSR(TARGET_PAGE_BITS));
tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
0, TCG_REG_R2, TCG_REG_TMP,
- SHIFT_IMM_LSL(s->page_bits));
- }
-
- if (s->addr_type != TCG_TYPE_I32) {
- tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
+ SHIFT_IMM_LSL(TARGET_PAGE_BITS));
}
} else if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* We are expecting alignment to max out at 7 */
tcg_debug_assert(a_mask <= 0xff);
/* tst addr, #mask */
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
}
return ldst;
@@ -1587,7 +1544,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* LDRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
break;
@@ -1629,17 +1586,50 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+
+ /*
+ * This a conditional BL only to load a pointer within this
+ * opcode into LR for the slow path. We will not be using
+ * the value for a tail call.
+ */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_bl_imm(s, COND_NE, 0);
+ }
+
+ tcg_out_qemu_ld_direct(s, opc, data, -1, h);
+
+ if (ldst) {
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, q),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
+ if (ldst) {
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
@@ -1650,14 +1640,20 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
*/
ldst->label_ptr[0] = s->code_ptr;
tcg_out_bl_imm(s, COND_NE, 0);
+ }
+
+ tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
- tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
+ if (ldst) {
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
- } else {
- tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
}
}
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_O2_I1(e, p, q),
+ .out = tgen_qemu_ld2,
+};
+
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
TCGReg datahi, HostAddress h)
{
@@ -1691,7 +1687,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* STRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
} else {
@@ -1715,17 +1711,46 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+
+ h.cond = COND_EQ;
+ tcg_out_qemu_st_direct(s, opc, data, -1, h);
+
+ /* The conditional call is last, as we're going to return here. */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_bl_imm(s, COND_NE, 0);
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ } else {
+ tcg_out_qemu_st_direct(s, opc, data, -1, h);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(q, q),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+ if (ldst) {
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
@@ -1741,6 +1766,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(Q, p, q),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_epilogue(TCGContext *s);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
@@ -1780,6 +1810,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_b_reg(s, COND_AL, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -1799,418 +1834,816 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0, a1, a2, a3, a4, a5;
- int c;
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_b_reg(s, COND_AL, args[0]);
- break;
- case INDEX_op_br:
- tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
- break;
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2);
+}
- case INDEX_op_ld8u_i32:
- tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld8s_i32:
- tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16u_i32:
- tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16s_i32:
- tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i32:
- tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_st8_i32:
- tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
- case INDEX_op_movcond_i32:
- /* Constraints mean that v2 is always in the same register as dest,
- * so we only need to do "if condition passed, move v1 to dest".
- */
- c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
- tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
- ARITH_MVN, args[0], 0, args[3], const_args[3]);
- break;
- case INDEX_op_add_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
- args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_sub_i32:
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
- } else {
- tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
- args[0], args[2], args[1], 1);
- }
- } else {
- tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
- args[0], args[1], args[2], const_args[2]);
- }
- break;
- case INDEX_op_and_i32:
- tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
- args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_andc_i32:
- tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
- args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_or_i32:
- c = ARITH_ORR;
- goto gen_arith;
- case INDEX_op_xor_i32:
- c = ARITH_EOR;
- /* Fall through. */
- gen_arith:
- tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_add2_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- a3 = args[3], a4 = args[4], a5 = args[5];
- if (a0 == a3 || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_TMP;
- }
- tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
- a0, a2, a4, const_args[4]);
- tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
- a1, a3, a5, const_args[5]);
- tcg_out_mov_reg(s, COND_AL, args[0], a0);
- break;
- case INDEX_op_sub2_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- a3 = args[3], a4 = args[4], a5 = args[5];
- if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_TMP;
- }
- if (const_args[2]) {
- if (const_args[4]) {
- tcg_out_movi32(s, COND_AL, a0, a4);
- a4 = a0;
- }
- tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
- } else {
- tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
- ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
- }
- if (const_args[3]) {
- if (const_args[5]) {
- tcg_out_movi32(s, COND_AL, a1, a5);
- a5 = a1;
- }
- tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
- } else {
- tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
- a1, a3, a5, const_args[5]);
- }
- tcg_out_mov_reg(s, COND_AL, args[0], a0);
- break;
- case INDEX_op_neg_i32:
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
- break;
- case INDEX_op_not_i32:
- tcg_out_dat_reg(s, COND_AL,
- ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
- break;
- case INDEX_op_mul_i32:
- tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_mulu2_i32:
- tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
- case INDEX_op_muls2_i32:
- tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
- /* XXX: Perhaps args[2] & 0x1f is wrong */
- case INDEX_op_shl_i32:
- c = const_args[2] ?
- SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
- goto gen_shift32;
- case INDEX_op_shr_i32:
- c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
- SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
- goto gen_shift32;
- case INDEX_op_sar_i32:
- c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
- SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
- goto gen_shift32;
- case INDEX_op_rotr_i32:
- c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
- SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
- /* Fall through. */
- gen_shift32:
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
- break;
+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
- case INDEX_op_rotl_i32:
- if (const_args[2]) {
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
- ((0x20 - args[2]) & 0x1f) ?
- SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
- SHIFT_IMM_LSL(0));
- } else {
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
- SHIFT_REG_ROR(TCG_REG_TMP));
- }
- break;
+static void tgen_addco_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
+ a0, a1, a2);
+}
- case INDEX_op_ctz_i32:
- tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
- a1 = TCG_REG_TMP;
- goto do_clz;
-
- case INDEX_op_clz_i32:
- a1 = args[1];
- do_clz:
- a0 = args[0];
- a2 = args[2];
- c = const_args[2];
- if (c && a2 == 32) {
- tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
- break;
- }
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_addco,
+ .out_rri = tgen_addco_imm,
+};
+
+static void tgen_addci(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADC, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addci_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_ADC, ARITH_SBC, a0, a1, a2);
+}
+
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_addci,
+ .out_rri = tgen_addci_imm,
+};
+
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADC | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_ADC | TO_CPSR, ARITH_SBC | TO_CPSR,
+ a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_addcio,
+ .out_rri = tgen_addcio_imm,
+};
+
+/* Set C to @c; NZVQ all set to 0. */
+static void tcg_out_movi_apsr_c(TCGContext *s, bool c)
+{
+ int imm12 = encode_imm_nofail(c << 29);
+ tcg_out32(s, (COND_AL << 28) | INSN_MSRI_CPSR | 0x80000 | imm12);
+}
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out_movi_apsr_c(s, 1);
+}
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
+ tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
+ tcg_out_mov_reg(s, COND_EQ, a0, a2);
+}
+
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == 32) {
+ tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
+ } else {
tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
- if (c || a0 != a2) {
- tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
- }
- break;
+ tcg_out_movi32(s, COND_EQ, a0, a2);
+ }
+}
- case INDEX_op_brcond_i32:
- c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i32:
- c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
- ARITH_MOV, args[0], 0, 1);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
- ARITH_MOV, args[0], 0, 0);
- break;
- case INDEX_op_negsetcond_i32:
- c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
- ARITH_MVN, args[0], 0, 0);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
- ARITH_MOV, args[0], 0, 0);
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
- case INDEX_op_brcond2_i32:
- c = tcg_out_cmp2(s, args, const_args);
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
- break;
- case INDEX_op_setcond2_i32:
- c = tcg_out_cmp2(s, args + 1, const_args + 1);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
- ARITH_MOV, args[0], 0, 0);
- break;
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_qemu_ld_a32_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
+ tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
+}
- case INDEX_op_qemu_st_a32_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- break;
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
+ tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
+}
- case INDEX_op_bswap16_i32:
- tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, COND_AL, args[0], args[1]);
- break;
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
+{
+ return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented;
+}
- case INDEX_op_deposit_i32:
- tcg_out_deposit(s, COND_AL, args[0], args[2],
- args[3], args[4], const_args[2]);
- break;
- case INDEX_op_extract_i32:
- tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
- case INDEX_op_sextract_i32:
- tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
- case INDEX_op_extract2_i32:
- /* ??? These optimization vs zero should be generic. */
- /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
- } else {
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
- args[2], SHIFT_IMM_LSL(32 - args[3]));
- }
- } else if (const_args[2]) {
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
- args[1], SHIFT_IMM_LSR(args[3]));
- } else {
- /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
- args[2], SHIFT_IMM_LSL(32 - args[3]));
- tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
- args[1], SHIFT_IMM_LSR(args[3]));
- }
- break;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
- case INDEX_op_div_i32:
- tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_divu_i32:
- tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
- break;
+static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
+{
+ return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
- case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
- break;
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* sdiv */
+ tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i32:
- default:
- g_assert_not_reached();
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_idiv,
+ .out_rrr = tgen_divs,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* udiv */
+ tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_idiv,
+ .out_rrr = tgen_divu,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* mul */
+ tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2);
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
+{
+ /* smull */
+ tcg_out32(s, (COND_AL << 28) | 0x00c00090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_O2_I2(r, r, r, r),
+ .out_rrrr = tgen_muls2,
+};
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
+{
+ /* umull */
+ tcg_out32(s, (COND_AL << 28) | 0x00800090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_O2_I2(r, r, r, r),
+ .out_rrrr = tgen_mulu2,
+};
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2));
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2));
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2));
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
+ SHIFT_IMM_ASR(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2));
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
+ SHIFT_IMM_LSL(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2));
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
+ SHIFT_IMM_LSR(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_SUB | TO_CPSR, ARITH_ADD | TO_CPSR,
+ a0, a1, a2);
+}
+
+static void tgen_subbo_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB | TO_CPSR,
+ a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbo_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, rI, rIN),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
+};
+
+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SBC,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_SBC, ARITH_ADC, a0, a1, a2);
+}
+
+static void tgen_subbi_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSC, a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbi_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbi_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_O1_I2(r, rI, rIK),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
+ .out_rir = tgen_subbi_rir,
+ .out_rii = tgen_subbi_rii,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SBC | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_SBC | TO_CPSR, ARITH_ADC | TO_CPSR,
+ a0, a1, a2);
+}
+
+static void tgen_subbio_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSC | TO_CPSR,
+ a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbio_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbio_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, rI, rIK),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+ .out_rir = tgen_subbio_rir,
+ .out_rii = tgen_subbio_rii,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ tcg_out_movi_apsr_c(s, 0); /* borrow = !carry */
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2));
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg rd, TCGReg rn, unsigned flags)
+{
+ if (flags & TCG_BSWAP_OS) {
+ /* revsh */
+ tcg_out32(s, 0x06ff0fb0 | (COND_AL << 28) | (rd << 12) | rn);
+ return;
+ }
+
+ /* rev16 */
+ tcg_out32(s, 0x06bf0fb0 | (COND_AL << 28) | (rd << 12) | rn);
+ if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext16u(s, rd, rd);
}
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg rd, TCGReg rn, unsigned flags)
{
- switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_not_i32:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_sextract_i32:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- return C_O0_I2(r, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_sub_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
- return C_O1_I2(r, r, rIN);
-
- case INDEX_op_and_i32:
- case INDEX_op_andc_i32:
- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- return C_O1_I2(r, r, rIK);
-
- case INDEX_op_mul_i32:
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- return C_O1_I2(r, r, r);
-
- case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i32:
- return C_O2_I2(r, r, r, r);
-
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- return C_O1_I2(r, r, rI);
-
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_brcond_i32:
- return C_O0_I2(r, rIN);
- case INDEX_op_deposit_i32:
- return C_O1_I2(r, 0, rZ);
- case INDEX_op_extract2_i32:
- return C_O1_I2(r, rZ, rZ);
- case INDEX_op_movcond_i32:
- return C_O1_I4(r, r, rIN, rIK, 0);
- case INDEX_op_add2_i32:
- return C_O2_I4(r, r, r, r, rIN, rIK);
- case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, rI, rI, rIN, rIK);
- case INDEX_op_brcond2_i32:
- return C_O0_I4(r, r, rI, rI);
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, r, r, rI, rI);
-
- case INDEX_op_qemu_ld_a32_i32:
- return C_O1_I1(r, q);
- case INDEX_op_qemu_ld_a64_i32:
- return C_O1_I2(r, q, q);
- case INDEX_op_qemu_ld_a32_i64:
- return C_O2_I1(e, p, q);
- case INDEX_op_qemu_ld_a64_i64:
- return C_O2_I2(e, p, q, q);
- case INDEX_op_qemu_st_a32_i32:
- return C_O0_I2(q, q);
- case INDEX_op_qemu_st_a64_i32:
- return C_O0_I3(q, q, q);
- case INDEX_op_qemu_st_a32_i64:
- return C_O0_I3(Q, p, q);
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I4(Q, p, q, q);
+ /* rev */
+ tcg_out32(s, 0x06bf0f30 | (COND_AL << 28) | (rd << 12) | rn);
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_subfi(s, type, a0, 0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0));
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGLabel *l)
+{
+ cond = tgen_cmp(s, cond, a0, a1);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, tcg_target_long a1, TCGLabel *l)
+{
+ cond = tgen_cmpi(s, cond, a0, a1);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rIN),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
+static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg)
+{
+ tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0);
+ tcg_out_movi32(s, tcg_cond_to_arm_cond[cond], ret, neg ? -1 : 1);
+}
+
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ cond = tgen_cmp(s, cond, a1, a2);
+ finish_setcond(s, cond, a0, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ cond = tgen_cmpi(s, cond, a1, a2);
+ finish_setcond(s, cond, a0, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ cond = tgen_cmp(s, cond, a1, a2);
+ finish_setcond(s, cond, a0, true);
+}
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ cond = tgen_cmpi(s, cond, a1, a2);
+ finish_setcond(s, cond, a0, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
+ cond = tcg_out_cmp(s, cond, c1, c2, const_c2);
+ tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN,
+ ret, 0, vt, const_vt);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0),
+ .out = tgen_movcond,
+};
+
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh,
+ TCGLabel *l)
+{
+ cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
+}
+
+static const TCGOutOpBrcond2 outop_brcond2 = {
+ .base.static_constraint = C_O0_I4(r, r, rI, rI),
+ .out = tgen_brcond2,
+};
+
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh)
+{
+ cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
+ finish_setcond(s, cond, ret, false);
+}
+
+static const TCGOutOpSetcond2 outop_setcond2 = {
+ .base.static_constraint = C_O1_I4(r, r, r, rI, rI),
+ .out = tgen_setcond2,
+};
+
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
+ TCGReg a1, TCGReg a2, unsigned shr)
+{
+ /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
+ tgen_shli(s, TCG_TYPE_I32, TCG_REG_TMP, a2, 32 - shr);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, TCG_REG_TMP,
+ a1, SHIFT_IMM_LSR(shr));
+}
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_extract2,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld8_12(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld8s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld8s_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld16u_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld16u_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld16s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld16s_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_st8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_st8_12(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
+
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_st16_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_st16_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+{
+ switch (op) {
case INDEX_op_st_vec:
return C_O0_I2(w, r);
case INDEX_op_ld_vec:
@@ -2254,7 +2687,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_bitsel_vec:
return C_O1_I3(w, w, w, w);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index fb72614..4f9f877 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -26,10 +26,6 @@
#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H
-extern int arm_arch;
-
-#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
-
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
@@ -74,86 +70,4 @@ typedef enum {
#define TCG_TARGET_NB_REGS 32
-#ifdef __ARM_ARCH_EXT_IDIV__
-#define use_idiv_instructions 1
-#else
-extern bool use_idiv_instructions;
-#endif
-#ifdef __ARM_NEON__
-#define use_neon_instructions 1
-#else
-extern bool use_neon_instructions;
-#endif
-
-/* used for function call generation */
-#define TCG_TARGET_STACK_ALIGN 8
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
-
-/* optional instructions */
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
-#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
-#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
-#define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
-#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
-#define TCG_TARGET_HAS_tst 1
-
-#define TCG_TARGET_HAS_v64 use_neon_instructions
-#define TCG_TARGET_HAS_v128 use_neon_instructions
-#define TCG_TARGET_HAS_v256 0
-
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec 1
-#define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 0
-#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec 1
-#define TCG_TARGET_HAS_abs_vec 1
-#define TCG_TARGET_HAS_roti_vec 0
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
-#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_sat_vec 1
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 1
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 1
-
-#define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
-
#endif
diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h
deleted file mode 100644
index d38af9a..0000000
--- a/tcg/arm/tcg-target.opc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2019 Linaro
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * (at your option) any later version.
- *
- * See the COPYING file in the top-level directory for details.
- *
- * Target-specific opcodes for host vector expansion. These will be
- * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
- * consider these to be UNSPEC with names.
- */
-
-DEF(arm_sli_vec, 1, 2, 1, IMPLVEC)
-DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC)
-DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC)
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
index e24241c..458d69c 100644
--- a/tcg/i386/tcg-target-con-set.h
+++ b/tcg/i386/tcg-target-con-set.h
@@ -42,18 +42,19 @@ C_O1_I2(r, 0, reZ)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, L, L)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, re)
C_O1_I2(r, r, ri)
-C_O1_I2(r, r, rI)
+C_O1_I2(r, rO, re)
C_O1_I2(x, x, x)
C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
C_O1_I3(x, 0, x, x)
C_O1_I3(x, x, x, x)
+C_O1_I4(x, x, x, xO, x)
C_O1_I4(r, r, reT, r, 0)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, L)
C_O2_I2(a, d, a, r)
C_O2_I2(r, r, L, L)
C_O2_I3(a, d, 0, 1, r)
-C_N1_O1_I4(r, r, 0, 1, re, re)
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
index cc22db2..dbedff1 100644
--- a/tcg/i386/tcg-target-con-str.h
+++ b/tcg/i386/tcg-target-con-str.h
@@ -20,7 +20,7 @@ REGS('r', ALL_GENERAL_REGS)
REGS('x', ALL_VECTOR_REGS)
REGS('q', ALL_BYTEL_REGS) /* regs that can be used as a byte operand */
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_ld/st */
-REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
+REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st MO_8 data */
/*
* Define constraint letters for constants:
@@ -28,6 +28,7 @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
*/
CONST('e', TCG_CT_CONST_S32)
CONST('I', TCG_CT_CONST_I32)
+CONST('O', TCG_CT_CONST_ZERO)
CONST('T', TCG_CT_CONST_TST)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_U32)
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
new file mode 100644
index 0000000..42647fa
--- /dev/null
+++ b/tcg/i386/tcg-target-has.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#include "host/cpuinfo.h"
+
+#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
+#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
+#define have_avx1 (cpuinfo & CPUINFO_AVX1)
+#define have_avx2 (cpuinfo & CPUINFO_AVX2)
+#define have_movbe (cpuinfo & CPUINFO_MOVBE)
+
+/*
+ * There are interesting instructions in AVX512, so long as we have AVX512VL,
+ * which indicates support for EVEX on sizes smaller than 512 bits.
+ */
+#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
+ (cpuinfo & CPUINFO_AVX512F))
+#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
+#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
+#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
+
+/* optional instructions */
+#if TCG_TARGET_REG_BITS == 64
+/* Keep 32-bit values zero-extended in a register. */
+#define TCG_TARGET_HAS_extr_i64_i32 1
+#endif
+
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
+ (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
+
+#define TCG_TARGET_HAS_tst 1
+
+/* We do not support older SSE systems, only beginning with AVX1. */
+#define TCG_TARGET_HAS_v64 have_avx1
+#define TCG_TARGET_HAS_v128 have_avx1
+#define TCG_TARGET_HAS_v256 have_avx2
+
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec have_avx512vl
+#define TCG_TARGET_HAS_nand_vec have_avx512vl
+#define TCG_TARGET_HAS_nor_vec have_avx512vl
+#define TCG_TARGET_HAS_eqv_vec have_avx512vl
+#define TCG_TARGET_HAS_not_vec have_avx512vl
+#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_abs_vec 1
+#define TCG_TARGET_HAS_roti_vec have_avx512vl
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec have_avx512vl
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 1
+#define TCG_TARGET_HAS_shv_vec have_avx2
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
+#define TCG_TARGET_HAS_cmpsel_vec 1
+#define TCG_TARGET_HAS_tst_vec have_avx512bw
+
+#define TCG_TARGET_deposit_valid(type, ofs, len) \
+ (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
+ (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
+
+/*
+ * Check for the possibility of low byte/word extraction, high-byte extraction
+ * and zero-extending 32-bit right-shift.
+ *
+ * We cannot sign-extend from high byte to 64-bits without using the
+ * REX prefix that explicitly excludes access to the high-byte registers.
+ */
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ switch (ofs) {
+ case 0:
+ switch (len) {
+ case 8:
+ case 16:
+ return true;
+ case 32:
+ return type == TCG_TYPE_I64;
+ }
+ return false;
+ case 8:
+ return len == 8 && type == TCG_TYPE_I32;
+ }
+ return false;
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+static inline bool
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
+ return true;
+ }
+ switch (ofs) {
+ case 0:
+ return len == 8 || len == 16;
+ case 8:
+ return len == 8;
+ }
+ return false;
+}
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
+
+#endif
diff --git a/tcg/i386/tcg-target-mo.h b/tcg/i386/tcg-target-mo.h
new file mode 100644
index 0000000..7567dc7
--- /dev/null
+++ b/tcg/i386/tcg-target-mo.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+/*
+ * This defines the natural memory order supported by this architecture
+ * before guarantees made by various barrier instructions.
+ *
+ * The x86 has a pretty strong memory ordering which only really
+ * allows for some stores to be re-ordered after loads.
+ */
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
+#endif
diff --git a/tcg/i386/tcg-target-opc.h.inc b/tcg/i386/tcg-target-opc.h.inc
new file mode 100644
index 0000000..8cc0dba
--- /dev/null
+++ b/tcg/i386/tcg-target-opc.h.inc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Linaro
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+
+DEF(x86_shufps_vec, 1, 2, 1, TCG_OPF_VECTOR)
+DEF(x86_blend_vec, 1, 2, 1, TCG_OPF_VECTOR)
+DEF(x86_packss_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(x86_packus_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(x86_psrldq_vec, 1, 1, 1, TCG_OPF_VECTOR)
+DEF(x86_vperm2i128_vec, 1, 2, 1, TCG_OPF_VECTOR)
+DEF(x86_punpckl_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(x86_punpckh_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(x86_vpshldi_vec, 1, 2, 1, TCG_OPF_VECTOR)
+DEF(x86_vpshldv_vec, 1, 3, 0, TCG_OPF_VECTOR)
+DEF(x86_vpshrdv_vec, 1, 3, 0, TCG_OPF_VECTOR)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 9a54ef7..088c6c9 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -22,8 +22,25 @@
* THE SOFTWARE.
*/
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
+/* Used for function call generation. */
+#define TCG_TARGET_STACK_ALIGN 16
+#if defined(_WIN64)
+#define TCG_TARGET_CALL_STACK_OFFSET 32
+#else
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#endif
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#if defined(_WIN64)
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC
+#elif TCG_TARGET_REG_BITS == 64
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
+#else
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
+#endif
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@@ -133,6 +150,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800
#define TCG_CT_CONST_TST 0x1000
+#define TCG_CT_CONST_ZERO 0x2000
/* Registers used with L constraint, which are the first argument
registers on x86_64, and two random call clobbered registers on
@@ -226,6 +244,9 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return 1;
}
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return 1;
+ }
return 0;
}
@@ -403,12 +424,25 @@ static bool tcg_target_const_match(int64_t val, int ct,
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_SHRD_Ib (0xac | P_EXT)
+#define OPC_STC (0xf9)
#define OPC_TESTB (0x84)
#define OPC_TESTL (0x85)
#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
#define OPC_UD2 (0x0b | P_EXT)
#define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
#define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
+#define OPC_VPBLENDMB (0x66 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPBLENDMW (0x66 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPBLENDMD (0x64 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPBLENDMQ (0x64 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPCMPB (0x3f | P_EXT3A | P_DATA16 | P_EVEX)
+#define OPC_VPCMPUB (0x3e | P_EXT3A | P_DATA16 | P_EVEX)
+#define OPC_VPCMPW (0x3f | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPCMPUW (0x3e | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPCMPD (0x1f | P_EXT3A | P_DATA16 | P_EVEX)
+#define OPC_VPCMPUD (0x1e | P_EXT3A | P_DATA16 | P_EVEX)
+#define OPC_VPCMPQ (0x1f | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPCMPUQ (0x1e | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16)
#define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16)
#define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16)
@@ -417,6 +451,10 @@ static bool tcg_target_const_match(int64_t val, int ct,
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
+#define OPC_VPMOVM2B (0x28 | P_EXT38 | P_SIMDF3 | P_EVEX)
+#define OPC_VPMOVM2W (0x28 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX)
+#define OPC_VPMOVM2D (0x38 | P_EXT38 | P_SIMDF3 | P_EVEX)
+#define OPC_VPMOVM2Q (0x38 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX)
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPROLVD (0x15 | P_EXT38 | P_DATA16 | P_EVEX)
@@ -442,6 +480,14 @@ static bool tcg_target_const_match(int64_t val, int ct,
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPTESTMB (0x26 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPTESTMW (0x26 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPTESTMD (0x27 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPTESTMQ (0x27 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPTESTNMB (0x26 | P_EXT38 | P_SIMDF3 | P_EVEX)
+#define OPC_VPTESTNMW (0x26 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX)
+#define OPC_VPTESTNMD (0x27 | P_EXT38 | P_SIMDF3 | P_EVEX)
+#define OPC_VPTESTNMQ (0x27 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)
#define OPC_XCHG_EvGv (0x87)
@@ -658,7 +704,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
}
static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
- int rm, int index)
+ int rm, int index, int aaa, bool z)
{
/* The entire 4-byte evex prefix; with R' and V' set. */
uint32_t p = 0x08041062;
@@ -695,7 +741,9 @@ static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
p = deposit32(p, 16, 2, pp);
p = deposit32(p, 19, 4, ~v);
p = deposit32(p, 23, 1, (opc & P_VEXW) != 0);
+ p = deposit32(p, 24, 3, aaa);
p = deposit32(p, 29, 2, (opc & P_VEXL) != 0);
+ p = deposit32(p, 31, 1, z);
tcg_out32(s, p);
tcg_out8(s, opc);
@@ -704,13 +752,32 @@ static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
if (opc & P_EVEX) {
- tcg_out_evex_opc(s, opc, r, v, rm, 0);
+ tcg_out_evex_opc(s, opc, r, v, rm, 0, 0, false);
} else {
tcg_out_vex_opc(s, opc, r, v, rm, 0);
}
tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
+static void tcg_out_vex_modrm_type(TCGContext *s, int opc,
+ int r, int v, int rm, TCGType type)
+{
+ if (type == TCG_TYPE_V256) {
+ opc |= P_VEXL;
+ }
+ tcg_out_vex_modrm(s, opc, r, v, rm);
+}
+
+static void tcg_out_evex_modrm_type(TCGContext *s, int opc, int r, int v,
+ int rm, int aaa, bool z, TCGType type)
+{
+ if (type == TCG_TYPE_V256) {
+ opc |= P_VEXL;
+ }
+ tcg_out_evex_opc(s, opc, r, v, rm, 0, aaa, z);
+ tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
+}
+
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
We handle either RM and INDEX missing with a negative value. In 64-bit
mode for absolute addresses, ~RM is the size of the immediate operand
@@ -904,8 +971,7 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg r, TCGReg a)
{
if (have_avx2) {
- int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
- tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
+ tcg_out_vex_modrm_type(s, avx2_dup_insn[vece], r, 0, a, type);
} else {
switch (vece) {
case MO_8:
@@ -1027,7 +1093,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type,
{
tcg_target_long diff;
- if (arg == 0) {
+ if (arg == 0 && !s->carry_live) {
tgen_arithr(s, ARITH_XOR, ret, ret);
return;
}
@@ -1102,7 +1168,7 @@ static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
}
}
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Given the strength of x86 memory ordering, we only need care for
store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
@@ -1264,16 +1330,31 @@ static inline void tcg_out_rolw_8(TCGContext *s, int reg)
static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
- /* movzbl */
- tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+ if (TCG_TARGET_REG_BITS == 32 && src >= 4) {
+ tcg_out_mov(s, TCG_TYPE_I32, dest, src);
+ if (dest >= 4) {
+ tcg_out_modrm(s, OPC_ARITH_EvIz, ARITH_AND, dest);
+ tcg_out32(s, 0xff);
+ return;
+ }
+ src = dest;
+ }
tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
- /* movsbl */
- tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+
+ if (TCG_TARGET_REG_BITS == 32 && src >= 4) {
+ tcg_out_mov(s, TCG_TYPE_I32, dest, src);
+ if (dest >= 4) {
+ tcg_out_shifti(s, SHIFT_SHL, dest, 24);
+ tcg_out_shifti(s, SHIFT_SAR, dest, 24);
+ return;
+ }
+ src = dest;
+ }
tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}
@@ -1465,6 +1546,11 @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
}
}
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_jxx(s, JCC_JMP, l, 0);
+}
+
static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
TCGArg arg2, int const_arg2, int rexw)
{
@@ -1562,96 +1648,78 @@ static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond,
tcg_out_jxx(s, jcc, label, small);
}
-#if TCG_TARGET_REG_BITS == 32
-static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
- const int *const_args, bool small)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *label)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_brcond(s, rexw, cond, arg1, arg2, false, label, false);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *label)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_brcond(s, rexw, cond, arg1, arg2, true, label, false);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, reT),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
+static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al,
+ TCGReg ah, TCGArg bl, bool blconst,
+ TCGArg bh, bool bhconst,
+ TCGLabel *label_this, bool small)
{
TCGLabel *label_next = gen_new_label();
- TCGLabel *label_this = arg_label(args[5]);
- TCGCond cond = args[4];
switch (cond) {
case TCG_COND_EQ:
case TCG_COND_TSTEQ:
tcg_out_brcond(s, 0, tcg_invert_cond(cond),
- args[0], args[2], const_args[2], label_next, 1);
- tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
- label_this, small);
+ al, bl, blconst, label_next, true);
+ tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small);
break;
+
case TCG_COND_NE:
case TCG_COND_TSTNE:
- tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2],
- label_this, small);
- tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
- label_this, small);
- break;
- case TCG_COND_LT:
- tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_LE:
- tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_GT:
- tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_GE:
- tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_LTU:
- tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2],
- label_this, small);
+ tcg_out_brcond(s, 0, cond, al, bl, blconst, label_this, small);
+ tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small);
break;
- case TCG_COND_LEU:
- tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_GTU:
- tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3],
- label_this, small);
- tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2],
- label_this, small);
- break;
- case TCG_COND_GEU:
- tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3],
- label_this, small);
+
+ default:
+ tcg_out_brcond(s, 0, tcg_high_cond(cond),
+ ah, bh, bhconst, label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2],
- label_this, small);
+ tcg_out_brcond(s, 0, tcg_unsigned_cond(cond),
+ al, bl, blconst, label_this, small);
break;
- default:
- g_assert_not_reached();
}
tcg_out_label(s, label_next);
}
+
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al,
+ TCGReg ah, TCGArg bl, bool blconst,
+ TCGArg bh, bool bhconst, TCGLabel *l)
+{
+ tcg_out_brcond2(s, cond, al, ah, bl, blconst, bh, bhconst, l, false);
+}
+
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
#endif
+static const TCGOutOpBrcond2 outop_brcond2 = {
+ .base.static_constraint = C_O0_I4(r, r, ri, ri),
+ .out = tgen_brcond2,
+};
-static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
- TCGArg dest, TCGArg arg1, TCGArg arg2,
- int const_arg2, bool neg)
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGArg arg2,
+ bool const_arg2, bool neg)
{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
int cmp_rexw = rexw;
bool inv = false;
bool cleared;
@@ -1726,7 +1794,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
case TCG_COND_LT:
/* If arg2 is 0, extract the sign bit. */
if (const_arg2 && arg2 == 0) {
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, dest, arg1);
+ tcg_out_mov(s, type, dest, arg1);
if (inv) {
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest);
}
@@ -1762,49 +1830,89 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
}
}
-#if TCG_TARGET_REG_BITS == 32
-static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
- const int *const_args)
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(q, r, reT),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
- TCGArg new_args[6];
- TCGLabel *label_true, *label_over;
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
+}
- memcpy(new_args, args+1, 5*sizeof(TCGArg));
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(q, r, reT),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
- if (args[0] == args[1] || args[0] == args[2]
- || (!const_args[3] && args[0] == args[3])
- || (!const_args[4] && args[0] == args[4])) {
- /* When the destination overlaps with one of the argument
- registers, don't do anything tricky. */
- label_true = gen_new_label();
- label_over = gen_new_label();
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh)
+{
+ TCGLabel *label_over = gen_new_label();
+
+ if (ret == al || ret == ah
+ || (!const_bl && ret == bl)
+ || (!const_bh && ret == bh)) {
+ /*
+ * When the destination overlaps with one of the argument
+ * registers, don't do anything tricky.
+ */
+ TCGLabel *label_true = gen_new_label();
- new_args[5] = label_arg(label_true);
- tcg_out_brcond2(s, new_args, const_args+1, 1);
+ tcg_out_brcond2(s, cond, al, ah, bl, const_bl,
+ bh, const_bh, label_true, true);
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 0);
tcg_out_jxx(s, JCC_JMP, label_over, 1);
tcg_out_label(s, label_true);
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
- tcg_out_label(s, label_over);
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
} else {
- /* When the destination does not overlap one of the arguments,
- clear the destination first, jump if cond false, and emit an
- increment in the true case. This results in smaller code. */
-
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
+ /*
+ * When the destination does not overlap one of the arguments,
+ * clear the destination first, jump if cond false, and emit an
+ * increment in the true case. This results in smaller code.
+ */
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 0);
- label_over = gen_new_label();
- new_args[4] = tcg_invert_cond(new_args[4]);
- new_args[5] = label_arg(label_over);
- tcg_out_brcond2(s, new_args, const_args+1, 1);
+ tcg_out_brcond2(s, tcg_invert_cond(cond), al, ah, bl, const_bl,
+ bh, const_bh, label_over, true);
- tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
- tcg_out_label(s, label_over);
+ tgen_arithi(s, ARITH_ADD, ret, 1, 0);
}
+ tcg_out_label(s, label_over);
}
+
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
#endif
+static const TCGOutOpSetcond2 outop_setcond2 = {
+ .base.static_constraint = C_O1_I4(r, r, r, ri, ri),
+ .out = tgen_setcond2,
+};
static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
TCGReg dest, TCGReg v1)
@@ -1812,57 +1920,20 @@ static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1);
}
-static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
- TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
- TCGReg v1)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt,
+ TCGArg vf, bool const_vf)
{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw);
- tcg_out_cmov(s, jcc, rexw, dest, v1);
-}
-
-static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
- TCGArg arg2, bool const_a2)
-{
- if (have_bmi1) {
- tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
- if (const_a2) {
- tcg_debug_assert(arg2 == (rexw ? 64 : 32));
- } else {
- tcg_debug_assert(dest != arg2);
- tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
- }
- } else {
- tcg_debug_assert(dest != arg2);
- tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
- tcg_out_cmov(s, JCC_JE, rexw, dest, arg2);
- }
+ tcg_out_cmov(s, jcc, rexw, dest, vt);
}
-static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
- TCGArg arg2, bool const_a2)
-{
- if (have_lzcnt) {
- tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
- if (const_a2) {
- tcg_debug_assert(arg2 == (rexw ? 64 : 32));
- } else {
- tcg_debug_assert(dest != arg2);
- tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
- }
- } else {
- tcg_debug_assert(!const_a2);
- tcg_debug_assert(dest != arg1);
- tcg_debug_assert(dest != arg2);
-
- /* Recall that the output of BSR is the index not the count. */
- tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
- tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
-
- /* Since we have destroyed the flags from BSR, we have to re-test. */
- int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw);
- tcg_out_cmov(s, jcc, rexw, dest, arg2);
- }
-}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, reT, r, 0),
+ .out = tgen_movcond,
+};
static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
{
@@ -2089,8 +2160,7 @@ static inline int setup_guest_base_seg(void)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
@@ -2104,7 +2174,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
} else {
*h = x86_guest_base;
}
- h->base = addrlo;
+ h->base = addr;
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
@@ -2122,24 +2192,21 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
if (TCG_TARGET_REG_BITS == 64) {
ttype = s->addr_type;
trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
if (TCG_TYPE_PTR == TCG_TYPE_I64) {
hrexw = P_REXW;
- if (s->page_bits + s->tlb_dyn_max_bits > 32) {
- tlbtype = TCG_TYPE_I64;
- tlbrexw = P_REXW;
- }
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
}
}
- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addr);
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
fast_ofs + offsetof(CPUTLBDescFast, mask));
@@ -2153,12 +2220,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* check that we don't cross pages for the complete access.
*/
if (a_mask >= s_mask) {
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
+ tcg_out_mov(s, ttype, TCG_REG_L1, addr);
} else {
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
- addrlo, s_mask - a_mask);
+ addr, s_mask - a_mask);
}
- tlb_mask = s->page_mask | a_mask;
+ tlb_mask = TARGET_PAGE_MASK | a_mask;
tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
/* cmp 0(TCG_REG_L0), TCG_REG_L1 */
@@ -2170,17 +2237,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
- /* cmp 4(TCG_REG_L0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
- TCG_REG_L0, cmp_ofs + 4);
-
- /* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[1] = s->code_ptr;
- s->code_ptr += 4;
- }
-
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
offsetof(CPUTLBEntry, addend));
@@ -2190,11 +2246,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* jne slow_path */
- jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false);
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false);
tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0);
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
@@ -2365,24 +2420,50 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
+ tcg_out_qemu_ld_direct(s, data, -1, h, type, get_memop(oi));
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, L),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
- tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi));
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h, type, get_memop(oi));
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_O2_I1(r, r, L),
+ .out = tgen_qemu_ld2,
+};
+
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
HostAddress h, MemOp memop)
{
@@ -2401,7 +2482,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
switch (memop & MO_SIZE) {
case MO_8:
- /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
+ /* This is handled with constraints in cset_qemu_st(). */
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
datalo, h.base, h.index, 0, h.ofs);
@@ -2493,24 +2574,58 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+ tcg_out_qemu_st_direct(s, data, -1, h, get_memop(oi));
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static TCGConstraintSetIndex cset_qemu_st(TCGType type, unsigned flags)
+{
+ return flags == MO_8 ? C_O0_I2(s, L) : C_O0_I2(L, L);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_Dynamic : C_O0_I2(L, L),
+ .base.dynamic_constraint =
+ TCG_TARGET_REG_BITS == 32 ? cset_qemu_st : NULL,
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi));
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(L, L, L),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
/* Reuse the zeroing that exists for goto_ptr. */
@@ -2538,6 +2653,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ /* Jump to the given host address (could be epilogue) */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -2547,478 +2668,1144 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
/* no need to flush icache explicitly */
}
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0, a1, a2;
- int c, const_a2, vexop, rexw = 0;
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
-#if TCG_TARGET_REG_BITS == 64
-# define OP_32_64(x) \
- case glue(glue(INDEX_op_, x), _i64): \
- rexw = P_REXW; /* FALLTHRU */ \
- case glue(glue(INDEX_op_, x), _i32)
-#else
-# define OP_32_64(x) \
- case glue(glue(INDEX_op_, x), _i32)
-#endif
+ if (a0 == a1) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a2);
+ } else if (a0 == a2) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, 0);
+ }
+}
- /* Hoist the loads of the most common arguments. */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
- const_a2 = const_args[2];
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
- switch (opc) {
- case INDEX_op_goto_ptr:
- /* jmp to the given host address (could be epilogue) */
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
- break;
- case INDEX_op_br:
- tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
- break;
- OP_32_64(ld8u):
- /* Note that we can ignore REXW for the zero-extend to 64-bit. */
- tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
- break;
- OP_32_64(ld8s):
- tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
- break;
- OP_32_64(ld16u):
- /* Note that we can ignore REXW for the zero-extend to 64-bit. */
- tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
- break;
- OP_32_64(ld16s):
- tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_ld32u_i64:
-#endif
- case INDEX_op_ld_i32:
- tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
- break;
+ if (a0 == a1) {
+ tgen_arithi(s, ARITH_ADD + rexw, a0, a2, false);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, -1, 0, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, re),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a2);
+}
+
+static void tgen_addco_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_ADD + rexw, a0, a2, true);
+}
+
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_addco,
+ .out_rri = tgen_addco_imm,
+};
+
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_ADC + rexw, a0, a2);
+}
+
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_ADC + rexw, a0, a2, true);
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_addcio,
+ .out_rri = tgen_addcio_imm,
+};
+
+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* Because "0O" is not a valid constraint, we must match ourselves. */
+ if (a0 == a2) {
+ tgen_addcio(s, type, a0, a0, a1);
+ } else {
+ tcg_out_mov(s, type, a0, a1);
+ tgen_addcio(s, type, a0, a0, a2);
+ }
+}
+
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ tgen_addcio_imm(s, type, a0, a0, a2);
+}
+
+static void tgen_addci_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tgen_addci_rri(s, type, a0, a2, a1);
+}
+
+static void tgen_addci_rii(TCGContext *s, TCGType type, TCGReg a0,
+ tcg_target_long a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ /* Implement 0 + 0 + C with -(x - x - c). */
+ tgen_arithr(s, ARITH_SBB, a0, a0);
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, a0);
+ } else {
+ tcg_out_movi(s, type, a0, a2);
+ tgen_addcio_imm(s, type, a0, a0, a1);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_O1_I2(r, rO, re),
+ .out_rrr = tgen_addci_rrr,
+ .out_rri = tgen_addci_rri,
+ .out_rir = tgen_addci_rir,
+ .out_rii = tgen_addci_rii,
+};
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out8(s, OPC_STC);
+}
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_AND + rexw, a0, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_AND + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, 0, reZ),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
+}
+
+static TCGConstraintSetIndex cset_andc(TCGType type, unsigned flags)
+{
+ return have_bmi1 ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_andc,
+ .out_rrr = tgen_andc,
+};
+
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ int jcc;
+
+ if (have_lzcnt) {
+ tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1);
+ jcc = JCC_JB;
+ } else {
+ /* Recall that the output of BSR is the index not the count. */
+ tcg_out_modrm(s, OPC_BSR + rexw, a0, a1);
+ tgen_arithi(s, ARITH_XOR + rexw, a0, rexw ? 63 : 31, 0);
+
+ /* Since we have destroyed the flags from BSR, we have to re-test. */
+ jcc = tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, rexw);
+ }
+ tcg_out_cmov(s, jcc, rexw, a0, a2);
+}
+
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
+{
+ return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
+}
+
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clz,
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
+
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return have_popcnt ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ int jcc;
+
+ if (have_bmi1) {
+ tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1);
+ jcc = JCC_JB;
+ } else {
+ tcg_out_modrm(s, OPC_BSF + rexw, a0, a1);
+ jcc = JCC_JE;
+ }
+ tcg_out_cmov(s, jcc, rexw, a0, a2);
+}
+
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
+{
+ return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divs2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a4)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, a4);
+}
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_O2_I3(a, d, 0, 1, r),
+ .out_rr01r = tgen_divs2,
+};
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a4)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, a4);
+}
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_O2_I3(a, d, 0, 1, r),
+ .out_rr01r = tgen_divu2,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
- OP_32_64(st8):
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
- tcg_out8(s, a0);
- } else {
- tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
- }
- break;
- OP_32_64(st16):
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
- tcg_out16(s, a0);
- } else {
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
- }
- break;
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_st32_i64:
-#endif
- case INDEX_op_st_i32:
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
- tcg_out32(s, a0);
- } else {
- tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
- }
- break;
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
+}
- OP_32_64(add):
- /* For 3-operand addition, use LEA. */
- if (a0 != a1) {
- TCGArg c3 = 0;
- if (const_a2) {
- c3 = a2, a2 = -1;
- } else if (a0 == a2) {
- /* Watch out for dest = src + dest, since we've removed
- the matching constraint on the add. */
- tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
- break;
- }
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_extrh_i64_i32,
+};
+#endif /* TCG_TARGET_REG_BITS == 64 */
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
- break;
- }
- c = ARITH_ADD;
- goto gen_arith;
- OP_32_64(sub):
- c = ARITH_SUB;
- goto gen_arith;
- OP_32_64(and):
- c = ARITH_AND;
- goto gen_arith;
- OP_32_64(or):
- c = ARITH_OR;
- goto gen_arith;
- OP_32_64(xor):
- c = ARITH_XOR;
- goto gen_arith;
- gen_arith:
- if (const_a2) {
- tgen_arithi(s, c + rexw, a0, a2, 0);
- } else {
- tgen_arithr(s, c + rexw, a0, a2);
- }
- break;
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
+}
- OP_32_64(andc):
- if (const_a2) {
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
- tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
- } else {
- tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
- }
- break;
+static void tgen_muli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
- OP_32_64(mul):
- if (const_a2) {
- int32_t val;
- val = a2;
- if (val == (int8_t)val) {
- tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
- tcg_out8(s, val);
- } else {
- tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
- tcg_out32(s, val);
- }
- } else {
- tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
- }
- break;
+ if (a2 == (int8_t)a2) {
+ tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
+ tcg_out8(s, a2);
+ } else {
+ tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
+ tcg_out32(s, a2);
+ }
+}
- OP_32_64(div2):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
- break;
- OP_32_64(divu2):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
- break;
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_mul,
+ .out_rri = tgen_muli,
+};
- OP_32_64(shl):
- /* For small constant 3-operand shift, use LEA. */
- if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
- if (a2 - 1 == 0) {
- /* shl $1,a1,a0 -> lea (a1,a1),a0 */
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
- } else {
- /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
- }
- break;
- }
- c = SHIFT_SHL;
- vexop = OPC_SHLX;
- goto gen_shift_maybe_vex;
- OP_32_64(shr):
- c = SHIFT_SHR;
- vexop = OPC_SHRX;
- goto gen_shift_maybe_vex;
- OP_32_64(sar):
- c = SHIFT_SAR;
- vexop = OPC_SARX;
- goto gen_shift_maybe_vex;
- OP_32_64(rotl):
- c = SHIFT_ROL;
- goto gen_shift;
- OP_32_64(rotr):
- c = SHIFT_ROR;
- goto gen_shift;
- gen_shift_maybe_vex:
- if (have_bmi2) {
- if (!const_a2) {
- tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
- break;
- }
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
- }
- /* FALLTHRU */
- gen_shift:
- if (const_a2) {
- tcg_out_shifti(s, c + rexw, a0, a2);
- } else {
- tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
- }
- break;
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, a3);
+}
- OP_32_64(ctz):
- tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
- break;
- OP_32_64(clz):
- tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
- break;
- OP_32_64(ctpop):
- tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
- break;
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_O2_I2(a, d, a, r),
+ .out_rrrr = tgen_muls2,
+};
- OP_32_64(brcond):
- tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1],
- arg_label(args[3]), 0);
- break;
- OP_32_64(setcond):
- tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, false);
- break;
- OP_32_64(negsetcond):
- tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, true);
- break;
- OP_32_64(movcond):
- tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
- break;
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
- OP_32_64(bswap16):
- if (a2 & TCG_BSWAP_OS) {
- /* Output must be sign-extended. */
- if (rexw) {
- tcg_out_bswap64(s, a0);
- tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
- } else {
- tcg_out_bswap32(s, a0);
- tcg_out_shifti(s, SHIFT_SAR, a0, 16);
- }
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- /* Output must be zero-extended, but input isn't. */
- tcg_out_bswap32(s, a0);
- tcg_out_shifti(s, SHIFT_SHR, a0, 16);
- } else {
- tcg_out_rolw_8(s, a0);
- }
- break;
- OP_32_64(bswap32):
- tcg_out_bswap32(s, a0);
- if (rexw && (a2 & TCG_BSWAP_OS)) {
- tcg_out_ext32s(s, a0, a0);
- }
- break;
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_NotImplemented,
+};
- OP_32_64(neg):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
- break;
- OP_32_64(not):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
- break;
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, a3);
+}
- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
- break;
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_O2_I2(a, d, a, r),
+ .out_rrrr = tgen_mulu2,
+};
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st8_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st8_a32_i32:
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
- break;
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
- OP_32_64(mulu2):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
- break;
- OP_32_64(muls2):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
- break;
- OP_32_64(add2):
- if (const_args[4]) {
- tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
- } else {
- tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
- }
- if (const_args[5]) {
- tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
- } else {
- tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
- }
- break;
- OP_32_64(sub2):
- if (const_args[4]) {
- tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
- } else {
- tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
- }
- if (const_args[5]) {
- tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_OR + rexw, a0, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_OR + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROL, a0);
+}
+
+static void tgen_rotli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_shifti(s, SHIFT_ROL + rexw, a0, a2);
+}
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_O1_I2(r, 0, ci),
+ .out_rrr = tgen_rotl,
+ .out_rri = tgen_rotli,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROR, a0);
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_shifti(s, SHIFT_ROR + rexw, a0, a2);
+}
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_O1_I2(r, 0, ci),
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
+
+static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags)
+{
+ return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
+}
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ if (have_bmi2) {
+ tcg_out_vex_modrm(s, OPC_SARX + rexw, a0, a2, a1);
+ } else {
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SAR, a0);
+ }
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ tcg_out_mov(s, type, a0, a1);
+ tcg_out_shifti(s, SHIFT_SAR + rexw, a0, a2);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_shift,
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ if (have_bmi2) {
+ tcg_out_vex_modrm(s, OPC_SHLX + rexw, a0, a2, a1);
+ } else {
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHL, a0);
+ }
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ /* For small constant 3-operand shift, use LEA. */
+ if (a0 != a1 && a2 >= 1 && a2 <= 3) {
+ if (a2 == 1) {
+ /* shl $1,a1,a0 -> lea (a1,a1),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
} else {
- tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
+ /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
}
- break;
+ return;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHL + rexw, a0, a2);
+}
-#if TCG_TARGET_REG_BITS == 32
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args, const_args, 0);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args, const_args);
- break;
-#else /* TCG_TARGET_REG_BITS == 64 */
- case INDEX_op_ld32s_i64:
- tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
- break;
- case INDEX_op_st_i64:
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
- tcg_out32(s, a0);
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_shift,
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ if (have_bmi2) {
+ tcg_out_vex_modrm(s, OPC_SHRX + rexw, a0, a2, a1);
+ } else {
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHR, a0);
+ }
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ tcg_out_mov(s, type, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR + rexw, a0, a2);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_shift,
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_SUB + rexw, a0, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_SUB + rexw, a0, a2, 1);
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_sub,
+ .out_rri = tgen_subbo_rri,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_SBB + rexw, a0, a2);
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_SBB + rexw, a0, a2, 1);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+};
+
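+/* SBB produces a borrow-out as well, so borrow-in can reuse the in/out expansion. */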
+#define outop_subbi outop_subbio
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
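+ /* STC sets the carry flag, which x86 uses as the borrow for SBB. */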
+ tcg_out8(s, OPC_STC);
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_XOR + rexw, a0, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_XOR + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ if (flags & TCG_BSWAP_OS) {
+ /* Output must be sign-extended. */
+ if (rexw) {
+ tcg_out_bswap64(s, a0);
+ tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
} else {
- tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
+ tcg_out_bswap32(s, a0);
+ tcg_out_shifti(s, SHIFT_SAR, a0, 16);
}
- break;
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ /* Output must be zero-extended, but input isn't. */
+ tcg_out_bswap32(s, a0);
+ tcg_out_shifti(s, SHIFT_SHR, a0, 16);
+ } else {
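+ /* High bits already zero or ignored: rotate the low word by 8 to swap its bytes. */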
+ tcg_out_rolw_8(s, a0);
+ }
+}
- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, a0);
- break;
- case INDEX_op_extrh_i64_i32:
- tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
- break;
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_bswap32(s, a0);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_bswap32,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_bswap64(s, a0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_bswap64,
+};
#endif
- OP_32_64(deposit):
- if (args[3] == 0 && args[4] == 8) {
- /* load bits 0..7 */
- if (const_a2) {
- tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0),
- 0, a0, 0);
- tcg_out8(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
- }
- } else if (TCG_TARGET_REG_BITS == 32 && args[3] == 8 && args[4] == 8) {
- /* load bits 8..15 */
- if (const_a2) {
- tcg_out8(s, OPC_MOVB_Ib + a0 + 4);
- tcg_out8(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
- }
- } else if (args[3] == 0 && args[4] == 16) {
- /* load bits 0..15 */
- if (const_a2) {
- tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0),
- 0, a0, 0);
- tcg_out16(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
- }
- } else {
- g_assert_not_reached();
- }
- break;
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
+}
- case INDEX_op_extract_i64:
- if (a2 + args[3] == 32) {
- /* This is a 32-bit zero-extending right shift. */
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tcg_out_shifti(s, SHIFT_SHR, a0, a2);
- break;
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_not,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len == 8) {
+ tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
+ } else if (ofs == 0 && len == 16) {
+ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
+ } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) {
+ tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len == 8) {
+ tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0), 0, a0, 0);
+ tcg_out8(s, a2);
+ } else if (ofs == 0 && len == 16) {
+ tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0), 0, a0, 0);
+ tcg_out16(s, a2);
+ } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) {
+ tcg_out8(s, OPC_MOVB_Ib + a0 + 4);
+ tcg_out8(s, a2);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(q, 0, qi),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8u(s, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16u(s, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32u(s, a0, a1);
+ return;
}
- /* FALLTHRU */
- case INDEX_op_extract_i32:
- /* On the off-chance that we can use the high-byte registers.
- Otherwise we emit the same ext16 + shift pattern that we
- would have gotten from the normal tcg-op.c expansion. */
- tcg_debug_assert(a2 == 8 && args[3] == 8);
- if (a1 < 4 && a0 < 8) {
+ } else if (TCG_TARGET_REG_BITS == 64 && ofs + len == 32) {
+ /* This is a 32-bit zero-extending right shift. */
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR, a0, ofs);
+ return;
+ } else if (ofs == 8 && len == 8) {
+ /*
+ * On the off-chance that we can use the high-byte registers.
+ * Otherwise we emit the same ext16 + shift pattern that we
+ * would have gotten from the normal tcg-op.c expansion.
+ */
+ if (a1 < 4 && (TCG_TARGET_REG_BITS == 32 || a0 < 8)) {
tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
} else {
tcg_out_ext16u(s, a0, a1);
tcg_out_shifti(s, SHIFT_SHR, a0, 8);
}
- break;
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
- case INDEX_op_sextract_i32:
- /* We don't implement sextract_i64, as we cannot sign-extend to
- 64-bits without using the REX prefix that explicitly excludes
- access to the high-byte registers. */
- tcg_debug_assert(a2 == 8 && args[3] == 8);
- if (a1 < 4 && a0 < 8) {
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs == 8 && len == 8) {
+ if (type == TCG_TYPE_I32 && a1 < 4 && a0 < 8) {
tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
} else {
- tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
- tcg_out_shifti(s, SHIFT_SAR, a0, 8);
+ tcg_out_ext16s(s, type, a0, a1);
+ tgen_sari(s, type, a0, a0, 8);
}
- break;
+ return;
+ }
+ g_assert_not_reached();
+}
- OP_32_64(extract2):
- /* Note that SHRD outputs to the r/m operand. */
- tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
- tcg_out8(s, args[3]);
- break;
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
+ TCGReg a1, TCGReg a2, unsigned shr)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ /* Note that SHRD outputs to the r/m operand. */
+ tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
+ tcg_out8(s, shr);
+}
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_extract2,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVZBL, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVZWL, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
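+ /* In 64-bit mode, a 32-bit load zero-extends into the full register. */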
+ tcg_out_modrm_offset(s, OPC_MOVL_GvEv, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVSLQ, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+#endif
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, data, base, offset);
+}
+
+static void tgen_st8_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, base, offset);
+ tcg_out8(s, data);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(qi, r),
+ .out_r = tgen_st8_r,
+ .out_i = tgen_st8_i,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, data, base, offset);
+}
+
+static void tgen_st16_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, base, offset);
+ tcg_out16(s, data);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(ri, r),
+ .out_r = tgen_st16_r,
+ .out_i = tgen_st16_i,
+};
+
+static void tgen_st_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
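+ /* Constants are constrained to sign-extended 32 bits, so tcg_out_sti cannot fail. */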
+ bool ok = tcg_out_sti(s, type, data, base, offset);
+ tcg_debug_assert(ok);
+}
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(re, r),
+ .out_r = tcg_out_st,
+ .out_i = tgen_st_i,
+};
+
+static int const umin_insn[4] = {
+ OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
+};
+
+static int const umax_insn[4] = {
+ OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
+};
+
+static bool tcg_out_cmp_vec_noinv(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg v0, TCGReg v1, TCGReg v2, TCGCond cond)
+{
+ static int const cmpeq_insn[4] = {
+ OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
+ };
+ static int const cmpgt_insn[4] = {
+ OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
+ };
+
+ enum {
+ NEED_INV = 1,
+ NEED_SWAP = 2,
+ NEED_UMIN = 4,
+ NEED_UMAX = 8,
+ INVALID = 16,
+ };
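+ /*
+ * Unsigned orderings have no direct SSE comparison insn; reduce them
+ * to equality: x <= y (unsigned) iff umin(x, y) == x, and
+ * x >= y (unsigned) iff umax(x, y) == x.
+ */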
+ static const uint8_t cond_fixup[16] = {
+ [0 ... 15] = INVALID,
+ [TCG_COND_EQ] = 0,
+ [TCG_COND_GT] = 0,
+ [TCG_COND_NE] = NEED_INV,
+ [TCG_COND_LE] = NEED_INV,
+ [TCG_COND_LT] = NEED_SWAP,
+ [TCG_COND_GE] = NEED_SWAP | NEED_INV,
+ [TCG_COND_LEU] = NEED_UMIN,
+ [TCG_COND_GTU] = NEED_UMIN | NEED_INV,
+ [TCG_COND_GEU] = NEED_UMAX,
+ [TCG_COND_LTU] = NEED_UMAX | NEED_INV,
+ };
+ int fixup = cond_fixup[cond];
+
+ assert(!(fixup & INVALID));
+
+ if (fixup & NEED_INV) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ if (fixup & NEED_SWAP) {
+ TCGReg swap = v1;
+ v1 = v2;
+ v2 = swap;
+ cond = tcg_swap_cond(cond);
+ }
+
+ if (fixup & (NEED_UMIN | NEED_UMAX)) {
+ int op = (fixup & NEED_UMIN ? umin_insn[vece] : umax_insn[vece]);
+
+ /* avx2 does not have 64-bit min/max; adjusted during expand. */
+ assert(vece <= MO_32);
+
+ tcg_out_vex_modrm_type(s, op, TCG_TMP_VEC, v1, v2, type);
+ v2 = TCG_TMP_VEC;
+ cond = TCG_COND_EQ;
+ }
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ tcg_out_vex_modrm_type(s, cmpeq_insn[vece], v0, v1, v2, type);
+ break;
+ case TCG_COND_GT:
+ tcg_out_vex_modrm_type(s, cmpgt_insn[vece], v0, v1, v2, type);
break;
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
default:
g_assert_not_reached();
}
+ return fixup & NEED_INV;
+}
+
+static void tcg_out_cmp_vec_k1(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg v1, TCGReg v2, TCGCond cond)
+{
+ static const int cmpm_insn[2][4] = {
+ { OPC_VPCMPB, OPC_VPCMPW, OPC_VPCMPD, OPC_VPCMPQ },
+ { OPC_VPCMPUB, OPC_VPCMPUW, OPC_VPCMPUD, OPC_VPCMPUQ }
+ };
+ static const int testm_insn[4] = {
+ OPC_VPTESTMB, OPC_VPTESTMW, OPC_VPTESTMD, OPC_VPTESTMQ
+ };
+ static const int testnm_insn[4] = {
+ OPC_VPTESTNMB, OPC_VPTESTNMW, OPC_VPTESTNMD, OPC_VPTESTNMQ
+ };
+
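+ /*
+ * Immediate predicate encoding for VPCMP/VPCMPU:
+ * 0=EQ, 1=LT, 2=LE, 3=FALSE, 4=NE, 5=NLT, 6=NLE, 7=TRUE.
+ */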
+ static const int cond_ext[16] = {
+ [TCG_COND_EQ] = 0,
+ [TCG_COND_NE] = 4,
+ [TCG_COND_LT] = 1,
+ [TCG_COND_LTU] = 1,
+ [TCG_COND_LE] = 2,
+ [TCG_COND_LEU] = 2,
+ [TCG_COND_NEVER] = 3,
+ [TCG_COND_GE] = 5,
+ [TCG_COND_GEU] = 5,
+ [TCG_COND_GT] = 6,
+ [TCG_COND_GTU] = 6,
+ [TCG_COND_ALWAYS] = 7,
+ };
+
+ switch (cond) {
+ case TCG_COND_TSTNE:
+ tcg_out_vex_modrm_type(s, testm_insn[vece], /* k1 */ 1, v1, v2, type);
+ break;
+ case TCG_COND_TSTEQ:
+ tcg_out_vex_modrm_type(s, testnm_insn[vece], /* k1 */ 1, v1, v2, type);
+ break;
+ default:
+ tcg_out_vex_modrm_type(s, cmpm_insn[is_unsigned_cond(cond)][vece],
+ /* k1 */ 1, v1, v2, type);
+ tcg_out8(s, cond_ext[cond]);
+ break;
+ }
+}
+
+static void tcg_out_k1_to_vec(TCGContext *s, TCGType type,
+ unsigned vece, TCGReg dest)
+{
+ static const int movm_insn[] = {
+ OPC_VPMOVM2B, OPC_VPMOVM2W, OPC_VPMOVM2D, OPC_VPMOVM2Q
+ };
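+ /* VPMOVM2* expands each bit of k1 to an all-ones or all-zeros element. */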
+ tcg_out_vex_modrm_type(s, movm_insn[vece], dest, 0, /* k1 */ 1, type);
+}
+
+static void tcg_out_cmp_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg v0, TCGReg v1, TCGReg v2, TCGCond cond)
+{
+ /*
+ * With avx512, we have a complete set of comparisons into mask.
+ * Unless there's a single insn expansion for the comparison,
+ * expand via a mask in k1.
+ */
+ if ((vece <= MO_16 ? have_avx512bw : have_avx512dq)
+ && cond != TCG_COND_EQ
+ && cond != TCG_COND_LT
+ && cond != TCG_COND_GT) {
+ tcg_out_cmp_vec_k1(s, type, vece, v1, v2, cond);
+ tcg_out_k1_to_vec(s, type, vece, v0);
+ return;
+ }
+
+ if (tcg_out_cmp_vec_noinv(s, type, vece, v0, v1, v2, cond)) {
+ tcg_out_dupi_vec(s, type, vece, TCG_TMP_VEC, -1);
+ tcg_out_vex_modrm_type(s, OPC_PXOR, v0, v0, TCG_TMP_VEC, type);
+ }
+}
+
+static void tcg_out_cmpsel_vec_k1(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg v0, TCGReg c1, TCGReg c2,
+ TCGReg v3, TCGReg v4, TCGCond cond)
+{
+ static const int vpblendm_insn[] = {
+ OPC_VPBLENDMB, OPC_VPBLENDMW, OPC_VPBLENDMD, OPC_VPBLENDMQ
+ };
+ bool z = false;
+
+ /* Swap to place constant in V4 to take advantage of zero-masking. */
+ if (!v3) {
+ z = true;
+ v3 = v4;
+ cond = tcg_invert_cond(cond);
+ }
+
+ tcg_out_cmp_vec_k1(s, type, vece, c1, c2, cond);
+ tcg_out_evex_modrm_type(s, vpblendm_insn[vece], v0, v4, v3,
+ /* k1 */1, z, type);
+}
+
+static void tcg_out_cmpsel_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg v0, TCGReg c1, TCGReg c2,
+ TCGReg v3, TCGReg v4, TCGCond cond)
+{
+ bool inv;
+
+ if (vece <= MO_16 ? have_avx512bw : have_avx512vl) {
+ tcg_out_cmpsel_vec_k1(s, type, vece, v0, c1, c2, v3, v4, cond);
+ return;
+ }
-#undef OP_32_64
+ inv = tcg_out_cmp_vec_noinv(s, type, vece, TCG_TMP_VEC, c1, c2, cond);
+
+ /*
+ * Since XMM0 is 16, the only way we get 0 into V3
+ * is via the constant zero constraint.
+ */
+ if (!v3) {
+ if (inv) {
+ tcg_out_vex_modrm_type(s, OPC_PAND, v0, TCG_TMP_VEC, v4, type);
+ } else {
+ tcg_out_vex_modrm_type(s, OPC_PANDN, v0, TCG_TMP_VEC, v4, type);
+ }
+ } else {
+ if (inv) {
+ TCGReg swap = v3;
+ v3 = v4;
+ v4 = swap;
+ }
+ tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, v0, v4, v3, type);
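+ /* The blend mask register is encoded in bits 7:4 of the immediate. */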
+ tcg_out8(s, (TCG_TMP_VEC - TCG_REG_XMM0) << 4);
+ }
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -3050,12 +3837,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static int const shift_imm_insn[4] = {
OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
};
- static int const cmpeq_insn[4] = {
- OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
- };
- static int const cmpgt_insn[4] = {
- OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
- };
static int const punpckl_insn[4] = {
OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
};
@@ -3074,12 +3855,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static int const smax_insn[4] = {
OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ
};
- static int const umin_insn[4] = {
- OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
- };
- static int const umax_insn[4] = {
- OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
- };
static int const rotlv_insn[4] = {
OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
};
@@ -3231,29 +4006,21 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
goto gen_simd;
gen_simd:
tcg_debug_assert(insn != OPC_UD2);
- if (type == TCG_TYPE_V256) {
- insn |= P_VEXL;
- }
- tcg_out_vex_modrm(s, insn, a0, a1, a2);
+ tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type);
break;
case INDEX_op_cmp_vec:
- sub = args[3];
- if (sub == TCG_COND_EQ) {
- insn = cmpeq_insn[vece];
- } else if (sub == TCG_COND_GT) {
- insn = cmpgt_insn[vece];
- } else {
- g_assert_not_reached();
- }
- goto gen_simd;
+ tcg_out_cmp_vec(s, type, vece, a0, a1, a2, args[3]);
+ break;
+
+ case INDEX_op_cmpsel_vec:
+ tcg_out_cmpsel_vec(s, type, vece, a0, a1, a2,
+ args[3], args[4], args[5]);
+ break;
case INDEX_op_andc_vec:
insn = OPC_PANDN;
- if (type == TCG_TYPE_V256) {
- insn |= P_VEXL;
- }
- tcg_out_vex_modrm(s, insn, a0, a2, a1);
+ tcg_out_vex_modrm_type(s, insn, a0, a2, a1, type);
break;
case INDEX_op_shli_vec:
@@ -3281,10 +4048,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
goto gen_shift;
gen_shift:
tcg_debug_assert(vece != MO_8);
- if (type == TCG_TYPE_V256) {
- insn |= P_VEXL;
- }
- tcg_out_vex_modrm(s, insn, sub, a0, a1);
+ tcg_out_vex_modrm_type(s, insn, sub, a0, a1, type);
tcg_out8(s, a2);
break;
@@ -3361,22 +4125,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
gen_simd_imm8:
tcg_debug_assert(insn != OPC_UD2);
- if (type == TCG_TYPE_V256) {
- insn |= P_VEXL;
- }
- tcg_out_vex_modrm(s, insn, a0, a1, a2);
+ tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type);
tcg_out8(s, sub);
break;
- case INDEX_op_x86_vpblendvb_vec:
- insn = OPC_VPBLENDVB;
- if (type == TCG_TYPE_V256) {
- insn |= P_VEXL;
- }
- tcg_out_vex_modrm(s, insn, a0, a1, a2);
- tcg_out8(s, args[3] << 4);
- break;
-
case INDEX_op_x86_psrldq_vec:
tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
tcg_out8(s, a2);
@@ -3389,196 +4141,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
}
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- return C_O0_I2(qi, r);
-
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- return C_O0_I2(ri, r);
-
- case INDEX_op_st_i64:
- return C_O0_I2(re, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, re);
-
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- return C_O1_I2(r, 0, re);
-
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- return C_O1_I2(r, 0, reZ);
-
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- return C_O1_I2(r, r, rI);
-
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
-
- case INDEX_op_rotl_i32:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotr_i64:
- return C_O1_I2(r, 0, ci);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, reT);
-
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_extrh_i64_i32:
- return C_O1_I1(r, 0);
-
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- return C_O1_I1(r, q);
-
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_extract2_i32:
- case INDEX_op_extract2_i64:
- return C_O1_I2(r, 0, r);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(q, 0, qi);
-
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(q, r, reT);
-
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, reT, r, 0);
-
- case INDEX_op_div2_i32:
- case INDEX_op_div2_i64:
- case INDEX_op_divu2_i32:
- case INDEX_op_divu2_i64:
- return C_O2_I3(a, d, 0, 1, r);
-
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
- return C_O2_I2(a, d, a, r);
-
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_N1_O1_I4(r, r, 0, 1, re, re);
-
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
-
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
-
- case INDEX_op_qemu_ld_a32_i32:
- return C_O1_I1(r, L);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L);
-
- case INDEX_op_qemu_st_a32_i32:
- return C_O0_I2(L, L);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
- case INDEX_op_qemu_st8_a32_i32:
- return C_O0_I2(s, L);
- case INDEX_op_qemu_st8_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L);
-
- case INDEX_op_qemu_ld_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L);
-
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
- case INDEX_op_qemu_st_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I4(L, L, L, L);
-
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- return C_O2_I1(r, r, L);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- return C_O0_I3(L, L, L);
-
- case INDEX_op_brcond2_i32:
- return C_O0_I4(r, r, ri, ri);
-
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, r, r, ri, ri);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
return C_O1_I1(x, r);
@@ -3642,11 +4208,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I3(x, 0, x, x);
case INDEX_op_bitsel_vec:
- case INDEX_op_x86_vpblendvb_vec:
return C_O1_I3(x, x, x, x);
+ case INDEX_op_cmpsel_vec:
+ return C_O1_I4(x, x, x, xO, x);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
@@ -3979,145 +4546,59 @@ static void expand_vec_mul(TCGType type, unsigned vece,
}
}
-static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
+static TCGCond expand_vec_cond(TCGType type, unsigned vece,
+ TCGArg *a1, TCGArg *a2, TCGCond cond)
{
- enum {
- NEED_INV = 1,
- NEED_SWAP = 2,
- NEED_BIAS = 4,
- NEED_UMIN = 8,
- NEED_UMAX = 16,
- };
- TCGv_vec t1, t2, t3;
- uint8_t fixup;
-
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_GT:
- fixup = 0;
- break;
- case TCG_COND_NE:
- case TCG_COND_LE:
- fixup = NEED_INV;
- break;
- case TCG_COND_LT:
- fixup = NEED_SWAP;
- break;
- case TCG_COND_GE:
- fixup = NEED_SWAP | NEED_INV;
- break;
- case TCG_COND_LEU:
- if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
- fixup = NEED_UMIN;
- } else {
- fixup = NEED_BIAS | NEED_INV;
- }
- break;
- case TCG_COND_GTU:
- if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
- fixup = NEED_UMIN | NEED_INV;
- } else {
- fixup = NEED_BIAS;
- }
- break;
- case TCG_COND_GEU:
- if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
- fixup = NEED_UMAX;
- } else {
- fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
- }
- break;
- case TCG_COND_LTU:
- if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
- fixup = NEED_UMAX | NEED_INV;
- } else {
- fixup = NEED_BIAS | NEED_SWAP;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
- if (fixup & NEED_INV) {
- cond = tcg_invert_cond(cond);
- }
- if (fixup & NEED_SWAP) {
- t1 = v1, v1 = v2, v2 = t1;
- cond = tcg_swap_cond(cond);
- }
+ /*
+ * Without AVX512, there are no 64-bit unsigned comparisons.
+ * We must bias the inputs so that they become signed.
+ * All other swapping and inversion are handled during code generation.
+ */
+ if (vece == MO_64 && !have_avx512dq && is_unsigned_cond(cond)) {
+ TCGv_vec v1 = temp_tcgv_vec(arg_temp(*a1));
+ TCGv_vec v2 = temp_tcgv_vec(arg_temp(*a2));
+ TCGv_vec t1 = tcg_temp_new_vec(type);
+ TCGv_vec t2 = tcg_temp_new_vec(type);
+ TCGv_vec t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
- t1 = t2 = NULL;
- if (fixup & (NEED_UMIN | NEED_UMAX)) {
- t1 = tcg_temp_new_vec(type);
- if (fixup & NEED_UMIN) {
- tcg_gen_umin_vec(vece, t1, v1, v2);
- } else {
- tcg_gen_umax_vec(vece, t1, v1, v2);
- }
- v2 = t1;
- cond = TCG_COND_EQ;
- } else if (fixup & NEED_BIAS) {
- t1 = tcg_temp_new_vec(type);
- t2 = tcg_temp_new_vec(type);
- t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
tcg_gen_sub_vec(vece, t1, v1, t3);
tcg_gen_sub_vec(vece, t2, v2, t3);
- v1 = t1;
- v2 = t2;
+ *a1 = tcgv_vec_arg(t1);
+ *a2 = tcgv_vec_arg(t2);
cond = tcg_signed_cond(cond);
}
-
- tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
- /* Expand directly; do not recurse. */
- vec_gen_4(INDEX_op_cmp_vec, type, vece,
- tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
-
- if (t1) {
- tcg_temp_free_vec(t1);
- if (t2) {
- tcg_temp_free_vec(t2);
- }
- }
- return fixup & NEED_INV;
+ return cond;
}
-static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
+static void expand_vec_cmp(TCGType type, unsigned vece, TCGArg a0,
+ TCGArg a1, TCGArg a2, TCGCond cond)
{
- if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
- tcg_gen_not_vec(vece, v0, v0);
- }
+ cond = expand_vec_cond(type, vece, &a1, &a2, cond);
+ /* Expand directly; do not recurse. */
+ vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);
}
-static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec c1, TCGv_vec c2,
- TCGv_vec v3, TCGv_vec v4, TCGCond cond)
+static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGArg a0,
+ TCGArg a1, TCGArg a2,
+ TCGArg a3, TCGArg a4, TCGCond cond)
{
- TCGv_vec t = tcg_temp_new_vec(type);
-
- if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
- /* Invert the sense of the compare by swapping arguments. */
- TCGv_vec x;
- x = v3, v3 = v4, v4 = x;
- }
- vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece,
- tcgv_vec_arg(v0), tcgv_vec_arg(v4),
- tcgv_vec_arg(v3), tcgv_vec_arg(t));
- tcg_temp_free_vec(t);
+ cond = expand_vec_cond(type, vece, &a1, &a2, cond);
+ /* Expand directly; do not recurse. */
+ vec_gen_6(INDEX_op_cmpsel_vec, type, vece, a0, a1, a2, a3, a4, cond);
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
va_list va;
- TCGArg a2;
- TCGv_vec v0, v1, v2, v3, v4;
+ TCGArg a1, a2, a3, a4, a5;
+ TCGv_vec v0, v1, v2;
va_start(va, a0);
- v0 = temp_tcgv_vec(arg_temp(a0));
- v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+ a1 = va_arg(va, TCGArg);
a2 = va_arg(va, TCGArg);
+ v0 = temp_tcgv_vec(arg_temp(a0));
+ v1 = temp_tcgv_vec(arg_temp(a1));
switch (opc) {
case INDEX_op_shli_vec:
@@ -4153,15 +4634,15 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
break;
case INDEX_op_cmp_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
+ a3 = va_arg(va, TCGArg);
+ expand_vec_cmp(type, vece, a0, a1, a2, a3);
break;
case INDEX_op_cmpsel_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
- v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
- expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
+ a3 = va_arg(va, TCGArg);
+ a4 = va_arg(va, TCGArg);
+ a5 = va_arg(va, TCGArg);
+ expand_vec_cmpsel(type, vece, a0, a1, a2, a3, a4, a5);
break;
default:
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 2f67a97..3cbdfbc 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -25,8 +25,6 @@
#ifndef I386_TCG_TARGET_H
#define I386_TCG_TARGET_H
-#include "host/cpuinfo.h"
-
#define TCG_TARGET_INSN_UNIT_SIZE 1
#ifdef __x86_64__
@@ -90,164 +88,4 @@ typedef enum {
TCG_REG_CALL_STACK = TCG_REG_ESP
} TCGReg;
-/* used for function call generation */
-#define TCG_TARGET_STACK_ALIGN 16
-#if defined(_WIN64)
-#define TCG_TARGET_CALL_STACK_OFFSET 32
-#else
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#endif
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#if defined(_WIN64)
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC
-#elif TCG_TARGET_REG_BITS == 64
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-#else
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
-#endif
-
-#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
-#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
-#define have_avx1 (cpuinfo & CPUINFO_AVX1)
-#define have_avx2 (cpuinfo & CPUINFO_AVX2)
-#define have_movbe (cpuinfo & CPUINFO_MOVBE)
-
-/*
- * There are interesting instructions in AVX512, so long as we have AVX512VL,
- * which indicates support for EVEX on sizes smaller than 512 bits.
- */
-#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
- (cpuinfo & CPUINFO_AVX512F))
-#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
-#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
-#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div2_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 have_bmi1
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 1
-#define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
-/* Keep 32-bit values zero-extended in a register. */
-#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_div2_i64 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 have_bmi1
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 1
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-#else
-#define TCG_TARGET_HAS_qemu_st8_i32 1
-#endif
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 \
- (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
-
-#define TCG_TARGET_HAS_tst 1
-
-/* We do not support older SSE systems, only beginning with AVX1. */
-#define TCG_TARGET_HAS_v64 have_avx1
-#define TCG_TARGET_HAS_v128 have_avx1
-#define TCG_TARGET_HAS_v256 have_avx2
-
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec have_avx512vl
-#define TCG_TARGET_HAS_nand_vec have_avx512vl
-#define TCG_TARGET_HAS_nor_vec have_avx512vl
-#define TCG_TARGET_HAS_eqv_vec have_avx512vl
-#define TCG_TARGET_HAS_not_vec have_avx512vl
-#define TCG_TARGET_HAS_neg_vec 0
-#define TCG_TARGET_HAS_abs_vec 1
-#define TCG_TARGET_HAS_roti_vec have_avx512vl
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec have_avx512vl
-#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 1
-#define TCG_TARGET_HAS_shv_vec have_avx2
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_sat_vec 1
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
-#define TCG_TARGET_HAS_cmpsel_vec -1
-#define TCG_TARGET_HAS_tst_vec 0
-
-#define TCG_TARGET_deposit_i32_valid(ofs, len) \
- (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
- (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
-#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
-
-/* Check for the possibility of high-byte extraction and, for 64-bit,
- zero-extending 32-bit right-shift. */
-#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8)
-#define TCG_TARGET_extract_i64_valid(ofs, len) \
- (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
-
-/* This defines the natural memory order supported by this
- * architecture before guarantees made by various barrier
- * instructions.
- *
- * The x86 has a pretty strong memory ordering which only really
- * allows for some stores to be re-ordered after loads.
- */
-#include "tcg/tcg-mo.h"
-
-#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
-
#endif
diff --git a/tcg/i386/tcg-target.opc.h b/tcg/i386/tcg-target.opc.h
deleted file mode 100644
index b5f403e..0000000
--- a/tcg/i386/tcg-target.opc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Linaro
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- * Target-specific opcodes for host vector expansion. These will be
- * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
- * consider these to be UNSPEC with names.
- */
-
-DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC)
-DEF(x86_vpblendvb_vec, 1, 3, 0, IMPLVEC)
-DEF(x86_blend_vec, 1, 2, 1, IMPLVEC)
-DEF(x86_packss_vec, 1, 2, 0, IMPLVEC)
-DEF(x86_packus_vec, 1, 2, 0, IMPLVEC)
-DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC)
-DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC)
-DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC)
-DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC)
-DEF(x86_vpshldi_vec, 1, 2, 1, IMPLVEC)
-DEF(x86_vpshldv_vec, 1, 3, 0, IMPLVEC)
-DEF(x86_vpshrdv_vec, 1, 3, 0, IMPLVEC)
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index cae6c2a..fd731c0 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -15,27 +15,23 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(r, rz)
C_O0_I2(w, r)
C_O0_I3(r, r, r)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
-C_O1_I2(r, r, rC)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rU)
C_O1_I2(r, r, rW)
-C_O1_I2(r, r, rZ)
-C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rZ, ri)
-C_O1_I2(r, rZ, rJ)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, 0, rz)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O1_I4(r, r, rJ, rz, rz)
C_N2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index 2ba9c13..e5e5745 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -23,8 +23,6 @@ REGS('w', ALL_VECTOR_REGS)
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U12)
-CONST('Z', TCG_CT_CONST_ZERO)
-CONST('C', TCG_CT_CONST_C12)
CONST('W', TCG_CT_CONST_WSZ)
CONST('M', TCG_CT_CONST_VCMP)
CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
new file mode 100644
index 0000000..32abc6f
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#include "host/cpuinfo.h"
+
+/* 64-bit operations */
+#define TCG_TARGET_HAS_extr_i64_i32 1
+
+#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
+
+#define TCG_TARGET_HAS_tst 0
+
+#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
+#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
+#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX)
+
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 1
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_roti_vec 1
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 1
+#define TCG_TARGET_HAS_cmpsel_vec 0
+#define TCG_TARGET_HAS_tst_vec 0
+
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
+
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
+ return true;
+ }
+ return ofs == 0 && (len == 8 || len == 16);
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+#endif
diff --git a/tcg/loongarch64/tcg-target-mo.h b/tcg/loongarch64/tcg-target-mo.h
new file mode 100644
index 0000000..d355069
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target-opc.h.inc
index fd1a40b..fd1a40b 100644
--- a/tcg/loongarch64/tcg-target.opc.h
+++ b/tcg/loongarch64/tcg-target-opc.h.inc
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 5b7ed5c..10c6921 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -29,9 +29,17 @@
* THE SOFTWARE.
*/
-#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
+
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"zero",
@@ -165,14 +173,12 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_GUEST_BASE_REG TCG_REG_S1
-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_S12 0x200
-#define TCG_CT_CONST_S32 0x400
-#define TCG_CT_CONST_U12 0x800
-#define TCG_CT_CONST_C12 0x1000
-#define TCG_CT_CONST_WSZ 0x2000
-#define TCG_CT_CONST_VCMP 0x4000
-#define TCG_CT_CONST_VADD 0x8000
+#define TCG_CT_CONST_S12 0x100
+#define TCG_CT_CONST_S32 0x200
+#define TCG_CT_CONST_U12 0x400
+#define TCG_CT_CONST_WSZ 0x800
+#define TCG_CT_CONST_VCMP 0x1000
+#define TCG_CT_CONST_VADD 0x2000
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -189,9 +195,6 @@ static bool tcg_target_const_match(int64_t val, int ct,
if (ct & TCG_CT_CONST) {
return true;
}
- if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
- return true;
- }
if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
return true;
}
@@ -201,18 +204,27 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
return true;
}
- if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
- return true;
- }
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return true;
}
- int64_t vec_val = sextract64(val, 0, 8 << vece);
- if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
- return true;
- }
- if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
- return true;
+ if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) {
+ int64_t vec_val = sextract64(val, 0, 8 << vece);
+ if (ct & TCG_CT_CONST_VCMP) {
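+ /* vseqi/vslei/vslti take a signed si5; the unsigned forms take a ui5. */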
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_LE:
+ case TCG_COND_LT:
+ return -0x10 <= vec_val && vec_val <= 0x0f;
+ case TCG_COND_LEU:
+ case TCG_COND_LTU:
+ return 0x00 <= vec_val && vec_val <= 0x1f;
+ default:
+ return false;
+ }
+ }
+ if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+ return true;
+ }
}
return false;
}
@@ -289,7 +301,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
* TCG intrinsics
*/
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Baseline LoongArch only has the full barrier, unfortunately. */
tcg_out_opc_dbar(s, 0);
@@ -534,28 +546,6 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
tcg_out_ext32s(s, ret, arg);
}
-static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
- TCGReg a0, TCGReg a1, TCGReg a2,
- bool c2, bool is_32bit)
-{
- if (c2) {
- /*
- * Fast path: semantics already satisfied due to constraint and
- * insn behavior, single instruction is enough.
- */
- tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
- /* all clz/ctz insns belong to DJ-format */
- tcg_out32(s, encode_dj_insn(opc, a0, a1));
- return;
- }
-
- tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
- /* a0 = a1 ? REG_TMP0 : a2 */
- tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
- tcg_out_opc_masknez(s, a0, a2, a1);
- tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
-}
-
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -650,21 +640,35 @@ static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
default:
g_assert_not_reached();
- break;
}
return ret | flags;
}
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2, bool c2)
+ TCGReg arg1, tcg_target_long arg2,
+ bool c2, bool neg)
{
int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
+ TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
- if (tmpflags != ret) {
- TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
-
+ if (neg) {
+ /* If intermediate result is zero/non-zero: test != 0. */
+ if (tmpflags & SETCOND_NEZ) {
+ tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
+ tmp = ret;
+ }
+ /* Produce the 0/-1 result. */
+ if (tmpflags & SETCOND_INV) {
+ tcg_out_opc_addi_d(s, ret, tmp, -1);
+ } else {
+ tcg_out_opc_sub_d(s, ret, TCG_REG_ZERO, tmp);
+ }
+ } else {
switch (tmpflags & SETCOND_FLAGS) {
+ case 0:
+ tcg_debug_assert(tmp == ret);
+ break;
case SETCOND_INV:
/* Intermediate result is boolean: simply invert. */
tcg_out_opc_xori(s, ret, tmp, 1);
@@ -683,11 +687,47 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, tcg_target_long c2, bool const2,
- TCGReg v1, TCGReg v2)
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
{
- int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
+{
+ int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const_c2);
TCGReg t;
/* Standardize the test below to t != 0. */
@@ -707,10 +747,21 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rJ, rz, rz),
+ .out = tgen_movcond,
+};
+
/*
* Branch helpers
*/
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, l, 0);
+ tcg_out_opc_b(s, 0);
+}
+
static const struct {
LoongArchInsn op;
bool swap;
@@ -727,8 +778,8 @@ static const struct {
[TCG_COND_GTU] = { OPC_BGTU, false }
};
-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
@@ -745,6 +796,11 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -1003,13 +1059,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -1035,7 +1091,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
}
tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
- a_bits, s->page_bits - 1);
+ a_bits, TARGET_PAGE_BITS - 1);
/* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
@@ -1048,7 +1104,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
/*
* Without micro-architecture details, we don't know which of
@@ -1111,22 +1167,27 @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
- tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
+ tcg_out_qemu_ld_indexed(s, get_memop(oi), type, data_reg, h);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
TCGReg rd, HostAddress h)
{
@@ -1151,8 +1212,8 @@ static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -1161,12 +1222,17 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -1214,6 +1280,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi
}
}
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_N2_I1(r, r, r),
+ .out = tgen_qemu_ld2,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(r, r, r),
+ .out = tgen_qemu_st2,
+};
+
/*
* Entry-points
*/
@@ -1251,6 +1339,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -1271,423 +1364,684 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
-{
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
- TCGArg a3 = args[3];
- int c2 = const_args[2];
- switch (opc) {
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_add_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_d(s, a0, a1, a2);
+ }
+}
- case INDEX_op_goto_ptr:
- tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tcg_out_addi,
+};
- case INDEX_op_br:
- tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
- 0);
- tcg_out_opc_b(s, 0);
- break;
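+/* LoongArch has no carry flag; leave the carry-propagating ops to generic expansion. */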
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_extrh_i64_i32:
- tcg_out_opc_srai_d(s, a0, a1, 32);
- break;
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
- break;
+static void tcg_out_set_carry(TCGContext *s)
+{
+ g_assert_not_reached();
+}
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- if (c2) {
- tcg_out_opc_ori(s, a0, a1, a2);
- tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
- } else {
- tcg_out_opc_nor(s, a0, a1, a2);
- }
- break;
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_and(s, a0, a1, a2);
+}
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- if (c2) {
- /* guaranteed to fit due to constraint */
- tcg_out_opc_andi(s, a0, a1, ~a2);
- } else {
- tcg_out_opc_andn(s, a0, a1, a2);
- }
- break;
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_andi(s, a0, a1, a2);
+}
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- if (c2) {
- /* guaranteed to fit due to constraint */
- tcg_out_opc_ori(s, a0, a1, ~a2);
- } else {
- tcg_out_opc_orn(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_opc_andi(s, a0, a1, a2);
- } else {
- tcg_out_opc_and(s, a0, a1, a2);
- }
- break;
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_andn(s, a0, a1, a2);
+}
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_opc_ori(s, a0, a1, a2);
- } else {
- tcg_out_opc_or(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- if (c2) {
- tcg_out_opc_xori(s, a0, a1, a2);
- } else {
- tcg_out_opc_xor(s, a0, a1, a2);
- }
- break;
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* a2 is constrained to exactly the type width. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_clz_w(s, a0, a1);
+ } else {
+ tcg_out_opc_clz_d(s, a0, a1);
+ }
+}
- case INDEX_op_extract_i32:
- tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
- break;
- case INDEX_op_extract_i64:
- tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
- break;
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_clzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
+ /* a0 = a1 ? REG_TMP0 : a2 */
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
+ tcg_out_opc_masknez(s, a0, a2, a1);
+ tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
+}
- case INDEX_op_deposit_i32:
- tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
- break;
- case INDEX_op_deposit_i64:
- tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_O1_I2(r, r, rW),
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- tcg_out_opc_revb_2h(s, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext16u(s, a0, a0);
- }
- break;
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_bswap32_i32:
- /* All 32-bit values are computed sign-extended in the register. */
- a2 = TCG_BSWAP_OS;
- /* fallthrough */
- case INDEX_op_bswap32_i64:
- tcg_out_opc_revb_2w(s, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, a0, a0);
- }
- break;
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* a2 is constrained to exactly the type width. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_ctz_w(s, a0, a1);
+ } else {
+ tcg_out_opc_ctz_d(s, a0, a1);
+ }
+}
- case INDEX_op_bswap64_i64:
- tcg_out_opc_revb_d(s, a0, a1);
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_ctzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
+ /* a0 = a1 ? REG_TMP0 : a2 */
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
+ tcg_out_opc_masknez(s, a0, a2, a1);
+ tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
+}
- case INDEX_op_clz_i32:
- tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
- break;
- case INDEX_op_clz_i64:
- tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
- break;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_O1_I2(r, r, rW),
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
- case INDEX_op_ctz_i32:
- tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
- break;
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_div_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_div_d(s, a0, a1, a2);
+ }
+}
- case INDEX_op_shl_i32:
- if (c2) {
- tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_sll_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_shl_i64:
- if (c2) {
- tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_sll_d(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
- case INDEX_op_shr_i32:
- if (c2) {
- tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_srl_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_shr_i64:
- if (c2) {
- tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_srl_d(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_sar_i32:
- if (c2) {
- tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_sra_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_sar_i64:
- if (c2) {
- tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_sra_d(s, a0, a1, a2);
- }
- break;
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_div_wu(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_div_du(s, a0, a1, a2);
+ }
+}
- case INDEX_op_rotl_i32:
- /* transform into equivalent rotr/rotri */
- if (c2) {
- tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
- } else {
- tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
- tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
- }
- break;
- case INDEX_op_rotl_i64:
- /* transform into equivalent rotr/rotri */
- if (c2) {
- tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
- } else {
- tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
- tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
- }
- break;
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
- case INDEX_op_rotr_i32:
- if (c2) {
- tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_rotr_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_rotr_i64:
- if (c2) {
- tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_rotr_d(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
- } else {
- tcg_out_opc_add_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
- } else {
- tcg_out_opc_add_d(s, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_d(s, a0, a1, a2);
- }
- break;
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_srai_d(s, a0, a1, 32);
+}
- case INDEX_op_neg_i32:
- tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
- break;
- case INDEX_op_neg_i64:
- tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
- break;
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
- case INDEX_op_mul_i32:
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_mul_w(s, a0, a1, a2);
- break;
- case INDEX_op_mul_i64:
+ } else {
tcg_out_opc_mul_d(s, a0, a1, a2);
- break;
+ }
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_mulsh_i32:
+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_mulh_w(s, a0, a1, a2);
- break;
- case INDEX_op_mulsh_i64:
+ } else {
tcg_out_opc_mulh_d(s, a0, a1, a2);
- break;
+ }
+}
- case INDEX_op_muluh_i32:
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_mulh_wu(s, a0, a1, a2);
- break;
- case INDEX_op_muluh_i64:
+ } else {
tcg_out_opc_mulh_du(s, a0, a1, a2);
- break;
+ }
+}
- case INDEX_op_div_i32:
- tcg_out_opc_div_w(s, a0, a1, a2);
- break;
- case INDEX_op_div_i64:
- tcg_out_opc_div_d(s, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_muluh,
+};
- case INDEX_op_divu_i32:
- tcg_out_opc_div_wu(s, a0, a1, a2);
- break;
- case INDEX_op_divu_i64:
- tcg_out_opc_div_du(s, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_nor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_nor(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nor,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_or(s, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_ori(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
- case INDEX_op_rem_i32:
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_orn(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
+
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_mod_w(s, a0, a1, a2);
- break;
- case INDEX_op_rem_i64:
+ } else {
tcg_out_opc_mod_d(s, a0, a1, a2);
- break;
+ }
+}
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rems,
+};
- case INDEX_op_remu_i32:
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_mod_wu(s, a0, a1, a2);
- break;
- case INDEX_op_remu_i64:
+ } else {
tcg_out_opc_mod_du(s, a0, a1, a2);
- break;
+ }
+}
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, args[3], a0, a1, a2, c2);
- break;
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_remu,
+};
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
- break;
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
- break;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
- break;
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
- break;
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_rotr_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_rotr_d(s, a0, a1, a2);
+ }
+}
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
- break;
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
+ }
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
- break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
- break;
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sra_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_sra_d(s, a0, a1, a2);
+ }
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
}
}
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sll_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_sll_d(s, a0, a1, a2);
+ }
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
+ }
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_srl_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_srl_d(s, a0, a1, a2);
+ }
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
+ }
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sub_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_sub_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ g_assert_not_reached();
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_xor(s, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_xori(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_revb_2h(s, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext16u(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_revb_2w(s, a0, a1);
+
+ /* All 32-bit values are computed sign-extended in the register. */
+ if (type == TCG_TYPE_I32 || (flags & TCG_BSWAP_OS)) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_revb_d(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_nor(s, type, a0, a1, TCG_REG_ZERO);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bstrins_w(s, a0, a2, ofs, ofs + len - 1);
+ } else {
+ tcg_out_opc_bstrins_d(s, a0, a2, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rz),
+ .out_rrr = tgen_deposit,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 12) {
+ tcg_out_opc_andi(s, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bstrpick_w(s, a0, a1, ofs, ofs + len - 1);
+ } else {
+ tcg_out_opc_bstrpick_d(s, a0, a1, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs + len == 32) {
+ tcg_out_opc_srai_w(s, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_BU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_B, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_HU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_H, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_WU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_ST_B, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_ST_H, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg rd, TCGReg rs)
{
@@ -2002,28 +2356,22 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
* Try vseqi/vslei/vslti
*/
int64_t value = sextract64(a2, 0, 8 << vece);
- if ((cond == TCG_COND_EQ ||
- cond == TCG_COND_LE ||
- cond == TCG_COND_LT) &&
- (-0x10 <= value && value <= 0x0f)) {
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_LE:
+ case TCG_COND_LT:
insn = cmp_vec_imm_insn[cond][lasx][vece];
tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
break;
- } else if ((cond == TCG_COND_LEU ||
- cond == TCG_COND_LTU) &&
- (0x00 <= value && value <= 0x1f)) {
+ case TCG_COND_LEU:
+ case TCG_COND_LTU:
insn = cmp_vec_imm_insn[cond][lasx][vece];
tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
break;
+ default:
+ g_assert_not_reached();
}
-
- /*
- * Fallback to:
- * dupi_vec temp, a2
- * cmp_vec a0, a1, temp, cond
- */
- tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
- a2 = TCG_VEC_TMP0;
+ break;
}
insn = cmp_vec_insn[cond][lasx][vece];
@@ -2184,157 +2532,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
g_assert_not_reached();
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I2(rZ, r);
-
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- return C_N2_I1(r, r, r);
-
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- return C_O0_I3(r, r, r);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rZ, rZ);
-
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld_i64:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- /*
- * LoongArch insns for these ops don't have reg-imm forms, but we
- * can express using andi/ori if ~constant satisfies
- * TCG_CT_CONST_U12.
- */
- return C_O1_I2(r, r, rC);
-
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotr_i64:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_add_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
-
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- /* LoongArch reg-imm bitops have their imms ZERO-extended */
- return C_O1_I2(r, r, rU);
-
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, rW);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- /* Must deposit into the same register as input */
- return C_O1_I2(r, 0, rZ);
-
- case INDEX_op_sub_i32:
- case INDEX_op_setcond_i32:
- return C_O1_I2(r, rZ, ri);
- case INDEX_op_sub_i64:
- case INDEX_op_setcond_i64:
- return C_O1_I2(r, rZ, rJ);
-
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_mulsh_i32:
- case INDEX_op_mulsh_i64:
- case INDEX_op_muluh_i32:
- case INDEX_op_muluh_i64:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i32:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i32:
- case INDEX_op_remu_i64:
- return C_O1_I2(r, rZ, rZ);
-
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, rZ, rJ, rZ, rZ);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
case INDEX_op_dup_vec:
@@ -2384,7 +2585,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I3(w, w, w, w);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
@@ -2457,6 +2658,14 @@ static void tcg_out_tb_start(TCGContext *s)
/* nothing to do */
}
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
+{
+ for (int i = 0; i < count; ++i) {
+ /* Canonical nop is andi r0,r0,0 */
+ p[i] = OPC_ANDI;
+ }
+}
+
static void tcg_target_init(TCGContext *s)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index 58bd7d2..6a206fb 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -29,8 +29,6 @@
#ifndef LOONGARCH_TCG_TARGET_H
#define LOONGARCH_TCG_TARGET_H
-#include "host/cpuinfo.h"
-
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_NB_REGS 64
@@ -87,117 +85,6 @@ typedef enum {
TCG_VEC_TMP0 = TCG_REG_V23,
} TCGReg;
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_SP
-#define TCG_TARGET_STACK_ALIGN 16
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-
-/* optional instructions */
-#define TCG_TARGET_HAS_negsetcond_i32 0
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_div2_i32 0
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 1
-#define TCG_TARGET_HAS_mulsh_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_brcond2 0
-#define TCG_TARGET_HAS_setcond2 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-/* 64-bit operations */
-#define TCG_TARGET_HAS_negsetcond_i64 0
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_div2_i64 0
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 1
-#define TCG_TARGET_HAS_mulsh_i64 1
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
-
-#define TCG_TARGET_HAS_tst 0
-
-#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
-#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
-#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX)
-
-#define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec 1
-#define TCG_TARGET_HAS_abs_vec 0
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec 1
-#define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 1
-#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_roti_vec 1
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 1
-#define TCG_TARGET_HAS_sat_vec 1
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 1
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 0
-
-#define TCG_TARGET_DEFAULT_MO (0)
-
-#define TCG_TARGET_NEED_LDST_LABELS
+#define TCG_REG_ZERO TCG_REG_ZERO
#endif /* LOONGARCH_TCG_TARGET_H */
diff --git a/tcg/meson.build b/tcg/meson.build
index 69ebb49..706a6eb 100644
--- a/tcg/meson.build
+++ b/tcg/meson.build
@@ -1,4 +1,4 @@
-if not get_option('tcg').allowed()
+if not have_tcg
subdir_done()
endif
@@ -27,24 +27,5 @@ if host_os == 'linux'
tcg_ss.add(files('perf.c'))
endif
-tcg_ss = tcg_ss.apply({})
-
-libtcg_user = static_library('tcg_user',
- tcg_ss.sources() + genh,
- dependencies: tcg_ss.dependencies(),
- c_args: '-DCONFIG_USER_ONLY',
- build_by_default: false)
-
-tcg_user = declare_dependency(objects: libtcg_user.extract_all_objects(recursive: false),
- dependencies: tcg_ss.dependencies())
-user_ss.add(tcg_user)
-
-libtcg_system = static_library('tcg_system',
- tcg_ss.sources() + genh,
- dependencies: tcg_ss.dependencies(),
- c_args: '-DCONFIG_SOFTMMU',
- build_by_default: false)
-
-tcg_system = declare_dependency(objects: libtcg_system.extract_all_objects(recursive: false),
- dependencies: tcg_ss.dependencies())
-system_ss.add(tcg_system)
+user_ss.add_all(tcg_ss)
+system_ss.add_all(tcg_ss)
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index 864034f..5304691 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -10,24 +10,21 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
-C_O0_I3(rZ, r, r)
-C_O0_I3(rZ, rZ, r)
-C_O0_I4(rZ, rZ, rZ, rZ)
-C_O0_I4(rZ, rZ, r, r)
+C_O0_I2(r, rz)
+C_O0_I2(rz, r)
+C_O0_I3(rz, rz, r)
+C_O0_I4(r, r, rz, rz)
C_O1_I1(r, r)
-C_O1_I2(r, 0, rZ)
+C_O1_I2(r, 0, rz)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
-C_O1_I2(r, r, rWZ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
-C_O1_I4(r, rZ, rZ, rZ, 0)
-C_O1_I4(r, rZ, rZ, rZ, rZ)
+C_O1_I2(r, r, rz)
+C_O1_I2(r, r, rzW)
+C_O1_I4(r, r, rz, rz, 0)
+C_O1_I4(r, r, rz, rz, rz)
+C_O1_I4(r, r, r, rz, rz)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rZ, rZ, rN, rN)
diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h
index 413c280..db2b225 100644
--- a/tcg/mips/tcg-target-con-str.h
+++ b/tcg/mips/tcg-target-con-str.h
@@ -17,6 +17,4 @@ REGS('r', ALL_GENERAL_REGS)
CONST('I', TCG_CT_CONST_U16)
CONST('J', TCG_CT_CONST_S16)
CONST('K', TCG_CT_CONST_P2M1)
-CONST('N', TCG_CT_CONST_N16)
CONST('W', TCG_CT_CONST_WSZ)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
new file mode 100644
index 0000000..b9eb338
--- /dev/null
+++ b/tcg/mips/tcg-target-has.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+/* MOVN/MOVZ instructions detection */
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
+ defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
+ defined(_MIPS_ARCH_MIPS4)
+#define use_movnz_instructions 1
+#else
+extern bool use_movnz_instructions;
+#endif
+
+/* MIPS32 instruction set detection */
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
+#define use_mips32_instructions 1
+#else
+extern bool use_mips32_instructions;
+#endif
+
+/* MIPS32R2 instruction set detection */
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
+#define use_mips32r2_instructions 1
+#else
+extern bool use_mips32r2_instructions;
+#endif
+
+/* MIPS32R6 instruction set detection */
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
+#define use_mips32r6_instructions 1
+#else
+#define use_mips32r6_instructions 0
+#endif
+
+/* optional instructions */
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#endif
+
+/* optional instructions detected at runtime */
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 0
+
+#define TCG_TARGET_extract_valid(type, ofs, len) use_mips32r2_instructions
+#define TCG_TARGET_deposit_valid(type, ofs, len) use_mips32r2_instructions
+
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ case 16:
+ return use_mips32r2_instructions;
+ case 32:
+ return type == TCG_TYPE_I64;
+ }
+ }
+ return false;
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+#endif
diff --git a/tcg/mips/tcg-target-mo.h b/tcg/mips/tcg-target-mo.h
new file mode 100644
index 0000000..50cefc2
--- /dev/null
+++ b/tcg/mips/tcg-target-mo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/mips/tcg-target-opc.h.inc b/tcg/mips/tcg-target-opc.h.inc
new file mode 100644
index 0000000..84e777b
--- /dev/null
+++ b/tcg/mips/tcg-target-opc.h.inc
@@ -0,0 +1 @@
+/* No target specific opcodes. */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 3b5b5c6..400eafb 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -24,8 +24,19 @@
* THE SOFTWARE.
*/
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
+/* used for function call generation */
+#define TCG_TARGET_STACK_ALIGN 16
+#if _MIPS_SIM == _ABIO32
+# define TCG_TARGET_CALL_STACK_OFFSET 16
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
+#else
+# define TCG_TARGET_CALL_STACK_OFFSET 0
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
+#endif
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
#if TCG_TARGET_REG_BITS == 32
# define LO_OFF (HOST_BIG_ENDIAN * 4)
@@ -173,12 +184,10 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
g_assert_not_reached();
}
-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */
-#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */
-#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */
-#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
-#define TCG_CT_CONST_WSZ 0x2000 /* word size */
+#define TCG_CT_CONST_U16 0x100 /* Unsigned 16-bit: 0 - 0xffff. */
+#define TCG_CT_CONST_S16 0x200 /* Signed 16-bit: -32768 - 32767 */
+#define TCG_CT_CONST_P2M1 0x400 /* Power of 2 minus 1. */
+#define TCG_CT_CONST_WSZ 0x800 /* word size */
#define ALL_GENERAL_REGS 0xffffffffu
@@ -193,14 +202,10 @@ static bool tcg_target_const_match(int64_t val, int ct,
{
if (ct & TCG_CT_CONST) {
return 1;
- } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
- return 1;
} else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
return 1;
} else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
return 1;
- } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
- return 1;
} else if ((ct & TCG_CT_CONST_P2M1)
&& use_mips32r2_instructions && is_p2m1(val)) {
return 1;
@@ -639,7 +644,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
+ tcg_debug_assert(use_mips32r2_instructions);
tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs);
}
@@ -650,7 +655,7 @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
+ tcg_debug_assert(use_mips32r2_instructions);
tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs);
}
@@ -694,39 +699,6 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
g_assert_not_reached();
}
-static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
-{
- /* ret and arg can't be register tmp0 */
- tcg_debug_assert(ret != TCG_TMP0);
- tcg_debug_assert(arg != TCG_TMP0);
-
- /* With arg = abcd: */
- if (use_mips32r2_instructions) {
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */
- if (flags & TCG_BSWAP_OS) {
- tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */
- } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); /* 00dc */
- }
- return;
- }
-
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */
- if (!(flags & TCG_BSWAP_IZ)) {
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */
- }
- if (flags & TCG_BSWAP_OS) {
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */
- tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */
- } else {
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */
- if (flags & TCG_BSWAP_OZ) {
- tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */
- }
- }
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */
-}
-
static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
{
if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) {
@@ -735,39 +707,6 @@ static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
}
}
-static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
-{
- if (use_mips32r2_instructions) {
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
- tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
- if (flags & TCG_BSWAP_OZ) {
- tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0);
- }
- } else {
- if (flags & TCG_BSWAP_OZ) {
- tcg_out_bswap_subr(s, bswap32u_addr);
- } else {
- tcg_out_bswap_subr(s, bswap32_addr);
- }
- /* delay slot -- never omit the insn, like tcg_out_mov might. */
- tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
- tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
- }
-}
-
-static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
-{
- if (use_mips32r2_instructions) {
- tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
- tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
- } else {
- tcg_out_bswap_subr(s, bswap64_addr);
- /* delay slot -- never omit the insn, like tcg_out_mov might. */
- tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
- tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
- }
-}
-
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
@@ -823,55 +762,6 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
return false;
}
-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
- TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
- bool cbh, bool is_sub)
-{
- TCGReg th = TCG_TMP1;
-
- /* If we have a negative constant such that negating it would
- make the high part zero, we can (usually) eliminate one insn. */
- if (cbl && cbh && bh == -1 && bl != 0) {
- bl = -bl;
- bh = 0;
- is_sub = !is_sub;
- }
-
- /* By operating on the high part first, we get to use the final
- carry operation to move back from the temporary. */
- if (!cbh) {
- tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
- } else if (bh != 0 || ah == rl) {
- tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
- } else {
- th = ah;
- }
-
- /* Note that tcg optimization should eliminate the bl == 0 case. */
- if (is_sub) {
- if (cbl) {
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
- tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
- } else {
- tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
- tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
- }
- tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
- } else {
- if (cbl) {
- tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
- } else if (rl == al && rl == bl) {
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1);
- tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
- } else {
- tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
- tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
- }
- tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
- }
-}
-
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -944,15 +834,44 @@ static void tcg_out_setcond_end(TCGContext *s, TCGReg ret, int tmpflags)
}
}
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg arg1, TCGReg arg2)
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg arg1, TCGReg arg2)
{
int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2);
tcg_out_setcond_end(s, ret, tmpflags);
}
-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rz),
+ .out_rrr = tgen_setcond,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg arg1, TCGReg arg2)
+{
+ int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2);
+ TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
+
+ /* If intermediate result is zero/non-zero: test != 0. */
+ if (tmpflags & SETCOND_NEZ) {
+ tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
+ tmp = ret;
+ }
+ /* Produce the 0/-1 result. */
+ if (tmpflags & SETCOND_INV) {
+ tcg_out_opc_imm(s, OPC_ADDIU, ret, tmp, -1);
+ } else {
+ tcg_out_opc_reg(s, OPC_SUBU, ret, TCG_REG_ZERO, tmp);
+ }
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rz),
+ .out_rrr = tgen_negsetcond,
+};
+
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
static const MIPSInsn b_zero[16] = {
[TCG_COND_LT] = OPC_BLTZ,
@@ -997,6 +916,16 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_nop(s);
}
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, l);
+}
+
static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
@@ -1014,25 +943,37 @@ static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
break;
default:
- tcg_out_setcond(s, TCG_COND_EQ, TCG_TMP0, ah, bh);
- tcg_out_setcond(s, tcg_unsigned_cond(cond), TCG_TMP1, al, bl);
+ tgen_setcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_TMP0, ah, bh);
+ tgen_setcond(s, TCG_TYPE_I32, tcg_unsigned_cond(cond),
+ TCG_TMP1, al, bl);
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP0);
- tcg_out_setcond(s, tcg_high_cond(cond), TCG_TMP0, ah, bh);
+ tgen_setcond(s, TCG_TYPE_I32, tcg_high_cond(cond), TCG_TMP0, ah, bh);
tcg_out_opc_reg(s, OPC_OR, ret, TCG_TMP0, TCG_TMP1);
break;
}
return ret | flags;
}
-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh)
{
int tmpflags = tcg_out_setcond2_int(s, cond, ret, al, ah, bl, bh);
tcg_out_setcond_end(s, ret, tmpflags);
}
-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
- TCGReg bl, TCGReg bh, TCGLabel *l)
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpSetcond2 outop_setcond2 = {
+ .base.static_constraint = C_O1_I4(r, r, r, rz, rz),
+ .out = tgen_setcond2,
+};
+
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh, TCGLabel *l)
{
int tmpflags = tcg_out_setcond2_int(s, cond, TCG_TMP0, al, ah, bl, bh);
TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
@@ -1043,8 +984,17 @@ static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
tcg_out_nop(s);
}
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpBrcond2 outop_brcond2 = {
+ .base.static_constraint = C_O0_I4(r, r, rz, rz),
+ .out = tgen_brcond2,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
{
int tmpflags;
bool eqz;
@@ -1090,6 +1040,13 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = (use_mips32r6_instructions
+ ? C_O1_I4(r, r, rz, rz, rz)
+ : C_O1_I4(r, r, rz, rz, 0)),
+ .out = tgen_movcond,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
/*
@@ -1206,8 +1163,7 @@ bool tcg_target_has_memory_bswap(MemOp memop)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGType addr_type = s->addr_type;
TCGLabelQemuLdst *ldst = NULL;
@@ -1234,8 +1190,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
@@ -1243,82 +1198,66 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the TLB index from the address into TMP3. */
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out_dsrl(s, TCG_TMP3, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_dsrl(s, TCG_TMP3, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
/* Add the tlb_table pointer, creating the CPUTLBEntry address. */
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- /* Load the (low half) tlb comparator. */
+ /* Load the tlb comparator. */
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
cmp_off + HOST_BIG_ENDIAN * 4);
} else {
- tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_REG, TCG_TMP0, TCG_TMP3, cmp_off);
}
- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
- }
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
/*
* Mask the page bits, keeping the alignment bits to compare against.
* For unaligned accesses, compare against the end of the access to
* verify that it does not cross a page boundary.
*/
- tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
+ tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask);
if (a_mask < s_mask) {
tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
|| addr_type == TCG_TYPE_I32
? OPC_ADDIU : OPC_DADDIU),
- TCG_TMP2, addrlo, s_mask - a_mask);
+ TCG_TMP2, addr, s_mask - a_mask);
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
} else {
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addr);
}
/* Zero extend a 32-bit guest address for a 64-bit host. */
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_TMP2, addrlo);
- addrlo = TCG_TMP2;
+ tcg_out_ext32u(s, TCG_TMP2, addr);
+ addr = TCG_TMP2;
}
ldst->label_ptr[0] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
- /* Load and test the high half tlb comparator. */
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* delay slot */
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
-
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
-
- ldst->label_ptr[1] = s->code_ptr;
- tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
- }
-
/* delay slot */
base = TCG_TMP3;
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addr);
} else {
if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addr, a_mask);
ldst->label_ptr[0] = s->code_ptr;
if (use_mips32r6_instructions) {
@@ -1329,7 +1268,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
}
- base = addrlo;
+ base = addr;
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
tcg_out_ext32u(s, TCG_REG_A0, base);
base = TCG_REG_A0;
@@ -1448,30 +1387,66 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
+
+ if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
+ tcg_out_qemu_ld_direct(s, data, 0, h.base, opc, type);
+ } else {
+ tcg_out_qemu_ld_unalign(s, data, 0, h.base, opc, type);
+ }
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = 0;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
- tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, type);
} else {
- tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type);
+ tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, type);
}
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ /* Ensure that the mips32 code is compiled but discarded for mips64. */
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_O2_I1(r, r, r) : C_NotImplemented,
+ .out =
+ TCG_TARGET_REG_BITS == 32 ? tgen_qemu_ld2 : NULL,
+};
+
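
    The "compiled but discarded" comment above describes a deliberate idiom: the 32-bit-only helper stays referenced through a constant conditional, so it keeps being compiled and type-checked, while 64-bit builds fold the pointer to NULL and let the compiler drop it. Below is a minimal standalone sketch of that idiom, not QEMU code; the names (WORD_BITS, narrow_only, op_desc) are invented for illustration.

    /* Hypothetical sketch of the "compile but discard" idiom; not QEMU code. */
    #include <stddef.h>

    enum { WORD_BITS = 64 };                     /* stand-in for TCG_TARGET_REG_BITS */

    static void narrow_only(int x) { (void)x; }  /* helper needed only when WORD_BITS == 32 */

    struct op_desc { void (*out)(int); };

    /* The condition folds at compile time: narrow_only() must still parse and
     * type-check, but with WORD_BITS == 64 the pointer becomes NULL and the
     * helper is dead code the compiler can discard. */
    static const struct op_desc desc = {
        .out = WORD_BITS == 32 ? narrow_only : NULL,
    };

    int main(void)
    {
        if (desc.out) {     /* mirrors how TCG only calls .out when implemented */
            desc.out(0);
        }
        return 0;
    }
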
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, MemOp opc)
{
@@ -1535,15 +1510,43 @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+
+ if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
+ tcg_out_qemu_st_direct(s, data, 0, h.base, opc);
+ } else {
+ tcg_out_qemu_st_unalign(s, data, 0, h.base, opc);
+ }
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = 0;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc);
@@ -1552,14 +1555,22 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ /* Ensure that the mips32 code is compiled but discarded for mips64. */
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_O0_I3(rz, rz, r) : C_NotImplemented,
+ .out =
+ TCG_TARGET_REG_BITS == 32 ? tgen_qemu_st2 : NULL,
+};
+
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
static const MIPSInsn sync[] = {
/* Note that SYNC_MB is slightly weaker than SYNC 0,
@@ -1575,33 +1586,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}
-static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
- int width, TCGReg a0, TCGReg a1, TCGArg a2)
-{
- if (use_mips32r6_instructions) {
- if (a2 == width) {
- tcg_out_opc_reg(s, opcv6, a0, a1, 0);
- } else {
- tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
- tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
- }
- } else {
- if (a2 == width) {
- tcg_out_opc_reg(s, opcv2, a0, a1, a1);
- } else if (a0 == a2) {
- tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
- tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
- } else if (a0 != a1) {
- tcg_out_opc_reg(s, opcv2, a0, a1, a1);
- tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
- } else {
- tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
- tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
- tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
- }
- }
-}
-
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
TCGReg base = TCG_REG_ZERO;
@@ -1661,630 +1645,811 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
}
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
+ } else {
+ tcg_out_nop(s);
+ }
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
/* Always indirect, nothing to do */
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- MIPSInsn i1, i2;
- TCGArg a0, a1, a2;
- int c2;
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDU : OPC_DADDU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
- /*
- * Note that many operands use the constraint set "rZ".
- * We make use of the fact that 0 is the ZERO register,
- * and hence such cases need not check for const_args.
- */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
- c2 = const_args[2];
-
- switch (opc) {
- case INDEX_op_goto_ptr:
- /* jmp to the given host address (could be epilogue) */
- tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ g_assert_not_reached();
+}
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int msb;
+
+ if (a2 == (uint16_t)a2) {
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+ return;
+ }
+
+ tcg_debug_assert(use_mips32r2_instructions);
+ tcg_debug_assert(is_p2m1(a2));
+ msb = ctz64(~a2) - 1;
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
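
    tgen_andi() above splits constant AND into two cases: masks that fit the 16-bit zero-extended ANDI immediate, and masks of the form 2^n - 1 (is_p2m1) that are instead implemented as an extract of the low n bits, with msb = ctz64(~a2) - 1 giving the highest bit kept. The following is a small host-side check of that arithmetic only; the EXT encoding itself is assumed, not emitted.

    /* Checks the mask arithmetic behind the ANDI/EXT split in tgen_andi(). */
    #include <stdint.h>
    #include <stdio.h>

    static int is_p2m1(uint64_t m) { return ((m + 1) & m) == 0; }  /* 2^n - 1 ? */

    int main(void)
    {
        uint64_t a2 = 0x3ffff;                    /* 2^18 - 1: too wide for ANDI */
        int fits_andi = a2 == (uint16_t)a2;       /* 0: needs the extract path */
        int msb = __builtin_ctzll(~a2) - 1;       /* mirrors ctz64(~a2) - 1 */

        printf("fits_andi=%d p2m1=%d msb=%d -> EXT keeps bits [%d:0]\n",
               fits_andi, is_p2m1(a2), msb, msb); /* fits_andi=0 p2m1=1 msb=17 */
        return 0;
    }
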
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
+ tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
+ tgen_movcond(s, TCG_TYPE_REG, TCG_COND_EQ, a0, a1, a2, false,
+ TCG_TMP0, false, TCG_REG_ZERO, false);
+ } else {
+ MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
+ if (a0 == a2) {
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
+ } else if (a0 != a1) {
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
} else {
- tcg_out_nop(s);
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
+ tcg_out_mov(s, type, a0, TCG_TMP0);
}
- break;
- case INDEX_op_br:
- tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
- arg_label(a0));
- break;
+ }
+}
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- i1 = OPC_LBU;
- goto do_ldst;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- i1 = OPC_LB;
- goto do_ldst;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- i1 = OPC_LHU;
- goto do_ldst;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- i1 = OPC_LH;
- goto do_ldst;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32s_i64:
- i1 = OPC_LW;
- goto do_ldst;
- case INDEX_op_ld32u_i64:
- i1 = OPC_LWU;
- goto do_ldst;
- case INDEX_op_ld_i64:
- i1 = OPC_LD;
- goto do_ldst;
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- i1 = OPC_SB;
- goto do_ldst;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- i1 = OPC_SH;
- goto do_ldst;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- i1 = OPC_SW;
- goto do_ldst;
- case INDEX_op_st_i64:
- i1 = OPC_SD;
- do_ldst:
- tcg_out_ldst(s, i1, a0, a1, a2);
- break;
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ tgen_clz(s, type, a0, a1, TCG_REG_ZERO);
+ } else if (use_mips32r6_instructions) {
+ MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
+ tcg_out_opc_reg(s, opcv6, a0, a1, 0);
+ } else {
+ MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
+ }
+}
- case INDEX_op_add_i32:
- i1 = OPC_ADDU, i2 = OPC_ADDIU;
- goto do_binary;
- case INDEX_op_add_i64:
- i1 = OPC_DADDU, i2 = OPC_DADDIU;
- goto do_binary;
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- i1 = OPC_OR, i2 = OPC_ORI;
- goto do_binary;
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- i1 = OPC_XOR, i2 = OPC_XORI;
- do_binary:
- if (c2) {
- tcg_out_opc_imm(s, i2, a0, a1, a2);
- break;
- }
- do_binaryv:
- tcg_out_opc_reg(s, i1, a0, a1, a2);
- break;
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
+{
+ return use_mips32r2_instructions ? C_O1_I2(r, r, rzW) : C_NotImplemented;
+}
- case INDEX_op_sub_i32:
- i1 = OPC_SUBU, i2 = OPC_ADDIU;
- goto do_subtract;
- case INDEX_op_sub_i64:
- i1 = OPC_DSUBU, i2 = OPC_DADDIU;
- do_subtract:
- if (c2) {
- tcg_out_opc_imm(s, i2, a0, a1, -a2);
- break;
- }
- goto do_binaryv;
- case INDEX_op_and_i32:
- if (c2 && a2 != (uint16_t)a2) {
- int msb = ctz32(~a2) - 1;
- tcg_debug_assert(use_mips32r2_instructions);
- tcg_debug_assert(is_p2m1(a2));
- tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
- break;
- }
- i1 = OPC_AND, i2 = OPC_ANDI;
- goto do_binary;
- case INDEX_op_and_i64:
- if (c2 && a2 != (uint16_t)a2) {
- int msb = ctz64(~a2) - 1;
- tcg_debug_assert(use_mips32r2_instructions);
- tcg_debug_assert(is_p2m1(a2));
- tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
- break;
- }
- i1 = OPC_AND, i2 = OPC_ANDI;
- goto do_binary;
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- i1 = OPC_NOR;
- goto do_binaryv;
-
- case INDEX_op_mul_i32:
- if (use_mips32_instructions) {
- tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
- break;
- }
- i1 = OPC_MULT, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_mulsh_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
- break;
- }
- i1 = OPC_MULT, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_muluh_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
- break;
- }
- i1 = OPC_MULTU, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_div_i32:
- if (use_mips32r6_instructions) {
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clz,
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ if (type == TCG_TYPE_I32) {
tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
- break;
- }
- i1 = OPC_DIV, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_divu_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
- break;
- }
- i1 = OPC_DIVU, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_rem_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
- break;
- }
- i1 = OPC_DIV, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_remu_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
- break;
- }
- i1 = OPC_DIVU, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_mul_i64:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
- break;
- }
- i1 = OPC_DMULT, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_mulsh_i64:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
- break;
- }
- i1 = OPC_DMULT, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_muluh_i64:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
- break;
- }
- i1 = OPC_DMULTU, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_div_i64:
- if (use_mips32r6_instructions) {
+ } else {
tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
- break;
}
- i1 = OPC_DDIV, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_divu_i64:
- if (use_mips32r6_instructions) {
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIV : OPC_DDIV;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
+ } else {
tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
- break;
}
- i1 = OPC_DDIVU, i2 = OPC_MFLO;
- goto do_hilo1;
- case INDEX_op_rem_i64:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
- break;
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIVU : OPC_DDIVU;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_dsra(s, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+#endif
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn;
+
+ if (type == TCG_TYPE_I32) {
+ if (use_mips32_instructions) {
+ tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
+ return;
}
- i1 = OPC_DDIV, i2 = OPC_MFHI;
- goto do_hilo1;
- case INDEX_op_remu_i64:
+ insn = OPC_MULT;
+ } else {
if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
- break;
+ tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
+ return;
}
- i1 = OPC_DDIVU, i2 = OPC_MFHI;
- do_hilo1:
- tcg_out_opc_reg(s, i1, 0, a1, a2);
- tcg_out_opc_reg(s, i2, a0, 0, 0);
- break;
+ insn = OPC_DMULT;
+ }
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+}
- case INDEX_op_muls2_i32:
- i1 = OPC_MULT;
- goto do_hilo2;
- case INDEX_op_mulu2_i32:
- i1 = OPC_MULTU;
- goto do_hilo2;
- case INDEX_op_muls2_i64:
- i1 = OPC_DMULT;
- goto do_hilo2;
- case INDEX_op_mulu2_i64:
- i1 = OPC_DMULTU;
- do_hilo2:
- tcg_out_opc_reg(s, i1, 0, a2, args[3]);
- tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
- tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
- break;
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
- case INDEX_op_neg_i32:
- i1 = OPC_SUBU;
- goto do_unary;
- case INDEX_op_neg_i64:
- i1 = OPC_DSUBU;
- goto do_unary;
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- i1 = OPC_NOR;
- goto do_unary;
- do_unary:
- tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
- break;
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT;
+ tcg_out_opc_reg(s, insn, 0, a2, a3);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+ tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
+}
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- tcg_out_bswap16(s, a0, a1, a2);
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, a0, a1, 0);
- break;
- case INDEX_op_bswap32_i64:
- tcg_out_bswap32(s, a0, a1, a2);
- break;
- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, a0, a1);
- break;
- case INDEX_op_extrh_i64_i32:
- tcg_out_dsra(s, a0, a1, 32);
- break;
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return use_mips32r6_instructions ? C_NotImplemented : C_O2_I2(r, r, r, r);
+}
- case INDEX_op_sar_i32:
- i1 = OPC_SRAV, i2 = OPC_SRA;
- goto do_shift;
- case INDEX_op_shl_i32:
- i1 = OPC_SLLV, i2 = OPC_SLL;
- goto do_shift;
- case INDEX_op_shr_i32:
- i1 = OPC_SRLV, i2 = OPC_SRL;
- goto do_shift;
- case INDEX_op_rotr_i32:
- i1 = OPC_ROTRV, i2 = OPC_ROTR;
- do_shift:
- if (c2) {
- tcg_out_opc_sa(s, i2, a0, a1, a2);
- break;
- }
- do_shiftv:
- tcg_out_opc_reg(s, i1, a0, a2, a1);
- break;
- case INDEX_op_rotl_i32:
- if (c2) {
- tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUH : OPC_DMUH;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULTU : OPC_DMULTU;
+ tcg_out_opc_reg(s, insn, 0, a2, a3);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+ tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_mulu2,
+};
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUHU : OPC_DMUHU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULTU : OPC_DMULTU;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_muluh,
+};
+
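
    On pre-R6 cores the widening multiply used above goes through the HI/LO register pair: MULT/MULTU produce the full double-width product, MFLO reads the low half and MFHI the high half, which is exactly what tgen_muluh() and tgen_mulu2() rely on. A quick host-side check of what the unsigned variant computes:

    /* Checks what the MULTU + MFHI/MFLO sequence yields for 32-bit inputs. */
    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t a = 0xdeadbeef, b = 0xcafebabe;
        uint64_t prod = (uint64_t)a * b;          /* the HI:LO pair as one value */

        uint32_t lo = (uint32_t)prod;             /* what MFLO returns (mul) */
        uint32_t hi = (uint32_t)(prod >> 32);     /* what MFHI returns (muluh) */

        assert((((uint64_t)hi << 32) | lo) == prod);
        return 0;
    }
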
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_nor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_NOR, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nor,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
} else {
- tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
- tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
- }
- break;
- case INDEX_op_sar_i64:
- if (c2) {
- tcg_out_dsra(s, a0, a1, a2);
- break;
- }
- i1 = OPC_DSRAV;
- goto do_shiftv;
- case INDEX_op_shl_i64:
- if (c2) {
- tcg_out_dsll(s, a0, a1, a2);
- break;
- }
- i1 = OPC_DSLLV;
- goto do_shiftv;
- case INDEX_op_shr_i64:
- if (c2) {
- tcg_out_dsrl(s, a0, a1, a2);
- break;
- }
- i1 = OPC_DSRLV;
- goto do_shiftv;
- case INDEX_op_rotr_i64:
- if (c2) {
- tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
- break;
+ tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
}
- i1 = OPC_DROTRV;
- goto do_shiftv;
- case INDEX_op_rotl_i64:
- if (c2) {
- tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIV : OPC_DDIV;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rems,
+};
+
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
} else {
- tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
- tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
+ tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
}
- break;
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIVU : OPC_DDIVU;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
+ }
+}
- case INDEX_op_clz_i32:
- tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
- break;
- case INDEX_op_clz_i64:
- tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_remu,
+};
- case INDEX_op_deposit_i32:
- tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
- break;
- case INDEX_op_deposit_i64:
- tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
- args[3] + args[4] - 1, args[3]);
- break;
- case INDEX_op_extract_i32:
- tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
- break;
- case INDEX_op_extract_i64:
- tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
- args[3] - 1, a2);
- break;
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
- break;
+static TCGConstraintSetIndex cset_rotr(TCGType type, unsigned flags)
+{
+ return use_mips32r2_instructions ? C_O1_I2(r, r, ri) : C_NotImplemented;
+}
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
- break;
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ROTRV : OPC_DROTRV;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, args[3], a0, a1, a2);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
- break;
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sa(s, OPC_ROTR, a0, a1, a2);
+ } else {
+ tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
+ }
+}
- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_rotr,
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SRAV : OPC_DSRAV;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sa(s, OPC_SRA, a0, a1, a2);
+ } else {
+ tcg_out_dsra(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SLLV : OPC_DSLLV;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sa(s, OPC_SLL, a0, a1, a2);
+ } else {
+ tcg_out_dsll(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SRLV : OPC_DSRLV;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sa(s, OPC_SRL, a0, a1, a2);
+ } else {
+ tcg_out_dsrl(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SUBU : OPC_DSUBU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ g_assert_not_reached();
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg ret, TCGReg arg, unsigned flags)
+{
+ /* With arg = abcd: */
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); /* 00dc */
}
- break;
+ return;
+ }
- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
- break;
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */
+ if (!(flags & TCG_BSWAP_IZ)) {
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */
+ }
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */
+ tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */
+ } else {
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */
}
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
+ }
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
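
    For hosts without WSBH, tgen_bswap16() above builds the swap from shifts and masks; the byte annotations (abcd, 0abc, ...) trace the value through each instruction. The sketch below emulates the TCG_BSWAP_OZ case (input not known zero-extended, output required zero-extended) on the host for one concrete value.

    /* Emulates the pre-R2 bswap16 path for the TCG_BSWAP_OZ case. */
    #include <stdint.h>
    #include <assert.h>

    static uint32_t bswap16_oz(uint32_t arg)              /* arg = abcd */
    {
        uint32_t tmp = (arg >> 8) & 0x00ff;               /* SRL, ANDI: 000c */
        uint32_t ret = (arg << 8) & 0xff00;               /* SLL, ANDI: 00d0 */
        return ret | tmp;                                 /* OR:        00dc */
    }

    int main(void)
    {
        assert(bswap16_oz(0xdeadbeef) == 0xefbe);  /* low half swapped, rest zero */
        return 0;
    }
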
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg ret, TCGReg arg, unsigned flags)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
+ tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0);
}
- break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
+ } else {
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_bswap_subr(s, bswap32u_addr);
} else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+ tcg_out_bswap_subr(s, bswap32_addr);
}
- break;
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
+ }
+}
- case INDEX_op_add2_i32:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], false);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], true);
- break;
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
+ tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
+ } else {
+ tcg_out_bswap_subr(s, bswap64_addr);
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
}
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
-{
- switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_not_i32:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i64:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
- case INDEX_op_extract_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(rZ, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rZ, rN);
- case INDEX_op_mul_i32:
- case INDEX_op_mulsh_i32:
- case INDEX_op_muluh_i32:
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- case INDEX_op_rem_i32:
- case INDEX_op_remu_i32:
- case INDEX_op_nor_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_mulsh_i64:
- case INDEX_op_muluh_i64:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i64:
- case INDEX_op_nor_i64:
- case INDEX_op_setcond_i64:
- return C_O1_I2(r, rZ, rZ);
- case INDEX_op_muls2_i32:
- case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i64:
- case INDEX_op_mulu2_i64:
- return C_O2_I2(r, r, r, r);
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- return C_O1_I2(r, r, rIK);
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- return C_O1_I2(r, r, rI);
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotr_i64:
- case INDEX_op_rotl_i64:
- return C_O1_I2(r, r, ri);
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- return C_O1_I2(r, r, rWZ);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rZ);
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rZ, rZ);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return (use_mips32r6_instructions
- ? C_O1_I4(r, rZ, rZ, rZ, rZ)
- : C_O1_I4(r, rZ, rZ, rZ, 0));
- case INDEX_op_add2_i32:
- case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, rZ, rZ, rN, rN);
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, rZ, rZ, rZ, rZ);
- case INDEX_op_brcond2_i32:
- return C_O0_I4(rZ, rZ, rZ, rZ);
-
- case INDEX_op_qemu_ld_a32_i32:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_st_a32_i32:
- return C_O0_I2(rZ, r);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r);
- case INDEX_op_qemu_ld_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r);
- case INDEX_op_qemu_st_a64_i64:
- return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r)
- : C_O0_I4(rZ, rZ, r, r));
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+#endif /* TCG_TARGET_REG_BITS == 64 */
- default:
- g_assert_not_reached();
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_nor(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_INS, a0, a2, ofs + len - 1, ofs);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
+ ofs + len - 1, ofs);
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rz),
+ .out_rrr = tgen_deposit,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 16) {
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, len - 1, ofs);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU,
+ a0, a1, len - 1, ofs);
}
}
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LBU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LB, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LHU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LH, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LWU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LW, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+#endif
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SB, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SH, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
+
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+{
+ return C_NotImplemented;
+}
+
static const int tcg_target_callee_save_regs[] = {
TCG_REG_S0,
TCG_REG_S1,
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index a996aa1..bd4ca5f 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -70,134 +70,6 @@ typedef enum {
TCG_AREG0 = TCG_REG_S8,
} TCGReg;
-/* used for function call generation */
-#define TCG_TARGET_STACK_ALIGN 16
-#if _MIPS_SIM == _ABIO32
-# define TCG_TARGET_CALL_STACK_OFFSET 16
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
-#else
-# define TCG_TARGET_CALL_STACK_OFFSET 0
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-#endif
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
-
-/* MOVN/MOVZ instructions detection */
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
- defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
- defined(_MIPS_ARCH_MIPS4)
-#define use_movnz_instructions 1
-#else
-extern bool use_movnz_instructions;
-#endif
-
-/* MIPS32 instruction set detection */
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
-#define use_mips32_instructions 1
-#else
-extern bool use_mips32_instructions;
-#endif
-
-/* MIPS32R2 instruction set detection */
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
-#define use_mips32r2_instructions 1
-#else
-extern bool use_mips32r2_instructions;
-#endif
-
-/* MIPS32R6 instruction set detection */
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
-#define use_mips32r6_instructions 1
-#else
-#define use_mips32r6_instructions 0
-#endif
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_andc_i32 0
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muluh_i32 1
-#define TCG_TARGET_HAS_mulsh_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_andc_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muluh_i64 1
-#define TCG_TARGET_HAS_mulsh_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 0
-#endif
-
-/* optional instructions detected at runtime */
-#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
-#endif
-
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
-#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
-#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
-#endif
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
-#define TCG_TARGET_HAS_tst 0
-
-#define TCG_TARGET_DEFAULT_MO 0
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
+#define TCG_REG_ZERO TCG_REG_ZERO
#endif
diff --git a/tcg/optimize.c b/tcg/optimize.c
index ba16ec2..62a128b 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -28,15 +28,8 @@
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
+#include "tcg-has.h"
-#define CASE_OP_32_64(x) \
- glue(glue(case INDEX_op_, x), _i32): \
- glue(glue(case INDEX_op_, x), _i64)
-
-#define CASE_OP_32_64_VEC(x) \
- glue(glue(case INDEX_op_, x), _i32): \
- glue(glue(case INDEX_op_, x), _i64): \
- glue(glue(case INDEX_op_, x), _vec)
typedef struct MemCopyInfo {
IntervalTreeNode itree;
@@ -46,13 +39,12 @@ typedef struct MemCopyInfo {
} MemCopyInfo;
typedef struct TempOptInfo {
- bool is_const;
TCGTemp *prev_copy;
TCGTemp *next_copy;
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
- uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
+ uint64_t o_mask; /* mask bit is 1 if and only if value bit is 1 */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
typedef struct OptContext {
@@ -64,70 +56,45 @@ typedef struct OptContext {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
/* In flight values from optimization. */
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
- uint64_t s_mask; /* mask of clrsb(value) bits */
TCGType type;
+ int carry_state; /* -1 = non-constant, {0,1} = constant carry-in */
} OptContext;
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
+static inline TempOptInfo *ts_info(TCGTemp *ts)
{
- int rep = clrsb64(value);
- return ~(~0ull >> rep);
+ return ts->state_ptr;
}
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
+static inline TempOptInfo *arg_info(TCGArg arg)
{
- /*
- * Only the 0 bits are significant for zmask, thus the msb itself
- * must be zero, else we have no sign information.
- */
- int rep = clz64(zmask);
- if (rep == 0) {
- return 0;
- }
- rep -= 1;
- return ~(~0ull >> rep);
+ return ts_info(arg_temp(arg));
}
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
+static inline bool ti_is_const(TempOptInfo *ti)
{
- /* Only the 1 bits are significant for smask */
- return smask_from_zmask(~smask);
+ /* If all bits that are not known zeros are known ones, it's constant. */
+ return ti->z_mask == ti->o_mask;
}
-static inline TempOptInfo *ts_info(TCGTemp *ts)
+static inline uint64_t ti_const_val(TempOptInfo *ti)
{
- return ts->state_ptr;
+ /* If constant, both z_mask and o_mask contain the value. */
+ return ti->z_mask;
}
-static inline TempOptInfo *arg_info(TCGArg arg)
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
- return ts_info(arg_temp(arg));
+ return ti_is_const(ti) && ti_const_val(ti) == val;
}
static inline bool ts_is_const(TCGTemp *ts)
{
- return ts_info(ts)->is_const;
+ return ti_is_const(ts_info(ts));
}
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
- TempOptInfo *ti = ts_info(ts);
- return ti->is_const && ti->val == val;
+ return ti_is_const_val(ts_info(ts), val);
}
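
    With the is_const/val fields gone, constness is now derived from the masks alone: a temp is constant exactly when every bit is known, i.e. z_mask == o_mask, and then either mask holds the value. A standalone sketch of that test (the struct here is a stand-in, not TempOptInfo):

    /* Sketch of the z_mask/o_mask constant test used by ti_is_const(). */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t z_mask;   /* bit is 0 iff the value bit is known to be 0 */
        uint64_t o_mask;   /* bit is 1 iff the value bit is known to be 1 */
    } bit_info;

    static int is_const(bit_info i) { return i.z_mask == i.o_mask; }

    int main(void)
    {
        bit_info known   = { .z_mask = 0x2a, .o_mask = 0x2a };  /* value is 0x2a */
        bit_info partial = { .z_mask = 0xff, .o_mask = 0x0f };  /* bits 4..7 unknown */

        printf("%d %d\n", is_const(known), is_const(partial));  /* prints: 1 0 */
        return 0;
    }
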
static inline bool arg_is_const(TCGArg arg)
@@ -135,6 +102,11 @@ static inline bool arg_is_const(TCGArg arg)
return ts_is_const(arg_temp(arg));
}
+static inline uint64_t arg_const_val(TCGArg arg)
+{
+ return ti_const_val(arg_info(arg));
+}
+
static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
return ts_is_const_val(arg_temp(arg), val);
@@ -171,13 +143,12 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->prev_copy = ts;
QSIMPLEQ_INIT(&ti->mem_copy);
if (ts->kind == TEMP_CONST) {
- ti->is_const = true;
- ti->val = ts->val;
ti->z_mask = ts->val;
- ti->s_mask = smask_from_value(ts->val);
+ ti->o_mask = ts->val;
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
} else {
- ti->is_const = false;
ti->z_mask = -1;
+ ti->o_mask = 0;
ti->s_mask = 0;
}
}
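
    Under the new s_mask definition ("bit is 1 if the value bit matches the msb"), the constant case seeds it with INT64_MIN >> clrsb64(val): an arithmetic shift spreads the sign bit across exactly the bits that repeat the msb. One worked value:

    /* Worked example of the constant-case s_mask seeding (assumes the usual
     * arithmetic right shift for signed types, as the QEMU code does). */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t val = 0xff;
        int clrsb = 55;   /* clrsb64(0xff): bits 62..8 repeat the (zero) sign bit */
        uint64_t s_mask = (uint64_t)(INT64_MIN >> clrsb);

        /* Bits 63..8 of 0xff match its msb, so exactly those bits are set. */
        printf("s_mask = 0x%016" PRIx64 "\n", s_mask);  /* 0xffffffffffffff00 */
        return 0;
    }
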
@@ -263,8 +234,8 @@ static void reset_ts(OptContext *ctx, TCGTemp *ts)
pi->next_copy = ti->next_copy;
ti->next_copy = ts;
ti->prev_copy = ts;
- ti->is_const = false;
ti->z_mask = -1;
+ ti->o_mask = 0;
ti->s_mask = 0;
if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
@@ -371,6 +342,18 @@ static TCGArg arg_new_temp(OptContext *ctx)
return temp_arg(ts);
}
+static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
+ TCGOpcode opc, unsigned narg)
+{
+ return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
+}
+
+static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
+ TCGOpcode opc, unsigned narg)
+{
+ return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
+}
+
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
@@ -390,15 +373,13 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
switch (ctx->type) {
case TCG_TYPE_I32:
- new_op = INDEX_op_mov_i32;
- break;
case TCG_TYPE_I64:
- new_op = INDEX_op_mov_i64;
+ new_op = INDEX_op_mov;
break;
case TCG_TYPE_V64:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
+ /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
new_op = INDEX_op_mov_vec;
break;
default:
@@ -409,6 +390,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
op->args[1] = src;
di->z_mask = si->z_mask;
+ di->o_mask = si->o_mask;
di->s_mask = si->s_mask;
if (src_ts->type == dst_ts->type) {
@@ -418,13 +400,19 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
di->prev_copy = src_ts;
ni->prev_copy = dst_ts;
si->next_copy = dst_ts;
- di->is_const = si->is_const;
- di->val = si->val;
if (!QSIMPLEQ_EMPTY(&si->mem_copy)
&& cmp_better_copy(src_ts, dst_ts) == dst_ts) {
move_mem_copies(dst_ts, src_ts);
}
+ } else if (dst_ts->type == TCG_TYPE_I32) {
+ di->z_mask = (int32_t)di->z_mask;
+ di->o_mask = (int32_t)di->o_mask;
+ di->s_mask |= INT32_MIN;
+ } else {
+ di->z_mask |= MAKE_64BIT_MASK(32, 32);
+ di->o_mask = (uint32_t)di->o_mask;
+ di->s_mask = INT64_MIN;
}
return true;
}
@@ -436,162 +424,163 @@ static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}
-static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
+ uint64_t x, uint64_t y)
{
uint64_t l64, h64;
switch (op) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
return x + y;
- CASE_OP_32_64(sub):
+ case INDEX_op_sub:
return x - y;
- CASE_OP_32_64(mul):
+ case INDEX_op_mul:
return x * y;
- CASE_OP_32_64_VEC(and):
+ case INDEX_op_and:
+ case INDEX_op_and_vec:
return x & y;
- CASE_OP_32_64_VEC(or):
+ case INDEX_op_or:
+ case INDEX_op_or_vec:
return x | y;
- CASE_OP_32_64_VEC(xor):
+ case INDEX_op_xor:
+ case INDEX_op_xor_vec:
return x ^ y;
- case INDEX_op_shl_i32:
- return (uint32_t)x << (y & 31);
-
- case INDEX_op_shl_i64:
+ case INDEX_op_shl:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x << (y & 31);
+ }
return (uint64_t)x << (y & 63);
- case INDEX_op_shr_i32:
- return (uint32_t)x >> (y & 31);
-
- case INDEX_op_shr_i64:
+ case INDEX_op_shr:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x >> (y & 31);
+ }
return (uint64_t)x >> (y & 63);
- case INDEX_op_sar_i32:
- return (int32_t)x >> (y & 31);
-
- case INDEX_op_sar_i64:
+ case INDEX_op_sar:
+ if (type == TCG_TYPE_I32) {
+ return (int32_t)x >> (y & 31);
+ }
return (int64_t)x >> (y & 63);
- case INDEX_op_rotr_i32:
- return ror32(x, y & 31);
-
- case INDEX_op_rotr_i64:
+ case INDEX_op_rotr:
+ if (type == TCG_TYPE_I32) {
+ return ror32(x, y & 31);
+ }
return ror64(x, y & 63);
- case INDEX_op_rotl_i32:
- return rol32(x, y & 31);
-
- case INDEX_op_rotl_i64:
+ case INDEX_op_rotl:
+ if (type == TCG_TYPE_I32) {
+ return rol32(x, y & 31);
+ }
return rol64(x, y & 63);
- CASE_OP_32_64_VEC(not):
+ case INDEX_op_not:
+ case INDEX_op_not_vec:
return ~x;
- CASE_OP_32_64(neg):
+ case INDEX_op_neg:
return -x;
- CASE_OP_32_64_VEC(andc):
+ case INDEX_op_andc:
+ case INDEX_op_andc_vec:
return x & ~y;
- CASE_OP_32_64_VEC(orc):
+ case INDEX_op_orc:
+ case INDEX_op_orc_vec:
return x | ~y;
- CASE_OP_32_64_VEC(eqv):
+ case INDEX_op_eqv:
+ case INDEX_op_eqv_vec:
return ~(x ^ y);
- CASE_OP_32_64_VEC(nand):
+ case INDEX_op_nand:
+ case INDEX_op_nand_vec:
return ~(x & y);
- CASE_OP_32_64_VEC(nor):
+ case INDEX_op_nor:
+ case INDEX_op_nor_vec:
return ~(x | y);
- case INDEX_op_clz_i32:
- return (uint32_t)x ? clz32(x) : y;
-
- case INDEX_op_clz_i64:
+ case INDEX_op_clz:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x ? clz32(x) : y;
+ }
return x ? clz64(x) : y;
- case INDEX_op_ctz_i32:
- return (uint32_t)x ? ctz32(x) : y;
-
- case INDEX_op_ctz_i64:
+ case INDEX_op_ctz:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x ? ctz32(x) : y;
+ }
return x ? ctz64(x) : y;
- case INDEX_op_ctpop_i32:
- return ctpop32(x);
-
- case INDEX_op_ctpop_i64:
- return ctpop64(x);
-
- CASE_OP_32_64(ext8s):
- return (int8_t)x;
-
- CASE_OP_32_64(ext16s):
- return (int16_t)x;
-
- CASE_OP_32_64(ext8u):
- return (uint8_t)x;
-
- CASE_OP_32_64(ext16u):
- return (uint16_t)x;
+ case INDEX_op_ctpop:
+ return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);
- CASE_OP_32_64(bswap16):
+ case INDEX_op_bswap16:
x = bswap16(x);
return y & TCG_BSWAP_OS ? (int16_t)x : x;
- CASE_OP_32_64(bswap32):
+ case INDEX_op_bswap32:
x = bswap32(x);
return y & TCG_BSWAP_OS ? (int32_t)x : x;
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
return bswap64(x);
case INDEX_op_ext_i32_i64:
- case INDEX_op_ext32s_i64:
return (int32_t)x;
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
- case INDEX_op_ext32u_i64:
return (uint32_t)x;
case INDEX_op_extrh_i64_i32:
return (uint64_t)x >> 32;
- case INDEX_op_muluh_i32:
- return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
- case INDEX_op_mulsh_i32:
- return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
-
- case INDEX_op_muluh_i64:
+ case INDEX_op_muluh:
+ if (type == TCG_TYPE_I32) {
+ return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+ }
mulu64(&l64, &h64, x, y);
return h64;
- case INDEX_op_mulsh_i64:
+
+ case INDEX_op_mulsh:
+ if (type == TCG_TYPE_I32) {
+ return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
+ }
muls64(&l64, &h64, x, y);
return h64;
- case INDEX_op_div_i32:
+ case INDEX_op_divs:
/* Avoid crashing on divide by zero, otherwise undefined. */
- return (int32_t)x / ((int32_t)y ? : 1);
- case INDEX_op_divu_i32:
- return (uint32_t)x / ((uint32_t)y ? : 1);
- case INDEX_op_div_i64:
+ if (type == TCG_TYPE_I32) {
+ return (int32_t)x / ((int32_t)y ? : 1);
+ }
return (int64_t)x / ((int64_t)y ? : 1);
- case INDEX_op_divu_i64:
+
+ case INDEX_op_divu:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x / ((uint32_t)y ? : 1);
+ }
return (uint64_t)x / ((uint64_t)y ? : 1);
- case INDEX_op_rem_i32:
- return (int32_t)x % ((int32_t)y ? : 1);
- case INDEX_op_remu_i32:
- return (uint32_t)x % ((uint32_t)y ? : 1);
- case INDEX_op_rem_i64:
+ case INDEX_op_rems:
+ if (type == TCG_TYPE_I32) {
+ return (int32_t)x % ((int32_t)y ? : 1);
+ }
return (int64_t)x % ((int64_t)y ? : 1);
- case INDEX_op_remu_i64:
+
+ case INDEX_op_remu:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x % ((uint32_t)y ? : 1);
+ }
return (uint64_t)x % ((uint64_t)y ? : 1);
default:
@@ -602,7 +591,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
uint64_t x, uint64_t y)
{
- uint64_t res = do_constant_folding_2(op, x, y);
+ uint64_t res = do_constant_folding_2(op, type, x, y);
if (type == TCG_TYPE_I32) {
res = (int32_t)res;
}
@@ -710,8 +699,8 @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
TCGArg y, TCGCond c)
{
if (arg_is_const(x) && arg_is_const(y)) {
- uint64_t xv = arg_info(x)->val;
- uint64_t yv = arg_info(y)->val;
+ uint64_t xv = arg_const_val(x);
+ uint64_t yv = arg_const_val(y);
switch (type) {
case TCG_TYPE_I32:
@@ -752,12 +741,18 @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
#define NO_DEST temp_arg(NULL)
+static int pref_commutative(TempOptInfo *ti)
+{
+ /* Slight preference for non-zero constants second. */
+ return !ti_is_const(ti) ? 0 : ti_const_val(ti) ? 3 : 2;
+}
+
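
    pref_commutative() turns the old boolean is-const test into a three-level score (non-constant 0, zero constant 2, other constants 3), and swap_commutative() still swaps when the first operand scores higher, so non-zero constants gravitate to the second slot. A small sketch of the decision; the enum names are invented here.

    /* Sketch of the commutative-swap scoring; the enum is illustrative only. */
    #include <stdio.h>

    enum kind { NON_CONST, ZERO_CONST, NONZERO_CONST };

    static int pref(enum kind k)
    {
        return k == NON_CONST ? 0 : k == NONZERO_CONST ? 3 : 2;
    }

    int main(void)
    {
        /* add t, $42, x: 3 - 0 > 0, so the constant is swapped to the right. */
        printf("swap=%d\n", pref(NONZERO_CONST) - pref(NON_CONST) > 0);
        /* add t, $0, $42: 2 - 3 < 0, so the non-zero constant stays second. */
        printf("swap=%d\n", pref(ZERO_CONST) - pref(NONZERO_CONST) > 0);
        return 0;
    }
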
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
TCGArg a1 = *p1, a2 = *p2;
int sum = 0;
- sum += arg_is_const(a1);
- sum -= arg_is_const(a2);
+ sum += pref_commutative(arg_info(a1));
+ sum -= pref_commutative(arg_info(a2));
/* Prefer the constant in second argument, and then the form
op a, a, b, which is better handled on non-RISC hosts. */
@@ -772,10 +767,10 @@ static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
int sum = 0;
- sum += arg_is_const(p1[0]);
- sum += arg_is_const(p1[1]);
- sum -= arg_is_const(p2[0]);
- sum -= arg_is_const(p2[1]);
+ sum += pref_commutative(arg_info(p1[0]));
+ sum += pref_commutative(arg_info(p1[1]));
+ sum -= pref_commutative(arg_info(p2[0]));
+ sum -= pref_commutative(arg_info(p2[1]));
if (sum > 0) {
TCGArg t;
t = p1[0], p1[0] = p2[0], p2[0] = t;
@@ -789,10 +784,12 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
* Return -1 if the condition can't be simplified,
* and the result of the condition (0 or 1) if it can.
*/
+static bool fold_and(OptContext *ctx, TCGOp *op);
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
TCGCond cond;
+ TempOptInfo *i1;
bool swap;
int r;
@@ -810,19 +807,21 @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
return -1;
}
+ i1 = arg_info(*p1);
+
/*
* TSTNE x,x -> NE x,0
- * TSTNE x,-1 -> NE x,0
+ * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
*/
- if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
+ if (args_are_copies(*p1, *p2) ||
+ (arg_is_const(*p2) && (i1->z_mask & ~arg_const_val(*p2)) == 0)) {
*p2 = arg_new_constant(ctx, 0);
*pcond = tcg_tst_eqne_cond(cond);
return -1;
}
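+ /*
+ * For example, if x is known to fit in 8 bits (z_mask == 0xff), then
+ * TSTNE x,0xff tests every bit that could possibly be set, which is
+ * the same as NE x,0.
+ */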
- /* TSTNE x,sign -> LT x,0 */
- if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
- ? INT32_MIN : INT64_MIN))) {
+ /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
+ if (arg_is_const(*p2) && (arg_const_val(*p2) & ~i1->s_mask) == 0) {
*p2 = arg_new_constant(ctx, 0);
*pcond = tcg_tst_ltge_cond(cond);
return -1;
@@ -830,14 +829,13 @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
/* Expand to AND with a temporary if no backend support. */
if (!TCG_TARGET_HAS_tst) {
- TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_and_i32 : INDEX_op_and_i64);
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
TCGArg tmp = arg_new_temp(ctx);
op2->args[0] = tmp;
op2->args[1] = *p1;
op2->args[2] = *p2;
+ fold_and(ctx, op2);
*p1 = tmp;
*p2 = arg_new_constant(ctx, 0);
@@ -865,13 +863,13 @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
bh = args[3];
if (arg_is_const(bl) && arg_is_const(bh)) {
- tcg_target_ulong blv = arg_info(bl)->val;
- tcg_target_ulong bhv = arg_info(bh)->val;
+ tcg_target_ulong blv = arg_const_val(bl);
+ tcg_target_ulong bhv = arg_const_val(bh);
uint64_t b = deposit64(blv, 32, 32, bhv);
if (arg_is_const(al) && arg_is_const(ah)) {
- tcg_target_ulong alv = arg_info(al)->val;
- tcg_target_ulong ahv = arg_info(ah)->val;
+ tcg_target_ulong alv = arg_const_val(al);
+ tcg_target_ulong ahv = arg_const_val(ah);
uint64_t a = deposit64(alv, 32, 32, ahv);
r = do_constant_folding_cond_64(a, b, c);
@@ -925,17 +923,20 @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
/* Expand to AND with a temporary if no backend support. */
if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
- TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
+ TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
TCGArg t1 = arg_new_temp(ctx);
TCGArg t2 = arg_new_temp(ctx);
op1->args[0] = t1;
op1->args[1] = al;
op1->args[2] = bl;
+ fold_and(ctx, op1);
+
op2->args[0] = t2;
op2->args[1] = ah;
op2->args[2] = bh;
+ fold_and(ctx, op2);
args[0] = t1;
args[1] = t2;
@@ -964,37 +965,31 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}
-static void finish_folding(OptContext *ctx, TCGOp *op)
+static void finish_bb(OptContext *ctx)
+{
+ /* Memory-barrier optimization does not cross a basic block boundary. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* Known-value and memory-copy tracking do not cross an extended basic block. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
+static bool finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;
- /*
- * We only optimize extended basic blocks. If the opcode ends a BB
- * and is not a conditional branch, reset all temp data.
- */
- if (def->flags & TCG_OPF_BB_END) {
- ctx->prev_mb = NULL;
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
- remove_mem_copy_all(ctx);
- }
- return;
- }
-
nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
reset_ts(ctx, ts);
- /*
- * Save the corresponding known-zero/sign bits mask for the
- * first output argument (only one supported so far).
- */
- if (i == 0) {
- ts_info(ts)->z_mask = ctx->z_mask;
- ts_info(ts)->s_mask = ctx->s_mask;
- }
}
+ return true;
}
/*
@@ -1011,9 +1006,8 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
static bool fold_const1(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[1])) {
- uint64_t t;
+ uint64_t t = arg_const_val(op->args[1]);
- t = arg_info(op->args[1])->val;
t = do_constant_folding(op->opc, ctx->type, t, 0);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
@@ -1023,8 +1017,8 @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
static bool fold_const2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t t1 = arg_info(op->args[1])->val;
- uint64_t t2 = arg_info(op->args[2])->val;
+ uint64_t t1 = arg_const_val(op->args[1]);
+ uint64_t t2 = arg_const_val(op->args[2]);
t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
@@ -1044,11 +1038,23 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero", "one" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask, o_mask and s_mask.
+ * If the z and o masks allow, fold the output to a constant;
+ * if a_mask shows no bits are affected, fold to a copy of the input.
+ * The passed s_mask may be augmented by z_mask.
+ */
+static bool fold_masks_zosa_int(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t o_mask,
+ int64_t s_mask, uint64_t a_mask)
{
- uint64_t a_mask = ctx->a_mask;
- uint64_t z_mask = ctx->z_mask;
- uint64_t s_mask = ctx->s_mask;
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ TCGTemp *ts;
+ TempOptInfo *ti;
+ int rep;
+
+ /* Only single-output opcodes are supported here. */
+ tcg_debug_assert(def->nb_oargs == 1);
/*
* 32-bit ops generate 32-bit results, which for the purpose of
@@ -1058,22 +1064,76 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
* type changing opcodes.
*/
if (ctx->type == TCG_TYPE_I32) {
- a_mask = (int32_t)a_mask;
z_mask = (int32_t)z_mask;
- s_mask |= MAKE_64BIT_MASK(32, 32);
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
+ o_mask = (int32_t)o_mask;
+ s_mask |= INT32_MIN;
+ a_mask = (uint32_t)a_mask;
}
- if (z_mask == 0) {
- return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
+ /* Bits that are known 1 and bits that are known 0 must not overlap. */
+ tcg_debug_assert((o_mask & ~z_mask) == 0);
+
+ /* If every bit not known zero is known one, the value is a constant. */
+ if (z_mask == o_mask) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], o_mask);
}
+
+ /* If no bits are affected, the operation devolves to a copy. */
if (a_mask == 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
+
+ ts = arg_temp(op->args[0]);
+ reset_ts(ctx, ts);
+
+ ti = ts_info(ts);
+ ti->z_mask = z_mask;
+ ti->o_mask = o_mask;
+
+ /* Canonicalize s_mask and incorporate data from z_mask and o_mask. */
+ rep = clz64(~s_mask);
+ rep = MAX(rep, clz64(z_mask));
+ rep = MAX(rep, clz64(~o_mask));
+ rep = MAX(rep - 1, 0);
+ ti->s_mask = INT64_MIN >> rep;
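+ /*
+ * For example, for a 64-bit op with z_mask == 0xffff, o_mask == 0 and an
+ * incoming s_mask of 0: clz64(z_mask) == 48, so rep == 47 and the stored
+ * s_mask becomes 0xffffffffffff0000, i.e. bits 16..63 are known to be
+ * copies of the (zero) sign bit.
+ */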
+
return false;
}
+static bool fold_masks_zosa(OptContext *ctx, TCGOp *op, uint64_t z_mask,
+ uint64_t o_mask, int64_t s_mask, uint64_t a_mask)
+{
+ fold_masks_zosa_int(ctx, op, z_mask, o_mask, s_mask, a_mask);
+ return true;
+}
+
+static bool fold_masks_zos(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t o_mask, uint64_t s_mask)
+{
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, -1);
+}
+
+static bool fold_masks_zo(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t o_mask)
+{
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, -1);
+}
+
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t s_mask)
+{
+ return fold_masks_zosa(ctx, op, z_mask, 0, s_mask, -1);
+}
+
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
+{
+ return fold_masks_zosa(ctx, op, z_mask, 0, 0, -1);
+}
+
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
+{
+ return fold_masks_zosa(ctx, op, -1, 0, s_mask, -1);
+}
+
/*
* Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
@@ -1087,12 +1147,9 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
switch (ctx->type) {
case TCG_TYPE_I32:
- not_op = INDEX_op_not_i32;
- have_not = TCG_TARGET_HAS_not_i32;
- break;
case TCG_TYPE_I64:
- not_op = INDEX_op_not_i64;
- have_not = TCG_TARGET_HAS_not_i64;
+ not_op = INDEX_op_not;
+ have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
break;
case TCG_TYPE_V64:
case TCG_TYPE_V128:
@@ -1183,13 +1240,19 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
* 3) those that produce information about the result value.
*/
+static bool fold_addco(OptContext *ctx, TCGOp *op);
+static bool fold_or(OptContext *ctx, TCGOp *op);
+static bool fold_orc(OptContext *ctx, TCGOp *op);
+static bool fold_subbo(OptContext *ctx, TCGOp *op);
+static bool fold_xor(OptContext *ctx, TCGOp *op);
+
static bool fold_add(OptContext *ctx, TCGOp *op)
{
if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
/* We cannot as yet do_constant_folding with vectors. */
@@ -1199,145 +1262,319 @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
-static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
+static void squash_prev_carryout(OptContext *ctx, TCGOp *op)
{
- bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
- bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);
+ TempOptInfo *t2;
- if (a_const && b_const) {
- uint64_t al = arg_info(op->args[2])->val;
- uint64_t ah = arg_info(op->args[3])->val;
- uint64_t bl = arg_info(op->args[4])->val;
- uint64_t bh = arg_info(op->args[5])->val;
- TCGArg rl, rh;
- TCGOp *op2;
+ op = QTAILQ_PREV(op, link);
+ switch (op->opc) {
+ case INDEX_op_addco:
+ op->opc = INDEX_op_add;
+ fold_add(ctx, op);
+ break;
+ case INDEX_op_addcio:
+ op->opc = INDEX_op_addci;
+ break;
+ case INDEX_op_addc1o:
+ op->opc = INDEX_op_add;
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
+ /* Perform other constant folding, if needed. */
+ fold_add(ctx, op);
+ } else {
+ TCGArg ret = op->args[0];
+ op = opt_insert_after(ctx, op, INDEX_op_add, 3);
+ op->args[0] = ret;
+ op->args[1] = ret;
+ op->args[2] = arg_new_constant(ctx, 1);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
- if (ctx->type == TCG_TYPE_I32) {
- uint64_t a = deposit64(al, 32, 32, ah);
- uint64_t b = deposit64(bl, 32, 32, bh);
+static bool fold_addci(OptContext *ctx, TCGOp *op)
+{
+ fold_commutative(ctx, op);
- if (add) {
- a += b;
- } else {
- a -= b;
- }
+ if (ctx->carry_state < 0) {
+ return finish_folding(ctx, op);
+ }
+
+ squash_prev_carryout(ctx, op);
+ op->opc = INDEX_op_add;
- al = sextract64(a, 0, 32);
- ah = sextract64(a, 32, 32);
+ if (ctx->carry_state > 0) {
+ TempOptInfo *t2 = arg_info(op->args[2]);
+
+ /*
+ * Propagate the known carry-in into a constant, if possible.
+ * Otherwise emit a second add +1.
+ */
+ if (ti_is_const(t2)) {
+ op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
} else {
- Int128 a = int128_make128(al, ah);
- Int128 b = int128_make128(bl, bh);
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_add, 3);
- if (add) {
- a = int128_add(a, b);
- } else {
- a = int128_sub(a, b);
- }
+ op2->args[0] = op->args[0];
+ op2->args[1] = op->args[1];
+ op2->args[2] = op->args[2];
+ fold_add(ctx, op2);
- al = int128_getlo(a);
- ah = int128_gethi(a);
+ op->args[1] = op->args[0];
+ op->args[2] = arg_new_constant(ctx, 1);
}
+ }
- rl = op->args[0];
- rh = op->args[1];
+ ctx->carry_state = -1;
+ return fold_add(ctx, op);
+}
- /* The proper opcode is supplied by tcg_opt_gen_mov. */
- op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
+static bool fold_addcio(OptContext *ctx, TCGOp *op)
+{
+ TempOptInfo *t1, *t2;
+ int carry_out = -1;
+ uint64_t sum, max;
- tcg_opt_gen_movi(ctx, op, rl, al);
- tcg_opt_gen_movi(ctx, op2, rh, ah);
- return true;
+ fold_commutative(ctx, op);
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ /*
+ * The z_mask value is >= the maximum value that can be represented
+ * with the known zero bits. So adding the z_mask values will not
+ * overflow if and only if the true values cannot overflow.
+ */
+ if (!uadd64_overflow(t1->z_mask, t2->z_mask, &sum) &&
+ !uadd64_overflow(sum, ctx->carry_state != 0, &sum)) {
+ carry_out = 0;
}
- /* Fold sub2 r,x,i to add2 r,x,-i */
- if (!add && b_const) {
- uint64_t bl = arg_info(op->args[4])->val;
- uint64_t bh = arg_info(op->args[5])->val;
+ if (ctx->carry_state < 0) {
+ ctx->carry_state = carry_out;
+ return finish_folding(ctx, op);
+ }
- /* Negate the two parts without assembling and disassembling. */
- bl = -bl;
- bh = ~bh + !bl;
+ squash_prev_carryout(ctx, op);
+ if (ctx->carry_state == 0) {
+ goto do_addco;
+ }
- op->opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
- op->args[4] = arg_new_constant(ctx, bl);
- op->args[5] = arg_new_constant(ctx, bh);
+ /* Propagate the known carry-in into a constant, if possible. */
+ max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
+ if (ti_is_const(t2)) {
+ uint64_t v = ti_const_val(t2) & max;
+ if (v < max) {
+ op->args[2] = arg_new_constant(ctx, v + 1);
+ goto do_addco;
+ }
+ /* max + known carry in produces known carry out. */
+ carry_out = 1;
+ }
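+ /*
+ * For example, with TCG_TYPE_I32, a known carry-in of 1 and y constant
+ * 0xffffffff, x + y + 1 wraps for every x, so the carry out is known
+ * to be 1.
+ */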
+ if (ti_is_const(t1)) {
+ uint64_t v = ti_const_val(t1) & max;
+ if (v < max) {
+ op->args[1] = arg_new_constant(ctx, v + 1);
+ goto do_addco;
+ }
+ carry_out = 1;
}
- return false;
+
+ /* Adjust the opcode to remember the known carry-in. */
+ op->opc = INDEX_op_addc1o;
+ ctx->carry_state = carry_out;
+ return finish_folding(ctx, op);
+
+ do_addco:
+ op->opc = INDEX_op_addco;
+ return fold_addco(ctx, op);
}
-static bool fold_add2(OptContext *ctx, TCGOp *op)
+static bool fold_addco(OptContext *ctx, TCGOp *op)
{
- /* Note that the high and low parts may be independently swapped. */
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
+ TempOptInfo *t1, *t2;
+ int carry_out = -1;
+ uint64_t ign;
+
+ fold_commutative(ctx, op);
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ if (ti_is_const(t2)) {
+ uint64_t v2 = ti_const_val(t2);
- return fold_addsub2(ctx, op, true);
+ if (ti_is_const(t1)) {
+ uint64_t v1 = ti_const_val(t1);
+ /* Given sign-extension of z_mask for I32, we need not truncate. */
+ carry_out = uadd64_overflow(v1, v2, &ign);
+ } else if (v2 == 0) {
+ carry_out = 0;
+ }
+ } else {
+ /*
+ * The z_mask value is >= the maximum value that can be represented
+ * with the known zero bits. So adding the z_mask values will not
+ * overflow if and only if the true values cannot overflow.
+ */
+ if (!uadd64_overflow(t1->z_mask, t2->z_mask, &ign)) {
+ carry_out = 0;
+ }
+ }
+ ctx->carry_state = carry_out;
+ return finish_folding(ctx, op);
}
static bool fold_and(OptContext *ctx, TCGOp *op)
{
- uint64_t z1, z2;
+ uint64_t z_mask, o_mask, s_mask, a_mask;
+ TempOptInfo *t1, *t2;
- if (fold_const2_commutative(ctx, op) ||
- fold_xi_to_i(ctx, op, 0) ||
- fold_xi_to_x(ctx, op, -1) ||
- fold_xx_to_x(ctx, op)) {
+ if (fold_const2_commutative(ctx, op)) {
return true;
}
- z1 = arg_info(op->args[1])->z_mask;
- z2 = arg_info(op->args[2])->z_mask;
- ctx->z_mask = z1 & z2;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ z_mask = t1->z_mask & t2->z_mask;
+ o_mask = t1->o_mask & t2->o_mask;
/*
* Sign repetitions are perforce all identical, whether they are 1 or 0.
* Bitwise operations preserve the relative quantity of the repetitions.
*/
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
-
- /*
- * Known-zeros does not imply known-ones. Therefore unless
- * arg2 is constant, we can't infer affected bits from it.
- */
- if (arg_is_const(op->args[2])) {
- ctx->a_mask = z1 & ~z2;
+ s_mask = t1->s_mask & t2->s_mask;
+
+ /* Affected bits are those not known zero, masked by those known one. */
+ a_mask = t1->z_mask & ~t2->o_mask;
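+ /*
+ * For example, if x fits in 16 bits (z_mask == 0xffff) and the low 16
+ * bits of y are known ones (o_mask covers 0xffff), then a_mask == 0 and
+ * the AND is just a copy of x.
+ */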
+
+ if (!fold_masks_zosa_int(ctx, op, z_mask, o_mask, s_mask, a_mask)) {
+ if (ti_is_const(t2)) {
+ /*
+ * Canonicalize on extract, if valid. This aids x86 with its
+ * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode
+ * which does not require matching operands. Other backends can
+ * trivially expand the extract to AND during code generation.
+ */
+ uint64_t val = ti_const_val(t2);
+ if (!(val & (val + 1))) {
+ unsigned len = ctz64(~val);
+ if (TCG_TARGET_extract_valid(ctx->type, 0, len)) {
+ op->opc = INDEX_op_extract;
+ op->args[2] = 0;
+ op->args[3] = len;
+ }
+ }
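+ /*
+ * For example, "and r,x,0xff" passes the check since 0xff & 0x100 == 0,
+ * giving len = ctz64(~0xff) = 8, so it becomes "extract r,x,0,8" when
+ * the backend supports that extract.
+ */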
+ } else {
+ fold_xx_to_x(ctx, op);
+ }
}
-
- return fold_masks(ctx, op);
+ return true;
}
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
- uint64_t z1;
+ uint64_t z_mask, o_mask, s_mask, a_mask;
+ TempOptInfo *t1, *t2;
- if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0) ||
- fold_xi_to_x(ctx, op, 0) ||
+ if (fold_const2(ctx, op)) {
+ return true;
+ }
+
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ if (ti_is_const(t2)) {
+ /* Fold andc r,x,i to and r,x,~i. */
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ op->opc = INDEX_op_and;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ op->opc = INDEX_op_and_vec;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
+ return fold_and(ctx, op);
+ }
+ if (fold_xx_to_i(ctx, op, 0) ||
fold_ix_to_not(ctx, op, -1)) {
return true;
}
- z1 = arg_info(op->args[1])->z_mask;
+ z_mask = t1->z_mask & ~t2->o_mask;
+ o_mask = t1->o_mask & ~t2->z_mask;
+ s_mask = t1->s_mask & t2->s_mask;
- /*
- * Known-zeros does not imply known-ones. Therefore unless
- * arg2 is constant, we can't infer anything from it.
- */
- if (arg_is_const(op->args[2])) {
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
- ctx->a_mask = z1 & ~z2;
- z1 &= z2;
+ /* Affected bits are those not known zero, masked by those known zero. */
+ a_mask = t1->z_mask & t2->z_mask;
+
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
+}
+
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
+{
+ /* If true and false values are the same, eliminate the cmp. */
+ if (args_are_copies(op->args[2], op->args[3])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
}
- ctx->z_mask = z1;
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return fold_masks(ctx, op);
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+ uint64_t tv = arg_const_val(op->args[2]);
+ uint64_t fv = arg_const_val(op->args[3]);
+
+ if (tv == -1 && fv == 0) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ if (tv == 0 && fv == -1) {
+ if (TCG_TARGET_HAS_not_vec) {
+ op->opc = INDEX_op_not_vec;
+ return fold_not(ctx, op);
+ } else {
+ op->opc = INDEX_op_xor_vec;
+ op->args[2] = arg_new_constant(ctx, -1);
+ return fold_xor(ctx, op);
+ }
+ }
+ }
+ if (arg_is_const(op->args[2])) {
+ uint64_t tv = arg_const_val(op->args[2]);
+ if (tv == -1) {
+ op->opc = INDEX_op_or_vec;
+ op->args[2] = op->args[3];
+ return fold_or(ctx, op);
+ }
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
+ op->opc = INDEX_op_andc_vec;
+ op->args[2] = op->args[1];
+ op->args[1] = op->args[3];
+ return fold_andc(ctx, op);
+ }
+ }
+ if (arg_is_const(op->args[3])) {
+ uint64_t fv = arg_const_val(op->args[3]);
+ if (fv == 0) {
+ op->opc = INDEX_op_and_vec;
+ return fold_and(ctx, op);
+ }
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
+ op->opc = INDEX_op_orc_vec;
+ op->args[2] = op->args[1];
+ op->args[1] = op->args[3];
+ return fold_orc(ctx, op);
+ }
+ }
+ return finish_folding(ctx, op);
}
static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -1351,8 +1588,11 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
if (i > 0) {
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
}
- return false;
+ return true;
}
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -1422,14 +1662,14 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
break;
do_brcond_low:
- op->opc = INDEX_op_brcond_i32;
+ op->opc = INDEX_op_brcond;
op->args[1] = op->args[2];
op->args[2] = cond;
op->args[3] = label;
return fold_brcond(ctx, op);
do_brcond_high:
- op->opc = INDEX_op_brcond_i32;
+ op->opc = INDEX_op_brcond;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
op->args[2] = cond;
@@ -1443,64 +1683,62 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
}
op->opc = INDEX_op_br;
op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
}
- return false;
+
+ finish_bb(ctx);
+ return true;
}
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask, s_mask, sign;
-
- if (arg_is_const(op->args[1])) {
- uint64_t t = arg_info(op->args[1])->val;
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ int flags = op->args[2];
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ do_constant_folding(op->opc, ctx->type,
+ ti_const_val(t1), flags));
}
- z_mask = arg_info(op->args[1])->z_mask;
+ z_mask = t1->z_mask;
+ o_mask = t1->o_mask;
+ s_mask = 0;
switch (op->opc) {
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap16:
z_mask = bswap16(z_mask);
- sign = INT16_MIN;
+ o_mask = bswap16(o_mask);
+ if (flags & TCG_BSWAP_OS) {
+ z_mask = (int16_t)z_mask;
+ o_mask = (int16_t)o_mask;
+ s_mask = INT16_MIN;
+ } else if (!(flags & TCG_BSWAP_OZ)) {
+ z_mask |= MAKE_64BIT_MASK(16, 48);
+ }
break;
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap32:
z_mask = bswap32(z_mask);
- sign = INT32_MIN;
+ o_mask = bswap32(o_mask);
+ if (flags & TCG_BSWAP_OS) {
+ z_mask = (int32_t)z_mask;
+ o_mask = (int32_t)o_mask;
+ s_mask = INT32_MIN;
+ } else if (!(flags & TCG_BSWAP_OZ)) {
+ z_mask |= MAKE_64BIT_MASK(32, 32);
+ }
break;
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
z_mask = bswap64(z_mask);
- sign = INT64_MIN;
+ o_mask = bswap64(o_mask);
break;
default:
g_assert_not_reached();
}
- s_mask = smask_from_zmask(z_mask);
-
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
- case TCG_BSWAP_OZ:
- break;
- case TCG_BSWAP_OS:
- /* If the sign bit may be 1, force all the bits above to 1. */
- if (z_mask & sign) {
- z_mask |= sign;
- s_mask = sign << 1;
- }
- break;
- default:
- /* The high bits are undefined: force all bits above the sign to 1. */
- z_mask |= sign << 1;
- s_mask = 0;
- break;
- }
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
- return fold_masks(ctx, op);
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_call(OptContext *ctx, TCGOp *op)
@@ -1540,12 +1778,44 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
+{
+ /* Canonicalize the comparison to put immediate second. */
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+ op->args[3] = tcg_swap_cond(op->args[3]);
+ }
+ return finish_folding(ctx, op);
+}
+
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
+{
+ /* If true and false values are the same, eliminate the cmp. */
+ if (args_are_copies(op->args[3], op->args[4])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
+ }
+
+ /* Canonicalize the comparison to put immediate second. */
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+ op->args[5] = tcg_swap_cond(op->args[5]);
+ }
+ /*
+ * Canonicalize the "false" input reg to match the destination,
+ * so that the tcg backend can implement "move if true".
+ */
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+ op->args[5] = tcg_invert_cond(op->args[5]);
+ }
+ return finish_folding(ctx, op);
+}
+
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask;
+ uint64_t z_mask, s_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);
- if (arg_is_const(op->args[1])) {
- uint64_t t = arg_info(op->args[1])->val;
+ if (ti_is_const(t1)) {
+ uint64_t t = ti_const_val(t1);
if (t != 0) {
t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -1564,79 +1834,79 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
- return false;
+ s_mask = ~z_mask;
+ z_mask |= t2->z_mask;
+ s_mask &= t2->s_mask;
+
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask;
+
if (fold_const1(ctx, op)) {
return true;
}
switch (ctx->type) {
case TCG_TYPE_I32:
- ctx->z_mask = 32 | 31;
+ z_mask = 32 | 31;
break;
case TCG_TYPE_I64:
- ctx->z_mask = 64 | 63;
+ z_mask = 64 | 63;
break;
default:
g_assert_not_reached();
}
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
- return false;
+ return fold_masks_z(ctx, op, z_mask);
}
static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
- TCGOpcode and_opc;
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);
+ int ofs = op->args[3];
+ int len = op->args[4];
+ int width = 8 * tcg_type_size(ctx->type);
+ uint64_t z_mask, o_mask, s_mask;
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t t1 = arg_info(op->args[1])->val;
- uint64_t t2 = arg_info(op->args[2])->val;
-
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
- }
-
- switch (ctx->type) {
- case TCG_TYPE_I32:
- and_opc = INDEX_op_and_i32;
- break;
- case TCG_TYPE_I64:
- and_opc = INDEX_op_and_i64;
- break;
- default:
- g_assert_not_reached();
+ if (ti_is_const(t1) && ti_is_const(t2)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ deposit64(ti_const_val(t1), ofs, len,
+ ti_const_val(t2)));
}
/* Inserting a value into zero at offset 0. */
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[1] = op->args[2];
op->args[2] = arg_new_constant(ctx, mask);
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
- return false;
+ return fold_and(ctx, op);
}
/* Inserting zero into a value. */
- if (arg_is_const_val(op->args[2], 0)) {
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+ if (ti_is_const_val(t2, 0)) {
+ uint64_t mask = deposit64(-1, ofs, len, 0);
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[2] = arg_new_constant(ctx, mask);
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
- return false;
+ return fold_and(ctx, op);
}
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
- op->args[3], op->args[4],
- arg_info(op->args[2])->z_mask);
- return false;
+ /* The s_mask from the top portion of the deposit is still valid. */
+ if (ofs + len == width) {
+ s_mask = t2->s_mask << ofs;
+ } else {
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
+ }
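+ /*
+ * For example, a 64-bit deposit of an 8-bit field at offset 0 leaves
+ * bits 8..63 of x untouched, so if x was known to be sign-extended
+ * from 32 bits, the result still is.
+ */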
+
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+ o_mask = deposit64(t1->o_mask, ofs, len, t2->o_mask);
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_divide(OptContext *ctx, TCGOp *op)
@@ -1645,24 +1915,24 @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 1)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_dup(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[1])) {
- uint64_t t = arg_info(op->args[1])->val;
+ uint64_t t = arg_const_val(op->args[1]);
t = dup_const(TCGOP_VECE(op), t);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
- arg_info(op->args[2])->val);
+ uint64_t t = deposit64(arg_const_val(op->args[1]), 32, 32,
+ arg_const_val(op->args[2]));
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
@@ -1670,152 +1940,144 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
op->opc = INDEX_op_dup_vec;
TCGOP_VECE(op) = MO_32;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
- if (fold_const2_commutative(ctx, op) ||
- fold_xi_to_x(ctx, op, -1) ||
- fold_xi_to_not(ctx, op, 0)) {
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1, *t2;
+
+ if (fold_const2_commutative(ctx, op)) {
return true;
}
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return false;
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ /* Fold eqv r,x,i to xor r,x,~i. */
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ op->opc = INDEX_op_xor;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ op->opc = INDEX_op_xor_vec;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
+ return fold_xor(ctx, op);
+ }
+
+ t1 = arg_info(op->args[1]);
+
+ z_mask = (t1->z_mask | ~t2->o_mask) & (t2->z_mask | ~t1->o_mask);
+ o_mask = ~(t1->z_mask | t2->z_mask) | (t1->o_mask & t2->o_mask);
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_extract(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask_old, z_mask;
+ uint64_t z_mask, o_mask, a_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
int pos = op->args[2];
int len = op->args[3];
- if (arg_is_const(op->args[1])) {
- uint64_t t;
-
- t = arg_info(op->args[1])->val;
- t = extract64(t, pos, len);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ extract64(ti_const_val(t1), pos, len));
}
- z_mask_old = arg_info(op->args[1])->z_mask;
- z_mask = extract64(z_mask_old, pos, len);
- if (pos == 0) {
- ctx->a_mask = z_mask_old ^ z_mask;
- }
- ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);
+ z_mask = extract64(t1->z_mask, pos, len);
+ o_mask = extract64(t1->o_mask, pos, len);
+ a_mask = pos ? -1 : t1->z_mask ^ z_mask;
- return fold_masks(ctx, op);
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, a_mask);
}
static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t v1 = arg_info(op->args[1])->val;
- uint64_t v2 = arg_info(op->args[2])->val;
- int shr = op->args[3];
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);
+ uint64_t z1 = t1->z_mask;
+ uint64_t z2 = t2->z_mask;
+ uint64_t o1 = t1->o_mask;
+ uint64_t o2 = t2->o_mask;
+ int shr = op->args[3];
- if (op->opc == INDEX_op_extract2_i64) {
- v1 >>= shr;
- v2 <<= 64 - shr;
- } else {
- v1 = (uint32_t)v1 >> shr;
- v2 = (uint64_t)((int32_t)v2 << (32 - shr));
- }
- return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
+ if (ctx->type == TCG_TYPE_I32) {
+ z1 = (uint32_t)z1 >> shr;
+ o1 = (uint32_t)o1 >> shr;
+ z2 = (uint64_t)((int32_t)z2 << (32 - shr));
+ o2 = (uint64_t)((int32_t)o2 << (32 - shr));
+ } else {
+ z1 >>= shr;
+ o1 >>= shr;
+ z2 <<= 64 - shr;
+ o2 <<= 64 - shr;
}
- return false;
+
+ return fold_masks_zo(ctx, op, z1 | z2, o1 | o2);
}
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
- uint64_t s_mask_old, s_mask, z_mask, sign;
- bool type_change = false;
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1;
if (fold_const1(ctx, op)) {
return true;
}
- z_mask = arg_info(op->args[1])->z_mask;
- s_mask = arg_info(op->args[1])->s_mask;
- s_mask_old = s_mask;
+ t1 = arg_info(op->args[1]);
+ z_mask = t1->z_mask;
+ o_mask = t1->o_mask;
+ s_mask = t1->s_mask;
switch (op->opc) {
- CASE_OP_32_64(ext8s):
- sign = INT8_MIN;
- z_mask = (uint8_t)z_mask;
- break;
- CASE_OP_32_64(ext16s):
- sign = INT16_MIN;
- z_mask = (uint16_t)z_mask;
- break;
case INDEX_op_ext_i32_i64:
- type_change = true;
- QEMU_FALLTHROUGH;
- case INDEX_op_ext32s_i64:
- sign = INT32_MIN;
- z_mask = (uint32_t)z_mask;
+ s_mask |= INT32_MIN;
+ z_mask = (int32_t)z_mask;
+ o_mask = (int32_t)o_mask;
break;
default:
g_assert_not_reached();
}
-
- if (z_mask & sign) {
- z_mask |= sign;
- }
- s_mask |= sign << 1;
-
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
- if (!type_change) {
- ctx->a_mask = s_mask & ~s_mask_old;
- }
-
- return fold_masks(ctx, op);
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_extu(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask_old, z_mask;
- bool type_change = false;
+ uint64_t z_mask, o_mask;
+ TempOptInfo *t1;
if (fold_const1(ctx, op)) {
return true;
}
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+ t1 = arg_info(op->args[1]);
+ z_mask = t1->z_mask;
+ o_mask = t1->o_mask;
switch (op->opc) {
- CASE_OP_32_64(ext8u):
- z_mask = (uint8_t)z_mask;
- break;
- CASE_OP_32_64(ext16u):
- z_mask = (uint16_t)z_mask;
- break;
case INDEX_op_extrl_i64_i32:
case INDEX_op_extu_i32_i64:
- type_change = true;
- QEMU_FALLTHROUGH;
- case INDEX_op_ext32u_i64:
z_mask = (uint32_t)z_mask;
+ o_mask = (uint32_t)o_mask;
break;
case INDEX_op_extrh_i64_i32:
- type_change = true;
z_mask >>= 32;
+ o_mask >>= 32;
break;
default:
g_assert_not_reached();
}
-
- ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);
- if (!type_change) {
- ctx->a_mask = z_mask_old ^ z_mask;
- }
- return fold_masks(ctx, op);
+ return fold_masks_zo(ctx, op, z_mask, o_mask);
}
static bool fold_mb(OptContext *ctx, TCGOp *op)
@@ -1849,8 +2111,15 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *tt, *ft;
int i;
+ /* If true and false values are the same, eliminate the cmp. */
+ if (args_are_copies(op->args[3], op->args[4])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
+ }
+
/*
* Canonicalize the "false" input reg to match the destination reg so
* that the tcg backend can implement a "move if true" operation.
@@ -1865,53 +2134,33 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
}
- ctx->z_mask = arg_info(op->args[3])->z_mask
- | arg_info(op->args[4])->z_mask;
- ctx->s_mask = arg_info(op->args[3])->s_mask
- & arg_info(op->args[4])->s_mask;
+ tt = arg_info(op->args[3]);
+ ft = arg_info(op->args[4]);
+ z_mask = tt->z_mask | ft->z_mask;
+ o_mask = tt->o_mask & ft->o_mask;
+ s_mask = tt->s_mask & ft->s_mask;
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
- uint64_t tv = arg_info(op->args[3])->val;
- uint64_t fv = arg_info(op->args[4])->val;
- TCGOpcode opc, negopc = 0;
+ if (ti_is_const(tt) && ti_is_const(ft)) {
+ uint64_t tv = ti_const_val(tt);
+ uint64_t fv = ti_const_val(ft);
TCGCond cond = op->args[5];
- switch (ctx->type) {
- case TCG_TYPE_I32:
- opc = INDEX_op_setcond_i32;
- if (TCG_TARGET_HAS_negsetcond_i32) {
- negopc = INDEX_op_negsetcond_i32;
- }
- tv = (int32_t)tv;
- fv = (int32_t)fv;
- break;
- case TCG_TYPE_I64:
- opc = INDEX_op_setcond_i64;
- if (TCG_TARGET_HAS_negsetcond_i64) {
- negopc = INDEX_op_negsetcond_i64;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
if (tv == 1 && fv == 0) {
- op->opc = opc;
+ op->opc = INDEX_op_setcond;
op->args[3] = cond;
} else if (fv == 1 && tv == 0) {
- op->opc = opc;
+ op->opc = INDEX_op_setcond;
+ op->args[3] = tcg_invert_cond(cond);
+ } else if (tv == -1 && fv == 0) {
+ op->opc = INDEX_op_negsetcond;
+ op->args[3] = cond;
+ } else if (fv == -1 && tv == 0) {
+ op->opc = INDEX_op_negsetcond;
op->args[3] = tcg_invert_cond(cond);
- } else if (negopc) {
- if (tv == -1 && fv == 0) {
- op->opc = negopc;
- op->args[3] = cond;
- } else if (fv == -1 && tv == 0) {
- op->opc = negopc;
- op->args[3] = tcg_invert_cond(cond);
- }
}
}
- return false;
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_mul(OptContext *ctx, TCGOp *op)
@@ -1921,7 +2170,7 @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 1)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
@@ -1930,7 +2179,7 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
fold_xi_to_i(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
@@ -1938,28 +2187,30 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
swap_commutative(op->args[0], &op->args[2], &op->args[3]);
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint64_t a = arg_info(op->args[2])->val;
- uint64_t b = arg_info(op->args[3])->val;
+ uint64_t a = arg_const_val(op->args[2]);
+ uint64_t b = arg_const_val(op->args[3]);
uint64_t h, l;
TCGArg rl, rh;
TCGOp *op2;
switch (op->opc) {
- case INDEX_op_mulu2_i32:
- l = (uint64_t)(uint32_t)a * (uint32_t)b;
- h = (int32_t)(l >> 32);
- l = (int32_t)l;
- break;
- case INDEX_op_muls2_i32:
- l = (int64_t)(int32_t)a * (int32_t)b;
- h = l >> 32;
- l = (int32_t)l;
- break;
- case INDEX_op_mulu2_i64:
- mulu64(&l, &h, a, b);
+ case INDEX_op_mulu2:
+ if (ctx->type == TCG_TYPE_I32) {
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
+ h = (int32_t)(l >> 32);
+ l = (int32_t)l;
+ } else {
+ mulu64(&l, &h, a, b);
+ }
break;
- case INDEX_op_muls2_i64:
- muls64(&l, &h, a, b);
+ case INDEX_op_muls2:
+ if (ctx->type == TCG_TYPE_I32) {
+ l = (int64_t)(int32_t)a * (int32_t)b;
+ h = l >> 32;
+ l = (int32_t)l;
+ } else {
+ muls64(&l, &h, a, b);
+ }
break;
default:
g_assert_not_reached();
@@ -1969,39 +2220,42 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
rh = op->args[1];
/* The proper opcode is supplied by tcg_opt_gen_mov. */
- op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
+ op2 = opt_insert_before(ctx, op, 0, 2);
tcg_opt_gen_movi(ctx, op, rl, l);
tcg_opt_gen_movi(ctx, op2, rh, h);
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1, *t2;
+
if (fold_const2_commutative(ctx, op) ||
fold_xi_to_not(ctx, op, -1)) {
return true;
}
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return false;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ z_mask = ~(t1->o_mask & t2->o_mask);
+ o_mask = ~(t1->z_mask & t2->z_mask);
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
{
/* Set to 1 all bits to the left of the rightmost. */
uint64_t z_mask = arg_info(op->args[1])->z_mask;
- ctx->z_mask = -(z_mask & -z_mask);
+ z_mask = -(z_mask & -z_mask);
- /*
- * Because of fold_sub_to_neg, we want to always return true,
- * via finish_folding.
- */
- finish_folding(ctx, op);
- return true;
+ return fold_masks_z(ctx, op, z_mask);
}
static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -2011,83 +2265,138 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1, *t2;
+
if (fold_const2_commutative(ctx, op) ||
fold_xi_to_not(ctx, op, 0)) {
return true;
}
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return false;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ z_mask = ~(t1->o_mask | t2->o_mask);
+ o_mask = ~(t1->z_mask | t2->z_mask);
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
static bool fold_not(OptContext *ctx, TCGOp *op)
{
+ TempOptInfo *t1;
+
if (fold_const1(ctx, op)) {
return true;
}
- ctx->s_mask = arg_info(op->args[1])->s_mask;
-
- /* Because of fold_to_not, we want to always return true, via finish. */
- finish_folding(ctx, op);
- return true;
+ t1 = arg_info(op->args[1]);
+ return fold_masks_zos(ctx, op, ~t1->o_mask, ~t1->z_mask, t1->s_mask);
}
static bool fold_or(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, o_mask, s_mask, a_mask;
+ TempOptInfo *t1, *t2;
+
if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
}
- ctx->z_mask = arg_info(op->args[1])->z_mask
- | arg_info(op->args[2])->z_mask;
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return fold_masks(ctx, op);
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ z_mask = t1->z_mask | t2->z_mask;
+ o_mask = t1->o_mask | t2->o_mask;
+ s_mask = t1->s_mask & t2->s_mask;
+
+ /* Affected bits are those not known one, masked by those known zero. */
+ a_mask = ~t1->o_mask & t2->z_mask;
+
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
}
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, -1) ||
- fold_xi_to_x(ctx, op, -1) ||
+ uint64_t z_mask, o_mask, s_mask, a_mask;
+ TempOptInfo *t1, *t2;
+
+ if (fold_const2(ctx, op)) {
+ return true;
+ }
+
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ /* Fold orc r,x,i to or r,x,~i. */
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ op->opc = INDEX_op_or;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ op->opc = INDEX_op_or_vec;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
+ return fold_or(ctx, op);
+ }
+ if (fold_xx_to_i(ctx, op, -1) ||
fold_ix_to_not(ctx, op, 0)) {
return true;
}
+ t1 = arg_info(op->args[1]);
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return false;
+ z_mask = t1->z_mask | ~t2->o_mask;
+ o_mask = t1->o_mask | ~t2->z_mask;
+ s_mask = t1->s_mask & t2->s_mask;
+
+ /* Affected bits are those not known one, masked by those known one. */
+ a_mask = ~t1->o_mask & ~t2->o_mask;
+
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
}
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
MemOp mop = get_memop(oi);
int width = 8 * memop_size(mop);
+ uint64_t z_mask = -1, s_mask = 0;
if (width < 64) {
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
- if (!(mop & MO_SIGN)) {
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
- ctx->s_mask <<= 1;
+ if (mop & MO_SIGN) {
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
+ } else {
+ z_mask = MAKE_64BIT_MASK(0, width);
}
}
/* Opcodes that touch guest memory stop the mb optimization. */
ctx->prev_mb = NULL;
- return false;
+
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
+}
+
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
+{
+ /* Opcodes that touch guest memory stop the mb optimization. */
+ ctx->prev_mb = NULL;
+ return finish_folding(ctx, op);
}
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
/* Opcodes that touch guest memory stop the mb optimization. */
ctx->prev_mb = NULL;
- return false;
+ return true;
}
static bool fold_remainder(OptContext *ctx, TCGOp *op)
@@ -2096,10 +2405,11 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
fold_xx_to_i(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
{
uint64_t a_zmask, b_val;
TCGCond cond;
@@ -2109,7 +2419,7 @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
}
a_zmask = arg_info(op->args[1])->z_mask;
- b_val = arg_info(op->args[2])->val;
+ b_val = arg_const_val(op->args[2]);
cond = op->args[3];
if (ctx->type == TCG_TYPE_I32) {
@@ -2164,47 +2474,27 @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
break;
}
if (convert) {
- TCGOpcode add_opc, xor_opc, neg_opc;
-
if (!inv && !neg) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
- switch (ctx->type) {
- case TCG_TYPE_I32:
- add_opc = INDEX_op_add_i32;
- neg_opc = INDEX_op_neg_i32;
- xor_opc = INDEX_op_xor_i32;
- break;
- case TCG_TYPE_I64:
- add_opc = INDEX_op_add_i64;
- neg_opc = INDEX_op_neg_i64;
- xor_opc = INDEX_op_xor_i64;
- break;
- default:
- g_assert_not_reached();
- }
-
if (!inv) {
- op->opc = neg_opc;
+ op->opc = INDEX_op_neg;
} else if (neg) {
- op->opc = add_opc;
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -1);
} else {
- op->opc = xor_opc;
+ op->opc = INDEX_op_xor;
op->args[2] = arg_new_constant(ctx, 1);
}
- return false;
+ return -1;
}
}
-
- return false;
+ return 0;
}
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
- TCGOpcode uext_opc = 0, sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
TCGOp *op2;
@@ -2217,79 +2507,52 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
}
src2 = op->args[2];
- val = arg_info(src2)->val;
+ val = arg_const_val(src2);
if (!is_power_of_2(val)) {
return;
}
sh = ctz64(val);
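+ /*
+ * For example, "setcond r,x,0x8,TSTNE" has sh == 3: testing bit 3
+ * becomes a one-bit extract at position 3 (sextract for negsetcond),
+ * or a shift right by 3 plus an AND with 1 when no extract is valid.
+ */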
- switch (ctx->type) {
- case TCG_TYPE_I32:
- and_opc = INDEX_op_and_i32;
- sub_opc = INDEX_op_sub_i32;
- xor_opc = INDEX_op_xor_i32;
- shr_opc = INDEX_op_shr_i32;
- neg_opc = INDEX_op_neg_i32;
- if (TCG_TARGET_extract_i32_valid(sh, 1)) {
- uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
- sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
- }
- break;
- case TCG_TYPE_I64:
- and_opc = INDEX_op_and_i64;
- sub_opc = INDEX_op_sub_i64;
- xor_opc = INDEX_op_xor_i64;
- shr_opc = INDEX_op_shr_i64;
- neg_opc = INDEX_op_neg_i64;
- if (TCG_TARGET_extract_i64_valid(sh, 1)) {
- uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
- sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
ret = op->args[0];
src1 = op->args[1];
inv = cond == TCG_COND_TSTEQ;
- if (sh && sext_opc && neg && !inv) {
- op->opc = sext_opc;
+ if (sh && neg && !inv && TCG_TARGET_sextract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_sextract;
op->args[1] = src1;
op->args[2] = sh;
op->args[3] = 1;
return;
- } else if (sh && uext_opc) {
- op->opc = uext_opc;
+ } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_extract;
op->args[1] = src1;
op->args[2] = sh;
op->args[3] = 1;
} else {
if (sh) {
- op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
+ op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
op2->args[0] = ret;
op2->args[1] = src1;
op2->args[2] = arg_new_constant(ctx, sh);
src1 = ret;
}
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[1] = src1;
op->args[2] = arg_new_constant(ctx, 1);
}
if (neg && inv) {
- op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
+ op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
op2->args[0] = ret;
op2->args[1] = ret;
- op2->args[2] = arg_new_constant(ctx, 1);
+ op2->args[2] = arg_new_constant(ctx, -1);
} else if (inv) {
- op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
+ op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
op2->args[0] = ret;
op2->args[1] = ret;
op2->args[2] = arg_new_constant(ctx, 1);
} else if (neg) {
- op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
+ op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
op2->args[0] = ret;
op2->args[1] = ret;
}
@@ -2303,14 +2566,15 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
- if (fold_setcond_zmask(ctx, op, false)) {
+ i = fold_setcond_zmask(ctx, op, false);
+ if (i > 0) {
return true;
}
- fold_setcond_tst_pow2(ctx, op, false);
+ if (i == 0) {
+ fold_setcond_tst_pow2(ctx, op, false);
+ }
- ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
- return false;
+ return fold_masks_z(ctx, op, 1);
}
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
@@ -2321,14 +2585,16 @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
}
- if (fold_setcond_zmask(ctx, op, true)) {
+ i = fold_setcond_zmask(ctx, op, true);
+ if (i > 0) {
return true;
}
- fold_setcond_tst_pow2(ctx, op, true);
+ if (i == 0) {
+ fold_setcond_tst_pow2(ctx, op, true);
+ }
/* Value is {0,-1} so all bits are repetitions of the sign. */
- ctx->s_mask = -1;
- return false;
+ return fold_masks_s(ctx, op, -1);
}
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
@@ -2398,20 +2664,18 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
do_setcond_low:
op->args[2] = op->args[3];
op->args[3] = cond;
- op->opc = INDEX_op_setcond_i32;
+ op->opc = INDEX_op_setcond;
return fold_setcond(ctx, op);
do_setcond_high:
op->args[1] = op->args[2];
op->args[2] = op->args[4];
op->args[3] = cond;
- op->opc = INDEX_op_setcond_i32;
+ op->opc = INDEX_op_setcond;
return fold_setcond(ctx, op);
}
- ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
- return false;
+ return fold_masks_z(ctx, op, 1);
do_setcond_const:
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
@@ -2419,37 +2683,30 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask, s_mask, s_mask_old;
+ uint64_t z_mask, o_mask, s_mask, a_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
int pos = op->args[2];
int len = op->args[3];
- if (arg_is_const(op->args[1])) {
- uint64_t t;
-
- t = arg_info(op->args[1])->val;
- t = sextract64(t, pos, len);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ sextract64(ti_const_val(t1), pos, len));
}
- z_mask = arg_info(op->args[1])->z_mask;
- z_mask = sextract64(z_mask, pos, len);
- ctx->z_mask = z_mask;
-
- s_mask_old = arg_info(op->args[1])->s_mask;
- s_mask = sextract64(s_mask_old, pos, len);
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
- ctx->s_mask = s_mask;
+ s_mask = t1->s_mask >> pos;
+ s_mask |= -1ull << (len - 1);
+ a_mask = pos ? -1 : s_mask & ~t1->s_mask;
- if (pos == 0) {
- ctx->a_mask = s_mask & ~s_mask_old;
- }
+ z_mask = sextract64(t1->z_mask, pos, len);
+ o_mask = sextract64(t1->o_mask, pos, len);
- return fold_masks(ctx, op);
+ return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
}
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
- uint64_t s_mask, z_mask, sign;
+ uint64_t s_mask, z_mask, o_mask;
+ TempOptInfo *t1, *t2;
if (fold_const2(ctx, op) ||
fold_ix_to_i(ctx, op, 0) ||
@@ -2457,43 +2714,43 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
return true;
}
- s_mask = arg_info(op->args[1])->s_mask;
- z_mask = arg_info(op->args[1])->z_mask;
-
- if (arg_is_const(op->args[2])) {
- int sh = arg_info(op->args[2])->val;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+ s_mask = t1->s_mask;
+ z_mask = t1->z_mask;
+ o_mask = t1->o_mask;
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+ if (ti_is_const(t2)) {
+ int sh = ti_const_val(t2);
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+ o_mask = do_constant_folding(op->opc, ctx->type, o_mask, sh);
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
- ctx->s_mask = smask_from_smask(s_mask);
- return fold_masks(ctx, op);
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
switch (op->opc) {
- CASE_OP_32_64(sar):
+ case INDEX_op_sar:
/*
* Arithmetic right shift will not reduce the number of
* input sign repetitions.
*/
- ctx->s_mask = s_mask;
- break;
- CASE_OP_32_64(shr):
+ return fold_masks_s(ctx, op, s_mask);
+ case INDEX_op_shr:
/*
* If the sign bit is known zero, then logical right shift
- * will not reduced the number of input sign repetitions.
+ * will not reduce the number of input sign repetitions.
*/
- sign = (s_mask & -s_mask) >> 1;
- if (sign && !(z_mask & sign)) {
- ctx->s_mask = s_mask;
+ if (~z_mask & -s_mask) {
+ return fold_masks_s(ctx, op, s_mask);
}
break;
default:
break;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
@@ -2501,17 +2758,14 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
TCGOpcode neg_op;
bool have_neg;
- if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
+ if (!arg_is_const_val(op->args[1], 0)) {
return false;
}
switch (ctx->type) {
case TCG_TYPE_I32:
- neg_op = INDEX_op_neg_i32;
- have_neg = true;
- break;
case TCG_TYPE_I64:
- neg_op = INDEX_op_neg_i64;
+ neg_op = INDEX_op_neg;
have_neg = true;
break;
case TCG_TYPE_V64:
@@ -2540,60 +2794,195 @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
fold_sub_to_neg(ctx, op)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}
static bool fold_sub(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
+ fold_sub_to_neg(ctx, op)) {
return true;
}
/* Fold sub r,x,i to add r,x,-i */
if (arg_is_const(op->args[2])) {
- uint64_t val = arg_info(op->args[2])->val;
+ uint64_t val = arg_const_val(op->args[2]);
- op->opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_add_i32 : INDEX_op_add_i64);
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -val);
}
- return false;
+ return finish_folding(ctx, op);
+}
+
+static void squash_prev_borrowout(OptContext *ctx, TCGOp *op)
+{
+ TempOptInfo *t2;
+
+ op = QTAILQ_PREV(op, link);
+ switch (op->opc) {
+ case INDEX_op_subbo:
+ op->opc = INDEX_op_sub;
+ fold_sub(ctx, op);
+ break;
+ case INDEX_op_subbio:
+ op->opc = INDEX_op_subbi;
+ break;
+ case INDEX_op_subb1o:
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ op->opc = INDEX_op_add;
+ op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
+ /* Perform other constant folding, if needed. */
+ fold_add(ctx, op);
+ } else {
+ TCGArg ret = op->args[0];
+ op->opc = INDEX_op_sub;
+ op = opt_insert_after(ctx, op, INDEX_op_add, 3);
+ op->args[0] = ret;
+ op->args[1] = ret;
+ op->args[2] = arg_new_constant(ctx, -1);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
-static bool fold_sub2(OptContext *ctx, TCGOp *op)
+static bool fold_subbi(OptContext *ctx, TCGOp *op)
{
- return fold_addsub2(ctx, op, false);
+ TempOptInfo *t2;
+ int borrow_in = ctx->carry_state;
+
+ if (borrow_in < 0) {
+ return finish_folding(ctx, op);
+ }
+ ctx->carry_state = -1;
+
+ squash_prev_borrowout(ctx, op);
+ if (borrow_in == 0) {
+ op->opc = INDEX_op_sub;
+ return fold_sub(ctx, op);
+ }
+
+ /*
+ * Propagate the known borrow-in into any constant, then negate to
+ * transform from sub to add. If there is no constant, emit a
+ * separate add -1.
+ */
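+ /* For example, with a known borrow-in, "subbi r,x,5" becomes "add r,x,-6". */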
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
+ } else {
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_sub, 3);
+
+ op2->args[0] = op->args[0];
+ op2->args[1] = op->args[1];
+ op2->args[2] = op->args[2];
+ fold_sub(ctx, op2);
+
+ op->args[1] = op->args[0];
+ op->args[2] = arg_new_constant(ctx, -1);
+ }
+ op->opc = INDEX_op_add;
+ return fold_add(ctx, op);
+}
+
+static bool fold_subbio(OptContext *ctx, TCGOp *op)
+{
+ TempOptInfo *t1, *t2;
+ int borrow_out = -1;
+
+ if (ctx->carry_state < 0) {
+ return finish_folding(ctx, op);
+ }
+
+ squash_prev_borrowout(ctx, op);
+ if (ctx->carry_state == 0) {
+ goto do_subbo;
+ }
+
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ /* Propagate the known borrow-in into a constant, if possible. */
+ if (ti_is_const(t2)) {
+ uint64_t max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
+ uint64_t v = ti_const_val(t2) & max;
+
+ if (v < max) {
+ op->args[2] = arg_new_constant(ctx, v + 1);
+ goto do_subbo;
+ }
+ /* Subtracting max + 1 produces a known borrow-out. */
+ borrow_out = 1;
+ }
+ if (ti_is_const(t1)) {
+ uint64_t v = ti_const_val(t1);
+ if (v != 0) {
+ op->args[2] = arg_new_constant(ctx, v - 1);
+ goto do_subbo;
+ }
+ }
+
+ /* Adjust the opcode to remember the known carry-in. */
+ op->opc = INDEX_op_subb1o;
+ ctx->carry_state = borrow_out;
+ return finish_folding(ctx, op);
+
+ do_subbo:
+ op->opc = INDEX_op_subbo;
+ return fold_subbo(ctx, op);
+}
+
+static bool fold_subbo(OptContext *ctx, TCGOp *op)
+{
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);
+ int borrow_out = -1;
+
+ if (ti_is_const(t2)) {
+ uint64_t v2 = ti_const_val(t2);
+ if (v2 == 0) {
+ borrow_out = 0;
+ } else if (ti_is_const(t1)) {
+ uint64_t v1 = ti_const_val(t1);
+ borrow_out = v1 < v2;
+ }
+ }
+ ctx->carry_state = borrow_out;
+ return finish_folding(ctx, op);
}
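
The borrow chain above keeps ctx->carry_state at -1 while the borrow is unknown and at 0 or 1 once it is known; fold_subbo derives that value from constant operands. A minimal standalone sketch of the rule, using an invented helper name (not part of the patch):

    #include <stdint.h>
    #include <stdbool.h>

    /* Borrow-out of "a - b": known 0 when b is the constant 0, known from an
       unsigned compare when both operands are constant, otherwise unknown. */
    static int known_borrow_out(bool a_const, uint64_t a,
                                bool b_const, uint64_t b)
    {
        if (b_const && b == 0) {
            return 0;
        }
        if (a_const && b_const) {
            return a < b;
        }
        return -1;   /* mirrors ctx->carry_state == -1 */
    }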
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask = -1, s_mask = 0;
+
/* We can't do any folding with a load, but we can record bits. */
switch (op->opc) {
- CASE_OP_32_64(ld8s):
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+ case INDEX_op_ld8s:
+ s_mask = INT8_MIN;
break;
- CASE_OP_32_64(ld8u):
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
+ case INDEX_op_ld8u:
+ z_mask = MAKE_64BIT_MASK(0, 8);
break;
- CASE_OP_32_64(ld16s):
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+ case INDEX_op_ld16s:
+ s_mask = INT16_MIN;
break;
- CASE_OP_32_64(ld16u):
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
+ case INDEX_op_ld16u:
+ z_mask = MAKE_64BIT_MASK(0, 16);
break;
- case INDEX_op_ld32s_i64:
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+ case INDEX_op_ld32s:
+ s_mask = INT32_MIN;
break;
- case INDEX_op_ld32u_i64:
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
+ case INDEX_op_ld32u:
+ z_mask = MAKE_64BIT_MASK(0, 32);
break;
default:
g_assert_not_reached();
}
- return false;
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}
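
The mask convention relied on here: z_mask marks bits that may be nonzero, while s_mask is written as a sign-extended constant (INT8_MIN, INT16_MIN, ...) whose set bits are guaranteed copies of the sign bit. A small sketch under that assumption, with an invented helper name:

    #include <stdint.h>
    #include <stdbool.h>

    /* Masks for a load of 'bits' (8, 16 or 32) into a 64-bit temp. */
    static void load_masks(unsigned bits, bool sign_extended,
                           uint64_t *z_mask, uint64_t *s_mask)
    {
        if (sign_extended) {
            *z_mask = -1;                            /* any bit may be set */
            *s_mask = -((uint64_t)1 << (bits - 1));  /* e.g. INT8_MIN for 8 */
        } else {
            *z_mask = ((uint64_t)1 << bits) - 1;     /* high bits known zero */
            *s_mask = 0;
        }
    }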
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
@@ -2603,7 +2992,7 @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
TCGType type;
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
- return false;
+ return finish_folding(ctx, op);
}
type = ctx->type;
@@ -2626,23 +3015,20 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
remove_mem_copy_all(ctx);
- return false;
+ return true;
}
switch (op->opc) {
- CASE_OP_32_64(st8):
+ case INDEX_op_st8:
lm1 = 0;
break;
- CASE_OP_32_64(st16):
+ case INDEX_op_st16:
lm1 = 1;
break;
- case INDEX_op_st32_i64:
- case INDEX_op_st_i32:
+ case INDEX_op_st32:
lm1 = 3;
break;
- case INDEX_op_st_i64:
- lm1 = 7;
- break;
+ case INDEX_op_st:
case INDEX_op_st_vec:
lm1 = tcg_type_size(ctx->type) - 1;
break;
@@ -2650,7 +3036,7 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
g_assert_not_reached();
}
remove_mem_copy_in(ctx, ofs, ofs + lm1);
- return false;
+ return true;
}
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
@@ -2660,8 +3046,7 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
TCGType type;
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
- fold_tcg_st(ctx, op);
- return false;
+ return fold_tcg_st(ctx, op);
}
src = arg_temp(op->args[0]);
@@ -2683,11 +3068,14 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
last = ofs + tcg_type_size(type) - 1;
remove_mem_copy_in(ctx, ofs, last);
record_mem_copy(ctx, type, src, ofs, last);
- return false;
+ return true;
}
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, o_mask, s_mask;
+ TempOptInfo *t1, *t2;
+
if (fold_const2_commutative(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
@@ -2695,11 +3083,14 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
return true;
}
- ctx->z_mask = arg_info(op->args[1])->z_mask
- | arg_info(op->args[2])->z_mask;
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return fold_masks(ctx, op);
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+
+ z_mask = (t1->z_mask | t2->z_mask) & ~(t1->o_mask & t2->o_mask);
+ o_mask = (t1->o_mask & ~t2->z_mask) | (t2->o_mask & ~t1->z_mask);
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}
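
The three-mask algebra used for xor above can be checked in isolation; a minimal sketch with invented names for the known-one (o) and may-be-one (z) sets:

    #include <stdint.h>

    typedef struct { uint64_t z, o; } KnownBits;   /* z: may be 1, o: known 1 */

    static KnownBits xor_bits(KnownBits a, KnownBits b)
    {
        KnownBits r;
        /* Known one: exactly one side known 1 while the other is known 0. */
        r.o = (a.o & ~b.z) | (b.o & ~a.z);
        /* May be one: either side may be 1, except where both are known 1. */
        r.z = (a.z | b.z) & ~(a.o & b.o);
        return r;
    }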
/* Propagate constants and copies, fold constant expressions. */
@@ -2737,62 +3128,59 @@ void tcg_optimize(TCGContext *s)
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
/* Pre-compute the type of the operation. */
- if (def->flags & TCG_OPF_VECTOR) {
- ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
- } else if (def->flags & TCG_OPF_64BIT) {
- ctx.type = TCG_TYPE_I64;
- } else {
- ctx.type = TCG_TYPE_I32;
- }
-
- /* Assume all bits affected, no bits known zero, no sign reps. */
- ctx.a_mask = -1;
- ctx.z_mask = -1;
- ctx.s_mask = 0;
+ ctx.type = TCGOP_TYPE(op);
/*
* Process each opcode.
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
done = fold_add(&ctx, op);
break;
case INDEX_op_add_vec:
done = fold_add_vec(&ctx, op);
break;
- CASE_OP_32_64(add2):
- done = fold_add2(&ctx, op);
+ case INDEX_op_addci:
+ done = fold_addci(&ctx, op);
+ break;
+ case INDEX_op_addcio:
+ done = fold_addcio(&ctx, op);
+ break;
+ case INDEX_op_addco:
+ done = fold_addco(&ctx, op);
break;
- CASE_OP_32_64_VEC(and):
+ case INDEX_op_and:
+ case INDEX_op_and_vec:
done = fold_and(&ctx, op);
break;
- CASE_OP_32_64_VEC(andc):
+ case INDEX_op_andc:
+ case INDEX_op_andc_vec:
done = fold_andc(&ctx, op);
break;
- CASE_OP_32_64(brcond):
+ case INDEX_op_brcond:
done = fold_brcond(&ctx, op);
break;
case INDEX_op_brcond2_i32:
done = fold_brcond2(&ctx, op);
break;
- CASE_OP_32_64(bswap16):
- CASE_OP_32_64(bswap32):
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap16:
+ case INDEX_op_bswap32:
+ case INDEX_op_bswap64:
done = fold_bswap(&ctx, op);
break;
- CASE_OP_32_64(clz):
- CASE_OP_32_64(ctz):
+ case INDEX_op_clz:
+ case INDEX_op_ctz:
done = fold_count_zeros(&ctx, op);
break;
- CASE_OP_32_64(ctpop):
+ case INDEX_op_ctpop:
done = fold_ctpop(&ctx, op);
break;
- CASE_OP_32_64(deposit):
+ case INDEX_op_deposit:
done = fold_deposit(&ctx, op);
break;
- CASE_OP_32_64(div):
- CASE_OP_32_64(divu):
+ case INDEX_op_divs:
+ case INDEX_op_divu:
done = fold_divide(&ctx, op);
break;
case INDEX_op_dup_vec:
@@ -2801,149 +3189,162 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
done = fold_dup2(&ctx, op);
break;
- CASE_OP_32_64_VEC(eqv):
+ case INDEX_op_eqv:
+ case INDEX_op_eqv_vec:
done = fold_eqv(&ctx, op);
break;
- CASE_OP_32_64(extract):
+ case INDEX_op_extract:
done = fold_extract(&ctx, op);
break;
- CASE_OP_32_64(extract2):
+ case INDEX_op_extract2:
done = fold_extract2(&ctx, op);
break;
- CASE_OP_32_64(ext8s):
- CASE_OP_32_64(ext16s):
- case INDEX_op_ext32s_i64:
case INDEX_op_ext_i32_i64:
done = fold_exts(&ctx, op);
break;
- CASE_OP_32_64(ext8u):
- CASE_OP_32_64(ext16u):
- case INDEX_op_ext32u_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
done = fold_extu(&ctx, op);
break;
- CASE_OP_32_64(ld8s):
- CASE_OP_32_64(ld8u):
- CASE_OP_32_64(ld16s):
- CASE_OP_32_64(ld16u):
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
+ case INDEX_op_ld8s:
+ case INDEX_op_ld8u:
+ case INDEX_op_ld16s:
+ case INDEX_op_ld16u:
+ case INDEX_op_ld32s:
+ case INDEX_op_ld32u:
done = fold_tcg_ld(&ctx, op);
break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld_i64:
+ case INDEX_op_ld:
case INDEX_op_ld_vec:
done = fold_tcg_ld_memcopy(&ctx, op);
break;
- CASE_OP_32_64(st8):
- CASE_OP_32_64(st16):
- case INDEX_op_st32_i64:
+ case INDEX_op_st8:
+ case INDEX_op_st16:
+ case INDEX_op_st32:
done = fold_tcg_st(&ctx, op);
break;
- case INDEX_op_st_i32:
- case INDEX_op_st_i64:
+ case INDEX_op_st:
case INDEX_op_st_vec:
done = fold_tcg_st_memcopy(&ctx, op);
break;
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
- CASE_OP_32_64_VEC(mov):
+ case INDEX_op_mov:
+ case INDEX_op_mov_vec:
done = fold_mov(&ctx, op);
break;
- CASE_OP_32_64(movcond):
+ case INDEX_op_movcond:
done = fold_movcond(&ctx, op);
break;
- CASE_OP_32_64(mul):
+ case INDEX_op_mul:
done = fold_mul(&ctx, op);
break;
- CASE_OP_32_64(mulsh):
- CASE_OP_32_64(muluh):
+ case INDEX_op_mulsh:
+ case INDEX_op_muluh:
done = fold_mul_highpart(&ctx, op);
break;
- CASE_OP_32_64(muls2):
- CASE_OP_32_64(mulu2):
+ case INDEX_op_muls2:
+ case INDEX_op_mulu2:
done = fold_multiply2(&ctx, op);
break;
- CASE_OP_32_64_VEC(nand):
+ case INDEX_op_nand:
+ case INDEX_op_nand_vec:
done = fold_nand(&ctx, op);
break;
- CASE_OP_32_64(neg):
+ case INDEX_op_neg:
done = fold_neg(&ctx, op);
break;
- CASE_OP_32_64_VEC(nor):
+ case INDEX_op_nor:
+ case INDEX_op_nor_vec:
done = fold_nor(&ctx, op);
break;
- CASE_OP_32_64_VEC(not):
+ case INDEX_op_not:
+ case INDEX_op_not_vec:
done = fold_not(&ctx, op);
break;
- CASE_OP_32_64_VEC(or):
+ case INDEX_op_or:
+ case INDEX_op_or_vec:
done = fold_or(&ctx, op);
break;
- CASE_OP_32_64_VEC(orc):
+ case INDEX_op_orc:
+ case INDEX_op_orc_vec:
done = fold_orc(&ctx, op);
break;
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- done = fold_qemu_ld(&ctx, op);
- break;
- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_ld:
+ done = fold_qemu_ld_1reg(&ctx, op);
+ break;
+ case INDEX_op_qemu_ld2:
+ done = fold_qemu_ld_2reg(&ctx, op);
+ break;
+ case INDEX_op_qemu_st:
+ case INDEX_op_qemu_st2:
done = fold_qemu_st(&ctx, op);
break;
- CASE_OP_32_64(rem):
- CASE_OP_32_64(remu):
+ case INDEX_op_rems:
+ case INDEX_op_remu:
done = fold_remainder(&ctx, op);
break;
- CASE_OP_32_64(rotl):
- CASE_OP_32_64(rotr):
- CASE_OP_32_64(sar):
- CASE_OP_32_64(shl):
- CASE_OP_32_64(shr):
+ case INDEX_op_rotl:
+ case INDEX_op_rotr:
+ case INDEX_op_sar:
+ case INDEX_op_shl:
+ case INDEX_op_shr:
done = fold_shift(&ctx, op);
break;
- CASE_OP_32_64(setcond):
+ case INDEX_op_setcond:
done = fold_setcond(&ctx, op);
break;
- CASE_OP_32_64(negsetcond):
+ case INDEX_op_negsetcond:
done = fold_negsetcond(&ctx, op);
break;
case INDEX_op_setcond2_i32:
done = fold_setcond2(&ctx, op);
break;
- CASE_OP_32_64(sextract):
+ case INDEX_op_cmp_vec:
+ done = fold_cmp_vec(&ctx, op);
+ break;
+ case INDEX_op_cmpsel_vec:
+ done = fold_cmpsel_vec(&ctx, op);
+ break;
+ case INDEX_op_bitsel_vec:
+ done = fold_bitsel_vec(&ctx, op);
+ break;
+ case INDEX_op_sextract:
done = fold_sextract(&ctx, op);
break;
- CASE_OP_32_64(sub):
+ case INDEX_op_sub:
done = fold_sub(&ctx, op);
break;
+ case INDEX_op_subbi:
+ done = fold_subbi(&ctx, op);
+ break;
+ case INDEX_op_subbio:
+ done = fold_subbio(&ctx, op);
+ break;
+ case INDEX_op_subbo:
+ done = fold_subbo(&ctx, op);
+ break;
case INDEX_op_sub_vec:
done = fold_sub_vec(&ctx, op);
break;
- CASE_OP_32_64(sub2):
- done = fold_sub2(&ctx, op);
- break;
- CASE_OP_32_64_VEC(xor):
+ case INDEX_op_xor:
+ case INDEX_op_xor_vec:
done = fold_xor(&ctx, op);
break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+ finish_ebb(&ctx);
+ done = true;
+ break;
default:
+ done = finish_folding(&ctx, op);
break;
}
-
- if (!done) {
- finish_folding(&ctx, op);
- }
+ tcg_debug_assert(done);
}
}
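
With the final tcg_debug_assert(done), every opcode case must report completion, so a fold helper now always ends by returning either true directly or the result of finish_folding(). A hypothetical template mirroring the helpers in this file (not a function from the patch):

    static bool fold_example(OptContext *ctx, TCGOp *op)
    {
        if (fold_const2(ctx, op)) {
            return true;                    /* op already replaced */
        }
        /* ... opcode-specific simplifications ... */
        return finish_folding(ctx, op);     /* always record output masks */
    }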
diff --git a/tcg/perf.c b/tcg/perf.c
index 412a987..8fa5fa9 100644
--- a/tcg/perf.c
+++ b/tcg/perf.c
@@ -313,7 +313,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
const void *start)
{
struct debuginfo_query *q;
- size_t insn, start_words;
+ size_t insn;
uint64_t *gen_insn_data;
if (!perfmap && !jitdump) {
@@ -329,13 +329,12 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
/* Query debuginfo for each guest instruction. */
gen_insn_data = tcg_ctx->gen_insn_data;
- start_words = tcg_ctx->insn_start_words;
for (insn = 0; insn < tb->icount; insn++) {
/* FIXME: This replicates the restore_state_to_opc() logic. */
- q[insn].address = gen_insn_data[insn * start_words + 0];
+ q[insn].address = gen_insn_data[insn * INSN_START_WORDS + 0];
if (tb_cflags(tb) & CF_PCREL) {
- q[insn].address |= (guest_pc & qemu_target_page_mask());
+ q[insn].address |= guest_pc & TARGET_PAGE_MASK;
}
q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0);
}
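
The loop above indexes gen_insn_data as a flat array with a fixed INSN_START_WORDS entries per guest instruction, word 0 holding the (possibly page-relative) guest PC. A tiny sketch of that addressing, assuming only that layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Word 'word' recorded for guest instruction 'insn' within one TB. */
    static inline uint64_t insn_start_word(const uint64_t *gen_insn_data,
                                           size_t insn, size_t word)
    {
        return gen_insn_data[insn * INSN_START_WORDS + word];
    }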
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index 9f99bde..da7a383 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -15,28 +15,29 @@ C_O0_I2(r, rC)
C_O0_I2(v, r)
C_O0_I3(r, r, r)
C_O0_I3(o, m, r)
-C_O0_I4(r, r, ri, ri)
+C_O0_I4(r, r, rU, rC)
C_O0_I4(r, r, r, r)
C_O1_I1(r, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I1(v, vr)
C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rI, ri)
-C_O1_I2(r, rI, rT)
+C_O1_I2(r, rI, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rT)
C_O1_I2(r, r, rU)
+C_O1_I2(r, r, rZM)
C_O1_I2(r, r, rZW)
+C_O1_I2(r, rI, rN)
+C_O1_I2(r, rZM, rZM)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
+C_O1_I4(v, v, v, vZM, v)
C_O1_I4(r, r, rC, rZ, rZ)
-C_O1_I4(r, r, r, ri, ri)
+C_O1_I4(r, r, r, rU, rC)
C_O2_I1(r, r, r)
C_N1O1_I1(o, m, r)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rI, rZM, r, r)
-C_O2_I4(r, r, r, r, rI, rZM)
diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h
index 16b6872..faf92da 100644
--- a/tcg/ppc/tcg-target-con-str.h
+++ b/tcg/ppc/tcg-target-con-str.h
@@ -19,6 +19,7 @@ REGS('v', ALL_VECTOR_REGS)
CONST('C', TCG_CT_CONST_CMP)
CONST('I', TCG_CT_CONST_S16)
CONST('M', TCG_CT_CONST_MONE)
+CONST('N', TCG_CT_CONST_N16)
CONST('T', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U32)
CONST('W', TCG_CT_CONST_WSZ)
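
The new 'N' letter accepts constants whose negation fits a signed 16-bit immediate, which lets a subtract-by-constant be emitted as addi with the negated value. A standalone sketch of the acceptance test, mirroring the TCG_CT_CONST_N16 check added in tcg-target.c.inc:

    #include <stdint.h>
    #include <stdbool.h>

    static bool matches_N16(int64_t sval)
    {
        /* -sval must survive a round trip through int16_t. */
        return -sval == (int16_t)-sval;
    }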
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
new file mode 100644
index 0000000..81ec5ae
--- /dev/null
+++ b/tcg/ppc/tcg-target-has.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#include "host/cpuinfo.h"
+
+#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06)
+#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07)
+#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0)
+#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1)
+#define have_altivec (cpuinfo & CPUINFO_ALTIVEC)
+#define have_vsx (cpuinfo & CPUINFO_VSX)
+
+/* optional instructions */
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#endif
+
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
+ (TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
+
+#define TCG_TARGET_HAS_tst 1
+
+/*
+ * While technically Altivec could support V64, it has no 64-bit store
+ * instruction and substituting two 32-bit stores makes the generated
+ * code quite large.
+ */
+#define TCG_TARGET_HAS_v64 have_vsx
+#define TCG_TARGET_HAS_v128 have_altivec
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec have_isa_2_07
+#define TCG_TARGET_HAS_nand_vec have_isa_2_07
+#define TCG_TARGET_HAS_nor_vec 1
+#define TCG_TARGET_HAS_eqv_vec have_isa_2_07
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec have_isa_3_00
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 1
+#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec have_vsx
+#define TCG_TARGET_HAS_cmpsel_vec 1
+#define TCG_TARGET_HAS_tst_vec 0
+
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
+
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
+ return true;
+ }
+ return ofs == 0 && (len == 8 || len == 16);
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+#endif
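
A few sample inputs for the sextract predicate above, assuming the usual TCG type and assert headers are in scope (test function invented for illustration):

    #include <assert.h>

    static void sextract_valid_examples(void)
    {
        assert(tcg_target_sextract_valid(TCG_TYPE_I64, 16, 16)); /* field ends at bit 32 */
        assert(tcg_target_sextract_valid(TCG_TYPE_I32, 0, 16));  /* extsh */
        assert(!tcg_target_sextract_valid(TCG_TYPE_I32, 8, 8));  /* needs a shift first */
    }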
diff --git a/tcg/ppc/tcg-target-mo.h b/tcg/ppc/tcg-target-mo.h
new file mode 100644
index 0000000..98bfe03
--- /dev/null
+++ b/tcg/ppc/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/ppc/tcg-target-opc.h.inc b/tcg/ppc/tcg-target-opc.h.inc
new file mode 100644
index 0000000..c363583
--- /dev/null
+++ b/tcg/ppc/tcg-target-opc.h.inc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 Linaro Limited
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+
+DEF(ppc_mrgh_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(ppc_mrgl_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(ppc_msum_vec, 1, 3, 0, TCG_OPF_VECTOR)
+DEF(ppc_muleu_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(ppc_mulou_vec, 1, 2, 0, TCG_OPF_VECTOR)
+DEF(ppc_pkum_vec, 1, 2, 0, TCG_OPF_VECTOR)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 7f3829b..b8b23d4 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -23,8 +23,6 @@
*/
#include "elf.h"
-#include "../tcg-pool.c.inc"
-#include "../tcg-ldst.c.inc"
/*
* Standardize on the _CALL_FOO symbols used by GCC:
@@ -91,14 +89,15 @@
/* Shorthand for size of a register. */
#define SZR (TCG_TARGET_REG_BITS / 8)
-#define TCG_CT_CONST_S16 0x100
-#define TCG_CT_CONST_U16 0x200
-#define TCG_CT_CONST_S32 0x400
-#define TCG_CT_CONST_U32 0x800
-#define TCG_CT_CONST_ZERO 0x1000
-#define TCG_CT_CONST_MONE 0x2000
-#define TCG_CT_CONST_WSZ 0x4000
-#define TCG_CT_CONST_CMP 0x8000
+#define TCG_CT_CONST_S16 0x00100
+#define TCG_CT_CONST_U16 0x00200
+#define TCG_CT_CONST_N16 0x00400
+#define TCG_CT_CONST_S32 0x00800
+#define TCG_CT_CONST_U32 0x01000
+#define TCG_CT_CONST_ZERO 0x02000
+#define TCG_CT_CONST_MONE 0x04000
+#define TCG_CT_CONST_WSZ 0x08000
+#define TCG_CT_CONST_CMP 0x10000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
@@ -325,9 +324,11 @@ static bool tcg_target_const_match(int64_t sval, int ct,
if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) {
return 1;
}
- if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32
- ? mask_operand(uval, &mb, &me)
- : mask64_operand(uval << clz64(uval), &mb, &me)) {
+ if (uval == (uint32_t)uval && mask_operand(uval, &mb, &me)) {
+ return 1;
+ }
+ if (TCG_TARGET_REG_BITS == 64 &&
+ mask64_operand(uval << clz64(uval), &mb, &me)) {
return 1;
}
return 0;
@@ -342,6 +343,9 @@ static bool tcg_target_const_match(int64_t sval, int ct,
if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
return 1;
}
+ if ((ct & TCG_CT_CONST_N16) && -sval == (int16_t)-sval) {
+ return 1;
+ }
if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
return 1;
}
@@ -909,7 +913,9 @@ static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
int sh, int mb, int me, bool rc)
{
- tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
+ tcg_debug_assert((mb & 0x1f) == mb);
+ tcg_debug_assert((me & 0x1f) == me);
+ tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh & 0x1f) | MB(mb) | ME(me) | rc);
}
static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
@@ -1010,111 +1016,6 @@ static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
}
-static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
-{
- TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
-
- if (have_isa_3_10) {
- tcg_out32(s, BRH | RA(dst) | RS(src));
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
- } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext16u(s, dst, dst);
- }
- return;
- }
-
- /*
- * In the following,
- * dep(a, b, m) -> (a & ~m) | (b & m)
- *
- * Begin with: src = xxxxabcd
- */
- /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
- tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
- /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
- tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
-
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
- } else {
- tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
- }
-}
-
-static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
-{
- TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
-
- if (have_isa_3_10) {
- tcg_out32(s, BRW | RA(dst) | RS(src));
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, dst, dst);
- } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, dst, dst);
- }
- return;
- }
-
- /*
- * Stolen from gcc's builtin_bswap32.
- * In the following,
- * dep(a, b, m) -> (a & ~m) | (b & m)
- *
- * Begin with: src = xxxxabcd
- */
- /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
- tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
- /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
- tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
- /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
- tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
-
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, dst, tmp);
- } else {
- tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
- }
-}
-
-static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
-{
- TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
- TCGReg t1 = dst == src ? dst : TCG_REG_R0;
-
- if (have_isa_3_10) {
- tcg_out32(s, BRD | RA(dst) | RS(src));
- return;
- }
-
- /*
- * In the following,
- * dep(a, b, m) -> (a & ~m) | (b & m)
- *
- * Begin with: src = abcdefgh
- */
- /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
- tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
- /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
- tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
- /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
- tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
-
- /* t0 = rol64(t0, 32) = hgfe0000 */
- tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
- /* t1 = rol64(src, 32) = efghabcd */
- tcg_out_rld(s, RLDICL, t1, src, 32, 0);
-
- /* t0 = dep(t0, rol32(t1, 24), 0xffffffff) = hgfebcda */
- tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
- /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
- tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
- /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
- tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
-
- tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
-}
-
/* Emit a move into ret of arg, if it can be done in one insn. */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
@@ -1749,8 +1650,6 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
if (type == TCG_TYPE_I32) {
arg2 = (uint32_t)arg2;
- } else if (arg2 == (uint32_t)arg2) {
- type = TCG_TYPE_I32;
}
if ((arg2 & ~0xffff) == 0) {
@@ -1761,12 +1660,11 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16));
return;
}
- if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
- if (mask_operand(arg2, &mb, &me)) {
- tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
- return;
- }
- } else {
+ if (arg2 == (uint32_t)arg2 && mask_operand(arg2, &mb, &me)) {
+ tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
+ return;
+ }
+ if (TCG_TARGET_REG_BITS == 64) {
int sh = clz64(arg2);
if (mask64_operand(arg2 << sh, &mb, &me)) {
tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc);
@@ -1778,9 +1676,8 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
- int const_arg2, int cr, TCGType type)
+ bool const_arg2, int cr, TCGType type)
{
- int imm;
uint32_t op;
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
@@ -1797,18 +1694,15 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
case TCG_COND_EQ:
case TCG_COND_NE:
if (const_arg2) {
- if ((int16_t) arg2 == arg2) {
+ if ((int16_t)arg2 == arg2) {
op = CMPI;
- imm = 1;
- break;
- } else if ((uint16_t) arg2 == arg2) {
- op = CMPLI;
- imm = 1;
break;
}
+ tcg_debug_assert((uint16_t)arg2 == arg2);
+ op = CMPLI;
+ break;
}
op = CMPL;
- imm = 0;
break;
case TCG_COND_TSTEQ:
@@ -1822,14 +1716,11 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
case TCG_COND_LE:
case TCG_COND_GT:
if (const_arg2) {
- if ((int16_t) arg2 == arg2) {
- op = CMPI;
- imm = 1;
- break;
- }
+ tcg_debug_assert((int16_t)arg2 == arg2);
+ op = CMPI;
+ break;
}
op = CMP;
- imm = 0;
break;
case TCG_COND_LTU:
@@ -1837,30 +1728,20 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
case TCG_COND_LEU:
case TCG_COND_GTU:
if (const_arg2) {
- if ((uint16_t) arg2 == arg2) {
- op = CMPLI;
- imm = 1;
- break;
- }
+ tcg_debug_assert((uint16_t)arg2 == arg2);
+ op = CMPLI;
+ break;
}
op = CMPL;
- imm = 0;
break;
default:
g_assert_not_reached();
}
op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
-
- if (imm) {
- tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
- } else {
- if (const_arg2) {
- tcg_out_movi(s, type, TCG_REG_R0, arg2);
- arg2 = TCG_REG_R0;
- }
- tcg_out32(s, op | RA(arg1) | RB(arg2));
- }
+ op |= RA(arg1);
+ op |= const_arg2 ? arg2 & 0xffff : RB(arg2);
+ tcg_out32(s, op);
}
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
@@ -1927,8 +1808,8 @@ static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
}
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGArg arg0, TCGArg arg1, TCGArg arg2,
- int const_arg2, bool neg)
+ TCGReg arg0, TCGReg arg1, TCGArg arg2,
+ bool const_arg2, bool neg)
{
int sh;
bool inv;
@@ -2073,6 +1954,54 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ uint32_t insn = B;
+
+ if (l->has_value) {
+ insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
+ } else {
+ tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
+ }
+ tcg_out32(s, insn);
+}
+
static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
{
tcg_out32(s, tcg_to_bc[cond] | bd);
@@ -2089,17 +2018,29 @@ static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l)
tcg_out_bc(s, cond, bd);
}
-static void tcg_out_brcond(TCGContext *s, TCGCond cond,
- TCGArg arg1, TCGArg arg2, int const_arg2,
- TCGLabel *l, TCGType type)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
+ tcg_out_cmp(s, cond, arg1, arg2, false, 0, type);
tcg_out_bc_lab(s, cond, l);
}
-static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
- TCGArg v2, bool const_c2)
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
+{
+ tcg_out_cmp(s, cond, arg1, arg2, true, 0, type);
+ tcg_out_bc_lab(s, cond, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
{
/* If for some reason both inputs are zero, don't produce bad code. */
if (v1 == 0 && v2 == 0) {
@@ -2145,6 +2086,11 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rZ, rZ),
+ .out = tgen_movcond,
+};
+
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
@@ -2171,8 +2117,8 @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
}
}
-static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
- const int *const_args)
+static void tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool blconst, TCGArg bh, bool bhconst)
{
static const struct { uint8_t bit1, bit2; } bits[] = {
[TCG_COND_LT ] = { CR_LT, CR_LT },
@@ -2185,18 +2131,9 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
[TCG_COND_GEU] = { CR_GT, CR_LT },
};
- TCGCond cond = args[4], cond2;
- TCGArg al, ah, bl, bh;
- int blconst, bhconst;
+ TCGCond cond2;
int op, bit1, bit2;
- al = args[0];
- ah = args[1];
- bl = args[2];
- bh = args[3];
- blconst = const_args[2];
- bhconst = const_args[3];
-
switch (cond) {
case TCG_COND_EQ:
op = CRAND;
@@ -2248,22 +2185,42 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
}
}
-static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
- const int *const_args)
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh)
{
- tcg_out_cmp2(s, args + 1, const_args + 1);
+ tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
- tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
+ tcg_out_rlw(s, RLWINM, ret, TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
}
-static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
- const int *const_args)
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpSetcond2 outop_setcond2 = {
+ .base.static_constraint = C_O1_I4(r, r, r, rU, rC),
+ .out = tgen_setcond2,
+};
+
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh, TCGLabel *l)
{
- tcg_out_cmp2(s, args, const_args);
- tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
+ assert(TCG_TARGET_REG_BITS == 32);
+ tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
+ tcg_out_bc_lab(s, TCG_COND_EQ, l);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpBrcond2 outop_brcond2 = {
+ .base.static_constraint = C_O0_I4(r, r, rU, rC),
+ .out = tgen_brcond2,
+};
+
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
uint32_t insn;
@@ -2439,8 +2396,7 @@ bool tcg_target_has_memory_bswap(MemOp memop)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGType addr_type = s->addr_type;
TCGLabelQemuLdst *ldst = NULL;
@@ -2475,8 +2431,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
@@ -2484,36 +2439,25 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_shri32(s, TCG_REG_R0, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_shri32(s, TCG_REG_R0, addr,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out_shri64(s, TCG_REG_R0, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_shri64(s, TCG_REG_R0, addr,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
}
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
/*
- * Load the (low part) TLB comparator into TMP2.
+ * Load the TLB comparator into TMP2.
* For 64-bit host, always load the entire 64-bit slot for simplicity.
* We will ignore the high bits with tcg_out_cmp(..., addr_type).
*/
- if (TCG_TARGET_REG_BITS == 64) {
- if (cmp_off == 0) {
- tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
- TCG_REG_TMP1, TCG_REG_TMP2));
- } else {
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
- TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
- TCG_REG_TMP1, cmp_off);
- }
- } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
- tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
- TCG_REG_TMP1, TCG_REG_TMP2));
+ if (cmp_off == 0) {
+ tcg_out32(s, (TCG_TARGET_REG_BITS == 64 ? LDUX : LWZUX)
+ | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
} else {
tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * HOST_BIG_ENDIAN);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
}
/*
@@ -2535,10 +2479,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (a_bits < s_bits) {
a_bits = s_bits;
}
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - a_bits) & 31, 31 - s->page_bits);
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0,
+ (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
} else {
- TCGReg t = addrlo;
+ TCGReg t = addr;
/*
* If the access is unaligned, we need to make sure we fail if we
@@ -2557,40 +2501,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Mask the address for the requested alignment. */
if (addr_type == TCG_TYPE_I32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
- (32 - a_bits) & 31, 31 - s->page_bits);
+ (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
} else if (a_bits == 0) {
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
} else {
tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
- 64 - s->page_bits, s->page_bits - a_bits);
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
+ 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
}
}
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* Low part comparison into cr7. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 7, TCG_TYPE_I32);
-
- /* Load the high part TLB comparator into TMP2. */
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * !HOST_BIG_ENDIAN);
-
- /* Load addend, deferred for this case. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
- offsetof(CPUTLBEntry, addend));
-
- /* High part comparison into cr6. */
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
- 0, 6, TCG_TYPE_I32);
-
- /* Combine comparisons into cr0. */
- tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- } else {
- /* Full comparison into cr0. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 0, addr_type);
- }
+ /* Full comparison into cr0. */
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 0, addr_type);
/* Load a pointer into the current opcode w/conditional branch-link. */
ldst->label_ptr[0] = s->code_ptr;
@@ -2602,12 +2524,11 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addr_reg = addr;
/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
- tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+ tcg_out32(s, ANDI | SAI(addr, TCG_REG_R0, (1 << a_bits) - 1));
ldst->label_ptr[0] = s->code_ptr;
tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
@@ -2618,24 +2539,23 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
/* Zero-extend the guest address for use in the host address. */
- tcg_out_ext32u(s, TCG_REG_R0, addrlo);
- h->index = TCG_REG_R0;
+ tcg_out_ext32u(s, TCG_REG_TMP2, addr);
+ h->index = TCG_REG_TMP2;
} else {
- h->index = addrlo;
+ h->index = addr;
}
return ldst;
}
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
if (opc & MO_BSWAP) {
@@ -2679,14 +2599,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
if (opc & MO_BSWAP) {
@@ -2705,9 +2624,9 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
if (!have_isa_2_06 && insn == STDBRX) {
tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
+ tcg_out32(s, ADDI | TAI(TCG_REG_TMP2, h.index, 4));
tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
- tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
+ tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP2));
} else {
tcg_out32(s, insn | SAB(datalo, h.base, h.index));
}
@@ -2730,7 +2649,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
uint32_t insn;
TCGReg index;
- ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
/* Compose the final address, as LQ/STQ have no indexing. */
index = h.index;
@@ -2776,6 +2695,60 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_qemu_ld(s, data, -1, addr, oi, type);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_qemu_ld(s, datalo, datahi, addr, oi, type);
+ } else {
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true);
+ }
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r),
+ .out = tgen_qemu_ld2,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_qemu_st(s, data, -1, addr, oi, type);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_qemu_st(s, datalo, datahi, addr, oi, type);
+ } else {
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, false);
+ }
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
int i;
@@ -2924,6 +2897,13 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out32(s, MTSPR | RS(a0) | CTR);
+ tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -2941,603 +2921,913 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0, a1, a2;
+ tcg_out32(s, ADD | TAB(a0, a1, a2));
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out32(s, MTSPR | RS(args[0]) | CTR);
- tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
- tcg_out32(s, BCCTR | BO_ALWAYS);
- break;
- case INDEX_op_br:
- {
- TCGLabel *l = arg_label(args[0]);
- uint32_t insn = B;
-
- if (l->has_value) {
- insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
- l->u.value_ptr);
- } else {
- tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
- }
- tcg_out32(s, insn);
- }
- break;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
- tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i64:
- tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i64:
- tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
- break;
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
+}
- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_32:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rT),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
- case INDEX_op_and_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_and_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_or_i64:
- case INDEX_op_or_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_ori32(s, a0, a1, a2);
- } else {
- tcg_out32(s, OR | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_xor_i64:
- case INDEX_op_xor_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_xori32(s, a0, a1, a2);
- } else {
- tcg_out32(s, XOR | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_andc_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_andc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_orc_i32:
- if (const_args[2]) {
- tcg_out_ori32(s, args[0], args[1], ~args[2]);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_orc_i64:
- tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_eqv_i32:
- if (const_args[2]) {
- tcg_out_xori32(s, args[0], args[1], ~args[2]);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_eqv_i64:
- tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
- break;
+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADDC | TAB(a0, a1, a2));
+}
- case INDEX_op_clz_i32:
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctz_i32:
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctpop_i32:
- tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
- break;
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out32(s, ADDIC | TAI(a0, a1, a2));
+}
- case INDEX_op_clz_i64:
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctpop_i64:
- tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
- break;
+static TCGConstraintSetIndex cset_addco(TCGType type, unsigned flags)
+{
+ /*
+ * Note that the CA bit is defined based on the word size of the
+ * environment. So in 64-bit mode it's always carry-out of bit 63.
+ * The fallback code using deposit works just as well for TCG_TYPE_I32.
+ */
+ return type == TCG_TYPE_REG ? C_O1_I2(r, r, rI) : C_NotImplemented;
+}
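
The comment above is why cset_addco only offers ADDC/ADDIC for TCG_TYPE_REG: on a 64-bit host the CA bit reflects carry out of bit 63, so a 32-bit carry has to be recovered by other means. A plain-arithmetic illustration, host-independent and with an invented helper name:

    #include <stdint.h>

    /* Carry-out of a 32-bit addition, computed from the widened sum. */
    static unsigned carry_out_32(uint32_t a, uint32_t b)
    {
        return (unsigned)(((uint64_t)a + b) >> 32);
    }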
- case INDEX_op_mul_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
- } else {
- tcg_out32(s, MULLW | TAB(a0, a1, a2));
- }
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addco,
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
- case INDEX_op_div_i32:
- tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
- break;
+static void tgen_addcio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADDE | TAB(a0, a1, a2));
+}
- case INDEX_op_divu_i32:
- tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
- break;
+static void tgen_addcio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out32(s, (a2 ? ADDME : ADDZE) | RT(a0) | RA(a1));
+}
- case INDEX_op_rem_i32:
- tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
- break;
+static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I2(r, r, rZM) : C_NotImplemented;
+}
- case INDEX_op_remu_i32:
- tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
- break;
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
+};
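
tgen_addcio_rri leans on the 'ZM' constraint (the constant must be 0 or -1), so add-with-carry-in against a constant reduces to addze or addme. A sketch of the identity in plain arithmetic, helper name invented:

    #include <stdint.h>

    /* a + c + ca with c restricted to 0 or -1:
       c == 0  -> addze: a + ca
       c == -1 -> addme: a + ca - 1 */
    static uint64_t addcio_const(uint64_t a, int64_t c, unsigned ca)
    {
        return c == 0 ? a + ca : a + ca - 1;
    }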
- case INDEX_op_shl_i32:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shli32(s, args[0], args[1], args[2] & 31);
- } else {
- tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_shr_i32:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shri32(s, args[0], args[1], args[2] & 31);
- } else {
- tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_sar_i32:
- if (const_args[2]) {
- tcg_out_sari32(s, args[0], args[1], args[2]);
- } else {
- tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_rotl_i32:
- if (const_args[2]) {
- tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
- } else {
- tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
- | MB(0) | ME(31));
- }
- break;
- case INDEX_op_rotr_i32:
- if (const_args[2]) {
- tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
- } else {
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
- tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
- | MB(0) | ME(31));
- }
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
+};
- case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I32);
- break;
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I64);
- break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args, const_args);
- break;
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out32(s, SUBFC | TAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_R0));
+}
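
tcg_out_set_carry forces CA to 1 with subfc r0,r0,r0, relying on subf being defined as ~RA + RB + 1. A quick arithmetic check of why the carry is always produced (sketch only):

    #include <stdint.h>

    /* With RA == RB == x, subfc computes ~x + x + 1 = 2^64, i.e. a result of
       0 with carry-out (CA) = 1, regardless of x. */
    static unsigned subfc_same_reg_ca(uint64_t x)
    {
        uint64_t ones = ~x + x;      /* always UINT64_MAX */
        return ones + 1 < ones;      /* the final +1 wraps, so CA is 1 */
    }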
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
- break;
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, AND | SAB(a1, a0, a2));
+}
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
- break;
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_andi32(s, a0, a1, a2);
+ } else {
+ tcg_out_andi64(s, a0, a1, a2);
+ }
+}
- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
- case INDEX_op_shl_i64:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shli64(s, args[0], args[1], args[2] & 63);
- } else {
- tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_shr_i64:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shri64(s, args[0], args[1], args[2] & 63);
- } else {
- tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_sar_i64:
- if (const_args[2]) {
- tcg_out_sari64(s, args[0], args[1], args[2]);
- } else {
- tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_rotl_i64:
- if (const_args[2]) {
- tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
- } else {
- tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
- }
- break;
- case INDEX_op_rotr_i64:
- if (const_args[2]) {
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
- } else {
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
- tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
- }
- break;
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ANDC | SAB(a1, a0, a2));
+}
- case INDEX_op_mul_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
- } else {
- tcg_out32(s, MULLD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_div_i64:
- tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_divu_i64:
- tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_rem_i64:
- tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_remu_i64:
- tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
- break;
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
- break;
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, false);
+}
- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
- break;
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, true);
+}
- case INDEX_op_setcond_i32:
- tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
- const_args[2], false);
- break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
- const_args[2], false);
- break;
- case INDEX_op_negsetcond_i32:
- tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
- const_args[2], true);
- break;
- case INDEX_op_negsetcond_i64:
- tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
- const_args[2], true);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args, const_args);
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_O1_I2(r, r, rZW),
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- tcg_out_bswap16(s, args[0], args[1], args[2]);
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, args[0], args[1], 0);
- break;
- case INDEX_op_bswap32_i64:
- tcg_out_bswap32(s, args[0], args[1], args[2]);
- break;
- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, args[0], args[1]);
- break;
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTPOPW : CNTPOPD;
+ tcg_out32(s, insn | SAB(a1, a0, 0));
+}
- case INDEX_op_deposit_i32:
- if (const_args[2]) {
- uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi32(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
- 32 - args[3] - args[4], 31 - args[3]);
- }
- break;
- case INDEX_op_deposit_i64:
- if (const_args[2]) {
- uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi64(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
- 64 - args[3] - args[4]);
- }
- break;
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return have_isa_2_06 ? C_O1_I1(r, r) : C_NotImplemented;
+}
- case INDEX_op_extract_i32:
- tcg_out_rlw(s, RLWINM, args[0], args[1],
- 32 - args[2], 32 - args[3], 31);
- break;
- case INDEX_op_extract_i64:
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
- break;
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
- case INDEX_op_movcond_i32:
- tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, false);
+}
+
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, true);
+}
+
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
+{
+ return have_isa_3_00 ? C_O1_I2(r, r, rZW) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, EQV | SAB(a1, a0, a2));
+}
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_add2_i64:
-#else
- case INDEX_op_add2_i32:
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_shri64(s, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
#endif
- /* Note that the CA bit is defined based on the word size of the
- environment. So in 64-bit mode it's always carry-out of bit 63.
- The fallback code using deposit works just as well for 32-bit. */
- a0 = args[0], a1 = args[1];
- if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[4]) {
- tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
- } else {
- tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
+
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? DIVW : DIVD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? DIVWU : DIVDU;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MULLW : MULLD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static void tgen_muli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out32(s, MULLI | TAI(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_mul,
+ .out_rri = tgen_muli,
+};
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MULHW : MULHD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MULHWU : MULHDU;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_muluh,
+};
+
+static void tgen_nand(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, NAND | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nand,
+};
+
+static void tgen_nor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, NOR | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nor,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, OR | SAB(a1, a0, a2));
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_ori32(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ORC | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
+
+static TCGConstraintSetIndex cset_mod(TCGType type, unsigned flags)
+{
+ return have_isa_3_00 ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MODSW : MODSD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mod,
+ .out_rrr = tgen_rems,
+};
+
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MODUW : MODUD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mod,
+ .out_rrr = tgen_remu,
+};
+
+static void tgen_rotl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out32(s, RLWNM | SAB(a1, a0, a2) | MB(0) | ME(31));
+ } else {
+ tcg_out32(s, RLDCL | SAB(a1, a0, a2) | MB64(0));
+ }
+}
+
+static void tgen_rotli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWINM, a0, a1, a2, 0, 31);
+ } else {
+ tcg_out_rld(s, RLDICL, a0, a1, a2, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_rotl,
+ .out_rri = tgen_rotli,
+};
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SRAW : SRAD;
+ tcg_out32(s, insn | SAB(a1, a0, a2));
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* Limit immediate shift count lest we create an illegal insn. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_sari32(s, a0, a1, a2 & 31);
+ } else {
+ tcg_out_sari64(s, a0, a1, a2 & 63);
+ }
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SLW : SLD;
+ tcg_out32(s, insn | SAB(a1, a0, a2));
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* Limit immediate shift count lest we create an illegal insn. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_shli32(s, a0, a1, a2 & 31);
+ } else {
+ tcg_out_shli64(s, a0, a1, a2 & 63);
+ }
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SRW : SRD;
+ tcg_out32(s, insn | SAB(a1, a0, a2));
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* Limit immediate shift count lest we create an illegal insn. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_shri32(s, a0, a1, a2 & 31);
+ } else {
+ tcg_out_shri64(s, a0, a1, a2 & 63);
+ }
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBF | TAB(a0, a2, a1));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFC | TAB(a0, a2, a1));
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ tcg_out_movi(s, type, TCG_REG_R0, 0);
+ tgen_subbo_rrr(s, type, a0, a1, TCG_REG_R0);
+ } else {
+ tgen_addco_rri(s, type, a0, a1, -a2);
+ }
+}
+
+/* The underlying insn for subfi is subfic. */
+#define tgen_subbo_rir tgen_subfi
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, type, TCG_REG_R0, a2);
+ tgen_subbo_rir(s, type, a0, a1, TCG_REG_R0);
+}
+
+static TCGConstraintSetIndex cset_subbo(TCGType type, unsigned flags)
+{
+ /* Recall that the CA bit is defined based on the host word size. */
+ return type == TCG_TYPE_REG ? C_O1_I2(r, rI, rN) : C_NotImplemented;
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_subbo,
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFE | TAB(a0, a2, a1));
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addcio_rri(s, type, a0, a1, ~a2);
+}
+
+static void tgen_subbio_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_debug_assert(a1 == 0 || a1 == -1);
+ tcg_out32(s, (a1 ? SUBFME : SUBFZE) | RT(a0) | RA(a2));
+}
+
+static void tgen_subbio_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, type, TCG_REG_R0, a2);
+ tgen_subbio_rir(s, type, a0, a1, TCG_REG_R0);
+}
+
+static TCGConstraintSetIndex cset_subbio(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I2(r, rZM, rZM) : C_NotImplemented;
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_subbio,
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+ .out_rir = tgen_subbio_rir,
+ .out_rii = tgen_subbio_rii,
+};
+
+#define outop_subbi outop_subbio
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ /* borrow = !carry */
+ tcg_out32(s, ADDIC | TAI(TCG_REG_R0, TCG_REG_R0, 0));
+}
+
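As an aside on the carry/borrow helpers above: subtraction with borrow is routed through the add-with-carry path because PowerPC represents a borrow as an inverted carry, and SUBFE computes ~rA + rB + CA (hence tgen_subbio_rri passing ~a2 to tgen_addcio_rri). A minimal stand-alone C sketch of that identity, offered purely as illustration and not as part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a1 = 0x123456789abcdef0ull, a2 = 0x0fedcba987654321ull;

        for (unsigned borrow = 0; borrow <= 1; borrow++) {
            unsigned ca = !borrow;              /* CA is the inverted borrow */
            uint64_t want = a1 - a2 - borrow;   /* subtract with borrow-in */
            uint64_t subfe = ~a2 + a1 + ca;     /* what SUBFE/ADDE compute */
            assert(want == subfe);
        }
        return 0;
    }
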
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, XOR | SAB(a1, a0, a2));
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_xori32(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg dst, TCGReg src, unsigned flags)
+{
+ TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
+
+ if (have_isa_3_10) {
+ tcg_out32(s, BRH | RA(dst) | RS(src));
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext16u(s, dst, dst);
}
- if (const_args[5]) {
- tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
- } else {
- tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
+ return;
+ }
+
+ /*
+ * In the following,
+ * dep(a, b, m) -> (a & ~m) | (b & m)
+ *
+ * Begin with: src = xxxxabcd
+ */
+ /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
+ tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
+ /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
+ tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
+
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg dst, TCGReg src, unsigned flags)
+{
+ TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
+
+ if (have_isa_3_10) {
+ tcg_out32(s, BRW | RA(dst) | RS(src));
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, dst, dst);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, dst, dst);
}
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
+ return;
+ }
+
+ /*
+ * Stolen from gcc's builtin_bswap32.
+ * In the following,
+ * dep(a, b, m) -> (a & ~m) | (b & m)
+ *
+ * Begin with: src = xxxxabcd
+ */
+ /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
+ tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
+ /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
+ tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
+ /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
+ tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
+
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, dst, tmp);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
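The rotate-and-insert scheme used by the two bswap helpers above can be modelled in plain C with nothing beyond the dep()/rol32 notation from the comments; this sketch is illustrative only and not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    static uint32_t dep(uint32_t a, uint32_t b, uint32_t m)
    {
        return (a & ~m) | (b & m);
    }

    int main(void)
    {
        uint32_t src = 0xaabbccdd, tmp;

        tmp = rol32(src, 8);                          /* RLWINM: 0xbbccddaa */
        tmp = dep(tmp, rol32(src, 24), 0xff000000);   /* RLWIMI: 0xddccddaa */
        tmp = dep(tmp, rol32(src, 24), 0x0000ff00);   /* RLWIMI: 0xddccbbaa */
        assert(tmp == 0xddccbbaa);
        return 0;
    }
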
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
+{
+ TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
+ TCGReg t1 = dst == src ? dst : TCG_REG_R0;
+
+ if (have_isa_3_10) {
+ tcg_out32(s, BRD | RA(dst) | RS(src));
+ return;
+ }
+
+ /*
+ * In the following,
+ * dep(a, b, m) -> (a & ~m) | (b & m)
+ *
+ * Begin with: src = abcdefgh
+ */
+ /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
+ tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
+ /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
+ /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
+
+ /* t0 = rol64(t0, 32) = hgfe0000 */
+ tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
+ /* t1 = rol64(src, 32) = efghabcd */
+ tcg_out_rld(s, RLDICL, t1, src, 32, 0);
+
+ /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
+ /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
+ /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
+
+ tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out32(s, NEG | RT(a0) | RA(a1));
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_nor(s, type, a0, a1, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWIMI, a0, a2, ofs, 32 - ofs - len, 31 - ofs);
+ } else {
+ tcg_out_rld(s, RLDIMI, a0, a2, ofs, 64 - ofs - len);
+ }
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len));
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rZ),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 16) {
+ tgen_andi(s, TCG_TYPE_I32, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWINM, a0, a1, 32 - ofs, 32 - len, 31);
+ } else {
+ tcg_out_rld(s, RLDICL, a0, a1, 64 - ofs, 64 - len);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
}
- break;
+ } else if (ofs + len == 32) {
+ tcg_out_sari32(s, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, LBZ, LBZX, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tgen_ld8u(s, type, dest, base, offset);
+ tcg_out_ext8s(s, type, dest, dest);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, LHZ, LHZX, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, LHA, LHAX, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_sub2_i64:
-#else
- case INDEX_op_sub2_i32:
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, LWZ, LWZX, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, LWA, LWAX, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
#endif
- a0 = args[0], a1 = args[1];
- if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[2]) {
- tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
- } else {
- tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
- }
- if (const_args[3]) {
- tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
- } else {
- tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
- }
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
- case INDEX_op_muluh_i32:
- tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mulsh_i32:
- tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_muluh_i64:
- tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mulsh_i64:
- tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
- break;
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, STB, STBX, data, base, offset);
+}
- case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
- break;
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
- }
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, STH, STHX, data, base, offset);
}
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+
+
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
switch (opc) {
@@ -3568,12 +3858,14 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_usadd_vec:
case INDEX_op_ussub_vec:
return vece <= MO_32;
- case INDEX_op_cmp_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
case INDEX_op_rotli_vec:
return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
+ case INDEX_op_cmp_vec:
+ case INDEX_op_cmpsel_vec:
+ return vece <= MO_32 || have_isa_2_07 ? 1 : 0;
case INDEX_op_neg_vec:
return vece >= MO_32 && have_isa_3_00;
case INDEX_op_mul_vec:
@@ -3714,6 +4006,149 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
return true;
}
+static void tcg_out_not_vec(TCGContext *s, TCGReg a0, TCGReg a1)
+{
+ tcg_out32(s, VNOR | VRT(a0) | VRA(a1) | VRB(a1));
+}
+
+static void tcg_out_or_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, VOR | VRT(a0) | VRA(a1) | VRB(a2));
+}
+
+static void tcg_out_orc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, VORC | VRT(a0) | VRA(a1) | VRB(a2));
+}
+
+static void tcg_out_and_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, VAND | VRT(a0) | VRA(a1) | VRB(a2));
+}
+
+static void tcg_out_andc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, VANDC | VRT(a0) | VRA(a1) | VRB(a2));
+}
+
+static void tcg_out_bitsel_vec(TCGContext *s, TCGReg d,
+ TCGReg c, TCGReg t, TCGReg f)
+{
+ if (TCG_TARGET_HAS_bitsel_vec) {
+ tcg_out32(s, XXSEL | VRT(d) | VRC(c) | VRB(t) | VRA(f));
+ } else {
+ tcg_out_and_vec(s, TCG_VEC_TMP2, t, c);
+ tcg_out_andc_vec(s, d, f, c);
+ tcg_out_or_vec(s, d, d, TCG_VEC_TMP2);
+ }
+}
+
+static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg a1, TCGReg a2, TCGCond cond)
+{
+ static const uint32_t
+ eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
+ ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
+ gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
+ gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD };
+ uint32_t insn;
+
+ bool need_swap = false, need_inv = false;
+
+ tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_GT:
+ case TCG_COND_GTU:
+ break;
+ case TCG_COND_NE:
+ if (have_isa_3_00 && vece <= MO_32) {
+ break;
+ }
+ /* fall through */
+ case TCG_COND_LE:
+ case TCG_COND_LEU:
+ need_inv = true;
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_LTU:
+ need_swap = true;
+ break;
+ case TCG_COND_GE:
+ case TCG_COND_GEU:
+ need_swap = need_inv = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (need_inv) {
+ cond = tcg_invert_cond(cond);
+ }
+ if (need_swap) {
+ TCGReg swap = a1;
+ a1 = a2;
+ a2 = swap;
+ cond = tcg_swap_cond(cond);
+ }
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ insn = eq_op[vece];
+ break;
+ case TCG_COND_NE:
+ insn = ne_op[vece];
+ break;
+ case TCG_COND_GT:
+ insn = gts_op[vece];
+ break;
+ case TCG_COND_GTU:
+ insn = gtu_op[vece];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
+
+ return need_inv;
+}
+
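The reduction above only ever emits EQ, GT and GTU compares (plus NE where ISA 3.0 provides it); every other condition is obtained by swapping the operands, inverting the result, or both. A scalar sanity check of those identities, purely illustrative:

    #include <assert.h>

    int main(void)
    {
        for (int a = -2; a <= 2; a++) {
            for (int b = -2; b <= 2; b++) {
                assert((a <  b) ==  (b > a));    /* LT: swap, use GT */
                assert((a <= b) == !(a > b));    /* LE: invert GT */
                assert((a >= b) == !(b > a));    /* GE: swap and invert */
                assert((a != b) == !(a == b));   /* NE: invert EQ */
            }
        }
        return 0;
    }
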
+static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg a1, TCGReg a2, TCGCond cond)
+{
+ if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) {
+ tcg_out_not_vec(s, a0, a0);
+ }
+}
+
+static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg c1, TCGReg c2, TCGArg v3, int const_v3,
+ TCGReg v4, TCGCond cond)
+{
+ bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP1, c1, c2, cond);
+
+ if (!const_v3) {
+ if (inv) {
+ tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v4, v3);
+ } else {
+ tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v3, v4);
+ }
+ } else if (v3) {
+ if (inv) {
+ tcg_out_orc_vec(s, a0, v4, TCG_VEC_TMP1);
+ } else {
+ tcg_out_or_vec(s, a0, v4, TCG_VEC_TMP1);
+ }
+ } else {
+ if (inv) {
+ tcg_out_and_vec(s, a0, v4, TCG_VEC_TMP1);
+ } else {
+ tcg_out_andc_vec(s, a0, v4, TCG_VEC_TMP1);
+ }
+ }
+}
+
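When v3 is a constant, the vZM constraint appears to admit only 0 or -1, so the generic select (v3 & mask) | (v4 & ~mask) collapses to a single OR/ORC or ANDC/AND against the comparison mask, which is what the constant branches above emit. A small scalar model of that collapse (the register and constraint details are the patch's; this sketch is not):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t sel(uint64_t mask, uint64_t t, uint64_t f)
    {
        return (t & mask) | (f & ~mask);     /* bitwise "mask ? t : f" */
    }

    int main(void)
    {
        uint64_t mask = 0x00ff00ff00ff00ffull;   /* lanes where the compare was true */
        uint64_t v4 = 0x1122334455667788ull;

        assert(sel(mask, -1ull, v4) == (v4 | mask));   /* v3 == -1: OR  */
        assert(sel(mask, 0, v4) == (v4 & ~mask));      /* v3 ==  0: ANDC */
        return 0;
    }
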
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -3724,10 +4159,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
mul_op[4] = { 0, 0, VMULUWM, VMULLD },
neg_op[4] = { 0, 0, VNEGW, VNEGD },
- eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
- ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
- gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
- gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
@@ -3809,24 +4240,23 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
insn = sarv_op[vece];
break;
case INDEX_op_and_vec:
- insn = VAND;
- break;
+ tcg_out_and_vec(s, a0, a1, a2);
+ return;
case INDEX_op_or_vec:
- insn = VOR;
- break;
+ tcg_out_or_vec(s, a0, a1, a2);
+ return;
case INDEX_op_xor_vec:
insn = VXOR;
break;
case INDEX_op_andc_vec:
- insn = VANDC;
- break;
+ tcg_out_andc_vec(s, a0, a1, a2);
+ return;
case INDEX_op_not_vec:
- insn = VNOR;
- a2 = a1;
- break;
+ tcg_out_not_vec(s, a0, a1);
+ return;
case INDEX_op_orc_vec:
- insn = VORC;
- break;
+ tcg_out_orc_vec(s, a0, a1, a2);
+ return;
case INDEX_op_nand_vec:
insn = VNAND;
break;
@@ -3838,26 +4268,14 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_cmp_vec:
- switch (args[3]) {
- case TCG_COND_EQ:
- insn = eq_op[vece];
- break;
- case TCG_COND_NE:
- insn = ne_op[vece];
- break;
- case TCG_COND_GT:
- insn = gts_op[vece];
- break;
- case TCG_COND_GTU:
- insn = gtu_op[vece];
- break;
- default:
- g_assert_not_reached();
- }
- break;
-
+ tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]);
+ return;
+ case INDEX_op_cmpsel_vec:
+ tcg_out_cmpsel_vec(s, vece, a0, a1, a2,
+ args[3], const_args[3], args[4], args[5]);
+ return;
case INDEX_op_bitsel_vec:
- tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
+ tcg_out_bitsel_vec(s, a0, a1, a2, args[3]);
return;
case INDEX_op_dup2_vec:
@@ -3922,56 +4340,6 @@ static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}
-static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
-{
- bool need_swap = false, need_inv = false;
-
- tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
-
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_GT:
- case TCG_COND_GTU:
- break;
- case TCG_COND_NE:
- if (have_isa_3_00 && vece <= MO_32) {
- break;
- }
- /* fall through */
- case TCG_COND_LE:
- case TCG_COND_LEU:
- need_inv = true;
- break;
- case TCG_COND_LT:
- case TCG_COND_LTU:
- need_swap = true;
- break;
- case TCG_COND_GE:
- case TCG_COND_GEU:
- need_swap = need_inv = true;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (need_inv) {
- cond = tcg_invert_cond(cond);
- }
- if (need_swap) {
- TCGv_vec t1;
- t1 = v1, v1 = v2, v2 = t1;
- cond = tcg_swap_cond(cond);
- }
-
- vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
-
- if (need_inv) {
- tcg_gen_not_vec(vece, v0, v0);
- }
-}
-
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
TCGv_vec v1, TCGv_vec v2)
{
@@ -4046,10 +4414,6 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
case INDEX_op_rotli_vec:
expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
break;
- case INDEX_op_cmp_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
- break;
case INDEX_op_mul_vec:
v2 = temp_tcgv_vec(arg_temp(a2));
expand_vec_mul(type, vece, v0, v1, v2);
@@ -4067,165 +4431,15 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
va_end(va);
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_ctpop_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_not_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_ctpop_i64:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i64:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_extract_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
+ case INDEX_op_qemu_st:
return C_O0_I2(r, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_and_i32:
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- case INDEX_op_andc_i32:
- case INDEX_op_orc_i32:
- case INDEX_op_eqv_i32:
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- return C_O1_I2(r, r, rI);
-
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- case INDEX_op_rem_i32:
- case INDEX_op_remu_i32:
- case INDEX_op_nand_i32:
- case INDEX_op_nor_i32:
- case INDEX_op_muluh_i32:
- case INDEX_op_mulsh_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i64:
- case INDEX_op_nand_i64:
- case INDEX_op_nor_i64:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i64:
- case INDEX_op_mulsh_i64:
- case INDEX_op_muluh_i64:
- return C_O1_I2(r, r, r);
-
- case INDEX_op_sub_i32:
- return C_O1_I2(r, rI, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rT);
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- return C_O1_I2(r, r, rU);
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rI, rT);
- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, rZW);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rC);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rZ, rZ);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rZ);
- case INDEX_op_brcond2_i32:
- return C_O0_I4(r, r, ri, ri);
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, r, r, ri, ri);
- case INDEX_op_add2_i64:
- case INDEX_op_add2_i32:
- return C_O2_I4(r, r, r, r, rI, rZM);
- case INDEX_op_sub2_i64:
- case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, rI, rZM, r, r);
-
- case INDEX_op_qemu_ld_a32_i32:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_ld_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
-
- case INDEX_op_qemu_st_a32_i32:
- return C_O0_I2(r, r);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
-
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- return C_N1O1_I1(o, m, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- return C_O0_I3(o, m, r);
+ case INDEX_op_qemu_st2:
+ return TCG_TARGET_REG_BITS == 64
+ ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r);
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
@@ -4277,9 +4491,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_bitsel_vec:
case INDEX_op_ppc_msum_vec:
return C_O1_I3(v, v, v, v);
+ case INDEX_op_cmpsel_vec:
+ return C_O1_I4(v, v, v, vZM, v);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index e154fb1..5607634 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -25,8 +25,6 @@
#ifndef PPC_TCG_TARGET_H
#define PPC_TCG_TARGET_H
-#include "host/cpuinfo.h"
-
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#define TCG_TARGET_NB_REGS 64
@@ -55,128 +53,4 @@ typedef enum {
TCG_AREG0 = TCG_REG_R27
} TCGReg;
-typedef enum {
- tcg_isa_base,
- tcg_isa_2_06,
- tcg_isa_2_07,
- tcg_isa_3_00,
- tcg_isa_3_10,
-} TCGPowerISA;
-
-#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06)
-#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07)
-#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0)
-#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1)
-#define have_altivec (cpuinfo & CPUINFO_ALTIVEC)
-#define have_vsx (cpuinfo & CPUINFO_VSX)
-
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
-#define TCG_TARGET_HAS_ext16u_i32 0
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 have_isa_3_00
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_eqv_i32 1
-#define TCG_TARGET_HAS_nand_i32 1
-#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
-#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 1
-#define TCG_TARGET_HAS_mulsh_i32 1
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 0
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
-#define TCG_TARGET_HAS_nand_i64 1
-#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
-#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 1
-#define TCG_TARGET_HAS_mulsh_i64 1
-#endif
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 \
- (TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
-
-#define TCG_TARGET_HAS_tst 1
-
-/*
- * While technically Altivec could support V64, it has no 64-bit store
- * instruction and substituting two 32-bit stores makes the generated
- * code quite large.
- */
-#define TCG_TARGET_HAS_v64 have_vsx
-#define TCG_TARGET_HAS_v128 have_altivec
-#define TCG_TARGET_HAS_v256 0
-
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec have_isa_2_07
-#define TCG_TARGET_HAS_nand_vec have_isa_2_07
-#define TCG_TARGET_HAS_nor_vec 1
-#define TCG_TARGET_HAS_eqv_vec have_isa_2_07
-#define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec have_isa_3_00
-#define TCG_TARGET_HAS_abs_vec 0
-#define TCG_TARGET_HAS_roti_vec 0
-#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 1
-#define TCG_TARGET_HAS_shi_vec 0
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_sat_vec 1
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec have_vsx
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 0
-
-#define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
-
#endif
diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target.opc.h
deleted file mode 100644
index db51440..0000000
--- a/tcg/ppc/tcg-target.opc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2019 Linaro Limited
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- * Target-specific opcodes for host vector expansion. These will be
- * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
- * consider these to be UNSPEC with names.
- */
-
-DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC)
-DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC)
-DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC)
-DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC)
-DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC)
-DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC)
diff --git a/tcg/region.c b/tcg/region.c
index 478ec05..7ea0b37 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -422,7 +422,7 @@ void tcg_region_reset_all(void)
tcg_region_tree_reset_all();
}
-static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
+static size_t tcg_n_regions(size_t tb_size, unsigned max_threads)
{
#ifdef CONFIG_USER_ONLY
return 1;
@@ -431,24 +431,25 @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
/*
* It is likely that some vCPUs will translate more code than others,
- * so we first try to set more regions than max_cpus, with those regions
+ * so we first try to set more regions than threads, with those regions
* being of reasonable size. If that's not possible we make do by evenly
* dividing the code_gen_buffer among the vCPUs.
+ *
+ * Use a single region if all we have is one vCPU thread.
*/
- /* Use a single region if all we have is one vCPU thread */
- if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
+ if (max_threads == 1) {
return 1;
}
/*
- * Try to have more regions than max_cpus, with each region being >= 2 MB.
+ * Try to have more regions than threads, with each region being >= 2 MB.
* If we can't, then just allocate one region per vCPU thread.
*/
n_regions = tb_size / (2 * MiB);
- if (n_regions <= max_cpus) {
- return max_cpus;
+ if (n_regions <= max_threads) {
+ return max_threads;
}
- return MIN(n_regions, max_cpus * 8);
+ return MIN(n_regions, max_threads * 8);
#endif
}
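To make the sizing concrete, here is a stand-alone model of the system-mode path of the heuristic above with a few worked numbers; the buffer sizes and thread counts are made up for illustration, only the shape of the calculation comes from the function:

    #include <assert.h>
    #include <stddef.h>

    #define MiB (1024 * 1024)
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static size_t n_regions_model(size_t tb_size, unsigned max_threads)
    {
        size_t n_regions;

        if (max_threads == 1) {
            return 1;
        }
        n_regions = tb_size / (2 * MiB);
        if (n_regions <= max_threads) {
            return max_threads;
        }
        return MIN(n_regions, (size_t)max_threads * 8);
    }

    int main(void)
    {
        assert(n_regions_model(256 * MiB, 8) == 64);   /* 128 candidates, capped at 8 * 8 */
        assert(n_regions_model(8 * MiB, 8) == 8);      /* too small: one region per thread */
        assert(n_regions_model(1024 * MiB, 1) == 1);   /* single thread: single region */
        return 0;
    }
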
@@ -731,11 +732,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
* and then assigning regions to TCG threads so that the threads can translate
* code in parallel without synchronization.
*
- * In system-mode the number of TCG threads is bounded by max_cpus, so we use at
- * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
- * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
- * must have been parsed before calling this function, since it calls
- * qemu_tcg_mttcg_enabled().
+ * In system-mode the number of TCG threads is bounded by max_threads.
*
* In user-mode we use a single region. Having multiple regions in user-mode
* is not supported, because the number of vCPU threads (recall that each thread
@@ -749,7 +746,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
* in practice. Multi-threaded guests share most if not all of their translated
* code, which makes parallel code generation less appealing than in system-mode
*/
-void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_threads)
{
const size_t page_size = qemu_real_host_page_size();
size_t region_size;
@@ -787,7 +784,7 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
* As a result of this we might end up with a few extra pages at the end of
* the buffer; we will assign those to the last region.
*/
- region.n = tcg_n_regions(tb_size, max_cpus);
+ region.n = tcg_n_regions(tb_size, max_threads);
region_size = tb_size / region.n;
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index aac5cee..0fc26d3 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -10,14 +10,20 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(r, rz)
C_O1_I1(r, r)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
-C_O1_I2(r, r, rJ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
-C_O2_I4(r, r, rZ, rZ, rM, rM)
+C_O0_I2(v, r)
+C_O1_I1(v, r)
+C_O1_I1(v, v)
+C_O1_I2(v, v, r)
+C_O1_I2(v, v, v)
+C_O1_I2(v, vK, v)
+C_O1_I2(v, v, vK)
+C_O1_I2(v, v, vL)
+C_O1_I4(v, v, vL, vK, vK)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index d5c419d..c04e15d 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -9,13 +9,13 @@
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
+REGS('v', ALL_VECTOR_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_S12)
-CONST('J', TCG_CT_CONST_J12)
-CONST('N', TCG_CT_CONST_N12)
+CONST('K', TCG_CT_CONST_S5)
+CONST('L', TCG_CT_CONST_CMP_VI)
CONST('M', TCG_CT_CONST_M12)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
new file mode 100644
index 0000000..aef10c2
--- /dev/null
+++ b/tcg/riscv/tcg-target-has.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2018 SiFive, Inc
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#include "host/cpuinfo.h"
+
+/* optional instructions */
+#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 0
+
+/* vector instructions */
+#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X)
+#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X)
+#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X)
+#define TCG_TARGET_HAS_andc_vec 0
+#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_roti_vec 1
+#define TCG_TARGET_HAS_rots_vec 1
+#define TCG_TARGET_HAS_rotv_vec 1
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 1
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_cmpsel_vec 1
+
+#define TCG_TARGET_HAS_tst_vec 0
+
+static inline bool
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
+ /* ofs > 0 uses SRLIW; ofs == 0 uses add.uw. */
+ return ofs || (cpuinfo & CPUINFO_ZBA);
+ }
+ switch (len) {
+ case 1:
+ return (cpuinfo & CPUINFO_ZBS) && ofs != 0;
+ case 16:
+ return (cpuinfo & CPUINFO_ZBB) && ofs == 0;
+ }
+ return false;
+}
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
+
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
+ return true;
+ }
+ return (cpuinfo & CPUINFO_ZBB) && ofs == 0 && (len == 8 || len == 16);
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+#define TCG_TARGET_deposit_valid(type, ofs, len) 0
+
+#endif
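A stand-alone model of the extract predicate above, with the cpuinfo bits replaced by explicit flags so it compiles outside QEMU; beyond SRLIW/add.uw, which the source comment names, the instruction choices in the comments are guesses at the intended encodings:

    #include <assert.h>
    #include <stdbool.h>

    /* Model of tcg_target_extract_valid; zba/zbb/zbs stand in for cpuinfo. */
    static bool extract_valid(bool is_i64, bool zba, bool zbb, bool zbs,
                              unsigned ofs, unsigned len)
    {
        if (is_i64 && ofs + len == 32) {
            return ofs || zba;       /* SRLIW if ofs > 0, else add.uw (Zba) */
        }
        switch (len) {
        case 1:
            return zbs && ofs != 0;  /* single-bit extract, presumably BEXTI */
        case 16:
            return zbb && ofs == 0;  /* low-halfword zero-extend (Zbb) */
        }
        return false;
    }

    int main(void)
    {
        assert(extract_valid(true, false, false, false, 8, 24));   /* SRLIW */
        assert(!extract_valid(true, false, false, false, 0, 32));  /* wants Zba */
        assert(extract_valid(false, false, false, true, 3, 1));    /* wants Zbs */
        assert(extract_valid(false, false, true, false, 0, 16));   /* wants Zbb */
        return 0;
    }
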
diff --git a/tcg/riscv/tcg-target-mo.h b/tcg/riscv/tcg-target-mo.h
new file mode 100644
index 0000000..691b5d0
--- /dev/null
+++ b/tcg/riscv/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2018 SiFive, Inc
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/riscv/tcg-target-opc.h.inc b/tcg/riscv/tcg-target-opc.h.inc
new file mode 100644
index 0000000..b80b39e
--- /dev/null
+++ b/tcg/riscv/tcg-target-opc.h.inc
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) C-SKY Microsystems Co., Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index d334857..31b9f7d 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -27,43 +27,25 @@
* THE SOFTWARE.
*/
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
+/* Used for function call generation. */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
- "zero",
- "ra",
- "sp",
- "gp",
- "tp",
- "t0",
- "t1",
- "t2",
- "s0",
- "s1",
- "a0",
- "a1",
- "a2",
- "a3",
- "a4",
- "a5",
- "a6",
- "a7",
- "s2",
- "s3",
- "s4",
- "s5",
- "s6",
- "s7",
- "s8",
- "s9",
- "s10",
- "s11",
- "t3",
- "t4",
- "t5",
- "t6"
+ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
+ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
+ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif
@@ -100,6 +82,16 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_A5,
TCG_REG_A6,
TCG_REG_A7,
+
+ /* Vector registers and TCG_REG_V0 reserved for mask. */
+ TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4,
+ TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8,
+ TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12,
+ TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16,
+ TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20,
+ TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24,
+ TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28,
+ TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
};
static const int tcg_target_call_iarg_regs[] = {
@@ -120,62 +112,47 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
return TCG_REG_A0 + slot;
}
-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_S12 0x200
-#define TCG_CT_CONST_N12 0x400
-#define TCG_CT_CONST_M12 0x800
-#define TCG_CT_CONST_J12 0x1000
+#define TCG_CT_CONST_S12 0x100
+#define TCG_CT_CONST_M12 0x200
+#define TCG_CT_CONST_S5 0x400
+#define TCG_CT_CONST_CMP_VI 0x800
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
+#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000
+#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000
#define sextreg sextract64
-/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, int ct,
- TCGType type, TCGCond cond, int vece)
-{
- if (ct & TCG_CT_CONST) {
- return 1;
- }
- if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
- return 1;
- }
- /*
- * Sign extended from 12 bits: [-0x800, 0x7ff].
- * Used for most arithmetic, as this is the isa field.
- */
- if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
- return 1;
- }
- /*
- * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
- * Used for subtraction, where a constant must be handled by ADDI.
- */
- if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
- return 1;
- }
- /*
- * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
- * Used by addsub2 and movcond, which may need the negative value,
- * and requires the modified constant to be representable.
- */
- if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
- return 1;
- }
- /*
- * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
- * Used to map ANDN back to ANDI, etc.
- */
- if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
- return 1;
- }
- return 0;
-}
-
/*
* RISC-V Base ISA opcodes (IM)
*/
+#define V_OPIVV (0x0 << 12)
+#define V_OPFVV (0x1 << 12)
+#define V_OPMVV (0x2 << 12)
+#define V_OPIVI (0x3 << 12)
+#define V_OPIVX (0x4 << 12)
+#define V_OPFVF (0x5 << 12)
+#define V_OPMVX (0x6 << 12)
+#define V_OPCFG (0x7 << 12)
+
+/* NF <= 7 && NF >= 0 */
+#define V_NF(x) (x << 29)
+#define V_UNIT_STRIDE (0x0 << 20)
+#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20)
+
+typedef enum {
+ VLMUL_M1 = 0, /* LMUL=1 */
+ VLMUL_M2, /* LMUL=2 */
+ VLMUL_M4, /* LMUL=4 */
+ VLMUL_M8, /* LMUL=8 */
+ VLMUL_RESERVED,
+ VLMUL_MF8, /* LMUL=1/8 */
+ VLMUL_MF4, /* LMUL=1/4 */
+ VLMUL_MF2, /* LMUL=1/2 */
+} RISCVVlmul;
+
typedef enum {
OPC_ADD = 0x33,
OPC_ADDI = 0x13,
@@ -183,6 +160,7 @@ typedef enum {
OPC_ANDI = 0x7013,
OPC_AUIPC = 0x17,
OPC_BEQ = 0x63,
+ OPC_BEXTI = 0x48005013,
OPC_BGE = 0x5063,
OPC_BGEU = 0x7063,
OPC_BLT = 0x4063,
@@ -271,8 +249,182 @@ typedef enum {
/* Zicond: integer conditional operations */
OPC_CZERO_EQZ = 0x0e005033,
OPC_CZERO_NEZ = 0x0e007033,
+
+ /* V: Vector extension 1.0 */
+ OPC_VSETVLI = 0x57 | V_OPCFG,
+ OPC_VSETIVLI = 0xc0000057 | V_OPCFG,
+ OPC_VSETVL = 0x80000057 | V_OPCFG,
+
+ OPC_VLE8_V = 0x7 | V_UNIT_STRIDE,
+ OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE,
+ OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE,
+ OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE,
+ OPC_VSE8_V = 0x27 | V_UNIT_STRIDE,
+ OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE,
+ OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE,
+ OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE,
+
+ OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
+ OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
+ OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
+ OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
+
+ OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
+ OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
+ OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
+ OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
+
+ OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
+ OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
+
+ OPC_VADD_VV = 0x57 | V_OPIVV,
+ OPC_VADD_VI = 0x57 | V_OPIVI,
+ OPC_VSUB_VV = 0x8000057 | V_OPIVV,
+ OPC_VRSUB_VI = 0xc000057 | V_OPIVI,
+ OPC_VAND_VV = 0x24000057 | V_OPIVV,
+ OPC_VAND_VI = 0x24000057 | V_OPIVI,
+ OPC_VOR_VV = 0x28000057 | V_OPIVV,
+ OPC_VOR_VI = 0x28000057 | V_OPIVI,
+ OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
+ OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
+
+ OPC_VMUL_VV = 0x94000057 | V_OPMVV,
+ OPC_VSADD_VV = 0x84000057 | V_OPIVV,
+ OPC_VSADD_VI = 0x84000057 | V_OPIVI,
+ OPC_VSSUB_VV = 0x8c000057 | V_OPIVV,
+ OPC_VSSUB_VI = 0x8c000057 | V_OPIVI,
+ OPC_VSADDU_VV = 0x80000057 | V_OPIVV,
+ OPC_VSADDU_VI = 0x80000057 | V_OPIVI,
+ OPC_VSSUBU_VV = 0x88000057 | V_OPIVV,
+ OPC_VSSUBU_VI = 0x88000057 | V_OPIVI,
+
+ OPC_VMAX_VV = 0x1c000057 | V_OPIVV,
+ OPC_VMAX_VI = 0x1c000057 | V_OPIVI,
+ OPC_VMAXU_VV = 0x18000057 | V_OPIVV,
+ OPC_VMAXU_VI = 0x18000057 | V_OPIVI,
+ OPC_VMIN_VV = 0x14000057 | V_OPIVV,
+ OPC_VMIN_VI = 0x14000057 | V_OPIVI,
+ OPC_VMINU_VV = 0x10000057 | V_OPIVV,
+ OPC_VMINU_VI = 0x10000057 | V_OPIVI,
+
+ OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
+ OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
+ OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
+ OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
+ OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
+ OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
+
+ OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
+ OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
+ OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
+ OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
+ OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
+ OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
+ OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
+ OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
+
+ OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
+ OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
+ OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
+ OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
+ OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
+ OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
+
+ OPC_VSLL_VV = 0x94000057 | V_OPIVV,
+ OPC_VSLL_VI = 0x94000057 | V_OPIVI,
+ OPC_VSLL_VX = 0x94000057 | V_OPIVX,
+ OPC_VSRL_VV = 0xa0000057 | V_OPIVV,
+ OPC_VSRL_VI = 0xa0000057 | V_OPIVI,
+ OPC_VSRL_VX = 0xa0000057 | V_OPIVX,
+ OPC_VSRA_VV = 0xa4000057 | V_OPIVV,
+ OPC_VSRA_VI = 0xa4000057 | V_OPIVI,
+ OPC_VSRA_VX = 0xa4000057 | V_OPIVX,
+
+ OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
+ OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
+ OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
+
+ OPC_VMVNR_V = 0x9e000057 | V_OPIVI,
} RISCVInsn;
+static const struct {
+ RISCVInsn op;
+ bool swap;
+} tcg_cmpcond_to_rvv_vv[] = {
+ [TCG_COND_EQ] = { OPC_VMSEQ_VV, false },
+ [TCG_COND_NE] = { OPC_VMSNE_VV, false },
+ [TCG_COND_LT] = { OPC_VMSLT_VV, false },
+ [TCG_COND_GE] = { OPC_VMSLE_VV, true },
+ [TCG_COND_GT] = { OPC_VMSLT_VV, true },
+ [TCG_COND_LE] = { OPC_VMSLE_VV, false },
+ [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
+ [TCG_COND_GEU] = { OPC_VMSLEU_VV, true },
+ [TCG_COND_GTU] = { OPC_VMSLTU_VV, true },
+ [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
+};
+
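+/*
+ * The immediate (OPIVI) compare forms: 'min'/'max' bound the usable
+ * constant, and 'adjust' marks conditions that are implemented via a
+ * neighbouring comparison with the immediate biased by one
+ * (e.g. LT x,i is emitted as LE x,i-1).
+ */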
+static const struct {
+ RISCVInsn op;
+ int min;
+ int max;
+ bool adjust;
+} tcg_cmpcond_to_rvv_vi[] = {
+ [TCG_COND_EQ] = { OPC_VMSEQ_VI, -16, 15, false },
+ [TCG_COND_NE] = { OPC_VMSNE_VI, -16, 15, false },
+ [TCG_COND_GT] = { OPC_VMSGT_VI, -16, 15, false },
+ [TCG_COND_LE] = { OPC_VMSLE_VI, -16, 15, false },
+ [TCG_COND_LT] = { OPC_VMSLE_VI, -15, 16, true },
+ [TCG_COND_GE] = { OPC_VMSGT_VI, -15, 16, true },
+ [TCG_COND_LEU] = { OPC_VMSLEU_VI, 0, 15, false },
+ [TCG_COND_GTU] = { OPC_VMSGTU_VI, 0, 15, false },
+ [TCG_COND_LTU] = { OPC_VMSLEU_VI, 1, 16, true },
+ [TCG_COND_GEU] = { OPC_VMSGTU_VI, 1, 16, true },
+};
+
+/* test if a constant matches the constraint */
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
+{
+ if (ct & TCG_CT_CONST) {
+ return 1;
+ }
+ if (type >= TCG_TYPE_V64) {
+ /* Val is replicated by VECE; extract the highest element. */
+ val >>= (-8 << vece) & 63;
+ }
+ /*
+ * Sign extended from 12 bits: [-0x800, 0x7ff].
+ * Used for most arithmetic, as this is the isa field.
+ */
+ if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
+ return 1;
+ }
+ /*
+ * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
+ * Used by movcond, which may need the negative value,
+ * and requires the modified constant to be representable.
+ */
+ if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
+ return 1;
+ }
+ /*
+ * Sign extended from 5 bits: [-0x10, 0x0f].
+ * Used for vector-immediate.
+ */
+ if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
+ return 1;
+ }
+ /*
+ * Used for vector compare OPIVI instructions.
+ */
+ if ((ct & TCG_CT_CONST_CMP_VI) &&
+ val >= tcg_cmpcond_to_rvv_vi[cond].min &&
+ val <= tcg_cmpcond_to_rvv_vi[cond].max) {
+ return true;
+ }
+ return 0;
+}
+
/*
* RISC-V immediate and instruction encoders (excludes 16-bit RVC)
*/
@@ -363,6 +515,45 @@ static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
+
+/* Type-OPIVI */
+
+static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm,
+ TCGReg vs2, bool vm)
+{
+ return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 |
+ (vs2 & 0x1f) << 20 | (vm << 25);
+}
+
+/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */
+
+static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1,
+ TCGReg s2, bool vm)
+{
+ return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 |
+ (s2 & 0x1f) << 20 | (vm << 25);
+}
+
+/* Vector vtype */
+
+static uint32_t encode_vtype(bool vta, bool vma,
+ MemOp vsew, RISCVVlmul vlmul)
+{
+ return vma << 7 | vta << 6 | vsew << 3 | vlmul;
+}
+
+static int32_t encode_vset(RISCVInsn opc, TCGReg rd,
+ TCGArg rs1, uint32_t vtype)
+{
+ return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20;
+}
+
+static int32_t encode_vseti(RISCVInsn opc, TCGReg rd,
+ uint32_t uimm, uint32_t vtype)
+{
+ return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20;
+}
+
/*
* RISC-V instruction emitters
*/
@@ -476,6 +667,91 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/*
+ * RISC-V vector instruction emitters
+ */
+
+/*
+ * Vector registers use the same lower 5 bits as GPR registers,
+ * and vm=0 (vm = false) means vector masking ENABLED.
+ * With RVV 1.0, vs2 is the first operand, while rs1/imm is the
+ * second operand.
+ */
+static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc,
+ TCGReg vd, TCGReg vs2, TCGReg vs1)
+{
+ tcg_out32(s, encode_v(opc, vd, vs1, vs2, true));
+}
+
+static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc,
+ TCGReg vd, TCGReg vs2, TCGReg rs1)
+{
+ tcg_out32(s, encode_v(opc, vd, rs1, vs2, true));
+}
+
+static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc,
+ TCGReg vd, TCGReg vs2, int32_t imm)
+{
+ tcg_out32(s, encode_vi(opc, vd, imm, vs2, true));
+}
+
+static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi,
+ TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1)
+{
+ if (c_vi1) {
+ tcg_out_opc_vi(s, o_vi, vd, vs2, vi1);
+ } else {
+ tcg_out_opc_vv(s, o_vv, vd, vs2, vi1);
+ }
+}
+
+static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
+ TCGReg vs2, int32_t imm)
+{
+ tcg_out32(s, encode_vi(opc, vd, imm, vs2, false));
+}
+
+static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
+ TCGReg vs2, TCGReg vs1)
+{
+ tcg_out32(s, encode_v(opc, vd, vs1, vs2, false));
+}
+
+typedef struct VsetCache {
+ uint32_t movi_insn;
+ uint32_t vset_insn;
+} VsetCache;
+
+static VsetCache riscv_vset_cache[3][4];
+
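+/*
+ * Emit the cached vtype-setting sequence for this type/SEW pair (an
+ * optional immediate load followed by a vset* instruction) and record
+ * the new state as current.
+ */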
+static void set_vtype(TCGContext *s, TCGType type, MemOp vsew)
+{
+ const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
+
+ s->riscv_cur_type = type;
+ s->riscv_cur_vsew = vsew;
+
+ if (p->movi_insn) {
+ tcg_out32(s, p->movi_insn);
+ }
+ tcg_out32(s, p->vset_insn);
+}
+
+static MemOp set_vtype_len(TCGContext *s, TCGType type)
+{
+ if (type != s->riscv_cur_type) {
+ set_vtype(s, type, MO_64);
+ }
+ return s->riscv_cur_vsew;
+}
+
+static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew)
+{
+ if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) {
+ set_vtype(s, type, vsew);
+ }
+}
+
+/*
* TCG intrinsics
*/
@@ -489,6 +765,15 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
case TCG_TYPE_I64:
tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ {
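+            /*
+             * Whole-register move: nf is the number of vector
+             * registers covered by this type, at least 1.
+             */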
+ int lmul = type - riscv_lg2_vlenb;
+ int nf = 1 << MAX(lmul, 0);
+ tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1);
+ }
+ break;
default:
g_assert_not_reached();
}
@@ -681,18 +966,101 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
}
}
+static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
+ TCGReg addr, intptr_t offset)
+{
+ tcg_debug_assert(data >= TCG_REG_V0);
+ tcg_debug_assert(addr < TCG_REG_V0);
+
+ if (offset) {
+ tcg_debug_assert(addr != TCG_REG_ZERO);
+ if (offset == sextreg(offset, 0, 12)) {
+ tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr);
+ }
+ addr = TCG_REG_TMP0;
+ }
+ tcg_out32(s, encode_v(opc, data, addr, 0, true));
+}
+
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
- tcg_out_ldst(s, insn, arg, arg1, arg2);
+ RISCVInsn insn;
+
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
+ break;
+ case TCG_TYPE_I64:
+ tcg_out_ldst(s, OPC_LD, arg, arg1, arg2);
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
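+        /*
+         * Types at least as large as a full vector register can use
+         * the whole-register load forms; smaller types use a
+         * unit-stride load under the current SEW.
+         */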
+ if (type >= riscv_lg2_vlenb) {
+ static const RISCVInsn whole_reg_ld[] = {
+ OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V
+ };
+ unsigned idx = type - riscv_lg2_vlenb;
+
+ tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld));
+ insn = whole_reg_ld[idx];
+ } else {
+ static const RISCVInsn unit_stride_ld[] = {
+ OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V
+ };
+ MemOp prev_vsew = set_vtype_len(s, type);
+
+ tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld));
+ insn = unit_stride_ld[prev_vsew];
+ }
+ tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
- tcg_out_ldst(s, insn, arg, arg1, arg2);
+ RISCVInsn insn;
+
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
+ break;
+ case TCG_TYPE_I64:
+ tcg_out_ldst(s, OPC_SD, arg, arg1, arg2);
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ if (type >= riscv_lg2_vlenb) {
+ static const RISCVInsn whole_reg_st[] = {
+ OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V
+ };
+ unsigned idx = type - riscv_lg2_vlenb;
+
+ tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st));
+ insn = whole_reg_st[idx];
+ } else {
+ static const RISCVInsn unit_stride_st[] = {
+ OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V
+ };
+ MemOp prev_vsew = set_vtype_len(s, type);
+
+ tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st));
+ insn = unit_stride_st[prev_vsew];
+ }
+ tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -705,65 +1073,44 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
return false;
}
-static void tcg_out_addsub2(TCGContext *s,
- TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah,
- TCGArg bl, TCGArg bh,
- bool cbl, bool cbh, bool is_sub, bool is32bit)
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg src)
{
- const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
- const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
- const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
- TCGReg th = TCG_REG_TMP1;
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src);
+ return true;
+}
- /* If we have a negative constant such that negating it would
- make the high part zero, we can (usually) eliminate one insn. */
- if (cbl && cbh && bh == -1 && bl != 0) {
- bl = -bl;
- bh = 0;
- is_sub = !is_sub;
- }
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg base, intptr_t offset)
+{
+ tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset);
+ return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
+}
- /* By operating on the high part first, we get to use the final
- carry operation to move back from the temporary. */
- if (!cbh) {
- tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
- } else if (bh != 0 || ah == rl) {
- tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
- } else {
- th = ah;
- }
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, int64_t arg)
+{
+ /* Arg is replicated by VECE; extract the highest element. */
+ arg >>= (-8 << vece) & 63;
- /* Note that tcg optimization should eliminate the bl == 0 case. */
- if (is_sub) {
- if (cbl) {
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
- tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
- } else {
- tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
- tcg_out_opc_reg(s, opc_sub, rl, al, bl);
- }
- tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
- } else {
- if (cbl) {
- tcg_out_opc_imm(s, opc_addi, rl, al, bl);
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
- } else if (al == bl) {
- /*
- * If the input regs overlap, this is a simple doubling
- * and carry-out is the input msb. This special case is
- * required when the output reg overlaps the input,
- * but we might as well use it always.
- */
- tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
- tcg_out_opc_reg(s, opc_add, rl, al, al);
+ if (arg >= -16 && arg < 16) {
+ if (arg == 0 || arg == -1) {
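+            /* The 0 and -1 patterns are the same for every SEW. */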
+ set_vtype_len(s, type);
} else {
- tcg_out_opc_reg(s, opc_add, rl, al, bl);
- tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
- rl, (rl == bl ? al : bl));
+ set_vtype_len_sew(s, type, vece);
}
- tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
+ tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg);
+ return;
}
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg);
+ tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
+}
+
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, l, 0);
+ tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
}
static const struct {
@@ -782,8 +1129,8 @@ static const struct {
[TCG_COND_GTU] = { OPC_BLTU, true }
};
-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
RISCVInsn op = tcg_brcond_to_riscv[cond].op;
@@ -799,6 +1146,11 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_opc_branch(s, op, arg1, arg2, 0);
}
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -923,6 +1275,24 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg arg1, tcg_target_long arg2, bool c2)
{
@@ -961,6 +1331,24 @@ static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_negsetcond(s, cond, dest, arg1, arg2, false);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_negsetcond(s, cond, dest, arg1, arg2, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
int val1, bool c_val1,
int val2, bool c_val2)
@@ -1058,10 +1446,10 @@ static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg cmp1, int cmp2, bool c_cmp2,
- TCGReg val1, bool c_val1,
- TCGReg val2, bool c_val2)
+static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg cmp1, TCGArg cmp2, bool c_cmp2,
+ TCGArg val1, bool c_val1,
+ TCGArg val2, bool c_val2)
{
int tmpflags;
TCGReg t;
@@ -1088,6 +1476,11 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rI, rM, rM),
+ .out = tcg_out_movcond,
+};
+
static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
@@ -1099,17 +1492,77 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
* Note that constraints put 'ret' in a new register, so the
* computation above did not clobber either 'src1' or 'src2'.
*/
- tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
+ tcg_out_movcond(s, type, TCG_COND_EQ, ret, src1, 0, true,
src2, c_src2, ret, false);
}
}
+static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece,
+ TCGCond cond, TCGReg ret,
+ TCGReg cmp1, TCGReg cmp2, bool c_cmp2,
+ TCGReg val1, bool c_val1,
+ TCGReg val2, bool c_val2)
+{
+ set_vtype_len_sew(s, type, vece);
+
+ /* Use only vmerge_vim if possible, by inverting the test. */
+ if (c_val2 && !c_val1) {
+ TCGArg temp = val1;
+ cond = tcg_invert_cond(cond);
+ val1 = val2;
+ val2 = temp;
+ c_val1 = true;
+ c_val2 = false;
+ }
+
+ /* Perform the comparison into V0 mask. */
+ if (c_cmp2) {
+ tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1,
+ cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
+ } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
+ tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
+ TCG_REG_V0, cmp2, cmp1);
+ } else {
+ tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
+ TCG_REG_V0, cmp1, cmp2);
+ }
+ if (c_val1) {
+ if (c_val2) {
+ tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2);
+ val2 = ret;
+ }
+        /* vd[i] = v0.mask[i] ? imm : vs2[i] */
+ tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1);
+ } else {
+        /* vd[i] = v0.mask[i] ? vs1[i] : vs2[i] */
+ tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1);
+ }
+}
+
+static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx,
+ TCGReg dst, TCGReg src, unsigned imm)
+{
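+    /*
+     * The VI form encodes only a 5-bit shift count; larger counts
+     * (possible with SEW=64) go through a scratch register.
+     */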
+ if (imm < 32) {
+ tcg_out_opc_vi(s, opc_vi, dst, src, imm);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
+ tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0);
+ }
+}
+
+static void init_setting_vtype(TCGContext *s)
+{
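+    /* Invalidate the cached vtype so the next vector op re-emits vset*. */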
+ s->riscv_cur_type = TCG_TYPE_COUNT;
+}
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
ptrdiff_t offset = tcg_pcrel_diff(s, arg);
int ret;
+ init_setting_vtype(s);
+
tcg_debug_assert((offset & 1) == 0);
if (offset == sextreg(offset, 0, 20)) {
/* short jump: -2097150 to 2097152 */
@@ -1135,7 +1588,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
tcg_out_call_int(s, arg, false);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
tcg_insn_unit insn = OPC_FENCE;
@@ -1149,7 +1602,7 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
insn |= 0x02100000;
}
if (a0 & TCG_MO_ST_ST) {
- insn |= 0x02200000;
+ insn |= 0x01100000;
}
tcg_out32(s, insn);
}
@@ -1245,13 +1698,15 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
+
+ init_setting_vtype(s);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -1267,7 +1722,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
addr_adj, addr_reg, s_mask - a_mask);
}
- compare_mask = s->page_mask | a_mask;
+ compare_mask = TARGET_PAGE_MASK | a_mask;
if (compare_mask == sextreg(compare_mask, 0, 12)) {
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
} else {
@@ -1306,7 +1761,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
+
+ init_setting_vtype(s);
/* We are expecting alignment max 7, so we can always use andi. */
tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
@@ -1376,22 +1833,31 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
}
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
TCGReg base;
ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
- tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
+ tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), type);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc)
{
@@ -1416,8 +1882,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
TCGReg base;
@@ -1426,12 +1892,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const tcg_insn_unit *tb_ret_addr;
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
@@ -1458,6 +1933,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -1475,553 +1955,960 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
- int c2 = const_args[2];
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
- break;
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
- case INDEX_op_br:
- tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
- tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
- break;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_ldst(s, OPC_LB, a0, a1, a2);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_ldst(s, OPC_LH, a0, a1, a2);
- break;
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, OPC_LW, a0, a1, a2);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, OPC_LD, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_ldst(s, OPC_SB, a0, a1, a2);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_ldst(s, OPC_SH, a0, a1, a2);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, OPC_SW, a0, a1, a2);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, OPC_SD, a0, a1, a2);
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
- }
- break;
+static void tcg_out_set_carry(TCGContext *s)
+{
+ g_assert_not_reached();
+}
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
- }
- break;
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
- }
- break;
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+}
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
- } else {
- tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
- }
- break;
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
- } else {
- tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
- }
- break;
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
- } else {
- tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
- }
- break;
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
+}
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
- break;
+static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
- case INDEX_op_neg_i32:
- tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
- break;
- case INDEX_op_neg_i64:
- tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
- break;
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_zbb_rrr,
+ .out_rrr = tgen_andc,
+};
- case INDEX_op_mul_i32:
- tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
- break;
- case INDEX_op_mul_i64:
- tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
- break;
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, false);
+}
- case INDEX_op_div_i32:
- tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
- break;
- case INDEX_op_div_i64:
- tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
- break;
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, true);
+}
- case INDEX_op_divu_i32:
- tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
- break;
- case INDEX_op_divu_i64:
- tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
- break;
+static TCGConstraintSetIndex cset_clzctz(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_N1_I2(r, r, rM) : C_NotImplemented;
+}
- case INDEX_op_rem_i32:
- tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
- break;
- case INDEX_op_rem_i64:
- tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clzctz,
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
- case INDEX_op_remu_i32:
- tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
- break;
- case INDEX_op_remu_i64:
- tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
- break;
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CPOPW : OPC_CPOP;
+ tcg_out_opc_imm(s, insn, a0, a1, 0);
+}
- case INDEX_op_shl_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
- }
- break;
- case INDEX_op_shl_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
- }
- break;
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
+}
- case INDEX_op_shr_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
- }
- break;
- case INDEX_op_shr_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
- }
- break;
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
- case INDEX_op_sar_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
- }
- break;
- case INDEX_op_sar_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
- }
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, false);
+}
- case INDEX_op_rotl_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
- } else {
- tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
- }
- break;
- case INDEX_op_rotl_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
- } else {
- tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
- }
- break;
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, true);
+}
- case INDEX_op_rotr_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
- } else {
- tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
- }
- break;
- case INDEX_op_rotr_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
- } else {
- tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
- }
- break;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clzctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
- case INDEX_op_bswap64_i64:
- tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
- break;
- case INDEX_op_bswap32_i32:
- a2 = 0;
- /* fall through */
- case INDEX_op_bswap32_i64:
- tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
- if (a2 & TCG_BSWAP_OZ) {
- tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
- } else {
- tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVW : OPC_DIV;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVUW : OPC_DIVU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_zbb_rrr,
+ .out_rrr = tgen_eqv,
+};
+
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_MULW : OPC_MUL;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
+}
+
+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_mulsh,
+};
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_muluh,
+};
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_zbb_rrr,
+ .out_rrr = tgen_orc,
+};
+
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMW : OPC_REM;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rems,
+};
+
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMUW : OPC_REMU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_remu,
+};
+
+static TCGConstraintSetIndex cset_rot(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, ri) : C_NotImplemented;
+}
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORW : OPC_ROR;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORIW : OPC_RORI;
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
+}
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_rot,
+ .out_rrr = tgen_rotr,
+ .out_rri = tgen_rotri,
+};
+
+static void tgen_rotl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ROLW : OPC_ROL;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_rotli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_rotri(s, type, a0, a1, -a2);
+}
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_rot,
+ .out_rrr = tgen_rotl,
+ .out_rri = tgen_rotli,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAW : OPC_SRA;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAIW : OPC_SRAI;
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLW : OPC_SLL;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLIW : OPC_SLLI;
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLW : OPC_SRL;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLIW : OPC_SRLI;
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ g_assert_not_reached();
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static TCGConstraintSetIndex cset_bswap(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
+ } else {
+ tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_bswap,
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
+ } else {
+ tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_bswap,
+ .out_rr = tgen_bswap32,
+};
+
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_bswap,
+ .out_rr = tgen_bswap64,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_xori(s, type, a0, a1, -1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 16:
+ tcg_out_ext16u(s, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32u(s, a0, a1);
+ return;
}
- break;
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap16_i32:
- tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
- if (a2 & TCG_BSWAP_OZ) {
- tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
- } else {
- tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
+ }
+ if (ofs + len == 32) {
+ tgen_shri(s, TCG_TYPE_I32, a0, a1, ofs);
+ return;
+ }
+ if (len == 1) {
+ tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
}
- break;
+ } else if (ofs + len == 32) {
+ tgen_sari(s, TCG_TYPE_I32, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
- case INDEX_op_ctpop_i32:
- tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
- break;
- case INDEX_op_ctpop_i64:
- tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
- break;
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
- case INDEX_op_clz_i32:
- tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
- break;
- case INDEX_op_clz_i64:
- tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
- break;
- case INDEX_op_ctz_i32:
- tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
- break;
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
- case INDEX_op_add2_i32:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], false, true);
- break;
- case INDEX_op_add2_i64:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], false, false);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], true, true);
- break;
- case INDEX_op_sub2_i64:
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
- const_args[4], const_args[5], true, false);
- break;
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LBU, dest, base, offset);
+}
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, args[3], a0, a1, a2, c2);
- break;
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LB, dest, base, offset);
+}
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
- break;
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, c2,
- args[3], const_args[3], args[4], const_args[4]);
- break;
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LHU, dest, base, offset);
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LH, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LWU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LW, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SB, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SH, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
+{
+ TCGType type = vecl + TCG_TYPE_V64;
+ TCGArg a0, a1, a2;
+ int c2;
+
+ a0 = args[0];
+ a1 = args[1];
+ a2 = args[2];
+ c2 = const_args[2];
+
+ switch (opc) {
+ case INDEX_op_dupm_vec:
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
+ case INDEX_op_ld_vec:
+ tcg_out_ld(s, type, a0, a1, a2);
break;
-
- case INDEX_op_extrh_i64_i32:
- tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
+ case INDEX_op_st_vec:
+ tcg_out_st(s, type, a0, a1, a2);
break;
-
- case INDEX_op_mulsh_i32:
- case INDEX_op_mulsh_i64:
- tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
+ case INDEX_op_add_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2);
break;
-
- case INDEX_op_muluh_i32:
- case INDEX_op_muluh_i64:
- tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
+ case INDEX_op_sub_vec:
+ set_vtype_len_sew(s, type, vece);
+ if (const_args[1]) {
+ tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1);
+ } else {
+ tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2);
+ }
break;
+ case INDEX_op_and_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_or_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_xor_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_not_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1);
+ break;
+ case INDEX_op_neg_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0);
+ break;
+ case INDEX_op_mul_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2);
+ break;
+ case INDEX_op_ssadd_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_sssub_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_usadd_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_ussub_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_smax_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_smin_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_umax_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_umin_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_shls_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2);
+ break;
+ case INDEX_op_shrs_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2);
+ break;
+ case INDEX_op_sars_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2);
+ break;
+ case INDEX_op_shlv_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
+ break;
+ case INDEX_op_shrv_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
+ break;
+ case INDEX_op_sarv_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2);
+ break;
+ case INDEX_op_shli_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
+ break;
+ case INDEX_op_shri_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
+ break;
+ case INDEX_op_sari_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2);
+ break;
+ case INDEX_op_rotli_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
+ tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1,
+ -a2 & ((8 << vece) - 1));
+ tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
+ break;
+ case INDEX_op_rotls_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
+ tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0);
+ tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
+ break;
+ case INDEX_op_rotlv_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
+ tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0);
+ tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
+ tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
+ break;
+ case INDEX_op_rotrv_vec:
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
+ tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0);
+ tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
+ tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
+ break;
+ case INDEX_op_cmp_vec:
+ tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2,
+ -1, true, 0, true);
+ break;
+ case INDEX_op_cmpsel_vec:
+ tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2,
+ args[3], const_args[3], args[4], const_args[4]);
+ break;
+ case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
+ default:
+ g_assert_not_reached();
+ }
+}
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
+ TCGArg a0, ...)
+{
+ g_assert_not_reached();
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
+{
+ switch (opc) {
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ case INDEX_op_and_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_not_vec:
+ case INDEX_op_neg_vec:
+ case INDEX_op_mul_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_ussub_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_smin_vec:
+ case INDEX_op_umax_vec:
+ case INDEX_op_umin_vec:
+ case INDEX_op_shls_vec:
+ case INDEX_op_shrs_vec:
+ case INDEX_op_sars_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_sari_vec:
+ case INDEX_op_rotls_vec:
+ case INDEX_op_rotlv_vec:
+ case INDEX_op_rotrv_vec:
+ case INDEX_op_rotli_vec:
+ case INDEX_op_cmp_vec:
+ case INDEX_op_cmpsel_vec:
+ return 1;
default:
- g_assert_not_reached();
+ return 0;
}
}
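/*
 * Note on the rotli_vec expansion above: the rotate-left-by-immediate is
 * synthesized from two shifts and an OR using the identity
 *     rotl(x, i) == (x << i) | (x >> (-i & (bits - 1)))
 * with bits == 8 << vece.  A minimal scalar sketch of the same identity
 * (rotl32_sketch is an illustrative name, not part of the backend):
 */
static inline uint32_t rotl32_sketch(uint32_t x, unsigned i)
{
    /* e.g. i = 5: (x << 5) | (x >> 27) */
    return (x << i) | (x >> (-i & 31));
}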
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_not_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_not_i64:
- case INDEX_op_neg_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(rZ, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_and_i32:
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- case INDEX_op_add_i64:
- case INDEX_op_and_i64:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rI);
-
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rJ);
-
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rZ, rN);
-
- case INDEX_op_mul_i32:
- case INDEX_op_mulsh_i32:
- case INDEX_op_muluh_i32:
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- case INDEX_op_rem_i32:
- case INDEX_op_remu_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_mulsh_i64:
- case INDEX_op_muluh_i64:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i64:
- return C_O1_I2(r, rZ, rZ);
-
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_N1_I2(r, r, rM);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rZ, rZ);
-
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rI, rM, rM);
-
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, rZ, rZ, rM, rM);
-
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I2(rZ, r);
-
+ case INDEX_op_st_vec:
+ return C_O0_I2(v, r);
+ case INDEX_op_dup_vec:
+ case INDEX_op_dupm_vec:
+ case INDEX_op_ld_vec:
+ return C_O1_I1(v, r);
+ case INDEX_op_neg_vec:
+ case INDEX_op_not_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_sari_vec:
+ case INDEX_op_rotli_vec:
+ return C_O1_I1(v, v);
+ case INDEX_op_add_vec:
+ case INDEX_op_and_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_ussub_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_smin_vec:
+ case INDEX_op_umax_vec:
+ case INDEX_op_umin_vec:
+ return C_O1_I2(v, v, vK);
+ case INDEX_op_sub_vec:
+ return C_O1_I2(v, vK, v);
+ case INDEX_op_mul_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_rotlv_vec:
+ case INDEX_op_rotrv_vec:
+ return C_O1_I2(v, v, v);
+ case INDEX_op_shls_vec:
+ case INDEX_op_shrs_vec:
+ case INDEX_op_sars_vec:
+ case INDEX_op_rotls_vec:
+ return C_O1_I2(v, v, r);
+ case INDEX_op_cmp_vec:
+ return C_O1_I2(v, v, vL);
+ case INDEX_op_cmpsel_vec:
+ return C_O1_I4(v, v, vL, vK, vK);
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
@@ -2093,7 +2980,65 @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_out_tb_start(TCGContext *s)
{
- /* nothing to do */
+ init_setting_vtype(s);
+}
+
+static bool vtype_check(unsigned vtype)
+{
+ unsigned long tmp;
+
+ /* vsetvl tmp, zero, vtype */
+ asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype));
+ return tmp != 0;
+}
+
+static void probe_frac_lmul_1(TCGType type, MemOp vsew)
+{
+ VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
+ unsigned avl = tcg_type_size(type) >> vsew;
+ int lmul = type - riscv_lg2_vlenb;
+ unsigned vtype = encode_vtype(true, true, vsew, lmul & 7);
+ bool lmul_eq_avl = true;
+
+ /* Guaranteed by Zve64x. */
+ assert(lmul < 3);
+
+ /*
+ * For LMUL < -3, the host vector size is so large that TYPE
+ * is smaller than the minimum 1/8 fraction.
+ *
+ * For other fractional LMUL settings, implementations must
+ * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive.
+ * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32,
+ * but e64 may not be supported. In other words, the hardware only
+ * guarantees SEW_MIN <= SEW <= LMUL * ELEN. Check.
+ */
+ if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) {
+ vtype = encode_vtype(true, true, vsew, VLMUL_M1);
+ lmul_eq_avl = false;
+ }
+
+ if (avl < 32) {
+ p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype);
+ } else if (lmul_eq_avl) {
+ /* rd != 0 and rs1 == 0 uses vlmax */
+ p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype);
+ } else {
+ p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl);
+ p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype);
+ }
+}
+
+static void probe_frac_lmul(void)
+{
+ /* Match riscv_lg2_vlenb to TCG_TYPE_V64. */
+ QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3);
+
+ for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) {
+ for (MemOp e = MO_8; e <= MO_64; e++) {
+ probe_frac_lmul_1(t, e);
+ }
+ }
}
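/*
 * Worked example for probe_frac_lmul_1(), assuming a host with VLEN = 128
 * bits (vlenb = 16, riscv_lg2_vlenb = 4):
 *   - TCG_TYPE_V64 (= 3) at MO_64: lmul = 3 - 4 = -1, i.e. LMUL = 1/2, and
 *     avl = tcg_type_size(V64) >> MO_64 = 8 >> 3 = 1.  With ELEN = 64 the
 *     spec only guarantees SEW <= LMUL * ELEN = 32, so vtype_check() may
 *     fail and we fall back to LMUL = 1 with an explicit AVL.
 *   - TCG_TYPE_V256 (= 5) at MO_8: lmul = 5 - 4 = 1, i.e. LMUL = 2, and
 *     avl = 32 >> 0 = 32, which no longer fits VSETIVLI's 5-bit immediate,
 *     so the VSETVLI/vlmax form is cached instead.
 */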
static void tcg_target_init(TCGContext *s)
@@ -2101,7 +3046,7 @@ static void tcg_target_init(TCGContext *s)
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
- tcg_target_call_clobber_regs = -1u;
+ tcg_target_call_clobber_regs = -1;
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
@@ -2123,6 +3068,32 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
+
+ if (cpuinfo & CPUINFO_ZVE64X) {
+ switch (riscv_lg2_vlenb) {
+ case TCG_TYPE_V64:
+ tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS;
+ tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS;
+ s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
+ break;
+ case TCG_TYPE_V128:
+ tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+ tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS;
+ s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
+ break;
+ default:
+ /* Guaranteed by Zve64x. */
+ tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256);
+ tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+ tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
+ break;
+ }
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0);
+ probe_frac_lmul();
+ }
}
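/*
 * Note on the register-group masks above, assuming the standard RVV
 * grouping rules: a value wider than VLEN occupies a group of LMUL
 * registers whose base register number must be a multiple of LMUL.
 * With VLEN = 64 (riscv_lg2_vlenb == TCG_TYPE_V64), a V128 value needs a
 * pair (v0, v2, v4, ...) and a V256 value a quad (v0, v4, v8, ...), which
 * is what ALL_DVECTOR_REG_GROUPS and ALL_QVECTOR_REG_GROUPS express;
 * registers that cannot start a group of the widest supported type are
 * reserved outright.
 */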
typedef struct {
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index 1a347ea..6dc77d9 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -25,45 +25,29 @@
#ifndef RISCV_TCG_TARGET_H
#define RISCV_TCG_TARGET_H
-#include "host/cpuinfo.h"
-
#define TCG_TARGET_INSN_UNIT_SIZE 4
-#define TCG_TARGET_NB_REGS 32
+#define TCG_TARGET_NB_REGS 64
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
typedef enum {
- TCG_REG_ZERO,
- TCG_REG_RA,
- TCG_REG_SP,
- TCG_REG_GP,
- TCG_REG_TP,
- TCG_REG_T0,
- TCG_REG_T1,
- TCG_REG_T2,
- TCG_REG_S0,
- TCG_REG_S1,
- TCG_REG_A0,
- TCG_REG_A1,
- TCG_REG_A2,
- TCG_REG_A3,
- TCG_REG_A4,
- TCG_REG_A5,
- TCG_REG_A6,
- TCG_REG_A7,
- TCG_REG_S2,
- TCG_REG_S3,
- TCG_REG_S4,
- TCG_REG_S5,
- TCG_REG_S6,
- TCG_REG_S7,
- TCG_REG_S8,
- TCG_REG_S9,
- TCG_REG_S10,
- TCG_REG_S11,
- TCG_REG_T3,
- TCG_REG_T4,
- TCG_REG_T5,
- TCG_REG_T6,
+ TCG_REG_ZERO, TCG_REG_RA, TCG_REG_SP, TCG_REG_GP,
+ TCG_REG_TP, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2,
+ TCG_REG_S0, TCG_REG_S1, TCG_REG_A0, TCG_REG_A1,
+ TCG_REG_A2, TCG_REG_A3, TCG_REG_A4, TCG_REG_A5,
+ TCG_REG_A6, TCG_REG_A7, TCG_REG_S2, TCG_REG_S3,
+ TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7,
+ TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11,
+ TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6,
+
+ /* RISC-V V Extension registers */
+ TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+ TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
+ TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
/* aliases */
TCG_AREG0 = TCG_REG_S0,
@@ -73,92 +57,6 @@ typedef enum {
TCG_REG_TMP2 = TCG_REG_T4,
} TCGReg;
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_SP
-#define TCG_TARGET_STACK_ALIGN 16
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-
-/* optional instructions */
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_div2_i32 0
-#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_deposit_i32 0
-#define TCG_TARGET_HAS_extract_i32 0
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_orc_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_brcond2 1
-#define TCG_TARGET_HAS_setcond2 1
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_div2_i64 0
-#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_deposit_i64 0
-#define TCG_TARGET_HAS_extract_i64 0
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 1
-#define TCG_TARGET_HAS_mulsh_i64 1
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
-#define TCG_TARGET_HAS_tst 0
-
-#define TCG_TARGET_DEFAULT_MO (0)
-
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
+#define TCG_REG_ZERO TCG_REG_ZERO
#endif
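With the vector file appended, the register numbering seen by TCG doubles: indexes 0-31 stay the integer registers and 32-63 are V0-V31, which is why TCG_TARGET_NB_REGS grows to 64 and register sets become 64 bits wide. A minimal sketch of what that layout implies, assuming the usual QEMU_BUILD_BUG_ON and MAKE_64BIT_MASK helpers (the mask name is illustrative only):

    QEMU_BUILD_BUG_ON(TCG_REG_V0 != 32);
    QEMU_BUILD_BUG_ON(TCG_REG_V31 != 63);
    /* All 32 vector registers, i.e. register-set bits 32..63. */
    #define ALL_RVV_VECTOR_REGS  MAKE_64BIT_MASK(32, 32)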
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index f75955e..f67fd78 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -22,6 +22,7 @@ C_O1_I1(r, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I1(v, vr)
+C_O1_I2(r, 0, r)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, 0, rJ)
@@ -31,18 +32,16 @@ C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rK)
-C_O1_I2(r, r, rKR)
-C_O1_I2(r, r, rNK)
C_O1_I2(r, r, rNKR)
+C_O1_I2(r, r, rUV)
C_O1_I2(r, rZ, r)
C_O1_I2(v, v, r)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
-C_O1_I4(r, r, ri, rI, r)
+C_O1_I4(v, v, v, vZ, v)
+C_O1_I4(v, v, v, vZM, v)
C_O1_I4(r, r, rC, rI, r)
C_O2_I1(o, m, r)
C_O2_I2(o, m, 0, r)
C_O2_I2(o, m, r, r)
C_O2_I3(o, m, 0, 1, r)
-C_N1_O1_I4(r, r, 0, 1, ri, r)
-C_N1_O1_I4(r, r, 0, 1, rJU, r)
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
index 745f6c0d..636a38a 100644
--- a/tcg/s390x/tcg-target-con-str.h
+++ b/tcg/s390x/tcg-target-con-str.h
@@ -20,7 +20,9 @@ CONST('C', TCG_CT_CONST_CMP)
CONST('I', TCG_CT_CONST_S16)
CONST('J', TCG_CT_CONST_S32)
CONST('K', TCG_CT_CONST_P32)
+CONST('M', TCG_CT_CONST_M1)
CONST('N', TCG_CT_CONST_INV)
CONST('R', TCG_CT_CONST_INVRISBG)
CONST('U', TCG_CT_CONST_U32)
+CONST('V', TCG_CT_CONST_N32)
CONST('Z', TCG_CT_CONST_ZERO)
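The two new letters map directly onto the TCG_CT_CONST_* bits added in tcg-target.c.inc below: 'M' matches only the constant -1 (used by the vZM vector constraint set) and 'V' matches any value whose negation fits in 32 bits (used by the rUV add/sub-carry sets). A minimal sketch of the corresponding clauses, mirroring the tcg_target_const_match() hunk further down:

    if ((ct & TCG_CT_CONST_M1) && val == -1) {
        return true;    /* 'M': exactly minus one */
    }
    if ((ct & TCG_CT_CONST_N32) && -(uint64_t)val <= UINT32_MAX) {
        return true;    /* 'V': negated value fits in 32 bits */
    }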
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
new file mode 100644
index 0000000..0aeb5ba
--- /dev/null
+++ b/tcg/s390x/tcg-target-has.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+/* Facilities required for proper operation; checked at startup. */
+
+#define FACILITY_ZARCH_ACTIVE 2
+#define FACILITY_LONG_DISP 18
+#define FACILITY_EXT_IMM 21
+#define FACILITY_GEN_INST_EXT 34
+#define FACILITY_45 45
+
+/* Facilities that are checked at runtime. */
+
+#define FACILITY_LOAD_ON_COND2 53
+#define FACILITY_MISC_INSN_EXT2 58
+#define FACILITY_MISC_INSN_EXT3 61
+#define FACILITY_VECTOR 129
+#define FACILITY_VECTOR_ENH1 135
+
+extern uint64_t s390_facilities[3];
+
+#define HAVE_FACILITY(X) \
+ ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
+
+/* optional instructions */
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 1
+#define TCG_TARGET_HAS_tst 1
+
+#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
+#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_nor_vec 1
+#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 1
+#define TCG_TARGET_HAS_roti_vec 1
+#define TCG_TARGET_HAS_rots_vec 1
+#define TCG_TARGET_HAS_rotv_vec 1
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 1
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 0
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 1
+#define TCG_TARGET_HAS_cmpsel_vec 1
+#define TCG_TARGET_HAS_tst_vec 0
+
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
+
+static inline bool
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ case 16:
+ return true;
+ case 32:
+ return type == TCG_TYPE_I64;
+ }
+ }
+ return false;
+}
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
+
+#endif
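HAVE_FACILITY() indexes the facility list the way STFLE stores it: bits are numbered from the most significant bit of each 64-bit word. For example FACILITY_VECTOR (129) lands in s390_facilities[129 / 64] = s390_facilities[2] and is tested with a right shift of 63 - (129 % 64) = 62. A small compile-time check of that arithmetic, assuming only the macro above:

    QEMU_BUILD_BUG_ON(FACILITY_VECTOR / 64 != 2);
    QEMU_BUILD_BUG_ON(63 - FACILITY_VECTOR % 64 != 62);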
diff --git a/tcg/s390x/tcg-target-mo.h b/tcg/s390x/tcg-target-mo.h
new file mode 100644
index 0000000..962295e
--- /dev/null
+++ b/tcg/s390x/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
+#endif
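TCG_MO_ALL & ~TCG_MO_ST_LD says the host already preserves every ordering except store-followed-by-load, which matches the strong z/Architecture memory model; only barriers that request store->load ordering need to emit anything. A minimal sketch of a barrier hook honoring that (tcg_out_mb_sketch is an illustrative name, not the backend's actual hook; "bcr 15,0" is the architectural serialization no-op):

    static void tcg_out_mb_sketch(TCGContext *s, TCGArg a0)
    {
        if (a0 & TCG_MO_ST_LD) {
            tcg_out16(s, 0x07f0);   /* bcr 15,0: serialize */
        }
    }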
diff --git a/tcg/s390x/tcg-target-opc.h.inc b/tcg/s390x/tcg-target-opc.h.inc
new file mode 100644
index 0000000..61237b3
--- /dev/null
+++ b/tcg/s390x/tcg-target-opc.h.inc
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021 Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+DEF(s390_vuph_vec, 1, 1, 0, TCG_OPF_VECTOR)
+DEF(s390_vupl_vec, 1, 1, 0, TCG_OPF_VECTOR)
+DEF(s390_vpks_vec, 1, 2, 0, TCG_OPF_VECTOR)
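Each DEF() entry declares an internal opcode as (name, outputs, inputs, constant args, flags), so s390_vuph_vec and s390_vupl_vec take one vector input and s390_vpks_vec takes two. A minimal sketch of emitting one of them from tcg_expand_vec_op(), assuming the generic vec_gen_2() helper and TCGv_vec values v0/v1:

    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(v0), tcgv_vec_arg(v1));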
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index ad58732..84a9e73 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -24,10 +24,16 @@
* THE SOFTWARE.
*/
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
#include "elf.h"
+/* Used for function call generation. */
+#define TCG_TARGET_STACK_ALIGN 8
+#define TCG_TARGET_CALL_STACK_OFFSET 160
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
+
#define TCG_CT_CONST_S16 (1 << 8)
#define TCG_CT_CONST_S32 (1 << 9)
#define TCG_CT_CONST_U32 (1 << 10)
@@ -36,6 +42,8 @@
#define TCG_CT_CONST_INV (1 << 13)
#define TCG_CT_CONST_INVRISBG (1 << 14)
#define TCG_CT_CONST_CMP (1 << 15)
+#define TCG_CT_CONST_M1 (1 << 16)
+#define TCG_CT_CONST_N32 (1 << 17)
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -46,6 +54,7 @@
 /* A scratch register that may be used throughout the backend. */
#define TCG_TMP0 TCG_REG_R1
+#define TCG_VEC_TMP0 TCG_REG_V31
#define TCG_GUEST_BASE_REG TCG_REG_R13
@@ -126,6 +135,9 @@ typedef enum S390Opcode {
RIEc_CLGIJ = 0xec7d,
RIEc_CLIJ = 0xec7f,
+ RIEd_ALHSIK = 0xecda,
+ RIEd_ALGHSIK = 0xecdb,
+
RIEf_RISBG = 0xec55,
RIEg_LOCGHI = 0xec46,
@@ -164,6 +176,8 @@ typedef enum S390Opcode {
RRE_SLBGR = 0xb989,
RRE_XGR = 0xb982,
+ RRFa_ALRK = 0xb9fa,
+ RRFa_ALGRK = 0xb9ea,
RRFa_MGRK = 0xb9ec,
RRFa_MSRKC = 0xb9fd,
RRFa_MSGRKC = 0xb9ed,
@@ -563,6 +577,20 @@ static bool tcg_target_const_match(int64_t val, int ct,
}
if (ct & TCG_CT_CONST_CMP) {
+ if (is_tst_cond(cond)) {
+ if (is_const_p16(uval) >= 0) {
+ return true; /* TMxx */
+ }
+ if (risbg_mask(uval)) {
+ return true; /* RISBG */
+ }
+ return false;
+ }
+
+ if (type == TCG_TYPE_I32) {
+ return true;
+ }
+
switch (cond) {
case TCG_COND_EQ:
case TCG_COND_NE:
@@ -582,13 +610,7 @@ static bool tcg_target_const_match(int64_t val, int ct,
break;
case TCG_COND_TSTNE:
case TCG_COND_TSTEQ:
- if (is_const_p16(uval) >= 0) {
- return true; /* TMxx */
- }
- if (risbg_mask(uval)) {
- return true; /* RISBG */
- }
- break;
+ /* checked above, fallthru */
default:
g_assert_not_reached();
}
@@ -597,7 +619,10 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
return true;
}
- if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+ if ((ct & TCG_CT_CONST_U32) && uval <= UINT32_MAX) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_N32) && -uval <= UINT32_MAX) {
return true;
}
if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
@@ -606,6 +631,9 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
return true;
}
+ if ((ct & TCG_CT_CONST_M1) && val == -1) {
+ return true;
+ }
if (ct & TCG_CT_CONST_INV) {
val = ~val;
@@ -657,8 +685,16 @@ static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
+static void tcg_out_insn_RIEd(TCGContext *s, S390Opcode op,
+ TCGReg r1, TCGReg r3, int i2)
+{
+ tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
+ tcg_out16(s, i2);
+ tcg_out16(s, op & 0xff);
+}
+
static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
- int i2, int m3)
+ int i2, int m3)
{
tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
tcg_out32(s, (i2 << 16) | (op & 0xff));
@@ -932,25 +968,32 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
if (pc_off == (int32_t)pc_off) {
tcg_out_insn(s, RIL, LARL, ret, pc_off);
if (sval & 1) {
- tcg_out_insn(s, RI, AGHI, ret, 1);
+ tcg_out_insn(s, RX, LA, ret, ret, TCG_REG_NONE, 1);
}
return;
}
- /* Otherwise, load it by parts. */
- i = is_const_p16((uint32_t)uval);
- if (i >= 0) {
- tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
- } else {
- tcg_out_insn(s, RIL, LLILF, ret, uval);
- }
- uval >>= 32;
- i = is_const_p16(uval);
- if (i >= 0) {
- tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
- } else {
- tcg_out_insn(s, RIL, OIHF, ret, uval);
+ if (!s->carry_live) {
+ /* Load by parts, at most 2 instructions. */
+ i = is_const_p16((uint32_t)uval);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
+ } else {
+ tcg_out_insn(s, RIL, LLILF, ret, uval);
+ }
+ uval >>= 32;
+ i = is_const_p16(uval);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
+ } else {
+ tcg_out_insn(s, RIL, OIHF, ret, uval);
+ }
+ return;
}
+
+ /* Otherwise, stuff it in the constant pool. */
+ tcg_out_insn(s, RIL, LGRL, ret, 0);
+ new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
}
/* Emit a load/store type instruction. Inputs are:
@@ -1351,9 +1394,9 @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
}
-static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGReg dest, TCGReg c1, TCGArg c2,
- bool c2const, bool neg)
+static void tgen_setcond_int(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg c1, TCGArg c2,
+ bool c2const, bool neg)
{
int cc;
@@ -1445,6 +1488,42 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
}
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rC),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
TCGArg v3, int v3const, TCGReg v4,
int cc, int inv_cc)
@@ -1485,9 +1564,9 @@ static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}
-static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
- TCGReg c1, TCGArg c2, int c2const,
- TCGArg v3, int v3const, TCGReg v4)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool c2const,
+ TCGArg v3, bool v3const, TCGArg v4, bool v4const)
{
int cc, inv_cc;
@@ -1495,66 +1574,96 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}
-static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
- TCGArg a2, int a2const)
-{
- /* Since this sets both R and R+1, we have no choice but to store the
- result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
- QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
- tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rI, r),
+ .out = tgen_movcond,
+};
- if (a2const && a2 == 64) {
- tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
- return;
- }
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ unsigned lsb = (63 - ofs);
+ unsigned msb = lsb - (len - 1);
/*
- * Conditions from FLOGR are:
- * 2 -> one bit found
- * 8 -> no one bit found
+ * Since we can't support "0Z" as a constraint, we allow a1 in
+ * any register. Fix things up as if a matching constraint.
*/
- tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
+ if (a0 != a1) {
+ if (a0 == a2) {
+ tcg_out_mov(s, type, TCG_TMP0, a2);
+ a2 = TCG_TMP0;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ }
+ tcg_out_risbg(s, a0, a2, msb, lsb, ofs, false);
}
-static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2,
+ unsigned ofs, unsigned len)
{
- /* With MIE3, and bit 0 of m4 set, we get the complete result. */
- if (HAVE_FACILITY(MISC_INSN_EXT3)) {
- if (type == TCG_TYPE_I32) {
+ unsigned lsb = (63 - ofs);
+ unsigned msb = lsb - (len - 1);
+ tcg_out_risbg(s, a0, a2, msb, lsb, ofs, true);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, rZ, r),
+ .out_rrr = tgen_deposit,
+ .out_rzr = tgen_depositz,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg src, unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8u(s, dest, src);
+ return;
+ case 16:
+ tcg_out_ext16u(s, dest, src);
+ return;
+ case 32:
tcg_out_ext32u(s, dest, src);
- src = dest;
+ return;
}
- tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
- return;
}
+ tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
+}
- /* Without MIE3, each byte gets the count of bits for the byte. */
- tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
- /* Multiply to sum each byte at the top of the word. */
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
- tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
- } else {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
- tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
- tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg src, unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, TCG_TYPE_REG, dest, src);
+ return;
+ case 16:
+ tcg_out_ext16s(s, TCG_TYPE_REG, dest, src);
+ return;
+ case 32:
+ tcg_out_ext32s(s, dest, src);
+ return;
+ }
}
+ g_assert_not_reached();
}
-static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len, int z)
-{
- int lsb = (63 - ofs);
- int msb = lsb - (len - 1);
- tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
-}
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
-static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len)
-{
- tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
-}
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
{
@@ -1580,6 +1689,11 @@ static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
}
}
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tgen_branch(s, S390_CC_ALWAYS, l);
+}
+
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
TCGReg r1, TCGReg r2, TCGLabel *l)
{
@@ -1653,6 +1767,24 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
tgen_branch(s, cc, l);
}
+static void tgen_brcondr(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a0, TCGReg a1, TCGLabel *l)
+{
+ tgen_brcond(s, type, c, a0, a1, false, l);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a0, tcg_target_long a1, TCGLabel *l)
+{
+ tgen_brcond(s, type, c, a0, a1, true, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcondr,
+ .out_ri = tgen_brcondi,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
@@ -1869,10 +2001,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
@@ -1884,7 +2016,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* byte of the access.
*/
a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
- tlb_mask = (uint64_t)s->page_mask | a_mask;
+ tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
if (a_off == 0) {
tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
} else {
@@ -1923,7 +2055,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
tcg_debug_assert(a_mask <= 0xffff);
tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
@@ -1949,8 +2081,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
return ldst;
}
-static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -1959,14 +2091,19 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
-static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -1975,12 +2112,17 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -2055,6 +2197,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_O2_I1(o, m, r),
+ .out = tgen_qemu_ld2,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(o, m, r),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
/* Reuse the zeroing that exists for goto_ptr. */
@@ -2081,6 +2245,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
@@ -2094,664 +2263,903 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
/* no need to flush icache explicitly */
}
-# define OP_32_64(x) \
- case glue(glue(INDEX_op_,x),_i32): \
- case glue(glue(INDEX_op_,x),_i64)
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- S390Opcode op, op2;
- TCGArg a0, a1, a2;
+ if (a0 != a1) {
+ tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RR, AR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, AGR, a0, a2);
+ }
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- a0 = args[0];
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
- break;
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a0 == a1) {
+ if (type == TCG_TYPE_I32) {
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, AHI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, AFI, a0, a2);
+ }
+ return;
+ }
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, AGHI, a0, a2);
+ return;
+ }
+ if (a2 == (int32_t)a2) {
+ tcg_out_insn(s, RIL, AGFI, a0, a2);
+ return;
+ }
+ if (a2 == (uint32_t)a2) {
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
+ return;
+ }
+ if (-a2 == (uint32_t)-a2) {
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
+ return;
+ }
+ }
+ tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
+}
- OP_32_64(ld8u):
- /* ??? LLC (RXY format) is only present with the extended-immediate
- facility, whereas LLGC is always present. */
- tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
- OP_32_64(ld8s):
- /* ??? LB is no smaller than LGB, so no point to using it. */
- tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, ALGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, ALR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, ALRK, a0, a1, a2);
+ }
+}
- OP_32_64(ld16u):
- /* ??? LLH (RXY format) is only present with the extended-immediate
- facility, whereas LLGH is always present. */
- tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == (int16_t)a2) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIEd, ALHSIK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RIEd, ALGHSIK, a0, a1, a2);
+ }
+ return;
+ }
- case INDEX_op_ld16s_i32:
- tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
+ tcg_out_mov(s, type, a0, a1);
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, ALFI, a0, a2);
+ } else if (a2 >= 0) {
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
+ }
+}
- case INDEX_op_ld_i32:
- tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, r, rUV),
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
- OP_32_64(st8):
- tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
- TCG_REG_NONE, args[2]);
- break;
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, ALCR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, ALCGR, a0, a2);
+ }
+}
- OP_32_64(st16):
- tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
- TCG_REG_NONE, args[2]);
- break;
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_addcio,
+};
- case INDEX_op_st_i32:
- tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_addcio,
+};
- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
- if (const_args[2]) {
- do_addi_32:
- if (a0 == a1) {
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, AHI, a0, a2);
- break;
- }
- tcg_out_insn(s, RIL, AFI, a0, a2);
- break;
- }
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, AR, a0, a2);
- } else {
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
- }
- break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
- if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, SR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
- }
- break;
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out_insn(s, RR, SLR, TCG_REG_R0, TCG_REG_R0); /* cc = 2 */
+}
- case INDEX_op_and_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_andi(s, TCG_TYPE_I32, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, NR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
- }
- break;
- case INDEX_op_or_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_ori(s, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, OR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
- }
- break;
- case INDEX_op_xor_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tcg_out_insn(s, RIL, XILF, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, XR, args[0], args[2]);
- } else {
- tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
- }
- break;
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, NR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
+ }
+}
- case INDEX_op_andc_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
- } else {
- tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
- }
- break;
- case INDEX_op_orc_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_ori(s, a0, (uint32_t)~a2);
- } else {
- tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
- }
- break;
- case INDEX_op_eqv_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tcg_out_insn(s, RIL, XILF, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
+static void tgen_andi_3(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ tgen_andi(s, type, a0, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rNKR),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi_3,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
+ }
+}
+
+static TCGConstraintSetIndex cset_misc3_rrr(TCGType type, unsigned flags)
+{
+ return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_andc,
+};
+
+static void tgen_clz_int(TCGContext *s, TCGReg dest, TCGReg a1,
+ TCGArg a2, int a2const)
+{
+ /*
+ * Since this sets both R and R+1, we have no choice but to store the
+ * result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.
+ */
+ QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
+ tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
+
+ if (a2const && a2 == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
+ return;
+ }
+
+ /*
+ * Conditions from FLOGR are:
+ * 2 -> one bit found
+ * 8 -> no one bit found
+ */
+ tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
+}
+
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_clz_int(s, a0, a1, a2, false);
+}
+
+static void tgen_clzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_clz_int(s, a0, a1, a2, true);
+}
+
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I64 ? C_O1_I2(r, r, rI) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clz,
+ .out_rrr = tgen_clz,
+ .out_rri = tgen_clzi,
+};
+
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+{
+ /* With MIE3, and bit 0 of m4 set, we get the complete result. */
+ if (HAVE_FACILITY(MISC_INSN_EXT3)) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, dest, src);
+ src = dest;
}
- break;
- case INDEX_op_nand_i32:
- tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
- break;
- case INDEX_op_nor_i32:
- tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
- break;
+ tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
+ return;
+ }
- case INDEX_op_neg_i32:
- tcg_out_insn(s, RR, LCR, args[0], args[1]);
- break;
- case INDEX_op_not_i32:
- tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
- break;
+ /* Without MIE3, each byte gets the count of bits for the byte. */
+ tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
- case INDEX_op_mul_i32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, MHI, a0, a2);
- } else {
- tcg_out_insn(s, RIL, MSFI, a0, a2);
- }
- } else if (a0 == a1) {
+ /* Multiply to sum each byte at the top of the word. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
+ tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
+ tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
+ tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
+ }
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_ctpop,
+};
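/*
 * Worked example of the multiply-to-sum step in tgen_ctpop() above, for a
 * 32-bit input x = 0xff00ff00:
 *   POPCNT (m4 = 0) gives per-byte counts  0x08000800
 *   * 0x01010101                         = 0x10080800
 *   >> 24                                = 0x10 = 16 == ctpop32(x)
 * Each byte of the product accumulates the bytes at or below it, so the
 * top byte ends up holding the full population count.
 */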
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divs2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a4)
+{
+ tcg_debug_assert((a1 & 1) == 0);
+ tcg_debug_assert(a0 == a1 + 1);
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RR, DR, a1, a4);
+ } else {
+ /*
+ * TODO: Move the sign-extend of the numerator from a2 into a3
+ * into the tcg backend, instead of in early expansion. It is
+ * required for 32-bit DR, but not 64-bit DSGR.
+ */
+ tcg_out_insn(s, RRE, DSGR, a1, a4);
+ }
+}
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_O2_I3(o, m, 0, 1, r),
+ .out_rr01r = tgen_divs2,
+};
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a4)
+{
+ tcg_debug_assert((a1 & 1) == 0);
+ tcg_debug_assert(a0 == a1 + 1);
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, DLR, a1, a4);
+ } else {
+ tcg_out_insn(s, RRE, DLGR, a1, a4);
+ }
+}
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_O2_I3(o, m, 0, 1, r),
+ .out_rr01r = tgen_divu2,
+};
+
+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_eqv,
+};
+
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_sh64(s, RSY_SRLG, a0, a1, TCG_REG_NONE, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ if (a0 == a1) {
tcg_out_insn(s, RRE, MSR, a0, a2);
} else {
tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
}
- break;
-
- case INDEX_op_div2_i32:
- tcg_debug_assert(args[0] == args[2]);
- tcg_debug_assert(args[1] == args[3]);
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RR, DR, args[1], args[4]);
- break;
- case INDEX_op_divu2_i32:
- tcg_debug_assert(args[0] == args[2]);
- tcg_debug_assert(args[1] == args[3]);
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRE, DLR, args[1], args[4]);
- break;
-
- case INDEX_op_shl_i32:
- op = RS_SLL;
- op2 = RSY_SLLK;
- do_shift32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+ } else {
if (a0 == a1) {
- if (const_args[2]) {
- tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
- } else {
- tcg_out_sh32(s, op, a0, a2, 0);
- }
+ tcg_out_insn(s, RRE, MSGR, a0, a2);
} else {
- /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
- if (const_args[2]) {
- tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
- } else {
- tcg_out_sh64(s, op2, a0, a1, a2, 0);
- }
+ tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
}
- break;
- case INDEX_op_shr_i32:
- op = RS_SRL;
- op2 = RSY_SRLK;
- goto do_shift32;
- case INDEX_op_sar_i32:
- op = RS_SRA;
- op2 = RSY_SRAK;
- goto do_shift32;
+ }
+}
- case INDEX_op_rotl_i32:
- /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
- if (const_args[2]) {
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
+static void tgen_muli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ if (type == TCG_TYPE_I32) {
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, MHI, a0, a2);
} else {
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
+ tcg_out_insn(s, RIL, MSFI, a0, a2);
}
- break;
- case INDEX_op_rotr_i32:
- if (const_args[2]) {
- tcg_out_sh64(s, RSY_RLL, args[0], args[1],
- TCG_REG_NONE, (32 - args[2]) & 31);
+ } else {
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, MGHI, a0, a2);
} else {
- tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
+ tcg_out_insn(s, RIL, MSGFI, a0, a2);
}
- break;
+ }
+}
+
+static TCGConstraintSetIndex cset_mul(TCGType type, unsigned flags)
+{
+ return (HAVE_FACILITY(MISC_INSN_EXT2)
+ ? C_O1_I2(r, r, rJ)
+ : C_O1_I2(r, 0, rJ));
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul,
+ .out_rrr = tgen_mul,
+ .out_rri = tgen_muli,
+};
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_debug_assert((a1 & 1) == 0);
+ tcg_debug_assert(a0 == a1 + 1);
+ tcg_out_insn(s, RRFa, MGRK, a1, a2, a3);
+}
+
+static TCGConstraintSetIndex cset_muls2(TCGType type, unsigned flags)
+{
+ return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2)
+ ? C_O2_I2(o, m, r, r) : C_NotImplemented);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_muls2,
+ .out_rrrr = tgen_muls2,
+};
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_debug_assert(a0 == a2);
+ tcg_debug_assert((a1 & 1) == 0);
+ tcg_debug_assert(a0 == a1 + 1);
+ tcg_out_insn(s, RRE, MLGR, a1, a3);
+}
+
+static TCGConstraintSetIndex cset_mulu2(TCGType type, unsigned flags)
+{
+ return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2)
+ ? C_O2_I2(o, m, 0, r) : C_NotImplemented);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulu2,
+ .out_rrrr = tgen_mulu2,
+};
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_nand(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NNRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NNGRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_nand,
+};
+
+static void tgen_nor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NORK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NOGRK, a0, a1, a2);
+ }
+}
- case INDEX_op_bswap16_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_nor,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, OR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
+ }
+}
+
+static void tgen_ori_3(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ tgen_ori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rK),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori_3,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_orc,
+};
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotl_int(TCGContext *s, TCGType type, TCGReg dst,
+ TCGReg src, TCGReg v, tcg_target_long i)
+{
+ S390Opcode insn = type == TCG_TYPE_I32 ? RSY_RLL : RSY_RLLG;
+ tcg_out_sh64(s, insn, dst, src, v, i);
+}
+
+static void tgen_rotl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_rotl_int(s, type, a0, a1, a2, 0);
+}
+
+static void tgen_rotli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_rotl_int(s, type, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_rotl,
+ .out_rri = tgen_rotli,
+};
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_sar_int(TCGContext *s, TCGType type, TCGReg dst,
+ TCGReg src, TCGReg v, tcg_target_long i)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_sh64(s, RSY_SRAG, dst, src, v, i);
+ } else if (dst == src) {
+ tcg_out_sh32(s, RS_SRA, dst, v, i);
+ } else {
+ tcg_out_sh64(s, RSY_SRAK, dst, src, v, i);
+ }
+}
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_sar_int(s, type, a0, a1, a2, 0);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_sar_int(s, type, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl_int(TCGContext *s, TCGType type, TCGReg dst,
+ TCGReg src, TCGReg v, tcg_target_long i)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_sh64(s, RSY_SLLG, dst, src, v, i);
+ } else if (dst == src) {
+ tcg_out_sh32(s, RS_SLL, dst, v, i);
+ } else {
+ tcg_out_sh64(s, RSY_SLLK, dst, src, v, i);
+ }
+}
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_shl_int(s, type, a0, a1, a2, 0);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_shl_int(s, type, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr_int(TCGContext *s, TCGType type, TCGReg dst,
+ TCGReg src, TCGReg v, tcg_target_long i)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_sh64(s, RSY_SRLG, dst, src, v, i);
+ } else if (dst == src) {
+ tcg_out_sh32(s, RS_SRL, dst, v, i);
+ } else {
+ tcg_out_sh64(s, RSY_SRLK, dst, src, v, i);
+ }
+}
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_shr_int(s, type, a0, a1, a2, 0);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_shr_int(s, type, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, SLGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SLR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, SLRK, a0, a1, a2);
+ }
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
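+ /*
+ * For I64 the logical immediates are 32-bit unsigned, so a negative
+ * value is handled by adding back its negation.
+ */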
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, SLFI, a0, a2);
+ } else if (a2 >= 0) {
+ tcg_out_insn(s, RIL, SLGFI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, ALGFI, a0, -a2);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, r, rUV),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+};
+
+static void tgen_subbio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, SLBR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, SLBGR, a0, a2);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_subbio,
+};
+
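+/* SLBR/SLBGR both consume and produce the borrow, so borrow-in reuses subbio. */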
+#define outop_subbi outop_subbio
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
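+ /* Compare a register with itself: cc = 0, i.e. no carry, which SLB* reads as borrow. */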
+ tcg_out_insn(s, RR, CLR, TCG_REG_R0, TCG_REG_R0); /* cc = 0 */
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, XR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
+ }
+}
+
+static void tgen_xori_3(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ tgen_xori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rK),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori_3,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
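+ /*
+ * LRVR/LRVGR byte-reverse the whole register, leaving the swapped
+ * halfword in the high bits; shift it back down with sign or zero
+ * extension per the output flags.
+ */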
+ if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RRE, LRVR, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
- } else {
- tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
- }
- break;
- case INDEX_op_bswap16_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
+ tcg_out_sh32(s, (flags & TCG_BSWAP_OS ? RS_SRA : RS_SRL),
+ a0, TCG_REG_NONE, 16);
+ } else {
tcg_out_insn(s, RRE, LRVGR, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
- } else {
- tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
- }
- break;
+ tcg_out_sh64(s, (flags & TCG_BSWAP_OS ? RSY_SRAG : RSY_SRLG),
+ a0, a0, TCG_REG_NONE, 48);
+ }
+}
- case INDEX_op_bswap32_i32:
- tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
- break;
- case INDEX_op_bswap32_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- tcg_out_insn(s, RRE, LRVR, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, a0, a0);
- }
- break;
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
- case INDEX_op_add2_i32:
- if (const_args[4]) {
- tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RR, ALR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
- break;
- case INDEX_op_sub2_i32:
- if (const_args[4]) {
- tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RR, SLR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
- break;
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_insn(s, RRE, LRVR, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+}
- case INDEX_op_br:
- tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
- break;
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
- case INDEX_op_brcond_i32:
- tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
- args[1], const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i32:
- tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
- args[2], const_args[2], false);
- break;
- case INDEX_op_negsetcond_i32:
- tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
- args[2], const_args[2], true);
- break;
- case INDEX_op_movcond_i32:
- tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
- args[2], const_args[2], args[3], const_args[3], args[4]);
- break;
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_insn(s, RRE, LRVGR, a0, a1);
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
- break;
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
- case INDEX_op_ld16s_i64:
- tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld32u_i64:
- tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
- break;
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RR, LCR, a0, a1);
+ } else {
+ tcg_out_insn(s, RRE, LCGR, a0, a1);
+ }
+}
- case INDEX_op_st32_i64:
- tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i64:
- tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
- break;
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- if (a0 == a1) {
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, AGHI, a0, a2);
- break;
- }
- if (a2 == (int32_t)a2) {
- tcg_out_insn(s, RIL, AGFI, a0, a2);
- break;
- }
- if (a2 == (uint32_t)a2) {
- tcg_out_insn(s, RIL, ALGFI, a0, a2);
- break;
- }
- if (-a2 == (uint32_t)-a2) {
- tcg_out_insn(s, RIL, SLGFI, a0, -a2);
- break;
- }
- }
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, AGR, a0, a2);
- } else {
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
- }
- break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
- } else {
- tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
- }
- break;
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_nor(s, type, a0, a1, a1);
+}
- case INDEX_op_and_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
- } else {
- tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
- }
- break;
- case INDEX_op_or_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_ori(s, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
- }
- break;
- case INDEX_op_xor_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_xori(s, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
- }
- break;
+static TCGConstraintSetIndex cset_not(TCGType type, unsigned flags)
+{
+ return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I1(r, r) : C_NotImplemented;
+}
- case INDEX_op_andc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
- }
- break;
- case INDEX_op_orc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_ori(s, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
- }
- break;
- case INDEX_op_eqv_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_xori(s, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
- }
- break;
- case INDEX_op_nand_i64:
- tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
- break;
- case INDEX_op_nor_i64:
- tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
- break;
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_not,
+ .out_rr = tgen_not,
+};
- case INDEX_op_neg_i64:
- tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
- break;
- case INDEX_op_not_i64:
- tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
- break;
- case INDEX_op_bswap64_i64:
- tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
- break;
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+ /*
+ * The host memory model is quite strong; we simply need to
+ * serialize the instruction stream.
+ */
+ if (a0 & TCG_MO_ST_LD) {
+ /* fast-bcr-serialization facility (45) is present */
+ tcg_out_insn(s, RR, BCR, 14, 0);
+ }
+}
- case INDEX_op_mul_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, MGHI, a0, a2);
- } else {
- tcg_out_insn(s, RIL, MSGFI, a0, a2);
- }
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, MSGR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
- }
- break;
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGC, dest, base, TCG_REG_NONE, offset);
+}
- case INDEX_op_div2_i64:
- /*
- * ??? We get an unnecessary sign-extension of the dividend
- * into op0 with this definition, but as we do in fact always
- * produce both quotient and remainder using INDEX_op_div_i64
- * instead requires jumping through even more hoops.
- */
- tcg_debug_assert(args[0] == args[2]);
- tcg_debug_assert(args[1] == args[3]);
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
- break;
- case INDEX_op_divu2_i64:
- tcg_debug_assert(args[0] == args[2]);
- tcg_debug_assert(args[1] == args[3]);
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
- break;
- case INDEX_op_mulu2_i64:
- tcg_debug_assert(args[0] == args[2]);
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
- break;
- case INDEX_op_muls2_i64:
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
- break;
-
- case INDEX_op_shl_i64:
- op = RSY_SLLG;
- do_shift64:
- if (const_args[2]) {
- tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
- } else {
- tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
- }
- break;
- case INDEX_op_shr_i64:
- op = RSY_SRLG;
- goto do_shift64;
- case INDEX_op_sar_i64:
- op = RSY_SRAG;
- goto do_shift64;
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
- case INDEX_op_rotl_i64:
- if (const_args[2]) {
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
- TCG_REG_NONE, args[2]);
- } else {
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
- }
- break;
- case INDEX_op_rotr_i64:
- if (const_args[2]) {
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
- TCG_REG_NONE, (64 - args[2]) & 63);
- } else {
- /* We can use the smaller 32-bit negate because only the
- low 6 bits are examined for the rotate. */
- tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
- }
- break;
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LGB, dest, base, TCG_REG_NONE, offset);
+}
- case INDEX_op_add2_i64:
- if (const_args[4]) {
- if ((int64_t)args[4] >= 0) {
- tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
- }
- } else {
- tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
- break;
- case INDEX_op_sub2_i64:
- if (const_args[4]) {
- if ((int64_t)args[4] >= 0) {
- tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
- }
- } else {
- tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
- break;
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
- case INDEX_op_brcond_i64:
- tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
- args[1], const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i64:
- tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
- args[2], const_args[2], false);
- break;
- case INDEX_op_negsetcond_i64:
- tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
- args[2], const_args[2], true);
- break;
- case INDEX_op_movcond_i64:
- tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
- args[2], const_args[2], args[3], const_args[3], args[4]);
- break;
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGH, dest, base, TCG_REG_NONE, offset);
+}
- OP_32_64(deposit):
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- tgen_deposit(s, a0, a2, args[3], args[4], 1);
- } else {
- /* Since we can't support "0Z" as a constraint, we allow a1 in
- any register. Fix things up as if a matching constraint. */
- if (a0 != a1) {
- TCGType type = (opc == INDEX_op_deposit_i64);
- if (a0 == a2) {
- tcg_out_mov(s, type, TCG_TMP0, a2);
- a2 = TCG_TMP0;
- }
- tcg_out_mov(s, type, a0, a1);
- }
- tgen_deposit(s, a0, a2, args[3], args[4], 0);
- }
- break;
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
- OP_32_64(extract):
- tgen_extract(s, args[0], args[1], args[2], args[3]);
- break;
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_mem(s, RX_LH, RXY_LHY, dest, base, TCG_REG_NONE, offset);
+ } else {
+ tcg_out_mem(s, 0, RXY_LGH, dest, base, TCG_REG_NONE, offset);
+ }
+}
- case INDEX_op_clz_i64:
- tgen_clz(s, args[0], args[1], args[2], const_args[2]);
- break;
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
- case INDEX_op_ctpop_i32:
- tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
- break;
- case INDEX_op_ctpop_i64:
- tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
- break;
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGF, dest, base, TCG_REG_NONE, offset);
+}
- case INDEX_op_mb:
- /* The host memory model is quite strong, we simply need to
- serialize the instruction stream. */
- if (args[0] & TCG_MO_ST_LD) {
- /* fast-bcr-serialization facility (45) is present */
- tcg_out_insn(s, RR, BCR, 14, 0);
- }
- break;
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
- }
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LGF, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, RX_STC, RXY_STCY, data, base, TCG_REG_NONE, offset);
}
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
+
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, RX_STH, RXY_STHY, data, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+
+
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src)
{
@@ -2841,6 +3249,94 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
}
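+/*
+ * Emit a vector compare with VCEQ/VCH/VCHL, swapping operands as needed;
+ * return true when the caller must invert the resulting mask.
+ */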
+static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg a1, TCGReg a2, TCGCond cond)
+{
+ bool need_swap = false, need_inv = false;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_GT:
+ case TCG_COND_GTU:
+ break;
+ case TCG_COND_NE:
+ case TCG_COND_LE:
+ case TCG_COND_LEU:
+ need_inv = true;
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_LTU:
+ need_swap = true;
+ break;
+ case TCG_COND_GE:
+ case TCG_COND_GEU:
+ need_swap = need_inv = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (need_inv) {
+ cond = tcg_invert_cond(cond);
+ }
+ if (need_swap) {
+ TCGReg swap = a1;
+ a1 = a2;
+ a2 = swap;
+ cond = tcg_swap_cond(cond);
+ }
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
+ break;
+ case TCG_COND_GT:
+ tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
+ break;
+ case TCG_COND_GTU:
+ tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return need_inv;
+}
+
+static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg a1, TCGReg a2, TCGCond cond)
+{
+ if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) {
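+ /* VNO of a register with itself is a vector NOT. */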
+ tcg_out_insn(s, VRRc, VNO, a0, a0, a0, 0);
+ }
+}
+
+static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0,
+ TCGReg c1, TCGReg c2, TCGArg v3,
+ int const_v3, TCGReg v4, TCGCond cond)
+{
+ bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP0, c1, c2, cond);
+
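+ /*
+ * A register v3 selects with VSEL; a constant v3 of zero or minus one
+ * folds into AND/OR of v4 with the comparison mask.
+ */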
+ if (!const_v3) {
+ if (inv) {
+ tcg_out_insn(s, VRRe, VSEL, a0, v4, v3, TCG_VEC_TMP0);
+ } else {
+ tcg_out_insn(s, VRRe, VSEL, a0, v3, v4, TCG_VEC_TMP0);
+ }
+ } else if (v3) {
+ if (inv) {
+ tcg_out_insn(s, VRRc, VOC, a0, v4, TCG_VEC_TMP0, 0);
+ } else {
+ tcg_out_insn(s, VRRc, VO, a0, v4, TCG_VEC_TMP0, 0);
+ }
+ } else {
+ if (inv) {
+ tcg_out_insn(s, VRRc, VN, a0, v4, TCG_VEC_TMP0, 0);
+ } else {
+ tcg_out_insn(s, VRRc, VNC, a0, v4, TCG_VEC_TMP0, 0);
+ }
+ }
+}
+
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -2959,19 +3455,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_cmp_vec:
- switch ((TCGCond)args[3]) {
- case TCG_COND_EQ:
- tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
- break;
- case TCG_COND_GT:
- tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
- break;
- case TCG_COND_GTU:
- tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
- break;
- default:
- g_assert_not_reached();
- }
+ tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]);
+ break;
+ case INDEX_op_cmpsel_vec:
+ tcg_out_cmpsel_vec(s, vece, a0, a1, a2, args[3], const_args[3],
+ args[4], args[5]);
break;
case INDEX_op_s390_vuph_vec:
@@ -3024,9 +3512,9 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_umax_vec:
case INDEX_op_umin_vec:
case INDEX_op_xor_vec:
- return 1;
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
+ return 1;
case INDEX_op_rotrv_vec:
return -1;
case INDEX_op_mul_vec:
@@ -3039,71 +3527,6 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
}
}
-static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
-{
- bool need_swap = false, need_inv = false;
-
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_GT:
- case TCG_COND_GTU:
- break;
- case TCG_COND_NE:
- case TCG_COND_LE:
- case TCG_COND_LEU:
- need_inv = true;
- break;
- case TCG_COND_LT:
- case TCG_COND_LTU:
- need_swap = true;
- break;
- case TCG_COND_GE:
- case TCG_COND_GEU:
- need_swap = need_inv = true;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (need_inv) {
- cond = tcg_invert_cond(cond);
- }
- if (need_swap) {
- TCGv_vec t1;
- t1 = v1, v1 = v2, v2 = t1;
- cond = tcg_swap_cond(cond);
- }
-
- vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
-
- return need_inv;
-}
-
-static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
-{
- if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
- tcg_gen_not_vec(vece, v0, v0);
- }
-}
-
-static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec c1, TCGv_vec c2,
- TCGv_vec v3, TCGv_vec v4, TCGCond cond)
-{
- TCGv_vec t = tcg_temp_new_vec(type);
-
- if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
- /* Invert the sense of the compare by swapping arguments. */
- tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
- } else {
- tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
- }
- tcg_temp_free_vec(t);
-}
-
static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
{
@@ -3145,7 +3568,7 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
va_list va;
- TCGv_vec v0, v1, v2, v3, v4, t0;
+ TCGv_vec v0, v1, v2, t0;
va_start(va, a0);
v0 = temp_tcgv_vec(arg_temp(a0));
@@ -3153,16 +3576,6 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
switch (opc) {
- case INDEX_op_cmp_vec:
- expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
- break;
-
- case INDEX_op_cmpsel_vec:
- v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
- v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
- expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
- break;
-
case INDEX_op_rotrv_vec:
t0 = tcg_temp_new_vec(type);
tcg_gen_neg_vec(vece, t0, v2);
@@ -3183,173 +3596,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
va_end(va);
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(r, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotr_i64:
- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rC);
-
- case INDEX_op_clz_i64:
- return C_O1_I2(r, r, rI);
-
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_and_i32:
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_and_i64:
- return C_O1_I2(r, r, rNKR);
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- return C_O1_I2(r, r, rK);
-
- case INDEX_op_andc_i32:
- case INDEX_op_orc_i32:
- case INDEX_op_eqv_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_andc_i64:
- return C_O1_I2(r, r, rKR);
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rNK);
-
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- return C_O1_I2(r, r, r);
-
- case INDEX_op_mul_i32:
- return (HAVE_FACILITY(MISC_INSN_EXT2)
- ? C_O1_I2(r, r, ri)
- : C_O1_I2(r, 0, ri));
- case INDEX_op_mul_i64:
- return (HAVE_FACILITY(MISC_INSN_EXT2)
- ? C_O1_I2(r, r, rJ)
- : C_O1_I2(r, 0, rJ));
-
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- return C_O1_I2(r, r, ri);
-
- case INDEX_op_brcond_i32:
- return C_O0_I2(r, ri);
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
-
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- return C_O0_I2(r, r);
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- return C_O2_I1(o, m, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- return C_O0_I3(o, m, r);
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, rZ, r);
-
- case INDEX_op_movcond_i32:
- return C_O1_I4(r, r, ri, rI, r);
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rI, r);
-
- case INDEX_op_div2_i32:
- case INDEX_op_div2_i64:
- case INDEX_op_divu2_i32:
- case INDEX_op_divu2_i64:
- return C_O2_I3(o, m, 0, 1, r);
-
- case INDEX_op_mulu2_i64:
- return C_O2_I2(o, m, 0, r);
- case INDEX_op_muls2_i64:
- return C_O2_I2(o, m, r, r);
-
- case INDEX_op_add2_i32:
- case INDEX_op_sub2_i32:
- return C_N1_O1_I4(r, r, 0, 1, ri, r);
-
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i64:
- return C_N1_O1_I4(r, r, 0, 1, rJU, r);
-
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_ld_vec:
@@ -3397,9 +3647,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I2(v, v, r);
case INDEX_op_bitsel_vec:
return C_O1_I3(v, v, v, v);
+ case INDEX_op_cmpsel_vec:
+ return (TCG_TARGET_HAS_orc_vec
+ ? C_O1_I4(v, v, v, vZM, v)
+ : C_O1_I4(v, v, v, vZ, v));
default:
- g_assert_not_reached();
+ return C_NotImplemented;
}
}
@@ -3521,6 +3775,7 @@ static void tcg_target_init(TCGContext *s)
s->reserved_regs = 0;
tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
/* XXX many insns can't be used with R0, so we better avoid it for now */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index 62ce9d7..0ef5a6d 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -51,130 +51,4 @@ typedef enum TCGReg {
#define TCG_TARGET_NB_REGS 64
-/* Facilities required for proper operation; checked at startup. */
-
-#define FACILITY_ZARCH_ACTIVE 2
-#define FACILITY_LONG_DISP 18
-#define FACILITY_EXT_IMM 21
-#define FACILITY_GEN_INST_EXT 34
-#define FACILITY_45 45
-
-/* Facilities that are checked at runtime. */
-
-#define FACILITY_LOAD_ON_COND2 53
-#define FACILITY_MISC_INSN_EXT2 58
-#define FACILITY_MISC_INSN_EXT3 61
-#define FACILITY_VECTOR 129
-#define FACILITY_VECTOR_ENH1 135
-
-extern uint64_t s390_facilities[3];
-
-#define HAVE_FACILITY(X) \
- ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div2_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_andc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_clz_i32 0
-#define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 1
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_div2_i64 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_andc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 1
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2)
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 1
-
-#define TCG_TARGET_HAS_tst 1
-
-#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
-#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
-#define TCG_TARGET_HAS_v256 0
-
-#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
-#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1)
-#define TCG_TARGET_HAS_nor_vec 1
-#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1)
-#define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec 1
-#define TCG_TARGET_HAS_abs_vec 1
-#define TCG_TARGET_HAS_roti_vec 1
-#define TCG_TARGET_HAS_rots_vec 1
-#define TCG_TARGET_HAS_rotv_vec 1
-#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 1
-#define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_sat_vec 0
-#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 1
-#define TCG_TARGET_HAS_cmpsel_vec 0
-#define TCG_TARGET_HAS_tst_vec 0
-
-/* used for function call generation */
-#define TCG_TARGET_STACK_ALIGN 8
-#define TCG_TARGET_CALL_STACK_OFFSET 160
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
-
-#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
-
#endif
diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target.opc.h
deleted file mode 100644
index 0eb2350..0000000
--- a/tcg/s390x/tcg-target.opc.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2021 Linaro
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * (at your option) any later version.
- *
- * See the COPYING file in the top-level directory for details.
- *
- * Target-specific opcodes for host vector expansion. These will be
- * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
- * consider these to be UNSPEC with names.
- */
-DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC)
-DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC)
-DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC)
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index 434bf25..1a57adc 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -10,11 +10,12 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rJ)
+C_O0_I2(rz, r)
+C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
-C_O1_I2(r, rZ, rJ)
-C_O1_I4(r, rZ, rJ, rI, 0)
-C_O2_I2(r, r, rZ, rJ)
-C_O2_I4(r, r, rZ, rZ, rJ, rJ)
+C_O1_I2(r, r, rJ)
+C_O1_I2(r, rz, rJ)
+C_O1_I2(r, rz, rz)
+C_O1_I4(r, r, rJ, rI, 0)
+C_O2_I2(r, r, r, r)
diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h
index 0577ec4..2f033b3 100644
--- a/tcg/sparc64/tcg-target-con-str.h
+++ b/tcg/sparc64/tcg-target-con-str.h
@@ -16,4 +16,3 @@ REGS('r', ALL_GENERAL_REGS)
*/
CONST('I', TCG_CT_CONST_S11)
CONST('J', TCG_CT_CONST_S13)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
new file mode 100644
index 0000000..b29fd17
--- /dev/null
+++ b/tcg/sparc64/tcg-target-has.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+/* optional instructions */
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 1
+
+#define TCG_TARGET_extract_valid(type, ofs, len) \
+ ((type) == TCG_TYPE_I64 && (ofs) + (len) == 32)
+
+#define TCG_TARGET_sextract_valid TCG_TARGET_extract_valid
+
+#define TCG_TARGET_deposit_valid(type, ofs, len) 0
+
+#endif
diff --git a/tcg/sparc64/tcg-target-mo.h b/tcg/sparc64/tcg-target-mo.h
new file mode 100644
index 0000000..98bfe03
--- /dev/null
+++ b/tcg/sparc64/tcg-target-mo.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/sparc64/tcg-target-opc.h.inc b/tcg/sparc64/tcg-target-opc.h.inc
new file mode 100644
index 0000000..84e777b
--- /dev/null
+++ b/tcg/sparc64/tcg-target-opc.h.inc
@@ -0,0 +1 @@
+/* No target specific opcodes. */
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 176c987..5e5c3f1 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -27,8 +27,15 @@
#error "unsupported code generation mode"
#endif
-#include "../tcg-ldst.c.inc"
-#include "../tcg-pool.c.inc"
+/* Used for function call generation. */
+#define TCG_REG_CALL_STACK TCG_REG_O6
+#define TCG_TARGET_STACK_BIAS 2047
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6 * 8 + TCG_TARGET_STACK_BIAS)
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@@ -69,7 +76,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200
-#define TCG_CT_CONST_ZERO 0x400
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
@@ -193,7 +199,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
+#define ARITH_ADDCCC (INSN_OP(2) | INSN_OP3(0x18))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_SUBCCC (INSN_OP(2) | INSN_OP3(0x1c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
@@ -202,9 +210,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
+#define ARITH_POPC (INSN_OP(2) | INSN_OP3(0x2e))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
+#define ARITH_ADDXCCC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x13))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
@@ -217,6 +227,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
+#define WRCCR (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(2))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
@@ -264,8 +275,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
-#ifndef use_vis3_instructions
-bool use_vis3_instructions;
+static bool use_popc_instructions;
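+/* With VIS3 enabled at compile time, use_vis3_instructions folds to a constant. */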
+#if defined(__VIS__) && __VIS__ >= 0x300
+#define use_vis3_instructions 1
+#else
+static bool use_vis3_instructions;
#endif
static bool check_fit_i64(int64_t val, unsigned int bits)
@@ -333,9 +347,7 @@ static bool tcg_target_const_match(int64_t val, int ct,
val = (int32_t)val;
}
- if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
- return 1;
- } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
+ if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
return 1;
} else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
return 1;
@@ -362,7 +374,7 @@ static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
}
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
- int32_t val2, int val2const, int op)
+ int32_t val2, int val2const, int op)
{
tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
| (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
@@ -592,21 +604,6 @@ static void tcg_out_sety(TCGContext *s, TCGReg rs)
tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}
-static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
- int32_t val2, int val2const, int uns)
-{
- /* Load Y with the sign/zero extension of RS1 to 64-bits. */
- if (uns) {
- tcg_out_sety(s, TCG_REG_G0);
- } else {
- tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
- tcg_out_sety(s, TCG_REG_T1);
- }
-
- tcg_out_arithc(s, rd, rs1, val2, val2const,
- uns ? ARITH_UDIV : ARITH_SDIV);
-}
-
static const uint8_t tcg_cond_to_bcond[16] = {
[TCG_COND_EQ] = COND_E,
[TCG_COND_NE] = COND_NE,
@@ -648,6 +645,12 @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
tcg_out_bpcc0(s, scond, flags, off19);
}
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_bpcc(s, COND_A, BPCC_PT, l);
+ tcg_out_nop(s);
+}
+
static void tcg_out_cmp(TCGContext *s, TCGCond cond,
TCGReg c1, int32_t c2, int c2const)
{
@@ -663,11 +666,10 @@ static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_nop(s);
}
-static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
+static void tcg_out_movcc(TCGContext *s, int scond, int cc, TCGReg ret,
int32_t v1, int v1const)
{
- tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
- | INSN_RS1(tcg_cond_to_bcond[cond])
+ tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(scond)
| (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}
@@ -676,7 +678,7 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
int32_t v1, int v1const)
{
tcg_out_cmp(s, cond, c1, c2, c2const);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_ICC, ret, v1, v1const);
}
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
@@ -720,12 +722,12 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
tcg_out_movr(s, rcond, ret, c1, v1, v1const);
} else {
tcg_out_cmp(s, cond, c1, c2, c2const);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_XCC, ret, v1, v1const);
}
}
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
@@ -745,7 +747,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}
c1 = TCG_REG_G0, c2const = 0;
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;
case TCG_COND_TSTEQ:
case TCG_COND_TSTNE:
@@ -754,7 +756,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
c1 = TCG_REG_G0;
c2 = TCG_REG_T1, c2const = 0;
cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;
case TCG_COND_GTU:
case TCG_COND_LEU:
@@ -774,7 +776,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
default:
tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+ MOVCC_ICC, ret, neg ? -1 : 1, 1);
return;
}
@@ -799,7 +802,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
int rcond;
@@ -829,78 +832,103 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
} else {
tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+ MOVCC_XCC, ret, neg ? -1 : 1, 1);
}
}
-static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, int opl, int oph)
+static void tcg_out_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGArg arg2, bool const_arg2,
+ TCGLabel *l)
{
- TCGReg tmp = TCG_REG_T1;
-
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
+ if (type == TCG_TYPE_I32) {
+ tcg_out_brcond_i32(s, cond, arg1, arg2, const_arg2, l);
+ } else {
+ tcg_out_brcond_i64(s, cond, arg1, arg2, const_arg2, l);
}
+}
- tcg_out_arithc(s, tmp, al, bl, blconst, opl);
- tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
- tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
+{
+ tcg_out_brcond(s, type, cond, arg1, arg2, false, l);
}
-static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, bool is_sub)
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
{
- TCGReg tmp = TCG_REG_T1;
+ tcg_out_brcond(s, type, cond, arg1, arg2, true, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rJ),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1,
+ TCGArg c2, bool c2const, bool neg)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg);
+ } else {
+ tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg);
}
+}
- tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
+}
- if (use_vis3_instructions && !is_sub) {
- /* Note that ADDXC doesn't accept immediates. */
- if (bhconst && bh != 0) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh);
- bh = TCG_REG_T2;
- }
- tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
- } else if (bh == TCG_REG_G0) {
- /* If we have a zero, we can perform the operation in two insns,
- with the arithmetic first, and a conditional move into place. */
- if (rh == ah) {
- tcg_out_arithi(s, TCG_REG_T2, ah, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
- } else {
- tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
- }
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool c2const,
+ TCGArg v1, bool v1const, TCGArg v2, bool v2const)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const);
} else {
- /*
- * Otherwise adjust BH as if there is carry into T2.
- * Note that constant BH is constrained to 11 bits for the MOVCC,
- * so the adjustment fits 12 bits.
- */
- if (bhconst) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
- } else {
- tcg_out_arithi(s, TCG_REG_T2, bh, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- }
- /* ... smoosh T2 back to original BH if carry is clear ... */
- tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
- /* ... and finally perform the arithmetic with the new operand. */
- tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
+ tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const);
}
-
- tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0),
+ .out = tgen_movcond,
+};
+
static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
bool in_prologue, bool tail_call)
{
@@ -931,7 +959,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
tcg_out_nop(s);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
@@ -1092,7 +1120,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the page index, shifted into place for tlb index. */
tcg_out_arithi(s, TCG_REG_T1, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
/* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
@@ -1108,7 +1136,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
h->base = TCG_REG_T1;
/* Mask out the page offset, except for the required alignment. */
- compare_mask = s->page_mask | a_mask;
+ compare_mask = TARGET_PAGE_MASK | a_mask;
if (check_fit_tl(compare_mask, 13)) {
tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
} else {
@@ -1120,7 +1148,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
ldst->label_ptr[0] = s->code_ptr;
/* bne,pn %[xi]cc, label0 */
@@ -1133,14 +1161,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* Otherwise, test for at least natural alignment and defer
* everything else to the helper functions.
*/
- if (s_bits != get_alignment_bits(opc)) {
+ if (s_bits != memop_alignment_bits(opc)) {
tcg_debug_assert(check_fit_tl(a_mask, 13));
tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
ldst->label_ptr[0] = s->code_ptr;
/* bne,pn %icc, label0 */
@@ -1162,8 +1190,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
return ldst;
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_UB] = LDUB,
@@ -1195,14 +1223,23 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = STB,
@@ -1225,12 +1262,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
if (check_fit_ptr(a0, 13)) {
@@ -1276,365 +1322,794 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
}
}
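+/* Indirect branch to a0; the delay slot copies a0 into TCG_REG_TB. */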
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
+ tcg_out_mov_delay(s, TCG_REG_TB, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0, a1, a2;
- int c, c2;
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
+}
- /* Hoist the loads of the most common arguments. */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
- c2 = const_args[2];
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
- tcg_out_mov_delay(s, TCG_REG_TB, a0);
- break;
- case INDEX_op_br:
- tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
- tcg_out_nop(s);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
-#define OP_32_64(x) \
- glue(glue(case INDEX_op_, x), _i32): \
- glue(glue(case INDEX_op_, x), _i64)
+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCC);
+}
- OP_32_64(ld8u):
- tcg_out_ldst(s, a0, a1, a2, LDUB);
- break;
- OP_32_64(ld8s):
- tcg_out_ldst(s, a0, a1, a2, LDSB);
- break;
- OP_32_64(ld16u):
- tcg_out_ldst(s, a0, a1, a2, LDUH);
- break;
- OP_32_64(ld16s):
- tcg_out_ldst(s, a0, a1, a2, LDSH);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, a0, a1, a2, LDUW);
- break;
- OP_32_64(st8):
- tcg_out_ldst(s, a0, a1, a2, STB);
- break;
- OP_32_64(st16):
- tcg_out_ldst(s, a0, a1, a2, STH);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, a0, a1, a2, STW);
- break;
- OP_32_64(add):
- c = ARITH_ADD;
- goto gen_arith;
- OP_32_64(sub):
- c = ARITH_SUB;
- goto gen_arith;
- OP_32_64(and):
- c = ARITH_AND;
- goto gen_arith;
- OP_32_64(andc):
- c = ARITH_ANDN;
- goto gen_arith;
- OP_32_64(or):
- c = ARITH_OR;
- goto gen_arith;
- OP_32_64(orc):
- c = ARITH_ORN;
- goto gen_arith;
- OP_32_64(xor):
- c = ARITH_XOR;
- goto gen_arith;
- case INDEX_op_shl_i32:
- c = SHIFT_SLL;
- do_shift32:
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
- break;
- case INDEX_op_shr_i32:
- c = SHIFT_SRL;
- goto do_shift32;
- case INDEX_op_sar_i32:
- c = SHIFT_SRA;
- goto do_shift32;
- case INDEX_op_mul_i32:
- c = ARITH_UMUL;
- goto gen_arith;
-
- OP_32_64(neg):
- c = ARITH_SUB;
- goto gen_arith1;
- OP_32_64(not):
- c = ARITH_ORN;
- goto gen_arith1;
-
- case INDEX_op_div_i32:
- tcg_out_div32(s, a0, a1, a2, c2, 0);
- break;
- case INDEX_op_divu_i32:
- tcg_out_div32(s, a0, a1, a2, c2, 1);
- break;
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCC);
+}
- case INDEX_op_brcond_i32:
- tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
- break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
- case INDEX_op_add2_i32:
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
- args[4], const_args[4], args[5], const_args[5],
- ARITH_ADDCC, ARITH_ADDC);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
- args[4], const_args[4], args[5], const_args[5],
- ARITH_SUBCC, ARITH_SUBC);
- break;
- case INDEX_op_mulu2_i32:
- c = ARITH_UMUL;
- goto do_mul2;
- case INDEX_op_muls2_i32:
- c = ARITH_SMUL;
- do_mul2:
- /* The 32-bit multiply insns produce a full 64-bit result. */
- tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
- tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
- break;
+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDC);
+ } else if (use_vis3_instructions) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXC);
+ } else {
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
+ /* Select the correct result based on actual carry value. */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ }
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
- break;
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDC);
+ return;
+ }
+ /* !use_vis3_instructions */
+ if (a2 != 0) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else if (a0 == a1) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_ADD);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else {
+ tcg_out_arithi(s, a0, a1, 1, ARITH_ADD);
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
+ }
+}
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, a0, a1, a2, LDSW);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, a0, a1, a2, LDX);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, a0, a1, a2, STX);
- break;
- case INDEX_op_shl_i64:
- c = SHIFT_SLLX;
- do_shift64:
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
- break;
- case INDEX_op_shr_i64:
- c = SHIFT_SRLX;
- goto do_shift64;
- case INDEX_op_sar_i64:
- c = SHIFT_SRAX;
- goto do_shift64;
- case INDEX_op_mul_i64:
- c = ARITH_MULX;
- goto gen_arith;
- case INDEX_op_div_i64:
- c = ARITH_SDIVX;
- goto gen_arith;
- case INDEX_op_divu_i64:
- c = ARITH_UDIVX;
- goto gen_arith;
-
- case INDEX_op_brcond_i64:
- tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
- case INDEX_op_add2_i64:
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
- const_args[4], args[5], const_args[5], false);
- break;
- case INDEX_op_sub2_i64:
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
- const_args[4], args[5], const_args[5], true);
- break;
- case INDEX_op_muluh_i64:
- tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
- break;
+static TCGConstraintSetIndex cset_addci(TCGType type, unsigned flags)
+{
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
+ /* Note that ADDXC doesn't accept immediates. */
+ return C_O1_I2(r, rz, rz);
+ }
+ return C_O1_I2(r, rz, rJ);
+}
- gen_arith:
- tcg_out_arithc(s, a0, a1, a2, c2, c);
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addci,
+ .out_rrr = tgen_addci_rrr,
+ .out_rri = tgen_addci_rri,
+};
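
For 64-bit add-with-carry-in without VIS3's ADDXC, the code above computes both candidate results (a1 + a2 and a1 + a2 + 1) and selects one with movcc on %xcc.C. A minimal standalone C model of that lowering (names are illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

static uint64_t addci_model(uint64_t a1, uint64_t a2, bool carry_in)
{
    uint64_t without = a1 + a2;       /* kept when %xcc.C is clear (COND_CC) */
    uint64_t with    = without + 1;   /* kept when %xcc.C is set   (COND_CS) */
    return carry_in ? with : without;
}
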
- gen_arith1:
- tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
- break;
+/* Copy %xcc.c to %icc.c */
+static void tcg_out_dup_xcc_c(TCGContext *s)
+{
+ if (use_vis3_instructions) {
+ tcg_out_arith(s, TCG_REG_T1, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
+ } else {
+ tcg_out_movi_s13(s, TCG_REG_T1, 0);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, TCG_REG_T1, 1, true);
+ }
+ /* Write carry-in into %icc via {0,1} + -1. */
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, -1, ARITH_ADDCC);
+}
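
The final ADDCC of T1 and -1 works because adding 0xffffffff in 32-bit arithmetic carries out exactly when the other operand is non-zero, i.e. when T1 holds the saved carry of 1. A quick self-contained check of that identity (illustrative only):

#include <assert.h>
#include <stdint.h>

static int carry_out32(uint32_t a, uint32_t b)
{
    return (uint64_t)a + b > UINT32_MAX;
}

int main(void)
{
    assert(carry_out32(1, UINT32_MAX) == 1);   /* T1 == 1 -> %icc.C set */
    assert(carry_out32(0, UINT32_MAX) == 0);   /* T1 == 0 -> %icc.C clear */
    return 0;
}
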
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
+static void tgen_addcio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ if (use_vis3_instructions) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXCCC);
+ return;
+ }
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCCC);
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- default:
- g_assert_not_reached();
+static void tgen_addcio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type != TCG_TYPE_I32) {
+ /* !use_vis3_instructions */
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCCC);
+}
+
+static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
+{
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
+ /* Note that ADDXCCC doesn't accept immediates. */
+ return C_O1_I2(r, rz, rz);
}
+ return C_O1_I2(r, rz, rJ);
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
+};
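
ADDCCC takes its carry-in from the 32-bit flags (%icc.C), which is why the 64-bit non-VIS3 path first mirrors %xcc.C into %icc.C via tcg_out_dup_xcc_c; VIS3's ADDXCCC handles the 64-bit carry chain directly. As a reference for what addcio computes, here is a small standalone model (my naming, not the backend's):

#include <stdbool.h>
#include <stdint.h>

/* a0 = a1 + a2 + carry, with the carry flag updated in place. */
static uint64_t addcio_model(uint64_t a1, uint64_t a2, bool *carry)
{
    uint64_t r = a1 + a2 + (*carry ? 1 : 0);
    *carry = r < a1 || (r == a1 && *carry);
    return r;
}
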
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ /* 0x11 -> xcc = nzvC, icc = nzvC */
+ tcg_out_arithi(s, 0, TCG_REG_G0, 0x11, WRCCR);
+}
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_AND);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_arith(s, a0, TCG_REG_G0, a1, ARITH_POPC);
}
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
-{
- switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ if (use_popc_instructions && type == TCG_TYPE_I64) {
return C_O1_I1(r, r);
+ }
+ return C_NotImplemented;
+}
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I2(rZ, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, rZ, rJ);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rZ, rJ);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, rZ, rJ, rI, 0);
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
- case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i32:
- return C_O2_I2(r, r, rZ, rJ);
- case INDEX_op_muluh_i64:
- return C_O1_I2(r, r, r);
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
- default:
- g_assert_not_reached();
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divs_rJ(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
+{
+ uint32_t insn;
+
+ if (type == TCG_TYPE_I32) {
+ /* Load Y with the sign extension of a1 to 64-bits. */
+ tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
+ tcg_out_sety(s, TCG_REG_T1);
+ insn = ARITH_SDIV;
+ } else {
+ insn = ARITH_SDIVX;
+ }
+ tcg_out_arithc(s, a0, a1, a2, c2, insn);
+}
+
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_divs_rJ(s, type, a0, a1, a2, false);
+}
+
+static void tgen_divsi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_divs_rJ(s, type, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_divs,
+ .out_rri = tgen_divsi,
+};
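
For 32-bit signed division, SDIV divides the 64-bit value formed by Y:rs1, so the Y register must hold the sign extension of the dividend; the 64-bit case uses SDIVX and needs no setup. A standalone model of the 32-bit path (assumes a2 != 0 and no INT32_MIN / -1 overflow; names are mine):

#include <stdint.h>

static int32_t sdiv32_model(int32_t a1, int32_t a2)
{
    int64_t dividend = (int64_t)a1;   /* Y:rs1 == sign-extend(a1) */
    return (int32_t)(dividend / a2);  /* truncating division, as SDIV */
}
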
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu_rJ(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
+{
+ uint32_t insn;
+
+ if (type == TCG_TYPE_I32) {
+ /* Load Y with the zero extension to 64-bits. */
+ tcg_out_sety(s, TCG_REG_G0);
+ insn = ARITH_UDIV;
+ } else {
+ insn = ARITH_UDIVX;
+ }
+ tcg_out_arithc(s, a0, a1, a2, c2, insn);
+}
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_divu_rJ(s, type, a0, a1, a2, false);
+}
+
+static void tgen_divui(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_divu_rJ(s, type, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_divu,
+ .out_rri = tgen_divui,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_muli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+ tcg_out_arithi(s, a0, a1, a2, insn);
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_mul,
+ .out_rri = tgen_muli,
+};
+
+/*
+ * The 32-bit multiply insns produce a full 64-bit result.
+ * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions
+ * before the actual multiply; we only need to extract the high part
+ * into the separate operand.
+ */
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented;
+}
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_arith(s, a0, a2, a3, ARITH_SMUL);
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_arith(s, a0, a2, a3, ARITH_UMUL);
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_mulu2,
+};
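
Both mul2 expansions rely on UMUL/SMUL leaving the full 64-bit product in the destination register, so the high half is recovered with a single SRLX by 32. A standalone model of the unsigned case (illustrative; the signed case is the same with int64_t):

#include <stdint.h>

static void mulu2_model(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
    uint64_t prod = (uint64_t)a * b;  /* what UMUL leaves in the register */
    *lo = (uint32_t)prod;
    *hi = (uint32_t)(prod >> 32);     /* SRLX dest, 32 */
}
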
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
+}
+
+static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
+{
+ return (type == TCG_TYPE_I64 && use_vis3_instructions
+ ? C_O1_I2(r, r, r) : C_NotImplemented);
+}
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_muluh,
+ .out_rrr = tgen_muluh,
+};
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_OR);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCC);
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCC);
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+};
+
+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* TODO: OSA 2015 added SUBXC */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBC);
+ } else {
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
+ /* Select the correct result based on actual borrow value. */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ }
+}
+
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBC);
+ } else if (a2 != 0) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else if (a0 == a1) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_SUB);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else {
+ tcg_out_arithi(s, a0, a1, 1, ARITH_SUB);
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ /* TODO: OSA 2015 added SUBXCCC */
+ tcg_out_dup_xcc_c(s);
}
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCCC);
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCCC);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ tcg_out_set_carry(s); /* borrow == carry */
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_G0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_orc(s, type, a0, TCG_REG_G0, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_debug_assert(ofs + len == 32);
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRL);
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_debug_assert(ofs + len == 32);
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRA);
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
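
Both handlers only accept the ofs + len == 32 case, where the field reaches the top of the 32-bit value and a single SRL or SRA suffices. A minimal model, assuming the usual arithmetic behaviour of >> on negative signed values (names are mine):

#include <stdint.h>

static uint32_t extract_hi32(uint32_t x, unsigned ofs)
{
    return x >> ofs;          /* SHIFT_SRL: zero-extended */
}

static int32_t sextract_hi32(int32_t x, unsigned ofs)
{
    return x >> ofs;          /* SHIFT_SRA: sign-extended */
}
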
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUB);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSB);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUH);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSH);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUW);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSW);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, data, base, offset, STB);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, data, base, offset, STH);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+{
+ return C_NotImplemented;
}
static void tcg_target_init(TCGContext *s)
{
+ unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+
/*
* Only probe for the platform and capabilities if we haven't already
* determined maximum values at compile time.
*/
+ use_popc_instructions = (hwcap & HWCAP_SPARC_POPC) != 0;
#ifndef use_vis3_instructions
- {
- unsigned long hwcap = qemu_getauxval(AT_HWCAP);
- use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
- }
+ use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
#endif
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index a18906a..1b9adcc 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -64,97 +64,7 @@ typedef enum {
TCG_REG_I7,
} TCGReg;
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_O6
-
-#define TCG_TARGET_STACK_BIAS 2047
-#define TCG_TARGET_STACK_ALIGN 16
-#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS)
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-
-#if defined(__VIS__) && __VIS__ >= 0x300
-#define use_vis3_instructions 1
-#else
-extern bool use_vis3_instructions;
-#endif
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_rot_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 0
-#define TCG_TARGET_HAS_ext16s_i32 0
-#define TCG_TARGET_HAS_ext8u_i32 0
-#define TCG_TARGET_HAS_ext16u_i32 0
-#define TCG_TARGET_HAS_bswap16_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 0
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 0
-#define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_deposit_i32 0
-#define TCG_TARGET_HAS_extract_i32 0
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 0
-#define TCG_TARGET_HAS_rot_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 0
-#define TCG_TARGET_HAS_bswap32_i64 0
-#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 0
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_deposit_i64 0
-#define TCG_TARGET_HAS_extract_i64 0
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
-#define TCG_TARGET_HAS_mulsh_i64 0
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
-#define TCG_TARGET_HAS_tst 1
-
-#define TCG_AREG0 TCG_REG_I0
-
-#define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_NEED_LDST_LABELS
-#define TCG_TARGET_NEED_POOL_LABELS
+#define TCG_AREG0 TCG_REG_I0
+#define TCG_REG_ZERO TCG_REG_G0
#endif
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
index 35e7616..e98b3e5 100644
--- a/tcg/tcg-common.c
+++ b/tcg/tcg-common.c
@@ -24,10 +24,11 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg-has.h"
-TCGOpDef tcg_op_defs[] = {
+const TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) \
- { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, NULL },
+ { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
#include "tcg/tcg-opc.h"
#undef DEF
};
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
new file mode 100644
index 0000000..2fc0e50
--- /dev/null
+++ b/tcg/tcg-has.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2024 Linaro, Ltd.
+ */
+
+#ifndef TCG_HAS_H
+#define TCG_HAS_H
+
+#include "tcg-target-has.h"
+
+#if TCG_TARGET_REG_BITS == 32
+/* Turn some undef macros into false macros. */
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#endif
+
+#if !defined(TCG_TARGET_HAS_v64) \
+ && !defined(TCG_TARGET_HAS_v128) \
+ && !defined(TCG_TARGET_HAS_v256)
+#define TCG_TARGET_MAYBE_vec 0
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_andc_vec 0
+#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_mul_vec 0
+#define TCG_TARGET_HAS_sat_vec 0
+#define TCG_TARGET_HAS_minmax_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_cmpsel_vec 0
+#define TCG_TARGET_HAS_tst_vec 0
+#else
+#define TCG_TARGET_MAYBE_vec 1
+#endif
+#ifndef TCG_TARGET_HAS_v64
+#define TCG_TARGET_HAS_v64 0
+#endif
+#ifndef TCG_TARGET_HAS_v128
+#define TCG_TARGET_HAS_v128 0
+#endif
+#ifndef TCG_TARGET_HAS_v256
+#define TCG_TARGET_HAS_v256 0
+#endif
+
+#endif
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index 9b0d982..d6a12af 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -34,7 +34,7 @@ extern TCGContext **tcg_ctxs;
extern unsigned int tcg_cur_ctxs;
extern unsigned int tcg_max_ctxs;
-void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_threads);
bool tcg_region_alloc(TCGContext *s);
void tcg_region_initial_alloc(TCGContext *s);
void tcg_region_prologue_set(TCGContext *s);
@@ -92,15 +92,23 @@ TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind);
*/
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
-void tcg_gen_op1(TCGOpcode, TCGArg);
-void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
-void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
+TCGOp *tcg_gen_op1(TCGOpcode, TCGType, TCGArg);
+TCGOp *tcg_gen_op2(TCGOpcode, TCGType, TCGArg, TCGArg);
+TCGOp *tcg_gen_op3(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg);
+TCGOp *tcg_gen_op4(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg, TCGArg);
+TCGOp *tcg_gen_op5(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
+TCGOp *tcg_gen_op6(TCGOpcode, TCGType, TCGArg, TCGArg,
+ TCGArg, TCGArg, TCGArg, TCGArg);
void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg);
void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg);
void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg);
+void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
+ TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e);
+
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
+ TCGOpcode, TCGType, unsigned nargs);
+TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
+ TCGOpcode, TCGType, unsigned nargs);
#endif /* TCG_INTERNAL_H */
diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
deleted file mode 100644
index ffada04..0000000
--- a/tcg/tcg-ldst.c.inc
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * TCG Backend Data: load-store optimization only.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/*
- * Generate TB finalization at the end of block
- */
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-
-static int tcg_out_ldst_finalize(TCGContext *s)
-{
- TCGLabelQemuLdst *lb;
-
- /* qemu_ld/st slow paths */
- QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
- if (lb->is_ld
- ? !tcg_out_qemu_ld_slow_path(s, lb)
- : !tcg_out_qemu_st_slow_path(s, lb)) {
- return -2;
- }
-
- /* Test for (pending) buffer overflow. The assumption is that any
- one operation beginning below the high water mark cannot overrun
- the buffer completely. Thus we can test for overflow after
- generating code without having to check during generation. */
- if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
- return -1;
- }
- }
- return 0;
-}
-
-/*
- * Allocate a new TCGLabelQemuLdst entry.
- */
-
-static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
-{
- TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
-
- memset(l, 0, sizeof(*l));
- QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
-
- return l;
-}
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 0308732..2d18454 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -23,6 +23,7 @@
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-op-gvec-common.h"
#include "tcg/tcg-gvec-desc.h"
+#include "tcg-has.h"
#define MAX_UNROLL 4
@@ -56,30 +57,39 @@ static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
tcg_debug_assert((ofs & max_align) == 0);
}
-/* Verify vector overlap rules for two operands. */
-static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
+/*
+ * Verify vector overlap rules for two operands.
+ * When dbase and abase are not the same pointer, we cannot check for
+ * overlap at compile-time, but the runtime restrictions remain.
+ */
+static void check_overlap_2(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a, uint32_t s)
{
- tcg_debug_assert(d == a || d + s <= a || a + s <= d);
+ tcg_debug_assert(dbase != abase || d == a || d + s <= a || a + s <= d);
}
/* Verify vector overlap rules for three operands. */
-static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
+static void check_overlap_3(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a,
+ TCGv_ptr bbase, uint32_t b, uint32_t s)
{
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(a, b, s);
+ check_overlap_2(dbase, d, abase, a, s);
+ check_overlap_2(dbase, d, bbase, b, s);
+ check_overlap_2(abase, a, bbase, b, s);
}
/* Verify vector overlap rules for four operands. */
-static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
- uint32_t c, uint32_t s)
+static void check_overlap_4(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a,
+ TCGv_ptr bbase, uint32_t b,
+ TCGv_ptr cbase, uint32_t c, uint32_t s)
{
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(d, c, s);
- check_overlap_2(a, b, s);
- check_overlap_2(a, c, s);
- check_overlap_2(b, c, s);
+ check_overlap_2(dbase, d, abase, a, s);
+ check_overlap_2(dbase, d, bbase, b, s);
+ check_overlap_2(dbase, d, cbase, c, s);
+ check_overlap_2(abase, a, bbase, b, s);
+ check_overlap_2(abase, a, cbase, c, s);
+ check_overlap_2(bbase, b, cbase, c, s);
}
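
With distinct base pointers the regions can no longer be compared at translation time, so the assertion degrades to the base inequality; the runtime no-overlap contract is unchanged. A standalone restatement of the two-operand rule (illustrative only):

#include <stdbool.h>
#include <stdint.h>

static bool overlap_ok_2(const void *dbase, uint32_t d,
                         const void *abase, uint32_t a, uint32_t s)
{
    return dbase != abase || d == a || d + s <= a || a + s <= d;
}
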
/* Create a descriptor from components. */
@@ -88,7 +98,20 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
uint32_t desc = 0;
check_size_align(oprsz, maxsz, 0);
- tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS));
+
+ /*
+ * We want to check that 'data' will fit into SIMD_DATA_BITS.
+ * However, some callers want to treat the data as a signed
+ * value (which they can later get back with simd_data())
+ * and some want to treat it as an unsigned value.
+ * So here we assert only that the data will fit into the
+ * field in at least one way. This means that some invalid
+ * values from the caller will not be detected, e.g. if the
+ * caller wants to handle the value as a signed integer but
+ * incorrectly passes us 1 << (SIMD_DATA_BITS - 1).
+ */
+ tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) ||
+ data == extract32(data, 0, SIMD_DATA_BITS));
oprsz = (oprsz / 8) - 1;
maxsz = (maxsz / 8) - 1;
@@ -110,9 +133,10 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
}
/* Generate a call to a gvec-style helper with two vector operands. */
-void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2 *fn)
+static void expand_2_ool(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_2 *fn)
{
TCGv_ptr a0, a1;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -120,8 +144,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a0, dbase, dofs);
+ tcg_gen_addi_ptr(a1, abase, aofs);
fn(a0, a1, desc);
@@ -129,6 +153,13 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
tcg_temp_free_ptr(a1);
}
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2 *fn)
+{
+ expand_2_ool(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, data, fn);
+}
+
/* Generate a call to a gvec-style helper with two vector operands
and one scalar operand. */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
@@ -151,9 +182,11 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
}
/* Generate a call to a gvec-style helper with three vector operands. */
-void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_3 *fn)
+static void expand_3_ool(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_3 *fn)
{
TCGv_ptr a0, a1, a2;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -162,9 +195,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a0, dbase, dofs);
+ tcg_gen_addi_ptr(a1, abase, aofs);
+ tcg_gen_addi_ptr(a2, bbase, bofs);
fn(a0, a1, a2, desc);
@@ -173,6 +206,14 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_temp_free_ptr(a2);
}
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_3 *fn)
+{
+ expand_3_ool(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz, data, fn);
+}
+
/* Generate a call to a gvec-style helper with four vector operands. */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
@@ -366,7 +407,7 @@ static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
return q <= MAX_UNROLL;
}
-static void expand_clr(uint32_t dofs, uint32_t maxsz);
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz);
/* Duplicate C as per VECE. */
uint64_t (dup_const)(unsigned vece, uint64_t c)
@@ -469,8 +510,8 @@ static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
return 0;
}
-static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_vec t_vec)
+static void do_dup_store(TCGType type, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, TCGv_vec t_vec)
{
uint32_t i = 0;
@@ -482,7 +523,7 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
* are misaligned wrt the maximum vector size, so do that first.
*/
if (dofs & 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
i += 8;
}
@@ -494,17 +535,17 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
for (; i + 32 <= oprsz; i += 32) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V256);
}
/* fallthru */
case TCG_TYPE_V128:
for (; i + 16 <= oprsz; i += 16) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V128);
}
break;
case TCG_TYPE_V64:
for (; i < oprsz; i += 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
}
break;
default:
@@ -512,17 +553,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
-/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
+/*
+ * Set OPRSZ bytes at DBASE + DOFS to replications of IN_32, IN_64 or IN_C.
* Only one of IN_32 or IN_64 may be set;
* IN_C is used if IN_32 and IN_64 are unset.
*/
-static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
- uint64_t in_c)
+static void do_dup(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz,
+ TCGv_i32 in_32, TCGv_i64 in_64, uint64_t in_c)
{
TCGType type;
TCGv_i64 t_64;
@@ -560,7 +602,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
} else {
tcg_gen_dupi_vec(vece, t_vec, in_c);
}
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+ do_dup_store(type, dbase, dofs, oprsz, maxsz, t_vec);
return;
}
@@ -604,14 +646,14 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
/* Implement inline if we picked an implementation size above. */
if (t_32) {
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_st_i32(t_32, tcg_env, dofs + i);
+ tcg_gen_st_i32(t_32, dbase, dofs + i);
}
tcg_temp_free_i32(t_32);
goto done;
}
if (t_64) {
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_st_i64(t_64, tcg_env, dofs + i);
+ tcg_gen_st_i64(t_64, dbase, dofs + i);
}
tcg_temp_free_i64(t_64);
goto done;
@@ -620,7 +662,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
/* Otherwise implement out of line. */
t_ptr = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);
+ tcg_gen_addi_ptr(t_ptr, dbase, dofs);
/*
* This may be expand_clr for the tail of an operation, e.g.
@@ -689,31 +731,32 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
done:
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
/* Likewise, but with zero. */
-static void expand_clr(uint32_t dofs, uint32_t maxsz)
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz)
{
- do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
+ do_dup(MO_8, dbase, dofs, maxsz, maxsz, NULL, NULL, 0);
}
/* Expand OPSZ bytes worth of two-operand operations using i32 elements. */
-static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
+static void expand_2_i32(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+ uint32_t aofs, uint32_t oprsz, bool load_dest,
+ void (*fni)(TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t1, tcg_env, dofs + i);
+ tcg_gen_ld_i32(t1, dbase, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i32(t1, tcg_env, dofs + i);
+ tcg_gen_st_i32(t1, dbase, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -761,8 +804,10 @@ static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
/* Expand OPSZ bytes worth of three-operand operations using i32 elements. */
-static void expand_3_i32(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i32(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, bool load_dest,
void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
@@ -771,13 +816,13 @@ static void expand_3_i32(uint32_t dofs, uint32_t aofs,
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_ld_i32(t1, tcg_env, bofs + i);
+ tcg_gen_ld_i32(t0, abase, aofs + i);
+ tcg_gen_ld_i32(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t2, tcg_env, dofs + i);
+ tcg_gen_ld_i32(t2, dbase, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i32(t2, tcg_env, dofs + i);
+ tcg_gen_st_i32(t2, dbase, dofs + i);
}
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t1);
@@ -863,20 +908,21 @@ static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
}
/* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
-static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
+static void expand_2_i64(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+ uint32_t aofs, uint32_t oprsz, bool load_dest,
+ void (*fni)(TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t1, tcg_env, dofs + i);
+ tcg_gen_ld_i64(t1, dbase, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i64(t1, tcg_env, dofs + i);
+ tcg_gen_st_i64(t1, dbase, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -924,8 +970,10 @@ static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
/* Expand OPSZ bytes worth of three-operand operations using i64 elements. */
-static void expand_3_i64(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i64(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, bool load_dest,
void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
@@ -934,13 +982,13 @@ static void expand_3_i64(uint32_t dofs, uint32_t aofs,
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_ld_i64(t1, tcg_env, bofs + i);
+ tcg_gen_ld_i64(t0, abase, aofs + i);
+ tcg_gen_ld_i64(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t2, tcg_env, dofs + i);
+ tcg_gen_ld_i64(t2, dbase, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i64(t2, tcg_env, dofs + i);
+ tcg_gen_st_i64(t2, dbase, dofs + i);
}
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t1);
@@ -1026,7 +1074,8 @@ static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
}
/* Expand OPSZ bytes worth of two-operand operations using host vectors. */
-static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+static void expand_2_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
uint32_t oprsz, uint32_t tysz, TCGType type,
bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec))
@@ -1035,12 +1084,12 @@ static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_vec t0 = tcg_temp_new_vec(type);
TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t1, tcg_env, dofs + i);
+ tcg_gen_ld_vec(t1, dbase, dofs + i);
}
fni(vece, t1, t0);
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
+ tcg_gen_st_vec(t1, dbase, dofs + i);
}
}
@@ -1084,8 +1133,9 @@ static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
}
/* Expand OPSZ bytes worth of three-operand operations using host vectors. */
-static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz,
+static void expand_3_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs, uint32_t oprsz,
uint32_t tysz, TCGType type, bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
@@ -1094,13 +1144,13 @@ static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_vec t1 = tcg_temp_new_vec(type);
TCGv_vec t2 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- tcg_gen_ld_vec(t1, tcg_env, bofs + i);
+ tcg_gen_ld_vec(t0, abase, aofs + i);
+ tcg_gen_ld_vec(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t2, tcg_env, dofs + i);
+ tcg_gen_ld_vec(t2, dbase, dofs + i);
}
fni(vece, t2, t0, t1);
- tcg_gen_st_vec(t2, tcg_env, dofs + i);
+ tcg_gen_st_vec(t2, dbase, dofs + i);
}
}
@@ -1182,8 +1232,9 @@ static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
}
/* Expand a vector two-operand operation. */
-void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1191,7 +1242,7 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(dbase, dofs, abase, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1204,8 +1255,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, some, 32,
+ TCG_TYPE_V256, g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1215,22 +1266,25 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 16,
+ TCG_TYPE_V128, g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 8,
+ TCG_TYPE_V64, g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
+ expand_2_i64(dbase, dofs, abase, aofs,
+ oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
+ expand_2_i32(dbase, dofs, abase, aofs,
+ oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
- tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
+ expand_2_ool(dbase, dofs, abase, aofs,
+ oprsz, maxsz, g->data, g->fno);
oprsz = maxsz;
}
break;
@@ -1241,10 +1295,16 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+{
+ tcg_gen_gvec_2_var(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, g);
+}
+
/* Expand a vector operation with two vectors and an immediate. */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
uint32_t maxsz, int64_t c, const GVecGen2i *g)
@@ -1255,7 +1315,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1310,7 +1370,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1321,7 +1381,7 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
TCGType type;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1387,13 +1447,15 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
/* Expand a vector three-operand operation. */
-void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1401,7 +1463,7 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(dbase, dofs, abase, aofs, bbase, bofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1414,8 +1476,8 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ some, 32, TCG_TYPE_V256, g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1426,23 +1488,25 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, 16, TCG_TYPE_V128, g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, 8, TCG_TYPE_V64, g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
+ expand_3_i64(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
+ expand_3_i32(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
- tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
- maxsz, g->data, g->fno);
+ expand_3_ool(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, g->data, g->fno);
oprsz = maxsz;
}
break;
@@ -1453,10 +1517,17 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+{
+ tcg_gen_gvec_3_var(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz, g);
+}
+
/* Expand a vector operation with three vectors and an immediate. */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
@@ -1468,7 +1539,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1522,7 +1593,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1536,7 +1607,8 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+ check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+ tcg_env, bofs, tcg_env, cofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1591,7 +1663,7 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1606,7 +1678,8 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+ check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+ tcg_env, bofs, tcg_env, cofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1660,7 +1733,7 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1673,8 +1746,9 @@ static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
tcg_gen_mov_vec(a, b);
}
-void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen2 g = {
.fni8 = tcg_gen_mov_i64,
@@ -1682,14 +1756,22 @@ void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
.fno = gen_helper_gvec_mov,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
- if (dofs != aofs) {
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
- } else {
+
+ if (dofs == aofs && dbase == abase) {
check_size_align(oprsz, maxsz, dofs);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
+ return;
}
+
+ tcg_gen_gvec_2_var(dbase, dofs, abase, aofs, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_mov_var(vece, tcg_env, dofs, tcg_env, aofs, oprsz, maxsz);
}
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1697,7 +1779,7 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
{
check_size_align(oprsz, maxsz, dofs);
tcg_debug_assert(vece <= MO_32);
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
}
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1705,7 +1787,7 @@ void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
{
check_size_align(oprsz, maxsz, dofs);
tcg_debug_assert(vece <= MO_64);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
}
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1717,7 +1799,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
if (type != 0) {
TCGv_vec t_vec = tcg_temp_new_vec(type);
tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+ do_dup_store(type, tcg_env, dofs, oprsz, maxsz, t_vec);
} else if (vece <= MO_32) {
TCGv_i32 in = tcg_temp_ebb_new_i32();
switch (vece) {
@@ -1731,12 +1813,12 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_gen_ld_i32(in, tcg_env, aofs);
break;
}
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
tcg_temp_free_i32(in);
} else {
TCGv_i64 in = tcg_temp_ebb_new_i64();
tcg_gen_ld_i64(in, tcg_env, aofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
}
} else if (vece == 4) {
@@ -1765,7 +1847,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_temp_free_i64(in1);
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
} else if (vece == 5) {
/* 256-bit duplicate. */
@@ -1808,18 +1890,24 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
} else {
g_assert_not_reached();
}
}
+void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, uint64_t x)
+{
+ check_size_align(oprsz, maxsz, dofs);
+ do_dup(vece, dbase, dofs, oprsz, maxsz, NULL, NULL, x);
+}
+
void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint64_t x)
{
- check_size_align(oprsz, maxsz, dofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
+ tcg_gen_gvec_dup_imm_var(vece, tcg_env, dofs, oprsz, maxsz, x);
}
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1917,8 +2005,10 @@ void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
-void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen3 g[4] = {
{ .fni8 = tcg_gen_vec_add8_i64,
@@ -1945,7 +2035,15 @@ void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+ tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_add_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz);
}
void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2098,8 +2196,10 @@ void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
tcg_temp_free_i64(t2);
}
-void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen3 g[4] = {
{ .fni8 = tcg_gen_vec_sub8_i64,
@@ -2126,7 +2226,15 @@ void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+ tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_sub_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz);
}
static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
@@ -3135,7 +3243,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
/* If the backend has a scalar expansion, great. */
type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
@@ -3241,7 +3349,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
clear_tail:
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -3755,10 +3863,10 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
+ do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
NULL, NULL, -(cond == TCG_COND_ALWAYS));
return;
}
@@ -3820,7 +3928,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -3875,10 +3983,10 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
TCGType type;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
+ do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
NULL, NULL, -(cond == TCG_COND_ALWAYS));
return;
}
@@ -3939,7 +4047,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t i;
tcg_gen_extrl_i64_i32(t1, c);
- for (i = 0; i < oprsz; i += 8) {
+ for (i = 0; i < oprsz; i += 4) {
tcg_gen_ld_i32(t0, tcg_env, aofs + i);
tcg_gen_negsetcond_i32(cond, t0, t0, t1);
tcg_gen_st_i32(t0, tcg_env, dofs + i);
@@ -3961,7 +4069,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
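
The tcg-op-gvec.c changes above give each expander a _var form taking an explicit TCGv_ptr base per operand, with the old entry points becoming thin wrappers that pass tcg_env. A minimal sketch of a caller, not part of the patch, assuming a translator that has set up a hypothetical TCGv_ptr "base" addressing a vector register file outside env and that the _var prototypes are exported from tcg/tcg-op-gvec.h:

/* Illustrative only: "base" and gen_example_vadd are hypothetical. */
#include "tcg/tcg-op-gvec.h"

static void gen_example_vadd(TCGv_ptr base, uint32_t dofs,
                             uint32_t aofs, uint32_t bofs)
{
    /* 16-byte vectors of 32-bit lanes, all offsets relative to "base". */
    tcg_gen_gvec_add_var(MO_32, base, dofs, base, aofs,
                         base, bofs, 16, 16);
}

The unsuffixed tcg_gen_gvec_add() is now exactly this call with tcg_env for all three bases.
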
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 8510160..5484960 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -27,25 +27,27 @@
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
+#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"
-
+#include "tcg-has.h"
+#include "tcg-target-mo.h"
static void check_max_alignment(unsigned a_bits)
{
/*
* The requested alignment cannot overlap the TLB flags.
- * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
+ * FIXME: Must keep the count up-to-date with "exec/tlb-flags.h".
*/
if (tcg_use_softmmu) {
- tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
+ tcg_debug_assert(a_bits + 5 <= TARGET_PAGE_BITS);
}
}
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
- unsigned a_bits = get_alignment_bits(op);
+ unsigned a_bits = memop_alignment_bits(op);
check_max_alignment(a_bits);
@@ -87,37 +89,40 @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
return op;
}
-static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
- TCGTemp *addr, MemOpIdx oi)
+static void gen_ldst1(TCGOpcode opc, TCGType type, TCGTemp *v,
+ TCGTemp *addr, MemOpIdx oi)
{
- if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
- if (vh) {
- tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
- } else {
- tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
- }
- } else {
- /* See TCGV_LOW/HIGH. */
- TCGTemp *al = addr + HOST_BIG_ENDIAN;
- TCGTemp *ah = addr + !HOST_BIG_ENDIAN;
+ TCGOp *op = tcg_gen_op3(opc, type, temp_arg(v), temp_arg(addr), oi);
+ TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
+}
- if (vh) {
- tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
- temp_arg(al), temp_arg(ah), oi);
- } else {
- tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
- }
+static void gen_ldst2(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
+ TCGTemp *addr, MemOpIdx oi)
+{
+ TCGOp *op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
+ temp_arg(addr), oi);
+ TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
+}
+
+static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
+ tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+ addr, oi);
+ } else {
+ gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
}
}
-static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+static void gen_st_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
if (TCG_TARGET_REG_BITS == 32) {
- TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
- TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
- gen_ldst(opc, vl, vh, addr, oi);
+ gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I64,
+ tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+ addr, oi);
} else {
- gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
+ gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
}
}
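
gen_ldst1() and gen_ldst2() above stash the access size in TCGOP_FLAGS by masking the MemOp back out of the MemOpIdx. A sketch of that round trip, not part of the patch, with "mmu_idx" standing in for whatever index the caller would use:

/* Illustrative only: shows where the MO_SIZE bits come from. */
static unsigned example_size_flags(unsigned mmu_idx)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx);
    MemOp mop = get_memop(oi);   /* recovers MO_TEUL | MO_ALIGN */

    return mop & MO_SIZE;        /* MO_32 here; what TCGOP_FLAGS gets */
}
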
@@ -148,11 +153,11 @@ static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
return NULL;
}
+#ifdef CONFIG_PLUGIN
static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
enum qemu_plugin_mem_rw rw)
{
-#ifdef CONFIG_PLUGIN
if (tcg_ctx->plugin_insn != NULL) {
qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
@@ -172,6 +177,54 @@ plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
}
}
}
+}
+#endif
+
+static void
+plugin_gen_mem_callbacks_i32(TCGv_i32 val,
+ TCGv_i64 copy_addr, TCGTemp *orig_addr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+#ifdef CONFIG_PLUGIN
+ if (tcg_ctx->plugin_insn != NULL) {
+ tcg_gen_st_i32(val, tcg_env,
+ offsetof(CPUState, neg.plugin_mem_value_low) -
+ sizeof(CPUState) + (HOST_BIG_ENDIAN * 4));
+ plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
+ }
+#endif
+}
+
+static void
+plugin_gen_mem_callbacks_i64(TCGv_i64 val,
+ TCGv_i64 copy_addr, TCGTemp *orig_addr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+#ifdef CONFIG_PLUGIN
+ if (tcg_ctx->plugin_insn != NULL) {
+ tcg_gen_st_i64(val, tcg_env,
+ offsetof(CPUState, neg.plugin_mem_value_low) -
+ sizeof(CPUState));
+ plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
+ }
+#endif
+}
+
+static void
+plugin_gen_mem_callbacks_i128(TCGv_i128 val,
+ TCGv_i64 copy_addr, TCGTemp *orig_addr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+#ifdef CONFIG_PLUGIN
+ if (tcg_ctx->plugin_insn != NULL) {
+ tcg_gen_st_i64(TCGV128_LOW(val), tcg_env,
+ offsetof(CPUState, neg.plugin_mem_value_low) -
+ sizeof(CPUState));
+ tcg_gen_st_i64(TCGV128_HIGH(val), tcg_env,
+ offsetof(CPUState, neg.plugin_mem_value_high) -
+ sizeof(CPUState));
+ plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw);
+ }
#endif
}
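
The new per-width callbacks write the accessed value into CPUState.neg.plugin_mem_value_{low,high} at a negative offset from tcg_env; the "HOST_BIG_ENDIAN * 4" term steers a 32-bit store into the low half of the 64-bit slot. A plain-C sketch of that endianness argument, not part of the patch:

/* Illustrative only: HOST_BIG_ENDIAN is assumed to be 0/1 with the
 * same sense as QEMU's macro. */
#include <stdint.h>

#ifndef HOST_BIG_ENDIAN
#define HOST_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#endif

static uint64_t low_half_store(uint32_t val32)
{
    union {
        uint64_t u64;
        uint32_t u32[2];
    } slot = { .u64 = 0 };

    slot.u32[HOST_BIG_ENDIAN] = val32;   /* index 1 on BE, 0 on LE */
    return slot.u64;                     /* always (uint64_t)val32 */
}
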
@@ -181,7 +234,6 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
- TCGOpcode opc;
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -197,13 +249,9 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
}
copy_addr = plugin_maybe_preserve_addr(addr);
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i32;
- } else {
- opc = INDEX_op_qemu_ld_a64_i32;
- }
- gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
- plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
+ gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
+ plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
+ QEMU_PLUGIN_MEM_R);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -234,7 +282,6 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
{
TCGv_i32 swap = NULL;
MemOpIdx orig_oi, oi;
- TCGOpcode opc;
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
@@ -257,21 +304,8 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
- if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st8_a32_i32;
- } else {
- opc = INDEX_op_qemu_st8_a64_i32;
- }
- } else {
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i32;
- } else {
- opc = INDEX_op_qemu_st_a64_i32;
- }
- }
- gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
- plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+ gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
+ plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
if (swap) {
tcg_temp_free_i32(swap);
@@ -292,7 +326,6 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
- TCGOpcode opc;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -318,13 +351,9 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
}
copy_addr = plugin_maybe_preserve_addr(addr);
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i64;
- } else {
- opc = INDEX_op_qemu_ld_a64_i64;
- }
- gen_ldst_i64(opc, val, addr, oi);
- plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
+ gen_ld_i64(val, addr, oi);
+ plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
+ QEMU_PLUGIN_MEM_R);
if ((orig_memop ^ memop) & MO_BSWAP) {
int flags = (orig_memop & MO_SIGN
@@ -359,7 +388,6 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
{
TCGv_i64 swap = NULL;
MemOpIdx orig_oi, oi;
- TCGOpcode opc;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -390,13 +418,8 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i64;
- } else {
- opc = INDEX_op_qemu_st_a64_i64;
- }
- gen_ldst_i64(opc, val, addr, oi);
- plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+ gen_st_i64(val, addr, oi);
+ plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
if (swap) {
tcg_temp_free_i64(swap);
@@ -507,9 +530,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
- TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
/* In serial mode, reduce atomicity. */
@@ -535,12 +557,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i128;
- } else {
- opc = INDEX_op_qemu_ld_a64_i128;
- }
- gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+ gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo),
+ tcgv_i64_temp(hi), addr, oi);
if (need_bswap) {
tcg_gen_bswap64_i64(lo, lo);
@@ -555,12 +573,6 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
canonicalize_memop_i128_as_i64(mop, memop);
need_bswap = (mop[0] ^ memop) & MO_BSWAP;
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i64;
- } else {
- opc = INDEX_op_qemu_ld_a64_i64;
- }
-
/*
* Since there are no global TCGv_i128, there is no visible state
* changed if the second load faults. Load directly into the two
@@ -574,7 +586,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
y = TCGV128_LOW(val);
}
- gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+ gen_ld_i64(x, addr, make_memop_idx(mop[0], idx));
if (need_bswap) {
tcg_gen_bswap64_i64(x, x);
@@ -590,7 +602,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
addr_p8 = tcgv_i64_temp(t);
}
- gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+ gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx));
tcg_temp_free_internal(addr_p8);
if (need_bswap) {
@@ -606,7 +618,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
tcg_constant_i32(orig_oi));
}
- plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
+ plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
+ QEMU_PLUGIN_MEM_R);
}
void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
@@ -623,9 +636,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
- TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
/* In serial mode, reduce atomicity. */
@@ -654,12 +666,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i128;
- } else {
- opc = INDEX_op_qemu_st_a64_i128;
- }
- gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+ gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128,
+ tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
if (need_bswap) {
tcg_temp_free_i64(lo);
@@ -672,12 +680,6 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
canonicalize_memop_i128_as_i64(mop, memop);
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i64;
- } else {
- opc = INDEX_op_qemu_st_a64_i64;
- }
-
if ((memop & MO_BSWAP) == MO_LE) {
x = TCGV128_LOW(val);
y = TCGV128_HIGH(val);
@@ -692,7 +694,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
x = b;
}
- gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+ gen_st_i64(x, addr, make_memop_idx(mop[0], idx));
if (tcg_ctx->addr_type == TCG_TYPE_I32) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -706,10 +708,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
if (b) {
tcg_gen_bswap64_i64(b, y);
- gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
+ gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx));
tcg_temp_free_i64(b);
} else {
- gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+ gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx));
}
tcg_temp_free_internal(addr_p8);
} else {
@@ -722,7 +724,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
tcg_constant_i32(orig_oi));
}
- plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+ plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi,
+ QEMU_PLUGIN_MEM_W);
}
void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index 84af210..893d68e 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -23,6 +23,7 @@
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "tcg-internal.h"
+#include "tcg-has.h"
/*
* Vector optional opcode tracking.
@@ -143,7 +144,7 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list,
void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
TCGOp *op = tcg_emit_op(opc, 2);
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
+ TCGOP_TYPE(op) = type;
TCGOP_VECE(op) = vece;
op->args[0] = r;
op->args[1] = a;
@@ -153,7 +154,7 @@ void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg r, TCGArg a, TCGArg b)
{
TCGOp *op = tcg_emit_op(opc, 3);
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
+ TCGOP_TYPE(op) = type;
TCGOP_VECE(op) = vece;
op->args[0] = r;
op->args[1] = a;
@@ -164,7 +165,7 @@ void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg r, TCGArg a, TCGArg b, TCGArg c)
{
TCGOp *op = tcg_emit_op(opc, 4);
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
+ TCGOP_TYPE(op) = type;
TCGOP_VECE(op) = vece;
op->args[0] = r;
op->args[1] = a;
@@ -172,11 +173,11 @@ void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
op->args[3] = c;
}
-static void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
- TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e)
+void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
+ TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e)
{
TCGOp *op = tcg_emit_op(opc, 6);
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
+ TCGOP_TYPE(op) = type;
TCGOP_VECE(op) = vece;
op->args[0] = r;
op->args[1] = a;
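
With the tcg-op-vec.c hunks, the per-op vector length (TCGOP_VECL, stored as type - TCG_TYPE_V64) is replaced by recording the TCGType directly, and vec_gen_6() becomes externally visible. A sketch of reading an emitted op back under the new encoding, not part of the patch:

/* Illustrative only: "op" is assumed to come from a vec_gen_* helper. */
static void example_inspect(TCGOp *op)
{
    TCGType type = TCGOP_TYPE(op);   /* e.g. TCG_TYPE_V128; formerly
                                        TCG_TYPE_V64 + TCGOP_VECL(op) */
    unsigned vece = TCGOP_VECE(op);  /* element size log2, e.g. MO_32 */

    (void)type;
    (void)vece;
}
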
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index eff3728..dfa5c38 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -29,7 +29,7 @@
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"
-
+#include "tcg-has.h"
/*
* Encourage the compiler to tail-call to a function, rather than inlining.
@@ -37,57 +37,71 @@
*/
#define NI __attribute__((noinline))
-void NI tcg_gen_op1(TCGOpcode opc, TCGArg a1)
+TCGOp * NI tcg_gen_op1(TCGOpcode opc, TCGType type, TCGArg a1)
{
TCGOp *op = tcg_emit_op(opc, 1);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
+ return op;
}
-void NI tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
+TCGOp * NI tcg_gen_op2(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2)
{
TCGOp *op = tcg_emit_op(opc, 2);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
op->args[1] = a2;
+ return op;
}
-void NI tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
+TCGOp * NI tcg_gen_op3(TCGOpcode opc, TCGType type, TCGArg a1,
+ TCGArg a2, TCGArg a3)
{
TCGOp *op = tcg_emit_op(opc, 3);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
op->args[1] = a2;
op->args[2] = a3;
+ return op;
}
-void NI tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
+TCGOp * NI tcg_gen_op4(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
+ TCGArg a3, TCGArg a4)
{
TCGOp *op = tcg_emit_op(opc, 4);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
op->args[1] = a2;
op->args[2] = a3;
op->args[3] = a4;
+ return op;
}
-void NI tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
- TCGArg a4, TCGArg a5)
+TCGOp * NI tcg_gen_op5(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
+ TCGArg a3, TCGArg a4, TCGArg a5)
{
TCGOp *op = tcg_emit_op(opc, 5);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
op->args[1] = a2;
op->args[2] = a3;
op->args[3] = a4;
op->args[4] = a5;
+ return op;
}
-void NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
- TCGArg a4, TCGArg a5, TCGArg a6)
+TCGOp * NI tcg_gen_op6(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
+ TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
{
TCGOp *op = tcg_emit_op(opc, 6);
+ TCGOP_TYPE(op) = type;
op->args[0] = a1;
op->args[1] = a2;
op->args[2] = a3;
op->args[3] = a4;
op->args[4] = a5;
op->args[5] = a6;
+ return op;
}
/*
@@ -100,158 +114,146 @@ void NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
# define DNI
#endif
-static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
+static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGType type, TCGv_i32 a1)
{
- tcg_gen_op1(opc, tcgv_i32_arg(a1));
+ tcg_gen_op1(opc, type, tcgv_i32_arg(a1));
}
-static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
+static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGType type, TCGv_i64 a1)
{
- tcg_gen_op1(opc, tcgv_i64_arg(a1));
+ tcg_gen_op1(opc, type, tcgv_i64_arg(a1));
}
-static void DNI tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
+static TCGOp * DNI tcg_gen_op1i(TCGOpcode opc, TCGType type, TCGArg a1)
{
- tcg_gen_op1(opc, a1);
+ return tcg_gen_op1(opc, type, a1);
}
static void DNI tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
{
- tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
+ tcg_gen_op2(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
}
static void DNI tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
{
- tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
+ tcg_gen_op2(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
}
static void DNI tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGv_i32 a3)
{
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1),
+ tcgv_i32_arg(a2), tcgv_i32_arg(a3));
}
static void DNI tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGv_i64 a3)
{
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1),
+ tcgv_i64_arg(a2), tcgv_i64_arg(a3));
}
static void DNI tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGArg a3)
{
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
}
static void DNI tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGArg a3)
{
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
}
static void DNI tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
TCGv_ptr base, TCGArg offset)
{
- tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(val),
+ tcgv_ptr_arg(base), offset);
}
static void DNI tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
TCGv_ptr base, TCGArg offset)
{
- tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(val),
+ tcgv_ptr_arg(base), offset);
}
static void DNI tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4)
{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
tcgv_i32_arg(a3), tcgv_i32_arg(a4));
}
static void DNI tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4)
{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
tcgv_i64_arg(a3), tcgv_i64_arg(a4));
}
static void DNI tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4)
{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
tcgv_i32_arg(a3), a4);
}
static void DNI tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4)
{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
tcgv_i64_arg(a3), a4);
}
-static void DNI tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGArg a3, TCGArg a4)
+static TCGOp * DNI tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGArg a3, TCGArg a4)
{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
+ return tcg_gen_op4(opc, TCG_TYPE_I32,
+ tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
}
-static void DNI tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGArg a3, TCGArg a4)
+static TCGOp * DNI tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGArg a3, TCGArg a4)
{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
+ return tcg_gen_op4(opc, TCG_TYPE_I64,
+ tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
}
static void DNI tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
{
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
}
static void DNI tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
{
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
}
static void DNI tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4, TCGArg a5)
{
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
tcgv_i32_arg(a3), a4, a5);
}
static void DNI tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4, TCGArg a5)
{
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
tcgv_i64_arg(a3), a4, a5);
}
-static void DNI tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4,
- TCGv_i32 a5, TCGv_i32 a6)
-{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
- tcgv_i32_arg(a6));
-}
-
-static void DNI tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4,
- TCGv_i64 a5, TCGv_i64 a6)
-{
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
- tcgv_i64_arg(a6));
-}
-
static void DNI tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4,
TCGv_i32 a5, TCGArg a6)
{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
}
@@ -259,16 +261,16 @@ static void DNI tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4,
TCGv_i64 a5, TCGArg a6)
{
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcg_gen_op6(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
}
-static void DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4,
- TCGArg a5, TCGArg a6)
+static TCGOp * DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4,
+ TCGArg a5, TCGArg a6)
{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
+ return tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
}
/* Generic ops. */
@@ -276,21 +278,20 @@ static void DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
void gen_set_label(TCGLabel *l)
{
l->present = 1;
- tcg_gen_op1(INDEX_op_set_label, label_arg(l));
+ tcg_gen_op1(INDEX_op_set_label, 0, label_arg(l));
}
-static void add_last_as_label_use(TCGLabel *l)
+static void add_as_label_use(TCGLabel *l, TCGOp *op)
{
TCGLabelUse *u = tcg_malloc(sizeof(TCGLabelUse));
- u->op = tcg_last_op();
+ u->op = op;
QSIMPLEQ_INSERT_TAIL(&l->branches, u, next);
}
void tcg_gen_br(TCGLabel *l)
{
- tcg_gen_op1(INDEX_op_br, label_arg(l));
- add_last_as_label_use(l);
+ add_as_label_use(l, tcg_gen_op1(INDEX_op_br, 0, label_arg(l)));
}
void tcg_gen_mb(TCGBar mb_type)
@@ -308,31 +309,31 @@ void tcg_gen_mb(TCGBar mb_type)
#endif
if (parallel) {
- tcg_gen_op1(INDEX_op_mb, mb_type);
+ tcg_gen_op1(INDEX_op_mb, 0, mb_type);
}
}
void tcg_gen_plugin_cb(unsigned from)
{
- tcg_gen_op1(INDEX_op_plugin_cb, from);
+ tcg_gen_op1(INDEX_op_plugin_cb, 0, from);
}
void tcg_gen_plugin_mem_cb(TCGv_i64 addr, unsigned meminfo)
{
- tcg_gen_op2(INDEX_op_plugin_mem_cb, tcgv_i64_arg(addr), meminfo);
+ tcg_gen_op2(INDEX_op_plugin_mem_cb, 0, tcgv_i64_arg(addr), meminfo);
}
/* 32 bit ops */
void tcg_gen_discard_i32(TCGv_i32 arg)
{
- tcg_gen_op1_i32(INDEX_op_discard, arg);
+ tcg_gen_op1_i32(INDEX_op_discard, TCG_TYPE_I32, arg);
}
void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
{
if (ret != arg) {
- tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
+ tcg_gen_op2_i32(INDEX_op_mov, ret, arg);
}
}
@@ -343,7 +344,7 @@ void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_add, ret, arg1, arg2);
}
void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -358,7 +359,7 @@ void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_sub, ret, arg1, arg2);
}
void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
@@ -377,12 +378,12 @@ void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
+ tcg_gen_op2_i32(INDEX_op_neg, ret, arg);
}
void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_and, ret, arg1, arg2);
}
void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -395,17 +396,19 @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
case -1:
tcg_gen_mov_i32(ret, arg1);
return;
- case 0xff:
- /* Don't recurse with tcg_gen_ext8u_i32. */
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
- return;
- }
- break;
- case 0xffff:
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
- return;
+ default:
+ /*
+ * Canonicalize on extract, if valid. This aids x86 with its
+ * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode
+ * which does not require matching operands. Other backends can
+ * trivially expand the extract to AND during code generation.
+ */
+ if (!(arg2 & (arg2 + 1))) {
+ unsigned len = ctz32(~arg2);
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) {
+ tcg_gen_extract_i32(ret, arg1, 0, len);
+ return;
+ }
}
break;
}
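
The default case above recognizes masks of the form 2^len - 1 with "!(arg2 & (arg2 + 1))" and recovers len as the trailing-zero count of the complement. A standalone sketch of the test, not part of the patch; the 0 and -1 masks are assumed to have been filtered by the earlier switch cases, as they are here:

/* Illustrative only: m == 0 and m == 0xffffffff never reach this. */
#include <stdbool.h>
#include <stdint.h>

static bool is_low_mask(uint32_t m, unsigned *len)
{
    if (m & (m + 1)) {
        return false;             /* set bits are not one low run */
    }
    *len = __builtin_ctz(~m);     /* 0xff -> 8, 0xffff -> 16, ... */
    return true;
}
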
@@ -415,7 +418,7 @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_or, ret, arg1, arg2);
}
void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -432,7 +435,7 @@ void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_xor, ret, arg1, arg2);
}
void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -440,9 +443,10 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
/* Some cases can be optimized here. */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
- } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
+ } else if (arg2 == -1 &&
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
/* Don't recurse with tcg_gen_not_i32. */
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg1);
} else {
tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2));
}
@@ -450,8 +454,8 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_not_i32) {
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
+ if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg);
} else {
tcg_gen_xori_i32(ret, arg, -1);
}
@@ -459,7 +463,7 @@ void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_shl, ret, arg1, arg2);
}
void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -474,7 +478,7 @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_shr, ret, arg1, arg2);
}
void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -489,7 +493,7 @@ void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_sar, ret, arg1, arg2);
}
void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -507,8 +511,9 @@ void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
- tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
- add_last_as_label_use(l);
+ TCGOp *op = tcg_gen_op4ii_i32(INDEX_op_brcond,
+ arg1, arg2, cond, label_arg(l));
+ add_as_label_use(l, op);
}
}
@@ -529,7 +534,7 @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i32(ret, 0);
} else {
- tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i32(INDEX_op_setcond, ret, arg1, arg2, cond);
}
}
@@ -546,11 +551,8 @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
tcg_gen_movi_i32(ret, -1);
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i32(ret, 0);
- } else if (TCG_TARGET_HAS_negsetcond_i32) {
- tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond);
} else {
- tcg_gen_setcond_i32(cond, ret, arg1, arg2);
- tcg_gen_neg_i32(ret, ret);
+ tcg_gen_op4i_i32(INDEX_op_negsetcond, ret, arg1, arg2, cond);
}
}
@@ -562,7 +564,7 @@ void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mul, ret, arg1, arg2);
}
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -578,12 +580,12 @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_div_i32) {
- tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i32) {
+ if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t0, arg1, 31);
- tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
+ tcg_gen_op5_i32(INDEX_op_divs2, ret, t0, arg1, t0, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_div_i32(ret, arg1, arg2);
@@ -592,18 +594,18 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_rem_i32) {
- tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i32) {
+ if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_rems, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
tcg_gen_mul_i32(t0, t0, arg2);
tcg_gen_sub_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
- } else if (TCG_TARGET_HAS_div2_i32) {
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t0, arg1, 31);
- tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
+ tcg_gen_op5_i32(INDEX_op_divs2, t0, ret, arg1, t0, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_rem_i32(ret, arg1, arg2);
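
The middle branch above expands remainder as a - (a / b) * b using the truncating division opcode. A quick self-check of the identity, not part of the patch, assuming TCG divs truncates toward zero like C division:

/* Illustrative only: the b == 0 and INT32_MIN / -1 cases are excluded. */
#include <assert.h>
#include <stdint.h>

static void check_rem_via_div(int32_t a, int32_t b)
{
    assert(b != 0 && !(a == INT32_MIN && b == -1));
    assert(a % b == a - (a / b) * b);
}
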
@@ -612,12 +614,12 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_div_i32) {
- tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i32) {
+ if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 zero = tcg_constant_i32(0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2);
+ tcg_gen_op5_i32(INDEX_op_divu2, ret, t0, arg1, zero, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_divu_i32(ret, arg1, arg2);
@@ -626,18 +628,18 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_rem_i32) {
- tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i32) {
+ if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_remu, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2);
tcg_gen_mul_i32(t0, t0, arg2);
tcg_gen_sub_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
- } else if (TCG_TARGET_HAS_div2_i32) {
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 zero = tcg_constant_i32(0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2);
+ tcg_gen_op5_i32(INDEX_op_divu2, t0, ret, arg1, zero, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_remu_i32(ret, arg1, arg2);
@@ -646,8 +648,8 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_andc, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_not_i32(t0, arg2);
@@ -658,8 +660,8 @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_eqv_i32) {
- tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_eqv, ret, arg1, arg2);
} else {
tcg_gen_xor_i32(ret, arg1, arg2);
tcg_gen_not_i32(ret, ret);
@@ -668,8 +670,8 @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_nand_i32) {
- tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_nand, ret, arg1, arg2);
} else {
tcg_gen_and_i32(ret, arg1, arg2);
tcg_gen_not_i32(ret, ret);
@@ -678,8 +680,8 @@ void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_nor_i32) {
- tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_nor, ret, arg1, arg2);
} else {
tcg_gen_or_i32(ret, arg1, arg2);
tcg_gen_not_i32(ret, ret);
@@ -688,8 +690,8 @@ void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_orc_i32) {
- tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_orc, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_not_i32(t0, arg2);
@@ -700,9 +702,9 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_clz_i32) {
- tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_clz_i64) {
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_clz, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t1, arg1);
@@ -725,9 +727,13 @@ void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_ctz_i32) {
- tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_ctz_i64) {
+ TCGv_i32 z, t;
+
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_ctz, ret, arg1, arg2);
+ return;
+ }
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) {
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t1, arg1);
@@ -736,34 +742,34 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_extrl_i64_i32(ret, t1);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
- } else if (TCG_TARGET_HAS_ctpop_i32
- || TCG_TARGET_HAS_ctpop_i64
- || TCG_TARGET_HAS_clz_i32
- || TCG_TARGET_HAS_clz_i64) {
- TCGv_i32 z, t = tcg_temp_ebb_new_i32();
-
- if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
- tcg_gen_subi_i32(t, arg1, 1);
- tcg_gen_andc_i32(t, t, arg1);
- tcg_gen_ctpop_i32(t, t);
- } else {
- /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
- tcg_gen_neg_i32(t, arg1);
- tcg_gen_and_i32(t, t, arg1);
- tcg_gen_clzi_i32(t, t, 32);
- tcg_gen_xori_i32(t, t, 31);
- }
- z = tcg_constant_i32(0);
- tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
- tcg_temp_free_i32(t);
+ return;
+ }
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) {
+ t = tcg_temp_ebb_new_i32();
+ tcg_gen_subi_i32(t, arg1, 1);
+ tcg_gen_andc_i32(t, t, arg1);
+ tcg_gen_ctpop_i32(t, t);
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) {
+ t = tcg_temp_ebb_new_i32();
+ tcg_gen_neg_i32(t, arg1);
+ tcg_gen_and_i32(t, t, arg1);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_xori_i32(t, t, 31);
} else {
gen_helper_ctz_i32(ret, arg1, arg2);
+ return;
}
+
+ z = tcg_constant_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i32(t);
}
void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
- if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
+ if (arg2 == 32
+ && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)
+ && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
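
The ctpop and clz fallbacks above rest on two identities for nonzero x: the bits strictly below the lowest set bit number exactly ctz(x), and clz of the isolated lowest bit is 31 - ctz(x). A short self-check, not part of the patch:

/* Illustrative only: valid for any nonzero 32-bit x. */
#include <assert.h>
#include <stdint.h>

static void check_ctz_identities(uint32_t x)
{
    unsigned ref = __builtin_ctz(x);

    /* (x - 1) & ~x sets exactly the bits below the lowest set bit. */
    assert(ref == (unsigned)__builtin_popcount((x - 1) & ~x));

    /* x & -x isolates the lowest set bit; clz of bit k is 31 - k,
     * so xor with 31 recovers k. */
    assert(ref == (31u ^ (unsigned)__builtin_clz(x & -x)));
}
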
@@ -777,7 +783,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_clz_i32) {
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t, arg, 31);
tcg_gen_xor_i32(t, t, arg);
@@ -791,9 +797,9 @@ void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
- if (TCG_TARGET_HAS_ctpop_i32) {
- tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
- } else if (TCG_TARGET_HAS_ctpop_i64) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) {
+ tcg_gen_op2_i32(INDEX_op_ctpop, ret, arg1);
+ } else if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t, arg1);
tcg_gen_ctpop_i64(t, t);
@@ -806,15 +812,18 @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_rot_i32) {
- tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ tcg_gen_neg_i32(t0, arg2);
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0);
+ tcg_temp_free_i32(t0);
} else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_ebb_new_i32();
- t1 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_shl_i32(t0, arg1, arg2);
- tcg_gen_subfi_i32(t1, 32, arg2);
+ tcg_gen_neg_i32(t1, arg2);
tcg_gen_shr_i32(t1, arg1, t1);
tcg_gen_or_i32(ret, t0, t1);
tcg_temp_free_i32(t0);
@@ -828,12 +837,15 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
- } else if (TCG_TARGET_HAS_rot_i32) {
- tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_constant_i32(arg2);
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0);
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_constant_i32(32 - arg2);
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0);
} else {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_ebb_new_i32();
- t1 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_shli_i32(t0, arg1, arg2);
tcg_gen_shri_i32(t1, arg1, 32 - arg2);
tcg_gen_or_i32(ret, t0, t1);
@@ -844,15 +856,18 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_rot_i32) {
- tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ tcg_gen_neg_i32(t0, arg2);
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0);
+ tcg_temp_free_i32(t0);
} else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_ebb_new_i32();
- t1 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_shr_i32(t0, arg1, arg2);
- tcg_gen_subfi_i32(t1, 32, arg2);
+ tcg_gen_neg_i32(t1, arg2);
tcg_gen_shl_i32(t1, arg1, t1);
tcg_gen_or_i32(ret, t0, t1);
tcg_temp_free_i32(t0);
@@ -863,12 +878,7 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
tcg_debug_assert(arg2 >= 0 && arg2 < 32);
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
- }
+ tcg_gen_rotli_i32(ret, arg1, -arg2 & 31);
}
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
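
tcg_gen_rotri_i32() above drops its arg2 == 0 special case because rotate-right by n equals rotate-left by (-n) & 31 for every n in [0, 31], including zero. A quick check of that identity, not part of the patch:

/* Illustrative only. */
#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
    return n ? (x << n) | (x >> (32 - n)) : x;
}

static void check_rotr_as_rotl(uint32_t x, unsigned n)
{
    uint32_t rotr;

    assert(n < 32);
    rotr = n ? (x >> n) | (x << (32 - n)) : x;
    assert(rotr == rotl32(x, -n & 31));
}
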
@@ -886,14 +896,14 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
tcg_gen_mov_i32(ret, arg2);
return;
}
- if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
- tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
+ tcg_gen_op5ii_i32(INDEX_op_deposit, ret, arg1, arg2, ofs, len);
return;
}
t1 = tcg_temp_ebb_new_i32();
- if (TCG_TARGET_HAS_extract2_i32) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
if (ofs + len == 32) {
tcg_gen_shli_i32(t1, arg1, len);
tcg_gen_extract2_i32(ret, t1, arg2, len);
@@ -931,45 +941,24 @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
tcg_gen_shli_i32(ret, arg, ofs);
} else if (ofs == 0) {
tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
- } else if (TCG_TARGET_HAS_deposit_i32
- && TCG_TARGET_deposit_i32_valid(ofs, len)) {
+ } else if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
TCGv_i32 zero = tcg_constant_i32(0);
- tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
- } else {
- /* To help two-operand hosts we prefer to zero-extend first,
- which allows ARG to stay live. */
- switch (len) {
- case 16:
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_ext16u_i32(ret, arg);
- tcg_gen_shli_i32(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_ext8u_i32(ret, arg);
- tcg_gen_shli_i32(ret, ret, ofs);
- return;
- }
- break;
+ tcg_gen_op5ii_i32(INDEX_op_deposit, ret, zero, arg, ofs, len);
+ } else {
+ /*
+ * To help two-operand hosts we prefer to zero-extend first,
+ * which allows ARG to stay live.
+ */
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) {
+ tcg_gen_extract_i32(ret, arg, 0, len);
+ tcg_gen_shli_i32(ret, ret, ofs);
+ return;
}
/* Otherwise prefer zero-extension over AND for code size. */
- switch (ofs + len) {
- case 16:
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_shli_i32(ret, arg, ofs);
- tcg_gen_ext16u_i32(ret, ret);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_shli_i32(ret, arg, ofs);
- tcg_gen_ext8u_i32(ret, ret);
- return;
- }
- break;
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+ tcg_gen_shli_i32(ret, arg, ofs);
+ tcg_gen_extract_i32(ret, ret, 0, ofs + len);
+ return;
}
tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
tcg_gen_shli_i32(ret, ret, ofs);
@@ -989,33 +978,21 @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
tcg_gen_shri_i32(ret, arg, 32 - len);
return;
}
- if (ofs == 0) {
- tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
+
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, ofs, len);
return;
}
-
- if (TCG_TARGET_HAS_extract_i32
- && TCG_TARGET_extract_i32_valid(ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
+ if (ofs == 0) {
+ tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
return;
}
/* Assume that zero-extension, if available, is cheaper than a shift. */
- switch (ofs + len) {
- case 16:
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_ext16u_i32(ret, arg);
- tcg_gen_shri_i32(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_ext8u_i32(ret, arg);
- tcg_gen_shri_i32(ret, ret, ofs);
- return;
- }
- break;
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, 0, ofs + len);
+ tcg_gen_shri_i32(ret, ret, ofs);
+ return;
}
/* ??? Ideally we'd know what values are available for immediate AND.
@@ -1046,55 +1023,22 @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
tcg_gen_sari_i32(ret, arg, 32 - len);
return;
}
- if (ofs == 0) {
- switch (len) {
- case 16:
- tcg_gen_ext16s_i32(ret, arg);
- return;
- case 8:
- tcg_gen_ext8s_i32(ret, arg);
- return;
- }
- }
- if (TCG_TARGET_HAS_sextract_i32
- && TCG_TARGET_extract_i32_valid(ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, ofs, len)) {
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, ofs, len);
return;
}
/* Assume that sign-extension, if available, is cheaper than a shift. */
- switch (ofs + len) {
- case 16:
- if (TCG_TARGET_HAS_ext16s_i32) {
- tcg_gen_ext16s_i32(ret, arg);
- tcg_gen_sari_i32(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8s_i32) {
- tcg_gen_ext8s_i32(ret, arg);
- tcg_gen_sari_i32(ret, ret, ofs);
- return;
- }
- break;
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, 0, ofs + len);
+ tcg_gen_sari_i32(ret, ret, ofs);
+ return;
}
- switch (len) {
- case 16:
- if (TCG_TARGET_HAS_ext16s_i32) {
- tcg_gen_shri_i32(ret, arg, ofs);
- tcg_gen_ext16s_i32(ret, ret);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8s_i32) {
- tcg_gen_shri_i32(ret, arg, ofs);
- tcg_gen_ext8s_i32(ret, ret);
- return;
- }
- break;
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, len)) {
+ tcg_gen_shri_i32(ret, arg, ofs);
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, ret, 0, len);
+ return;
}
tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
@@ -1115,8 +1059,8 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
tcg_gen_mov_i32(ret, ah);
} else if (al == ah) {
tcg_gen_rotri_i32(ret, al, ofs);
- } else if (TCG_TARGET_HAS_extract2_i32) {
- tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4i_i32(INDEX_op_extract2, ret, al, ah, ofs);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(t0, al, ofs);
@@ -1133,52 +1077,89 @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_mov_i32(ret, v2);
} else {
- tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
+ tcg_gen_op6i_i32(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
}
}
void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
- if (TCG_TARGET_HAS_add2_i32) {
- tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ tcg_gen_op3_i32(INDEX_op_addco, t0, al, bl);
+ tcg_gen_op3_i32(INDEX_op_addci, rh, ah, bh);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
} else {
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- tcg_gen_concat_i32_i64(t0, al, ah);
- tcg_gen_concat_i32_i64(t1, bl, bh);
- tcg_gen_add_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+ tcg_gen_add_i32(t0, al, bl);
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, al);
+ tcg_gen_add_i32(rh, ah, bh);
+ tcg_gen_add_i32(rh, rh, t1);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co,
+ TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci)
+{
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 zero = tcg_constant_i32(0);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+
+ tcg_gen_op3_i32(INDEX_op_addco, t0, ci, mone);
+ tcg_gen_op3_i32(INDEX_op_addcio, r, a, b);
+ tcg_gen_op3_i32(INDEX_op_addci, co, zero, zero);
+ tcg_temp_free_i32(t0);
+ } else {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+
+ tcg_gen_add_i32(t0, a, b);
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, a);
+ tcg_gen_add_i32(r, t0, ci);
+ tcg_gen_setcond_i32(TCG_COND_LTU, t0, r, t0);
+ tcg_gen_or_i32(co, t0, t1);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
}
}
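
A host-side sketch of the generic fallback above, which detects the carry out
of an unsigned add by the wrapped sum comparing below one of the addends
(TCG_COND_LTU); the helper name and standalone form are illustrative only:

    #include <stdint.h>

    static uint32_t addcio32(uint32_t a, uint32_t b, uint32_t ci, uint32_t *co)
    {
        uint32_t t0 = a + b;     /* tcg_gen_add_i32(t0, a, b) */
        uint32_t c0 = t0 < a;    /* setcond LTU: carry out of a + b */
        uint32_t r  = t0 + ci;   /* fold in the carry-in (0 or 1) */
        uint32_t c1 = r < t0;    /* carry out of the second add */
        *co = c0 | c1;           /* at most one of the two can be set */
        return r;
    }
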
void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
- if (TCG_TARGET_HAS_sub2_i32) {
- tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
+ if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_I32, 0)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ tcg_gen_op3_i32(INDEX_op_subbo, t0, al, bl);
+ tcg_gen_op3_i32(INDEX_op_subbi, rh, ah, bh);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
} else {
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- tcg_gen_concat_i32_i64(t0, al, ah);
- tcg_gen_concat_i32_i64(t1, bl, bh);
- tcg_gen_sub_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+ tcg_gen_sub_i32(t0, al, bl);
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, al, bl);
+ tcg_gen_sub_i32(rh, ah, bh);
+ tcg_gen_sub_i32(rh, rh, t1);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
}
}
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_mulu2_i32) {
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
- } else if (TCG_TARGET_HAS_muluh_i32) {
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4_i32(INDEX_op_mulu2, rl, rh, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
- tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_muluh, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
} else if (TCG_TARGET_REG_BITS == 64) {
@@ -1191,18 +1172,18 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
} else {
- qemu_build_not_reached();
+ g_assert_not_reached();
}
}
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_muls2_i32) {
- tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
- } else if (TCG_TARGET_HAS_mulsh_i32) {
+ if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4_i32(INDEX_op_muls2, rl, rh, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
- tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mulsh, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
} else if (TCG_TARGET_REG_BITS == 32) {
@@ -1264,40 +1245,22 @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_ext8s_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
- } else {
- tcg_gen_shli_i32(ret, arg, 24);
- tcg_gen_sari_i32(ret, ret, 24);
- }
+ tcg_gen_sextract_i32(ret, arg, 0, 8);
}
void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_ext16s_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
- } else {
- tcg_gen_shli_i32(ret, arg, 16);
- tcg_gen_sari_i32(ret, ret, 16);
- }
+ tcg_gen_sextract_i32(ret, arg, 0, 16);
}
void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
- } else {
- tcg_gen_andi_i32(ret, arg, 0xffu);
- }
+ tcg_gen_extract_i32(ret, arg, 0, 8);
}
void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
- } else {
- tcg_gen_andi_i32(ret, arg, 0xffffu);
- }
+ tcg_gen_extract_i32(ret, arg, 0, 16);
}
/*
@@ -1313,8 +1276,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
/* Only one extension flag may be present. */
tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));
- if (TCG_TARGET_HAS_bswap16_i32) {
- tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
+ if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3i_i32(INDEX_op_bswap16, ret, arg, flags);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -1350,8 +1313,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
*/
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_bswap32_i32) {
- tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
+ if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3i_i32(INDEX_op_bswap32, ret, arg, 0);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -1416,42 +1379,42 @@ void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_ld8u, ret, arg2, offset);
}
void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_ld8s, ret, arg2, offset);
}
void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_ld16u, ret, arg2, offset);
}
void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_ld16s, ret, arg2, offset);
}
void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_ld, ret, arg2, offset);
}
void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_st8, arg1, arg2, offset);
}
void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_st16, arg1, arg2, offset);
}
void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
- tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
+ tcg_gen_ldst_op_i32(INDEX_op_st, arg1, arg2, offset);
}
@@ -1460,7 +1423,7 @@ void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_discard_i64(TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op1_i64(INDEX_op_discard, arg);
+ tcg_gen_op1_i64(INDEX_op_discard, TCG_TYPE_I64, arg);
} else {
tcg_gen_discard_i32(TCGV_LOW(arg));
tcg_gen_discard_i32(TCGV_HIGH(arg));
@@ -1473,7 +1436,7 @@ void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
return;
}
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
+ tcg_gen_op2_i64(INDEX_op_mov, ret, arg);
} else {
TCGTemp *ts = tcgv_i64_temp(arg);
@@ -1500,7 +1463,7 @@ void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld8u, ret, arg2, offset);
} else {
tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1510,7 +1473,7 @@ void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld8s, ret, arg2, offset);
} else {
tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1520,7 +1483,7 @@ void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld16u, ret, arg2, offset);
} else {
tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1530,7 +1493,7 @@ void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld16s, ret, arg2, offset);
} else {
tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1540,7 +1503,7 @@ void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld32u, ret, arg2, offset);
} else {
tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1550,7 +1513,7 @@ void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld32s, ret, arg2, offset);
} else {
tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1564,7 +1527,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
* they cannot be the same temporary -- no chance of overlap.
*/
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_ld, ret, arg2, offset);
} else if (HOST_BIG_ENDIAN) {
tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
@@ -1577,7 +1540,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_st8, arg1, arg2, offset);
} else {
tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
}
@@ -1586,7 +1549,7 @@ void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_st16, arg1, arg2, offset);
} else {
tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset);
}
@@ -1595,7 +1558,7 @@ void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_st32, arg1, arg2, offset);
} else {
tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
}
@@ -1604,7 +1567,7 @@ void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
+ tcg_gen_ldst_op_i64(INDEX_op_st, arg1, arg2, offset);
} else if (HOST_BIG_ENDIAN) {
tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
@@ -1617,7 +1580,7 @@ void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_add, ret, arg1, arg2);
} else {
tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
@@ -1627,7 +1590,7 @@ void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_sub, ret, arg1, arg2);
} else {
tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
@@ -1637,7 +1600,7 @@ void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_and, ret, arg1, arg2);
} else {
tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
@@ -1647,7 +1610,7 @@ void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_or, ret, arg1, arg2);
} else {
tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
@@ -1657,7 +1620,7 @@ void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_xor, ret, arg1, arg2);
} else {
tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
@@ -1667,7 +1630,7 @@ void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_shl, ret, arg1, arg2);
} else {
gen_helper_shl_i64(ret, arg1, arg2);
}
@@ -1676,7 +1639,7 @@ void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_shr, ret, arg1, arg2);
} else {
gen_helper_shr_i64(ret, arg1, arg2);
}
@@ -1685,7 +1648,7 @@ void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_sar, ret, arg1, arg2);
} else {
gen_helper_sar_i64(ret, arg1, arg2);
}
@@ -1697,7 +1660,7 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
TCGv_i32 t1;
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mul, ret, arg1, arg2);
return;
}
@@ -1753,7 +1716,7 @@ void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
+ tcg_gen_op2_i64(INDEX_op_neg, ret, arg);
} else {
TCGv_i32 zero = tcg_constant_i32(0);
tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret),
@@ -1777,23 +1740,19 @@ void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
case -1:
tcg_gen_mov_i64(ret, arg1);
return;
- case 0xff:
- /* Don't recurse with tcg_gen_ext8u_i64. */
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
- return;
- }
- break;
- case 0xffff:
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
- return;
- }
- break;
- case 0xffffffffu:
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
- return;
+ default:
+ /*
+ * Canonicalize on extract, if valid. This aids x86 with its
+ * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode
+ * which does not require matching operands. Other backends can
+ * trivially expand the extract to AND during code generation.
+ */
+ if (!(arg2 & (arg2 + 1))) {
+ unsigned len = ctz64(~arg2);
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) {
+ tcg_gen_extract_i64(ret, arg1, 0, len);
+ return;
+ }
}
break;
}
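
The mask test added above accepts exactly the values of the form 2**len - 1
(contiguous ones in the low bits): such an m satisfies m & (m + 1) == 0, and
len is then the number of trailing ones, i.e. ctz64(~m). A hypothetical
standalone check, with the GCC/Clang builtin standing in for ctz64:

    #include <stdint.h>
    #include <assert.h>

    static unsigned low_ones_len(uint64_t m)
    {
        assert((m & (m + 1)) == 0);     /* e.g. 0xff, 0xffff, 0xffffffff */
        /* 0 and all-ones are handled by the earlier switch cases. */
        return m == UINT64_MAX ? 64 : __builtin_ctzll(~m);   /* 0xff -> 8 */
    }
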
@@ -1828,9 +1787,10 @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
/* Some cases can be optimized here. */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
- } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
+ } else if (arg2 == -1 &&
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
/* Don't recurse with tcg_gen_not_i64. */
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg1);
} else {
tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2));
}
@@ -1858,7 +1818,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
}
} else if (right) {
- if (TCG_TARGET_HAS_extract2_i32) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
tcg_gen_extract2_i32(TCGV_LOW(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
} else {
@@ -1872,7 +1832,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
}
} else {
- if (TCG_TARGET_HAS_extract2_i32) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
tcg_gen_extract2_i32(TCGV_HIGH(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
} else {
@@ -1927,15 +1887,16 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
+ TCGOp *op;
if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
- TCGV_HIGH(arg1), TCGV_LOW(arg2),
- TCGV_HIGH(arg2), cond, label_arg(l));
+ op = tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
+ TCGV_HIGH(arg1), TCGV_LOW(arg2),
+ TCGV_HIGH(arg2), cond, label_arg(l));
} else {
- tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
- label_arg(l));
+ op = tcg_gen_op4ii_i64(INDEX_op_brcond, arg1, arg2, cond,
+ label_arg(l));
}
- add_last_as_label_use(l);
+ add_as_label_use(l, op);
}
}
@@ -1946,12 +1907,12 @@ void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
} else if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
- tcg_gen_op6ii_i32(INDEX_op_brcond2_i32,
- TCGV_LOW(arg1), TCGV_HIGH(arg1),
- tcg_constant_i32(arg2),
- tcg_constant_i32(arg2 >> 32),
- cond, label_arg(l));
- add_last_as_label_use(l);
+ TCGOp *op = tcg_gen_op6ii_i32(INDEX_op_brcond2_i32,
+ TCGV_LOW(arg1), TCGV_HIGH(arg1),
+ tcg_constant_i32(arg2),
+ tcg_constant_i32(arg2 >> 32),
+ cond, label_arg(l));
+ add_as_label_use(l, op);
}
}
@@ -1969,7 +1930,7 @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
- tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i64(INDEX_op_setcond, ret, arg1, arg2, cond);
}
}
}
@@ -2005,17 +1966,14 @@ void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
tcg_gen_movi_i64(ret, -1);
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i64(ret, 0);
- } else if (TCG_TARGET_HAS_negsetcond_i64) {
- tcg_gen_op4i_i64(INDEX_op_negsetcond_i64, ret, arg1, arg2, cond);
- } else if (TCG_TARGET_REG_BITS == 32) {
+ } else if (TCG_TARGET_REG_BITS == 64) {
+ tcg_gen_op4i_i64(INDEX_op_negsetcond, ret, arg1, arg2, cond);
+ } else {
tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1),
TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
tcg_gen_neg_i32(TCGV_LOW(ret), TCGV_LOW(ret));
tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_LOW(ret));
- } else {
- tcg_gen_setcond_i64(cond, ret, arg1, arg2);
- tcg_gen_neg_i64(ret, ret);
}
}
@@ -2032,12 +1990,12 @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_div_i64) {
- tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i64) {
+ if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t0, arg1, 63);
- tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
+ tcg_gen_op5_i64(INDEX_op_divs2, ret, t0, arg1, t0, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_div_i64(ret, arg1, arg2);
@@ -2046,18 +2004,18 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_rem_i64) {
- tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i64) {
+ if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_rems, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
tcg_gen_mul_i64(t0, t0, arg2);
tcg_gen_sub_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
- } else if (TCG_TARGET_HAS_div2_i64) {
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t0, arg1, 63);
- tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
+ tcg_gen_op5_i64(INDEX_op_divs2, t0, ret, arg1, t0, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_rem_i64(ret, arg1, arg2);
@@ -2066,12 +2024,12 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_div_i64) {
- tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i64) {
+ if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2);
+ tcg_gen_op5_i64(INDEX_op_divu2, ret, t0, arg1, zero, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_divu_i64(ret, arg1, arg2);
@@ -2080,18 +2038,18 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_rem_i64) {
- tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i64) {
+ if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_remu, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2);
tcg_gen_mul_i64(t0, t0, arg2);
tcg_gen_sub_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
- } else if (TCG_TARGET_HAS_div2_i64) {
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2);
+ tcg_gen_op5_i64(INDEX_op_divu2, t0, ret, arg1, zero, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_remu_i64(ret, arg1, arg2);
@@ -2100,77 +2058,32 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
- } else if (TCG_TARGET_HAS_ext8s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 56);
- tcg_gen_sari_i64(ret, ret, 56);
- }
+ tcg_gen_sextract_i64(ret, arg, 0, 8);
}
void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
- } else if (TCG_TARGET_HAS_ext16s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 48);
- tcg_gen_sari_i64(ret, ret, 48);
- }
+ tcg_gen_sextract_i64(ret, arg, 0, 16);
}
void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
- } else if (TCG_TARGET_HAS_ext32s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 32);
- tcg_gen_sari_i64(ret, ret, 32);
- }
+ tcg_gen_sextract_i64(ret, arg, 0, 32);
}
void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
- } else if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffu);
- }
+ tcg_gen_extract_i64(ret, arg, 0, 8);
}
void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
- } else if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffffu);
- }
+ tcg_gen_extract_i64(ret, arg, 0, 16);
}
void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
- } else if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffffffffu);
- }
+ tcg_gen_extract_i64(ret, arg, 0, 32);
}
/*
@@ -2193,8 +2106,8 @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else {
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
- } else if (TCG_TARGET_HAS_bswap16_i64) {
- tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
+ } else if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap16, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -2243,8 +2156,8 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else {
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
- } else if (TCG_TARGET_HAS_bswap32_i64) {
- tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
+ } else if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap32, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -2290,8 +2203,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- } else if (TCG_TARGET_HAS_bswap64_i64) {
- tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
+ } else if (tcg_op_supported(INDEX_op_bswap64, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap64, ret, arg, 0);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -2362,8 +2275,8 @@ void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
- } else if (TCG_TARGET_HAS_not_i64) {
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
+ } else if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg);
} else {
tcg_gen_xori_i64(ret, arg, -1);
}
@@ -2374,8 +2287,8 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_andc_i64) {
- tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_andc, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_not_i64(t0, arg2);
@@ -2389,8 +2302,8 @@ void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_eqv_i64) {
- tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_eqv, ret, arg1, arg2);
} else {
tcg_gen_xor_i64(ret, arg1, arg2);
tcg_gen_not_i64(ret, ret);
@@ -2402,8 +2315,8 @@ void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_nand_i64) {
- tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_nand, ret, arg1, arg2);
} else {
tcg_gen_and_i64(ret, arg1, arg2);
tcg_gen_not_i64(ret, ret);
@@ -2415,8 +2328,8 @@ void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_nor_i64) {
- tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_nor, ret, arg1, arg2);
} else {
tcg_gen_or_i64(ret, arg1, arg2);
tcg_gen_not_i64(ret, ret);
@@ -2428,8 +2341,8 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_orc_i64) {
- tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_orc, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_not_i64(t0, arg2);
@@ -2440,8 +2353,8 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_clz_i64) {
- tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_clz, ret, arg1, arg2);
} else {
gen_helper_clz_i64(ret, arg1, arg2);
}
@@ -2450,8 +2363,8 @@ void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
if (TCG_TARGET_REG_BITS == 32
- && TCG_TARGET_HAS_clz_i32
- && arg2 <= 0xffffffffu) {
+ && arg2 <= 0xffffffffu
+ && tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
tcg_gen_addi_i32(t, t, 32);
@@ -2465,45 +2378,47 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_ctz_i64) {
- tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
- TCGv_i64 z, t = tcg_temp_ebb_new_i64();
+ TCGv_i64 z, t;
- if (TCG_TARGET_HAS_ctpop_i64) {
- tcg_gen_subi_i64(t, arg1, 1);
- tcg_gen_andc_i64(t, t, arg1);
- tcg_gen_ctpop_i64(t, t);
- } else {
- /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
- tcg_gen_neg_i64(t, arg1);
- tcg_gen_and_i64(t, t, arg1);
- tcg_gen_clzi_i64(t, t, 64);
- tcg_gen_xori_i64(t, t, 63);
- }
- z = tcg_constant_i64(0);
- tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
- tcg_temp_free_i64(t);
- tcg_temp_free_i64(z);
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_ctz, ret, arg1, arg2);
+ return;
+ }
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
+ t = tcg_temp_ebb_new_i64();
+ tcg_gen_subi_i64(t, arg1, 1);
+ tcg_gen_andc_i64(t, t, arg1);
+ tcg_gen_ctpop_i64(t, t);
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
+ t = tcg_temp_ebb_new_i64();
+ tcg_gen_neg_i64(t, arg1);
+ tcg_gen_and_i64(t, t, arg1);
+ tcg_gen_clzi_i64(t, t, 64);
+ tcg_gen_xori_i64(t, t, 63);
} else {
gen_helper_ctz_i64(ret, arg1, arg2);
+ return;
}
+
+ z = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i64(t);
}
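
Both fallbacks above rely on standard bit tricks, valid for x != 0 (the x == 0
case is routed through the movcond so that arg2 is returned): (x - 1) & ~x
turns the trailing zeros of x into a low mask whose population count is
ctz(x), while x & -x isolates the lowest set bit, so clz of it xored with 63
is again ctz(x). A hypothetical standalone illustration with the
corresponding builtins:

    #include <stdint.h>

    static unsigned ctz64_via_ctpop(uint64_t x)   /* x != 0 */
    {
        return __builtin_popcountll((x - 1) & ~x);    /* 8 -> popcount(7) = 3 */
    }

    static unsigned ctz64_via_clz(uint64_t x)     /* x != 0 */
    {
        return __builtin_clzll(x & -x) ^ 63;          /* 8 -> clz(8) = 60, ^63 = 3 */
    }
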
void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
if (TCG_TARGET_REG_BITS == 32
- && TCG_TARGET_HAS_ctz_i32
- && arg2 <= 0xffffffffu) {
+ && arg2 <= 0xffffffffu
+ && tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)) {
TCGv_i32 t32 = tcg_temp_ebb_new_i32();
tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
tcg_gen_addi_i32(t32, t32, 32);
tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
tcg_temp_free_i32(t32);
- } else if (!TCG_TARGET_HAS_ctz_i64
- && TCG_TARGET_HAS_ctpop_i64
- && arg2 == 64) {
+ } else if (arg2 == 64
+ && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)
+ && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
@@ -2517,7 +2432,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t, arg, 63);
tcg_gen_xor_i64(t, t, arg);
@@ -2531,28 +2446,37 @@ void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
{
- if (TCG_TARGET_HAS_ctpop_i64) {
- tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
- } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
- tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
- tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
- tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
+ tcg_gen_op2_i64(INDEX_op_ctpop, ret, arg1);
+ return;
+ }
} else {
- gen_helper_ctpop_i64(ret, arg1);
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) {
+ tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ return;
+ }
}
+ gen_helper_ctpop_i64(ret, arg1);
}
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_rot_i64) {
- tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ tcg_gen_neg_i64(t0, arg2);
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0);
+ tcg_temp_free_i64(t0);
} else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_ebb_new_i64();
- t1 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_shl_i64(t0, arg1, arg2);
- tcg_gen_subfi_i64(t1, 64, arg2);
+ tcg_gen_neg_i64(t1, arg2);
tcg_gen_shr_i64(t1, arg1, t1);
tcg_gen_or_i64(ret, t0, t1);
tcg_temp_free_i64(t0);
@@ -2566,12 +2490,15 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
- } else if (TCG_TARGET_HAS_rot_i64) {
- tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2));
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
+ TCGv_i64 t0 = tcg_constant_i64(arg2);
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0);
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
+ TCGv_i64 t0 = tcg_constant_i64(64 - arg2);
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0);
} else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_ebb_new_i64();
- t1 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_shli_i64(t0, arg1, arg2);
tcg_gen_shri_i64(t1, arg1, 64 - arg2);
tcg_gen_or_i64(ret, t0, t1);
@@ -2582,14 +2509,18 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_rot_i64) {
- tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ tcg_gen_neg_i64(t0, arg2);
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0);
+ tcg_temp_free_i64(t0);
} else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_ebb_new_i64();
- t1 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_shr_i64(t0, arg1, arg2);
- tcg_gen_subfi_i64(t1, 64, arg2);
+ tcg_gen_neg_i64(t1, arg2);
tcg_gen_shl_i64(t1, arg1, t1);
tcg_gen_or_i64(ret, t0, t1);
tcg_temp_free_i64(t0);
@@ -2600,12 +2531,7 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
tcg_debug_assert(arg2 >= 0 && arg2 < 64);
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
- }
+ tcg_gen_rotli_i64(ret, arg1, -arg2 & 63);
}
void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
@@ -2623,12 +2549,13 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
tcg_gen_mov_i64(ret, arg2);
return;
}
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
- return;
- }
- if (TCG_TARGET_REG_BITS == 32) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
+ tcg_gen_op5ii_i64(INDEX_op_deposit, ret, arg1, arg2, ofs, len);
+ return;
+ }
+ } else {
if (ofs >= 32) {
tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
TCGV_LOW(arg2), ofs - 32, len);
@@ -2645,7 +2572,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
t1 = tcg_temp_ebb_new_i64();
- if (TCG_TARGET_HAS_extract2_i64) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
if (ofs + len == 64) {
tcg_gen_shli_i64(t1, arg1, len);
tcg_gen_extract2_i64(ret, t1, arg2, len);
@@ -2683,10 +2610,10 @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
tcg_gen_shli_i64(ret, arg, ofs);
} else if (ofs == 0) {
tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
- } else if (TCG_TARGET_HAS_deposit_i64
- && TCG_TARGET_deposit_i64_valid(ofs, len)) {
+ } else if (TCG_TARGET_REG_BITS == 64 &&
+ TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
+ tcg_gen_op5ii_i64(INDEX_op_deposit, ret, zero, arg, ofs, len);
} else {
if (TCG_TARGET_REG_BITS == 32) {
if (ofs >= 32) {
@@ -2701,54 +2628,20 @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
return;
}
}
- /* To help two-operand hosts we prefer to zero-extend first,
- which allows ARG to stay live. */
- switch (len) {
- case 32:
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_ext32u_i64(ret, arg);
- tcg_gen_shli_i64(ret, ret, ofs);
- return;
- }
- break;
- case 16:
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_ext16u_i64(ret, arg);
- tcg_gen_shli_i64(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_ext8u_i64(ret, arg);
- tcg_gen_shli_i64(ret, ret, ofs);
- return;
- }
- break;
+ /*
+ * To help two-operand hosts we prefer to zero-extend first,
+ * which allows ARG to stay live.
+ */
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) {
+ tcg_gen_extract_i64(ret, arg, 0, len);
+ tcg_gen_shli_i64(ret, ret, ofs);
+ return;
}
/* Otherwise prefer zero-extension over AND for code size. */
- switch (ofs + len) {
- case 32:
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_shli_i64(ret, arg, ofs);
- tcg_gen_ext32u_i64(ret, ret);
- return;
- }
- break;
- case 16:
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_shli_i64(ret, arg, ofs);
- tcg_gen_ext16u_i64(ret, ret);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_shli_i64(ret, arg, ofs);
- tcg_gen_ext8u_i64(ret, ret);
- return;
- }
- break;
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
+ tcg_gen_shli_i64(ret, arg, ofs);
+ tcg_gen_extract_i64(ret, ret, 0, ofs + len);
+ return;
}
tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
tcg_gen_shli_i64(ret, ret, ofs);
@@ -2768,10 +2661,6 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
tcg_gen_shri_i64(ret, arg, 64 - len);
return;
}
- if (ofs == 0) {
- tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
- return;
- }
if (TCG_TARGET_REG_BITS == 32) {
/* Look for a 32-bit extract within one of the two words. */
@@ -2785,40 +2674,34 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
return;
}
- /* The field is split across two words. One double-word
- shift is better than two double-word shifts. */
- goto do_shift_and;
+
+ /* The field is split across two words. */
+ tcg_gen_extract2_i32(TCGV_LOW(ret), TCGV_LOW(arg),
+ TCGV_HIGH(arg), ofs);
+ if (len <= 32) {
+ tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(ret), 0, len);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else {
+ tcg_gen_extract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg),
+ ofs, len - 32);
+ }
+ return;
}
- if (TCG_TARGET_HAS_extract_i64
- && TCG_TARGET_extract_i64_valid(ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, ofs, len);
+ return;
+ }
+ if (ofs == 0) {
+ tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
return;
}
/* Assume that zero-extension, if available, is cheaper than a shift. */
- switch (ofs + len) {
- case 32:
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_ext32u_i64(ret, arg);
- tcg_gen_shri_i64(ret, ret, ofs);
- return;
- }
- break;
- case 16:
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_ext16u_i64(ret, arg);
- tcg_gen_shri_i64(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_ext8u_i64(ret, arg);
- tcg_gen_shri_i64(ret, ret, ofs);
- return;
- }
- break;
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, 0, ofs + len);
+ tcg_gen_shri_i64(ret, ret, ofs);
+ return;
}
/* ??? Ideally we'd know what values are available for immediate AND.
@@ -2826,7 +2709,6 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
so that we get ext8u, ext16u, and ext32u. */
switch (len) {
case 1 ... 8: case 16: case 32:
- do_shift_and:
tcg_gen_shri_i64(ret, arg, ofs);
tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
break;
@@ -2850,19 +2732,6 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
tcg_gen_sari_i64(ret, arg, 64 - len);
return;
}
- if (ofs == 0) {
- switch (len) {
- case 32:
- tcg_gen_ext32s_i64(ret, arg);
- return;
- case 16:
- tcg_gen_ext16s_i64(ret, arg);
- return;
- case 8:
- tcg_gen_ext8s_i64(ret, arg);
- return;
- }
- }
if (TCG_TARGET_REG_BITS == 32) {
/* Look for a 32-bit extract within one of the two words. */
@@ -2896,59 +2765,23 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
return;
}
- if (TCG_TARGET_HAS_sextract_i64
- && TCG_TARGET_extract_i64_valid(ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, ofs, len)) {
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, ofs, len);
return;
}
/* Assume that sign-extension, if available, is cheaper than a shift. */
- switch (ofs + len) {
- case 32:
- if (TCG_TARGET_HAS_ext32s_i64) {
- tcg_gen_ext32s_i64(ret, arg);
- tcg_gen_sari_i64(ret, ret, ofs);
- return;
- }
- break;
- case 16:
- if (TCG_TARGET_HAS_ext16s_i64) {
- tcg_gen_ext16s_i64(ret, arg);
- tcg_gen_sari_i64(ret, ret, ofs);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8s_i64) {
- tcg_gen_ext8s_i64(ret, arg);
- tcg_gen_sari_i64(ret, ret, ofs);
- return;
- }
- break;
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, ofs + len)) {
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, 0, ofs + len);
+ tcg_gen_sari_i64(ret, ret, ofs);
+ return;
}
- switch (len) {
- case 32:
- if (TCG_TARGET_HAS_ext32s_i64) {
- tcg_gen_shri_i64(ret, arg, ofs);
- tcg_gen_ext32s_i64(ret, ret);
- return;
- }
- break;
- case 16:
- if (TCG_TARGET_HAS_ext16s_i64) {
- tcg_gen_shri_i64(ret, arg, ofs);
- tcg_gen_ext16s_i64(ret, ret);
- return;
- }
- break;
- case 8:
- if (TCG_TARGET_HAS_ext8s_i64) {
- tcg_gen_shri_i64(ret, arg, ofs);
- tcg_gen_ext8s_i64(ret, ret);
- return;
- }
- break;
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, len)) {
+ tcg_gen_shri_i64(ret, arg, ofs);
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, ret, 0, len);
+ return;
}
+
tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
tcg_gen_sari_i64(ret, ret, 64 - len);
}
@@ -2967,8 +2800,8 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
tcg_gen_mov_i64(ret, ah);
} else if (al == ah) {
tcg_gen_rotri_i64(ret, al, ofs);
- } else if (TCG_TARGET_HAS_extract2_i64) {
- tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4i_i64(INDEX_op_extract2, ret, al, ah, ofs);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(t0, al, ofs);
@@ -2985,7 +2818,7 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_mov_i64(ret, v2);
} else if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
+ tcg_gen_op6i_i64(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 zero = tcg_constant_i32(0);
@@ -3006,8 +2839,25 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
- if (TCG_TARGET_HAS_add2_i64) {
- tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_REG, 0)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op3_i32(INDEX_op_addco, TCGV_LOW(t0),
+ TCGV_LOW(al), TCGV_LOW(bl));
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(t0),
+ TCGV_HIGH(al), TCGV_HIGH(bl));
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(rh),
+ TCGV_LOW(ah), TCGV_LOW(bh));
+ tcg_gen_op3_i32(INDEX_op_addci, TCGV_HIGH(rh),
+ TCGV_HIGH(ah), TCGV_HIGH(bh));
+ } else {
+ tcg_gen_op3_i64(INDEX_op_addco, t0, al, bl);
+ tcg_gen_op3_i64(INDEX_op_addci, rh, ah, bh);
+ }
+
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -3021,11 +2871,96 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
}
}
+void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co,
+ TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci)
+{
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I64, 0)) {
+ TCGv_i64 discard = tcg_temp_ebb_new_i64();
+ TCGv_i64 zero = tcg_constant_i64(0);
+ TCGv_i64 mone = tcg_constant_i64(-1);
+
+ tcg_gen_op3_i64(INDEX_op_addco, discard, ci, mone);
+ tcg_gen_op3_i64(INDEX_op_addcio, r, a, b);
+ tcg_gen_op3_i64(INDEX_op_addci, co, zero, zero);
+ tcg_temp_free_i64(discard);
+ } else {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+
+ tcg_gen_add_i64(t0, a, b);
+ tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, a);
+ tcg_gen_add_i64(r, t0, ci);
+ tcg_gen_setcond_i64(TCG_COND_LTU, t0, r, t0);
+ tcg_gen_or_i64(co, t0, t1);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+ } else {
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
+ TCGv_i32 discard = tcg_temp_ebb_new_i32();
+ TCGv_i32 zero = tcg_constant_i32(0);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+
+ tcg_gen_op3_i32(INDEX_op_addco, discard, TCGV_LOW(ci), mone);
+ tcg_gen_op3_i32(INDEX_op_addcio, discard, TCGV_HIGH(ci), mone);
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(r),
+ TCGV_LOW(a), TCGV_LOW(b));
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(r),
+ TCGV_HIGH(a), TCGV_HIGH(b));
+ tcg_gen_op3_i32(INDEX_op_addci, TCGV_LOW(co), zero, zero);
+ tcg_temp_free_i32(discard);
+ } else {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 c0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 c1 = tcg_temp_ebb_new_i32();
+
+ tcg_gen_or_i32(c1, TCGV_LOW(ci), TCGV_HIGH(ci));
+ tcg_gen_setcondi_i32(TCG_COND_NE, c1, c1, 0);
+
+ tcg_gen_add_i32(t0, TCGV_LOW(a), TCGV_LOW(b));
+ tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_LOW(a));
+ tcg_gen_add_i32(TCGV_LOW(r), t0, c1);
+ tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_LOW(r), c1);
+ tcg_gen_or_i32(c1, c1, c0);
+
+ tcg_gen_add_i32(t0, TCGV_HIGH(a), TCGV_HIGH(b));
+ tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_HIGH(a));
+ tcg_gen_add_i32(TCGV_HIGH(r), t0, c1);
+ tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_HIGH(r), c1);
+ tcg_gen_or_i32(TCGV_LOW(co), c0, c1);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(c0);
+ tcg_temp_free_i32(c1);
+ }
+ tcg_gen_movi_i32(TCGV_HIGH(co), 0);
+ }
+}
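
A hypothetical host-side sketch of the 32-bit generic fallback above: the
64-bit carry-in is first normalized to 0/1 (the setcondi NE), then two 32-bit
add-with-carry steps are chained with the same LTU trick as in the i32 case,
and the carry-out is produced as 0 or 1 with its high half cleared:

    #include <stdint.h>

    static uint64_t addcio64_on_32bit_host(uint64_t a, uint64_t b,
                                           uint64_t ci, uint64_t *co)
    {
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);
        uint32_t c1 = (ci != 0);                /* or + setcondi NE above */

        uint32_t t0 = al + bl;
        uint32_t c0 = t0 < al;                  /* carry out of al + bl */
        uint32_t rl = t0 + c1;
        c1 = (rl < c1) | c0;                    /* carry out of the low word */

        t0 = ah + bh;
        c0 = t0 < ah;                           /* carry out of ah + bh */
        uint32_t rh = t0 + c1;
        c1 = (rh < c1) | c0;                    /* carry out of the high word */

        *co = c1;                               /* high half already zero */
        return ((uint64_t)rh << 32) | rl;
    }
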
+
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
- if (TCG_TARGET_HAS_sub2_i64) {
- tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
+ if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_REG, 0)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op3_i32(INDEX_op_subbo, TCGV_LOW(t0),
+ TCGV_LOW(al), TCGV_LOW(bl));
+ tcg_gen_op3_i32(INDEX_op_subbio, TCGV_HIGH(t0),
+ TCGV_HIGH(al), TCGV_HIGH(bl));
+ tcg_gen_op3_i32(INDEX_op_subbio, TCGV_LOW(rh),
+ TCGV_LOW(ah), TCGV_LOW(bh));
+ tcg_gen_op3_i32(INDEX_op_subbi, TCGV_HIGH(rh),
+ TCGV_HIGH(ah), TCGV_HIGH(bh));
+ } else {
+ tcg_gen_op3_i64(INDEX_op_subbo, t0, al, bl);
+ tcg_gen_op3_i64(INDEX_op_subbi, rh, ah, bh);
+ }
+
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -3041,12 +2976,12 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_mulu2_i64) {
- tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
- } else if (TCG_TARGET_HAS_muluh_i64) {
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4_i64(INDEX_op_mulu2, rl, rh, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
- tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_muluh, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
} else {
@@ -3060,15 +2995,16 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_muls2_i64) {
- tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
- } else if (TCG_TARGET_HAS_mulsh_i64) {
+ if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4_i64(INDEX_op_muls2, rl, rh, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
- tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
- } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
+ } else if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0) ||
+ tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
@@ -3147,11 +3083,9 @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
- } else if (TCG_TARGET_HAS_extr_i64_i32) {
- tcg_gen_op2(INDEX_op_extrl_i64_i32,
- tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
- tcg_gen_mov_i32(ret, (TCGv_i32)arg);
+ tcg_gen_op2(INDEX_op_extrl_i64_i32, TCG_TYPE_I32,
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
}
}
@@ -3159,14 +3093,9 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
- } else if (TCG_TARGET_HAS_extr_i64_i32) {
- tcg_gen_op2(INDEX_op_extrh_i64_i32,
- tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_shri_i64(t, arg, 32);
- tcg_gen_mov_i32(ret, (TCGv_i32)t);
- tcg_temp_free_i64(t);
+ tcg_gen_op2(INDEX_op_extrh_i64_i32, TCG_TYPE_I32,
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
}
}
@@ -3176,7 +3105,7 @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
- tcg_gen_op2(INDEX_op_extu_i32_i64,
+ tcg_gen_op2(INDEX_op_extu_i32_i64, TCG_TYPE_I64,
tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
@@ -3187,7 +3116,7 @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
} else {
- tcg_gen_op2(INDEX_op_ext_i32_i64,
+ tcg_gen_op2(INDEX_op_ext_i32_i64, TCG_TYPE_I64,
tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
@@ -3209,7 +3138,7 @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
tcg_gen_extu_i32_i64(dest, low);
/* If deposit is available, use it. Otherwise use the extra
knowledge that we have of the zero-extensions above. */
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, 32, 32)) {
tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
} else {
tcg_gen_shli_i64(tmp, tmp, 32);
@@ -3312,7 +3241,7 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
}
- tcg_gen_op1i(INDEX_op_exit_tb, val);
+ tcg_gen_op1i(INDEX_op_exit_tb, 0, val);
}
void tcg_gen_goto_tb(unsigned idx)
@@ -3327,7 +3256,7 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
plugin_gen_disable_mem_helpers();
- tcg_gen_op1i(INDEX_op_goto_tb, idx);
+ tcg_gen_op1i(INDEX_op_goto_tb, 0, idx);
}
void tcg_gen_lookup_and_goto_ptr(void)
@@ -3342,6 +3271,6 @@ void tcg_gen_lookup_and_goto_ptr(void)
plugin_gen_disable_mem_helpers();
ptr = tcg_temp_ebb_new_ptr();
gen_helper_lookup_tb_ptr(ptr, tcg_env);
- tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
+ tcg_gen_op1i(INDEX_op_goto_ptr, TCG_TYPE_PTR, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
}
diff --git a/tcg/tcg-pool.c.inc b/tcg/tcg-pool.c.inc
deleted file mode 100644
index 90c2e63..0000000
--- a/tcg/tcg-pool.c.inc
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * TCG Backend Data: constant pool.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-typedef struct TCGLabelPoolData {
- struct TCGLabelPoolData *next;
- tcg_insn_unit *label;
- intptr_t addend;
- int rtype;
- unsigned nlong;
- tcg_target_ulong data[];
-} TCGLabelPoolData;
-
-
-static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype,
- tcg_insn_unit *label, intptr_t addend)
-{
- TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData)
- + sizeof(tcg_target_ulong) * nlong);
-
- n->label = label;
- n->addend = addend;
- n->rtype = rtype;
- n->nlong = nlong;
- return n;
-}
-
-static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n)
-{
- TCGLabelPoolData *i, **pp;
- int nlong = n->nlong;
-
- /* Insertion sort on the pool. */
- for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) {
- if (nlong > i->nlong) {
- break;
- }
- if (nlong < i->nlong) {
- continue;
- }
- if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) {
- break;
- }
- }
- n->next = *pp;
- *pp = n;
-}
-
-/* The "usual" for generic integer code. */
-static inline void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype,
- tcg_insn_unit *label, intptr_t addend)
-{
- TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend);
- n->data[0] = d;
- new_pool_insert(s, n);
-}
-
-/* For v64 or v128, depending on the host. */
-static inline void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label,
- intptr_t addend, tcg_target_ulong d0,
- tcg_target_ulong d1)
-{
- TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend);
- n->data[0] = d0;
- n->data[1] = d1;
- new_pool_insert(s, n);
-}
-
-/* For v128 or v256, depending on the host. */
-static inline void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label,
- intptr_t addend, tcg_target_ulong d0,
- tcg_target_ulong d1, tcg_target_ulong d2,
- tcg_target_ulong d3)
-{
- TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend);
- n->data[0] = d0;
- n->data[1] = d1;
- n->data[2] = d2;
- n->data[3] = d3;
- new_pool_insert(s, n);
-}
-
-/* For v256, for 32-bit host. */
-static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
- intptr_t addend, tcg_target_ulong d0,
- tcg_target_ulong d1, tcg_target_ulong d2,
- tcg_target_ulong d3, tcg_target_ulong d4,
- tcg_target_ulong d5, tcg_target_ulong d6,
- tcg_target_ulong d7)
-{
- TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend);
- n->data[0] = d0;
- n->data[1] = d1;
- n->data[2] = d2;
- n->data[3] = d3;
- n->data[4] = d4;
- n->data[5] = d5;
- n->data[6] = d6;
- n->data[7] = d7;
- new_pool_insert(s, n);
-}
-
-/* To be provided by cpu/tcg-target.c.inc. */
-static void tcg_out_nop_fill(tcg_insn_unit *p, int count);
-
-static int tcg_out_pool_finalize(TCGContext *s)
-{
- TCGLabelPoolData *p = s->pool_labels;
- TCGLabelPoolData *l = NULL;
- void *a;
-
- if (p == NULL) {
- return 0;
- }
-
- /* ??? Round up to qemu_icache_linesize, but then do not round
- again when allocating the next TranslationBlock structure. */
- a = (void *)ROUND_UP((uintptr_t)s->code_ptr,
- sizeof(tcg_target_ulong) * p->nlong);
- tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr);
- s->data_gen_ptr = a;
-
- for (; p != NULL; p = p->next) {
- size_t size = sizeof(tcg_target_ulong) * p->nlong;
- uintptr_t value;
-
- if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
- if (unlikely(a > s->code_gen_highwater)) {
- return -1;
- }
- memcpy(a, p->data, size);
- a += size;
- l = p;
- }
-
- value = (uintptr_t)tcg_splitwx_to_rx(a) - size;
- if (!patch_reloc(p->label, p->rtype, value, p->addend)) {
- return -2;
- }
- }
-
- s->code_ptr = a;
- return 0;
-}
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 34e3056..50d40b9 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -34,6 +34,7 @@
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
+#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/startup.h"
@@ -56,6 +57,7 @@
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "tcg/perf.h"
+#include "tcg-has.h"
#ifdef CONFIG_USER_ONLY
#include "user/guest-base.h"
#endif
@@ -66,6 +68,11 @@ static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend);
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count);
+
+typedef struct TCGLabelQemuLdst TCGLabelQemuLdst;
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
@@ -90,18 +97,17 @@ typedef struct QEMU_PACKED {
DebugFrameFDEHeader fde;
} DebugFrameHeader;
-typedef struct TCGLabelQemuLdst {
+struct TCGLabelQemuLdst {
bool is_ld; /* qemu_ld: true, qemu_st: false */
MemOpIdx oi;
TCGType type; /* result type of a load */
- TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
- TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
+ TCGReg addr_reg; /* reg index for guest virtual addr */
TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
-} TCGLabelQemuLdst;
+};
static void tcg_register_jit_int(const void *buf, size_t size,
const void *debug_frame,
@@ -128,9 +134,11 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS]);
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg dest);
+static void tcg_out_mb(TCGContext *s, unsigned bar);
+static void tcg_out_br(TCGContext *s, TCGLabel *l);
+static void tcg_out_set_carry(TCGContext *s);
+static void tcg_out_set_borrow(TCGContext *s);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src);
@@ -165,6 +173,10 @@ static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
{
g_assert_not_reached();
}
+int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
+{
+ return 0;
+}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
intptr_t arg2);
@@ -175,9 +187,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, int ct,
TCGType type, TCGCond cond, int vece);
-#ifdef TCG_TARGET_NEED_LDST_LABELS
-static int tcg_out_ldst_finalize(TCGContext *s);
-#endif
#ifndef CONFIG_USER_ONLY
#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; })
@@ -634,6 +643,197 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
}
}
+/*
+ * Allocate a new TCGLabelQemuLdst entry.
+ */
+
+__attribute__((unused))
+static TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
+{
+ TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
+
+ memset(l, 0, sizeof(*l));
+ QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
+
+ return l;
+}
+
+/*
+ * Allocate new constant pool entries.
+ */
+
+typedef struct TCGLabelPoolData {
+ struct TCGLabelPoolData *next;
+ tcg_insn_unit *label;
+ intptr_t addend;
+ int rtype;
+ unsigned nlong;
+ tcg_target_ulong data[];
+} TCGLabelPoolData;
+
+static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype,
+ tcg_insn_unit *label, intptr_t addend)
+{
+ TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData)
+ + sizeof(tcg_target_ulong) * nlong);
+
+ n->label = label;
+ n->addend = addend;
+ n->rtype = rtype;
+ n->nlong = nlong;
+ return n;
+}
+
+static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n)
+{
+ TCGLabelPoolData *i, **pp;
+ int nlong = n->nlong;
+
+ /* Insertion sort on the pool. */
+ for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) {
+ if (nlong > i->nlong) {
+ break;
+ }
+ if (nlong < i->nlong) {
+ continue;
+ }
+ if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) {
+ break;
+ }
+ }
+ n->next = *pp;
+ *pp = n;
+}
+
+/* The "usual" for generic integer code. */
+__attribute__((unused))
+static void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype,
+ tcg_insn_unit *label, intptr_t addend)
+{
+ TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend);
+ n->data[0] = d;
+ new_pool_insert(s, n);
+}
+
+/* For v64 or v128, depending on the host. */
+__attribute__((unused))
+static void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label,
+ intptr_t addend, tcg_target_ulong d0,
+ tcg_target_ulong d1)
+{
+ TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend);
+ n->data[0] = d0;
+ n->data[1] = d1;
+ new_pool_insert(s, n);
+}
+
+/* For v128 or v256, depending on the host. */
+__attribute__((unused))
+static void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label,
+ intptr_t addend, tcg_target_ulong d0,
+ tcg_target_ulong d1, tcg_target_ulong d2,
+ tcg_target_ulong d3)
+{
+ TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend);
+ n->data[0] = d0;
+ n->data[1] = d1;
+ n->data[2] = d2;
+ n->data[3] = d3;
+ new_pool_insert(s, n);
+}
+
+/* For v256, for 32-bit host. */
+__attribute__((unused))
+static void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
+ intptr_t addend, tcg_target_ulong d0,
+ tcg_target_ulong d1, tcg_target_ulong d2,
+ tcg_target_ulong d3, tcg_target_ulong d4,
+ tcg_target_ulong d5, tcg_target_ulong d6,
+ tcg_target_ulong d7)
+{
+ TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend);
+ n->data[0] = d0;
+ n->data[1] = d1;
+ n->data[2] = d2;
+ n->data[3] = d3;
+ n->data[4] = d4;
+ n->data[5] = d5;
+ n->data[6] = d6;
+ n->data[7] = d7;
+ new_pool_insert(s, n);
+}
+
+/*
+ * Generate TB finalization at the end of block
+ */
+
+static int tcg_out_ldst_finalize(TCGContext *s)
+{
+ TCGLabelQemuLdst *lb;
+
+ /* qemu_ld/st slow paths */
+ QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
+ if (lb->is_ld
+ ? !tcg_out_qemu_ld_slow_path(s, lb)
+ : !tcg_out_qemu_st_slow_path(s, lb)) {
+ return -2;
+ }
+
+ /*
+ * Test for (pending) buffer overflow. The assumption is that any
+ * one operation beginning below the high water mark cannot overrun
+ * the buffer completely. Thus we can test for overflow after
+ * generating code without having to check during generation.
+ */
+ if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int tcg_out_pool_finalize(TCGContext *s)
+{
+ TCGLabelPoolData *p = s->pool_labels;
+ TCGLabelPoolData *l = NULL;
+ void *a;
+
+ if (p == NULL) {
+ return 0;
+ }
+
+ /*
+ * ??? Round up to qemu_icache_linesize, but then do not round
+ * again when allocating the next TranslationBlock structure.
+ */
+ a = (void *)ROUND_UP((uintptr_t)s->code_ptr,
+ sizeof(tcg_target_ulong) * p->nlong);
+ tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr);
+ s->data_gen_ptr = a;
+
+ for (; p != NULL; p = p->next) {
+ size_t size = sizeof(tcg_target_ulong) * p->nlong;
+ uintptr_t value;
+
+ if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
+ if (unlikely(a > s->code_gen_highwater)) {
+ return -1;
+ }
+ memcpy(a, p->data, size);
+ a += size;
+ l = p;
+ }
+
+ value = (uintptr_t)tcg_splitwx_to_rx(a) - size;
+ if (!patch_reloc(p->label, p->rtype, value, p->addend)) {
+ return -2;
+ }
+ }
+
+ s->code_ptr = a;
+ return 0;
+}
+
#define C_PFX1(P, A) P##A
#define C_PFX2(P, A, B) P##A##_##B
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
@@ -664,10 +864,12 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
typedef enum {
+ C_Dynamic = -2,
+ C_NotImplemented = -1,
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode, TCGType, unsigned);
#undef C_O0_I1
#undef C_O0_I2
@@ -688,31 +890,35 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
/* Put all of the constraint sets into an array, indexed by the enum. */
-#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
-#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
-#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
-#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
+typedef struct TCGConstraintSet {
+ uint8_t nb_oargs, nb_iargs;
+ const char *args_ct_str[TCG_MAX_OP_ARGS];
+} TCGConstraintSet;
+
+#define C_O0_I1(I1) { 0, 1, { #I1 } },
+#define C_O0_I2(I1, I2) { 0, 2, { #I1, #I2 } },
+#define C_O0_I3(I1, I2, I3) { 0, 3, { #I1, #I2, #I3 } },
+#define C_O0_I4(I1, I2, I3, I4) { 0, 4, { #I1, #I2, #I3, #I4 } },
-#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
-#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
-#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
-#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
+#define C_O1_I1(O1, I1) { 1, 1, { #O1, #I1 } },
+#define C_O1_I2(O1, I1, I2) { 1, 2, { #O1, #I1, #I2 } },
+#define C_O1_I3(O1, I1, I2, I3) { 1, 3, { #O1, #I1, #I2, #I3 } },
+#define C_O1_I4(O1, I1, I2, I3, I4) { 1, 4, { #O1, #I1, #I2, #I3, #I4 } },
-#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
-#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } },
-#define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },
+#define C_N1_I2(O1, I1, I2) { 1, 2, { "&" #O1, #I1, #I2 } },
+#define C_N1O1_I1(O1, O2, I1) { 2, 1, { "&" #O1, #O2, #I1 } },
+#define C_N2_I1(O1, O2, I1) { 2, 1, { "&" #O1, "&" #O2, #I1 } },
-#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
-#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
-#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
-#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
-#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
+#define C_O2_I1(O1, O2, I1) { 2, 1, { #O1, #O2, #I1 } },
+#define C_O2_I2(O1, O2, I1, I2) { 2, 2, { #O1, #O2, #I1, #I2 } },
+#define C_O2_I3(O1, O2, I1, I2, I3) { 2, 3, { #O1, #O2, #I1, #I2, #I3 } },
+#define C_O2_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { #O1, #O2, #I1, #I2, #I3, #I4 } },
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
-static const TCGTargetOpDef constraint_sets[] = {
+static const TCGConstraintSet constraint_sets[] = {
#include "tcg-target-con-set.h"
};
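With the operand counts now carried by the macros themselves, a representative line such as C_O1_I2(r, r, ri) in tcg-target-con-set.h (the concrete sets are target-specific) expands into an initializer like:

    { 1, 2, { "r", "r", "ri" } },   /* 1 output, 2 inputs, plus their constraint strings */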
-
#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
@@ -752,6 +958,164 @@ static const TCGTargetOpDef constraint_sets[] = {
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
+/*
+ * TCGOutOp is the base class for a set of structures that describe how
+ * to generate code for a given TCGOpcode.
+ *
+ * @static_constraint:
+ * C_NotImplemented: The TCGOpcode is not supported by the backend.
+ * C_Dynamic: Use @dynamic_constraint to select a constraint set
+ * based on any of @type, @flags, or host isa.
+ * Otherwise: The register allocation constraints for the TCGOpcode.
+ *
+ * Subclasses of TCGOutOp will define a set of output routines that may
+ * be used. Such routines will often be selected by the set of registers
+ * and constants that come out of register allocation. The set of
+ * routines that are provided will guide the set of constraints that are
+ * legal. In particular, assume that tcg_optimize() has done its job in
+ * swapping commutative operands and folding operations for which all
+ * operands are constant.
+ */
+typedef struct TCGOutOp {
+ TCGConstraintSetIndex static_constraint;
+ TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags);
+} TCGOutOp;
+
+typedef struct TCGOutOpAddSubCarry {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rri)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2);
+ void (*out_rir)(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2);
+ void (*out_rii)(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2);
+} TCGOutOpAddSubCarry;
+
+typedef struct TCGOutOpBinary {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rri)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2);
+} TCGOutOpBinary;
+
+typedef struct TCGOutOpBrcond {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a1, TCGReg a2, TCGLabel *label);
+ void (*out_ri)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a1, tcg_target_long a2, TCGLabel *label);
+} TCGOutOpBrcond;
+
+typedef struct TCGOutOpBrcond2 {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh, TCGLabel *l);
+} TCGOutOpBrcond2;
+
+typedef struct TCGOutOpBswap {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags);
+} TCGOutOpBswap;
+
+typedef struct TCGOutOpDeposit {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len);
+ void (*out_rri)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len);
+ void (*out_rzr)(TCGContext *s, TCGType type, TCGReg a0,
+ TCGReg a2, unsigned ofs, unsigned len);
+} TCGOutOpDeposit;
+
+typedef struct TCGOutOpDivRem {
+ TCGOutOp base;
+ void (*out_rr01r)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a4);
+} TCGOutOpDivRem;
+
+typedef struct TCGOutOpExtract {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len);
+} TCGOutOpExtract;
+
+typedef struct TCGOutOpExtract2 {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned shr);
+} TCGOutOpExtract2;
+
+typedef struct TCGOutOpLoad {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, intptr_t offset);
+} TCGOutOpLoad;
+
+typedef struct TCGOutOpMovcond {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf);
+} TCGOutOpMovcond;
+
+typedef struct TCGOutOpMul2 {
+ TCGOutOp base;
+ void (*out_rrrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3);
+} TCGOutOpMul2;
+
+typedef struct TCGOutOpQemuLdSt {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg addr, MemOpIdx oi);
+} TCGOutOpQemuLdSt;
+
+typedef struct TCGOutOpQemuLdSt2 {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGReg dlo, TCGReg dhi,
+ TCGReg addr, MemOpIdx oi);
+} TCGOutOpQemuLdSt2;
+
+typedef struct TCGOutOpUnary {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
+} TCGOutOpUnary;
+
+typedef struct TCGOutOpSetcond {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg a1, TCGReg a2);
+ void (*out_rri)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg a1, tcg_target_long a2);
+} TCGOutOpSetcond;
+
+typedef struct TCGOutOpSetcond2 {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh);
+} TCGOutOpSetcond2;
+
+typedef struct TCGOutOpStore {
+ TCGOutOp base;
+ void (*out_r)(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, intptr_t offset);
+ void (*out_i)(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, intptr_t offset);
+} TCGOutOpStore;
+
+typedef struct TCGOutOpSubtract {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rir)(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2);
+} TCGOutOpSubtract;
+
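The concrete outop_* instances come from the per-target tcg-target.c.inc included just below. As a rough sketch of the shape a backend provides (hypothetical emitter body; the constraint set chosen is target-specific), a register-register add would look something like:

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* emit the host add instruction computing a0 = a1 + a2 */
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};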
#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
@@ -761,6 +1125,144 @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
< MIN_TLB_MASK_TABLE_OFS);
#endif
+#if TCG_TARGET_REG_BITS == 64
+/*
+ * We require these functions for slow-path function calls.
+ * Adapt them generically for opcode output.
+ */
+
+static void tgen_exts_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_exts_i32_i64(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_exts_i32_i64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_exts_i32_i64,
+};
+
+static void tgen_extu_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extu_i32_i64(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_extu_i32_i64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extu_i32_i64,
+};
+
+static void tgen_extrl_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extrl_i64_i32(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_extrl_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = TCG_TARGET_HAS_extr_i64_i32 ? tgen_extrl_i64_i32 : NULL,
+};
+#endif
+
+static const TCGOutOp outop_goto_ptr = {
+ .static_constraint = C_O0_I1(r),
+};
+
+static const TCGOutOpLoad outop_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tcg_out_ld,
+};
+
+/*
+ * Register V as the TCGOutOp for O.
+ * This verifies that V is of type T, otherwise giving a nice compiler error.
+ * This prevents trivial mistakes within each arch/tcg-target.c.inc.
+ */
+#define OUTOP(O, T, V) [O] = _Generic(V, T: &V.base)
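For instance, OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add) from the table below expands to

    [INDEX_op_add] = _Generic(outop_add, TCGOutOpBinary: &outop_add.base)

so passing a value whose type does not match the named subclass has no matching _Generic association and fails to compile, instead of silently taking the address of the wrong structure.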
+
+/* Register allocation descriptions for every TCGOpcode. */
+static const TCGOutOp * const all_outop[NB_OPS] = {
+ OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_addci, TCGOutOpAddSubCarry, outop_addci),
+ OUTOP(INDEX_op_addcio, TCGOutOpBinary, outop_addcio),
+ OUTOP(INDEX_op_addco, TCGOutOpBinary, outop_addco),
+ /* addc1o is implemented with set_carry + addcio */
+ OUTOP(INDEX_op_addc1o, TCGOutOpBinary, outop_addcio),
+ OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
+ OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
+ OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16),
+ OUTOP(INDEX_op_bswap32, TCGOutOpBswap, outop_bswap32),
+ OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
+ OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
+ OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
+ OUTOP(INDEX_op_deposit, TCGOutOpDeposit, outop_deposit),
+ OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
+ OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
+ OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
+ OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
+ OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
+ OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
+ OUTOP(INDEX_op_ld8u, TCGOutOpLoad, outop_ld8u),
+ OUTOP(INDEX_op_ld8s, TCGOutOpLoad, outop_ld8s),
+ OUTOP(INDEX_op_ld16u, TCGOutOpLoad, outop_ld16u),
+ OUTOP(INDEX_op_ld16s, TCGOutOpLoad, outop_ld16s),
+ OUTOP(INDEX_op_ld, TCGOutOpLoad, outop_ld),
+ OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
+ OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
+ OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
+ OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
+ OUTOP(INDEX_op_mulu2, TCGOutOpMul2, outop_mulu2),
+ OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
+ OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
+ OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
+ OUTOP(INDEX_op_negsetcond, TCGOutOpSetcond, outop_negsetcond),
+ OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
+ OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
+ OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
+ OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
+ OUTOP(INDEX_op_qemu_ld, TCGOutOpQemuLdSt, outop_qemu_ld),
+ OUTOP(INDEX_op_qemu_ld2, TCGOutOpQemuLdSt2, outop_qemu_ld2),
+ OUTOP(INDEX_op_qemu_st, TCGOutOpQemuLdSt, outop_qemu_st),
+ OUTOP(INDEX_op_qemu_st2, TCGOutOpQemuLdSt2, outop_qemu_st2),
+ OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
+ OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
+ OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
+ OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
+ OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
+ OUTOP(INDEX_op_setcond, TCGOutOpSetcond, outop_setcond),
+ OUTOP(INDEX_op_sextract, TCGOutOpExtract, outop_sextract),
+ OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
+ OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
+ OUTOP(INDEX_op_st, TCGOutOpStore, outop_st),
+ OUTOP(INDEX_op_st8, TCGOutOpStore, outop_st8),
+ OUTOP(INDEX_op_st16, TCGOutOpStore, outop_st16),
+ OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
+ OUTOP(INDEX_op_subbi, TCGOutOpAddSubCarry, outop_subbi),
+ OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
+ OUTOP(INDEX_op_subbo, TCGOutOpAddSubCarry, outop_subbo),
+ /* subb1o is implemented with set_borrow + subbio */
+ OUTOP(INDEX_op_subb1o, TCGOutOpAddSubCarry, outop_subbio),
+ OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
+
+ [INDEX_op_goto_ptr] = &outop_goto_ptr,
+
+#if TCG_TARGET_REG_BITS == 32
+ OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
+ OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
+#else
+ OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64),
+ OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64),
+ OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
+ OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
+ OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
+ OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u),
+ OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s),
+ OUTOP(INDEX_op_st32, TCGOutOpStore, outop_st),
+#endif
+};
+
+#undef OUTOP
+
/*
* All TCG threads except the parent (i.e. the one that called tcg_context_init
* and registered the target's TCG globals) must register with this function
@@ -1293,39 +1795,19 @@ static void init_call_layout(TCGHelperInfo *info)
}
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
-static void process_op_defs(TCGContext *s);
+static void process_constraint_sets(void);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
TCGReg reg, const char *name);
-static void tcg_context_init(unsigned max_cpus)
+static void tcg_context_init(unsigned max_threads)
{
TCGContext *s = &tcg_init_ctx;
- int op, total_args, n, i;
- TCGOpDef *def;
- TCGArgConstraint *args_ct;
+ int n, i;
TCGTemp *ts;
memset(s, 0, sizeof(*s));
s->nb_globals = 0;
- /* Count total number of arguments and allocate the corresponding
- space */
- total_args = 0;
- for(op = 0; op < NB_OPS; op++) {
- def = &tcg_op_defs[op];
- n = def->nb_iargs + def->nb_oargs;
- total_args += n;
- }
-
- args_ct = g_new0(TCGArgConstraint, total_args);
-
- for(op = 0; op < NB_OPS; op++) {
- def = &tcg_op_defs[op];
- def->args_ct = args_ct;
- n = def->nb_iargs + def->nb_oargs;
- args_ct += n;
- }
-
init_call_layout(&info_helper_ld32_mmu);
init_call_layout(&info_helper_ld64_mmu);
init_call_layout(&info_helper_ld128_mmu);
@@ -1334,7 +1816,7 @@ static void tcg_context_init(unsigned max_cpus)
init_call_layout(&info_helper_st128_mmu);
tcg_target_init(s);
- process_op_defs(s);
+ process_constraint_sets();
/* Reverse the order of the saved registers, assuming they're all at
the start of tcg_target_reg_alloc_order. */
@@ -1356,15 +1838,15 @@ static void tcg_context_init(unsigned max_cpus)
* In user-mode we simply share the init context among threads, since we
 * use a single region. See the documentation of tcg_region_init() for the
* reasoning behind this.
- * In system-mode we will have at most max_cpus TCG threads.
+ * In system-mode we will have at most max_threads TCG threads.
*/
#ifdef CONFIG_USER_ONLY
tcg_ctxs = &tcg_ctx;
tcg_cur_ctxs = 1;
tcg_max_ctxs = 1;
#else
- tcg_max_ctxs = max_cpus;
- tcg_ctxs = g_new0(TCGContext *, max_cpus);
+ tcg_max_ctxs = max_threads;
+ tcg_ctxs = g_new0(TCGContext *, max_threads);
#endif
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
@@ -1372,10 +1854,10 @@ static void tcg_context_init(unsigned max_cpus)
tcg_env = temp_tcgv_ptr(ts);
}
-void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
+void tcg_init(size_t tb_size, int splitwx, unsigned max_threads)
{
- tcg_context_init(max_cpus);
- tcg_region_init(tb_size, splitwx, max_cpus);
+ tcg_context_init(max_threads);
+ tcg_region_init(tb_size, splitwx, max_threads);
}
/*
@@ -1399,7 +1881,6 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
goto retry;
}
qatomic_set(&s->code_gen_ptr, next);
- s->data_gen_ptr = NULL;
return tb;
}
@@ -1416,21 +1897,17 @@ void tcg_prologue_init(void)
tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif
-#ifdef TCG_TARGET_NEED_POOL_LABELS
s->pool_labels = NULL;
-#endif
qemu_thread_jit_write();
/* Generate the prologue. */
tcg_target_qemu_prologue(s);
-#ifdef TCG_TARGET_NEED_POOL_LABELS
/* Allow the prologue to put e.g. guest_base into a pool entry. */
{
int result = tcg_out_pool_finalize(s);
tcg_debug_assert(result == 0);
}
-#endif
prologue_size = tcg_current_code_size(s);
perf_report_prologue(s->code_gen_ptr, prologue_size);
@@ -1490,7 +1967,7 @@ void tcg_func_start(TCGContext *s)
s->nb_temps = s->nb_globals;
/* No temps have been previously allocated for size or locality. */
- memset(s->free_temps, 0, sizeof(s->free_temps));
+ tcg_temp_ebb_reset_freed(s);
/* No constant temps have been previously allocated. */
for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
@@ -1512,10 +1989,7 @@ void tcg_func_start(TCGContext *s)
s->emit_before_op = NULL;
QSIMPLEQ_INIT(&s->labels);
- tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
- s->addr_type == TCG_TYPE_I64);
-
- tcg_debug_assert(s->insn_start_words > 0);
+ tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
@@ -1894,6 +2368,11 @@ TCGv_i64 tcg_constant_i64(int64_t val)
return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}
+TCGv_vaddr tcg_constant_vaddr(uintptr_t val)
+{
+ return temp_tcgv_vaddr(tcg_constant_internal(TCG_TYPE_PTR, val));
+}
+
TCGv_ptr tcg_constant_ptr_int(intptr_t val)
{
return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val));
@@ -1932,12 +2411,34 @@ TCGTemp *tcgv_i32_temp(TCGv_i32 v)
}
#endif /* CONFIG_DEBUG_TCG */
-/* Return true if OP may appear in the opcode stream.
- Test the runtime variable that controls each opcode. */
-bool tcg_op_supported(TCGOpcode op)
+/*
+ * Return true if OP may appear in the opcode stream with TYPE.
+ * Test the runtime variable that controls each opcode.
+ */
+bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
{
- const bool have_vec
- = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
+ bool has_type;
+
+ switch (type) {
+ case TCG_TYPE_I32:
+ has_type = true;
+ break;
+ case TCG_TYPE_I64:
+ has_type = TCG_TARGET_REG_BITS == 64;
+ break;
+ case TCG_TYPE_V64:
+ has_type = TCG_TARGET_HAS_v64;
+ break;
+ case TCG_TYPE_V128:
+ has_type = TCG_TARGET_HAS_v128;
+ break;
+ case TCG_TYPE_V256:
+ has_type = TCG_TARGET_HAS_v256;
+ break;
+ default:
+ has_type = false;
+ break;
+ }
switch (op) {
case INDEX_op_discard:
@@ -1949,221 +2450,56 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_exit_tb:
case INDEX_op_goto_tb:
case INDEX_op_goto_ptr:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
return true;
- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
- return TCG_TARGET_HAS_qemu_st8_i32;
-
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
- return TCG_TARGET_HAS_qemu_ldst_i128;
-
- case INDEX_op_mov_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_brcond_i32:
- case INDEX_op_movcond_i32:
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_add_i32:
- case INDEX_op_sub_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_mul_i32:
- case INDEX_op_and_i32:
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ tcg_debug_assert(type <= TCG_TYPE_REG);
return true;
- case INDEX_op_negsetcond_i32:
- return TCG_TARGET_HAS_negsetcond_i32;
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- return TCG_TARGET_HAS_div_i32;
- case INDEX_op_rem_i32:
- case INDEX_op_remu_i32:
- return TCG_TARGET_HAS_rem_i32;
- case INDEX_op_div2_i32:
- case INDEX_op_divu2_i32:
- return TCG_TARGET_HAS_div2_i32;
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- return TCG_TARGET_HAS_rot_i32;
- case INDEX_op_deposit_i32:
- return TCG_TARGET_HAS_deposit_i32;
- case INDEX_op_extract_i32:
- return TCG_TARGET_HAS_extract_i32;
- case INDEX_op_sextract_i32:
- return TCG_TARGET_HAS_sextract_i32;
- case INDEX_op_extract2_i32:
- return TCG_TARGET_HAS_extract2_i32;
- case INDEX_op_add2_i32:
- return TCG_TARGET_HAS_add2_i32;
- case INDEX_op_sub2_i32:
- return TCG_TARGET_HAS_sub2_i32;
- case INDEX_op_mulu2_i32:
- return TCG_TARGET_HAS_mulu2_i32;
- case INDEX_op_muls2_i32:
- return TCG_TARGET_HAS_muls2_i32;
- case INDEX_op_muluh_i32:
- return TCG_TARGET_HAS_muluh_i32;
- case INDEX_op_mulsh_i32:
- return TCG_TARGET_HAS_mulsh_i32;
- case INDEX_op_ext8s_i32:
- return TCG_TARGET_HAS_ext8s_i32;
- case INDEX_op_ext16s_i32:
- return TCG_TARGET_HAS_ext16s_i32;
- case INDEX_op_ext8u_i32:
- return TCG_TARGET_HAS_ext8u_i32;
- case INDEX_op_ext16u_i32:
- return TCG_TARGET_HAS_ext16u_i32;
- case INDEX_op_bswap16_i32:
- return TCG_TARGET_HAS_bswap16_i32;
- case INDEX_op_bswap32_i32:
- return TCG_TARGET_HAS_bswap32_i32;
- case INDEX_op_not_i32:
- return TCG_TARGET_HAS_not_i32;
- case INDEX_op_andc_i32:
- return TCG_TARGET_HAS_andc_i32;
- case INDEX_op_orc_i32:
- return TCG_TARGET_HAS_orc_i32;
- case INDEX_op_eqv_i32:
- return TCG_TARGET_HAS_eqv_i32;
- case INDEX_op_nand_i32:
- return TCG_TARGET_HAS_nand_i32;
- case INDEX_op_nor_i32:
- return TCG_TARGET_HAS_nor_i32;
- case INDEX_op_clz_i32:
- return TCG_TARGET_HAS_clz_i32;
- case INDEX_op_ctz_i32:
- return TCG_TARGET_HAS_ctz_i32;
- case INDEX_op_ctpop_i32:
- return TCG_TARGET_HAS_ctpop_i32;
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_debug_assert(type == TCG_TYPE_I64);
+ return true;
+ }
+ tcg_debug_assert(type == TCG_TYPE_I128);
+ goto do_lookup;
+
+ case INDEX_op_add:
+ case INDEX_op_and:
+ case INDEX_op_brcond:
+ case INDEX_op_deposit:
+ case INDEX_op_extract:
+ case INDEX_op_ld8u:
+ case INDEX_op_ld8s:
+ case INDEX_op_ld16u:
+ case INDEX_op_ld16s:
+ case INDEX_op_ld:
+ case INDEX_op_mov:
+ case INDEX_op_movcond:
+ case INDEX_op_negsetcond:
+ case INDEX_op_or:
+ case INDEX_op_setcond:
+ case INDEX_op_sextract:
+ case INDEX_op_st8:
+ case INDEX_op_st16:
+ case INDEX_op_st:
+ case INDEX_op_xor:
+ return has_type;
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
return TCG_TARGET_REG_BITS == 32;
- case INDEX_op_mov_i64:
- case INDEX_op_setcond_i64:
- case INDEX_op_brcond_i64:
- case INDEX_op_movcond_i64:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- case INDEX_op_add_i64:
- case INDEX_op_sub_i64:
- case INDEX_op_neg_i64:
- case INDEX_op_mul_i64:
- case INDEX_op_and_i64:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
+ case INDEX_op_ld32u:
+ case INDEX_op_ld32s:
+ case INDEX_op_st32:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- return TCG_TARGET_REG_BITS == 64;
-
- case INDEX_op_negsetcond_i64:
- return TCG_TARGET_HAS_negsetcond_i64;
- case INDEX_op_div_i64:
- case INDEX_op_divu_i64:
- return TCG_TARGET_HAS_div_i64;
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i64:
- return TCG_TARGET_HAS_rem_i64;
- case INDEX_op_div2_i64:
- case INDEX_op_divu2_i64:
- return TCG_TARGET_HAS_div2_i64;
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
- return TCG_TARGET_HAS_rot_i64;
- case INDEX_op_deposit_i64:
- return TCG_TARGET_HAS_deposit_i64;
- case INDEX_op_extract_i64:
- return TCG_TARGET_HAS_extract_i64;
- case INDEX_op_sextract_i64:
- return TCG_TARGET_HAS_sextract_i64;
- case INDEX_op_extract2_i64:
- return TCG_TARGET_HAS_extract2_i64;
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- return TCG_TARGET_HAS_extr_i64_i32;
- case INDEX_op_ext8s_i64:
- return TCG_TARGET_HAS_ext8s_i64;
- case INDEX_op_ext16s_i64:
- return TCG_TARGET_HAS_ext16s_i64;
- case INDEX_op_ext32s_i64:
- return TCG_TARGET_HAS_ext32s_i64;
- case INDEX_op_ext8u_i64:
- return TCG_TARGET_HAS_ext8u_i64;
- case INDEX_op_ext16u_i64:
- return TCG_TARGET_HAS_ext16u_i64;
- case INDEX_op_ext32u_i64:
- return TCG_TARGET_HAS_ext32u_i64;
- case INDEX_op_bswap16_i64:
- return TCG_TARGET_HAS_bswap16_i64;
- case INDEX_op_bswap32_i64:
- return TCG_TARGET_HAS_bswap32_i64;
- case INDEX_op_bswap64_i64:
- return TCG_TARGET_HAS_bswap64_i64;
- case INDEX_op_not_i64:
- return TCG_TARGET_HAS_not_i64;
- case INDEX_op_andc_i64:
- return TCG_TARGET_HAS_andc_i64;
- case INDEX_op_orc_i64:
- return TCG_TARGET_HAS_orc_i64;
- case INDEX_op_eqv_i64:
- return TCG_TARGET_HAS_eqv_i64;
- case INDEX_op_nand_i64:
- return TCG_TARGET_HAS_nand_i64;
- case INDEX_op_nor_i64:
- return TCG_TARGET_HAS_nor_i64;
- case INDEX_op_clz_i64:
- return TCG_TARGET_HAS_clz_i64;
- case INDEX_op_ctz_i64:
- return TCG_TARGET_HAS_ctz_i64;
- case INDEX_op_ctpop_i64:
- return TCG_TARGET_HAS_ctpop_i64;
- case INDEX_op_add2_i64:
- return TCG_TARGET_HAS_add2_i64;
- case INDEX_op_sub2_i64:
- return TCG_TARGET_HAS_sub2_i64;
- case INDEX_op_mulu2_i64:
- return TCG_TARGET_HAS_mulu2_i64;
- case INDEX_op_muls2_i64:
- return TCG_TARGET_HAS_muls2_i64;
- case INDEX_op_muluh_i64:
- return TCG_TARGET_HAS_muluh_i64;
- case INDEX_op_mulsh_i64:
- return TCG_TARGET_HAS_mulsh_i64;
+ return TCG_TARGET_REG_BITS == 64;
case INDEX_op_mov_vec:
case INDEX_op_dup_vec:
@@ -2176,67 +2512,106 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
case INDEX_op_cmp_vec:
- return have_vec;
+ return has_type;
case INDEX_op_dup2_vec:
- return have_vec && TCG_TARGET_REG_BITS == 32;
+ return has_type && TCG_TARGET_REG_BITS == 32;
case INDEX_op_not_vec:
- return have_vec && TCG_TARGET_HAS_not_vec;
+ return has_type && TCG_TARGET_HAS_not_vec;
case INDEX_op_neg_vec:
- return have_vec && TCG_TARGET_HAS_neg_vec;
+ return has_type && TCG_TARGET_HAS_neg_vec;
case INDEX_op_abs_vec:
- return have_vec && TCG_TARGET_HAS_abs_vec;
+ return has_type && TCG_TARGET_HAS_abs_vec;
case INDEX_op_andc_vec:
- return have_vec && TCG_TARGET_HAS_andc_vec;
+ return has_type && TCG_TARGET_HAS_andc_vec;
case INDEX_op_orc_vec:
- return have_vec && TCG_TARGET_HAS_orc_vec;
+ return has_type && TCG_TARGET_HAS_orc_vec;
case INDEX_op_nand_vec:
- return have_vec && TCG_TARGET_HAS_nand_vec;
+ return has_type && TCG_TARGET_HAS_nand_vec;
case INDEX_op_nor_vec:
- return have_vec && TCG_TARGET_HAS_nor_vec;
+ return has_type && TCG_TARGET_HAS_nor_vec;
case INDEX_op_eqv_vec:
- return have_vec && TCG_TARGET_HAS_eqv_vec;
+ return has_type && TCG_TARGET_HAS_eqv_vec;
case INDEX_op_mul_vec:
- return have_vec && TCG_TARGET_HAS_mul_vec;
+ return has_type && TCG_TARGET_HAS_mul_vec;
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
- return have_vec && TCG_TARGET_HAS_shi_vec;
+ return has_type && TCG_TARGET_HAS_shi_vec;
case INDEX_op_shls_vec:
case INDEX_op_shrs_vec:
case INDEX_op_sars_vec:
- return have_vec && TCG_TARGET_HAS_shs_vec;
+ return has_type && TCG_TARGET_HAS_shs_vec;
case INDEX_op_shlv_vec:
case INDEX_op_shrv_vec:
case INDEX_op_sarv_vec:
- return have_vec && TCG_TARGET_HAS_shv_vec;
+ return has_type && TCG_TARGET_HAS_shv_vec;
case INDEX_op_rotli_vec:
- return have_vec && TCG_TARGET_HAS_roti_vec;
+ return has_type && TCG_TARGET_HAS_roti_vec;
case INDEX_op_rotls_vec:
- return have_vec && TCG_TARGET_HAS_rots_vec;
+ return has_type && TCG_TARGET_HAS_rots_vec;
case INDEX_op_rotlv_vec:
case INDEX_op_rotrv_vec:
- return have_vec && TCG_TARGET_HAS_rotv_vec;
+ return has_type && TCG_TARGET_HAS_rotv_vec;
case INDEX_op_ssadd_vec:
case INDEX_op_usadd_vec:
case INDEX_op_sssub_vec:
case INDEX_op_ussub_vec:
- return have_vec && TCG_TARGET_HAS_sat_vec;
+ return has_type && TCG_TARGET_HAS_sat_vec;
case INDEX_op_smin_vec:
case INDEX_op_umin_vec:
case INDEX_op_smax_vec:
case INDEX_op_umax_vec:
- return have_vec && TCG_TARGET_HAS_minmax_vec;
+ return has_type && TCG_TARGET_HAS_minmax_vec;
case INDEX_op_bitsel_vec:
- return have_vec && TCG_TARGET_HAS_bitsel_vec;
+ return has_type && TCG_TARGET_HAS_bitsel_vec;
case INDEX_op_cmpsel_vec:
- return have_vec && TCG_TARGET_HAS_cmpsel_vec;
+ return has_type && TCG_TARGET_HAS_cmpsel_vec;
default:
- tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
+ if (op < INDEX_op_last_generic) {
+ const TCGOutOp *outop;
+ TCGConstraintSetIndex con_set;
+
+ if (!has_type) {
+ return false;
+ }
+
+ do_lookup:
+ outop = all_outop[op];
+ tcg_debug_assert(outop != NULL);
+
+ con_set = outop->static_constraint;
+ if (con_set == C_Dynamic) {
+ con_set = outop->dynamic_constraint(type, flags);
+ }
+ if (con_set >= 0) {
+ return true;
+ }
+ tcg_debug_assert(con_set == C_NotImplemented);
+ return false;
+ }
+ tcg_debug_assert(op < NB_OPS);
return true;
+
+ case INDEX_op_last_generic:
+ g_assert_not_reached();
}
}
+bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len)
+{
+ unsigned width;
+
+ tcg_debug_assert(type == TCG_TYPE_I32 || type == TCG_TYPE_I64);
+ width = (type == TCG_TYPE_I32 ? 32 : 64);
+
+ tcg_debug_assert(ofs < width);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= width - ofs);
+
+ return TCG_TARGET_deposit_valid(type, ofs, len);
+}
+
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
static void tcg_gen_callN(void *func, TCGHelperInfo *info,
@@ -2573,7 +2948,7 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
nb_oargs = 0;
col += ne_fprintf(f, "\n ----");
- for (i = 0, k = s->insn_start_words; i < k; ++i) {
+ for (i = 0, k = INSN_START_WORDS; i < k; ++i) {
col += ne_fprintf(f, " %016" PRIx64,
tcg_get_insn_start_param(op, i));
}
@@ -2610,17 +2985,23 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
col += ne_fprintf(f, ",%s", t);
}
} else {
- col += ne_fprintf(f, " %s ", def->name);
+ if (def->flags & TCG_OPF_INT) {
+ col += ne_fprintf(f, " %s_i%d ",
+ def->name,
+ 8 * tcg_type_size(TCGOP_TYPE(op)));
+ } else if (def->flags & TCG_OPF_VECTOR) {
+ col += ne_fprintf(f, "%s v%d,e%d,",
+ def->name,
+ 8 * tcg_type_size(TCGOP_TYPE(op)),
+ 8 << TCGOP_VECE(op));
+ } else {
+ col += ne_fprintf(f, " %s ", def->name);
+ }
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
nb_cargs = def->nb_cargs;
- if (def->flags & TCG_OPF_VECTOR) {
- col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
- 8 << TCGOP_VECE(op));
- }
-
k = 0;
for (i = 0; i < nb_oargs; i++) {
const char *sep = k ? "," : "";
@@ -2635,16 +3016,12 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
op->args[k++]));
}
switch (c) {
- case INDEX_op_brcond_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_movcond_i32:
+ case INDEX_op_brcond:
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
+ case INDEX_op_movcond:
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
- case INDEX_op_brcond_i64:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i64:
- case INDEX_op_movcond_i64:
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
if (op->args[k] < ARRAY_SIZE(cond_name)
@@ -2655,20 +3032,10 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
i = 1;
break;
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
{
const char *s_al, *s_op, *s_at;
MemOpIdx oi = op->args[k++];
@@ -2691,11 +3058,9 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
i = 1;
}
break;
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap16:
+ case INDEX_op_bswap32:
+ case INDEX_op_bswap64:
{
TCGArg flags = op->args[k];
const char *name = NULL;
@@ -2736,8 +3101,7 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
switch (c) {
case INDEX_op_set_label:
case INDEX_op_br:
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
case INDEX_op_brcond2_i32:
col += ne_fprintf(f, "%s$L%d", k ? "," : "",
arg_label(op->args[k])->id);
@@ -2890,10 +3254,12 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
/* we give more priority to constraints with less registers */
-static int get_constraint_priority(const TCGOpDef *def, int k)
+static int get_constraint_priority(const TCGArgConstraint *arg_ct, int k)
{
- const TCGArgConstraint *arg_ct = &def->args_ct[k];
- int n = ctpop64(arg_ct->regs);
+ int n;
+
+ arg_ct += k;
+ n = ctpop64(arg_ct->regs);
/*
* Sort constraints of a single register first, which includes output
@@ -2922,10 +3288,9 @@ static int get_constraint_priority(const TCGOpDef *def, int k)
}
/* sort from highest priority to lowest */
-static void sort_constraints(TCGOpDef *def, int start, int n)
+static void sort_constraints(TCGArgConstraint *a, int start, int n)
{
int i, j;
- TCGArgConstraint *a = def->args_ct;
for (i = 0; i < n; i++) {
a[start + i].sort_index = start + i;
@@ -2935,8 +3300,8 @@ static void sort_constraints(TCGOpDef *def, int start, int n)
}
for (i = 0; i < n - 1; i++) {
for (j = i + 1; j < n; j++) {
- int p1 = get_constraint_priority(def, a[start + i].sort_index);
- int p2 = get_constraint_priority(def, a[start + j].sort_index);
+ int p1 = get_constraint_priority(a, a[start + i].sort_index);
+ int p2 = get_constraint_priority(a, a[start + j].sort_index);
if (p1 < p2) {
int tmp = a[start + i].sort_index;
a[start + i].sort_index = a[start + j].sort_index;
@@ -2946,56 +3311,39 @@ static void sort_constraints(TCGOpDef *def, int start, int n)
}
}
-static void process_op_defs(TCGContext *s)
-{
- TCGOpcode op;
+static const TCGArgConstraint empty_cts[TCG_MAX_OP_ARGS];
+static TCGArgConstraint all_cts[ARRAY_SIZE(constraint_sets)][TCG_MAX_OP_ARGS];
- for (op = 0; op < NB_OPS; op++) {
- TCGOpDef *def = &tcg_op_defs[op];
- const TCGTargetOpDef *tdefs;
+static void process_constraint_sets(void)
+{
+ for (size_t c = 0; c < ARRAY_SIZE(constraint_sets); ++c) {
+ const TCGConstraintSet *tdefs = &constraint_sets[c];
+ TCGArgConstraint *args_ct = all_cts[c];
+ int nb_oargs = tdefs->nb_oargs;
+ int nb_iargs = tdefs->nb_iargs;
+ int nb_args = nb_oargs + nb_iargs;
bool saw_alias_pair = false;
- int i, o, i2, o2, nb_args;
-
- if (def->flags & TCG_OPF_NOT_PRESENT) {
- continue;
- }
-
- nb_args = def->nb_iargs + def->nb_oargs;
- if (nb_args == 0) {
- continue;
- }
-
- /*
- * Macro magic should make it impossible, but double-check that
- * the array index is in range. Since the signness of an enum
- * is implementation defined, force the result to unsigned.
- */
- unsigned con_set = tcg_target_op_def(op);
- tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
- tdefs = &constraint_sets[con_set];
- for (i = 0; i < nb_args; i++) {
+ for (int i = 0; i < nb_args; i++) {
const char *ct_str = tdefs->args_ct_str[i];
- bool input_p = i >= def->nb_oargs;
-
- /* Incomplete TCGTargetOpDef entry. */
- tcg_debug_assert(ct_str != NULL);
+ bool input_p = i >= nb_oargs;
+ int o;
switch (*ct_str) {
case '0' ... '9':
o = *ct_str - '0';
tcg_debug_assert(input_p);
- tcg_debug_assert(o < def->nb_oargs);
- tcg_debug_assert(def->args_ct[o].regs != 0);
- tcg_debug_assert(!def->args_ct[o].oalias);
- def->args_ct[i] = def->args_ct[o];
+ tcg_debug_assert(o < nb_oargs);
+ tcg_debug_assert(args_ct[o].regs != 0);
+ tcg_debug_assert(!args_ct[o].oalias);
+ args_ct[i] = args_ct[o];
/* The output sets oalias. */
- def->args_ct[o].oalias = 1;
- def->args_ct[o].alias_index = i;
+ args_ct[o].oalias = 1;
+ args_ct[o].alias_index = i;
/* The input sets ialias. */
- def->args_ct[i].ialias = 1;
- def->args_ct[i].alias_index = o;
- if (def->args_ct[i].pair) {
+ args_ct[i].ialias = 1;
+ args_ct[i].alias_index = o;
+ if (args_ct[i].pair) {
saw_alias_pair = true;
}
tcg_debug_assert(ct_str[1] == '\0');
@@ -3003,41 +3351,41 @@ static void process_op_defs(TCGContext *s)
case '&':
tcg_debug_assert(!input_p);
- def->args_ct[i].newreg = true;
+ args_ct[i].newreg = true;
ct_str++;
break;
case 'p': /* plus */
/* Allocate to the register after the previous. */
- tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
+ tcg_debug_assert(i > (input_p ? nb_oargs : 0));
o = i - 1;
- tcg_debug_assert(!def->args_ct[o].pair);
- tcg_debug_assert(!def->args_ct[o].ct);
- def->args_ct[i] = (TCGArgConstraint){
+ tcg_debug_assert(!args_ct[o].pair);
+ tcg_debug_assert(!args_ct[o].ct);
+ args_ct[i] = (TCGArgConstraint){
.pair = 2,
.pair_index = o,
- .regs = def->args_ct[o].regs << 1,
- .newreg = def->args_ct[o].newreg,
+ .regs = args_ct[o].regs << 1,
+ .newreg = args_ct[o].newreg,
};
- def->args_ct[o].pair = 1;
- def->args_ct[o].pair_index = i;
+ args_ct[o].pair = 1;
+ args_ct[o].pair_index = i;
tcg_debug_assert(ct_str[1] == '\0');
continue;
case 'm': /* minus */
/* Allocate to the register before the previous. */
- tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
+ tcg_debug_assert(i > (input_p ? nb_oargs : 0));
o = i - 1;
- tcg_debug_assert(!def->args_ct[o].pair);
- tcg_debug_assert(!def->args_ct[o].ct);
- def->args_ct[i] = (TCGArgConstraint){
+ tcg_debug_assert(!args_ct[o].pair);
+ tcg_debug_assert(!args_ct[o].ct);
+ args_ct[i] = (TCGArgConstraint){
.pair = 1,
.pair_index = o,
- .regs = def->args_ct[o].regs >> 1,
- .newreg = def->args_ct[o].newreg,
+ .regs = args_ct[o].regs >> 1,
+ .newreg = args_ct[o].newreg,
};
- def->args_ct[o].pair = 2;
- def->args_ct[o].pair_index = i;
+ args_ct[o].pair = 2;
+ args_ct[o].pair_index = i;
tcg_debug_assert(ct_str[1] == '\0');
continue;
}
@@ -3045,16 +3393,21 @@ static void process_op_defs(TCGContext *s)
do {
switch (*ct_str) {
case 'i':
- def->args_ct[i].ct |= TCG_CT_CONST;
+ args_ct[i].ct |= TCG_CT_CONST;
+ break;
+#ifdef TCG_REG_ZERO
+ case 'z':
+ args_ct[i].ct |= TCG_CT_REG_ZERO;
break;
+#endif
/* Include all of the target-specific constraints. */
#undef CONST
#define CONST(CASE, MASK) \
- case CASE: def->args_ct[i].ct |= MASK; break;
+ case CASE: args_ct[i].ct |= MASK; break;
#define REGS(CASE, MASK) \
- case CASE: def->args_ct[i].regs |= MASK; break;
+ case CASE: args_ct[i].regs |= MASK; break;
#include "tcg-target-con-str.h"
@@ -3065,15 +3418,12 @@ static void process_op_defs(TCGContext *s)
case '&':
case 'p':
case 'm':
- /* Typo in TCGTargetOpDef constraint. */
+ /* Typo in TCGConstraintSet constraint. */
g_assert_not_reached();
}
} while (*++ct_str != '\0');
}
- /* TCGTargetOpDef entry with too much information? */
- tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
-
/*
* Fix up output pairs that are aliased with inputs.
* When we created the alias, we copied pair from the output.
@@ -3094,51 +3444,53 @@ static void process_op_defs(TCGContext *s)
* first output to pair=3, and the pair_index'es to match.
*/
if (saw_alias_pair) {
- for (i = def->nb_oargs; i < nb_args; i++) {
+ for (int i = nb_oargs; i < nb_args; i++) {
+ int o, o2, i2;
+
/*
* Since [0-9pm] must be alone in the constraint string,
* the only way they can both be set is if the pair comes
* from the output alias.
*/
- if (!def->args_ct[i].ialias) {
+ if (!args_ct[i].ialias) {
continue;
}
- switch (def->args_ct[i].pair) {
+ switch (args_ct[i].pair) {
case 0:
break;
case 1:
- o = def->args_ct[i].alias_index;
- o2 = def->args_ct[o].pair_index;
- tcg_debug_assert(def->args_ct[o].pair == 1);
- tcg_debug_assert(def->args_ct[o2].pair == 2);
- if (def->args_ct[o2].oalias) {
+ o = args_ct[i].alias_index;
+ o2 = args_ct[o].pair_index;
+ tcg_debug_assert(args_ct[o].pair == 1);
+ tcg_debug_assert(args_ct[o2].pair == 2);
+ if (args_ct[o2].oalias) {
/* Case 1a */
- i2 = def->args_ct[o2].alias_index;
- tcg_debug_assert(def->args_ct[i2].pair == 2);
- def->args_ct[i2].pair_index = i;
- def->args_ct[i].pair_index = i2;
+ i2 = args_ct[o2].alias_index;
+ tcg_debug_assert(args_ct[i2].pair == 2);
+ args_ct[i2].pair_index = i;
+ args_ct[i].pair_index = i2;
} else {
/* Case 1b */
- def->args_ct[i].pair_index = i;
+ args_ct[i].pair_index = i;
}
break;
case 2:
- o = def->args_ct[i].alias_index;
- o2 = def->args_ct[o].pair_index;
- tcg_debug_assert(def->args_ct[o].pair == 2);
- tcg_debug_assert(def->args_ct[o2].pair == 1);
- if (def->args_ct[o2].oalias) {
+ o = args_ct[i].alias_index;
+ o2 = args_ct[o].pair_index;
+ tcg_debug_assert(args_ct[o].pair == 2);
+ tcg_debug_assert(args_ct[o2].pair == 1);
+ if (args_ct[o2].oalias) {
/* Case 1a */
- i2 = def->args_ct[o2].alias_index;
- tcg_debug_assert(def->args_ct[i2].pair == 1);
- def->args_ct[i2].pair_index = i;
- def->args_ct[i].pair_index = i2;
+ i2 = args_ct[o2].alias_index;
+ tcg_debug_assert(args_ct[i2].pair == 1);
+ args_ct[i2].pair_index = i;
+ args_ct[i].pair_index = i2;
} else {
/* Case 2 */
- def->args_ct[i].pair = 3;
- def->args_ct[o2].pair = 3;
- def->args_ct[i].pair_index = o2;
- def->args_ct[o2].pair_index = i;
+ args_ct[i].pair = 3;
+ args_ct[o2].pair = 3;
+ args_ct[i].pair_index = o2;
+ args_ct[o2].pair_index = i;
}
break;
default:
@@ -3148,9 +3500,40 @@ static void process_op_defs(TCGContext *s)
}
        /* sort the constraints (XXX: this is just a heuristic) */
- sort_constraints(def, 0, def->nb_oargs);
- sort_constraints(def, def->nb_oargs, def->nb_iargs);
+ sort_constraints(args_ct, 0, nb_oargs);
+ sort_constraints(args_ct, nb_oargs, nb_iargs);
+ }
+}
+
+static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
+{
+ TCGOpcode opc = op->opc;
+ TCGType type = TCGOP_TYPE(op);
+ unsigned flags = TCGOP_FLAGS(op);
+ const TCGOpDef *def = &tcg_op_defs[opc];
+ const TCGOutOp *outop = all_outop[opc];
+ TCGConstraintSetIndex con_set;
+
+ if (def->flags & TCG_OPF_NOT_PRESENT) {
+ return empty_cts;
+ }
+
+ if (outop) {
+ con_set = outop->static_constraint;
+ if (con_set == C_Dynamic) {
+ con_set = outop->dynamic_constraint(type, flags);
+ }
+ } else {
+ con_set = tcg_target_op_def(opc, type, flags);
}
+ tcg_debug_assert(con_set >= 0);
+ tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
+
+ /* The constraint arguments must match TCGOpcode arguments. */
+ tcg_debug_assert(constraint_sets[con_set].nb_oargs == def->nb_oargs);
+ tcg_debug_assert(constraint_sets[con_set].nb_iargs == def->nb_iargs);
+
+ return all_cts[con_set];
}
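
To make the constraint lookup above easier to follow: opcode_args_ct() resolves a constraint-set index either from the per-opcode TCGOutOp entry (possibly computed per type/flags via dynamic_constraint) or from the legacy tcg_target_op_def() hook, then validates it against the opcode's argument counts. The standalone C sketch below mirrors only that selection pattern; MiniOutOp, pick_by_type and the index values are invented for illustration and are not QEMU API.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the real TCG types. */
typedef int ConstraintSetIndex;           /* index into a constraint table */
#define C_DYNAMIC (-2)                    /* "compute at runtime" marker   */

typedef struct {
    ConstraintSetIndex static_constraint;
    ConstraintSetIndex (*dynamic_constraint)(int type, unsigned flags);
} MiniOutOp;

static ConstraintSetIndex pick_by_type(int type, unsigned flags)
{
    return type == 0 ? 1 : 2;             /* e.g. different regs per type */
}

static const MiniOutOp out_add = { .static_constraint = 0 };
static const MiniOutOp out_cmp = { .static_constraint = C_DYNAMIC,
                                   .dynamic_constraint = pick_by_type };

static ConstraintSetIndex select_constraints(const MiniOutOp *outop,
                                             int type, unsigned flags)
{
    ConstraintSetIndex c = outop->static_constraint;
    if (c == C_DYNAMIC) {
        c = outop->dynamic_constraint(type, flags);
    }
    assert(c >= 0);
    return c;
}

int main(void)
{
    printf("add -> set %d\n", select_constraints(&out_add, 0, 0));
    printf("cmp -> set %d\n", select_constraints(&out_cmp, 1, 0));
    return 0;
}
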
static void remove_label_use(TCGOp *op, int idx)
@@ -3173,8 +3556,7 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
case INDEX_op_br:
remove_label_use(op, 0);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
remove_label_use(op, 3);
break;
case INDEX_op_brcond2_i32:
@@ -3246,17 +3628,21 @@ TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
}
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
- TCGOpcode opc, unsigned nargs)
+ TCGOpcode opc, TCGType type, unsigned nargs)
{
TCGOp *new_op = tcg_op_alloc(opc, nargs);
+
+ TCGOP_TYPE(new_op) = type;
QTAILQ_INSERT_BEFORE(old_op, new_op, link);
return new_op;
}
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
- TCGOpcode opc, unsigned nargs)
+ TCGOpcode opc, TCGType type, unsigned nargs)
{
TCGOp *new_op = tcg_op_alloc(opc, nargs);
+
+ TCGOP_TYPE(new_op) = type;
QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
return new_op;
}
@@ -3271,8 +3657,7 @@ static void move_label_uses(TCGLabel *to, TCGLabel *from)
case INDEX_op_br:
op->args[0] = label_arg(to);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
op->args[3] = label_arg(to);
break;
case INDEX_op_brcond2_i32:
@@ -3591,6 +3976,17 @@ liveness_pass_0(TCGContext *s)
}
}
+static void assert_carry_dead(TCGContext *s)
+{
+ /*
+ * Carry operations can be separated by a few insns like mov,
+ * load or store, but they should always be "close", and
+ * carry-out operations should always be paired with carry-in.
+ * At various boundaries, carry must have been consumed.
+ */
+ tcg_debug_assert(!s->carry_live);
+}
+
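
The assertion above encodes the invariant that a carry produced by a carry-out opcode (addco, subbo, ...) is consumed by a matching carry-in opcode (addci, addcio, ...) before any basic-block or call boundary. A minimal sketch of the double-word addition such a pair implements, written as plain C rather than TCG opcodes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Double-word add built from an add-with-carry-out followed by
 * an add-with-carry-in, mirroring an addco/addci pair. */
static void add128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                   uint64_t *rl, uint64_t *rh)
{
    uint64_t lo = al + bl;          /* addco: produces the carry */
    bool carry = lo < al;
    *rl = lo;
    *rh = ah + bh + carry;          /* addci: consumes the carry */
}

int main(void)
{
    uint64_t rl, rh;
    add128(~0ull, 1, 1, 0, &rl, &rh);
    printf("%016llx%016llx\n", (unsigned long long)rh, (unsigned long long)rl);
    return 0;
}
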
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
@@ -3601,27 +3997,28 @@ liveness_pass_1(TCGContext *s)
int nb_temps = s->nb_temps;
TCGOp *op, *op_prev;
TCGRegSet *prefs;
- int i;
prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
- for (i = 0; i < nb_temps; ++i) {
+ for (int i = 0; i < nb_temps; ++i) {
s->temps[i].state_ptr = prefs + i;
}
/* ??? Should be redundant with the exit_tb that ends the TB. */
la_func_end(s, nb_globals, nb_temps);
+ s->carry_live = false;
QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
int nb_iargs, nb_oargs;
TCGOpcode opc_new, opc_new2;
- bool have_opc_new2;
TCGLifeData arg_life = 0;
TCGTemp *ts;
TCGOpcode opc = op->opc;
- const TCGOpDef *def = &tcg_op_defs[opc];
+ const TCGOpDef *def;
+ const TCGArgConstraint *args_ct;
switch (opc) {
case INDEX_op_call:
+ assert_carry_dead(s);
{
const TCGHelperInfo *info = tcg_call_info(op);
int call_flags = tcg_call_flags(op);
@@ -3631,7 +4028,7 @@ liveness_pass_1(TCGContext *s)
/* pure functions can be removed if their result is unused */
if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
- for (i = 0; i < nb_oargs; i++) {
+ for (int i = 0; i < nb_oargs; i++) {
ts = arg_temp(op->args[i]);
if (ts->state != TS_DEAD) {
goto do_not_remove_call;
@@ -3642,7 +4039,7 @@ liveness_pass_1(TCGContext *s)
do_not_remove_call:
/* Output args are dead. */
- for (i = 0; i < nb_oargs; i++) {
+ for (int i = 0; i < nb_oargs; i++) {
ts = arg_temp(op->args[i]);
if (ts->state & TS_DEAD) {
arg_life |= DEAD_ARG << i;
@@ -3665,7 +4062,7 @@ liveness_pass_1(TCGContext *s)
}
/* Record arguments that die in this helper. */
- for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
+ for (int i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
ts = arg_temp(op->args[i]);
if (ts->state & TS_DEAD) {
arg_life |= DEAD_ARG << i;
@@ -3685,7 +4082,7 @@ liveness_pass_1(TCGContext *s)
* order so that if a temp is used more than once, the stack
* reset to max happens before the register reset to 0.
*/
- for (i = nb_iargs - 1; i >= 0; i--) {
+ for (int i = nb_iargs - 1; i >= 0; i--) {
const TCGCallArgumentLoc *loc = &info->in[i];
ts = arg_temp(op->args[nb_oargs + i]);
@@ -3713,7 +4110,7 @@ liveness_pass_1(TCGContext *s)
* If a temp is used once, this produces a single set bit;
* if a temp is used multiple times, this produces a set.
*/
- for (i = 0; i < nb_iargs; i++) {
+ for (int i = 0; i < nb_iargs; i++) {
const TCGCallArgumentLoc *loc = &info->in[i];
ts = arg_temp(op->args[nb_oargs + i]);
@@ -3733,6 +4130,7 @@ liveness_pass_1(TCGContext *s)
}
break;
case INDEX_op_insn_start:
+ assert_carry_dead(s);
break;
case INDEX_op_discard:
/* mark the temporary as dead */
@@ -3741,62 +4139,15 @@ liveness_pass_1(TCGContext *s)
la_reset_pref(ts);
break;
- case INDEX_op_add2_i32:
- opc_new = INDEX_op_add_i32;
- goto do_addsub2;
- case INDEX_op_sub2_i32:
- opc_new = INDEX_op_sub_i32;
- goto do_addsub2;
- case INDEX_op_add2_i64:
- opc_new = INDEX_op_add_i64;
- goto do_addsub2;
- case INDEX_op_sub2_i64:
- opc_new = INDEX_op_sub_i64;
- do_addsub2:
- nb_iargs = 4;
- nb_oargs = 2;
- /* Test if the high part of the operation is dead, but not
- the low part. The result can be optimized to a simple
- add or sub. This happens often for x86_64 guest when the
- cpu mode is set to 32 bit. */
- if (arg_temp(op->args[1])->state == TS_DEAD) {
- if (arg_temp(op->args[0])->state == TS_DEAD) {
- goto do_remove;
- }
- /* Replace the opcode and adjust the args in place,
- leaving 3 unused args at the end. */
- op->opc = opc = opc_new;
- op->args[1] = op->args[2];
- op->args[2] = op->args[4];
- /* Fall through and mark the single-word operation live. */
- nb_iargs = 2;
- nb_oargs = 1;
- }
- goto do_not_remove;
-
- case INDEX_op_mulu2_i32:
- opc_new = INDEX_op_mul_i32;
- opc_new2 = INDEX_op_muluh_i32;
- have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
- goto do_mul2;
- case INDEX_op_muls2_i32:
- opc_new = INDEX_op_mul_i32;
- opc_new2 = INDEX_op_mulsh_i32;
- have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
- goto do_mul2;
- case INDEX_op_mulu2_i64:
- opc_new = INDEX_op_mul_i64;
- opc_new2 = INDEX_op_muluh_i64;
- have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
- goto do_mul2;
- case INDEX_op_muls2_i64:
- opc_new = INDEX_op_mul_i64;
- opc_new2 = INDEX_op_mulsh_i64;
- have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
+ case INDEX_op_muls2:
+ opc_new = INDEX_op_mul;
+ opc_new2 = INDEX_op_mulsh;
goto do_mul2;
+ case INDEX_op_mulu2:
+ opc_new = INDEX_op_mul;
+ opc_new2 = INDEX_op_muluh;
do_mul2:
- nb_iargs = 2;
- nb_oargs = 2;
+ assert_carry_dead(s);
if (arg_temp(op->args[1])->state == TS_DEAD) {
if (arg_temp(op->args[0])->state == TS_DEAD) {
/* Both parts of the operation are dead. */
@@ -3806,7 +4157,8 @@ liveness_pass_1(TCGContext *s)
op->opc = opc = opc_new;
op->args[1] = op->args[2];
op->args[2] = op->args[3];
- } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
+ } else if (arg_temp(op->args[0])->state == TS_DEAD &&
+ tcg_op_supported(opc_new2, TCGOP_TYPE(op), 0)) {
/* The low part of the operation is dead; generate the high. */
op->opc = opc = opc_new2;
op->args[0] = op->args[1];
@@ -3816,19 +4168,94 @@ liveness_pass_1(TCGContext *s)
goto do_not_remove;
}
/* Mark the single-word operation live. */
- nb_oargs = 1;
goto do_not_remove;
- default:
- /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
- nb_iargs = def->nb_iargs;
- nb_oargs = def->nb_oargs;
+ case INDEX_op_addco:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ op->opc = opc = INDEX_op_add;
+ goto do_default;
+
+ case INDEX_op_addcio:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ op->opc = opc = INDEX_op_addci;
+ goto do_default;
+
+ case INDEX_op_subbo:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ /* Lower to sub, but this may also require canonicalization. */
+ op->opc = opc = INDEX_op_sub;
+ ts = arg_temp(op->args[2]);
+ if (ts->kind == TEMP_CONST) {
+ ts = tcg_constant_internal(ts->type, -ts->val);
+ if (ts->state_ptr == NULL) {
+ tcg_debug_assert(temp_idx(ts) == nb_temps);
+ nb_temps++;
+ ts->state_ptr = tcg_malloc(sizeof(TCGRegSet));
+ ts->state = TS_DEAD;
+ la_reset_pref(ts);
+ }
+ op->args[2] = temp_arg(ts);
+ op->opc = opc = INDEX_op_add;
+ }
+ goto do_default;
+
+ case INDEX_op_subbio:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ op->opc = opc = INDEX_op_subbi;
+ goto do_default;
+
+ case INDEX_op_addc1o:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ /* Lower to add, add +1. */
+ op_prev = tcg_op_insert_before(s, op, INDEX_op_add,
+ TCGOP_TYPE(op), 3);
+ op_prev->args[0] = op->args[0];
+ op_prev->args[1] = op->args[1];
+ op_prev->args[2] = op->args[2];
+ op->opc = opc = INDEX_op_add;
+ op->args[1] = op->args[0];
+ ts = arg_temp(op->args[0]);
+ ts = tcg_constant_internal(ts->type, 1);
+ op->args[2] = temp_arg(ts);
+ goto do_default;
+
+ case INDEX_op_subb1o:
+ if (s->carry_live) {
+ goto do_not_remove;
+ }
+ /* Lower to sub, add -1. */
+ op_prev = tcg_op_insert_before(s, op, INDEX_op_sub,
+ TCGOP_TYPE(op), 3);
+ op_prev->args[0] = op->args[0];
+ op_prev->args[1] = op->args[1];
+ op_prev->args[2] = op->args[2];
+ op->opc = opc = INDEX_op_add;
+ op->args[1] = op->args[0];
+ ts = arg_temp(op->args[0]);
+ ts = tcg_constant_internal(ts->type, -1);
+ op->args[2] = temp_arg(ts);
+ goto do_default;
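
When liveness shows the carry-out is dead, the cases above lower the carry-aware opcodes to ordinary arithmetic: subbo becomes sub (or an add of the negated constant), and addc1o/subb1o become the base operation followed by an add of +1 or -1. A tiny self-contained check of the arithmetic identities this relies on (two's-complement unsigned wraparound); it is only an illustration, not QEMU code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ull, b = 0x0fedcba987654321ull;

    /* subbo with dead carry: a - c  ==  a + (-c) for a constant c. */
    assert(a - b == a + (uint64_t)-b);

    /* addc1o with dead carry: the base add, then an add of +1. */
    assert(a + b + 1 == (a + b) + (uint64_t)1);

    /* subb1o with dead carry: the base sub, then an add of -1. */
    assert(a - b - 1 == (a - b) + (uint64_t)-1);

    return 0;
}
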
- /* Test if the operation can be removed because all
- its outputs are dead. We assume that nb_oargs == 0
- implies side effects */
- if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
- for (i = 0; i < nb_oargs; i++) {
+ default:
+ do_default:
+ /*
+ * Test if the operation can be removed because all
+ * its outputs are dead. We assume that nb_oargs == 0
+ * implies side effects.
+ */
+ def = &tcg_op_defs[opc];
+ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && def->nb_oargs != 0) {
+ for (int i = def->nb_oargs - 1; i >= 0; i--) {
if (arg_temp(op->args[i])->state != TS_DEAD) {
goto do_not_remove;
}
@@ -3842,7 +4269,11 @@ liveness_pass_1(TCGContext *s)
break;
do_not_remove:
- for (i = 0; i < nb_oargs; i++) {
+ def = &tcg_op_defs[opc];
+ nb_iargs = def->nb_iargs;
+ nb_oargs = def->nb_oargs;
+
+ for (int i = 0; i < nb_oargs; i++) {
ts = arg_temp(op->args[i]);
/* Remember the preference of the uses that followed. */
@@ -3863,12 +4294,16 @@ liveness_pass_1(TCGContext *s)
/* If end of basic block, update. */
if (def->flags & TCG_OPF_BB_EXIT) {
+ assert_carry_dead(s);
la_func_end(s, nb_globals, nb_temps);
} else if (def->flags & TCG_OPF_COND_BRANCH) {
+ assert_carry_dead(s);
la_bb_sync(s, nb_globals, nb_temps);
} else if (def->flags & TCG_OPF_BB_END) {
+ assert_carry_dead(s);
la_bb_end(s, nb_globals, nb_temps);
} else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
+ assert_carry_dead(s);
la_global_sync(s, nb_globals);
if (def->flags & TCG_OPF_CALL_CLOBBER) {
la_cross_call(s, nb_temps);
@@ -3876,15 +4311,18 @@ liveness_pass_1(TCGContext *s)
}
/* Record arguments that die in this opcode. */
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
ts = arg_temp(op->args[i]);
if (ts->state & TS_DEAD) {
arg_life |= DEAD_ARG << i;
}
}
+ if (def->flags & TCG_OPF_CARRY_OUT) {
+ s->carry_live = false;
+ }
/* Input arguments are live for preceding opcodes. */
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
ts = arg_temp(op->args[i]);
if (ts->state & TS_DEAD) {
/* For operands that were dead, initially allow
@@ -3893,11 +4331,13 @@ liveness_pass_1(TCGContext *s)
ts->state &= ~TS_DEAD;
}
}
+ if (def->flags & TCG_OPF_CARRY_IN) {
+ s->carry_live = true;
+ }
/* Incorporate constraints for this operand. */
switch (opc) {
- case INDEX_op_mov_i32:
- case INDEX_op_mov_i64:
+ case INDEX_op_mov:
/* Note that these are TCG_OPF_NOT_PRESENT and do not
have proper constraints. That said, special case
moves to propagate preferences backward. */
@@ -3908,8 +4348,9 @@ liveness_pass_1(TCGContext *s)
break;
default:
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
- const TCGArgConstraint *ct = &def->args_ct[i];
+ args_ct = opcode_args_ct(op);
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ const TCGArgConstraint *ct = &args_ct[i];
TCGRegSet set, *pset;
ts = arg_temp(op->args[i]);
@@ -3932,6 +4373,7 @@ liveness_pass_1(TCGContext *s)
}
op->life = arg_life;
}
+ assert_carry_dead(s);
}
/* Liveness analysis: Convert indirect regs to direct temporaries. */
@@ -4002,10 +4444,8 @@ liveness_pass_2(TCGContext *s)
arg_ts = arg_temp(op->args[i]);
dir_ts = arg_ts->state_ptr;
if (dir_ts && arg_ts->state == TS_DEAD) {
- TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
- ? INDEX_op_ld_i32
- : INDEX_op_ld_i64);
- TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
+ TCGOp *lop = tcg_op_insert_before(s, op, INDEX_op_ld,
+ arg_ts->type, 3);
lop->args[0] = temp_arg(dir_ts);
lop->args[1] = temp_arg(arg_ts->mem_base);
@@ -4054,7 +4494,7 @@ liveness_pass_2(TCGContext *s)
}
/* Outputs become available. */
- if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
+ if (opc == INDEX_op_mov) {
arg_ts = arg_temp(op->args[0]);
dir_ts = arg_ts->state_ptr;
if (dir_ts) {
@@ -4065,10 +4505,8 @@ liveness_pass_2(TCGContext *s)
arg_ts->state = 0;
if (NEED_SYNC_ARG(0)) {
- TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
- ? INDEX_op_st_i32
- : INDEX_op_st_i64);
- TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
+ TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st,
+ arg_ts->type, 3);
TCGTemp *out_ts = dir_ts;
if (IS_DEAD_ARG(0)) {
@@ -4101,10 +4539,8 @@ liveness_pass_2(TCGContext *s)
/* Sync outputs upon their last write. */
if (NEED_SYNC_ARG(i)) {
- TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
- ? INDEX_op_st_i32
- : INDEX_op_st_i64);
- TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
+ TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st,
+ arg_ts->type, 3);
sop->args[0] = temp_arg(dir_ts);
sop->args[1] = temp_arg(arg_ts->mem_base);
@@ -4462,6 +4898,9 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
ts->mem_coherent = 0;
break;
case TEMP_VAL_MEM:
+ if (!ts->mem_allocated) {
+ temp_allocate_frame(s, ts);
+ }
reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
preferred_regs, ts->indirect_base);
tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
@@ -4514,9 +4953,8 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
- int i;
-
- for (i = s->nb_globals; i < s->nb_temps; i++) {
+ assert_carry_dead(s);
+ for (int i = s->nb_globals; i < s->nb_temps; i++) {
TCGTemp *ts = &s->temps[i];
switch (ts->kind) {
@@ -4547,6 +4985,7 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
*/
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
+ assert_carry_dead(s);
sync_globals(s, allocated_regs);
for (int i = s->nb_globals; i < s->nb_temps; i++) {
@@ -4696,6 +5135,7 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
const TCGLifeData arg_life = op->life;
TCGRegSet dup_out_regs, dup_in_regs;
+ const TCGArgConstraint *dup_args_ct;
TCGTemp *its, *ots;
TCGType itype, vtype;
unsigned vece;
@@ -4710,11 +5150,11 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
itype = its->type;
vece = TCGOP_VECE(op);
- vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+ vtype = TCGOP_TYPE(op);
if (its->val_type == TEMP_VAL_CONST) {
/* Propagate constant via movi -> dupi. */
- tcg_target_ulong val = its->val;
+ tcg_target_ulong val = dup_const(vece, its->val);
if (IS_DEAD_ARG(1)) {
temp_dead(s, its);
}
@@ -4722,8 +5162,9 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
return;
}
- dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
- dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
+ dup_args_ct = opcode_args_ct(op);
+ dup_out_regs = dup_args_ct[0].regs;
+ dup_in_regs = dup_args_ct[1].regs;
/* Allocate the output register now. */
if (ots->val_type != TEMP_VAL_REG) {
@@ -4809,12 +5250,17 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
int i, k, nb_iargs, nb_oargs;
TCGReg reg;
TCGArg arg;
+ const TCGArgConstraint *args_ct;
const TCGArgConstraint *arg_ct;
TCGTemp *ts;
TCGArg new_args[TCG_MAX_OP_ARGS];
int const_args[TCG_MAX_OP_ARGS];
TCGCond op_cond;
+ if (def->flags & TCG_OPF_CARRY_IN) {
+ tcg_debug_assert(s->carry_live);
+ }
+
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
@@ -4827,22 +5273,18 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
o_allocated_regs = s->reserved_regs;
switch (op->opc) {
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
op_cond = op->args[2];
break;
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
case INDEX_op_cmp_vec:
op_cond = op->args[3];
break;
case INDEX_op_brcond2_i32:
op_cond = op->args[4];
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
case INDEX_op_setcond2_i32:
case INDEX_op_cmpsel_vec:
op_cond = op->args[5];
@@ -4853,6 +5295,8 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;
}
+ args_ct = opcode_args_ct(op);
+
/* satisfy input constraints */
for (k = 0; k < nb_iargs; k++) {
TCGRegSet i_preferred_regs, i_required_regs;
@@ -4860,18 +5304,28 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
TCGTemp *ts2;
int i1, i2;
- i = def->args_ct[nb_oargs + k].sort_index;
+ i = args_ct[nb_oargs + k].sort_index;
arg = op->args[i];
- arg_ct = &def->args_ct[i];
+ arg_ct = &args_ct[i];
ts = arg_temp(arg);
- if (ts->val_type == TEMP_VAL_CONST
- && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
- op_cond, TCGOP_VECE(op))) {
- /* constant is OK for instruction */
- const_args[i] = 1;
- new_args[i] = ts->val;
- continue;
+ if (ts->val_type == TEMP_VAL_CONST) {
+#ifdef TCG_REG_ZERO
+ if (ts->val == 0 && (arg_ct->ct & TCG_CT_REG_ZERO)) {
+ /* Hardware zero register: indicate register via non-const. */
+ const_args[i] = 0;
+ new_args[i] = TCG_REG_ZERO;
+ continue;
+ }
+#endif
+
+ if (tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
+ op_cond, TCGOP_VECE(op))) {
+ /* constant is OK for instruction */
+ const_args[i] = 1;
+ new_args[i] = ts->val;
+ continue;
+ }
}
reg = ts->reg;
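
The new TCG_REG_ZERO branch above lets a constant-zero input be satisfied by the backend's hardware zero register (when the constraint carries TCG_CT_REG_ZERO) instead of matching an immediate. A standalone decision sketch of that idea; HAVE_ZERO_REG, FAKE_ZERO_REG and Operand are invented names, not the QEMU definitions:

#include <stdbool.h>
#include <stdio.h>

#define HAVE_ZERO_REG 1      /* pretend the backend defines TCG_REG_ZERO */
#define FAKE_ZERO_REG 31     /* stand-in register number for a zero reg  */

typedef struct {
    bool is_const;           /* emit as an immediate operand */
    int  reg_or_val;         /* register number, or the constant value */
} Operand;

static Operand place_const_input(long val, bool ct_accepts_zero_reg,
                                 bool ct_accepts_const)
{
    if (HAVE_ZERO_REG && val == 0 && ct_accepts_zero_reg) {
        /* Route a constant zero through the hardware zero register. */
        return (Operand){ .is_const = false, .reg_or_val = FAKE_ZERO_REG };
    }
    if (ct_accepts_const) {
        return (Operand){ .is_const = true, .reg_or_val = (int)val };
    }
    /* Otherwise the value would have to be loaded into a scratch register. */
    return (Operand){ .is_const = false, .reg_or_val = -1 };
}

int main(void)
{
    Operand o = place_const_input(0, true, true);
    printf("const 0 -> %s %d\n", o.is_const ? "imm" : "reg", o.reg_or_val);
    return 0;
}
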
@@ -4892,7 +5346,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
* register and move it.
*/
if (temp_readonly(ts) || !IS_DEAD_ARG(i)
- || def->args_ct[arg_ct->alias_index].newreg) {
+ || args_ct[arg_ct->alias_index].newreg) {
allocate_new_reg = true;
} else if (ts->val_type == TEMP_VAL_REG) {
/*
@@ -5063,6 +5517,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
tcg_reg_alloc_bb_end(s, i_allocated_regs);
} else {
if (def->flags & TCG_OPF_CALL_CLOBBER) {
+ assert_carry_dead(s);
/* XXX: permit generic clobber register list ? */
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
@@ -5077,10 +5532,10 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
/* satisfy the output constraints */
- for(k = 0; k < nb_oargs; k++) {
- i = def->args_ct[k].sort_index;
+ for (k = 0; k < nb_oargs; k++) {
+ i = args_ct[k].sort_index;
arg = op->args[i];
- arg_ct = &def->args_ct[i];
+ arg_ct = &args_ct[i];
ts = arg_temp(arg);
/* ENV should not be modified. */
@@ -5139,50 +5594,345 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
/* emit instruction */
+ TCGType type = TCGOP_TYPE(op);
switch (op->opc) {
- case INDEX_op_ext8s_i32:
- tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+ case INDEX_op_addc1o:
+ tcg_out_set_carry(s);
+ /* fall through */
+ case INDEX_op_add:
+ case INDEX_op_addcio:
+ case INDEX_op_addco:
+ case INDEX_op_and:
+ case INDEX_op_andc:
+ case INDEX_op_clz:
+ case INDEX_op_ctz:
+ case INDEX_op_divs:
+ case INDEX_op_divu:
+ case INDEX_op_eqv:
+ case INDEX_op_mul:
+ case INDEX_op_mulsh:
+ case INDEX_op_muluh:
+ case INDEX_op_nand:
+ case INDEX_op_nor:
+ case INDEX_op_or:
+ case INDEX_op_orc:
+ case INDEX_op_rems:
+ case INDEX_op_remu:
+ case INDEX_op_rotl:
+ case INDEX_op_rotr:
+ case INDEX_op_sar:
+ case INDEX_op_shl:
+ case INDEX_op_shr:
+ case INDEX_op_xor:
+ {
+ const TCGOutOpBinary *out =
+ container_of(all_outop[op->opc], TCGOutOpBinary, base);
+
+ /* Constants should never appear in the first source operand. */
+ tcg_debug_assert(!const_args[1]);
+ if (const_args[2]) {
+ out->out_rri(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
+ case INDEX_op_sub:
+ {
+ const TCGOutOpSubtract *out = &outop_sub;
+
+ /*
+ * Constants should never appear in the second source operand.
+                 * These are folded into an add of the negated constant.
+ */
+ tcg_debug_assert(!const_args[2]);
+ if (const_args[1]) {
+ out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
+ case INDEX_op_subb1o:
+ tcg_out_set_borrow(s);
+ /* fall through */
+ case INDEX_op_addci:
+ case INDEX_op_subbi:
+ case INDEX_op_subbio:
+ case INDEX_op_subbo:
+ {
+ const TCGOutOpAddSubCarry *out =
+ container_of(all_outop[op->opc], TCGOutOpAddSubCarry, base);
+
+ if (const_args[2]) {
+ if (const_args[1]) {
+ out->out_rii(s, type, new_args[0],
+ new_args[1], new_args[2]);
+ } else {
+ out->out_rri(s, type, new_args[0],
+ new_args[1], new_args[2]);
+ }
+ } else if (const_args[1]) {
+ out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
+ case INDEX_op_bswap64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
+ assert(TCG_TARGET_REG_BITS == 64);
+ /* fall through */
+ case INDEX_op_ctpop:
+ case INDEX_op_neg:
+ case INDEX_op_not:
+ {
+ const TCGOutOpUnary *out =
+ container_of(all_outop[op->opc], TCGOutOpUnary, base);
+
+ /* Constants should have been folded. */
+ tcg_debug_assert(!const_args[1]);
+ out->out_rr(s, type, new_args[0], new_args[1]);
+ }
+ break;
+
+ case INDEX_op_bswap16:
+ case INDEX_op_bswap32:
+ {
+ const TCGOutOpBswap *out =
+ container_of(all_outop[op->opc], TCGOutOpBswap, base);
+
+ tcg_debug_assert(!const_args[1]);
+ out->out_rr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
break;
- case INDEX_op_ext8s_i64:
- tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+
+ case INDEX_op_deposit:
+ {
+ const TCGOutOpDeposit *out = &outop_deposit;
+
+ if (const_args[2]) {
+ tcg_debug_assert(!const_args[1]);
+ out->out_rri(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3], new_args[4]);
+ } else if (const_args[1]) {
+ tcg_debug_assert(new_args[1] == 0);
+ tcg_debug_assert(!const_args[2]);
+ out->out_rzr(s, type, new_args[0], new_args[2],
+ new_args[3], new_args[4]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3], new_args[4]);
+ }
+ }
break;
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- tcg_out_ext8u(s, new_args[0], new_args[1]);
+
+ case INDEX_op_divs2:
+ case INDEX_op_divu2:
+ {
+ const TCGOutOpDivRem *out =
+ container_of(all_outop[op->opc], TCGOutOpDivRem, base);
+
+ /* Only used by x86 and s390x, which use matching constraints. */
+ tcg_debug_assert(new_args[0] == new_args[2]);
+ tcg_debug_assert(new_args[1] == new_args[3]);
+ tcg_debug_assert(!const_args[4]);
+ out->out_rr01r(s, type, new_args[0], new_args[1], new_args[4]);
+ }
break;
- case INDEX_op_ext16s_i32:
- tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+
+ case INDEX_op_extract:
+ case INDEX_op_sextract:
+ {
+ const TCGOutOpExtract *out =
+ container_of(all_outop[op->opc], TCGOutOpExtract, base);
+
+ tcg_debug_assert(!const_args[1]);
+ out->out_rr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
break;
- case INDEX_op_ext16s_i64:
- tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+
+ case INDEX_op_extract2:
+ {
+ const TCGOutOpExtract2 *out = &outop_extract2;
+
+ tcg_debug_assert(!const_args[1]);
+ tcg_debug_assert(!const_args[2]);
+ out->out_rrr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
break;
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- tcg_out_ext16u(s, new_args[0], new_args[1]);
+
+ case INDEX_op_ld8u:
+ case INDEX_op_ld8s:
+ case INDEX_op_ld16u:
+ case INDEX_op_ld16s:
+ case INDEX_op_ld32u:
+ case INDEX_op_ld32s:
+ case INDEX_op_ld:
+ {
+ const TCGOutOpLoad *out =
+ container_of(all_outop[op->opc], TCGOutOpLoad, base);
+
+ tcg_debug_assert(!const_args[1]);
+ out->out(s, type, new_args[0], new_args[1], new_args[2]);
+ }
break;
- case INDEX_op_ext32s_i64:
- tcg_out_ext32s(s, new_args[0], new_args[1]);
+
+ case INDEX_op_muls2:
+ case INDEX_op_mulu2:
+ {
+ const TCGOutOpMul2 *out =
+ container_of(all_outop[op->opc], TCGOutOpMul2, base);
+
+ tcg_debug_assert(!const_args[2]);
+ tcg_debug_assert(!const_args[3]);
+ out->out_rrrr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
break;
- case INDEX_op_ext32u_i64:
- tcg_out_ext32u(s, new_args[0], new_args[1]);
+
+ case INDEX_op_st32:
+ /* Use tcg_op_st w/ I32. */
+ type = TCG_TYPE_I32;
+ /* fall through */
+ case INDEX_op_st:
+ case INDEX_op_st8:
+ case INDEX_op_st16:
+ {
+ const TCGOutOpStore *out =
+ container_of(all_outop[op->opc], TCGOutOpStore, base);
+
+ if (const_args[0]) {
+ out->out_i(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_r(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
break;
- case INDEX_op_ext_i32_i64:
- tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
+
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ {
+ const TCGOutOpQemuLdSt *out =
+ container_of(all_outop[op->opc], TCGOutOpQemuLdSt, base);
+
+ out->out(s, type, new_args[0], new_args[1], new_args[2]);
+ }
break;
- case INDEX_op_extu_i32_i64:
- tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
+
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ {
+ const TCGOutOpQemuLdSt2 *out =
+ container_of(all_outop[op->opc], TCGOutOpQemuLdSt2, base);
+
+ out->out(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
break;
- case INDEX_op_extrl_i64_i32:
- tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
+
+ case INDEX_op_brcond:
+ {
+ const TCGOutOpBrcond *out = &outop_brcond;
+ TCGCond cond = new_args[2];
+ TCGLabel *label = arg_label(new_args[3]);
+
+ tcg_debug_assert(!const_args[0]);
+ if (const_args[1]) {
+ out->out_ri(s, type, cond, new_args[0], new_args[1], label);
+ } else {
+ out->out_rr(s, type, cond, new_args[0], new_args[1], label);
+ }
+ }
break;
- default:
- if (def->flags & TCG_OPF_VECTOR) {
- tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
- new_args, const_args);
- } else {
- tcg_out_op(s, op->opc, new_args, const_args);
+
+ case INDEX_op_movcond:
+ {
+ const TCGOutOpMovcond *out = &outop_movcond;
+ TCGCond cond = new_args[5];
+
+ tcg_debug_assert(!const_args[1]);
+ out->out(s, type, cond, new_args[0],
+ new_args[1], new_args[2], const_args[2],
+ new_args[3], const_args[3],
+ new_args[4], const_args[4]);
+ }
+ break;
+
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
+ {
+ const TCGOutOpSetcond *out =
+ container_of(all_outop[op->opc], TCGOutOpSetcond, base);
+ TCGCond cond = new_args[3];
+
+ tcg_debug_assert(!const_args[1]);
+ if (const_args[2]) {
+ out->out_rri(s, type, cond,
+ new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, cond,
+ new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ {
+ const TCGOutOpBrcond2 *out = &outop_brcond2;
+ TCGCond cond = new_args[4];
+ TCGLabel *label = arg_label(new_args[5]);
+
+ tcg_debug_assert(!const_args[0]);
+ tcg_debug_assert(!const_args[1]);
+ out->out(s, cond, new_args[0], new_args[1],
+ new_args[2], const_args[2],
+ new_args[3], const_args[3], label);
}
break;
+ case INDEX_op_setcond2_i32:
+ {
+ const TCGOutOpSetcond2 *out = &outop_setcond2;
+ TCGCond cond = new_args[5];
+
+ tcg_debug_assert(!const_args[1]);
+ tcg_debug_assert(!const_args[2]);
+ out->out(s, cond, new_args[0], new_args[1], new_args[2],
+ new_args[3], const_args[3], new_args[4], const_args[4]);
+ }
+ break;
+#else
+ case INDEX_op_brcond2_i32:
+ case INDEX_op_setcond2_i32:
+ g_assert_not_reached();
+#endif
+
+ case INDEX_op_goto_ptr:
+ tcg_debug_assert(!const_args[0]);
+ tcg_out_goto_ptr(s, new_args[0]);
+ break;
+
+ default:
+ tcg_debug_assert(def->flags & TCG_OPF_VECTOR);
+ tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
+ TCGOP_VECE(op), new_args, const_args);
+ break;
+ }
+
+ if (def->flags & TCG_OPF_CARRY_IN) {
+ s->carry_live = false;
+ }
+ if (def->flags & TCG_OPF_CARRY_OUT) {
+ s->carry_live = true;
}
/* move the outputs in the correct register if needed */
@@ -5204,7 +5954,7 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
const TCGLifeData arg_life = op->life;
TCGTemp *ots, *itsl, *itsh;
- TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+ TCGType vtype = TCGOP_TYPE(op);
/* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
@@ -5220,8 +5970,7 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
/* Allocate the output register now. */
if (ots->val_type != TEMP_VAL_REG) {
TCGRegSet allocated_regs = s->reserved_regs;
- TCGRegSet dup_out_regs =
- tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
+ TCGRegSet dup_out_regs = opcode_args_ct(op)[0].regs;
TCGReg oreg;
/* Make sure to not spill the input registers. */
@@ -5506,7 +6255,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
MemOp host_atom, bool allow_two_ops)
{
- MemOp align = get_alignment_bits(opc);
+ MemOp align = memop_alignment_bits(opc);
MemOp size = opc & MO_SIZE;
MemOp half = size ? size - 1 : 0;
MemOp atom = opc & MO_ATOM_MASK;
@@ -5852,7 +6601,7 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
*/
tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
TCG_TYPE_I32, TCG_TYPE_I32,
- ldst->addrlo_reg, -1);
+ ldst->addr_reg, -1);
tcg_out_helper_load_slots(s, 1, mov, parm);
tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
@@ -5860,7 +6609,7 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
next_arg += 2;
} else {
nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
- ldst->addrlo_reg, ldst->addrhi_reg);
+ ldst->addr_reg, -1);
tcg_out_helper_load_slots(s, nmov, mov, parm);
next_arg += nmov;
}
@@ -6017,21 +6766,22 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
/* Handle addr argument. */
loc = &info->in[next_arg];
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
+ if (TCG_TARGET_REG_BITS == 32) {
/*
- * 32-bit host with 32-bit guest: zero-extend the guest address
+ * 32-bit host (and thus 32-bit guest): zero-extend the guest address
* to 64-bits for the helper by storing the low part. Later,
* after we have processed the register inputs, we will load a
* zero for the high part.
*/
tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
TCG_TYPE_I32, TCG_TYPE_I32,
- ldst->addrlo_reg, -1);
+ ldst->addr_reg, -1);
next_arg += 2;
nmov += 1;
} else {
n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
- ldst->addrlo_reg, ldst->addrhi_reg);
+ ldst->addr_reg, -1);
next_arg += n;
nmov += n;
}
@@ -6079,7 +6829,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
g_assert_not_reached();
}
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ if (TCG_TARGET_REG_BITS == 32) {
/* Zero extend the address by loading a zero for the high part. */
loc = &info->in[1 + !HOST_BIG_ENDIAN];
tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
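
As the comments above explain, on a 32-bit host the guest address is widened to the 64-bit helper parameter by placing the address in the low 32-bit argument slot and an explicit zero in the high slot, with slot order depending on host endianness. A hedged standalone sketch of that placement (HOST_BIG_ENDIAN fixed to 0 here purely for the example):

#include <stdint.h>
#include <stdio.h>

#define HOST_BIG_ENDIAN 0   /* assumption for this example */

/* Split a 32-bit guest address across the two 32-bit argument slots of a
 * 64-bit helper parameter, zero-extending it in the process. */
static void place_addr64(uint32_t addr, uint32_t slots[2])
{
    slots[HOST_BIG_ENDIAN]  = addr;   /* low half of the 64-bit argument */
    slots[!HOST_BIG_ENDIAN] = 0;      /* high half: the explicit zero */
}

int main(void)
{
    uint32_t slots[2];
    place_addr64(0xdeadbeefu, slots);
    printf("slot0=%08x slot1=%08x\n", (unsigned)slots[0], (unsigned)slots[1]);
    return 0;
}
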
@@ -6090,7 +6840,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
- int i, start_words, num_insns;
+ int i, num_insns;
TCGOp *op;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
@@ -6121,6 +6871,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
}
#endif
+ /* Do not reuse any EBB that may be allocated within the TB. */
+ tcg_temp_ebb_reset_freed(s);
+
tcg_optimize(s);
reachable_code_pass(s);
@@ -6172,27 +6925,39 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
*/
s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
s->code_ptr = s->code_buf;
+ s->data_gen_ptr = NULL;
-#ifdef TCG_TARGET_NEED_LDST_LABELS
QSIMPLEQ_INIT(&s->ldst_labels);
-#endif
-#ifdef TCG_TARGET_NEED_POOL_LABELS
s->pool_labels = NULL;
-#endif
- start_words = s->insn_start_words;
s->gen_insn_data =
- tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
+ tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * INSN_START_WORDS);
tcg_out_tb_start(s);
num_insns = -1;
+ s->carry_live = false;
QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;
switch (opc) {
- case INDEX_op_mov_i32:
- case INDEX_op_mov_i64:
+ case INDEX_op_extrl_i64_i32:
+ assert(TCG_TARGET_REG_BITS == 64);
+ /*
+ * If TCG_TYPE_I32 is represented in some canonical form,
+ * e.g. zero or sign-extended, then emit as a unary op.
+ * Otherwise we can treat this as a plain move.
+ * If the output dies, treat this as a plain move, because
+ * this will be implemented with a store.
+ */
+ if (TCG_TARGET_HAS_extr_i64_i32) {
+ TCGLifeData arg_life = op->life;
+ if (!IS_DEAD_ARG(0)) {
+ goto do_default;
+ }
+ }
+ /* fall through */
+ case INDEX_op_mov:
case INDEX_op_mov_vec:
tcg_reg_alloc_mov(s, op);
break;
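
The extrl_i64_i32 special case above hinges on how the backend represents a 32-bit value inside a 64-bit register: if a canonical zero- or sign-extended form is required, a real truncation must be emitted, otherwise a plain register move (or the eventual store) already yields the right low 32 bits. A small illustration of the representations involved:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v = 0xffffffff80000001ull;

    /* A plain move keeps the upper half; callers that only read the low
     * 32 bits (or store 32 bits to memory) never notice. */
    uint64_t as_move = v;

    /* Canonical forms a backend might require for a 32-bit value held
     * in a 64-bit register. */
    uint64_t zext = (uint32_t)v;            /* zero-extended */
    int64_t  sext = (int32_t)(uint32_t)v;   /* sign-extended */

    printf("low 32 via move: %08x\n", (unsigned)(uint32_t)as_move);
    printf("zero-extended  : %016llx\n", (unsigned long long)zext);
    printf("sign-extended  : %016llx\n", (unsigned long long)(uint64_t)sext);
    return 0;
}
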
@@ -6200,6 +6965,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
tcg_reg_alloc_dup(s, op);
break;
case INDEX_op_insn_start:
+ assert_carry_dead(s);
if (num_insns >= 0) {
size_t off = tcg_current_code_size(s);
s->gen_insn_end_off[num_insns] = off;
@@ -6207,8 +6973,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
assert(s->gen_insn_end_off[num_insns] == off);
}
num_insns++;
- for (i = 0; i < start_words; ++i) {
- s->gen_insn_data[num_insns * start_words + i] =
+ for (i = 0; i < INSN_START_WORDS; ++i) {
+ s->gen_insn_data[num_insns * INSN_START_WORDS + i] =
tcg_get_insn_start_param(op, i);
}
break;
@@ -6220,6 +6986,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
tcg_out_label(s, arg_label(op->args[0]));
break;
case INDEX_op_call:
+ assert_carry_dead(s);
tcg_reg_alloc_call(s, op);
break;
case INDEX_op_exit_tb:
@@ -6228,14 +6995,22 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
case INDEX_op_goto_tb:
tcg_out_goto_tb(s, op->args[0]);
break;
+ case INDEX_op_br:
+ tcg_out_br(s, arg_label(op->args[0]));
+ break;
+ case INDEX_op_mb:
+ tcg_out_mb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
}
/* fall through */
default:
+ do_default:
/* Sanity check that we've not introduced any unhandled opcodes. */
- tcg_debug_assert(tcg_op_supported(opc));
+ tcg_debug_assert(tcg_op_supported(opc, TCGOP_TYPE(op),
+ TCGOP_FLAGS(op)));
            /* Note: it would be much faster to have specialized register
               allocator functions for some common argument patterns. */
@@ -6254,22 +7029,20 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
return -2;
}
}
+ assert_carry_dead(s);
+
tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
/* Generate TB finalization at the end of block */
-#ifdef TCG_TARGET_NEED_LDST_LABELS
i = tcg_out_ldst_finalize(s);
if (i < 0) {
return i;
}
-#endif
-#ifdef TCG_TARGET_NEED_POOL_LABELS
i = tcg_out_pool_finalize(s);
if (i < 0) {
return i;
}
-#endif
if (!tcg_resolve_relocs(s)) {
return -2;
}
diff --git a/tcg/tci.c b/tcg/tci.c
index 3afb223..700e672 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -21,9 +21,16 @@
#include "tcg/tcg.h"
#include "tcg/helper-info.h"
#include "tcg/tcg-ldst.h"
+#include "disas/dis-asm.h"
+#include "tcg-has.h"
#include <ffi.h>
+#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+#define deposit_tr glue(deposit, TCG_TARGET_REG_BITS)
+#define extract_tr glue(extract, TCG_TARGET_REG_BITS)
+#define sextract_tr glue(sextract, TCG_TARGET_REG_BITS)
+
/*
* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
* Without assertions, the interpreter runs much faster.
@@ -152,16 +159,6 @@ static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
*i4 = extract32(insn, 26, 6);
}
-static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
- TCGReg *r2, TCGReg *r3, TCGReg *r4)
-{
- *r0 = extract32(insn, 8, 4);
- *r1 = extract32(insn, 12, 4);
- *r2 = extract32(insn, 16, 4);
- *r3 = extract32(insn, 20, 4);
- *r4 = extract32(insn, 24, 4);
-}
-
static void tci_args_rrrr(uint32_t insn,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
@@ -182,17 +179,6 @@ static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
*c5 = extract32(insn, 28, 4);
}
-static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
- TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
-{
- *r0 = extract32(insn, 8, 4);
- *r1 = extract32(insn, 12, 4);
- *r2 = extract32(insn, 16, 4);
- *r3 = extract32(insn, 20, 4);
- *r4 = extract32(insn, 24, 4);
- *r5 = extract32(insn, 28, 4);
-}
-
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
bool result = false;
@@ -339,18 +325,6 @@ static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
}
}
-#if TCG_TARGET_REG_BITS == 64
-# define CASE_32_64(x) \
- case glue(glue(INDEX_op_, x), _i64): \
- case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x) \
- case glue(glue(INDEX_op_, x), _i64):
-#else
-# define CASE_32_64(x) \
- case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x)
-#endif
-
/* Interpret pseudo code in tb. */
/*
* Disable CFI checks.
@@ -364,6 +338,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
/ sizeof(uint64_t)];
+ bool carry = false;
regs[TCG_AREG0] = (tcg_target_ulong)env;
regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
@@ -372,13 +347,12 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
for (;;) {
uint32_t insn;
TCGOpcode opc;
- TCGReg r0, r1, r2, r3, r4, r5;
+ TCGReg r0, r1, r2, r3, r4;
tcg_target_ulong t1;
TCGCond condition;
uint8_t pos, len;
uint32_t tmp32;
uint64_t tmp64, taddr;
- uint64_t T1, T2;
MemOpIdx oi;
int32_t ofs;
void *ptr;
@@ -444,34 +418,25 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_l(insn, tb_ptr, &ptr);
tb_ptr = ptr;
continue;
- case INDEX_op_setcond_i32:
- tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
- regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
- break;
- case INDEX_op_movcond_i32:
- tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
- tmp32 = tci_compare32(regs[r1], regs[r2], condition);
- regs[r0] = regs[tmp32 ? r3 : r4];
- break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
- T1 = tci_uint64(regs[r2], regs[r1]);
- T2 = tci_uint64(regs[r4], regs[r3]);
- regs[r0] = tci_compare64(T1, T2, condition);
+ regs[r0] = tci_compare64(tci_uint64(regs[r2], regs[r1]),
+ tci_uint64(regs[r4], regs[r3]),
+ condition);
break;
#elif TCG_TARGET_REG_BITS == 64
- case INDEX_op_setcond_i64:
+ case INDEX_op_setcond:
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
break;
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
tmp32 = tci_compare64(regs[r1], regs[r2], condition);
regs[r0] = regs[tmp32 ? r3 : r4];
break;
#endif
- CASE_32_64(mov)
+ case INDEX_op_mov:
tci_args_rr(insn, &r0, &r1);
regs[r0] = regs[r1];
break;
@@ -483,423 +448,325 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rl(insn, tb_ptr, &r0, &ptr);
regs[r0] = *(tcg_target_ulong *)ptr;
break;
+ case INDEX_op_tci_setcarry:
+ carry = true;
+ break;
/* Load/store operations (32 bit). */
- CASE_32_64(ld8u)
+ case INDEX_op_ld8u:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint8_t *)ptr;
break;
- CASE_32_64(ld8s)
+ case INDEX_op_ld8s:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(int8_t *)ptr;
break;
- CASE_32_64(ld16u)
+ case INDEX_op_ld16u:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(uint16_t *)ptr;
break;
- CASE_32_64(ld16s)
+ case INDEX_op_ld16s:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
regs[r0] = *(int16_t *)ptr;
break;
- case INDEX_op_ld_i32:
- CASE_64(ld32u)
+ case INDEX_op_ld:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
- regs[r0] = *(uint32_t *)ptr;
+ regs[r0] = *(tcg_target_ulong *)ptr;
break;
- CASE_32_64(st8)
+ case INDEX_op_st8:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint8_t *)ptr = regs[r0];
break;
- CASE_32_64(st16)
+ case INDEX_op_st16:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
*(uint16_t *)ptr = regs[r0];
break;
- case INDEX_op_st_i32:
- CASE_64(st32)
+ case INDEX_op_st:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
- *(uint32_t *)ptr = regs[r0];
+ *(tcg_target_ulong *)ptr = regs[r0];
break;
/* Arithmetic operations (mixed 32/64 bit). */
- CASE_32_64(add)
+ case INDEX_op_add:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] + regs[r2];
break;
- CASE_32_64(sub)
+ case INDEX_op_sub:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] - regs[r2];
break;
- CASE_32_64(mul)
+ case INDEX_op_mul:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] * regs[r2];
break;
- CASE_32_64(and)
+ case INDEX_op_and:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & regs[r2];
break;
- CASE_32_64(or)
+ case INDEX_op_or:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | regs[r2];
break;
- CASE_32_64(xor)
+ case INDEX_op_xor:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ^ regs[r2];
break;
-#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
- CASE_32_64(andc)
+ case INDEX_op_andc:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & ~regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
- CASE_32_64(orc)
+ case INDEX_op_orc:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | ~regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
- CASE_32_64(eqv)
+ case INDEX_op_eqv:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] ^ regs[r2]);
break;
-#endif
-#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
- CASE_32_64(nand)
+ case INDEX_op_nand:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] & regs[r2]);
break;
-#endif
-#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
- CASE_32_64(nor)
+ case INDEX_op_nor:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] | regs[r2]);
break;
+ case INDEX_op_neg:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = -regs[r1];
+ break;
+ case INDEX_op_not:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ~regs[r1];
+ break;
+ case INDEX_op_ctpop:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop_tr(regs[r1]);
+ break;
+ case INDEX_op_addco:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ t1 = regs[r1] + regs[r2];
+ carry = t1 < regs[r1];
+ regs[r0] = t1;
+ break;
+ case INDEX_op_addci:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] + regs[r2] + carry;
+ break;
+ case INDEX_op_addcio:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ if (carry) {
+ t1 = regs[r1] + regs[r2] + 1;
+ carry = t1 <= regs[r1];
+ } else {
+ t1 = regs[r1] + regs[r2];
+ carry = t1 < regs[r1];
+ }
+ regs[r0] = t1;
+ break;
+ case INDEX_op_subbo:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ carry = regs[r1] < regs[r2];
+ regs[r0] = regs[r1] - regs[r2];
+ break;
+ case INDEX_op_subbi:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] - regs[r2] - carry;
+ break;
+ case INDEX_op_subbio:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ if (carry) {
+ carry = regs[r1] <= regs[r2];
+ regs[r0] = regs[r1] - regs[r2] - 1;
+ } else {
+ carry = regs[r1] < regs[r2];
+ regs[r0] = regs[r1] - regs[r2];
+ }
+ break;
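
The interpreter keeps the carry flag in a plain bool, and the addcio/subbio cases above derive the new flag from the truncated result alone: with an incoming carry, the carry-out of a + b + 1 is t <= a, otherwise it is t < a, and symmetrically a <= b vs a < b for the borrow. A self-contained check of those predicates against a 128-bit reference (assumes a compiler providing unsigned __int128, e.g. GCC or Clang on a 64-bit host):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool addc_out(uint64_t a, uint64_t b, bool cin, uint64_t *r)
{
    uint64_t t = a + b + cin;
    *r = t;
    return cin ? t <= a : t < a;        /* carry-out predicate used above */
}

static bool subb_out(uint64_t a, uint64_t b, bool bin, uint64_t *r)
{
    *r = a - b - bin;
    return bin ? a <= b : a < b;        /* borrow-out predicate used above */
}

int main(void)
{
    static const uint64_t samples[] = {
        0, 1, 2, ~0ull, ~0ull - 1, 0x8000000000000000ull
    };
    const int n = sizeof(samples) / sizeof(samples[0]);

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            for (int c = 0; c < 2; c++) {
                uint64_t a = samples[i], b = samples[j], r;
                unsigned __int128 ref = (unsigned __int128)a + b + c;

                assert(addc_out(a, b, c, &r) == (int)(ref >> 64));
                assert(r == (uint64_t)ref);

                ref = (unsigned __int128)a - b - c;
                assert(subb_out(a, b, c, &r) == ((ref >> 64) != 0));
                assert(r == (uint64_t)ref);
            }
        }
    }
    return 0;
}
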
+ case INDEX_op_muls2:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+#if TCG_TARGET_REG_BITS == 32
+ tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+#else
+ muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#endif
+ break;
+ case INDEX_op_mulu2:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+#if TCG_TARGET_REG_BITS == 32
+ tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+#else
+ mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+#endif
+ break;
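
On a 64-bit host the muls2/mulu2 cases above rely on QEMU's muls64()/mulu64() helpers to produce the full 128-bit product split across two registers; on a 32-bit host the same is done with a 64-bit product of 32-bit operands. A standalone equivalent of the 64-bit case using the __int128 compiler extension (an assumption for the sketch, not what TCI itself uses):

#include <stdint.h>
#include <stdio.h>

/* Full 64x64->128 multiplies, split into low/high halves as mulu2/muls2 do. */
static void mulu2(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
{
    unsigned __int128 p = (unsigned __int128)a * b;
    *lo = (uint64_t)p;
    *hi = (uint64_t)(p >> 64);
}

static void muls2(uint64_t *lo, uint64_t *hi, int64_t a, int64_t b)
{
    __int128 p = (__int128)a * b;
    *lo = (uint64_t)p;
    *hi = (uint64_t)((unsigned __int128)p >> 64);
}

int main(void)
{
    uint64_t lo, hi;
    mulu2(&lo, &hi, ~0ull, ~0ull);
    printf("unsigned: hi=%016llx lo=%016llx\n",
           (unsigned long long)hi, (unsigned long long)lo);
    muls2(&lo, &hi, -1, -1);
    printf("signed  : hi=%016llx lo=%016llx\n",
           (unsigned long long)hi, (unsigned long long)lo);
    return 0;
}
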
/* Arithmetic operations (32 bit). */
- case INDEX_op_div_i32:
+ case INDEX_op_tci_divs32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
break;
- case INDEX_op_divu_i32:
+ case INDEX_op_tci_divu32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
break;
- case INDEX_op_rem_i32:
+ case INDEX_op_tci_rems32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
break;
- case INDEX_op_remu_i32:
+ case INDEX_op_tci_remu32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
break;
-#if TCG_TARGET_HAS_clz_i32
- case INDEX_op_clz_i32:
+ case INDEX_op_tci_clz32:
tci_args_rrr(insn, &r0, &r1, &r2);
tmp32 = regs[r1];
regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_ctz_i32
- case INDEX_op_ctz_i32:
+ case INDEX_op_tci_ctz32:
tci_args_rrr(insn, &r0, &r1, &r2);
tmp32 = regs[r1];
regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_ctpop_i32
- case INDEX_op_ctpop_i32:
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ctpop32(regs[r1]);
+ case INDEX_op_tci_setcond32:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
+ break;
+ case INDEX_op_tci_movcond32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare32(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
break;
-#endif
- /* Shift/rotate operations (32 bit). */
+ /* Shift/rotate operations. */
- case INDEX_op_shl_i32:
+ case INDEX_op_shl:
tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
+ regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS);
break;
- case INDEX_op_shr_i32:
+ case INDEX_op_shr:
tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
+ regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS);
break;
- case INDEX_op_sar_i32:
+ case INDEX_op_sar:
tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
+ regs[r0] = ((tcg_target_long)regs[r1]
+ >> (regs[r2] % TCG_TARGET_REG_BITS));
break;
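
The shift cases above reduce the count modulo TCG_TARGET_REG_BITS before shifting, because a C shift by a count greater than or equal to the operand width is undefined behaviour, even though the encoded TCG operation keeps counts in range. A minimal illustration of the masking idiom (REG_BITS is just a stand-in for the real macro; the arithmetic right shift of a negative value relies on the usual implementation-defined behaviour of mainstream compilers):

#include <stdint.h>
#include <stdio.h>

#define REG_BITS 64   /* stand-in for TCG_TARGET_REG_BITS */

static uint64_t do_shl(uint64_t v, uint64_t count)
{
    /* Keep the count in [0, REG_BITS) to avoid undefined behaviour in C. */
    return v << (count % REG_BITS);
}

static int64_t do_sar(int64_t v, uint64_t count)
{
    /* Arithmetic right shift of a negative value is implementation-defined
     * in C, but behaves as expected on mainstream compilers. */
    return v >> (count % REG_BITS);
}

int main(void)
{
    printf("%016llx\n", (unsigned long long)do_shl(1, 65));   /* 65 % 64 == 1 */
    printf("%016llx\n", (unsigned long long)do_sar(INT64_MIN, 63));
    return 0;
}
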
-#if TCG_TARGET_HAS_rot_i32
- case INDEX_op_rotl_i32:
+ case INDEX_op_tci_rotl32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = rol32(regs[r1], regs[r2] & 31);
break;
- case INDEX_op_rotr_i32:
+ case INDEX_op_tci_rotr32:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror32(regs[r1], regs[r2] & 31);
break;
-#endif
-#if TCG_TARGET_HAS_deposit_i32
- case INDEX_op_deposit_i32:
+ case INDEX_op_deposit:
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
- regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
+ regs[r0] = deposit_tr(regs[r1], pos, len, regs[r2]);
break;
-#endif
-#if TCG_TARGET_HAS_extract_i32
- case INDEX_op_extract_i32:
+ case INDEX_op_extract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract32(regs[r1], pos, len);
+ regs[r0] = extract_tr(regs[r1], pos, len);
break;
-#endif
-#if TCG_TARGET_HAS_sextract_i32
- case INDEX_op_sextract_i32:
+ case INDEX_op_sextract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = sextract32(regs[r1], pos, len);
+ regs[r0] = sextract_tr(regs[r1], pos, len);
break;
-#endif
- case INDEX_op_brcond_i32:
+ case INDEX_op_brcond:
tci_args_rl(insn, tb_ptr, &r0, &ptr);
- if ((uint32_t)regs[r0]) {
+ if (regs[r0]) {
tb_ptr = ptr;
}
break;
-#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
- case INDEX_op_add2_i32:
- tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
- T1 = tci_uint64(regs[r3], regs[r2]);
- T2 = tci_uint64(regs[r5], regs[r4]);
- tci_write_reg64(regs, r1, r0, T1 + T2);
- break;
-#endif
-#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
- case INDEX_op_sub2_i32:
- tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
- T1 = tci_uint64(regs[r3], regs[r2]);
- T2 = tci_uint64(regs[r5], regs[r4]);
- tci_write_reg64(regs, r1, r0, T1 - T2);
- break;
-#endif
-#if TCG_TARGET_HAS_mulu2_i32
- case INDEX_op_mulu2_i32:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
- tci_write_reg64(regs, r1, r0, tmp64);
- break;
-#endif
-#if TCG_TARGET_HAS_muls2_i32
- case INDEX_op_muls2_i32:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
- tci_write_reg64(regs, r1, r0, tmp64);
- break;
-#endif
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
- CASE_32_64(ext8s)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = (int8_t)regs[r1];
- break;
-#endif
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
- TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
- CASE_32_64(ext16s)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = (int16_t)regs[r1];
- break;
-#endif
-#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
- CASE_32_64(ext8u)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = (uint8_t)regs[r1];
- break;
-#endif
-#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
- CASE_32_64(ext16u)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = (uint16_t)regs[r1];
- break;
-#endif
-#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
- CASE_32_64(bswap16)
+ case INDEX_op_bswap16:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap16(regs[r1]);
break;
-#endif
-#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
- CASE_32_64(bswap32)
+ case INDEX_op_bswap32:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap32(regs[r1]);
break;
-#endif
-#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
- CASE_32_64(not)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ~regs[r1];
- break;
-#endif
- CASE_32_64(neg)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = -regs[r1];
- break;
#if TCG_TARGET_REG_BITS == 64
/* Load/store operations (64 bit). */
- case INDEX_op_ld32s_i64:
+ case INDEX_op_ld32u:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
- regs[r0] = *(int32_t *)ptr;
+ regs[r0] = *(uint32_t *)ptr;
break;
- case INDEX_op_ld_i64:
+ case INDEX_op_ld32s:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
- regs[r0] = *(uint64_t *)ptr;
+ regs[r0] = *(int32_t *)ptr;
break;
- case INDEX_op_st_i64:
+ case INDEX_op_st32:
tci_args_rrs(insn, &r0, &r1, &ofs);
ptr = (void *)(regs[r1] + ofs);
- *(uint64_t *)ptr = regs[r0];
+ *(uint32_t *)ptr = regs[r0];
break;
/* Arithmetic operations (64 bit). */
- case INDEX_op_div_i64:
+ case INDEX_op_divs:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
break;
- case INDEX_op_divu_i64:
+ case INDEX_op_divu:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
break;
- case INDEX_op_rem_i64:
+ case INDEX_op_rems:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
break;
- case INDEX_op_remu_i64:
+ case INDEX_op_remu:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
break;
-#if TCG_TARGET_HAS_clz_i64
- case INDEX_op_clz_i64:
+ case INDEX_op_clz:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_ctz_i64
- case INDEX_op_ctz_i64:
+ case INDEX_op_ctz:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
break;
-#endif
-#if TCG_TARGET_HAS_ctpop_i64
- case INDEX_op_ctpop_i64:
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ctpop64(regs[r1]);
- break;
-#endif
-#if TCG_TARGET_HAS_mulu2_i64
- case INDEX_op_mulu2_i64:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
- break;
-#endif
-#if TCG_TARGET_HAS_muls2_i64
- case INDEX_op_muls2_i64:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
- break;
-#endif
-#if TCG_TARGET_HAS_add2_i64
- case INDEX_op_add2_i64:
- tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
- T1 = regs[r2] + regs[r4];
- T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
- regs[r0] = T1;
- regs[r1] = T2;
- break;
-#endif
-#if TCG_TARGET_HAS_add2_i64
- case INDEX_op_sub2_i64:
- tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
- T1 = regs[r2] - regs[r4];
- T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
- regs[r0] = T1;
- regs[r1] = T2;
- break;
-#endif
/* Shift/rotate operations (64 bit). */
- case INDEX_op_shl_i64:
- tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = regs[r1] << (regs[r2] & 63);
- break;
- case INDEX_op_shr_i64:
- tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = regs[r1] >> (regs[r2] & 63);
- break;
- case INDEX_op_sar_i64:
- tci_args_rrr(insn, &r0, &r1, &r2);
- regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
- break;
-#if TCG_TARGET_HAS_rot_i64
- case INDEX_op_rotl_i64:
+ case INDEX_op_rotl:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = rol64(regs[r1], regs[r2] & 63);
break;
- case INDEX_op_rotr_i64:
+ case INDEX_op_rotr:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror64(regs[r1], regs[r2] & 63);
break;
-#endif
-#if TCG_TARGET_HAS_deposit_i64
- case INDEX_op_deposit_i64:
- tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
- regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
- break;
-#endif
-#if TCG_TARGET_HAS_extract_i64
- case INDEX_op_extract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract64(regs[r1], pos, len);
- break;
-#endif
-#if TCG_TARGET_HAS_sextract_i64
- case INDEX_op_sextract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = sextract64(regs[r1], pos, len);
- break;
-#endif
- case INDEX_op_brcond_i64:
- tci_args_rl(insn, tb_ptr, &r0, &ptr);
- if (regs[r0]) {
- tb_ptr = ptr;
- }
- break;
- case INDEX_op_ext32s_i64:
case INDEX_op_ext_i32_i64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = (int32_t)regs[r1];
break;
- case INDEX_op_ext32u_i64:
case INDEX_op_extu_i32_i64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint32_t)regs[r1];
break;
-#if TCG_TARGET_HAS_bswap64_i64
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap64(regs[r1]);
break;
-#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
/* QEMU specific operations. */
@@ -922,92 +789,33 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tb_ptr = ptr;
break;
- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld:
tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- goto do_ld_i32;
- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = tci_uint64(regs[r2], regs[r1]);
- oi = regs[r3];
- }
- do_ld_i32:
+ taddr = regs[r1];
regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
break;
- case INDEX_op_qemu_ld_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = (uint32_t)regs[r2];
- oi = regs[r3];
- }
- goto do_ld_i64;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
- taddr = tci_uint64(regs[r3], regs[r2]);
- oi = regs[r4];
- }
- do_ld_i64:
- tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
- if (TCG_TARGET_REG_BITS == 32) {
- tci_write_reg64(regs, r1, r0, tmp64);
- } else {
- regs[r0] = tmp64;
- }
- break;
-
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st:
tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- goto do_st_i32;
- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = tci_uint64(regs[r2], regs[r1]);
- oi = regs[r3];
- }
- do_st_i32:
+ taddr = regs[r1];
tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
break;
- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- tmp64 = regs[r0];
- taddr = (uint32_t)regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = tci_uint64(regs[r1], regs[r0]);
- taddr = (uint32_t)regs[r2];
- oi = regs[r3];
- }
- goto do_st_i64;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- tmp64 = regs[r0];
- taddr = regs[r1];
- } else {
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
- tmp64 = tci_uint64(regs[r1], regs[r0]);
- taddr = tci_uint64(regs[r3], regs[r2]);
- oi = regs[r4];
- }
- do_st_i64:
+ case INDEX_op_qemu_ld2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = regs[r2];
+ oi = regs[r3];
+ tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
+ tci_write_reg64(regs, r1, r0, tmp64);
+ break;
+
+ case INDEX_op_qemu_st2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ taddr = regs[r2];
+ oi = regs[r3];
tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
break;
@@ -1071,7 +879,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
const char *op_name;
uint32_t insn;
TCGOpcode op;
- TCGReg r0, r1, r2, r3, r4, r5;
+ TCGReg r0, r1, r2, r3, r4;
tcg_target_ulong i1;
int32_t s2;
TCGCond c;
@@ -1106,15 +914,14 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
tci_args_rl(insn, tb_ptr, &r0, &ptr);
info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
op_name, str_r(r0), ptr);
break;
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
+ case INDEX_op_setcond:
+ case INDEX_op_tci_setcond32:
tci_args_rrrc(insn, &r0, &r1, &r2, &c);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
@@ -1132,126 +939,95 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), ptr);
break;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld_i64:
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st_i64:
+ case INDEX_op_tci_setcarry:
+ info->fprintf_func(info->stream, "%-12s", op_name);
+ break;
+
+ case INDEX_op_ld8u:
+ case INDEX_op_ld8s:
+ case INDEX_op_ld16u:
+ case INDEX_op_ld16s:
+ case INDEX_op_ld32u:
+ case INDEX_op_ld:
+ case INDEX_op_st8:
+ case INDEX_op_st16:
+ case INDEX_op_st32:
+ case INDEX_op_st:
tci_args_rrs(insn, &r0, &r1, &s2);
info->fprintf_func(info->stream, "%-12s %s, %s, %d",
op_name, str_r(r0), str_r(r1), s2);
break;
- case INDEX_op_mov_i32:
- case INDEX_op_mov_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
+ case INDEX_op_bswap16:
+ case INDEX_op_bswap32:
+ case INDEX_op_ctpop:
+ case INDEX_op_mov:
+ case INDEX_op_neg:
+ case INDEX_op_not:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
+ case INDEX_op_bswap64:
tci_args_rr(insn, &r0, &r1);
info->fprintf_func(info->stream, "%-12s %s, %s",
op_name, str_r(r0), str_r(r1));
break;
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_rem_i32:
- case INDEX_op_rem_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_remu_i32:
- case INDEX_op_remu_i64:
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotr_i64:
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
+ case INDEX_op_add:
+ case INDEX_op_addci:
+ case INDEX_op_addcio:
+ case INDEX_op_addco:
+ case INDEX_op_and:
+ case INDEX_op_andc:
+ case INDEX_op_clz:
+ case INDEX_op_ctz:
+ case INDEX_op_divs:
+ case INDEX_op_divu:
+ case INDEX_op_eqv:
+ case INDEX_op_mul:
+ case INDEX_op_nand:
+ case INDEX_op_nor:
+ case INDEX_op_or:
+ case INDEX_op_orc:
+ case INDEX_op_rems:
+ case INDEX_op_remu:
+ case INDEX_op_rotl:
+ case INDEX_op_rotr:
+ case INDEX_op_sar:
+ case INDEX_op_shl:
+ case INDEX_op_shr:
+ case INDEX_op_sub:
+ case INDEX_op_subbi:
+ case INDEX_op_subbio:
+ case INDEX_op_subbo:
+ case INDEX_op_xor:
+ case INDEX_op_tci_ctz32:
+ case INDEX_op_tci_clz32:
+ case INDEX_op_tci_divs32:
+ case INDEX_op_tci_divu32:
+ case INDEX_op_tci_rems32:
+ case INDEX_op_tci_remu32:
+ case INDEX_op_tci_rotl32:
+ case INDEX_op_tci_rotr32:
tci_args_rrr(insn, &r0, &r1, &r2);
info->fprintf_func(info->stream, "%-12s %s, %s, %s",
op_name, str_r(r0), str_r(r1), str_r(r2));
break;
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
+ case INDEX_op_deposit:
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
break;
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
+ case INDEX_op_extract:
+ case INDEX_op_sextract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
op_name, str_r(r0), str_r(r1), pos, len);
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
+ case INDEX_op_tci_movcond32:
+ case INDEX_op_movcond:
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
@@ -1259,62 +1035,27 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
str_r(r3), str_r(r4), str_c(c));
break;
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
+ case INDEX_op_muls2:
+ case INDEX_op_mulu2:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1),
str_r(r2), str_r(r3));
break;
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
- info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
- op_name, str_r(r0), str_r(r1), str_r(r2),
- str_r(r3), str_r(r4), str_r(r5));
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), oi);
break;
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_st_a32_i32:
- len = 1 + 1;
- goto do_qemu_ldst;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a64_i32:
- len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
- goto do_qemu_ldst;
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a64_i64:
- len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
- goto do_qemu_ldst;
- do_qemu_ldst:
- switch (len) {
- case 2:
- tci_args_rrm(insn, &r0, &r1, &oi);
- info->fprintf_func(info->stream, "%-12s %s, %s, %x",
- op_name, str_r(r0), str_r(r1), oi);
- break;
- case 3:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
- op_name, str_r(r0), str_r(r1),
- str_r(r2), str_r(r3));
- break;
- case 4:
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
- info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
- op_name, str_r(r0), str_r(r1),
- str_r(r2), str_r(r3), str_r(r4));
- break;
- default:
- g_assert_not_reached();
- }
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
break;
case 0:
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
new file mode 100644
index 0000000..ab07ce1
--- /dev/null
+++ b/tcg/tci/tcg-target-has.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific opcode support
+ * Copyright (c) 2009, 2011 Stefan Weil
+ */
+
+#ifndef TCG_TARGET_HAS_H
+#define TCG_TARGET_HAS_H
+
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
+#define TCG_TARGET_HAS_tst 1
+
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
+#define TCG_TARGET_sextract_valid(type, ofs, len) 1
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
+
+#endif
diff --git a/tcg/tci/tcg-target-mo.h b/tcg/tci/tcg-target-mo.h
new file mode 100644
index 0000000..779872e
--- /dev/null
+++ b/tcg/tci/tcg-target-mo.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define target-specific memory model
+ * Copyright (c) 2009, 2011 Stefan Weil
+ */
+
+#ifndef TCG_TARGET_MO_H
+#define TCG_TARGET_MO_H
+
+/*
+ * We could notice __i386__ or __s390x__ and reduce the barriers depending
+ * on the host. But if you want performance, you use the normal backend.
+ * We prefer consistency across hosts on this.
+ */
+#define TCG_TARGET_DEFAULT_MO 0
+
+#endif
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
new file mode 100644
index 0000000..4eb32ed
--- /dev/null
+++ b/tcg/tci/tcg-target-opc.h.inc
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/* These opcodes are for use between the tci generator and interpreter. */
+DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_setcarry, 0, 0, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_clz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_ctz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_setcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_movcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index c740864..35c66a4 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -22,160 +22,24 @@
* THE SOFTWARE.
*/
-#include "../tcg-pool.c.inc"
-
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
-{
- switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
- return C_O1_I1(r, r);
-
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(r, r);
-
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_rem_i32:
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i32:
- case INDEX_op_remu_i64:
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i32:
- case INDEX_op_rotr_i64:
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- case INDEX_op_clz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, r);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, r);
-
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, r, r, r, r);
-
+/* Used for function call generation. */
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_STACK_ALIGN 8
#if TCG_TARGET_REG_BITS == 32
- case INDEX_op_brcond2_i32:
- return C_O0_I4(r, r, r, r);
+# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
+#else
+# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
#endif
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
- return C_O2_I2(r, r, r, r);
-
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, r, r, r, r);
-
- case INDEX_op_qemu_ld_a32_i32:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_ld_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
- case INDEX_op_qemu_st_a32_i32:
- return C_O0_I2(r, r);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
-
- default:
- g_assert_not_reached();
- }
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+{
+ return C_NotImplemented;
}
static const int tcg_target_reg_alloc_order[] = {
@@ -409,20 +273,6 @@ static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
tcg_out32(s, insn);
}
-static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0,
- TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4)
-{
- tcg_insn_unit insn = 0;
-
- insn = deposit32(insn, 0, 8, op);
- insn = deposit32(insn, 8, 4, r0);
- insn = deposit32(insn, 12, 4, r1);
- insn = deposit32(insn, 16, 4, r2);
- insn = deposit32(insn, 20, 4, r3);
- insn = deposit32(insn, 24, 4, r4);
- tcg_out32(s, insn);
-}
-
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
@@ -452,31 +302,13 @@ static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
tcg_out32(s, insn);
}
-static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
- TCGReg r0, TCGReg r1, TCGReg r2,
- TCGReg r3, TCGReg r4, TCGReg r5)
-{
- tcg_insn_unit insn = 0;
-
- insn = deposit32(insn, 0, 8, op);
- insn = deposit32(insn, 8, 4, r0);
- insn = deposit32(insn, 12, 4, r1);
- insn = deposit32(insn, 16, 4, r2);
- insn = deposit32(insn, 20, 4, r3);
- insn = deposit32(insn, 24, 4, r4);
- insn = deposit32(insn, 28, 4, r5);
- tcg_out32(s, insn);
-}
-
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
TCGReg base, intptr_t offset)
{
stack_bounds_check(base, offset);
if (offset != sextract32(offset, 0, 16)) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
- tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
- ? INDEX_op_add_i32 : INDEX_op_add_i64),
- TCG_REG_TMP, TCG_REG_TMP, base);
+ tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
base = TCG_REG_TMP;
offset = 0;
}
@@ -486,34 +318,17 @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
intptr_t offset)
{
- switch (type) {
- case TCG_TYPE_I32:
- tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
- break;
-#endif
- default:
- g_assert_not_reached();
+ TCGOpcode op = INDEX_op_ld;
+
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+ op = INDEX_op_ld32u;
}
+ tcg_out_ldst(s, op, val, base, offset);
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
- switch (type) {
- case TCG_TYPE_I32:
- tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg);
- break;
-#endif
- default:
- g_assert_not_reached();
- }
+ tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
return true;
}
@@ -544,76 +359,62 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
}
}
+static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rs, unsigned pos, unsigned len)
+{
+ tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tcg_out_extract,
+};
+
+static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rs, unsigned pos, unsigned len)
+{
+ tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len);
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tcg_out_sextract,
+};
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
- switch (type) {
- case TCG_TYPE_I32:
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
- tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64);
- tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs);
- break;
-#endif
- default:
- g_assert_not_reached();
- }
+ tcg_out_sextract(s, type, rd, rs, 0, 8);
}
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64);
- tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs);
- } else {
- tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32);
- tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs);
- }
+ tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
- switch (type) {
- case TCG_TYPE_I32:
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
- tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64);
- tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs);
- break;
-#endif
- default:
- g_assert_not_reached();
- }
+ tcg_out_sextract(s, type, rd, rs, 0, 16);
}
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64);
- tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs);
- } else {
- tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32);
- tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs);
- }
+ tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64);
- tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs);
+ tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64);
- tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs);
+ tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
@@ -665,18 +466,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
tcg_out32(s, insn);
}
-#if TCG_TARGET_REG_BITS == 64
-# define CASE_32_64(x) \
- case glue(glue(INDEX_op_, x), _i64): \
- case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x) \
- case glue(glue(INDEX_op_, x), _i64):
-#else
-# define CASE_32_64(x) \
- case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x)
-#endif
-
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
@@ -689,221 +478,772 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_op_r(s, INDEX_op_goto_ptr, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
/* Always indirect, nothing to do */
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGOpcode exts;
+ tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_op_r(s, opc, args[0]);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_add,
+};
- case INDEX_op_br:
- tcg_out_op_l(s, opc, arg_label(args[0]));
- break;
+static TCGConstraintSetIndex cset_addsubcarry(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
- CASE_32_64(setcond)
- tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
- break;
+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_addco, a0, a1, a2);
+}
- CASE_32_64(movcond)
- case INDEX_op_setcond2_i32:
- tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
- args[3], args[4], args[5]);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_addco,
+};
- CASE_32_64(ld8u)
- CASE_32_64(ld8s)
- CASE_32_64(ld16u)
- CASE_32_64(ld16s)
- case INDEX_op_ld_i32:
- CASE_64(ld32u)
- CASE_64(ld32s)
- CASE_64(ld)
- CASE_32_64(st8)
- CASE_32_64(st16)
- case INDEX_op_st_i32:
- CASE_64(st32)
- CASE_64(st)
- tcg_out_ldst(s, opc, args[0], args[1], args[2]);
- break;
+static void tgen_addci(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_addci, a0, a1, a2);
+}
- CASE_32_64(add)
- CASE_32_64(sub)
- CASE_32_64(mul)
- CASE_32_64(and)
- CASE_32_64(or)
- CASE_32_64(xor)
- CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
- CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
- CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
- CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
- CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
- CASE_32_64(shl)
- CASE_32_64(shr)
- CASE_32_64(sar)
- CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
- CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
- CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */
- CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
- CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
- CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
- CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
- CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
- tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_addci,
+};
- CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
- {
- TCGArg pos = args[3], len = args[4];
- TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64;
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_addcio, a0, a1, a2);
+}
- tcg_debug_assert(pos < max);
- tcg_debug_assert(pos + len <= max);
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_addcio,
+};
- tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len);
- }
- break;
+static void tcg_out_set_carry(TCGContext *s)
+{
+ tcg_out_op_v(s, INDEX_op_tci_setcarry);
+}
- CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */
- CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
- {
- TCGArg pos = args[2], len = args[3];
- TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32;
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
+}
- tcg_debug_assert(pos < max);
- tcg_debug_assert(pos + len <= max);
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_and,
+};
- tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len);
- }
- break;
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
+}
- CASE_32_64(brcond)
- tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
- TCG_REG_TMP, args[0], args[1], args[2]);
- tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
- break;
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
- CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
- CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
- CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
- case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
- case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
- tcg_out_op_rr(s, opc, args[0], args[1]);
- break;
+static void tgen_clz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_clz32
+ : INDEX_op_clz);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
- case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
- exts = INDEX_op_ext16s_i32;
- goto do_bswap;
- case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
- exts = INDEX_op_ext16s_i64;
- goto do_bswap;
- case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
- exts = INDEX_op_ext32s_i64;
- do_bswap:
- /* The base tci bswaps zero-extend, and ignore high bits. */
- tcg_out_op_rr(s, opc, args[0], args[1]);
- if (args[2] & TCG_BSWAP_OS) {
- tcg_out_op_rr(s, exts, args[0], args[0]);
- }
- break;
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_clz,
+};
- CASE_32_64(add2)
- CASE_32_64(sub2)
- tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
- args[3], args[4], args[5]);
- break;
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_ctz32
+ : INDEX_op_ctz);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
-#if TCG_TARGET_REG_BITS == 32
- case INDEX_op_brcond2_i32:
- tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
- args[0], args[1], args[2], args[3], args[4]);
- tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
- break;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_ctz,
+};
+
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_deposit,
+};
+
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_divs32
+ : INDEX_op_divs);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divs,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_divu32
+ : INDEX_op_divu);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_divu,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
#endif
- CASE_32_64(mulu2)
- CASE_32_64(muls2)
- tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
- break;
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
+}
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_st_a32_i32:
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
- break;
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
- } else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
- tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
- }
- break;
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
- } else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
- tcg_out_op_rrrrr(s, opc, args[0], args[1],
- args[2], args[3], TCG_REG_TMP);
- }
- break;
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mul,
+};
- case INDEX_op_mb:
- tcg_out_op_v(s, opc);
- break;
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- default:
- g_assert_not_reached();
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_mulu2,
+};
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_nand(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nand,
+};
+
+static void tgen_nor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nor,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_or,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
+
+static void tgen_rems(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_rems32
+ : INDEX_op_rems);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rems,
+};
+
+static void tgen_remu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_remu32
+ : INDEX_op_remu);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_remu,
+};
+
+static void tgen_rotl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_rotl32
+ : INDEX_op_rotl);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rotl,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_rotr32
+ : INDEX_op_rotr);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_rotr,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type < TCG_TYPE_REG) {
+ tcg_out_ext32s(s, TCG_REG_TMP, a1);
+ a1 = TCG_REG_TMP;
}
+ tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}
-static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
- intptr_t offset)
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sar,
+};
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- switch (type) {
- case TCG_TYPE_I32:
- tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
- break;
+ tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_shl,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type < TCG_TYPE_REG) {
+ tcg_out_ext32u(s, TCG_REG_TMP, a1);
+ a1 = TCG_REG_TMP;
+ }
+ tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_shr,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_subbo, a0, a1, a2);
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_subbo,
+};
+
+static void tgen_subbi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_subbi, a0, a1, a2);
+}
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_subbi,
+};
+
+static void tgen_subbio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_subbio, a0, a1, a2);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addsubcarry,
+ .out_rrr = tgen_subbio,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ tcg_out_op_v(s, INDEX_op_tci_setcarry); /* borrow == carry */
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_xor,
+};
+
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap16,
+};
+
+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
- break;
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
#endif
- default:
- g_assert_not_reached();
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, INDEX_op_not, a0, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_setcond32
+ : INDEX_op_setcond);
+ tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_setcond,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tgen_setcond(s, type, cond, dest, arg1, arg2);
+ tgen_neg(s, type, dest, dest);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_negsetcond,
+};
+
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg0, TCGReg arg1, TCGLabel *l)
+{
+ tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
+ tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_rr = tgen_brcond,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_movcond32
+ : INDEX_op_movcond);
+ tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, r, r, r),
+ .out = tgen_movcond,
+};
+
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh, TCGLabel *l)
+{
+ tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
+ al, ah, bl, bh, cond);
+ tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
+}
+
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpBrcond2 outop_brcond2 = {
+ .base.static_constraint = C_O0_I4(r, r, r, r),
+ .out = tgen_brcond2,
+};
+
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg al, TCGReg ah,
+ TCGArg bl, bool const_bl,
+ TCGArg bh, bool const_bh)
+{
+ tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
+}
+
+#if TCG_TARGET_REG_BITS != 32
+__attribute__((unused))
+#endif
+static const TCGOutOpSetcond2 outop_setcond2 = {
+ .base.static_constraint = C_O1_I4(r, r, r, r, r),
+ .out = tgen_setcond2,
+};
+
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+ tcg_out_op_v(s, INDEX_op_mb);
+}
+
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_op_l(s, INDEX_op_br, l);
+}
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld8u, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld8s, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld16u, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld16s, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld32u, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld32s, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+#endif
+
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_st8, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
+
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_st16, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_op_rrm(s, INDEX_op_qemu_ld, data, addr, oi);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
+ tcg_out_op_rrrr(s, INDEX_op_qemu_ld2, datalo, datahi, addr, TCG_REG_TMP);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r),
+ .out =
+ TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_op_rrm(s, INDEX_op_qemu_st, data, addr, oi);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
+ tcg_out_op_rrrr(s, INDEX_op_qemu_st2, datalo, datahi, addr, TCG_REG_TMP);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r),
+ .out =
+ TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_st2,
+};
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
+ intptr_t offset)
+{
+ TCGOpcode op = INDEX_op_st;
+
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+ op = INDEX_op_st32;
}
+ tcg_out_ldst(s, op, val, base, offset);
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -965,3 +1305,13 @@ bool tcg_target_has_memory_bswap(MemOp memop)
{
return true;
}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ g_assert_not_reached();
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ g_assert_not_reached();
+}
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index a076f40..bd03aa1 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -44,81 +44,6 @@
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
-/* Optional instructions. */
-
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_extract_i32 1
-#define TCG_TARGET_HAS_sextract_i32 1
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_eqv_i32 1
-#define TCG_TARGET_HAS_nand_i32 1
-#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_clz_i32 1
-#define TCG_TARGET_HAS_ctz_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 0
-#define TCG_TARGET_HAS_muls2_i32 1
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i64 1
-#define TCG_TARGET_HAS_sextract_i64 1
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
-#define TCG_TARGET_HAS_nand_i64 1
-#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_clz_i64 1
-#define TCG_TARGET_HAS_ctz_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 1
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 0
-#define TCG_TARGET_HAS_muls2_i64 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-#else
-#define TCG_TARGET_HAS_mulu2_i32 1
-#endif /* TCG_TARGET_REG_BITS == 64 */
-
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
-#define TCG_TARGET_HAS_tst 1
-
/* Number of registers available. */
#define TCG_TARGET_NB_REGS 16
@@ -146,26 +71,6 @@ typedef enum {
TCG_REG_CALL_STACK = TCG_REG_R15,
} TCGReg;
-/* Used for function call generation. */
-#define TCG_TARGET_CALL_STACK_OFFSET 0
-#define TCG_TARGET_STACK_ALIGN 8
-#if TCG_TARGET_REG_BITS == 32
-# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
-#else
-# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
-#endif
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-
#define HAVE_TCG_QEMU_TB_EXEC
-#define TCG_TARGET_NEED_POOL_LABELS
-
-/* We could notice __i386__ or __s390x__ and reduce the barriers depending
- on the host. But if you want performance, you use the normal backend.
- We prefer consistency across hosts on this. */
-#define TCG_TARGET_DEFAULT_MO (0)
#endif /* TCG_TARGET_H */
diff --git a/tests/Makefile.include b/tests/Makefile.include
index d39d5dd..23fb722 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -3,28 +3,28 @@
.PHONY: check-help
check-help:
@echo "Regression testing targets:"
- @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests"
- @echo " $(MAKE) bench Run speed tests"
+ @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests"
+ @echo " $(MAKE) bench Run speed tests"
@echo
@echo "Individual test suites:"
- @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target"
- @echo " $(MAKE) check-qtest Run qtest tests"
- @echo " $(MAKE) check-unit Run qobject tests"
- @echo " $(MAKE) check-qapi-schema Run QAPI schema tests"
- @echo " $(MAKE) check-block Run block tests"
+ @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target"
+ @echo " $(MAKE) check-qtest Run qtest tests"
+ @echo " $(MAKE) check-functional Run python-based functional tests"
+ @echo " $(MAKE) check-functional-TARGET Run functional tests for a given target"
+ @echo " $(MAKE) check-unit Run qobject tests"
+ @echo " $(MAKE) check-qapi-schema Run QAPI schema tests"
+ @echo " $(MAKE) check-block Run block tests"
ifneq ($(filter $(all-check-targets), check-softfloat),)
- @echo " $(MAKE) check-tcg Run TCG tests"
- @echo " $(MAKE) check-softfloat Run FPU emulation tests"
+ @echo " $(MAKE) check-tcg Run TCG tests"
+ @echo " $(MAKE) check-softfloat Run FPU emulation tests"
endif
- @echo " $(MAKE) check-avocado Run avocado (integration) tests for currently configured targets"
@echo
- @echo " $(MAKE) check-report.junit.xml Generates an aggregated XML test report"
- @echo " $(MAKE) check-venv Creates a Python venv for tests"
- @echo " $(MAKE) check-clean Clean the tests and related data"
+ @echo " $(MAKE) check-report.junit.xml Generates an aggregated XML test report"
+ @echo " $(MAKE) check-venv Creates a Python venv for tests"
+ @echo " $(MAKE) check-clean Clean the tests and related data"
@echo
@echo "The following are useful for CI builds"
- @echo " $(MAKE) check-build Build most test binaries"
- @echo " $(MAKE) get-vm-images Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)"
+ @echo " $(MAKE) check-build Build most test binaries"
@echo
@echo
@echo "The variable SPEED can be set to control the gtester speed setting."
@@ -84,26 +84,12 @@ distclean-tcg: $(DISTCLEAN_TCG_TARGET_RULES)
# Python venv for running tests
-.PHONY: check-venv check-avocado check-acceptance check-acceptance-deprecated-warning
+.PHONY: check-venv
# Build up our target list from the filtered list of ninja targets
TARGETS=$(patsubst libqemu-%.a, %, $(filter libqemu-%.a, $(ninja-targets)))
TESTS_VENV_TOKEN=$(BUILD_DIR)/pyvenv/tests.group
-TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
-ifndef AVOCADO_TESTS
- AVOCADO_TESTS=tests/avocado
-endif
-# Controls the output generated by Avocado when running tests.
-# Any number of comma-separated loggers are accepted. For more
-# information please refer to "avocado --help".
-AVOCADO_SHOW=app
-ifndef AVOCADO_TAGS
- AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t arch:%, \
- $(filter %-softmmu,$(TARGETS)))
-else
- AVOCADO_CMDLINE_TAGS=$(addprefix -t , $(AVOCADO_TAGS))
-endif
quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \
$(PYTHON) -m pip -q --disable-pip-version-check $1, \
@@ -111,56 +97,30 @@ quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \
$(TESTS_VENV_TOKEN): $(SRC_PATH)/pythondeps.toml
$(call quiet-venv-pip,install -e "$(SRC_PATH)/python/")
- $(MKVENV_ENSUREGROUP) $< avocado
+ $(MKVENV_ENSUREGROUP) $< testdeps
$(call quiet-command, touch $@)
-$(TESTS_RESULTS_DIR):
- $(call quiet-command, mkdir -p $@, \
- MKDIR, $@)
-
check-venv: $(TESTS_VENV_TOKEN)
-FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS)))
-FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS))
-FEDORA_31_ARCHES := x86_64 aarch64 ppc64le s390x
-FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES))
-
-# download one specific Fedora 31 image
-get-vm-image-fedora-31-%: check-venv
- $(call quiet-command, \
- $(PYTHON) -m avocado vmimage get \
- --distro=fedora --distro-version=31 --arch=$*, \
- "AVOCADO", "Downloading avocado tests VM image for $*")
-
-# download all vm images, according to defined targets
-get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD))
-
-check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
- $(call quiet-command, \
- $(PYTHON) -m avocado \
- --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
- $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
- --filter-by-tags-include-empty-key) \
- $(AVOCADO_CMDLINE_TAGS) \
- $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
- "AVOCADO", "tests/avocado")
-
-check-acceptance-deprecated-warning:
- @echo
- @echo "Note '$(MAKE) check-acceptance' is deprecated, use '$(MAKE) check-avocado' instead."
- @echo
+FUNCTIONAL_TARGETS=$(patsubst %-softmmu,check-functional-%, $(filter %-softmmu,$(TARGETS)))
+.PHONY: $(FUNCTIONAL_TARGETS)
+$(FUNCTIONAL_TARGETS):
+ @$(MAKE) SPEED=thorough $(subst -functional,-func,$@)
-check-acceptance: check-acceptance-deprecated-warning | check-avocado
+.PHONY: check-functional
+check-functional:
+ @$(NINJA) precache-functional
+ @QEMU_TEST_NO_DOWNLOAD=1 $(MAKE) SPEED=thorough check-func check-func-quick
# Consolidated targets
-.PHONY: check check-clean get-vm-images
+.PHONY: check check-clean
check:
check-build: run-ninja
check-clean:
- rm -rf $(TESTS_RESULTS_DIR)
+ rm -rf $(BUILD_DIR)/tests/functional
clean: check-clean clean-tcg
distclean: distclean-tcg
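
The new check-functional-TARGET rules above are generated purely by name mangling: each ninja "libqemu-<target>-softmmu.a" library becomes a "check-functional-<target>" rule, while user-mode targets are skipped. As a rough sketch of that $(filter)/$(patsubst) pipeline (illustration only, with assumed example target names; not part of the patch itself):

    # Assumed example ninja targets; only *-softmmu system emulators
    # get a check-functional-* rule, user-mode targets are filtered out.
    ninja_targets = ["libqemu-x86_64-softmmu.a",
                     "libqemu-aarch64-softmmu.a",
                     "libqemu-arm-linux-user.a"]

    # TARGETS = $(patsubst libqemu-%.a, %, $(filter libqemu-%.a, $(ninja-targets)))
    targets = [t[len("libqemu-"):-len(".a")]
               for t in ninja_targets
               if t.startswith("libqemu-") and t.endswith(".a")]

    # FUNCTIONAL_TARGETS = $(patsubst %-softmmu,check-functional-%, $(filter %-softmmu,$(TARGETS)))
    functional = ["check-functional-" + t[:-len("-softmmu")]
                  for t in targets if t.endswith("-softmmu")]

    print(functional)  # ['check-functional-x86_64', 'check-functional-aarch64']
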
diff --git a/tests/avocado/README.rst b/tests/avocado/README.rst
deleted file mode 100644
index 9448837..0000000
--- a/tests/avocado/README.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-=============================================
-Integration tests using the Avocado Framework
-=============================================
-
-This directory contains integration tests. They're usually higher
-level, and may interact with external resources and with various
-guest operating systems.
-
-For more information, please refer to ``docs/devel/testing.rst``,
-section "Integration tests using the Avocado Framework".
diff --git a/tests/avocado/acpi-bits.py b/tests/avocado/acpi-bits.py
deleted file mode 100644
index efe4f52..0000000
--- a/tests/avocado/acpi-bits.py
+++ /dev/null
@@ -1,409 +0,0 @@
-#!/usr/bin/env python3
-# group: rw quick
-# Exercise QEMU generated ACPI/SMBIOS tables using biosbits,
-# https://biosbits.org/
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Author:
-# Ani Sinha <anisinha@redhat.com>
-
-# pylint: disable=invalid-name
-# pylint: disable=consider-using-f-string
-
-"""
-These are QEMU ACPI/SMBIOS avocado tests using biosbits.
-Biosbits is available originally at https://biosbits.org/.
-This test uses a fork of the upstream bits and has numerous fixes
-including an upgraded acpica. The fork is located here:
-https://gitlab.com/qemu-project/biosbits-bits .
-"""
-
-import logging
-import os
-import platform
-import re
-import shutil
-import subprocess
-import tarfile
-import tempfile
-import time
-import zipfile
-from typing import (
- List,
- Optional,
- Sequence,
-)
-from qemu.machine import QEMUMachine
-from avocado import skipIf
-from avocado.utils import datadrainer as drainer
-from avocado_qemu import QemuBaseTest
-
-deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box.
-supported_platforms = ['x86_64'] # supported test platforms.
-
-# default timeout of 120 secs is sometimes not enough for bits test.
-BITS_TIMEOUT = 200
-
-def which(tool):
- """ looks up the full path for @tool, returns None if not found
- or if @tool does not have executable permissions.
- """
- paths=os.getenv('PATH')
- for p in paths.split(os.path.pathsep):
- p = os.path.join(p, tool)
- if os.path.exists(p) and os.access(p, os.X_OK):
- return p
- return None
-
-def missing_deps():
- """ returns True if any of the test dependent tools are absent.
- """
- for dep in deps:
- if which(dep) is None:
- return True
- return False
-
-def supported_platform():
- """ checks if the test is running on a supported platform.
- """
- return platform.machine() in supported_platforms
-
-class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods
- """
- A QEMU VM, with isa-debugcon enabled and bits iso passed
- using -cdrom to QEMU commandline.
-
- """
- def __init__(self,
- binary: str,
- args: Sequence[str] = (),
- wrapper: Sequence[str] = (),
- name: Optional[str] = None,
- base_temp_dir: str = "/var/tmp",
- debugcon_log: str = "debugcon-log.txt",
- debugcon_addr: str = "0x403",
- qmp_timer: Optional[float] = None):
- # pylint: disable=too-many-arguments
-
- if name is None:
- name = "qemu-bits-%d" % os.getpid()
- super().__init__(binary, args, wrapper=wrapper, name=name,
- base_temp_dir=base_temp_dir,
- qmp_timer=qmp_timer)
- self.debugcon_log = debugcon_log
- self.debugcon_addr = debugcon_addr
- self.base_temp_dir = base_temp_dir
-
- @property
- def _base_args(self) -> List[str]:
- args = super()._base_args
- args.extend([
- '-chardev',
- 'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir,
- self.debugcon_log),
- '-device',
- 'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr,
- ])
- return args
-
- def base_args(self):
- """return the base argument to QEMU binary"""
- return self._base_args
-
-@skipIf(not supported_platform() or missing_deps(),
- 'unsupported platform or dependencies (%s) not installed' \
- % ','.join(deps))
-class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes
- """
- ACPI and SMBIOS tests using biosbits.
-
- :avocado: tags=arch:x86_64
- :avocado: tags=acpi
-
- """
- # in slower systems the test can take as long as 3 minutes to complete.
- timeout = BITS_TIMEOUT
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self._vm = None
- self._workDir = None
- self._baseDir = None
-
- # following are some standard configuration constants
- self._bitsInternalVer = 2020 # gitlab CI does shallow clones of depth 20
- self._bitsCommitHash = 'c7920d2b' # commit hash must match
- # the artifact tag below
- self._bitsTag = "qemu-bits-10262023" # this is the latest bits
- # release as of today.
- self._bitsArtSHA1Hash = 'b22cdfcfc7453875297d06d626f5474ee36a343f'
- self._bitsArtURL = ("https://gitlab.com/qemu-project/"
- "biosbits-bits/-/jobs/artifacts/%s/"
- "download?job=qemu-bits-build" %self._bitsTag)
- self._debugcon_addr = '0x403'
- self._debugcon_log = 'debugcon-log.txt'
- logging.basicConfig(level=logging.INFO)
- self.logger = logging.getLogger('acpi-bits')
-
- def _print_log(self, log):
-        self.logger.info('\nlogs from biosbits follow:')
- self.logger.info('==========================================\n')
- self.logger.info(log)
- self.logger.info('==========================================\n')
-
- def copy_bits_config(self):
- """ copies the bios bits config file into bits.
- """
- config_file = 'bits-cfg.txt'
- bits_config_dir = os.path.join(self._baseDir, 'acpi-bits',
- 'bits-config')
- target_config_dir = os.path.join(self._workDir,
- 'bits-%d' %self._bitsInternalVer,
- 'boot')
- self.assertTrue(os.path.exists(bits_config_dir))
- self.assertTrue(os.path.exists(target_config_dir))
- self.assertTrue(os.access(os.path.join(bits_config_dir,
- config_file), os.R_OK))
- shutil.copy2(os.path.join(bits_config_dir, config_file),
- target_config_dir)
- self.logger.info('copied config file %s to %s',
- config_file, target_config_dir)
-
- def copy_test_scripts(self):
- """copies the python test scripts into bits. """
-
- bits_test_dir = os.path.join(self._baseDir, 'acpi-bits',
- 'bits-tests')
- target_test_dir = os.path.join(self._workDir,
- 'bits-%d' %self._bitsInternalVer,
- 'boot', 'python')
-
- self.assertTrue(os.path.exists(bits_test_dir))
- self.assertTrue(os.path.exists(target_test_dir))
-
- for filename in os.listdir(bits_test_dir):
- if os.path.isfile(os.path.join(bits_test_dir, filename)) and \
- filename.endswith('.py2'):
- # all test scripts are named with extension .py2 so that
- # avocado does not try to load them. These scripts are
- # written for python 2.7 not python 3 and hence if avocado
- # loaded them, it would complain about python 3 specific
- # syntaxes.
- newfilename = os.path.splitext(filename)[0] + '.py'
- shutil.copy2(os.path.join(bits_test_dir, filename),
- os.path.join(target_test_dir, newfilename))
- self.logger.info('copied test file %s to %s',
- filename, target_test_dir)
-
- # now remove the pyc test file if it exists, otherwise the
- # changes in the python test script won't be executed.
- testfile_pyc = os.path.splitext(filename)[0] + '.pyc'
- if os.access(os.path.join(target_test_dir, testfile_pyc),
- os.F_OK):
- os.remove(os.path.join(target_test_dir, testfile_pyc))
- self.logger.info('removed compiled file %s',
- os.path.join(target_test_dir,
- testfile_pyc))
-
- def fix_mkrescue(self, mkrescue):
- """ grub-mkrescue is a bash script with two variables, 'prefix' and
- 'libdir'. They must be pointed to the right location so that the
- iso can be generated appropriately. We point the two variables to
- the directory where we have extracted our pre-built bits grub
- tarball.
- """
- grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi')
- grub_i386_mods = os.path.join(self._workDir, 'grub-inst')
-
- self.assertTrue(os.path.exists(grub_x86_64_mods))
- self.assertTrue(os.path.exists(grub_i386_mods))
-
- new_script = ""
- with open(mkrescue, 'r', encoding='utf-8') as filehandle:
- orig_script = filehandle.read()
- new_script = re.sub('(^prefix=)(.*)',
- r'\1"%s"' %grub_x86_64_mods,
- orig_script, flags=re.M)
- new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods,
- new_script, flags=re.M)
-
- with open(mkrescue, 'w', encoding='utf-8') as filehandle:
- filehandle.write(new_script)
-
- def generate_bits_iso(self):
- """ Uses grub-mkrescue to generate a fresh bits iso with the python
- test scripts
- """
- bits_dir = os.path.join(self._workDir,
- 'bits-%d' %self._bitsInternalVer)
- iso_file = os.path.join(self._workDir,
- 'bits-%d.iso' %self._bitsInternalVer)
- mkrescue_script = os.path.join(self._workDir,
- 'grub-inst-x86_64-efi', 'bin',
- 'grub-mkrescue')
-
- self.assertTrue(os.access(mkrescue_script,
- os.R_OK | os.W_OK | os.X_OK))
-
- self.fix_mkrescue(mkrescue_script)
-
- self.logger.info('using grub-mkrescue for generating biosbits iso ...')
-
- try:
- if os.getenv('V') or os.getenv('BITS_DEBUG'):
- subprocess.check_call([mkrescue_script, '-o', iso_file,
- bits_dir], stderr=subprocess.STDOUT)
- else:
- subprocess.check_call([mkrescue_script, '-o',
- iso_file, bits_dir],
- stderr=subprocess.DEVNULL,
- stdout=subprocess.DEVNULL)
- except Exception as e: # pylint: disable=broad-except
- self.skipTest("Error while generating the bits iso. "
- "Pass V=1 in the environment to get more details. "
- + str(e))
-
- self.assertTrue(os.access(iso_file, os.R_OK))
-
- self.logger.info('iso file %s successfully generated.', iso_file)
-
- def setUp(self): # pylint: disable=arguments-differ
- super().setUp('qemu-system-')
-
- self._baseDir = os.getenv('AVOCADO_TEST_BASEDIR')
-
- # workdir could also be avocado's own workdir in self.workdir.
- # At present, I prefer to maintain my own temporary working
- # directory. It gives us more control over the generated bits
-        # log files, and for debugging we may choose not to remove
- # this working directory so that the logs and iso can be
- # inspected manually and archived if needed.
- self._workDir = tempfile.mkdtemp(prefix='acpi-bits-',
- suffix='.tmp')
- self.logger.info('working dir: %s', self._workDir)
-
- prebuiltDir = os.path.join(self._workDir, 'prebuilt')
- if not os.path.isdir(prebuiltDir):
- os.mkdir(prebuiltDir, mode=0o775)
-
- bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip'
- %(self._bitsInternalVer,
- self._bitsCommitHash))
- grub_tar_file = os.path.join(prebuiltDir,
- 'bits-%d-%s-grub.tar.gz'
- %(self._bitsInternalVer,
- self._bitsCommitHash))
-
- bitsLocalArtLoc = self.fetch_asset(self._bitsArtURL,
- asset_hash=self._bitsArtSHA1Hash)
- self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc)
-
- # extract the bits artifact in the temp working directory
- with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref:
- zref.extractall(prebuiltDir)
-
- # extract the bits software in the temp working directory
- with zipfile.ZipFile(bits_zip_file, 'r') as zref:
- zref.extractall(self._workDir)
-
- with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball:
- tarball.extractall(self._workDir)
-
- self.copy_test_scripts()
- self.copy_bits_config()
- self.generate_bits_iso()
-
- def parse_log(self):
- """parse the log generated by running bits tests and
- check for failures.
- """
- debugconf = os.path.join(self._workDir, self._debugcon_log)
- log = ""
- with open(debugconf, 'r', encoding='utf-8') as filehandle:
- log = filehandle.read()
-
- matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*',
- log)
- for match in matchiter:
- # verify that no test cases failed.
- try:
- self.assertEqual(match.group(3).split()[0], '0',
-                                 'Some bits tests seem to have failed. ' \
- 'Please check the test logs for more info.')
- except AssertionError as e:
- self._print_log(log)
- raise e
- else:
- if os.getenv('V') or os.getenv('BITS_DEBUG'):
- self._print_log(log)
-
- def tearDown(self):
- """
- Lets do some cleanups.
- """
- if self._vm:
-            self.assertTrue(self._vm.is_running)
- if not os.getenv('BITS_DEBUG') and self._workDir:
- self.logger.info('removing the work directory %s', self._workDir)
- shutil.rmtree(self._workDir)
- else:
- self.logger.info('not removing the work directory %s ' \
- 'as BITS_DEBUG is ' \
- 'passed in the environment', self._workDir)
- super().tearDown()
-
- def test_acpi_smbios_bits(self):
- """The main test case implementation."""
-
- iso_file = os.path.join(self._workDir,
- 'bits-%d.iso' %self._bitsInternalVer)
-
- self.assertTrue(os.access(iso_file, os.R_OK))
-
- self._vm = QEMUBitsMachine(binary=self.qemu_bin,
- base_temp_dir=self._workDir,
- debugcon_log=self._debugcon_log,
- debugcon_addr=self._debugcon_addr)
-
- self._vm.add_args('-cdrom', '%s' %iso_file)
- # the vm needs to be run under icount so that TCG emulation is
- # consistent in terms of timing. smilatency tests have consistent
- # timing requirements.
- self._vm.add_args('-icount', 'auto')
- # currently there is no support in bits for recognizing 64-bit SMBIOS
- # entry points. QEMU defaults to 64-bit entry points since the
- # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0
- # for newer machine models"). Therefore, enforce 32-bit entry point.
- self._vm.add_args('-machine', 'smbios-entry-point-type=32')
-
- # enable console logging
- self._vm.set_console()
- self._vm.launch()
-
- self.logger.debug("Console output from bits VM follows ...")
- c_drainer = drainer.LineLogger(self._vm.console_socket.fileno(),
- logger=self.logger.getChild("console"),
- stop_check=(lambda :
- not self._vm.is_running()))
- c_drainer.start()
-
- # biosbits has been configured to run all the specified test suites
- # in batch mode and then automatically initiate a vm shutdown.
- # Set timeout to BITS_TIMEOUT for SHUTDOWN event from bits VM at par
- # with the avocado test timeout.
- self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT)
- self._vm.wait(timeout=None)
- self.parse_log()
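
The QEMUBitsMachine class removed above exists mainly to wire an isa-debugcon device to a log file, so the biosbits results can be parsed after the guest shuts itself down. A minimal sketch of the command-line shape it builds (illustration only, not taken from the tree; the log name and 0x403 port mirror the defaults in the deleted test):

    import os

    def debugcon_args(temp_dir, log_name="debugcon-log.txt", addr="0x403"):
        """Return the -chardev/-device pair used to capture the bits debug console."""
        log_path = os.path.join(temp_dir, log_name)
        return [
            "-chardev", "file,path=%s,id=debugcon" % log_path,
            "-device", "isa-debugcon,iobase=%s,chardev=debugcon" % addr,
        ]

    # e.g. qemu-system-x86_64 -cdrom bits.iso <debugcon args> -icount auto \
    #      -machine smbios-entry-point-type=32
    print(" ".join(debugcon_args("/var/tmp")))
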
diff --git a/tests/avocado/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py
deleted file mode 100644
index 304c428..0000000
--- a/tests/avocado/avocado_qemu/__init__.py
+++ /dev/null
@@ -1,681 +0,0 @@
-# Test class and utilities for functional tests
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import logging
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import uuid
-
-import avocado
-from avocado.utils import cloudinit, datadrainer, process, ssh, vmimage
-from avocado.utils.path import find_command
-
-from qemu.machine import QEMUMachine
-from qemu.utils import (get_info_usernet_hostfwd_port, kvm_available,
- tcg_available)
-
-
-#: The QEMU build root directory. It may also be the source directory
-#: if building from the source dir, but it's safer to use BUILD_DIR for
-#: that purpose. Be aware that if this code is moved outside of a source
-#: and build tree, it will not be accurate.
-BUILD_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-
-if os.path.islink(os.path.dirname(os.path.dirname(__file__))):
- # The link to the avocado tests dir in the source code directory
- lnk = os.path.dirname(os.path.dirname(__file__))
- #: The QEMU root source directory
- SOURCE_DIR = os.path.dirname(os.path.dirname(os.readlink(lnk)))
-else:
- SOURCE_DIR = BUILD_DIR
-
-
-def has_cmd(name, args=None):
- """
- This function is for use in a @avocado.skipUnless decorator, e.g.:
-
- @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true')))
- def test_something_that_needs_sudo(self):
- ...
- """
-
- if args is None:
- args = ('which', name)
-
- try:
- _, stderr, exitcode = run_cmd(args)
- except Exception as e:
- exitcode = -1
- stderr = str(e)
-
- if exitcode != 0:
- cmd_line = ' '.join(args)
- err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}'
- return (False, err)
- else:
- return (True, '')
-
-def has_cmds(*cmds):
- """
- This function is for use in a @avocado.skipUnless decorator and
- allows checking for the availability of multiple commands, e.g.:
-
- @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')),
- 'cmd2', 'cmd3'))
- def test_something_that_needs_cmd1_and_cmd2(self):
- ...
- """
-
- for cmd in cmds:
- if isinstance(cmd, str):
- cmd = (cmd,)
-
- ok, errstr = has_cmd(*cmd)
- if not ok:
- return (False, errstr)
-
- return (True, '')
-
-def run_cmd(args):
- subp = subprocess.Popen(args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- universal_newlines=True)
- stdout, stderr = subp.communicate()
- ret = subp.returncode
-
- return (stdout, stderr, ret)
-
-def is_readable_executable_file(path):
- return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK)
-
-
-def pick_default_qemu_bin(bin_prefix='qemu-system-', arch=None):
- """
- Picks the path of a QEMU binary, starting either in the current working
- directory or in the source tree root directory.
-
- :param arch: the arch to use when looking for a QEMU binary (the target
- will match the arch given). If None (the default), arch
- will be the current host system arch (as given by
- :func:`os.uname`).
- :type arch: str
- :returns: the path to the default QEMU binary or None if one could not
- be found
- :rtype: str or None
- """
- if arch is None:
- arch = os.uname()[4]
- # qemu binary path does not match arch for powerpc, handle it
- if 'ppc64le' in arch:
- arch = 'ppc64'
- qemu_bin_name = bin_prefix + arch
- qemu_bin_paths = [
- os.path.join(".", qemu_bin_name),
- os.path.join(BUILD_DIR, qemu_bin_name),
- os.path.join(BUILD_DIR, "build", qemu_bin_name),
- ]
- for path in qemu_bin_paths:
- if is_readable_executable_file(path):
- return path
- return None
-
-
-def _console_interaction(test, success_message, failure_message,
- send_string, keep_sending=False, vm=None):
- assert not keep_sending or send_string
- if vm is None:
- vm = test.vm
- console = vm.console_file
- console_logger = logging.getLogger('console')
- while True:
- if send_string:
- vm.console_socket.sendall(send_string.encode())
- if not keep_sending:
- send_string = None # send only once
- try:
- msg = console.readline().decode().strip()
- except UnicodeDecodeError:
- msg = None
- if not msg:
- continue
- console_logger.debug(msg)
- if success_message is None or success_message in msg:
- break
- if failure_message and failure_message in msg:
- console.close()
- fail = 'Failure message found in console: "%s". Expected: "%s"' % \
- (failure_message, success_message)
- test.fail(fail)
-
-def interrupt_interactive_console_until_pattern(test, success_message,
- failure_message=None,
- interrupt_string='\r'):
- """
- Keep sending a string to interrupt a console prompt, while logging the
- console output. Typical use case is to break a boot loader prompt, such:
-
- Press a key within 5 seconds to interrupt boot process.
- 5
- 4
- 3
- 2
- 1
- Booting default image...
-
- :param test: an Avocado test containing a VM that will have its console
- read and probed for a success or failure message
- :type test: :class:`avocado_qemu.QemuSystemTest`
- :param success_message: if this message appears, test succeeds
- :param failure_message: if this message appears, test fails
- :param interrupt_string: a string to send to the console before trying
- to read a new line
- """
- _console_interaction(test, success_message, failure_message,
- interrupt_string, True)
-
-def wait_for_console_pattern(test, success_message, failure_message=None,
- vm=None):
- """
- Waits for messages to appear on the console, while logging the content
-
- :param test: an Avocado test containing a VM that will have its console
- read and probed for a success or failure message
- :type test: :class:`avocado_qemu.QemuSystemTest`
- :param success_message: if this message appears, test succeeds
- :param failure_message: if this message appears, test fails
- """
- _console_interaction(test, success_message, failure_message, None, vm=vm)
-
-def exec_command(test, command):
- """
- Send a command to a console (appending CRLF characters), while logging
- the content.
-
- :param test: an Avocado test containing a VM.
- :type test: :class:`avocado_qemu.QemuSystemTest`
- :param command: the command to send
- :type command: str
- """
- _console_interaction(test, None, None, command + '\r')
-
-def exec_command_and_wait_for_pattern(test, command,
- success_message, failure_message=None):
- """
- Send a command to a console (appending CRLF characters), then wait
-    for success_message to appear on the console, while logging the
-    content. Mark the test as failed if failure_message is found instead.
-
- :param test: an Avocado test containing a VM that will have its console
- read and probed for a success or failure message
- :type test: :class:`avocado_qemu.QemuSystemTest`
- :param command: the command to send
- :param success_message: if this message appears, test succeeds
- :param failure_message: if this message appears, test fails
- """
- _console_interaction(test, success_message, failure_message, command + '\r')
-
-class QemuBaseTest(avocado.Test):
-
- # default timeout for all tests, can be overridden
- timeout = 120
-
- def _get_unique_tag_val(self, tag_name):
- """
- Gets a tag value, if unique for a key
- """
- vals = self.tags.get(tag_name, [])
- if len(vals) == 1:
- return vals.pop()
- return None
-
- def setUp(self, bin_prefix):
- self.arch = self.params.get('arch',
- default=self._get_unique_tag_val('arch'))
-
- self.cpu = self.params.get('cpu',
- default=self._get_unique_tag_val('cpu'))
-
- default_qemu_bin = pick_default_qemu_bin(bin_prefix, arch=self.arch)
- self.qemu_bin = self.params.get('qemu_bin',
- default=default_qemu_bin)
- if self.qemu_bin is None:
- self.cancel("No QEMU binary defined or found in the build tree")
-
- def fetch_asset(self, name,
- asset_hash, algorithm=None,
- locations=None, expire=None,
- find_only=False, cancel_on_missing=True):
- return super().fetch_asset(name,
- asset_hash=asset_hash,
- algorithm=algorithm,
- locations=locations,
- expire=expire,
- find_only=find_only,
- cancel_on_missing=cancel_on_missing)
-
-
-class QemuSystemTest(QemuBaseTest):
- """Facilitates system emulation tests."""
-
- def setUp(self):
- self._vms = {}
-
- super().setUp('qemu-system-')
-
- accel_required = self._get_unique_tag_val('accel')
- if accel_required:
- self.require_accelerator(accel_required)
-
- self.machine = self.params.get('machine',
- default=self._get_unique_tag_val('machine'))
-
- def require_accelerator(self, accelerator):
- """
- Requires an accelerator to be available for the test to continue
-
- It takes into account the currently set qemu binary.
-
- If the check fails, the test is canceled. If the check itself
- for the given accelerator is not available, the test is also
- canceled.
-
- :param accelerator: name of the accelerator, such as "kvm" or "tcg"
- :type accelerator: str
- """
- checker = {'tcg': tcg_available,
- 'kvm': kvm_available}.get(accelerator)
- if checker is None:
- self.cancel("Don't know how to check for the presence "
- "of accelerator %s" % accelerator)
- if not checker(qemu_bin=self.qemu_bin):
- self.cancel("%s accelerator does not seem to be "
- "available" % accelerator)
-
- def require_netdev(self, netdevname):
- netdevhelp = run_cmd([self.qemu_bin,
-                             '-M', 'none', '-netdev', 'help'])[0]
-        if netdevhelp.find('\n' + netdevname + '\n') < 0:
-            self.cancel('no support for %s networking' % netdevname)
-
- def require_multiprocess(self):
- """
- Test for the presence of the x-pci-proxy-dev which is required
- to support multiprocess.
- """
- devhelp = run_cmd([self.qemu_bin,
-                          '-M', 'none', '-device', 'help'])[0]
- if devhelp.find('x-pci-proxy-dev') < 0:
- self.cancel('no support for multiprocess device emulation')
-
- def _new_vm(self, name, *args):
- self._sd = tempfile.TemporaryDirectory(prefix="qemu_")
- vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir,
- log_dir=self.logdir)
- self.log.debug('QEMUMachine "%s" created', name)
- self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir)
- self.log.debug('QEMUMachine "%s" log_dir: %s', name, vm.log_dir)
- if args:
- vm.add_args(*args)
- return vm
-
- def get_qemu_img(self):
- self.log.debug('Looking for and selecting a qemu-img binary')
-
- # If qemu-img has been built, use it, otherwise the system wide one
- # will be used.
- qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
- if not os.path.exists(qemu_img):
- qemu_img = find_command('qemu-img', False)
- if qemu_img is False:
- self.cancel('Could not find "qemu-img"')
-
- return qemu_img
-
- @property
- def vm(self):
- return self.get_vm(name='default')
-
- def get_vm(self, *args, name=None):
- if not name:
- name = str(uuid.uuid4())
- if self._vms.get(name) is None:
- self._vms[name] = self._new_vm(name, *args)
- if self.cpu is not None:
- self._vms[name].add_args('-cpu', self.cpu)
- if self.machine is not None:
- self._vms[name].set_machine(self.machine)
- return self._vms[name]
-
- def set_vm_arg(self, arg, value):
- """
-        Set an argument in the list of extra arguments to be given to the QEMU
- binary. If the argument already exists then its value is replaced.
-
- :param arg: the QEMU argument, such as "-cpu" in "-cpu host"
- :type arg: str
- :param value: the argument value, such as "host" in "-cpu host"
- :type value: str
- """
- if not arg or not value:
- return
- if arg not in self.vm.args:
- self.vm.args.extend([arg, value])
- else:
- idx = self.vm.args.index(arg) + 1
- if idx < len(self.vm.args):
- self.vm.args[idx] = value
- else:
- self.vm.args.append(value)
-
- def tearDown(self):
- for vm in self._vms.values():
- vm.shutdown()
- self._sd = None
- super().tearDown()
-
-
-class QemuUserTest(QemuBaseTest):
- """Facilitates user-mode emulation tests."""
-
- def setUp(self):
- self._ldpath = []
- super().setUp('qemu-')
-
- def add_ldpath(self, ldpath):
- self._ldpath.append(os.path.abspath(ldpath))
-
- def run(self, bin_path, args=[]):
- qemu_args = " ".join(["-L %s" % ldpath for ldpath in self._ldpath])
- bin_args = " ".join(args)
- return process.run("%s %s %s %s" % (self.qemu_bin, qemu_args,
- bin_path, bin_args))
-
-
-class LinuxSSHMixIn:
- """Contains utility methods for interacting with a guest via SSH."""
-
- def ssh_connect(self, username, credential, credential_is_key=True):
- self.ssh_logger = logging.getLogger('ssh')
- res = self.vm.cmd('human-monitor-command',
- command_line='info usernet')
- port = get_info_usernet_hostfwd_port(res)
- self.assertIsNotNone(port)
- self.assertGreater(port, 0)
- self.log.debug('sshd listening on port: %d', port)
- if credential_is_key:
- self.ssh_session = ssh.Session('127.0.0.1', port=port,
- user=username, key=credential)
- else:
- self.ssh_session = ssh.Session('127.0.0.1', port=port,
- user=username, password=credential)
- for i in range(10):
- try:
- self.ssh_session.connect()
- return
- except:
- time.sleep(i)
- self.fail('ssh connection timeout')
-
- def ssh_command(self, command):
- self.ssh_logger.info(command)
- result = self.ssh_session.cmd(command)
- stdout_lines = [line.rstrip() for line
- in result.stdout_text.splitlines()]
- for line in stdout_lines:
- self.ssh_logger.info(line)
- stderr_lines = [line.rstrip() for line
- in result.stderr_text.splitlines()]
- for line in stderr_lines:
- self.ssh_logger.warning(line)
-
- self.assertEqual(result.exit_status, 0,
- f'Guest command failed: {command}')
- return stdout_lines, stderr_lines
-
- def ssh_command_output_contains(self, cmd, exp):
- stdout, _ = self.ssh_command(cmd)
- for line in stdout:
- if exp in line:
- break
- else:
- self.fail('"%s" output does not contain "%s"' % (cmd, exp))
-
-class LinuxDistro:
- """Represents a Linux distribution
-
- Holds information of known distros.
- """
- #: A collection of known distros and their respective image checksum
- KNOWN_DISTROS = {
- 'fedora': {
- '31': {
- 'x86_64':
- {'checksum': ('e3c1b309d9203604922d6e255c2c5d09'
- '8a309c2d46215d8fc026954f3c5c27a0'),
- 'pxeboot_url': ('https://archives.fedoraproject.org/'
- 'pub/archive/fedora/linux/releases/31/'
- 'Everything/x86_64/os/images/pxeboot/'),
- 'kernel_params': ('root=UUID=b1438b9b-2cab-4065-a99a-'
- '08a96687f73c ro no_timer_check '
- 'net.ifnames=0 console=tty1 '
- 'console=ttyS0,115200n8'),
- },
- 'aarch64':
- {'checksum': ('1e18d9c0cf734940c4b5d5ec592facae'
- 'd2af0ad0329383d5639c997fdf16fe49'),
- 'pxeboot_url': 'https://archives.fedoraproject.org/'
- 'pub/archive/fedora/linux/releases/31/'
- 'Everything/aarch64/os/images/pxeboot/',
- 'kernel_params': ('root=UUID=b6950a44-9f3c-4076-a9c2-'
- '355e8475b0a7 ro earlyprintk=pl011,0x9000000'
- ' ignore_loglevel no_timer_check'
- ' printk.time=1 rd_NO_PLYMOUTH'
- ' console=ttyAMA0'),
- },
- 'ppc64':
- {'checksum': ('7c3528b85a3df4b2306e892199a9e1e4'
- '3f991c506f2cc390dc4efa2026ad2f58')},
- 's390x':
- {'checksum': ('4caaab5a434fd4d1079149a072fdc789'
- '1e354f834d355069ca982fdcaf5a122d')},
- },
- '32': {
- 'aarch64':
- {'checksum': ('b367755c664a2d7a26955bbfff985855'
- 'adfa2ca15e908baf15b4b176d68d3967'),
- 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/'
- 'releases/32/Server/aarch64/os/images/'
- 'pxeboot/'),
- 'kernel_params': ('root=UUID=3df75b65-be8d-4db4-8655-'
- '14d95c0e90c5 ro no_timer_check net.ifnames=0'
- ' console=tty1 console=ttyS0,115200n8'),
- },
- },
- '33': {
- 'aarch64':
- {'checksum': ('e7f75cdfd523fe5ac2ca9eeece68edc1'
- 'a81f386a17f969c1d1c7c87031008a6b'),
- 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/'
- 'releases/33/Server/aarch64/os/images/'
- 'pxeboot/'),
- 'kernel_params': ('root=UUID=d20b3ffa-6397-4a63-a734-'
- '1126a0208f8a ro no_timer_check net.ifnames=0'
- ' console=tty1 console=ttyS0,115200n8'
- ' console=tty0'),
- },
- },
- }
- }
-
- def __init__(self, name, version, arch):
- self.name = name
- self.version = version
- self.arch = arch
- try:
- info = self.KNOWN_DISTROS.get(name).get(version).get(arch)
- except AttributeError:
- # Unknown distro
- info = None
- self._info = info or {}
-
- @property
- def checksum(self):
- """Gets the cloud-image file checksum"""
- return self._info.get('checksum', None)
-
- @checksum.setter
- def checksum(self, value):
- self._info['checksum'] = value
-
- @property
- def pxeboot_url(self):
- """Gets the repository url where pxeboot files can be found"""
- return self._info.get('pxeboot_url', None)
-
- @property
- def default_kernel_params(self):
- """Gets the default kernel parameters"""
- return self._info.get('kernel_params', None)
-
-
-class LinuxTest(LinuxSSHMixIn, QemuSystemTest):
- """Facilitates having a cloud-image Linux based available.
-
- For tests that intend to interact with guests, this is a better choice
- to start with than the more vanilla `QemuSystemTest` class.
- """
-
- distro = None
- username = 'root'
- password = 'password'
- smp = '2'
- memory = '1024'
-
- def _set_distro(self):
- distro_name = self.params.get(
- 'distro',
- default=self._get_unique_tag_val('distro'))
- if not distro_name:
- distro_name = 'fedora'
-
- distro_version = self.params.get(
- 'distro_version',
- default=self._get_unique_tag_val('distro_version'))
- if not distro_version:
- distro_version = '31'
-
- self.distro = LinuxDistro(distro_name, distro_version, self.arch)
-
- # The distro checksum behaves differently than distro name and
- # version. First, it does not respect a tag with the same
- # name, given that it's not expected to be used for filtering
- # (distro name versions are the natural choice). Second, the
- # order of precedence is: parameter, attribute and then value
- # from KNOWN_DISTROS.
- distro_checksum = self.params.get('distro_checksum',
- default=None)
- if distro_checksum:
- self.distro.checksum = distro_checksum
-
- def setUp(self, ssh_pubkey=None, network_device_type='virtio-net'):
- super().setUp()
- self.require_netdev('user')
- self._set_distro()
- self.vm.add_args('-smp', self.smp)
- self.vm.add_args('-m', self.memory)
- # The following network device allows for SSH connections
- self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
- '-device', '%s,netdev=vnet' % network_device_type)
- self.set_up_boot()
- if ssh_pubkey is None:
- ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys()
- self.set_up_cloudinit(ssh_pubkey)
-
- def set_up_existing_ssh_keys(self):
- ssh_public_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa.pub')
- source_private_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa')
- ssh_dir = os.path.join(self.workdir, '.ssh')
- os.mkdir(ssh_dir, mode=0o700)
- ssh_private_key = os.path.join(ssh_dir,
- os.path.basename(source_private_key))
- shutil.copyfile(source_private_key, ssh_private_key)
- os.chmod(ssh_private_key, 0o600)
- return (ssh_public_key, ssh_private_key)
-
- def download_boot(self):
- # Set the qemu-img binary.
- # If none is available, the test will cancel.
- vmimage.QEMU_IMG = super().get_qemu_img()
-
- self.log.info('Downloading/preparing boot image')
- # Fedora 31 only provides ppc64le images
- image_arch = self.arch
- if self.distro.name == 'fedora':
- if image_arch == 'ppc64':
- image_arch = 'ppc64le'
-
- try:
- boot = vmimage.get(
- self.distro.name, arch=image_arch, version=self.distro.version,
- checksum=self.distro.checksum,
- algorithm='sha256',
- cache_dir=self.cache_dirs[0],
- snapshot_dir=self.workdir)
- except:
- self.cancel('Failed to download/prepare boot image')
- return boot.path
-
- def prepare_cloudinit(self, ssh_pubkey=None):
- self.log.info('Preparing cloudinit image')
- try:
- cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso')
- pubkey_content = None
- if ssh_pubkey:
- with open(ssh_pubkey) as pubkey:
- pubkey_content = pubkey.read()
- cloudinit.iso(cloudinit_iso, self.name,
- username=self.username,
- password=self.password,
- # QEMU's hard coded usermode router address
- phone_home_host='10.0.2.2',
- phone_home_port=self.phone_server.server_port,
- authorized_key=pubkey_content)
- except Exception:
- self.cancel('Failed to prepare the cloudinit image')
- return cloudinit_iso
-
- def set_up_boot(self):
- path = self.download_boot()
- self.vm.add_args('-drive', 'file=%s' % path)
-
- def set_up_cloudinit(self, ssh_pubkey=None):
- self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0),
- self.name)
- cloudinit_iso = self.prepare_cloudinit(ssh_pubkey)
- self.vm.add_args('-drive', 'file=%s,format=raw' % cloudinit_iso)
-
- def launch_and_wait(self, set_up_ssh_connection=True):
- self.vm.set_console()
- self.vm.launch()
- console_drainer = datadrainer.LineLogger(self.vm.console_socket.fileno(),
- logger=self.log.getChild('console'))
- console_drainer.start()
- self.log.info('VM launched, waiting for boot confirmation from guest')
- while not self.phone_server.instance_phoned_back:
- self.phone_server.handle_request()
-
- if set_up_ssh_connection:
- self.log.info('Setting up the SSH connection')
- self.ssh_connect(self.username, self.ssh_key)
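
wait_for_console_pattern() and its siblings removed above all funnel into _console_interaction(), which is essentially a read-lines-until-pattern loop over the VM's console. A stripped-down standalone sketch of that loop (illustration only, assuming any file-like object yielding bytes; the real helper also supports resending an interrupt string and skipping undecodable lines):

    import io

    def wait_for_pattern(console, success, failure=None):
        """Read console lines until `success` appears; fail early on `failure`."""
        while True:
            line = console.readline()
            if not line:                      # EOF before the pattern showed up
                raise RuntimeError("console closed before pattern was seen")
            msg = line.decode(errors="replace").strip()
            if failure and failure in msg:
                raise AssertionError('failure message "%s" seen' % failure)
            if success in msg:
                return msg

    fake = io.BytesIO(b"Booting...\nKernel command line: console=ttyS0\n")
    print(wait_for_pattern(fake, "Kernel command line:"))
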
diff --git a/tests/avocado/boot_linux.py b/tests/avocado/boot_linux.py
deleted file mode 100644
index cdce4cb..0000000
--- a/tests/avocado/boot_linux.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Functional test that boots a complete Linux system via a cloud image
-#
-# Copyright (c) 2018-2020 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado_qemu import LinuxTest, BUILD_DIR
-
-from avocado import skipUnless
-
-
-class BootLinuxX8664(LinuxTest):
- """
- :avocado: tags=arch:x86_64
- """
- timeout = 480
-
- def test_pc_i440fx_tcg(self):
- """
- :avocado: tags=machine:pc
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg")
- self.launch_and_wait(set_up_ssh_connection=False)
-
- def test_pc_i440fx_kvm(self):
- """
- :avocado: tags=machine:pc
- :avocado: tags=accel:kvm
- """
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.launch_and_wait(set_up_ssh_connection=False)
-
- def test_pc_q35_tcg(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg")
- self.launch_and_wait(set_up_ssh_connection=False)
-
- def test_pc_q35_kvm(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=accel:kvm
- """
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.launch_and_wait(set_up_ssh_connection=False)
-
-
-# For Aarch64 we only boot KVM tests in CI as booting the current
-# Fedora OS in TCG tests is very heavyweight. There are lighter weight
-# distros which we use in the machine_aarch64_virt.py tests.
-class BootLinuxAarch64(LinuxTest):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- """
- timeout = 720
-
- def test_virt_kvm(self):
- """
- :avocado: tags=accel:kvm
- :avocado: tags=cpu:host
- """
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.vm.add_args("-machine", "virt,gic-version=host")
- self.vm.add_args('-bios',
- os.path.join(BUILD_DIR, 'pc-bios',
- 'edk2-aarch64-code.fd'))
- self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
- self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
- self.launch_and_wait(set_up_ssh_connection=False)
-
-
-# See the tux_baseline.py tests for almost the same coverage in a lot
-# less time.
-class BootLinuxPPC64(LinuxTest):
- """
- :avocado: tags=arch:ppc64
- """
-
- timeout = 360
-
- @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited')
- def test_pseries_tcg(self):
- """
- :avocado: tags=machine:pseries
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg")
- self.launch_and_wait(set_up_ssh_connection=False)
-
- def test_pseries_kvm(self):
- """
- :avocado: tags=machine:pseries
- :avocado: tags=accel:kvm
- """
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.vm.add_args("-machine", "cap-ccf-assist=off")
- self.launch_and_wait(set_up_ssh_connection=False)
-
-class BootLinuxS390X(LinuxTest):
- """
- :avocado: tags=arch:s390x
- """
-
- timeout = 240
-
- @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited')
- def test_s390_ccw_virtio_tcg(self):
- """
- :avocado: tags=machine:s390-ccw-virtio
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg")
- self.launch_and_wait(set_up_ssh_connection=False)
diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py
deleted file mode 100644
index c35fc5e..0000000
--- a/tests/avocado/boot_linux_console.py
+++ /dev/null
@@ -1,1547 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import lzma
-import gzip
-import shutil
-
-from avocado import skip
-from avocado import skipUnless
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import interrupt_interactive_console_until_pattern
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import process
-from avocado.utils import archive
-
-"""
-Round up to next power of 2
-"""
-def pow2ceil(x):
- return 1 if x == 0 else 2**(x - 1).bit_length()
-
-def file_truncate(path, size):
- if size != os.path.getsize(path):
- with open(path, 'ab+') as fd:
- fd.truncate(size)
-
-"""
-Expand file size to next power of 2
-"""
-def image_pow2ceil_expand(path):
- size = os.path.getsize(path)
- size_aligned = pow2ceil(size)
- if size != size_aligned:
- with open(path, 'ab+') as fd:
- fd.truncate(size_aligned)
-
-class LinuxKernelTest(QemuSystemTest):
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def extract_from_deb(self, deb, path):
- """
- Extracts a file from a deb package into the test workdir
-
- :param deb: path to the deb archive
- :param path: path within the deb archive of the file to be extracted
- :returns: path of the extracted file
- """
- cwd = os.getcwd()
- os.chdir(self.workdir)
- file_path = process.run("ar t %s" % deb).stdout_text.split()[2]
- process.run("ar x %s %s" % (deb, file_path))
- archive.extract(file_path, self.workdir)
- os.chdir(cwd)
- # Return complete path to extracted file. Because callers to
- # extract_from_deb() specify 'path' with a leading slash, it is
- # necessary to use os.path.relpath() as otherwise os.path.join()
- # interprets it as an absolute path and drops the self.workdir part.
- return os.path.normpath(os.path.join(self.workdir,
- os.path.relpath(path, '/')))
-
- def extract_from_rpm(self, rpm, path):
- """
- Extracts a file from an RPM package into the test workdir.
-
- :param rpm: path to the rpm archive
- :param path: path within the rpm archive of the file to be extracted
- needs to be a relative path (starting with './') because
- cpio(1), which is used to extract the file, expects that.
- :returns: path of the extracted file
- """
- cwd = os.getcwd()
- os.chdir(self.workdir)
- process.run("rpm2cpio %s | cpio -id %s" % (rpm, path), shell=True)
- os.chdir(cwd)
- return os.path.normpath(os.path.join(self.workdir, path))
-
-class BootLinuxConsole(LinuxKernelTest):
- """
- Boots a Linux kernel and checks that the console is operational and the
- kernel command line is properly passed from QEMU to the kernel
- """
- timeout = 90
-
- def test_x86_64_pc(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:pc
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/x86_64/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = '23bebd2680757891cf7adedb033532163a792495'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_mips_malta(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=machine:malta
- :avocado: tags=endian:big
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20130217T032700Z/pool/main/l/linux-2.6/'
- 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb')
- deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-2.6.32-5-4kc-malta')
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_mips64el_malta(self):
- """
- This test requires the ar tool to extract "data.tar.gz" from
- the Debian package.
-
- The kernel can be rebuilt using this Debian kernel source [1] and
- following the instructions on [2].
-
- [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/
- #linux-source-2.6.32_2.6.32-48
- [2] https://kernel-team.pages.debian.net/kernel-handbook/
- ch-common-tasks.html#s-common-official
-
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20130217T032700Z/pool/main/l/linux-2.6/'
- 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb')
- deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-2.6.32-5-5kc-malta')
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_mips64el_fuloong2e(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:fuloong2e
- :avocado: tags=endian:little
- """
- deb_url = ('http://archive.debian.org/debian/pool/main/l/linux/'
- 'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb')
- deb_hash = 'd04d446045deecf7b755ef576551de0c4184dd44'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-3.16.0-6-loongson-2e')
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_mips_malta_cpio(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=machine:malta
- :avocado: tags=endian:big
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20160601T041800Z/pool/main/l/linux/'
- 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb')
- deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-4.5.0-2-4kc-malta')
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/'
- 'mips/rootfs.cpio.gz')
- initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = self.workdir + "rootfs.cpio"
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
- + 'console=ttyS0 console=tty '
- + 'rdinit=/sbin/init noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'BogoMIPS')
- exec_command_and_wait_for_pattern(self, 'uname -a',
- 'Debian')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_mips64el_malta_5KEc_cpio(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:5KEc
- """
- kernel_url = ('https://github.com/philmd/qemu-testing-blob/'
- 'raw/9ad2df38/mips/malta/mips64el/'
- 'vmlinux-3.19.3.mtoman.20150408')
- kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- initrd_url = ('https://github.com/groeck/linux-build-test/'
- 'raw/8584a59e/rootfs/'
- 'mipsel64/rootfs.mipsel64r1.cpio.gz')
- initrd_hash = '1dbb8a396e916847325284dbe2151167'
- initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5',
- asset_hash=initrd_hash)
- initrd_path = self.workdir + "rootfs.cpio"
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
- + 'console=ttyS0 console=tty '
- + 'rdinit=/sbin/init noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- wait_for_console_pattern(self, 'Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'MIPS 5KE')
- exec_command_and_wait_for_pattern(self, 'uname -a',
- '3.19.3.mtoman.20150408')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def do_test_mips_malta32el_nanomips(self, kernel_url, kernel_hash):
- kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- kernel_path = self.workdir + "kernel"
- with lzma.open(kernel_path_xz, 'rb') as f_in:
- with open(kernel_path, 'wb') as f_out:
- shutil.copyfileobj(f_in, f_out)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
- + 'mem=256m@@0x0 '
- + 'console=ttyS0')
- self.vm.add_args('-no-reboot',
- '-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_mips_malta32el_nanomips_4k(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page4k.xz')
- kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
- self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
-
- def test_mips_malta32el_nanomips_16k_up(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page16k_up.xz')
- kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
- self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
-
- def test_mips_malta32el_nanomips_64k_dbg(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page64k_dbg.xz')
- kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
- self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
-
- def test_aarch64_xlnx_versal_virt(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:xlnx-versal-virt
- :avocado: tags=device:pl011
- :avocado: tags=device:arm_gicv3
- :avocado: tags=accel:tcg
- """
- images_url = ('http://ports.ubuntu.com/ubuntu-ports/dists/'
- 'bionic-updates/main/installer-arm64/'
- '20101020ubuntu543.19/images/')
- kernel_url = images_url + 'netboot/ubuntu-installer/arm64/linux'
- kernel_hash = 'e167757620640eb26de0972f578741924abb3a82'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- initrd_url = images_url + 'netboot/ubuntu-installer/arm64/initrd.gz'
- initrd_hash = 'cab5cb3fcefca8408aa5aae57f24574bfce8bdb9'
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- self.vm.set_console()
- self.vm.add_args('-m', '2G',
- '-accel', 'tcg',
- '-kernel', kernel_path,
- '-initrd', initrd_path)
- self.vm.launch()
- self.wait_for_console_pattern('Checked W+X mappings: passed')
-
- def test_arm_virt(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:virt
- :avocado: tags=accel:tcg
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/armhfp/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_arm_emcraft_sf2(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:emcraft-sf2
- :avocado: tags=endian:little
- :avocado: tags=u-boot
- :avocado: tags=accel:tcg
- """
- self.require_netdev('user')
-
- uboot_url = ('https://raw.githubusercontent.com/'
- 'Subbaraya-Sundeep/qemu-test-binaries/'
- 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot')
- uboot_hash = 'cbb8cbab970f594bf6523b9855be209c08374ae2'
- uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash)
- spi_url = ('https://raw.githubusercontent.com/'
- 'Subbaraya-Sundeep/qemu-test-binaries/'
- 'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin')
- spi_hash = '65523a1835949b6f4553be96dec1b6a38fb05501'
- spi_path = self.fetch_asset(spi_url, asset_hash=spi_hash)
-
- file_truncate(spi_path, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
- self.vm.add_args('-kernel', uboot_path,
- '-append', kernel_command_line,
- '-drive', 'file=' + spi_path + ',if=mtd,format=raw',
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Enter \'help\' for a list')
-
- exec_command_and_wait_for_pattern(self, 'ifconfig eth0 10.0.2.15',
- 'eth0: link becomes ready')
- exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
- '3 packets transmitted, 3 packets received, 0% packet loss')
-
- def do_test_arm_raspi2(self, uart_id):
- """
- :avocado: tags=accel:tcg
-
- The kernel can be rebuilt using the kernel source referenced
-        and following the instructions on:
- https://www.raspberrypi.org/documentation/linux/kernel/building.md
- """
- serial_kernel_cmdline = {
- 0: 'earlycon=pl011,0x3f201000 console=ttyAMA0',
- }
- deb_url = ('http://archive.raspberrypi.org/debian/'
- 'pool/main/r/raspberrypi-firmware/'
- 'raspberrypi-kernel_1.20190215-1_armhf.deb')
- deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
- dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- serial_kernel_cmdline[uart_id] +
- ' root=/dev/mmcblk0p2 rootwait ' +
- 'dwc_otg.fiq_fsm_enable=0')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-append', kernel_command_line,
- '-device', 'usb-kbd')
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
- console_pattern = 'Product: QEMU USB Keyboard'
- self.wait_for_console_pattern(console_pattern)
-
- def test_arm_raspi2_uart0(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:raspi2b
- :avocado: tags=device:pl011
- :avocado: tags=accel:tcg
- """
- self.do_test_arm_raspi2(0)
-
- def test_arm_raspi2_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:raspi2b
- """
- deb_url = ('http://archive.raspberrypi.org/debian/'
- 'pool/main/r/raspberrypi-firmware/'
- 'raspberrypi-kernel_1.20190215-1_armhf.deb')
- deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
- dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
-
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv7a.cpio.gz')
- initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'earlycon=pl011,0x3f201000 console=ttyAMA0 '
- 'panic=-1 noreboot ' +
- 'dwc_otg.fiq_fsm_enable=0')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'BCM2835')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- '/soc/cprman@7e101000')
- exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def test_arm_raspi4(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:raspi4b
- :avocado: tags=device:pl011
- :avocado: tags=accel:tcg
- :avocado: tags=rpi4b
-
- The kernel can be rebuilt using the kernel source referenced
-        and following the instructions on:
- https://www.raspberrypi.org/documentation/linux/kernel/building.md
- """
-
- deb_url = ('http://archive.raspberrypi.org/debian/'
- 'pool/main/r/raspberrypi-firmware/'
- 'raspberrypi-kernel_1.20230106-1_arm64.deb')
- deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img')
- dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb')
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'earlycon=pl011,mmio32,0xfe201000 ' +
- 'console=ttyAMA0,115200 ' +
- 'root=/dev/mmcblk1p2 rootwait ' +
- 'dwc_otg.fiq_fsm_enable=0')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-append', kernel_command_line)
- # When PCI is supported we can add a USB controller:
- # '-device', 'qemu-xhci,bus=pcie.1,id=xhci',
- # '-device', 'usb-kbd,bus=xhci.0',
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
- # When USB is enabled we can look for this
- # console_pattern = 'Product: QEMU USB Keyboard'
- # self.wait_for_console_pattern(console_pattern)
- console_pattern = 'Waiting for root device'
- self.wait_for_console_pattern(console_pattern)
-
-
- def test_arm_raspi4_initrd(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:raspi4b
- :avocado: tags=device:pl011
- :avocado: tags=accel:tcg
- :avocado: tags=rpi4b
-
- The kernel can be rebuilt using the kernel source referenced
-        and following the instructions on:
- https://www.raspberrypi.org/documentation/linux/kernel/building.md
- """
- deb_url = ('http://archive.raspberrypi.org/debian/'
- 'pool/main/r/raspberrypi-firmware/'
- 'raspberrypi-kernel_1.20230106-1_arm64.deb')
- deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img')
- dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb')
-
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '86b2be1384d41c8c388e63078a847f1e1c4cb1de/rootfs/'
- 'arm64/rootfs.cpio.gz')
- initrd_hash = 'f3d4f9fa92a49aa542f1b44d34be77bbf8ca5b9d'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'earlycon=pl011,mmio32,0xfe201000 ' +
- 'console=ttyAMA0,115200 ' +
- 'panic=-1 noreboot ' +
- 'dwc_otg.fiq_fsm_enable=0')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- # When PCI is supported we can add a USB controller:
- # '-device', 'qemu-xhci,bus=pcie.1,id=xhci',
- # '-device', 'usb-kbd,bus=xhci.0',
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'BCM2835')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- 'cprman@7e101000')
- exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted')
- # TODO: Raspberry Pi4 doesn't shut down properly with recent kernels
- # Wait for VM to shut down gracefully
- #self.vm.wait()
-
- def test_arm_exynos4210_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:smdkc210
- :avocado: tags=accel:tcg
- """
- deb_url = ('https://snapshot.debian.org/archive/debian/'
- '20190928T224601Z/pool/main/l/linux/'
- 'linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb')
- deb_hash = 'fa9df4a0d38936cb50084838f2cb933f570d7d82'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-4.19.0-6-armmp')
- dtb_path = '/usr/lib/linux-image-4.19.0-6-armmp/exynos4210-smdkv310.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
-
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv5.cpio.gz')
- initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'earlycon=exynos4210,0x13800000 earlyprintk ' +
- 'console=ttySAC0,115200n8 ' +
- 'random.trust_cpu=off cryptomgr.notests ' +
- 'cpuidle.off=1 panic=-1 noreboot')
-
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
-
- self.wait_for_console_pattern('Boot successful.')
-        # TODO: run a user command; for now the UART is stuck
-
- def test_arm_cubieboard_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:cubieboard
- :avocado: tags=accel:tcg
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv5.cpio.gz')
- initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'usbcore.nousb '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun4i/sun5i')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- 'system-control@1c00000')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def test_arm_cubieboard_sata(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:cubieboard
- :avocado: tags=accel:tcg
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- rootfs_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv5.ext2.gz')
- rootfs_hash = '093e89d2b4d982234bf528bc9fb2f2f17a9d1f93'
- rootfs_path_gz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash)
- rootfs_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(rootfs_path_gz, rootfs_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'usbcore.nousb '
- 'root=/dev/sda ro '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-drive', 'if=none,format=raw,id=disk0,file='
- + rootfs_path,
- '-device', 'ide-hd,bus=ide.0,drive=disk0',
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun4i/sun5i')
- exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
- 'sda')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited')
- def test_arm_cubieboard_openwrt_22_03_2(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:cubieboard
- :avocado: tags=device:sd
- """
-
-        # This test downloads a 7.5 MiB compressed image and expands it
- # to 126 MiB.
- image_url = ('https://downloads.openwrt.org/releases/22.03.2/targets/'
- 'sunxi/cortexa8/openwrt-22.03.2-sunxi-cortexa8-'
- 'cubietech_a10-cubieboard-ext4-sdcard.img.gz')
- image_hash = ('94b5ecbfbc0b3b56276e5146b899eafa'
- '2ac5dc2d08733d6705af9f144f39f554')
- image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- image_path = archive.extract(image_path_gz, self.workdir)
- image_pow2ceil_expand(image_path)
-
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
- '-nic', 'user',
- '-no-reboot')
- self.vm.launch()
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'usbcore.nousb '
- 'noreboot')
-
- self.wait_for_console_pattern('U-Boot SPL')
-
- interrupt_interactive_console_until_pattern(
- self, 'Hit any key to stop autoboot:', '=>')
- exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
- kernel_command_line + "'", '=>')
-        exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
-
- self.wait_for_console_pattern(
- 'Please press Enter to activate this console.')
-
- exec_command_and_wait_for_pattern(self, ' ', 'root@')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun4i/sun5i')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
- def test_arm_quanta_gsj(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:quanta-gsj
- :avocado: tags=accel:tcg
- """
- # 25 MiB compressed, 32 MiB uncompressed.
- image_url = (
- 'https://github.com/hskinnemoen/openbmc/releases/download/'
- '20200711-gsj-qemu-0/obmc-phosphor-image-gsj.static.mtd.gz')
- image_hash = '14895e634923345cb5c8776037ff7876df96f6b1'
- image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash)
- image_name = 'obmc.mtd'
- image_path = os.path.join(self.workdir, image_name)
- archive.gzip_uncompress(image_path_gz, image_path)
-
- self.vm.set_console()
- drive_args = 'file=' + image_path + ',if=mtd,bus=0,unit=0'
- self.vm.add_args('-drive', drive_args)
- self.vm.launch()
-
- # Disable drivers and services that stall for a long time during boot,
- # to avoid running past the 90-second timeout. These may be removed
- # as the corresponding device support is added.
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + (
- 'console=${console} '
- 'mem=${mem} '
- 'initcall_blacklist=npcm_i2c_bus_driver_init '
- 'systemd.mask=systemd-random-seed.service '
- 'systemd.mask=dropbearkey.service '
- )
-
- self.wait_for_console_pattern('> BootBlock by Nuvoton')
- self.wait_for_console_pattern('>Device: Poleg BMC NPCM730')
- self.wait_for_console_pattern('>Skip DDR init.')
- self.wait_for_console_pattern('U-Boot ')
- interrupt_interactive_console_until_pattern(
- self, 'Hit any key to stop autoboot:', 'U-Boot>')
- exec_command_and_wait_for_pattern(
- self, "setenv bootargs ${bootargs} " + kernel_command_line,
- 'U-Boot>')
- exec_command_and_wait_for_pattern(
- self, 'run romboot', 'Booting Kernel from flash')
- self.wait_for_console_pattern('Booting Linux on physical CPU 0x0')
- self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0')
- self.wait_for_console_pattern('OpenBMC Project Reference Distro')
- self.wait_for_console_pattern('gsj login:')
-
- def test_arm_quanta_gsj_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:quanta-gsj
- :avocado: tags=accel:tcg
- """
- initrd_url = (
- 'https://github.com/hskinnemoen/openbmc/releases/download/'
- '20200711-gsj-qemu-0/obmc-phosphor-initramfs-gsj.cpio.xz')
- initrd_hash = '98fefe5d7e56727b1eb17d5c00311b1b5c945300'
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- kernel_url = (
- 'https://github.com/hskinnemoen/openbmc/releases/download/'
- '20200711-gsj-qemu-0/uImage-gsj.bin')
- kernel_hash = 'fa67b2f141d56d39b3c54305c0e8a899c99eb2c7'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- dtb_url = (
- 'https://github.com/hskinnemoen/openbmc/releases/download/'
- '20200711-gsj-qemu-0/nuvoton-npcm730-gsj.dtb')
- dtb_hash = '18315f7006d7b688d8312d5c727eecd819aa36a4'
- dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200n8 '
- 'earlycon=uart8250,mmio32,0xf0001000')
- self.vm.add_args('-kernel', kernel_path,
- '-initrd', initrd_path,
- '-dtb', dtb_path,
- '-append', kernel_command_line)
- self.vm.launch()
-
- self.wait_for_console_pattern('Booting Linux on physical CPU 0x0')
- self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0')
- self.wait_for_console_pattern(
- 'Give root password for system maintenance')
-
- def test_arm_bpim2u(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:bpim2u
- :avocado: tags=accel:tcg
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/'
- 'sun8i-r40-bananapi-m2-ultra.dtb')
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200n8 '
- 'earlycon=uart,mmio32,0x1c28000')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_arm_bpim2u_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=accel:tcg
- :avocado: tags=machine:bpim2u
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/'
- 'sun8i-r40-bananapi-m2-ultra.dtb')
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv7a.cpio.gz')
- initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun8i Family')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- 'system-control@1c00000')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def test_arm_bpim2u_gmac(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=accel:tcg
- :avocado: tags=machine:bpim2u
- :avocado: tags=device:sd
- """
- self.require_netdev('user')
-
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/'
- 'sun8i-r40-bananapi-m2-ultra.dtb')
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/'
- 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz')
- rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a'
- rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash)
- rootfs_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.lzma_uncompress(rootfs_path_xz, rootfs_path)
- image_pow2ceil_expand(rootfs_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'root=b300 rootwait rw '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-drive', 'file=' + rootfs_path + ',if=sd,format=raw',
- '-net', 'nic,model=gmac,netdev=host_gmac',
- '-netdev', 'user,id=host_gmac',
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- shell_ready = "/bin/sh: can't access tty; job control turned off"
- self.wait_for_console_pattern(shell_ready)
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun8i Family')
- exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
- 'mmcblk')
- exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up',
- 'eth0: Link is Up')
- exec_command_and_wait_for_pattern(self, 'udhcpc eth0',
- 'udhcpc: lease of 10.0.2.15 obtained')
- exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
- '3 packets transmitted, 3 packets received, 0% packet loss')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited')
- def test_arm_bpim2u_openwrt_22_03_3(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:bpim2u
- :avocado: tags=device:sd
- """
-
-        # This test downloads an 8.9 MiB compressed image and expands it
- # to 127 MiB.
- image_url = ('https://downloads.openwrt.org/releases/22.03.3/targets/'
- 'sunxi/cortexa7/openwrt-22.03.3-sunxi-cortexa7-'
- 'sinovoip_bananapi-m2-ultra-ext4-sdcard.img.gz')
- image_hash = ('5b41b4e11423e562c6011640f9a7cd3b'
- 'dd0a3d42b83430f7caa70a432e6cd82c')
- image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- image_path = archive.extract(image_path_gz, self.workdir)
- image_pow2ceil_expand(image_path)
-
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
- '-nic', 'user',
- '-no-reboot')
- self.vm.launch()
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'usbcore.nousb '
- 'noreboot')
-
- self.wait_for_console_pattern('U-Boot SPL')
-
- interrupt_interactive_console_until_pattern(
- self, 'Hit any key to stop autoboot:', '=>')
- exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
- kernel_command_line + "'", '=>')
-        exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
-
- self.wait_for_console_pattern(
- 'Please press Enter to activate this console.')
-
- exec_command_and_wait_for_pattern(self, ' ', 'root@')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun8i Family')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- 'system-control@1c00000')
-
- def test_arm_orangepi(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:orangepi-pc
- :avocado: tags=accel:tcg
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200n8 '
- 'earlycon=uart,mmio32,0x1c28000')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_arm_orangepi_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=accel:tcg
- :avocado: tags=machine:orangepi-pc
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv7a.cpio.gz')
- initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- self.wait_for_console_pattern('Boot successful.')
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun8i Family')
- exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
- 'system-control@1c00000')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def test_arm_orangepi_sd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=accel:tcg
- :avocado: tags=machine:orangepi-pc
- :avocado: tags=device:sd
- """
- self.require_netdev('user')
-
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/'
- 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz')
- rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a'
- rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash)
- rootfs_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.lzma_uncompress(rootfs_path_xz, rootfs_path)
- image_pow2ceil_expand(rootfs_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'root=/dev/mmcblk0 rootwait rw '
- 'panic=-1 noreboot')
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-drive', 'file=' + rootfs_path + ',if=sd,format=raw',
- '-append', kernel_command_line,
- '-no-reboot')
- self.vm.launch()
- shell_ready = "/bin/sh: can't access tty; job control turned off"
- self.wait_for_console_pattern(shell_ready)
-
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'Allwinner sun8i Family')
- exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
- 'mmcblk0')
- exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up',
- 'eth0: Link is Up')
- exec_command_and_wait_for_pattern(self, 'udhcpc eth0',
- 'udhcpc: lease of 10.0.2.15 obtained')
- exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
- '3 packets transmitted, 3 packets received, 0% packet loss')
- exec_command_and_wait_for_pattern(self, 'reboot',
- 'reboot: Restarting system')
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited')
- def test_arm_orangepi_bionic_20_08(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:orangepi-pc
- :avocado: tags=device:sd
- """
-
-        # This test downloads a 275 MiB compressed image and expands it
- # to 1036 MiB, but the underlying filesystem is 1552 MiB...
- # As we expand it to 2 GiB we are safe.
-
- image_url = ('https://archive.armbian.com/orangepipc/archive/'
- 'Armbian_20.08.1_Orangepipc_bionic_current_5.8.5.img.xz')
- image_hash = ('b4d6775f5673486329e45a0586bf06b6'
- 'dbe792199fd182ac6b9c7bb6c7d3e6dd')
- image_path_xz = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- image_path = archive.extract(image_path_xz, self.workdir)
- image_pow2ceil_expand(image_path)
-
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
- '-nic', 'user',
- '-no-reboot')
- self.vm.launch()
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'loglevel=7 '
- 'nosmp '
- 'systemd.default_timeout_start_sec=9000 '
- 'systemd.mask=armbian-zram-config.service '
- 'systemd.mask=armbian-ramlog.service')
-
- self.wait_for_console_pattern('U-Boot SPL')
- self.wait_for_console_pattern('Autoboot in ')
- exec_command_and_wait_for_pattern(self, ' ', '=>')
- exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
- kernel_command_line + "'", '=>')
-        exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
-
- self.wait_for_console_pattern('systemd[1]: Set hostname ' +
- 'to <orangepipc>')
- self.wait_for_console_pattern('Starting Load Kernel Modules...')
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited')
- def test_arm_orangepi_uboot_netbsd9(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:orangepi-pc
- :avocado: tags=device:sd
- :avocado: tags=os:netbsd
- """
-        # This test downloads a 304 MB compressed image and expands it to 2 GB
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20200108T145233Z/pool/main/u/u-boot/'
- 'u-boot-sunxi_2020.01%2Bdfsg-1_armhf.deb')
- deb_hash = 'f67f404a80753ca3d1258f13e38f2b060e13db99'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- # We use the common OrangePi PC 'plus' build of U-Boot for our secondary
- # program loader (SPL). We will then set the path to the more specific
- # OrangePi "PC" device tree blob with 'setenv fdtfile' in U-Boot prompt,
- # before to boot NetBSD.
- uboot_path = '/usr/lib/u-boot/orangepi_plus/u-boot-sunxi-with-spl.bin'
- uboot_path = self.extract_from_deb(deb_path, uboot_path)
- image_url = ('https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.0/'
- 'evbarm-earmv7hf/binary/gzimg/armv7.img.gz')
- image_hash = '2babb29d36d8360adcb39c09e31060945259917a'
- image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash)
- image_path = os.path.join(self.workdir, 'armv7.img')
- archive.gzip_uncompress(image_path_gz, image_path)
- image_pow2ceil_expand(image_path)
- image_drive_args = 'if=sd,format=raw,snapshot=on,file=' + image_path
-
- # dd if=u-boot-sunxi-with-spl.bin of=armv7.img bs=1K seek=8 conv=notrunc
- with open(uboot_path, 'rb') as f_in:
- with open(image_path, 'r+b') as f_out:
- f_out.seek(8 * 1024)
- shutil.copyfileobj(f_in, f_out)
-
- self.vm.set_console()
- self.vm.add_args('-nic', 'user',
- '-drive', image_drive_args,
- '-global', 'allwinner-rtc.base-year=2000',
- '-no-reboot')
- self.vm.launch()
- wait_for_console_pattern(self, 'U-Boot 2020.01+dfsg-1')
- interrupt_interactive_console_until_pattern(self,
- 'Hit any key to stop autoboot:',
- 'switch to partitions #0, OK')
-
- exec_command_and_wait_for_pattern(self, '', '=>')
- cmd = 'setenv bootargs root=ld0a'
- exec_command_and_wait_for_pattern(self, cmd, '=>')
- cmd = 'setenv kernel netbsd-GENERIC.ub'
- exec_command_and_wait_for_pattern(self, cmd, '=>')
- cmd = 'setenv fdtfile dtb/sun8i-h3-orangepi-pc.dtb'
- exec_command_and_wait_for_pattern(self, cmd, '=>')
- cmd = ("setenv bootcmd 'fatload mmc 0:1 ${kernel_addr_r} ${kernel}; "
- "fatload mmc 0:1 ${fdt_addr_r} ${fdtfile}; "
- "fdt addr ${fdt_addr_r}; "
- "bootm ${kernel_addr_r} - ${fdt_addr_r}'")
- exec_command_and_wait_for_pattern(self, cmd, '=>')
-
- exec_command_and_wait_for_pattern(self, 'boot',
- 'Booting kernel from Legacy Image')
- wait_for_console_pattern(self, 'Starting kernel ...')
- wait_for_console_pattern(self, 'NetBSD 9.0 (GENERIC)')
- # Wait for user-space
- wait_for_console_pattern(self, 'Starting root file system check')
-
- def test_aarch64_raspi3_atf(self):
- """
- :avocado: tags=accel:tcg
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:raspi3b
- :avocado: tags=cpu:cortex-a53
- :avocado: tags=device:pl011
- :avocado: tags=atf
- """
- zip_url = ('https://github.com/pbatard/RPi3/releases/download/'
- 'v1.15/RPi3_UEFI_Firmware_v1.15.zip')
- zip_hash = '74b3bd0de92683cadb14e008a7575e1d0c3cafb9'
- zip_path = self.fetch_asset(zip_url, asset_hash=zip_hash)
-
- archive.extract(zip_path, self.workdir)
- efi_fd = os.path.join(self.workdir, 'RPI_EFI.fd')
-
- self.vm.set_console(console_index=1)
- self.vm.add_args('-nodefaults',
- '-device', 'loader,file=%s,force-raw=true' % efi_fd)
- self.vm.launch()
- self.wait_for_console_pattern('version UEFI Firmware v1.15')
-
- def test_s390x_s390_ccw_virtio(self):
- """
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/s390x/os/images'
- '/kernel.img')
- kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0'
- self.vm.add_args('-nodefaults',
- '-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_alpha_clipper(self):
- """
- :avocado: tags=arch:alpha
- :avocado: tags=machine:clipper
- """
- kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/'
- 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz')
- kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- uncompressed_kernel = archive.uncompress(kernel_path, self.workdir)
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- self.vm.add_args('-nodefaults',
- '-kernel', uncompressed_kernel,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
-
- def test_m68k_q800(self):
- """
- :avocado: tags=arch:m68k
- :avocado: tags=machine:q800
- """
- deb_url = ('https://snapshot.debian.org/archive/debian-ports'
- '/20191021T083923Z/pool-m68k/main'
- '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb')
- deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-5.3.0-1-m68k')
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0 vga=off')
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.wait_for_console_pattern(console_pattern)
- console_pattern = 'No filesystem could mount root'
- self.wait_for_console_pattern(console_pattern)
-
- def do_test_advcal_2018(self, day, tar_hash, kernel_name, console=0):
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day' + day + '.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console(console_index=console)
- self.vm.add_args('-kernel',
- self.workdir + '/day' + day + '/' + kernel_name)
- self.vm.launch()
- self.wait_for_console_pattern('QEMU advent calendar')
-
- def test_arm_vexpressa9(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:vexpress-a9
- """
- tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b'
- self.vm.add_args('-dtb', self.workdir + '/day16/vexpress-v2p-ca9.dtb')
- self.do_test_advcal_2018('16', tar_hash, 'winter.zImage')
-
- def test_arm_ast2600_debian(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:rainier-bmc
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20220606T211338Z/'
- 'pool/main/l/linux/'
- 'linux-image-5.17.0-2-armmp_5.17.6-1%2Bb1_armhf.deb')
- deb_hash = '8acb2b4439faedc2f3ed4bdb2847ad4f6e0491f73debaeb7f660c8abe4dcdc0e'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash,
- algorithm='sha256')
- kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.17.0-2-armmp')
- dtb_path = self.extract_from_deb(deb_path,
- '/usr/lib/linux-image-5.17.0-2-armmp/aspeed-bmc-ibm-rainier.dtb')
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-net', 'nic')
- self.vm.launch()
- self.wait_for_console_pattern("Booting Linux on physical CPU 0xf00")
- self.wait_for_console_pattern("SMP: Total of 2 processors activated")
- self.wait_for_console_pattern("No filesystem could mount root")
-
- def test_m68k_mcf5208evb(self):
- """
- :avocado: tags=arch:m68k
- :avocado: tags=machine:mcf5208evb
- """
- tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c'
- self.do_test_advcal_2018('07', tar_hash, 'sanity-clause.elf')
-
- def test_or1k_sim(self):
- """
- :avocado: tags=arch:or1k
- :avocado: tags=machine:or1k-sim
- """
- tar_hash = '20334cdaf386108c530ff0badaecc955693027dd'
- self.do_test_advcal_2018('20', tar_hash, 'vmlinux')
-
- def test_ppc64_e500(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:ppce500
- :avocado: tags=cpu:e5500
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- tar_hash = '6951d86d644b302898da2fd701739c9406527fe1'
- self.do_test_advcal_2018('19', tar_hash, 'uImage')
-
- def do_test_ppc64_powernv(self, proc):
- self.require_accelerator("tcg")
- images_url = ('https://github.com/open-power/op-build/releases/download/v2.7/')
-
- kernel_url = images_url + 'zImage.epapr'
- kernel_hash = '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash,
- algorithm='sha256')
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path,
- '-append', 'console=tty0 console=hvc0',
- '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0',
- '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234',
- '-device', 'e1000e,bus=bridge1,addr=0x3',
- '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2')
- self.vm.launch()
-
- self.wait_for_console_pattern("CPU: " + proc + " generation processor")
- self.wait_for_console_pattern("zImage starting: loaded")
- self.wait_for_console_pattern("Run /init as init process")
- # Device detection output driven by udev probing is sometimes cut off
-        # from the console output; we suspect the S14silence-console init script.
-
- def test_ppc_powernv8(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv8
- :avocado: tags=accel:tcg
- """
- self.do_test_ppc64_powernv('P8')
-
- def test_ppc_powernv9(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv9
- :avocado: tags=accel:tcg
- """
- self.do_test_ppc64_powernv('P9')
-
- def test_ppc_powernv10(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv10
- :avocado: tags=accel:tcg
- """
- self.do_test_ppc64_powernv('P10')
-
- def test_ppc_g3beige(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:g3beige
- :avocado: tags=accel:tcg
- """
- # TODO: g3beige works with kvm_pr but we don't have a
- # reliable way ATM (e.g. looking at /proc/modules) to detect
- # whether we're running kvm_hv or kvm_pr. For now let's
- # disable this test if we don't have TCG support.
- self.require_accelerator("tcg")
- tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
- self.vm.add_args('-M', 'graphics=off')
- self.do_test_advcal_2018('15', tar_hash, 'invaders.elf')
-
- def test_ppc_mac99(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:mac99
- :avocado: tags=accel:tcg
- """
- # TODO: mac99 works with kvm_pr but we don't have a
- # reliable way ATM (e.g. looking at /proc/modules) to detect
- # whether we're running kvm_hv or kvm_pr. For now let's
- # disable this test if we don't have TCG support.
- self.require_accelerator("tcg")
- tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
- self.vm.add_args('-M', 'graphics=off')
- self.do_test_advcal_2018('15', tar_hash, 'invaders.elf')
-
-    # This test has a 6-10% failure rate on various hosts, which looks
-    # like an issue with a buggy kernel. As a result we don't want it
-    # gating releases on GitLab.
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_sh4_r2d(self):
- """
- :avocado: tags=arch:sh4
- :avocado: tags=machine:r2d
- :avocado: tags=flaky
- """
- tar_hash = 'fe06a4fd8ccbf2e27928d64472939d47829d4c7e'
- self.vm.add_args('-append', 'console=ttySC1')
- self.do_test_advcal_2018('09', tar_hash, 'zImage', console=1)
-
- def test_sparc_ss20(self):
- """
- :avocado: tags=arch:sparc
- :avocado: tags=machine:SS-20
- """
- tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f'
- self.do_test_advcal_2018('11', tar_hash, 'zImage.elf')
-
- def test_xtensa_lx60(self):
- """
- :avocado: tags=arch:xtensa
- :avocado: tags=machine:lx60
- :avocado: tags=cpu:dc233c
- """
- tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34'
- self.do_test_advcal_2018('02', tar_hash, 'santas-sleigh-ride.elf')
diff --git a/tests/avocado/boot_xen.py b/tests/avocado/boot_xen.py
deleted file mode 100644
index fc2faee..0000000
--- a/tests/avocado/boot_xen.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Functional test that boots a Xen hypervisor with a domU kernel and
-# checks that the console output is vaguely sane.
-#
-# Copyright (c) 2020 Linaro
-#
-# Author:
-# Alex Bennée <alex.bennee@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado_qemu import wait_for_console_pattern
-from boot_linux_console import LinuxKernelTest
-
-
-class BootXenBase(LinuxKernelTest):
- """
- Boots a Xen hypervisor with a Linux DomU kernel.
- """
-
- timeout = 90
- XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all'
-
- def fetch_guest_kernel(self):
- # Using my own built kernel - which works
- kernel_url = ('https://fileserver.linaro.org/'
- 's/JSsewXGZ6mqxPr5/download?path=%2F&files='
- 'linux-5.9.9-arm64-ajb')
- kernel_sha1 = '4f92bc4b9f88d5ab792fa7a43a68555d344e1b83'
- kernel_path = self.fetch_asset(kernel_url,
- asset_hash=kernel_sha1)
-
- return kernel_path
-
- def launch_xen(self, xen_path):
- """
- Launch Xen with a dom0 guest kernel
- """
- self.log.info("launch with xen_path: %s", xen_path)
- kernel_path = self.fetch_guest_kernel()
-
- self.vm.set_console()
-
- xen_command_line = self.XEN_COMMON_COMMAND_LINE
- self.vm.add_args('-machine', 'virtualization=on',
- '-m', '768',
- '-kernel', xen_path,
- '-append', xen_command_line,
- '-device',
- 'guest-loader,addr=0x47000000,kernel=%s,bootargs=console=hvc0'
- % (kernel_path))
-
- self.vm.launch()
-
- console_pattern = 'VFS: Cannot open root device'
- wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
-
-
-class BootXen(BootXenBase):
-
- def test_arm64_xen_411_and_dom0(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=accel:tcg
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=machine:virt
- """
-
- # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
- xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
- 'download?path=%2F&files='
- 'xen-hypervisor-4.11-arm64_4.11.4%2B37-g3263f257ca-1_arm64.deb')
- xen_sha1 = '034e634d4416adbad1212d59b62bccdcda63e62a'
- xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
- xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.11-arm64")
-
- self.launch_xen(xen_path)
-
- def test_arm64_xen_414_and_dom0(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=accel:tcg
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=machine:virt
- """
-
- # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
- xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
- 'download?path=%2F&files='
- 'xen-hypervisor-4.14-arm64_4.14.0%2B80-gd101b417b7-1_arm64.deb')
- xen_sha1 = 'b9d209dd689ed2b393e625303a225badefec1160'
- xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
- xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.14-arm64")
-
- self.launch_xen(xen_path)
-
- def test_arm64_xen_415_and_dom0(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=accel:tcg
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=machine:virt
- """
-
- xen_url = ('https://fileserver.linaro.org/'
- 's/JSsewXGZ6mqxPr5/download'
- '?path=%2F&files=xen-upstream-4.15-unstable.deb')
- xen_sha1 = 'fc191172b85cf355abb95d275a24cc0f6d6579d8'
- xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
- xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.15-unstable")
-
- self.launch_xen(xen_path)
diff --git a/tests/avocado/cpu_queries.py b/tests/avocado/cpu_queries.py
deleted file mode 100644
index d3faa14..0000000
--- a/tests/avocado/cpu_queries.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Sanity check of query-cpu-* results
-#
-# Copyright (c) 2019 Red Hat, Inc.
-#
-# Author:
-# Eduardo Habkost <ehabkost@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-
-class QueryCPUModelExpansion(QemuSystemTest):
- """
- Run query-cpu-model-expansion for each CPU model, and validate results
- """
-
- def test(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:none
- """
- self.vm.add_args('-S')
- self.vm.launch()
-
- cpus = self.vm.cmd('query-cpu-definitions')
- for c in cpus:
- self.log.info("Checking CPU: %s", c)
- self.assertNotIn('', c['unavailable-features'], c['name'])
-
- for c in cpus:
- model = {'name': c['name']}
- e = self.vm.cmd('query-cpu-model-expansion', model=model,
- type='full')
- self.assertEqual(e['model']['name'], c['name'])
diff --git a/tests/avocado/empty_cpu_model.py b/tests/avocado/empty_cpu_model.py
deleted file mode 100644
index d906ef3..0000000
--- a/tests/avocado/empty_cpu_model.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Check for crash when using empty -cpu option
-#
-# Copyright (c) 2019 Red Hat, Inc.
-#
-# Author:
-# Eduardo Habkost <ehabkost@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-from avocado_qemu import QemuSystemTest
-
-class EmptyCPUModel(QemuSystemTest):
- def test(self):
- self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty')
diff --git a/tests/avocado/hotplug_blk.py b/tests/avocado/hotplug_blk.py
deleted file mode 100644
index 5dc30f6..0000000
--- a/tests/avocado/hotplug_blk.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Functional test that hotplugs a virtio blk disk and checks it on a Linux
-# guest
-#
-# Copyright (c) 2021 Red Hat, Inc.
-# Copyright (c) Yandex
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import time
-
-from avocado_qemu import LinuxTest
-
-
-class HotPlug(LinuxTest):
- def blockdev_add(self) -> None:
- self.vm.cmd('blockdev-add', **{
- 'driver': 'null-co',
- 'size': 1073741824,
- 'node-name': 'disk'
- })
-
- def assert_vda(self) -> None:
- self.ssh_command('test -e /sys/block/vda')
-
- def assert_no_vda(self) -> None:
- with self.assertRaises(AssertionError):
- self.assert_vda()
-
- def plug(self) -> None:
- args = {
- 'driver': 'virtio-blk-pci',
- 'drive': 'disk',
- 'id': 'virtio-disk0',
- 'bus': 'pci.1',
- 'addr': 1
- }
-
- self.assert_no_vda()
- self.vm.cmd('device_add', args)
- try:
- self.assert_vda()
- except AssertionError:
- time.sleep(1)
- self.assert_vda()
-
- def unplug(self) -> None:
- self.vm.cmd('device_del', id='virtio-disk0')
-
- self.vm.event_wait('DEVICE_DELETED', 1.0,
- match={'data': {'device': 'virtio-disk0'}})
-
- self.assert_no_vda()
-
- def test(self) -> None:
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:q35
- :avocado: tags=accel:kvm
- """
- self.require_accelerator('kvm')
- self.vm.add_args('-accel', 'kvm')
- self.vm.add_args('-device', 'pcie-pci-bridge,id=pci.1,bus=pcie.0')
-
- self.launch_and_wait()
- self.blockdev_add()
-
- self.plug()
- self.unplug()
diff --git a/tests/avocado/hotplug_cpu.py b/tests/avocado/hotplug_cpu.py
deleted file mode 100644
index 292bb43..0000000
--- a/tests/avocado/hotplug_cpu.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Functional test that hotplugs a CPU and checks it on a Linux guest
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import LinuxTest
-
-
-class HotPlugCPU(LinuxTest):
-
- def test(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:q35
- :avocado: tags=accel:kvm
- """
- self.require_accelerator('kvm')
- self.vm.add_args('-accel', 'kvm')
- self.vm.add_args('-cpu', 'Haswell')
- self.vm.add_args('-smp', '1,sockets=1,cores=2,threads=1,maxcpus=2')
- self.launch_and_wait()
-
- self.ssh_command('test -e /sys/devices/system/cpu/cpu0')
- with self.assertRaises(AssertionError):
- self.ssh_command('test -e /sys/devices/system/cpu/cpu1')
-
- self.vm.cmd('device_add',
- driver='Haswell-x86_64-cpu',
- socket_id=0,
- core_id=1,
- thread_id=0)
- self.ssh_command('test -e /sys/devices/system/cpu/cpu1')
diff --git a/tests/avocado/info_usernet.py b/tests/avocado/info_usernet.py
deleted file mode 100644
index e1aa7a6..0000000
--- a/tests/avocado/info_usernet.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Test for the hmp command "info usernet"
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-
-from qemu.utils import get_info_usernet_hostfwd_port
-
-
-class InfoUsernet(QemuSystemTest):
- """
- :avocado: tags=machine:none
- """
-
- def test_hostfwd(self):
- self.require_netdev('user')
- self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22')
- self.vm.launch()
- res = self.vm.cmd('human-monitor-command',
- command_line='info usernet')
- port = get_info_usernet_hostfwd_port(res)
- self.assertIsNotNone(port,
- ('"info usernet" output content does not seem to '
- 'contain the redirected port'))
- self.assertGreater(port, 0,
- ('Found a redirected port that is not greater than'
- ' zero'))
diff --git a/tests/avocado/intel_iommu.py b/tests/avocado/intel_iommu.py
deleted file mode 100644
index 09e694b..0000000
--- a/tests/avocado/intel_iommu.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# INTEL_IOMMU Functional tests
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# Author:
-# Eric Auger <eric.auger@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-import os
-
-from avocado import skipUnless
-from avocado_qemu import LinuxTest
-
-@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
-class IntelIOMMU(LinuxTest):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=distro:fedora
- :avocado: tags=distro_version:31
- :avocado: tags=machine:q35
- :avocado: tags=accel:kvm
- :avocado: tags=intel_iommu
- :avocado: tags=flaky
- """
-
- IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
- kernel_path = None
- initrd_path = None
- kernel_params = None
-
- def set_up_boot(self):
- path = self.download_boot()
- self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
- 'drive=drv0,id=virtio-disk0,bootindex=1,'
- 'werror=stop,rerror=stop' + self.IOMMU_ADDON)
- self.vm.add_args('-device', 'virtio-gpu-pci' + self.IOMMU_ADDON)
- self.vm.add_args('-drive',
- 'file=%s,if=none,cache=writethrough,id=drv0' % path)
-
- def setUp(self):
- super(IntelIOMMU, self).setUp(None, 'virtio-net-pci' + self.IOMMU_ADDON)
-
- def add_common_args(self):
- self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
- self.vm.add_args('-object',
- 'rng-random,id=rng0,filename=/dev/urandom')
-
- def common_vm_setup(self, custom_kernel=None):
- self.require_accelerator("kvm")
- self.add_common_args()
- self.vm.add_args("-accel", "kvm")
-
- if custom_kernel is None:
- return
-
- kernel_url = self.distro.pxeboot_url + 'vmlinuz'
- kernel_hash = '5b6f6876e1b5bda314f93893271da0d5777b1f3c'
- initrd_url = self.distro.pxeboot_url + 'initrd.img'
- initrd_hash = 'dd0340a1b39bd28f88532babd4581c67649ec5b1'
- self.kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- self.initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- def run_and_check(self):
- if self.kernel_path:
- self.vm.add_args('-kernel', self.kernel_path,
- '-append', self.kernel_params,
- '-initrd', self.initrd_path)
- self.launch_and_wait()
- self.ssh_command('cat /proc/cmdline')
- self.ssh_command('dmesg | grep -e DMAR -e IOMMU')
- self.ssh_command('find /sys/kernel/iommu_groups/ -type l')
- self.ssh_command('dnf -y install numactl-devel')
-
- def test_intel_iommu(self):
- """
- :avocado: tags=intel_iommu_intremap
- """
-
- self.common_vm_setup(True)
- self.vm.add_args('-device', 'intel-iommu,intremap=on')
- self.vm.add_args('-machine', 'kernel_irqchip=split')
-
- self.kernel_params = (self.distro.default_kernel_params +
- ' quiet intel_iommu=on')
- self.run_and_check()
-
- def test_intel_iommu_strict(self):
- """
- :avocado: tags=intel_iommu_strict
- """
-
- self.common_vm_setup(True)
- self.vm.add_args('-device', 'intel-iommu,intremap=on')
- self.vm.add_args('-machine', 'kernel_irqchip=split')
- self.kernel_params = (self.distro.default_kernel_params +
- ' quiet intel_iommu=on,strict')
- self.run_and_check()
-
- def test_intel_iommu_strict_cm(self):
- """
- :avocado: tags=intel_iommu_strict_cm
- """
-
- self.common_vm_setup(True)
- self.vm.add_args('-device', 'intel-iommu,intremap=on,caching-mode=on')
- self.vm.add_args('-machine', 'kernel_irqchip=split')
- self.kernel_params = (self.distro.default_kernel_params +
- ' quiet intel_iommu=on,strict')
- self.run_and_check()
-
- def test_intel_iommu_pt(self):
- """
- :avocado: tags=intel_iommu_pt
- """
-
- self.common_vm_setup(True)
- self.vm.add_args('-device', 'intel-iommu,intremap=on')
- self.vm.add_args('-machine', 'kernel_irqchip=split')
- self.kernel_params = (self.distro.default_kernel_params +
- ' quiet intel_iommu=on iommu=pt')
- self.run_and_check()
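
The four intel_iommu test cases above differ only in the intel-iommu device options and in the kernel parameters appended to the distro defaults. A condensed, illustrative sketch of that mapping (the helper below is hypothetical; the option strings are taken verbatim from the deleted test):

    # Hypothetical summary of the deleted intel_iommu variants; each entry
    # is (-device options, extra kernel parameters). Every variant also
    # passes '-machine kernel_irqchip=split' and prepends the distro's
    # default kernel parameters.
    IOMMU_VARIANTS = {
        'intremap':  ('intel-iommu,intremap=on',                 'quiet intel_iommu=on'),
        'strict':    ('intel-iommu,intremap=on',                 'quiet intel_iommu=on,strict'),
        'strict_cm': ('intel-iommu,intremap=on,caching-mode=on', 'quiet intel_iommu=on,strict'),
        'pt':        ('intel-iommu,intremap=on',                 'quiet intel_iommu=on iommu=pt'),
    }

    def iommu_args(variant):
        device, kernel_extra = IOMMU_VARIANTS[variant]
        return ['-device', device, '-machine', 'kernel_irqchip=split'], kernel_extra
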
diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py
deleted file mode 100644
index f8cb458..0000000
--- a/tests/avocado/kvm_xen_guest.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# KVM Xen guest functional tests
-#
-# Copyright © 2021 Red Hat, Inc.
-# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Author:
-# David Woodhouse <dwmw2@infradead.org>
-# Alex BennƩe <alex.bennee@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-
-from qemu.machine import machine
-
-from avocado_qemu import LinuxSSHMixIn
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class KVMXenGuest(QemuSystemTest, LinuxSSHMixIn):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:q35
- :avocado: tags=accel:kvm
- :avocado: tags=kvm_xen_guest
- """
-
- KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0'
-
- kernel_path = None
- kernel_params = None
-
- # Fetch assets from the kvm-xen-guest subdir of my shared test
- # images directory on fileserver.linaro.org where you can find
-# build instructions for how they were assembled.
- def get_asset(self, name, sha1):
- base_url = ('https://fileserver.linaro.org/s/'
- 'kE4nCFLdQcoBF9t/download?'
- 'path=%2Fkvm-xen-guest&files=' )
- url = base_url + name
- # use explicit name rather than failing to neatly parse the
- # URL into a unique one
- return self.fetch_asset(name=name, locations=(url), asset_hash=sha1)
-
- def common_vm_setup(self):
- # We also catch lack of KVM_XEN support if we fail to launch
- self.require_accelerator("kvm")
-
- self.vm.set_console()
-
- self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split")
- self.vm.add_args("-smp", "2")
-
- self.kernel_path = self.get_asset("bzImage",
- "367962983d0d32109998a70b45dcee4672d0b045")
- self.rootfs = self.get_asset("rootfs.ext4",
- "f1478401ea4b3fa2ea196396be44315bab2bb5e4")
-
- def run_and_check(self):
- self.vm.add_args('-kernel', self.kernel_path,
- '-append', self.kernel_params,
- '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0",
- '-device', 'xen-disk,drive=drv0,vdev=xvda',
- '-device', 'virtio-net-pci,netdev=unet',
- '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22')
-
- try:
- self.vm.launch()
- except machine.VMLaunchFailure as e:
- if "Xen HVM guest support not present" in e.output:
- self.cancel("KVM Xen support is not present "
- "(need v5.12+ kernel with CONFIG_KVM_XEN)")
- elif "Property 'kvm-accel.xen-version' not found" in e.output:
- self.cancel("QEMU not built with CONFIG_XEN_EMU support")
- else:
- raise e
-
- self.log.info('VM launched, waiting for sshd')
- console_pattern = 'Starting dropbear sshd: OK'
- wait_for_console_pattern(self, console_pattern, 'Oops')
- self.log.info('sshd ready')
- self.ssh_connect('root', '', False)
-
- self.ssh_command('cat /proc/cmdline')
- self.ssh_command('dmesg | grep -e "Grant table initialized"')
-
- def test_kvm_xen_guest(self):
- """
- :avocado: tags=kvm_xen_guest
- """
-
- self.common_vm_setup()
-
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks')
- self.run_and_check()
- self.ssh_command('grep xen-pirq.*msi /proc/interrupts')
-
- def test_kvm_xen_guest_nomsi(self):
- """
- :avocado: tags=kvm_xen_guest_nomsi
- """
-
- self.common_vm_setup()
-
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks pci=nomsi')
- self.run_and_check()
- self.ssh_command('grep xen-pirq.* /proc/interrupts')
-
- def test_kvm_xen_guest_noapic_nomsi(self):
- """
- :avocado: tags=kvm_xen_guest_noapic_nomsi
- """
-
- self.common_vm_setup()
-
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks noapic pci=nomsi')
- self.run_and_check()
- self.ssh_command('grep xen-pirq /proc/interrupts')
-
- def test_kvm_xen_guest_vapic(self):
- """
- :avocado: tags=kvm_xen_guest_vapic
- """
-
- self.common_vm_setup()
- self.vm.add_args('-cpu', 'host,+xen-vapic')
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks')
- self.run_and_check()
- self.ssh_command('grep xen-pirq /proc/interrupts')
- self.ssh_command('grep PCI-MSI /proc/interrupts')
-
- def test_kvm_xen_guest_novector(self):
- """
- :avocado: tags=kvm_xen_guest_novector
- """
-
- self.common_vm_setup()
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks' +
- ' xen_no_vector_callback')
- self.run_and_check()
- self.ssh_command('grep xen-platform-pci /proc/interrupts')
-
- def test_kvm_xen_guest_novector_nomsi(self):
- """
- :avocado: tags=kvm_xen_guest_novector_nomsi
- """
-
- self.common_vm_setup()
-
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks pci=nomsi' +
- ' xen_no_vector_callback')
- self.run_and_check()
- self.ssh_command('grep xen-platform-pci /proc/interrupts')
-
- def test_kvm_xen_guest_novector_noapic(self):
- """
- :avocado: tags=kvm_xen_guest_novector_noapic
- """
-
- self.common_vm_setup()
- self.kernel_params = (self.KERNEL_DEFAULT +
- ' xen_emul_unplug=ide-disks' +
- ' xen_no_vector_callback noapic')
- self.run_and_check()
- self.ssh_command('grep xen-platform-pci /proc/interrupts')
diff --git a/tests/avocado/linux_initrd.py b/tests/avocado/linux_initrd.py
deleted file mode 100644
index aad5b19..0000000
--- a/tests/avocado/linux_initrd.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Linux initrd integration test.
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Wainer dos Santos Moschetta <wainersm@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import logging
-import tempfile
-
-from avocado_qemu import QemuSystemTest
-from avocado import skipUnless
-
-
-class LinuxInitrd(QemuSystemTest):
- """
- Checks QEMU evaluates correctly the initrd file passed as -initrd option.
-
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:pc
- """
-
- timeout = 300
-
- def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self):
- """
-        Attempts to boot QEMU with a 2GiB initrd file and expects it
-        to exit with an error message.
-        Fedora-18 shipped with linux-3.6, which does not support
-        xloadflags and therefore cannot handle an initrd larger than 2GiB.
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora/li'
- 'nux/releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz')
- kernel_hash = '41464f68efe42b9991250bed86c7081d2ccdbb21'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- max_size = 2 * (1024 ** 3) - 1
-
- with tempfile.NamedTemporaryFile() as initrd:
- initrd.seek(max_size)
- initrd.write(b'\0')
- initrd.flush()
- self.vm.add_args('-kernel', kernel_path, '-initrd', initrd.name,
- '-m', '4096')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1)
- expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % (
- max_size + 1)
- self.assertRegex(self.vm.get_log(), expected_msg)
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_with_2gib_file_should_work_with_linux_v4_16(self):
- """
- :avocado: tags=flaky
-
-        QEMU supports an initrd of up to 4 GiB with recent kernels.
-        Expect the guest to reach 'Unpacking initramfs...'
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/28/Everything/x86_64/os/images/pxeboot/'
- 'vmlinuz')
- kernel_hash = '238e083e114c48200f80d889f7e32eeb2793e02a'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- max_size = 2 * (1024 ** 3) + 1
-
- with tempfile.NamedTemporaryFile() as initrd:
- initrd.seek(max_size)
- initrd.write(b'\0')
- initrd.flush()
-
- self.vm.set_console()
- kernel_command_line = 'console=ttyS0'
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line,
- '-initrd', initrd.name,
- '-m', '5120')
- self.vm.launch()
- console = self.vm.console_socket.makefile()
- console_logger = logging.getLogger('console')
- while True:
- msg = console.readline()
- console_logger.debug(msg.strip())
- if 'Unpacking initramfs...' in msg:
- break
- if 'Kernel panic - not syncing' in msg:
- self.fail("Kernel panic reached")
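
The 2GiB boundary checks above rely on creating a huge initrd without actually allocating disk space: seeking past the end of a NamedTemporaryFile and writing a single byte produces a sparse file (the deleted test seeks to max_size and writes one byte, ending up one byte past the boundary). A minimal standalone sketch of that trick; the helper name is illustrative, not part of the test suite:

    # Create a sparse file of exactly `size` bytes: seek to size-1 and
    # write one byte; the filesystem allocates almost no real blocks.
    import os
    import tempfile

    def make_sparse_file(size):
        f = tempfile.NamedTemporaryFile()
        f.seek(size - 1)
        f.write(b'\0')
        f.flush()
        assert os.path.getsize(f.name) == size
        return f
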
diff --git a/tests/avocado/linux_ssh_mips_malta.py b/tests/avocado/linux_ssh_mips_malta.py
deleted file mode 100644
index d9bb525..0000000
--- a/tests/avocado/linux_ssh_mips_malta.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Functional test that boots a VM and runs commands via an SSH session
-#
-# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import re
-import base64
-import logging
-import time
-
-from avocado import skipUnless
-from avocado_qemu import LinuxSSHMixIn
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import process
-from avocado.utils import archive
-from avocado.utils import ssh
-
-
-@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
-@skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available')
-class LinuxSSH(QemuSystemTest, LinuxSSHMixIn):
- """
- :avocado: tags=accel:tcg
- """
-
- timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg'
-
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
- VM_IP = '127.0.0.1'
-
- BASE_URL = 'https://people.debian.org/~aurel32/qemu/'
- IMAGE_INFO = {
- 'be': {'base_url': 'mips',
- 'image_name': 'debian_wheezy_mips_standard.qcow2',
- 'image_hash': '8987a63270df67345b2135a6b7a4885a35e392d5',
- 'kernel_hash': {
- 32: '592e384a4edc16dade52a6cd5c785c637bcbc9ad',
- 64: 'db6eea7de35d36c77d8c165b6bcb222e16eb91db'}
- },
- 'le': {'base_url': 'mipsel',
- 'image_name': 'debian_wheezy_mipsel_standard.qcow2',
- 'image_hash': '7866764d9de3ef536ffca24c9fb9f04ffdb45802',
- 'kernel_hash': {
- 32: 'a66bea5a8adaa2cb3d36a1d4e0ccdb01be8f6c2a',
- 64: '6a7f77245acf231415a0e8b725d91ed2f3487794'}
- }
- }
- CPU_INFO = {
- 32: {'cpu': 'MIPS 24Kc', 'kernel_release': '3.2.0-4-4kc-malta'},
- 64: {'cpu': 'MIPS 20Kc', 'kernel_release': '3.2.0-4-5kc-malta'}
- }
-
- def get_url(self, endianess, path=''):
- qkey = {'le': 'el', 'be': ''}
- return '%s/mips%s/%s' % (self.BASE_URL, qkey[endianess], path)
-
- def get_image_info(self, endianess):
- dinfo = self.IMAGE_INFO[endianess]
- image_url = self.get_url(endianess, dinfo['image_name'])
- image_hash = dinfo['image_hash']
- return (image_url, image_hash)
-
- def get_kernel_info(self, endianess, wordsize):
- minfo = self.CPU_INFO[wordsize]
- kernel_url = self.get_url(endianess,
- 'vmlinux-%s' % minfo['kernel_release'])
- kernel_hash = self.IMAGE_INFO[endianess]['kernel_hash'][wordsize]
- return kernel_url, kernel_hash
-
- def ssh_disconnect_vm(self):
- self.ssh_session.quit()
-
- def boot_debian_wheezy_image_and_ssh_login(self, endianess, kernel_path):
- image_url, image_hash = self.get_image_info(endianess)
- image_path = self.fetch_asset(image_url, asset_hash=image_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
- + 'console=ttyS0 root=/dev/sda1')
- self.vm.add_args('-no-reboot',
- '-kernel', kernel_path,
- '-append', kernel_command_line,
- '-drive', 'file=%s,snapshot=on' % image_path,
- '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
- '-device', 'pcnet,netdev=vnet')
- self.vm.launch()
-
- self.log.info('VM launched, waiting for sshd')
- console_pattern = 'Starting OpenBSD Secure Shell server: sshd'
- wait_for_console_pattern(self, console_pattern, 'Oops')
- self.log.info('sshd ready')
-
- self.ssh_connect('root', 'root', False)
-
- def shutdown_via_ssh(self):
- self.ssh_command('poweroff')
- self.ssh_disconnect_vm()
- wait_for_console_pattern(self, 'Power down', 'Oops')
-
- def run_common_commands(self, wordsize):
- self.ssh_command_output_contains(
- 'cat /proc/cpuinfo',
- self.CPU_INFO[wordsize]['cpu'])
- self.ssh_command_output_contains(
- 'uname -m',
- 'mips')
- self.ssh_command_output_contains(
- 'uname -r',
- self.CPU_INFO[wordsize]['kernel_release'])
- self.ssh_command_output_contains(
- 'cat /proc/interrupts',
- 'XT-PIC timer')
- self.ssh_command_output_contains(
- 'cat /proc/interrupts',
- 'XT-PIC i8042')
- self.ssh_command_output_contains(
- 'cat /proc/interrupts',
- 'XT-PIC serial')
- self.ssh_command_output_contains(
- 'cat /proc/interrupts',
- 'XT-PIC ata_piix')
- self.ssh_command_output_contains(
- 'cat /proc/interrupts',
- 'XT-PIC eth0')
- self.ssh_command_output_contains(
- 'cat /proc/devices',
- 'input')
- self.ssh_command_output_contains(
- 'cat /proc/devices',
- 'usb')
- self.ssh_command_output_contains(
- 'cat /proc/devices',
- 'fb')
- self.ssh_command_output_contains(
- 'cat /proc/ioports',
- ' : serial')
- self.ssh_command_output_contains(
- 'cat /proc/ioports',
- ' : ata_piix')
- self.ssh_command_output_contains(
- 'cat /proc/ioports',
- ' : piix4_smbus')
- self.ssh_command_output_contains(
- 'lspci -d 11ab:4620',
- 'GT-64120')
- self.ssh_command_output_contains(
- 'cat /sys/bus/i2c/devices/i2c-0/name',
- 'SMBus PIIX4 adapter')
- self.ssh_command_output_contains(
- 'cat /proc/mtd',
- 'YAMON')
- # Empty 'Board Config' (64KB)
- self.ssh_command_output_contains(
- 'md5sum /dev/mtd2ro',
- '0dfbe8aa4c20b52e1b8bf3cb6cbdf193')
-
- def check_mips_malta(self, uname_m, endianess):
- wordsize = 64 if '64' in uname_m else 32
- kernel_url, kernel_hash = self.get_kernel_info(endianess, wordsize)
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- self.boot_debian_wheezy_image_and_ssh_login(endianess, kernel_path)
-
- stdout, _ = self.ssh_command('uname -a')
- self.assertIn(True, [uname_m + " GNU/Linux" in line for line in stdout])
-
- self.run_common_commands(wordsize)
- self.shutdown_via_ssh()
- # Wait for VM to shut down gracefully
- self.vm.wait()
-
- def test_mips_malta32eb_kernel3_2_0(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=endian:big
- :avocado: tags=device:pcnet32
- """
- self.check_mips_malta('mips', 'be')
-
- def test_mips_malta32el_kernel3_2_0(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=endian:little
- :avocado: tags=device:pcnet32
- """
- self.check_mips_malta('mips', 'le')
-
- def test_mips_malta64eb_kernel3_2_0(self):
- """
- :avocado: tags=arch:mips64
- :avocado: tags=endian:big
- :avocado: tags=device:pcnet32
- """
- self.check_mips_malta('mips64', 'be')
-
- def test_mips_malta64el_kernel3_2_0(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=endian:little
- :avocado: tags=device:pcnet32
- """
- self.check_mips_malta('mips64', 'le')
diff --git a/tests/avocado/load_bflt.py b/tests/avocado/load_bflt.py
deleted file mode 100644
index bb50cec..0000000
--- a/tests/avocado/load_bflt.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Test the bFLT loader format
-#
-# Copyright (C) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-import bz2
-import subprocess
-
-from avocado import skipUnless
-from avocado_qemu import QemuUserTest
-from avocado_qemu import has_cmd
-
-
-class LoadBFLT(QemuUserTest):
-
- def extract_cpio(self, cpio_path):
- """
- Extracts a cpio archive into the test workdir
-
- :param cpio_path: path to the cpio archive
- """
- cwd = os.getcwd()
- os.chdir(self.workdir)
- with bz2.open(cpio_path, 'rb') as archive_cpio:
- subprocess.run(['cpio', '-i'], input=archive_cpio.read(),
- stderr=subprocess.DEVNULL)
- os.chdir(cwd)
-
- @skipUnless(*has_cmd('cpio'))
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_stm32(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=linux_user
- :avocado: tags=quick
- """
- # See https://elinux.org/STM32#User_Space
- rootfs_url = ('https://elinux.org/images/5/51/'
- 'Stm32_mini_rootfs.cpio.bz2')
- rootfs_hash = '9f065e6ba40cce7411ba757f924f30fcc57951e6'
- rootfs_path_bz2 = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash)
-        busybox_path = os.path.join(self.workdir, "bin/busybox")
-
- self.extract_cpio(rootfs_path_bz2)
-
- res = self.run(busybox_path)
- ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.'
- self.assertIn(ver, res.stdout_text)
-
- res = self.run(busybox_path, ['uname', '-a'])
- unm = 'armv7l GNU/Linux'
- self.assertIn(unm, res.stdout_text)
diff --git a/tests/avocado/machine_aarch64_sbsaref.py b/tests/avocado/machine_aarch64_sbsaref.py
deleted file mode 100644
index e920bbf..0000000
--- a/tests/avocado/machine_aarch64_sbsaref.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# SPDX-FileCopyrightText: 2023-2024 Linaro Ltd.
-# SPDX-FileContributor: Philippe Mathieu-DaudƩ <philmd@linaro.org>
-# SPDX-FileContributor: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-
-from avocado import skipUnless
-from avocado.utils import archive
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import interrupt_interactive_console_until_pattern
-
-
-class Aarch64SbsarefMachine(QemuSystemTest):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:sbsa-ref
- :avocado: tags=accel:tcg
-
- As firmware runs at a higher privilege level than the hypervisor we
- can only run these tests under TCG emulation.
- """
-
- timeout = 180
-
- def fetch_firmware(self):
- """
- Flash volumes generated using:
-
- Toolchain from Debian:
- aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0
-
- Used components:
-
- - Trusted Firmware v2.11.0
- - Tianocore EDK2 4d4f569924
- - Tianocore EDK2-platforms 3f08401
-
- """
-
- # Secure BootRom (TF-A code)
- fs0_xz_url = (
- "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/"
- "20240619-148232/edk2/SBSA_FLASH0.fd.xz"
- )
- fs0_xz_hash = "0c954842a590988f526984de22e21ae0ab9cb351a0c99a8a58e928f0c7359cf7"
- tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash,
- algorithm='sha256')
- archive.extract(tar_xz_path, self.workdir)
- fs0_path = os.path.join(self.workdir, "SBSA_FLASH0.fd")
-
- # Non-secure rom (UEFI and EFI variables)
- fs1_xz_url = (
- "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/"
- "20240619-148232/edk2/SBSA_FLASH1.fd.xz"
- )
- fs1_xz_hash = "c6ec39374c4d79bb9e9cdeeb6db44732d90bb4a334cec92002b3f4b9cac4b5ee"
- tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash,
- algorithm='sha256')
- archive.extract(tar_xz_path, self.workdir)
- fs1_path = os.path.join(self.workdir, "SBSA_FLASH1.fd")
-
- for path in [fs0_path, fs1_path]:
- with open(path, "ab+") as fd:
- fd.truncate(256 << 20) # Expand volumes to 256MiB
-
- self.vm.set_console()
- self.vm.add_args(
- "-drive",
- f"if=pflash,file={fs0_path},format=raw",
- "-drive",
- f"if=pflash,file={fs1_path},format=raw",
- "-machine",
- "sbsa-ref",
- )
-
- def test_sbsaref_edk2_firmware(self):
- """
- :avocado: tags=cpu:cortex-a57
- """
-
- self.fetch_firmware()
- self.vm.launch()
-
- # TF-A boot sequence:
- #
- # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\
- # docs/design/trusted-board-boot.rst#trusted-board-boot-sequence
- # https://trustedfirmware-a.readthedocs.io/en/v2.8/\
- # design/firmware-design.html#cold-boot
-
- # AP Trusted ROM
- wait_for_console_pattern(self, "Booting Trusted Firmware")
- wait_for_console_pattern(self, "BL1: v2.11.0(release):")
- wait_for_console_pattern(self, "BL1: Booting BL2")
-
- # Trusted Boot Firmware
- wait_for_console_pattern(self, "BL2: v2.11.0(release)")
- wait_for_console_pattern(self, "Booting BL31")
-
- # EL3 Runtime Software
- wait_for_console_pattern(self, "BL31: v2.11.0(release)")
-
- # Non-trusted Firmware
- wait_for_console_pattern(self, "UEFI firmware (version 1.0")
- interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine")
-
- # This tests the whole boot chain from EFI to Userspace
- # We only boot a whole OS for the current top level CPU and GIC
- # Other test profiles should use more minimal boots
- def boot_alpine_linux(self, cpu):
- self.fetch_firmware()
-
- iso_url = (
- "https://dl-cdn.alpinelinux.org/"
- "alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso"
- )
-
- iso_hash = "5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027"
- iso_path = self.fetch_asset(iso_url, algorithm="sha256", asset_hash=iso_hash)
-
- self.vm.set_console()
- self.vm.add_args(
- "-cpu",
- cpu,
- "-drive",
- f"file={iso_path},format=raw",
- )
-
- self.vm.launch()
- wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17")
-
- def test_sbsaref_alpine_linux_cortex_a57(self):
- """
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=os:linux
- """
- self.boot_alpine_linux("cortex-a57")
-
- def test_sbsaref_alpine_linux_neoverse_n1(self):
- """
- :avocado: tags=cpu:neoverse-n1
- :avocado: tags=os:linux
- """
- self.boot_alpine_linux("neoverse-n1")
-
- def test_sbsaref_alpine_linux_max_pauth_off(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:linux
- """
- self.boot_alpine_linux("max,pauth=off")
-
- def test_sbsaref_alpine_linux_max_pauth_impdef(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:linux
- """
- self.boot_alpine_linux("max,pauth-impdef=on")
-
- @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
- def test_sbsaref_alpine_linux_max(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:linux
- """
- self.boot_alpine_linux("max")
-
-
- # This tests the whole boot chain from EFI to Userspace
- # We only boot a whole OS for the current top level CPU and GIC
- # Other test profiles should use more minimal boots
- def boot_openbsd73(self, cpu):
- self.fetch_firmware()
-
- img_url = (
- "https://cdn.openbsd.org/pub/OpenBSD/7.3/arm64/miniroot73.img"
- )
-
- img_hash = "7fc2c75401d6f01fbfa25f4953f72ad7d7c18650056d30755c44b9c129b707e5"
- img_path = self.fetch_asset(img_url, algorithm="sha256", asset_hash=img_hash)
-
- self.vm.set_console()
- self.vm.add_args(
- "-cpu",
- cpu,
- "-drive",
- f"file={img_path},format=raw",
- )
-
- self.vm.launch()
- wait_for_console_pattern(self,
- "Welcome to the OpenBSD/arm64"
- " 7.3 installation program.")
-
- def test_sbsaref_openbsd73_cortex_a57(self):
- """
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=os:openbsd
- """
- self.boot_openbsd73("cortex-a57")
-
- def test_sbsaref_openbsd73_neoverse_n1(self):
- """
- :avocado: tags=cpu:neoverse-n1
- :avocado: tags=os:openbsd
- """
- self.boot_openbsd73("neoverse-n1")
-
- def test_sbsaref_openbsd73_max_pauth_off(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:openbsd
- """
- self.boot_openbsd73("max,pauth=off")
-
- @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
- def test_sbsaref_openbsd73_max_pauth_impdef(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:openbsd
- """
- self.boot_openbsd73("max,pauth-impdef=on")
-
- @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
- def test_sbsaref_openbsd73_max(self):
- """
- :avocado: tags=cpu:max
- :avocado: tags=os:openbsd
- """
- self.boot_openbsd73("max")
diff --git a/tests/avocado/machine_aarch64_virt.py b/tests/avocado/machine_aarch64_virt.py
deleted file mode 100644
index a90dc6f..0000000
--- a/tests/avocado/machine_aarch64_virt.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Functional test that boots various Linux systems and checks the
-# console output.
-#
-# Copyright (c) 2022 Linaro Ltd.
-#
-# Author:
-# Alex BennƩe <alex.bennee@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import time
-import os
-import logging
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command
-from avocado_qemu import BUILD_DIR
-from avocado.utils import process
-from avocado.utils.path import find_command
-
-class Aarch64VirtMachine(QemuSystemTest):
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
- timeout = 360
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- # This tests the whole boot chain from EFI to Userspace
- # We only boot a whole OS for the current top level CPU and GIC
- # Other test profiles should use more minimal boots
- def test_alpine_virt_tcg_gic_max(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=accel:tcg
- """
- iso_url = ('https://dl-cdn.alpinelinux.org/'
- 'alpine/v3.17/releases/aarch64/'
- 'alpine-standard-3.17.2-aarch64.iso')
-
-        # Alpine uses sha256, so I recalculated this sha1 myself
- iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839'
- iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- self.require_accelerator("tcg")
-
- self.vm.add_args("-accel", "tcg")
- self.vm.add_args("-cpu", "max,pauth-impdef=on")
- self.vm.add_args("-machine",
- "virt,acpi=on,"
- "virtualization=on,"
- "mte=on,"
- "gic-version=max,iommu=smmuv3")
- self.vm.add_args("-smp", "2", "-m", "1024")
- self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
- 'edk2-aarch64-code.fd'))
- self.vm.add_args("-drive", f"file={iso_path},format=raw")
- self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
- self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
-
- self.vm.launch()
- self.wait_for_console_pattern('Welcome to Alpine Linux 3.17')
-
-
- def common_aarch64_virt(self, machine):
- """
- Common code to launch basic virt machine with kernel+initrd
- and a scratch disk.
- """
- logger = logging.getLogger('aarch64_virt')
-
- kernel_url = ('https://fileserver.linaro.org/s/'
- 'z6B2ARM7DQT3HWN/download')
- kernel_hash = 'ed11daab50c151dde0e1e9c9cb8b2d9bd3215347'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- self.require_accelerator("tcg")
- self.vm.add_args('-cpu', 'max,pauth-impdef=on',
- '-machine', machine,
- '-accel', 'tcg',
- '-kernel', kernel_path,
- '-append', kernel_command_line)
-
- # A RNG offers an easy way to generate a few IRQs
- self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
- self.vm.add_args('-object',
- 'rng-random,id=rng0,filename=/dev/urandom')
-
- # Also add a scratch block device
- logger.info('creating scratch qcow2 image')
- image_path = os.path.join(self.workdir, 'scratch.qcow2')
- qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
- if not os.path.exists(qemu_img):
- qemu_img = find_command('qemu-img', False)
- if qemu_img is False:
- self.cancel('Could not find "qemu-img", which is required to '
- 'create the temporary qcow2 image')
- cmd = '%s create -f qcow2 %s 8M' % (qemu_img, image_path)
- process.run(cmd)
-
- # Add the device
- self.vm.add_args('-blockdev',
- f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
- self.vm.add_args('-device',
- 'virtio-blk-device,drive=scratch')
-
- self.vm.launch()
- self.wait_for_console_pattern('Welcome to Buildroot')
- time.sleep(0.1)
- exec_command(self, 'root')
- time.sleep(0.1)
- exec_command(self, 'dd if=/dev/hwrng of=/dev/vda bs=512 count=4')
- time.sleep(0.1)
- exec_command(self, 'md5sum /dev/vda')
- time.sleep(0.1)
- exec_command(self, 'cat /proc/interrupts')
- time.sleep(0.1)
- exec_command(self, 'cat /proc/self/maps')
- time.sleep(0.1)
-
- def test_aarch64_virt_gicv3(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=accel:tcg
- :avocado: tags=cpu:max
- """
- self.common_aarch64_virt("virt,gic_version=3")
-
- def test_aarch64_virt_gicv2(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=accel:tcg
- :avocado: tags=cpu:max
- """
- self.common_aarch64_virt("virt,gic-version=2")
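
common_aarch64_virt() above provisions a scratch qcow2 image with qemu-img before attaching it as a virtio-blk blockdev. A standalone sketch of that step using only the standard library; it assumes qemu-img is on PATH rather than resolving it from BUILD_DIR as the test does:

    import os
    import shutil
    import subprocess
    import tempfile

    def create_scratch_qcow2(size='8M'):
        # Locate qemu-img; the deleted test falls back to find_command().
        qemu_img = shutil.which('qemu-img')
        if qemu_img is None:
            raise RuntimeError('qemu-img not found on PATH')
        image_path = os.path.join(tempfile.mkdtemp(), 'scratch.qcow2')
        subprocess.run([qemu_img, 'create', '-f', 'qcow2', image_path, size],
                       check=True)
        return image_path
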
diff --git a/tests/avocado/machine_arm_canona1100.py b/tests/avocado/machine_arm_canona1100.py
deleted file mode 100644
index a42d8b0..0000000
--- a/tests/avocado/machine_arm_canona1100.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Functional test that boots the canon-a1100 machine with firmware
-#
-# Copyright (c) 2020 Red Hat, Inc.
-#
-# Author:
-# Thomas Huth <thuth@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-
-class CanonA1100Machine(QemuSystemTest):
- """Boots the barebox firmware and checks that the console is operational"""
-
- timeout = 90
-
- def test_arm_canona1100(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:canon-a1100
- :avocado: tags=device:pflash_cfi02
- """
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day18.tar.xz')
- tar_hash = '068b5fc4242b29381acee94713509f8a876e9db6'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-bios',
- self.workdir + '/day18/barebox.canon-a1100.bin')
- self.vm.launch()
- wait_for_console_pattern(self, 'running /env/bin/init')
diff --git a/tests/avocado/machine_arm_integratorcp.py b/tests/avocado/machine_arm_integratorcp.py
deleted file mode 100644
index 87f5cf3..0000000
--- a/tests/avocado/machine_arm_integratorcp.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright (c) 2020 Red Hat, Inc.
-#
-# Author:
-# Thomas Huth <thuth@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import logging
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-
-NUMPY_AVAILABLE = True
-try:
- import numpy as np
-except ImportError:
- NUMPY_AVAILABLE = False
-
-CV2_AVAILABLE = True
-try:
- import cv2
-except ImportError:
- CV2_AVAILABLE = False
-
-
-class IntegratorMachine(QemuSystemTest):
-
- timeout = 90
-
- def boot_integratorcp(self):
- kernel_url = ('https://github.com/zayac/qemu-arm/raw/master/'
- 'arm-test/kernel/zImage.integrator')
- kernel_hash = '0d7adba893c503267c946a3cbdc63b4b54f25468'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- initrd_url = ('https://github.com/zayac/qemu-arm/raw/master/'
- 'arm-test/kernel/arm_root.img')
- initrd_hash = 'b51e4154285bf784e017a37586428332d8c7bd8b'
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', 'printk.time=0 console=ttyAMA0')
- self.vm.launch()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_integratorcp_console(self):
- """
- Boots the Linux kernel and checks that the console is operational
- :avocado: tags=arch:arm
- :avocado: tags=machine:integratorcp
- :avocado: tags=device:pl011
- """
- self.boot_integratorcp()
- wait_for_console_pattern(self, 'Log in as root')
-
- @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed')
- @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed')
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_framebuffer_tux_logo(self):
- """
- Boot Linux and verify the Tux logo is displayed on the framebuffer.
- :avocado: tags=arch:arm
- :avocado: tags=machine:integratorcp
- :avocado: tags=device:pl110
- :avocado: tags=device:framebuffer
- """
- screendump_path = os.path.join(self.workdir, "screendump.pbm")
- tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/'
- 'drivers/video/logo/logo_linux_vga16.ppm')
- tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af'
- tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash)
-
- self.boot_integratorcp()
- framebuffer_ready = 'Console: switching to colour frame buffer device'
- wait_for_console_pattern(self, framebuffer_ready)
- self.vm.cmd('human-monitor-command', command_line='stop')
- self.vm.cmd('human-monitor-command',
- command_line='screendump %s' % screendump_path)
- logger = logging.getLogger('framebuffer')
-
- cpu_count = 1
- match_threshold = 0.92
- screendump_bgr = cv2.imread(screendump_path)
- screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY)
- result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0),
- cv2.TM_CCOEFF_NORMED)
- loc = np.where(result >= match_threshold)
- tux_count = 0
- for tux_count, pt in enumerate(zip(*loc[::-1]), start=1):
- logger.debug('found Tux at position [x, y] = %s', pt)
- self.assertGreaterEqual(tux_count, cpu_count)
diff --git a/tests/avocado/machine_arm_n8x0.py b/tests/avocado/machine_arm_n8x0.py
deleted file mode 100644
index 12e9a68..0000000
--- a/tests/avocado/machine_arm_n8x0.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright (c) 2020 Red Hat, Inc.
-#
-# Author:
-# Thomas Huth <thuth@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class N8x0Machine(QemuSystemTest):
- """Boots the Linux kernel and checks that the console is operational"""
-
- timeout = 90
-
- def __do_test_n8x0(self):
- kernel_url = ('http://stskeeps.subnetmask.net/meego-n8x0/'
- 'meego-arm-n8x0-1.0.80.20100712.1431-'
- 'vmlinuz-2.6.35~rc4-129.1-n8x0')
- kernel_hash = 'e9d5ab8d7548923a0061b6fbf601465e479ed269'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console(console_index=1)
- self.vm.add_args('-kernel', kernel_path,
- '-append', 'printk.time=0 console=ttyS1')
- self.vm.launch()
- wait_for_console_pattern(self, 'TSC2005 driver initializing')
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_n800(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:n800
- """
- self.__do_test_n8x0()
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_n810(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:n810
- """
- self.__do_test_n8x0()
diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py
deleted file mode 100644
index f66ad38..0000000
--- a/tests/avocado/machine_aspeed.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Functional test that boots the ASPEED SoCs with firmware
-#
-# Copyright (C) 2022 ASPEED Technology Inc
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import time
-import os
-import tempfile
-import subprocess
-
-from avocado_qemu import LinuxSSHMixIn
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import interrupt_interactive_console_until_pattern
-from avocado_qemu import has_cmd
-from avocado.utils import archive
-from avocado import skipUnless
-
-
-class AST1030Machine(QemuSystemTest):
- """Boots the zephyr os and checks that the console is operational"""
-
- timeout = 10
-
- def test_ast1030_zephyros_1_04(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast1030-evb
- :avocado: tags=os:zephyr
- """
- tar_url = ('https://github.com/AspeedTech-BMC'
- '/zephyr/releases/download/v00.01.04/ast1030-evb-demo.zip')
- tar_hash = '4c6a8ce3a8ba76ef1a65dae419ae3409343c4b20'
- tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(tar_path, self.workdir)
- kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.elf"
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_file,
- '-nographic')
- self.vm.launch()
- wait_for_console_pattern(self, "Booting Zephyr OS")
- exec_command_and_wait_for_pattern(self, "help",
- "Available commands")
-
- def test_ast1030_zephyros_1_07(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast1030-evb
- :avocado: tags=os:zephyr
- """
- tar_url = ('https://github.com/AspeedTech-BMC'
- '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip')
- tar_hash = '40ac87eabdcd3b3454ce5aad11fedc72a33ecda2'
- tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(tar_path, self.workdir)
- kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.bin"
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_file,
- '-nographic')
- self.vm.launch()
- wait_for_console_pattern(self, "Booting Zephyr OS")
- for shell_cmd in [
- 'kernel stacks',
- 'otp info conf',
- 'otp info scu',
- 'hwinfo devid',
- 'crypto aes256_cbc_vault',
- 'random get',
- 'jtag JTAG1 sw_xfer high TMS',
- 'adc ADC0 resolution 12',
- 'adc ADC0 read 42',
- 'adc ADC1 read 69',
- 'i2c scan I2C_0',
- 'i3c attach I3C_0',
- 'hash test',
- 'kernel uptime',
- 'kernel reboot warm',
- 'kernel uptime',
- 'kernel reboot cold',
- 'kernel uptime',
- ]: exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$")
-
-class AST2x00Machine(QemuSystemTest):
-
- timeout = 90
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def do_test_arm_aspeed(self, image):
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
- '-net', 'nic')
- self.vm.launch()
-
- self.wait_for_console_pattern("U-Boot 2016.07")
- self.wait_for_console_pattern("## Loading kernel from FIT Image at 20080000")
- self.wait_for_console_pattern("Starting kernel ...")
- self.wait_for_console_pattern("Booting Linux on physical CPU 0x0")
- wait_for_console_pattern(self,
- "aspeed-smc 1e620000.spi: read control register: 203b0641")
- self.wait_for_console_pattern("ftgmac100 1e660000.ethernet eth0: irq ")
- self.wait_for_console_pattern("systemd[1]: Set hostname to")
-
- def test_arm_ast2400_palmetto_openbmc_v2_9_0(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:palmetto-bmc
- """
-
- image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/'
- 'obmc-phosphor-image-palmetto.static.mtd')
- image_hash = ('3e13bbbc28e424865dc42f35ad672b10f2e82cdb11846bb28fa625b48beafd0d')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
-
- self.do_test_arm_aspeed(image_path)
-
- def test_arm_ast2500_romulus_openbmc_v2_9_0(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:romulus-bmc
- """
-
- image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/'
- 'obmc-phosphor-image-romulus.static.mtd')
- image_hash = ('820341076803f1955bc31e647a512c79f9add4f5233d0697678bab4604c7bb25')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
-
- self.do_test_arm_aspeed(image_path)
-
- def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB'):
- self.require_netdev('user')
-
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
- '-net', 'nic', '-net', 'user')
- self.vm.launch()
-
- self.wait_for_console_pattern('U-Boot 2019.04')
- self.wait_for_console_pattern('## Loading kernel from FIT Image')
- self.wait_for_console_pattern('Starting kernel ...')
- self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id)
- self.wait_for_console_pattern('lease of 10.0.2.15')
- # the line before login:
- self.wait_for_console_pattern(pattern)
- time.sleep(0.1)
- exec_command(self, 'root')
- time.sleep(0.1)
- exec_command(self, "passw0rd")
-
- def do_test_arm_aspeed_buildroot_poweroff(self):
- exec_command_and_wait_for_pattern(self, 'poweroff',
- 'reboot: System halted');
-
- def test_arm_ast2500_evb_buildroot(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast2500-evb
- """
-
- image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
- 'images/ast2500-evb/buildroot-2023.11/flash.img')
- image_hash = ('c23db6160cf77d0258397eb2051162c8473a56c441417c52a91ba217186e715f')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
-
- self.vm.add_args('-device',
- 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test');
- self.do_test_arm_aspeed_buildroot_start(image_path, '0x0', 'Aspeed AST2500 EVB')
-
- exec_command_and_wait_for_pattern(self,
- 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device',
- 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d');
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/hwmon/hwmon1/temp1_input', '0')
- self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
- property='temperature', value=18000);
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000')
-
- self.do_test_arm_aspeed_buildroot_poweroff()
-
- def test_arm_ast2600_evb_buildroot(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast2600-evb
- """
-
- image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
- 'images/ast2600-evb/buildroot-2023.11/flash.img')
- image_hash = ('b62808daef48b438d0728ee07662290490ecfa65987bb91294cafb1bb7ad1a68')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
-
- self.vm.add_args('-device',
- 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test');
- self.vm.add_args('-device',
- 'ds1338,bus=aspeed.i2c.bus.3,address=0x32');
- self.vm.add_args('-device',
- 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42');
- self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB')
-
- exec_command_and_wait_for_pattern(self,
- 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device',
- 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d');
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/hwmon/hwmon1/temp1_input', '0')
- self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
- property='temperature', value=18000);
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000')
-
- exec_command_and_wait_for_pattern(self,
- 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device',
- 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32');
- year = time.strftime("%Y")
- exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year);
-
- exec_command_and_wait_for_pattern(self,
- 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device',
- 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64');
- exec_command(self, 'i2cset -y 3 0x42 0x64 0x00 0xaa i');
- time.sleep(0.1)
- exec_command_and_wait_for_pattern(self,
- 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom',
- '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff');
- self.do_test_arm_aspeed_buildroot_poweroff()
-
- @skipUnless(*has_cmd('swtpm'))
- def test_arm_ast2600_evb_buildroot_tpm(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast2600-evb
- """
-
- image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
- 'images/ast2600-evb/buildroot-2023.02-tpm/flash.img')
- image_hash = ('a46009ae8a5403a0826d607215e731a8c68d27c14c41e55331706b8f9c7bd997')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
-
- # force creation of VM object, which also defines self._sd
- vm = self.vm
-
- socket = os.path.join(self._sd.name, 'swtpm-socket')
-
- subprocess.run(['swtpm', 'socket', '-d', '--tpm2',
- '--tpmstate', f'dir={self.vm.temp_dir}',
- '--ctrl', f'type=unixio,path={socket}'])
-
- self.vm.add_args('-chardev', f'socket,id=chrtpm,path={socket}')
- self.vm.add_args('-tpmdev', 'emulator,id=tpm0,chardev=chrtpm')
- self.vm.add_args('-device',
- 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e')
- self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB')
-
- exec_command_and_wait_for_pattern(self,
- 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device',
- 'tpm_tis_i2c 12-002e: 2.0 TPM (device-id 0x1, rev-id 1)');
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/tpm/tpm0/pcr-sha256/0',
- 'B804724EA13F52A9072BA87FE8FDCC497DFC9DF9AA15B9088694639C431688E0');
-
- self.do_test_arm_aspeed_buildroot_poweroff()
-
-class AST2x00MachineSDK(QemuSystemTest, LinuxSSHMixIn):
-
- EXTRA_BOOTARGS = (
- 'quiet '
- 'systemd.mask=org.openbmc.HostIpmi.service '
- 'systemd.mask=xyz.openbmc_project.Chassis.Control.Power@0.service '
- 'systemd.mask=modprobe@fuse.service '
- 'systemd.mask=rngd.service '
- 'systemd.mask=obmc-console@ttyS2.service '
- )
-
- # FIXME: Although these tests boot a whole distro they are still
- # slower than comparable machine models. There may be some
- # optimisations which bring down the runtime. In the meantime they
- # have generous timeouts and are disable for CI which aims for all
- # tests to run in less than 60 seconds.
- timeout = 240
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def do_test_arm_aspeed_sdk_start(self, image):
- self.require_netdev('user')
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
- '-net', 'nic', '-net', 'user,hostfwd=:127.0.0.1:0-:22')
- self.vm.launch()
-
- self.wait_for_console_pattern('U-Boot 2019.04')
- interrupt_interactive_console_until_pattern(
- self, 'Hit any key to stop autoboot:', 'ast#')
- exec_command_and_wait_for_pattern(
- self, 'setenv bootargs ${bootargs} ' + self.EXTRA_BOOTARGS, 'ast#')
- exec_command_and_wait_for_pattern(
- self, 'boot', '## Loading kernel from FIT Image')
- self.wait_for_console_pattern('Starting kernel ...')
-
- def do_test_aarch64_aspeed_sdk_start(self, image):
- self.vm.set_console()
- self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
- '-net', 'nic', '-net', 'user,hostfwd=:127.0.0.1:0-:22')
-
- self.vm.launch()
-
- self.wait_for_console_pattern('U-Boot 2023.10')
- self.wait_for_console_pattern('## Loading kernel from FIT Image')
- self.wait_for_console_pattern('Starting kernel ...')
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_arm_ast2500_evb_sdk(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast2500-evb
- :avocado: tags=flaky
- """
-
- image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/'
- 'download/v08.06/ast2500-default-obmc.tar.gz')
- image_hash = ('e1755f3cadff69190438c688d52dd0f0d399b70a1e14b1d3d5540fc4851d38ca')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- archive.extract(image_path, self.workdir)
-
- self.do_test_arm_aspeed_sdk_start(
- self.workdir + '/ast2500-default/image-bmc')
- self.wait_for_console_pattern('nodistro.0 ast2500-default ttyS4')
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_arm_ast2600_evb_sdk(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:ast2600-evb
- :avocado: tags=flaky
- """
-
- image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/'
- 'download/v08.06/ast2600-a2-obmc.tar.gz')
- image_hash = ('9083506135f622d5e7351fcf7d4e1c7125cee5ba16141220c0ba88931f3681a4')
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- archive.extract(image_path, self.workdir)
-
- self.vm.add_args('-device',
- 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test');
- self.vm.add_args('-device',
- 'ds1338,bus=aspeed.i2c.bus.5,address=0x32');
- self.do_test_arm_aspeed_sdk_start(
- self.workdir + '/ast2600-a2/image-bmc')
- self.wait_for_console_pattern('nodistro.0 ast2600-a2 ttyS4')
-
- self.ssh_connect('root', '0penBmc', False)
- self.ssh_command('dmesg -c > /dev/null')
-
- self.ssh_command_output_contains(
- 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device ; '
- 'dmesg -c',
- 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d');
- self.ssh_command_output_contains(
- 'cat /sys/class/hwmon/hwmon19/temp1_input', '0')
- self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
- property='temperature', value=18000);
- self.ssh_command_output_contains(
- 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000')
-
- self.ssh_command_output_contains(
- 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device ; '
- 'dmesg -c',
- 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32');
- year = time.strftime("%Y")
- self.ssh_command_output_contains('/sbin/hwclock -f /dev/rtc1', year);
-
- def test_aarch64_ast2700_evb_sdk_v09_02(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:ast2700-evb
- """
-
- image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/'
- 'download/v09.02/ast2700-default-obmc.tar.gz')
- image_hash = 'ac969c2602f4e6bdb69562ff466b89ae3fe1d86e1f6797bb7969d787f82116a7'
- image_path = self.fetch_asset(image_url, asset_hash=image_hash,
- algorithm='sha256')
- archive.extract(image_path, self.workdir)
-
- num_cpu = 4
- image_dir = self.workdir + '/ast2700-default/'
- uboot_size = os.path.getsize(image_dir + 'u-boot-nodtb.bin')
- uboot_dtb_load_addr = hex(0x400000000 + uboot_size)
-
- load_images_list = [
- {
- 'addr': '0x400000000',
- 'file': image_dir + 'u-boot-nodtb.bin'
- },
- {
- 'addr': str(uboot_dtb_load_addr),
- 'file': image_dir + 'u-boot.dtb'
- },
- {
- 'addr': '0x430000000',
- 'file': image_dir + 'bl31.bin'
- },
- {
- 'addr': '0x430080000',
- 'file': image_dir + 'optee/tee-raw.bin'
- }
- ]
-
- for load_image in load_images_list:
- addr = load_image['addr']
- file = load_image['file']
- self.vm.add_args('-device',
- f'loader,force-raw=on,addr={addr},file={file}')
-
- for i in range(num_cpu):
- self.vm.add_args('-device',
- f'loader,addr=0x430000000,cpu-num={i}')
-
- self.vm.add_args('-smp', str(num_cpu))
- self.do_test_aarch64_aspeed_sdk_start(image_dir + 'image-bmc')
- self.wait_for_console_pattern('nodistro.0 ast2700-default ttyS12')
- self.ssh_connect('root', '0penBmc', False)
-
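
The AST2700 SDK test above hand-builds the boot image layout for the generic loader device: u-boot.dtb is placed immediately after u-boot-nodtb.bin, which itself sits at 0x400000000, while BL31 and OP-TEE use fixed addresses. A hedged sketch of that address computation; the helper name is illustrative, while the addresses and file names are the ones used in the deleted test:

    import os

    def ast2700_loader_args(image_dir):
        # u-boot.dtb follows u-boot-nodtb.bin, so its load address depends
        # on the binary's size.
        uboot_size = os.path.getsize(os.path.join(image_dir, 'u-boot-nodtb.bin'))
        layout = [
            (0x400000000,              'u-boot-nodtb.bin'),
            (0x400000000 + uboot_size, 'u-boot.dtb'),
            (0x430000000,              'bl31.bin'),
            (0x430080000,              'optee/tee-raw.bin'),
        ]
        args = []
        for addr, name in layout:
            path = os.path.join(image_dir, name)
            args += ['-device', f'loader,force-raw=on,addr={hex(addr)},file={path}']
        return args
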
diff --git a/tests/avocado/machine_avr6.py b/tests/avocado/machine_avr6.py
deleted file mode 100644
index 5485db7..0000000
--- a/tests/avocado/machine_avr6.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# QEMU AVR integration tests
-#
-# Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import time
-
-from avocado_qemu import QemuSystemTest
-
-class AVR6Machine(QemuSystemTest):
- timeout = 5
-
- def test_freertos(self):
- """
- :avocado: tags=arch:avr
- :avocado: tags=machine:arduino-mega-2560-v3
- """
- """
- https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf
- constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX'
- """
- rom_url = ('https://github.com/seharris/qemu-avr-tests'
- '/raw/36c3e67b8755dcf/free-rtos/Demo'
- '/AVR_ATMega2560_GCC/demo.elf')
- rom_hash = '7eb521f511ca8f2622e0a3c5e8dd686efbb911d4'
- rom_path = self.fetch_asset(rom_url, asset_hash=rom_hash)
-
- self.vm.add_args('-bios', rom_path)
- self.vm.add_args('-nographic')
- self.vm.launch()
-
- time.sleep(2)
- self.vm.shutdown()
-
- self.assertIn('ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX',
- self.vm.get_log())
diff --git a/tests/avocado/machine_loongarch.py b/tests/avocado/machine_loongarch.py
deleted file mode 100644
index 8de308f..0000000
--- a/tests/avocado/machine_loongarch.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# LoongArch virt test.
-#
-# Copyright (c) 2023 Loongson Technology Corporation Limited
-#
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import wait_for_console_pattern
-
-class LoongArchMachine(QemuSystemTest):
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- timeout = 120
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def test_loongarch64_devices(self):
-
- """
- :avocado: tags=arch:loongarch64
- :avocado: tags=machine:virt
- """
-
- kernel_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
- 'releases/download/2024-05-30/vmlinuz.efi')
- kernel_hash = '951b485b16e3788b6db03a3e1793c067009e31a2'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- initrd_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
- 'releases/download/2024-05-30/ramdisk')
- initrd_hash = 'c67658d9b2a447ce7db2f73ba3d373c9b2b90ab2'
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- bios_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
- 'releases/download/2024-05-30/QEMU_EFI.fd')
- bios_hash = ('f4d0966b5117d4cd82327c050dd668741046be69')
- bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200')
- self.vm.add_args('-nographic',
- '-smp', '4',
- '-m', '1024',
- '-cpu', 'la464',
- '-kernel', kernel_path,
- '-initrd', initrd_path,
- '-bios', bios_path,
- '-append', kernel_command_line)
- self.vm.launch()
- self.wait_for_console_pattern('Run /sbin/init as init process')
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'processor : 3')
diff --git a/tests/avocado/machine_m68k_nextcube.py b/tests/avocado/machine_m68k_nextcube.py
deleted file mode 100644
index 1f3c883..0000000
--- a/tests/avocado/machine_m68k_nextcube.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Functional test that boots a VM and runs OCR on the framebuffer
-#
-# Copyright (c) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import time
-
-from avocado_qemu import QemuSystemTest
-from avocado import skipUnless
-
-from tesseract_utils import tesseract_available, tesseract_ocr
-
-PIL_AVAILABLE = True
-try:
- from PIL import Image
-except ImportError:
- PIL_AVAILABLE = False
-
-
-class NextCubeMachine(QemuSystemTest):
- """
- :avocado: tags=arch:m68k
- :avocado: tags=machine:next-cube
- :avocado: tags=device:framebuffer
- """
-
- timeout = 15
-
- def check_bootrom_framebuffer(self, screenshot_path):
- rom_url = ('https://sourceforge.net/p/previous/code/1350/tree/'
- 'trunk/src/Rev_2.5_v66.BIN?format=raw')
- rom_hash = 'b3534796abae238a0111299fc406a9349f7fee24'
- rom_path = self.fetch_asset(rom_url, asset_hash=rom_hash)
-
- self.vm.add_args('-bios', rom_path)
- self.vm.launch()
-
- self.log.info('VM launched, waiting for display')
- # TODO: Use avocado.utils.wait.wait_for to catch the
- # 'displaysurface_create 1120x832' trace-event.
- time.sleep(2)
-
- self.vm.cmd('human-monitor-command',
- command_line='screendump %s' % screenshot_path)
-
- @skipUnless(PIL_AVAILABLE, 'Python PIL not installed')
- def test_bootrom_framebuffer_size(self):
- screenshot_path = os.path.join(self.workdir, "dump.ppm")
- self.check_bootrom_framebuffer(screenshot_path)
-
- width, height = Image.open(screenshot_path).size
- self.assertEqual(width, 1120)
- self.assertEqual(height, 832)
-
- # Tesseract 4 adds a new OCR engine based on LSTM neural networks. The
- # new version is faster and more accurate than version 3. The drawback is
- # that it is still alpha-level software.
- @skipUnless(tesseract_available(4), 'tesseract OCR tool not available')
- def test_bootrom_framebuffer_ocr_with_tesseract(self):
- screenshot_path = os.path.join(self.workdir, "dump.ppm")
- self.check_bootrom_framebuffer(screenshot_path)
- lines = tesseract_ocr(screenshot_path, tesseract_version=4)
- text = '\n'.join(lines)
- self.assertIn('Testing the FPU', text)
- self.assertIn('System test failed. Error code', text)
- self.assertIn('Boot command', text)
- self.assertIn('Next>', text)
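The TODO in check_bootrom_framebuffer() above (replace the fixed two-second sleep with a wait for the 'displaysurface_create 1120x832' trace event) could in principle use avocado.utils.wait, which the migration tests further down already rely on. A rough sketch, assuming the trace event has been enabled (for example with '-trace displaysurface_create') and that its output ends up in the log returned by vm.get_log():

from avocado.utils import wait

def wait_for_display_surface(test, timeout=10):
    # Poll the QEMU log instead of sleeping for a fixed amount of time.
    return wait.wait_for(
        lambda: 'displaysurface_create 1120x832' in test.vm.get_log(),
        timeout=timeout, step=0.2)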
diff --git a/tests/avocado/machine_microblaze.py b/tests/avocado/machine_microblaze.py
deleted file mode 100644
index 807709c..0000000
--- a/tests/avocado/machine_microblaze.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Functional test that boots a microblaze Linux kernel and checks the console
-#
-# Copyright (c) 2018, 2021 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import time
-from avocado_qemu import exec_command, exec_command_and_wait_for_pattern
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-
-class MicroblazeMachine(QemuSystemTest):
-
- timeout = 90
-
- def test_microblaze_s3adsp1800(self):
- """
- :avocado: tags=arch:microblaze
- :avocado: tags=machine:petalogix-s3adsp1800
- """
-
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day17.tar.xz')
- tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir + '/day17/ballerina.bin')
- self.vm.launch()
- wait_for_console_pattern(self, 'This architecture does not have '
- 'kernel memory protection')
- # Note:
- # The kernel sometimes gets stuck after the "This architecture ..."
- # message, that's why we don't test for a later string here. This
- # needs some investigation by a microblaze wizard one day...
-
- def test_microblazeel_s3adsp1800(self):
- """
- :avocado: tags=arch:microblazeel
- :avocado: tags=machine:petalogix-s3adsp1800
- """
-
- self.require_netdev('user')
- tar_url = ('http://www.qemu-advent-calendar.org/2023/download/'
- 'day13.tar.gz')
- tar_hash = '6623d5fff5f84cfa8f34e286f32eff6a26546f44'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir + '/day13/xmaton.bin')
- self.vm.add_args('-nic', 'user,tftp=' + self.workdir + '/day13/')
- self.vm.launch()
- wait_for_console_pattern(self, 'QEMU Advent Calendar 2023')
- time.sleep(0.1)
- exec_command(self, 'root')
- time.sleep(0.1)
- exec_command_and_wait_for_pattern(self,
- 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png',
- '821cd3cab8efd16ad6ee5acc3642a8ea')
diff --git a/tests/avocado/machine_mips_fuloong2e.py b/tests/avocado/machine_mips_fuloong2e.py
deleted file mode 100644
index 89291f4..0000000
--- a/tests/avocado/machine_mips_fuloong2e.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Functional tests for the Lemote Fuloong-2E machine.
-#
-# Copyright (c) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class MipsFuloong2e(QemuSystemTest):
-
- timeout = 60
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available')
- def test_linux_kernel_isa_serial(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:fuloong2e
- :avocado: tags=endian:little
- :avocado: tags=device:bonito64
- :avocado: tags=device:via686b
- """
- # Recovery system for the Yeeloong laptop
- # (enough to test the fuloong2e southbridge, accessing its ISA bus)
- # http://dev.lemote.com/files/resource/download/rescue/rescue-yl
- kernel_hash = 'ec4d1bd89a8439c41033ca63db60160cc6d6f09a'
- kernel_path = self.fetch_asset('file://' + os.getenv('RESCUE_YL_PATH'),
- asset_hash=kernel_hash)
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path)
- self.vm.launch()
- wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote')
- cpu_revision = 'CPU revision is: 00006302 (ICT Loongson-2)'
- wait_for_console_pattern(self, cpu_revision)
diff --git a/tests/avocado/machine_mips_loongson3v.py b/tests/avocado/machine_mips_loongson3v.py
deleted file mode 100644
index 5194cf1..0000000
--- a/tests/avocado/machine_mips_loongson3v.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Functional tests for the Generic Loongson-3 Platform.
-#
-# Copyright (c) 2021 Jiaxun Yang <jiaxun.yang@flygoat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-import time
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class MipsLoongson3v(QemuSystemTest):
- timeout = 60
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_pmon_serial_console(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=endian:little
- :avocado: tags=machine:loongson3-virt
- :avocado: tags=cpu:Loongson-3A1000
- :avocado: tags=device:liointc
- :avocado: tags=device:goldfish_rtc
- """
-
- pmon_hash = '7c8b45dd81ccfc55ff28f5aa267a41c3'
- pmon_path = self.fetch_asset('https://github.com/loongson-community/pmon/'
- 'releases/download/20210112/pmon-3avirt.bin',
- asset_hash=pmon_hash, algorithm='md5')
-
- self.vm.set_console()
- self.vm.add_args('-bios', pmon_path)
- self.vm.launch()
- wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:')
diff --git a/tests/avocado/machine_mips_malta.py b/tests/avocado/machine_mips_malta.py
deleted file mode 100644
index 8cf84bd..0000000
--- a/tests/avocado/machine_mips_malta.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Functional tests for the MIPS Malta board
-#
-# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-import gzip
-import logging
-
-from avocado import skipUnless
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import interrupt_interactive_console_until_pattern
-from avocado_qemu import wait_for_console_pattern
-
-
-NUMPY_AVAILABLE = True
-try:
- import numpy as np
-except ImportError:
- NUMPY_AVAILABLE = False
-
-CV2_AVAILABLE = True
-try:
- import cv2
-except ImportError:
- CV2_AVAILABLE = False
-
-
-@skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed')
-@skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed')
-class MaltaMachineFramebuffer(QemuSystemTest):
-
- timeout = 30
-
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- def do_test_i6400_framebuffer_logo(self, cpu_cores_count):
- """
- Boot Linux kernel and check Tux logo is displayed on the framebuffer.
- """
- screendump_path = os.path.join(self.workdir, 'screendump.pbm')
-
- kernel_url = ('https://github.com/philmd/qemu-testing-blob/raw/'
- 'a5966ca4b5/mips/malta/mips64el/'
- 'vmlinux-4.7.0-rc1.I6400.gz')
- kernel_hash = '096f50c377ec5072e6a366943324622c312045f6'
- kernel_path_gz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- kernel_path = self.workdir + "vmlinux"
- archive.gzip_uncompress(kernel_path_gz, kernel_path)
-
- tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/'
- 'drivers/video/logo/logo_linux_vga16.ppm')
- tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af'
- tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'clocksource=GIC console=tty0 console=ttyS0')
- self.vm.add_args('-kernel', kernel_path,
- '-smp', '%u' % cpu_cores_count,
- '-vga', 'std',
- '-append', kernel_command_line)
- self.vm.launch()
- framebuffer_ready = 'Console: switching to colour frame buffer device'
- wait_for_console_pattern(self, framebuffer_ready,
- failure_message='Kernel panic - not syncing')
- self.vm.cmd('human-monitor-command', command_line='stop')
- self.vm.cmd('human-monitor-command',
- command_line='screendump %s' % screendump_path)
- logger = logging.getLogger('framebuffer')
-
- match_threshold = 0.95
- screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR)
- tuxlogo_bgr = cv2.imread(tuxlogo_path, cv2.IMREAD_COLOR)
- result = cv2.matchTemplate(screendump_bgr, tuxlogo_bgr,
- cv2.TM_CCOEFF_NORMED)
- loc = np.where(result >= match_threshold)
- tuxlogo_count = 0
- h, w = tuxlogo_bgr.shape[:2]
- debug_png = os.getenv('AVOCADO_CV2_SCREENDUMP_PNG_PATH')
- for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1):
- logger.debug('found Tux at position (x, y) = %s', pt)
- cv2.rectangle(screendump_bgr, pt,
- (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
- if debug_png:
- cv2.imwrite(debug_png, screendump_bgr)
- self.assertGreaterEqual(tuxlogo_count, cpu_cores_count)
-
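Stripped of the test framework, the Tux-counting step in do_test_i6400_framebuffer_logo() above boils down to OpenCV template matching with a score threshold, one match expected per online CPU. A stand-alone sketch of the same calls:

import cv2
import numpy as np

def count_template_matches(image_path, template_path, threshold=0.95):
    # Normalized cross-correlation of the logo against the screendump;
    # every location scoring at or above the threshold counts as one hit.
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    template = cv2.imread(template_path, cv2.IMREAD_COLOR)
    scores = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return int(np.count_nonzero(scores >= threshold))

As in the deleted code, several high-scoring locations around a single logo can inflate the count, which may be part of why the test only asserts a lower bound.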
- def test_mips_malta_i6400_framebuffer_logo_1core(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=cpu:I6400
- """
- self.do_test_i6400_framebuffer_logo(1)
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test_mips_malta_i6400_framebuffer_logo_7cores(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=cpu:I6400
- :avocado: tags=mips:smp
- :avocado: tags=flaky
- """
- self.do_test_i6400_framebuffer_logo(7)
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test_mips_malta_i6400_framebuffer_logo_8cores(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=cpu:I6400
- :avocado: tags=mips:smp
- :avocado: tags=flaky
- """
- self.do_test_i6400_framebuffer_logo(8)
-
-class MaltaMachine(QemuSystemTest):
-
- def do_test_yamon(self):
- rom_url = ('https://s3-eu-west-1.amazonaws.com/'
- 'downloads-mips/mips-downloads/'
- 'YAMON/yamon-bin-02.22.zip')
- rom_hash = '8da7ecddbc5312704b8b324341ee238189bde480'
- zip_path = self.fetch_asset(rom_url, asset_hash=rom_hash)
-
- archive.extract(zip_path, self.workdir)
- yamon_path = os.path.join(self.workdir, 'yamon-02.22.bin')
-
- self.vm.set_console()
- self.vm.add_args('-bios', yamon_path)
- self.vm.launch()
-
- prompt = 'YAMON>'
- pattern = 'YAMON ROM Monitor'
- interrupt_interactive_console_until_pattern(self, pattern, prompt)
- wait_for_console_pattern(self, prompt)
- self.vm.shutdown()
-
- def test_mipsel_malta_yamon(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- """
- self.do_test_yamon()
-
- def test_mips64el_malta_yamon(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- """
- self.do_test_yamon()
diff --git a/tests/avocado/machine_rx_gdbsim.py b/tests/avocado/machine_rx_gdbsim.py
deleted file mode 100644
index 412a7a5..0000000
--- a/tests/avocado/machine_rx_gdbsim.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-
-
-class RxGdbSimMachine(QemuSystemTest):
-
- timeout = 30
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test_uboot(self):
- """
- Boots U-Boot and checks that the console is operational.
-
- :avocado: tags=arch:rx
- :avocado: tags=machine:gdbsim-r5f562n8
- :avocado: tags=endian:little
- :avocado: tags=flaky
- """
- uboot_url = ('https://acc.dl.osdn.jp/users/23/23888/u-boot.bin.gz')
- uboot_hash = '9b78dbd43b40b2526848c0b1ce9de02c24f4dcdb'
- uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash)
- uboot_path = archive.uncompress(uboot_path, self.workdir)
-
- self.vm.set_console()
- self.vm.add_args('-bios', uboot_path,
- '-no-reboot')
- self.vm.launch()
- uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty'
- wait_for_console_pattern(self, uboot_version)
- gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)'
- # FIXME limit baudrate on chardev, else we type too fast
- #exec_command_and_wait_for_pattern(self, 'version', gcc_version)
-
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test_linux_sash(self):
- """
- Boots a Linux kernel and checks that the console is operational.
-
- :avocado: tags=arch:rx
- :avocado: tags=machine:gdbsim-r5f562n7
- :avocado: tags=endian:little
- :avocado: tags=flaky
- """
- dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb')
- dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18'
- dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash)
- kernel_url = ('http://acc.dl.osdn.jp/users/23/23845/zImage')
- kernel_hash = '39a81067f8d72faad90866ddfefa19165d68fc99'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon'
- self.vm.add_args('-kernel', kernel_path,
- '-dtb', dtb_path,
- '-no-reboot')
- self.vm.launch()
- wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)',
- failure_message='Kernel panic - not syncing')
- exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux')
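The deleted tests lean on several different avocado.utils.archive helpers; for orientation, this is how the files in this patch use them (the file names are taken from the tests, the paths are placeholders):

from avocado.utils import archive

workdir = '/tmp/qemu-test-workdir'                            # placeholder
archive.extract('day17.tar.xz', workdir)                      # tarballs/zips
uboot_bin = archive.uncompress('u-boot.bin.gz', workdir)      # returns a path
archive.gzip_uncompress('vmlinux.gz', workdir + '/vmlinux')   # explicit dest
archive.lzma_uncompress('initrd.img', workdir + '/initrd-raw.img')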
diff --git a/tests/avocado/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py
deleted file mode 100644
index 26e938c..0000000
--- a/tests/avocado/machine_s390_ccw_virtio.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Functional test that boots an s390x Linux guest with ccw and PCI devices
-# attached and checks whether the devices are recognized by Linux
-#
-# Copyright (c) 2020 Red Hat, Inc.
-#
-# Author:
-# Cornelia Huck <cohuck@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import tempfile
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-
-class S390CCWVirtioMachine(QemuSystemTest):
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- timeout = 120
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def wait_for_crw_reports(self):
- exec_command_and_wait_for_pattern(self,
- 'while ! (dmesg -c | grep CRW) ; do sleep 1 ; done',
- 'CRW reports')
-
- dmesg_clear_count = 1
- def clear_guest_dmesg(self):
- exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
- r'echo dm_clear\ ' + str(self.dmesg_clear_count),
- r'dm_clear ' + str(self.dmesg_clear_count))
- self.dmesg_clear_count += 1
-
- def test_s390x_devices(self):
-
- """
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
-
- kernel_url = ('https://snapshot.debian.org/archive/debian/'
- '20201126T092837Z/dists/buster/main/installer-s390x/'
- '20190702+deb10u6/images/generic/kernel.debian')
- kernel_hash = '5821fbee57d6220a067a8b967d24595621aa1eb6'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- initrd_url = ('https://snapshot.debian.org/archive/debian/'
- '20201126T092837Z/dists/buster/main/installer-s390x/'
- '20190702+deb10u6/images/generic/initrd.debian')
- initrd_hash = '81ba09c97bef46e8f4660ac25b4ac0a5be3a94d6'
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3')
- self.vm.add_args('-nographic',
- '-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-cpu', 'max,prno-trng=off',
- '-device', 'virtio-net-ccw,devno=fe.1.1111',
- '-device',
- 'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1',
- '-device',
- 'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2',
- '-device', 'zpci,uid=5,target=zzz',
- '-device', 'virtio-net-pci,id=zzz',
- '-device', 'zpci,uid=0xa,fid=12,target=serial',
- '-device', 'virtio-serial-pci,id=serial',
- '-device', 'virtio-balloon-ccw')
- self.vm.launch()
-
- shell_ready = "sh: can't access tty; job control turned off"
- self.wait_for_console_pattern(shell_ready)
- # first debug shell is too early, we need to wait for device detection
- exec_command_and_wait_for_pattern(self, 'exit', shell_ready)
-
- ccw_bus_ids="0.1.1111 0.2.0000 0.3.1234"
- pci_bus_ids="0005:00:00.0 000a:00:00.0"
- exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/',
- ccw_bus_ids)
- exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/',
- pci_bus_ids)
- # check that the device at 0.2.0000 is in legacy mode, while the
- # device at 0.3.1234 has the virtio-1 feature bit set
- virtio_rng_features="00000000000000000000000000001100" + \
- "10000000000000000000000000000000"
- virtio_rng_features_legacy="00000000000000000000000000001100" + \
- "00000000000000000000000000000000"
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features',
- virtio_rng_features_legacy)
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features',
- virtio_rng_features)
- # check that /dev/hwrng works - and that it's gone after ejecting
- exec_command_and_wait_for_pattern(self,
- 'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
- '10+0 records out')
- self.clear_guest_dmesg()
- self.vm.cmd('device_del', id='rn1')
- self.wait_for_crw_reports()
- self.clear_guest_dmesg()
- self.vm.cmd('device_del', id='rn2')
- self.wait_for_crw_reports()
- exec_command_and_wait_for_pattern(self,
- 'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
- 'dd: /dev/hwrng: No such device')
- # verify that we indeed have virtio-net devices (without having the
- # virtio-net driver handy)
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/ccw/devices/0.1.1111/cutype',
- '3832/01')
- exec_command_and_wait_for_pattern(self,
- r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
- r'0x1af4')
- exec_command_and_wait_for_pattern(self,
- r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
- r'0x0001')
- # check fid propagation
- exec_command_and_wait_for_pattern(self,
- r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
- r'0x0000000c')
- # add another device
- self.clear_guest_dmesg()
- self.vm.cmd('device_add', driver='virtio-net-ccw',
- devno='fe.0.4711', id='net_4711')
- self.wait_for_crw_reports()
- exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do '
- 'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;'
- 'sleep 1 ; done ; ls /sys/bus/ccw/devices/',
- '0.0.4711')
- # and detach it again
- self.clear_guest_dmesg()
- self.vm.cmd('device_del', id='net_4711')
- self.vm.event_wait(name='DEVICE_DELETED',
- match={'data': {'device': 'net_4711'}})
- self.wait_for_crw_reports()
- exec_command_and_wait_for_pattern(self,
- 'ls /sys/bus/ccw/devices/0.0.4711',
- 'No such file or directory')
- # test the virtio-balloon device
- exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
- 'MemTotal: 115640 kB')
- self.vm.cmd('human-monitor-command', command_line='balloon 96')
- exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
- 'MemTotal: 82872 kB')
- self.vm.cmd('human-monitor-command', command_line='balloon 128')
- exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
- 'MemTotal: 115640 kB')
-
-
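The legacy-versus-virtio-1 check in test_s390x_devices() above compares two long feature strings that differ in exactly one position: the kernel exposes the 'features' attribute as a string of '0'/'1' characters, one per feature bit, and bit 32 (VIRTIO_F_VERSION_1) is what separates the two modes. A small sketch, assuming that sysfs layout:

VIRTIO_F_VERSION_1 = 32   # assumed feature-bit number for "virtio 1.0"

def is_virtio_1(features_str):
    # features_str is the content of .../virtio*/features as read above.
    return features_str[VIRTIO_F_VERSION_1] == '1'

features_modern = ('00000000000000000000000000001100'
                   '10000000000000000000000000000000')
features_legacy = ('00000000000000000000000000001100'
                   '00000000000000000000000000000000')
assert is_virtio_1(features_modern) and not is_virtio_1(features_legacy)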
- def test_s390x_fedora(self):
-
- """
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- :avocado: tags=device:virtio-gpu
- :avocado: tags=device:virtio-crypto
- :avocado: tags=device:virtio-net
- :avocado: tags=flaky
- """
-
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/31/Server/s390x/os'
- '/images/kernel.img')
- kernel_hash = 'b93d1efcafcf29c1673a4ce371a1f8b43941cfeb'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- initrd_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/31/Server/s390x/os'
- '/images/initrd.img')
- initrd_hash = '3de45d411df5624b8d8ef21cd0b44419ab59b12f'
- initrd_path_xz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
- archive.lzma_uncompress(initrd_path_xz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 '
- 'rd.plymouth=0 plymouth.enable=0 rd.rescue')
- self.vm.add_args('-nographic',
- '-smp', '4',
- '-m', '512',
- '-name', 'Some Guest Name',
- '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa',
- '-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line,
- '-device', 'zpci,uid=7,target=n',
- '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12',
- '-device', 'virtio-rng-ccw,devno=fe.1.9876',
- '-device', 'virtio-gpu-ccw,devno=fe.2.5432')
- self.vm.launch()
- self.wait_for_console_pattern('Entering emergency mode')
-
- # Some tests to see whether the CLI options have been considered:
- self.log.info("Test whether QEMU CLI options have been considered")
- exec_command_and_wait_for_pattern(self,
- 'while ! (dmesg | grep enP7p0s0) ; do sleep 1 ; done',
- 'virtio_net virtio0 enP7p0s0: renamed')
- exec_command_and_wait_for_pattern(self, 'lspci',
- '0007:00:00.0 Class 0200: Device 1af4:1000')
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/class/net/enP7p0s0/address',
- '02:ca:fe:fa:ce:12')
- exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876')
- exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432')
- exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
- 'processors : 4')
- exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo',
- 'MemTotal: 499848 kB')
- exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo',
- 'Extended Name: Some Guest Name')
- exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo',
- '30de4fd9-b4d5-409e-86a5-09b387f70bfa')
-
- # Disable blinking cursor, then write some stuff into the framebuffer.
- # QEMU's PPM screendumps contain uncompressed 24-bit values, while the
- # framebuffer uses 32-bit, so we pad our text with some spaces when
- # writing to the framebuffer. Since the PPM is uncompressed, we then
- # can simply read the written "magic bytes" back from the PPM file to
- # check whether the framebuffer is working as expected.
- # Unfortunately, this test is flaky, so we don't run it by default
- if os.getenv('QEMU_TEST_FLAKY_TESTS'):
- self.log.info("Test screendump of virtio-gpu device")
- exec_command_and_wait_for_pattern(self,
- 'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
- 'virtio_gpudrmfb frame buffer device')
- exec_command_and_wait_for_pattern(self,
- r'echo -e "\e[?25l" > /dev/tty0', ':/#')
- exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
- 'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
- 'done',
- ':/#')
- exec_command_and_wait_for_pattern(self,
- 'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt',
- '12+0 records out')
- with tempfile.NamedTemporaryFile(suffix='.ppm',
- prefix='qemu-scrdump-') as ppmfile:
- self.vm.cmd('screendump', filename=ppmfile.name)
- ppmfile.seek(0)
- line = ppmfile.readline()
- self.assertEqual(line, b"P6\n")
- line = ppmfile.readline()
- self.assertEqual(line, b"1280 800\n")
- line = ppmfile.readline()
- self.assertEqual(line, b"255\n")
- line = ppmfile.readline(256)
- self.assertEqual(line, b"The quick fox jumps over a lazy dog\n")
- else:
- self.log.info("Skipped flaky screendump of virtio-gpu device test")
-
- # Hot-plug a virtio-crypto device and see whether it gets accepted
- self.log.info("Test hot-plug virtio-crypto device")
- self.clear_guest_dmesg()
- self.vm.cmd('object-add', qom_type='cryptodev-backend-builtin',
- id='cbe0')
- self.vm.cmd('device_add', driver='virtio-crypto-ccw', id='crypdev0',
- cryptodev='cbe0', devno='fe.0.2342')
- exec_command_and_wait_for_pattern(self,
- 'while ! (dmesg -c | grep Accelerator.device) ; do'
- ' sleep 1 ; done', 'Accelerator device is ready')
- exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342')
- self.vm.cmd('device_del', id='crypdev0')
- self.vm.cmd('object-del', id='cbe0')
- exec_command_and_wait_for_pattern(self,
- 'while ! (dmesg -c | grep Start.virtcrypto_remove) ; do'
- ' sleep 1 ; done', 'Start virtcrypto_remove.')
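The framebuffer check in test_s390x_fedora() works because a QEMU PPM screendump has a tiny, line-oriented header ('P6', then width and height, then the maximum channel value) followed by raw 24-bit pixel data, so the "magic bytes" written through /dev/fb0 can be read straight back out of the file. A stand-alone version of the same header walk:

def read_ppm_header(path):
    # Returns (magic, width, height, maxval); for the virtio-gpu console
    # above this is expected to be (b'P6', 1280, 800, 255).
    with open(path, 'rb') as ppm:
        magic = ppm.readline().strip()
        width, height = map(int, ppm.readline().split())
        maxval = int(ppm.readline())
        return magic, width, height, maxval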
diff --git a/tests/avocado/machine_sparc64_sun4u.py b/tests/avocado/machine_sparc64_sun4u.py
deleted file mode 100644
index d333c0a..0000000
--- a/tests/avocado/machine_sparc64_sun4u.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright (c) 2020 Red Hat, Inc.
-#
-# Author:
-# Thomas Huth <thuth@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-from boot_linux_console import LinuxKernelTest
-
-class Sun4uMachine(LinuxKernelTest):
- """Boots the Linux kernel and checks that the console is operational"""
-
- timeout = 90
-
- def test_sparc64_sun4u(self):
- """
- :avocado: tags=arch:sparc64
- :avocado: tags=machine:sun4u
- """
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day23.tar.xz')
- tar_hash = '142db83cd974ffadc4f75c8a5cad5bcc5722c240'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir + '/day23/vmlinux',
- '-append', self.KERNEL_COMMON_COMMAND_LINE)
- self.vm.launch()
- wait_for_console_pattern(self, 'Starting logging: OK')
diff --git a/tests/avocado/machine_sparc_leon3.py b/tests/avocado/machine_sparc_leon3.py
deleted file mode 100644
index e61b223..0000000
--- a/tests/avocado/machine_sparc_leon3.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Functional test that boots a Leon3 machine and checks its serial console.
-#
-# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado import skip
-
-
-class Leon3Machine(QemuSystemTest):
-
- timeout = 60
-
- @skip("Test currently broken")
- # A Window Underflow exception occurs before booting the kernel,
- # and QEMU exits by calling cpu_abort(), which makes this test fail.
- def test_leon3_helenos_uimage(self):
- """
- :avocado: tags=arch:sparc
- :avocado: tags=machine:leon3_generic
- :avocado: tags=binfmt:uimage
- """
- kernel_url = ('http://www.helenos.org/releases/'
- 'HelenOS-0.6.0-sparc32-leon3.bin')
- kernel_hash = 'a88c9cfdb8430c66650e5290a08765f9bf049a30'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path)
-
- self.vm.launch()
-
- wait_for_console_pattern(self, 'Copyright (c) 2001-2014 HelenOS project')
- wait_for_console_pattern(self, 'Booting the kernel ...')
diff --git a/tests/avocado/mem-addr-space-check.py b/tests/avocado/mem-addr-space-check.py
deleted file mode 100644
index 85541ea..0000000
--- a/tests/avocado/mem-addr-space-check.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Check for crash when using memory beyond the available guest processor
-# address space.
-#
-# Copyright (c) 2023 Red Hat, Inc.
-#
-# Author:
-# Ani Sinha <anisinha@redhat.com>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-from avocado_qemu import QemuSystemTest
-import signal
-import time
-
-class MemAddrCheck(QemuSystemTest):
- # after launch, in order to generate the logs from QEMU we need to
- # wait for some time. Launching and then immediately shutting down
- # the VM generates empty logs. A delay of 1 second is added for
- # this reason.
- DELAY_Q35_BOOT_SEQUENCE = 1
-
- # first, lets test some 32-bit processors.
- # for all 32-bit cases, pci64_hole_size is 0.
- def test_phybits_low_pse36(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- With pse36 feature ON, a processor has 36 bits of addressing. So it can
- access up to a maximum of 64GiB of memory. Memory hotplug region begins
- at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
- we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
- hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
- for dimm alignment for all machines. That leaves total hotpluggable
- actual memory size of 59 GiB. If the VM is started with 0.5 GiB of
- memory, maxmem should be set to a maximum value of 59.5 GiB to ensure
- that the processor can address all memory directly.
- Note that 64-bit pci hole size is 0 in this case. If maxmem is set to
- 59.6G, QEMU should fail to start with the message "phys-bits too low".
- If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU
- should start fine.
- """
- self.vm.add_args('-S', '-machine', 'q35', '-m',
- '512,slots=1,maxmem=59.6G',
- '-cpu', 'pentium,pse36=on', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_low_pae(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- With pae feature ON, a processor has 36 bits of addressing. So it can
- access up to a maximum of 64GiB of memory. Rest is the same as the case
- with pse36 above.
- """
- self.vm.add_args('-S', '-machine', 'q35', '-m',
- '512,slots=1,maxmem=59.6G',
- '-cpu', 'pentium,pae=on', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_pentium_pse36(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Setting maxmem to 59.5G and making sure that QEMU can start with the
- same options as the failing case above with pse36 cpu feature.
- """
- self.vm.add_args('-machine', 'q35', '-m',
- '512,slots=1,maxmem=59.5G',
- '-cpu', 'pentium,pse36=on', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_pentium_pae(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Test is same as above but now with pae cpu feature turned on.
- Setting maxmem to 59.5G and making sure that QEMU can start fine
- with the same options as the case above.
- """
- self.vm.add_args('-machine', 'q35', '-m',
- '512,slots=1,maxmem=59.5G',
- '-cpu', 'pentium,pae=on', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_pentium2(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Pentium2 has 36 bits of addressing, so it's the same as pentium
- with pse36 ON.
- """
- self.vm.add_args('-machine', 'q35', '-m',
- '512,slots=1,maxmem=59.5G',
- '-cpu', 'pentium2', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_low_nonpse36(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Pentium processor has 32 bits of addressing without pse36 or pae
- so it can access physical address up to 4 GiB. Setting maxmem to
- 4 GiB should make QEMU fail to start with "phys-bits too low"
- message because the region for memory hotplug is always placed
- above 4 GiB due to the PCI hole and simplicity.
- """
- self.vm.add_args('-S', '-machine', 'q35', '-m',
- '512,slots=1,maxmem=4G',
- '-cpu', 'pentium', '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- # now lets test some 64-bit CPU cases.
- def test_phybits_low_tcg_q35_70_amd(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- For q35 7.1 machines and above, there is an HT window that starts at
- 1012 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
- "above_4G" memory is adjusted to start at the 1 TiB boundary for AMD cpus
- in the default case. Let's first test without that adjustment, i.e. with
- a 7.0 machine. For q35-7.0 machines, "above 4G" memory starts at 4 GiB.
- pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to
- be 40, TCG emulated CPUs have maximum of 1 TiB (1024 GiB) of
- directly addressable memory.
- Hence, maxmem value at most can be
- 1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB
- which is equal to 987.5 GiB. Setting the value to 988 GiB should
- make QEMU fail with the error message.
- """
- self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
- '512,slots=1,maxmem=988G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
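The docstrings in this file all quote the result of the same back-of-the-envelope calculation. A small helper that reproduces the 59.5 GiB and 987.5 GiB limits mentioned above, under the simplified layout the docstrings describe (hotplug area starting at 4 GiB, 1 GiB alignment reserve per slot, 0.5 GiB of base RAM); this mirrors the prose, not the actual board-initialisation code:

GiB = 1024 ** 3

def max_hotpluggable_maxmem_gib(phys_bits, pci64_hole_gib, slots=1,
                                base_ram_gib=0.5, above_4g_start_gib=4):
    addressable_gib = (1 << phys_bits) // GiB
    return (addressable_gib - above_4g_start_gib - pci64_hole_gib
            - slots + base_ram_gib)

assert max_hotpluggable_maxmem_gib(36, pci64_hole_gib=0) == 59.5    # pse36/pae
assert max_hotpluggable_maxmem_gib(40, pci64_hole_gib=32) == 987.5  # q35-7.0 TCG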
- def test_phybits_low_tcg_q35_71_amd(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- AMD_HT_START is defined to be at 1012 GiB. So for q35 machines
- version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit
- processor address space, it has to be 1012 GiB, that is 12 GiB
- less than in the case above, in order to accommodate the HT hole.
- Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less
- than 988 GiB).
- """
- self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
- '512,slots=1,maxmem=976G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_tcg_q35_70_amd(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Same as q35-7.0 AMD case except that here we check that QEMU can
- successfully start when maxmem is < 988G.
- """
- self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
- '512,slots=1,maxmem=987.5G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_tcg_q35_71_amd(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Same as q35-7.1 AMD case except that here we check that QEMU can
- successfully start when maxmem is < 976G.
- """
- self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
- '512,slots=1,maxmem=975.5G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_tcg_q35_71_intel(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Same parameters as test_phybits_low_tcg_q35_71_amd() but use
- Intel cpu instead. QEMU should start fine in this case as
- "above_4G" memory starts at 4G.
- """
- self.vm.add_args('-S', '-cpu', 'Skylake-Server',
- '-machine', 'pc-q35-7.1', '-m',
- '512,slots=1,maxmem=976G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_low_tcg_q35_71_amd_41bits(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- AMD processor with 41 bits. Max cpu hw address = 2 TiB.
- By setting maxmem above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
- force "above_4G" memory to start at 1 TiB for q35-7.1 machines
- (max GPA will be above AMD_HT_START which is defined as 1012 GiB).
-
- With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5
- GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug
- memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should
- fail to start.
- """
- self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
- '-machine', 'pc-q35-7.1', '-m',
- '512,slots=1,maxmem=992G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_tcg_q35_71_amd_41bits(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- AMD processor with 41 bits. Max cpu hw address = 2 TiB.
- Same as above, but by setting maxmem between 976 GiB and 992 GiB,
- QEMU should start fine.
- """
- self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
- '-machine', 'pc-q35-7.1', '-m',
- '512,slots=1,maxmem=990G',
- '-display', 'none',
- '-object', 'memory-backend-ram,id=mem1,size=1G',
- '-device', 'pc-dimm,id=vm0,memdev=mem1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_low_tcg_q35_intel_cxl(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- cxl memory window starts after memory device range. Here, we use 1 GiB
- of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and
- starts after the cxl memory window.
- So maxmem here should be at most 986 GiB considering all memory boundary
- alignment constraints with 40 bits (1 TiB) of processor physical bits.
- """
- self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
- '-machine', 'q35,cxl=on', '-m',
- '512,slots=1,maxmem=987G',
- '-display', 'none',
- '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
- '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- self.vm.wait()
- self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
- self.assertRegex(self.vm.get_log(), r'phys-bits too low')
-
- def test_phybits_ok_tcg_q35_intel_cxl(self):
- """
- :avocado: tags=machine:q35
- :avocado: tags=arch:x86_64
-
- Same as above but here we do not reserve any cxl memory window. Hence,
- with the exact same parameters as above, QEMU should start fine even
- with cxl enabled.
- """
- self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
- '-machine', 'q35,cxl=on', '-m',
- '512,slots=1,maxmem=987G',
- '-display', 'none',
- '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1')
- self.vm.set_qmp_monitor(enabled=False)
- self.vm.launch()
- time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
- self.vm.shutdown()
- self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
diff --git a/tests/avocado/migration.py b/tests/avocado/migration.py
deleted file mode 100644
index be6234b..0000000
--- a/tests/avocado/migration.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Migration test
-#
-# Copyright (c) 2019 Red Hat, Inc.
-#
-# Authors:
-# Cleber Rosa <crosa@redhat.com>
-# Caio Carrara <ccarrara@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-
-import tempfile
-import os
-
-from avocado_qemu import QemuSystemTest
-from avocado import skipUnless
-
-from avocado.utils.network import ports
-from avocado.utils import wait
-from avocado.utils.path import find_command
-
-
-class MigrationTest(QemuSystemTest):
- """
- :avocado: tags=migration
- """
-
- timeout = 10
-
- @staticmethod
- def migration_finished(vm):
- return vm.cmd('query-migrate')['status'] in ('completed', 'failed')
-
- def assert_migration(self, src_vm, dst_vm):
- wait.wait_for(self.migration_finished,
- timeout=self.timeout,
- step=0.1,
- args=(src_vm,))
- wait.wait_for(self.migration_finished,
- timeout=self.timeout,
- step=0.1,
- args=(dst_vm,))
- self.assertEqual(src_vm.cmd('query-migrate')['status'], 'completed')
- self.assertEqual(dst_vm.cmd('query-migrate')['status'], 'completed')
- self.assertEqual(dst_vm.cmd('query-status')['status'], 'running')
- self.assertEqual(src_vm.cmd('query-status')['status'],'postmigrate')
-
- def do_migrate(self, dest_uri, src_uri=None):
- dest_vm = self.get_vm('-incoming', dest_uri)
- dest_vm.add_args('-nodefaults')
- dest_vm.launch()
- if src_uri is None:
- src_uri = dest_uri
- source_vm = self.get_vm()
- source_vm.add_args('-nodefaults')
- source_vm.launch()
- source_vm.qmp('migrate', uri=src_uri)
- self.assert_migration(source_vm, dest_vm)
-
- def _get_free_port(self):
- port = ports.find_free_port()
- if port is None:
- self.cancel('Failed to find a free port')
- return port
-
- def migration_with_tcp_localhost(self):
- dest_uri = 'tcp:localhost:%u' % self._get_free_port()
- self.do_migrate(dest_uri)
-
- def migration_with_unix(self):
- with tempfile.TemporaryDirectory(prefix='socket_') as socket_path:
- dest_uri = 'unix:%s/qemu-test.sock' % socket_path
- self.do_migrate(dest_uri)
-
- @skipUnless(find_command('nc', default=False), "'nc' command not found")
- def migration_with_exec(self):
- """The test works for both netcat-traditional and netcat-openbsd packages."""
- free_port = self._get_free_port()
- dest_uri = 'exec:nc -l localhost %u' % free_port
- src_uri = 'exec:nc localhost %u' % free_port
- self.do_migrate(dest_uri, src_uri)
-
-
-@skipUnless('aarch64' in os.uname()[4], "host != target")
-class Aarch64(MigrationTest):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:max
- """
-
- def test_migration_with_tcp_localhost(self):
- self.migration_with_tcp_localhost()
-
- def test_migration_with_unix(self):
- self.migration_with_unix()
-
- def test_migration_with_exec(self):
- self.migration_with_exec()
-
-
-@skipUnless('x86_64' in os.uname()[4], "host != target")
-class X86_64(MigrationTest):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:pc
- :avocado: tags=cpu:qemu64
- """
-
- def test_migration_with_tcp_localhost(self):
- self.migration_with_tcp_localhost()
-
- def test_migration_with_unix(self):
- self.migration_with_unix()
-
- def test_migration_with_exec(self):
- self.migration_with_exec()
-
-
-@skipUnless('ppc64le' in os.uname()[4], "host != target")
-class PPC64(MigrationTest):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- def test_migration_with_tcp_localhost(self):
- self.migration_with_tcp_localhost()
-
- def test_migration_with_unix(self):
- self.migration_with_unix()
-
- def test_migration_with_exec(self):
- self.migration_with_exec()
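One detail of the migration_finished() predicate above is easy to miss: it deliberately treats 'failed' as a terminal state, so wait_for() returns promptly either way, and it is the later assertEqual() against 'completed' that turns a failed migration into a test failure rather than a timeout.

def migration_finished(vm):
    # Accept either terminal state here; correctness is asserted afterwards.
    return vm.cmd('query-migrate')['status'] in ('completed', 'failed')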
diff --git a/tests/avocado/multiprocess.py b/tests/avocado/multiprocess.py
deleted file mode 100644
index ee7490a..0000000
--- a/tests/avocado/multiprocess.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Test for multiprocess qemu
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-
-import os
-import socket
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command
-from avocado_qemu import exec_command_and_wait_for_pattern
-
-class Multiprocess(QemuSystemTest):
- """
- :avocado: tags=multiprocess
- """
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
-
- def do_test(self, kernel_url, kernel_hash, initrd_url, initrd_hash,
- kernel_command_line, machine_type):
- """Main test method"""
- self.require_accelerator('kvm')
- self.require_multiprocess()
-
- # Create socketpair to connect proxy and remote processes
- proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX,
- socket.SOCK_STREAM)
- os.set_inheritable(proxy_sock.fileno(), True)
- os.set_inheritable(remote_sock.fileno(), True)
-
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
-
- # Create remote process
- remote_vm = self.get_vm()
- remote_vm.add_args('-machine', 'x-remote')
- remote_vm.add_args('-nodefaults')
- remote_vm.add_args('-device', 'lsi53c895a,id=lsi1')
- remote_vm.add_args('-object', 'x-remote-object,id=robj1,'
- 'devid=lsi1,fd='+str(remote_sock.fileno()))
- remote_vm.launch()
-
- # Create proxy process
- self.vm.set_console()
- self.vm.add_args('-machine', machine_type)
- self.vm.add_args('-accel', 'kvm')
- self.vm.add_args('-cpu', 'host')
- self.vm.add_args('-object',
- 'memory-backend-memfd,id=sysmem-file,size=2G')
- self.vm.add_args('--numa', 'node,memdev=sysmem-file')
- self.vm.add_args('-m', '2048')
- self.vm.add_args('-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line)
- self.vm.add_args('-device',
- 'x-pci-proxy-dev,'
- 'id=lsi1,fd='+str(proxy_sock.fileno()))
- self.vm.launch()
- wait_for_console_pattern(self, 'as init process',
- 'Kernel panic - not syncing')
- exec_command(self, 'mount -t sysfs sysfs /sys')
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/pci/devices/*/uevent',
- 'PCI_ID=1000:0012')
-
- def test_multiprocess_x86_64(self):
- """
- :avocado: tags=arch:x86_64
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/31/Everything/x86_64/os/images'
- '/pxeboot/vmlinuz')
- kernel_hash = '5b6f6876e1b5bda314f93893271da0d5777b1f3c'
- initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/31/Everything/x86_64/os/images'
- '/pxeboot/initrd.img')
- initrd_hash = 'dd0340a1b39bd28f88532babd4581c67649ec5b1'
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0 rdinit=/bin/bash')
- machine_type = 'pc'
- self.do_test(kernel_url, kernel_hash, initrd_url, initrd_hash,
- kernel_command_line, machine_type)
-
- def test_multiprocess_aarch64(self):
- """
- :avocado: tags=arch:aarch64
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/31/Everything/aarch64/os/images'
- '/pxeboot/vmlinuz')
- kernel_hash = '3505f2751e2833c681de78cee8dda1e49cabd2e8'
- initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/31/Everything/aarch64/os/images'
- '/pxeboot/initrd.img')
- initrd_hash = '519a1962daf17d67fc3a9c89d45affcb399607db'
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'rdinit=/bin/bash console=ttyAMA0')
- machine_type = 'virt,gic-version=3'
- self.do_test(kernel_url, kernel_hash, initrd_url, initrd_hash,
- kernel_command_line, machine_type)
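The multi-process wiring in do_test() above reduces to plain file-descriptor passing over a Unix socket pair: one end goes to the remote (device) process via x-remote-object, the other to the x-pci-proxy-dev in the main VM. The essential calls, with remote_args/proxy_args standing in for the framework's add_args() invocations:

import os
import socket

proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
os.set_inheritable(proxy_sock.fileno(), True)    # keep the fds across exec
os.set_inheritable(remote_sock.fileno(), True)

remote_args = ('-machine', 'x-remote',
               '-device', 'lsi53c895a,id=lsi1',
               '-object', 'x-remote-object,id=robj1,devid=lsi1,fd=%d'
                          % remote_sock.fileno())
proxy_args = ('-device',
              'x-pci-proxy-dev,id=lsi1,fd=%d' % proxy_sock.fileno())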
diff --git a/tests/avocado/netdev-ethtool.py b/tests/avocado/netdev-ethtool.py
deleted file mode 100644
index 5f33288..0000000
--- a/tests/avocado/netdev-ethtool.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# ethtool tests for emulated network devices
-#
-# This test leverages ethtool's --test sequence to validate network
-# device behaviour.
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-from avocado import skip
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class NetDevEthtool(QemuSystemTest):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:q35
- """
-
- # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV
- timeout = 45
-
- # Fetch assets from the netdev-ethtool subdir of my shared test
- # images directory on fileserver.linaro.org.
- def get_asset(self, name, sha1):
- base_url = ('https://fileserver.linaro.org/s/'
- 'kE4nCFLdQcoBF9t/download?'
- 'path=%2Fnetdev-ethtool&files=' )
- url = base_url + name
- # use an explicit name, since the download URL does not parse
- # neatly into a unique asset name
- return self.fetch_asset(name=name, locations=(url), asset_hash=sha1)
-
- def common_test_code(self, netdev, extra_args=None):
-
- # This custom kernel has drivers for all the supported network
- # devices we can emulate in QEMU
- kernel = self.get_asset("bzImage",
- "33469d7802732d5815226166581442395cb289e2")
-
- rootfs = self.get_asset("rootfs.squashfs",
- "9793cea7021414ae844bda51f558bd6565b50cdc")
-
- append = 'printk.time=0 console=ttyS0 '
- append += 'root=/dev/sr0 rootfstype=squashfs '
-
- # any additional kernel tweaks for the test
- if extra_args:
- append += extra_args
-
- # finally invoke ethtool directly
- append += ' init=/usr/sbin/ethtool -- -t eth1 offline'
-
- # add the rootfs via a readonly cdrom image
- drive = f"file={rootfs},if=ide,index=0,media=cdrom"
-
- self.vm.add_args('-kernel', kernel,
- '-append', append,
- '-drive', drive,
- '-device', netdev)
-
- self.vm.set_console(console_index=0)
- self.vm.launch()
-
- wait_for_console_pattern(self,
- "The test result is PASS",
- "The test result is FAIL",
- vm=None)
- # no need to gracefully shutdown, just finish
- self.vm.kill()
-
- def test_igb(self):
- """
- :avocado: tags=device:igb
- """
- self.common_test_code("igb")
-
- def test_igb_nomsi(self):
- """
- :avocado: tags=device:igb
- """
- self.common_test_code("igb", "pci=nomsi")
-
- # It seems the other popular cards we model in QEMU currently fail
- # the pattern test with:
- #
- # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A
- #
- # So for now we skip them.
-
- @skip("Incomplete reg 0x00178 support")
- def test_e1000(self):
- """
- :avocado: tags=device:e1000
- """
- self.common_test_code("e1000")
-
- @skip("Incomplete reg 0x00178 support")
- def test_i82550(self):
- """
- :avocado: tags=device:i82550
- """
- self.common_test_code("i82550")
diff --git a/tests/avocado/pc_cpu_hotplug_props.py b/tests/avocado/pc_cpu_hotplug_props.py
deleted file mode 100644
index 4bd3e02..0000000
--- a/tests/avocado/pc_cpu_hotplug_props.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Ensure CPU die-id can be omitted on -device
-#
-# Copyright (c) 2019 Red Hat Inc
-#
-# Author:
-# Eduardo Habkost <ehabkost@redhat.com>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-from avocado_qemu import QemuSystemTest
-
-class OmittedCPUProps(QemuSystemTest):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=cpu:qemu64
- """
- def test_no_die_id(self):
- self.vm.add_args('-nodefaults', '-S')
- self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8')
- self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0')
- self.vm.launch()
- self.assertEqual(len(self.vm.cmd('query-cpus-fast')), 2)
diff --git a/tests/avocado/ppc_405.py b/tests/avocado/ppc_405.py
deleted file mode 100644
index 4e7e01a..0000000
--- a/tests/avocado/ppc_405.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Test that the U-Boot firmware boots on ppc 405 machines and check the console
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command_and_wait_for_pattern
-
-class Ppc405Machine(QemuSystemTest):
-
- timeout = 90
-
- def do_test_ppc405(self):
- uboot_url = ('https://gitlab.com/huth/u-boot/-/raw/'
- 'taihu-2021-10-09/u-boot-taihu.bin')
- uboot_hash = '3208940e908a5edc7c03eab072c60f0dcfadc2ab'
- file_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash)
- self.vm.set_console(console_index=1)
- self.vm.add_args('-bios', file_path)
- self.vm.launch()
- wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board')
- exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP')
-
- def test_ppc_ref405ep(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:ref405ep
- :avocado: tags=cpu:405ep
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.do_test_ppc405()
diff --git a/tests/avocado/ppc_74xx.py b/tests/avocado/ppc_74xx.py
deleted file mode 100644
index f54757c..0000000
--- a/tests/avocado/ppc_74xx.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Smoke tests for 74xx cpus (aka G4).
-#
-# Copyright (c) 2021, IBM Corp.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class ppc74xxCpu(QemuSystemTest):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=accel:tcg
- """
- timeout = 5
-
- def test_ppc_7400(self):
- """
- :avocado: tags=cpu:7400
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7410(self):
- """
- :avocado: tags=cpu:7410
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,74xx')
-
- def test_ppc_7441(self):
- """
- :avocado: tags=cpu:7441
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7445(self):
- """
- :avocado: tags=cpu:7445
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7447(self):
- """
- :avocado: tags=cpu:7447
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7447a(self):
- """
- :avocado: tags=cpu:7447a
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7448(self):
- """
- :avocado: tags=cpu:7448
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx')
-
- def test_ppc_7450(self):
- """
- :avocado: tags=cpu:7450
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7451(self):
- """
- :avocado: tags=cpu:7451
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7455(self):
- """
- :avocado: tags=cpu:7455
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7457(self):
- """
- :avocado: tags=cpu:7457
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
-
- def test_ppc_7457a(self):
- """
- :avocado: tags=cpu:7457a
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
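The twelve 74xx tests above share an identical body and differ only in the cpu tag and the OpenBIOS CPU string they expect. Avocado reads the tags from each test's docstring, so the tests themselves have to stay separate, but the repeated body could be factored into a shared helper; a minimal sketch under that assumption (the helper name is illustrative, not part of the original file):

    def _check_cpu_type(self, cpu_type):
        """Boot OpenBIOS and verify the CPU type it reports on the console."""
        self.require_accelerator("tcg")
        self.vm.set_console()
        self.vm.launch()
        wait_for_console_pattern(self, '>> OpenBIOS')
        wait_for_console_pattern(self, '>> CPU type %s' % cpu_type)

    def test_ppc_7400(self):
        """
        :avocado: tags=cpu:7400
        """
        self._check_cpu_type('PowerPC,G4')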
diff --git a/tests/avocado/ppc_amiga.py b/tests/avocado/ppc_amiga.py
deleted file mode 100644
index b6f866f..0000000
--- a/tests/avocado/ppc_amiga.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Test AmigaNG boards
-#
-# Copyright (c) 2023 BALATON Zoltan
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado.utils import process
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class AmigaOneMachine(QemuSystemTest):
-
- timeout = 90
-
- def test_ppc_amigaone(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:amigaone
- :avocado: tags=device:articia
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- tar_name = 'A1Firmware_Floppy_05-Mar-2005.zip'
- tar_url = ('https://www.hyperion-entertainment.com/index.php/'
- 'downloads?view=download&format=raw&file=25')
- tar_hash = 'c52e59bc73e31d8bcc3cc2106778f7ac84f6c755'
- zip_file = self.fetch_asset(tar_name, locations=tar_url,
- asset_hash=tar_hash)
- archive.extract(zip_file, self.workdir)
- cmd = f"tail -c 524288 {self.workdir}/floppy_edition/updater.image >{self.workdir}/u-boot-amigaone.bin"
- process.run(cmd, shell=True)
-
- self.vm.set_console()
- self.vm.add_args('-bios', self.workdir + '/u-boot-amigaone.bin')
- self.vm.launch()
- wait_for_console_pattern(self, 'FLASH:')
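The firmware preparation above shells out to tail -c 524288 to keep the last 512 KiB of updater.image. If avoiding the external process were preferred, the same copy can be done in pure Python; a sketch under the same file layout (copy_tail is a hypothetical helper, not part of the test):

import os

def copy_tail(src, dst, size=512 * 1024):
    """Copy the last `size` bytes of src into dst."""
    with open(src, 'rb') as fin, open(dst, 'wb') as fout:
        fin.seek(max(os.path.getsize(src) - size, 0))
        fout.write(fin.read(size))

# e.g. copy_tail(self.workdir + '/floppy_edition/updater.image',
#                self.workdir + '/u-boot-amigaone.bin')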
diff --git a/tests/avocado/ppc_bamboo.py b/tests/avocado/ppc_bamboo.py
deleted file mode 100644
index a81be3d..0000000
--- a/tests/avocado/ppc_bamboo.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Test that the Linux kernel boots on the ppc bamboo board and check the console
-#
-# Copyright (c) 2021 Red Hat
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command_and_wait_for_pattern
-
-class BambooMachine(QemuSystemTest):
-
- timeout = 90
-
- def test_ppc_bamboo(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:bamboo
- :avocado: tags=cpu:440epb
- :avocado: tags=device:rtl8139
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.require_netdev('user')
- tar_url = ('http://landley.net/aboriginal/downloads/binaries/'
- 'system-image-powerpc-440fp.tar.gz')
- tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir +
- '/system-image-powerpc-440fp/linux',
- '-initrd', self.workdir +
- '/system-image-powerpc-440fp/rootfs.cpio.gz',
- '-nic', 'user,model=rtl8139,restrict=on')
- self.vm.launch()
- wait_for_console_pattern(self, 'Type exit when done')
- exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2',
- '10.0.2.2 is alive!')
- exec_command_and_wait_for_pattern(self, 'halt', 'System Halted')
diff --git a/tests/avocado/ppc_hv_tests.py b/tests/avocado/ppc_hv_tests.py
deleted file mode 100644
index bf8822b..0000000
--- a/tests/avocado/ppc_hv_tests.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Tests that specifically try to exercise hypervisor features of the
-# target machines. powernv supports the Power hypervisor ISA, and
-# pseries supports the nested-HV hypervisor spec.
-#
-# Copyright (c) 2023 IBM Corporation
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado import skipIf, skipUnless
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern, exec_command
-import os
-import time
-import subprocess
-from datetime import datetime
-
-deps = ["xorriso"] # dependent tools needed in the test setup/box.
-
-def which(tool):
- """ looks up the full path for @tool, returns None if not found
- or if @tool does not have executable permissions.
- """
- paths=os.getenv('PATH')
- for p in paths.split(os.path.pathsep):
- p = os.path.join(p, tool)
- if os.path.exists(p) and os.access(p, os.X_OK):
- return p
- return None
-
-def missing_deps():
- """ returns True if any of the test dependent tools are absent.
- """
- for dep in deps:
- if which(dep) is None:
- return True
- return False
-
-# Alpine is a lightweight distro that supports QEMU. These tests boot it
-# on the target machine and then run a nested QEMU guest inside it in
-# KVM mode, using the same Alpine image for the guest.
-# QEMU packages are downloaded and installed in each test. That is not a
-# large download, but it would be more polite to create a qcow2 image
-# with QEMU preinstalled and use that instead.
-# XXX: The order of these tests seems to matter, see git blame.
-@skipIf(missing_deps(), 'dependencies (%s) not installed' % ','.join(deps))
-@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck due to console handling problem')
-@skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited')
-@skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited')
-class HypervisorTest(QemuSystemTest):
-
- timeout = 1000
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
- panic_message = 'Kernel panic - not syncing'
- good_message = 'VFS: Cannot open root device'
-
- def extract_from_iso(self, iso, path):
- """
- Extracts a file from an iso file into the test workdir
-
- :param iso: path to the iso file
- :param path: path within the iso file of the file to be extracted
- :returns: path of the extracted file
- """
- filename = os.path.basename(path)
-
- cwd = os.getcwd()
- os.chdir(self.workdir)
-
- with open(filename, "w") as outfile:
- cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename)
- subprocess.run(cmd.split(),
- stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-
- os.chdir(cwd)
-
- # Return the complete path to the extracted file. Note that 'filename'
- # is just the basename of 'path', so it can be joined to self.workdir
- # directly even though callers pass 'path' with a leading slash.
- return os.path.normpath(os.path.join(self.workdir, filename))
-
- def setUp(self):
- super().setUp()
-
- iso_url = ('https://dl-cdn.alpinelinux.org/alpine/v3.18/releases/ppc64le/alpine-standard-3.18.4-ppc64le.iso')
-
- # Alpine uses sha256, so I recalculated this hash myself
- iso_sha256 = 'c26b8d3e17c2f3f0fed02b4b1296589c2390e6d5548610099af75300edd7b3ff'
- iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha256,
- algorithm = "sha256")
-
- self.iso_path = iso_path
- self.vmlinuz = self.extract_from_iso(iso_path, '/boot/vmlinuz-lts')
- self.initramfs = self.extract_from_iso(iso_path, '/boot/initramfs-lts')
-
- def do_start_alpine(self):
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
- self.vm.add_args("-kernel", self.vmlinuz)
- self.vm.add_args("-initrd", self.initramfs)
- self.vm.add_args("-smp", "4", "-m", "2g")
- self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none,id=drive0")
-
- self.vm.launch()
- wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18')
- exec_command(self, 'root')
- wait_for_console_pattern(self, 'localhost login:')
- wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.')
- # If the time is wrong, SSL certificates can fail.
- exec_command(self, 'date -s "' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S' + '"'))
- exec_command(self, 'setup-alpine -qe')
- wait_for_console_pattern(self, 'Updating repository indexes... done.')
-
- def do_stop_alpine(self):
- exec_command(self, 'poweroff')
- wait_for_console_pattern(self, 'alpine:~#')
- self.vm.wait()
-
- def do_setup_kvm(self):
- exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/main > /etc/apk/repositories')
- wait_for_console_pattern(self, 'alpine:~#')
- exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/community >> /etc/apk/repositories')
- wait_for_console_pattern(self, 'alpine:~#')
- exec_command(self, 'apk update')
- wait_for_console_pattern(self, 'alpine:~#')
- exec_command(self, 'apk add qemu-system-ppc64')
- wait_for_console_pattern(self, 'alpine:~#')
- exec_command(self, 'modprobe kvm-hv')
- wait_for_console_pattern(self, 'alpine:~#')
-
- # This passes the level-1 guest's block device (backed by the install
- # ISO) straight through to the nested guest as its install media. This
- # is a bit hacky, but it allows reusing the ISO without configuring a
- # passthrough filesystem.
- def do_test_kvm(self, hpt=False):
- if hpt:
- append = 'disable_radix'
- else:
- append = ''
- exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g '
- '-machine pseries,x-vof=on,accel=kvm '
- '-machine cap-cfpc=broken,cap-sbbc=broken,'
- 'cap-ibs=broken,cap-ccf-assist=off '
- '-drive file=/dev/nvme0n1,format=raw,readonly=on '
- '-initrd /media/nvme0n1/boot/initramfs-lts '
- '-kernel /media/nvme0n1/boot/vmlinuz-lts '
- '-append \'usbcore.nousb ' + append + '\'')
- # Alpine 3.18 kernel seems to crash in XHCI USB driver.
- wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18')
- exec_command(self, 'root')
- wait_for_console_pattern(self, 'localhost login:')
- wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.')
- exec_command(self, 'poweroff >& /dev/null')
- wait_for_console_pattern(self, 'localhost:~#')
- wait_for_console_pattern(self, 'reboot: Power down')
- time.sleep(1)
- exec_command(self, '')
- wait_for_console_pattern(self, 'alpine:~#')
-
- def test_hv_pseries(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg,thread=multi")
- self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
- self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on")
- self.do_start_alpine()
- self.do_setup_kvm()
- self.do_test_kvm()
- self.do_stop_alpine()
-
- def test_hv_pseries_kvm(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=accel:kvm
- """
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
- self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off")
- self.do_start_alpine()
- self.do_setup_kvm()
- self.do_test_kvm()
- self.do_stop_alpine()
-
- def test_hv_powernv(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.add_args("-accel", "tcg,thread=multi")
- self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0',
- '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0',
- '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine')
- self.do_start_alpine()
- self.do_setup_kvm()
- self.do_test_kvm()
- self.do_test_kvm(True)
- self.do_stop_alpine()
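The which() helper near the top of this file walks PATH by hand; the standard library's shutil.which() (available since Python 3.3) performs the same lookup, including the executable-permission check, so missing_deps() could shrink to a one-liner. A minimal equivalent sketch:

import shutil

deps = ["xorriso"]  # external tools required on the test host

def missing_deps():
    """Return True if any required external tool is absent from PATH."""
    return any(shutil.which(dep) is None for dep in deps)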
diff --git a/tests/avocado/ppc_mpc8544ds.py b/tests/avocado/ppc_mpc8544ds.py
deleted file mode 100644
index b599fb1..0000000
--- a/tests/avocado/ppc_mpc8544ds.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Test that the Linux kernel boots on ppc machines and check the console
-#
-# Copyright (c) 2018, 2020 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class Mpc8544dsMachine(QemuSystemTest):
-
- timeout = 90
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
- panic_message = 'Kernel panic - not syncing'
-
- def test_ppc_mpc8544ds(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:mpc8544ds
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day04.tar.xz')
- tar_hash = 'f46724d281a9f30fa892d458be7beb7d34dc25f9'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin')
- self.vm.launch()
- wait_for_console_pattern(self, 'QEMU advent calendar 2020',
- self.panic_message)
diff --git a/tests/avocado/ppc_powernv.py b/tests/avocado/ppc_powernv.py
deleted file mode 100644
index 4342941..0000000
--- a/tests/avocado/ppc_powernv.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Test that the Linux kernel boots on ppc powernv machines and check the console
-#
-# Copyright (c) 2018, 2020 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class powernvMachine(QemuSystemTest):
-
- timeout = 90
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
- panic_message = 'Kernel panic - not syncing'
- good_message = 'VFS: Cannot open root device'
-
- def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE):
- self.require_accelerator("tcg")
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/ppc64le/os'
- '/ppc/ppc64/vmlinuz')
- kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path,
- '-append', command_line)
- self.vm.launch()
-
- def test_linux_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
-
- self.do_test_linux_boot()
- console_pattern = 'VFS: Cannot open root device'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
-
- def test_linux_smp_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
-
- self.vm.add_args('-smp', '4')
- self.do_test_linux_boot()
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_linux_smp_hpt_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
-
- self.vm.add_args('-smp', '4')
- self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
- 'disable_radix')
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
- self.panic_message)
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_linux_smt_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
-
- self.vm.add_args('-smp', '4,threads=4')
- self.do_test_linux_boot()
- console_pattern = 'CPU maps initialized for 4 threads per core'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_linux_big_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
-
- self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
-
- # powernv does not support NUMA
- self.do_test_linux_boot()
- console_pattern = 'CPU maps initialized for 4 threads per core'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
diff --git a/tests/avocado/ppc_prep_40p.py b/tests/avocado/ppc_prep_40p.py
deleted file mode 100644
index d4f1eb7..0000000
--- a/tests/avocado/ppc_prep_40p.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Functional test that boots a PReP/40p machine and checks its serial console.
-#
-# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-
-from avocado import skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-
-class IbmPrep40pMachine(QemuSystemTest):
-
- timeout = 60
-
- # 12H0455 PPS Firmware Licensed Materials
- # Property of IBM (C) Copyright IBM Corp. 1994.
- # All rights reserved.
- # U.S. Government Users Restricted Rights - Use, duplication or disclosure
- # restricted by GSA ADP Schedule Contract with IBM Corp.
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_factory_firmware_and_netbsd(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:40p
- :avocado: tags=os:netbsd
- :avocado: tags=slowness:high
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- bios_url = ('http://ftpmirror.your.org/pub/misc/'
- 'ftp.software.ibm.com/rs6000/firmware/'
- '7020-40p/P12H0456.IMG')
- bios_hash = '1775face4e6dc27f3a6ed955ef6eb331bf817f03'
- bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash)
- drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
- 'NetBSD-4.0/prep/installation/floppy/generic_com0.fs')
- drive_hash = 'dbcfc09912e71bd5f0d82c7c1ee43082fb596ceb'
- drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash)
-
- self.vm.set_console()
- self.vm.add_args('-bios', bios_path,
- '-fda', drive_path)
- self.vm.launch()
- os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007'
- wait_for_console_pattern(self, os_banner)
- wait_for_console_pattern(self, 'Model: IBM PPS Model 6015')
-
- def test_openbios_192m(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:40p
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- self.vm.set_console()
- self.vm.add_args('-m', '192') # test fw_cfg
-
- self.vm.launch()
- wait_for_console_pattern(self, '>> OpenBIOS')
- wait_for_console_pattern(self, '>> Memory: 192M')
- wait_for_console_pattern(self, '>> CPU type PowerPC,604')
-
- def test_openbios_and_netbsd(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:40p
- :avocado: tags=os:netbsd
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
- 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso')
- drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e'
- drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash,
- algorithm='md5')
- self.vm.set_console()
- self.vm.add_args('-cdrom', drive_path,
- '-boot', 'd')
-
- self.vm.launch()
- wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9')
diff --git a/tests/avocado/ppc_pseries.py b/tests/avocado/ppc_pseries.py
deleted file mode 100644
index 74aaa4a..0000000
--- a/tests/avocado/ppc_pseries.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Test that the Linux kernel boots on ppc machines and check the console
-#
-# Copyright (c) 2018, 2020 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class pseriesMachine(QemuSystemTest):
-
- timeout = 90
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
- panic_message = 'Kernel panic - not syncing'
- good_message = 'VFS: Cannot open root device'
-
- def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE):
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/ppc64le/os'
- '/ppc/ppc64/vmlinuz')
- kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.vm.set_console()
- self.vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line)
- self.vm.launch()
-
- def test_ppc64_vof_linux_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.vm.add_args('-machine', 'x-vof=on')
- self.do_test_ppc64_linux_boot()
- console_pattern = 'VFS: Cannot open root device'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
-
- def test_ppc64_linux_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.do_test_ppc64_linux_boot()
- console_pattern = 'VFS: Cannot open root device'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
-
- def test_ppc64_linux_smp_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.vm.add_args('-smp', '4')
- self.do_test_ppc64_linux_boot()
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_ppc64_linux_hpt_smp_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.vm.add_args('-smp', '4')
- self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
- 'disable_radix')
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
- self.panic_message)
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_ppc64_linux_smt_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.vm.add_args('-smp', '4,threads=4')
- self.do_test_ppc64_linux_boot()
- console_pattern = 'CPU maps initialized for 4 threads per core'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- console_pattern = 'smp: Brought up 1 node, 4 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
-
- def test_ppc64_linux_big_boot(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- """
-
- self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
- self.vm.add_args('-m', '512M',
- '-object', 'memory-backend-ram,size=256M,id=m0',
- '-object', 'memory-backend-ram,size=256M,id=m1')
- self.vm.add_args('-numa', 'node,nodeid=0,memdev=m0')
- self.vm.add_args('-numa', 'node,nodeid=1,memdev=m1')
- self.do_test_ppc64_linux_boot()
- console_pattern = 'CPU maps initialized for 4 threads per core'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
- wait_for_console_pattern(self, console_pattern, self.panic_message)
- wait_for_console_pattern(self, self.good_message, self.panic_message)
diff --git a/tests/avocado/ppc_virtex_ml507.py b/tests/avocado/ppc_virtex_ml507.py
deleted file mode 100644
index a73f8ae..0000000
--- a/tests/avocado/ppc_virtex_ml507.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Test that the Linux kernel boots on ppc machines and check the console
-#
-# Copyright (c) 2018, 2020 Red Hat, Inc.
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado.utils import archive
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class VirtexMl507Machine(QemuSystemTest):
-
- timeout = 90
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
- panic_message = 'Kernel panic - not syncing'
-
- def test_ppc_virtex_ml507(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:virtex-ml507
- :avocado: tags=accel:tcg
- """
- self.require_accelerator("tcg")
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day08.tar.xz')
- tar_hash = '74c68f5af7a7b8f21c03097b298f3bb77ff52c1f'
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- archive.extract(file_path, self.workdir)
- self.vm.set_console()
- self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux',
- '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb',
- '-m', '512')
- self.vm.launch()
- wait_for_console_pattern(self, 'QEMU advent calendar 2020',
- self.panic_message)
diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py
deleted file mode 100644
index 232d287..0000000
--- a/tests/avocado/replay_kernel.py
+++ /dev/null
@@ -1,550 +0,0 @@
-# Record/replay test that boots a Linux kernel
-#
-# Copyright (c) 2020 ISP RAS
-#
-# Author:
-# Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import lzma
-import shutil
-import logging
-import time
-
-from avocado import skip
-from avocado import skipUnless
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import archive
-from avocado.utils import process
-from boot_linux_console import LinuxKernelTest
-
-class ReplayKernelBase(LinuxKernelTest):
- """
- Boots a Linux kernel in record mode and checks that the console
- is operational and the kernel command line is properly passed
- from QEMU to the kernel.
- Then replays the same scenario and verifies that QEMU correctly
- terminates.
- """
-
- timeout = 120
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
-
- def run_vm(self, kernel_path, kernel_command_line, console_pattern,
- record, shift, args, replay_path):
- # icount requires TCG to be available
- self.require_accelerator('tcg')
-
- logger = logging.getLogger('replay')
- start_time = time.time()
- vm = self.get_vm()
- vm.set_console()
- if record:
- logger.info('recording the execution...')
- mode = 'record'
- else:
- logger.info('replaying the execution...')
- mode = 'replay'
- vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' %
- (shift, mode, replay_path),
- '-kernel', kernel_path,
- '-append', kernel_command_line,
- '-net', 'none',
- '-no-reboot')
- if args:
- vm.add_args(*args)
- vm.launch()
- self.wait_for_console_pattern(console_pattern, vm)
- if record:
- vm.shutdown()
- logger.info('finished the recording with log size %s bytes'
- % os.path.getsize(replay_path))
- else:
- vm.wait()
- logger.info('successfully finished the replay')
- elapsed = time.time() - start_time
- logger.info('elapsed time %.2f sec' % elapsed)
- return elapsed
-
- def run_rr(self, kernel_path, kernel_command_line, console_pattern,
- shift=7, args=None):
- replay_path = os.path.join(self.workdir, 'replay.bin')
- t1 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
- True, shift, args, replay_path)
- t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
- False, shift, args, replay_path)
- logger = logging.getLogger('replay')
- logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
-
-class ReplayKernelNormal(ReplayKernelBase):
-
- def test_i386_pc(self):
- """
- :avocado: tags=arch:i386
- :avocado: tags=machine:pc
- """
- kernel_url = ('https://storage.tuxboot.com/20230331/i386/bzImage')
- kernel_hash = 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956'
- kernel_path = self.fetch_asset(kernel_url,
- asset_hash=kernel_hash,
- algorithm = "sha256")
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- console_pattern = 'VFS: Cannot open root device'
-
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
-
- # See https://gitlab.com/qemu-project/qemu/-/issues/2094
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck')
- def test_x86_64_pc(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:pc
- :avocado: tags=flaky
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/x86_64/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = '23bebd2680757891cf7adedb033532163a792495'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- console_pattern = 'VFS: Cannot open root device'
-
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
-
- def test_mips_malta(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=machine:malta
- :avocado: tags=endian:big
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20130217T032700Z/pool/main/l/linux-2.6/'
- 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb')
- deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-2.6.32-5-4kc-malta')
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- console_pattern = 'Kernel command line: %s' % kernel_command_line
-
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
-
- def test_mips64el_malta(self):
- """
- This test requires the ar tool to extract "data.tar.gz" from
- the Debian package.
-
- The kernel can be rebuilt using this Debian kernel source [1] and
- following the instructions on [2].
-
- [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/
- #linux-source-2.6.32_2.6.32-48
- [2] https://kernel-team.pages.debian.net/kernel-handbook/
- ch-common-tasks.html#s-common-official
-
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20130217T032700Z/pool/main/l/linux-2.6/'
- 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb')
- deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-2.6.32-5-5kc-malta')
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
-
- def test_aarch64_virt(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:cortex-a53
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/aarch64/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- console_pattern = 'VFS: Cannot open root device'
-
- self.run_rr(kernel_path, kernel_command_line, console_pattern)
-
- def test_arm_virt(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:virt
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/armhfp/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- console_pattern = 'VFS: Cannot open root device'
-
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1)
-
- def test_arm_cubieboard_initrd(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:cubieboard
- """
- deb_url = ('https://apt.armbian.com/pool/main/l/'
- 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb')
- deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinuz-6.6.16-current-sunxi')
- dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb'
- dtb_path = self.extract_from_deb(deb_path, dtb_path)
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
- 'arm/rootfs-armv5.cpio.gz')
- initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0,115200 '
- 'usbcore.nousb '
- 'panic=-1 noreboot')
- console_pattern = 'Boot successful.'
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1,
- args=('-dtb', dtb_path,
- '-initrd', initrd_path,
- '-no-reboot'))
-
- def test_s390x_s390_ccw_virtio(self):
- """
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/s390x/os/images'
- '/kernel.img')
- kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0'
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9)
-
- def test_alpha_clipper(self):
- """
- :avocado: tags=arch:alpha
- :avocado: tags=machine:clipper
- """
- kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/'
- 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz')
- kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- uncompressed_kernel = archive.uncompress(kernel_path, self.workdir)
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.run_rr(uncompressed_kernel, kernel_command_line, console_pattern, shift=9,
- args=('-nodefaults', ))
-
- def test_ppc64_pseries(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=accel:tcg
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/ppc64le/os'
- '/ppc/ppc64/vmlinuz')
- kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0'
- console_pattern = 'VFS: Cannot open root device'
- self.run_rr(kernel_path, kernel_command_line, console_pattern)
-
- def test_ppc64_powernv(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=accel:tcg
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/29/Everything/ppc64le/os'
- '/ppc/ppc64/vmlinuz')
- kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + \
- 'console=tty0 console=hvc0'
- console_pattern = 'VFS: Cannot open root device'
- self.run_rr(kernel_path, kernel_command_line, console_pattern)
-
- def test_m68k_q800(self):
- """
- :avocado: tags=arch:m68k
- :avocado: tags=machine:q800
- """
- deb_url = ('https://snapshot.debian.org/archive/debian-ports'
- '/20191021T083923Z/pool-m68k/main'
- '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb')
- deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-5.3.0-1-m68k')
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0 vga=off')
- console_pattern = 'No filesystem could mount root'
- self.run_rr(kernel_path, kernel_command_line, console_pattern)
-
- def do_test_advcal_2018(self, file_path, kernel_name, args=None):
- archive.extract(file_path, self.workdir)
-
- for entry in os.scandir(self.workdir):
- if entry.name.startswith('day') and entry.is_dir():
- kernel_path = os.path.join(entry.path, kernel_name)
- break
-
- kernel_command_line = ''
- console_pattern = 'QEMU advent calendar'
- self.run_rr(kernel_path, kernel_command_line, console_pattern,
- args=args)
-
- def test_arm_vexpressa9(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=machine:vexpress-a9
- """
- tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day16.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- dtb_path = self.workdir + '/day16/vexpress-v2p-ca9.dtb'
- self.do_test_advcal_2018(file_path, 'winter.zImage',
- args=('-dtb', dtb_path))
-
- def test_m68k_mcf5208evb(self):
- """
- :avocado: tags=arch:m68k
- :avocado: tags=machine:mcf5208evb
- """
- tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day07.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'sanity-clause.elf')
-
- def test_microblaze_s3adsp1800(self):
- """
- :avocado: tags=arch:microblaze
- :avocado: tags=machine:petalogix-s3adsp1800
- """
- tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day17.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'ballerina.bin')
-
- def test_ppc64_e500(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:ppce500
- :avocado: tags=cpu:e5500
- """
- tar_hash = '6951d86d644b302898da2fd701739c9406527fe1'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day19.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'uImage')
-
- def test_or1k_sim(self):
- """
- :avocado: tags=arch:or1k
- :avocado: tags=machine:or1k-sim
- """
- tar_hash = '20334cdaf386108c530ff0badaecc955693027dd'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day20.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'vmlinux')
-
- def test_ppc_g3beige(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:g3beige
- """
- tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day15.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'invaders.elf',
- args=('-M', 'graphics=off'))
-
- def test_ppc_mac99(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:mac99
- """
- tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day15.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'invaders.elf',
- args=('-M', 'graphics=off'))
-
- def test_sparc_ss20(self):
- """
- :avocado: tags=arch:sparc
- :avocado: tags=machine:SS-20
- """
- tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day11.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'zImage.elf')
-
- def test_xtensa_lx60(self):
- """
- :avocado: tags=arch:xtensa
- :avocado: tags=machine:lx60
- :avocado: tags=cpu:dc233c
- """
- tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34'
- tar_url = ('https://qemu-advcal.gitlab.io'
- '/qac-best-of-multiarch/download/day02.tar.xz')
- file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
- self.do_test_advcal_2018(file_path, 'santas-sleigh-ride.elf')
-
-@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
-class ReplayKernelSlow(ReplayKernelBase):
- # Override the timeout, because this kernel includes an inner
- # loop which triggers TB recompilations during replay,
- # making it very slow.
- timeout = 180
-
- def test_mips_malta_cpio(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=machine:malta
- :avocado: tags=endian:big
- :avocado: tags=slowness:high
- """
- deb_url = ('http://snapshot.debian.org/archive/debian/'
- '20160601T041800Z/pool/main/l/linux/'
- 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb')
- deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8'
- deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
- kernel_path = self.extract_from_deb(deb_path,
- '/boot/vmlinux-4.5.0-2-4kc-malta')
- initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
- '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/'
- 'mips/rootfs.cpio.gz')
- initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99'
- initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, "rootfs.cpio")
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0 console=tty '
- 'rdinit=/sbin/init noreboot')
- console_pattern = 'Boot successful.'
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
- args=('-initrd', initrd_path))
-
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
- def test_mips64el_malta_5KEc_cpio(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=slowness:high
- :avocado: tags=cpu:5KEc
- """
- kernel_url = ('https://github.com/philmd/qemu-testing-blob/'
- 'raw/9ad2df38/mips/malta/mips64el/'
- 'vmlinux-3.19.3.mtoman.20150408')
- kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- initrd_url = ('https://github.com/groeck/linux-build-test/'
- 'raw/8584a59e/rootfs/'
- 'mipsel64/rootfs.mipsel64r1.cpio.gz')
- initrd_hash = '1dbb8a396e916847325284dbe2151167'
- initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5',
- asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, "rootfs.cpio")
- archive.gzip_uncompress(initrd_path_gz, initrd_path)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyS0 console=tty '
- 'rdinit=/sbin/init noreboot')
- console_pattern = 'Boot successful.'
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
- args=('-initrd', initrd_path))
-
- def do_test_mips_malta32el_nanomips(self, kernel_path_xz):
- kernel_path = os.path.join(self.workdir, "kernel")
- with lzma.open(kernel_path_xz, 'rb') as f_in:
- with open(kernel_path, 'wb') as f_out:
- shutil.copyfileobj(f_in, f_out)
-
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'mem=256m@@0x0 '
- 'console=ttyS0')
- console_pattern = 'Kernel command line: %s' % kernel_command_line
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
-
- def test_mips_malta32el_nanomips_4k(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page4k.xz')
- kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
- kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- self.do_test_mips_malta32el_nanomips(kernel_path_xz)
-
- def test_mips_malta32el_nanomips_16k_up(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page16k_up.xz')
- kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
- kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- self.do_test_mips_malta32el_nanomips(kernel_path_xz)
-
- def test_mips_malta32el_nanomips_64k_dbg(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=endian:little
- :avocado: tags=cpu:I7200
- """
- kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
- 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
- 'generic_nano32r6el_page64k_dbg.xz')
- kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
- kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
- self.do_test_mips_malta32el_nanomips(kernel_path_xz)
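run_rr() above records a boot and then replays it with identical -icount arguments. The shift value controls how fast virtual time advances: with -icount shift=N, QEMU accounts roughly 2**N nanoseconds of virtual time per executed instruction. A small piece of arithmetic (not part of the test) shows what the shifts used in this file imply:

def virtual_insns_per_sec(shift):
    """Approximate virtual instructions per second for -icount shift=N."""
    return 1_000_000_000 // (2 ** shift)

# shift=5 (x86/MIPS tests) -> 31,250,000 instructions per virtual second
# shift=7 (run_rr default) -> 7,812,500 instructions per virtual second
print(virtual_insns_per_sec(5), virtual_insns_per_sec(7))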
diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py
deleted file mode 100644
index f3a43dc..0000000
--- a/tests/avocado/replay_linux.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Record/replay test that boots a complete Linux system via a cloud image
-#
-# Copyright (c) 2020 ISP RAS
-#
-# Author:
-# Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import logging
-import time
-
-from avocado import skipUnless
-from avocado_qemu import BUILD_DIR
-from avocado.utils import cloudinit
-from avocado.utils import network
-from avocado.utils import vmimage
-from avocado.utils import datadrainer
-from avocado.utils.path import find_command
-from avocado_qemu import LinuxTest
-
-class ReplayLinux(LinuxTest):
- """
- Boots a Linux system, checking for a successful initialization
- """
-
- timeout = 1800
- chksum = None
- hdd = 'ide-hd'
- cd = 'ide-cd'
- bus = 'ide'
-
- def setUp(self):
- # LinuxTest's setUp() does several replay-incompatible things, but the
- # class provides useful helpers. Skip LinuxTest.setUp() itself and call
- # only the pieces that are needed here.
- super(LinuxTest, self).setUp()
- self._set_distro()
- self.boot_path = self.download_boot()
- self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0),
- self.name)
- ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys()
- self.cloudinit_path = self.prepare_cloudinit(ssh_pubkey)
-
- def vm_add_disk(self, vm, path, id, device):
- bus_string = ''
- if self.bus:
- bus_string = ',bus=%s.%d' % (self.bus, id,)
- vm.add_args('-drive', 'file=%s,snapshot=on,id=disk%s,if=none' % (path, id))
- vm.add_args('-drive',
- 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id))
- vm.add_args('-device',
- '%s,drive=disk%s-rr%s' % (device, id, bus_string))
-
- def vm_add_cdrom(self, vm, path, id, device):
- vm.add_args('-drive', 'file=%s,id=disk%s,if=none,media=cdrom' % (path, id))
-
- def launch_and_wait(self, record, args, shift):
- self.require_netdev('user')
- vm = self.get_vm()
- vm.add_args('-smp', '1')
- vm.add_args('-m', '1024')
- vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
- '-device', 'virtio-net,netdev=vnet')
- vm.add_args('-object', 'filter-replay,id=replay,netdev=vnet')
- if args:
- vm.add_args(*args)
- self.vm_add_disk(vm, self.boot_path, 0, self.hdd)
- self.vm_add_cdrom(vm, self.cloudinit_path, 1, self.cd)
- logger = logging.getLogger('replay')
- if record:
- logger.info('recording the execution...')
- mode = 'record'
- else:
- logger.info('replaying the execution...')
- mode = 'replay'
- replay_path = os.path.join(self.workdir, 'replay.bin')
- vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' %
- (shift, mode, replay_path))
-
- start_time = time.time()
-
- vm.set_console()
- vm.launch()
- console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(),
- logger=self.log.getChild('console'),
- stop_check=(lambda : not vm.is_running()))
- console_drainer.start()
- if record:
- while not self.phone_server.instance_phoned_back:
- self.phone_server.handle_request()
- vm.shutdown()
- logger.info('finished the recording with log size %s bytes'
- % os.path.getsize(replay_path))
- else:
- vm.event_wait('SHUTDOWN', self.timeout)
- vm.wait()
- logger.info('successfully finished the replay')
- elapsed = time.time() - start_time
- logger.info('elapsed time %.2f sec' % elapsed)
- return elapsed
-
- def run_rr(self, args=None, shift=7):
- t1 = self.launch_and_wait(True, args, shift)
- t2 = self.launch_and_wait(False, args, shift)
- logger = logging.getLogger('replay')
- logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
-
-@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
-class ReplayLinuxX8664(ReplayLinux):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=accel:tcg
- """
-
- chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0'
-
- def test_pc_i440fx(self):
- """
- :avocado: tags=machine:pc
- """
- self.run_rr(shift=1)
-
- def test_pc_q35(self):
- """
- :avocado: tags=machine:q35
- """
- self.run_rr(shift=3)
-
-@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
-class ReplayLinuxX8664Virtio(ReplayLinux):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=virtio
- :avocado: tags=accel:tcg
- """
-
- hdd = 'virtio-blk-pci'
- cd = 'virtio-blk-pci'
- bus = None
-
- chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0'
-
- def test_pc_i440fx(self):
- """
- :avocado: tags=machine:pc
- """
- self.run_rr(shift=1)
-
- def test_pc_q35(self):
- """
- :avocado: tags=machine:q35
- """
- self.run_rr(shift=3)
-
-@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
-class ReplayLinuxAarch64(ReplayLinux):
- """
- :avocado: tags=accel:tcg
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:max
- """
-
- chksum = '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49'
-
- hdd = 'virtio-blk-device'
- cd = 'virtio-blk-device'
- bus = None
-
- def get_common_args(self):
- return ('-bios',
- os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd'),
- "-cpu", "max,lpa2=off",
- '-device', 'virtio-rng-pci,rng=rng0',
- '-object', 'rng-builtin,id=rng0')
-
- def test_virt_gicv2(self):
- """
- :avocado: tags=machine:gic-version=2
- """
-
- self.run_rr(shift=3,
- args=(*self.get_common_args(),
- "-machine", "virt,gic-version=2"))
-
- def test_virt_gicv3(self):
- """
- :avocado: tags=machine:gic-version=3
- """
-
- self.run_rr(shift=3,
- args=(*self.get_common_args(),
- "-machine", "virt,gic-version=3"))
diff --git a/tests/avocado/reverse_debugging.py b/tests/avocado/reverse_debugging.py
deleted file mode 100644
index 92855a0..0000000
--- a/tests/avocado/reverse_debugging.py
+++ /dev/null
@@ -1,276 +0,0 @@
-# Reverse debugging test
-#
-# Copyright (c) 2020 ISP RAS
-#
-# Author:
-# Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-import os
-import logging
-
-from avocado import skipUnless
-from avocado_qemu import BUILD_DIR
-from avocado.utils import datadrainer
-from avocado.utils import gdb
-from avocado.utils import process
-from avocado.utils.network.ports import find_free_port
-from avocado.utils.path import find_command
-from boot_linux_console import LinuxKernelTest
-
-class ReverseDebugging(LinuxKernelTest):
- """
- Test GDB reverse debugging commands: reverse step and reverse continue.
- Recording saves the execution of some instructions and makes an initial
- VM snapshot to allow reverse execution.
- Replay saves the order of the first instructions and then checks that they
- are executed backwards in the correct order.
- After that the execution is replayed to the end, and reverse continue
- command is checked by setting several breakpoints, and asserting
- that the execution is stopped at the last of them.
- """
-
- timeout = 10
- STEPS = 10
- endian_is_le = True
-
- def run_vm(self, record, shift, args, replay_path, image_path, port):
- logger = logging.getLogger('replay')
- vm = self.get_vm()
- vm.set_console()
- if record:
- logger.info('recording the execution...')
- mode = 'record'
- else:
- logger.info('replaying the execution...')
- mode = 'replay'
- vm.add_args('-gdb', 'tcp::%d' % port, '-S')
- vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' %
- (shift, mode, replay_path),
- '-net', 'none')
- vm.add_args('-drive', 'file=%s,if=none' % image_path)
- if args:
- vm.add_args(*args)
- vm.launch()
- console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(),
- logger=self.log.getChild('console'),
- stop_check=(lambda : not vm.is_running()))
- console_drainer.start()
- return vm
-
- @staticmethod
- def get_reg_le(g, reg):
- res = g.cmd(b'p%x' % reg)
- num = 0
- for i in range(len(res))[-2::-2]:
- num = 0x100 * num + int(res[i:i + 2], 16)
- return num
-
- @staticmethod
- def get_reg_be(g, reg):
- res = g.cmd(b'p%x' % reg)
- return int(res, 16)
-
- def get_reg(self, g, reg):
- # value may be encoded in BE or LE order
- if self.endian_is_le:
- return self.get_reg_le(g, reg)
- else:
- return self.get_reg_be(g, reg)
-
- def get_pc(self, g):
- return self.get_reg(g, self.REG_PC)
-
- def check_pc(self, g, addr):
- pc = self.get_pc(g)
- if pc != addr:
- self.fail('Invalid PC (read %x instead of %x)' % (pc, addr))
-
- @staticmethod
- def gdb_step(g):
- g.cmd(b's', b'T05thread:01;')
-
- @staticmethod
- def gdb_bstep(g):
- g.cmd(b'bs', b'T05thread:01;')
-
- @staticmethod
- def vm_get_icount(vm):
- return vm.qmp('query-replay')['return']['icount']
-
- def reverse_debugging(self, shift=7, args=None):
- logger = logging.getLogger('replay')
-
- # create qcow2 for snapshots
- logger.info('creating qcow2 image for VM snapshots')
- image_path = os.path.join(self.workdir, 'disk.qcow2')
- qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
- if not os.path.exists(qemu_img):
- qemu_img = find_command('qemu-img', False)
- if qemu_img is False:
- self.cancel('Could not find "qemu-img", which is required to '
- 'create the temporary qcow2 image')
- cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path)
- process.run(cmd)
-
- replay_path = os.path.join(self.workdir, 'replay.bin')
- port = find_free_port()
-
- # record the log
- vm = self.run_vm(True, shift, args, replay_path, image_path, port)
- while self.vm_get_icount(vm) <= self.STEPS:
- pass
- last_icount = self.vm_get_icount(vm)
- vm.shutdown()
-
- logger.info("recorded log with %s+ steps" % last_icount)
-
- # replay and run debug commands
- vm = self.run_vm(False, shift, args, replay_path, image_path, port)
- logger.info('connecting to gdbstub')
- g = gdb.GDBRemote('127.0.0.1', port, False, False)
- g.connect()
- r = g.cmd(b'qSupported')
- if b'qXfer:features:read+' in r:
- g.cmd(b'qXfer:features:read:target.xml:0,ffb')
- if b'ReverseStep+' not in r:
- self.fail('Reverse step is not supported by QEMU')
- if b'ReverseContinue+' not in r:
- self.fail('Reverse continue is not supported by QEMU')
-
- logger.info('stepping forward')
- steps = []
- # record first instruction addresses
- for _ in range(self.STEPS):
- pc = self.get_pc(g)
- logger.info('saving position %x' % pc)
- steps.append(pc)
- self.gdb_step(g)
-
- # visit the recorded instruction in reverse order
- logger.info('stepping backward')
- for addr in steps[::-1]:
- self.gdb_bstep(g)
- self.check_pc(g, addr)
- logger.info('found position %x' % addr)
-
- # visit the recorded instruction in forward order
- logger.info('stepping forward')
- for addr in steps:
- self.check_pc(g, addr)
- self.gdb_step(g)
- logger.info('found position %x' % addr)
-
- # set breakpoints for the instructions just stepped over
- logger.info('setting breakpoints')
- for addr in steps:
- # hardware breakpoint at addr with len=1
- g.cmd(b'Z1,%x,1' % addr, b'OK')
-
- # this may hit a breakpoint if first instructions are executed
- # again
- logger.info('continuing execution')
- vm.qmp('replay-break', icount=last_icount - 1)
- # continue - will return after pausing
-        # It could stop at the end with a T02 reply, or stop earlier by
-        # re-executing one of the breakpoints with a T05 reply.
- g.cmd(b'c')
- if self.vm_get_icount(vm) == last_icount - 1:
- logger.info('reached the end (icount %s)' % (last_icount - 1))
- else:
- logger.info('hit a breakpoint again at %x (icount %s)' %
- (self.get_pc(g), self.vm_get_icount(vm)))
-
- logger.info('running reverse continue to reach %x' % steps[-1])
- # reverse continue - will return after stopping at the breakpoint
- g.cmd(b'bc', b'T05thread:01;')
-
-        # assume that none of the first instructions is executed again,
-        # which would break the order of the breakpoints
- self.check_pc(g, steps[-1])
- logger.info('successfully reached %x' % steps[-1])
-
- logger.info('exiting gdb and qemu')
- vm.shutdown()
-
-class ReverseDebugging_X86_64(ReverseDebugging):
- """
- :avocado: tags=accel:tcg
- """
-
- REG_PC = 0x10
- REG_CS = 0x12
- def get_pc(self, g):
- return self.get_reg_le(g, self.REG_PC) \
- + self.get_reg_le(g, self.REG_CS) * 0x10
-
- # unidentified gitlab timeout problem
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_x86_64_pc(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:pc
- """
- # start with BIOS only
- self.reverse_debugging()
-
-class ReverseDebugging_AArch64(ReverseDebugging):
- """
- :avocado: tags=accel:tcg
- """
-
- REG_PC = 32
-
- # unidentified gitlab timeout problem
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_aarch64_virt(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:cortex-a53
- """
- kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
- '/linux/releases/29/Everything/aarch64/os/images/pxeboot'
- '/vmlinuz')
- kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493'
- kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
-
- self.reverse_debugging(
- args=('-kernel', kernel_path))
-
-class ReverseDebugging_ppc64(ReverseDebugging):
- """
- :avocado: tags=accel:tcg
- """
-
- REG_PC = 0x40
-
- # unidentified gitlab timeout problem
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_ppc64_pseries(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=flaky
- """
- # SLOF branches back to its entry point, which causes this test
- # to take the 'hit a breakpoint again' path. That's not a problem,
- # just slightly different than the other machines.
- self.endian_is_le = False
- self.reverse_debugging()
-
- # See https://gitlab.com/qemu-project/qemu/-/issues/1992
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
- def test_ppc64_powernv(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:powernv
- :avocado: tags=flaky
- """
- self.endian_is_le = False
- self.reverse_debugging()
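
The only non-obvious arithmetic in the ReverseDebugging helpers above is get_reg_le(): the gdbstub 'p' packet returns the register value as a hex string in guest byte order, so on a little-endian guest the most significant byte sits at the end of the string. A minimal standalone sketch of the same decoding loop, with a made-up reply string used purely for illustration:

def decode_le_hex(res):
    """Decode a little-endian gdb 'p' reply, e.g. '78563412' -> 0x12345678."""
    num = 0
    # walk the reply one byte (two hex digits) at a time, starting from the end
    for i in range(len(res))[-2::-2]:
        num = 0x100 * num + int(res[i:i + 2], 16)
    return num

assert decode_le_hex('78563412') == 0x12345678
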
diff --git a/tests/avocado/riscv_opensbi.py b/tests/avocado/riscv_opensbi.py
deleted file mode 100644
index bfff9cc..0000000
--- a/tests/avocado/riscv_opensbi.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# OpenSBI boot test for RISC-V machines
-#
-# Copyright (c) 2022, Ventana Micro
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-
-class RiscvOpenSBI(QemuSystemTest):
- """
- :avocado: tags=accel:tcg
- """
- timeout = 5
-
- def boot_opensbi(self):
- self.vm.set_console()
- self.vm.launch()
- wait_for_console_pattern(self, 'Platform Name')
- wait_for_console_pattern(self, 'Boot HART MEDELEG')
-
- def test_riscv32_spike(self):
- """
- :avocado: tags=arch:riscv32
- :avocado: tags=machine:spike
- """
- self.boot_opensbi()
-
- def test_riscv64_spike(self):
- """
- :avocado: tags=arch:riscv64
- :avocado: tags=machine:spike
- """
- self.boot_opensbi()
-
- def test_riscv32_sifive_u(self):
- """
- :avocado: tags=arch:riscv32
- :avocado: tags=machine:sifive_u
- """
- self.boot_opensbi()
-
- def test_riscv64_sifive_u(self):
- """
- :avocado: tags=arch:riscv64
- :avocado: tags=machine:sifive_u
- """
- self.boot_opensbi()
-
- def test_riscv32_virt(self):
- """
- :avocado: tags=arch:riscv32
- :avocado: tags=machine:virt
- """
- self.boot_opensbi()
-
- def test_riscv64_virt(self):
- """
- :avocado: tags=arch:riscv64
- :avocado: tags=machine:virt
- """
- self.boot_opensbi()
diff --git a/tests/avocado/s390_topology.py b/tests/avocado/s390_topology.py
deleted file mode 100644
index 9154ac8..0000000
--- a/tests/avocado/s390_topology.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# Functional test that boots a Linux kernel and checks the console
-#
-# Copyright IBM Corp. 2023
-#
-# Author:
-# Pierre Morel <pmorel@linux.ibm.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import os
-import shutil
-import time
-
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import interrupt_interactive_console_until_pattern
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import process
-from avocado.utils import archive
-
-
-class S390CPUTopology(QemuSystemTest):
- """
-    S390x CPU topology consists of 4 topology layers (from bottom to top:
-    cores, sockets, books and drawers) and 2 modifier attributes:
-    the entitlement and the dedication.
- See: docs/system/s390x/cpu-topology.rst.
-
-    S390x CPU topology is set up in different ways:
- - implicitly from the '-smp' argument by completing each topology
- level one after the other beginning with drawer 0, book 0 and
- socket 0.
- - explicitly from the '-device' argument on the QEMU command line
- - explicitly by hotplug of a new CPU using QMP or HMP
- - it is modified by using QMP 'set-cpu-topology'
-
- The S390x modifier attribute entitlement depends on the machine
- polarization, which can be horizontal or vertical.
- The polarization is changed on a request from the guest.
- """
- timeout = 90
- event_timeout = 10
-
- KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 '
- 'root=/dev/ram '
- 'selinux=0 '
- 'rdinit=/bin/sh')
-
- def wait_until_booted(self):
- wait_for_console_pattern(self, 'no job control',
- failure_message='Kernel panic - not syncing',
- vm=None)
-
- def check_topology(self, c, s, b, d, e, t):
- res = self.vm.qmp('query-cpus-fast')
- cpus = res['return']
- for cpu in cpus:
- core = cpu['props']['core-id']
- socket = cpu['props']['socket-id']
- book = cpu['props']['book-id']
- drawer = cpu['props']['drawer-id']
- entitlement = cpu.get('entitlement')
- dedicated = cpu.get('dedicated')
- if core == c:
- self.assertEqual(drawer, d)
- self.assertEqual(book, b)
- self.assertEqual(socket, s)
- self.assertEqual(entitlement, e)
- self.assertEqual(dedicated, t)
-
- def kernel_init(self):
- """
-        We need a VM that supports CPU topology; currently this is only
-        the case when using KVM, not TCG.
- We need a kernel supporting the CPU topology.
- We need a minimal root filesystem with a shell.
- """
- self.require_accelerator("kvm")
- kernel_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/35/Server/s390x/os'
- '/images/kernel.img')
- kernel_hash = '0d1aaaf303f07cf0160c8c48e56fe638'
- kernel_path = self.fetch_asset(kernel_url, algorithm='md5',
- asset_hash=kernel_hash)
-
- initrd_url = ('https://archives.fedoraproject.org/pub/archive'
- '/fedora-secondary/releases/35/Server/s390x/os'
- '/images/initrd.img')
- initrd_hash = 'a122057d95725ac030e2ec51df46e172'
- initrd_path_xz = self.fetch_asset(initrd_url, algorithm='md5',
- asset_hash=initrd_hash)
- initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
- archive.lzma_uncompress(initrd_path_xz, initrd_path)
-
- self.vm.set_console()
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
- self.vm.add_args('-nographic',
- '-enable-kvm',
- '-cpu', 'max,ctop=on',
- '-m', '512',
- '-kernel', kernel_path,
- '-initrd', initrd_path,
- '-append', kernel_command_line)
-
- def system_init(self):
- self.log.info("System init")
- exec_command_and_wait_for_pattern(self,
- """ mount proc -t proc /proc;
- mount sys -t sysfs /sys;
- cat /sys/devices/system/cpu/dispatching """,
- '0')
-
- def test_single(self):
- """
- This test checks the simplest topology with a single CPU.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
- def test_default(self):
- """
- This test checks the implicit topology.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.add_args('-smp',
- '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
- self.vm.launch()
- self.wait_until_booted()
- self.check_topology(0, 0, 0, 0, 'medium', False)
- self.check_topology(1, 0, 0, 0, 'medium', False)
- self.check_topology(2, 1, 0, 0, 'medium', False)
- self.check_topology(3, 1, 0, 0, 'medium', False)
- self.check_topology(4, 2, 0, 0, 'medium', False)
- self.check_topology(5, 2, 0, 0, 'medium', False)
- self.check_topology(6, 0, 1, 0, 'medium', False)
- self.check_topology(7, 0, 1, 0, 'medium', False)
- self.check_topology(8, 1, 1, 0, 'medium', False)
- self.check_topology(9, 1, 1, 0, 'medium', False)
- self.check_topology(10, 2, 1, 0, 'medium', False)
- self.check_topology(11, 2, 1, 0, 'medium', False)
- self.check_topology(12, 0, 0, 1, 'medium', False)
-
- def test_move(self):
- """
- This test checks the topology modification by moving a CPU
- to another socket: CPU 0 is moved from socket 0 to socket 2.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.add_args('-smp',
- '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
- self.vm.launch()
- self.wait_until_booted()
-
- self.check_topology(0, 0, 0, 0, 'medium', False)
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'})
- self.assertEqual(res['return'], {})
- self.check_topology(0, 2, 0, 0, 'low', False)
-
- def test_dash_device(self):
- """
- This test verifies that a CPU defined with the '-device'
- command line option finds its right place inside the topology.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.add_args('-smp',
- '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
- self.vm.add_args('-device', 'max-s390x-cpu,core-id=10')
- self.vm.add_args('-device',
- 'max-s390x-cpu,'
- 'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low')
- self.vm.add_args('-device',
- 'max-s390x-cpu,'
- 'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium')
- self.vm.add_args('-device',
- 'max-s390x-cpu,'
- 'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high')
- self.vm.add_args('-device',
- 'max-s390x-cpu,'
- 'core-id=4,socket-id=1,book-id=1,drawer-id=1')
- self.vm.add_args('-device',
- 'max-s390x-cpu,'
- 'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true')
-
- self.vm.launch()
- self.wait_until_booted()
-
- self.check_topology(10, 2, 1, 0, 'medium', False)
- self.check_topology(1, 0, 1, 1, 'low', False)
- self.check_topology(2, 0, 1, 1, 'medium', False)
- self.check_topology(3, 1, 1, 1, 'high', False)
- self.check_topology(4, 1, 1, 1, 'medium', False)
- self.check_topology(5, 2, 1, 1, 'high', True)
-
-
- def guest_set_dispatching(self, dispatching):
- exec_command(self,
- f'echo {dispatching} > /sys/devices/system/cpu/dispatching')
- self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout)
- exec_command_and_wait_for_pattern(self,
- 'cat /sys/devices/system/cpu/dispatching', dispatching)
-
-
- def test_polarization(self):
- """
-        This test verifies that QEMU modifies the entitlement after
-        several guest polarization change requests.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
- res = self.vm.qmp('query-s390x-cpu-polarization')
- self.assertEqual(res['return']['polarization'], 'horizontal')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
-        self.guest_set_dispatching('1')
- res = self.vm.qmp('query-s390x-cpu-polarization')
- self.assertEqual(res['return']['polarization'], 'vertical')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
-        self.guest_set_dispatching('0')
- res = self.vm.qmp('query-s390x-cpu-polarization')
- self.assertEqual(res['return']['polarization'], 'horizontal')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
-
- def check_polarization(self, polarization):
-        # We need to wait for the change to have been propagated to the kernel
- exec_command_and_wait_for_pattern(self,
- "\n".join([
- "timeout 1 sh -c 'while true",
- 'do',
- ' syspath="/sys/devices/system/cpu/cpu0/polarization"',
- ' polarization="$(cat "$syspath")" || exit',
- f' if [ "$polarization" = "{polarization}" ]; then',
- ' exit 0',
- ' fi',
- ' sleep 0.01',
-            # searched-for strings must not appear in the command; '' splits them
- "done' && echo succ''ess || echo fail''ure",
- ]),
- "success", "failure")
-
-
- def test_entitlement(self):
- """
- This test verifies that QEMU modifies the entitlement
- after a guest request and that the guest sees the change.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
-
- self.check_polarization('horizontal')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
- self.guest_set_dispatching('1')
- self.check_polarization('vertical:medium')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'low'})
- self.assertEqual(res['return'], {})
- self.check_polarization('vertical:low')
- self.check_topology(0, 0, 0, 0, 'low', False)
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'medium'})
- self.assertEqual(res['return'], {})
- self.check_polarization('vertical:medium')
- self.check_topology(0, 0, 0, 0, 'medium', False)
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'high'})
- self.assertEqual(res['return'], {})
- self.check_polarization('vertical:high')
- self.check_topology(0, 0, 0, 0, 'high', False)
-
-        self.guest_set_dispatching('0')
- self.check_polarization("horizontal")
- self.check_topology(0, 0, 0, 0, 'high', False)
-
-
- def test_dedicated(self):
- """
- This test verifies that QEMU adjusts the entitlement correctly when a
- CPU is made dedicated.
- QEMU retains the entitlement value when horizontal polarization is in effect.
- For the guest, the field shows the effective value of the entitlement.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
-
- self.check_polarization("horizontal")
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'dedicated': True})
- self.assertEqual(res['return'], {})
- self.check_topology(0, 0, 0, 0, 'high', True)
- self.check_polarization("horizontal")
-
-        self.guest_set_dispatching('1')
- self.check_topology(0, 0, 0, 0, 'high', True)
- self.check_polarization("vertical:high")
-
-        self.guest_set_dispatching('0')
- self.check_topology(0, 0, 0, 0, 'high', True)
- self.check_polarization("horizontal")
-
-
- def test_socket_full(self):
- """
-        This test verifies that QEMU does not allow overloading a socket.
-        Socket-id 0 on book-id 0 already contains CPUs 0 and 1 and cannot
-        accept any new CPU, while socket-id 0 on book-id 1 is free.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.add_args('-smp',
- '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 2, 'socket-id': 0, 'book-id': 0})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 2, 'socket-id': 0, 'book-id': 1})
- self.assertEqual(res['return'], {})
-
- def test_dedicated_error(self):
- """
- This test verifies that QEMU refuses to lower the entitlement
-        of a dedicated CPU.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'dedicated': True})
- self.assertEqual(res['return'], {})
-
- self.check_topology(0, 0, 0, 0, 'high', True)
-
-        self.guest_set_dispatching('1')
-
- self.check_topology(0, 0, 0, 0, 'high', True)
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'low', 'dedicated': True})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'low'})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'medium', 'dedicated': True})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'medium'})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'low', 'dedicated': False})
- self.assertEqual(res['return'], {})
-
- res = self.vm.qmp('set-cpu-topology',
- {'core-id': 0, 'entitlement': 'medium', 'dedicated': False})
- self.assertEqual(res['return'], {})
-
- def test_move_error(self):
- """
-        This test verifies that QEMU refuses to move a CPU to a
-        nonexistent location.
-
- :avocado: tags=arch:s390x
- :avocado: tags=machine:s390-ccw-virtio
- """
- self.kernel_init()
- self.vm.launch()
- self.wait_until_booted()
-
- self.system_init()
-
- res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1})
- self.assertEqual(res['error']['class'], 'GenericError')
-
- self.check_topology(0, 0, 0, 0, 'medium', False)
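
The expectations hard-coded in test_default above follow directly from the implicit fill order described in the class docstring: cores fill socket 0 first, then the next socket, then the next book, then the next drawer. A small sketch of that index arithmetic, written only to illustrate what the test asserts for '-smp 13,drawers=2,books=2,sockets=3,cores=2' (it is not code taken from QEMU):

def implicit_location(core_id, drawers=2, books=2, sockets=3, cores=2):
    """Return (drawer, book, socket) for a core under the implicit fill order."""
    socket = (core_id // cores) % sockets
    book = (core_id // (cores * sockets)) % books
    drawer = (core_id // (cores * sockets * books)) % drawers
    return drawer, book, socket

# mirrors the check_topology() calls in test_default
assert implicit_location(2) == (0, 0, 1)    # core 2 -> drawer 0, book 0, socket 1
assert implicit_location(6) == (0, 1, 0)    # core 6 -> drawer 0, book 1, socket 0
assert implicit_location(12) == (1, 0, 0)   # core 12 -> drawer 1, book 0, socket 0
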
diff --git a/tests/avocado/smmu.py b/tests/avocado/smmu.py
deleted file mode 100644
index 4ebfa71..0000000
--- a/tests/avocado/smmu.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# SMMUv3 Functional tests
-#
-# Copyright (c) 2021 Red Hat, Inc.
-#
-# Author:
-# Eric Auger <eric.auger@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-import os
-
-from avocado import skipUnless
-from avocado_qemu import LinuxTest, BUILD_DIR
-
-@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
-
-class SMMU(LinuxTest):
- """
- :avocado: tags=accel:kvm
- :avocado: tags=cpu:host
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=distro:fedora
- :avocado: tags=smmu
- :avocado: tags=flaky
- """
-
- IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
- kernel_path = None
- initrd_path = None
- kernel_params = None
-
- def set_up_boot(self):
- path = self.download_boot()
- self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
- 'drive=drv0,id=virtio-disk0,bootindex=1,'
- 'werror=stop,rerror=stop' + self.IOMMU_ADDON)
- self.vm.add_args('-drive',
- 'file=%s,if=none,cache=writethrough,id=drv0' % path)
-
- def setUp(self):
- super(SMMU, self).setUp(None, 'virtio-net-pci' + self.IOMMU_ADDON)
-
- def common_vm_setup(self, custom_kernel=False):
- self.require_accelerator("kvm")
- self.vm.add_args("-accel", "kvm")
- self.vm.add_args("-cpu", "host")
- self.vm.add_args("-machine", "iommu=smmuv3")
- self.vm.add_args("-d", "guest_errors")
- self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
- 'edk2-aarch64-code.fd'))
- self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
- self.vm.add_args('-object',
- 'rng-random,id=rng0,filename=/dev/urandom')
-
- if custom_kernel is False:
- return
-
- kernel_url = self.distro.pxeboot_url + 'vmlinuz'
- initrd_url = self.distro.pxeboot_url + 'initrd.img'
- self.kernel_path = self.fetch_asset(kernel_url)
- self.initrd_path = self.fetch_asset(initrd_url)
-
- def run_and_check(self):
- if self.kernel_path:
- self.vm.add_args('-kernel', self.kernel_path,
- '-append', self.kernel_params,
- '-initrd', self.initrd_path)
- self.launch_and_wait()
- self.ssh_command('cat /proc/cmdline')
- self.ssh_command('dnf -y install numactl-devel')
-
-
- # 5.3 kernel without RIL #
-
- def test_smmu_noril(self):
- """
- :avocado: tags=smmu_noril
- :avocado: tags=smmu_noril_tests
- :avocado: tags=distro_version:31
- """
- self.common_vm_setup()
- self.run_and_check()
-
- def test_smmu_noril_passthrough(self):
- """
- :avocado: tags=smmu_noril_passthrough
- :avocado: tags=smmu_noril_tests
- :avocado: tags=distro_version:31
- """
- self.common_vm_setup(True)
- self.kernel_params = (self.distro.default_kernel_params +
- ' iommu.passthrough=on')
- self.run_and_check()
-
- def test_smmu_noril_nostrict(self):
- """
- :avocado: tags=smmu_noril_nostrict
- :avocado: tags=smmu_noril_tests
- :avocado: tags=distro_version:31
- """
- self.common_vm_setup(True)
- self.kernel_params = (self.distro.default_kernel_params +
- ' iommu.strict=0')
- self.run_and_check()
-
- # 5.8 kernel featuring range invalidation
- # >= v5.7 kernel
-
- def test_smmu_ril(self):
- """
- :avocado: tags=smmu_ril
- :avocado: tags=smmu_ril_tests
- :avocado: tags=distro_version:33
- """
- self.common_vm_setup()
- self.run_and_check()
-
- def test_smmu_ril_passthrough(self):
- """
- :avocado: tags=smmu_ril_passthrough
- :avocado: tags=smmu_ril_tests
- :avocado: tags=distro_version:33
- """
- self.common_vm_setup(True)
- self.kernel_params = (self.distro.default_kernel_params +
- ' iommu.passthrough=on')
- self.run_and_check()
-
- def test_smmu_ril_nostrict(self):
- """
- :avocado: tags=smmu_ril_nostrict
- :avocado: tags=smmu_ril_tests
- :avocado: tags=distro_version:33
- """
- self.common_vm_setup(True)
- self.kernel_params = (self.distro.default_kernel_params +
- ' iommu.strict=0')
- self.run_and_check()
diff --git a/tests/avocado/tcg_plugins.py b/tests/avocado/tcg_plugins.py
deleted file mode 100644
index 15fd87b..0000000
--- a/tests/avocado/tcg_plugins.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# TCG Plugins tests
-#
-# These are a little more involved than the basic tests run by check-tcg.
-#
-# Copyright (c) 2021 Linaro
-#
-# Author:
-# Alex Bennée <alex.bennee@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import tempfile
-import mmap
-import re
-
-from boot_linux_console import LinuxKernelTest
-
-
-class PluginKernelBase(LinuxKernelTest):
- """
- Boots a Linux kernel with a TCG plugin enabled.
- """
-
- timeout = 120
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
-
- def run_vm(self, kernel_path, kernel_command_line,
- plugin, plugin_log, console_pattern, args=None):
-
- vm = self.get_vm()
- vm.set_console()
- vm.add_args('-kernel', kernel_path,
- '-append', kernel_command_line,
- '-plugin', plugin,
- '-d', 'plugin',
- '-D', plugin_log,
- '-net', 'none',
- '-no-reboot')
- if args:
- vm.add_args(*args)
-
- try:
- vm.launch()
-        except Exception:
- # TODO: probably fails because plugins not enabled but we
- # can't currently probe for the feature.
- self.cancel("TCG Plugins not enabled?")
-
- self.wait_for_console_pattern(console_pattern, vm)
- # ensure logs are flushed
- vm.shutdown()
-
-
-class PluginKernelNormal(PluginKernelBase):
-
- def _grab_aarch64_kernel(self):
- kernel_url = ('https://storage.tuxboot.com/20230331/arm64/Image')
- kernel_sha256 = 'ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7'
- kernel_path = self.fetch_asset(kernel_url,
- asset_hash=kernel_sha256,
- algorithm = "sha256")
- return kernel_path
-
- def test_aarch64_virt_insn(self):
- """
- :avocado: tags=accel:tcg
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:cortex-a53
- """
- kernel_path = self._grab_aarch64_kernel()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- console_pattern = 'Kernel panic - not syncing: VFS:'
-
- plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
- suffix=".log")
-
- self.run_vm(kernel_path, kernel_command_line,
- "tests/plugin/libinsn.so", plugin_log.name,
- console_pattern)
-
- with plugin_log as lf, \
- mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
-
- m = re.search(br"insns: (?P<count>\d+)", s)
- if "count" not in m.groupdict():
- self.fail("Failed to find instruction count")
- else:
- count = int(m.group("count"))
- self.log.info(f"Counted: {count} instructions")
-
-
- def test_aarch64_virt_insn_icount(self):
- """
- :avocado: tags=accel:tcg
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:cortex-a53
- """
- kernel_path = self._grab_aarch64_kernel()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- console_pattern = 'Kernel panic - not syncing: VFS:'
-
- plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
- suffix=".log")
-
- self.run_vm(kernel_path, kernel_command_line,
- "tests/plugin/libinsn.so", plugin_log.name,
- console_pattern,
- args=('-icount', 'shift=1'))
-
- with plugin_log as lf, \
- mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
-
- m = re.search(br"insns: (?P<count>\d+)", s)
- if "count" not in m.groupdict():
- self.fail("Failed to find instruction count")
- else:
- count = int(m.group("count"))
- self.log.info(f"Counted: {count} instructions")
-
- def test_aarch64_virt_mem_icount(self):
- """
- :avocado: tags=accel:tcg
- :avocado: tags=arch:aarch64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:cortex-a53
- """
- kernel_path = self._grab_aarch64_kernel()
- kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
- 'console=ttyAMA0')
- console_pattern = 'Kernel panic - not syncing: VFS:'
-
- plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
- suffix=".log")
-
- self.run_vm(kernel_path, kernel_command_line,
- "tests/plugin/libmem.so,inline=true,callback=true", plugin_log.name,
- console_pattern,
- args=('-icount', 'shift=1'))
-
- with plugin_log as lf, \
- mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
- m = re.findall(br"mem accesses: (?P<count>\d+)", s)
-            if len(m) != 2:
- self.fail("no memory access counts found")
- else:
- inline = int(m[0])
- callback = int(m[1])
- if inline != callback:
- self.fail("mismatched access counts")
- else:
- self.log.info(f"Counted {inline} memory accesses")
diff --git a/tests/avocado/tesseract_utils.py b/tests/avocado/tesseract_utils.py
deleted file mode 100644
index 476f528..0000000
--- a/tests/avocado/tesseract_utils.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# ...
-#
-# Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import logging
-
-from avocado.utils import process
-from avocado.utils.path import find_command, CmdNotFoundError
-
-def tesseract_available(expected_version):
- try:
- find_command('tesseract')
- except CmdNotFoundError:
- return False
- res = process.run('tesseract --version')
- try:
- version = res.stdout_text.split()[1]
- except IndexError:
- version = res.stderr_text.split()[1]
- return int(version.split('.')[0]) >= expected_version
-
-
-def tesseract_ocr(image_path, tesseract_args='', tesseract_version=3):
- console_logger = logging.getLogger('tesseract')
- console_logger.debug(image_path)
- if tesseract_version == 4:
- tesseract_args += ' --oem 1'
- proc = process.run("tesseract {} {} stdout".format(tesseract_args,
- image_path))
- lines = []
- for line in proc.stdout_text.split('\n'):
- sline = line.strip()
- if len(sline):
- console_logger.debug(sline)
- lines += [sline]
- return lines
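
The two helpers above are meant to be used together by screenshot-based tests elsewhere in the tree; a hypothetical caller (the image path and the expected string are placeholders, not taken from a real test) would look roughly like this:

# hypothetical usage sketch
if tesseract_available(4):
    lines = tesseract_ocr('/tmp/screenshot.ppm', tesseract_version=4)
    assert any('login:' in line for line in lines)
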
diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py
deleted file mode 100644
index 736e4aa..0000000
--- a/tests/avocado/tuxrun_baselines.py
+++ /dev/null
@@ -1,620 +0,0 @@
-# Functional test that boots known good tuxboot images the same way
-# that tuxrun (www.tuxrun.org) does. This tool is used by things like
-# the LKFT project to run regression tests on kernels.
-#
-# Copyright (c) 2023 Linaro Ltd.
-#
-# Author:
-# Alex Bennée <alex.bennee@linaro.org>
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-import os
-import time
-import tempfile
-
-from avocado import skip, skipUnless
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import exec_command, exec_command_and_wait_for_pattern
-from avocado_qemu import wait_for_console_pattern
-from avocado.utils import process
-from avocado.utils.path import find_command
-
-class TuxRunBaselineTest(QemuSystemTest):
- """
- :avocado: tags=accel:tcg
- """
-
- KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
- # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
- timeout = 100
-
- def get_tag(self, tagname, default=None):
- """
- Get the metadata tag or return the default.
- """
- utag = self._get_unique_tag_val(tagname)
- print(f"{tagname}/{default} -> {utag}")
- if utag:
- return utag
-
- return default
-
- def setUp(self):
- super().setUp()
-
- # We need zstd for all the tuxrun tests
- # See https://github.com/avocado-framework/avocado/issues/5609
- zstd = find_command('zstd', False)
- if zstd is False:
- self.cancel('Could not find "zstd", which is required to '
- 'decompress rootfs')
- self.zstd = zstd
-
- # Process the TuxRun specific tags, most machines work with
- # reasonable defaults but we sometimes need to tweak the
- # config. To avoid open coding everything we store all these
- # details in the metadata for each test.
-
- # The tuxboot tag matches the root directory
- self.tuxboot = self.get_tag('tuxboot')
-
-        # Most Linux guests use ttyS0 for their serial port
- self.console = self.get_tag('console', "ttyS0")
-
-        # Does the machine shut down QEMU nicely on "halt"
- self.shutdown = self.get_tag('shutdown')
-
- # The name of the kernel Image file
- self.image = self.get_tag('image', "Image")
-
- self.root = self.get_tag('root', "vda")
-
- # Occasionally we need extra devices to hook things up
- self.extradev = self.get_tag('extradev')
-
- self.qemu_img = super().get_qemu_img()
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(self, success_message,
- failure_message='Kernel panic - not syncing',
- vm=vm)
-
- def fetch_tuxrun_assets(self, csums=None, dt=None):
- """
- Fetch the TuxBoot assets. They are stored in a standard way so we
- use the per-test tags to fetch details.
- """
- base_url = f"https://storage.tuxboot.com/20230331/{self.tuxboot}/"
-
- # empty hash if we weren't passed one
- csums = {} if csums is None else csums
- ksum = csums.get(self.image, None)
- isum = csums.get("rootfs.ext4.zst", None)
-
- kernel_image = self.fetch_asset(base_url + self.image,
- asset_hash = ksum,
- algorithm = "sha256")
- disk_image_zst = self.fetch_asset(base_url + "rootfs.ext4.zst",
- asset_hash = isum,
- algorithm = "sha256")
-
- cmd = f"{self.zstd} -d {disk_image_zst} -o {self.workdir}/rootfs.ext4"
- process.run(cmd)
-
- if dt:
- dsum = csums.get(dt, None)
- dtb = self.fetch_asset(base_url + dt,
- asset_hash = dsum,
- algorithm = "sha256")
- else:
- dtb = None
-
- return (kernel_image, self.workdir + "/rootfs.ext4", dtb)
-
- def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
- """
- Setup to run and add the common parameters to the system
- """
- self.vm.set_console(console_index=console_index)
-
- # all block devices are raw ext4's
- blockdev = "driver=raw,file.driver=file," \
- + f"file.filename={disk},node-name=hd0"
-
- kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
- kcmd_line += f" root=/dev/{self.root}"
- kcmd_line += f" console={self.console}"
-
- self.vm.add_args('-kernel', kernel,
- '-append', kcmd_line,
- '-blockdev', blockdev)
-
- # Sometimes we need extra devices attached
- if self.extradev:
- self.vm.add_args('-device', self.extradev)
-
- self.vm.add_args('-device',
- f"{drive},drive=hd0")
-
- # Some machines need an explicit DTB
- if dtb:
- self.vm.add_args('-dtb', dtb)
-
- def run_tuxtest_tests(self, haltmsg):
- """
- Wait for the system to boot up, wait for the login prompt and
- then do a few things on the console. Trigger a shutdown and
- wait to exit cleanly.
- """
- self.wait_for_console_pattern("Welcome to TuxTest")
- time.sleep(0.2)
- exec_command(self, 'root')
- time.sleep(0.2)
- exec_command(self, 'cat /proc/interrupts')
- time.sleep(0.1)
- exec_command(self, 'cat /proc/self/maps')
- time.sleep(0.1)
- exec_command(self, 'uname -a')
- time.sleep(0.1)
- exec_command_and_wait_for_pattern(self, 'halt', haltmsg)
-
- # Wait for VM to shut down gracefully if it can
- if self.shutdown == "nowait":
- self.vm.shutdown()
- else:
- self.vm.wait()
-
- def common_tuxrun(self,
- csums=None,
- dt=None,
- drive="virtio-blk-device",
- haltmsg="reboot: System halted",
- console_index=0):
- """
- Common path for LKFT tests. Unless we need to do something
- special with the command line we can process most things using
- the tag metadata.
- """
- (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums, dt)
-
- self.prepare_run(kernel, disk, drive, dtb, console_index)
- self.vm.launch()
- self.run_tuxtest_tests(haltmsg)
-
- def ppc64_common_tuxrun(self, sums, prefix):
- # add device args to command line.
- self.require_netdev('user')
- self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
- '-device', 'virtio-net,netdev=vnet')
- self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}',
- '-device', '{"driver":"virtio-net-pci","netdev":'
- '"hostnet0","id":"net0","mac":"52:54:00:4c:e3:86",'
- '"bus":"pci.0","addr":"0x9"}')
- self.vm.add_args('-device', '{"driver":"qemu-xhci","p2":15,"p3":15,'
- '"id":"usb","bus":"pci.0","addr":"0x2"}')
- self.vm.add_args('-device', '{"driver":"virtio-scsi-pci","id":"scsi0"'
- ',"bus":"pci.0","addr":"0x3"}')
- self.vm.add_args('-device', '{"driver":"virtio-serial-pci","id":'
- '"virtio-serial0","bus":"pci.0","addr":"0x4"}')
- self.vm.add_args('-device', '{"driver":"scsi-cd","bus":"scsi0.0"'
- ',"channel":0,"scsi-id":0,"lun":0,"device_id":'
- '"drive-scsi0-0-0-0","id":"scsi0-0-0-0"}')
- self.vm.add_args('-device', '{"driver":"virtio-balloon-pci",'
- '"id":"balloon0","bus":"pci.0","addr":"0x6"}')
- self.vm.add_args('-audiodev', '{"id":"audio1","driver":"none"}')
- self.vm.add_args('-device', '{"driver":"usb-tablet","id":"input0"'
- ',"bus":"usb.0","port":"1"}')
- self.vm.add_args('-device', '{"driver":"usb-kbd","id":"input1"'
- ',"bus":"usb.0","port":"2"}')
- self.vm.add_args('-device', '{"driver":"VGA","id":"video0",'
- '"vgamem_mb":16,"bus":"pci.0","addr":"0x7"}')
- self.vm.add_args('-object', '{"qom-type":"rng-random","id":"objrng0"'
- ',"filename":"/dev/urandom"}',
- '-device', '{"driver":"virtio-rng-pci","rng":"objrng0"'
- ',"id":"rng0","bus":"pci.0","addr":"0x8"}')
- self.vm.add_args('-object', '{"qom-type":"cryptodev-backend-builtin",'
- '"id":"objcrypto0","queues":1}',
- '-device', '{"driver":"virtio-crypto-pci",'
- '"cryptodev":"objcrypto0","id":"crypto0","bus"'
- ':"pci.0","addr":"0xa"}')
- self.vm.add_args('-device', '{"driver":"spapr-pci-host-bridge"'
- ',"index":1,"id":"pci.1"}')
- self.vm.add_args('-device', '{"driver":"spapr-vscsi","id":"scsi1"'
- ',"reg":12288}')
- self.vm.add_args('-m', '2G,slots=32,maxmem=4G',
- '-object', 'memory-backend-ram,id=ram1,size=1G',
- '-device', 'pc-dimm,id=dimm1,memdev=ram1')
-
- # Create a temporary qcow2 and launch the test-case
- with tempfile.NamedTemporaryFile(prefix=prefix,
- suffix='.qcow2') as qcow2:
- process.run(self.qemu_img + ' create -f qcow2 ' +
- qcow2.name + ' 1G')
-
- self.vm.add_args('-drive', 'file=' + qcow2.name +
- ',format=qcow2,if=none,id='
- 'drive-virtio-disk1',
- '-device', 'virtio-blk-pci,bus=pci.0,'
- 'addr=0xb,drive=drive-virtio-disk1,id=virtio-disk1'
- ',bootindex=2')
- self.common_tuxrun(csums=sums, drive="scsi-hd")
-
- #
- # The tests themselves. The configuration is derived from how
- # tuxrun invokes qemu (with minor tweaks like using -blockdev
- # consistently). The tuxrun equivalent is something like:
- #
- # tuxrun --device qemu-{ARCH} \
- # --kernel https://storage.tuxboot.com/{TUXBOOT}/{IMAGE}
- #
-
- def test_arm64(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:arm64
- :avocado: tags=console:ttyAMA0
- :avocado: tags=shutdown:nowait
- """
- sums = {"Image" :
- "ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7",
- "rootfs.ext4.zst" :
- "bbd5ed4b9c7d3f4ca19ba71a323a843c6b585e880115df3b7765769dbd9dd061"}
- self.common_tuxrun(csums=sums)
-
- def test_arm64be(self):
- """
- :avocado: tags=arch:aarch64
- :avocado: tags=cpu:cortex-a57
- :avocado: tags=endian:big
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:arm64be
- :avocado: tags=console:ttyAMA0
- :avocado: tags=shutdown:nowait
- """
- sums = { "Image" :
- "e0df4425eb2cd9ea9a283e808037f805641c65d8fcecc8f6407d8f4f339561b4",
- "rootfs.ext4.zst" :
- "e6ffd8813c8a335bc15728f2835f90539c84be7f8f5f691a8b01451b47fb4bd7"}
- self.common_tuxrun(csums=sums)
-
- def test_armv5(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=cpu:arm926
- :avocado: tags=machine:versatilepb
- :avocado: tags=tuxboot:armv5
- :avocado: tags=image:zImage
- :avocado: tags=console:ttyAMA0
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "17177afa74e7294da0642861f08c88ca3c836764299a54bf6d1ce276cb9712a5",
- "versatile-pb.dtb" :
- "0bc0c0b0858cefd3c32b385c0d66d97142ded29472a496f4f490e42fc7615b25",
- "zImage" :
- "c95af2f27647c12265d75e9df44c22ff5228c59855f54aaa70f41ec2842e3a4d" }
-
- self.common_tuxrun(csums=sums,
- drive="virtio-blk-pci",
- dt="versatile-pb.dtb")
-
- def test_armv7(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=cpu:cortex-a15
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:armv7
- :avocado: tags=image:zImage
- :avocado: tags=console:ttyAMA0
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "ab1fbbeaddda1ffdd45c9405a28cd5370c20f23a7cbc809cc90dc9f243a8eb5a",
- "zImage" :
- "4c7a22e9f15875bec06bd2a29d822496571eb297d4f22694099ffcdb19077572" }
-
- self.common_tuxrun(csums=sums)
-
- def test_armv7be(self):
- """
- :avocado: tags=arch:arm
- :avocado: tags=cpu:cortex-a15
- :avocado: tags=endian:big
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:armv7be
- :avocado: tags=image:zImage
- :avocado: tags=console:ttyAMA0
- :avocado: tags=shutdown:nowait
- """
- sums = {"rootfs.ext4.zst" :
- "42ed46dd2d59986206c5b1f6cf35eab58fe3fd20c96b41aaa16b32f3f90a9835",
- "zImage" :
- "7facc62082b57af12015b08f7fdbaf2f123ba07a478367853ae12b219afc9f2f" }
-
- self.common_tuxrun(csums=sums)
-
- def test_i386(self):
- """
- :avocado: tags=arch:i386
- :avocado: tags=cpu:coreduo
- :avocado: tags=machine:q35
- :avocado: tags=tuxboot:i386
- :avocado: tags=image:bzImage
- :avocado: tags=shutdown:nowait
- """
- sums = {"bzImage" :
- "a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956",
- "rootfs.ext4.zst" :
- "f15e66b2bf673a210ec2a4b2e744a80530b36289e04f5388aab812b97f69754a" }
-
- self.common_tuxrun(csums=sums, drive="virtio-blk-pci")
-
- def test_mips32(self):
- """
- :avocado: tags=arch:mips
- :avocado: tags=machine:malta
- :avocado: tags=cpu:mips32r6-generic
- :avocado: tags=endian:big
- :avocado: tags=tuxboot:mips32
- :avocado: tags=image:vmlinux
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "fc3da0b4c2f38d74c6d705123bb0f633c76ed953128f9d0859378c328a6d11a0",
- "vmlinux" :
- "bfd2172f8b17fb32970ca0c8c58f59c5a4ca38aa5855d920be3a69b5d16e52f0" }
-
- self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
-
- def test_mips32el(self):
- """
- :avocado: tags=arch:mipsel
- :avocado: tags=machine:malta
- :avocado: tags=cpu:mips32r6-generic
- :avocado: tags=tuxboot:mips32el
- :avocado: tags=image:vmlinux
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "e799768e289fd69209c21f4dacffa11baea7543d5db101e8ce27e3bc2c41d90e",
- "vmlinux" :
- "8573867c68a8443db8de6d08bb33fb291c189ca2ca671471d3973a3e712096a3" }
-
- self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
-
- def test_mips64(self):
- """
- :avocado: tags=arch:mips64
- :avocado: tags=machine:malta
- :avocado: tags=tuxboot:mips64
- :avocado: tags=endian:big
- :avocado: tags=image:vmlinux
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "69d91eeb04df3d8d172922c6993bb37d4deeb6496def75d8580f6f9de3e431da",
- "vmlinux" :
- "09010e51e4b8bcbbd2494786ffb48eca78f228e96e5c5438344b0eac4029dc61" }
-
- self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
-
- def test_mips64el(self):
- """
- :avocado: tags=arch:mips64el
- :avocado: tags=machine:malta
- :avocado: tags=tuxboot:mips64el
- :avocado: tags=image:vmlinux
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "fba585368f5915b1498ed081863474b2d7ec4e97cdd46d21bdcb2f9698f83de4",
- "vmlinux" :
- "d4e08965e2155c4cccce7c5f34d18fe34c636cda2f2c9844387d614950155266" }
-
- self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
-
- def test_ppc32(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:ppce500
- :avocado: tags=cpu:e500mc
- :avocado: tags=tuxboot:ppc32
- :avocado: tags=image:uImage
- :avocado: tags=shutdown:nowait
- """
- sums = { "rootfs.ext4.zst" :
- "8885b9d999cc24d679542a02e9b6aaf48f718f2050ece6b8347074b6ee41dd09",
- "uImage" :
- "1a68f74b860fda022fb12e03c5efece8c2b8b590d96cca37a8481a3ae0b3f81f" }
-
- self.common_tuxrun(csums=sums, drive="virtio-blk-pci")
-
- def test_ppc64(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=cpu:POWER10
- :avocado: tags=endian:big
- :avocado: tags=console:hvc0
- :avocado: tags=tuxboot:ppc64
- :avocado: tags=image:vmlinux
- :avocado: tags=extradev:driver=spapr-vscsi
- :avocado: tags=root:sda
- """
- sums = { "rootfs.ext4.zst" :
- "1d953e81a4379e537fc8e41e05a0a59d9b453eef97aa03d47866c6c45b00bdff",
- "vmlinux" :
- "f22a9b9e924174a4c199f4c7e5d91a2339fcfe51c6eafd0907dc3e09b64ab728" }
- self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64_')
-
- def test_ppc64le(self):
- """
- :avocado: tags=arch:ppc64
- :avocado: tags=machine:pseries
- :avocado: tags=cpu:POWER10
- :avocado: tags=console:hvc0
- :avocado: tags=tuxboot:ppc64le
- :avocado: tags=image:vmlinux
- :avocado: tags=extradev:driver=spapr-vscsi
- :avocado: tags=root:sda
- """
- sums = { "rootfs.ext4.zst" :
- "b442678c93fb8abe1f7d3bfa20556488de6b475c22c8fed363f42cf81a0a3906",
- "vmlinux" :
- "979eb61b445a010fb13e2b927126991f8ceef9c590fa2be0996c00e293e80cf2" }
- self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64le_')
-
- def test_riscv32(self):
- """
- :avocado: tags=arch:riscv32
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:riscv32
- """
- sums = { "Image" :
- "89599407d7334de629a40e7ad6503c73670359eb5f5ae9d686353a3d6deccbd5",
- "fw_jump.elf" :
- "f2ef28a0b77826f79d085d3e4aa686f1159b315eff9099a37046b18936676985",
- "rootfs.ext4.zst" :
- "7168d296d0283238ea73cd5a775b3dd608e55e04c7b92b76ecce31bb13108cba" }
-
- self.common_tuxrun(csums=sums)
-
- def test_riscv64(self):
- """
- :avocado: tags=arch:riscv64
- :avocado: tags=machine:virt
- :avocado: tags=tuxboot:riscv64
- """
- sums = { "Image" :
- "cd634badc65e52fb63465ec99e309c0de0369f0841b7d9486f9729e119bac25e",
- "fw_jump.elf" :
- "6e3373abcab4305fe151b564a4c71110d833c21f2c0a1753b7935459e36aedcf",
- "rootfs.ext4.zst" :
- "b18e3a3bdf27be03da0b285e84cb71bf09eca071c3a087b42884b6982ed679eb" }
-
- self.common_tuxrun(csums=sums)
-
- def test_riscv32_maxcpu(self):
- """
- :avocado: tags=arch:riscv32
- :avocado: tags=machine:virt
- :avocado: tags=cpu:max
- :avocado: tags=tuxboot:riscv32
- """
- sums = { "Image" :
- "89599407d7334de629a40e7ad6503c73670359eb5f5ae9d686353a3d6deccbd5",
- "fw_jump.elf" :
- "f2ef28a0b77826f79d085d3e4aa686f1159b315eff9099a37046b18936676985",
- "rootfs.ext4.zst" :
- "7168d296d0283238ea73cd5a775b3dd608e55e04c7b92b76ecce31bb13108cba" }
-
- self.common_tuxrun(csums=sums)
-
- def test_riscv64_maxcpu(self):
- """
- :avocado: tags=arch:riscv64
- :avocado: tags=machine:virt
- :avocado: tags=cpu:max
- :avocado: tags=tuxboot:riscv64
- """
- sums = { "Image" :
- "cd634badc65e52fb63465ec99e309c0de0369f0841b7d9486f9729e119bac25e",
- "fw_jump.elf" :
- "6e3373abcab4305fe151b564a4c71110d833c21f2c0a1753b7935459e36aedcf",
- "rootfs.ext4.zst" :
- "b18e3a3bdf27be03da0b285e84cb71bf09eca071c3a087b42884b6982ed679eb" }
-
- self.common_tuxrun(csums=sums)
-
- def test_s390(self):
- """
- :avocado: tags=arch:s390x
- :avocado: tags=endian:big
- :avocado: tags=tuxboot:s390
- :avocado: tags=image:bzImage
- :avocado: tags=shutdown:nowait
- """
- sums = { "bzImage" :
- "0414e98dd1c3dafff8496c9cd9c28a5f8d04553bb5ba37e906a812b48d442ef0",
- "rootfs.ext4.zst" :
- "88c37c32276677f873a25ab9ec6247895b8e3e6f8259134de2a616080b8ab3fc" }
-
- self.common_tuxrun(csums=sums,
- drive="virtio-blk-ccw",
- haltmsg="Requesting system halt")
-
- # Note: some segfaults caused by unaligned userspace access
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
- def test_sh4(self):
- """
- :avocado: tags=arch:sh4
- :avocado: tags=machine:r2d
- :avocado: tags=cpu:sh7785
- :avocado: tags=tuxboot:sh4
- :avocado: tags=image:zImage
- :avocado: tags=root:sda
- :avocado: tags=console:ttySC1
- :avocado: tags=flaky
- """
- sums = { "rootfs.ext4.zst" :
- "3592a7a3d5a641e8b9821449e77bc43c9904a56c30d45da0694349cfd86743fd",
- "zImage" :
- "29d9b2aba604a0f53a5dc3b5d0f2b8e35d497de1129f8ee5139eb6fdf0db692f" }
-
- # The test is currently too unstable to do much in userspace
- # so we skip common_tuxrun and do a minimal boot and shutdown.
- (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums=sums)
-
- # the console comes on the second serial port
- self.prepare_run(kernel, disk,
- "driver=ide-hd,bus=ide.0,unit=0",
- console_index=1)
- self.vm.launch()
-
- self.wait_for_console_pattern("Welcome to TuxTest")
- time.sleep(0.1)
- exec_command(self, 'root')
- time.sleep(0.1)
- exec_command_and_wait_for_pattern(self, 'halt',
- "reboot: System halted")
-
- def test_sparc64(self):
- """
- :avocado: tags=arch:sparc64
- :avocado: tags=tuxboot:sparc64
- :avocado: tags=image:vmlinux
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
-
- sums = { "rootfs.ext4.zst" :
- "ad2f1dc436ab51583543d25d2c210cab478645d47078d30d129a66ab0e281d76",
- "vmlinux" :
- "e34313e4325ff21deaa3d38a502aa09a373ef62b9bd4d7f8f29388b688225c55" }
-
- self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
-
- def test_x86_64(self):
- """
- :avocado: tags=arch:x86_64
- :avocado: tags=machine:q35
- :avocado: tags=cpu:Nehalem
- :avocado: tags=tuxboot:x86_64
- :avocado: tags=image:bzImage
- :avocado: tags=root:sda
- :avocado: tags=shutdown:nowait
- """
- sums = { "bzImage" :
- "2bc7480a669ee9b6b82500a236aba0c54233debe98cb968268fa230f52f03461",
- "rootfs.ext4.zst" :
- "b72ac729769b8f51c6dffb221113c9a063c774dbe1d66af30eb593c4e9999b4b" }
-
- self.common_tuxrun(csums=sums,
- drive="driver=ide-hd,bus=ide.0,unit=0")
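
To make the tag-driven plumbing concrete, this is roughly the argument set that prepare_run() builds for test_arm64 (console:ttyAMA0, with image and root left at their 'Image'/'vda' defaults); the kernel and workdir paths below are placeholders for the Avocado asset cache and work directory:

kernel = '/path/to/cached/Image'          # fetched from storage.tuxboot.com/20230331/arm64/
rootfs = '/path/to/workdir/rootfs.ext4'   # rootfs.ext4.zst decompressed with zstd -d
kcmd_line = 'printk.time=0 root=/dev/vda console=ttyAMA0'
blockdev = f'driver=raw,file.driver=file,file.filename={rootfs},node-name=hd0'
args = ['-kernel', kernel,
        '-append', kcmd_line,
        '-blockdev', blockdev,
        '-device', 'virtio-blk-device,drive=hd0']
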
diff --git a/tests/avocado/version.py b/tests/avocado/version.py
deleted file mode 100644
index c613956..0000000
--- a/tests/avocado/version.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Version check example test
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-
-from avocado_qemu import QemuSystemTest
-
-
-class Version(QemuSystemTest):
- """
- :avocado: tags=quick
- :avocado: tags=machine:none
- """
- def test_qmp_human_info_version(self):
- self.vm.add_args('-nodefaults')
- self.vm.launch()
- res = self.vm.cmd('human-monitor-command',
- command_line='info version')
- self.assertRegex(res, r'^(\d+\.\d+\.\d)')
diff --git a/tests/avocado/virtio-gpu.py b/tests/avocado/virtio-gpu.py
deleted file mode 100644
index 6091f61..0000000
--- a/tests/avocado/virtio-gpu.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# virtio-gpu tests
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-
-from avocado_qemu import BUILD_DIR
-from avocado_qemu import QemuSystemTest
-from avocado_qemu import wait_for_console_pattern
-from avocado_qemu import exec_command_and_wait_for_pattern
-from avocado_qemu import is_readable_executable_file
-
-from qemu.utils import kvm_available
-
-import os
-import socket
-import subprocess
-
-
-def pick_default_vug_bin():
- relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
- if is_readable_executable_file(relative_path):
- return relative_path
-
- bld_dir_path = os.path.join(BUILD_DIR, relative_path)
- if is_readable_executable_file(bld_dir_path):
- return bld_dir_path
-
-
-class VirtioGPUx86(QemuSystemTest):
- """
- :avocado: tags=virtio-gpu
- :avocado: tags=arch:x86_64
- :avocado: tags=cpu:host
- """
-
- KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
- KERNEL_URL = (
- "https://archives.fedoraproject.org/pub/archive/fedora"
- "/linux/releases/33/Everything/x86_64/os/images"
- "/pxeboot/vmlinuz"
- )
- KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf'
- INITRD_URL = (
- "https://archives.fedoraproject.org/pub/archive/fedora"
- "/linux/releases/33/Everything/x86_64/os/images"
- "/pxeboot/initrd.img"
- )
- INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9'
-
- def wait_for_console_pattern(self, success_message, vm=None):
- wait_for_console_pattern(
- self,
- success_message,
- failure_message="Kernel panic - not syncing",
- vm=vm,
- )
-
- def test_virtio_vga_virgl(self):
- """
- :avocado: tags=device:virtio-vga-gl
- """
- # FIXME: should check presence of virtio, virgl etc
- self.require_accelerator('kvm')
-
- kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
- initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
-
- self.vm.set_console()
- self.vm.add_args("-m", "2G")
- self.vm.add_args("-machine", "pc,accel=kvm")
- self.vm.add_args("-device", "virtio-vga-gl")
- self.vm.add_args("-display", "egl-headless")
- self.vm.add_args(
- "-kernel",
- kernel_path,
- "-initrd",
- initrd_path,
- "-append",
- self.KERNEL_COMMAND_LINE,
- )
- try:
- self.vm.launch()
-        except Exception:
- # TODO: probably fails because we are missing the VirGL features
- self.cancel("VirGL not enabled?")
-
- self.wait_for_console_pattern("as init process")
- exec_command_and_wait_for_pattern(
- self, "/usr/sbin/modprobe virtio_gpu", ""
- )
- self.wait_for_console_pattern("features: +virgl +edid")
-
- def test_vhost_user_vga_virgl(self):
- """
- :avocado: tags=device:vhost-user-vga
- """
- # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
- self.require_accelerator('kvm')
-
- vug = pick_default_vug_bin()
- if not vug:
- self.cancel("Could not find vhost-user-gpu")
-
- kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
- initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
-
- # Create socketpair to connect proxy and remote processes
- qemu_sock, vug_sock = socket.socketpair(
- socket.AF_UNIX, socket.SOCK_STREAM
- )
- os.set_inheritable(qemu_sock.fileno(), True)
- os.set_inheritable(vug_sock.fileno(), True)
-
- self._vug_log_path = os.path.join(
- self.logdir, "vhost-user-gpu.log"
- )
- self._vug_log_file = open(self._vug_log_path, "wb")
- self.log.info('Complete vhost-user-gpu.log file can be '
- 'found at %s', self._vug_log_path)
-
- vugp = subprocess.Popen(
- [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
- stdin=subprocess.DEVNULL,
- stdout=self._vug_log_file,
- stderr=subprocess.STDOUT,
- shell=False,
- close_fds=False,
- )
-
- self.vm.set_console()
- self.vm.add_args("-m", "2G")
- self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
- self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
- self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
- self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
- self.vm.add_args("-display", "egl-headless")
- self.vm.add_args(
- "-kernel",
- kernel_path,
- "-initrd",
- initrd_path,
- "-append",
- self.KERNEL_COMMAND_LINE,
- )
- try:
- self.vm.launch()
-        except Exception:
- # TODO: probably fails because we are missing the VirGL features
- self.cancel("VirGL not enabled?")
- self.wait_for_console_pattern("as init process")
- exec_command_and_wait_for_pattern(self, "/usr/sbin/modprobe virtio_gpu",
- "features: +virgl +edid")
- self.vm.shutdown()
- qemu_sock.close()
- vugp.terminate()
- vugp.wait()
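
The part of test_vhost_user_vga_virgl worth keeping in mind for similar vhost-user tests is the fd hand-off: both ends of the socketpair are marked inheritable before the helper and QEMU start, and each process receives its end by file-descriptor number. A stripped-down sketch of just that hand-off (the vhost-user-gpu path is a placeholder):

import os
import socket

qemu_sock, vug_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
os.set_inheritable(qemu_sock.fileno(), True)
os.set_inheritable(vug_sock.fileno(), True)

# vhost-user-gpu gets one end by fd number; it survives exec only because the
# test passes close_fds=False to subprocess.Popen
vug_cmd = ['/path/to/vhost-user-gpu', '--virgl', '--fd=%d' % vug_sock.fileno()]

# QEMU gets the other end the same way, through a socket chardev
qemu_args = ['-chardev', 'socket,id=vug,fd=%d' % qemu_sock.fileno(),
             '-device', 'vhost-user-vga,chardev=vug']
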
diff --git a/tests/avocado/virtio_version.py b/tests/avocado/virtio_version.py
deleted file mode 100644
index afe5e82..0000000
--- a/tests/avocado/virtio_version.py
+++ /dev/null
@@ -1,175 +0,0 @@
-"""
-Check compatibility of virtio device types
-"""
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Eduardo Habkost <ehabkost@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-import sys
-import os
-
-from qemu.machine import QEMUMachine
-from avocado_qemu import QemuSystemTest
-
-# Virtio Device IDs:
-VIRTIO_NET = 1
-VIRTIO_BLOCK = 2
-VIRTIO_CONSOLE = 3
-VIRTIO_RNG = 4
-VIRTIO_BALLOON = 5
-VIRTIO_RPMSG = 7
-VIRTIO_SCSI = 8
-VIRTIO_9P = 9
-VIRTIO_RPROC_SERIAL = 11
-VIRTIO_CAIF = 12
-VIRTIO_GPU = 16
-VIRTIO_INPUT = 18
-VIRTIO_VSOCK = 19
-VIRTIO_CRYPTO = 20
-
-PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4
-
-# Device IDs for legacy/transitional devices:
-PCI_LEGACY_DEVICE_IDS = {
- VIRTIO_NET: 0x1000,
- VIRTIO_BLOCK: 0x1001,
- VIRTIO_BALLOON: 0x1002,
- VIRTIO_CONSOLE: 0x1003,
- VIRTIO_SCSI: 0x1004,
- VIRTIO_RNG: 0x1005,
- VIRTIO_9P: 0x1009,
- VIRTIO_VSOCK: 0x1012,
-}
-
-def pci_modern_device_id(virtio_devid):
- return virtio_devid + 0x1040
-
-def devtype_implements(vm, devtype, implements):
- return devtype in [d['name'] for d in
- vm.cmd('qom-list-types', implements=implements)]
-
-def get_pci_interfaces(vm, devtype):
- interfaces = ('pci-express-device', 'conventional-pci-device')
- return [i for i in interfaces if devtype_implements(vm, devtype, i)]
-
-class VirtioVersionCheck(QemuSystemTest):
- """
- Check if virtio-version-specific device types result in the
- same device tree created by `disable-modern` and
- `disable-legacy`.
-
- :avocado: tags=arch:x86_64
- """
-
- # just in case there are failures, show larger diff:
- maxDiff = 4096
-
- def run_device(self, devtype, opts=None, machine='pc'):
- """
- Run QEMU with `-device DEVTYPE`, return device info from `query-pci`
- """
- with QEMUMachine(self.qemu_bin) as vm:
- vm.set_machine(machine)
- if opts:
- devtype += ',' + opts
- vm.add_args('-device', '%s,id=devfortest' % (devtype))
- vm.add_args('-S')
- vm.launch()
-
- pcibuses = vm.cmd('query-pci')
- alldevs = [dev for bus in pcibuses for dev in bus['devices']]
- devfortest = [dev for dev in alldevs
- if dev['qdev_id'] == 'devfortest']
- return devfortest[0], get_pci_interfaces(vm, devtype)
-
-
- def assert_devids(self, dev, devid, non_transitional=False):
- self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET)
- self.assertEqual(dev['id']['device'], devid)
- if non_transitional:
- self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f)
- self.assertGreaterEqual(dev['id']['subsystem'], 0x40)
-
- def check_all_variants(self, qemu_devtype, virtio_devid):
- """Check if a virtio device type and its variants behave as expected"""
- # Force modern mode:
- dev_modern, _ = self.run_device(qemu_devtype,
- 'disable-modern=off,disable-legacy=on')
- self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
- non_transitional=True)
-
- # <prefix>-non-transitional device types should be 100% equivalent to
- # <prefix>,disable-modern=off,disable-legacy=on
- dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype))
- self.assertEqual(dev_modern, dev_1_0)
-
- # Force transitional mode:
- dev_trans, _ = self.run_device(qemu_devtype,
- 'disable-modern=off,disable-legacy=off')
- self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid])
-
- # Force legacy mode:
- dev_legacy, _ = self.run_device(qemu_devtype,
- 'disable-modern=on,disable-legacy=off')
- self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid])
-
- # No options: default to transitional on PC machine-type:
- no_opts_pc, generic_ifaces = self.run_device(qemu_devtype)
- self.assertEqual(dev_trans, no_opts_pc)
-
- #TODO: check if plugging on a PCI Express bus will make the
- # device non-transitional
- #no_opts_q35 = self.run_device(qemu_devtype, machine='q35')
- #self.assertEqual(dev_modern, no_opts_q35)
-
- # <prefix>-transitional device types should be 100% equivalent to
- # <prefix>,disable-modern=off,disable-legacy=off
-        dev_transitional, trans_ifaces = self.run_device('%s-transitional' % (qemu_devtype))
-        self.assertEqual(dev_trans, dev_transitional)
-
- # ensure the interface information is correct:
- self.assertIn('conventional-pci-device', generic_ifaces)
- self.assertIn('pci-express-device', generic_ifaces)
-
- self.assertIn('conventional-pci-device', nt_ifaces)
- self.assertIn('pci-express-device', nt_ifaces)
-
- self.assertIn('conventional-pci-device', trans_ifaces)
- self.assertNotIn('pci-express-device', trans_ifaces)
-
-
- def test_conventional_devs(self):
- self.check_all_variants('virtio-net-pci', VIRTIO_NET)
- # virtio-blk requires 'driver' parameter
- #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK)
- self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE)
- self.check_all_variants('virtio-rng-pci', VIRTIO_RNG)
- self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON)
- self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI)
- # virtio-9p requires 'fsdev' parameter
- #self.check_all_variants('virtio-9p-pci', VIRTIO_9P)
-
- def check_modern_only(self, qemu_devtype, virtio_devid):
- """Check if a modern-only virtio device type behaves as expected"""
- # Force modern mode:
- dev_modern, _ = self.run_device(qemu_devtype,
- 'disable-modern=off,disable-legacy=on')
- self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
- non_transitional=True)
-
- # No options: should be modern anyway
- dev_no_opts, ifaces = self.run_device(qemu_devtype)
- self.assertEqual(dev_modern, dev_no_opts)
-
- self.assertIn('conventional-pci-device', ifaces)
- self.assertIn('pci-express-device', ifaces)
-
- def test_modern_only_devs(self):
- self.check_modern_only('virtio-vga', VIRTIO_GPU)
- self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU)
- self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT)
- self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT)
- self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT)
diff --git a/tests/avocado/virtiofs_submounts.py.data/cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/cleanup.sh
deleted file mode 100644
index 2a6579a..0000000
--- a/tests/avocado/virtiofs_submounts.py.data/cleanup.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-function print_usage()
-{
- if [ -n "$2" ]; then
- echo "Error: $2"
- echo
- fi
- echo "Usage: $1 <scratch dir>"
-}
-
-scratch_dir=$1
-if [ -z "$scratch_dir" ]; then
- print_usage "$0" 'Scratch dir not given' >&2
- exit 1
-fi
-
-cd "$scratch_dir/share" || exit 1
-mps=(mnt*)
-mp_i=0
-for mp in "${mps[@]}"; do
- mp_i=$((mp_i + 1))
- printf "Unmounting %i/%i...\r" "$mp_i" "${#mps[@]}"
-
- sudo umount -R "$mp"
- rm -rf "$mp"
-done
-echo
-
-rm some-file
-cd ..
-rmdir share
-
-imgs=(fs*.img)
-img_i=0
-for img in "${imgs[@]}"; do
- img_i=$((img_i + 1))
- printf "Detaching and deleting %i/%i...\r" "$img_i" "${#imgs[@]}"
-
- dev=$(losetup -j "$img" | sed -e 's/:.*//')
- sudo losetup -d "$dev"
- rm -f "$img"
-done
-echo
-
-echo 'Done.'
diff --git a/tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh
deleted file mode 100644
index 729cb2d..0000000
--- a/tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-function print_usage()
-{
- if [ -n "$2" ]; then
- echo "Error: $2"
- echo
- fi
- echo "Usage: $1 <scratch dir>"
-}
-
-scratch_dir=$1
-if [ -z "$scratch_dir" ]; then
- print_usage "$0" 'Scratch dir not given' >&2
- exit 1
-fi
-
-cd "$scratch_dir/share" || exit 1
-
-mps=(mnt*)
-mp_i=0
-for mp in "${mps[@]}"; do
- mp_i=$((mp_i + 1))
- printf "Unmounting %i/%i...\r" "$mp_i" "${#mps[@]}"
-
- sudo umount -R "$mp"
-done
-echo
-
-echo 'Done.'
diff --git a/tests/avocado/virtiofs_submounts.py.data/guest.sh b/tests/avocado/virtiofs_submounts.py.data/guest.sh
deleted file mode 100644
index 59ba40f..0000000
--- a/tests/avocado/virtiofs_submounts.py.data/guest.sh
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/bash
-
-function print_usage()
-{
- if [ -n "$2" ]; then
- echo "Error: $2"
- echo
- fi
- echo "Usage: $1 <shared dir>"
- echo '(The shared directory is the "share" directory in the scratch' \
- 'directory)'
-}
-
-shared_dir=$1
-if [ -z "$shared_dir" ]; then
- print_usage "$0" 'Shared dir not given' >&2
- exit 1
-fi
-
-cd "$shared_dir"
-
-# FIXME: This should not be necessary, but it is. In order for all
-# submounts to be proper mount points, we need to visit them.
-# (Before we visit them, they will not be auto-mounted, and so just
-# appear as normal directories, with the catch that their st_ino will
-# be the st_ino of the filesystem they host, while the st_dev will
-# still be the st_dev of the parent.)
-# `find` does not work, because it will refuse to touch the mount
-# points as long as they are not mounted; their st_dev being shared
-# with the parent and st_ino just being the root node's inode ID
-# will practically ensure that this node exists elsewhere on the
-# filesystem, and `find` is required to recognize loops and not to
-# follow them.
-# Thus, we have to manually visit all nodes first.
-
-mnt_i=0
-
-function recursively_visit()
-{
- pushd "$1" >/dev/null
- for entry in *; do
- if [[ "$entry" == mnt* ]]; then
- mnt_i=$((mnt_i + 1))
- printf "Triggering auto-mount $mnt_i...\r"
- fi
-
- if [ -d "$entry" ]; then
- recursively_visit "$entry"
- fi
- done
- popd >/dev/null
-}
-
-recursively_visit .
-echo
-
-
-if [ -n "$(find -name not-mounted)" ]; then
- echo "Error: not-mounted files visible on mount points:" >&2
- find -name not-mounted >&2
- exit 1
-fi
-
-if [ ! -f some-file -o "$(cat some-file)" != 'root' ]; then
- echo "Error: Bad file in the share root" >&2
- exit 1
-fi
-
-shopt -s nullglob
-
-function check_submounts()
-{
- local base_path=$1
-
- for mp in mnt*; do
- printf "Checking submount %i...\r" "$((${#devs[@]} + 1))"
-
- mp_i=$(echo "$mp" | sed -e 's/mnt//')
- dev=$(stat -c '%D' "$mp")
-
- if [ -n "${devs[mp_i]}" ]; then
- echo "Error: $mp encountered twice" >&2
- exit 1
- fi
- devs[mp_i]=$dev
-
- pushd "$mp" >/dev/null
- path="$base_path$mp"
- while true; do
- expected_content="$(printf '%s\n%s\n' "$mp_i" "$path")"
- if [ ! -f some-file ]; then
- echo "Error: $PWD/some-file does not exist" >&2
- exit 1
- fi
-
- if [ "$(cat some-file)" != "$expected_content" ]; then
- echo "Error: Bad content in $PWD/some-file:" >&2
- echo '--- found ---'
- cat some-file
- echo '--- expected ---'
- echo "$expected_content"
- exit 1
- fi
- if [ "$(stat -c '%D' some-file)" != "$dev" ]; then
- echo "Error: $PWD/some-file has the wrong device ID" >&2
- exit 1
- fi
-
- if [ -d sub ]; then
- if [ "$(stat -c '%D' sub)" != "$dev" ]; then
- echo "Error: $PWD/some-file has the wrong device ID" >&2
- exit 1
- fi
- cd sub
- path="$path/sub"
- else
- if [ -n "$(echo mnt*)" ]; then
- check_submounts "$path/"
- fi
- break
- fi
- done
- popd >/dev/null
- done
-}
-
-root_dev=$(stat -c '%D' some-file)
-devs=()
-check_submounts ''
-echo
-
-reused_devs=$(echo "$root_dev ${devs[@]}" | tr ' ' '\n' | sort | uniq -d)
-if [ -n "$reused_devs" ]; then
- echo "Error: Reused device IDs: $reused_devs" >&2
- exit 1
-fi
-
-echo "Test passed for ${#devs[@]} submounts."
diff --git a/tests/avocado/virtiofs_submounts.py.data/host.sh b/tests/avocado/virtiofs_submounts.py.data/host.sh
deleted file mode 100644
index d8a9afe..0000000
--- a/tests/avocado/virtiofs_submounts.py.data/host.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/bin/bash
-
-mount_count=128
-
-function print_usage()
-{
- if [ -n "$2" ]; then
- echo "Error: $2"
- echo
- fi
- echo "Usage: $1 <scratch dir> [seed]"
- echo "(If no seed is given, it will be randomly generated.)"
-}
-
-scratch_dir=$1
-if [ -z "$scratch_dir" ]; then
- print_usage "$0" 'No scratch dir given' >&2
- exit 1
-fi
-
-if [ ! -d "$scratch_dir" ]; then
- print_usage "$0" "$scratch_dir is not a directory" >&2
- exit 1
-fi
-
-seed=$2
-if [ -z "$seed" ]; then
- seed=$RANDOM
-fi
-RANDOM=$seed
-
-echo "Seed: $seed"
-
-set -e
-shopt -s nullglob
-
-cd "$scratch_dir"
-if [ -d share ]; then
- echo 'Error: This directory seems to be in use already' >&2
- exit 1
-fi
-
-for ((i = 0; i < $mount_count; i++)); do
- printf "Setting up fs %i/%i...\r" "$((i + 1))" "$mount_count"
-
- rm -f fs$i.img
- truncate -s 512M fs$i.img
- mkfs.xfs -q fs$i.img
- devs[i]=$(sudo losetup -f --show fs$i.img)
-done
-echo
-
-top_level_mounts=$((RANDOM % mount_count + 1))
-
-mkdir -p share
-echo 'root' > share/some-file
-
-for ((i = 0; i < $top_level_mounts; i++)); do
- printf "Mounting fs %i/%i...\r" "$((i + 1))" "$mount_count"
-
- mkdir -p share/mnt$i
- touch share/mnt$i/not-mounted
- sudo mount "${devs[i]}" share/mnt$i
- sudo chown "$(id -u):$(id -g)" share/mnt$i
-
- pushd share/mnt$i >/dev/null
- path=mnt$i
- nesting=$((RANDOM % 4))
- for ((j = 0; j < $nesting; j++)); do
- cat > some-file <<EOF
-$i
-$path
-EOF
- mkdir sub
- cd sub
- path="$path/sub"
- done
-cat > some-file <<EOF
-$i
-$path
-EOF
- popd >/dev/null
-done
-
-for ((; i < $mount_count; i++)); do
- printf "Mounting fs %i/%i...\r" "$((i + 1))" "$mount_count"
-
- mp_i=$((i % top_level_mounts))
-
- pushd share/mnt$mp_i >/dev/null
- path=mnt$mp_i
- while true; do
- sub_mp="$(echo mnt*)"
- if cd sub 2>/dev/null; then
- path="$path/sub"
- elif [ -n "$sub_mp" ] && cd "$sub_mp" 2>/dev/null; then
- path="$path/$sub_mp"
- else
- break
- fi
- done
- mkdir mnt$i
- touch mnt$i/not-mounted
- sudo mount "${devs[i]}" mnt$i
- sudo chown "$(id -u):$(id -g)" mnt$i
-
- cd mnt$i
- path="$path/mnt$i"
- nesting=$((RANDOM % 4))
- for ((j = 0; j < $nesting; j++)); do
- cat > some-file <<EOF
-$i
-$path
-EOF
- mkdir sub
- cd sub
- path="$path/sub"
- done
- cat > some-file <<EOF
-$i
-$path
-EOF
- popd >/dev/null
-done
-echo
-
-echo 'Done.'
diff --git a/tests/avocado/vnc.py b/tests/avocado/vnc.py
deleted file mode 100644
index 862c899..0000000
--- a/tests/avocado/vnc.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Simple functional tests for VNC functionality
-#
-# Copyright (c) 2018 Red Hat, Inc.
-#
-# Author:
-# Cleber Rosa <crosa@redhat.com>
-#
-# This work is licensed under the terms of the GNU GPL, version 2 or
-# later. See the COPYING file in the top-level directory.
-
-import socket
-from typing import List
-
-from avocado_qemu import QemuSystemTest
-
-
-VNC_ADDR = '127.0.0.1'
-VNC_PORT_START = 32768
-VNC_PORT_END = VNC_PORT_START + 1024
-
-
-def check_bind(port: int) -> bool:
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
- try:
- sock.bind((VNC_ADDR, port))
- except OSError:
- return False
-
- return True
-
-
-def check_connect(port: int) -> bool:
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
- try:
- sock.connect((VNC_ADDR, port))
- except ConnectionRefusedError:
- return False
-
- return True
-
-
-def find_free_ports(count: int) -> List[int]:
- result = []
- for port in range(VNC_PORT_START, VNC_PORT_END):
- if check_bind(port):
- result.append(port)
- if len(result) >= count:
- break
- assert len(result) == count
- return result
-
-
-class Vnc(QemuSystemTest):
- """
- :avocado: tags=vnc,quick
- :avocado: tags=machine:none
- """
- def test_no_vnc(self):
- self.vm.add_args('-nodefaults', '-S')
- self.vm.launch()
- self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled'])
-
- def test_no_vnc_change_password(self):
- self.vm.add_args('-nodefaults', '-S')
- self.vm.launch()
- self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled'])
- set_password_response = self.vm.qmp('change-vnc-password',
- password='new_password')
- self.assertIn('error', set_password_response)
- self.assertEqual(set_password_response['error']['class'],
- 'GenericError')
- self.assertEqual(set_password_response['error']['desc'],
- 'Could not set password')
-
- def test_change_password_requires_a_password(self):
- self.vm.add_args('-nodefaults', '-S', '-vnc', ':0')
- self.vm.launch()
- self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled'])
- set_password_response = self.vm.qmp('change-vnc-password',
- password='new_password')
- self.assertIn('error', set_password_response)
- self.assertEqual(set_password_response['error']['class'],
- 'GenericError')
- self.assertEqual(set_password_response['error']['desc'],
- 'Could not set password')
-
- def test_change_password(self):
- self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password=on')
- self.vm.launch()
- self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled'])
- self.vm.cmd('change-vnc-password',
- password='new_password')
-
- def test_change_listen(self):
- a, b, c = find_free_ports(3)
- self.assertFalse(check_connect(a))
- self.assertFalse(check_connect(b))
- self.assertFalse(check_connect(c))
-
- self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}')
- self.vm.launch()
- self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a))
- self.assertTrue(check_connect(a))
- self.assertFalse(check_connect(b))
- self.assertFalse(check_connect(c))
-
- self.vm.cmd('display-update', type='vnc',
- addresses=[{'type': 'inet', 'host': VNC_ADDR,
- 'port': str(b)},
- {'type': 'inet', 'host': VNC_ADDR,
- 'port': str(c)}])
- self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b))
- self.assertFalse(check_connect(a))
- self.assertTrue(check_connect(b))
- self.assertTrue(check_connect(c))
diff --git a/tests/avocado/x86_cpu_model_versions.py b/tests/avocado/x86_cpu_model_versions.py
deleted file mode 100644
index 11101e0..0000000
--- a/tests/avocado/x86_cpu_model_versions.py
+++ /dev/null
@@ -1,362 +0,0 @@
-#
-# Basic validation of x86 versioned CPU models and CPU model aliases
-#
-# Copyright (c) 2019 Red Hat Inc
-#
-# Author:
-# Eduardo Habkost <ehabkost@redhat.com>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-
-import avocado_qemu
-import re
-
-class X86CPUModelAliases(avocado_qemu.QemuSystemTest):
- """
- Validation of PC CPU model versions and CPU model aliases
-
- :avocado: tags=arch:x86_64
- """
- def validate_aliases(self, cpus):
- for c in cpus.values():
- if 'alias-of' in c:
- # all aliases must point to a valid CPU model name:
- self.assertIn(c['alias-of'], cpus,
- '%s.alias-of (%s) is not a valid CPU model name' % (c['name'], c['alias-of']))
- # aliases must not point to aliases
- self.assertNotIn('alias-of', cpus[c['alias-of']],
- '%s.alias-of (%s) points to another alias' % (c['name'], c['alias-of']))
-
- # aliases must not be static
- self.assertFalse(c['static'])
-
- def validate_variant_aliases(self, cpus):
- # -noTSX, -IBRS and -IBPB variants of CPU models are special:
- # they shouldn't have their own versions:
- self.assertNotIn("Haswell-noTSX-v1", cpus,
- "Haswell-noTSX shouldn't be versioned")
- self.assertNotIn("Broadwell-noTSX-v1", cpus,
- "Broadwell-noTSX shouldn't be versioned")
- self.assertNotIn("Nehalem-IBRS-v1", cpus,
- "Nehalem-IBRS shouldn't be versioned")
- self.assertNotIn("Westmere-IBRS-v1", cpus,
- "Westmere-IBRS shouldn't be versioned")
- self.assertNotIn("SandyBridge-IBRS-v1", cpus,
- "SandyBridge-IBRS shouldn't be versioned")
- self.assertNotIn("IvyBridge-IBRS-v1", cpus,
- "IvyBridge-IBRS shouldn't be versioned")
- self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus,
- "Haswell-noTSX-IBRS shouldn't be versioned")
- self.assertNotIn("Haswell-IBRS-v1", cpus,
- "Haswell-IBRS shouldn't be versioned")
- self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus,
- "Broadwell-noTSX-IBRS shouldn't be versioned")
- self.assertNotIn("Broadwell-IBRS-v1", cpus,
- "Broadwell-IBRS shouldn't be versioned")
- self.assertNotIn("Skylake-Client-IBRS-v1", cpus,
- "Skylake-Client-IBRS shouldn't be versioned")
- self.assertNotIn("Skylake-Server-IBRS-v1", cpus,
- "Skylake-Server-IBRS shouldn't be versioned")
- self.assertNotIn("EPYC-IBPB-v1", cpus,
- "EPYC-IBPB shouldn't be versioned")
-
- def test_4_0_alias_compatibility(self):
- """
-        Check that pc-*-4.0 does not report unversioned CPU models as aliases
-
- :avocado: tags=machine:pc-i440fx-4.0
- """
- # pc-*-4.0 won't expose non-versioned CPU models as aliases
- # We do this to help management software to keep compatibility
- # with older QEMU versions that didn't have the versioned CPU model
- self.vm.add_args('-S')
- self.vm.launch()
- cpus = dict((m['name'], m) for m in
- self.vm.cmd('query-cpu-definitions'))
-
- self.assertFalse(cpus['Cascadelake-Server']['static'],
- 'unversioned Cascadelake-Server CPU model must not be static')
- self.assertNotIn('alias-of', cpus['Cascadelake-Server'],
- 'Cascadelake-Server must not be an alias')
- self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
- 'Cascadelake-Server-v1 must not be an alias')
-
- self.assertFalse(cpus['qemu64']['static'],
- 'unversioned qemu64 CPU model must not be static')
- self.assertNotIn('alias-of', cpus['qemu64'],
- 'qemu64 must not be an alias')
- self.assertNotIn('alias-of', cpus['qemu64-v1'],
- 'qemu64-v1 must not be an alias')
-
- self.validate_variant_aliases(cpus)
-
- # On pc-*-4.0, no CPU model should be reported as an alias:
- for name,c in cpus.items():
- self.assertNotIn('alias-of', c, "%s shouldn't be an alias" % (name))
-
- def test_4_1_alias(self):
- """
-        Check that each unversioned CPU model is an alias pointing to the right version
-
- :avocado: tags=machine:pc-i440fx-4.1
- """
- self.vm.add_args('-S')
- self.vm.launch()
-
- cpus = dict((m['name'], m) for m in
- self.vm.cmd('query-cpu-definitions'))
-
- self.assertFalse(cpus['Cascadelake-Server']['static'],
- 'unversioned Cascadelake-Server CPU model must not be static')
- self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'),
- 'Cascadelake-Server-v1',
- 'Cascadelake-Server must be an alias of Cascadelake-Server-v1')
- self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
- 'Cascadelake-Server-v1 must not be an alias')
-
- self.assertFalse(cpus['qemu64']['static'],
- 'unversioned qemu64 CPU model must not be static')
- self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1',
- 'qemu64 must be an alias of qemu64-v1')
- self.assertNotIn('alias-of', cpus['qemu64-v1'],
- 'qemu64-v1 must not be an alias')
-
- self.validate_variant_aliases(cpus)
-
- # On pc-*-4.1, -noTSX and -IBRS models should be aliases:
- self.assertEqual(cpus["Haswell"].get('alias-of'),
- "Haswell-v1",
- "Haswell must be an alias")
- self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'),
- "Haswell-v2",
- "Haswell-noTSX must be an alias")
- self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'),
- "Haswell-v3",
- "Haswell-IBRS must be an alias")
- self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'),
- "Haswell-v4",
- "Haswell-noTSX-IBRS must be an alias")
-
- self.assertEqual(cpus["Broadwell"].get('alias-of'),
- "Broadwell-v1",
- "Broadwell must be an alias")
- self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'),
- "Broadwell-v2",
- "Broadwell-noTSX must be an alias")
- self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'),
- "Broadwell-v3",
- "Broadwell-IBRS must be an alias")
- self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'),
- "Broadwell-v4",
- "Broadwell-noTSX-IBRS must be an alias")
-
- self.assertEqual(cpus["Nehalem"].get('alias-of'),
- "Nehalem-v1",
- "Nehalem must be an alias")
- self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'),
- "Nehalem-v2",
- "Nehalem-IBRS must be an alias")
-
- self.assertEqual(cpus["Westmere"].get('alias-of'),
- "Westmere-v1",
- "Westmere must be an alias")
- self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'),
- "Westmere-v2",
- "Westmere-IBRS must be an alias")
-
- self.assertEqual(cpus["SandyBridge"].get('alias-of'),
- "SandyBridge-v1",
- "SandyBridge must be an alias")
- self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'),
- "SandyBridge-v2",
- "SandyBridge-IBRS must be an alias")
-
- self.assertEqual(cpus["IvyBridge"].get('alias-of'),
- "IvyBridge-v1",
- "IvyBridge must be an alias")
- self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'),
- "IvyBridge-v2",
- "IvyBridge-IBRS must be an alias")
-
- self.assertEqual(cpus["Skylake-Client"].get('alias-of'),
- "Skylake-Client-v1",
- "Skylake-Client must be an alias")
- self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'),
- "Skylake-Client-v2",
- "Skylake-Client-IBRS must be an alias")
-
- self.assertEqual(cpus["Skylake-Server"].get('alias-of'),
- "Skylake-Server-v1",
- "Skylake-Server must be an alias")
- self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'),
- "Skylake-Server-v2",
- "Skylake-Server-IBRS must be an alias")
-
- self.assertEqual(cpus["EPYC"].get('alias-of'),
- "EPYC-v1",
- "EPYC must be an alias")
- self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'),
- "EPYC-v2",
- "EPYC-IBPB must be an alias")
-
- self.validate_aliases(cpus)
-
- def test_none_alias(self):
- """
-        Check that each unversioned CPU model is an alias pointing to some version
-
- :avocado: tags=machine:none
- """
- self.vm.add_args('-S')
- self.vm.launch()
-
- cpus = dict((m['name'], m) for m in
- self.vm.cmd('query-cpu-definitions'))
-
- self.assertFalse(cpus['Cascadelake-Server']['static'],
- 'unversioned Cascadelake-Server CPU model must not be static')
- self.assertTrue(re.match('Cascadelake-Server-v[0-9]+', cpus['Cascadelake-Server']['alias-of']),
- 'Cascadelake-Server must be an alias of versioned CPU model')
- self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
- 'Cascadelake-Server-v1 must not be an alias')
-
- self.assertFalse(cpus['qemu64']['static'],
- 'unversioned qemu64 CPU model must not be static')
- self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']),
- 'qemu64 must be an alias of versioned CPU model')
- self.assertNotIn('alias-of', cpus['qemu64-v1'],
- 'qemu64-v1 must not be an alias')
-
- self.validate_aliases(cpus)
-
-
-class CascadelakeArchCapabilities(avocado_qemu.QemuSystemTest):
- """
- Validation of Cascadelake arch-capabilities
-
- :avocado: tags=arch:x86_64
- """
- def get_cpu_prop(self, prop):
- cpu_path = self.vm.cmd('query-cpus-fast')[0].get('qom-path')
- return self.vm.cmd('qom-get', path=cpu_path, property=prop)
-
- def test_4_1(self):
- """
- :avocado: tags=machine:pc-i440fx-4.1
- :avocado: tags=cpu:Cascadelake-Server
- """
- # machine-type only:
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server,x-force-features=on,check=off,'
- 'enforce=off')
- self.vm.launch()
- self.assertFalse(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.1 + Cascadelake-Server should not have arch-capabilities')
-
- def test_4_0(self):
- """
- :avocado: tags=machine:pc-i440fx-4.0
- :avocado: tags=cpu:Cascadelake-Server
- """
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server,x-force-features=on,check=off,'
- 'enforce=off')
- self.vm.launch()
- self.assertFalse(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.0 + Cascadelake-Server should not have arch-capabilities')
-
- def test_set_4_0(self):
- """
- :avocado: tags=machine:pc-i440fx-4.0
- :avocado: tags=cpu:Cascadelake-Server
- """
- # command line must override machine-type if CPU model is not versioned:
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server,x-force-features=on,check=off,'
- 'enforce=off,+arch-capabilities')
- self.vm.launch()
- self.assertTrue(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.0 + Cascadelake-Server,+arch-capabilities should have arch-capabilities')
-
- def test_unset_4_1(self):
- """
- :avocado: tags=machine:pc-i440fx-4.1
- :avocado: tags=cpu:Cascadelake-Server
- """
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server,x-force-features=on,check=off,'
- 'enforce=off,-arch-capabilities')
- self.vm.launch()
- self.assertFalse(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.1 + Cascadelake-Server,-arch-capabilities should not have arch-capabilities')
-
- def test_v1_4_0(self):
- """
- :avocado: tags=machine:pc-i440fx-4.0
- :avocado: tags=cpu:Cascadelake-Server
- """
- # versioned CPU model overrides machine-type:
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server-v1,x-force-features=on,check=off,'
- 'enforce=off')
- self.vm.launch()
- self.assertFalse(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.0 + Cascadelake-Server-v1 should not have arch-capabilities')
-
- def test_v2_4_0(self):
- """
- :avocado: tags=machine:pc-i440fx-4.0
- :avocado: tags=cpu:Cascadelake-Server
- """
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server-v2,x-force-features=on,check=off,'
- 'enforce=off')
- self.vm.launch()
- self.assertTrue(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.0 + Cascadelake-Server-v2 should have arch-capabilities')
-
- def test_v1_set_4_0(self):
- """
- :avocado: tags=machine:pc-i440fx-4.0
- :avocado: tags=cpu:Cascadelake-Server
- """
- # command line must override machine-type and versioned CPU model:
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server-v1,x-force-features=on,check=off,'
- 'enforce=off,+arch-capabilities')
- self.vm.launch()
- self.assertTrue(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.0 + Cascadelake-Server-v1,+arch-capabilities should have arch-capabilities')
-
- def test_v2_unset_4_1(self):
- """
- :avocado: tags=machine:pc-i440fx-4.1
- :avocado: tags=cpu:Cascadelake-Server
- """
- self.vm.add_args('-S')
- self.set_vm_arg('-cpu',
- 'Cascadelake-Server-v2,x-force-features=on,check=off,'
- 'enforce=off,-arch-capabilities')
- self.vm.launch()
- self.assertFalse(self.get_cpu_prop('arch-capabilities'),
- 'pc-i440fx-4.1 + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities')
diff --git a/tests/bench/benchmark-crypto-akcipher.c b/tests/bench/benchmark-crypto-akcipher.c
index 5e68cb0..0a6e5db 100644
--- a/tests/bench/benchmark-crypto-akcipher.c
+++ b/tests/bench/benchmark-crypto-akcipher.c
@@ -16,19 +16,19 @@
#include "crypto/akcipher.h"
#include "standard-headers/linux/virtio_crypto.h"
-#include "test_akcipher_keys.inc"
+#include "test_akcipher_keys.c.inc"
static QCryptoAkCipher *create_rsa_akcipher(const uint8_t *priv_key,
size_t keylen,
- QCryptoRSAPaddingAlgorithm padding,
- QCryptoHashAlgorithm hash)
+ QCryptoRSAPaddingAlgo padding,
+ QCryptoHashAlgo hash)
{
QCryptoAkCipherOptions opt;
- opt.alg = QCRYPTO_AKCIPHER_ALG_RSA;
+ opt.alg = QCRYPTO_AK_CIPHER_ALGO_RSA;
opt.u.rsa.padding_alg = padding;
opt.u.rsa.hash_alg = hash;
- return qcrypto_akcipher_new(&opt, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ return qcrypto_akcipher_new(&opt, QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
priv_key, keylen, &error_abort);
}
@@ -39,8 +39,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen,
#define SHA1_DGST_LEN 20
#define SIGN_TIMES 10000
#define VERIFY_TIMES 100000
-#define PADDING QCRYPTO_RSA_PADDING_ALG_PKCS1
-#define HASH QCRYPTO_HASH_ALG_SHA1
+#define PADDING QCRYPTO_RSA_PADDING_ALGO_PKCS1
+#define HASH QCRYPTO_HASH_ALGO_SHA1
g_autoptr(QCryptoAkCipher) rsa =
create_rsa_akcipher(priv_key, keylen, PADDING, HASH);
@@ -53,8 +53,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen,
signature = g_new0(uint8_t, key_size / BYTE);
g_test_message("benchmark rsa%zu (%s-%s) sign...", key_size,
- QCryptoRSAPaddingAlgorithm_str(PADDING),
- QCryptoHashAlgorithm_str(HASH));
+ QCryptoRSAPaddingAlgo_str(PADDING),
+ QCryptoHashAlgo_str(HASH));
g_test_timer_start();
for (count = 0; count < SIGN_TIMES; ++count) {
g_assert(qcrypto_akcipher_sign(rsa, dgst, SHA1_DGST_LEN,
@@ -64,14 +64,14 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen,
g_test_timer_elapsed();
g_test_message("rsa%zu (%s-%s) sign %zu times in %.2f seconds,"
" %.2f times/sec ",
- key_size, QCryptoRSAPaddingAlgorithm_str(PADDING),
- QCryptoHashAlgorithm_str(HASH),
+ key_size, QCryptoRSAPaddingAlgo_str(PADDING),
+ QCryptoHashAlgo_str(HASH),
count, g_test_timer_last(),
(double)count / g_test_timer_last());
g_test_message("benchmark rsa%zu (%s-%s) verification...", key_size,
- QCryptoRSAPaddingAlgorithm_str(PADDING),
- QCryptoHashAlgorithm_str(HASH));
+ QCryptoRSAPaddingAlgo_str(PADDING),
+ QCryptoHashAlgo_str(HASH));
g_test_timer_start();
for (count = 0; count < VERIFY_TIMES; ++count) {
g_assert(qcrypto_akcipher_verify(rsa, signature, key_size / BYTE,
@@ -81,8 +81,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen,
g_test_timer_elapsed();
g_test_message("rsa%zu (%s-%s) verify %zu times in %.2f seconds,"
" %.2f times/sec ",
- key_size, QCryptoRSAPaddingAlgorithm_str(PADDING),
- QCryptoHashAlgorithm_str(HASH),
+ key_size, QCryptoRSAPaddingAlgo_str(PADDING),
+ QCryptoHashAlgo_str(HASH),
count, g_test_timer_last(),
(double)count / g_test_timer_last());
}
diff --git a/tests/bench/benchmark-crypto-cipher.c b/tests/bench/benchmark-crypto-cipher.c
index c04f0a0..889a29b 100644
--- a/tests/bench/benchmark-crypto-cipher.c
+++ b/tests/bench/benchmark-crypto-cipher.c
@@ -17,7 +17,7 @@
static void test_cipher_speed(size_t chunk_size,
QCryptoCipherMode mode,
- QCryptoCipherAlgorithm alg)
+ QCryptoCipherAlgo alg)
{
QCryptoCipher *cipher;
Error *err = NULL;
@@ -71,7 +71,7 @@ static void test_cipher_speed(size_t chunk_size,
g_test_timer_elapsed();
g_test_message("enc(%s-%s) chunk %zu bytes %.2f MB/sec ",
- QCryptoCipherAlgorithm_str(alg),
+ QCryptoCipherAlgo_str(alg),
QCryptoCipherMode_str(mode),
chunk_size, (double)total / MiB / g_test_timer_last());
@@ -88,7 +88,7 @@ static void test_cipher_speed(size_t chunk_size,
g_test_timer_elapsed();
g_test_message("dec(%s-%s) chunk %zu bytes %.2f MB/sec ",
- QCryptoCipherAlgorithm_str(alg),
+ QCryptoCipherAlgo_str(alg),
QCryptoCipherMode_str(mode),
chunk_size, (double)total / MiB / g_test_timer_last());
@@ -105,7 +105,7 @@ static void test_cipher_speed_ecb_aes_128(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_ECB,
- QCRYPTO_CIPHER_ALG_AES_128);
+ QCRYPTO_CIPHER_ALGO_AES_128);
}
static void test_cipher_speed_ecb_aes_256(const void *opaque)
@@ -113,7 +113,7 @@ static void test_cipher_speed_ecb_aes_256(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_ECB,
- QCRYPTO_CIPHER_ALG_AES_256);
+ QCRYPTO_CIPHER_ALGO_AES_256);
}
static void test_cipher_speed_cbc_aes_128(const void *opaque)
@@ -121,7 +121,7 @@ static void test_cipher_speed_cbc_aes_128(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_CBC,
- QCRYPTO_CIPHER_ALG_AES_128);
+ QCRYPTO_CIPHER_ALGO_AES_128);
}
static void test_cipher_speed_cbc_aes_256(const void *opaque)
@@ -129,7 +129,7 @@ static void test_cipher_speed_cbc_aes_256(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_CBC,
- QCRYPTO_CIPHER_ALG_AES_256);
+ QCRYPTO_CIPHER_ALGO_AES_256);
}
static void test_cipher_speed_ctr_aes_128(const void *opaque)
@@ -137,7 +137,7 @@ static void test_cipher_speed_ctr_aes_128(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_CTR,
- QCRYPTO_CIPHER_ALG_AES_128);
+ QCRYPTO_CIPHER_ALGO_AES_128);
}
static void test_cipher_speed_ctr_aes_256(const void *opaque)
@@ -145,7 +145,7 @@ static void test_cipher_speed_ctr_aes_256(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_CTR,
- QCRYPTO_CIPHER_ALG_AES_256);
+ QCRYPTO_CIPHER_ALGO_AES_256);
}
static void test_cipher_speed_xts_aes_128(const void *opaque)
@@ -153,7 +153,7 @@ static void test_cipher_speed_xts_aes_128(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_XTS,
- QCRYPTO_CIPHER_ALG_AES_128);
+ QCRYPTO_CIPHER_ALGO_AES_128);
}
static void test_cipher_speed_xts_aes_256(const void *opaque)
@@ -161,7 +161,7 @@ static void test_cipher_speed_xts_aes_256(const void *opaque)
size_t chunk_size = (size_t)opaque;
test_cipher_speed(chunk_size,
QCRYPTO_CIPHER_MODE_XTS,
- QCRYPTO_CIPHER_ALG_AES_256);
+ QCRYPTO_CIPHER_ALGO_AES_256);
}
diff --git a/tests/bench/benchmark-crypto-hash.c b/tests/bench/benchmark-crypto-hash.c
index 927b00b..252098a 100644
--- a/tests/bench/benchmark-crypto-hash.c
+++ b/tests/bench/benchmark-crypto-hash.c
@@ -17,7 +17,7 @@
typedef struct QCryptoHashOpts {
size_t chunk_size;
- QCryptoHashAlgorithm alg;
+ QCryptoHashAlgo alg;
} QCryptoHashOpts;
static void test_hash_speed(const void *opaque)
@@ -49,7 +49,7 @@ static void test_hash_speed(const void *opaque)
g_test_timer_elapsed();
g_test_message("hash(%s): chunk %zu bytes %.2f MB/sec",
- QCryptoHashAlgorithm_str(opts->alg),
+ QCryptoHashAlgo_str(opts->alg),
opts->chunk_size, total / g_test_timer_last());
g_free(out);
@@ -65,14 +65,14 @@ int main(int argc, char **argv)
#define TEST_ONE(a, c) \
QCryptoHashOpts opts ## a ## c = { \
- .alg = QCRYPTO_HASH_ALG_ ## a, .chunk_size = c, \
+ .alg = QCRYPTO_HASH_ALGO_ ## a, .chunk_size = c, \
}; \
memset(name, 0 , sizeof(name)); \
snprintf(name, sizeof(name), \
"/crypto/benchmark/hash/%s/bufsize-%d", \
- QCryptoHashAlgorithm_str(QCRYPTO_HASH_ALG_ ## a), \
+ QCryptoHashAlgo_str(QCRYPTO_HASH_ALGO_ ## a), \
c); \
- if (qcrypto_hash_supports(QCRYPTO_HASH_ALG_ ## a)) \
+ if (qcrypto_hash_supports(QCRYPTO_HASH_ALGO_ ## a)) \
g_test_add_data_func(name, \
&opts ## a ## c, \
test_hash_speed);
diff --git a/tests/bench/benchmark-crypto-hmac.c b/tests/bench/benchmark-crypto-hmac.c
index 5cca636..d51de98 100644
--- a/tests/bench/benchmark-crypto-hmac.c
+++ b/tests/bench/benchmark-crypto-hmac.c
@@ -28,7 +28,7 @@ static void test_hmac_speed(const void *opaque)
Error *err = NULL;
int ret;
- if (!qcrypto_hmac_supports(QCRYPTO_HASH_ALG_SHA256)) {
+ if (!qcrypto_hmac_supports(QCRYPTO_HASH_ALGO_SHA256)) {
return;
}
@@ -40,7 +40,7 @@ static void test_hmac_speed(const void *opaque)
g_test_timer_start();
do {
- hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALG_SHA256,
+ hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALGO_SHA256,
(const uint8_t *)KEY, strlen(KEY), &err);
g_assert(err == NULL);
g_assert(hmac != NULL);
@@ -56,7 +56,7 @@ static void test_hmac_speed(const void *opaque)
total /= MiB;
g_test_message("hmac(%s): chunk %zu bytes %.2f MB/sec",
- QCryptoHashAlgorithm_str(QCRYPTO_HASH_ALG_SHA256),
+ QCryptoHashAlgo_str(QCRYPTO_HASH_ALGO_SHA256),
chunk_size, total / g_test_timer_last());
g_free(out);
diff --git a/tests/bench/test_akcipher_keys.inc b/tests/bench/test_akcipher_keys.c.inc
index df3eccb..df3eccb 100644
--- a/tests/bench/test_akcipher_keys.inc
+++ b/tests/bench/test_akcipher_keys.c.inc
diff --git a/tests/data/acpi/aarch64/virt/DSDT b/tests/data/acpi/aarch64/virt/DSDT
index c475039..36d3e5d 100644
--- a/tests/data/acpi/aarch64/virt/DSDT
+++ b/tests/data/acpi/aarch64/virt/DSDT
Binary files differ
diff --git a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt
index aee6ba0..e6154d0 100644
--- a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt
+++ b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt
Binary files differ
diff --git a/tests/data/acpi/aarch64/virt/DSDT.memhp b/tests/data/acpi/aarch64/virt/DSDT.memhp
index bae36cd..33f011d 100644
--- a/tests/data/acpi/aarch64/virt/DSDT.memhp
+++ b/tests/data/acpi/aarch64/virt/DSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/aarch64/virt/DSDT.pxb b/tests/data/acpi/aarch64/virt/DSDT.pxb
index fbd78f4..c0fdc6e 100644
--- a/tests/data/acpi/aarch64/virt/DSDT.pxb
+++ b/tests/data/acpi/aarch64/virt/DSDT.pxb
Binary files differ
diff --git a/tests/data/acpi/aarch64/virt/DSDT.topology b/tests/data/acpi/aarch64/virt/DSDT.topology
index 501314c..029d03e 100644
--- a/tests/data/acpi/aarch64/virt/DSDT.topology
+++ b/tests/data/acpi/aarch64/virt/DSDT.topology
Binary files differ
diff --git a/tests/data/acpi/aarch64/virt/SSDT.memhp b/tests/data/acpi/aarch64/virt/SSDT.memhp
index fb3dcde..1deb1d2 100644
--- a/tests/data/acpi/aarch64/virt/SSDT.memhp
+++ b/tests/data/acpi/aarch64/virt/SSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/disassemle-aml.sh b/tests/data/acpi/disassemle-aml.sh
index 253b762..89561d2 100755
--- a/tests/data/acpi/disassemle-aml.sh
+++ b/tests/data/acpi/disassemle-aml.sh
@@ -14,7 +14,7 @@ while getopts "o:" arg; do
esac
done
-for machine in tests/data/acpi/*
+for machine in tests/data/acpi/*/*
do
if [[ ! -d "$machine" ]];
then
diff --git a/tests/data/acpi/riscv64/virt/APIC b/tests/data/acpi/riscv64/virt/APIC
new file mode 100644
index 0000000..66a25df
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/APIC
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/DSDT b/tests/data/acpi/riscv64/virt/DSDT
new file mode 100644
index 0000000..6a33f56
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/DSDT
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/FACP b/tests/data/acpi/riscv64/virt/FACP
new file mode 100644
index 0000000..a5276b6
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/FACP
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/MCFG b/tests/data/acpi/riscv64/virt/MCFG
new file mode 100644
index 0000000..37eb923
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/MCFG
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
new file mode 100644
index 0000000..13c8025
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/RHCT
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/SPCR b/tests/data/acpi/riscv64/virt/SPCR
new file mode 100644
index 0000000..09617f8
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/SPCR
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/SRAT.numamem b/tests/data/acpi/riscv64/virt/SRAT.numamem
new file mode 100644
index 0000000..2b64673
--- /dev/null
+++ b/tests/data/acpi/riscv64/virt/SRAT.numamem
Binary files differ
diff --git a/tests/data/acpi/x86/microvm/DSDT.pcie b/tests/data/acpi/x86/microvm/DSDT.pcie
index 765f14e..8eacd21 100644
--- a/tests/data/acpi/x86/microvm/DSDT.pcie
+++ b/tests/data/acpi/x86/microvm/DSDT.pcie
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT b/tests/data/acpi/x86/pc/DSDT
index c93ad6b..4beb519 100644
--- a/tests/data/acpi/x86/pc/DSDT
+++ b/tests/data/acpi/x86/pc/DSDT
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.acpierst b/tests/data/acpi/x86/pc/DSDT.acpierst
index f643fa2..abda686 100644
--- a/tests/data/acpi/x86/pc/DSDT.acpierst
+++ b/tests/data/acpi/x86/pc/DSDT.acpierst
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.acpihmat b/tests/data/acpi/x86/pc/DSDT.acpihmat
index 9d3695f..d081db2 100644
--- a/tests/data/acpi/x86/pc/DSDT.acpihmat
+++ b/tests/data/acpi/x86/pc/DSDT.acpihmat
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.bridge b/tests/data/acpi/x86/pc/DSDT.bridge
index 840b45f..e16897d 100644
--- a/tests/data/acpi/x86/pc/DSDT.bridge
+++ b/tests/data/acpi/x86/pc/DSDT.bridge
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.cphp b/tests/data/acpi/x86/pc/DSDT.cphp
index dbc0141..e95711c 100644
--- a/tests/data/acpi/x86/pc/DSDT.cphp
+++ b/tests/data/acpi/x86/pc/DSDT.cphp
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.dimmpxm b/tests/data/acpi/x86/pc/DSDT.dimmpxm
index 1294f65..90ba66b 100644
--- a/tests/data/acpi/x86/pc/DSDT.dimmpxm
+++ b/tests/data/acpi/x86/pc/DSDT.dimmpxm
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.hpbridge b/tests/data/acpi/x86/pc/DSDT.hpbridge
index 8012b5e..0eafe5f 100644
--- a/tests/data/acpi/x86/pc/DSDT.hpbridge
+++ b/tests/data/acpi/x86/pc/DSDT.hpbridge
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.hpbrroot b/tests/data/acpi/x86/pc/DSDT.hpbrroot
index 4fa0c6f..077a4cc 100644
--- a/tests/data/acpi/x86/pc/DSDT.hpbrroot
+++ b/tests/data/acpi/x86/pc/DSDT.hpbrroot
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.ipmikcs b/tests/data/acpi/x86/pc/DSDT.ipmikcs
index 0a891ba..8d465f0 100644
--- a/tests/data/acpi/x86/pc/DSDT.ipmikcs
+++ b/tests/data/acpi/x86/pc/DSDT.ipmikcs
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.memhp b/tests/data/acpi/x86/pc/DSDT.memhp
index 9b442a6..e3b4975 100644
--- a/tests/data/acpi/x86/pc/DSDT.memhp
+++ b/tests/data/acpi/x86/pc/DSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.nohpet b/tests/data/acpi/x86/pc/DSDT.nohpet
index 1754c68..9e772c1 100644
--- a/tests/data/acpi/x86/pc/DSDT.nohpet
+++ b/tests/data/acpi/x86/pc/DSDT.nohpet
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.numamem b/tests/data/acpi/x86/pc/DSDT.numamem
index 9fc731d..9bfbfc2 100644
--- a/tests/data/acpi/x86/pc/DSDT.numamem
+++ b/tests/data/acpi/x86/pc/DSDT.numamem
Binary files differ
diff --git a/tests/data/acpi/x86/pc/DSDT.roothp b/tests/data/acpi/x86/pc/DSDT.roothp
index e654c83..efbee6d 100644
--- a/tests/data/acpi/x86/pc/DSDT.roothp
+++ b/tests/data/acpi/x86/pc/DSDT.roothp
Binary files differ
diff --git a/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x b/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x
new file mode 100644
index 0000000..317ddb3
--- /dev/null
+++ b/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x
Binary files differ
diff --git a/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x b/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x
new file mode 100644
index 0000000..31c9011
--- /dev/null
+++ b/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DMAR.dmar b/tests/data/acpi/x86/q35/DMAR.dmar
index 0dca6e6..0c05976 100644
--- a/tests/data/acpi/x86/q35/DMAR.dmar
+++ b/tests/data/acpi/x86/q35/DMAR.dmar
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT b/tests/data/acpi/x86/q35/DSDT
index fb89ae0..e5e8d1e 100644
--- a/tests/data/acpi/x86/q35/DSDT
+++ b/tests/data/acpi/x86/q35/DSDT
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.acpierst b/tests/data/acpi/x86/q35/DSDT.acpierst
index 46fd254..072a3fe 100644
--- a/tests/data/acpi/x86/q35/DSDT.acpierst
+++ b/tests/data/acpi/x86/q35/DSDT.acpierst
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat b/tests/data/acpi/x86/q35/DSDT.acpihmat
index 61c5bd5..2a4f2fc 100644
--- a/tests/data/acpi/x86/q35/DSDT.acpihmat
+++ b/tests/data/acpi/x86/q35/DSDT.acpihmat
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x b/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x
new file mode 100644
index 0000000..7911c05
--- /dev/null
+++ b/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator
index 3aaa2bb..580b4a4 100644
--- a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator
+++ b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.applesmc b/tests/data/acpi/x86/q35/DSDT.applesmc
index 944209a..5e8220e 100644
--- a/tests/data/acpi/x86/q35/DSDT.applesmc
+++ b/tests/data/acpi/x86/q35/DSDT.applesmc
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.bridge b/tests/data/acpi/x86/q35/DSDT.bridge
index d9938db..ee03945 100644
--- a/tests/data/acpi/x86/q35/DSDT.bridge
+++ b/tests/data/acpi/x86/q35/DSDT.bridge
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.core-count b/tests/data/acpi/x86/q35/DSDT.core-count
index a24b04c..7ebfcee 100644
--- a/tests/data/acpi/x86/q35/DSDT.core-count
+++ b/tests/data/acpi/x86/q35/DSDT.core-count
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.core-count2 b/tests/data/acpi/x86/q35/DSDT.core-count2
index 3a0cb8c..d039455 100644
--- a/tests/data/acpi/x86/q35/DSDT.core-count2
+++ b/tests/data/acpi/x86/q35/DSDT.core-count2
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.cphp b/tests/data/acpi/x86/q35/DSDT.cphp
index 20955d0..a055c2e 100644
--- a/tests/data/acpi/x86/q35/DSDT.cphp
+++ b/tests/data/acpi/x86/q35/DSDT.cphp
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.cxl b/tests/data/acpi/x86/q35/DSDT.cxl
index afcdc0d..2084354 100644
--- a/tests/data/acpi/x86/q35/DSDT.cxl
+++ b/tests/data/acpi/x86/q35/DSDT.cxl
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.dimmpxm b/tests/data/acpi/x86/q35/DSDT.dimmpxm
index 228374b..664e926 100644
--- a/tests/data/acpi/x86/q35/DSDT.dimmpxm
+++ b/tests/data/acpi/x86/q35/DSDT.dimmpxm
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.ipmibt b/tests/data/acpi/x86/q35/DSDT.ipmibt
index 45f911a..4066a76 100644
--- a/tests/data/acpi/x86/q35/DSDT.ipmibt
+++ b/tests/data/acpi/x86/q35/DSDT.ipmibt
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.ipmismbus b/tests/data/acpi/x86/q35/DSDT.ipmismbus
index e5d6811..6d0b6b9 100644
--- a/tests/data/acpi/x86/q35/DSDT.ipmismbus
+++ b/tests/data/acpi/x86/q35/DSDT.ipmismbus
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.ivrs b/tests/data/acpi/x86/q35/DSDT.ivrs
index 46fd254..072a3fe 100644
--- a/tests/data/acpi/x86/q35/DSDT.ivrs
+++ b/tests/data/acpi/x86/q35/DSDT.ivrs
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.memhp b/tests/data/acpi/x86/q35/DSDT.memhp
index 5ce0811..4f2f9bc 100644
--- a/tests/data/acpi/x86/q35/DSDT.memhp
+++ b/tests/data/acpi/x86/q35/DSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.mmio64 b/tests/data/acpi/x86/q35/DSDT.mmio64
index bdf36c4..0fb6aab 100644
--- a/tests/data/acpi/x86/q35/DSDT.mmio64
+++ b/tests/data/acpi/x86/q35/DSDT.mmio64
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.multi-bridge b/tests/data/acpi/x86/q35/DSDT.multi-bridge
index 1db43a6..f6afa6d 100644
--- a/tests/data/acpi/x86/q35/DSDT.multi-bridge
+++ b/tests/data/acpi/x86/q35/DSDT.multi-bridge
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.noacpihp b/tests/data/acpi/x86/q35/DSDT.noacpihp
index 8bc1688..9f7261d 100644
--- a/tests/data/acpi/x86/q35/DSDT.noacpihp
+++ b/tests/data/acpi/x86/q35/DSDT.noacpihp
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.nohpet b/tests/data/acpi/x86/q35/DSDT.nohpet
index c13e45e..99ad629 100644
--- a/tests/data/acpi/x86/q35/DSDT.nohpet
+++ b/tests/data/acpi/x86/q35/DSDT.nohpet
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.numamem b/tests/data/acpi/x86/q35/DSDT.numamem
index ba66694..fd1d8a7 100644
--- a/tests/data/acpi/x86/q35/DSDT.numamem
+++ b/tests/data/acpi/x86/q35/DSDT.numamem
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa
index 6ad4287..89032fa 100644
--- a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa
+++ b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.thread-count b/tests/data/acpi/x86/q35/DSDT.thread-count
index a24b04c..7ebfcee 100644
--- a/tests/data/acpi/x86/q35/DSDT.thread-count
+++ b/tests/data/acpi/x86/q35/DSDT.thread-count
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.thread-count2 b/tests/data/acpi/x86/q35/DSDT.thread-count2
index 3a0cb8c..d039455 100644
--- a/tests/data/acpi/x86/q35/DSDT.thread-count2
+++ b/tests/data/acpi/x86/q35/DSDT.thread-count2
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm12 b/tests/data/acpi/x86/q35/DSDT.tis.tpm12
index e381ce4..f2ed40c 100644
--- a/tests/data/acpi/x86/q35/DSDT.tis.tpm12
+++ b/tests/data/acpi/x86/q35/DSDT.tis.tpm12
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm2 b/tests/data/acpi/x86/q35/DSDT.tis.tpm2
index a092530..5c975d2 100644
--- a/tests/data/acpi/x86/q35/DSDT.tis.tpm2
+++ b/tests/data/acpi/x86/q35/DSDT.tis.tpm2
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.type4-count b/tests/data/acpi/x86/q35/DSDT.type4-count
index edc2319..3194a82 100644
--- a/tests/data/acpi/x86/q35/DSDT.type4-count
+++ b/tests/data/acpi/x86/q35/DSDT.type4-count
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.viot b/tests/data/acpi/x86/q35/DSDT.viot
index 64e81f5..129d43e 100644
--- a/tests/data/acpi/x86/q35/DSDT.viot
+++ b/tests/data/acpi/x86/q35/DSDT.viot
Binary files differ
diff --git a/tests/data/acpi/x86/q35/DSDT.xapic b/tests/data/acpi/x86/q35/DSDT.xapic
index d4acd85..b37ab59 100644
--- a/tests/data/acpi/x86/q35/DSDT.xapic
+++ b/tests/data/acpi/x86/q35/DSDT.xapic
Binary files differ
diff --git a/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x b/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x
new file mode 100644
index 0000000..0e5765f
--- /dev/null
+++ b/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x
Binary files differ
diff --git a/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x b/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x
new file mode 100644
index 0000000..b45838a
--- /dev/null
+++ b/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x
Binary files differ
diff --git a/tests/data/qobject/qdict.txt b/tests/data/qobject/qdict.txt
index e2edc88..888f343 100644
--- a/tests/data/qobject/qdict.txt
+++ b/tests/data/qobject/qdict.txt
@@ -3487,12 +3487,6 @@ cred-internals.h: 559
CREDITS: 603
crime.c: 2833
crime.h: 5271
-cris: 4096
-cris_defs_asm.h: 3805
-crisksyms.c: 472
-cris_supp_reg.h: 198
-crisv10.c: 129158
-crisv10.h: 4289
crm_regs.h: 1700
cr_pll.c: 4842
crt0_ram.S: 2152
diff --git a/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2 b/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2
new file mode 100644
index 0000000..18daee0
--- /dev/null
+++ b/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2
Binary files differ
diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include
index 708e3a7..3959d8a 100644
--- a/tests/docker/Makefile.include
+++ b/tests/docker/Makefile.include
@@ -92,10 +92,10 @@ endif
docker-image-alpine: NOUSER=1
debian-toolchain-run = \
- $(if $(NOCACHE), \
+ $(if $(NOCACHE)$(NOFETCH), \
$(call quiet-command, \
$(DOCKER_SCRIPT) build -t qemu/$1 -f $< \
- $(if $V,,--quiet) --no-cache \
+ $(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
--registry $(DOCKER_REGISTRY) --extra-files \
$(DOCKER_FILES_DIR)/$1.d/build-toolchain.sh, \
"BUILD", $1), \
@@ -117,7 +117,6 @@ docker-image-debian-microblaze-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docke
# These images may be good enough for building tests but not for test builds
DOCKER_PARTIAL_IMAGES += debian-microblaze-cross
DOCKER_PARTIAL_IMAGES += debian-xtensa-cross
-DOCKER_PARTIAL_IMAGES += fedora-cris-cross
# images that are only used to build other images
DOCKER_VIRTUAL_IMAGES := debian-bootstrap debian-toolchain
@@ -178,6 +177,7 @@ docker:
@echo ' NETWORK=$$BACKEND Enable virtual network interface with $$BACKEND.'
@echo ' NOUSER=1 Define to disable adding current user to containers passwd.'
@echo ' NOCACHE=1 Ignore cache when build images.'
+ @echo ' NOFETCH=1 Do not fetch from the registry.'
@echo ' EXECUTABLE=<path> Include executable in image.'
@echo ' EXTRA_FILES="<path> [... <path>]"'
@echo ' Include extra files in image.'
@@ -185,8 +185,10 @@ docker:
docker-help: docker
+# Where QEMU caches build artefacts
+DOCKER_QEMU_CACHE_DIR := $$HOME/.cache/qemu
# Use a global constant ccache directory to speed up repetitive builds
-DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache
+DOCKER_QEMU_CCACHE_DIR := $(DOCKER_QEMU_CACHE_DIR)/docker-ccache
# This rule if for directly running against an arbitrary docker target.
# It is called by the expanded docker targets (e.g. make
@@ -195,7 +197,7 @@ DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache
# For example: make docker-run TEST="test-quick" IMAGE="debian:arm64" EXECUTABLE=./aarch64-linux-user/qemu-aarch64
#
docker-run: docker-qemu-src
- @mkdir -p "$(DOCKER_CCACHE_DIR)"
+ @mkdir -p "$(DOCKER_QEMU_CCACHE_DIR)"
@if test -z "$(IMAGE)" || test -z "$(TEST)"; \
then echo "Invalid target $(IMAGE)/$(TEST)"; exit 1; \
fi
@@ -222,8 +224,8 @@ docker-run: docker-qemu-src
-e V=$V -e J=$J -e DEBUG=$(DEBUG) \
-e SHOW_ENV=$(SHOW_ENV) \
$(if $(NOUSER),, \
- -e CCACHE_DIR=/var/tmp/ccache \
- -v $(DOCKER_CCACHE_DIR):/var/tmp/ccache:z \
+ -v $(DOCKER_QEMU_CACHE_DIR):$(DOCKER_QEMU_CACHE_DIR) \
+ -e CCACHE_DIR=$(DOCKER_QEMU_CCACHE_DIR) \
) \
-v $$(readlink -e $(DOCKER_SRC_COPY)):/var/tmp/qemu:z$(COMMA)ro \
$(IMAGE) \
@@ -236,3 +238,6 @@ docker-image: ${DOCKER_IMAGES:%=docker-image-%}
docker-clean:
$(call quiet-command, $(DOCKER_SCRIPT) clean)
+
+# Overrides
+docker-test-rust%: NETWORK=1
diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker
index b079a83..bf3bd5a 100644
--- a/tests/docker/dockerfiles/alpine.docker
+++ b/tests/docker/dockerfiles/alpine.docker
@@ -1,10 +1,10 @@
# THIS FILE WAS AUTO-GENERATED
#
-# $ lcitool dockerfile --layers all alpine-319 qemu
+# $ lcitool dockerfile --layers all alpine-321 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM docker.io/library/alpine:3.19
+FROM docker.io/library/alpine:3.21
RUN apk update && \
apk upgrade && \
@@ -40,10 +40,12 @@ RUN apk update && \
glib-static \
gnutls-dev \
gtk+3.0-dev \
+ gtk-vnc-dev \
json-c-dev \
libaio-dev \
libbpf-dev \
libcap-ng-dev \
+ libcbor-dev \
libdrm-dev \
libepoxy-dev \
libffi-dev \
@@ -89,6 +91,8 @@ RUN apk update && \
py3-yaml \
python3 \
rpm2cpio \
+ rust \
+ rust-bindgen \
samurai \
sdl2-dev \
sdl2_image-dev \
@@ -107,6 +111,7 @@ RUN apk update && \
vde2-dev \
virglrenderer-dev \
vte3-dev \
+ vulkan-tools \
which \
xen-dev \
xorriso \
diff --git a/tests/docker/dockerfiles/centos9.docker b/tests/docker/dockerfiles/centos9.docker
index 0256865..a942835 100644
--- a/tests/docker/dockerfiles/centos9.docker
+++ b/tests/docker/dockerfiles/centos9.docker
@@ -16,6 +16,7 @@ RUN dnf distro-sync -y && \
alsa-lib-devel \
bash \
bc \
+ bindgen-cli \
bison \
brlapi-devel \
bzip2 \
@@ -102,6 +103,7 @@ RUN dnf distro-sync -y && \
python3-sphinx_rtd_theme \
python3-tomli \
rdma-core-devel \
+ rust \
sed \
snappy-devel \
socat \
@@ -113,6 +115,7 @@ RUN dnf distro-sync -y && \
usbredir-devel \
util-linux \
vte291-devel \
+ vulkan-tools \
which \
xorriso \
zlib-devel \
diff --git a/tests/docker/dockerfiles/debian-all-test-cross.docker b/tests/docker/dockerfiles/debian-all-test-cross.docker
index 6cc38a3..8ab244e 100644
--- a/tests/docker/dockerfiles/debian-all-test-cross.docker
+++ b/tests/docker/dockerfiles/debian-all-test-cross.docker
@@ -62,7 +62,8 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \
gcc-s390x-linux-gnu \
libc6-dev-s390x-cross \
gcc-sparc64-linux-gnu \
- libc6-dev-sparc64-cross
+ libc6-dev-sparc64-cross && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools
diff --git a/tests/docker/dockerfiles/debian-amd64-cross.docker b/tests/docker/dockerfiles/debian-amd64-cross.docker
index 8058695..081f3e0 100644
--- a/tests/docker/dockerfiles/debian-amd64-cross.docker
+++ b/tests/docker/dockerfiles/debian-amd64-cross.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:amd64 \
libcap-ng-dev:amd64 \
libcapstone-dev:amd64 \
+ libcbor-dev:amd64 \
libcmocka-dev:amd64 \
libcurl4-gnutls-dev:amd64 \
libdaxctl-dev:amd64 \
@@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:amd64 \
libgnutls28-dev:amd64 \
libgtk-3-dev:amd64 \
+ libgtk-vnc-2.0-dev:amd64 \
libibverbs-dev:amd64 \
libiscsi-dev:amd64 \
libjemalloc-dev:amd64 \
@@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:amd64 \
libnuma-dev:amd64 \
libpam0g-dev:amd64 \
+ libpcre2-dev:amd64 \
libpipewire-0.3-dev:amd64 \
libpixman-1-dev:amd64 \
libpmem-dev:amd64 \
@@ -130,8 +133,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:amd64 \
libslirp-dev:amd64 \
libsnappy-dev:amd64 \
+ libsndio-dev:amd64 \
+ libspice-protocol-dev:amd64 \
libspice-server-dev:amd64 \
- libssh-gcrypt-dev:amd64 \
+ libssh-dev:amd64 \
libsystemd-dev:amd64 \
libtasn1-6-dev:amd64 \
libubsan1:amd64 \
@@ -169,6 +174,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/x86_64-linux-gnu && \
ENV ABI "x86_64-linux-gnu"
ENV MESON_OPTS "--cross-file=x86_64-linux-gnu"
+ENV RUST_TARGET "x86_64-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-linux-gnu-
ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-arm64-cross.docker b/tests/docker/dockerfiles/debian-arm64-cross.docker
index 15457d7..91c555a 100644
--- a/tests/docker/dockerfiles/debian-arm64-cross.docker
+++ b/tests/docker/dockerfiles/debian-arm64-cross.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:arm64 \
libcap-ng-dev:arm64 \
libcapstone-dev:arm64 \
+ libcbor-dev:arm64 \
libcmocka-dev:arm64 \
libcurl4-gnutls-dev:arm64 \
libdaxctl-dev:arm64 \
@@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:arm64 \
libgnutls28-dev:arm64 \
libgtk-3-dev:arm64 \
+ libgtk-vnc-2.0-dev:arm64 \
libibverbs-dev:arm64 \
libiscsi-dev:arm64 \
libjemalloc-dev:arm64 \
@@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:arm64 \
libnuma-dev:arm64 \
libpam0g-dev:arm64 \
+ libpcre2-dev:arm64 \
libpipewire-0.3-dev:arm64 \
libpixman-1-dev:arm64 \
libpng-dev:arm64 \
@@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:arm64 \
libslirp-dev:arm64 \
libsnappy-dev:arm64 \
+ libsndio-dev:arm64 \
+ libspice-protocol-dev:arm64 \
libspice-server-dev:arm64 \
- libssh-gcrypt-dev:arm64 \
+ libssh-dev:arm64 \
libsystemd-dev:arm64 \
libtasn1-6-dev:arm64 \
libubsan1:arm64 \
@@ -168,6 +173,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/aarch64-linux-gnu && \
ENV ABI "aarch64-linux-gnu"
ENV MESON_OPTS "--cross-file=aarch64-linux-gnu"
+ENV RUST_TARGET "aarch64-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=aarch64-linux-gnu-
ENV DEF_TARGET_LIST aarch64-softmmu,aarch64-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-armel-cross.docker b/tests/docker/dockerfiles/debian-armel-cross.docker
deleted file mode 100644
index c26ffc2..0000000
--- a/tests/docker/dockerfiles/debian-armel-cross.docker
+++ /dev/null
@@ -1,178 +0,0 @@
-# THIS FILE WAS AUTO-GENERATED
-#
-# $ lcitool dockerfile --layers all --cross-arch armv6l debian-11 qemu
-#
-# https://gitlab.com/libvirt/libvirt-ci
-
-FROM docker.io/library/debian:11-slim
-
-RUN export DEBIAN_FRONTEND=noninteractive && \
- apt-get update && \
- apt-get install -y eatmydata && \
- eatmydata apt-get dist-upgrade -y && \
- eatmydata apt-get install --no-install-recommends -y \
- bash \
- bc \
- bison \
- bsdextrautils \
- bzip2 \
- ca-certificates \
- ccache \
- dbus \
- debianutils \
- diffutils \
- exuberant-ctags \
- findutils \
- flex \
- gcc \
- gcovr \
- gettext \
- git \
- hostname \
- libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
- llvm \
- locales \
- make \
- meson \
- mtools \
- ncat \
- ninja-build \
- openssh-client \
- pkgconf \
- python3 \
- python3-numpy \
- python3-opencv \
- python3-pillow \
- python3-pip \
- python3-setuptools \
- python3-sphinx \
- python3-sphinx-rtd-theme \
- python3-venv \
- python3-wheel \
- python3-yaml \
- rpm2cpio \
- sed \
- socat \
- sparse \
- tar \
- tesseract-ocr \
- tesseract-ocr-eng \
- xorriso \
- zstd && \
- eatmydata apt-get autoremove -y && \
- eatmydata apt-get autoclean -y && \
- sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \
- dpkg-reconfigure locales && \
- rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED
-
-RUN /usr/bin/pip3 install tomli
-
-ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
-ENV LANG "en_US.UTF-8"
-ENV MAKE "/usr/bin/make"
-ENV NINJA "/usr/bin/ninja"
-ENV PYTHON "/usr/bin/python3"
-
-RUN export DEBIAN_FRONTEND=noninteractive && \
- dpkg --add-architecture armel && \
- eatmydata apt-get update && \
- eatmydata apt-get dist-upgrade -y && \
- eatmydata apt-get install --no-install-recommends -y dpkg-dev && \
- eatmydata apt-get install --no-install-recommends -y \
- gcc-arm-linux-gnueabi \
- libaio-dev:armel \
- libasan6:armel \
- libasound2-dev:armel \
- libattr1-dev:armel \
- libbpf-dev:armel \
- libbrlapi-dev:armel \
- libbz2-dev:armel \
- libc6-dev:armel \
- libcacard-dev:armel \
- libcap-ng-dev:armel \
- libcapstone-dev:armel \
- libcmocka-dev:armel \
- libcurl4-gnutls-dev:armel \
- libdaxctl-dev:armel \
- libdrm-dev:armel \
- libepoxy-dev:armel \
- libfdt-dev:armel \
- libffi-dev:armel \
- libfuse3-dev:armel \
- libgbm-dev:armel \
- libgcrypt20-dev:armel \
- libglib2.0-dev:armel \
- libglusterfs-dev:armel \
- libgnutls28-dev:armel \
- libgtk-3-dev:armel \
- libibverbs-dev:armel \
- libiscsi-dev:armel \
- libjemalloc-dev:armel \
- libjpeg62-turbo-dev:armel \
- libjson-c-dev:armel \
- liblttng-ust-dev:armel \
- liblzo2-dev:armel \
- libncursesw5-dev:armel \
- libnfs-dev:armel \
- libnuma-dev:armel \
- libpam0g-dev:armel \
- libpipewire-0.3-dev:armel \
- libpixman-1-dev:armel \
- libpng-dev:armel \
- libpulse-dev:armel \
- librbd-dev:armel \
- librdmacm-dev:armel \
- libsasl2-dev:armel \
- libsdl2-dev:armel \
- libsdl2-image-dev:armel \
- libseccomp-dev:armel \
- libselinux1-dev:armel \
- libslirp-dev:armel \
- libsnappy-dev:armel \
- libspice-server-dev:armel \
- libssh-gcrypt-dev:armel \
- libsystemd-dev:armel \
- libtasn1-6-dev:armel \
- libubsan1:armel \
- libudev-dev:armel \
- liburing-dev:armel \
- libusb-1.0-0-dev:armel \
- libusbredirhost-dev:armel \
- libvdeplug-dev:armel \
- libvirglrenderer-dev:armel \
- libvte-2.91-dev:armel \
- libzstd-dev:armel \
- nettle-dev:armel \
- systemtap-sdt-dev:armel \
- zlib1g-dev:armel && \
- eatmydata apt-get autoremove -y && \
- eatmydata apt-get autoclean -y && \
- mkdir -p /usr/local/share/meson/cross && \
- printf "[binaries]\n\
-c = '/usr/bin/arm-linux-gnueabi-gcc'\n\
-ar = '/usr/bin/arm-linux-gnueabi-gcc-ar'\n\
-strip = '/usr/bin/arm-linux-gnueabi-strip'\n\
-pkgconfig = '/usr/bin/arm-linux-gnueabi-pkg-config'\n\
-\n\
-[host_machine]\n\
-system = 'linux'\n\
-cpu_family = 'arm'\n\
-cpu = 'arm'\n\
-endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabi && \
- dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \
- mkdir -p /usr/libexec/ccache-wrappers && \
- ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-cc && \
- ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-gcc
-
-ENV ABI "arm-linux-gnueabi"
-ENV MESON_OPTS "--cross-file=arm-linux-gnueabi"
-ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabi-
-ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user,armeb-linux-user
-# As a final step configure the user (if env is defined)
-ARG USER
-ARG UID
-RUN if [ "${USER}" ]; then \
- id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi
diff --git a/tests/docker/dockerfiles/debian-armhf-cross.docker b/tests/docker/dockerfiles/debian-armhf-cross.docker
index 8f87656..f0e2efc 100644
--- a/tests/docker/dockerfiles/debian-armhf-cross.docker
+++ b/tests/docker/dockerfiles/debian-armhf-cross.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:armhf \
libcap-ng-dev:armhf \
libcapstone-dev:armhf \
+ libcbor-dev:armhf \
libcmocka-dev:armhf \
libcurl4-gnutls-dev:armhf \
libdaxctl-dev:armhf \
@@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:armhf \
libgnutls28-dev:armhf \
libgtk-3-dev:armhf \
+ libgtk-vnc-2.0-dev:armhf \
libibverbs-dev:armhf \
libiscsi-dev:armhf \
libjemalloc-dev:armhf \
@@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:armhf \
libnuma-dev:armhf \
libpam0g-dev:armhf \
+ libpcre2-dev:armhf \
libpipewire-0.3-dev:armhf \
libpixman-1-dev:armhf \
libpng-dev:armhf \
@@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:armhf \
libslirp-dev:armhf \
libsnappy-dev:armhf \
+ libsndio-dev:armhf \
+ libspice-protocol-dev:armhf \
libspice-server-dev:armhf \
- libssh-gcrypt-dev:armhf \
+ libssh-dev:armhf \
libsystemd-dev:armhf \
libtasn1-6-dev:armhf \
libubsan1:armhf \
@@ -168,6 +173,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabihf && \
ENV ABI "arm-linux-gnueabihf"
ENV MESON_OPTS "--cross-file=arm-linux-gnueabihf"
+ENV RUST_TARGET "armv7-unknown-linux-gnueabihf"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabihf-
ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker
index f2d40f2..23152b4 100644
--- a/tests/docker/dockerfiles/debian-hexagon-cross.docker
+++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker
@@ -33,7 +33,8 @@ RUN apt-get update && \
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \
- ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc
+ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
RUN /usr/bin/pip3 install tomli
diff --git a/tests/docker/dockerfiles/debian-i686-cross.docker b/tests/docker/dockerfiles/debian-i686-cross.docker
index f4ef054..025beb1 100644
--- a/tests/docker/dockerfiles/debian-i686-cross.docker
+++ b/tests/docker/dockerfiles/debian-i686-cross.docker
@@ -1,10 +1,10 @@
# THIS FILE WAS AUTO-GENERATED
#
-# $ lcitool dockerfile --layers all --cross-arch i686 debian-11 qemu
+# $ lcitool dockerfile --layers all --cross-arch i686 debian-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM docker.io/library/debian:11-slim
+FROM docker.io/library/debian:12-slim
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-opencv \
python3-pillow \
python3-pip \
- python3-setuptools \
python3-sphinx \
python3-sphinx-rtd-theme \
python3-venv \
- python3-wheel \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
+ swtpm \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
dpkg-reconfigure locales && \
rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED
-RUN /usr/bin/pip3 install tomli
-
ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
ENV LANG "en_US.UTF-8"
ENV MAKE "/usr/bin/make"
@@ -94,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:i386 \
libcap-ng-dev:i386 \
libcapstone-dev:i386 \
+ libcbor-dev:i386 \
libcmocka-dev:i386 \
libcurl4-gnutls-dev:i386 \
libdaxctl-dev:i386 \
@@ -108,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:i386 \
libgnutls28-dev:i386 \
libgtk-3-dev:i386 \
+ libgtk-vnc-2.0-dev:i386 \
libibverbs-dev:i386 \
libiscsi-dev:i386 \
libjemalloc-dev:i386 \
@@ -119,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:i386 \
libnuma-dev:i386 \
libpam0g-dev:i386 \
+ libpcre2-dev:i386 \
libpipewire-0.3-dev:i386 \
libpixman-1-dev:i386 \
libpng-dev:i386 \
@@ -132,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:i386 \
libslirp-dev:i386 \
libsnappy-dev:i386 \
+ libsndio-dev:i386 \
+ libspice-protocol-dev:i386 \
libspice-server-dev:i386 \
- libssh-gcrypt-dev:i386 \
+ libssh-dev:i386 \
libsystemd-dev:i386 \
libtasn1-6-dev:i386 \
libubsan1:i386 \
@@ -144,6 +146,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libvdeplug-dev:i386 \
libvirglrenderer-dev:i386 \
libvte-2.91-dev:i386 \
+ libxdp-dev:i386 \
libzstd-dev:i386 \
nettle-dev:i386 \
systemtap-sdt-dev:i386 \
@@ -169,6 +172,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/i686-linux-gnu && \
ENV ABI "i686-linux-gnu"
ENV MESON_OPTS "--cross-file=i686-linux-gnu"
+ENV RUST_TARGET "i686-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-linux-gnu-
ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-legacy-test-cross.docker b/tests/docker/dockerfiles/debian-legacy-test-cross.docker
index d75e0b8..5a6616b 100644
--- a/tests/docker/dockerfiles/debian-legacy-test-cross.docker
+++ b/tests/docker/dockerfiles/debian-legacy-test-cross.docker
@@ -36,7 +36,8 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \
python3-pip \
python3-setuptools \
python3-venv \
- python3-wheel
+ python3-wheel && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
RUN /usr/bin/pip3 install tomli
diff --git a/tests/docker/dockerfiles/debian-loongarch-cross.docker b/tests/docker/dockerfiles/debian-loongarch-cross.docker
index 6a91975..538ab53 100644
--- a/tests/docker/dockerfiles/debian-loongarch-cross.docker
+++ b/tests/docker/dockerfiles/debian-loongarch-cross.docker
@@ -32,7 +32,8 @@ RUN apt-get update && \
python3-pip \
python3-setuptools \
python3-venv \
- python3-wheel
+ python3-wheel && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
RUN /usr/bin/pip3 install tomli
@@ -42,8 +43,8 @@ RUN curl -#SL https://github.com/loongson/build-tools/releases/download/2023.08.
ENV PATH $PATH:/opt/cross-tools/bin
ENV LD_LIBRARY_PATH /opt/cross-tools/lib:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH
-ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools
-ENV DEF_TARGET_LIST loongarch64-linux-user,loongarch-softmmu
+ENV QEMU_CONFIGURE_OPTS --disable-docs --disable-tools
+ENV DEF_TARGET_LIST loongarch64-linux-user,loongarch64-softmmu
ENV MAKE /usr/bin/make
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh
index 23ec0aa..c5cd0aa 100755
--- a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh
+++ b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh
@@ -10,6 +10,8 @@ TOOLCHAIN_INSTALL=/usr/local
TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin
CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root
+GCC_PATCH0_URL=https://raw.githubusercontent.com/Xilinx/meta-xilinx/refs/tags/xlnx-rel-v2024.1/meta-microblaze/recipes-devtools/gcc/gcc-12/0009-Patch-microblaze-Fix-atomic-boolean-return-value.patch
+
export PATH=${TOOLCHAIN_BIN}:$PATH
#
@@ -31,6 +33,12 @@ mv gcc-11.2.0 src-gcc
mv musl-1.2.2 src-musl
mv linux-5.10.70 src-linux
+#
+# Patch gcc
+#
+
+wget -O - ${GCC_PATCH0_URL} | patch -d src-gcc -p1
+
mkdir -p bld-hdr bld-binu bld-gcc bld-musl
mkdir -p ${CROSS_SYSROOT}/usr/include
diff --git a/tests/docker/dockerfiles/debian-mips64el-cross.docker b/tests/docker/dockerfiles/debian-mips64el-cross.docker
index 59c4c68..4a941dd 100644
--- a/tests/docker/dockerfiles/debian-mips64el-cross.docker
+++ b/tests/docker/dockerfiles/debian-mips64el-cross.docker
@@ -1,10 +1,10 @@
# THIS FILE WAS AUTO-GENERATED
#
-# $ lcitool dockerfile --layers all --cross-arch mips64el debian-11 qemu
+# $ lcitool dockerfile --layers all --cross-arch mips64el debian-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM docker.io/library/debian:11-slim
+FROM docker.io/library/debian:12-slim
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-opencv \
python3-pillow \
python3-pip \
- python3-setuptools \
python3-sphinx \
python3-sphinx-rtd-theme \
python3-venv \
- python3-wheel \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
+ swtpm \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
dpkg-reconfigure locales && \
rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED
-RUN /usr/bin/pip3 install tomli
-
ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
ENV LANG "en_US.UTF-8"
ENV MAKE "/usr/bin/make"
@@ -93,6 +90,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:mips64el \
libcap-ng-dev:mips64el \
libcapstone-dev:mips64el \
+ libcbor-dev:mips64el \
libcmocka-dev:mips64el \
libcurl4-gnutls-dev:mips64el \
libdaxctl-dev:mips64el \
@@ -107,6 +105,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:mips64el \
libgnutls28-dev:mips64el \
libgtk-3-dev:mips64el \
+ libgtk-vnc-2.0-dev:mips64el \
libibverbs-dev:mips64el \
libiscsi-dev:mips64el \
libjemalloc-dev:mips64el \
@@ -118,6 +117,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:mips64el \
libnuma-dev:mips64el \
libpam0g-dev:mips64el \
+ libpcre2-dev:mips64el \
libpipewire-0.3-dev:mips64el \
libpixman-1-dev:mips64el \
libpng-dev:mips64el \
@@ -131,8 +131,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:mips64el \
libslirp-dev:mips64el \
libsnappy-dev:mips64el \
+ libsndio-dev:mips64el \
+ libspice-protocol-dev:mips64el \
libspice-server-dev:mips64el \
- libssh-gcrypt-dev:mips64el \
+ libssh-dev:mips64el \
libsystemd-dev:mips64el \
libtasn1-6-dev:mips64el \
libudev-dev:mips64el \
@@ -142,6 +144,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libvdeplug-dev:mips64el \
libvirglrenderer-dev:mips64el \
libvte-2.91-dev:mips64el \
+ libxdp-dev:mips64el \
libzstd-dev:mips64el \
nettle-dev:mips64el \
systemtap-sdt-dev:mips64el \
@@ -167,6 +170,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/mips64el-linux-gnuabi64 && \
ENV ABI "mips64el-linux-gnuabi64"
ENV MESON_OPTS "--cross-file=mips64el-linux-gnuabi64"
+ENV RUST_TARGET "mips64el-unknown-linux-gnuabi64"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips64el-linux-gnuabi64-
ENV DEF_TARGET_LIST mips64el-softmmu,mips64el-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-mipsel-cross.docker b/tests/docker/dockerfiles/debian-mipsel-cross.docker
index 880c774..4d3e5d7 100644
--- a/tests/docker/dockerfiles/debian-mipsel-cross.docker
+++ b/tests/docker/dockerfiles/debian-mipsel-cross.docker
@@ -1,10 +1,10 @@
# THIS FILE WAS AUTO-GENERATED
#
-# $ lcitool dockerfile --layers all --cross-arch mipsel debian-11 qemu
+# $ lcitool dockerfile --layers all --cross-arch mipsel debian-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM docker.io/library/debian:11-slim
+FROM docker.io/library/debian:12-slim
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-opencv \
python3-pillow \
python3-pip \
- python3-setuptools \
python3-sphinx \
python3-sphinx-rtd-theme \
python3-venv \
- python3-wheel \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
+ swtpm \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
dpkg-reconfigure locales && \
rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED
-RUN /usr/bin/pip3 install tomli
-
ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
ENV LANG "en_US.UTF-8"
ENV MAKE "/usr/bin/make"
@@ -93,6 +90,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:mipsel \
libcap-ng-dev:mipsel \
libcapstone-dev:mipsel \
+ libcbor-dev:mipsel \
libcmocka-dev:mipsel \
libcurl4-gnutls-dev:mipsel \
libdaxctl-dev:mipsel \
@@ -107,6 +105,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:mipsel \
libgnutls28-dev:mipsel \
libgtk-3-dev:mipsel \
+ libgtk-vnc-2.0-dev:mipsel \
libibverbs-dev:mipsel \
libiscsi-dev:mipsel \
libjemalloc-dev:mipsel \
@@ -118,6 +117,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:mipsel \
libnuma-dev:mipsel \
libpam0g-dev:mipsel \
+ libpcre2-dev:mipsel \
libpipewire-0.3-dev:mipsel \
libpixman-1-dev:mipsel \
libpng-dev:mipsel \
@@ -131,8 +131,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:mipsel \
libslirp-dev:mipsel \
libsnappy-dev:mipsel \
+ libsndio-dev:mipsel \
+ libspice-protocol-dev:mipsel \
libspice-server-dev:mipsel \
- libssh-gcrypt-dev:mipsel \
+ libssh-dev:mipsel \
libsystemd-dev:mipsel \
libtasn1-6-dev:mipsel \
libudev-dev:mipsel \
@@ -142,6 +144,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libvdeplug-dev:mipsel \
libvirglrenderer-dev:mipsel \
libvte-2.91-dev:mipsel \
+ libxdp-dev:mipsel \
libzstd-dev:mipsel \
nettle-dev:mipsel \
systemtap-sdt-dev:mipsel \
@@ -167,6 +170,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/mipsel-linux-gnu && \
ENV ABI "mipsel-linux-gnu"
ENV MESON_OPTS "--cross-file=mipsel-linux-gnu"
+ENV RUST_TARGET "mipsel-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=mipsel-linux-gnu-
ENV DEF_TARGET_LIST mipsel-softmmu,mipsel-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-ppc64el-cross.docker b/tests/docker/dockerfiles/debian-ppc64el-cross.docker
index 1d55b95..22b4457 100644
--- a/tests/docker/dockerfiles/debian-ppc64el-cross.docker
+++ b/tests/docker/dockerfiles/debian-ppc64el-cross.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:ppc64el \
libcap-ng-dev:ppc64el \
libcapstone-dev:ppc64el \
+ libcbor-dev:ppc64el \
libcmocka-dev:ppc64el \
libcurl4-gnutls-dev:ppc64el \
libdaxctl-dev:ppc64el \
@@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:ppc64el \
libgnutls28-dev:ppc64el \
libgtk-3-dev:ppc64el \
+ libgtk-vnc-2.0-dev:ppc64el \
libibverbs-dev:ppc64el \
libiscsi-dev:ppc64el \
libjemalloc-dev:ppc64el \
@@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:ppc64el \
libnuma-dev:ppc64el \
libpam0g-dev:ppc64el \
+ libpcre2-dev:ppc64el \
libpipewire-0.3-dev:ppc64el \
libpixman-1-dev:ppc64el \
libpng-dev:ppc64el \
@@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:ppc64el \
libslirp-dev:ppc64el \
libsnappy-dev:ppc64el \
+ libsndio-dev:ppc64el \
+ libspice-protocol-dev:ppc64el \
libspice-server-dev:ppc64el \
- libssh-gcrypt-dev:ppc64el \
+ libssh-dev:ppc64el \
libsystemd-dev:ppc64el \
libtasn1-6-dev:ppc64el \
libubsan1:ppc64el \
@@ -167,6 +172,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/powerpc64le-linux-gnu && \
ENV ABI "powerpc64le-linux-gnu"
ENV MESON_OPTS "--cross-file=powerpc64le-linux-gnu"
+ENV RUST_TARGET "powerpc64le-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=powerpc64le-linux-gnu-
ENV DEF_TARGET_LIST ppc64-softmmu,ppc64-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker b/tests/docker/dockerfiles/debian-riscv64-cross.docker
index 4d8ca83..b0386cd 100644
--- a/tests/docker/dockerfiles/debian-riscv64-cross.docker
+++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker
@@ -1,10 +1,10 @@
# THIS FILE WAS AUTO-GENERATED
#
-# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-sid qemu-minimal
+# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-13 qemu-minimal
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM docker.io/library/debian:sid-slim
+FROM docker.io/library/debian:trixie-slim
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
diff --git a/tests/docker/dockerfiles/debian-s390x-cross.docker b/tests/docker/dockerfiles/debian-s390x-cross.docker
index 62ccda6..13ec52c 100644
--- a/tests/docker/dockerfiles/debian-s390x-cross.docker
+++ b/tests/docker/dockerfiles/debian-s390x-cross.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
git \
hostname \
libglib2.0-dev \
- libpcre2-dev \
- libsndio-dev \
- libspice-protocol-dev \
llvm \
locales \
make \
@@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zstd && \
eatmydata apt-get autoremove -y && \
@@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev:s390x \
libcap-ng-dev:s390x \
libcapstone-dev:s390x \
+ libcbor-dev:s390x \
libcmocka-dev:s390x \
libcurl4-gnutls-dev:s390x \
libdaxctl-dev:s390x \
@@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev:s390x \
libgnutls28-dev:s390x \
libgtk-3-dev:s390x \
+ libgtk-vnc-2.0-dev:s390x \
libibverbs-dev:s390x \
libiscsi-dev:s390x \
libjemalloc-dev:s390x \
@@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libnfs-dev:s390x \
libnuma-dev:s390x \
libpam0g-dev:s390x \
+ libpcre2-dev:s390x \
libpipewire-0.3-dev:s390x \
libpixman-1-dev:s390x \
libpng-dev:s390x \
@@ -129,7 +132,9 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libselinux1-dev:s390x \
libslirp-dev:s390x \
libsnappy-dev:s390x \
- libssh-gcrypt-dev:s390x \
+ libsndio-dev:s390x \
+ libspice-protocol-dev:s390x \
+ libssh-dev:s390x \
libsystemd-dev:s390x \
libtasn1-6-dev:s390x \
libubsan1:s390x \
@@ -166,6 +171,7 @@ endian = 'big'\n" > /usr/local/share/meson/cross/s390x-linux-gnu && \
ENV ABI "s390x-linux-gnu"
ENV MESON_OPTS "--cross-file=s390x-linux-gnu"
+ENV RUST_TARGET "s390x-unknown-linux-gnu"
ENV QEMU_CONFIGURE_OPTS --cross-prefix=s390x-linux-gnu-
ENV DEF_TARGET_LIST s390x-softmmu,s390x-linux-user
# As a final step configure the user (if env is defined)
diff --git a/tests/docker/dockerfiles/debian-toolchain.docker b/tests/docker/dockerfiles/debian-toolchain.docker
index 687a97f..ab4ce29 100644
--- a/tests/docker/dockerfiles/debian-toolchain.docker
+++ b/tests/docker/dockerfiles/debian-toolchain.docker
@@ -10,6 +10,8 @@ FROM docker.io/library/debian:11-slim
# ??? The build-dep isn't working, missing a number of
# minimal build dependencies, e.g. libmpc.
+RUN sed 's/^deb /deb-src /' </etc/apt/sources.list >/etc/apt/sources.list.d/deb-src.list
+
RUN apt update && \
DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
DEBIAN_FRONTEND=noninteractive eatmydata \
@@ -33,6 +35,11 @@ RUN cd /root && ./build-toolchain.sh
# and the build trees by restoring the original image,
# then copying the built toolchain from stage 0.
FROM docker.io/library/debian:11-slim
+RUN apt update && \
+ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
+ DEBIAN_FRONTEND=noninteractive eatmydata \
+ apt install -y --no-install-recommends \
+ libmpc3
COPY --from=0 /usr/local /usr/local
# As a final step configure the user (if env is defined)
ARG USER
diff --git a/tests/docker/dockerfiles/debian-tricore-cross.docker b/tests/docker/dockerfiles/debian-tricore-cross.docker
index 16276aa..7e00e87 100644
--- a/tests/docker/dockerfiles/debian-tricore-cross.docker
+++ b/tests/docker/dockerfiles/debian-tricore-cross.docker
@@ -11,8 +11,6 @@
#
FROM docker.io/library/debian:11-slim
-MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
-
RUN apt update && \
DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \
@@ -34,7 +32,8 @@ RUN apt update && \
python3-pip \
python3-setuptools \
python3-wheel \
- python3-venv
+ python3-venv && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
RUN /usr/bin/pip3 install tomli
diff --git a/tests/docker/dockerfiles/debian-xtensa-cross.docker b/tests/docker/dockerfiles/debian-xtensa-cross.docker
index 4138818..d011eee 100644
--- a/tests/docker/dockerfiles/debian-xtensa-cross.docker
+++ b/tests/docker/dockerfiles/debian-xtensa-cross.docker
@@ -16,7 +16,8 @@ RUN apt-get update && \
curl \
gettext \
git \
- python3-minimal
+ python3-minimal && \
+ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt
ENV CPU_LIST dc232b dc233c de233_fpu dsp3400
ENV TOOLCHAIN_RELEASE 2020.07
diff --git a/tests/docker/dockerfiles/debian.docker b/tests/docker/dockerfiles/debian.docker
index 0d1d401..0a57c1a 100644
--- a/tests/docker/dockerfiles/debian.docker
+++ b/tests/docker/dockerfiles/debian.docker
@@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
eatmydata apt-get install --no-install-recommends -y \
bash \
bc \
+ bindgen \
bison \
bsdextrautils \
bzip2 \
@@ -41,6 +42,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev \
libcap-ng-dev \
libcapstone-dev \
+ libcbor-dev \
libcmocka-dev \
libcurl4-gnutls-dev \
libdaxctl-dev \
@@ -55,6 +57,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev \
libgnutls28-dev \
libgtk-3-dev \
+ libgtk-vnc-2.0-dev \
libibverbs-dev \
libiscsi-dev \
libjemalloc-dev \
@@ -84,7 +87,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libsndio-dev \
libspice-protocol-dev \
libspice-server-dev \
- libssh-gcrypt-dev \
+ libssh-dev \
libsystemd-dev \
libtasn1-6-dev \
libubsan1 \
@@ -119,6 +122,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-web \
sed \
socat \
sparse \
@@ -127,6 +131,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zlib1g-dev \
zstd && \
diff --git a/tests/docker/dockerfiles/emsdk-wasm32-cross.docker b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker
new file mode 100644
index 0000000..60a7d02
--- /dev/null
+++ b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker
@@ -0,0 +1,145 @@
+# syntax = docker/dockerfile:1.5
+
+ARG EMSDK_VERSION_QEMU=3.1.50
+ARG ZLIB_VERSION=1.3.1
+ARG GLIB_MINOR_VERSION=2.84
+ARG GLIB_VERSION=${GLIB_MINOR_VERSION}.0
+ARG PIXMAN_VERSION=0.44.2
+ARG FFI_VERSION=v3.4.7
+ARG MESON_VERSION=1.5.0
+
+FROM emscripten/emsdk:$EMSDK_VERSION_QEMU AS build-base
+ARG MESON_VERSION
+ENV TARGET=/builddeps/target
+ENV CPATH="$TARGET/include"
+ENV PKG_CONFIG_PATH="$TARGET/lib/pkgconfig"
+ENV EM_PKG_CONFIG_PATH="$PKG_CONFIG_PATH"
+ENV CFLAGS="-O3 -pthread -DWASM_BIGINT"
+ENV CXXFLAGS="$CFLAGS"
+ENV LDFLAGS="-sWASM_BIGINT -sASYNCIFY=1 -L$TARGET/lib"
+RUN apt-get update && apt-get install -y \
+ autoconf \
+ build-essential \
+ libglib2.0-dev \
+ libtool \
+ pkgconf \
+ ninja-build \
+ python3-pip
+RUN pip3 install meson==${MESON_VERSION} tomli
+RUN mkdir /build
+WORKDIR /build
+RUN mkdir -p $TARGET
+RUN <<EOF
+cat <<EOT > /cross.meson
+[host_machine]
+system = 'emscripten'
+cpu_family = 'wasm32'
+cpu = 'wasm32'
+endian = 'little'
+
+[binaries]
+c = 'emcc'
+cpp = 'em++'
+ar = 'emar'
+ranlib = 'emranlib'
+pkgconfig = ['pkg-config', '--static']
+EOT
+EOF
+
+FROM build-base AS zlib-dev
+ARG ZLIB_VERSION
+RUN mkdir -p /zlib
+RUN curl -Ls https://zlib.net/zlib-$ZLIB_VERSION.tar.xz | \
+ tar xJC /zlib --strip-components=1
+WORKDIR /zlib
+RUN emconfigure ./configure --prefix=$TARGET --static
+RUN emmake make install -j$(nproc)
+
+FROM build-base AS libffi-dev
+ARG FFI_VERSION
+RUN mkdir -p /libffi
+RUN git clone https://github.com/libffi/libffi /libffi
+WORKDIR /libffi
+RUN git checkout $FFI_VERSION
+RUN autoreconf -fiv
+RUN emconfigure ./configure --host=wasm32-unknown-linux \
+ --prefix=$TARGET --enable-static \
+ --disable-shared --disable-dependency-tracking \
+ --disable-builddir --disable-multi-os-directory \
+ --disable-raw-api --disable-docs
+RUN emmake make install SUBDIRS='include' -j$(nproc)
+
+FROM build-base AS pixman-dev
+ARG PIXMAN_VERSION
+RUN mkdir /pixman/
+RUN git clone https://gitlab.freedesktop.org/pixman/pixman /pixman/
+WORKDIR /pixman
+RUN git checkout pixman-$PIXMAN_VERSION
+RUN <<EOF
+cat <<EOT >> /cross.meson
+[built-in options]
+c_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+cpp_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+objc_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+c_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')]
+cpp_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')]
+EOT
+EOF
+RUN meson setup _build --prefix=$TARGET --cross-file=/cross.meson \
+ --default-library=static \
+ --buildtype=release -Dtests=disabled -Ddemos=disabled
+RUN meson install -C _build
+
+FROM build-base AS glib-dev
+ARG GLIB_VERSION
+ARG GLIB_MINOR_VERSION
+RUN mkdir -p /stub
+WORKDIR /stub
+RUN <<EOF
+cat <<'EOT' > res_query.c
+#include <netdb.h>
+int res_query(const char *name, int class,
+ int type, unsigned char *dest, int len)
+{
+ h_errno = HOST_NOT_FOUND;
+ return -1;
+}
+EOT
+EOF
+RUN emcc ${CFLAGS} -c res_query.c -fPIC -o libresolv.o
+RUN ar rcs libresolv.a libresolv.o
+RUN mkdir -p $TARGET/lib/
+RUN cp libresolv.a $TARGET/lib/
+
+RUN mkdir -p /glib
+RUN curl -Lks https://download.gnome.org/sources/glib/${GLIB_MINOR_VERSION}/glib-$GLIB_VERSION.tar.xz | \
+ tar xJC /glib --strip-components=1
+
+COPY --link --from=zlib-dev /builddeps/ /builddeps/
+COPY --link --from=libffi-dev /builddeps/ /builddeps/
+
+WORKDIR /glib
+RUN <<EOF
+CFLAGS="$CFLAGS -Wno-incompatible-function-pointer-types" ;
+cat <<EOT >> /cross.meson
+[built-in options]
+c_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+cpp_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+objc_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')]
+c_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')]
+cpp_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')]
+EOT
+EOF
+RUN meson setup _build --prefix=$TARGET --cross-file=/cross.meson \
+ --default-library=static --buildtype=release --force-fallback-for=pcre2 \
+ -Dselinux=disabled -Dxattr=false -Dlibmount=disabled -Dnls=disabled \
+ -Dtests=false -Dglib_debug=disabled -Dglib_assert=false -Dglib_checks=false
+# FIXME: emscripten doesn't provide some pthread functions in the final link,
+# which isn't detected during meson setup.
+RUN sed -i -E "/#define HAVE_POSIX_SPAWN 1/d" ./_build/config.h
+RUN sed -i -E "/#define HAVE_PTHREAD_GETNAME_NP 1/d" ./_build/config.h
+RUN meson install -C _build
+
+FROM build-base
+COPY --link --from=glib-dev /builddeps/ /builddeps/
+COPY --link --from=pixman-dev /builddeps/ /builddeps/
diff --git a/tests/docker/dockerfiles/fedora-cris-cross.docker b/tests/docker/dockerfiles/fedora-cris-cross.docker
deleted file mode 100644
index 97c9d37..0000000
--- a/tests/docker/dockerfiles/fedora-cris-cross.docker
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Cross compiler for cris system tests
-#
-
-FROM registry.fedoraproject.org/fedora:33
-ENV PACKAGES gcc-cris-linux-gnu
-ENV MAKE /usr/bin/make
-RUN dnf install -y $PACKAGES
-RUN rpm -q $PACKAGES | sort > /packages.txt
-# As a final step configure the user (if env is defined)
-ARG USER
-ARG UID
-RUN if [ "${USER}" ]; then \
- id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi
diff --git a/tests/docker/dockerfiles/fedora-rust-nightly.docker b/tests/docker/dockerfiles/fedora-rust-nightly.docker
new file mode 100644
index 0000000..4a03330
--- /dev/null
+++ b/tests/docker/dockerfiles/fedora-rust-nightly.docker
@@ -0,0 +1,183 @@
+# THIS FILE WAS AUTO-GENERATED
+#
+# $ lcitool dockerfile --layers all fedora-40 qemu
+#
+# https://gitlab.com/libvirt/libvirt-ci
+
+FROM registry.fedoraproject.org/fedora:40
+
+RUN dnf install -y nosync && \
+ printf '#!/bin/sh\n\
+if test -d /usr/lib64\n\
+then\n\
+ export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\
+else\n\
+ export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\
+fi\n\
+exec "$@"\n' > /usr/bin/nosync && \
+ chmod +x /usr/bin/nosync && \
+ nosync dnf update -y && \
+ nosync dnf install -y \
+ SDL2-devel \
+ SDL2_image-devel \
+ alsa-lib-devel \
+ bash \
+ bc \
+ bindgen-cli \
+ bison \
+ brlapi-devel \
+ bzip2 \
+ bzip2-devel \
+ ca-certificates \
+ capstone-devel \
+ ccache \
+ clang \
+ ctags \
+ cyrus-sasl-devel \
+ daxctl-devel \
+ dbus-daemon \
+ device-mapper-multipath-devel \
+ diffutils \
+ findutils \
+ flex \
+ fuse3-devel \
+ gcc \
+ gcovr \
+ gettext \
+ git \
+ glib2-devel \
+ glib2-static \
+ glibc-langpack-en \
+ glibc-static \
+ glusterfs-api-devel \
+ gnutls-devel \
+ gtk-vnc2-devel \
+ gtk3-devel \
+ hostname \
+ jemalloc-devel \
+ json-c-devel \
+ libaio-devel \
+ libasan \
+ libattr-devel \
+ libbpf-devel \
+ libcacard-devel \
+ libcap-ng-devel \
+ libcbor-devel \
+ libcmocka-devel \
+ libcurl-devel \
+ libdrm-devel \
+ libepoxy-devel \
+ libfdt-devel \
+ libffi-devel \
+ libgcrypt-devel \
+ libiscsi-devel \
+ libjpeg-devel \
+ libnfs-devel \
+ libpmem-devel \
+ libpng-devel \
+ librbd-devel \
+ libseccomp-devel \
+ libselinux-devel \
+ libslirp-devel \
+ libssh-devel \
+ libtasn1-devel \
+ libubsan \
+ liburing-devel \
+ libusbx-devel \
+ libxdp-devel \
+ libzstd-devel \
+ llvm \
+ lttng-ust-devel \
+ lzo-devel \
+ make \
+ mesa-libgbm-devel \
+ meson \
+ mtools \
+ ncurses-devel \
+ nettle-devel \
+ ninja-build \
+ nmap-ncat \
+ numactl-devel \
+ openssh-clients \
+ pam-devel \
+ pcre-static \
+ pipewire-devel \
+ pixman-devel \
+ pkgconfig \
+ pulseaudio-libs-devel \
+ python3 \
+ python3-PyYAML \
+ python3-numpy \
+ python3-opencv \
+ python3-pillow \
+ python3-pip \
+ python3-sphinx \
+ python3-sphinx_rtd_theme \
+ python3-zombie-imp \
+ rdma-core-devel \
+ rust \
+ sed \
+ snappy-devel \
+ socat \
+ sparse \
+ spice-protocol \
+ spice-server-devel \
+ swtpm \
+ systemd-devel \
+ systemtap-sdt-devel \
+ tar \
+ tesseract \
+ tesseract-langpack-eng \
+ usbredir-devel \
+ util-linux \
+ virglrenderer-devel \
+ vte291-devel \
+ vulkan-tools \
+ which \
+ xen-devel \
+ xorriso \
+ zlib-devel \
+ zlib-static \
+ zstd && \
+ nosync dnf autoremove -y && \
+ nosync dnf clean all -y && \
+ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED && \
+ rpm -qa | sort > /packages.txt && \
+ mkdir -p /usr/libexec/ccache-wrappers && \
+ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \
+ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \
+ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc
+
+ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
+ENV LANG "en_US.UTF-8"
+ENV MAKE "/usr/bin/make"
+ENV NINJA "/usr/bin/ninja"
+ENV PYTHON "/usr/bin/python3"
+RUN dnf install -y wget
+ENV RUSTUP_HOME=/usr/local/rustup CARGO_HOME=/usr/local/cargo
+ENV RUSTC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustc
+ENV RUSTDOC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustdoc
+ENV CARGO=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/cargo
+RUN set -eux && \
+ rustArch='x86_64-unknown-linux-gnu' && \
+ rustupSha256='6aeece6993e902708983b209d04c0d1dbb14ebb405ddb87def578d41f920f56d' && \
+ url="https://static.rust-lang.org/rustup/archive/1.27.1/${rustArch}/rustup-init" && \
+ wget "$url" && \
+ echo "${rustupSha256} *rustup-init" | sha256sum -c - && \
+ chmod +x rustup-init && \
+ ./rustup-init -y --no-modify-path --profile default --default-toolchain nightly --default-host ${rustArch} && \
+ chmod -R a+w $RUSTUP_HOME $CARGO_HOME && \
+ /usr/local/cargo/bin/rustup --version && \
+ /usr/local/cargo/bin/rustup run nightly cargo --version && \
+ /usr/local/cargo/bin/rustup run nightly rustc --version && \
+ test "$CARGO" = "$(/usr/local/cargo/bin/rustup +nightly which cargo)" && \
+ test "$RUSTDOC" = "$(/usr/local/cargo/bin/rustup +nightly which rustdoc)" && \
+ test "$RUSTC" = "$(/usr/local/cargo/bin/rustup +nightly which rustc)"
+ENV PATH=$CARGO_HOME/bin:$PATH
+RUN /usr/local/cargo/bin/rustup run nightly cargo install bindgen-cli
+RUN $CARGO --list
+# As a final step configure the user (if env is defined)
+ARG USER
+ARG UID
+RUN if [ "${USER}" ]; then \
+ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi
diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker
index 007e157..a950344 100644
--- a/tests/docker/dockerfiles/fedora-win64-cross.docker
+++ b/tests/docker/dockerfiles/fedora-win64-cross.docker
@@ -20,6 +20,7 @@ exec "$@"\n' > /usr/bin/nosync && \
nosync dnf install -y \
bash \
bc \
+ bindgen-cli \
bison \
bzip2 \
ca-certificates \
@@ -42,7 +43,6 @@ exec "$@"\n' > /usr/bin/nosync && \
ninja-build \
nmap-ncat \
openssh-clients \
- pcre-static \
python3 \
python3-PyYAML \
python3-numpy \
@@ -52,15 +52,16 @@ exec "$@"\n' > /usr/bin/nosync && \
python3-sphinx \
python3-sphinx_rtd_theme \
python3-zombie-imp \
+ rust \
sed \
socat \
sparse \
- spice-protocol \
swtpm \
tar \
tesseract \
tesseract-langpack-eng \
util-linux \
+ vulkan-tools \
which \
xorriso \
zstd && \
@@ -86,6 +87,7 @@ RUN nosync dnf install -y \
mingw64-gettext \
mingw64-glib2 \
mingw64-gnutls \
+ mingw64-gtk-vnc2 \
mingw64-gtk3 \
mingw64-libepoxy \
mingw64-libgcrypt \
diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker
index 44f239c..014e3cc 100644
--- a/tests/docker/dockerfiles/fedora.docker
+++ b/tests/docker/dockerfiles/fedora.docker
@@ -23,6 +23,7 @@ exec "$@"\n' > /usr/bin/nosync && \
alsa-lib-devel \
bash \
bc \
+ bindgen-cli \
bison \
brlapi-devel \
bzip2 \
@@ -50,6 +51,7 @@ exec "$@"\n' > /usr/bin/nosync && \
glibc-static \
glusterfs-api-devel \
gnutls-devel \
+ gtk-vnc2-devel \
gtk3-devel \
hostname \
jemalloc-devel \
@@ -60,6 +62,7 @@ exec "$@"\n' > /usr/bin/nosync && \
libbpf-devel \
libcacard-devel \
libcap-ng-devel \
+ libcbor-devel \
libcmocka-devel \
libcurl-devel \
libdrm-devel \
@@ -112,6 +115,7 @@ exec "$@"\n' > /usr/bin/nosync && \
python3-sphinx_rtd_theme \
python3-zombie-imp \
rdma-core-devel \
+ rust \
sed \
snappy-devel \
socat \
@@ -128,6 +132,7 @@ exec "$@"\n' > /usr/bin/nosync && \
util-linux \
virglrenderer-devel \
vte291-devel \
+ vulkan-tools \
which \
xen-devel \
xorriso \
diff --git a/tests/docker/dockerfiles/opensuse-leap.docker b/tests/docker/dockerfiles/opensuse-leap.docker
index 836f531..e90225d 100644
--- a/tests/docker/dockerfiles/opensuse-leap.docker
+++ b/tests/docker/dockerfiles/opensuse-leap.docker
@@ -4,9 +4,10 @@
#
# https://gitlab.com/libvirt/libvirt-ci
-FROM registry.opensuse.org/opensuse/leap:15.5
+FROM registry.opensuse.org/opensuse/leap:15.6
RUN zypper update -y && \
+ zypper addrepo -fc https://download.opensuse.org/update/leap/15.6/backports/openSUSE:Backports:SLE-15-SP6:Update.repo && \
zypper install -y \
Mesa-devel \
alsa-lib-devel \
@@ -33,6 +34,7 @@ RUN zypper update -y && \
glibc-locale \
glibc-static \
glusterfs-devel \
+ gtk-vnc-devel \
gtk3-devel \
hostname \
jemalloc-devel \
@@ -45,6 +47,7 @@ RUN zypper update -y && \
libbz2-devel \
libcacard-devel \
libcap-ng-devel \
+ libcbor-devel \
libcmocka-devel \
libcurl-devel \
libdrm-devel \
@@ -94,6 +97,8 @@ RUN zypper update -y && \
python311-pip \
python311-setuptools \
rdma-core-devel \
+ rust \
+ rust-bindgen \
sed \
snappy-devel \
sndio-devel \
@@ -110,6 +115,7 @@ RUN zypper update -y && \
util-linux \
virglrenderer-devel \
vte-devel \
+ vulkan-tools \
which \
xen-devel \
xorriso \
@@ -126,7 +132,7 @@ RUN zypper update -y && \
RUN /usr/bin/pip3.11 install \
PyYAML \
- meson==0.63.2 \
+ meson==1.5.0 \
pillow \
sphinx \
sphinx-rtd-theme
diff --git a/tests/docker/dockerfiles/python.docker b/tests/docker/dockerfiles/python.docker
index 8f0af9e..59e70a0 100644
--- a/tests/docker/dockerfiles/python.docker
+++ b/tests/docker/dockerfiles/python.docker
@@ -15,7 +15,6 @@ ENV PACKAGES \
python3.11 \
python3.12 \
python3.13 \
- python3.8 \
python3.9
RUN dnf install -y $PACKAGES
diff --git a/tests/docker/dockerfiles/ubuntu2204.docker b/tests/docker/dockerfiles/ubuntu2204.docker
index beeb44f..28a6f93 100644
--- a/tests/docker/dockerfiles/ubuntu2204.docker
+++ b/tests/docker/dockerfiles/ubuntu2204.docker
@@ -41,6 +41,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libcacard-dev \
libcap-ng-dev \
libcapstone-dev \
+ libcbor-dev \
libcmocka-dev \
libcurl4-gnutls-dev \
libdaxctl-dev \
@@ -55,6 +56,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
libglusterfs-dev \
libgnutls28-dev \
libgtk-3-dev \
+ libgtk-vnc-2.0-dev \
libibverbs-dev \
libiscsi-dev \
libjemalloc-dev \
@@ -119,6 +121,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
python3-venv \
python3-yaml \
rpm2cpio \
+ rustc-1.77 \
sed \
socat \
sparse \
@@ -127,6 +130,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
tar \
tesseract-ocr \
tesseract-ocr-eng \
+ vulkan-tools \
xorriso \
zlib1g-dev \
zstd && \
@@ -146,6 +150,13 @@ ENV LANG "en_US.UTF-8"
ENV MAKE "/usr/bin/make"
ENV NINJA "/usr/bin/ninja"
ENV PYTHON "/usr/bin/python3"
+ENV RUSTC=/usr/bin/rustc-1.77
+ENV RUSTDOC=/usr/bin/rustdoc-1.77
+ENV CARGO_HOME=/usr/local/cargo
+ENV PATH=$CARGO_HOME/bin:$PATH
+RUN DEBIAN_FRONTEND=noninteractive eatmydata \
+ apt install -y --no-install-recommends cargo
+RUN cargo install bindgen-cli
# As a final step configure the user (if env is defined)
ARG USER
ARG UID
diff --git a/tests/docker/test-debug b/tests/docker/test-debug
index f52f163..678cecc 100755
--- a/tests/docker/test-debug
+++ b/tests/docker/test-debug
@@ -1,6 +1,6 @@
#!/bin/bash -e
#
-# Compile and check with clang & --enable-debug --enable-sanitizers.
+# Compile and check with clang & debug & sanitizers
#
# Copyright (c) 2016-2018 Red Hat Inc.
#
@@ -19,7 +19,7 @@ requires_binary clang
cd "$BUILD_DIR"
OPTS="--cxx=clang++ --cc=clang --host-cc=clang"
-OPTS="--enable-debug --enable-sanitizers $OPTS"
+OPTS="--enable-debug --enable-asan --enable-ubsan $OPTS"
export ASAN_OPTIONS=detect_leaks=0
build_qemu $OPTS
diff --git a/tests/docker/test-rust b/tests/docker/test-rust
new file mode 100755
index 0000000..e7e3e94
--- /dev/null
+++ b/tests/docker/test-rust
@@ -0,0 +1,21 @@
+#!/bin/bash -e
+#
+# Run the rust code checks (a.k.a. check-rust-tools-nightly)
+#
+# Copyright (c) 2025 Linaro Ltd
+#
+# Authors:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2
+# or (at your option) any later version. See the COPYING file in
+# the top-level directory.
+
+. common.rc
+
+cd "$BUILD_DIR"
+
+configure_qemu --disable-user --disable-docs --enable-rust
+pyvenv/bin/meson devenv -w $QEMU_SRC/rust ${CARGO-cargo} fmt --check
+make clippy
+make rustdoc
diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c
index 8ce0ca1..d90f542 100644
--- a/tests/fp/fp-bench.c
+++ b/tests/fp/fp-bench.c
@@ -488,6 +488,16 @@ static void run_bench(void)
{
bench_func_t f;
+ /*
+ * These implementation-defined choices for various things IEEE
+ * doesn't specify match those used by the Arm architecture.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, &soft_status);
+ set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status);
+ set_float_default_nan_pattern(0b01000000, &soft_status);
+ set_float_ftz_detection(float_ftz_before_rounding, &soft_status);
+
f = bench_funcs[operation][precision];
g_assert(f);
f();
diff --git a/tests/fp/fp-test-log2.c b/tests/fp/fp-test-log2.c
index 4eae93e..79f619c 100644
--- a/tests/fp/fp-test-log2.c
+++ b/tests/fp/fp-test-log2.c
@@ -70,6 +70,8 @@ int main(int ac, char **av)
float_status qsf = {0};
int i;
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
+ set_float_default_nan_pattern(0b01000000, &qsf);
set_float_rounding_mode(float_round_nearest_even, &qsf);
test.d = 0.0;
diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c
index 36b5712..c619e5d 100644
--- a/tests/fp/fp-test.c
+++ b/tests/fp/fp-test.c
@@ -935,6 +935,15 @@ void run_test(void)
{
unsigned int i;
+ /*
+ * These implementation-defined choices for various things IEEE
+ * doesn't specify match those used by the Arm architecture.
+ */
+ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, &qsf);
+ set_float_default_nan_pattern(0b01000000, &qsf);
+ set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &qsf);
+
genCases_setLevel(test_level);
verCases_maxErrorCount = n_max_errors;
diff --git a/tests/fp/meson.build b/tests/fp/meson.build
index 114b4b4..9059a24 100644
--- a/tests/fp/meson.build
+++ b/tests/fp/meson.build
@@ -7,6 +7,16 @@ if host_os == 'windows'
subdir_done()
endif
+# By default tests run with the usual 30s timeout; particularly
+# slow tests can have that overridden here. The keys here are
+# the testnames without their fp-test- prefix.
+slow_fp_tests = {
+ 'rem': 60,
+ 'div': 60,
+ 'mul': 60,
+ 'mulAdd': 180,
+}
+
sfcflags = [
# softfloat defines
'-DSOFTFLOAT_ROUND_ODD',
@@ -109,6 +119,7 @@ fptest_rounding_args = ['-r', 'all']
foreach k, v : softfloat_conv_tests
test('fp-test-' + k, fptest,
args: fptest_args + fptest_rounding_args + v.split(),
+ timeout: slow_fp_tests.get(k, 30),
suite: ['softfloat', 'softfloat-conv'])
endforeach
@@ -116,6 +127,7 @@ foreach k, v : softfloat_tests
test('fp-test-' + k, fptest,
args: fptest_args + fptest_rounding_args +
['f16_' + k, 'f32_' + k, 'f64_' + k, 'f128_' + k, 'extF80_' + k],
+ timeout: slow_fp_tests.get(k, 30),
suite: ['softfloat', 'softfloat-' + v])
endforeach
@@ -124,7 +136,8 @@ test('fp-test-mulAdd', fptest,
# no fptest_rounding_args
args: fptest_args +
['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'],
- suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 180)
+ timeout: slow_fp_tests.get('mulAdd', 30),
+ suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'])
executable(
'fp-bench',
@@ -140,4 +153,5 @@ fptestlog2 = executable(
c_args: fpcflags,
)
test('fp-test-log2', fptestlog2,
+ timeout: slow_fp_tests.get('log2', 30),
suite: ['softfloat', 'softfloat-ops'])
diff --git a/tests/avocado/acpi-bits/bits-config/bits-cfg.txt b/tests/functional/acpi-bits/bits-config/bits-cfg.txt
index 8010804..8010804 100644
--- a/tests/avocado/acpi-bits/bits-config/bits-cfg.txt
+++ b/tests/functional/acpi-bits/bits-config/bits-cfg.txt
diff --git a/tests/avocado/acpi-bits/bits-tests/smbios.py2 b/tests/functional/acpi-bits/bits-tests/smbios.py2
index 5868a71..5868a71 100644
--- a/tests/avocado/acpi-bits/bits-tests/smbios.py2
+++ b/tests/functional/acpi-bits/bits-tests/smbios.py2
diff --git a/tests/avocado/acpi-bits/bits-tests/smilatency.py2 b/tests/functional/acpi-bits/bits-tests/smilatency.py2
index 405af67..405af67 100644
--- a/tests/avocado/acpi-bits/bits-tests/smilatency.py2
+++ b/tests/functional/acpi-bits/bits-tests/smilatency.py2
diff --git a/tests/avocado/acpi-bits/bits-tests/testacpi.py2 b/tests/functional/acpi-bits/bits-tests/testacpi.py2
index 7bf9075..7bf9075 100644
--- a/tests/avocado/acpi-bits/bits-tests/testacpi.py2
+++ b/tests/functional/acpi-bits/bits-tests/testacpi.py2
diff --git a/tests/avocado/acpi-bits/bits-tests/testcpuid.py2 b/tests/functional/acpi-bits/bits-tests/testcpuid.py2
index 7adefbe..7adefbe 100644
--- a/tests/avocado/acpi-bits/bits-tests/testcpuid.py2
+++ b/tests/functional/acpi-bits/bits-tests/testcpuid.py2
diff --git a/tests/functional/aspeed.py b/tests/functional/aspeed.py
new file mode 100644
index 0000000..7a40d5d
--- /dev/null
+++ b/tests/functional/aspeed.py
@@ -0,0 +1,58 @@
+# Test class to boot aspeed machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import LinuxKernelTest
+
+class AspeedTest(LinuxKernelTest):
+
+ def do_test_arm_aspeed_openbmc(self, machine, image, uboot='2019.04',
+ cpu_id='0x0', soc='AST2500 rev A1'):
+ hostname = machine.removesuffix('-bmc')
+
+ self.set_machine(machine)
+ self.vm.set_console()
+ self.vm.add_args('-drive', f'file={image},if=mtd,format=raw',
+ '-snapshot')
+ self.vm.launch()
+
+ self.wait_for_console_pattern(f'U-Boot {uboot}')
+ self.wait_for_console_pattern('## Loading kernel from FIT Image')
+ self.wait_for_console_pattern('Starting kernel ...')
+ self.wait_for_console_pattern(f'Booting Linux on physical CPU {cpu_id}')
+ self.wait_for_console_pattern(f'ASPEED {soc}')
+ self.wait_for_console_pattern('/init as init process')
+ self.wait_for_console_pattern(f'systemd[1]: Hostname set to <{hostname}>.')
+
+ def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB'):
+ self.require_netdev('user')
+ self.vm.set_console()
+ self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw,read-only=true',
+ '-net', 'nic', '-net', 'user')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('U-Boot 2019.04')
+ self.wait_for_console_pattern('## Loading kernel from FIT Image')
+ self.wait_for_console_pattern('Starting kernel ...')
+ self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id)
+ self.wait_for_console_pattern('lease of 10.0.2.15')
+ # the line before login:
+ self.wait_for_console_pattern(pattern)
+ exec_command_and_wait_for_pattern(self, 'root', 'Password:')
+ exec_command_and_wait_for_pattern(self, 'passw0rd', '#')
+
+ def do_test_arm_aspeed_buildroot_poweroff(self):
+ exec_command_and_wait_for_pattern(self, 'poweroff',
+ 'System halted')
+
+ def do_test_arm_aspeed_sdk_start(self, image):
+ self.require_netdev('user')
+ self.vm.set_console()
+ self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
+ '-net', 'nic', '-net', 'user', '-snapshot')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('U-Boot 2019.04')
+ self.wait_for_console_pattern('## Loading kernel from FIT Image')
+ self.wait_for_console_pattern('Starting kernel ...')
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
new file mode 100644
index 0000000..e9f19d5
--- /dev/null
+++ b/tests/functional/meson.build
@@ -0,0 +1,420 @@
+# QEMU functional tests:
+# Tests that are put in the 'quick' category are run by default during
+# 'make check'. Everything that should not be run during 'make check'
+# (e.g. tests that fetch assets from the internet) should be put into
+# the 'thorough' category instead.
+
+# Most tests run too slow with TCI enabled, so skip the functional tests there
+if get_option('tcg_interpreter')
+ subdir_done()
+endif
+
+# Timeouts for individual tests that can be slow e.g. with debugging enabled
+test_timeouts = {
+ 'aarch64_aspeed_ast2700' : 600,
+ 'aarch64_aspeed_ast2700fc' : 600,
+ 'aarch64_imx8mp_evk' : 240,
+ 'aarch64_raspi4' : 480,
+ 'aarch64_reverse_debug' : 180,
+ 'aarch64_rme_virt' : 1200,
+ 'aarch64_rme_sbsaref' : 1200,
+ 'aarch64_sbsaref_alpine' : 1200,
+ 'aarch64_sbsaref_freebsd' : 720,
+ 'aarch64_smmu' : 720,
+ 'aarch64_tuxrun' : 240,
+ 'aarch64_virt' : 360,
+ 'aarch64_virt_gpu' : 480,
+ 'acpi_bits' : 420,
+ 'arm_aspeed_palmetto' : 120,
+ 'arm_aspeed_romulus' : 120,
+ 'arm_aspeed_witherspoon' : 120,
+ 'arm_aspeed_ast2500' : 720,
+ 'arm_aspeed_ast2600' : 1200,
+ 'arm_aspeed_bletchley' : 480,
+ 'arm_aspeed_rainier' : 480,
+ 'arm_bpim2u' : 500,
+ 'arm_collie' : 180,
+ 'arm_cubieboard' : 360,
+ 'arm_orangepi' : 540,
+ 'arm_quanta_gsj' : 240,
+ 'arm_raspi2' : 120,
+ 'arm_replay' : 240,
+ 'arm_tuxrun' : 240,
+ 'arm_sx1' : 360,
+ 'intel_iommu': 300,
+ 'mips_malta' : 480,
+ 'mipsel_malta' : 420,
+ 'mipsel_replay' : 480,
+ 'mips64_malta' : 240,
+ 'mips64el_malta' : 420,
+ 'mips64el_replay' : 180,
+ 'netdev_ethtool' : 180,
+ 'ppc_40p' : 240,
+ 'ppc64_hv' : 1000,
+ 'ppc64_powernv' : 480,
+ 'ppc64_pseries' : 480,
+ 'ppc64_replay' : 210,
+ 'ppc64_tuxrun' : 420,
+ 'ppc64_mac99' : 120,
+ 'riscv64_tuxrun' : 120,
+ 's390x_ccw_virtio' : 420,
+ 'sh4_tuxrun' : 240,
+ 'virtio_balloon': 120,
+ 'x86_64_kvm_xen' : 180,
+ 'x86_64_replay' : 480,
+}
+
+tests_generic_system = [
+ 'empty_cpu_model',
+ 'info_usernet',
+ 'version',
+]
+
+tests_generic_linuxuser = [
+]
+
+tests_generic_bsduser = [
+]
+
+tests_aarch64_system_quick = [
+ 'migration',
+]
+
+tests_aarch64_system_thorough = [
+ 'aarch64_aspeed_ast2700',
+ 'aarch64_aspeed_ast2700fc',
+ 'aarch64_imx8mp_evk',
+ 'aarch64_raspi3',
+ 'aarch64_raspi4',
+ 'aarch64_replay',
+ 'aarch64_reverse_debug',
+ 'aarch64_rme_virt',
+ 'aarch64_rme_sbsaref',
+ 'aarch64_sbsaref',
+ 'aarch64_sbsaref_alpine',
+ 'aarch64_sbsaref_freebsd',
+ 'aarch64_smmu',
+ 'aarch64_tcg_plugins',
+ 'aarch64_tuxrun',
+ 'aarch64_virt',
+ 'aarch64_virt_gpu',
+ 'aarch64_xen',
+ 'aarch64_xlnx_versal',
+ 'multiprocess',
+]
+
+tests_alpha_system_quick = [
+ 'migration',
+]
+
+tests_alpha_system_thorough = [
+ 'alpha_clipper',
+ 'alpha_replay',
+]
+
+tests_arm_system_quick = [
+ 'migration',
+]
+
+tests_arm_system_thorough = [
+ 'arm_aspeed_ast1030',
+ 'arm_aspeed_palmetto',
+ 'arm_aspeed_romulus',
+ 'arm_aspeed_witherspoon',
+ 'arm_aspeed_ast2500',
+ 'arm_aspeed_ast2600',
+ 'arm_aspeed_bletchley',
+ 'arm_aspeed_rainier',
+ 'arm_bpim2u',
+ 'arm_canona1100',
+ 'arm_collie',
+ 'arm_cubieboard',
+ 'arm_emcraft_sf2',
+ 'arm_integratorcp',
+ 'arm_microbit',
+ 'arm_orangepi',
+ 'arm_quanta_gsj',
+ 'arm_raspi2',
+ 'arm_realview',
+ 'arm_replay',
+ 'arm_smdkc210',
+ 'arm_stellaris',
+ 'arm_sx1',
+ 'arm_vexpress',
+ 'arm_virt',
+ 'arm_tuxrun',
+]
+
+tests_arm_linuxuser_thorough = [
+ 'arm_bflt',
+]
+
+tests_avr_system_thorough = [
+ 'avr_mega2560',
+ 'avr_uno',
+]
+
+tests_hppa_system_quick = [
+ 'hppa_seabios',
+]
+
+tests_i386_system_quick = [
+ 'migration',
+]
+
+tests_i386_system_thorough = [
+ 'i386_replay',
+ 'i386_tuxrun',
+]
+
+tests_loongarch64_system_thorough = [
+ 'loongarch64_virt',
+]
+
+tests_m68k_system_thorough = [
+ 'm68k_mcf5208evb',
+ 'm68k_nextcube',
+ 'm68k_replay',
+ 'm68k_q800',
+ 'm68k_tuxrun',
+]
+
+tests_microblaze_system_thorough = [
+ 'microblaze_replay',
+ 'microblaze_s3adsp1800'
+]
+
+tests_microblazeel_system_thorough = [
+ 'microblazeel_s3adsp1800'
+]
+
+tests_mips_system_thorough = [
+ 'mips_malta',
+ 'mips_replay',
+ 'mips_tuxrun',
+]
+
+tests_mipsel_system_thorough = [
+ 'mipsel_malta',
+ 'mipsel_replay',
+ 'mipsel_tuxrun',
+]
+
+tests_mips64_system_thorough = [
+ 'mips64_malta',
+ 'mips64_tuxrun',
+]
+
+tests_mips64el_system_thorough = [
+ 'mips64el_fuloong2e',
+ 'mips64el_loongson3v',
+ 'mips64el_malta',
+ 'mips64el_replay',
+ 'mips64el_tuxrun',
+]
+
+tests_or1k_system_thorough = [
+ 'or1k_replay',
+ 'or1k_sim',
+]
+
+tests_ppc_system_quick = [
+ 'migration',
+ 'ppc_74xx',
+]
+
+tests_ppc_system_thorough = [
+ 'ppc_40p',
+ 'ppc_amiga',
+ 'ppc_bamboo',
+ 'ppc_mac',
+ 'ppc_mpc8544ds',
+ 'ppc_replay',
+ 'ppc_sam460ex',
+ 'ppc_tuxrun',
+ 'ppc_virtex_ml507',
+]
+
+tests_ppc64_system_quick = [
+ 'migration',
+]
+
+tests_ppc64_system_thorough = [
+ 'ppc64_e500',
+ 'ppc64_hv',
+ 'ppc64_powernv',
+ 'ppc64_pseries',
+ 'ppc64_replay',
+ 'ppc64_reverse_debug',
+ 'ppc64_tuxrun',
+ 'ppc64_mac99',
+]
+
+tests_riscv32_system_quick = [
+ 'migration',
+ 'riscv_opensbi',
+]
+
+tests_riscv32_system_thorough = [
+ 'riscv32_tuxrun',
+]
+
+tests_riscv64_system_quick = [
+ 'migration',
+ 'riscv_opensbi',
+]
+
+tests_riscv64_system_thorough = [
+ 'riscv64_tuxrun',
+]
+
+tests_rx_system_thorough = [
+ 'rx_gdbsim',
+]
+
+tests_s390x_system_thorough = [
+ 's390x_ccw_virtio',
+ 's390x_replay',
+ 's390x_topology',
+ 's390x_tuxrun',
+]
+
+tests_sh4_system_thorough = [
+ 'sh4_r2d',
+ 'sh4_tuxrun',
+]
+
+tests_sh4eb_system_thorough = [
+ 'sh4eb_r2d',
+]
+
+tests_sparc_system_quick = [
+ 'migration',
+]
+
+tests_sparc_system_thorough = [
+ 'sparc_replay',
+ 'sparc_sun4m',
+]
+
+tests_sparc64_system_quick = [
+ 'migration',
+]
+
+tests_sparc64_system_thorough = [
+ 'sparc64_sun4u',
+ 'sparc64_tuxrun',
+]
+
+tests_x86_64_system_quick = [
+ 'cpu_queries',
+ 'mem_addr_space',
+ 'migration',
+ 'pc_cpu_hotplug_props',
+ 'virtio_version',
+ 'x86_cpu_model_versions',
+ 'vnc',
+ 'memlock',
+]
+
+tests_x86_64_system_thorough = [
+ 'acpi_bits',
+ 'intel_iommu',
+ 'linux_initrd',
+ 'multiprocess',
+ 'netdev_ethtool',
+ 'virtio_balloon',
+ 'virtio_gpu',
+ 'x86_64_hotplug_blk',
+ 'x86_64_hotplug_cpu',
+ 'x86_64_kvm_xen',
+ 'x86_64_replay',
+ 'x86_64_reverse_debug',
+ 'x86_64_tuxrun',
+]
+
+tests_xtensa_system_thorough = [
+ 'xtensa_lx60',
+ 'xtensa_replay',
+]
+
+precache_all = []
+foreach speed : ['quick', 'thorough']
+ foreach dir : target_dirs
+
+ target_base = dir.split('-')[0]
+
+ if dir.endswith('-softmmu')
+ sysmode = 'system'
+ test_emulator = emulators['qemu-system-' + target_base]
+ elif dir.endswith('-linux-user')
+ sysmode = 'linuxuser'
+ test_emulator = emulators['qemu-' + target_base]
+ elif dir.endswith('-bsd-user')
+ sysmode = 'bsduser'
+ test_emulator = emulators['qemu-' + target_base]
+ else
+ continue
+ endif
+
+ if speed == 'quick'
+ suites = ['func-quick', 'func-' + target_base]
+ target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_quick', []) \
+ + get_variable('tests_generic_' + sysmode)
+ else
+ suites = ['func-' + speed, 'func-' + target_base + '-' + speed, speed]
+ target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_' + speed, [])
+ endif
+
+ test_deps = roms
+ test_env = environment()
+ if have_tools
+ test_env.set('QEMU_TEST_QEMU_IMG', meson.global_build_root() / 'qemu-img')
+ test_deps += [qemu_img]
+ endif
+ test_env.set('QEMU_TEST_QEMU_BINARY', test_emulator.full_path())
+ test_env.set('QEMU_BUILD_ROOT', meson.project_build_root())
+ test_env.set('PYTHONPATH', meson.project_source_root() / 'python:' +
+ meson.current_source_dir())
+
+ foreach test : target_tests
+ testname = '@0@-@1@'.format(target_base, test)
+ testfile = 'test_' + test + '.py'
+ testpath = meson.current_source_dir() / testfile
+ teststamp = testname + '.tstamp'
+ test_precache_env = environment()
+ test_precache_env.set('QEMU_TEST_PRECACHE', meson.current_build_dir() / teststamp)
+ test_precache_env.set('PYTHONPATH', meson.project_source_root() / 'python:' +
+ meson.current_source_dir())
+ precache = custom_target('func-precache-' + testname,
+ output: teststamp,
+ command: [python, testpath],
+ depend_files: files(testpath),
+ build_by_default: false,
+ env: test_precache_env)
+ precache_all += precache
+
+ # Ideally we would add 'precache' to 'depends' here, such that
+ # 'build_by_default: false' lets the pre-caching automatically
+ # run immediately before the test runs. In practice this is
+ # broken in meson, which runs the pre-caching in the normal
+ # compile phase: https://github.com/mesonbuild/meson/issues/2518
+ # If that bug ever gets fixed and QEMU raises its minimum meson
+ # version accordingly, add the 'depends' and remove the custom
+ # 'run_target' logic below & in Makefile.include.
+ test('func-' + testname,
+ python,
+ depends: [test_deps, test_emulator, emulator_modules, plugin_modules],
+ env: test_env,
+ args: [testpath],
+ protocol: 'tap',
+ timeout: test_timeouts.get(test, 90),
+ priority: test_timeouts.get(test, 90),
+ suite: suites)
+ endforeach
+ endforeach
+endforeach
+
+run_target('precache-functional',
+ depends: precache_all,
+ command: [python, '-c', ''])
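+
+# Note: the 'suites' assembled above can be run directly with the meson
+# test runner from the build directory, e.g. (suite names are derived
+# from the 'suites' variables above; exact invocation may vary):
+#
+#   meson test --suite func-quick
+#   meson test --suite func-aarch64-thorough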
diff --git a/tests/functional/qemu_test/__init__.py b/tests/functional/qemu_test/__init__.py
new file mode 100644
index 0000000..6e666a0
--- /dev/null
+++ b/tests/functional/qemu_test/__init__.py
@@ -0,0 +1,20 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+
+from .asset import Asset
+from .config import BUILD_DIR, dso_suffix
+from .cmd import is_readable_executable_file, \
+ interrupt_interactive_console_until_pattern, wait_for_console_pattern, \
+ exec_command, exec_command_and_wait_for_pattern, get_qemu_img, which
+from .testcase import QemuBaseTest, QemuUserTest, QemuSystemTest
+from .linuxkernel import LinuxKernelTest
+from .decorators import skipIfMissingCommands, skipIfNotMachine, \
+ skipFlakyTest, skipUntrustedTest, skipBigDataTest, skipSlowTest, \
+ skipIfMissingImports, skipIfOperatingSystem, skipLockedMemoryTest
+from .archive import archive_extract
+from .uncompress import uncompress
diff --git a/tests/functional/qemu_test/archive.py b/tests/functional/qemu_test/archive.py
new file mode 100644
index 0000000..c803fda
--- /dev/null
+++ b/tests/functional/qemu_test/archive.py
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Utilities for python-based QEMU tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# Authors:
+# Thomas Huth <thuth@redhat.com>
+
+import os
+from subprocess import check_call, run, DEVNULL
+import tarfile
+from urllib.parse import urlparse
+import zipfile
+
+from .asset import Asset
+
+
+def tar_extract(archive, dest_dir, member=None):
+ with tarfile.open(archive) as tf:
+ if hasattr(tarfile, 'data_filter'):
+ tf.extraction_filter = getattr(tarfile, 'data_filter',
+ (lambda member, path: member))
+ if member:
+ tf.extract(member=member, path=dest_dir)
+ else:
+ tf.extractall(path=dest_dir)
+
+def cpio_extract(archive, output_path):
+ cwd = os.getcwd()
+ os.chdir(output_path)
+ # Not passing 'check=True' as cpio exits with non-zero
+ # status if the archive contains any device nodes :-(
+ if type(archive) == str:
+ run(['cpio', '-i', '-F', archive],
+ stdout=DEVNULL, stderr=DEVNULL)
+ else:
+ run(['cpio', '-i'],
+ input=archive.read(),
+ stdout=DEVNULL, stderr=DEVNULL)
+ os.chdir(cwd)
+
+def zip_extract(archive, dest_dir, member=None):
+ with zipfile.ZipFile(archive, 'r') as zf:
+ if member:
+ zf.extract(member=member, path=dest_dir)
+ else:
+ zf.extractall(path=dest_dir)
+
+def deb_extract(archive, dest_dir, member=None):
+ cwd = os.getcwd()
+ os.chdir(dest_dir)
+ try:
+ proc = run(['ar', 't', archive],
+ check=True, capture_output=True, encoding='utf8')
+ file_path = proc.stdout.split()[2]
+ check_call(['ar', 'x', archive, file_path],
+ stdout=DEVNULL, stderr=DEVNULL)
+ tar_extract(file_path, dest_dir, member)
+ finally:
+ os.chdir(cwd)
+
+'''
+@params archive: filename, Asset, or file-like object to extract
+@params dest_dir: target directory to extract into
+@params format: optional archive format (tar, zip, cpio, deb)
+@params member: optional member file to limit extraction to
+
+Extracts @archive into @dest_dir. All files are extracted
+unless @member specifies a limit.
+
+If @format is None, heuristics will be applied to guess the format
+from the filename or Asset URL. @format must be non-None if @archive
+is a file-like object.
+'''
+def archive_extract(archive, dest_dir, format=None, member=None):
+ if format is None:
+ format = guess_archive_format(archive)
+ if type(archive) == Asset:
+ archive = str(archive)
+
+ if format == "tar":
+ tar_extract(archive, dest_dir, member)
+ elif format == "zip":
+ zip_extract(archive, dest_dir, member)
+ elif format == "cpio":
+ if member is not None:
+ raise Exception("Unable to filter cpio extraction")
+ cpio_extract(archive, dest_dir)
+ elif format == "deb":
+ if type(archive) != str:
+ raise Exception("Unable to use file-like object with deb archives")
+ deb_extract(archive, dest_dir, "./" + member)
+ else:
+ raise Exception(f"Unknown archive format {format}")
+
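+# For illustration, a typical call from a test might look like this
+# (the paths and member name are made up):
+#
+#   archive_extract('/tmp/scratch/rootfs.tar.xz', '/tmp/scratch',
+#                   member='boot/zImage')
+#
+# which unpacks only boot/zImage from the tarball into /tmp/scratch.
+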
+'''
+@params archive: filename, or Asset to guess
+
+Guess the format of @archive, raising an exception if
+no format can be determined
+'''
+def guess_archive_format(archive):
+ if type(archive) == Asset:
+ archive = urlparse(archive.url).path
+ elif type(archive) != str:
+ raise Exception(f"Unable to guess archive format for {archive}")
+
+ if ".tar." in archive or archive.endswith("tgz"):
+ return "tar"
+ elif archive.endswith(".zip"):
+ return "zip"
+ elif archive.endswith(".cpio"):
+ return "cpio"
+ elif archive.endswith(".deb") or archive.endswith(".udeb"):
+ return "deb"
+ else:
+ raise Exception(f"Unknown archive format for {archive}")
diff --git a/tests/functional/qemu_test/asset.py b/tests/functional/qemu_test/asset.py
new file mode 100644
index 0000000..704b84d
--- /dev/null
+++ b/tests/functional/qemu_test/asset.py
@@ -0,0 +1,230 @@
+# Test utilities for fetching & caching assets
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import hashlib
+import logging
+import os
+import stat
+import sys
+import unittest
+import urllib.request
+from time import sleep
+from pathlib import Path
+from shutil import copyfileobj
+from urllib.error import HTTPError
+
+class AssetError(Exception):
+ def __init__(self, asset, msg, transient=False):
+ self.url = asset.url
+ self.msg = msg
+ self.transient = transient
+
+ def __str__(self):
+ return "%s: %s" % (self.url, self.msg)
+
+# Instances of this class must be declared as class-level variables
+# whose names start with "ASSET_". This enables the pre-caching logic
+# to easily find all referenced assets and download them prior to
+# execution of the tests.
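+#
+# For example (the URL and hash below are purely illustrative):
+#
+#   ASSET_KERNEL = Asset(
+#       'https://example.org/images/vmlinuz-6.x',
+#       'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')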
+class Asset:
+
+ def __init__(self, url, hashsum):
+ self.url = url
+ self.hash = hashsum
+ cache_dir_env = os.getenv('QEMU_TEST_CACHE_DIR')
+ if cache_dir_env:
+ self.cache_dir = Path(cache_dir_env, "download")
+ else:
+ self.cache_dir = Path(Path("~").expanduser(),
+ ".cache", "qemu", "download")
+ self.cache_file = Path(self.cache_dir, hashsum)
+ self.log = logging.getLogger('qemu-test')
+
+ def __repr__(self):
+ return "Asset: url=%s hash=%s cache=%s" % (
+ self.url, self.hash, self.cache_file)
+
+ def __str__(self):
+ return str(self.cache_file)
+
+ def _check(self, cache_file):
+ if self.hash is None:
+ return True
+ if len(self.hash) == 64:
+ hl = hashlib.sha256()
+ elif len(self.hash) == 128:
+ hl = hashlib.sha512()
+ else:
+ raise AssetError(self, "unknown hash type")
+
+ # Calculate the hash of the file:
+ with open(cache_file, 'rb') as file:
+ while True:
+ chunk = file.read(1 << 20)
+ if not chunk:
+ break
+ hl.update(chunk)
+
+ return self.hash == hl.hexdigest()
+
+ def valid(self):
+ return self.cache_file.exists() and self._check(self.cache_file)
+
+ def fetchable(self):
+ return not os.environ.get("QEMU_TEST_NO_DOWNLOAD", False)
+
+ def available(self):
+ return self.valid() or self.fetchable()
+
+ def _wait_for_other_download(self, tmp_cache_file):
+ # Another thread already seems to download the asset, so wait until
+ # it is done, while also checking the size to see whether it is stuck
+ try:
+ current_size = tmp_cache_file.stat().st_size
+ new_size = current_size
+ except:
+ if os.path.exists(self.cache_file):
+ return True
+ raise
+ waittime = lastchange = 600
+ while waittime > 0:
+ sleep(1)
+ waittime -= 1
+ try:
+ new_size = tmp_cache_file.stat().st_size
+ except:
+ if os.path.exists(self.cache_file):
+ return True
+ raise
+ if new_size != current_size:
+ lastchange = waittime
+ current_size = new_size
+ elif lastchange - waittime > 90:
+ return False
+
+ self.log.debug("Time out while waiting for %s!", tmp_cache_file)
+ raise
+
+ def fetch(self):
+ if not self.cache_dir.exists():
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+ if self.valid():
+ self.log.debug("Using cached asset %s for %s",
+ self.cache_file, self.url)
+ return str(self.cache_file)
+
+ if not self.fetchable():
+ raise AssetError(self,
+ "Asset cache is invalid and downloads disabled")
+
+ self.log.info("Downloading %s to %s...", self.url, self.cache_file)
+ tmp_cache_file = self.cache_file.with_suffix(".download")
+
+ for retries in range(3):
+ try:
+ with tmp_cache_file.open("xb") as dst:
+ with urllib.request.urlopen(self.url) as resp:
+ copyfileobj(resp, dst)
+ length_hdr = resp.getheader("Content-Length")
+
+ # Verify downloaded file size against length metadata, if
+ # available.
+ if length_hdr is not None:
+ length = int(length_hdr)
+ fsize = tmp_cache_file.stat().st_size
+ if fsize != length:
+ self.log.error("Unable to download %s: "
+ "connection closed before "
+ "transfer complete (%d/%d)",
+ self.url, fsize, length)
+ tmp_cache_file.unlink()
+ continue
+ break
+ except FileExistsError:
+ self.log.debug("%s already exists, "
+ "waiting for other thread to finish...",
+ tmp_cache_file)
+ if self._wait_for_other_download(tmp_cache_file):
+ return str(self.cache_file)
+ self.log.debug("%s seems to be stale, "
+ "deleting and retrying download...",
+ tmp_cache_file)
+ tmp_cache_file.unlink()
+ continue
+ except HTTPError as e:
+ tmp_cache_file.unlink()
+ self.log.error("Unable to download %s: HTTP error %d",
+ self.url, e.code)
+ # Treat 404 as fatal, since it is highly likely to
+ # indicate a broken test rather than a transient
+ # server or networking problem
+ if e.code == 404:
+ raise AssetError(self, "Unable to download: "
+ "HTTP error %d" % e.code)
+ continue
+ except Exception as e:
+ tmp_cache_file.unlink()
+ raise AssetError(self, "Unable to download: " % e)
+
+ if not os.path.exists(tmp_cache_file):
+ raise AssetError(self, "Download retries exceeded", transient=True)
+
+ try:
+ # Set these just for informational purposes
+ os.setxattr(str(tmp_cache_file), "user.qemu-asset-url",
+ self.url.encode('utf8'))
+ os.setxattr(str(tmp_cache_file), "user.qemu-asset-hash",
+ self.hash.encode('utf8'))
+ except Exception as e:
+ self.log.debug("Unable to set xattr on %s: %s", tmp_cache_file, e)
+
+ if not self._check(tmp_cache_file):
+ tmp_cache_file.unlink()
+ raise AssetError(self, "Hash does not match %s" % self.hash)
+ tmp_cache_file.replace(self.cache_file)
+ # Remove write perms to stop tests accidentally modifying them
+ os.chmod(self.cache_file, stat.S_IRUSR | stat.S_IRGRP)
+
+ self.log.info("Cached %s at %s" % (self.url, self.cache_file))
+ return str(self.cache_file)
+
+ def precache_test(test):
+ log = logging.getLogger('qemu-test')
+ log.setLevel(logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setLevel(logging.DEBUG)
+ formatter = logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ handler.setFormatter(formatter)
+ log.addHandler(handler)
+ for name, asset in vars(test.__class__).items():
+ if name.startswith("ASSET_") and type(asset) == Asset:
+ log.info("Attempting to cache '%s'" % asset)
+ try:
+ asset.fetch()
+ except AssetError as e:
+ if not e.transient:
+ raise
+ log.error("%s: skipping asset precache" % e)
+
+ log.removeHandler(handler)
+
+ def precache_suite(suite):
+ for test in suite:
+ if isinstance(test, unittest.TestSuite):
+ Asset.precache_suite(test)
+ elif isinstance(test, unittest.TestCase):
+ Asset.precache_test(test)
+
+ def precache_suites(path, cacheTstamp):
+ loader = unittest.loader.defaultTestLoader
+ tests = loader.loadTestsFromNames([path], None)
+
+ with open(cacheTstamp, "w") as fh:
+ Asset.precache_suite(tests)
diff --git a/tests/functional/qemu_test/cmd.py b/tests/functional/qemu_test/cmd.py
new file mode 100644
index 0000000..dc5f422
--- /dev/null
+++ b/tests/functional/qemu_test/cmd.py
@@ -0,0 +1,202 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+# Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import logging
+import os
+import os.path
+
+
+def which(tool):
+ """ looks up the full path for @tool, returns None if not found
+ or if @tool does not have executable permissions.
+ """
+ paths = os.getenv('PATH', '')
+ for p in paths.split(os.path.pathsep):
+ p = os.path.join(p, tool)
+ if os.access(p, os.X_OK):
+ return p
+ return None
+
+def is_readable_executable_file(path):
+ return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK)
+
+# @test: functional test to fail if @failure is seen
+# @vm: the VM whose console to process
+# @success: a non-None string to look for
+# @failure: a string to look for that triggers test failure, or None
+#
+# Read up to 1 line of text from @vm, looking for @success
+# and optionally @failure.
+#
+# If @success or @failure is seen, immediately return True,
+# even if end of line is not yet seen, i.e. the remainder of the
+# line is left unread.
+#
+# If end of line is seen with neither @success nor @failure,
+# return False
+#
+# If @failure is seen, then mark @test as failed
+def _console_read_line_until_match(test, vm, success, failure):
+ msg = bytes([])
+ done = False
+ while True:
+ c = vm.console_socket.recv(1)
+ if not c:   # recv() returns b'' on EOF, not None
+ done = True
+ test.fail(
+ f"EOF in console, expected '{success}'")
+ break
+ msg += c
+
+ if success in msg:
+ done = True
+ break
+ if failure and failure in msg:
+ done = True
+ vm.console_socket.close()
+ test.fail(
+ f"'{failure}' found in console, expected '{success}'")
+
+ if c == b'\n':
+ break
+
+ console_logger = logging.getLogger('console')
+ try:
+ console_logger.debug(msg.decode().strip())
+ except:
+ console_logger.debug(msg)
+
+ return done
+
+def _console_interaction(test, success_message, failure_message,
+ send_string, keep_sending=False, vm=None):
+ assert not keep_sending or send_string
+ assert success_message or send_string
+
+ if vm is None:
+ vm = test.vm
+
+ test.log.debug(
+ f"Console interaction: success_msg='{success_message}' " +
+ f"failure_msg='{failure_message}' send_string='{send_string}'")
+
+ # We'll process console in bytes, to avoid having to
+ # deal with unicode decode errors from receiving
+ # partial utf8 byte sequences
+ success_message_b = None
+ if success_message is not None:
+ success_message_b = success_message.encode()
+
+ failure_message_b = None
+ if failure_message is not None:
+ failure_message_b = failure_message.encode()
+
+ while True:
+ if send_string:
+ vm.console_socket.sendall(send_string.encode())
+ if not keep_sending:
+ send_string = None # send only once
+
+ # Only consume console output if waiting for something
+ if success_message is None:
+ if send_string is None:
+ break
+ continue
+
+ if _console_read_line_until_match(test, vm,
+ success_message_b,
+ failure_message_b):
+ break
+
+def interrupt_interactive_console_until_pattern(test, success_message,
+ failure_message=None,
+ interrupt_string='\r'):
+ """
+ Keep sending a string to interrupt a console prompt, while logging the
+ console output. Typical use case is to break a boot loader prompt, such as:
+
+ Press a key within 5 seconds to interrupt boot process.
+ 5
+ 4
+ 3
+ 2
+ 1
+ Booting default image...
+
+ :param test: a test containing a VM that will have its console
+ read and probed for a success or failure message
+ :type test: :class:`qemu_test.QemuSystemTest`
+ :param success_message: if this message appears, test succeeds
+ :param failure_message: if this message appears, test fails
+ :param interrupt_string: a string to send to the console before trying
+ to read a new line
+ """
+ assert success_message
+ _console_interaction(test, success_message, failure_message,
+ interrupt_string, True)
+
+def wait_for_console_pattern(test, success_message, failure_message=None,
+ vm=None):
+ """
+ Waits for messages to appear on the console, while logging the content
+
+ :param test: a test containing a VM that will have its console
+ read and probed for a success or failure message
+ :type test: :class:`qemu_test.QemuSystemTest`
+ :param success_message: if this message appears, test succeeds
+ :param failure_message: if this message appears, test fails
+ """
+ assert success_message
+ _console_interaction(test, success_message, failure_message, None, vm=vm)
+
+def exec_command(test, command):
+ """
+ Send a command to a console (appending CRLF characters), while logging
+ the content.
+
+ :param test: a test containing a VM.
+ :type test: :class:`qemu_test.QemuSystemTest`
+ :param command: the command to send
+ :type command: str
+ """
+ _console_interaction(test, None, None, command + '\r')
+
+def exec_command_and_wait_for_pattern(test, command,
+ success_message, failure_message=None):
+ """
+ Send a command to a console (appending CRLF characters), then wait
+ for success_message to appear on the console, while logging the
+ content. Mark the test as failed if failure_message is found instead.
+
+ :param test: a test containing a VM that will have its console
+ read and probed for a success or failure message
+ :type test: :class:`qemu_test.QemuSystemTest`
+ :param command: the command to send
+ :param success_message: if this message appears, test succeeds
+ :param failure_message: if this message appears, test fails
+ """
+ assert success_message
+ _console_interaction(test, success_message, failure_message, command + '\r')
+
+def get_qemu_img(test):
+ test.log.debug('Looking for and selecting a qemu-img binary')
+
+ # If qemu-img has been built, use it, otherwise the system wide one
+ # will be used.
+ qemu_img = test.build_file('qemu-img')
+ if os.path.exists(qemu_img):
+ return qemu_img
+ qemu_img = which('qemu-img')
+ if qemu_img is not None:
+ return qemu_img
+ test.skipTest(f"qemu-img not found in build dir or '$PATH'")
diff --git a/tests/functional/qemu_test/config.py b/tests/functional/qemu_test/config.py
new file mode 100644
index 0000000..6d4c9c3
--- /dev/null
+++ b/tests/functional/qemu_test/config.py
@@ -0,0 +1,48 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+# Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+from pathlib import Path
+import platform
+
+
+def _source_dir():
+ # Determine top-level directory of the QEMU sources
+ return Path(__file__).parent.parent.parent.parent
+
+def _build_dir():
+ root = os.getenv('QEMU_BUILD_ROOT')
+ if root is not None:
+ return Path(root)
+ # Makefile.mtest only exists in build dir, so if it is available, use CWD
+ if os.path.exists('Makefile.mtest'):
+ return Path(os.getcwd())
+
+ root = os.path.join(_source_dir(), 'build')
+ if os.path.exists(root):
+ return Path(root)
+
+ raise Exception("Cannot identify build dir, set QEMU_BUILD_ROOT")
+
+BUILD_DIR = _build_dir()
+
+def dso_suffix():
+ '''Return the dynamic libraries suffix for the current platform'''
+
+ if platform.system() == "Darwin":
+ return "dylib"
+
+ if platform.system() == "Windows":
+ return "dll"
+
+ return "so"
diff --git a/tests/functional/qemu_test/decorators.py b/tests/functional/qemu_test/decorators.py
new file mode 100644
index 0000000..c0d1567
--- /dev/null
+++ b/tests/functional/qemu_test/decorators.py
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Decorators useful in functional tests
+
+import importlib
+import os
+import platform
+import resource
+from unittest import skipIf, skipUnless
+
+from .cmd import which
+
+'''
+Decorator to skip execution of a test if the list
+of command binaries is not available in $PATH.
+Example:
+
+ @skipIfMissingCommands("mkisofs", "losetup")
+'''
+def skipIfMissingCommands(*args):
+ has_cmds = True
+ for cmd in args:
+ if not which(cmd):
+ has_cmds = False
+ break
+
+ return skipUnless(has_cmds, 'required command(s) "%s" not installed' %
+ ", ".join(args))
+
+'''
+Decorator to skip execution of a test if the current
+host operating system matches one of the prohibited
+ones.
+Example:
+
+ @skipIfOperatingSystem("Linux", "Darwin")
+'''
+def skipIfOperatingSystem(*args):
+ return skipIf(platform.system() in args,
+ 'running on an OS (%s) that is not able to run this test' %
+ ", ".join(args))
+
+'''
+Decorator to skip execution of a test if the current
+host machine does not match one of the permitted
+machines.
+Example:
+
+ @skipIfNotMachine("x86_64", "aarch64")
+'''
+def skipIfNotMachine(*args):
+ return skipUnless(platform.machine() in args,
+ 'not running on one of the required machine(s) "%s"' %
+ ", ".join(args))
+
+'''
+Decorator to skip execution of flaky tests, unless
+the $QEMU_TEST_FLAKY_TESTS environment variable is set.
+A bug URL must be provided that documents the observed
+failure behaviour, so it can be tracked & re-evaluated
+in future.
+
+Historical tests may provide "None" as the bug_url,
+but this should not be done for new tests.
+
+Example:
+
+ @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/NNN")
+'''
+def skipFlakyTest(bug_url):
+ if bug_url is None:
+ bug_url = "FIXME: reproduce flaky test and file bug report or remove"
+ return skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'),
+ f'Test is unstable: {bug_url}')
+
+'''
+Decorator to skip execution of tests which are likely
+to execute untrusted commands on the host, or commands
+which process untrusted code, unless the
+$QEMU_TEST_ALLOW_UNTRUSTED_CODE env var is set.
+Example:
+
+ @skipUntrustedTest()
+'''
+def skipUntrustedTest():
+ return skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'),
+ 'Test runs untrusted code / processes untrusted data')
+
+'''
+Decorator to skip execution of tests which need large
+data storage (over the ~500MB-1GB mark) on the host,
+unless the $QEMU_TEST_ALLOW_LARGE_STORAGE environment
+variable is set
+
+Example:
+
+ @skipBigDataTest()
+'''
+def skipBigDataTest():
+ return skipUnless(os.getenv('QEMU_TEST_ALLOW_LARGE_STORAGE'),
+ 'Test requires large host storage space')
+
+'''
+Decorator to skip execution of tests which have a really long
+runtime (and might e.g. time out if QEMU has been compiled with
+debugging enabled) unless the $QEMU_TEST_ALLOW_SLOW
+environment variable is set
+
+Example:
+
+ @skipSlowTest()
+'''
+def skipSlowTest():
+ return skipUnless(os.getenv('QEMU_TEST_ALLOW_SLOW'),
+ 'Test has a very long runtime and might time out')
+
+'''
+Decorator to skip execution of a test if the list
+of python imports is not available.
+Example:
+
+ @skipIfMissingImports("numpy", "cv2")
+'''
+def skipIfMissingImports(*args):
+ has_imports = True
+ for impname in args:
+ try:
+ importlib.import_module(impname)
+ except ImportError:
+ has_imports = False
+ break
+
+ return skipUnless(has_imports, 'required import(s) "%s" not installed' %
+ ", ".join(args))
+
+'''
+Decorator to skip execution of a test if the system's
+locked memory limit is below the required threshold.
+Takes required locked memory threshold in kB.
+Example:
+
+ @skipLockedMemoryTest(2_097_152)
+'''
+def skipLockedMemoryTest(locked_memory):
+ # get memlock hard limit in bytes
+ _, ulimit_memory = resource.getrlimit(resource.RLIMIT_MEMLOCK)
+
+ return skipUnless(
+ ulimit_memory == resource.RLIM_INFINITY or ulimit_memory >= locked_memory * 1024,
+ f'Test requires {locked_memory} kB of available locked memory',
+ )
diff --git a/tests/functional/qemu_test/linuxkernel.py b/tests/functional/qemu_test/linuxkernel.py
new file mode 100644
index 0000000..2aca0ee
--- /dev/null
+++ b/tests/functional/qemu_test/linuxkernel.py
@@ -0,0 +1,52 @@
+# Test class for testing the boot process of a Linux kernel
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import hashlib
+import urllib.request
+
+from .cmd import wait_for_console_pattern, exec_command_and_wait_for_pattern
+from .testcase import QemuSystemTest
+from .utils import get_usernet_hostfwd_port
+
+
+class LinuxKernelTest(QemuSystemTest):
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(self, success_message,
+ failure_message='Kernel panic - not syncing',
+ vm=vm)
+
+ def launch_kernel(self, kernel, initrd=None, dtb=None, console_index=0,
+ wait_for=None):
+ self.vm.set_console(console_index=console_index)
+ self.vm.add_args('-kernel', kernel)
+ if initrd:
+ self.vm.add_args('-initrd', initrd)
+ if dtb:
+ self.vm.add_args('-dtb', dtb)
+ self.vm.launch()
+ if wait_for:
+ self.wait_for_console_pattern(wait_for)
+
+ def check_http_download(self, filename, hashsum, guestport=8080,
+ pythoncmd='python3 -m http.server'):
+ exec_command_and_wait_for_pattern(self,
+ f'{pythoncmd} {guestport} & sleep 1',
+ f'Serving HTTP on 0.0.0.0 port {guestport}')
+ hl = hashlib.sha256()
+ hostport = get_usernet_hostfwd_port(self.vm)
+ url = f'http://localhost:{hostport}{filename}'
+ self.log.info(f'Downloading {url} ...')
+ with urllib.request.urlopen(url) as response:
+ while True:
+ chunk = response.read(1 << 20)
+ if not chunk:
+ break
+ hl.update(chunk)
+
+ digest = hl.hexdigest()
+ self.log.info(f'sha256sum of download is {digest}.')
+ self.assertEqual(digest, hashsum)
diff --git a/tests/functional/qemu_test/ports.py b/tests/functional/qemu_test/ports.py
new file mode 100644
index 0000000..631b77a
--- /dev/null
+++ b/tests/functional/qemu_test/ports.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+#
+# Simple functional tests for VNC functionality
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import fcntl
+import os
+import socket
+
+from .config import BUILD_DIR
+from typing import List
+
+
+class Ports():
+
+ PORTS_ADDR = '127.0.0.1'
+ PORTS_RANGE_SIZE = 1024
+ PORTS_START = 49152 + ((os.getpid() * PORTS_RANGE_SIZE) % 16384)
+ PORTS_END = PORTS_START + PORTS_RANGE_SIZE
+
+ def __enter__(self):
+ lock_file = os.path.join(BUILD_DIR, "tests", "functional", "port_lock")
+ self.lock_fh = os.open(lock_file, os.O_CREAT)
+ fcntl.flock(self.lock_fh, fcntl.LOCK_EX)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ fcntl.flock(self.lock_fh, fcntl.LOCK_UN)
+ os.close(self.lock_fh)
+
+ def check_bind(self, port: int) -> bool:
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+ try:
+ sock.bind((self.PORTS_ADDR, port))
+ except OSError:
+ return False
+
+ return True
+
+ def find_free_ports(self, count: int) -> List[int]:
+ result = []
+ for port in range(self.PORTS_START, self.PORTS_END):
+ if self.check_bind(port):
+ result.append(port)
+ if len(result) >= count:
+ break
+ assert len(result) == count
+ return result
+
+ def find_free_port(self) -> int:
+ return self.find_free_ports(1)[0]
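+
+# Sketch of typical usage from a test that needs a free host TCP port
+# (variable names are illustrative):
+#
+#   with Ports() as ports:
+#       port = ports.find_free_port()
+#       # use 'port' for e.g. a -vnc or -netdev option; launch the VM
+#       # while the Ports lock is still held so parallel tests do not
+#       # race for the same port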
diff --git a/tests/functional/qemu_test/tesseract.py b/tests/functional/qemu_test/tesseract.py
new file mode 100644
index 0000000..ede6c65
--- /dev/null
+++ b/tests/functional/qemu_test/tesseract.py
@@ -0,0 +1,25 @@
+# Helper for running the tesseract OCR tool on screendump images in tests
+#
+# Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import logging
+from subprocess import run
+
+
+def tesseract_ocr(image_path, tesseract_args=''):
+ console_logger = logging.getLogger('console')
+ console_logger.debug(image_path)
+ proc = run(['tesseract', image_path, 'stdout'],
+ capture_output=True, encoding='utf8')
+ if proc.returncode:
+ return None
+ lines = []
+ for line in proc.stdout.split('\n'):
+ sline = line.strip()
+ if len(sline):
+ console_logger.debug(sline)
+ lines += [sline]
+ return lines
diff --git a/tests/functional/qemu_test/testcase.py b/tests/functional/qemu_test/testcase.py
new file mode 100644
index 0000000..50c401b
--- /dev/null
+++ b/tests/functional/qemu_test/testcase.py
@@ -0,0 +1,400 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+# Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import logging
+import os
+from pathlib import Path
+import pycotap
+import shutil
+from subprocess import run
+import sys
+import tempfile
+import unittest
+import uuid
+
+from qemu.machine import QEMUMachine
+from qemu.utils import kvm_available, tcg_available
+
+from .archive import archive_extract
+from .asset import Asset
+from .config import BUILD_DIR, dso_suffix
+from .uncompress import uncompress
+
+
+class QemuBaseTest(unittest.TestCase):
+
+ '''
+ @params compressed: filename, Asset, or file-like object to uncompress
+ @params format: optional compression format (gzip, lzma)
+
+ Uncompresses @compressed into the scratch directory.
+
+ If @format is None, heuristics will be applied to guess the format
+ from the filename or Asset URL. @format must be non-None if @compressed
+ is a file-like object.
+
+ Returns the fully qualified path to the uncompressed file
+ '''
+ def uncompress(self, compressed, format=None):
+ self.log.debug(f"Uncompress {compressed} format={format}")
+ if type(compressed) == Asset:
+ compressed.fetch()
+
+ (name, ext) = os.path.splitext(str(compressed))
+ uncompressed = self.scratch_file(os.path.basename(name))
+
+ uncompress(compressed, uncompressed, format)
+
+ return uncompressed
+
+ '''
+ @params archive: filename, Asset, or file-like object to extract
+ @params format: optional archive format (tar, zip, deb, cpio)
+ @params sub_dir: optional sub-directory to extract into
+ @params member: optional member file to limit extraction to
+
+ Extracts @archive into the scratch directory, or a directory beneath
+ named by @sub_dir. All files are extracted unless @member specifies
+ a limit.
+
+ If @format is None, heuristics will be applied to guess the format
+ from the filename or Asset URL. @format must be non-None if @archive
+ is a file-like object.
+
+ If @member is non-None, returns the fully qualified path to @member
+ '''
+ def archive_extract(self, archive, format=None, sub_dir=None, member=None):
+ self.log.debug(f"Extract {archive} format={format}" +
+ f"sub_dir={sub_dir} member={member}")
+ if type(archive) == Asset:
+ archive.fetch()
+ if sub_dir is None:
+ archive_extract(archive, self.scratch_file(), format, member)
+ else:
+ archive_extract(archive, self.scratch_file(sub_dir),
+ format, member)
+
+ if member is not None:
+ return self.scratch_file(member)
+ return None
+
+ '''
+ Create a temporary directory suitable for storing UNIX
+ socket paths.
+
+ Returns: a tempfile.TemporaryDirectory instance
+ '''
+ def socket_dir(self):
+ if self.socketdir is None:
+ self.socketdir = tempfile.TemporaryDirectory(
+ prefix="qemu_func_test_sock_")
+ return self.socketdir
+
+ '''
+ @params args list of zero or more subdirectories or file
+
+ Construct a path for accessing a data file located
+ relative to the source directory that is the root for
+ functional tests.
+
+ @args may be an empty list to reference the root dir
+ itself, may be a single element to reference a file in
+ the root directory, or may be multiple elements to
+ reference a file nested below. The path components
+ will be joined using the platform appropriate path
+ separator.
+
+ Returns: string representing a file path
+ '''
+ def data_file(self, *args):
+ return str(Path(Path(__file__).parent.parent, *args))
+
+ '''
+ @params args list of zero or more subdirectories or file
+
+ Construct a path for accessing a data file located
+ relative to the build directory root.
+
+ @args may be an empty list to reference the build dir
+ itself, may be a single element to reference a file in
+ the build directory, or may be multiple elements to
+ reference a file nested below. The path components
+ will be joined using the platform appropriate path
+ separator.
+
+ Returns: string representing a file path
+ '''
+ def build_file(self, *args):
+ return str(Path(BUILD_DIR, *args))
+
+ '''
+ @params args list of zero or more subdirectories or file
+
+ Construct a path for accessing/creating a scratch file
+ located relative to a temporary directory dedicated to
+ this test case. The directory and its contents will be
+ purged upon completion of the test.
+
+ @args may be an empty list to reference the scratch dir
+ itself, may be a single element to reference a file in
+ the scratch directory, or may be multiple elements to
+ reference a file nested below. The path components
+ will be joined using the platform appropriate path
+ separator.
+
+ Returns: string representing a file path
+ '''
+ def scratch_file(self, *args):
+ return str(Path(self.workdir, *args))
+
+ '''
+ @params args list of zero or more subdirectories or file
+
+ Construct a path for accessing/creating a log file
+ located relative to a temporary directory dedicated to
+ this test case. The directory and its log files will be
+ preserved upon completion of the test.
+
+ @args may be an empty list to reference the log dir
+ itself, may be a single element to reference a file in
+ the log directory, or may be multiple elements to
+ reference a file nested below. The path components
+ will be joined using the platform appropriate path
+ separator.
+
+ Returns: string representing a file path
+ '''
+ def log_file(self, *args):
+ return str(Path(self.outputdir, *args))
+
+ '''
+ @params plugin_name: name of the plugin (without file suffix)
+
+ Return the full path to the plugin taking into account any host OS
+ specific suffixes.
+ '''
+ def plugin_file(self, plugin_name):
+ sfx = dso_suffix()
+ return os.path.join('tests', 'tcg', 'plugins', f'{plugin_name}.{sfx}')
+
+ def assets_available(self):
+ for name, asset in vars(self.__class__).items():
+ if name.startswith("ASSET_") and type(asset) == Asset:
+ if not asset.available():
+ self.log.debug(f"Asset {asset.url} not available")
+ return False
+ return True
+
+ def setUp(self):
+ self.qemu_bin = os.getenv('QEMU_TEST_QEMU_BINARY')
+ self.assertIsNotNone(self.qemu_bin, 'QEMU_TEST_QEMU_BINARY must be set')
+ self.arch = self.qemu_bin.split('-')[-1]
+ self.socketdir = None
+
+ self.outputdir = self.build_file('tests', 'functional',
+ self.arch, self.id())
+ self.workdir = os.path.join(self.outputdir, 'scratch')
+ os.makedirs(self.workdir, exist_ok=True)
+
+ self.log_filename = self.log_file('base.log')
+ self.log = logging.getLogger('qemu-test')
+ self.log.setLevel(logging.DEBUG)
+ self._log_fh = logging.FileHandler(self.log_filename, mode='w')
+ self._log_fh.setLevel(logging.DEBUG)
+ fileFormatter = logging.Formatter(
+ '%(asctime)s - %(levelname)s: %(message)s')
+ self._log_fh.setFormatter(fileFormatter)
+ self.log.addHandler(self._log_fh)
+
+ # Capture QEMUMachine logging
+ self.machinelog = logging.getLogger('qemu.machine')
+ self.machinelog.setLevel(logging.DEBUG)
+ self.machinelog.addHandler(self._log_fh)
+
+ if not self.assets_available():
+ self.skipTest('One or more assets is not available')
+
+ def tearDown(self):
+ if "QEMU_TEST_KEEP_SCRATCH" not in os.environ:
+ shutil.rmtree(self.workdir)
+ if self.socketdir is not None:
+ shutil.rmtree(self.socketdir.name)
+ self.socketdir = None
+ self.machinelog.removeHandler(self._log_fh)
+ self.log.removeHandler(self._log_fh)
+
+ def main():
+ path = os.path.basename(sys.argv[0])[:-3]
+
+ cache = os.environ.get("QEMU_TEST_PRECACHE", None)
+ if cache is not None:
+ Asset.precache_suites(path, cache)
+ return
+
+ tr = pycotap.TAPTestRunner(message_log = pycotap.LogMode.LogToError,
+ test_output_log = pycotap.LogMode.LogToError)
+ res = unittest.main(module = None, testRunner = tr, exit = False,
+ argv=["__dummy__", path])
+ for (test, message) in res.result.errors + res.result.failures:
+
+ if hasattr(test, "log_filename"):
+ print('More information on ' + test.id() + ' could be found here:'
+ '\n %s' % test.log_filename, file=sys.stderr)
+ if hasattr(test, 'console_log_name'):
+ print(' %s' % test.console_log_name, file=sys.stderr)
+ sys.exit(not res.result.wasSuccessful())
+
+
+class QemuUserTest(QemuBaseTest):
+
+ def setUp(self):
+ super().setUp()
+ self._ldpath = []
+
+ def add_ldpath(self, ldpath):
+ self._ldpath.append(os.path.abspath(ldpath))
+
+ def run_cmd(self, bin_path, args=[]):
+ return run([self.qemu_bin]
+ + ["-L %s" % ldpath for ldpath in self._ldpath]
+ + [bin_path]
+ + args,
+ text=True, capture_output=True)
+
+class QemuSystemTest(QemuBaseTest):
+ """Facilitates system emulation tests."""
+
+ cpu = None
+ machine = None
+ _machinehelp = None
+
+ def setUp(self):
+ self._vms = {}
+
+ super().setUp()
+
+ console_log = logging.getLogger('console')
+ console_log.setLevel(logging.DEBUG)
+ self.console_log_name = self.log_file('console.log')
+ self._console_log_fh = logging.FileHandler(self.console_log_name,
+ mode='w')
+ self._console_log_fh.setLevel(logging.DEBUG)
+ fileFormatter = logging.Formatter('%(asctime)s: %(message)s')
+ self._console_log_fh.setFormatter(fileFormatter)
+ console_log.addHandler(self._console_log_fh)
+
+ def set_machine(self, machinename):
+ # TODO: We should use QMP to get the list of available machines
+ if not self._machinehelp:
+ self._machinehelp = run(
+ [self.qemu_bin, '-M', 'help'],
+ capture_output=True, check=True, encoding='utf8').stdout
+ if self._machinehelp.find(machinename) < 0:
+ self.skipTest('no support for machine ' + machinename)
+ self.machine = machinename
+
+ def require_accelerator(self, accelerator):
+ """
+ Requires an accelerator to be available for the test to continue
+
+ It takes into account the currently set qemu binary.
+
+ If the check fails, the test is canceled. If the check itself
+ for the given accelerator is not available, the test is also
+ canceled.
+
+ :param accelerator: name of the accelerator, such as "kvm" or "tcg"
+ :type accelerator: str
+ """
+ checker = {'tcg': tcg_available,
+ 'kvm': kvm_available}.get(accelerator)
+ if checker is None:
+ self.skipTest("Don't know how to check for the presence "
+ "of accelerator %s" % accelerator)
+ if not checker(qemu_bin=self.qemu_bin):
+ self.skipTest("%s accelerator does not seem to be "
+ "available" % accelerator)
+
+ def require_netdev(self, netdevname):
+ help = run([self.qemu_bin,
+ '-M', 'none', '-netdev', 'help'],
+ capture_output=True, check=True, encoding='utf8').stdout
+ if help.find('\n' + netdevname + '\n') < 0:
+ self.skipTest('no support for ' + netdevname + ' networking')
+
+ def require_device(self, devicename):
+ help = run([self.qemu_bin,
+ '-M', 'none', '-device', 'help'],
+ capture_output=True, check=True, encoding='utf8').stdout
+ if help.find(devicename) < 0:
+ self.skipTest('no support for device ' + devicename)
+
+ def _new_vm(self, name, *args):
+ vm = QEMUMachine(self.qemu_bin,
+ name=name,
+ base_temp_dir=self.workdir,
+ log_dir=self.log_file())
+ self.log.debug('QEMUMachine "%s" created', name)
+ self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir)
+
+ sockpath = os.environ.get("QEMU_TEST_QMP_BACKDOOR", None)
+ if sockpath is not None:
+ vm.add_args("-chardev",
+ f"socket,id=backdoor,path={sockpath},server=on,wait=off",
+ "-mon", "chardev=backdoor,mode=control")
+
+ if args:
+ vm.add_args(*args)
+ return vm
+
+ @property
+ def vm(self):
+ return self.get_vm(name='default')
+
+ def get_vm(self, *args, name=None):
+ if not name:
+ name = str(uuid.uuid4())
+ if self._vms.get(name) is None:
+ self._vms[name] = self._new_vm(name, *args)
+ if self.cpu is not None:
+ self._vms[name].add_args('-cpu', self.cpu)
+ if self.machine is not None:
+ self._vms[name].set_machine(self.machine)
+ return self._vms[name]
+
+ def set_vm_arg(self, arg, value):
+ """
+ Set an argument to list of extra arguments to be given to the QEMU
+ binary. If the argument already exists then its value is replaced.
+
+ :param arg: the QEMU argument, such as "-cpu" in "-cpu host"
+ :type arg: str
+ :param value: the argument value, such as "host" in "-cpu host"
+ :type value: str
+ """
+ if not arg or not value:
+ return
+ if arg not in self.vm.args:
+ self.vm.args.extend([arg, value])
+ else:
+ idx = self.vm.args.index(arg) + 1
+ if idx < len(self.vm.args):
+ self.vm.args[idx] = value
+ else:
+ self.vm.args.append(value)
+
+ def tearDown(self):
+ for vm in self._vms.values():
+ vm.shutdown()
+ logging.getLogger('console').removeHandler(self._console_log_fh)
+ super().tearDown()
diff --git a/tests/functional/qemu_test/tuxruntest.py b/tests/functional/qemu_test/tuxruntest.py
new file mode 100644
index 0000000..6c442ff
--- /dev/null
+++ b/tests/functional/qemu_test/tuxruntest.py
@@ -0,0 +1,136 @@
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+from qemu_test import which, get_qemu_img
+
+class TuxRunBaselineTest(QemuSystemTest):
+
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
+ # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
+ timeout = 100
+
+ def setUp(self):
+ super().setUp()
+
+ # We need zstd for all the tuxrun tests
+ if which('zstd') is None:
+ self.skipTest("zstd not found in $PATH")
+
+ # Pre-init TuxRun specific settings: Most machines work with
+ # reasonable defaults but we sometimes need to tweak the
+ # config. To avoid open coding everything we store all these
+ # details in the metadata for each test.
+
+ # The tuxboot tag matches the root directory
+ self.tuxboot = self.arch
+
+ # Most Linux systems use ttyS0 for their serial port
+ self.console = "ttyS0"
+
+ # Does the machine shutdown QEMU nicely on "halt"
+ self.wait_for_shutdown = True
+
+ self.root = "vda"
+
+ # Occasionally we need extra devices to hook things up
+ self.extradev = None
+
+ self.qemu_img = get_qemu_img(self)
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(self, success_message,
+ failure_message='Kernel panic - not syncing',
+ vm=vm)
+
+ def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None):
+ """
+ Fetch the TuxBoot assets.
+ """
+ kernel_image = kernel_asset.fetch()
+ disk_image = self.uncompress(rootfs_asset)
+ dtb = dtb_asset.fetch() if dtb_asset is not None else None
+
+ return (kernel_image, disk_image, dtb)
+
+ def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
+ """
+ Setup to run and add the common parameters to the system
+ """
+ self.vm.set_console(console_index=console_index)
+
+ # all block devices are raw ext4's
+ blockdev = "driver=raw,file.driver=file," \
+ + f"file.filename={disk},node-name=hd0"
+
+ self.kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
+ self.kcmd_line += f" root=/dev/{self.root}"
+ self.kcmd_line += f" console={self.console}"
+
+ self.vm.add_args('-kernel', kernel,
+ '-append', self.kcmd_line,
+ '-blockdev', blockdev)
+
+ # Sometimes we need extra devices attached
+ if self.extradev:
+ self.vm.add_args('-device', self.extradev)
+
+ self.vm.add_args('-device',
+ f"{drive},drive=hd0")
+
+ # Some machines need an explicit DTB
+ if dtb:
+ self.vm.add_args('-dtb', dtb)
+
+ def run_tuxtest_tests(self, haltmsg):
+ """
+ Wait for the system to boot up, wait for the login prompt and
+ then do a few things on the console. Trigger a shutdown and
+ wait to exit cleanly.
+ """
+ ps1='root@tuxtest:~#'
+ self.wait_for_console_pattern(self.kcmd_line)
+ self.wait_for_console_pattern('tuxtest login:')
+ exec_command_and_wait_for_pattern(self, 'root', ps1)
+ exec_command_and_wait_for_pattern(self, 'cat /proc/interrupts', ps1)
+ exec_command_and_wait_for_pattern(self, 'cat /proc/self/maps', ps1)
+ exec_command_and_wait_for_pattern(self, 'uname -a', ps1)
+ exec_command_and_wait_for_pattern(self, 'halt', haltmsg)
+
+ # Wait for VM to shut down gracefully if it can
+ if self.wait_for_shutdown:
+ self.vm.wait()
+ else:
+ self.vm.shutdown()
+
+ def common_tuxrun(self,
+ kernel_asset,
+ rootfs_asset,
+ dtb_asset=None,
+ drive="virtio-blk-device",
+ haltmsg="reboot: System halted",
+ console_index=0):
+ """
+ Common path for LKFT tests. Unless we need to do something
+ special with the command line we can process most things using
+ the tag metadata.
+ """
+ (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset, rootfs_asset,
+ dtb_asset)
+
+ self.prepare_run(kernel, disk, drive, dtb, console_index)
+ self.vm.launch()
+ self.run_tuxtest_tests(haltmsg)
+ os.remove(disk)
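+
+# A minimal user of this class (asset URLs, hashes and machine choice are
+# hypothetical) would look something like:
+#
+#   class TuxRunX8664Test(TuxRunBaselineTest):
+#
+#       ASSET_X86_64_KERNEL = Asset('https://example.org/x86_64/bzImage',
+#                                   '<sha256sum>')
+#       ASSET_X86_64_ROOTFS = Asset('https://example.org/x86_64/rootfs.ext4.zst',
+#                                   '<sha256sum>')
+#
+#       def test_x86_64(self):
+#           self.set_machine('q35')
+#           self.common_tuxrun(kernel_asset=self.ASSET_X86_64_KERNEL,
+#                              rootfs_asset=self.ASSET_X86_64_ROOTFS,
+#                              drive='virtio-blk-pci')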
diff --git a/tests/functional/qemu_test/uncompress.py b/tests/functional/qemu_test/uncompress.py
new file mode 100644
index 0000000..b7ef8f7
--- /dev/null
+++ b/tests/functional/qemu_test/uncompress.py
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Utilities for python-based QEMU tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# Authors:
+# Thomas Huth <thuth@redhat.com>
+
+import gzip
+import lzma
+import os
+import stat
+import shutil
+from urllib.parse import urlparse
+from subprocess import run, CalledProcessError
+
+from .asset import Asset
+
+
+def gzip_uncompress(gz_path, output_path):
+ if os.path.exists(output_path):
+ return
+ with gzip.open(gz_path, 'rb') as gz_in:
+ try:
+ with open(output_path, 'wb') as raw_out:
+ shutil.copyfileobj(gz_in, raw_out)
+ except:
+ os.remove(output_path)
+ raise
+
+def lzma_uncompress(xz_path, output_path):
+ if os.path.exists(output_path):
+ return
+ with lzma.open(xz_path, 'rb') as lzma_in:
+ try:
+ with open(output_path, 'wb') as raw_out:
+ shutil.copyfileobj(lzma_in, raw_out)
+ except:
+ os.remove(output_path)
+ raise
+
+
+def zstd_uncompress(zstd_path, output_path):
+ if os.path.exists(output_path):
+ return
+
+ try:
+ run(['zstd', "-f", "-d", zstd_path,
+ "-o", output_path], capture_output=True, check=True)
+ except CalledProcessError as e:
+ os.remove(output_path)
+ raise Exception(
+ f"Unable to decompress zstd file {zstd_path} with {e}") from e
+
+ # zstd copies source archive permissions for the output
+ # file, so must make this writable for QEMU
+ os.chmod(output_path, stat.S_IRUSR | stat.S_IWUSR)
+
+
+'''
+@params compressed: filename, Asset, or file-like object to uncompress
+@params uncompressed: filename to uncompress into
+@params format: optional compression format (gzip, lzma)
+
+Uncompresses @compressed into @uncompressed
+
+If @format is None, heuristics will be applied to guess the format
+from the filename or Asset URL. @format must be non-None if @compressed
+is a file-like object.
+
+Returns the fully qualified path to the uncompressed file
+'''
+def uncompress(compressed, uncompressed, format=None):
+ if format is None:
+ format = guess_uncompress_format(compressed)
+
+ if format == "xz":
+ lzma_uncompress(str(compressed), uncompressed)
+ elif format == "gz":
+ gzip_uncompress(str(compressed), uncompressed)
+ elif format == "zstd":
+ zstd_uncompress(str(compressed), uncompressed)
+ else:
+ raise Exception(f"Unknown compression format {format}")
+
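+# e.g. (illustrative paths):
+#
+#   uncompress('/tmp/scratch/vmlinuz.xz', '/tmp/scratch/vmlinuz')
+#
+# decompresses the .xz file, with the format deduced from the extension.
+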
+'''
+@params compressed: filename, Asset, or file-like object to guess
+
+Guess the format of @compressed, raising an exception if
+no format can be determined
+'''
+def guess_uncompress_format(compressed):
+ if type(compressed) == Asset:
+ compressed = urlparse(compressed.url).path
+ elif type(compressed) != str:
+ raise Exception(f"Unable to guess compression cformat for {compressed}")
+
+ (name, ext) = os.path.splitext(compressed)
+ if ext == ".xz":
+ return "xz"
+ elif ext == ".gz":
+ return "gz"
+ elif ext in [".zstd", ".zst"]:
+ return 'zstd'
+ else:
+ raise Exception(f"Unknown compression format for {compressed}")
diff --git a/tests/functional/qemu_test/utils.py b/tests/functional/qemu_test/utils.py
new file mode 100644
index 0000000..e7c8de8
--- /dev/null
+++ b/tests/functional/qemu_test/utils.py
@@ -0,0 +1,39 @@
+# Utilities for python-based QEMU tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# Authors:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+
+from qemu.utils import get_info_usernet_hostfwd_port
+
+
+def get_usernet_hostfwd_port(vm):
+ res = vm.cmd('human-monitor-command', command_line='info usernet')
+ return get_info_usernet_hostfwd_port(res)
+
+"""
+Round up to next power of 2
+"""
+def pow2ceil(x):
+ return 1 if x == 0 else 2**(x - 1).bit_length()
+
+def file_truncate(path, size):
+ if size != os.path.getsize(path):
+ with open(path, 'ab+') as fd:
+ fd.truncate(size)
+
+"""
+Expand file size to next power of 2
+"""
+def image_pow2ceil_expand(path):
+ size = os.path.getsize(path)
+ size_aligned = pow2ceil(size)
+ if size != size_aligned:
+ with open(path, 'ab+') as fd:
+ fd.truncate(size_aligned)
diff --git a/tests/functional/replay_kernel.py b/tests/functional/replay_kernel.py
new file mode 100644
index 0000000..80795eb
--- /dev/null
+++ b/tests/functional/replay_kernel.py
@@ -0,0 +1,84 @@
+# Record/replay test that boots a Linux kernel
+#
+# Copyright (c) 2020 ISP RAS
+#
+# Author:
+# Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+import logging
+import time
+import subprocess
+
+from qemu_test.linuxkernel import LinuxKernelTest
+
+class ReplayKernelBase(LinuxKernelTest):
+ """
+ Boots a Linux kernel in record mode and checks that the console
+ is operational and the kernel command line is properly passed
+ from QEMU to the kernel.
+ Then replays the same scenario and verifies that QEMU terminates
+ correctly.
+ """
+
+ timeout = 180
+ REPLAY_KERNEL_COMMAND_LINE = 'printk.time=1 panic=-1 '
+
+ def run_vm(self, kernel_path, kernel_command_line, console_pattern,
+ record, shift, args, replay_path):
+ # icount requires TCG to be available
+ self.require_accelerator('tcg')
+
+ logger = logging.getLogger('replay')
+ start_time = time.time()
+ vm = self.get_vm(name='recording' if record else 'replay')
+ vm.set_console()
+ if record:
+ logger.info('recording the execution...')
+ mode = 'record'
+ else:
+ logger.info('replaying the execution...')
+ mode = 'replay'
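+ # -icount rr: the same rrfile is written during recording and
+ # consumed during replay, giving deterministic execution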
+ vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' %
+ (shift, mode, replay_path),
+ '-kernel', kernel_path,
+ '-append', kernel_command_line,
+ '-net', 'none',
+ '-no-reboot')
+ if args:
+ vm.add_args(*args)
+ vm.launch()
+ self.wait_for_console_pattern(console_pattern, vm)
+ if record:
+ vm.shutdown()
+ logger.info('finished the recording with log size %s bytes'
+ % os.path.getsize(replay_path))
+ self.run_replay_dump(replay_path)
+ logger.info('successfully tested replay-dump.py')
+ else:
+ vm.wait()
+ logger.info('successfully finished the replay')
+ elapsed = time.time() - start_time
+ logger.info('elapsed time %.2f sec' % elapsed)
+ return elapsed
+
+ def run_replay_dump(self, replay_path):
+ try:
+ subprocess.check_call(["./scripts/replay-dump.py",
+ "-f", replay_path],
+ stdout=subprocess.DEVNULL)
+ except subprocess.CalledProcessError:
+ self.fail('replay-dump.py failed')
+
+ def run_rr(self, kernel_path, kernel_command_line, console_pattern,
+ shift=7, args=None):
+ replay_path = os.path.join(self.workdir, 'replay.bin')
+ t1 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
+ True, shift, args, replay_path)
+ t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
+ False, shift, args, replay_path)
+ logger = logging.getLogger('replay')
+ logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
diff --git a/tests/functional/reverse_debugging.py b/tests/functional/reverse_debugging.py
new file mode 100644
index 0000000..f9a1d39
--- /dev/null
+++ b/tests/functional/reverse_debugging.py
@@ -0,0 +1,196 @@
+# Reverse debugging test
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (c) 2020 ISP RAS
+#
+# Author:
+# Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+import os
+import logging
+
+from qemu_test import LinuxKernelTest, get_qemu_img
+from qemu_test.ports import Ports
+
+
+class ReverseDebugging(LinuxKernelTest):
+ """
+ Test GDB reverse debugging commands: reverse step and reverse continue.
+ Recording saves the execution of some instructions and makes an initial
+ VM snapshot to allow reverse execution.
+ Replay records the addresses of the first instructions and then checks
+ that they are visited in the correct order when stepping backwards.
+ After that the execution is replayed to the end, and the reverse continue
+ command is checked by setting several breakpoints and asserting
+ that execution stops at the last of them.
+ """
+
+ timeout = 10
+ STEPS = 10
+ endian_is_le = True
+
+ def run_vm(self, record, shift, args, replay_path, image_path, port):
+ from avocado.utils import datadrainer
+
+ logger = logging.getLogger('replay')
+ vm = self.get_vm(name='record' if record else 'replay')
+ vm.set_console()
+ if record:
+ logger.info('recording the execution...')
+ mode = 'record'
+ else:
+ logger.info('replaying the execution...')
+ mode = 'replay'
+ vm.add_args('-gdb', 'tcp::%d' % port, '-S')
+ vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' %
+ (shift, mode, replay_path),
+ '-net', 'none')
+ vm.add_args('-drive', 'file=%s,if=none' % image_path)
+ if args:
+ vm.add_args(*args)
+ vm.launch()
+ console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(),
+ logger=self.log.getChild('console'),
+ stop_check=(lambda : not vm.is_running()))
+ console_drainer.start()
+ return vm
+
+ @staticmethod
+ def get_reg_le(g, reg):
+ res = g.cmd(b'p%x' % reg)
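+ # the gdbstub returns the register value as a hex string in target
+ # byte order; walk the byte pairs from the end to decode little-endian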
+ num = 0
+ for i in range(len(res))[-2::-2]:
+ num = 0x100 * num + int(res[i:i + 2], 16)
+ return num
+
+ @staticmethod
+ def get_reg_be(g, reg):
+ res = g.cmd(b'p%x' % reg)
+ return int(res, 16)
+
+ def get_reg(self, g, reg):
+ # value may be encoded in BE or LE order
+ if self.endian_is_le:
+ return self.get_reg_le(g, reg)
+ else:
+ return self.get_reg_be(g, reg)
+
+ def get_pc(self, g):
+ return self.get_reg(g, self.REG_PC)
+
+ def check_pc(self, g, addr):
+ pc = self.get_pc(g)
+ if pc != addr:
+ self.fail('Invalid PC (read %x instead of %x)' % (pc, addr))
+
+ @staticmethod
+ def gdb_step(g):
+ g.cmd(b's', b'T05thread:01;')
+
+ @staticmethod
+ def gdb_bstep(g):
+ g.cmd(b'bs', b'T05thread:01;')
+
+ @staticmethod
+ def vm_get_icount(vm):
+ return vm.qmp('query-replay')['return']['icount']
+
+ def reverse_debugging(self, shift=7, args=None):
+ from avocado.utils import gdb
+ from avocado.utils import process
+
+ logger = logging.getLogger('replay')
+
+ # create qcow2 for snapshots
+ logger.info('creating qcow2 image for VM snapshots')
+ image_path = os.path.join(self.workdir, 'disk.qcow2')
+ qemu_img = get_qemu_img(self)
+ if qemu_img is None:
+ self.skipTest('Could not find "qemu-img", which is required to '
+ 'create the temporary qcow2 image')
+ cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path)
+ process.run(cmd)
+
+ replay_path = os.path.join(self.workdir, 'replay.bin')
+
+ # record the log
+ vm = self.run_vm(True, shift, args, replay_path, image_path, -1)
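+ # busy-wait until the recording covers at least STEPS instructions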
+ while self.vm_get_icount(vm) <= self.STEPS:
+ pass
+ last_icount = self.vm_get_icount(vm)
+ vm.shutdown()
+
+ logger.info("recorded log with %s+ steps" % last_icount)
+
+ # replay and run debug commands
+ with Ports() as ports:
+ port = ports.find_free_port()
+ vm = self.run_vm(False, shift, args, replay_path, image_path, port)
+ logger.info('connecting to gdbstub')
+ g = gdb.GDBRemote('127.0.0.1', port, False, False)
+ g.connect()
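+ # probe the stub's feature list and make sure reverse execution
+ # is advertised before running the test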
+ r = g.cmd(b'qSupported')
+ if b'qXfer:features:read+' in r:
+ g.cmd(b'qXfer:features:read:target.xml:0,ffb')
+ if b'ReverseStep+' not in r:
+ self.fail('Reverse step is not supported by QEMU')
+ if b'ReverseContinue+' not in r:
+ self.fail('Reverse continue is not supported by QEMU')
+
+ logger.info('stepping forward')
+ steps = []
+ # record first instruction addresses
+ for _ in range(self.STEPS):
+ pc = self.get_pc(g)
+ logger.info('saving position %x' % pc)
+ steps.append(pc)
+ self.gdb_step(g)
+
+ # visit the recorded instructions in reverse order
+ logger.info('stepping backward')
+ for addr in steps[::-1]:
+ self.gdb_bstep(g)
+ self.check_pc(g, addr)
+ logger.info('found position %x' % addr)
+
+ # visit the recorded instructions in forward order
+ logger.info('stepping forward')
+ for addr in steps:
+ self.check_pc(g, addr)
+ self.gdb_step(g)
+ logger.info('found position %x' % addr)
+
+ # set breakpoints for the instructions just stepped over
+ logger.info('setting breakpoints')
+ for addr in steps:
+ # hardware breakpoint at addr with len=1
+ g.cmd(b'Z1,%x,1' % addr, b'OK')
+
+ # this may hit a breakpoint if first instructions are executed
+ # again
+ logger.info('continuing execution')
+ vm.qmp('replay-break', icount=last_icount - 1)
+ # continue - will return after pausing
+ # Execution either stops at the end (T02 reply) or hits one of
+ # the breakpoints again (T05 reply).
+ g.cmd(b'c')
+ if self.vm_get_icount(vm) == last_icount - 1:
+ logger.info('reached the end (icount %s)' % (last_icount - 1))
+ else:
+ logger.info('hit a breakpoint again at %x (icount %s)' %
+ (self.get_pc(g), self.vm_get_icount(vm)))
+
+ logger.info('running reverse continue to reach %x' % steps[-1])
+ # reverse continue - will return after stopping at the breakpoint
+ g.cmd(b'bc', b'T05thread:01;')
+
+ # assume that none of the first instructions was executed again,
+ # which would break the order of the breakpoints
+ self.check_pc(g, steps[-1])
+ logger.info('successfully reached %x' % steps[-1])
+
+ logger.info('exiting gdb and qemu')
+ vm.shutdown()
diff --git a/tests/functional/test_aarch64_aspeed_ast2700.py b/tests/functional/test_aarch64_aspeed_ast2700.py
new file mode 100755
index 0000000..d02dc79
--- /dev/null
+++ b/tests/functional/test_aarch64_aspeed_ast2700.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED SoCs with firmware
+#
+# Copyright (C) 2022 ASPEED Technology Inc
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class AST2x00MachineSDK(QemuSystemTest):
+
+ def do_test_aarch64_aspeed_sdk_start(self, image):
+ self.require_netdev('user')
+ self.vm.set_console()
+ self.vm.add_args('-device',
+ 'tmp105,bus=aspeed.i2c.bus.1,address=0x4d,id=tmp-test')
+ self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
+ '-net', 'nic', '-net', 'user', '-snapshot')
+
+ self.vm.launch()
+
+ def verify_vbootrom_firmware_flow(self):
+ wait_for_console_pattern(self, 'Found valid FIT image')
+ wait_for_console_pattern(self, '[uboot] loading')
+ wait_for_console_pattern(self, 'done')
+ wait_for_console_pattern(self, '[fdt] loading')
+ wait_for_console_pattern(self, 'done')
+ wait_for_console_pattern(self, '[tee] loading')
+ wait_for_console_pattern(self, 'done')
+ wait_for_console_pattern(self, '[atf] loading')
+ wait_for_console_pattern(self, 'done')
+ wait_for_console_pattern(self, 'Jumping to BL31 (Trusted Firmware-A)')
+
+ def verify_openbmc_boot_and_login(self, name):
+ wait_for_console_pattern(self, 'U-Boot 2023.10')
+ wait_for_console_pattern(self, '## Loading kernel from FIT Image')
+ wait_for_console_pattern(self, 'Starting kernel ...')
+
+ wait_for_console_pattern(self, f'{name} login:')
+ exec_command_and_wait_for_pattern(self, 'root', 'Password:')
+ exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#')
+
+ ASSET_SDK_V906_AST2700 = Asset(
+ 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-a0-default-obmc.tar.gz',
+ '7247b6f19dbfb700686f8d9f723ac23f3eb229226c0589cb9b06b80d1b61f3cb')
+
+ ASSET_SDK_V906_AST2700A1 = Asset(
+ 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz',
+ 'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6')
+
+ def do_ast2700_i2c_test(self):
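+ # instantiate an lm75 sensor on I2C bus 1 and check that a temperature
+ # set through QOM on the tmp105 device shows up in the guest's sysfs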
+ exec_command_and_wait_for_pattern(self,
+ 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-1/device/new_device ',
+ 'i2c i2c-1: new_device: Instantiated device lm75 at 0x4d')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '0')
+ self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
+ property='temperature', value=18000)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000')
+
+ def start_ast2700_test(self, name):
+ num_cpu = 4
+ uboot_size = os.path.getsize(self.scratch_file(name,
+ 'u-boot-nodtb.bin'))
+ uboot_dtb_load_addr = hex(0x400000000 + uboot_size)
+
+ load_images_list = [
+ {
+ 'addr': '0x400000000',
+ 'file': self.scratch_file(name,
+ 'u-boot-nodtb.bin')
+ },
+ {
+ 'addr': uboot_dtb_load_addr,
+ 'file': self.scratch_file(name, 'u-boot.dtb')
+ },
+ {
+ 'addr': '0x430000000',
+ 'file': self.scratch_file(name, 'bl31.bin')
+ },
+ {
+ 'addr': '0x430080000',
+ 'file': self.scratch_file(name, 'optee',
+ 'tee-raw.bin')
+ }
+ ]
+
+ for load_image in load_images_list:
+ addr = load_image['addr']
+ file = load_image['file']
+ self.vm.add_args('-device',
+ f'loader,force-raw=on,addr={addr},file={file}')
+
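+ # point every core's start address at the BL31 entry (0x430000000)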
+ for i in range(num_cpu):
+ self.vm.add_args('-device',
+ f'loader,addr=0x430000000,cpu-num={i}')
+
+ self.vm.add_args('-smp', str(num_cpu))
+ self.do_test_aarch64_aspeed_sdk_start(
+ self.scratch_file(name, 'image-bmc'))
+
+ def start_ast2700_test_vbootrom(self, name):
+ self.vm.add_args('-bios', 'ast27x0_bootrom.bin')
+ self.do_test_aarch64_aspeed_sdk_start(
+ self.scratch_file(name, 'image-bmc'))
+
+ def test_aarch64_ast2700_evb_sdk_v09_06(self):
+ self.set_machine('ast2700-evb')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2700)
+ self.start_ast2700_test('ast2700-a0-default')
+ self.verify_openbmc_boot_and_login('ast2700-a0-default')
+ self.do_ast2700_i2c_test()
+
+ def test_aarch64_ast2700a1_evb_sdk_v09_06(self):
+ self.set_machine('ast2700a1-evb')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2700A1)
+ self.start_ast2700_test('ast2700-default')
+ self.verify_openbmc_boot_and_login('ast2700-default')
+ self.do_ast2700_i2c_test()
+
+ def test_aarch64_ast2700a1_evb_sdk_vbootrom_v09_06(self):
+ self.set_machine('ast2700a1-evb')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2700A1)
+ self.start_ast2700_test_vbootrom('ast2700-default')
+ self.verify_vbootrom_firmware_flow()
+ self.verify_openbmc_boot_and_login('ast2700-default')
+ self.do_ast2700_i2c_test()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_aspeed_ast2700fc.py b/tests/functional/test_aarch64_aspeed_ast2700fc.py
new file mode 100755
index 0000000..b85370e
--- /dev/null
+++ b/tests/functional/test_aarch64_aspeed_ast2700fc.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED SoCs with firmware
+#
+# Copyright (C) 2022 ASPEED Technology Inc
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class AST2x00MachineSDK(QemuSystemTest):
+
+ def do_test_aarch64_aspeed_sdk_start(self, image):
+ self.require_netdev('user')
+ self.vm.set_console()
+ self.vm.add_args('-device',
+ 'tmp105,bus=aspeed.i2c.bus.1,address=0x4d,id=tmp-test')
+ self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw',
+ '-net', 'nic', '-net', 'user', '-snapshot')
+
+ self.vm.launch()
+
+ def verify_openbmc_boot_and_login(self, name):
+ wait_for_console_pattern(self, 'U-Boot 2023.10')
+ wait_for_console_pattern(self, '## Loading kernel from FIT Image')
+ wait_for_console_pattern(self, 'Starting kernel ...')
+
+ wait_for_console_pattern(self, f'{name} login:')
+ exec_command_and_wait_for_pattern(self, 'root', 'Password:')
+ exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#')
+
+ ASSET_SDK_V906_AST2700 = Asset(
+ 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz',
+ 'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6')
+
+ def do_ast2700_i2c_test(self):
+ exec_command_and_wait_for_pattern(self,
+ 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-1/device/new_device ',
+ 'i2c i2c-1: new_device: Instantiated device lm75 at 0x4d')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '0')
+ self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
+ property='temperature', value=18000)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000')
+
+ def do_ast2700fc_ssp_test(self):
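+ # relaunch with the console on the SSP UART and poke the Zephyr shell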
+ self.vm.shutdown()
+ self.vm.set_console(console_index=1)
+ self.vm.launch()
+
+ exec_command_and_wait_for_pattern(self, '\012', 'ssp:~$')
+ exec_command_and_wait_for_pattern(self, 'version',
+ 'Zephyr version 3.7.1')
+ exec_command_and_wait_for_pattern(self, 'md 72c02000 1',
+ '[72c02000] 06010103')
+
+ def do_ast2700fc_tsp_test(self):
+ self.vm.shutdown()
+ self.vm.set_console(console_index=2)
+ self.vm.launch()
+
+ exec_command_and_wait_for_pattern(self, '\012', 'tsp:~$')
+ exec_command_and_wait_for_pattern(self, 'version',
+ 'Zephyr version 3.7.1')
+ exec_command_and_wait_for_pattern(self, 'md 72c02000 1',
+ '[72c02000] 06010103')
+
+ def start_ast2700fc_test(self, name):
+ ca35_core = 4
+ uboot_size = os.path.getsize(self.scratch_file(name,
+ 'u-boot-nodtb.bin'))
+ uboot_dtb_load_addr = hex(0x400000000 + uboot_size)
+
+ load_images_list = [
+ {
+ 'addr': '0x400000000',
+ 'file': self.scratch_file(name,
+ 'u-boot-nodtb.bin')
+ },
+ {
+ 'addr': uboot_dtb_load_addr,
+ 'file': self.scratch_file(name, 'u-boot.dtb')
+ },
+ {
+ 'addr': '0x430000000',
+ 'file': self.scratch_file(name, 'bl31.bin')
+ },
+ {
+ 'addr': '0x430080000',
+ 'file': self.scratch_file(name, 'optee',
+ 'tee-raw.bin')
+ }
+ ]
+
+ for load_image in load_images_list:
+ addr = load_image['addr']
+ file = load_image['file']
+ self.vm.add_args('-device',
+ f'loader,force-raw=on,addr={addr},file={file}')
+
+ for i in range(ca35_core):
+ self.vm.add_args('-device',
+ f'loader,addr=0x430000000,cpu-num={i}')
+
+ load_elf_list = {
+ 'ssp': self.scratch_file(name, 'zephyr-aspeed-ssp.elf'),
+ 'tsp': self.scratch_file(name, 'zephyr-aspeed-tsp.elf')
+ }
+
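+ # the SSP and TSP Zephyr firmware run on the cores after the four
+ # CA35 cores, so load their ELFs with cpu-num 4 and 5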
+ for cpu_num, key in enumerate(load_elf_list, start=4):
+ file = load_elf_list[key]
+ self.vm.add_args('-device',
+ f'loader,file={file},cpu-num={cpu_num}')
+
+ self.do_test_aarch64_aspeed_sdk_start(
+ self.scratch_file(name, 'image-bmc'))
+
+ def test_aarch64_ast2700fc_sdk_v09_06(self):
+ self.set_machine('ast2700fc')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2700)
+ self.start_ast2700fc_test('ast2700-default')
+ self.verify_openbmc_boot_and_login('ast2700-default')
+ self.do_ast2700_i2c_test()
+ self.do_ast2700fc_ssp_test()
+ self.do_ast2700fc_tsp_test()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_imx8mp_evk.py b/tests/functional/test_aarch64_imx8mp_evk.py
new file mode 100755
index 0000000..638bf9e
--- /dev/null
+++ b/tests/functional/test_aarch64_imx8mp_evk.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class Imx8mpEvkMachine(LinuxKernelTest):
+
+ ASSET_IMAGE = Asset(
+ ('https://cloud.debian.org/images/cloud/bookworm/20231210-1590/'
+ 'debian-12-generic-arm64-20231210-1590.tar.xz'),
+ '7ebf1577b32d5af6204df74b54ca2e4675de9b5a9fa14f3ff70b88eeb7b3b359')
+
+ KERNEL_OFFSET = 0x51000000
+ KERNEL_SIZE = 32622528
+ INITRD_OFFSET = 0x76000000
+ INITRD_SIZE = 30987766
+ DTB_OFFSET = 0x64F51000
+ DTB_SIZE = 45 * 1024
+
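+ # carve the kernel, initrd and DTB out of the raw disk image at the
+ # offsets/sizes defined above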
+ def extract(self, in_path, out_path, offset, size):
+ try:
+ with open(in_path, "rb") as source:
+ source.seek(offset)
+ data = source.read(size)
+ with open(out_path, "wb") as target:
+ target.write(data)
+ except (IOError, ValueError) as e:
+ self.log.error(f"Failed to extract {out_path}: {e}")
+ raise
+
+ def setUp(self):
+ super().setUp()
+
+ self.image_path = self.scratch_file("disk.raw")
+ self.kernel_path = self.scratch_file("linux")
+ self.initrd_path = self.scratch_file("initrd.zstd")
+ self.dtb_path = self.scratch_file("imx8mp-evk.dtb")
+
+ self.archive_extract(self.ASSET_IMAGE)
+ self.extract(self.image_path, self.kernel_path,
+ self.KERNEL_OFFSET, self.KERNEL_SIZE)
+ self.extract(self.image_path, self.initrd_path,
+ self.INITRD_OFFSET, self.INITRD_SIZE)
+ self.extract(self.image_path, self.dtb_path,
+ self.DTB_OFFSET, self.DTB_SIZE)
+
+ def test_aarch64_imx8mp_evk_usdhc(self):
+ self.set_machine('imx8mp-evk')
+ self.vm.set_console(console_index=1)
+ self.vm.add_args('-m', '2G',
+ '-smp', '4',
+ '-kernel', self.kernel_path,
+ '-initrd', self.initrd_path,
+ '-dtb', self.dtb_path,
+ '-append', 'root=/dev/mmcblk2p1',
+ '-drive', f'file={self.image_path},if=sd,bus=2,'
+ 'format=raw,id=mmcblk2,snapshot=on')
+
+ self.vm.launch()
+ self.wait_for_console_pattern('Welcome to ')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_raspi3.py b/tests/functional/test_aarch64_raspi3.py
new file mode 100755
index 0000000..74f6630
--- /dev/null
+++ b/tests/functional/test_aarch64_raspi3.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a Raspberry Pi machine
+# and checks the console
+#
+# Copyright (c) 2020 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class Aarch64Raspi3Machine(LinuxKernelTest):
+
+ ASSET_RPI3_UEFI = Asset(
+ ('https://github.com/pbatard/RPi3/releases/download/'
+ 'v1.15/RPi3_UEFI_Firmware_v1.15.zip'),
+ '8cff2e979560048b4c84921f41a91893240b9fb71a88f0b5c5d6c8edd994bd5b')
+
+ def test_aarch64_raspi3_atf(self):
+ efi_name = 'RPI_EFI.fd'
+ efi_fd = self.archive_extract(self.ASSET_RPI3_UEFI, member=efi_name)
+
+ self.set_machine('raspi3b')
+ self.vm.set_console(console_index=1)
+ self.vm.add_args('-cpu', 'cortex-a53',
+ '-nodefaults',
+ '-device', f'loader,file={efi_fd},force-raw=true')
+ self.vm.launch()
+ self.wait_for_console_pattern('version UEFI Firmware v1.15')
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_raspi4.py b/tests/functional/test_aarch64_raspi4.py
new file mode 100755
index 0000000..7a4302b
--- /dev/null
+++ b/tests/functional/test_aarch64_raspi4.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a Raspberry Pi machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class Aarch64Raspi4Machine(LinuxKernelTest):
+
+ """
+ The kernel can be rebuilt using the kernel source referenced
+ and by following the instructions at:
+ https://www.raspberrypi.org/documentation/linux/kernel/building.md
+ """
+ ASSET_KERNEL_20190215 = Asset(
+ ('http://archive.raspberrypi.org/debian/'
+ 'pool/main/r/raspberrypi-firmware/'
+ 'raspberrypi-kernel_1.20230106-1_arm64.deb'),
+ '56d5713c8f6eee8a0d3f0e73600ec11391144fef318b08943e9abd94c0a9baf7')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '86b2be1384d41c8c388e63078a847f1e1c4cb1de/rootfs/'
+ 'arm64/rootfs.cpio.gz'),
+ '7c0b16d1853772f6f4c3ca63e789b3b9ff4936efac9c8a01fb0c98c05c7a7648')
+
+ def test_arm_raspi4(self):
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/kernel8.img')
+ dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/bcm2711-rpi-4-b.dtb')
+
+ self.set_machine('raspi4b')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'earlycon=pl011,mmio32,0xfe201000 ' +
+ 'console=ttyAMA0,115200 ' +
+ 'root=/dev/mmcblk1p2 rootwait ' +
+ 'dwc_otg.fiq_fsm_enable=0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-append', kernel_command_line)
+ # When PCI is supported we can add a USB controller:
+ # '-device', 'qemu-xhci,bus=pcie.1,id=xhci',
+ # '-device', 'usb-kbd,bus=xhci.0',
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+ # When USB is enabled we can look for this
+ # console_pattern = 'Product: QEMU USB Keyboard'
+ # self.wait_for_console_pattern(console_pattern)
+ console_pattern = 'Waiting for root device'
+ self.wait_for_console_pattern(console_pattern)
+
+
+ def test_arm_raspi4_initrd(self):
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/kernel8.img')
+ dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/bcm2711-rpi-4-b.dtb')
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.set_machine('raspi4b')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'earlycon=pl011,mmio32,0xfe201000 ' +
+ 'console=ttyAMA0,115200 ' +
+ 'panic=-1 noreboot ' +
+ 'dwc_otg.fiq_fsm_enable=0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ # When PCI is supported we can add a USB controller:
+ # '-device', 'qemu-xhci,bus=pcie.1,id=xhci',
+ # '-device', 'usb-kbd,bus=xhci.0',
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'BCM2835')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ 'cprman@7e101000')
+ exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted')
+ # TODO: Raspberry Pi4 doesn't shut down properly with recent kernels
+ # Wait for VM to shut down gracefully
+ #self.vm.wait()
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_replay.py b/tests/functional/test_aarch64_replay.py
new file mode 100755
index 0000000..db12e76
--- /dev/null
+++ b/tests/functional/test_aarch64_replay.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an aarch64 machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from subprocess import check_call, DEVNULL
+
+from qemu_test import Asset, skipIfOperatingSystem, get_qemu_img
+from replay_kernel import ReplayKernelBase
+
+
+class Aarch64Replay(ReplayKernelBase):
+
+ ASSET_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64/Image',
+ 'b74743c5e89e1cea0f73368d24ae0ae85c5204ff84be3b5e9610417417d2f235')
+
+ ASSET_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64/rootfs.ext4.zst',
+ 'a1acaaae2068df4648d04ff75f532aaa8c5edcd6b936122b6f0db4848a07b465')
+
+ def test_aarch64_virt(self):
+ self.require_netdev('user')
+ self.set_machine('virt')
+ self.cpu = 'cortex-a57'
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ raw_disk = self.uncompress(self.ASSET_ROOTFS)
+ disk = self.scratch_file('scratch.qcow2')
+ qemu_img = get_qemu_img(self)
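+ # create a qcow2 overlay backed by the raw rootfs for the test to
+ # boot from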
+ check_call([qemu_img, 'create', '-f', 'qcow2', '-b', raw_disk,
+ '-F', 'raw', disk], stdout=DEVNULL, stderr=DEVNULL)
+
+ args = ('-drive', 'file=%s,snapshot=on,id=hd0,if=none' % disk,
+ '-drive', 'driver=blkreplay,id=hd0-rr,if=none,image=hd0',
+ '-device', 'virtio-blk-device,drive=hd0-rr',
+ '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
+ '-device', 'virtio-net,netdev=vnet',
+ '-object', 'filter-replay,id=replay,netdev=vnet')
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0 root=/dev/vda')
+ console_pattern = 'Welcome to TuxTest'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern,
+ args=args)
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_aarch64_reverse_debug.py b/tests/functional/test_aarch64_reverse_debug.py
new file mode 100755
index 0000000..58d4532
--- /dev/null
+++ b/tests/functional/test_aarch64_reverse_debug.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Reverse debugging test
+#
+# Copyright (c) 2020 ISP RAS
+#
+# Author:
+# Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import Asset, skipIfMissingImports, skipFlakyTest
+from reverse_debugging import ReverseDebugging
+
+
+@skipIfMissingImports('avocado.utils')
+class ReverseDebugging_AArch64(ReverseDebugging):
+
+ REG_PC = 32
+
+ KERNEL_ASSET = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/29/Everything/aarch64/os/images/pxeboot/vmlinuz'),
+ '7e1430b81c26bdd0da025eeb8fbd77b5dc961da4364af26e771bd39f379cbbf7')
+
+ @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2921")
+ def test_aarch64_virt(self):
+ self.set_machine('virt')
+ self.cpu = 'cortex-a53'
+ kernel_path = self.KERNEL_ASSET.fetch()
+ self.reverse_debugging(args=('-kernel', kernel_path))
+
+
+if __name__ == '__main__':
+ ReverseDebugging.main()
diff --git a/tests/functional/test_aarch64_rme_sbsaref.py b/tests/functional/test_aarch64_rme_sbsaref.py
new file mode 100755
index 0000000..746770e
--- /dev/null
+++ b/tests/functional/test_aarch64_rme_sbsaref.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Realms environment on the sbsa-ref machine and a
+# nested guest VM using it.
+#
+# Copyright (c) 2024 Linaro Ltd.
+#
+# Author: Pierrick Bouvier <pierrick.bouvier@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+from test_aarch64_rme_virt import test_realms_guest
+
+
+class Aarch64RMESbsaRefMachine(QemuSystemTest):
+
+ # Stack is built with OP-TEE build environment from those instructions:
+ # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/
+ # https://github.com/pbo-linaro/qemu-rme-stack
+ ASSET_RME_STACK_SBSA = Asset(
+ ('https://fileserver.linaro.org/s/KJyeBxL82mz2r7F/'
+ 'download/rme-stack-op-tee-4.2.0-cca-v4-sbsa.tar.gz'),
+ 'dd9ab28ec869bdf3b5376116cb3689103b43433fd5c4bca0f4a8d8b3c104999e')
+
+ # This tests the FEAT_RME CPU implementation by booting a VM that
+ # supports it and launching a nested VM inside it.
+ def test_aarch64_rme_sbsaref(self):
+ self.set_machine('sbsa-ref')
+ self.require_accelerator('tcg')
+ self.require_netdev('user')
+
+ self.vm.set_console()
+
+ stack_path_tar_gz = self.ASSET_RME_STACK_SBSA.fetch()
+ self.archive_extract(stack_path_tar_gz, format="tar")
+
+ rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-sbsa')
+ pflash0 = os.path.join(rme_stack, 'images', 'SBSA_FLASH0.fd')
+ pflash1 = os.path.join(rme_stack, 'images', 'SBSA_FLASH1.fd')
+ virtual = os.path.join(rme_stack, 'images', 'disks', 'virtual')
+ drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4')
+
+ self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on')
+ self.vm.add_args('-m', '2G')
+ self.vm.add_args('-M', 'sbsa-ref')
+ self.vm.add_args('-drive', f'file={pflash0},format=raw,if=pflash')
+ self.vm.add_args('-drive', f'file={pflash1},format=raw,if=pflash')
+ self.vm.add_args('-drive', f'file=fat:rw:{virtual},format=raw')
+ self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0')
+ self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0')
+ self.vm.add_args('-device', 'virtio-9p-pci,fsdev=shr0,mount_tag=shr0')
+ self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0')
+ self.vm.add_args('-device', 'virtio-net-pci,netdev=net0')
+ self.vm.add_args('-netdev', 'user,id=net0')
+
+ self.vm.launch()
+ # Wait for host VM boot to complete.
+ wait_for_console_pattern(self, 'Welcome to Buildroot',
+ failure_message='Synchronous Exception at')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+
+ test_realms_guest(self)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_rme_virt.py b/tests/functional/test_aarch64_rme_virt.py
new file mode 100755
index 0000000..8452d27
--- /dev/null
+++ b/tests/functional/test_aarch64_rme_virt.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Realms environment on the virt machine and a nested
+# guest VM using it.
+#
+# Copyright (c) 2024 Linaro Ltd.
+#
+# Author: Pierrick Bouvier <pierrick.bouvier@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command, wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+def test_realms_guest(test_rme_instance):
+
+ # Boot the (nested) guest VM
+ exec_command(test_rme_instance,
+ 'qemu-system-aarch64 -M virt,gic-version=3 '
+ '-cpu host -enable-kvm -m 512M '
+ '-M confidential-guest-support=rme0 '
+ '-object rme-guest,id=rme0 '
+ '-device virtio-net-pci,netdev=net0,romfile= '
+ '-netdev user,id=net0 '
+ '-kernel /mnt/out/bin/Image '
+ '-initrd /mnt/out-br/images/rootfs.cpio '
+ '-serial stdio')
+ # Detect Realm activation during (nested) guest boot.
+ wait_for_console_pattern(test_rme_instance,
+ 'SMC_RMI_REALM_ACTIVATE')
+ # Wait for (nested) guest boot to complete.
+ wait_for_console_pattern(test_rme_instance,
+ 'Welcome to Buildroot')
+ exec_command_and_wait_for_pattern(test_rme_instance, 'root', '#')
+ # query (nested) guest cca report
+ exec_command(test_rme_instance, 'cca-workload-attestation report')
+ wait_for_console_pattern(test_rme_instance,
+ '"cca-platform-hash-algo-id": "sha-256"')
+ wait_for_console_pattern(test_rme_instance,
+ '"cca-realm-hash-algo-id": "sha-512"')
+ wait_for_console_pattern(test_rme_instance,
+ '"cca-realm-public-key-hash-algo-id": "sha-256"')
+
+class Aarch64RMEVirtMachine(QemuSystemTest):
+
+ # Stack is built with OP-TEE build environment from those instructions:
+ # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/
+ # https://github.com/pbo-linaro/qemu-rme-stack
+ ASSET_RME_STACK_VIRT = Asset(
+ ('https://fileserver.linaro.org/s/iaRsNDJp2CXHMSJ/'
+ 'download/rme-stack-op-tee-4.2.0-cca-v4-qemu_v8.tar.gz'),
+ '1851adc232b094384d8b879b9a2cfff07ef3d6205032b85e9b3a4a9ae6b0b7ad')
+
+ # This tests the FEAT_RME CPU implementation by booting a VM that
+ # supports it and launching a nested VM inside it.
+ def test_aarch64_rme_virt(self):
+ self.set_machine('virt')
+ self.require_accelerator('tcg')
+ self.require_netdev('user')
+
+ self.vm.set_console()
+
+ stack_path_tar_gz = self.ASSET_RME_STACK_VIRT.fetch()
+ self.archive_extract(stack_path_tar_gz, format="tar")
+
+ rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-qemu_v8')
+ kernel = os.path.join(rme_stack, 'out', 'bin', 'Image')
+ bios = os.path.join(rme_stack, 'out', 'bin', 'flash.bin')
+ drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4')
+
+ self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on')
+ self.vm.add_args('-m', '2G')
+ self.vm.add_args('-M', 'virt,acpi=off,'
+ 'virtualization=on,'
+ 'secure=on,'
+ 'gic-version=3')
+ self.vm.add_args('-bios', bios)
+ self.vm.add_args('-kernel', kernel)
+ self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0')
+ self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0')
+ self.vm.add_args('-device', 'virtio-9p-device,fsdev=shr0,mount_tag=shr0')
+ self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0')
+ self.vm.add_args('-device', 'virtio-net-pci,netdev=net0')
+ self.vm.add_args('-netdev', 'user,id=net0')
+ # We need to add nokaslr to avoid triggering this sporadic bug:
+ # https://gitlab.com/qemu-project/qemu/-/issues/2823
+ self.vm.add_args('-append', 'root=/dev/vda nokaslr')
+
+ self.vm.launch()
+ # Wait for host VM boot to complete.
+ wait_for_console_pattern(self, 'Welcome to Buildroot',
+ failure_message='Synchronous Exception at')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+
+ test_realms_guest(self)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_sbsaref.py b/tests/functional/test_aarch64_sbsaref.py
new file mode 100755
index 0000000..e6a55ae
--- /dev/null
+++ b/tests/functional/test_aarch64_sbsaref.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a kernel and checks the console
+#
+# Copyright (c) 2023-2024 Linaro Ltd.
+#
+# Authors:
+# Philippe Mathieu-DaudƩ
+# Marcin Juszkiewicz
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import interrupt_interactive_console_until_pattern
+
+
+def fetch_firmware(test):
+ """
+ Flash volumes generated using:
+
+ Toolchain from Debian:
+ aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0
+
+ Used components:
+
+ - Trusted Firmware v2.12.0
+ - Tianocore EDK2 edk2-stable202411
+ - Tianocore EDK2-platforms 4b3530d
+
+ """
+
+ # Secure BootRom (TF-A code)
+ fs0_path = test.uncompress(Aarch64SbsarefMachine.ASSET_FLASH0)
+
+ # Non-secure rom (UEFI and EFI variables)
+ fs1_path = test.uncompress(Aarch64SbsarefMachine.ASSET_FLASH1)
+
+ for path in [fs0_path, fs1_path]:
+ with open(path, "ab+") as fd:
+ fd.truncate(256 << 20) # Expand volumes to 256MiB
+
+ test.set_machine('sbsa-ref')
+ test.vm.set_console()
+ test.vm.add_args(
+ "-drive", f"if=pflash,file={fs0_path},format=raw",
+ "-drive", f"if=pflash,file={fs1_path},format=raw",
+ )
+
+
+class Aarch64SbsarefMachine(QemuSystemTest):
+ """
+ As firmware runs at a higher privilege level than the hypervisor we
+ can only run these tests under TCG emulation.
+ """
+
+ timeout = 180
+
+ ASSET_FLASH0 = Asset(
+ ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/'
+ '20241122-189881/edk2/SBSA_FLASH0.fd.xz'),
+ '76eb89d42eebe324e4395329f47447cda9ac920aabcf99aca85424609c3384a5')
+
+ ASSET_FLASH1 = Asset(
+ ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/'
+ '20241122-189881/edk2/SBSA_FLASH1.fd.xz'),
+ 'f850f243bd8dbd49c51e061e0f79f1697546938f454aeb59ab7d93e5f0d412fc')
+
+ def test_sbsaref_edk2_firmware(self):
+
+ fetch_firmware(self)
+
+ self.vm.add_args('-cpu', 'cortex-a57')
+ self.vm.launch()
+
+ # TF-A boot sequence:
+ #
+ # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\
+ # docs/design/trusted-board-boot.rst#trusted-board-boot-sequence
+ # https://trustedfirmware-a.readthedocs.io/en/v2.8/\
+ # design/firmware-design.html#cold-boot
+
+ # AP Trusted ROM
+ wait_for_console_pattern(self, "Booting Trusted Firmware")
+ wait_for_console_pattern(self, "BL1: v2.12.0(release):")
+ wait_for_console_pattern(self, "BL1: Booting BL2")
+
+ # Trusted Boot Firmware
+ wait_for_console_pattern(self, "BL2: v2.12.0(release)")
+ wait_for_console_pattern(self, "Booting BL31")
+
+ # EL3 Runtime Software
+ wait_for_console_pattern(self, "BL31: v2.12.0(release)")
+
+ # Non-trusted Firmware
+ wait_for_console_pattern(self, "UEFI firmware (version 1.0")
+ interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine")
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_sbsaref_alpine.py b/tests/functional/test_aarch64_sbsaref_alpine.py
new file mode 100755
index 0000000..6108ec6
--- /dev/null
+++ b/tests/functional/test_aarch64_sbsaref_alpine.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a kernel and checks the console
+#
+# Copyright (c) 2023-2024 Linaro Ltd.
+#
+# Authors:
+# Philippe Mathieu-DaudƩ
+# Marcin Juszkiewicz
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, skipSlowTest
+from qemu_test import wait_for_console_pattern
+from test_aarch64_sbsaref import fetch_firmware
+
+
+class Aarch64SbsarefAlpine(QemuSystemTest):
+
+ ASSET_ALPINE_ISO = Asset(
+ ('https://dl-cdn.alpinelinux.org/'
+ 'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'),
+ '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027')
+
+ # This tests the whole boot chain from EFI to Userspace
+ # We only boot a whole OS for the current top level CPU and GIC
+ # Other test profiles should use more minimal boots
+ def boot_alpine_linux(self, cpu=None):
+ fetch_firmware(self)
+
+ iso_path = self.ASSET_ALPINE_ISO.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args(
+ "-drive", f"file={iso_path},media=cdrom,format=raw",
+ )
+ if cpu:
+ self.vm.add_args("-cpu", cpu)
+
+ self.vm.launch()
+ wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17")
+
+ def test_sbsaref_alpine_linux_cortex_a57(self):
+ self.boot_alpine_linux("cortex-a57")
+
+ def test_sbsaref_alpine_linux_default_cpu(self):
+ self.boot_alpine_linux()
+
+ def test_sbsaref_alpine_linux_max_pauth_off(self):
+ self.boot_alpine_linux("max,pauth=off")
+
+ def test_sbsaref_alpine_linux_max_pauth_impdef(self):
+ self.boot_alpine_linux("max,pauth-impdef=on")
+
+ @skipSlowTest() # Test might timeout due to PAuth emulation
+ def test_sbsaref_alpine_linux_max(self):
+ self.boot_alpine_linux("max")
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_sbsaref_freebsd.py b/tests/functional/test_aarch64_sbsaref_freebsd.py
new file mode 100755
index 0000000..26dfc58
--- /dev/null
+++ b/tests/functional/test_aarch64_sbsaref_freebsd.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a kernel and checks the console
+#
+# Copyright (c) 2023-2024 Linaro Ltd.
+#
+# Authors:
+# Philippe Mathieu-DaudƩ
+# Marcin Juszkiewicz
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, skipSlowTest
+from qemu_test import wait_for_console_pattern
+from test_aarch64_sbsaref import fetch_firmware
+
+
+class Aarch64SbsarefFreeBSD(QemuSystemTest):
+
+ ASSET_FREEBSD_ISO = Asset(
+ ('https://download.freebsd.org/releases/arm64/aarch64/ISO-IMAGES/'
+ '14.1/FreeBSD-14.1-RELEASE-arm64-aarch64-bootonly.iso'),
+ '44cdbae275ef1bb6dab1d5fbb59473d4f741e1c8ea8a80fd9e906b531d6ad461')
+
+ # This tests the whole boot chain from EFI to Userspace
+ # We only boot a whole OS for the current top level CPU and GIC
+ # Other test profiles should use more minimal boots
+ def boot_freebsd14(self, cpu=None):
+ fetch_firmware(self)
+
+ img_path = self.ASSET_FREEBSD_ISO.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args(
+ "-drive", f"file={img_path},format=raw,snapshot=on",
+ )
+ if cpu:
+ self.vm.add_args("-cpu", cpu)
+
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Welcome to FreeBSD!')
+
+ def test_sbsaref_freebsd14_cortex_a57(self):
+ self.boot_freebsd14("cortex-a57")
+
+ def test_sbsaref_freebsd14_default_cpu(self):
+ self.boot_freebsd14()
+
+ def test_sbsaref_freebsd14_max_pauth_off(self):
+ self.boot_freebsd14("max,pauth=off")
+
+ @skipSlowTest() # Test might timeout due to PAuth emulation
+ def test_sbsaref_freebsd14_max_pauth_impdef(self):
+ self.boot_freebsd14("max,pauth-impdef=on")
+
+ @skipSlowTest() # Test might timeout due to PAuth emulation
+ def test_sbsaref_freebsd14_max(self):
+ self.boot_freebsd14("max")
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_smmu.py b/tests/functional/test_aarch64_smmu.py
new file mode 100755
index 0000000..c65d0f2
--- /dev/null
+++ b/tests/functional/test_aarch64_smmu.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# SMMUv3 Functional tests
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# Author:
+# Eric Auger <eric.auger@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+import time
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import BUILD_DIR
+from qemu.utils import kvm_available
+
+
+class SMMU(LinuxKernelTest):
+
+ default_kernel_params = ('earlyprintk=pl011,0x9000000 no_timer_check '
+ 'printk.time=1 rd_NO_PLYMOUTH net.ifnames=0 '
+ 'console=ttyAMA0 rd.rescue')
+ IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
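+ # appended to each virtio device so that its DMA is translated by the vSMMU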
+ kernel_path = None
+ initrd_path = None
+ kernel_params = None
+
+ GUEST_PORT = 8080
+
+ def set_up_boot(self, path):
+ self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
+ 'drive=drv0,id=virtio-disk0,bootindex=1,'
+ 'werror=stop,rerror=stop' + self.IOMMU_ADDON)
+ self.vm.add_args('-drive',
+ f'file={path},if=none,cache=writethrough,id=drv0,snapshot=on')
+
+ self.vm.add_args('-netdev',
+ 'user,id=n1,hostfwd=tcp:127.0.0.1:0-:%d' %
+ self.GUEST_PORT)
+ self.vm.add_args('-device', 'virtio-net,netdev=n1' + self.IOMMU_ADDON)
+
+ def common_vm_setup(self, kernel, initrd, disk):
+ self.require_accelerator("kvm")
+ self.require_netdev('user')
+ self.set_machine("virt")
+ self.vm.add_args('-m', '1G')
+ self.vm.add_args("-accel", "kvm")
+ self.vm.add_args("-cpu", "host")
+ self.vm.add_args("-machine", "iommu=smmuv3")
+ self.vm.add_args("-d", "guest_errors")
+ self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
+ 'edk2-aarch64-code.fd'))
+ self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+ self.vm.add_args('-object',
+ 'rng-random,id=rng0,filename=/dev/urandom')
+
+ self.kernel_path = kernel.fetch()
+ self.initrd_path = initrd.fetch()
+ self.set_up_boot(disk.fetch())
+
+ def run_and_check(self, filename, hashsum):
+ self.vm.add_args('-initrd', self.initrd_path)
+ self.vm.add_args('-append', self.kernel_params)
+ self.launch_kernel(self.kernel_path, initrd=self.initrd_path,
+ wait_for='attach it to a bug report.')
+ prompt = '# '
+ # Fedora 33 requires 'return' to be pressed to enter the shell.
+ # There seems to be a small race between detecting the previous ':'
+ # and sending the newline, so we need to add a small delay here.
+ self.wait_for_console_pattern(':')
+ time.sleep(0.2)
+ exec_command_and_wait_for_pattern(self, '\n', prompt)
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline',
+ self.kernel_params)
+
+ # Checking for SMMU enablement:
+ self.log.info("Checking whether SMMU has been enabled...")
+ exec_command_and_wait_for_pattern(self, 'dmesg | grep smmu',
+ 'arm-smmu-v3')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self,
+ 'find /sys/kernel/iommu_groups/ -type l',
+ 'devices/0000:00:')
+ self.wait_for_console_pattern(prompt)
+
+ # Copy a file (checked later), umount afterwards to drop disk cache:
+ self.log.info("Checking hard disk...")
+ exec_command_and_wait_for_pattern(self,
+ "while ! (dmesg -c | grep vda:) ; do sleep 1 ; done",
+ "vda2")
+ exec_command_and_wait_for_pattern(self, 'mount /dev/vda2 /sysroot',
+ 'mounted filesystem')
+ exec_command_and_wait_for_pattern(self, 'cp /bin/vi /sysroot/root/vi',
+ prompt)
+ exec_command_and_wait_for_pattern(self, 'umount /sysroot', prompt)
+ # Switch from initrd to the cloud image filesystem:
+ exec_command_and_wait_for_pattern(self, 'mount /dev/vda2 /sysroot',
+ prompt)
+ exec_command_and_wait_for_pattern(self,
+ ('for d in dev proc sys run ; do '
+ 'mount -o bind /$d /sysroot/$d ; done'), prompt)
+ exec_command_and_wait_for_pattern(self, 'chroot /sysroot', prompt)
+ # Check files on the hard disk:
+ exec_command_and_wait_for_pattern(self,
+ ('if diff -q /root/vi /usr/bin/vi ; then echo "file" "ok" ; '
+ 'else echo "files differ"; fi'), 'file ok')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self, f'sha256sum {filename}',
+ hashsum)
+
+ # Check virtio-net via HTTP:
+ exec_command_and_wait_for_pattern(self, 'dhclient eth0', prompt)
+ self.check_http_download(filename, hashsum, self.GUEST_PORT)
+
+
+ # 5.3 kernel without RIL #
+
+ ASSET_KERNEL_F31 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/31/Server/aarch64/os/images/pxeboot/vmlinuz'),
+ '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527')
+
+ ASSET_INITRD_F31 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/31/Server/aarch64/os/images/pxeboot/initrd.img'),
+ '9f3146b28bc531c689f3c5f114cb74e4bd7bd548e0ba19fa77921d8bd256755a')
+
+ ASSET_DISK_F31 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Cloud/aarch64/images/Fedora-Cloud-Base-31-1.9.aarch64.qcow2'),
+ '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49')
+
+ F31_FILENAME = '/boot/initramfs-5.3.7-301.fc31.aarch64.img'
+ F31_HSUM = '1a4beec6607d94df73d9dd1b4985c9c23dd0fdcf4e6ca1351d477f190df7bef9'
+
+ def test_smmu_noril(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31,
+ self.ASSET_DISK_F31)
+ self.kernel_params = self.default_kernel_params
+ self.run_and_check(self.F31_FILENAME, self.F31_HSUM)
+
+ def test_smmu_noril_passthrough(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31,
+ self.ASSET_DISK_F31)
+ self.kernel_params = (self.default_kernel_params +
+ ' iommu.passthrough=on')
+ self.run_and_check(self.F31_FILENAME, self.F31_HSUM)
+
+ def test_smmu_noril_nostrict(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31,
+ self.ASSET_DISK_F31)
+ self.kernel_params = (self.default_kernel_params +
+ ' iommu.strict=0')
+ self.run_and_check(self.F31_FILENAME, self.F31_HSUM)
+
+
+ # 5.8 kernel featuring range invalidation (RIL),
+ # which needs a >= v5.7 kernel
+
+ ASSET_KERNEL_F33 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/33/Server/aarch64/os/images/pxeboot/vmlinuz'),
+ 'd8b1e6f7241f339d8e7609c456cf0461ffa4583ed07e0b55c7d1d8a0c154aa89')
+
+ ASSET_INITRD_F33 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/33/Server/aarch64/os/images/pxeboot/initrd.img'),
+ '92513f55295c2c16a777f7b6c35ccd70a438e9e1e40b6ba39e0e60900615b3df')
+
+ ASSET_DISK_F33 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/33/Cloud/aarch64/images/Fedora-Cloud-Base-33-1.2.aarch64.qcow2'),
+ 'e7f75cdfd523fe5ac2ca9eeece68edc1a81f386a17f969c1d1c7c87031008a6b')
+
+ F33_FILENAME = '/boot/initramfs-5.8.15-301.fc33.aarch64.img'
+ F33_HSUM = '079cfad0caa82e84c8ca1fb0897a4999dd769f262216099f518619e807a550d9'
+
+ def test_smmu_ril(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+ self.ASSET_DISK_F33)
+ self.kernel_params = self.default_kernel_params
+ self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+ def test_smmu_ril_passthrough(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+ self.ASSET_DISK_F33)
+ self.kernel_params = (self.default_kernel_params +
+ ' iommu.passthrough=on')
+ self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+ def test_smmu_ril_nostrict(self):
+ self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+ self.ASSET_DISK_F33)
+ self.kernel_params = (self.default_kernel_params +
+ ' iommu.strict=0')
+ self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_tcg_plugins.py b/tests/functional/test_aarch64_tcg_plugins.py
new file mode 100755
index 0000000..cb7e929
--- /dev/null
+++ b/tests/functional/test_aarch64_tcg_plugins.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# TCG Plugins tests
+#
+# These are a little more involved than the basic tests run by check-tcg.
+#
+# Copyright (c) 2021 Linaro
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import tempfile
+import mmap
+import re
+
+from qemu.machine.machine import VMLaunchFailure
+from qemu_test import LinuxKernelTest, Asset
+
+
+class PluginKernelBase(LinuxKernelTest):
+ """
+ Boots a Linux kernel with a TCG plugin enabled.
+ """
+
+ timeout = 120
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
+
+ def run_vm(self, kernel_path, kernel_command_line,
+ plugin, plugin_log, console_pattern, args=None):
+
+ vm = self.get_vm()
+ vm.set_console()
+ vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line,
+ '-plugin', plugin,
+ '-d', 'plugin',
+ '-D', plugin_log,
+ '-net', 'none',
+ '-no-reboot')
+ if args:
+ vm.add_args(*args)
+
+ try:
+ vm.launch()
+ except VMLaunchFailure as excp:
+ if "plugin interface not enabled in this build" in excp.output:
+ self.skipTest("TCG plugins not enabled")
+ else:
+ self.log.info(f"unhandled launch failure: {excp.output}")
+ raise excp
+
+ self.wait_for_console_pattern(console_pattern, vm)
+ # ensure logs are flushed
+ vm.shutdown()
+
+
+class PluginKernelNormal(PluginKernelBase):
+
+ ASSET_KERNEL = Asset(
+ ('https://storage.tuxboot.com/20230331/arm64/Image'),
+ 'ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7')
+
+ def test_aarch64_virt_insn(self):
+ self.set_machine('virt')
+ self.cpu = 'cortex-a53'
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0')
+ console_pattern = 'Please append a correct "root=" boot option'
+
+ plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
+ suffix=".log")
+
+ self.run_vm(kernel_path, kernel_command_line,
+ self.plugin_file('libinsn'), plugin_log.name,
+ console_pattern)
+
+ with plugin_log as lf, \
+ mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
+
+ m = re.search(br"insns: (?P<count>\d+)", s)
+ if "count" not in m.groupdict():
+ self.fail("Failed to find instruction count")
+ else:
+ count = int(m.group("count"))
+ self.log.info(f"Counted: {count} instructions")
+
+
+ def test_aarch64_virt_insn_icount(self):
+ self.set_machine('virt')
+ self.cpu = 'cortex-a53'
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0')
+ console_pattern = 'Please append a correct "root=" boot option'
+
+ plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
+ suffix=".log")
+
+ self.run_vm(kernel_path, kernel_command_line,
+ self.plugin_file('libinsn'), plugin_log.name,
+ console_pattern,
+ args=('-icount', 'shift=1'))
+
+ with plugin_log as lf, \
+ mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
+
+ m = re.search(br"insns: (?P<count>\d+)", s)
+ if "count" not in m.groupdict():
+ self.fail("Failed to find instruction count")
+ else:
+ count = int(m.group("count"))
+ self.log.info(f"Counted: {count} instructions")
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_tuxrun.py b/tests/functional/test_aarch64_tuxrun.py
new file mode 100755
index 0000000..75adc8a
--- /dev/null
+++ b/tests/functional/test_aarch64_tuxrun.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunAarch64Test(TuxRunBaselineTest):
+
+ ASSET_ARM64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64/Image',
+ 'b74743c5e89e1cea0f73368d24ae0ae85c5204ff84be3b5e9610417417d2f235')
+ ASSET_ARM64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64/rootfs.ext4.zst',
+ 'a1acaaae2068df4648d04ff75f532aaa8c5edcd6b936122b6f0db4848a07b465')
+
+ def test_arm64(self):
+ self.set_machine('virt')
+ self.cpu = 'cortex-a57'
+ self.console = 'ttyAMA0'
+ self.wait_for_shutdown = False
+ self.common_tuxrun(kernel_asset=self.ASSET_ARM64_KERNEL,
+ rootfs_asset=self.ASSET_ARM64_ROOTFS)
+
+ ASSET_ARM64BE_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64be/Image',
+ 'fd6af4f16689d17a2c24fe0053cc212edcdf77abdcaf301800b8d38fa9f6e109')
+ ASSET_ARM64BE_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/arm64be/rootfs.ext4.zst',
+ 'f5e9371b62701aab8dead52592ca7488c8a9e255c9be8d7635c7f30f477c2c21')
+
+ def test_arm64be(self):
+ self.set_machine('virt')
+ self.cpu = 'cortex-a57'
+ self.console = 'ttyAMA0'
+ self.wait_for_shutdown = False
+ self.common_tuxrun(kernel_asset=self.ASSET_ARM64BE_KERNEL,
+ rootfs_asset=self.ASSET_ARM64BE_ROOTFS)
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
new file mode 100755
index 0000000..4d0ad90
--- /dev/null
+++ b/tests/functional/test_aarch64_virt.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots various Linux systems and checks the
+# console output.
+#
+# Copyright (c) 2022 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import logging
+from subprocess import check_call, DEVNULL
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern, get_qemu_img
+
+
+class Aarch64VirtMachine(QemuSystemTest):
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+ timeout = 360
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(self, success_message,
+ failure_message='Kernel panic - not syncing',
+ vm=vm)
+
+ ASSET_ALPINE_ISO = Asset(
+ ('https://dl-cdn.alpinelinux.org/'
+ 'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'),
+ '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027')
+
+    # This tests the whole boot chain from EFI to userspace. We only boot
+    # a full OS for the current top-level CPU and GIC; other test profiles
+    # should use more minimal boots.
+ def test_alpine_virt_tcg_gic_max(self):
+ iso_path = self.ASSET_ALPINE_ISO.fetch()
+
+ self.set_machine('virt')
+ self.require_accelerator("tcg")
+
+ self.vm.set_console()
+ self.vm.add_args("-accel", "tcg")
+ self.vm.add_args("-cpu", "max,pauth-impdef=on")
+ self.vm.add_args("-machine",
+ "virt,acpi=on,"
+ "virtualization=on,"
+ "mte=on,"
+ "gic-version=max,iommu=smmuv3")
+ self.vm.add_args("-smp", "2", "-m", "1024")
+ self.vm.add_args('-bios', self.build_file('pc-bios',
+ 'edk2-aarch64-code.fd'))
+ self.vm.add_args("-drive", f"file={iso_path},media=cdrom,format=raw")
+ self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+ self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
+
+ self.vm.launch()
+ self.wait_for_console_pattern('Welcome to Alpine Linux 3.17')
+
+
+ ASSET_KERNEL = Asset(
+ ('https://fileserver.linaro.org/s/'
+ 'z6B2ARM7DQT3HWN/download'),
+ '12a54d4805cda6ab647cb7c7bbdb16fafb3df400e0d6f16445c1a0436100ef8d')
+
+ def common_aarch64_virt(self, machine):
+ """
+        Common code to launch a basic virt machine with a kernel and
+        a scratch disk.
+ """
+ self.set_machine('virt')
+ self.require_accelerator("tcg")
+
+ logger = logging.getLogger('aarch64_virt')
+
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0')
+ self.vm.add_args('-cpu', 'max,pauth-impdef=on',
+ '-machine', machine,
+ '-accel', 'tcg',
+ '-kernel', kernel_path,
+ '-append', kernel_command_line)
+
+        # An RNG offers an easy way to generate a few IRQs
+ self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+ self.vm.add_args('-object',
+ 'rng-random,id=rng0,filename=/dev/urandom')
+
+ # Also add a scratch block device
+ logger.info('creating scratch qcow2 image')
+ image_path = self.scratch_file('scratch.qcow2')
+ qemu_img = get_qemu_img(self)
+ check_call([qemu_img, 'create', '-f', 'qcow2', image_path, '8M'],
+ stdout=DEVNULL, stderr=DEVNULL)
+
+ # Add the device
+ self.vm.add_args('-blockdev',
+ "driver=qcow2,"
+ "file.driver=file,"
+ f"file.filename={image_path},node-name=scratch")
+ self.vm.add_args('-device',
+ 'virtio-blk-device,drive=scratch')
+
+ self.vm.launch()
+
+        ps1 = '#'
+ self.wait_for_console_pattern('login:')
+
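+        # log in and poke the system: reading the virtio-rng backed
+        # /dev/hwrng and writing to the virtio-blk scratch disk exercises
+        # both devices added above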
+ commands = [
+ ('root', ps1),
+ ('cat /proc/interrupts', ps1),
+ ('cat /proc/self/maps', ps1),
+ ('uname -a', ps1),
+ ('dd if=/dev/hwrng of=/dev/vda bs=512 count=4', ps1),
+ ('md5sum /dev/vda', ps1),
+ ('halt -n', 'reboot: System halted')
+ ]
+
+ for cmd, pattern in commands:
+ exec_command_and_wait_for_pattern(self, cmd, pattern)
+
+ def test_aarch64_virt_gicv3(self):
+ self.common_aarch64_virt("virt,gic_version=3")
+
+ def test_aarch64_virt_gicv2(self):
+ self.common_aarch64_virt("virt,gic-version=2")
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_virt_gpu.py b/tests/functional/test_aarch64_virt_gpu.py
new file mode 100755
index 0000000..3844727
--- /dev/null
+++ b/tests/functional/test_aarch64_virt_gpu.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the various graphics modes we can support.
+#
+# Copyright (c) 2024, 2025 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu.machine.machine import VMLaunchFailure
+
+from qemu_test import Asset
+from qemu_test import exec_command_and_wait_for_pattern as ec_and_wait
+from qemu_test import skipIfMissingCommands
+
+from qemu_test.linuxkernel import LinuxKernelTest
+
+from re import search
+from subprocess import check_output, CalledProcessError
+
+class Aarch64VirtGPUMachine(LinuxKernelTest):
+
+ ASSET_VIRT_GPU_KERNEL = Asset(
+ 'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+ 'download?path=%2F&files='
+ 'Image.6.12.16.aarch64',
+ '7888c51c55d37e86bbbdeb5acea9f08c34e6b0f03c1f5b2463285f6a6f6eec8b')
+
+ ASSET_VIRT_GPU_ROOTFS = Asset(
+ 'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+ 'download?path=%2F&files='
+ 'rootfs.aarch64.ext2.zstd',
+ 'd45118c899420b7e673f1539a37a35480134b3e36e3a59e2cb69b1781cbb14ef')
+
+ def _launch_virt_gpu(self, gpu_device):
+
+ self.set_machine('virt')
+ self.require_accelerator("tcg")
+
+ kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
+ image_path = self.uncompress(self.ASSET_VIRT_GPU_ROOTFS, format="zstd")
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0 root=/dev/vda')
+
+ self.vm.add_args("-accel", "tcg")
+ self.vm.add_args("-cpu", "cortex-a72")
+ self.vm.add_args("-machine", "virt,gic-version=max",
+ '-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.add_args("-smp", "2", "-m", "2048")
+ self.vm.add_args("-device", gpu_device)
+ self.vm.add_args("-display", "egl-headless")
+ self.vm.add_args("-display", "dbus,gl=on")
+
+ self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
+ self.vm.add_args("-blockdev",
+ "driver=raw,file.driver=file,"
+ "node-name=hd0,read-only=on,"
+ f"file.filename={image_path}")
+ self.vm.add_args("-snapshot")
+
+ try:
+ self.vm.launch()
+ except VMLaunchFailure as excp:
+ if "old virglrenderer, blob resources unsupported" in excp.output:
+ self.skipTest("No blob support for virtio-gpu")
+ elif "old virglrenderer, venus unsupported" in excp.output:
+ self.skipTest("No venus support for virtio-gpu")
+ elif "egl: no drm render node available" in excp.output:
+ self.skipTest("Can't access host DRM render node")
+ elif "'type' does not accept value 'egl-headless'" in excp.output:
+ self.skipTest("egl-headless support is not available")
+ elif "'type' does not accept value 'dbus'" in excp.output:
+ self.skipTest("dbus display support is not available")
+ else:
+ self.log.info("unhandled launch failure: %s", excp.output)
+ raise excp
+
+ self.wait_for_console_pattern('buildroot login:')
+ ec_and_wait(self, 'root', '#')
+
+    def _run_virt_weston_test(self, cmd, fail=None):
+
+ # make it easier to detect successful return to shell
+ PS1 = 'RES=[$?] # '
+ OK_CMD = 'RES=[0] # '
+
+ ec_and_wait(self, 'export XDG_RUNTIME_DIR=/tmp', '#')
+ ec_and_wait(self, f"export PS1='{PS1}'", OK_CMD)
+ full_cmd = f"weston -B headless --renderer gl --shell kiosk -- {cmd}"
+ ec_and_wait(self, full_cmd, OK_CMD, fail)
+
+ @skipIfMissingCommands('zstd')
+ def test_aarch64_virt_with_virgl_gpu(self):
+
+ self.require_device('virtio-gpu-gl-pci')
+
+ self._launch_virt_gpu("virtio-gpu-gl-pci")
+
+ # subset of the glmark tests
+ tests = " ".join([f"-b {test}" for test in
+ ["build", "texture", "shading",
+ "bump", "desktop", "buffer"]])
+
+ self._run_virt_weston_test("glmark2-wayland --validate " + tests)
+
+ @skipIfMissingCommands('zstd')
+ def test_aarch64_virt_with_virgl_blobs_gpu(self):
+
+ self.require_device('virtio-gpu-gl-pci')
+
+ self._launch_virt_gpu("virtio-gpu-gl-pci,hostmem=4G,blob=on")
+ self._run_virt_weston_test("glmark2-wayland -b:duration=1.0")
+
+ @skipIfMissingCommands('zstd')
+ @skipIfMissingCommands('vulkaninfo')
+ def test_aarch64_virt_with_vulkan_gpu(self):
+
+ self.require_device('virtio-gpu-gl-pci')
+
+ try:
+ vk_info = check_output(["vulkaninfo", "--summary"],
+ encoding="utf-8")
+ except CalledProcessError as excp:
+ self.skipTest(f"Miss-configured host Vulkan: {excp.output}")
+
+ if search(r"driverID\s+=\s+DRIVER_ID_NVIDIA_PROPRIETARY", vk_info):
+ self.skipTest("Test skipped on NVIDIA proprietary driver")
+
+ self._launch_virt_gpu("virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
+ self._run_virt_weston_test("vkmark -b:duration=1.0",
+ "debug: stuck in fence wait with iter at")
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_xen.py b/tests/functional/test_aarch64_xen.py
new file mode 100755
index 0000000..3399042
--- /dev/null
+++ b/tests/functional/test_aarch64_xen.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Xen hypervisor with a dom0 kernel and
+# checks the console output is vaguely sane.
+#
+# Copyright (c) 2020 Linaro
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import Asset, LinuxKernelTest, wait_for_console_pattern
+
+
+class BootXen(LinuxKernelTest):
+ """
+    Boots a Xen hypervisor with a Linux dom0 kernel.
+ """
+
+ timeout = 90
+ XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all'
+
+ ASSET_KERNEL = Asset(
+ ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
+ 'download?path=%2F&files=linux-5.9.9-arm64-ajb'),
+ '00366fa51ea957c19462d2e2aefd480bef80ce727120e714ae48e0c88f261edb')
+
+ def launch_xen(self, xen_path):
+ """
+ Launch Xen with a dom0 guest kernel
+ """
+ self.set_machine('virt')
+ self.cpu = "cortex-a57"
+ self.kernel_path = self.ASSET_KERNEL.fetch()
+ self.log.info("launch with xen_path: %s", xen_path)
+
+ self.vm.set_console()
+
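+        # Xen itself is loaded via -kernel; the guest-loader device puts
+        # the dom0 kernel at 0x47000000 and advertises it (plus bootargs)
+        # to Xen through the device tree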
+ self.vm.add_args('-machine', 'virtualization=on',
+ '-m', '768',
+ '-kernel', xen_path,
+ '-append', self.XEN_COMMON_COMMAND_LINE,
+ '-device',
+ 'guest-loader,addr=0x47000000,kernel=%s,bootargs=console=hvc0'
+ % (self.kernel_path))
+
+ self.vm.launch()
+
+ console_pattern = 'VFS: Cannot open root device'
+ wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
+
+ ASSET_XEN_4_11 = Asset(
+ ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&'
+ 'files=xen-hypervisor-4.11-arm64_4.11.4%2B37-g3263f257ca-1_arm64.deb'),
+ 'b745c2631342f9fcc0147ddc364edb62c20ecfebd430e5a3546e7d7c6891c0bc')
+
+ def test_arm64_xen_411_and_dom0(self):
+ # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
+ xen_path = self.archive_extract(self.ASSET_XEN_4_11, format='deb',
+ member="boot/xen-4.11-arm64")
+ self.launch_xen(xen_path)
+
+ ASSET_XEN_4_14 = Asset(
+ ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&'
+ 'files=xen-hypervisor-4.14-arm64_4.14.0%2B80-gd101b417b7-1_arm64.deb'),
+ 'e930a3293248edabd367d5b4b3b6448b9c99c057096ea8b47228a7870661d5cb')
+
+ def test_arm64_xen_414_and_dom0(self):
+ # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
+ xen_path = self.archive_extract(self.ASSET_XEN_4_14, format='deb',
+ member="boot/xen-4.14-arm64")
+ self.launch_xen(xen_path)
+
+ ASSET_XEN_4_15 = Asset(
+ ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&'
+ 'files=xen-upstream-4.15-unstable.deb'),
+ '2a9a8af8acf0231844657cc28baab95bd918b0ee2d493ee4ee6f8846e1358bc9')
+
+ def test_arm64_xen_415_and_dom0(self):
+ xen_path = self.archive_extract(self.ASSET_XEN_4_15, format='deb',
+ member="boot/xen-4.15-unstable")
+ self.launch_xen(xen_path)
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_xlnx_versal.py b/tests/functional/test_aarch64_xlnx_versal.py
new file mode 100755
index 0000000..4b9c49e
--- /dev/null
+++ b/tests/functional/test_aarch64_xlnx_versal.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+class XlnxVersalVirtMachine(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('http://ports.ubuntu.com/ubuntu-ports/dists/bionic-updates/main/'
+ 'installer-arm64/20101020ubuntu543.19/images/netboot/'
+ 'ubuntu-installer/arm64/linux'),
+ 'ce54f74ab0b15cfd13d1a293f2d27ffd79d8a85b7bb9bf21093ae9513864ac79')
+
+ ASSET_INITRD = Asset(
+ ('http://ports.ubuntu.com/ubuntu-ports/dists/bionic-updates/main/'
+ 'installer-arm64/20101020ubuntu543.19/images/netboot/'
+ '/ubuntu-installer/arm64/initrd.gz'),
+ 'e7a5e716b6f516d8be315c06e7331aaf16994fe4222e0e7cfb34bc015698929e')
+
+ def test_aarch64_xlnx_versal_virt(self):
+ self.set_machine('xlnx-versal-virt')
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-m', '2G',
+ '-accel', 'tcg',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path)
+ self.vm.launch()
+ self.wait_for_console_pattern('Checked W+X mappings: passed')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_acpi_bits.py b/tests/functional/test_acpi_bits.py
new file mode 100755
index 0000000..8e0563a
--- /dev/null
+++ b/tests/functional/test_acpi_bits.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python3
+#
+# Exercise QEMU generated ACPI/SMBIOS tables using biosbits,
+# https://biosbits.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author:
+# Ani Sinha <anisinha@redhat.com>
+
+# pylint: disable=invalid-name
+# pylint: disable=consider-using-f-string
+
+"""
+These are QEMU ACPI/SMBIOS functional tests using biosbits.
+Biosbits is available originally at https://biosbits.org/.
+This test uses a fork of the upstream bits and has numerous fixes
+including an upgraded acpica. The fork is located here:
+https://gitlab.com/qemu-project/biosbits-bits .
+"""
+
+import os
+import re
+import shutil
+import subprocess
+
+from typing import (
+ List,
+ Optional,
+ Sequence,
+)
+from qemu.machine import QEMUMachine
+from qemu_test import (QemuSystemTest, Asset, skipIfMissingCommands,
+ skipIfNotMachine)
+
+
+# The default timeout of 120 secs is sometimes not enough for the bits test.
+BITS_TIMEOUT = 200
+
+class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods
+ """
+ A QEMU VM, with isa-debugcon enabled and bits iso passed
+ using -cdrom to QEMU commandline.
+
+ """
+ def __init__(self,
+ binary: str,
+ args: Sequence[str] = (),
+ wrapper: Sequence[str] = (),
+ name: Optional[str] = None,
+ base_temp_dir: str = "/var/tmp",
+ debugcon_log: str = "debugcon-log.txt",
+ debugcon_addr: str = "0x403",
+ qmp_timer: Optional[float] = None):
+ # pylint: disable=too-many-arguments
+
+ if name is None:
+ name = "qemu-bits-%d" % os.getpid()
+ super().__init__(binary, args, wrapper=wrapper, name=name,
+ base_temp_dir=base_temp_dir,
+ qmp_timer=qmp_timer)
+ self.debugcon_log = debugcon_log
+ self.debugcon_addr = debugcon_addr
+ self.base_temp_dir = base_temp_dir
+
+ @property
+ def _base_args(self) -> List[str]:
+ args = super()._base_args
+ args.extend([
+ '-chardev',
+ 'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir,
+ self.debugcon_log),
+ '-device',
+ 'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr,
+ ])
+ return args
+
+ def base_args(self):
+ """return the base argument to QEMU binary"""
+ return self._base_args
+
+@skipIfMissingCommands("xorriso", "mformat")
+@skipIfNotMachine("x86_64")
+class AcpiBitsTest(QemuSystemTest): #pylint: disable=too-many-instance-attributes
+ """
+ ACPI and SMBIOS tests using biosbits.
+ """
+ # in slower systems the test can take as long as 3 minutes to complete.
+ timeout = BITS_TIMEOUT
+
+ # following are some standard configuration constants
+ # gitlab CI does shallow clones of depth 20
+ BITS_INTERNAL_VER = 2020
+ # commit hash must match the artifact tag below
+ BITS_COMMIT_HASH = 'c7920d2b'
+    # this is the latest bits release as of this writing.
+ BITS_TAG = "qemu-bits-10262023"
+
+ ASSET_BITS = Asset(("https://gitlab.com/qemu-project/"
+ "biosbits-bits/-/jobs/artifacts/%s/"
+ "download?job=qemu-bits-build" % BITS_TAG),
+ '1b8dd612c6831a6b491716a77acc486666aaa867051cdc34f7ce169c2e25f487')
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._vm = None
+
+ self._debugcon_addr = '0x403'
+ self._debugcon_log = 'debugcon-log.txt'
+
+ def _print_log(self, log):
+        self.logger.info('\nlogs from biosbits follow:')
+ self.logger.info('==========================================\n')
+ self.logger.info(log)
+ self.logger.info('==========================================\n')
+
+ def copy_bits_config(self):
+ """ copies the bios bits config file into bits.
+ """
+ bits_config_file = self.data_file('acpi-bits',
+ 'bits-config',
+ 'bits-cfg.txt')
+ target_config_dir = self.scratch_file('bits-%d' %
+ self.BITS_INTERNAL_VER,
+ 'boot')
+ self.assertTrue(os.path.exists(bits_config_file))
+ self.assertTrue(os.path.exists(target_config_dir))
+ shutil.copy2(bits_config_file, target_config_dir)
+ self.logger.info('copied config file %s to %s',
+ bits_config_file, target_config_dir)
+
+ def copy_test_scripts(self):
+ """copies the python test scripts into bits. """
+
+ bits_test_dir = self.data_file('acpi-bits', 'bits-tests')
+ target_test_dir = self.scratch_file('bits-%d' % self.BITS_INTERNAL_VER,
+ 'boot', 'python')
+
+ self.assertTrue(os.path.exists(bits_test_dir))
+ self.assertTrue(os.path.exists(target_test_dir))
+
+ for filename in os.listdir(bits_test_dir):
+ if os.path.isfile(os.path.join(bits_test_dir, filename)) and \
+ filename.endswith('.py2'):
+ # All test scripts are named with extension .py2 so that
+ # they are not run by accident.
+ #
+ # These scripts are intended to run inside the test VM
+                # and are written for Python 2.7, not Python 3, hence
+                # they would cause syntax errors if loaded outside the VM.
+ newfilename = os.path.splitext(filename)[0] + '.py'
+ shutil.copy2(os.path.join(bits_test_dir, filename),
+ os.path.join(target_test_dir, newfilename))
+ self.logger.info('copied test file %s to %s',
+ filename, target_test_dir)
+
+ # now remove the pyc test file if it exists, otherwise the
+ # changes in the python test script won't be executed.
+ testfile_pyc = os.path.splitext(filename)[0] + '.pyc'
+ if os.access(os.path.join(target_test_dir, testfile_pyc),
+ os.F_OK):
+ os.remove(os.path.join(target_test_dir, testfile_pyc))
+ self.logger.info('removed compiled file %s',
+ os.path.join(target_test_dir,
+ testfile_pyc))
+
+ def fix_mkrescue(self, mkrescue):
+ """ grub-mkrescue is a bash script with two variables, 'prefix' and
+ 'libdir'. They must be pointed to the right location so that the
+ iso can be generated appropriately. We point the two variables to
+ the directory where we have extracted our pre-built bits grub
+ tarball.
+ """
+ grub_x86_64_mods = self.scratch_file('grub-inst-x86_64-efi')
+ grub_i386_mods = self.scratch_file('grub-inst')
+
+ self.assertTrue(os.path.exists(grub_x86_64_mods))
+ self.assertTrue(os.path.exists(grub_i386_mods))
+
+ new_script = ""
+ with open(mkrescue, 'r', encoding='utf-8') as filehandle:
+ orig_script = filehandle.read()
+ new_script = re.sub('(^prefix=)(.*)',
+ r'\1"%s"' %grub_x86_64_mods,
+ orig_script, flags=re.M)
+ new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods,
+ new_script, flags=re.M)
+
+ with open(mkrescue, 'w', encoding='utf-8') as filehandle:
+ filehandle.write(new_script)
+
+ def generate_bits_iso(self):
+ """ Uses grub-mkrescue to generate a fresh bits iso with the python
+ test scripts
+ """
+ bits_dir = self.scratch_file('bits-%d' % self.BITS_INTERNAL_VER)
+ iso_file = self.scratch_file('bits-%d.iso' % self.BITS_INTERNAL_VER)
+ mkrescue_script = self.scratch_file('grub-inst-x86_64-efi',
+ 'bin',
+ 'grub-mkrescue')
+
+ self.assertTrue(os.access(mkrescue_script,
+ os.R_OK | os.W_OK | os.X_OK))
+
+ self.fix_mkrescue(mkrescue_script)
+
+ self.logger.info('using grub-mkrescue for generating biosbits iso ...')
+
+ try:
+ if os.getenv('V') or os.getenv('BITS_DEBUG'):
+ proc = subprocess.run([mkrescue_script, '-o', iso_file,
+ bits_dir],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ check=True)
+ self.logger.info("grub-mkrescue output %s" % proc.stdout)
+ else:
+ subprocess.check_call([mkrescue_script, '-o',
+ iso_file, bits_dir],
+ stderr=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL)
+ except Exception as e: # pylint: disable=broad-except
+ self.skipTest("Error while generating the bits iso. "
+ "Pass V=1 in the environment to get more details. "
+ + str(e))
+
+ self.assertTrue(os.access(iso_file, os.R_OK))
+
+ self.logger.info('iso file %s successfully generated.', iso_file)
+
+ def setUp(self): # pylint: disable=arguments-differ
+ super().setUp()
+ self.logger = self.log
+
+ prebuiltDir = self.scratch_file('prebuilt')
+ if not os.path.isdir(prebuiltDir):
+ os.mkdir(prebuiltDir, mode=0o775)
+
+ bits_zip_file = self.scratch_file('prebuilt',
+ 'bits-%d-%s.zip'
+ %(self.BITS_INTERNAL_VER,
+ self.BITS_COMMIT_HASH))
+ grub_tar_file = self.scratch_file('prebuilt',
+ 'bits-%d-%s-grub.tar.gz'
+ %(self.BITS_INTERNAL_VER,
+ self.BITS_COMMIT_HASH))
+
+ # extract the bits artifact in the temp working directory
+ self.archive_extract(self.ASSET_BITS, sub_dir='prebuilt', format='zip')
+
+ # extract the bits software in the temp working directory
+ self.archive_extract(bits_zip_file)
+ self.archive_extract(grub_tar_file)
+
+ self.copy_test_scripts()
+ self.copy_bits_config()
+ self.generate_bits_iso()
+
+ def parse_log(self):
+ """parse the log generated by running bits tests and
+ check for failures.
+ """
+ debugconf = self.scratch_file(self._debugcon_log)
+ log = ""
+ with open(debugconf, 'r', encoding='utf-8') as filehandle:
+ log = filehandle.read()
+
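+        # bits prints a 'Summary: N passed, M failed' line for each test
+        # suite; every suite must report zero failures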
+ matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*',
+ log)
+ for match in matchiter:
+ # verify that no test cases failed.
+ try:
+ self.assertEqual(match.group(3).split()[0], '0',
+                'Some bits tests seem to have failed. ' \
+ 'Please check the test logs for more info.')
+ except AssertionError as e:
+ self._print_log(log)
+ raise e
+ else:
+ if os.getenv('V') or os.getenv('BITS_DEBUG'):
+ self._print_log(log)
+
+ def tearDown(self):
+ """
+        Let's do some cleanup.
+ """
+ if self._vm:
+            self.assertFalse(self._vm.is_running())
+ super().tearDown()
+
+ def test_acpi_smbios_bits(self):
+ """The main test case implementation."""
+
+ self.set_machine('pc')
+ iso_file = self.scratch_file('bits-%d.iso' % self.BITS_INTERNAL_VER)
+
+ self.assertTrue(os.access(iso_file, os.R_OK))
+
+ self._vm = QEMUBitsMachine(binary=self.qemu_bin,
+ base_temp_dir=self.workdir,
+ debugcon_log=self._debugcon_log,
+ debugcon_addr=self._debugcon_addr)
+
+ self._vm.add_args('-cdrom', '%s' %iso_file)
+ # the vm needs to be run under icount so that TCG emulation is
+ # consistent in terms of timing. smilatency tests have consistent
+ # timing requirements.
+ self._vm.add_args('-icount', 'auto')
+ # currently there is no support in bits for recognizing 64-bit SMBIOS
+ # entry points. QEMU defaults to 64-bit entry points since the
+ # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0
+ # for newer machine models"). Therefore, enforce 32-bit entry point.
+ self._vm.add_args('-machine', 'smbios-entry-point-type=32')
+
+ # enable console logging
+ self._vm.set_console()
+ self._vm.launch()
+
+
+ # biosbits has been configured to run all the specified test suites
+ # in batch mode and then automatically initiate a vm shutdown.
+ self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT)
+ self._vm.wait(timeout=None)
+ self.logger.debug("Checking console output ...")
+ self.parse_log()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_alpha_clipper.py b/tests/functional/test_alpha_clipper.py
new file mode 100755
index 0000000..c5d7181
--- /dev/null
+++ b/tests/functional/test_alpha_clipper.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on an Alpha Clipper machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class AlphaClipperTest(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('http://archive.debian.org/debian/dists/lenny/main/'
+ 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz'),
+ '34f53da3fa32212e4f00b03cb944b2ad81c06bc8faaf9b7193b2e544ceeca576')
+
+ def test_alpha_clipper(self):
+ self.set_machine('clipper')
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ uncompressed_kernel = self.uncompress(self.ASSET_KERNEL, format="gz")
+
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ self.vm.add_args('-nodefaults',
+ '-kernel', uncompressed_kernel,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_alpha_replay.py b/tests/functional/test_alpha_replay.py
new file mode 100755
index 0000000..24a17ef
--- /dev/null
+++ b/tests/functional/test_alpha_replay.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an Alpha machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class AlphaReplay(ReplayKernelBase):
+
+ ASSET_KERNEL = Asset(
+ ('http://archive.debian.org/debian/dists/lenny/main/installer-alpha/'
+ '20090123lenny10/images/cdrom/vmlinuz'),
+ '34f53da3fa32212e4f00b03cb944b2ad81c06bc8faaf9b7193b2e544ceeca576')
+
+ def test_clipper(self):
+ self.set_machine('clipper')
+ kernel_path = self.uncompress(self.ASSET_KERNEL, format='gz')
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9,
+ args=('-nodefaults', ))
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_arm_aspeed_ast1030.py b/tests/functional/test_arm_aspeed_ast1030.py
new file mode 100755
index 0000000..77037f0
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_ast1030.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED SoCs with firmware
+#
+# Copyright (C) 2022 ASPEED Technology Inc
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class AST1030Machine(LinuxKernelTest):
+
+ ASSET_ZEPHYR_3_00 = Asset(
+ ('https://github.com/AspeedTech-BMC'
+ '/zephyr/releases/download/v00.03.00/ast1030-evb-demo.zip'),
+ '37fe3ecd4a1b9d620971a15b96492a81093435396eeac69b6f3e384262ff555f')
+
+ def test_ast1030_zephyros_3_00(self):
+ self.set_machine('ast1030-evb')
+
+ kernel_name = "ast1030-evb-demo/zephyr.elf"
+ kernel_file = self.archive_extract(
+ self.ASSET_ZEPHYR_3_00, member=kernel_name)
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_file, '-nographic')
+ self.vm.launch()
+ self.wait_for_console_pattern("Booting Zephyr OS")
+ exec_command_and_wait_for_pattern(self, "help",
+ "Available commands")
+
+ ASSET_ZEPHYR_1_07 = Asset(
+ ('https://github.com/AspeedTech-BMC'
+ '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip'),
+ 'ad52e27959746988afaed8429bf4e12ab988c05c4d07c9d90e13ec6f7be4574c')
+
+ def test_ast1030_zephyros_1_07(self):
+ self.set_machine('ast1030-evb')
+
+ kernel_name = "ast1030-evb-demo/zephyr.bin"
+ kernel_file = self.archive_extract(
+ self.ASSET_ZEPHYR_1_07, member=kernel_name)
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_file, '-nographic')
+ self.vm.launch()
+ self.wait_for_console_pattern("Booting Zephyr OS")
+ for shell_cmd in [
+ 'kernel stacks',
+ 'otp info conf',
+ 'otp info scu',
+ 'hwinfo devid',
+ 'crypto aes256_cbc_vault',
+ 'random get',
+ 'jtag JTAG1 sw_xfer high TMS',
+ 'adc ADC0 resolution 12',
+ 'adc ADC0 read 42',
+ 'adc ADC1 read 69',
+ 'i2c scan I2C_0',
+ 'i3c attach I3C_0',
+ 'hash test',
+ 'kernel uptime',
+ 'kernel reboot warm',
+ 'kernel uptime',
+ 'kernel reboot cold',
+ 'kernel uptime',
+        ]:
+            exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$")
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_aspeed_ast2500.py b/tests/functional/test_arm_aspeed_ast2500.py
new file mode 100755
index 0000000..6923fe8
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_ast2500.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, exec_command_and_wait_for_pattern
+from aspeed import AspeedTest
+
+
+class AST2500Machine(AspeedTest):
+
+ ASSET_BR2_202411_AST2500_FLASH = Asset(
+ ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
+ 'images/ast2500-evb/buildroot-2024.11/flash.img'),
+ '641e6906c18c0f19a2aeb48099d66d4771929c361001d554d0d45c667413e13a')
+
+ def test_arm_ast2500_evb_buildroot(self):
+ self.set_machine('ast2500-evb')
+
+ image_path = self.ASSET_BR2_202411_AST2500_FLASH.fetch()
+
+ self.vm.add_args('-device',
+ 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test')
+ self.do_test_arm_aspeed_buildroot_start(image_path, '0x0',
+ 'ast2500-evb login:')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device',
+ 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon1/temp1_input', '0')
+ self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
+ property='temperature', value=18000)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000')
+
+ self.do_test_arm_aspeed_buildroot_poweroff()
+
+ ASSET_SDK_V906_AST2500 = Asset(
+ 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2500-default-obmc.tar.gz',
+ '542db84645b4efd8aed50385d7f4dd1caff379a987032311cfa7b563a3addb2a')
+
+ def test_arm_ast2500_evb_sdk(self):
+ self.set_machine('ast2500-evb')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2500)
+
+ self.do_test_arm_aspeed_sdk_start(
+ self.scratch_file("ast2500-default", "image-bmc"))
+
+ self.wait_for_console_pattern('ast2500-default login:')
+
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_ast2600.py b/tests/functional/test_arm_aspeed_ast2600.py
new file mode 100755
index 0000000..fdae4c9
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_ast2600.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import time
+import tempfile
+import subprocess
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+from qemu_test import exec_command_and_wait_for_pattern, skipIfMissingCommands
+
+
+class AST2600Machine(AspeedTest):
+
+ ASSET_BR2_202411_AST2600_FLASH = Asset(
+ ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
+ 'images/ast2600-evb/buildroot-2024.11/flash.img'),
+ '4bb2f3dfdea31199b51d66b42f686dc5374c144a7346fdc650194a5578b73609')
+
+ def test_arm_ast2600_evb_buildroot(self):
+ self.set_machine('ast2600-evb')
+
+ image_path = self.ASSET_BR2_202411_AST2600_FLASH.fetch()
+
+ self.vm.add_args('-device',
+ 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test')
+ self.vm.add_args('-device',
+ 'ds1338,bus=aspeed.i2c.bus.3,address=0x32')
+ self.vm.add_args('-device',
+ 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42')
+ self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00',
+ 'ast2600-evb login:')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device',
+ 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon1/temp1_input', '0')
+ self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
+ property='temperature', value=18000)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device',
+ 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32')
+ year = time.strftime("%Y")
+ exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year)
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device',
+ 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64')
+ exec_command_and_wait_for_pattern(self,
+ 'i2cset -y 3 0x42 0x64 0x00 0xaa i', '#')
+ exec_command_and_wait_for_pattern(self,
+ 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom',
+ '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff')
+ self.do_test_arm_aspeed_buildroot_poweroff()
+
+ ASSET_BR2_202302_AST2600_TPM_FLASH = Asset(
+ ('https://github.com/legoater/qemu-aspeed-boot/raw/master/'
+ 'images/ast2600-evb/buildroot-2023.02-tpm/flash.img'),
+ 'a46009ae8a5403a0826d607215e731a8c68d27c14c41e55331706b8f9c7bd997')
+
+ @skipIfMissingCommands('swtpm')
+ def test_arm_ast2600_evb_buildroot_tpm(self):
+ self.set_machine('ast2600-evb')
+
+ image_path = self.ASSET_BR2_202302_AST2600_TPM_FLASH.fetch()
+
+ tpmstate_dir = tempfile.TemporaryDirectory(prefix="qemu_")
+ socket = os.path.join(tpmstate_dir.name, 'swtpm-socket')
+
+ # We must put the TPM state dir in /tmp/, not the build dir,
+ # because some distros use AppArmor to lock down swtpm and
+        # restrict the set of locations where it can access files.
+ subprocess.run(['swtpm', 'socket', '-d', '--tpm2',
+ '--tpmstate', f'dir={tpmstate_dir.name}',
+ '--ctrl', f'type=unixio,path={socket}'])
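+        # swtpm runs as a daemon (-d), keeping its state in the temporary
+        # directory; QEMU talks to it via the control socket passed to the
+        # chardev below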
+
+ self.vm.add_args('-chardev', f'socket,id=chrtpm,path={socket}')
+ self.vm.add_args('-tpmdev', 'emulator,id=tpm0,chardev=chrtpm')
+ self.vm.add_args('-device',
+ 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e')
+ self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device',
+ 'tpm_tis_i2c 12-002e: 2.0 TPM (device-id 0x1, rev-id 1)')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/tpm/tpm0/pcr-sha256/0',
+ 'B804724EA13F52A9072BA87FE8FDCC497DFC9DF9AA15B9088694639C431688E0')
+
+ self.do_test_arm_aspeed_buildroot_poweroff()
+
+ ASSET_SDK_V906_AST2600 = Asset(
+ 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2600-default-obmc.tar.gz',
+ '768d76e247896ad78c154b9cff4f766da2ce65f217d620b286a4a03a8a4f68f5')
+
+ def test_arm_ast2600_evb_sdk(self):
+ self.set_machine('ast2600-evb')
+
+ self.archive_extract(self.ASSET_SDK_V906_AST2600)
+
+ self.vm.add_args('-device',
+ 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test')
+ self.vm.add_args('-device',
+ 'ds1338,bus=aspeed.i2c.bus.5,address=0x32')
+ self.do_test_arm_aspeed_sdk_start(
+ self.scratch_file("ast2600-default", "image-bmc"))
+
+ self.wait_for_console_pattern('ast2600-default login:')
+
+ exec_command_and_wait_for_pattern(self, 'root', 'Password:')
+ exec_command_and_wait_for_pattern(self, '0penBmc',
+ 'root@ast2600-default:~#')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device',
+ 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon19/temp1_input', '0')
+ self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test',
+ property='temperature', value=18000)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000')
+
+ exec_command_and_wait_for_pattern(self,
+ 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device',
+ 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32')
+ year = time.strftime("%Y")
+ exec_command_and_wait_for_pattern(self,
+ '/sbin/hwclock -f /dev/rtc1', year)
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_bletchley.py b/tests/functional/test_arm_aspeed_bletchley.py
new file mode 100644
index 0000000..5a60b24
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_bletchley.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+
+
+class BletchleyMachine(AspeedTest):
+
+ ASSET_BLETCHLEY_FLASH = Asset(
+ 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/bletchley-bmc/openbmc-20250128071329/obmc-phosphor-image-bletchley-20250128071329.static.mtd.xz',
+ 'db21d04d47d7bb2a276f59d308614b4dfb70b9c7c81facbbca40a3977a2d8844')
+
+ def test_arm_ast2600_bletchley_openbmc(self):
+ image_path = self.uncompress(self.ASSET_BLETCHLEY_FLASH)
+
+ self.do_test_arm_aspeed_openbmc('bletchley-bmc', image=image_path,
+ uboot='2019.04', cpu_id='0xf00',
+ soc='AST2600 rev A3')
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_palmetto.py b/tests/functional/test_arm_aspeed_palmetto.py
new file mode 100755
index 0000000..ff0b821
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_palmetto.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+
+
+class PalmettoMachine(AspeedTest):
+
+ ASSET_PALMETTO_FLASH = Asset(
+ 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/palmetto-bmc/openbmc-20250128071432/obmc-phosphor-image-palmetto-20250128071432.static.mtd',
+ 'bce7c392eec75c707a91cfc8fad7ca9a69d7e4f10df936930d65c1cb9897ac81')
+
+ def test_arm_ast2400_palmetto_openbmc(self):
+ image_path = self.ASSET_PALMETTO_FLASH.fetch()
+
+ self.do_test_arm_aspeed_openbmc('palmetto-bmc', image=image_path,
+ uboot='2019.04', cpu_id='0x0',
+ soc='AST2400 rev A1')
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_rainier.py b/tests/functional/test_arm_aspeed_rainier.py
new file mode 100755
index 0000000..602d619
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_rainier.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+
+class RainierMachine(AspeedTest):
+
+ ASSET_RAINIER_EMMC = Asset(
+ ('https://fileserver.linaro.org/s/B6pJTwWEkzSDi36/download/'
+ 'mmc-p10bmc-20240617.qcow2'),
+ 'd523fb478d2b84d5adc5658d08502bc64b1486955683814f89c6137518acd90b')
+
+ def test_arm_aspeed_emmc_boot(self):
+ self.set_machine('rainier-bmc')
+ self.require_netdev('user')
+
+ image_path = self.ASSET_RAINIER_EMMC.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-drive',
+ 'file=' + image_path + ',if=sd,id=sd2,index=2',
+ '-net', 'nic', '-net', 'user', '-snapshot')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('U-Boot SPL 2019.04')
+ self.wait_for_console_pattern('Trying to boot from MMC1')
+ self.wait_for_console_pattern('U-Boot 2019.04')
+ self.wait_for_console_pattern('eMMC 2nd Boot')
+ self.wait_for_console_pattern('## Loading kernel from FIT Image')
+ self.wait_for_console_pattern('Starting kernel ...')
+ self.wait_for_console_pattern('Booting Linux on physical CPU 0xf00')
+ self.wait_for_console_pattern('mmcblk0: p1 p2 p3 p4 p5 p6 p7')
+ self.wait_for_console_pattern('IBM eBMC (OpenBMC for IBM Enterprise')
+
+ ASSET_DEBIAN_LINUX_ARMHF_DEB = Asset(
+ ('http://snapshot.debian.org/archive/debian/20220606T211338Z/pool/main/l/linux/linux-image-5.17.0-2-armmp_5.17.6-1%2Bb1_armhf.deb'),
+ '8acb2b4439faedc2f3ed4bdb2847ad4f6e0491f73debaeb7f660c8abe4dcdc0e')
+
+ def test_arm_debian_kernel_boot(self):
+ self.set_machine('rainier-bmc')
+
+ kernel_path = self.archive_extract(
+ self.ASSET_DEBIAN_LINUX_ARMHF_DEB,
+ member='boot/vmlinuz-5.17.0-2-armmp')
+ dtb_path = self.archive_extract(
+ self.ASSET_DEBIAN_LINUX_ARMHF_DEB,
+ member='usr/lib/linux-image-5.17.0-2-armmp/aspeed-bmc-ibm-rainier.dtb')
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-net', 'nic')
+ self.vm.launch()
+
+ self.wait_for_console_pattern("Booting Linux on physical CPU 0xf00")
+ self.wait_for_console_pattern("SMP: Total of 2 processors activated")
+ self.wait_for_console_pattern("No filesystem could mount root")
+
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_romulus.py b/tests/functional/test_arm_aspeed_romulus.py
new file mode 100755
index 0000000..0447212
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_romulus.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+
+
+class RomulusMachine(AspeedTest):
+
+ ASSET_ROMULUS_FLASH = Asset(
+ 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/romulus-bmc/openbmc-20250128071340/obmc-phosphor-image-romulus-20250128071340.static.mtd',
+ '6d031376440c82ed9d087d25e9fa76aea75b42f80daa252ec402c0bc3cf6cf5b')
+
+ def test_arm_ast2500_romulus_openbmc(self):
+ image_path = self.ASSET_ROMULUS_FLASH.fetch()
+
+ self.do_test_arm_aspeed_openbmc('romulus-bmc', image=image_path,
+ uboot='2019.04', cpu_id='0x0',
+ soc='AST2500 rev A1')
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_aspeed_witherspoon.py b/tests/functional/test_arm_aspeed_witherspoon.py
new file mode 100644
index 0000000..51a2d47
--- /dev/null
+++ b/tests/functional/test_arm_aspeed_witherspoon.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the ASPEED machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from aspeed import AspeedTest
+
+
+class WitherspoonMachine(AspeedTest):
+
+ ASSET_WITHERSPOON_FLASH = Asset(
+ 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/witherspoon-bmc/openbmc-20240618035022/obmc-phosphor-image-witherspoon-20240618035022.ubi.mtd',
+ '937d9ed449ea6c6cbed983519088a42d0cafe276bcfe4fce07772ca6673f9213')
+
+ def test_arm_ast2500_witherspoon_openbmc(self):
+ image_path = self.ASSET_WITHERSPOON_FLASH.fetch()
+
+ self.do_test_arm_aspeed_openbmc('witherspoon-bmc', image=image_path,
+ uboot='2016.07', cpu_id='0x0',
+ soc='AST2500 rev A1')
+
+if __name__ == '__main__':
+ AspeedTest.main()
diff --git a/tests/functional/test_arm_bflt.py b/tests/functional/test_arm_bflt.py
new file mode 100755
index 0000000..f273fc8
--- /dev/null
+++ b/tests/functional/test_arm_bflt.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+#
+# Test the bFLT loader format
+#
+# Copyright (C) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import bz2
+
+from qemu_test import QemuUserTest, Asset
+from qemu_test import skipIfMissingCommands, skipUntrustedTest
+
+
+class LoadBFLT(QemuUserTest):
+
+ ASSET_ROOTFS = Asset(
+ ('https://elinux.org/images/5/51/Stm32_mini_rootfs.cpio.bz2'),
+ 'eefb788e4980c9e8d6c9d60ce7d15d4da6bf4fbc6a80f487673824600d5ba9cc')
+
+ @skipIfMissingCommands('cpio')
+ @skipUntrustedTest()
+ def test_stm32(self):
+ # See https://elinux.org/STM32#User_Space
+ rootfs_path_bz2 = self.ASSET_ROOTFS.fetch()
+ busybox_path = self.scratch_file("bin", "busybox")
+
+ with bz2.open(rootfs_path_bz2, 'rb') as cpio_handle:
+ self.archive_extract(cpio_handle, format="cpio")
+
+ res = self.run_cmd(busybox_path)
+ ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.'
+ self.assertIn(ver, res.stdout)
+
+ res = self.run_cmd(busybox_path, ['uname', '-a'])
+ unm = 'armv7l GNU/Linux'
+ self.assertIn(unm, res.stdout)
+
+
+if __name__ == '__main__':
+ QemuUserTest.main()
diff --git a/tests/functional/test_arm_bpim2u.py b/tests/functional/test_arm_bpim2u.py
new file mode 100755
index 0000000..8bed64b
--- /dev/null
+++ b/tests/functional/test_arm_bpim2u.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a Banana Pi machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern
+from qemu_test import Asset, interrupt_interactive_console_until_pattern
+from qemu_test import skipBigDataTest
+from qemu_test.utils import image_pow2ceil_expand
+
+
+class BananaPiMachine(LinuxKernelTest):
+
+ ASSET_DEB = Asset(
+ ('https://apt.armbian.com/pool/main/l/linux-6.6.16/'
+ 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'),
+ '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+ 'arm/rootfs-armv7a.cpio.gz'),
+ '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd')
+
+ ASSET_ROOTFS = Asset(
+ ('http://storage.kernelci.org/images/rootfs/buildroot/'
+ 'buildroot-baseline/20230703.0/armel/rootfs.ext2.xz'),
+ '42b44a12965ac0afe9a88378527fb698a7dc76af50495efc2361ee1595b4e5c6')
+
+ ASSET_SD_IMAGE = Asset(
+ ('https://downloads.openwrt.org/releases/22.03.3/targets/sunxi/cortexa7/'
+ 'openwrt-22.03.3-sunxi-cortexa7-sinovoip_bananapi-m2-ultra-ext4-sdcard.img.gz'),
+ '5b41b4e11423e562c6011640f9a7cd3bdd0a3d42b83430f7caa70a432e6cd82c')
+
+ def test_arm_bpim2u(self):
+ self.set_machine('bpim2u')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/'
+ 'sun8i-r40-bananapi-m2-ultra.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200n8 '
+ 'earlycon=uart,mmio32,0x1c28000')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+
+ def test_arm_bpim2u_initrd(self):
+ self.set_machine('bpim2u')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/'
+ 'sun8i-r40-bananapi-m2-ultra.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun8i Family')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ 'system-control@1c00000')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+ os.remove(initrd_path)
+
+ def test_arm_bpim2u_gmac(self):
+ self.set_machine('bpim2u')
+ self.require_netdev('user')
+
+ deb_path = self.ASSET_DEB.fetch()
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/'
+ 'sun8i-r40-bananapi-m2-ultra.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+ rootfs_path = self.uncompress(self.ASSET_ROOTFS)
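+        # the emulated SD card requires a power-of-two image size, so pad
+        # the rootfs image up to the next power of two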
+ image_pow2ceil_expand(rootfs_path)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'root=b300 rootwait rw '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-drive', 'file=' + rootfs_path + ',if=sd,format=raw',
+ '-net', 'nic,model=gmac,netdev=host_gmac',
+ '-netdev', 'user,id=host_gmac',
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ shell_ready = "/bin/sh: can't access tty; job control turned off"
+ self.wait_for_console_pattern(shell_ready)
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun8i Family')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
+ 'mmcblk')
+ exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up',
+ 'eth0: Link is Up')
+ exec_command_and_wait_for_pattern(self, 'udhcpc eth0',
+ 'udhcpc: lease of 10.0.2.15 obtained')
+ exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
+ '3 packets transmitted, 3 packets received, 0% packet loss')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+ os.remove(rootfs_path)
+
+ @skipBigDataTest()
+ def test_arm_bpim2u_openwrt_22_03_3(self):
+ self.set_machine('bpim2u')
+ self.require_netdev('user')
+
+        # This test downloads an 8.9 MiB compressed image and expands
+        # it to 127 MiB.
+ image_path = self.uncompress(self.ASSET_SD_IMAGE)
+ image_pow2ceil_expand(image_path)
+
+ self.vm.set_console()
+ self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
+ '-nic', 'user',
+ '-no-reboot')
+ self.vm.launch()
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'usbcore.nousb '
+ 'noreboot')
+
+ self.wait_for_console_pattern('U-Boot SPL')
+
+ interrupt_interactive_console_until_pattern(
+ self, 'Hit any key to stop autoboot:', '=>')
+ exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
+ kernel_command_line + "'", '=>')
+ exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
+
+ self.wait_for_console_pattern(
+ 'Please press Enter to activate this console.')
+
+ exec_command_and_wait_for_pattern(self, ' ', 'root@')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun8i Family')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ 'system-control@1c00000')
+ os.remove(image_path)
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_canona1100.py b/tests/functional/test_arm_canona1100.py
new file mode 100755
index 0000000..21a1a59
--- /dev/null
+++ b/tests/functional/test_arm_canona1100.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the canon-a1100 machine with firmware
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class CanonA1100Machine(QemuSystemTest):
+ """Boots the barebox firmware and checks that the console is operational"""
+
+ timeout = 90
+
+ ASSET_BIOS = Asset(('https://qemu-advcal.gitlab.io'
+ '/qac-best-of-multiarch/download/day18.tar.xz'),
+ '28e71874ce985be66b7fd1345ed88cb2523b982f899c8d2900d6353054a1be49')
+
+ def test_arm_canona1100(self):
+ self.set_machine('canon-a1100')
+
+ bios = self.archive_extract(self.ASSET_BIOS,
+ member="day18/barebox.canon-a1100.bin")
+ self.vm.set_console()
+ self.vm.add_args('-bios', bios)
+ self.vm.launch()
+ wait_for_console_pattern(self, 'running /env/bin/init')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_arm_collie.py b/tests/functional/test_arm_collie.py
new file mode 100755
index 0000000..fe1be3d
--- /dev/null
+++ b/tests/functional/test_arm_collie.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a collie machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class CollieTest(LinuxKernelTest):
+
+ ASSET_ZIMAGE = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/collie/zImage',
+ '10ace8abf9e0875ef8a83b8829cc3b5b50bc6d7bc3ca29f19f49f5673a43c13b')
+
+ ASSET_ROOTFS = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/collie/rootfs-sa110.cpio',
+ '89ccaaa5c6b33331887047e1618ffe81b0f55909173944347d5d2426f3bcc1f2')
+
+ def test_arm_collie(self):
+ self.set_machine('collie')
+ zimage_path = self.ASSET_ZIMAGE.fetch()
+ rootfs_path = self.ASSET_ROOTFS.fetch()
+ self.vm.add_args('-append', 'rdinit=/sbin/init console=ttySA1')
+ self.launch_kernel(zimage_path,
+ initrd=rootfs_path,
+ wait_for='reboot: Restarting system')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_cubieboard.py b/tests/functional/test_arm_cubieboard.py
new file mode 100755
index 0000000..b536c2f
--- /dev/null
+++ b/tests/functional/test_arm_cubieboard.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import interrupt_interactive_console_until_pattern
+from qemu_test import skipBigDataTest
+from qemu_test.utils import image_pow2ceil_expand
+
+
+class CubieboardMachine(LinuxKernelTest):
+
+ ASSET_DEB = Asset(
+ ('https://apt.armbian.com/pool/main/l/linux-6.6.16/'
+ 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'),
+ '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+ 'arm/rootfs-armv5.cpio.gz'),
+ '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b')
+
+ ASSET_SATA_ROOTFS = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+ 'arm/rootfs-armv5.ext2.gz'),
+ '17fc750da568580b39372133051ef2f0a963c0c0b369b845614442d025701745')
+
+ ASSET_OPENWRT = Asset(
+ ('https://downloads.openwrt.org/releases/22.03.2/targets/sunxi/cortexa8/'
+ 'openwrt-22.03.2-sunxi-cortexa8-cubietech_a10-cubieboard-ext4-sdcard.img.gz'),
+ '94b5ecbfbc0b3b56276e5146b899eafa2ac5dc2d08733d6705af9f144f39f554')
+
+ def test_arm_cubieboard_initrd(self):
+ self.set_machine('cubieboard')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' +
+ 'sun4i-a10-cubieboard.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'usbcore.nousb '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun4i/sun5i')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ 'system-control@1c00000')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+ def test_arm_cubieboard_sata(self):
+ self.set_machine('cubieboard')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' +
+ 'sun4i-a10-cubieboard.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+
+ rootfs_path = self.uncompress(self.ASSET_SATA_ROOTFS)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'usbcore.nousb '
+ 'root=/dev/sda ro '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-drive', 'if=none,format=raw,id=disk0,file='
+ + rootfs_path,
+ '-device', 'ide-hd,bus=ide.0,drive=disk0',
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun4i/sun5i')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
+ 'sda')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+ @skipBigDataTest()
+ def test_arm_cubieboard_openwrt_22_03_2(self):
+        # This test downloads a 7.5 MiB compressed image and expands it
+        # to 126 MiB.
+ self.set_machine('cubieboard')
+ self.require_netdev('user')
+
+ image_path = self.uncompress(self.ASSET_OPENWRT)
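+        # Pad the image up to the next power-of-two size, as expected by
+        # QEMU's SD card emulation.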
+ image_pow2ceil_expand(image_path)
+
+ self.vm.set_console()
+ self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
+ '-nic', 'user',
+ '-no-reboot')
+ self.vm.launch()
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'usbcore.nousb '
+ 'noreboot')
+
+ self.wait_for_console_pattern('U-Boot SPL')
+
+ interrupt_interactive_console_until_pattern(
+ self, 'Hit any key to stop autoboot:', '=>')
+ exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
+ kernel_command_line + "'", '=>')
+ exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
+
+ self.wait_for_console_pattern(
+ 'Please press Enter to activate this console.')
+
+ exec_command_and_wait_for_pattern(self, ' ', 'root@')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun4i/sun5i')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_emcraft_sf2.py b/tests/functional/test_arm_emcraft_sf2.py
new file mode 100755
index 0000000..f9f3f06
--- /dev/null
+++ b/tests/functional/test_arm_emcraft_sf2.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import shutil
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test.utils import file_truncate
+
+class EmcraftSf2Machine(LinuxKernelTest):
+
+ ASSET_UBOOT = Asset(
+ ('https://raw.githubusercontent.com/Subbaraya-Sundeep/qemu-test-binaries/'
+ 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot'),
+ '5c6a15103375db11b21f2236473679a9dbbed6d89652bfcdd501c263d68ab725')
+
+ ASSET_SPI = Asset(
+ ('https://raw.githubusercontent.com/Subbaraya-Sundeep/qemu-test-binaries/'
+ 'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin'),
+ 'cd9bdd2c4cb55a59c3adb6bcf74881667c4500dde0570a43aa3be2b17eecfdb6')
+
+ def test_arm_emcraft_sf2(self):
+ self.set_machine('emcraft-sf2')
+ self.require_netdev('user')
+
+ uboot_path = self.ASSET_UBOOT.fetch()
+ spi_path = self.ASSET_SPI.fetch()
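+        # Work on a writable copy in the scratch directory so the cached
+        # asset stays untouched.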
+ spi_path_rw = self.scratch_file('spi.bin')
+ shutil.copy(spi_path, spi_path_rw)
+ os.chmod(spi_path_rw, 0o600)
+
+ file_truncate(spi_path_rw, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB
+
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+ self.vm.add_args('-kernel', uboot_path,
+ '-append', kernel_command_line,
+ '-drive', 'file=' + spi_path_rw + ',if=mtd,format=raw',
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Enter \'help\' for a list')
+
+ exec_command_and_wait_for_pattern(self, 'ifconfig eth0 10.0.2.15',
+ 'eth0: link becomes ready')
+ exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
+ '3 packets transmitted, 3 packets received, 0% packet loss')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_integratorcp.py b/tests/functional/test_arm_integratorcp.py
new file mode 100755
index 0000000..4f00924
--- /dev/null
+++ b/tests/functional/test_arm_integratorcp.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import logging
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import skipIfMissingImports, skipUntrustedTest
+
+
+class IntegratorMachine(QemuSystemTest):
+
+ timeout = 90
+
+ ASSET_KERNEL = Asset(
+ ('https://github.com/zayac/qemu-arm/raw/master/'
+ 'arm-test/kernel/zImage.integrator'),
+ '26e7c7e8f943de785d95bd3c74d66451604a9b6a7a3d25dceb279e7548fd8e78')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/zayac/qemu-arm/raw/master/'
+ 'arm-test/kernel/arm_root.img'),
+ 'e187c27fb342ad148c7f33475fbed124933e0b3f4be8c74bc4f3426a4793373a')
+
+ ASSET_TUXLOGO = Asset(
+ ('https://github.com/torvalds/linux/raw/v2.6.12/'
+ 'drivers/video/logo/logo_linux_vga16.ppm'),
+ 'b762f0d91ec018887ad1b334543c2fdf9be9fdfc87672b409211efaa3ea0ef79')
+
+ def boot_integratorcp(self):
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+
+ self.set_machine('integratorcp')
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', 'printk.time=0 console=ttyAMA0')
+ self.vm.launch()
+
+ @skipUntrustedTest()
+ def test_integratorcp_console(self):
+ """
+ Boots the Linux kernel and checks that the console is operational
+ """
+ self.boot_integratorcp()
+ wait_for_console_pattern(self, 'Log in as root')
+
+ @skipIfMissingImports("numpy", "cv2")
+ @skipUntrustedTest()
+ def test_framebuffer_tux_logo(self):
+ """
+ Boot Linux and verify the Tux logo is displayed on the framebuffer.
+ """
+ import numpy as np
+ import cv2
+
+ screendump_path = self.scratch_file("screendump.pbm")
+ tuxlogo_path = self.ASSET_TUXLOGO.fetch()
+
+ self.boot_integratorcp()
+ framebuffer_ready = 'Console: switching to colour frame buffer device'
+ wait_for_console_pattern(self, framebuffer_ready)
+ self.vm.cmd('human-monitor-command', command_line='stop')
+ res = self.vm.cmd('human-monitor-command',
+ command_line='screendump %s' % screendump_path)
+ if 'unknown command' in res:
+ self.skipTest('screendump not available')
+ logger = logging.getLogger('framebuffer')
+
+ cpu_count = 1
+ match_threshold = 0.92
+ screendump_bgr = cv2.imread(screendump_path)
+ screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY)
+ result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0),
+ cv2.TM_CCOEFF_NORMED)
+ loc = np.where(result >= match_threshold)
+ tux_count = 0
+ for tux_count, pt in enumerate(zip(*loc[::-1]), start=1):
+ logger.debug('found Tux at position [x, y] = %s', pt)
+ self.assertGreaterEqual(tux_count, cpu_count)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_arm_microbit.py b/tests/functional/test_arm_microbit.py
new file mode 100755
index 0000000..68ea4e7
--- /dev/null
+++ b/tests/functional/test_arm_microbit.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright 2025, The QEMU Project Developers.
+#
+# A functional test that runs MicroPython on the arm microbit machine.
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class MicrobitMachine(QemuSystemTest):
+
+ ASSET_MICRO = Asset('https://ozlabs.org/~joel/microbit-micropython.hex',
+ '021641f93dfb11767d4978dbb3ca7f475d1b13c69e7f4aec3382f212636bffd6')
+
+ def test_arm_microbit(self):
+ self.set_machine('microbit')
+
+ micropython = self.ASSET_MICRO.fetch()
+ self.vm.set_console()
+ self.vm.add_args('-device', f'loader,file={micropython}')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Type "help()" for more information.')
+ exec_command_and_wait_for_pattern(self, 'import machine as mch', '>>>')
+ exec_command_and_wait_for_pattern(self, 'mch.reset()', 'MicroPython')
+ wait_for_console_pattern(self, '>>>')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_arm_orangepi.py b/tests/functional/test_arm_orangepi.py
new file mode 100755
index 0000000..f9bfa8c
--- /dev/null
+++ b/tests/functional/test_arm_orangepi.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on an Orange Pi machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import shutil
+
+from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern
+from qemu_test import Asset, interrupt_interactive_console_until_pattern
+from qemu_test import wait_for_console_pattern, skipBigDataTest
+from qemu_test.utils import image_pow2ceil_expand
+
+
+class OrangePiMachine(LinuxKernelTest):
+
+ ASSET_DEB = Asset(
+ ('https://apt.armbian.com/pool/main/l/linux-6.6.16/'
+ 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'),
+ '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+ 'arm/rootfs-armv7a.cpio.gz'),
+ '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd')
+
+ ASSET_ROOTFS = Asset(
+ ('http://storage.kernelci.org/images/rootfs/buildroot/'
+ 'buildroot-baseline/20230703.0/armel/rootfs.ext2.xz'),
+ '42b44a12965ac0afe9a88378527fb698a7dc76af50495efc2361ee1595b4e5c6')
+
+ ASSET_ARMBIAN = Asset(
+ ('https://k-space.ee.armbian.com/archive/orangepipc/archive/'
+ 'Armbian_23.8.1_Orangepipc_jammy_current_6.1.47.img.xz'),
+ 'b386dff6552513b5f164ea00f94814a6b0f1da9fb90b83725e949cf797e11afb')
+
+ ASSET_UBOOT = Asset(
+ ('http://snapshot.debian.org/archive/debian/20200108T145233Z/pool/'
+ 'main/u/u-boot/u-boot-sunxi_2020.01%2Bdfsg-1_armhf.deb'),
+ '9223d94dc283ab54df41ce9d6f69025a5b47fece29fb67a714e23aa0cdf3bdfa')
+
+ ASSET_NETBSD = Asset(
+ ('https://archive.netbsd.org/pub/NetBSD-archive/NetBSD-9.0/'
+ 'evbarm-earmv7hf/binary/gzimg/armv7.img.gz'),
+ '20d3e07dc057e15c12452620e90ecab2047f0f7940d9cba8182ebc795927177f')
+
+ def test_arm_orangepi(self):
+ self.set_machine('orangepi-pc')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' +
+ 'sun8i-h3-orangepi-pc.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200n8 '
+ 'earlycon=uart,mmio32,0x1c28000')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+
+ def test_arm_orangepi_initrd(self):
+ self.set_machine('orangepi-pc')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' +
+ 'sun8i-h3-orangepi-pc.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun8i Family')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ 'system-control@1c00000')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+ os.remove(initrd_path)
+
+ def test_arm_orangepi_sd(self):
+ self.set_machine('orangepi-pc')
+ self.require_netdev('user')
+ kernel_path = self.archive_extract(
+ self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' +
+ 'sun8i-h3-orangepi-pc.dtb')
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+ rootfs_path = self.uncompress(self.ASSET_ROOTFS)
+ image_pow2ceil_expand(rootfs_path)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'root=/dev/mmcblk0 rootwait rw '
+ 'panic=-1 noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-drive', 'file=' + rootfs_path + ',if=sd,format=raw',
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ shell_ready = "/bin/sh: can't access tty; job control turned off"
+ self.wait_for_console_pattern(shell_ready)
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'Allwinner sun8i Family')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/partitions',
+ 'mmcblk0')
+ exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up',
+ 'eth0: Link is Up')
+ exec_command_and_wait_for_pattern(self, 'udhcpc eth0',
+ 'udhcpc: lease of 10.0.2.15 obtained')
+ exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2',
+ '3 packets transmitted, 3 packets received, 0% packet loss')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+ os.remove(kernel_path)
+ os.remove(dtb_path)
+ os.remove(rootfs_path)
+
+ @skipBigDataTest()
+ def test_arm_orangepi_armbian(self):
+ self.set_machine('orangepi-pc')
+ self.require_netdev('user')
+
+        # This test downloads a 275 MiB compressed image and expands it
+        # to 1036 MiB, but the underlying filesystem is 1552 MiB...
+        # As we expand it to 2 GiB we are safe.
+ image_path = self.uncompress(self.ASSET_ARMBIAN)
+ image_pow2ceil_expand(image_path)
+
+ self.vm.set_console()
+ self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw',
+ '-nic', 'user',
+ '-no-reboot')
+ self.vm.launch()
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'loglevel=7 '
+ 'nosmp '
+ 'systemd.default_timeout_start_sec=9000 '
+ 'systemd.mask=armbian-zram-config.service '
+ 'systemd.mask=armbian-ramlog.service')
+
+ self.wait_for_console_pattern('U-Boot SPL')
+ self.wait_for_console_pattern('Autoboot in ')
+ exec_command_and_wait_for_pattern(self, ' ', '=>')
+ exec_command_and_wait_for_pattern(self, "setenv extraargs '" +
+ kernel_command_line + "'", '=>')
+ exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...')
+
+ self.wait_for_console_pattern('systemd[1]: Hostname set ' +
+ 'to <orangepipc>')
+ self.wait_for_console_pattern('Starting Load Kernel Modules...')
+
+ @skipBigDataTest()
+ def test_arm_orangepi_uboot_netbsd9(self):
+ self.set_machine('orangepi-pc')
+ self.require_netdev('user')
+
+        # This test downloads a 304 MB compressed image and expands it to 2 GB.
+        # We use the common OrangePi PC 'plus' build of U-Boot for our secondary
+        # program loader (SPL). We then set the path to the more specific
+        # OrangePi "PC" device tree blob with 'setenv fdtfile' at the U-Boot
+        # prompt, before booting NetBSD.
+ uboot_path = 'usr/lib/u-boot/orangepi_plus/u-boot-sunxi-with-spl.bin'
+ uboot_path = self.archive_extract(self.ASSET_UBOOT, member=uboot_path)
+ image_path = self.uncompress(self.ASSET_NETBSD)
+ image_pow2ceil_expand(image_path)
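+        # snapshot=on: guest writes go to a temporary overlay instead of
+        # modifying the image file itself.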
+ image_drive_args = 'if=sd,format=raw,snapshot=on,file=' + image_path
+
+ # dd if=u-boot-sunxi-with-spl.bin of=armv7.img bs=1K seek=8 conv=notrunc
+ with open(uboot_path, 'rb') as f_in:
+ with open(image_path, 'r+b') as f_out:
+ f_out.seek(8 * 1024)
+ shutil.copyfileobj(f_in, f_out)
+
+ self.vm.set_console()
+ self.vm.add_args('-nic', 'user',
+ '-drive', image_drive_args,
+ '-global', 'allwinner-rtc.base-year=2000',
+ '-no-reboot')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'U-Boot 2020.01+dfsg-1')
+ interrupt_interactive_console_until_pattern(self,
+ 'Hit any key to stop autoboot:',
+ 'switch to partitions #0, OK')
+
+ exec_command_and_wait_for_pattern(self, '', '=>')
+ cmd = 'setenv bootargs root=ld0a'
+ exec_command_and_wait_for_pattern(self, cmd, '=>')
+ cmd = 'setenv kernel netbsd-GENERIC.ub'
+ exec_command_and_wait_for_pattern(self, cmd, '=>')
+ cmd = 'setenv fdtfile dtb/sun8i-h3-orangepi-pc.dtb'
+ exec_command_and_wait_for_pattern(self, cmd, '=>')
+ cmd = ("setenv bootcmd 'fatload mmc 0:1 ${kernel_addr_r} ${kernel}; "
+ "fatload mmc 0:1 ${fdt_addr_r} ${fdtfile}; "
+ "fdt addr ${fdt_addr_r}; "
+ "bootm ${kernel_addr_r} - ${fdt_addr_r}'")
+ exec_command_and_wait_for_pattern(self, cmd, '=>')
+
+ exec_command_and_wait_for_pattern(self, 'boot',
+ 'Booting kernel from Legacy Image')
+ wait_for_console_pattern(self, 'Starting kernel ...')
+ wait_for_console_pattern(self, 'NetBSD 9.0 (GENERIC)')
+ # Wait for user-space
+ wait_for_console_pattern(self, 'Starting root file system check')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_quanta_gsj.py b/tests/functional/test_arm_quanta_gsj.py
new file mode 100755
index 0000000..cb0545f
--- /dev/null
+++ b/tests/functional/test_arm_quanta_gsj.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import interrupt_interactive_console_until_pattern, skipSlowTest
+
+
+class QuantaGSJMachine(LinuxKernelTest):
+
+ ASSET_IMAGE = Asset(
+ ('https://github.com/hskinnemoen/openbmc/releases/download/'
+ '20200711-gsj-qemu-0/obmc-phosphor-image-gsj.static.mtd.gz'),
+ 'eccd4e375cde53034c84aece5c511932cacf838d9fd3f63da368a511757da72b')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/hskinnemoen/openbmc/releases/download/'
+ '20200711-gsj-qemu-0/obmc-phosphor-initramfs-gsj.cpio.xz'),
+ '37b05009fc54db1434beac12bd7ff99a2e751a2f032ee18d9042f991dd0cdeaa')
+
+ ASSET_KERNEL = Asset(
+ ('https://github.com/hskinnemoen/openbmc/releases/download/'
+ '20200711-gsj-qemu-0/uImage-gsj.bin'),
+ 'ce6d6b37bff46c74fc7b1e90da10a431cc37a62cdb35ec199fa73473d0790110')
+
+ ASSET_DTB = Asset(
+ ('https://github.com/hskinnemoen/openbmc/releases/download/'
+ '20200711-gsj-qemu-0/nuvoton-npcm730-gsj.dtb'),
+ '3249b2da787d4b9ad4e61f315b160abfceb87b5e1895a7ce898ce7f40c8d4045')
+
+ @skipSlowTest()
+ def test_arm_quanta_gsj(self):
+ self.set_machine('quanta-gsj')
+ image_path = self.uncompress(self.ASSET_IMAGE, format='gz')
+
+ self.vm.set_console()
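+        # Attach the OpenBMC flash image as the board's first MTD flash device.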
+ drive_args = 'file=' + image_path + ',if=mtd,bus=0,unit=0'
+ self.vm.add_args('-drive', drive_args)
+ self.vm.launch()
+
+ # Disable drivers and services that stall for a long time during boot,
+ # to avoid running past the 90-second timeout. These may be removed
+ # as the corresponding device support is added.
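+        # ${console} and ${mem} are left unexpanded here; U-Boot substitutes
+        # them from its environment when bootargs is set at the prompt below.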
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + (
+ 'console=${console} '
+ 'mem=${mem} '
+ 'initcall_blacklist=npcm_i2c_bus_driver_init '
+ 'systemd.mask=systemd-random-seed.service '
+ 'systemd.mask=dropbearkey.service '
+ )
+
+ self.wait_for_console_pattern('> BootBlock by Nuvoton')
+ self.wait_for_console_pattern('>Device: Poleg BMC NPCM730')
+ self.wait_for_console_pattern('>Skip DDR init.')
+ self.wait_for_console_pattern('U-Boot ')
+ interrupt_interactive_console_until_pattern(
+ self, 'Hit any key to stop autoboot:', 'U-Boot>')
+ exec_command_and_wait_for_pattern(
+ self, "setenv bootargs ${bootargs} " + kernel_command_line,
+ 'U-Boot>')
+ exec_command_and_wait_for_pattern(
+ self, 'run romboot', 'Booting Kernel from flash')
+ self.wait_for_console_pattern('Booting Linux on physical CPU 0x0')
+ self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0')
+ self.wait_for_console_pattern('OpenBMC Project Reference Distro')
+ self.wait_for_console_pattern('gsj login:')
+
+ def test_arm_quanta_gsj_initrd(self):
+ self.set_machine('quanta-gsj')
+ initrd_path = self.ASSET_INITRD.fetch()
+ kernel_path = self.ASSET_KERNEL.fetch()
+ dtb_path = self.ASSET_DTB.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200n8 '
+ 'earlycon=uart8250,mmio32,0xf0001000')
+ self.vm.add_args('-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-dtb', dtb_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+
+ self.wait_for_console_pattern('Booting Linux on physical CPU 0x0')
+ self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0')
+ self.wait_for_console_pattern(
+ 'Give root password for system maintenance')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_raspi2.py b/tests/functional/test_arm_raspi2.py
new file mode 100755
index 0000000..d3c7aaa
--- /dev/null
+++ b/tests/functional/test_arm_raspi2.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a Raspberry Pi machine
+# and checks the console
+#
+# Copyright (c) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class ArmRaspi2Machine(LinuxKernelTest):
+
+ ASSET_KERNEL_20190215 = Asset(
+ ('http://archive.raspberrypi.org/debian/'
+ 'pool/main/r/raspberrypi-firmware/'
+ 'raspberrypi-kernel_1.20190215-1_armhf.deb'),
+ '9f1759f7228113da24f5ee2aa6312946ec09a83e076aba9406c46ff776dfb291')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+ 'arm/rootfs-armv7a.cpio.gz'),
+ '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd')
+
+ def do_test_arm_raspi2(self, uart_id):
+ """
+        The kernel can be rebuilt using the referenced kernel source
+        and by following the instructions at:
+ https://www.raspberrypi.org/documentation/linux/kernel/building.md
+ """
+ serial_kernel_cmdline = {
+ 0: 'earlycon=pl011,0x3f201000 console=ttyAMA0',
+ }
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/kernel7.img')
+ dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/bcm2709-rpi-2-b.dtb')
+
+ self.set_machine('raspi2b')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ serial_kernel_cmdline[uart_id] +
+ ' root=/dev/mmcblk0p2 rootwait ' +
+ 'dwc_otg.fiq_fsm_enable=0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-append', kernel_command_line,
+ '-device', 'usb-kbd')
+ self.vm.launch()
+
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+ self.wait_for_console_pattern('Product: QEMU USB Keyboard')
+
+ def test_arm_raspi2_uart0(self):
+ self.do_test_arm_raspi2(0)
+
+ def test_arm_raspi2_initrd(self):
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/kernel7.img')
+ dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215,
+ member='boot/bcm2709-rpi-2-b.dtb')
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.set_machine('raspi2b')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'earlycon=pl011,0x3f201000 console=ttyAMA0 '
+ 'panic=-1 noreboot ' +
+ 'dwc_otg.fiq_fsm_enable=0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'BCM2835')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+ '/soc/cprman@7e101000')
+ exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_realview.py b/tests/functional/test_arm_realview.py
new file mode 100755
index 0000000..82cc964
--- /dev/null
+++ b/tests/functional/test_arm_realview.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a realview arm machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern
+from qemu_test import Asset
+
+
+class RealviewMachine(LinuxKernelTest):
+
+ ASSET_REALVIEW_MPCORE = Asset(
+ ('https://archive.openwrt.org/chaos_calmer/15.05.1/realview/generic/'
+ 'openwrt-15.05.1-realview-vmlinux-initramfs.elf'),
+ 'd3a01037f33e7512d46d50975588d5c3a0e0cbf25f37afab44775c2a2be523e6')
+
+ def test_realview_ep_mpcore(self):
+ self.require_netdev('user')
+ self.set_machine('realview-eb-mpcore')
+ kernel_path = self.ASSET_REALVIEW_MPCORE.fetch()
+ self.vm.set_console()
+ kernel_param = 'console=ttyAMA0 mem=128M quiet'
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_param)
+ self.vm.launch()
+ self.wait_for_console_pattern('Please press Enter to activate')
+ prompt = ':/#'
+ exec_command_and_wait_for_pattern(self, '', prompt)
+ exec_command_and_wait_for_pattern(self, 'dmesg', kernel_param)
+ self.wait_for_console_pattern(prompt)
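+        # Boot message ordering is not deterministic, so poll until the bridge
+        # port and the DHCP address actually show up.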
+ exec_command_and_wait_for_pattern(self,
+ ('while ! dmesg | grep "br-lan: port 1(eth0) entered" ;'
+ ' do sleep 1 ; done'),
+ 'entered forwarding state')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self,
+ 'while ! ifconfig | grep "10.0.2.15" ; do sleep 1 ; done',
+ 'addr:10.0.2.15')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self, 'ping -c 1 10.0.2.2',
+ '1 packets received, 0% packet loss')
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_replay.py b/tests/functional/test_arm_replay.py
new file mode 100755
index 0000000..e002e6a
--- /dev/null
+++ b/tests/functional/test_arm_replay.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on arm machines and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class ArmReplay(ReplayKernelBase):
+
+ ASSET_VIRT = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/29/Everything/armhfp/os/images/pxeboot/vmlinuz'),
+ '18dd5f1a9a28bd539f9d047f7c0677211bae528e8712b40ca5a229a4ad8e2591')
+
+ def test_virt(self):
+ self.set_machine('virt')
+ kernel_path = self.ASSET_VIRT.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0')
+ console_pattern = 'VFS: Cannot open root device'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1)
+
+ ASSET_CUBIE_KERNEL = Asset(
+ ('https://apt.armbian.com/pool/main/l/linux-6.6.16/'
+ 'linux-image-current-sunxi_24.2.1_armhf_'
+ '_6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'),
+ '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22')
+
+ ASSET_CUBIE_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/arm/rootfs-armv5.cpio.gz'),
+ '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b')
+
+ def test_cubieboard(self):
+ self.set_machine('cubieboard')
+ kernel_path = self.archive_extract(self.ASSET_CUBIE_KERNEL,
+ member='boot/vmlinuz-6.6.16-current-sunxi')
+ dtb_path = self.archive_extract(self.ASSET_CUBIE_KERNEL,
+ member='usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb')
+ initrd_path = self.uncompress(self.ASSET_CUBIE_INITRD)
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0,115200 '
+ 'usbcore.nousb '
+ 'panic=-1 noreboot')
+ console_pattern = 'Boot successful.'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1,
+ args=('-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-no-reboot'))
+
+ ASSET_DAY16 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day16.tar.xz',
+ '63311adb2d4c4e7a73214a86d29988add87266a909719c56acfadd026b4110a7')
+
+ def test_vexpressa9(self):
+ self.set_machine('vexpress-a9')
+ self.archive_extract(self.ASSET_DAY16)
+ kernel_path = self.scratch_file('day16', 'winter.zImage')
+ dtb_path = self.scratch_file('day16', 'vexpress-v2p-ca9.dtb')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar', args=('-dtb', dtb_path))
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_arm_smdkc210.py b/tests/functional/test_arm_smdkc210.py
new file mode 100755
index 0000000..3154e7f
--- /dev/null
+++ b/tests/functional/test_arm_smdkc210.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class Smdkc210Machine(LinuxKernelTest):
+
+ ASSET_DEB = Asset(
+ ('https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/'
+ 'main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb'),
+ '421804e7579ef40d554c962850dbdf1bfc79f7fa7faec9d391397170dc806c3e')
+
+ ASSET_ROOTFS = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/arm/'
+ 'rootfs-armv5.cpio.gz'),
+ '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b')
+
+ def test_arm_exynos4210_initrd(self):
+ self.set_machine('smdkc210')
+
+ kernel_path = self.archive_extract(self.ASSET_DEB,
+ member='boot/vmlinuz-4.19.0-6-armmp')
+ dtb_path = 'usr/lib/linux-image-4.19.0-6-armmp/exynos4210-smdkv310.dtb'
+ dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path)
+
+ initrd_path = self.uncompress(self.ASSET_ROOTFS)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'earlycon=exynos4210,0x13800000 earlyprintk ' +
+ 'console=ttySAC0,115200n8 ' +
+ 'random.trust_cpu=off cryptomgr.notests ' +
+ 'cpuidle.off=1 panic=-1 noreboot')
+
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('Boot successful.')
+        # TODO: run a user-space command; for now the UART is stuck after boot.
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_stellaris.py b/tests/functional/test_arm_stellaris.py
new file mode 100755
index 0000000..cbd21cb
--- /dev/null
+++ b/tests/functional/test_arm_stellaris.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+#
+# Functional test that checks the serial console of the stellaris machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class StellarisMachine(QemuSystemTest):
+
+ ASSET_DAY22 = Asset(
+ 'https://www.qemu-advent-calendar.org/2023/download/day22.tar.gz',
+ 'ae3a63ef4b7a22c21bfc7fc0d85e402fe95e223308ed23ac854405016431ff51')
+
+ def test_lm3s6965evb(self):
+ self.set_machine('lm3s6965evb')
+ kernel_path = self.archive_extract(self.ASSET_DAY22,
+ member='day22/day22.bin')
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path)
+ self.vm.launch()
+
+ wait_for_console_pattern(self, 'In a one horse open')
+
+ ASSET_NOTMAIN = Asset(
+ 'https://github.com/Ahelion/QemuArmM4FDemoSw/raw/master/build/notmain.bin',
+ '6ceda031aa081a420fca2fca9e137fa681d6e3820d820ad1917736cb265e611a')
+
+ def test_lm3s811evb(self):
+ self.set_machine('lm3s811evb')
+ kernel_path = self.ASSET_NOTMAIN.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-cpu', 'cortex-m4')
+ self.vm.add_args('-kernel', kernel_path)
+ self.vm.launch()
+
+ # The test kernel emits an initial '!' and then waits for input.
+ # For each character that we send it responds with a certain
+ # other ASCII character.
+ wait_for_console_pattern(self, '!')
+ exec_command_and_wait_for_pattern(self, '789', 'cdf')
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_arm_sx1.py b/tests/functional/test_arm_sx1.py
new file mode 100755
index 0000000..25800b3
--- /dev/null
+++ b/tests/functional/test_arm_sx1.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2024 Linaro Ltd.
+#
+# Functional test that boots a Linux kernel on an sx1 machine
+# and checks the console. We have three variants:
+# * just boot initrd
+# * boot with filesystem on SD card
+# * boot from flash
+# In all cases these images have a userspace that is configured
+# to immediately reboot the system on successful boot, so we
+# only need to wait for QEMU to exit (via -no-reboot).
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class SX1Test(LinuxKernelTest):
+
+ ASSET_ZIMAGE = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/zImage',
+ 'a0271899a8dc2165f9e0adb2d0a57fc839ae3a469722ffc56c77e108a8887615')
+
+ ASSET_INITRD = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/rootfs-armv4.cpio',
+ '35b0721249821aa544cd85b85d3cb8901db4c6d128eed86ab261e5d9e37d58f8')
+
+ ASSET_SD_FS = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/rootfs-armv4.ext2',
+ 'c1db7f43ef92469ebc8605013728c8950e7608439f01d13678994f0ce101c3a8')
+
+ ASSET_FLASH = Asset(
+ 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/flash',
+ '17e6a2758fa38efd2666be0879d4751fd37d194f25168a8deede420df519b676')
+
+ CONSOLE_ARGS = 'console=ttyS0,115200 earlycon=uart8250,mmio32,0xfffb0000,115200n8'
+
+ def test_arm_sx1_initrd(self):
+ self.set_machine('sx1')
+ zimage_path = self.ASSET_ZIMAGE.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+ self.vm.add_args('-append', f'kunit.enable=0 rdinit=/sbin/init {self.CONSOLE_ARGS}')
+ self.vm.add_args('-no-reboot')
+ self.launch_kernel(zimage_path,
+ initrd=initrd_path,
+ wait_for='Boot successful')
+ self.vm.wait(timeout=120)
+
+ def test_arm_sx1_sd(self):
+ self.set_machine('sx1')
+ zimage_path = self.ASSET_ZIMAGE.fetch()
+ sd_fs_path = self.ASSET_SD_FS.fetch()
+ self.vm.add_args('-append', f'kunit.enable=0 root=/dev/mmcblk0 rootwait {self.CONSOLE_ARGS}')
+ self.vm.add_args('-no-reboot')
+ self.vm.add_args('-snapshot')
+ self.vm.add_args('-drive', f'format=raw,if=sd,file={sd_fs_path}')
+ self.launch_kernel(zimage_path, wait_for='Boot successful')
+ self.vm.wait(timeout=120)
+
+ def test_arm_sx1_flash(self):
+ self.set_machine('sx1')
+ zimage_path = self.ASSET_ZIMAGE.fetch()
+ flash_path = self.ASSET_FLASH.fetch()
+ self.vm.add_args('-append', f'kunit.enable=0 root=/dev/mtdblock3 rootwait {self.CONSOLE_ARGS}')
+ self.vm.add_args('-no-reboot')
+ self.vm.add_args('-snapshot')
+ self.vm.add_args('-drive', f'format=raw,if=pflash,file={flash_path}')
+ self.launch_kernel(zimage_path, wait_for='Boot successful')
+ self.vm.wait(timeout=120)
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_tuxrun.py b/tests/functional/test_arm_tuxrun.py
new file mode 100755
index 0000000..4ac85f4
--- /dev/null
+++ b/tests/functional/test_arm_tuxrun.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunArmTest(TuxRunBaselineTest):
+
+ ASSET_ARMV5_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv5/zImage',
+ '3931a3908dbcf0ec0fe292d035ffc4dfed95f797dedd4a59ccfcf7a46e6f92d4')
+ ASSET_ARMV5_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv5/rootfs.ext4.zst',
+ '60ff78b68c7021df378e4fc2d66d3b016484d1acc7e07fb8920c1d8e30f4571f')
+ ASSET_ARMV5_DTB = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv5/versatile-pb.dtb',
+ '50988e69ef3f3b08bfb9146e8fe414129990029e8dfbed444953b7e14809530a')
+
+ def test_armv5(self):
+ self.set_machine('versatilepb')
+ self.cpu='arm926'
+ self.console='ttyAMA0'
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_ARMV5_KERNEL,
+ rootfs_asset=self.ASSET_ARMV5_ROOTFS,
+ dtb_asset=self.ASSET_ARMV5_DTB,
+ drive="virtio-blk-pci")
+
+ ASSET_ARMV7_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv7/zImage',
+ '1377bc3d90de5ce57ab17cd67429fe8b15c2e9964248c775c682b67e6299b991')
+ ASSET_ARMV7_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv7/rootfs.ext4.zst',
+ 'ed2cbc69bd6b3fbd5cafb5ee961393c7cfbe726446f14301c67d6b1f28bfdb51')
+
+ def test_armv7(self):
+ self.set_machine('virt')
+ self.cpu='cortex-a15'
+ self.console='ttyAMA0'
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_ARMV7_KERNEL,
+ rootfs_asset=self.ASSET_ARMV7_ROOTFS)
+
+ ASSET_ARMV7BE_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv7be/zImage',
+ 'a244e6da99f1bbd254827ec7681bd4aac9eb1aa05aaebc6b15e5d289ebb683f3')
+ ASSET_ARMV7BE_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/armv7be/rootfs.ext4.zst',
+ 'd4f9c57860a512163f30ecc69b2174d1a1bdeb853a43dc49a09cfcfe84e428ea')
+
+ def test_armv7be(self):
+ self.set_machine('virt')
+ self.cpu='cortex-a15'
+ self.console='ttyAMA0'
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_ARMV7BE_KERNEL,
+ rootfs_asset=self.ASSET_ARMV7BE_ROOTFS)
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_arm_vexpress.py b/tests/functional/test_arm_vexpress.py
new file mode 100755
index 0000000..6b11552
--- /dev/null
+++ b/tests/functional/test_arm_vexpress.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a Versatile Express machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class VExpressTest(LinuxKernelTest):
+
+ ASSET_DAY16 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day16.tar.xz',
+ '63311adb2d4c4e7a73214a86d29988add87266a909719c56acfadd026b4110a7')
+
+ def test_arm_vexpressa9(self):
+ self.set_machine('vexpress-a9')
+ self.archive_extract(self.ASSET_DAY16)
+ self.launch_kernel(self.scratch_file('day16', 'winter.zImage'),
+ dtb=self.scratch_file('day16',
+ 'vexpress-v2p-ca9.dtb'),
+ wait_for='QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_arm_virt.py b/tests/functional/test_arm_virt.py
new file mode 100755
index 0000000..7b65491
--- /dev/null
+++ b/tests/functional/test_arm_virt.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+class ArmVirtMachine(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/29/Everything/armhfp/os/images/pxeboot/vmlinuz'),
+ '18dd5f1a9a28bd539f9d047f7c0677211bae528e8712b40ca5a229a4ad8e2591')
+
+ def test_arm_virt(self):
+ self.set_machine('virt')
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyAMA0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_avr_mega2560.py b/tests/functional/test_avr_mega2560.py
new file mode 100755
index 0000000..6359b72
--- /dev/null
+++ b/tests/functional/test_avr_mega2560.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+#
+# QEMU AVR integration tests
+#
+# Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern
+
+
+class AVR6Machine(QemuSystemTest):
+
+ ASSET_ROM = Asset(('https://github.com/seharris/qemu-avr-tests'
+ '/raw/36c3e67b8755dcf/free-rtos/Demo'
+ '/AVR_ATMega2560_GCC/demo.elf'),
+ 'ee4833bd65fc69e84a79ed1c608affddbd499a60e63acf87d9113618401904e4')
+
+ def test_freertos(self):
+ """
+ https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf
+ constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX'
+ """
+ rom_path = self.ASSET_ROM.fetch()
+
+ self.set_machine('arduino-mega-2560-v3')
+ self.vm.add_args('-bios', rom_path)
+ self.vm.add_args('-nographic')
+ self.vm.set_console()
+ self.vm.launch()
+
+ wait_for_console_pattern(self,
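+        # Template-match the Tux logo against the grayscale screendump; every
+        # location with a normalized correlation >= match_threshold counts as
+        # one detected logo.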
+ 'XABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWXA')
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_avr_uno.py b/tests/functional/test_avr_uno.py
new file mode 100755
index 0000000..adb3b73
--- /dev/null
+++ b/tests/functional/test_avr_uno.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+#
+# QEMU AVR Arduino UNO functional test
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern
+
+
+class UnoMachine(QemuSystemTest):
+
+ ASSET_UNO = Asset(
+ ('https://github.com/RahulRNandan/LED_Blink_AVR/raw/'
+ 'c6d602cbb974a193/build/main.elf'),
+ '3009a4e2cf5c5b65142f538abdf66d4dc6bc6beab7e552fff9ae314583761b72')
+
+ def test_uno(self):
+ """
+ The binary constantly prints out 'LED Blink'
+ """
+ self.set_machine('arduino-uno')
+ rom_path = self.ASSET_UNO.fetch()
+
+ self.vm.add_args('-bios', rom_path)
+ self.vm.set_console()
+ self.vm.launch()
+
+ wait_for_console_pattern(self, 'LED Blink')
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_cpu_queries.py b/tests/functional/test_cpu_queries.py
new file mode 100755
index 0000000..b1122a0
--- /dev/null
+++ b/tests/functional/test_cpu_queries.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Sanity check of query-cpu-* results
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+
+class QueryCPUModelExpansion(QemuSystemTest):
+ """
+ Run query-cpu-model-expansion for each CPU model, and validate results
+ """
+
+ def test(self):
+ self.set_machine('none')
+ self.vm.add_args('-S')
+ self.vm.launch()
+
+ cpus = self.vm.cmd('query-cpu-definitions')
+ for c in cpus:
+ self.log.info("Checking CPU: %s", c)
+ self.assertNotIn('', c['unavailable-features'], c['name'])
+
+ for c in cpus:
+ model = {'name': c['name']}
+ e = self.vm.cmd('query-cpu-model-expansion', model=model,
+ type='full')
+ self.assertEqual(e['model']['name'], c['name'])
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_empty_cpu_model.py b/tests/functional/test_empty_cpu_model.py
new file mode 100755
index 0000000..0081b06
--- /dev/null
+++ b/tests/functional/test_empty_cpu_model.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+#
+# Check for crash when using empty -cpu option
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+from qemu_test import QemuSystemTest
+
+class EmptyCPUModel(QemuSystemTest):
+ def test(self):
+ self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '')
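+        # QEMU is expected to reject the empty -cpu option and exit, so do not
+        # wait for a QMP monitor connection.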
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_hppa_seabios.py b/tests/functional/test_hppa_seabios.py
new file mode 100755
index 0000000..661b246
--- /dev/null
+++ b/tests/functional/test_hppa_seabios.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# SeaBIOS boot test for HPPA machines
+#
+# Copyright (c) 2024 Linaro, Ltd
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest
+from qemu_test import wait_for_console_pattern
+
+class HppaSeabios(QemuSystemTest):
+
+ timeout = 5
+ MACH_BITS = {'B160L': 32, 'C3700': 64}
+
+ def boot_seabios(self):
+ mach = self.machine
+ bits = self.MACH_BITS[mach]
+ self.vm.add_args('-no-shutdown')
+ self.vm.set_console()
+ self.vm.launch()
+ wait_for_console_pattern(self, f'SeaBIOS PA-RISC {bits}-bit Firmware')
+ wait_for_console_pattern(self, f'Emulated machine: HP {mach} ({bits}-bit')
+
+ def test_hppa_32(self):
+ self.set_machine('B160L')
+ self.boot_seabios()
+
+ def test_hppa_64(self):
+ self.set_machine('C3700')
+ self.boot_seabios()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_i386_replay.py b/tests/functional/test_i386_replay.py
new file mode 100755
index 0000000..7c4c260
--- /dev/null
+++ b/tests/functional/test_i386_replay.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an i386 machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class I386Replay(ReplayKernelBase):
+
+ ASSET_KERNEL = Asset(
+ 'https://storage.tuxboot.com/20230331/i386/bzImage',
+ 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956')
+
+ def test_pc(self):
+ self.set_machine('pc')
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ console_pattern = 'VFS: Cannot open root device'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_i386_tuxrun.py b/tests/functional/test_i386_tuxrun.py
new file mode 100755
index 0000000..f3ccf11
--- /dev/null
+++ b/tests/functional/test_i386_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunI386Test(TuxRunBaselineTest):
+
+ ASSET_I386_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/i386/bzImage',
+ '47fb44e38e34101eb0f71a2a01742b959d40ed5fd67cefb5608a39be11d3b74e')
+ ASSET_I386_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/i386/rootfs.ext4.zst',
+ 'a1a3b3b4c9dccd6475b58db95c107b468b736b700f6620985a8ed050a73d51c8')
+
+ def test_i386(self):
+ self.set_machine('q35')
+ self.cpu="coreduo"
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_I386_KERNEL,
+ rootfs_asset=self.ASSET_I386_ROOTFS,
+ drive="virtio-blk-pci")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_info_usernet.py b/tests/functional/test_info_usernet.py
new file mode 100755
index 0000000..e8cbc37
--- /dev/null
+++ b/tests/functional/test_info_usernet.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+#
+# Test for the hmp command "info usernet"
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# Author:
+# Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+from qemu_test.utils import get_usernet_hostfwd_port
+
+
+class InfoUsernet(QemuSystemTest):
+
+ def test_hostfwd(self):
+ self.require_netdev('user')
+ self.set_machine('none')
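+        # Host port 0 lets the OS pick a free port; get_usernet_hostfwd_port()
+        # reads back the port that was actually bound via "info usernet".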
+ self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22')
+ self.vm.launch()
+
+ port = get_usernet_hostfwd_port(self.vm)
+ self.assertIsNotNone(port,
+ ('"info usernet" output content does not seem to '
+ 'contain the redirected port'))
+ self.assertGreater(port, 0,
+ ('Found a redirected port that is not greater than'
+ ' zero'))
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_intel_iommu.py b/tests/functional/test_intel_iommu.py
new file mode 100755
index 0000000..62268d6
--- /dev/null
+++ b/tests/functional/test_intel_iommu.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+#
+# INTEL_IOMMU Functional tests
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# Author:
+# Eric Auger <eric.auger@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+
+
+class IntelIOMMU(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129')
+
+ ASSET_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/initrd.img'),
+ '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b')
+
+ ASSET_DISKIMAGE = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2'),
+ 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0')
+
+ DEFAULT_KERNEL_PARAMS = ('root=/dev/vda1 console=ttyS0 net.ifnames=0 '
+ 'quiet rd.rescue ')
+ GUEST_PORT = 8080
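+    # Make the virtio devices modern-only and honour the platform IOMMU so
+    # that their DMA is actually translated by the emulated intel-iommu.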
+ IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
+ kernel_path = None
+ initrd_path = None
+ kernel_params = None
+
+ def add_common_args(self, path):
+ self.vm.add_args('-drive', f'file={path},if=none,id=drv0,snapshot=on')
+ self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
+ 'drive=drv0,id=virtio-disk0,bootindex=1,'
+ 'werror=stop,rerror=stop' + self.IOMMU_ADDON)
+ self.vm.add_args('-device', 'virtio-gpu-pci' + self.IOMMU_ADDON)
+
+ self.vm.add_args('-netdev',
+ 'user,id=n1,hostfwd=tcp:127.0.0.1:0-:%d' %
+ self.GUEST_PORT)
+ self.vm.add_args('-device',
+ 'virtio-net-pci,netdev=n1' + self.IOMMU_ADDON)
+
+ self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+ self.vm.add_args('-object',
+ 'rng-random,id=rng0,filename=/dev/urandom')
+ self.vm.add_args("-m", "1G")
+ self.vm.add_args("-accel", "kvm")
+
+ def common_vm_setup(self):
+ self.set_machine('q35')
+ self.require_accelerator("kvm")
+ self.require_netdev('user')
+
+ self.kernel_path = self.ASSET_KERNEL.fetch()
+ self.initrd_path = self.ASSET_INITRD.fetch()
+ image_path = self.ASSET_DISKIMAGE.fetch()
+ self.add_common_args(image_path)
+ self.kernel_params = self.DEFAULT_KERNEL_PARAMS
+
+ def run_and_check(self):
+ if self.kernel_path:
+ self.vm.add_args('-kernel', self.kernel_path,
+ '-append', self.kernel_params,
+ '-initrd', self.initrd_path)
+ self.vm.set_console()
+ self.vm.launch()
+ self.wait_for_console_pattern('Entering emergency mode.')
+ prompt = '# '
+ self.wait_for_console_pattern(prompt)
+
+ # Copy a file (checked later), umount afterwards to drop disk cache:
+ exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot',
+ prompt)
+ filename = '/boot/initramfs-5.3.7-301.fc31.x86_64.img'
+ exec_command_and_wait_for_pattern(self, (f'cp /sysroot{filename}'
+ ' /sysroot/root/data'),
+ prompt)
+ exec_command_and_wait_for_pattern(self, 'umount /sysroot', prompt)
+
+ # Switch from initrd to the cloud image filesystem:
+ exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot',
+ prompt)
+ exec_command_and_wait_for_pattern(self,
+ ('for d in dev proc sys run ; do '
+ 'mount -o bind /$d /sysroot/$d ; done'), prompt)
+ exec_command_and_wait_for_pattern(self, 'chroot /sysroot', prompt)
+
+ # Checking for IOMMU enablement:
+ self.log.info("Checking whether IOMMU has been enabled...")
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline',
+ 'intel_iommu=on')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self, 'dmesg | grep DMAR:',
+ 'IOMMU enabled')
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self,
+ 'find /sys/kernel/iommu_groups/ -type l',
+ 'devices/0000:00:')
+ self.wait_for_console_pattern(prompt)
+
+ # Check hard disk device via sha256sum:
+ self.log.info("Checking hard disk...")
+ hashsum = '0dc7472f879be70b2f3daae279e3ae47175ffe249691e7d97f47222b65b8a720'
+ exec_command_and_wait_for_pattern(self, 'sha256sum ' + filename,
+ hashsum)
+ self.wait_for_console_pattern(prompt)
+ exec_command_and_wait_for_pattern(self, 'sha256sum /root/data',
+ hashsum)
+ self.wait_for_console_pattern(prompt)
+
+ # Check virtio-net via HTTP:
+ exec_command_and_wait_for_pattern(self, 'dhclient eth0', prompt)
+ self.check_http_download(filename, hashsum, self.GUEST_PORT)
+
+ def test_intel_iommu(self):
+ self.common_vm_setup()
+ self.vm.add_args('-device', 'intel-iommu,intremap=on')
+ self.vm.add_args('-machine', 'kernel_irqchip=split')
+ self.kernel_params += 'intel_iommu=on'
+ self.run_and_check()
+
+ def test_intel_iommu_strict(self):
+ self.common_vm_setup()
+ self.vm.add_args('-device', 'intel-iommu,intremap=on')
+ self.vm.add_args('-machine', 'kernel_irqchip=split')
+ self.kernel_params += 'intel_iommu=on,strict'
+ self.run_and_check()
+
+ def test_intel_iommu_strict_cm(self):
+ self.common_vm_setup()
+ self.vm.add_args('-device', 'intel-iommu,intremap=on,caching-mode=on')
+ self.vm.add_args('-machine', 'kernel_irqchip=split')
+ self.kernel_params += 'intel_iommu=on,strict'
+ self.run_and_check()
+
+ def test_intel_iommu_pt(self):
+ self.common_vm_setup()
+ self.vm.add_args('-device', 'intel-iommu,intremap=on')
+ self.vm.add_args('-machine', 'kernel_irqchip=split')
+ self.kernel_params += 'intel_iommu=on iommu=pt'
+ self.run_and_check()
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_linux_initrd.py b/tests/functional/test_linux_initrd.py
new file mode 100755
index 0000000..2207f83
--- /dev/null
+++ b/tests/functional/test_linux_initrd.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Linux initrd integration test.
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+# Wainer dos Santos Moschetta <wainersm@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import logging
+import tempfile
+
+from qemu_test import QemuSystemTest, Asset, skipFlakyTest
+
+
+class LinuxInitrd(QemuSystemTest):
+ """
+    Checks that QEMU correctly evaluates the initrd file passed via the
+    -initrd option.
+ """
+
+ timeout = 300
+
+ ASSET_F18_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz'),
+ '1a27cb42559ce29237ac186699d063556ad69c8349d732bb1bd8d614e5a8cc2e')
+
+ ASSET_F28_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+ 'releases/28/Everything/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd05909c9d4a742a6fcc84dcc0361009e4611769619cc187a07107579a035f24e')
+
+ def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self):
+ """
+        Attempts to boot QEMU with a 2 GiB initrd file and expects it to
+        exit with an error message.
+        Fedora 18 shipped with Linux 3.6, which did not support xloadflags
+        and therefore cannot handle an initrd of 2 GiB or more.
+ """
+ self.set_machine('pc')
+ kernel_path = self.ASSET_F18_KERNEL.fetch()
+ max_size = 2 * (1024 ** 3) - 1
+
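+        # Seeking to max_size and writing a single byte creates a (typically
+        # sparse) file of max_size + 1 bytes without writing 2 GiB of data.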
+ with tempfile.NamedTemporaryFile() as initrd:
+ initrd.seek(max_size)
+ initrd.write(b'\0')
+ initrd.flush()
+ self.vm.add_args('-kernel', kernel_path, '-initrd', initrd.name,
+ '-m', '4096')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1)
+ expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % (
+ max_size + 1)
+ self.assertRegex(self.vm.get_log(), expected_msg)
+
+ # XXX file tracking bug
+ @skipFlakyTest(bug_url=None)
+ def test_with_2gib_file_should_work_with_linux_v4_16(self):
+ """
+        QEMU supports an initrd of up to 4 GiB for recent kernels.
+        Expect the guest to reach 'Unpacking initramfs...'.
+ """
+ self.set_machine('pc')
+ kernel_path = self.ASSET_F28_KERNEL.fetch()
+ max_size = 2 * (1024 ** 3) + 1
+
+ with tempfile.NamedTemporaryFile() as initrd:
+ initrd.seek(max_size)
+ initrd.write(b'\0')
+ initrd.flush()
+
+ self.vm.set_console()
+ kernel_command_line = 'console=ttyS0'
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line,
+ '-initrd', initrd.name,
+ '-m', '5120')
+ self.vm.launch()
+ console = self.vm.console_socket.makefile()
+ console_logger = logging.getLogger('console')
+ while True:
+ msg = console.readline()
+ console_logger.debug(msg.strip())
+ if 'Unpacking initramfs...' in msg:
+ break
+ if 'Kernel panic - not syncing' in msg:
+ self.fail("Kernel panic reached")
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_loongarch64_virt.py b/tests/functional/test_loongarch64_virt.py
new file mode 100755
index 0000000..b7d9abf
--- /dev/null
+++ b/tests/functional/test_loongarch64_virt.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# LoongArch virt test.
+#
+# Copyright (c) 2023 Loongson Technology Corporation Limited
+#
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+class LoongArchMachine(QemuSystemTest):
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+ timeout = 120
+
+ ASSET_KERNEL = Asset(
+ ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+ 'releases/download/2024-11-26/vmlinuz.efi'),
+ '08b88a45f48a5fd92260bae895be4e5175be2397481a6f7821b9f39b2965b79e')
+ ASSET_INITRD = Asset(
+ ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+ 'releases/download/2024-11-26/ramdisk'),
+ '03d6fb6f8ee64ecac961120a0bdacf741f17b3bee2141f17fa01908c8baf176a')
+ ASSET_BIOS = Asset(
+ ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+ 'releases/download/2024-11-26/QEMU_EFI.fd'),
+ 'f55fbf5d92e885844631ae9bfa8887f659bbb4f6ef2beea9e9ff8bc0603b6697')
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(self, success_message,
+ failure_message='Kernel panic - not syncing',
+ vm=vm)
+
+ def test_loongarch64_devices(self):
+
+ self.set_machine('virt')
+
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+ bios_path = self.ASSET_BIOS.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200')
+ self.vm.add_args('-nographic',
+ '-smp', '4',
+ '-m', '1024',
+ '-cpu', 'la464',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-bios', bios_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ self.wait_for_console_pattern('Run /sbin/init as init process')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'processor : 3')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_m68k_mcf5208evb.py b/tests/functional/test_m68k_mcf5208evb.py
new file mode 100755
index 0000000..c7d1998
--- /dev/null
+++ b/tests/functional/test_m68k_mcf5208evb.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on an MCF5208EVB machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class Mcf5208EvbTest(LinuxKernelTest):
+
+ ASSET_DAY07 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day07.tar.xz',
+ '753c2f3837126b7c6ba92d0b1e0b156e8a2c5131d2d576bb0b9a763fae73c08a')
+
+ def test_m68k_mcf5208evb(self):
+ self.set_machine('mcf5208evb')
+ self.archive_extract(self.ASSET_DAY07)
+ self.vm.set_console()
+ self.vm.add_args('-kernel',
+ self.scratch_file('day07', 'sanity-clause.elf'))
+ self.vm.launch()
+ self.wait_for_console_pattern('QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_m68k_nextcube.py b/tests/functional/test_m68k_nextcube.py
new file mode 100755
index 0000000..13c72bd
--- /dev/null
+++ b/tests/functional/test_m68k_nextcube.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a VM and runs OCR on the framebuffer
+#
+# Copyright (c) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import time
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import skipIfMissingImports, skipIfMissingCommands
+from qemu_test.tesseract import tesseract_ocr
+
+
+class NextCubeMachine(QemuSystemTest):
+
+ timeout = 15
+
+ ASSET_ROM = Asset(('https://sourceforge.net/p/previous/code/1350/tree/'
+ 'trunk/src/Rev_2.5_v66.BIN?format=raw'),
+ '1b753890b67095b73e104c939ddf62eca9e7d0aedde5108e3893b0ed9d8000a4')
+
+ def check_bootrom_framebuffer(self, screenshot_path):
+ rom_path = self.ASSET_ROM.fetch()
+
+ self.vm.add_args('-bios', rom_path)
+ self.vm.launch()
+
+ self.log.info('VM launched, waiting for display')
+ # TODO: wait for the 'displaysurface_create 1120x832' trace-event.
+ time.sleep(2)
+
+ res = self.vm.cmd('human-monitor-command',
+ command_line='screendump %s' % screenshot_path)
+ if 'unknown command' in res:
+ self.skipTest('screendump not available')
+
+ @skipIfMissingImports("PIL")
+ def test_bootrom_framebuffer_size(self):
+ self.set_machine('next-cube')
+ screenshot_path = self.scratch_file("dump.ppm")
+ self.check_bootrom_framebuffer(screenshot_path)
+
+ from PIL import Image
+ width, height = Image.open(screenshot_path).size
+ self.assertEqual(width, 1120)
+ self.assertEqual(height, 832)
+
+ @skipIfMissingCommands('tesseract')
+ def test_bootrom_framebuffer_ocr_with_tesseract(self):
+ self.set_machine('next-cube')
+ screenshot_path = self.scratch_file("dump.ppm")
+ self.check_bootrom_framebuffer(screenshot_path)
+ lines = tesseract_ocr(screenshot_path)
+ text = '\n'.join(lines)
+ self.assertIn('Testing the FPU', text)
+ self.assertIn('System test failed. Error code', text)
+ self.assertIn('Boot command', text)
+ self.assertIn('Next>', text)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_m68k_q800.py b/tests/functional/test_m68k_q800.py
new file mode 100755
index 0000000..b3e6553
--- /dev/null
+++ b/tests/functional/test_m68k_q800.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Functional test for testing the q800 m68k machine
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import LinuxKernelTest, Asset
+
+class Q800MachineTest(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://snapshot.debian.org/'
+ 'archive/debian-ports/20191021T083923Z/pool-m68k/main/l/linux/'
+ 'kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb'),
+ '949e50d74d4b9bc15d26c06d402717b7a4c0e32ff8100014f5930d8024de7b73')
+
+ def test_m68k_q800(self):
+ self.set_machine('q800')
+
+ kernel_path = self.archive_extract(self.ASSET_KERNEL,
+ member='boot/vmlinux-5.3.0-1-m68k')
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0 vga=off')
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line,
+ '-audio', 'none')
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+ console_pattern = 'No filesystem could mount root'
+ self.wait_for_console_pattern(console_pattern)
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_m68k_replay.py b/tests/functional/test_m68k_replay.py
new file mode 100755
index 0000000..213d6ae
--- /dev/null
+++ b/tests/functional/test_m68k_replay.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an m68k machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class M68kReplay(ReplayKernelBase):
+
+ ASSET_Q800 = Asset(
+ ('https://snapshot.debian.org/'
+ 'archive/debian-ports/20191021T083923Z/pool-m68k/main/l/linux/'
+ 'kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb'),
+ '949e50d74d4b9bc15d26c06d402717b7a4c0e32ff8100014f5930d8024de7b73')
+
+ def test_q800(self):
+ self.set_machine('q800')
+ kernel_path = self.archive_extract(self.ASSET_Q800,
+ member='boot/vmlinux-5.3.0-1-m68k')
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0 vga=off')
+ console_pattern = 'No filesystem could mount root'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern,
+ args=('-audio', 'none'))
+
+ ASSET_MCF5208 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day07.tar.xz',
+ '753c2f3837126b7c6ba92d0b1e0b156e8a2c5131d2d576bb0b9a763fae73c08a')
+
+ def test_mcf5208evb(self):
+ self.set_machine('mcf5208evb')
+ kernel_path = self.archive_extract(self.ASSET_MCF5208,
+ member='day07/sanity-clause.elf')
+ self.run_rr(kernel_path, self.KERNEL_COMMON_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_m68k_tuxrun.py b/tests/functional/test_m68k_tuxrun.py
new file mode 100755
index 0000000..7eacba1
--- /dev/null
+++ b/tests/functional/test_m68k_tuxrun.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2024 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunM68KTest(TuxRunBaselineTest):
+
+ ASSET_M68K_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/m68k/vmlinux',
+ '7754e1d5cec753ccf1dc6894729a7f54c1a4965631ebf56df8e4ce1163ad19d8')
+ ASSET_M68K_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/m68k/rootfs.ext4.zst',
+ '557962ffff265607912e82232cf21adbe0e4e5a88e1e1d411ce848c37f0213e9')
+
+ def test_m68k(self):
+ self.set_machine('virt')
+ self.cpu="m68040"
+ self.common_tuxrun(kernel_asset=self.ASSET_M68K_KERNEL,
+ rootfs_asset=self.ASSET_M68K_ROOTFS,
+ drive="virtio-blk-device")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_mem_addr_space.py b/tests/functional/test_mem_addr_space.py
new file mode 100755
index 0000000..61b4a19
--- /dev/null
+++ b/tests/functional/test_mem_addr_space.py
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+#
+# Check for crash when using memory beyond the available guest processor
+# address space.
+#
+# Copyright (c) 2023 Red Hat, Inc.
+#
+# Author:
+# Ani Sinha <anisinha@redhat.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest
+import time
+
+class MemAddrCheck(QemuSystemTest):
+    # After launch, we need to wait for some time for QEMU to generate
+    # its logs; launching and then immediately shutting down the VM
+    # produces empty logs. A delay of 1 second is added for this reason.
+ DELAY_Q35_BOOT_SEQUENCE = 1
+
+ # This helper can go away when the 32-bit host deprecation
+ # turns into full & final removal of support.
+ def ensure_64bit_binary(self):
+ with open(self.qemu_bin, "rb") as fh:
+ ident = fh.read(4)
+
+ # "\x7fELF"
+ if ident != bytes([0x7f, 0x45, 0x4C, 0x46]):
+ # Non-ELF file implies macOS or Windows which
+ # we already assume to be 64-bit only
+ return
+
+ # bits == 1 -> 32-bit; bits == 2 -> 64-bit
+ bits = int.from_bytes(fh.read(1), byteorder='little')
+ if bits != 2:
+ # 32-bit ELF builds won't be able to address sufficient
+ # RAM to run the tests
+ self.skipTest("64-bit build host is required")
+
+    # First, let's test some 32-bit processors.
+    # For all 32-bit cases, pci64_hole_size is 0.
+ def test_phybits_low_pse36(self):
+ """
+        With the pse36 feature ON, a processor has 36 bits of addressing, so it
+        can access up to a maximum of 64 GiB of memory. Memory hotplug region begins
+ at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
+ we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
+ hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
+ for dimm alignment for all machines. That leaves total hotpluggable
+ actual memory size of 59 GiB. If the VM is started with 0.5 GiB of
+ memory, maxmem should be set to a maximum value of 59.5 GiB to ensure
+ that the processor can address all memory directly.
+        Note that the 64-bit pci hole size is 0 in this case. If maxmem is set
+        to 59.6G, QEMU should fail to start with the message "phys-bits too
+        low". If maxmem is set to 59.5G with all other QEMU parameters
+        identical, QEMU should start fine.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=59.6G',
+ '-cpu', 'pentium,pse36=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_pae(self):
+ """
+        With the pae feature ON, a processor has 36 bits of addressing, so it
+        can access up to a maximum of 64 GiB of memory. The rest is the same
+        as the pse36 case above.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=59.6G',
+ '-cpu', 'pentium,pae=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium_pse36(self):
+ """
+ Setting maxmem to 59.5G and making sure that QEMU can start with the
+ same options as the failing case above with pse36 cpu feature.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-m', '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium,pse36=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium_pae(self):
+ """
+ Test is same as above but now with pae cpu feature turned on.
+ Setting maxmem to 59.5G and making sure that QEMU can start fine
+ with the same options as the case above.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-m', '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium,pae=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium2(self):
+ """
+        Pentium2 has 36 bits of addressing, so it's the same as pentium
+        with pse36 ON.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-m', '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium2', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_nonpse36(self):
+ """
+        The Pentium processor has 32 bits of addressing without pse36 or
+        pae, so it can access physical addresses up to 4 GiB. Setting maxmem
+        to 4 GiB should make QEMU fail to start with the "phys-bits too low"
+        message, because the region for memory hotplug is always placed
+        above 4 GiB due to the PCI hole and for simplicity.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=4G',
+ '-cpu', 'pentium', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    # Now let's test some 64-bit CPU cases.
+ def test_phybits_low_tcg_q35_70_amd(self):
+ """
+        For q35 7.1 machines and above, there is an HT window that starts at
+        1012 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
+        "above_4G" memory is adjusted to start at the 1 TiB boundary for AMD
+        cpus in the default case. Let's test the case without that adjustment,
+        using 7.0 machines. For q35-7.0 machines, "above 4G" memory starts at
+        4 GiB. The pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is
+        defined to be 40, TCG emulated CPUs have a maximum of 1 TiB (1024 GiB)
+        of directly addressable memory.
+        Hence, the maxmem value can be at most
+        1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB,
+        which is equal to 987.5 GiB. Setting the value to 988 GiB should
+        make QEMU fail with the "phys-bits too low" error message.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.0')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=988G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_tcg_q35_71_amd(self):
+ """
+        AMD_HT_START is defined to be at 1012 GiB. So for q35 machine
+        versions > 7.0 and AMD cpus, instead of the 1024 GiB limit for a
+        40-bit processor address space, the limit has to be 1012 GiB, that
+        is, 12 GiB less than the case above, in order to accommodate the HT
+        hole. Make sure QEMU fails when the maxmem size is 976 GiB (12 GiB
+        less than 988 GiB).
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.1')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=976G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_70_amd(self):
+ """
+ Same as q35-7.0 AMD case except that here we check that QEMU can
+ successfully start when maxmem is < 988G.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.0')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=987.5G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_amd(self):
+ """
+ Same as q35-7.1 AMD case except that here we check that QEMU can
+ successfully start when maxmem is < 976G.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.1')
+ self.vm.add_args('-S', '-m', '512,slots=1,maxmem=975.5G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_intel(self):
+ """
+ Same parameters as test_phybits_low_tcg_q35_71_amd() but use
+ Intel cpu instead. QEMU should start fine in this case as
+ "above_4G" memory starts at 4G.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.1')
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server',
+ '-m', '512,slots=1,maxmem=976G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_tcg_q35_71_amd_41bits(self):
+ """
+ AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+        By setting maxmem above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
+ force "above_4G" memory to start at 1 TiB for q35-7.1 machines
+ (max GPA will be above AMD_HT_START which is defined as 1012 GiB).
+
+        With the pci_64_hole size at 32 GiB, maxmem in this case can be at
+        most 991.5 GiB, with 1 GiB per slot for alignment and 0.5 GiB as
+        non-hotplug memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB,
+        QEMU should fail to start.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.1')
+ self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+ '-m', '512,slots=1,maxmem=992G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_amd_41bits(self):
+ """
+ AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+        Same as above, but by setting maxmem between 976 GiB and 992 GiB,
+        QEMU should start fine.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('pc-q35-7.1')
+ self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+ '-m', '512,slots=1,maxmem=990G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_tcg_q35_intel_cxl(self):
+ """
+        The cxl memory window starts after the memory device range. Here, we
+        use 1 GiB of cxl window memory. The end of 4G_mem aligns at 4 GiB.
+        The pci64_hole is 32 GiB and starts after the cxl memory window.
+        So maxmem here should be at most 986 GiB, considering all memory
+        boundary alignment constraints with 40 processor physical bits (1 TiB).
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+ '-m', '512,slots=1,maxmem=987G',
+ '-display', 'none',
+ '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
+ '-M', 'cxl=on,cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_intel_cxl(self):
+ """
+ Same as above but here we do not reserve any cxl memory window. Hence,
+ with the exact same parameters as above, QEMU should start fine even
+ with cxl enabled.
+ """
+ self.ensure_64bit_binary()
+ self.set_machine('q35')
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+ '-machine', 'cxl=on',
+ '-m', '512,slots=1,maxmem=987G',
+ '-display', 'none',
+ '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_memlock.py b/tests/functional/test_memlock.py
new file mode 100755
index 0000000..2b515ff
--- /dev/null
+++ b/tests/functional/test_memlock.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+#
+# Functional test that checks overcommit memlock options
+#
+# Copyright (c) Yandex Technologies LLC, 2025
+#
+# Author:
+# Alexandr Moshkov <dtalexundeer@yandex-team.ru>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import re
+
+from typing import Dict
+
+from qemu_test import QemuSystemTest
+from qemu_test import skipLockedMemoryTest
+
+
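+# Matches lines such as "VmLck:      1024 kB" in /proc/<pid>/status.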
+STATUS_VALUE_PATTERN = re.compile(r'^(\w+):\s+(\d+) kB', re.MULTILINE)
+
+
+@skipLockedMemoryTest(2_097_152) # 2GB
+class MemlockTest(QemuSystemTest):
+ """
+    Runs a guest with memlock options, then verifies that the option works
+    correctly by checking the status file of the QEMU process.
+ """
+
+ def common_vm_setup_with_memlock(self, memlock):
+ self.vm.add_args('-overcommit', f'mem-lock={memlock}')
+ self.vm.launch()
+
+ def test_memlock_off(self):
+ self.common_vm_setup_with_memlock('off')
+
+ status = self.get_process_status_values(self.vm.get_pid())
+
+ self.assertTrue(status['VmLck'] == 0)
+
+ def test_memlock_on(self):
+ self.common_vm_setup_with_memlock('on')
+
+ status = self.get_process_status_values(self.vm.get_pid())
+
+ # VmLck > 0 kB and almost all memory is resident
+ self.assertTrue(status['VmLck'] > 0)
+ self.assertTrue(status['VmRSS'] >= status['VmSize'] * 0.70)
+
+ def test_memlock_onfault(self):
+ self.common_vm_setup_with_memlock('on-fault')
+
+ status = self.get_process_status_values(self.vm.get_pid())
+
+        # VmLck > 0 kB and only a small amount of memory is resident
+ self.assertTrue(status['VmLck'] > 0)
+ self.assertTrue(status['VmRSS'] <= status['VmSize'] * 0.30)
+
+ def get_process_status_values(self, pid: int) -> Dict[str, int]:
+ result = {}
+ raw_status = self._get_raw_process_status(pid)
+
+ for line in raw_status.split('\n'):
+ if m := STATUS_VALUE_PATTERN.match(line):
+ result[m.group(1)] = int(m.group(2))
+
+ return result
+
+ def _get_raw_process_status(self, pid: int) -> str:
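+        # /proc/<pid>/status is Linux-specific; skip the test on hosts where
+        # it is not available.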
+ try:
+ with open(f'/proc/{pid}/status', 'r') as f:
+ return f.read()
+ except FileNotFoundError:
+ self.skipTest("Can't open status file of the process")
+
+
+if __name__ == '__main__':
+ MemlockTest.main()
diff --git a/tests/functional/test_microblaze_replay.py b/tests/functional/test_microblaze_replay.py
new file mode 100755
index 0000000..7484c41
--- /dev/null
+++ b/tests/functional/test_microblaze_replay.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on a microblaze machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class MicroblazeReplay(ReplayKernelBase):
+
+ ASSET_DAY17 = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day17.tar.xz'),
+ '3ba7439dfbea7af4876662c97f8e1f0cdad9231fc166e4861d17042489270057')
+
+ def test_microblaze_s3adsp1800(self):
+ self.set_machine('petalogix-s3adsp1800')
+ kernel_path = self.archive_extract(self.ASSET_DAY17,
+ member='day17/ballerina.bin')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_microblaze_s3adsp1800.py b/tests/functional/test_microblaze_s3adsp1800.py
new file mode 100755
index 0000000..f093b16
--- /dev/null
+++ b/tests/functional/test_microblaze_s3adsp1800.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a microblaze Linux kernel and checks the console
+#
+# Copyright (c) 2018, 2021 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class MicroblazeMachine(QemuSystemTest):
+
+ timeout = 90
+
+ ASSET_IMAGE_BE = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day17.tar.xz'),
+ '3ba7439dfbea7af4876662c97f8e1f0cdad9231fc166e4861d17042489270057')
+
+ ASSET_IMAGE_LE = Asset(
+ ('http://www.qemu-advent-calendar.org/2023/download/day13.tar.gz'),
+ 'b9b3d43c5dd79db88ada495cc6e0d1f591153fe41355e925d791fbf44de50c22')
+
+ def do_ballerina_be_test(self, force_endianness=False):
+ self.set_machine('petalogix-s3adsp1800')
+ self.archive_extract(self.ASSET_IMAGE_BE)
+ self.vm.set_console()
+ self.vm.add_args('-kernel',
+ self.scratch_file('day17', 'ballerina.bin'))
+ if force_endianness:
+ self.vm.add_args('-M', 'endianness=big')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'This architecture does not have '
+ 'kernel memory protection')
+ # Note:
+ # The kernel sometimes gets stuck after the "This architecture ..."
+ # message, that's why we don't test for a later string here. This
+ # needs some investigation by a microblaze wizard one day...
+
+ def do_xmaton_le_test(self, force_endianness=False):
+ self.require_netdev('user')
+ self.set_machine('petalogix-s3adsp1800')
+ self.archive_extract(self.ASSET_IMAGE_LE)
+ self.vm.set_console()
+ self.vm.add_args('-kernel', self.scratch_file('day13', 'xmaton.bin'))
+ if force_endianness:
+ self.vm.add_args('-M', 'endianness=little')
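+        # Serve the extracted image directory over TFTP on the user-mode
+        # network; 10.0.2.2 is the host as seen from the guest.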
+ tftproot = self.scratch_file('day13')
+ self.vm.add_args('-nic', f'user,tftp={tftproot}')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'QEMU Advent Calendar 2023')
+ wait_for_console_pattern(self, 'buildroot login:')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+ exec_command_and_wait_for_pattern(self,
+ 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png',
+ '821cd3cab8efd16ad6ee5acc3642a8ea')
+
+
+class MicroblazeBigEndianMachine(MicroblazeMachine):
+
+ ASSET_IMAGE_BE = MicroblazeMachine.ASSET_IMAGE_BE
+ ASSET_IMAGE_LE = MicroblazeMachine.ASSET_IMAGE_LE
+
+ def test_microblaze_s3adsp1800_legacy_be(self):
+ self.do_ballerina_be_test()
+
+ def test_microblaze_s3adsp1800_legacy_le(self):
+ self.do_xmaton_le_test(force_endianness=True)
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_microblazeel_s3adsp1800.py b/tests/functional/test_microblazeel_s3adsp1800.py
new file mode 100755
index 0000000..915902d
--- /dev/null
+++ b/tests/functional/test_microblazeel_s3adsp1800.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a microblaze Linux kernel and checks the console
+#
+# Copyright (c) 2018, 2021 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from test_microblaze_s3adsp1800 import MicroblazeMachine
+
+
+class MicroblazeLittleEndianMachine(MicroblazeMachine):
+
+ ASSET_IMAGE_LE = MicroblazeMachine.ASSET_IMAGE_LE
+ ASSET_IMAGE_BE = MicroblazeMachine.ASSET_IMAGE_BE
+
+ def test_microblaze_s3adsp1800_legacy_le(self):
+ self.do_xmaton_le_test()
+
+ def test_microblaze_s3adsp1800_legacy_be(self):
+ self.do_ballerina_be_test(force_endianness=True)
+
+
+if __name__ == '__main__':
+ MicroblazeMachine.main()
diff --git a/tests/functional/test_migration.py b/tests/functional/test_migration.py
new file mode 100755
index 0000000..c4393c3
--- /dev/null
+++ b/tests/functional/test_migration.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+#
+# Migration test
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# Authors:
+# Cleber Rosa <crosa@redhat.com>
+# Caio Carrara <ccarrara@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import tempfile
+import time
+
+from qemu_test import QemuSystemTest, skipIfMissingCommands
+from qemu_test.ports import Ports
+
+
+class MigrationTest(QemuSystemTest):
+
+ timeout = 10
+
+ @staticmethod
+ def migration_finished(vm):
+ return vm.cmd('query-migrate')['status'] in ('completed', 'failed')
+
+ def assert_migration(self, src_vm, dst_vm):
+
+ end = time.monotonic() + self.timeout
+ while time.monotonic() < end and not self.migration_finished(src_vm):
+ time.sleep(0.1)
+
+ end = time.monotonic() + self.timeout
+ while time.monotonic() < end and not self.migration_finished(dst_vm):
+ time.sleep(0.1)
+
+ self.assertEqual(src_vm.cmd('query-migrate')['status'], 'completed')
+ self.assertEqual(dst_vm.cmd('query-migrate')['status'], 'completed')
+ self.assertEqual(dst_vm.cmd('query-status')['status'], 'running')
+        self.assertEqual(src_vm.cmd('query-status')['status'], 'postmigrate')
+
+ def select_machine(self):
+ target_machine = {
+ 'aarch64': 'quanta-gsj',
+ 'alpha': 'clipper',
+ 'arm': 'npcm750-evb',
+ 'i386': 'isapc',
+ 'ppc': 'sam460ex',
+ 'ppc64': 'mac99',
+ 'riscv32': 'spike',
+ 'riscv64': 'virt',
+ 'sparc': 'SS-4',
+ 'sparc64': 'sun4u',
+ 'x86_64': 'microvm',
+ }
+ self.set_machine(target_machine[self.arch])
+
+ def do_migrate(self, dest_uri, src_uri=None):
+ self.select_machine()
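+        # Start the destination VM first so that it is already listening on
+        # dest_uri (-incoming) when the source is told to migrate.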
+ dest_vm = self.get_vm('-incoming', dest_uri, name="dest-qemu")
+ dest_vm.add_args('-nodefaults')
+ dest_vm.launch()
+ if src_uri is None:
+ src_uri = dest_uri
+ source_vm = self.get_vm(name="source-qemu")
+ source_vm.add_args('-nodefaults')
+ source_vm.launch()
+ source_vm.qmp('migrate', uri=src_uri)
+ self.assert_migration(source_vm, dest_vm)
+
+ def _get_free_port(self, ports):
+ port = ports.find_free_port()
+ if port is None:
+ self.skipTest('Failed to find a free port')
+ return port
+
+ def test_migration_with_tcp_localhost(self):
+ with Ports() as ports:
+ dest_uri = 'tcp:localhost:%u' % self._get_free_port(ports)
+ self.do_migrate(dest_uri)
+
+ def test_migration_with_unix(self):
+ with tempfile.TemporaryDirectory(prefix='socket_') as socket_path:
+ dest_uri = 'unix:%s/qemu-test.sock' % socket_path
+ self.do_migrate(dest_uri)
+
+ @skipIfMissingCommands('ncat')
+ def test_migration_with_exec(self):
+ with Ports() as ports:
+ free_port = self._get_free_port(ports)
+ dest_uri = 'exec:ncat -l localhost %u' % free_port
+ src_uri = 'exec:ncat localhost %u' % free_port
+ self.do_migrate(dest_uri, src_uri)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_mips64_malta.py b/tests/functional/test_mips64_malta.py
new file mode 100755
index 0000000..53c3e0c
--- /dev/null
+++ b/tests/functional/test_mips64_malta.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the big-endian 64-bit MIPS Malta board
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from test_mips_malta import mips_check_wheezy
+
+
+class MaltaMachineConsole(LinuxKernelTest):
+
+ ASSET_WHEEZY_KERNEL = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mips/'
+ 'vmlinux-3.2.0-4-5kc-malta'),
+ '3e4ec154db080b3f1839f04dde83120654a33e5e1716863de576c47cb94f68f6')
+
+ ASSET_WHEEZY_DISK = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mips/'
+ 'debian_wheezy_mips_standard.qcow2'),
+ 'de03599285b8382ad309309a6c4869f6c6c42a5cfc983342bab9ec0dfa7849a2')
+
+ def test_wheezy(self):
+ kernel_path = self.ASSET_WHEEZY_KERNEL.fetch()
+ image_path = self.ASSET_WHEEZY_DISK.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 root=/dev/sda1')
+ mips_check_wheezy(self,
+ kernel_path, image_path, kernel_command_line, cpuinfo='MIPS 20Kc',
+ dl_file='/boot/initrd.img-3.2.0-4-5kc-malta',
+ hsum='d98b953bb4a41c0fc0fd8d19bbc691c08989ac52568c1d3054d92dfd890d3f06')
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_mips64_tuxrun.py b/tests/functional/test_mips64_tuxrun.py
new file mode 100755
index 0000000..0e4c659
--- /dev/null
+++ b/tests/functional/test_mips64_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunMips64Test(TuxRunBaselineTest):
+
+ ASSET_MIPS64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips64/vmlinux',
+ 'fe2882d216898ba2c56b49ba59f46ad392f36871f7fe325373cd926848b9dbdc')
+ ASSET_MIPS64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips64/rootfs.ext4.zst',
+ 'b8c98400216b6d4fb3b3ff05e9929aa015948b596cf0b82234813c84a4f7f4d5')
+
+ def test_mips64(self):
+ self.set_machine('malta')
+ self.root="sda"
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_MIPS64_KERNEL,
+ rootfs_asset=self.ASSET_MIPS64_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_mips64el_fuloong2e.py b/tests/functional/test_mips64el_fuloong2e.py
new file mode 100755
index 0000000..35e500b
--- /dev/null
+++ b/tests/functional/test_mips64el_fuloong2e.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the Lemote Fuloong-2E machine.
+#
+# Copyright (c) 2019 Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import subprocess
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import wait_for_console_pattern, skipUntrustedTest
+from unittest import skipUnless
+
+class MipsFuloong2e(LinuxKernelTest):
+
+ timeout = 60
+
+ ASSET_KERNEL = Asset(
+ ('http://archive.debian.org/debian/pool/main/l/linux/'
+ 'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb'),
+ '2a70f15b397f4ced632b0c15cb22660394190644146d804d60a4796eefbe1f50')
+
+ def test_linux_kernel_3_16(self):
+ kernel_path = self.archive_extract(
+ self.ASSET_KERNEL,
+ member='boot/vmlinux-3.16.0-6-loongson-2e')
+
+ self.set_machine('fuloong2e')
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+ @skipUntrustedTest()
+ @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available')
+ def test_linux_kernel_2_6_27_isa_serial(self):
+ # Recovery system for the Yeeloong laptop
+ # (enough to test the fuloong2e southbridge, accessing its ISA bus)
+ # http://dev.lemote.com/files/resource/download/rescue/rescue-yl
+ sha = 'ab588d3316777c62cc81baa20ac92e98b01955c244dff3794b711bc34e26e51d'
+ kernel_path = os.getenv('RESCUE_YL_PATH')
+ output = subprocess.check_output(['sha256sum', kernel_path])
+ checksum = output.split()[0]
+ assert checksum.decode("utf-8") == sha
+
+ self.set_machine('fuloong2e')
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path)
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote')
+ cpu_revision = 'CPU revision is: 00006302 (ICT Loongson-2)'
+ wait_for_console_pattern(self, cpu_revision)
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_mips64el_loongson3v.py b/tests/functional/test_mips64el_loongson3v.py
new file mode 100755
index 0000000..f85371e
--- /dev/null
+++ b/tests/functional/test_mips64el_loongson3v.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the Generic Loongson-3 Platform.
+#
+# Copyright (c) 2021 Jiaxun Yang <jiaxun.yang@flygoat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern, skipUntrustedTest
+
+
+class MipsLoongson3v(QemuSystemTest):
+ timeout = 60
+
+ ASSET_PMON = Asset(
+ ('https://github.com/loongson-community/pmon/'
+ 'releases/download/20210112/pmon-3avirt.bin'),
+ 'fcdf6bb2cb7885a4a62f31fcb0d5e368bac7b6cea28f40c6dfa678af22fea20a')
+
+ @skipUntrustedTest()
+ def test_pmon_serial_console(self):
+ self.set_machine('loongson3-virt')
+
+ pmon_path = self.ASSET_PMON.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-bios', pmon_path)
+ self.vm.launch()
+ wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_mips64el_malta.py b/tests/functional/test_mips64el_malta.py
new file mode 100755
index 0000000..3cc79b7
--- /dev/null
+++ b/tests/functional/test_mips64el_malta.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the little-endian 64-bit MIPS Malta board
+#
+# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import logging
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import skipIfMissingImports, skipFlakyTest, skipUntrustedTest
+
+from test_mips_malta import mips_check_wheezy
+
+
+class MaltaMachineConsole(LinuxKernelTest):
+
+ ASSET_KERNEL_2_63_2 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20130217T032700Z/pool/main/l/linux-2.6/'
+ 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb'),
+ '35eb476f03be589824b0310358f1c447d85e645b88cbcd2ac02b97ef560f9f8d')
+
+ def test_mips64el_malta(self):
+ """
+ This test requires the ar tool to extract "data.tar.gz" from
+ the Debian package.
+
+ The kernel can be rebuilt using this Debian kernel source [1] and
+ following the instructions on [2].
+
+ [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/
+ #linux-source-2.6.32_2.6.32-48
+ [2] https://kernel-team.pages.debian.net/kernel-handbook/
+ ch-common-tasks.html#s-common-official
+ """
+ kernel_path = self.archive_extract(
+ self.ASSET_KERNEL_2_63_2,
+ member='boot/vmlinux-2.6.32-5-5kc-malta')
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+ ASSET_KERNEL_3_19_3 = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/'
+ 'raw/9ad2df38/mips/malta/mips64el/'
+ 'vmlinux-3.19.3.mtoman.20150408'),
+ '8d3beb003bc66051ead98e7172139017fcf9ce2172576541c57e86418dfa5ab8')
+
+ ASSET_CPIO_R1 = Asset(
+ ('https://github.com/groeck/linux-build-test/'
+ 'raw/8584a59e/rootfs/mipsel64/'
+ 'rootfs.mipsel64r1.cpio.gz'),
+ '75ba10cd35fb44e32948eeb26974f061b703c81c4ba2fab1ebcacf1d1bec3b61')
+
+ @skipUntrustedTest()
+ def test_mips64el_malta_5KEc_cpio(self):
+ kernel_path = self.ASSET_KERNEL_3_19_3.fetch()
+ initrd_path = self.uncompress(self.ASSET_CPIO_R1)
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 console=tty '
+ + 'rdinit=/sbin/init noreboot')
+ self.vm.add_args('-cpu', '5KEc',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'MIPS 5KE')
+ exec_command_and_wait_for_pattern(self, 'uname -a',
+ '3.19.3.mtoman.20150408')
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+ ASSET_WHEEZY_KERNEL = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mipsel/'
+ 'vmlinux-3.2.0-4-5kc-malta'),
+ '5e8b725244c59745bb8b64f5d8f49f25fecfa549f3395fb6d19a3b9e5065b85b')
+
+ ASSET_WHEEZY_DISK = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mipsel/'
+ 'debian_wheezy_mipsel_standard.qcow2'),
+ '454f09ae39f7e6461c84727b927100d2c7813841f2a0a5dce328114887ecf914')
+
+ def test_wheezy(self):
+ kernel_path = self.ASSET_WHEEZY_KERNEL.fetch()
+ image_path = self.ASSET_WHEEZY_DISK.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 root=/dev/sda1')
+ mips_check_wheezy(self,
+ kernel_path, image_path, kernel_command_line, cpuinfo='MIPS 20Kc',
+ dl_file='/boot/initrd.img-3.2.0-4-5kc-malta',
+ hsum='7579f8b56c1187c7c04d0dc3c0c56c7a6314c5ddd3a9bf8803ecc7cf8a3be9f8')
+
+
+@skipIfMissingImports('numpy', 'cv2')
+class MaltaMachineFramebuffer(LinuxKernelTest):
+
+ timeout = 30
+
+ ASSET_KERNEL_4_7_0 = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/raw/a5966ca4b5/'
+ 'mips/malta/mips64el/vmlinux-4.7.0-rc1.I6400.gz'),
+ '1f64efc59968a3c328672e6b10213fe574bb2308d9d2ed44e75e40be59e9fbc2')
+
+ ASSET_TUXLOGO = Asset(
+ ('https://github.com/torvalds/linux/raw/v2.6.12/'
+ 'drivers/video/logo/logo_linux_vga16.ppm'),
+ 'b762f0d91ec018887ad1b334543c2fdf9be9fdfc87672b409211efaa3ea0ef79')
+
+ def do_test_i6400_framebuffer_logo(self, cpu_cores_count):
+ """
+ Boot Linux kernel and check Tux logo is displayed on the framebuffer.
+ """
+
+ import numpy as np
+ import cv2
+
+ screendump_path = self.scratch_file('screendump.pbm')
+
+ kernel_path = self.uncompress(self.ASSET_KERNEL_4_7_0)
+
+ tuxlogo_path = self.ASSET_TUXLOGO.fetch()
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'clocksource=GIC console=tty0 console=ttyS0')
+ self.vm.add_args('-kernel', kernel_path,
+ '-cpu', 'I6400',
+ '-smp', '%u' % cpu_cores_count,
+ '-vga', 'std',
+ '-append', kernel_command_line)
+ self.vm.launch()
+ framebuffer_ready = 'Console: switching to colour frame buffer device'
+ self.wait_for_console_pattern(framebuffer_ready)
+ self.vm.cmd('human-monitor-command', command_line='stop')
+ res = self.vm.cmd('human-monitor-command',
+ command_line='screendump %s' % screendump_path)
+ if 'unknown command' in res:
+ self.skipTest('screendump not available')
+ logger = logging.getLogger('framebuffer')
+
+ match_threshold = 0.95
+ screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR)
+ tuxlogo_bgr = cv2.imread(tuxlogo_path, cv2.IMREAD_COLOR)
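+        # Normalised cross-correlation of the Tux logo against the
+        # screendump; each location scoring above the threshold counts as
+        # one logo (the kernel draws one boot logo per CPU).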
+ result = cv2.matchTemplate(screendump_bgr, tuxlogo_bgr,
+ cv2.TM_CCOEFF_NORMED)
+ loc = np.where(result >= match_threshold)
+ tuxlogo_count = 0
+ h, w = tuxlogo_bgr.shape[:2]
+ debug_png = os.getenv('QEMU_TEST_CV2_SCREENDUMP_PNG_PATH')
+ for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1):
+ logger.debug('found Tux at position (x, y) = %s', pt)
+ cv2.rectangle(screendump_bgr, pt,
+ (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
+ if debug_png:
+ cv2.imwrite(debug_png, screendump_bgr)
+ self.assertGreaterEqual(tuxlogo_count, cpu_cores_count)
+
+ def test_mips_malta_i6400_framebuffer_logo_1core(self):
+ self.do_test_i6400_framebuffer_logo(1)
+
+ # XXX file tracking bug
+ @skipFlakyTest(bug_url=None)
+ def test_mips_malta_i6400_framebuffer_logo_7cores(self):
+ self.do_test_i6400_framebuffer_logo(7)
+
+ @skipFlakyTest(bug_url=None)
+ def test_mips_malta_i6400_framebuffer_logo_8cores(self):
+ self.do_test_i6400_framebuffer_logo(8)
+
+
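+# Importing MaltaMachineYAMON here makes its tests also run against the
+# mips64el binary.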
+from test_mipsel_malta import MaltaMachineYAMON
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_mips64el_replay.py b/tests/functional/test_mips64el_replay.py
new file mode 100755
index 0000000..26a6ccf
--- /dev/null
+++ b/tests/functional/test_mips64el_replay.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+#
+# Replay tests for the little-endian 64-bit MIPS Malta board
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, skipUntrustedTest
+from replay_kernel import ReplayKernelBase
+
+
+class Mips64elReplay(ReplayKernelBase):
+
+ ASSET_KERNEL_2_63_2 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20130217T032700Z/pool/main/l/linux-2.6/'
+ 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb'),
+ '35eb476f03be589824b0310358f1c447d85e645b88cbcd2ac02b97ef560f9f8d')
+
+ def test_replay_mips64el_malta(self):
+ self.set_machine('malta')
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_2_63_2,
+ member='boot/vmlinux-2.6.32-5-5kc-malta')
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
+
+
+ ASSET_KERNEL_3_19_3 = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/'
+ 'raw/9ad2df38/mips/malta/mips64el/'
+ 'vmlinux-3.19.3.mtoman.20150408'),
+ '8d3beb003bc66051ead98e7172139017fcf9ce2172576541c57e86418dfa5ab8')
+
+ ASSET_CPIO_R1 = Asset(
+ ('https://github.com/groeck/linux-build-test/'
+ 'raw/8584a59e/rootfs/mipsel64/'
+ 'rootfs.mipsel64r1.cpio.gz'),
+ '75ba10cd35fb44e32948eeb26974f061b703c81c4ba2fab1ebcacf1d1bec3b61')
+
+ @skipUntrustedTest()
+ def test_replay_mips64el_malta_5KEc_cpio(self):
+ self.set_machine('malta')
+ self.cpu = '5KEc'
+ kernel_path = self.ASSET_KERNEL_3_19_3.fetch()
+ initrd_path = self.uncompress(self.ASSET_CPIO_R1)
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0 console=tty '
+ 'rdinit=/sbin/init noreboot')
+ console_pattern = 'Boot successful.'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
+ args=('-initrd', initrd_path))
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_mips64el_tuxrun.py b/tests/functional/test_mips64el_tuxrun.py
new file mode 100755
index 0000000..0a24757
--- /dev/null
+++ b/tests/functional/test_mips64el_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunMips64ELTest(TuxRunBaselineTest):
+
+ ASSET_MIPS64EL_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips64el/vmlinux',
+ '0d2829a96f005229839c4cd586d4d8a136ea4b488d29821611c8e97f2266bfa9')
+ ASSET_MIPS64EL_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips64el/rootfs.ext4.zst',
+ '69c8b69a4f1582ce4c6f01a994968f5d73bffb2fc99cbeeeb26c8b5a28eaeb84')
+
+ def test_mips64el(self):
+ self.set_machine('malta')
+ self.root="sda"
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_MIPS64EL_KERNEL,
+ rootfs_asset=self.ASSET_MIPS64EL_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_mips_malta.py b/tests/functional/test_mips_malta.py
new file mode 100755
index 0000000..30279f0
--- /dev/null
+++ b/tests/functional/test_mips_malta.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the big-endian 32-bit MIPS Malta board
+#
+# Copyright (c) Philippe Mathieu-DaudƩ <f4bug@amsat.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import LinuxKernelTest, Asset, wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+def mips_run_common_commands(test, prompt='#'):
+ exec_command_and_wait_for_pattern(test,
+ 'uname -m',
+ 'mips')
+ exec_command_and_wait_for_pattern(test,
+ 'grep XT-PIC /proc/interrupts',
+ 'timer')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'grep XT-PIC /proc/interrupts',
+ 'serial')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'grep XT-PIC /proc/interrupts',
+ 'ata_piix')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'grep XT-PIC /proc/interrupts',
+ 'rtc')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'cat /proc/devices',
+ 'input')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'cat /proc/devices',
+ 'fb')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'cat /proc/ioports',
+ ' : serial')
+ wait_for_console_pattern(test, prompt)
+ exec_command_and_wait_for_pattern(test,
+ 'cat /proc/ioports',
+ ' : ata_piix')
+ wait_for_console_pattern(test, prompt)
+
+def mips_check_wheezy(test, kernel_path, image_path, kernel_command_line,
+ dl_file, hsum, nic='pcnet', cpuinfo='MIPS 24Kc'):
+ test.require_netdev('user')
+ test.require_device(nic)
+ test.set_machine('malta')
+
+    port = 8080
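+    # Guest port 8080 is forwarded from an automatically allocated host port
+    # (host port 0) for the HTTP download check below.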
+ test.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line,
+ '-drive', 'file=%s,snapshot=on' % image_path,
+ '-netdev', 'user,id=n1' +
+ ',tftp=' + os.path.basename(kernel_path) +
+ ',hostfwd=tcp:127.0.0.1:0-:%d' % port,
+ '-device', f'{nic},netdev=n1',
+ '-no-reboot')
+ test.vm.set_console()
+ test.vm.launch()
+
+ wait_for_console_pattern(test, 'login: ', 'Oops')
+ exec_command_and_wait_for_pattern(test, 'root', 'Password:')
+ exec_command_and_wait_for_pattern(test, 'root', ':~# ')
+ mips_run_common_commands(test)
+
+ exec_command_and_wait_for_pattern(test, 'cd /', '# ')
+ test.check_http_download(dl_file, hsum, port,
+ pythoncmd='python -m SimpleHTTPServer')
+
+ exec_command_and_wait_for_pattern(test, 'cat /proc/cpuinfo', cpuinfo)
+ exec_command_and_wait_for_pattern(test, 'cat /proc/devices', 'usb')
+ exec_command_and_wait_for_pattern(test, 'cat /proc/ioports',
+ ' : piix4_smbus')
+ exec_command_and_wait_for_pattern(test, 'lspci -d 11ab:4620',
+ 'GT-64120')
+ exec_command_and_wait_for_pattern(test,
+ 'cat /sys/bus/i2c/devices/i2c-0/name',
+ 'SMBus PIIX4 adapter')
+ exec_command_and_wait_for_pattern(test, 'cat /proc/mtd', 'YAMON')
+ # Empty 'Board Config' (64KB)
+ exec_command_and_wait_for_pattern(test, 'md5sum /dev/mtd2ro',
+ '0dfbe8aa4c20b52e1b8bf3cb6cbdf193')
+
+
+class MaltaMachineConsole(LinuxKernelTest):
+
+ ASSET_KERNEL_2_63_2 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20130217T032700Z/pool/main/l/linux-2.6/'
+ 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb'),
+ '16ca524148afb0626f483163e5edf352bc1ab0e4fc7b9f9d473252762f2c7a43')
+
+ def test_mips_malta(self):
+ kernel_path = self.archive_extract(
+ self.ASSET_KERNEL_2_63_2,
+ member='boot/vmlinux-2.6.32-5-4kc-malta')
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+ ASSET_KERNEL_4_5_0 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20160601T041800Z/pool/main/l/linux/'
+ 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb'),
+ '526b17d5889840888b76fc2c36a0ebde182c9b1410a3a1e68203c3b160eb2027')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/'
+ 'mips/rootfs.cpio.gz'),
+ 'dcfe3a7fe3200da3a00d176b95caaa086495eb158f2bff64afc67d7e1eb2cddc')
+
+ def test_mips_malta_cpio(self):
+ self.require_netdev('user')
+ self.set_machine('malta')
+ self.require_device('pcnet')
+
+ kernel_path = self.archive_extract(
+ self.ASSET_KERNEL_4_5_0,
+ member='boot/vmlinux-4.5.0-2-4kc-malta')
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 console=tty '
+ + 'rdinit=/sbin/init noreboot')
+ self.vm.add_args('-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-netdev', 'user,id=n1,tftp=' + self.scratch_file('boot'),
+ '-device', 'pcnet,netdev=n1',
+ '-no-reboot')
+ self.vm.launch()
+ self.wait_for_console_pattern('Boot successful.')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'BogoMIPS')
+ exec_command_and_wait_for_pattern(self, 'uname -a',
+ '4.5.0-2-4kc-malta #1 Debian')
+ mips_run_common_commands(self)
+
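+ # Bring up user-mode networking and fetch the kernel back over SLIRP's
+ # built-in TFTP server at 10.0.2.2 (rooted at the scratch 'boot'
+ # directory above), then verify its checksum.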
+ exec_command_and_wait_for_pattern(self, 'ip link set eth0 up',
+ 'eth0: link up')
+ exec_command_and_wait_for_pattern(self,
+ 'ip addr add 10.0.2.15 dev eth0',
+ '#')
+ exec_command_and_wait_for_pattern(self, 'route add default eth0', '#')
+ exec_command_and_wait_for_pattern(self,
+ 'tftp -g -r vmlinux-4.5.0-2-4kc-malta 10.0.2.2', '#')
+ exec_command_and_wait_for_pattern(self,
+ 'md5sum vmlinux-4.5.0-2-4kc-malta',
+ 'a98218a7efbdefb2dfdf9ecd08c98318')
+
+ exec_command_and_wait_for_pattern(self, 'reboot',
+ 'reboot: Restarting system')
+ # Wait for VM to shut down gracefully
+ self.vm.wait()
+
+ ASSET_WHEEZY_KERNEL = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mips/'
+ 'vmlinux-3.2.0-4-4kc-malta'),
+ '0377fcda31299213c10b8e5babe7260ef99188b3ae1aca6f56594abb71e7f67e')
+
+ ASSET_WHEEZY_DISK = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mips/'
+ 'debian_wheezy_mips_standard.qcow2'),
+ 'de03599285b8382ad309309a6c4869f6c6c42a5cfc983342bab9ec0dfa7849a2')
+
+ def test_wheezy(self):
+ kernel_path = self.ASSET_WHEEZY_KERNEL.fetch()
+ image_path = self.ASSET_WHEEZY_DISK.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 root=/dev/sda1')
+ mips_check_wheezy(self,
+ kernel_path, image_path, kernel_command_line, nic='e1000',
+ dl_file='/boot/initrd.img-3.2.0-4-4kc-malta',
+ hsum='ff0c0369143d9bbb9a6e6bc79322a2be535619df639e84103237f406e87493dc')
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_mips_replay.py b/tests/functional/test_mips_replay.py
new file mode 100755
index 0000000..4327481
--- /dev/null
+++ b/tests/functional/test_mips_replay.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+#
+# Replay tests for the big-endian 32-bit MIPS Malta board
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, skipSlowTest
+from replay_kernel import ReplayKernelBase
+
+
+class MipsReplay(ReplayKernelBase):
+
+ ASSET_KERNEL_2_63_2 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20130217T032700Z/pool/main/l/linux-2.6/'
+ 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb'),
+ '16ca524148afb0626f483163e5edf352bc1ab0e4fc7b9f9d473252762f2c7a43')
+
+ def test_replay_mips_malta(self):
+ self.set_machine('malta')
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_2_63_2,
+ member='boot/vmlinux-2.6.32-5-4kc-malta')
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
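+ # run_rr() (see replay_kernel.py) boots the kernel twice: first
+ # recording with roughly '-icount shift=5,rr=record,rrfile=...', then
+ # replaying the trace, checking the console pattern both times.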
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
+
+ ASSET_KERNEL_4_5_0 = Asset(
+ ('http://snapshot.debian.org/archive/debian/'
+ '20160601T041800Z/pool/main/l/linux/'
+ 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb'),
+ '526b17d5889840888b76fc2c36a0ebde182c9b1410a3a1e68203c3b160eb2027')
+
+ ASSET_INITRD = Asset(
+ ('https://github.com/groeck/linux-build-test/raw/'
+ '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/'
+ 'mips/rootfs.cpio.gz'),
+ 'dcfe3a7fe3200da3a00d176b95caaa086495eb158f2bff64afc67d7e1eb2cddc')
+
+ @skipSlowTest()
+ def test_replay_mips_malta_cpio(self):
+ self.set_machine('malta')
+ kernel_path = self.archive_extract(self.ASSET_KERNEL_4_5_0,
+ member='boot/vmlinux-4.5.0-2-4kc-malta')
+ initrd_path = self.uncompress(self.ASSET_INITRD)
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=ttyS0 console=tty '
+ 'rdinit=/sbin/init noreboot')
+ console_pattern = 'Boot successful.'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
+ args=('-initrd', initrd_path))
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_mips_tuxrun.py b/tests/functional/test_mips_tuxrun.py
new file mode 100755
index 0000000..6771dbd
--- /dev/null
+++ b/tests/functional/test_mips_tuxrun.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunMipsTest(TuxRunBaselineTest):
+
+ ASSET_MIPS_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips32/vmlinux',
+ 'b6f97fc698ae8c96456ad8c996c7454228074df0d7520dedd0a15e2913700a19')
+ ASSET_MIPS_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips32/rootfs.ext4.zst',
+ '87055cf3cbde3fd134e5039e7b87feb03231d8c4b21ee712b8ba3308dfa72f50')
+
+ def test_mips32(self):
+ self.set_machine('malta')
+ self.cpu = "mips32r6-generic"
+ self.root = "sda"
+ self.wait_for_shutdown = False
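+ # Malta's PIIX4 southbridge provides an IDE controller, so the tuxboot
+ # rootfs is attached as an ide-hd and appears as /dev/sda in the guest.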
+ self.common_tuxrun(kernel_asset=self.ASSET_MIPS_KERNEL,
+ rootfs_asset=self.ASSET_MIPS_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_mipsel_malta.py b/tests/functional/test_mipsel_malta.py
new file mode 100755
index 0000000..9ee2884
--- /dev/null
+++ b/tests/functional/test_mipsel_malta.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the little-endian 32-bit MIPS Malta board
+#
+# Copyright (c) Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, LinuxKernelTest, Asset
+from qemu_test import interrupt_interactive_console_until_pattern
+from qemu_test import wait_for_console_pattern
+
+from test_mips_malta import mips_check_wheezy
+
+
+class MaltaMachineConsole(LinuxKernelTest):
+
+ ASSET_KERNEL_4K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page4k.xz'),
+ '019e034094ac6cf3aa77df5e130fb023ce4dbc804b04bfcc560c6403e1ae6bdb')
+ ASSET_KERNEL_16K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page16k_up.xz'),
+ '3a54a10b3108c16a448dca9ea3db378733a27423befc2a45a5bdf990bd85e12c')
+ ASSET_KERNEL_64K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page64k_dbg.xz'),
+ 'ce21ff4b07a981ecb8a39db2876616f5a2473eb2ab459c6f67465b9914b0c6b6')
+
+ def do_test_mips_malta32el_nanomips(self, kernel):
+ kernel_path = self.uncompress(kernel)
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'mem=256m@@0x0 '
+ + 'console=ttyS0')
+ self.vm.add_args('-cpu', 'I7200',
+ '-no-reboot',
+ '-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.wait_for_console_pattern(console_pattern)
+
+ def test_mips_malta32el_nanomips_4k(self):
+ self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_4K)
+
+ def test_mips_malta32el_nanomips_16k_up(self):
+ self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_16K)
+
+ def test_mips_malta32el_nanomips_64k_dbg(self):
+ self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_64K)
+
+ ASSET_WHEEZY_KERNEL = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mipsel/'
+ 'vmlinux-3.2.0-4-4kc-malta'),
+ 'dc8a3648305b0201ca7a5cd135fe2890067a65d93c38728022bb0e656ad2bf9a')
+
+ ASSET_WHEEZY_DISK = Asset(
+ ('https://people.debian.org/~aurel32/qemu/mipsel/'
+ 'debian_wheezy_mipsel_standard.qcow2'),
+ '454f09ae39f7e6461c84727b927100d2c7813841f2a0a5dce328114887ecf914')
+
+ def test_wheezy(self):
+ kernel_path = self.ASSET_WHEEZY_KERNEL.fetch()
+ image_path = self.ASSET_WHEEZY_DISK.fetch()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
+ + 'console=ttyS0 root=/dev/sda1')
+ mips_check_wheezy(self,
+ kernel_path, image_path, kernel_command_line,
+ dl_file='/boot/initrd.img-3.2.0-4-4kc-malta',
+ hsum='9fc9f250ed56a74e35e704ddfd5a1c5a5625adefc5c9da91f649288d3ca000f0')
+
+
+class MaltaMachineYAMON(QemuSystemTest):
+
+ ASSET_YAMON_ROM = Asset(
+ ('https://s3-eu-west-1.amazonaws.com/downloads-mips/mips-downloads/'
+ 'YAMON/yamon-bin-02.22.zip'),
+ 'eef86f0eed0ef554f041dcd47b87eebea0e6f9f1184ed31f7e9e8b4a803860ab')
+
+ def test_mipsel_malta_yamon(self):
+ yamon_bin = 'yamon-02.22.bin'
+ self.archive_extract(self.ASSET_YAMON_ROM)
+ yamon_path = self.scratch_file(yamon_bin)
+
+ self.set_machine('malta')
+ self.vm.set_console()
+ self.vm.add_args('-bios', yamon_path)
+ self.vm.launch()
+
+ prompt = 'YAMON>'
+ pattern = 'YAMON ROM Monitor'
+ interrupt_interactive_console_until_pattern(self, pattern, prompt)
+ wait_for_console_pattern(self, prompt)
+ self.vm.shutdown()
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_mipsel_replay.py b/tests/functional/test_mipsel_replay.py
new file mode 100644
index 0000000..5f4796c
--- /dev/null
+++ b/tests/functional/test_mipsel_replay.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+#
+# Replay tests for the little-endian 32-bit MIPS Malta board
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, skipSlowTest
+from replay_kernel import ReplayKernelBase
+
+
+class MipselReplay(ReplayKernelBase):
+
+ ASSET_KERNEL_4K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page4k.xz'),
+ '019e034094ac6cf3aa77df5e130fb023ce4dbc804b04bfcc560c6403e1ae6bdb')
+ ASSET_KERNEL_16K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page16k_up.xz'),
+ '3a54a10b3108c16a448dca9ea3db378733a27423befc2a45a5bdf990bd85e12c')
+ ASSET_KERNEL_64K = Asset(
+ ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
+ 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
+ 'generic_nano32r6el_page64k_dbg.xz'),
+ 'ce21ff4b07a981ecb8a39db2876616f5a2473eb2ab459c6f67465b9914b0c6b6')
+
+ def do_test_replay_mips_malta32el_nanomips(self, kernel_asset):
+ self.set_machine('malta')
+ self.cpu = 'I7200'
+ kernel_path = self.uncompress(kernel_asset)
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'mem=256m@@0x0 '
+ 'console=ttyS0')
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
+
+ @skipSlowTest()
+ def test_replay_mips_malta32el_nanomips_4k(self):
+ self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_4K)
+
+ @skipSlowTest()
+ def test_replay_mips_malta32el_nanomips_16k_up(self):
+ self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_16K)
+
+ @skipSlowTest()
+ def test_replay_mips_malta32el_nanomips_64k_dbg(self):
+ self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_64K)
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_mipsel_tuxrun.py b/tests/functional/test_mipsel_tuxrun.py
new file mode 100755
index 0000000..d4b39ba
--- /dev/null
+++ b/tests/functional/test_mipsel_tuxrun.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunMipsELTest(TuxRunBaselineTest):
+
+ ASSET_MIPSEL_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips32el/vmlinux',
+ '660dd8c7a6ca7a32d37b4e6348865532ab0edb66802e8cc07869338444cf4929')
+ ASSET_MIPSEL_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/mips32el/rootfs.ext4.zst',
+ 'c5d69542bcaed54a4f34671671eb4be5c608ee02671d4d0436544367816a73b1')
+
+ def test_mips32el(self):
+ self.set_machine('malta')
+ self.cpu = "mips32r6-generic"
+ self.root = "sda"
+ self.wait_for_shutdown = False
+ self.common_tuxrun(kernel_asset=self.ASSET_MIPSEL_KERNEL,
+ rootfs_asset=self.ASSET_MIPSEL_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_multiprocess.py b/tests/functional/test_multiprocess.py
new file mode 100755
index 0000000..751cf10
--- /dev/null
+++ b/tests/functional/test_multiprocess.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+#
+# Test for multiprocess qemu
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+
+import os
+import socket
+
+from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern
+from qemu_test import exec_command, exec_command_and_wait_for_pattern
+
+class Multiprocess(QemuSystemTest):
+
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+ ASSET_KERNEL_X86 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux'
+ '/releases/31/Everything/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129')
+
+ ASSET_INITRD_X86 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux'
+ '/releases/31/Everything/x86_64/os/images/pxeboot/initrd.img'),
+ '3b6cb5c91a14c42e2f61520f1689264d865e772a1f0069e660a800d31dd61fb9')
+
+ ASSET_KERNEL_AARCH64 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux'
+ '/releases/31/Everything/aarch64/os/images/pxeboot/vmlinuz'),
+ '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527')
+
+ ASSET_INITRD_AARCH64 = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux'
+ '/releases/31/Everything/aarch64/os/images/pxeboot/initrd.img'),
+ '9fd230cab10b1dafea41cf00150e6669d37051fad133bd618d2130284e16d526')
+
+ def do_test(self, kernel_asset, initrd_asset,
+ kernel_command_line, machine_type):
+ """Main test method"""
+ self.require_accelerator('kvm')
+ self.require_device('x-pci-proxy-dev')
+
+ # Create socketpair to connect proxy and remote processes
+ proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX,
+ socket.SOCK_STREAM)
+ os.set_inheritable(proxy_sock.fileno(), True)
+ os.set_inheritable(remote_sock.fileno(), True)
+
+ kernel_path = kernel_asset.fetch()
+ initrd_path = initrd_asset.fetch()
+
+ # Create remote process
+ remote_vm = self.get_vm()
+ remote_vm.add_args('-machine', 'x-remote')
+ remote_vm.add_args('-nodefaults')
+ remote_vm.add_args('-device', 'lsi53c895a,id=lsi1')
+ remote_vm.add_args('-object', 'x-remote-object,id=robj1,'
+ 'devid=lsi1,fd='+str(remote_sock.fileno()))
+ remote_vm.launch()
+
+ # Create proxy process
+ self.vm.set_console()
+ self.vm.add_args('-machine', machine_type)
+ self.vm.add_args('-accel', 'kvm')
+ self.vm.add_args('-cpu', 'host')
+ self.vm.add_args('-object',
+ 'memory-backend-memfd,id=sysmem-file,size=2G')
+ self.vm.add_args('--numa', 'node,memdev=sysmem-file')
+ self.vm.add_args('-m', '2048')
+ self.vm.add_args('-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line)
+ self.vm.add_args('-device',
+ 'x-pci-proxy-dev,'
+ 'id=lsi1,fd='+str(proxy_sock.fileno()))
+ self.vm.launch()
+ wait_for_console_pattern(self, 'as init process',
+ 'Kernel panic - not syncing')
+ exec_command(self, 'mount -t sysfs sysfs /sys')
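+ # 1000:0012 is the PCI vendor:device ID of the LSI53C895A controller
+ # served by the remote process; seeing it in sysfs confirms the proxied
+ # device is visible to the guest.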
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/pci/devices/*/uevent',
+ 'PCI_ID=1000:0012')
+
+ def test_multiprocess(self):
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+ if self.arch == 'x86_64':
+ kernel_command_line += 'console=ttyS0 rdinit=/bin/bash'
+ self.do_test(self.ASSET_KERNEL_X86, self.ASSET_INITRD_X86,
+ kernel_command_line, 'pc')
+ elif self.arch == 'aarch64':
+ kernel_command_line += 'rdinit=/bin/bash console=ttyAMA0'
+ self.do_test(self.ASSET_KERNEL_AARCH64, self.ASSET_INITRD_AARCH64,
+ kernel_command_line, 'virt,gic-version=3')
+ else:
+ assert False
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_netdev_ethtool.py b/tests/functional/test_netdev_ethtool.py
new file mode 100755
index 0000000..ee1a397
--- /dev/null
+++ b/tests/functional/test_netdev_ethtool.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+#
+# ethtool tests for emulated network devices
+#
+# This test leverages ethtool's --test sequence to validate network
+# device behaviour.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from unittest import skip
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class NetDevEthtool(QemuSystemTest):
+
+ # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV
+ timeout = 45
+
+ # Fetch assets from the netdev-ethtool subdir of my shared test
+ # images directory on fileserver.linaro.org.
+ ASSET_BASEURL = ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/'
+ 'download?path=%2Fnetdev-ethtool&files=')
+ ASSET_BZIMAGE = Asset(
+ ASSET_BASEURL + "bzImage",
+ "ed62ee06ea620b1035747f3f66a5e9fc5d3096b29f75562ada888b04cd1c4baf")
+ ASSET_ROOTFS = Asset(
+ ASSET_BASEURL + "rootfs.squashfs",
+ "8f0207e3c4d40832ae73c1a927e42ca30ccb1e71f047acb6ddb161ba422934e6")
+
+ def common_test_code(self, netdev, extra_args=None):
+ self.set_machine('q35')
+
+ # This custom kernel has drivers for all the supported network
+ # devices we can emulate in QEMU
+ kernel = self.ASSET_BZIMAGE.fetch()
+ rootfs = self.ASSET_ROOTFS.fetch()
+
+ append = 'printk.time=0 console=ttyS0 '
+ append += 'root=/dev/sr0 rootfstype=squashfs '
+
+ # any additional kernel tweaks for the test
+ if extra_args:
+ append += extra_args
+
+ # finally invoke ethtool directly
+ append += ' init=/usr/sbin/ethtool -- -t eth1 offline'
+
+ # add the rootfs via a readonly cdrom image
+ drive = f"file={rootfs},if=ide,index=0,media=cdrom"
+
+ self.vm.add_args('-kernel', kernel,
+ '-append', append,
+ '-drive', drive,
+ '-device', netdev)
+
+ self.vm.set_console(console_index=0)
+ self.vm.launch()
+
+ wait_for_console_pattern(self,
+ "The test result is PASS",
+ "The test result is FAIL",
+ vm=None)
+ # no need to shut down gracefully, just finish
+ self.vm.kill()
+
+ def test_igb(self):
+ self.common_test_code("igb")
+
+ def test_igb_nomsi(self):
+ self.common_test_code("igb", "pci=nomsi")
+
+ # It seems the other popular cards we model in QEMU currently fail
+ # the pattern test with:
+ #
+ # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A
+ #
+ # So for now we skip them.
+
+ @skip("Incomplete reg 0x00178 support")
+ def test_e1000(self):
+ self.common_test_code("e1000")
+
+ @skip("Incomplete reg 0x00178 support")
+ def test_i82550(self):
+ self.common_test_code("i82550")
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_or1k_replay.py b/tests/functional/test_or1k_replay.py
new file mode 100755
index 0000000..2b60a93
--- /dev/null
+++ b/tests/functional/test_or1k_replay.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an OpenRISC-1000 SIM machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class Or1kReplay(ReplayKernelBase):
+
+ ASSET_DAY20 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day20.tar.xz',
+ 'ff9d7dd7c6bdba325bd85ee85c02db61ff653e129558aeffe6aff55bffb6763a')
+
+ def test_sim(self):
+ self.set_machine('or1k-sim')
+ kernel_path = self.archive_extract(self.ASSET_DAY20,
+ member='day20/vmlinux')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_or1k_sim.py b/tests/functional/test_or1k_sim.py
new file mode 100755
index 0000000..f9f0b69
--- /dev/null
+++ b/tests/functional/test_or1k_sim.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on an OpenRISC-1000 SIM machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class OpenRISC1kSimTest(LinuxKernelTest):
+
+ ASSET_DAY20 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day20.tar.xz',
+ 'ff9d7dd7c6bdba325bd85ee85c02db61ff653e129558aeffe6aff55bffb6763a')
+
+ def test_or1k_sim(self):
+ self.set_machine('or1k-sim')
+ self.archive_extract(self.ASSET_DAY20)
+ self.vm.set_console()
+ self.vm.add_args('-kernel', self.scratch_file('day20', 'vmlinux'))
+ self.vm.launch()
+ self.wait_for_console_pattern('QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_pc_cpu_hotplug_props.py b/tests/functional/test_pc_cpu_hotplug_props.py
new file mode 100755
index 0000000..2bed8ad
--- /dev/null
+++ b/tests/functional/test_pc_cpu_hotplug_props.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Ensure CPU die-id can be omitted on -device
+#
+# Copyright (c) 2019 Red Hat Inc
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+from qemu_test import QemuSystemTest
+
+class OmittedCPUProps(QemuSystemTest):
+
+ def test_no_die_id(self):
+ self.set_machine('pc')
+ self.vm.add_args('-nodefaults', '-S')
+ self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8')
+ self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0')
+ self.vm.launch()
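+ # Expect two CPUs: the boot CPU from '-smp 1' plus the one added via
+ # '-device' with the die-id omitted.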
+ self.assertEqual(len(self.vm.cmd('query-cpus-fast')), 2)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_e500.py b/tests/functional/test_ppc64_e500.py
new file mode 100755
index 0000000..f5fcad9
--- /dev/null
+++ b/tests/functional/test_ppc64_e500.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+#
+# Boot a Linux kernel on a e500 ppc64 machine and check the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class E500Test(LinuxKernelTest):
+
+ ASSET_BR2_E5500_UIMAGE = Asset(
+ 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc64_e5500-2023.11-8-gdcd9f0f6eb-20240104/uImage',
+ '2478187c455d6cca3984e9dfde9c635d824ea16236b85fd6b4809f744706deda')
+
+ ASSET_BR2_E5500_ROOTFS = Asset(
+ 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main//buildroot/qemu_ppc64_e5500-2023.11-8-gdcd9f0f6eb-20240104/rootfs.ext2',
+ '9035ef97237c84c7522baaff17d25cdfca4bb7a053d5e296e902919473423d76')
+
+ def test_ppc64_e500_buildroot(self):
+ self.set_machine('ppce500')
+ self.require_netdev('user')
+ self.cpu = 'e5500'
+
+ uimage_path = self.ASSET_BR2_E5500_UIMAGE.fetch()
+ rootfs_path = self.ASSET_BR2_E5500_ROOTFS.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', uimage_path,
+ '-append', 'root=/dev/vda',
+ '-drive', f'file={rootfs_path},if=virtio,format=raw',
+ '-snapshot', '-no-shutdown')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('Linux version')
+ self.wait_for_console_pattern('/init as init process')
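+ # 10.0.2.15 is the default address handed out by QEMU's user-mode
+ # (SLIRP) DHCP server.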
+ self.wait_for_console_pattern('lease of 10.0.2.15')
+ self.wait_for_console_pattern('buildroot login:')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+ exec_command_and_wait_for_pattern(self, 'poweroff', 'Power down')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_ppc64_hv.py b/tests/functional/test_ppc64_hv.py
new file mode 100755
index 0000000..d87f440
--- /dev/null
+++ b/tests/functional/test_ppc64_hv.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+#
+# Tests that specifically try to exercise hypervisor features of the
+# target machines. powernv supports the Power hypervisor ISA, and
+# pseries supports the nested-HV hypervisor spec.
+#
+# Copyright (c) 2023 IBM Corporation
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+import subprocess
+
+from datetime import datetime
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern, exec_command
+from qemu_test import skipIfMissingCommands, skipBigDataTest
+from qemu_test import exec_command_and_wait_for_pattern
+
+# Alpine is a lightweight distro that supports QEMU. These tests boot it
+# on the target machine and then run a nested QEMU guest inside it in KVM
+# mode, using the same Alpine distro image.
+# QEMU packages are downloaded and installed on each test run. That's not
+# a large download, but it may be more polite to create a qcow2 image with
+# QEMU already installed and use that instead.
+# XXX: The order of these tests seems to matter, see git blame.
+@skipIfMissingCommands("xorriso")
+@skipBigDataTest()
+class HypervisorTest(QemuSystemTest):
+
+ timeout = 1000
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+ panic_message = 'Kernel panic - not syncing'
+ good_message = 'VFS: Cannot open root device'
+
+ ASSET_ISO = Asset(
+ ('https://dl-cdn.alpinelinux.org/alpine/v3.21/'
+ 'releases/ppc64le/alpine-standard-3.21.0-ppc64le.iso'),
+ '7651ab4e3027604535c0b36e86c901b4695bf8fe97b908f5b48590f6baae8f30')
+
+ def extract_from_iso(self, iso, path):
+ """
+ Extracts a file from an iso file into the test workdir
+
+ :param iso: path to the iso file
+ :param path: path within the iso file of the file to be extracted
+ :returns: path of the extracted file
+ """
+ filename = self.scratch_file(os.path.basename(path))
+
+ cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename)
+ subprocess.run(cmd.split(),
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ os.chmod(filename, 0o600)
+
+ return filename
+
+ def setUp(self):
+ super().setUp()
+
+ self.iso_path = self.ASSET_ISO.fetch()
+ self.vmlinuz = self.extract_from_iso(self.iso_path, '/boot/vmlinuz-lts')
+ self.initramfs = self.extract_from_iso(self.iso_path, '/boot/initramfs-lts')
+
+ def do_start_alpine(self):
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+ self.vm.add_args("-kernel", self.vmlinuz)
+ self.vm.add_args("-initrd", self.initramfs)
+ self.vm.add_args("-smp", "4", "-m", "2g")
+ self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none,"
+ "id=drive0,read-only=true")
+
+ self.vm.launch()
+ ps1 = 'localhost:~#'
+ wait_for_console_pattern(self, 'localhost login:')
+ exec_command_and_wait_for_pattern(self, 'root', ps1)
+ # If the time is wrong, SSL certificates can fail.
+ exec_command_and_wait_for_pattern(self, 'date -s "' +
+ datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '"', ps1)
+ ps1 = 'alpine:~#'
+ exec_command_and_wait_for_pattern(self, 'setup-alpine -qe', ps1)
+ exec_command_and_wait_for_pattern(self, 'setup-apkrepos -c1', ps1)
+ exec_command_and_wait_for_pattern(self, 'apk update', ps1)
+ # Could upgrade here but it usually should not be necessary
+ # exec_command_and_wait_for_pattern(self, 'apk upgrade --available', ps1)
+
+ def do_stop_alpine(self):
+ exec_command(self, 'echo "TEST ME"')
+ wait_for_console_pattern(self, 'alpine:~#')
+ exec_command(self, 'poweroff')
+ wait_for_console_pattern(self, 'reboot: Power down')
+ self.vm.wait()
+
+ def do_setup_kvm(self):
+ ps1 = 'alpine:~#'
+ exec_command_and_wait_for_pattern(self, 'apk add qemu-system-ppc64', ps1)
+ exec_command_and_wait_for_pattern(self, 'modprobe kvm-hv', ps1)
+
+ # This uses the host's block device as the source file for the nested
+ # guest's install-media block device. This is a bit hacky, but it allows
+ # reusing the ISO without configuring a passthrough filesystem.
+ def do_test_kvm(self, hpt=False):
+ if hpt:
+ append = 'disable_radix'
+ else:
+ append = ''
+ exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g '
+ '-machine pseries,x-vof=on,accel=kvm '
+ '-machine cap-cfpc=broken,cap-sbbc=broken,'
+ 'cap-ibs=broken,cap-ccf-assist=off '
+ '-drive file=/dev/nvme0n1,format=raw,readonly=on '
+ '-initrd /media/nvme0n1/boot/initramfs-lts '
+ '-kernel /media/nvme0n1/boot/vmlinuz-lts '
+ '-append \'usbcore.nousb ' + append + '\'')
+ # usbcore.nousb is passed because the Alpine 3.21 kernel seems to
+ # crash in the XHCI USB driver.
+ ps1 = 'localhost:~#'
+ wait_for_console_pattern(self, 'localhost login:')
+ exec_command_and_wait_for_pattern(self, 'root', ps1)
+ exec_command(self, 'poweroff')
+ wait_for_console_pattern(self, 'reboot: Power down')
+ # Now wait for the host's prompt to come back
+ wait_for_console_pattern(self, 'alpine:~#')
+
+ def test_hv_pseries(self):
+ self.require_accelerator("tcg")
+ self.require_netdev('user')
+ self.set_machine('pseries')
+ self.vm.add_args("-accel", "tcg,thread=multi")
+ self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
+ self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on")
+ self.do_start_alpine()
+ self.do_setup_kvm()
+ self.do_test_kvm()
+ self.do_stop_alpine()
+
+ def test_hv_pseries_kvm(self):
+ self.require_accelerator("kvm")
+ self.require_netdev('user')
+ self.set_machine('pseries')
+ self.vm.add_args("-accel", "kvm")
+ self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
+ self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off")
+ self.do_start_alpine()
+ self.do_setup_kvm()
+ self.do_test_kvm()
+ self.do_stop_alpine()
+
+ def test_hv_powernv(self):
+ self.require_accelerator("tcg")
+ self.require_netdev('user')
+ self.set_machine('powernv')
+ self.vm.add_args("-accel", "tcg,thread=multi")
+ self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0',
+ '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0',
+ '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine')
+ self.do_start_alpine()
+ self.do_setup_kvm()
+ self.do_test_kvm()
+ self.do_test_kvm(True)
+ self.do_stop_alpine()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_mac99.py b/tests/functional/test_ppc64_mac99.py
new file mode 100755
index 0000000..dfd9c01
--- /dev/null
+++ b/tests/functional/test_ppc64_mac99.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a mac99 machine with a PPC970 CPU
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+class mac99Test(LinuxKernelTest):
+
+ ASSET_BR2_MAC99_LINUX = Asset(
+ 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc64_mac99-2023.11-8-gdcd9f0f6eb-20240105/vmlinux',
+ 'd59307437e4365f2cced0bbd1b04949f7397b282ef349b7cafd894d74aadfbff')
+
+ ASSET_BR2_MAC99_ROOTFS = Asset(
+ 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main//buildroot/qemu_ppc64_mac99-2023.11-8-gdcd9f0f6eb-20240105/rootfs.ext2',
+ 'bbd5fd8af62f580bc4e585f326fe584e22856572633a8333178ea6d4ed4955a4')
+
+ def test_ppc64_mac99_buildroot(self):
+ self.set_machine('mac99')
+
+ linux_path = self.ASSET_BR2_MAC99_LINUX.fetch()
+ rootfs_path = self.ASSET_BR2_MAC99_ROOTFS.fetch()
+
+ self.vm.set_console()
+
+ # Note: We need '-nographic' to get a serial console
+ self.vm.add_args('-kernel', linux_path,
+ '-append', 'root=/dev/sda',
+ '-drive', f'file={rootfs_path},format=raw',
+ '-snapshot', '-nographic')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('>> OpenBIOS')
+ self.wait_for_console_pattern('Linux version')
+ self.wait_for_console_pattern('/init as init process')
+ self.wait_for_console_pattern('gem 0000:f0:0e.0 eth0: Link is up at 100 Mbps')
+ self.wait_for_console_pattern('buildroot login:')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+ exec_command_and_wait_for_pattern(self, 'poweroff', 'Power down')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_ppc64_powernv.py b/tests/functional/test_ppc64_powernv.py
new file mode 100755
index 0000000..685e217
--- /dev/null
+++ b/tests/functional/test_ppc64_powernv.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc powernv machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class powernvMachine(LinuxKernelTest):
+
+ timeout = 90
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+ panic_message = 'Kernel panic - not syncing'
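+ # No root filesystem is provided, so a boot that reaches the VFS mount
+ # failure counts as a success.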
+ good_message = 'VFS: Cannot open root device'
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+ 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'),
+ '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f')
+
+ def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE):
+ self.require_accelerator("tcg")
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', command_line)
+ self.vm.launch()
+
+ def test_linux_boot(self):
+ self.set_machine('powernv')
+ self.do_test_linux_boot()
+ console_pattern = 'VFS: Cannot open root device'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+ def test_linux_smp_boot(self):
+ self.set_machine('powernv')
+ self.vm.add_args('-smp', '4')
+ self.do_test_linux_boot()
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_linux_smp_hpt_boot(self):
+ self.set_machine('powernv')
+ self.vm.add_args('-smp', '4')
+ self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
+ 'disable_radix')
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
+ self.panic_message)
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_linux_smt_boot(self):
+ self.set_machine('powernv')
+ self.vm.add_args('-smp', '4,threads=4')
+ self.do_test_linux_boot()
+ console_pattern = 'CPU maps initialized for 4 threads per core'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_linux_big_boot(self):
+ self.set_machine('powernv')
+ self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
+
+ # powernv does not support NUMA
+ self.do_test_linux_boot()
+ console_pattern = 'CPU maps initialized for 4 threads per core'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+
+ ASSET_EPAPR_KERNEL = Asset(
+ ('https://github.com/open-power/op-build/releases/download/v2.7/'
+ 'zImage.epapr'),
+ '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd')
+
+ def do_test_ppc64_powernv(self, proc):
+ self.require_accelerator("tcg")
+ kernel_path = self.ASSET_EPAPR_KERNEL.fetch()
+ self.vm.set_console()
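+ # Plug in a PCIe-to-PCI bridge, an NVMe controller, and e1000e and
+ # XHCI devices behind the bridge, presumably so the boot log exercises
+ # PCI(e) enumeration on each PowerNV chip generation.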
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', 'console=tty0 console=hvc0',
+ '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0',
+ '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234',
+ '-device', 'e1000e,bus=bridge1,addr=0x3',
+ '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2')
+ self.vm.launch()
+
+ self.wait_for_console_pattern("CPU: " + proc + " generation processor")
+ self.wait_for_console_pattern("zImage starting: loaded")
+ self.wait_for_console_pattern("Run /init as init process")
+ # Device detection output driven by udev probing is sometimes cut off
+ # from the console output; the S14silence-console init script is the
+ # suspected culprit.
+
+ def test_powernv8(self):
+ self.set_machine('powernv8')
+ self.do_test_ppc64_powernv('P8')
+
+ def test_powernv9(self):
+ self.set_machine('powernv9')
+ self.do_test_ppc64_powernv('P9')
+
+ def test_powernv10(self):
+ self.set_machine('powernv10')
+ self.do_test_ppc64_powernv('P10')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_ppc64_pseries.py b/tests/functional/test_ppc64_pseries.py
new file mode 100755
index 0000000..6705793
--- /dev/null
+++ b/tests/functional/test_ppc64_pseries.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class pseriesMachine(QemuSystemTest):
+
+ timeout = 90
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+ panic_message = 'Kernel panic - not syncing'
+ good_message = 'VFS: Cannot open root device'
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+ 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'),
+ '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f')
+
+ def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE):
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_path,
+ '-append', kernel_command_line)
+ self.vm.launch()
+
+ def test_ppc64_vof_linux_boot(self):
+ self.set_machine('pseries')
+ self.vm.add_args('-machine', 'x-vof=on')
+ self.do_test_ppc64_linux_boot()
+ console_pattern = 'VFS: Cannot open root device'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+ def test_ppc64_linux_boot(self):
+ self.set_machine('pseries')
+ self.do_test_ppc64_linux_boot()
+ console_pattern = 'VFS: Cannot open root device'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+ def test_ppc64_linux_smp_boot(self):
+ self.set_machine('pseries')
+ self.vm.add_args('-smp', '4')
+ self.do_test_ppc64_linux_boot()
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_ppc64_linux_hpt_smp_boot(self):
+ self.set_machine('pseries')
+ self.vm.add_args('-smp', '4')
+ self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
+ 'disable_radix')
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
+ self.panic_message)
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_ppc64_linux_smt_boot(self):
+ self.set_machine('pseries')
+ self.vm.add_args('-smp', '4,threads=4')
+ self.do_test_ppc64_linux_boot()
+ console_pattern = 'CPU maps initialized for 4 threads per core'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+ def test_ppc64_linux_big_boot(self):
+ self.set_machine('pseries')
+ self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
+ self.vm.add_args('-m', '512M',
+ '-object', 'memory-backend-ram,size=256M,id=m0',
+ '-object', 'memory-backend-ram,size=256M,id=m1')
+ self.vm.add_args('-numa', 'node,nodeid=0,memdev=m0')
+ self.vm.add_args('-numa', 'node,nodeid=1,memdev=m1')
+ self.do_test_ppc64_linux_boot()
+ console_pattern = 'CPU maps initialized for 4 threads per core'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
+ wait_for_console_pattern(self, console_pattern, self.panic_message)
+ wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_replay.py b/tests/functional/test_ppc64_replay.py
new file mode 100755
index 0000000..e8c9c4b
--- /dev/null
+++ b/tests/functional/test_ppc64_replay.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on ppc64 machines
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, skipFlakyTest
+from replay_kernel import ReplayKernelBase
+
+
+class Ppc64Replay(ReplayKernelBase):
+
+ ASSET_DAY19 = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day19.tar.xz'),
+ '20b1bb5a8488c664defbb5d283addc91a05335a936c63b3f5ff7eee74b725755')
+
+ @skipFlakyTest('https://gitlab.com/qemu-project/qemu/-/issues/2523')
+ def test_ppc64_e500(self):
+ self.set_machine('ppce500')
+ self.cpu = 'e5500'
+ kernel_path = self.archive_extract(self.ASSET_DAY19,
+ member='day19/uImage')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+ 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'),
+ '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f')
+
+ def test_ppc64_pseries(self):
+ self.set_machine('pseries')
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0'
+ console_pattern = 'VFS: Cannot open root device'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern)
+
+ def test_ppc64_powernv(self):
+ self.set_machine('powernv')
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + \
+ 'console=tty0 console=hvc0'
+ console_pattern = 'VFS: Cannot open root device'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern)
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_ppc64_reverse_debug.py b/tests/functional/test_ppc64_reverse_debug.py
new file mode 100755
index 0000000..5931ade
--- /dev/null
+++ b/tests/functional/test_ppc64_reverse_debug.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Reverse debugging test
+#
+# Copyright (c) 2020 ISP RAS
+#
+# Author:
+# Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import skipIfMissingImports, skipFlakyTest
+from reverse_debugging import ReverseDebugging
+
+
+@skipIfMissingImports('avocado.utils')
+class ReverseDebugging_ppc64(ReverseDebugging):
+
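+ # GDB register number of the program counter: the ppc64 description
+ # lists 32 GPRs and 32 FPRs first, so NIP is register 64 (0x40).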
+ REG_PC = 0x40
+
+ @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992")
+ def test_ppc64_pseries(self):
+ self.set_machine('pseries')
+ # SLOF branches back to its entry point, which causes this test
+ # to take the 'hit a breakpoint again' path. That's not a problem,
+ # just slightly different from the other machines.
+ self.endian_is_le = False
+ self.reverse_debugging()
+
+ @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992")
+ def test_ppc64_powernv(self):
+ self.set_machine('powernv')
+ self.endian_is_le = False
+ self.reverse_debugging()
+
+
+if __name__ == '__main__':
+ ReverseDebugging.main()
diff --git a/tests/functional/test_ppc64_tuxrun.py b/tests/functional/test_ppc64_tuxrun.py
new file mode 100755
index 0000000..e8f79c6
--- /dev/null
+++ b/tests/functional/test_ppc64_tuxrun.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from subprocess import check_call, DEVNULL
+import tempfile
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunPPC64Test(TuxRunBaselineTest):
+
+ def ppc64_common_tuxrun(self, kernel_asset, rootfs_asset, prefix):
+ self.set_machine('pseries')
+ self.cpu = 'POWER10'
+ self.console = 'hvc0'
+ self.root = 'sda'
+ self.extradev = 'spapr-vscsi'
+ # add device args to command line.
+ self.require_netdev('user')
+ self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
+ '-device', 'virtio-net,netdev=vnet')
+ self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}',
+ '-device', '{"driver":"virtio-net-pci","netdev":'
+ '"hostnet0","id":"net0","mac":"52:54:00:4c:e3:86",'
+ '"bus":"pci.0","addr":"0x9"}')
+ self.vm.add_args('-device', '{"driver":"qemu-xhci","p2":15,"p3":15,'
+ '"id":"usb","bus":"pci.0","addr":"0x2"}')
+ self.vm.add_args('-device', '{"driver":"virtio-scsi-pci","id":"scsi0"'
+ ',"bus":"pci.0","addr":"0x3"}')
+ self.vm.add_args('-device', '{"driver":"virtio-serial-pci","id":'
+ '"virtio-serial0","bus":"pci.0","addr":"0x4"}')
+ self.vm.add_args('-device', '{"driver":"scsi-cd","bus":"scsi0.0"'
+ ',"channel":0,"scsi-id":0,"lun":0,"device_id":'
+ '"drive-scsi0-0-0-0","id":"scsi0-0-0-0"}')
+ self.vm.add_args('-device', '{"driver":"virtio-balloon-pci",'
+ '"id":"balloon0","bus":"pci.0","addr":"0x6"}')
+ self.vm.add_args('-audiodev', '{"id":"audio1","driver":"none"}')
+ self.vm.add_args('-device', '{"driver":"usb-tablet","id":"input0"'
+ ',"bus":"usb.0","port":"1"}')
+ self.vm.add_args('-device', '{"driver":"usb-kbd","id":"input1"'
+ ',"bus":"usb.0","port":"2"}')
+ self.vm.add_args('-device', '{"driver":"VGA","id":"video0",'
+ '"vgamem_mb":16,"bus":"pci.0","addr":"0x7"}')
+ self.vm.add_args('-object', '{"qom-type":"rng-random","id":"objrng0"'
+ ',"filename":"/dev/urandom"}',
+ '-device', '{"driver":"virtio-rng-pci","rng":"objrng0"'
+ ',"id":"rng0","bus":"pci.0","addr":"0x8"}')
+ self.vm.add_args('-object', '{"qom-type":"cryptodev-backend-builtin",'
+ '"id":"objcrypto0","queues":1}',
+ '-device', '{"driver":"virtio-crypto-pci",'
+ '"cryptodev":"objcrypto0","id":"crypto0","bus"'
+ ':"pci.0","addr":"0xa"}')
+ self.vm.add_args('-device', '{"driver":"spapr-pci-host-bridge"'
+ ',"index":1,"id":"pci.1"}')
+ self.vm.add_args('-device', '{"driver":"spapr-vscsi","id":"scsi1"'
+ ',"reg":12288}')
+ self.vm.add_args('-m', '1G,slots=32,maxmem=2G',
+ '-object', 'memory-backend-ram,id=ram1,size=1G',
+ '-device', 'pc-dimm,id=dimm1,memdev=ram1')
+
+ # Create a temporary qcow2 and launch the test-case
+ with tempfile.NamedTemporaryFile(prefix=prefix,
+ suffix='.qcow2') as qcow2:
+ check_call([self.qemu_img, 'create', '-f', 'qcow2',
+ qcow2.name, ' 1G'],
+ stdout=DEVNULL, stderr=DEVNULL)
+
+ self.vm.add_args('-drive', 'file=' + qcow2.name +
+ ',format=qcow2,if=none,id='
+ 'drive-virtio-disk1',
+ '-device', 'virtio-blk-pci,bus=pci.0,'
+ 'addr=0xb,drive=drive-virtio-disk1,id=virtio-disk1'
+ ',bootindex=2')
+ self.common_tuxrun(kernel_asset, rootfs_asset=rootfs_asset,
+ drive="scsi-hd")
+
+ ASSET_PPC64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc64/vmlinux',
+ '8219d5cb26e7654ad7826fe8aee6290f7c01eef44f2cd6d26c15fe8f99e1c17c')
+ ASSET_PPC64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc64/rootfs.ext4.zst',
+ 'b68e12314303c5dd0fef37ae98021299a206085ae591893e73557af99a02d373')
+
+ def test_ppc64(self):
+ self.ppc64_common_tuxrun(kernel_asset=self.ASSET_PPC64_KERNEL,
+ rootfs_asset=self.ASSET_PPC64_ROOTFS,
+ prefix='tuxrun_ppc64_')
+
+ ASSET_PPC64LE_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc64le/vmlinux',
+ '21aea1fbc18bf6fa7d8ca4ea48d4940b2c8363c077acd564eb47d769b7495279')
+ ASSET_PPC64LE_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc64le/rootfs.ext4.zst',
+ '67d36a3f9597b738e8b7359bdf04500f4d9bb82fc35eaa65aa439d888b2392f4')
+
+ def test_ppc64le(self):
+ self.ppc64_common_tuxrun(kernel_asset=self.ASSET_PPC64LE_KERNEL,
+ rootfs_asset=self.ASSET_PPC64LE_ROOTFS,
+ prefix='tuxrun_ppc64le_')
+
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_ppc_40p.py b/tests/functional/test_ppc_40p.py
new file mode 100755
index 0000000..614972a
--- /dev/null
+++ b/tests/functional/test_ppc_40p.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a PReP/40p machine and checks its serial console.
+#
+# Copyright (c) Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern, skipUntrustedTest
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class IbmPrep40pMachine(QemuSystemTest):
+
+ timeout = 60
+
+ ASSET_BIOS = Asset(
+ ('http://ftpmirror.your.org/pub/misc/'
+ 'ftp.software.ibm.com/rs6000/firmware/'
+ '7020-40p/P12H0456.IMG'),
+ 'd957f79c73f760d1455d2286fcd901ed6d06167320eb73511b478a939be25b3f')
+ ASSET_NETBSD40 = Asset(
+ ('https://archive.netbsd.org/pub/NetBSD-archive/'
+ 'NetBSD-4.0/prep/installation/floppy/generic_com0.fs'),
+ 'f86236e9d01b3f0dd0f5d3b8d5bbd40c68e78b4db560a108358f5ad58e636619')
+ ASSET_NETBSD71 = Asset(
+ ('https://archive.netbsd.org/pub/NetBSD-archive/'
+ 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso'),
+ 'cc7cb290b06aaa839362deb7bd9f417ac5015557db24088508330f76c3f825ec')
+
+ # 12H0455 PPS Firmware Licensed Materials
+ # Property of IBM (C) Copyright IBM Corp. 1994.
+ # All rights reserved.
+ # U.S. Government Users Restricted Rights - Use, duplication or disclosure
+ # restricted by GSA ADP Schedule Contract with IBM Corp.
+ @skipUntrustedTest()
+ def test_factory_firmware_and_netbsd(self):
+ self.set_machine('40p')
+ self.require_accelerator("tcg")
+ bios_path = self.ASSET_BIOS.fetch()
+ drive_path = self.ASSET_NETBSD40.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-bios', bios_path,
+ '-drive',
+ f"file={drive_path},format=raw,if=floppy,read-only=true")
+ self.vm.launch()
+ os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007'
+ wait_for_console_pattern(self, os_banner)
+ wait_for_console_pattern(self, 'Model: IBM PPS Model 6015')
+
+ def test_openbios_192m(self):
+ self.set_machine('40p')
+ self.require_accelerator("tcg")
+ self.vm.set_console()
+ self.vm.add_args('-m', '192') # test fw_cfg
+
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> Memory: 192M')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,604')
+
+ def test_openbios_and_netbsd(self):
+ self.set_machine('40p')
+ self.require_accelerator("tcg")
+ drive_path = self.ASSET_NETBSD71.fetch()
+ self.vm.set_console()
+ self.vm.add_args('-cdrom', drive_path,
+ '-boot', 'd')
+
+ self.vm.launch()
+ wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9')
+
+ ASSET_40P_SANDALFOOT = Asset(
+ 'http://www.juneau-lug.org/zImage.initrd.sandalfoot',
+ '749ab02f576c6dc8f33b9fb022ecb44bf6a35a0472f2ea6a5e9956bc15933901')
+
+ def test_openbios_and_linux(self):
+ self.set_machine('40p')
+ self.require_accelerator("tcg")
+ drive_path = self.ASSET_40P_SANDALFOOT.fetch()
+ self.vm.set_console()
+ self.vm.add_args('-cdrom', drive_path,
+ '-boot', 'd')
+
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Please press Enter to activate this console.')
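+ # '\012' is an octal escape for a newline, i.e. pressing Enter at the
+ # prompt.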
+ exec_command_and_wait_for_pattern(self, '\012', '#')
+ exec_command_and_wait_for_pattern(self, 'uname -a', 'Linux ppc 2.4.18')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_74xx.py b/tests/functional/test_ppc_74xx.py
new file mode 100755
index 0000000..5386016
--- /dev/null
+++ b/tests/functional/test_ppc_74xx.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Smoke tests for 74xx cpus (aka G4).
+#
+# Copyright (c) 2021, IBM Corp.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+from qemu_test import wait_for_console_pattern
+
+class ppc74xxCpu(QemuSystemTest):
+
+ timeout = 5
+
+ def test_ppc_7400(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7400')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7410(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7410')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,74xx')
+
+ def test_ppc_7441(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7441')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7445(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7445')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7447(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7447')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7447a(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7447a')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7448(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7448')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx')
+
+ def test_ppc_7450(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7450')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7451(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7451')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7455(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7455')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7457(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7457')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+ def test_ppc_7457a(self):
+ self.require_accelerator("tcg")
+ self.set_machine('g3beige')
+ self.vm.set_console()
+ self.vm.add_args('-cpu', '7457a')
+ self.vm.launch()
+ wait_for_console_pattern(self, '>> OpenBIOS')
+ wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_amiga.py b/tests/functional/test_ppc_amiga.py
new file mode 100755
index 0000000..8600e2e
--- /dev/null
+++ b/tests/functional/test_ppc_amiga.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Test AmigaNG boards
+#
+# Copyright (c) 2023 BALATON Zoltan
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import subprocess
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class AmigaOneMachine(QemuSystemTest):
+
+ timeout = 90
+
+ ASSET_IMAGE = Asset(
+ ('https://www.hyperion-entertainment.com/index.php/'
+ 'downloads?view=download&format=raw&file=25'),
+ '8ff39330ba47d4f64de4ee8fd6809e9c010a9ef17fe51e95c3c1d53437cb481f')
+
+ def test_ppc_amigaone(self):
+ self.require_accelerator("tcg")
+ self.set_machine('amigaone')
+        # The asset is the A1Firmware_Floppy_05-Mar-2005.zip firmware archive.
+ self.archive_extract(self.ASSET_IMAGE, format="zip")
+ bios = self.scratch_file("u-boot-amigaone.bin")
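+        # The U-Boot binary is the last 512 KiB (524288 bytes) of the
+        # firmware updater image extracted from the archive.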
+ with open(bios, "wb") as bios_fh:
+ subprocess.run(['tail', '-c', '524288',
+ self.scratch_file("floppy_edition",
+ "updater.image")],
+ stdout=bios_fh)
+
+ self.vm.set_console()
+ self.vm.add_args('-bios', bios)
+ self.vm.launch()
+ wait_for_console_pattern(self, 'FLASH:')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_bamboo.py b/tests/functional/test_ppc_bamboo.py
new file mode 100755
index 0000000..fddcc24
--- /dev/null
+++ b/tests/functional/test_ppc_bamboo.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+#
+# Test that the Linux kernel boots on the ppc bamboo board and check the console
+#
+# Copyright (c) 2021 Red Hat
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class BambooMachine(QemuSystemTest):
+
+ timeout = 90
+
+ ASSET_IMAGE = Asset(
+ ('http://landley.net/aboriginal/downloads/binaries/'
+ 'system-image-powerpc-440fp.tar.gz'),
+ 'c12b58f841c775a0e6df4832a55afe6b74814d1565d08ddeafc1fb949a075c5e')
+
+ def test_ppc_bamboo(self):
+ self.set_machine('bamboo')
+ self.require_accelerator("tcg")
+ self.require_netdev('user')
+ self.archive_extract(self.ASSET_IMAGE)
+ self.vm.set_console()
+ self.vm.add_args('-kernel',
+ self.scratch_file('system-image-powerpc-440fp',
+ 'linux'),
+ '-initrd',
+ self.scratch_file('system-image-powerpc-440fp',
+ 'rootfs.cpio.gz'),
+ '-nic', 'user,model=rtl8139,restrict=on')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Type exit when done')
+ exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2',
+ '10.0.2.2 is alive!')
+ exec_command_and_wait_for_pattern(self, 'halt', 'System Halted')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_mac.py b/tests/functional/test_ppc_mac.py
new file mode 100755
index 0000000..9e4bc1a
--- /dev/null
+++ b/tests/functional/test_ppc_mac.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Boot a Linux kernel on the mac99 and g3beige ppc machines and check the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class MacTest(LinuxKernelTest):
+
+ ASSET_DAY15 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day15.tar.xz',
+ '03e0757c131d2959decf293a3572d3b96c5a53587165bf05ce41b2818a2bccd5')
+
+ def do_day15_test(self):
+ # mac99 also works with kvm_pr but we don't have a reliable way at
+ # the moment (e.g. by looking at /proc/modules) to detect whether
+ # we're running kvm_hv or kvm_pr. For now let's disable this test
+ # if we don't have TCG support.
+ self.require_accelerator("tcg")
+ self.archive_extract(self.ASSET_DAY15)
+ self.vm.add_args('-M', 'graphics=off')
+ self.launch_kernel(self.scratch_file('day15', 'invaders.elf'),
+ wait_for='QEMU advent calendar')
+
+ def test_ppc_g3beige(self):
+ self.set_machine('g3beige')
+ self.do_day15_test()
+
+ def test_ppc_mac99(self):
+ self.set_machine('mac99')
+ self.do_day15_test()
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_ppc_mpc8544ds.py b/tests/functional/test_ppc_mpc8544ds.py
new file mode 100755
index 0000000..0715410
--- /dev/null
+++ b/tests/functional/test_ppc_mpc8544ds.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Test that the Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class Mpc8544dsMachine(QemuSystemTest):
+
+ timeout = 90
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+ panic_message = 'Kernel panic - not syncing'
+
+ ASSET_IMAGE = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day04.tar.xz'),
+ '88bc83f3c9f3d633bcfc108a6342d677abca247066a2fb8d4636744a0d319f94')
+
+ def test_ppc_mpc8544ds(self):
+ self.require_accelerator("tcg")
+ self.set_machine('mpc8544ds')
+ kernel_file = self.archive_extract(self.ASSET_IMAGE,
+ member='creek/creek.bin')
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_file)
+ self.vm.launch()
+ wait_for_console_pattern(self, 'QEMU advent calendar 2020',
+ self.panic_message)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_replay.py b/tests/functional/test_ppc_replay.py
new file mode 100755
index 0000000..8382070
--- /dev/null
+++ b/tests/functional/test_ppc_replay.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+#
+# Replay tests for ppc machines
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class PpcReplay(ReplayKernelBase):
+
+ ASSET_DAY15 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day15.tar.xz',
+ '03e0757c131d2959decf293a3572d3b96c5a53587165bf05ce41b2818a2bccd5')
+
+ def do_day15_test(self):
+ self.require_accelerator("tcg")
+ kernel_path = self.archive_extract(self.ASSET_DAY15,
+ member='day15/invaders.elf')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar', args=('-M', 'graphics=off'))
+
+ def test_g3beige(self):
+ self.set_machine('g3beige')
+ self.do_day15_test()
+
+ def test_mac99(self):
+ self.set_machine('mac99')
+ self.do_day15_test()
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_ppc_sam460ex.py b/tests/functional/test_ppc_sam460ex.py
new file mode 100644
index 0000000..31cf9dd
--- /dev/null
+++ b/tests/functional/test_ppc_sam460ex.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a sam460ex machine with a PPC 460EX CPU
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class sam460exTest(LinuxKernelTest):
+
+ ASSET_BR2_SAM460EX_LINUX = Asset(
+ 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc_sam460ex-2023.11-8-gdcd9f0f6eb-20240105/vmlinux',
+ '6f46346f3e20e8b5fc050ff363f350f8b9d76a051b9e0bd7ea470cc680c14df2')
+
+ def test_ppc_sam460ex_buildroot(self):
+ self.set_machine('sam460ex')
+ self.require_netdev('user')
+
+ linux_path = self.ASSET_BR2_SAM460EX_LINUX.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-kernel', linux_path,
+ '-device', 'virtio-net-pci,netdev=net0',
+ '-netdev', 'user,id=net0')
+ self.vm.launch()
+
+ self.wait_for_console_pattern('Linux version')
+ self.wait_for_console_pattern('Hardware name: amcc,canyonlands 460EX')
+ self.wait_for_console_pattern('/init as init process')
+ self.wait_for_console_pattern('lease of 10.0.2.15 obtained')
+ self.wait_for_console_pattern('buildroot login:')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+ exec_command_and_wait_for_pattern(self, 'poweroff', 'System Halted')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_ppc_tuxrun.py b/tests/functional/test_ppc_tuxrun.py
new file mode 100755
index 0000000..5458a7f
--- /dev/null
+++ b/tests/functional/test_ppc_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunPPC32Test(TuxRunBaselineTest):
+
+ ASSET_PPC32_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc32/uImage',
+ 'aa5d81deabdb255a318c4bc5ffd6fdd2b5da1ef39f1955dcc35b671d258b68e9')
+ ASSET_PPC32_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/ppc32/rootfs.ext4.zst',
+ '67554f830269d6bf53b67c7dd206bcc821e463993d526b1644066fea8117019b')
+
+ def test_ppc32(self):
+ self.set_machine('ppce500')
+ self.cpu='e500mc'
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_PPC32_KERNEL,
+ rootfs_asset=self.ASSET_PPC32_ROOTFS,
+ drive="virtio-blk-pci")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_ppc_virtex_ml507.py b/tests/functional/test_ppc_virtex_ml507.py
new file mode 100755
index 0000000..8fe4354
--- /dev/null
+++ b/tests/functional/test_ppc_virtex_ml507.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+#
+# Test that the Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class VirtexMl507Machine(QemuSystemTest):
+
+ timeout = 90
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+ panic_message = 'Kernel panic - not syncing'
+
+ ASSET_IMAGE = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day08.tar.xz'),
+ 'cefe5b8aeb5e9d2d1d4fd22dcf48d917d68d5a765132bf2ddd6332dc393b824c')
+
+ def test_ppc_virtex_ml507(self):
+ self.require_accelerator("tcg")
+ self.set_machine('virtex-ml507')
+ self.archive_extract(self.ASSET_IMAGE)
+ self.vm.set_console()
+ self.vm.add_args('-kernel', self.scratch_file('hippo', 'hippo.linux'),
+ '-dtb', self.scratch_file('hippo',
+ 'virtex440-ml507.dtb'),
+ '-m', '512')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'QEMU advent calendar 2020',
+ self.panic_message)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_riscv32_tuxrun.py b/tests/functional/test_riscv32_tuxrun.py
new file mode 100755
index 0000000..3c57020
--- /dev/null
+++ b/tests/functional/test_riscv32_tuxrun.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunRiscV32Test(TuxRunBaselineTest):
+
+ ASSET_RISCV32_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv32/Image',
+ '872bc8f8e0d4661825d5f47f7bec64988e9d0a8bd5db8917d57e16f66d83b329')
+ ASSET_RISCV32_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv32/rootfs.ext4.zst',
+ '511ad34e63222db08d6c1da16fad224970de36517a784110956ba6a24a0ee5f6')
+
+ def test_riscv32(self):
+ self.set_machine('virt')
+ self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL,
+ rootfs_asset=self.ASSET_RISCV32_ROOTFS)
+
+ def test_riscv32_maxcpu(self):
+ self.set_machine('virt')
+ self.cpu='max'
+ self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL,
+ rootfs_asset=self.ASSET_RISCV32_ROOTFS)
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_riscv64_tuxrun.py b/tests/functional/test_riscv64_tuxrun.py
new file mode 100755
index 0000000..0d8de36
--- /dev/null
+++ b/tests/functional/test_riscv64_tuxrun.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunRiscV64Test(TuxRunBaselineTest):
+
+ ASSET_RISCV64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv64/Image',
+ '2bd8132a3bf21570290042324fff48c987f42f2a00c08de979f43f0662ebadba')
+ ASSET_RISCV64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv64/rootfs.ext4.zst',
+ 'aa4736a9872651dfc0d95e709465eedf1134fd19d42b8cb305bfd776f9801004')
+
+ ASSET_RISCV32_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv32/Image',
+ '872bc8f8e0d4661825d5f47f7bec64988e9d0a8bd5db8917d57e16f66d83b329')
+ ASSET_RISCV32_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/riscv32/rootfs.ext4.zst',
+ '511ad34e63222db08d6c1da16fad224970de36517a784110956ba6a24a0ee5f6')
+
+ def test_riscv64(self):
+ self.set_machine('virt')
+ self.common_tuxrun(kernel_asset=self.ASSET_RISCV64_KERNEL,
+ rootfs_asset=self.ASSET_RISCV64_ROOTFS)
+
+ def test_riscv64_maxcpu(self):
+ self.set_machine('virt')
+ self.cpu='max'
+ self.common_tuxrun(kernel_asset=self.ASSET_RISCV64_KERNEL,
+ rootfs_asset=self.ASSET_RISCV64_ROOTFS)
+
+ def test_riscv64_rv32(self):
+ self.set_machine('virt')
+ self.cpu='rv32'
+ self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL,
+ rootfs_asset=self.ASSET_RISCV32_ROOTFS)
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_riscv_opensbi.py b/tests/functional/test_riscv_opensbi.py
new file mode 100755
index 0000000..d077e40
--- /dev/null
+++ b/tests/functional/test_riscv_opensbi.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# OpenSBI boot test for RISC-V machines
+#
+# Copyright (c) 2022, Ventana Micro
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+from qemu_test import wait_for_console_pattern
+
+class RiscvOpenSBI(QemuSystemTest):
+
+ timeout = 5
+
+ def boot_opensbi(self):
+ self.vm.set_console()
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Platform Name')
+ wait_for_console_pattern(self, 'Boot HART MEDELEG')
+
+ def test_riscv_spike(self):
+ self.set_machine('spike')
+ self.boot_opensbi()
+
+ def test_riscv_sifive_u(self):
+ self.set_machine('sifive_u')
+ self.boot_opensbi()
+
+ def test_riscv_virt(self):
+ self.set_machine('virt')
+ self.boot_opensbi()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_rx_gdbsim.py b/tests/functional/test_rx_gdbsim.py
new file mode 100755
index 0000000..4924579
--- /dev/null
+++ b/tests/functional/test_rx_gdbsim.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+# Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern, skipFlakyTest
+
+
+class RxGdbSimMachine(QemuSystemTest):
+
+ timeout = 30
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+ ASSET_UBOOT = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/'
+ 'u-boot.bin'),
+ 'dd7dd4220cccf7aeb32227b26233bf39600db05c3f8e26005bcc2bf6c927207d')
+ ASSET_DTB = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/'
+ 'rx-gdbsim.dtb'),
+ 'aa278d9c1907a4501741d7ee57e7f65c02dd1b3e0323b33c6d4247f1b32cf29a')
+ ASSET_KERNEL = Asset(
+ ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/'
+ 'zImage'),
+ 'baa43205e74a7220ed8482188c5e9ce497226712abb7f4e7e4f825ce19ff9656')
+
+ def test_uboot(self):
+ """
+        Boots U-Boot and checks that the console is operational.
+ """
+ self.set_machine('gdbsim-r5f562n8')
+
+ uboot_path = self.ASSET_UBOOT.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args('-bios', uboot_path,
+ '-no-reboot')
+ self.vm.launch()
+ uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty'
+ wait_for_console_pattern(self, uboot_version)
+ gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)'
+ # FIXME limit baudrate on chardev, else we type too fast
+ # https://gitlab.com/qemu-project/qemu/-/issues/2691
+ #exec_command_and_wait_for_pattern(self, 'version', gcc_version)
+
+ @skipFlakyTest(bug_url="https://gitlab.com/qemu-project/qemu/-/issues/2691")
+ def test_linux_sash(self):
+ """
+ Boots a Linux kernel and checks that the console is operational.
+ """
+ self.set_machine('gdbsim-r5f562n7')
+
+ dtb_path = self.ASSET_DTB.fetch()
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon'
+ self.vm.add_args('-kernel', kernel_path,
+ '-dtb', dtb_path,
+ '-no-reboot')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)',
+ failure_message='Kernel panic - not syncing')
+ exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_s390x_ccw_virtio.py b/tests/functional/test_s390x_ccw_virtio.py
new file mode 100755
index 0000000..453711a
--- /dev/null
+++ b/tests/functional/test_s390x_ccw_virtio.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots an s390x Linux guest with ccw and PCI devices
+# attached and checks whether the devices are recognized by Linux
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+# Cornelia Huck <cohuck@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+import tempfile
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class S390CCWVirtioMachine(QemuSystemTest):
+ KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+ timeout = 120
+
+ ASSET_BUSTER_KERNEL = Asset(
+ ('https://snapshot.debian.org/archive/debian/'
+ '20201126T092837Z/dists/buster/main/installer-s390x/'
+ '20190702+deb10u6/images/generic/kernel.debian'),
+ 'd411d17c39ae7ad38d27534376cbe88b68b403c325739364122c2e6f1537e818')
+ ASSET_BUSTER_INITRD = Asset(
+ ('https://snapshot.debian.org/archive/debian/'
+ '20201126T092837Z/dists/buster/main/installer-s390x/'
+ '20190702+deb10u6/images/generic/initrd.debian'),
+ '836bbd0fe6a5ca81274c28c2b063ea315ce1868660866e9b60180c575fef9fd5')
+
+ ASSET_F31_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/31/Server/s390x/os'
+ '/images/kernel.img'),
+ '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b')
+ ASSET_F31_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/31/Server/s390x/os'
+ '/images/initrd.img'),
+ '04c46095b2c49020b1c2327158898b7db747e4892ae319726192fb949716aa9c')
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(self, success_message,
+ failure_message='Kernel panic - not syncing',
+ vm=vm)
+
+ def wait_for_crw_reports(self):
+ exec_command_and_wait_for_pattern(self,
+ 'while ! (dmesg -c | grep CRW) ; do sleep 1 ; done',
+ 'CRW reports')
+
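+    # counter making each "dm_clear <n>" marker unique, so we never match
+    # the output of an earlier clear by accident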
+ dmesg_clear_count = 1
+ def clear_guest_dmesg(self):
+ exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
+ r'echo dm_clear\ ' + str(self.dmesg_clear_count),
+ r'dm_clear ' + str(self.dmesg_clear_count))
+ self.dmesg_clear_count += 1
+
+ def test_s390x_devices(self):
+ self.set_machine('s390-ccw-virtio')
+
+ kernel_path = self.ASSET_BUSTER_KERNEL.fetch()
+ initrd_path = self.ASSET_BUSTER_INITRD.fetch()
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ 'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3')
+ self.vm.add_args('-nographic',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-cpu', 'max,prno-trng=off',
+ '-device', 'virtio-net-ccw,devno=fe.1.1111',
+ '-device',
+ 'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1',
+ '-device',
+ 'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2',
+ '-device', 'zpci,uid=5,target=zzz',
+ '-device', 'virtio-net-pci,id=zzz',
+ '-device', 'zpci,uid=0xa,fid=12,target=serial',
+ '-device', 'virtio-serial-pci,id=serial',
+ '-device', 'virtio-balloon-ccw')
+ self.vm.launch()
+
+ shell_ready = "sh: can't access tty; job control turned off"
+ self.wait_for_console_pattern(shell_ready)
+        # The first debug shell comes too early; we need to wait for device detection
+ exec_command_and_wait_for_pattern(self, 'exit', shell_ready)
+
+ ccw_bus_ids="0.1.1111 0.2.0000 0.3.1234"
+ pci_bus_ids="0005:00:00.0 000a:00:00.0"
+ exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/',
+ ccw_bus_ids)
+ exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/',
+ pci_bus_ids)
+ # check that the device at 0.2.0000 is in legacy mode, while the
+ # device at 0.3.1234 has the virtio-1 feature bit set
+ virtio_rng_features="00000000000000000000000000001100" + \
+ "10000000000000000000000000000000"
+ virtio_rng_features_legacy="00000000000000000000000000001100" + \
+ "00000000000000000000000000000000"
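+        # (sysfs prints one character per feature bit, bit 0 first, so the
+        # differing '1' at offset 32 is the VIRTIO_F_VERSION_1 bit referred
+        # to above)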
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features',
+ virtio_rng_features_legacy)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features',
+ virtio_rng_features)
+ # check that /dev/hwrng works - and that it's gone after ejecting
+ exec_command_and_wait_for_pattern(self,
+ 'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
+ '10+0 records out')
+ self.clear_guest_dmesg()
+ self.vm.cmd('device_del', id='rn1')
+ self.wait_for_crw_reports()
+ self.clear_guest_dmesg()
+ self.vm.cmd('device_del', id='rn2')
+ self.wait_for_crw_reports()
+ exec_command_and_wait_for_pattern(self,
+ 'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
+ 'dd: /dev/hwrng: No such device')
+ # verify that we indeed have virtio-net devices (without having the
+ # virtio-net driver handy)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/bus/ccw/devices/0.1.1111/cutype',
+ '3832/01')
+ exec_command_and_wait_for_pattern(self,
+ r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
+ r'0x1af4')
+ exec_command_and_wait_for_pattern(self,
+ r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
+ r'0x0001')
+ # check fid propagation
+ exec_command_and_wait_for_pattern(self,
+ r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
+ r'0x0000000c')
+ # add another device
+ self.clear_guest_dmesg()
+ self.vm.cmd('device_add', driver='virtio-net-ccw',
+ devno='fe.0.4711', id='net_4711')
+ self.wait_for_crw_reports()
+ exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do '
+ 'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;'
+ 'sleep 1 ; done ; ls /sys/bus/ccw/devices/',
+ '0.0.4711')
+ # and detach it again
+ self.clear_guest_dmesg()
+ self.vm.cmd('device_del', id='net_4711')
+ self.vm.event_wait(name='DEVICE_DELETED',
+ match={'data': {'device': 'net_4711'}})
+ self.wait_for_crw_reports()
+ exec_command_and_wait_for_pattern(self,
+ 'ls /sys/bus/ccw/devices/0.0.4711',
+ 'No such file or directory')
+ # test the virtio-balloon device
+ exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+ 'MemTotal: 115640 kB')
+ self.vm.cmd('human-monitor-command', command_line='balloon 96')
+ exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+ 'MemTotal: 82872 kB')
+ self.vm.cmd('human-monitor-command', command_line='balloon 128')
+ exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+ 'MemTotal: 115640 kB')
+
+
+ def test_s390x_fedora(self):
+ self.set_machine('s390-ccw-virtio')
+
+ kernel_path = self.ASSET_F31_KERNEL.fetch()
+
+ initrd_path = self.uncompress(self.ASSET_F31_INITRD, format="xz")
+
+ self.vm.set_console()
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 '
+ 'rd.plymouth=0 plymouth.enable=0 rd.rescue')
+ self.vm.add_args('-nographic',
+ '-smp', '4',
+ '-m', '512',
+ '-name', 'Some Guest Name',
+ '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line,
+ '-device', 'zpci,uid=7,target=n',
+ '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12',
+ '-device', 'virtio-rng-ccw,devno=fe.1.9876',
+ '-device', 'virtio-gpu-ccw,devno=fe.2.5432')
+ self.vm.launch()
+ self.wait_for_console_pattern('Kernel command line: %s'
+ % kernel_command_line)
+ self.wait_for_console_pattern('Entering emergency mode')
+
+ # Some tests to see whether the CLI options have been considered:
+ self.log.info("Test whether QEMU CLI options have been considered")
+ exec_command_and_wait_for_pattern(self,
+ 'while ! (dmesg | grep enP7p0s0) ; do sleep 1 ; done',
+ 'virtio_net virtio0 enP7p0s0: renamed')
+ exec_command_and_wait_for_pattern(self, 'lspci',
+ '0007:00:00.0 Class 0200: Device 1af4:1000')
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/class/net/enP7p0s0/address',
+ '02:ca:fe:fa:ce:12')
+ exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876')
+ exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432')
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+ 'processors : 4')
+ exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo',
+ 'MemTotal: 499848 kB')
+ exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo',
+ 'Extended Name: Some Guest Name')
+ exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo',
+ '30de4fd9-b4d5-409e-86a5-09b387f70bfa')
+
+ # Disable blinking cursor, then write some stuff into the framebuffer.
+ # QEMU's PPM screendumps contain uncompressed 24-bit values, while the
+ # framebuffer uses 32-bit, so we pad our text with some spaces when
+ # writing to the framebuffer. Since the PPM is uncompressed, we then
+ # can simply read the written "magic bytes" back from the PPM file to
+ # check whether the framebuffer is working as expected.
+ # Unfortunately, this test is flaky, so we don't run it by default
+ if os.getenv('QEMU_TEST_FLAKY_TESTS'):
+ self.log.info("Test screendump of virtio-gpu device")
+ exec_command_and_wait_for_pattern(self,
+ 'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
+ 'virtio_gpudrmfb frame buffer device')
+ exec_command_and_wait_for_pattern(self,
+ r'echo -e "\e[?25l" > /dev/tty0', ':/#')
+ exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
+ 'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
+ 'done',
+ ':/#')
+ exec_command_and_wait_for_pattern(self,
+ 'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt',
+ '12+0 records out')
+ with tempfile.NamedTemporaryFile(suffix='.ppm',
+ prefix='qemu-scrdump-') as ppmfile:
+ self.vm.cmd('screendump', filename=ppmfile.name)
+ ppmfile.seek(0)
+ line = ppmfile.readline()
+ self.assertEqual(line, b"P6\n")
+ line = ppmfile.readline()
+ self.assertEqual(line, b"1280 800\n")
+ line = ppmfile.readline()
+ self.assertEqual(line, b"255\n")
+ line = ppmfile.readline(256)
+ self.assertEqual(line, b"The quick fox jumps over a lazy dog\n")
+ else:
+ self.log.info("Skipped flaky screendump of virtio-gpu device test")
+
+ # Hot-plug a virtio-crypto device and see whether it gets accepted
+ self.log.info("Test hot-plug virtio-crypto device")
+ self.clear_guest_dmesg()
+ self.vm.cmd('object-add', qom_type='cryptodev-backend-builtin',
+ id='cbe0')
+ self.vm.cmd('device_add', driver='virtio-crypto-ccw', id='crypdev0',
+ cryptodev='cbe0', devno='fe.0.2342')
+ exec_command_and_wait_for_pattern(self,
+ 'while ! (dmesg -c | grep Accelerator.device) ; do'
+ ' sleep 1 ; done', 'Accelerator device is ready')
+ exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342')
+ self.vm.cmd('device_del', id='crypdev0')
+ self.vm.cmd('object-del', id='cbe0')
+ exec_command_and_wait_for_pattern(self,
+ 'while ! (dmesg -c | grep Start.virtcrypto_remove) ; do'
+ ' sleep 1 ; done', 'Start virtcrypto_remove.')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_s390x_replay.py b/tests/functional/test_s390x_replay.py
new file mode 100755
index 0000000..33b5843
--- /dev/null
+++ b/tests/functional/test_s390x_replay.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an s390x machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class S390xReplay(ReplayKernelBase):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+ 'releases/29/Everything/s390x/os/images/kernel.img'),
+ 'dace03b8ae0c9f670ebb9b8d6ce5eb24b62987f346de8f1300a439bb00bb99e7')
+
+ def test_s390_ccw_virtio(self):
+ self.set_machine('s390-ccw-virtio')
+ kernel_path = self.ASSET_KERNEL.fetch()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0'
+ console_pattern = 'Kernel command line: %s' % kernel_command_line
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9)
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_s390x_topology.py b/tests/functional/test_s390x_topology.py
new file mode 100755
index 0000000..1b5dc65
--- /dev/null
+++ b/tests/functional/test_s390x_topology.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright IBM Corp. 2023
+#
+# Author:
+# Pierre Morel <pmorel@linux.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class S390CPUTopology(QemuSystemTest):
+ """
+    S390x CPU topology consists of 4 topology layers, from bottom to top:
+    cores, sockets, books and drawers, plus 2 modifier attributes:
+    entitlement and dedication.
+    See: docs/system/s390x/cpu-topology.rst.
+
+    The S390x CPU topology is set up in different ways:
+    - implicitly from the '-smp' argument, by completing each topology
+      level one after the other, beginning with drawer 0, book 0 and
+      socket 0.
+    - explicitly from the '-device' argument on the QEMU command line
+    - explicitly by hotplug of a new CPU using QMP or HMP
+    - it is modified by using QMP 'set-cpu-topology'
+
+ The S390x modifier attribute entitlement depends on the machine
+ polarization, which can be horizontal or vertical.
+ The polarization is changed on a request from the guest.
+ """
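+    # For illustration (forms taken from the tests below), the topology can
+    # be given implicitly, e.g.:
+    #   -smp 13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24
+    # explicitly per CPU, e.g.:
+    #   -device max-s390x-cpu,core-id=4,socket-id=1,book-id=1,drawer-id=1
+    # or changed at runtime via QMP, e.g.:
+    #   set-cpu-topology {"core-id": 0, "socket-id": 2, "entitlement": "low"}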
+ timeout = 90
+ event_timeout = 10
+
+ KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 '
+ 'root=/dev/ram '
+ 'selinux=0 '
+ 'rdinit=/bin/sh')
+ ASSET_F35_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/35/Server/s390x/os'
+ '/images/kernel.img'),
+ '1f2dddfd11bb1393dd2eb2e784036fbf6fc11057a6d7d27f9eb12d3edc67ef73')
+
+ ASSET_F35_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/35/Server/s390x/os'
+ '/images/initrd.img'),
+ '1100145fbca00240c8c372ae4b89b48c99844bc189b3dfbc3f481dc60055ca46')
+
+ def wait_until_booted(self):
+ wait_for_console_pattern(self, 'no job control',
+ failure_message='Kernel panic - not syncing',
+ vm=None)
+
+ def check_topology(self, c, s, b, d, e, t):
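+        # c = core-id, s = socket-id, b = book-id, d = drawer-id,
+        # e = entitlement, t = dedicated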
+ res = self.vm.qmp('query-cpus-fast')
+ cpus = res['return']
+ for cpu in cpus:
+ core = cpu['props']['core-id']
+ socket = cpu['props']['socket-id']
+ book = cpu['props']['book-id']
+ drawer = cpu['props']['drawer-id']
+ entitlement = cpu.get('entitlement')
+ dedicated = cpu.get('dedicated')
+ if core == c:
+ self.assertEqual(drawer, d)
+ self.assertEqual(book, b)
+ self.assertEqual(socket, s)
+ self.assertEqual(entitlement, e)
+ self.assertEqual(dedicated, t)
+
+ def kernel_init(self):
+ """
+ We need a VM that supports CPU topology,
+        currently this is only the case when using KVM, not TCG.
+ We need a kernel supporting the CPU topology.
+ We need a minimal root filesystem with a shell.
+ """
+ self.require_accelerator("kvm")
+ kernel_path = self.ASSET_F35_KERNEL.fetch()
+ initrd_path = self.uncompress(self.ASSET_F35_INITRD, format="xz")
+
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+ self.vm.add_args('-nographic',
+ '-enable-kvm',
+ '-cpu', 'max,ctop=on',
+ '-m', '512',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line)
+
+ def system_init(self):
+ self.log.info("System init")
+ exec_command_and_wait_for_pattern(self,
+ """ mount proc -t proc /proc;
+ mount sys -t sysfs /sys;
+ cat /sys/devices/system/cpu/dispatching """,
+ '0')
+
+ def test_single(self):
+ """
+ This test checks the simplest topology with a single CPU.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ def test_default(self):
+ """
+ This test checks the implicit topology.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+ self.check_topology(1, 0, 0, 0, 'medium', False)
+ self.check_topology(2, 1, 0, 0, 'medium', False)
+ self.check_topology(3, 1, 0, 0, 'medium', False)
+ self.check_topology(4, 2, 0, 0, 'medium', False)
+ self.check_topology(5, 2, 0, 0, 'medium', False)
+ self.check_topology(6, 0, 1, 0, 'medium', False)
+ self.check_topology(7, 0, 1, 0, 'medium', False)
+ self.check_topology(8, 1, 1, 0, 'medium', False)
+ self.check_topology(9, 1, 1, 0, 'medium', False)
+ self.check_topology(10, 2, 1, 0, 'medium', False)
+ self.check_topology(11, 2, 1, 0, 'medium', False)
+ self.check_topology(12, 0, 0, 1, 'medium', False)
+
+ def test_move(self):
+ """
+ This test checks the topology modification by moving a CPU
+ to another socket: CPU 0 is moved from socket 0 to socket 2.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'})
+ self.assertEqual(res['return'], {})
+ self.check_topology(0, 2, 0, 0, 'low', False)
+
+ def test_dash_device(self):
+ """
+ This test verifies that a CPU defined with the '-device'
+ command line option finds its right place inside the topology.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.add_args('-device', 'max-s390x-cpu,core-id=10')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=4,socket-id=1,book-id=1,drawer-id=1')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true')
+
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.check_topology(10, 2, 1, 0, 'medium', False)
+ self.check_topology(1, 0, 1, 1, 'low', False)
+ self.check_topology(2, 0, 1, 1, 'medium', False)
+ self.check_topology(3, 1, 1, 1, 'high', False)
+ self.check_topology(4, 1, 1, 1, 'medium', False)
+ self.check_topology(5, 2, 1, 1, 'high', True)
+
+
+ def guest_set_dispatching(self, dispatching):
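+        # writing '1' to /sys/devices/system/cpu/dispatching requests
+        # vertical polarization, '0' horizontal (see test_polarization)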
+ exec_command(self,
+ f'echo {dispatching} > /sys/devices/system/cpu/dispatching')
+ self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/devices/system/cpu/dispatching', dispatching)
+
+
+ def test_polarization(self):
+ """
+        This test verifies that QEMU reports the expected polarization after
+        several guest polarization change requests.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('1')
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'vertical')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('0')
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+
+ def check_polarization(self, polarization):
+        # We need to wait for the change to have been propagated to the kernel
+ exec_command_and_wait_for_pattern(self,
+ "\n".join([
+ "timeout 1 sh -c 'while true",
+ 'do',
+ ' syspath="/sys/devices/system/cpu/cpu0/polarization"',
+ ' polarization="$(cat "$syspath")" || exit',
+ f' if [ "$polarization" = "{polarization}" ]; then',
+ ' exit 0',
+ ' fi',
+ ' sleep 0.01',
+                # the patterns we wait for must not appear verbatim in the
+                # command itself, hence the '' splits
+ "done' && echo succ''ess || echo fail''ure",
+ ]),
+ "success", "failure")
+
+
+ def test_entitlement(self):
+ """
+ This test verifies that QEMU modifies the entitlement
+ after a guest request and that the guest sees the change.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ self.check_polarization('horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('1')
+ self.check_polarization('vertical:medium')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:low')
+ self.check_topology(0, 0, 0, 0, 'low', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:medium')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'high'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:high')
+ self.check_topology(0, 0, 0, 0, 'high', False)
+
+ self.guest_set_dispatching('0')
+ self.check_polarization("horizontal")
+ self.check_topology(0, 0, 0, 0, 'high', False)
+
+
+ def test_dedicated(self):
+ """
+ This test verifies that QEMU adjusts the entitlement correctly when a
+ CPU is made dedicated.
+ QEMU retains the entitlement value when horizontal polarization is in effect.
+ For the guest, the field shows the effective value of the entitlement.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ self.check_polarization("horizontal")
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'dedicated': True})
+ self.assertEqual(res['return'], {})
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("horizontal")
+
+ self.guest_set_dispatching('1')
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("vertical:high")
+
+ self.guest_set_dispatching('0')
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("horizontal")
+
+
+ def test_socket_full(self):
+ """
+        This test verifies that QEMU does not allow overloading a socket.
+        Socket-id 0 on book-id 0 already contains CPUs 0 and 1 and cannot
+        accept any new CPU, while socket-id 0 on book-id 1 is free.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 2, 'socket-id': 0, 'book-id': 0})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 2, 'socket-id': 0, 'book-id': 1})
+ self.assertEqual(res['return'], {})
+
+ def test_dedicated_error(self):
+ """
+ This test verifies that QEMU refuses to lower the entitlement
+ of a dedicated CPU
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'dedicated': True})
+ self.assertEqual(res['return'], {})
+
+ self.check_topology(0, 0, 0, 0, 'high', True)
+
+ self.guest_set_dispatching('1')
+
+ self.check_topology(0, 0, 0, 0, 'high', True)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low', 'dedicated': True})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low'})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium', 'dedicated': True})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium'})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low', 'dedicated': False})
+ self.assertEqual(res['return'], {})
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium', 'dedicated': False})
+ self.assertEqual(res['return'], {})
+
+ def test_move_error(self):
+ """
+        This test verifies that QEMU refuses to move a CPU to a
+        nonexistent location.
+ """
+ self.set_machine('s390-ccw-virtio')
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_s390x_tuxrun.py b/tests/functional/test_s390x_tuxrun.py
new file mode 100755
index 0000000..8df3c68
--- /dev/null
+++ b/tests/functional/test_s390x_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunS390xTest(TuxRunBaselineTest):
+
+ ASSET_S390X_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/s390/bzImage',
+ 'ee67e91db52a2aed104a7c72b2a08987c678f8179c029626789c35d6dd0fedf1')
+ ASSET_S390X_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/s390/rootfs.ext4.zst',
+ 'bff7971fc2fef56372d98afe4557b82fd0a785a241e44c29b058e577ad1bbb44')
+
+ def test_s390(self):
+ self.set_machine('s390-ccw-virtio')
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_S390X_KERNEL,
+ rootfs_asset=self.ASSET_S390X_ROOTFS,
+ drive="virtio-blk-ccw",
+ haltmsg="Requesting system halt")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_sh4_r2d.py b/tests/functional/test_sh4_r2d.py
new file mode 100755
index 0000000..03a64837
--- /dev/null
+++ b/tests/functional/test_sh4_r2d.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+#
+# Boot a Linux kernel on a r2d sh4 machine and check the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset, skipFlakyTest
+
+
+class R2dTest(LinuxKernelTest):
+
+ ASSET_DAY09 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day09.tar.xz',
+ 'a61b44d2630a739d1380cc4ff4b80981d47ccfd5992f1484ccf48322c35f09ac')
+
+    # This test fails on 6-10% of runs on various hosts; the failures
+    # look like an issue with a buggy kernel.
+ # XXX file tracking bug
+ @skipFlakyTest(bug_url=None)
+ def test_r2d(self):
+ self.set_machine('r2d')
+ self.archive_extract(self.ASSET_DAY09)
+ self.vm.add_args('-append', 'console=ttySC1')
+ self.launch_kernel(self.scratch_file('day09', 'zImage'),
+ console_index=1,
+ wait_for='QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_sh4_tuxrun.py b/tests/functional/test_sh4_tuxrun.py
new file mode 100755
index 0000000..1748f8c
--- /dev/null
+++ b/tests/functional/test_sh4_tuxrun.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset, exec_command_and_wait_for_pattern
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunSh4Test(TuxRunBaselineTest):
+
+ ASSET_SH4_KERNEL = Asset(
+ 'https://storage.tuxboot.com/20230331/sh4/zImage',
+ '29d9b2aba604a0f53a5dc3b5d0f2b8e35d497de1129f8ee5139eb6fdf0db692f')
+ ASSET_SH4_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/20230331/sh4/rootfs.ext4.zst',
+ '3592a7a3d5a641e8b9821449e77bc43c9904a56c30d45da0694349cfd86743fd')
+
+ def test_sh4(self):
+ self.set_machine('r2d')
+ self.cpu='sh7785'
+ self.root='sda'
+ self.console='ttySC1'
+
+ # The test is currently too unstable to do much in userspace
+ # so we skip common_tuxrun and do a minimal boot and shutdown.
+ (kernel, disk, dtb) = self.fetch_tuxrun_assets(self.ASSET_SH4_KERNEL,
+ self.ASSET_SH4_ROOTFS)
+
+ # the console comes on the second serial port
+ self.prepare_run(kernel, disk,
+ "driver=ide-hd,bus=ide.0,unit=0",
+ console_index=1)
+ self.vm.launch()
+
+ self.wait_for_console_pattern("tuxtest login:")
+ exec_command_and_wait_for_pattern(self, 'root', 'root@tuxtest:~#')
+ exec_command_and_wait_for_pattern(self, 'halt',
+ "reboot: System halted")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_sh4eb_r2d.py b/tests/functional/test_sh4eb_r2d.py
new file mode 100755
index 0000000..473093b
--- /dev/null
+++ b/tests/functional/test_sh4eb_r2d.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Boot a Linux kernel on a r2d sh4eb machine and check the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+
+
+class R2dEBTest(LinuxKernelTest):
+
+ ASSET_TGZ = Asset(
+ 'https://landley.net/bin/mkroot/0.8.11/sh4eb.tgz',
+ 'be8c6cb5aef8406899dc5aa5e22b6aa45840eb886cdd3ced51555c10577ada2c')
+
+ def test_sh4eb_r2d(self):
+ self.set_machine('r2d')
+ self.archive_extract(self.ASSET_TGZ)
+ self.vm.add_args('-append', 'console=ttySC1 noiotrap')
+ self.launch_kernel(self.scratch_file('sh4eb', 'linux-kernel'),
+ initrd=self.scratch_file('sh4eb',
+ 'initramfs.cpio.gz'),
+ console_index=1, wait_for='Type exit when done')
+ exec_command_and_wait_for_pattern(self, 'exit', 'Restarting system')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_sparc64_sun4u.py b/tests/functional/test_sparc64_sun4u.py
new file mode 100755
index 0000000..27ac289
--- /dev/null
+++ b/tests/functional/test_sparc64_sun4u.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+# Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class Sun4uMachine(QemuSystemTest):
+ """Boots the Linux kernel and checks that the console is operational"""
+
+ timeout = 90
+
+ ASSET_IMAGE = Asset(
+ ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+ 'day23.tar.xz'),
+ 'a3ed92450704af244178351afd0e769776e7decb298e95a63abfd9a6e3f6c854')
+
+ def test_sparc64_sun4u(self):
+ self.set_machine('sun4u')
+ kernel_file = self.archive_extract(self.ASSET_IMAGE,
+ member='day23/vmlinux')
+ self.vm.set_console()
+ self.vm.add_args('-kernel', kernel_file,
+ '-append', 'printk.time=0')
+ self.vm.launch()
+ wait_for_console_pattern(self, 'Starting logging: OK')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_sparc64_tuxrun.py b/tests/functional/test_sparc64_tuxrun.py
new file mode 100755
index 0000000..0d7b43d
--- /dev/null
+++ b/tests/functional/test_sparc64_tuxrun.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex BennƩe <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunSparc64Test(TuxRunBaselineTest):
+
+ ASSET_SPARC64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/sparc64/vmlinux',
+ 'a04cfb2e70a264051d161fdd93aabf4b2a9472f2e435c14ed18c5848c5fed261')
+ ASSET_SPARC64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/sparc64/rootfs.ext4.zst',
+ '479c3dc104c82b68be55e2c0c5c38cd473d0b37ad4badccde4775bb88ce34611')
+
+ def test_sparc64(self):
+ self.set_machine('sun4u')
+ self.root='sda'
+ self.wait_for_shutdown=False
+ self.common_tuxrun(kernel_asset=self.ASSET_SPARC64_KERNEL,
+ rootfs_asset=self.ASSET_SPARC64_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_sparc_replay.py b/tests/functional/test_sparc_replay.py
new file mode 100755
index 0000000..865d648
--- /dev/null
+++ b/tests/functional/test_sparc_replay.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on a sparc sun4m machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class SparcReplay(ReplayKernelBase):
+
+ ASSET_DAY11 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day11.tar.xz',
+ 'c776533ba756bf4dd3f1fc4c024fb50ef0d853e05c5f5ddf0900a32d1eaa49e0')
+
+ def test_replay(self):
+ self.set_machine('SS-10')
+ kernel_path = self.archive_extract(self.ASSET_DAY11,
+ member="day11/zImage.elf")
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_sparc_sun4m.py b/tests/functional/test_sparc_sun4m.py
new file mode 100755
index 0000000..7cd28eb
--- /dev/null
+++ b/tests/functional/test_sparc_sun4m.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on a sparc sun4m machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class Sun4mTest(LinuxKernelTest):
+
+ ASSET_DAY11 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day11.tar.xz',
+ 'c776533ba756bf4dd3f1fc4c024fb50ef0d853e05c5f5ddf0900a32d1eaa49e0')
+
+ def test_sparc_ss20(self):
+ self.set_machine('SS-20')
+ self.archive_extract(self.ASSET_DAY11)
+ self.launch_kernel(self.scratch_file('day11', 'zImage.elf'),
+ wait_for='QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_version.py b/tests/functional/test_version.py
new file mode 100755
index 0000000..3ab3b67
--- /dev/null
+++ b/tests/functional/test_version.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Version check example test
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+# Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+
+from qemu_test import QemuSystemTest
+
+
+class Version(QemuSystemTest):
+
+ def test_qmp_human_info_version(self):
+ self.set_machine('none')
+ self.vm.add_args('-nodefaults')
+ self.vm.launch()
+ res = self.vm.cmd('human-monitor-command',
+ command_line='info version')
+ self.assertRegex(res, r'^(\d+\.\d+\.\d)')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_virtio_balloon.py b/tests/functional/test_virtio_balloon.py
new file mode 100755
index 0000000..5877b6c
--- /dev/null
+++ b/tests/functional/test_virtio_balloon.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+#
+# virtio-balloon tests
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import time
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
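+# 18446744073709551615 is 2**64 - 1, i.e. an all-ones 64-bit value; the tests
+# below treat it as "stat not reported by the guest".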
+UNSET_STATS_VALUE = 18446744073709551615
+
+
+class VirtioBalloonx86(QemuSystemTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129')
+
+ ASSET_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/initrd.img'),
+ '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b')
+
+ ASSET_DISKIMAGE = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2'),
+ 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0')
+
+ DEFAULT_KERNEL_PARAMS = ('root=/dev/vda1 console=ttyS0 net.ifnames=0 '
+ 'rd.rescue quiet')
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(
+ self,
+ success_message,
+ failure_message="Kernel panic - not syncing",
+ vm=vm,
+ )
+
+ def mount_root(self):
+ self.wait_for_console_pattern('Entering emergency mode.')
+ prompt = '# '
+ self.wait_for_console_pattern(prompt)
+
+ # Synchronize on virtio-block driver creating the root device
+ exec_command_and_wait_for_pattern(self,
+ "while ! (dmesg -c | grep vda:) ; do sleep 1 ; done",
+ "vda1")
+
+ exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot',
+ prompt)
+ exec_command_and_wait_for_pattern(self, 'chroot /sysroot',
+ prompt)
+ exec_command_and_wait_for_pattern(self, "modprobe virtio-balloon",
+ prompt)
+
+ def assert_initial_stats(self):
+ ret = self.vm.qmp('qom-get',
+ {'path': '/machine/peripheral/balloon',
+ 'property': 'guest-stats'})['return']
+ when = ret.get('last-update')
+ assert when == 0
+ stats = ret.get('stats')
+ for name, val in stats.items():
+ assert val == UNSET_STATS_VALUE
+
+ def assert_running_stats(self, then):
+ # We told QEMU to refresh stats every 100ms, but there can
+ # be a delay between the virtio-balloon driver being modprobed
+ # and the first stats refresh actually appearing.
+ # Retry a few times for robustness under heavy load
+ retries = 10
+ when = 0
+ while when == 0 and retries:
+ ret = self.vm.qmp('qom-get',
+ {'path': '/machine/peripheral/balloon',
+ 'property': 'guest-stats'})['return']
+ when = ret.get('last-update')
+ if when == 0:
+ retries = retries - 1
+ time.sleep(0.5)
+
+ now = time.time()
+
+ assert when > then and when < now
+ stats = ret.get('stats')
+ # Stats we expect this particular kernel to have set
+ expectData = [
+ "stat-available-memory",
+ "stat-disk-caches",
+ "stat-free-memory",
+ "stat-htlb-pgalloc",
+ "stat-htlb-pgfail",
+ "stat-major-faults",
+ "stat-minor-faults",
+ "stat-swap-in",
+ "stat-swap-out",
+ "stat-total-memory",
+ ]
+ for name, val in stats.items():
+ if name in expectData:
+ assert val != UNSET_STATS_VALUE
+ else:
+ assert val == UNSET_STATS_VALUE
+
+ def test_virtio_balloon_stats(self):
+ self.set_machine('q35')
+ self.require_accelerator("kvm")
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+ diskimage_path = self.ASSET_DISKIMAGE.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args("-S")
+ self.vm.add_args("-cpu", "max")
+ self.vm.add_args("-m", "2G")
+ # Slow down BIOS phase with boot menu, so that after a system
+ # reset, we can reliably catch the clean stats again in BIOS
+ # phase before the guest OS launches
+ self.vm.add_args("-boot", "menu=on")
+ self.vm.add_args("-accel", "kvm")
+ self.vm.add_args("-device", "virtio-balloon,id=balloon")
+ self.vm.add_args('-drive',
+ f'file={diskimage_path},if=none,id=drv0,snapshot=on')
+ self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
+ 'drive=drv0,id=virtio-disk0,bootindex=1')
+
+ self.vm.add_args(
+ "-kernel",
+ kernel_path,
+ "-initrd",
+ initrd_path,
+ "-append",
+ self.DEFAULT_KERNEL_PARAMS
+ )
+ self.vm.launch()
+
+ # Poll stats at 100ms
+ self.vm.qmp('qom-set',
+ {'path': '/machine/peripheral/balloon',
+ 'property': 'guest-stats-polling-interval',
+ 'value': 100 })
+
+ # We've not run any guest code yet, neither BIOS nor guest OS,
+ # so stats should all be at their default values
+ self.assert_initial_stats()
+
+ self.vm.qmp('cont')
+
+ then = time.time()
+ self.mount_root()
+ self.assert_running_stats(then)
+
+ # Race window between these two commands, where we
+ # rely on '-boot menu=on' to (hopefully) ensure we're
+ # still executing the BIOS when QEMU processes the
+ # 'stop', and thus have not loaded the virtio-balloon
+ # driver in the guest
+ self.vm.qmp('system_reset')
+ self.vm.qmp('stop')
+
+ # If the above assumption held, we're in BIOS now and
+ # stats should be all back at their default values
+ self.assert_initial_stats()
+ self.vm.qmp('cont')
+
+ then = time.time()
+ self.mount_root()
+ self.assert_running_stats(then)
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_virtio_gpu.py b/tests/functional/test_virtio_gpu.py
new file mode 100755
index 0000000..81c9156
--- /dev/null
+++ b/tests/functional/test_virtio_gpu.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+#
+# virtio-gpu tests
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import is_readable_executable_file
+
+
+import os
+import socket
+import subprocess
+
+
+def pick_default_vug_bin(test):
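+ # locate the vhost-user-gpu helper in the build tree; implicitly
+ # returns None when it has not been built so the caller can skip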
+ bld_dir_path = test.build_file("contrib", "vhost-user-gpu", "vhost-user-gpu")
+ if is_readable_executable_file(bld_dir_path):
+ return bld_dir_path
+
+
+class VirtioGPUx86(QemuSystemTest):
+
+ KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
+ ASSET_KERNEL = Asset(
+ ("https://archives.fedoraproject.org/pub/archive/fedora"
+ "/linux/releases/33/Everything/x86_64/os/images"
+ "/pxeboot/vmlinuz"),
+ '2dc5fb5cfe9ac278fa45640f3602d9b7a08cc189ed63fd9b162b07073e4df397')
+ ASSET_INITRD = Asset(
+ ("https://archives.fedoraproject.org/pub/archive/fedora"
+ "/linux/releases/33/Everything/x86_64/os/images"
+ "/pxeboot/initrd.img"),
+ 'c49b97f893a5349e4883452178763e402bdc5caa8845b226a2d1329b5f356045')
+
+ def wait_for_console_pattern(self, success_message, vm=None):
+ wait_for_console_pattern(
+ self,
+ success_message,
+ failure_message="Kernel panic - not syncing",
+ vm=vm,
+ )
+
+ def test_virtio_vga_virgl(self):
+ # FIXME: should check presence of virtio, virgl etc
+ self.require_accelerator('kvm')
+
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+
+ self.vm.set_console()
+ self.vm.add_args("-cpu", "host")
+ self.vm.add_args("-m", "2G")
+ self.vm.add_args("-machine", "pc,accel=kvm")
+ self.vm.add_args("-device", "virtio-vga-gl")
+ self.vm.add_args("-display", "egl-headless")
+ self.vm.add_args(
+ "-kernel",
+ kernel_path,
+ "-initrd",
+ initrd_path,
+ "-append",
+ self.KERNEL_COMMAND_LINE,
+ )
+ try:
+ self.vm.launch()
+ except:
+ # TODO: probably fails because we are missing the VirGL features
+ self.skipTest("VirGL not enabled?")
+
+ self.wait_for_console_pattern("as init process")
+ exec_command_and_wait_for_pattern(
+ self, "/usr/sbin/modprobe virtio_gpu", "features: +virgl +edid"
+ )
+
+ def test_vhost_user_vga_virgl(self):
+ # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
+ self.require_accelerator('kvm')
+
+ vug = pick_default_vug_bin(self)
+ if not vug:
+ self.skipTest("Could not find vhost-user-gpu")
+
+ kernel_path = self.ASSET_KERNEL.fetch()
+ initrd_path = self.ASSET_INITRD.fetch()
+
+ # Create socketpair to connect proxy and remote processes
+ qemu_sock, vug_sock = socket.socketpair(
+ socket.AF_UNIX, socket.SOCK_STREAM
+ )
+ os.set_inheritable(qemu_sock.fileno(), True)
+ os.set_inheritable(vug_sock.fileno(), True)
+
+ self._vug_log_path = self.log_file("vhost-user-gpu.log")
+ self._vug_log_file = open(self._vug_log_path, "wb")
+ self.log.info('Complete vhost-user-gpu.log file can be '
+ 'found at %s', self._vug_log_path)
+
+ vugp = subprocess.Popen(
+ [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
+ stdin=subprocess.DEVNULL,
+ stdout=self._vug_log_file,
+ stderr=subprocess.STDOUT,
+ shell=False,
+ close_fds=False,
+ )
+
+ self.vm.set_console()
+ self.vm.add_args("-cpu", "host")
+ self.vm.add_args("-m", "2G")
+ self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
+ self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
+ self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
+ self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
+ self.vm.add_args("-display", "egl-headless")
+ self.vm.add_args(
+ "-kernel",
+ kernel_path,
+ "-initrd",
+ initrd_path,
+ "-append",
+ self.KERNEL_COMMAND_LINE,
+ )
+ try:
+ self.vm.launch()
+ except:
+ # TODO: probably fails because we are missing the VirGL features
+ self.skipTest("VirGL not enabled?")
+ self.wait_for_console_pattern("as init process")
+ exec_command_and_wait_for_pattern(self, "/usr/sbin/modprobe virtio_gpu",
+ "features: +virgl +edid")
+ self.vm.shutdown()
+ qemu_sock.close()
+ vugp.terminate()
+ vugp.wait()
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_virtio_version.py b/tests/functional/test_virtio_version.py
new file mode 100755
index 0000000..a5ea732
--- /dev/null
+++ b/tests/functional/test_virtio_version.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+"""
+Check compatibility of virtio device types
+"""
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu.machine import QEMUMachine
+from qemu_test import QemuSystemTest
+
+# Virtio Device IDs:
+VIRTIO_NET = 1
+VIRTIO_BLOCK = 2
+VIRTIO_CONSOLE = 3
+VIRTIO_RNG = 4
+VIRTIO_BALLOON = 5
+VIRTIO_RPMSG = 7
+VIRTIO_SCSI = 8
+VIRTIO_9P = 9
+VIRTIO_RPROC_SERIAL = 11
+VIRTIO_CAIF = 12
+VIRTIO_GPU = 16
+VIRTIO_INPUT = 18
+VIRTIO_VSOCK = 19
+VIRTIO_CRYPTO = 20
+
+PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4
+
+# Device IDs for legacy/transitional devices:
+PCI_LEGACY_DEVICE_IDS = {
+ VIRTIO_NET: 0x1000,
+ VIRTIO_BLOCK: 0x1001,
+ VIRTIO_BALLOON: 0x1002,
+ VIRTIO_CONSOLE: 0x1003,
+ VIRTIO_SCSI: 0x1004,
+ VIRTIO_RNG: 0x1005,
+ VIRTIO_9P: 0x1009,
+ VIRTIO_VSOCK: 0x1012,
+}
+
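+# Modern (virtio 1.0) PCI device IDs are 0x1040 plus the virtio device ID;
+# transitional/legacy devices keep the fixed IDs listed above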
+def pci_modern_device_id(virtio_devid):
+ return virtio_devid + 0x1040
+
+def devtype_implements(vm, devtype, implements):
+ return devtype in [d['name'] for d in
+ vm.cmd('qom-list-types', implements=implements)]
+
+def get_pci_interfaces(vm, devtype):
+ interfaces = ('pci-express-device', 'conventional-pci-device')
+ return [i for i in interfaces if devtype_implements(vm, devtype, i)]
+
+class VirtioVersionCheck(QemuSystemTest):
+ """
+ Check if virtio-version-specific device types result in the
+ same device tree created by `disable-modern` and
+ `disable-legacy`.
+ """
+
+ # just in case there are failures, show larger diff:
+ maxDiff = 4096
+
+ def run_device(self, devtype, opts=None, machine='pc'):
+ """
+ Run QEMU with `-device DEVTYPE`, return device info from `query-pci`
+ """
+ with QEMUMachine(self.qemu_bin) as vm:
+ vm.set_machine(machine)
+ if opts:
+ devtype += ',' + opts
+ vm.add_args('-device', '%s,id=devfortest' % (devtype))
+ vm.add_args('-S')
+ vm.launch()
+
+ pcibuses = vm.cmd('query-pci')
+ alldevs = [dev for bus in pcibuses for dev in bus['devices']]
+ devfortest = [dev for dev in alldevs
+ if dev['qdev_id'] == 'devfortest']
+ return devfortest[0], get_pci_interfaces(vm, devtype)
+
+
+ def assert_devids(self, dev, devid, non_transitional=False):
+ self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET)
+ self.assertEqual(dev['id']['device'], devid)
+ if non_transitional:
+ self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f)
+ self.assertGreaterEqual(dev['id']['subsystem'], 0x40)
+
+ def check_all_variants(self, qemu_devtype, virtio_devid):
+ """Check if a virtio device type and its variants behave as expected"""
+ # Force modern mode:
+ dev_modern, _ = self.run_device(qemu_devtype,
+ 'disable-modern=off,disable-legacy=on')
+ self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
+ non_transitional=True)
+
+ # <prefix>-non-transitional device types should be 100% equivalent to
+ # <prefix>,disable-modern=off,disable-legacy=on
+ dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype))
+ self.assertEqual(dev_modern, dev_1_0)
+
+ # Force transitional mode:
+ dev_trans, _ = self.run_device(qemu_devtype,
+ 'disable-modern=off,disable-legacy=off')
+ self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid])
+
+ # Force legacy mode:
+ dev_legacy, _ = self.run_device(qemu_devtype,
+ 'disable-modern=on,disable-legacy=off')
+ self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid])
+
+ # No options: default to transitional on PC machine-type:
+ no_opts_pc, generic_ifaces = self.run_device(qemu_devtype)
+ self.assertEqual(dev_trans, no_opts_pc)
+
+ #TODO: check if plugging on a PCI Express bus will make the
+ # device non-transitional
+ #no_opts_q35 = self.run_device(qemu_devtype, machine='q35')
+ #self.assertEqual(dev_modern, no_opts_q35)
+
+ # <prefix>-transitional device types should be 100% equivalent to
+ # <prefix>,disable-modern=off,disable-legacy=off
+ dev_trans_2, trans_ifaces = self.run_device('%s-transitional' % (qemu_devtype))
+ self.assertEqual(dev_trans, dev_trans_2)
+
+ # ensure the interface information is correct:
+ self.assertIn('conventional-pci-device', generic_ifaces)
+ self.assertIn('pci-express-device', generic_ifaces)
+
+ self.assertIn('conventional-pci-device', nt_ifaces)
+ self.assertIn('pci-express-device', nt_ifaces)
+
+ self.assertIn('conventional-pci-device', trans_ifaces)
+ self.assertNotIn('pci-express-device', trans_ifaces)
+
+
+ def test_conventional_devs(self):
+ self.set_machine('pc')
+ self.check_all_variants('virtio-net-pci', VIRTIO_NET)
+ # virtio-blk requires 'driver' parameter
+ #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK)
+ self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE)
+ self.check_all_variants('virtio-rng-pci', VIRTIO_RNG)
+ self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON)
+ self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI)
+ # virtio-9p requires 'fsdev' parameter
+ #self.check_all_variants('virtio-9p-pci', VIRTIO_9P)
+
+ def check_modern_only(self, qemu_devtype, virtio_devid):
+ """Check if a modern-only virtio device type behaves as expected"""
+ # Force modern mode:
+ dev_modern, _ = self.run_device(qemu_devtype,
+ 'disable-modern=off,disable-legacy=on')
+ self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
+ non_transitional=True)
+
+ # No options: should be modern anyway
+ dev_no_opts, ifaces = self.run_device(qemu_devtype)
+ self.assertEqual(dev_modern, dev_no_opts)
+
+ self.assertIn('conventional-pci-device', ifaces)
+ self.assertIn('pci-express-device', ifaces)
+
+ def test_modern_only_devs(self):
+ self.set_machine('pc')
+ self.check_modern_only('virtio-vga', VIRTIO_GPU)
+ self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU)
+ self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT)
+ self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT)
+ self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT)
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_vnc.py b/tests/functional/test_vnc.py
new file mode 100755
index 0000000..f1dd159
--- /dev/null
+++ b/tests/functional/test_vnc.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+#
+# Simple functional tests for VNC functionality
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+# Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import socket
+
+from qemu.machine.machine import VMLaunchFailure
+from qemu_test import QemuSystemTest
+from qemu_test.ports import Ports
+
+
+VNC_ADDR = '127.0.0.1'
+
+def check_connect(port: int) -> bool:
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+ try:
+ sock.connect((VNC_ADDR, port))
+ except ConnectionRefusedError:
+ return False
+
+ return True
+
+class Vnc(QemuSystemTest):
+
+ def test_no_vnc_change_password(self):
+ self.set_machine('none')
+ self.vm.add_args('-nodefaults', '-S')
+ self.vm.launch()
+
+ query_vnc_response = self.vm.qmp('query-vnc')
+ if 'error' in query_vnc_response:
+ self.assertEqual(query_vnc_response['error']['class'],
+ 'CommandNotFound')
+ self.skipTest('VNC support not available')
+ self.assertFalse(query_vnc_response['return']['enabled'])
+
+ set_password_response = self.vm.qmp('change-vnc-password',
+ password='new_password')
+ self.assertIn('error', set_password_response)
+ self.assertEqual(set_password_response['error']['class'],
+ 'GenericError')
+ self.assertEqual(set_password_response['error']['desc'],
+ 'Could not set password')
+
+ def launch_guarded(self):
+ try:
+ self.vm.launch()
+ except VMLaunchFailure as excp:
+ if "-vnc: invalid option" in excp.output:
+ self.skipTest("VNC support not available")
+ elif "Cipher backend does not support DES algorithm" in excp.output:
+ self.skipTest("No cryptographic backend available")
+ else:
+ self.log.info("unhandled launch failure: %s", excp.output)
+ raise excp
+
+ def test_change_password_requires_a_password(self):
+ self.set_machine('none')
+ self.vm.add_args('-nodefaults', '-S', '-vnc', ':1,to=999')
+ self.launch_guarded()
+ self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled'])
+ set_password_response = self.vm.qmp('change-vnc-password',
+ password='new_password')
+ self.assertIn('error', set_password_response)
+ self.assertEqual(set_password_response['error']['class'],
+ 'GenericError')
+ self.assertEqual(set_password_response['error']['desc'],
+ 'Could not set password')
+
+ def test_change_password(self):
+ self.set_machine('none')
+ self.vm.add_args('-nodefaults', '-S', '-vnc', ':1,to=999,password=on')
+ self.launch_guarded()
+ self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled'])
+ self.vm.cmd('change-vnc-password',
+ password='new_password')
+
+ def do_test_change_listen(self, a, b, c):
+ self.assertFalse(check_connect(a))
+ self.assertFalse(check_connect(b))
+ self.assertFalse(check_connect(c))
+
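+ # -vnc takes a display number rather than a port; displays map to
+ # TCP port 5900 + display, hence the 'a - 5900' conversion below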
+ self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}')
+ self.launch_guarded()
+ self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a))
+ self.assertTrue(check_connect(a))
+ self.assertFalse(check_connect(b))
+ self.assertFalse(check_connect(c))
+
+ self.vm.cmd('display-update', type='vnc',
+ addresses=[{'type': 'inet', 'host': VNC_ADDR,
+ 'port': str(b)},
+ {'type': 'inet', 'host': VNC_ADDR,
+ 'port': str(c)}])
+ self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b))
+ self.assertFalse(check_connect(a))
+ self.assertTrue(check_connect(b))
+ self.assertTrue(check_connect(c))
+
+ def test_change_listen(self):
+ self.set_machine('none')
+ with Ports() as ports:
+ a, b, c = ports.find_free_ports(3)
+ self.do_test_change_listen(a, b, c)
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_x86_64_hotplug_blk.py b/tests/functional/test_x86_64_hotplug_blk.py
new file mode 100755
index 0000000..7ddbfef
--- /dev/null
+++ b/tests/functional/test_x86_64_hotplug_blk.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+#
+# Functional test that hotplugs a virtio blk disk and checks it on a Linux
+# guest
+#
+# Copyright (c) 2021 Red Hat, Inc.
+# Copyright (c) Yandex
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+
+
+class HotPlugBlk(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129')
+
+ ASSET_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/initrd.img'),
+ '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b')
+
+ def blockdev_add(self) -> None:
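+ # 'null-co' is a dummy block driver, so the 1 GiB disk needs no
+ # backing image file on the host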
+ self.vm.cmd('blockdev-add', **{
+ 'driver': 'null-co',
+ 'size': 1073741824,
+ 'node-name': 'disk'
+ })
+
+ def assert_vda(self) -> None:
+ exec_command_and_wait_for_pattern(self, 'while ! test -e /sys/block/vda ;'
+ ' do sleep 0.2 ; done', '# ')
+
+ def assert_no_vda(self) -> None:
+ exec_command_and_wait_for_pattern(self, 'while test -e /sys/block/vda ;'
+ ' do sleep 0.2 ; done', '# ')
+
+ def plug(self) -> None:
+ args = {
+ 'driver': 'virtio-blk-pci',
+ 'drive': 'disk',
+ 'id': 'virtio-disk0',
+ 'bus': 'pci.1',
+ 'addr': '1',
+ }
+
+ self.assert_no_vda()
+ self.vm.cmd('device_add', args)
+ self.wait_for_console_pattern('virtio_blk virtio0: [vda]')
+ self.assert_vda()
+
+ def unplug(self) -> None:
+ self.vm.cmd('device_del', id='virtio-disk0')
+
+ self.vm.event_wait('DEVICE_DELETED', 1.0,
+ match={'data': {'device': 'virtio-disk0'}})
+
+ self.assert_no_vda()
+
+ def test(self) -> None:
+ self.require_accelerator('kvm')
+ self.set_machine('q35')
+
+ self.vm.add_args('-accel', 'kvm')
+ self.vm.add_args('-device', 'pcie-pci-bridge,id=pci.1,bus=pcie.0')
+ self.vm.add_args('-m', '1G')
+ self.vm.add_args('-append', 'console=ttyS0 rd.rescue')
+
+ self.launch_kernel(self.ASSET_KERNEL.fetch(),
+ self.ASSET_INITRD.fetch(),
+ wait_for='Entering emergency mode.')
+ self.wait_for_console_pattern('# ')
+
+ self.blockdev_add()
+
+ self.plug()
+ self.unplug()
+
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_x86_64_hotplug_cpu.py b/tests/functional/test_x86_64_hotplug_cpu.py
new file mode 100755
index 0000000..7b9200a
--- /dev/null
+++ b/tests/functional/test_x86_64_hotplug_cpu.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+#
+# Functional test that hotplugs a CPU and checks it on a Linux guest
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# Author:
+# Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern
+
+
+class HotPlugCPU(LinuxKernelTest):
+
+ ASSET_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/vmlinuz'),
+ 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129')
+
+ ASSET_INITRD = Asset(
+ ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases'
+ '/31/Server/x86_64/os/images/pxeboot/initrd.img'),
+ '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b')
+
+ def test_hotplug(self):
+
+ self.require_accelerator('kvm')
+ self.vm.add_args('-accel', 'kvm')
+ self.vm.add_args('-cpu', 'Haswell')
+ self.vm.add_args('-smp', '1,sockets=1,cores=2,threads=1,maxcpus=2')
+ self.vm.add_args('-m', '1G')
+ self.vm.add_args('-append', 'console=ttyS0 rd.rescue')
+
+ self.launch_kernel(self.ASSET_KERNEL.fetch(),
+ self.ASSET_INITRD.fetch(),
+ wait_for='Entering emergency mode.')
+ prompt = '# '
+ self.wait_for_console_pattern(prompt)
+
+ exec_command_and_wait_for_pattern(self,
+ 'cd /sys/devices/system/cpu/cpu0',
+ 'cpu0#')
+ exec_command_and_wait_for_pattern(self,
+ 'cd /sys/devices/system/cpu/cpu1',
+ 'No such file or directory')
+
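+ # the hot-pluggable CPU device type is derived from the CPU model,
+ # hence 'Haswell-x86_64-cpu' to match the '-cpu Haswell' above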
+ self.vm.cmd('device_add',
+ driver='Haswell-x86_64-cpu',
+ id='c1',
+ socket_id=0,
+ core_id=1,
+ thread_id=0)
+ self.wait_for_console_pattern('CPU1 has been hot-added')
+
+ exec_command_and_wait_for_pattern(self,
+ 'cd /sys/devices/system/cpu/cpu1',
+ 'cpu1#')
+
+ exec_command_and_wait_for_pattern(self, 'cd ..', prompt)
+ self.vm.cmd('device_del', id='c1')
+
+ exec_command_and_wait_for_pattern(self,
+ 'while cd /sys/devices/system/cpu/cpu1 ;'
+ ' do sleep 0.2 ; done',
+ 'No such file or directory')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_x86_64_kvm_xen.py b/tests/functional/test_x86_64_kvm_xen.py
new file mode 100755
index 0000000..a5d4450
--- /dev/null
+++ b/tests/functional/test_x86_64_kvm_xen.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+#
+# KVM Xen guest functional tests
+#
+# Copyright © 2021 Red Hat, Inc.
+# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Author:
+# David Woodhouse <dwmw2@infradead.org>
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu.machine import machine
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+class KVMXenGuest(QemuSystemTest):
+
+ KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0 quiet'
+
+ kernel_path = None
+ kernel_params = None
+
+ # Fetch assets from the kvm-xen-guest subdir of my shared test
+ # images directory on fileserver.linaro.org where you can find
+ # build instructions for how they were assembled.
+ ASSET_KERNEL = Asset(
+ ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/download?'
+ 'path=%2Fkvm-xen-guest&files=bzImage'),
+ 'ec0ad7bb8c33c5982baee0a75505fe7dbf29d3ff5d44258204d6307c6fe0132a')
+
+ ASSET_ROOTFS = Asset(
+ ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/download?'
+ 'path=%2Fkvm-xen-guest&files=rootfs.ext4'),
+ 'b11045d649006c649c184e93339aaa41a8fe20a1a86620af70323252eb29e40b')
+
+ def common_vm_setup(self):
+ # We also catch lack of KVM_XEN support if we fail to launch
+ self.require_accelerator("kvm")
+ self.require_netdev('user')
+
+ self.vm.set_console()
+
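+ # xen-version encodes the emulated Xen version as (major << 16) | minor,
+ # so 0x4000a advertises Xen 4.10 to the guest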
+ self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split")
+ self.vm.add_args("-smp", "2")
+
+ self.kernel_path = self.ASSET_KERNEL.fetch()
+ self.rootfs = self.ASSET_ROOTFS.fetch()
+
+ def run_and_check(self):
+ self.vm.add_args('-kernel', self.kernel_path,
+ '-append', self.kernel_params,
+ '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0",
+ '-device', 'xen-disk,drive=drv0,vdev=xvda',
+ '-device', 'virtio-net-pci,netdev=unet',
+ '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22')
+
+ try:
+ self.vm.launch()
+ except machine.VMLaunchFailure as e:
+ if "Xen HVM guest support not present" in e.output:
+ self.skipTest("KVM Xen support is not present "
+ "(need v5.12+ kernel with CONFIG_KVM_XEN)")
+ elif "Property 'kvm-accel.xen-version' not found" in e.output:
+ self.skipTest("QEMU not built with CONFIG_XEN_EMU support")
+ else:
+ raise e
+
+ self.log.info('VM launched, waiting for sshd')
+ console_pattern = 'Starting dropbear sshd: OK'
+ wait_for_console_pattern(self, console_pattern, 'Oops')
+ self.log.info('sshd ready')
+
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline', 'xen')
+ exec_command_and_wait_for_pattern(self, 'dmesg | grep "Grant table"',
+ 'Grant table initialized')
+ wait_for_console_pattern(self, '#', 'Oops')
+
+ def test_kvm_xen_guest(self):
+ self.common_vm_setup()
+
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-pirq.*msi /proc/interrupts',
+ 'virtio0-output')
+
+ def test_kvm_xen_guest_nomsi(self):
+ self.common_vm_setup()
+
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks pci=nomsi')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-pirq.* /proc/interrupts',
+ 'virtio0')
+
+ def test_kvm_xen_guest_noapic_nomsi(self):
+ self.common_vm_setup()
+
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks noapic pci=nomsi')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-pirq /proc/interrupts',
+ 'virtio0')
+
+ def test_kvm_xen_guest_vapic(self):
+ self.common_vm_setup()
+ self.vm.add_args('-cpu', 'host,+xen-vapic')
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-pirq /proc/interrupts',
+ 'acpi')
+ wait_for_console_pattern(self, '#')
+ exec_command_and_wait_for_pattern(self,
+ 'grep PCI-MSI /proc/interrupts',
+ 'virtio0-output')
+
+ def test_kvm_xen_guest_novector(self):
+ self.common_vm_setup()
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks' +
+ ' xen_no_vector_callback')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-platform-pci /proc/interrupts',
+ 'fasteoi')
+
+ def test_kvm_xen_guest_novector_nomsi(self):
+ self.common_vm_setup()
+
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks pci=nomsi' +
+ ' xen_no_vector_callback')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-platform-pci /proc/interrupts',
+ 'IO-APIC')
+
+ def test_kvm_xen_guest_novector_noapic(self):
+ self.common_vm_setup()
+ self.kernel_params = (self.KERNEL_DEFAULT +
+ ' xen_emul_unplug=ide-disks' +
+ ' xen_no_vector_callback noapic')
+ self.run_and_check()
+ exec_command_and_wait_for_pattern(self,
+ 'grep xen-platform-pci /proc/interrupts',
+ 'XT-PIC')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_x86_64_replay.py b/tests/functional/test_x86_64_replay.py
new file mode 100755
index 0000000..27287d4
--- /dev/null
+++ b/tests/functional/test_x86_64_replay.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on x86_64 machines
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from subprocess import check_call, DEVNULL
+
+from qemu_test import Asset, skipFlakyTest, get_qemu_img
+from replay_kernel import ReplayKernelBase
+
+
+class X86Replay(ReplayKernelBase):
+
+ ASSET_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/x86_64/bzImage',
+ 'f57bfc6553bcd6e0a54aab86095bf642b33b5571d14e3af1731b18c87ed5aef8')
+
+ ASSET_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/x86_64/rootfs.ext4.zst',
+ '4b8b2a99117519c5290e1202cb36eb6c7aaba92b357b5160f5970cf5fb78a751')
+
+ def do_test_x86(self, machine, blkdevice, devroot):
+ self.require_netdev('user')
+ self.set_machine(machine)
+ self.cpu = "Nehalem"
+ kernel_path = self.ASSET_KERNEL.fetch()
+
+ raw_disk = self.uncompress(self.ASSET_ROOTFS)
+ disk = self.scratch_file('scratch.qcow2')
+ qemu_img = get_qemu_img(self)
+ check_call([qemu_img, 'create', '-f', 'qcow2', '-b', raw_disk,
+ '-F', 'raw', disk], stdout=DEVNULL, stderr=DEVNULL)
+
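+ # route the disk through the blkreplay driver and the NIC through
+ # filter-replay so that block and network I/O stay deterministic
+ # between the record and replay runs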
+ args = ('-drive', 'file=%s,snapshot=on,id=hd0,if=none' % disk,
+ '-drive', 'driver=blkreplay,id=hd0-rr,if=none,image=hd0',
+ '-device', '%s,drive=hd0-rr' % blkdevice,
+ '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
+ '-device', 'virtio-net,netdev=vnet',
+ '-object', 'filter-replay,id=replay,netdev=vnet')
+
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+ f"console=ttyS0 root=/dev/{devroot}")
+ console_pattern = 'Welcome to TuxTest'
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
+ args=args)
+
+ @skipFlakyTest('https://gitlab.com/qemu-project/qemu/-/issues/2094')
+ def test_pc(self):
+ self.do_test_x86('pc', 'virtio-blk', 'vda')
+
+ def test_q35(self):
+ self.do_test_x86('q35', 'ide-hd', 'sda')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/functional/test_x86_64_reverse_debug.py b/tests/functional/test_x86_64_reverse_debug.py
new file mode 100755
index 0000000..d713e91
--- /dev/null
+++ b/tests/functional/test_x86_64_reverse_debug.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Reverse debugging test
+#
+# Copyright (c) 2020 ISP RAS
+#
+# Author:
+# Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+from qemu_test import skipIfMissingImports, skipFlakyTest
+from reverse_debugging import ReverseDebugging
+
+
+@skipIfMissingImports('avocado.utils')
+class ReverseDebugging_X86_64(ReverseDebugging):
+
+ REG_PC = 0x10
+ REG_CS = 0x12
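+ # 0x10 and 0x12 are the gdbstub register numbers of RIP and CS on
+ # x86_64; the PC is reconstructed real-mode style (CS * 16 + IP)
+ # because the test starts while the BIOS is still running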
+ def get_pc(self, g):
+ return self.get_reg_le(g, self.REG_PC) \
+ + self.get_reg_le(g, self.REG_CS) * 0x10
+
+ @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2922")
+ def test_x86_64_pc(self):
+ self.set_machine('pc')
+ # start with BIOS only
+ self.reverse_debugging()
+
+
+if __name__ == '__main__':
+ ReverseDebugging.main()
diff --git a/tests/functional/test_x86_64_tuxrun.py b/tests/functional/test_x86_64_tuxrun.py
new file mode 100755
index 0000000..fcbc62b
--- /dev/null
+++ b/tests/functional/test_x86_64_tuxrun.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunX86Test(TuxRunBaselineTest):
+
+ ASSET_X86_64_KERNEL = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/x86_64/bzImage',
+ 'f57bfc6553bcd6e0a54aab86095bf642b33b5571d14e3af1731b18c87ed5aef8')
+ ASSET_X86_64_ROOTFS = Asset(
+ 'https://storage.tuxboot.com/buildroot/20241119/x86_64/rootfs.ext4.zst',
+ '4b8b2a99117519c5290e1202cb36eb6c7aaba92b357b5160f5970cf5fb78a751')
+
+ def test_x86_64(self):
+ self.set_machine('q35')
+ self.cpu = "Nehalem"
+ self.root = 'sda'
+ self.wait_for_shutdown = False
+ self.common_tuxrun(kernel_asset=self.ASSET_X86_64_KERNEL,
+ rootfs_asset=self.ASSET_X86_64_ROOTFS,
+ drive="driver=ide-hd,bus=ide.0,unit=0")
+
+if __name__ == '__main__':
+ TuxRunBaselineTest.main()
diff --git a/tests/functional/test_x86_cpu_model_versions.py b/tests/functional/test_x86_cpu_model_versions.py
new file mode 100755
index 0000000..bd18acd
--- /dev/null
+++ b/tests/functional/test_x86_cpu_model_versions.py
@@ -0,0 +1,335 @@
+#!/usr/bin/env python3
+#
+# Basic validation of x86 versioned CPU models and CPU model aliases
+#
+# Copyright (c) 2019 Red Hat Inc
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+
+from qemu_test import QemuSystemTest
+
+class X86CPUModelAliases(QemuSystemTest):
+ """
+ Validation of PC CPU model versions and CPU model aliases
+ """
+ def validate_aliases(self, cpus):
+ for c in cpus.values():
+ if 'alias-of' in c:
+ # all aliases must point to a valid CPU model name:
+ self.assertIn(c['alias-of'], cpus,
+ '%s.alias-of (%s) is not a valid CPU model name' % (c['name'], c['alias-of']))
+ # aliases must not point to aliases
+ self.assertNotIn('alias-of', cpus[c['alias-of']],
+ '%s.alias-of (%s) points to another alias' % (c['name'], c['alias-of']))
+
+ # aliases must not be static
+ self.assertFalse(c['static'])
+
+ def validate_variant_aliases(self, cpus):
+ # -noTSX, -IBRS and -IBPB variants of CPU models are special:
+ # they shouldn't have their own versions:
+ self.assertNotIn("Haswell-noTSX-v1", cpus,
+ "Haswell-noTSX shouldn't be versioned")
+ self.assertNotIn("Broadwell-noTSX-v1", cpus,
+ "Broadwell-noTSX shouldn't be versioned")
+ self.assertNotIn("Nehalem-IBRS-v1", cpus,
+ "Nehalem-IBRS shouldn't be versioned")
+ self.assertNotIn("Westmere-IBRS-v1", cpus,
+ "Westmere-IBRS shouldn't be versioned")
+ self.assertNotIn("SandyBridge-IBRS-v1", cpus,
+ "SandyBridge-IBRS shouldn't be versioned")
+ self.assertNotIn("IvyBridge-IBRS-v1", cpus,
+ "IvyBridge-IBRS shouldn't be versioned")
+ self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus,
+ "Haswell-noTSX-IBRS shouldn't be versioned")
+ self.assertNotIn("Haswell-IBRS-v1", cpus,
+ "Haswell-IBRS shouldn't be versioned")
+ self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus,
+ "Broadwell-noTSX-IBRS shouldn't be versioned")
+ self.assertNotIn("Broadwell-IBRS-v1", cpus,
+ "Broadwell-IBRS shouldn't be versioned")
+ self.assertNotIn("Skylake-Client-IBRS-v1", cpus,
+ "Skylake-Client-IBRS shouldn't be versioned")
+ self.assertNotIn("Skylake-Server-IBRS-v1", cpus,
+ "Skylake-Server-IBRS shouldn't be versioned")
+ self.assertNotIn("EPYC-IBPB-v1", cpus,
+ "EPYC-IBPB shouldn't be versioned")
+
+ def test_4_0_alias_compatibility(self):
+ """
+ Check that unversioned CPU models are not reported as aliases on pc-*-4.0
+ """
+ self.set_machine('pc-i440fx-4.0')
+ # pc-*-4.0 won't expose non-versioned CPU models as aliases.
+ # We do this to help management software keep compatibility
+ # with older QEMU versions that didn't have versioned CPU models
+ self.vm.add_args('-S')
+ self.vm.launch()
+ cpus = dict((m['name'], m) for m in
+ self.vm.cmd('query-cpu-definitions'))
+
+ self.assertFalse(cpus['Cascadelake-Server']['static'],
+ 'unversioned Cascadelake-Server CPU model must not be static')
+ self.assertNotIn('alias-of', cpus['Cascadelake-Server'],
+ 'Cascadelake-Server must not be an alias')
+ self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+ 'Cascadelake-Server-v1 must not be an alias')
+
+ self.assertFalse(cpus['qemu64']['static'],
+ 'unversioned qemu64 CPU model must not be static')
+ self.assertNotIn('alias-of', cpus['qemu64'],
+ 'qemu64 must not be an alias')
+ self.assertNotIn('alias-of', cpus['qemu64-v1'],
+ 'qemu64-v1 must not be an alias')
+
+ self.validate_variant_aliases(cpus)
+
+ # On pc-*-4.0, no CPU model should be reported as an alias:
+ for name,c in cpus.items():
+ self.assertNotIn('alias-of', c, "%s shouldn't be an alias" % (name))
+
+ def test_4_1_alias(self):
+ """
+ Check that the unversioned CPU model is an alias pointing to the right version
+ """
+ self.set_machine('pc-i440fx-4.1')
+ self.vm.add_args('-S')
+ self.vm.launch()
+
+ cpus = dict((m['name'], m) for m in
+ self.vm.cmd('query-cpu-definitions'))
+
+ self.assertFalse(cpus['Cascadelake-Server']['static'],
+ 'unversioned Cascadelake-Server CPU model must not be static')
+ self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'),
+ 'Cascadelake-Server-v1',
+ 'Cascadelake-Server must be an alias of Cascadelake-Server-v1')
+ self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+ 'Cascadelake-Server-v1 must not be an alias')
+
+ self.assertFalse(cpus['qemu64']['static'],
+ 'unversioned qemu64 CPU model must not be static')
+ self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1',
+ 'qemu64 must be an alias of qemu64-v1')
+ self.assertNotIn('alias-of', cpus['qemu64-v1'],
+ 'qemu64-v1 must not be an alias')
+
+ self.validate_variant_aliases(cpus)
+
+ # On pc-*-4.1, -noTSX and -IBRS models should be aliases:
+ self.assertEqual(cpus["Haswell"].get('alias-of'),
+ "Haswell-v1",
+ "Haswell must be an alias")
+ self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'),
+ "Haswell-v2",
+ "Haswell-noTSX must be an alias")
+ self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'),
+ "Haswell-v3",
+ "Haswell-IBRS must be an alias")
+ self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'),
+ "Haswell-v4",
+ "Haswell-noTSX-IBRS must be an alias")
+
+ self.assertEqual(cpus["Broadwell"].get('alias-of'),
+ "Broadwell-v1",
+ "Broadwell must be an alias")
+ self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'),
+ "Broadwell-v2",
+ "Broadwell-noTSX must be an alias")
+ self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'),
+ "Broadwell-v3",
+ "Broadwell-IBRS must be an alias")
+ self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'),
+ "Broadwell-v4",
+ "Broadwell-noTSX-IBRS must be an alias")
+
+ self.assertEqual(cpus["Nehalem"].get('alias-of'),
+ "Nehalem-v1",
+ "Nehalem must be an alias")
+ self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'),
+ "Nehalem-v2",
+ "Nehalem-IBRS must be an alias")
+
+ self.assertEqual(cpus["Westmere"].get('alias-of'),
+ "Westmere-v1",
+ "Westmere must be an alias")
+ self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'),
+ "Westmere-v2",
+ "Westmere-IBRS must be an alias")
+
+ self.assertEqual(cpus["SandyBridge"].get('alias-of'),
+ "SandyBridge-v1",
+ "SandyBridge must be an alias")
+ self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'),
+ "SandyBridge-v2",
+ "SandyBridge-IBRS must be an alias")
+
+ self.assertEqual(cpus["IvyBridge"].get('alias-of'),
+ "IvyBridge-v1",
+ "IvyBridge must be an alias")
+ self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'),
+ "IvyBridge-v2",
+ "IvyBridge-IBRS must be an alias")
+
+ self.assertEqual(cpus["Skylake-Client"].get('alias-of'),
+ "Skylake-Client-v1",
+ "Skylake-Client must be an alias")
+ self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'),
+ "Skylake-Client-v2",
+ "Skylake-Client-IBRS must be an alias")
+
+ self.assertEqual(cpus["Skylake-Server"].get('alias-of'),
+ "Skylake-Server-v1",
+ "Skylake-Server must be an alias")
+ self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'),
+ "Skylake-Server-v2",
+ "Skylake-Server-IBRS must be an alias")
+
+ self.assertEqual(cpus["EPYC"].get('alias-of'),
+ "EPYC-v1",
+ "EPYC must be an alias")
+ self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'),
+ "EPYC-v2",
+ "EPYC-IBPB must be an alias")
+
+ self.validate_aliases(cpus)
+
+ def test_none_alias(self):
+ """
+ Check that the unversioned CPU model is an alias pointing to some version
+ """
+ self.set_machine('none')
+ self.vm.add_args('-S')
+ self.vm.launch()
+
+ cpus = dict((m['name'], m) for m in
+ self.vm.cmd('query-cpu-definitions'))
+
+ self.assertFalse(cpus['Cascadelake-Server']['static'],
+ 'unversioned Cascadelake-Server CPU model must not be static')
+ self.assertTrue(re.match('Cascadelake-Server-v[0-9]+', cpus['Cascadelake-Server']['alias-of']),
+ 'Cascadelake-Server must be an alias of versioned CPU model')
+ self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+ 'Cascadelake-Server-v1 must not be an alias')
+
+ self.assertFalse(cpus['qemu64']['static'],
+ 'unversioned qemu64 CPU model must not be static')
+ self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']),
+ 'qemu64 must be an alias of versioned CPU model')
+ self.assertNotIn('alias-of', cpus['qemu64-v1'],
+ 'qemu64-v1 must not be an alias')
+
+ self.validate_aliases(cpus)
+
+
+class CascadelakeArchCapabilities(QemuSystemTest):
+ """
+ Validation of Cascadelake arch-capabilities
+ """
+ def get_cpu_prop(self, prop):
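+ # read a property of the first vCPU via its QOM path as reported
+ # by query-cpus-fast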
+ cpu_path = self.vm.cmd('query-cpus-fast')[0].get('qom-path')
+ return self.vm.cmd('qom-get', path=cpu_path, property=prop)
+
+ def test_4_1(self):
+ self.set_machine('pc-i440fx-4.1')
+ # machine-type only:
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server,x-force-features=on,check=off,'
+ 'enforce=off')
+ self.vm.launch()
+ self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.1 + Cascadelake-Server should not have arch-capabilities')
+
+ def test_4_0(self):
+ self.set_machine('pc-i440fx-4.0')
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server,x-force-features=on,check=off,'
+ 'enforce=off')
+ self.vm.launch()
+ self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.0 + Cascadelake-Server should not have arch-capabilities')
+
+ def test_set_4_0(self):
+ self.set_machine('pc-i440fx-4.0')
+ # command line must override machine-type if CPU model is not versioned:
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server,x-force-features=on,check=off,'
+ 'enforce=off,+arch-capabilities')
+ self.vm.launch()
+ self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.0 + Cascadelake-Server,+arch-capabilities should have arch-capabilities')
+
+ def test_unset_4_1(self):
+ self.set_machine('pc-i440fx-4.1')
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server,x-force-features=on,check=off,'
+ 'enforce=off,-arch-capabilities')
+ self.vm.launch()
+ self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.1 + Cascadelake-Server,-arch-capabilities should not have arch-capabilities')
+
+ def test_v1_4_0(self):
+ self.set_machine('pc-i440fx-4.0')
+ # versioned CPU model overrides machine-type:
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server-v1,x-force-features=on,check=off,'
+ 'enforce=off')
+ self.vm.launch()
+ self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.0 + Cascadelake-Server-v1 should not have arch-capabilities')
+
+ def test_v2_4_0(self):
+ self.set_machine('pc-i440fx-4.0')
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server-v2,x-force-features=on,check=off,'
+ 'enforce=off')
+ self.vm.launch()
+ self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.0 + Cascadelake-Server-v2 should have arch-capabilities')
+
+ def test_v1_set_4_0(self):
+ self.set_machine('pc-i440fx-4.0')
+ # command line must override machine-type and versioned CPU model:
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server-v1,x-force-features=on,check=off,'
+ 'enforce=off,+arch-capabilities')
+ self.vm.launch()
+ self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.0 + Cascadelake-Server-v1,+arch-capabilities should have arch-capabilities')
+
+ def test_v2_unset_4_1(self):
+ self.set_machine('pc-i440fx-4.1')
+ self.vm.add_args('-S')
+ self.set_vm_arg('-cpu',
+ 'Cascadelake-Server-v2,x-force-features=on,check=off,'
+ 'enforce=off,-arch-capabilities')
+ self.vm.launch()
+ self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+ 'pc-i440fx-4.1 + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_xtensa_lx60.py b/tests/functional/test_xtensa_lx60.py
new file mode 100755
index 0000000..147c920
--- /dev/null
+++ b/tests/functional/test_xtensa_lx60.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel on an xtensa lx60 machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import LinuxKernelTest, Asset
+
+
+class XTensaLX60Test(LinuxKernelTest):
+
+ ASSET_DAY02 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day02.tar.xz',
+ '68ff07f9b3fd3df36d015eb46299ba44748e94bfbb2d5295fddc1a8d4a9fd324')
+
+ def test_xtensa_lx60(self):
+ self.set_machine('lx60')
+ self.cpu = 'dc233c'
+ self.archive_extract(self.ASSET_DAY02)
+ self.launch_kernel(self.scratch_file('day02',
+ 'santas-sleigh-ride.elf'),
+ wait_for='QEMU advent calendar')
+
+if __name__ == '__main__':
+ LinuxKernelTest.main()
diff --git a/tests/functional/test_xtensa_replay.py b/tests/functional/test_xtensa_replay.py
new file mode 100755
index 0000000..eb00a3b
--- /dev/null
+++ b/tests/functional/test_xtensa_replay.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Replay test that boots a Linux kernel on an xtensa lx60 machine
+# and checks the console
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from replay_kernel import ReplayKernelBase
+
+
+class XTensaReplay(ReplayKernelBase):
+
+ ASSET_DAY02 = Asset(
+ 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day02.tar.xz',
+ '68ff07f9b3fd3df36d015eb46299ba44748e94bfbb2d5295fddc1a8d4a9fd324')
+
+ def test_replay(self):
+ self.set_machine('lx60')
+ self.cpu = 'dc233c'
+ kernel_path = self.archive_extract(self.ASSET_DAY02,
+ member='day02/santas-sleigh-ride.elf')
+ self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE,
+ 'QEMU advent calendar')
+
+
+if __name__ == '__main__':
+ ReplayKernelBase.main()
diff --git a/tests/guest-debug/run-test.py b/tests/guest-debug/run-test.py
index 368ff8a..75e9c92 100755
--- a/tests/guest-debug/run-test.py
+++ b/tests/guest-debug/run-test.py
@@ -27,11 +27,17 @@ def get_args():
parser.add_argument("--binary", help="Binary to debug",
required=True)
parser.add_argument("--test", help="GDB test script")
+ parser.add_argument('test_args', nargs='*',
+ help="Additional args for GDB test script. "
+ "The args should be preceded by -- to avoid confusion "
+ "with flags for runner script")
parser.add_argument("--gdb", help="The gdb binary to use",
default=None)
parser.add_argument("--gdb-args", help="Additional gdb arguments")
parser.add_argument("--output", help="A file to redirect output to")
parser.add_argument("--stderr", help="A file to redirect stderr to")
+ parser.add_argument("--no-suspend", action="store_true",
+ help="Ask the binary to not wait for GDB connection")
return parser.parse_args()
@@ -69,10 +75,19 @@ if __name__ == '__main__':
# Launch QEMU with binary
if "system" in args.qemu:
+ if args.no_suspend:
+ suspend = ''
+ else:
+ suspend = ' -S'
cmd = f'{args.qemu} {args.qargs} {args.binary}' \
- f' -S -gdb unix:path={socket_name},server=on'
+ f'{suspend} -gdb unix:path={socket_name},server=on'
else:
- cmd = f'{args.qemu} {args.qargs} -g {socket_name} {args.binary}'
+ if args.no_suspend:
+ suspend = ',suspend=n'
+ else:
+ suspend = ''
+ cmd = f'{args.qemu} {args.qargs} -g {socket_name}{suspend}' \
+ f' {args.binary}'
log(output, "QEMU CMD: %s" % (cmd))
inferior = subprocess.Popen(shlex.split(cmd))
@@ -91,6 +106,8 @@ if __name__ == '__main__':
gdb_cmd += " -ex 'target remote %s'" % (socket_name)
# finally the test script itself
if args.test:
+ if args.test_args:
+ gdb_cmd += f" -ex \"py sys.argv={args.test_args}\""
gdb_cmd += " -x %s" % (args.test)
diff --git a/tests/guest-debug/test_gdbstub.py b/tests/guest-debug/test_gdbstub.py
index 46fbf98..4f08089 100644
--- a/tests/guest-debug/test_gdbstub.py
+++ b/tests/guest-debug/test_gdbstub.py
@@ -2,6 +2,7 @@
"""
from __future__ import print_function
+import argparse
import gdb
import os
import sys
@@ -10,6 +11,16 @@ import traceback
fail_count = 0
+def gdb_exit(status):
+ gdb.execute(f"exit {status}")
+
+
+class arg_parser(argparse.ArgumentParser):
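+ # argparse's default error handling calls sys.exit(); inside gdb we
+ # report the bad argument and make gdb itself exit with a failure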
+ def exit(self, status=None, message=""):
+ print("Wrong GDB script test argument! " + message)
+ gdb_exit(1)
+
+
def report(cond, msg):
"""Report success/fail of a test"""
if cond:
@@ -33,11 +44,11 @@ def main(test, expected_arch=None):
"connected to {}".format(expected_arch))
except (gdb.error, AttributeError):
print("SKIP: not connected")
- exit(0)
+ gdb_exit(0)
if gdb.parse_and_eval("$pc") == 0:
print("SKIP: PC not set")
- exit(0)
+ gdb_exit(0)
try:
test()
@@ -57,4 +68,4 @@ def main(test, expected_arch=None):
pass
print("All tests complete: {} failures".format(fail_count))
- gdb.execute(f"exit {fail_count}")
+ gdb_exit(fail_count)
diff --git a/tests/include/meson.build b/tests/include/meson.build
index 9abba30..8e8d1ec 100644
--- a/tests/include/meson.build
+++ b/tests/include/meson.build
@@ -13,4 +13,4 @@ test_qapi_outputs_extra = [
test_qapi_files_extra = custom_target('QAPI test (include)',
output: test_qapi_outputs_extra,
input: test_qapi_files,
- command: 'true')
+ command: [python, '-c', ''])
diff --git a/tests/lcitool/libvirt-ci b/tests/lcitool/libvirt-ci
-Subproject 0e9490cebc726ef772b6c9e27dac32e7ae99f9b
+Subproject 18c4bfe02c467e5639bf9a687139735ccd7a3ff
diff --git a/tests/lcitool/mappings.yml b/tests/lcitool/mappings.yml
index 03b974a..8f0e95e 100644
--- a/tests/lcitool/mappings.yml
+++ b/tests/lcitool/mappings.yml
@@ -1,9 +1,17 @@
mappings:
+ # Too old on Ubuntu 22.04; we install it from cargo instead
+ bindgen:
+ Ubuntu2204:
+
flake8:
OpenSUSELeap15:
meson:
OpenSUSELeap15:
+ # Use Meson from PyPI wherever Rust is enabled
+ Debian:
+ Fedora:
+ Ubuntu:
python3:
OpenSUSELeap15: python311-base
@@ -60,10 +68,15 @@ mappings:
python3-wheel:
OpenSUSELeap15: python311-pip
+ rust:
+ Debian12: rustc-web
+ Ubuntu2204: rustc-1.77
+ Ubuntu2404: rustc-1.77
+
pypi_mappings:
# Request more recent version
meson:
- default: meson==0.63.2
+ default: meson==1.8.1
# Drop packages that need devel headers
python3-numpy:
diff --git a/tests/lcitool/projects/qemu.yml b/tests/lcitool/projects/qemu.yml
index 0c85784..c07242f 100644
--- a/tests/lcitool/projects/qemu.yml
+++ b/tests/lcitool/projects/qemu.yml
@@ -3,6 +3,7 @@ packages:
- alsa
- bash
- bc
+ - bindgen
- bison
- brlapi
- bzip2
@@ -32,6 +33,7 @@ packages:
- glusterfs
- gnutls
- gtk3
+ - gtk-vnc
- hostname
- json-c
- libaio
@@ -41,6 +43,7 @@ packages:
- libc-static
- libcacard
- libcap-ng
+ - libcbor
- libcurl
- libdrm
- libepoxy
@@ -100,6 +103,7 @@ packages:
- python3-tomli
- python3-venv
- rpm2cpio
+ - rust
- sdl2
- sdl2-image
- sed
@@ -118,6 +122,7 @@ packages:
- usbredir
- virglrenderer
- vte
+ - vulkan-tools
- which
- xen
- xorriso
diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh
index ac803e3..d3488b2 100755
--- a/tests/lcitool/refresh
+++ b/tests/lcitool/refresh
@@ -116,6 +116,42 @@ debian12_extras = [
"ENV QEMU_CONFIGURE_OPTS --enable-netmap\n"
]
+# Based on the hub.docker.com/library/rust Dockerfiles
+fedora_rustup_nightly_extras = [
+ "RUN dnf install -y wget\n",
+ "ENV RUSTUP_HOME=/usr/local/rustup CARGO_HOME=/usr/local/cargo\n",
+ "ENV RUSTC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustc\n",
+ "ENV RUSTDOC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustdoc\n",
+ "ENV CARGO=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/cargo\n",
+ "RUN set -eux && \\\n",
+ " rustArch='x86_64-unknown-linux-gnu' && \\\n",
+ " rustupSha256='6aeece6993e902708983b209d04c0d1dbb14ebb405ddb87def578d41f920f56d' && \\\n",
+ ' url="https://static.rust-lang.org/rustup/archive/1.27.1/${rustArch}/rustup-init" && \\\n',
+ ' wget "$url" && \\\n',
+ ' echo "${rustupSha256} *rustup-init" | sha256sum -c - && \\\n',
+ " chmod +x rustup-init && \\\n",
+ " ./rustup-init -y --no-modify-path --profile default --default-toolchain nightly --default-host ${rustArch} && \\\n",
+ " chmod -R a+w $RUSTUP_HOME $CARGO_HOME && \\\n",
+ " /usr/local/cargo/bin/rustup --version && \\\n",
+ " /usr/local/cargo/bin/rustup run nightly cargo --version && \\\n",
+ " /usr/local/cargo/bin/rustup run nightly rustc --version && \\\n",
+ ' test "$CARGO" = "$(/usr/local/cargo/bin/rustup +nightly which cargo)" && \\\n',
+ ' test "$RUSTDOC" = "$(/usr/local/cargo/bin/rustup +nightly which rustdoc)" && \\\n',
+ ' test "$RUSTC" = "$(/usr/local/cargo/bin/rustup +nightly which rustc)"\n',
+ 'ENV PATH=$CARGO_HOME/bin:$PATH\n',
+ 'RUN /usr/local/cargo/bin/rustup run nightly cargo install bindgen-cli\n',
+ 'RUN $CARGO --list\n',
+]
+
+ubuntu2204_rust_extras = [
+ "ENV RUSTC=/usr/bin/rustc-1.77\n",
+ "ENV RUSTDOC=/usr/bin/rustdoc-1.77\n",
+ "ENV CARGO_HOME=/usr/local/cargo\n",
+ 'ENV PATH=$CARGO_HOME/bin:$PATH\n',
+ "RUN DEBIAN_FRONTEND=noninteractive eatmydata \\\n",
+ " apt install -y --no-install-recommends cargo\n",
+ 'RUN cargo install bindgen-cli\n',
+]
def cross_build(prefix, targets):
conf = "ENV QEMU_CONFIGURE_OPTS --cross-prefix=%s\n" % (prefix)
@@ -131,13 +167,20 @@ try:
#
# Standard native builds
#
- generate_dockerfile("alpine", "alpine-319")
+ generate_dockerfile("alpine", "alpine-321")
generate_dockerfile("centos9", "centos-stream-9")
generate_dockerfile("debian", "debian-12",
trailer="".join(debian12_extras))
generate_dockerfile("fedora", "fedora-40")
generate_dockerfile("opensuse-leap", "opensuse-leap-15")
- generate_dockerfile("ubuntu2204", "ubuntu-2204")
+ generate_dockerfile("ubuntu2204", "ubuntu-2204",
+ trailer="".join(ubuntu2204_rust_extras))
+
+ #
+ # Non-fatal Rust-enabled build
+ #
+ generate_dockerfile("fedora-rust-nightly", "fedora-40",
+ trailer="".join(fedora_rustup_nightly_extras))
#
# Cross compiling builds
@@ -154,30 +197,24 @@ try:
trailer=cross_build("aarch64-linux-gnu-",
"aarch64-softmmu,aarch64-linux-user"))
- # migration to bookworm stalled: https://lists.debian.org/debian-arm/2023/09/msg00006.html
- generate_dockerfile("debian-armel-cross", "debian-11",
- cross="armv6l",
- trailer=cross_build("arm-linux-gnueabi-",
- "arm-softmmu,arm-linux-user,armeb-linux-user"))
-
generate_dockerfile("debian-armhf-cross", "debian-12",
cross="armv7l",
trailer=cross_build("arm-linux-gnueabihf-",
"arm-softmmu,arm-linux-user"))
- generate_dockerfile("debian-i686-cross", "debian-11",
+ generate_dockerfile("debian-i686-cross", "debian-12",
cross="i686",
trailer=cross_build("i686-linux-gnu-",
"x86_64-softmmu,"
"x86_64-linux-user,"
"i386-softmmu,i386-linux-user"))
- generate_dockerfile("debian-mips64el-cross", "debian-11",
+ generate_dockerfile("debian-mips64el-cross", "debian-12",
cross="mips64el",
trailer=cross_build("mips64el-linux-gnuabi64-",
"mips64el-softmmu,mips64el-linux-user"))
- generate_dockerfile("debian-mipsel-cross", "debian-11",
+ generate_dockerfile("debian-mipsel-cross", "debian-12",
cross="mipsel",
trailer=cross_build("mipsel-linux-gnu-",
"mipsel-softmmu,mipsel-linux-user"))
@@ -187,7 +224,9 @@ try:
trailer=cross_build("powerpc64le-linux-gnu-",
"ppc64-softmmu,ppc64-linux-user"))
- generate_dockerfile("debian-riscv64-cross", "debian-sid",
+ # while not yet a release architecture, the packages are still
+ # built while part of testing
+ generate_dockerfile("debian-riscv64-cross", "debian-13",
project="qemu-minimal",
cross="riscv64",
trailer=cross_build("riscv64-linux-gnu-",
@@ -207,14 +246,13 @@ try:
#
# Cirrus packages lists for GitLab
#
- generate_cirrus("freebsd-13")
- generate_cirrus("macos-13")
+ generate_cirrus("freebsd-14")
generate_cirrus("macos-14")
#
# VM packages lists
#
- generate_pkglist("freebsd", "freebsd-13")
+ generate_pkglist("freebsd", "freebsd-14")
#
# Ansible package lists
diff --git a/tests/meson.build b/tests/meson.build
index acb6807..c596192 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -16,6 +16,8 @@ test_qapi_outputs = [
'test-qapi-events-sub-sub-module.h',
'test-qapi-events.c',
'test-qapi-events.h',
+ 'test-qapi-features.c',
+ 'test-qapi-features.h',
'test-qapi-init-commands.c',
'test-qapi-init-commands.h',
'test-qapi-introspect.c',
@@ -78,10 +80,11 @@ subdir('decode')
if 'CONFIG_TCG' in config_all_accel
subdir('fp')
- subdir('plugin')
+ subdir('tcg/plugins')
endif
subdir('unit')
subdir('qapi-schema')
subdir('qtest')
-subdir('migration')
+subdir('migration-stress')
+subdir('functional')
diff --git a/tests/migration/guestperf-batch.py b/tests/migration-stress/guestperf-batch.py
index 9485eef..9485eef 100755
--- a/tests/migration/guestperf-batch.py
+++ b/tests/migration-stress/guestperf-batch.py
diff --git a/tests/migration/guestperf-plot.py b/tests/migration-stress/guestperf-plot.py
index 32977b4..32977b4 100755
--- a/tests/migration/guestperf-plot.py
+++ b/tests/migration-stress/guestperf-plot.py
diff --git a/tests/migration/guestperf.py b/tests/migration-stress/guestperf.py
index 07182f2..07182f2 100755
--- a/tests/migration/guestperf.py
+++ b/tests/migration-stress/guestperf.py
diff --git a/tests/migration/guestperf/__init__.py b/tests/migration-stress/guestperf/__init__.py
index e69de29..e69de29 100644
--- a/tests/migration/guestperf/__init__.py
+++ b/tests/migration-stress/guestperf/__init__.py
diff --git a/tests/migration-stress/guestperf/comparison.py b/tests/migration-stress/guestperf/comparison.py
new file mode 100644
index 0000000..dee3ac2
--- /dev/null
+++ b/tests/migration-stress/guestperf/comparison.py
@@ -0,0 +1,174 @@
+#
+# Migration test scenario comparison mapping
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+from guestperf.scenario import Scenario
+
+class Comparison(object):
+ def __init__(self, name, scenarios):
+ self._name = name
+ self._scenarios = scenarios
+
+COMPARISONS = [
+ # Looking at effect of pausing guest during migration
+ # at various stages of iteration over RAM
+ Comparison("pause-iters", scenarios = [
+ Scenario("pause-iters-0",
+ pause=True, pause_iters=0),
+ Scenario("pause-iters-1",
+ pause=True, pause_iters=1),
+ Scenario("pause-iters-5",
+ pause=True, pause_iters=5),
+ Scenario("pause-iters-20",
+ pause=True, pause_iters=20),
+ ]),
+
+
+ # Looking at use of post-copy in relation to bandwidth
+ # available for migration
+ Comparison("post-copy-bandwidth", scenarios = [
+ Scenario("post-copy-bw-100mbs",
+ post_copy=True, bandwidth=12),
+ Scenario("post-copy-bw-300mbs",
+ post_copy=True, bandwidth=37),
+ Scenario("post-copy-bw-1gbs",
+ post_copy=True, bandwidth=125),
+ Scenario("post-copy-bw-10gbs",
+ post_copy=True, bandwidth=1250),
+ Scenario("post-copy-bw-100gbs",
+ post_copy=True, bandwidth=12500),
+ ]),
+
+
+ # Looking at effect of starting post-copy at different
+ # stages of the migration
+ Comparison("post-copy-iters", scenarios = [
+ Scenario("post-copy-iters-0",
+ post_copy=True, post_copy_iters=0),
+ Scenario("post-copy-iters-1",
+ post_copy=True, post_copy_iters=1),
+ Scenario("post-copy-iters-5",
+ post_copy=True, post_copy_iters=5),
+ Scenario("post-copy-iters-20",
+ post_copy=True, post_copy_iters=20),
+ ]),
+
+
+ # Looking at effect of auto-converge with different
+ # throttling percentage step rates
+ Comparison("auto-converge-iters", scenarios = [
+ Scenario("auto-converge-step-5",
+ auto_converge=True, auto_converge_step=5),
+ Scenario("auto-converge-step-10",
+ auto_converge=True, auto_converge_step=10),
+ Scenario("auto-converge-step-20",
+ auto_converge=True, auto_converge_step=20),
+ ]),
+
+
+ # Looking at use of auto-converge in relation to bandwidth
+ # available for migration
+ Comparison("auto-converge-bandwidth", scenarios = [
+ Scenario("auto-converge-bw-100mbs",
+ auto_converge=True, bandwidth=12),
+ Scenario("auto-converge-bw-300mbs",
+ auto_converge=True, bandwidth=37),
+ Scenario("auto-converge-bw-1gbs",
+ auto_converge=True, bandwidth=125),
+ Scenario("auto-converge-bw-10gbs",
+ auto_converge=True, bandwidth=1250),
+ Scenario("auto-converge-bw-100gbs",
+ auto_converge=True, bandwidth=12500),
+ ]),
+
+
+ # Looking at effect of multi-thread compression with
+ # varying numbers of threads
+ Comparison("compr-mt", scenarios = [
+ Scenario("compr-mt-threads-1",
+ compression_mt=True, compression_mt_threads=1),
+ Scenario("compr-mt-threads-2",
+ compression_mt=True, compression_mt_threads=2),
+ Scenario("compr-mt-threads-4",
+ compression_mt=True, compression_mt_threads=4),
+ ]),
+
+
+ # Looking at effect of xbzrle compression with varying
+ # cache sizes
+ Comparison("compr-xbzrle", scenarios = [
+ Scenario("compr-xbzrle-cache-5",
+ compression_xbzrle=True, compression_xbzrle_cache=5),
+ Scenario("compr-xbzrle-cache-10",
+ compression_xbzrle=True, compression_xbzrle_cache=10),
+ Scenario("compr-xbzrle-cache-20",
+ compression_xbzrle=True, compression_xbzrle_cache=20),
+ Scenario("compr-xbzrle-cache-50",
+ compression_xbzrle=True, compression_xbzrle_cache=50),
+ ]),
+
+
+ # Looking at effect of multifd with
+ # varying numbers of channels
+ Comparison("compr-multifd", scenarios = [
+ Scenario("compr-multifd-channels-4",
+ multifd=True, multifd_channels=4),
+ Scenario("compr-multifd-channels-8",
+ multifd=True, multifd_channels=8),
+ Scenario("compr-multifd-channels-32",
+ multifd=True, multifd_channels=32),
+ Scenario("compr-multifd-channels-64",
+ multifd=True, multifd_channels=64),
+ ]),
+
+ # Looking at effect of dirty-limit with
+ # varying x_vcpu_dirty_limit_period
+ Comparison("compr-dirty-limit-period", scenarios = [
+ Scenario("compr-dirty-limit-period-500",
+ dirty_limit=True, x_vcpu_dirty_limit_period=500),
+ Scenario("compr-dirty-limit-period-800",
+ dirty_limit=True, x_vcpu_dirty_limit_period=800),
+ Scenario("compr-dirty-limit-period-1000",
+ dirty_limit=True, x_vcpu_dirty_limit_period=1000),
+ ]),
+
+
+ # Looking at effect of dirty-limit with
+ # varying vcpu_dirty_limit
+ Comparison("compr-dirty-limit", scenarios = [
+ Scenario("compr-dirty-limit-10MB",
+ dirty_limit=True, vcpu_dirty_limit=10),
+ Scenario("compr-dirty-limit-20MB",
+ dirty_limit=True, vcpu_dirty_limit=20),
+ Scenario("compr-dirty-limit-50MB",
+ dirty_limit=True, vcpu_dirty_limit=50),
+ ]),
+
+ # Looking at effect of multifd with
+ # different compression algorithms
+ Comparison("compr-multifd-compression", scenarios = [
+ Scenario("compr-multifd-compression-zlib",
+ multifd=True, multifd_channels=2, multifd_compression="zlib"),
+ Scenario("compr-multifd-compression-zstd",
+ multifd=True, multifd_channels=2, multifd_compression="zstd"),
+ Scenario("compr-multifd-compression-qpl",
+ multifd=True, multifd_channels=2, multifd_compression="qpl"),
+ Scenario("compr-multifd-compression-uadk",
+ multifd=True, multifd_channels=2, multifd_compression="uadk"),
+ ]),
+]
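
The COMPARISONS table added above is just a list of Comparison objects, each bundling a name with a set of Scenario objects; the batch shell later walks it with an fnmatch filter. A minimal sketch of that iteration pattern, assuming tests/migration-stress is on sys.path so the guestperf package imports (the relative path below is an assumption for illustration):

    import fnmatch
    import os
    import sys

    # Assumption: run from a QEMU source tree so this relative path exists.
    sys.path.append(os.path.join("tests", "migration-stress"))
    from guestperf.comparison import COMPARISONS

    def list_scenarios(pattern="*"):
        # Yield "comparison/scenario" names matching an fnmatch pattern,
        # mirroring how the batch shell filters the table.
        for comparison in COMPARISONS:
            for scenario in comparison._scenarios:
                name = os.path.join(comparison._name, scenario._name)
                if fnmatch.fnmatch(name, pattern):
                    yield name

    if __name__ == "__main__":
        for name in list_scenarios("compr-multifd*"):
            print(name)
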
diff --git a/tests/migration-stress/guestperf/engine.py b/tests/migration-stress/guestperf/engine.py
new file mode 100644
index 0000000..d8462db
--- /dev/null
+++ b/tests/migration-stress/guestperf/engine.py
@@ -0,0 +1,536 @@
+#
+# Migration test main engine
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import os
+import re
+import sys
+import time
+
+from guestperf.progress import Progress, ProgressStats
+from guestperf.report import Report, ReportResult
+from guestperf.timings import TimingRecord, Timings
+
+sys.path.append(os.path.join(os.path.dirname(__file__),
+ '..', '..', '..', 'python'))
+from qemu.machine import QEMUMachine
+
+# multifd supported compression algorithms
+MULTIFD_CMP_ALGS = ("zlib", "zstd", "qpl", "uadk")
+
+class Engine(object):
+
+ def __init__(self, binary, dst_host, kernel, initrd, transport="tcp",
+ sleep=15, verbose=False, debug=False):
+
+ self._binary = binary # Path to QEMU binary
+ self._dst_host = dst_host # Hostname of target host
+ self._kernel = kernel # Path to kernel image
+ self._initrd = initrd # Path to stress initrd
+ self._transport = transport # 'unix' or 'tcp' or 'rdma'
+ self._sleep = sleep
+ self._verbose = verbose
+ self._debug = debug
+
+ if debug:
+ self._verbose = debug
+
+ def _vcpu_timing(self, pid, tid_list):
+ records = []
+ now = time.time()
+
+ jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
+ for tid in tid_list:
+ statfile = "/proc/%d/task/%d/stat" % (pid, tid)
+ with open(statfile, "r") as fh:
+ stat = fh.readline()
+ fields = stat.split(" ")
+ stime = int(fields[13])
+ utime = int(fields[14])
+ records.append(TimingRecord(tid, now, 1000 * (stime + utime) / jiffies_per_sec))
+ return records
+
+ def _cpu_timing(self, pid):
+ now = time.time()
+
+ jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
+ statfile = "/proc/%d/stat" % pid
+ with open(statfile, "r") as fh:
+ stat = fh.readline()
+ fields = stat.split(" ")
+ stime = int(fields[13])
+ utime = int(fields[14])
+ return TimingRecord(pid, now, 1000 * (stime + utime) / jiffies_per_sec)
+
+ def _migrate_progress(self, vm):
+ info = vm.cmd("query-migrate")
+
+ if "ram" not in info:
+ info["ram"] = {}
+
+ return Progress(
+ info.get("status", "active"),
+ ProgressStats(
+ info["ram"].get("transferred", 0),
+ info["ram"].get("remaining", 0),
+ info["ram"].get("total", 0),
+ info["ram"].get("duplicate", 0),
+ info["ram"].get("skipped", 0),
+ info["ram"].get("normal", 0),
+ info["ram"].get("normal-bytes", 0),
+ info["ram"].get("dirty-pages-rate", 0),
+ info["ram"].get("mbps", 0),
+ info["ram"].get("dirty-sync-count", 0)
+ ),
+ time.time(),
+ info.get("total-time", 0),
+ info.get("downtime", 0),
+ info.get("expected-downtime", 0),
+ info.get("setup-time", 0),
+ info.get("cpu-throttle-percentage", 0),
+ info.get("dirty-limit-throttle-time-per-round", 0),
+ info.get("dirty-limit-ring-full-time", 0),
+ )
+
+ def _migrate(self, hardware, scenario, src,
+ dst, connect_uri, defer_migrate):
+ src_qemu_time = []
+ src_vcpu_time = []
+ src_pid = src.get_pid()
+
+ vcpus = src.cmd("query-cpus-fast")
+ src_threads = []
+ for vcpu in vcpus:
+ src_threads.append(vcpu["thread-id"])
+
+ # XXX how to get dst timings on remote host ?
+
+ if self._verbose:
+ print("Sleeping %d seconds for initial guest workload run" % self._sleep)
+ sleep_secs = self._sleep
+ while sleep_secs > 1:
+ src_qemu_time.append(self._cpu_timing(src_pid))
+ src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
+ time.sleep(1)
+ sleep_secs -= 1
+
+ if self._verbose:
+ print("Starting migration")
+ if scenario._auto_converge:
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "auto-converge",
+ "state": True }
+ ])
+ resp = src.cmd("migrate-set-parameters",
+ cpu_throttle_increment=scenario._auto_converge_step)
+
+ if scenario._post_copy:
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "postcopy-ram",
+ "state": True }
+ ])
+ resp = dst.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "postcopy-ram",
+ "state": True }
+ ])
+
+ resp = src.cmd("migrate-set-parameters",
+ max_bandwidth=scenario._bandwidth * 1024 * 1024)
+
+ resp = src.cmd("migrate-set-parameters",
+ downtime_limit=scenario._downtime)
+
+ if scenario._compression_mt:
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "compress",
+ "state": True }
+ ])
+ resp = src.cmd("migrate-set-parameters",
+ compress_threads=scenario._compression_mt_threads)
+ resp = dst.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "compress",
+ "state": True }
+ ])
+ resp = dst.cmd("migrate-set-parameters",
+ decompress_threads=scenario._compression_mt_threads)
+
+ if scenario._compression_xbzrle:
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "xbzrle",
+ "state": True }
+ ])
+ resp = dst.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "xbzrle",
+ "state": True }
+ ])
+ resp = src.cmd("migrate-set-parameters",
+ xbzrle_cache_size=(
+ hardware._mem *
+ 1024 * 1024 * 1024 / 100 *
+ scenario._compression_xbzrle_cache))
+
+ if scenario._multifd:
+ if (scenario._multifd_compression and
+ (scenario._multifd_compression not in MULTIFD_CMP_ALGS)):
+ raise Exception("unsupported multifd compression "
+ "algorithm: %s" %
+ scenario._multifd_compression)
+
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "multifd",
+ "state": True }
+ ])
+ resp = src.cmd("migrate-set-parameters",
+ multifd_channels=scenario._multifd_channels)
+ resp = dst.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "multifd",
+ "state": True }
+ ])
+ resp = dst.cmd("migrate-set-parameters",
+ multifd_channels=scenario._multifd_channels)
+
+ if scenario._multifd_compression:
+ resp = src.cmd("migrate-set-parameters",
+ multifd_compression=scenario._multifd_compression)
+ resp = dst.cmd("migrate-set-parameters",
+ multifd_compression=scenario._multifd_compression)
+
+ if scenario._dirty_limit:
+ if not hardware._dirty_ring_size:
+ raise Exception("dirty ring size must be configured when "
+ "testing dirty limit migration")
+
+ resp = src.cmd("migrate-set-capabilities",
+ capabilities = [
+ { "capability": "dirty-limit",
+ "state": True }
+ ])
+ resp = src.cmd("migrate-set-parameters",
+ x_vcpu_dirty_limit_period=scenario._x_vcpu_dirty_limit_period)
+ resp = src.cmd("migrate-set-parameters",
+ vcpu_dirty_limit=scenario._vcpu_dirty_limit)
+
+ if defer_migrate:
+ resp = dst.cmd("migrate-incoming", uri=connect_uri)
+ resp = src.cmd("migrate", uri=connect_uri)
+
+ post_copy = False
+ paused = False
+
+ progress_history = []
+
+ start = time.time()
+ loop = 0
+ while True:
+ loop = loop + 1
+ time.sleep(0.05)
+
+ progress = self._migrate_progress(src)
+ if (loop % 20) == 0:
+ src_qemu_time.append(self._cpu_timing(src_pid))
+ src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
+
+ if (len(progress_history) == 0 or
+ (progress_history[-1]._ram._iterations <
+ progress._ram._iterations)):
+ progress_history.append(progress)
+
+ if progress._status in ("completed", "failed", "cancelled"):
+ if progress._status == "completed" and paused:
+ dst.cmd("cont")
+ if progress_history[-1] != progress:
+ progress_history.append(progress)
+
+ if progress._status == "completed":
+ if self._verbose:
+ print("Sleeping %d seconds for final guest workload run" % self._sleep)
+ sleep_secs = self._sleep
+ while sleep_secs > 1:
+ time.sleep(1)
+ src_qemu_time.append(self._cpu_timing(src_pid))
+ src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
+ sleep_secs -= 1
+
+ result = ReportResult()
+ if progress._status == "completed" and not paused:
+ result = ReportResult(True)
+
+ return [progress_history, src_qemu_time, src_vcpu_time, result]
+
+ if self._verbose and (loop % 20) == 0:
+ print("Iter %d: remain %5dMB of %5dMB (total %5dMB @ %5dMb/sec)" % (
+ progress._ram._iterations,
+ progress._ram._remaining_bytes / (1024 * 1024),
+ progress._ram._total_bytes / (1024 * 1024),
+ progress._ram._transferred_bytes / (1024 * 1024),
+ progress._ram._transfer_rate_mbs,
+ ))
+
+ if progress._ram._iterations > scenario._max_iters:
+ if self._verbose:
+ print("No completion after %d iterations over RAM" % scenario._max_iters)
+ src.cmd("migrate_cancel")
+ continue
+
+ if time.time() > (start + scenario._max_time):
+ if self._verbose:
+ print("No completion after %d seconds" % scenario._max_time)
+ src.cmd("migrate_cancel")
+ continue
+
+ if (scenario._post_copy and
+ progress._ram._iterations >= scenario._post_copy_iters and
+ not post_copy):
+ if self._verbose:
+ print("Switching to post-copy after %d iterations" % scenario._post_copy_iters)
+ resp = src.cmd("migrate-start-postcopy")
+ post_copy = True
+
+ if (scenario._pause and
+ progress._ram._iterations >= scenario._pause_iters and
+ not paused):
+ if self._verbose:
+ print("Pausing VM after %d iterations" % scenario._pause_iters)
+ resp = src.cmd("stop")
+ paused = True
+
+ def _is_ppc64le(self):
+ _, _, _, _, machine = os.uname()
+ if machine == "ppc64le":
+ return True
+ return False
+
+ def _get_guest_console_args(self):
+ if self._is_ppc64le():
+ return "console=hvc0"
+ else:
+ return "console=ttyS0"
+
+ def _get_qemu_serial_args(self):
+ if self._is_ppc64le():
+ return ["-chardev", "stdio,id=cdev0",
+ "-device", "spapr-vty,chardev=cdev0"]
+ else:
+ return ["-chardev", "stdio,id=cdev0",
+ "-device", "isa-serial,chardev=cdev0"]
+
+ def _get_common_args(self, hardware, tunnelled=False):
+ args = [
+ "noapic",
+ "edd=off",
+ "printk.time=1",
+ "noreplace-smp",
+ "cgroup_disable=memory",
+ "pci=noearly",
+ ]
+
+ args.append(self._get_guest_console_args())
+
+ if self._debug:
+ args.append("debug")
+ else:
+ args.append("quiet")
+
+ args.append("ramsize=%s" % hardware._mem)
+
+ cmdline = " ".join(args)
+ if tunnelled:
+ cmdline = "'" + cmdline + "'"
+
+ argv = [
+ "-cpu", "host",
+ "-kernel", self._kernel,
+ "-initrd", self._initrd,
+ "-append", cmdline,
+ "-m", str((hardware._mem * 1024) + 512),
+ "-smp", str(hardware._cpus),
+ ]
+ if hardware._dirty_ring_size:
+ argv.extend(["-accel", "kvm,dirty-ring-size=%s" %
+ hardware._dirty_ring_size])
+ else:
+ argv.extend(["-accel", "kvm"])
+
+ argv.extend(self._get_qemu_serial_args())
+
+ if self._debug:
+ argv.extend(["-machine", "graphics=off"])
+
+ if hardware._prealloc_pages:
+ argv_source += ["-mem-path", "/dev/shm",
+ "-mem-prealloc"]
+ if hardware._locked_pages:
+ argv_source += ["-overcommit", "mem-lock=on"]
+ if hardware._huge_pages:
+ pass
+
+ return argv
+
+ def _get_src_args(self, hardware):
+ return self._get_common_args(hardware)
+
+ def _get_dst_args(self, hardware, uri, defer_migrate):
+ tunnelled = False
+ if self._dst_host != "localhost":
+ tunnelled = True
+ argv = self._get_common_args(hardware, tunnelled)
+
+ if defer_migrate:
+ return argv + ["-incoming", "defer"]
+ return argv + ["-incoming", uri]
+
+ @staticmethod
+ def _get_common_wrapper(cpu_bind, mem_bind):
+ wrapper = []
+ if len(cpu_bind) > 0 or len(mem_bind) > 0:
+ wrapper.append("numactl")
+ if cpu_bind:
+ wrapper.append("--physcpubind=%s" % ",".join(cpu_bind))
+ if mem_bind:
+ wrapper.append("--membind=%s" % ",".join(mem_bind))
+
+ return wrapper
+
+ def _get_src_wrapper(self, hardware):
+ return self._get_common_wrapper(hardware._src_cpu_bind, hardware._src_mem_bind)
+
+ def _get_dst_wrapper(self, hardware):
+ wrapper = self._get_common_wrapper(hardware._dst_cpu_bind, hardware._dst_mem_bind)
+ if self._dst_host != "localhost":
+ return ["ssh",
+ "-R", "9001:localhost:9001",
+ self._dst_host] + wrapper
+ else:
+ return wrapper
+
+ def _get_timings(self, vm):
+ log = vm.get_log()
+ if not log:
+ return []
+ if self._debug:
+ print(log)
+
+ regex = r"[^\s]+\s\((\d+)\):\sINFO:\s(\d+)ms\scopied\s\d+\sGB\sin\s(\d+)ms"
+ matcher = re.compile(regex)
+ records = []
+ for line in log.split("\n"):
+ match = matcher.match(line)
+ if match:
+ records.append(TimingRecord(int(match.group(1)),
+ int(match.group(2)) / 1000.0,
+ int(match.group(3))))
+ return records
+
+ def run(self, hardware, scenario, result_dir=os.getcwd()):
+ abs_result_dir = os.path.join(result_dir, scenario._name)
+ defer_migrate = False
+
+ if self._transport == "tcp":
+ uri = "tcp:%s:9000" % self._dst_host
+ elif self._transport == "rdma":
+ uri = "rdma:%s:9000" % self._dst_host
+ elif self._transport == "unix":
+ if self._dst_host != "localhost":
+ raise Exception("Running use unix migration transport for non-local host")
+ uri = "unix:/var/tmp/qemu-migrate-%d.migrate" % os.getpid()
+ try:
+ os.remove(uri[5:])
+ os.remove(monaddr)
+ except:
+ pass
+
+ if scenario._multifd:
+ defer_migrate = True
+
+ if self._dst_host != "localhost":
+ dstmonaddr = ("localhost", 9001)
+ else:
+ dstmonaddr = "/var/tmp/qemu-dst-%d-monitor.sock" % os.getpid()
+ srcmonaddr = "/var/tmp/qemu-src-%d-monitor.sock" % os.getpid()
+
+ src = QEMUMachine(self._binary,
+ args=self._get_src_args(hardware),
+ wrapper=self._get_src_wrapper(hardware),
+ name="qemu-src-%d" % os.getpid(),
+ monitor_address=srcmonaddr)
+
+ dst = QEMUMachine(self._binary,
+ args=self._get_dst_args(hardware, uri, defer_migrate),
+ wrapper=self._get_dst_wrapper(hardware),
+ name="qemu-dst-%d" % os.getpid(),
+ monitor_address=dstmonaddr)
+
+ try:
+ src.launch()
+ dst.launch()
+
+ ret = self._migrate(hardware, scenario, src,
+ dst, uri, defer_migrate)
+ progress_history = ret[0]
+ qemu_timings = ret[1]
+ vcpu_timings = ret[2]
+ result = ret[3]
+ if uri[0:5] == "unix:" and os.path.exists(uri[5:]):
+ os.remove(uri[5:])
+
+ if os.path.exists(srcmonaddr):
+ os.remove(srcmonaddr)
+
+ if self._dst_host == "localhost" and os.path.exists(dstmonaddr):
+ os.remove(dstmonaddr)
+
+ if self._verbose:
+ print("Finished migration")
+
+ src.shutdown()
+ dst.shutdown()
+
+ return Report(hardware, scenario, progress_history,
+ Timings(self._get_timings(src) + self._get_timings(dst)),
+ Timings(qemu_timings),
+ Timings(vcpu_timings),
+ result,
+ self._binary, self._dst_host, self._kernel,
+ self._initrd, self._transport, self._sleep)
+ except Exception as e:
+ if self._debug:
+ print("Failed: %s" % str(e))
+ try:
+ src.shutdown()
+ except:
+ pass
+ try:
+ dst.shutdown()
+ except:
+ pass
+
+ if self._debug:
+ print(src.get_log())
+ print(dst.get_log())
+ raise
+
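
Engine._cpu_timing and _vcpu_timing above derive CPU usage from /proc/<pid>/stat: fields 14 and 15 (utime and stime, in clock ticks) are summed and converted to milliseconds via SC_CLK_TCK. A standalone sketch of that conversion, under the same assumption the engine makes that the process name in the stat line contains no spaces:

    import os
    import time

    def cpu_time_ms(pid):
        # Cumulative user+system CPU time of a process in milliseconds,
        # computed the same way as Engine._cpu_timing: the stat fields are
        # space-separated and counted in clock ticks (jiffies).
        ticks_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
        with open("/proc/%d/stat" % pid) as fh:
            fields = fh.readline().split(" ")
        utime = int(fields[13])
        stime = int(fields[14])
        return 1000 * (utime + stime) / ticks_per_sec

    if __name__ == "__main__":
        print("%.1f ms of CPU time at %s" % (cpu_time_ms(os.getpid()),
                                             time.time()))
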
diff --git a/tests/migration/guestperf/hardware.py b/tests/migration-stress/guestperf/hardware.py
index f779cc0..f779cc0 100644
--- a/tests/migration/guestperf/hardware.py
+++ b/tests/migration-stress/guestperf/hardware.py
diff --git a/tests/migration/guestperf/plot.py b/tests/migration-stress/guestperf/plot.py
index 30b3f66..30b3f66 100644
--- a/tests/migration/guestperf/plot.py
+++ b/tests/migration-stress/guestperf/plot.py
diff --git a/tests/migration/guestperf/progress.py b/tests/migration-stress/guestperf/progress.py
index d490584..d490584 100644
--- a/tests/migration/guestperf/progress.py
+++ b/tests/migration-stress/guestperf/progress.py
diff --git a/tests/migration-stress/guestperf/report.py b/tests/migration-stress/guestperf/report.py
new file mode 100644
index 0000000..e135e01
--- /dev/null
+++ b/tests/migration-stress/guestperf/report.py
@@ -0,0 +1,118 @@
+#
+# Migration test output result reporting
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+
+from guestperf.hardware import Hardware
+from guestperf.scenario import Scenario
+from guestperf.progress import Progress
+from guestperf.timings import Timings
+
+class ReportResult(object):
+
+ def __init__(self, success=False):
+ self._success = success
+
+ def serialize(self):
+ return {
+ "success": self._success,
+ }
+
+ @classmethod
+ def deserialize(cls, data):
+ return cls(
+ data["success"])
+
+
+class Report(object):
+
+ def __init__(self,
+ hardware,
+ scenario,
+ progress_history,
+ guest_timings,
+ qemu_timings,
+ vcpu_timings,
+ result,
+ binary,
+ dst_host,
+ kernel,
+ initrd,
+ transport,
+ sleep):
+
+ self._hardware = hardware
+ self._scenario = scenario
+ self._progress_history = progress_history
+ self._guest_timings = guest_timings
+ self._qemu_timings = qemu_timings
+ self._vcpu_timings = vcpu_timings
+ self._result = result
+ self._binary = binary
+ self._dst_host = dst_host
+ self._kernel = kernel
+ self._initrd = initrd
+ self._transport = transport
+ self._sleep = sleep
+
+ def serialize(self):
+ return {
+ "hardware": self._hardware.serialize(),
+ "scenario": self._scenario.serialize(),
+ "progress_history": [progress.serialize() for progress in self._progress_history],
+ "guest_timings": self._guest_timings.serialize(),
+ "qemu_timings": self._qemu_timings.serialize(),
+ "vcpu_timings": self._vcpu_timings.serialize(),
+ "result": self._result.serialize(),
+ "binary": self._binary,
+ "dst_host": self._dst_host,
+ "kernel": self._kernel,
+ "initrd": self._initrd,
+ "transport": self._transport,
+ "sleep": self._sleep,
+ }
+
+ @classmethod
+ def deserialize(cls, data):
+ return cls(
+ Hardware.deserialize(data["hardware"]),
+ Scenario.deserialize(data["scenario"]),
+ [Progress.deserialize(record) for record in data["progress_history"]],
+ Timings.deserialize(data["guest_timings"]),
+ Timings.deserialize(data["qemu_timings"]),
+ Timings.deserialize(data["vcpu_timings"]),
+ ReportResult.deserialize(data["result"]),
+ data["binary"],
+ data["dst_host"],
+ data["kernel"],
+ data["initrd"],
+ data["transport"],
+ data["sleep"])
+
+ def to_json(self):
+ return json.dumps(self.serialize(), indent=4)
+
+ @classmethod
+ def from_json(cls, data):
+ return cls.deserialize(json.loads(data))
+
+ @classmethod
+ def from_json_file(cls, filename):
+ with open(filename, "r") as fh:
+ return cls.deserialize(json.load(fh))
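
ReportResult and Report above serialize to plain dicts, so persisting and reloading a result is a straightforward JSON round trip. A minimal sketch, assuming the guestperf package is importable:

    from guestperf.report import ReportResult

    result = ReportResult(True)
    data = result.serialize()            # {"success": True}
    clone = ReportResult.deserialize(data)
    assert clone._success == result._success
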
diff --git a/tests/migration-stress/guestperf/scenario.py b/tests/migration-stress/guestperf/scenario.py
new file mode 100644
index 0000000..4be7faf
--- /dev/null
+++ b/tests/migration-stress/guestperf/scenario.py
@@ -0,0 +1,115 @@
+#
+# Migration test scenario parameter description
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+
+class Scenario(object):
+
+ def __init__(self, name,
+ downtime=500,
+ bandwidth=125000, # 1000 gig-e, effectively unlimited
+ max_iters=30,
+ max_time=300,
+ pause=False, pause_iters=5,
+ post_copy=False, post_copy_iters=5,
+ auto_converge=False, auto_converge_step=10,
+ compression_mt=False, compression_mt_threads=1,
+ compression_xbzrle=False, compression_xbzrle_cache=10,
+ multifd=False, multifd_channels=2, multifd_compression="",
+ dirty_limit=False, x_vcpu_dirty_limit_period=500,
+ vcpu_dirty_limit=1):
+
+ self._name = name
+
+ # General migration tunables
+ self._downtime = downtime # milliseconds
+ self._bandwidth = bandwidth # MiB per second
+ self._max_iters = max_iters
+ self._max_time = max_time # seconds
+
+
+ # Strategies for ensuring completion
+ self._pause = pause
+ self._pause_iters = pause_iters
+
+ self._post_copy = post_copy
+ self._post_copy_iters = post_copy_iters
+
+ self._auto_converge = auto_converge
+ self._auto_converge_step = auto_converge_step # percentage CPU time
+
+ self._compression_mt = compression_mt
+ self._compression_mt_threads = compression_mt_threads
+
+ self._compression_xbzrle = compression_xbzrle
+ self._compression_xbzrle_cache = compression_xbzrle_cache # percentage of guest RAM
+
+ self._multifd = multifd
+ self._multifd_channels = multifd_channels
+ self._multifd_compression = multifd_compression
+
+ self._dirty_limit = dirty_limit
+ self._x_vcpu_dirty_limit_period = x_vcpu_dirty_limit_period
+ self._vcpu_dirty_limit = vcpu_dirty_limit
+
+ def serialize(self):
+ return {
+ "name": self._name,
+ "downtime": self._downtime,
+ "bandwidth": self._bandwidth,
+ "max_iters": self._max_iters,
+ "max_time": self._max_time,
+ "pause": self._pause,
+ "pause_iters": self._pause_iters,
+ "post_copy": self._post_copy,
+ "post_copy_iters": self._post_copy_iters,
+ "auto_converge": self._auto_converge,
+ "auto_converge_step": self._auto_converge_step,
+ "compression_mt": self._compression_mt,
+ "compression_mt_threads": self._compression_mt_threads,
+ "compression_xbzrle": self._compression_xbzrle,
+ "compression_xbzrle_cache": self._compression_xbzrle_cache,
+ "multifd": self._multifd,
+ "multifd_channels": self._multifd_channels,
+ "multifd_compression": self._multifd_compression,
+ "dirty_limit": self._dirty_limit,
+ "x_vcpu_dirty_limit_period": self._x_vcpu_dirty_limit_period,
+ "vcpu_dirty_limit": self._vcpu_dirty_limit,
+ }
+
+ @classmethod
+ def deserialize(cls, data):
+ return cls(
+ data["name"],
+ data["downtime"],
+ data["bandwidth"],
+ data["max_iters"],
+ data["max_time"],
+ data["pause"],
+ data["pause_iters"],
+ data["post_copy"],
+ data["post_copy_iters"],
+ data["auto_converge"],
+ data["auto_converge_step"],
+ data["compression_mt"],
+ data["compression_mt_threads"],
+ data["compression_xbzrle"],
+ data["compression_xbzrle_cache"],
+ data["multifd"],
+ data["multifd_channels"],
+ data["multifd_compression"])
diff --git a/tests/migration-stress/guestperf/shell.py b/tests/migration-stress/guestperf/shell.py
new file mode 100644
index 0000000..63bbe32
--- /dev/null
+++ b/tests/migration-stress/guestperf/shell.py
@@ -0,0 +1,300 @@
+#
+# Migration test command line shell integration
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import argparse
+import fnmatch
+import os
+import os.path
+import platform
+import sys
+import logging
+
+from guestperf.hardware import Hardware
+from guestperf.engine import Engine
+from guestperf.scenario import Scenario
+from guestperf.comparison import COMPARISONS
+from guestperf.plot import Plot
+from guestperf.report import Report
+
+
+class BaseShell(object):
+
+ def __init__(self):
+ parser = argparse.ArgumentParser(description="Migration Test Tool")
+
+ # Test args
+ parser.add_argument("--debug", dest="debug", default=False, action="store_true")
+ parser.add_argument("--verbose", dest="verbose", default=False, action="store_true")
+ parser.add_argument("--sleep", dest="sleep", default=15, type=int)
+ parser.add_argument("--binary", dest="binary", default="/usr/bin/qemu-system-x86_64")
+ parser.add_argument("--dst-host", dest="dst_host", default="localhost")
+ parser.add_argument("--kernel", dest="kernel", default="/boot/vmlinuz-%s" % platform.release())
+ parser.add_argument("--initrd", dest="initrd",
+ default="tests/migration-stress/initrd-stress.img")
+ parser.add_argument("--transport", dest="transport", default="unix")
+
+
+ # Hardware args
+ parser.add_argument("--cpus", dest="cpus", default=1, type=int)
+ parser.add_argument("--mem", dest="mem", default=1, type=int)
+ parser.add_argument("--src-cpu-bind", dest="src_cpu_bind", default="")
+ parser.add_argument("--src-mem-bind", dest="src_mem_bind", default="")
+ parser.add_argument("--dst-cpu-bind", dest="dst_cpu_bind", default="")
+ parser.add_argument("--dst-mem-bind", dest="dst_mem_bind", default="")
+ parser.add_argument("--prealloc-pages", dest="prealloc_pages", default=False)
+ parser.add_argument("--huge-pages", dest="huge_pages", default=False)
+ parser.add_argument("--locked-pages", dest="locked_pages", default=False)
+ parser.add_argument("--dirty-ring-size", dest="dirty_ring_size",
+ default=0, type=int)
+
+ self._parser = parser
+
+ def get_engine(self, args):
+ return Engine(binary=args.binary,
+ dst_host=args.dst_host,
+ kernel=args.kernel,
+ initrd=args.initrd,
+ transport=args.transport,
+ sleep=args.sleep,
+ debug=args.debug,
+ verbose=args.verbose)
+
+ def get_hardware(self, args):
+ def split_map(value):
+ if value == "":
+ return []
+ return value.split(",")
+
+ return Hardware(cpus=args.cpus,
+ mem=args.mem,
+
+ src_cpu_bind=split_map(args.src_cpu_bind),
+ src_mem_bind=split_map(args.src_mem_bind),
+ dst_cpu_bind=split_map(args.dst_cpu_bind),
+ dst_mem_bind=split_map(args.dst_mem_bind),
+
+ locked_pages=args.locked_pages,
+ huge_pages=args.huge_pages,
+ prealloc_pages=args.prealloc_pages,
+
+ dirty_ring_size=args.dirty_ring_size)
+
+
+class Shell(BaseShell):
+
+ def __init__(self):
+ super(Shell, self).__init__()
+
+ parser = self._parser
+
+ parser.add_argument("--output", dest="output", default=None)
+
+ # Scenario args
+ parser.add_argument("--max-iters", dest="max_iters", default=30, type=int)
+ parser.add_argument("--max-time", dest="max_time", default=300, type=int)
+ parser.add_argument("--bandwidth", dest="bandwidth", default=125000, type=int)
+ parser.add_argument("--downtime", dest="downtime", default=500, type=int)
+
+ parser.add_argument("--pause", dest="pause", default=False, action="store_true")
+ parser.add_argument("--pause-iters", dest="pause_iters", default=5, type=int)
+
+ parser.add_argument("--post-copy", dest="post_copy", default=False, action="store_true")
+ parser.add_argument("--post-copy-iters", dest="post_copy_iters", default=5, type=int)
+
+ parser.add_argument("--auto-converge", dest="auto_converge", default=False, action="store_true")
+ parser.add_argument("--auto-converge-step", dest="auto_converge_step", default=10, type=int)
+
+ parser.add_argument("--compression-mt", dest="compression_mt", default=False, action="store_true")
+ parser.add_argument("--compression-mt-threads", dest="compression_mt_threads", default=1, type=int)
+
+ parser.add_argument("--compression-xbzrle", dest="compression_xbzrle", default=False, action="store_true")
+ parser.add_argument("--compression-xbzrle-cache", dest="compression_xbzrle_cache", default=10, type=int)
+
+ parser.add_argument("--multifd", dest="multifd", default=False,
+ action="store_true")
+ parser.add_argument("--multifd-channels", dest="multifd_channels",
+ default=2, type=int)
+ parser.add_argument("--multifd-compression", dest="multifd_compression",
+ default="")
+
+ parser.add_argument("--dirty-limit", dest="dirty_limit", default=False,
+ action="store_true")
+
+ parser.add_argument("--x-vcpu-dirty-limit-period",
+ dest="x_vcpu_dirty_limit_period",
+ default=500, type=int)
+
+ parser.add_argument("--vcpu-dirty-limit",
+ dest="vcpu_dirty_limit",
+ default=1, type=int)
+
+ def get_scenario(self, args):
+ return Scenario(name="perfreport",
+ downtime=args.downtime,
+ bandwidth=args.bandwidth,
+ max_iters=args.max_iters,
+ max_time=args.max_time,
+
+ pause=args.pause,
+ pause_iters=args.pause_iters,
+
+ post_copy=args.post_copy,
+ post_copy_iters=args.post_copy_iters,
+
+ auto_converge=args.auto_converge,
+ auto_converge_step=args.auto_converge_step,
+
+ compression_mt=args.compression_mt,
+ compression_mt_threads=args.compression_mt_threads,
+
+ compression_xbzrle=args.compression_xbzrle,
+ compression_xbzrle_cache=args.compression_xbzrle_cache,
+
+ multifd=args.multifd,
+ multifd_channels=args.multifd_channels,
+ multifd_compression=args.multifd_compression,
+
+ dirty_limit=args.dirty_limit,
+ x_vcpu_dirty_limit_period=\
+ args.x_vcpu_dirty_limit_period,
+ vcpu_dirty_limit=args.vcpu_dirty_limit)
+
+ def run(self, argv):
+ args = self._parser.parse_args(argv)
+ logging.basicConfig(level=(logging.DEBUG if args.debug else
+ logging.INFO if args.verbose else
+ logging.WARN))
+
+
+ engine = self.get_engine(args)
+ hardware = self.get_hardware(args)
+ scenario = self.get_scenario(args)
+
+ try:
+ report = engine.run(hardware, scenario)
+ if args.output is None:
+ print(report.to_json())
+ else:
+ with open(args.output, "w") as fh:
+ print(report.to_json(), file=fh)
+ return 0
+ except Exception as e:
+ print("Error: %s" % str(e), file=sys.stderr)
+ if args.debug:
+ raise
+ return 1
+
+
+class BatchShell(BaseShell):
+
+ def __init__(self):
+ super(BatchShell, self).__init__()
+
+ parser = self._parser
+
+ parser.add_argument("--filter", dest="filter", default="*")
+ parser.add_argument("--output", dest="output", default=os.getcwd())
+
+ def run(self, argv):
+ args = self._parser.parse_args(argv)
+ logging.basicConfig(level=(logging.DEBUG if args.debug else
+ logging.INFO if args.verbose else
+ logging.WARN))
+
+
+ engine = self.get_engine(args)
+ hardware = self.get_hardware(args)
+
+ try:
+ for comparison in COMPARISONS:
+ compdir = os.path.join(args.output, comparison._name)
+ for scenario in comparison._scenarios:
+ name = os.path.join(comparison._name, scenario._name)
+ if not fnmatch.fnmatch(name, args.filter):
+ if args.verbose:
+ print("Skipping %s" % name)
+ continue
+
+ if args.verbose:
+ print("Running %s" % name)
+
+ dirname = os.path.join(args.output, comparison._name)
+ filename = os.path.join(dirname, scenario._name + ".json")
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ report = engine.run(hardware, scenario)
+ with open(filename, "w") as fh:
+ print(report.to_json(), file=fh)
+ except Exception as e:
+ print("Error: %s" % str(e), file=sys.stderr)
+ if args.debug:
+ raise
+
+
+class PlotShell(object):
+
+ def __init__(self):
+ super(PlotShell, self).__init__()
+
+ self._parser = argparse.ArgumentParser(description="Migration Test Tool")
+
+ self._parser.add_argument("--output", dest="output", default=None)
+
+ self._parser.add_argument("--debug", dest="debug", default=False, action="store_true")
+ self._parser.add_argument("--verbose", dest="verbose", default=False, action="store_true")
+
+ self._parser.add_argument("--migration-iters", dest="migration_iters", default=False, action="store_true")
+ self._parser.add_argument("--total-guest-cpu", dest="total_guest_cpu", default=False, action="store_true")
+ self._parser.add_argument("--split-guest-cpu", dest="split_guest_cpu", default=False, action="store_true")
+ self._parser.add_argument("--qemu-cpu", dest="qemu_cpu", default=False, action="store_true")
+ self._parser.add_argument("--vcpu-cpu", dest="vcpu_cpu", default=False, action="store_true")
+
+ self._parser.add_argument("reports", nargs='*')
+
+ def run(self, argv):
+ args = self._parser.parse_args(argv)
+ logging.basicConfig(level=(logging.DEBUG if args.debug else
+ logging.INFO if args.verbose else
+ logging.WARN))
+
+
+ if len(args.reports) == 0:
+ print("At least one report required", file=sys.stderr)
+ return 1
+
+ if not (args.qemu_cpu or
+ args.vcpu_cpu or
+ args.total_guest_cpu or
+ args.split_guest_cpu):
+ print("At least one chart type is required", file=sys.stderr)
+ return 1
+
+ reports = []
+ for report in args.reports:
+ reports.append(Report.from_json_file(report))
+
+ plot = Plot(reports,
+ args.migration_iters,
+ args.total_guest_cpu,
+ args.split_guest_cpu,
+ args.qemu_cpu,
+ args.vcpu_cpu)
+
+ plot.generate(args.output)
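
Shell, BatchShell and PlotShell above are the library halves of the command line tools; the guestperf.py, guestperf-batch.py and guestperf-plot.py wrappers renamed elsewhere in this series presumably amount to little more than the following sketch (hypothetical wrapper shown for Shell only; the real wrappers are not part of this hunk):

    #!/usr/bin/env python3
    import sys

    # Assumption: invoked from tests/migration-stress so the guestperf
    # package resolves relative to the script.
    from guestperf.shell import Shell

    if __name__ == "__main__":
        sys.exit(Shell().run(sys.argv[1:]))
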
diff --git a/tests/migration/guestperf/timings.py b/tests/migration-stress/guestperf/timings.py
index 2374010..2374010 100644
--- a/tests/migration/guestperf/timings.py
+++ b/tests/migration-stress/guestperf/timings.py
diff --git a/tests/migration/initrd-stress.sh b/tests/migration-stress/initrd-stress.sh
index 0f20ac2..0f20ac2 100755
--- a/tests/migration/initrd-stress.sh
+++ b/tests/migration-stress/initrd-stress.sh
diff --git a/tests/migration/meson.build b/tests/migration-stress/meson.build
index a91aa61..a91aa61 100644
--- a/tests/migration/meson.build
+++ b/tests/migration-stress/meson.build
diff --git a/tests/migration/stress.c b/tests/migration-stress/stress.c
index 88acf8d..88acf8d 100644
--- a/tests/migration/stress.c
+++ b/tests/migration-stress/stress.c
diff --git a/tests/migration/guestperf/comparison.py b/tests/migration/guestperf/comparison.py
deleted file mode 100644
index 42cc037..0000000
--- a/tests/migration/guestperf/comparison.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#
-# Migration test scenario comparison mapping
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-from guestperf.scenario import Scenario
-
-class Comparison(object):
- def __init__(self, name, scenarios):
- self._name = name
- self._scenarios = scenarios
-
-COMPARISONS = [
- # Looking at effect of pausing guest during migration
- # at various stages of iteration over RAM
- Comparison("pause-iters", scenarios = [
- Scenario("pause-iters-0",
- pause=True, pause_iters=0),
- Scenario("pause-iters-1",
- pause=True, pause_iters=1),
- Scenario("pause-iters-5",
- pause=True, pause_iters=5),
- Scenario("pause-iters-20",
- pause=True, pause_iters=20),
- ]),
-
-
- # Looking at use of post-copy in relation to bandwidth
- # available for migration
- Comparison("post-copy-bandwidth", scenarios = [
- Scenario("post-copy-bw-100mbs",
- post_copy=True, bandwidth=12),
- Scenario("post-copy-bw-300mbs",
- post_copy=True, bandwidth=37),
- Scenario("post-copy-bw-1gbs",
- post_copy=True, bandwidth=125),
- Scenario("post-copy-bw-10gbs",
- post_copy=True, bandwidth=1250),
- Scenario("post-copy-bw-100gbs",
- post_copy=True, bandwidth=12500),
- ]),
-
-
- # Looking at effect of starting post-copy at different
- # stages of the migration
- Comparison("post-copy-iters", scenarios = [
- Scenario("post-copy-iters-0",
- post_copy=True, post_copy_iters=0),
- Scenario("post-copy-iters-1",
- post_copy=True, post_copy_iters=1),
- Scenario("post-copy-iters-5",
- post_copy=True, post_copy_iters=5),
- Scenario("post-copy-iters-20",
- post_copy=True, post_copy_iters=20),
- ]),
-
-
- # Looking at effect of auto-converge with different
- # throttling percentage step rates
- Comparison("auto-converge-iters", scenarios = [
- Scenario("auto-converge-step-5",
- auto_converge=True, auto_converge_step=5),
- Scenario("auto-converge-step-10",
- auto_converge=True, auto_converge_step=10),
- Scenario("auto-converge-step-20",
- auto_converge=True, auto_converge_step=20),
- ]),
-
-
- # Looking at use of auto-converge in relation to bandwidth
- # available for migration
- Comparison("auto-converge-bandwidth", scenarios = [
- Scenario("auto-converge-bw-100mbs",
- auto_converge=True, bandwidth=12),
- Scenario("auto-converge-bw-300mbs",
- auto_converge=True, bandwidth=37),
- Scenario("auto-converge-bw-1gbs",
- auto_converge=True, bandwidth=125),
- Scenario("auto-converge-bw-10gbs",
- auto_converge=True, bandwidth=1250),
- Scenario("auto-converge-bw-100gbs",
- auto_converge=True, bandwidth=12500),
- ]),
-
-
- # Looking at effect of multi-thread compression with
- # varying numbers of threads
- Comparison("compr-mt", scenarios = [
- Scenario("compr-mt-threads-1",
- compression_mt=True, compression_mt_threads=1),
- Scenario("compr-mt-threads-2",
- compression_mt=True, compression_mt_threads=2),
- Scenario("compr-mt-threads-4",
- compression_mt=True, compression_mt_threads=4),
- ]),
-
-
- # Looking at effect of xbzrle compression with varying
- # cache sizes
- Comparison("compr-xbzrle", scenarios = [
- Scenario("compr-xbzrle-cache-5",
- compression_xbzrle=True, compression_xbzrle_cache=5),
- Scenario("compr-xbzrle-cache-10",
- compression_xbzrle=True, compression_xbzrle_cache=10),
- Scenario("compr-xbzrle-cache-20",
- compression_xbzrle=True, compression_xbzrle_cache=10),
- Scenario("compr-xbzrle-cache-50",
- compression_xbzrle=True, compression_xbzrle_cache=50),
- ]),
-
-
- # Looking at effect of multifd with
- # varying numbers of channels
- Comparison("compr-multifd", scenarios = [
- Scenario("compr-multifd-channels-4",
- multifd=True, multifd_channels=2),
- Scenario("compr-multifd-channels-8",
- multifd=True, multifd_channels=8),
- Scenario("compr-multifd-channels-32",
- multifd=True, multifd_channels=32),
- Scenario("compr-multifd-channels-64",
- multifd=True, multifd_channels=64),
- ]),
-
- # Looking at effect of dirty-limit with
- # varying x_vcpu_dirty_limit_period
- Comparison("compr-dirty-limit-period", scenarios = [
- Scenario("compr-dirty-limit-period-500",
- dirty_limit=True, x_vcpu_dirty_limit_period=500),
- Scenario("compr-dirty-limit-period-800",
- dirty_limit=True, x_vcpu_dirty_limit_period=800),
- Scenario("compr-dirty-limit-period-1000",
- dirty_limit=True, x_vcpu_dirty_limit_period=1000),
- ]),
-
-
- # Looking at effect of dirty-limit with
- # varying vcpu_dirty_limit
- Comparison("compr-dirty-limit", scenarios = [
- Scenario("compr-dirty-limit-10MB",
- dirty_limit=True, vcpu_dirty_limit=10),
- Scenario("compr-dirty-limit-20MB",
- dirty_limit=True, vcpu_dirty_limit=20),
- Scenario("compr-dirty-limit-50MB",
- dirty_limit=True, vcpu_dirty_limit=50),
- ]),
-]
diff --git a/tests/migration/guestperf/engine.py b/tests/migration/guestperf/engine.py
deleted file mode 100644
index 608d727..0000000
--- a/tests/migration/guestperf/engine.py
+++ /dev/null
@@ -1,505 +0,0 @@
-#
-# Migration test main engine
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-
-import os
-import re
-import sys
-import time
-
-from guestperf.progress import Progress, ProgressStats
-from guestperf.report import Report
-from guestperf.timings import TimingRecord, Timings
-
-sys.path.append(os.path.join(os.path.dirname(__file__),
- '..', '..', '..', 'python'))
-from qemu.machine import QEMUMachine
-
-
-class Engine(object):
-
- def __init__(self, binary, dst_host, kernel, initrd, transport="tcp",
- sleep=15, verbose=False, debug=False):
-
- self._binary = binary # Path to QEMU binary
- self._dst_host = dst_host # Hostname of target host
- self._kernel = kernel # Path to kernel image
- self._initrd = initrd # Path to stress initrd
- self._transport = transport # 'unix' or 'tcp' or 'rdma'
- self._sleep = sleep
- self._verbose = verbose
- self._debug = debug
-
- if debug:
- self._verbose = debug
-
- def _vcpu_timing(self, pid, tid_list):
- records = []
- now = time.time()
-
- jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
- for tid in tid_list:
- statfile = "/proc/%d/task/%d/stat" % (pid, tid)
- with open(statfile, "r") as fh:
- stat = fh.readline()
- fields = stat.split(" ")
- stime = int(fields[13])
- utime = int(fields[14])
- records.append(TimingRecord(tid, now, 1000 * (stime + utime) / jiffies_per_sec))
- return records
-
- def _cpu_timing(self, pid):
- now = time.time()
-
- jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
- statfile = "/proc/%d/stat" % pid
- with open(statfile, "r") as fh:
- stat = fh.readline()
- fields = stat.split(" ")
- stime = int(fields[13])
- utime = int(fields[14])
- return TimingRecord(pid, now, 1000 * (stime + utime) / jiffies_per_sec)
-
- def _migrate_progress(self, vm):
- info = vm.cmd("query-migrate")
-
- if "ram" not in info:
- info["ram"] = {}
-
- return Progress(
- info.get("status", "active"),
- ProgressStats(
- info["ram"].get("transferred", 0),
- info["ram"].get("remaining", 0),
- info["ram"].get("total", 0),
- info["ram"].get("duplicate", 0),
- info["ram"].get("skipped", 0),
- info["ram"].get("normal", 0),
- info["ram"].get("normal-bytes", 0),
- info["ram"].get("dirty-pages-rate", 0),
- info["ram"].get("mbps", 0),
- info["ram"].get("dirty-sync-count", 0)
- ),
- time.time(),
- info.get("total-time", 0),
- info.get("downtime", 0),
- info.get("expected-downtime", 0),
- info.get("setup-time", 0),
- info.get("cpu-throttle-percentage", 0),
- info.get("dirty-limit-throttle-time-per-round", 0),
- info.get("dirty-limit-ring-full-time", 0),
- )
-
- def _migrate(self, hardware, scenario, src, dst, connect_uri):
- src_qemu_time = []
- src_vcpu_time = []
- src_pid = src.get_pid()
-
- vcpus = src.cmd("query-cpus-fast")
- src_threads = []
- for vcpu in vcpus:
- src_threads.append(vcpu["thread-id"])
-
- # XXX how to get dst timings on remote host ?
-
- if self._verbose:
- print("Sleeping %d seconds for initial guest workload run" % self._sleep)
- sleep_secs = self._sleep
- while sleep_secs > 1:
- src_qemu_time.append(self._cpu_timing(src_pid))
- src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
- time.sleep(1)
- sleep_secs -= 1
-
- if self._verbose:
- print("Starting migration")
- if scenario._auto_converge:
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "auto-converge",
- "state": True }
- ])
- resp = src.cmd("migrate-set-parameters",
- cpu_throttle_increment=scenario._auto_converge_step)
-
- if scenario._post_copy:
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "postcopy-ram",
- "state": True }
- ])
- resp = dst.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "postcopy-ram",
- "state": True }
- ])
-
- resp = src.cmd("migrate-set-parameters",
- max_bandwidth=scenario._bandwidth * 1024 * 1024)
-
- resp = src.cmd("migrate-set-parameters",
- downtime_limit=scenario._downtime)
-
- if scenario._compression_mt:
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "compress",
- "state": True }
- ])
- resp = src.cmd("migrate-set-parameters",
- compress_threads=scenario._compression_mt_threads)
- resp = dst.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "compress",
- "state": True }
- ])
- resp = dst.cmd("migrate-set-parameters",
- decompress_threads=scenario._compression_mt_threads)
-
- if scenario._compression_xbzrle:
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "xbzrle",
- "state": True }
- ])
- resp = dst.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "xbzrle",
- "state": True }
- ])
- resp = src.cmd("migrate-set-parameters",
- xbzrle_cache_size=(
- hardware._mem *
- 1024 * 1024 * 1024 / 100 *
- scenario._compression_xbzrle_cache))
-
- if scenario._multifd:
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "multifd",
- "state": True }
- ])
- resp = src.cmd("migrate-set-parameters",
- multifd_channels=scenario._multifd_channels)
- resp = dst.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "multifd",
- "state": True }
- ])
- resp = dst.cmd("migrate-set-parameters",
- multifd_channels=scenario._multifd_channels)
-
- if scenario._dirty_limit:
- if not hardware._dirty_ring_size:
- raise Exception("dirty ring size must be configured when "
- "testing dirty limit migration")
-
- resp = src.cmd("migrate-set-capabilities",
- capabilities = [
- { "capability": "dirty-limit",
- "state": True }
- ])
- resp = src.cmd("migrate-set-parameters",
- x_vcpu_dirty_limit_period=scenario._x_vcpu_dirty_limit_period)
- resp = src.cmd("migrate-set-parameters",
- vcpu_dirty_limit=scenario._vcpu_dirty_limit)
-
- resp = src.cmd("migrate", uri=connect_uri)
-
- post_copy = False
- paused = False
-
- progress_history = []
-
- start = time.time()
- loop = 0
- while True:
- loop = loop + 1
- time.sleep(0.05)
-
- progress = self._migrate_progress(src)
- if (loop % 20) == 0:
- src_qemu_time.append(self._cpu_timing(src_pid))
- src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
-
- if (len(progress_history) == 0 or
- (progress_history[-1]._ram._iterations <
- progress._ram._iterations)):
- progress_history.append(progress)
-
- if progress._status in ("completed", "failed", "cancelled"):
- if progress._status == "completed" and paused:
- dst.cmd("cont")
- if progress_history[-1] != progress:
- progress_history.append(progress)
-
- if progress._status == "completed":
- if self._verbose:
- print("Sleeping %d seconds for final guest workload run" % self._sleep)
- sleep_secs = self._sleep
- while sleep_secs > 1:
- time.sleep(1)
- src_qemu_time.append(self._cpu_timing(src_pid))
- src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads))
- sleep_secs -= 1
-
- return [progress_history, src_qemu_time, src_vcpu_time]
-
- if self._verbose and (loop % 20) == 0:
- print("Iter %d: remain %5dMB of %5dMB (total %5dMB @ %5dMb/sec)" % (
- progress._ram._iterations,
- progress._ram._remaining_bytes / (1024 * 1024),
- progress._ram._total_bytes / (1024 * 1024),
- progress._ram._transferred_bytes / (1024 * 1024),
- progress._ram._transfer_rate_mbs,
- ))
-
- if progress._ram._iterations > scenario._max_iters:
- if self._verbose:
- print("No completion after %d iterations over RAM" % scenario._max_iters)
- src.cmd("migrate_cancel")
- continue
-
- if time.time() > (start + scenario._max_time):
- if self._verbose:
- print("No completion after %d seconds" % scenario._max_time)
- src.cmd("migrate_cancel")
- continue
-
- if (scenario._post_copy and
- progress._ram._iterations >= scenario._post_copy_iters and
- not post_copy):
- if self._verbose:
- print("Switching to post-copy after %d iterations" % scenario._post_copy_iters)
- resp = src.cmd("migrate-start-postcopy")
- post_copy = True
-
- if (scenario._pause and
- progress._ram._iterations >= scenario._pause_iters and
- not paused):
- if self._verbose:
- print("Pausing VM after %d iterations" % scenario._pause_iters)
- resp = src.cmd("stop")
- paused = True
-
- def _is_ppc64le(self):
- _, _, _, _, machine = os.uname()
- if machine == "ppc64le":
- return True
- return False
-
- def _get_guest_console_args(self):
- if self._is_ppc64le():
- return "console=hvc0"
- else:
- return "console=ttyS0"
-
- def _get_qemu_serial_args(self):
- if self._is_ppc64le():
- return ["-chardev", "stdio,id=cdev0",
- "-device", "spapr-vty,chardev=cdev0"]
- else:
- return ["-chardev", "stdio,id=cdev0",
- "-device", "isa-serial,chardev=cdev0"]
-
- def _get_common_args(self, hardware, tunnelled=False):
- args = [
- "noapic",
- "edd=off",
- "printk.time=1",
- "noreplace-smp",
- "cgroup_disable=memory",
- "pci=noearly",
- ]
-
- args.append(self._get_guest_console_args())
-
- if self._debug:
- args.append("debug")
- else:
- args.append("quiet")
-
- args.append("ramsize=%s" % hardware._mem)
-
- cmdline = " ".join(args)
- if tunnelled:
- cmdline = "'" + cmdline + "'"
-
- argv = [
- "-cpu", "host",
- "-kernel", self._kernel,
- "-initrd", self._initrd,
- "-append", cmdline,
- "-m", str((hardware._mem * 1024) + 512),
- "-smp", str(hardware._cpus),
- ]
- if hardware._dirty_ring_size:
- argv.extend(["-accel", "kvm,dirty-ring-size=%s" %
- hardware._dirty_ring_size])
- else:
- argv.extend(["-accel", "kvm"])
-
- argv.extend(self._get_qemu_serial_args())
-
- if self._debug:
- argv.extend(["-machine", "graphics=off"])
-
- if hardware._prealloc_pages:
- argv_source += ["-mem-path", "/dev/shm",
- "-mem-prealloc"]
- if hardware._locked_pages:
- argv_source += ["-overcommit", "mem-lock=on"]
- if hardware._huge_pages:
- pass
-
- return argv
-
- def _get_src_args(self, hardware):
- return self._get_common_args(hardware)
-
- def _get_dst_args(self, hardware, uri):
- tunnelled = False
- if self._dst_host != "localhost":
- tunnelled = True
- argv = self._get_common_args(hardware, tunnelled)
- return argv + ["-incoming", uri]
-
- @staticmethod
- def _get_common_wrapper(cpu_bind, mem_bind):
- wrapper = []
- if len(cpu_bind) > 0 or len(mem_bind) > 0:
- wrapper.append("numactl")
- if cpu_bind:
- wrapper.append("--physcpubind=%s" % ",".join(cpu_bind))
- if mem_bind:
- wrapper.append("--membind=%s" % ",".join(mem_bind))
-
- return wrapper
-
- def _get_src_wrapper(self, hardware):
- return self._get_common_wrapper(hardware._src_cpu_bind, hardware._src_mem_bind)
-
- def _get_dst_wrapper(self, hardware):
- wrapper = self._get_common_wrapper(hardware._dst_cpu_bind, hardware._dst_mem_bind)
- if self._dst_host != "localhost":
- return ["ssh",
- "-R", "9001:localhost:9001",
- self._dst_host] + wrapper
- else:
- return wrapper
-
- def _get_timings(self, vm):
- log = vm.get_log()
- if not log:
- return []
- if self._debug:
- print(log)
-
- regex = r"[^\s]+\s\((\d+)\):\sINFO:\s(\d+)ms\scopied\s\d+\sGB\sin\s(\d+)ms"
- matcher = re.compile(regex)
- records = []
- for line in log.split("\n"):
- match = matcher.match(line)
- if match:
- records.append(TimingRecord(int(match.group(1)),
- int(match.group(2)) / 1000.0,
- int(match.group(3))))
- return records
-
- def run(self, hardware, scenario, result_dir=os.getcwd()):
- abs_result_dir = os.path.join(result_dir, scenario._name)
-
- if self._transport == "tcp":
- uri = "tcp:%s:9000" % self._dst_host
- elif self._transport == "rdma":
- uri = "rdma:%s:9000" % self._dst_host
- elif self._transport == "unix":
- if self._dst_host != "localhost":
- raise Exception("Running use unix migration transport for non-local host")
- uri = "unix:/var/tmp/qemu-migrate-%d.migrate" % os.getpid()
- try:
- os.remove(uri[5:])
- except OSError:
- pass
-
- if self._dst_host != "localhost":
- dstmonaddr = ("localhost", 9001)
- else:
- dstmonaddr = "/var/tmp/qemu-dst-%d-monitor.sock" % os.getpid()
- srcmonaddr = "/var/tmp/qemu-src-%d-monitor.sock" % os.getpid()
-
- src = QEMUMachine(self._binary,
- args=self._get_src_args(hardware),
- wrapper=self._get_src_wrapper(hardware),
- name="qemu-src-%d" % os.getpid(),
- monitor_address=srcmonaddr)
-
- dst = QEMUMachine(self._binary,
- args=self._get_dst_args(hardware, uri),
- wrapper=self._get_dst_wrapper(hardware),
- name="qemu-dst-%d" % os.getpid(),
- monitor_address=dstmonaddr)
-
- try:
- src.launch()
- dst.launch()
-
- ret = self._migrate(hardware, scenario, src, dst, uri)
- progress_history = ret[0]
- qemu_timings = ret[1]
- vcpu_timings = ret[2]
- if uri[0:5] == "unix:" and os.path.exists(uri[5:]):
- os.remove(uri[5:])
-
- if os.path.exists(srcmonaddr):
- os.remove(srcmonaddr)
-
- if self._dst_host == "localhost" and os.path.exists(dstmonaddr):
- os.remove(dstmonaddr)
-
- if self._verbose:
- print("Finished migration")
-
- src.shutdown()
- dst.shutdown()
-
- return Report(hardware, scenario, progress_history,
- Timings(self._get_timings(src) + self._get_timings(dst)),
- Timings(qemu_timings),
- Timings(vcpu_timings),
- self._binary, self._dst_host, self._kernel,
- self._initrd, self._transport, self._sleep)
- except Exception as e:
- if self._debug:
- print("Failed: %s" % str(e))
- try:
- src.shutdown()
- except:
- pass
- try:
- dst.shutdown()
- except:
- pass
-
- if self._debug:
- print(src.get_log())
- print(dst.get_log())
- raise
-
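
The _migrate() loop above polls migration progress and decides on each pass whether to keep waiting, cancel, switch to post-copy, or pause the guest. A minimal standalone sketch of just the cancel/continue decision, in Python; the function and argument names are illustrative, not part of the guestperf API:

    import time

    def should_cancel(iterations, start_time, max_iters, max_time, now=None):
        """Return a reason string if the migration should be cancelled, else None."""
        now = time.time() if now is None else now
        if iterations > max_iters:
            return "no completion after %d iterations over RAM" % max_iters
        if now > start_time + max_time:
            return "no completion after %d seconds" % max_time
        return None

    # 31 dirty-RAM passes against a 30-iteration cap triggers a cancel;
    # 5 passes well inside both limits does not.
    assert should_cancel(31, start_time=0, max_iters=30, max_time=300, now=10)
    assert should_cancel(5, start_time=0, max_iters=30, max_time=300, now=10) is None
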
diff --git a/tests/migration/guestperf/report.py b/tests/migration/guestperf/report.py
deleted file mode 100644
index 1efd40c..0000000
--- a/tests/migration/guestperf/report.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Migration test output result reporting
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-import json
-
-from guestperf.hardware import Hardware
-from guestperf.scenario import Scenario
-from guestperf.progress import Progress
-from guestperf.timings import Timings
-
-class Report(object):
-
- def __init__(self,
- hardware,
- scenario,
- progress_history,
- guest_timings,
- qemu_timings,
- vcpu_timings,
- binary,
- dst_host,
- kernel,
- initrd,
- transport,
- sleep):
-
- self._hardware = hardware
- self._scenario = scenario
- self._progress_history = progress_history
- self._guest_timings = guest_timings
- self._qemu_timings = qemu_timings
- self._vcpu_timings = vcpu_timings
- self._binary = binary
- self._dst_host = dst_host
- self._kernel = kernel
- self._initrd = initrd
- self._transport = transport
- self._sleep = sleep
-
- def serialize(self):
- return {
- "hardware": self._hardware.serialize(),
- "scenario": self._scenario.serialize(),
- "progress_history": [progress.serialize() for progress in self._progress_history],
- "guest_timings": self._guest_timings.serialize(),
- "qemu_timings": self._qemu_timings.serialize(),
- "vcpu_timings": self._vcpu_timings.serialize(),
- "binary": self._binary,
- "dst_host": self._dst_host,
- "kernel": self._kernel,
- "initrd": self._initrd,
- "transport": self._transport,
- "sleep": self._sleep,
- }
-
- @classmethod
- def deserialize(cls, data):
- return cls(
- Hardware.deserialize(data["hardware"]),
- Scenario.deserialize(data["scenario"]),
- [Progress.deserialize(record) for record in data["progress_history"]],
- Timings.deserialize(data["guest_timings"]),
- Timings.deserialize(data["qemu_timings"]),
- Timings.deserialize(data["vcpu_timings"]),
- data["binary"],
- data["dst_host"],
- data["kernel"],
- data["initrd"],
- data["transport"],
- data["sleep"])
-
- def to_json(self):
- return json.dumps(self.serialize(), indent=4)
-
- @classmethod
- def from_json(cls, data):
- return cls.deserialize(json.loads(data))
-
- @classmethod
- def from_json_file(cls, filename):
- with open(filename, "r") as fh:
- return cls.deserialize(json.load(fh))
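
Report above is a plain data container whose serialize(), deserialize(), to_json() and from_json() methods mirror one another. A cut-down toy class (not one of the guestperf types) showing the same round-trip contract:

    import json

    class Sample(object):
        def __init__(self, binary, sleep):
            self._binary = binary
            self._sleep = sleep

        def serialize(self):
            return {"binary": self._binary, "sleep": self._sleep}

        @classmethod
        def deserialize(cls, data):
            return cls(data["binary"], data["sleep"])

    orig = Sample("/usr/bin/qemu-system-x86_64", 15)
    copy = Sample.deserialize(json.loads(json.dumps(orig.serialize())))
    assert copy.serialize() == orig.serialize()
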
diff --git a/tests/migration/guestperf/scenario.py b/tests/migration/guestperf/scenario.py
deleted file mode 100644
index 154c4f5..0000000
--- a/tests/migration/guestperf/scenario.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-# Migration test scenario parameter description
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-
-class Scenario(object):
-
- def __init__(self, name,
- downtime=500,
- bandwidth=125000, # 1000 gig-e, effectively unlimited
- max_iters=30,
- max_time=300,
- pause=False, pause_iters=5,
- post_copy=False, post_copy_iters=5,
- auto_converge=False, auto_converge_step=10,
- compression_mt=False, compression_mt_threads=1,
- compression_xbzrle=False, compression_xbzrle_cache=10,
- multifd=False, multifd_channels=2,
- dirty_limit=False, x_vcpu_dirty_limit_period=500,
- vcpu_dirty_limit=1):
-
- self._name = name
-
- # General migration tunables
- self._downtime = downtime # milliseconds
- self._bandwidth = bandwidth # MiB per second
- self._max_iters = max_iters
- self._max_time = max_time # seconds
-
-
- # Strategies for ensuring completion
- self._pause = pause
- self._pause_iters = pause_iters
-
- self._post_copy = post_copy
- self._post_copy_iters = post_copy_iters
-
- self._auto_converge = auto_converge
- self._auto_converge_step = auto_converge_step # percentage CPU time
-
- self._compression_mt = compression_mt
- self._compression_mt_threads = compression_mt_threads
-
- self._compression_xbzrle = compression_xbzrle
- self._compression_xbzrle_cache = compression_xbzrle_cache # percentage of guest RAM
-
- self._multifd = multifd
- self._multifd_channels = multifd_channels
-
- self._dirty_limit = dirty_limit
- self._x_vcpu_dirty_limit_period = x_vcpu_dirty_limit_period
- self._vcpu_dirty_limit = vcpu_dirty_limit
-
- def serialize(self):
- return {
- "name": self._name,
- "downtime": self._downtime,
- "bandwidth": self._bandwidth,
- "max_iters": self._max_iters,
- "max_time": self._max_time,
- "pause": self._pause,
- "pause_iters": self._pause_iters,
- "post_copy": self._post_copy,
- "post_copy_iters": self._post_copy_iters,
- "auto_converge": self._auto_converge,
- "auto_converge_step": self._auto_converge_step,
- "compression_mt": self._compression_mt,
- "compression_mt_threads": self._compression_mt_threads,
- "compression_xbzrle": self._compression_xbzrle,
- "compression_xbzrle_cache": self._compression_xbzrle_cache,
- "multifd": self._multifd,
- "multifd_channels": self._multifd_channels,
- "dirty_limit": self._dirty_limit,
- "x_vcpu_dirty_limit_period": self._x_vcpu_dirty_limit_period,
- "vcpu_dirty_limit": self._vcpu_dirty_limit,
- }
-
- @classmethod
- def deserialize(cls, data):
- return cls(
- data["name"],
- data["downtime"],
- data["bandwidth"],
- data["max_iters"],
- data["max_time"],
- data["pause"],
- data["pause_iters"],
- data["post_copy"],
- data["post_copy_iters"],
- data["auto_converge"],
- data["auto_converge_step"],
- data["compression_mt"],
- data["compression_mt_threads"],
- data["compression_xbzrle"],
- data["compression_xbzrle_cache"],
- data["multifd"],
- data["multifd_channels"])
diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py
deleted file mode 100644
index c85d89e..0000000
--- a/tests/migration/guestperf/shell.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#
-# Migration test command line shell integration
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see <http://www.gnu.org/licenses/>.
-#
-
-
-import argparse
-import fnmatch
-import os
-import os.path
-import platform
-import sys
-import logging
-
-from guestperf.hardware import Hardware
-from guestperf.engine import Engine
-from guestperf.scenario import Scenario
-from guestperf.comparison import COMPARISONS
-from guestperf.plot import Plot
-from guestperf.report import Report
-
-
-class BaseShell(object):
-
- def __init__(self):
- parser = argparse.ArgumentParser(description="Migration Test Tool")
-
- # Test args
- parser.add_argument("--debug", dest="debug", default=False, action="store_true")
- parser.add_argument("--verbose", dest="verbose", default=False, action="store_true")
- parser.add_argument("--sleep", dest="sleep", default=15, type=int)
- parser.add_argument("--binary", dest="binary", default="/usr/bin/qemu-system-x86_64")
- parser.add_argument("--dst-host", dest="dst_host", default="localhost")
- parser.add_argument("--kernel", dest="kernel", default="/boot/vmlinuz-%s" % platform.release())
- parser.add_argument("--initrd", dest="initrd", default="tests/migration/initrd-stress.img")
- parser.add_argument("--transport", dest="transport", default="unix")
-
-
- # Hardware args
- parser.add_argument("--cpus", dest="cpus", default=1, type=int)
- parser.add_argument("--mem", dest="mem", default=1, type=int)
- parser.add_argument("--src-cpu-bind", dest="src_cpu_bind", default="")
- parser.add_argument("--src-mem-bind", dest="src_mem_bind", default="")
- parser.add_argument("--dst-cpu-bind", dest="dst_cpu_bind", default="")
- parser.add_argument("--dst-mem-bind", dest="dst_mem_bind", default="")
- parser.add_argument("--prealloc-pages", dest="prealloc_pages", default=False)
- parser.add_argument("--huge-pages", dest="huge_pages", default=False)
- parser.add_argument("--locked-pages", dest="locked_pages", default=False)
- parser.add_argument("--dirty-ring-size", dest="dirty_ring_size",
- default=0, type=int)
-
- self._parser = parser
-
- def get_engine(self, args):
- return Engine(binary=args.binary,
- dst_host=args.dst_host,
- kernel=args.kernel,
- initrd=args.initrd,
- transport=args.transport,
- sleep=args.sleep,
- debug=args.debug,
- verbose=args.verbose)
-
- def get_hardware(self, args):
- def split_map(value):
- if value == "":
- return []
- return value.split(",")
-
- return Hardware(cpus=args.cpus,
- mem=args.mem,
-
- src_cpu_bind=split_map(args.src_cpu_bind),
- src_mem_bind=split_map(args.src_mem_bind),
- dst_cpu_bind=split_map(args.dst_cpu_bind),
- dst_mem_bind=split_map(args.dst_mem_bind),
-
- locked_pages=args.locked_pages,
- huge_pages=args.huge_pages,
- prealloc_pages=args.prealloc_pages,
-
- dirty_ring_size=args.dirty_ring_size)
-
-
-class Shell(BaseShell):
-
- def __init__(self):
- super(Shell, self).__init__()
-
- parser = self._parser
-
- parser.add_argument("--output", dest="output", default=None)
-
- # Scenario args
- parser.add_argument("--max-iters", dest="max_iters", default=30, type=int)
- parser.add_argument("--max-time", dest="max_time", default=300, type=int)
- parser.add_argument("--bandwidth", dest="bandwidth", default=125000, type=int)
- parser.add_argument("--downtime", dest="downtime", default=500, type=int)
-
- parser.add_argument("--pause", dest="pause", default=False, action="store_true")
- parser.add_argument("--pause-iters", dest="pause_iters", default=5, type=int)
-
- parser.add_argument("--post-copy", dest="post_copy", default=False, action="store_true")
- parser.add_argument("--post-copy-iters", dest="post_copy_iters", default=5, type=int)
-
- parser.add_argument("--auto-converge", dest="auto_converge", default=False, action="store_true")
- parser.add_argument("--auto-converge-step", dest="auto_converge_step", default=10, type=int)
-
- parser.add_argument("--compression-mt", dest="compression_mt", default=False, action="store_true")
- parser.add_argument("--compression-mt-threads", dest="compression_mt_threads", default=1, type=int)
-
- parser.add_argument("--compression-xbzrle", dest="compression_xbzrle", default=False, action="store_true")
- parser.add_argument("--compression-xbzrle-cache", dest="compression_xbzrle_cache", default=10, type=int)
-
- parser.add_argument("--multifd", dest="multifd", default=False,
- action="store_true")
- parser.add_argument("--multifd-channels", dest="multifd_channels",
- default=2, type=int)
-
- parser.add_argument("--dirty-limit", dest="dirty_limit", default=False,
- action="store_true")
-
- parser.add_argument("--x-vcpu-dirty-limit-period",
- dest="x_vcpu_dirty_limit_period",
- default=500, type=int)
-
- parser.add_argument("--vcpu-dirty-limit",
- dest="vcpu_dirty_limit",
- default=1, type=int)
-
- def get_scenario(self, args):
- return Scenario(name="perfreport",
- downtime=args.downtime,
- bandwidth=args.bandwidth,
- max_iters=args.max_iters,
- max_time=args.max_time,
-
- pause=args.pause,
- pause_iters=args.pause_iters,
-
- post_copy=args.post_copy,
- post_copy_iters=args.post_copy_iters,
-
- auto_converge=args.auto_converge,
- auto_converge_step=args.auto_converge_step,
-
- compression_mt=args.compression_mt,
- compression_mt_threads=args.compression_mt_threads,
-
- compression_xbzrle=args.compression_xbzrle,
- compression_xbzrle_cache=args.compression_xbzrle_cache,
-
- multifd=args.multifd,
- multifd_channels=args.multifd_channels,
-
- dirty_limit=args.dirty_limit,
- x_vcpu_dirty_limit_period=\
- args.x_vcpu_dirty_limit_period,
- vcpu_dirty_limit=args.vcpu_dirty_limit)
-
- def run(self, argv):
- args = self._parser.parse_args(argv)
- logging.basicConfig(level=(logging.DEBUG if args.debug else
- logging.INFO if args.verbose else
- logging.WARN))
-
-
- engine = self.get_engine(args)
- hardware = self.get_hardware(args)
- scenario = self.get_scenario(args)
-
- try:
- report = engine.run(hardware, scenario)
- if args.output is None:
- print(report.to_json())
- else:
- with open(args.output, "w") as fh:
- print(report.to_json(), file=fh)
- return 0
- except Exception as e:
- print("Error: %s" % str(e), file=sys.stderr)
- if args.debug:
- raise
- return 1
-
-
-class BatchShell(BaseShell):
-
- def __init__(self):
- super(BatchShell, self).__init__()
-
- parser = self._parser
-
- parser.add_argument("--filter", dest="filter", default="*")
- parser.add_argument("--output", dest="output", default=os.getcwd())
-
- def run(self, argv):
- args = self._parser.parse_args(argv)
- logging.basicConfig(level=(logging.DEBUG if args.debug else
- logging.INFO if args.verbose else
- logging.WARN))
-
-
- engine = self.get_engine(args)
- hardware = self.get_hardware(args)
-
- try:
- for comparison in COMPARISONS:
- compdir = os.path.join(args.output, comparison._name)
- for scenario in comparison._scenarios:
- name = os.path.join(comparison._name, scenario._name)
- if not fnmatch.fnmatch(name, args.filter):
- if args.verbose:
- print("Skipping %s" % name)
- continue
-
- if args.verbose:
- print("Running %s" % name)
-
- dirname = os.path.join(args.output, comparison._name)
- filename = os.path.join(dirname, scenario._name + ".json")
- if not os.path.exists(dirname):
- os.makedirs(dirname)
- report = engine.run(hardware, scenario)
- with open(filename, "w") as fh:
- print(report.to_json(), file=fh)
- except Exception as e:
- print("Error: %s" % str(e), file=sys.stderr)
- if args.debug:
- raise
-
-
-class PlotShell(object):
-
- def __init__(self):
- super(PlotShell, self).__init__()
-
- self._parser = argparse.ArgumentParser(description="Migration Test Tool")
-
- self._parser.add_argument("--output", dest="output", default=None)
-
- self._parser.add_argument("--debug", dest="debug", default=False, action="store_true")
- self._parser.add_argument("--verbose", dest="verbose", default=False, action="store_true")
-
- self._parser.add_argument("--migration-iters", dest="migration_iters", default=False, action="store_true")
- self._parser.add_argument("--total-guest-cpu", dest="total_guest_cpu", default=False, action="store_true")
- self._parser.add_argument("--split-guest-cpu", dest="split_guest_cpu", default=False, action="store_true")
- self._parser.add_argument("--qemu-cpu", dest="qemu_cpu", default=False, action="store_true")
- self._parser.add_argument("--vcpu-cpu", dest="vcpu_cpu", default=False, action="store_true")
-
- self._parser.add_argument("reports", nargs='*')
-
- def run(self, argv):
- args = self._parser.parse_args(argv)
- logging.basicConfig(level=(logging.DEBUG if args.debug else
- logging.INFO if args.verbose else
- logging.WARN))
-
-
- if len(args.reports) == 0:
- print("At least one report required", file=sys.stderr)
- return 1
-
- if not (args.qemu_cpu or
- args.vcpu_cpu or
- args.total_guest_cpu or
- args.split_guest_cpu):
- print("At least one chart type is required", file=sys.stderr)
- return 1
-
- reports = []
- for report in args.reports:
- reports.append(Report.from_json_file(report))
-
- plot = Plot(reports,
- args.migration_iters,
- args.total_guest_cpu,
- args.split_guest_cpu,
- args.qemu_cpu,
- args.vcpu_cpu)
-
- plot.generate(args.output)
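
BatchShell above selects which scenarios to run by matching "comparison/scenario" names against the --filter glob with fnmatch. A small sketch of that selection; the names are made up for illustration:

    import fnmatch

    names = ["pause/pause-5", "post-copy/post-copy-5", "auto-converge/step-10"]
    selected = [n for n in names if fnmatch.fnmatch(n, "post-copy/*")]
    assert selected == ["post-copy/post-copy-5"]
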
diff --git a/tests/migration/migration-test.h b/tests/migration/migration-test.h
deleted file mode 100644
index 194df7d..0000000
--- a/tests/migration/migration-test.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef MIGRATION_TEST_H
-#define MIGRATION_TEST_H
-
-/* Common */
-#define TEST_MEM_PAGE_SIZE 4096
-
-/* x86 */
-#define X86_TEST_MEM_START (1 * 1024 * 1024)
-#define X86_TEST_MEM_END (100 * 1024 * 1024)
-
-/* S390 */
-#define S390_TEST_MEM_START (1 * 1024 * 1024)
-#define S390_TEST_MEM_END (100 * 1024 * 1024)
-
-/* PPC */
-#define PPC_TEST_MEM_START (1 * 1024 * 1024)
-#define PPC_TEST_MEM_END (100 * 1024 * 1024)
-#define PPC_H_PUT_TERM_CHAR 0x58
-
-/* ARM */
-#define ARM_TEST_MEM_START (0x40000000 + 1 * 1024 * 1024)
-#define ARM_TEST_MEM_END (0x40000000 + 100 * 1024 * 1024)
-#define ARM_MACH_VIRT_UART 0x09000000
-/* AArch64 kernel load address is 0x40080000, and the test memory starts at
- * 0x40100000. So the maximum allowable kernel size is 512KB.
- */
-#define ARM_TEST_MAX_KERNEL_SIZE (512 * 1024)
-
-#endif /* MIGRATION_TEST_H */
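
A quick check of the arithmetic in the comment above: the gap between the AArch64 kernel load address and the start of the ARM test memory region is exactly 512 KiB (constants copied from the header):

    ARM_TEST_MEM_START = 0x40000000 + 1 * 1024 * 1024    # 0x40100000
    AARCH64_KERNEL_LOAD_ADDR = 0x40080000
    assert ARM_TEST_MEM_START - AARCH64_KERNEL_LOAD_ADDR == 512 * 1024
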
diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
deleted file mode 100644
index cd63827..0000000
--- a/tests/plugin/inline.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
- *
- * Demonstrates and tests usage of inline ops.
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include <glib.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include <qemu-plugin.h>
-
-typedef struct {
- uint64_t count_tb;
- uint64_t count_tb_inline;
- uint64_t count_insn;
- uint64_t count_insn_inline;
- uint64_t count_mem;
- uint64_t count_mem_inline;
- uint64_t tb_cond_num_trigger;
- uint64_t tb_cond_track_count;
- uint64_t insn_cond_num_trigger;
- uint64_t insn_cond_track_count;
-} CPUCount;
-
-static const uint64_t cond_trigger_limit = 100;
-
-typedef struct {
- uint64_t data_insn;
- uint64_t data_tb;
- uint64_t data_mem;
-} CPUData;
-
-static struct qemu_plugin_scoreboard *counts;
-static qemu_plugin_u64 count_tb;
-static qemu_plugin_u64 count_tb_inline;
-static qemu_plugin_u64 count_insn;
-static qemu_plugin_u64 count_insn_inline;
-static qemu_plugin_u64 count_mem;
-static qemu_plugin_u64 count_mem_inline;
-static qemu_plugin_u64 tb_cond_num_trigger;
-static qemu_plugin_u64 tb_cond_track_count;
-static qemu_plugin_u64 insn_cond_num_trigger;
-static qemu_plugin_u64 insn_cond_track_count;
-static struct qemu_plugin_scoreboard *data;
-static qemu_plugin_u64 data_insn;
-static qemu_plugin_u64 data_tb;
-static qemu_plugin_u64 data_mem;
-
-static uint64_t global_count_tb;
-static uint64_t global_count_insn;
-static uint64_t global_count_mem;
-static unsigned int max_cpu_index;
-static GMutex tb_lock;
-static GMutex insn_lock;
-static GMutex mem_lock;
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
-
-static void stats_insn(void)
-{
- const uint64_t expected = global_count_insn;
- const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
- const uint64_t inl_per_vcpu =
- qemu_plugin_u64_sum(count_insn_inline);
- const uint64_t cond_num_trigger =
- qemu_plugin_u64_sum(insn_cond_num_trigger);
- const uint64_t cond_track_left = qemu_plugin_u64_sum(insn_cond_track_count);
- const uint64_t conditional =
- cond_num_trigger * cond_trigger_limit + cond_track_left;
- printf("insn: %" PRIu64 "\n", expected);
- printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
- printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
- printf("insn: %" PRIu64 " (cond cb)\n", conditional);
- g_assert(expected > 0);
- g_assert(per_vcpu == expected);
- g_assert(inl_per_vcpu == expected);
- g_assert(conditional == expected);
-}
-
-static void stats_tb(void)
-{
- const uint64_t expected = global_count_tb;
- const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
- const uint64_t inl_per_vcpu =
- qemu_plugin_u64_sum(count_tb_inline);
- const uint64_t cond_num_trigger = qemu_plugin_u64_sum(tb_cond_num_trigger);
- const uint64_t cond_track_left = qemu_plugin_u64_sum(tb_cond_track_count);
- const uint64_t conditional =
- cond_num_trigger * cond_trigger_limit + cond_track_left;
- printf("tb: %" PRIu64 "\n", expected);
- printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
- printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
- printf("tb: %" PRIu64 " (conditional cb)\n", conditional);
- g_assert(expected > 0);
- g_assert(per_vcpu == expected);
- g_assert(inl_per_vcpu == expected);
- g_assert(conditional == expected);
-}
-
-static void stats_mem(void)
-{
- const uint64_t expected = global_count_mem;
- const uint64_t per_vcpu = qemu_plugin_u64_sum(count_mem);
- const uint64_t inl_per_vcpu =
- qemu_plugin_u64_sum(count_mem_inline);
- printf("mem: %" PRIu64 "\n", expected);
- printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
- printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
- g_assert(expected > 0);
- g_assert(per_vcpu == expected);
- g_assert(inl_per_vcpu == expected);
-}
-
-static void plugin_exit(qemu_plugin_id_t id, void *udata)
-{
- const unsigned int num_cpus = qemu_plugin_num_vcpus();
- g_assert(num_cpus == max_cpu_index + 1);
-
- for (int i = 0; i < num_cpus ; ++i) {
- const uint64_t tb = qemu_plugin_u64_get(count_tb, i);
- const uint64_t tb_inline = qemu_plugin_u64_get(count_tb_inline, i);
- const uint64_t insn = qemu_plugin_u64_get(count_insn, i);
- const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i);
- const uint64_t mem = qemu_plugin_u64_get(count_mem, i);
- const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i);
- const uint64_t tb_cond_trigger =
- qemu_plugin_u64_get(tb_cond_num_trigger, i);
- const uint64_t tb_cond_left =
- qemu_plugin_u64_get(tb_cond_track_count, i);
- const uint64_t insn_cond_trigger =
- qemu_plugin_u64_get(insn_cond_num_trigger, i);
- const uint64_t insn_cond_left =
- qemu_plugin_u64_get(insn_cond_track_count, i);
- printf("cpu %d: tb (%" PRIu64 ", %" PRIu64
- ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
- ") | "
- "insn (%" PRIu64 ", %" PRIu64
- ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
- ") | "
- "mem (%" PRIu64 ", %" PRIu64 ")"
- "\n",
- i,
- tb, tb_inline,
- tb_cond_trigger, cond_trigger_limit, tb_cond_left,
- insn, insn_inline,
- insn_cond_trigger, cond_trigger_limit, insn_cond_left,
- mem, mem_inline);
- g_assert(tb == tb_inline);
- g_assert(insn == insn_inline);
- g_assert(mem == mem_inline);
- g_assert(tb_cond_trigger == tb / cond_trigger_limit);
- g_assert(tb_cond_left == tb % cond_trigger_limit);
- g_assert(insn_cond_trigger == insn / cond_trigger_limit);
- g_assert(insn_cond_left == insn % cond_trigger_limit);
- }
-
- stats_tb();
- stats_insn();
- stats_mem();
-
- qemu_plugin_scoreboard_free(counts);
- qemu_plugin_scoreboard_free(data);
-}
-
-static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
-{
- qemu_plugin_u64_add(count_tb, cpu_index, 1);
- g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
- g_mutex_lock(&tb_lock);
- max_cpu_index = MAX(max_cpu_index, cpu_index);
- global_count_tb++;
- g_mutex_unlock(&tb_lock);
-}
-
-static void vcpu_tb_cond_exec(unsigned int cpu_index, void *udata)
-{
- g_assert(qemu_plugin_u64_get(tb_cond_track_count, cpu_index) ==
- cond_trigger_limit);
- g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
- qemu_plugin_u64_set(tb_cond_track_count, cpu_index, 0);
- qemu_plugin_u64_add(tb_cond_num_trigger, cpu_index, 1);
-}
-
-static void vcpu_insn_cond_exec(unsigned int cpu_index, void *udata)
-{
- g_assert(qemu_plugin_u64_get(insn_cond_track_count, cpu_index) ==
- cond_trigger_limit);
- g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
- qemu_plugin_u64_set(insn_cond_track_count, cpu_index, 0);
- qemu_plugin_u64_add(insn_cond_num_trigger, cpu_index, 1);
-}
-
-static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
-{
- qemu_plugin_u64_add(count_insn, cpu_index, 1);
- g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
- g_mutex_lock(&insn_lock);
- global_count_insn++;
- g_mutex_unlock(&insn_lock);
-}
-
-static void vcpu_mem_access(unsigned int cpu_index,
- qemu_plugin_meminfo_t info,
- uint64_t vaddr,
- void *udata)
-{
- qemu_plugin_u64_add(count_mem, cpu_index, 1);
- g_assert(qemu_plugin_u64_get(data_mem, cpu_index) == (uintptr_t) udata);
- g_mutex_lock(&mem_lock);
- global_count_mem++;
- g_mutex_unlock(&mem_lock);
-}
-
-static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
-{
- void *tb_store = tb;
- qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
- tb, QEMU_PLUGIN_INLINE_STORE_U64, data_tb, (uintptr_t) tb_store);
- qemu_plugin_register_vcpu_tb_exec_cb(
- tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, tb_store);
- qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
- tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);
-
- qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
- tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_cond_track_count, 1);
- qemu_plugin_register_vcpu_tb_exec_cond_cb(
- tb, vcpu_tb_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
- QEMU_PLUGIN_COND_EQ, tb_cond_track_count, cond_trigger_limit, tb_store);
-
- for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
- struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
- void *insn_store = insn;
- void *mem_store = (char *)insn_store + 0xff;
-
- qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
- insn, QEMU_PLUGIN_INLINE_STORE_U64, data_insn,
- (uintptr_t) insn_store);
- qemu_plugin_register_vcpu_insn_exec_cb(
- insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, insn_store);
- qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
- insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);
-
- qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
- insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_cond_track_count, 1);
- qemu_plugin_register_vcpu_insn_exec_cond_cb(
- insn, vcpu_insn_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
- QEMU_PLUGIN_COND_EQ, insn_cond_track_count, cond_trigger_limit,
- insn_store);
-
- qemu_plugin_register_vcpu_mem_inline_per_vcpu(
- insn, QEMU_PLUGIN_MEM_RW,
- QEMU_PLUGIN_INLINE_STORE_U64,
- data_mem, (uintptr_t) mem_store);
- qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
- QEMU_PLUGIN_CB_NO_REGS,
- QEMU_PLUGIN_MEM_RW, mem_store);
- qemu_plugin_register_vcpu_mem_inline_per_vcpu(
- insn, QEMU_PLUGIN_MEM_RW,
- QEMU_PLUGIN_INLINE_ADD_U64,
- count_mem_inline, 1);
- }
-}
-
-QEMU_PLUGIN_EXPORT
-int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
- int argc, char **argv)
-{
- counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
- count_tb = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_tb);
- count_insn = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_insn);
- count_mem = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_mem);
- count_tb_inline = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_tb_inline);
- count_insn_inline = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_insn_inline);
- count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, count_mem_inline);
- tb_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, tb_cond_num_trigger);
- tb_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, tb_cond_track_count);
- insn_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, insn_cond_num_trigger);
- insn_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, insn_cond_track_count);
- data = qemu_plugin_scoreboard_new(sizeof(CPUData));
- data_insn = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_insn);
- data_tb = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_tb);
- data_mem = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_mem);
-
- qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
- qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
-
- return 0;
-}
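
plugin_exit() above cross-checks three ways of counting the same events, expecting in particular that the conditional trigger count times cond_trigger_limit plus the residual tracker equals the plain per-vcpu count. A standalone Python model of that bookkeeping (trigger_limit stands in for cond_trigger_limit = 100):

    def conditional_counter(events, trigger_limit=100):
        triggers = track = 0
        for _ in range(events):
            track += 1                    # inline ADD_U64 on every event
            if track == trigger_limit:    # conditional callback fires
                triggers += 1
                track = 0                 # the callback resets the tracker
        return triggers, track

    events = 1234
    triggers, left = conditional_counter(events)
    assert (triggers, left) == (events // 100, events % 100)
    assert triggers * 100 + left == events
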
diff --git a/tests/plugin/insn.c b/tests/plugin/insn.c
deleted file mode 100644
index baf2d07..0000000
--- a/tests/plugin/insn.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#include <inttypes.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <glib.h>
-
-#include <qemu-plugin.h>
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
-
-static qemu_plugin_u64 insn_count;
-
-static bool do_inline;
-static bool do_size;
-static bool do_trace;
-static GArray *sizes;
-
-typedef struct {
- uint64_t hits;
- uint64_t last_hit;
- uint64_t total_delta;
-} MatchCount;
-
-typedef struct {
- char *match_string;
- struct qemu_plugin_scoreboard *counts; /* MatchCount */
-} Match;
-
-static GArray *matches;
-
-typedef struct {
- Match *match;
- uint64_t vaddr;
- uint64_t hits;
- char *disas;
-} Instruction;
-
-/* A hash table to hold matched instructions */
-static GHashTable *match_insn_records;
-static GMutex match_hash_lock;
-
-
-static Instruction * get_insn_record(const char *disas, uint64_t vaddr, Match *m)
-{
- g_autofree char *str_hash = g_strdup_printf("%"PRIx64" %s", vaddr, disas);
- Instruction *record;
-
- g_mutex_lock(&match_hash_lock);
-
- if (!match_insn_records) {
- match_insn_records = g_hash_table_new(g_str_hash, g_str_equal);
- }
-
- record = g_hash_table_lookup(match_insn_records, str_hash);
-
- if (!record) {
- g_autoptr(GString) ts = g_string_new(str_hash);
-
- record = g_new0(Instruction, 1);
- record->disas = g_strdup(disas);
- record->vaddr = vaddr;
- record->match = m;
-
- g_hash_table_insert(match_insn_records, str_hash, record);
-
- g_string_prepend(ts, "Created record for: ");
- g_string_append(ts, "\n");
- qemu_plugin_outs(ts->str);
- }
-
- g_mutex_unlock(&match_hash_lock);
-
- return record;
-}
-
-/*
- * Initialise a new vcpu with reading the register list
- */
-static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
-{
- g_autoptr(GArray) reg_list = qemu_plugin_get_registers();
- g_autoptr(GByteArray) reg_value = g_byte_array_new();
-
- if (reg_list) {
- for (int i = 0; i < reg_list->len; i++) {
- qemu_plugin_reg_descriptor *rd = &g_array_index(
- reg_list, qemu_plugin_reg_descriptor, i);
- int count = qemu_plugin_read_register(rd->handle, reg_value);
- g_assert(count > 0);
- }
- }
-}
-
-
-static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
-{
- qemu_plugin_u64_add(insn_count, cpu_index, 1);
-}
-
-static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata)
-{
- Instruction *insn = (Instruction *) udata;
- Match *insn_match = insn->match;
- MatchCount *match = qemu_plugin_scoreboard_find(insn_match->counts,
- cpu_index);
-
- insn->hits++;
-
- uint64_t icount = qemu_plugin_u64_get(insn_count, cpu_index);
- uint64_t delta = icount - match->last_hit;
-
- match->hits++;
- match->total_delta += delta;
- match->last_hit = icount;
-
- if (do_trace) {
- g_autoptr(GString) ts = g_string_new("");
- g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits",
- insn->vaddr, insn->disas, insn->hits);
- g_string_append_printf(ts,
- " , cpu %u,"
- " %"PRId64" match hits,"
- " Ī”+%"PRId64 " since last match,"
- " %"PRId64 " avg insns/match\n",
- cpu_index,
- match->hits, delta,
- match->total_delta / match->hits);
-
- qemu_plugin_outs(ts->str);
- }
-}
-
-static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
-{
- size_t n = qemu_plugin_tb_n_insns(tb);
- size_t i;
-
- for (i = 0; i < n; i++) {
- struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
-
- if (do_inline) {
- qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
- insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
- } else {
- uint64_t vaddr = qemu_plugin_insn_vaddr(insn);
- qemu_plugin_register_vcpu_insn_exec_cb(
- insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS,
- GUINT_TO_POINTER(vaddr));
- }
-
- if (do_size) {
- size_t sz = qemu_plugin_insn_size(insn);
- if (sz > sizes->len) {
- g_array_set_size(sizes, sz);
- }
- unsigned long *cnt = &g_array_index(sizes, unsigned long, sz);
- (*cnt)++;
- }
-
- /*
- * If we are tracking certain instructions we will need more
- * information about the instruction which we also need to
- * save if there is a hit.
- *
- * We only want one record for each occurrence of the matched
- * instruction.
- */
- if (matches->len) {
- char *insn_disas = qemu_plugin_insn_disas(insn);
- for (int j = 0; j < matches->len; j++) {
- Match *m = &g_array_index(matches, Match, j);
- if (g_str_has_prefix(insn_disas, m->match_string)) {
- Instruction *rec = get_insn_record(insn_disas,
- qemu_plugin_insn_vaddr(insn),
- m);
-
- qemu_plugin_register_vcpu_insn_exec_cb(
- insn, vcpu_insn_matched_exec_before,
- QEMU_PLUGIN_CB_NO_REGS, rec);
- }
- }
- g_free(insn_disas);
- }
- }
-}
-
-static void plugin_exit(qemu_plugin_id_t id, void *p)
-{
- g_autoptr(GString) out = g_string_new(NULL);
- int i;
-
- if (do_size) {
- for (i = 0; i <= sizes->len; i++) {
- unsigned long *cnt = &g_array_index(sizes, unsigned long, i);
- if (*cnt) {
- g_string_append_printf(out,
- "len %d bytes: %ld insns\n", i, *cnt);
- }
- }
- } else {
- for (i = 0; i < qemu_plugin_num_vcpus(); i++) {
- g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
- i, qemu_plugin_u64_get(insn_count, i));
- }
- g_string_append_printf(out, "total insns: %" PRIu64 "\n",
- qemu_plugin_u64_sum(insn_count));
- }
- qemu_plugin_outs(out->str);
- qemu_plugin_scoreboard_free(insn_count.score);
-
- g_mutex_lock(&match_hash_lock);
-
- for (i = 0; i < matches->len; ++i) {
- Match *m = &g_array_index(matches, Match, i);
- GHashTableIter iter;
- Instruction *record;
- qemu_plugin_u64 hit_e = qemu_plugin_scoreboard_u64_in_struct(m->counts, MatchCount, hits);
- uint64_t hits = qemu_plugin_u64_sum(hit_e);
-
- g_string_printf(out, "Match: %s, hits %"PRId64"\n", m->match_string, hits);
- qemu_plugin_outs(out->str);
-
- g_hash_table_iter_init(&iter, match_insn_records);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&record)) {
- if (record->match == m) {
- g_string_printf(out,
- " %"PRIx64": %s (hits %"PRId64")\n",
- record->vaddr,
- record->disas,
- record->hits);
- qemu_plugin_outs(out->str);
- }
- }
-
- g_free(m->match_string);
- qemu_plugin_scoreboard_free(m->counts);
- }
-
- g_mutex_unlock(&match_hash_lock);
-
- g_array_free(matches, TRUE);
- g_array_free(sizes, TRUE);
-}
-
-
-/* Add a match to the array of matches */
-static void parse_match(char *match)
-{
- Match new_match = {
- .match_string = g_strdup(match),
- .counts = qemu_plugin_scoreboard_new(sizeof(MatchCount)) };
- g_array_append_val(matches, new_match);
-}
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
- const qemu_info_t *info,
- int argc, char **argv)
-{
- matches = g_array_new(false, true, sizeof(Match));
- /* null terminated so 0 is not a special case */
- sizes = g_array_new(true, true, sizeof(unsigned long));
-
- for (int i = 0; i < argc; i++) {
- char *opt = argv[i];
- g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
- if (g_strcmp0(tokens[0], "inline") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else if (g_strcmp0(tokens[0], "sizes") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_size)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else if (g_strcmp0(tokens[0], "match") == 0) {
- parse_match(tokens[1]);
- } else if (g_strcmp0(tokens[0], "trace") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_trace)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else {
- fprintf(stderr, "option parsing failed: %s\n", opt);
- return -1;
- }
- }
-
- insn_count = qemu_plugin_scoreboard_u64(
- qemu_plugin_scoreboard_new(sizeof(uint64_t)));
-
- /* Register init, translation block and exit callbacks */
- qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
- qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
- qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
- return 0;
-}
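
vcpu_insn_matched_exec_before() above keeps, per matched instruction pattern, the number of hits, the instruction count at the previous hit and the accumulated delta, so the trace line can report the average number of instructions between hits. The same arithmetic in isolation:

    def average_insns_per_match(hit_icounts):
        hits = total_delta = last_hit = 0
        for icount in hit_icounts:
            delta = icount - last_hit     # instructions executed since the previous hit
            hits += 1
            total_delta += delta
            last_hit = icount
        return total_delta // hits

    # Hits at instruction counts 100, 250 and 400: deltas 100 + 150 + 150 = 400.
    assert average_insns_per_match([100, 250, 400]) == 133
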
diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c
deleted file mode 100644
index b650ddd..0000000
--- a/tests/plugin/mem.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#include <inttypes.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <glib.h>
-
-#include <qemu-plugin.h>
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
-
-typedef struct {
- uint64_t mem_count;
- uint64_t io_count;
-} CPUCount;
-
-static struct qemu_plugin_scoreboard *counts;
-static qemu_plugin_u64 mem_count;
-static qemu_plugin_u64 io_count;
-static bool do_inline, do_callback;
-static bool do_haddr;
-static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
-
-static void plugin_exit(qemu_plugin_id_t id, void *p)
-{
- g_autoptr(GString) out = g_string_new("");
-
- if (do_inline || do_callback) {
- g_string_printf(out, "mem accesses: %" PRIu64 "\n",
- qemu_plugin_u64_sum(mem_count));
- }
- if (do_haddr) {
- g_string_append_printf(out, "io accesses: %" PRIu64 "\n",
- qemu_plugin_u64_sum(io_count));
- }
- qemu_plugin_outs(out->str);
- qemu_plugin_scoreboard_free(counts);
-}
-
-static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
- uint64_t vaddr, void *udata)
-{
- if (do_haddr) {
- struct qemu_plugin_hwaddr *hwaddr;
- hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
- if (qemu_plugin_hwaddr_is_io(hwaddr)) {
- qemu_plugin_u64_add(io_count, cpu_index, 1);
- } else {
- qemu_plugin_u64_add(mem_count, cpu_index, 1);
- }
- } else {
- qemu_plugin_u64_add(mem_count, cpu_index, 1);
- }
-}
-
-static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
-{
- size_t n = qemu_plugin_tb_n_insns(tb);
- size_t i;
-
- for (i = 0; i < n; i++) {
- struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
-
- if (do_inline) {
- qemu_plugin_register_vcpu_mem_inline_per_vcpu(
- insn, rw,
- QEMU_PLUGIN_INLINE_ADD_U64,
- mem_count, 1);
- }
- if (do_callback) {
- qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
- QEMU_PLUGIN_CB_NO_REGS,
- rw, NULL);
- }
- }
-}
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
- const qemu_info_t *info,
- int argc, char **argv)
-{
-
- for (int i = 0; i < argc; i++) {
- char *opt = argv[i];
- g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
-
- if (g_strcmp0(tokens[0], "haddr") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_haddr)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else if (g_strcmp0(tokens[0], "track") == 0) {
- if (g_strcmp0(tokens[1], "r") == 0) {
- rw = QEMU_PLUGIN_MEM_R;
- } else if (g_strcmp0(tokens[1], "w") == 0) {
- rw = QEMU_PLUGIN_MEM_W;
- } else if (g_strcmp0(tokens[1], "rw") == 0) {
- rw = QEMU_PLUGIN_MEM_RW;
- } else {
- fprintf(stderr, "invalid value for argument track: %s\n", opt);
- return -1;
- }
- } else if (g_strcmp0(tokens[0], "inline") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else if (g_strcmp0(tokens[0], "callback") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_callback)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- return -1;
- }
- } else {
- fprintf(stderr, "option parsing failed: %s\n", opt);
- return -1;
- }
- }
-
- if (do_inline && do_callback) {
- fprintf(stderr,
- "can't enable inline and callback counting at the same time\n");
- return -1;
- }
-
- counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
- mem_count = qemu_plugin_scoreboard_u64_in_struct(
- counts, CPUCount, mem_count);
- io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count);
- qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
- qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
- return 0;
-}
diff --git a/tests/plugin/meson.build b/tests/plugin/meson.build
deleted file mode 100644
index 9eece5b..0000000
--- a/tests/plugin/meson.build
+++ /dev/null
@@ -1,22 +0,0 @@
-t = []
-if get_option('plugins')
- foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall']
- if host_os == 'windows'
- t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c',
- include_directories: '../../include/qemu',
- link_depends: [win32_qemu_plugin_api_lib],
- link_args: ['-Lplugins', '-lqemu_plugin_api'],
- dependencies: glib)
-
- else
- t += shared_module(i, files(i + '.c'),
- include_directories: '../../include/qemu',
- dependencies: glib)
- endif
- endforeach
-endif
-if t.length() > 0
- alias_target('test-plugins', t)
-else
- run_target('test-plugins', command: find_program('true'))
-endif
diff --git a/tests/plugin/syscall.c b/tests/plugin/syscall.c
deleted file mode 100644
index 72e1a5b..0000000
--- a/tests/plugin/syscall.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2020, Matthias Weckbecker <matthias@weckbecker.name>
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#include <inttypes.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <glib.h>
-
-#include <qemu-plugin.h>
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
-
-typedef struct {
- int64_t num;
- int64_t calls;
- int64_t errors;
-} SyscallStats;
-
-static GMutex lock;
-static GHashTable *statistics;
-
-static SyscallStats *get_or_create_entry(int64_t num)
-{
- SyscallStats *entry =
- (SyscallStats *) g_hash_table_lookup(statistics, GINT_TO_POINTER(num));
-
- if (!entry) {
- entry = g_new0(SyscallStats, 1);
- entry->num = num;
- g_hash_table_insert(statistics, GINT_TO_POINTER(num), (gpointer) entry);
- }
-
- return entry;
-}
-
-static void vcpu_syscall(qemu_plugin_id_t id, unsigned int vcpu_index,
- int64_t num, uint64_t a1, uint64_t a2,
- uint64_t a3, uint64_t a4, uint64_t a5,
- uint64_t a6, uint64_t a7, uint64_t a8)
-{
- if (statistics) {
- SyscallStats *entry;
- g_mutex_lock(&lock);
- entry = get_or_create_entry(num);
- entry->calls++;
- g_mutex_unlock(&lock);
- } else {
- g_autofree gchar *out = g_strdup_printf("syscall #%" PRIi64 "\n", num);
- qemu_plugin_outs(out);
- }
-}
-
-static void vcpu_syscall_ret(qemu_plugin_id_t id, unsigned int vcpu_idx,
- int64_t num, int64_t ret)
-{
- if (statistics) {
- SyscallStats *entry;
-
- g_mutex_lock(&lock);
- /* Should always return an existing entry. */
- entry = get_or_create_entry(num);
- if (ret < 0) {
- entry->errors++;
- }
- g_mutex_unlock(&lock);
- } else {
- g_autofree gchar *out = g_strdup_printf(
- "syscall #%" PRIi64 " returned -> %" PRIi64 "\n", num, ret);
- qemu_plugin_outs(out);
- }
-}
-
-static void print_entry(gpointer val, gpointer user_data)
-{
- SyscallStats *entry = (SyscallStats *) val;
- int64_t syscall_num = entry->num;
- g_autofree gchar *out = g_strdup_printf(
- "%-13" PRIi64 "%-6" PRIi64 " %" PRIi64 "\n",
- syscall_num, entry->calls, entry->errors);
- qemu_plugin_outs(out);
-}
-
-static gint comp_func(gconstpointer ea, gconstpointer eb)
-{
- SyscallStats *ent_a = (SyscallStats *) ea;
- SyscallStats *ent_b = (SyscallStats *) eb;
-
- return ent_a->calls > ent_b->calls ? -1 : 1;
-}
-
-/* ************************************************************************* */
-static void plugin_exit(qemu_plugin_id_t id, void *p)
-{
- if (!statistics) {
- return;
- }
-
- g_mutex_lock(&lock);
- GList *entries = g_hash_table_get_values(statistics);
- entries = g_list_sort(entries, comp_func);
- qemu_plugin_outs("syscall no. calls errors\n");
-
- g_list_foreach(entries, print_entry, NULL);
-
- g_list_free(entries);
- g_hash_table_destroy(statistics);
- g_mutex_unlock(&lock);
-}
-
-QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
- const qemu_info_t *info,
- int argc, char **argv)
-{
- bool do_print = false;
-
- for (int i = 0; i < argc; i++) {
- char *opt = argv[i];
- g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
-
- if (g_strcmp0(tokens[0], "print") == 0) {
- if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_print)) {
- fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
- }
- } else {
- fprintf(stderr, "unsupported argument: %s\n", argv[i]);
- return -1;
- }
- }
-
- if (!do_print) {
- statistics = g_hash_table_new_full(NULL, g_direct_equal, NULL, g_free);
- }
-
- qemu_plugin_register_vcpu_syscall_cb(id, vcpu_syscall);
- qemu_plugin_register_vcpu_syscall_ret_cb(id, vcpu_syscall_ret);
- qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
- return 0;
-}
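
The syscall plugin above keeps one statistics entry per syscall number, bumping calls on entry and errors on a negative return. A minimal Python model of that table; the (num, ret) pairs are hypothetical:

    from collections import defaultdict

    stats = defaultdict(lambda: {"calls": 0, "errors": 0})

    def on_syscall(num):
        stats[num]["calls"] += 1

    def on_syscall_ret(num, ret):
        if ret < 0:
            stats[num]["errors"] += 1

    for num, ret in [(0, 42), (0, -9), (1, 3)]:    # hypothetical (num, ret) pairs
        on_syscall(num)
        on_syscall_ret(num, ret)

    assert stats[0] == {"calls": 2, "errors": 1}
    assert stats[1] == {"calls": 1, "errors": 0}
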
diff --git a/tests/qapi-schema/alternate-array.out b/tests/qapi-schema/alternate-array.out
index a657d85..2f30973 100644
--- a/tests/qapi-schema/alternate-array.out
+++ b/tests/qapi-schema/alternate-array.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/comments.out b/tests/qapi-schema/comments.out
index ce4f6a4..937070c 100644
--- a/tests/qapi-schema/comments.out
+++ b/tests/qapi-schema/comments.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json
index f64bf38..14b808f 100644
--- a/tests/qapi-schema/doc-good.json
+++ b/tests/qapi-schema/doc-good.json
@@ -12,6 +12,10 @@
##
##
+# Just text, no heading.
+##
+
+##
# == Subsection
#
# *with emphasis*
@@ -208,7 +212,7 @@
#
# -> "this example"
#
-# <- "has no title"
+# <- ... has no title ...
##
{ 'command': 'cmd-boxed', 'boxed': true,
'data': 'Object',
diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out
index 6d24f11..dc8352e 100644
--- a/tests/qapi-schema/doc-good.out
+++ b/tests/qapi-schema/doc-good.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
@@ -59,6 +58,9 @@ doc freeform
= Section
doc freeform
body=
+Just text, no heading.
+doc freeform
+ body=
== Subsection
*with emphasis*
@@ -111,7 +113,7 @@ The _one_ {and only}, description on the same line
Also _one_ {and only}
feature=enum-member-feat
a member feature
- section=None
+ section=Plain
@two is undocumented
doc symbol=Base
body=
@@ -169,15 +171,15 @@ description starts on the same line
a feature
feature=cmd-feat2
another feature
- section=None
+ section=Plain
.. note:: @arg3 is undocumented
section=Returns
@Object
section=Errors
some
- section=TODO
+ section=Todo
frobnicate
- section=None
+ section=Plain
.. admonition:: Notes
- Lorem ipsum dolor sit amet
@@ -210,12 +212,12 @@ If you're bored enough to read this, go see a video of boxed cats
a feature
feature=cmd-feat2
another feature
- section=None
+ section=Plain
.. qmp-example::
-> "this example"
- <- "has no title"
+ <- ... has no title ...
doc symbol=EVT_BOXED
body=
diff --git a/tests/qapi-schema/doc-good.txt b/tests/qapi-schema/doc-good.txt
index cb37db6..17a1d56 100644
--- a/tests/qapi-schema/doc-good.txt
+++ b/tests/qapi-schema/doc-good.txt
@@ -264,7 +264,7 @@ Example::
-> "this example"
- <- "has no title"
+ <- ... has no title ...
"EVT_BOXED" (Event)
diff --git a/tests/qapi-schema/empty.out b/tests/qapi-schema/empty.out
index 3feb3f6..d1981f8 100644
--- a/tests/qapi-schema/empty.out
+++ b/tests/qapi-schema/empty.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/features-too-many.err b/tests/qapi-schema/features-too-many.err
new file mode 100644
index 0000000..bbbd6e5
--- /dev/null
+++ b/tests/qapi-schema/features-too-many.err
@@ -0,0 +1,2 @@
+features-too-many.json: In command 'go-fish':
+features-too-many.json:2: Maximum of 64 schema features is permitted
diff --git a/tests/qapi-schema/features-too-many.json b/tests/qapi-schema/features-too-many.json
new file mode 100644
index 0000000..aab0a0b
--- /dev/null
+++ b/tests/qapi-schema/features-too-many.json
@@ -0,0 +1,13 @@
+# Max 64 features, with 2 specials, so 63rd custom is invalid
+{ 'command': 'go-fish',
+ 'features': [
+ 'f00', 'f01', 'f02', 'f03', 'f04', 'f05', 'f06', 'f07',
+ 'f08', 'f09', 'f0a', 'f0b', 'f0c', 'f0d', 'f0e', 'f0f',
+ 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17',
+ 'f18', 'f19', 'f1a', 'f1b', 'f1c', 'f1d', 'f1e', 'f1f',
+ 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27',
+ 'f28', 'f29', 'f2a', 'f2b', 'f2c', 'f2d', 'f2e', 'f2f',
+ 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37',
+ 'f38', 'f39', 'f3a', 'f3b', 'f3c', 'f3d', 'f3e'
+ ]
+}
diff --git a/tests/qapi-schema/features-too-many.out b/tests/qapi-schema/features-too-many.out
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/qapi-schema/features-too-many.out
diff --git a/tests/qapi-schema/include-repetition.out b/tests/qapi-schema/include-repetition.out
index 16dbd9b..c564d27 100644
--- a/tests/qapi-schema/include-repetition.out
+++ b/tests/qapi-schema/include-repetition.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/include-simple.out b/tests/qapi-schema/include-simple.out
index 48e923b..ec8200a 100644
--- a/tests/qapi-schema/include-simple.out
+++ b/tests/qapi-schema/include-simple.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/indented-expr.out b/tests/qapi-schema/indented-expr.out
index 6a30ded..a7c22c3 100644
--- a/tests/qapi-schema/indented-expr.out
+++ b/tests/qapi-schema/indented-expr.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build
index 0f479d9..9577178 100644
--- a/tests/qapi-schema/meson.build
+++ b/tests/qapi-schema/meson.build
@@ -105,6 +105,7 @@ schemas = [
'event-case.json',
'event-member-invalid-dict.json',
'event-nest-struct.json',
+ 'features-too-many.json',
'features-bad-type.json',
'features-deprecated-type.json',
'features-duplicate-name.json',
diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out
index e2f0981..4617eb4 100644
--- a/tests/qapi-schema/qapi-schema-test.out
+++ b/tests/qapi-schema/qapi-schema-test.out
@@ -1,7 +1,6 @@
module ./builtin
object q_empty
enum QType
- prefix QTYPE
member none
member qnull
member qnum
diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py
index 7e3f9f4..4be9302 100755
--- a/tests/qapi-schema/test-qapi.py
+++ b/tests/qapi-schema/test-qapi.py
@@ -96,17 +96,8 @@ class QAPISchemaTestVisitor(QAPISchemaVisitor):
@staticmethod
def _print_if(ifcond, indent=4):
- # TODO Drop this hack after replacing OrderedDict by plain
- # dict (requires Python 3.7)
- def _massage(subcond):
- if isinstance(subcond, str):
- return subcond
- if isinstance(subcond, list):
- return [_massage(val) for val in subcond]
- return {key: _massage(val) for key, val in subcond.items()}
-
if ifcond.is_present():
- print('%sif %s' % (' ' * indent, _massage(ifcond.ifcond)))
+ print('%sif %s' % (' ' * indent, ifcond.ifcond))
@classmethod
def _print_features(cls, features, indent=4):
@@ -131,7 +122,7 @@ def test_frontend(fname):
for feat, section in doc.features.items():
print(' feature=%s\n%s' % (feat, section.text))
for section in doc.sections:
- print(' section=%s\n%s' % (section.tag, section.text))
+ print(' section=%s\n%s' % (section.kind, section.text))
def open_test_result(dir_name, file_name, update):
diff --git a/tests/qemu-iotests/024 b/tests/qemu-iotests/024
index 285f17e..b29c76e 100755
--- a/tests/qemu-iotests/024
+++ b/tests/qemu-iotests/024
@@ -283,7 +283,7 @@ TEST_IMG=$BASE_OLD _make_test_img -b "$BASE_NEW" -F $IMGFMT \
CLUSTER_SIZE=$(( CLUSTER_SIZE * 2 )) TEST_IMG=$OVERLAY \
_make_test_img -b "$BASE_OLD" -F $IMGFMT $(( CLUSTER_SIZE * 6 ))
-TEST_IMG=$OVERLAY _img_info
+TEST_IMG=$OVERLAY _img_info | grep -v '^backing file format:'
echo
echo "Fill backing files with data"
diff --git a/tests/qemu-iotests/024.out b/tests/qemu-iotests/024.out
index e1e8eea..3d1e319 100644
--- a/tests/qemu-iotests/024.out
+++ b/tests/qemu-iotests/024.out
@@ -214,7 +214,6 @@ file format: IMGFMT
virtual size: 384 KiB (393216 bytes)
cluster_size: 131072
backing file: TEST_DIR/subdir/t.IMGFMT.base_old
-backing file format: IMGFMT
Fill backing files with data
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index 98d17b1..8452845 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -1100,10 +1100,8 @@ class TestRepairQuorum(iotests.QMPTestCase):
# Check the full error message now
self.vm.shutdown()
- log = self.vm.get_log()
- log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
+ log = iotests.filter_qtest(self.vm.get_log())
log = re.sub(r'^Formatting.*\n', '', log)
- log = re.sub(r'\n\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
log = re.sub(r'^%s: ' % os.path.basename(iotests.qemu_prog), '', log)
self.assertEqual(log,
diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out
index 7e10c5f..f19b532 100644
--- a/tests/qemu-iotests/051.pc.out
+++ b/tests/qemu-iotests/051.pc.out
@@ -181,7 +181,7 @@ QEMU X.Y.Z monitor - type 'help' for more information
Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-scsi,id=virtio-scsi1 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on
QEMU X.Y.Z monitor - type 'help' for more information
-(qemu) QEMU_PROG: -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on: Cannot change iothread of active block backend
+(qemu) quit
Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on
QEMU X.Y.Z monitor - type 'help' for more information
diff --git a/tests/qemu-iotests/106 b/tests/qemu-iotests/106
index ae0fc46..5554843 100755
--- a/tests/qemu-iotests/106
+++ b/tests/qemu-iotests/106
@@ -40,6 +40,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
_supported_fmt raw
_supported_proto file fuse
_supported_os Linux
+_require_disk_usage
# in kB
CREATION_SIZE=128
diff --git a/tests/qemu-iotests/125 b/tests/qemu-iotests/125
index 46279d6..708e7c5 100755
--- a/tests/qemu-iotests/125
+++ b/tests/qemu-iotests/125
@@ -35,7 +35,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
get_image_size_on_host()
{
- echo $(($(stat -c '%b * %B' "$TEST_IMG_FILE")))
+ disk_usage "$TEST_IMG_FILE"
}
# get standard environment and filters
diff --git a/tests/qemu-iotests/165 b/tests/qemu-iotests/165
index b24907a..b3b1709 100755
--- a/tests/qemu-iotests/165
+++ b/tests/qemu-iotests/165
@@ -82,9 +82,7 @@ class TestPersistentDirtyBitmap(iotests.QMPTestCase):
self.vm.shutdown()
#catch 'Persistent bitmaps are lost' possible error
- log = self.vm.get_log()
- log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
- log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
+ log = iotests.filter_qtest(self.vm.get_log())
if log:
print(log)
diff --git a/tests/qemu-iotests/172.out b/tests/qemu-iotests/172.out
index 07eebf3..146fc72 100644
--- a/tests/qemu-iotests/172.out
+++ b/tests/qemu-iotests/172.out
@@ -68,9 +68,6 @@ floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -125,9 +122,6 @@ ide1-cd0: [not inserted]
floppy0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -183,9 +177,6 @@ floppy1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -265,9 +256,6 @@ floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -322,9 +310,6 @@ ide1-cd0: [not inserted]
floppy0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -380,9 +365,6 @@ floppy1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -422,9 +404,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -461,9 +440,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -519,9 +495,6 @@ none1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -586,9 +559,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -644,9 +614,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -702,9 +669,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -760,9 +724,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -827,9 +788,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -885,9 +843,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -930,9 +885,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -1106,9 +1058,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -1145,9 +1094,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -1187,9 +1133,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
@@ -1226,9 +1169,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2)
ide1-cd0: [not inserted]
Attached to: /machine/unattached/device[N]
Removable device: not locked, tray closed
-
-sd0: [not inserted]
- Removable device: not locked, tray closed
(qemu) quit
diff --git a/tests/qemu-iotests/175 b/tests/qemu-iotests/175
index f74f053..bbbf550 100755
--- a/tests/qemu-iotests/175
+++ b/tests/qemu-iotests/175
@@ -77,6 +77,7 @@ _supported_os Linux
_default_cache_mode none
_supported_cache_modes none directsync
+_require_disk_usage
size=$((1 * 1024 * 1024))
diff --git a/tests/qemu-iotests/184.out b/tests/qemu-iotests/184.out
index e8f631f..52692b6 100644
--- a/tests/qemu-iotests/184.out
+++ b/tests/qemu-iotests/184.out
@@ -26,6 +26,7 @@ Testing:
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 1073741824,
@@ -59,6 +60,7 @@ Testing:
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 1073741824,
"filename": "null-co://",
diff --git a/tests/qemu-iotests/191.out b/tests/qemu-iotests/191.out
index c3309e4..2a72ca7 100644
--- a/tests/qemu-iotests/191.out
+++ b/tests/qemu-iotests/191.out
@@ -114,6 +114,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 67108864,
@@ -155,6 +156,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT.ovl2",
@@ -183,6 +185,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 67108864,
@@ -224,6 +227,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT",
@@ -252,6 +256,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 67108864,
@@ -293,6 +298,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 393216,
"filename": "TEST_DIR/t.IMGFMT.mid",
@@ -321,6 +327,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 67108864,
"filename": "TEST_DIR/t.IMGFMT.base",
@@ -350,6 +357,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 393216,
"filename": "TEST_DIR/t.IMGFMT.base",
@@ -521,6 +529,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 67108864,
@@ -562,6 +571,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT.ovl2",
@@ -590,6 +600,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"backing-image": {
@@ -642,6 +653,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT.ovl3",
@@ -670,6 +682,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 67108864,
"filename": "TEST_DIR/t.IMGFMT.base",
@@ -699,6 +712,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 393216,
"filename": "TEST_DIR/t.IMGFMT.base",
@@ -727,6 +741,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 67108864,
@@ -768,6 +783,7 @@ wrote 65536/65536 bytes at offset 1048576
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT",
diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194
index c0ce82d..e114c0b 100755
--- a/tests/qemu-iotests/194
+++ b/tests/qemu-iotests/194
@@ -34,6 +34,7 @@ with iotests.FilePath('source.img') as source_img_path, \
img_size = '1G'
iotests.qemu_img_create('-f', iotests.imgfmt, source_img_path, img_size)
+ iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write 512M 1M', source_img_path)
iotests.qemu_img_create('-f', iotests.imgfmt, dest_img_path, img_size)
iotests.log('Launching VMs...')
@@ -61,7 +62,8 @@ with iotests.FilePath('source.img') as source_img_path, \
iotests.log('Waiting for `drive-mirror` to complete...')
iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
- filters=[iotests.filter_qmp_event])
+ filters=[iotests.filter_qmp_event,
+ iotests.filter_block_job])
iotests.log('Starting migration...')
capabilities = [{'capability': 'events', 'state': True},
@@ -87,7 +89,8 @@ with iotests.FilePath('source.img') as source_img_path, \
while True:
event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED')
- iotests.log(event2, filters=[iotests.filter_qmp_event])
+ iotests.log(event2, filters=[iotests.filter_qmp_event,
+ iotests.filter_block_job])
if event2['event'] == 'BLOCK_JOB_COMPLETED':
iotests.log('Stopping the NBD server on destination...')
iotests.log(dest_vm.qmp('nbd-server-stop'))
diff --git a/tests/qemu-iotests/194.out b/tests/qemu-iotests/194.out
index 376ed1d..d02655a 100644
--- a/tests/qemu-iotests/194.out
+++ b/tests/qemu-iotests/194.out
@@ -7,17 +7,18 @@ Launching NBD server on destination...
Starting `drive-mirror` on source...
{"return": {}}
Waiting for `drive-mirror` to complete...
-{"data": {"device": "mirror-job0", "len": 1073741824, "offset": 1073741824, "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "mirror-job0", "len": "LEN", "offset": "OFFSET", "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
Starting migration...
{"return": {}}
{"execute": "migrate-start-postcopy", "arguments": {}}
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
Gracefully ending the `drive-mirror` job on source...
{"return": {}}
-{"data": {"device": "mirror-job0", "len": 1073741824, "offset": 1073741824, "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "mirror-job0", "len": "LEN", "offset": "OFFSET", "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
Stopping the NBD server on destination...
{"return": {}}
Wait for migration completion on target...
diff --git a/tests/qemu-iotests/203.out b/tests/qemu-iotests/203.out
index 9d4abba..8e58705 100644
--- a/tests/qemu-iotests/203.out
+++ b/tests/qemu-iotests/203.out
@@ -8,4 +8,5 @@ Starting migration...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
diff --git a/tests/qemu-iotests/211.out b/tests/qemu-iotests/211.out
index f02c754..ff9f9a6 100644
--- a/tests/qemu-iotests/211.out
+++ b/tests/qemu-iotests/211.out
@@ -17,7 +17,7 @@ file format: IMGFMT
virtual size: 128 MiB (134217728 bytes)
cluster_size: 1048576
-[{"data": false, "depth": 0, "length": 134217728, "present": true, "start": 0, "zero": true}]
+[{"compressed": false, "data": false, "depth": 0, "length": 134217728, "present": true, "start": 0, "zero": true}]
=== Successful image creation (explicit defaults) ===
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}}
@@ -35,7 +35,7 @@ file format: IMGFMT
virtual size: 64 MiB (67108864 bytes)
cluster_size: 1048576
-[{"data": false, "depth": 0, "length": 67108864, "present": true, "start": 0, "zero": true}]
+[{"compressed": false, "data": false, "depth": 0, "length": 67108864, "present": true, "start": 0, "zero": true}]
=== Successful image creation (with non-default options) ===
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}}
@@ -53,7 +53,7 @@ file format: IMGFMT
virtual size: 32 MiB (33554432 bytes)
cluster_size: 1048576
-[{"data": true, "depth": 0, "length": 3072, "offset": 1024, "present": true, "start": 0, "zero": false}, {"data": true, "depth": 0, "length": 33551360, "offset": 4096, "present": true, "start": 3072, "zero": true}]
+[{"compressed": false, "data": true, "depth": 0, "length": 3072, "offset": 1024, "present": true, "start": 0, "zero": false}, {"compressed": false, "data": true, "depth": 0, "length": 33551360, "offset": 4096, "present": true, "start": 3072, "zero": true}]
=== Invalid BlockdevRef ===
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vdi", "file": "this doesn't exist", "size": 33554432}}}
diff --git a/tests/qemu-iotests/221 b/tests/qemu-iotests/221
index c463fd4..eba00b8 100755
--- a/tests/qemu-iotests/221
+++ b/tests/qemu-iotests/221
@@ -41,6 +41,7 @@ _supported_os Linux
_default_cache_mode writeback
_supported_cache_modes writeback writethrough unsafe
+_require_disk_usage
echo
echo "=== Check mapping of unaligned raw image ==="
diff --git a/tests/qemu-iotests/233.out b/tests/qemu-iotests/233.out
index 1910f7d..d498d55 100644
--- a/tests/qemu-iotests/233.out
+++ b/tests/qemu-iotests/233.out
@@ -69,8 +69,8 @@ read 1048576/1048576 bytes at offset 1048576
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
== check TLS with authorization ==
-qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort
-qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort
+qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: The TLS connection was non-properly terminated.
+qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: The TLS connection was non-properly terminated.
== check TLS fail over UNIX with no hostname ==
qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0': No hostname for certificate validation
@@ -103,14 +103,14 @@ qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0'
qemu-nbd: TLS handshake failed: The TLS connection was non-properly terminated.
== final server log ==
-qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort
-qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort
+qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated.
+qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated.
qemu-nbd: option negotiation failed: Verify failed: No certificate was found.
qemu-nbd: option negotiation failed: Verify failed: No certificate was found.
qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied
qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied
-qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort
-qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort
+qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated.
+qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated.
qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received.
qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received.
*** done
diff --git a/tests/qemu-iotests/234.out b/tests/qemu-iotests/234.out
index ac8b643..be3e138 100644
--- a/tests/qemu-iotests/234.out
+++ b/tests/qemu-iotests/234.out
@@ -10,6 +10,7 @@ Starting migration to B...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
@@ -27,6 +28,7 @@ Starting migration back to A...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
diff --git a/tests/qemu-iotests/240 b/tests/qemu-iotests/240
index 9b281e1..f8af9ff 100755
--- a/tests/qemu-iotests/240
+++ b/tests/qemu-iotests/240
@@ -81,8 +81,6 @@ class TestCase(iotests.QMPTestCase):
self.vm.qmp_log('device_del', id='scsi-hd0')
self.vm.event_wait('DEVICE_DELETED')
- self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0', bus="scsi1.0")
-
self.vm.qmp_log('device_del', id='scsi-hd1')
self.vm.event_wait('DEVICE_DELETED')
self.vm.qmp_log('blockdev-del', node_name='hd0')
diff --git a/tests/qemu-iotests/240.out b/tests/qemu-iotests/240.out
index 89ed25e..10dcc42 100644
--- a/tests/qemu-iotests/240.out
+++ b/tests/qemu-iotests/240.out
@@ -46,10 +46,8 @@
{"execute": "device_add", "arguments": {"bus": "scsi0.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}}
-{"error": {"class": "GenericError", "desc": "Cannot change iothread of active block backend"}}
-{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
-{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}}
+{"execute": "device_del", "arguments": {"id": "scsi-hd0"}}
{"return": {}}
{"execute": "device_del", "arguments": {"id": "scsi-hd1"}}
{"return": {}}
diff --git a/tests/qemu-iotests/250 b/tests/qemu-iotests/250
index af48f83..c0a0dbc 100755
--- a/tests/qemu-iotests/250
+++ b/tests/qemu-iotests/250
@@ -52,11 +52,6 @@ _unsupported_imgopts data_file
# bdrv_co_truncate(bs->file) call in qcow2_co_truncate(), which might succeed
# anyway.
-disk_usage()
-{
- du --block-size=1 $1 | awk '{print $1}'
-}
-
size=2100M
_make_test_img -o "cluster_size=1M,preallocation=metadata" $size
diff --git a/tests/qemu-iotests/253 b/tests/qemu-iotests/253
index 35039d2..6da85e6 100755
--- a/tests/qemu-iotests/253
+++ b/tests/qemu-iotests/253
@@ -41,6 +41,7 @@ _supported_os Linux
_default_cache_mode none
_supported_cache_modes none directsync
+_require_disk_usage
echo
echo "=== Check mapping of unaligned raw image ==="
diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out
index b8a2d35..bd7706b 100644
--- a/tests/qemu-iotests/262.out
+++ b/tests/qemu-iotests/262.out
@@ -8,6 +8,7 @@ Starting migration to B...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
diff --git a/tests/qemu-iotests/273.out b/tests/qemu-iotests/273.out
index 71843f0..c19753c 100644
--- a/tests/qemu-iotests/273.out
+++ b/tests/qemu-iotests/273.out
@@ -23,6 +23,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"backing-image": {
@@ -74,6 +75,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT",
@@ -102,6 +104,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"backing-image": {
"virtual-size": 197120,
@@ -142,6 +145,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT.mid",
@@ -170,6 +174,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
{
"iops_rd": 0,
"detect_zeroes": "off",
+ "active": true,
"image": {
"virtual-size": 197120,
"filename": "TEST_DIR/t.IMGFMT.base",
diff --git a/tests/qemu-iotests/280.out b/tests/qemu-iotests/280.out
index 546dbb4..3741114 100644
--- a/tests/qemu-iotests/280.out
+++ b/tests/qemu-iotests/280.out
@@ -7,6 +7,7 @@ Enabling migration QMP events on VM...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
VM is now stopped:
diff --git a/tests/qemu-iotests/302 b/tests/qemu-iotests/302
index a6d79e7..e980ec5 100755
--- a/tests/qemu-iotests/302
+++ b/tests/qemu-iotests/302
@@ -115,13 +115,22 @@ with tarfile.open(tar_file, "w") as tar:
disk = tarfile.TarInfo("disk")
disk.size = actual_size
- tar.addfile(disk)
- # 6. Shrink the tar to the actual size, aligned to 512 bytes.
+ # Since python 3.13 we cannot use addfile() to create the member header.
+ # Add the tarinfo directly using public but undocumented attributes.
- tar_size = offset + (disk.size + 511) & ~511
- tar.fileobj.seek(tar_size)
- tar.fileobj.truncate(tar_size)
+ buf = disk.tobuf(tar.format, tar.encoding, tar.errors)
+ tar.fileobj.write(buf)
+ tar.members.append(disk)
+
+ # Update the offset and position to the location of the next member.
+
+ tar.offset = offset + (disk.size + 511) & ~511
+ tar.fileobj.seek(tar.offset)
+
+ # 6. Shrink the tar to the actual size.
+
+ tar.fileobj.truncate(tar.offset)
with tarfile.open(tar_file) as tar:
members = [{"name": m.name, "size": m.size, "offset": m.offset_data}
diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308
index ea81dc4..6eced3a 100755
--- a/tests/qemu-iotests/308
+++ b/tests/qemu-iotests/308
@@ -51,6 +51,7 @@ _unsupported_fmt vpc
_supported_proto file # We create the FUSE export manually
_supported_os Linux # We need /dev/urandom
+_require_disk_usage
# $1: Export ID
# $2: Options (beyond the node-name and ID)
@@ -290,7 +291,7 @@ echo '--- Try growing non-growable export ---'
# Get the current size so we can write beyond the EOF
orig_len=$(get_proto_len "$EXT_MP" "$TEST_IMG")
-orig_disk_usage=$(stat -c '%b' "$TEST_IMG")
+orig_disk_usage=$(disk_usage "$TEST_IMG")
# Should fail (exports are non-growable by default)
# (Note that qemu-io can never write beyond the EOF, so we have to use
@@ -312,7 +313,7 @@ else
echo 'OK: Post-truncate image size is as expected'
fi
-new_disk_usage=$(stat -c '%b' "$TEST_IMG")
+new_disk_usage=$(disk_usage "$TEST_IMG")
if [ "$new_disk_usage" -gt "$orig_disk_usage" ]; then
echo 'OK: Disk usage grew with fallocate'
else
diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check
index 56d88ca..545f9ec 100755
--- a/tests/qemu-iotests/check
+++ b/tests/qemu-iotests/check
@@ -84,7 +84,7 @@ def make_argparser() -> argparse.ArgumentParser:
p.set_defaults(imgfmt='raw', imgproto='file')
format_list = ['raw', 'bochs', 'cloop', 'parallels', 'qcow', 'qcow2',
- 'qed', 'vdi', 'vpc', 'vhdx', 'vmdk', 'luks', 'dmg']
+ 'qed', 'vdi', 'vpc', 'vhdx', 'vmdk', 'luks', 'dmg', 'vvfat']
g_fmt = p.add_argument_group(
' image format options',
'The following options set the IMGFMT environment variable. '
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index 95c1257..e977cb4 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -140,6 +140,12 @@ _optstr_add()
fi
}
+# report real disk usage for sparse files
+disk_usage()
+{
+ du --block-size=1 "$1" | awk '{print $1}'
+}
+
# Set the variables to the empty string to turn Valgrind off
# for specific processes, e.g.
# $ VALGRIND_QEMU_IO= ./check -qcow2 -valgrind 015
@@ -990,6 +996,36 @@ _require_large_file()
rm "$FILENAME"
}
+# Check whether disk_usage can be reliably used.
+_require_disk_usage()
+{
+ local unusable=false
+ # ZFS triggers known failures on this front; it does not immediately
+ # allocate files, and then aggressively compresses writes even when full
+ # allocation was requested.
+ if [ -z "$TEST_IMG_FILE" ]; then
+ FILENAME="$TEST_IMG"
+ else
+ FILENAME="$TEST_IMG_FILE"
+ fi
+ if [ -e "FILENAME" ]; then
+ echo "unwilling to overwrite existing file"
+ exit 1
+ fi
+ $QEMU_IMG create -f raw "$FILENAME" 5M > /dev/null
+ if [ $(disk_usage "$FILENAME") -gt $((1024*1024)) ]; then
+ unusable=true
+ fi
+ $QEMU_IMG create -f raw -o preallocation=full "$FILENAME" 5M > /dev/null
+ if [ $(disk_usage "$FILENAME") -lt $((4*1024*1024)) ]; then
+ unusable=true
+ fi
+ rm -f "$FILENAME"
+ if $unusable; then
+ _notrun "file system on $TEST_DIR does not handle sparse files nicely"
+ fi
+}
+
# Check that a set of devices is available in the QEMU binary
#
_require_devices()
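
For context on _require_disk_usage above: the helper probes whether the filesystem backing the test directory reports allocation plausibly, by checking that a sparse raw image stays small and a fully preallocated one does not. A rough Python sketch of the same probe (assuming qemu-img is on PATH and the hypothetical probe.raw does not already exist):

    import os
    import subprocess

    def disk_usage(path):
        # Bytes actually allocated on disk, like `du --block-size=1`.
        return os.stat(path).st_blocks * 512

    def disk_usage_is_reliable(path="probe.raw"):
        ok = True
        # A freshly created raw image should be sparse (well under 1 MiB).
        subprocess.run(["qemu-img", "create", "-f", "raw", path, "5M"],
                       check=True, stdout=subprocess.DEVNULL)
        if disk_usage(path) > 1024 * 1024:
            ok = False
        # A fully preallocated image should occupy most of its 5 MiB.
        subprocess.run(["qemu-img", "create", "-f", "raw",
                        "-o", "preallocation=full", path, "5M"],
                       check=True, stdout=subprocess.DEVNULL)
        if disk_usage(path) < 4 * 1024 * 1024:
            ok = False
        os.remove(path)
        return ok
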
diff --git a/tests/qemu-iotests/fat16.py b/tests/qemu-iotests/fat16.py
new file mode 100644
index 0000000..7d2d052
--- /dev/null
+++ b/tests/qemu-iotests/fat16.py
@@ -0,0 +1,690 @@
+# A simple FAT16 driver that is used to test the `vvfat` driver in QEMU.
+#
+# Copyright (C) 2024 Amjad Alsharafi <amjadsharafi10@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from typing import Callable, List, Optional, Protocol, Set
+import string
+
+SECTOR_SIZE = 512
+DIRENTRY_SIZE = 32
+ALLOWED_FILE_CHARS = set(
+ "!#$%&'()-@^_`{}~" + string.digits + string.ascii_uppercase
+)
+
+
+class MBR:
+ def __init__(self, data: bytes):
+ assert len(data) == 512
+ self.partition_table = []
+ for i in range(4):
+ partition = data[446 + i * 16 : 446 + (i + 1) * 16]
+ self.partition_table.append(
+ {
+ "status": partition[0],
+ "start_head": partition[1],
+ "start_sector": partition[2] & 0x3F,
+ "start_cylinder": ((partition[2] & 0xC0) << 2)
+ | partition[3],
+ "type": partition[4],
+ "end_head": partition[5],
+ "end_sector": partition[6] & 0x3F,
+ "end_cylinder": ((partition[6] & 0xC0) << 2)
+ | partition[7],
+ "start_lba": int.from_bytes(partition[8:12], "little"),
+ "size": int.from_bytes(partition[12:16], "little"),
+ }
+ )
+
+ def __str__(self):
+ return "\n".join(
+ [
+ f"{i}: {partition}"
+ for i, partition in enumerate(self.partition_table)
+ ]
+ )
+
+
+class FatBootSector:
+ # pylint: disable=too-many-instance-attributes
+ def __init__(self, data: bytes):
+ assert len(data) == 512
+ self.bytes_per_sector = int.from_bytes(data[11:13], "little")
+ self.sectors_per_cluster = data[13]
+ self.reserved_sectors = int.from_bytes(data[14:16], "little")
+ self.fat_count = data[16]
+ self.root_entries = int.from_bytes(data[17:19], "little")
+ total_sectors_16 = int.from_bytes(data[19:21], "little")
+ self.media_descriptor = data[21]
+ self.sectors_per_fat = int.from_bytes(data[22:24], "little")
+ self.sectors_per_track = int.from_bytes(data[24:26], "little")
+ self.heads = int.from_bytes(data[26:28], "little")
+ self.hidden_sectors = int.from_bytes(data[28:32], "little")
+ total_sectors_32 = int.from_bytes(data[32:36], "little")
+ assert (
+ total_sectors_16 == 0 or total_sectors_32 == 0
+ ), "Both total sectors (16 and 32) fields are non-zero"
+ self.total_sectors = total_sectors_16 or total_sectors_32
+ self.drive_number = data[36]
+ self.volume_id = int.from_bytes(data[39:43], "little")
+ self.volume_label = data[43:54].decode("ascii").strip()
+ self.fs_type = data[54:62].decode("ascii").strip()
+
+ def root_dir_start(self):
+ """
+ Calculate the start sector of the root directory.
+ """
+ return self.reserved_sectors + self.fat_count * self.sectors_per_fat
+
+ def root_dir_size(self):
+ """
+ Calculate the size of the root directory in sectors.
+ """
+ return (
+ self.root_entries * DIRENTRY_SIZE + self.bytes_per_sector - 1
+ ) // self.bytes_per_sector
+
+ def data_sector_start(self):
+ """
+ Calculate the start sector of the data region.
+ """
+ return self.root_dir_start() + self.root_dir_size()
+
+ def first_sector_of_cluster(self, cluster: int) -> int:
+ """
+ Calculate the first sector of the given cluster.
+ """
+ return (
+ self.data_sector_start() + (cluster - 2) * self.sectors_per_cluster
+ )
+
+ def cluster_bytes(self):
+ """
+ Calculate the number of bytes in a cluster.
+ """
+ return self.bytes_per_sector * self.sectors_per_cluster
+
+ def __str__(self):
+ return (
+ f"Bytes per sector: {self.bytes_per_sector}\n"
+ f"Sectors per cluster: {self.sectors_per_cluster}\n"
+ f"Reserved sectors: {self.reserved_sectors}\n"
+ f"FAT count: {self.fat_count}\n"
+ f"Root entries: {self.root_entries}\n"
+ f"Total sectors: {self.total_sectors}\n"
+ f"Media descriptor: {self.media_descriptor}\n"
+ f"Sectors per FAT: {self.sectors_per_fat}\n"
+ f"Sectors per track: {self.sectors_per_track}\n"
+ f"Heads: {self.heads}\n"
+ f"Hidden sectors: {self.hidden_sectors}\n"
+ f"Drive number: {self.drive_number}\n"
+ f"Volume ID: {self.volume_id}\n"
+ f"Volume label: {self.volume_label}\n"
+ f"FS type: {self.fs_type}\n"
+ )
+
+
+class FatDirectoryEntry:
+ # pylint: disable=too-many-instance-attributes
+ def __init__(self, data: bytes, sector: int, offset: int):
+ self.name = data[0:8].decode("ascii").strip()
+ self.ext = data[8:11].decode("ascii").strip()
+ self.attributes = data[11]
+ self.reserved = data[12]
+ self.create_time_tenth = data[13]
+ self.create_time = int.from_bytes(data[14:16], "little")
+ self.create_date = int.from_bytes(data[16:18], "little")
+ self.last_access_date = int.from_bytes(data[18:20], "little")
+ high_cluster = int.from_bytes(data[20:22], "little")
+ self.last_mod_time = int.from_bytes(data[22:24], "little")
+ self.last_mod_date = int.from_bytes(data[24:26], "little")
+ low_cluster = int.from_bytes(data[26:28], "little")
+ self.cluster = (high_cluster << 16) | low_cluster
+ self.size_bytes = int.from_bytes(data[28:32], "little")
+
+ # extra (to help write back to disk)
+ self.sector = sector
+ self.offset = offset
+
+ def as_bytes(self) -> bytes:
+ return (
+ self.name.ljust(8, " ").encode("ascii")
+ + self.ext.ljust(3, " ").encode("ascii")
+ + self.attributes.to_bytes(1, "little")
+ + self.reserved.to_bytes(1, "little")
+ + self.create_time_tenth.to_bytes(1, "little")
+ + self.create_time.to_bytes(2, "little")
+ + self.create_date.to_bytes(2, "little")
+ + self.last_access_date.to_bytes(2, "little")
+ + (self.cluster >> 16).to_bytes(2, "little")
+ + self.last_mod_time.to_bytes(2, "little")
+ + self.last_mod_date.to_bytes(2, "little")
+ + (self.cluster & 0xFFFF).to_bytes(2, "little")
+ + self.size_bytes.to_bytes(4, "little")
+ )
+
+ def whole_name(self):
+ if self.ext:
+ return f"{self.name}.{self.ext}"
+ else:
+ return self.name
+
+ def __str__(self):
+ return (
+ f"Name: {self.name}\n"
+ f"Ext: {self.ext}\n"
+ f"Attributes: {self.attributes}\n"
+ f"Reserved: {self.reserved}\n"
+ f"Create time tenth: {self.create_time_tenth}\n"
+ f"Create time: {self.create_time}\n"
+ f"Create date: {self.create_date}\n"
+ f"Last access date: {self.last_access_date}\n"
+ f"Last mod time: {self.last_mod_time}\n"
+ f"Last mod date: {self.last_mod_date}\n"
+ f"Cluster: {self.cluster}\n"
+ f"Size: {self.size_bytes}\n"
+ )
+
+ def __repr__(self):
+ # convert to dict
+ return str(vars(self))
+
+
+class SectorReader(Protocol):
+ def __call__(self, start_sector: int, num_sectors: int = 1) -> bytes: ...
+
+# pylint: disable=broad-exception-raised
+class Fat16:
+ def __init__(
+ self,
+ start_sector: int,
+ size: int,
+ sector_reader: SectorReader,
+ sector_writer: Callable[[int, bytes], None]
+ ):
+ self.start_sector = start_sector
+ self.size_in_sectors = size
+ self.sector_reader = sector_reader
+ self.sector_writer = sector_writer
+
+ self.boot_sector = FatBootSector(self.sector_reader(start_sector, 1))
+
+ fat_size_in_sectors = (
+ self.boot_sector.sectors_per_fat * self.boot_sector.fat_count
+ )
+ self.fats = self.read_sectors(
+ self.boot_sector.reserved_sectors, fat_size_in_sectors
+ )
+ self.fats_dirty_sectors: Set[int] = set()
+
+ def read_sectors(self, start_sector: int, num_sectors: int) -> bytes:
+ return self.sector_reader(start_sector + self.start_sector,
+ num_sectors)
+
+ def write_sectors(self, start_sector: int, data: bytes) -> None:
+ return self.sector_writer(start_sector + self.start_sector, data)
+
+ def directory_from_bytes(
+ self, data: bytes, start_sector: int
+ ) -> List[FatDirectoryEntry]:
+ """
+ Convert `bytes` into a list of `FatDirectoryEntry` objects.
+ Will ignore long file names.
+ Will stop when it encounters a 0x00 byte.
+ """
+
+ entries = []
+ for i in range(0, len(data), DIRENTRY_SIZE):
+ entry = data[i : i + DIRENTRY_SIZE]
+
+ current_sector = start_sector + (i // SECTOR_SIZE)
+ current_offset = i % SECTOR_SIZE
+
+ if entry[0] == 0:
+ break
+
+ if entry[0] == 0xE5:
+ # Deleted file
+ continue
+
+ if entry[11] & 0xF == 0xF:
+ # Long file name
+ continue
+
+ entries.append(
+ FatDirectoryEntry(entry, current_sector, current_offset)
+ )
+ return entries
+
+ def read_root_directory(self) -> List[FatDirectoryEntry]:
+ root_dir = self.read_sectors(
+ self.boot_sector.root_dir_start(), self.boot_sector.root_dir_size()
+ )
+ return self.directory_from_bytes(
+ root_dir, self.boot_sector.root_dir_start()
+ )
+
+ def read_fat_entry(self, cluster: int) -> int:
+ """
+ Read the FAT entry for the given cluster.
+ """
+ fat_offset = cluster * 2 # FAT16
+ return int.from_bytes(self.fats[fat_offset : fat_offset + 2], "little")
+
+ def write_fat_entry(self, cluster: int, value: int) -> None:
+ """
+ Write the FAT entry for the given cluster.
+ """
+ fat_offset = cluster * 2
+ self.fats = (
+ self.fats[:fat_offset]
+ + value.to_bytes(2, "little")
+ + self.fats[fat_offset + 2 :]
+ )
+ self.fats_dirty_sectors.add(fat_offset // SECTOR_SIZE)
+
+ def flush_fats(self) -> None:
+ """
+ Write the FATs back to the disk.
+ """
+ for sector in self.fats_dirty_sectors:
+ data = self.fats[sector * SECTOR_SIZE : (sector + 1) * SECTOR_SIZE]
+ sector = self.boot_sector.reserved_sectors + sector
+ self.write_sectors(sector, data)
+ self.fats_dirty_sectors = set()
+
+ def next_cluster(self, cluster: int) -> Optional[int]:
+ """
+ Get the next cluster in the chain.
+ If it is `None`, then it is the last cluster.
+ The function raises an exception if the next cluster
+ is `FREE` (unexpected) or an invalid entry.
+ """
+ fat_entry = self.read_fat_entry(cluster)
+ if fat_entry == 0:
+ raise Exception("Unexpected: FREE cluster")
+ if fat_entry == 1:
+ raise Exception("Unexpected: RESERVED cluster")
+ if fat_entry >= 0xFFF8:
+ return None
+ if fat_entry >= 0xFFF7:
+ raise Exception("Invalid FAT entry")
+
+ return fat_entry
+
+ def next_free_cluster(self) -> int:
+ """
+ Find the next free cluster.
+ """
+ # simple linear search
+ for i in range(2, 0xFFFF):
+ if self.read_fat_entry(i) == 0:
+ return i
+ raise Exception("No free clusters")
+
+ def next_free_cluster_non_continuous(self) -> int:
+ """
+ Find the next free cluster, making sure
+ that the clusters before and after it are not allocated.
+ """
+ # simple linear search
+ before = False
+ for i in range(2, 0xFFFF):
+ if self.read_fat_entry(i) == 0:
+ if before and self.read_fat_entry(i + 1) == 0:
+ return i
+ else:
+ before = True
+ else:
+ before = False
+
+ raise Exception("No free clusters")
+
+ def read_cluster(self, cluster: int) -> bytes:
+ """
+ Read the data of the given cluster.
+ """
+ return self.read_sectors(
+ self.boot_sector.first_sector_of_cluster(cluster),
+ self.boot_sector.sectors_per_cluster,
+ )
+
+ def write_cluster(self, cluster: int, data: bytes) -> None:
+ """
+ Write the given data to the given cluster.
+ """
+ assert len(data) == self.boot_sector.cluster_bytes()
+ self.write_sectors(
+ self.boot_sector.first_sector_of_cluster(cluster),
+ data,
+ )
+
+ def read_directory(
+ self, cluster: Optional[int]
+ ) -> List[FatDirectoryEntry]:
+ """
+ Read the directory at the given cluster.
+ """
+ entries = []
+ while cluster is not None:
+ data = self.read_cluster(cluster)
+ entries.extend(
+ self.directory_from_bytes(
+ data, self.boot_sector.first_sector_of_cluster(cluster)
+ )
+ )
+ cluster = self.next_cluster(cluster)
+ return entries
+
+ def add_direntry(
+ self, cluster: Optional[int], name: str, ext: str, attributes: int
+ ) -> FatDirectoryEntry:
+ """
+ Add a new directory entry to the given cluster.
+ If the cluster is `None`, then it will be added to the root directory.
+ """
+
+ def find_free_entry(data: bytes) -> Optional[int]:
+ for i in range(0, len(data), DIRENTRY_SIZE):
+ entry = data[i : i + DIRENTRY_SIZE]
+ if entry[0] == 0 or entry[0] == 0xE5:
+ return i
+ return None
+
+ assert len(name) <= 8, "Name must be 8 characters or less"
+ assert len(ext) <= 3, "Ext must be 3 characters or less"
+ assert attributes % 0x15 != 0x15, "Invalid attributes"
+
+ # initial dummy data
+ new_entry = FatDirectoryEntry(b"\0" * 32, 0, 0)
+ new_entry.name = name.ljust(8, " ")
+ new_entry.ext = ext.ljust(3, " ")
+ new_entry.attributes = attributes
+ new_entry.reserved = 0
+ new_entry.create_time_tenth = 0
+ new_entry.create_time = 0
+ new_entry.create_date = 0
+ new_entry.last_access_date = 0
+ new_entry.last_mod_time = 0
+ new_entry.last_mod_date = 0
+ new_entry.cluster = self.next_free_cluster()
+ new_entry.size_bytes = 0
+
+ # mark as EOF
+ self.write_fat_entry(new_entry.cluster, 0xFFFF)
+
+ if cluster is None:
+ for i in range(self.boot_sector.root_dir_size()):
+ sector_data = self.read_sectors(
+ self.boot_sector.root_dir_start() + i, 1
+ )
+ offset = find_free_entry(sector_data)
+ if offset is not None:
+ new_entry.sector = self.boot_sector.root_dir_start() + i
+ new_entry.offset = offset
+ self.update_direntry(new_entry)
+ return new_entry
+ else:
+ while cluster is not None:
+ data = self.read_cluster(cluster)
+ offset = find_free_entry(data)
+ if offset is not None:
+ new_entry.sector = (
+ self.boot_sector.first_sector_of_cluster(cluster)
+ + (offset // SECTOR_SIZE))
+ new_entry.offset = offset % SECTOR_SIZE
+ self.update_direntry(new_entry)
+ return new_entry
+ cluster = self.next_cluster(cluster)
+
+ raise Exception("No free directory entries")
+
+ def update_direntry(self, entry: FatDirectoryEntry) -> None:
+ """
+ Write the directory entry back to the disk.
+ """
+ sector = self.read_sectors(entry.sector, 1)
+ sector = (
+ sector[: entry.offset]
+ + entry.as_bytes()
+ + sector[entry.offset + DIRENTRY_SIZE :]
+ )
+ self.write_sectors(entry.sector, sector)
+
+ def find_direntry(self, path: str) -> Optional[FatDirectoryEntry]:
+ """
+ Find the directory entry for the given path.
+ """
+ assert path[0] == "/", "Path must start with /"
+
+ path = path[1:] # remove the leading /
+ parts = path.split("/")
+ directory = self.read_root_directory()
+
+ current_entry = None
+
+ for i, part in enumerate(parts):
+ is_last = i == len(parts) - 1
+
+ for entry in directory:
+ if entry.whole_name() == part:
+ current_entry = entry
+ break
+ if current_entry is None:
+ return None
+
+ if is_last:
+ return current_entry
+
+ if current_entry.attributes & 0x10 == 0:
+ raise Exception(
+ f"{current_entry.whole_name()} is not a directory"
+ )
+
+ directory = self.read_directory(current_entry.cluster)
+
+ assert False, "Exited loop with is_last == False"
+
+ def read_file(self, entry: Optional[FatDirectoryEntry]) -> Optional[bytes]:
+ """
+ Read the content of the file for the given directory entry.
+ """
+ if entry is None:
+ return None
+ if entry.attributes & 0x10 != 0:
+ raise Exception(f"{entry.whole_name()} is a directory")
+
+ data = b""
+ cluster: Optional[int] = entry.cluster
+ while cluster is not None and len(data) <= entry.size_bytes:
+ data += self.read_cluster(cluster)
+ cluster = self.next_cluster(cluster)
+ return data[: entry.size_bytes]
+
+ def truncate_file(
+ self,
+ entry: FatDirectoryEntry,
+ new_size: int,
+ allocate_non_continuous: bool = False,
+ ) -> None:
+ """
+ Truncate the file for the given directory entry to the new size.
+ """
+ if entry is None:
+ raise Exception("entry is None")
+ if entry.attributes & 0x10 != 0:
+ raise Exception(f"{entry.whole_name()} is a directory")
+
+ def clusters_from_size(size: int) -> int:
+ return (
+ size + self.boot_sector.cluster_bytes() - 1
+ ) // self.boot_sector.cluster_bytes()
+
+ # First, allocate new FATs if we need to
+ required_clusters = clusters_from_size(new_size)
+ current_clusters = clusters_from_size(entry.size_bytes)
+
+ affected_clusters = set()
+
+ # Keep at least one cluster, easier to manage this way
+ if required_clusters == 0:
+ required_clusters = 1
+ if current_clusters == 0:
+ current_clusters = 1
+
+ cluster: Optional[int]
+
+ if required_clusters > current_clusters:
+ # Allocate new clusters
+ cluster = entry.cluster
+ to_add = required_clusters
+ for _ in range(current_clusters - 1):
+ to_add -= 1
+ assert cluster is not None, "Cluster is None"
+ affected_clusters.add(cluster)
+ cluster = self.next_cluster(cluster)
+ assert required_clusters > 0, "No new clusters to allocate"
+ assert cluster is not None, "Cluster is None"
+ assert (
+ self.next_cluster(cluster) is None
+ ), "Cluster is not the last cluster"
+
+ # Allocate new clusters
+ for _ in range(to_add - 1):
+ if allocate_non_continuous:
+ new_cluster = self.next_free_cluster_non_continuous()
+ else:
+ new_cluster = self.next_free_cluster()
+ self.write_fat_entry(cluster, new_cluster)
+ self.write_fat_entry(new_cluster, 0xFFFF)
+ cluster = new_cluster
+
+ elif required_clusters < current_clusters:
+ # Truncate the file
+ cluster = entry.cluster
+ for _ in range(required_clusters - 1):
+ assert cluster is not None, "Cluster is None"
+ cluster = self.next_cluster(cluster)
+ assert cluster is not None, "Cluster is None"
+
+ next_cluster = self.next_cluster(cluster)
+ # mark last as EOF
+ self.write_fat_entry(cluster, 0xFFFF)
+ # free the rest
+ while next_cluster is not None:
+ cluster = next_cluster
+ next_cluster = self.next_cluster(next_cluster)
+ self.write_fat_entry(cluster, 0)
+
+ self.flush_fats()
+
+ # verify number of clusters
+ cluster = entry.cluster
+ count = 0
+ while cluster is not None:
+ count += 1
+ affected_clusters.add(cluster)
+ cluster = self.next_cluster(cluster)
+ assert (
+ count == required_clusters
+ ), f"Expected {required_clusters} clusters, got {count}"
+
+ # update the size
+ entry.size_bytes = new_size
+ self.update_direntry(entry)
+
+ # trigger every affected cluster
+ for cluster in affected_clusters:
+ first_sector = self.boot_sector.first_sector_of_cluster(cluster)
+ first_sector_data = self.read_sectors(first_sector, 1)
+ self.write_sectors(first_sector, first_sector_data)
+
+ def write_file(self, entry: FatDirectoryEntry, data: bytes) -> None:
+ """
+ Write the given data as the content of the file for the given entry.
+ """
+ if entry is None:
+ raise Exception("entry is None")
+ if entry.attributes & 0x10 != 0:
+ raise Exception(f"{entry.whole_name()} is a directory")
+
+ data_len = len(data)
+
+ self.truncate_file(entry, data_len)
+
+ cluster: Optional[int] = entry.cluster
+ while cluster is not None:
+ data_to_write = data[: self.boot_sector.cluster_bytes()]
+ if len(data_to_write) < self.boot_sector.cluster_bytes():
+ old_data = self.read_cluster(cluster)
+ data_to_write += old_data[len(data_to_write) :]
+
+ self.write_cluster(cluster, data_to_write)
+ data = data[self.boot_sector.cluster_bytes() :]
+ if len(data) == 0:
+ break
+ cluster = self.next_cluster(cluster)
+
+ assert (
+ len(data) == 0
+ ), "Data was not written completely, clusters missing"
+
+ def create_file(self, path: str) -> Optional[FatDirectoryEntry]:
+ """
+ Create a new file at the given path.
+ """
+ assert path[0] == "/", "Path must start with /"
+
+ path = path[1:] # remove the leading /
+
+ parts = path.split("/")
+
+ directory_cluster = None
+ directory = self.read_root_directory()
+
+ parts, filename = parts[:-1], parts[-1]
+
+ for _, part in enumerate(parts):
+ current_entry = None
+ for entry in directory:
+ if entry.whole_name() == part:
+ current_entry = entry
+ break
+ if current_entry is None:
+ return None
+
+ if current_entry.attributes & 0x10 == 0:
+ raise Exception(
+ f"{current_entry.whole_name()} is not a directory"
+ )
+
+ directory = self.read_directory(current_entry.cluster)
+ directory_cluster = current_entry.cluster
+
+ # add new entry to the directory
+
+ filename, ext = filename.split(".")
+
+ if len(ext) > 3:
+ raise Exception("Ext must be 3 characters or less")
+ if len(filename) > 8:
+ raise Exception("Name must be 8 characters or less")
+
+ for c in filename + ext:
+
+ if c not in ALLOWED_FILE_CHARS:
+ raise Exception("Invalid character in filename")
+
+ return self.add_direntry(directory_cluster, filename, ext, 0)
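
The new fat16.py driver only needs a pair of sector read/write callbacks, so it can be exercised outside the vvfat test as well. A hedged usage sketch over a plain raw image file (fat16.img and HELLO.TXT are made-up names; the actual test supplies its own callbacks):

    from fat16 import MBR, Fat16, SECTOR_SIZE

    with open("fat16.img", "r+b") as img:
        def read_sectors(start_sector, num_sectors=1):
            img.seek(start_sector * SECTOR_SIZE)
            return img.read(num_sectors * SECTOR_SIZE)

        def write_sectors(start_sector, data):
            img.seek(start_sector * SECTOR_SIZE)
            img.write(data)

        # Locate the first MBR partition and interpret it as FAT16.
        part = MBR(read_sectors(0)).partition_table[0]
        fs = Fat16(part["start_lba"], part["size"],
                   read_sectors, write_sectors)

        # Read an existing file and append to it.
        entry = fs.find_direntry("/HELLO.TXT")
        if entry is not None:
            data = fs.read_file(entry)
            fs.write_file(entry, data + b" appended")
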
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index ea48af4..0527477 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -601,13 +601,23 @@ def filter_chown(msg):
return chown_re.sub("chown UID:GID", msg)
def filter_qmp_event(event):
- '''Filter a QMP event dict'''
+ '''Filter the timestamp of a QMP event dict'''
event = dict(event)
if 'timestamp' in event:
event['timestamp']['seconds'] = 'SECS'
event['timestamp']['microseconds'] = 'USECS'
return event
+def filter_block_job(event):
+ '''Filter the offset and length of a QMP block job event dict'''
+ event = dict(event)
+ if 'data' in event:
+ if 'offset' in event['data']:
+ event['data']['offset'] = 'OFFSET'
+ if 'len' in event['data']:
+ event['data']['len'] = 'LEN'
+ return event
+
def filter_qmp(qmsg, filter_fn):
'''Given a string filter, filter a QMP object's values.
filter_fn takes a (key, value) pair.'''
@@ -701,6 +711,10 @@ def filter_qmp_imgfmt(qmsg):
def filter_nbd_exports(output: str) -> str:
return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output)
+def filter_qtest(output: str) -> str:
+ output = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', output)
+ output = re.sub(r'\n?\[I \+\d+\.\d+\] CLOSED\n?$', '', output)
+ return output
Msg = TypeVar('Msg', Dict[str, Any], List[Any], str)
@@ -909,6 +923,10 @@ class VM(qtest.QEMUQtestMachine):
self._args.append(addr)
return self
+ def add_paused(self):
+ self._args.append('-S')
+ return self
+
def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage:
cmd = 'human-monitor-command'
kwargs: Dict[str, Any] = {'command-line': command_line}
@@ -1614,10 +1632,13 @@ class ReproducibleStreamWrapper:
self.stream.write(arg)
class ReproducibleTestRunner(unittest.TextTestRunner):
- def __init__(self, stream: Optional[TextIO] = None,
- resultclass: Type[unittest.TestResult] =
- ReproducibleTestResult,
- **kwargs: Any) -> None:
+ def __init__(
+ self,
+ stream: Optional[TextIO] = None,
+ resultclass: Type[unittest.TextTestResult] =
+ ReproducibleTestResult,
+ **kwargs: Any
+ ) -> None:
rstream = ReproducibleStreamWrapper(stream or sys.stdout)
super().__init__(stream=rstream, # type: ignore
descriptions=True,
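
The new iotests helpers are plain value filters, so they compose directly with iotests.log(). A sketch with made-up event and log values (assumes the iotests module is importable, i.e. a configured iotests environment):

    import iotests

    event = {"event": "BLOCK_JOB_READY",
             "data": {"device": "mirror-job0", "len": 1073741824,
                      "offset": 1073741824, "speed": 0, "type": "mirror"},
             "timestamp": {"seconds": 1717171717, "microseconds": 424242}}
    # len/offset are replaced by "LEN"/"OFFSET", the timestamp by "SECS"/"USECS".
    iotests.log(event, filters=[iotests.filter_qmp_event,
                                iotests.filter_block_job])

    raw_log = "[I 1.23] OPENED\nFormatting 'img'\n[I +4.56] CLOSED\n"
    # Strips the qtest OPENED/CLOSED markers, leaving just "Formatting 'img'".
    print(iotests.filter_qtest(raw_log))
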
diff --git a/tests/qemu-iotests/pylintrc b/tests/qemu-iotests/pylintrc
index 05b75ee..c5f4833 100644
--- a/tests/qemu-iotests/pylintrc
+++ b/tests/qemu-iotests/pylintrc
@@ -13,6 +13,7 @@ disable=invalid-name,
no-else-return,
too-few-public-methods,
too-many-arguments,
+ too-many-positional-arguments,
too-many-branches,
too-many-lines,
too-many-locals,
diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py
index 96d69e5..6326e46 100644
--- a/tests/qemu-iotests/testenv.py
+++ b/tests/qemu-iotests/testenv.py
@@ -240,9 +240,12 @@ class TestEnv(ContextManager['TestEnv']):
('aarch64', 'virt'),
('avr', 'mega2560'),
('m68k', 'virt'),
+ ('or1k', 'virt'),
('riscv32', 'virt'),
('riscv64', 'virt'),
('rx', 'gdbsim-r5f562n8'),
+ ('sh4', 'r2d'),
+ ('sh4eb', 'r2d'),
('tricore', 'tricore_testboard')
)
for suffix, machine in machine_map:
@@ -255,7 +258,7 @@ class TestEnv(ContextManager['TestEnv']):
self.qemu_img_options = os.getenv('QEMU_IMG_OPTIONS')
self.qemu_nbd_options = os.getenv('QEMU_NBD_OPTIONS')
- is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg']
+ is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg', 'vvfat']
self.imgfmt_generic = 'true' if is_generic else 'false'
self.qemu_io_options = f'--cache {self.cachemode} --aio {self.aiomode}'
diff --git a/tests/qemu-iotests/tests/backup-discard-source b/tests/qemu-iotests/tests/backup-discard-source
index 2391b12..17fef9c 100755
--- a/tests/qemu-iotests/tests/backup-discard-source
+++ b/tests/qemu-iotests/tests/backup-discard-source
@@ -28,20 +28,14 @@ from iotests import qemu_img_create, qemu_img_map, qemu_io
temp_img = os.path.join(iotests.test_dir, 'temp')
source_img = os.path.join(iotests.test_dir, 'source')
target_img = os.path.join(iotests.test_dir, 'target')
-size = '1M'
-
-
-def get_actual_size(vm, node_name):
- nodes = vm.cmd('query-named-block-nodes', flat=True)
- node = next(n for n in nodes if n['node-name'] == node_name)
- return node['image']['actual-size']
+size = 1024 * 1024
class TestBackup(iotests.QMPTestCase):
def setUp(self):
- qemu_img_create('-f', iotests.imgfmt, source_img, size)
- qemu_img_create('-f', iotests.imgfmt, temp_img, size)
- qemu_img_create('-f', iotests.imgfmt, target_img, size)
+ qemu_img_create('-f', iotests.imgfmt, source_img, str(size))
+ qemu_img_create('-f', iotests.imgfmt, temp_img, str(size))
+ qemu_img_create('-f', iotests.imgfmt, target_img, str(size))
qemu_io('-c', 'write 0 1M', source_img)
self.vm = iotests.VM()
@@ -84,7 +78,12 @@ class TestBackup(iotests.QMPTestCase):
}
})
- self.assertLess(get_actual_size(self.vm, 'temp'), 512 * 1024)
+ self.bitmap = {
+ 'node': 'temp',
+ 'name': 'bitmap0'
+ }
+
+ self.vm.cmd('block-dirty-bitmap-add', self.bitmap)
def tearDown(self):
# That should fail, because region is discarded
@@ -98,7 +97,7 @@ class TestBackup(iotests.QMPTestCase):
mapping = qemu_img_map(temp_img)
self.assertEqual(len(mapping), 1)
self.assertEqual(mapping[0]['start'], 0)
- self.assertEqual(mapping[0]['length'], 1024 * 1024)
+ self.assertEqual(mapping[0]['length'], size)
self.assertEqual(mapping[0]['data'], False)
os.remove(temp_img)
@@ -113,6 +112,13 @@ class TestBackup(iotests.QMPTestCase):
self.vm.event_wait(name='BLOCK_JOB_COMPLETED')
+ def get_bitmap_count(self):
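+ # Return the dirty byte count of 'bitmap0' on the 'temp' node.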
+ nodes = self.vm.cmd('query-named-block-nodes', flat=True)
+ temp = next(n for n in nodes if n['node-name'] == 'temp')
+ bitmap = temp['dirty-bitmaps'][0]
+ assert bitmap['name'] == self.bitmap['name']
+ return bitmap['count']
+
def test_discard_written(self):
"""
1. Guest writes
@@ -125,7 +131,7 @@ class TestBackup(iotests.QMPTestCase):
self.assert_qmp(result, 'return', '')
# Check that data is written to temporary image
- self.assertGreater(get_actual_size(self.vm, 'temp'), 1024 * 1024)
+ self.assertEqual(self.get_bitmap_count(), size)
self.do_backup()
@@ -138,13 +144,18 @@ class TestBackup(iotests.QMPTestCase):
"""
self.do_backup()
+ # The backup job performed a discard, which dirtied the bitmap;
+ # clear it so that the next write can be checked.
+ self.assertEqual(self.get_bitmap_count(), size)
+ self.vm.cmd('block-dirty-bitmap-clear', self.bitmap)
+
# Try trigger copy-before-write operation
result = self.vm.hmp_qemu_io('cbw', 'write 0 1M')
self.assert_qmp(result, 'return', '')
# Check that data is not written to temporary image, as region
# is discarded from copy-before-write process
- self.assertLess(get_actual_size(self.vm, 'temp'), 512 * 1024)
+ self.assertEqual(self.get_bitmap_count(), 0)
if __name__ == '__main__':
diff --git a/tests/qemu-iotests/tests/commit-zero-blocks b/tests/qemu-iotests/tests/commit-zero-blocks
new file mode 100755
index 0000000..de00273
--- /dev/null
+++ b/tests/qemu-iotests/tests/commit-zero-blocks
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+# group: rw quick
+#
+# Test for commit of discarded blocks
+#
+# This tests committing a live snapshot where some of the blocks that
+# are present in the base image are discarded in the intermediate image.
+# It checks that these blocks are also discarded in the base image after
+# the commit.
+#
+# Copyright (C) 2024 Vincent Vanlaer.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# creator
+owner=libvirt-e6954efa@volkihar.be
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_qemu
+ _rm_test_img "${TEST_IMG}.base"
+ _rm_test_img "${TEST_IMG}.mid"
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+cd ..
+. ./common.rc
+. ./common.filter
+. ./common.qemu
+
+_supported_fmt qcow2
+_supported_proto file
+
+size="1M"
+
+TEST_IMG="$TEST_IMG.base" _make_test_img $size
+TEST_IMG="$TEST_IMG.mid" _make_test_img -b "$TEST_IMG.base" -F $IMGFMT $size
+_make_test_img -b "${TEST_IMG}.mid" -F $IMGFMT $size
+
+$QEMU_IO -c "write -P 0x01 64k 128k" "$TEST_IMG.base" | _filter_qemu_io
+$QEMU_IO -c "discard 64k 64k" "$TEST_IMG.mid" | _filter_qemu_io
+
+echo
+echo "=== Base image info before commit ==="
+TEST_IMG="${TEST_IMG}.base" _img_info | _filter_img_info
+$QEMU_IMG map --output=json "$TEST_IMG.base" | _filter_qemu_img_map
+
+echo
+echo "=== Middle image info before commit ==="
+TEST_IMG="${TEST_IMG}.mid" _img_info | _filter_img_info
+$QEMU_IMG map --output=json "$TEST_IMG.mid" | _filter_qemu_img_map
+
+echo
+echo === Running QEMU Live Commit Test ===
+echo
+
+qemu_comm_method="qmp"
+_launch_qemu -drive file="${TEST_IMG}",if=virtio,id=test
+h=$QEMU_HANDLE
+
+_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" "return"
+
+_send_qemu_cmd $h "{ 'execute': 'block-commit',
+ 'arguments': { 'device': 'test',
+ 'top': '"${TEST_IMG}.mid"',
+ 'base': '"${TEST_IMG}.base"'} }" '"status": "null"'
+
+_cleanup_qemu
+
+echo
+echo "=== Base image info after commit ==="
+TEST_IMG="${TEST_IMG}.base" _img_info | _filter_img_info
+$QEMU_IMG map --output=json "$TEST_IMG.base" | _filter_qemu_img_map
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/tests/commit-zero-blocks.out b/tests/qemu-iotests/tests/commit-zero-blocks.out
new file mode 100644
index 0000000..85bdc46
--- /dev/null
+++ b/tests/qemu-iotests/tests/commit-zero-blocks.out
@@ -0,0 +1,54 @@
+QA output created by commit-zero-blocks
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=1048576
+Formatting 'TEST_DIR/t.IMGFMT.mid', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.mid backing_fmt=IMGFMT
+wrote 131072/131072 bytes at offset 65536
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+discard 65536/65536 bytes at offset 65536
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Base image info before commit ===
+image: TEST_DIR/t.IMGFMT.base
+file format: IMGFMT
+virtual size: 1 MiB (1048576 bytes)
+[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false},
+{ "start": 65536, "length": 131072, "depth": 0, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET},
+{ "start": 196608, "length": 851968, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}]
+
+=== Middle image info before commit ===
+image: TEST_DIR/t.IMGFMT.mid
+file format: IMGFMT
+virtual size: 1 MiB (1048576 bytes)
+backing file: TEST_DIR/t.IMGFMT.base
+backing file format: IMGFMT
+[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false, "compressed": false},
+{ "start": 65536, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "compressed": false},
+{ "start": 131072, "length": 65536, "depth": 1, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET},
+{ "start": 196608, "length": 851968, "depth": 1, "present": false, "zero": true, "data": false, "compressed": false}]
+
+=== Running QEMU Live Commit Test ===
+
+{ 'execute': 'qmp_capabilities' }
+{"return": {}}
+{ 'execute': 'block-commit',
+ 'arguments': { 'device': 'test',
+ 'top': 'TEST_DIR/t.IMGFMT.mid',
+ 'base': 'TEST_DIR/t.IMGFMT.base'} }
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "test", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "test"}}
+
+=== Base image info after commit ===
+image: TEST_DIR/t.IMGFMT.base
+file format: IMGFMT
+virtual size: 1 MiB (1048576 bytes)
+[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false},
+{ "start": 65536, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "compressed": false},
+{ "start": 131072, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET},
+{ "start": 196608, "length": 851968, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}]
+*** done
diff --git a/tests/qemu-iotests/tests/copy-before-write b/tests/qemu-iotests/tests/copy-before-write
index d33bea5..236cb8a 100755
--- a/tests/qemu-iotests/tests/copy-before-write
+++ b/tests/qemu-iotests/tests/copy-before-write
@@ -95,8 +95,69 @@ class TestCbwError(iotests.QMPTestCase):
self.vm.shutdown()
log = self.vm.get_log()
- log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
- log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
+ log = iotests.filter_qtest(log)
+ log = iotests.filter_qemu_io(log)
+ return log
+
+ def do_cbw_error_via_blockdev_backup(self, on_cbw_error=None):
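+ # Set up copy-before-write via blockdev-backup (sync=none) with a
+ # blkdebug target that injects a single write error, then exercise
+ # the snapshot-access node with qemu-io.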
+ self.vm.cmd('blockdev-add', {
+ 'node-name': 'source',
+ 'driver': iotests.imgfmt,
+ 'file': {
+ 'driver': 'file',
+ 'filename': source_img
+ }
+ })
+
+ self.vm.cmd('blockdev-add', {
+ 'node-name': 'target',
+ 'driver': iotests.imgfmt,
+ 'file': {
+ 'driver': 'blkdebug',
+ 'image': {
+ 'driver': 'file',
+ 'filename': temp_img
+ },
+ 'inject-error': [
+ {
+ 'event': 'write_aio',
+ 'errno': 5,
+ 'immediately': False,
+ 'once': True
+ }
+ ]
+ }
+ })
+
+ blockdev_backup_options = {
+ 'device': 'source',
+ 'target': 'target',
+ 'sync': 'none',
+ 'job-id': 'job-id',
+ 'filter-node-name': 'cbw'
+ }
+
+ if on_cbw_error:
+ blockdev_backup_options['on-cbw-error'] = on_cbw_error
+
+ self.vm.cmd('blockdev-backup', blockdev_backup_options)
+
+ self.vm.cmd('blockdev-add', {
+ 'node-name': 'access',
+ 'driver': 'snapshot-access',
+ 'file': 'cbw'
+ })
+
+ result = self.vm.qmp('human-monitor-command',
+ command_line='qemu-io cbw "write 0 1M"')
+ self.assert_qmp(result, 'return', '')
+
+ result = self.vm.qmp('human-monitor-command',
+ command_line='qemu-io access "read 0 1M"')
+ self.assert_qmp(result, 'return', '')
+
+ self.vm.shutdown()
+ log = self.vm.get_log()
log = iotests.filter_qemu_io(log)
return log
@@ -126,6 +187,39 @@ read 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
""")
+ def test_break_snapshot_policy_forwarding(self):
+ """Ensure CBW filter accepts break-snapshot policy
+ specified in blockdev-backup QMP command.
+ """
+ log = self.do_cbw_error_via_blockdev_backup('break-snapshot')
+ self.assertEqual(log, """\
+wrote 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read failed: Permission denied
+""")
+
+ def test_break_guest_write_policy_forwarding(self):
+ """Ensure CBW filter accepts break-guest-write policy
+ specified in blockdev-backup QMP command.
+ """
+ log = self.do_cbw_error_via_blockdev_backup('break-guest-write')
+ self.assertEqual(log, """\
+write failed: Input/output error
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+""")
+
+ def test_default_on_cbw_error_policy_forwarding(self):
+ """Ensure break-guest-write policy is used by default when
+ on-cbw-error is not explicitly specified.
+ """
+ log = self.do_cbw_error_via_blockdev_backup()
+ self.assertEqual(log, """\
+write failed: Input/output error
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+""")
+
def do_cbw_timeout(self, on_cbw_error):
self.vm.cmd('object-add', {
'qom-type': 'throttle-group',
diff --git a/tests/qemu-iotests/tests/copy-before-write.out b/tests/qemu-iotests/tests/copy-before-write.out
index 89968f3..2f7d390 100644
--- a/tests/qemu-iotests/tests/copy-before-write.out
+++ b/tests/qemu-iotests/tests/copy-before-write.out
@@ -1,5 +1,5 @@
-....
+.......
----------------------------------------------------------------------
-Ran 4 tests
+Ran 7 tests
OK
diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io
index 194fda5..dca1167 100755
--- a/tests/qemu-iotests/tests/graph-changes-while-io
+++ b/tests/qemu-iotests/tests/graph-changes-while-io
@@ -27,6 +27,7 @@ from iotests import imgfmt, qemu_img, qemu_img_create, qemu_io, \
top = os.path.join(iotests.test_dir, 'top.img')
+mid = os.path.join(iotests.test_dir, 'mid.img')
nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock')
@@ -57,6 +58,16 @@ class TestGraphChangesWhileIO(QMPTestCase):
def tearDown(self) -> None:
self.qsd.stop()
+ os.remove(top)
+
+ def _wait_for_blockjob(self, status: str) -> None:
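+ # Poll QMP events until a JOB_STATUS_CHANGE with the requested status arrives.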
+ done = False
+ while not done:
+ for event in self.qsd.get_qmp().get_events(wait=10.0):
+ if event['event'] != 'JOB_STATUS_CHANGE':
+ continue
+ if event['data']['status'] == status:
+ done = True
def test_blockdev_add_while_io(self) -> None:
# Run qemu-img bench in the background
@@ -116,15 +127,92 @@ class TestGraphChangesWhileIO(QMPTestCase):
'device': 'job0',
})
- cancelled = False
- while not cancelled:
- for event in self.qsd.get_qmp().get_events(wait=10.0):
- if event['event'] != 'JOB_STATUS_CHANGE':
- continue
- if event['data']['status'] == 'null':
- cancelled = True
+ self._wait_for_blockjob('null')
+
+ bench_thr.join()
+
+ def test_remove_lower_snapshot_while_io(self) -> None:
+ # Run qemu-img bench in the background
+ bench_thr = Thread(target=do_qemu_img_bench, args=(100000, ))
+ bench_thr.start()
+
+ # While I/O is performed on the 'node0' node, repeatedly add two
+ # snapshots on top of it, then remove (commit) them starting from the
+ # lower one.
+ while bench_thr.is_alive():
+ # Recreate snapshot images on every iteration
+ qemu_img_create('-f', imgfmt, mid, '1G')
+ qemu_img_create('-f', imgfmt, top, '1G')
+
+ self.qsd.cmd('blockdev-add', {
+ 'driver': imgfmt,
+ 'node-name': 'mid',
+ 'file': {
+ 'driver': 'file',
+ 'filename': mid
+ }
+ })
+
+ self.qsd.cmd('blockdev-snapshot', {
+ 'node': 'node0',
+ 'overlay': 'mid',
+ })
+
+ self.qsd.cmd('blockdev-add', {
+ 'driver': imgfmt,
+ 'node-name': 'top',
+ 'file': {
+ 'driver': 'file',
+ 'filename': top
+ }
+ })
+
+ self.qsd.cmd('blockdev-snapshot', {
+ 'node': 'mid',
+ 'overlay': 'top',
+ })
+
+ self.qsd.cmd('block-commit', {
+ 'job-id': 'commit-mid',
+ 'device': 'top',
+ 'top-node': 'mid',
+ 'base-node': 'node0',
+ 'auto-finalize': True,
+ 'auto-dismiss': False,
+ })
+
+ self._wait_for_blockjob('concluded')
+ self.qsd.cmd('job-dismiss', {
+ 'id': 'commit-mid',
+ })
+
+ self.qsd.cmd('block-commit', {
+ 'job-id': 'commit-top',
+ 'device': 'top',
+ 'top-node': 'top',
+ 'base-node': 'node0',
+ 'auto-finalize': True,
+ 'auto-dismiss': False,
+ })
+
+ self._wait_for_blockjob('ready')
+ self.qsd.cmd('job-complete', {
+ 'id': 'commit-top',
+ })
+
+ self._wait_for_blockjob('concluded')
+ self.qsd.cmd('job-dismiss', {
+ 'id': 'commit-top',
+ })
+
+ self.qsd.cmd('blockdev-del', {
+ 'node-name': 'mid'
+ })
+ self.qsd.cmd('blockdev-del', {
+ 'node-name': 'top'
+ })
bench_thr.join()
+ os.remove(mid)
if __name__ == '__main__':
# Format must support raw backing files
diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out
index fbc63e6..8d7e9967 100644
--- a/tests/qemu-iotests/tests/graph-changes-while-io.out
+++ b/tests/qemu-iotests/tests/graph-changes-while-io.out
@@ -1,5 +1,5 @@
-..
+...
----------------------------------------------------------------------
-Ran 2 tests
+Ran 3 tests
OK
diff --git a/tests/qemu-iotests/tests/inactive-node-nbd b/tests/qemu-iotests/tests/inactive-node-nbd
new file mode 100755
index 0000000..a95b37e
--- /dev/null
+++ b/tests/qemu-iotests/tests/inactive-node-nbd
@@ -0,0 +1,303 @@
+#!/usr/bin/env python3
+# group: rw quick
+#
+# Copyright (C) Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+
+import iotests
+
+from iotests import QemuIoInteractive
+from iotests import filter_qemu_io, filter_qtest, filter_qmp_testfiles
+
+iotests.script_initialize(supported_fmts=['generic'],
+ supported_protocols=['file'],
+ supported_platforms=['linux'])
+
+def get_export(node_name='disk-fmt', allow_inactive=None):
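+ # Build block-export-add arguments; 'allow-inactive' is included only when set.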
+ exp = {
+ 'id': 'exp0',
+ 'type': 'nbd',
+ 'node-name': node_name,
+ 'writable': True,
+ }
+
+ if allow_inactive is not None:
+ exp['allow-inactive'] = allow_inactive
+
+ return exp
+
+def node_is_active(_vm, node_name):
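+ # Look up the node in query-named-block-nodes and return its 'active' flag.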
+ nodes = _vm.cmd('query-named-block-nodes', flat=True)
+ node = next(n for n in nodes if n['node-name'] == node_name)
+ return node['active']
+
+with iotests.FilePath('disk.img') as path, \
+ iotests.FilePath('snap.qcow2') as snap_path, \
+ iotests.FilePath('snap2.qcow2') as snap2_path, \
+ iotests.FilePath('target.img') as target_path, \
+ iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \
+ iotests.VM() as vm:
+
+ img_size = '10M'
+
+ iotests.log('Preparing disk...')
+ iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size)
+ iotests.qemu_img_create('-f', iotests.imgfmt, target_path, img_size)
+
+ iotests.qemu_img_create('-f', 'qcow2', '-b', path, '-F', iotests.imgfmt,
+ snap_path)
+ iotests.qemu_img_create('-f', 'qcow2', '-b', snap_path, '-F', 'qcow2',
+ snap2_path)
+
+ iotests.log('Launching VM...')
+ vm.add_blockdev(f'file,node-name=disk-file,filename={path}')
+ vm.add_blockdev(f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,'
+ 'active=off')
+ vm.add_blockdev(f'file,node-name=target-file,filename={target_path}')
+ vm.add_blockdev(f'{iotests.imgfmt},file=target-file,node-name=target-fmt')
+ vm.add_blockdev(f'file,node-name=snap-file,filename={snap_path}')
+ vm.add_blockdev(f'file,node-name=snap2-file,filename={snap2_path}')
+
+ # Actually running the VM activates all images
+ vm.add_paused()
+
+ vm.launch()
+ vm.qmp_log('nbd-server-start',
+ addr={'type': 'unix', 'data':{'path': nbd_sock}},
+ filters=[filter_qmp_testfiles])
+
+ iotests.log('\n=== Creating export of inactive node ===')
+
+ iotests.log('\nExports activate nodes without allow-inactive')
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('block-export-add', **get_export())
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\nExports activate nodes with allow-inactive=false')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('block-export-add', **get_export(allow_inactive=False))
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\nExport leaves nodes inactive with allow-inactive=true')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('block-export-add', **get_export(allow_inactive=True))
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\n=== Inactivating node with existing export ===')
+
+ iotests.log('\nInactivating nodes with an export fails without '
+ 'allow-inactive')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
+ vm.qmp_log('block-export-add', **get_export(node_name='disk-fmt'))
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\nInactivating nodes with an export fails with '
+ 'allow-inactive=false')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
+ vm.qmp_log('block-export-add',
+ **get_export(node_name='disk-fmt', allow_inactive=False))
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\nInactivating nodes with an export works with '
+ 'allow-inactive=true')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
+ vm.qmp_log('block-export-add',
+ **get_export(node_name='disk-fmt', allow_inactive=True))
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ vm.qmp_log('query-block-exports')
+ vm.qmp_log('block-export-del', id='exp0')
+ vm.event_wait('BLOCK_EXPORT_DELETED')
+ vm.qmp_log('query-block-exports')
+
+ iotests.log('\n=== Inactive nodes with parent ===')
+
+ iotests.log('\nInactivating nodes with an active parent fails')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
+ vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False)
+ iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file'))
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+
+ iotests.log('\nInactivating nodes with an inactive parent works')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
+ vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False)
+ iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file'))
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+
+ iotests.log('\nCreating active parent node with an inactive child fails')
+ vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
+ node_name='disk-filter')
+ vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
+ node_name='disk-filter', active=True)
+
+ iotests.log('\nCreating inactive parent node with an inactive child works')
+ vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
+ node_name='disk-filter', active=False)
+ vm.qmp_log('blockdev-del', node_name='disk-filter')
+
+ iotests.log('\n=== Resizing an inactive node ===')
+ vm.qmp_log('block_resize', node_name='disk-fmt', size=16*1024*1024)
+
+ iotests.log('\n=== Taking a snapshot of an inactive node ===')
+
+ iotests.log('\nActive overlay over inactive backing file automatically '
+ 'makes both inactive for compatibility')
+ vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt',
+ file='snap-file', backing=None)
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt')
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ vm.qmp_log('blockdev-del', node_name='snap-fmt')
+
+ iotests.log('\nInactive overlay over inactive backing file just works')
+ vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt',
+ file='snap-file', backing=None, active=False)
+ vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt')
+
+ iotests.log('\n=== Block jobs with inactive nodes ===')
+
+ iotests.log('\nStreaming into an inactive node')
+ vm.qmp_log('block-stream', device='snap-fmt',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nCommitting an inactive root node (active commit)')
+ vm.qmp_log('block-commit', job_id='job0', device='snap-fmt',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nCommitting an inactive intermediate node to inactive base')
+ vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap2-fmt',
+ file='snap2-file', backing='snap-fmt', active=False)
+
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
+
+ vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt',
+ top_node='snap-fmt',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nCommitting an inactive intermediate node to active base')
+ vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
+ vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt',
+ top_node='snap-fmt',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nMirror from inactive source to active target')
+ vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt',
+ target='target-fmt', sync='full',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nMirror from active source to inactive target')
+
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
+ iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))
+
+ # Activating snap2-fmt recursively activates the whole backing chain
+ vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=True)
+ vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False)
+
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
+ iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))
+
+ vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt',
+ target='target-fmt', sync='full',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nBackup from active source to inactive target')
+
+ vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt',
+ target='target-fmt', sync='full',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\nBackup from inactive source to active target')
+
+ # Inactivating snap2-fmt recursively inactivates the whole backing chain
+ vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=False)
+ vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=True)
+
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
+ iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))
+
+ vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt',
+ target='target-fmt', sync='full',
+ filters=[iotests.filter_qmp_generated_node_ids])
+
+ iotests.log('\n=== Accessing export on inactive node ===')
+
+ # Use the target node because it has the right image format and isn't the
+ # (read-only) backing file of a qcow2 node
+ vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False)
+ vm.qmp_log('block-export-add',
+ **get_export(node_name='target-fmt', allow_inactive=True))
+
+ # The read should succeed, everything else should fail gracefully
+ qemu_io = QemuIoInteractive('-f', 'raw',
+ f'nbd+unix:///target-fmt?socket={nbd_sock}')
+ iotests.log(qemu_io.cmd('read 0 64k'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('write 0 64k'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('write -z 0 64k'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('write -zu 0 64k'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('discard 0 64k'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('flush'), filters=[filter_qemu_io])
+ iotests.log(qemu_io.cmd('map'), filters=[filter_qemu_io])
+ qemu_io.close()
+
+ iotests.log('\n=== Resuming VM activates all images ===')
+ vm.qmp_log('cont')
+
+ iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
+ iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
+ iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
+ iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))
+
+ iotests.log('\nShutting down...')
+ vm.shutdown()
+ log = vm.get_log()
+ if log:
+ iotests.log(log, [filter_qtest, filter_qemu_io])
diff --git a/tests/qemu-iotests/tests/inactive-node-nbd.out b/tests/qemu-iotests/tests/inactive-node-nbd.out
new file mode 100644
index 0000000..a458b4f
--- /dev/null
+++ b/tests/qemu-iotests/tests/inactive-node-nbd.out
@@ -0,0 +1,239 @@
+Preparing disk...
+Launching VM...
+{"execute": "nbd-server-start", "arguments": {"addr": {"data": {"path": "SOCK_DIR/PID-nbd.sock"}, "type": "unix"}}}
+{"return": {}}
+
+=== Creating export of inactive node ===
+
+Exports activate nodes without allow-inactive
+disk-fmt active: False
+{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+disk-fmt active: True
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+Exports activate nodes with allow-inactive=false
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"return": {}}
+disk-fmt active: False
+{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+disk-fmt active: True
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+Export leaves nodes inactive with allow-inactive=true
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"return": {}}
+disk-fmt active: False
+{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+disk-fmt active: False
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+=== Inactivating node with existing export ===
+
+Inactivating nodes with an export fails without allow-inactive
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}}
+disk-fmt active: True
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+Inactivating nodes with an export fails with allow-inactive=false
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}}
+disk-fmt active: True
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+Inactivating nodes with an export works with allow-inactive=true
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"return": {}}
+disk-fmt active: False
+{"execute": "query-block-exports", "arguments": {}}
+{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
+{"execute": "block-export-del", "arguments": {"id": "exp0"}}
+{"return": {}}
+{"execute": "query-block-exports", "arguments": {}}
+{"return": []}
+
+=== Inactive nodes with parent ===
+
+Inactivating nodes with an active parent fails
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}}
+{"error": {"class": "GenericError", "desc": "Node has active parent node"}}
+disk-file active: True
+disk-fmt active: True
+
+Inactivating nodes with an inactive parent works
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}}
+{"return": {}}
+disk-file active: False
+disk-fmt active: False
+
+Creating active parent node with an inactive child fails
+{"execute": "blockdev-add", "arguments": {"driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}}
+{"execute": "blockdev-add", "arguments": {"active": true, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}}
+
+Creating inactive parent node with an inactive child works
+{"execute": "blockdev-add", "arguments": {"active": false, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments": {"node-name": "disk-filter"}}
+{"return": {}}
+
+=== Resizing an inactive node ===
+{"execute": "block_resize", "arguments": {"node-name": "disk-fmt", "size": 16777216}}
+{"error": {"class": "GenericError", "desc": "Permission 'resize' unavailable on inactive node"}}
+
+=== Taking a snapshot of an inactive node ===
+
+Active overlay over inactive backing file automatically makes both inactive for compatibility
+{"execute": "blockdev-add", "arguments": {"backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}}
+{"return": {}}
+disk-fmt active: False
+snap-fmt active: True
+{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}}
+{"return": {}}
+disk-fmt active: False
+snap-fmt active: False
+{"execute": "blockdev-del", "arguments": {"node-name": "snap-fmt"}}
+{"return": {}}
+
+Inactive overlay over inactive backing file just works
+{"execute": "blockdev-add", "arguments": {"active": false, "backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}}
+{"return": {}}
+{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}}
+{"return": {}}
+
+=== Block jobs with inactive nodes ===
+
+Streaming into an inactive node
+{"execute": "block-stream", "arguments": {"device": "snap-fmt"}}
+{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap-fmt' can't be a file child of active 'NODE_NAME'"}}
+
+Committing an inactive root node (active commit)
+{"execute": "block-commit", "arguments": {"device": "snap-fmt", "job-id": "job0"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}
+
+Committing an inactive intermediate node to inactive base
+{"execute": "blockdev-add", "arguments": {"active": false, "backing": "snap-fmt", "driver": "qcow2", "file": "snap2-file", "node-name": "snap2-fmt"}}
+{"return": {}}
+disk-fmt active: False
+snap-fmt active: False
+snap2-fmt active: False
+{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}
+
+Committing an inactive intermediate node to active base
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
+{"return": {}}
+{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}
+
+Mirror from inactive source to active target
+{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
+{"error": {"class": "GenericError", "desc": "Inactive 'snap2-fmt' can't be a backing child of active 'NODE_NAME'"}}
+
+Mirror from active source to inactive target
+disk-fmt active: True
+snap-fmt active: False
+snap2-fmt active: False
+target-fmt active: True
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "snap2-fmt"}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}}
+{"return": {}}
+disk-fmt active: True
+snap-fmt active: True
+snap2-fmt active: True
+target-fmt active: False
+{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
+{"error": {"class": "GenericError", "desc": "Permission 'write' unavailable on inactive node"}}
+
+Backup from active source to inactive target
+{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
+{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'target-fmt' can't be a target child of active 'NODE_NAME'"}}
+
+Backup from inactive source to active target
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "snap2-fmt"}}
+{"return": {}}
+{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "target-fmt"}}
+{"return": {}}
+disk-fmt active: False
+snap-fmt active: False
+snap2-fmt active: False
+target-fmt active: True
+{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
+{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap2-fmt' can't be a file child of active 'NODE_NAME'"}}
+
+=== Accessing export on inactive node ===
+{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}}
+{"return": {}}
+{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "target-fmt", "type": "nbd", "writable": true}}
+{"return": {}}
+read 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+write failed: Operation not permitted
+
+write failed: Operation not permitted
+
+write failed: Operation not permitted
+
+discard failed: Operation not permitted
+
+
+qemu-io: Failed to get allocation status: Operation not permitted
+
+
+=== Resuming VM activates all images ===
+{"execute": "cont", "arguments": {}}
+{"return": {}}
+disk-fmt active: True
+snap-fmt active: True
+snap2-fmt active: True
+target-fmt active: True
+
+Shutting down...
+
diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test
index f98e721..8fb4099 100755
--- a/tests/qemu-iotests/tests/migrate-bitmaps-test
+++ b/tests/qemu-iotests/tests/migrate-bitmaps-test
@@ -122,11 +122,10 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
# catch 'Could not reopen qcow2 layer: Bitmap already exists'
# possible error
- log = self.vm_a.get_log()
- log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
- log = re.sub(r'^(wrote .* bytes at offset .*\n.*KiB.*ops.*sec.*\n){3}',
+ log = iotests.filter_qtest(self.vm_a.get_log())
+ log = re.sub(r'^(wrote .* bytes at offset .*\n'
+ r'.*KiB.*ops.*sec.*\n?){3}',
'', log)
- log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
self.assertEqual(log, '')
# test that bitmap is still persistent
diff --git a/tests/qemu-iotests/tests/mirror-sparse b/tests/qemu-iotests/tests/mirror-sparse
new file mode 100755
index 0000000..cfcaa60
--- /dev/null
+++ b/tests/qemu-iotests/tests/mirror-sparse
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+# group: rw auto quick
+#
+# Test blockdev-mirror with raw sparse destination
+#
+# Copyright (C) 2025 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+ _cleanup_qemu
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+cd ..
+. ./common.rc
+. ./common.filter
+. ./common.qemu
+
+_supported_fmt qcow2 raw # Format of the source. dst is always raw file
+_supported_proto file
+_supported_os Linux
+_require_disk_usage
+
+echo
+echo "=== Initial image setup ==="
+echo
+
+TEST_IMG="$TEST_IMG.base" _make_test_img 20M
+$QEMU_IO -c 'w 8M 2M' -f $IMGFMT "$TEST_IMG.base" | _filter_qemu_io
+
+_launch_qemu \
+ -blockdev '{"driver":"file", "cache":{"direct":true, "no-flush":false},
+ "filename":"'"$TEST_IMG.base"'", "node-name":"src-file"}' \
+ -blockdev '{"driver":"'$IMGFMT'", "node-name":"src", "file":"src-file"}'
+h1=$QEMU_HANDLE
+_send_qemu_cmd $h1 '{"execute": "qmp_capabilities"}' 'return'
+
+# Check several combinations; most should result in a sparse destination;
+# the destination should only be fully allocated if pre-allocated
+# and not punching holes due to detect-zeroes
+# do_test creation discard zeroes result
+do_test() {
+ creation=$1
+ discard=$2
+ zeroes=$3
+ expected=$4
+
+echo
+echo "=== Testing creation=$creation discard=$discard zeroes=$zeroes ==="
+echo
+
+rm -f $TEST_IMG
+if test $creation = external; then
+ truncate --size=20M $TEST_IMG
+else
+ _send_qemu_cmd $h1 '{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"'$TEST_IMG'",
+ "size":'$((20*1024*1024))', "preallocation":"'$creation'"},
+ "job-id":"job1"}}' 'concluded'
+ _send_qemu_cmd $h1 '{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}' 'return'
+fi
+_send_qemu_cmd $h1 '{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"'$TEST_IMG'", "aio":"threads",
+ "auto-read-only":true, "discard":"'$discard'",
+ "detect-zeroes":"'$zeroes'"}}' 'return'
+_send_qemu_cmd $h1 '{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}' 'return'
+_timed_wait_for $h1 '"ready"'
+_send_qemu_cmd $h1 '{"execute": "job-complete", "arguments":
+ {"id":"job2"}}' 'return' \
+ | _filter_block_job_offset | _filter_block_job_len
+_send_qemu_cmd $h1 '{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}' 'return' \
+ | _filter_block_job_offset | _filter_block_job_len
+$QEMU_IMG compare -U -f $IMGFMT -F raw $TEST_IMG.base $TEST_IMG
+# Some filesystems can fudge allocations for various reasons; rather
+# than expecting precise 2M and 20M images, it is better to allow for slop.
+result=$(disk_usage $TEST_IMG)
+if test $result -lt $((4*1024*1024)); then
+ actual=sparse
+elif test $result -gt $((19*1024*1024)); then
+ actual=full
+else
+ actual="unexpected size ($result)"
+fi
+echo "Destination is $actual; expected $expected"
+}
+
+do_test external ignore off sparse
+do_test external unmap off sparse
+do_test external unmap unmap sparse
+do_test off ignore off sparse
+do_test off unmap off sparse
+do_test off unmap unmap sparse
+do_test full ignore off full
+do_test full unmap off sparse
+do_test full unmap unmap sparse
+
+_send_qemu_cmd $h1 '{"execute":"quit"}' ''
+
+# success, all done
+echo '*** done'
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/tests/mirror-sparse.out b/tests/qemu-iotests/tests/mirror-sparse.out
new file mode 100644
index 0000000..2103b89
--- /dev/null
+++ b/tests/qemu-iotests/tests/mirror-sparse.out
@@ -0,0 +1,365 @@
+QA output created by mirror-sparse
+
+=== Initial image setup ===
+
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=20971520
+wrote 2097152/2097152 bytes at offset 8388608
+2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+{"execute": "qmp_capabilities"}
+{"return": {}}
+
+=== Testing creation=external discard=ignore zeroes=off ===
+
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"ignore",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=external discard=unmap zeroes=off ===
+
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=external discard=unmap zeroes=unmap ===
+
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"unmap"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=off discard=ignore zeroes=off ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"off"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"ignore",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=off discard=unmap zeroes=off ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"off"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=off discard=unmap zeroes=unmap ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"off"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"unmap"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=full discard=ignore zeroes=off ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"full"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"ignore",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is full; expected full
+
+=== Testing creation=full discard=unmap zeroes=off ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"full"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"off"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+
+=== Testing creation=full discard=unmap zeroes=unmap ===
+
+{"execute": "blockdev-create", "arguments":
+ {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT",
+ "size":20971520, "preallocation":"full"},
+ "job-id":"job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}}
+{"execute": "job-dismiss", "arguments":
+ {"id": "job1"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments":
+ {"node-name": "dst", "driver":"file",
+ "filename":"TEST_DIR/t.IMGFMT", "aio":"threads",
+ "auto-read-only":true, "discard":"unmap",
+ "detect-zeroes":"unmap"}}
+{"return": {}}
+{"execute":"blockdev-mirror", "arguments":
+ {"sync":"full", "device":"src", "target":"dst",
+ "job-id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}}
+{"execute": "job-complete", "arguments":
+ {"id":"job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"execute": "blockdev-del", "arguments":
+ {"node-name": "dst"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}}
+{"return": {}}
+Images are identical.
+Destination is sparse; expected sparse
+{"execute":"quit"}
+*** done
diff --git a/tests/qemu-iotests/tests/qcow2-encryption b/tests/qemu-iotests/tests/qcow2-encryption
new file mode 100755
index 0000000..95f6195
--- /dev/null
+++ b/tests/qemu-iotests/tests/qcow2-encryption
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+# group: rw quick
+#
+# Test case for encryption support in qcow2
+#
+# Copyright (C) 2025 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ../common.rc
+. ../common.filter
+
+# This tests qcow2-specific low-level functionality
+_supported_fmt qcow2
+_supported_proto file
+_require_working_luks
+
+IMG_SIZE=64M
+
+echo
+echo "=== Create an encrypted image ==="
+echo
+
+_make_test_img --object secret,id=sec0,data=123456 -o encrypt.format=luks,encrypt.key-secret=sec0 $IMG_SIZE
+$PYTHON ../qcow2.py "$TEST_IMG" dump-header-exts
+_img_info
+$QEMU_IMG check \
+ --object secret,id=sec0,data=123456 \
+ --image-opts file.filename="$TEST_IMG",encrypt.key-secret=sec0 \
+ | _filter_qemu_img_check
+
+echo
+echo "=== Remove the header extension ==="
+echo
+
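+# 0x0537be77 is the magic of the qcow2 crypto header extension (shown as
+# "Crypto header" in the dump above); dropping it must make the image
+# unopenable with the LUKS payload still in place.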
+$PYTHON ../qcow2.py "$TEST_IMG" del-header-ext 0x0537be77
+$PYTHON ../qcow2.py "$TEST_IMG" dump-header-exts
+_img_info
+$QEMU_IMG check \
+ --object secret,id=sec0,data=123456 \
+ --image-opts file.filename="$TEST_IMG",encrypt.key-secret=sec0 2>&1 \
+ | _filter_qemu_img_check \
+ | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/tests/qcow2-encryption.out b/tests/qemu-iotests/tests/qcow2-encryption.out
new file mode 100644
index 0000000..9b549dc2
--- /dev/null
+++ b/tests/qemu-iotests/tests/qcow2-encryption.out
@@ -0,0 +1,32 @@
+QA output created by qcow2-encryption
+
+=== Create an encrypted image ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+Header extension:
+magic 0x537be77 (Crypto header)
+length 16
+data <binary>
+
+Header extension:
+magic 0x6803f857 (Feature table)
+length 384
+data <binary>
+
+image: TEST_DIR/t.IMGFMT
+file format: IMGFMT
+virtual size: 64 MiB (67108864 bytes)
+encrypted: yes
+cluster_size: 65536
+No errors were found on the image.
+
+=== Remove the header extension ===
+
+Header extension:
+magic 0x6803f857 (Feature table)
+length 384
+data <binary>
+
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Missing CRYPTO header for crypt method 2
+qemu-img: Could not open 'file.filename=TEST_DIR/t.qcow2,encrypt.key-secret=sec0': Missing CRYPTO header for crypt method 2
+*** done
diff --git a/tests/qemu-iotests/tests/qsd-migrate b/tests/qemu-iotests/tests/qsd-migrate
new file mode 100755
index 0000000..a4c6592
--- /dev/null
+++ b/tests/qemu-iotests/tests/qsd-migrate
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+# group: rw quick
+#
+# Copyright (C) Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+
+import iotests
+
+from iotests import filter_qemu_io, filter_qtest
+
+iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'],
+ supported_protocols=['file'],
+ supported_platforms=['linux'])
+
+with iotests.FilePath('disk.img') as path, \
+ iotests.FilePath('nbd-src.sock', base_dir=iotests.sock_dir) as nbd_src, \
+ iotests.FilePath('nbd-dst.sock', base_dir=iotests.sock_dir) as nbd_dst, \
+ iotests.FilePath('migrate.sock', base_dir=iotests.sock_dir) as mig_sock, \
+ iotests.VM(path_suffix="-src") as vm_src, \
+ iotests.VM(path_suffix="-dst") as vm_dst:
+
+ img_size = '10M'
+
+ iotests.log('Preparing disk...')
+ iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size)
+
+ iotests.log('Launching source QSD...')
+ qsd_src = iotests.QemuStorageDaemon(
+ '--blockdev', f'file,node-name=disk-file,filename={path}',
+ '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt',
+ '--nbd-server', f'addr.type=unix,addr.path={nbd_src}',
+ '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,'
+ 'allow-inactive=true',
+ qmp=True,
+ )
+
+ iotests.log('Launching source VM...')
+ vm_src.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,'
+ f'server.path={nbd_src},export=disk-fmt')
+ vm_src.add_args('-device', 'virtio-blk,drive=disk,id=virtio0')
+ vm_src.launch()
+
+ iotests.log('Launching destination QSD...')
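+    # Start the destination QSD with its block nodes inactive so that it
+    # does not write to the image while the source side still owns it.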
+ qsd_dst = iotests.QemuStorageDaemon(
+ '--blockdev', f'file,node-name=disk-file,filename={path},active=off',
+ '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,'
+ f'active=off',
+ '--nbd-server', f'addr.type=unix,addr.path={nbd_dst}',
+ '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,'
+ 'allow-inactive=true',
+ qmp=True,
+ instance_id='b',
+ )
+
+ iotests.log('Launching destination VM...')
+ vm_dst.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,'
+ f'server.path={nbd_dst},export=disk-fmt')
+ vm_dst.add_args('-device', 'virtio-blk,drive=disk,id=virtio0')
+ vm_dst.add_args('-incoming', f'unix:{mig_sock}')
+ vm_dst.launch()
+
+ iotests.log('\nTest I/O on the source')
+ vm_src.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x11 0 4k',
+ use_log=True, qdev=True)
+ vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
+ use_log=True, qdev=True)
+
+ iotests.log('\nStarting migration...')
+
+ mig_caps = [
+ {'capability': 'events', 'state': True},
+ {'capability': 'pause-before-switchover', 'state': True},
+ ]
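+    # pause-before-switchover gives us a window to hand the block devices
+    # over from the source QSD to the destination QSD before switchover.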
+ vm_src.qmp_log('migrate-set-capabilities', capabilities=mig_caps)
+ vm_dst.qmp_log('migrate-set-capabilities', capabilities=mig_caps)
+ vm_src.qmp_log('migrate', uri=f'unix:{mig_sock}',
+ filters=[iotests.filter_qmp_testfiles])
+
+ vm_src.event_wait('MIGRATION',
+ match={'data': {'status': 'pre-switchover'}})
+
+ iotests.log('\nPre-switchover: Reconfigure QSD instances')
+
+ iotests.log(qsd_src.qmp('blockdev-set-active', {'active': False}))
+
+ # Reading is okay from both sides while the image is inactive. Note that
+ # the destination may have stale data until it activates the image, though.
+ vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
+ use_log=True, qdev=True)
+ vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read 0 4k',
+ use_log=True, qdev=True)
+
+ iotests.log(qsd_dst.qmp('blockdev-set-active', {'active': True}))
+
+ iotests.log('\nCompleting migration...')
+
+ vm_src.qmp_log('migrate-continue', state='pre-switchover')
+ vm_dst.event_wait('MIGRATION', match={'data': {'status': 'completed'}})
+
+ iotests.log('\nTest I/O on the destination')
+
+ # Now the destination must see what the source wrote
+ vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
+ use_log=True, qdev=True)
+
+ # And be able to overwrite it
+ vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x22 0 4k',
+ use_log=True, qdev=True)
+ vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x22 0 4k',
+ use_log=True, qdev=True)
+
+ iotests.log('\nDone')
+
+ vm_src.shutdown()
+ iotests.log('\n--- vm_src log ---')
+ log = vm_src.get_log()
+ if log:
+ iotests.log(log, [filter_qtest, filter_qemu_io])
+ qsd_src.stop()
+
+ vm_dst.shutdown()
+ iotests.log('\n--- vm_dst log ---')
+ log = vm_dst.get_log()
+ if log:
+ iotests.log(log, [filter_qtest, filter_qemu_io])
+ qsd_dst.stop()
diff --git a/tests/qemu-iotests/tests/qsd-migrate.out b/tests/qemu-iotests/tests/qsd-migrate.out
new file mode 100644
index 0000000..4a5241e
--- /dev/null
+++ b/tests/qemu-iotests/tests/qsd-migrate.out
@@ -0,0 +1,59 @@
+Preparing disk...
+Launching source QSD...
+Launching source VM...
+Launching destination QSD...
+Launching destination VM...
+
+Test I/O on the source
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x11 0 4k\""}}
+{"return": ""}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
+{"return": ""}
+
+Starting migration...
+{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}}
+{"return": {}}
+{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}}
+{"return": {}}
+{"execute": "migrate", "arguments": {"uri": "unix:SOCK_DIR/PID-migrate.sock"}}
+{"return": {}}
+
+Pre-switchover: Reconfigure QSD instances
+{"return": {}}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
+{"return": ""}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read 0 4k\""}}
+{"return": ""}
+{"return": {}}
+
+Completing migration...
+{"execute": "migrate-continue", "arguments": {"state": "pre-switchover"}}
+{"return": {}}
+
+Test I/O on the destination
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
+{"return": ""}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x22 0 4k\""}}
+{"return": ""}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x22 0 4k\""}}
+{"return": ""}
+
+Done
+
+--- vm_src log ---
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+--- vm_dst log ---
+read 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/tests/vvfat b/tests/qemu-iotests/tests/vvfat
new file mode 100755
index 0000000..acdc6ce
--- /dev/null
+++ b/tests/qemu-iotests/tests/vvfat
@@ -0,0 +1,485 @@
+#!/usr/bin/env python3
+# group: rw vvfat
+#
+# Test vvfat driver implementation
+# Here, we use a simple FAT16 implementation and check the behavior of
+# the vvfat driver.
+#
+# Copyright (C) 2024 Amjad Alsharafi <amjadsharafi10@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import shutil
+import iotests
+from iotests import imgfmt, QMPTestCase
+from fat16 import MBR, Fat16, DIRENTRY_SIZE
+
+filesystem = os.path.join(iotests.test_dir, "filesystem")
+
+nbd_sock = iotests.file_path("nbd.sock", base_dir=iotests.sock_dir)
+nbd_uri = "nbd+unix:///disk?socket=" + nbd_sock
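+# The "disk" export name in the URI matches the device exported via
+# nbd-server-add in setUp() below.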
+
+SECTOR_SIZE = 512
+
+
+class TestVVFatDriver(QMPTestCase):
+ # pylint: disable=broad-exception-raised
+ def setUp(self) -> None:
+ if os.path.exists(filesystem):
+ if os.path.isdir(filesystem):
+ shutil.rmtree(filesystem)
+ else:
+ raise Exception(f"{filesystem} exists and is not a directory")
+
+ os.mkdir(filesystem)
+
+ # Add some text files to the filesystem
+ for i in range(10):
+ with open(os.path.join(filesystem, f"file{i}.txt"),
+ "w", encoding="ascii") as f:
+ f.write(f"Hello, world! {i}\n")
+
+ # Add 2 large files, above the cluster size (8KB)
+ with open(os.path.join(filesystem, "large1.txt"), "wb") as f:
+ # write 'A' * 1KB, 'B' * 1KB, 'C' * 1KB, ...
+ for i in range(8 * 2): # two clusters
+ f.write(bytes([0x41 + i] * 1024))
+
+ with open(os.path.join(filesystem, "large2.txt"), "wb") as f:
+ # write 'A' * 1KB, 'B' * 1KB, 'C' * 1KB, ...
+ for i in range(8 * 3): # 3 clusters
+ f.write(bytes([0x41 + i] * 1024))
+
+ self.vm = iotests.VM()
+
+ self.vm.add_blockdev(
+ self.vm.qmp_to_opts(
+ {
+ "driver": imgfmt,
+ "node-name": "disk",
+ "rw": "true",
+ "fat-type": "16",
+ "dir": filesystem,
+ }
+ )
+ )
+
+ self.vm.launch()
+
+ self.vm.qmp_log("block-dirty-bitmap-add", **{
+ "node": "disk",
+ "name": "bitmap0",
+ })
+
+ # attach nbd server
+ self.vm.qmp_log(
+ "nbd-server-start",
+ **{"addr": {"type": "unix", "data": {"path": nbd_sock}}},
+ filters=[],
+ )
+
+ self.vm.qmp_log(
+ "nbd-server-add",
+ **{"device": "disk", "writable": True, "bitmap": "bitmap0"},
+ )
+
+ self.qio = iotests.QemuIoInteractive("-f", "raw", nbd_uri)
+
+ def tearDown(self) -> None:
+ self.qio.close()
+ self.vm.shutdown()
+ # print(self.vm.get_log())
+ shutil.rmtree(filesystem)
+
+ def read_sectors(self, sector: int, num: int = 1) -> bytes:
+ """
+ Read `num` sectors starting from `sector` from the `disk`.
+ This uses `QemuIoInteractive` to read the sectors into `stdout` and
+ then parse the output.
+ """
+ self.assertGreater(num, 0)
+
+ # The output contains the content of the sector in hex dump format
+ # We need to extract the content from it
+ output = self.qio.cmd(
+ f"read -v {sector * SECTOR_SIZE} {num * SECTOR_SIZE}")
+
+        # Each row is 16 bytes long, and we are reading `num` sectors
+ rows = num * SECTOR_SIZE // 16
+ output_rows = output.split("\n")[:rows]
+
+ hex_content = "".join(
+ [(row.split(": ")[1]).split(" ")[0] for row in output_rows]
+ )
+ bytes_content = bytes.fromhex(hex_content)
+
+ self.assertEqual(len(bytes_content), num * SECTOR_SIZE)
+
+ return bytes_content
+
+ def write_sectors(self, sector: int, data: bytes) -> None:
+ """
+ Write `data` to the `disk` starting from `sector`.
+ This uses `QemuIoInteractive` to write the data into the disk.
+ """
+
+ self.assertGreater(len(data), 0)
+ self.assertEqual(len(data) % SECTOR_SIZE, 0)
+
+ temp_file = os.path.join(iotests.test_dir, "temp.bin")
+ with open(temp_file, "wb") as f:
+ f.write(data)
+
+ self.qio.cmd(
+ f"write -s {temp_file} {sector * SECTOR_SIZE} {len(data)}"
+ )
+
+ os.remove(temp_file)
+
+ def init_fat16(self):
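+        # Parse the MBR from sector 0 and open its first partition as FAT16,
+        # backed by our NBD sector read/write helpers.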
+ mbr = MBR(self.read_sectors(0))
+ return Fat16(
+ mbr.partition_table[0]["start_lba"],
+ mbr.partition_table[0]["size"],
+ self.read_sectors,
+ self.write_sectors,
+ )
+
+ # Tests
+
+ def test_fat_filesystem(self):
+ """
+        Test that vvfat produces valid MBR and FAT16 boot sectors
+ """
+ mbr = MBR(self.read_sectors(0))
+
+ self.assertEqual(mbr.partition_table[0]["status"], 0x80)
+ self.assertEqual(mbr.partition_table[0]["type"], 6)
+
+ fat16 = Fat16(
+ mbr.partition_table[0]["start_lba"],
+ mbr.partition_table[0]["size"],
+ self.read_sectors,
+ self.write_sectors,
+ )
+ self.assertEqual(fat16.boot_sector.bytes_per_sector, 512)
+ self.assertEqual(fat16.boot_sector.volume_label, "QEMU VVFAT")
+
+ def test_read_root_directory(self):
+ """
+ Test the content of the root directory
+ """
+ fat16 = self.init_fat16()
+
+ root_dir = fat16.read_root_directory()
+
+ self.assertEqual(len(root_dir), 13) # 12 + 1 special file
+
+ files = {
+ "QEMU VVF.AT": 0, # special empty file
+ "FILE0.TXT": 16,
+ "FILE1.TXT": 16,
+ "FILE2.TXT": 16,
+ "FILE3.TXT": 16,
+ "FILE4.TXT": 16,
+ "FILE5.TXT": 16,
+ "FILE6.TXT": 16,
+ "FILE7.TXT": 16,
+ "FILE8.TXT": 16,
+ "FILE9.TXT": 16,
+ "LARGE1.TXT": 0x2000 * 2,
+ "LARGE2.TXT": 0x2000 * 3,
+ }
+
+ for entry in root_dir:
+ self.assertIn(entry.whole_name(), files)
+ self.assertEqual(entry.size_bytes, files[entry.whole_name()])
+
+ def test_direntry_as_bytes(self):
+ """
+ Test if we can convert Direntry back to bytes, so that we can write it
+ back to the disk safely.
+ """
+ fat16 = self.init_fat16()
+
+ root_dir = fat16.read_root_directory()
+ first_entry_bytes = fat16.read_sectors(
+ fat16.boot_sector.root_dir_start(), 1)
+
+ # The first entry won't be deleted, so we can compare it with the first
+ # entry in the root directory
+ self.assertEqual(root_dir[0].as_bytes(),
+ first_entry_bytes[:DIRENTRY_SIZE])
+
+ def test_read_files(self):
+ """
+ Test reading the content of the files
+ """
+ fat16 = self.init_fat16()
+
+ for i in range(10):
+ file = fat16.find_direntry(f"/FILE{i}.TXT")
+ self.assertIsNotNone(file)
+ self.assertEqual(
+ fat16.read_file(file), f"Hello, world! {i}\n".encode("ascii")
+ )
+
+ # test large files
+ large1 = fat16.find_direntry("/LARGE1.TXT")
+ with open(os.path.join(filesystem, "large1.txt"), "rb") as f:
+ self.assertEqual(fat16.read_file(large1), f.read())
+
+ large2 = fat16.find_direntry("/LARGE2.TXT")
+ self.assertIsNotNone(large2)
+ with open(os.path.join(filesystem, "large2.txt"), "rb") as f:
+ self.assertEqual(fat16.read_file(large2), f.read())
+
+ def test_write_file_same_content_direct(self):
+ """
+        Similar to `test_write_file_in_same_content`, but we write the file's
+        clusters directly and thus do not go through the direntry modification
+        path.
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/FILE0.TXT")
+ self.assertIsNotNone(file)
+
+ data = fat16.read_cluster(file.cluster)
+ fat16.write_cluster(file.cluster, data)
+
+ with open(os.path.join(filesystem, "file0.txt"), "rb") as f:
+ self.assertEqual(fat16.read_file(file), f.read())
+
+ def test_write_file_in_same_content(self):
+ """
+        Test writing the file's existing content back to it
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/FILE0.TXT")
+ self.assertIsNotNone(file)
+
+ self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n")
+
+ fat16.write_file(file, b"Hello, world! 0\n")
+ self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n")
+
+ with open(os.path.join(filesystem, "file0.txt"), "rb") as f:
+ self.assertEqual(f.read(), b"Hello, world! 0\n")
+
+ def test_modify_content_same_clusters(self):
+ """
+ Test modifying the content of the file without changing the number of
+ clusters
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/FILE0.TXT")
+ self.assertIsNotNone(file)
+
+ new_content = b"Hello, world! Modified\n"
+ self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n")
+
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "file0.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_truncate_file_same_clusters_less(self):
+ """
+        Test truncating the file without changing the number of clusters,
+        decreasing the file size
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/FILE0.TXT")
+ self.assertIsNotNone(file)
+
+ self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n")
+
+ fat16.truncate_file(file, 5)
+ new_content = fat16.read_file(file)
+ self.assertEqual(new_content, b"Hello")
+
+ with open(os.path.join(filesystem, "file0.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_truncate_file_same_clusters_more(self):
+ """
+        Test truncating the file without changing the number of clusters,
+        this time increasing the file size
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/FILE0.TXT")
+ self.assertIsNotNone(file)
+
+ self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n")
+
+ fat16.truncate_file(file, 20)
+ new_content = fat16.read_file(file)
+ self.assertIsNotNone(new_content)
+
+        # A random pattern will be appended to the file, and it's not always
+        # the same
+ self.assertEqual(new_content[:16], b"Hello, world! 0\n")
+ self.assertEqual(len(new_content), 20)
+
+ with open(os.path.join(filesystem, "file0.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_write_large_file(self):
+ """
+ Test writing a large file
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE1.TXT")
+ self.assertIsNotNone(file)
+
+ # The content of LARGE1 is A * 1KB, B * 1KB, C * 1KB, ..., P * 1KB
+        # Let's change it to be Z * 1KB, Y * 1KB, X * 1KB, ..., K * 1KB
+        # without changing the number of clusters or the file size
+ new_content = b"".join([bytes([0x5A - i] * 1024) for i in range(16)])
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "large1.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_truncate_file_change_clusters_less(self):
+ """
+ Test truncating a file by reducing the number of clusters
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE1.TXT")
+ self.assertIsNotNone(file)
+
+ fat16.truncate_file(file, 1)
+ self.assertEqual(fat16.read_file(file), b"A")
+
+ with open(os.path.join(filesystem, "large1.txt"), "rb") as f:
+ self.assertEqual(f.read(), b"A")
+
+ def test_write_file_change_clusters_less(self):
+ """
+        Test writing a file such that the number of clusters is reduced
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE2.TXT")
+ self.assertIsNotNone(file)
+
+ new_content = b"X" * 8 * 1024 + b"Y" * 8 * 1024
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "large2.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_write_file_change_clusters_more(self):
+ """
+        Test writing a file such that the number of clusters is increased
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE2.TXT")
+ self.assertIsNotNone(file)
+
+ # from 3 clusters to 4 clusters
+ new_content = (
+ b"W" * 8 * 1024 +
+ b"X" * 8 * 1024 +
+ b"Y" * 8 * 1024 +
+ b"Z" * 8 * 1024
+ )
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "large2.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_write_file_change_clusters_more_non_contiguous_2_mappings(self):
+ """
+        Test growing a file by increasing the number of clusters. Here we
+        allocate the new clusters in a way that makes them non-contiguous, so
+        that we will get 2 cluster mappings for the file
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE1.TXT")
+ self.assertIsNotNone(file)
+
+ # from 2 clusters to 3 clusters with non-contiguous allocation
+ fat16.truncate_file(file, 3 * 0x2000, allocate_non_continuous=True)
+ new_content = b"X" * 8 * 1024 + b"Y" * 8 * 1024 + b"Z" * 8 * 1024
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "large1.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_write_file_change_clusters_more_non_contiguous_3_mappings(self):
+ """
+        Test growing a file by increasing the number of clusters. Here we
+        allocate the new clusters in a way that makes them non-contiguous, so
+        that we will get 3 cluster mappings for the file
+ """
+ fat16 = self.init_fat16()
+
+ file = fat16.find_direntry("/LARGE1.TXT")
+ self.assertIsNotNone(file)
+
+ # from 2 clusters to 4 clusters with non-contiguous allocation
+ fat16.truncate_file(file, 4 * 0x2000, allocate_non_continuous=True)
+ new_content = (
+ b"W" * 8 * 1024 +
+ b"X" * 8 * 1024 +
+ b"Y" * 8 * 1024 +
+ b"Z" * 8 * 1024
+ )
+ fat16.write_file(file, new_content)
+ self.assertEqual(fat16.read_file(file), new_content)
+
+ with open(os.path.join(filesystem, "large1.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ def test_create_file(self):
+ """
+ Test creating a new file
+ """
+ fat16 = self.init_fat16()
+
+ new_file = fat16.create_file("/NEWFILE.TXT")
+
+ self.assertIsNotNone(new_file)
+ self.assertEqual(new_file.size_bytes, 0)
+
+ new_content = b"Hello, world! New file\n"
+ fat16.write_file(new_file, new_content)
+ self.assertEqual(fat16.read_file(new_file), new_content)
+
+ with open(os.path.join(filesystem, "newfile.txt"), "rb") as f:
+ self.assertEqual(f.read(), new_content)
+
+ # TODO: support deleting files
+
+
+if __name__ == "__main__":
+    # This test is specific to the vvfat driver
+ iotests.main(supported_fmts=["vvfat"], supported_protocols=["file"])
diff --git a/tests/qemu-iotests/tests/vvfat.out b/tests/qemu-iotests/tests/vvfat.out
new file mode 100755
index 0000000..b6f2576
--- /dev/null
+++ b/tests/qemu-iotests/tests/vvfat.out
@@ -0,0 +1,5 @@
+................
+----------------------------------------------------------------------
+Ran 16 tests
+
+OK
diff --git a/tests/qemu-iotests/tests/write-zeroes-unmap b/tests/qemu-iotests/tests/write-zeroes-unmap
index 7cfeeaf..f90fb8e 100755
--- a/tests/qemu-iotests/tests/write-zeroes-unmap
+++ b/tests/qemu-iotests/tests/write-zeroes-unmap
@@ -32,6 +32,7 @@ cd ..
_supported_fmt raw
_supported_proto file
_supported_os Linux
+_require_disk_usage
create_test_image() {
_make_test_img -f $IMGFMT 1m
diff --git a/tests/qtest/acpi-utils.c b/tests/qtest/acpi-utils.c
index 673fc97..9dc24fb 100644
--- a/tests/qtest/acpi-utils.c
+++ b/tests/qtest/acpi-utils.c
@@ -156,5 +156,4 @@ uint64_t acpi_find_rsdp_address_uefi(QTestState *qts, uint64_t start,
g_usleep(TEST_DELAY);
}
g_assert_not_reached();
- return 0;
}
diff --git a/tests/qtest/adm1266-test.c b/tests/qtest/adm1266-test.c
index 6c312c4..5ae8206 100644
--- a/tests/qtest/adm1266-test.c
+++ b/tests/qtest/adm1266-test.c
@@ -13,8 +13,8 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/bitops.h"
#define TEST_ID "adm1266-test"
diff --git a/tests/qtest/adm1272-test.c b/tests/qtest/adm1272-test.c
index 63f8514..2abda8d 100644
--- a/tests/qtest/adm1272-test.c
+++ b/tests/qtest/adm1272-test.c
@@ -12,8 +12,8 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/bitops.h"
#define TEST_ID "adm1272-test"
diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c
index 5a1923f..e8aabfc 100644
--- a/tests/qtest/ahci-test.c
+++ b/tests/qtest/ahci-test.c
@@ -30,7 +30,7 @@
#include "libqos/ahci.h"
#include "libqos/pci-pc.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/host-utils.h"
#include "hw/pci/pci_ids.h"
@@ -1881,7 +1881,6 @@ static void test_io_interface(gconstpointer opaque)
sector = offset_sector(opts->offset, opts->address_type, bufsize);
test_io_rw_interface(opts->address_type, opts->io_type, bufsize, sector);
g_free(opts);
- return;
}
static void create_ahci_io_test(enum IOMode type, enum AddrMode addr,
diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c
index cfd6f77..eb8ddeb 100644
--- a/tests/qtest/arm-cpu-features.c
+++ b/tests/qtest/arm-cpu-features.c
@@ -11,8 +11,8 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
/*
* We expect the SVE max-vq to be 16. Also it must be <= 64
@@ -419,21 +419,28 @@ static void pauth_tests_default(QTestState *qts, const char *cpu_type)
assert_has_feature_enabled(qts, cpu_type, "pauth");
assert_has_feature_disabled(qts, cpu_type, "pauth-impdef");
assert_has_feature_disabled(qts, cpu_type, "pauth-qarma3");
+ assert_has_feature_disabled(qts, cpu_type, "pauth-qarma5");
assert_set_feature(qts, cpu_type, "pauth", false);
assert_set_feature(qts, cpu_type, "pauth", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", false);
assert_set_feature(qts, cpu_type, "pauth-qarma3", true);
assert_set_feature(qts, cpu_type, "pauth-qarma3", false);
+ assert_set_feature(qts, cpu_type, "pauth-qarma5", true);
+ assert_set_feature(qts, cpu_type, "pauth-qarma5", false);
assert_error(qts, cpu_type,
- "cannot enable pauth-impdef or pauth-qarma3 without pauth",
+ "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
"{ 'pauth': false, 'pauth-impdef': true }");
assert_error(qts, cpu_type,
- "cannot enable pauth-impdef or pauth-qarma3 without pauth",
+ "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
"{ 'pauth': false, 'pauth-qarma3': true }");
assert_error(qts, cpu_type,
- "cannot enable both pauth-impdef and pauth-qarma3",
- "{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true }");
+ "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
+ "{ 'pauth': false, 'pauth-qarma5': true }");
+ assert_error(qts, cpu_type,
+ "cannot enable pauth-impdef, pauth-qarma3 and pauth-qarma5 at the same time",
+ "{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true,"
+ " 'pauth-qarma5': true }");
}
static void test_query_cpu_model_expansion(const void *data)
diff --git a/tests/qtest/aspeed-hace-utils.c b/tests/qtest/aspeed-hace-utils.c
new file mode 100644
index 0000000..0f7f911
--- /dev/null
+++ b/tests/qtest/aspeed-hace-utils.c
@@ -0,0 +1,646 @@
+/*
+ * QTest testcase for the ASPEED Hash and Crypto Engine
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 IBM Corp.
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "qemu/bitops.h"
+#include "aspeed-hace-utils.h"
+
+/*
+ * The test vector is the ASCII string "abc"
+ *
+ * Expected results were generated using command line utilities:
+ *
+ * echo -n -e 'abc' | dd of=/tmp/test
+ * for hash in sha512sum sha384sum sha256sum md5sum; do $hash /tmp/test; done
+ *
+ */
+static const uint8_t test_vector[3] = {0x61, 0x62, 0x63};
+
+static const uint8_t test_result_sha512[64] = {
+ 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49,
+ 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
+ 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a,
+ 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
+ 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f,
+ 0xa5, 0x4c, 0xa4, 0x9f};
+
+static const uint8_t test_result_sha384[48] = {
+ 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b, 0xb5, 0xa0, 0x3d, 0x69,
+ 0x9a, 0xc6, 0x50, 0x07, 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63,
+ 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed, 0x80, 0x86, 0x07, 0x2b,
+ 0xa1, 0xe7, 0xcc, 0x23, 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7};
+
+static const uint8_t test_result_sha256[32] = {
+ 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde,
+ 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
+
+static const uint8_t test_result_md5[16] = {
+ 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d,
+ 0x28, 0xe1, 0x7f, 0x72};
+
+/*
+ * The scatter-gather test vector is the ASCII string "abcdefghijkl", split
+ * into the chunks "abcdef", "ghi" and "jkl" as shown below
+ *
+ * Expected results were generated using command line utilities:
+ *
+ * echo -n -e 'abcdefghijkl' | dd of=/tmp/test
+ * for hash in sha512sum sha384sum sha256sum; do $hash /tmp/test; done
+ *
+ */
+static const uint8_t test_vector_sg1[6] = {0x61, 0x62, 0x63, 0x64, 0x65, 0x66};
+static const uint8_t test_vector_sg2[3] = {0x67, 0x68, 0x69};
+static const uint8_t test_vector_sg3[3] = {0x6a, 0x6b, 0x6c};
+
+static const uint8_t test_result_sg_sha512[64] = {
+ 0x17, 0x80, 0x7c, 0x72, 0x8e, 0xe3, 0xba, 0x35, 0xe7, 0xcf, 0x7a, 0xf8,
+ 0x23, 0x11, 0x6d, 0x26, 0xe4, 0x1e, 0x5d, 0x4d, 0x6c, 0x2f, 0xf1, 0xf3,
+ 0x72, 0x0d, 0x3d, 0x96, 0xaa, 0xcb, 0x6f, 0x69, 0xde, 0x64, 0x2e, 0x63,
+ 0xd5, 0xb7, 0x3f, 0xc3, 0x96, 0xc1, 0x2b, 0xe3, 0x8b, 0x2b, 0xd5, 0xd8,
+ 0x84, 0x25, 0x7c, 0x32, 0xc8, 0xf6, 0xd0, 0x85, 0x4a, 0xe6, 0xb5, 0x40,
+ 0xf8, 0x6d, 0xda, 0x2e};
+
+static const uint8_t test_result_sg_sha384[48] = {
+ 0x10, 0x3c, 0xa9, 0x6c, 0x06, 0xa1, 0xce, 0x79, 0x8f, 0x08, 0xf8, 0xef,
+ 0xf0, 0xdf, 0xb0, 0xcc, 0xdb, 0x56, 0x7d, 0x48, 0xb2, 0x85, 0xb2, 0x3d,
+ 0x0c, 0xd7, 0x73, 0x45, 0x46, 0x67, 0xa3, 0xc2, 0xfa, 0x5f, 0x1b, 0x58,
+ 0xd9, 0xcd, 0xf2, 0x32, 0x9b, 0xd9, 0x97, 0x97, 0x30, 0xbf, 0xaa, 0xff};
+
+static const uint8_t test_result_sg_sha256[32] = {
+ 0xd6, 0x82, 0xed, 0x4c, 0xa4, 0xd9, 0x89, 0xc1, 0x34, 0xec, 0x94, 0xf1,
+ 0x55, 0x1e, 0x1e, 0xc5, 0x80, 0xdd, 0x6d, 0x5a, 0x6e, 0xcd, 0xe9, 0xf3,
+ 0xd3, 0x5e, 0x6e, 0x4a, 0x71, 0x7f, 0xbd, 0xe4};
+
+/*
+ * The accumulative mode requires firmware to provide internal initial state
+ * and message padding (including length L at the end of padding).
+ *
+ * This test vector is the ASCII string "abc" followed by its padding.
+ *
+ * Expected results were generated using command line utilities:
+ *
+ * echo -n -e 'abc' | dd of=/tmp/test
+ * for hash in sha512sum sha384sum sha256sum; do $hash /tmp/test; done
+ */
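+/*
+ * Padding layout (per the usual SHA padding rules, noted here for
+ * reference): the 3-byte message "abc" is followed by the 0x80 terminator,
+ * zero fill, and the big-endian message bit length (24 == 0x18) in the
+ * final length field (128 bits for SHA-384/512, 64 bits for SHA-256).
+ */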
+static const uint8_t test_vector_accum_512[128] = {
+ 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18};
+
+static const uint8_t test_vector_accum_384[128] = {
+ 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18};
+
+static const uint8_t test_vector_accum_256[64] = {
+ 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18};
+
+static const uint8_t test_result_accum_sha512[64] = {
+ 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49,
+ 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
+ 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a,
+ 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
+ 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f,
+ 0xa5, 0x4c, 0xa4, 0x9f};
+
+static const uint8_t test_result_accum_sha384[48] = {
+ 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b, 0xb5, 0xa0, 0x3d, 0x69,
+ 0x9a, 0xc6, 0x50, 0x07, 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63,
+ 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed, 0x80, 0x86, 0x07, 0x2b,
+ 0xa1, 0xe7, 0xcc, 0x23, 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7};
+
+static const uint8_t test_result_accum_sha256[32] = {
+ 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde,
+ 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
+
+static void write_regs(QTestState *s, uint32_t base, uint64_t src,
+ uint32_t length, uint64_t out, uint32_t method)
+{
+ qtest_writel(s, base + HACE_HASH_SRC, extract64(src, 0, 32));
+ qtest_writel(s, base + HACE_HASH_SRC_HI, extract64(src, 32, 32));
+ qtest_writel(s, base + HACE_HASH_DIGEST, extract64(out, 0, 32));
+ qtest_writel(s, base + HACE_HASH_DIGEST_HI, extract64(out, 32, 32));
+ qtest_writel(s, base + HACE_HASH_DATA_LEN, length);
+ qtest_writel(s, base + HACE_HASH_CMD, HACE_SHA_BE_EN | method);
+}
+
+void aspeed_test_md5(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ uint64_t digest_addr = src_addr + 0x010000;
+ uint8_t digest[16] = {0};
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
+
+ write_regs(s, base, src_addr, sizeof(test_vector),
+ digest_addr, HACE_ALGO_MD5);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_md5, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha256(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t digest_addr = src_addr + 0x10000;
+ uint8_t digest[32] = {0};
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
+
+ write_regs(s, base, src_addr, sizeof(test_vector), digest_addr,
+ HACE_ALGO_SHA256);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sha256, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha384(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t digest_addr = src_addr + 0x10000;
+ uint8_t digest[48] = {0};
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
+
+ write_regs(s, base, src_addr, sizeof(test_vector), digest_addr,
+ HACE_ALGO_SHA384);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sha384, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha512(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t digest_addr = src_addr + 0x10000;
+ uint8_t digest[64] = {0};
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
+
+ write_regs(s, base, src_addr, sizeof(test_vector), digest_addr,
+ HACE_ALGO_SHA512);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sha512, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha256_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t src_addr_1 = src_addr + 0x10000;
+ const uint64_t src_addr_2 = src_addr + 0x20000;
+ const uint64_t src_addr_3 = src_addr + 0x30000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[32] = {0};
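+ /* Three-entry scatter-gather list; the last descriptor is terminated with SG_LIST_LEN_LAST */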
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_sg1)),
+ cpu_to_le32(src_addr_1) },
+ { cpu_to_le32(sizeof(test_vector_sg2)),
+ cpu_to_le32(src_addr_2) },
+ { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST),
+ cpu_to_le32(src_addr_3) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1));
+ qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2));
+ qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr,
+ (sizeof(test_vector_sg1)
+ + sizeof(test_vector_sg2)
+ + sizeof(test_vector_sg3)),
+ digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sg_sha256, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha384_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t src_addr_1 = src_addr + 0x10000;
+ const uint64_t src_addr_2 = src_addr + 0x20000;
+ const uint64_t src_addr_3 = src_addr + 0x30000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[48] = {0};
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_sg1)),
+ cpu_to_le32(src_addr_1) },
+ { cpu_to_le32(sizeof(test_vector_sg2)),
+ cpu_to_le32(src_addr_2) },
+ { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST),
+ cpu_to_le32(src_addr_3) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1));
+ qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2));
+ qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr,
+ (sizeof(test_vector_sg1)
+ + sizeof(test_vector_sg2)
+ + sizeof(test_vector_sg3)),
+ digest_addr, HACE_ALGO_SHA384 | HACE_SG_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sg_sha384, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha512_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t src_addr_1 = src_addr + 0x10000;
+ const uint64_t src_addr_2 = src_addr + 0x20000;
+ const uint64_t src_addr_3 = src_addr + 0x30000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[64] = {0};
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_sg1)),
+ cpu_to_le32(src_addr_1) },
+ { cpu_to_le32(sizeof(test_vector_sg2)),
+ cpu_to_le32(src_addr_2) },
+ { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST),
+ cpu_to_le32(src_addr_3) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1));
+ qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2));
+ qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr,
+ (sizeof(test_vector_sg1)
+ + sizeof(test_vector_sg2)
+ + sizeof(test_vector_sg3)),
+ digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_sg_sha512, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha256_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t buffer_addr = src_addr + 0x10000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[32] = {0};
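+ /* One descriptor covering the whole pre-padded vector; accumulative mode expects firmware-provided padding */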
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_accum_256) | SG_LIST_LEN_LAST),
+ cpu_to_le32(buffer_addr) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, buffer_addr, test_vector_accum_256,
+ sizeof(test_vector_accum_256));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr, sizeof(test_vector_accum_256),
+ digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN | HACE_ACCUM_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_accum_sha256, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha384_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t buffer_addr = src_addr + 0x10000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[48] = {0};
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_accum_384) | SG_LIST_LEN_LAST),
+ cpu_to_le32(buffer_addr) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, buffer_addr, test_vector_accum_384,
+ sizeof(test_vector_accum_384));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr, sizeof(test_vector_accum_384),
+ digest_addr, HACE_ALGO_SHA384 | HACE_SG_EN | HACE_ACCUM_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_accum_sha384, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_sha512_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr)
+{
+ QTestState *s = qtest_init(machine);
+
+ const uint64_t buffer_addr = src_addr + 0x10000;
+ const uint64_t digest_addr = src_addr + 0x40000;
+ uint8_t digest[64] = {0};
+ struct AspeedSgList array[] = {
+ { cpu_to_le32(sizeof(test_vector_accum_512) | SG_LIST_LEN_LAST),
+ cpu_to_le32(buffer_addr) },
+ };
+
+ /* Check engine is idle, no busy or irq bits set */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Write test vector into memory */
+ qtest_memwrite(s, buffer_addr, test_vector_accum_512,
+ sizeof(test_vector_accum_512));
+ qtest_memwrite(s, src_addr, array, sizeof(array));
+
+ write_regs(s, base, src_addr, sizeof(test_vector_accum_512),
+ digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN | HACE_ACCUM_EN);
+
+ /* Check hash IRQ status is asserted */
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
+
+ /* Clear IRQ status and check status is deasserted */
+ qtest_writel(s, base + HACE_STS, 0x00000200);
+ g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
+
+ /* Read computed digest from memory */
+ qtest_memread(s, digest_addr, digest, sizeof(digest));
+
+ /* Check result of computation */
+ g_assert_cmpmem(digest, sizeof(digest),
+ test_result_accum_sha512, sizeof(digest));
+
+ qtest_quit(s);
+}
+
+void aspeed_test_addresses(const char *machine, const uint32_t base,
+ const struct AspeedMasks *expected)
+{
+ QTestState *s = qtest_init(machine);
+
+ /*
+ * Check command mode is zero, meaning engine is in direct access mode,
+ * as this affects the masking behavior of the HASH_SRC register.
+ */
+ g_assert_cmphex(qtest_readl(s, base + HACE_CMD), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0);
+
+ /* Check that the address masking is correct */
+ qtest_writel(s, base + HACE_HASH_SRC, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, expected->src);
+
+ qtest_writel(s, base + HACE_HASH_SRC_HI, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI),
+ ==, expected->src_hi);
+
+ qtest_writel(s, base + HACE_HASH_DIGEST, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==,
+ expected->dest);
+
+ qtest_writel(s, base + HACE_HASH_DIGEST_HI, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==,
+ expected->dest_hi);
+
+ qtest_writel(s, base + HACE_HASH_KEY_BUFF, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==,
+ expected->key);
+
+ qtest_writel(s, base + HACE_HASH_KEY_BUFF_HI, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==,
+ expected->key_hi);
+
+ qtest_writel(s, base + HACE_HASH_DATA_LEN, 0xffffffff);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==,
+ expected->len);
+
+ /* Reset to zero */
+ qtest_writel(s, base + HACE_HASH_SRC, 0);
+ qtest_writel(s, base + HACE_HASH_SRC_HI, 0);
+ qtest_writel(s, base + HACE_HASH_DIGEST, 0);
+ qtest_writel(s, base + HACE_HASH_DIGEST_HI, 0);
+ qtest_writel(s, base + HACE_HASH_KEY_BUFF, 0);
+ qtest_writel(s, base + HACE_HASH_KEY_BUFF_HI, 0);
+ qtest_writel(s, base + HACE_HASH_DATA_LEN, 0);
+
+ /* Check that all bits are now zero */
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==, 0);
+ g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0);
+
+ qtest_quit(s);
+}
+
diff --git a/tests/qtest/aspeed-hace-utils.h b/tests/qtest/aspeed-hace-utils.h
new file mode 100644
index 0000000..c8b2ec4
--- /dev/null
+++ b/tests/qtest/aspeed-hace-utils.h
@@ -0,0 +1,84 @@
+/*
+ * QTest testcase for the ASPEED Hash and Crypto Engine
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 IBM Corp.
+ */
+
+#ifndef TESTS_ASPEED_HACE_UTILS_H
+#define TESTS_ASPEED_HACE_UTILS_H
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "qemu/bitops.h"
+
+#define HACE_CMD 0x10
+#define HACE_SHA_BE_EN BIT(3)
+#define HACE_MD5_LE_EN BIT(2)
+#define HACE_ALGO_MD5 0
+#define HACE_ALGO_SHA1 BIT(5)
+#define HACE_ALGO_SHA224 BIT(6)
+#define HACE_ALGO_SHA256 (BIT(4) | BIT(6))
+#define HACE_ALGO_SHA512 (BIT(5) | BIT(6))
+#define HACE_ALGO_SHA384 (BIT(5) | BIT(6) | BIT(10))
+#define HACE_SG_EN BIT(18)
+#define HACE_ACCUM_EN BIT(8)
+
+#define HACE_STS 0x1c
+#define HACE_RSA_ISR BIT(13)
+#define HACE_CRYPTO_ISR BIT(12)
+#define HACE_HASH_ISR BIT(9)
+#define HACE_RSA_BUSY BIT(2)
+#define HACE_CRYPTO_BUSY BIT(1)
+#define HACE_HASH_BUSY BIT(0)
+#define HACE_HASH_SRC 0x20
+#define HACE_HASH_DIGEST 0x24
+#define HACE_HASH_KEY_BUFF 0x28
+#define HACE_HASH_DATA_LEN 0x2c
+#define HACE_HASH_CMD 0x30
+#define HACE_HASH_SRC_HI 0x90
+#define HACE_HASH_DIGEST_HI 0x94
+#define HACE_HASH_KEY_BUFF_HI 0x98
+
+/* Scatter-Gather Hash */
+#define SG_LIST_LEN_LAST BIT(31)
+struct AspeedSgList {
+ uint32_t len;
+ uint32_t addr;
+} __attribute__ ((__packed__));
+
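+/* Expected read-back values after writing all-ones: the per-SoC address and length register masks */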
+struct AspeedMasks {
+ uint32_t src;
+ uint32_t dest;
+ uint32_t key;
+ uint32_t len;
+ uint32_t src_hi;
+ uint32_t dest_hi;
+ uint32_t key_hi;
+};
+
+void aspeed_test_md5(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha256(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha384(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha512(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha256_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha384_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha512_sg(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha256_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha384_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_sha512_accum(const char *machine, const uint32_t base,
+ const uint64_t src_addr);
+void aspeed_test_addresses(const char *machine, const uint32_t base,
+ const struct AspeedMasks *expected);
+
+#endif /* TESTS_ASPEED_HACE_UTILS_H */
+
diff --git a/tests/qtest/aspeed-smc-utils.c b/tests/qtest/aspeed-smc-utils.c
new file mode 100644
index 0000000..c27d09e
--- /dev/null
+++ b/tests/qtest/aspeed-smc-utils.c
@@ -0,0 +1,686 @@
+/*
+ * QTest testcase for the M25P80 Flash (Using the Aspeed SPI
+ * Controller)
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "libqtest-single.h"
+#include "qemu/bitops.h"
+#include "aspeed-smc-utils.h"
+
+/*
+ * Use an explicit bswap for the values read from/written to the flash region
+ * as they are BE and the Aspeed CPU is LE.
+ */
+static inline uint32_t make_be32(uint32_t data)
+{
+ return bswap32(data);
+}
+
+static inline void spi_writel(const AspeedSMCTestData *data, uint64_t offset,
+ uint32_t value)
+{
+ qtest_writel(data->s, data->spi_base + offset, value);
+}
+
+static inline uint32_t spi_readl(const AspeedSMCTestData *data, uint64_t offset)
+{
+ return qtest_readl(data->s, data->spi_base + offset);
+}
+
+static inline void flash_writeb(const AspeedSMCTestData *data, uint64_t offset,
+ uint8_t value)
+{
+ qtest_writeb(data->s, data->flash_base + offset, value);
+}
+
+static inline void flash_writel(const AspeedSMCTestData *data, uint64_t offset,
+ uint32_t value)
+{
+ qtest_writel(data->s, data->flash_base + offset, value);
+}
+
+static inline uint8_t flash_readb(const AspeedSMCTestData *data,
+ uint64_t offset)
+{
+ return qtest_readb(data->s, data->flash_base + offset);
+}
+
+static inline uint32_t flash_readl(const AspeedSMCTestData *data,
+ uint64_t offset)
+{
+ return qtest_readl(data->s, data->flash_base + offset);
+}
+
+static void spi_conf(const AspeedSMCTestData *data, uint32_t value)
+{
+ uint32_t conf = spi_readl(data, R_CONF);
+
+ conf |= value;
+ spi_writel(data, R_CONF, conf);
+}
+
+static void spi_conf_remove(const AspeedSMCTestData *data, uint32_t value)
+{
+ uint32_t conf = spi_readl(data, R_CONF);
+
+ conf &= ~value;
+ spi_writel(data, R_CONF, conf);
+}
+
+static void spi_ce_ctrl(const AspeedSMCTestData *data, uint32_t value)
+{
+ uint32_t conf = spi_readl(data, R_CE_CTRL);
+
+ conf |= value;
+ spi_writel(data, R_CE_CTRL, conf);
+}
+
+static void spi_ctrl_setmode(const AspeedSMCTestData *data, uint8_t mode,
+ uint8_t cmd)
+{
+ uint32_t ctrl_reg = R_CTRL0 + data->cs * 4;
+ uint32_t ctrl = spi_readl(data, ctrl_reg);
+ ctrl &= ~(CTRL_USERMODE | 0xff << 16);
+ ctrl |= mode | (cmd << 16);
+ spi_writel(data, ctrl_reg, ctrl);
+}
+
+static void spi_ctrl_start_user(const AspeedSMCTestData *data)
+{
+ uint32_t ctrl_reg = R_CTRL0 + data->cs * 4;
+ uint32_t ctrl = spi_readl(data, ctrl_reg);
+
+ ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE;
+ spi_writel(data, ctrl_reg, ctrl);
+
+ ctrl &= ~CTRL_CE_STOP_ACTIVE;
+ spi_writel(data, ctrl_reg, ctrl);
+}
+
+static void spi_ctrl_stop_user(const AspeedSMCTestData *data)
+{
+ uint32_t ctrl_reg = R_CTRL0 + data->cs * 4;
+ uint32_t ctrl = spi_readl(data, ctrl_reg);
+
+ ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE;
+ spi_writel(data, ctrl_reg, ctrl);
+}
+
+static void spi_ctrl_set_io_mode(const AspeedSMCTestData *data, uint32_t value)
+{
+ uint32_t ctrl_reg = R_CTRL0 + data->cs * 4;
+ uint32_t ctrl = spi_readl(data, ctrl_reg);
+ uint32_t mode;
+
+ mode = value & CTRL_IO_MODE_MASK;
+ ctrl &= ~CTRL_IO_MODE_MASK;
+ ctrl |= mode;
+ spi_writel(data, ctrl_reg, ctrl);
+}
+
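+/* Reset the flash and bulk-erase it back to all 0xff so each test starts from known content */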
+static void flash_reset(const AspeedSMCTestData *data)
+{
+ spi_conf(data, 1 << (CONF_ENABLE_W0 + data->cs));
+
+ spi_ctrl_start_user(data);
+ flash_writeb(data, 0, RESET_ENABLE);
+ flash_writeb(data, 0, RESET_MEMORY);
+ flash_writeb(data, 0, WREN);
+ flash_writeb(data, 0, BULK_ERASE);
+ flash_writeb(data, 0, WRDI);
+ spi_ctrl_stop_user(data);
+
+ spi_conf_remove(data, 1 << (CONF_ENABLE_W0 + data->cs));
+}
+
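+/* Read a page by issuing SPI commands in USER mode */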
+static void read_page(const AspeedSMCTestData *data, uint32_t addr,
+ uint32_t *page)
+{
+ int i;
+
+ spi_ctrl_start_user(data);
+
+ flash_writeb(data, 0, EN_4BYTE_ADDR);
+ flash_writeb(data, 0, READ);
+ flash_writel(data, 0, make_be32(addr));
+
+ /* Continuous reads are supported */
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ page[i] = make_be32(flash_readl(data, 0));
+ }
+ spi_ctrl_stop_user(data);
+}
+
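+/* Read a page through the memory-mapped flash window (direct AHB reads) */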
+static void read_page_mem(const AspeedSMCTestData *data, uint32_t addr,
+ uint32_t *page)
+{
+ int i;
+
+ /* Move out of USER mode to use direct reads from the AHB bus */
+ spi_ctrl_setmode(data, CTRL_READMODE, READ);
+
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ page[i] = make_be32(flash_readl(data, addr + i * 4));
+ }
+}
+
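+/* Fill a page with a constant word using direct writes through the flash window */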
+static void write_page_mem(const AspeedSMCTestData *data, uint32_t addr,
+ uint32_t write_value)
+{
+ spi_ctrl_setmode(data, CTRL_WRITEMODE, PP);
+
+ for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(data, addr + i * 4, write_value);
+ }
+}
+
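+/* Check that every word of the page reads back as expected_value */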
+static void assert_page_mem(const AspeedSMCTestData *data, uint32_t addr,
+ uint32_t expected_value)
+{
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ read_page_mem(data, addr, page);
+ for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, expected_value);
+ }
+}
+
+void aspeed_smc_test_read_jedec(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t jedec = 0x0;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, JEDEC_READ);
+ jedec |= flash_readb(test_data, 0) << 16;
+ jedec |= flash_readb(test_data, 0) << 8;
+ jedec |= flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+
+ flash_reset(test_data);
+
+ g_assert_cmphex(jedec, ==, test_data->jedec_id);
+}
+
+void aspeed_smc_test_erase_sector(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t some_page_addr = test_data->page_addr;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ int i;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ /*
+ * Previous page should be full of 0xffs after the backend is
+ * initialized
+ */
+ read_page(test_data, some_page_addr - FLASH_PAGE_SIZE, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, PP);
+ flash_writel(test_data, 0, make_be32(some_page_addr));
+
+ /* Fill the page with its own addresses */
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, 0, make_be32(some_page_addr + i * 4));
+ }
+ spi_ctrl_stop_user(test_data);
+
+ /* Check the page is correctly written */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, some_page_addr + i * 4);
+ }
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, ERASE_SECTOR);
+ flash_writel(test_data, 0, make_be32(some_page_addr));
+ spi_ctrl_stop_user(test_data);
+
+ /* Check the page is erased */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_erase_all(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t some_page_addr = test_data->page_addr;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ int i;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ /*
+ * Previous page should be full of 0xffs after the backend is
+ * initialized
+ */
+ read_page(test_data, some_page_addr - FLASH_PAGE_SIZE, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, PP);
+ flash_writel(test_data, 0, make_be32(some_page_addr));
+
+ /* Fill the page with its own addresses */
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, 0, make_be32(some_page_addr + i * 4));
+ }
+ spi_ctrl_stop_user(test_data);
+
+ /* Check the page is correctly written */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, some_page_addr + i * 4);
+ }
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, BULK_ERASE);
+ spi_ctrl_stop_user(test_data);
+
+ /* Check the page is erased */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_write_page(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t my_page_addr = test_data->page_addr;
+ uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ int i;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, PP);
+ flash_writel(test_data, 0, make_be32(my_page_addr));
+
+ /* Fill the page with its own addresses */
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, 0, make_be32(my_page_addr + i * 4));
+ }
+ spi_ctrl_stop_user(test_data);
+
+ /* Check what was written */
+ read_page(test_data, my_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ /* Check some other page. It should be full of 0xff */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_read_page_mem(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t my_page_addr = test_data->page_addr;
+ uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ int i;
+
+ /*
+ * Enable 4BYTE mode for controller.
+ */
+ spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs));
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, PP);
+ flash_writel(test_data, 0, make_be32(my_page_addr));
+
+ /* Fill the page with its own addresses */
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, 0, make_be32(my_page_addr + i * 4));
+ }
+ spi_ctrl_stop_user(test_data);
+ spi_conf_remove(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ /* Check what was written */
+ read_page_mem(test_data, my_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ /* Check some other page. It should be full of 0xff */
+ read_page_mem(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_write_page_mem(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t my_page_addr = test_data->page_addr;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ int i;
+
+ /*
+ * Enable 4BYTE mode for controller.
+ */
+ spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs));
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ spi_ctrl_stop_user(test_data);
+
+ /* Move out of USER mode to use direct writes to the AHB bus */
+ spi_ctrl_setmode(test_data, CTRL_WRITEMODE, PP);
+
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, my_page_addr + i * 4,
+ make_be32(my_page_addr + i * 4));
+ }
+
+ /* Check what was written */
+ read_page_mem(test_data, my_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_read_status_reg(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint8_t r;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+
+ g_assert_cmphex(r & SR_WEL, ==, 0);
+ g_assert(!qtest_qom_get_bool
+ (test_data->s, test_data->node, "write-enable"));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+
+ g_assert_cmphex(r & SR_WEL, ==, SR_WEL);
+ g_assert(qtest_qom_get_bool
+ (test_data->s, test_data->node, "write-enable"));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WRDI);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+
+ g_assert_cmphex(r & SR_WEL, ==, 0);
+ g_assert(!qtest_qom_get_bool
+ (test_data->s, test_data->node, "write-enable"));
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_status_reg_write_protection(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint8_t r;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ /* default case: WP# is high and SRWD is low -> status register writable */
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ /* test ability to write SRWD */
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, SRWD);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+ g_assert_cmphex(r & SRWD, ==, SRWD);
+
+ /* WP# high and SRWD high -> status register writable */
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ /* test ability to write SRWD */
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, 0);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+ g_assert_cmphex(r & SRWD, ==, 0);
+
+ /* WP# low and SRWD low -> status register writable */
+ qtest_set_irq_in(test_data->s, test_data->node, "WP#", 0, 0);
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ /* test ability to write SRWD */
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, SRWD);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+ g_assert_cmphex(r & SRWD, ==, SRWD);
+
+ /* WP# low and SRWD high -> status register NOT writable */
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ /* test ability to write SRWD */
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, 0);
+ flash_writeb(test_data, 0, RDSR);
+ r = flash_readb(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+ /* the write should not have taken effect */
+ g_assert_cmphex(r & SRWD, ==, SRWD);
+
+ qtest_set_irq_in(test_data->s, test_data->node, "WP#", 0, 1);
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_write_block_protect(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t sector_size = 65536;
+ uint32_t n_sectors = 512;
+
+ spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs));
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ uint32_t bp_bits = 0b0;
+
+ for (int i = 0; i < 16; i++) {
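+ /* Map BP[3:0] = i onto the status register: BP0-BP2 at bits 2-4, BP3 at bit 6 */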
+ bp_bits = ((i & 0b1000) << 3) | ((i & 0b0111) << 2);
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, BULK_ERASE);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, bp_bits);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ spi_ctrl_stop_user(test_data);
+
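+ /* A non-zero BP value i protects the top MIN(2^(i-1), n_sectors) sectors */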
+ uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0;
+ uint32_t protection_start = n_sectors - num_protected_sectors;
+ uint32_t protection_end = n_sectors;
+
+ for (int sector = 0; sector < n_sectors; sector++) {
+ uint32_t addr = sector * sector_size;
+
+ assert_page_mem(test_data, addr, 0xffffffff);
+ write_page_mem(test_data, addr, make_be32(0xabcdef12));
+
+ uint32_t expected_value = protection_start <= sector
+ && sector < protection_end
+ ? 0xffffffff : 0xabcdef12;
+
+ assert_page_mem(test_data, addr, expected_value);
+ }
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_write_block_protect_bottom_bit(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t sector_size = 65536;
+ uint32_t n_sectors = 512;
+
+ spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs));
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ /* the top/bottom (TB) select bit is enabled */
+ uint32_t bp_bits = 0b00100 << 3;
+
+ for (int i = 0; i < 16; i++) {
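+ /* Same BP mapping as above, plus the top/bottom (TB) select bit (bit 5) so protection grows from sector 0 */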
+ bp_bits = (((i & 0b1000) | 0b0100) << 3) | ((i & 0b0111) << 2);
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, BULK_ERASE);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, WRSR);
+ flash_writeb(test_data, 0, bp_bits);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ spi_ctrl_stop_user(test_data);
+
+ uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0;
+ uint32_t protection_start = 0;
+ uint32_t protection_end = num_protected_sectors;
+
+ for (int sector = 0; sector < n_sectors; sector++) {
+ uint32_t addr = sector * sector_size;
+
+ assert_page_mem(test_data, addr, 0xffffffff);
+ write_page_mem(test_data, addr, make_be32(0xabcdef12));
+
+ uint32_t expected_value = protection_start <= sector
+ && sector < protection_end
+ ? 0xffffffff : 0xabcdef12;
+
+ assert_page_mem(test_data, addr, expected_value);
+ }
+ }
+
+ flash_reset(test_data);
+}
+
+void aspeed_smc_test_write_page_qpi(const void *data)
+{
+ const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data;
+ uint32_t my_page_addr = test_data->page_addr;
+ uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE;
+ uint32_t page[FLASH_PAGE_SIZE / 4];
+ uint32_t page_pattern[] = {
+ 0xebd8c134, 0x5da196bc, 0xae15e729, 0x5085ccdf
+ };
+ int i;
+
+ spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs));
+
+ spi_ctrl_start_user(test_data);
+ flash_writeb(test_data, 0, EN_4BYTE_ADDR);
+ flash_writeb(test_data, 0, WREN);
+ flash_writeb(test_data, 0, PP);
+ flash_writel(test_data, 0, make_be32(my_page_addr));
+
+ /* Set QPI mode */
+ spi_ctrl_set_io_mode(test_data, CTRL_IO_QUAD_IO);
+
+ /* Fill the page pattern */
+ for (i = 0; i < ARRAY_SIZE(page_pattern); i++) {
+ flash_writel(test_data, 0, make_be32(page_pattern[i]));
+ }
+
+ /* Fill the page with its own addresses */
+ for (; i < FLASH_PAGE_SIZE / 4; i++) {
+ flash_writel(test_data, 0, make_be32(my_page_addr + i * 4));
+ }
+
+ /* Restore io mode */
+ spi_ctrl_set_io_mode(test_data, 0);
+ spi_ctrl_stop_user(test_data);
+
+ /* Check what was written */
+ read_page(test_data, my_page_addr, page);
+ for (i = 0; i < ARRAY_SIZE(page_pattern); i++) {
+ g_assert_cmphex(page[i], ==, page_pattern[i]);
+ }
+ for (; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ /* Check some other page. It should be full of 0xff */
+ read_page(test_data, some_page_addr, page);
+ for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset(test_data);
+}
+
diff --git a/tests/qtest/aspeed-smc-utils.h b/tests/qtest/aspeed-smc-utils.h
new file mode 100644
index 0000000..b07870f
--- /dev/null
+++ b/tests/qtest/aspeed-smc-utils.h
@@ -0,0 +1,95 @@
+/*
+ * QTest testcase for the M25P80 Flash (Using the Aspeed SPI
+ * Controller)
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TESTS_ASPEED_SMC_UTILS_H
+#define TESTS_ASPEED_SMC_UTILS_H
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "libqtest-single.h"
+#include "qemu/bitops.h"
+
+/*
+ * ASPEED SPI Controller registers
+ */
+#define R_CONF 0x00
+#define CONF_ENABLE_W0 16
+#define R_CE_CTRL 0x04
+#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */
+#define R_CTRL0 0x10
+#define CTRL_IO_QUAD_IO BIT(31)
+#define CTRL_CE_STOP_ACTIVE BIT(2)
+#define CTRL_READMODE 0x0
+#define CTRL_FREADMODE 0x1
+#define CTRL_WRITEMODE 0x2
+#define CTRL_USERMODE 0x3
+#define SR_WEL BIT(1)
+
+/*
+ * Flash commands
+ */
+enum {
+ JEDEC_READ = 0x9f,
+ RDSR = 0x5,
+ WRDI = 0x4,
+ BULK_ERASE = 0xc7,
+ READ = 0x03,
+ PP = 0x02,
+ WRSR = 0x1,
+ WREN = 0x6,
+ SRWD = 0x80,
+ RESET_ENABLE = 0x66,
+ RESET_MEMORY = 0x99,
+ EN_4BYTE_ADDR = 0xB7,
+ ERASE_SECTOR = 0xd8,
+};
+
+#define CTRL_IO_MODE_MASK (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+#define FLASH_PAGE_SIZE 256
+
+typedef struct AspeedSMCTestData {
+ QTestState *s;
+ uint64_t spi_base;
+ uint64_t flash_base;
+ uint32_t jedec_id;
+ char *tmp_path;
+ uint8_t cs;
+ const char *node;
+ uint32_t page_addr;
+} AspeedSMCTestData;
+
+void aspeed_smc_test_read_jedec(const void *data);
+void aspeed_smc_test_erase_sector(const void *data);
+void aspeed_smc_test_erase_all(const void *data);
+void aspeed_smc_test_write_page(const void *data);
+void aspeed_smc_test_read_page_mem(const void *data);
+void aspeed_smc_test_write_page_mem(const void *data);
+void aspeed_smc_test_read_status_reg(const void *data);
+void aspeed_smc_test_status_reg_write_protection(const void *data);
+void aspeed_smc_test_write_block_protect(const void *data);
+void aspeed_smc_test_write_block_protect_bottom_bit(const void *data);
+void aspeed_smc_test_write_page_qpi(const void *data);
+
+#endif /* TESTS_ASPEED_SMC_UTILS_H */
diff --git a/tests/qtest/aspeed_gpio-test.c b/tests/qtest/aspeed_gpio-test.c
index d38f51d..12675d4 100644
--- a/tests/qtest/aspeed_gpio-test.c
+++ b/tests/qtest/aspeed_gpio-test.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/timer.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "libqtest-single.h"
#define AST2600_GPIO_BASE 0x1E780000
diff --git a/tests/qtest/aspeed_hace-test.c b/tests/qtest/aspeed_hace-test.c
index ce86a44..3877702 100644
--- a/tests/qtest/aspeed_hace-test.c
+++ b/tests/qtest/aspeed_hace-test.c
@@ -6,599 +6,222 @@
*/
#include "qemu/osdep.h"
-
#include "libqtest.h"
#include "qemu/bitops.h"
+#include "aspeed-hace-utils.h"
-#define HACE_CMD 0x10
-#define HACE_SHA_BE_EN BIT(3)
-#define HACE_MD5_LE_EN BIT(2)
-#define HACE_ALGO_MD5 0
-#define HACE_ALGO_SHA1 BIT(5)
-#define HACE_ALGO_SHA224 BIT(6)
-#define HACE_ALGO_SHA256 (BIT(4) | BIT(6))
-#define HACE_ALGO_SHA512 (BIT(5) | BIT(6))
-#define HACE_ALGO_SHA384 (BIT(5) | BIT(6) | BIT(10))
-#define HACE_SG_EN BIT(18)
-#define HACE_ACCUM_EN BIT(8)
-
-#define HACE_STS 0x1c
-#define HACE_RSA_ISR BIT(13)
-#define HACE_CRYPTO_ISR BIT(12)
-#define HACE_HASH_ISR BIT(9)
-#define HACE_RSA_BUSY BIT(2)
-#define HACE_CRYPTO_BUSY BIT(1)
-#define HACE_HASH_BUSY BIT(0)
-#define HACE_HASH_SRC 0x20
-#define HACE_HASH_DIGEST 0x24
-#define HACE_HASH_KEY_BUFF 0x28
-#define HACE_HASH_DATA_LEN 0x2c
-#define HACE_HASH_CMD 0x30
-/* Scatter-Gather Hash */
-#define SG_LIST_LEN_LAST BIT(31)
-struct AspeedSgList {
- uint32_t len;
- uint32_t addr;
-} __attribute__ ((__packed__));
-
-/*
- * Test vector is the ascii "abc"
- *
- * Expected results were generated using command line utitiles:
- *
- * echo -n -e 'abc' | dd of=/tmp/test
- * for hash in sha512sum sha256sum md5sum; do $hash /tmp/test; done
- *
- */
-static const uint8_t test_vector[] = {0x61, 0x62, 0x63};
-
-static const uint8_t test_result_sha512[] = {
- 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49,
- 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
- 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a,
- 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
- 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f,
- 0xa5, 0x4c, 0xa4, 0x9f};
+static const struct AspeedMasks ast1030_masks = {
+ .src = 0x7fffffff,
+ .dest = 0x7ffffff8,
+ .key = 0x7ffffff8,
+ .len = 0x0fffffff,
+};
-static const uint8_t test_result_sha256[] = {
- 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde,
- 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
- 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
+static const struct AspeedMasks ast2600_masks = {
+ .src = 0x7fffffff,
+ .dest = 0x7ffffff8,
+ .key = 0x7ffffff8,
+ .len = 0x0fffffff,
+};
-static const uint8_t test_result_md5[] = {
- 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d,
- 0x28, 0xe1, 0x7f, 0x72};
+static const struct AspeedMasks ast2500_masks = {
+ .src = 0x3fffffff,
+ .dest = 0x3ffffff8,
+ .key = 0x3fffffc0,
+ .len = 0x0fffffff,
+};
-/*
- * The Scatter-Gather Test vector is the ascii "abc" "def" "ghi", broken
- * into blocks of 3 characters as shown
- *
- * Expected results were generated using command line utitiles:
- *
- * echo -n -e 'abcdefghijkl' | dd of=/tmp/test
- * for hash in sha512sum sha256sum; do $hash /tmp/test; done
- *
- */
-static const uint8_t test_vector_sg1[] = {0x61, 0x62, 0x63, 0x64, 0x65, 0x66};
-static const uint8_t test_vector_sg2[] = {0x67, 0x68, 0x69};
-static const uint8_t test_vector_sg3[] = {0x6a, 0x6b, 0x6c};
-
-static const uint8_t test_result_sg_sha512[] = {
- 0x17, 0x80, 0x7c, 0x72, 0x8e, 0xe3, 0xba, 0x35, 0xe7, 0xcf, 0x7a, 0xf8,
- 0x23, 0x11, 0x6d, 0x26, 0xe4, 0x1e, 0x5d, 0x4d, 0x6c, 0x2f, 0xf1, 0xf3,
- 0x72, 0x0d, 0x3d, 0x96, 0xaa, 0xcb, 0x6f, 0x69, 0xde, 0x64, 0x2e, 0x63,
- 0xd5, 0xb7, 0x3f, 0xc3, 0x96, 0xc1, 0x2b, 0xe3, 0x8b, 0x2b, 0xd5, 0xd8,
- 0x84, 0x25, 0x7c, 0x32, 0xc8, 0xf6, 0xd0, 0x85, 0x4a, 0xe6, 0xb5, 0x40,
- 0xf8, 0x6d, 0xda, 0x2e};
-
-static const uint8_t test_result_sg_sha256[] = {
- 0xd6, 0x82, 0xed, 0x4c, 0xa4, 0xd9, 0x89, 0xc1, 0x34, 0xec, 0x94, 0xf1,
- 0x55, 0x1e, 0x1e, 0xc5, 0x80, 0xdd, 0x6d, 0x5a, 0x6e, 0xcd, 0xe9, 0xf3,
- 0xd3, 0x5e, 0x6e, 0x4a, 0x71, 0x7f, 0xbd, 0xe4};
+static const struct AspeedMasks ast2400_masks = {
+ .src = 0x0fffffff,
+ .dest = 0x0ffffff8,
+ .key = 0x0fffffc0,
+ .len = 0x0fffffff,
+};
-/*
- * The accumulative mode requires firmware to provide internal initial state
- * and message padding (including length L at the end of padding).
- *
- * This test vector is a ascii text "abc" with padding message.
- *
- * Expected results were generated using command line utitiles:
- *
- * echo -n -e 'abc' | dd of=/tmp/test
- * for hash in sha512sum sha256sum; do $hash /tmp/test; done
- */
-static const uint8_t test_vector_accum_512[] = {
- 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18};
-
-static const uint8_t test_vector_accum_256[] = {
- 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18};
-
-static const uint8_t test_result_accum_sha512[] = {
- 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49,
- 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
- 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a,
- 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
- 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f,
- 0xa5, 0x4c, 0xa4, 0x9f};
-
-static const uint8_t test_result_accum_sha256[] = {
- 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde,
- 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
- 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
-
-static void write_regs(QTestState *s, uint32_t base, uint32_t src,
- uint32_t length, uint32_t out, uint32_t method)
+/* ast1030 */
+static void test_md5_ast1030(void)
{
- qtest_writel(s, base + HACE_HASH_SRC, src);
- qtest_writel(s, base + HACE_HASH_DIGEST, out);
- qtest_writel(s, base + HACE_HASH_DATA_LEN, length);
- qtest_writel(s, base + HACE_HASH_CMD, HACE_SHA_BE_EN | method);
+ aspeed_test_md5("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_md5(const char *machine, const uint32_t base,
- const uint32_t src_addr)
-
+static void test_sha256_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- uint32_t digest_addr = src_addr + 0x01000000;
- uint8_t digest[16] = {0};
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
-
- write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_MD5);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_md5, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha256("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha256(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha256_sg_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t digest_addr = src_addr + 0x1000000;
- uint8_t digest[32] = {0};
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
-
- write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_SHA256);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_sha256, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha256_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha512(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha384_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t digest_addr = src_addr + 0x1000000;
- uint8_t digest[64] = {0};
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector));
-
- write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_SHA512);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_sha512, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha384("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha256_sg(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha384_sg_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t src_addr_1 = src_addr + 0x1000000;
- const uint32_t src_addr_2 = src_addr + 0x2000000;
- const uint32_t src_addr_3 = src_addr + 0x3000000;
- const uint32_t digest_addr = src_addr + 0x4000000;
- uint8_t digest[32] = {0};
- struct AspeedSgList array[] = {
- { cpu_to_le32(sizeof(test_vector_sg1)),
- cpu_to_le32(src_addr_1) },
- { cpu_to_le32(sizeof(test_vector_sg2)),
- cpu_to_le32(src_addr_2) },
- { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST),
- cpu_to_le32(src_addr_3) },
- };
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1));
- qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2));
- qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3));
- qtest_memwrite(s, src_addr, array, sizeof(array));
-
- write_regs(s, base, src_addr,
- (sizeof(test_vector_sg1)
- + sizeof(test_vector_sg2)
- + sizeof(test_vector_sg3)),
- digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_sg_sha256, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha384_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha512_sg(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha512_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t src_addr_1 = src_addr + 0x1000000;
- const uint32_t src_addr_2 = src_addr + 0x2000000;
- const uint32_t src_addr_3 = src_addr + 0x3000000;
- const uint32_t digest_addr = src_addr + 0x4000000;
- uint8_t digest[64] = {0};
- struct AspeedSgList array[] = {
- { cpu_to_le32(sizeof(test_vector_sg1)),
- cpu_to_le32(src_addr_1) },
- { cpu_to_le32(sizeof(test_vector_sg2)),
- cpu_to_le32(src_addr_2) },
- { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST),
- cpu_to_le32(src_addr_3) },
- };
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1));
- qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2));
- qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3));
- qtest_memwrite(s, src_addr, array, sizeof(array));
-
- write_regs(s, base, src_addr,
- (sizeof(test_vector_sg1)
- + sizeof(test_vector_sg2)
- + sizeof(test_vector_sg3)),
- digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_sg_sha512, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha512("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha256_accum(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha512_sg_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t buffer_addr = src_addr + 0x1000000;
- const uint32_t digest_addr = src_addr + 0x4000000;
- uint8_t digest[32] = {0};
- struct AspeedSgList array[] = {
- { cpu_to_le32(sizeof(test_vector_accum_256) | SG_LIST_LEN_LAST),
- cpu_to_le32(buffer_addr) },
- };
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, buffer_addr, test_vector_accum_256,
- sizeof(test_vector_accum_256));
- qtest_memwrite(s, src_addr, array, sizeof(array));
-
- write_regs(s, base, src_addr, sizeof(test_vector_accum_256),
- digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN | HACE_ACCUM_EN);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_accum_sha256, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha512_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-static void test_sha512_accum(const char *machine, const uint32_t base,
- const uint32_t src_addr)
+static void test_sha256_accum_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- const uint32_t buffer_addr = src_addr + 0x1000000;
- const uint32_t digest_addr = src_addr + 0x4000000;
- uint8_t digest[64] = {0};
- struct AspeedSgList array[] = {
- { cpu_to_le32(sizeof(test_vector_accum_512) | SG_LIST_LEN_LAST),
- cpu_to_le32(buffer_addr) },
- };
-
- /* Check engine is idle, no busy or irq bits set */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Write test vector into memory */
- qtest_memwrite(s, buffer_addr, test_vector_accum_512,
- sizeof(test_vector_accum_512));
- qtest_memwrite(s, src_addr, array, sizeof(array));
-
- write_regs(s, base, src_addr, sizeof(test_vector_accum_512),
- digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN | HACE_ACCUM_EN);
-
- /* Check hash IRQ status is asserted */
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200);
-
- /* Clear IRQ status and check status is deasserted */
- qtest_writel(s, base + HACE_STS, 0x00000200);
- g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0);
-
- /* Read computed digest from memory */
- qtest_memread(s, digest_addr, digest, sizeof(digest));
-
- /* Check result of computation */
- g_assert_cmpmem(digest, sizeof(digest),
- test_result_accum_sha512, sizeof(digest));
-
- qtest_quit(s);
+ aspeed_test_sha256_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
}
-struct masks {
- uint32_t src;
- uint32_t dest;
- uint32_t len;
-};
-
-static const struct masks ast2600_masks = {
- .src = 0x7fffffff,
- .dest = 0x7ffffff8,
- .len = 0x0fffffff,
-};
-
-static const struct masks ast2500_masks = {
- .src = 0x3fffffff,
- .dest = 0x3ffffff8,
- .len = 0x0fffffff,
-};
-
-static const struct masks ast2400_masks = {
- .src = 0x0fffffff,
- .dest = 0x0ffffff8,
- .len = 0x0fffffff,
-};
-
-static void test_addresses(const char *machine, const uint32_t base,
- const struct masks *expected)
+static void test_sha384_accum_ast1030(void)
{
- QTestState *s = qtest_init(machine);
-
- /*
- * Check command mode is zero, meaning engine is in direct access mode,
- * as this affects the masking behavior of the HASH_SRC register.
- */
- g_assert_cmphex(qtest_readl(s, base + HACE_CMD), ==, 0);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0);
-
-
- /* Check that the address masking is correct */
- qtest_writel(s, base + HACE_HASH_SRC, 0xffffffff);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, expected->src);
-
- qtest_writel(s, base + HACE_HASH_DIGEST, 0xffffffff);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, expected->dest);
-
- qtest_writel(s, base + HACE_HASH_DATA_LEN, 0xffffffff);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, expected->len);
-
- /* Reset to zero */
- qtest_writel(s, base + HACE_HASH_SRC, 0);
- qtest_writel(s, base + HACE_HASH_DIGEST, 0);
- qtest_writel(s, base + HACE_HASH_DATA_LEN, 0);
+ aspeed_test_sha384_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
+}
- /* Check that all bits are now zero */
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0);
- g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0);
+static void test_sha512_accum_ast1030(void)
+{
+ aspeed_test_sha512_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000);
+}
- qtest_quit(s);
+static void test_addresses_ast1030(void)
+{
+ aspeed_test_addresses("-machine ast1030-evb", 0x7e6d0000, &ast1030_masks);
}
/* ast2600 */
static void test_md5_ast2600(void)
{
- test_md5("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_md5("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha256_ast2600(void)
{
- test_sha256("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha256("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha256_sg_ast2600(void)
{
- test_sha256_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha256_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+}
+
+static void test_sha384_ast2600(void)
+{
+ aspeed_test_sha384("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+}
+
+static void test_sha384_sg_ast2600(void)
+{
+ aspeed_test_sha384_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha512_ast2600(void)
{
- test_sha512("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha512("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha512_sg_ast2600(void)
{
- test_sha512_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha512_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha256_accum_ast2600(void)
{
- test_sha256_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha256_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+}
+
+static void test_sha384_accum_ast2600(void)
+{
+ aspeed_test_sha384_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_sha512_accum_ast2600(void)
{
- test_sha512_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
+ aspeed_test_sha512_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000);
}
static void test_addresses_ast2600(void)
{
- test_addresses("-machine ast2600-evb", 0x1e6d0000, &ast2600_masks);
+ aspeed_test_addresses("-machine ast2600-evb", 0x1e6d0000, &ast2600_masks);
}
/* ast2500 */
static void test_md5_ast2500(void)
{
- test_md5("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
+ aspeed_test_md5("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
}
static void test_sha256_ast2500(void)
{
- test_sha256("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
+ aspeed_test_sha256("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
}
static void test_sha512_ast2500(void)
{
- test_sha512("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
+ aspeed_test_sha512("-machine ast2500-evb", 0x1e6e3000, 0x80000000);
}
static void test_addresses_ast2500(void)
{
- test_addresses("-machine ast2500-evb", 0x1e6e3000, &ast2500_masks);
+ aspeed_test_addresses("-machine ast2500-evb", 0x1e6e3000, &ast2500_masks);
}
/* ast2400 */
static void test_md5_ast2400(void)
{
- test_md5("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
+ aspeed_test_md5("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
}
static void test_sha256_ast2400(void)
{
- test_sha256("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
+ aspeed_test_sha256("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
}
static void test_sha512_ast2400(void)
{
- test_sha512("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
+ aspeed_test_sha512("-machine palmetto-bmc", 0x1e6e3000, 0x40000000);
}
static void test_addresses_ast2400(void)
{
- test_addresses("-machine palmetto-bmc", 0x1e6e3000, &ast2400_masks);
+ aspeed_test_addresses("-machine palmetto-bmc", 0x1e6e3000, &ast2400_masks);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
+ qtest_add_func("ast1030/hace/addresses", test_addresses_ast1030);
+ qtest_add_func("ast1030/hace/sha512", test_sha512_ast1030);
+ qtest_add_func("ast1030/hace/sha384", test_sha384_ast1030);
+ qtest_add_func("ast1030/hace/sha256", test_sha256_ast1030);
+ qtest_add_func("ast1030/hace/md5", test_md5_ast1030);
+
+ qtest_add_func("ast1030/hace/sha512_sg", test_sha512_sg_ast1030);
+ qtest_add_func("ast1030/hace/sha384_sg", test_sha384_sg_ast1030);
+ qtest_add_func("ast1030/hace/sha256_sg", test_sha256_sg_ast1030);
+
+ qtest_add_func("ast1030/hace/sha512_accum", test_sha512_accum_ast1030);
+ qtest_add_func("ast1030/hace/sha384_accum", test_sha384_accum_ast1030);
+ qtest_add_func("ast1030/hace/sha256_accum", test_sha256_accum_ast1030);
+
qtest_add_func("ast2600/hace/addresses", test_addresses_ast2600);
qtest_add_func("ast2600/hace/sha512", test_sha512_ast2600);
+ qtest_add_func("ast2600/hace/sha384", test_sha384_ast2600);
qtest_add_func("ast2600/hace/sha256", test_sha256_ast2600);
qtest_add_func("ast2600/hace/md5", test_md5_ast2600);
qtest_add_func("ast2600/hace/sha512_sg", test_sha512_sg_ast2600);
+ qtest_add_func("ast2600/hace/sha384_sg", test_sha384_sg_ast2600);
qtest_add_func("ast2600/hace/sha256_sg", test_sha256_sg_ast2600);
qtest_add_func("ast2600/hace/sha512_accum", test_sha512_accum_ast2600);
+ qtest_add_func("ast2600/hace/sha384_accum", test_sha384_accum_ast2600);
qtest_add_func("ast2600/hace/sha256_accum", test_sha256_accum_ast2600);
qtest_add_func("ast2500/hace/addresses", test_addresses_ast2500);
diff --git a/tests/qtest/aspeed_smc-test.c b/tests/qtest/aspeed_smc-test.c
index c713a37..52a00e6 100644
--- a/tests/qtest/aspeed_smc-test.c
+++ b/tests/qtest/aspeed_smc-test.c
@@ -27,623 +27,211 @@
#include "qemu/bswap.h"
#include "libqtest-single.h"
#include "qemu/bitops.h"
+#include "aspeed-smc-utils.h"
-/*
- * ASPEED SPI Controller registers
- */
-#define R_CONF 0x00
-#define CONF_ENABLE_W0 (1 << 16)
-#define R_CE_CTRL 0x04
-#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */
-#define R_CTRL0 0x10
-#define CTRL_CE_STOP_ACTIVE (1 << 2)
-#define CTRL_READMODE 0x0
-#define CTRL_FREADMODE 0x1
-#define CTRL_WRITEMODE 0x2
-#define CTRL_USERMODE 0x3
-#define SR_WEL BIT(1)
-
-#define ASPEED_FMC_BASE 0x1E620000
-#define ASPEED_FLASH_BASE 0x20000000
-
-/*
- * Flash commands
- */
-enum {
- JEDEC_READ = 0x9f,
- RDSR = 0x5,
- WRDI = 0x4,
- BULK_ERASE = 0xc7,
- READ = 0x03,
- PP = 0x02,
- WRSR = 0x1,
- WREN = 0x6,
- SRWD = 0x80,
- RESET_ENABLE = 0x66,
- RESET_MEMORY = 0x99,
- EN_4BYTE_ADDR = 0xB7,
- ERASE_SECTOR = 0xd8,
-};
-
-#define FLASH_JEDEC 0x20ba19 /* n25q256a */
-#define FLASH_SIZE (32 * 1024 * 1024)
-
-#define FLASH_PAGE_SIZE 256
-
-/*
- * Use an explicit bswap for the values read/written to the flash region
- * as they are BE and the Aspeed CPU is LE.
- */
-static inline uint32_t make_be32(uint32_t data)
+static void test_palmetto_bmc(AspeedSMCTestData *data)
{
- return bswap32(data);
-}
-
-static void spi_conf(uint32_t value)
-{
- uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF);
-
- conf |= value;
- writel(ASPEED_FMC_BASE + R_CONF, conf);
-}
-
-static void spi_conf_remove(uint32_t value)
-{
- uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF);
-
- conf &= ~value;
- writel(ASPEED_FMC_BASE + R_CONF, conf);
-}
-
-static void spi_ce_ctrl(uint32_t value)
-{
- uint32_t conf = readl(ASPEED_FMC_BASE + R_CE_CTRL);
-
- conf |= value;
- writel(ASPEED_FMC_BASE + R_CE_CTRL, conf);
-}
-
-static void spi_ctrl_setmode(uint8_t mode, uint8_t cmd)
-{
- uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
- ctrl &= ~(CTRL_USERMODE | 0xff << 16);
- ctrl |= mode | (cmd << 16);
- writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
-}
-
-static void spi_ctrl_start_user(void)
-{
- uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
-
- ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE;
- writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
-
- ctrl &= ~CTRL_CE_STOP_ACTIVE;
- writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
-}
-
-static void spi_ctrl_stop_user(void)
-{
- uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
-
- ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE;
- writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
-}
-
-static void flash_reset(void)
-{
- spi_conf(CONF_ENABLE_W0);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, RESET_ENABLE);
- writeb(ASPEED_FLASH_BASE, RESET_MEMORY);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, BULK_ERASE);
- writeb(ASPEED_FLASH_BASE, WRDI);
- spi_ctrl_stop_user();
-
- spi_conf_remove(CONF_ENABLE_W0);
-}
-
-static void test_read_jedec(void)
-{
- uint32_t jedec = 0x0;
-
- spi_conf(CONF_ENABLE_W0);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, JEDEC_READ);
- jedec |= readb(ASPEED_FLASH_BASE) << 16;
- jedec |= readb(ASPEED_FLASH_BASE) << 8;
- jedec |= readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
-
- flash_reset();
-
- g_assert_cmphex(jedec, ==, FLASH_JEDEC);
-}
-
-static void read_page(uint32_t addr, uint32_t *page)
-{
- int i;
-
- spi_ctrl_start_user();
-
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, READ);
- writel(ASPEED_FLASH_BASE, make_be32(addr));
-
- /* Continuous reads are supported */
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- page[i] = make_be32(readl(ASPEED_FLASH_BASE));
- }
- spi_ctrl_stop_user();
-}
-
-static void read_page_mem(uint32_t addr, uint32_t *page)
-{
- int i;
-
- /* move out of USER mode to use direct reads from the AHB bus */
- spi_ctrl_setmode(CTRL_READMODE, READ);
-
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- page[i] = make_be32(readl(ASPEED_FLASH_BASE + addr + i * 4));
- }
-}
-
-static void write_page_mem(uint32_t addr, uint32_t write_value)
-{
- spi_ctrl_setmode(CTRL_WRITEMODE, PP);
-
- for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE + addr + i * 4, write_value);
- }
-}
-
-static void assert_page_mem(uint32_t addr, uint32_t expected_value)
-{
- uint32_t page[FLASH_PAGE_SIZE / 4];
- read_page_mem(addr, page);
- for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, expected_value);
- }
-}
-
-static void test_erase_sector(void)
-{
- uint32_t some_page_addr = 0x600 * FLASH_PAGE_SIZE;
- uint32_t page[FLASH_PAGE_SIZE / 4];
- int i;
-
- spi_conf(CONF_ENABLE_W0);
-
- /*
- * Previous page should be full of 0xffs after backend is
- * initialized
- */
- read_page(some_page_addr - FLASH_PAGE_SIZE, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, PP);
- writel(ASPEED_FLASH_BASE, make_be32(some_page_addr));
-
- /* Fill the page with its own addresses */
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4));
- }
- spi_ctrl_stop_user();
-
- /* Check the page is correctly written */
- read_page(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, some_page_addr + i * 4);
- }
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, ERASE_SECTOR);
- writel(ASPEED_FLASH_BASE, make_be32(some_page_addr));
- spi_ctrl_stop_user();
-
- /* Check the page is erased */
- read_page(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- flash_reset();
-}
-
-static void test_erase_all(void)
-{
- uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE;
- uint32_t page[FLASH_PAGE_SIZE / 4];
- int i;
-
- spi_conf(CONF_ENABLE_W0);
-
- /*
- * Previous page should be full of 0xffs after backend is
- * initialized
- */
- read_page(some_page_addr - FLASH_PAGE_SIZE, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, PP);
- writel(ASPEED_FLASH_BASE, make_be32(some_page_addr));
-
- /* Fill the page with its own addresses */
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4));
- }
- spi_ctrl_stop_user();
-
- /* Check the page is correctly written */
- read_page(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, some_page_addr + i * 4);
- }
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, BULK_ERASE);
- spi_ctrl_stop_user();
-
- /* Check the page is erased */
- read_page(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- flash_reset();
-}
-
-static void test_write_page(void)
-{
- uint32_t my_page_addr = 0x14000 * FLASH_PAGE_SIZE; /* beyond 16MB */
- uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE;
- uint32_t page[FLASH_PAGE_SIZE / 4];
- int i;
-
- spi_conf(CONF_ENABLE_W0);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, PP);
- writel(ASPEED_FLASH_BASE, make_be32(my_page_addr));
-
- /* Fill the page with its own addresses */
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE, make_be32(my_page_addr + i * 4));
- }
- spi_ctrl_stop_user();
-
- /* Check what was written */
- read_page(my_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
- }
-
- /* Check some other page. It should be full of 0xff */
- read_page(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- flash_reset();
-}
-
-static void test_read_page_mem(void)
-{
- uint32_t my_page_addr = 0x14000 * FLASH_PAGE_SIZE; /* beyond 16MB */
- uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE;
- uint32_t page[FLASH_PAGE_SIZE / 4];
- int i;
-
- /* Enable 4BYTE mode for controller. This should be strapped by
- * HW for CE0 anyhow.
- */
- spi_ce_ctrl(1 << CRTL_EXTENDED0);
-
- /* Enable 4BYTE mode for flash. */
- spi_conf(CONF_ENABLE_W0);
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, PP);
- writel(ASPEED_FLASH_BASE, make_be32(my_page_addr));
-
- /* Fill the page with its own addresses */
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE, make_be32(my_page_addr + i * 4));
- }
- spi_ctrl_stop_user();
- spi_conf_remove(CONF_ENABLE_W0);
-
- /* Check what was written */
- read_page_mem(my_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
- }
-
- /* Check some other page. It should be full of 0xff */
- read_page_mem(some_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, 0xffffffff);
- }
-
- flash_reset();
-}
-
-static void test_write_page_mem(void)
-{
- uint32_t my_page_addr = 0x15000 * FLASH_PAGE_SIZE;
- uint32_t page[FLASH_PAGE_SIZE / 4];
- int i;
-
- /* Enable 4BYTE mode for controller. This should be strapped by
- * HW for CE0 anyhow.
- */
- spi_ce_ctrl(1 << CRTL_EXTENDED0);
-
- /* Enable 4BYTE mode for flash. */
- spi_conf(CONF_ENABLE_W0);
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- spi_ctrl_stop_user();
-
- /* move out of USER mode to use direct writes to the AHB bus */
- spi_ctrl_setmode(CTRL_WRITEMODE, PP);
-
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- writel(ASPEED_FLASH_BASE + my_page_addr + i * 4,
- make_be32(my_page_addr + i * 4));
- }
-
- /* Check what was written */
- read_page_mem(my_page_addr, page);
- for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) {
- g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
- }
-
- flash_reset();
-}
-
-static void test_read_status_reg(void)
-{
- uint8_t r;
-
- spi_conf(CONF_ENABLE_W0);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
-
- g_assert_cmphex(r & SR_WEL, ==, 0);
- g_assert(!qtest_qom_get_bool
- (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable"));
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
-
- g_assert_cmphex(r & SR_WEL, ==, SR_WEL);
- g_assert(qtest_qom_get_bool
- (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable"));
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WRDI);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
-
- g_assert_cmphex(r & SR_WEL, ==, 0);
- g_assert(!qtest_qom_get_bool
- (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable"));
+ int ret;
+ int fd;
- flash_reset();
-}
+ fd = g_file_open_tmp("qtest.m25p80.n25q256a.XXXXXX", &data->tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, 32 * 1024 * 1024);
+ g_assert(ret == 0);
+ close(fd);
-static void test_status_reg_write_protection(void)
+ data->s = qtest_initf("-m 256 -machine palmetto-bmc "
+ "-drive file=%s,format=raw,if=mtd",
+ data->tmp_path);
+
+ /* fmc cs0 with n25q256a flash */
+ data->flash_base = 0x20000000;
+ data->spi_base = 0x1E620000;
+ data->jedec_id = 0x20ba19;
+ data->cs = 0;
+ data->node = "/machine/soc/fmc/ssi.0/child[0]";
+ /* beyond 16MB */
+ data->page_addr = 0x14000 * FLASH_PAGE_SIZE;
+
+ qtest_add_data_func("/ast2400/smc/read_jedec",
+ data, aspeed_smc_test_read_jedec);
+ qtest_add_data_func("/ast2400/smc/erase_sector",
+ data, aspeed_smc_test_erase_sector);
+ qtest_add_data_func("/ast2400/smc/erase_all",
+ data, aspeed_smc_test_erase_all);
+ qtest_add_data_func("/ast2400/smc/write_page",
+ data, aspeed_smc_test_write_page);
+ qtest_add_data_func("/ast2400/smc/read_page_mem",
+ data, aspeed_smc_test_read_page_mem);
+ qtest_add_data_func("/ast2400/smc/write_page_mem",
+ data, aspeed_smc_test_write_page_mem);
+ qtest_add_data_func("/ast2400/smc/read_status_reg",
+ data, aspeed_smc_test_read_status_reg);
+ qtest_add_data_func("/ast2400/smc/status_reg_write_protection",
+ data, aspeed_smc_test_status_reg_write_protection);
+ qtest_add_data_func("/ast2400/smc/write_block_protect",
+ data, aspeed_smc_test_write_block_protect);
+ qtest_add_data_func("/ast2400/smc/write_block_protect_bottom_bit",
+ data, aspeed_smc_test_write_block_protect_bottom_bit);
+}
+
+static void test_ast2500_evb(AspeedSMCTestData *data)
{
- uint8_t r;
-
- spi_conf(CONF_ENABLE_W0);
-
- /* default case: WP# is high and SRWD is low -> status register writable */
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- /* test ability to write SRWD */
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, SRWD);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
- g_assert_cmphex(r & SRWD, ==, SRWD);
-
- /* WP# high and SRWD high -> status register writable */
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- /* test ability to write SRWD */
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, 0);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
- g_assert_cmphex(r & SRWD, ==, 0);
-
- /* WP# low and SRWD low -> status register writable */
- qtest_set_irq_in(global_qtest,
- "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 0);
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- /* test ability to write SRWD */
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, SRWD);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
- g_assert_cmphex(r & SRWD, ==, SRWD);
-
- /* WP# low and SRWD high -> status register NOT writable */
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- /* test ability to write SRWD */
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, 0);
- writeb(ASPEED_FLASH_BASE, RDSR);
- r = readb(ASPEED_FLASH_BASE);
- spi_ctrl_stop_user();
- /* write is not successful */
- g_assert_cmphex(r & SRWD, ==, SRWD);
+ int ret;
+ int fd;
- qtest_set_irq_in(global_qtest,
- "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 1);
- flash_reset();
-}
+ fd = g_file_open_tmp("qtest.m25p80.mx25l25635e.XXXXXX",
+ &data->tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, 32 * 1024 * 1024);
+ g_assert(ret == 0);
+ close(fd);
-static void test_write_block_protect(void)
+ data->s = qtest_initf("-machine ast2500-evb "
+ "-drive file=%s,format=raw,if=mtd",
+ data->tmp_path);
+
+ /* fmc cs0 with mx25l25635e flash */
+ data->flash_base = 0x20000000;
+ data->spi_base = 0x1E620000;
+ data->jedec_id = 0xc22019;
+ data->cs = 0;
+ data->node = "/machine/soc/fmc/ssi.0/child[0]";
+ /* beyond 16MB */
+ data->page_addr = 0x14000 * FLASH_PAGE_SIZE;
+
+ qtest_add_data_func("/ast2500/smc/read_jedec",
+ data, aspeed_smc_test_read_jedec);
+ qtest_add_data_func("/ast2500/smc/erase_sector",
+ data, aspeed_smc_test_erase_sector);
+ qtest_add_data_func("/ast2500/smc/erase_all",
+ data, aspeed_smc_test_erase_all);
+ qtest_add_data_func("/ast2500/smc/write_page",
+ data, aspeed_smc_test_write_page);
+ qtest_add_data_func("/ast2500/smc/read_page_mem",
+ data, aspeed_smc_test_read_page_mem);
+ qtest_add_data_func("/ast2500/smc/write_page_mem",
+ data, aspeed_smc_test_write_page_mem);
+ qtest_add_data_func("/ast2500/smc/read_status_reg",
+ data, aspeed_smc_test_read_status_reg);
+ qtest_add_data_func("/ast2500/smc/write_page_qpi",
+ data, aspeed_smc_test_write_page_qpi);
+}
+
+static void test_ast2600_evb(AspeedSMCTestData *data)
{
- uint32_t sector_size = 65536;
- uint32_t n_sectors = 512;
-
- spi_ce_ctrl(1 << CRTL_EXTENDED0);
- spi_conf(CONF_ENABLE_W0);
-
- uint32_t bp_bits = 0b0;
-
- for (int i = 0; i < 16; i++) {
- bp_bits = ((i & 0b1000) << 3) | ((i & 0b0111) << 2);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, BULK_ERASE);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, bp_bits);
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- spi_ctrl_stop_user();
-
- uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0;
- uint32_t protection_start = n_sectors - num_protected_sectors;
- uint32_t protection_end = n_sectors;
-
- for (int sector = 0; sector < n_sectors; sector++) {
- uint32_t addr = sector * sector_size;
-
- assert_page_mem(addr, 0xffffffff);
- write_page_mem(addr, make_be32(0xabcdef12));
-
- uint32_t expected_value = protection_start <= sector
- && sector < protection_end
- ? 0xffffffff : 0xabcdef12;
-
- assert_page_mem(addr, expected_value);
- }
- }
+ int ret;
+ int fd;
- flash_reset();
-}
+ fd = g_file_open_tmp("qtest.m25p80.mx66u51235f.XXXXXX",
+ &data->tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, 64 * 1024 * 1024);
+ g_assert(ret == 0);
+ close(fd);
-static void test_write_block_protect_bottom_bit(void)
+ data->s = qtest_initf("-machine ast2600-evb "
+ "-drive file=%s,format=raw,if=mtd",
+ data->tmp_path);
+
+ /* fmc cs0 with mx66u51235f flash */
+ data->flash_base = 0x20000000;
+ data->spi_base = 0x1E620000;
+ data->jedec_id = 0xc2253a;
+ data->cs = 0;
+ data->node = "/machine/soc/fmc/ssi.0/child[0]";
+ /* beyond 16MB */
+ data->page_addr = 0x14000 * FLASH_PAGE_SIZE;
+
+ qtest_add_data_func("/ast2600/smc/read_jedec",
+ data, aspeed_smc_test_read_jedec);
+ qtest_add_data_func("/ast2600/smc/erase_sector",
+ data, aspeed_smc_test_erase_sector);
+ qtest_add_data_func("/ast2600/smc/erase_all",
+ data, aspeed_smc_test_erase_all);
+ qtest_add_data_func("/ast2600/smc/write_page",
+ data, aspeed_smc_test_write_page);
+ qtest_add_data_func("/ast2600/smc/read_page_mem",
+ data, aspeed_smc_test_read_page_mem);
+ qtest_add_data_func("/ast2600/smc/write_page_mem",
+ data, aspeed_smc_test_write_page_mem);
+ qtest_add_data_func("/ast2600/smc/read_status_reg",
+ data, aspeed_smc_test_read_status_reg);
+ qtest_add_data_func("/ast2600/smc/write_page_qpi",
+ data, aspeed_smc_test_write_page_qpi);
+}
+
+static void test_ast1030_evb(AspeedSMCTestData *data)
{
- uint32_t sector_size = 65536;
- uint32_t n_sectors = 512;
-
- spi_ce_ctrl(1 << CRTL_EXTENDED0);
- spi_conf(CONF_ENABLE_W0);
-
- /* top bottom bit is enabled */
- uint32_t bp_bits = 0b00100 << 3;
-
- for (int i = 0; i < 16; i++) {
- bp_bits = (((i & 0b1000) | 0b0100) << 3) | ((i & 0b0111) << 2);
-
- spi_ctrl_start_user();
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, BULK_ERASE);
- writeb(ASPEED_FLASH_BASE, WREN);
- writeb(ASPEED_FLASH_BASE, WRSR);
- writeb(ASPEED_FLASH_BASE, bp_bits);
- writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
- writeb(ASPEED_FLASH_BASE, WREN);
- spi_ctrl_stop_user();
-
- uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0;
- uint32_t protection_start = 0;
- uint32_t protection_end = num_protected_sectors;
-
- for (int sector = 0; sector < n_sectors; sector++) {
- uint32_t addr = sector * sector_size;
-
- assert_page_mem(addr, 0xffffffff);
- write_page_mem(addr, make_be32(0xabcdef12));
-
- uint32_t expected_value = protection_start <= sector
- && sector < protection_end
- ? 0xffffffff : 0xabcdef12;
+ int ret;
+ int fd;
- assert_page_mem(addr, expected_value);
- }
- }
+ fd = g_file_open_tmp("qtest.m25p80.w25q80bl.XXXXXX",
+ &data->tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, 1 * 1024 * 1024);
+ g_assert(ret == 0);
+ close(fd);
- flash_reset();
+ data->s = qtest_initf("-machine ast1030-evb "
+ "-drive file=%s,format=raw,if=mtd",
+ data->tmp_path);
+
+ /* fmc cs0 with w25q80bl flash */
+ data->flash_base = 0x80000000;
+ data->spi_base = 0x7E620000;
+ data->jedec_id = 0xef4014;
+ data->cs = 0;
+ data->node = "/machine/soc/fmc/ssi.0/child[0]";
+ /* beyond 512KB */
+ data->page_addr = 0x800 * FLASH_PAGE_SIZE;
+
+ qtest_add_data_func("/ast1030/smc/read_jedec",
+ data, aspeed_smc_test_read_jedec);
+ qtest_add_data_func("/ast1030/smc/erase_sector",
+ data, aspeed_smc_test_erase_sector);
+ qtest_add_data_func("/ast1030/smc/erase_all",
+ data, aspeed_smc_test_erase_all);
+ qtest_add_data_func("/ast1030/smc/write_page",
+ data, aspeed_smc_test_write_page);
+ qtest_add_data_func("/ast1030/smc/read_page_mem",
+ data, aspeed_smc_test_read_page_mem);
+ qtest_add_data_func("/ast1030/smc/write_page_mem",
+ data, aspeed_smc_test_write_page_mem);
+ qtest_add_data_func("/ast1030/smc/read_status_reg",
+ data, aspeed_smc_test_read_status_reg);
+ qtest_add_data_func("/ast1030/smc/write_page_qpi",
+ data, aspeed_smc_test_write_page_qpi);
}
int main(int argc, char **argv)
{
- g_autofree char *tmp_path = NULL;
+ AspeedSMCTestData palmetto_data;
+ AspeedSMCTestData ast2500_evb_data;
+ AspeedSMCTestData ast2600_evb_data;
+ AspeedSMCTestData ast1030_evb_data;
int ret;
- int fd;
g_test_init(&argc, &argv, NULL);
- fd = g_file_open_tmp("qtest.m25p80.XXXXXX", &tmp_path, NULL);
- g_assert(fd >= 0);
- ret = ftruncate(fd, FLASH_SIZE);
- g_assert(ret == 0);
- close(fd);
-
- global_qtest = qtest_initf("-m 256 -machine palmetto-bmc "
- "-drive file=%s,format=raw,if=mtd",
- tmp_path);
-
- qtest_add_func("/ast2400/smc/read_jedec", test_read_jedec);
- qtest_add_func("/ast2400/smc/erase_sector", test_erase_sector);
- qtest_add_func("/ast2400/smc/erase_all", test_erase_all);
- qtest_add_func("/ast2400/smc/write_page", test_write_page);
- qtest_add_func("/ast2400/smc/read_page_mem", test_read_page_mem);
- qtest_add_func("/ast2400/smc/write_page_mem", test_write_page_mem);
- qtest_add_func("/ast2400/smc/read_status_reg", test_read_status_reg);
- qtest_add_func("/ast2400/smc/status_reg_write_protection",
- test_status_reg_write_protection);
- qtest_add_func("/ast2400/smc/write_block_protect",
- test_write_block_protect);
- qtest_add_func("/ast2400/smc/write_block_protect_bottom_bit",
- test_write_block_protect_bottom_bit);
-
- flash_reset();
+ test_palmetto_bmc(&palmetto_data);
+ test_ast2500_evb(&ast2500_evb_data);
+ test_ast2600_evb(&ast2600_evb_data);
+ test_ast1030_evb(&ast1030_evb_data);
ret = g_test_run();
- qtest_quit(global_qtest);
- unlink(tmp_path);
+ qtest_quit(palmetto_data.s);
+ qtest_quit(ast2500_evb_data.s);
+ qtest_quit(ast2600_evb_data.s);
+ qtest_quit(ast1030_evb_data.s);
+ unlink(palmetto_data.tmp_path);
+ unlink(ast2500_evb_data.tmp_path);
+ unlink(ast2600_evb_data.tmp_path);
+ unlink(ast1030_evb_data.tmp_path);
+ g_free(palmetto_data.tmp_path);
+ g_free(ast2500_evb_data.tmp_path);
+ g_free(ast2600_evb_data.tmp_path);
+ g_free(ast1030_evb_data.tmp_path);
+
return ret;
}
diff --git a/tests/qtest/ast2700-gpio-test.c b/tests/qtest/ast2700-gpio-test.c
new file mode 100644
index 0000000..eeae9bf
--- /dev/null
+++ b/tests/qtest/ast2700-gpio-test.c
@@ -0,0 +1,95 @@
+/*
+ * QTest testcase for the ASPEED AST2700 GPIO Controller.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 ASPEED Technology Inc.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/timer.h"
+#include "qobject/qdict.h"
+#include "libqtest-single.h"
+
+#define AST2700_GPIO_BASE 0x14C0B000
+#define GPIOA0_CONTROL 0x180
+
+static void test_output_pins(const char *machine, const uint32_t base)
+{
+ QTestState *s = qtest_init(machine);
+ uint32_t offset = 0;
+ uint32_t value = 0;
+ uint32_t pin = 0;
+
+ for (char c = 'A'; c <= 'D'; c++) {
+ for (int i = 0; i < 8; i++) {
+ offset = base + (pin * 4);
+
+ /* output direction and output hi */
+ qtest_writel(s, offset, 0x00000003);
+ value = qtest_readl(s, offset);
+ g_assert_cmphex(value, ==, 0x00000003);
+
+ /* output direction and output low */
+ qtest_writel(s, offset, 0x00000002);
+ value = qtest_readl(s, offset);
+ g_assert_cmphex(value, ==, 0x00000002);
+ pin++;
+ }
+ }
+
+ qtest_quit(s);
+}
+
+static void test_input_pins(const char *machine, const uint32_t base)
+{
+ QTestState *s = qtest_init(machine);
+ char name[16];
+ uint32_t offset = 0;
+ uint32_t value = 0;
+ uint32_t pin = 0;
+
+ for (char c = 'A'; c <= 'D'; c++) {
+ for (int i = 0; i < 8; i++) {
+ sprintf(name, "gpio%c%d", c, i);
+ offset = base + (pin * 4);
+ /* input direction */
+ qtest_writel(s, offset, 0);
+
+ /* set input */
+ qtest_qom_set_bool(s, "/machine/soc/gpio", name, true);
+ value = qtest_readl(s, offset);
+ g_assert_cmphex(value, ==, 0x00002000);
+
+ /* clear input */
+ qtest_qom_set_bool(s, "/machine/soc/gpio", name, false);
+ value = qtest_readl(s, offset);
+ g_assert_cmphex(value, ==, 0);
+ pin++;
+ }
+ }
+
+ qtest_quit(s);
+}
+
+static void test_2700_input_pins(void)
+{
+ test_input_pins("-machine ast2700-evb",
+ AST2700_GPIO_BASE + GPIOA0_CONTROL);
+}
+
+static void test_2700_output_pins(void)
+{
+ test_output_pins("-machine ast2700-evb",
+ AST2700_GPIO_BASE + GPIOA0_CONTROL);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_add_func("/ast2700/gpio/input_pins", test_2700_input_pins);
+ qtest_add_func("/ast2700/gpio/output_pins", test_2700_output_pins);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/ast2700-hace-test.c b/tests/qtest/ast2700-hace-test.c
new file mode 100644
index 0000000..a400e29
--- /dev/null
+++ b/tests/qtest/ast2700-hace-test.c
@@ -0,0 +1,98 @@
+/*
+ * QTest testcase for the ASPEED Hash and Crypto Engine
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2025 ASPEED Technology Inc.
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "qemu/bitops.h"
+#include "aspeed-hace-utils.h"
+
+static const struct AspeedMasks as2700_masks = {
+ .src = 0x7fffffff,
+ .dest = 0x7ffffff8,
+ .key = 0x7ffffff8,
+ .len = 0x0fffffff,
+ .src_hi = 0x00000003,
+ .dest_hi = 0x00000003,
+ .key_hi = 0x00000003,
+};
+
+/* ast2700 */
+static void test_md5_ast2700(void)
+{
+ aspeed_test_md5("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha256_ast2700(void)
+{
+ aspeed_test_sha256("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha256_sg_ast2700(void)
+{
+ aspeed_test_sha256_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha384_ast2700(void)
+{
+ aspeed_test_sha384("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha384_sg_ast2700(void)
+{
+ aspeed_test_sha384_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha512_ast2700(void)
+{
+ aspeed_test_sha512("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha512_sg_ast2700(void)
+{
+ aspeed_test_sha512_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha256_accum_ast2700(void)
+{
+ aspeed_test_sha256_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha384_accum_ast2700(void)
+{
+ aspeed_test_sha384_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_sha512_accum_ast2700(void)
+{
+ aspeed_test_sha512_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000);
+}
+
+static void test_addresses_ast2700(void)
+{
+ aspeed_test_addresses("-machine ast2700a1-evb", 0x12070000, &as2700_masks);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_add_func("ast2700/hace/addresses", test_addresses_ast2700);
+ qtest_add_func("ast2700/hace/sha512", test_sha512_ast2700);
+ qtest_add_func("ast2700/hace/sha384", test_sha384_ast2700);
+ qtest_add_func("ast2700/hace/sha256", test_sha256_ast2700);
+ qtest_add_func("ast2700/hace/md5", test_md5_ast2700);
+
+ qtest_add_func("ast2700/hace/sha512_sg", test_sha512_sg_ast2700);
+ qtest_add_func("ast2700/hace/sha384_sg", test_sha384_sg_ast2700);
+ qtest_add_func("ast2700/hace/sha256_sg", test_sha256_sg_ast2700);
+
+ qtest_add_func("ast2700/hace/sha512_accum", test_sha512_accum_ast2700);
+ qtest_add_func("ast2700/hace/sha384_accum", test_sha384_accum_ast2700);
+ qtest_add_func("ast2700/hace/sha256_accum", test_sha256_accum_ast2700);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/ast2700-smc-test.c b/tests/qtest/ast2700-smc-test.c
new file mode 100644
index 0000000..62d538d
--- /dev/null
+++ b/tests/qtest/ast2700-smc-test.c
@@ -0,0 +1,72 @@
+/*
+ * QTest testcase for the M25P80 Flash using the ASPEED SPI Controller since
+ * AST2700.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 ASPEED Technology Inc.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "libqtest-single.h"
+#include "qemu/bitops.h"
+#include "aspeed-smc-utils.h"
+
+static void test_ast2700_evb(AspeedSMCTestData *data)
+{
+ int ret;
+ int fd;
+
+ fd = g_file_open_tmp("qtest.m25p80.w25q01jvq.XXXXXX",
+ &data->tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, 128 * 1024 * 1024);
+ g_assert(ret == 0);
+ close(fd);
+
+ data->s = qtest_initf("-machine ast2700-evb "
+ "-drive file=%s,format=raw,if=mtd",
+ data->tmp_path);
+
+ /* fmc cs0 with w25q01jvq flash */
+ data->flash_base = 0x100000000;
+ data->spi_base = 0x14000000;
+ data->jedec_id = 0xef4021;
+ data->cs = 0;
+ data->node = "/machine/soc/fmc/ssi.0/child[0]";
+ /* beyond 64MB */
+ data->page_addr = 0x40000 * FLASH_PAGE_SIZE;
+
+ qtest_add_data_func("/ast2700/smc/read_jedec",
+ data, aspeed_smc_test_read_jedec);
+ qtest_add_data_func("/ast2700/smc/erase_sector",
+ data, aspeed_smc_test_erase_sector);
+ qtest_add_data_func("/ast2700/smc/erase_all",
+ data, aspeed_smc_test_erase_all);
+ qtest_add_data_func("/ast2700/smc/write_page",
+ data, aspeed_smc_test_write_page);
+ qtest_add_data_func("/ast2700/smc/read_page_mem",
+ data, aspeed_smc_test_read_page_mem);
+ qtest_add_data_func("/ast2700/smc/write_page_mem",
+ data, aspeed_smc_test_write_page_mem);
+ qtest_add_data_func("/ast2700/smc/read_status_reg",
+ data, aspeed_smc_test_read_status_reg);
+ qtest_add_data_func("/ast2700/smc/write_page_qpi",
+ data, aspeed_smc_test_write_page_qpi);
+}
+
+int main(int argc, char **argv)
+{
+ AspeedSMCTestData ast2700_evb_data;
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+
+ test_ast2700_evb(&ast2700_evb_data);
+ ret = g_test_run();
+
+ qtest_quit(ast2700_evb_data.s);
+ unlink(ast2700_evb_data.tmp_path);
+ g_free(ast2700_evb_data.tmp_path);
+ return ret;
+}
diff --git a/tests/qtest/bcm2835-i2c-test.c b/tests/qtest/bcm2835-i2c-test.c
index 513ecce..1599194 100644
--- a/tests/qtest/bcm2835-i2c-test.c
+++ b/tests/qtest/bcm2835-i2c-test.c
@@ -81,7 +81,7 @@ static void test_i2c_read_write(gconstpointer data)
g_assert_cmpint(i2cdata, ==, 0xde);
i2cdata = readl(base_addr + BCM2835_I2C_FIFO);
- g_assert_cmpint(i2cdata, ==, 0xad);
+ g_assert_cmpint(i2cdata, ==, 0xa0);
/* Clear flags */
writel(base_addr + BCM2835_I2C_S, BCM2835_I2C_S_DONE | BCM2835_I2C_S_ERR |
diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c
index f4c4704..0b2bdf9 100644
--- a/tests/qtest/bios-tables-test.c
+++ b/tests/qtest/bios-tables-test.c
@@ -267,15 +267,6 @@ static void dump_aml_files(test_data *data, bool rebuild)
data->arch, data->machine,
sdt->aml, ext);
- /*
- * To keep test cases not failing before the DATA files are moved to
- * ${arch}/${machine} folder, add this check as well.
- */
- if (!g_file_test(aml_file, G_FILE_TEST_EXISTS)) {
- aml_file = g_strdup_printf("%s/%s/%.4s%s", data_dir,
- data->machine, sdt->aml, ext);
- }
-
if (!g_file_test(aml_file, G_FILE_TEST_EXISTS) &&
sdt->aml_len == exp_sdt->aml_len &&
!memcmp(sdt->aml, exp_sdt->aml, sdt->aml_len)) {
@@ -301,6 +292,7 @@ static void dump_aml_files(test_data *data, bool rebuild)
g_free(aml_file);
}
+ free_test_data(&exp_data);
}
static bool create_tmp_asl(AcpiSdtTable *sdt)
@@ -412,11 +404,6 @@ static GArray *load_expected_aml(test_data *data)
try_again:
aml_file = g_strdup_printf("%s/%s/%s/%.4s%s", data_dir, data->arch,
data->machine, sdt->aml, ext);
- if (!g_file_test(aml_file, G_FILE_TEST_EXISTS)) {
- aml_file = g_strdup_printf("%s/%s/%.4s%s", data_dir, data->machine,
- sdt->aml, ext);
- }
-
if (verbosity_level >= 2) {
fprintf(stderr, "Looking for expected file '%s'\n", aml_file);
}
@@ -973,7 +960,7 @@ static void test_acpi_piix4_tcg_bridge(void)
free_test_data(&data);
/* check that reboot/reset doesn't change any ACPI tables */
- qtest_qmp_send(data.qts, "{'execute':'system_reset' }");
+ qtest_system_reset(data.qts);
process_acpi_tables(&data);
free_test_data(&data);
}
@@ -1230,7 +1217,7 @@ static void test_acpi_q35_multif_bridge(void)
free_test_data(&data);
/* check that reboot/reset doesn't change any ACPI tables */
- qtest_qmp_send(data.qts, "{'execute':'system_reset' }");
+ qtest_system_reset(data.qts);
process_acpi_tables(&data);
free_test_data(&data);
}
@@ -1635,7 +1622,7 @@ static void test_acpi_aarch64_virt_tcg_memhp(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 256ULL * 1024 * 1024,
+ .scan_len = 256ULL * MiB,
};
data.variant = ".memhp";
@@ -1720,6 +1707,32 @@ static void test_acpi_microvm_ioapic2_tcg(void)
free_test_data(&data);
}
+static void test_acpi_riscv64_virt_tcg_numamem(void)
+{
+ test_data data = {
+ .machine = "virt",
+ .arch = "riscv64",
+ .tcg_only = true,
+ .uefi_fl1 = "pc-bios/edk2-riscv-code.fd",
+ .uefi_fl2 = "pc-bios/edk2-riscv-vars.fd",
+ .cd = "tests/data/uefi-boot-images/bios-tables-test.riscv64.iso.qcow2",
+ .ram_start = 0x80000000ULL,
+ .scan_len = 128ULL * MiB,
+ };
+
+ data.variant = ".numamem";
+ /*
+ * RHCT will have ISA string encoded. To reduce the effort
+ * of updating expected AML file for any new default ISA extension,
+ * use the profile rva22s64.
+ */
+ test_acpi_one(" -cpu rva22s64"
+ " -object memory-backend-ram,id=ram0,size=128M"
+ " -numa node,memdev=ram0",
+ &data);
+ free_test_data(&data);
+}
+
static void test_acpi_aarch64_virt_tcg_numamem(void)
{
test_data data = {
@@ -1730,7 +1743,7 @@ static void test_acpi_aarch64_virt_tcg_numamem(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
data.variant = ".numamem";
@@ -1752,7 +1765,7 @@ static void test_acpi_aarch64_virt_tcg_pxb(void)
.uefi_fl1 = "pc-bios/edk2-aarch64-code.fd",
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
/*
* While using -cdrom, the cdrom would be auto-plugged into pxb-pcie,
@@ -1828,7 +1841,7 @@ static void test_acpi_aarch64_virt_tcg_acpi_hmat(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
data.variant = ".acpihmatvirt";
@@ -1924,6 +1937,101 @@ static void test_acpi_q35_tcg_acpi_hmat_noinitiator(void)
free_test_data(&data);
}
+/* Test intended to hit corner cases of SRAT and HMAT */
+static void test_acpi_q35_tcg_acpi_hmat_generic_x(void)
+{
+ test_data data = {};
+
+ data.machine = MACHINE_Q35;
+ data.arch = "x86";
+ data.variant = ".acpihmat-generic-x";
+ test_acpi_one(" -machine hmat=on,cxl=on"
+ " -smp 3,sockets=3"
+ " -m 128M,maxmem=384M,slots=2"
+ " -device pcie-root-port,chassis=1,id=pci.1"
+ " -device pci-testdev,bus=pci.1,"
+ "multifunction=on,addr=00.0"
+ " -device pci-testdev,bus=pci.1,addr=00.1"
+ " -device pci-testdev,bus=pci.1,id=gidev,addr=00.2"
+ " -device pxb-cxl,bus_nr=64,bus=pcie.0,id=cxl.1"
+ " -object memory-backend-ram,size=64M,id=ram0"
+ " -object memory-backend-ram,size=64M,id=ram1"
+ " -numa node,nodeid=0,cpus=0,memdev=ram0"
+ " -numa node,nodeid=1"
+ " -object acpi-generic-initiator,id=gi0,pci-dev=gidev,node=1"
+ " -numa node,nodeid=2"
+ " -object acpi-generic-port,id=gp0,pci-bus=cxl.1,node=2"
+ " -numa node,nodeid=3,cpus=1"
+ " -numa node,nodeid=4,memdev=ram1"
+ " -numa node,nodeid=5,cpus=2"
+ " -numa hmat-lb,initiator=0,target=0,hierarchy=memory,"
+ "data-type=access-latency,latency=10"
+ " -numa hmat-lb,initiator=0,target=0,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=800M"
+ " -numa hmat-lb,initiator=0,target=2,hierarchy=memory,"
+ "data-type=access-latency,latency=100"
+ " -numa hmat-lb,initiator=0,target=2,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=0,target=4,hierarchy=memory,"
+ "data-type=access-latency,latency=100"
+ " -numa hmat-lb,initiator=0,target=4,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=0,target=5,hierarchy=memory,"
+ "data-type=access-latency,latency=200"
+ " -numa hmat-lb,initiator=0,target=5,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=400M"
+ " -numa hmat-lb,initiator=1,target=0,hierarchy=memory,"
+ "data-type=access-latency,latency=500"
+ " -numa hmat-lb,initiator=1,target=0,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=100M"
+ " -numa hmat-lb,initiator=1,target=2,hierarchy=memory,"
+ "data-type=access-latency,latency=50"
+ " -numa hmat-lb,initiator=1,target=2,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=400M"
+ " -numa hmat-lb,initiator=1,target=4,hierarchy=memory,"
+ "data-type=access-latency,latency=50"
+ " -numa hmat-lb,initiator=1,target=4,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=800M"
+ " -numa hmat-lb,initiator=1,target=5,hierarchy=memory,"
+ "data-type=access-latency,latency=500"
+ " -numa hmat-lb,initiator=1,target=5,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=100M"
+ " -numa hmat-lb,initiator=3,target=0,hierarchy=memory,"
+ "data-type=access-latency,latency=20"
+ " -numa hmat-lb,initiator=3,target=0,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=400M"
+ " -numa hmat-lb,initiator=3,target=2,hierarchy=memory,"
+ "data-type=access-latency,latency=80"
+ " -numa hmat-lb,initiator=3,target=2,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=3,target=4,hierarchy=memory,"
+ "data-type=access-latency,latency=80"
+ " -numa hmat-lb,initiator=3,target=4,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=3,target=5,hierarchy=memory,"
+ "data-type=access-latency,latency=20"
+ " -numa hmat-lb,initiator=3,target=5,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=400M"
+ " -numa hmat-lb,initiator=5,target=0,hierarchy=memory,"
+ "data-type=access-latency,latency=20"
+ " -numa hmat-lb,initiator=5,target=0,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=400M"
+ " -numa hmat-lb,initiator=5,target=2,hierarchy=memory,"
+ "data-type=access-latency,latency=80"
+ " -numa hmat-lb,initiator=5,target=4,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=5,target=4,hierarchy=memory,"
+ "data-type=access-latency,latency=80"
+ " -numa hmat-lb,initiator=5,target=2,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=200M"
+ " -numa hmat-lb,initiator=5,target=5,hierarchy=memory,"
+ "data-type=access-latency,latency=10"
+ " -numa hmat-lb,initiator=5,target=5,hierarchy=memory,"
+ "data-type=access-bandwidth,bandwidth=800M",
+ &data);
+ free_test_data(&data);
+}
+
#ifdef CONFIG_POSIX
static void test_acpi_erst(const char *machine, const char *arch)
{
@@ -1977,6 +2085,28 @@ static void test_acpi_microvm_acpi_erst(void)
}
#endif /* CONFIG_POSIX */
+static void test_acpi_riscv64_virt_tcg(void)
+{
+ test_data data = {
+ .machine = "virt",
+ .arch = "riscv64",
+ .tcg_only = true,
+ .uefi_fl1 = "pc-bios/edk2-riscv-code.fd",
+ .uefi_fl2 = "pc-bios/edk2-riscv-vars.fd",
+ .cd = "tests/data/uefi-boot-images/bios-tables-test.riscv64.iso.qcow2",
+ .ram_start = 0x80000000ULL,
+ .scan_len = 128ULL * MiB,
+ };
+
+ /*
+ * The RHCT will have the ISA string encoded. To reduce the effort
+ * of updating the expected AML file for any new default ISA extension,
+ * use the profile rva22s64.
+ */
+ test_acpi_one("-cpu rva22s64 ", &data);
+ free_test_data(&data);
+}
+
static void test_acpi_aarch64_virt_tcg(void)
{
test_data data = {
@@ -1987,7 +2117,7 @@ static void test_acpi_aarch64_virt_tcg(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
data.smbios_cpu_max_speed = 2900;
@@ -2008,7 +2138,7 @@ static void test_acpi_aarch64_virt_tcg_topology(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
test_acpi_one("-cpu cortex-a57 "
@@ -2093,7 +2223,7 @@ static void test_acpi_aarch64_virt_viot(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
test_acpi_one("-cpu cortex-a57 "
@@ -2277,7 +2407,7 @@ static void test_acpi_aarch64_virt_oem_fields(void)
.uefi_fl2 = "pc-bios/edk2-arm-vars.fd",
.cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2",
.ram_start = 0x40000000ULL,
- .scan_len = 128ULL * 1024 * 1024,
+ .scan_len = 128ULL * MiB,
};
char *args;
@@ -2380,6 +2510,8 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/q35/nohpet", test_acpi_q35_tcg_nohpet);
qtest_add_func("acpi/q35/acpihmat-noinitiator",
test_acpi_q35_tcg_acpi_hmat_noinitiator);
+ qtest_add_func("acpi/q35/acpihmat-genericx",
+ test_acpi_q35_tcg_acpi_hmat_generic_x);
/* i386 does not support memory hotplug */
if (strcmp(arch, "i386")) {
@@ -2455,6 +2587,12 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/virt/viot", test_acpi_aarch64_virt_viot);
}
}
+ } else if (strcmp(arch, "riscv64") == 0) {
+ if (has_tcg && qtest_has_device("virtio-blk-pci")) {
+ qtest_add_func("acpi/virt", test_acpi_riscv64_virt_tcg);
+ qtest_add_func("acpi/virt/numamem",
+ test_acpi_riscv64_virt_tcg_numamem);
+ }
}
ret = g_test_run();
boot_sector_cleanup(disk);
diff --git a/tests/qtest/boot-order-test.c b/tests/qtest/boot-order-test.c
index 8f2b6ef..74d6b82 100644
--- a/tests/qtest/boot-order-test.c
+++ b/tests/qtest/boot-order-test.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "libqos/fw_cfg.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "standard-headers/linux/qemu_fw_cfg.h"
typedef struct {
@@ -31,7 +31,7 @@ static void test_a_boot_order(const char *machine,
uint64_t actual;
QTestState *qts;
- if (machine && !qtest_has_machine(machine)) {
+ if (!qtest_has_machine(machine)) {
g_test_skip("Machine is not available");
return;
}
@@ -40,12 +40,7 @@ static void test_a_boot_order(const char *machine,
machine ?: "", test_args);
actual = read_boot_order(qts);
g_assert_cmphex(actual, ==, expected_boot);
- qtest_qmp_assert_success(qts, "{ 'execute': 'system_reset' }");
- /*
- * system_reset only requests reset. We get a RESET event after
- * the actual reset completes. Need to wait for that.
- */
- qtest_qmp_eventwait(qts, "RESET");
+ qtest_system_reset(qts);
actual = read_boot_order(qts);
g_assert_cmphex(actual, ==, expected_reboot);
qtest_quit(qts);
@@ -107,7 +102,7 @@ static const boot_order_test test_cases_pc[] = {
static void test_pc_boot_order(void)
{
- test_boot_orders(NULL, read_boot_order_pc, test_cases_pc);
+ test_boot_orders("pc", read_boot_order_pc, test_cases_pc);
}
static uint64_t read_boot_order_pmac(QTestState *qts)
diff --git a/tests/qtest/boot-serial-test.c b/tests/qtest/boot-serial-test.c
index 3b92fa5..a05d26e 100644
--- a/tests/qtest/boot-serial-test.c
+++ b/tests/qtest/boot-serial-test.c
@@ -70,18 +70,23 @@ static const uint8_t kernel_plml605[] = {
};
static const uint8_t bios_raspi2[] = {
- 0x08, 0x30, 0x9f, 0xe5, /* ldr r3,[pc,#8] Get base */
- 0x54, 0x20, 0xa0, 0xe3, /* mov r2,#'T' */
- 0x00, 0x20, 0xc3, 0xe5, /* strb r2,[r3] */
- 0xfb, 0xff, 0xff, 0xea, /* b loop */
- 0x00, 0x10, 0x20, 0x3f, /* 0x3f201000 = UART0 base addr */
+ 0x10, 0x30, 0x9f, 0xe5, /* ldr r3, [pc, #16] Get &UART0 */
+ 0x10, 0x20, 0x9f, 0xe5, /* ldr r2, [pc, #16] Get &CR */
+ 0xb0, 0x23, 0xc3, 0xe1, /* strh r2, [r3, #48] Set CR */
+ 0x54, 0x20, 0xa0, 0xe3, /* mov r2, #'T' */
+ 0x00, 0x20, 0xc3, 0xe5, /* loop: strb r2, [r3] *TXDAT = 'T' */
+ 0xff, 0xff, 0xff, 0xea, /* b -4 (loop) */
+ 0x00, 0x10, 0x20, 0x3f, /* UART0: 0x3f201000 */
+ 0x01, 0x01, 0x00, 0x00, /* CR: 0x101 = UARTEN|TXE */
};
static const uint8_t kernel_aarch64[] = {
- 0x81, 0x0a, 0x80, 0x52, /* mov w1, #0x54 */
- 0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 */
- 0x41, 0x00, 0x00, 0x39, /* strb w1, [x2] */
- 0xfd, 0xff, 0xff, 0x17, /* b -12 (loop) */
+ 0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 Load UART0 */
+ 0x21, 0x20, 0x80, 0x52, /* mov w1, 0x101 CR = UARTEN|TXE */
+ 0x41, 0x60, 0x00, 0x79, /* strh w1, [x2, #48] Set CR */
+ 0x81, 0x0a, 0x80, 0x52, /* mov w1, #'T' */
+ 0x41, 0x00, 0x00, 0x39, /* loop: strb w1, [x2] *TXDAT = 'T' */
+ 0xff, 0xff, 0xff, 0x17, /* b -4 (loop) */
};
static const uint8_t kernel_nrf51[] = {
@@ -184,8 +189,6 @@ static const testdef_t tests[] = {
{ "microblazeel", "petalogix-ml605", "", "TT",
sizeof(kernel_plml605), kernel_plml605 },
{ "arm", "raspi2b", "", "TT", sizeof(bios_raspi2), 0, bios_raspi2 },
- /* For hppa, force bios to output to serial by disabling graphics. */
- { "hppa", "hppa", "-vga none", "SeaBIOS wants SYSTEM HALT" },
{ "aarch64", "virt", "-cpu max", "TT", sizeof(kernel_aarch64),
kernel_aarch64 },
{ "arm", "microbit", "", "T", sizeof(kernel_nrf51), kernel_nrf51 },
diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c
index 5d89e62..56e2d28 100644
--- a/tests/qtest/cdrom-test.c
+++ b/tests/qtest/cdrom-test.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
#include "boot-sector.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
static char isoimage[] = "cdrom-boot-iso-XXXXXX";
@@ -135,13 +135,35 @@ static void add_x86_tests(void)
return;
}
- qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot);
- if (qtest_has_device("virtio-scsi-ccw")) {
- qtest_add_data_func("cdrom/boot/virtio-scsi",
- "-device virtio-scsi -device scsi-cd,drive=cdr "
- "-blockdev file,node-name=cdr,filename=",
- test_cdboot);
+ if (qtest_has_machine("pc")) {
+ qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot);
+ if (qtest_has_device("virtio-scsi-ccw")) {
+ qtest_add_data_func("cdrom/boot/virtio-scsi",
+ "-device virtio-scsi -device scsi-cd,drive=cdr "
+ "-blockdev file,node-name=cdr,filename=",
+ test_cdboot);
+ }
+
+ if (qtest_has_device("am53c974")) {
+ qtest_add_data_func("cdrom/boot/am53c974",
+ "-device am53c974 -device scsi-cd,drive=cd1 "
+ "-drive if=none,id=cd1,format=raw,file=",
+ test_cdboot);
+ }
+ if (qtest_has_device("dc390")) {
+ qtest_add_data_func("cdrom/boot/dc390",
+ "-device dc390 -device scsi-cd,drive=cd1 "
+ "-blockdev file,node-name=cd1,filename=",
+ test_cdboot);
+ }
+ if (qtest_has_device("lsi53c895a")) {
+ qtest_add_data_func("cdrom/boot/lsi53c895a",
+ "-device lsi53c895a -device scsi-cd,drive=cd1 "
+ "-blockdev file,node-name=cd1,filename=",
+ test_cdboot);
+ }
}
+
/*
* Unstable CI test under load
* See https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05509.html
@@ -150,35 +172,20 @@ static void add_x86_tests(void)
qtest_add_data_func("cdrom/boot/isapc", "-M isapc "
"-drive if=ide,media=cdrom,file=", test_cdboot);
}
- if (qtest_has_device("am53c974")) {
- qtest_add_data_func("cdrom/boot/am53c974",
- "-device am53c974 -device scsi-cd,drive=cd1 "
- "-drive if=none,id=cd1,format=raw,file=",
- test_cdboot);
- }
- if (qtest_has_device("dc390")) {
- qtest_add_data_func("cdrom/boot/dc390",
- "-device dc390 -device scsi-cd,drive=cd1 "
- "-blockdev file,node-name=cd1,filename=",
- test_cdboot);
- }
- if (qtest_has_device("lsi53c895a")) {
- qtest_add_data_func("cdrom/boot/lsi53c895a",
- "-device lsi53c895a -device scsi-cd,drive=cd1 "
- "-blockdev file,node-name=cd1,filename=",
- test_cdboot);
- }
- if (qtest_has_device("megasas")) {
- qtest_add_data_func("cdrom/boot/megasas", "-M q35 "
- "-device megasas -device scsi-cd,drive=cd1 "
- "-blockdev file,node-name=cd1,filename=",
- test_cdboot);
- }
- if (qtest_has_device("megasas-gen2")) {
- qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 "
- "-device megasas-gen2 -device scsi-cd,drive=cd1 "
- "-blockdev file,node-name=cd1,filename=",
- test_cdboot);
+
+ if (qtest_has_machine("q35")) {
+ if (qtest_has_device("megasas")) {
+ qtest_add_data_func("cdrom/boot/megasas", "-M q35 "
+ "-device megasas -device scsi-cd,drive=cd1 "
+ "-blockdev file,node-name=cd1,filename=",
+ test_cdboot);
+ }
+ if (qtest_has_device("megasas-gen2")) {
+ qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 "
+ "-device megasas-gen2 -device scsi-cd,drive=cd1 "
+ "-blockdev file,node-name=cd1,filename=",
+ test_cdboot);
+ }
}
}
@@ -206,6 +213,30 @@ static void add_s390x_tests(void)
"-drive driver=null-co,read-zeroes=on,if=none,id=d1 "
"-device virtio-blk,drive=d2,bootindex=1 "
"-drive if=none,id=d2,media=cdrom,file=", test_cdboot);
+ qtest_add_data_func("cdrom/boot/as-fallback-device",
+ "-device virtio-serial -device virtio-scsi "
+ "-device virtio-blk,drive=d1,bootindex=1 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d1 "
+ "-device virtio-blk,drive=d2,bootindex=2 "
+ "-drive if=none,id=d2,media=cdrom,file=", test_cdboot);
+ qtest_add_data_func("cdrom/boot/as-last-option",
+ "-device virtio-serial -device virtio-scsi "
+ "-device virtio-blk,drive=d1,bootindex=1 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d1 "
+ "-device virtio-blk,drive=d2,bootindex=2 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d2 "
+ "-device virtio-blk,drive=d3,bootindex=3 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d3 "
+ "-device scsi-hd,drive=d4,bootindex=4 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d4 "
+ "-device scsi-hd,drive=d5,bootindex=5 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d5 "
+ "-device virtio-blk,drive=d6,bootindex=6 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d6 "
+ "-device scsi-hd,drive=d7,bootindex=7 "
+ "-drive driver=null-co,read-zeroes=on,if=none,id=d7 "
+ "-device scsi-cd,drive=d8,bootindex=8 "
+ "-drive if=none,id=d8,media=cdrom,file=", test_cdboot);
if (qtest_has_device("x-terminal3270")) {
qtest_add_data_func("cdrom/boot/without-bootindex",
"-device virtio-scsi -device virtio-serial "
diff --git a/tests/qtest/cmsdk-apb-watchdog-test.c b/tests/qtest/cmsdk-apb-watchdog-test.c
index 00b5dbb..cd0c602 100644
--- a/tests/qtest/cmsdk-apb-watchdog-test.c
+++ b/tests/qtest/cmsdk-apb-watchdog-test.c
@@ -15,14 +15,12 @@
*/
#include "qemu/osdep.h"
+#include "exec/hwaddr.h"
#include "qemu/bitops.h"
#include "libqtest-single.h"
-/*
- * lm3s811evb watchdog; at board startup this runs at 200MHz / 16 == 12.5MHz,
- * which is 80ns per tick.
- */
#define WDOG_BASE 0x40000000
+#define WDOG_BASE_MPS2 0x40008000
#define WDOGLOAD 0
#define WDOGVALUE 4
@@ -37,39 +35,97 @@
#define SYSDIV_SHIFT 23
#define SYSDIV_LENGTH 4
-static void test_watchdog(void)
+#define WDOGLOAD_DEFAULT 0xFFFFFFFF
+#define WDOGVALUE_DEFAULT 0xFFFFFFFF
+
+typedef struct CMSDKAPBWatchdogTestArgs {
+ int64_t tick;
+ hwaddr wdog_base;
+ const char *machine;
+} CMSDKAPBWatchdogTestArgs;
+
+enum {
+ MACHINE_LM3S811EVB,
+ MACHINE_MPS2_AN385,
+};
+
+/*
+ * lm3s811evb watchdog; at board startup this runs at 200MHz / 16 == 12.5MHz,
+ * which is 80ns per tick.
+ *
+ * IoTKit/ARMSSE dualtimer; driven at 25MHz in mps2-an385, so 40ns per tick
+ */
+static const CMSDKAPBWatchdogTestArgs machine_info[] = {
+ [MACHINE_LM3S811EVB] = {
+ .tick = 80,
+ .wdog_base = WDOG_BASE,
+ .machine = "lm3s811evb",
+ },
+ [MACHINE_MPS2_AN385] = {
+ .tick = 40,
+ .wdog_base = WDOG_BASE_MPS2,
+ .machine = "mps2-an385",
+ },
+};
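/*
 * Editor's sketch (not part of the patch): the tick values above follow from
 * the clock rates quoted in the comment -- lm3s811evb: 200 MHz / 16 = 12.5 MHz,
 * so 10^9 / 12.5e6 = 80 ns per tick; mps2-an385: 10^9 / 25e6 = 40 ns per tick.
 * The helper name below is illustrative only.
 */
static inline int64_t wdog_tick_period_ns(int64_t wdog_clk_hz)
{
    return 1000000000LL / wdog_clk_hz; /* 12500000 -> 80, 25000000 -> 40 */
}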
+
+static void system_reset(QTestState *qtest)
{
- g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0);
+ QDict *resp;
- writel(WDOG_BASE + WDOGCONTROL, 1);
- writel(WDOG_BASE + WDOGLOAD, 1000);
+ resp = qtest_qmp(qtest, "{'execute': 'system_reset'}");
+ g_assert(qdict_haskey(resp, "return"));
+ qobject_unref(resp);
+ qtest_qmp_eventwait(qtest, "RESET");
+}
+
+static void test_watchdog(const void *ptr)
+{
+ const CMSDKAPBWatchdogTestArgs *args = ptr;
+ hwaddr wdog_base = args->wdog_base;
+ int64_t tick = args->tick;
+ g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine);
+ qtest_start(cmdline);
+
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ writel(wdog_base + WDOGCONTROL, 1);
+ writel(wdog_base + WDOGLOAD, 1000);
/* Step to just past the 500th tick */
- clock_step(500 * 80 + 1);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 500);
+ clock_step(500 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 500);
/* Just past the 1000th tick: timer should have fired */
- clock_step(500 * 80);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 1);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 0);
+ clock_step(500 * tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 0);
/* VALUE reloads at following tick */
- clock_step(80);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000);
+ clock_step(tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1000);
/* Writing any value to WDOGINTCLR clears the interrupt and reloads */
- clock_step(500 * 80);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 500);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 1);
- writel(WDOG_BASE + WDOGINTCLR, 0);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000);
- g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0);
+ clock_step(500 * tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 500);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 1);
+ writel(wdog_base + WDOGINTCLR, 0);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1000);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ qtest_end();
}
-static void test_clock_change(void)
+/*
+ * This test can only be executed on the stellaris board, since it relies on a
+ * component of the board to change the clocking parameters of the watchdog.
+ */
+static void test_clock_change(const void *ptr)
{
uint32_t rcc;
+ const CMSDKAPBWatchdogTestArgs *args = ptr;
+ g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine);
+ qtest_start(cmdline);
/*
* Test that writing to the stellaris board's RCC register to
@@ -109,23 +165,231 @@ static void test_clock_change(void)
writel(WDOG_BASE + WDOGINTCLR, 0);
g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000);
g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0);
+
+ qtest_end();
}
-int main(int argc, char **argv)
+/* Tests that the counter is not running after reset. */
+static void test_watchdog_reset(const void *ptr)
{
- int r;
+ const CMSDKAPBWatchdogTestArgs *args = ptr;
+ hwaddr wdog_base = args->wdog_base;
+ int64_t tick = args->tick;
+ g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine);
+ qtest_start(cmdline);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
- g_test_init(&argc, &argv, NULL);
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
+
+ g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0);
+
+ /*
+ * The counter should not be running if WDOGCONTROL.INTEN has not been set,
+ * as is the case after a cold reset.
+ */
+ clock_step(15 * tick + 1);
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
+
+ /* Let the counter run before reset */
+ writel(wdog_base + WDOGLOAD, 3000);
+ writel(wdog_base + WDOGCONTROL, 1);
+
+ /* Verify it is running */
+ clock_step(1000 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 3000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 2000);
+
+ system_reset(global_qtest);
- qtest_start("-machine lm3s811evb");
+ /* Check defaults after reset */
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
- qtest_add_func("/cmsdk-apb-watchdog/watchdog", test_watchdog);
- qtest_add_func("/cmsdk-apb-watchdog/watchdog_clock_change",
- test_clock_change);
+ /* The counter should not be running after reset. */
+ clock_step(1000 * tick + 1);
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
- r = g_test_run();
+ qtest_end();
+}
+
+/*
+ * Tests that INTEN works as the counter enable, based on this description:
+ *
+ * Enable the interrupt event, WDOGINT. Set HIGH to enable the counter and the
+ * interrupt, or LOW to disable the counter and interrupt. Reloads the counter
+ * from the value in WDOGLOAD when the interrupt is enabled, after previously
+ * being disabled.
+ */
+static void test_watchdog_inten(const void *ptr)
+{
+ const CMSDKAPBWatchdogTestArgs *args = ptr;
+ hwaddr wdog_base = args->wdog_base;
+ int64_t tick = args->tick;
+ g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine);
+ qtest_start(cmdline);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
+
+ /*
+ * When WDOGLOAD is written to, the count is immediately restarted from the
+ * new value.
+ *
+ * Note: the counter should not be running as long as WDOGCONTROL.INTEN is
+ * not set
+ */
+ writel(wdog_base + WDOGLOAD, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000);
+ clock_step(500 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000);
+
+ /* Set HIGH WDOGCONTROL.INTEN to enable the counter and the interrupt */
+ writel(wdog_base + WDOGCONTROL, 1);
+ clock_step(500 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500);
+
+ /* or LOW to disable the counter and interrupt. */
+ writel(wdog_base + WDOGCONTROL, 0);
+ clock_step(100 * tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500);
+
+ /*
+ * Reloads the counter from the value in WDOGLOAD when the interrupt is
+ * enabled, after previously being disabled.
+ */
+ writel(wdog_base + WDOGCONTROL, 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000);
+
+ /* Test counter is still on */
+ clock_step(50 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3950);
+
+ /*
+ * When WDOGLOAD is written to, the count is immediately restarted from the
+ * new value.
+ *
+ * Note: the counter should be running since WDOGCONTROL.INTEN is set
+ */
+ writel(wdog_base + WDOGLOAD, 5000);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 5000);
+ clock_step(4999 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ /* Finally disable and check the conditions don't change */
+ writel(wdog_base + WDOGCONTROL, 0);
+ clock_step(10 * tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ qtest_end();
+}
+
+/*
+ * Tests the following custom behavior:
+ *
+ * The Luminary version of this device ignores writes to this register after the
+ * guest has enabled interrupts (so they can only be disabled again via reset).
+ */
+static void test_watchdog_inten_luminary(const void *ptr)
+{
+ const CMSDKAPBWatchdogTestArgs *args = ptr;
+ hwaddr wdog_base = args->wdog_base;
+ int64_t tick = args->tick;
+ g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine);
+ qtest_start(cmdline);
+ g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0);
+
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
+
+ /*
+ * When WDOGLOAD is written to, the count is immediately restarted from the
+ * new value.
+ *
+ * Note: the counter should not be running as long as WDOGCONTROL.INTEN is
+ * not set
+ */
+ writel(wdog_base + WDOGLOAD, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000);
+ clock_step(500 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000);
+
+ /* Set HIGH WDOGCONTROL.INTEN to enable the counter and the interrupt */
+ writel(wdog_base + WDOGCONTROL, 1);
+ clock_step(500 * tick + 1);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500);
+
+ /*
+ * The Luminary version of this device ignores writes to this register after
+ * the guest has enabled interrupts
+ */
+ writel(wdog_base + WDOGCONTROL, 0);
+ clock_step(100 * tick);
+ g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000);
+ g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3400);
+ g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0x1);
+
+ /* They can only be disabled again via reset */
+ system_reset(global_qtest);
+
+ /* Check defaults after reset */
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0);
+
+ /* The counter should not be running after reset. */
+ clock_step(1000 * tick + 1);
+ g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT);
+ g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT);
qtest_end();
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_set_nonfatal_assertions();
+
+ if (qtest_has_machine(machine_info[MACHINE_LM3S811EVB].machine)) {
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog",
+ &machine_info[MACHINE_LM3S811EVB], test_watchdog);
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_clock_change",
+ &machine_info[MACHINE_LM3S811EVB],
+ test_clock_change);
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_reset",
+ &machine_info[MACHINE_LM3S811EVB],
+ test_watchdog_reset);
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_inten_luminary",
+ &machine_info[MACHINE_LM3S811EVB],
+ test_watchdog_inten_luminary);
+ }
+ if (qtest_has_machine(machine_info[MACHINE_MPS2_AN385].machine)) {
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_mps2",
+ &machine_info[MACHINE_MPS2_AN385], test_watchdog);
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_reset_mps2",
+ &machine_info[MACHINE_MPS2_AN385],
+ test_watchdog_reset);
+ qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_inten",
+ &machine_info[MACHINE_MPS2_AN385],
+ test_watchdog_inten);
+ }
- return r;
+ return g_test_run();
}
diff --git a/tests/qtest/cpu-plug-test.c b/tests/qtest/cpu-plug-test.c
index 7f5dd5f..44d7046 100644
--- a/tests/qtest/cpu-plug-test.c
+++ b/tests/qtest/cpu-plug-test.c
@@ -10,8 +10,8 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
struct PlugTestData {
char *machine;
@@ -156,6 +156,28 @@ static void add_s390x_test_case(const char *mname)
g_free(path);
}
+static void add_loongarch_test_case(const char *mname)
+{
+ char *path;
+ PlugTestData *data;
+
+ data = g_new(PlugTestData, 1);
+ data->machine = g_strdup(mname);
+ data->cpu_model = "la464";
+ data->device_model = g_strdup("la464-loongarch-cpu");
+ data->sockets = 1;
+ data->cores = 3;
+ data->threads = 1;
+ data->maxcpus = data->sockets * data->cores * data->threads;
+
+ path = g_strdup_printf("cpu-plug/%s/device-add/%ux%ux%u&maxcpus=%u",
+ mname, data->sockets, data->cores,
+ data->threads, data->maxcpus);
+ qtest_add_data_func_full(path, data, test_plug_with_device_add,
+ test_data_free);
+ g_free(path);
+}
+
int main(int argc, char **argv)
{
const char *arch = qtest_get_arch();
@@ -168,6 +190,8 @@ int main(int argc, char **argv)
qtest_cb_for_every_machine(add_pseries_test_case, g_test_quick());
} else if (g_str_equal(arch, "s390x")) {
qtest_cb_for_every_machine(add_s390x_test_case, g_test_quick());
+ } else if (g_str_equal(arch, "loongarch64")) {
+ add_loongarch_test_case("virt");
}
return g_test_run();
diff --git a/tests/qtest/dbus-display-test.c b/tests/qtest/dbus-display-test.c
index 0390bdc..f7fc873 100644
--- a/tests/qtest/dbus-display-test.c
+++ b/tests/qtest/dbus-display-test.c
@@ -2,9 +2,14 @@
#include "qemu/sockets.h"
#include "qemu/dbus.h"
#include "qemu/sockets.h"
+#include "glib.h"
+#include "glibconfig.h"
#include <gio/gio.h>
#include <gio/gunixfdlist.h>
#include "libqtest.h"
+#ifndef WIN32
+#include <sys/mman.h>
+#endif
#include "ui/dbus-display1.h"
static GDBusConnection*
@@ -82,6 +87,7 @@ typedef struct TestDBusConsoleRegister {
GThread *thread;
GDBusConnection *listener_conn;
GDBusObjectManagerServer *server;
+ bool with_map;
} TestDBusConsoleRegister;
static gboolean listener_handle_scanout(
@@ -94,13 +100,49 @@ static gboolean listener_handle_scanout(
GVariant *arg_data,
TestDBusConsoleRegister *test)
{
+ if (!test->with_map) {
+ g_main_loop_quit(test->loop);
+ }
+
+ return DBUS_METHOD_INVOCATION_HANDLED;
+}
+
+#ifndef WIN32
+static gboolean listener_handle_scanout_map(
+ QemuDBusDisplay1ListenerUnixMap *object,
+ GDBusMethodInvocation *invocation,
+ GUnixFDList *fd_list,
+ GVariant *arg_handle,
+ guint arg_offset,
+ guint arg_width,
+ guint arg_height,
+ guint arg_stride,
+ guint arg_pixman_format,
+ TestDBusConsoleRegister *test)
+{
+ int fd = -1;
+ gint32 handle = g_variant_get_handle(arg_handle);
+ g_autoptr(GError) error = NULL;
+ void *addr = NULL;
+ size_t len = arg_height * arg_stride;
+
+ g_assert_cmpuint(g_unix_fd_list_get_length(fd_list), ==, 1);
+ fd = g_unix_fd_list_get(fd_list, handle, &error);
+ g_assert_no_error(error);
+
+ addr = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, arg_offset);
+ g_assert_no_errno(addr == MAP_FAILED ? -1 : 0);
+ g_assert_no_errno(munmap(addr, len));
+
g_main_loop_quit(test->loop);
+ close(fd);
return DBUS_METHOD_INVOCATION_HANDLED;
}
+#endif
static void
-test_dbus_console_setup_listener(TestDBusConsoleRegister *test)
+test_dbus_console_setup_listener(TestDBusConsoleRegister *test, bool with_map)
{
g_autoptr(GDBusObjectSkeleton) listener = NULL;
g_autoptr(QemuDBusDisplay1ListenerSkeleton) iface = NULL;
@@ -114,6 +156,25 @@ test_dbus_console_setup_listener(TestDBusConsoleRegister *test)
NULL);
g_dbus_object_skeleton_add_interface(listener,
G_DBUS_INTERFACE_SKELETON(iface));
+ if (with_map) {
+#ifdef WIN32
+ g_test_skip("map test lacking on win32");
+ return;
+#else
+ g_autoptr(QemuDBusDisplay1ListenerUnixMapSkeleton) iface_map =
+ QEMU_DBUS_DISPLAY1_LISTENER_UNIX_MAP_SKELETON(
+ qemu_dbus_display1_listener_unix_map_skeleton_new());
+
+ g_object_connect(iface_map,
+ "signal::handle-scanout-map", listener_handle_scanout_map, test,
+ NULL);
+ g_dbus_object_skeleton_add_interface(listener,
+ G_DBUS_INTERFACE_SKELETON(iface_map));
+ g_object_set(iface, "interfaces",
+ (const gchar *[]) { "org.qemu.Display1.Listener.Unix.Map", NULL },
+ NULL);
+#endif
+ }
g_dbus_object_manager_server_export(test->server, listener);
g_dbus_object_manager_server_set_connection(test->server,
test->listener_conn);
@@ -145,7 +206,7 @@ test_dbus_console_registered(GObject *source_object,
g_assert_no_error(err);
test->listener_conn = g_thread_join(test->thread);
- test_dbus_console_setup_listener(test);
+ test_dbus_console_setup_listener(test, test->with_map);
}
static gpointer
@@ -155,7 +216,7 @@ test_dbus_p2p_server_setup_thread(gpointer data)
}
static void
-test_dbus_display_console(void)
+test_dbus_display_console(const void* data)
{
g_autoptr(GError) err = NULL;
g_autoptr(GDBusConnection) conn = NULL;
@@ -163,7 +224,7 @@ test_dbus_display_console(void)
g_autoptr(GMainLoop) loop = NULL;
QTestState *qts = NULL;
int pair[2];
- TestDBusConsoleRegister test = { 0, };
+ TestDBusConsoleRegister test = { 0, .with_map = GPOINTER_TO_INT(data) };
#ifdef WIN32
WSAPROTOCOL_INFOW info;
g_autoptr(GVariant) listener = NULL;
@@ -299,7 +360,8 @@ main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
qtest_add_func("/dbus-display/vm", test_dbus_display_vm);
- qtest_add_func("/dbus-display/console", test_dbus_display_console);
+ qtest_add_data_func("/dbus-display/console", GINT_TO_POINTER(false), test_dbus_display_console);
+ qtest_add_data_func("/dbus-display/console/map", GINT_TO_POINTER(true), test_dbus_display_console);
qtest_add_func("/dbus-display/keyboard", test_dbus_display_keyboard);
return g_test_run();
diff --git a/tests/qtest/device-introspect-test.c b/tests/qtest/device-introspect-test.c
index 587da59..f84cec5 100644
--- a/tests/qtest/device-introspect-test.c
+++ b/tests/qtest/device-introspect-test.c
@@ -18,9 +18,9 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "libqtest.h"
const char common_args[] = "-nodefaults -machine none";
diff --git a/tests/qtest/device-plug-test.c b/tests/qtest/device-plug-test.c
index c6f3315..2707ee5 100644
--- a/tests/qtest/device-plug-test.c
+++ b/tests/qtest/device-plug-test.c
@@ -12,17 +12,8 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
-
-static void system_reset(QTestState *qtest)
-{
- QDict *resp;
-
- resp = qtest_qmp(qtest, "{'execute': 'system_reset'}");
- g_assert(qdict_haskey(resp, "return"));
- qobject_unref(resp);
-}
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
static void wait_device_deleted_event(QTestState *qtest, const char *id)
{
@@ -58,7 +49,7 @@ static void process_device_remove(QTestState *qtest, const char *id)
* handled, removing the device.
*/
qtest_qmp_device_del_send(qtest, id);
- system_reset(qtest);
+ qtest_system_reset_nowait(qtest);
wait_device_deleted_event(qtest, id);
}
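/*
 * Editor's sketch (not part of the patch): judging from the helper removed
 * above and the similar code removed in hd-geo-test.c below, the new libqtest
 * wrappers are presumed to behave roughly as follows --
 * qtest_system_reset_nowait() only issues the QMP command, while
 * qtest_system_reset() additionally waits for the RESET event.
 */
static void sketch_system_reset(QTestState *qts, bool wait_for_reset)
{
    QDict *resp = qtest_qmp(qts, "{'execute': 'system_reset'}");

    g_assert(qdict_haskey(resp, "return"));
    qobject_unref(resp);
    if (wait_for_reset) {
        qtest_qmp_eventwait(qts, "RESET");
    }
}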
diff --git a/tests/qtest/dm163-test.c b/tests/qtest/dm163-test.c
index 3161c92..4c8e654 100644
--- a/tests/qtest/dm163-test.c
+++ b/tests/qtest/dm163-test.c
@@ -182,6 +182,8 @@ static void test_dm163_gpio_connection(void)
g_assert_false(qtest_get_irq(qts, LAT_B));
g_assert_false(qtest_get_irq(qts, SELBK));
g_assert_false(qtest_get_irq(qts, RST_B));
+
+ qtest_quit(qts);
}
int main(int argc, char **argv)
diff --git a/tests/qtest/drive_del-test.c b/tests/qtest/drive_del-test.c
index 7b67a4b..30d9451 100644
--- a/tests/qtest/drive_del-test.c
+++ b/tests/qtest/drive_del-test.c
@@ -13,8 +13,8 @@
#include "qemu/osdep.h"
#include "libqtest.h"
#include "libqos/virtio.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
static const char *qvirtio_get_dev_type(void);
@@ -154,15 +154,10 @@ static void device_add(QTestState *qts)
static void device_del(QTestState *qts, bool and_reset)
{
- QDict *response;
-
qtest_qmp_device_del_send(qts, "dev0");
if (and_reset) {
- response = qtest_qmp(qts, "{'execute': 'system_reset' }");
- g_assert(response);
- g_assert(qdict_haskey(response, "return"));
- qobject_unref(response);
+ qtest_system_reset_nowait(qts);
}
qtest_qmp_eventwait(qts, "DEVICE_DELETED");
diff --git a/tests/qtest/emc141x-test.c b/tests/qtest/emc141x-test.c
index 8c86694..a24103e 100644
--- a/tests/qtest/emc141x-test.c
+++ b/tests/qtest/emc141x-test.c
@@ -10,7 +10,7 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/sensor/emc141x_regs.h"
#define EMC1414_TEST_ID "emc1414-test"
diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c
index 5e8fbda..1b37a8a 100644
--- a/tests/qtest/fdc-test.c
+++ b/tests/qtest/fdc-test.c
@@ -26,7 +26,7 @@
#include "libqtest-single.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define DRIVE_FLOPPY_BLANK \
"-drive if=floppy,file=null-co://,file.read-zeroes=on,format=raw,size=1440k"
@@ -552,7 +552,7 @@ static bool qtest_check_clang_sanitizer(void)
#ifdef QEMU_SANITIZE_ADDRESS
return true;
#else
- g_test_skip("QEMU not configured using --enable-sanitizers");
+ g_test_skip("QEMU not configured using --enable-asan");
return false;
#endif
}
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
index 9b9c9f9..ca248a5 100644
--- a/tests/qtest/fuzz/fuzz.c
+++ b/tests/qtest/fuzz/fuzz.c
@@ -17,9 +17,9 @@
#include "qemu/cutils.h"
#include "qemu/datadir.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/qtest.h"
+#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/rcu.h"
#include "tests/qtest/libqtest.h"
@@ -41,6 +41,7 @@ static FuzzTargetList *fuzz_target_list;
static FuzzTarget *fuzz_target;
static QTestState *fuzz_qts;
+int (*qemu_main)(void);
void flush_events(QTestState *s)
diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c
index ec842e0..f12080e 100644
--- a/tests/qtest/fuzz/generic_fuzz.c
+++ b/tests/qtest/fuzz/generic_fuzz.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/range.h"
#include <wordexp.h>
@@ -19,8 +20,8 @@
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "string.h"
-#include "exec/memory.h"
-#include "exec/ramblock.h"
+#include "system/memory.h"
+#include "system/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_device.h"
@@ -211,7 +212,7 @@ void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
++i) {
region = g_array_index(dma_regions, address_range, i);
- if (addr < region.addr + region.size && addr + len > region.addr) {
+ if (ranges_overlap(addr, len, region.addr, region.size)) {
double_fetch = true;
if (addr < region.addr
&& avoid_double_fetches) {
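/*
 * Editor's note (not part of the patch): ranges_overlap(addr, len, start, size)
 * from "qemu/range.h" is equivalent to the open-coded test it replaces above
 * (modulo overflow at the very top of the address space), i.e. the half-open
 * intervals [addr, addr+len) and [start, start+size) intersect:
 */
static inline bool sketch_dma_region_hit(uint64_t addr, uint64_t len,
                                         uint64_t start, uint64_t size)
{
    return addr < start + size && addr + len > start;
}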
@@ -571,7 +572,6 @@ static void op_add_dma_pattern(QTestState *s,
pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
p.index = a.index % p.len;
g_array_append_val(dma_patterns, p);
- return;
}
static void op_clear_dma_patterns(QTestState *s,
diff --git a/tests/qtest/fuzz/qos_fuzz.c b/tests/qtest/fuzz/qos_fuzz.c
index d3839bf..9afe8bf 100644
--- a/tests/qtest/fuzz/qos_fuzz.c
+++ b/tests/qtest/fuzz/qos_fuzz.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/main-loop.h"
#include "tests/qtest/libqtest.h"
diff --git a/tests/qtest/fuzz/qtest_wrappers.c b/tests/qtest/fuzz/qtest_wrappers.c
index 0580f8d..d7adcbe 100644
--- a/tests/qtest/fuzz/qtest_wrappers.c
+++ b/tests/qtest/fuzz/qtest_wrappers.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
-#include "exec/ioport.h"
+#include "system/ioport.h"
#include "fuzz.h"
diff --git a/tests/qtest/fw_cfg-test.c b/tests/qtest/fw_cfg-test.c
index 5dc807b..e48b34a 100644
--- a/tests/qtest/fw_cfg-test.c
+++ b/tests/qtest/fw_cfg-test.c
@@ -243,12 +243,6 @@ int main(int argc, char **argv)
qtest_add_func("fw_cfg/ram_size", test_fw_cfg_ram_size);
qtest_add_func("fw_cfg/nographic", test_fw_cfg_nographic);
qtest_add_func("fw_cfg/nb_cpus", test_fw_cfg_nb_cpus);
-#if 0
- qtest_add_func("fw_cfg/machine_id", test_fw_cfg_machine_id);
- qtest_add_func("fw_cfg/kernel", test_fw_cfg_kernel);
- qtest_add_func("fw_cfg/initrd", test_fw_cfg_initrd);
- qtest_add_func("fw_cfg/boot_device", test_fw_cfg_boot_device);
-#endif
qtest_add_func("fw_cfg/max_cpus", test_fw_cfg_max_cpus);
qtest_add_func("fw_cfg/numa", test_fw_cfg_numa);
qtest_add_func("fw_cfg/boot_menu", test_fw_cfg_boot_menu);
diff --git a/tests/qtest/hd-geo-test.c b/tests/qtest/hd-geo-test.c
index d08bffa..41481a5 100644
--- a/tests/qtest/hd-geo-test.c
+++ b/tests/qtest/hd-geo-test.c
@@ -17,7 +17,7 @@
#include "qemu/osdep.h"
#include "qemu/bswap.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "libqtest.h"
#include "libqos/fw_cfg.h"
#include "libqos/libqos.h"
@@ -900,7 +900,6 @@ static void test_override_hot_unplug(TestArgs *args, const char *devid,
QTestState *qts;
char *joined_args;
QFWCFG *fw_cfg;
- QDict *response;
int i;
joined_args = g_strjoinv(" ", args->argv);
@@ -913,13 +912,7 @@ static void test_override_hot_unplug(TestArgs *args, const char *devid,
/* unplug device and restart */
qtest_qmp_device_del_send(qts, devid);
- response = qtest_qmp(qts,
- "{ 'execute': 'system_reset', 'arguments': { }}");
- g_assert(response);
- g_assert(!qdict_haskey(response, "error"));
- qobject_unref(response);
-
- qtest_qmp_eventwait(qts, "RESET");
+ qtest_system_reset(qts);
read_bootdevices(fw_cfg, expected2);
@@ -1074,17 +1067,26 @@ int main(int argc, char **argv)
}
}
- qtest_add_func("hd-geo/ide/none", test_ide_none);
- qtest_add_func("hd-geo/ide/drive/mbr/blank", test_ide_drive_mbr_blank);
- qtest_add_func("hd-geo/ide/drive/mbr/lba", test_ide_drive_mbr_lba);
- qtest_add_func("hd-geo/ide/drive/mbr/chs", test_ide_drive_mbr_chs);
- qtest_add_func("hd-geo/ide/drive/cd_0", test_ide_drive_cd_0);
- qtest_add_func("hd-geo/ide/device/mbr/blank", test_ide_device_mbr_blank);
- qtest_add_func("hd-geo/ide/device/mbr/lba", test_ide_device_mbr_lba);
- qtest_add_func("hd-geo/ide/device/mbr/chs", test_ide_device_mbr_chs);
- qtest_add_func("hd-geo/ide/device/user/chs", test_ide_device_user_chs);
- qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst);
- if (have_qemu_img()) {
+ if (qtest_has_machine("pc")) {
+ qtest_add_func("hd-geo/ide/none", test_ide_none);
+ qtest_add_func("hd-geo/ide/drive/mbr/blank", test_ide_drive_mbr_blank);
+ qtest_add_func("hd-geo/ide/drive/mbr/lba", test_ide_drive_mbr_lba);
+ qtest_add_func("hd-geo/ide/drive/mbr/chs", test_ide_drive_mbr_chs);
+ qtest_add_func("hd-geo/ide/drive/cd_0", test_ide_drive_cd_0);
+ qtest_add_func("hd-geo/ide/device/mbr/blank", test_ide_device_mbr_blank);
+ qtest_add_func("hd-geo/ide/device/mbr/lba", test_ide_device_mbr_lba);
+ qtest_add_func("hd-geo/ide/device/mbr/chs", test_ide_device_mbr_chs);
+ qtest_add_func("hd-geo/ide/device/user/chs", test_ide_device_user_chs);
+ qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst);
+ }
+
+ if (!have_qemu_img()) {
+ g_test_message("QTEST_QEMU_IMG not set or qemu-img missing; "
+ "skipping hd-geo/override/* tests");
+ goto test_add_done;
+ }
+
+ if (qtest_has_machine("pc")) {
qtest_add_func("hd-geo/override/ide", test_override_ide);
if (qtest_has_device("lsi53c895a")) {
qtest_add_func("hd-geo/override/scsi", test_override_scsi);
@@ -1104,30 +1106,26 @@ int main(int argc, char **argv)
qtest_add_func("hd-geo/override/virtio_blk",
test_override_virtio_blk);
}
+ }
- if (qtest_has_machine("q35")) {
- qtest_add_func("hd-geo/override/sata", test_override_sata);
- qtest_add_func("hd-geo/override/zero_chs_q35",
- test_override_zero_chs_q35);
- if (qtest_has_device("lsi53c895a")) {
- qtest_add_func("hd-geo/override/scsi_q35",
- test_override_scsi_q35);
- }
- if (qtest_has_device("virtio-scsi-pci")) {
- qtest_add_func("hd-geo/override/scsi_hot_unplug_q35",
- test_override_scsi_hot_unplug_q35);
- }
- if (qtest_has_device("virtio-blk-pci")) {
- qtest_add_func("hd-geo/override/virtio_hot_unplug_q35",
- test_override_virtio_hot_unplug_q35);
- qtest_add_func("hd-geo/override/virtio_blk_q35",
- test_override_virtio_blk_q35);
- }
-
+ if (qtest_has_machine("q35")) {
+ qtest_add_func("hd-geo/override/sata", test_override_sata);
+ qtest_add_func("hd-geo/override/zero_chs_q35",
+ test_override_zero_chs_q35);
+ if (qtest_has_device("lsi53c895a")) {
+ qtest_add_func("hd-geo/override/scsi_q35",
+ test_override_scsi_q35);
+ }
+ if (qtest_has_device("virtio-scsi-pci")) {
+ qtest_add_func("hd-geo/override/scsi_hot_unplug_q35",
+ test_override_scsi_hot_unplug_q35);
+ }
+ if (qtest_has_device("virtio-blk-pci")) {
+ qtest_add_func("hd-geo/override/virtio_hot_unplug_q35",
+ test_override_virtio_hot_unplug_q35);
+ qtest_add_func("hd-geo/override/virtio_blk_q35",
+ test_override_virtio_blk_q35);
}
- } else {
- g_test_message("QTEST_QEMU_IMG not set or qemu-img missing; "
- "skipping hd-geo/override/* tests");
}
test_add_done:
diff --git a/tests/qtest/ide-test.c b/tests/qtest/ide-test.c
index 90ba6b2..ceee444 100644
--- a/tests/qtest/ide-test.c
+++ b/tests/qtest/ide-test.c
@@ -29,7 +29,7 @@
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/malloc-pc.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/bswap.h"
#include "hw/pci/pci_ids.h"
#include "hw/pci/pci_regs.h"
diff --git a/tests/qtest/intel-iommu-test.c b/tests/qtest/intel-iommu-test.c
new file mode 100644
index 0000000..c521b37
--- /dev/null
+++ b/tests/qtest/intel-iommu-test.c
@@ -0,0 +1,64 @@
+/*
+ * QTest testcase for intel-iommu
+ *
+ * Copyright (c) 2024 Intel, Inc.
+ *
+ * Author: Zhenzhong Duan <zhenzhong.duan@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "hw/i386/intel_iommu_internal.h"
+
+#define CAP_STAGE_1_FIXED1 (VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | \
+ VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS)
+#define ECAP_STAGE_1_FIXED1 (VTD_ECAP_QI | VTD_ECAP_IR | VTD_ECAP_IRO | \
+ VTD_ECAP_MHMV | VTD_ECAP_SMTS | VTD_ECAP_FLTS)
+
+static inline uint64_t vtd_reg_readq(QTestState *s, uint64_t offset)
+{
+ return qtest_readq(s, Q35_HOST_BRIDGE_IOMMU_ADDR + offset);
+}
+
+static void test_intel_iommu_stage_1(void)
+{
+ uint8_t init_csr[DMAR_REG_SIZE]; /* register values */
+ uint8_t post_reset_csr[DMAR_REG_SIZE]; /* register values */
+ uint64_t cap, ecap, tmp;
+ QTestState *s;
+
+ s = qtest_init("-M q35 -device intel-iommu,x-scalable-mode=on,x-flts=on");
+
+ cap = vtd_reg_readq(s, DMAR_CAP_REG);
+ g_assert((cap & CAP_STAGE_1_FIXED1) == CAP_STAGE_1_FIXED1);
+
+ tmp = cap & VTD_CAP_SAGAW_MASK;
+ g_assert(tmp == (VTD_CAP_SAGAW_39bit | VTD_CAP_SAGAW_48bit));
+
+ tmp = VTD_MGAW_FROM_CAP(cap);
+ g_assert(tmp == VTD_HOST_AW_48BIT - 1);
+
+ ecap = vtd_reg_readq(s, DMAR_ECAP_REG);
+ g_assert((ecap & ECAP_STAGE_1_FIXED1) == ECAP_STAGE_1_FIXED1);
+
+ qtest_memread(s, Q35_HOST_BRIDGE_IOMMU_ADDR, init_csr, DMAR_REG_SIZE);
+
+ qobject_unref(qtest_qmp(s, "{ 'execute': 'system_reset' }"));
+ qtest_qmp_eventwait(s, "RESET");
+
+ qtest_memread(s, Q35_HOST_BRIDGE_IOMMU_ADDR, post_reset_csr, DMAR_REG_SIZE);
+ /* Ensure registers are consistent after hard reset */
+ g_assert(!memcmp(init_csr, post_reset_csr, DMAR_REG_SIZE));
+
+ qtest_quit(s);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/q35/intel-iommu/stage-1", test_intel_iommu_stage_1);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/ipmi-bt-test.c b/tests/qtest/ipmi-bt-test.c
index 383239b..637732f 100644
--- a/tests/qtest/ipmi-bt-test.c
+++ b/tests/qtest/ipmi-bt-test.c
@@ -251,7 +251,7 @@ static void emu_msg_handler(void)
msg[msg_len++] = 0xa0;
write_emu_msg(msg, msg_len);
} else {
- g_assert(0);
+ g_assert_not_reached();
}
}
@@ -411,7 +411,7 @@ int main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
global_qtest = qtest_initf(
- " -chardev socket,id=ipmi0,host=127.0.0.1,port=%d,reconnect=10"
+ " -chardev socket,id=ipmi0,host=127.0.0.1,port=%d,reconnect-ms=10000"
" -device ipmi-bmc-extern,chardev=ipmi0,id=bmc0"
" -device isa-ipmi-bt,bmc=bmc0", emu_port);
qtest_irq_intercept_in(global_qtest, "ioapic");
diff --git a/tests/qtest/ipmi-kcs-test.c b/tests/qtest/ipmi-kcs-test.c
index afc24dd..3186c6a 100644
--- a/tests/qtest/ipmi-kcs-test.c
+++ b/tests/qtest/ipmi-kcs-test.c
@@ -145,7 +145,7 @@ static void kcs_cmd(uint8_t *cmd, unsigned int cmd_len,
break;
default:
- g_assert(0);
+ g_assert_not_reached();
}
*rsp_len = j;
}
@@ -184,7 +184,7 @@ static void kcs_abort(uint8_t *cmd, unsigned int cmd_len,
break;
default:
- g_assert(0);
+ g_assert_not_reached();
}
/* Start the abort here */
diff --git a/tests/qtest/isl_pmbus_vr-test.c b/tests/qtest/isl_pmbus_vr-test.c
index 5553ea4..1ff840c 100644
--- a/tests/qtest/isl_pmbus_vr-test.c
+++ b/tests/qtest/isl_pmbus_vr-test.c
@@ -21,8 +21,8 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/bitops.h"
#define TEST_ID "isl_pmbus_vr-test"
diff --git a/tests/qtest/libqmp.c b/tests/qtest/libqmp.c
index a89cab0..16fe546 100644
--- a/tests/qtest/libqmp.c
+++ b/tests/qtest/libqmp.c
@@ -25,8 +25,8 @@
#include "qemu/cutils.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
-#include "qapi/qmp/json-parser.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/json-parser.h"
+#include "qobject/qjson.h"
#define SOCKET_MAX_FDS 16
diff --git a/tests/qtest/libqmp.h b/tests/qtest/libqmp.h
index 3445b75..4a931c9 100644
--- a/tests/qtest/libqmp.h
+++ b/tests/qtest/libqmp.h
@@ -18,7 +18,7 @@
#ifndef LIBQMP_H
#define LIBQMP_H
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
QDict *qmp_fd_receive(int fd);
#ifndef _WIN32
diff --git a/tests/qtest/libqos/arm-imx25-pdk-machine.c b/tests/qtest/libqos/arm-imx25-pdk-machine.c
index 8fe128f..2d8b754 100644
--- a/tests/qtest/libqos/arm-imx25-pdk-machine.c
+++ b/tests/qtest/libqos/arm-imx25-pdk-machine.c
@@ -23,6 +23,7 @@
#include "libqos-malloc.h"
#include "qgraph.h"
#include "i2c.h"
+#include "hw/i2c/imx_i2c.h"
#define ARM_PAGE_SIZE 4096
#define IMX25_PDK_RAM_START 0x80000000
@@ -50,7 +51,7 @@ static void *imx25_pdk_get_driver(void *object, const char *interface)
static QOSGraphObject *imx25_pdk_get_device(void *obj, const char *device)
{
QIMX25PDKMachine *machine = obj;
- if (!g_strcmp0(device, "imx.i2c")) {
+ if (!g_strcmp0(device, TYPE_IMX_I2C)) {
return &machine->i2c_1.obj;
}
@@ -86,7 +87,7 @@ static void imx25_pdk_register_nodes(void)
.extra_device_opts = "bus=i2c-bus.0"
};
qos_node_create_machine("arm/imx25-pdk", qos_create_machine_arm_imx25_pdk);
- qos_node_contains("arm/imx25-pdk", "imx.i2c", &edge, NULL);
+ qos_node_contains("arm/imx25-pdk", TYPE_IMX_I2C, &edge, NULL);
}
libqos_init(imx25_pdk_register_nodes);
diff --git a/tests/qtest/libqos/arm-n800-machine.c b/tests/qtest/libqos/arm-n800-machine.c
deleted file mode 100644
index 4e5afe0..0000000
--- a/tests/qtest/libqos/arm-n800-machine.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * libqos driver framework
- *
- * Copyright (c) 2019 Red Hat, Inc.
- *
- * Author: Paolo Bonzini <pbonzini@redhat.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
- */
-
-#include "qemu/osdep.h"
-#include "../libqtest.h"
-#include "libqos-malloc.h"
-#include "qgraph.h"
-#include "i2c.h"
-
-#define ARM_PAGE_SIZE 4096
-#define N800_RAM_START 0x80000000
-#define N800_RAM_END 0x88000000
-
-typedef struct QN800Machine QN800Machine;
-
-struct QN800Machine {
- QOSGraphObject obj;
- QGuestAllocator alloc;
- OMAPI2C i2c_1;
-};
-
-static void *n800_get_driver(void *object, const char *interface)
-{
- QN800Machine *machine = object;
- if (!g_strcmp0(interface, "memory")) {
- return &machine->alloc;
- }
-
- fprintf(stderr, "%s not present in arm/n800\n", interface);
- g_assert_not_reached();
-}
-
-static QOSGraphObject *n800_get_device(void *obj, const char *device)
-{
- QN800Machine *machine = obj;
- if (!g_strcmp0(device, "omap_i2c")) {
- return &machine->i2c_1.obj;
- }
-
- fprintf(stderr, "%s not present in arm/n800\n", device);
- g_assert_not_reached();
-}
-
-static void n800_destructor(QOSGraphObject *obj)
-{
- QN800Machine *machine = (QN800Machine *) obj;
- alloc_destroy(&machine->alloc);
-}
-
-static void *qos_create_machine_arm_n800(QTestState *qts)
-{
- QN800Machine *machine = g_new0(QN800Machine, 1);
-
- alloc_init(&machine->alloc, 0,
- N800_RAM_START,
- N800_RAM_END,
- ARM_PAGE_SIZE);
- machine->obj.get_device = n800_get_device;
- machine->obj.get_driver = n800_get_driver;
- machine->obj.destructor = n800_destructor;
-
- omap_i2c_init(&machine->i2c_1, qts, 0x48070000);
- return &machine->obj;
-}
-
-static void n800_register_nodes(void)
-{
- QOSGraphEdgeOptions edge = {
- .extra_device_opts = "bus=i2c-bus.0"
- };
- qos_node_create_machine("arm/n800", qos_create_machine_arm_n800);
- qos_node_contains("arm/n800", "omap_i2c", &edge, NULL);
-}
-
-libqos_init(n800_register_nodes);
diff --git a/tests/qtest/libqos/fw_cfg.c b/tests/qtest/libqos/fw_cfg.c
index 89f053c..0ab3959 100644
--- a/tests/qtest/libqos/fw_cfg.c
+++ b/tests/qtest/libqos/fw_cfg.c
@@ -14,6 +14,8 @@
#include "qemu/osdep.h"
#include "fw_cfg.h"
+#include "malloc-pc.h"
+#include "libqos-malloc.h"
#include "../libqtest.h"
#include "qemu/bswap.h"
#include "hw/nvram/fw_cfg.h"
@@ -60,6 +62,91 @@ static void mm_fw_cfg_select(QFWCFG *fw_cfg, uint16_t key)
qtest_writew(fw_cfg->qts, fw_cfg->base, key);
}
+static void qfw_cfg_dma_transfer(QFWCFG *fw_cfg, QOSState *qs, void *address,
+ uint32_t length, uint32_t control)
+{
+ FWCfgDmaAccess access;
+ uint32_t addr;
+ uint64_t guest_access_addr;
+ uint64_t gaddr;
+
+ /* create a data buffer in guest memory */
+ gaddr = guest_alloc(&qs->alloc, length);
+
+ if (control & FW_CFG_DMA_CTL_WRITE) {
+ qtest_bufwrite(fw_cfg->qts, gaddr, address, length);
+ }
+ access.address = cpu_to_be64(gaddr);
+ access.length = cpu_to_be32(length);
+ access.control = cpu_to_be32(control);
+
+ /* now create a separate buffer in guest memory for 'access' */
+ guest_access_addr = guest_alloc(&qs->alloc, sizeof(access));
+ qtest_bufwrite(fw_cfg->qts, guest_access_addr, &access, sizeof(access));
+
+ /* write lower 32 bits of address */
+ addr = cpu_to_be32((uint32_t)(uintptr_t)guest_access_addr);
+ qtest_outl(fw_cfg->qts, fw_cfg->base + 8, addr);
+
+ /* write upper 32 bits of address */
+ addr = cpu_to_be32((uint32_t)(uintptr_t)(guest_access_addr >> 32));
+ qtest_outl(fw_cfg->qts, fw_cfg->base + 4, addr);
+
+ g_assert(!(be32_to_cpu(access.control) & FW_CFG_DMA_CTL_ERROR));
+
+ if (control & FW_CFG_DMA_CTL_READ) {
+ qtest_bufread(fw_cfg->qts, gaddr, address, length);
+ }
+
+ guest_free(&qs->alloc, guest_access_addr);
+ guest_free(&qs->alloc, gaddr);
+}
+
+static void qfw_cfg_write_entry(QFWCFG *fw_cfg, QOSState *qs, uint16_t key,
+ void *buf, uint32_t len)
+{
+ qfw_cfg_select(fw_cfg, key);
+ qfw_cfg_dma_transfer(fw_cfg, qs, buf, len, FW_CFG_DMA_CTL_WRITE);
+}
+
+static void qfw_cfg_read_entry(QFWCFG *fw_cfg, QOSState *qs, uint16_t key,
+ void *buf, uint32_t len)
+{
+ qfw_cfg_select(fw_cfg, key);
+ qfw_cfg_dma_transfer(fw_cfg, qs, buf, len, FW_CFG_DMA_CTL_READ);
+}
+
+static bool find_pdir_entry(QFWCFG *fw_cfg, const char *filename,
+ uint16_t *sel, uint32_t *size)
+{
+ g_autofree unsigned char *filesbuf = NULL;
+ uint32_t count;
+ size_t dsize;
+ FWCfgFile *pdir_entry;
+ uint32_t i;
+ bool found = false;
+
+ *size = 0;
+ *sel = 0;
+
+ qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, &count, sizeof(count));
+ count = be32_to_cpu(count);
+ dsize = sizeof(uint32_t) + count * sizeof(struct fw_cfg_file);
+ filesbuf = g_malloc(dsize);
+ qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, filesbuf, dsize);
+ pdir_entry = (FWCfgFile *)(filesbuf + sizeof(uint32_t));
+ for (i = 0; i < count; ++i, ++pdir_entry) {
+ if (!strcmp(pdir_entry->name, filename)) {
+ *size = be32_to_cpu(pdir_entry->size);
+ *sel = be16_to_cpu(pdir_entry->select);
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
/*
* The caller need check the return value. When the return value is
* nonzero, it means that some bytes have been transferred.
@@ -73,37 +160,106 @@ static void mm_fw_cfg_select(QFWCFG *fw_cfg, uint16_t key)
* populated, it has received only a starting slice of the fw_cfg file.
*/
size_t qfw_cfg_get_file(QFWCFG *fw_cfg, const char *filename,
- void *data, size_t buflen)
+ void *data, size_t buflen)
{
- uint32_t count;
- uint32_t i;
- unsigned char *filesbuf = NULL;
- size_t dsize;
- FWCfgFile *pdir_entry;
size_t filesize = 0;
+ uint32_t len;
+ uint16_t sel;
- qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, &count, sizeof(count));
- count = be32_to_cpu(count);
- dsize = sizeof(uint32_t) + count * sizeof(struct fw_cfg_file);
- filesbuf = g_malloc(dsize);
- qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, filesbuf, dsize);
- pdir_entry = (FWCfgFile *)(filesbuf + sizeof(uint32_t));
- for (i = 0; i < count; ++i, ++pdir_entry) {
- if (!strcmp(pdir_entry->name, filename)) {
- uint32_t len = be32_to_cpu(pdir_entry->size);
- uint16_t sel = be16_to_cpu(pdir_entry->select);
- filesize = len;
- if (len > buflen) {
- len = buflen;
- }
- qfw_cfg_get(fw_cfg, sel, data, len);
- break;
+ if (find_pdir_entry(fw_cfg, filename, &sel, &len)) {
+ filesize = len;
+ if (len > buflen) {
+ len = buflen;
}
+ qfw_cfg_get(fw_cfg, sel, data, len);
}
- g_free(filesbuf);
+
return filesize;
}
+/*
+ * The caller needs to check the return value. When the return value is
+ * nonzero, it means that some bytes have been transferred.
+ *
+ * If the fw_cfg file in question is smaller than the allocated & passed-in
+ * buffer, then only the first len bytes of the buffer are filled.
+ *
+ * If the fw_cfg file in question is larger than the passed-in
+ * buffer, then the return value tells how much was actually read.
+ *
+ * It is illegal to call this function if fw_cfg does not support the DMA
+ * interface. The caller should ensure that DMA is supported before
+ * calling this function.
+ *
+ * The passed QOSState pointer qs must be initialized, and qs->alloc must
+ * also be properly initialized.
+ */
+size_t qfw_cfg_read_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename,
+ void *data, size_t buflen)
+{
+ uint32_t len = 0;
+ uint16_t sel;
+ uint32_t id;
+
+ g_assert(qs);
+ g_assert(filename);
+ g_assert(data);
+ g_assert(buflen);
+ /* check if DMA is supported since we use DMA for read */
+ id = qfw_cfg_get_u32(fw_cfg, FW_CFG_ID);
+ g_assert(id & FW_CFG_VERSION_DMA);
+
+ if (find_pdir_entry(fw_cfg, filename, &sel, &len)) {
+ if (len > buflen) {
+ len = buflen;
+ }
+ qfw_cfg_read_entry(fw_cfg, qs, sel, data, len);
+ }
+
+ return len;
+}
+
+/*
+ * The caller needs to check the return value. When the return value is
+ * nonzero, it means that some bytes have been transferred.
+ *
+ * If the fw_cfg file in question is smaller than the allocated & passed-in
+ * buffer, then only the first len bytes of the buffer are written to it.
+ *
+ * If the fw_cfg file in question is larger than the passed-in
+ * buffer, then the return value tells how much was actually written.
+ *
+ * It is illegal to call this function if fw_cfg does not support the DMA
+ * interface. The caller should ensure that DMA is supported before
+ * calling this function.
+ *
+ * The passed QOSState pointer qs must be initialized, and qs->alloc must
+ * also be properly initialized.
+ */
+size_t qfw_cfg_write_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename,
+ void *data, size_t buflen)
+{
+ uint32_t len = 0;
+ uint16_t sel;
+ uint32_t id;
+
+ g_assert(qs);
+ g_assert(filename);
+ g_assert(data);
+ g_assert(buflen);
+ /* write operation is only valid if DMA is supported */
+ id = qfw_cfg_get_u32(fw_cfg, FW_CFG_ID);
+ g_assert(id & FW_CFG_VERSION_DMA);
+
+ if (find_pdir_entry(fw_cfg, filename, &sel, &len)) {
+ if (len > buflen) {
+ len = buflen;
+ }
+ qfw_cfg_write_entry(fw_cfg, qs, sel, data, len);
+ }
+ return len;
+}
+
static void mm_fw_cfg_read(QFWCFG *fw_cfg, void *data, size_t len)
{
uint8_t *ptr = data;
diff --git a/tests/qtest/libqos/fw_cfg.h b/tests/qtest/libqos/fw_cfg.h
index b0456a1..6d6ff09 100644
--- a/tests/qtest/libqos/fw_cfg.h
+++ b/tests/qtest/libqos/fw_cfg.h
@@ -14,6 +14,7 @@
#define LIBQOS_FW_CFG_H
#include "../libqtest.h"
+#include "libqos.h"
typedef struct QFWCFG QFWCFG;
@@ -33,7 +34,10 @@ uint32_t qfw_cfg_get_u32(QFWCFG *fw_cfg, uint16_t key);
uint64_t qfw_cfg_get_u64(QFWCFG *fw_cfg, uint16_t key);
size_t qfw_cfg_get_file(QFWCFG *fw_cfg, const char *filename,
void *data, size_t buflen);
-
+size_t qfw_cfg_write_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename,
+ void *data, size_t buflen);
+size_t qfw_cfg_read_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename,
+ void *data, size_t buflen);
QFWCFG *mm_fw_cfg_init(QTestState *qts, uint64_t base);
void mm_fw_cfg_uninit(QFWCFG *fw_cfg);
QFWCFG *io_fw_cfg_init(QTestState *qts, uint16_t base);
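/*
 * Editor's sketch (not part of the patch): how a pc-based qtest might use the
 * new DMA helpers declared above.  The "opt/test" item, the boot/teardown
 * helpers from libqos-pc.h and the pc_fw_cfg_init()/pc_fw_cfg_uninit()
 * wrappers are illustrative assumptions, not taken from this series.
 */
static void example_fw_cfg_dma_read(void)
{
    QOSState *qs = qtest_pc_boot("-fw_cfg name=opt/test,string=hello");
    QFWCFG *fw_cfg = pc_fw_cfg_init(qs->qts);
    char buf[16] = { 0 };
    size_t transferred;

    /* returns min(file size, buflen), i.e. the number of bytes transferred */
    transferred = qfw_cfg_read_file(fw_cfg, qs, "opt/test", buf, sizeof(buf));
    g_assert_cmpuint(transferred, >, 0);

    pc_fw_cfg_uninit(fw_cfg);
    qtest_pc_shutdown(qs);
}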
diff --git a/tests/qtest/libqos/generic-pcihost.c b/tests/qtest/libqos/generic-pcihost.c
index 3124b0e..4bbeb5f 100644
--- a/tests/qtest/libqos/generic-pcihost.c
+++ b/tests/qtest/libqos/generic-pcihost.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "../libqtest.h"
#include "generic-pcihost.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/pci/pci_regs.h"
#include "qemu/host-utils.h"
diff --git a/tests/qtest/libqos/i2c-imx.c b/tests/qtest/libqos/i2c-imx.c
index 710cb92..6d868e4 100644
--- a/tests/qtest/libqos/i2c-imx.c
+++ b/tests/qtest/libqos/i2c-imx.c
@@ -209,8 +209,8 @@ void imx_i2c_init(IMXI2C *s, QTestState *qts, uint64_t addr)
static void imx_i2c_register_nodes(void)
{
- qos_node_create_driver("imx.i2c", NULL);
- qos_node_produces("imx.i2c", "i2c-bus");
+ qos_node_create_driver(TYPE_IMX_I2C, NULL);
+ qos_node_produces(TYPE_IMX_I2C, "i2c-bus");
}
libqos_init(imx_i2c_register_nodes);
diff --git a/tests/qtest/libqos/igb.c b/tests/qtest/libqos/igb.c
index f40c4ec..ab3ef6f 100644
--- a/tests/qtest/libqos/igb.c
+++ b/tests/qtest/libqos/igb.c
@@ -104,10 +104,10 @@ static void igb_pci_start_hw(QOSGraphObject *obj)
e1000e_macreg_write(&d->e1000e, E1000_RDT(0), 0);
e1000e_macreg_write(&d->e1000e, E1000_RDH(0), 0);
e1000e_macreg_write(&d->e1000e, E1000_RA,
- le32_to_cpu(*(uint32_t *)address));
+ ldl_le_p(address));
e1000e_macreg_write(&d->e1000e, E1000_RA + 4,
E1000_RAH_AV | E1000_RAH_POOL_1 |
- le16_to_cpu(*(uint16_t *)(address + 4)));
+ lduw_le_p(address + 4));
/* Set supported receive descriptor mode */
e1000e_macreg_write(&d->e1000e,
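/*
 * Editor's note (not part of the patch): ldl_le_p() and lduw_le_p() load a
 * 32-bit / 16-bit little-endian value from a possibly unaligned buffer, which
 * is why they replace the pointer casts above; roughly:
 */
static inline uint32_t sketch_ldl_le_p(const void *p)
{
    uint32_t v;

    memcpy(&v, p, sizeof(v)); /* no unaligned dereference */
    return le32_to_cpu(v);
}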
diff --git a/tests/qtest/libqos/libqos-malloc.c b/tests/qtest/libqos/libqos-malloc.c
index d756697..c90f8f0 100644
--- a/tests/qtest/libqos/libqos-malloc.c
+++ b/tests/qtest/libqos/libqos-malloc.c
@@ -342,5 +342,4 @@ void migrate_allocator(QGuestAllocator *src,
QTAILQ_INIT(src->free);
node = mlist_new(src->start, src->end - src->start);
QTAILQ_INSERT_HEAD(src->free, node, MLIST_ENTNAME);
- return;
}
diff --git a/tests/qtest/libqos/libqos.c b/tests/qtest/libqos/libqos.c
index 5c0fa1f..9b49d0d 100644
--- a/tests/qtest/libqos/libqos.c
+++ b/tests/qtest/libqos/libqos.c
@@ -2,7 +2,7 @@
#include "../libqtest.h"
#include "libqos.h"
#include "pci.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
/*** Test Setup & Teardown ***/
@@ -117,13 +117,14 @@ void migrate(QOSState *from, QOSState *to, const char *uri)
g_assert(qdict_haskey(sub, "status"));
st = qdict_get_str(sub, "status");
- /* "setup", "active", "completed", "failed", "cancelled" */
+ /* "setup", "active", "device", "completed", "failed", "cancelled" */
if (strcmp(st, "completed") == 0) {
qobject_unref(rsp);
break;
}
if ((strcmp(st, "setup") == 0) || (strcmp(st, "active") == 0)
+ || (strcmp(st, "device") == 0)
|| (strcmp(st, "wait-unplug") == 0)) {
qobject_unref(rsp);
g_usleep(5000);
diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build
index 1b2b2db..1ddaf7b 100644
--- a/tests/qtest/libqos/meson.build
+++ b/tests/qtest/libqos/meson.build
@@ -32,7 +32,6 @@ libqos_srcs = files(
'i2c-omap.c',
'igb.c',
'sdhci.c',
- 'tpci200.c',
'virtio.c',
'virtio-balloon.c',
'virtio-blk.c',
@@ -52,7 +51,6 @@ libqos_srcs = files(
# qgraph machines:
'aarch64-xlnx-zcu102-machine.c',
'arm-imx25-pdk-machine.c',
- 'arm-n800-machine.c',
'arm-raspi2-machine.c',
'arm-sabrelite-machine.c',
'arm-smdkc210-machine.c',
@@ -68,6 +66,13 @@ if have_virtfs
libqos_srcs += files('virtio-9p.c', 'virtio-9p-client.c')
endif
+if config_all_devices.has_key('CONFIG_RISCV_IOMMU')
+ libqos_srcs += files('riscv-iommu.c')
+endif
+if config_all_devices.has_key('CONFIG_TPCI200')
+ libqos_srcs += files('tpci200.c')
+endif
+
libqos = static_library('qos', libqos_srcs + genh,
build_by_default: false)
diff --git a/tests/qtest/libqos/pci-pc.c b/tests/qtest/libqos/pci-pc.c
index 9604628..147009f 100644
--- a/tests/qtest/libqos/pci-pc.c
+++ b/tests/qtest/libqos/pci-pc.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "../libqtest.h"
#include "pci-pc.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/pci/pci_regs.h"
#include "qemu/module.h"
diff --git a/tests/qtest/libqos/pci.c b/tests/qtest/libqos/pci.c
index b23d723..a59197b 100644
--- a/tests/qtest/libqos/pci.c
+++ b/tests/qtest/libqos/pci.c
@@ -328,8 +328,6 @@ bool qpci_msix_pending(QPCIDevice *dev, uint16_t entry)
g_assert(dev->msix_enabled);
pba_entry = qpci_io_readl(dev, dev->msix_pba_bar, dev->msix_pba_off + off);
- qpci_io_writel(dev, dev->msix_pba_bar, dev->msix_pba_off + off,
- pba_entry & ~(1 << bit_n));
return (pba_entry & (1 << bit_n)) != 0;
}
diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h
index 1b5de02..81fbfdd 100644
--- a/tests/qtest/libqos/qgraph.h
+++ b/tests/qtest/libqos/qgraph.h
@@ -355,7 +355,7 @@ void qos_object_start_hw(QOSGraphObject *obj);
QOSGraphObject *qos_machine_new(QOSGraphNode *node, QTestState *qts);
/**
- * qos_machine_new(): instantiate a new driver node
+ * qos_driver_new(): instantiate a new driver node
* @node: A driver node to be instantiated
* @parent: A #QOSGraphObject to be consumed by the new driver node
* @alloc: An allocator to be used by the new driver node.
diff --git a/tests/qtest/libqos/qos_external.c b/tests/qtest/libqos/qos_external.c
index c6bb8bf..493ab74 100644
--- a/tests/qtest/libqos/qos_external.c
+++ b/tests/qtest/libqos/qos_external.c
@@ -19,11 +19,11 @@
#include "qemu/osdep.h"
#include <getopt.h>
#include "../libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qbool.h"
+#include "qobject/qstring.h"
#include "qemu/module.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qlist.h"
#include "libqos-malloc.h"
#include "qgraph.h"
#include "qgraph_internal.h"
diff --git a/tests/qtest/libqos/riscv-iommu.c b/tests/qtest/libqos/riscv-iommu.c
new file mode 100644
index 0000000..01e3b31
--- /dev/null
+++ b/tests/qtest/libqos/riscv-iommu.c
@@ -0,0 +1,76 @@
+/*
+ * libqos driver riscv-iommu-pci framework
+ *
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "../libqtest.h"
+#include "qemu/module.h"
+#include "qgraph.h"
+#include "pci.h"
+#include "riscv-iommu.h"
+
+static void *riscv_iommu_pci_get_driver(void *obj, const char *interface)
+{
+ QRISCVIOMMU *r_iommu_pci = obj;
+
+ if (!g_strcmp0(interface, "pci-device")) {
+ return &r_iommu_pci->dev;
+ }
+
+ fprintf(stderr, "%s not present in riscv_iommu_pci\n", interface);
+ g_assert_not_reached();
+}
+
+static void riscv_iommu_pci_start_hw(QOSGraphObject *obj)
+{
+ QRISCVIOMMU *pci = (QRISCVIOMMU *)obj;
+ qpci_device_enable(&pci->dev);
+}
+
+static void riscv_iommu_pci_destructor(QOSGraphObject *obj)
+{
+ QRISCVIOMMU *pci = (QRISCVIOMMU *)obj;
+ qpci_iounmap(&pci->dev, pci->reg_bar);
+}
+
+static void *riscv_iommu_pci_create(void *pci_bus, QGuestAllocator *alloc,
+ void *addr)
+{
+ QRISCVIOMMU *r_iommu_pci = g_new0(QRISCVIOMMU, 1);
+ QPCIBus *bus = pci_bus;
+
+ qpci_device_init(&r_iommu_pci->dev, bus, addr);
+ r_iommu_pci->reg_bar = qpci_iomap(&r_iommu_pci->dev, 0, NULL);
+
+ r_iommu_pci->obj.get_driver = riscv_iommu_pci_get_driver;
+ r_iommu_pci->obj.start_hw = riscv_iommu_pci_start_hw;
+ r_iommu_pci->obj.destructor = riscv_iommu_pci_destructor;
+ return &r_iommu_pci->obj;
+}
+
+static void riscv_iommu_pci_register_nodes(void)
+{
+ QPCIAddress addr = {
+ .vendor_id = RISCV_IOMMU_PCI_VENDOR_ID,
+ .device_id = RISCV_IOMMU_PCI_DEVICE_ID,
+ .devfn = QPCI_DEVFN(1, 0),
+ };
+
+ QOSGraphEdgeOptions opts = {
+ .extra_device_opts = "addr=01.0",
+ };
+
+ add_qpci_address(&opts, &addr);
+
+ qos_node_create_driver("riscv-iommu-pci", riscv_iommu_pci_create);
+ qos_node_produces("riscv-iommu-pci", "pci-device");
+ qos_node_consumes("riscv-iommu-pci", "pci-bus", &opts);
+}
+
+libqos_init(riscv_iommu_pci_register_nodes);
diff --git a/tests/qtest/libqos/riscv-iommu.h b/tests/qtest/libqos/riscv-iommu.h
new file mode 100644
index 0000000..318db13
--- /dev/null
+++ b/tests/qtest/libqos/riscv-iommu.h
@@ -0,0 +1,101 @@
+/*
+ * libqos driver riscv-iommu-pci framework
+ *
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TESTS_LIBQOS_RISCV_IOMMU_H
+#define TESTS_LIBQOS_RISCV_IOMMU_H
+
+#include "qgraph.h"
+#include "pci.h"
+#include "qemu/bitops.h"
+
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l))
+#endif
+
+/*
+ * RISC-V IOMMU uses PCI_VENDOR_ID_REDHAT 0x1b36 and
+ * PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014.
+ */
+#define RISCV_IOMMU_PCI_VENDOR_ID 0x1b36
+#define RISCV_IOMMU_PCI_DEVICE_ID 0x0014
+#define RISCV_IOMMU_PCI_DEVICE_CLASS 0x0806
+
+/* Common field positions */
+#define RISCV_IOMMU_QUEUE_ENABLE BIT(0)
+#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1)
+#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8)
+#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16)
+#define RISCV_IOMMU_QUEUE_BUSY BIT(17)
+
+#define RISCV_IOMMU_REG_CAP 0x0000
+#define RISCV_IOMMU_CAP_VERSION GENMASK_ULL(7, 0)
+
+#define RISCV_IOMMU_REG_DDTP 0x0010
+#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4)
+#define RISCV_IOMMU_DDTP_MODE GENMASK_ULL(3, 0)
+#define RISCV_IOMMU_DDTP_MODE_OFF 0
+
+#define RISCV_IOMMU_REG_CQCSR 0x0048
+#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+#define RISCV_IOMMU_REG_FQCSR 0x004C
+#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+#define RISCV_IOMMU_REG_PQCSR 0x0050
+#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE
+#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE
+#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE
+#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY
+
+#define RISCV_IOMMU_REG_IPSR 0x0054
+
+#define RISCV_IOMMU_REG_IVEC 0x02F8
+#define RISCV_IOMMU_REG_IVEC_CIV GENMASK_ULL(3, 0)
+#define RISCV_IOMMU_REG_IVEC_FIV GENMASK_ULL(7, 4)
+#define RISCV_IOMMU_REG_IVEC_PMIV GENMASK_ULL(11, 8)
+#define RISCV_IOMMU_REG_IVEC_PIV GENMASK_ULL(15, 12)
+
+#define RISCV_IOMMU_REG_CQB 0x0018
+#define RISCV_IOMMU_CQB_PPN_START 10
+#define RISCV_IOMMU_CQB_PPN_LEN 44
+#define RISCV_IOMMU_CQB_LOG2SZ_START 0
+#define RISCV_IOMMU_CQB_LOG2SZ_LEN 5
+
+#define RISCV_IOMMU_REG_CQT 0x0024
+
+#define RISCV_IOMMU_REG_FQB 0x0028
+#define RISCV_IOMMU_FQB_PPN_START 10
+#define RISCV_IOMMU_FQB_PPN_LEN 44
+#define RISCV_IOMMU_FQB_LOG2SZ_START 0
+#define RISCV_IOMMU_FQB_LOG2SZ_LEN 5
+
+#define RISCV_IOMMU_REG_FQT 0x0034
+
+#define RISCV_IOMMU_REG_PQB 0x0038
+#define RISCV_IOMMU_PQB_PPN_START 10
+#define RISCV_IOMMU_PQB_PPN_LEN 44
+#define RISCV_IOMMU_PQB_LOG2SZ_START 0
+#define RISCV_IOMMU_PQB_LOG2SZ_LEN 5
+
+#define RISCV_IOMMU_REG_PQT 0x0044
+
+typedef struct QRISCVIOMMU {
+ QOSGraphObject obj;
+ QPCIDevice dev;
+ QPCIBar reg_bar;
+} QRISCVIOMMU;
+
+#endif
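
As a quick illustration of how a qos-graph test can consume the node registered above and poke the register layout defined in this header, here is a minimal sketch. It is distinct from the actual riscv-iommu-test.c added elsewhere in this series; the test name, the asserted reset values, and the expectation that DDTP comes up in OFF mode are assumptions for illustration only.

#include "qemu/osdep.h"
#include "libqos/qgraph.h"
#include "libqos/riscv-iommu.h"

/* Read CAP and DDTP through the BAR mapped by the libqos driver above. */
static void test_iommu_defaults(void *obj, void *data, QGuestAllocator *alloc)
{
    QRISCVIOMMU *r_iommu = obj;
    uint64_t cap, ddtp;

    cap = qpci_io_readq(&r_iommu->dev, r_iommu->reg_bar, RISCV_IOMMU_REG_CAP);
    /* Assumption: the version field is non-zero on a working device. */
    g_assert(cap & RISCV_IOMMU_CAP_VERSION);

    ddtp = qpci_io_readq(&r_iommu->dev, r_iommu->reg_bar, RISCV_IOMMU_REG_DDTP);
    /* Assumption: the device comes out of reset with translation off. */
    g_assert_cmpuint(ddtp & RISCV_IOMMU_DDTP_MODE, ==, RISCV_IOMMU_DDTP_MODE_OFF);
}

static void register_riscv_iommu_sketch(void)
{
    qos_add_test("defaults", "riscv-iommu-pci", test_iommu_defaults, NULL);
}

libqos_init(register_riscv_iommu_sketch);
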
diff --git a/tests/qtest/libqos/virtio-9p-client.c b/tests/qtest/libqos/virtio-9p-client.c
index b8adc8d..6ab4501 100644
--- a/tests/qtest/libqos/virtio-9p-client.c
+++ b/tests/qtest/libqos/virtio-9p-client.c
@@ -235,10 +235,11 @@ static const char *rmessage_name(uint8_t id)
id == P9_RMKDIR ? "RMKDIR" :
id == P9_RLCREATE ? "RLCREATE" :
id == P9_RSYMLINK ? "RSYMLINK" :
+ id == P9_RGETATTR ? "RGETATTR" :
id == P9_RLINK ? "RLINK" :
id == P9_RUNLINKAT ? "RUNLINKAT" :
id == P9_RFLUSH ? "RFLUSH" :
- id == P9_RREADDIR ? "READDIR" :
+ id == P9_RREADDIR ? "RREADDIR" :
"<unknown>";
}
@@ -556,6 +557,55 @@ void v9fs_rgetattr(P9Req *req, v9fs_attr *attr)
v9fs_req_free(req);
}
+/*
+ * size[4] Tsetattr tag[2] fid[4] valid[4] mode[4] uid[4] gid[4] size[8]
+ * atime_sec[8] atime_nsec[8] mtime_sec[8] mtime_nsec[8]
+ */
+TSetAttrRes v9fs_tsetattr(TSetAttrOpt opt)
+{
+ P9Req *req;
+ uint32_t err;
+
+ g_assert(opt.client);
+
+ req = v9fs_req_init(
+ opt.client, 4/*fid*/ + 4/*valid*/ + 4/*mode*/ + 4/*uid*/ + 4/*gid*/ +
+ 8/*size*/ + 8/*atime_sec*/ + 8/*atime_nsec*/ + 8/*mtime_sec*/ +
+ 8/*mtime_nsec*/, P9_TSETATTR, opt.tag
+ );
+ v9fs_uint32_write(req, opt.fid);
+ v9fs_uint32_write(req, (uint32_t) opt.attr.valid);
+ v9fs_uint32_write(req, opt.attr.mode);
+ v9fs_uint32_write(req, opt.attr.uid);
+ v9fs_uint32_write(req, opt.attr.gid);
+ v9fs_uint64_write(req, opt.attr.size);
+ v9fs_uint64_write(req, opt.attr.atime_sec);
+ v9fs_uint64_write(req, opt.attr.atime_nsec);
+ v9fs_uint64_write(req, opt.attr.mtime_sec);
+ v9fs_uint64_write(req, opt.attr.mtime_nsec);
+ v9fs_req_send(req);
+
+ if (!opt.requestOnly) {
+ v9fs_req_wait_for_reply(req, NULL);
+ if (opt.expectErr) {
+ v9fs_rlerror(req, &err);
+ g_assert_cmpint(err, ==, opt.expectErr);
+ } else {
+ v9fs_rsetattr(req);
+ }
+ req = NULL; /* request was freed */
+ }
+
+ return (TSetAttrRes) { .req = req };
+}
+
+/* size[4] Rsetattr tag[2] */
+void v9fs_rsetattr(P9Req *req)
+{
+ v9fs_req_recv(req, P9_RSETATTR);
+ v9fs_req_free(req);
+}
+
/* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */
TReadDirRes v9fs_treaddir(TReadDirOpt opt)
{
diff --git a/tests/qtest/libqos/virtio-9p-client.h b/tests/qtest/libqos/virtio-9p-client.h
index 78228eb..e3221a3 100644
--- a/tests/qtest/libqos/virtio-9p-client.h
+++ b/tests/qtest/libqos/virtio-9p-client.h
@@ -65,6 +65,16 @@ typedef struct v9fs_attr {
#define P9_GETATTR_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */
#define P9_GETATTR_ALL 0x00003fffULL /* Mask for ALL fields */
+#define P9_SETATTR_MODE 0x00000001UL
+#define P9_SETATTR_UID 0x00000002UL
+#define P9_SETATTR_GID 0x00000004UL
+#define P9_SETATTR_SIZE 0x00000008UL
+#define P9_SETATTR_ATIME 0x00000010UL
+#define P9_SETATTR_MTIME 0x00000020UL
+#define P9_SETATTR_CTIME 0x00000040UL
+#define P9_SETATTR_ATIME_SET 0x00000080UL
+#define P9_SETATTR_MTIME_SET 0x00000100UL
+
struct V9fsDirent {
v9fs_qid qid;
uint64_t offset;
@@ -182,6 +192,28 @@ typedef struct TGetAttrRes {
P9Req *req;
} TGetAttrRes;
+/* options for 'Tsetattr' 9p request */
+typedef struct TSetAttrOpt {
+ /* 9P client being used (mandatory) */
+ QVirtio9P *client;
+ /* user supplied tag number being returned with response (optional) */
+ uint16_t tag;
+ /* file ID of file/dir whose attributes shall be modified (required) */
+ uint32_t fid;
+ /* new attribute values to be set by 9p server */
+ v9fs_attr attr;
+ /* only send Tsetattr request but not wait for a reply? (optional) */
+ bool requestOnly;
+ /* do we expect an Rlerror response, if yes which error code? (optional) */
+ uint32_t expectErr;
+} TSetAttrOpt;
+
+/* result of 'Tsetattr' 9p request */
+typedef struct TSetAttrRes {
+ /* if requestOnly was set: request object for further processing */
+ P9Req *req;
+} TSetAttrRes;
+
/* options for 'Treaddir' 9p request */
typedef struct TReadDirOpt {
/* 9P client being used (mandatory) */
@@ -470,6 +502,8 @@ TWalkRes v9fs_twalk(TWalkOpt opt);
void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid);
TGetAttrRes v9fs_tgetattr(TGetAttrOpt);
void v9fs_rgetattr(P9Req *req, v9fs_attr *attr);
+TSetAttrRes v9fs_tsetattr(TSetAttrOpt opt);
+void v9fs_rsetattr(P9Req *req);
TReadDirRes v9fs_treaddir(TReadDirOpt);
void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries,
struct V9fsDirent **entries);
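
A hedged sketch of how a 9p test might drive the new Tsetattr helper, following the same designated-initializer convention as the other v9fs_t*() wrappers. The fid, the mode value, and the surrounding attach/walk steps are assumptions for illustration; when requestOnly and expectErr are left at zero, the helper waits for the reply and asserts success internally.

#include "qemu/osdep.h"
#include "libqos/virtio-9p-client.h"

/* Assumes v9p and fid were set up earlier, e.g. via v9fs_tattach()/v9fs_twalk(). */
static void chmod_via_tsetattr(QVirtio9P *v9p, uint32_t fid)
{
    v9fs_tsetattr((TSetAttrOpt) {
        .client = v9p,
        .fid = fid,
        .attr = {
            .valid = P9_SETATTR_MODE,   /* only the mode field is being changed */
            .mode = 0600,
        },
    });
}
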
diff --git a/tests/qtest/libqos/virtio-pci-modern.c b/tests/qtest/libqos/virtio-pci-modern.c
index 18d1188..4e67fcb 100644
--- a/tests/qtest/libqos/virtio-pci-modern.c
+++ b/tests/qtest/libqos/virtio-pci-modern.c
@@ -173,13 +173,11 @@ static bool get_config_isr_status(QVirtioDevice *d)
static void wait_config_isr_status(QVirtioDevice *d, gint64 timeout_us)
{
- QVirtioPCIDevice *dev = container_of(d, QVirtioPCIDevice, vdev);
gint64 start_time = g_get_monotonic_time();
- do {
+ while (!get_config_isr_status(d)) {
g_assert(g_get_monotonic_time() - start_time <= timeout_us);
- qtest_clock_step(dev->pdev->bus->qts, 100);
- } while (!get_config_isr_status(d));
+ }
}
static void queue_select(QVirtioDevice *d, uint16_t index)
diff --git a/tests/qtest/libqos/virtio-pci.c b/tests/qtest/libqos/virtio-pci.c
index 485b8f6..002bf8b 100644
--- a/tests/qtest/libqos/virtio-pci.c
+++ b/tests/qtest/libqos/virtio-pci.c
@@ -171,13 +171,11 @@ static bool qvirtio_pci_get_config_isr_status(QVirtioDevice *d)
static void qvirtio_pci_wait_config_isr_status(QVirtioDevice *d,
gint64 timeout_us)
{
- QVirtioPCIDevice *dev = container_of(d, QVirtioPCIDevice, vdev);
gint64 start_time = g_get_monotonic_time();
- do {
+ while (!qvirtio_pci_get_config_isr_status(d)) {
g_assert(g_get_monotonic_time() - start_time <= timeout_us);
- qtest_clock_step(dev->pdev->bus->qts, 100);
- } while (!qvirtio_pci_get_config_isr_status(d));
+ }
}
static void qvirtio_pci_queue_select(QVirtioDevice *d, uint16_t index)
diff --git a/tests/qtest/libqos/virtio-scmi.c b/tests/qtest/libqos/virtio-scmi.c
index ce8f4d5..6b5bd4d 100644
--- a/tests/qtest/libqos/virtio-scmi.c
+++ b/tests/qtest/libqos/virtio-scmi.c
@@ -1,7 +1,7 @@
/*
* virtio-scmi nodes for testing
*
- * SPDX-FileCopyrightText: Linaro Ltd
+ * Copyright (c) Linaro Ltd.
* SPDX-FileCopyrightText: Red Hat, Inc.
* SPDX-License-Identifier: GPL-2.0-or-later
*
diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c
index a21b6ee..5a709d0 100644
--- a/tests/qtest/libqos/virtio.c
+++ b/tests/qtest/libqos/virtio.c
@@ -25,49 +25,63 @@
*/
static uint16_t qvirtio_readw(QVirtioDevice *d, QTestState *qts, uint64_t addr)
{
- uint16_t val = qtest_readw(qts, addr);
+ uint16_t val;
- if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) {
- val = bswap16(val);
+ if (d->features & (1ull << VIRTIO_F_VERSION_1)) {
+ qtest_memread(qts, addr, &val, sizeof(val));
+ val = le16_to_cpu(val);
+ } else {
+ val = qtest_readw(qts, addr);
}
+
return val;
}
static uint32_t qvirtio_readl(QVirtioDevice *d, QTestState *qts, uint64_t addr)
{
- uint32_t val = qtest_readl(qts, addr);
+ uint32_t val;
- if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) {
- val = bswap32(val);
+ if (d->features & (1ull << VIRTIO_F_VERSION_1)) {
+ qtest_memread(qts, addr, &val, sizeof(val));
+ val = le32_to_cpu(val);
+ } else {
+ val = qtest_readl(qts, addr);
}
+
return val;
}
static void qvirtio_writew(QVirtioDevice *d, QTestState *qts,
uint64_t addr, uint16_t val)
{
- if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) {
- val = bswap16(val);
+ if (d->features & (1ull << VIRTIO_F_VERSION_1)) {
+ val = cpu_to_le16(val);
+ qtest_memwrite(qts, addr, &val, sizeof(val));
+ } else {
+ qtest_writew(qts, addr, val);
}
- qtest_writew(qts, addr, val);
}
static void qvirtio_writel(QVirtioDevice *d, QTestState *qts,
uint64_t addr, uint32_t val)
{
- if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) {
- val = bswap32(val);
+ if (d->features & (1ull << VIRTIO_F_VERSION_1)) {
+ val = cpu_to_le32(val);
+ qtest_memwrite(qts, addr, &val, sizeof(val));
+ } else {
+ qtest_writel(qts, addr, val);
}
- qtest_writel(qts, addr, val);
}
static void qvirtio_writeq(QVirtioDevice *d, QTestState *qts,
uint64_t addr, uint64_t val)
{
- if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) {
- val = bswap64(val);
+ if (d->features & (1ull << VIRTIO_F_VERSION_1)) {
+ val = cpu_to_le64(val);
+ qtest_memwrite(qts, addr, &val, sizeof(val));
+ } else {
+ qtest_writeq(qts, addr, val);
}
- qtest_writeq(qts, addr, val);
}
uint8_t qvirtio_config_readb(QVirtioDevice *d, uint64_t addr)
@@ -170,7 +184,6 @@ void qvirtio_wait_queue_isr(QTestState *qts, QVirtioDevice *d,
gint64 start_time = g_get_monotonic_time();
for (;;) {
- qtest_clock_step(qts, 100);
if (d->bus->get_queue_isr_status(d, vq)) {
return;
}
@@ -192,7 +205,6 @@ uint8_t qvirtio_wait_status_byte_no_isr(QTestState *qts, QVirtioDevice *d,
uint8_t val;
while ((val = qtest_readb(qts, addr)) == 0xff) {
- qtest_clock_step(qts, 100);
g_assert(!d->bus->get_queue_isr_status(d, vq));
g_assert(g_get_monotonic_time() - start_time <= timeout_us);
}
@@ -219,14 +231,12 @@ void qvirtio_wait_used_elem(QTestState *qts, QVirtioDevice *d,
for (;;) {
uint32_t got_desc_idx;
- qtest_clock_step(qts, 100);
if (d->bus->get_queue_isr_status(d, vq) &&
qvirtqueue_get_buf(qts, vq, &got_desc_idx, len)) {
g_assert_cmpint(got_desc_idx, ==, desc_idx);
return;
}
-
g_assert(g_get_monotonic_time() - start_time <= timeout_us);
}
}
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 1326e34..94526b7 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -30,14 +30,15 @@
#include "libqtest.h"
#include "libqmp.h"
+#include "qemu/accel.h"
#include "qemu/ctype.h"
#include "qemu/cutils.h"
#include "qemu/sockets.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
+#include "qobject/qbool.h"
#define MAX_IRQ 256
@@ -75,6 +76,8 @@ struct QTestState
{
int fd;
int qmp_fd;
+ int sock;
+ int qmpsock;
pid_t qemu_pid; /* our child QEMU process */
int wstatus;
#ifdef _WIN32
@@ -215,6 +218,22 @@ static void qtest_check_status(QTestState *s)
#endif
}
+void qtest_system_reset_nowait(QTestState *s)
+{
+ /* Request the system reset, but do not wait for it to complete */
+ qtest_qmp_assert_success(s, "{'execute': 'system_reset' }");
+}
+
+void qtest_system_reset(QTestState *s)
+{
+ qtest_system_reset_nowait(s);
+ /*
+ * Wait for the RESET event, which is sent once the system reset
+ * has actually completed.
+ */
+ qtest_qmp_eventwait(s, "RESET");
+}
+
void qtest_wait_qemu(QTestState *s)
{
if (s->qemu_pid != -1) {
@@ -442,18 +461,19 @@ static QTestState *G_GNUC_PRINTF(2, 3) qtest_spawn_qemu(const char *qemu_bin,
return s;
}
+static char *qtest_socket_path(const char *suffix)
+{
+ return g_strdup_printf("%s/qtest-%d.%s", g_get_tmp_dir(), getpid(), suffix);
+}
+
static QTestState *qtest_init_internal(const char *qemu_bin,
- const char *extra_args)
+ const char *extra_args,
+ bool do_connect)
{
QTestState *s;
int sock, qmpsock, i;
- gchar *socket_path;
- gchar *qmp_socket_path;
-
- socket_path = g_strdup_printf("%s/qtest-%d.sock",
- g_get_tmp_dir(), getpid());
- qmp_socket_path = g_strdup_printf("%s/qtest-%d.qmp",
- g_get_tmp_dir(), getpid());
+ g_autofree gchar *socket_path = qtest_socket_path("sock");
+ g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp");
/*
* It's possible that if an earlier test run crashed it might
@@ -485,22 +505,19 @@ static QTestState *qtest_init_internal(const char *qemu_bin,
qtest_client_set_rx_handler(s, qtest_client_socket_recv_line);
qtest_client_set_tx_handler(s, qtest_client_socket_send);
- s->fd = socket_accept(sock);
- if (s->fd >= 0) {
- s->qmp_fd = socket_accept(qmpsock);
- }
- unlink(socket_path);
- unlink(qmp_socket_path);
- g_free(socket_path);
- g_free(qmp_socket_path);
-
- g_assert(s->fd >= 0 && s->qmp_fd >= 0);
-
s->rx = g_string_new("");
for (i = 0; i < MAX_IRQ; i++) {
s->irq_level[i] = false;
}
+ s->fd = -1;
+ s->qmp_fd = -1;
+ s->sock = sock;
+ s->qmpsock = qmpsock;
+ if (do_connect) {
+ qtest_connect(s);
+ }
+
/*
* Stopping QEMU for debugging is not supported on Windows.
*
@@ -514,40 +531,70 @@ static QTestState *qtest_init_internal(const char *qemu_bin,
kill(s->qemu_pid, SIGSTOP);
}
#endif
- return s;
+
+ return s;
}
-QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
+void qtest_connect(QTestState *s)
{
- return qtest_init_internal(qtest_qemu_binary(NULL), extra_args);
+ g_autofree gchar *socket_path = qtest_socket_path("sock");
+ g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp");
+
+ g_assert(s->sock >= 0 && s->qmpsock >= 0);
+ s->fd = socket_accept(s->sock);
+ if (s->fd >= 0) {
+ s->qmp_fd = socket_accept(s->qmpsock);
+ }
+ unlink(socket_path);
+ unlink(qmp_socket_path);
+ g_assert(s->fd >= 0 && s->qmp_fd >= 0);
+ s->sock = s->qmpsock = -1;
+ /* ask endianness of the target */
+ s->big_endian = qtest_query_target_endianness(s);
}
-QTestState *qtest_init_with_env_no_handshake(const char *var,
- const char *extra_args)
+QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
{
- return qtest_init_internal(qtest_qemu_binary(var), extra_args);
+ return qtest_init_internal(qtest_qemu_binary(NULL), extra_args, true);
}
-QTestState *qtest_init_with_env(const char *var, const char *extra_args)
+void qtest_qmp_handshake(QTestState *s, QList *capabilities)
{
- QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args);
- QDict *greeting;
-
- /* ask endianness of the target */
-
- s->big_endian = qtest_query_target_endianness(s);
-
/* Read the QMP greeting and then do the handshake */
- greeting = qtest_qmp_receive(s);
+ QDict *greeting = qtest_qmp_receive(s);
qobject_unref(greeting);
- qobject_unref(qtest_qmp(s, "{ 'execute': 'qmp_capabilities' }"));
+ if (capabilities) {
+ qtest_qmp_assert_success(s,
+ "{ 'execute': 'qmp_capabilities', "
+ "'arguments': { 'enable': %p } }",
+ qobject_ref(capabilities));
+ } else {
+ qtest_qmp_assert_success(s, "{ 'execute': 'qmp_capabilities' }");
+ }
+}
+
+QTestState *qtest_init_ext(const char *var, const char *extra_args,
+ QList *capabilities, bool do_connect)
+{
+ QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args,
+ do_connect);
+
+ if (do_connect) {
+ qtest_qmp_handshake(s, capabilities);
+ } else {
+ /*
+ * If the connection is delayed, capabilities can only be
+ * negotiated later, when qtest_qmp_handshake() is called.
+ */
+ assert(!capabilities);
+ }
return s;
}
QTestState *qtest_init(const char *extra_args)
{
- return qtest_init_with_env(NULL, extra_args);
+ return qtest_init_ext(NULL, extra_args, NULL, true);
}
QTestState *qtest_vinitf(const char *fmt, va_list ap)
@@ -757,6 +804,7 @@ QDict *qtest_qmp_receive(QTestState *s)
QDict *qtest_qmp_receive_dict(QTestState *s)
{
+ g_assert(s->qmp_fd >= 0);
return qmp_fd_receive(s->qmp_fd);
}
@@ -784,12 +832,14 @@ int qtest_socket_server(const char *socket_path)
void qtest_qmp_vsend_fds(QTestState *s, int *fds, size_t fds_num,
const char *fmt, va_list ap)
{
+ g_assert(s->qmp_fd >= 0);
qmp_fd_vsend_fds(s->qmp_fd, fds, fds_num, fmt, ap);
}
#endif
void qtest_qmp_vsend(QTestState *s, const char *fmt, va_list ap)
{
+ g_assert(s->qmp_fd >= 0);
qmp_fd_vsend(s->qmp_fd, fmt, ap);
}
@@ -850,6 +900,7 @@ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...)
{
va_list ap;
+ g_assert(s->qmp_fd >= 0);
va_start(ap, fmt);
qmp_fd_vsend_raw(s->qmp_fd, fmt, ap);
va_end(ap);
@@ -953,15 +1004,62 @@ const char *qtest_get_arch(void)
return end + 1;
}
+static bool qtest_qom_has_concrete_type(const char *parent_typename,
+ const char *child_typename,
+ QList **cached_list)
+{
+ QList *list = cached_list ? *cached_list : NULL;
+ const QListEntry *p;
+ QObject *qobj;
+ QString *qstr;
+ QDict *devinfo;
+ int idx;
+
+ if (!list) {
+ QDict *resp;
+ QDict *args;
+ QTestState *qts = qtest_init("-machine none");
+
+ args = qdict_new();
+ qdict_put_bool(args, "abstract", false);
+ qdict_put_str(args, "implements", parent_typename);
+
+ resp = qtest_qmp(qts, "{'execute': 'qom-list-types', 'arguments': %p }",
+ args);
+ g_assert(qdict_haskey(resp, "return"));
+ list = qdict_get_qlist(resp, "return");
+ qobject_ref(list);
+ qobject_unref(resp);
+
+ qtest_quit(qts);
+
+ if (cached_list) {
+ *cached_list = list;
+ }
+ }
+
+ for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) {
+ devinfo = qobject_to(QDict, qlist_entry_obj(p));
+ g_assert(devinfo);
+
+ qobj = qdict_get(devinfo, "name");
+ g_assert(qobj);
+ qstr = qobject_to(QString, qobj);
+ g_assert(qstr);
+ if (g_str_equal(qstring_get_str(qstr), child_typename)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool qtest_has_accel(const char *accel_name)
{
- if (g_str_equal(accel_name, "tcg")) {
-#if defined(CONFIG_TCG)
- return true;
-#else
- return false;
-#endif
- } else if (g_str_equal(accel_name, "kvm")) {
+ static QList *list;
+ g_autofree char *accel_type = NULL;
+
+ if (g_str_equal(accel_name, "kvm")) {
int i;
const char *arch = qtest_get_arch();
const char *targets[] = { CONFIG_KVM_TARGETS };
@@ -973,11 +1071,12 @@ bool qtest_has_accel(const char *accel_name)
}
}
}
- } else {
- /* not implemented */
- g_assert_not_reached();
+ return false;
}
- return false;
+
+ accel_type = g_strconcat(accel_name, ACCEL_CLASS_SUFFIX, NULL);
+
+ return qtest_qom_has_concrete_type("accel", accel_type, &list);
}
bool qtest_get_irq(QTestState *s, int num)
@@ -1207,6 +1306,33 @@ uint64_t qtest_rtas_call(QTestState *s, const char *name,
return 0;
}
+static void qtest_rsp_csr(QTestState *s, uint64_t *val)
+{
+ gchar **args;
+ uint64_t ret;
+ int rc;
+
+ args = qtest_rsp_args(s, 3);
+
+ rc = qemu_strtou64(args[1], NULL, 16, &ret);
+ g_assert(rc == 0);
+ rc = qemu_strtou64(args[2], NULL, 16, val);
+ g_assert(rc == 0);
+
+ g_strfreev(args);
+}
+
+uint64_t qtest_csr_call(QTestState *s, const char *name,
+ uint64_t cpu, int csr,
+ uint64_t *val)
+{
+ qtest_sendf(s, "csr %s 0x%"PRIx64" %d 0x%"PRIx64"\n",
+ name, cpu, csr, *val);
+
+ qtest_rsp_csr(s, val);
+ return 0;
+}
+
void qtest_add_func(const char *str, void (*fn)(void))
{
gchar *path = g_strdup_printf("/%s/%s", qtest_get_arch(), str);
@@ -1528,7 +1654,7 @@ static struct MachInfo *qtest_get_machines(const char *var)
silence_spawn_log = !g_test_verbose();
- qts = qtest_init_with_env(qemu_var, "-machine none");
+ qts = qtest_init_ext(qemu_var, "-machine none", NULL, true);
response = qtest_qmp(qts, "{ 'execute': 'query-machines' }");
g_assert(response);
list = qdict_get_qlist(response, "return");
@@ -1583,7 +1709,7 @@ static struct CpuModel *qtest_get_cpu_models(void)
silence_spawn_log = !g_test_verbose();
- qts = qtest_init_with_env(NULL, "-machine none");
+ qts = qtest_init_ext(NULL, "-machine none", NULL, true);
response = qtest_qmp(qts, "{ 'execute': 'query-cpu-definitions' }");
g_assert(response);
list = qdict_get_qlist(response, "return");
@@ -1653,7 +1779,9 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine),
/* Ignore machines that cannot be used for qtests */
if (!strncmp("xenfv", machines[i].name, 5) ||
g_str_equal("xenpv", machines[i].name) ||
- g_str_equal("xenpvh", machines[i].name)) {
+ g_str_equal("xenpvh", machines[i].name) ||
+ g_str_equal("vmapple", machines[i].name) ||
+ g_str_equal("nitro-enclave", machines[i].name)) {
continue;
}
if (!skip_old_versioned ||
@@ -1704,45 +1832,8 @@ bool qtest_has_machine(const char *machine)
bool qtest_has_device(const char *device)
{
static QList *list;
- const QListEntry *p;
- QObject *qobj;
- QString *qstr;
- QDict *devinfo;
- int idx;
-
- if (!list) {
- QDict *resp;
- QDict *args;
- QTestState *qts = qtest_init("-machine none");
-
- args = qdict_new();
- qdict_put_bool(args, "abstract", false);
- qdict_put_str(args, "implements", "device");
-
- resp = qtest_qmp(qts, "{'execute': 'qom-list-types', 'arguments': %p }",
- args);
- g_assert(qdict_haskey(resp, "return"));
- list = qdict_get_qlist(resp, "return");
- qobject_ref(list);
- qobject_unref(resp);
-
- qtest_quit(qts);
- }
-
- for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) {
- devinfo = qobject_to(QDict, qlist_entry_obj(p));
- g_assert(devinfo);
- qobj = qdict_get(devinfo, "name");
- g_assert(qobj);
- qstr = qobject_to(QString, qobj);
- g_assert(qstr);
- if (g_str_equal(qstring_get_str(qstr), device)) {
- return true;
- }
- }
-
- return false;
+ return qtest_qom_has_concrete_type("device", device, &list);
}
/*
@@ -1923,7 +2014,6 @@ void qtest_client_inproc_recv(void *opaque, const char *str)
qts->rx = g_string_new(NULL);
}
g_string_append(qts->rx, str);
- return;
}
void qtest_qom_set_bool(QTestState *s, const char *path, const char *property,
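
With qtest_has_accel() now resolving any accelerator name through qom-list-types (by appending ACCEL_CLASS_SUFFIX to form the concrete QOM type), a test can guard on arbitrary accelerators rather than only tcg/kvm. A minimal usage sketch; the accelerator name and skip message are chosen purely for illustration.

#include "qemu/osdep.h"
#include "libqtest.h"

static void test_needs_tcg(void)
{
    if (!qtest_has_accel("tcg")) {
        g_test_skip("TCG accelerator not built in");
        return;
    }
    /* ... test body relying on a TCG-only feature would go here ... */
}
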
diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h
index c261b7e..b3f2e7f 100644
--- a/tests/qtest/libqtest.h
+++ b/tests/qtest/libqtest.h
@@ -17,8 +17,9 @@
#ifndef LIBQTEST_H
#define LIBQTEST_H
-#include "qapi/qmp/qobject.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qobject.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "libqmp.h"
typedef struct QTestState QTestState;
@@ -56,20 +57,22 @@ QTestState *qtest_vinitf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
QTestState *qtest_init(const char *extra_args);
/**
- * qtest_init_with_env:
+ * qtest_init_ext:
* @var: Environment variable from where to take the QEMU binary
* @extra_args: Other arguments to pass to QEMU. CAUTION: these
* arguments are subject to word splitting and shell evaluation.
+ * @capabilities: list of QMP capabilities (strings) to enable
+ * @do_connect: connect to qemu monitor and qtest socket.
*
* Like qtest_init(), but use a different environment variable for the
- * QEMU binary.
+ * QEMU binary, allow specifying QMP capabilities, and optionally skip
+ * connecting to the QEMU monitor.
*
* Returns: #QTestState instance.
*/
-QTestState *qtest_init_with_env(const char *var, const char *extra_args);
+QTestState *qtest_init_ext(const char *var, const char *extra_args,
+ QList *capabilities, bool do_connect);
-QTestState *qtest_init_with_env_no_handshake(const char *var,
- const char *extra_args);
/**
* qtest_init_without_qmp_handshake:
* @extra_args: other arguments to pass to QEMU. CAUTION: these
@@ -80,6 +83,22 @@ QTestState *qtest_init_with_env_no_handshake(const char *var,
QTestState *qtest_init_without_qmp_handshake(const char *extra_args);
/**
+ * qtest_connect:
+ * @s: #QTestState instance to connect
+ * Connect to the QEMU monitor and qtest socket after they were skipped
+ * in qtest_init_ext(). Does not perform the QMP handshake.
+ */
+void qtest_connect(QTestState *s);
+
+/**
+ * qtest_qmp_handshake:
+ * @s: #QTestState instance to operate on.
+ * @capabilities: list of QMP capabilities (strings) to enable
+ * Perform the QMP handshake after connecting to the QEMU monitor.
+ */
+void qtest_qmp_handshake(QTestState *s, QList *capabilities);
+
+/**
* qtest_init_with_serial:
* @extra_args: other arguments to pass to QEMU. CAUTION: these
* arguments are subject to word splitting and shell evaluation.
@@ -91,6 +110,31 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args);
QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd);
/**
+ * qtest_system_reset:
+ * @s: #QTestState instance to operate on.
+ *
+ * Send a "system_reset" command to the QEMU under test, and wait for
+ * the reset to complete before returning.
+ */
+void qtest_system_reset(QTestState *s);
+
+/**
+ * qtest_system_reset_nowait:
+ * @s: #QTestState instance to operate on.
+ *
+ * Send a "system_reset" command to the QEMU under test, but do not
+ * wait for the reset to complete before returning. The caller is
+ * responsible for waiting for either the RESET event or some other
+ * event of interest to them before proceeding.
+ *
+ * This function should only be used if you're specifically testing
+ * for some other event; in that case you can't use qtest_system_reset()
+ * because it will read and discard any other QMP events that arrive
+ * before the RESET event.
+ */
+void qtest_system_reset_nowait(QTestState *s);
+
+/**
* qtest_wait_qemu:
* @s: #QTestState instance to operate on.
*
@@ -342,7 +386,7 @@ QDict *qtest_qmp_event_ref(QTestState *s, const char *event);
char *qtest_hmp(QTestState *s, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
/**
- * qtest_hmpv:
+ * qtest_vhmp:
* @s: #QTestState instance to operate on.
* @fmt: HMP command to send to QEMU, formats arguments like vsprintf().
* @ap: HMP command arguments
@@ -578,6 +622,20 @@ uint64_t qtest_rtas_call(QTestState *s, const char *name,
uint32_t nret, uint64_t ret);
/**
+ * qtest_csr_call:
+ * @s: #QTestState instance to operate on.
+ * @name: name of the command to call.
+ * @cpu: hart number.
+ * @csr: CSR number.
+ * @val: pointer to the value to write, updated with the value read back.
+ *
+ * Call a RISC-V CSR read/write function
+ */
+uint64_t qtest_csr_call(QTestState *s, const char *name,
+ uint64_t cpu, int csr,
+ uint64_t *val);
+
+/**
* qtest_bufread:
* @s: #QTestState instance to operate on.
* @addr: Guest address to read from.
@@ -881,7 +939,7 @@ void qtest_qmp_assert_success(QTestState *qts, const char *fmt, ...)
#ifndef _WIN32
/**
- * qtest_qmp_fd_assert_success_ref:
+ * qtest_qmp_fds_assert_success_ref:
* @qts: QTestState instance to operate on
* @fds: the file descriptors to send
* @nfds: number of @fds to send
@@ -898,7 +956,7 @@ QDict *qtest_qmp_fds_assert_success_ref(QTestState *qts, int *fds, size_t nfds,
G_GNUC_PRINTF(4, 5);
/**
- * qtest_qmp_fd_assert_success:
+ * qtest_qmp_fds_assert_success:
* @qts: QTestState instance to operate on
* @fds: the file descriptors to send
* @nfds: number of @fds to send
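
To show how the new split init/connect/handshake entry points fit together, here is a minimal sketch of the delayed-connect flow enabled by qtest_init_ext(); the command line and the NULL capability list are illustrative assumptions.

#include "qemu/osdep.h"
#include "libqtest.h"

static void delayed_connect_example(void)
{
    /* Spawn QEMU, but do not accept the qtest/QMP connections yet. */
    QTestState *s = qtest_init_ext(NULL, "-machine none", NULL, false);

    /* ... out-of-band setup could happen here, before connecting ... */

    qtest_connect(s);             /* accept the qtest and QMP sockets */
    qtest_qmp_handshake(s, NULL); /* negotiate default QMP capabilities */

    qtest_system_reset(s);        /* issue system_reset and wait for RESET */
    qtest_quit(s);
}
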
diff --git a/tests/qtest/lsm303dlhc-mag-test.c b/tests/qtest/lsm303dlhc-mag-test.c
index 0f64e7f..55ef459 100644
--- a/tests/qtest/lsm303dlhc-mag-test.c
+++ b/tests/qtest/lsm303dlhc-mag-test.c
@@ -13,7 +13,7 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define LSM303DLHC_MAG_TEST_ID "lsm303dlhc_mag-test"
#define LSM303DLHC_MAG_REG_CRA 0x00
diff --git a/tests/qtest/m48t59-test.c b/tests/qtest/m48t59-test.c
index 605797a..1e39a0e 100644
--- a/tests/qtest/m48t59-test.c
+++ b/tests/qtest/m48t59-test.c
@@ -247,11 +247,6 @@ static void base_setup(void)
base_year = 1968;
base_machine = "SS-5";
use_mmio = true;
- } else if (g_str_equal(arch, "ppc") || g_str_equal(arch, "ppc64")) {
- base = 0xF0000000;
- base_year = 1968;
- base_machine = "ref405ep";
- use_mmio = true;
} else {
g_assert_not_reached();
}
diff --git a/tests/qtest/machine-none-test.c b/tests/qtest/machine-none-test.c
index 05da7bc..b6a87d2 100644
--- a/tests/qtest/machine-none-test.c
+++ b/tests/qtest/machine-none-test.c
@@ -14,7 +14,7 @@
#include "qemu/cutils.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
struct arch2cpu {
@@ -30,7 +30,6 @@ static struct arch2cpu cpus_map[] = {
{ "x86_64", "qemu64,apic-id=0" },
{ "i386", "qemu32,apic-id=0" },
{ "alpha", "ev67" },
- { "cris", "crisv32" },
{ "m68k", "m5206" },
{ "microblaze", "any" },
{ "microblazeel", "any" },
diff --git a/tests/qtest/max34451-test.c b/tests/qtest/max34451-test.c
index dbf6ddc..5e0878c 100644
--- a/tests/qtest/max34451-test.c
+++ b/tests/qtest/max34451-test.c
@@ -11,8 +11,8 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/bitops.h"
#define TEST_ID "max34451-test"
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 6508bfb..8ad8490 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -1,11 +1,14 @@
slow_qtests = {
+ 'ahci-test': 150,
'aspeed_smc-test': 360,
- 'bios-tables-test' : 610,
+ 'bios-tables-test' : 910,
'cdrom-test' : 610,
'device-introspect-test' : 720,
+ 'ide-test' : 120,
'migration-test' : 480,
'npcm7xx_pwm-test': 300,
'npcm7xx_watchdog_timer-test': 120,
+ 'qmp-cmd-test' : 120,
'qom-test' : 900,
'stm32l4x5_usart-test' : 600,
'test-hmp' : 240,
@@ -49,7 +52,16 @@ qtests_filter = \
qtests_i386 = \
(slirp.found() ? ['pxe-test'] : []) + \
qtests_filter + \
- (have_tools ? ['ahci-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_ACPI_VMGENID') ? ['vmgenid-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_AHCI_ICH9') and have_tools ? ['ahci-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_AHCI_ICH9') ? ['tco-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_FDC_ISA') ? ['fdc-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_I440FX') ? ['fw_cfg-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_FW_CFG_DMA') ? ['vmcoreinfo-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_I440FX') ? ['i440fx-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_I440FX') ? ['ide-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_I440FX') ? ['numa-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_I440FX') ? ['test-x86-cpuid-compat'] : []) + \
(config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \
(config_all_devices.has_key('CONFIG_SGA') ? ['boot-serial-test'] : []) + \
(config_all_devices.has_key('CONFIG_ISA_IPMI_KCS') ? ['ipmi-kcs-test'] : []) + \
@@ -63,6 +75,7 @@ qtests_i386 = \
(config_all_devices.has_key('CONFIG_I82801B11') ? ['i82801b11-test'] : []) + \
(config_all_devices.has_key('CONFIG_IOH3420') ? ['ioh3420-test'] : []) + \
(config_all_devices.has_key('CONFIG_LPC_ICH9') ? ['lpc-ich9-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_MC146818RTC') ? ['rtc-test'] : []) + \
(config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \
(config_all_devices.has_key('CONFIG_USB_UHCI') and \
config_all_devices.has_key('CONFIG_USB_EHCI') ? ['usb-hcd-ehci-test'] : []) + \
@@ -76,9 +89,12 @@ qtests_i386 = \
(config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \
(config_all_devices.has_key('CONFIG_LSI_SCSI_PCI') ? ['fuzz-lsi53c895a-test'] : []) + \
(config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_VIRTIO_BALLOON') ? ['virtio-balloon-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_Q35') ? ['q35-test'] : []) + \
(config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \
(config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \
(config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_VTD') ? ['intel-iommu-test'] : []) + \
(host_os != 'windows' and \
config_all_devices.has_key('CONFIG_ACPI_ERST') ? ['erst-test'] : []) + \
(config_all_devices.has_key('CONFIG_PCIE_PORT') and \
@@ -91,25 +107,16 @@ qtests_i386 = \
config_all_devices.has_key('CONFIG_PARALLEL') ? ['bios-tables-test'] : []) + \
qtests_pci + \
qtests_cxl + \
- ['fdc-test',
- 'ide-test',
+ [
'hd-geo-test',
'boot-order-test',
- 'rtc-test',
- 'i440fx-test',
- 'fw_cfg-test',
'device-plug-test',
'drive_del-test',
- 'tco-test',
'cpu-plug-test',
- 'q35-test',
- 'vmgenid-test',
'migration-test',
- 'test-x86-cpuid-compat',
- 'numa-test'
]
-if dbus_display
+if dbus_display and config_all_devices.has_key('CONFIG_VGA')
qtests_i386 += ['dbus-display-test']
endif
@@ -135,12 +142,14 @@ qtests_alpha = ['boot-serial-test'] + \
qtests_avr = [ 'boot-serial-test' ]
-qtests_hppa = ['boot-serial-test'] + \
+qtests_hppa = \
qtests_filter + \
(config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : [])
qtests_loongarch64 = qtests_filter + \
- ['boot-serial-test', 'numa-test']
+ (config_all_devices.has_key('CONFIG_LOONGARCH_VIRT') ? ['numa-test'] : []) + \
+ ['boot-serial-test',
+ 'cpu-plug-test']
qtests_m68k = ['boot-serial-test'] + \
qtests_filter
@@ -162,7 +171,6 @@ qtests_mips64el = qtests_mips
qtests_ppc = \
qtests_filter + \
(config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \
- (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \
(config_all_accel.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \
(config_all_accel.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \
['boot-order-test']
@@ -171,12 +179,15 @@ qtests_ppc64 = \
qtests_ppc + \
(config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xive2-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-spi-seeprom-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-host-i2c-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_PSERIES') ? ['numa-test'] : []) + \
(config_all_devices.has_key('CONFIG_PSERIES') ? ['rtas-test'] : []) + \
(slirp.found() ? ['pxe-test'] : []) + \
(config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \
(config_all_devices.has_key('CONFIG_USB_XHCI_NEC') ? ['usb-hcd-xhci-test'] : []) + \
- qtests_pci + ['migration-test', 'numa-test', 'cpu-plug-test', 'drive_del-test']
+ qtests_pci + ['migration-test', 'cpu-plug-test', 'drive_del-test']
qtests_sh4 = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : [])
qtests_sh4eb = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : [])
@@ -197,13 +208,18 @@ qtests_npcm7xx = \
'npcm7xx_sdhci-test',
'npcm7xx_smbus-test',
'npcm7xx_timer-test',
- 'npcm7xx_watchdog_timer-test',
- 'npcm_gmac-test'] + \
+ 'npcm7xx_watchdog_timer-test'] + \
(slirp.found() ? ['npcm7xx_emc-test'] : [])
+qtests_npcm8xx = \
+ ['npcm_gmac-test']
qtests_aspeed = \
- ['aspeed_hace-test',
- 'aspeed_smc-test',
- 'aspeed_gpio-test']
+ ['aspeed_gpio-test',
+ 'aspeed_hace-test',
+ 'aspeed_smc-test']
+qtests_aspeed64 = \
+ ['ast2700-gpio-test',
+ 'ast2700-hace-test',
+ 'ast2700-smc-test']
qtests_stm32l4x5 = \
['stm32l4x5_exti-test',
@@ -216,7 +232,8 @@ qtests_arm = \
(config_all_devices.has_key('CONFIG_MPS2') ? ['sse-timer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \
- (config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_STELLARIS') or
+ config_all_devices.has_key('CONFIG_MPS2') ? ['cmsdk-apb-watchdog-test'] : []) + \
(config_all_devices.has_key('CONFIG_PFLASH_CFI02') and
config_all_devices.has_key('CONFIG_MUSICPAL') ? ['pflash-cfi02-test'] : []) + \
(config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed : []) + \
@@ -242,6 +259,8 @@ qtests_aarch64 = \
(config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test', 'bcm2835-i2c-test'] : []) + \
(config_all_accel.has_key('CONFIG_TCG') and \
config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed64 : []) + \
+ (config_all_devices.has_key('CONFIG_NPCM8XX') ? qtests_npcm8xx : []) + \
['arm-cpu-features',
'numa-test',
'boot-serial-test',
@@ -259,7 +278,7 @@ qtests_s390x = \
qtests_riscv32 = \
(config_all_devices.has_key('CONFIG_SIFIVE_E_AON') ? ['sifive-e-aon-watchdog-test'] : [])
-qtests_riscv64 = \
+qtests_riscv64 = ['riscv-csr-test'] + \
(unpack_edk2_blobs ? ['bios-tables-test'] : [])
qos_test_ss = ss.source_set()
@@ -271,7 +290,6 @@ qos_test_ss.add(
'e1000-test.c',
'eepro100-test.c',
'es1370-test.c',
- 'ipoctal232-test.c',
'lsm303dlhc-mag-test.c',
'isl_pmbus_vr-test.c',
'max34451-test.c',
@@ -282,6 +300,7 @@ qos_test_ss.add(
'pca9552-test.c',
'pci-test.c',
'pcnet-test.c',
+ 'rs5c372-test.c',
'sdhci-test.c',
'spapr-phb-test.c',
'tmp105-test.c',
@@ -296,11 +315,15 @@ qos_test_ss.add(
'vmxnet3-test.c',
'igb-test.c',
'ufs-test.c',
+ 'riscv-iommu-test.c',
)
if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL')
qos_test_ss.add(files('virtio-serial-test.c'))
endif
+if config_all_devices.has_key('CONFIG_IP_OCTAL_232')
+ qos_test_ss.add(files('ipoctal232-test.c'))
+endif
if host_os != 'windows'
qos_test_ss.add(files('e1000e-test.c'))
@@ -317,24 +340,44 @@ endif
tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c']
-migration_files = [files('migration-helpers.c')]
+migration_files = [files(
+ 'migration/bootfile.c',
+ 'migration/framework.c',
+ 'migration/migration-qmp.c',
+ 'migration/migration-util.c',
+ 'migration/compression-tests.c',
+ 'migration/cpr-tests.c',
+ 'migration/file-tests.c',
+ 'migration/misc-tests.c',
+ 'migration/precopy-tests.c',
+ 'migration/postcopy-tests.c',
+)]
+
+migration_tls_files = []
if gnutls.found()
- migration_files += [files('../unit/crypto-tls-psk-helpers.c'), gnutls]
+ migration_tls_files = [files('migration/tls-tests.c',
+ '../unit/crypto-tls-psk-helpers.c'), gnutls]
if tasn1.found()
- migration_files += [files('../unit/crypto-tls-x509-helpers.c',
- '../unit/pkix_asn1_tab.c'), tasn1]
+ migration_tls_files += [files('../unit/crypto-tls-x509-helpers.c'), tasn1]
endif
endif
qtests = {
+ 'aspeed_hace-test': files('aspeed-hace-utils.c', 'aspeed_hace-test.c'),
+ 'aspeed_smc-test': files('aspeed-smc-utils.c', 'aspeed_smc-test.c'),
+ 'ast2700-hace-test': files('aspeed-hace-utils.c', 'ast2700-hace-test.c'),
+ 'ast2700-smc-test': files('aspeed-smc-utils.c', 'ast2700-smc-test.c'),
'bios-tables-test': [io, 'boot-sector.c', 'acpi-utils.c', 'tpm-emu.c'],
'cdrom-test': files('boot-sector.c'),
- 'dbus-vmstate-test': files('migration-helpers.c') + dbus_vmstate1,
+ 'dbus-vmstate-test': files('migration/migration-qmp.c',
+ 'migration/migration-util.c') + dbus_vmstate1,
'erst-test': files('erst-test.c'),
'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'],
- 'migration-test': migration_files,
+ 'migration-test': migration_files + migration_tls_files,
'pxe-test': files('boot-sector.c'),
+ 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c',
+ 'pnv-xive2-nvpg_bar.c'),
'qos-test': [chardev, io, qos_test_ss.apply({}).sources()],
'tpm-crb-swtpm-test': [io, tpmemu_files],
'tpm-crb-test': [io, tpmemu_files],
@@ -343,7 +386,7 @@ qtests = {
'tpm-tis-i2c-test': [io, tpmemu_files, 'qtest_aspeed.c'],
'tpm-tis-device-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'],
'tpm-tis-device-test': [io, tpmemu_files, 'tpm-tis-util.c'],
- 'virtio-net-failover': files('migration-helpers.c'),
+ 'virtio-net-failover': migration_files,
'vmgenid-test': files('boot-sector.c', 'acpi-utils.c'),
'netdev-socket': files('netdev-socket.c', '../unit/socket-helpers.c'),
}
@@ -351,7 +394,7 @@ qtests = {
if vnc.found()
gvnc = dependency('gvnc-1.0', method: 'pkg-config', required: false)
if gvnc.found()
- qtests += {'vnc-display-test': [gvnc]}
+ qtests += {'vnc-display-test': [gvnc, keymap_targets]}
qtests_generic += [ 'vnc-display-test' ]
endif
endif
@@ -369,6 +412,8 @@ foreach dir : target_dirs
target_base = dir.split('-')[0]
qtest_emulator = emulators['qemu-system-' + target_base]
target_qtests = get_variable('qtests_' + target_base, []) + qtests_generic
+ has_kvm = ('CONFIG_KVM' in config_all_accel and host_os == 'linux'
+ and cpu == target_base and fs.exists('/dev/kvm'))
test_deps = roms
qtest_env = environment()
@@ -402,11 +447,18 @@ foreach dir : target_dirs
test: executable(test, src, dependencies: deps)
}
endif
+
+ test_args = ['--tap', '-k']
+
+ if test == 'migration-test' and has_kvm
+ test_args += ['--full']
+ endif
+
test('qtest-@0@/@1@'.format(target_base, test),
qtest_executables[test],
depends: [test_deps, qtest_emulator, emulator_modules],
env: qtest_env,
- args: ['--tap', '-k'],
+ args: test_args,
protocol: 'tap',
timeout: slow_qtests.get(test, 60),
priority: slow_qtests.get(test, 60),
diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c
deleted file mode 100644
index 84f49db..0000000
--- a/tests/qtest/migration-helpers.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * QTest migration helpers
- *
- * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
- * based on the vhost-user-test.c that is:
- * Copyright (c) 2014 Virtual Open Systems Sarl.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "qemu/ctype.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qapi-visit-sockets.h"
-#include "qapi/qobject-input-visitor.h"
-#include "qapi/error.h"
-#include "qapi/qmp/qlist.h"
-#include "qemu/cutils.h"
-#include "qemu/memalign.h"
-
-#include "migration-helpers.h"
-
-/*
- * Number of seconds we wait when looking for migration
- * status changes, to avoid test suite hanging forever
- * when things go wrong. Needs to be higher enough to
- * avoid false positives on loaded hosts.
- */
-#define MIGRATION_STATUS_WAIT_TIMEOUT 120
-
-static char *SocketAddress_to_str(SocketAddress *addr)
-{
- switch (addr->type) {
- case SOCKET_ADDRESS_TYPE_INET:
- return g_strdup_printf("tcp:%s:%s",
- addr->u.inet.host,
- addr->u.inet.port);
- case SOCKET_ADDRESS_TYPE_UNIX:
- return g_strdup_printf("unix:%s",
- addr->u.q_unix.path);
- case SOCKET_ADDRESS_TYPE_FD:
- return g_strdup_printf("fd:%s", addr->u.fd.str);
- case SOCKET_ADDRESS_TYPE_VSOCK:
- return g_strdup_printf("vsock:%s:%s",
- addr->u.vsock.cid,
- addr->u.vsock.port);
- default:
- return g_strdup("unknown address type");
- }
-}
-
-static QDict *SocketAddress_to_qdict(SocketAddress *addr)
-{
- QDict *dict = qdict_new();
-
- switch (addr->type) {
- case SOCKET_ADDRESS_TYPE_INET:
- qdict_put_str(dict, "type", "inet");
- qdict_put_str(dict, "host", addr->u.inet.host);
- qdict_put_str(dict, "port", addr->u.inet.port);
- break;
- case SOCKET_ADDRESS_TYPE_UNIX:
- qdict_put_str(dict, "type", "unix");
- qdict_put_str(dict, "path", addr->u.q_unix.path);
- break;
- case SOCKET_ADDRESS_TYPE_FD:
- qdict_put_str(dict, "type", "fd");
- qdict_put_str(dict, "str", addr->u.fd.str);
- break;
- case SOCKET_ADDRESS_TYPE_VSOCK:
- qdict_put_str(dict, "type", "vsock");
- qdict_put_str(dict, "cid", addr->u.vsock.cid);
- qdict_put_str(dict, "port", addr->u.vsock.port);
- break;
- default:
- g_assert_not_reached();
- break;
- }
-
- return dict;
-}
-
-static SocketAddress *migrate_get_socket_address(QTestState *who)
-{
- QDict *rsp;
- SocketAddressList *addrs;
- SocketAddress *addr;
- Visitor *iv = NULL;
- QObject *object;
-
- rsp = migrate_query(who);
- object = qdict_get(rsp, "socket-address");
-
- iv = qobject_input_visitor_new(object);
- visit_type_SocketAddressList(iv, NULL, &addrs, &error_abort);
- addr = addrs->value;
- visit_free(iv);
-
- qobject_unref(rsp);
- return addr;
-}
-
-static char *
-migrate_get_connect_uri(QTestState *who)
-{
- SocketAddress *addrs;
- char *connect_uri;
-
- addrs = migrate_get_socket_address(who);
- connect_uri = SocketAddress_to_str(addrs);
-
- qapi_free_SocketAddress(addrs);
- return connect_uri;
-}
-
-static QDict *
-migrate_get_connect_qdict(QTestState *who)
-{
- SocketAddress *addrs;
- QDict *connect_qdict;
-
- addrs = migrate_get_socket_address(who);
- connect_qdict = SocketAddress_to_qdict(addrs);
-
- qapi_free_SocketAddress(addrs);
- return connect_qdict;
-}
-
-static void migrate_set_ports(QTestState *to, QList *channel_list)
-{
- QDict *addr;
- QListEntry *entry;
- const char *addr_port = NULL;
-
- addr = migrate_get_connect_qdict(to);
-
- QLIST_FOREACH_ENTRY(channel_list, entry) {
- QDict *channel = qobject_to(QDict, qlist_entry_obj(entry));
- QDict *addrdict = qdict_get_qdict(channel, "addr");
-
- if (qdict_haskey(addrdict, "port") &&
- qdict_haskey(addr, "port") &&
- (strcmp(qdict_get_str(addrdict, "port"), "0") == 0)) {
- addr_port = qdict_get_str(addr, "port");
- qdict_put_str(addrdict, "port", g_strdup(addr_port));
- }
- }
-
- qobject_unref(addr);
-}
-
-bool migrate_watch_for_events(QTestState *who, const char *name,
- QDict *event, void *opaque)
-{
- QTestMigrationState *state = opaque;
-
- if (g_str_equal(name, "STOP")) {
- state->stop_seen = true;
- return true;
- } else if (g_str_equal(name, "SUSPEND")) {
- state->suspend_seen = true;
- return true;
- } else if (g_str_equal(name, "RESUME")) {
- state->resume_seen = true;
- return true;
- }
-
- return false;
-}
-
-void migrate_qmp_fail(QTestState *who, const char *uri,
- const char *channels, const char *fmt, ...)
-{
- va_list ap;
- QDict *args, *err;
-
- va_start(ap, fmt);
- args = qdict_from_vjsonf_nofail(fmt, ap);
- va_end(ap);
-
- g_assert(!qdict_haskey(args, "uri"));
- if (uri) {
- qdict_put_str(args, "uri", uri);
- }
-
- g_assert(!qdict_haskey(args, "channels"));
- if (channels) {
- QObject *channels_obj = qobject_from_json(channels, &error_abort);
- qdict_put_obj(args, "channels", channels_obj);
- }
-
- err = qtest_qmp_assert_failure_ref(
- who, "{ 'execute': 'migrate', 'arguments': %p}", args);
-
- g_assert(qdict_haskey(err, "desc"));
-
- qobject_unref(err);
-}
-
-/*
- * Send QMP command "migrate".
- * Arguments are built from @fmt... (formatted like
- * qobject_from_jsonf_nofail()) with "uri": @uri spliced in.
- */
-void migrate_qmp(QTestState *who, QTestState *to, const char *uri,
- const char *channels, const char *fmt, ...)
-{
- va_list ap;
- QDict *args;
- g_autofree char *connect_uri = NULL;
-
- va_start(ap, fmt);
- args = qdict_from_vjsonf_nofail(fmt, ap);
- va_end(ap);
-
- g_assert(!qdict_haskey(args, "uri"));
- if (uri) {
- qdict_put_str(args, "uri", uri);
- } else if (!channels) {
- connect_uri = migrate_get_connect_uri(to);
- qdict_put_str(args, "uri", connect_uri);
- }
-
- g_assert(!qdict_haskey(args, "channels"));
- if (channels) {
- QObject *channels_obj = qobject_from_json(channels, &error_abort);
- QList *channel_list = qobject_to(QList, channels_obj);
- migrate_set_ports(to, channel_list);
- qdict_put_obj(args, "channels", channels_obj);
- }
-
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate', 'arguments': %p}", args);
-}
-
-void migrate_set_capability(QTestState *who, const char *capability,
- bool value)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-set-capabilities',"
- "'arguments': { "
- "'capabilities': [ { "
- "'capability': %s, 'state': %i } ] } }",
- capability, value);
-}
-
-void migrate_incoming_qmp(QTestState *to, const char *uri, const char *fmt, ...)
-{
- va_list ap;
- QDict *args, *rsp;
-
- va_start(ap, fmt);
- args = qdict_from_vjsonf_nofail(fmt, ap);
- va_end(ap);
-
- g_assert(!qdict_haskey(args, "uri"));
- qdict_put_str(args, "uri", uri);
-
- /* This function relies on the event to work, make sure it's enabled */
- migrate_set_capability(to, "events", true);
-
- rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}",
- args);
-
- if (!qdict_haskey(rsp, "return")) {
- g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true);
- g_test_message("%s", s->str);
- }
-
- g_assert(qdict_haskey(rsp, "return"));
- qobject_unref(rsp);
-
- migration_event_wait(to, "setup");
-}
-
-/*
- * Note: caller is responsible to free the returned object via
- * qobject_unref() after use
- */
-QDict *migrate_query(QTestState *who)
-{
- return qtest_qmp_assert_success_ref(who, "{ 'execute': 'query-migrate' }");
-}
-
-QDict *migrate_query_not_failed(QTestState *who)
-{
- const char *status;
- QDict *rsp = migrate_query(who);
- status = qdict_get_str(rsp, "status");
- if (g_str_equal(status, "failed")) {
- g_printerr("query-migrate shows failed migration: %s\n",
- qdict_get_str(rsp, "error-desc"));
- }
- g_assert(!g_str_equal(status, "failed"));
- return rsp;
-}
-
-/*
- * Note: caller is responsible to free the returned object via
- * g_free() after use
- */
-static gchar *migrate_query_status(QTestState *who)
-{
- QDict *rsp_return = migrate_query(who);
- gchar *status = g_strdup(qdict_get_str(rsp_return, "status"));
-
- g_assert(status);
- qobject_unref(rsp_return);
-
- return status;
-}
-
-static bool check_migration_status(QTestState *who, const char *goal,
- const char **ungoals)
-{
- bool ready;
- char *current_status;
- const char **ungoal;
-
- current_status = migrate_query_status(who);
- ready = strcmp(current_status, goal) == 0;
- if (!ungoals) {
- g_assert_cmpstr(current_status, !=, "failed");
- /*
- * If looking for a state other than completed,
- * completion of migration would cause the test to
- * hang.
- */
- if (strcmp(goal, "completed") != 0) {
- g_assert_cmpstr(current_status, !=, "completed");
- }
- } else {
- for (ungoal = ungoals; *ungoal; ungoal++) {
- g_assert_cmpstr(current_status, !=, *ungoal);
- }
- }
- g_free(current_status);
- return ready;
-}
-
-void wait_for_migration_status(QTestState *who,
- const char *goal, const char **ungoals)
-{
- g_test_timer_start();
- while (!check_migration_status(who, goal, ungoals)) {
- usleep(1000);
-
- g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
- }
-}
-
-void wait_for_migration_complete(QTestState *who)
-{
- wait_for_migration_status(who, "completed", NULL);
-}
-
-void wait_for_migration_fail(QTestState *from, bool allow_active)
-{
- g_test_timer_start();
- QDict *rsp_return;
- char *status;
- bool failed;
-
- do {
- status = migrate_query_status(from);
- bool result = !strcmp(status, "setup") || !strcmp(status, "failed") ||
- (allow_active && !strcmp(status, "active"));
- if (!result) {
- fprintf(stderr, "%s: unexpected status status=%s allow_active=%d\n",
- __func__, status, allow_active);
- }
- g_assert(result);
- failed = !strcmp(status, "failed");
- g_free(status);
-
- g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
- } while (!failed);
-
- /* Is the machine currently running? */
- rsp_return = qtest_qmp_assert_success_ref(from,
- "{ 'execute': 'query-status' }");
- g_assert(qdict_haskey(rsp_return, "running"));
- g_assert(qdict_get_bool(rsp_return, "running"));
- qobject_unref(rsp_return);
-}
-
-char *find_common_machine_version(const char *mtype, const char *var1,
- const char *var2)
-{
- g_autofree char *type1 = qtest_resolve_machine_alias(var1, mtype);
- g_autofree char *type2 = qtest_resolve_machine_alias(var2, mtype);
-
- g_assert(type1 && type2);
-
- if (g_str_equal(type1, type2)) {
- /* either can be used */
- return g_strdup(type1);
- }
-
- if (qtest_has_machine_with_env(var2, type1)) {
- return g_strdup(type1);
- }
-
- if (qtest_has_machine_with_env(var1, type2)) {
- return g_strdup(type2);
- }
-
- g_test_message("No common machine version for machine type '%s' between "
- "binaries %s and %s", mtype, getenv(var1), getenv(var2));
- g_assert_not_reached();
-}
-
-char *resolve_machine_version(const char *alias, const char *var1,
- const char *var2)
-{
- const char *mname = g_getenv("QTEST_QEMU_MACHINE_TYPE");
- g_autofree char *machine_name = NULL;
-
- if (mname) {
- const char *dash = strrchr(mname, '-');
- const char *dot = strrchr(mname, '.');
-
- machine_name = g_strdup(mname);
-
- if (dash && dot) {
- assert(qtest_has_machine(machine_name));
- return g_steal_pointer(&machine_name);
- }
- /* else: probably an alias, let it be resolved below */
- } else {
- /* use the hardcoded alias */
- machine_name = g_strdup(alias);
- }
-
- return find_common_machine_version(machine_name, var1, var2);
-}
-
-typedef struct {
- char *name;
- void (*func)(void);
-} MigrationTest;
-
-static void migration_test_destroy(gpointer data)
-{
- MigrationTest *test = (MigrationTest *)data;
-
- g_free(test->name);
- g_free(test);
-}
-
-static void migration_test_wrapper(const void *data)
-{
- MigrationTest *test = (MigrationTest *)data;
-
- g_test_message("Running /%s%s", qtest_get_arch(), test->name);
- test->func();
-}
-
-void migration_test_add(const char *path, void (*fn)(void))
-{
- MigrationTest *test = g_new0(MigrationTest, 1);
-
- test->func = fn;
- test->name = g_strdup(path);
-
- qtest_add_data_func_full(path, test, migration_test_wrapper,
- migration_test_destroy);
-}
-
-#ifdef O_DIRECT
-/*
- * Probe for O_DIRECT support on the filesystem. Since this is used
- * for tests, be conservative, if anything fails, assume it's
- * unsupported.
- */
-bool probe_o_direct_support(const char *tmpfs)
-{
- g_autofree char *filename = g_strdup_printf("%s/probe-o-direct", tmpfs);
- int fd, flags = O_CREAT | O_RDWR | O_TRUNC | O_DIRECT;
- void *buf;
- ssize_t ret, len;
- uint64_t offset;
-
- fd = open(filename, flags, 0660);
- if (fd < 0) {
- unlink(filename);
- return false;
- }
-
- /*
- * Using 1MB alignment as conservative choice to satisfy any
- * plausible architecture default page size, and/or filesystem
- * alignment restrictions.
- */
- len = 0x100000;
- offset = 0x100000;
-
- buf = qemu_try_memalign(len, len);
- g_assert(buf);
-
- ret = pwrite(fd, buf, len, offset);
- unlink(filename);
- g_free(buf);
-
- if (ret < 0) {
- return false;
- }
-
- return true;
-}
-#endif
-
-/*
- * Wait for a "MIGRATION" event. This is what Libvirt uses to track
- * migration status changes.
- */
-void migration_event_wait(QTestState *s, const char *target)
-{
- QDict *response, *data;
- const char *status;
- bool found;
-
- do {
- response = qtest_qmp_eventwait_ref(s, "MIGRATION");
- data = qdict_get_qdict(response, "data");
- g_assert(data);
- status = qdict_get_str(data, "status");
- found = (strcmp(status, target) == 0);
- qobject_unref(response);
- } while (!found);
-}
diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h
deleted file mode 100644
index 72dba36..0000000
--- a/tests/qtest/migration-helpers.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * QTest migration helpers
- *
- * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
- * based on the vhost-user-test.c that is:
- * Copyright (c) 2014 Virtual Open Systems Sarl.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef MIGRATION_HELPERS_H
-#define MIGRATION_HELPERS_H
-
-#include "libqtest.h"
-
-typedef struct QTestMigrationState {
- bool stop_seen;
- bool resume_seen;
- bool suspend_seen;
- bool suspend_me;
-} QTestMigrationState;
-
-bool migrate_watch_for_events(QTestState *who, const char *name,
- QDict *event, void *opaque);
-
-G_GNUC_PRINTF(5, 6)
-void migrate_qmp(QTestState *who, QTestState *to, const char *uri,
- const char *channels, const char *fmt, ...);
-
-G_GNUC_PRINTF(3, 4)
-void migrate_incoming_qmp(QTestState *who, const char *uri,
- const char *fmt, ...);
-
-G_GNUC_PRINTF(4, 5)
-void migrate_qmp_fail(QTestState *who, const char *uri,
- const char *channels, const char *fmt, ...);
-
-void migrate_set_capability(QTestState *who, const char *capability,
- bool value);
-
-QDict *migrate_query(QTestState *who);
-QDict *migrate_query_not_failed(QTestState *who);
-
-void wait_for_migration_status(QTestState *who,
- const char *goal, const char **ungoals);
-
-void wait_for_migration_complete(QTestState *who);
-
-void wait_for_migration_fail(QTestState *from, bool allow_active);
-
-char *find_common_machine_version(const char *mtype, const char *var1,
- const char *var2);
-char *resolve_machine_version(const char *alias, const char *var1,
- const char *var2);
-#ifdef O_DIRECT
-bool probe_o_direct_support(const char *tmpfs);
-#else
-static inline bool probe_o_direct_support(const char *tmpfs)
-{
- return false;
-}
-#endif
-void migration_test_add(const char *path, void (*fn)(void));
-void migration_event_wait(QTestState *s, const char *target);
-
-#endif /* MIGRATION_HELPERS_H */
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 70b606b..0893687 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -11,4032 +11,56 @@
*/
#include "qemu/osdep.h"
-
-#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "migration/framework.h"
#include "qemu/module.h"
-#include "qemu/option.h"
-#include "qemu/range.h"
-#include "qemu/sockets.h"
-#include "chardev/char.h"
-#include "crypto/tlscredspsk.h"
-#include "qapi/qmp/qlist.h"
-#include "ppc-util.h"
-
-#include "migration-helpers.h"
-#include "tests/migration/migration-test.h"
-#ifdef CONFIG_GNUTLS
-# include "tests/unit/crypto-tls-psk-helpers.h"
-# ifdef CONFIG_TASN1
-# include "tests/unit/crypto-tls-x509-helpers.h"
-# endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
-/* For dirty ring test; so far only x86_64 is supported */
-#if defined(__linux__) && defined(HOST_X86_64)
-#include "linux/kvm.h"
-#endif
-
-unsigned start_address;
-unsigned end_address;
-static bool uffd_feature_thread_id;
-static QTestMigrationState src_state;
-static QTestMigrationState dst_state;
-
-/*
- * An initial 3 MB offset is used as that corresponds
- * to ~1 sec of data transfer with our bandwidth setting.
- */
-#define MAGIC_OFFSET_BASE (3 * 1024 * 1024)
-/*
- * A further 1k is added to ensure we're not a multiple
- * of TEST_MEM_PAGE_SIZE, thus avoid clash with writes
- * from the migration guest workload.
- */
-#define MAGIC_OFFSET_SHUFFLE 1024
-#define MAGIC_OFFSET (MAGIC_OFFSET_BASE + MAGIC_OFFSET_SHUFFLE)
-#define MAGIC_MARKER 0xFEED12345678CAFEULL
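As a quick check of the "~1 sec" estimate above, assuming max-bandwidth is expressed in bytes per second (the cap applied later by migrate_ensure_non_converge()):

    MAGIC_OFFSET_BASE / max-bandwidth = (3 * 1024 * 1024 B) / (3,000,000 B/s) ~= 1.05 s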
-
-/*
- * The dirty limit stops working if the dirty page rate error
- * value is less than DIRTYLIMIT_TOLERANCE_RANGE.
- */
-#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */
-
-#define ANALYZE_SCRIPT "scripts/analyze-migration.py"
-#define VMSTATE_CHECKER_SCRIPT "scripts/vmstate-static-checker.py"
-
-#define QEMU_VM_FILE_MAGIC 0x5145564d
-#define FILE_TEST_FILENAME "migfile"
-#define FILE_TEST_OFFSET 0x1000
-#define FILE_TEST_MARKER 'X'
-#define QEMU_ENV_SRC "QTEST_QEMU_BINARY_SRC"
-#define QEMU_ENV_DST "QTEST_QEMU_BINARY_DST"
-
-typedef enum PostcopyRecoveryFailStage {
- /*
- * "no failure" must be 0 as it's the default. OTOH, real failure
- * cases must be >0 to make sure they can be caught by an "if" test.
- */
- POSTCOPY_FAIL_NONE = 0,
- POSTCOPY_FAIL_CHANNEL_ESTABLISH,
- POSTCOPY_FAIL_RECOVERY,
- POSTCOPY_FAIL_MAX
-} PostcopyRecoveryFailStage;
-
-#if defined(__linux__)
-#include <sys/syscall.h>
-#include <sys/vfs.h>
-#endif
-
-#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include "qemu/userfaultfd.h"
-
-static bool ufd_version_check(void)
-{
- struct uffdio_api api_struct;
- uint64_t ioctl_mask;
-
- int ufd = uffd_open(O_CLOEXEC);
-
- if (ufd == -1) {
- g_test_message("Skipping test: userfaultfd not available");
- return false;
- }
-
- api_struct.api = UFFD_API;
- api_struct.features = 0;
- if (ioctl(ufd, UFFDIO_API, &api_struct)) {
- g_test_message("Skipping test: UFFDIO_API failed");
- return false;
- }
- uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID;
-
- ioctl_mask = 1ULL << _UFFDIO_REGISTER |
- 1ULL << _UFFDIO_UNREGISTER;
- if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
- g_test_message("Skipping test: Missing userfault feature");
- return false;
- }
-
- return true;
-}
-#else
-static bool ufd_version_check(void)
+static void parse_args(int *argc_p, char ***argv_p, bool *full_set)
{
- g_test_message("Skipping test: Userfault not available (builtdtime)");
- return false;
-}
-
-#endif
+ int argc = *argc_p;
+ char **argv = *argv_p;
+ int i, j;
-static char *tmpfs;
-static char *bootpath;
-
-/* The boot file modifies the memory area in [start_address, end_address)
- * repeatedly. It outputs a 'B' at a fixed rate while it's still running.
- */
-#include "tests/migration/i386/a-b-bootblock.h"
-#include "tests/migration/aarch64/a-b-kernel.h"
-#include "tests/migration/ppc64/a-b-kernel.h"
-#include "tests/migration/s390x/a-b-bios.h"
-
-static void bootfile_create(char *dir, bool suspend_me)
-{
- const char *arch = qtest_get_arch();
- unsigned char *content;
- size_t len;
-
- bootpath = g_strdup_printf("%s/bootsect", dir);
- if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
- /* the assembled x86 boot sector should be exactly one sector large */
- g_assert(sizeof(x86_bootsect) == 512);
- x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me;
- content = x86_bootsect;
- len = sizeof(x86_bootsect);
- } else if (g_str_equal(arch, "s390x")) {
- content = s390x_elf;
- len = sizeof(s390x_elf);
- } else if (strcmp(arch, "ppc64") == 0) {
- content = ppc64_kernel;
- len = sizeof(ppc64_kernel);
- } else if (strcmp(arch, "aarch64") == 0) {
- content = aarch64_kernel;
- len = sizeof(aarch64_kernel);
- g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE);
- } else {
- g_assert_not_reached();
- }
-
- FILE *bootfile = fopen(bootpath, "wb");
-
- g_assert_cmpint(fwrite(content, len, 1, bootfile), ==, 1);
- fclose(bootfile);
-}
-
-static void bootfile_delete(void)
-{
- unlink(bootpath);
- g_free(bootpath);
- bootpath = NULL;
-}
-
-/*
- * Wait for some output in the serial output file: we get an 'A'
- * followed by an endless string of 'B's, but on the destination
- * we won't see the 'A' (unless we enabled suspend/resume).
- */
-static void wait_for_serial(const char *side)
-{
- g_autofree char *serialpath = g_strdup_printf("%s/%s", tmpfs, side);
- FILE *serialfile = fopen(serialpath, "r");
-
- do {
- int readvalue = fgetc(serialfile);
-
- switch (readvalue) {
- case 'A':
- /* Fine */
- break;
-
- case 'B':
- /* It's alive! */
- fclose(serialfile);
- return;
-
- case EOF:
- fseek(serialfile, 0, SEEK_SET);
- usleep(1000);
- break;
-
- default:
- fprintf(stderr, "Unexpected %d on %s serial\n", readvalue, side);
- g_assert_not_reached();
+ j = 1;
+ for (i = 1; i < argc; i++) {
+ if (g_str_equal(argv[i], "--full")) {
+ *full_set = true;
+ continue;
}
- } while (true);
-}
-
-static void wait_for_stop(QTestState *who, QTestMigrationState *state)
-{
- if (!state->stop_seen) {
- qtest_qmp_eventwait(who, "STOP");
- }
-}
-
-static void wait_for_resume(QTestState *who, QTestMigrationState *state)
-{
- if (!state->resume_seen) {
- qtest_qmp_eventwait(who, "RESUME");
- }
-}
-
-static void wait_for_suspend(QTestState *who, QTestMigrationState *state)
-{
- if (state->suspend_me && !state->suspend_seen) {
- qtest_qmp_eventwait(who, "SUSPEND");
- }
-}
-
-/*
- * It's tricky to use qemu's migration event capability with qtest:
- * events suddenly appearing confuse the qmp()/hmp() responses.
- */
-
-static int64_t read_ram_property_int(QTestState *who, const char *property)
-{
- QDict *rsp_return, *rsp_ram;
- int64_t result;
-
- rsp_return = migrate_query_not_failed(who);
- if (!qdict_haskey(rsp_return, "ram")) {
- /* Still in setup */
- result = 0;
- } else {
- rsp_ram = qdict_get_qdict(rsp_return, "ram");
- result = qdict_get_try_int(rsp_ram, property, 0);
- }
- qobject_unref(rsp_return);
- return result;
-}
-
-static int64_t read_migrate_property_int(QTestState *who, const char *property)
-{
- QDict *rsp_return;
- int64_t result;
-
- rsp_return = migrate_query_not_failed(who);
- result = qdict_get_try_int(rsp_return, property, 0);
- qobject_unref(rsp_return);
- return result;
-}
-
-static uint64_t get_migration_pass(QTestState *who)
-{
- return read_ram_property_int(who, "dirty-sync-count");
-}
-
-static void read_blocktime(QTestState *who)
-{
- QDict *rsp_return;
-
- rsp_return = migrate_query_not_failed(who);
- g_assert(qdict_haskey(rsp_return, "postcopy-blocktime"));
- qobject_unref(rsp_return);
-}
-
-/*
- * Wait for two changes in the migration pass count, but bail if we stop.
- */
-static void wait_for_migration_pass(QTestState *who)
-{
- uint64_t pass, prev_pass = 0, changes = 0;
-
- while (changes < 2 && !src_state.stop_seen && !src_state.suspend_seen) {
- usleep(1000);
- pass = get_migration_pass(who);
- changes += (pass != prev_pass);
- prev_pass = pass;
- }
-}
-
-static void check_guests_ram(QTestState *who)
-{
- /* Our ASM test will have been incrementing one byte from each page from
- * start_address to < end_address in order. This gives us a constraint
- * that any page's byte should be equal to or less than the previous page's
- * byte (mod 256); and they should all be equal except for one transition
- * at the point where we meet the incrementer. (We're running this with
- * the guest stopped).
- */
- unsigned address;
- uint8_t first_byte;
- uint8_t last_byte;
- bool hit_edge = false;
- int bad = 0;
-
- qtest_memread(who, start_address, &first_byte, 1);
- last_byte = first_byte;
-
- for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address;
- address += TEST_MEM_PAGE_SIZE)
- {
- uint8_t b;
- qtest_memread(who, address, &b, 1);
- if (b != last_byte) {
- if (((b + 1) % 256) == last_byte && !hit_edge) {
- /* This is OK, the guest stopped at the point of
- * incrementing the previous page but didn't get
- * to us yet.
- */
- hit_edge = true;
- last_byte = b;
- } else {
- bad++;
- if (bad <= 10) {
- fprintf(stderr, "Memory content inconsistency at %x"
- " first_byte = %x last_byte = %x current = %x"
- " hit_edge = %x\n",
- address, first_byte, last_byte, b, hit_edge);
- }
- }
+ argv[j++] = argv[i];
+ if (i >= j) {
+ argv[i] = NULL;
}
}
- if (bad >= 10) {
- fprintf(stderr, "and in another %d pages", bad - 10);
- }
- g_assert(bad == 0);
-}
-
-static void cleanup(const char *filename)
-{
- g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, filename);
-
- unlink(path);
-}
-
-static long long migrate_get_parameter_int(QTestState *who,
- const char *parameter)
-{
- QDict *rsp;
- long long result;
-
- rsp = qtest_qmp_assert_success_ref(
- who, "{ 'execute': 'query-migrate-parameters' }");
- result = qdict_get_int(rsp, parameter);
- qobject_unref(rsp);
- return result;
-}
-
-static void migrate_check_parameter_int(QTestState *who, const char *parameter,
- long long value)
-{
- long long result;
-
- result = migrate_get_parameter_int(who, parameter);
- g_assert_cmpint(result, ==, value);
-}
-
-static void migrate_set_parameter_int(QTestState *who, const char *parameter,
- long long value)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-set-parameters',"
- "'arguments': { %s: %lld } }",
- parameter, value);
- migrate_check_parameter_int(who, parameter, value);
-}
-
-static char *migrate_get_parameter_str(QTestState *who,
- const char *parameter)
-{
- QDict *rsp;
- char *result;
-
- rsp = qtest_qmp_assert_success_ref(
- who, "{ 'execute': 'query-migrate-parameters' }");
- result = g_strdup(qdict_get_str(rsp, parameter));
- qobject_unref(rsp);
- return result;
-}
-
-static void migrate_check_parameter_str(QTestState *who, const char *parameter,
- const char *value)
-{
- g_autofree char *result = migrate_get_parameter_str(who, parameter);
- g_assert_cmpstr(result, ==, value);
-}
-
-static void migrate_set_parameter_str(QTestState *who, const char *parameter,
- const char *value)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-set-parameters',"
- "'arguments': { %s: %s } }",
- parameter, value);
- migrate_check_parameter_str(who, parameter, value);
-}
-
-static long long migrate_get_parameter_bool(QTestState *who,
- const char *parameter)
-{
- QDict *rsp;
- int result;
-
- rsp = qtest_qmp_assert_success_ref(
- who, "{ 'execute': 'query-migrate-parameters' }");
- result = qdict_get_bool(rsp, parameter);
- qobject_unref(rsp);
- return !!result;
-}
-
-static void migrate_check_parameter_bool(QTestState *who, const char *parameter,
- int value)
-{
- int result;
-
- result = migrate_get_parameter_bool(who, parameter);
- g_assert_cmpint(result, ==, value);
-}
-
-static void migrate_set_parameter_bool(QTestState *who, const char *parameter,
- int value)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-set-parameters',"
- "'arguments': { %s: %i } }",
- parameter, value);
- migrate_check_parameter_bool(who, parameter, value);
-}
-
-static void migrate_ensure_non_converge(QTestState *who)
-{
- /* Can't converge with 1ms downtime + 3 MB/s bandwidth limit */
- migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000);
- migrate_set_parameter_int(who, "downtime-limit", 1);
-}
-
-static void migrate_ensure_converge(QTestState *who)
-{
- /* Should converge with 30s downtime + 1 GB/s bandwidth limit */
- migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000);
- migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
-}
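A rough sanity check on these two helpers, assuming max-bandwidth is in bytes per second and using the guest RAM sizes configured by test_migrate_start() below (128M to 256M):

    non-converge: 3 MB/s * 1 ms downtime  ~= 3 KB   (far less than the workload keeps dirty)
    converge:     1 GB/s * 30 s downtime  ~= 30 GB  (far more than the whole guest RAM)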
-
-/*
- * Our goal is to ensure that we run a single full migration
- * iteration, and also dirty memory, ensuring that at least
- * one further iteration is required.
- *
- * We can't directly synchronize with the start of a migration
- * so we have to apply some tricks monitoring memory that is
- * transferred.
- *
- * Initially we set the migration bandwidth to an insanely
- * low value, with tiny max downtime too. This basically
- * guarantees migration will never complete.
- *
- * This will result in a test that is unacceptably slow though,
- * so we can't let the entire migration pass run at this speed.
- * Our intent is to let it run just long enough that we can
- * prove data prior to the marker has been transferred *AND*
- * also prove this transferred data is dirty again.
- *
- * Before migration starts, we write a 64-bit magic marker
- * into a fixed location in the src VM RAM.
- *
- * Then watch dst memory until the marker appears. This is
- * proof that start_address -> MAGIC_OFFSET_BASE has been
- * transferred.
- *
- * Finally we go back to the source and read a byte just
- * before the marker until we see it flip in value. This
- * is proof that start_address -> MAGIC_OFFSET_BASE
- * is now dirty again.
- *
- * IOW, we're guaranteed at least a 2nd migration pass
- * at this point.
- *
- * We can now let migration run at full speed to finish
- * the test
- */
-static void migrate_prepare_for_dirty_mem(QTestState *from)
-{
- /*
- * The guest workflow iterates from start_address to
- * end_address, writing 1 byte every TEST_MEM_PAGE_SIZE
- * bytes.
- *
- * IOW, if we write to mem at a point which is NOT
- * a multiple of TEST_MEM_PAGE_SIZE, our write won't
- * conflict with the migration workflow.
- *
- * We put in a marker here, that we'll use to determine
- * when the data has been transferred to the dst.
- */
- qtest_writeq(from, start_address + MAGIC_OFFSET, MAGIC_MARKER);
-}
-
-static void migrate_wait_for_dirty_mem(QTestState *from,
- QTestState *to)
-{
- uint64_t watch_address = start_address + MAGIC_OFFSET_BASE;
- uint64_t marker_address = start_address + MAGIC_OFFSET;
- uint8_t watch_byte;
-
- /*
- * Wait for the MAGIC_MARKER to get transferred, as an
- * indicator that a migration pass has made some known
- * amount of progress.
- */
- do {
- usleep(1000 * 10);
- } while (qtest_readq(to, marker_address) != MAGIC_MARKER);
-
-
- /* If suspended, src only iterates once, and watch_byte may never change */
- if (src_state.suspend_me) {
- return;
- }
-
- /*
- * Now ensure that already transferred bytes are
- * dirty again from the guest workload. Note the
- * guest byte value will wrap around and may by chance
- * match the original watch_byte. This is harmless
- * as we'll eventually see a different value if we
- * keep watching
- */
- watch_byte = qtest_readb(from, watch_address);
- do {
- usleep(1000 * 10);
- } while (qtest_readb(from, watch_address) == watch_byte);
-}
-
-
-static void migrate_pause(QTestState *who)
-{
- qtest_qmp_assert_success(who, "{ 'execute': 'migrate-pause' }");
-}
-
-static void migrate_continue(QTestState *who, const char *state)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-continue',"
- " 'arguments': { 'state': %s } }",
- state);
-}
-
-static void migrate_recover(QTestState *who, const char *uri)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'migrate-recover', "
- " 'id': 'recover-cmd', "
- " 'arguments': { 'uri': %s } }",
- uri);
-}
-
-static void migrate_cancel(QTestState *who)
-{
- qtest_qmp_assert_success(who, "{ 'execute': 'migrate_cancel' }");
-}
-
-static void migrate_postcopy_start(QTestState *from, QTestState *to)
-{
- qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }");
-
- wait_for_stop(from, &src_state);
- qtest_qmp_eventwait(to, "RESUME");
-}
-
-typedef struct {
- /*
- * QTEST_LOG=1 may override this. When QTEST_LOG=1, we dump errors
- * unconditionally, because it means the user would like to be verbose.
- */
- bool hide_stderr;
- bool use_shmem;
- /* only launch the target process */
- bool only_target;
- /* Use dirty ring if true; dirty logging otherwise */
- bool use_dirty_ring;
- const char *opts_source;
- const char *opts_target;
- /* suspend the src before migrating to dest. */
- bool suspend_me;
-} MigrateStart;
-
-/*
- * A hook that runs after the src and dst QEMUs have been
- * created, but before the migration is started. This can
- * be used to set migration parameters and capabilities.
- *
- * Returns: NULL, or a pointer to opaque state to be
- * later passed to the TestMigrateFinishHook
- */
-typedef void * (*TestMigrateStartHook)(QTestState *from,
- QTestState *to);
-
-/*
- * A hook that runs after the migration has finished,
- * regardless of whether it succeeded or failed, but
- * before QEMU has terminated (unless it self-terminated
- * due to migration error)
- *
- * @opaque is a pointer to state previously returned
- * by the TestMigrateStartHook if any, or NULL.
- */
-typedef void (*TestMigrateFinishHook)(QTestState *from,
- QTestState *to,
- void *opaque);
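A minimal sketch (not from this patch) of how a start/finish hook pair threads state through the opaque pointer; the ExampleHookState type and the parameter value are purely illustrative.

    typedef struct { char *scratch_dir; } ExampleHookState;

    static void *example_start_hook(QTestState *from, QTestState *to)
    {
        ExampleHookState *s = g_new0(ExampleHookState, 1);

        /* e.g. tune a migration parameter before 'migrate' is issued */
        migrate_set_parameter_int(from, "downtime-limit", 300);

        s->scratch_dir = g_strdup("/tmp/example-hook");
        return s;  /* later handed back to the finish hook as @opaque */
    }

    static void example_finish_hook(QTestState *from, QTestState *to, void *opaque)
    {
        ExampleHookState *s = opaque;

        g_free(s->scratch_dir);
        g_free(s);
    }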
-
-typedef struct {
- /* Optional: fine tune start parameters */
- MigrateStart start;
-
- /* Required: the URI for the dst QEMU to listen on */
- const char *listen_uri;
-
- /*
- * Optional: the URI for the src QEMU to connect to
- * If NULL, then it will query the dst QEMU for its actual
- * listening address and use that as the connect address.
- * This allows for dynamically picking a free TCP port.
- */
- const char *connect_uri;
-
- /*
- * Optional: JSON-formatted list of src QEMU migration channels. If a
- * port is defined as '0' in any channel, it will automatically be
- * converted to the correct destination port (see the sketch after
- * this struct).
- */
- const char *connect_channels;
-
- /* Optional: callback to run at start to set migration parameters */
- TestMigrateStartHook start_hook;
- /* Optional: callback to run at finish to cleanup */
- TestMigrateFinishHook finish_hook;
-
- /*
- * Optional: normally we expect the migration process to complete.
- *
- * There can be a variety of reasons and stages in which failure
- * can happen during tests.
- *
- * If a failure is expected to happen at the time of establishing
- * the connection, then MIG_TEST_FAIL will indicate that the dst
- * QEMU is expected to stay running and accept future migration
- * connections.
- *
- * If a failure is expected to happen while processing the
- * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate
- * that the dst QEMU is expected to quit with non-zero exit status
- */
- enum {
- /* This test should succeed, the default */
- MIG_TEST_SUCCEED = 0,
- /* This test should fail, dest qemu should keep alive */
- MIG_TEST_FAIL,
- /* This test should fail, dest qemu should fail with abnormal status */
- MIG_TEST_FAIL_DEST_QUIT_ERR,
- /* The QMP command for this migration should fail with an error */
- MIG_TEST_QMP_ERROR,
- } result;
-
- /*
- * Optional: set number of migration passes to wait for, if live==true.
- * If zero, then merely wait for a few MB of dirty data
- */
- unsigned int iterations;
-
- /*
- * Optional: whether the guest CPUs should be running during a precopy
- * migration test. We used to always run live, but it took much
- * longer, so we reduced live tests to only the ones that have a
- * solid reason to be tested live-only. For each new precopy test
- * case, please provide a justification for using live explicitly
- * (refer to existing ones with live=true), or default to live=off.
- */
- bool live;
-
- /* Postcopy specific fields */
- void *postcopy_data;
- bool postcopy_preempt;
- PostcopyRecoveryFailStage postcopy_recovery_fail_stage;
-} MigrateCommon;
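A sketch of what .connect_channels can look like, mirroring the channel layout used for 'migrate-incoming' elsewhere in this file; the values are placeholders and the port '0' gets rewritten to the real destination port.

    /* Hypothetical arguments; only .connect_channels matters here. */
    MigrateCommon args = {
        .listen_uri = "defer",
        .connect_channels =
            "[ { 'channel-type': 'main',"
            "    'addr': { 'transport': 'socket',"
            "              'type': 'inet',"
            "              'host': '127.0.0.1',"
            "              'port': '0' } } ]",
    };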
-
-static int test_migrate_start(QTestState **from, QTestState **to,
- const char *uri, MigrateStart *args)
-{
- g_autofree gchar *arch_source = NULL;
- g_autofree gchar *arch_target = NULL;
- /* options for source and target */
- g_autofree gchar *arch_opts = NULL;
- g_autofree gchar *cmd_source = NULL;
- g_autofree gchar *cmd_target = NULL;
- const gchar *ignore_stderr;
- g_autofree char *shmem_opts = NULL;
- g_autofree char *shmem_path = NULL;
- const char *kvm_opts = NULL;
- const char *arch = qtest_get_arch();
- const char *memory_size;
- const char *machine_alias, *machine_opts = "";
- g_autofree char *machine = NULL;
-
- if (args->use_shmem) {
- if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) {
- g_test_skip("/dev/shm is not supported");
- return -1;
- }
- }
-
- dst_state = (QTestMigrationState) { };
- src_state = (QTestMigrationState) { };
- bootfile_create(tmpfs, args->suspend_me);
- src_state.suspend_me = args->suspend_me;
-
- if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
- memory_size = "150M";
-
- if (g_str_equal(arch, "i386")) {
- machine_alias = "pc";
- } else {
- machine_alias = "q35";
- }
- arch_opts = g_strdup_printf(
- "-drive if=none,id=d0,file=%s,format=raw "
- "-device ide-hd,drive=d0,secs=1,cyls=1,heads=1", bootpath);
- start_address = X86_TEST_MEM_START;
- end_address = X86_TEST_MEM_END;
- } else if (g_str_equal(arch, "s390x")) {
- memory_size = "128M";
- machine_alias = "s390-ccw-virtio";
- arch_opts = g_strdup_printf("-bios %s", bootpath);
- start_address = S390_TEST_MEM_START;
- end_address = S390_TEST_MEM_END;
- } else if (strcmp(arch, "ppc64") == 0) {
- memory_size = "256M";
- start_address = PPC_TEST_MEM_START;
- end_address = PPC_TEST_MEM_END;
- machine_alias = "pseries";
- machine_opts = "vsmt=8";
- arch_opts = g_strdup_printf(
- "-nodefaults -machine " PSERIES_DEFAULT_CAPABILITIES " "
- "-bios %s", bootpath);
- } else if (strcmp(arch, "aarch64") == 0) {
- memory_size = "150M";
- machine_alias = "virt";
- machine_opts = "gic-version=3";
- arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath);
- start_address = ARM_TEST_MEM_START;
- end_address = ARM_TEST_MEM_END;
- } else {
- g_assert_not_reached();
- }
-
- if (!getenv("QTEST_LOG") && args->hide_stderr) {
-#ifndef _WIN32
- ignore_stderr = "2>/dev/null";
-#else
- /*
- * On Windows the QEMU executable is created via CreateProcess() and
- * IO redirection does not work, so don't bother adding IO redirection
- * to the command line.
- */
- ignore_stderr = "";
-#endif
- } else {
- ignore_stderr = "";
- }
-
- if (args->use_shmem) {
- shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid());
- shmem_opts = g_strdup_printf(
- "-object memory-backend-file,id=mem0,size=%s"
- ",mem-path=%s,share=on -numa node,memdev=mem0",
- memory_size, shmem_path);
- }
-
- if (args->use_dirty_ring) {
- kvm_opts = ",dirty-ring-size=4096";
- }
-
- if (!qtest_has_machine(machine_alias)) {
- g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias);
- g_test_skip(msg);
- return -1;
- }
-
- machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC,
- QEMU_ENV_DST);
-
- g_test_message("Using machine type: %s", machine);
-
- cmd_source = g_strdup_printf("-accel kvm%s -accel tcg "
- "-machine %s,%s "
- "-name source,debug-threads=on "
- "-m %s "
- "-serial file:%s/src_serial "
- "%s %s %s %s %s",
- kvm_opts ? kvm_opts : "",
- machine, machine_opts,
- memory_size, tmpfs,
- arch_opts ? arch_opts : "",
- arch_source ? arch_source : "",
- shmem_opts ? shmem_opts : "",
- args->opts_source ? args->opts_source : "",
- ignore_stderr);
- if (!args->only_target) {
- *from = qtest_init_with_env(QEMU_ENV_SRC, cmd_source);
- qtest_qmp_set_event_callback(*from,
- migrate_watch_for_events,
- &src_state);
- }
-
- cmd_target = g_strdup_printf("-accel kvm%s -accel tcg "
- "-machine %s,%s "
- "-name target,debug-threads=on "
- "-m %s "
- "-serial file:%s/dest_serial "
- "-incoming %s "
- "%s %s %s %s %s",
- kvm_opts ? kvm_opts : "",
- machine, machine_opts,
- memory_size, tmpfs, uri,
- arch_opts ? arch_opts : "",
- arch_target ? arch_target : "",
- shmem_opts ? shmem_opts : "",
- args->opts_target ? args->opts_target : "",
- ignore_stderr);
- *to = qtest_init_with_env(QEMU_ENV_DST, cmd_target);
- qtest_qmp_set_event_callback(*to,
- migrate_watch_for_events,
- &dst_state);
-
- /*
- * Remove the shmem file immediately to avoid a memory leak if the
- * test fails. This is valid because QEMU has already opened the file.
- */
- if (args->use_shmem) {
- unlink(shmem_path);
- }
-
- /*
- * Always enable migration events. Libvirt always uses them, so let's
- * try to mimic that as closely as possible.
- */
- migrate_set_capability(*from, "events", true);
- migrate_set_capability(*to, "events", true);
-
- return 0;
-}
-
-static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest)
-{
- unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d;
-
- qtest_quit(from);
-
- if (test_dest) {
- qtest_memread(to, start_address, &dest_byte_a, 1);
-
- /* Destination still running, wait for a byte to change */
- do {
- qtest_memread(to, start_address, &dest_byte_b, 1);
- usleep(1000 * 10);
- } while (dest_byte_a == dest_byte_b);
-
- qtest_qmp_assert_success(to, "{ 'execute' : 'stop'}");
-
- /* With it stopped, check nothing changes */
- qtest_memread(to, start_address, &dest_byte_c, 1);
- usleep(1000 * 200);
- qtest_memread(to, start_address, &dest_byte_d, 1);
- g_assert_cmpint(dest_byte_c, ==, dest_byte_d);
-
- check_guests_ram(to);
- }
-
- qtest_quit(to);
-
- cleanup("migsocket");
- cleanup("src_serial");
- cleanup("dest_serial");
- cleanup(FILE_TEST_FILENAME);
-}
-
-#ifdef CONFIG_GNUTLS
-struct TestMigrateTLSPSKData {
- char *workdir;
- char *workdiralt;
- char *pskfile;
- char *pskfilealt;
-};
-
-static void *
-test_migrate_tls_psk_start_common(QTestState *from,
- QTestState *to,
- bool mismatch)
-{
- struct TestMigrateTLSPSKData *data =
- g_new0(struct TestMigrateTLSPSKData, 1);
-
- data->workdir = g_strdup_printf("%s/tlscredspsk0", tmpfs);
- data->pskfile = g_strdup_printf("%s/%s", data->workdir,
- QCRYPTO_TLS_CREDS_PSKFILE);
- g_mkdir_with_parents(data->workdir, 0700);
- test_tls_psk_init(data->pskfile);
-
- if (mismatch) {
- data->workdiralt = g_strdup_printf("%s/tlscredspskalt0", tmpfs);
- data->pskfilealt = g_strdup_printf("%s/%s", data->workdiralt,
- QCRYPTO_TLS_CREDS_PSKFILE);
- g_mkdir_with_parents(data->workdiralt, 0700);
- test_tls_psk_init_alt(data->pskfilealt);
- }
-
- qtest_qmp_assert_success(from,
- "{ 'execute': 'object-add',"
- " 'arguments': { 'qom-type': 'tls-creds-psk',"
- " 'id': 'tlscredspsk0',"
- " 'endpoint': 'client',"
- " 'dir': %s,"
- " 'username': 'qemu'} }",
- data->workdir);
-
- qtest_qmp_assert_success(to,
- "{ 'execute': 'object-add',"
- " 'arguments': { 'qom-type': 'tls-creds-psk',"
- " 'id': 'tlscredspsk0',"
- " 'endpoint': 'server',"
- " 'dir': %s } }",
- mismatch ? data->workdiralt : data->workdir);
-
- migrate_set_parameter_str(from, "tls-creds", "tlscredspsk0");
- migrate_set_parameter_str(to, "tls-creds", "tlscredspsk0");
-
- return data;
-}
-
-static void *
-test_migrate_tls_psk_start_match(QTestState *from,
- QTestState *to)
-{
- return test_migrate_tls_psk_start_common(from, to, false);
-}
-
-static void *
-test_migrate_tls_psk_start_mismatch(QTestState *from,
- QTestState *to)
-{
- return test_migrate_tls_psk_start_common(from, to, true);
-}
-
-static void
-test_migrate_tls_psk_finish(QTestState *from,
- QTestState *to,
- void *opaque)
-{
- struct TestMigrateTLSPSKData *data = opaque;
-
- test_tls_psk_cleanup(data->pskfile);
- if (data->pskfilealt) {
- test_tls_psk_cleanup(data->pskfilealt);
- }
- rmdir(data->workdir);
- if (data->workdiralt) {
- rmdir(data->workdiralt);
- }
-
- g_free(data->workdiralt);
- g_free(data->pskfilealt);
- g_free(data->workdir);
- g_free(data->pskfile);
- g_free(data);
-}
-
-#ifdef CONFIG_TASN1
-typedef struct {
- char *workdir;
- char *keyfile;
- char *cacert;
- char *servercert;
- char *serverkey;
- char *clientcert;
- char *clientkey;
-} TestMigrateTLSX509Data;
-
-typedef struct {
- bool verifyclient;
- bool clientcert;
- bool hostileclient;
- bool authzclient;
- const char *certhostname;
- const char *certipaddr;
-} TestMigrateTLSX509;
-
-static void *
-test_migrate_tls_x509_start_common(QTestState *from,
- QTestState *to,
- TestMigrateTLSX509 *args)
-{
- TestMigrateTLSX509Data *data = g_new0(TestMigrateTLSX509Data, 1);
-
- data->workdir = g_strdup_printf("%s/tlscredsx5090", tmpfs);
- data->keyfile = g_strdup_printf("%s/key.pem", data->workdir);
-
- data->cacert = g_strdup_printf("%s/ca-cert.pem", data->workdir);
- data->serverkey = g_strdup_printf("%s/server-key.pem", data->workdir);
- data->servercert = g_strdup_printf("%s/server-cert.pem", data->workdir);
- if (args->clientcert) {
- data->clientkey = g_strdup_printf("%s/client-key.pem", data->workdir);
- data->clientcert = g_strdup_printf("%s/client-cert.pem", data->workdir);
- }
-
- g_mkdir_with_parents(data->workdir, 0700);
-
- test_tls_init(data->keyfile);
-#ifndef _WIN32
- g_assert(link(data->keyfile, data->serverkey) == 0);
-#else
- g_assert(CreateHardLink(data->serverkey, data->keyfile, NULL) != 0);
-#endif
- if (args->clientcert) {
-#ifndef _WIN32
- g_assert(link(data->keyfile, data->clientkey) == 0);
-#else
- g_assert(CreateHardLink(data->clientkey, data->keyfile, NULL) != 0);
-#endif
- }
-
- TLS_ROOT_REQ_SIMPLE(cacertreq, data->cacert);
- if (args->clientcert) {
- TLS_CERT_REQ_SIMPLE_CLIENT(servercertreq, cacertreq,
- args->hostileclient ?
- QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME :
- QCRYPTO_TLS_TEST_CLIENT_NAME,
- data->clientcert);
- }
-
- TLS_CERT_REQ_SIMPLE_SERVER(clientcertreq, cacertreq,
- data->servercert,
- args->certhostname,
- args->certipaddr);
-
- qtest_qmp_assert_success(from,
- "{ 'execute': 'object-add',"
- " 'arguments': { 'qom-type': 'tls-creds-x509',"
- " 'id': 'tlscredsx509client0',"
- " 'endpoint': 'client',"
- " 'dir': %s,"
- " 'sanity-check': true,"
- " 'verify-peer': true} }",
- data->workdir);
- migrate_set_parameter_str(from, "tls-creds", "tlscredsx509client0");
- if (args->certhostname) {
- migrate_set_parameter_str(from, "tls-hostname", args->certhostname);
- }
-
- qtest_qmp_assert_success(to,
- "{ 'execute': 'object-add',"
- " 'arguments': { 'qom-type': 'tls-creds-x509',"
- " 'id': 'tlscredsx509server0',"
- " 'endpoint': 'server',"
- " 'dir': %s,"
- " 'sanity-check': true,"
- " 'verify-peer': %i} }",
- data->workdir, args->verifyclient);
- migrate_set_parameter_str(to, "tls-creds", "tlscredsx509server0");
-
- if (args->authzclient) {
- qtest_qmp_assert_success(to,
- "{ 'execute': 'object-add',"
- " 'arguments': { 'qom-type': 'authz-simple',"
- " 'id': 'tlsauthz0',"
- " 'identity': %s} }",
- "CN=" QCRYPTO_TLS_TEST_CLIENT_NAME);
- migrate_set_parameter_str(to, "tls-authz", "tlsauthz0");
- }
-
- return data;
-}
-
-/*
- * The normal case: match server's cert hostname against
- * whatever host we were telling QEMU to connect to (if any)
- */
-static void *
-test_migrate_tls_x509_start_default_host(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .clientcert = true,
- .certipaddr = "127.0.0.1"
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-/*
- * The unusual case: the server's cert is different from
- * the address we're telling QEMU to connect to (if any),
- * so we must give QEMU an explicit hostname to validate
- */
-static void *
-test_migrate_tls_x509_start_override_host(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .clientcert = true,
- .certhostname = "qemu.org",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-/*
- * The unusual case: the server's cert is different from
- * the address we're telling QEMU to connect to, and so we
- * expect the client to reject the server
- */
-static void *
-test_migrate_tls_x509_start_mismatch_host(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .clientcert = true,
- .certipaddr = "10.0.0.1",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-static void *
-test_migrate_tls_x509_start_friendly_client(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .clientcert = true,
- .authzclient = true,
- .certipaddr = "127.0.0.1",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-static void *
-test_migrate_tls_x509_start_hostile_client(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .clientcert = true,
- .hostileclient = true,
- .authzclient = true,
- .certipaddr = "127.0.0.1",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-/*
- * The case with no client certificate presented,
- * and no server verification
- */
-static void *
-test_migrate_tls_x509_start_allow_anon_client(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .certipaddr = "127.0.0.1",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-/*
- * The case with no client certificate presented,
- * and server verification rejecting
- */
-static void *
-test_migrate_tls_x509_start_reject_anon_client(QTestState *from,
- QTestState *to)
-{
- TestMigrateTLSX509 args = {
- .verifyclient = true,
- .certipaddr = "127.0.0.1",
- };
- return test_migrate_tls_x509_start_common(from, to, &args);
-}
-
-static void
-test_migrate_tls_x509_finish(QTestState *from,
- QTestState *to,
- void *opaque)
-{
- TestMigrateTLSX509Data *data = opaque;
-
- test_tls_cleanup(data->keyfile);
- g_free(data->keyfile);
-
- unlink(data->cacert);
- g_free(data->cacert);
- unlink(data->servercert);
- g_free(data->servercert);
- unlink(data->serverkey);
- g_free(data->serverkey);
-
- if (data->clientcert) {
- unlink(data->clientcert);
- g_free(data->clientcert);
- }
- if (data->clientkey) {
- unlink(data->clientkey);
- g_free(data->clientkey);
- }
-
- rmdir(data->workdir);
- g_free(data->workdir);
-
- g_free(data);
-}
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
-static int migrate_postcopy_prepare(QTestState **from_ptr,
- QTestState **to_ptr,
- MigrateCommon *args)
-{
- QTestState *from, *to;
-
- if (test_migrate_start(&from, &to, "defer", &args->start)) {
- return -1;
- }
-
- if (args->start_hook) {
- args->postcopy_data = args->start_hook(from, to);
- }
-
- migrate_set_capability(from, "postcopy-ram", true);
- migrate_set_capability(to, "postcopy-ram", true);
- migrate_set_capability(to, "postcopy-blocktime", true);
-
- if (args->postcopy_preempt) {
- migrate_set_capability(from, "postcopy-preempt", true);
- migrate_set_capability(to, "postcopy-preempt", true);
- }
-
- migrate_ensure_non_converge(from);
-
- migrate_prepare_for_dirty_mem(from);
- qtest_qmp_assert_success(to, "{ 'execute': 'migrate-incoming',"
- " 'arguments': { "
- " 'channels': [ { 'channel-type': 'main',"
- " 'addr': { 'transport': 'socket',"
- " 'type': 'inet',"
- " 'host': '127.0.0.1',"
- " 'port': '0' } } ] } }");
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
- wait_for_suspend(from, &src_state);
-
- migrate_qmp(from, to, NULL, NULL, "{}");
-
- migrate_wait_for_dirty_mem(from, to);
-
- *from_ptr = from;
- *to_ptr = to;
-
- return 0;
-}
-
-static void migrate_postcopy_complete(QTestState *from, QTestState *to,
- MigrateCommon *args)
-{
- wait_for_migration_complete(from);
-
- if (args->start.suspend_me) {
- /* wakeup succeeds only if guest is suspended */
- qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}");
- }
-
- /* Make sure we get at least one "B" on destination */
- wait_for_serial("dest_serial");
-
- if (uffd_feature_thread_id) {
- read_blocktime(to);
- }
-
- if (args->finish_hook) {
- args->finish_hook(from, to, args->postcopy_data);
- args->postcopy_data = NULL;
- }
-
- test_migrate_end(from, to, true);
-}
-
-static void test_postcopy_common(MigrateCommon *args)
-{
- QTestState *from, *to;
-
- if (migrate_postcopy_prepare(&from, &to, args)) {
- return;
- }
- migrate_postcopy_start(from, to);
- migrate_postcopy_complete(from, to, args);
-}
-
-static void test_postcopy(void)
-{
- MigrateCommon args = { };
-
- test_postcopy_common(&args);
-}
-
-static void test_postcopy_suspend(void)
-{
- MigrateCommon args = {
- .start.suspend_me = true,
- };
-
- test_postcopy_common(&args);
-}
-
-static void test_postcopy_preempt(void)
-{
- MigrateCommon args = {
- .postcopy_preempt = true,
- };
-
- test_postcopy_common(&args);
-}
-
-#ifdef CONFIG_GNUTLS
-static void test_postcopy_tls_psk(void)
-{
- MigrateCommon args = {
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_postcopy_common(&args);
-}
-
-static void test_postcopy_preempt_tls_psk(void)
-{
- MigrateCommon args = {
- .postcopy_preempt = true,
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_postcopy_common(&args);
-}
-#endif
-
-static void wait_for_postcopy_status(QTestState *one, const char *status)
-{
- wait_for_migration_status(one, status,
- (const char * []) { "failed", "active",
- "completed", NULL });
-}
-
-static void postcopy_recover_fail(QTestState *from, QTestState *to,
- PostcopyRecoveryFailStage stage)
-{
-#ifndef _WIN32
- bool fail_early = (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH);
- int ret, pair1[2], pair2[2];
- char c;
-
- g_assert(stage > POSTCOPY_FAIL_NONE && stage < POSTCOPY_FAIL_MAX);
-
- /* Create two unrelated socketpairs */
- ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair1);
- g_assert_cmpint(ret, ==, 0);
-
- ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair2);
- g_assert_cmpint(ret, ==, 0);
-
- /*
- * Give the guests unpaired ends of the sockets, so they'll all block
- * on reads. This mimics a wrongly established channel.
- */
- qtest_qmp_fds_assert_success(from, &pair1[0], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
- qtest_qmp_fds_assert_success(to, &pair2[0], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
-
- /*
- * Write the 1st byte as QEMU_VM_COMMAND (0x8) for the dest socket, to
- * emulate the 1st byte of a real recovery, but stop there to
- * keep dest QEMU in RECOVER. This is needed so that we can kick off
- * the recover process on dest QEMU (by triggering the G_IO_IN event).
- *
- * NOTE: this trick is not needed on src QEMUs, because src doesn't
- * rely on a pre-existing G_IO_IN event, so it will always trigger the
- * upcoming recovery anyway even if it can read nothing.
- */
-#define QEMU_VM_COMMAND 0x08
- c = QEMU_VM_COMMAND;
- ret = send(pair2[1], &c, 1, 0);
- g_assert_cmpint(ret, ==, 1);
-
- if (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH) {
- /*
- * This will make src QEMU fail at an early stage when trying to
- * resume later, where it shouldn't reach the RECOVER stage at all.
- */
- close(pair1[1]);
- }
-
- migrate_recover(to, "fd:fd-mig");
- migrate_qmp(from, to, "fd:fd-mig", NULL, "{'resume': true}");
-
- /*
- * Source QEMU has an extra RECOVER_SETUP phase; dest doesn't have it.
- * Make sure it appears along the way.
- */
- migration_event_wait(from, "postcopy-recover-setup");
-
- if (fail_early) {
- /*
- * When reconnection fails, src QEMU will automatically go
- * back to the PAUSED state. Make sure there is an event in this
- * case: Libvirt relies on this to detect early reconnection
- * errors.
- */
- migration_event_wait(from, "postcopy-paused");
- } else {
- /*
- * We want to test "fail later" at RECOVER stage here. Make sure
- * both QEMU instances will go into RECOVER stage first, then test
- * kicking them out using migrate-pause.
- *
- * Explicitly check the RECOVER event on src, that's what Libvirt
- * relies on, rather than polling.
- */
- migration_event_wait(from, "postcopy-recover");
- wait_for_postcopy_status(from, "postcopy-recover");
-
- /* Need an explicit kick on src QEMU in this case */
- migrate_pause(from);
- }
-
- /*
- * For all failure cases, we'll reach such states on both sides now.
- * Check them.
- */
- wait_for_postcopy_status(from, "postcopy-paused");
- wait_for_postcopy_status(to, "postcopy-recover");
-
- /*
- * Kick dest QEMU out too. This is normally not needed in reality
- * because when the channel is shut down it should also happen on src.
- * However here we used separate socket pairs so we need to do that
- * explicitly.
- */
- migrate_pause(to);
- wait_for_postcopy_status(to, "postcopy-paused");
-
- close(pair1[0]);
- close(pair2[0]);
- close(pair2[1]);
-
- if (stage != POSTCOPY_FAIL_CHANNEL_ESTABLISH) {
- close(pair1[1]);
- }
-#endif
-}
-
-static void test_postcopy_recovery_common(MigrateCommon *args)
-{
- QTestState *from, *to;
- g_autofree char *uri = NULL;
-
- /* Always hide errors for postcopy recover tests since they're expected */
- args->start.hide_stderr = true;
-
- if (migrate_postcopy_prepare(&from, &to, args)) {
- return;
- }
-
- /* Turn postcopy speed down; 4K/s is slow enough on any machine */
- migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096);
-
- /* Now we start the postcopy */
- migrate_postcopy_start(from, to);
-
- /*
- * Wait until postcopy is really started; we can only run the
- * migrate-pause command during a postcopy
- */
- wait_for_migration_status(from, "postcopy-active", NULL);
-
- /*
- * Manually stop the postcopy migration. This emulates a network
- * failure with the migration socket
- */
- migrate_pause(from);
-
- /*
- * Wait for the destination side to reach the postcopy-paused state.
- * The migrate-recover command can only succeed if the destination
- * machine is in the paused state.
- */
- wait_for_postcopy_status(to, "postcopy-paused");
- wait_for_postcopy_status(from, "postcopy-paused");
-
- if (args->postcopy_recovery_fail_stage) {
- /*
- * Test the case where a wrong socket is specified for recovery, then
- * the ability to kick it out and continue with a correct socket.
- */
- postcopy_recover_fail(from, to, args->postcopy_recovery_fail_stage);
- /* continue with a good recovery */
- }
-
- /*
- * Create a new socket to emulate a new channel that is different
- * from the broken migration channel; tell the destination to
- * listen to the new port
- */
- uri = g_strdup_printf("unix:%s/migsocket-recover", tmpfs);
- migrate_recover(to, uri);
-
- /*
- * Try to rebuild the migration channel using the resume flag and
- * the newly created channel
- */
- migrate_qmp(from, to, uri, NULL, "{'resume': true}");
-
- /* Restore the postcopy bandwidth to unlimited */
- migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0);
-
- migrate_postcopy_complete(from, to, args);
-}
-
-static void test_postcopy_recovery(void)
-{
- MigrateCommon args = { };
-
- test_postcopy_recovery_common(&args);
-}
-
-static void test_postcopy_recovery_fail_handshake(void)
-{
- MigrateCommon args = {
- .postcopy_recovery_fail_stage = POSTCOPY_FAIL_RECOVERY,
- };
-
- test_postcopy_recovery_common(&args);
-}
-
-static void test_postcopy_recovery_fail_reconnect(void)
-{
- MigrateCommon args = {
- .postcopy_recovery_fail_stage = POSTCOPY_FAIL_CHANNEL_ESTABLISH,
- };
-
- test_postcopy_recovery_common(&args);
-}
-
-#ifdef CONFIG_GNUTLS
-static void test_postcopy_recovery_tls_psk(void)
-{
- MigrateCommon args = {
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_postcopy_recovery_common(&args);
-}
-#endif
-
-static void test_postcopy_preempt_recovery(void)
-{
- MigrateCommon args = {
- .postcopy_preempt = true,
- };
-
- test_postcopy_recovery_common(&args);
-}
-
-#ifdef CONFIG_GNUTLS
-/* This contains preempt+recovery+tls test altogether */
-static void test_postcopy_preempt_all(void)
-{
- MigrateCommon args = {
- .postcopy_preempt = true,
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_postcopy_recovery_common(&args);
-}
-
-#endif
-
-static void test_baddest(void)
-{
- MigrateStart args = {
- .hide_stderr = true
- };
- QTestState *from, *to;
-
- if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) {
- return;
- }
- migrate_qmp(from, to, "tcp:127.0.0.1:0", NULL, "{}");
- wait_for_migration_fail(from, false);
- test_migrate_end(from, to, false);
-}
-
-#ifndef _WIN32
-static void test_analyze_script(void)
-{
- MigrateStart args = {
- .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
- };
- QTestState *from, *to;
- g_autofree char *uri = NULL;
- g_autofree char *file = NULL;
- int pid, wstatus;
- const char *python = g_getenv("PYTHON");
-
- if (!python) {
- g_test_skip("PYTHON variable not set");
- return;
- }
-
- /* dummy url */
- if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) {
- return;
- }
-
- /*
- * Setting these two capabilities causes the "configuration"
- * vmstate to include subsections for them. The script needs to
- * parse those subsections properly.
- */
- migrate_set_capability(from, "validate-uuid", true);
- migrate_set_capability(from, "x-ignore-shared", true);
-
- file = g_strdup_printf("%s/migfile", tmpfs);
- uri = g_strdup_printf("exec:cat > %s", file);
-
- migrate_ensure_converge(from);
- migrate_qmp(from, to, uri, NULL, "{}");
- wait_for_migration_complete(from);
-
- pid = fork();
- if (!pid) {
- close(1);
- open("/dev/null", O_WRONLY);
- execl(python, python, ANALYZE_SCRIPT, "-f", file, NULL);
- g_assert_not_reached();
- }
-
- g_assert(waitpid(pid, &wstatus, 0) == pid);
- if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
- g_test_message("Failed to analyze the migration stream");
- g_test_fail();
- }
- test_migrate_end(from, to, false);
- cleanup("migfile");
-}
-
-static void test_vmstate_checker_script(void)
-{
- g_autofree gchar *cmd_src = NULL;
- g_autofree gchar *cmd_dst = NULL;
- g_autofree gchar *vmstate_src = NULL;
- g_autofree gchar *vmstate_dst = NULL;
- const char *machine_alias, *machine_opts = "";
- g_autofree char *machine = NULL;
- const char *arch = qtest_get_arch();
- int pid, wstatus;
- const char *python = g_getenv("PYTHON");
-
- if (!getenv(QEMU_ENV_SRC) && !getenv(QEMU_ENV_DST)) {
- g_test_skip("Test needs two different QEMU versions");
- return;
- }
-
- if (!python) {
- g_test_skip("PYTHON variable not set");
- return;
- }
-
- if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
- if (g_str_equal(arch, "i386")) {
- machine_alias = "pc";
- } else {
- machine_alias = "q35";
- }
- } else if (g_str_equal(arch, "s390x")) {
- machine_alias = "s390-ccw-virtio";
- } else if (strcmp(arch, "ppc64") == 0) {
- machine_alias = "pseries";
- } else if (strcmp(arch, "aarch64") == 0) {
- machine_alias = "virt";
- } else {
- g_assert_not_reached();
- }
-
- if (!qtest_has_machine(machine_alias)) {
- g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias);
- g_test_skip(msg);
- return;
- }
-
- machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC,
- QEMU_ENV_DST);
-
- vmstate_src = g_strdup_printf("%s/vmstate-src", tmpfs);
- vmstate_dst = g_strdup_printf("%s/vmstate-dst", tmpfs);
-
- cmd_dst = g_strdup_printf("-machine %s,%s -dump-vmstate %s",
- machine, machine_opts, vmstate_dst);
- cmd_src = g_strdup_printf("-machine %s,%s -dump-vmstate %s",
- machine, machine_opts, vmstate_src);
-
- qtest_init_with_env_no_handshake(QEMU_ENV_SRC, cmd_src);
- qtest_init_with_env_no_handshake(QEMU_ENV_DST, cmd_dst);
-
- pid = fork();
- if (!pid) {
- close(1);
- open("/dev/null", O_WRONLY);
- execl(python, python, VMSTATE_CHECKER_SCRIPT,
- "-s", vmstate_src,
- "-d", vmstate_dst,
- NULL);
- g_assert_not_reached();
- }
-
- g_assert(waitpid(pid, &wstatus, 0) == pid);
- if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
- g_test_message("Failed to run vmstate-static-checker.py");
- g_test_fail();
- }
-
- cleanup("vmstate-src");
- cleanup("vmstate-dst");
-}
-#endif
-
-static void test_precopy_common(MigrateCommon *args)
-{
- QTestState *from, *to;
- void *data_hook = NULL;
-
- if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) {
- return;
- }
-
- if (args->start_hook) {
- data_hook = args->start_hook(from, to);
- }
-
- /* Wait for the first serial output from the source */
- if (args->result == MIG_TEST_SUCCEED) {
- wait_for_serial("src_serial");
- wait_for_suspend(from, &src_state);
- }
-
- if (args->live) {
- migrate_ensure_non_converge(from);
- migrate_prepare_for_dirty_mem(from);
- } else {
- /*
- * When testing non-live migration, we allow it to run at
- * full speed to keep the test case duration short.
- * For tests expected to fail, we don't need to
- * change anything.
- */
- if (args->result == MIG_TEST_SUCCEED) {
- qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}");
- wait_for_stop(from, &src_state);
- migrate_ensure_converge(from);
- }
- }
-
- if (args->result == MIG_TEST_QMP_ERROR) {
- migrate_qmp_fail(from, args->connect_uri, args->connect_channels, "{}");
- goto finish;
- }
-
- migrate_qmp(from, to, args->connect_uri, args->connect_channels, "{}");
-
- if (args->result != MIG_TEST_SUCCEED) {
- bool allow_active = args->result == MIG_TEST_FAIL;
- wait_for_migration_fail(from, allow_active);
-
- if (args->result == MIG_TEST_FAIL_DEST_QUIT_ERR) {
- qtest_set_expected_status(to, EXIT_FAILURE);
- }
- } else {
- if (args->live) {
- /*
- * For initial iteration(s) we must do a full pass,
- * but for the final iteration, we need only wait
- * for some dirty mem before switching to converge
- */
- while (args->iterations > 1) {
- wait_for_migration_pass(from);
- args->iterations--;
- }
- migrate_wait_for_dirty_mem(from, to);
-
- migrate_ensure_converge(from);
-
- /*
- * We do this first, as it has a timeout to stop us
- * hanging forever if migration didn't converge
- */
- wait_for_migration_complete(from);
-
- wait_for_stop(from, &src_state);
-
- } else {
- wait_for_migration_complete(from);
- /*
- * Must wait for dst to finish reading all incoming
- * data on the socket before issuing 'cont', otherwise
- * it'll be ignored.
- */
- wait_for_migration_complete(to);
-
- qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}");
- }
-
- wait_for_resume(to, &dst_state);
-
- if (args->start.suspend_me) {
- /* wakeup succeeds only if guest is suspended */
- qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}");
- }
-
- wait_for_serial("dest_serial");
- }
-
-finish:
- if (args->finish_hook) {
- args->finish_hook(from, to, data_hook);
- }
-
- test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
-}
-
-static void file_dirty_offset_region(void)
-{
- g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
- size_t size = FILE_TEST_OFFSET;
- g_autofree char *data = g_new0(char, size);
-
- memset(data, FILE_TEST_MARKER, size);
- g_assert(g_file_set_contents(path, data, size, NULL));
-}
-
-static void file_check_offset_region(void)
-{
- g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
- size_t size = FILE_TEST_OFFSET;
- g_autofree char *expected = g_new0(char, size);
- g_autofree char *actual = NULL;
- uint64_t *stream_start;
-
- /*
- * Ensure the skipped offset region's data has not been touched
- * and the migration stream starts at the right place.
- */
-
- memset(expected, FILE_TEST_MARKER, size);
-
- g_assert(g_file_get_contents(path, &actual, NULL, NULL));
- g_assert(!memcmp(actual, expected, size));
-
- stream_start = (uint64_t *)(actual + size);
- g_assert_cmpint(cpu_to_be64(*stream_start) >> 32, ==, QEMU_VM_FILE_MAGIC);
-}
-
-static void test_file_common(MigrateCommon *args, bool stop_src)
-{
- QTestState *from, *to;
- void *data_hook = NULL;
- bool check_offset = false;
-
- if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) {
- return;
- }
-
- /*
- * File migration is never live. We can keep the source VM running
- * during migration, but the destination will not be running
- * concurrently.
- */
- g_assert_false(args->live);
-
- if (g_strrstr(args->connect_uri, "offset=")) {
- check_offset = true;
- /*
- * This comes before the start_hook because it's equivalent to
- * a management application creating the file and writing to
- * it, so hooks should expect the file to be already present.
- */
- file_dirty_offset_region();
- }
-
- if (args->start_hook) {
- data_hook = args->start_hook(from, to);
- }
-
- migrate_ensure_converge(from);
- wait_for_serial("src_serial");
-
- if (stop_src) {
- qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}");
- wait_for_stop(from, &src_state);
- }
-
- if (args->result == MIG_TEST_QMP_ERROR) {
- migrate_qmp_fail(from, args->connect_uri, NULL, "{}");
- goto finish;
- }
-
- migrate_qmp(from, to, args->connect_uri, NULL, "{}");
- wait_for_migration_complete(from);
-
- /*
- * We need to wait for the source to finish before starting the
- * destination.
- */
- migrate_incoming_qmp(to, args->connect_uri, "{}");
- wait_for_migration_complete(to);
-
- if (stop_src) {
- qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}");
- }
- wait_for_resume(to, &dst_state);
-
- wait_for_serial("dest_serial");
-
- if (check_offset) {
- file_check_offset_region();
- }
-
-finish:
- if (args->finish_hook) {
- args->finish_hook(from, to, data_hook);
- }
-
- test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
-}
-
-static void test_precopy_unix_plain(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .listen_uri = uri,
- .connect_uri = uri,
- /*
- * The simplest use case of precopy, covering smoke tests of
- * get-dirty-log dirty tracking.
- */
- .live = true,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_unix_suspend_live(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .listen_uri = uri,
- .connect_uri = uri,
- /*
- * despite being live, the test is fast because the src
- * suspends immediately.
- */
- .live = true,
- .start.suspend_me = true,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_unix_suspend_notlive(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .listen_uri = uri,
- .connect_uri = uri,
- .start.suspend_me = true,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_unix_dirty_ring(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .start = {
- .use_dirty_ring = true,
- },
- .listen_uri = uri,
- .connect_uri = uri,
- /*
- * Besides the precopy/unix basic test, cover dirty ring interface
- * rather than get-dirty-log.
- */
- .live = true,
- };
-
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_GNUTLS
-static void test_precopy_unix_tls_psk(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = uri,
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_TASN1
-static void test_precopy_unix_tls_x509_default_host(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .connect_uri = uri,
- .listen_uri = uri,
- .start_hook = test_migrate_tls_x509_start_default_host,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL_DEST_QUIT_ERR,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_unix_tls_x509_override_host(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = uri,
- .start_hook = test_migrate_tls_x509_start_override_host,
- .finish_hook = test_migrate_tls_x509_finish,
- };
-
- test_precopy_common(&args);
-}
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
-#if 0
-/* Currently upset on aarch64 TCG */
-static void test_ignore_shared(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- QTestState *from, *to;
-
- if (test_migrate_start(&from, &to, uri, false, true, NULL, NULL)) {
- return;
- }
-
- migrate_ensure_non_converge(from);
- migrate_prepare_for_dirty_mem(from);
-
- migrate_set_capability(from, "x-ignore-shared", true);
- migrate_set_capability(to, "x-ignore-shared", true);
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
-
- migrate_qmp(from, to, uri, NULL, "{}");
-
- migrate_wait_for_dirty_mem(from, to);
-
- wait_for_stop(from, &src_state);
-
- qtest_qmp_eventwait(to, "RESUME");
-
- wait_for_serial("dest_serial");
- wait_for_migration_complete(from);
-
- /* Check whether shared RAM has been really skipped */
- g_assert_cmpint(read_ram_property_int(from, "transferred"), <, 1024 * 1024);
-
- test_migrate_end(from, to, true);
-}
-#endif
-
-static void *
-test_migrate_xbzrle_start(QTestState *from,
- QTestState *to)
-{
- migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432);
-
- migrate_set_capability(from, "xbzrle", true);
- migrate_set_capability(to, "xbzrle", true);
-
- return NULL;
-}
-
-static void test_precopy_unix_xbzrle(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = uri,
- .start_hook = test_migrate_xbzrle_start,
- .iterations = 2,
- /*
- * XBZRLE needs pages to be modified during the 2nd and later
- * iterations so that real data is pushed to the stream.
- */
- .live = true,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_file(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- };
-
- test_file_common(&args, true);
-}
-
-#ifndef _WIN32
-static void fdset_add_fds(QTestState *qts, const char *file, int flags,
- int num_fds, bool direct_io)
-{
- for (int i = 0; i < num_fds; i++) {
- int fd;
-
-#ifdef O_DIRECT
- /* only secondary channels can use direct-io */
- if (direct_io && i != 0) {
- flags |= O_DIRECT;
- }
-#endif
-
- fd = open(file, flags, 0660);
- assert(fd != -1);
-
- qtest_qmp_fds_assert_success(qts, &fd, 1, "{'execute': 'add-fd', "
- "'arguments': {'fdset-id': 1}}");
- close(fd);
- }
-}
-
-static void *file_offset_fdset_start_hook(QTestState *from, QTestState *to)
-{
- g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
-
- fdset_add_fds(from, file, O_WRONLY, 1, false);
- fdset_add_fds(to, file, O_RDONLY, 1, false);
-
- return NULL;
-}
-
-static void test_precopy_file_offset_fdset(void)
-{
- g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
- FILE_TEST_OFFSET);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = file_offset_fdset_start_hook,
- };
-
- test_file_common(&args, false);
-}
-#endif
-
-static void test_precopy_file_offset(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=%d", tmpfs,
- FILE_TEST_FILENAME,
- FILE_TEST_OFFSET);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- };
-
- test_file_common(&args, false);
-}
-
-static void test_precopy_file_offset_bad(void)
-{
- /* using a value not supported by qemu_strtosz() */
- g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=0x20M",
- tmpfs, FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .result = MIG_TEST_QMP_ERROR,
- };
-
- test_file_common(&args, false);
-}
-
-static void *test_mode_reboot_start(QTestState *from, QTestState *to)
-{
- migrate_set_parameter_str(from, "mode", "cpr-reboot");
- migrate_set_parameter_str(to, "mode", "cpr-reboot");
-
- migrate_set_capability(from, "x-ignore-shared", true);
- migrate_set_capability(to, "x-ignore-shared", true);
-
- return NULL;
-}
-
-static void *migrate_mapped_ram_start(QTestState *from, QTestState *to)
-{
- migrate_set_capability(from, "mapped-ram", true);
- migrate_set_capability(to, "mapped-ram", true);
-
- return NULL;
-}
-
-static void test_mode_reboot(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .start.use_shmem = true,
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = test_mode_reboot_start
- };
-
- test_file_common(&args, true);
-}
-
-static void test_precopy_file_mapped_ram_live(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = migrate_mapped_ram_start,
- };
-
- test_file_common(&args, false);
-}
-
-static void test_precopy_file_mapped_ram(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = migrate_mapped_ram_start,
- };
-
- test_file_common(&args, true);
-}
-
-static void *migrate_multifd_mapped_ram_start(QTestState *from, QTestState *to)
-{
- migrate_mapped_ram_start(from, to);
-
- migrate_set_parameter_int(from, "multifd-channels", 4);
- migrate_set_parameter_int(to, "multifd-channels", 4);
-
- migrate_set_capability(from, "multifd", true);
- migrate_set_capability(to, "multifd", true);
-
- return NULL;
-}
-
-static void test_multifd_file_mapped_ram_live(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = migrate_multifd_mapped_ram_start,
- };
-
- test_file_common(&args, false);
-}
-
-static void test_multifd_file_mapped_ram(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = migrate_multifd_mapped_ram_start,
- };
-
- test_file_common(&args, true);
-}
-
-static void *multifd_mapped_ram_dio_start(QTestState *from, QTestState *to)
-{
- migrate_multifd_mapped_ram_start(from, to);
-
- migrate_set_parameter_bool(from, "direct-io", true);
- migrate_set_parameter_bool(to, "direct-io", true);
-
- return NULL;
-}
-
-static void test_multifd_file_mapped_ram_dio(void)
-{
- g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
- FILE_TEST_FILENAME);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = multifd_mapped_ram_dio_start,
- };
-
- if (!probe_o_direct_support(tmpfs)) {
- g_test_skip("Filesystem does not support O_DIRECT");
- return;
- }
-
- test_file_common(&args, true);
-}
-
-#ifndef _WIN32
-static void multifd_mapped_ram_fdset_end(QTestState *from, QTestState *to,
- void *opaque)
-{
- QDict *resp;
- QList *fdsets;
-
- /*
- * Remove the fdsets after migration, otherwise a second migration
- * would fail due to fdset reuse.
- */
- qtest_qmp_assert_success(from, "{'execute': 'remove-fd', "
- "'arguments': { 'fdset-id': 1}}");
-
- /*
- * Make sure no fdsets are left after migration, otherwise a
- * second migration would fail due to fdset reuse.
- */
- resp = qtest_qmp(from, "{'execute': 'query-fdsets', "
- "'arguments': {}}");
- g_assert(qdict_haskey(resp, "return"));
- fdsets = qdict_get_qlist(resp, "return");
- g_assert(fdsets && qlist_empty(fdsets));
-}
-
-static void *multifd_mapped_ram_fdset_dio(QTestState *from, QTestState *to)
-{
- g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
-
- fdset_add_fds(from, file, O_WRONLY, 2, true);
- fdset_add_fds(to, file, O_RDONLY, 2, true);
-
- migrate_multifd_mapped_ram_start(from, to);
- migrate_set_parameter_bool(from, "direct-io", true);
- migrate_set_parameter_bool(to, "direct-io", true);
-
- return NULL;
-}
-
-static void *multifd_mapped_ram_fdset(QTestState *from, QTestState *to)
-{
- g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
-
- fdset_add_fds(from, file, O_WRONLY, 2, false);
- fdset_add_fds(to, file, O_RDONLY, 2, false);
-
- migrate_multifd_mapped_ram_start(from, to);
-
- return NULL;
-}
-
-static void test_multifd_file_mapped_ram_fdset(void)
-{
- g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
- FILE_TEST_OFFSET);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = multifd_mapped_ram_fdset,
- .finish_hook = multifd_mapped_ram_fdset_end,
- };
-
- test_file_common(&args, true);
-}
-
-static void test_multifd_file_mapped_ram_fdset_dio(void)
-{
- g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
- FILE_TEST_OFFSET);
- MigrateCommon args = {
- .connect_uri = uri,
- .listen_uri = "defer",
- .start_hook = multifd_mapped_ram_fdset_dio,
- .finish_hook = multifd_mapped_ram_fdset_end,
- };
-
- if (!probe_o_direct_support(tmpfs)) {
- g_test_skip("Filesystem does not support O_DIRECT");
- return;
- }
-
- test_file_common(&args, true);
-}
-#endif /* !_WIN32 */
-
-static void test_precopy_tcp_plain(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- };
-
- test_precopy_common(&args);
-}
-
-static void *test_migrate_switchover_ack_start(QTestState *from, QTestState *to)
-{
-
- migrate_set_capability(from, "return-path", true);
- migrate_set_capability(to, "return-path", true);
-
- migrate_set_capability(from, "switchover-ack", true);
- migrate_set_capability(to, "switchover-ack", true);
-
- return NULL;
-}
-
-static void test_precopy_tcp_switchover_ack(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_switchover_ack_start,
- /*
- * The source VM must be running in order to consider the switchover ACK
- * when deciding whether to do the switchover.
- */
- .live = true,
- };
-
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_GNUTLS
-static void test_precopy_tcp_tls_psk_match(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_psk_mismatch(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_psk_start_mismatch,
- .finish_hook = test_migrate_tls_psk_finish,
- .result = MIG_TEST_FAIL,
- };
-
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_TASN1
-static void test_precopy_tcp_tls_x509_default_host(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_default_host,
- .finish_hook = test_migrate_tls_x509_finish,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_override_host(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_override_host,
- .finish_hook = test_migrate_tls_x509_finish,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_mismatch_host(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_mismatch_host,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL_DEST_QUIT_ERR,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_friendly_client(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_friendly_client,
- .finish_hook = test_migrate_tls_x509_finish,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_hostile_client(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_hostile_client,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_allow_anon_client(void)
-{
- MigrateCommon args = {
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_allow_anon_client,
- .finish_hook = test_migrate_tls_x509_finish,
- };
-
- test_precopy_common(&args);
-}
-
-static void test_precopy_tcp_tls_x509_reject_anon_client(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "tcp:127.0.0.1:0",
- .start_hook = test_migrate_tls_x509_start_reject_anon_client,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL,
- };
-
- test_precopy_common(&args);
-}
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
-#ifndef _WIN32
-static void *test_migrate_fd_start_hook(QTestState *from,
- QTestState *to)
-{
- int ret;
- int pair[2];
-
- /* Create two connected sockets for migration */
- ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair);
- g_assert_cmpint(ret, ==, 0);
-
- /* Send the 1st socket to the target */
- qtest_qmp_fds_assert_success(to, &pair[0], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
- close(pair[0]);
-
- /* Start incoming migration from the 1st socket */
- migrate_incoming_qmp(to, "fd:fd-mig", "{}");
-
- /* Send the 2nd socket to the target */
- qtest_qmp_fds_assert_success(from, &pair[1], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
- close(pair[1]);
-
- return NULL;
-}
-
-static void test_migrate_fd_finish_hook(QTestState *from,
- QTestState *to,
- void *opaque)
-{
- QDict *rsp;
- const char *error_desc;
-
- /* Test closing fds */
- /* We assume that QEMU removes the named fd from its list,
- * so this should fail */
- rsp = qtest_qmp(from, "{ 'execute': 'closefd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
- g_assert_true(qdict_haskey(rsp, "error"));
- error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc");
- g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found");
- qobject_unref(rsp);
-
- rsp = qtest_qmp(to, "{ 'execute': 'closefd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
- g_assert_true(qdict_haskey(rsp, "error"));
- error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc");
- g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found");
- qobject_unref(rsp);
-}
-
-static void test_migrate_precopy_fd_socket(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .connect_uri = "fd:fd-mig",
- .start_hook = test_migrate_fd_start_hook,
- .finish_hook = test_migrate_fd_finish_hook
- };
- test_precopy_common(&args);
-}
-
-static void *migrate_precopy_fd_file_start(QTestState *from, QTestState *to)
-{
- g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
- int src_flags = O_CREAT | O_RDWR;
- int dst_flags = O_CREAT | O_RDWR;
- int fds[2];
-
- fds[0] = open(file, src_flags, 0660);
- assert(fds[0] != -1);
-
- fds[1] = open(file, dst_flags, 0660);
- assert(fds[1] != -1);
-
-
- qtest_qmp_fds_assert_success(to, &fds[0], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
-
- qtest_qmp_fds_assert_success(from, &fds[1], 1,
- "{ 'execute': 'getfd',"
- " 'arguments': { 'fdname': 'fd-mig' }}");
-
- close(fds[0]);
- close(fds[1]);
-
- return NULL;
-}
-
-static void test_migrate_precopy_fd_file(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .connect_uri = "fd:fd-mig",
- .start_hook = migrate_precopy_fd_file_start,
- .finish_hook = test_migrate_fd_finish_hook
- };
- test_file_common(&args, true);
-}
-#endif /* _WIN32 */
-
-static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- QTestState *from, *to;
-
- if (test_migrate_start(&from, &to, uri, args)) {
- return;
- }
-
- /*
- * UUID validation happens at the very beginning of migration, so the
- * main migration process is not interesting here. Thus, set a huge
- * downtime limit so that migration completes quickly.
- */
- migrate_set_parameter_int(from, "downtime-limit", 1000000);
- migrate_set_capability(from, "validate-uuid", true);
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
-
- migrate_qmp(from, to, uri, NULL, "{}");
-
- if (should_fail) {
- qtest_set_expected_status(to, EXIT_FAILURE);
- wait_for_migration_fail(from, true);
- } else {
- wait_for_migration_complete(from);
- }
-
- test_migrate_end(from, to, false);
-}
-
-static void test_validate_uuid(void)
-{
- MigrateStart args = {
- .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
- .opts_target = "-uuid 11111111-1111-1111-1111-111111111111",
- };
-
- do_test_validate_uuid(&args, false);
-}
-
-static void test_validate_uuid_error(void)
-{
- MigrateStart args = {
- .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
- .opts_target = "-uuid 22222222-2222-2222-2222-222222222222",
- .hide_stderr = true,
- };
-
- do_test_validate_uuid(&args, true);
-}
-
-static void test_validate_uuid_src_not_set(void)
-{
- MigrateStart args = {
- .opts_target = "-uuid 22222222-2222-2222-2222-222222222222",
- .hide_stderr = true,
- };
-
- do_test_validate_uuid(&args, false);
-}
-
-static void test_validate_uuid_dst_not_set(void)
-{
- MigrateStart args = {
- .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
- .hide_stderr = true,
- };
-
- do_test_validate_uuid(&args, false);
-}
-
-static void do_test_validate_uri_channel(MigrateCommon *args)
-{
- QTestState *from, *to;
-
- if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) {
- return;
- }
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
-
- /*
- * The 'uri' and 'channels' arguments are validated even before the
- * migration starts.
- */
- migrate_qmp_fail(from, args->connect_uri, args->connect_channels, "{}");
- test_migrate_end(from, to, false);
-}
-
-static void test_validate_uri_channels_both_set(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "defer",
- .connect_uri = "tcp:127.0.0.1:0",
- .connect_channels = "[ { 'channel-type': 'main',"
- " 'addr': { 'transport': 'socket',"
- " 'type': 'inet',"
- " 'host': '127.0.0.1',"
- " 'port': '0' } } ]",
- };
-
- do_test_validate_uri_channel(&args);
-}
-
-static void test_validate_uri_channels_none_set(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "defer",
- };
-
- do_test_validate_uri_channel(&args);
-}
-
-/*
- * Because of the way auto_converge works, we need many passes to
- * run this test. The auto_converge logic only runs once every
- * three iterations, so:
- *
- * - 3 iterations without auto_converge enabled
- * - 3 iterations with pct = 5
- * - 3 iterations with pct = 30
- * - 3 iterations with pct = 55
- * - 3 iterations with pct = 80
- * - 3 iterations with pct = 95 (max(95, 80 + 25))
- *
- * To make things even worse, we need to run the initial stage at
- * 3MB/s so we enter auto-converge even when the host is (over)loaded.
- */
-static void test_migrate_auto_converge(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- MigrateStart args = {};
- QTestState *from, *to;
- int64_t percentage;
-
- /*
- * We want the test to be stable and as fast as possible.
- * E.g., with 1Gb/s of bandwidth the migration may pass without
- * throttling, so we need to decrease the bandwidth.
- */
- const int64_t init_pct = 5, inc_pct = 25, max_pct = 95;
-
- if (test_migrate_start(&from, &to, uri, &args)) {
- return;
- }
-
- migrate_set_capability(from, "auto-converge", true);
- migrate_set_parameter_int(from, "cpu-throttle-initial", init_pct);
- migrate_set_parameter_int(from, "cpu-throttle-increment", inc_pct);
- migrate_set_parameter_int(from, "max-cpu-throttle", max_pct);
-
- /*
- * Set the initial parameters so that the migration cannot converge
- * without throttling.
- */
- migrate_ensure_non_converge(from);
-
- /* To check remaining size after precopy */
- migrate_set_capability(from, "pause-before-switchover", true);
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
-
- migrate_qmp(from, to, uri, NULL, "{}");
-
- /* Wait for throttling to begin */
- percentage = 0;
- do {
- percentage = read_migrate_property_int(from, "cpu-throttle-percentage");
- if (percentage != 0) {
- break;
- }
- usleep(20);
- g_assert_false(src_state.stop_seen);
- } while (true);
- /* The first percentage of throttling should be at least init_pct */
- g_assert_cmpint(percentage, >=, init_pct);
- /* Now that we have tested that throttling works, let it converge */
- migrate_ensure_converge(from);
-
- /*
- * Wait for pre-switchover status to check the last throttle percentage
- * and the remaining RAM. These values will be zeroed later.
- */
- wait_for_migration_status(from, "pre-switchover", NULL);
-
- /* The final percentage of throttling shouldn't be greater than max_pct */
- percentage = read_migrate_property_int(from, "cpu-throttle-percentage");
- g_assert_cmpint(percentage, <=, max_pct);
- migrate_continue(from, "pre-switchover");
-
- qtest_qmp_eventwait(to, "RESUME");
-
- wait_for_serial("dest_serial");
- wait_for_migration_complete(from);
-
- test_migrate_end(from, to, true);
-}
-
-static void *
-test_migrate_precopy_tcp_multifd_start_common(QTestState *from,
- QTestState *to,
- const char *method)
-{
- migrate_set_parameter_int(from, "multifd-channels", 16);
- migrate_set_parameter_int(to, "multifd-channels", 16);
-
- migrate_set_parameter_str(from, "multifd-compression", method);
- migrate_set_parameter_str(to, "multifd-compression", method);
-
- migrate_set_capability(from, "multifd", true);
- migrate_set_capability(to, "multifd", true);
-
- /* Start incoming migration from the 1st socket */
- migrate_incoming_qmp(to, "tcp:127.0.0.1:0", "{}");
-
- return NULL;
-}
-
-static void *
-test_migrate_precopy_tcp_multifd_start(QTestState *from,
- QTestState *to)
-{
- return test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
-}
-
-static void *
-test_migrate_precopy_tcp_multifd_start_zero_page_legacy(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- migrate_set_parameter_str(from, "zero-page-detection", "legacy");
- return NULL;
-}
-
-static void *
-test_migration_precopy_tcp_multifd_start_no_zero_page(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- migrate_set_parameter_str(from, "zero-page-detection", "none");
- return NULL;
-}
-
-static void *
-test_migrate_precopy_tcp_multifd_zlib_start(QTestState *from,
- QTestState *to)
-{
- /*
- * Overloading this test to also check that set_parameter does not error.
- * This is also done in the tests for the other compression methods.
- */
- migrate_set_parameter_int(from, "multifd-zlib-level", 2);
- migrate_set_parameter_int(to, "multifd-zlib-level", 2);
-
- return test_migrate_precopy_tcp_multifd_start_common(from, to, "zlib");
-}
-
-#ifdef CONFIG_ZSTD
-static void *
-test_migrate_precopy_tcp_multifd_zstd_start(QTestState *from,
- QTestState *to)
-{
- migrate_set_parameter_int(from, "multifd-zstd-level", 2);
- migrate_set_parameter_int(to, "multifd-zstd-level", 2);
-
- return test_migrate_precopy_tcp_multifd_start_common(from, to, "zstd");
-}
-#endif /* CONFIG_ZSTD */
-
-#ifdef CONFIG_QPL
-static void *
-test_migrate_precopy_tcp_multifd_qpl_start(QTestState *from,
- QTestState *to)
-{
- return test_migrate_precopy_tcp_multifd_start_common(from, to, "qpl");
-}
-#endif /* CONFIG_QPL */
-#ifdef CONFIG_UADK
-static void *
-test_migrate_precopy_tcp_multifd_uadk_start(QTestState *from,
- QTestState *to)
-{
- return test_migrate_precopy_tcp_multifd_start_common(from, to, "uadk");
-}
-#endif /* CONFIG_UADK */
-
-static void test_multifd_tcp_uri_none(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_start,
- /*
- * Multifd is more complicated than most other features: it takes
- * guest page buffers directly when sending, so make sure everything
- * still works even while guest pages are changing.
- */
- .live = true,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_zero_page_legacy(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_start_zero_page_legacy,
- /*
- * Multifd is more complicated than most other features: it takes
- * guest page buffers directly when sending, so make sure everything
- * still works even while guest pages are changing.
- */
- .live = true,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_no_zero_page(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migration_precopy_tcp_multifd_start_no_zero_page,
- /*
- * Multifd is more complicated than most other features: it takes
- * guest page buffers directly when sending, so make sure everything
- * still works even while guest pages are changing.
- */
- .live = true,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_channels_none(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_start,
- .live = true,
- .connect_channels = "[ { 'channel-type': 'main',"
- " 'addr': { 'transport': 'socket',"
- " 'type': 'inet',"
- " 'host': '127.0.0.1',"
- " 'port': '0' } } ]",
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_zlib(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_zlib_start,
- };
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_ZSTD
-static void test_multifd_tcp_zstd(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_zstd_start,
- };
- test_precopy_common(&args);
-}
-#endif
-
-#ifdef CONFIG_QPL
-static void test_multifd_tcp_qpl(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_qpl_start,
- };
- test_precopy_common(&args);
-}
-#endif
-
-#ifdef CONFIG_UADK
-static void test_multifd_tcp_uadk(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_precopy_tcp_multifd_uadk_start,
- };
- test_precopy_common(&args);
-}
-#endif
-
-#ifdef CONFIG_GNUTLS
-static void *
-test_migrate_multifd_tcp_tls_psk_start_match(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_psk_start_match(from, to);
-}
-
-static void *
-test_migrate_multifd_tcp_tls_psk_start_mismatch(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_psk_start_mismatch(from, to);
-}
-
-#ifdef CONFIG_TASN1
-static void *
-test_migrate_multifd_tls_x509_start_default_host(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_x509_start_default_host(from, to);
-}
-
-static void *
-test_migrate_multifd_tls_x509_start_override_host(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_x509_start_override_host(from, to);
-}
-
-static void *
-test_migrate_multifd_tls_x509_start_mismatch_host(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_x509_start_mismatch_host(from, to);
-}
-
-static void *
-test_migrate_multifd_tls_x509_start_allow_anon_client(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_x509_start_allow_anon_client(from, to);
-}
-
-static void *
-test_migrate_multifd_tls_x509_start_reject_anon_client(QTestState *from,
- QTestState *to)
-{
- test_migrate_precopy_tcp_multifd_start_common(from, to, "none");
- return test_migrate_tls_x509_start_reject_anon_client(from, to);
-}
-#endif /* CONFIG_TASN1 */
-
-static void test_multifd_tcp_tls_psk_match(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tcp_tls_psk_start_match,
- .finish_hook = test_migrate_tls_psk_finish,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_tls_psk_mismatch(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tcp_tls_psk_start_mismatch,
- .finish_hook = test_migrate_tls_psk_finish,
- .result = MIG_TEST_FAIL,
- };
- test_precopy_common(&args);
-}
-
-#ifdef CONFIG_TASN1
-static void test_multifd_tcp_tls_x509_default_host(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tls_x509_start_default_host,
- .finish_hook = test_migrate_tls_x509_finish,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_tls_x509_override_host(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tls_x509_start_override_host,
- .finish_hook = test_migrate_tls_x509_finish,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_tls_x509_mismatch_host(void)
-{
- /*
- * This has different behaviour to the non-multifd case.
- *
- * In non-multifd case when client aborts due to mismatched
- * cert host, the server has already started trying to load
- * migration state, and so it exits with I/O failure.
- *
- * In multifd case when client aborts due to mismatched
- * cert host, the server is still waiting for the other
- * multifd connections to arrive so hasn't started trying
- * to load migration state, and thus just aborts the migration
- * without exiting.
- */
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tls_x509_start_mismatch_host,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_tls_x509_allow_anon_client(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tls_x509_start_allow_anon_client,
- .finish_hook = test_migrate_tls_x509_finish,
- };
- test_precopy_common(&args);
-}
-
-static void test_multifd_tcp_tls_x509_reject_anon_client(void)
-{
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- },
- .listen_uri = "defer",
- .start_hook = test_migrate_multifd_tls_x509_start_reject_anon_client,
- .finish_hook = test_migrate_tls_x509_finish,
- .result = MIG_TEST_FAIL,
- };
- test_precopy_common(&args);
-}
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
-/*
- * This test does:
- * source target
- * migrate_incoming
- * migrate
- * migrate_cancel
- * launch another target
- * migrate
- *
- * And see that it works
- */
-static void test_multifd_tcp_cancel(void)
-{
- MigrateStart args = {
- .hide_stderr = true,
- };
- QTestState *from, *to, *to2;
-
- if (test_migrate_start(&from, &to, "defer", &args)) {
- return;
- }
-
- migrate_ensure_non_converge(from);
- migrate_prepare_for_dirty_mem(from);
-
- migrate_set_parameter_int(from, "multifd-channels", 16);
- migrate_set_parameter_int(to, "multifd-channels", 16);
-
- migrate_set_capability(from, "multifd", true);
- migrate_set_capability(to, "multifd", true);
-
- /* Start incoming migration from the 1st socket */
- migrate_incoming_qmp(to, "tcp:127.0.0.1:0", "{}");
-
- /* Wait for the first serial output from the source */
- wait_for_serial("src_serial");
-
- migrate_qmp(from, to, NULL, NULL, "{}");
-
- migrate_wait_for_dirty_mem(from, to);
-
- migrate_cancel(from);
-
- /* Make sure QEMU process "to" exited */
- qtest_set_expected_status(to, EXIT_FAILURE);
- qtest_wait_qemu(to);
-
- args = (MigrateStart){
- .only_target = true,
- };
-
- if (test_migrate_start(&from, &to2, "defer", &args)) {
- return;
- }
-
- migrate_set_parameter_int(to2, "multifd-channels", 16);
-
- migrate_set_capability(to2, "multifd", true);
-
- /* Start incoming migration from the 1st socket */
- migrate_incoming_qmp(to2, "tcp:127.0.0.1:0", "{}");
-
- wait_for_migration_status(from, "cancelled", NULL);
-
- migrate_ensure_non_converge(from);
-
- migrate_qmp(from, to2, NULL, NULL, "{}");
-
- migrate_wait_for_dirty_mem(from, to2);
-
- migrate_ensure_converge(from);
-
- wait_for_stop(from, &src_state);
- qtest_qmp_eventwait(to2, "RESUME");
-
- wait_for_serial("dest_serial");
- wait_for_migration_complete(from);
- test_migrate_end(from, to2, true);
-}
-
-static void calc_dirty_rate(QTestState *who, uint64_t calc_time)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'calc-dirty-rate',"
- "'arguments': { "
- "'calc-time': %" PRIu64 ","
- "'mode': 'dirty-ring' }}",
- calc_time);
-}
-
-static QDict *query_dirty_rate(QTestState *who)
-{
- return qtest_qmp_assert_success_ref(who,
- "{ 'execute': 'query-dirty-rate' }");
-}
-
-static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'set-vcpu-dirty-limit',"
- "'arguments': { "
- "'dirty-rate': %" PRIu64 " } }",
- dirtyrate);
-}
-
-static void cancel_vcpu_dirty_limit(QTestState *who)
-{
- qtest_qmp_assert_success(who,
- "{ 'execute': 'cancel-vcpu-dirty-limit' }");
-}
-
-static QDict *query_vcpu_dirty_limit(QTestState *who)
-{
- QDict *rsp;
-
- rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }");
- g_assert(!qdict_haskey(rsp, "error"));
- g_assert(qdict_haskey(rsp, "return"));
-
- return rsp;
-}
-
-static bool calc_dirtyrate_ready(QTestState *who)
-{
- QDict *rsp_return;
- gchar *status;
-
- rsp_return = query_dirty_rate(who);
- g_assert(rsp_return);
-
- status = g_strdup(qdict_get_str(rsp_return, "status"));
- g_assert(status);
-
- return g_strcmp0(status, "measuring");
-}
-
-static void wait_for_calc_dirtyrate_complete(QTestState *who,
- int64_t time_s)
-{
- int max_try_count = 10000;
- usleep(time_s * 1000000);
-
- while (!calc_dirtyrate_ready(who) && max_try_count--) {
- usleep(1000);
- }
-
- /*
- * Set the timeout to 10 s (max_try_count * 1000 us); if the dirty rate
- * measurement has not completed by then, fail the test.
- */
- g_assert_cmpint(max_try_count, !=, 0);
-}
-
-static int64_t get_dirty_rate(QTestState *who)
-{
- QDict *rsp_return;
- gchar *status;
- QList *rates;
- const QListEntry *entry;
- QDict *rate;
- int64_t dirtyrate;
-
- rsp_return = query_dirty_rate(who);
- g_assert(rsp_return);
-
- status = g_strdup(qdict_get_str(rsp_return, "status"));
- g_assert(status);
- g_assert_cmpstr(status, ==, "measured");
-
- rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate");
- g_assert(rates && !qlist_empty(rates));
-
- entry = qlist_first(rates);
- g_assert(entry);
-
- rate = qobject_to(QDict, qlist_entry_obj(entry));
- g_assert(rate);
-
- dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1);
-
- qobject_unref(rsp_return);
- return dirtyrate;
-}
-
-static int64_t get_limit_rate(QTestState *who)
-{
- QDict *rsp_return;
- QList *rates;
- const QListEntry *entry;
- QDict *rate;
- int64_t dirtyrate;
-
- rsp_return = query_vcpu_dirty_limit(who);
- g_assert(rsp_return);
-
- rates = qdict_get_qlist(rsp_return, "return");
- g_assert(rates && !qlist_empty(rates));
-
- entry = qlist_first(rates);
- g_assert(entry);
-
- rate = qobject_to(QDict, qlist_entry_obj(entry));
- g_assert(rate);
-
- dirtyrate = qdict_get_try_int(rate, "limit-rate", -1);
-
- qobject_unref(rsp_return);
- return dirtyrate;
-}
-
-static QTestState *dirtylimit_start_vm(void)
-{
- QTestState *vm = NULL;
- g_autofree gchar *cmd = NULL;
-
- bootfile_create(tmpfs, false);
- cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 "
- "-name dirtylimit-test,debug-threads=on "
- "-m 150M -smp 1 "
- "-serial file:%s/vm_serial "
- "-drive file=%s,format=raw ",
- tmpfs, bootpath);
-
- vm = qtest_init(cmd);
- return vm;
-}
-
-static void dirtylimit_stop_vm(QTestState *vm)
-{
- qtest_quit(vm);
- cleanup("vm_serial");
-}
-
-static void test_vcpu_dirty_limit(void)
-{
- QTestState *vm;
- int64_t origin_rate;
- int64_t quota_rate;
- int64_t rate;
- int max_try_count = 20;
- int hit = 0;
-
- /* Start vm for vcpu dirtylimit test */
- vm = dirtylimit_start_vm();
-
- /* Wait for the first serial output from the vm */
- wait_for_serial("vm_serial");
-
- /* Do a dirty rate measurement with a calc time of 1s */
- calc_dirty_rate(vm, 1);
-
- /* Sleep for the calc time, then wait for the calculation to complete */
- wait_for_calc_dirtyrate_complete(vm, 1);
-
- /* Query original dirty page rate */
- origin_rate = get_dirty_rate(vm);
-
- /* VM booted from bootsect should dirty memory steadily */
- assert(origin_rate != 0);
-
- /* Set the quota dirty page rate to half of the original */
- quota_rate = origin_rate / 2;
-
- /* Set dirtylimit */
- dirtylimit_set_all(vm, quota_rate);
-
- /*
- * Check that set-vcpu-dirty-limit and query-vcpu-dirty-limit
- * work as expected.
- */
- g_assert_cmpint(quota_rate, ==, get_limit_rate(vm));
-
- /* Sleep a bit to check whether it takes effect */
- usleep(2000000);
-
- /*
- * Check whether the dirty limit really takes effect. Set the
- * timeout to 20 s (max_try_count * 1 s); if the dirty limit
- * doesn't take effect by then, fail the test.
- */
- while (--max_try_count) {
- calc_dirty_rate(vm, 1);
- wait_for_calc_dirtyrate_complete(vm, 1);
- rate = get_dirty_rate(vm);
-
- /*
- * Assume the limit was hit if the current rate is less
- * than the quota rate (within the accepted error).
- */
- if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
- hit = 1;
- break;
- }
- }
-
- g_assert_cmpint(hit, ==, 1);
-
- hit = 0;
- max_try_count = 20;
-
- /* Check whether dirty limit cancellation takes effect */
- cancel_vcpu_dirty_limit(vm);
- while (--max_try_count) {
- calc_dirty_rate(vm, 1);
- wait_for_calc_dirtyrate_complete(vm, 1);
- rate = get_dirty_rate(vm);
-
- /*
- * Assume the dirty limit was cancelled if the current rate is
- * greater than the quota rate (within the accepted error).
- */
- if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
- hit = 1;
- break;
- }
- }
-
- g_assert_cmpint(hit, ==, 1);
- dirtylimit_stop_vm(vm);
-}
-
-static void migrate_dirty_limit_wait_showup(QTestState *from,
- const int64_t period,
- const int64_t value)
-{
- /* Enable dirty limit capability */
- migrate_set_capability(from, "dirty-limit", true);
-
- /* Set dirty limit parameters */
- migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period);
- migrate_set_parameter_int(from, "vcpu-dirty-limit", value);
-
- /* Make sure migrate can't converge */
- migrate_ensure_non_converge(from);
-
- /* To check limit rate after precopy */
- migrate_set_capability(from, "pause-before-switchover", true);
-
- /* Wait for the serial output from the source */
- wait_for_serial("src_serial");
-}
-
-/*
- * This test does:
- * source destination
- * start vm
- * start incoming vm
- * migrate
- * wait dirty limit to begin
- * cancel migrate
- * cancellation check
- * restart incoming vm
- * migrate
- * wait dirty limit to begin
- * wait pre-switchover event
- * convergence condition check
- *
- * And see if dirty limit migration works correctly.
- * This test case involves many passes, so it runs in slow mode only.
- */
-static void test_migrate_dirty_limit(void)
-{
- g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
- QTestState *from, *to;
- int64_t remaining;
- uint64_t throttle_us_per_full;
- /*
- * We want the test to be stable and as fast as possible.
- * E.g., with 1Gb/s of bandwidth the migration may pass without the
- * dirty limit kicking in, so we need to decrease the bandwidth.
- */
- const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;
- const int64_t max_bandwidth = 400000000; /* ~400Mb/s */
- const int64_t downtime_limit = 250; /* 250ms */
- /*
- * We migrate through unix-socket (> 500Mb/s).
- * Thus, expected migration speed ~= bandwidth limit (< 500Mb/s).
- * So, we can predict expected_threshold.
- */
- const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000;
- int max_try_count = 10;
- MigrateCommon args = {
- .start = {
- .hide_stderr = true,
- .use_dirty_ring = true,
- },
- .listen_uri = uri,
- .connect_uri = uri,
- };
-
- /* Start src, dst vm */
- if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
- return;
- }
-
- /* Prepare for dirty limit migration and wait for the src vm to show up */
- migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value);
-
- /* Start migrate */
- migrate_qmp(from, to, args.connect_uri, NULL, "{}");
-
- /* Wait for dirty limit throttle begin */
- throttle_us_per_full = 0;
- while (throttle_us_per_full == 0) {
- throttle_us_per_full =
- read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
- usleep(100);
- g_assert_false(src_state.stop_seen);
- }
-
- /* Now cancel migrate and wait for dirty limit throttle switch off */
- migrate_cancel(from);
- wait_for_migration_status(from, "cancelled", NULL);
-
- /* Check whether the dirty limit throttle switched off, with a 1ms timeout */
- do {
- throttle_us_per_full =
- read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
- usleep(100);
- g_assert_false(src_state.stop_seen);
- } while (throttle_us_per_full != 0 && --max_try_count);
-
- /* Assert dirty limit is not in service */
- g_assert_cmpint(throttle_us_per_full, ==, 0);
-
- args = (MigrateCommon) {
- .start = {
- .only_target = true,
- .use_dirty_ring = true,
- },
- .listen_uri = uri,
- .connect_uri = uri,
- };
-
- /* Restart the dst vm; the src vm has already shown up so no need to wait */
- if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
- return;
- }
-
- /* Start migrate */
- migrate_qmp(from, to, args.connect_uri, NULL, "{}");
-
- /* Wait for dirty limit throttle begin */
- throttle_us_per_full = 0;
- while (throttle_us_per_full == 0) {
- throttle_us_per_full =
- read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
- usleep(100);
- g_assert_false(src_state.stop_seen);
- }
-
- /*
- * The dirty limit rate should equal the return value of
- * query-vcpu-dirty-limit when the dirty-limit capability is set.
- */
- g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from));
-
- /* Now that we have tested that the dirty limit works, let it converge */
- migrate_set_parameter_int(from, "downtime-limit", downtime_limit);
- migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth);
-
- /*
- * Wait for pre-switchover status to check whether the migration
- * satisfies the convergence condition.
- */
- wait_for_migration_status(from, "pre-switchover", NULL);
-
- remaining = read_ram_property_int(from, "remaining");
- g_assert_cmpint(remaining, <,
- (expected_threshold + expected_threshold / 100));
-
- migrate_continue(from, "pre-switchover");
-
- qtest_qmp_eventwait(to, "RESUME");
-
- wait_for_serial("dest_serial");
- wait_for_migration_complete(from);
-
- test_migrate_end(from, to, true);
-}
-
-static bool kvm_dirty_ring_supported(void)
-{
-#if defined(__linux__) && defined(HOST_X86_64)
- int ret, kvm_fd = open("/dev/kvm", O_RDONLY);
-
- if (kvm_fd < 0) {
- return false;
- }
-
- ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
- close(kvm_fd);
-
- /* We test with 4096 slots */
- if (ret < 4096) {
- return false;
- }
-
- return true;
-#else
- return false;
-#endif
+ *argc_p = j;
}
int main(int argc, char **argv)
{
- bool has_kvm, has_tcg;
- bool has_uffd, is_x86;
- const char *arch;
- g_autoptr(GError) err = NULL;
- const char *qemu_src = getenv(QEMU_ENV_SRC);
- const char *qemu_dst = getenv(QEMU_ENV_DST);
+ MigrationTestEnv *env;
int ret;
+ bool full_set = false;
- g_test_init(&argc, &argv, NULL);
-
- /*
- * The default QTEST_QEMU_BINARY must always be provided because
- * that is what helpers use to query the accel type and
- * architecture.
- */
- if (qemu_src && qemu_dst) {
- g_test_message("Only one of %s, %s is allowed",
- QEMU_ENV_SRC, QEMU_ENV_DST);
- exit(1);
- }
-
- has_kvm = qtest_has_accel("kvm");
- has_tcg = qtest_has_accel("tcg");
-
- if (!has_tcg && !has_kvm) {
- g_test_skip("No KVM or TCG accelerator available");
- return 0;
- }
-
- has_uffd = ufd_version_check();
- arch = qtest_get_arch();
- is_x86 = !strcmp(arch, "i386") || !strcmp(arch, "x86_64");
-
- tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err);
- if (!tmpfs) {
- g_test_message("Can't create temporary directory in %s: %s",
- g_get_tmp_dir(), err->message);
- }
- g_assert(tmpfs);
+ /* strip the --full option if it's present */
+ parse_args(&argc, &argv, &full_set);
+ g_test_init(&argc, &argv, NULL);
+ env = migration_get_env();
+ env->full_set = full_set;
module_call_init(MODULE_INIT_QOM);
- migration_test_add("/migration/bad_dest", test_baddest);
-#ifndef _WIN32
- migration_test_add("/migration/analyze-script", test_analyze_script);
- migration_test_add("/migration/vmstate-checker-script",
- test_vmstate_checker_script);
-#endif
-
- if (is_x86) {
- migration_test_add("/migration/precopy/unix/suspend/live",
- test_precopy_unix_suspend_live);
- migration_test_add("/migration/precopy/unix/suspend/notlive",
- test_precopy_unix_suspend_notlive);
- }
-
- if (has_uffd) {
- migration_test_add("/migration/postcopy/plain", test_postcopy);
- migration_test_add("/migration/postcopy/recovery/plain",
- test_postcopy_recovery);
- migration_test_add("/migration/postcopy/preempt/plain",
- test_postcopy_preempt);
- migration_test_add("/migration/postcopy/preempt/recovery/plain",
- test_postcopy_preempt_recovery);
- migration_test_add("/migration/postcopy/recovery/double-failures/handshake",
- test_postcopy_recovery_fail_handshake);
- migration_test_add("/migration/postcopy/recovery/double-failures/reconnect",
- test_postcopy_recovery_fail_reconnect);
- if (is_x86) {
- migration_test_add("/migration/postcopy/suspend",
- test_postcopy_suspend);
- }
- }
-
- migration_test_add("/migration/precopy/unix/plain",
- test_precopy_unix_plain);
- migration_test_add("/migration/precopy/unix/xbzrle",
- test_precopy_unix_xbzrle);
- migration_test_add("/migration/precopy/file",
- test_precopy_file);
- migration_test_add("/migration/precopy/file/offset",
- test_precopy_file_offset);
-#ifndef _WIN32
- migration_test_add("/migration/precopy/file/offset/fdset",
- test_precopy_file_offset_fdset);
-#endif
- migration_test_add("/migration/precopy/file/offset/bad",
- test_precopy_file_offset_bad);
-
- /*
- * Our CI system has problems with shared memory.
- * Don't run this test until we find a workaround.
- */
- if (getenv("QEMU_TEST_FLAKY_TESTS")) {
- migration_test_add("/migration/mode/reboot", test_mode_reboot);
- }
-
- migration_test_add("/migration/precopy/file/mapped-ram",
- test_precopy_file_mapped_ram);
- migration_test_add("/migration/precopy/file/mapped-ram/live",
- test_precopy_file_mapped_ram_live);
-
- migration_test_add("/migration/multifd/file/mapped-ram",
- test_multifd_file_mapped_ram);
- migration_test_add("/migration/multifd/file/mapped-ram/live",
- test_multifd_file_mapped_ram_live);
-
- migration_test_add("/migration/multifd/file/mapped-ram/dio",
- test_multifd_file_mapped_ram_dio);
-
-#ifndef _WIN32
- migration_test_add("/migration/multifd/file/mapped-ram/fdset",
- test_multifd_file_mapped_ram_fdset);
- migration_test_add("/migration/multifd/file/mapped-ram/fdset/dio",
- test_multifd_file_mapped_ram_fdset_dio);
-#endif
-
-#ifdef CONFIG_GNUTLS
- migration_test_add("/migration/precopy/unix/tls/psk",
- test_precopy_unix_tls_psk);
-
- if (has_uffd) {
- /*
- * NOTE: the psk test is enough for postcopy, as other types of TLS
- * channels are tested under precopy. What we want to test here is the
- * general postcopy path with a TLS channel enabled.
- */
- migration_test_add("/migration/postcopy/tls/psk",
- test_postcopy_tls_psk);
- migration_test_add("/migration/postcopy/recovery/tls/psk",
- test_postcopy_recovery_tls_psk);
- migration_test_add("/migration/postcopy/preempt/tls/psk",
- test_postcopy_preempt_tls_psk);
- migration_test_add("/migration/postcopy/preempt/recovery/tls/psk",
- test_postcopy_preempt_all);
- }
-#ifdef CONFIG_TASN1
- migration_test_add("/migration/precopy/unix/tls/x509/default-host",
- test_precopy_unix_tls_x509_default_host);
- migration_test_add("/migration/precopy/unix/tls/x509/override-host",
- test_precopy_unix_tls_x509_override_host);
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
- migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain);
-
- migration_test_add("/migration/precopy/tcp/plain/switchover-ack",
- test_precopy_tcp_switchover_ack);
-
-#ifdef CONFIG_GNUTLS
- migration_test_add("/migration/precopy/tcp/tls/psk/match",
- test_precopy_tcp_tls_psk_match);
- migration_test_add("/migration/precopy/tcp/tls/psk/mismatch",
- test_precopy_tcp_tls_psk_mismatch);
-#ifdef CONFIG_TASN1
- migration_test_add("/migration/precopy/tcp/tls/x509/default-host",
- test_precopy_tcp_tls_x509_default_host);
- migration_test_add("/migration/precopy/tcp/tls/x509/override-host",
- test_precopy_tcp_tls_x509_override_host);
- migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host",
- test_precopy_tcp_tls_x509_mismatch_host);
- migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client",
- test_precopy_tcp_tls_x509_friendly_client);
- migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client",
- test_precopy_tcp_tls_x509_hostile_client);
- migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client",
- test_precopy_tcp_tls_x509_allow_anon_client);
- migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client",
- test_precopy_tcp_tls_x509_reject_anon_client);
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
- /* migration_test_add("/migration/ignore_shared", test_ignore_shared); */
-#ifndef _WIN32
- migration_test_add("/migration/precopy/fd/tcp",
- test_migrate_precopy_fd_socket);
- migration_test_add("/migration/precopy/fd/file",
- test_migrate_precopy_fd_file);
-#endif
- migration_test_add("/migration/validate_uuid", test_validate_uuid);
- migration_test_add("/migration/validate_uuid_error",
- test_validate_uuid_error);
- migration_test_add("/migration/validate_uuid_src_not_set",
- test_validate_uuid_src_not_set);
- migration_test_add("/migration/validate_uuid_dst_not_set",
- test_validate_uuid_dst_not_set);
- migration_test_add("/migration/validate_uri/channels/both_set",
- test_validate_uri_channels_both_set);
- migration_test_add("/migration/validate_uri/channels/none_set",
- test_validate_uri_channels_none_set);
- /*
- * See explanation why this test is slow on function definition
- */
- if (g_test_slow()) {
- migration_test_add("/migration/auto_converge",
- test_migrate_auto_converge);
- if (g_str_equal(arch, "x86_64") &&
- has_kvm && kvm_dirty_ring_supported()) {
- migration_test_add("/migration/dirty_limit",
- test_migrate_dirty_limit);
- }
- }
- migration_test_add("/migration/multifd/tcp/uri/plain/none",
- test_multifd_tcp_uri_none);
- migration_test_add("/migration/multifd/tcp/channels/plain/none",
- test_multifd_tcp_channels_none);
- migration_test_add("/migration/multifd/tcp/plain/zero-page/legacy",
- test_multifd_tcp_zero_page_legacy);
- migration_test_add("/migration/multifd/tcp/plain/zero-page/none",
- test_multifd_tcp_no_zero_page);
- migration_test_add("/migration/multifd/tcp/plain/cancel",
- test_multifd_tcp_cancel);
- migration_test_add("/migration/multifd/tcp/plain/zlib",
- test_multifd_tcp_zlib);
-#ifdef CONFIG_ZSTD
- migration_test_add("/migration/multifd/tcp/plain/zstd",
- test_multifd_tcp_zstd);
-#endif
-#ifdef CONFIG_QPL
- migration_test_add("/migration/multifd/tcp/plain/qpl",
- test_multifd_tcp_qpl);
-#endif
-#ifdef CONFIG_UADK
- migration_test_add("/migration/multifd/tcp/plain/uadk",
- test_multifd_tcp_uadk);
-#endif
-#ifdef CONFIG_GNUTLS
- migration_test_add("/migration/multifd/tcp/tls/psk/match",
- test_multifd_tcp_tls_psk_match);
- migration_test_add("/migration/multifd/tcp/tls/psk/mismatch",
- test_multifd_tcp_tls_psk_mismatch);
-#ifdef CONFIG_TASN1
- migration_test_add("/migration/multifd/tcp/tls/x509/default-host",
- test_multifd_tcp_tls_x509_default_host);
- migration_test_add("/migration/multifd/tcp/tls/x509/override-host",
- test_multifd_tcp_tls_x509_override_host);
- migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host",
- test_multifd_tcp_tls_x509_mismatch_host);
- migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client",
- test_multifd_tcp_tls_x509_allow_anon_client);
- migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client",
- test_multifd_tcp_tls_x509_reject_anon_client);
-#endif /* CONFIG_TASN1 */
-#endif /* CONFIG_GNUTLS */
-
- if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) {
- migration_test_add("/migration/dirty_ring",
- test_precopy_unix_dirty_ring);
- migration_test_add("/migration/vcpu_dirty_limit",
- test_vcpu_dirty_limit);
- }
+ migration_test_add_tls(env);
+ migration_test_add_compression(env);
+ migration_test_add_postcopy(env);
+ migration_test_add_file(env);
+ migration_test_add_precopy(env);
+ migration_test_add_cpr(env);
+ migration_test_add_misc(env);
ret = g_test_run();
g_assert_cmpint(ret, ==, 0);
- bootfile_delete();
- ret = rmdir(tmpfs);
- if (ret != 0) {
- g_test_message("unable to rmdir: path (%s): %s",
- tmpfs, strerror(errno));
- }
- g_free(tmpfs);
+ ret = migration_env_clean(env);
return ret;
}
diff --git a/tests/migration/Makefile b/tests/qtest/migration/Makefile
index 2c5ee28..2c5ee28 100644
--- a/tests/migration/Makefile
+++ b/tests/qtest/migration/Makefile
diff --git a/tests/migration/aarch64/Makefile b/tests/qtest/migration/aarch64/Makefile
index 9c4fa18..9c4fa18 100644
--- a/tests/migration/aarch64/Makefile
+++ b/tests/qtest/migration/aarch64/Makefile
diff --git a/tests/migration/aarch64/a-b-kernel.S b/tests/qtest/migration/aarch64/a-b-kernel.S
index a4103ec..a4103ec 100644
--- a/tests/migration/aarch64/a-b-kernel.S
+++ b/tests/qtest/migration/aarch64/a-b-kernel.S
diff --git a/tests/migration/aarch64/a-b-kernel.h b/tests/qtest/migration/aarch64/a-b-kernel.h
index 34e518d..34e518d 100644
--- a/tests/migration/aarch64/a-b-kernel.h
+++ b/tests/qtest/migration/aarch64/a-b-kernel.h
diff --git a/tests/qtest/migration/bootfile.c b/tests/qtest/migration/bootfile.c
new file mode 100644
index 0000000..fac059d
--- /dev/null
+++ b/tests/qtest/migration/bootfile.c
@@ -0,0 +1,70 @@
+/*
+ * Guest code setup for migration tests
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+/*
+ * The boot file repeatedly modifies the memory area in
+ * [start_address, end_address). It outputs a 'B' at a fixed rate while
+ * it is still running.
+ */
+#include "bootfile.h"
+#include "i386/a-b-bootblock.h"
+#include "aarch64/a-b-kernel.h"
+#include "ppc64/a-b-kernel.h"
+#include "s390x/a-b-bios.h"
+
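+/* Path of the boot blob currently written to disk; NULL when none exists */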
+static char *bootpath;
+
+void bootfile_delete(void)
+{
+ if (!bootpath) {
+ return;
+ }
+ unlink(bootpath);
+ g_free(bootpath);
+ bootpath = NULL;
+}
+
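+/*
+ * Write the architecture-specific guest code blob to <dir>/bootsect and
+ * return its path. Any previously created boot file is removed first;
+ * the suspend_me flag is only honoured by the x86 boot sector.
+ */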
+char *bootfile_create(const char *arch, const char *dir, bool suspend_me)
+{
+ unsigned char *content;
+ size_t len;
+
+ bootfile_delete();
+ bootpath = g_strdup_printf("%s/bootsect", dir);
+ if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
+ /* the assembled x86 boot sector should be exactly one sector large */
+ g_assert(sizeof(x86_bootsect) == 512);
+ x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me;
+ content = x86_bootsect;
+ len = sizeof(x86_bootsect);
+ } else if (g_str_equal(arch, "s390x")) {
+ content = s390x_elf;
+ len = sizeof(s390x_elf);
+ } else if (strcmp(arch, "ppc64") == 0) {
+ content = ppc64_kernel;
+ len = sizeof(ppc64_kernel);
+ } else if (strcmp(arch, "aarch64") == 0) {
+ content = aarch64_kernel;
+ len = sizeof(aarch64_kernel);
+ g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE);
+ } else {
+ g_assert_not_reached();
+ }
+
+ FILE *bootfile = fopen(bootpath, "wb");
+
+ g_assert_cmpint(fwrite(content, len, 1, bootfile), ==, 1);
+ fclose(bootfile);
+
+ return bootpath;
+}
diff --git a/tests/qtest/migration/bootfile.h b/tests/qtest/migration/bootfile.h
new file mode 100644
index 0000000..6d6a673
--- /dev/null
+++ b/tests/qtest/migration/bootfile.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BOOTFILE_H
+#define BOOTFILE_H
+
+/* Common */
+#define TEST_MEM_PAGE_SIZE 4096
+
+/* x86 */
+#define X86_TEST_MEM_START (1 * 1024 * 1024)
+#define X86_TEST_MEM_END (100 * 1024 * 1024)
+
+/* S390 */
+#define S390_TEST_MEM_START (1 * 1024 * 1024)
+#define S390_TEST_MEM_END (100 * 1024 * 1024)
+
+/* PPC */
+#define PPC_TEST_MEM_START (1 * 1024 * 1024)
+#define PPC_TEST_MEM_END (100 * 1024 * 1024)
+#define PPC_H_PUT_TERM_CHAR 0x58
+
+/* ARM */
+#define ARM_TEST_MEM_START (0x40000000 + 1 * 1024 * 1024)
+#define ARM_TEST_MEM_END (0x40000000 + 100 * 1024 * 1024)
+#define ARM_MACH_VIRT_UART 0x09000000
+/* AArch64 kernel load address is 0x40080000, and the test memory starts at
+ * 0x40100000. So the maximum allowable kernel size is 512KB.
+ */
+#define ARM_TEST_MAX_KERNEL_SIZE (512 * 1024)
+
+void bootfile_delete(void);
+char *bootfile_create(const char *arch, const char *dir, bool suspend_me);
+
+#endif /* BOOTFILE_H */
diff --git a/tests/qtest/migration/compression-tests.c b/tests/qtest/migration/compression-tests.c
new file mode 100644
index 0000000..b827665
--- /dev/null
+++ b/tests/qtest/migration/compression-tests.c
@@ -0,0 +1,226 @@
+/*
+ * QTest testcases for migration compression
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+#include "qemu/module.h"
+
+
+static char *tmpfs;
+
+#ifdef CONFIG_ZSTD
+static void *
+migrate_hook_start_precopy_tcp_multifd_zstd(QTestState *from,
+ QTestState *to)
+{
+ migrate_set_parameter_int(from, "multifd-zstd-level", 2);
+ migrate_set_parameter_int(to, "multifd-zstd-level", 2);
+
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "zstd");
+}
+
+static void test_multifd_tcp_zstd(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_zstd,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_postcopy_tcp_zstd(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_zstd,
+ };
+
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_ZSTD */
+
+#ifdef CONFIG_QATZIP
+static void *
+migrate_hook_start_precopy_tcp_multifd_qatzip(QTestState *from,
+ QTestState *to)
+{
+ migrate_set_parameter_int(from, "multifd-qatzip-level", 2);
+ migrate_set_parameter_int(to, "multifd-qatzip-level", 2);
+
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "qatzip");
+}
+
+static void test_multifd_tcp_qatzip(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_qatzip,
+ };
+ test_precopy_common(&args);
+}
+#endif
+
+#ifdef CONFIG_QPL
+static void *
+migrate_hook_start_precopy_tcp_multifd_qpl(QTestState *from,
+ QTestState *to)
+{
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "qpl");
+}
+
+static void test_multifd_tcp_qpl(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_qpl,
+ };
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_QPL */
+
+#ifdef CONFIG_UADK
+static void *
+migrate_hook_start_precopy_tcp_multifd_uadk(QTestState *from,
+ QTestState *to)
+{
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "uadk");
+}
+
+static void test_multifd_tcp_uadk(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_uadk,
+ };
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_UADK */
+
+static void *
+migrate_hook_start_xbzrle(QTestState *from,
+ QTestState *to)
+{
+ migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432);
+ return NULL;
+}
+
+static void test_precopy_unix_xbzrle(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = uri,
+ .start_hook = migrate_hook_start_xbzrle,
+ .iterations = 2,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_XBZRLE] = true,
+ },
+ /*
+ * XBZRLE needs pages to be modified when doing the 2nd+ round
+ * iteration to have real data pushed to the stream.
+ */
+ .live = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void *
+migrate_hook_start_precopy_tcp_multifd_zlib(QTestState *from,
+ QTestState *to)
+{
+ /*
+ * Overloading this test to also check that set_parameter does not error.
+ * This is also done in the tests for the other compression methods.
+ */
+ migrate_set_parameter_int(from, "multifd-zlib-level", 2);
+ migrate_set_parameter_int(to, "multifd-zlib-level", 2);
+
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "zlib");
+}
+
+static void test_multifd_tcp_zlib(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_zlib,
+ };
+ test_precopy_common(&args);
+}
+
+static void migration_test_add_compression_smoke(MigrationTestEnv *env)
+{
+ migration_test_add("/migration/multifd/tcp/plain/zlib",
+ test_multifd_tcp_zlib);
+}
+
+void migration_test_add_compression(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ migration_test_add_compression_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+#ifdef CONFIG_ZSTD
+ migration_test_add("/migration/multifd/tcp/plain/zstd",
+ test_multifd_tcp_zstd);
+ if (env->has_uffd) {
+ migration_test_add("/migration/multifd+postcopy/tcp/plain/zstd",
+ test_multifd_postcopy_tcp_zstd);
+ }
+#endif
+
+#ifdef CONFIG_QATZIP
+ migration_test_add("/migration/multifd/tcp/plain/qatzip",
+ test_multifd_tcp_qatzip);
+#endif
+
+#ifdef CONFIG_QPL
+ migration_test_add("/migration/multifd/tcp/plain/qpl",
+ test_multifd_tcp_qpl);
+#endif
+
+#ifdef CONFIG_UADK
+ migration_test_add("/migration/multifd/tcp/plain/uadk",
+ test_multifd_tcp_uadk);
+#endif
+
+ if (g_test_slow()) {
+ migration_test_add("/migration/precopy/unix/xbzrle",
+ test_precopy_unix_xbzrle);
+ }
+}
diff --git a/tests/qtest/migration/cpr-tests.c b/tests/qtest/migration/cpr-tests.c
new file mode 100644
index 0000000..5e764a6
--- /dev/null
+++ b/tests/qtest/migration/cpr-tests.c
@@ -0,0 +1,136 @@
+/*
+ * QTest testcases for CPR
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+
+
+static char *tmpfs;
+
+static void *migrate_hook_start_mode_reboot(QTestState *from, QTestState *to)
+{
+ migrate_set_parameter_str(from, "mode", "cpr-reboot");
+ migrate_set_parameter_str(to, "mode", "cpr-reboot");
+
+ return NULL;
+}
+
+static void test_mode_reboot(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .start.use_shmem = true,
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_mode_reboot,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED] = true,
+ },
+ };
+
+ test_file_common(&args, true);
+}
+
+static void *test_mode_transfer_start(QTestState *from, QTestState *to)
+{
+ migrate_set_parameter_str(from, "mode", "cpr-transfer");
+ return NULL;
+}
+
+/*
+ * cpr-transfer mode cannot use the target monitor prior to starting the
+ * migration, and cannot connect synchronously to the monitor, so defer
+ * the target connection.
+ */
+static void test_mode_transfer_common(bool incoming_defer)
+{
+ g_autofree char *cpr_path = g_strdup_printf("%s/cpr.sock", tmpfs);
+ g_autofree char *mig_path = g_strdup_printf("%s/migsocket", tmpfs);
+ g_autofree char *uri = g_strdup_printf("unix:%s", mig_path);
+ g_autofree char *opts_target = NULL;
+
+ const char *opts = "-machine aux-ram-share=on -nodefaults";
+ g_autofree const char *cpr_channel = g_strdup_printf(
+ "cpr,addr.transport=socket,addr.type=unix,addr.path=%s",
+ cpr_path);
+
+ g_autofree char *connect_channels = g_strdup_printf(
+ "[ { 'channel-type': 'main',"
+ " 'addr': { 'transport': 'socket',"
+ " 'type': 'unix',"
+ " 'path': '%s' } } ]",
+ mig_path);
+
+ /*
+ * Set up a UNIX domain socket for the CPR channel before
+ * launching the destination VM, to avoid timing issues
+ * during connection setup.
+ */
+ int cpr_sockfd = qtest_socket_server(cpr_path);
+ g_assert(cpr_sockfd >= 0);
+
+ opts_target = g_strdup_printf("-incoming cpr,addr.transport=socket,"
+ "addr.type=fd,addr.str=%d %s",
+ cpr_sockfd, opts);
+ MigrateCommon args = {
+ .start.opts_source = opts,
+ .start.opts_target = opts_target,
+ .start.defer_target_connect = true,
+ .start.memory_backend = "-object memory-backend-memfd,id=pc.ram,size=%s"
+ " -machine memory-backend=pc.ram",
+ .listen_uri = incoming_defer ? "defer" : uri,
+ .connect_channels = connect_channels,
+ .cpr_channel = cpr_channel,
+ .start_hook = test_mode_transfer_start,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_mode_transfer(void)
+{
+ test_mode_transfer_common(false);
+}
+
+static void test_mode_transfer_defer(void)
+{
+ test_mode_transfer_common(true);
+}
+
+void migration_test_add_cpr(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ /* no tests in the smoke set for now */
+
+ if (!env->full_set) {
+ return;
+ }
+
+ /*
+ * Our CI system has problems with shared memory.
+ * Don't run this test until we find a workaround.
+ */
+ if (getenv("QEMU_TEST_FLAKY_TESTS")) {
+ migration_test_add("/migration/mode/reboot", test_mode_reboot);
+ }
+
+ if (env->has_kvm) {
+ migration_test_add("/migration/mode/transfer", test_mode_transfer);
+ migration_test_add("/migration/mode/transfer/defer",
+ test_mode_transfer_defer);
+ }
+}
diff --git a/tests/qtest/migration/file-tests.c b/tests/qtest/migration/file-tests.c
new file mode 100644
index 0000000..4d78ce0
--- /dev/null
+++ b/tests/qtest/migration/file-tests.c
@@ -0,0 +1,341 @@
+/*
+ * QTest testcases for migration to file
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+#include "qobject/qlist.h"
+
+
+static char *tmpfs;
+
+static void test_precopy_file(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ };
+
+ test_file_common(&args, true);
+}
+
+#ifndef _WIN32
+static void fdset_add_fds(QTestState *qts, const char *file, int flags,
+ int num_fds, bool direct_io)
+{
+ for (int i = 0; i < num_fds; i++) {
+ int fd;
+
+#ifdef O_DIRECT
+ /* only secondary channels can use direct-io */
+ if (direct_io && i != 0) {
+ flags |= O_DIRECT;
+ }
+#endif
+
+ fd = open(file, flags, 0660);
+ assert(fd != -1);
+
+ qtest_qmp_fds_assert_success(qts, &fd, 1, "{'execute': 'add-fd', "
+ "'arguments': {'fdset-id': 1}}");
+ close(fd);
+ }
+}
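+
+/*
+ * Note (illustrative): the fds added to fdset 1 above are consumed by
+ * the "file:/dev/fdset/1,offset=..." URIs used by the fdset tests below.
+ */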
+
+static void *migrate_hook_start_file_offset_fdset(QTestState *from,
+ QTestState *to)
+{
+ g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+
+ fdset_add_fds(from, file, O_WRONLY, 1, false);
+ fdset_add_fds(to, file, O_RDONLY, 1, false);
+
+ return NULL;
+}
+
+static void test_precopy_file_offset_fdset(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
+ FILE_TEST_OFFSET);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_file_offset_fdset,
+ };
+
+ test_file_common(&args, false);
+}
+#endif
+
+static void test_precopy_file_offset(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=%d", tmpfs,
+ FILE_TEST_FILENAME,
+ FILE_TEST_OFFSET);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ };
+
+ test_file_common(&args, false);
+}
+
+static void test_precopy_file_offset_bad(void)
+{
+ /* using a value not supported by qemu_strtosz() */
+ g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=0x20M",
+ tmpfs, FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .result = MIG_TEST_QMP_ERROR,
+ };
+
+ test_file_common(&args, false);
+}
+
+static void test_precopy_file_mapped_ram_live(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ },
+ };
+
+ test_file_common(&args, false);
+}
+
+static void test_precopy_file_mapped_ram(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ },
+ };
+
+ test_file_common(&args, true);
+}
+
+static void test_multifd_file_mapped_ram_live(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ },
+ };
+
+ test_file_common(&args, false);
+}
+
+static void test_multifd_file_mapped_ram(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ },
+ };
+
+ test_file_common(&args, true);
+}
+
+static void *migrate_hook_start_multifd_mapped_ram_dio(QTestState *from,
+ QTestState *to)
+{
+ migrate_set_parameter_bool(from, "direct-io", true);
+ migrate_set_parameter_bool(to, "direct-io", true);
+
+ return NULL;
+}
+
+static void test_multifd_file_mapped_ram_dio(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+ FILE_TEST_FILENAME);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_mapped_ram_dio,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+
+ if (!probe_o_direct_support(tmpfs)) {
+ g_test_skip("Filesystem does not support O_DIRECT");
+ return;
+ }
+
+ test_file_common(&args, true);
+}
+
+#ifndef _WIN32
+static void migrate_hook_end_multifd_mapped_ram_fdset(QTestState *from,
+ QTestState *to,
+ void *opaque)
+{
+ QDict *resp;
+ QList *fdsets;
+
+ /*
+ * Remove the fdsets after migration, otherwise a second migration
+ * would fail due to fdset reuse.
+ */
+ qtest_qmp_assert_success(from, "{'execute': 'remove-fd', "
+ "'arguments': { 'fdset-id': 1}}");
+
+ /*
+ * Make sure no fdsets are left after migration, otherwise a
+ * second migration would fail due to fdset reuse.
+ */
+ resp = qtest_qmp(from, "{'execute': 'query-fdsets', "
+ "'arguments': {}}");
+ g_assert(qdict_haskey(resp, "return"));
+ fdsets = qdict_get_qlist(resp, "return");
+ g_assert(fdsets && qlist_empty(fdsets));
+ qobject_unref(resp);
+}
+
+static void *migrate_hook_start_multifd_mapped_ram_fdset_dio(QTestState *from,
+ QTestState *to)
+{
+ g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+
+ fdset_add_fds(from, file, O_WRONLY, 2, true);
+ fdset_add_fds(to, file, O_RDONLY, 2, true);
+
+ migrate_set_parameter_bool(from, "direct-io", true);
+ migrate_set_parameter_bool(to, "direct-io", true);
+
+ return NULL;
+}
+
+static void *migrate_hook_start_multifd_mapped_ram_fdset(QTestState *from,
+ QTestState *to)
+{
+ g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+
+ fdset_add_fds(from, file, O_WRONLY, 2, false);
+ fdset_add_fds(to, file, O_RDONLY, 2, false);
+
+ return NULL;
+}
+
+static void test_multifd_file_mapped_ram_fdset(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
+ FILE_TEST_OFFSET);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_mapped_ram_fdset,
+ .end_hook = migrate_hook_end_multifd_mapped_ram_fdset,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+
+ test_file_common(&args, true);
+}
+
+static void test_multifd_file_mapped_ram_fdset_dio(void)
+{
+ g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d",
+ FILE_TEST_OFFSET);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_mapped_ram_fdset_dio,
+ .end_hook = migrate_hook_end_multifd_mapped_ram_fdset,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+
+ if (!probe_o_direct_support(tmpfs)) {
+ g_test_skip("Filesystem does not support O_DIRECT");
+ return;
+ }
+
+ test_file_common(&args, true);
+}
+#endif /* !_WIN32 */
+
+static void migration_test_add_file_smoke(MigrationTestEnv *env)
+{
+ migration_test_add("/migration/precopy/file",
+ test_precopy_file);
+
+ migration_test_add("/migration/multifd/file/mapped-ram/dio",
+ test_multifd_file_mapped_ram_dio);
+}
+
+void migration_test_add_file(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ migration_test_add_file_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+ migration_test_add("/migration/precopy/file/offset",
+ test_precopy_file_offset);
+#ifndef _WIN32
+ migration_test_add("/migration/precopy/file/offset/fdset",
+ test_precopy_file_offset_fdset);
+#endif
+ migration_test_add("/migration/precopy/file/offset/bad",
+ test_precopy_file_offset_bad);
+
+ migration_test_add("/migration/precopy/file/mapped-ram",
+ test_precopy_file_mapped_ram);
+ migration_test_add("/migration/precopy/file/mapped-ram/live",
+ test_precopy_file_mapped_ram_live);
+
+ migration_test_add("/migration/multifd/file/mapped-ram",
+ test_multifd_file_mapped_ram);
+ migration_test_add("/migration/multifd/file/mapped-ram/live",
+ test_multifd_file_mapped_ram_live);
+
+#ifndef _WIN32
+ migration_test_add("/migration/multifd/file/mapped-ram/fdset",
+ test_multifd_file_mapped_ram_fdset);
+ migration_test_add("/migration/multifd/file/mapped-ram/fdset/dio",
+ test_multifd_file_mapped_ram_fdset_dio);
+#endif
+}
diff --git a/tests/qtest/migration/framework.c b/tests/qtest/migration/framework.c
new file mode 100644
index 0000000..407c902
--- /dev/null
+++ b/tests/qtest/migration/framework.c
@@ -0,0 +1,1066 @@
+/*
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "chardev/char.h"
+#include "crypto/tlscredspsk.h"
+#include "libqtest.h"
+#include "migration/bootfile.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+#include "ppc-util.h"
+#include "qapi/error.h"
+#include "qobject/qjson.h"
+#include "qobject/qlist.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/range.h"
+#include "qemu/sockets.h"
+
+
+#define QEMU_VM_FILE_MAGIC 0x5145564d
+#define QEMU_ENV_SRC "QTEST_QEMU_BINARY_SRC"
+#define QEMU_ENV_DST "QTEST_QEMU_BINARY_DST"
+#define MULTIFD_TEST_CHANNELS 4
+
+unsigned start_address;
+unsigned end_address;
+static QTestMigrationState src_state;
+static QTestMigrationState dst_state;
+static char *tmpfs;
+
+/*
+ * An initial 3 MB offset is used as that corresponds
+ * to ~1 sec of data transfer with our bandwidth setting.
+ */
+#define MAGIC_OFFSET_BASE (3 * 1024 * 1024)
+/*
+ * A further 1k is added to ensure we're not a multiple
+ * of TEST_MEM_PAGE_SIZE, thus avoiding a clash with writes
+ * from the migration guest workload.
+ */
+#define MAGIC_OFFSET_SHUFFLE 1024
+#define MAGIC_OFFSET (MAGIC_OFFSET_BASE + MAGIC_OFFSET_SHUFFLE)
+#define MAGIC_MARKER 0xFEED12345678CAFEULL
+
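+/*
+ * Layout sketch (illustrative, derived from the constants above and the
+ * helpers below):
+ *
+ *   start_address + MAGIC_OFFSET_BASE  <- byte watched on the source to
+ *                                         detect re-dirtying
+ *   start_address + MAGIC_OFFSET       <- 64-bit MAGIC_MARKER written by
+ *                                         migrate_prepare_for_dirty_mem()
+ */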
+
+/*
+ * Wait for some output in the serial output file: we get an 'A'
+ * followed by an endless string of 'B's, but on the destination we
+ * won't see the 'A' (unless suspend/resume is enabled).
+ */
+void wait_for_serial(const char *side)
+{
+ g_autofree char *serialpath = g_strdup_printf("%s/%s", tmpfs, side);
+ FILE *serialfile = fopen(serialpath, "r");
+
+ do {
+ int readvalue = fgetc(serialfile);
+
+ switch (readvalue) {
+ case 'A':
+ /* Fine */
+ break;
+
+ case 'B':
+ /* It's alive! */
+ fclose(serialfile);
+ return;
+
+ case EOF:
+ fseek(serialfile, 0, SEEK_SET);
+ usleep(1000);
+ break;
+
+ default:
+ fprintf(stderr, "Unexpected %d on %s serial\n", readvalue, side);
+ g_assert_not_reached();
+ }
+ } while (true);
+}
+
+void migrate_prepare_for_dirty_mem(QTestState *from)
+{
+ /*
+ * The guest workload iterates from start_address to
+ * end_address, writing 1 byte every TEST_MEM_PAGE_SIZE
+ * bytes.
+ *
+ * IOW, if we write to memory at a point which is NOT
+ * a multiple of TEST_MEM_PAGE_SIZE, our write won't
+ * conflict with the guest workload's writes.
+ *
+ * We put in a marker here, that we'll use to determine
+ * when the data has been transferred to the dst.
+ */
+ qtest_writeq(from, start_address + MAGIC_OFFSET, MAGIC_MARKER);
+}
+
+void migrate_wait_for_dirty_mem(QTestState *from, QTestState *to)
+{
+ uint64_t watch_address = start_address + MAGIC_OFFSET_BASE;
+ uint64_t marker_address = start_address + MAGIC_OFFSET;
+ uint8_t watch_byte;
+
+ /*
+ * Wait for the MAGIC_MARKER to get transferred, as an
+ * indicator that a migration pass has made some known
+ * amount of progress.
+ */
+ do {
+ usleep(1000 * 10);
+ } while (qtest_readq(to, marker_address) != MAGIC_MARKER);
+
+
+ /* If suspended, src only iterates once, and watch_byte may never change */
+ if (src_state.suspend_me) {
+ return;
+ }
+
+ /*
+ * Now ensure that already transferred bytes are
+ * dirty again from the guest workload. Note the
+ * guest byte value may wrap around and by chance
+ * match the original watch_byte. This is harmless,
+ * as we'll eventually see a different value if we
+ * keep watching.
+ */
+ watch_byte = qtest_readb(from, watch_address);
+ do {
+ usleep(1000 * 10);
+ } while (qtest_readb(from, watch_address) == watch_byte);
+}
+
+static void check_guests_ram(QTestState *who)
+{
+ /*
+ * Our ASM test will have been incrementing one byte from each page from
+ * start_address to < end_address in order. This gives us a constraint
+ * that any page's byte should be equal to or less than the previous
+ * page's byte (mod 256); and they should all be equal except for one
+ * transition at the point where we meet the incrementer. (We're running
+ * this with the guest stopped.)
+ */
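+ /*
+ * Illustrative example of a healthy snapshot (mod 256):
+ *   7 7 7 ... 7 | 6 6 ... 6
+ * i.e. at most one downward step of exactly 1 across the whole range.
+ */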
+ unsigned address;
+ uint8_t first_byte;
+ uint8_t last_byte;
+ bool hit_edge = false;
+ int bad = 0;
+
+ qtest_memread(who, start_address, &first_byte, 1);
+ last_byte = first_byte;
+
+ for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address;
+ address += TEST_MEM_PAGE_SIZE)
+ {
+ uint8_t b;
+ qtest_memread(who, address, &b, 1);
+ if (b != last_byte) {
+ if (((b + 1) % 256) == last_byte && !hit_edge) {
+ /*
+ * This is OK, the guest stopped at the point of
+ * incrementing the previous page but didn't get
+ * to us yet.
+ */
+ hit_edge = true;
+ last_byte = b;
+ } else {
+ bad++;
+ if (bad <= 10) {
+ fprintf(stderr, "Memory content inconsistency at %x"
+ " first_byte = %x last_byte = %x current = %x"
+ " hit_edge = %x\n",
+ address, first_byte, last_byte, b, hit_edge);
+ }
+ }
+ }
+ }
+ if (bad >= 10) {
+ fprintf(stderr, "and in another %d pages\n", bad - 10);
+ }
+ g_assert(bad == 0);
+}
+
+static void cleanup(const char *filename)
+{
+ g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, filename);
+
+ unlink(path);
+}
+
+static QList *migrate_start_get_qmp_capabilities(const MigrateStart *args)
+{
+ QList *capabilities = NULL;
+
+ if (args->oob) {
+ capabilities = qlist_new();
+ qlist_append_str(capabilities, "oob");
+ }
+ return capabilities;
+}
+
+static void migrate_start_set_capabilities(QTestState *from, QTestState *to,
+ MigrateStart *args)
+{
+ /*
+ * MigrationCapability_lookup and MIGRATION_CAPABILITY_ constants
+ * are from qapi-types-migration.h.
+ */
+ for (uint8_t i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
+ if (!args->caps[i]) {
+ continue;
+ }
+ if (from) {
+ migrate_set_capability(from,
+ MigrationCapability_lookup.array[i], true);
+ }
+ if (to) {
+ migrate_set_capability(to,
+ MigrationCapability_lookup.array[i], true);
+ }
+ }
+
+ /*
+ * Always enable migration events. Libvirt always uses them, so try
+ * to mimic that as closely as possible.
+ */
+ migrate_set_capability(from, "events", true);
+ if (!args->defer_target_connect) {
+ migrate_set_capability(to, "events", true);
+ }
+
+ /*
+ * Default number of channels should be fine for most
+ * tests. Individual tests can override by calling
+ * migrate_set_parameter() directly.
+ */
+ if (args->caps[MIGRATION_CAPABILITY_MULTIFD]) {
+ migrate_set_parameter_int(from, "multifd-channels",
+ MULTIFD_TEST_CHANNELS);
+ migrate_set_parameter_int(to, "multifd-channels",
+ MULTIFD_TEST_CHANNELS);
+ }
+
+ return;
+}
+
+int migrate_start(QTestState **from, QTestState **to, const char *uri,
+ MigrateStart *args)
+{
+ /* options for source and target */
+ g_autofree gchar *arch_opts = NULL;
+ g_autofree gchar *cmd_source = NULL;
+ g_autofree gchar *cmd_target = NULL;
+ const gchar *ignore_stderr;
+ g_autofree char *shmem_opts = NULL;
+ g_autofree char *shmem_path = NULL;
+ const char *kvm_opts = NULL;
+ const char *arch = qtest_get_arch();
+ const char *memory_size;
+ const char *machine_alias, *machine_opts = "";
+ g_autofree char *machine = NULL;
+ const char *bootpath;
+ g_autoptr(QList) capabilities = migrate_start_get_qmp_capabilities(args);
+ g_autofree char *memory_backend = NULL;
+ const char *events;
+
+ if (args->use_shmem) {
+ if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) {
+ g_test_skip("/dev/shm is not supported");
+ return -1;
+ }
+ }
+
+ dst_state = (QTestMigrationState) { };
+ src_state = (QTestMigrationState) { };
+ bootpath = bootfile_create(arch, tmpfs, args->suspend_me);
+ src_state.suspend_me = args->suspend_me;
+
+ if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
+ memory_size = "150M";
+
+ if (g_str_equal(arch, "i386")) {
+ machine_alias = "pc";
+ } else {
+ machine_alias = "q35";
+ }
+ arch_opts = g_strdup_printf(
+ "-drive if=none,id=d0,file=%s,format=raw "
+ "-device ide-hd,drive=d0,secs=1,cyls=1,heads=1", bootpath);
+ start_address = X86_TEST_MEM_START;
+ end_address = X86_TEST_MEM_END;
+ } else if (g_str_equal(arch, "s390x")) {
+ memory_size = "128M";
+ machine_alias = "s390-ccw-virtio";
+ arch_opts = g_strdup_printf("-bios %s", bootpath);
+ start_address = S390_TEST_MEM_START;
+ end_address = S390_TEST_MEM_END;
+ } else if (strcmp(arch, "ppc64") == 0) {
+ memory_size = "256M";
+ start_address = PPC_TEST_MEM_START;
+ end_address = PPC_TEST_MEM_END;
+ machine_alias = "pseries";
+ machine_opts = "vsmt=8";
+ arch_opts = g_strdup_printf(
+ "-nodefaults -machine " PSERIES_DEFAULT_CAPABILITIES " "
+ "-bios %s", bootpath);
+ } else if (strcmp(arch, "aarch64") == 0) {
+ memory_size = "150M";
+ machine_alias = "virt";
+ machine_opts = "gic-version=3";
+ arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath);
+ start_address = ARM_TEST_MEM_START;
+ end_address = ARM_TEST_MEM_END;
+ } else {
+ g_assert_not_reached();
+ }
+
+ if (!getenv("QTEST_LOG") && args->hide_stderr) {
+#ifndef _WIN32
+ ignore_stderr = "2>/dev/null";
+#else
+ /*
+ * On Windows the QEMU executable is created via CreateProcess() and
+ * IO redirection does not work, so don't bother adding IO redirection
+ * to the command line.
+ */
+ ignore_stderr = "";
+#endif
+ } else {
+ ignore_stderr = "";
+ }
+
+ if (args->use_shmem) {
+ shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid());
+ shmem_opts = g_strdup_printf(
+ "-object memory-backend-file,id=mem0,size=%s"
+ ",mem-path=%s,share=on -numa node,memdev=mem0",
+ memory_size, shmem_path);
+ }
+
+ if (args->memory_backend) {
+ memory_backend = g_strdup_printf(args->memory_backend, memory_size);
+ } else {
+ memory_backend = g_strdup_printf("-m %s ", memory_size);
+ }
+
+ if (args->use_dirty_ring) {
+ kvm_opts = ",dirty-ring-size=4096";
+ }
+
+ if (!qtest_has_machine(machine_alias)) {
+ g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias);
+ g_test_skip(msg);
+ return -1;
+ }
+
+ machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC,
+ QEMU_ENV_DST);
+
+ g_test_message("Using machine type: %s", machine);
+
+ cmd_source = g_strdup_printf("-accel kvm%s -accel tcg "
+ "-machine %s,%s "
+ "-name source,debug-threads=on "
+ "%s "
+ "-serial file:%s/src_serial "
+ "%s %s %s %s",
+ kvm_opts ? kvm_opts : "",
+ machine, machine_opts,
+ memory_backend, tmpfs,
+ arch_opts ? arch_opts : "",
+ shmem_opts ? shmem_opts : "",
+ args->opts_source ? args->opts_source : "",
+ ignore_stderr);
+ if (!args->only_target) {
+ *from = qtest_init_ext(QEMU_ENV_SRC, cmd_source, capabilities, true);
+ qtest_qmp_set_event_callback(*from,
+ migrate_watch_for_events,
+ &src_state);
+ }
+
+ /*
+ * If the monitor connection is deferred, enable events on the command line
+ * so none are missed. This is for testing only, do not set migration
+ * options like this in general.
+ */
+ events = args->defer_target_connect ? "-global migration.x-events=on" : "";
+
+ cmd_target = g_strdup_printf("-accel kvm%s -accel tcg "
+ "-machine %s,%s "
+ "-name target,debug-threads=on "
+ "%s "
+ "-serial file:%s/dest_serial "
+ "-incoming %s "
+ "%s %s %s %s %s",
+ kvm_opts ? kvm_opts : "",
+ machine, machine_opts,
+ memory_backend, tmpfs, uri,
+ events,
+ arch_opts ? arch_opts : "",
+ shmem_opts ? shmem_opts : "",
+ args->opts_target ? args->opts_target : "",
+ ignore_stderr);
+ *to = qtest_init_ext(QEMU_ENV_DST, cmd_target, capabilities,
+ !args->defer_target_connect);
+ qtest_qmp_set_event_callback(*to,
+ migrate_watch_for_events,
+ &dst_state);
+
+ /*
+ * Remove the shmem file immediately to avoid leaking it if the test
+ * fails. This is safe because QEMU has already opened the file.
+ */
+ if (args->use_shmem) {
+ unlink(shmem_path);
+ }
+
+ migrate_start_set_capabilities(*from, *to, args);
+
+ return 0;
+}
+
+void migrate_end(QTestState *from, QTestState *to, bool test_dest)
+{
+ unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d;
+
+ qtest_quit(from);
+
+ if (test_dest) {
+ qtest_memread(to, start_address, &dest_byte_a, 1);
+
+ /* Destination still running, wait for a byte to change */
+ do {
+ qtest_memread(to, start_address, &dest_byte_b, 1);
+ usleep(1000 * 10);
+ } while (dest_byte_a == dest_byte_b);
+
+ qtest_qmp_assert_success(to, "{ 'execute' : 'stop'}");
+
+ /* With it stopped, check nothing changes */
+ qtest_memread(to, start_address, &dest_byte_c, 1);
+ usleep(1000 * 200);
+ qtest_memread(to, start_address, &dest_byte_d, 1);
+ g_assert_cmpint(dest_byte_c, ==, dest_byte_d);
+
+ check_guests_ram(to);
+ }
+
+ qtest_quit(to);
+
+ cleanup("migsocket");
+ cleanup("cpr.sock");
+ cleanup("src_serial");
+ cleanup("dest_serial");
+ cleanup(FILE_TEST_FILENAME);
+}
+
+static int migrate_postcopy_prepare(QTestState **from_ptr,
+ QTestState **to_ptr,
+ MigrateCommon *args)
+{
+ QTestState *from, *to;
+
+ /* set postcopy capabilities */
+ args->start.caps[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME] = true;
+ args->start.caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true;
+
+ if (migrate_start(&from, &to, "defer", &args->start)) {
+ return -1;
+ }
+
+ if (args->start_hook) {
+ args->postcopy_data = args->start_hook(from, to);
+ }
+
+ migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+ qtest_qmp_assert_success(to, "{ 'execute': 'migrate-incoming',"
+ " 'arguments': { "
+ " 'channels': [ { 'channel-type': 'main',"
+ " 'addr': { 'transport': 'socket',"
+ " 'type': 'inet',"
+ " 'host': '127.0.0.1',"
+ " 'port': '0' } } ] } }");
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+ wait_for_suspend(from, &src_state);
+
+ migrate_qmp(from, to, NULL, NULL, "{}");
+
+ migrate_wait_for_dirty_mem(from, to);
+
+ *from_ptr = from;
+ *to_ptr = to;
+
+ return 0;
+}
+
+static void migrate_postcopy_complete(QTestState *from, QTestState *to,
+ MigrateCommon *args)
+{
+ MigrationTestEnv *env = migration_get_env();
+
+ wait_for_migration_complete(from);
+
+ if (args->start.suspend_me) {
+ /* wakeup succeeds only if guest is suspended */
+ qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}");
+ }
+
+ /* Make sure we get at least one "B" on destination */
+ wait_for_serial("dest_serial");
+
+ if (env->uffd_feature_thread_id) {
+ read_blocktime(to);
+ }
+
+ if (args->end_hook) {
+ args->end_hook(from, to, args->postcopy_data);
+ args->postcopy_data = NULL;
+ }
+
+ migrate_end(from, to, true);
+}
+
+void test_postcopy_common(MigrateCommon *args)
+{
+ QTestState *from, *to;
+
+ if (migrate_postcopy_prepare(&from, &to, args)) {
+ return;
+ }
+ migrate_postcopy_start(from, to, &src_state);
+ migrate_postcopy_complete(from, to, args);
+}
+
+static void wait_for_postcopy_status(QTestState *one, const char *status)
+{
+ wait_for_migration_status(one, status,
+ (const char * []) {
+ "failed", "active",
+ "completed", NULL
+ });
+}
+
+static void postcopy_recover_fail(QTestState *from, QTestState *to,
+ PostcopyRecoveryFailStage stage)
+{
+#ifndef _WIN32
+ bool fail_early = (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH);
+ int ret, pair1[2], pair2[2];
+ char c;
+
+ g_assert(stage > POSTCOPY_FAIL_NONE && stage < POSTCOPY_FAIL_MAX);
+
+ /* Create two unrelated socketpairs */
+ ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair1);
+ g_assert_cmpint(ret, ==, 0);
+
+ ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair2);
+ g_assert_cmpint(ret, ==, 0);
+
+ /*
+ * Give the guests unpaired ends of the sockets, so they'll all block
+ * on reads. This mimics a wrongly established channel.
+ */
+ qtest_qmp_fds_assert_success(from, &pair1[0], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+ qtest_qmp_fds_assert_success(to, &pair2[0], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+
+ /*
+ * Write the 1st byte as QEMU_VM_COMMAND (0x8) to the dest socket, to
+ * emulate the 1st byte of a real recovery, but stop there to keep
+ * dest QEMU in RECOVER. This is needed so that we can kick off the
+ * recover process on dest QEMU (by triggering the G_IO_IN event).
+ *
+ * NOTE: this trick is not needed on src QEMUs, because src doesn't
+ * rely on a pre-existing G_IO_IN event, so it will always trigger the
+ * upcoming recovery anyway even if it can read nothing.
+ */
+#define QEMU_VM_COMMAND 0x08
+ c = QEMU_VM_COMMAND;
+ ret = send(pair2[1], &c, 1, 0);
+ g_assert_cmpint(ret, ==, 1);
+
+ if (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH) {
+ /*
+ * This will make src QEMU fail at an early stage when trying to
+ * resume later; it shouldn't reach the RECOVER stage at all.
+ */
+ close(pair1[1]);
+ }
+
+ migrate_recover(to, "fd:fd-mig");
+ migrate_qmp(from, to, "fd:fd-mig", NULL, "{'resume': true}");
+
+ /*
+ * Source QEMU has an extra RECOVER_SETUP phase that dest doesn't have.
+ * Make sure it appears along the way.
+ */
+ migration_event_wait(from, "postcopy-recover-setup");
+
+ if (fail_early) {
+ /*
+ * When reconnection fails, src QEMU automatically goes back to the
+ * PAUSED state. Make sure there is an event in this case: Libvirt
+ * relies on it to detect early reconnection errors.
+ */
+ migration_event_wait(from, "postcopy-paused");
+ } else {
+ /*
+ * We want to test "fail later" at RECOVER stage here. Make sure
+ * both QEMU instances will go into RECOVER stage first, then test
+ * kicking them out using migrate-pause.
+ *
+ * Explicitly check the RECOVER event on src, since that (rather than
+ * polling) is what Libvirt relies on.
+ */
+ migration_event_wait(from, "postcopy-recover");
+ wait_for_postcopy_status(from, "postcopy-recover");
+
+ /* Need an explicit kick on src QEMU in this case */
+ migrate_pause(from);
+ }
+
+ /*
+ * For all failure cases, we'll reach such states on both sides now.
+ * Check them.
+ */
+ wait_for_postcopy_status(from, "postcopy-paused");
+ wait_for_postcopy_status(to, "postcopy-recover");
+
+ /*
+ * Kick dest QEMU out too. This is normally not needed in reality
+ * because when the channel is shut down it should also happen on src.
+ * However, here we used separate socket pairs, so we need to do that
+ * explicitly.
+ */
+ migrate_pause(to);
+ wait_for_postcopy_status(to, "postcopy-paused");
+
+ close(pair1[0]);
+ close(pair2[0]);
+ close(pair2[1]);
+
+ if (stage != POSTCOPY_FAIL_CHANNEL_ESTABLISH) {
+ close(pair1[1]);
+ }
+#endif
+}
+
+void test_postcopy_recovery_common(MigrateCommon *args)
+{
+ QTestState *from, *to;
+ g_autofree char *uri = NULL;
+
+ /*
+ * Always enable the OOB QMP capability for recovery tests, since
+ * migrate-recover is executed out-of-band.
+ */
+ args->start.oob = true;
+
+ /* Always hide errors for postcopy recover tests since they're expected */
+ args->start.hide_stderr = true;
+
+ if (migrate_postcopy_prepare(&from, &to, args)) {
+ return;
+ }
+
+ /* Turn postcopy speed down, 4K/s is slow enough on any machine */
+ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096);
+
+ /* Now we start the postcopy */
+ migrate_postcopy_start(from, to, &src_state);
+
+ /*
+ * Wait until postcopy is really started; we can only run the
+ * migrate-pause command during a postcopy
+ */
+ wait_for_migration_status(from, "postcopy-active", NULL);
+
+ /*
+ * Manually stop the postcopy migration. This emulates a network
+ * failure with the migration socket
+ */
+ migrate_pause(from);
+
+ /*
+ * Wait for destination side to reach postcopy-paused state. The
+ * migrate-recover command can only succeed if destination machine
+ * is in the paused state
+ */
+ wait_for_postcopy_status(to, "postcopy-paused");
+ wait_for_postcopy_status(from, "postcopy-paused");
+
+ if (args->postcopy_recovery_fail_stage) {
+ /*
+ * Test when a wrong socket specified for recover, and then the
+ * ability to kick it out, and continue with a correct socket.
+ */
+ postcopy_recover_fail(from, to, args->postcopy_recovery_fail_stage);
+ /* continue with a good recovery */
+ }
+
+ /*
+ * Create a new socket to emulate a new channel that is different
+ * from the broken migration channel; tell the destination to
+ * listen to the new port
+ */
+ uri = g_strdup_printf("unix:%s/migsocket-recover", tmpfs);
+ migrate_recover(to, uri);
+
+ /*
+ * Try to rebuild the migration channel using the resume flag and
+ * the newly created channel
+ */
+ migrate_qmp(from, to, uri, NULL, "{'resume': true}");
+
+ /* Restore the postcopy bandwidth to unlimited */
+ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0);
+
+ migrate_postcopy_complete(from, to, args);
+}
+
+void test_precopy_common(MigrateCommon *args)
+{
+ QTestState *from, *to;
+ void *data_hook = NULL;
+ QObject *in_channels = NULL;
+ QObject *out_channels = NULL;
+
+ g_assert(!args->cpr_channel || args->connect_channels);
+
+ if (migrate_start(&from, &to, args->listen_uri, &args->start)) {
+ return;
+ }
+
+ if (args->start_hook) {
+ data_hook = args->start_hook(from, to);
+ }
+
+ /* Wait for the first serial output from the source */
+ if (args->result == MIG_TEST_SUCCEED) {
+ wait_for_serial("src_serial");
+ wait_for_suspend(from, &src_state);
+ }
+
+ if (args->live) {
+ migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+ } else {
+ /*
+ * Testing non-live migration, we allow it to run at
+ * full speed to ensure short test case duration.
+ * For tests expected to fail, we don't need to
+ * change anything.
+ */
+ if (args->result == MIG_TEST_SUCCEED) {
+ qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}");
+ wait_for_stop(from, &src_state);
+ migrate_ensure_converge(from);
+ }
+ }
+
+ /*
+ * The cpr channel must be included in outgoing channels, but not in
+ * migrate-incoming channels.
+ */
+ if (args->connect_channels) {
+ if (args->start.defer_target_connect &&
+ !strcmp(args->listen_uri, "defer")) {
+ in_channels = qobject_from_json(args->connect_channels,
+ &error_abort);
+ }
+ out_channels = qobject_from_json(args->connect_channels, &error_abort);
+
+ if (args->cpr_channel) {
+ QList *channels_list = qobject_to(QList, out_channels);
+ QObject *obj = migrate_str_to_channel(args->cpr_channel);
+
+ qlist_append(channels_list, obj);
+ }
+ }
+
+ if (args->result == MIG_TEST_QMP_ERROR) {
+ migrate_qmp_fail(from, args->connect_uri, out_channels, "{}");
+ goto finish;
+ }
+
+ migrate_qmp(from, to, args->connect_uri, out_channels, "{}");
+
+ if (args->start.defer_target_connect) {
+ qtest_connect(to);
+ qtest_qmp_handshake(to, NULL);
+ if (!strcmp(args->listen_uri, "defer")) {
+ migrate_incoming_qmp(to, args->connect_uri, in_channels, "{}");
+ }
+ }
+
+ if (args->result != MIG_TEST_SUCCEED) {
+ bool allow_active = args->result == MIG_TEST_FAIL;
+ wait_for_migration_fail(from, allow_active);
+
+ if (args->result == MIG_TEST_FAIL_DEST_QUIT_ERR) {
+ qtest_set_expected_status(to, EXIT_FAILURE);
+ }
+ } else {
+ if (args->live) {
+ /*
+ * For initial iteration(s) we must do a full pass,
+ * but for the final iteration, we need only wait
+ * for some dirty mem before switching to converge
+ */
+ while (args->iterations > 1) {
+ wait_for_migration_pass(from, &src_state);
+ args->iterations--;
+ }
+ migrate_wait_for_dirty_mem(from, to);
+
+ migrate_ensure_converge(from);
+
+ /*
+ * We do this first, as it has a timeout to stop us
+ * hanging forever if migration didn't converge
+ */
+ wait_for_migration_complete(from);
+
+ wait_for_stop(from, &src_state);
+
+ } else {
+ wait_for_migration_complete(from);
+ /*
+ * Must wait for dst to finish reading all incoming
+ * data on the socket before issuing 'cont', otherwise
+ * it'll be ignored.
+ */
+ wait_for_migration_complete(to);
+
+ qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}");
+ }
+
+ wait_for_resume(to, &dst_state);
+
+ if (args->start.suspend_me) {
+ /* wakeup succeeds only if guest is suspended */
+ qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}");
+ }
+
+ wait_for_serial("dest_serial");
+ }
+
+finish:
+ if (args->end_hook) {
+ args->end_hook(from, to, data_hook);
+ }
+
+ migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
+}
+
+static void file_dirty_offset_region(void)
+{
+ g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+ size_t size = FILE_TEST_OFFSET;
+ g_autofree char *data = g_new0(char, size);
+
+ memset(data, FILE_TEST_MARKER, size);
+ g_assert(g_file_set_contents(path, data, size, NULL));
+}
+
+static void file_check_offset_region(void)
+{
+ g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+ size_t size = FILE_TEST_OFFSET;
+ g_autofree char *expected = g_new0(char, size);
+ g_autofree char *actual = NULL;
+ uint64_t *stream_start;
+
+ /*
+ * Ensure the skipped offset region's data has not been touched
+ * and the migration stream starts at the right place.
+ */
+
+ memset(expected, FILE_TEST_MARKER, size);
+
+ g_assert(g_file_get_contents(path, &actual, NULL, NULL));
+ g_assert(!memcmp(actual, expected, size));
+
+ stream_start = (uint64_t *)(actual + size);
+ g_assert_cmpint(cpu_to_be64(*stream_start) >> 32, ==, QEMU_VM_FILE_MAGIC);
+}
+
+void test_file_common(MigrateCommon *args, bool stop_src)
+{
+ QTestState *from, *to;
+ void *data_hook = NULL;
+ bool check_offset = false;
+
+ if (migrate_start(&from, &to, args->listen_uri, &args->start)) {
+ return;
+ }
+
+ /*
+ * File migration is never live. We can keep the source VM running
+ * during migration, but the destination will not be running
+ * concurrently.
+ */
+ g_assert_false(args->live);
+
+ if (g_strrstr(args->connect_uri, "offset=")) {
+ check_offset = true;
+ /*
+ * This comes before the start_hook because it's equivalent to
+ * a management application creating the file and writing to
+ * it, so hooks should expect the file to already be present.
+ */
+ file_dirty_offset_region();
+ }
+
+ if (args->start_hook) {
+ data_hook = args->start_hook(from, to);
+ }
+
+ migrate_ensure_converge(from);
+ wait_for_serial("src_serial");
+
+ if (stop_src) {
+ qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}");
+ wait_for_stop(from, &src_state);
+ }
+
+ if (args->result == MIG_TEST_QMP_ERROR) {
+ migrate_qmp_fail(from, args->connect_uri, NULL, "{}");
+ goto finish;
+ }
+
+ migrate_qmp(from, to, args->connect_uri, NULL, "{}");
+ wait_for_migration_complete(from);
+
+ /*
+ * We need to wait for the source to finish before starting the
+ * destination.
+ */
+ migrate_incoming_qmp(to, args->connect_uri, NULL, "{}");
+ wait_for_migration_complete(to);
+
+ if (stop_src) {
+ qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}");
+ }
+ wait_for_resume(to, &dst_state);
+
+ wait_for_serial("dest_serial");
+
+ if (check_offset) {
+ file_check_offset_region();
+ }
+
+finish:
+ if (args->end_hook) {
+ args->end_hook(from, to, data_hook);
+ }
+
+ migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
+}
+
+void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from,
+ QTestState *to,
+ const char *method)
+{
+ migrate_set_parameter_str(from, "multifd-compression", method);
+ migrate_set_parameter_str(to, "multifd-compression", method);
+
+ /* Start incoming migration from the 1st socket */
+ migrate_incoming_qmp(to, "tcp:127.0.0.1:0", NULL, "{}");
+
+ return NULL;
+}
+
+QTestMigrationState *get_src(void)
+{
+ return &src_state;
+}
+
+MigrationTestEnv *migration_get_env(void)
+{
+ static MigrationTestEnv *env;
+ g_autoptr(GError) err = NULL;
+
+ if (env) {
+ return env;
+ }
+
+ env = g_new0(MigrationTestEnv, 1);
+ env->qemu_src = getenv(QEMU_ENV_SRC);
+ env->qemu_dst = getenv(QEMU_ENV_DST);
+
+ /*
+ * The default QTEST_QEMU_BINARY must always be provided because
+ * that is what helpers use to query the accel type and
+ * architecture.
+ */
+ if (env->qemu_src && env->qemu_dst) {
+ g_test_message("Only one of %s, %s is allowed",
+ QEMU_ENV_SRC, QEMU_ENV_DST);
+ exit(1);
+ }
+
+ env->has_kvm = qtest_has_accel("kvm");
+ env->has_tcg = qtest_has_accel("tcg");
+
+ if (!env->has_tcg && !env->has_kvm) {
+ g_test_skip("No KVM or TCG accelerator available");
+ return env;
+ }
+
+ env->has_dirty_ring = kvm_dirty_ring_supported();
+ env->has_uffd = ufd_version_check(&env->uffd_feature_thread_id);
+ env->arch = qtest_get_arch();
+ env->is_x86 = !strcmp(env->arch, "i386") || !strcmp(env->arch, "x86_64");
+
+ env->tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err);
+ if (!env->tmpfs) {
+ g_test_message("Can't create temporary directory in %s: %s",
+ g_get_tmp_dir(), err->message);
+ }
+ g_assert(env->tmpfs);
+
+ tmpfs = env->tmpfs;
+
+ return env;
+}
+
+int migration_env_clean(MigrationTestEnv *env)
+{
+ char *tmpfs;
+ int ret = 0;
+
+ if (!env) {
+ return ret;
+ }
+
+ bootfile_delete();
+
+ tmpfs = env->tmpfs;
+ ret = rmdir(tmpfs);
+ if (ret != 0) {
+ g_test_message("unable to rmdir: path (%s): %s",
+ tmpfs, strerror(errno));
+ }
+ g_free(tmpfs);
+
+ return ret;
+}
diff --git a/tests/qtest/migration/framework.h b/tests/qtest/migration/framework.h
new file mode 100644
index 0000000..01e425e
--- /dev/null
+++ b/tests/qtest/migration/framework.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TEST_FRAMEWORK_H
+#define TEST_FRAMEWORK_H
+
+#include "libqtest.h"
+#include <qapi/qapi-types-migration.h>
+
+#define FILE_TEST_FILENAME "migfile"
+#define FILE_TEST_OFFSET 0x1000
+#define FILE_TEST_MARKER 'X'
+
+typedef struct MigrationTestEnv {
+ bool has_kvm;
+ bool has_tcg;
+ bool has_uffd;
+ bool uffd_feature_thread_id;
+ bool has_dirty_ring;
+ bool is_x86;
+ bool full_set;
+ const char *arch;
+ const char *qemu_src;
+ const char *qemu_dst;
+ char *tmpfs;
+} MigrationTestEnv;
+
+MigrationTestEnv *migration_get_env(void);
+int migration_env_clean(MigrationTestEnv *env);
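+
+/*
+ * Illustrative lifecycle sketch (the test binary's main() is not part of
+ * this file; the migration_test_add_*() helpers are declared at the end
+ * of this header):
+ *
+ *   MigrationTestEnv *env = migration_get_env();
+ *   migration_test_add_precopy(env);
+ *   migration_test_add_postcopy(env);
+ *   int ret = g_test_run();
+ *   migration_env_clean(env);
+ */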
+
+/*
+ * A hook that runs after the src and dst QEMUs have been
+ * created, but before the migration is started. This can
+ * be used to set migration parameters and capabilities.
+ *
+ * Returns: NULL, or a pointer to opaque state to be
+ * later passed to the TestMigrateEndHook
+ */
+typedef void * (*TestMigrateStartHook)(QTestState *from,
+ QTestState *to);
+
+/*
+ * A hook that runs after the migration has finished,
+ * regardless of whether it succeeded or failed, but
+ * before QEMU has terminated (unless it self-terminated
+ * due to migration error)
+ *
+ * @opaque is a pointer to state previously returned
+ * by the TestMigrateStartHook if any, or NULL.
+ */
+typedef void (*TestMigrateEndHook)(QTestState *from,
+ QTestState *to,
+ void *opaque);
+
+/*
+ * Our goal is to ensure that we run a single full migration
+ * iteration, and also dirty memory, ensuring that at least
+ * one further iteration is required.
+ *
+ * We can't directly synchronize with the start of a migration
+ * so we have to apply some tricks monitoring memory that is
+ * transferred.
+ *
+ * Initially we set the migration bandwidth to an insanely
+ * low value, with tiny max downtime too. This basically
+ * guarantees migration will never complete.
+ *
+ * This will result in a test that is unacceptably slow though,
+ * so we can't let the entire migration pass run at this speed.
+ * Our intent is to let it run just long enough that we can
+ * prove data prior to the marker has been transferred *AND*
+ * also prove this transferred data is dirty again.
+ *
+ * Before migration starts, we write a 64-bit magic marker
+ * into a fixed location in the src VM RAM.
+ *
+ * Then watch dst memory until the marker appears. This is
+ * proof that start_address -> MAGIC_OFFSET_BASE has been
+ * transferred.
+ *
+ * Finally we go back to the source and read a byte just
+ * before the marker until we see it flip in value. This
+ * is proof that start_address -> MAGIC_OFFSET_BASE
+ * is now dirty again.
+ *
+ * IOW, we're guaranteed at least a 2nd migration pass
+ * at this point.
+ *
+ * We can now let migration run at full speed to finish
+ * the test
+ */
+typedef struct {
+ /*
+ * QTEST_LOG=1 may override this: errors are then always dumped,
+ * because it means the user would like verbose output.
+ */
+ bool hide_stderr;
+ bool use_shmem;
+ /* only launch the target process */
+ bool only_target;
+ /* Use dirty ring if true; dirty logging otherwise */
+ bool use_dirty_ring;
+ const char *opts_source;
+ const char *opts_target;
+ /* suspend the src before migrating to dest. */
+ bool suspend_me;
+ /* enable OOB QMP capability */
+ bool oob;
+ /*
+ * Format string for the main memory backend, containing one %s where the
+ * size is plugged in. If omitted, "-m %s" is used.
+ */
+ const char *memory_backend;
+
+ /* Do not connect to target monitor and qtest sockets in qtest_init */
+ bool defer_target_connect;
+
+ /*
+ * Migration capabilities to be set in both source and
+ * destination. For unilateral capabilities, use
+ * migrate_set_capability() directly.
+ */
+ bool caps[MIGRATION_CAPABILITY__MAX];
+} MigrateStart;
+
+typedef enum PostcopyRecoveryFailStage {
+ /*
+ * "no failure" must be 0 as it's the default. OTOH, real failure
+ * cases must be >0 so that a plain "if" test catches them.
+ */
+ POSTCOPY_FAIL_NONE = 0,
+ POSTCOPY_FAIL_CHANNEL_ESTABLISH,
+ POSTCOPY_FAIL_RECOVERY,
+ POSTCOPY_FAIL_MAX
+} PostcopyRecoveryFailStage;
+
+typedef struct {
+ /* Optional: fine tune start parameters */
+ MigrateStart start;
+
+ /* Required: the URI for the dst QEMU to listen on */
+ const char *listen_uri;
+
+ /*
+ * Optional: the URI for the src QEMU to connect to
+ * If NULL, then it will query the dst QEMU for its actual
+ * listening address and use that as the connect address.
+ * This allows for dynamically picking a free TCP port.
+ */
+ const char *connect_uri;
+
+ /*
+ * Optional: JSON-formatted list of src QEMU URIs. If a port is
+ * given as '0' in any QDict key, it will be automatically
+ * converted to the correct destination port.
+ */
+ const char *connect_channels;
+
+ /* Optional: the cpr migration channel, in JSON or dotted keys format */
+ const char *cpr_channel;
+
+ /* Optional: callback to run at start to set migration parameters */
+ TestMigrateStartHook start_hook;
+ /* Optional: callback to run at finish to cleanup */
+ TestMigrateEndHook end_hook;
+
+ /*
+ * Optional: normally we expect the migration process to complete.
+ *
+ * There can be a variety of reasons and stages in which failure
+ * can happen during tests.
+ *
+ * If a failure is expected to happen at time of establishing
+ * the connection, then MIG_TEST_FAIL will indicate that the dst
+ * QEMU is expected to stay running and accept future migration
+ * connections.
+ *
+ * If a failure is expected to happen while processing the
+ * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate
+ * that the dst QEMU is expected to quit with non-zero exit status
+ */
+ enum {
+ /* This test should succeed, the default */
+ MIG_TEST_SUCCEED = 0,
+ /* This test should fail, dest qemu should keep alive */
+ MIG_TEST_FAIL,
+ /* This test should fail, dest qemu should fail with abnormal status */
+ MIG_TEST_FAIL_DEST_QUIT_ERR,
+ /* The QMP command for this migration should fail with an error */
+ MIG_TEST_QMP_ERROR,
+ } result;
+
+ /*
+ * Optional: set number of migration passes to wait for, if live==true.
+ * If zero, then merely wait for a few MB of dirty data
+ */
+ unsigned int iterations;
+
+ /*
+ * Optional: whether the guest CPUs should be running during a precopy
+ * migration test. We used to always run live, but that took much
+ * longer, so live tests were reduced to only the ones that have a
+ * solid reason to be tested live. For each new precopy test case,
+ * please provide a justification for using live explicitly (refer to
+ * existing ones with live=true), or use live=off by default.
+ */
+ bool live;
+
+ /* Postcopy specific fields */
+ void *postcopy_data;
+ PostcopyRecoveryFailStage postcopy_recovery_fail_stage;
+} MigrateCommon;
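+
+/*
+ * Minimal usage sketch (illustrative; see compression-tests.c,
+ * file-tests.c, etc. for real callers):
+ *
+ *   MigrateCommon args = {
+ *       .listen_uri = "defer",
+ *       .start = {
+ *           .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ *       },
+ *       .start_hook = my_start_hook,   <- hypothetical hook name
+ *   };
+ *   test_precopy_common(&args);
+ */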
+
+void wait_for_serial(const char *side);
+void migrate_prepare_for_dirty_mem(QTestState *from);
+void migrate_wait_for_dirty_mem(QTestState *from, QTestState *to);
+int migrate_start(QTestState **from, QTestState **to, const char *uri,
+ MigrateStart *args);
+void migrate_end(QTestState *from, QTestState *to, bool test_dest);
+
+void test_postcopy_common(MigrateCommon *args);
+void test_postcopy_recovery_common(MigrateCommon *args);
+void test_precopy_common(MigrateCommon *args);
+void test_file_common(MigrateCommon *args, bool stop_src);
+void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from,
+ QTestState *to,
+ const char *method);
+
+typedef struct QTestMigrationState QTestMigrationState;
+QTestMigrationState *get_src(void);
+
+#ifdef CONFIG_GNUTLS
+void migration_test_add_tls(MigrationTestEnv *env);
+#else
+static inline void migration_test_add_tls(MigrationTestEnv *env) {}
+#endif
+void migration_test_add_compression(MigrationTestEnv *env);
+void migration_test_add_postcopy(MigrationTestEnv *env);
+void migration_test_add_file(MigrationTestEnv *env);
+void migration_test_add_precopy(MigrationTestEnv *env);
+void migration_test_add_cpr(MigrationTestEnv *env);
+void migration_test_add_misc(MigrationTestEnv *env);
+
+#endif /* TEST_FRAMEWORK_H */
diff --git a/tests/migration/i386/Makefile b/tests/qtest/migration/i386/Makefile
index 37a72ae..37a72ae 100644
--- a/tests/migration/i386/Makefile
+++ b/tests/qtest/migration/i386/Makefile
diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/qtest/migration/i386/a-b-bootblock.S
index 6f39eb6..6f39eb6 100644
--- a/tests/migration/i386/a-b-bootblock.S
+++ b/tests/qtest/migration/i386/a-b-bootblock.S
diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/qtest/migration/i386/a-b-bootblock.h
index c83f871..c83f871 100644
--- a/tests/migration/i386/a-b-bootblock.h
+++ b/tests/qtest/migration/i386/a-b-bootblock.h
diff --git a/tests/qtest/migration/migration-qmp.c b/tests/qtest/migration/migration-qmp.c
new file mode 100644
index 0000000..fb59741
--- /dev/null
+++ b/tests/qtest/migration/migration-qmp.c
@@ -0,0 +1,520 @@
+/*
+ * QTest QMP helpers for migration
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "migration-qmp.h"
+#include "migration-util.h"
+#include "qapi/error.h"
+#include "qapi/qapi-types-migration.h"
+#include "qapi/qapi-visit-migration.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qlist.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qobject-output-visitor.h"
+
+/*
+ * Number of seconds we wait when looking for migration
+ * status changes, to avoid the test suite hanging forever
+ * when things go wrong. Needs to be high enough to
+ * avoid false positives on loaded hosts.
+ */
+#define MIGRATION_STATUS_WAIT_TIMEOUT 120
+
+/*
+ * Wait for a "MIGRATION" event. This is what Libvirt uses to track
+ * migration status changes.
+ */
+void migration_event_wait(QTestState *s, const char *target)
+{
+ QDict *response, *data;
+ const char *status;
+ bool found;
+
+ do {
+ response = qtest_qmp_eventwait_ref(s, "MIGRATION");
+ data = qdict_get_qdict(response, "data");
+ g_assert(data);
+ status = qdict_get_str(data, "status");
+ found = (strcmp(status, target) == 0);
+ qobject_unref(response);
+ } while (!found);
+}
+
+/*
+ * Convert a string representing a single channel to an object.
+ * @str may be in JSON or dotted keys format.
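+ *
+ * An illustrative (hypothetical) JSON input would be:
+ *   { 'channel-type': 'main',
+ *     'addr': { 'transport': 'socket', 'type': 'inet',
+ *               'host': '127.0.0.1', 'port': '0' } }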
+ */
+QObject *migrate_str_to_channel(const char *str)
+{
+ Visitor *v;
+ MigrationChannel *channel;
+ QObject *obj;
+
+ /* Create the channel */
+ v = qobject_input_visitor_new_str(str, "channel-type", &error_abort);
+ visit_type_MigrationChannel(v, NULL, &channel, &error_abort);
+ visit_free(v);
+
+ /* Create the object */
+ v = qobject_output_visitor_new(&obj);
+ visit_type_MigrationChannel(v, NULL, &channel, &error_abort);
+ visit_complete(v, &obj);
+ visit_free(v);
+
+ qapi_free_MigrationChannel(channel);
+ return obj;
+}
+
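+/*
+ * Like migrate_qmp(), but expect the 'migrate' command to fail: assert
+ * that the reply contains an error description instead of a return value.
+ */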
+void migrate_qmp_fail(QTestState *who, const char *uri,
+ QObject *channels, const char *fmt, ...)
+{
+ va_list ap;
+ QDict *args, *err;
+
+ va_start(ap, fmt);
+ args = qdict_from_vjsonf_nofail(fmt, ap);
+ va_end(ap);
+
+ g_assert(!qdict_haskey(args, "uri"));
+ if (uri) {
+ qdict_put_str(args, "uri", uri);
+ }
+
+ g_assert(!qdict_haskey(args, "channels"));
+ if (channels) {
+ qdict_put_obj(args, "channels", channels);
+ }
+
+ err = qtest_qmp_assert_failure_ref(
+ who, "{ 'execute': 'migrate', 'arguments': %p}", args);
+
+ g_assert(qdict_haskey(err, "desc"));
+
+ qobject_unref(err);
+}
+
+/*
+ * Send QMP command "migrate".
+ * Arguments are built from @fmt... (formatted like
+ * qobject_from_jsonf_nofail()) with "uri": @uri spliced in.
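+ * If neither @uri nor @channels is given, the connect URI is derived
+ * from the destination's listening address. A typical call in the tests
+ * below is migrate_qmp(from, to, uri, NULL, "{}").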
+ */
+void migrate_qmp(QTestState *who, QTestState *to, const char *uri,
+ QObject *channels, const char *fmt, ...)
+{
+ va_list ap;
+ QDict *args;
+ g_autofree char *connect_uri = NULL;
+
+ va_start(ap, fmt);
+ args = qdict_from_vjsonf_nofail(fmt, ap);
+ va_end(ap);
+
+ g_assert(!qdict_haskey(args, "uri"));
+ if (uri) {
+ qdict_put_str(args, "uri", uri);
+ } else if (!channels) {
+ connect_uri = migrate_get_connect_uri(to);
+ qdict_put_str(args, "uri", connect_uri);
+ }
+
+ g_assert(!qdict_haskey(args, "channels"));
+ if (channels) {
+ QList *channel_list = qobject_to(QList, channels);
+ migrate_set_ports(to, channel_list);
+ qdict_put_obj(args, "channels", channels);
+ }
+
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate', 'arguments': %p}", args);
+}
+
+void migrate_set_capability(QTestState *who, const char *capability,
+ bool value)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate-set-capabilities',"
+ "'arguments': { "
+ "'capabilities': [ { "
+ "'capability': %s, 'state': %i } ] } }",
+ capability, value);
+}
+
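+/*
+ * Send QMP command "migrate-incoming" to @to, then wait for the "setup"
+ * MIGRATION event so the destination is ready to accept the connection.
+ */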
+void migrate_incoming_qmp(QTestState *to, const char *uri, QObject *channels,
+ const char *fmt, ...)
+{
+ va_list ap;
+ QDict *args, *rsp;
+
+ va_start(ap, fmt);
+ args = qdict_from_vjsonf_nofail(fmt, ap);
+ va_end(ap);
+
+ g_assert(!qdict_haskey(args, "uri"));
+ if (uri) {
+ qdict_put_str(args, "uri", uri);
+ }
+
+ g_assert(!qdict_haskey(args, "channels"));
+ if (channels) {
+ qdict_put_obj(args, "channels", channels);
+ }
+
+ /* This function relies on the MIGRATION event; make sure it's enabled */
+ migrate_set_capability(to, "events", true);
+
+ rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}",
+ args);
+
+ if (!qdict_haskey(rsp, "return")) {
+ g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true);
+ g_test_message("%s", s->str);
+ }
+
+ g_assert(qdict_haskey(rsp, "return"));
+ qobject_unref(rsp);
+
+ migration_event_wait(to, "setup");
+}
+
+static bool check_migration_status(QTestState *who, const char *goal,
+ const char **ungoals)
+{
+ bool ready;
+ char *current_status;
+ const char **ungoal;
+
+ current_status = migrate_query_status(who);
+ ready = strcmp(current_status, goal) == 0;
+ if (!ungoals) {
+ g_assert_cmpstr(current_status, !=, "failed");
+ /*
+ * If looking for a state other than completed,
+ * completion of migration would cause the test to
+ * hang.
+ */
+ if (strcmp(goal, "completed") != 0) {
+ g_assert_cmpstr(current_status, !=, "completed");
+ }
+ } else {
+ for (ungoal = ungoals; *ungoal; ungoal++) {
+ g_assert_cmpstr(current_status, !=, *ungoal);
+ }
+ }
+ g_free(current_status);
+ return ready;
+}
+
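+/*
+ * Poll until the migration status on @who equals @goal, asserting that none
+ * of the NULL-terminated @ungoals is reached first. For example, the cancel
+ * tests use:
+ *   wait_for_migration_status(from, "cancelled",
+ *                             (const char * []) { "completed", NULL });
+ */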
+void wait_for_migration_status(QTestState *who,
+ const char *goal, const char **ungoals)
+{
+ g_test_timer_start();
+ while (!check_migration_status(who, goal, ungoals)) {
+ usleep(1000);
+
+ g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
+ }
+}
+
+void wait_for_migration_complete(QTestState *who)
+{
+ wait_for_migration_status(who, "completed", NULL);
+}
+
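+/*
+ * Wait until the source reports "failed", optionally tolerating a transient
+ * "active" state, and then check that the machine is still running.
+ */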
+void wait_for_migration_fail(QTestState *from, bool allow_active)
+{
+ g_test_timer_start();
+ QDict *rsp_return;
+ char *status;
+ bool failed;
+
+ do {
+ status = migrate_query_status(from);
+ bool result = !strcmp(status, "setup") || !strcmp(status, "failed") ||
+ (allow_active && !strcmp(status, "active"));
+ if (!result) {
+ fprintf(stderr, "%s: unexpected status status=%s allow_active=%d\n",
+ __func__, status, allow_active);
+ }
+ g_assert(result);
+ failed = !strcmp(status, "failed");
+ g_free(status);
+
+ g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
+ } while (!failed);
+
+ /* Is the machine currently running? */
+ rsp_return = qtest_qmp_assert_success_ref(from,
+ "{ 'execute': 'query-status' }");
+ g_assert(qdict_haskey(rsp_return, "running"));
+ g_assert(qdict_get_bool(rsp_return, "running"));
+ qobject_unref(rsp_return);
+}
+
+void wait_for_stop(QTestState *who, QTestMigrationState *state)
+{
+ if (!state->stop_seen) {
+ qtest_qmp_eventwait(who, "STOP");
+ }
+}
+
+void wait_for_resume(QTestState *who, QTestMigrationState *state)
+{
+ if (!state->resume_seen) {
+ qtest_qmp_eventwait(who, "RESUME");
+ }
+}
+
+void wait_for_suspend(QTestState *who, QTestMigrationState *state)
+{
+ if (state->suspend_me && !state->suspend_seen) {
+ qtest_qmp_eventwait(who, "SUSPEND");
+ }
+}
+
+/*
+ * Note: the caller is responsible for freeing the returned object
+ * via qobject_unref() after use
+ */
+QDict *migrate_query(QTestState *who)
+{
+ return qtest_qmp_assert_success_ref(who, "{ 'execute': 'query-migrate' }");
+}
+
+QDict *migrate_query_not_failed(QTestState *who)
+{
+ const char *status;
+ QDict *rsp = migrate_query(who);
+ status = qdict_get_str(rsp, "status");
+ if (g_str_equal(status, "failed")) {
+ g_printerr("query-migrate shows failed migration: %s\n",
+ qdict_get_str(rsp, "error-desc"));
+ }
+ g_assert(!g_str_equal(status, "failed"));
+ return rsp;
+}
+
+/*
+ * Note: the caller is responsible for freeing the returned string
+ * via g_free() after use
+ */
+gchar *migrate_query_status(QTestState *who)
+{
+ QDict *rsp_return = migrate_query(who);
+ gchar *status = g_strdup(qdict_get_str(rsp_return, "status"));
+
+ g_assert(status);
+ qobject_unref(rsp_return);
+
+ return status;
+}
+
+int64_t read_ram_property_int(QTestState *who, const char *property)
+{
+ QDict *rsp_return, *rsp_ram;
+ int64_t result;
+
+ rsp_return = migrate_query_not_failed(who);
+ if (!qdict_haskey(rsp_return, "ram")) {
+ /* Still in setup */
+ result = 0;
+ } else {
+ rsp_ram = qdict_get_qdict(rsp_return, "ram");
+ result = qdict_get_try_int(rsp_ram, property, 0);
+ }
+ qobject_unref(rsp_return);
+ return result;
+}
+
+int64_t read_migrate_property_int(QTestState *who, const char *property)
+{
+ QDict *rsp_return;
+ int64_t result;
+
+ rsp_return = migrate_query_not_failed(who);
+ result = qdict_get_try_int(rsp_return, property, 0);
+ qobject_unref(rsp_return);
+ return result;
+}
+
+uint64_t get_migration_pass(QTestState *who)
+{
+ return read_ram_property_int(who, "dirty-sync-count");
+}
+
+void read_blocktime(QTestState *who)
+{
+ QDict *rsp_return;
+
+ rsp_return = migrate_query_not_failed(who);
+ g_assert(qdict_haskey(rsp_return, "postcopy-blocktime"));
+ qobject_unref(rsp_return);
+}
+
+/*
+ * Wait for two changes in the migration pass count, but bail if we stop.
+ */
+void wait_for_migration_pass(QTestState *who, QTestMigrationState *src_state)
+{
+ uint64_t pass, prev_pass = 0, changes = 0;
+
+ while (changes < 2 && !src_state->stop_seen && !src_state->suspend_seen) {
+ usleep(1000);
+ pass = get_migration_pass(who);
+ changes += (pass != prev_pass);
+ prev_pass = pass;
+ }
+}
+
+static long long migrate_get_parameter_int(QTestState *who,
+ const char *parameter)
+{
+ QDict *rsp;
+ long long result;
+
+ rsp = qtest_qmp_assert_success_ref(
+ who, "{ 'execute': 'query-migrate-parameters' }");
+ result = qdict_get_int(rsp, parameter);
+ qobject_unref(rsp);
+ return result;
+}
+
+static void migrate_check_parameter_int(QTestState *who, const char *parameter,
+ long long value)
+{
+ long long result;
+
+ result = migrate_get_parameter_int(who, parameter);
+ g_assert_cmpint(result, ==, value);
+}
+
+void migrate_set_parameter_int(QTestState *who, const char *parameter,
+ long long value)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate-set-parameters',"
+ "'arguments': { %s: %lld } }",
+ parameter, value);
+ migrate_check_parameter_int(who, parameter, value);
+}
+
+static char *migrate_get_parameter_str(QTestState *who, const char *parameter)
+{
+ QDict *rsp;
+ char *result;
+
+ rsp = qtest_qmp_assert_success_ref(
+ who, "{ 'execute': 'query-migrate-parameters' }");
+ result = g_strdup(qdict_get_str(rsp, parameter));
+ qobject_unref(rsp);
+ return result;
+}
+
+static void migrate_check_parameter_str(QTestState *who, const char *parameter,
+ const char *value)
+{
+ g_autofree char *result = migrate_get_parameter_str(who, parameter);
+ g_assert_cmpstr(result, ==, value);
+}
+
+void migrate_set_parameter_str(QTestState *who, const char *parameter,
+ const char *value)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate-set-parameters',"
+ "'arguments': { %s: %s } }",
+ parameter, value);
+ migrate_check_parameter_str(who, parameter, value);
+}
+
+static long long migrate_get_parameter_bool(QTestState *who,
+ const char *parameter)
+{
+ QDict *rsp;
+ int result;
+
+ rsp = qtest_qmp_assert_success_ref(
+ who, "{ 'execute': 'query-migrate-parameters' }");
+ result = qdict_get_bool(rsp, parameter);
+ qobject_unref(rsp);
+ return !!result;
+}
+
+static void migrate_check_parameter_bool(QTestState *who, const char *parameter,
+ int value)
+{
+ int result;
+
+ result = migrate_get_parameter_bool(who, parameter);
+ g_assert_cmpint(result, ==, value);
+}
+
+void migrate_set_parameter_bool(QTestState *who, const char *parameter,
+ int value)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate-set-parameters',"
+ "'arguments': { %s: %i } }",
+ parameter, value);
+ migrate_check_parameter_bool(who, parameter, value);
+}
+
+void migrate_ensure_non_converge(QTestState *who)
+{
+ /* Can't converge with a 1ms downtime limit + a 3 MB/s bandwidth limit */
+ migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000);
+ migrate_set_parameter_int(who, "downtime-limit", 1);
+}
+
+void migrate_ensure_converge(QTestState *who)
+{
+ /* Should converge with a 30s downtime limit + a 1 GB/s bandwidth limit */
+ migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000);
+ migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
+}
+
+void migrate_pause(QTestState *who)
+{
+ qtest_qmp_assert_success(who, "{ 'execute': 'migrate-pause' }");
+}
+
+void migrate_continue(QTestState *who, const char *state)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'migrate-continue',"
+ " 'arguments': { 'state': %s } }",
+ state);
+}
+
+void migrate_recover(QTestState *who, const char *uri)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'exec-oob': 'migrate-recover', "
+ " 'id': 'recover-cmd', "
+ " 'arguments': { 'uri': %s } }",
+ uri);
+}
+
+void migrate_cancel(QTestState *who)
+{
+ qtest_qmp_assert_success(who, "{ 'execute': 'migrate_cancel' }");
+}
+
+void migrate_postcopy_start(QTestState *from, QTestState *to,
+ QTestMigrationState *src_state)
+{
+ qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }");
+
+ wait_for_stop(from, src_state);
+ qtest_qmp_eventwait(to, "RESUME");
+}
diff --git a/tests/qtest/migration/migration-qmp.h b/tests/qtest/migration/migration-qmp.h
new file mode 100644
index 0000000..faa8181
--- /dev/null
+++ b/tests/qtest/migration/migration-qmp.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef MIGRATION_QMP_H
+#define MIGRATION_QMP_H
+
+#include "migration-util.h"
+
+QObject *migrate_str_to_channel(const char *str);
+
+G_GNUC_PRINTF(4, 5)
+void migrate_qmp_fail(QTestState *who, const char *uri,
+ QObject *channels, const char *fmt, ...);
+
+G_GNUC_PRINTF(5, 6)
+void migrate_qmp(QTestState *who, QTestState *to, const char *uri,
+ QObject *channels, const char *fmt, ...);
+
+G_GNUC_PRINTF(4, 5)
+void migrate_incoming_qmp(QTestState *who, const char *uri,
+ QObject *channels, const char *fmt, ...);
+
+void migration_event_wait(QTestState *s, const char *target);
+void migrate_set_capability(QTestState *who, const char *capability,
+ bool value);
+int64_t read_ram_property_int(QTestState *who, const char *property);
+void migrate_set_parameter_int(QTestState *who, const char *parameter,
+ long long value);
+void wait_for_stop(QTestState *who, QTestMigrationState *state);
+void wait_for_resume(QTestState *who, QTestMigrationState *state);
+void wait_for_suspend(QTestState *who, QTestMigrationState *state);
+gchar *migrate_query_status(QTestState *who);
+int64_t read_migrate_property_int(QTestState *who, const char *property);
+uint64_t get_migration_pass(QTestState *who);
+void read_blocktime(QTestState *who);
+void wait_for_migration_pass(QTestState *who, QTestMigrationState *src_state);
+void migrate_set_parameter_str(QTestState *who, const char *parameter,
+ const char *value);
+void migrate_set_parameter_bool(QTestState *who, const char *parameter,
+ int value);
+void migrate_ensure_non_converge(QTestState *who);
+void migrate_ensure_converge(QTestState *who);
+void migrate_pause(QTestState *who);
+void migrate_continue(QTestState *who, const char *state);
+void migrate_recover(QTestState *who, const char *uri);
+void migrate_cancel(QTestState *who);
+void migrate_postcopy_start(QTestState *from, QTestState *to,
+ QTestMigrationState *src_state);
+
+#endif /* MIGRATION_QMP_H */
diff --git a/tests/qtest/migration/migration-util.c b/tests/qtest/migration/migration-util.c
new file mode 100644
index 0000000..642cf50
--- /dev/null
+++ b/tests/qtest/migration/migration-util.c
@@ -0,0 +1,398 @@
+/*
+ * QTest migration utilities
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/ctype.h"
+#include "qapi/qapi-visit-sockets.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/error.h"
+#include "qobject/qlist.h"
+#include "qemu/cutils.h"
+#include "qemu/memalign.h"
+
+#include "migration/bootfile.h"
+#include "migration/migration-util.h"
+
+#if defined(__linux__)
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#endif
+
+/* for uffd_version_check() */
+#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
+#include <sys/eventfd.h>
+#include "qemu/userfaultfd.h"
+#endif
+
+/* For dirty ring test; so far only x86_64 is supported */
+#if defined(__linux__) && defined(HOST_X86_64)
+#include "linux/kvm.h"
+#endif
+
+
+static char *SocketAddress_to_str(SocketAddress *addr)
+{
+ switch (addr->type) {
+ case SOCKET_ADDRESS_TYPE_INET:
+ return g_strdup_printf("tcp:%s:%s",
+ addr->u.inet.host,
+ addr->u.inet.port);
+ case SOCKET_ADDRESS_TYPE_UNIX:
+ return g_strdup_printf("unix:%s",
+ addr->u.q_unix.path);
+ case SOCKET_ADDRESS_TYPE_FD:
+ return g_strdup_printf("fd:%s", addr->u.fd.str);
+ case SOCKET_ADDRESS_TYPE_VSOCK:
+ return g_strdup_printf("vsock:%s:%s",
+ addr->u.vsock.cid,
+ addr->u.vsock.port);
+ default:
+ return g_strdup("unknown address type");
+ }
+}
+
+static QDict *SocketAddress_to_qdict(SocketAddress *addr)
+{
+ QDict *dict = qdict_new();
+
+ switch (addr->type) {
+ case SOCKET_ADDRESS_TYPE_INET:
+ qdict_put_str(dict, "type", "inet");
+ qdict_put_str(dict, "host", addr->u.inet.host);
+ qdict_put_str(dict, "port", addr->u.inet.port);
+ break;
+ case SOCKET_ADDRESS_TYPE_UNIX:
+ qdict_put_str(dict, "type", "unix");
+ qdict_put_str(dict, "path", addr->u.q_unix.path);
+ break;
+ case SOCKET_ADDRESS_TYPE_FD:
+ qdict_put_str(dict, "type", "fd");
+ qdict_put_str(dict, "str", addr->u.fd.str);
+ break;
+ case SOCKET_ADDRESS_TYPE_VSOCK:
+ qdict_put_str(dict, "type", "vsock");
+ qdict_put_str(dict, "cid", addr->u.vsock.cid);
+ qdict_put_str(dict, "port", addr->u.vsock.port);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return dict;
+}
+
+static SocketAddressList *migrate_get_socket_address(QTestState *who)
+{
+ QDict *rsp;
+ SocketAddressList *addrs;
+ Visitor *iv = NULL;
+ QObject *object;
+
+ rsp = migrate_query(who);
+ object = qdict_get(rsp, "socket-address");
+
+ iv = qobject_input_visitor_new(object);
+ visit_type_SocketAddressList(iv, NULL, &addrs, &error_abort);
+ visit_free(iv);
+
+ qobject_unref(rsp);
+ return addrs;
+}
+
+char *migrate_get_connect_uri(QTestState *who)
+{
+ SocketAddressList *addrs;
+ char *connect_uri;
+
+ addrs = migrate_get_socket_address(who);
+ connect_uri = SocketAddress_to_str(addrs->value);
+
+ qapi_free_SocketAddressList(addrs);
+ return connect_uri;
+}
+
+static QDict *
+migrate_get_connect_qdict(QTestState *who)
+{
+ SocketAddressList *addrs;
+ QDict *connect_qdict;
+
+ addrs = migrate_get_socket_address(who);
+ connect_qdict = SocketAddress_to_qdict(addrs->value);
+
+ qapi_free_SocketAddressList(addrs);
+ return connect_qdict;
+}
+
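+/*
+ * Replace any port=0 placeholder in @channel_list with the port actually
+ * allocated by the destination, as reported by query-migrate.
+ */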
+void migrate_set_ports(QTestState *to, QList *channel_list)
+{
+ g_autoptr(QDict) addr = NULL;
+ QListEntry *entry;
+ const char *addr_port = NULL;
+
+ QLIST_FOREACH_ENTRY(channel_list, entry) {
+ QDict *channel = qobject_to(QDict, qlist_entry_obj(entry));
+ QDict *addrdict = qdict_get_qdict(channel, "addr");
+
+ if (!qdict_haskey(addrdict, "port") ||
+ strcmp(qdict_get_str(addrdict, "port"), "0")) {
+ continue;
+ }
+
+ /*
+ * Fetch addr only if needed, so tests that are not yet connected to
+ * the monitor do not query it. Such tests cannot use port=0.
+ */
+ if (!addr) {
+ addr = migrate_get_connect_qdict(to);
+ }
+
+ if (qdict_haskey(addr, "port")) {
+ addr_port = qdict_get_str(addr, "port");
+ qdict_put_str(addrdict, "port", addr_port);
+ }
+ }
+}
+
+bool migrate_watch_for_events(QTestState *who, const char *name,
+ QDict *event, void *opaque)
+{
+ QTestMigrationState *state = opaque;
+
+ if (g_str_equal(name, "STOP")) {
+ state->stop_seen = true;
+ return true;
+ } else if (g_str_equal(name, "SUSPEND")) {
+ state->suspend_seen = true;
+ return true;
+ } else if (g_str_equal(name, "RESUME")) {
+ state->resume_seen = true;
+ return true;
+ }
+
+ return false;
+}
+
+char *find_common_machine_version(const char *mtype, const char *var1,
+ const char *var2)
+{
+ g_autofree char *type1 = qtest_resolve_machine_alias(var1, mtype);
+ g_autofree char *type2 = qtest_resolve_machine_alias(var2, mtype);
+
+ g_assert(type1 && type2);
+
+ if (g_str_equal(type1, type2)) {
+ /* either can be used */
+ return g_strdup(type1);
+ }
+
+ if (qtest_has_machine_with_env(var2, type1)) {
+ return g_strdup(type1);
+ }
+
+ if (qtest_has_machine_with_env(var1, type2)) {
+ return g_strdup(type2);
+ }
+
+ g_test_message("No common machine version for machine type '%s' between "
+ "binaries %s and %s", mtype, getenv(var1), getenv(var2));
+ g_assert_not_reached();
+}
+
+char *resolve_machine_version(const char *alias, const char *var1,
+ const char *var2)
+{
+ const char *mname = g_getenv("QTEST_QEMU_MACHINE_TYPE");
+ g_autofree char *machine_name = NULL;
+
+ if (mname) {
+ const char *dash = strrchr(mname, '-');
+ const char *dot = strrchr(mname, '.');
+
+ machine_name = g_strdup(mname);
+
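+ /* A dash plus a dot (e.g. "pc-q35-9.2") means an already-versioned name */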
+ if (dash && dot) {
+ assert(qtest_has_machine(machine_name));
+ return g_steal_pointer(&machine_name);
+ }
+ /* else: probably an alias, let it be resolved below */
+ } else {
+ /* use the hardcoded alias */
+ machine_name = g_strdup(alias);
+ }
+
+ return find_common_machine_version(machine_name, var1, var2);
+}
+
+typedef struct {
+ char *name;
+ void (*func)(void);
+ void (*func_full)(void *);
+} MigrationTest;
+
+static void migration_test_destroy(gpointer data)
+{
+ MigrationTest *test = (MigrationTest *)data;
+
+ g_free(test->name);
+ g_free(test);
+}
+
+static void migration_test_wrapper(const void *data)
+{
+ MigrationTest *test = (MigrationTest *)data;
+
+ g_test_message("Running /%s%s", qtest_get_arch(), test->name);
+ test->func();
+}
+
+void migration_test_add(const char *path, void (*fn)(void))
+{
+ MigrationTest *test = g_new0(MigrationTest, 1);
+
+ test->func = fn;
+ test->name = g_strdup(path);
+
+ qtest_add_data_func_full(path, test, migration_test_wrapper,
+ migration_test_destroy);
+}
+
+static void migration_test_wrapper_full(const void *data)
+{
+ MigrationTest *test = (MigrationTest *)data;
+
+ g_test_message("Running /%s%s", qtest_get_arch(), test->name);
+ test->func_full(test->name);
+}
+
+void migration_test_add_suffix(const char *path, const char *suffix,
+ void (*fn)(void *))
+{
+ MigrationTest *test = g_new0(MigrationTest, 1);
+
+ g_assert(g_str_has_suffix(path, "/"));
+ g_assert(!g_str_has_prefix(suffix, "/"));
+
+ test->func_full = fn;
+ test->name = g_strconcat(path, suffix, NULL);
+
+ qtest_add_data_func_full(test->name, test, migration_test_wrapper_full,
+ migration_test_destroy);
+}
+
+#ifdef O_DIRECT
+/*
+ * Probe for O_DIRECT support on the filesystem. Since this is used
+ * for tests, be conservative, if anything fails, assume it's
+ * unsupported.
+ */
+bool probe_o_direct_support(const char *tmpfs)
+{
+ g_autofree char *filename = g_strdup_printf("%s/probe-o-direct", tmpfs);
+ int fd, flags = O_CREAT | O_RDWR | O_TRUNC | O_DIRECT;
+ void *buf;
+ ssize_t ret, len;
+ uint64_t offset;
+
+ fd = open(filename, flags, 0660);
+ if (fd < 0) {
+ unlink(filename);
+ return false;
+ }
+
+ /*
+ * Use 1MB alignment as a conservative choice to satisfy any
+ * plausible architecture's default page size and/or filesystem
+ * alignment restrictions.
+ */
+ len = 0x100000;
+ offset = 0x100000;
+
+ buf = qemu_try_memalign(len, len);
+ g_assert(buf);
+ memset(buf, 0, len);
+
+ ret = pwrite(fd, buf, len, offset);
+ unlink(filename);
+ g_free(buf);
+
+ if (ret < 0) {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
+bool ufd_version_check(bool *uffd_feature_thread_id)
+{
+ struct uffdio_api api_struct;
+ uint64_t ioctl_mask;
+
+ int ufd = uffd_open(O_CLOEXEC);
+
+ if (ufd == -1) {
+ g_test_message("Skipping test: userfaultfd not available");
+ return false;
+ }
+
+ api_struct.api = UFFD_API;
+ api_struct.features = 0;
+ if (ioctl(ufd, UFFDIO_API, &api_struct)) {
+ g_test_message("Skipping test: UFFDIO_API failed");
+ return false;
+ }
+
+ if (uffd_feature_thread_id) {
+ *uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID;
+ }
+
+ ioctl_mask = (1ULL << _UFFDIO_REGISTER |
+ 1ULL << _UFFDIO_UNREGISTER);
+ if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
+ g_test_message("Skipping test: Missing userfault feature");
+ return false;
+ }
+
+ return true;
+}
+#else
+bool ufd_version_check(bool *uffd_feature_thread_id)
+{
+ g_test_message("Skipping test: Userfault not available (builtdtime)");
+ return false;
+}
+#endif
+
+bool kvm_dirty_ring_supported(void)
+{
+#if defined(__linux__) && defined(HOST_X86_64)
+ int ret, kvm_fd = open("/dev/kvm", O_RDONLY);
+
+ if (kvm_fd < 0) {
+ return false;
+ }
+
+ ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
+ close(kvm_fd);
+
+ /* We test with 4096 slots */
+ if (ret < 4096) {
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
diff --git a/tests/qtest/migration/migration-util.h b/tests/qtest/migration/migration-util.h
new file mode 100644
index 0000000..44815e9
--- /dev/null
+++ b/tests/qtest/migration/migration-util.h
@@ -0,0 +1,59 @@
+/*
+ * QTest migration helpers
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MIGRATION_UTIL_H
+#define MIGRATION_UTIL_H
+
+#include "libqtest.h"
+
+typedef struct QTestMigrationState {
+ bool stop_seen;
+ bool resume_seen;
+ bool suspend_seen;
+ bool suspend_me;
+} QTestMigrationState;
+
+bool migrate_watch_for_events(QTestState *who, const char *name,
+ QDict *event, void *opaque);
+
+QDict *migrate_query(QTestState *who);
+QDict *migrate_query_not_failed(QTestState *who);
+
+void wait_for_migration_status(QTestState *who,
+ const char *goal, const char **ungoals);
+
+void wait_for_migration_complete(QTestState *who);
+
+void wait_for_migration_fail(QTestState *from, bool allow_active);
+
+char *find_common_machine_version(const char *mtype, const char *var1,
+ const char *var2);
+char *resolve_machine_version(const char *alias, const char *var1,
+ const char *var2);
+#ifdef O_DIRECT
+bool probe_o_direct_support(const char *tmpfs);
+#else
+static inline bool probe_o_direct_support(const char *tmpfs)
+{
+ return false;
+}
+#endif
+
+bool ufd_version_check(bool *uffd_feature_thread_id);
+bool kvm_dirty_ring_supported(void);
+void migration_test_add(const char *path, void (*fn)(void));
+void migration_test_add_suffix(const char *path, const char *suffix,
+ void (*fn)(void *));
+char *migrate_get_connect_uri(QTestState *who);
+void migrate_set_ports(QTestState *to, QList *channel_list);
+
+#endif /* MIGRATION_UTIL_H */
diff --git a/tests/qtest/migration/misc-tests.c b/tests/qtest/migration/misc-tests.c
new file mode 100644
index 0000000..5499525
--- /dev/null
+++ b/tests/qtest/migration/misc-tests.c
@@ -0,0 +1,297 @@
+/*
+ * QTest testcases for migration
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qobject/qjson.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+
+#define ANALYZE_SCRIPT "scripts/analyze-migration.py"
+
+static char *tmpfs;
+
+static void test_baddest(void)
+{
+ MigrateStart args = {
+ .hide_stderr = true
+ };
+ QTestState *from, *to;
+
+ if (migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) {
+ return;
+ }
+ migrate_qmp(from, to, "tcp:127.0.0.1:0", NULL, "{}");
+ wait_for_migration_fail(from, false);
+ migrate_end(from, to, false);
+}
+
+#ifndef _WIN32
+static void test_analyze_script(void)
+{
+ MigrateStart args = {
+ .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
+ };
+ QTestState *from, *to;
+ g_autofree char *uri = NULL;
+ g_autofree char *file = NULL;
+ int pid, wstatus;
+ const char *python = g_getenv("PYTHON");
+
+ if (!python) {
+ g_test_skip("PYTHON variable not set");
+ return;
+ }
+
+ /* dummy URI */
+ if (migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) {
+ return;
+ }
+
+ /*
+ * Setting these two capabilities causes the "configuration"
+ * vmstate to include subsections for them. The script needs to
+ * parse those subsections properly.
+ */
+ migrate_set_capability(from, "validate-uuid", true);
+ migrate_set_capability(from, "x-ignore-shared", true);
+
+ file = g_strdup_printf("%s/migfile", tmpfs);
+ uri = g_strdup_printf("exec:cat > %s", file);
+
+ migrate_ensure_converge(from);
+ migrate_qmp(from, to, uri, NULL, "{}");
+ wait_for_migration_complete(from);
+
+ pid = fork();
+ if (!pid) {
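+ /* Child: redirect stdout to /dev/null, then exec the analysis script */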
+ close(1);
+ open("/dev/null", O_WRONLY);
+ execl(python, python, ANALYZE_SCRIPT, "-f", file, NULL);
+ g_assert_not_reached();
+ }
+
+ g_assert(waitpid(pid, &wstatus, 0) == pid);
+ if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
+ g_test_message("Failed to analyze the migration stream");
+ g_test_fail();
+ }
+ migrate_end(from, to, false);
+ unlink(file);
+}
+#endif
+
+static void test_ignore_shared(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ QTestState *from, *to;
+ MigrateStart args = {
+ .use_shmem = true,
+ .caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED] = true,
+ };
+
+ if (migrate_start(&from, &to, uri, &args)) {
+ return;
+ }
+
+ migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ migrate_wait_for_dirty_mem(from, to);
+
+ wait_for_stop(from, get_src());
+
+ qtest_qmp_eventwait(to, "RESUME");
+
+ wait_for_serial("dest_serial");
+ wait_for_migration_complete(from);
+
+ /* Check whether shared RAM has really been skipped */
+ g_assert_cmpint(
+ read_ram_property_int(from, "transferred"), <, 4 * 1024 * 1024);
+
+ migrate_end(from, to, true);
+}
+
+static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ QTestState *from, *to;
+
+ if (migrate_start(&from, &to, uri, args)) {
+ return;
+ }
+
+ /*
+ * UUID validation happens at the beginning of migration, so the bulk
+ * of the migration process is not interesting here. Set a huge
+ * downtime limit so the migration finishes quickly.
+ */
+ migrate_set_parameter_int(from, "downtime-limit", 1000000);
+ migrate_set_capability(from, "validate-uuid", true);
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ if (should_fail) {
+ qtest_set_expected_status(to, EXIT_FAILURE);
+ wait_for_migration_fail(from, true);
+ } else {
+ wait_for_migration_complete(from);
+ }
+
+ migrate_end(from, to, false);
+}
+
+static void test_validate_uuid(void)
+{
+ MigrateStart args = {
+ .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
+ .opts_target = "-uuid 11111111-1111-1111-1111-111111111111",
+ };
+
+ do_test_validate_uuid(&args, false);
+}
+
+static void test_validate_uuid_error(void)
+{
+ MigrateStart args = {
+ .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
+ .opts_target = "-uuid 22222222-2222-2222-2222-222222222222",
+ .hide_stderr = true,
+ };
+
+ do_test_validate_uuid(&args, true);
+}
+
+static void test_validate_uuid_src_not_set(void)
+{
+ MigrateStart args = {
+ .opts_target = "-uuid 22222222-2222-2222-2222-222222222222",
+ .hide_stderr = true,
+ };
+
+ do_test_validate_uuid(&args, false);
+}
+
+static void test_validate_uuid_dst_not_set(void)
+{
+ MigrateStart args = {
+ .opts_source = "-uuid 11111111-1111-1111-1111-111111111111",
+ .hide_stderr = true,
+ };
+
+ do_test_validate_uuid(&args, false);
+}
+
+static void do_test_validate_uri_channel(MigrateCommon *args)
+{
+ QTestState *from, *to;
+ QObject *channels;
+
+ if (migrate_start(&from, &to, args->listen_uri, &args->start)) {
+ return;
+ }
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+
+ /*
+ * 'uri' and 'channels' are validated even before the migration
+ * starts.
+ */
+ channels = args->connect_channels ?
+ qobject_from_json(args->connect_channels, &error_abort) :
+ NULL;
+ migrate_qmp_fail(from, args->connect_uri, channels, "{}");
+
+ migrate_end(from, to, false);
+}
+
+static void test_validate_uri_channels_both_set(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "defer",
+ .connect_uri = "tcp:127.0.0.1:0",
+ .connect_channels = ("[ { ""'channel-type': 'main',"
+ " 'addr': { 'transport': 'socket',"
+ " 'type': 'inet',"
+ " 'host': '127.0.0.1',"
+ " 'port': '0' } } ]"),
+ };
+
+ do_test_validate_uri_channel(&args);
+}
+
+static void test_validate_uri_channels_none_set(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "defer",
+ };
+
+ do_test_validate_uri_channel(&args);
+}
+
+static void migration_test_add_misc_smoke(MigrationTestEnv *env)
+{
+#ifndef _WIN32
+ migration_test_add("/migration/analyze-script", test_analyze_script);
+#endif
+}
+
+void migration_test_add_misc(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ migration_test_add_misc_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+ migration_test_add("/migration/bad_dest", test_baddest);
+
+ /*
+ * Our CI system has problems with shared memory.
+ * Don't run this test until we find a workaround.
+ */
+ if (getenv("QEMU_TEST_FLAKY_TESTS")) {
+ migration_test_add("/migration/ignore-shared", test_ignore_shared);
+ }
+
+ migration_test_add("/migration/validate_uuid", test_validate_uuid);
+ migration_test_add("/migration/validate_uuid_error",
+ test_validate_uuid_error);
+ migration_test_add("/migration/validate_uuid_src_not_set",
+ test_validate_uuid_src_not_set);
+ migration_test_add("/migration/validate_uuid_dst_not_set",
+ test_validate_uuid_dst_not_set);
+ migration_test_add("/migration/validate_uri/channels/both_set",
+ test_validate_uri_channels_both_set);
+ migration_test_add("/migration/validate_uri/channels/none_set",
+ test_validate_uri_channels_none_set);
+}
diff --git a/tests/qtest/migration/postcopy-tests.c b/tests/qtest/migration/postcopy-tests.c
new file mode 100644
index 0000000..3773525
--- /dev/null
+++ b/tests/qtest/migration/postcopy-tests.c
@@ -0,0 +1,149 @@
+/*
+ * QTest testcases for postcopy migration
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-util.h"
+#include "qobject/qlist.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/range.h"
+#include "qemu/sockets.h"
+
+static void test_postcopy(void)
+{
+ MigrateCommon args = { };
+
+ test_postcopy_common(&args);
+}
+
+static void test_postcopy_suspend(void)
+{
+ MigrateCommon args = {
+ .start.suspend_me = true,
+ };
+
+ test_postcopy_common(&args);
+}
+
+static void test_postcopy_preempt(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_common(&args);
+}
+
+static void test_postcopy_recovery(void)
+{
+ MigrateCommon args = { };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_postcopy_recovery_fail_handshake(void)
+{
+ MigrateCommon args = {
+ .postcopy_recovery_fail_stage = POSTCOPY_FAIL_RECOVERY,
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_postcopy_recovery_fail_reconnect(void)
+{
+ MigrateCommon args = {
+ .postcopy_recovery_fail_stage = POSTCOPY_FAIL_CHANNEL_ESTABLISH,
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_postcopy_preempt_recovery(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void migration_test_add_postcopy_smoke(MigrationTestEnv *env)
+{
+ if (env->has_uffd) {
+ migration_test_add("/migration/postcopy/plain", test_postcopy);
+ migration_test_add("/migration/postcopy/recovery/plain",
+ test_postcopy_recovery);
+ migration_test_add("/migration/postcopy/preempt/plain",
+ test_postcopy_preempt);
+ }
+}
+
+static void test_multifd_postcopy(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+
+ test_postcopy_common(&args);
+}
+
+static void test_multifd_postcopy_preempt(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_common(&args);
+}
+
+void migration_test_add_postcopy(MigrationTestEnv *env)
+{
+ migration_test_add_postcopy_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+ if (env->has_uffd) {
+ migration_test_add("/migration/postcopy/preempt/recovery/plain",
+ test_postcopy_preempt_recovery);
+
+ migration_test_add(
+ "/migration/postcopy/recovery/double-failures/handshake",
+ test_postcopy_recovery_fail_handshake);
+
+ migration_test_add(
+ "/migration/postcopy/recovery/double-failures/reconnect",
+ test_postcopy_recovery_fail_reconnect);
+
+ migration_test_add("/migration/multifd+postcopy/plain",
+ test_multifd_postcopy);
+ migration_test_add("/migration/multifd+postcopy/preempt/plain",
+ test_multifd_postcopy_preempt);
+ if (env->is_x86) {
+ migration_test_add("/migration/postcopy/suspend",
+ test_postcopy_suspend);
+ }
+ }
+}
diff --git a/tests/migration/ppc64/Makefile b/tests/qtest/migration/ppc64/Makefile
index a3a2d98..a3a2d98 100644
--- a/tests/migration/ppc64/Makefile
+++ b/tests/qtest/migration/ppc64/Makefile
diff --git a/tests/migration/ppc64/a-b-kernel.S b/tests/qtest/migration/ppc64/a-b-kernel.S
index 0613a8d..0613a8d 100644
--- a/tests/migration/ppc64/a-b-kernel.S
+++ b/tests/qtest/migration/ppc64/a-b-kernel.S
diff --git a/tests/migration/ppc64/a-b-kernel.h b/tests/qtest/migration/ppc64/a-b-kernel.h
index 673317e..673317e 100644
--- a/tests/migration/ppc64/a-b-kernel.h
+++ b/tests/qtest/migration/ppc64/a-b-kernel.h
diff --git a/tests/qtest/migration/precopy-tests.c b/tests/qtest/migration/precopy-tests.c
new file mode 100644
index 0000000..bb38292
--- /dev/null
+++ b/tests/qtest/migration/precopy-tests.c
@@ -0,0 +1,1337 @@
+/*
+ * QTest testcase for precopy migration
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "chardev/char.h"
+#include "crypto/tlscredspsk.h"
+#include "libqtest.h"
+#include "migration/bootfile.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+#include "ppc-util.h"
+#include "qobject/qlist.h"
+#include "qapi-types-migration.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/range.h"
+#include "qemu/sockets.h"
+
+
+/*
+ * The dirty limit stops working if the dirty page rate error
+ * value is less than DIRTYLIMIT_TOLERANCE_RANGE
+ */
+#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */
+
+static char *tmpfs;
+
+static void test_precopy_unix_plain(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .listen_uri = uri,
+ .connect_uri = uri,
+ /*
+ * The simplest use case of precopy, covering smoke tests of
+ * get-dirty-log dirty tracking.
+ */
+ .live = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_unix_suspend_live(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .listen_uri = uri,
+ .connect_uri = uri,
+ /*
+ * Despite being live, the test is fast because the source
+ * suspends immediately.
+ */
+ .live = true,
+ .start.suspend_me = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_unix_suspend_notlive(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .listen_uri = uri,
+ .connect_uri = uri,
+ .start.suspend_me = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_unix_dirty_ring(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .start = {
+ .use_dirty_ring = true,
+ },
+ .listen_uri = uri,
+ .connect_uri = uri,
+ /*
+ * Besides the precopy/unix basic test, cover dirty ring interface
+ * rather than get-dirty-log.
+ */
+ .live = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+#ifdef CONFIG_RDMA
+
+#include <sys/resource.h>
+
+/*
+ * During migration over RDMA, it will try to pin portions of guest memory,
+ * typically exceeding 100MB in this test, while the remainder will be
+ * transmitted as compressed zero pages.
+ *
+ * REQUIRED_MEMLOCK_SZ indicates the minimal mlock size in the current context.
+ */
+#define REQUIRED_MEMLOCK_SZ (128 << 20) /* 128MB */
+
+/* check 'ulimit -l' */
+static bool mlock_check(void)
+{
+ uid_t uid;
+ struct rlimit rlim;
+
+ uid = getuid();
+ if (uid == 0) {
+ return true;
+ }
+
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) {
+ return false;
+ }
+
+ return rlim.rlim_cur >= REQUIRED_MEMLOCK_SZ;
+}
+
+#define RDMA_MIGRATION_HELPER "scripts/rdma-migration-helper.sh"
+static int new_rdma_link(char *buffer, bool ipv6)
+{
+ char cmd[256];
+ bool verbose = g_getenv("QTEST_LOG");
+
+ snprintf(cmd, sizeof(cmd), "IP_FAMILY=%s %s detect %s",
+ ipv6 ? "ipv6" : "ipv4", RDMA_MIGRATION_HELPER,
+ verbose ? "" : "2>/dev/null");
+
+ FILE *pipe = popen(cmd, "r");
+ if (pipe == NULL) {
+ perror("Failed to run script");
+ return -1;
+ }
+
+ int idx = 0;
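+ /* Accumulate the helper's output (the address used in the rdma: URI) */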
+ while (fgets(buffer + idx, 128 - idx, pipe) != NULL) {
+ idx += strlen(buffer);
+ }
+
+ int status = pclose(pipe);
+ if (status == -1) {
+ perror("Error reported by pclose()");
+ return -1;
+ } else if (WIFEXITED(status)) {
+ return WEXITSTATUS(status);
+ }
+
+ return -1;
+}
+
+static void __test_precopy_rdma_plain(bool ipv6)
+{
+ char buffer[128] = {};
+
+ if (!mlock_check()) {
+ g_test_skip("'ulimit -l' is too small, require >=128M");
+ return;
+ }
+
+ if (new_rdma_link(buffer, ipv6)) {
+ g_test_skip("No rdma link available\n"
+ "# To enable the test:\n"
+ "# Run \'" RDMA_MIGRATION_HELPER " setup\' with root to "
+ "setup a new rdma/rxe link and rerun the test\n"
+ "# Optional: run 'scripts/rdma-migration-helper.sh clean' "
+ "to revert the 'setup'");
+ return;
+ }
+
+ /*
+ * TODO: query a free port instead of hardcoding one.
+ * 29200 = ('R'+'D'+'M'+'A') * 100
+ */
+ g_autofree char *uri = g_strdup_printf("rdma:%s:29200", buffer);
+
+ MigrateCommon args = {
+ .listen_uri = uri,
+ .connect_uri = uri,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_rdma_plain(void)
+{
+ __test_precopy_rdma_plain(false);
+}
+
+static void test_precopy_rdma_plain_ipv6(void)
+{
+ __test_precopy_rdma_plain(true);
+}
+#endif
+
+static void test_precopy_tcp_plain(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_switchover_ack(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start = {
+ .caps[MIGRATION_CAPABILITY_RETURN_PATH] = true,
+ .caps[MIGRATION_CAPABILITY_SWITCHOVER_ACK] = true,
+ },
+ /*
+ * The source VM must be running so that the switchover ACK is taken into
+ * account when deciding whether to do the switchover.
+ */
+ .live = true,
+ };
+
+ test_precopy_common(&args);
+}
+
+#ifndef _WIN32
+static void *migrate_hook_start_fd(QTestState *from,
+ QTestState *to)
+{
+ int ret;
+ int pair[2];
+
+ /* Create two connected sockets for migration */
+ ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair);
+ g_assert_cmpint(ret, ==, 0);
+
+ /* Send the 1st socket to the target */
+ qtest_qmp_fds_assert_success(to, &pair[0], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+ close(pair[0]);
+
+ /* Start incoming migration from the 1st socket */
+ migrate_incoming_qmp(to, "fd:fd-mig", NULL, "{}");
+
+ /* Send the 2nd socket to the target */
+ qtest_qmp_fds_assert_success(from, &pair[1], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+ close(pair[1]);
+
+ return NULL;
+}
+
+static void migrate_hook_end_fd(QTestState *from,
+ QTestState *to,
+ void *opaque)
+{
+ QDict *rsp;
+ const char *error_desc;
+
+ /* Test closing fds */
+ /*
+ * We assume that QEMU removes the named fd from its list,
+ * so this should fail.
+ */
+ rsp = qtest_qmp(from,
+ "{ 'execute': 'closefd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+ g_assert_true(qdict_haskey(rsp, "error"));
+ error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc");
+ g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found");
+ qobject_unref(rsp);
+
+ rsp = qtest_qmp(to,
+ "{ 'execute': 'closefd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+ g_assert_true(qdict_haskey(rsp, "error"));
+ error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc");
+ g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found");
+ qobject_unref(rsp);
+}
+
+static void test_precopy_fd_socket(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .connect_uri = "fd:fd-mig",
+ .start_hook = migrate_hook_start_fd,
+ .end_hook = migrate_hook_end_fd,
+ };
+ test_precopy_common(&args);
+}
+
+static void *migrate_hook_start_precopy_fd_file(QTestState *from,
+ QTestState *to)
+{
+ g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+ int src_flags = O_CREAT | O_RDWR;
+ int dst_flags = O_CREAT | O_RDWR;
+ int fds[2];
+
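+ /* Open the same file twice: fds[0] for the destination, fds[1] for the source */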
+ fds[0] = open(file, src_flags, 0660);
+ assert(fds[0] != -1);
+
+ fds[1] = open(file, dst_flags, 0660);
+ assert(fds[1] != -1);
+
+
+ qtest_qmp_fds_assert_success(to, &fds[0], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+
+ qtest_qmp_fds_assert_success(from, &fds[1], 1,
+ "{ 'execute': 'getfd',"
+ " 'arguments': { 'fdname': 'fd-mig' }}");
+
+ close(fds[0]);
+ close(fds[1]);
+
+ return NULL;
+}
+
+static void test_precopy_fd_file(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .connect_uri = "fd:fd-mig",
+ .start_hook = migrate_hook_start_precopy_fd_file,
+ .end_hook = migrate_hook_end_fd,
+ };
+ test_file_common(&args, true);
+}
+#endif /* _WIN32 */
+
+/*
+ * Because of the way auto_converge works, we need many passes to
+ * run this test. The auto_converge logic only runs once every
+ * three iterations, so:
+ *
+ * - 3 iterations without auto_converge enabled
+ * - 3 iterations with pct = 5
+ * - 3 iterations with pct = 30
+ * - 3 iterations with pct = 55
+ * - 3 iterations with pct = 80
+ * - 3 iterations with pct = 95 (max(95, 80 + 25))
+ *
+ * To make things even worse, we need to run the initial stage at
+ * 3MB/s so we enter auto-converge even when the host is (over)loaded.
+ */
+static void test_auto_converge(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateStart args = {};
+ QTestState *from, *to;
+ int64_t percentage;
+
+ /*
+ * We want the test to be stable and as fast as possible.
+ * E.g., with 1Gb/s bandwidth the migration may pass without throttling,
+ * so we need to decrease the bandwidth.
+ */
+ const int64_t init_pct = 5, inc_pct = 25, max_pct = 95;
+ uint64_t prev_dirty_sync_cnt, dirty_sync_cnt;
+ int max_try_count, hit = 0;
+
+ if (migrate_start(&from, &to, uri, &args)) {
+ return;
+ }
+
+ migrate_set_capability(from, "auto-converge", true);
+ migrate_set_parameter_int(from, "cpu-throttle-initial", init_pct);
+ migrate_set_parameter_int(from, "cpu-throttle-increment", inc_pct);
+ migrate_set_parameter_int(from, "max-cpu-throttle", max_pct);
+
+ /*
+ * Set the initial parameters so that the migration could not converge
+ * without throttling.
+ */
+ migrate_ensure_non_converge(from);
+
+ /* To check remaining size after precopy */
+ migrate_set_capability(from, "pause-before-switchover", true);
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ /* Wait for throttling to begin */
+ percentage = 0;
+ do {
+ percentage = read_migrate_property_int(from, "cpu-throttle-percentage");
+ if (percentage != 0) {
+ break;
+ }
+ usleep(20);
+ g_assert_false(get_src()->stop_seen);
+ } while (true);
+ /* The first percentage of throttling should be at least init_pct */
+ g_assert_cmpint(percentage, >=, init_pct);
+
+ /*
+ * End the loop once the dirty sync count is greater than 1.
+ */
+ while ((dirty_sync_cnt = get_migration_pass(from)) < 2) {
+ usleep(1000 * 1000);
+ }
+
+ prev_dirty_sync_cnt = dirty_sync_cnt;
+
+ /*
+ * The RAMBlock dirty sync count must change within 5 seconds; set
+ * the timeout to 10 seconds to make sure it does.
+ *
+ * Note that migrate_ensure_non_converge sets the max-bandwidth to 3MB/s,
+ * while the qtest memory is >= 100MB, so one iteration takes at least
+ * 33s (100/3) to complete; this guarantees that a RAMBlock dirty sync occurs.
+ */
+ max_try_count = 10;
+ while (--max_try_count) {
+ dirty_sync_cnt = get_migration_pass(from);
+ if (dirty_sync_cnt != prev_dirty_sync_cnt) {
+ hit = 1;
+ break;
+ }
+ prev_dirty_sync_cnt = dirty_sync_cnt;
+ sleep(1);
+ }
+ g_assert_cmpint(hit, ==, 1);
+
+ /* Now that we have verified that throttling works, let it converge */
+ migrate_ensure_converge(from);
+
+ /*
+ * Wait for the pre-switchover status to check the last throttle percentage
+ * and the remaining size. These values will be zeroed later.
+ */
+ wait_for_migration_status(from, "pre-switchover", NULL);
+
+ /* The final percentage of throttling shouldn't be greater than max_pct */
+ percentage = read_migrate_property_int(from, "cpu-throttle-percentage");
+ g_assert_cmpint(percentage, <=, max_pct);
+ migrate_continue(from, "pre-switchover");
+
+ qtest_qmp_eventwait(to, "RESUME");
+
+ wait_for_serial("dest_serial");
+ wait_for_migration_complete(from);
+
+ migrate_end(from, to, true);
+}
+
+static void *
+migrate_hook_start_precopy_tcp_multifd(QTestState *from,
+ QTestState *to)
+{
+ return migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+}
+
+static void *
+migrate_hook_start_precopy_tcp_multifd_zero_page_legacy(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ migrate_set_parameter_str(from, "zero-page-detection", "legacy");
+ return NULL;
+}
+
+static void *
+migrate_hook_start_precopy_tcp_multifd_no_zero_page(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ migrate_set_parameter_str(from, "zero-page-detection", "none");
+ return NULL;
+}
+
+static void test_multifd_tcp_uri_none(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_precopy_tcp_multifd,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ /*
+ * Multifd is more complicated than most other features: it sends
+ * directly from guest page buffers, so make sure everything still
+ * works even while guest pages are changing.
+ */
+ .live = true,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_zero_page_legacy(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_zero_page_legacy,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ /*
+ * Multifd is more complicated than most other features: it sends
+ * directly from guest page buffers, so make sure everything still
+ * works even while guest pages are changing.
+ */
+ .live = true,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_no_zero_page(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_precopy_tcp_multifd_no_zero_page,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ /*
+ * Multifd is more complicated than most other features: it sends
+ * directly from guest page buffers, so make sure everything still
+ * works even while guest pages are changing.
+ */
+ .live = true,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_channels_none(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_precopy_tcp_multifd,
+ .live = true,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .connect_channels = ("[ { 'channel-type': 'main',"
+ " 'addr': { 'transport': 'socket',"
+ " 'type': 'inet',"
+ " 'host': '127.0.0.1',"
+ " 'port': '0' } } ]"),
+ };
+ test_precopy_common(&args);
+}
+
+/*
+ * This test does:
+ * source target
+ * migrate_incoming
+ * migrate
+ * migrate_cancel
+ * launch another target
+ * migrate
+ *
+ * And see that it works
+ */
+static void test_multifd_tcp_cancel(bool postcopy_ram)
+{
+ MigrateStart args = {
+ .hide_stderr = true,
+ };
+ QTestState *from, *to, *to2;
+
+ if (migrate_start(&from, &to, "defer", &args)) {
+ return;
+ }
+
+ migrate_ensure_non_converge(from);
+ migrate_prepare_for_dirty_mem(from);
+
+ if (postcopy_ram) {
+ migrate_set_capability(from, "postcopy-ram", true);
+ migrate_set_capability(to, "postcopy-ram", true);
+ }
+
+ migrate_set_parameter_int(from, "multifd-channels", 16);
+ migrate_set_parameter_int(to, "multifd-channels", 16);
+
+ migrate_set_capability(from, "multifd", true);
+ migrate_set_capability(to, "multifd", true);
+
+ /* Start incoming migration from the 1st socket */
+ migrate_incoming_qmp(to, "tcp:127.0.0.1:0", NULL, "{}");
+
+ /* Wait for the first serial output from the source */
+ wait_for_serial("src_serial");
+
+ migrate_qmp(from, to, NULL, NULL, "{}");
+
+ migrate_wait_for_dirty_mem(from, to);
+
+ migrate_cancel(from);
+
+ /* Make sure QEMU process "to" exited */
+ qtest_set_expected_status(to, EXIT_FAILURE);
+ qtest_wait_qemu(to);
+ qtest_quit(to);
+
+ /*
+ * Ensure the source QEMU finishes its cancellation process before we
+ * proceed with the setup of the next migration. The migrate_start()
+ * function and others might want to interact with the source in a way that
+ * is not possible while the migration is not canceled properly. For
+ * example, setting migration capabilities when the migration is still
+ * running leads to an error.
+ */
+ wait_for_migration_status(from, "cancelled", NULL);
+
+ args = (MigrateStart){
+ .only_target = true,
+ };
+
+ if (migrate_start(&from, &to2, "defer", &args)) {
+ return;
+ }
+
+ if (postcopy_ram) {
+ migrate_set_capability(to2, "postcopy-ram", true);
+ }
+
+ migrate_set_parameter_int(to2, "multifd-channels", 16);
+
+ migrate_set_capability(to2, "multifd", true);
+
+ /* Start incoming migration from the 1st socket */
+ migrate_incoming_qmp(to2, "tcp:127.0.0.1:0", NULL, "{}");
+
+ migrate_ensure_non_converge(from);
+
+ migrate_qmp(from, to2, NULL, NULL, "{}");
+
+ migrate_wait_for_dirty_mem(from, to2);
+
+ migrate_ensure_converge(from);
+
+ wait_for_stop(from, get_src());
+ qtest_qmp_eventwait(to2, "RESUME");
+
+ wait_for_serial("dest_serial");
+ wait_for_migration_complete(from);
+ migrate_end(from, to2, true);
+}
+
+static void test_multifd_precopy_tcp_cancel(void)
+{
+ test_multifd_tcp_cancel(false);
+}
+
+static void test_multifd_postcopy_tcp_cancel(void)
+{
+ test_multifd_tcp_cancel(true);
+}
+
+static void test_cancel_src_after_failed(QTestState *from, QTestState *to,
+ const char *uri, const char *phase)
+{
+ /*
+ * There is no migrate_incoming_qmp() at the start, in order to force the
+ * source into the failed state during migrate_qmp().
+ */
+
+ wait_for_serial("src_serial");
+ migrate_ensure_converge(from);
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ migration_event_wait(from, phase);
+ migrate_cancel(from);
+
+ /* cancelling will not move the migration out of 'failed' */
+
+ wait_for_migration_status(from, "failed",
+ (const char * []) { "completed", NULL });
+
+ /*
+ * Not waiting for the destination because it never started
+ * migration.
+ */
+}
+
+static void test_cancel_src_after_cancelled(QTestState *from, QTestState *to,
+ const char *uri, const char *phase)
+{
+ migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }");
+
+ wait_for_serial("src_serial");
+ migrate_ensure_converge(from);
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ /* To move to cancelled/cancelling */
+ migrate_cancel(from);
+ migration_event_wait(from, phase);
+
+ /* The migrate_cancel under test */
+ migrate_cancel(from);
+
+ wait_for_migration_status(from, "cancelled",
+ (const char * []) { "completed", NULL });
+
+ wait_for_migration_status(to, "failed",
+ (const char * []) { "completed", NULL });
+}
+
+static void test_cancel_src_after_complete(QTestState *from, QTestState *to,
+ const char *uri, const char *phase)
+{
+ migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }");
+
+ wait_for_serial("src_serial");
+ migrate_ensure_converge(from);
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ migration_event_wait(from, phase);
+ migrate_cancel(from);
+
+ /*
+ * qmp_migrate_cancel() exits early if migration is not running
+ * anymore, so the status will not change to cancelled.
+ */
+ wait_for_migration_complete(from);
+ wait_for_migration_complete(to);
+}
+
+static void test_cancel_src_after_none(QTestState *from, QTestState *to,
+ const char *uri, const char *phase)
+{
+ /*
+ * Test that cancelling without a migration happening does not
+ * affect subsequent migrations
+ */
+ migrate_cancel(to);
+
+ wait_for_serial("src_serial");
+ migrate_cancel(from);
+
+ migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }");
+
+ migrate_ensure_converge(from);
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ wait_for_migration_complete(from);
+ wait_for_migration_complete(to);
+}
+
+static void test_cancel_src_pre_switchover(QTestState *from, QTestState *to,
+ const char *uri, const char *phase)
+{
+ migrate_set_capability(from, "pause-before-switchover", true);
+ migrate_set_capability(to, "pause-before-switchover", true);
+
+ migrate_set_capability(from, "multifd", true);
+ migrate_set_capability(to, "multifd", true);
+
+ migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }");
+
+ wait_for_serial("src_serial");
+ migrate_ensure_converge(from);
+
+ migrate_qmp(from, to, uri, NULL, "{}");
+
+ migration_event_wait(from, phase);
+ migrate_cancel(from);
+ migration_event_wait(from, "cancelling");
+
+ wait_for_migration_status(from, "cancelled",
+ (const char * []) { "completed", NULL });
+
+ wait_for_migration_status(to, "failed",
+ (const char * []) { "completed", NULL });
+}
+
+static void test_cancel_src_after_status(void *opaque)
+{
+ const char *test_path = opaque;
+ g_autofree char *phase = g_path_get_basename(test_path);
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ QTestState *from, *to;
+ MigrateStart args = {
+ .hide_stderr = true,
+ };
+
+ if (migrate_start(&from, &to, "defer", &args)) {
+ return;
+ }
+
+ if (g_str_equal(phase, "cancelling") ||
+ g_str_equal(phase, "cancelled")) {
+ test_cancel_src_after_cancelled(from, to, uri, phase);
+
+ } else if (g_str_equal(phase, "completed")) {
+ test_cancel_src_after_complete(from, to, uri, phase);
+
+ } else if (g_str_equal(phase, "failed")) {
+ test_cancel_src_after_failed(from, to, uri, phase);
+
+ } else if (g_str_equal(phase, "none")) {
+ test_cancel_src_after_none(from, to, uri, phase);
+
+ } else {
+ /* any state that comes before pre-switchover */
+ test_cancel_src_pre_switchover(from, to, uri, phase);
+ }
+
+ migrate_end(from, to, false);
+}
+
+static void calc_dirty_rate(QTestState *who, uint64_t calc_time)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'calc-dirty-rate',"
+ "'arguments': { "
+ "'calc-time': %" PRIu64 ","
+ "'mode': 'dirty-ring' }}",
+ calc_time);
+}
+
+static QDict *query_dirty_rate(QTestState *who)
+{
+ return qtest_qmp_assert_success_ref(who,
+ "{ 'execute': 'query-dirty-rate' }");
+}
+
+static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'set-vcpu-dirty-limit',"
+ "'arguments': { "
+ "'dirty-rate': %" PRIu64 " } }",
+ dirtyrate);
+}
+
+static void cancel_vcpu_dirty_limit(QTestState *who)
+{
+ qtest_qmp_assert_success(who,
+ "{ 'execute': 'cancel-vcpu-dirty-limit' }");
+}
+
+static QDict *query_vcpu_dirty_limit(QTestState *who)
+{
+ QDict *rsp;
+
+ rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }");
+ g_assert(!qdict_haskey(rsp, "error"));
+ g_assert(qdict_haskey(rsp, "return"));
+
+ return rsp;
+}
+
+static bool calc_dirtyrate_ready(QTestState *who)
+{
+ QDict *rsp_return;
+ const char *status;
+ bool ready;
+
+ rsp_return = query_dirty_rate(who);
+ g_assert(rsp_return);
+
+ status = qdict_get_str(rsp_return, "status");
+ g_assert(status);
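+ /* Ready once the status is no longer "measuring" */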
+ ready = g_strcmp0(status, "measuring");
+ qobject_unref(rsp_return);
+
+ return ready;
+}
+
+static void wait_for_calc_dirtyrate_complete(QTestState *who,
+ int64_t time_s)
+{
+ int max_try_count = 10000;
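+ /* Sleep through the requested calc time first, then poll for completion */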
+ usleep(time_s * 1000000);
+
+ while (!calc_dirtyrate_ready(who) && max_try_count--) {
+ usleep(1000);
+ }
+
+ /*
+ * Time out after 10 s (max_try_count * 1000 us); if the dirty rate
+ * measurement has not completed by then, fail the test.
+ */
+ g_assert_cmpint(max_try_count, !=, 0);
+}
+
+static int64_t get_dirty_rate(QTestState *who)
+{
+ QDict *rsp_return;
+ const char *status;
+ QList *rates;
+ const QListEntry *entry;
+ QDict *rate;
+ int64_t dirtyrate;
+
+ rsp_return = query_dirty_rate(who);
+ g_assert(rsp_return);
+
+ status = qdict_get_str(rsp_return, "status");
+ g_assert(status);
+ g_assert_cmpstr(status, ==, "measured");
+
+ rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate");
+ g_assert(rates && !qlist_empty(rates));
+
+ entry = qlist_first(rates);
+ g_assert(entry);
+
+ rate = qobject_to(QDict, qlist_entry_obj(entry));
+ g_assert(rate);
+
+ dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1);
+
+ qobject_unref(rsp_return);
+ return dirtyrate;
+}
+
+static int64_t get_limit_rate(QTestState *who)
+{
+ QDict *rsp_return;
+ QList *rates;
+ const QListEntry *entry;
+ QDict *rate;
+ int64_t dirtyrate;
+
+ rsp_return = query_vcpu_dirty_limit(who);
+ g_assert(rsp_return);
+
+ rates = qdict_get_qlist(rsp_return, "return");
+ g_assert(rates && !qlist_empty(rates));
+
+ entry = qlist_first(rates);
+ g_assert(entry);
+
+ rate = qobject_to(QDict, qlist_entry_obj(entry));
+ g_assert(rate);
+
+ dirtyrate = qdict_get_try_int(rate, "limit-rate", -1);
+
+ qobject_unref(rsp_return);
+ return dirtyrate;
+}
+
+static QTestState *dirtylimit_start_vm(void)
+{
+ QTestState *vm = NULL;
+ g_autofree gchar *cmd = NULL;
+ const char *bootpath;
+
+ bootpath = bootfile_create(qtest_get_arch(), tmpfs, false);
+ cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 "
+ "-name dirtylimit-test,debug-threads=on "
+ "-m 150M -smp 1 "
+ "-serial file:%s/vm_serial "
+ "-drive file=%s,format=raw ",
+ tmpfs, bootpath);
+
+ vm = qtest_init(cmd);
+ return vm;
+}
+
+static void dirtylimit_stop_vm(QTestState *vm)
+{
+ g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, "vm_serial");
+
+ qtest_quit(vm);
+ unlink(path);
+}
+
+static void test_vcpu_dirty_limit(void)
+{
+ QTestState *vm;
+ int64_t origin_rate;
+ int64_t quota_rate;
+ int64_t rate;
+ int max_try_count = 20;
+ int hit = 0;
+
+ /* Start vm for vcpu dirtylimit test */
+ vm = dirtylimit_start_vm();
+
+ /* Wait for the first serial output from the VM */
+ wait_for_serial("vm_serial");
+
+ /* Measure the dirty rate with a calc time of 1 s */
+ calc_dirty_rate(vm, 1);
+
+ /* Sleep through the calc time and wait for the measurement to complete */
+ wait_for_calc_dirtyrate_complete(vm, 1);
+
+ /* Query original dirty page rate */
+ origin_rate = get_dirty_rate(vm);
+
+ /* VM booted from bootsect should dirty memory steadily */
+ assert(origin_rate != 0);
+
+ /* Set the quota dirty page rate to half of the original rate */
+ quota_rate = origin_rate / 2;
+
+ /* Set dirtylimit */
+ dirtylimit_set_all(vm, quota_rate);
+
+ /*
+ * Check that query-vcpu-dirty-limit returns exactly the value
+ * configured via set-vcpu-dirty-limit
+ */
+ g_assert_cmpint(quota_rate, ==, get_limit_rate(vm));
+
+ /* Sleep a bit before checking whether the limit takes effect */
+ usleep(2000000);
+
+ /*
+ * Check whether the dirty limit actually takes effect. Time out
+ * after 20 s (max_try_count * 1 s); if the limit does not take
+ * effect by then, fail the test.
+ */
+ while (--max_try_count) {
+ calc_dirty_rate(vm, 1);
+ wait_for_calc_dirtyrate_complete(vm, 1);
+ rate = get_dirty_rate(vm);
+
+ /*
+ * Consider the limit hit if the current rate is below the
+ * quota rate (within the accepted tolerance)
+ */
+ if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+ hit = 1;
+ break;
+ }
+ }
+
+ g_assert_cmpint(hit, ==, 1);
+
+ hit = 0;
+ max_try_count = 20;
+
+ /* Check that dirty limit cancellation takes effect */
+ cancel_vcpu_dirty_limit(vm);
+ while (--max_try_count) {
+ calc_dirty_rate(vm, 1);
+ wait_for_calc_dirtyrate_complete(vm, 1);
+ rate = get_dirty_rate(vm);
+
+ /*
+ * Consider the limit cancelled if the current rate rises back
+ * above the quota rate (within the accepted tolerance)
+ */
+ if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+ hit = 1;
+ break;
+ }
+ }
+
+ g_assert_cmpint(hit, ==, 1);
+ dirtylimit_stop_vm(vm);
+}
+
+static void migrate_dirty_limit_wait_showup(QTestState *from,
+ const int64_t period,
+ const int64_t value)
+{
+ /* Enable dirty limit capability */
+ migrate_set_capability(from, "dirty-limit", true);
+
+ /* Set dirty limit parameters */
+ migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period);
+ migrate_set_parameter_int(from, "vcpu-dirty-limit", value);
+
+ /* Make sure migration can't converge */
+ migrate_ensure_non_converge(from);
+
+ /* Pause before switchover so the limit rate can be checked after precopy */
+ migrate_set_capability(from, "pause-before-switchover", true);
+
+ /* Wait for the serial output from the source */
+ wait_for_serial("src_serial");
+}
+
+/*
+ * This test does:
+ * source destination
+ * start vm
+ * start incoming vm
+ * migrate
+ * wait for dirty limit to begin
+ * cancel migration
+ * cancellation check
+ * restart incoming vm
+ * migrate
+ * wait for dirty limit to begin
+ * wait for pre-switchover event
+ * convergence condition check
+ *
+ * And see if dirty limit migration works correctly.
+ * This test case involves many passes, so it runs in slow mode only.
+ */
+static void test_dirty_limit(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ QTestState *from, *to;
+ int64_t remaining;
+ uint64_t throttle_us_per_full;
+ /*
+ * We want the test to be stable and as fast as possible.
+ * E.g., with 1Gb/s bandwidth the migration may pass without the dirty
+ * limit kicking in, so we need to decrease the bandwidth.
+ */
+ const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;
+ const int64_t max_bandwidth = 400000000; /* ~400Mb/s */
+ const int64_t downtime_limit = 250; /* 250ms */
+ /*
+ * We migrate through a unix socket (> 500Mb/s).
+ * Thus, the expected migration speed is roughly the bandwidth limit
+ * (< 500Mb/s), so we can predict expected_threshold: the amount of data
+ * that can still be transferred within the allowed downtime.
+ */
+ const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000;
+ int max_try_count = 10;
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ .use_dirty_ring = true,
+ },
+ .listen_uri = uri,
+ .connect_uri = uri,
+ };
+
+ /* Start src, dst vm */
+ if (migrate_start(&from, &to, args.listen_uri, &args.start)) {
+ return;
+ }
+
+ /* Prepare for dirty limit migration and wait for the source VM to show up */
+ migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value);
+
+ /* Start migrate */
+ migrate_qmp(from, to, args.connect_uri, NULL, "{}");
+
+ /* Wait for the dirty limit throttle to begin */
+ throttle_us_per_full = 0;
+ while (throttle_us_per_full == 0) {
+ throttle_us_per_full =
+ read_migrate_property_int(from,
+ "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(get_src()->stop_seen);
+ }
+
+ /* Now cancel the migration and wait for the dirty limit throttle to switch off */
+ migrate_cancel(from);
+ wait_for_migration_status(from, "cancelled", NULL);
+
+ /* destination always fails after cancel */
+ migration_event_wait(to, "failed");
+ qtest_set_expected_status(to, EXIT_FAILURE);
+ qtest_quit(to);
+
+ /* Check that the dirty limit throttle switched off; time out after 1 ms */
+ do {
+ throttle_us_per_full =
+ read_migrate_property_int(from,
+ "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(get_src()->stop_seen);
+ } while (throttle_us_per_full != 0 && --max_try_count);
+
+ /* Assert dirty limit is not in service */
+ g_assert_cmpint(throttle_us_per_full, ==, 0);
+
+ args = (MigrateCommon) {
+ .start = {
+ .only_target = true,
+ .use_dirty_ring = true,
+ },
+ .listen_uri = uri,
+ .connect_uri = uri,
+ };
+
+ /* Restart the destination VM; the source VM is already up, so no need to wait again */
+ if (migrate_start(&from, &to, args.listen_uri, &args.start)) {
+ return;
+ }
+
+ /* Start migrate */
+ migrate_qmp(from, to, args.connect_uri, NULL, "{}");
+
+ /* Wait for the dirty limit throttle to begin */
+ throttle_us_per_full = 0;
+ while (throttle_us_per_full == 0) {
+ throttle_us_per_full =
+ read_migrate_property_int(from,
+ "dirty-limit-throttle-time-per-round");
+ usleep(100);
+ g_assert_false(get_src()->stop_seen);
+ }
+
+ /*
+ * The dirty limit rate should equal the return value of
+ * query-vcpu-dirty-limit when the dirty-limit capability is set
+ */
+ g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from));
+
+ /* Now that we have tested that the dirty limit works, let the migration converge */
+ migrate_set_parameter_int(from, "downtime-limit", downtime_limit);
+ migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth);
+
+ /*
+ * Wait for the pre-switchover status to check whether the migration
+ * satisfies the convergence condition
+ */
+ wait_for_migration_status(from, "pre-switchover", NULL);
+
+ remaining = read_ram_property_int(from, "remaining");
+ g_assert_cmpint(remaining, <,
+ (expected_threshold + expected_threshold / 100));
+
+ migrate_continue(from, "pre-switchover");
+
+ qtest_qmp_eventwait(to, "RESUME");
+
+ wait_for_serial("dest_serial");
+ wait_for_migration_complete(from);
+
+ migrate_end(from, to, true);
+}
+
+static void migration_test_add_precopy_smoke(MigrationTestEnv *env)
+{
+ if (env->is_x86) {
+ migration_test_add("/migration/precopy/unix/suspend/live",
+ test_precopy_unix_suspend_live);
+ migration_test_add("/migration/precopy/unix/suspend/notlive",
+ test_precopy_unix_suspend_notlive);
+ }
+
+ migration_test_add("/migration/precopy/unix/plain",
+ test_precopy_unix_plain);
+
+ migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain);
+ migration_test_add("/migration/multifd/tcp/uri/plain/none",
+ test_multifd_tcp_uri_none);
+ migration_test_add("/migration/multifd/tcp/plain/cancel",
+ test_multifd_precopy_tcp_cancel);
+ if (env->has_uffd) {
+ migration_test_add("/migration/multifd+postcopy/tcp/plain/cancel",
+ test_multifd_postcopy_tcp_cancel);
+ }
+
+#ifdef CONFIG_RDMA
+ migration_test_add("/migration/precopy/rdma/plain",
+ test_precopy_rdma_plain);
+ migration_test_add("/migration/precopy/rdma/plain/ipv6",
+ test_precopy_rdma_plain_ipv6);
+#endif
+}
+
+void migration_test_add_precopy(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ migration_test_add_precopy_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+ migration_test_add("/migration/precopy/tcp/plain/switchover-ack",
+ test_precopy_tcp_switchover_ack);
+
+#ifndef _WIN32
+ migration_test_add("/migration/precopy/fd/tcp",
+ test_precopy_fd_socket);
+ migration_test_add("/migration/precopy/fd/file",
+ test_precopy_fd_file);
+#endif
+
+ /*
+ * See the explanation of why this test is slow at its function definition
+ */
+ if (g_test_slow()) {
+ migration_test_add("/migration/auto_converge",
+ test_auto_converge);
+ if (g_str_equal(env->arch, "x86_64") &&
+ env->has_kvm && env->has_dirty_ring) {
+ migration_test_add("/dirty_limit",
+ test_dirty_limit);
+ }
+ }
+ migration_test_add("/migration/multifd/tcp/channels/plain/none",
+ test_multifd_tcp_channels_none);
+ migration_test_add("/migration/multifd/tcp/plain/zero-page/legacy",
+ test_multifd_tcp_zero_page_legacy);
+ migration_test_add("/migration/multifd/tcp/plain/zero-page/none",
+ test_multifd_tcp_no_zero_page);
+ if (g_str_equal(env->arch, "x86_64")
+ && env->has_kvm && env->has_dirty_ring) {
+
+ migration_test_add("/migration/dirty_ring",
+ test_precopy_unix_dirty_ring);
+ if (qtest_has_machine("pc") && g_test_slow()) {
+ migration_test_add("/migration/vcpu_dirty_limit",
+ test_vcpu_dirty_limit);
+ }
+ }
+
+ /* ensure new statuses don't go unnoticed */
+ assert(MIGRATION_STATUS__MAX == 15);
+
+ for (int i = MIGRATION_STATUS_NONE; i < MIGRATION_STATUS__MAX; i++) {
+ switch (i) {
+ case MIGRATION_STATUS_DEVICE: /* happens too fast */
+ case MIGRATION_STATUS_WAIT_UNPLUG: /* no support in tests */
+ case MIGRATION_STATUS_COLO: /* no support in tests */
+ case MIGRATION_STATUS_POSTCOPY_ACTIVE: /* postcopy can't be cancelled */
+ case MIGRATION_STATUS_POSTCOPY_PAUSED:
+ case MIGRATION_STATUS_POSTCOPY_RECOVER_SETUP:
+ case MIGRATION_STATUS_POSTCOPY_RECOVER:
+ continue;
+ default:
+ migration_test_add_suffix("/migration/cancel/src/after/",
+ MigrationStatus_str(i),
+ test_cancel_src_after_status);
+ }
+ }
+}
diff --git a/tests/migration/s390x/Makefile b/tests/qtest/migration/s390x/Makefile
index 6671de2..6671de2 100644
--- a/tests/migration/s390x/Makefile
+++ b/tests/qtest/migration/s390x/Makefile
diff --git a/tests/migration/s390x/a-b-bios.c b/tests/qtest/migration/s390x/a-b-bios.c
index ff99a3e..ff99a3e 100644
--- a/tests/migration/s390x/a-b-bios.c
+++ b/tests/qtest/migration/s390x/a-b-bios.c
diff --git a/tests/migration/s390x/a-b-bios.h b/tests/qtest/migration/s390x/a-b-bios.h
index 96103da..96103da 100644
--- a/tests/migration/s390x/a-b-bios.h
+++ b/tests/qtest/migration/s390x/a-b-bios.h
diff --git a/tests/qtest/migration/tls-tests.c b/tests/qtest/migration/tls-tests.c
new file mode 100644
index 0000000..21e9fec
--- /dev/null
+++ b/tests/qtest/migration/tls-tests.c
@@ -0,0 +1,871 @@
+/*
+ * QTest testcases for TLS migration
+ *
+ * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
+ * based on the vhost-user-test.c that is:
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/tlscredspsk.h"
+#include "libqtest.h"
+#include "migration/framework.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+
+#include "tests/unit/crypto-tls-psk-helpers.h"
+#ifdef CONFIG_TASN1
+# include "tests/unit/crypto-tls-x509-helpers.h"
+#endif /* CONFIG_TASN1 */
+
+
+struct TestMigrateTLSPSKData {
+ char *workdir;
+ char *workdiralt;
+ char *pskfile;
+ char *pskfilealt;
+};
+
+static char *tmpfs;
+
+static void *
+migrate_hook_start_tls_psk_common(QTestState *from,
+ QTestState *to,
+ bool mismatch)
+{
+ struct TestMigrateTLSPSKData *data =
+ g_new0(struct TestMigrateTLSPSKData, 1);
+
+ data->workdir = g_strdup_printf("%s/tlscredspsk0", tmpfs);
+ data->pskfile = g_strdup_printf("%s/%s", data->workdir,
+ QCRYPTO_TLS_CREDS_PSKFILE);
+ g_mkdir_with_parents(data->workdir, 0700);
+ test_tls_psk_init(data->pskfile);
+
+ if (mismatch) {
+ data->workdiralt = g_strdup_printf("%s/tlscredspskalt0", tmpfs);
+ data->pskfilealt = g_strdup_printf("%s/%s", data->workdiralt,
+ QCRYPTO_TLS_CREDS_PSKFILE);
+ g_mkdir_with_parents(data->workdiralt, 0700);
+ test_tls_psk_init_alt(data->pskfilealt);
+ }
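+ /* With a mismatch, the server ("to") below gets the alternate PSK file */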
+
+ qtest_qmp_assert_success(from,
+ "{ 'execute': 'object-add',"
+ " 'arguments': { 'qom-type': 'tls-creds-psk',"
+ " 'id': 'tlscredspsk0',"
+ " 'endpoint': 'client',"
+ " 'dir': %s,"
+ " 'username': 'qemu'} }",
+ data->workdir);
+
+ qtest_qmp_assert_success(to,
+ "{ 'execute': 'object-add',"
+ " 'arguments': { 'qom-type': 'tls-creds-psk',"
+ " 'id': 'tlscredspsk0',"
+ " 'endpoint': 'server',"
+ " 'dir': %s } }",
+ mismatch ? data->workdiralt : data->workdir);
+
+ migrate_set_parameter_str(from, "tls-creds", "tlscredspsk0");
+ migrate_set_parameter_str(to, "tls-creds", "tlscredspsk0");
+
+ return data;
+}
+
+static void *
+migrate_hook_start_tls_psk_match(QTestState *from,
+ QTestState *to)
+{
+ return migrate_hook_start_tls_psk_common(from, to, false);
+}
+
+static void *
+migrate_hook_start_tls_psk_mismatch(QTestState *from,
+ QTestState *to)
+{
+ return migrate_hook_start_tls_psk_common(from, to, true);
+}
+
+static void
+migrate_hook_end_tls_psk(QTestState *from,
+ QTestState *to,
+ void *opaque)
+{
+ struct TestMigrateTLSPSKData *data = opaque;
+
+ test_tls_psk_cleanup(data->pskfile);
+ if (data->pskfilealt) {
+ test_tls_psk_cleanup(data->pskfilealt);
+ }
+ rmdir(data->workdir);
+ if (data->workdiralt) {
+ rmdir(data->workdiralt);
+ }
+
+ g_free(data->workdiralt);
+ g_free(data->pskfilealt);
+ g_free(data->workdir);
+ g_free(data->pskfile);
+ g_free(data);
+}
+
+#ifdef CONFIG_TASN1
+typedef struct {
+ char *workdir;
+ char *keyfile;
+ char *cacert;
+ char *servercert;
+ char *serverkey;
+ char *clientcert;
+ char *clientkey;
+} TestMigrateTLSX509Data;
+
+typedef struct {
+ bool verifyclient;
+ bool clientcert;
+ bool hostileclient;
+ bool authzclient;
+ const char *certhostname;
+ const char *certipaddr;
+} TestMigrateTLSX509;
+
+static void *
+migrate_hook_start_tls_x509_common(QTestState *from,
+ QTestState *to,
+ TestMigrateTLSX509 *args)
+{
+ TestMigrateTLSX509Data *data = g_new0(TestMigrateTLSX509Data, 1);
+
+ data->workdir = g_strdup_printf("%s/tlscredsx5090", tmpfs);
+ data->keyfile = g_strdup_printf("%s/key.pem", data->workdir);
+
+ data->cacert = g_strdup_printf("%s/ca-cert.pem", data->workdir);
+ data->serverkey = g_strdup_printf("%s/server-key.pem", data->workdir);
+ data->servercert = g_strdup_printf("%s/server-cert.pem", data->workdir);
+ if (args->clientcert) {
+ data->clientkey = g_strdup_printf("%s/client-key.pem", data->workdir);
+ data->clientcert = g_strdup_printf("%s/client-cert.pem", data->workdir);
+ }
+
+ g_mkdir_with_parents(data->workdir, 0700);
+
+ test_tls_init(data->keyfile);
+#ifndef _WIN32
+ g_assert(link(data->keyfile, data->serverkey) == 0);
+#else
+ g_assert(CreateHardLink(data->serverkey, data->keyfile, NULL) != 0);
+#endif
+ if (args->clientcert) {
+#ifndef _WIN32
+ g_assert(link(data->keyfile, data->clientkey) == 0);
+#else
+ g_assert(CreateHardLink(data->clientkey, data->keyfile, NULL) != 0);
+#endif
+ }
+
+ TLS_ROOT_REQ_SIMPLE(cacertreq, data->cacert);
+ if (args->clientcert) {
+ TLS_CERT_REQ_SIMPLE_CLIENT(servercertreq, cacertreq,
+ args->hostileclient ?
+ QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME :
+ QCRYPTO_TLS_TEST_CLIENT_NAME,
+ data->clientcert);
+ test_tls_deinit_cert(&servercertreq);
+ }
+
+ TLS_CERT_REQ_SIMPLE_SERVER(clientcertreq, cacertreq,
+ data->servercert,
+ args->certhostname,
+ args->certipaddr);
+ test_tls_deinit_cert(&clientcertreq);
+ test_tls_deinit_cert(&cacertreq);
+
+ qtest_qmp_assert_success(from,
+ "{ 'execute': 'object-add',"
+ " 'arguments': { 'qom-type': 'tls-creds-x509',"
+ " 'id': 'tlscredsx509client0',"
+ " 'endpoint': 'client',"
+ " 'dir': %s,"
+ " 'sanity-check': true,"
+ " 'verify-peer': true} }",
+ data->workdir);
+ migrate_set_parameter_str(from, "tls-creds", "tlscredsx509client0");
+ if (args->certhostname) {
+ migrate_set_parameter_str(from, "tls-hostname", args->certhostname);
+ }
+
+ qtest_qmp_assert_success(to,
+ "{ 'execute': 'object-add',"
+ " 'arguments': { 'qom-type': 'tls-creds-x509',"
+ " 'id': 'tlscredsx509server0',"
+ " 'endpoint': 'server',"
+ " 'dir': %s,"
+ " 'sanity-check': true,"
+ " 'verify-peer': %i} }",
+ data->workdir, args->verifyclient);
+ migrate_set_parameter_str(to, "tls-creds", "tlscredsx509server0");
+
+ if (args->authzclient) {
+ qtest_qmp_assert_success(to,
+ "{ 'execute': 'object-add',"
+ " 'arguments': { 'qom-type': 'authz-simple',"
+ " 'id': 'tlsauthz0',"
+ " 'identity': %s} }",
+ "CN=" QCRYPTO_TLS_TEST_CLIENT_NAME);
+ migrate_set_parameter_str(to, "tls-authz", "tlsauthz0");
+ }
+
+ return data;
+}
+
+/*
+ * The normal case: match server's cert hostname against
+ * whatever host we were telling QEMU to connect to (if any)
+ */
+static void *
+migrate_hook_start_tls_x509_default_host(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .clientcert = true,
+ .certipaddr = "127.0.0.1"
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+/*
+ * The unusual case: the server's cert is different from
+ * the address we're telling QEMU to connect to (if any),
+ * so we must give QEMU an explicit hostname to validate
+ */
+static void *
+migrate_hook_start_tls_x509_override_host(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .clientcert = true,
+ .certhostname = "qemu.org",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+/*
+ * The unusual case: the server's cert is different from
+ * the address we're telling QEMU to connect to, and so we
+ * expect the client to reject the server
+ */
+static void *
+migrate_hook_start_tls_x509_mismatch_host(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .clientcert = true,
+ .certipaddr = "10.0.0.1",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+static void *
+migrate_hook_start_tls_x509_friendly_client(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .clientcert = true,
+ .authzclient = true,
+ .certipaddr = "127.0.0.1",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+static void *
+migrate_hook_start_tls_x509_hostile_client(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .clientcert = true,
+ .hostileclient = true,
+ .authzclient = true,
+ .certipaddr = "127.0.0.1",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+/*
+ * The case with no client certificate presented,
+ * and no server verification
+ */
+static void *
+migrate_hook_start_tls_x509_allow_anon_client(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .certipaddr = "127.0.0.1",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+/*
+ * The case with no client certificate presented,
+ * and server verification rejecting
+ */
+static void *
+migrate_hook_start_tls_x509_reject_anon_client(QTestState *from,
+ QTestState *to)
+{
+ TestMigrateTLSX509 args = {
+ .verifyclient = true,
+ .certipaddr = "127.0.0.1",
+ };
+ return migrate_hook_start_tls_x509_common(from, to, &args);
+}
+
+static void
+migrate_hook_end_tls_x509(QTestState *from,
+ QTestState *to,
+ void *opaque)
+{
+ TestMigrateTLSX509Data *data = opaque;
+
+ test_tls_cleanup(data->keyfile);
+ g_free(data->keyfile);
+
+ unlink(data->cacert);
+ g_free(data->cacert);
+ unlink(data->servercert);
+ g_free(data->servercert);
+ unlink(data->serverkey);
+ g_free(data->serverkey);
+
+ if (data->clientcert) {
+ unlink(data->clientcert);
+ g_free(data->clientcert);
+ }
+ if (data->clientkey) {
+ unlink(data->clientkey);
+ g_free(data->clientkey);
+ }
+
+ rmdir(data->workdir);
+ g_free(data->workdir);
+
+ g_free(data);
+}
+#endif /* CONFIG_TASN1 */
+
+static void test_postcopy_tls_psk(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ };
+
+ test_postcopy_common(&args);
+}
+
+static void test_postcopy_preempt_tls_psk(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_common(&args);
+}
+
+static void test_postcopy_recovery_tls_psk(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_multifd_postcopy_recovery_tls_psk(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+/* This covers the preempt, recovery and TLS tests all together */
+static void test_postcopy_preempt_all(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_multifd_postcopy_preempt_recovery_tls_psk(void)
+{
+ MigrateCommon args = {
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true,
+ },
+ };
+
+ test_postcopy_recovery_common(&args);
+}
+
+static void test_precopy_unix_tls_psk(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = uri,
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ };
+
+ test_precopy_common(&args);
+}
+
+#ifdef CONFIG_TASN1
+static void test_precopy_unix_tls_x509_default_host(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .connect_uri = uri,
+ .listen_uri = uri,
+ .start_hook = migrate_hook_start_tls_x509_default_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL_DEST_QUIT_ERR,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_unix_tls_x509_override_host(void)
+{
+ g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+ MigrateCommon args = {
+ .connect_uri = uri,
+ .listen_uri = uri,
+ .start_hook = migrate_hook_start_tls_x509_override_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ };
+
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_TASN1 */
+
+static void test_precopy_tcp_tls_psk_match(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_psk_mismatch(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_psk_mismatch,
+ .end_hook = migrate_hook_end_tls_psk,
+ .result = MIG_TEST_FAIL,
+ };
+
+ test_precopy_common(&args);
+}
+
+#ifdef CONFIG_TASN1
+static void test_precopy_tcp_tls_x509_default_host(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_default_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_override_host(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_override_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_mismatch_host(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_mismatch_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL_DEST_QUIT_ERR,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_friendly_client(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_friendly_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_hostile_client(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_hostile_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_allow_anon_client(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_allow_anon_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ };
+
+ test_precopy_common(&args);
+}
+
+static void test_precopy_tcp_tls_x509_reject_anon_client(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ },
+ .listen_uri = "tcp:127.0.0.1:0",
+ .start_hook = migrate_hook_start_tls_x509_reject_anon_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL,
+ };
+
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_TASN1 */
+
+static void *
+migrate_hook_start_multifd_tcp_tls_psk_match(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_psk_match(from, to);
+}
+
+static void *
+migrate_hook_start_multifd_tcp_tls_psk_mismatch(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_psk_mismatch(from, to);
+}
+
+#ifdef CONFIG_TASN1
+static void *
+migrate_hook_start_multifd_tls_x509_default_host(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_x509_default_host(from, to);
+}
+
+static void *
+migrate_hook_start_multifd_tls_x509_override_host(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_x509_override_host(from, to);
+}
+
+static void *
+migrate_hook_start_multifd_tls_x509_mismatch_host(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_x509_mismatch_host(from, to);
+}
+
+static void *
+migrate_hook_start_multifd_tls_x509_allow_anon_client(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_x509_allow_anon_client(from, to);
+}
+
+static void *
+migrate_hook_start_multifd_tls_x509_reject_anon_client(QTestState *from,
+ QTestState *to)
+{
+ migrate_hook_start_precopy_tcp_multifd_common(from, to, "none");
+ return migrate_hook_start_tls_x509_reject_anon_client(from, to);
+}
+#endif /* CONFIG_TASN1 */
+
+static void test_multifd_tcp_tls_psk_match(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tcp_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_tls_psk_mismatch(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tcp_tls_psk_mismatch,
+ .end_hook = migrate_hook_end_tls_psk,
+ .result = MIG_TEST_FAIL,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_postcopy_tcp_tls_psk_match(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ .caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true,
+ },
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tcp_tls_psk_match,
+ .end_hook = migrate_hook_end_tls_psk,
+ };
+
+ test_precopy_common(&args);
+}
+
+#ifdef CONFIG_TASN1
+static void test_multifd_tcp_tls_x509_default_host(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tls_x509_default_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_tls_x509_override_host(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tls_x509_override_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_tls_x509_mismatch_host(void)
+{
+ /*
+ * This has different behaviour to the non-multifd case.
+ *
+ * In the non-multifd case, when the client aborts due to a mismatched
+ * cert host, the server has already started trying to load the
+ * migration state, and so it exits with an I/O failure.
+ *
+ * In the multifd case, when the client aborts due to a mismatched
+ * cert host, the server is still waiting for the other multifd
+ * connections to arrive, so it hasn't started trying to load the
+ * migration state and thus just aborts the migration without exiting.
+ */
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tls_x509_mismatch_host,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL,
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_tls_x509_allow_anon_client(void)
+{
+ MigrateCommon args = {
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tls_x509_allow_anon_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ .start = {
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ };
+ test_precopy_common(&args);
+}
+
+static void test_multifd_tcp_tls_x509_reject_anon_client(void)
+{
+ MigrateCommon args = {
+ .start = {
+ .hide_stderr = true,
+ .caps[MIGRATION_CAPABILITY_MULTIFD] = true,
+ },
+ .listen_uri = "defer",
+ .start_hook = migrate_hook_start_multifd_tls_x509_reject_anon_client,
+ .end_hook = migrate_hook_end_tls_x509,
+ .result = MIG_TEST_FAIL,
+ };
+ test_precopy_common(&args);
+}
+#endif /* CONFIG_TASN1 */
+
+static void migration_test_add_tls_smoke(MigrationTestEnv *env)
+{
+ migration_test_add("/migration/precopy/tcp/tls/psk/match",
+ test_precopy_tcp_tls_psk_match);
+}
+
+void migration_test_add_tls(MigrationTestEnv *env)
+{
+ tmpfs = env->tmpfs;
+
+ migration_test_add_tls_smoke(env);
+
+ if (!env->full_set) {
+ return;
+ }
+
+ migration_test_add("/migration/precopy/unix/tls/psk",
+ test_precopy_unix_tls_psk);
+
+ if (env->has_uffd) {
+ /*
+ * NOTE: the PSK test is enough for postcopy, as the other types of TLS
+ * channels are tested under precopy. What we want to test here is the
+ * general postcopy path with a TLS channel enabled.
+ */
+ migration_test_add("/migration/postcopy/tls/psk",
+ test_postcopy_tls_psk);
+ migration_test_add("/migration/postcopy/recovery/tls/psk",
+ test_postcopy_recovery_tls_psk);
+ migration_test_add("/migration/postcopy/preempt/tls/psk",
+ test_postcopy_preempt_tls_psk);
+ migration_test_add("/migration/postcopy/preempt/recovery/tls/psk",
+ test_postcopy_preempt_all);
+ migration_test_add("/migration/multifd+postcopy/recovery/tls/psk",
+ test_multifd_postcopy_recovery_tls_psk);
+ migration_test_add(
+ "/migration/multifd+postcopy/preempt/recovery/tls/psk",
+ test_multifd_postcopy_preempt_recovery_tls_psk);
+ }
+#ifdef CONFIG_TASN1
+ migration_test_add("/migration/precopy/unix/tls/x509/default-host",
+ test_precopy_unix_tls_x509_default_host);
+ migration_test_add("/migration/precopy/unix/tls/x509/override-host",
+ test_precopy_unix_tls_x509_override_host);
+#endif /* CONFIG_TASN1 */
+
+ migration_test_add("/migration/precopy/tcp/tls/psk/mismatch",
+ test_precopy_tcp_tls_psk_mismatch);
+#ifdef CONFIG_TASN1
+ migration_test_add("/migration/precopy/tcp/tls/x509/default-host",
+ test_precopy_tcp_tls_x509_default_host);
+ migration_test_add("/migration/precopy/tcp/tls/x509/override-host",
+ test_precopy_tcp_tls_x509_override_host);
+ migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host",
+ test_precopy_tcp_tls_x509_mismatch_host);
+ migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client",
+ test_precopy_tcp_tls_x509_friendly_client);
+ migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client",
+ test_precopy_tcp_tls_x509_hostile_client);
+ migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client",
+ test_precopy_tcp_tls_x509_allow_anon_client);
+ migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client",
+ test_precopy_tcp_tls_x509_reject_anon_client);
+#endif /* CONFIG_TASN1 */
+
+ migration_test_add("/migration/multifd/tcp/tls/psk/match",
+ test_multifd_tcp_tls_psk_match);
+ migration_test_add("/migration/multifd/tcp/tls/psk/mismatch",
+ test_multifd_tcp_tls_psk_mismatch);
+ if (env->has_uffd) {
+ migration_test_add("/migration/multifd+postcopy/tcp/tls/psk/match",
+ test_multifd_postcopy_tcp_tls_psk_match);
+ }
+#ifdef CONFIG_TASN1
+ migration_test_add("/migration/multifd/tcp/tls/x509/default-host",
+ test_multifd_tcp_tls_x509_default_host);
+ migration_test_add("/migration/multifd/tcp/tls/x509/override-host",
+ test_multifd_tcp_tls_x509_override_host);
+ migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host",
+ test_multifd_tcp_tls_x509_mismatch_host);
+ migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client",
+ test_multifd_tcp_tls_x509_allow_anon_client);
+ migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client",
+ test_multifd_tcp_tls_x509_reject_anon_client);
+#endif /* CONFIG_TASN1 */
+}
diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c
index fc7d119..b731af0 100644
--- a/tests/qtest/netdev-socket.c
+++ b/tests/qtest/netdev-socket.c
@@ -11,7 +11,7 @@
#include <glib/gstdio.h>
#include "../unit/socket-helpers.h"
#include "libqtest.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qemu/sockets.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-sockets.h"
@@ -204,7 +204,7 @@ static void test_stream_unix_reconnect(void)
qts1 = qtest_initf("-nodefaults -M none "
"-netdev stream,server=false,id=st0,addr.type=unix,"
- "addr.path=%s,reconnect=1", path);
+ "addr.path=%s,reconnect-ms=1000", path);
wait_stream_connected(qts0, "st0", &addr);
g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_UNIX);
diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c
index e751a72..8bc89b8 100644
--- a/tests/qtest/npcm7xx_adc-test.c
+++ b/tests/qtest/npcm7xx_adc-test.c
@@ -18,7 +18,7 @@
#include "qemu/bitops.h"
#include "qemu/timer.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define REF_HZ (25000000)
diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c
index 2e1a1a6..eeedb27 100644
--- a/tests/qtest/npcm7xx_emc-test.c
+++ b/tests/qtest/npcm7xx_emc-test.c
@@ -16,8 +16,8 @@
#include "qemu/osdep.h"
#include "libqos/libqos.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
#include "qemu/bitops.h"
#include "qemu/iov.h"
diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c
index b53a43c..052ea87 100644
--- a/tests/qtest/npcm7xx_pwm-test.c
+++ b/tests/qtest/npcm7xx_pwm-test.c
@@ -17,8 +17,8 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
static int verbosity_level;
diff --git a/tests/qtest/npcm7xx_timer-test.c b/tests/qtest/npcm7xx_timer-test.c
index 58f58c2..4371104 100644
--- a/tests/qtest/npcm7xx_timer-test.c
+++ b/tests/qtest/npcm7xx_timer-test.c
@@ -465,7 +465,6 @@ static void test_periodic_interrupt(gconstpointer test_data)
int i;
tim_reset(td);
- clock_step_next();
tim_write_ticr(td, count);
tim_write_tcsr(td, CEN | IE | MODE_PERIODIC | PRESCALE(ps));
diff --git a/tests/qtest/npcm7xx_watchdog_timer-test.c b/tests/qtest/npcm7xx_watchdog_timer-test.c
index 981b853..521ea78 100644
--- a/tests/qtest/npcm7xx_watchdog_timer-test.c
+++ b/tests/qtest/npcm7xx_watchdog_timer-test.c
@@ -18,7 +18,7 @@
#include "qemu/timer.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define WTCR_OFFSET 0x1c
#define REF_HZ (25000000)
diff --git a/tests/qtest/npcm_gmac-test.c b/tests/qtest/npcm_gmac-test.c
index c28b471..1317da2 100644
--- a/tests/qtest/npcm_gmac-test.c
+++ b/tests/qtest/npcm_gmac-test.c
@@ -36,7 +36,7 @@ typedef struct TestData {
const GMACModule *module;
} TestData;
-/* Values extracted from hw/arm/npcm7xx.c */
+/* Values extracted from hw/arm/npcm8xx.c */
static const GMACModule gmac_module_list[] = {
{
.irq = 14,
@@ -46,6 +46,14 @@ static const GMACModule gmac_module_list[] = {
.irq = 15,
.base_addr = 0xf0804000
},
+ {
+ .irq = 16,
+ .base_addr = 0xf0806000
+ },
+ {
+ .irq = 17,
+ .base_addr = 0xf0808000
+ }
};
/* Returns the index of the GMAC module. */
@@ -174,18 +182,32 @@ static uint32_t gmac_read(QTestState *qts, const GMACModule *mod,
return qtest_readl(qts, mod->base_addr + regno);
}
+static uint16_t pcs_read(QTestState *qts, const GMACModule *mod,
+ NPCMRegister regno)
+{
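+ /*
+ * PCS registers are accessed indirectly: bits above the low 9 bits of
+ * the register offset are written to NPCM_PCS_IND_AC_BA to select the
+ * active window, then the low 9 bits are read within that window.
+ */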
+ uint32_t write_value = (regno & 0x3ffe00) >> 9;
+ qtest_writel(qts, PCS_BASE_ADDRESS + NPCM_PCS_IND_AC_BA, write_value);
+ uint32_t read_offset = regno & 0x1ff;
+ return qtest_readl(qts, PCS_BASE_ADDRESS + read_offset);
+}
+
/* Check that GMAC registers are reset to default value */
static void test_init(gconstpointer test_data)
{
const TestData *td = test_data;
const GMACModule *mod = td->module;
- QTestState *qts = qtest_init("-machine npcm750-evb");
+ QTestState *qts = qtest_init("-machine npcm845-evb");
#define CHECK_REG32(regno, value) \
do { \
g_assert_cmphex(gmac_read(qts, mod, (regno)), ==, (value)); \
} while (0)
+#define CHECK_REG_PCS(regno, value) \
+ do { \
+ g_assert_cmphex(pcs_read(qts, mod, (regno)), ==, (value)); \
+ } while (0)
+
CHECK_REG32(NPCM_DMA_BUS_MODE, 0x00020100);
CHECK_REG32(NPCM_DMA_XMT_POLL_DEMAND, 0);
CHECK_REG32(NPCM_DMA_RCV_POLL_DEMAND, 0);
@@ -235,6 +257,63 @@ static void test_init(gconstpointer test_data)
CHECK_REG32(NPCM_GMAC_PTP_TAR, 0);
CHECK_REG32(NPCM_GMAC_PTP_TTSR, 0);
+ if (mod->base_addr == 0xf0802000) {
+ CHECK_REG_PCS(NPCM_PCS_SR_CTL_ID1, 0x699e);
+ CHECK_REG_PCS(NPCM_PCS_SR_CTL_ID2, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_CTL_STS, 0x8000);
+
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_CTRL, 0x1140);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_STS, 0x0109);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_DEV_ID1, 0x699e);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_DEV_ID2, 0x0ced0);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_AN_ADV, 0x0020);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_LP_BABL, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_AN_EXPN, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_MII_EXT_STS, 0xc000);
+
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_ABL, 0x0003);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR, 0x0038);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_UPR, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR, 0x0038);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_UPR, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR, 0x0058);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_UPR, 0);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR, 0x0048);
+ CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_UPR, 0);
+
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MMD_DIG_CTRL1, 0x2400);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_AN_CTRL, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_AN_INTR_STS, 0x000a);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_TC, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_DBG_CTRL, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_MCTRL0, 0x899c);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_TXTIMER, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_RXTIMER, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_LINK_TIMER_CTRL, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_MCTRL1, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_STS, 0x0010);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_ICG_ERRCNT1, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MISC_STS, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_RX_LSTS, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_BSTCTRL0, 0x00a);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_LVLCTRL0, 0x007f);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_GENCTRL0, 0x0001);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_GENCTRL1, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_STS, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_GENCTRL0, 0x0100);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_GENCTRL1, 0x1100);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0, 0x000e);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_CTRL0, 0x0100);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_CTRL1, 0x0032);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_STS, 0x0001);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL2, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_LVL_CTRL, 0x0019);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL0, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL1, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_CTRL2, 0);
+ CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_ERRCNT_SEL, 0);
+ }
+
qtest_quit(qts);
}
@@ -242,7 +321,7 @@ static void gmac_add_test(const char *name, const TestData* td,
GTestDataFunc fn)
{
g_autofree char *full_name = g_strdup_printf(
- "npcm7xx_gmac/gmac[%d]/%s", gmac_module_index(td->module), name);
+ "npcm8xx_gmac/gmac[%d]/%s", gmac_module_index(td->module), name);
qtest_add_data_func(full_name, td, fn);
}
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
index ede4189..d657f38 100644
--- a/tests/qtest/numa-test.c
+++ b/tests/qtest/numa-test.c
@@ -11,8 +11,8 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
static char *make_cli(const GString *generic_cli, const char *test_cli)
{
@@ -162,7 +162,7 @@ static void pc_numa_cpu(const void *data)
} else if (socket == 1 && core == 1 && thread == 1) {
g_assert_cmpint(node, ==, 1);
} else {
- g_assert(false);
+ g_assert_not_reached();
}
qobject_unref(e);
}
@@ -207,7 +207,7 @@ static void spapr_numa_cpu(const void *data)
} else if (core == 3) {
g_assert_cmpint(node, ==, 1);
} else {
- g_assert(false);
+ g_assert_not_reached();
}
qobject_unref(e);
}
@@ -257,7 +257,7 @@ static void aarch64_numa_cpu(const void *data)
} else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) {
g_assert_cmpint(node, ==, 0);
} else {
- g_assert(false);
+ g_assert_not_reached();
}
qobject_unref(e);
}
@@ -305,7 +305,7 @@ static void loongarch64_numa_cpu(const void *data)
} else if (socket == 1 && core == 0 && thread == 0) {
g_assert_cmpint(node, ==, 0);
} else {
- g_assert(false);
+ g_assert_not_reached();
}
qobject_unref(e);
}
@@ -367,7 +367,7 @@ static void pc_dynamic_cpu_cfg(const void *data)
} else if (socket == 1) {
g_assert_cmpint(node, ==, 0);
} else {
- g_assert(false);
+ g_assert_not_reached();
}
qobject_unref(e);
}
diff --git a/tests/qtest/pnv-host-i2c-test.c b/tests/qtest/pnv-host-i2c-test.c
index 7f64d59..51e613e 100644
--- a/tests/qtest/pnv-host-i2c-test.c
+++ b/tests/qtest/pnv-host-i2c-test.c
@@ -191,12 +191,10 @@ static uint8_t pnv_i2c_pca9554_read_pins(PnvI2cDev *dev)
{
uint8_t send_buf[1];
uint8_t recv_buf[1];
- uint8_t inputs;
send_buf[0] = PCA9554_INPUT;
pnv_i2c_send(dev, send_buf, 1);
pnv_i2c_recv(dev, recv_buf, 1);
- inputs = recv_buf[0];
- return inputs;
+ return recv_buf[0];
}
static void pnv_i2c_pca9554_flip_polarity(PnvI2cDev *dev)
diff --git a/tests/qtest/pnv-spi-seeprom-test.c b/tests/qtest/pnv-spi-seeprom-test.c
new file mode 100644
index 0000000..600493c
--- /dev/null
+++ b/tests/qtest/pnv-spi-seeprom-test.c
@@ -0,0 +1,110 @@
+/*
+ * QTest testcase for PowerNV 10 Seeprom Communications
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <unistd.h>
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "qemu/bswap.h"
+#include "hw/ssi/pnv_spi_regs.h"
+#include "pnv-xscom.h"
+
+#define FLASH_SIZE (512 * 1024)
+#define SPIC2_XSCOM_BASE 0xc0040
+
+/* To transmit READ opcode and address */
+#define READ_OP_TDR_DATA 0x0300010000000000
+/*
+ * N1 shift - tx 4 bytes (transmit opcode and address)
+ * N2 shift - tx and rx 8 bytes.
+ */
+#define READ_OP_COUNTER_CONFIG 0x2040000000002b00
+/* SEQ_OP_SELECT_RESPONDER - N1 Shift - N2 Shift * 5 - SEQ_OP_STOP */
+#define READ_OP_SEQUENCER 0x1130404040404010
+
+/* To transmit WREN(Set Write Enable Latch in status0 register) opcode */
+#define WRITE_OP_WREN 0x0600000000000000
+/* To transmit WRITE opcode, address and data */
+#define WRITE_OP_TDR_DATA 0x0300010012345678
+/* N1 shift - tx 8 bytes (transmit opcode, address and data) */
+#define WRITE_OP_COUNTER_CONFIG 0x4000000000002000
+/* SEQ_OP_SELECT_RESPONDER - N1 Shift - SEQ_OP_STOP */
+#define WRITE_OP_SEQUENCER 0x1130100000000000
+
+static void pnv_spi_xscom_write(QTestState *qts, const PnvChip *chip,
+ uint32_t reg, uint64_t val)
+{
+ uint32_t pcba = SPIC2_XSCOM_BASE + reg;
+ qtest_writeq(qts, pnv_xscom_addr(chip, pcba), val);
+}
+
+static uint64_t pnv_spi_xscom_read(QTestState *qts, const PnvChip *chip,
+ uint32_t reg)
+{
+ uint32_t pcba = SPIC2_XSCOM_BASE + reg;
+ return qtest_readq(qts, pnv_xscom_addr(chip, pcba));
+}
+
+static void spi_seeprom_transaction(QTestState *qts, const PnvChip *chip)
+{
+ /* SPI transactions to SEEPROM to read from SEEPROM image */
+ pnv_spi_xscom_write(qts, chip, SPI_CTR_CFG_REG, READ_OP_COUNTER_CONFIG);
+ pnv_spi_xscom_write(qts, chip, SPI_SEQ_OP_REG, READ_OP_SEQUENCER);
+ pnv_spi_xscom_write(qts, chip, SPI_XMIT_DATA_REG, READ_OP_TDR_DATA);
+ pnv_spi_xscom_write(qts, chip, SPI_XMIT_DATA_REG, 0);
+ /* Read 5*8 bytes from SEEPROM at 0x100 */
+ uint64_t rdr_val = pnv_spi_xscom_read(qts, chip, SPI_RCV_DATA_REG);
+ g_test_message("RDR READ = 0x%" PRIx64, rdr_val);
+ rdr_val = pnv_spi_xscom_read(qts, chip, SPI_RCV_DATA_REG);
+ rdr_val = pnv_spi_xscom_read(qts, chip, SPI_RCV_DATA_REG);
+ rdr_val = pnv_spi_xscom_read(qts, chip, SPI_RCV_DATA_REG);
+ rdr_val = pnv_spi_xscom_read(qts, chip, SPI_RCV_DATA_REG);
+ g_test_message("RDR READ = 0x%" PRIx64, rdr_val);
+
+ /* SPI transactions to SEEPROM to write to SEEPROM image */
+ pnv_spi_xscom_write(qts, chip, SPI_CTR_CFG_REG, WRITE_OP_COUNTER_CONFIG);
+ /* Set Write Enable Latch bit of status0 register */
+ pnv_spi_xscom_write(qts, chip, SPI_SEQ_OP_REG, WRITE_OP_SEQUENCER);
+ pnv_spi_xscom_write(qts, chip, SPI_XMIT_DATA_REG, WRITE_OP_WREN);
+ /* write 8 bytes to SEEPROM at 0x100 */
+ pnv_spi_xscom_write(qts, chip, SPI_SEQ_OP_REG, WRITE_OP_SEQUENCER);
+ pnv_spi_xscom_write(qts, chip, SPI_XMIT_DATA_REG, WRITE_OP_TDR_DATA);
+}
+
+static void test_spi_seeprom(const void *data)
+{
+ const PnvChip *chip = data;
+ QTestState *qts = NULL;
+ g_autofree char *tmp_path = NULL;
+ int ret;
+ int fd;
+
+ /* Create a temporary raw image */
+ fd = g_file_open_tmp("qtest-seeprom-XXXXXX", &tmp_path, NULL);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, FLASH_SIZE);
+ g_assert(ret == 0);
+ close(fd);
+
+ qts = qtest_initf("-machine powernv10 -smp 2,cores=2,"
+ "threads=1 -accel tcg,thread=single -nographic "
+ "-blockdev node-name=pib_spic2,driver=file,"
+ "filename=%s -device 25csm04,bus=chip0.spi.2,cs=0,"
+ "drive=pib_spic2", tmp_path);
+ spi_seeprom_transaction(qts, chip);
+ qtest_quit(qts);
+ unlink(tmp_path);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ char *tname = g_strdup_printf("pnv-xscom/spi-seeprom/%s",
+ pnv_chips[3].cpu_model);
+ qtest_add_data_func(tname, &pnv_chips[3], test_spi_seeprom);
+ g_free(tname);
+ return g_test_run();
+}
diff --git a/tests/qtest/pnv-xive2-common.c b/tests/qtest/pnv-xive2-common.c
new file mode 100644
index 0000000..bf2bce00
--- /dev/null
+++ b/tests/qtest/pnv-xive2-common.c
@@ -0,0 +1,190 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ * - Common functions for XIVE2 tests
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#include "pnv-xive2-common.h"
+
+
+static uint64_t pnv_xscom_addr(uint32_t pcba)
+{
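+ /* XSCOM MMIO address: base OR'd with the PCB address shifted left by 3 */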
+ return P10_XSCOM_BASE | ((uint64_t) pcba << 3);
+}
+
+static uint64_t pnv_xive_xscom_addr(uint32_t reg)
+{
+ return pnv_xscom_addr(XIVE_XSCOM + reg);
+}
+
+uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg)
+{
+ return qtest_readq(qts, pnv_xive_xscom_addr(reg));
+}
+
+void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val)
+{
+ qtest_writeq(qts, pnv_xive_xscom_addr(reg), val);
+}
+
+static void xive_get_struct(QTestState *qts, uint64_t src, void *dest,
+ size_t size)
+{
+ uint8_t *destination = (uint8_t *)dest;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ *(destination + i) = qtest_readb(qts, src + i);
+ }
+}
+
+static void xive_copy_struct(QTestState *qts, void *src, uint64_t dest,
+ size_t size)
+{
+ uint8_t *source = (uint8_t *)src;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ qtest_writeb(qts, dest + i, *(source + i));
+ }
+}
+
+uint64_t xive_get_queue_addr(uint32_t end_index)
+{
+ return XIVE_QUEUE_MEM + (uint64_t)end_index * XIVE_QUEUE_SIZE;
+}
+
+uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page,
+ uint32_t offset)
+{
+ uint64_t addr;
+
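+ /* Each ESB source spans two pages; page 1 sits one page above page 0 */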
+ addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1));
+ if (page == 1) {
+ addr += 1 << XIVE_PAGE_SHIFT;
+ }
+ return qtest_readb(qts, addr + offset);
+}
+
+void set_esb(QTestState *qts, uint32_t index, uint8_t page,
+ uint32_t offset, uint32_t val)
+{
+ uint64_t addr;
+
+ addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1));
+ if (page == 1) {
+ addr += 1 << XIVE_PAGE_SHIFT;
+ }
+ qtest_writel(qts, addr + offset, cpu_to_be32(val));
+}
+
+void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp)
+{
+ uint64_t addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp);
+ xive_get_struct(qts, addr, nvp, sizeof(Xive2Nvp));
+}
+
+void set_nvp(QTestState *qts, uint32_t index, uint8_t first)
+{
+ uint64_t nvp_addr;
+ Xive2Nvp nvp;
+ uint64_t report_addr;
+
+ nvp_addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp);
+ report_addr = (XIVE_REPORT_MEM + (uint64_t)index * XIVE_REPORT_SIZE) >> 8;
+
+ memset(&nvp, 0, sizeof(nvp));
+ nvp.w0 = xive_set_field32(NVP2_W0_VALID, 0, 1);
+ nvp.w0 = xive_set_field32(NVP2_W0_PGOFIRST, nvp.w0, first);
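+ /*
+ * The reporting line address is stored right-shifted by 8 and split
+ * across words 6 (upper bits) and 7 (low 24 bits).
+ */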
+ nvp.w6 = xive_set_field32(NVP2_W6_REPORTING_LINE, nvp.w6,
+ (report_addr >> 24) & 0xfffffff);
+ nvp.w7 = xive_set_field32(NVP2_W7_REPORTING_LINE, nvp.w7,
+ report_addr & 0xffffff);
+ xive_copy_struct(qts, &nvp, nvp_addr, sizeof(nvp));
+}
+
+static uint64_t get_cl_pair_addr(Xive2Nvp *nvp)
+{
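+ /* Reassemble the reporting line address stored in NVP words 6 and 7 */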
+ uint64_t upper = xive_get_field32(0x0fffffff, nvp->w6);
+ uint64_t lower = xive_get_field32(0xffffff00, nvp->w7);
+ return (upper << 32) | (lower << 8);
+}
+
+void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair)
+{
+ uint64_t addr = get_cl_pair_addr(nvp);
+ xive_get_struct(qts, addr, cl_pair, XIVE_REPORT_SIZE);
+}
+
+void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair)
+{
+ uint64_t addr = get_cl_pair_addr(nvp);
+ xive_copy_struct(qts, cl_pair, addr, XIVE_REPORT_SIZE);
+}
+
+void set_nvg(QTestState *qts, uint32_t index, uint8_t next)
+{
+ uint64_t nvg_addr;
+ Xive2Nvgc nvg;
+
+ nvg_addr = XIVE_NVG_MEM + (uint64_t)index * sizeof(Xive2Nvgc);
+
+ memset(&nvg, 0, sizeof(nvg));
+ nvg.w0 = xive_set_field32(NVGC2_W0_VALID, 0, 1);
+ nvg.w0 = xive_set_field32(NVGC2_W0_PGONEXT, nvg.w0, next);
+ xive_copy_struct(qts, &nvg, nvg_addr, sizeof(nvg));
+}
+
+void set_eas(QTestState *qts, uint32_t index, uint32_t end_index,
+ uint32_t data)
+{
+ uint64_t eas_addr;
+ Xive2Eas eas;
+
+ eas_addr = XIVE_EAS_MEM + (uint64_t)index * sizeof(Xive2Eas);
+
+ memset(&eas, 0, sizeof(eas));
+ eas.w = xive_set_field64(EAS2_VALID, 0, 1);
+ eas.w = xive_set_field64(EAS2_END_INDEX, eas.w, end_index);
+ eas.w = xive_set_field64(EAS2_END_DATA, eas.w, data);
+ xive_copy_struct(qts, &eas, eas_addr, sizeof(eas));
+}
+
+void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index,
+ uint8_t priority, bool i)
+{
+ uint64_t end_addr, queue_addr, queue_hi, queue_lo;
+ uint8_t queue_size;
+ Xive2End end;
+
+ end_addr = XIVE_END_MEM + (uint64_t)index * sizeof(Xive2End);
+ queue_addr = xive_get_queue_addr(index);
+ queue_hi = (queue_addr >> 32) & END2_W2_EQ_ADDR_HI;
+ queue_lo = queue_addr & END2_W3_EQ_ADDR_LO;
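+ /* the END queue size field encodes log2(size in bytes) - 12 */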
+ queue_size = ctz16(XIVE_QUEUE_SIZE) - 12;
+
+ memset(&end, 0, sizeof(end));
+ end.w0 = xive_set_field32(END2_W0_VALID, 0, 1);
+ end.w0 = xive_set_field32(END2_W0_ENQUEUE, end.w0, 1);
+ end.w0 = xive_set_field32(END2_W0_UCOND_NOTIFY, end.w0, 1);
+ end.w0 = xive_set_field32(END2_W0_BACKLOG, end.w0, 1);
+
+ end.w1 = xive_set_field32(END2_W1_GENERATION, 0, 1);
+
+ end.w2 = cpu_to_be32(queue_hi);
+
+ end.w3 = cpu_to_be32(queue_lo);
+ end.w3 = xive_set_field32(END2_W3_QSIZE, end.w3, queue_size);
+
+ end.w6 = xive_set_field32(END2_W6_IGNORE, 0, i);
+ end.w6 = xive_set_field32(END2_W6_VP_OFFSET, end.w6, nvp_index);
+
+ end.w7 = xive_set_field32(END2_W7_F0_PRIORITY, 0, priority);
+ xive_copy_struct(qts, &end, end_addr, sizeof(end));
+}
+
diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h
new file mode 100644
index 0000000..2077c05
--- /dev/null
+++ b/tests/qtest/pnv-xive2-common.h
@@ -0,0 +1,112 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TEST_PNV_XIVE2_COMMON_H
+#define TEST_PNV_XIVE2_COMMON_H
+
+#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
+#define PPC_BIT32(bit) (0x80000000 >> (bit))
+#define PPC_BIT8(bit) (0x80 >> (bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
+ PPC_BIT32(bs))
+#include "qemu/bswap.h"
+#include "hw/intc/pnv_xive2_regs.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/ppc/xive2_regs.h"
+
+/*
+ * sizing:
+ * 128 interrupts
+ * => ESB BAR range: 16M
+ * 256 ENDs
+ * => END BAR range: 16M
+ * 256 VPs
+ * => NVPG,NVC BAR range: 32M
+ */
+#define MAX_IRQS 128
+#define MAX_ENDS 256
+#define MAX_VPS 256
+
+#define XIVE_PAGE_SHIFT 16
+
+#define XIVE_TRIGGER_PAGE 0
+#define XIVE_EOI_PAGE 1
+
+#define XIVE_IC_ADDR 0x0006030200000000ull
+#define XIVE_IC_TM_INDIRECT (XIVE_IC_ADDR + (256 << XIVE_PAGE_SHIFT))
+#define XIVE_IC_BAR ((0x3ull << 62) | XIVE_IC_ADDR)
+#define XIVE_TM_BAR 0xc006030203180000ull
+#define XIVE_ESB_ADDR 0x0006050000000000ull
+#define XIVE_ESB_BAR ((0x3ull << 62) | XIVE_ESB_ADDR)
+#define XIVE_END_BAR 0xc006060000000000ull
+#define XIVE_NVPG_ADDR 0x0006040000000000ull
+#define XIVE_NVPG_BAR ((0x3ull << 62) | XIVE_NVPG_ADDR)
+#define XIVE_NVC_ADDR 0x0006030208000000ull
+#define XIVE_NVC_BAR ((0x3ull << 62) | XIVE_NVC_ADDR)
+
+/*
+ * Memory layout
+ * A check is done when a table is configured to ensure that the max
+ * size of the resource fits in the table.
+ */
+#define XIVE_VST_SIZE 0x10000ull /* must be at least 4k */
+
+#define XIVE_MEM_START 0x10000000ull
+#define XIVE_ESB_MEM XIVE_MEM_START
+#define XIVE_EAS_MEM (XIVE_ESB_MEM + XIVE_VST_SIZE)
+#define XIVE_END_MEM (XIVE_EAS_MEM + XIVE_VST_SIZE)
+#define XIVE_NVP_MEM (XIVE_END_MEM + XIVE_VST_SIZE)
+#define XIVE_NVG_MEM (XIVE_NVP_MEM + XIVE_VST_SIZE)
+#define XIVE_NVC_MEM (XIVE_NVG_MEM + XIVE_VST_SIZE)
+#define XIVE_SYNC_MEM (XIVE_NVC_MEM + XIVE_VST_SIZE)
+#define XIVE_QUEUE_MEM (XIVE_SYNC_MEM + XIVE_VST_SIZE)
+#define XIVE_QUEUE_SIZE 4096 /* per End */
+#define XIVE_REPORT_MEM (XIVE_QUEUE_MEM + XIVE_QUEUE_SIZE * MAX_VPS)
+#define XIVE_REPORT_SIZE 256 /* two cache lines per NVP */
+#define XIVE_MEM_END (XIVE_REPORT_MEM + XIVE_REPORT_SIZE * MAX_VPS)
+
+#define P10_XSCOM_BASE 0x000603fc00000000ull
+#define XIVE_XSCOM 0x2010800ull
+
+#define XIVE_ESB_RESET 0b00
+#define XIVE_ESB_OFF 0b01
+#define XIVE_ESB_PENDING 0b10
+#define XIVE_ESB_QUEUED 0b11
+
+#define XIVE_ESB_GET 0x800
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
+
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+
+
+extern uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg);
+extern void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val);
+extern uint64_t xive_get_queue_addr(uint32_t end_index);
+extern uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page,
+ uint32_t offset);
+extern void set_esb(QTestState *qts, uint32_t index, uint8_t page,
+ uint32_t offset, uint32_t val);
+extern void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp *nvp);
+extern void set_nvp(QTestState *qts, uint32_t index, uint8_t first);
+extern void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair);
+extern void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair);
+extern void set_nvg(QTestState *qts, uint32_t index, uint8_t next);
+extern void set_eas(QTestState *qts, uint32_t index, uint32_t end_index,
+ uint32_t data);
+extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index,
+ uint8_t priority, bool i);
+
+
+void test_flush_sync_inject(QTestState *qts);
+void test_nvpg_bar(QTestState *qts);
+
+#endif /* TEST_PNV_XIVE2_COMMON_H */
diff --git a/tests/qtest/pnv-xive2-flush-sync.c b/tests/qtest/pnv-xive2-flush-sync.c
new file mode 100644
index 0000000..142826b
--- /dev/null
+++ b/tests/qtest/pnv-xive2-flush-sync.c
@@ -0,0 +1,205 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ * - Test cache flush/queue sync injection
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#include "pnv-xive2-common.h"
+#include "hw/intc/pnv_xive2_regs.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/ppc/xive2_regs.h"
+
+#define PNV_XIVE2_QUEUE_IPI 0x00
+#define PNV_XIVE2_QUEUE_HW 0x01
+#define PNV_XIVE2_QUEUE_NXC 0x02
+#define PNV_XIVE2_QUEUE_INT 0x03
+#define PNV_XIVE2_QUEUE_OS 0x04
+#define PNV_XIVE2_QUEUE_POOL 0x05
+#define PNV_XIVE2_QUEUE_HARD 0x06
+#define PNV_XIVE2_CACHE_ENDC 0x08
+#define PNV_XIVE2_CACHE_ESBC 0x09
+#define PNV_XIVE2_CACHE_EASC 0x0a
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
+#define PNV_XIVE2_CACHE_NXC 0x18
+
+#define PNV_XIVE2_SYNC_IPI 0x000
+#define PNV_XIVE2_SYNC_HW 0x080
+#define PNV_XIVE2_SYNC_NxC 0x100
+#define PNV_XIVE2_SYNC_INT 0x180
+#define PNV_XIVE2_SYNC_OS_ESC 0x200
+#define PNV_XIVE2_SYNC_POOL_ESC 0x280
+#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
+
+
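+/*
+ * The sync page is laid out as one 512-byte slice per hardware thread,
+ * with a 32-byte block per IC topology id and one status byte per
+ * operation type.
+ */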
+static uint64_t get_sync_addr(uint32_t src_pir, int ic_topo_id, int type)
+{
+ int thread_nr = src_pir & 0x7f;
+ uint64_t addr = XIVE_SYNC_MEM + thread_nr * 512 + ic_topo_id * 32 + type;
+ return addr;
+}
+
+static uint8_t get_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id,
+ int type)
+{
+ uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type);
+ return qtest_readb(qts, addr);
+}
+
+static void clr_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id,
+ int type)
+{
+ uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type);
+ qtest_writeb(qts, addr, 0x0);
+}
+
+static void inject_cache_flush(QTestState *qts, int ic_topo_id,
+ uint64_t scom_addr)
+{
+ (void)ic_topo_id;
+ pnv_xive_xscom_write(qts, scom_addr, 0);
+}
+
+static void inject_queue_sync(QTestState *qts, int ic_topo_id, uint64_t offset)
+{
+ (void)ic_topo_id;
+ uint64_t addr = XIVE_IC_ADDR + (VST_SYNC << XIVE_PAGE_SHIFT) + offset;
+ qtest_writeq(qts, addr, 0);
+}
+
+static void inject_op(QTestState *qts, int ic_topo_id, int type)
+{
+ switch (type) {
+ case PNV_XIVE2_QUEUE_IPI:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_IPI);
+ break;
+ case PNV_XIVE2_QUEUE_HW:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HW);
+ break;
+ case PNV_XIVE2_QUEUE_NXC:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NxC);
+ break;
+ case PNV_XIVE2_QUEUE_INT:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_INT);
+ break;
+ case PNV_XIVE2_QUEUE_OS:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_OS_ESC);
+ break;
+ case PNV_XIVE2_QUEUE_POOL:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_POOL_ESC);
+ break;
+ case PNV_XIVE2_QUEUE_HARD:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HARD_ESC);
+ break;
+ case PNV_XIVE2_CACHE_ENDC:
+ inject_cache_flush(qts, ic_topo_id, X_VC_ENDC_FLUSH_INJECT);
+ break;
+ case PNV_XIVE2_CACHE_ESBC:
+ inject_cache_flush(qts, ic_topo_id, X_VC_ESBC_FLUSH_INJECT);
+ break;
+ case PNV_XIVE2_CACHE_EASC:
+ inject_cache_flush(qts, ic_topo_id, X_VC_EASC_FLUSH_INJECT);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_NCO);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_LD_LCL_CO:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_CO);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_NCI);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_ST_LCL_CI:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_CI);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_NCI);
+ break;
+ case PNV_XIVE2_QUEUE_NXC_ST_RMT_CI:
+ inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_CI);
+ break;
+ case PNV_XIVE2_CACHE_NXC:
+ inject_cache_flush(qts, ic_topo_id, X_PC_NXC_FLUSH_INJECT);
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
+
+const uint8_t xive_inject_tests[] = {
+ PNV_XIVE2_QUEUE_IPI,
+ PNV_XIVE2_QUEUE_HW,
+ PNV_XIVE2_QUEUE_NXC,
+ PNV_XIVE2_QUEUE_INT,
+ PNV_XIVE2_QUEUE_OS,
+ PNV_XIVE2_QUEUE_POOL,
+ PNV_XIVE2_QUEUE_HARD,
+ PNV_XIVE2_CACHE_ENDC,
+ PNV_XIVE2_CACHE_ESBC,
+ PNV_XIVE2_CACHE_EASC,
+ PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO,
+ PNV_XIVE2_QUEUE_NXC_LD_LCL_CO,
+ PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI,
+ PNV_XIVE2_QUEUE_NXC_ST_LCL_CI,
+ PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI,
+ PNV_XIVE2_QUEUE_NXC_ST_RMT_CI,
+ PNV_XIVE2_CACHE_NXC,
+};
+
+void test_flush_sync_inject(QTestState *qts)
+{
+ int ic_topo_id = 0;
+
+ /*
+ * Writes performed by qtest are not done in the context of a thread.
+ * This means that QEMU XIVE code doesn't have a way to determine what
+ * thread is originating the write. In order to allow for some testing,
+ * QEMU XIVE code will assume a PIR of 0 when unable to determine the
+ * source thread for cache flush and queue sync inject operations.
+ * See hw/intc/pnv_xive2.c: pnv_xive2_inject_notify() for details.
+ */
+ int src_pir = 0;
+ int test_nr;
+ uint8_t byte;
+
+ g_test_message("=========================================================");
+ g_test_message("Starting cache flush/queue sync injection tests...");
+
+ for (test_nr = 0; test_nr < sizeof(xive_inject_tests);
+ test_nr++) {
+ int op_type = xive_inject_tests[test_nr];
+
+ g_test_message("Running test %d", test_nr);
+
+ /* start with status byte set to 0 */
+ clr_sync(qts, src_pir, ic_topo_id, op_type);
+ byte = get_sync(qts, src_pir, ic_topo_id, op_type);
+ g_assert_cmphex(byte, ==, 0);
+
+ /* request cache flush or queue sync operation */
+ inject_op(qts, ic_topo_id, op_type);
+
+ /* verify that status byte was written to 0xff */
+ byte = get_sync(qts, src_pir, ic_topo_id, op_type);
+ g_assert_cmphex(byte, ==, 0xff);
+
+ clr_sync(qts, src_pir, ic_topo_id, op_type);
+ }
+}
+
diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c
new file mode 100644
index 0000000..6ac8d36
--- /dev/null
+++ b/tests/qtest/pnv-xive2-nvpg_bar.c
@@ -0,0 +1,152 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ * - Test NVPG BAR MMIO operations
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#include "pnv-xive2-common.h"
+
+#define NVPG_BACKLOG_OP_SHIFT 10
+#define NVPG_BACKLOG_PRIO_SHIFT 4
+
+#define XIVE_PRIORITY_MAX 7
+
+enum NVx {
+ NVP,
+ NVG,
+ NVC
+};
+
+typedef enum {
+ INCR_STORE = 0b100,
+ INCR_LOAD = 0b000,
+ DECR_STORE = 0b101,
+ DECR_LOAD = 0b001,
+ READ_x = 0b010,
+ READ_y = 0b011,
+} backlog_op;
+
+static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op,
+ enum NVx type, uint64_t index,
+ uint8_t priority, uint8_t delta)
+{
+ uint64_t addr, offset;
+ uint32_t count = 0;
+
+ switch (type) {
+ case NVP:
+ addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1));
+ break;
+ case NVG:
+ addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) +
+ (1 << XIVE_PAGE_SHIFT);
+ break;
+ case NVC:
+ addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
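+ /*
+ * Page offset encoding: bits 11:10 select the backlog operation and
+ * bits 7:4 the priority. Bit 2 of the op code selects stores (write
+ * a delta) vs loads (read the counter).
+ */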
+ offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT;
+ offset |= priority << NVPG_BACKLOG_PRIO_SHIFT;
+ if (op >> 2) {
+ qtest_writeb(qts, addr + offset, delta);
+ } else {
+ count = qtest_readw(qts, addr + offset);
+ }
+ return count;
+}
+
+void test_nvpg_bar(QTestState *qts)
+{
+ uint32_t nvp_target = 0x11;
+ uint32_t group_target = 0x17; /* size 16 */
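+ /* (a group's NVP index encodes its size in its trailing 1 bits) */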
+ uint32_t vp_irq = 33, group_irq = 47;
+ uint32_t vp_end = 3, group_end = 97;
+ uint32_t vp_irq_data = 0x33333333;
+ uint32_t group_irq_data = 0x66666666;
+ uint8_t vp_priority = 0, group_priority = 5;
+ uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 };
+ uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 };
+ uint32_t count, delta;
+ uint8_t i;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing NVPG BAR operations");
+
+ set_nvg(qts, group_target, 0);
+ set_nvp(qts, nvp_target, 0x04);
+ set_nvp(qts, group_target, 0x04);
+
+ /*
+ * Setup: trigger a VP-specific interrupt and a group interrupt
+ * so that the backlog counters are initialized to something other
+ * than 0 for at least one priority level
+ */
+ set_eas(qts, vp_irq, vp_end, vp_irq_data);
+ set_end(qts, vp_end, nvp_target, vp_priority, false /* group */);
+
+ set_eas(qts, group_irq, group_end, group_irq_data);
+ set_end(qts, group_end, group_target, group_priority, true /* group */);
+
+ get_esb(qts, vp_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0);
+ vp_count[vp_priority]++;
+
+ get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0);
+ group_count[group_priority]++;
+
+ /* check the initial counters */
+ for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
+ count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0);
+ g_assert_cmpuint(count, ==, vp_count[i]);
+
+ count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0);
+ g_assert_cmpuint(count, ==, group_count[i]);
+ }
+
+ /* do a few ops on the VP. The counter can only be 0 or 1 */
+ vp_priority = 2;
+ delta = 7;
+ nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta);
+ vp_count[vp_priority] = 1;
+ count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+ count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+
+ count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+ vp_count[vp_priority] = 0;
+ nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta);
+ count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0);
+ g_assert_cmpuint(count, ==, vp_count[vp_priority]);
+
+ /* do a few ops on the group */
+ group_priority = 2;
+ delta = 9;
+ /* can't go negative */
+ nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta);
+ count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0);
+ g_assert_cmpuint(count, ==, 0);
+ nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta);
+ group_count[group_priority] += delta;
+ count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target,
+ group_priority, delta);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+ group_count[group_priority]++;
+
+ count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target,
+ group_priority, delta);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+ group_count[group_priority]--;
+ count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0);
+ g_assert_cmpuint(count, ==, group_count[group_priority]);
+}
diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c
new file mode 100644
index 0000000..5313d4e
--- /dev/null
+++ b/tests/qtest/pnv-xive2-test.c
@@ -0,0 +1,585 @@
+/*
+ * QTest testcase for PowerNV 10 interrupt controller (xive2)
+ * - Test irq to hardware thread
+ * - Test 'Pull Thread Context to Odd Thread Reporting Line'
+ * - Test irq to hardware group
+ * - Test irq to hardware group going through backlog
+ * - Test irq to pool thread
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#include "pnv-xive2-common.h"
+#include "hw/intc/pnv_xive2_regs.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/ppc/xive2_regs.h"
+
+#define SMT 4 /* some tests will break if less than 4 */
+
+
+static void set_table(QTestState *qts, uint64_t type, uint64_t addr)
+{
+ uint64_t vsd, size, log_size;
+
+ /*
+ * First, let's make sure that all the resources used fit in the
+ * given table.
+ */
+ switch (type) {
+ case VST_ESB:
+ size = MAX_IRQS / 4;
+ break;
+ case VST_EAS:
+ size = MAX_IRQS * 8;
+ break;
+ case VST_END:
+ size = MAX_ENDS * 32;
+ break;
+ case VST_NVP:
+ case VST_NVG:
+ case VST_NVC:
+ size = MAX_VPS * 32;
+ break;
+ case VST_SYNC:
+ size = 64 * 1024;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ g_assert_cmpuint(size, <=, XIVE_VST_SIZE);
+ log_size = ctzl(XIVE_VST_SIZE) - 12;
+
+ vsd = ((uint64_t) VSD_MODE_EXCLUSIVE) << 62 | addr | log_size;
+ pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_ADDR, type << 48);
+ pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_DATA, vsd);
+
+ if (type != VST_EAS && type != VST_IC && type != VST_ERQ) {
+ pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_ADDR, type << 48);
+ pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_DATA, vsd);
+ }
+}
+
+static void set_tima8(QTestState *qts, uint32_t pir, uint32_t offset,
+ uint8_t b)
+{
+ uint64_t ic_addr;
+
+ ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
+ qtest_writeb(qts, ic_addr + offset, b);
+}
+
+static void set_tima32(QTestState *qts, uint32_t pir, uint32_t offset,
+ uint32_t l)
+{
+ uint64_t ic_addr;
+
+ ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
+ qtest_writel(qts, ic_addr + offset, l);
+}
+
+static uint8_t get_tima8(QTestState *qts, uint32_t pir, uint32_t offset)
+{
+ uint64_t ic_addr;
+
+ ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
+ return qtest_readb(qts, ic_addr + offset);
+}
+
+static uint16_t get_tima16(QTestState *qts, uint32_t pir, uint32_t offset)
+{
+ uint64_t ic_addr;
+
+ ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
+ return qtest_readw(qts, ic_addr + offset);
+}
+
+static uint32_t get_tima32(QTestState *qts, uint32_t pir, uint32_t offset)
+{
+ uint64_t ic_addr;
+
+ ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
+ return qtest_readl(qts, ic_addr + offset);
+}
+
+static void reset_pool_threads(QTestState *qts)
+{
+ uint8_t first_group = 0;
+ int i;
+
+ for (i = 0; i < SMT; i++) {
+ uint32_t nvp_idx = 0x100 + i;
+ set_nvp(qts, nvp_idx, first_group);
+ set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD0, 0x000000ff);
+ set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD1, 0);
+ set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | nvp_idx);
+ }
+}
+
+static void reset_hw_threads(QTestState *qts)
+{
+ uint8_t first_group = 0;
+ uint32_t w1 = 0x000000ff;
+ int i;
+
+ if (SMT >= 4) {
+ /* define 2 groups of 2, part of a bigger group of size 4 */
+ set_nvg(qts, 0x80, 0x02);
+ set_nvg(qts, 0x82, 0x02);
+ set_nvg(qts, 0x81, 0);
+ first_group = 0x01;
+ w1 = 0x000300ff;
+ }
+
+ for (i = 0; i < SMT; i++) {
+ set_nvp(qts, 0x80 + i, first_group);
+ set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0, 0x00ff00ff);
+ set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD1, w1);
+ set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD2, 0x80000000);
+ }
+}
+
+static void reset_state(QTestState *qts)
+{
+ size_t mem_used = XIVE_MEM_END - XIVE_MEM_START;
+
+ qtest_memset(qts, XIVE_MEM_START, 0, mem_used);
+ reset_hw_threads(qts);
+ reset_pool_threads(qts);
+}
+
+static void init_xive(QTestState *qts)
+{
+ uint64_t val1, val2, range;
+
+ /*
+ * We can take a few shortcuts here, as we know the default values
+ * used for xive initialization
+ */
+
+ /*
+ * Set the BARs.
+ * We reuse the same values used by firmware to ease debug.
+ */
+ pnv_xive_xscom_write(qts, X_CQ_IC_BAR, XIVE_IC_BAR);
+ pnv_xive_xscom_write(qts, X_CQ_TM_BAR, XIVE_TM_BAR);
+
+ /* ESB and NVPG use 2 pages per resource. The others only one page */
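+ /*
+ * The shift by 25 expresses the range in 32 MB units. With the sizes
+ * above (e.g. 128 IRQs x 2 pages x 64 KB = 16 MB of ESB space), the
+ * computed ranges all come out as 0.
+ */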
+ range = (MAX_IRQS << 17) >> 25;
+ val1 = XIVE_ESB_BAR | range;
+ pnv_xive_xscom_write(qts, X_CQ_ESB_BAR, val1);
+
+ range = (MAX_ENDS << 16) >> 25;
+ val1 = XIVE_END_BAR | range;
+ pnv_xive_xscom_write(qts, X_CQ_END_BAR, val1);
+
+ range = (MAX_VPS << 17) >> 25;
+ val1 = XIVE_NVPG_BAR | range;
+ pnv_xive_xscom_write(qts, X_CQ_NVPG_BAR, val1);
+
+ range = (MAX_VPS << 16) >> 25;
+ val1 = XIVE_NVC_BAR | range;
+ pnv_xive_xscom_write(qts, X_CQ_NVC_BAR, val1);
+
+ /*
+ * Enable hw threads.
+ * We read back the value we wrote. This is not strictly needed with
+ * the current implementation, but it exercises the xscom read path
+ * and matches what the hardware procedure specifies.
+ */
+ val1 = 0xF000000000000000ull; /* core 0, 4 threads */
+ pnv_xive_xscom_write(qts, X_TCTXT_EN0, val1);
+ val2 = pnv_xive_xscom_read(qts, X_TCTXT_EN0);
+ g_assert_cmphex(val1, ==, val2);
+
+ /* Memory tables */
+ set_table(qts, VST_ESB, XIVE_ESB_MEM);
+ set_table(qts, VST_EAS, XIVE_EAS_MEM);
+ set_table(qts, VST_END, XIVE_END_MEM);
+ set_table(qts, VST_NVP, XIVE_NVP_MEM);
+ set_table(qts, VST_NVG, XIVE_NVG_MEM);
+ set_table(qts, VST_NVC, XIVE_NVC_MEM);
+ set_table(qts, VST_SYNC, XIVE_SYNC_MEM);
+
+ reset_hw_threads(qts);
+ reset_pool_threads(qts);
+}
+
+static void test_hw_irq(QTestState *qts)
+{
+ uint32_t irq = 2;
+ uint32_t irq_data = 0x600df00d;
+ uint32_t end_index = 5;
+ uint32_t target_pir = 1;
+ uint32_t target_nvp = 0x80 + target_pir;
+ uint8_t priority = 5;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware thread %d", irq, target_pir);
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, false /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
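+ /* NSR 0x80 flags a pending interrupt in the PHYS ring, CPPR still 0xFF */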
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x80);
+ g_assert_cmphex(cppr, ==, 0xFF);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x80);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+}
+
+static void test_pool_irq(QTestState *qts)
+{
+ uint32_t irq = 2;
+ uint32_t irq_data = 0x600d0d06;
+ uint32_t end_index = 5;
+ uint32_t target_pir = 1;
+ uint32_t target_nvp = 0x100 + target_pir;
+ uint8_t priority = 5;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr, ipb;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to pool thread %d", irq, target_pir);
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, false /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* check TIMA values in the PHYS ring (shared by POOL ring) */
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x40);
+ g_assert_cmphex(cppr, ==, 0xFF);
+
+ /* check TIMA values in the POOL ring */
+ reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ ipb = (reg32 >> 8) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0);
+ g_assert_cmphex(cppr, ==, 0);
+ g_assert_cmphex(ipb, ==, 0x80 >> priority);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x40);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* check IPB is cleared in the POOL ring */
+ reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0);
+ ipb = (reg32 >> 8) & 0xFF;
+ g_assert_cmphex(ipb, ==, 0);
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+}
+
+#define XIVE_ODD_CL 0x80
+static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
+{
+ uint32_t target_pir = 1;
+ uint32_t target_nvp = 0x80 + target_pir;
+ Xive2Nvp nvp;
+ uint8_t cl_pair[XIVE_REPORT_SIZE];
+ uint32_t qw1w0, qw3w0, qw1w2, qw2w2;
+ uint8_t qw3b8;
+ uint32_t cl_word;
+ uint32_t word2;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing 'Pull Thread Context to Odd Thread Reporting " \
+ "Line'");
+
+ /* clear odd cache line prior to pull operation */
+ memset(cl_pair, 0, sizeof(cl_pair));
+ get_nvp(qts, target_nvp, &nvp);
+ set_cl_pair(qts, &nvp, cl_pair);
+
+ /* Read some values from TIMA that we expect to see in cacheline */
+ qw1w0 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD0);
+ qw3w0 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
+ qw1w2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
+ qw2w2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
+ qw3b8 = get_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
+
+ /* Execute the pull operation */
+ set_tima8(qts, target_pir, TM_SPC_PULL_PHYS_CTX_OL, 0);
+
+ /* Verify odd cache line values match TIMA after pull operation */
+ get_cl_pair(qts, &nvp, cl_pair);
+ memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD0], 4);
+ g_assert_cmphex(qw1w0, ==, be32_to_cpu(cl_word));
+ memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD0], 4);
+ g_assert_cmphex(qw3w0, ==, be32_to_cpu(cl_word));
+ memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD2], 4);
+ g_assert_cmphex(qw1w2, ==, be32_to_cpu(cl_word));
+ memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW2_HV_POOL + TM_WORD2], 4);
+ g_assert_cmphex(qw2w2, ==, be32_to_cpu(cl_word));
+ g_assert_cmphex(qw3b8, ==,
+ cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD2]);
+
+ /* Verify that all TIMA valid bits for target thread are cleared */
+ word2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
+ g_assert_cmphex(xive_get_field32(TM_QW1W2_VO, word2), ==, 0);
+ word2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
+ g_assert_cmphex(xive_get_field32(TM_QW2W2_VP, word2), ==, 0);
+ word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
+ g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0);
+}
+
+static void test_hw_group_irq(QTestState *qts)
+{
+ uint32_t irq = 100;
+ uint32_t irq_data = 0xdeadbeef;
+ uint32_t end_index = 23;
+ uint32_t chosen_one;
+ uint32_t target_nvp = 0x81; /* group size = 4 */
+ uint8_t priority = 6;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware group of size 4", irq);
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, true /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* find the targeted vCPU */
+ for (chosen_one = 0; chosen_one < SMT; chosen_one++) {
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ if (nsr == 0x82) {
+ break;
+ }
+ }
+ g_assert_cmphex(chosen_one, <, SMT);
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, 0xFF);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+}
+
+static void test_hw_group_irq_backlog(QTestState *qts)
+{
+ uint32_t irq = 31;
+ uint32_t irq_data = 0x01234567;
+ uint32_t end_index = 129;
+ uint32_t target_nvp = 0x81; /* group size = 4 */
+ uint32_t chosen_one = 3;
+ uint8_t blocking_priority, priority = 3;
+ uint32_t reg32;
+ uint16_t reg16;
+ uint8_t pq, nsr, cppr, lsmfb, i;
+
+ g_test_message("=========================================================");
+ g_test_message("Testing irq %d to hardware group of size 4 going " \
+ "through backlog",
+ irq);
+
+ /*
+ * set the current priority (CPPR) of all threads in the group to a
+ * more favored level than the one we're about to trigger, so the
+ * interrupt is backlogged instead of being presented
+ */
+ blocking_priority = priority - 1;
+ for (i = 0; i < SMT; i++) {
+ set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority);
+ }
+
+ /* irq config */
+ set_eas(qts, irq, end_index, irq_data);
+ set_end(qts, end_index, target_nvp, priority, true /* group */);
+
+ /* enable and trigger irq */
+ get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
+ set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
+
+ /* check irq is raised on cpu */
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
+
+ /* check that no interrupt is pending on any of the possible targets */
+ for (i = 0; i < SMT; i++) {
+ reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ lsmfb = reg32 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x0);
+ g_assert_cmphex(cppr, ==, blocking_priority);
+ g_assert_cmphex(lsmfb, ==, priority);
+ }
+
+ /* lower priority of one thread */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1);
+
+ /* check backlogged interrupt is presented */
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority + 1);
+
+ /* ack the irq */
+ reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG);
+ nsr = reg16 >> 8;
+ cppr = reg16 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x82);
+ g_assert_cmphex(cppr, ==, priority);
+
+ /* check irq data is what was configured */
+ reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
+ g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
+
+ /* End Of Interrupt */
+ set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
+ pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
+ g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
+
+ /* reset CPPR */
+ set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
+ reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0);
+ nsr = reg32 >> 24;
+ cppr = (reg32 >> 16) & 0xFF;
+ lsmfb = reg32 & 0xFF;
+ g_assert_cmphex(nsr, ==, 0x00);
+ g_assert_cmphex(cppr, ==, 0xFF);
+ g_assert_cmphex(lsmfb, ==, 0xFF);
+}
+
+static void test_xive(void)
+{
+ QTestState *qts;
+
+ qts = qtest_initf("-M powernv10 -smp %d,cores=1,threads=%d -nographic "
+ "-nodefaults -serial mon:stdio -S "
+ "-d guest_errors -trace '*xive*'",
+ SMT, SMT);
+ init_xive(qts);
+
+ test_hw_irq(qts);
+
+ /* omit reset_state here and use settings from test_hw_irq */
+ test_pull_thread_ctx_to_odd_thread_cl(qts);
+
+ reset_state(qts);
+ test_pool_irq(qts);
+
+ reset_state(qts);
+ test_hw_group_irq(qts);
+
+ reset_state(qts);
+ test_hw_group_irq_backlog(qts);
+
+ reset_state(qts);
+ test_flush_sync_inject(qts);
+
+ reset_state(qts);
+ test_nvpg_bar(qts);
+
+ qtest_quit(qts);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("xive2", test_xive);
+ return g_test_run();
+}
diff --git a/tests/qtest/pnv-xscom.h b/tests/qtest/pnv-xscom.h
index 6f62941..5aa1701 100644
--- a/tests/qtest/pnv-xscom.h
+++ b/tests/qtest/pnv-xscom.h
@@ -56,7 +56,7 @@ static const PnvChip pnv_chips[] = {
.chip_type = PNV_CHIP_POWER10,
.cpu_model = "POWER10",
.xscom_base = 0x000603fc00000000ull,
- .cfam_id = 0x120da04900008000ull,
+ .cfam_id = 0x220da04980000000ull,
.first_core = 0x0,
.num_i2c = 4,
},
diff --git a/tests/qtest/pvpanic-pci-test.c b/tests/qtest/pvpanic-pci-test.c
index dc021c2..f788a44 100644
--- a/tests/qtest/pvpanic-pci-test.c
+++ b/tests/qtest/pvpanic-pci-test.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "libqos/pci.h"
#include "libqos/pci-pc.h"
#include "hw/misc/pvpanic.h"
diff --git a/tests/qtest/pvpanic-test.c b/tests/qtest/pvpanic-test.c
index d49d2ba..5606baf 100644
--- a/tests/qtest/pvpanic-test.c
+++ b/tests/qtest/pvpanic-test.c
@@ -9,7 +9,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/misc/pvpanic.h"
static void test_panic_nopause(void)
diff --git a/tests/qtest/q35-test.c b/tests/qtest/q35-test.c
index c922d81..62fff49 100644
--- a/tests/qtest/q35-test.c
+++ b/tests/qtest/q35-test.c
@@ -14,7 +14,7 @@
#include "libqos/pci.h"
#include "libqos/pci-pc.h"
#include "hw/pci-host/q35.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define TSEG_SIZE_TEST_GUEST_RAM_MBYTES 128
@@ -83,7 +83,6 @@ static void test_smram_lock(void)
{
QPCIBus *pcibus;
QPCIDevice *pcidev;
- QDict *response;
QTestState *qts;
qts = qtest_init("-M q35");
@@ -107,10 +106,7 @@ static void test_smram_lock(void)
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
/* reset */
- response = qtest_qmp(qts, "{'execute': 'system_reset', 'arguments': {} }");
- g_assert(response);
- g_assert(!qdict_haskey(response, "error"));
- qobject_unref(response);
+ qtest_system_reset(qts);
/* check open is settable again */
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false);
@@ -194,7 +190,6 @@ static void test_smram_smbase_lock(void)
{
QPCIBus *pcibus;
QPCIDevice *pcidev;
- QDict *response;
QTestState *qts;
int i;
@@ -237,10 +232,7 @@ static void test_smram_smbase_lock(void)
}
/* reset */
- response = qtest_qmp(qts, "{'execute': 'system_reset', 'arguments': {} }");
- g_assert(response);
- g_assert(!qdict_haskey(response, "error"));
- qobject_unref(response);
+ qtest_system_reset(qts);
/* check RAM at SMBASE is available after reset */
g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
@@ -254,41 +246,6 @@ static void test_smram_smbase_lock(void)
qtest_quit(qts);
}
-static void test_without_smram_base(void)
-{
- QPCIBus *pcibus;
- QPCIDevice *pcidev;
- QTestState *qts;
- int i;
-
- qts = qtest_init("-M pc-q35-4.1");
-
- pcibus = qpci_new_pc(qts, NULL);
- g_assert(pcibus != NULL);
-
- pcidev = qpci_device_find(pcibus, 0);
- g_assert(pcidev != NULL);
-
- /* check that RAM is accessible */
- qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
- g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
-
- /* check that writing to 0x9c succeeds */
- for (i = 0; i <= 0xff; i++) {
- qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
- g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == i);
- }
-
- /* check that RAM is still accessible */
- qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN + 1);
- g_assert_cmpint(qtest_readb(qts, SMBASE), ==, (SMRAM_TEST_PATTERN + 1));
-
- g_free(pcidev);
- qpci_free_pc(pcibus);
-
- qtest_quit(qts);
-}
-
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
@@ -301,6 +258,6 @@ int main(int argc, char **argv)
qtest_add_data_func("/q35/tseg-size/ext/16mb", &tseg_ext_16mb,
test_tseg_size);
qtest_add_func("/q35/smram/smbase_lock", test_smram_smbase_lock);
- qtest_add_func("/q35/smram/legacy_smbase", test_without_smram_base);
+
return g_test_run();
}
diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c
index 2c15f60..040d042 100644
--- a/tests/qtest/qmp-cmd-test.c
+++ b/tests/qtest/qmp-cmd-test.c
@@ -14,7 +14,7 @@
#include "libqtest.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-introspect.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
const char common_args[] = "-nodefaults -machine none";
@@ -100,6 +100,7 @@ static bool query_is_ignored(const char *cmd)
/* Success depends on target arch: */
"query-cpu-definitions", /* arm, i386, ppc, s390x */
"query-gic-capabilities", /* arm */
+ "query-s390x-cpu-polarization", /* s390x */
/* Success depends on target-specific build configuration: */
"query-pci", /* CONFIG_PCI */
"x-query-virtio", /* CONFIG_VIRTIO */
diff --git a/tests/qtest/qmp-test.c b/tests/qtest/qmp-test.c
index 22957fa..edf0886 100644
--- a/tests/qtest/qmp-test.c
+++ b/tests/qtest/qmp-test.c
@@ -14,10 +14,10 @@
#include "libqtest.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-control.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
const char common_args[] = "-nodefaults -machine none";
diff --git a/tests/qtest/qom-test.c b/tests/qtest/qom-test.c
index d677f87..27d70bc 100644
--- a/tests/qtest/qom-test.c
+++ b/tests/qtest/qom-test.c
@@ -9,8 +9,8 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "qemu/cutils.h"
#include "libqtest.h"
@@ -88,6 +88,17 @@ static void test_machine(gconstpointer data)
qts = qtest_initf("-machine %s", machine);
+ if (g_test_slow()) {
+ /* Make sure we can get the machine class properties: */
+ g_autofree char *qom_machine = g_strdup_printf("%s-machine", machine);
+
+ response = qtest_qmp(qts, "{ 'execute': 'qom-list-properties',"
+ " 'arguments': { 'typename': %s } }",
+ qom_machine);
+ g_assert(response);
+ qobject_unref(response);
+ }
+
test_properties(qts, "/machine", true);
response = qtest_qmp(qts, "{ 'execute': 'quit' }");
diff --git a/tests/qtest/qos-test.c b/tests/qtest/qos-test.c
index 114f6be..abfd4b9 100644
--- a/tests/qtest/qos-test.c
+++ b/tests/qtest/qos-test.c
@@ -20,7 +20,7 @@
#include <getopt.h>
#include "libqtest-single.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/module.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-machine.h"
@@ -103,8 +103,7 @@ static void restart_qemu_or_continue(char *path)
old_path = g_strdup(path);
qtest_start(path);
} else { /* if cmd line is the same, reset the guest */
- qobject_unref(qmp("{ 'execute': 'system_reset' }"));
- qmp_eventwait("RESET");
+ qtest_system_reset(global_qtest);
}
}
diff --git a/tests/qtest/readconfig-test.c b/tests/qtest/readconfig-test.c
index 760f974..c6f32a4 100644
--- a/tests/qtest/readconfig-test.c
+++ b/tests/qtest/readconfig-test.c
@@ -13,10 +13,10 @@
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-qom.h"
#include "qapi/qapi-visit-ui.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
#include "qemu/units.h"
static QTestState *qtest_init_with_config(const char *cfgdata)
diff --git a/tests/qtest/riscv-csr-test.c b/tests/qtest/riscv-csr-test.c
new file mode 100644
index 0000000..ff5c29e
--- /dev/null
+++ b/tests/qtest/riscv-csr-test.c
@@ -0,0 +1,56 @@
+/*
+ * QTest testcase for RISC-V CSRs
+ *
+ * Copyright (c) 2024 Syntacore.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+
+#define CSR_MVENDORID 0xf11
+#define CSR_MISELECT 0x350
+
+static void run_test_csr(void)
+{
+ uint64_t res;
+ uint64_t val = 0;
+
+ QTestState *qts = qtest_init("-machine virt -cpu veyron-v1");
+
+ res = qtest_csr_call(qts, "get_csr", 0, CSR_MVENDORID, &val);
+
+ g_assert_cmpint(res, ==, 0);
+ g_assert_cmpint(val, ==, 0x61f);
+
+ val = 0xff;
+ res = qtest_csr_call(qts, "set_csr", 0, CSR_MISELECT, &val);
+
+ g_assert_cmpint(res, ==, 0);
+
+ val = 0;
+ res = qtest_csr_call(qts, "get_csr", 0, CSR_MISELECT, &val);
+
+ g_assert_cmpint(res, ==, 0);
+ g_assert_cmpint(val, ==, 0xff);
+
+ qtest_quit(qts);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_add_func("/cpu/csr", run_test_csr);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/riscv-iommu-test.c b/tests/qtest/riscv-iommu-test.c
new file mode 100644
index 0000000..df0c781
--- /dev/null
+++ b/tests/qtest/riscv-iommu-test.c
@@ -0,0 +1,210 @@
+/*
+ * QTest testcase for RISC-V IOMMU
+ *
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+#include "qemu/module.h"
+#include "libqos/qgraph.h"
+#include "libqos/riscv-iommu.h"
+#include "hw/pci/pci_regs.h"
+
+static uint32_t riscv_iommu_read_reg32(QRISCVIOMMU *r_iommu, int reg_offset)
+{
+ return qpci_io_readl(&r_iommu->dev, r_iommu->reg_bar, reg_offset);
+}
+
+static uint64_t riscv_iommu_read_reg64(QRISCVIOMMU *r_iommu, int reg_offset)
+{
+ return qpci_io_readq(&r_iommu->dev, r_iommu->reg_bar, reg_offset);
+}
+
+static void riscv_iommu_write_reg32(QRISCVIOMMU *r_iommu, int reg_offset,
+ uint32_t val)
+{
+ qpci_io_writel(&r_iommu->dev, r_iommu->reg_bar, reg_offset, val);
+}
+
+static void riscv_iommu_write_reg64(QRISCVIOMMU *r_iommu, int reg_offset,
+ uint64_t val)
+{
+ qpci_io_writeq(&r_iommu->dev, r_iommu->reg_bar, reg_offset, val);
+}
+
+static void test_pci_config(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+ QRISCVIOMMU *r_iommu = obj;
+ QPCIDevice *dev = &r_iommu->dev;
+ uint16_t vendorid, deviceid, classid;
+
+ vendorid = qpci_config_readw(dev, PCI_VENDOR_ID);
+ deviceid = qpci_config_readw(dev, PCI_DEVICE_ID);
+ classid = qpci_config_readw(dev, PCI_CLASS_DEVICE);
+
+ g_assert_cmpuint(vendorid, ==, RISCV_IOMMU_PCI_VENDOR_ID);
+ g_assert_cmpuint(deviceid, ==, RISCV_IOMMU_PCI_DEVICE_ID);
+ g_assert_cmpuint(classid, ==, RISCV_IOMMU_PCI_DEVICE_CLASS);
+}
+
+static void test_reg_reset(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+ QRISCVIOMMU *r_iommu = obj;
+ uint64_t cap;
+ uint32_t reg;
+
+ cap = riscv_iommu_read_reg64(r_iommu, RISCV_IOMMU_REG_CAP);
+ g_assert_cmpuint(cap & RISCV_IOMMU_CAP_VERSION, ==, 0x10);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR);
+ g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CQEN, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CIE, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CQON, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_BUSY, ==, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR);
+ g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FQEN, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FIE, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FQON, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_BUSY, ==, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR);
+ g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PQEN, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PIE, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PQON, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_BUSY, ==, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_DDTP);
+ g_assert_cmpuint(reg & RISCV_IOMMU_DDTP_BUSY, ==, 0);
+ g_assert_cmpuint(reg & RISCV_IOMMU_DDTP_MODE, ==,
+ RISCV_IOMMU_DDTP_MODE_OFF);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_IPSR);
+ g_assert_cmpuint(reg, ==, 0);
+}
+
+/*
+ * Common timeout-based poll for CQCSR, FQCSR and PQCSR. All
+ * their CQON/FQON/PQON bits map to RISCV_IOMMU_QUEUE_ACTIVE (bit 16).
+ */
+static void qtest_wait_for_queue_active(QRISCVIOMMU *r_iommu,
+ uint32_t queue_csr)
+{
+ QTestState *qts = global_qtest;
+ guint64 timeout_us = 2 * 1000 * 1000;
+ gint64 start_time = g_get_monotonic_time();
+ uint32_t reg;
+
+ for (;;) {
+ qtest_clock_step(qts, 100);
+
+ reg = riscv_iommu_read_reg32(r_iommu, queue_csr);
+ if (reg & RISCV_IOMMU_QUEUE_ACTIVE) {
+ break;
+ }
+ g_assert(g_get_monotonic_time() - start_time <= timeout_us);
+ }
+}
+
+/*
+ * Goes through the queue activation procedures of chapter 6.2,
+ * "Guidelines for initialization", of the RISCV-IOMMU spec.
+ */
+static void test_iommu_init_queues(void *obj, void *data,
+ QGuestAllocator *t_alloc)
+{
+ QRISCVIOMMU *r_iommu = obj;
+ uint64_t reg64, q_addr;
+ uint32_t reg;
+ int k = 2;
+
+ reg64 = riscv_iommu_read_reg64(r_iommu, RISCV_IOMMU_REG_CAP);
+ g_assert_cmpuint(reg64 & RISCV_IOMMU_CAP_VERSION, ==, 0x10);
+
+ /*
+ * Program the command queue. Write 0xF to civ, fiv, pmiv and
+ * piv. With the current PCI device impl we expect 2 writable
+ * bits for each (k = 2) since we have N = 4 total vectors (2^k).
+ */
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_IVEC, 0xFFFF);
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_IVEC);
+ g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_CIV, ==, 0x3);
+ g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_FIV, ==, 0x30);
+ g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_PMIV, ==, 0x300);
+ g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_PIV, ==, 0x3000);
+
+ /* Alloc a 4*16 bytes buffer and use it to set cqb */
+ q_addr = guest_alloc(t_alloc, 4 * 16);
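+ /* the LOG2SZ-1 field holds log2(nb entries) - 1, so k - 1 gives 2^k */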
+ reg64 = 0;
+ reg64 = deposit64(reg64, RISCV_IOMMU_CQB_PPN_START,
+ RISCV_IOMMU_CQB_PPN_LEN, q_addr);
+ reg64 = deposit64(reg64, RISCV_IOMMU_CQB_LOG2SZ_START,
+ RISCV_IOMMU_CQB_LOG2SZ_LEN, k - 1);
+ riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_CQB, reg64);
+
+ /* cqt = 0, cqcsr.cqen = 1, poll cqcsr.cqon until it reads 1 */
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_CQT, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR);
+ reg |= RISCV_IOMMU_CQCSR_CQEN;
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR, reg);
+
+ qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_CQCSR);
+
+ /*
+ * Program the fault queue. Alloc a 4*32 bytes (instead of 4*16)
+ * buffer and use it to set fqb.
+ */
+ q_addr = guest_alloc(t_alloc, 4 * 32);
+ reg64 = 0;
+ reg64 = deposit64(reg64, RISCV_IOMMU_FQB_PPN_START,
+ RISCV_IOMMU_FQB_PPN_LEN, q_addr);
+ reg64 = deposit64(reg64, RISCV_IOMMU_FQB_LOG2SZ_START,
+ RISCV_IOMMU_FQB_LOG2SZ_LEN, k - 1);
+ riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_FQB, reg64);
+
+ /* fqt = 0, fqcsr.fqen = 1, poll fqcsr.fqon until it reads 1 */
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_FQT, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR);
+ reg |= RISCV_IOMMU_FQCSR_FQEN;
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR, reg);
+
+ qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_FQCSR);
+
+ /*
+ * Program the page-request queue. Alloc a 4*16 bytes buffer
+ * and use it to set pqb.
+ */
+ q_addr = guest_alloc(t_alloc, 4 * 16);
+ reg64 = 0;
+ reg64 = deposit64(reg64, RISCV_IOMMU_PQB_PPN_START,
+ RISCV_IOMMU_PQB_PPN_LEN, q_addr);
+ reg64 = deposit64(reg64, RISCV_IOMMU_PQB_LOG2SZ_START,
+ RISCV_IOMMU_PQB_LOG2SZ_LEN, k - 1);
+ riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_PQB, reg64);
+
+ /* pqt = 0, pqcsr.pqen = 1, poll pqcsr.pqon until it reads 1 */
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_PQT, 0);
+
+ reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR);
+ reg |= RISCV_IOMMU_PQCSR_PQEN;
+ riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR, reg);
+
+ qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_PQCSR);
+}
+
+static void register_riscv_iommu_test(void)
+{
+ qos_add_test("pci_config", "riscv-iommu-pci", test_pci_config, NULL);
+ qos_add_test("reg_reset", "riscv-iommu-pci", test_reg_reset, NULL);
+ qos_add_test("iommu_init_queues", "riscv-iommu-pci",
+ test_iommu_init_queues, NULL);
+}
+
+libqos_init(register_riscv_iommu_test);
diff --git a/tests/qtest/rs5c372-test.c b/tests/qtest/rs5c372-test.c
new file mode 100644
index 0000000..0f6a9b6
--- /dev/null
+++ b/tests/qtest/rs5c372-test.c
@@ -0,0 +1,43 @@
+/*
+ * QTest testcase for the RS5C372 RTC
+ *
+ * Copyright (c) 2025 Bernhard Beschow <shentey@gmail.com>
+ *
+ * Based on ds1338-test.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bcd.h"
+#include "libqos/i2c.h"
+
+#define RS5C372_ADDR 0x32
+
+static void rs5c372_read_date(void *obj, void *data, QGuestAllocator *alloc)
+{
+ QI2CDevice *i2cdev = obj;
+
+ uint8_t resp[0x10];
+ time_t now = time(NULL);
+ struct tm *utc = gmtime(&now);
+
+ i2c_read_block(i2cdev, 0, resp, sizeof(resp));
+
+ /* check the retrieved date against the current UTC time */
+ g_assert_cmpuint(from_bcd(resp[5]), == , utc->tm_mday);
+ g_assert_cmpuint(from_bcd(resp[6]), == , 1 + utc->tm_mon);
+ g_assert_cmpuint(2000 + from_bcd(resp[7]), == , 1900 + utc->tm_year);
+}
+
+static void rs5c372_register_nodes(void)
+{
+ QOSGraphEdgeOptions opts = { };
+ add_qi2c_address(&opts, &(QI2CAddress) { RS5C372_ADDR });
+
+ qos_node_create_driver("rs5c372", i2c_device_create);
+ qos_node_consumes("rs5c372", "i2c-bus", &opts);
+ qos_add_test("read_date", "rs5c372", rs5c372_read_date, NULL);
+}
+
+libqos_init(rs5c372_register_nodes);
diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c
index eedf90f..55f671f 100644
--- a/tests/qtest/rtl8139-test.c
+++ b/tests/qtest/rtl8139-test.c
@@ -65,7 +65,7 @@ PORT(IntrMask, w, 0x3c)
PORT(IntrStatus, w, 0x3E)
PORT(TimerInt, l, 0x54)
-#define fatal(...) do { g_test_message(__VA_ARGS__); g_assert(0); } while (0)
+#define fatal(...) do { g_test_message(__VA_ARGS__); g_assert_not_reached(); } while (0)
static void test_timer(void)
{
diff --git a/tests/qtest/stm32l4x5.h b/tests/qtest/stm32l4x5.h
new file mode 100644
index 0000000..2d21cc6
--- /dev/null
+++ b/tests/qtest/stm32l4x5.h
@@ -0,0 +1,42 @@
+/*
+ * QTest testcase header for STM32L4X5 :
+ * used for consolidating common objects in stm32l4x5_*-test.c
+ *
+ * Copyright (c) 2024 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2024 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "libqtest.h"
+
+/* copied from clock.h */
+#define CLOCK_PERIOD_1SEC (1000000000llu << 32)
+#define CLOCK_PERIOD_FROM_HZ(hz) (((hz) != 0) ? CLOCK_PERIOD_1SEC / (hz) : 0u)
+/*
+ * MSI (4 MHz) is used as system clock source after startup
+ * from Reset.
+ * AHB, APB1 and APB2 prescalers are set to 1 at reset.
+ */
+#define SYSCLK_PERIOD CLOCK_PERIOD_FROM_HZ(4000000)
+#define RCC_AHB2ENR 0x4002104C
+#define RCC_APB1ENR1 0x40021058
+#define RCC_APB1ENR2 0x4002105C
+#define RCC_APB2ENR 0x40021060
+
+
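+/*
+ * Read the 'qtest-clock-period' QOM property of the clock at 'path'.
+ * A disabled clock reports a period of 0.
+ */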
+static inline uint64_t get_clock_period(QTestState *qts, const char *path)
+{
+ uint64_t clock_period = 0;
+ QDict *r;
+
+ r = qtest_qmp(qts, "{ 'execute': 'qom-get', 'arguments':"
+ " { 'path': %s, 'property': 'qtest-clock-period'} }", path);
+ g_assert_false(qdict_haskey(r, "error"));
+ clock_period = qdict_get_int(r, "return");
+ qobject_unref(r);
+ return clock_period;
+}
+
+
diff --git a/tests/qtest/stm32l4x5_gpio-test.c b/tests/qtest/stm32l4x5_gpio-test.c
index 72a7823..3c6ea71 100644
--- a/tests/qtest/stm32l4x5_gpio-test.c
+++ b/tests/qtest/stm32l4x5_gpio-test.c
@@ -10,6 +10,7 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
+#include "stm32l4x5.h"
#define GPIO_BASE_ADDR 0x48000000
#define GPIO_SIZE 0x400
@@ -168,14 +169,6 @@ static uint32_t reset(uint32_t gpio, unsigned int offset)
return 0x0;
}
-static void system_reset(void)
-{
- QDict *r;
- r = qtest_qmp(global_qtest, "{'execute': 'system_reset'}");
- g_assert_false(qdict_haskey(r, "error"));
- qobject_unref(r);
-}
-
static void test_idr_reset_value(void)
{
/*
@@ -213,7 +206,7 @@ static void test_idr_reset_value(void)
gpio_writel(GPIO_H, OTYPER, 0xDEADBEEF);
gpio_writel(GPIO_H, PUPDR, 0xDEADBEEF);
- system_reset();
+ qtest_system_reset(global_qtest);
uint32_t moder = gpio_readl(GPIO_A, MODER);
uint32_t odr = gpio_readl(GPIO_A, ODR);
@@ -505,6 +498,26 @@ static void test_bsrr_brr(const void *data)
gpio_writel(gpio, ODR, reset(gpio, ODR));
}
+static void test_clock_enable(void)
+{
+ /*
+ * For each GPIO, enable its clock in RCC
+ * and check that its clock period changes to SYSCLK_PERIOD
+ */
+ unsigned int gpio_id;
+
+ for (uint32_t gpio = GPIO_A; gpio <= GPIO_H; gpio += GPIO_B - GPIO_A) {
+ gpio_id = get_gpio_id(gpio);
+ g_autofree char *path = g_strdup_printf("/machine/soc/gpio%c/clk",
+ gpio_id + 'a');
+ g_assert_cmpuint(get_clock_period(global_qtest, path), ==, 0);
+ /* Enable the gpio clock */
+ writel(RCC_AHB2ENR, readl(RCC_AHB2ENR) | (0x1 << gpio_id));
+ g_assert_cmpuint(get_clock_period(global_qtest, path), ==,
+ SYSCLK_PERIOD);
+ }
+}
+
int main(int argc, char **argv)
{
int ret;
@@ -556,6 +569,8 @@ int main(int argc, char **argv)
qtest_add_data_func("stm32l4x5/gpio/test_bsrr_brr2",
test_data(GPIO_D, 0),
test_bsrr_brr);
+ qtest_add_func("stm32l4x5/gpio/test_clock_enable",
+ test_clock_enable);
qtest_start("-machine b-l475e-iot01a");
ret = g_test_run();
diff --git a/tests/qtest/stm32l4x5_syscfg-test.c b/tests/qtest/stm32l4x5_syscfg-test.c
index 258417c..376c80e 100644
--- a/tests/qtest/stm32l4x5_syscfg-test.c
+++ b/tests/qtest/stm32l4x5_syscfg-test.c
@@ -10,6 +10,7 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
+#include "stm32l4x5.h"
#define SYSCFG_BASE_ADDR 0x40010000
#define SYSCFG_MEMRMP 0x00
@@ -26,7 +27,9 @@
#define INVALID_ADDR 0x2C
/* SoC forwards GPIOs to SysCfg */
-#define SYSCFG "/machine/soc"
+#define SOC "/machine/soc"
+#define SYSCFG "/machine/soc/syscfg"
+#define SYSCFG_CLK "/machine/soc/syscfg/clk"
#define EXTI "/machine/soc/exti"
static void syscfg_writel(unsigned int offset, uint32_t value)
@@ -41,15 +44,7 @@ static uint32_t syscfg_readl(unsigned int offset)
static void syscfg_set_irq(int num, int level)
{
- qtest_set_irq_in(global_qtest, SYSCFG, NULL, num, level);
-}
-
-static void system_reset(void)
-{
- QDict *response;
- response = qtest_qmp(global_qtest, "{'execute': 'system_reset'}");
- g_assert(qdict_haskey(response, "return"));
- qobject_unref(response);
+ qtest_set_irq_in(global_qtest, SOC, NULL, num, level);
}
static void test_reset(void)
@@ -179,7 +174,7 @@ static void test_set_only_bits(void)
syscfg_writel(SYSCFG_SWPR2, 0x00000000);
g_assert_cmphex(syscfg_readl(SYSCFG_SWPR2), ==, 0xFFFFFFFF);
- system_reset();
+ qtest_system_reset(global_qtest);
}
static void test_clear_only_bits(void)
@@ -191,7 +186,7 @@ static void test_clear_only_bits(void)
syscfg_writel(SYSCFG_CFGR1, 0x00000001);
g_assert_cmphex(syscfg_readl(SYSCFG_CFGR1), ==, 0x00000000);
- system_reset();
+ qtest_system_reset(global_qtest);
}
static void test_interrupt(void)
@@ -301,6 +296,17 @@ static void test_irq_gpio_multiplexer(void)
syscfg_writel(SYSCFG_EXTICR1, 0x00000000);
}
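+/* Check that the SYSCFG clock starts ticking at SYSCLK_PERIOD once its enable bit is set in RCC_APB2ENR */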
+static void test_clock_enable(void)
+{
+ g_assert_cmpuint(get_clock_period(global_qtest, SYSCFG_CLK), ==, 0);
+
+ /* Enable SYSCFG clock */
+ writel(RCC_APB2ENR, readl(RCC_APB2ENR) | (0x1 << 0));
+
+ g_assert_cmpuint(get_clock_period(global_qtest, SYSCFG_CLK), ==,
+ SYSCLK_PERIOD);
+}
+
int main(int argc, char **argv)
{
int ret;
@@ -325,6 +331,8 @@ int main(int argc, char **argv)
test_irq_pin_multiplexer);
qtest_add_func("stm32l4x5/syscfg/test_irq_gpio_multiplexer",
test_irq_gpio_multiplexer);
+ qtest_add_func("stm32l4x5/syscfg/test_clock_enable",
+ test_clock_enable);
qtest_start("-machine b-l475e-iot01a");
ret = g_test_run();
diff --git a/tests/qtest/stm32l4x5_usart-test.c b/tests/qtest/stm32l4x5_usart-test.c
index 8902518..98a7472 100644
--- a/tests/qtest/stm32l4x5_usart-test.c
+++ b/tests/qtest/stm32l4x5_usart-test.c
@@ -12,6 +12,7 @@
#include "libqtest.h"
#include "hw/misc/stm32l4x5_rcc_internals.h"
#include "hw/registerfields.h"
+#include "stm32l4x5.h"
#define RCC_BASE_ADDR 0x40021000
/* Use USART 1 ADDR, assume the others work the same */
@@ -36,6 +37,8 @@ REG32(GTPR, 0x10)
REG32(RTOR, 0x14)
REG32(RQR, 0x18)
REG32(ISR, 0x1C)
+ FIELD(ISR, REACK, 22, 1)
+ FIELD(ISR, TEACK, 21, 1)
FIELD(ISR, TXE, 7, 1)
FIELD(ISR, RXNE, 5, 1)
FIELD(ISR, ORE, 3, 1)
@@ -191,7 +194,7 @@ static void init_uart(QTestState *qts)
/* Enable the transmitter, the receiver and the USART. */
qtest_writel(qts, (USART1_BASE_ADDR + A_CR1),
- R_CR1_UE_MASK | R_CR1_RE_MASK | R_CR1_TE_MASK);
+ cr1 | R_CR1_UE_MASK | R_CR1_RE_MASK | R_CR1_TE_MASK);
}
static void test_write_read(void)
@@ -202,6 +205,8 @@ static void test_write_read(void)
qtest_writel(qts, USART1_BASE_ADDR + A_TDR, 0xFFFFFFFF);
const uint32_t tdr = qtest_readl(qts, USART1_BASE_ADDR + A_TDR);
g_assert_cmpuint(tdr, ==, 0x000001FF);
+
+ qtest_quit(qts);
}
static void test_receive_char(void)
@@ -296,10 +301,65 @@ static void test_send_str(void)
qtest_quit(qts);
}
-int main(int argc, char **argv)
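+/* Check that ISR.TEACK and ISR.REACK follow the CR1 TE/RE enable bits */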
+static void test_ack(void)
{
- int ret;
+ uint32_t cr1;
+ uint32_t isr;
+ QTestState *qts = qtest_init("-M b-l475e-iot01a");
+
+ init_uart(qts);
+
+ cr1 = qtest_readl(qts, (USART1_BASE_ADDR + A_CR1));
+ /* Disable the transmitter and receiver. */
+ qtest_writel(qts, (USART1_BASE_ADDR + A_CR1),
+ cr1 & ~(R_CR1_RE_MASK | R_CR1_TE_MASK));
+
+ /* Test ISR ACK for transmitter and receiver disabled */
+ isr = qtest_readl(qts, (USART1_BASE_ADDR + A_ISR));
+ g_assert_false(isr & R_ISR_TEACK_MASK);
+ g_assert_false(isr & R_ISR_REACK_MASK);
+
+ /* Enable the transmitter and receiver. */
+ qtest_writel(qts, (USART1_BASE_ADDR + A_CR1),
+ cr1 | (R_CR1_RE_MASK | R_CR1_TE_MASK));
+
+ /* Test ISR ACK for transmitter and receiver enabled */
+ isr = qtest_readl(qts, (USART1_BASE_ADDR + A_ISR));
+ g_assert_true(isr & R_ISR_TEACK_MASK);
+ g_assert_true(isr & R_ISR_REACK_MASK);
+
+ qtest_quit(qts);
+}
+
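+/*
+ * Set one enable bit in the given RCC register and check that the clock
+ * at 'path' goes from stopped (period 0) to SYSCLK_PERIOD.
+ */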
+static void check_clock(QTestState *qts, const char *path, uint32_t rcc_reg,
+ uint32_t reg_offset)
+{
+ g_assert_cmpuint(get_clock_period(qts, path), ==, 0);
+ qtest_writel(qts, rcc_reg, qtest_readl(qts, rcc_reg) | (0x1 << reg_offset));
+ g_assert_cmpuint(get_clock_period(qts, path), ==, SYSCLK_PERIOD);
+}
+
+static void test_clock_enable(void)
+{
+ /*
+ * For each USART device, enable its clock in RCC
+ * and check that its clock period changes to SYSCLK_PERIOD
+ */
+ QTestState *qts = qtest_init("-M b-l475e-iot01a");
+
+ check_clock(qts, "machine/soc/usart[0]/clk", RCC_APB2ENR, 14);
+ check_clock(qts, "machine/soc/usart[1]/clk", RCC_APB1ENR1, 17);
+ check_clock(qts, "machine/soc/usart[2]/clk", RCC_APB1ENR1, 18);
+ check_clock(qts, "machine/soc/uart[0]/clk", RCC_APB1ENR1, 19);
+ check_clock(qts, "machine/soc/uart[1]/clk", RCC_APB1ENR1, 20);
+ check_clock(qts, "machine/soc/lpuart1/clk", RCC_APB1ENR2, 0);
+
+ qtest_quit(qts);
+}
+
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_test_set_nonfatal_assertions();
@@ -308,8 +368,8 @@ int main(int argc, char **argv)
qtest_add_func("stm32l4x5/usart/send_char", test_send_char);
qtest_add_func("stm32l4x5/usart/receive_str", test_receive_str);
qtest_add_func("stm32l4x5/usart/send_str", test_send_str);
- ret = g_test_run();
-
- return ret;
+ qtest_add_func("stm32l4x5/usart/ack", test_ack);
+ qtest_add_func("stm32l4x5/usart/clock_enable", test_clock_enable);
+ return g_test_run();
}
diff --git a/tests/qtest/tco-test.c b/tests/qtest/tco-test.c
index 0547d41..20ccefa 100644
--- a/tests/qtest/tco-test.c
+++ b/tests/qtest/tco-test.c
@@ -12,7 +12,7 @@
#include "libqtest.h"
#include "libqos/pci.h"
#include "libqos/pci-pc.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/pci/pci_regs.h"
#include "hw/southbridge/ich9.h"
#include "hw/acpi/ich9.h"
diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c
index f3865f7..723d2c2 100644
--- a/tests/qtest/test-filter-mirror.c
+++ b/tests/qtest/test-filter-mirror.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/iov.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
diff --git a/tests/qtest/test-filter-redirector.c b/tests/qtest/test-filter-redirector.c
index a77d5fd..a996a80 100644
--- a/tests/qtest/test-filter-redirector.c
+++ b/tests/qtest/test-filter-redirector.c
@@ -52,7 +52,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/iov.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
diff --git a/tests/qtest/test-netfilter.c b/tests/qtest/test-netfilter.c
index b09ef7f..326d4bd 100644
--- a/tests/qtest/test-netfilter.c
+++ b/tests/qtest/test-netfilter.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
/* add a netfilter to a netdev and then remove it */
static void add_one_netfilter(void)
diff --git a/tests/qtest/test-x86-cpuid-compat.c b/tests/qtest/test-x86-cpuid-compat.c
index b9e7e5e..456e2af 100644
--- a/tests/qtest/test-x86-cpuid-compat.c
+++ b/tests/qtest/test-x86-cpuid-compat.c
@@ -1,8 +1,8 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qbool.h"
#include "libqtest-single.h"
static char *get_cpu0_qom_path(void)
@@ -193,7 +193,6 @@ static void add_feature_test(const char *name, const char *cpu,
args->bitnr = bitnr;
args->expected_value = expected_value;
qtest_add_data_func(name, args, test_feature_flag);
- return;
}
static void test_plus_minus_subprocess(void)
@@ -357,19 +356,6 @@ int main(int argc, char **argv)
"486", "xstore=on", "pc-i440fx-2.7",
"xlevel2", 0);
}
- /*
- * QEMU 2.3.0 had auto-level enabled for CPUID[7], already,
- * and the compat code that sets default level shouldn't
- * disable the auto-level=7 code:
- */
- if (qtest_has_machine("pc-i440fx-2.3")) {
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
- "Penryn", NULL, "pc-i440fx-2.3",
- "level", 4);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
- "Penryn", "erms=on", "pc-i440fx-2.3",
- "level", 7);
- }
if (qtest_has_machine("pc-i440fx-2.9")) {
add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
"Conroe", NULL, "pc-i440fx-2.9",
@@ -379,25 +365,6 @@ int main(int argc, char **argv)
"level", 10);
}
- /*
- * xlevel doesn't have any feature that triggers auto-level
- * code on old machine-types. Just check that the compat code
- * is working correctly:
- */
- if (qtest_has_machine("pc-i440fx-2.3")) {
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
- "SandyBridge", NULL, "pc-i440fx-2.3",
- "xlevel", 0x8000000a);
- }
- if (qtest_has_machine("pc-i440fx-2.4")) {
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
- "SandyBridge", NULL, "pc-i440fx-2.4",
- "xlevel", 0x80000008);
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
- "SandyBridge", "svm=on,npt=on", "pc-i440fx-2.4",
- "xlevel", 0x80000008);
- }
-
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
"486", "+arat",
diff --git a/tests/qtest/tmp105-test.c b/tests/qtest/tmp105-test.c
index 3678646..3b114a5 100644
--- a/tests/qtest/tmp105-test.c
+++ b/tests/qtest/tmp105-test.c
@@ -12,7 +12,7 @@
#include "libqtest-single.h"
#include "libqos/qgraph.h"
#include "libqos/i2c.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/sensor/tmp105_regs.h"
#define TMP105_TEST_ID "tmp105-test"
@@ -100,9 +100,9 @@ static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc)
g_assert_cmphex(value, ==, 0x14f0);
i2c_set16(i2cdev, TMP105_REG_T_LOW, 0x1234);
- g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_LOW), ==, 0x1234);
+ g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_LOW), ==, 0x1230);
i2c_set16(i2cdev, TMP105_REG_T_HIGH, 0x4231);
- g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_HIGH), ==, 0x4231);
+ g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_HIGH), ==, 0x4230);
}
static void tmp105_register_nodes(void)
diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c
index 2bf8ff4..9e4c200 100644
--- a/tests/qtest/tpm-emu.c
+++ b/tests/qtest/tpm-emu.c
@@ -16,8 +16,8 @@
#include "backends/tpm/tpm_ioctl.h"
#include "io/channel-socket.h"
#include "qapi/error.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "tpm-emu.h"
void tpm_emu_test_wait_cond(TPMTestState *s)
diff --git a/tests/qtest/tpm-emu.h b/tests/qtest/tpm-emu.h
index 712cee9..59c8009 100644
--- a/tests/qtest/tpm-emu.h
+++ b/tests/qtest/tpm-emu.h
@@ -21,7 +21,7 @@
#include "qemu/sockets.h"
#include "io/channel.h"
-#include "sysemu/tpm.h"
+#include "system/tpm.h"
#include "libqtest.h"
struct tpm_hdr {
diff --git a/tests/qtest/tpm-tests.c b/tests/qtest/tpm-tests.c
index fb94496..197714f 100644
--- a/tests/qtest/tpm-tests.c
+++ b/tests/qtest/tpm-tests.c
@@ -114,7 +114,7 @@ void tpm_test_swtpm_migration_test(const char *src_tpm_path,
sizeof(tpm_pcrread_resp));
tpm_util_migrate(src_qemu, uri);
- tpm_util_wait_for_migration_complete(src_qemu);
+ tpm_util_wait_for_migration_complete(dst_qemu);
tpm_util_pcrread(dst_qemu, tx, tpm_pcrread_resp,
sizeof(tpm_pcrread_resp));
diff --git a/tests/qtest/tpm-util.c b/tests/qtest/tpm-util.c
index 1c0319e..2cb2dd4 100644
--- a/tests/qtest/tpm-util.c
+++ b/tests/qtest/tpm-util.c
@@ -18,7 +18,7 @@
#include "hw/acpi/tpm.h"
#include "libqtest.h"
#include "tpm-util.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
void tpm_util_crb_transfer(QTestState *s,
const unsigned char *req, size_t req_size,
diff --git a/tests/qtest/ufs-test.c b/tests/qtest/ufs-test.c
index 82ec3f0..4867ccf 100644
--- a/tests/qtest/ufs-test.c
+++ b/tests/qtest/ufs-test.c
@@ -8,13 +8,14 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
-#include "qemu/units.h"
#include "libqtest.h"
#include "libqos/qgraph.h"
#include "libqos/pci.h"
#include "scsi/constants.h"
#include "block/ufs.h"
+#include "qemu/bitmap.h"
+#define DWORD_BYTE 4
/* Test images sizes in Bytes */
#define TEST_IMAGE_SIZE (64 * 1024 * 1024)
/* Timeout for various operations, in seconds. */
@@ -26,6 +27,12 @@
#define UTP_COMMAND_DESCRIPTOR_SIZE 4096
#define UTP_RESPONSE_UPIU_OFFSET 1024
#define UTP_PRDT_UPIU_OFFSET 2048
+#define UTRD_TEST_SLOT 0
+#define UFS_MAX_CMD_DESC 32
+/* Constants for MCQ */
+#define TEST_QID 0
+#define QUEUE_SIZE 32
+#define UFS_MCQ_MAX_QNUM 32
typedef struct QUfs QUfs;
@@ -34,12 +41,22 @@ struct QUfs {
QPCIDevice dev;
QPCIBar bar;
- uint64_t utrlba;
- uint64_t utmrlba;
+ DECLARE_BITMAP(cmd_desc_bitmap, UFS_MAX_CMD_DESC);
uint64_t cmd_desc_addr;
uint64_t data_buffer_addr;
bool enabled;
+ bool support_mcq;
+
+ /* for legacy doorbell mode */
+ uint64_t utrlba;
+
+ /* for mcq mode */
+ uint32_t maxq;
+ uint64_t sqlba[UFS_MCQ_MAX_QNUM];
+ uint64_t cqlba[UFS_MCQ_MAX_QNUM];
+ uint64_t sqdao[UFS_MCQ_MAX_QNUM];
+ uint64_t cqdao[UFS_MCQ_MAX_QNUM];
};
static inline uint32_t ufs_rreg(QUfs *ufs, size_t offset)
@@ -52,6 +69,24 @@ static inline void ufs_wreg(QUfs *ufs, size_t offset, uint32_t value)
qpci_io_writel(&ufs->dev, ufs->bar, offset, value);
}
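+/* Reserve a free command descriptor slot; asserts if all UFS_MAX_CMD_DESC slots are in use */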
+static int alloc_cmd_desc_slot(QUfs *ufs)
+{
+ int slot = find_first_zero_bit(ufs->cmd_desc_bitmap, UFS_MAX_CMD_DESC);
+ if (slot == UFS_MAX_CMD_DESC) {
+ g_assert_not_reached();
+ }
+ set_bit(slot, ufs->cmd_desc_bitmap);
+ return slot;
+}
+
+static void release_cmd_desc_slot(QUfs *ufs, int slot)
+{
+ if (!test_bit(slot, ufs->cmd_desc_bitmap)) {
+ g_assert_not_reached();
+ }
+ clear_bit(slot, ufs->cmd_desc_bitmap);
+}
+
static void ufs_wait_for_irq(QUfs *ufs)
{
uint64_t end_time;
@@ -64,14 +99,11 @@ static void ufs_wait_for_irq(QUfs *ufs)
} while (is == 0 && g_get_monotonic_time() < end_time);
}
-static UtpTransferReqDesc ufs_build_req_utrd(uint64_t cmd_desc_addr,
- uint8_t slot,
+static UtpTransferReqDesc ufs_build_req_utrd(uint64_t command_desc_base_addr,
uint32_t data_direction,
uint16_t prd_table_length)
{
UtpTransferReqDesc req = { 0 };
- uint64_t command_desc_base_addr =
- cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
req.header.dword_0 =
cpu_to_le32(1 << 28 | data_direction | UFS_UTP_REQ_DESC_INT_CMD);
@@ -88,80 +120,140 @@ static UtpTransferReqDesc ufs_build_req_utrd(uint64_t cmd_desc_addr,
return req;
}
-static void ufs_send_nop_out(QUfs *ufs, uint8_t slot,
- UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out)
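+/* Submit one transfer request via the legacy UTRL doorbell and return its OCS */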
+static enum UtpOcsCodes
+__ufs_send_transfer_request_doorbell(QUfs *ufs, uint8_t lun,
+ const UtpTransferReqDesc *utrd)
{
- /* Build up utp transfer request descriptor */
- UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot,
- UFS_UTP_NO_DATA_TRANSFER, 0);
- uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
+ uint64_t utrd_addr =
+ ufs->utrlba + UTRD_TEST_SLOT * sizeof(UtpTransferReqDesc);
+ UtpTransferReqDesc utrd_result;
+
+ qtest_memwrite(ufs->dev.bus->qts, utrd_addr, utrd, sizeof(*utrd));
+
+ /* Ring the doorbell */
+ ufs_wreg(ufs, A_UTRLDBR, 1);
+ ufs_wait_for_irq(ufs);
+ g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
+ ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+
+ /* Handle completed command */
+ qtest_memread(ufs->dev.bus->qts, utrd_addr, &utrd_result,
+ sizeof(utrd_result));
+ return le32_to_cpu(utrd_result.header.dword_2) & 0xf;
+}
+
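+/* Submit one transfer request through MCQ queue 0 and reap its completion queue entry */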
+static enum UtpOcsCodes
+__ufs_send_transfer_request_mcq(QUfs *ufs, uint8_t lun,
+ const UtpTransferReqDesc *utrd)
+{
+ uint32_t sqtp = ufs_rreg(ufs, ufs->sqdao[TEST_QID] + 0x4);
+ uint64_t utrd_addr = ufs->sqlba[TEST_QID] + sqtp;
+ uint32_t cqhp;
+ uint64_t cqentry_addr;
+ UfsCqEntry cqentry;
+
+ qtest_memwrite(ufs->dev.bus->qts, utrd_addr, utrd, sizeof(*utrd));
+
+ /* Insert a new entry into the submission queue */
+ sqtp = ufs_rreg(ufs, ufs->sqdao[TEST_QID] + 0x4);
+ sqtp = (sqtp + sizeof(UfsSqEntry)) % (QUEUE_SIZE * sizeof(UfsSqEntry));
+ ufs_wreg(ufs, ufs->sqdao[TEST_QID] + 0x4, sqtp);
+ ufs_wait_for_irq(ufs);
+ g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, CQES));
+ ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, CQES, 1));
+
+ /* Handle the completed command from the completion queue */
+ cqhp = ufs_rreg(ufs, ufs->cqdao[TEST_QID]);
+ cqentry_addr = ufs->cqlba[TEST_QID] + cqhp;
+ qtest_memread(ufs->dev.bus->qts, cqentry_addr, &cqentry, sizeof(cqentry));
+ ufs_wreg(ufs, ufs->cqdao[TEST_QID], cqhp);
+
+ return cqentry.status;
+}
+
+static enum UtpOcsCodes
+ufs_send_transfer_request_sync(QUfs *ufs, uint8_t lun,
+ const UtpTransferReqDesc *utrd)
+{
+ if (ufs->support_mcq) {
+ return __ufs_send_transfer_request_mcq(ufs, lun, utrd);
+ }
+
+ return __ufs_send_transfer_request_doorbell(ufs, lun, utrd);
+}
+
+static enum UtpOcsCodes ufs_send_nop_out(QUfs *ufs, UtpUpiuRsp *rsp_out)
+{
+ int cmd_desc_slot = alloc_cmd_desc_slot(ufs);
uint64_t req_upiu_addr =
- ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+ ufs->cmd_desc_addr + cmd_desc_slot * UTP_COMMAND_DESCRIPTOR_SIZE;
uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
- qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
/* Build up request upiu */
UtpUpiuReq req_upiu = { 0 };
req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_NOP_OUT;
- req_upiu.header.task_tag = slot;
+ req_upiu.header.task_tag = cmd_desc_slot;
qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
sizeof(req_upiu));
- /* Ring Doorbell */
- ufs_wreg(ufs, A_UTRLDBR, 1);
- ufs_wait_for_irq(ufs);
- g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
- ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+ /* Build up utp transfer request descriptor */
+ UtpTransferReqDesc utrd =
+ ufs_build_req_utrd(req_upiu_addr, UFS_UTP_NO_DATA_TRANSFER, 0);
+
+ /* Send Transfer Request */
+ enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, 0, &utrd);
- qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
+ release_cmd_desc_slot(ufs, cmd_desc_slot);
+ return ret;
}
-static void ufs_send_query(QUfs *ufs, uint8_t slot, uint8_t query_function,
- uint8_t query_opcode, uint8_t idn, uint8_t index,
- UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out)
+static enum UtpOcsCodes ufs_send_query(QUfs *ufs, uint8_t query_function,
+ uint8_t query_opcode, uint8_t idn,
+ uint8_t index, uint8_t selector,
+ uint32_t attr_value, UtpUpiuRsp *rsp_out)
{
- /* Build up utp transfer request descriptor */
- UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot,
- UFS_UTP_NO_DATA_TRANSFER, 0);
- uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
+ int cmd_desc_slot = alloc_cmd_desc_slot(ufs);
uint64_t req_upiu_addr =
- ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+ ufs->cmd_desc_addr + cmd_desc_slot * UTP_COMMAND_DESCRIPTOR_SIZE;
uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
- qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
/* Build up request upiu */
UtpUpiuReq req_upiu = { 0 };
req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_QUERY_REQ;
req_upiu.header.query_func = query_function;
- req_upiu.header.task_tag = slot;
+ req_upiu.header.task_tag = cmd_desc_slot;
/*
- * QEMU UFS does not currently support Write descriptor and Write attribute,
+ * QEMU UFS does not currently support Write descriptor,
* so the value of data_segment_length is always 0.
*/
req_upiu.header.data_segment_length = 0;
req_upiu.qr.opcode = query_opcode;
req_upiu.qr.idn = idn;
req_upiu.qr.index = index;
+ req_upiu.qr.selector = selector;
+ req_upiu.qr.value = cpu_to_be32(attr_value);
+ req_upiu.qr.length = UFS_QUERY_DESC_MAX_SIZE;
qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
sizeof(req_upiu));
- /* Ring Doorbell */
- ufs_wreg(ufs, A_UTRLDBR, 1);
- ufs_wait_for_irq(ufs);
- g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
- ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+ /* Build up utp transfer request descriptor */
+ UtpTransferReqDesc utrd =
+ ufs_build_req_utrd(req_upiu_addr, UFS_UTP_NO_DATA_TRANSFER, 0);
+
+ /* Send Transfer Request */
+ enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, 0, &utrd);
- qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
+ release_cmd_desc_slot(ufs, cmd_desc_slot);
+ return ret;
}
-static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun,
- const uint8_t *cdb, const uint8_t *data_in,
- size_t data_in_len, uint8_t *data_out,
- size_t data_out_len,
- UtpTransferReqDesc *utrd_out,
- UtpUpiuRsp *rsp_out)
+static enum UtpOcsCodes
+ufs_send_scsi_command(QUfs *ufs, uint8_t lun, const uint8_t *cdb,
+ const uint8_t *data_in, size_t data_in_len,
+ uint8_t *data_out, size_t data_out_len,
+ UtpUpiuRsp *rsp_out)
{
/* Build up PRDT */
@@ -171,8 +263,9 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun,
uint8_t flags;
uint16_t prd_table_length, i;
uint32_t data_direction, data_len;
+ int cmd_desc_slot = alloc_cmd_desc_slot(ufs);
uint64_t req_upiu_addr =
- ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+ ufs->cmd_desc_addr + cmd_desc_slot * UTP_COMMAND_DESCRIPTOR_SIZE;
uint64_t prdt_addr = req_upiu_addr + UTP_PRDT_UPIU_OFFSET;
g_assert_true(data_in_len < MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
@@ -214,36 +307,33 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun,
qtest_memwrite(ufs->dev.bus->qts, prdt_addr, entries,
prd_table_length * sizeof(UfshcdSgEntry));
- /* Build up utp transfer request descriptor */
- UtpTransferReqDesc utrd = ufs_build_req_utrd(
- ufs->cmd_desc_addr, slot, data_direction, prd_table_length);
- uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
- qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
/* Build up request upiu */
UtpUpiuReq req_upiu = { 0 };
req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_COMMAND;
req_upiu.header.flags = flags;
req_upiu.header.lun = lun;
- req_upiu.header.task_tag = slot;
+ req_upiu.header.task_tag = cmd_desc_slot;
req_upiu.sc.exp_data_transfer_len = cpu_to_be32(data_len);
memcpy(req_upiu.sc.cdb, cdb, UFS_CDB_SIZE);
qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
sizeof(req_upiu));
- /* Ring Doorbell */
- ufs_wreg(ufs, A_UTRLDBR, 1);
- ufs_wait_for_irq(ufs);
- g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
- ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+ /* Build up utp transfer request descriptor */
+ UtpTransferReqDesc utrd =
+ ufs_build_req_utrd(req_upiu_addr, data_direction, prd_table_length);
+
+ /* Send Transfer Request */
+ enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, lun, &utrd);
- qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
if (data_out_len) {
qtest_memread(ufs->dev.bus->qts, ufs->data_buffer_addr, data_out,
data_out_len);
}
+ release_cmd_desc_slot(ufs, cmd_desc_slot);
+ return ret;
}
/**
@@ -253,10 +343,10 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun,
static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
{
uint64_t end_time;
- uint32_t nutrs, nutmrs;
+ uint32_t nutrs;
uint32_t hcs, is, ucmdarg2, cap;
uint32_t hce = 0, ie = 0;
- UtpTransferReqDesc utrd;
+ enum UtpOcsCodes ocs;
UtpUpiuRsp rsp_upiu;
ufs->bar = qpci_iomap(&ufs->dev, 0, NULL);
@@ -301,9 +391,12 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
hcs = ufs_rreg(ufs, A_HCS);
g_assert_true(FIELD_EX32(hcs, HCS, DP));
g_assert_true(FIELD_EX32(hcs, HCS, UTRLRDY));
- g_assert_true(FIELD_EX32(hcs, HCS, UTMRLRDY));
g_assert_true(FIELD_EX32(hcs, HCS, UCRDY));
+ /* Check MCQ support */
+ cap = ufs_rreg(ufs, A_CAP);
+ ufs->support_mcq = FIELD_EX32(cap, CAP, MCQS);
+
/* Enable all interrupt functions */
ie = FIELD_DP32(ie, IE, UTRCE, 1);
ie = FIELD_DP32(ie, IE, UEE, 1);
@@ -316,44 +409,89 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
ie = FIELD_DP32(ie, IE, HCFEE, 1);
ie = FIELD_DP32(ie, IE, SBFEE, 1);
ie = FIELD_DP32(ie, IE, CEFEE, 1);
+ if (ufs->support_mcq) {
+ ie = FIELD_DP32(ie, IE, CQEE, 1);
+ }
ufs_wreg(ufs, A_IE, ie);
ufs_wreg(ufs, A_UTRIACR, 0);
- /* Enable transfer request and task management request */
- cap = ufs_rreg(ufs, A_CAP);
- nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1;
- nutmrs = FIELD_EX32(cap, CAP, NUTMRS) + 1;
+ /* Enable transfer request */
ufs->cmd_desc_addr =
- guest_alloc(alloc, nutrs * UTP_COMMAND_DESCRIPTOR_SIZE);
+ guest_alloc(alloc, UFS_MAX_CMD_DESC * UTP_COMMAND_DESCRIPTOR_SIZE);
ufs->data_buffer_addr =
guest_alloc(alloc, MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
- ufs->utrlba = guest_alloc(alloc, nutrs * sizeof(UtpTransferReqDesc));
- ufs->utmrlba = guest_alloc(alloc, nutmrs * sizeof(UtpTaskReqDesc));
- ufs_wreg(ufs, A_UTRLBA, ufs->utrlba & 0xffffffff);
- ufs_wreg(ufs, A_UTRLBAU, ufs->utrlba >> 32);
- ufs_wreg(ufs, A_UTMRLBA, ufs->utmrlba & 0xffffffff);
- ufs_wreg(ufs, A_UTMRLBAU, ufs->utmrlba >> 32);
- ufs_wreg(ufs, A_UTRLRSR, 1);
- ufs_wreg(ufs, A_UTMRLRSR, 1);
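+ /* Configure either MCQ queue pairs or the legacy UTRL, depending on CAP.MCQS */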
+ if (ufs->support_mcq) {
+ uint32_t mcqcap, qid, qcfgptr, mcq_reg_offset;
+ uint32_t cqattr = 0, sqattr = 0;
+
+ mcqcap = ufs_rreg(ufs, A_MCQCAP);
+ qcfgptr = FIELD_EX32(mcqcap, MCQCAP, QCFGPTR);
+ ufs->maxq = FIELD_EX32(mcqcap, MCQCAP, MAXQ) + 1;
+ for (qid = 0; qid < ufs->maxq; ++qid) {
+ ufs->sqlba[qid] =
+ guest_alloc(alloc, QUEUE_SIZE * sizeof(UtpTransferReqDesc));
+ ufs->cqlba[qid] =
+ guest_alloc(alloc, QUEUE_SIZE * sizeof(UtpTransferReqDesc));
+ mcq_reg_offset = qcfgptr * 0x200 + qid * 0x40;
+
+ ufs_wreg(ufs, mcq_reg_offset + A_SQLBA,
+ ufs->sqlba[qid] & 0xffffffff);
+ ufs_wreg(ufs, mcq_reg_offset + A_SQUBA, ufs->sqlba[qid] >> 32);
+ ufs_wreg(ufs, mcq_reg_offset + A_CQLBA,
+ ufs->cqlba[qid] & 0xffffffff);
+ ufs_wreg(ufs, mcq_reg_offset + A_CQUBA, ufs->cqlba[qid] >> 32);
+
+ /* Enable Completion Queue */
+ cqattr = FIELD_DP32(cqattr, CQATTR, CQEN, 1);
+ cqattr = FIELD_DP32(cqattr, CQATTR, SIZE,
+ QUEUE_SIZE * sizeof(UtpTransferReqDesc) /
+ DWORD_BYTE);
+ ufs_wreg(ufs, mcq_reg_offset + A_CQATTR, cqattr);
+
+ /* Enable Submission Queue */
+ sqattr = FIELD_DP32(sqattr, SQATTR, SQEN, 1);
+ sqattr = FIELD_DP32(sqattr, SQATTR, SIZE,
+ QUEUE_SIZE * sizeof(UtpTransferReqDesc) /
+ DWORD_BYTE);
+ sqattr = FIELD_DP32(sqattr, SQATTR, CQID, qid);
+ ufs_wreg(ufs, mcq_reg_offset + A_SQATTR, sqattr);
+
+ /* Cache head & tail pointer */
+ ufs->sqdao[qid] = ufs_rreg(ufs, mcq_reg_offset + A_SQDAO);
+ ufs->cqdao[qid] = ufs_rreg(ufs, mcq_reg_offset + A_CQDAO);
+ }
+ } else {
+ nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1;
+ ufs->utrlba = guest_alloc(alloc, nutrs * sizeof(UtpTransferReqDesc));
+
+ ufs_wreg(ufs, A_UTRLBA, ufs->utrlba & 0xffffffff);
+ ufs_wreg(ufs, A_UTRLBAU, ufs->utrlba >> 32);
+ ufs_wreg(ufs, A_UTRLRSR, 1);
+ }
/* Send nop out to test transfer request */
- ufs_send_nop_out(ufs, 0, &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_nop_out(ufs, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
/* Set fDeviceInit flag via query request */
- ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
- UFS_UPIU_QUERY_OPCODE_SET_FLAG,
- UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_SET_FLAG,
+ UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
/* Wait for device to reset */
end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
do {
qtest_clock_step(ufs->dev.bus->qts, 100);
- ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
- UFS_UPIU_QUERY_OPCODE_READ_FLAG,
- UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu);
+ ocs =
+ ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG,
+ UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_COMMAND_RESULT_SUCCESS);
} while (be32_to_cpu(rsp_upiu.qr.value) != 0 &&
g_get_monotonic_time() < end_time);
g_assert_cmpuint(be32_to_cpu(rsp_upiu.qr.value), ==, 0);
@@ -364,8 +502,15 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
static void ufs_exit(QUfs *ufs, QGuestAllocator *alloc)
{
if (ufs->enabled) {
- guest_free(alloc, ufs->utrlba);
- guest_free(alloc, ufs->utmrlba);
+ if (ufs->support_mcq) {
+ for (uint32_t qid = 0; qid < ufs->maxq; ++qid) {
+ guest_free(alloc, ufs->sqlba[qid]);
+ guest_free(alloc, ufs->cqlba[qid]);
+ }
+ } else {
+ guest_free(alloc, ufs->utrlba);
+ }
+
guest_free(alloc, ufs->cmd_desc_addr);
guest_free(alloc, ufs->data_buffer_addr);
}
@@ -428,15 +573,15 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc)
const uint8_t request_sense_cdb[UFS_CDB_SIZE] = {
REQUEST_SENSE,
};
- UtpTransferReqDesc utrd;
+ enum UtpOcsCodes ocs;
UtpUpiuRsp rsp_upiu;
ufs_init(ufs, alloc);
/* Check REPORT_LUNS */
- ufs_send_scsi_command(ufs, 0, 0, report_luns_cdb, NULL, 0, buf, sizeof(buf),
- &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, 0, report_luns_cdb, NULL, 0, buf,
+ sizeof(buf), &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD);
/* LUN LIST LENGTH should be 8, in big endian */
g_assert_cmpuint(buf[3], ==, 8);
@@ -444,15 +589,15 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc)
g_assert_cmpuint(buf[9], ==, 0);
/* Clear Unit Attention */
- ufs_send_scsi_command(ufs, 0, 0, request_sense_cdb, NULL, 0, buf,
- sizeof(buf), &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, 0, request_sense_cdb, NULL, 0, buf,
+ sizeof(buf), &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION);
/* Check TEST_UNIT_READY */
- ufs_send_scsi_command(ufs, 0, 0, test_unit_ready_cdb, NULL, 0, NULL, 0,
- &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, 0, test_unit_ready_cdb, NULL, 0, NULL, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD);
ufs_exit(ufs, alloc);
@@ -494,22 +639,22 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
WRITE_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00
};
uint32_t block_size;
- UtpTransferReqDesc utrd;
+ enum UtpOcsCodes ocs;
UtpUpiuRsp rsp_upiu;
const int test_lun = 1;
ufs_init(ufs, alloc);
/* Clear Unit Attention */
- ufs_send_scsi_command(ufs, 0, test_lun, request_sense_cdb, NULL, 0,
- read_buf, sizeof(read_buf), &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, test_lun, request_sense_cdb, NULL, 0,
+ read_buf, sizeof(read_buf), &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION);
/* Read capacity */
- ufs_send_scsi_command(ufs, 0, test_lun, read_capacity_cdb, NULL, 0,
- read_buf, sizeof(read_buf), &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, test_lun, read_capacity_cdb, NULL, 0,
+ read_buf, sizeof(read_buf), &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
UFS_COMMAND_RESULT_SUCCESS);
block_size = ldl_be_p(&read_buf[8]);
@@ -517,16 +662,16 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
/* Write data */
memset(write_buf, 0xab, block_size);
- ufs_send_scsi_command(ufs, 0, test_lun, write_cdb, write_buf, block_size,
- NULL, 0, &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, test_lun, write_cdb, write_buf, block_size,
+ NULL, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
UFS_COMMAND_RESULT_SUCCESS);
/* Read data and verify */
- ufs_send_scsi_command(ufs, 0, test_lun, read_cdb, NULL, 0, read_buf,
- block_size, &utrd, &rsp_upiu);
- g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+ ocs = ufs_send_scsi_command(ufs, test_lun, read_cdb, NULL, 0, read_buf,
+ block_size, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
UFS_COMMAND_RESULT_SUCCESS);
g_assert_cmpint(memcmp(read_buf, write_buf, block_size), ==, 0);
@@ -534,6 +679,384 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
ufs_exit(ufs, alloc);
}
+static void ufstest_query_flag_request(void *obj, void *data,
+ QGuestAllocator *alloc)
+{
+ QUfs *ufs = obj;
+
+ enum UtpOcsCodes ocs;
+ UtpUpiuRsp rsp_upiu;
+ ufs_init(ufs, alloc);
+
+ /* Read read-only flag */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG,
+ UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_FLAG);
+ g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_FLAG_IDN_FDEVICEINIT);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0));
+
+ /* Flag Set, Clear, Toggle Test with fDeviceLifeSpanModeEn */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_SET_FLAG,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(1));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(1));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0));
+
+ /* Read Write-only Flag (Intended Failure) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG,
+ UFS_QUERY_FLAG_IDN_PURGE_ENABLE, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_NOT_READABLE);
+
+ /* Write Read-Only Flag (Intended Failure) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_SET_FLAG,
+ UFS_QUERY_FLAG_IDN_BUSY_RTC, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_NOT_WRITEABLE);
+
+ ufs_exit(ufs, alloc);
+}
+
+static void ufstest_query_attr_request(void *obj, void *data,
+ QGuestAllocator *alloc)
+{
+ QUfs *ufs = obj;
+
+ enum UtpOcsCodes ocs;
+ UtpUpiuRsp rsp_upiu;
+ ufs_init(ufs, alloc);
+
+ /* Read Readable Attributes */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_ATTR);
+ g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_ATTR_IDN_BOOT_LU_EN);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(160));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND, 0, 0, 0,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(60));
+
+ /* Write Writable Attributes & Read Again */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0x03,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0x07, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x07));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x07));
+
+ /* Write Invalid Value (Intended Error) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0x10,
+ &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_INVALID_VALUE);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03));
+
+ /* Read Write-Only Attribute (Intended Error) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_NOT_READABLE);
+
+ /* Write Read-Only Attribute (Intended Error) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_POWER_MODE, 0, 0, 0x01, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_NOT_WRITEABLE);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_POWER_MODE, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ /* Reset Written Attributes */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00));
+
+ ufs_exit(ufs, alloc);
+}
+
+static void ufstest_query_desc_request(void *obj, void *data,
+ QGuestAllocator *alloc)
+{
+ QUfs *ufs = obj;
+
+ enum UtpOcsCodes ocs;
+ UtpUpiuRsp rsp_upiu;
+ ufs_init(ufs, alloc);
+
+ /* Write Descriptor is not supported yet */
+
+ /* Read Device Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_DEVICE, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_DESC);
+ g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_DESC_IDN_DEVICE);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(DeviceDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_DEVICE);
+
+ /* Read Configuration Descriptor is not supported yet */
+
+ /* Read Unit Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_UNIT, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(UnitDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT);
+ g_assert_cmpuint(rsp_upiu.qr.data[2], ==, 0);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_UNIT, 1, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(UnitDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT);
+ g_assert_cmpuint(rsp_upiu.qr.data[2], ==, 1);
+
+ ocs =
+ ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC, UFS_QUERY_DESC_IDN_UNIT,
+ UFS_UPIU_RPMB_WLUN, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(RpmbUnitDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT);
+ g_assert_cmpuint(rsp_upiu.qr.data[2], ==, UFS_UPIU_RPMB_WLUN);
+
+ /* Read Interconnect Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_INTERCONNECT, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(InterconnectDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_INTERCONNECT);
+
+ /* Read String Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_STRING, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x12);
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_STRING, 1, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x22);
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_STRING, 4, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x0a);
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING);
+
+ /* Read Geometry Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_GEOMETRY, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(GeometryDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_GEOMETRY);
+
+ /* Read Power Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_POWER, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==,
+ sizeof(PowerParametersDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_POWER);
+
+ /* Read Health Descriptor */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_HEALTH, 0, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS);
+ g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(DeviceHealthDescriptor));
+ g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_HEALTH);
+
+ /* Invalid Index (Intended Failure) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_UNIT, 4, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_INVALID_INDEX);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_STRING, 5, 0, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_INVALID_INDEX);
+
+ /* Invalid Selector (Intended Failure) */
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_DEVICE, 0, 1, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_INVALID_SELECTOR);
+
+ ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC,
+ UFS_QUERY_DESC_IDN_STRING, 0, 1, 0, &rsp_upiu);
+ g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ g_assert_cmpuint(rsp_upiu.header.response, ==,
+ UFS_QUERY_RESULT_INVALID_SELECTOR);
+
+ ufs_exit(ufs, alloc);
+}
+
static void drive_destroy(void *path)
{
unlink(path);
@@ -575,12 +1098,16 @@ static void ufs_register_nodes(void)
QOSGraphEdgeOptions edge_opts = {
.before_cmd_line = "-blockdev null-co,node-name=drv0,read-zeroes=on",
.after_cmd_line = "-device ufs-lu,bus=ufs0,drive=drv0,lun=0",
- .extra_device_opts = "addr=04.0,id=ufs0,nutrs=32,nutmrs=8"
+ .extra_device_opts = "addr=04.0,id=ufs0"
};
- QOSGraphTestOptions io_test_opts = {
- .before = ufs_blk_test_setup,
- };
+ QOSGraphTestOptions io_test_opts = { .before = ufs_blk_test_setup,
+ .edge.extra_device_opts =
+ "mcq=false,nutrs=32,nutmrs=8" };
+
+ QOSGraphTestOptions mcq_test_opts = { .before = ufs_blk_test_setup,
+ .edge.extra_device_opts =
+ "mcq=true,mcq-maxq=1" };
add_qpci_address(&edge_opts, &(QPCIAddress){ .devfn = QPCI_DEVFN(4, 0) });
@@ -600,7 +1127,14 @@ static void ufs_register_nodes(void)
return;
}
qos_add_test("init", "ufs", ufstest_init, NULL);
- qos_add_test("read-write", "ufs", ufstest_read_write, &io_test_opts);
+ qos_add_test("legacy-read-write", "ufs", ufstest_read_write, &io_test_opts);
+ qos_add_test("mcq-read-write", "ufs", ufstest_read_write, &mcq_test_opts);
+ qos_add_test("query-flag", "ufs", ufstest_query_flag_request,
+ &io_test_opts);
+ qos_add_test("query-attribute", "ufs", ufstest_query_attr_request,
+ &io_test_opts);
+ qos_add_test("query-desciptor", "ufs", ufstest_query_desc_request,
+ &io_test_opts);
}
libqos_init(ufs_register_nodes);
diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c
index d607500..75cb3e4 100644
--- a/tests/qtest/vhost-user-test.c
+++ b/tests/qtest/vhost-user-test.c
@@ -12,7 +12,7 @@
#include "libqtest-single.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/range.h"
@@ -20,7 +20,7 @@
#include "chardev/char-fe.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/virtio-pci.h"
@@ -920,7 +920,7 @@ static void wait_for_rings_started(TestServer *s, size_t count)
static inline void test_server_connect(TestServer *server)
{
- test_server_create_chr(server, ",reconnect=1");
+ test_server_create_chr(server, ",reconnect-ms=1000");
}
static gboolean
@@ -1043,7 +1043,8 @@ static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
static uint64_t vu_net_get_features(TestServer *s)
{
- uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
+ uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1 |
+ 0x1ULL << VHOST_F_LOG_ALL |
0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
if (s->queues > 1) {
diff --git a/tests/qtest/virtio-9p-test.c b/tests/qtest/virtio-9p-test.c
index 3c8cd23..ac38ccf 100644
--- a/tests/qtest/virtio-9p-test.c
+++ b/tests/qtest/virtio-9p-test.c
@@ -20,6 +20,7 @@
#define tversion(...) v9fs_tversion((TVersionOpt) __VA_ARGS__)
#define tattach(...) v9fs_tattach((TAttachOpt) __VA_ARGS__)
#define tgetattr(...) v9fs_tgetattr((TGetAttrOpt) __VA_ARGS__)
+#define tsetattr(...) v9fs_tsetattr((TSetAttrOpt) __VA_ARGS__)
#define treaddir(...) v9fs_treaddir((TReadDirOpt) __VA_ARGS__)
#define tlopen(...) v9fs_tlopen((TLOpenOpt) __VA_ARGS__)
#define twrite(...) v9fs_twrite((TWriteOpt) __VA_ARGS__)
@@ -693,6 +694,64 @@ static void fs_unlinkat_hardlink(void *obj, void *data,
g_assert(stat(real_file, &st_real) == 0);
}
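+/* Keep a fid open across unlinkat and verify getattr, write and truncate still work */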
+static void fs_use_after_unlink(void *obj, void *data,
+ QGuestAllocator *t_alloc)
+{
+ QVirtio9P *v9p = obj;
+ v9fs_set_allocator(t_alloc);
+ static const uint32_t write_count = P9_MAX_SIZE / 2;
+ g_autofree char *real_file = virtio_9p_test_path("09/doa_file");
+ g_autofree char *buf = g_malloc0(write_count);
+ struct stat st_file;
+ struct v9fs_attr attr;
+ uint32_t fid_file;
+ uint32_t count;
+
+ tattach({ .client = v9p });
+
+ /* create a file "09/doa_file" and make sure it exists and is regular */
+ tmkdir({ .client = v9p, .atPath = "/", .name = "09" });
+ tlcreate({ .client = v9p, .atPath = "09", .name = "doa_file" });
+ g_assert(stat(real_file, &st_file) == 0);
+ g_assert((st_file.st_mode & S_IFMT) == S_IFREG);
+
+ /* request a FID for that regular file that we can work with next */
+ fid_file = twalk({
+ .client = v9p, .fid = 0, .path = "09/doa_file"
+ }).newfid;
+ g_assert(fid_file != 0);
+
+ /* now first open the file in write mode before ... */
+ tlopen({ .client = v9p, .fid = fid_file, .flags = O_WRONLY });
+ /* ... removing the file from file system */
+ tunlinkat({ .client = v9p, .atPath = "09", .name = "doa_file" });
+
+ /* file is removed, but we still have it open, so this should succeed */
+ tgetattr({
+ .client = v9p, .fid = fid_file, .request_mask = P9_GETATTR_BASIC,
+ .rgetattr.attr = &attr
+ });
+ count = twrite({
+ .client = v9p, .fid = fid_file, .offset = 0, .count = write_count,
+ .data = buf
+ }).count;
+ g_assert_cmpint(count, ==, write_count);
+
+ /* truncate file to (arbitrarily chosen) size 2001 */
+ tsetattr({
+ .client = v9p, .fid = fid_file, .attr = (v9fs_attr) {
+ .valid = P9_SETATTR_SIZE,
+ .size = 2001
+ }
+ });
+ /* truncate apparently succeeded, let's double-check the size */
+ tgetattr({
+ .client = v9p, .fid = fid_file, .request_mask = P9_GETATTR_BASIC,
+ .rgetattr.attr = &attr
+ });
+ g_assert_cmpint(attr.size, ==, 2001);
+}
+
static void cleanup_9p_local_driver(void *data)
{
/* remove previously created test dir when test is completed */
@@ -758,6 +817,8 @@ static void register_virtio_9p_test(void)
qos_add_test("local/hardlink_file", "virtio-9p", fs_hardlink_file, &opts);
qos_add_test("local/unlinkat_hardlink", "virtio-9p", fs_unlinkat_hardlink,
&opts);
+ qos_add_test("local/use_after_unlink", "virtio-9p", fs_use_after_unlink,
+ &opts);
}
libqos_init(register_virtio_9p_test);
diff --git a/tests/qtest/virtio-balloon-test.c b/tests/qtest/virtio-balloon-test.c
new file mode 100644
index 0000000..ecdd363
--- /dev/null
+++ b/tests/qtest/virtio-balloon-test.c
@@ -0,0 +1,57 @@
+/*
+ * QTest test cases for virtio balloon device
+ *
+ * Copyright (c) 2024 Gao Shiyuan <gaoshiyuan@baidu.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "standard-headers/linux/virtio_balloon.h"
+
+/*
+ * https://gitlab.com/qemu-project/qemu/-/issues/2576
+ * Used to trigger:
+ * virtio_address_space_lookup: Assertion `mrs.mr' failed.
+ */
+static void oss_fuzz_71649(void)
+{
+ QTestState *s = qtest_init("-device virtio-balloon -machine q35"
+ " -nodefaults");
+
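+ /*
+ * 0xcf8/0xcfc are the x86 PCI configuration address/data ports; the
+ * accesses below replay the fuzzer's config-space poking verbatim.
+ */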
+ qtest_outl(s, 0xcf8, 0x80000890);
+ qtest_outl(s, 0xcfc, 0x2);
+ qtest_outl(s, 0xcf8, 0x80000891);
+ qtest_inl(s, 0xcfc);
+ qtest_quit(s);
+}
+
+static void query_stats(void)
+{
+ QTestState *s = qtest_init("-device virtio-balloon,id=balloon"
+ " -nodefaults");
+ QDict *ret = qtest_qmp_assert_success_ref(
+ s,
+ "{ 'execute': 'qom-get', 'arguments': " \
+ "{ 'path': '/machine/peripheral/balloon', " \
+ " 'property': 'guest-stats' } }");
+ QDict *stats = qdict_get_qdict(ret, "stats");
+
+ /* We expect 1 entry in the dict for each known kernel stat */
+ assert(qdict_size(stats) == VIRTIO_BALLOON_S_NR);
+
+ qobject_unref(ret);
+ qtest_quit(s);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_add_func("virtio-balloon/oss_fuzz_71649", oss_fuzz_71649);
+ qtest_add_func("virtio-balloon/query-stats", query_stats);
+
+ return g_test_run();
+}
+
diff --git a/tests/qtest/virtio-iommu-test.c b/tests/qtest/virtio-iommu-test.c
index afb2259..98ffa27 100644
--- a/tests/qtest/virtio-iommu-test.c
+++ b/tests/qtest/virtio-iommu-test.c
@@ -105,7 +105,7 @@ static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu,
QVirtQueue *vq = v_iommu->vq;
uint64_t ro_addr, wr_addr;
uint32_t free_head;
- struct virtio_iommu_req_map req;
+ struct virtio_iommu_req_map req = {};
size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
size_t wr_size = sizeof(struct virtio_iommu_req_tail);
struct virtio_iommu_req_tail buffer;
@@ -147,7 +147,7 @@ static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu,
QVirtQueue *vq = v_iommu->vq;
uint64_t ro_addr, wr_addr;
uint32_t free_head;
- struct virtio_iommu_req_unmap req;
+ struct virtio_iommu_req_unmap req = {};
size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
size_t wr_size = sizeof(struct virtio_iommu_req_tail);
struct virtio_iommu_req_tail buffer;
diff --git a/tests/qtest/virtio-net-failover.c b/tests/qtest/virtio-net-failover.c
index 73dfabc..5baf81c 100644
--- a/tests/qtest/virtio-net-failover.c
+++ b/tests/qtest/virtio-net-failover.c
@@ -11,10 +11,11 @@
#include "libqtest.h"
#include "libqos/pci.h"
#include "libqos/pci-pc.h"
-#include "migration-helpers.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qjson.h"
+#include "migration/migration-qmp.h"
+#include "migration/migration-util.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qjson.h"
#include "libqos/malloc-pc.h"
#include "libqos/virtio-pci.h"
#include "hw/pci/pci.h"
@@ -772,7 +773,7 @@ static void test_migrate_in(gconstpointer opaque)
check_one_card(qts, true, "standby0", MAC_STANDBY0);
check_one_card(qts, false, "primary0", MAC_PRIMARY0);
- migrate_incoming_qmp(qts, uri, "{}");
+ migrate_incoming_qmp(qts, uri, NULL, "{}");
resp = get_failover_negociated_event(qts);
g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0");
@@ -894,7 +895,7 @@ static void test_off_migrate_in(gconstpointer opaque)
check_one_card(qts, true, "standby0", MAC_STANDBY0);
check_one_card(qts, true, "primary0", MAC_PRIMARY0);
- migrate_incoming_qmp(qts, uri, "{}");
+ migrate_incoming_qmp(qts, uri, NULL, "{}");
check_one_card(qts, true, "standby0", MAC_STANDBY0);
check_one_card(qts, true, "primary0", MAC_PRIMARY0);
@@ -1021,7 +1022,7 @@ static void test_guest_off_migrate_in(gconstpointer opaque)
check_one_card(qts, true, "standby0", MAC_STANDBY0);
check_one_card(qts, false, "primary0", MAC_PRIMARY0);
- migrate_incoming_qmp(qts, uri, "{}");
+ migrate_incoming_qmp(qts, uri, NULL, "{}");
check_one_card(qts, true, "standby0", MAC_STANDBY0);
check_one_card(qts, false, "primary0", MAC_PRIMARY0);
@@ -1746,7 +1747,7 @@ static void test_multi_in(gconstpointer opaque)
check_one_card(qts, true, "standby1", MAC_STANDBY1);
check_one_card(qts, false, "primary1", MAC_PRIMARY1);
- migrate_incoming_qmp(qts, uri, "{}");
+ migrate_incoming_qmp(qts, uri, NULL, "{}");
resp = get_failover_negociated_event(qts);
g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0");
diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c
index 2df75c9..60e5229 100644
--- a/tests/qtest/virtio-net-test.c
+++ b/tests/qtest/virtio-net-test.c
@@ -11,7 +11,7 @@
#include "libqtest-single.h"
#include "qemu/iov.h"
#include "qemu/module.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "hw/virtio/virtio-net.h"
#include "libqos/qgraph.h"
#include "libqos/virtio-net.h"
diff --git a/tests/qtest/vmcoreinfo-test.c b/tests/qtest/vmcoreinfo-test.c
new file mode 100644
index 0000000..dcf3b5a
--- /dev/null
+++ b/tests/qtest/vmcoreinfo-test.c
@@ -0,0 +1,90 @@
+/*
+ * qtest vmcoreinfo test case
+ *
+ * Copyright Red Hat. 2025.
+ *
+ * Authors:
+ * Ani Sinha <anisinha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "libqos/libqos-pc.h"
+#include "libqtest.h"
+#include "standard-headers/linux/qemu_fw_cfg.h"
+#include "libqos/fw_cfg.h"
+#include "qemu/bswap.h"
+#include "hw/misc/vmcoreinfo.h"
+
+static void test_vmcoreinfo_write_basic(void)
+{
+ QFWCFG *fw_cfg;
+ QOSState *qs;
+ FWCfgVMCoreInfo info;
+ size_t filesize;
+ uint16_t guest_format;
+ uint16_t host_format;
+ uint32_t size;
+ uint64_t paddr;
+
+ qs = qtest_pc_boot("-device vmcoreinfo");
+ fw_cfg = pc_fw_cfg_init(qs->qts);
+
+ memset(&info, 0 , sizeof(info));
+ /* read vmcoreinfo and read back the host format */
+ filesize = qfw_cfg_read_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME,
+ &info, sizeof(info));
+ g_assert_cmpint(filesize, ==, sizeof(info));
+
+ host_format = le16_to_cpu(info.host_format);
+ g_assert_cmpint(host_format, ==, FW_CFG_VMCOREINFO_FORMAT_ELF);
+
+ memset(&info, 0 , sizeof(info));
+ info.guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF);
+ info.size = cpu_to_le32(1 * MiB);
+ info.paddr = cpu_to_le64(0xffffff00);
+ info.host_format = cpu_to_le16(host_format);
+
+ /* write the values to the host */
+ filesize = qfw_cfg_write_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME,
+ &info, sizeof(info));
+ g_assert_cmpint(filesize, ==, sizeof(info));
+
+ memset(&info, 0 , sizeof(info));
+
+ /* now read back the values we wrote and compare that they are the same */
+ filesize = qfw_cfg_read_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME,
+ &info, sizeof(info));
+ g_assert_cmpint(filesize, ==, sizeof(info));
+
+ size = le32_to_cpu(info.size);
+ paddr = le64_to_cpu(info.paddr);
+ guest_format = le16_to_cpu(info.guest_format);
+
+ g_assert_cmpint(size, ==, 1 * MiB);
+ g_assert_cmpint(paddr, ==, 0xffffff00);
+ g_assert_cmpint(guest_format, ==, FW_CFG_VMCOREINFO_FORMAT_ELF);
+
+ pc_fw_cfg_uninit(fw_cfg);
+ qtest_shutdown(qs);
+}
+
+int main(int argc, char **argv)
+{
+ const char *arch = qtest_get_arch();
+
+ g_test_init(&argc, &argv, NULL);
+
+ if (strcmp(arch, "i386") && strcmp(arch, "x86_64")) {
+ /* skip for non-x86 */
+ exit(EXIT_SUCCESS);
+ }
+
+ qtest_add_func("vmcoreinfo/basic-write",
+ test_vmcoreinfo_write_basic);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/vmgenid-test.c b/tests/qtest/vmgenid-test.c
index 29fee9e..e613374 100644
--- a/tests/qtest/vmgenid-test.c
+++ b/tests/qtest/vmgenid-test.c
@@ -15,7 +15,7 @@
#include "boot-sector.h"
#include "acpi-utils.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#define VGID_GUID "324e6eaf-d1d1-4bf6-bf41-b9bb6c91fb87"
#define VMGENID_GUID_OFFSET 40 /* allow space for
diff --git a/tests/qtest/wdt_ib700-test.c b/tests/qtest/wdt_ib700-test.c
index 797288d..1754757 100644
--- a/tests/qtest/wdt_ib700-test.c
+++ b/tests/qtest/wdt_ib700-test.c
@@ -9,7 +9,7 @@
#include "qemu/osdep.h"
#include "libqtest.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/timer.h"
static void qmp_check_no_event(QTestState *s)
diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target
index cb8cfeb..95ff76e 100644
--- a/tests/tcg/Makefile.target
+++ b/tests/tcg/Makefile.target
@@ -90,6 +90,7 @@ CFLAGS=
LDFLAGS=
QEMU_OPTS=
+CHECK_PLUGIN_OUTPUT_COMMAND=
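+# When set, this command is run on the *.pout plugin output after each
+# run-plugin-* test to validate it (see the run-plugin-% rules below).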
# If TCG debugging, or TCI is enabled things are a lot slower
@@ -102,9 +103,14 @@ ifeq ($(filter %-softmmu, $(TARGET)),)
# then the target. If there are common tests shared between
# sub-targets (e.g. ARM & AArch64) then it is up to
# $(TARGET_NAME)/Makefile.target to include the common parent
-# architecture in its VPATH.
+# architecture in its VPATH. However some targets are so minimal we
+# can't even build the multiarch tests.
+ifneq ($(filter $(TARGET_NAME),aarch64_be),)
+-include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.target
+else
-include $(SRC_PATH)/tests/tcg/multiarch/Makefile.target
-include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.target
+endif
# Add the common build options
CFLAGS+=-Wall -Werror -O0 -g -fno-strict-aliasing
@@ -115,7 +121,7 @@ endif
%: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
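+# -Wa,--noexecstack stops the assembler from marking these hand-written
+# objects as needing an executable stack.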
%: %.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -Wa,--noexecstack $< -o $@ $(LDFLAGS)
else
# For system targets we include a different Makefile fragment as the
# build options for bare programs are usually pretty different. They
@@ -142,8 +148,8 @@ RUN_TESTS=$(patsubst %,run-%, $(TESTS))
# If plugins exist also include those in the tests
ifeq ($(CONFIG_PLUGIN),y)
-PLUGIN_SRC=$(SRC_PATH)/tests/plugin
-PLUGIN_LIB=../../plugin
+PLUGIN_SRC=$(SRC_PATH)/tests/tcg/plugins
+PLUGIN_LIB=../plugins
VPATH+=$(PLUGIN_LIB)
PLUGINS=$(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c)))
@@ -152,10 +158,11 @@ PLUGINS=$(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c)))
# only expand MULTIARCH_TESTS which are common on most of our targets
# to avoid an exponential explosion as new tests are added. We also
# add some special helpers the run-plugin- rules can use below.
+# In addition, extra tests can be added using the ADDITIONAL_PLUGINS_TESTS variable.
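+# (e.g. a per-target Makefile.target could set "ADDITIONAL_PLUGINS_TESTS += some-test"
+# for a hypothetical target-specific test that should also run under the plugins)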
ifneq ($(MULTIARCH_TESTS),)
$(foreach p,$(PLUGINS), \
- $(foreach t,$(MULTIARCH_TESTS),\
+ $(foreach t,$(MULTIARCH_TESTS) $(ADDITIONAL_PLUGINS_TESTS),\
$(eval run-plugin-$(t)-with-$(p): $t $p) \
$(eval RUN_TESTS+=run-plugin-$(t)-with-$(p))))
endif # MULTIARCH_TESTS
@@ -172,13 +179,17 @@ run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true
ifeq ($(filter %-softmmu, $(TARGET)),)
run-%: %
- $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<)
+ $(call run-test, $<, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) $<)
run-plugin-%:
- $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \
+ $(call run-test, $@, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) \
-plugin $(PLUGIN_LIB)/$(call extract-plugin,$@)$(PLUGIN_ARGS) \
-d plugin -D $*.pout \
$(call strip-plugin,$<))
+ $(if $(CHECK_PLUGIN_OUTPUT_COMMAND), \
+ $(call quiet-command, $(CHECK_PLUGIN_OUTPUT_COMMAND) $*.pout, \
+ TEST, check plugin $(call extract-plugin,$@) output \
+ with $(call strip-plugin,$<)))
else
run-%: %
$(call run-test, $<, \
@@ -193,6 +204,10 @@ run-plugin-%:
-plugin $(PLUGIN_LIB)/$(call extract-plugin,$@)$(PLUGIN_ARGS) \
-d plugin -D $*.pout \
$(QEMU_OPTS) $(call strip-plugin,$<))
+ $(if $(CHECK_PLUGIN_OUTPUT_COMMAND), \
+ $(call quiet-command, $(CHECK_PLUGIN_OUTPUT_COMMAND) $*.pout, \
+ TEST, check plugin $(call extract-plugin,$@) output \
+ with $(call strip-plugin,$<)))
endif
gdb-%: %
diff --git a/tests/tcg/README b/tests/tcg/README
index 706bb18..6d08ca5 100644
--- a/tests/tcg/README
+++ b/tests/tcg/README
@@ -1,9 +1,14 @@
-This directory contains various interesting guest programs for
-regression testing. Tests are either multi-arch, meaning they can be
-built for all guest architectures that support linux-user executable,
-or they are architecture specific.
-
-CRIS
-====
-The testsuite for CRIS is in tests/tcg/cris. You can run it
-with "make test-cris".
+This directory contains various interesting guest binaries for
+regression testing the Tiny Code Generator doing system and user-mode
+emulation.
+
+The multiarch directory contains shared code for tests that can be
+built for all guest architectures. Architecture specific code can be
+found in their respective directories.
+
+System mode tests will be under the "system" subdirectories.
+
+GDB scripts for exercising the gdbstub on specific tests will be found
+under the "gdbstub" subdirectories.
+
+See the developer guide for more instructions on "make check-tcg".
diff --git a/tests/tcg/aarch64/Makefile.softmmu-target b/tests/tcg/aarch64/Makefile.softmmu-target
index dd6d595..f7a7d2b 100644
--- a/tests/tcg/aarch64/Makefile.softmmu-target
+++ b/tests/tcg/aarch64/Makefile.softmmu-target
@@ -2,14 +2,22 @@
# Aarch64 system tests
#
-AARCH64_SYSTEM_SRC=$(SRC_PATH)/tests/tcg/aarch64/system
+AARCH64_SRC=$(SRC_PATH)/tests/tcg/aarch64
+AARCH64_SYSTEM_SRC=$(AARCH64_SRC)/system
+
VPATH+=$(AARCH64_SYSTEM_SRC)
# These objects provide the basic boot code and helper functions for all tests
CRT_OBJS=boot.o
-AARCH64_TEST_SRCS=$(wildcard $(AARCH64_SYSTEM_SRC)/*.c)
-AARCH64_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.c, %, $(AARCH64_TEST_SRCS))
+AARCH64_TEST_C_SRCS=$(wildcard $(AARCH64_SYSTEM_SRC)/*.c)
+AARCH64_TEST_S_SRCS=$(AARCH64_SYSTEM_SRC)/mte.S
+
+AARCH64_C_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.c, %, $(AARCH64_TEST_C_SRCS))
+AARCH64_S_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.S, %, $(AARCH64_TEST_S_SRCS))
+
+AARCH64_TESTS = $(AARCH64_C_TESTS)
+AARCH64_TESTS += $(AARCH64_S_TESTS)
CRT_PATH=$(AARCH64_SYSTEM_SRC)
LINK_SCRIPT=$(AARCH64_SYSTEM_SRC)/kernel.ld
@@ -21,14 +29,15 @@ LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc
config-cc.mak: Makefile
$(quiet-@)( \
- $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3)) 3> config-cc.mak
+ $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \
+ $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE)) 3> config-cc.mak
-include config-cc.mak
# building head blobs
.PRECIOUS: $(CRT_OBJS)
%.o: $(CRT_PATH)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
@@ -59,7 +68,8 @@ run-plugin-semiconsole-with-%: semiconsole
# vtimer test needs EL2
QEMU_EL2_MACHINE=-machine virt,virtualization=on,gic-version=2 -cpu cortex-a57 -smp 4
-run-vtimer: QEMU_OPTS=$(QEMU_EL2_MACHINE) $(QEMU_BASE_ARGS) -kernel
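+# boot.S reads the semihosting command line to pick the EL it drops to,
+# so arg="2" below requests EL2 for the vtimer test.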
+QEMU_EL2_BASE_ARGS=-semihosting-config enable=on,target=native,chardev=output,arg="2"
+run-vtimer: QEMU_OPTS=$(QEMU_EL2_MACHINE) $(QEMU_EL2_BASE_ARGS) -kernel
# Simple Record/Replay Test
.PHONY: memory-record
@@ -82,9 +92,44 @@ EXTRA_RUNS+=run-memory-replay
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
pauth-3: CFLAGS += $(CROSS_CC_HAS_ARMV8_3)
+# This test explicitly checks the output of the pauth operation so we
+# must force the use of the QARMA5 algorithm for it.
+run-pauth-3: QEMU_BASE_MACHINE=-M virt -cpu max,pauth-qarma5=on -display none
else
pauth-3:
$(call skip-test, "BUILD of $@", "missing compiler support")
run-pauth-3:
$(call skip-test, "RUN of pauth-3", "not built")
endif
+
+ifneq ($(CROSS_CC_HAS_ARMV8_MTE),)
+QEMU_MTE_ENABLED_MACHINE=-M virt,mte=on -cpu max -display none
+QEMU_OPTS_WITH_MTE_ON = $(QEMU_MTE_ENABLED_MACHINE) $(QEMU_BASE_ARGS) -kernel
+mte: CFLAGS+=-march=armv8.5-a+memtag
+mte: mte.S $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
+
+run-mte: QEMU_OPTS=$(QEMU_OPTS_WITH_MTE_ON)
+run-mte: mte
+
+ifeq ($(GDB_SUPPORTS_MTE_IN_BAREMETAL),y)
+run-gdbstub-mte: QEMU_OPTS=$(QEMU_OPTS_WITH_MTE_ON)
+run-gdbstub-mte: mte
+ $(call run-test, $@, $(GDB_SCRIPT) \
+ --output run-gdbstub-mte.out \
+ --gdb $(GDB) \
+ --qemu $(QEMU) --qargs "-chardev null$(COMMA)id=output $(QEMU_OPTS)" \
+ --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py -- --mode=system, \
+ gdbstub MTE support)
+
+EXTRA_RUNS += run-gdbstub-mte
+else # !GDB_SUPPORTS_MTE_IN_BAREMETAL
+run-gdbstub-mte:
+ $(call skip-test "RUN of gdbstub-mte", "GDB does not support MTE in baremetal!")
+endif
+else # !CROSS_CC_HAS_ARMV8_MTE
+mte:
+ $(call skip-test, "BUILD of $@", "missing compiler support")
+run-mte:
+ $(call skip-test, "RUN of mte", "not build")
+endif
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index 8cc62eb..16ddcf4 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -83,7 +83,8 @@ test-aes: CFLAGS += -O -march=armv8-a+aes
test-aes: test-aes-main.c.inc
# Vector SHA1
-sha1-vector: CFLAGS=-O3
+# Work around compiler false-positive warning, as we do for the 'sha1' test
+sha1-vector: CFLAGS=-O3 -Wno-stringop-overread
sha1-vector: sha1.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
run-sha1-vector: sha1-vector run-sha1
@@ -138,7 +139,8 @@ run-gdbstub-mte: mte-8
$(call run-test, $@, $(GDB_SCRIPT) \
--gdb $(GDB) \
--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
- --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py, \
+ --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py \
+ -- --mode=user, \
gdbstub MTE support)
EXTRA_RUNS += run-gdbstub-mte
diff --git a/tests/tcg/aarch64/gdbstub/test-mte.py b/tests/tcg/aarch64/gdbstub/test-mte.py
index 2db0663..9ad98e7 100644
--- a/tests/tcg/aarch64/gdbstub/test-mte.py
+++ b/tests/tcg/aarch64/gdbstub/test-mte.py
@@ -1,34 +1,59 @@
from __future__ import print_function
#
# Test GDB memory-tag commands that exercise the stubs for the qIsAddressTagged,
-# qMemTag, and QMemTag packets. Logical tag-only commands rely on local
-# operations, hence don't exercise any stub.
+# qMemTag, and QMemTag packets, which are used for manipulating allocation tags.
+# Logical tags-related commands rely on local operations, hence don't exercise
+# any stub and so are not used in this test.
#
-# The test consists in breaking just after a atag() call (which sets the
-# allocation tag -- see mte-8.c for details) and setting/getting tags in
-# different memory locations and ranges starting at the address of the array
-# 'a'.
+# The test consists in breaking just after a tag is set in a specific memory
+# chunk, and then using the GDB 'memory-tagging' subcommands to set/get tags in
+# different memory locations and ranges in the MTE-enabled memory chunk.
#
# This is launched via tests/guest-debug/run-test.py
#
-import gdb
+try:
+ import gdb
+except ModuleNotFoundError:
+ from sys import exit
+ exit("This script must be launched via tests/guest-debug/run-test.py!")
import re
-from test_gdbstub import main, report
+from sys import argv
+from test_gdbstub import arg_parser, main, report
-PATTERN_0 = "Memory tags for address 0x[0-9a-f]+ match \(0x[0-9a-f]+\)."
-PATTERN_1 = ".*(0x[0-9a-f]+)"
+PATTERN_0 = r"Memory tags for address 0x[0-9a-f]+ match \(0x[0-9a-f]+\)."
+PATTERN_1 = r".*(0x[0-9a-f]+)"
def run_test():
- gdb.execute("break 95", False, True)
+ p = arg_parser(prog="test-mte.py", description="TCG MTE tests.")
+ p.add_argument("--mode", help="Run test for QEMU system or user mode.",
+ required=True, choices=['system','user'])
+
+ args = p.parse_args(args=argv)
+
+ if args.mode == "system":
+ # Break address: where to break before performing the tests
+ # See mte.S for details about this label.
+ ba = "main_end"
+ # Tagged address: the start of the MTE-enabled memory chunk to be tested
+ # 'tagged_addr' (x1) is a pointer to the MTE-enabled page. See mte.S.
+ ta = "$x1"
+ else: # mode="user"
+ # Line 95 in mte-8.c
+ ba = "95"
+ # 'a' array. See mte-8.c
+ ta = "a"
+
+ gdb.execute(f"break {ba}", False, True)
gdb.execute("continue", False, True)
+
try:
- # Test if we can check correctly that the allocation tag for
- # array 'a' matches the logical tag after atag() is called.
- co = gdb.execute("memory-tag check a", False, True)
+ # Test if we can check correctly that the allocation tag for the address
+ # in {ta} matches the logical tag in {ta}.
+ co = gdb.execute(f"memory-tag check {ta}", False, True)
tags_match = re.findall(PATTERN_0, co, re.MULTILINE)
if tags_match:
report(True, f"{tags_match[0]}")
@@ -39,20 +64,20 @@ def run_test():
# tags rely on local operation and so don't exercise any stub.
# Set the allocation tag for the first granule (16 bytes) of
- # address starting at 'a' address to a known value, i.e. 0x04.
- gdb.execute("memory-tag set-allocation-tag a 1 04", False, True)
+ # address starting at {ta} address to a known value, i.e. 0x04.
+ gdb.execute(f"memory-tag set-allocation-tag {ta} 1 04", False, True)
# Then set the allocation tag for the second granule to a known
# value, i.e. 0x06. This tests that contiguous tag granules are
- # set correct and don't run over each other.
- gdb.execute("memory-tag set-allocation-tag a+16 1 06", False, True)
+ # set correctly and don't run over each other.
+ gdb.execute(f"memory-tag set-allocation-tag {ta}+16 1 06", False, True)
# Read the known values back and check if they remain the same.
- co = gdb.execute("memory-tag print-allocation-tag a", False, True)
+ co = gdb.execute(f"memory-tag print-allocation-tag {ta}", False, True)
first_tag = re.match(PATTERN_1, co)[1]
- co = gdb.execute("memory-tag print-allocation-tag a+16", False, True)
+ co = gdb.execute(f"memory-tag print-allocation-tag {ta}+16", False, True)
second_tag = re.match(PATTERN_1, co)[1]
if first_tag == "0x4" and second_tag == "0x6":
@@ -61,15 +86,15 @@ def run_test():
report(False, "Can't set/print allocation tags!")
# Now test fill pattern by setting a whole page with a pattern.
- gdb.execute("memory-tag set-allocation-tag a 4096 0a0b", False, True)
+ gdb.execute(f"memory-tag set-allocation-tag {ta} 4096 0a0b", False, True)
# And read back the tags of the last two granules in page so
# we also test if the pattern is set correctly up to the end of
# the page.
- co = gdb.execute("memory-tag print-allocation-tag a+4096-32", False, True)
+ co = gdb.execute(f"memory-tag print-allocation-tag {ta}+4096-32", False, True)
tag = re.match(PATTERN_1, co)[1]
- co = gdb.execute("memory-tag print-allocation-tag a+4096-16", False, True)
+ co = gdb.execute(f"memory-tag print-allocation-tag {ta}+4096-16", False, True)
last_tag = re.match(PATTERN_1, co)[1]
if tag == "0xa" and last_tag == "0xb":
@@ -78,8 +103,8 @@ def run_test():
report(False, "Fill pattern failed!")
except gdb.error:
- # This usually happens because a GDB version that does not
- # support memory tagging was used to run the test.
+ # This usually happens because a GDB version that does not support
+ # memory tagging was used to run the test.
report(False, "'memory-tag' command failed!")
diff --git a/tests/tcg/aarch64/system/boot.S b/tests/tcg/aarch64/system/boot.S
index 501685d..8bfa4e4 100644
--- a/tests/tcg/aarch64/system/boot.S
+++ b/tests/tcg/aarch64/system/boot.S
@@ -16,6 +16,7 @@
#define semihosting_call hlt 0xf000
#define SYS_WRITEC 0x03 /* character to debug channel */
#define SYS_WRITE0 0x04 /* string to debug channel */
+#define SYS_GET_CMDLINE 0x15 /* get command line */
#define SYS_EXIT 0x18
.align 12
@@ -70,22 +71,172 @@ lower_a32_sync:
lower_a32_irq:
lower_a32_fiq:
lower_a32_serror:
+ adr x1, .unexp_excp
+exit_msg:
mov x0, SYS_WRITE0
- adr x1, .error
- semihosting_call
- mov x0, SYS_EXIT
- mov x1, 1
semihosting_call
+ mov x0, 1 /* EXIT_FAILURE */
+ bl _exit
/* never returns */
.section .rodata
-.error:
- .string "Terminated by exception.\n"
+.unexp_excp:
+ .string "Unexpected exception.\n"
+.high_el_msg:
+ .string "Started in lower EL than requested.\n"
+.unexp_el0:
+ .string "Started in invalid EL.\n"
+
+ .align 8
+.get_cmd:
+ .quad cmdline
+ .quad 128
.text
.align 4
.global __start
__start:
+ /*
+ * Initialise the stack for whatever EL we are in before
+ * anything else, we need it to be able to _exit cleanly.
+ * It's smaller than the stack we pass to the C code but we
+ * don't need much.
+ */
+ adrp x0, system_stack_end
+ add x0, x0, :lo12:system_stack_end
+ mov sp, x0
+
+ /*
+ * The test can set the semihosting command line to the target
+ * EL needed for the test. However if no semihosting args are set we will
+ * end up with -kernel/-append data (see semihosting_arg_fallback).
+ * Keep the normalised target in w11.
+ */
+ mov x0, SYS_GET_CMDLINE
+ adr x1, .get_cmd
+ semihosting_call
+ adrp x10, cmdline
+ add x10, x10, :lo12:cmdline
+ ldrb w11, [x10]
+
+ /* sanity check, normalise char to EL, clamp to 1 if outside range */
+ subs w11, w11, #'0'
+ b.lt el_default
+ cmp w11, #3
+ b.gt el_default
+ b 1f
+
+el_high:
+ adr x1, .high_el_msg
+ b exit_msg
+
+el_default:
+ mov w11, #1
+
+1:
+ /* Determine current Exception Level */
+ mrs x0, CurrentEL
+ lsr x0, x0, #2 /* CurrentEL[3:2] contains the current EL */
+
+ /* Are we already in a lower EL than we want? */
+ cmp w11, w0
+ bgt el_high
+
+ /* Branch based on current EL */
+ cmp x0, #3
+ b.eq setup_el3
+ cmp x0, #2
+ b.eq setup_el2
+ cmp x0, #1
+ b.eq at_testel /* Already at EL1, skip transition */
+
+ /* Should not be at EL0 - error out */
+ adr x1, .unexp_el0
+ b exit_msg
+
+setup_el3:
+ /* Ensure we trap if we get anything wrong */
+ adr x0, vector_table
+ msr vbar_el3, x0
+
+ /* Does the test want to be at EL3? */
+ cmp w11, #3
+ beq at_testel
+
+ /* Configure EL3 for lower exception levels (EL2 or EL1) */
+ mrs x0, scr_el3
+ orr x0, x0, #(1 << 10) /* RW = 1: EL2/EL1 execution state is AArch64 */
+ orr x0, x0, #(1 << 0) /* NS = 1: Non-secure state */
+ msr scr_el3, x0
+
+ /*
+ * We need to check if EL2 is actually enabled via ID_AA64PFR0_EL1,
+ * otherwise we should just jump straight to EL1.
+ */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #8, #4 /* Extract EL2 field (bits 11:8) */
+ cbz x0, el2_not_present /* If field is 0 no EL2 */
+
+ /* Prepare SPSR for exception return to EL2 */
+ mov x0, #0x3c9 /* DAIF bits and EL2h mode (9) */
+ msr spsr_el3, x0
+
+ /* Set EL2 entry point */
+ adr x0, setup_el2
+ msr elr_el3, x0
+
+ /* Return to EL2 */
+ eret
+
+el2_not_present:
+ /* Initialize SCTLR_EL1 with reset value */
+ msr sctlr_el1, xzr
+
+ /* Set EL1 entry point */
+ adr x0, at_testel
+ msr elr_el3, x0
+
+ /* Prepare SPSR for exception return to EL1h with interrupts masked */
+ mov x0, #0x3c5 /* DAIF bits and EL1h mode (5) */
+ msr spsr_el3, x0
+
+ isb /* Synchronization barrier */
+ eret /* Jump to EL1 */
+
+setup_el2:
+ /* Ensure we trap if we get anything wrong */
+ adr x0, vector_table
+ msr vbar_el2, x0
+
+ /* Does the test want to be at EL2? */
+ cmp w11, #2
+ beq at_testel
+
+ /* Configure EL2 to allow transition to EL1 */
+ mrs x0, hcr_el2
+ orr x0, x0, #(1 << 31) /* RW = 1: EL1 execution state is AArch64 */
+ msr hcr_el2, x0
+
+ /* Initialize SCTLR_EL1 with reset value */
+ msr sctlr_el1, xzr
+
+ /* Set EL1 entry point */
+ adr x0, at_testel
+ msr elr_el2, x0
+
+ /* Prepare SPSR for exception return to EL1 */
+ mov x0, #(0x5 << 0) /* EL1h (SPx), with interrupts disabled */
+ msr spsr_el2, x0
+
+ /* Return to EL1 */
+ eret
+
+ /*
+ * At the target EL for the test, usually EL1. Note we still
+ * set everything up as if we were at EL1.
+ */
+at_testel:
/* Installs a table of exception vectors to catch and handle all
exceptions by terminating the process with a diagnostic. */
adr x0, vector_table
@@ -101,7 +252,7 @@ __start:
* maps RAM to the first Gb. The stage2 tables have two 2mb
* translation block entries covering a series of adjacent
* 4k pages.
- */
+ */
/* Stage 1 entry: indexed by IA[38:30] */
adr x1, . /* phys address */
@@ -135,6 +286,17 @@ __start:
orr x1, x1, x3
str x1, [x2] /* 2nd 2mb (.data & .bss)*/
+ /* Third block: at 'mte_page', set in kernel.ld */
+ adrp x1, mte_page
+ add x1, x1, :lo12:mte_page
+ bic x1, x1, #(1 << 21) - 1
+ and x4, x1, x5
+ add x2, x0, x4, lsr #(21 - 3)
+ /* attr(AF, NX, block, AttrIndx=Attr1) */
+ ldr x3, =(3 << 53) | 0x401 | (1 << 2)
+ orr x1, x1, x3
+ str x1, [x2]
+
/* Setup/enable the MMU. */
/*
@@ -188,7 +350,8 @@ __start:
orr x0, x0, #(3 << 16)
msr cpacr_el1, x0
- /* Setup some stack space and enter the test code.
+ /*
+ * Setup some stack space before we enter the test code.
* Assume everything except the return value is garbage when we
* return, we won't need it.
*/
@@ -223,6 +386,11 @@ __sys_outc:
ret
.data
+
+ .align 8
+cmdline:
+ .space 128, 0
+
.align 12
/* Translation table
@@ -236,6 +404,10 @@ ttb_stage2:
.space 4096, 0
.align 12
+system_stack:
+ .space 4096, 0
+system_stack_end:
+
stack:
.space 65536, 0
stack_end:
diff --git a/tests/tcg/aarch64/system/feat-xs.c b/tests/tcg/aarch64/system/feat-xs.c
new file mode 100644
index 0000000..f310fc8
--- /dev/null
+++ b/tests/tcg/aarch64/system/feat-xs.c
@@ -0,0 +1,27 @@
+/*
+ * FEAT_XS Test
+ *
+ * Copyright (c) 2024 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <minilib.h>
+#include <stdint.h>
+
+int main(void)
+{
+ uint64_t isar1;
+
+ asm volatile ("mrs %0, id_aa64isar1_el1" : "=r"(isar1));
+ if (((isar1 >> 56) & 0xf) < 1) {
+ ml_printf("FEAT_XS not supported by CPU");
+ return 1;
+ }
+ /* VMALLE1NXS */
+ asm volatile (".inst 0xd508971f");
+ /* VMALLE1OSNXS */
+ asm volatile (".inst 0xd508911f");
+
+ return 0;
+}
diff --git a/tests/tcg/aarch64/system/kernel.ld b/tests/tcg/aarch64/system/kernel.ld
index 7b3a76d..aef043e 100644
--- a/tests/tcg/aarch64/system/kernel.ld
+++ b/tests/tcg/aarch64/system/kernel.ld
@@ -1,23 +1,32 @@
ENTRY(__start)
-SECTIONS
-{
- /* virt machine, RAM starts at 1gb */
- . = (1 << 30);
+MEMORY {
+ /* On virt machine RAM starts at 1 GiB. */
+
+ /* Align text and rodata to the 1st 2 MiB chunk. */
+ TXT (rx) : ORIGIN = 1 << 30, LENGTH = 2M
+ /* Align r/w data to the 2nd 2 MiB chunk. */
+ DAT (rw) : ORIGIN = (1 << 30) + 2M, LENGTH = 2M
+ /* Align the MTE-enabled page to the 3rd 2 MiB chunk. */
+ TAG (rw) : ORIGIN = (1 << 30) + 4M, LENGTH = 2M
+}
+
+SECTIONS {
.text : {
*(.text)
- }
- .rodata : {
*(.rodata)
- }
- /* align r/w section to next 2mb */
- . = ALIGN(1 << 21);
+ } >TXT
.data : {
*(.data)
- }
- .bss : {
*(.bss)
- }
+ } >DAT
+ .tag : {
+ /*
+ * Symbol 'mte_page' is used in boot.S to setup the PTE and in the mte.S
+ * test as the address that the MTE instructions operate on.
+ */
+ mte_page = .;
+ } >TAG
/DISCARD/ : {
*(.ARM.attributes)
}
diff --git a/tests/tcg/aarch64/system/mte.S b/tests/tcg/aarch64/system/mte.S
new file mode 100644
index 0000000..b611240
--- /dev/null
+++ b/tests/tcg/aarch64/system/mte.S
@@ -0,0 +1,109 @@
+/*
+ * Code to help test the MTE gdbstubs in system mode.
+ *
+ * Copyright (c) 2024 Linaro Limited
+ *
+ * Author: Gustavo Romero <gustavo.romero@linaro.org>
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#define addr x0 /* Ptr to the start of the MTE-enabled page. */
+#define tagged_addr x1 /* 'addr' ptr with a random-generated tag added. */
+#define tmp0 x2 /* Scratch register. */
+#define tmp1 x3 /* Scratch register. */
+#define tmp2 x4 /* Scratch register. */
+#define tmp3 x5 /* Scratch register. */
+
+ .file "mte.S"
+
+ .text
+ .align 4
+
+ .globl main
+ .type main, @function
+
+main:
+ /*
+ * Set MAIR_EL1 (Memory Attribute Index Register). In boot.S, the
+ * attribute index for .mte_page is set to point to MAIR_EL1 field Attr1
+ * (AttrIndx=Attr1), so set Attr1 as Tagged Normal (MTE) to enable MTE
+ * on this page.
+ *
+ * Attr1 = 0xF0 => Tagged Normal (MTE)
+ */
+ mrs tmp0, mair_el1
+ orr tmp0, tmp0, (0xF0 << 8)
+ msr mair_el1, tmp0
+
+ /*
+ * Set TCR_EL1 (Translation Control Registers) to ignore the top byte
+ * in the translated addresses so it can be used to keep the tags.
+ *
+ * TBI0[37] = 0b1 => Top Byte ignored and used for tagged addresses
+ */
+ mrs tmp1, tcr_el1
+ orr tmp1, tmp1, (1 << 37)
+ msr tcr_el1, tmp1
+
+ /*
+ * Set SCTLR_EL1 (System Control Register) to enable the use of MTE
+ * insns., like stg & friends, and to enable synchronous exception in
+ * case of a tag mismatch, i.e., when the logical tag in 'tagged_addr'
+ * is different from the allocation tag related to 'addr' address.
+ *
+ * ATA[43] = 0b1 => Enable access to allocation tags at EL1
+ * TCF[41:40] = 0b01 => Tag Check Faults cause a synchronous exception
+ *
+ */
+ mrs tmp2, sctlr_el1
+ mov tmp3, (1 << 43) | (1 << 40)
+ orr tmp2, tmp2, tmp3
+ msr sctlr_el1, tmp2
+
+ isb
+
+ /*
+ * MTE-enabled page resides at the 3rd 2MB chunk in the second 1GB
+ * block, i.e., at 0x40400000 address. See .mte_page section in boot.S
+ * and kernel.ld (where the address is effectively computed).
+ *
+ * Load .mte_page address into 'addr' register.
+ */
+ adrp addr, mte_page
+ add addr, addr, :lo12:mte_page
+
+ /*
+ * Set GCR for random tag generation. 0xA5 is just a random value to set
+ * GCR != 0 so the tag generated by 'irg' insn. is not zero, which is
+ * more interesting for the tests than when tag is zero.
+ */
+ mov tmp0, 0xA5
+ msr gcr_el1, tmp0
+
+ /*
+ * Generate a logical tag, add it to 'addr' address and put it into
+ * 'tagged_addr'.
+ */
+ irg tagged_addr, addr
+
+ /*
+ * Store the generated tag to memory region pointed to by 'addr', i.e.
+ * set the allocation tag for granule at 'addr'. The tag is extracted
+ * by stg from tagged_addr pointer.
+ */
+ stg tagged_addr, [addr]
+
+ /*
+ * Store a random value (0xdeadbeef) to tagged_addr address. This must
+ * not cause any Tag Check Fault since logical tag in tagged_addr and
+ * allocation tag associated with the memory pointed by tagged_addr are
+ * set the same, otherwise something is off and the test fails -- an
+ * exception is generated.
+ */
+ ldr tmp1, =0xdeadbeef
+ str tmp1, [tagged_addr]
+
+ /* This label is used by GDB Python script test-mte.py. */
+main_end:
+ ret
diff --git a/tests/tcg/aarch64_be/Makefile.target b/tests/tcg/aarch64_be/Makefile.target
new file mode 100644
index 0000000..cbe5fa0
--- /dev/null
+++ b/tests/tcg/aarch64_be/Makefile.target
@@ -0,0 +1,17 @@
+# -*- Mode: makefile -*-
+#
+# A super basic AArch64 BE makefile. As we don't have any big-endian
+# libc available, the best we can do is a basic Hello World.
+
+AARCH64BE_SRC=$(SRC_PATH)/tests/tcg/aarch64_be
+VPATH += $(AARCH64BE_SRC)
+
+AARCH64BE_TEST_SRCS=$(notdir $(wildcard $(AARCH64BE_SRC)/*.c))
+AARCH64BE_TESTS=$(AARCH64BE_TEST_SRCS:.c=)
+#MULTIARCH_TESTS = $(MULTIARCH_SRCS:.c=)
+
+# We need to specify big-endian cflags
+CFLAGS +=-mbig-endian -ffreestanding
+LDFLAGS +=-nostdlib
+
+TESTS += $(AARCH64BE_TESTS)
diff --git a/tests/tcg/aarch64_be/hello.c b/tests/tcg/aarch64_be/hello.c
new file mode 100644
index 0000000..a9b2ab4
--- /dev/null
+++ b/tests/tcg/aarch64_be/hello.c
@@ -0,0 +1,35 @@
+/*
+ * Non-libc syscall hello world for Aarch64 BE
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#define __NR_write 64
+#define __NR_exit 93
+
+int write(int fd, char *buf, int len)
+{
+ register int x0 __asm__("x0") = fd;
+ register char *x1 __asm__("x1") = buf;
+ register int x2 __asm__("x2") = len;
+ register int x8 __asm__("x8") = __NR_write;
+
+ asm volatile("svc #0" : : "r"(x0), "r"(x1), "r"(x2), "r"(x8));
+
+ return len;
+}
+
+void exit(int ret)
+{
+ register int x0 __asm__("x0") = ret;
+ register int x8 __asm__("x8") = __NR_exit;
+
+ asm volatile("svc #0" : : "r"(x0), "r"(x8));
+ __builtin_unreachable();
+}
+
+void _start(void)
+{
+ write(1, "Hello World\n", 12);
+ exit(0);
+}
diff --git a/tests/tcg/alpha/Makefile.softmmu-target b/tests/tcg/alpha/Makefile.softmmu-target
index 09193a6..a944102 100644
--- a/tests/tcg/alpha/Makefile.softmmu-target
+++ b/tests/tcg/alpha/Makefile.softmmu-target
@@ -22,13 +22,13 @@ LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc
.PRECIOUS: $(CRT_OBJS)
%.o: $(CRT_PATH)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
-memory: CFLAGS+=-DCHECK_UNALIGNED=0
+memory: CFLAGS+=-DCHECK_UNALIGNED=0 -mbwx
# Running
QEMU_OPTS+=-serial chardev:output -kernel
diff --git a/tests/tcg/alpha/Makefile.target b/tests/tcg/alpha/Makefile.target
index fdd7ddf..36d8ed1 100644
--- a/tests/tcg/alpha/Makefile.target
+++ b/tests/tcg/alpha/Makefile.target
@@ -12,4 +12,7 @@ test-cmov: EXTRA_CFLAGS=-DTEST_CMOV
test-cmov: test-cond.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
+# Force generation of byte read/write
+test-plugin-mem-access: CFLAGS+=-mbwx
+
run-test-cmov: test-cmov
diff --git a/tests/tcg/arm/Makefile.softmmu-target b/tests/tcg/arm/Makefile.softmmu-target
index 547063c..b66074b 100644
--- a/tests/tcg/arm/Makefile.softmmu-target
+++ b/tests/tcg/arm/Makefile.softmmu-target
@@ -36,7 +36,7 @@ LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc
.PRECIOUS: $(CRT_OBJS)
%.o: $(ARM_SRC)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target
index 8e28719..6189d7a 100644
--- a/tests/tcg/arm/Makefile.target
+++ b/tests/tcg/arm/Makefile.target
@@ -20,13 +20,6 @@ ARM_TESTS = hello-arm
hello-arm: CFLAGS+=-marm -ffreestanding -fno-stack-protector
hello-arm: LDFLAGS+=-nostdlib
-# IWMXT floating point extensions
-ARM_TESTS += test-arm-iwmmxt
-# Clang assembler does not support IWMXT, so use the external assembler.
-test-arm-iwmmxt: CFLAGS += -marm -march=iwmmxt -mabi=aapcs -mfpu=fpv4-sp-d16 $(CROSS_CC_HAS_FNIA)
-test-arm-iwmmxt: test-arm-iwmmxt.S
- $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
-
# Float-convert Tests
ARM_TESTS += fcvt
fcvt: LDFLAGS += -lm
@@ -68,7 +61,8 @@ endif
ARM_TESTS += commpage
# Vector SHA1
-sha1-vector: CFLAGS=-O3
+# Work around compiler false-positive warning, as we do for the 'sha1' test
+sha1-vector: CFLAGS=-O3 -Wno-stringop-overread
sha1-vector: sha1.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
run-sha1-vector: sha1-vector run-sha1
diff --git a/tests/tcg/arm/README b/tests/tcg/arm/README
index e630711..aceccc1 100644
--- a/tests/tcg/arm/README
+++ b/tests/tcg/arm/README
@@ -4,8 +4,3 @@ hello-arm
---------
A very simple inline assembly, write syscall based hello world
-
-test-arm-iwmmxt
----------------
-
-A simple test case for older iwmmxt extended ARMs
diff --git a/tests/tcg/arm/test-arm-iwmmxt.S b/tests/tcg/arm/test-arm-iwmmxt.S
deleted file mode 100644
index d647f94..0000000
--- a/tests/tcg/arm/test-arm-iwmmxt.S
+++ /dev/null
@@ -1,49 +0,0 @@
-@ Checks whether iwMMXt is functional.
-.code 32
-.globl main
-
-main:
-ldr r0, =data0
-ldr r1, =data1
-ldr r2, =data2
-#ifndef FPA
-wldrd wr0, [r0, #0]
-wldrd wr1, [r0, #8]
-wldrd wr2, [r1, #0]
-wldrd wr3, [r1, #8]
-wsubb wr2, wr2, wr0
-wsubb wr3, wr3, wr1
-wldrd wr0, [r2, #0]
-wldrd wr1, [r2, #8]
-waddb wr0, wr0, wr2
-waddb wr1, wr1, wr3
-wstrd wr0, [r2, #0]
-wstrd wr1, [r2, #8]
-#else
-ldfe f0, [r0, #0]
-ldfe f1, [r0, #8]
-ldfe f2, [r1, #0]
-ldfe f3, [r1, #8]
-adfdp f2, f2, f0
-adfdp f3, f3, f1
-ldfe f0, [r2, #0]
-ldfe f1, [r2, #8]
-adfd f0, f0, f2
-adfd f1, f1, f3
-stfe f0, [r2, #0]
-stfe f1, [r2, #8]
-#endif
-mov r0, #1
-mov r1, r2
-mov r2, #0x11
-swi #0x900004
-mov r0, #0
-swi #0x900001
-
-.data
-data0:
-.string "aaaabbbbccccdddd"
-data1:
-.string "bbbbccccddddeeee"
-data2:
-.string "hvLLWs\x1fsdrs9\x1fNJ-\n"
diff --git a/tests/tcg/cris/.gdbinit b/tests/tcg/cris/.gdbinit
deleted file mode 100644
index 5e8c1d3..0000000
--- a/tests/tcg/cris/.gdbinit
+++ /dev/null
@@ -1,11 +0,0 @@
-b main
-b _fail
-b exit
-display /i $pc
-display /x $srp
-display /x $r0
-display /x $r1
-display /x $r2
-display /x $r3
-display /x $r4
-display /t $ccs
diff --git a/tests/tcg/cris/Makefile.target b/tests/tcg/cris/Makefile.target
deleted file mode 100644
index 713e2a5..0000000
--- a/tests/tcg/cris/Makefile.target
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- Mode: makefile -*-
-#
-# Cris tests
-#
-# Currently we can only build the "bare" tests with the docker
-# supplied cross-compiler.
-#
-
-CRIS_SRC = $(SRC_PATH)/tests/tcg/cris/bare
-CRIS_ALL = $(wildcard $(CRIS_SRC)/*.s)
-CRIS_TESTS = $(patsubst $(CRIS_SRC)/%.s, %, $(CRIS_ALL))
-# Filter out common blobs and broken tests
-CRIS_BROKEN_TESTS = crt check_jsr
-# upstream GCC doesn't support v32
-CRIS_BROKEN_TESTS += check_mcp check_mulv32 check_addiv32 check_movpmv32
-CRIS_BROKEN_TESTS += check_movprv32 check_clearfv32 check_movemrv32 check_bas
-CRIS_BROKEN_TESTS += check_lapc check_movei
-# no sure why
-CRIS_BROKEN_TESTS += check_scc check_xarith
-
-CRIS_USABLE_TESTS = $(filter-out $(CRIS_BROKEN_TESTS), $(CRIS_TESTS))
-CRIS_RUNS = $(patsubst %, run-%, $(CRIS_USABLE_TESTS))
-
-# override the list of tests, as we can't build the multiarch tests
-TESTS = $(CRIS_USABLE_TESTS)
-EXTRA_RUNS =
-VPATH = $(CRIS_SRC)
-
-AS = $(CC) -x assembler-with-cpp
-LD = $(CC)
-
-# we rely on GCC inline:ing the stuff we tell it to in many places here.
-CFLAGS = -Winline -Wall -g -O2 -static -fno-stack-protector
-NOSTDFLAGS = -nostartfiles -nostdlib
-ASFLAGS += -mcpu=v10 -g -Wa,-I,$(SRC_PATH)/tests/tcg/cris/bare
-CRT_FILES = crt.o sys.o
-
-# stop make deleting crt files if build fails
-.PRECIOUS: $(CRT_FILES)
-
-%.o: %.c
- $(CC) -c $< -o $@
-
-%.o: %.s
- $(AS) $(ASFLAGS) -c $< -o $@
-
-%: %.s $(CRT_FILES)
- $(CC) $(ASFLAGS) $< -o $@ $(LDFLAGS) $(NOSTDFLAGS) $(CRT_FILES)
-
-# The default CPU breaks (possibly as it's max?) so force crisv17
-QEMU_OPTS=-cpu crisv17
-
-# Additional runners to run under GNU SIM
-CRIS_RUNS_ON_SIM=$(patsubst %, %-on-sim, $(CRIS_RUNS))
-SIMG:=cris-axis-linux-gnu-run
-
-# e.g.: make -f ../../tests/tcg/Makefile run-check_orm-on-sim
-run-%-on-sim:
- $(call run-test, $<, $(SIMG) $<)
-
-# We don't currently support the multiarch tests
-undefine MULTIARCH_TESTS
diff --git a/tests/tcg/cris/README b/tests/tcg/cris/README
deleted file mode 100644
index 2e65a76..0000000
--- a/tests/tcg/cris/README
+++ /dev/null
@@ -1 +0,0 @@
-Test-suite for the cris port. Heavily based on the test-suite for the CRIS port of sim by Hans-Peter Nilsson.
diff --git a/tests/tcg/cris/bare/check_addcv17.s b/tests/tcg/cris/bare/check_addcv17.s
deleted file mode 100644
index 52ef7a9..0000000
--- a/tests/tcg/cris/bare/check_addcv17.s
+++ /dev/null
@@ -1,65 +0,0 @@
-# mach: crisv17
-
- .include "testutils.inc"
-
- .macro addc Rs Rd inc=0
-# Create the instruction manually since there is no assembler support yet
- .word (\Rd << 12) | \Rs | (\inc << 10) | 0x09a0
- .endm
-
- start
-
- .data
-mem1:
- .dword 0x0
-mem2:
- .dword 0x12345678
-
- .text
- move.d mem1,r4
- clearf nzvc
- addc 4 3
- test_cc 0 1 0 0
- checkr3 0
-
- move.d mem1,r4
- clearf nzvc
- ax
- addc 4 3
- test_cc 0 0 0 0
- checkr3 0
-
- move.d mem1,r4
- clearf nzvc
- setf c
- addc 4 3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d mem2,r4
- moveq 2, r3
- clearf nzvc
- setf c
- addc 4 3
- test_cc 0 0 0 0
- checkr3 1234567b
-
- move.d mem2,r5
- clearf nzvc
- cmp.d r4,r5
- test_cc 0 1 0 0
-
- move.d mem2,r4
- moveq 2, r3
- clearf nzvc
- addc 4 3 inc=1
- test_cc 0 0 0 0
- checkr3 1234567a
-
- move.d mem2,r5
- clearf nzvc
- addq 4,r5
- cmp.d r4,r5
- test_cc 0 1 0 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_addi.s b/tests/tcg/cris/bare/check_addi.s
deleted file mode 100644
index a00dec0..0000000
--- a/tests/tcg/cris/bare/check_addi.s
+++ /dev/null
@@ -1,57 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 0\n1\n2\n4\nbe02460f\n69d035a6\nc16c14d4\n
-
- .include "testutils.inc"
- start
- moveq 0,r3
- moveq 0,r4
- clearf zcvn
- addi r4.b,r3
- test_cc 0 0 0 0
- checkr3 0
-
- moveq 0,r3
- moveq 1,r4
- setf zcvn
- addi r4.b,r3
- test_cc 1 1 1 1
- checkr3 1
-
- moveq 0,r3
- moveq 1,r4
- setf cv
- clearf zn
- addi r4.w,r3
- test_cc 0 0 1 1
- checkr3 2
-
- moveq 0,r3
- moveq 1,r4
- clearf cv
- setf zn
- addi r4.d,r3
- test_cc 1 1 0 0
- checkr3 4
-
- move.d 0x12345678,r3
- move.d 0xabcdef97,r4
- clearf cn
- setf zv
- addi r4.b,r3
- test_cc 0 1 1 0
- checkr3 be02460f
-
- move.d 0x12345678,r3
- move.d 0xabcdef97,r4
- setf cn
- clearf zv
- addi r4.w,r3
- test_cc 1 0 0 1
- checkr3 69d035a6
-
- move.d 0x12345678,r3
- move.d 0xabcdef97,r4
- addi r4.d,r3
- checkr3 c16c14d4
-
- quit
diff --git a/tests/tcg/cris/bare/check_addiv32.s b/tests/tcg/cris/bare/check_addiv32.s
deleted file mode 100644
index 20ba25d..0000000
--- a/tests/tcg/cris/bare/check_addiv32.s
+++ /dev/null
@@ -1,62 +0,0 @@
-# mach: crisv32
-# output: 4455aa77\n4455aa77\nee19ccff\nff22\n4455aa77\nff224455\n55aa77ff\n
-
- .include "testutils.inc"
- .data
-x:
- .dword 0x55aa77ff
- .dword 0xccff2244
- .dword 0x88ccee19
-
- start
- setf cv
- moveq -1,r0
- move.d x-32768,r5
- move.d 32769,r6
- addi r6.b,r5,acr
- test_cc 0 0 1 1
- move.d [acr],r3
- checkr3 4455aa77
-
- addu.w 32771,r5
- setf znvc
- moveq -1,r8
- addi r8.w,r5,acr
- test_cc 1 1 1 1
- move.d [acr],r3
- checkr3 4455aa77
-
- moveq 5,r10
- clearf znvc
- addi r10.b,acr,acr
- test_cc 0 0 0 0
- move.d [acr],r3
- checkr3 ee19ccff
-
- subq 1,r5
- move.d r5,r8
- subq 1,r8
- moveq 1,r9
- addi r9.d,r8,acr
- test_cc 0 0 0 0
- movu.w [acr],r3
- checkr3 ff22
-
- moveq -2,r11
- addi r11.w,acr,acr
- move.d [acr],r3
- checkr3 4455aa77
-
- moveq 5,r9
- addi r9.d,acr,acr
- subq 18,acr
- move.d [acr],r3
- checkr3 ff224455
-
- move.d -76789888/4,r12
- addi r12.d,r5,acr
- add.d 76789886,acr
- move.d [acr],r3
- checkr3 55aa77ff
-
- quit
diff --git a/tests/tcg/cris/bare/check_addm.s b/tests/tcg/cris/bare/check_addm.s
deleted file mode 100644
index efece9f..0000000
--- a/tests/tcg/cris/bare/check_addm.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n781344d0\n
-
- .include "testutils.inc"
- .data
-x:
- .dword 2,-1,0xffff,-1,0x5432f789
- .word 2,-1,0xffff,0xf789
- .byte 2,0xff,0x89
- .byte 0x7e
-
- start
- moveq -1,r3
- move.d x,r5
- add.d [r5+],r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- add.d [r5],r3
- test_cc 0 0 0 1
- addq 4,r5
- checkr3 1
-
- move.d 0xffff,r3
- add.d [r5+],r3
- test_cc 0 0 0 0
- checkr3 1fffe
-
- moveq -1,r3
- add.d [r5+],r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- move.d 0x78134452,r3
- add.d [r5+],r3
- test_cc 1 0 1 0
- checkr3 cc463bdb
-
- moveq -1,r3
- add.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 ffff0001
-
- moveq 2,r3
- add.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xffff,r3
- add.w [r5],r3
- test_cc 1 0 0 1
- checkr3 fffe
-
- move.d 0xfedaffff,r3
- add.w [r5+],r3
- test_cc 1 0 0 1
- checkr3 fedafffe
-
- move.d 0x78134452,r3
- add.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 78133bdb
-
- moveq -1,r3
- add.b [r5],r3
- test_cc 0 0 0 1
- addq 1,r5
- checkr3 ffffff01
-
- moveq 2,r3
- add.b [r5],r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xff,r3
- add.b [r5],r3
- test_cc 1 0 0 1
- checkr3 fe
-
- move.d 0xfeda49ff,r3
- add.b [r5+],r3
- test_cc 1 0 0 1
- checkr3 feda49fe
-
- move.d 0x78134452,r3
- add.b [r5+],r3
- test_cc 1 0 0 0
- checkr3 781344db
-
- move.d 0x78134452,r3
- add.b [r5],r3
- test_cc 1 0 1 0
- checkr3 781344d0
-
- quit
diff --git a/tests/tcg/cris/bare/check_addq.s b/tests/tcg/cris/bare/check_addq.s
deleted file mode 100644
index e6f874f..0000000
--- a/tests/tcg/cris/bare/check_addq.s
+++ /dev/null
@@ -1,47 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n0\n1\n100\n10000\n47\n67\na6\n80000001\n
-
- .include "testutils.inc"
- start
- moveq -2,r3
- addq 1,r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- addq 1,r3
- test_cc 0 1 0 1
- checkr3 0
-
- addq 1,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xff,r3
- addq 1,r3
- test_cc 0 0 0 0
- checkr3 100
-
- move.d 0xffff,r3
- addq 1,r3
- test_cc 0 0 0 0
- checkr3 10000
-
- move.d 0x42,r3
- addq 5,r3
- test_cc 0 0 0 0
- checkr3 47
-
- addq 32,r3
- test_cc 0 0 0 0
- checkr3 67
-
- addq 63,r3
- test_cc 0 0 0 0
- checkr3 a6
-
- move.d 0x7ffffffe,r3
- addq 3,r3
- test_cc 1 0 1 0
- checkr3 80000001
-
- quit
diff --git a/tests/tcg/cris/bare/check_addr.s b/tests/tcg/cris/bare/check_addr.s
deleted file mode 100644
index 7f55cdc..0000000
--- a/tests/tcg/cris/bare/check_addr.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- add.d r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- moveq -1,r4
- add.d r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xffff,r4
- move.d r4,r3
- add.d r4,r3
- test_cc 0 0 0 0
- checkr3 1fffe
-
- moveq -1,r4
- move.d r4,r3
- add.d r4,r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.d r4,r3
- test_cc 1 0 1 0
- checkr3 cc463bdb
-
- moveq -1,r3
- moveq 2,r4
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 ffff0001
-
- moveq 2,r3
- moveq -1,r4
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xffff,r4
- move.d r4,r3
- add.w r4,r3
- test_cc 1 0 0 1
- checkr3 fffe
-
- move.d 0xfedaffff,r4
- move.d r4,r3
- add.w r4,r3
- test_cc 1 0 0 1
- checkr3 fedafffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 78133bdb
-
- moveq -1,r3
- moveq 2,r4
- add.b r4,r3
- test_cc 0 0 0 1
- checkr3 ffffff01
-
- moveq 2,r3
- moveq -1,r4
- add.b r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xff,r4
- move.d r4,r3
- add.b r4,r3
- test_cc 1 0 0 1
- checkr3 fe
-
- move.d 0xfeda49ff,r4
- move.d r4,r3
- add.b r4,r3
- test_cc 1 0 0 1
- checkr3 feda49fe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.b r4,r3
- test_cc 1 0 0 0
- checkr3 781344db
-
- quit
diff --git a/tests/tcg/cris/bare/check_addxc.s b/tests/tcg/cris/bare/check_addxc.s
deleted file mode 100644
index 09c8355..0000000
--- a/tests/tcg/cris/bare/check_addxc.s
+++ /dev/null
@@ -1,91 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n101\n10001\n100fe\n1fffe\nfffe\nfffe\nfffffffe\nfe\nfffffffe\n781344db\n781343db\n78143bdb\n78133bdb\n800000ed\n0\n
-
- .include "testutils.inc"
- start
- moveq 2,r3
- adds.b 0xff,r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- adds.w 0xffff,r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- addu.b 0xff,r3
- checkr3 101
-
- moveq 2,r3
- move.d 0xffffffff,r4
- addu.w -1,r3
- test_cc 0 0 0 0
- checkr3 10001
-
- move.d 0xffff,r3
- addu.b -1,r3
- test_cc 0 0 0 0
- checkr3 100fe
-
- move.d 0xffff,r3
- addu.w -1,r3
- test_cc 0 0 0 0
- checkr3 1fffe
-
- move.d 0xffff,r3
- adds.b 0xff,r3
- test_cc 0 0 0 1
- checkr3 fffe
-
- move.d 0xffff,r3
- adds.w 0xffff,r3
- test_cc 0 0 0 1
- checkr3 fffe
-
- moveq -1,r3
- adds.b 0xff,r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- moveq -1,r3
- adds.w 0xff,r3
- test_cc 0 0 0 1
- checkr3 fe
-
- moveq -1,r3
- adds.w 0xffff,r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- move.d 0x78134452,r3
- addu.b 0x89,r3
- test_cc 0 0 0 0
- checkr3 781344db
-
- move.d 0x78134452,r3
- adds.b 0x89,r3
- test_cc 0 0 0 1
- checkr3 781343db
-
- move.d 0x78134452,r3
- addu.w 0xf789,r3
- test_cc 0 0 0 0
- checkr3 78143bdb
-
- move.d 0x78134452,r3
- adds.w 0xf789,r3
- test_cc 0 0 0 1
- checkr3 78133bdb
-
- move.d 0x7fffffee,r3
- addu.b 0xff,r3
- test_cc 1 0 1 0
- checkr3 800000ed
-
- move.d 0x1,r3
- adds.w 0xffff,r3
- test_cc 0 1 0 1
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_addxm.s b/tests/tcg/cris/bare/check_addxm.s
deleted file mode 100644
index 7563494..0000000
--- a/tests/tcg/cris/bare/check_addxm.s
+++ /dev/null
@@ -1,106 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n101\n10001\n100fe\n1fffe\nfffe\nfffe\nfffffffe\nfe\nfffffffe\n781344db\n781343db\n78143bdb\n78133bdb\n800000ed\n0\n
-
- .include "testutils.inc"
- .data
-x:
- .byte 0xff
- .word 0xffff
- .word 0xff
- .word 0xffff
- .byte 0x89
- .word 0xf789
- .byte 0xff
- .word 0xffff
-
- start
- moveq 2,r3
- move.d x,r5
- adds.b [r5+],r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- adds.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- subq 3,r5
- addu.b [r5+],r3
- test_cc 0 0 0 0
- checkr3 101
-
- moveq 2,r3
- addu.w [r5+],r3
- subq 3,r5
- test_cc 0 0 0 0
- checkr3 10001
-
- move.d 0xffff,r3
- addu.b [r5],r3
- test_cc 0 0 0 0
- checkr3 100fe
-
- move.d 0xffff,r3
- addu.w [r5],r3
- test_cc 0 0 0 0
- checkr3 1fffe
-
- move.d 0xffff,r3
- adds.b [r5],r3
- test_cc 0 0 0 1
- checkr3 fffe
-
- move.d 0xffff,r3
- adds.w [r5],r3
- test_cc 0 0 0 1
- checkr3 fffe
-
- moveq -1,r3
- adds.b [r5],r3
- test_cc 1 0 0 1
- addq 3,r5
- checkr3 fffffffe
-
- moveq -1,r3
- adds.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 fe
-
- moveq -1,r3
- adds.w [r5+],r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- move.d 0x78134452,r3
- addu.b [r5],r3
- test_cc 0 0 0 0
- checkr3 781344db
-
- move.d 0x78134452,r3
- adds.b [r5+],r3
- test_cc 0 0 0 1
- checkr3 781343db
-
- move.d 0x78134452,r3
- addu.w [r5],r3
- test_cc 0 0 0 0
- checkr3 78143bdb
-
- move.d 0x78134452,r3
- adds.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 78133bdb
-
- move.d 0x7fffffee,r3
- addu.b [r5+],r3
- test_cc 1 0 1 0
- checkr3 800000ed
-
- move.d 0x1,r3
- adds.w [r5+],r3
- test_cc 0 1 0 1
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_addxr.s b/tests/tcg/cris/bare/check_addxr.s
deleted file mode 100644
index 7f55cdc..0000000
--- a/tests/tcg/cris/bare/check_addxr.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- add.d r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- moveq 2,r3
- moveq -1,r4
- add.d r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xffff,r4
- move.d r4,r3
- add.d r4,r3
- test_cc 0 0 0 0
- checkr3 1fffe
-
- moveq -1,r4
- move.d r4,r3
- add.d r4,r3
- test_cc 1 0 0 1
- checkr3 fffffffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.d r4,r3
- test_cc 1 0 1 0
- checkr3 cc463bdb
-
- moveq -1,r3
- moveq 2,r4
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 ffff0001
-
- moveq 2,r3
- moveq -1,r4
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xffff,r4
- move.d r4,r3
- add.w r4,r3
- test_cc 1 0 0 1
- checkr3 fffe
-
- move.d 0xfedaffff,r4
- move.d r4,r3
- add.w r4,r3
- test_cc 1 0 0 1
- checkr3 fedafffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.w r4,r3
- test_cc 0 0 0 1
- checkr3 78133bdb
-
- moveq -1,r3
- moveq 2,r4
- add.b r4,r3
- test_cc 0 0 0 1
- checkr3 ffffff01
-
- moveq 2,r3
- moveq -1,r4
- add.b r4,r3
- test_cc 0 0 0 1
- checkr3 1
-
- move.d 0xff,r4
- move.d r4,r3
- add.b r4,r3
- test_cc 1 0 0 1
- checkr3 fe
-
- move.d 0xfeda49ff,r4
- move.d r4,r3
- add.b r4,r3
- test_cc 1 0 0 1
- checkr3 feda49fe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- add.b r4,r3
- test_cc 1 0 0 0
- checkr3 781344db
-
- quit
diff --git a/tests/tcg/cris/bare/check_andc.s b/tests/tcg/cris/bare/check_andc.s
deleted file mode 100644
index a947b77..0000000
--- a/tests/tcg/cris/bare/check_andc.s
+++ /dev/null
@@ -1,80 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- and.d 2,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- and.d -1,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- and.d 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r3
- and.d -1,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- and.d 0x5432f789,r3
- test_move_cc 0 0 0 0
- checkr3 50124400
-
- moveq -1,r3
- and.w 2,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0002
-
- moveq 2,r3
- and.w -1,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xfffff,r3
- and.w 0xffff,r3
- test_move_cc 1 0 0 0
- checkr3 fffff
-
- move.d 0xfedaffaf,r3
- and.w 0xff5f,r3
- test_move_cc 1 0 0 0
- checkr3 fedaff0f
-
- move.d 0x78134452,r3
- and.w 0xf789,r3
- test_move_cc 0 0 0 0
- checkr3 78134400
-
- moveq -1,r3
- and.b 2,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff02
-
- moveq 2,r3
- and.b -1,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xfa7,r3
- and.b 0x5a,r3
- test_move_cc 0 0 0 0
- checkr3 f02
-
- move.d 0x78134453,r3
- and.b 0x89,r3
- test_move_cc 0 0 0 0
- checkr3 78134401
-
- and.b 0,r3
- test_move_cc 0 1 0 0
- checkr3 78134400
-
- quit
diff --git a/tests/tcg/cris/bare/check_andm.s b/tests/tcg/cris/bare/check_andm.s
deleted file mode 100644
index 9385886..0000000
--- a/tests/tcg/cris/bare/check_andm.s
+++ /dev/null
@@ -1,90 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n
-
- .include "testutils.inc"
- .data
-x:
- .dword 2,-1,0xffff,-1,0x5432f789
- .word 2,-1,0xffff,0xff5f,0xf789
- .byte 2,-1,0x5a,0x89,0
-
- start
- moveq -1,r3
- move.d x,r5
- and.d [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- and.d [r5],r3
- test_move_cc 0 0 0 0
- addq 4,r5
- checkr3 2
-
- move.d 0xffff,r3
- and.d [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r3
- and.d [r5+],r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- and.d [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 50124400
-
- moveq -1,r3
- and.w [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 ffff0002
-
- moveq 2,r3
- and.w [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xfffff,r3
- and.w [r5],r3
- test_move_cc 1 0 0 0
- addq 2,r5
- checkr3 fffff
-
- move.d 0xfedaffaf,r3
- and.w [r5+],r3
- test_move_cc 1 0 0 0
- checkr3 fedaff0f
-
- move.d 0x78134452,r3
- and.w [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 78134400
-
- moveq -1,r3
- and.b [r5],r3
- test_move_cc 0 0 0 0
- addq 1,r5
- checkr3 ffffff02
-
- moveq 2,r3
- and.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xfa7,r3
- and.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 f02
-
- move.d 0x78134453,r3
- and.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 78134401
-
- and.b [r5],r3
- test_move_cc 0 1 0 0
- checkr3 78134400
-
- quit
diff --git a/tests/tcg/cris/bare/check_andq.s b/tests/tcg/cris/bare/check_andq.s
deleted file mode 100644
index 55aa7b0..0000000
--- a/tests/tcg/cris/bare/check_andq.s
+++ /dev/null
@@ -1,46 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n1f\nffffffe0\n78134452\n0\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- andq 2,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- andq -1,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- andq -1,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r3
- andq -1,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- andq 31,r3
- test_move_cc 0 0 0 0
- checkr3 1f
-
- moveq -1,r3
- andq -32,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffe0
-
- move.d 0x78134457,r3
- andq -14,r3
- test_move_cc 0 0 0 0
- checkr3 78134452
-
- moveq 0,r3
- andq -14,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_andr.s b/tests/tcg/cris/bare/check_andr.s
deleted file mode 100644
index 61aa1dc..0000000
--- a/tests/tcg/cris/bare/check_andr.s
+++ /dev/null
@@ -1,95 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- and.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- moveq -1,r4
- and.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r4
- move.d r4,r3
- and.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r4
- move.d r4,r3
- and.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- and.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 50124400
-
- moveq -1,r3
- moveq 2,r4
- and.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0002
-
- moveq 2,r3
- moveq -1,r4
- and.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xfffff,r3
- move.d 0xffff,r4
- and.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 fffff
-
- move.d 0xfedaffaf,r3
- move.d 0xff5f,r4
- and.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 fedaff0f
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- and.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 78134400
-
- moveq -1,r3
- moveq 2,r4
- and.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff02
-
- moveq 2,r3
- moveq -1,r4
- and.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0x5a,r4
- move.d 0xfa7,r3
- and.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 f02
-
- move.d 0x5432f789,r4
- move.d 0x78134453,r3
- and.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 78134401
-
- moveq 0,r7
- and.b r7,r3
- test_move_cc 0 1 0 0
- checkr3 78134400
-
- quit
diff --git a/tests/tcg/cris/bare/check_asr.s b/tests/tcg/cris/bare/check_asr.s
deleted file mode 100644
index 0a02ae6..0000000
--- a/tests/tcg/cris/bare/check_asr.s
+++ /dev/null
@@ -1,230 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n1\nffffffff\nffffffff\n5a67f\nffffffff\nffffffff\nffffffff\nf699fc67\nffffffff\n1\nffffffff\nffffffff\n5a67f\nda67ffff\nda67ffff\nda67ffff\nda67fc67\nffffffff\nffffffff\n1\nffffffff\nffffffff\n5a670007\nda67f1ff\nda67f1ff\nda67f1ff\nda67f1e7\nffffffff\nffffffff\n1\nffffffff\nffffffff\nffffffff\n5a67f1ff\n5a67f1f9\n0\n5a670000\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- asrq 0,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- asrq 1,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- asrq 31,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- asrq 15,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5a67f19f,r3
- asrq 12,r3
- test_move_cc 0 0 0 0
- checkr3 5a67f
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 f699fc67
-
- moveq -1,r3
- moveq 0,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- asr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 31,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 15,r4
- asr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- asr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 5a67f
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67ffff
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67ffff
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67ffff
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67fc67
-
- moveq -1,r3
- moveq 0,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 1,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- asr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 31,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 15,r4
- asr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5a67719f,r3
- moveq 12,r4
- asr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 5a670007
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67f1ff
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67f1ff
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67f1ff
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67f1e7
-
- moveq -1,r3
- moveq 0,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 1,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- asr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 31,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 15,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 7,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
-; FIXME: was wrong.
- move.d 0x5a67f19f,r3
- moveq 12,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 5a67f1ff
-
-; FIXME: was wrong.
- move.d 0x5a67f19f,r3
- moveq 4,r4
- asr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 5a67f1f9
-
- move.d 0x5a67f19f,r3
- asrq 31,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0x5a67419f,r3
- moveq 16,r4
- asr.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 5a670000
-
- quit
diff --git a/tests/tcg/cris/bare/check_ba.s b/tests/tcg/cris/bare/check_ba.s
deleted file mode 100644
index 873a408..0000000
--- a/tests/tcg/cris/bare/check_ba.s
+++ /dev/null
@@ -1,93 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: a\n
-
-
- .set smalloffset,0
- .set largeoffset,0
-
-
- .macro fail
- jump _fail
- .endm
-
- .global main
-main:
- moveq 0,$r3
-
-; Short forward branch.
- ba 0f
- addq 1,$r3
- fail
-
-; Max short forward branch.
-1:
- ba 2f
- addq 1,$r3
- fail
-
-; Short backward branch.
-0:
- ba 1b
- addq 1,$r3
- fail
-
- .space 254-2+smalloffset+1b-.,0
- moveq 0,$r3
-
-2:
-; Transit branch (long).
- ba 3f
- addq 1,$r3
- fail
-
- moveq 0,$r3
-4:
-; Long forward branch.
- ba 5f
- addq 1,$r3
- fail
-
- .space 256-2-smalloffset+4b-.,0
-
- moveq 0,$r3
-
-; Max short backward branch.
-3:
- ba 4b
- addq 1,$r3
- fail
-
-5:
-; Max long forward branch.
- ba 6f
- addq 1,$r3
- fail
-
- .space 32766+largeoffset-2+5b-.,0
-
- moveq 0,$r3
-6:
-; Transit branch.
- ba 7f
- addq 1,$r3
- fail
-
- moveq 0,$r3
-9:
- jsr pass
- nop
-
-; Transit branch.
- moveq 0,$r3
-7:
- ba 8f
- addq 1,$r3
- fail
-
- .space 32768-largeoffset+9b-.,0
-
-8:
-; Max long backward branch.
- ba 9b
- addq 1,$r3
- fail
diff --git a/tests/tcg/cris/bare/check_bas.s b/tests/tcg/cris/bare/check_bas.s
deleted file mode 100644
index 11929d4..0000000
--- a/tests/tcg/cris/bare/check_bas.s
+++ /dev/null
@@ -1,102 +0,0 @@
-# mach: crisv32
-# output: 0\n0\n0\nfb349abc\n0\n12124243\n0\n0\neab5baad\n0\nefb37832\n
-
- .include "testutils.inc"
- start
-x:
- setf zncv
- bsr 0f
- nop
-0:
- test_cc 1 1 1 1
- move srp,r3
- sub.d 0b,r3
- checkr3 0
-
- bas 1f,mof
- moveq 0,r0
-6:
- nop
- quit
-
-2:
- move srp,r3
- sub.d 3f,r3
- checkr3 0
- move srp,r4
- subq 4,r4
- move.d [r4],r3
- checkr3 fb349abc
-
- basc 4f,mof
- nop
- .dword 0x12124243
-7:
- nop
- quit
-
-8:
- move mof,r3
- sub.d 7f,r3
- checkr3 0
-
- move mof,r4
- subq 4,r4
- move.d [r4],r3
- checkr3 eab5baad
-
- jasc 9f,mof
- nop
- .dword 0xefb37832
-0:
- quit
-
- quit
-9:
- move mof,r3
- sub.d 0b,r3
- checkr3 0
-
- move mof,r4
- subq 4,r4
- move.d [r4],r3
- checkr3 efb37832
-
- quit
-
-4:
- move mof,r3
- sub.d 7b,r3
- checkr3 0
- move mof,r4
- subq 4,r4
- move.d [r4],r3
- checkr3 12124243
- basc 5f,bz
- moveq 0,r3
- .dword 0x7634aeba
- quit
-
- .space 32770,0
-1:
- move mof,r3
- sub.d 6b,r3
- checkr3 0
-
- bsrc 2b
- nop
- .dword 0xfb349abc
-3:
-
- quit
-
-5:
- move mof,r3
- sub.d 7b,r3
- checkr3 0
- move.d 8b,r6
- jasc r6,mof
- nop
- .dword 0xeab5baad
-7:
- quit
diff --git a/tests/tcg/cris/bare/check_bcc.s b/tests/tcg/cris/bare/check_bcc.s
deleted file mode 100644
index c57ffa6..0000000
--- a/tests/tcg/cris/bare/check_bcc.s
+++ /dev/null
@@ -1,197 +0,0 @@
- .global main
- .type main, @function
-main:
- clearf nzvc
- setf nzv
- bcc 0f
- addq 1, $r3
- jump dofail
-
-0:
- clearf nzvc
- setf nzv
- bcs dofail
- addq 1,$r3
-
- clearf nzvc
- setf ncv
- bne 1f
- addq 1, $r3
-
-fail:
-dofail:
- jump _fail
-
-1:
- clearf nzvc
- setf ncv
- beq dofail
- addq 1,$r3
-
- clearf nzvc
- setf ncz
- bvc 2f
- addq 1,$r3
- jump dofail
-
-2:
- clearf nzvc
- setf ncz
- bvs dofail
- addq 1,$r3
-
- clearf nzvc
- setf vcz
- bpl 3f
- addq 1,$r3
- jump fail
-3:
- clearf nzvc
- setf vcz
- bmi dofail
- addq 1,$r3
-
- clearf nzvc
- setf nv
- bls dofail
- addq 1,$r3
-
- clearf nzvc
- setf nv
- bhi 4f
- addq 1,$r3
- jump dofail
-
-4:
- clearf nzvc
- setf zc
- bge 5f
- addq 1,$r3
- jump dofail
-
-5:
- clearf nzvc
- setf zc
- blt dofail
- addq 1,$r3
-
- clearf nzvc
- setf c
- bgt 6f
- addq 1,$r3
- jump fail
-
-6:
- clearf nzvc
- setf c
- ble dofail
- addq 1,$r3
-
-;;;;;;;;;;
-
- setf nzvc
- clearf nzv
- bcc dofail
- addq 1,$r3
-
- setf nzvc
- clearf nzv
- bcs 0f
- addq 1,$r3
- jump fail
-
-0:
- setf nzvc
- clearf ncv
- bne dofail
- addq 1,$r3
-
- setf nzvc
- clearf ncv
- beq 1f
- addq 1,$r3
- jump fail
-
-1:
- setf nzvc
- clearf ncz
- bvc dofail
- addq 1,$r3
-
- setf nzvc
- clearf ncz
- bvs 2f
- addq 1,$r3
- jump fail
-
-2:
- setf nzvc
- clearf vcz
- bpl dofail
- addq 1,$r3
-
- setf nzvc
- clearf vcz
- bmi 3f
- addq 1,$r3
- jump fail
-
-3:
- setf nzvc
- clearf nv
- bls 4f
- addq 1,$r3
- jump fail
-
-4:
- setf nzvc
- clearf nv
- bhi dofail
- addq 1,$r3
-
- setf zvc
- clearf nzc
- bge dofail
- addq 1,$r3
-
- setf nzc
- clearf vzc
- blt 5f
- addq 1,$r3
- jump fail
-
-5:
- setf nzvc
- clearf c
- bgt dofail
- addq 1,$r3
-
- setf nzvc
- clearf c
- ble 6f
- addq 1,$r3
- jump fail
-
-6:
- ; do a forward branch.
- ba 2f
- nop
- .fill 100
-1:
- ba 3f
- nop
- .fill 800
-2:
- ba 1b
- nop
- .fill 1024
-3:
-
- moveq 31, $r0
-1: bne 1b
- subq 1, $r0
-
- jsr pass
- moveq 0, $r10
- ret
- nop
diff --git a/tests/tcg/cris/bare/check_boundc.s b/tests/tcg/cris/bare/check_boundc.s
deleted file mode 100644
index fb9e5bc..0000000
--- a/tests/tcg/cris/bare/check_boundc.s
+++ /dev/null
@@ -1,101 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n5432f789\n2\nffff\n2\nffff\nffff\nf789\n2\n2\nff\nff\nff\n89\n0\nff\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- bound.d 2,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- bound.d 0xffffffff,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- bound.d 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r3
- bound.d 0xffffffff,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- bound.d 0x5432f789,r3
- test_move_cc 0 0 0 0
- checkr3 5432f789
-
- moveq -1,r3
- bound.w 2,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq -1,r3
- bound.w 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq 2,r3
- bound.w 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- bound.w 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0xfedaffff,r3
- bound.w 0xffff,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0x78134452,r3
- bound.w 0xf789,r3
- test_move_cc 0 0 0 0
- checkr3 f789
-
- moveq -1,r3
- bound.b 2,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- bound.b 0xff,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq -1,r3
- bound.b 0xff,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- move.d 0xff,r3
- bound.b 0xff,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- move.d 0xfeda49ff,r3
- bound.b 0xff,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- move.d 0x78134452,r3
- bound.b 0x89,r3
- test_move_cc 0 0 0 0
- checkr3 89
-
- bound.w 0,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xffff,r3
- bound.b -1,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- quit
diff --git a/tests/tcg/cris/bare/check_boundr.s b/tests/tcg/cris/bare/check_boundr.s
deleted file mode 100644
index 5c50cc5..0000000
--- a/tests/tcg/cris/bare/check_boundr.s
+++ /dev/null
@@ -1,125 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\nffff\nffffffff\n5432f789\n2\n2\nffff\nffff\nffff\nf789\n2\n2\nff\nff\n89\nfeda4953\nfeda4962\n0\n0\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- bound.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- moveq -1,r4
- bound.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r4
- move.d r4,r3
- bound.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r4
- move.d r4,r3
- bound.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- bound.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 5432f789
-
- moveq -1,r3
- moveq 2,r4
- bound.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- moveq -1,r4
- bound.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq -1,r3
- bound.w r3,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0xffff,r4
- move.d r4,r3
- bound.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0xfedaffff,r4
- move.d r4,r3
- bound.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- bound.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 f789
-
- moveq -1,r3
- moveq 2,r4
- bound.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- moveq 2,r3
- moveq -1,r4
- bound.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 2
-
- move.d 0xff,r4
- move.d r4,r3
- bound.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- move.d 0xfeda49ff,r4
- move.d r4,r3
- bound.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 ff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- bound.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 89
-
- move.d 0xfeda4956,r3
- move.d 0xfeda4953,r4
- bound.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 feda4953
-
- move.d 0xfeda4962,r3
- move.d 0xfeda4963,r4
- bound.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 feda4962
-
- move.d 0xfeda4956,r3
- move.d 0,r4
- bound.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xfeda4956,r4
- move.d 0,r3
- bound.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_btst.s b/tests/tcg/cris/bare/check_btst.s
deleted file mode 100644
index 485deb2..0000000
--- a/tests/tcg/cris/bare/check_btst.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1111\n
-
- .include "testutils.inc"
- start
- clearf nzvc
- moveq -1,r3
- .if 1 ;..asm.arch.cris.v32
- .else
- setf vc
- .endif
- btstq 0,r3
- test_cc 1 0 0 0
-
- moveq 2,r3
- btstq 1,r3
- test_cc 1 0 0 0
-
- moveq 4,r3
- btstq 1,r3
- test_cc 0 1 0 0
-
- moveq -1,r3
- btstq 31,r3
- test_cc 1 0 0 0
-
- move.d 0x5a67f19f,r3
- btstq 12,r3
- test_cc 1 0 0 0
-
- move.d 0xda67f19f,r3
- move.d 29,r4
- btst r4,r3
- test_cc 0 0 0 0
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- move.d 0xda67f191,r3
- move.d 33,r4
- btst r4,r3
- test_cc 0 0 0 0
-
- moveq -1,r3
- moveq 0,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- moveq 2,r3
- moveq 1,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- moveq -1,r3
- moveq 31,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- moveq 4,r3
- btstq 1,r3
- test_cc 0 1 0 0
-
- moveq -1,r3
- moveq 15,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- btst r4,r3
- test_cc 1 0 0 0
-
- move.d 0x5a678000,r3
- moveq 11,r4
- btst r4,r3
- test_cc 0 1 0 0
-
- move.d 0x5a67f19f,r3
- btst r3,r3
- test_cc 0 0 0 0
-
- move.d 0x1111,r3
- checkr3 1111
-
- ; check that X gets cleared and that only the NZ flags are touched.
- ;; move.d 0xff, $r0
- ;; move $r0, $ccs
- ;; btst r3,r3
- ;; move $ccs, $r0
- ;; and.d 0xff, $r0
- ;; cmp.d 0xe3, $r0
- ;; test_cc 0 1 0 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_clearfv32.s b/tests/tcg/cris/bare/check_clearfv32.s
deleted file mode 100644
index 4e91360..0000000
--- a/tests/tcg/cris/bare/check_clearfv32.s
+++ /dev/null
@@ -1,19 +0,0 @@
-# mach: crisv32
-# output: ef\nef\n
-
-; Check that "clearf x" doesn't trivially fail.
-
- .include "testutils.inc"
- start
- setf puixnzvc
- clearf x ; Actually, x would be cleared by almost-all other insns.
- move ccs,r3
- and.d 0xff, $r3
- checkr3 ef
-
- setf puixnzvc
- moveq 0, $r3 ; moveq should only clear the xflag.
- move ccs,r3
- and.d 0xff, $r3
- checkr3 ef
- quit
diff --git a/tests/tcg/cris/bare/check_clrjmp1.s b/tests/tcg/cris/bare/check_clrjmp1.s
deleted file mode 100644
index 45a7005..0000000
--- a/tests/tcg/cris/bare/check_clrjmp1.s
+++ /dev/null
@@ -1,36 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffff00\n
-
-; A bug resulting in a non-effectual clear.b discovered running the GCC
-; testsuite; jump actually wrote to p0.
-
- .include "testutils.inc"
-
- start
- jump 1f
- nop
- .p2align 8
-1:
- move.d y,r4
-
- .if 0 ;0 == ..asm.arch.cris.v32
-; There was a bug causing this insn to set special register p0
-; (byte-clear) to 8 (low 8 bits of location after insn).
- jump [r4+]
- .endif
-
-1:
- move.d 0f,r4
-
-; The corresponding bug would cause this insn too, to set p0.
- jump r4
- nop
- quit
-0:
- moveq -1,r3
- clear.b r3
- checkr3 ffffff00
- quit
-
-y:
- .dword 1b
diff --git a/tests/tcg/cris/bare/check_cmp-2.s b/tests/tcg/cris/bare/check_cmp-2.s
deleted file mode 100644
index 414d370..0000000
--- a/tests/tcg/cris/bare/check_cmp-2.s
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-.include "testutils.inc"
-
- start
-
- move.d 4294967283, $r0
- move.d $r0, $r10
- cmp.d $r0, $r10
- beq 1f
- move.d $r10, $r3
- fail
-1:
- pass
- quit
diff --git a/tests/tcg/cris/bare/check_cmpc.s b/tests/tcg/cris/bare/check_cmpc.s
deleted file mode 100644
index 267c9ba..0000000
--- a/tests/tcg/cris/bare/check_cmpc.s
+++ /dev/null
@@ -1,86 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649282\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- cmp.d -2,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.d 1,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- cmp.d -0xffff,r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- moveq -1,r3
- cmp.d 1,r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- cmp.d -0x5432f789,r3
- test_cc 1 0 1 1
- checkr3 78134452
-
- moveq -1,r3
- cmp.w -2,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.w 1,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- cmp.w 1,r3
- test_cc 1 0 0 0
- checkr3 ffff
-
- move.d 0xfedaffff,r3
- cmp.w 1,r3
- test_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d 0x78134452,r3
- cmp.w 0x877,r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- moveq -1,r3
- cmp.b -2,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.b 1,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xff,r3
- cmp.b 1,r3
- test_cc 1 0 0 0
- checkr3 ff
-
- move.d 0xfeda49ff,r3
- cmp.b 1,r3
- test_cc 1 0 0 0
- checkr3 feda49ff
-
- move.d 0x78134452,r3
- cmp.b 0x77,r3
- test_cc 1 0 0 1
- checkr3 78134452
-
- move.d 0x85649282,r3
- cmp.b 0x82,r3
- test_cc 0 1 0 0
- checkr3 85649282
-
- quit
diff --git a/tests/tcg/cris/bare/check_cmpm.s b/tests/tcg/cris/bare/check_cmpm.s
deleted file mode 100644
index e4dde15..0000000
--- a/tests/tcg/cris/bare/check_cmpm.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649222\n
-
- .include "testutils.inc"
- .data
-x:
- .dword -2,1,-0xffff,1,-0x5432f789
- .word -2,1,1,0x877
- .byte -2,1,0x77
- .byte 0x22
-
- start
- moveq -1,r3
- move.d x,r5
- cmp.d [r5+],r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.d [r5],r3
- test_cc 0 0 0 0
- addq 4,r5
- checkr3 2
-
- move.d 0xffff,r3
- cmp.d [r5+],r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- moveq -1,r3
- cmp.d [r5+],r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- cmp.d [r5+],r3
- test_cc 1 0 1 1
- checkr3 78134452
-
- moveq -1,r3
- cmp.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- cmp.w [r5],r3
- test_cc 1 0 0 0
- checkr3 ffff
-
- move.d 0xfedaffff,r3
- cmp.w [r5+],r3
- test_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d 0x78134452,r3
- cmp.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- moveq -1,r3
- cmp.b [r5],r3
- test_cc 0 0 0 0
- addq 1,r5
- checkr3 ffffffff
-
- moveq 2,r3
- cmp.b [r5],r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xff,r3
- cmp.b [r5],r3
- test_cc 1 0 0 0
- checkr3 ff
-
- move.d 0xfeda49ff,r3
- cmp.b [r5+],r3
- test_cc 1 0 0 0
- checkr3 feda49ff
-
- move.d 0x78134452,r3
- cmp.b [r5+],r3
- test_cc 1 0 0 1
- checkr3 78134452
-
- move.d 0x85649222,r3
- cmp.b [r5],r3
- test_cc 0 1 0 0
- checkr3 85649222
-
- quit
diff --git a/tests/tcg/cris/bare/check_cmpq.s b/tests/tcg/cris/bare/check_cmpq.s
deleted file mode 100644
index 5469141..0000000
--- a/tests/tcg/cris/bare/check_cmpq.s
+++ /dev/null
@@ -1,75 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1\n1f\n1f\nffffffe1\nffffffe1\nffffffe0\n0\n0\nffffffff\nffffffff\n10000\n100\n5678900\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- cmpq 1,r3
- test_cc 0 1 0 0
- checkr3 1
-
- cmpq -1,r3
- test_cc 0 0 0 1
- checkr3 1
-
- cmpq 31,r3
- test_cc 1 0 0 1
- checkr3 1
-
- moveq 31,r3
- cmpq 31,r3
- test_cc 0 1 0 0
- checkr3 1f
-
- cmpq -31,r3
- test_cc 0 0 0 1
- checkr3 1f
-
- movs.b -31,r3
- cmpq -31,r3
- test_cc 0 1 0 0
- checkr3 ffffffe1
-
- cmpq -32,r3
- test_cc 0 0 0 0
- checkr3 ffffffe1
-
- movs.b -32,r3
- cmpq -32,r3
- test_cc 0 1 0 0
- checkr3 ffffffe0
-
- moveq 0,r3
- cmpq 1,r3
- test_cc 1 0 0 1
- checkr3 0
-
- cmpq -32,r3
- test_cc 0 0 0 1
- checkr3 0
-
- moveq -1,r3
- cmpq 1,r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- cmpq -1,r3
- test_cc 0 1 0 0
- checkr3 ffffffff
-
- move.d 0x10000,r3
- cmpq 1,r3
- test_cc 0 0 0 0
- checkr3 10000
-
- move.d 0x100,r3
- cmpq 1,r3
- test_cc 0 0 0 0
- checkr3 100
-
- move.d 0x5678900,r3
- cmpq 7,r3
- test_cc 0 0 0 0
- checkr3 5678900
-
- quit
diff --git a/tests/tcg/cris/bare/check_cmpr.s b/tests/tcg/cris/bare/check_cmpr.s
deleted file mode 100644
index b30af7a..0000000
--- a/tests/tcg/cris/bare/check_cmpr.s
+++ /dev/null
@@ -1,102 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649222\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq -2,r4
- cmp.d r4,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- cmp.d r4,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- move.d -0xffff,r4
- cmp.d r4,r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- moveq 1,r4
- moveq -1,r3
- cmp.d r4,r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- cmp.d r4,r3
- test_cc 1 0 1 1
- checkr3 78134452
-
- moveq -1,r3
- moveq -2,r4
- cmp.w r4,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- cmp.w r4,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d 0xffff,r3
- move.d -0xffff,r4
- cmp.w r4,r3
- test_cc 1 0 0 0
- checkr3 ffff
-
- move.d 0xfedaffff,r3
- move.d -0xfedaffff,r4
- cmp.w r4,r3
- test_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- cmp.w r4,r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- moveq -1,r3
- moveq -2,r4
- cmp.b r4,r3
- test_cc 0 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- cmp.b r4,r3
- test_cc 0 0 0 0
- checkr3 2
-
- move.d -0xff,r4
- move.d 0xff,r3
- cmp.b r4,r3
- test_cc 1 0 0 0
- checkr3 ff
-
- move.d -0xfeda49ff,r4
- move.d 0xfeda49ff,r3
- cmp.b r4,r3
- test_cc 1 0 0 0
- checkr3 feda49ff
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- cmp.b r4,r3
- test_cc 1 0 0 1
- checkr3 78134452
-
- move.d 0x85649222,r3
- move.d 0x77445622,r4
- cmp.b r4,r3
- test_cc 0 1 0 0
- checkr3 85649222
-
- quit
diff --git a/tests/tcg/cris/bare/check_cmpxc.s b/tests/tcg/cris/bare/check_cmpxc.s
deleted file mode 100644
index b237a93..0000000
--- a/tests/tcg/cris/bare/check_cmpxc.s
+++ /dev/null
@@ -1,92 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\n2\n2\nffff\nffff\nffff\nffff\nffffffff\nffffffff\nffffffff\n78134452\n78134452\n78134452\n78134452\n4452\n80000032\n
-
- .include "testutils.inc"
- start
- moveq 2,r3
- cmps.b 0xff,r3
- test_cc 0 0 0 1
- checkr3 2
-
- moveq 2,r3
- cmps.w 0xffff,r3
- test_cc 0 0 0 1
- checkr3 2
-
- moveq 2,r3
- cmpu.b 0xff,r3
- test_cc 1 0 0 1
- checkr3 2
-
- moveq 2,r3
- move.d 0xffffffff,r4
- cmpu.w -1,r3
- test_cc 1 0 0 1
- checkr3 2
-
- move.d 0xffff,r3
- cmpu.b -1,r3
- test_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0xffff,r3
- cmpu.w -1,r3
- test_cc 0 1 0 0
- checkr3 ffff
-
- move.d 0xffff,r3
- cmps.b 0xff,r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- move.d 0xffff,r3
- cmps.w 0xffff,r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- moveq -1,r3
- cmps.b 0xff,r3
- test_cc 0 1 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- cmps.w 0xff,r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- cmps.w 0xffff,r3
- test_cc 0 1 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- cmpu.b 0x89,r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmps.b 0x89,r3
- test_cc 0 0 0 1
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmpu.w 0xf789,r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmps.w 0xf789,r3
- test_cc 0 0 0 1
- checkr3 78134452
-
- move.d 0x4452,r3
- cmps.w 0x8002,r3
- test_cc 0 0 0 1
- checkr3 4452
-
- move.d 0x80000032,r3
- cmpu.w 0x764,r3
- test_cc 0 0 1 0
- checkr3 80000032
-
- quit
diff --git a/tests/tcg/cris/bare/check_cmpxm.s b/tests/tcg/cris/bare/check_cmpxm.s
deleted file mode 100644
index 87ea5bf..0000000
--- a/tests/tcg/cris/bare/check_cmpxm.s
+++ /dev/null
@@ -1,106 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 2\n2\n2\n2\nffff\nffff\nffff\nffff\nffffffff\nffffffff\nffffffff\n78134452\n78134452\n78134452\n78134452\n4452\n80000032\n
-
- .include "testutils.inc"
- .data
-x:
- .byte 0xff
- .word 0xffff
- .word 0xff
- .word 0xffff
- .byte 0x89
- .word 0xf789
- .word 0x8002
- .word 0x764
-
- start
- moveq 2,r3
- move.d x,r5
- cmps.b [r5+],r3
- test_cc 0 0 0 1
- checkr3 2
-
- moveq 2,r3
- cmps.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 2
-
- moveq 2,r3
- subq 3,r5
- cmpu.b [r5+],r3
- test_cc 1 0 0 1
- checkr3 2
-
- moveq 2,r3
- cmpu.w [r5+],r3
- test_cc 1 0 0 1
- subq 3,r5
- checkr3 2
-
- move.d 0xffff,r3
- cmpu.b [r5],r3
- test_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0xffff,r3
- cmpu.w [r5],r3
- test_cc 0 1 0 0
- checkr3 ffff
-
- move.d 0xffff,r3
- cmps.b [r5],r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- move.d 0xffff,r3
- cmps.w [r5],r3
- test_cc 0 0 0 1
- checkr3 ffff
-
- moveq -1,r3
- cmps.b [r5],r3
- test_cc 0 1 0 0
- addq 3,r5
- checkr3 ffffffff
-
- moveq -1,r3
- cmps.w [r5+],r3
- test_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- cmps.w [r5+],r3
- test_cc 0 1 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- cmpu.b [r5],r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmps.b [r5+],r3
- test_cc 0 0 0 1
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmpu.w [r5],r3
- test_cc 0 0 0 0
- checkr3 78134452
-
- move.d 0x78134452,r3
- cmps.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 78134452
-
- move.d 0x4452,r3
- cmps.w [r5+],r3
- test_cc 0 0 0 1
- checkr3 4452
-
- move.d 0x80000032,r3
- cmpu.w [r5+],r3
- test_cc 0 0 1 0
- checkr3 80000032
-
- quit
diff --git a/tests/tcg/cris/bare/check_dstep.s b/tests/tcg/cris/bare/check_dstep.s
deleted file mode 100644
index bd43b83..0000000
--- a/tests/tcg/cris/bare/check_dstep.s
+++ /dev/null
@@ -1,42 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: fffffffc\n4\nffff\nfffffffe\n9bf3911b\n0\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- dstep r4,r3
- test_move_cc 1 0 0 0
- checkr3 fffffffc
-
- moveq 2,r3
- moveq -1,r4
- dstep r4,r3
- test_move_cc 0 0 0 0
- checkr3 4
-
- move.d 0xffff,r4
- move.d r4,r3
- dstep r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r4
- move.d r4,r3
- dstep r4,r3
- test_move_cc 1 0 0 0
- checkr3 fffffffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- dstep r4,r3
- test_move_cc 1 0 0 0
- checkr3 9bf3911b
-
- move.d 0xffff,r3
- move.d 0x1fffe,r4
- dstep r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_jsr.s b/tests/tcg/cris/bare/check_jsr.s
deleted file mode 100644
index 1060237..0000000
--- a/tests/tcg/cris/bare/check_jsr.s
+++ /dev/null
@@ -1,85 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 0\n0\n0\n0\n0\n0\n
-
-# Test that jsr Rn and jsr [PC+] work.
-
- .include "testutils.inc"
- start
-x:
- move.d 0f,r6
- setf nzvc
- jsr r6
- .if 1; ..asm.arch.cris.v32
- nop
- .endif
-0:
- test_move_cc 1 1 1 1
- move srp,r3
- sub.d 0b,r3
- checkr3 0
-
- move.d 1f,r0
- setf nzvc
- jsr r0
- .if 1 ; ..asm.arch.cris.v32
- moveq 0,r0
- .endif
-6:
- nop
- quit
-
-2:
- test_move_cc 0 0 0 0
- move srp,r3
- sub.d 3f,r3
- checkr3 0
- jsr 4f
- .if 1 ; ..asm.arch.cris.v32
- nop
- .endif
-7:
- nop
- quit
-
-8:
- move srp,r3
- sub.d 7b,r3
- checkr3 0
- quit
-
-4:
- move srp,r3
- sub.d 7b,r3
- checkr3 0
- move.d 5f,r3
- jump r3
- .if 1; ..asm.arch.cris.v32
- moveq 0,r3
- .endif
- quit
-
- .space 32770,0
-1:
- test_move_cc 1 1 1 1
- move srp,r3
- sub.d 6b,r3
- checkr3 0
-
- clearf cznv
- jsr 2b
- .if 1; ..asm.arch.cris.v32
- nop
- .endif
-3:
-
- quit
-
-5:
- move srp,r3
- sub.d 7b,r3
- checkr3 0
- jump 8b
- .if 1 ; ..asm.arch.cris.v32
- nop
- .endif
- quit
diff --git a/tests/tcg/cris/bare/check_lapc.s b/tests/tcg/cris/bare/check_lapc.s
deleted file mode 100644
index 9a6150b..0000000
--- a/tests/tcg/cris/bare/check_lapc.s
+++ /dev/null
@@ -1,78 +0,0 @@
-# mach: crisv32
-# output: 0\n0\nfffffffa\nfffffffe\nffffffda\n1e\n1e\n0\n
-
-.include "testutils.inc"
-
-; To accommodate dumpr3 with more than one instruction, keep it
-; out of lapc operand ranges and difference calculations.
-
- start
- lapc.d 0f,r3
-0:
- sub.d .,r3
- checkr3 0
-
- lapcq 0f,r3
-0:
- sub.d .,r3
- checkr3 0
-
- lapc.d .,r3
- sub.d .,r3
- checkr3 fffffffa
-
- lapcq .,r3
- sub.d .,r3
- checkr3 fffffffe
-
-0:
- .rept 16
- nop
- .endr
- lapc.d 0b,r3
- sub.d .,r3
- checkr3 ffffffda
-
- setf zcvn
- lapc.d 0f,r3
- test_cc 1 1 1 1
- sub.d .,r3
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-0:
- checkr3 1e
-0:
- lapcq 0f,r3
- sub.d 0b,r3
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-0:
- checkr3 1e
- clearf cn
- setf zv
-1:
- lapcq .,r3
- test_cc 0 1 1 0
- sub.d 1b,r3
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_lsl.s b/tests/tcg/cris/bare/check_lsl.s
deleted file mode 100644
index 9e2ddd7..0000000
--- a/tests/tcg/cris/bare/check_lsl.s
+++ /dev/null
@@ -1,217 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n4\n80000000\nffff8000\n7f19f000\n80000000\n0\n0\n699fc67c\nffffffff\n4\n80000000\nffff8000\n7f19f000\nda670000\nda670000\nda670000\nda67c67c\nffffffff\nfffafffe\n4\nffff0000\nffff8000\n5a67f000\nda67f100\nda67f100\nda67f100\nda67f17c\nfff3faff\nfff3fafe\n4\nffffff00\nffffff00\nffffff80\n5a67f100\n5a67f1f0\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- lslq 0,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- lslq 1,r3
- test_move_cc 0 0 0 0
- checkr3 4
-
- moveq -1,r3
- lslq 31,r3
- test_move_cc 1 0 0 0
- checkr3 80000000
-
- moveq -1,r3
- lslq 15,r3
- test_move_cc 1 0 0 0
- checkr3 ffff8000
-
- move.d 0x5a67f19f,r3
- lslq 12,r3
- test_move_cc 0 0 0 0
- checkr3 7f19f000
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsl.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 80000000
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsl.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsl.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsl.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 699fc67c
-
- moveq -1,r3
- moveq 0,r4
- lsl.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- lsl.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 4
-
- moveq -1,r3
- moveq 31,r4
- lsl.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 80000000
-
- moveq -1,r3
- moveq 15,r4
- lsl.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffff8000
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsl.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 7f19f000
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsl.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsl.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsl.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsl.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 da67c67c
-
- moveq -1,r3
- moveq 0,r4
- lsl.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0xfffaffff,r3
- moveq 1,r4
- lsl.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 fffafffe
-
- moveq 2,r3
- moveq 1,r4
- lsl.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 4
-
- moveq -1,r3
- moveq 31,r4
- lsl.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffff0000
-
- moveq -1,r3
- moveq 15,r4
- lsl.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffff8000
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsl.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 5a67f000
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsl.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 da67f17c
-
- move.d 0xfff3faff,r3
- moveq 0,r4
- lsl.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 fff3faff
-
- move.d 0xfff3faff,r3
- moveq 1,r4
- lsl.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 fff3fafe
-
- moveq 2,r3
- moveq 1,r4
- lsl.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 4
-
- moveq -1,r3
- moveq 31,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffffff00
-
- moveq -1,r3
- moveq 15,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffffff00
-
- moveq -1,r3
- moveq 7,r4
- lsl.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffff80
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsl.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 5a67f100
-
- move.d 0x5a67f19f,r3
- moveq 4,r4
- lsl.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 5a67f1f0
-
- quit
diff --git a/tests/tcg/cris/bare/check_lsr.s b/tests/tcg/cris/bare/check_lsr.s
deleted file mode 100644
index 18fdbef..0000000
--- a/tests/tcg/cris/bare/check_lsr.s
+++ /dev/null
@@ -1,218 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\n1\n1\n1ffff\n5a67f\n1\n0\n0\n3699fc67\nffffffff\n1\n1\n1ffff\n5a67f\nda670000\nda670000\nda670000\nda673c67\nffffffff\nffff7fff\n1\nffff0000\nffff0001\n5a67000f\nda67f100\nda67f100\nda67f100\nda67f127\nffffffff\nffffff7f\n1\nffffff00\nffffff00\nffffff01\n5a67f100\n5a67f109\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- lsrq 0,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- lsrq 1,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- lsrq 31,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- lsrq 15,r3
- test_move_cc 0 0 0 0
- checkr3 1ffff
-
- move.d 0x5a67f19f,r3
- lsrq 12,r3
- test_move_cc 0 0 0 0
- checkr3 5a67f
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsr.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsr.d r4,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 3699fc67
-
- moveq -1,r3
- moveq 0,r4
- lsr.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 2,r3
- moveq 1,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 31,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 15,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 1ffff
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsr.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 5a67f
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsr.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsr.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsr.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 da670000
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 da673c67
-
- moveq -1,r3
- moveq 0,r4
- lsr.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 1,r4
- lsr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff7fff
-
- moveq 2,r3
- moveq 1,r4
- lsr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
-;; FIXME: this was wrong. Z should be set.
- moveq -1,r3
- moveq 31,r4
- lsr.w r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffff0000
-
- moveq -1,r3
- moveq 15,r4
- lsr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0001
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsr.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 5a67000f
-
- move.d 0xda67f19f,r3
- move.d 31,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 32,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 33,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 da67f100
-
- move.d 0xda67f19f,r3
- move.d 66,r4
- lsr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 da67f127
-
- moveq -1,r3
- moveq 0,r4
- lsr.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 1,r4
- lsr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff7f
-
- moveq 2,r3
- moveq 1,r4
- lsr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- moveq -1,r3
- moveq 31,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffffff00
-
- moveq -1,r3
- moveq 15,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffffff00
-
- moveq -1,r3
- moveq 7,r4
- lsr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff01
-
- move.d 0x5a67f19f,r3
- moveq 12,r4
- lsr.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 5a67f100
-
- move.d 0x5a67f19f,r3
- moveq 4,r4
- lsr.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 5a67f109
-
- quit
diff --git a/tests/tcg/cris/bare/check_mcp.s b/tests/tcg/cris/bare/check_mcp.s
deleted file mode 100644
index e65ccdd..0000000
--- a/tests/tcg/cris/bare/check_mcp.s
+++ /dev/null
@@ -1,49 +0,0 @@
-# mach: crisv32
-# output: fffffffe\n1\n1ffff\nfffffffe\ncc463bdc\n4c463bdc\n0\n
-
- .include "testutils.inc"
- start
-
-; Set R, clear C.
- move 0x100,ccs
- moveq -5,r3
- move 2,mof
- mcp mof,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- moveq 2,r3
- move -1,srp
- mcp srp,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move 0xffff,srp
- move srp,r3
- mcp srp,r3
- test_cc 0 0 0 0
- checkr3 1ffff
-
- move -1,mof
- move mof,r3
- mcp mof,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- move 0x5432f789,mof
- move.d 0x78134452,r3
- mcp mof,r3
- test_cc 1 0 1 0
- checkr3 cc463bdc
-
- move 0x80000000,srp
- mcp srp,r3
- test_cc 0 0 1 0
- checkr3 4c463bdc
-
- move 0xb3b9c423,srp
- mcp srp,r3
- test_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movdelsr1.s b/tests/tcg/cris/bare/check_movdelsr1.s
deleted file mode 100644
index 300cc87..0000000
--- a/tests/tcg/cris/bare/check_movdelsr1.s
+++ /dev/null
@@ -1,33 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: aa117acd\n
-# output: eeaabb42\n
-
-; Bug with move to special register in delay slot, due to
-; special flush-insn-cache simulator use. Ordinary move worked;
-; special register caused branch to fail.
-
- .include "testutils.inc"
- start
- move -1,srp
-
- move.d 0xaa117acd,r1
- moveq 3,r9
- cmpq 1,r9
- bhi 0f
- move.d r1,r3
-
- fail
-0:
- checkr3 aa117acd
-
- move.d 0xeeaabb42,r1
- moveq 3,r9
- cmpq 1,r9
- bhi 0f
- move r1,srp
-
- fail
-0:
- move srp,r3
- checkr3 eeaabb42
- quit
diff --git a/tests/tcg/cris/bare/check_movecr.s b/tests/tcg/cris/bare/check_movecr.s
deleted file mode 100644
index da8ec26..0000000
--- a/tests/tcg/cris/bare/check_movecr.s
+++ /dev/null
@@ -1,37 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffff42\n94\nffff4321\n9234\n76543210\n76540000\n
-
-; Move constant byte, word, dword to register. Check that no extension is
-; performed, that only part of the register is set.
-
- .include "testutils.inc"
- startnostack
- moveq -1,r3
- move.b 0x42,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff42
-
- moveq 0,r3
- move.b 0x94,r3
- test_move_cc 1 0 0 0
- checkr3 94
-
- moveq -1,r3
- move.w 0x4321,r3
- test_move_cc 0 0 0 0
- checkr3 ffff4321
-
- moveq 0,r3
- move.w 0x9234,r3
- test_move_cc 1 0 0 0
- checkr3 9234
-
- move.d 0x76543210,r3
- test_move_cc 0 0 0 0
- checkr3 76543210
-
- move.w 0,r3
- test_move_cc 0 1 0 0
- checkr3 76540000
-
- quit
diff --git a/tests/tcg/cris/bare/check_movei.s b/tests/tcg/cris/bare/check_movei.s
deleted file mode 100644
index bbfa633..0000000
--- a/tests/tcg/cris/bare/check_movei.s
+++ /dev/null
@@ -1,50 +0,0 @@
-# mach: crisv32
-# output: fffffffe\n
-# output: fffffffe\n
-
-; Check basic integral-write semantics regarding flags.
-
- .include "testutils.inc"
- start
-
- move.d 0, $r3
-; A write that works. Check that flags are set correspondingly.
- move.d d,r4
- ;; store to bring it into the tlb with the right prot bits
- move.d r3,[r4]
- moveq -2,r5
- setf c
- clearf p
- move.d [r4],r3
- ax
- move.d r5,[r4]
- move.d [r4],r3
-
- bcc 0f
- nop
- fail
-
-0:
- checkr3 fffffffe
-
-; A write that fails; check flags too.
- move.d d,r4
- moveq 23,r5
- setf p
- clearf c
- move.d [r4],r3
- ax
- move.d r5,[r4]
- move.d [r4],r3
-
- bcs 0f
- nop
- fail
-
-0:
- checkr3 fffffffe
- quit
-
- .data
-d:
- .dword 42424242
diff --git a/tests/tcg/cris/bare/check_movemr.s b/tests/tcg/cris/bare/check_movemr.s
deleted file mode 100644
index 88489de..0000000
--- a/tests/tcg/cris/bare/check_movemr.s
+++ /dev/null
@@ -1,78 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 12345678\n10234567\n12345678\n12344567\n12344523\n76543210\nffffffaa\naa\n9911\nffff9911\n78\n56\n3456\n6712\n
-
- .include "testutils.inc"
- start
-
- .data
-mem1:
- .dword 0x12345678
-mem2:
- .word 0x4567
-mem3:
- .byte 0x23
- .dword 0x76543210
- .byte 0xaa,0x11,0x99
-
- .text
- move.d mem1,r2
- move.d [r2],r3
- test_move_cc 0 0 0 0
- checkr3 12345678
-
- move.d mem2,r3
- move.d [r3],r3
- test_move_cc 0 0 0 0
- checkr3 10234567
-
- move.d mem1,r2
- move.d [r2+],r3
- test_move_cc 0 0 0 0
- checkr3 12345678
-
- move.w [r2+],r3
- test_move_cc 0 0 0 0
- checkr3 12344567
-
- move.b [r2+],r3
- test_move_cc 0 0 0 0
- checkr3 12344523
-
- move.d [r2+],r3
- test_move_cc 0 0 0 0
- checkr3 76543210
-
- movs.b [r2],r3
- test_move_cc 1 0 0 0
- checkr3 ffffffaa
-
- movu.b [r2+],r3
- test_move_cc 0 0 0 0
- checkr3 aa
-
- movu.w [r2],r3
- test_move_cc 0 0 0 0
- checkr3 9911
-
- movs.w [r2+],r3
- test_move_cc 1 0 0 0
- checkr3 ffff9911
-
- move.d mem1,r13
- movs.b [r13+],r3
- test_move_cc 0 0 0 0
- checkr3 78
-
- movu.b [r13],r3
- test_move_cc 0 0 0 0
- checkr3 56
-
- movs.w [r13+],r3
- test_move_cc 0 0 0 0
- checkr3 3456
-
- movu.w [r13+],r3
- test_move_cc 0 0 0 0
- checkr3 6712
-
- quit
diff --git a/tests/tcg/cris/bare/check_movemrv32.s b/tests/tcg/cris/bare/check_movemrv32.s
deleted file mode 100644
index 53950ab..0000000
--- a/tests/tcg/cris/bare/check_movemrv32.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv32
-# output: 15\n7\n2\nffff1234\nb\n16\nf\n2\nffffffef\nf\nffff1234\nf\nfffffff4\nd\nfffffff2\n10\nfffffff2\nd\n
-
- .include "testutils.inc"
- .data
-x:
- .dword 8,9,10,11
-y:
- .dword -12,13,-14,15,16
-
- start
- moveq 7,r0
- moveq 2,r1
- move.d 0xffff1234,r2
- moveq 21,r3
- move.d x,r4
- setf zcvn
- movem r2,[r4+]
- test_cc 1 1 1 1
- subq 12,r4
-
- checkr3 15
-
- move.d [r4+],r3
- checkr3 7
-
- move.d [r4+],r3
- checkr3 2
-
- move.d [r4+],r3
- checkr3 ffff1234
-
- move.d [r4+],r3
- checkr3 b
-
- subq 16,r4
- moveq 22,r0
- moveq 15,r1
- clearf zcvn
- movem r0,[r4]
- test_cc 0 0 0 0
- move.d [r4+],r3
- checkr3 16
-
- move.d r1,r3
- checkr3 f
-
- move.d [r4+],r3
- checkr3 2
-
- subq 8,r4
- moveq 10,r2
- moveq -17,r0
- clearf zc
- setf vn
- movem r1,[r4]
- test_cc 1 0 1 0
- move.d [r4+],r3
- checkr3 ffffffef
-
- move.d [r4+],r3
- checkr3 f
-
- move.d [r4+],r3
- checkr3 ffff1234
-
- move.d y,r4
- setf zc
- clearf vn
- movem [r4+],r3
- test_cc 0 1 0 1
- checkr3 f
-
- move.d r0,r3
- checkr3 fffffff4
-
- move.d r1,r3
- checkr3 d
-
- move.d r2,r3
- checkr3 fffffff2
-
- move.d [r4],r3
- checkr3 10
-
- subq 8,r4
- setf zcvn
- movem [r4+],r0
- test_cc 1 1 1 1
- move.d r0,r3
- checkr3 fffffff2
-
- move.d r1,r3
- checkr3 d
-
- quit
diff --git a/tests/tcg/cris/bare/check_mover.s b/tests/tcg/cris/bare/check_mover.s
deleted file mode 100644
index b4db595..0000000
--- a/tests/tcg/cris/bare/check_mover.s
+++ /dev/null
@@ -1,28 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffff05\nffff0005\n5\nffffff00\n
-
-; Move between registers. Check that just the subreg is copied.
-
- .include "testutils.inc"
- startnostack
- moveq -30,r3
- moveq 5,r4
- move.b r4,r3
- test_move_cc 0 0 0 0 ; FIXME
- checkr3 ffffff05
-
- move.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0005
-
- move.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq -1,r3
- moveq 0,r4
- move.b r4,r3
- test_move_cc 0 1 0 0
- checkr3 ffffff00
-
- quit
diff --git a/tests/tcg/cris/bare/check_moverm.s b/tests/tcg/cris/bare/check_moverm.s
deleted file mode 100644
index eabc958..0000000
--- a/tests/tcg/cris/bare/check_moverm.s
+++ /dev/null
@@ -1,45 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 7823fec2\n10231879\n102318fe\n
-
- .include "testutils.inc"
- start
-
- .data
-mem1:
- .dword 0x12345678
-mem2:
- .word 0x4567
-mem3:
- .byte 0x23
- .dword 0x76543210
- .byte 0xaa,0x11,0x99
-
- .text
- move.d mem1,r2
- move.d 0x7823fec2,r4
- setf nzvc
- move.d r4,[r2+]
- test_cc 1 1 1 1
- subq 4,r2
- move.d [r2],r3
- checkr3 7823fec2
-
- move.d mem2,r3
- move.d 0x45231879,r4
- clearf nzvc
- move.w r4,[r3]
- test_cc 0 0 0 0
- move.d [r3],r3
- checkr3 10231879
-
- move.d mem2,r2
- moveq -2,r4
- clearf nc
- setf zv
- move.b r4,[r2+]
- test_cc 0 1 1 0
- subq 1,r2
- move.d [r2],r3
- checkr3 102318fe
-
- quit
diff --git a/tests/tcg/cris/bare/check_movmp.s b/tests/tcg/cris/bare/check_movmp.s
deleted file mode 100644
index 7fc11f0..0000000
--- a/tests/tcg/cris/bare/check_movmp.s
+++ /dev/null
@@ -1,131 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n
-
-# Test generic "move Ps,[]" and "move [],Pd" insns; the ones with
-# functionality common to all models.
-
- .include "testutils.inc"
- start
-
- .data
-filler:
- .byte 0xaa
- .word 0x4433
- .dword 0x55778866
- .byte 0xcc
-
- .text
-; Test that writing to zero-registers is a nop
- .if 0
- ; We used to just ignore the writes, but now an error is emitted. We
- ; keep the test-code but disabled, in case we need to change this again.
- move 0xaa,p0
- move 0x4433,p4
- move 0x55774433,p8
- .endif
-
- moveq -1,r3
- setf zcvn
- clear.b r3
- test_cc 1 1 1 1
- checkr3 ffffff00
-
- moveq -1,r3
- clearf zcvn
- clear.w r3
- test_cc 0 0 0 0
- checkr3 ffff0000
-
- moveq -1,r3
- clear.d r3
- checkr3 0
-
-; "Write" using ordinary memory references too.
- .if 0 ; See ".if 0" above.
- move.d filler,r6
- move [r6],p0
- move [r6],p4
- move [r6],p8
- .endif
-
-# ffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n
-
- moveq -1,r3
- clear.b r3
- checkr3 ffffff00
-
- moveq -1,r3
- clear.w r3
- checkr3 ffff0000
-
- moveq -1,r3
- clear.d r3
- checkr3 0
-
-; And postincremented.
- .if 0 ; See ".if 0" above.
- move [r6+],p0
- move [r6+],p4
- move [r6+],p8
- .endif
-
-# ffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n
-
- moveq -1,r3
- clear.b r3
- checkr3 ffffff00
-
- moveq -1,r3
- clear.w r3
- checkr3 ffff0000
-
- moveq -1,r3
- clear.d r3
- checkr3 0
-
-; Now see that we can write to the registers too.
-# bb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n
-; [PC+]
- move.d filler,r9
- move 0xbb113344,srp
- move srp,r3
- checkr3 bb113344
-
-; [R+]
- move [r9+],srp
- move srp,r3
- checkr3 664433aa
-
-; [R]
- move [r9],srp
- move srp,r3
- checkr3 cc557788
-
-; And check writing to memory, clear and srp.
-
- move.d filler,r9
- move 0xabcde012,srp
- setf zcvn
- move srp,[r9+]
- test_cc 1 1 1 1
- subq 4,r9
- move.d [r9],r3
- checkr3 abcde012
-
- clearf zcvn
- clear.b [r9]
- test_cc 0 0 0 0
- move.d [r9],r3
- checkr3 abcde000
-
- addq 2,r9
- clear.w [r9+]
- subq 2,r9
- move.d [r9],r3
- checkr3 77880000
-
- clear.d [r9]
- move.d [r9],r3
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movpmv32.s b/tests/tcg/cris/bare/check_movpmv32.s
deleted file mode 100644
index daf0970..0000000
--- a/tests/tcg/cris/bare/check_movpmv32.s
+++ /dev/null
@@ -1,35 +0,0 @@
-# mach: crisv32
-# output: 11223320\nbb113344\naa557711\n
-
-# Test v32-specific special registers. FIXME: more registers.
-
- .include "testutils.inc"
- start
- .data
-store:
- .dword 0x11223344
- .dword 0x77665544
-
- .text
- moveq -1,r3
- move.d store,r4
- move vr,[r4]
- move [r4+],mof
- move mof,r3
- checkr3 11223320
-
- moveq -1,r3
- clearf zcvn
- move 0xbb113344,mof
- test_cc 0 0 0 0
- move mof,r3
- checkr3 bb113344
-
- setf zcvn
- move 0xaa557711,mof
- test_cc 1 1 1 1
- move mof,[r4]
- move.d [r4],r3
- checkr3 aa557711
-
- quit
diff --git a/tests/tcg/cris/bare/check_movpr.s b/tests/tcg/cris/bare/check_movpr.s
deleted file mode 100644
index eef9bdb..0000000
--- a/tests/tcg/cris/bare/check_movpr.s
+++ /dev/null
@@ -1,28 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: ffffff00\nffff0000\n0\nbb113344\n
-
-# Test generic "move Ps,Rd" and "move Rs,Pd" insns; the ones with
-# functionality common to all models.
-
- .include "testutils.inc"
- start
- moveq -1,r3
- clear.b r3
- checkr3 ffffff00
-
- moveq -1,r3
- clear.w r3
- checkr3 ffff0000
-
- moveq -1,r3
- clear.d r3
- checkr3 0
-
- moveq -1,r3
- move.d 0xbb113344,r4
- setf zcvn
- move r4,srp
- move srp,r3
- test_cc 1 1 1 1
- checkr3 bb113344
- quit
diff --git a/tests/tcg/cris/bare/check_movprv32.s b/tests/tcg/cris/bare/check_movprv32.s
deleted file mode 100644
index d0d90e1..0000000
--- a/tests/tcg/cris/bare/check_movprv32.s
+++ /dev/null
@@ -1,21 +0,0 @@
-# mach: crisv32
-# output: ffffff20\nbb113344\n
-
-# Test v32-specific special registers. FIXME: more registers.
-
- .include "testutils.inc"
- start
- moveq -1,r3
- setf zcvn
- move vr,r3
- test_cc 1 1 1 1
- checkr3 ffffff20
-
- moveq -1,r3
- move.d 0xbb113344,r4
- clearf cvnz
- move r4,mof
- test_cc 0 0 0 0
- move mof,r3
- checkr3 bb113344
- quit
diff --git a/tests/tcg/cris/bare/check_movscr.s b/tests/tcg/cris/bare/check_movscr.s
deleted file mode 100644
index 53c8ce6..0000000
--- a/tests/tcg/cris/bare/check_movscr.s
+++ /dev/null
@@ -1,29 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 42\nffffff85\n7685\nffff8765\n0\n
-
-; Move constant byte, word, dword to register. Check that sign-extension
-; is performed.
-
- .include "testutils.inc"
- start
- moveq -1,r3
- movs.b 0x42,r3
- checkr3 42
-
- movs.b 0x85,r3
- test_move_cc 1 0 0 0
- checkr3 ffffff85
-
- movs.w 0x7685,r3
- test_move_cc 0 0 0 0
- checkr3 7685
-
- movs.w 0x8765,r3
- test_move_cc 1 0 0 0
- checkr3 ffff8765
-
- movs.w 0,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movsm.s b/tests/tcg/cris/bare/check_movsm.s
deleted file mode 100644
index 7074336..0000000
--- a/tests/tcg/cris/bare/check_movsm.s
+++ /dev/null
@@ -1,44 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 5\nfffffff5\n5\nfffffff5\n0\n
-
-; Movs between registers. Check that sign-extension is performed and the
-; full register is set.
-
- .include "testutils.inc"
-
- .data
-x:
- .byte 5,-11
- .word 5,-11
- .word 0
-
- start
- move.d x,r5
-
- moveq -1,r3
- movs.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r3
- movs.b [r5],r3
- test_move_cc 1 0 0 0
- addq 1,r5
- checkr3 fffffff5
-
- moveq -1,r3
- movs.w [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r3
- movs.w [r5],r3
- test_move_cc 1 0 0 0
- addq 2,r5
- checkr3 fffffff5
-
- movs.w [r5],r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movsr.s b/tests/tcg/cris/bare/check_movsr.s
deleted file mode 100644
index d1889a7..0000000
--- a/tests/tcg/cris/bare/check_movsr.s
+++ /dev/null
@@ -1,46 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 5\nfffffff5\n5\nfffffff5\n0\n
-
-; Movs between registers. Check that sign-extension is performed and the
-; full register is set.
-
- .include "testutils.inc"
- start
- moveq -1,r5
- moveq 5,r4
- move.b r4,r5
- moveq -1,r3
- movs.b r5,r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r5
- moveq -11,r4
- move.b r4,r5
- moveq 0,r3
- movs.b r5,r3
- test_move_cc 1 0 0 0
- checkr3 fffffff5
-
- moveq -1,r5
- moveq 5,r4
- move.w r4,r5
- moveq -1,r3
- movs.w r5,r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r5
- moveq -11,r4
- move.w r4,r5
- moveq 0,r3
- movs.w r5,r3
- test_move_cc 1 0 0 0
- checkr3 fffffff5
-
- moveq 0,r5
- movs.b r5,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movucr.s b/tests/tcg/cris/bare/check_movucr.s
deleted file mode 100644
index 7c8487d..0000000
--- a/tests/tcg/cris/bare/check_movucr.s
+++ /dev/null
@@ -1,33 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 42\n85\n7685\n8765\n0\n
-
-; Move constant byte, word, dword to register. Check that zero-extension
-; is performed.
-
- .include "testutils.inc"
- start
- moveq -1,r3
- movu.b 0x42,r3
- test_move_cc 0 0 0 0
- checkr3 42
-
- moveq -1,r3
- movu.b 0x85,r3
- test_move_cc 0 0 0 0
- checkr3 85
-
- moveq -1,r3
- movu.w 0x7685,r3
- test_move_cc 0 0 0 0
- checkr3 7685
-
- moveq -1,r3
- movu.w 0x8765,r3
- test_move_cc 0 0 0 0
- checkr3 8765
-
- movu.b 0,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movum.s b/tests/tcg/cris/bare/check_movum.s
deleted file mode 100644
index 038e539..0000000
--- a/tests/tcg/cris/bare/check_movum.s
+++ /dev/null
@@ -1,40 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 5\nf5\n5\nfff5\n0\n
-
-; Movu between registers. Check that zero-extension is performed and the
-; full register is set.
-
- .include "testutils.inc"
-
- .data
-x:
- .byte 5,-11
- .word 5,-11
- .word 0
-
- start
- move.d x,r5
-
- movu.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- movu.b [r5],r3
- test_move_cc 0 0 0 0
- addq 1,r5
- checkr3 f5
-
- movu.w [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- movu.w [r5],r3
- test_move_cc 0 0 0 0
- addq 2,r5
- checkr3 fff5
-
- movu.w [r5],r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_movur.s b/tests/tcg/cris/bare/check_movur.s
deleted file mode 100644
index 3ecf475..0000000
--- a/tests/tcg/cris/bare/check_movur.s
+++ /dev/null
@@ -1,45 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 5\nf5\n5\nfff5\n0\n
-
-; Movu between registers. Check that zero-extension is performed and the
-; full register is set.
-
- .include "testutils.inc"
- start
- moveq -1,r5
- moveq 5,r4
- move.b r4,r5
- moveq -1,r3
- movu.b r5,r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r5
- moveq -11,r4
- move.b r4,r5
- moveq -1,r3
- movu.b r5,r3
- test_move_cc 0 0 0 0
- checkr3 f5
-
- moveq -1,r5
- moveq 5,r4
- move.w r4,r5
- moveq -1,r3
- movu.w r5,r3
- test_move_cc 0 0 0 0
- checkr3 5
-
- moveq 0,r5
- moveq -11,r4
- move.w r4,r5
- moveq -1,r3
- movu.w r5,r3
- test_move_cc 0 0 0 0
- checkr3 fff5
-
- movu.w 0,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_mulv32.s b/tests/tcg/cris/bare/check_mulv32.s
deleted file mode 100644
index f379358..0000000
--- a/tests/tcg/cris/bare/check_mulv32.s
+++ /dev/null
@@ -1,51 +0,0 @@
-# mach: crisv32
-# output: fffffffe\n
-# output: ffffffff\n
-# output: fffffffe\n
-# output: 1\n
-# output: fffffffe\n
-# output: ffffffff\n
-# output: fffffffe\n
-# output: 1\n
-
-; Check that carry is not modified on v32.
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq 2,r4
- setf c
- muls.d r4,r3
- test_cc 1 0 0 1
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 2,r4
- setf c
- mulu.d r4,r3
- test_cc 0 0 1 1
- checkr3 fffffffe
- move mof,r3
- checkr3 1
-
- moveq -1,r3
- moveq 2,r4
- clearf c
- muls.d r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 2,r4
- clearf c
- mulu.d r4,r3
- test_cc 0 0 1 0
- checkr3 fffffffe
- move mof,r3
- checkr3 1
-
- quit
diff --git a/tests/tcg/cris/bare/check_mulx.s b/tests/tcg/cris/bare/check_mulx.s
deleted file mode 100644
index a7a1f82..0000000
--- a/tests/tcg/cris/bare/check_mulx.s
+++ /dev/null
@@ -1,257 +0,0 @@
-# mach: crisv10 crisv32
-# output: fffffffe\nffffffff\nfffffffe\n1\nfffffffe\nffffffff\nfffffffe\n1\nfffe0001\n0\nfffe0001\n0\n1\n0\n1\nfffffffe\n193eade2\n277e3a49\n193eade2\n277e3a49\nfffffffe\nffffffff\n1fffe\n0\nfffffffe\nffffffff\n1fffe\n0\n1\n0\nfffe0001\n0\nfdbdade2\nffffffff\n420fade2\n0\nfffffffe\nffffffff\n1fe\n0\nfffffffe\nffffffff\n1fe\n0\n1\n0\nfe01\n0\n1\n0\nfe01\n0\nffffd9e2\nffffffff\n2be2\n0\n0\n0\n0\n0\n
-
- .include "testutils.inc"
- start
-
- .align 4
- moveq -1,r3
- moveq 2,r4
- muls.d r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- .align 4
- moveq -1,r3
- moveq 2,r4
- mulu.d r4,r3
- test_cc 0 0 1 0
- checkr3 fffffffe
- move mof,r3
- checkr3 1
-
- .align 4
- moveq 2,r3
- moveq -1,r4
- muls.d r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- .align 4
- moveq 2,r3
- moveq -1,r4
- mulu.d r4,r3
- test_cc 0 0 1 0
- checkr3 fffffffe
- move mof,r3
- checkr3 1
-
- move.d 0xffff,r4
- move.d r4,r3
- muls.d r4,r3
- test_cc 0 0 1 0
- checkr3 fffe0001
- move mof,r3
- checkr3 0
-
- move.d 0xffff,r4
- move.d r4,r3
- mulu.d r4,r3
- test_cc 0 0 0 0
- checkr3 fffe0001
- move mof,r3
- checkr3 0
-
- moveq -1,r4
- move.d r4,r3
- muls.d r4,r3
- test_cc 0 0 0 0
- checkr3 1
- move mof,r3
- checkr3 0
-
- moveq -1,r4
- move.d r4,r3
- mulu.d r4,r3
- test_cc 1 0 1 0
- checkr3 1
- move mof,r3
- checkr3 fffffffe
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- muls.d r4,r3
- test_cc 0 0 1 0
- checkr3 193eade2
- move mof,r3
- checkr3 277e3a49
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- mulu.d r4,r3
- test_cc 0 0 1 0
- checkr3 193eade2
- move mof,r3
- checkr3 277e3a49
-
- move.d 0xffff,r3
- moveq 2,r4
- muls.w r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 2,r4
- mulu.w r4,r3
- test_cc 0 0 0 0
- checkr3 1fffe
- move mof,r3
- checkr3 0
- nop
-
- moveq 2,r3
- move.d 0xffff,r4
- muls.w r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq 2,r3
- moveq -1,r4
- mulu.w r4,r3
- test_cc 0 0 0 0
- checkr3 1fffe
- move mof,r3
- checkr3 0
-
- move.d 0xffff,r4
- move.d r4,r3
- muls.w r4,r3
- test_cc 0 0 0 0
- checkr3 1
- move mof,r3
- checkr3 0
-
- moveq -1,r4
- move.d r4,r3
- mulu.w r4,r3
- test_cc 0 0 0 0
- checkr3 fffe0001
- move mof,r3
- checkr3 0
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- muls.w r4,r3
- test_cc 1 0 0 0
- checkr3 fdbdade2
- move mof,r3
- checkr3 ffffffff
- nop
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- mulu.w r4,r3
- test_cc 0 0 0 0
- checkr3 420fade2
- move mof,r3
- checkr3 0
- nop
-
- move.d 0xff,r3
- moveq 2,r4
- muls.b r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq -1,r3
- moveq 2,r4
- mulu.b r4,r3
- test_cc 0 0 0 0
- checkr3 1fe
- move mof,r3
- checkr3 0
-
- moveq 2,r3
- moveq -1,r4
- muls.b r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
- move mof,r3
- checkr3 ffffffff
-
- moveq 2,r3
- moveq -1,r4
- mulu.b r4,r3
- test_cc 0 0 0 0
- checkr3 1fe
- move mof,r3
- checkr3 0
-
- move.d 0xff,r4
- move.d r4,r3
- muls.b r4,r3
- test_cc 0 0 0 0
- checkr3 1
- move mof,r3
- checkr3 0
- nop
-
- moveq -1,r4
- move.d r4,r3
- mulu.b r4,r3
- test_cc 0 0 0 0
- checkr3 fe01
- move mof,r3
- checkr3 0
- nop
-
- move.d 0xfeda49ff,r4
- move.d r4,r3
- muls.b r4,r3
- test_cc 0 0 0 0
- checkr3 1
- move mof,r3
- checkr3 0
- nop
-
- move.d 0xfeda49ff,r4
- move.d r4,r3
- mulu.b r4,r3
- test_cc 0 0 0 0
- checkr3 fe01
- move mof,r3
- checkr3 0
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- muls.b r4,r3
- test_cc 1 0 0 0
- checkr3 ffffd9e2
- move mof,r3
- checkr3 ffffffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- mulu.b r4,r3
- test_cc 0 0 0 0
- checkr3 2be2
- move mof,r3
- checkr3 0
-
- moveq 0,r3
- move.d 0xf87f4aeb,r4
- muls.d r4,r3
- test_cc 0 1 0 0
- checkr3 0
- move mof,r3
- checkr3 0
-
- move.d 0xf87f4aeb,r3
- moveq 0,r4
- mulu.d r4,r3
- test_cc 0 1 0 0
- checkr3 0
- move mof,r3
- checkr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_neg.s b/tests/tcg/cris/bare/check_neg.s
deleted file mode 100644
index 963c4b6..0000000
--- a/tests/tcg/cris/bare/check_neg.s
+++ /dev/null
@@ -1,104 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: ffffffff\nffffffff\n0\n80000000\n1\nba987655\nffff\nffff\n0\n89ab8000\nffff0001\n45677655\nff\nff\n0\n89abae80\nffffff01\n45678955\n
-
- .include "testutils.inc"
- start
- moveq 0,r3
- moveq 1,r4
- neg.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 1,r3
- moveq 0,r4
- neg.d r3,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
-;; FIXME: this was wrong.
- moveq 0,r3
- neg.d r3,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0x80000000,r3
- neg.d r3,r3
- test_move_cc 1 0 0 0
- checkr3 80000000
-
- moveq -1,r3
- neg.d r3,r3
- test_move_cc 0 0 0 0
- checkr3 1
-
- move.d 0x456789ab,r3
- neg.d r3,r3
- test_move_cc 1 0 0 0
- checkr3 ba987655
-
- moveq 0,r3
- moveq 1,r4
- neg.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffff
-
- moveq 1,r3
- moveq 0,r4
- neg.w r3,r3
- test_move_cc 1 0 0 0
- checkr3 ffff
-
- moveq 0,r3
- neg.w r3,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0x89ab8000,r3
- neg.w r3,r3
- test_move_cc 1 0 0 0
- checkr3 89ab8000
-
- moveq -1,r3
- neg.w r3,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0001
-
- move.d 0x456789ab,r3
- neg.w r3,r3
- test_move_cc 0 0 0 0
- checkr3 45677655
-
- moveq 0,r3
- moveq 1,r4
- neg.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 ff
-
- moveq 1,r3
- moveq 0,r4
- neg.b r3,r3
- test_move_cc 1 0 0 0
- checkr3 ff
-
- moveq 0,r3
- neg.b r3,r3
- test_move_cc 0 1 0 0
- checkr3 0
-
-;; FIXME: was wrong.
- move.d 0x89abae80,r3
- neg.b r3,r3
- test_move_cc 1 0 0 1
- checkr3 89abae80
-
- moveq -1,r3
- neg.b r3,r3
- test_move_cc 0 0 0 0
- checkr3 ffffff01
-
- move.d 0x456789ab,r3
- neg.b r3,r3
- test_move_cc 0 0 0 0
- checkr3 45678955
-
- quit
diff --git a/tests/tcg/cris/bare/check_not.s b/tests/tcg/cris/bare/check_not.s
deleted file mode 100644
index 33bcf15..0000000
--- a/tests/tcg/cris/bare/check_not.s
+++ /dev/null
@@ -1,31 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: fffffffe\nfffffffd\nffff0f00\n0\n87ecbbad\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- not r3
- test_move_cc 1 0 0 0
- checkr3 fffffffe
-
- moveq 2,r3
- not r3
- test_move_cc 1 0 0 0
- checkr3 fffffffd
-
- move.d 0xf0ff,r3
- not r3
- test_move_cc 1 0 0 0
- checkr3 ffff0f00
-
- moveq -1,r3
- not r3
- test_move_cc 0 1 0 0
- checkr3 0
-
- move.d 0x78134452,r3
- not r3
- test_move_cc 1 0 0 0
- checkr3 87ecbbad
-
- quit
diff --git a/tests/tcg/cris/bare/check_orc.s b/tests/tcg/cris/bare/check_orc.s
deleted file mode 100644
index c733f03..0000000
--- a/tests/tcg/cris/bare/check_orc.s
+++ /dev/null
@@ -1,71 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- or.d 2,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- or.d 1,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xf0ff,r3
- or.d 0xff0f,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r3
- or.d -1,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- or.d 0x5432f789,r3
- test_move_cc 0 0 0 0
- checkr3 7c33f7db
-
- move.d 0xffff0001,r3
- or.w 2,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0003
-
- moveq 2,r3
- or.w 1,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xfedaffaf,r3
- or.w 0xff5f,r3
- test_move_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d 0x78134452,r3
- or.w 0xf789,r3
- test_move_cc 1 0 0 0
- checkr3 7813f7db
-
- moveq 1,r3
- or.b 2,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- or.b 1,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xfa3,r3
- or.b 0x4a,r3
- test_move_cc 1 0 0 0
- checkr3 feb
-
- move.d 0x78134453,r3
- or.b 0x89,r3
- test_move_cc 1 0 0 0
- checkr3 781344db
-
- quit
diff --git a/tests/tcg/cris/bare/check_orm.s b/tests/tcg/cris/bare/check_orm.s
deleted file mode 100644
index ee723a6..0000000
--- a/tests/tcg/cris/bare/check_orm.s
+++ /dev/null
@@ -1,75 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n
-
- .include "testutils.inc"
- .data
-x:
- .dword 2,1,0xff0f,-1,0x5432f789
- .word 2,1,0xff5f,0xf789
- .byte 2,1,0x4a,0x89
-
- start
- moveq 1,r3
- move.d x,r5
- or.d [r5+],r3
- checkr3 3
-
- moveq 2,r3
- or.d [r5],r3
- addq 4,r5
- checkr3 3
-
- move.d 0xf0ff,r3
- or.d [r5+],r3
- checkr3 ffff
-
- moveq -1,r3
- or.d [r5+],r3
- checkr3 ffffffff
-
- move.d 0x78134452,r3
- or.d [r5+],r3
- checkr3 7c33f7db
-
- move.d 0xffff0001,r3
- or.w [r5+],r3
- checkr3 ffff0003
-
- moveq 2,r3
- or.w [r5],r3
- addq 2,r5
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xfedaffaf,r3
- or.w [r5+],r3
- test_move_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d 0x78134452,r3
- or.w [r5+],r3
- test_move_cc 1 0 0 0
- checkr3 7813f7db
-
- moveq 1,r3
- or.b [r5+],r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- or.b [r5],r3
- addq 1,r5
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xfa3,r3
- or.b [r5+],r3
- test_move_cc 1 0 0 0
- checkr3 feb
-
- move.d 0x78134453,r3
- or.b [r5],r3
- test_move_cc 1 0 0 0
- checkr3 781344db
-
- quit
diff --git a/tests/tcg/cris/bare/check_orq.s b/tests/tcg/cris/bare/check_orq.s
deleted file mode 100644
index 5060edc..0000000
--- a/tests/tcg/cris/bare/check_orq.s
+++ /dev/null
@@ -1,41 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 3\n3\nffffffff\nffffffff\n1f\nffffffe0\n7813445e\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- orq 2,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- orq 1,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xf0ff,r3
- orq -1,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 0,r3
- orq -1,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- moveq 0,r3
- orq 31,r3
- test_move_cc 0 0 0 0
- checkr3 1f
-
- moveq 0,r3
- orq -32,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffe0
-
- move.d 0x78134452,r3
- orq 12,r3
- test_move_cc 0 0 0 0
- checkr3 7813445e
-
- quit
diff --git a/tests/tcg/cris/bare/check_orr.s b/tests/tcg/cris/bare/check_orr.s
deleted file mode 100644
index a514c11..0000000
--- a/tests/tcg/cris/bare/check_orr.s
+++ /dev/null
@@ -1,84 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- moveq 2,r4
- or.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- moveq 1,r4
- or.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xff0f,r4
- move.d 0xf0ff,r3
- or.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff
-
- moveq -1,r4
- move.d r4,r3
- or.d r4,r3
- test_move_cc 1 0 0 0
- checkr3 ffffffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- or.d r4,r3
- test_move_cc 0 0 0 0
- checkr3 7c33f7db
-
- move.d 0xffff0001,r3
- moveq 2,r4
- or.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 ffff0003
-
- moveq 2,r3
- move.d 0xffff0001,r4
- or.w r4,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0xfedaffaf,r3
- move.d 0xffffff5f,r4
- or.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 fedaffff
-
- move.d 0x5432f789,r4
- move.d 0x78134452,r3
- or.w r4,r3
- test_move_cc 1 0 0 0
- checkr3 7813f7db
-
- moveq 1,r3
- move.d 0xffffff02,r4
- or.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- moveq 2,r3
- moveq 1,r4
- or.b r4,r3
- test_move_cc 0 0 0 0
- checkr3 3
-
- move.d 0x4a,r4
- move.d 0xfa3,r3
- or.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 feb
-
- move.d 0x5432f789,r4
- move.d 0x78134453,r3
- or.b r4,r3
- test_move_cc 1 0 0 0
- checkr3 781344db
-
- quit
diff --git a/tests/tcg/cris/bare/check_ret.s b/tests/tcg/cris/bare/check_ret.s
deleted file mode 100644
index b44fb25..0000000
--- a/tests/tcg/cris/bare/check_ret.s
+++ /dev/null
@@ -1,25 +0,0 @@
-# mach: crisv3 crisv8 crisv10
-# output: 3\n
-
-# Test that ret works.
-
- .include "testutils.inc"
- start
-x:
- moveq 0,r3
- jsr z
-w:
- quit
-y:
- addq 1,r3
- checkr3 3
- quit
-
-z:
- addq 1,r3
- move srp,r2
- add.d y-w,r2
- move r2,srp
- ret
- addq 1,r3
- quit
diff --git a/tests/tcg/cris/bare/check_scc.s b/tests/tcg/cris/bare/check_scc.s
deleted file mode 100644
index 4a8674c..0000000
--- a/tests/tcg/cris/bare/check_scc.s
+++ /dev/null
@@ -1,95 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n0\n1\n0\n1\n0\n1\n0\n0\n1\n1\n0\n1\n0\n1\n0\n1\n0\n0\n1\n0\n1\n1\n0\n1\n0\n0\n1\n1\n0\n1\n1\n0\n
-
- .include "testutils.inc"
-
- .macro lcheckr3 v
- move $ccs, $r9
- checkr3 \v
- move $r9, $ccs
- .endm
-
- start
- clearf nzvc
- scc r3
- lcheckr3 1
- scs r3
- lcheckr3 0
- sne r3
- lcheckr3 1
- seq r3
- lcheckr3 0
- svc r3
- lcheckr3 1
- svs r3
- lcheckr3 0
- spl r3
- lcheckr3 1
- smi r3
- lcheckr3 0
- sls r3
- lcheckr3 0
- shi r3
- lcheckr3 1
- sge r3
- lcheckr3 1
- slt r3
- lcheckr3 0
- sgt r3
- lcheckr3 1
- sle r3
- lcheckr3 0
- sa r3
- lcheckr3 1
- setf nzvc
- scc r3
- lcheckr3 0
- scs r3
- lcheckr3 1
- sne r3
- lcheckr3 0
- svc r3
- lcheckr3 0
- svs r3
- lcheckr3 1
- spl r3
- lcheckr3 0
- smi r3
- lcheckr3 1
- sls r3
- lcheckr3 1
- shi r3
- lcheckr3 0
- sge r3
- lcheckr3 1
- slt r3
- lcheckr3 0
- sgt r3
- lcheckr3 0
- sle r3
- lcheckr3 1
- sa r3
- lcheckr3 1
- clearf n
- sge r3
- lcheckr3 0
- slt r3
- lcheckr3 1
-
- .if 1 ;..asm.arch.cris.v32
- setf p
- ssb r3
- .else
- moveq 1,r3
- .endif
- lcheckr3 1
-
- .if 1 ;..asm.arch.cris.v32
- clearf p
- ssb r3
- .else
- moveq 0,r3
- .endif
- lcheckr3 0
-
- quit
diff --git a/tests/tcg/cris/bare/check_subc.s b/tests/tcg/cris/bare/check_subc.s
deleted file mode 100644
index e34b544..0000000
--- a/tests/tcg/cris/bare/check_subc.s
+++ /dev/null
@@ -1,87 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n
-
- .include "testutils.inc"
- start
-
- moveq -1,r3
- sub.d -2,r3
- test_cc 0 0 0 0
- checkr3 1
-
- moveq 2,r3
- sub.d 1,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xffff,r3
- sub.d -0xffff,r3
- test_cc 0 0 0 1
- checkr3 1fffe
-
- moveq -1,r3
- sub.d 1,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- move.d 0x78134452,r3
- sub.d -0x5432f789,r3
- test_cc 1 0 1 1
- checkr3 cc463bdb
-
- moveq -1,r3
- sub.w -2,r3
- test_cc 0 0 0 0
- checkr3 ffff0001
-
- moveq 2,r3
- sub.w 1,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xffff,r3
- sub.w 1,r3
- test_cc 1 0 0 0
- checkr3 fffe
-
- move.d 0xfedaffff,r3
- sub.w 1,r3
- test_cc 1 0 0 0
- checkr3 fedafffe
-
- move.d 0x78134452,r3
- sub.w 0x877,r3
- test_cc 0 0 0 0
- checkr3 78133bdb
-
- moveq -1,r3
- sub.b -2,r3
- test_cc 0 0 0 0
- checkr3 ffffff01
-
- moveq 2,r3
- sub.b 1,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xff,r3
- sub.b 1,r3
- test_cc 1 0 0 0
- checkr3 fe
-
- move.d 0xfeda49ff,r3
- sub.b 1,r3
- test_cc 1 0 0 0
- checkr3 feda49fe
-
- move.d 0x78134452,r3
- sub.b 0x77,r3
- test_cc 1 0 0 1
- checkr3 781344db
-
- move.d 0x85649282,r3
- sub.b 0x82,r3
- test_cc 0 1 0 0
- checkr3 85649200
-
- quit
diff --git a/tests/tcg/cris/bare/check_subm.s b/tests/tcg/cris/bare/check_subm.s
deleted file mode 100644
index e07ea02..0000000
--- a/tests/tcg/cris/bare/check_subm.s
+++ /dev/null
@@ -1,96 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n
-
- .include "testutils.inc"
- .data
-x:
- .dword -2,1,-0xffff,1,-0x5432f789
- .word -2,1,1,0x877
- .byte -2,1,0x77
- .byte 0x22
-
- start
- moveq -1,r3
- move.d x,r5
- sub.d [r5+],r3
- test_cc 0 0 0 0
- checkr3 1
-
- moveq 2,r3
- sub.d [r5],r3
- test_cc 0 0 0 0
- addq 4,r5
- checkr3 1
-
- move.d 0xffff,r3
- sub.d [r5+],r3
- test_cc 0 0 0 1
- checkr3 1fffe
-
- moveq -1,r3
- sub.d [r5+],r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- move.d 0x78134452,r3
- sub.d [r5+],r3
- test_cc 1 0 1 1
- checkr3 cc463bdb
-
- moveq -1,r3
- sub.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 ffff0001
-
- moveq 2,r3
- sub.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xffff,r3
- sub.w [r5],r3
- test_cc 1 0 0 0
- checkr3 fffe
-
- move.d 0xfedaffff,r3
- sub.w [r5+],r3
- test_cc 1 0 0 0
- checkr3 fedafffe
-
- move.d 0x78134452,r3
- sub.w [r5+],r3
- test_cc 0 0 0 0
- checkr3 78133bdb
-
- moveq -1,r3
- sub.b [r5],r3
- test_cc 0 0 0 0
- addq 1,r5
- checkr3 ffffff01
-
- moveq 2,r3
- sub.b [r5],r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xff,r3
- sub.b [r5],r3
- test_cc 1 0 0 0
- checkr3 fe
-
- move.d 0xfeda49ff,r3
- sub.b [r5+],r3
- test_cc 1 0 0 0
- checkr3 feda49fe
-
- move.d 0x78134452,r3
- sub.b [r5+],r3
- test_cc 1 0 0 1
- checkr3 781344db
-
- move.d 0x85649222,r3
- sub.b [r5],r3
- test_cc 0 1 0 0
- checkr3 85649200
-
- quit
diff --git a/tests/tcg/cris/bare/check_subq.s b/tests/tcg/cris/bare/check_subq.s
deleted file mode 100644
index 9e34fa3..0000000
--- a/tests/tcg/cris/bare/check_subq.s
+++ /dev/null
@@ -1,52 +0,0 @@
-# mach: crisv3 crisv8 crisv10 crisv32
-# output: 0\nffffffff\nfffffffe\nffff\nff\n56788f9\n56788d9\n567889a\n0\n7ffffffc\n
-
- .include "testutils.inc"
- start
- moveq 1,r3
- subq 1,r3
- test_cc 0 1 0 0
- checkr3 0
-
- subq 1,r3
- test_cc 1 0 0 1
- checkr3 ffffffff
-
- subq 1,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- move.d 0x10000,r3
- subq 1,r3
- test_cc 0 0 0 0
- checkr3 ffff
-
- move.d 0x100,r3
- subq 1,r3
- test_cc 0 0 0 0
- checkr3 ff
-
- move.d 0x5678900,r3
- subq 7,r3
- test_cc 0 0 0 0
- checkr3 56788f9
-
- subq 32,r3
- test_cc 0 0 0 0
- checkr3 56788d9
-
- subq 63,r3
- test_cc 0 0 0 0
- checkr3 567889a
-
- move.d 34,r3
- subq 34,r3
- test_cc 0 1 0 0
- checkr3 0
-
- move.d 0x80000024,r3
- subq 40,r3
- test_cc 0 0 1 0
- checkr3 7ffffffc
-
- quit
diff --git a/tests/tcg/cris/bare/check_subr.s b/tests/tcg/cris/bare/check_subr.s
deleted file mode 100644
index 742fbc8..0000000
--- a/tests/tcg/cris/bare/check_subr.s
+++ /dev/null
@@ -1,102 +0,0 @@
-# mach: crisv0 crisv3 crisv8 crisv10 crisv32
-# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n
-
- .include "testutils.inc"
- start
- moveq -1,r3
- moveq -2,r4
- sub.d r4,r3
- test_cc 0 0 0 0
- checkr3 1
-
- moveq 2,r3
- moveq 1,r4
- sub.d r4,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xffff,r3
- move.d -0xffff,r4
- sub.d r4,r3
- test_cc 0 0 0 1
- checkr3 1fffe
-
- moveq 1,r4
- moveq -1,r3
- sub.d r4,r3
- test_cc 1 0 0 0
- checkr3 fffffffe
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- sub.d r4,r3
- test_cc 1 0 1 1
- checkr3 cc463bdb
-
- moveq -1,r3
- moveq -2,r4
- sub.w r4,r3
- test_cc 0 0 0 0
- checkr3 ffff0001
-
- moveq 2,r3
- moveq 1,r4
- sub.w r4,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d 0xffff,r3
- move.d -0xffff,r4
- sub.w r4,r3
- test_cc 1 0 0 0
- checkr3 fffe
-
- move.d 0xfedaffff,r3
- move.d -0xfedaffff,r4
- sub.w r4,r3
- test_cc 1 0 0 0
- checkr3 fedafffe
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- sub.w r4,r3
- test_cc 0 0 0 0
- checkr3 78133bdb
-
- moveq -1,r3
- moveq -2,r4
- sub.b r4,r3
- test_cc 0 0 0 0
- checkr3 ffffff01
-
- moveq 2,r3
- moveq 1,r4
- sub.b r4,r3
- test_cc 0 0 0 0
- checkr3 1
-
- move.d -0xff,r4
- move.d 0xff,r3
- sub.b r4,r3
- test_cc 1 0 0 0
- checkr3 fe
-
- move.d -0xfeda49ff,r4
- move.d 0xfeda49ff,r3
- sub.b r4,r3
- test_cc 1 0 0 0
- checkr3 feda49fe
-
- move.d -0x5432f789,r4
- move.d 0x78134452,r3
- sub.b r4,r3
- test_cc 1 0 0 1
- checkr3 781344db
-
- move.d 0x85649222,r3
- move.d 0x77445622,r4
- sub.b r4,r3
- test_cc 0 1 0 0
- checkr3 85649200
-
- quit
diff --git a/tests/tcg/cris/bare/check_xarith.s b/tests/tcg/cris/bare/check_xarith.s
deleted file mode 100644
index 80038b2..0000000
--- a/tests/tcg/cris/bare/check_xarith.s
+++ /dev/null
@@ -1,72 +0,0 @@
-
-.include "testutils.inc"
-
- start
-
- moveq -1, $r0
- moveq 0, $r1
- addq 1, $r0
- ax
- addq 0, $r1
-
- move.d $r0, $r3
- checkr3 0
- move.d $r1, $r3
- checkr3 1
-
- move.d 0, $r0
- moveq -1, $r1
- subq 1, $r0
- ax
- subq 0, $r1
-
- move.d $r0, $r3
- checkr3 ffffffff
- move.d $r1, $r3
- checkr3 fffffffe
-
-
- moveq -1, $r0
- moveq -1, $r1
- cmpq -1, $r0
- ax
- cmpq -1, $r1
- beq 1f
- nop
- fail
-1:
- cmpq 0, $r0
- ax
- cmpq -1, $r1
- bne 1f
- nop
- fail
-1:
-
- ;; test for broken X sequence, run it several times.
- moveq 8, $r0
-1:
- moveq 0, $r3
- move.d $r0, $r1
- andq 1, $r1
- lslq 4, $r1
- moveq 1, $r2
- or.d $r1, $r2
- ba 2f
- move $r2, $ccs
-2:
- addq 0, $r3
- move.d $r0, $r4
- move.d $r1, $r5
- move.d $r2, $r6
- move.d $r3, $r7
- lsrq 4, $r1
- move.d $r1, $r8
- xor $r1, $r3
- checkr3 0
- subq 1, $r0
- bne 1b
- nop
-
- pass
- quit
diff --git a/tests/tcg/cris/bare/crt.s b/tests/tcg/cris/bare/crt.s
deleted file mode 100644
index af027d7..0000000
--- a/tests/tcg/cris/bare/crt.s
+++ /dev/null
@@ -1,13 +0,0 @@
- .data
-_stack_start:
- .space 8192, 0
-_stack_end:
- .text
- .global _start
-_start:
- move.d _stack_end, $sp
- jsr main
- nop
- moveq 0, $r10
- jump exit
- nop
diff --git a/tests/tcg/cris/bare/sys.c b/tests/tcg/cris/bare/sys.c
deleted file mode 100644
index 1644eec..0000000
--- a/tests/tcg/cris/bare/sys.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Helper functions for CRIS system tests
- *
- * There is no libc and only a limited set of headers.
- */
-
-#include <stddef.h>
-
-void exit(int status)
-{
- register unsigned int callno asm ("r9") = 1; /* NR_exit */
-
- asm volatile ("break 13\n"
- : /* no outputs */
- : "r" (callno)
- : "memory");
- while (1) {
- /* do nothing */
- };
-}
-
-size_t write(int fd, const void *buf, size_t count)
-{
- register unsigned int callno asm ("r9") = 4; /* NR_write */
- register unsigned int r10 asm ("r10") = fd;
- register const void *r11 asm ("r11") = buf;
- register size_t r12 asm ("r12") = count;
- register unsigned int r asm ("r10");
-
- asm volatile ("break 13\n"
- : "=r" (r)
- : "r" (callno), "0" (r10), "r" (r11), "r" (r12)
- : "memory");
-
- return r;
-}
-
-static inline int mystrlen(char *s)
-{
- int i = 0;
- while (s[i]) {
- i++;
- }
- return i;
-}
-
-
-void pass(void)
-{
- char s[] = "passed.\n";
- write(1, s, sizeof(s) - 1);
- exit(0);
-}
-
-void _fail(char *reason)
-{
- char s[] = "\nfailed: ";
- int len = mystrlen(reason);
- write(1, s, sizeof(s) - 1);
- write(1, reason, len);
- write(1, "\n", 1);
- exit(1);
-}
diff --git a/tests/tcg/cris/bare/testutils.inc b/tests/tcg/cris/bare/testutils.inc
deleted file mode 100644
index aa1641b..0000000
--- a/tests/tcg/cris/bare/testutils.inc
+++ /dev/null
@@ -1,117 +0,0 @@
- .syntax no_register_prefix
-
- .macro start
- .text
- .global main
-main:
- .endm
-
- .macro quit
- jump pass
- nop
- .endm
-
- .macro pass
- jump pass
- nop
- .endm
-
- .macro startnostack
- start
- .endm
-
- .macro fail
- .data
-99:
- .asciz " checkr3 failed\n"
- .text
- move.d 99b, $r10
- jsr _fail
- nop
- .endm
-
- .macro checkr3 val
- cmp.d 0x\val, $r3
- beq 100f
- nop
- .data
-99:
- .asciz "checkr3 failed\n"
- .text
- move.d 99b, $r10
- jsr _fail
- nop
-100:
- .endm
-
-; Test the condition codes
- .macro test_cc N Z V C
- .if \N
- bpl 9f
- nop
- .else
- bmi 9f
- nop
- .endif
- .if \Z
- bne 9f
- nop
- .else
- beq 9f
- nop
- .endif
- .if \V
- bvc 9f
- nop
- .else
- bvs 9f
- nop
- .endif
- .if \C
- bcc 9f
- nop
- .else
- bcs 9f
- nop
- .endif
- ba 8f
- nop
-9:
- .data
-99:
- .asciz "test_move_cc failed\n"
- .text
- move.d 99b, $r10
- jsr _fail
- nop
-8:
- .endm
-
-
- .macro test_move_cc N Z V C
- .if \N
- bpl 9f
- nop
- .else
- bmi 9f
- nop
- .endif
- .if \Z
- bne 9f
- nop
- .else
- beq 9f
- nop
- .endif
- ba 8f
- nop
-9:
- .data
-99:
- .asciz "test_move_cc failed\n"
- .text
- move.d 99b, $r10
- jsr _fail
- nop
-8:
- .endm
diff --git a/tests/tcg/cris/libc/check_abs.c b/tests/tcg/cris/libc/check_abs.c
deleted file mode 100644
index 08b67b6..0000000
--- a/tests/tcg/cris/libc/check_abs.c
+++ /dev/null
@@ -1,40 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-static always_inline int cris_abs(int n)
-{
- int r;
- asm ("abs\t%1, %0\n" : "=r" (r) : "r" (n));
- return r;
-}
-
-static always_inline void
-verify_abs(int val, int res,
- const int n, const int z, const int v, const int c)
-{
- int r;
-
- cris_tst_cc_init();
- r = cris_abs(val);
- cris_tst_cc(n, z, v, c);
- if (r != res)
- err();
-}
-
-int main(void)
-{
- verify_abs(-1, 1, 0, 0, 0, 0);
- verify_abs(0x80000000, 0x80000000, 1, 0, 0, 0);
- verify_abs(0x7fffffff, 0x7fffffff, 0, 0, 0, 0);
- verify_abs(42, 42, 0, 0, 0, 0);
- verify_abs(1, 1, 0, 0, 0, 0);
- verify_abs(0xffff, 0xffff, 0, 0, 0, 0);
- verify_abs(0xffff, 0xffff, 0, 0, 0, 0);
- verify_abs(-31, 0x1f, 0, 0, 0, 0);
- verify_abs(0, 0, 0, 1, 0, 0);
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_addc.c b/tests/tcg/cris/libc/check_addc.c
deleted file mode 100644
index fc3fb1f..0000000
--- a/tests/tcg/cris/libc/check_addc.c
+++ /dev/null
@@ -1,58 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-static always_inline int cris_addc(int a, const int b)
-{
- asm ("addc\t%1, %0\n" : "+r" (a) : "r" (b));
- return a;
-}
-
-#define verify_addc(a, b, res, n, z, v, c) \
-{ \
- int r; \
- r = cris_addc((a), (b)); \
- cris_tst_cc((n), (z), (v), (c)); \
- if (r != (res)) \
- err(); \
-}
-
-int main(void)
-{
- cris_tst_cc_init();
- asm volatile ("clearf cz");
- verify_addc(0, 0, 0, 0, 0, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("setf z");
- verify_addc(0, 0, 0, 0, 1, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("setf cz");
- verify_addc(0, 0, 1, 0, 0, 0, 0);
- cris_tst_cc_init();
- asm volatile ("clearf c");
- verify_addc(-1, 2, 1, 0, 0, 0, 1);
-
- cris_tst_cc_init();
- asm volatile ("clearf nzv");
- asm volatile ("setf c");
- verify_addc(-1, 2, 2, 0, 0, 0, 1);
-
- cris_tst_cc_init();
- asm volatile ("setf c");
- verify_addc(0xffff, 0xffff, 0x1ffff, 0, 0, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("clearf nzvc");
- verify_addc(-1, -1, 0xfffffffe, 1, 0, 0, 1);
-
- cris_tst_cc_init();
- asm volatile ("setf c");
- verify_addc(0x78134452, 0x5432f789, 0xcc463bdc, 1, 0, 1, 0);
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_addcm.c b/tests/tcg/cris/libc/check_addcm.c
deleted file mode 100644
index b355ba1..0000000
--- a/tests/tcg/cris/libc/check_addcm.c
+++ /dev/null
@@ -1,85 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-/* need to avoid acr as source here. */
-static always_inline int cris_addc_m(int a, const int *b)
-{
- asm volatile ("addc [%1], %0\n" : "+r" (a) : "r" (b));
- return a;
-}
-
-/* 'b' is a crisv32 constrain to avoid postinc with $acr. */
-static always_inline int cris_addc_pi_m(int a, int **b)
-{
- asm volatile ("addc [%1+], %0\n" : "+r" (a), "+b" (*b));
- return a;
-}
-
-#define verify_addc_m(a, b, res, n, z, v, c) \
-{ \
- int r; \
- r = cris_addc_m((a), (b)); \
- cris_tst_cc((n), (z), (v), (c)); \
- if (r != (res)) \
- err(); \
-}
-
-#define verify_addc_pi_m(a, b, res, n, z, v, c) \
-{ \
- int r; \
- r = cris_addc_pi_m((a), (b)); \
- cris_tst_cc((n), (z), (v), (c)); \
- if (r != (res)) \
- err(); \
-}
-
-int x[] = { 0, 0, 2, -1, 0xffff, -1, 0x5432f789};
-
-int main(void)
-{
- int *p = (void *)&x[0];
-#if 1
- cris_tst_cc_init();
- asm volatile ("clearf cz");
- verify_addc_m(0, p, 0, 0, 0, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("setf z");
- verify_addc_m(0, p, 0, 0, 1, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("setf c");
- verify_addc_m(0, p, 1, 0, 0, 0, 0);
-
- cris_tst_cc_init();
- asm volatile ("clearf c");
- verify_addc_pi_m(0, &p, 0, 0, 1, 0, 0);
-
- p = &x[1];
- cris_tst_cc_init();
- asm volatile ("setf c");
- verify_addc_pi_m(0, &p, 1, 0, 0, 0, 0);
-
- if (p != &x[2])
- err();
-
- cris_tst_cc_init();
- asm volatile ("clearf c");
- verify_addc_pi_m(-1, &p, 1, 0, 0, 0, 1);
-
- if (p != &x[3])
- err();
-#endif
- p = &x[3];
- /* TODO: investigate why this one fails. */
- cris_tst_cc_init();
- asm volatile ("setf c");
- verify_addc_m(2, p, 2, 0, 0, 0, 1);
- p += 4;
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_addo.c b/tests/tcg/cris/libc/check_addo.c
deleted file mode 100644
index 4235e5f..0000000
--- a/tests/tcg/cris/libc/check_addo.c
+++ /dev/null
@@ -1,125 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-/* this would be better to do in asm, it's an orgy in GCC inline asm now. */
-
-#define cris_addo_b(o, v) \
- asm volatile ("addo.b\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr");
-#define cris_addo_w(o, v) \
- asm volatile ("addo.w\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr");
-#define cris_addo_d(o, v) \
- asm volatile ("addo.d\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr");
-#define cris_addo_pi_b(o, v) \
- asm volatile ("addo.b\t[%0+], %1, $acr\n" \
- : "+b" (o): "r" (v) : "acr");
-#define cris_addo_pi_w(o, v) \
- asm volatile ("addo.w\t[%0+], %1, $acr\n" \
- : "+b" (o): "r" (v) : "acr");
-#define cris_addo_pi_d(o, v) \
- asm volatile ("addo.d\t[%0+], %1, $acr\n" \
- : "+b" (o): "r" (v) : "acr");
-
-struct {
- uint32_t v1;
- uint16_t v2;
- uint32_t v3;
- uint8_t v4;
- uint8_t v5;
- uint16_t v6;
- uint32_t v7;
-} y = {
- 32769,
- -1,
- 5,
- 3, -4,
- 2,
- -76789887
-};
-
-static int x[3] = {0x55aa77ff, 0xccff2244, 0x88ccee19};
-
-int main(void)
-{
- int *r;
- unsigned char *t, *p;
-
- /* Note, this test-case will trig an unaligned access, partly
- to x[0] and to [x1]. */
- t = (unsigned char *)x;
- t -= 32768;
- p = (unsigned char *) &y.v1;
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_pi_d(p, t);
- cris_tst_cc(1, 1, 1, 1);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*r != 0x4455aa77)
- err();
-
-
- t += 32770;
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_pi_w(p, t);
- cris_tst_cc(1, 1, 1, 1);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*r != 0x4455aa77)
- err();
-
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_d(p, r);
- cris_tst_cc(1, 1, 1, 1);
- p += 4;
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*r != 0xee19ccff)
- err();
-
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_pi_b(p, t);
- cris_tst_cc(0, 0, 0, 0);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*(uint16_t*)r != 0xff22)
- err();
-
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_b(p, r);
- cris_tst_cc(1, 1, 1, 1);
- p += 1;
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*r != 0x4455aa77)
- err();
-
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_w(p, r);
- cris_tst_cc(1, 1, 1, 1);
- p += 2;
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- if (*r != 0xff224455)
- err();
-
- mb(); /* don't reorder anything beyond here. */
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addo_pi_d(p, t);
- cris_tst_cc(0, 0, 0, 0);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (r));
- r = (void*)(((char *)r) + 76789885);
- if (*r != 0x55aa77ff)
- err();
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_addoq.c b/tests/tcg/cris/libc/check_addoq.c
deleted file mode 100644
index ed509e2..0000000
--- a/tests/tcg/cris/libc/check_addoq.c
+++ /dev/null
@@ -1,44 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-/* this would be better to do in asm, it's an orgy in GCC inline asm now. */
-
-/* ACR will be clobbered. */
-#define cris_addoq(o, v) \
- asm volatile ("addoq\t%1, %0, $acr\n" : : "r" (v), "i" (o) : "acr");
-
-
-int main(void)
-{
- int x[3] = {0x55aa77ff, 0xccff2244, 0x88ccee19};
- int *p, *t = x + 1;
-
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addoq(0, t);
- cris_tst_cc(1, 1, 1, 1);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (p));
- if (*p != 0xccff2244)
- err();
-
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_addoq(4, t);
- cris_tst_cc(0, 0, 0, 0);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (p));
- if (*p != 0x88ccee19)
- err();
-
- cris_tst_cc_init();
- asm volatile ("clearf\tzvnc\n");
- cris_addoq(-8, t + 1);
- cris_tst_cc(0, 0, 0, 0);
- asm volatile ("move.d\t$acr, %0\n" : "=r" (p));
- if (*p != 0x55aa77ff)
- err();
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_bound.c b/tests/tcg/cris/libc/check_bound.c
deleted file mode 100644
index d956ab9..0000000
--- a/tests/tcg/cris/libc/check_bound.c
+++ /dev/null
@@ -1,142 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-static always_inline int cris_bound_b(int v, int b)
-{
- int r = v;
- asm ("bound.b\t%1, %0\n" : "+r" (r) : "ri" (b));
- return r;
-}
-
-static always_inline int cris_bound_w(int v, int b)
-{
- int r = v;
- asm ("bound.w\t%1, %0\n" : "+r" (r) : "ri" (b));
- return r;
-}
-
-static always_inline int cris_bound_d(int v, int b)
-{
- int r = v;
- asm ("bound.d\t%1, %0\n" : "+r" (r) : "ri" (b));
- return r;
-}
-
-int main(void)
-{
- int r;
-
- cris_tst_cc_init();
- r = cris_bound_d(-1, 2);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_d(2, 0xffffffff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_d(0xffff, 0xffff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xffff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_d(-1, 0xffffffff);
- cris_tst_cc(1, 0, 0, 0);
- if (r != 0xffffffff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_d(0x78134452, 0x5432f789);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0x5432f789)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(-1, 2);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(-1, 0xffff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xffff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(2, 0xffff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(0xfedaffff, 0xffff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xffff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(0x78134452, 0xf789);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xf789)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(-1, 2);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(2, 0xff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 2)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(-1, 0xff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(0xff, 0xff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(0xfeda49ff, 0xff);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xff)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(0x78134452, 0x89);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0x89)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_w(0x78134452, 0);
- cris_tst_cc(0, 1, 0, 0);
- if (r != 0)
- err();
-
- cris_tst_cc_init();
- r = cris_bound_b(0xffff, -1);
- cris_tst_cc(0, 0, 0, 0);
- if (r != 0xff)
- err();
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_ftag.c b/tests/tcg/cris/libc/check_ftag.c
deleted file mode 100644
index aaa5c97..0000000
--- a/tests/tcg/cris/libc/check_ftag.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-static always_inline void cris_ftag_i(unsigned int x)
-{
- register unsigned int v asm("$r10") = x;
- asm ("ftagi\t[%0]\n" : : "r" (v) );
-}
-static always_inline void cris_ftag_d(unsigned int x)
-{
- register unsigned int v asm("$r10") = x;
- asm ("ftagd\t[%0]\n" : : "r" (v) );
-}
-static always_inline void cris_fidx_i(unsigned int x)
-{
- register unsigned int v asm("$r10") = x;
- asm ("fidxi\t[%0]\n" : : "r" (v) );
-}
-static always_inline void cris_fidx_d(unsigned int x)
-{
- register unsigned int v asm("$r10") = x;
- asm ("fidxd\t[%0]\n" : : "r" (v) );
-}
-
-
-int main(void)
-{
- cris_ftag_i(0);
- cris_ftag_d(0);
- cris_fidx_i(0);
- cris_fidx_d(0);
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c b/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c
deleted file mode 100644
index 45ecd15..0000000
--- a/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/* PR rtl-optimization/28634. On targets with delayed branches,
- dbr_schedule could do the next iteration's addition in the
- branch delay slot, then subtract the value again if the branch
- wasn't taken. This can lead to rounding errors. */
-int x = -1;
-int y = 1;
-int
-main (void)
-{
- while (y > 0)
- y += x;
- if (y != x + 1)
- abort ();
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_gcctorture_pr28634.c b/tests/tcg/cris/libc/check_gcctorture_pr28634.c
deleted file mode 100644
index a0c5254..0000000
--- a/tests/tcg/cris/libc/check_gcctorture_pr28634.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/* PR rtl-optimization/28634. On targets with delayed branches,
- dbr_schedule could do the next iteration's addition in the
- branch delay slot, then subtract the value again if the branch
- wasn't taken. This can lead to rounding errors. */
-double x = -0x1.0p53;
-double y = 1;
-int
-main (void)
-{
- while (y > 0)
- y += x;
- if (y != x + 1)
- abort ();
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_glibc_kernelversion.c b/tests/tcg/cris/libc/check_glibc_kernelversion.c
deleted file mode 100644
index 7aada89..0000000
--- a/tests/tcg/cris/libc/check_glibc_kernelversion.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Check the lz insn.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-
-#define __LINUX_KERNEL_VERSION 131584
-
-#define DL_SYSDEP_OSCHECK(FATAL) \
- do { \
- /* Test whether the kernel is new enough. This test is only \
- performed if the library is not compiled to run on all \
- kernels. */ \
- if (__LINUX_KERNEL_VERSION > 0) \
- { \
- char bufmem[64]; \
- char *buf = bufmem; \
- unsigned int version; \
- int parts; \
- char *cp; \
- struct utsname uts; \
- \
- /* Try the uname syscall */ \
- if (__uname (&uts)) \
- { \
- /* This was not successful. Now try reading the /proc \
- filesystem. */ \
- ssize_t reslen; \
- int fd = __open ("/proc/sys/kernel/osrelease", O_RDONLY); \
- if (fd == -1 \
- || (reslen = __read (fd, bufmem, sizeof (bufmem))) <= 0) \
- /* This also didn't work. We give up since we cannot \
- make sure the library can actually work. */ \
- FATAL ("FATAL: cannot determine library version\n"); \
- __close (fd); \
- buf[MIN (reslen, (ssize_t) sizeof (bufmem) - 1)] = '\0'; \
- } \
- else \
- buf = uts.release; \
- \
- /* Now convert it into a number. The string consists of at most \
- three parts. */ \
- version = 0; \
- parts = 0; \
- cp = buf; \
- while ((*cp >= '0') && (*cp <= '9')) \
- { \
- unsigned int here = *cp++ - '0'; \
- \
- while ((*cp >= '0') && (*cp <= '9')) \
- { \
- here *= 10; \
- here += *cp++ - '0'; \
- } \
- \
- ++parts; \
- version <<= 8; \
- version |= here; \
- \
- if (*cp++ != '.') \
- /* Another part following? */ \
- break; \
- } \
- \
- if (parts < 3) \
- version <<= 8 * (3 - parts); \
- \
- /* Now we can test with the required version. */ \
- if (version < __LINUX_KERNEL_VERSION) \
- /* Not sufficient. */ \
- FATAL ("FATAL: kernel too old\n"); \
- \
- _dl_osversion = version; \
- } \
- } while (0)
-
-int main(void)
-{
- char bufmem[64] = "2.6.22";
- char *buf = bufmem;
- unsigned int version;
- int parts;
- char *cp;
-
- version = 0;
- parts = 0;
- cp = buf;
- while ((*cp >= '0') && (*cp <= '9'))
- {
- unsigned int here = *cp++ - '0';
-
- while ((*cp >= '0') && (*cp <= '9'))
- {
- here *= 10;
- here += *cp++ - '0';
- }
-
- ++parts;
- version <<= 8;
- version |= here;
-
- if (*cp++ != '.')
- /* Another part following? */
- break;
- }
-
- if (parts < 3)
- version <<= 8 * (3 - parts);
- if (version < __LINUX_KERNEL_VERSION)
- err();
- pass();
- exit(0);
-}
diff --git a/tests/tcg/cris/libc/check_hello.c b/tests/tcg/cris/libc/check_hello.c
deleted file mode 100644
index fb403ba..0000000
--- a/tests/tcg/cris/libc/check_hello.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-int main ()
-{
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_int64.c b/tests/tcg/cris/libc/check_int64.c
deleted file mode 100644
index 69caec1..0000000
--- a/tests/tcg/cris/libc/check_int64.c
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-
-static always_inline int64_t add64(const int64_t a, const int64_t b)
-{
- return a + b;
-}
-
-static always_inline int64_t sub64(const int64_t a, const int64_t b)
-{
- return a - b;
-}
-
-int main(void)
-{
- int64_t a = 1;
- int64_t b = 2;
-
- /* FIXME: add some tests. */
- a = add64(a, b);
- if (a != 3)
- err();
-
- a = sub64(a, b);
- if (a != 1)
- err();
-
- a = add64(a, -4);
- if (a != -3)
- err();
-
- a = add64(a, 3);
- if (a != 0)
- err();
-
- a = 0;
- a = sub64(a, 1);
- if (a != -1)
- err();
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_lz.c b/tests/tcg/cris/libc/check_lz.c
deleted file mode 100644
index bf051a6..0000000
--- a/tests/tcg/cris/libc/check_lz.c
+++ /dev/null
@@ -1,49 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-
-static always_inline int cris_lz(int x)
-{
- int r;
- asm ("lz\t%1, %0\n" : "=r" (r) : "r" (x));
- return r;
-}
-
-void check_lz(void)
-{
- int i;
-
- if (cris_lz(0) != 32)
- err();
- if (cris_lz(1) != 31)
- err();
- if (cris_lz(2) != 30)
- err();
- if (cris_lz(4) != 29)
- err();
- if (cris_lz(8) != 28)
- err();
-
- /* try all positions with a single bit. */
- for (i = 1; i < 32; i++) {
- if (cris_lz(1 << (i-1)) != (32 - i))
- err();
- }
-
- /* try all positions with all bits. */
- for (i = 1; i < 32; i++) {
- /* split up this computation to clarify it. */
- uint32_t val;
- val = (unsigned int)-1 >> (32 - i);
- if (cris_lz(val) != (32 - i))
- err();
- }
-}
-
-int main(void)
-{
- check_lz();
- pass();
- exit(0);
-}
diff --git a/tests/tcg/cris/libc/check_mapbrk.c b/tests/tcg/cris/libc/check_mapbrk.c
deleted file mode 100644
index 1aff762..0000000
--- a/tests/tcg/cris/libc/check_mapbrk.c
+++ /dev/null
@@ -1,39 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-
-/* Basic sanity check that syscalls to implement malloc (brk, mmap2,
- munmap) are trivially functional. */
-
-int main ()
-{
- void *p1, *p2, *p3, *p4, *p5, *p6;
-
- if ((p1 = malloc (8100)) == NULL
- || (p2 = malloc (16300)) == NULL
- || (p3 = malloc (4000)) == NULL
- || (p4 = malloc (500)) == NULL
- || (p5 = malloc (1023*1024)) == NULL
- || (p6 = malloc (8191*1024)) == NULL)
- {
- printf ("fail\n");
- exit (1);
- }
-
- free (p1);
- free (p2);
- free (p3);
- free (p4);
- free (p5);
- free (p6);
-
- p1 = malloc (64000);
- if (p1 == NULL)
- {
- printf ("fail\n");
- exit (1);
- }
- free (p1);
-
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_mmap1.c b/tests/tcg/cris/libc/check_mmap1.c
deleted file mode 100644
index b803f0c..0000000
--- a/tests/tcg/cris/libc/check_mmap1.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-#notarget: cris*-*-elf
-*/
-
-#define _GNU_SOURCE
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-int main (int argc, char *argv[])
-{
- int fd = open (argv[0], O_RDONLY);
- struct stat sb;
- int size;
- void *a;
- const char *str = "a string you'll only find in the program";
-
- if (fd == -1)
- {
- perror ("open");
- abort ();
- }
-
- if (fstat (fd, &sb) < 0)
- {
- perror ("fstat");
- abort ();
- }
-
- size = sb.st_size;
-
- /* We want to test mmapping a size that isn't exactly a page. */
- if ((size & 8191) == 0)
- size--;
-
- a = mmap (NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-
- if (memmem (a, size, str, strlen (str) + 1) == NULL)
- abort ();
-
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_mmap2.c b/tests/tcg/cris/libc/check_mmap2.c
deleted file mode 100644
index 35139a0..0000000
--- a/tests/tcg/cris/libc/check_mmap2.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-#notarget: cris*-*-elf
-*/
-
-#define _GNU_SOURCE
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-int main (int argc, char *argv[])
-{
- int fd = open (argv[0], O_RDONLY);
- struct stat sb;
- int size;
- void *a;
- const char *str = "a string you'll only find in the program";
-
- if (fd == -1)
- {
- perror ("open");
- abort ();
- }
-
- if (fstat (fd, &sb) < 0)
- {
- perror ("fstat");
- abort ();
- }
-
- size = sb.st_size;
-
- /* We want to test mmapping a size that isn't exactly a page. */
- if ((size & 8191) == 0)
- size--;
-
- a = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
-
- if (memmem (a, size, str, strlen (str) + 1) == NULL)
- abort ();
-
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_mmap3.c b/tests/tcg/cris/libc/check_mmap3.c
deleted file mode 100644
index cb890ef..0000000
--- a/tests/tcg/cris/libc/check_mmap3.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-#notarget: cris*-*-elf
-*/
-
-#define _GNU_SOURCE
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-int main (int argc, char *argv[])
-{
- volatile unsigned char *a;
-
- /* Check that we can map a non-multiple of a page and still get a full page. */
- a = mmap (NULL, 0x4c, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (a == NULL || a == (unsigned char *) -1)
- abort ();
-
- a[0] = 0xbe;
- a[8191] = 0xef;
- memset ((char *) a + 1, 0, 8190);
-
- if (a[0] != 0xbe || a[8191] != 0xef)
- abort ();
-
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_moveq.c b/tests/tcg/cris/libc/check_moveq.c
deleted file mode 100644
index 80f2dff..0000000
--- a/tests/tcg/cris/libc/check_moveq.c
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-#define cris_moveq(dst, src) \
- asm volatile ("moveq %1, %0\n" : "=r" (dst) : "i" (src));
-
-
-
-int main(void)
-{
- int t;
-
- cris_tst_cc_init();
- asm volatile ("setf\tzvnc\n");
- cris_moveq(t, 10);
- cris_tst_cc(1, 1, 1, 1);
- if (t != 10)
- err();
-
- /* make sure moveq doesn't clobber the zflag. */
- cris_tst_cc_init();
- asm volatile ("setf vnc\n");
- asm volatile ("clearf z\n");
- cris_moveq(t, 0);
- cris_tst_cc(1, 0, 1, 1);
- if (t != 0)
- err();
-
- /* make sure moveq doesn't clobber the nflag.
- Also check large immediates */
- cris_tst_cc_init();
- asm volatile ("setf zvc\n");
- asm volatile ("clearf n\n");
- cris_moveq(t, -31);
- cris_tst_cc(0, 1, 1, 1);
- if (t != -31)
- err();
-
- cris_tst_cc_init();
- asm volatile ("setf nzvc\n");
- cris_moveq(t, 31);
- cris_tst_cc(1, 1, 1, 1);
- if (t != 31)
- err();
-
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_openpf1.c b/tests/tcg/cris/libc/check_openpf1.c
deleted file mode 100644
index 251d26e..0000000
--- a/tests/tcg/cris/libc/check_openpf1.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Check that --sysroot is applied to open(2).
-#sim: --sysroot=@exedir@
-
- We assume, with EXE being the name of the executable:
- - The simulator executes with cwd the same directory where the executable
- is located (so argv[0] contains a plain filename without directory
- components).
- - There's no /EXE on the host file system. */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-int main (int argc, char *argv[])
-{
- char *fnam = argv[0];
- FILE *f;
- if (argv[0][0] != '/')
- {
- fnam = malloc (strlen (argv[0]) + 2);
- if (fnam == NULL)
- abort ();
- strcpy (fnam, "/");
- strcat (fnam, argv[0]);
- }
-
- f = fopen (fnam, "rb");
- if (f == NULL)
- abort ();
- fclose(f);
-
- /* Cover another execution path. */
- if (fopen ("/nonexistent", "rb") != NULL
- || errno != ENOENT)
- abort ();
- printf ("pass\n");
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_openpf2.c b/tests/tcg/cris/libc/check_openpf2.c
deleted file mode 100644
index 5d56189..0000000
--- a/tests/tcg/cris/libc/check_openpf2.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Check that the simulator has chdir:ed to the --sysroot argument
-#sim: --sysroot=@srcdir@
- (or that --sysroot is applied to relative file paths). */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-int main (int argc, char *argv[])
-{
- FILE *f = fopen ("check_openpf2.c", "rb");
- if (f == NULL)
- abort ();
- fclose(f);
- printf ("pass\n");
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_openpf3.c b/tests/tcg/cris/libc/check_openpf3.c
deleted file mode 100644
index 557adee..0000000
--- a/tests/tcg/cris/libc/check_openpf3.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Basic file operations (rename, unlink); once without sysroot. We
- also test that the simulator has chdir:ed to PREFIX, when defined. */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#ifndef PREFIX
-#define PREFIX
-#endif
-
-void err (const char *s)
-{
- perror (s);
- abort ();
-}
-
-int main (int argc, char *argv[])
-{
- FILE *f;
- struct stat buf;
-
- unlink (PREFIX "testfoo2.tmp");
-
- f = fopen ("testfoo1.tmp", "w");
- if (f == NULL)
- err ("open");
- fclose (f);
-
- if (rename (PREFIX "testfoo1.tmp", PREFIX "testfoo2.tmp") != 0)
- err ("rename");
-
- if (stat (PREFIX "testfoo2.tmp", &buf) != 0
- || !S_ISREG (buf.st_mode))
- err ("stat 1");
-
- if (stat ("testfoo2.tmp", &buf) != 0
- || !S_ISREG (buf.st_mode))
- err ("stat 2");
-
- if (unlink (PREFIX "testfoo2.tmp") != 0)
- err ("unlink");
-
- printf ("pass\n");
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_openpf5.c b/tests/tcg/cris/libc/check_openpf5.c
deleted file mode 100644
index 1f86ea2..0000000
--- a/tests/tcg/cris/libc/check_openpf5.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Check that TRT happens when error on too many opened files.
-#notarget: cris*-*-elf
-#sim: --sysroot=@exedir@
-*/
-#include <stddef.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <limits.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-
-int main (int argc, char *argv[])
-{
- int i;
- int filemax;
-
-#ifdef OPEN_MAX
- filemax = OPEN_MAX;
-#else
- filemax = sysconf (_SC_OPEN_MAX);
-#endif
-
- char *fn = malloc (strlen (argv[0]) + 2);
- if (fn == NULL)
- abort ();
- strcpy (fn, "/");
- strcat (fn, argv[0]);
-
- for (i = 0; i < filemax + 1; i++)
- {
- if (open (fn, O_RDONLY) < 0)
- {
- /* Shouldn't happen too early. */
- if (i < filemax - 3 - 1)
- {
- fprintf (stderr, "i: %d\n", i);
- abort ();
- }
- if (errno != EMFILE)
- {
- perror ("open");
- abort ();
- }
- goto ok;
- }
- }
- abort ();
-
-ok:
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_settls1.c b/tests/tcg/cris/libc/check_settls1.c
deleted file mode 100644
index 3abc3a9..0000000
--- a/tests/tcg/cris/libc/check_settls1.c
+++ /dev/null
@@ -1,45 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <unistd.h>
-
-#include <sys/syscall.h>
-
-#ifndef SYS_set_thread_area
-#define SYS_set_thread_area 243
-#endif
-
-int main (void)
-{
- unsigned long tp, old_tp;
- int ret;
-
- asm volatile ("move $pid,%0" : "=r" (old_tp));
- old_tp &= ~0xff;
-
- ret = syscall (SYS_set_thread_area, 0xf0);
- if (ret != -1 || errno != EINVAL) {
- syscall (SYS_set_thread_area, old_tp);
- perror ("Invalid thread area accepted:");
- abort();
- }
-
- ret = syscall (SYS_set_thread_area, 0xeddeed00);
- if (ret != 0) {
- perror ("Valid thread area not accepted: ");
- abort ();
- }
-
- asm volatile ("move $pid,%0" : "=r" (tp));
- tp &= ~0xff;
- syscall (SYS_set_thread_area, old_tp);
-
- if (tp != 0xeddeed00) {
- * (volatile int *) 0 = 0;
- perror ("tls2");
- abort ();
- }
-
- printf ("pass\n");
- return EXIT_SUCCESS;
-}
diff --git a/tests/tcg/cris/libc/check_sigalrm.c b/tests/tcg/cris/libc/check_sigalrm.c
deleted file mode 100644
index 39fa8d9..0000000
--- a/tests/tcg/cris/libc/check_sigalrm.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <unistd.h>
-
-#define MAGIC (0xdeadbeef)
-
-int s = 0;
-void sighandler(int sig)
-{
- s = MAGIC;
-}
-
-int main(int argc, char **argv)
-{
- int p;
-
- p = getpid();
- signal(SIGALRM, sighandler);
- kill(p, SIGALRM);
- if (s != MAGIC)
- return EXIT_FAILURE;
-
- printf ("passed\n");
- return EXIT_SUCCESS;
-}
diff --git a/tests/tcg/cris/libc/check_stat1.c b/tests/tcg/cris/libc/check_stat1.c
deleted file mode 100644
index 2e2cae5..0000000
--- a/tests/tcg/cris/libc/check_stat1.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-int main (void)
-{
- struct stat buf;
-
- if (stat (".", &buf) != 0
- || !S_ISDIR (buf.st_mode))
- abort ();
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_stat2.c b/tests/tcg/cris/libc/check_stat2.c
deleted file mode 100644
index e36172e..0000000
--- a/tests/tcg/cris/libc/check_stat2.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-#notarget: cris*-*-elf
-*/
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-int main (void)
-{
- struct stat buf;
-
- if (lstat (".", &buf) != 0
- || !S_ISDIR (buf.st_mode))
- abort ();
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_stat3.c b/tests/tcg/cris/libc/check_stat3.c
deleted file mode 100644
index 36a9d5d..0000000
--- a/tests/tcg/cris/libc/check_stat3.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Simulator options:
-#sim: --sysroot=@exedir@
-*/
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-int main (int argc, char *argv[])
-{
- char path[1024] = "/";
- struct stat buf;
-
- strncat(path, argv[0], sizeof(path) - 2);
- if (stat (".", &buf) != 0
- || !S_ISDIR (buf.st_mode))
- abort ();
- if (stat (path, &buf) != 0
- || !S_ISREG (buf.st_mode))
- abort ();
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_stat4.c b/tests/tcg/cris/libc/check_stat4.c
deleted file mode 100644
index 04f21fe..0000000
--- a/tests/tcg/cris/libc/check_stat4.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Simulator options:
-#notarget: cris*-*-elf
-#sim: --sysroot=@exedir@
-*/
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-int main (int argc, char *argv[])
-{
- char path[1024] = "/";
- struct stat buf;
-
- strncat(path, argv[0], sizeof(path) - 2);
- if (lstat (".", &buf) != 0
- || !S_ISDIR (buf.st_mode))
- abort ();
- if (lstat (path, &buf) != 0
- || !S_ISREG (buf.st_mode))
- abort ();
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/check_swap.c b/tests/tcg/cris/libc/check_swap.c
deleted file mode 100644
index 9a68c1e..0000000
--- a/tests/tcg/cris/libc/check_swap.c
+++ /dev/null
@@ -1,76 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "sys.h"
-#include "crisutils.h"
-
-#define N 8
-#define W 4
-#define B 2
-#define R 1
-
-static always_inline int cris_swap(const int mode, int x)
-{
- switch (mode)
- {
- case N: asm ("swapn\t%0\n" : "+r" (x) : "0" (x)); break;
- case W: asm ("swapw\t%0\n" : "+r" (x) : "0" (x)); break;
- case B: asm ("swapb\t%0\n" : "+r" (x) : "0" (x)); break;
- case R: asm ("swapr\t%0\n" : "+r" (x) : "0" (x)); break;
- case B|R: asm ("swapbr\t%0\n" : "+r" (x) : "0" (x)); break;
- case W|R: asm ("swapwr\t%0\n" : "+r" (x) : "0" (x)); break;
- case W|B: asm ("swapwb\t%0\n" : "+r" (x) : "0" (x)); break;
- case W|B|R: asm ("swapwbr\t%0\n" : "+r" (x) : "0" (x)); break;
- case N|R: asm ("swapnr\t%0\n" : "+r" (x) : "0" (x)); break;
- case N|B: asm ("swapnb\t%0\n" : "+r" (x) : "0" (x)); break;
- case N|B|R: asm ("swapnbr\t%0\n" : "+r" (x) : "0" (x)); break;
- case N|W: asm ("swapnw\t%0\n" : "+r" (x) : "0" (x)); break;
- default:
- err();
- break;
- }
- return x;
-}
-
-/* Made this a macro to be able to pick up the location of the errors. */
-#define verify_swap(mode, val, expected, n, z) \
-do { \
- int r; \
- cris_tst_cc_init(); \
- r = cris_swap(mode, val); \
- cris_tst_mov_cc(n, z); \
- if (r != expected) \
- err(); \
-} while(0)
-
-void check_swap(void)
-{
- /* Some of these numbers are borrowed from GDB's cris sim
- testsuite. */
- if (cris_swap(N, 0) != 0xffffffff)
- err();
- if (cris_swap(W, 0x12345678) != 0x56781234)
- err();
- if (cris_swap(B, 0x12345678) != 0x34127856)
- err();
-
- verify_swap(R, 0x78134452, 0x1ec8224a, 0, 0);
- verify_swap(B, 0x78134452, 0x13785244, 0, 0);
- verify_swap(B|R, 0x78134452, 0xc81e4a22, 1, 0);
- verify_swap(W, 0x78134452, 0x44527813, 0, 0);
- verify_swap(W|R, 0x78134452, 0x224a1ec8, 0, 0);
- verify_swap(W|B|R, 0x78134452, 0x4a22c81e, 0, 0);
- verify_swap(N, 0x78134452, 0x87ecbbad, 1, 0);
- verify_swap(N|R, 0x78134452, 0xe137ddb5, 1, 0);
- verify_swap(N|B, 0x78134452, 0xec87adbb, 1, 0);
- verify_swap(N|B|R, 0x78134452, 0x37e1b5dd, 0, 0);
- verify_swap(N|W, 0x78134452, 0xbbad87ec, 1, 0);
- verify_swap(N|B|R, 0xffffffff, 0, 0, 1);
-}
-
-int main(void)
-{
- check_swap();
- pass();
- return 0;
-}
diff --git a/tests/tcg/cris/libc/check_time2.c b/tests/tcg/cris/libc/check_time2.c
deleted file mode 100644
index 20b69b4..0000000
--- a/tests/tcg/cris/libc/check_time2.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/* CB_SYS_time doesn't implement the Linux time syscall; the return
- value isn't written to the argument. */
-
-#include <time.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-int
-main (void)
-{
- time_t x = (time_t) -1;
- time_t t = time (&x);
-
- if (t == (time_t) -1 || t != x)
- abort ();
- printf ("pass\n");
- exit (0);
-}
diff --git a/tests/tcg/cris/libc/crisutils.h b/tests/tcg/cris/libc/crisutils.h
deleted file mode 100644
index bbbe6c5..0000000
--- a/tests/tcg/cris/libc/crisutils.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef CRISUTILS_H
-#define CRISUTILS_H 1
-
-static char *tst_cc_loc = NULL;
-
-#define cris_tst_cc_init() \
-do { tst_cc_loc = "test_cc failed at " CURRENT_LOCATION; } while(0)
-
-/* We need a real symbol to signal error. */
-void _err(void) {
- if (!tst_cc_loc)
- tst_cc_loc = "tst_cc_failed\n";
- _fail(tst_cc_loc);
-}
-
-static always_inline void cris_tst_cc_n1(void)
-{
- asm volatile ("bpl _err\n"
- "nop\n");
-}
-static always_inline void cris_tst_cc_n0(void)
-{
- asm volatile ("bmi _err\n"
- "nop\n");
-}
-
-static always_inline void cris_tst_cc_z1(void)
-{
- asm volatile ("bne _err\n"
- "nop\n");
-}
-static always_inline void cris_tst_cc_z0(void)
-{
- asm volatile ("beq _err\n"
- "nop\n");
-}
-static always_inline void cris_tst_cc_v1(void)
-{
- asm volatile ("bvc _err\n"
- "nop\n");
-}
-static always_inline void cris_tst_cc_v0(void)
-{
- asm volatile ("bvs _err\n"
- "nop\n");
-}
-
-static always_inline void cris_tst_cc_c1(void)
-{
- asm volatile ("bcc _err\n"
- "nop\n");
-}
-static always_inline void cris_tst_cc_c0(void)
-{
- asm volatile ("bcs _err\n"
- "nop\n");
-}
-
-static always_inline void cris_tst_mov_cc(int n, int z)
-{
- if (n) cris_tst_cc_n1(); else cris_tst_cc_n0();
- if (z) cris_tst_cc_z1(); else cris_tst_cc_z0();
- asm volatile ("" : : "g" (_err));
-}
-
-static always_inline void cris_tst_cc(const int n, const int z,
- const int v, const int c)
-{
- if (n) cris_tst_cc_n1(); else cris_tst_cc_n0();
- if (z) cris_tst_cc_z1(); else cris_tst_cc_z0();
- if (v) cris_tst_cc_v1(); else cris_tst_cc_v0();
- if (c) cris_tst_cc_c1(); else cris_tst_cc_c0();
- asm volatile ("" : : "g" (_err));
-}
-
-#endif
diff --git a/tests/tcg/cris/libc/sys.h b/tests/tcg/cris/libc/sys.h
deleted file mode 100644
index 3dd47bb..0000000
--- a/tests/tcg/cris/libc/sys.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#include <unistd.h>
-
-#define STRINGIFY(x) #x
-#define TOSTRING(x) STRINGIFY(x)
-
-#define always_inline inline __attribute__((always_inline))
-
-#define CURRENT_LOCATION __FILE__ ":" TOSTRING(__LINE__)
-
-#define err() \
-{ \
- _fail("at " CURRENT_LOCATION " "); \
-}
-
-#define mb() asm volatile ("" : : : "memory")
-
-void pass(void);
-void _fail(char *reason);
diff --git a/tests/tcg/hexagon/usr.c b/tests/tcg/hexagon/usr.c
index 92bc86a..f0b23d3 100644
--- a/tests/tcg/hexagon/usr.c
+++ b/tests/tcg/hexagon/usr.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1007,6 +1007,11 @@ int main()
TEST_P_OP_R(conv_sf2d_chop, SF_QNaN, 0xffffffffffffffffULL, USR_FPINVF);
TEST_P_OP_R(conv_sf2d_chop, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF);
+ TEST_R_OP_R(conv_sf2uw, SF_zero_neg, 0, USR_CLEAR);
+ TEST_R_OP_R(conv_sf2uw_chop, SF_zero_neg, 0, USR_CLEAR);
+ TEST_P_OP_R(conv_sf2ud, SF_zero_neg, 0, USR_CLEAR);
+ TEST_P_OP_R(conv_sf2ud_chop, SF_zero_neg, 0, USR_CLEAR);
+
TEST_R_OP_P(conv_df2sf, DF_QNaN, SF_HEX_NaN, USR_CLEAR);
TEST_R_OP_P(conv_df2sf, DF_SNaN, SF_HEX_NaN, USR_FPINVF);
TEST_R_OP_P(conv_df2uw, DF_QNaN, 0xffffffff, USR_FPINVF);
@@ -1020,6 +1025,11 @@ int main()
TEST_R_OP_P(conv_df2uw_chop, DF_QNaN, 0xffffffff, USR_FPINVF);
TEST_R_OP_P(conv_df2uw_chop, DF_SNaN, 0xffffffff, USR_FPINVF);
+ TEST_R_OP_P(conv_df2uw, DF_zero_neg, 0, USR_CLEAR);
+ TEST_R_OP_P(conv_df2uw_chop, DF_zero_neg, 0, USR_CLEAR);
+ TEST_P_OP_P(conv_df2ud, DF_zero_neg, 0, USR_CLEAR);
+ TEST_P_OP_P(conv_df2ud_chop, DF_zero_neg, 0, USR_CLEAR);
+
/* Test for typo in HELPER(conv_df2uw_chop) */
TEST_R_OP_P(conv_df2uw_chop, 0xffffff7f00000001ULL, 0xffffffff, USR_FPINVF);
diff --git a/tests/tcg/i386/Makefile.softmmu-target b/tests/tcg/i386/Makefile.softmmu-target
index 5266f23..4096a1c 100644
--- a/tests/tcg/i386/Makefile.softmmu-target
+++ b/tests/tcg/i386/Makefile.softmmu-target
@@ -25,7 +25,7 @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)
.PRECIOUS: $(CRT_OBJS)
%.o: $(CRT_PATH)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target
index bbe2c44..f1df404 100644
--- a/tests/tcg/i386/Makefile.target
+++ b/tests/tcg/i386/Makefile.target
@@ -22,7 +22,7 @@ run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max
test-i386-pcmpistri: CFLAGS += -msse4.2
run-test-i386-pcmpistri: QEMU_OPTS += -cpu max
-test-i386-bmi2: CFLAGS=-O2
+test-i386-bmi2: CFLAGS=-O2 -fwrapv
run-test-i386-bmi2: QEMU_OPTS += -cpu max
test-i386-adcox: CFLAGS=-O2
diff --git a/tests/tcg/i386/test-avx.c b/tests/tcg/i386/test-avx.c
index 230e6d8..80fe363 100644
--- a/tests/tcg/i386/test-avx.c
+++ b/tests/tcg/i386/test-avx.c
@@ -244,7 +244,7 @@ v4di indexd = {0x00000002ffffffcdull, 0xfffffff500000010ull,
0x0000003afffffff0ull, 0x000000000000000eull};
v4di gather_mem[0x20];
-_Static_assert(sizeof(gather_mem) == 1024);
+_Static_assert(sizeof(gather_mem) == 1024, "gather_mem not expected size");
void init_f16reg(v4di *r)
{
diff --git a/tests/tcg/i386/test-i386-adcox.c b/tests/tcg/i386/test-i386-adcox.c
index 16169ef..a717064 100644
--- a/tests/tcg/i386/test-i386-adcox.c
+++ b/tests/tcg/i386/test-i386-adcox.c
@@ -29,7 +29,7 @@ void test_adox_adcx(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_ope
"adcx %3, %1;"
"pushf; pop %0"
: "+r" (flags), "+r" (out_adcx), "+r" (out_adox)
- : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox));
+ : "r" ((REG) - 1), "0" (flags), "1" (out_adcx), "2" (out_adox));
assert(out_adcx == in_c + adcx_operand - 1);
assert(out_adox == in_o + adox_operand - 1);
@@ -53,8 +53,8 @@ void test_adcx_adox(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_ope
"adcx %3, %1;"
"adox %3, %2;"
"pushf; pop %0"
- : "+r" (flags), "+r" (out_adcx), "+r" (out_adox)
- : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox));
+ : "+r"(flags), "+r"(out_adcx), "+r"(out_adox)
+ : "r" ((REG)-1));
assert(out_adcx == in_c + adcx_operand - 1);
assert(out_adox == in_o + adox_operand - 1);
diff --git a/tests/tcg/loongarch64/Makefile.softmmu-target b/tests/tcg/loongarch64/Makefile.softmmu-target
index 908f3a8..6d4a20f 100644
--- a/tests/tcg/loongarch64/Makefile.softmmu-target
+++ b/tests/tcg/loongarch64/Makefile.softmmu-target
@@ -16,13 +16,13 @@ LINK_SCRIPT=$(LOONGARCH64_SYSTEM_SRC)/kernel.ld
LDFLAGS=-Wl,-T$(LINK_SCRIPT)
TESTS+=$(LOONGARCH64_TESTS) $(MULTIARCH_TESTS)
CFLAGS+=-nostdlib -g -O1 -march=loongarch64 -mabi=lp64d $(MINILIB_INC)
-LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc
+LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc -Wl,--no-warn-rwx-segments
# building head blobs
.PRECIOUS: $(CRT_OBJS)
%.o: $(CRT_PATH)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -x assembler-with-cpp -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
diff --git a/tests/tcg/loongarch64/system/kernel.ld b/tests/tcg/loongarch64/system/kernel.ld
index f1a7c01..56d8588 100644
--- a/tests/tcg/loongarch64/system/kernel.ld
+++ b/tests/tcg/loongarch64/system/kernel.ld
@@ -3,7 +3,7 @@ ENTRY(_start)
SECTIONS
{
/* Linux kernel legacy start address. */
- . = 0x9000000000200000;
+ . = 0x200000;
_text = .;
.text : {
*(.text)
diff --git a/tests/tcg/loongarch64/system/regdef.h b/tests/tcg/loongarch64/system/regdef.h
index faa09b2..b586b4e 100644
--- a/tests/tcg/loongarch64/system/regdef.h
+++ b/tests/tcg/loongarch64/system/regdef.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 Loongson Technology Corporation Limited
*/
diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
index 5e3391e..45c9cfe 100644
--- a/tests/tcg/multiarch/Makefile.target
+++ b/tests/tcg/multiarch/Makefile.target
@@ -42,6 +42,17 @@ munmap-pthread: LDFLAGS+=-pthread
vma-pthread: CFLAGS+=-pthread
vma-pthread: LDFLAGS+=-pthread
+sigreturn-sigmask: CFLAGS+=-pthread
+sigreturn-sigmask: LDFLAGS+=-pthread
+
+# GCC versions 12/13/14/15 at least incorrectly complain about
+# "'SHA1Transform' reading 64 bytes from a region of size 0"; see the gcc bug
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106709
+# Since this is just a standard piece of library code we've borrowed for a
+# TCG test case, suppress the warning rather than trying to modify the
+# code to work around the compiler.
+sha1: CFLAGS+=-Wno-stringop-overread -Wno-unknown-warning-option
+
# The vma-pthread seems very sensitive on gitlab and we currently
# don't know if its exposing a real bug or the test is flaky.
ifneq ($(GITLAB_CI),)
@@ -127,6 +138,13 @@ run-gdbstub-follow-fork-mode-parent: follow-fork-mode
--bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \
following parents on fork)
+run-gdbstub-late-attach: late-attach
+ $(call run-test, $@, env LATE_ATTACH_PY=1 $(GDB_SCRIPT) \
+ --gdb $(GDB) \
+ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" --no-suspend \
+ --bin $< --test $(MULTIARCH_SRC)/gdbstub/late-attach.py, \
+ attaching to a running process)
+
else
run-gdbstub-%:
$(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support")
@@ -136,7 +154,7 @@ EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \
run-gdbstub-registers run-gdbstub-prot-none \
run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \
run-gdbstub-follow-fork-mode-parent \
- run-gdbstub-qxfer-siginfo-read
+ run-gdbstub-qxfer-siginfo-read run-gdbstub-late-attach
# ARM Compatible Semi Hosting Tests
#
@@ -170,5 +188,16 @@ run-plugin-semiconsole-with-%:
TESTS += semihosting semiconsole
endif
+# Test plugin memory access instrumentation
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+ PLUGIN_ARGS=$(COMMA)print-accesses=true
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+ CHECK_PLUGIN_OUTPUT_COMMAND= \
+ $(SRC_PATH)/tests/tcg/multiarch/check-plugin-output.sh \
+ $(QEMU) $<
+
+test-plugin-mem-access: CFLAGS+=-pthread -O0
+test-plugin-mem-access: LDFLAGS+=-pthread -O0
+
# Update TESTS
TESTS += $(MULTIARCH_TESTS)
diff --git a/tests/tcg/multiarch/check-plugin-output.sh b/tests/tcg/multiarch/check-plugin-output.sh
new file mode 100755
index 0000000..80607f0
--- /dev/null
+++ b/tests/tcg/multiarch/check-plugin-output.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# This script runs a given executable using qemu and compares its standard
+# output with the expected plugin output: each line the program prints is
+# searched for (as a regexp) in the plugin output file.
+
+set -euo pipefail
+
+die()
+{
+ echo "$@" 1>&2
+ exit 1
+}
+
+check()
+{
+ file=$1
+ pattern=$2
+ grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
+}
+
+[ $# -eq 3 ] || die "usage: qemu_bin exe plugin_out_file"
+
+qemu_bin=$1; shift
+exe=$1; shift
+plugin_out=$1; shift
+
+expected()
+{
+ $qemu_bin $exe ||
+ die "running $exe failed"
+}
+
+expected | while read line; do
+ check "$plugin_out" "$line"
+done
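For reference, a rough sketch of how this checker is meant to be invoked; the qemu binary and the .pout file name below are assumptions rather than values taken from the patch:

    # usage: check-plugin-output.sh qemu_bin exe plugin_out_file
    # Re-runs the test under QEMU and greps each line it prints (as a
    # regexp) in the captured plugin log.
    ./tests/tcg/multiarch/check-plugin-output.sh \
        qemu-x86_64 ./test-plugin-mem-access test-plugin-mem-access.pout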
diff --git a/tests/tcg/multiarch/gdbstub/interrupt.py b/tests/tcg/multiarch/gdbstub/interrupt.py
index 90a45b5..2d5654d 100644
--- a/tests/tcg/multiarch/gdbstub/interrupt.py
+++ b/tests/tcg/multiarch/gdbstub/interrupt.py
@@ -8,7 +8,7 @@ from __future__ import print_function
#
import gdb
-from test_gdbstub import main, report
+from test_gdbstub import gdb_exit, main, report
def check_interrupt(thread):
@@ -49,7 +49,7 @@ def run_test():
"""
if len(gdb.selected_inferior().threads()) == 1:
print("SKIP: set to run on a single thread")
- exit(0)
+ gdb_exit(0)
gdb.execute("set scheduler-locking on")
for thread in gdb.selected_inferior().threads():
diff --git a/tests/tcg/multiarch/gdbstub/late-attach.py b/tests/tcg/multiarch/gdbstub/late-attach.py
new file mode 100644
index 0000000..1d40efb
--- /dev/null
+++ b/tests/tcg/multiarch/gdbstub/late-attach.py
@@ -0,0 +1,28 @@
+"""Test attaching GDB to a running process.
+
+SPDX-License-Identifier: GPL-2.0-or-later
+"""
+from test_gdbstub import main, report
+
+
+def run_test():
+ """Run through the tests one by one"""
+ try:
+ phase = gdb.parse_and_eval("phase").string()
+ except gdb.error:
+ # Assume the guest did not reach main().
+ phase = "start"
+
+ if phase == "start":
+ gdb.execute("break sigwait")
+ gdb.execute("continue")
+ phase = gdb.parse_and_eval("phase").string()
+ report(phase == "sigwait", "{} == \"sigwait\"".format(phase))
+
+ gdb.execute("signal SIGUSR1")
+
+ exitcode = int(gdb.parse_and_eval("$_exitcode"))
+ report(exitcode == 0, "{} == 0".format(exitcode))
+
+
+main(run_test)
diff --git a/tests/tcg/multiarch/gdbstub/prot-none.py b/tests/tcg/multiarch/gdbstub/prot-none.py
index 7e26458..51082a3 100644
--- a/tests/tcg/multiarch/gdbstub/prot-none.py
+++ b/tests/tcg/multiarch/gdbstub/prot-none.py
@@ -5,7 +5,7 @@ This runs as a sourced script (via -x, via run-test.py).
SPDX-License-Identifier: GPL-2.0-or-later
"""
import ctypes
-from test_gdbstub import main, report
+from test_gdbstub import gdb_exit, main, report
def probe_proc_self_mem():
@@ -22,7 +22,7 @@ def run_test():
"""Run through the tests one by one"""
if not probe_proc_self_mem():
print("SKIP: /proc/self/mem is not usable")
- exit(0)
+ gdb_exit(0)
gdb.Breakpoint("break_here")
gdb.execute("continue")
val = gdb.parse_and_eval("*(char[2] *)q").string()
diff --git a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
index 564613f..6eb6ebf 100644
--- a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
+++ b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
@@ -3,22 +3,17 @@
This runs as a sourced script (via -x, via run-test.py)."""
from __future__ import print_function
import gdb
-from test_gdbstub import main, report
+from test_gdbstub import gdb_exit, main, report
def run_test():
"""Run through the tests one by one"""
- try:
- mappings = gdb.execute("info proc mappings", False, True)
- except gdb.error as exc:
- exc_str = str(exc)
- if "Not supported on this target." in exc_str:
- # Detect failures due to an outstanding issue with how GDB handles
- # the x86_64 QEMU's target.xml, which does not contain the
- # definition of orig_rax. Skip the test in this case.
- print("SKIP: {}".format(exc_str))
- return
- raise
+ if gdb.selected_inferior().architecture().name() == "m68k":
+ # m68k GDB supports only GDB_OSABI_SVR4, but GDB_OSABI_LINUX is
+ # required for the info proc support (see set_gdbarch_info_proc()).
+ print("SKIP: m68k GDB does not support GDB_OSABI_LINUX")
+ gdb_exit(0)
+ mappings = gdb.execute("info proc mappings", False, True)
report(isinstance(mappings, str), "Fetched the mappings from the inferior")
# Broken with host page size > guest page size
# report("/sha1" in mappings, "Found the test binary name in the mappings")
diff --git a/tests/tcg/multiarch/late-attach.c b/tests/tcg/multiarch/late-attach.c
new file mode 100644
index 0000000..20a3640
--- /dev/null
+++ b/tests/tcg/multiarch/late-attach.c
@@ -0,0 +1,41 @@
+/*
+ * Test attaching GDB to a running process.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const char *phase = "start";
+
+int main(void)
+{
+ sigset_t set;
+ int sig;
+
+ assert(sigfillset(&set) == 0);
+ assert(sigprocmask(SIG_BLOCK, &set, NULL) == 0);
+
+ /* Let GDB know it can send SIGUSR1. */
+ phase = "sigwait";
+ if (getenv("LATE_ATTACH_PY")) {
+ assert(sigwait(&set, &sig) == 0);
+ if (sig != SIGUSR1) {
+ fprintf(stderr, "Unexpected signal %d\n", sig);
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Check that the guest does not see host_interrupt_signal. */
+ assert(sigpending(&set) == 0);
+ for (sig = 1; sig < NSIG; sig++) {
+ if (sigismember(&set, sig)) {
+ fprintf(stderr, "Unexpected signal %d\n", sig);
+ return EXIT_FAILURE;
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
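Roughly what the run-gdbstub-late-attach rule above boils down to when run by hand; the gdb binary name and the local paths are assumptions:

    # Launch the guest without suspending it, then let GDB attach late
    # and drive it with the late-attach.py script.
    LATE_ATTACH_PY=1 ./tests/guest-debug/run-test.py \
        --gdb gdb-multiarch --qemu ./qemu-x86_64 --qargs "" --no-suspend \
        --bin ./late-attach --test ./tests/tcg/multiarch/gdbstub/late-attach.py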
diff --git a/tests/tcg/multiarch/linux/linux-sigrtminmax.c b/tests/tcg/multiarch/linux/linux-sigrtminmax.c
new file mode 100644
index 0000000..a7059aa
--- /dev/null
+++ b/tests/tcg/multiarch/linux/linux-sigrtminmax.c
@@ -0,0 +1,74 @@
+/*
+ * Test the lowest and the highest real-time signals.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* For hexagon and microblaze. */
+#ifndef __SIGRTMIN
+#define __SIGRTMIN 32
+#endif
+
+extern char **environ;
+
+static bool seen_sigrtmin, seen_sigrtmax;
+
+static void handle_signal(int sig)
+{
+ if (sig == SIGRTMIN) {
+ seen_sigrtmin = true;
+ } else if (sig == SIGRTMAX) {
+ seen_sigrtmax = true;
+ } else {
+ _exit(1);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ char *qemu = getenv("QEMU");
+ struct sigaction act;
+
+ assert(qemu);
+
+ if (!getenv("QEMU_RTSIG_MAP")) {
+ char **new_argv = malloc((argc + 2) * sizeof(char *));
+ int tsig1, hsig1, count1, tsig2, hsig2, count2;
+ char rt_sigmap[64];
+
+ /* Re-exec with a mapping that includes SIGRTMIN and SIGRTMAX. */
+ new_argv[0] = qemu;
+ memcpy(&new_argv[1], argv, (argc + 1) * sizeof(char *));
+ tsig1 = __SIGRTMIN;
+ /* The host must have a few signals starting from this one. */
+ hsig1 = 36;
+ count1 = SIGRTMIN - __SIGRTMIN + 1;
+ tsig2 = SIGRTMAX;
+ hsig2 = hsig1 + count1;
+ count2 = 1;
+ snprintf(rt_sigmap, sizeof(rt_sigmap), "%d %d %d,%d %d %d",
+ tsig1, hsig1, count1, tsig2, hsig2, count2);
+ setenv("QEMU_RTSIG_MAP", rt_sigmap, 0);
+ assert(execve(new_argv[0], new_argv, environ) == 0);
+ return EXIT_FAILURE;
+ }
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = handle_signal;
+ assert(sigaction(SIGRTMIN, &act, NULL) == 0);
+ assert(sigaction(SIGRTMAX, &act, NULL) == 0);
+
+ assert(kill(getpid(), SIGRTMIN) == 0);
+ assert(seen_sigrtmin);
+ assert(kill(getpid(), SIGRTMAX) == 0);
+ assert(seen_sigrtmax);
+
+ return EXIT_SUCCESS;
+}
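As a worked example of the mapping string the re-exec above constructs: with the usual glibc values SIGRTMIN == 34 and SIGRTMAX == 64, and the test's own assumption that host signals 36 and up are free, the environment ends up as:

    # "target_sig host_sig count" triples, comma separated:
    #   guest 32..34 -> host 36..38, guest 64 -> host 39
    QEMU_RTSIG_MAP="32 36 3,64 39 1" qemu-x86_64 ./linux-sigrtminmax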
diff --git a/tests/tcg/multiarch/test-vma.c b/tests/tcg/multiarch/linux/test-vma.c
index 2893d60..2893d60 100644
--- a/tests/tcg/multiarch/test-vma.c
+++ b/tests/tcg/multiarch/linux/test-vma.c
diff --git a/tests/tcg/multiarch/sigreturn-sigmask.c b/tests/tcg/multiarch/sigreturn-sigmask.c
new file mode 100644
index 0000000..e6cc904
--- /dev/null
+++ b/tests/tcg/multiarch/sigreturn-sigmask.c
@@ -0,0 +1,51 @@
+/*
+ * Test that sigreturn() does not corrupt the signal mask.
+ * Block SIGUSR2 and handle SIGUSR1.
+ * Then sigwait() SIGUSR2, which relies on it remaining blocked.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int seen_sig = -1;
+
+static void signal_func(int sig)
+{
+ seen_sig = sig;
+}
+
+static void *thread_func(void *arg)
+{
+ kill(getpid(), SIGUSR2);
+ return NULL;
+}
+
+int main(void)
+{
+ struct sigaction act = {
+ .sa_handler = signal_func,
+ };
+ pthread_t thread;
+ sigset_t set;
+ int sig;
+
+ assert(sigaction(SIGUSR1, &act, NULL) == 0);
+
+ assert(sigemptyset(&set) == 0);
+ assert(sigaddset(&set, SIGUSR2) == 0);
+ assert(sigprocmask(SIG_BLOCK, &set, NULL) == 0);
+
+ kill(getpid(), SIGUSR1);
+ assert(seen_sig == SIGUSR1);
+
+ assert(pthread_create(&thread, NULL, thread_func, NULL) == 0);
+ assert(sigwait(&set, &sig) == 0);
+ assert(sig == SIGUSR2);
+ assert(pthread_join(thread, NULL) == 0);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target
index 32dc0f9..07be001 100644
--- a/tests/tcg/multiarch/system/Makefile.softmmu-target
+++ b/tests/tcg/multiarch/system/Makefile.softmmu-target
@@ -65,3 +65,9 @@ endif
MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt \
run-gdbstub-untimely-packet run-gdbstub-registers
+
+# Test plugin memory access instrumentation
+run-plugin-memory-with-libmem.so: \
+ PLUGIN_ARGS=$(COMMA)region-summary=true
+run-plugin-memory-with-libmem.so: \
+ CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out
diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index 6eb2eb1..7508f6b 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -20,20 +20,28 @@
# error "Target does not specify CHECK_UNALIGNED"
#endif
+uint32_t test_read_count;
+uint32_t test_write_count;
+
#define MEM_PAGE_SIZE 4096 /* nominal 4k "pages" */
#define TEST_SIZE (MEM_PAGE_SIZE * 4) /* 4 pages */
#define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0])))
-__attribute__((aligned(MEM_PAGE_SIZE)))
+__attribute__((aligned(TEST_SIZE)))
static uint8_t test_data[TEST_SIZE];
typedef void (*init_ufn) (int offset);
typedef bool (*read_ufn) (int offset);
typedef bool (*read_sfn) (int offset, bool nf);
-static void pdot(int count)
+static void pdot(int count, bool write)
{
+ if (write) {
+ test_write_count++;
+ } else {
+ test_read_count++;
+ }
if (count % 128 == 0) {
ml_printf(".");
}
@@ -63,12 +71,14 @@ static void init_test_data_u8(int unused_offset)
int i;
(void)(unused_offset);
- ml_printf("Filling test area with u8:");
+ ml_printf("Filling test area with u8 (%p):", ptr);
+
for (i = 0; i < TEST_SIZE; i++) {
*ptr++ = BYTE_NEXT(count);
- pdot(i);
+ pdot(i, true);
}
- ml_printf("done\n");
+
+ ml_printf("done %d @ %p\n", i, ptr);
}
/*
@@ -91,10 +101,11 @@ static void init_test_data_s8(bool neg_first)
neg_first ? "neg first" : "pos first");
for (i = 0; i < TEST_SIZE / 2; i++) {
*ptr++ = get_byte(i, neg_first);
+ pdot(i, true);
*ptr++ = get_byte(i, !neg_first);
- pdot(i);
+ pdot(i, true);
}
- ml_printf("done\n");
+ ml_printf("done %d @ %p\n", i * 2, ptr);
}
/*
@@ -105,9 +116,19 @@ static void reset_start_data(int offset)
{
uint32_t *ptr = (uint32_t *) &test_data[0];
int i;
+
+ if (!offset) {
+ return;
+ }
+
+ ml_printf("Flushing %d bytes from %p: ", offset, ptr);
+
for (i = 0; i < offset; i++) {
*ptr++ = 0;
+ pdot(i, true);
}
+
+ ml_printf("done %d @ %p\n", i, ptr);
}
static void init_test_data_u16(int offset)
@@ -117,17 +138,17 @@ static void init_test_data_u16(int offset)
const int max = (TEST_SIZE - offset) / sizeof(word);
int i;
- ml_printf("Filling test area with u16 (offset %d, %p):", offset, ptr);
-
reset_start_data(offset);
+ ml_printf("Filling test area with u16 (offset %d, %p):", offset, ptr);
+
for (i = 0; i < max; i++) {
uint16_t low = BYTE_NEXT(count), high = BYTE_NEXT(count);
word = BYTE_SHIFT(high, 1) | BYTE_SHIFT(low, 0);
*ptr++ = word;
- pdot(i);
+ pdot(i, true);
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
}
static void init_test_data_u32(int offset)
@@ -137,21 +158,22 @@ static void init_test_data_u32(int offset)
const int max = (TEST_SIZE - offset) / sizeof(word);
int i;
- ml_printf("Filling test area with u32 (offset %d, %p):", offset, ptr);
-
reset_start_data(offset);
+ ml_printf("Filling test area with u32 (offset %d, %p):", offset, ptr);
+
for (i = 0; i < max; i++) {
uint32_t b4 = BYTE_NEXT(count), b3 = BYTE_NEXT(count);
uint32_t b2 = BYTE_NEXT(count), b1 = BYTE_NEXT(count);
word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) |
BYTE_SHIFT(b4, 0);
*ptr++ = word;
- pdot(i);
+ pdot(i, true);
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
}
+#if __SIZEOF_POINTER__ >= 8
static void init_test_data_u64(int offset)
{
uint8_t count = 0;
@@ -159,10 +181,10 @@ static void init_test_data_u64(int offset)
const int max = (TEST_SIZE - offset) / sizeof(word);
int i;
- ml_printf("Filling test area with u64 (offset %d, %p):", offset, ptr);
-
reset_start_data(offset);
+ ml_printf("Filling test area with u64 (offset %d, %p):", offset, ptr);
+
for (i = 0; i < max; i++) {
uint64_t b8 = BYTE_NEXT(count), b7 = BYTE_NEXT(count);
uint64_t b6 = BYTE_NEXT(count), b5 = BYTE_NEXT(count);
@@ -172,10 +194,11 @@ static void init_test_data_u64(int offset)
BYTE_SHIFT(b4, 4) | BYTE_SHIFT(b5, 3) | BYTE_SHIFT(b6, 2) |
BYTE_SHIFT(b7, 1) | BYTE_SHIFT(b8, 0);
*ptr++ = word;
- pdot(i);
+ pdot(i, true);
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
}
+#endif
static bool read_test_data_u16(int offset)
{
@@ -194,11 +217,11 @@ static bool read_test_data_u16(int offset)
ml_printf("Error %d < %d\n", high, low);
return false;
} else {
- pdot(i);
+ pdot(i, false);
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
return true;
}
@@ -236,13 +259,14 @@ static bool read_test_data_u32(int offset)
ml_printf("Error %d, %d, %d, %d", b1, b2, b3, b4);
return false;
} else {
- pdot(i);
+ pdot(i, false);
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
return true;
}
+#if __SIZEOF_POINTER__ >= 8
static bool read_test_data_u64(int offset)
{
uint64_t word, *ptr = (uint64_t *)&test_data[offset];
@@ -290,17 +314,22 @@ static bool read_test_data_u64(int offset)
b1, b2, b3, b4, b5, b6, b7, b8);
return false;
} else {
- pdot(i);
+ pdot(i, false);
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
return true;
}
+#endif
/* Read the test data and verify at various offsets */
-read_ufn read_ufns[] = { read_test_data_u16,
- read_test_data_u32,
- read_test_data_u64 };
+read_ufn read_ufns[] = {
+ read_test_data_u16,
+ read_test_data_u32,
+#if __SIZEOF_POINTER__ >= 8
+ read_test_data_u64
+#endif
+};
bool do_unsigned_reads(int start_off)
{
@@ -357,15 +386,17 @@ static bool read_test_data_s8(int offset, bool neg_first)
second = *ptr++;
if (neg_first && first < 0 && second > 0) {
- pdot(i);
+ pdot(i, false);
+ pdot(i, false);
} else if (!neg_first && first > 0 && second < 0) {
- pdot(i);
+ pdot(i, false);
+ pdot(i, false);
} else {
ml_printf("Error %d %c %d\n", first, neg_first ? '<' : '>', second);
return false;
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i * 2, ptr);
return true;
}
@@ -390,15 +421,15 @@ static bool read_test_data_s16(int offset, bool neg_first)
int32_t data = *ptr++;
if (neg_first && data < 0) {
- pdot(i);
+ pdot(i, false);
} else if (!neg_first && data > 0) {
- pdot(i);
+ pdot(i, false);
} else {
ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
return false;
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
return true;
}
@@ -423,15 +454,15 @@ static bool read_test_data_s32(int offset, bool neg_first)
int64_t data = *ptr++;
if (neg_first && data < 0) {
- pdot(i);
+ pdot(i, false);
} else if (!neg_first && data > 0) {
- pdot(i);
+ pdot(i, false);
} else {
ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
return false;
}
}
- ml_printf("done @ %p\n", ptr);
+ ml_printf("done %d @ %p\n", i, ptr);
return true;
}
@@ -465,16 +496,23 @@ bool do_signed_reads(bool neg_first)
return ok;
}
-init_ufn init_ufns[] = { init_test_data_u8,
- init_test_data_u16,
- init_test_data_u32,
- init_test_data_u64 };
+init_ufn init_ufns[] = {
+ init_test_data_u8,
+ init_test_data_u16,
+ init_test_data_u32,
+#if __SIZEOF_POINTER__ >= 8
+ init_test_data_u64
+#endif
+};
int main(void)
{
int i;
bool ok = true;
+ ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+ ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
+
/* Run through the unsigned tests first */
for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
ok = do_unsigned_test(init_ufns[i]);
@@ -490,6 +528,8 @@ int main(void)
ok = do_signed_reads(true);
}
+ ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+ ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
return ok ? 0 : -1;
}
diff --git a/tests/tcg/multiarch/system/validate-memory-counts.py b/tests/tcg/multiarch/system/validate-memory-counts.py
new file mode 100755
index 0000000..5b8bbf3
--- /dev/null
+++ b/tests/tcg/multiarch/system/validate-memory-counts.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+#
+# validate-memory-counts.py: check we instrumented memory properly
+#
+# This program takes two inputs:
+# - the mem plugin output
+# - the memory binary output
+#
+# Copyright (C) 2024 Linaro Ltd
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import sys
+from argparse import ArgumentParser
+
+def extract_counts(path):
+ """
+ Load the output from path and extract the lines containing:
+
+ Test data start: 0x40214000
+ Test data end: 0x40218001
+ Test data read: 2522280
+ Test data write: 262111
+
+ from the stream of data and return the parsed values for use in the
+ validation function.
+ """
+ start_address = None
+ end_address = None
+ read_count = 0
+ write_count = 0
+ with open(path, 'r') as f:
+ for line in f:
+ if line.startswith("Test data start:"):
+ start_address = int(line.split(':')[1].strip(), 16)
+ elif line.startswith("Test data end:"):
+ end_address = int(line.split(':')[1].strip(), 16)
+ elif line.startswith("Test data read:"):
+ read_count = int(line.split(':')[1].strip())
+ elif line.startswith("Test data write:"):
+ write_count = int(line.split(':')[1].strip())
+ return start_address, end_address, read_count, write_count
+
+
+def parse_plugin_output(path, start, end):
+ """
+ Load the plugin output from path in the form of:
+
+ Region Base, Reads, Writes, Seen all
+ 0x0000000040004000, 31093, 0, false
+ 0x0000000040214000, 2522280, 278579, true
+ 0x0000000040000000, 137398, 0, false
+ 0x0000000040210000, 54727397, 33721956, false
+
+ and extract the read/write counts for regions that fall between the test
+ data start and end addresses, returning the totals.
+ """
+ total_reads = 0
+ total_writes = 0
+ seen_all = False
+
+ with open(path, 'r') as f:
+ next(f) # Skip the header
+ for line in f:
+
+ if line.startswith("Region Base"):
+ continue
+
+ parts = line.strip().split(', ')
+ if len(parts) != 4:
+ continue
+
+ region_base = int(parts[0], 16)
+ reads = int(parts[1])
+ writes = int(parts[2])
+
+ if start <= region_base < end: # Checking if within range
+ total_reads += reads
+ total_writes += writes
+ seen_all = parts[3] == "true"
+
+ return total_reads, total_writes, seen_all
+
+def main() -> None:
+ """
+ Process the arguments, ingest the program and plugin output, and
+ verify that they match up, reporting when they do not.
+ """
+ parser = ArgumentParser(description="Validate memory instrumentation")
+ parser.add_argument('test_output',
+ help="The output from the test itself")
+ parser.add_argument('plugin_output',
+ help="The output from memory plugin")
+ parser.add_argument('--bss-cleared',
+ action='store_true',
+ help='Assume bss was cleared (and adjusts counts).')
+
+ args = parser.parse_args()
+
+ # Extract counts from memory binary
+ start, end, exp_reads, exp_writes = extract_counts(args.test_output)
+
+ # Some targets clear BSS before running but the test doesn't know
+ # that so we adjust it by the size of the test region.
+ if args.bss_cleared:
+ exp_writes += 16384
+
+ if start is None or end is None:
+ print("Failed to test_data boundaries from output.")
+ sys.exit(1)
+
+ # Parse plugin output
+ preads, pwrites, seen_all = parse_plugin_output(args.plugin_output,
+ start, end)
+
+ if not seen_all:
+ print("Fail: didn't instrument all accesses to test_data.")
+ sys.exit(1)
+
+ # Compare and report
+ if preads == exp_reads and pwrites == exp_writes:
+ sys.exit(0)
+ else:
+ print("Fail: The memory reads and writes count does not match.")
+ print(f"Expected Reads: {exp_reads}, Actual Reads: {preads}")
+ print(f"Expected Writes: {exp_writes}, Actual Writes: {pwrites}")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
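A minimal sketch of how this validator is fed by the Makefile.softmmu-target rule earlier in the series; the output file names are assumptions based on the harness's usual $@.out / $@.pout naming:

    # First argument: serial output of the memory test; second: the
    # region summary written by the mem plugin. Add --bss-cleared for
    # targets whose boot code zeroes bss before the test runs.
    ./tests/tcg/multiarch/system/validate-memory-counts.py \
        run-plugin-memory-with-libmem.so.out \
        run-plugin-memory-with-libmem.so.pout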
diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
new file mode 100644
index 0000000..057b9aa
--- /dev/null
+++ b/tests/tcg/multiarch/test-plugin-mem-access.c
@@ -0,0 +1,177 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Check that we detect all the memory accesses expected via the plugin API.
+ * Used in conjunction with the ./check-plugin-output.sh check script.
+ * The output of this program is the list of patterns expected in the plugin output.
+ *
+ * 8/16/32-bit loads/stores are tested for all architectures.
+ * 64/128-bit loads/stores are tested for aarch64/x86_64.
+ * Atomic operations (8/16/32/64-bit) are tested for x86_64 only.
+ */
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(__x86_64__)
+#include <emmintrin.h>
+#elif defined(__aarch64__)
+#include <arm_neon.h>
+#endif /* __x86_64__ */
+
+static void *data;
+
+/* ,store_u8,.*,8,store,0xf1 */
+#define PRINT_EXPECTED(function, type, value, action) \
+do { \
+ printf(",%s,.*,%d,%s,%s\n", \
+ #function, (int) sizeof(type) * 8, action, value); \
+} \
+while (0)
+
+#define DEFINE_STORE(name, type, value) \
+ \
+static void print_expected_store_##name(void) \
+{ \
+ PRINT_EXPECTED(store_##name, type, #value, "store"); \
+} \
+ \
+static void store_##name(void) \
+{ \
+ *((type *)data) = value; \
+ print_expected_store_##name(); \
+}
+
+#define DEFINE_ATOMIC_OP(name, type, value) \
+ \
+static void print_expected_atomic_op_##name(void) \
+{ \
+ PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load"); \
+ PRINT_EXPECTED(atomic_op_##name, type, #value, "store"); \
+} \
+ \
+static void atomic_op_##name(void) \
+{ \
+ *((type *)data) = 0x42; \
+ __sync_val_compare_and_swap((type *)data, 0x42, value); \
+ print_expected_atomic_op_##name(); \
+}
+
+#define DEFINE_LOAD(name, type, value) \
+ \
+static void print_expected_load_##name(void) \
+{ \
+ PRINT_EXPECTED(load_##name, type, #value, "load"); \
+} \
+ \
+static void load_##name(void) \
+{ \
+ \
+ /* volatile forces load to be generated. */ \
+ volatile type src = *((type *) data); \
+ volatile type dest = src; \
+ (void)src, (void)dest; \
+ print_expected_load_##name(); \
+}
+
+DEFINE_STORE(u8, uint8_t, 0xf1)
+DEFINE_LOAD(u8, uint8_t, 0xf1)
+DEFINE_STORE(u16, uint16_t, 0xf123)
+DEFINE_LOAD(u16, uint16_t, 0xf123)
+DEFINE_STORE(u32, uint32_t, 0xff112233)
+DEFINE_LOAD(u32, uint32_t, 0xff112233)
+
+#if defined(__x86_64__) || defined(__aarch64__)
+DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
+DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
+
+static void print_expected_store_u128(void)
+{
+ PRINT_EXPECTED(store_u128, __int128,
+ "0xf122334455667788f123456789abcdef", "store");
+}
+
+static void store_u128(void)
+{
+#ifdef __x86_64__
+ _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
+ 0xf1234567, 0x89abcdef));
+#else
+ const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
+ uint32x4_t vec = vld1q_u32(init);
+ vst1q_u32(data, vec);
+#endif /* __x86_64__ */
+ print_expected_store_u128();
+}
+
+static void print_expected_load_u128(void)
+{
+ PRINT_EXPECTED(load_u128, __int128,
+ "0xf122334455667788f123456789abcdef", "load");
+}
+
+static void load_u128(void)
+{
+#ifdef __x86_64__
+ __m128i var = _mm_load_si128(data);
+#else
+ uint32x4_t var = vld1q_u32(data);
+#endif
+ (void) var;
+ print_expected_load_u128();
+}
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
+DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
+DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
+DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
+#endif /* __x86_64__ */
+
+static void *f(void *p)
+{
+ return NULL;
+}
+
+int main(void)
+{
+ /*
+ * We force creation of a second thread to enable cpu flag CF_PARALLEL.
+ * This will generate atomic operations when needed.
+ */
+ pthread_t thread;
+ pthread_create(&thread, NULL, &f, NULL);
+ pthread_join(thread, NULL);
+
+ /* allocate storage up to 128 bits */
+ data = malloc(16);
+
+ store_u8();
+ load_u8();
+
+ store_u16();
+ load_u16();
+
+ store_u32();
+ load_u32();
+
+#if defined(__x86_64__) || defined(__aarch64__)
+ store_u64();
+ load_u64();
+
+ store_u128();
+ load_u128();
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+ atomic_op_u8();
+ atomic_op_u16();
+ atomic_op_u32();
+ atomic_op_u64();
+#endif /* __x86_64__ */
+
+ free(data);
+}
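End to end, the user-mode check configured in Makefile.target above looks roughly like the following before the resulting log is handed to check-plugin-output.sh; qemu-x86_64 and the .pout name are assumptions:

    # Capture per-access traces from the mem plugin; each line the test
    # prints is a regexp that must appear in the resulting plugin log.
    qemu-x86_64 -plugin ./tests/tcg/plugins/libmem.so,print-accesses=true \
        -d plugin -D test-plugin-mem-access.pout ./test-plugin-mem-access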
diff --git a/tests/plugin/bb.c b/tests/tcg/plugins/bb.c
index 36776de..36776de 100644
--- a/tests/plugin/bb.c
+++ b/tests/tcg/plugins/bb.c
diff --git a/tests/plugin/empty.c b/tests/tcg/plugins/empty.c
index 8fa6bac..8fa6bac 100644
--- a/tests/plugin/empty.c
+++ b/tests/tcg/plugins/empty.c
diff --git a/tests/tcg/plugins/inline.c b/tests/tcg/plugins/inline.c
new file mode 100644
index 0000000..73dde99
--- /dev/null
+++ b/tests/tcg/plugins/inline.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
+ *
+ * Demonstrates and tests usage of inline ops.
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <qemu-plugin.h>
+
+typedef struct {
+ uint64_t count_tb;
+ uint64_t count_tb_inline;
+ uint64_t count_insn;
+ uint64_t count_insn_inline;
+ uint64_t count_mem;
+ uint64_t count_mem_inline;
+ uint64_t tb_cond_num_trigger;
+ uint64_t tb_cond_track_count;
+ uint64_t insn_cond_num_trigger;
+ uint64_t insn_cond_track_count;
+} CPUCount;
+
+static const uint64_t cond_trigger_limit = 100;
+
+typedef struct {
+ uint64_t data_insn;
+ uint64_t data_tb;
+ uint64_t data_mem;
+} CPUData;
+
+static struct qemu_plugin_scoreboard *counts;
+static qemu_plugin_u64 count_tb;
+static qemu_plugin_u64 count_tb_inline;
+static qemu_plugin_u64 count_insn;
+static qemu_plugin_u64 count_insn_inline;
+static qemu_plugin_u64 count_mem;
+static qemu_plugin_u64 count_mem_inline;
+static qemu_plugin_u64 tb_cond_num_trigger;
+static qemu_plugin_u64 tb_cond_track_count;
+static qemu_plugin_u64 insn_cond_num_trigger;
+static qemu_plugin_u64 insn_cond_track_count;
+static struct qemu_plugin_scoreboard *data;
+static qemu_plugin_u64 data_insn;
+static qemu_plugin_u64 data_tb;
+static qemu_plugin_u64 data_mem;
+
+static uint64_t global_count_tb;
+static uint64_t global_count_insn;
+static uint64_t global_count_mem;
+static unsigned int max_cpu_index;
+static GMutex tb_lock;
+static GMutex insn_lock;
+static GMutex mem_lock;
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+static void stats_insn(void)
+{
+ const uint64_t expected = global_count_insn;
+ const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
+ const uint64_t inl_per_vcpu =
+ qemu_plugin_u64_sum(count_insn_inline);
+ const uint64_t cond_num_trigger =
+ qemu_plugin_u64_sum(insn_cond_num_trigger);
+ const uint64_t cond_track_left = qemu_plugin_u64_sum(insn_cond_track_count);
+ const uint64_t conditional =
+ cond_num_trigger * cond_trigger_limit + cond_track_left;
+ g_autoptr(GString) stats = g_string_new("");
+ g_string_append_printf(stats, "insn: %" PRIu64 "\n", expected);
+ g_string_append_printf(stats, "insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ g_string_append_printf(stats, "insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ g_string_append_printf(stats, "insn: %" PRIu64 " (cond cb)\n", conditional);
+ qemu_plugin_outs(stats->str);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+ g_assert(conditional == expected);
+}
+
+static void stats_tb(void)
+{
+ const uint64_t expected = global_count_tb;
+ const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
+ const uint64_t inl_per_vcpu =
+ qemu_plugin_u64_sum(count_tb_inline);
+ const uint64_t cond_num_trigger = qemu_plugin_u64_sum(tb_cond_num_trigger);
+ const uint64_t cond_track_left = qemu_plugin_u64_sum(tb_cond_track_count);
+ const uint64_t conditional =
+ cond_num_trigger * cond_trigger_limit + cond_track_left;
+ g_autoptr(GString) stats = g_string_new("");
+ g_string_append_printf(stats, "tb: %" PRIu64 "\n", expected);
+ g_string_append_printf(stats, "tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ g_string_append_printf(stats, "tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ g_string_append_printf(stats, "tb: %" PRIu64 " (conditional cb)\n", conditional);
+ qemu_plugin_outs(stats->str);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+ g_assert(conditional == expected);
+}
+
+static void stats_mem(void)
+{
+ const uint64_t expected = global_count_mem;
+ const uint64_t per_vcpu = qemu_plugin_u64_sum(count_mem);
+ const uint64_t inl_per_vcpu =
+ qemu_plugin_u64_sum(count_mem_inline);
+ g_autoptr(GString) stats = g_string_new("");
+ g_string_append_printf(stats, "mem: %" PRIu64 "\n", expected);
+ g_string_append_printf(stats, "mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ g_string_append_printf(stats, "mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ qemu_plugin_outs(stats->str);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *udata)
+{
+ const unsigned int num_cpus = qemu_plugin_num_vcpus();
+ g_autoptr(GString) stats = g_string_new("");
+ g_assert(num_cpus == max_cpu_index + 1);
+
+ for (int i = 0; i < num_cpus ; ++i) {
+ const uint64_t tb = qemu_plugin_u64_get(count_tb, i);
+ const uint64_t tb_inline = qemu_plugin_u64_get(count_tb_inline, i);
+ const uint64_t insn = qemu_plugin_u64_get(count_insn, i);
+ const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i);
+ const uint64_t mem = qemu_plugin_u64_get(count_mem, i);
+ const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i);
+ const uint64_t tb_cond_trigger =
+ qemu_plugin_u64_get(tb_cond_num_trigger, i);
+ const uint64_t tb_cond_left =
+ qemu_plugin_u64_get(tb_cond_track_count, i);
+ const uint64_t insn_cond_trigger =
+ qemu_plugin_u64_get(insn_cond_num_trigger, i);
+ const uint64_t insn_cond_left =
+ qemu_plugin_u64_get(insn_cond_track_count, i);
+ g_string_printf(stats, "cpu %d: tb (%" PRIu64 ", %" PRIu64
+ ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+ ") | "
+ "insn (%" PRIu64 ", %" PRIu64
+ ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+ ") | "
+ "mem (%" PRIu64 ", %" PRIu64 ")"
+ "\n",
+ i,
+ tb, tb_inline,
+ tb_cond_trigger, cond_trigger_limit, tb_cond_left,
+ insn, insn_inline,
+ insn_cond_trigger, cond_trigger_limit, insn_cond_left,
+ mem, mem_inline);
+ qemu_plugin_outs(stats->str);
+ g_assert(tb == tb_inline);
+ g_assert(insn == insn_inline);
+ g_assert(mem == mem_inline);
+ g_assert(tb_cond_trigger == tb / cond_trigger_limit);
+ g_assert(tb_cond_left == tb % cond_trigger_limit);
+ g_assert(insn_cond_trigger == insn / cond_trigger_limit);
+ g_assert(insn_cond_left == insn % cond_trigger_limit);
+ }
+
+ stats_tb();
+ stats_insn();
+ stats_mem();
+
+ qemu_plugin_scoreboard_free(counts);
+ qemu_plugin_scoreboard_free(data);
+}
+
+static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
+{
+ qemu_plugin_u64_add(count_tb, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
+ g_mutex_lock(&tb_lock);
+ max_cpu_index = MAX(max_cpu_index, cpu_index);
+ global_count_tb++;
+ g_mutex_unlock(&tb_lock);
+}
+
+static void vcpu_tb_cond_exec(unsigned int cpu_index, void *udata)
+{
+ g_assert(qemu_plugin_u64_get(tb_cond_track_count, cpu_index) ==
+ cond_trigger_limit);
+ g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
+ qemu_plugin_u64_set(tb_cond_track_count, cpu_index, 0);
+ qemu_plugin_u64_add(tb_cond_num_trigger, cpu_index, 1);
+}
+
+static void vcpu_insn_cond_exec(unsigned int cpu_index, void *udata)
+{
+ g_assert(qemu_plugin_u64_get(insn_cond_track_count, cpu_index) ==
+ cond_trigger_limit);
+ g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
+ qemu_plugin_u64_set(insn_cond_track_count, cpu_index, 0);
+ qemu_plugin_u64_add(insn_cond_num_trigger, cpu_index, 1);
+}
+
+static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
+{
+ qemu_plugin_u64_add(count_insn, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
+ g_mutex_lock(&insn_lock);
+ global_count_insn++;
+ g_mutex_unlock(&insn_lock);
+}
+
+static void vcpu_mem_access(unsigned int cpu_index,
+ qemu_plugin_meminfo_t info,
+ uint64_t vaddr,
+ void *udata)
+{
+ qemu_plugin_u64_add(count_mem, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_mem, cpu_index) == (uintptr_t) udata);
+ g_mutex_lock(&mem_lock);
+ global_count_mem++;
+ g_mutex_unlock(&mem_lock);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ void *tb_store = tb;
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_STORE_U64, data_tb, (uintptr_t) tb_store);
+ qemu_plugin_register_vcpu_tb_exec_cb(
+ tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, tb_store);
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);
+
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_cond_track_count, 1);
+ qemu_plugin_register_vcpu_tb_exec_cond_cb(
+ tb, vcpu_tb_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_EQ, tb_cond_track_count, cond_trigger_limit, tb_store);
+
+ for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
+ void *insn_store = insn;
+ void *mem_store = (char *)insn_store + 0xff;
+
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_STORE_U64, data_insn,
+ (uintptr_t) insn_store);
+ qemu_plugin_register_vcpu_insn_exec_cb(
+ insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, insn_store);
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);
+
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_cond_track_count, 1);
+ qemu_plugin_register_vcpu_insn_exec_cond_cb(
+ insn, vcpu_insn_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_EQ, insn_cond_track_count, cond_trigger_limit,
+ insn_store);
+
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, QEMU_PLUGIN_MEM_RW,
+ QEMU_PLUGIN_INLINE_STORE_U64,
+ data_mem, (uintptr_t) mem_store);
+ qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
+ QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_MEM_RW, mem_store);
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, QEMU_PLUGIN_MEM_RW,
+ QEMU_PLUGIN_INLINE_ADD_U64,
+ count_mem_inline, 1);
+ }
+}
+
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+ int argc, char **argv)
+{
+ counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
+ count_tb = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_tb);
+ count_insn = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_insn);
+ count_mem = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_mem);
+ count_tb_inline = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_tb_inline);
+ count_insn_inline = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_insn_inline);
+ count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, count_mem_inline);
+ tb_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, tb_cond_num_trigger);
+ tb_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, tb_cond_track_count);
+ insn_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, insn_cond_num_trigger);
+ insn_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, insn_cond_track_count);
+ data = qemu_plugin_scoreboard_new(sizeof(CPUData));
+ data_insn = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_insn);
+ data_tb = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_tb);
+ data_mem = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_mem);
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+
+ return 0;
+}
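A quick manual smoke test of this plugin might look like the following; the libinline.so name and the /bin/true guest are assumptions (the g_assert calls above abort if the inline and callback counters ever disagree):

    # Per-vcpu counts from callbacks, inline ops and conditional
    # callbacks are compared at exit; the summary goes to the plugin log.
    qemu-x86_64 -plugin ./tests/tcg/plugins/libinline.so \
        -d plugin -D inline.pout /bin/true
    cat inline.pout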
diff --git a/tests/tcg/plugins/insn.c b/tests/tcg/plugins/insn.c
new file mode 100644
index 0000000..0c723cb
--- /dev/null
+++ b/tests/tcg/plugins/insn.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+static qemu_plugin_u64 insn_count;
+
+static bool do_inline;
+static bool do_size;
+static bool do_trace;
+static GArray *sizes;
+
+typedef struct {
+ uint64_t hits;
+ uint64_t last_hit;
+ uint64_t total_delta;
+} MatchCount;
+
+typedef struct {
+ char *match_string;
+ struct qemu_plugin_scoreboard *counts; /* MatchCount */
+} Match;
+
+static GArray *matches;
+
+typedef struct {
+ Match *match;
+ uint64_t vaddr;
+ uint64_t hits;
+ char *disas;
+} Instruction;
+
+/* A hash table to hold matched instructions */
+static GHashTable *match_insn_records;
+static GMutex match_hash_lock;
+
+
+static Instruction * get_insn_record(const char *disas, uint64_t vaddr, Match *m)
+{
+ g_autofree char *str_hash = g_strdup_printf("%"PRIx64" %s", vaddr, disas);
+ Instruction *record;
+
+ g_mutex_lock(&match_hash_lock);
+
+ if (!match_insn_records) {
+ match_insn_records = g_hash_table_new(g_str_hash, g_str_equal);
+ }
+
+ record = g_hash_table_lookup(match_insn_records, str_hash);
+
+ if (!record) {
+ g_autoptr(GString) ts = g_string_new(str_hash);
+
+ record = g_new0(Instruction, 1);
+ record->disas = g_strdup(disas);
+ record->vaddr = vaddr;
+ record->match = m;
+
+ g_hash_table_insert(match_insn_records, str_hash, record);
+
+ g_string_prepend(ts, "Created record for: ");
+ g_string_append(ts, "\n");
+ qemu_plugin_outs(ts->str);
+ }
+
+ g_mutex_unlock(&match_hash_lock);
+
+ return record;
+}
+
+/*
+ * Initialise a new vcpu by reading the register list
+ */
+static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
+{
+ g_autoptr(GArray) reg_list = qemu_plugin_get_registers();
+ g_autoptr(GByteArray) reg_value = g_byte_array_new();
+
+ if (reg_list) {
+ for (int i = 0; i < reg_list->len; i++) {
+ qemu_plugin_reg_descriptor *rd = &g_array_index(
+ reg_list, qemu_plugin_reg_descriptor, i);
+ int count = qemu_plugin_read_register(rd->handle, reg_value);
+ g_assert(count > 0);
+ }
+ }
+}
+
+
+static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
+{
+ qemu_plugin_u64_add(insn_count, cpu_index, 1);
+}
+
+static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata)
+{
+ Instruction *insn = (Instruction *) udata;
+ Match *insn_match = insn->match;
+ MatchCount *match = qemu_plugin_scoreboard_find(insn_match->counts,
+ cpu_index);
+
+ insn->hits++;
+
+ uint64_t icount = qemu_plugin_u64_get(insn_count, cpu_index);
+ uint64_t delta = icount - match->last_hit;
+
+ match->hits++;
+ match->total_delta += delta;
+ match->last_hit = icount;
+
+ if (do_trace) {
+ g_autoptr(GString) ts = g_string_new("");
+ g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits",
+ insn->vaddr, insn->disas, insn->hits);
+ g_string_append_printf(ts,
+ " , cpu %u,"
+ " %"PRId64" match hits,"
+ " Ī”+%"PRId64 " since last match,"
+ " %"PRId64 " avg insns/match\n",
+ cpu_index,
+ match->hits, delta,
+ match->total_delta / match->hits);
+
+ qemu_plugin_outs(ts->str);
+ }
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ size_t n = qemu_plugin_tb_n_insns(tb);
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
+
+ if (do_inline) {
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
+ } else {
+ qemu_plugin_register_vcpu_insn_exec_cb(
+ insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, NULL);
+ }
+
+ if (do_size) {
+ size_t sz = qemu_plugin_insn_size(insn);
+ if (sz > sizes->len) {
+ g_array_set_size(sizes, sz);
+ }
+ unsigned long *cnt = &g_array_index(sizes, unsigned long, sz);
+ (*cnt)++;
+ }
+
+ /*
+ * If we are tracking certain instructions we will need more
+ * information about the instruction which we also need to
+ * save if there is a hit.
+ *
+ * We only want one record for each occurrence of the matched
+ * instruction.
+ */
+ if (matches->len) {
+ char *insn_disas = qemu_plugin_insn_disas(insn);
+ for (int j = 0; j < matches->len; j++) {
+ Match *m = &g_array_index(matches, Match, j);
+ if (g_str_has_prefix(insn_disas, m->match_string)) {
+ Instruction *rec = get_insn_record(insn_disas,
+ qemu_plugin_insn_vaddr(insn),
+ m);
+
+ qemu_plugin_register_vcpu_insn_exec_cb(
+ insn, vcpu_insn_matched_exec_before,
+ QEMU_PLUGIN_CB_NO_REGS, rec);
+ }
+ }
+ g_free(insn_disas);
+ }
+ }
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) out = g_string_new(NULL);
+ int i;
+
+ if (do_size) {
+ for (i = 0; i <= sizes->len; i++) {
+ unsigned long *cnt = &g_array_index(sizes, unsigned long, i);
+ if (*cnt) {
+ g_string_append_printf(out,
+ "len %d bytes: %ld insns\n", i, *cnt);
+ }
+ }
+ } else {
+ for (i = 0; i < qemu_plugin_num_vcpus(); i++) {
+ g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n",
+ i, qemu_plugin_u64_get(insn_count, i));
+ }
+ g_string_append_printf(out, "total insns: %" PRIu64 "\n",
+ qemu_plugin_u64_sum(insn_count));
+ }
+ qemu_plugin_outs(out->str);
+ qemu_plugin_scoreboard_free(insn_count.score);
+
+ g_mutex_lock(&match_hash_lock);
+
+ for (i = 0; i < matches->len; ++i) {
+ Match *m = &g_array_index(matches, Match, i);
+ GHashTableIter iter;
+ Instruction *record;
+ qemu_plugin_u64 hit_e = qemu_plugin_scoreboard_u64_in_struct(m->counts, MatchCount, hits);
+ uint64_t hits = qemu_plugin_u64_sum(hit_e);
+
+ g_string_printf(out, "Match: %s, hits %"PRId64"\n", m->match_string, hits);
+ qemu_plugin_outs(out->str);
+
+ g_hash_table_iter_init(&iter, match_insn_records);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&record)) {
+ if (record->match == m) {
+ g_string_printf(out,
+ " %"PRIx64": %s (hits %"PRId64")\n",
+ record->vaddr,
+ record->disas,
+ record->hits);
+ qemu_plugin_outs(out->str);
+ }
+ }
+
+ g_free(m->match_string);
+ qemu_plugin_scoreboard_free(m->counts);
+ }
+
+ g_mutex_unlock(&match_hash_lock);
+
+ g_array_free(matches, TRUE);
+ g_array_free(sizes, TRUE);
+}
+
+
+/* Add a match to the array of matches */
+static void parse_match(char *match)
+{
+ Match new_match = {
+ .match_string = g_strdup(match),
+ .counts = qemu_plugin_scoreboard_new(sizeof(MatchCount)) };
+ g_array_append_val(matches, new_match);
+}
+
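+/*
+ * Plugin entry point. The options parsed below can be combined on the
+ * QEMU command line; a purely illustrative invocation (binary and
+ * plugin paths are placeholders) could look like:
+ *
+ *   qemu-aarch64 -plugin ./libinsn.so,match=add,trace=on ./a.out
+ */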
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ matches = g_array_new(false, true, sizeof(Match));
+ /* zero terminated so 0 is not a special case */
+ sizes = g_array_new(true, true, sizeof(unsigned long));
+
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+ if (g_strcmp0(tokens[0], "inline") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "sizes") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_size)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "match") == 0) {
+ parse_match(tokens[1]);
+ } else if (g_strcmp0(tokens[0], "trace") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_trace)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ insn_count = qemu_plugin_scoreboard_u64(
+ qemu_plugin_scoreboard_new(sizeof(uint64_t)));
+
+ /* Register init, translation block and exit callbacks */
+ qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
diff --git a/tests/tcg/plugins/mem.c b/tests/tcg/plugins/mem.c
new file mode 100644
index 0000000..ca4e888
--- /dev/null
+++ b/tests/tcg/plugins/mem.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+/*
+ * Plugins should not include anything from QEMU aside from the
+ * API header. However, as this is a test plugin that exercises the
+ * internals of QEMU and we want to avoid needless code duplication,
+ * we do so here. bswap.h is fairly self-contained, although it needs
+ * a few things provided by compiler.h.
+ */
+#include <compiler.h>
+#include <bswap.h>
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+typedef struct {
+ uint64_t mem_count;
+ uint64_t io_count;
+} CPUCount;
+
+typedef struct {
+ uint64_t vaddr;
+ const char *sym;
+} InsnInfo;
+
+/*
+ * For the "memory" system test we need to track accesses to
+ * individual regions. We mirror the data written to a region and
+ * then check, when it is read back, that it matches.
+ *
+ * We track regions rather than pages to avoid complications with
+ * page-crossing accesses, and because the test only cares about the
+ * test_data region.
+ */
+static uint64_t region_size = 4096 * 4;
+static uint64_t region_mask;
+
+typedef struct {
+ uint64_t region_address;
+ uint64_t reads;
+ uint64_t writes;
+ uint8_t *data;
+ /* Did we see every write and read with correct values? */
+ bool seen_all;
+} RegionInfo;
+
+static struct qemu_plugin_scoreboard *counts;
+static qemu_plugin_u64 mem_count;
+static qemu_plugin_u64 io_count;
+static bool do_inline, do_callback, do_print_accesses, do_region_summary;
+static bool do_haddr;
+static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
+
+
+static GMutex lock;
+static GHashTable *regions;
+
+static gint addr_order(gconstpointer a, gconstpointer b, gpointer d)
+{
+ RegionInfo *na = (RegionInfo *) a;
+ RegionInfo *nb = (RegionInfo *) b;
+
+ return na->region_address > nb->region_address ? 1 : -1;
+}
+
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) out = g_string_new("");
+
+ if (do_inline || do_callback) {
+ g_string_printf(out, "mem accesses: %" PRIu64 "\n",
+ qemu_plugin_u64_sum(mem_count));
+ }
+ if (do_haddr) {
+ g_string_append_printf(out, "io accesses: %" PRIu64 "\n",
+ qemu_plugin_u64_sum(io_count));
+ }
+ qemu_plugin_outs(out->str);
+
+
+ if (do_region_summary) {
+ GList *counts = g_hash_table_get_values(regions);
+
+ counts = g_list_sort_with_data(counts, addr_order, NULL);
+
+ g_string_printf(out, "Region Base, Reads, Writes, Seen all\n");
+
+ if (counts && g_list_next(counts)) {
+ for (/* counts */; counts; counts = counts->next) {
+ RegionInfo *ri = (RegionInfo *) counts->data;
+
+ g_string_append_printf(out,
+ "0x%016"PRIx64", "
+ "%"PRId64", %"PRId64", %s\n",
+ ri->region_address,
+ ri->reads,
+ ri->writes,
+ ri->seen_all ? "true" : "false");
+ }
+ }
+ qemu_plugin_outs(out->str);
+ }
+
+ qemu_plugin_scoreboard_free(counts);
+}
+
+/*
+ * Update the region tracking info for the access. We split up accesses
+ * that span regions even though the plugin infrastructure will deliver
+ * them as a single access.
+ */
+static void update_region_info(uint64_t region, uint64_t offset,
+ qemu_plugin_meminfo_t meminfo,
+ qemu_plugin_mem_value value,
+ unsigned size)
+{
+ bool be = qemu_plugin_mem_is_big_endian(meminfo);
+ bool is_store = qemu_plugin_mem_is_store(meminfo);
+ RegionInfo *ri;
+ bool unseen_data = false;
+
+ g_assert(offset + size <= region_size);
+
+ g_mutex_lock(&lock);
+ ri = (RegionInfo *) g_hash_table_lookup(regions, &region);
+
+ if (!ri) {
+ ri = g_new0(RegionInfo, 1);
+ ri->region_address = region;
+ ri->data = g_malloc0(region_size);
+ ri->seen_all = true;
+ g_hash_table_insert(regions, &ri->region_address, ri);
+ }
+
+ if (is_store) {
+ ri->writes++;
+ } else {
+ ri->reads++;
+ }
+
+ switch (value.type) {
+ case QEMU_PLUGIN_MEM_VALUE_U8:
+ if (is_store) {
+ ri->data[offset] = value.data.u8;
+ } else if (ri->data[offset] != value.data.u8) {
+ unseen_data = true;
+ }
+ break;
+ case QEMU_PLUGIN_MEM_VALUE_U16:
+ {
+ uint16_t *p = (uint16_t *) &ri->data[offset];
+ if (is_store) {
+ if (be) {
+ stw_be_p(p, value.data.u16);
+ } else {
+ stw_le_p(p, value.data.u16);
+ }
+ } else {
+ uint16_t val = be ? lduw_be_p(p) : lduw_le_p(p);
+ unseen_data = val != value.data.u16;
+ }
+ break;
+ }
+ case QEMU_PLUGIN_MEM_VALUE_U32:
+ {
+ uint32_t *p = (uint32_t *) &ri->data[offset];
+ if (is_store) {
+ if (be) {
+ stl_be_p(p, value.data.u32);
+ } else {
+ stl_le_p(p, value.data.u32);
+ }
+ } else {
+ uint32_t val = be ? ldl_be_p(p) : ldl_le_p(p);
+ unseen_data = val != value.data.u32;
+ }
+ break;
+ }
+ case QEMU_PLUGIN_MEM_VALUE_U64:
+ {
+ uint64_t *p = (uint64_t *) &ri->data[offset];
+ if (is_store) {
+ if (be) {
+ stq_be_p(p, value.data.u64);
+ } else {
+ stq_le_p(p, value.data.u64);
+ }
+ } else {
+ uint64_t val = be ? ldq_be_p(p) : ldq_le_p(p);
+ unseen_data = val != value.data.u64;
+ }
+ break;
+ }
+ case QEMU_PLUGIN_MEM_VALUE_U128:
+ /* not used in the test, so skip */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /*
+ * This is expected for regions initialised by QEMU (.text etc), but we
+ * expect to have seen every write to the test_data region of the
+ * memory test before its data is read back.
+ */
+ if (unseen_data && ri->seen_all) {
+ g_autoptr(GString) error = g_string_new("Warning: ");
+ g_string_append_printf(error, "0x%016"PRIx64":%"PRId64
+ " read an un-instrumented value\n",
+ region, offset);
+ qemu_plugin_outs(error->str);
+ ri->seen_all = false;
+ }
+
+ g_mutex_unlock(&lock);
+}
+
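+/*
+ * Per-access callback: bump the io/mem counters and, if region-summary
+ * is enabled, mirror the access into the per-region tracking data.
+ */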
+static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
+ uint64_t vaddr, void *udata)
+{
+ if (do_haddr) {
+ struct qemu_plugin_hwaddr *hwaddr;
+ hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
+ if (qemu_plugin_hwaddr_is_io(hwaddr)) {
+ qemu_plugin_u64_add(io_count, cpu_index, 1);
+ } else {
+ qemu_plugin_u64_add(mem_count, cpu_index, 1);
+ }
+ } else {
+ qemu_plugin_u64_add(mem_count, cpu_index, 1);
+ }
+
+ if (do_region_summary) {
+ uint64_t region = vaddr & ~region_mask;
+ uint64_t offset = vaddr & region_mask;
+ qemu_plugin_mem_value value = qemu_plugin_mem_get_value(meminfo);
+ unsigned size = 1 << qemu_plugin_mem_size_shift(meminfo);
+
+ update_region_info(region, offset, meminfo, value, size);
+ }
+}
+
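+/*
+ * Emit one CSV line per access, matching the header printed at install
+ * time: insn_vaddr,insn_symbol,mem_vaddr,mem_hwaddr,access_size,
+ * access_type,mem_value.
+ */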
+static void print_access(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
+ uint64_t vaddr, void *udata)
+{
+ InsnInfo *insn_info = udata;
+ unsigned size = 8 << qemu_plugin_mem_size_shift(meminfo);
+ const char *type = qemu_plugin_mem_is_store(meminfo) ? "store" : "load";
+ qemu_plugin_mem_value value = qemu_plugin_mem_get_value(meminfo);
+ uint64_t hwaddr =
+ qemu_plugin_hwaddr_phys_addr(qemu_plugin_get_hwaddr(meminfo, vaddr));
+ g_autoptr(GString) out = g_string_new("");
+ g_string_printf(out,
+ "0x%"PRIx64",%s,0x%"PRIx64",0x%"PRIx64",%d,%s,",
+ insn_info->vaddr, insn_info->sym,
+ vaddr, hwaddr, size, type);
+ switch (value.type) {
+ case QEMU_PLUGIN_MEM_VALUE_U8:
+ g_string_append_printf(out, "0x%02"PRIx8, value.data.u8);
+ break;
+ case QEMU_PLUGIN_MEM_VALUE_U16:
+ g_string_append_printf(out, "0x%04"PRIx16, value.data.u16);
+ break;
+ case QEMU_PLUGIN_MEM_VALUE_U32:
+ g_string_append_printf(out, "0x%08"PRIx32, value.data.u32);
+ break;
+ case QEMU_PLUGIN_MEM_VALUE_U64:
+ g_string_append_printf(out, "0x%016"PRIx64, value.data.u64);
+ break;
+ case QEMU_PLUGIN_MEM_VALUE_U128:
+ g_string_append_printf(out, "0x%016"PRIx64"%016"PRIx64,
+ value.data.u128.high, value.data.u128.low);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ g_string_append_printf(out, "\n");
+ qemu_plugin_outs(out->str);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ size_t n = qemu_plugin_tb_n_insns(tb);
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
+
+ if (do_inline) {
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, rw,
+ QEMU_PLUGIN_INLINE_ADD_U64,
+ mem_count, 1);
+ }
+ if (do_callback || do_region_summary) {
+ qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
+ QEMU_PLUGIN_CB_NO_REGS,
+ rw, NULL);
+ }
+ if (do_print_accesses) {
+ /* we leak this pointer to avoid the locking needed to track and free it */
+ InsnInfo *insn_info = g_malloc(sizeof(InsnInfo));
+ const char *sym = qemu_plugin_insn_symbol(insn);
+ insn_info->sym = sym ? sym : "";
+ insn_info->vaddr = qemu_plugin_insn_vaddr(insn);
+ qemu_plugin_register_vcpu_mem_cb(insn, print_access,
+ QEMU_PLUGIN_CB_NO_REGS,
+ rw, (void *) insn_info);
+ }
+ }
+}
+
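+/*
+ * Plugin entry point. A purely illustrative command line (the QEMU
+ * binary and plugin path are placeholders) combining the options
+ * parsed below could be:
+ *
+ *   qemu-system-aarch64 ... -plugin ./libmem.so,region-summary=on,haddr=on
+ */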
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+
+ if (g_strcmp0(tokens[0], "haddr") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_haddr)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "track") == 0) {
+ if (g_strcmp0(tokens[1], "r") == 0) {
+ rw = QEMU_PLUGIN_MEM_R;
+ } else if (g_strcmp0(tokens[1], "w") == 0) {
+ rw = QEMU_PLUGIN_MEM_W;
+ } else if (g_strcmp0(tokens[1], "rw") == 0) {
+ rw = QEMU_PLUGIN_MEM_RW;
+ } else {
+ fprintf(stderr, "invalid value for argument track: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "inline") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "callback") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_callback)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "print-accesses") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1],
+ &do_print_accesses)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else if (g_strcmp0(tokens[0], "region-summary") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1],
+ &do_region_summary)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ return -1;
+ }
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ if (do_inline && do_callback) {
+ fprintf(stderr,
+ "can't enable inline and callback counting at the same time\n");
+ return -1;
+ }
+
+ if (do_print_accesses) {
+ g_autoptr(GString) out = g_string_new("");
+ g_string_printf(out,
+ "insn_vaddr,insn_symbol,mem_vaddr,mem_hwaddr,"
+ "access_size,access_type,mem_value\n");
+ qemu_plugin_outs(out->str);
+ }
+
+ if (do_region_summary) {
+ region_mask = (region_size - 1);
+ regions = g_hash_table_new(g_int64_hash, g_int64_equal);
+ }
+
+ counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
+ mem_count = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, mem_count);
+ io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count);
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
diff --git a/tests/tcg/plugins/meson.build b/tests/tcg/plugins/meson.build
new file mode 100644
index 0000000..0293422
--- /dev/null
+++ b/tests/tcg/plugins/meson.build
@@ -0,0 +1,23 @@
+t = []
+if get_option('plugins')
+ foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'reset', 'syscall']
+ if host_os == 'windows'
+ t += shared_module(i, files(i + '.c') + '../../../contrib/plugins/win32_linker.c',
+ include_directories: '../../../include/qemu',
+ link_depends: [win32_qemu_plugin_api_lib],
+ link_args: win32_qemu_plugin_api_link_flags,
+ dependencies: glib)
+ else
+ t += shared_module(i, files(i + '.c'),
+ include_directories: '../../../include/qemu',
+ dependencies: glib)
+ endif
+ endforeach
+endif
+if t.length() > 0
+ alias_target('test-plugins', t)
+else
+ run_target('test-plugins', command: [python, '-c', ''])
+endif
+
+plugin_modules += t
diff --git a/tests/tcg/plugins/reset.c b/tests/tcg/plugins/reset.c
new file mode 100644
index 0000000..1be8be2
--- /dev/null
+++ b/tests/tcg/plugins/reset.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2025 Linaro Ltd
+ *
+ * Test the reset/uninstall cycle of a plugin.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+static qemu_plugin_id_t plugin_id;
+static bool was_reset;
+static bool was_uninstalled;
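+
+/*
+ * Expected flow: install registers tb_trans_before_reset; the first
+ * executed TB triggers qemu_plugin_reset(); after the reset the next
+ * executed TB triggers qemu_plugin_uninstall(); the destructor then
+ * checks that both steps happened.
+ */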
+
+static void after_uninstall(qemu_plugin_id_t id)
+{
+ g_assert(was_reset && !was_uninstalled);
+ qemu_plugin_outs("uninstall done\n");
+ was_uninstalled = true;
+}
+
+static void tb_exec_after_reset(unsigned int vcpu_index, void *userdata)
+{
+ g_assert(was_reset && !was_uninstalled);
+ qemu_plugin_uninstall(plugin_id, after_uninstall);
+}
+
+static void tb_trans_after_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ g_assert(was_reset && !was_uninstalled);
+ qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_after_reset,
+ QEMU_PLUGIN_CB_NO_REGS, NULL);
+}
+
+static void after_reset(qemu_plugin_id_t id)
+{
+ g_assert(!was_reset && !was_uninstalled);
+ qemu_plugin_outs("reset done\n");
+ was_reset = true;
+ qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_after_reset);
+}
+
+static void tb_exec_before_reset(unsigned int vcpu_index, void *userdata)
+{
+ g_assert(!was_reset && !was_uninstalled);
+ qemu_plugin_reset(plugin_id, after_reset);
+}
+
+static void tb_trans_before_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ g_assert(!was_reset && !was_uninstalled);
+ qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_before_reset,
+ QEMU_PLUGIN_CB_NO_REGS, NULL);
+}
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ plugin_id = id;
+ qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_before_reset);
+ return 0;
+}
+
+/*
+ * Since we uninstall the plugin, we can't use qemu_plugin_register_atexit_cb,
+ * so we use the destructor attribute instead.
+ */
+static void __attribute__((destructor)) on_plugin_exit(void)
+{
+ g_assert(was_reset && was_uninstalled);
+ qemu_plugin_outs("plugin exit\n");
+}
diff --git a/tests/tcg/plugins/syscall.c b/tests/tcg/plugins/syscall.c
new file mode 100644
index 0000000..42801f5
--- /dev/null
+++ b/tests/tcg/plugins/syscall.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2020, Matthias Weckbecker <matthias@weckbecker.name>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+typedef struct {
+ int64_t num;
+ int64_t calls;
+ int64_t errors;
+} SyscallStats;
+
+struct SyscallInfo {
+ const char *name;
+ int64_t write_sysno;
+};
+
+static const struct SyscallInfo arch_syscall_info[] = {
+ { "aarch64", 64 },
+ { "aarch64_be", 64 },
+ { "alpha", 4 },
+ { "arm", 4 },
+ { "armeb", 4 },
+ { "avr", -1 },
+ { "hexagon", 64 },
+ { "hppa", -1 },
+ { "i386", 4 },
+ { "loongarch64", -1 },
+ { "m68k", 4 },
+ { "microblaze", 4 },
+ { "microblazeel", 4 },
+ { "mips", 1 },
+ { "mips64", 1 },
+ { "mips64el", 1 },
+ { "mipsel", 1 },
+ { "mipsn32", 1 },
+ { "mipsn32el", 1 },
+ { "or1k", -1 },
+ { "ppc", 4 },
+ { "ppc64", 4 },
+ { "ppc64le", 4 },
+ { "riscv32", 64 },
+ { "riscv64", 64 },
+ { "rx", -1 },
+ { "s390x", -1 },
+ { "sh4", -1 },
+ { "sh4eb", -1 },
+ { "sparc", 4 },
+ { "sparc32plus", 4 },
+ { "sparc64", 4 },
+ { "tricore", -1 },
+ { "x86_64", 1 },
+ { "xtensa", 13 },
+ { "xtensaeb", 13 },
+ { NULL, -1 },
+};
+
+static GMutex lock;
+static GHashTable *statistics;
+static GByteArray *memory_buffer;
+static bool do_log_writes;
+static int64_t write_sysno = -1;
+
+static SyscallStats *get_or_create_entry(int64_t num)
+{
+ SyscallStats *entry =
+ (SyscallStats *) g_hash_table_lookup(statistics, &num);
+
+ if (!entry) {
+ entry = g_new0(SyscallStats, 1);
+ entry->num = num;
+ g_hash_table_insert(statistics, &entry->num, entry);
+ }
+
+ return entry;
+}
+
+/*
+ * Hex-dump a GByteArray to the QEMU plugin output in the format:
+ * 61 63 63 65 6c 09 09 20 20 20 66 70 75 09 09 09 | accel.....fpu...
+ * 20 6d 6f 64 75 6c 65 2d 63 6f 6d 6d 6f 6e 2e 63 | .module-common.c
+ */
+static void hexdump(const GByteArray *data)
+{
+ g_autoptr(GString) out = g_string_new("");
+
+ for (guint index = 0; index < data->len; index += 16) {
+ for (guint col = 0; col < 16; col++) {
+ if (index + col < data->len) {
+ g_string_append_printf(out, "%02x ", data->data[index + col]);
+ } else {
+ g_string_append(out, " ");
+ }
+ }
+
+ g_string_append(out, " | ");
+
+ for (guint col = 0; col < 16; col++) {
+ if (index + col >= data->len) {
+ break;
+ }
+
+ if (g_ascii_isgraph(data->data[index + col])) {
+ g_string_append_printf(out, "%c", data->data[index + col]);
+ } else {
+ g_string_append(out, ".");
+ }
+ }
+
+ g_string_append(out, "\n");
+ }
+
+ qemu_plugin_outs(out->str);
+}
+
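+/*
+ * Syscall entry callback: either account the call in the statistics
+ * table or log it directly, and with log_writes=on hex-dump the buffer
+ * passed to the target's write syscall.
+ */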
+static void vcpu_syscall(qemu_plugin_id_t id, unsigned int vcpu_index,
+ int64_t num, uint64_t a1, uint64_t a2,
+ uint64_t a3, uint64_t a4, uint64_t a5,
+ uint64_t a6, uint64_t a7, uint64_t a8)
+{
+ if (statistics) {
+ SyscallStats *entry;
+ g_mutex_lock(&lock);
+ entry = get_or_create_entry(num);
+ entry->calls++;
+ g_mutex_unlock(&lock);
+ } else {
+ g_autofree gchar *out = g_strdup_printf("syscall #%" PRIi64 "\n", num);
+ qemu_plugin_outs(out);
+ }
+
+ if (do_log_writes && num == write_sysno) {
+ if (qemu_plugin_read_memory_vaddr(a2, memory_buffer, a3)) {
+ hexdump(memory_buffer);
+ } else {
+ fprintf(stderr, "Error reading memory from vaddr %"PRIu64"\n", a2);
+ }
+ }
+}
+
+static void vcpu_syscall_ret(qemu_plugin_id_t id, unsigned int vcpu_idx,
+ int64_t num, int64_t ret)
+{
+ if (statistics) {
+ SyscallStats *entry;
+
+ g_mutex_lock(&lock);
+ /* Should always return an existing entry. */
+ entry = get_or_create_entry(num);
+ if (ret < 0) {
+ entry->errors++;
+ }
+ g_mutex_unlock(&lock);
+ } else {
+ g_autofree gchar *out = g_strdup_printf(
+ "syscall #%" PRIi64 " returned -> %" PRIi64 "\n", num, ret);
+ qemu_plugin_outs(out);
+ }
+}
+
+static void print_entry(gpointer val, gpointer user_data)
+{
+ SyscallStats *entry = (SyscallStats *) val;
+ int64_t syscall_num = entry->num;
+ g_autofree gchar *out = g_strdup_printf(
+ "%-13" PRIi64 "%-6" PRIi64 " %" PRIi64 "\n",
+ syscall_num, entry->calls, entry->errors);
+ qemu_plugin_outs(out);
+}
+
+static gint comp_func(gconstpointer ea, gconstpointer eb, gpointer d)
+{
+ SyscallStats *ent_a = (SyscallStats *) ea;
+ SyscallStats *ent_b = (SyscallStats *) eb;
+
+ return ent_a->calls > ent_b->calls ? -1 : 1;
+}
+
+/* ************************************************************************* */
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ if (!statistics) {
+ return;
+ }
+
+ g_mutex_lock(&lock);
+ GList *entries = g_hash_table_get_values(statistics);
+ entries = g_list_sort_with_data(entries, comp_func, NULL);
+ qemu_plugin_outs("syscall no. calls errors\n");
+
+ g_list_foreach(entries, print_entry, NULL);
+
+ g_list_free(entries);
+ g_hash_table_destroy(statistics);
+ g_mutex_unlock(&lock);
+}
+
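+/*
+ * Plugin entry point. A purely illustrative invocation (paths are
+ * placeholders, the option names are the ones parsed below) could be:
+ *
+ *   qemu-x86_64 -plugin ./libsyscall.so,log_writes=on ./a.out
+ */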
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ bool do_print = false;
+
+ for (int i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+
+ if (g_strcmp0(tokens[0], "print") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_print)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ }
+ } else if (g_strcmp0(tokens[0], "log_writes") == 0) {
+ if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_log_writes)) {
+ fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+ }
+ } else {
+ fprintf(stderr, "unsupported argument: %s\n", argv[i]);
+ return -1;
+ }
+ }
+
+ if (!do_print) {
+ statistics = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free);
+ }
+
+ if (do_log_writes) {
+ for (const struct SyscallInfo *syscall_info = arch_syscall_info;
+ syscall_info->name != NULL; syscall_info++) {
+
+ if (g_strcmp0(syscall_info->name, info->target_name) == 0) {
+ write_sysno = syscall_info->write_sysno;
+ break;
+ }
+ }
+
+ if (write_sysno == -1) {
+ fprintf(stderr, "write syscall number not found\n");
+ return -1;
+ }
+
+ memory_buffer = g_byte_array_new();
+ }
+
+ qemu_plugin_register_vcpu_syscall_cb(id, vcpu_syscall);
+ qemu_plugin_register_vcpu_syscall_ret_cb(id, vcpu_syscall_ret);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target
index 8c3e4e4..0d058b2 100644
--- a/tests/tcg/ppc64/Makefile.target
+++ b/tests/tcg/ppc64/Makefile.target
@@ -6,20 +6,32 @@ VPATH += $(SRC_PATH)/tests/tcg/ppc64
config-cc.mak: Makefile
$(quiet-@)( \
- $(call cc-option,-mpower8-vector, CROSS_CC_HAS_POWER8_VECTOR); \
+ $(call cc-option,-mcpu=power8, CROSS_CC_HAS_CPU_POWER8); \
$(call cc-option,-mpower10, CROSS_CC_HAS_POWER10)) 3> config-cc.mak
-include config-cc.mak
-ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),)
+# multi-threaded tests are known to fail (e.g., clang-user CI job)
+# See: https://gitlab.com/qemu-project/qemu/-/issues/2456
+run-signals: signals
+ $(call skip-test, $<, "BROKEN (flaky with clang) ")
+run-plugin-signals-with-%:
+ $(call skip-test, $<, "BROKEN (flaky with clang) ")
+
+run-threadcount: threadcount
+ $(call skip-test, $<, "BROKEN (flaky with clang) ")
+run-plugin-threadcount-with-%:
+ $(call skip-test, $<, "BROKEN (flaky with clang) ")
+
+ifneq ($(CROSS_CC_HAS_CPU_POWER8),)
PPC64_TESTS=bcdsub non_signalling_xscv
endif
-$(PPC64_TESTS): CFLAGS += -mpower8-vector
+$(PPC64_TESTS): CFLAGS += -mcpu=power8
-ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),)
+ifneq ($(CROSS_CC_HAS_CPU_POWER8),)
PPC64_TESTS += vsx_f2i_nan
endif
-vsx_f2i_nan: CFLAGS += -mpower8-vector -I$(SRC_PATH)/include
+vsx_f2i_nan: CFLAGS += -mcpu=power8 -I$(SRC_PATH)/include
PPC64_TESTS += mtfsf
PPC64_TESTS += mffsce
@@ -43,4 +55,9 @@ PPC64_TESTS += signal_save_restore_xer
PPC64_TESTS += xxspltw
PPC64_TESTS += test-aes
+# The ppc64 ABI uses function descriptors, so QEMU can't find the symbol for a
+# given instruction. Therefore we don't check the output of the mem-access
+# plugin.
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+ CHECK_PLUGIN_OUTPUT_COMMAND=
+
TESTS += $(PPC64_TESTS)
diff --git a/tests/tcg/riscv64/Makefile.softmmu-target b/tests/tcg/riscv64/Makefile.softmmu-target
index d5b126e..7c1d44d 100644
--- a/tests/tcg/riscv64/Makefile.softmmu-target
+++ b/tests/tcg/riscv64/Makefile.softmmu-target
@@ -10,7 +10,7 @@ LDFLAGS = -T $(LINK_SCRIPT)
CFLAGS += -g -Og
%.o: %.S
- $(CC) $(CFLAGS) $< -c -o $@
+ $(CC) $(CFLAGS) $< -Wa,--noexecstack -c -o $@
%: %.o $(LINK_SCRIPT)
$(LD) $(LDFLAGS) $< -o $@
diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target
index 4c8e15e..8cd4667 100644
--- a/tests/tcg/s390x/Makefile.softmmu-target
+++ b/tests/tcg/s390x/Makefile.softmmu-target
@@ -1,12 +1,13 @@
S390X_SRC=$(SRC_PATH)/tests/tcg/s390x
VPATH+=$(S390X_SRC)
-QEMU_OPTS+=-action panic=exit-failure -nographic $(EXTFLAGS) -kernel
+# EXTFLAGS can be passed by the user, e.g. to override the --accel option
+QEMU_OPTS+=-action panic=exit-failure -nographic -serial chardev:output $(EXTFLAGS) -kernel
LINK_SCRIPT=$(S390X_SRC)/softmmu.ld
-CFLAGS+=-ggdb -O0
+CFLAGS+=-ggdb -O0 -I$(SRC_PATH)/include/hw/s390x/ipl/
LDFLAGS=-nostdlib -static
%.o: %.S
- $(CC) -march=z13 -m64 -c $< -o $@
+ $(CC) -march=z13 -m64 -Wa,--noexecstack -c $< -o $@
%.o: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) -march=z13 -m64 -c $< -o $@
@@ -41,8 +42,15 @@ $(ASM_TESTS): LDFLAGS += -Wl,-T$(LINK_SCRIPT) -Wl,--build-id=none
$(ASM_TESTS): $(LINK_SCRIPT)
TESTS += $(ASM_TESTS)
+MULTIARCH_TESTS += mvc-smc
S390X_MULTIARCH_RUNTIME_OBJS = head64.o console.o $(MINILIB_OBJS)
$(MULTIARCH_TESTS): $(S390X_MULTIARCH_RUNTIME_OBJS)
$(MULTIARCH_TESTS): LDFLAGS += $(S390X_MULTIARCH_RUNTIME_OBJS)
-$(MULTIARCH_TESTS): CFLAGS += $(MINILIB_INC)
+$(MULTIARCH_TESTS): CFLAGS += $(MINILIB_INC) \
+ -I$(SRC_PATH)/roms/SLOF/lib/libc/include/
memory: CFLAGS += -DCHECK_UNALIGNED=0
+
+# s390x clears the BSS section so we need to account for that
+run-plugin-memory-with-libmem.so: \
+ CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py \
+ --bss-cleared $@.out
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index a8f86c9..da5fe71 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -48,6 +48,7 @@ TESTS+=lae
TESTS+=cvd
TESTS+=cvb
TESTS+=ts
+TESTS+=ex-smc
cdsg: CFLAGS+=-pthread
cdsg: LDFLAGS+=-pthread
@@ -73,8 +74,11 @@ $(Z13_TESTS): CFLAGS+=-march=z13 -O2
TESTS+=$(Z13_TESTS)
ifneq ($(CROSS_CC_HAS_Z14),)
-Z14_TESTS=vfminmax
+Z14_TESTS=fma vfminmax
+fma: float.h
+fma: LDFLAGS+=-lm
vfminmax: LDFLAGS+=-lm
+vfminmax: float.h
$(Z14_TESTS): CFLAGS+=-march=z14 -O2
TESTS+=$(Z14_TESTS)
endif
diff --git a/tests/tcg/s390x/console.c b/tests/tcg/s390x/console.c
index d43ce3f..6c26f04 100644
--- a/tests/tcg/s390x/console.c
+++ b/tests/tcg/s390x/console.c
@@ -4,7 +4,10 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
+
#include "../../../pc-bios/s390-ccw/sclp.c"
+#include "../../../roms/SLOF/lib/libc/string/memset.c"
+#include "../../../roms/SLOF/lib/libc/string/memcpy.c"
void __sys_outc(char c)
{
diff --git a/tests/tcg/s390x/ex-smc.c b/tests/tcg/s390x/ex-smc.c
new file mode 100644
index 0000000..f403640
--- /dev/null
+++ b/tests/tcg/s390x/ex-smc.c
@@ -0,0 +1,57 @@
+/*
+ * Test modifying an EXECUTE target.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+/* Make sure we exercise the same EXECUTE instruction. */
+extern void execute(unsigned char *insn, unsigned char mask,
+ unsigned long *r1_r5);
+asm(".globl execute\n"
+ "execute:\n"
+ "lg %r1,0(%r4)\n"
+ "lg %r5,8(%r4)\n"
+ "ex %r3,0(%r2)\n"
+ "stg %r5,8(%r4)\n"
+ "stg %r1,0(%r4)\n"
+ "br %r14\n");
+
+/* Define an RWX EXECUTE target. */
+extern unsigned char lgfi[];
+asm(".pushsection .rwx,\"awx\",@progbits\n"
+ ".globl lgfi\n"
+ "lgfi: lgfi %r0,0\n"
+ ".popsection\n");
+
+int main(void)
+{
+ unsigned long r1_r5[2];
+
+ /* Create an initial TB. */
+ r1_r5[0] = -1;
+ r1_r5[1] = -1;
+ execute(lgfi, 1 << 4, r1_r5);
+ assert(r1_r5[0] == 0);
+ assert(r1_r5[1] == -1);
+
+ /* Test changing the mask. */
+ execute(lgfi, 5 << 4, r1_r5);
+ assert(r1_r5[0] == 0);
+ assert(r1_r5[1] == 0);
+
+ /* Test changing the target. */
+ lgfi[5] = 42;
+ execute(lgfi, 5 << 4, r1_r5);
+ assert(r1_r5[0] == 0);
+ assert(r1_r5[1] == 42);
+
+ /* Test changing both the mask and the target. */
+ lgfi[5] = 24;
+ execute(lgfi, 1 << 4, r1_r5);
+ assert(r1_r5[0] == 24);
+ assert(r1_r5[1] == 42);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/s390x/float.h b/tests/tcg/s390x/float.h
new file mode 100644
index 0000000..9d1682b
--- /dev/null
+++ b/tests/tcg/s390x/float.h
@@ -0,0 +1,104 @@
+/*
+ * Helpers for floating-point tests.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef FLOAT_H
+#define FLOAT_H
+
+/*
+ * Floating-point value classes.
+ */
+#define N_FORMATS 3
+#define CLASS_MINUS_INF 0
+#define CLASS_MINUS_FN 1
+#define CLASS_MINUS_ZERO 2
+#define CLASS_PLUS_ZERO 3
+#define CLASS_PLUS_FN 4
+#define CLASS_PLUS_INF 5
+#define CLASS_QNAN 6
+#define CLASS_SNAN 7
+#define N_SIGNED_CLASSES 8
+static const size_t float_sizes[N_FORMATS] = {
+ /* M4 == 2: short */ 4,
+ /* M4 == 3: long */ 8,
+ /* M4 == 4: extended */ 16,
+};
+static const size_t e_bits[N_FORMATS] = {
+ /* M4 == 2: short */ 8,
+ /* M4 == 3: long */ 11,
+ /* M4 == 4: extended */ 15,
+};
+struct float_class {
+ size_t n;
+ unsigned char v[2][16];
+};
+static const struct float_class signed_floats[N_FORMATS][N_SIGNED_CLASSES] = {
+ /* M4 == 2: short */
+ {
+ /* -inf */ {1, {{0xff, 0x80, 0x00, 0x00}}},
+ /* -Fn */ {2, {{0xc2, 0x28, 0x00, 0x00},
+ {0xc2, 0x29, 0x00, 0x00}}},
+ /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00}}},
+ /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00}}},
+ /* +Fn */ {2, {{0x42, 0x28, 0x00, 0x00},
+ {0x42, 0x2a, 0x00, 0x00}}},
+ /* +inf */ {1, {{0x7f, 0x80, 0x00, 0x00}}},
+ /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0xff, 0xfe}}},
+ /* SNaN */ {2, {{0x7f, 0xbf, 0xff, 0xff},
+ {0x7f, 0xbf, 0xff, 0xfd}}},
+ },
+
+ /* M4 == 3: long */
+ {
+ /* -inf */ {1, {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* -Fn */ {2, {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +Fn */ {2, {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +inf */ {1, {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}},
+ /* SNaN */ {2, {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}},
+ },
+
+ /* M4 == 4: extended */
+ {
+ /* -inf */ {1, {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* -Fn */ {2, {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +Fn */ {2, {{0x40, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* +inf */ {1, {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}},
+ /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}},
+ /* SNaN */ {2, {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}},
+ },
+};
+static const unsigned char default_nans[N_FORMATS][16] = {
+ /* M4 == 2: short */ {0x7f, 0xc0, 0x00, 0x00},
+ /* M4 == 3: long */ {0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ /* M4 == 4: extended */ {0x7f, 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+};
+
+static void dump_v(FILE *f, const void *v, size_t n)
+{
+ for (int i = 0; i < n; i++) {
+ fprintf(f, "%02x", ((const unsigned char *)v)[i]);
+ }
+}
+
+static void snan_to_qnan(char *v, int fmt)
+{
+ size_t bit = 1 + e_bits[fmt];
+ v[bit / 8] |= 1 << (7 - (bit % 8));
+}
+
+#endif
diff --git a/tests/tcg/s390x/fma.c b/tests/tcg/s390x/fma.c
new file mode 100644
index 0000000..6872f59
--- /dev/null
+++ b/tests/tcg/s390x/fma.c
@@ -0,0 +1,233 @@
+/*
+ * Test floating-point multiply-and-add instructions.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <fenv.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "float.h"
+
+union val {
+ float e;
+ double d;
+ long double x;
+ char buf[16];
+};
+
+/*
+ * PoP tables as close to the original as possible.
+ */
+static const char *table1[N_SIGNED_CLASSES][N_SIGNED_CLASSES] = {
+ /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */
+ {/* -inf */ "P(+inf)", "P(+inf)", "Xi: T(dNaN)", "Xi: T(dNaN)", "P(-inf)", "P(-inf)", "P(b)", "Xi: T(b*)"},
+ {/* -Fn */ "P(+inf)", "P(a*b)", "P(+0)", "P(-0)", "P(a*b)", "P(-inf)", "P(b)", "Xi: T(b*)"},
+ {/* -0 */ "Xi: T(dNaN)", "P(+0)", "P(+0)", "P(-0)", "P(-0)", "Xi: T(dNaN)", "P(b)", "Xi: T(b*)"},
+ {/* +0 */ "Xi: T(dNaN)", "P(-0)", "P(-0)", "P(+0)", "P(+0)", "Xi: T(dNaN)", "P(b)", "Xi: T(b*)"},
+ {/* +Fn */ "P(-inf)", "P(a*b)", "P(-0)", "P(+0)", "P(a*b)", "P(+inf)", "P(b)", "Xi: T(b*)"},
+ {/* +inf */ "P(-inf)", "P(-inf)", "Xi: T(dNaN)", "Xi: T(dNaN)", "P(+inf)", "P(+inf)", "P(b)", "Xi: T(b*)"},
+ {/* QNaN */ "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "Xi: T(b*)"},
+ {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"},
+};
+
+static const char *table2[N_SIGNED_CLASSES][N_SIGNED_CLASSES] = {
+ /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */
+ {/* -inf */ "T(-inf)", "T(-inf)", "T(-inf)", "T(-inf)", "T(-inf)", "Xi: T(dNaN)", "T(c)", "Xi: T(c*)"},
+ {/* -Fn */ "T(-inf)", "R(p+c)", "R(p)", "R(p)", "R(p+c)", "T(+inf)", "T(c)", "Xi: T(c*)"},
+ {/* -0 */ "T(-inf)", "R(c)", "T(-0)", "Rezd", "R(c)", "T(+inf)", "T(c)", "Xi: T(c*)"},
+ {/* +0 */ "T(-inf)", "R(c)", "Rezd", "T(+0)", "R(c)", "T(+inf)", "T(c)", "Xi: T(c*)"},
+ {/* +Fn */ "T(-inf)", "R(p+c)", "R(p)", "R(p)", "R(p+c)", "T(+inf)", "T(c)", "Xi: T(c*)"},
+ {/* +inf */ "Xi: T(dNaN)", "T(+inf)", "T(+inf)", "T(+inf)", "T(+inf)", "T(+inf)", "T(c)", "Xi: T(c*)"},
+ {/* QNaN */ "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "Xi: T(c*)"},
+ /* SNaN: can't happen */
+};
+
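+/*
+ * Compute the expected result and the expected IEEE-invalid exception
+ * for a * b + c by interpreting the two PoP tables above, i.e. doing
+ * in C what the tested multiply-and-add instructions are specified to
+ * do.
+ */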
+static void interpret_tables(union val *r, bool *xi, int fmt,
+ int cls_a, const union val *a,
+ int cls_b, const union val *b,
+ int cls_c, const union val *c)
+{
+ const char *spec1 = table1[cls_a][cls_b];
+ const char *spec2;
+ union val p;
+ int cls_p;
+
+ *xi = false;
+
+ if (strcmp(spec1, "P(-inf)") == 0) {
+ cls_p = CLASS_MINUS_INF;
+ } else if (strcmp(spec1, "P(+inf)") == 0) {
+ cls_p = CLASS_PLUS_INF;
+ } else if (strcmp(spec1, "P(-0)") == 0) {
+ cls_p = CLASS_MINUS_ZERO;
+ } else if (strcmp(spec1, "P(+0)") == 0) {
+ cls_p = CLASS_PLUS_ZERO;
+ } else if (strcmp(spec1, "P(a)") == 0) {
+ cls_p = cls_a;
+ memcpy(&p, a, sizeof(p));
+ } else if (strcmp(spec1, "P(b)") == 0) {
+ cls_p = cls_b;
+ memcpy(&p, b, sizeof(p));
+ } else if (strcmp(spec1, "P(a*b)") == 0) {
+ /*
+ * In the general case splitting fma into multiplication and addition
+ * doesn't work, but it does for our test inputs.
+ */
+ cls_p = cls_a == cls_b ? CLASS_PLUS_FN : CLASS_MINUS_FN;
+ switch (fmt) {
+ case 0:
+ p.e = a->e * b->e;
+ break;
+ case 1:
+ p.d = a->d * b->d;
+ break;
+ case 2:
+ p.x = a->x * b->x;
+ break;
+ default:
+ fprintf(stderr, "Unsupported fmt: %d\n", fmt);
+ exit(1);
+ }
+ } else if (strcmp(spec1, "Xi: T(dNaN)") == 0) {
+ memcpy(r, default_nans[fmt], sizeof(*r));
+ *xi = true;
+ return;
+ } else if (strcmp(spec1, "Xi: T(a*)") == 0) {
+ memcpy(r, a, sizeof(*r));
+ snan_to_qnan(r->buf, fmt);
+ *xi = true;
+ return;
+ } else if (strcmp(spec1, "Xi: T(b*)") == 0) {
+ memcpy(r, b, sizeof(*r));
+ snan_to_qnan(r->buf, fmt);
+ *xi = true;
+ return;
+ } else {
+ fprintf(stderr, "Unsupported spec1: %s\n", spec1);
+ exit(1);
+ }
+
+ spec2 = table2[cls_p][cls_c];
+ if (strcmp(spec2, "T(-inf)") == 0) {
+ memcpy(r, signed_floats[fmt][CLASS_MINUS_INF].v[0], sizeof(*r));
+ } else if (strcmp(spec2, "T(+inf)") == 0) {
+ memcpy(r, signed_floats[fmt][CLASS_PLUS_INF].v[0], sizeof(*r));
+ } else if (strcmp(spec2, "T(-0)") == 0) {
+ memcpy(r, signed_floats[fmt][CLASS_MINUS_ZERO].v[0], sizeof(*r));
+ } else if (strcmp(spec2, "T(+0)") == 0 || strcmp(spec2, "Rezd") == 0) {
+ memcpy(r, signed_floats[fmt][CLASS_PLUS_ZERO].v[0], sizeof(*r));
+ } else if (strcmp(spec2, "R(c)") == 0 || strcmp(spec2, "T(c)") == 0) {
+ memcpy(r, c, sizeof(*r));
+ } else if (strcmp(spec2, "R(p)") == 0 || strcmp(spec2, "T(p)") == 0) {
+ memcpy(r, &p, sizeof(*r));
+ } else if (strcmp(spec2, "R(p+c)") == 0 || strcmp(spec2, "T(p+c)") == 0) {
+ switch (fmt) {
+ case 0:
+ r->e = p.e + c->e;
+ break;
+ case 1:
+ r->d = p.d + c->d;
+ break;
+ case 2:
+ r->x = p.x + c->x;
+ break;
+ default:
+ fprintf(stderr, "Unsupported fmt: %d\n", fmt);
+ exit(1);
+ }
+ } else if (strcmp(spec2, "Xi: T(dNaN)") == 0) {
+ memcpy(r, default_nans[fmt], sizeof(*r));
+ *xi = true;
+ } else if (strcmp(spec2, "Xi: T(c*)") == 0) {
+ memcpy(r, c, sizeof(*r));
+ snan_to_qnan(r->buf, fmt);
+ *xi = true;
+ } else {
+ fprintf(stderr, "Unsupported spec2: %s\n", spec2);
+ exit(1);
+ }
+}
+
+struct iter {
+ int fmt;
+ int cls[3];
+ int val[3];
+};
+
+static bool iter_next(struct iter *it)
+{
+ int i;
+
+ for (i = 2; i >= 0; i--) {
+ if (++it->val[i] != signed_floats[it->fmt][it->cls[i]].n) {
+ return true;
+ }
+ it->val[i] = 0;
+
+ if (++it->cls[i] != N_SIGNED_CLASSES) {
+ return true;
+ }
+ it->cls[i] = 0;
+ }
+
+ return ++it->fmt != N_FORMATS;
+}
+
+int main(void)
+{
+ int ret = EXIT_SUCCESS;
+ struct iter it = {};
+
+ do {
+ size_t n = float_sizes[it.fmt];
+ union val a, b, c, exp, res;
+ bool xi_exp, xi;
+
+ memcpy(&a, signed_floats[it.fmt][it.cls[0]].v[it.val[0]], sizeof(a));
+ memcpy(&b, signed_floats[it.fmt][it.cls[1]].v[it.val[1]], sizeof(b));
+ memcpy(&c, signed_floats[it.fmt][it.cls[2]].v[it.val[2]], sizeof(c));
+
+ interpret_tables(&exp, &xi_exp, it.fmt,
+ it.cls[1], &b, it.cls[2], &c, it.cls[0], &a);
+
+ memcpy(&res, &a, sizeof(res));
+ feclearexcept(FE_ALL_EXCEPT);
+ switch (it.fmt) {
+ case 0:
+ asm("maebr %[a],%[b],%[c]"
+ : [a] "+f" (res.e) : [b] "f" (b.e), [c] "f" (c.e));
+ break;
+ case 1:
+ asm("madbr %[a],%[b],%[c]"
+ : [a] "+f" (res.d) : [b] "f" (b.d), [c] "f" (c.d));
+ break;
+ case 2:
+ asm("wfmaxb %[a],%[c],%[b],%[a]"
+ : [a] "+v" (res.x) : [b] "v" (b.x), [c] "v" (c.x));
+ break;
+ default:
+ fprintf(stderr, "Unsupported fmt: %d\n", it.fmt);
+ exit(1);
+ }
+ xi = fetestexcept(FE_ALL_EXCEPT) == FE_INVALID;
+
+ if (memcmp(&res, &exp, n) != 0 || xi != xi_exp) {
+ fprintf(stderr, "[ FAILED ] ");
+ dump_v(stderr, &b, n);
+ fprintf(stderr, " * ");
+ dump_v(stderr, &c, n);
+ fprintf(stderr, " + ");
+ dump_v(stderr, &a, n);
+ fprintf(stderr, ": actual=");
+ dump_v(stderr, &res, n);
+ fprintf(stderr, "/%d, expected=", (int)xi);
+ dump_v(stderr, &exp, n);
+ fprintf(stderr, "/%d\n", (int)xi_exp);
+ ret = EXIT_FAILURE;
+ }
+ } while (iter_next(&it));
+
+ return ret;
+}
diff --git a/tests/tcg/s390x/mvc-smc.c b/tests/tcg/s390x/mvc-smc.c
new file mode 100644
index 0000000..d68f60c
--- /dev/null
+++ b/tests/tcg/s390x/mvc-smc.c
@@ -0,0 +1,82 @@
+/*
+ * Test modifying code using the MVC instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <minilib.h>
+
+#define PAGE_SIZE 4096
+#define BR_14_SIZE 2
+#define RWX_OFFSET 2
+
+static unsigned char rw[PAGE_SIZE + BR_14_SIZE];
+static unsigned char rwx[RWX_OFFSET + sizeof(rw)]
+ __attribute__((aligned(PAGE_SIZE)));
+
+typedef unsigned long (*function_t)(unsigned long);
+
+static int emit_function(unsigned char *p, int n)
+{
+ int i = 0, val = 0;
+
+ while (i < n - 2) {
+ /* aghi %r2,1 */
+ p[i++] = 0xa7;
+ p[i++] = 0x2b;
+ p[i++] = 0x00;
+ p[i++] = 0x01;
+ val++;
+ }
+
+ /* br %r14 */
+ p[i++] = 0x07;
+ p[i++] = 0xfe;
+
+ return val;
+}
+
+static void memcpy_mvc(void *dest, void *src, unsigned long n)
+{
+ while (n >= 256) {
+ asm("mvc 0(256,%[dest]),0(%[src])"
+ :
+ : [dest] "a" (dest)
+ , [src] "a" (src)
+ : "memory");
+ dest += 256;
+ src += 256;
+ n -= 256;
+ }
+ asm("exrl %[n],0f\n"
+ "j 1f\n"
+ "0: mvc 0(1,%[dest]),0(%[src])\n"
+ "1:"
+ :
+ : [dest] "a" (dest)
+ , [src] "a" (src)
+ , [n] "a" (n)
+ : "memory");
+}
+
+int main(void)
+{
+ int expected, size;
+
+ /* Create a TB. */
+ size = sizeof(rwx) - RWX_OFFSET - 4;
+ expected = emit_function(rwx + RWX_OFFSET, size);
+ if (((function_t)(rwx + RWX_OFFSET))(0) != expected) {
+ return 1;
+ }
+
+ /* Overwrite the TB. */
+ size += 4;
+ expected = emit_function(rw, size);
+ memcpy_mvc(rwx + RWX_OFFSET, rw, size);
+ if (((function_t)(rwx + RWX_OFFSET))(0) != expected) {
+ return 2;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/s390x/vfminmax.c b/tests/tcg/s390x/vfminmax.c
index 22629df..e66285f 100644
--- a/tests/tcg/s390x/vfminmax.c
+++ b/tests/tcg/s390x/vfminmax.c
@@ -4,6 +4,8 @@
#include <stdio.h>
#include <string.h>
+#include "float.h"
+
/*
* vfmin/vfmax instruction execution.
*/
@@ -21,99 +23,22 @@ static void vfminmax(unsigned int op,
unsigned int m4, unsigned int m5, unsigned int m6,
void *v1, const void *v2, const void *v3)
{
- insn[3] = (m6 << 4) | m5;
- insn[4] = (m4 << 4) | 0x0e;
- insn[5] = op;
+ insn[3] = (m6 << 4) | m5;
+ insn[4] = (m4 << 4) | 0x0e;
+ insn[5] = op;
asm("vl %%v25,%[v2]\n"
"vl %%v26,%[v3]\n"
"ex 0,%[insn]\n"
"vst %%v24,%[v1]\n"
: [v1] "=m" (*(char (*)[16])v1)
- : [v2] "m" (*(char (*)[16])v2)
- , [v3] "m" (*(char (*)[16])v3)
- , [insn] "m"(insn)
+ : [v2] "m" (*(const char (*)[16])v2)
+ , [v3] "m" (*(const char (*)[16])v3)
+ , [insn] "m" (insn)
: "v24", "v25", "v26");
}
/*
- * Floating-point value classes.
- */
-#define N_FORMATS 3
-#define N_SIGNED_CLASSES 8
-static const size_t float_sizes[N_FORMATS] = {
- /* M4 == 2: short */ 4,
- /* M4 == 3: long */ 8,
- /* M4 == 4: extended */ 16,
-};
-static const size_t e_bits[N_FORMATS] = {
- /* M4 == 2: short */ 8,
- /* M4 == 3: long */ 11,
- /* M4 == 4: extended */ 15,
-};
-static const unsigned char signed_floats[N_FORMATS][N_SIGNED_CLASSES][2][16] = {
- /* M4 == 2: short */
- {
- /* -inf */ {{0xff, 0x80, 0x00, 0x00},
- {0xff, 0x80, 0x00, 0x00}},
- /* -Fn */ {{0xc2, 0x28, 0x00, 0x00},
- {0xc2, 0x29, 0x00, 0x00}},
- /* -0 */ {{0x80, 0x00, 0x00, 0x00},
- {0x80, 0x00, 0x00, 0x00}},
- /* +0 */ {{0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00}},
- /* +Fn */ {{0x42, 0x28, 0x00, 0x00},
- {0x42, 0x2a, 0x00, 0x00}},
- /* +inf */ {{0x7f, 0x80, 0x00, 0x00},
- {0x7f, 0x80, 0x00, 0x00}},
- /* QNaN */ {{0x7f, 0xff, 0xff, 0xff},
- {0x7f, 0xff, 0xff, 0xfe}},
- /* SNaN */ {{0x7f, 0xbf, 0xff, 0xff},
- {0x7f, 0xbf, 0xff, 0xfd}},
- },
-
- /* M4 == 3: long */
- {
- /* -inf */ {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* -Fn */ {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +Fn */ {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +inf */ {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}},
- /* SNaN */ {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}},
- },
-
- /* M4 == 4: extended */
- {
- /* -inf */ {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* -Fn */ {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +Fn */ {{0x40, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* +inf */ {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
- /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}},
- /* SNaN */ {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}},
- },
-};
-
-/*
* PoP tables as close to the original as possible.
*/
struct signed_test {
@@ -285,13 +210,6 @@ struct signed_test {
},
};
-static void dump_v(FILE *f, const void *v, size_t n)
-{
- for (int i = 0; i < n; i++) {
- fprintf(f, "%02x", ((const unsigned char *)v)[i]);
- }
-}
-
static int signed_test(struct signed_test *test, int m4, int m5,
const void *v1_exp, bool xi_exp,
const void *v2, const void *v3)
@@ -320,10 +238,28 @@ static int signed_test(struct signed_test *test, int m4, int m5,
return 0;
}
-static void snan_to_qnan(char *v, int m4)
+struct iter {
+ int cls[2];
+ int val[2];
+};
+
+static bool iter_next(struct iter *it, int fmt)
{
- size_t bit = 1 + e_bits[m4 - 2];
- v[bit / 8] |= 1 << (7 - (bit % 8));
+ int i;
+
+ for (i = 1; i >= 0; i--) {
+ if (++it->val[i] != signed_floats[fmt][it->cls[i]].n) {
+ return true;
+ }
+ it->val[i] = 0;
+
+ if (++it->cls[i] != N_SIGNED_CLASSES) {
+ return true;
+ }
+ it->cls[i] = 0;
+ }
+
+ return false;
}
int main(void)
@@ -333,72 +269,71 @@ int main(void)
for (i = 0; i < sizeof(signed_tests) / sizeof(signed_tests[0]); i++) {
struct signed_test *test = &signed_tests[i];
- int m4;
+ int fmt;
- for (m4 = 2; m4 <= 4; m4++) {
- const unsigned char (*floats)[2][16] = signed_floats[m4 - 2];
- size_t float_size = float_sizes[m4 - 2];
+ for (fmt = 0; fmt < N_FORMATS; fmt++) {
+ size_t float_size = float_sizes[fmt];
+ int m4 = fmt + 2;
int m5;
for (m5 = 0; m5 <= 8; m5 += 8) {
char v1_exp[16], v2[16], v3[16];
bool xi_exp = false;
+ struct iter it = {};
int pos = 0;
- int i2;
- for (i2 = 0; i2 < N_SIGNED_CLASSES * 2; i2++) {
- int i3;
+ do {
+ const char *spec = test->table[it.cls[0]][it.cls[1]];
- for (i3 = 0; i3 < N_SIGNED_CLASSES * 2; i3++) {
- const char *spec = test->table[i2 / 2][i3 / 2];
+ memcpy(&v2[pos],
+ signed_floats[fmt][it.cls[0]].v[it.val[0]],
+ float_size);
+ memcpy(&v3[pos],
+ signed_floats[fmt][it.cls[1]].v[it.val[1]],
+ float_size);
+ if (strcmp(spec, "T(a)") == 0 ||
+ strcmp(spec, "Xi: T(a)") == 0) {
+ memcpy(&v1_exp[pos], &v2[pos], float_size);
+ } else if (strcmp(spec, "T(b)") == 0 ||
+ strcmp(spec, "Xi: T(b)") == 0) {
+ memcpy(&v1_exp[pos], &v3[pos], float_size);
+ } else if (strcmp(spec, "Xi: T(a*)") == 0) {
+ memcpy(&v1_exp[pos], &v2[pos], float_size);
+ snan_to_qnan(&v1_exp[pos], fmt);
+ } else if (strcmp(spec, "Xi: T(b*)") == 0) {
+ memcpy(&v1_exp[pos], &v3[pos], float_size);
+ snan_to_qnan(&v1_exp[pos], fmt);
+ } else if (strcmp(spec, "T(M(a,b))") == 0) {
+ /*
+ * Comparing floats is risky, since the compiler might
+ * generate the same instruction that we are testing.
+ * Compare ints instead. This works, because we get
+ * here only for +-Fn, and the corresponding test
+ * values have identical exponents.
+ */
+ int v2_int = *(int *)&v2[pos];
+ int v3_int = *(int *)&v3[pos];
- memcpy(&v2[pos], floats[i2 / 2][i2 % 2], float_size);
- memcpy(&v3[pos], floats[i3 / 2][i3 % 2], float_size);
- if (strcmp(spec, "T(a)") == 0 ||
- strcmp(spec, "Xi: T(a)") == 0) {
+ if ((v2_int < v3_int) ==
+ ((test->op == VFMIN) != (v2_int < 0))) {
memcpy(&v1_exp[pos], &v2[pos], float_size);
- } else if (strcmp(spec, "T(b)") == 0 ||
- strcmp(spec, "Xi: T(b)") == 0) {
- memcpy(&v1_exp[pos], &v3[pos], float_size);
- } else if (strcmp(spec, "Xi: T(a*)") == 0) {
- memcpy(&v1_exp[pos], &v2[pos], float_size);
- snan_to_qnan(&v1_exp[pos], m4);
- } else if (strcmp(spec, "Xi: T(b*)") == 0) {
- memcpy(&v1_exp[pos], &v3[pos], float_size);
- snan_to_qnan(&v1_exp[pos], m4);
- } else if (strcmp(spec, "T(M(a,b))") == 0) {
- /*
- * Comparing floats is risky, since the compiler
- * might generate the same instruction that we are
- * testing. Compare ints instead. This works,
- * because we get here only for +-Fn, and the
- * corresponding test values have identical
- * exponents.
- */
- int v2_int = *(int *)&v2[pos];
- int v3_int = *(int *)&v3[pos];
-
- if ((v2_int < v3_int) ==
- ((test->op == VFMIN) != (v2_int < 0))) {
- memcpy(&v1_exp[pos], &v2[pos], float_size);
- } else {
- memcpy(&v1_exp[pos], &v3[pos], float_size);
- }
} else {
- fprintf(stderr, "Unexpected spec: %s\n", spec);
- return 1;
+ memcpy(&v1_exp[pos], &v3[pos], float_size);
}
- xi_exp |= spec[0] == 'X';
- pos += float_size;
+ } else {
+ fprintf(stderr, "Unexpected spec: %s\n", spec);
+ return 1;
+ }
+ xi_exp |= spec[0] == 'X';
+ pos += float_size;
- if ((m5 & 8) || pos == 16) {
- ret |= signed_test(test, m4, m5,
- v1_exp, xi_exp, v2, v3);
- pos = 0;
- xi_exp = false;
- }
+ if ((m5 & 8) || pos == 16) {
+ ret |= signed_test(test, m4, m5,
+ v1_exp, xi_exp, v2, v3);
+ pos = 0;
+ xi_exp = false;
}
- }
+ } while (iter_next(&it, fmt));
if (pos != 0) {
ret |= signed_test(test, m4, m5, v1_exp, xi_exp, v2, v3);
diff --git a/tests/tcg/x86_64/Makefile.softmmu-target b/tests/tcg/x86_64/Makefile.softmmu-target
index 1bd763f..ef6bcb4 100644
--- a/tests/tcg/x86_64/Makefile.softmmu-target
+++ b/tests/tcg/x86_64/Makefile.softmmu-target
@@ -25,7 +25,7 @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)
.PRECIOUS: $(CRT_OBJS)
%.o: $(CRT_PATH)/%.S
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $@
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -Wa,--noexecstack -c $< -o $@
# Build and link the tests
%: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS)
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
index eda9bd7..be20fc6 100644
--- a/tests/tcg/x86_64/Makefile.target
+++ b/tests/tcg/x86_64/Makefile.target
@@ -16,6 +16,9 @@ X86_64_TESTS += noexec
X86_64_TESTS += cmpxchg
X86_64_TESTS += adox
X86_64_TESTS += test-1648
+X86_64_TESTS += test-2175
+X86_64_TESTS += cross-modifying-code
+X86_64_TESTS += fma
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
else
TESTS=$(MULTIARCH_TESTS)
@@ -26,6 +29,9 @@ adox: CFLAGS=-O2
run-test-i386-ssse3: QEMU_OPTS += -cpu max
run-plugin-test-i386-ssse3-%: QEMU_OPTS += -cpu max
+cross-modifying-code: CFLAGS+=-pthread
+cross-modifying-code: LDFLAGS+=-pthread
+
test-x86_64: LDFLAGS+=-lm -lc
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
diff --git a/tests/tcg/x86_64/cross-modifying-code.c b/tests/tcg/x86_64/cross-modifying-code.c
new file mode 100644
index 0000000..2704df6
--- /dev/null
+++ b/tests/tcg/x86_64/cross-modifying-code.c
@@ -0,0 +1,80 @@
+/*
+ * Test patching code, running in one thread, from another thread.
+ *
+ * The Intel SDM calls this "cross-modifying code" and recommends a special
+ * sequence, which requires both threads to cooperate.
+ *
+ * The Linux kernel uses a different sequence that does not require cooperation
+ * and involves patching the first byte with int3.
+ *
+ * Finally, there is user-mode software out there that simply uses atomics, and
+ * that seems to be good enough in practice. Test that QEMU has no problems
+ * with this as well.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+void add1_or_nop(long *x);
+asm(".pushsection .rwx,\"awx\",@progbits\n"
+ ".globl add1_or_nop\n"
+ /* addq $0x1,(%rdi) */
+ "add1_or_nop: .byte 0x48, 0x83, 0x07, 0x01\n"
+ "ret\n"
+ ".popsection\n");
+
+#define THREAD_WAIT 0
+#define THREAD_PATCH 1
+#define THREAD_STOP 2
+
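+/*
+ * Worker thread: while told to patch, atomically swap the first four
+ * bytes of add1_or_nop back and forth between the original addq
+ * encoding and a 4-byte nop, while the other thread keeps calling it.
+ */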
+static void *thread_func(void *arg)
+{
+ int val = 0x0026748d; /* nop */
+
+ while (true) {
+ switch (__atomic_load_n((int *)arg, __ATOMIC_SEQ_CST)) {
+ case THREAD_WAIT:
+ break;
+ case THREAD_PATCH:
+ val = __atomic_exchange_n((int *)&add1_or_nop, val,
+ __ATOMIC_SEQ_CST);
+ break;
+ case THREAD_STOP:
+ return NULL;
+ default:
+ assert(false);
+ __builtin_unreachable();
+ }
+ }
+}
+
+#define INITIAL 42
+#define COUNT 1000000
+
+int main(void)
+{
+ int command = THREAD_WAIT;
+ pthread_t thread;
+ long x = INITIAL;
+ int err;
+ int i;
+
+ err = pthread_create(&thread, NULL, &thread_func, &command);
+ assert(err == 0);
+
+ __atomic_store_n(&command, THREAD_PATCH, __ATOMIC_SEQ_CST);
+ for (i = 0; i < COUNT; i++) {
+ add1_or_nop(&x);
+ }
+ __atomic_store_n(&command, THREAD_STOP, __ATOMIC_SEQ_CST);
+
+ err = pthread_join(thread, NULL);
+ assert(err == 0);
+
+ assert(x >= INITIAL);
+ assert(x <= INITIAL + COUNT);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/x86_64/fma.c b/tests/tcg/x86_64/fma.c
new file mode 100644
index 0000000..3421961
--- /dev/null
+++ b/tests/tcg/x86_64/fma.c
@@ -0,0 +1,116 @@
+/*
+ * Test some fused multiply add corner cases.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/*
+ * Perform one "n * m + a" operation using the vfmadd insn and return
+ * the result; on return *mxcsr_p is set to the bottom 6 bits of MXCSR
+ * (the Flag bits). If ftz is true then we set MXCSR.FTZ while doing
+ * the operation.
+ * We print the operation and its results to stdout.
+ */
+static uint64_t do_fmadd(uint64_t n, uint64_t m, uint64_t a,
+ bool ftz, uint32_t *mxcsr_p)
+{
+ uint64_t r;
+ uint32_t mxcsr = 0;
+ uint32_t ftz_bit = ftz ? (1 << 15) : 0;
+ uint32_t saved_mxcsr = 0;
+
+ asm volatile("stmxcsr %[saved_mxcsr]\n"
+ "stmxcsr %[mxcsr]\n"
+ "andl $0xffff7fc0, %[mxcsr]\n"
+ "orl %[ftz_bit], %[mxcsr]\n"
+ "ldmxcsr %[mxcsr]\n"
+ "movq %[a], %%xmm0\n"
+ "movq %[m], %%xmm1\n"
+ "movq %[n], %%xmm2\n"
+ /* xmm0 = xmm0 + xmm2 * xmm1 */
+ "vfmadd231sd %%xmm1, %%xmm2, %%xmm0\n"
+ "movq %%xmm0, %[r]\n"
+ "stmxcsr %[mxcsr]\n"
+ "ldmxcsr %[saved_mxcsr]\n"
+ : [r] "=r" (r), [mxcsr] "=m" (mxcsr),
+ [saved_mxcsr] "=m" (saved_mxcsr)
+ : [n] "r" (n), [m] "r" (m), [a] "r" (a),
+ [ftz_bit] "r" (ftz_bit)
+ : "xmm0", "xmm1", "xmm2");
+ *mxcsr_p = mxcsr & 0x3f;
+ printf("vfmadd132sd 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
+ " = 0x%" PRIx64 " MXCSR flags 0x%" PRIx32 "\n",
+ n, m, a, r, *mxcsr_p);
+ return r;
+}
+
+typedef struct testdata {
+ /* Input n, m, a */
+ uint64_t n;
+ uint64_t m;
+ uint64_t a;
+ bool ftz;
+ /* Expected result */
+ uint64_t expected_r;
+ /* Expected low 6 bits of MXCSR (the Flag bits) */
+ uint32_t expected_mxcsr;
+} testdata;
+
+static testdata tests[] = {
+ { 0, 0x7ff0000000000000, 0x7ff000000000aaaa, false, /* 0 * Inf + SNaN */
+ 0x7ff800000000aaaa, 1 }, /* Should be QNaN and does raise Invalid */
+ { 0, 0x7ff0000000000000, 0x7ff800000000aaaa, false, /* 0 * Inf + QNaN */
+ 0x7ff800000000aaaa, 0 }, /* Should be QNaN and does *not* raise Invalid */
+ /*
+ * These inputs give a result which is tiny before rounding but which
+ * becomes non-tiny after rounding. x86 is a "detect tininess after
+ * rounding" architecture, so it should give a non-denormal result and
+ * not set the Underflow flag (only the Precision flag for an inexact
+ * result).
+ */
+ { 0x3fdfffffffffffff, 0x001fffffffffffff, 0x801fffffffffffff, false,
+ 0x8010000000000000, 0x20 },
+ /*
+ * Flushing of denormal outputs to zero should also happen after
+ * rounding, so setting FTZ should not affect the result or the flags.
+ */
+ { 0x3fdfffffffffffff, 0x001fffffffffffff, 0x801fffffffffffff, true,
+ 0x8010000000000000, 0x20 }, /* Enabling FTZ shouldn't change flags */
+ /*
+     * normal * 0 + a denormal. With FTZ disabled this gives an exact
+     * result (the input denormal); consuming it sets the Denormal flag.
+ */
+ { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, false,
+ 0x8008000000000000, 0x2 }, /* Denormal */
+ /*
+ * With FTZ enabled, this consumes the denormal, returns zero (because
+     * flushed) and also indicates Underflow and Precision.
+ */
+ { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, true,
+ 0x8000000000000000, 0x32 }, /* Precision, Underflow, Denormal */
+};
+
+int main(void)
+{
+ bool passed = true;
+ for (int i = 0; i < ARRAY_SIZE(tests); i++) {
+ uint32_t mxcsr;
+ uint64_t r = do_fmadd(tests[i].n, tests[i].m, tests[i].a,
+ tests[i].ftz, &mxcsr);
+ if (r != tests[i].expected_r) {
+ printf("expected result 0x%" PRIx64 "\n", tests[i].expected_r);
+ passed = false;
+ }
+ if (mxcsr != tests[i].expected_mxcsr) {
+ printf("expected MXCSR flags 0x%x\n", tests[i].expected_mxcsr);
+ passed = false;
+ }
+ }
+ return passed ? 0 : 1;
+}
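
For reference when reading the expected_mxcsr values in the table above: do_fmadd() returns only the low six MXCSR bits, which are the SSE exception flags IE, DE, ZE, OE, UE and PE (bits 0 to 5), so 0x20 is Precision alone and 0x32 is Denormal plus Underflow plus Precision. A small decoder along those lines (an illustration, not part of the patch) could look like this:

/* Hypothetical helper, not part of the QEMU patch above. */
#include <stdint.h>
#include <stdio.h>

static void print_mxcsr_flags(uint32_t flags)
{
    /* MXCSR exception flags, bits 0..5 */
    static const char *const names[6] = {
        "Invalid", "Denormal", "ZeroDivide",
        "Overflow", "Underflow", "Precision"
    };
    int i;

    for (i = 0; i < 6; i++) {
        if (flags & (1u << i)) {
            printf(" %s", names[i]);
        }
    }
    printf("\n");
}

int main(void)
{
    print_mxcsr_flags(0x20);    /* Precision */
    print_mxcsr_flags(0x32);    /* Denormal Underflow Precision */
    return 0;
}
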
diff --git a/tests/tcg/x86_64/test-2175.c b/tests/tcg/x86_64/test-2175.c
new file mode 100644
index 0000000..aafd037
--- /dev/null
+++ b/tests/tcg/x86_64/test-2175.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* See https://gitlab.com/qemu-project/qemu/-/issues/2175 */
+
+#include <assert.h>
+
+int test_setc(unsigned int x, unsigned int y)
+{
+ asm("blsi %1, %0; setc %b0" : "+r"(x) : "r"(y));
+ return (unsigned char)x;
+}
+
+int test_pushf(unsigned int x, unsigned int y)
+{
+ asm("blsi %1, %0; pushf; pop %q0" : "+r"(x) : "r"(y));
+ return x & 1;
+}
+
+int main()
+{
+ assert(test_setc(1, 0xedbf530a));
+ assert(test_pushf(1, 0xedbf530a));
+ return 0;
+}
+
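
The test-2175 regression test above hinges on the carry flag produced by BLSI: the instruction isolates the lowest set bit of its source (dst = src & -src) and sets CF when the source is non-zero, so both the setc and the pushf variant must observe CF=1 for the non-zero input 0xedbf530a. A plain-C restatement of that expectation (an illustration, not part of the patch):

/* Hypothetical illustration of the BLSI semantics the test relies on. */
#include <assert.h>

/* BLSI dst, src computes dst = src & -src and sets CF iff src != 0. */
static unsigned int blsi_result(unsigned int src)
{
    return src & -src;
}

static int blsi_carry(unsigned int src)
{
    return src != 0;
}

int main(void)
{
    assert(blsi_result(0xedbf530a) == 0x2);  /* lowest set bit isolated */
    assert(blsi_carry(0xedbf530a) == 1);     /* what setc/pushf observe */
    assert(blsi_carry(0) == 0);
    return 0;
}
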
diff --git a/tests/uefi-test-tools/Makefile b/tests/uefi-test-tools/Makefile
index f4eaebd..8ee6fb3 100644
--- a/tests/uefi-test-tools/Makefile
+++ b/tests/uefi-test-tools/Makefile
@@ -12,7 +12,7 @@
edk2_dir := ../../roms/edk2
images_dir := ../data/uefi-boot-images
-emulation_targets := arm aarch64 i386 x86_64 riscv64
+emulation_targets := arm aarch64 i386 x86_64 riscv64 loongarch64
uefi_binaries := bios-tables-test
intermediate_suffixes := .efi .fat .iso.raw
@@ -56,7 +56,8 @@ Build/%.iso.raw: Build/%.fat
# stripped from, the argument.
map_arm_to_uefi = $(subst arm,ARM,$(1))
map_aarch64_to_uefi = $(subst aarch64,AA64,$(call map_arm_to_uefi,$(1)))
-map_riscv64_to_uefi = $(subst riscv64,RISCV64,$(call map_aarch64_to_uefi,$(1)))
+map_loongarch64_to_uefi = $(subst loongarch64,LOONGARCH64,$(call map_aarch64_to_uefi,$(1)))
+map_riscv64_to_uefi = $(subst riscv64,RISCV64,$(call map_loongarch64_to_uefi,$(1)))
map_i386_to_uefi = $(subst i386,IA32,$(call map_riscv64_to_uefi,$(1)))
map_x86_64_to_uefi = $(subst x86_64,X64,$(call map_i386_to_uefi,$(1)))
map_to_uefi = $(subst .,,$(call map_x86_64_to_uefi,$(1)))
diff --git a/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc b/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc
index 0902fd3..facf8df 100644
--- a/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc
+++ b/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc
@@ -19,7 +19,7 @@
PLATFORM_VERSION = 0.1
PLATFORM_NAME = UefiTestTools
SKUID_IDENTIFIER = DEFAULT
- SUPPORTED_ARCHITECTURES = ARM|AARCH64|IA32|X64|RISCV64
+ SUPPORTED_ARCHITECTURES = ARM|AARCH64|IA32|X64|RISCV64|LOONGARCH64
BUILD_TARGETS = DEBUG
[BuildOptions.IA32]
@@ -65,6 +65,10 @@
[LibraryClasses.RISCV64]
BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
+[LibraryClasses.LOONGARCH64]
+ BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
+ StackCheckLib|MdePkg/Library/StackCheckLibNull/StackCheckLibNull.inf
+
[PcdsFixedAtBuild]
gEfiMdePkgTokenSpaceGuid.PcdDebugPrintErrorLevel|0x8040004F
gEfiMdePkgTokenSpaceGuid.PcdDebugPropertyMask|0x2F
diff --git a/tests/uefi-test-tools/uefi-test-build.config b/tests/uefi-test-tools/uefi-test-build.config
index a4c61fc..8bf4826 100644
--- a/tests/uefi-test-tools/uefi-test-build.config
+++ b/tests/uefi-test-tools/uefi-test-build.config
@@ -22,6 +22,16 @@ arch = AARCH64
cpy1 = AARCH64/BiosTablesTest.efi bios-tables-test.aarch64.efi
####################################################################################
+# loongarch64
+
+[build.loongarch64]
+conf = UefiTestToolsPkg/UefiTestToolsPkg.dsc
+plat = UefiTestTools
+dest = ./Build
+arch = LOONGARCH64
+cpy1 = LOONGARCH64/BiosTablesTest.efi bios-tables-test.loongarch64.efi
+
+####################################################################################
# riscv64
[build.riscv64]
diff --git a/tests/unit/check-block-qdict.c b/tests/unit/check-block-qdict.c
index 751c58e..0036d85 100644
--- a/tests/unit/check-block-qdict.c
+++ b/tests/unit/check-block-qdict.c
@@ -9,8 +9,8 @@
#include "qemu/osdep.h"
#include "block/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qlist.h"
+#include "qobject/qnum.h"
#include "qapi/error.h"
static void qdict_defaults_test(void)
diff --git a/tests/unit/check-qdict.c b/tests/unit/check-qdict.c
index b5efa85..a1312be 100644
--- a/tests/unit/check-qdict.c
+++ b/tests/unit/check-qdict.c
@@ -11,9 +11,9 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
/*
* Public Interface test-cases
diff --git a/tests/unit/check-qjson.c b/tests/unit/check-qjson.c
index a89293c..780a365 100644
--- a/tests/unit/check-qjson.c
+++ b/tests/unit/check-qjson.c
@@ -14,12 +14,12 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qlit.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qjson.h"
+#include "qobject/qlit.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qemu/unicode.h"
static QString *from_json_str(const char *jstr, bool single, Error **errp)
diff --git a/tests/unit/check-qlist.c b/tests/unit/check-qlist.c
index 3cd0ccb..1388aee 100644
--- a/tests/unit/check-qlist.c
+++ b/tests/unit/check-qlist.c
@@ -11,8 +11,8 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qnum.h"
+#include "qobject/qlist.h"
/*
* Public Interface test-cases
diff --git a/tests/unit/check-qlit.c b/tests/unit/check-qlit.c
index bd6798d..ea7a0d9 100644
--- a/tests/unit/check-qlit.c
+++ b/tests/unit/check-qlit.c
@@ -9,12 +9,12 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qlit.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qlit.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
static QLitObject qlit = QLIT_QDICT(((QLitDictEntry[]) {
{ "foo", QLIT_QNUM(42) },
diff --git a/tests/unit/check-qnull.c b/tests/unit/check-qnull.c
index 5ceacc6..724a66d 100644
--- a/tests/unit/check-qnull.c
+++ b/tests/unit/check-qnull.c
@@ -8,7 +8,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qnull.h"
+#include "qobject/qnull.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/error.h"
diff --git a/tests/unit/check-qnum.c b/tests/unit/check-qnum.c
index bf7fe45..a40120e 100644
--- a/tests/unit/check-qnum.c
+++ b/tests/unit/check-qnum.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
-#include "qapi/qmp/qnum.h"
+#include "qobject/qnum.h"
/*
* Public Interface test-cases
diff --git a/tests/unit/check-qobject.c b/tests/unit/check-qobject.c
index 022b7c7..ccb2566 100644
--- a/tests/unit/check-qobject.c
+++ b/tests/unit/check-qobject.c
@@ -9,12 +9,12 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include <math.h>
diff --git a/tests/unit/check-qom-interface.c b/tests/unit/check-qom-interface.c
index c99be97..86ae5f6 100644
--- a/tests/unit/check-qom-interface.c
+++ b/tests/unit/check-qom-interface.c
@@ -38,7 +38,7 @@ static const TypeInfo test_if_info = {
#define PATTERN 0xFAFBFCFD
-static void test_class_init(ObjectClass *oc, void *data)
+static void test_class_init(ObjectClass *oc, const void *data)
{
TestIfClass *tc = TEST_IF_CLASS(oc);
@@ -52,7 +52,7 @@ static const TypeInfo direct_impl_info = {
.name = TYPE_DIRECT_IMPL,
.parent = TYPE_OBJECT,
.class_init = test_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_TEST_IF },
{ }
}
diff --git a/tests/unit/check-qom-proplist.c b/tests/unit/check-qom-proplist.c
index 79d4a8b..ee3c6fb 100644
--- a/tests/unit/check-qom-proplist.c
+++ b/tests/unit/check-qom-proplist.c
@@ -22,8 +22,8 @@
#include "qapi/error.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qobject.h"
+#include "qobject/qdict.h"
+#include "qobject/qobject.h"
#include "qom/object.h"
#include "qemu/module.h"
#include "qemu/option.h"
@@ -135,7 +135,7 @@ static void dummy_init(Object *obj)
}
-static void dummy_class_init(ObjectClass *cls, void *data)
+static void dummy_class_init(ObjectClass *cls, const void *data)
{
object_class_property_add_str(cls, "sv",
dummy_get_sv,
@@ -164,7 +164,7 @@ static const TypeInfo dummy_info = {
.instance_finalize = dummy_finalize,
.class_size = sizeof(DummyObjectClass),
.class_init = dummy_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
@@ -264,7 +264,7 @@ static void dummy_dev_unparent(Object *obj)
object_unparent(OBJECT(dev->bus));
}
-static void dummy_dev_class_init(ObjectClass *klass, void *opaque)
+static void dummy_dev_class_init(ObjectClass *klass, const void *opaque)
{
klass->unparent = dummy_dev_unparent;
}
@@ -288,7 +288,7 @@ static void dummy_bus_unparent(Object *obj)
object_unparent(OBJECT(bus->backend));
}
-static void dummy_bus_class_init(ObjectClass *klass, void *opaque)
+static void dummy_bus_class_init(ObjectClass *klass, const void *opaque)
{
klass->unparent = dummy_bus_unparent;
}
@@ -610,7 +610,7 @@ static void test_dummy_delchild(void)
static void test_qom_partial_path(void)
{
Object *root = object_get_objects_root();
- Object *cont1 = container_get(root, "/cont1");
+ Object *cont1 = object_property_add_new_container(root, "cont1");
Object *obj1 = object_new(TYPE_DUMMY);
Object *obj2a = object_new(TYPE_DUMMY);
Object *obj2b = object_new(TYPE_DUMMY);
diff --git a/tests/unit/check-qstring.c b/tests/unit/check-qstring.c
index bd861f4..2e6a005 100644
--- a/tests/unit/check-qstring.c
+++ b/tests/unit/check-qstring.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qstring.h"
/*
* Public Interface test-cases
diff --git a/tests/unit/crypto-tls-psk-helpers.c b/tests/unit/crypto-tls-psk-helpers.c
index c6cc740..36527fd 100644
--- a/tests/unit/crypto-tls-psk-helpers.c
+++ b/tests/unit/crypto-tls-psk-helpers.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
-#include "crypto-tls-x509-helpers.h"
#include "crypto-tls-psk-helpers.h"
#include "qemu/sockets.h"
diff --git a/tests/unit/crypto-tls-x509-helpers.c b/tests/unit/crypto-tls-x509-helpers.c
index e9937f6..2daecc4 100644
--- a/tests/unit/crypto-tls-x509-helpers.c
+++ b/tests/unit/crypto-tls-x509-helpers.c
@@ -20,15 +20,19 @@
#include "qemu/osdep.h"
+#include <libtasn1.h>
+
#include "crypto-tls-x509-helpers.h"
#include "crypto/init.h"
#include "qemu/sockets.h"
+#include "pkix_asn1_tab.c.inc"
+
/*
* This stores some static data that is needed when
* encoding extensions in the x509 certs
*/
-asn1_node pkix_asn1;
+static asn1_node pkix_asn1;
/*
* To avoid consuming random entropy to generate keys,
@@ -131,6 +135,7 @@ void test_tls_init(const char *keyfile)
void test_tls_cleanup(const char *keyfile)
{
asn1_delete_structure(&pkix_asn1);
+ gnutls_x509_privkey_deinit(privkey);
unlink(keyfile);
}
@@ -498,8 +503,7 @@ void test_tls_write_cert_chain(const char *filename,
g_free(buffer);
}
-
-void test_tls_discard_cert(QCryptoTLSTestCertReq *req)
+void test_tls_deinit_cert(QCryptoTLSTestCertReq *req)
{
if (!req->crt) {
return;
@@ -507,6 +511,15 @@ void test_tls_discard_cert(QCryptoTLSTestCertReq *req)
gnutls_x509_crt_deinit(req->crt);
req->crt = NULL;
+}
+
+void test_tls_discard_cert(QCryptoTLSTestCertReq *req)
+{
+ if (!req->crt) {
+ return;
+ }
+
+ test_tls_deinit_cert(req);
if (getenv("QEMU_TEST_DEBUG_CERTS") == NULL) {
unlink(req->filename);
diff --git a/tests/unit/crypto-tls-x509-helpers.h b/tests/unit/crypto-tls-x509-helpers.h
index 247e716..2a0f7c0 100644
--- a/tests/unit/crypto-tls-x509-helpers.h
+++ b/tests/unit/crypto-tls-x509-helpers.h
@@ -23,7 +23,6 @@
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>
-#include <libtasn1.h>
#define QCRYPTO_TLS_TEST_CLIENT_NAME "ACME QEMU Client"
@@ -74,6 +73,12 @@ void test_tls_generate_cert(QCryptoTLSTestCertReq *req,
void test_tls_write_cert_chain(const char *filename,
gnutls_x509_crt_t *certs,
size_t ncerts);
+/*
+ * Deinitialize the QCryptoTLSTestCertReq, but don't delete the certificate
+ * file on disk. (The caller is then responsible for doing that themselves.)
+ */
+void test_tls_deinit_cert(QCryptoTLSTestCertReq *req);
+/* Deinit the QCryptoTLSTestCertReq, and delete the certificate file */
void test_tls_discard_cert(QCryptoTLSTestCertReq *req);
void test_tls_init(const char *keyfile);
@@ -171,6 +176,4 @@ void test_tls_cleanup(const char *keyfile);
}; \
test_tls_generate_cert(&varname, cavarname.crt)
-extern const asn1_static_node pkix_asn1_tab[];
-
#endif
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index 26c109c..d5248ae 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -47,6 +47,7 @@ tests = {
'test-logging': [],
'test-qapi-util': [],
'test-interval-tree': [],
+ 'test-fifo': [],
}
if have_system or have_tools
@@ -99,11 +100,11 @@ if have_block
tasn1.found() and \
host_os != 'windows'
tests += {
- 'test-crypto-tlscredsx509': ['crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c',
+ 'test-crypto-tlscredsx509': ['crypto-tls-x509-helpers.c',
tasn1, crypto, gnutls],
- 'test-crypto-tlssession': ['crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c', 'crypto-tls-psk-helpers.c',
+ 'test-crypto-tlssession': ['crypto-tls-x509-helpers.c', 'crypto-tls-psk-helpers.c',
tasn1, crypto, gnutls],
- 'test-io-channel-tls': ['io-channel-helpers.c', 'crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c',
+ 'test-io-channel-tls': ['io-channel-helpers.c', 'crypto-tls-x509-helpers.c',
tasn1, io, crypto, gnutls]}
endif
if pam.found()
@@ -115,15 +116,13 @@ if have_block
if host_os != 'windows'
tests += {
'test-image-locking': [testblock],
- 'test-nested-aio-poll': [testblock],
+ 'test-nested-aio-poll': [],
}
endif
if config_host_data.get('CONFIG_REPLICATION')
tests += {'test-replication': [testblock]}
endif
- if nettle.found() or gcrypt.found()
- tests += {'test-crypto-pbkdf': [io]}
- endif
+ tests += {'test-crypto-pbkdf': [io]}
endif
if have_system
diff --git a/tests/unit/pkix_asn1_tab.c b/tests/unit/pkix_asn1_tab.c
deleted file mode 100644
index 8952140..0000000
--- a/tests/unit/pkix_asn1_tab.c
+++ /dev/null
@@ -1,1105 +0,0 @@
-/*
- * This file is taken from gnutls 1.6.3 under the GPLv2+
- * and is under copyright of various GNUTLS contributors.
- */
-
-#include "qemu/osdep.h"
-#include "crypto-tls-x509-helpers.h"
-
-const asn1_static_node pkix_asn1_tab[] = {
- {"PKIX1", 536875024, 0},
- {0, 1073741836, 0},
- {"id-ce", 1879048204, 0},
- {"joint-iso-ccitt", 1073741825, "2"},
- {"ds", 1073741825, "5"},
- {0, 1, "29"},
- {"id-ce-authorityKeyIdentifier", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "35"},
- {"AuthorityKeyIdentifier", 1610612741, 0},
- {"keyIdentifier", 1610637314, "KeyIdentifier"},
- {0, 4104, "0"},
- {"authorityCertIssuer", 1610637314, "GeneralNames"},
- {0, 4104, "1"},
- {"authorityCertSerialNumber", 536895490, "CertificateSerialNumber"},
- {0, 4104, "2"},
- {"KeyIdentifier", 1073741831, 0},
- {"id-ce-subjectKeyIdentifier", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "14"},
- {"SubjectKeyIdentifier", 1073741826, "KeyIdentifier"},
- {"id-ce-keyUsage", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "15"},
- {"KeyUsage", 1610874886, 0},
- {"digitalSignature", 1073741825, "0"},
- {"nonRepudiation", 1073741825, "1"},
- {"keyEncipherment", 1073741825, "2"},
- {"dataEncipherment", 1073741825, "3"},
- {"keyAgreement", 1073741825, "4"},
- {"keyCertSign", 1073741825, "5"},
- {"cRLSign", 1073741825, "6"},
- {"encipherOnly", 1073741825, "7"},
- {"decipherOnly", 1, "8"},
- {"id-ce-privateKeyUsagePeriod", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "16"},
- {"PrivateKeyUsagePeriod", 1610612741, 0},
- {"notBefore", 1619025937, 0},
- {0, 4104, "0"},
- {"notAfter", 545284113, 0},
- {0, 4104, "1"},
- {"id-ce-certificatePolicies", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "32"},
- {"CertificatePolicies", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "PolicyInformation"},
- {"PolicyInformation", 1610612741, 0},
- {"policyIdentifier", 1073741826, "CertPolicyId"},
- {"policyQualifiers", 538984459, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "PolicyQualifierInfo"},
- {"CertPolicyId", 1073741836, 0},
- {"PolicyQualifierInfo", 1610612741, 0},
- {"policyQualifierId", 1073741826, "PolicyQualifierId"},
- {"qualifier", 541065229, 0},
- {"policyQualifierId", 1, 0},
- {"PolicyQualifierId", 1073741836, 0},
- {"CPSuri", 1073741826, "IA5String"},
- {"UserNotice", 1610612741, 0},
- {"noticeRef", 1073758210, "NoticeReference"},
- {"explicitText", 16386, "DisplayText"},
- {"NoticeReference", 1610612741, 0},
- {"organization", 1073741826, "DisplayText"},
- {"noticeNumbers", 536870923, 0},
- {0, 3, 0},
- {"DisplayText", 1610612754, 0},
- {"visibleString", 1612709890, "VisibleString"},
- {"200", 524298, "1"},
- {"bmpString", 1612709890, "BMPString"},
- {"200", 524298, "1"},
- {"utf8String", 538968066, "UTF8String"},
- {"200", 524298, "1"},
- {"id-ce-policyMappings", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "33"},
- {"PolicyMappings", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 536870917, 0},
- {"issuerDomainPolicy", 1073741826, "CertPolicyId"},
- {"subjectDomainPolicy", 2, "CertPolicyId"},
- {"DirectoryString", 1610612754, 0},
- {"teletexString", 1612709890, "TeletexString"},
- {"MAX", 524298, "1"},
- {"printableString", 1612709890, "PrintableString"},
- {"MAX", 524298, "1"},
- {"universalString", 1612709890, "UniversalString"},
- {"MAX", 524298, "1"},
- {"utf8String", 1612709890, "UTF8String"},
- {"MAX", 524298, "1"},
- {"bmpString", 1612709890, "BMPString"},
- {"MAX", 524298, "1"},
- {"ia5String", 538968066, "IA5String"},
- {"MAX", 524298, "1"},
- {"id-ce-subjectAltName", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "17"},
- {"SubjectAltName", 1073741826, "GeneralNames"},
- {"GeneralNames", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "GeneralName"},
- {"GeneralName", 1610612754, 0},
- {"otherName", 1610620930, "AnotherName"},
- {0, 4104, "0"},
- {"rfc822Name", 1610620930, "IA5String"},
- {0, 4104, "1"},
- {"dNSName", 1610620930, "IA5String"},
- {0, 4104, "2"},
- {"x400Address", 1610620930, "ORAddress"},
- {0, 4104, "3"},
- {"directoryName", 1610620930, "RDNSequence"},
- {0, 2056, "4"},
- {"ediPartyName", 1610620930, "EDIPartyName"},
- {0, 4104, "5"},
- {"uniformResourceIdentifier", 1610620930, "IA5String"},
- {0, 4104, "6"},
- {"iPAddress", 1610620935, 0},
- {0, 4104, "7"},
- {"registeredID", 536879116, 0},
- {0, 4104, "8"},
- {"AnotherName", 1610612741, 0},
- {"type-id", 1073741836, 0},
- {"value", 541073421, 0},
- {0, 1073743880, "0"},
- {"type-id", 1, 0},
- {"EDIPartyName", 1610612741, 0},
- {"nameAssigner", 1610637314, "DirectoryString"},
- {0, 4104, "0"},
- {"partyName", 536879106, "DirectoryString"},
- {0, 4104, "1"},
- {"id-ce-issuerAltName", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "18"},
- {"IssuerAltName", 1073741826, "GeneralNames"},
- {"id-ce-subjectDirectoryAttributes", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "9"},
- {"SubjectDirectoryAttributes", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "Attribute"},
- {"id-ce-basicConstraints", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "19"},
- {"BasicConstraints", 1610612741, 0},
- {"cA", 1610645508, 0},
- {0, 131081, 0},
- {"pathLenConstraint", 537411587, 0},
- {"0", 10, "MAX"},
- {"id-ce-nameConstraints", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "30"},
- {"NameConstraints", 1610612741, 0},
- {"permittedSubtrees", 1610637314, "GeneralSubtrees"},
- {0, 4104, "0"},
- {"excludedSubtrees", 536895490, "GeneralSubtrees"},
- {0, 4104, "1"},
- {"GeneralSubtrees", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "GeneralSubtree"},
- {"GeneralSubtree", 1610612741, 0},
- {"base", 1073741826, "GeneralName"},
- {"minimum", 1610653698, "BaseDistance"},
- {0, 1073741833, "0"},
- {0, 4104, "0"},
- {"maximum", 536895490, "BaseDistance"},
- {0, 4104, "1"},
- {"BaseDistance", 1611137027, 0},
- {"0", 10, "MAX"},
- {"id-ce-policyConstraints", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "36"},
- {"PolicyConstraints", 1610612741, 0},
- {"requireExplicitPolicy", 1610637314, "SkipCerts"},
- {0, 4104, "0"},
- {"inhibitPolicyMapping", 536895490, "SkipCerts"},
- {0, 4104, "1"},
- {"SkipCerts", 1611137027, 0},
- {"0", 10, "MAX"},
- {"id-ce-cRLDistributionPoints", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "31"},
- {"CRLDistributionPoints", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "DistributionPoint"},
- {"DistributionPoint", 1610612741, 0},
- {"distributionPoint", 1610637314, "DistributionPointName"},
- {0, 2056, "0"},
- {"reasons", 1610637314, "ReasonFlags"},
- {0, 4104, "1"},
- {"cRLIssuer", 536895490, "GeneralNames"},
- {0, 4104, "2"},
- {"DistributionPointName", 1610612754, 0},
- {"fullName", 1610620930, "GeneralNames"},
- {0, 4104, "0"},
- {"nameRelativeToCRLIssuer", 536879106, "RelativeDistinguishedName"},
- {0, 4104, "1"},
- {"ReasonFlags", 1610874886, 0},
- {"unused", 1073741825, "0"},
- {"keyCompromise", 1073741825, "1"},
- {"cACompromise", 1073741825, "2"},
- {"affiliationChanged", 1073741825, "3"},
- {"superseded", 1073741825, "4"},
- {"cessationOfOperation", 1073741825, "5"},
- {"certificateHold", 1073741825, "6"},
- {"privilegeWithdrawn", 1073741825, "7"},
- {"aACompromise", 1, "8"},
- {"id-ce-extKeyUsage", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "37"},
- {"ExtKeyUsageSyntax", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "KeyPurposeId"},
- {"KeyPurposeId", 1073741836, 0},
- {"id-kp-serverAuth", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "1"},
- {"id-kp-clientAuth", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "2"},
- {"id-kp-codeSigning", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "3"},
- {"id-kp-emailProtection", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "4"},
- {"id-kp-ipsecEndSystem", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "5"},
- {"id-kp-ipsecTunnel", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "6"},
- {"id-kp-ipsecUser", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "7"},
- {"id-kp-timeStamping", 1879048204, 0},
- {0, 1073741825, "id-kp"},
- {0, 1, "8"},
- {"id-pe-authorityInfoAccess", 1879048204, 0},
- {0, 1073741825, "id-pe"},
- {0, 1, "1"},
- {"AuthorityInfoAccessSyntax", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "AccessDescription"},
- {"AccessDescription", 1610612741, 0},
- {"accessMethod", 1073741836, 0},
- {"accessLocation", 2, "GeneralName"},
- {"id-ce-cRLNumber", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "20"},
- {"CRLNumber", 1611137027, 0},
- {"0", 10, "MAX"},
- {"id-ce-issuingDistributionPoint", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "28"},
- {"IssuingDistributionPoint", 1610612741, 0},
- {"distributionPoint", 1610637314, "DistributionPointName"},
- {0, 4104, "0"},
- {"onlyContainsUserCerts", 1610653700, 0},
- {0, 1073872905, 0},
- {0, 4104, "1"},
- {"onlyContainsCACerts", 1610653700, 0},
- {0, 1073872905, 0},
- {0, 4104, "2"},
- {"onlySomeReasons", 1610637314, "ReasonFlags"},
- {0, 4104, "3"},
- {"indirectCRL", 536911876, 0},
- {0, 1073872905, 0},
- {0, 4104, "4"},
- {"id-ce-deltaCRLIndicator", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "27"},
- {"BaseCRLNumber", 1073741826, "CRLNumber"},
- {"id-ce-cRLReasons", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "21"},
- {"CRLReason", 1610874901, 0},
- {"unspecified", 1073741825, "0"},
- {"keyCompromise", 1073741825, "1"},
- {"cACompromise", 1073741825, "2"},
- {"affiliationChanged", 1073741825, "3"},
- {"superseded", 1073741825, "4"},
- {"cessationOfOperation", 1073741825, "5"},
- {"certificateHold", 1073741825, "6"},
- {"removeFromCRL", 1, "8"},
- {"id-ce-certificateIssuer", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "29"},
- {"CertificateIssuer", 1073741826, "GeneralNames"},
- {"id-ce-holdInstructionCode", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "23"},
- {"HoldInstructionCode", 1073741836, 0},
- {"holdInstruction", 1879048204, 0},
- {"joint-iso-itu-t", 1073741825, "2"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"x9cm", 1073741825, "10040"},
- {0, 1, "2"},
- {"id-holdinstruction-none", 1879048204, 0},
- {0, 1073741825, "holdInstruction"},
- {0, 1, "1"},
- {"id-holdinstruction-callissuer", 1879048204, 0},
- {0, 1073741825, "holdInstruction"},
- {0, 1, "2"},
- {"id-holdinstruction-reject", 1879048204, 0},
- {0, 1073741825, "holdInstruction"},
- {0, 1, "3"},
- {"id-ce-invalidityDate", 1879048204, 0},
- {0, 1073741825, "id-ce"},
- {0, 1, "24"},
- {"InvalidityDate", 1082130449, 0},
- {"VisibleString", 1610620935, 0},
- {0, 4360, "26"},
- {"NumericString", 1610620935, 0},
- {0, 4360, "18"},
- {"IA5String", 1610620935, 0},
- {0, 4360, "22"},
- {"TeletexString", 1610620935, 0},
- {0, 4360, "20"},
- {"PrintableString", 1610620935, 0},
- {0, 4360, "19"},
- {"UniversalString", 1610620935, 0},
- {0, 4360, "28"},
- {"BMPString", 1610620935, 0},
- {0, 4360, "30"},
- {"UTF8String", 1610620935, 0},
- {0, 4360, "12"},
- {"id-pkix", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"identified-organization", 1073741825, "3"},
- {"dod", 1073741825, "6"},
- {"internet", 1073741825, "1"},
- {"security", 1073741825, "5"},
- {"mechanisms", 1073741825, "5"},
- {"pkix", 1, "7"},
- {"id-pe", 1879048204, 0},
- {0, 1073741825, "id-pkix"},
- {0, 1, "1"},
- {"id-qt", 1879048204, 0},
- {0, 1073741825, "id-pkix"},
- {0, 1, "2"},
- {"id-kp", 1879048204, 0},
- {0, 1073741825, "id-pkix"},
- {0, 1, "3"},
- {"id-ad", 1879048204, 0},
- {0, 1073741825, "id-pkix"},
- {0, 1, "48"},
- {"id-qt-cps", 1879048204, 0},
- {0, 1073741825, "id-qt"},
- {0, 1, "1"},
- {"id-qt-unotice", 1879048204, 0},
- {0, 1073741825, "id-qt"},
- {0, 1, "2"},
- {"id-ad-ocsp", 1879048204, 0},
- {0, 1073741825, "id-ad"},
- {0, 1, "1"},
- {"id-ad-caIssuers", 1879048204, 0},
- {0, 1073741825, "id-ad"},
- {0, 1, "2"},
- {"Attribute", 1610612741, 0},
- {"type", 1073741826, "AttributeType"},
- {"values", 536870927, 0},
- {0, 2, "AttributeValue"},
- {"AttributeType", 1073741836, 0},
- {"AttributeValue", 1614807053, 0},
- {"type", 1, 0},
- {"AttributeTypeAndValue", 1610612741, 0},
- {"type", 1073741826, "AttributeType"},
- {"value", 2, "AttributeValue"},
- {"id-at", 1879048204, 0},
- {"joint-iso-ccitt", 1073741825, "2"},
- {"ds", 1073741825, "5"},
- {0, 1, "4"},
- {"id-at-initials", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "43"},
- {"X520initials", 1073741826, "DirectoryString"},
- {"id-at-generationQualifier", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "44"},
- {"X520generationQualifier", 1073741826, "DirectoryString"},
- {"id-at-surname", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "4"},
- {"X520surName", 1073741826, "DirectoryString"},
- {"id-at-givenName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "42"},
- {"X520givenName", 1073741826, "DirectoryString"},
- {"id-at-name", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "41"},
- {"X520name", 1073741826, "DirectoryString"},
- {"id-at-commonName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "3"},
- {"X520CommonName", 1073741826, "DirectoryString"},
- {"id-at-localityName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "7"},
- {"X520LocalityName", 1073741826, "DirectoryString"},
- {"id-at-stateOrProvinceName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "8"},
- {"X520StateOrProvinceName", 1073741826, "DirectoryString"},
- {"id-at-organizationName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "10"},
- {"X520OrganizationName", 1073741826, "DirectoryString"},
- {"id-at-organizationalUnitName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "11"},
- {"X520OrganizationalUnitName", 1073741826, "DirectoryString"},
- {"id-at-title", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "12"},
- {"X520Title", 1073741826, "DirectoryString"},
- {"id-at-description", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "13"},
- {"X520Description", 1073741826, "DirectoryString"},
- {"id-at-dnQualifier", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "46"},
- {"X520dnQualifier", 1073741826, "PrintableString"},
- {"id-at-countryName", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "6"},
- {"X520countryName", 1612709890, "PrintableString"},
- {0, 1048586, "2"},
- {"id-at-serialNumber", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "5"},
- {"X520serialNumber", 1073741826, "PrintableString"},
- {"id-at-telephoneNumber", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "20"},
- {"X520telephoneNumber", 1073741826, "PrintableString"},
- {"id-at-facsimileTelephoneNumber", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "23"},
- {"X520facsimileTelephoneNumber", 1073741826, "PrintableString"},
- {"id-at-pseudonym", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "65"},
- {"X520pseudonym", 1073741826, "DirectoryString"},
- {"id-at-name", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "41"},
- {"X520name", 1073741826, "DirectoryString"},
- {"id-at-streetAddress", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "9"},
- {"X520streetAddress", 1073741826, "DirectoryString"},
- {"id-at-postalAddress", 1880096780, "AttributeType"},
- {0, 1073741825, "id-at"},
- {0, 1, "16"},
- {"X520postalAddress", 1073741826, "PostalAddress"},
- {"PostalAddress", 1610612747, 0},
- {0, 2, "DirectoryString"},
- {"pkcs", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"rsadsi", 1073741825, "113549"},
- {"pkcs", 1, "1"},
- {"pkcs-9", 1879048204, 0},
- {0, 1073741825, "pkcs"},
- {0, 1, "9"},
- {"emailAddress", 1880096780, "AttributeType"},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "1"},
- {"Pkcs9email", 1612709890, "IA5String"},
- {"ub-emailaddress-length", 524298, "1"},
- {"Name", 1610612754, 0},
- {"rdnSequence", 2, "RDNSequence"},
- {"RDNSequence", 1610612747, 0},
- {0, 2, "RelativeDistinguishedName"},
- {"DistinguishedName", 1073741826, "RDNSequence"},
- {"RelativeDistinguishedName", 1612709903, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "AttributeTypeAndValue"},
- {"Certificate", 1610612741, 0},
- {"tbsCertificate", 1073741826, "TBSCertificate"},
- {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
- {"signature", 6, 0},
- {"TBSCertificate", 1610612741, 0},
- {"version", 1610653698, "Version"},
- {0, 1073741833, "v1"},
- {0, 2056, "0"},
- {"serialNumber", 1073741826, "CertificateSerialNumber"},
- {"signature", 1073741826, "AlgorithmIdentifier"},
- {"issuer", 1073741826, "Name"},
- {"validity", 1073741826, "Validity"},
- {"subject", 1073741826, "Name"},
- {"subjectPublicKeyInfo", 1073741826, "SubjectPublicKeyInfo"},
- {"issuerUniqueID", 1610637314, "UniqueIdentifier"},
- {0, 4104, "1"},
- {"subjectUniqueID", 1610637314, "UniqueIdentifier"},
- {0, 4104, "2"},
- {"extensions", 536895490, "Extensions"},
- {0, 2056, "3"},
- {"Version", 1610874883, 0},
- {"v1", 1073741825, "0"},
- {"v2", 1073741825, "1"},
- {"v3", 1, "2"},
- {"CertificateSerialNumber", 1073741827, 0},
- {"Validity", 1610612741, 0},
- {"notBefore", 1073741826, "Time"},
- {"notAfter", 2, "Time"},
- {"Time", 1610612754, 0},
- {"utcTime", 1090519057, 0},
- {"generalTime", 8388625, 0},
- {"UniqueIdentifier", 1073741830, 0},
- {"SubjectPublicKeyInfo", 1610612741, 0},
- {"algorithm", 1073741826, "AlgorithmIdentifier"},
- {"subjectPublicKey", 6, 0},
- {"Extensions", 1612709899, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "Extension"},
- {"Extension", 1610612741, 0},
- {"extnID", 1073741836, 0},
- {"critical", 1610645508, 0},
- {0, 131081, 0},
- {"extnValue", 7, 0},
- {"CertificateList", 1610612741, 0},
- {"tbsCertList", 1073741826, "TBSCertList"},
- {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
- {"signature", 6, 0},
- {"TBSCertList", 1610612741, 0},
- {"version", 1073758210, "Version"},
- {"signature", 1073741826, "AlgorithmIdentifier"},
- {"issuer", 1073741826, "Name"},
- {"thisUpdate", 1073741826, "Time"},
- {"nextUpdate", 1073758210, "Time"},
- {"revokedCertificates", 1610629131, 0},
- {0, 536870917, 0},
- {"userCertificate", 1073741826, "CertificateSerialNumber"},
- {"revocationDate", 1073741826, "Time"},
- {"crlEntryExtensions", 16386, "Extensions"},
- {"crlExtensions", 536895490, "Extensions"},
- {0, 2056, "0"},
- {"AlgorithmIdentifier", 1610612741, 0},
- {"algorithm", 1073741836, 0},
- {"parameters", 541081613, 0},
- {"algorithm", 1, 0},
- {"pkcs-1", 1879048204, 0},
- {0, 1073741825, "pkcs"},
- {0, 1, "1"},
- {"rsaEncryption", 1879048204, 0},
- {0, 1073741825, "pkcs-1"},
- {0, 1, "1"},
- {"md2WithRSAEncryption", 1879048204, 0},
- {0, 1073741825, "pkcs-1"},
- {0, 1, "2"},
- {"md5WithRSAEncryption", 1879048204, 0},
- {0, 1073741825, "pkcs-1"},
- {0, 1, "4"},
- {"sha1WithRSAEncryption", 1879048204, 0},
- {0, 1073741825, "pkcs-1"},
- {0, 1, "5"},
- {"id-dsa-with-sha1", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"x9-57", 1073741825, "10040"},
- {"x9algorithm", 1073741825, "4"},
- {0, 1, "3"},
- {"Dss-Sig-Value", 1610612741, 0},
- {"r", 1073741827, 0},
- {"s", 3, 0},
- {"dhpublicnumber", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"ansi-x942", 1073741825, "10046"},
- {"number-type", 1073741825, "2"},
- {0, 1, "1"},
- {"DomainParameters", 1610612741, 0},
- {"p", 1073741827, 0},
- {"g", 1073741827, 0},
- {"q", 1073741827, 0},
- {"j", 1073758211, 0},
- {"validationParms", 16386, "ValidationParms"},
- {"ValidationParms", 1610612741, 0},
- {"seed", 1073741830, 0},
- {"pgenCounter", 3, 0},
- {"id-dsa", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"x9-57", 1073741825, "10040"},
- {"x9algorithm", 1073741825, "4"},
- {0, 1, "1"},
- {"Dss-Parms", 1610612741, 0},
- {"p", 1073741827, 0},
- {"q", 1073741827, 0},
- {"g", 3, 0},
- {"ORAddress", 1610612741, 0},
- {"built-in-standard-attributes", 1073741826, "BuiltInStandardAttributes"},
- {"built-in-domain-defined-attributes", 1073758210,
- "BuiltInDomainDefinedAttributes"},
- {"extension-attributes", 16386, "ExtensionAttributes"},
- {"BuiltInStandardAttributes", 1610612741, 0},
- {"country-name", 1073758210, "CountryName"},
- {"administration-domain-name", 1073758210, "AdministrationDomainName"},
- {"network-address", 1610637314, "NetworkAddress"},
- {0, 2056, "0"},
- {"terminal-identifier", 1610637314, "TerminalIdentifier"},
- {0, 2056, "1"},
- {"private-domain-name", 1610637314, "PrivateDomainName"},
- {0, 2056, "2"},
- {"organization-name", 1610637314, "OrganizationName"},
- {0, 2056, "3"},
- {"numeric-user-identifier", 1610637314, "NumericUserIdentifier"},
- {0, 2056, "4"},
- {"personal-name", 1610637314, "PersonalName"},
- {0, 2056, "5"},
- {"organizational-unit-names", 536895490, "OrganizationalUnitNames"},
- {0, 2056, "6"},
- {"CountryName", 1610620946, 0},
- {0, 1073746952, "1"},
- {"x121-dcc-code", 1612709890, "NumericString"},
- {0, 1048586, "ub-country-name-numeric-length"},
- {"iso-3166-alpha2-code", 538968066, "PrintableString"},
- {0, 1048586, "ub-country-name-alpha-length"},
- {"AdministrationDomainName", 1610620946, 0},
- {0, 1073744904, "2"},
- {"numeric", 1612709890, "NumericString"},
- {"ub-domain-name-length", 524298, "0"},
- {"printable", 538968066, "PrintableString"},
- {"ub-domain-name-length", 524298, "0"},
- {"NetworkAddress", 1073741826, "X121Address"},
- {"X121Address", 1612709890, "NumericString"},
- {"ub-x121-address-length", 524298, "1"},
- {"TerminalIdentifier", 1612709890, "PrintableString"},
- {"ub-terminal-id-length", 524298, "1"},
- {"PrivateDomainName", 1610612754, 0},
- {"numeric", 1612709890, "NumericString"},
- {"ub-domain-name-length", 524298, "1"},
- {"printable", 538968066, "PrintableString"},
- {"ub-domain-name-length", 524298, "1"},
- {"OrganizationName", 1612709890, "PrintableString"},
- {"ub-organization-name-length", 524298, "1"},
- {"NumericUserIdentifier", 1612709890, "NumericString"},
- {"ub-numeric-user-id-length", 524298, "1"},
- {"PersonalName", 1610612750, 0},
- {"surname", 1814044674, "PrintableString"},
- {0, 1073745928, "0"},
- {"ub-surname-length", 524298, "1"},
- {"given-name", 1814061058, "PrintableString"},
- {0, 1073745928, "1"},
- {"ub-given-name-length", 524298, "1"},
- {"initials", 1814061058, "PrintableString"},
- {0, 1073745928, "2"},
- {"ub-initials-length", 524298, "1"},
- {"generation-qualifier", 740319234, "PrintableString"},
- {0, 1073745928, "3"},
- {"ub-generation-qualifier-length", 524298, "1"},
- {"OrganizationalUnitNames", 1612709899, 0},
- {"ub-organizational-units", 1074266122, "1"},
- {0, 2, "OrganizationalUnitName"},
- {"OrganizationalUnitName", 1612709890, "PrintableString"},
- {"ub-organizational-unit-name-length", 524298, "1"},
- {"BuiltInDomainDefinedAttributes", 1612709899, 0},
- {"ub-domain-defined-attributes", 1074266122, "1"},
- {0, 2, "BuiltInDomainDefinedAttribute"},
- {"BuiltInDomainDefinedAttribute", 1610612741, 0},
- {"type", 1612709890, "PrintableString"},
- {"ub-domain-defined-attribute-type-length", 524298, "1"},
- {"value", 538968066, "PrintableString"},
- {"ub-domain-defined-attribute-value-length", 524298, "1"},
- {"ExtensionAttributes", 1612709903, 0},
- {"ub-extension-attributes", 1074266122, "1"},
- {0, 2, "ExtensionAttribute"},
- {"ExtensionAttribute", 1610612741, 0},
- {"extension-attribute-type", 1611145219, 0},
- {0, 1073743880, "0"},
- {"0", 10, "ub-extension-attributes"},
- {"extension-attribute-value", 541073421, 0},
- {0, 1073743880, "1"},
- {"extension-attribute-type", 1, 0},
- {"common-name", 1342177283, "1"},
- {"CommonName", 1612709890, "PrintableString"},
- {"ub-common-name-length", 524298, "1"},
- {"teletex-common-name", 1342177283, "2"},
- {"TeletexCommonName", 1612709890, "TeletexString"},
- {"ub-common-name-length", 524298, "1"},
- {"teletex-organization-name", 1342177283, "3"},
- {"TeletexOrganizationName", 1612709890, "TeletexString"},
- {"ub-organization-name-length", 524298, "1"},
- {"teletex-personal-name", 1342177283, "4"},
- {"TeletexPersonalName", 1610612750, 0},
- {"surname", 1814044674, "TeletexString"},
- {0, 1073743880, "0"},
- {"ub-surname-length", 524298, "1"},
- {"given-name", 1814061058, "TeletexString"},
- {0, 1073743880, "1"},
- {"ub-given-name-length", 524298, "1"},
- {"initials", 1814061058, "TeletexString"},
- {0, 1073743880, "2"},
- {"ub-initials-length", 524298, "1"},
- {"generation-qualifier", 740319234, "TeletexString"},
- {0, 1073743880, "3"},
- {"ub-generation-qualifier-length", 524298, "1"},
- {"teletex-organizational-unit-names", 1342177283, "5"},
- {"TeletexOrganizationalUnitNames", 1612709899, 0},
- {"ub-organizational-units", 1074266122, "1"},
- {0, 2, "TeletexOrganizationalUnitName"},
- {"TeletexOrganizationalUnitName", 1612709890, "TeletexString"},
- {"ub-organizational-unit-name-length", 524298, "1"},
- {"pds-name", 1342177283, "7"},
- {"PDSName", 1612709890, "PrintableString"},
- {"ub-pds-name-length", 524298, "1"},
- {"physical-delivery-country-name", 1342177283, "8"},
- {"PhysicalDeliveryCountryName", 1610612754, 0},
- {"x121-dcc-code", 1612709890, "NumericString"},
- {0, 1048586, "ub-country-name-numeric-length"},
- {"iso-3166-alpha2-code", 538968066, "PrintableString"},
- {0, 1048586, "ub-country-name-alpha-length"},
- {"postal-code", 1342177283, "9"},
- {"PostalCode", 1610612754, 0},
- {"numeric-code", 1612709890, "NumericString"},
- {"ub-postal-code-length", 524298, "1"},
- {"printable-code", 538968066, "PrintableString"},
- {"ub-postal-code-length", 524298, "1"},
- {"physical-delivery-office-name", 1342177283, "10"},
- {"PhysicalDeliveryOfficeName", 1073741826, "PDSParameter"},
- {"physical-delivery-office-number", 1342177283, "11"},
- {"PhysicalDeliveryOfficeNumber", 1073741826, "PDSParameter"},
- {"extension-OR-address-components", 1342177283, "12"},
- {"ExtensionORAddressComponents", 1073741826, "PDSParameter"},
- {"physical-delivery-personal-name", 1342177283, "13"},
- {"PhysicalDeliveryPersonalName", 1073741826, "PDSParameter"},
- {"physical-delivery-organization-name", 1342177283, "14"},
- {"PhysicalDeliveryOrganizationName", 1073741826, "PDSParameter"},
- {"extension-physical-delivery-address-components", 1342177283, "15"},
- {"ExtensionPhysicalDeliveryAddressComponents", 1073741826, "PDSParameter"},
- {"unformatted-postal-address", 1342177283, "16"},
- {"UnformattedPostalAddress", 1610612750, 0},
- {"printable-address", 1814052875, 0},
- {"ub-pds-physical-address-lines", 1074266122, "1"},
- {0, 538968066, "PrintableString"},
- {"ub-pds-parameter-length", 524298, "1"},
- {"teletex-string", 740311042, "TeletexString"},
- {"ub-unformatted-address-length", 524298, "1"},
- {"street-address", 1342177283, "17"},
- {"StreetAddress", 1073741826, "PDSParameter"},
- {"post-office-box-address", 1342177283, "18"},
- {"PostOfficeBoxAddress", 1073741826, "PDSParameter"},
- {"poste-restante-address", 1342177283, "19"},
- {"PosteRestanteAddress", 1073741826, "PDSParameter"},
- {"unique-postal-name", 1342177283, "20"},
- {"UniquePostalName", 1073741826, "PDSParameter"},
- {"local-postal-attributes", 1342177283, "21"},
- {"LocalPostalAttributes", 1073741826, "PDSParameter"},
- {"PDSParameter", 1610612750, 0},
- {"printable-string", 1814052866, "PrintableString"},
- {"ub-pds-parameter-length", 524298, "1"},
- {"teletex-string", 740311042, "TeletexString"},
- {"ub-pds-parameter-length", 524298, "1"},
- {"extended-network-address", 1342177283, "22"},
- {"ExtendedNetworkAddress", 1610612754, 0},
- {"e163-4-address", 1610612741, 0},
- {"number", 1612718082, "NumericString"},
- {0, 1073743880, "0"},
- {"ub-e163-4-number-length", 524298, "1"},
- {"sub-address", 538992642, "NumericString"},
- {0, 1073743880, "1"},
- {"ub-e163-4-sub-address-length", 524298, "1"},
- {"psap-address", 536879106, "PresentationAddress"},
- {0, 2056, "0"},
- {"PresentationAddress", 1610612741, 0},
- {"pSelector", 1610637319, 0},
- {0, 2056, "0"},
- {"sSelector", 1610637319, 0},
- {0, 2056, "1"},
- {"tSelector", 1610637319, 0},
- {0, 2056, "2"},
- {"nAddresses", 538976271, 0},
- {0, 1073743880, "3"},
- {"MAX", 1074266122, "1"},
- {0, 7, 0},
- {"terminal-type", 1342177283, "23"},
- {"TerminalType", 1610874883, 0},
- {"telex", 1073741825, "3"},
- {"teletex", 1073741825, "4"},
- {"g3-facsimile", 1073741825, "5"},
- {"g4-facsimile", 1073741825, "6"},
- {"ia5-terminal", 1073741825, "7"},
- {"videotex", 1, "8"},
- {"teletex-domain-defined-attributes", 1342177283, "6"},
- {"TeletexDomainDefinedAttributes", 1612709899, 0},
- {"ub-domain-defined-attributes", 1074266122, "1"},
- {0, 2, "TeletexDomainDefinedAttribute"},
- {"TeletexDomainDefinedAttribute", 1610612741, 0},
- {"type", 1612709890, "TeletexString"},
- {"ub-domain-defined-attribute-type-length", 524298, "1"},
- {"value", 538968066, "TeletexString"},
- {"ub-domain-defined-attribute-value-length", 524298, "1"},
- {"ub-name", 1342177283, "32768"},
- {"ub-common-name", 1342177283, "64"},
- {"ub-locality-name", 1342177283, "128"},
- {"ub-state-name", 1342177283, "128"},
- {"ub-organization-name", 1342177283, "64"},
- {"ub-organizational-unit-name", 1342177283, "64"},
- {"ub-title", 1342177283, "64"},
- {"ub-match", 1342177283, "128"},
- {"ub-emailaddress-length", 1342177283, "128"},
- {"ub-common-name-length", 1342177283, "64"},
- {"ub-country-name-alpha-length", 1342177283, "2"},
- {"ub-country-name-numeric-length", 1342177283, "3"},
- {"ub-domain-defined-attributes", 1342177283, "4"},
- {"ub-domain-defined-attribute-type-length", 1342177283, "8"},
- {"ub-domain-defined-attribute-value-length", 1342177283, "128"},
- {"ub-domain-name-length", 1342177283, "16"},
- {"ub-extension-attributes", 1342177283, "256"},
- {"ub-e163-4-number-length", 1342177283, "15"},
- {"ub-e163-4-sub-address-length", 1342177283, "40"},
- {"ub-generation-qualifier-length", 1342177283, "3"},
- {"ub-given-name-length", 1342177283, "16"},
- {"ub-initials-length", 1342177283, "5"},
- {"ub-integer-options", 1342177283, "256"},
- {"ub-numeric-user-id-length", 1342177283, "32"},
- {"ub-organization-name-length", 1342177283, "64"},
- {"ub-organizational-unit-name-length", 1342177283, "32"},
- {"ub-organizational-units", 1342177283, "4"},
- {"ub-pds-name-length", 1342177283, "16"},
- {"ub-pds-parameter-length", 1342177283, "30"},
- {"ub-pds-physical-address-lines", 1342177283, "6"},
- {"ub-postal-code-length", 1342177283, "16"},
- {"ub-surname-length", 1342177283, "40"},
- {"ub-terminal-id-length", 1342177283, "24"},
- {"ub-unformatted-address-length", 1342177283, "180"},
- {"ub-x121-address-length", 1342177283, "16"},
- {"pkcs-7-ContentInfo", 1610612741, 0},
- {"contentType", 1073741826, "pkcs-7-ContentType"},
- {"content", 541073421, 0},
- {0, 1073743880, "0"},
- {"contentType", 1, 0},
- {"pkcs-7-DigestInfo", 1610612741, 0},
- {"digestAlgorithm", 1073741826, "pkcs-7-DigestAlgorithmIdentifier"},
- {"digest", 2, "pkcs-7-Digest"},
- {"pkcs-7-Digest", 1073741831, 0},
- {"pkcs-7-ContentType", 1073741836, 0},
- {"pkcs-7-SignedData", 1610612741, 0},
- {"version", 1073741826, "pkcs-7-CMSVersion"},
- {"digestAlgorithms", 1073741826, "pkcs-7-DigestAlgorithmIdentifiers"},
- {"encapContentInfo", 1073741826, "pkcs-7-EncapsulatedContentInfo"},
- {"certificates", 1610637314, "pkcs-7-CertificateSet"},
- {0, 4104, "0"},
- {"crls", 1610637314, "pkcs-7-CertificateRevocationLists"},
- {0, 4104, "1"},
- {"signerInfos", 2, "pkcs-7-SignerInfos"},
- {"pkcs-7-CMSVersion", 1610874883, 0},
- {"v0", 1073741825, "0"},
- {"v1", 1073741825, "1"},
- {"v2", 1073741825, "2"},
- {"v3", 1073741825, "3"},
- {"v4", 1, "4"},
- {"pkcs-7-DigestAlgorithmIdentifiers", 1610612751, 0},
- {0, 2, "pkcs-7-DigestAlgorithmIdentifier"},
- {"pkcs-7-DigestAlgorithmIdentifier", 1073741826, "AlgorithmIdentifier"},
- {"pkcs-7-EncapsulatedContentInfo", 1610612741, 0},
- {"eContentType", 1073741826, "pkcs-7-ContentType"},
- {"eContent", 536895495, 0},
- {0, 2056, "0"},
- {"pkcs-7-CertificateRevocationLists", 1610612751, 0},
- {0, 13, 0},
- {"pkcs-7-CertificateChoices", 1610612754, 0},
- {"certificate", 13, 0},
- {"pkcs-7-CertificateSet", 1610612751, 0},
- {0, 2, "pkcs-7-CertificateChoices"},
- {"pkcs-7-SignerInfos", 1610612751, 0},
- {0, 13, 0},
- {"pkcs-10-CertificationRequestInfo", 1610612741, 0},
- {"version", 1610874883, 0},
- {"v1", 1, "0"},
- {"subject", 1073741826, "Name"},
- {"subjectPKInfo", 1073741826, "SubjectPublicKeyInfo"},
- {"attributes", 536879106, "Attributes"},
- {0, 4104, "0"},
- {"Attributes", 1610612751, 0},
- {0, 2, "Attribute"},
- {"pkcs-10-CertificationRequest", 1610612741, 0},
- {"certificationRequestInfo", 1073741826, "pkcs-10-CertificationRequestInfo"},
- {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
- {"signature", 6, 0},
- {"pkcs-9-ub-challengePassword", 1342177283, "255"},
- {"pkcs-9-certTypes", 1879048204, 0},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "22"},
- {"pkcs-9-crlTypes", 1879048204, 0},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "23"},
- {"pkcs-9-at-challengePassword", 1879048204, 0},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "7"},
- {"pkcs-9-challengePassword", 1610612754, 0},
- {"printableString", 1612709890, "PrintableString"},
- {"pkcs-9-ub-challengePassword", 524298, "1"},
- {"utf8String", 538968066, "UTF8String"},
- {"pkcs-9-ub-challengePassword", 524298, "1"},
- {"pkcs-9-at-localKeyId", 1879048204, 0},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "21"},
- {"pkcs-9-localKeyId", 1073741831, 0},
- {"pkcs-9-at-friendlyName", 1879048204, 0},
- {0, 1073741825, "pkcs-9"},
- {0, 1, "20"},
- {"pkcs-9-friendlyName", 1612709890, "BMPString"},
- {"255", 524298, "1"},
- {"pkcs-8-PrivateKeyInfo", 1610612741, 0},
- {"version", 1073741826, "pkcs-8-Version"},
- {"privateKeyAlgorithm", 1073741826, "AlgorithmIdentifier"},
- {"privateKey", 1073741826, "pkcs-8-PrivateKey"},
- {"attributes", 536895490, "Attributes"},
- {0, 4104, "0"},
- {"pkcs-8-Version", 1610874883, 0},
- {"v1", 1, "0"},
- {"pkcs-8-PrivateKey", 1073741831, 0},
- {"pkcs-8-Attributes", 1610612751, 0},
- {0, 2, "Attribute"},
- {"pkcs-8-EncryptedPrivateKeyInfo", 1610612741, 0},
- {"encryptionAlgorithm", 1073741826, "AlgorithmIdentifier"},
- {"encryptedData", 2, "pkcs-8-EncryptedData"},
- {"pkcs-8-EncryptedData", 1073741831, 0},
- {"pkcs-5", 1879048204, 0},
- {0, 1073741825, "pkcs"},
- {0, 1, "5"},
- {"pkcs-5-encryptionAlgorithm", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"rsadsi", 1073741825, "113549"},
- {0, 1, "3"},
- {"pkcs-5-des-EDE3-CBC", 1879048204, 0},
- {0, 1073741825, "pkcs-5-encryptionAlgorithm"},
- {0, 1, "7"},
- {"pkcs-5-des-EDE3-CBC-params", 1612709895, 0},
- {0, 1048586, "8"},
- {"pkcs-5-id-PBES2", 1879048204, 0},
- {0, 1073741825, "pkcs-5"},
- {0, 1, "13"},
- {"pkcs-5-PBES2-params", 1610612741, 0},
- {"keyDerivationFunc", 1073741826, "AlgorithmIdentifier"},
- {"encryptionScheme", 2, "AlgorithmIdentifier"},
- {"pkcs-5-id-PBKDF2", 1879048204, 0},
- {0, 1073741825, "pkcs-5"},
- {0, 1, "12"},
- {"pkcs-5-PBKDF2-params", 1610612741, 0},
- {"salt", 1610612754, 0},
- {"specified", 1073741831, 0},
- {"otherSource", 2, "AlgorithmIdentifier"},
- {"iterationCount", 1611137027, 0},
- {"1", 10, "MAX"},
- {"keyLength", 1611153411, 0},
- {"1", 10, "MAX"},
- {"prf", 16386, "AlgorithmIdentifier"},
- {"pkcs-12", 1879048204, 0},
- {0, 1073741825, "pkcs"},
- {0, 1, "12"},
- {"pkcs-12-PFX", 1610612741, 0},
- {"version", 1610874883, 0},
- {"v3", 1, "3"},
- {"authSafe", 1073741826, "pkcs-7-ContentInfo"},
- {"macData", 16386, "pkcs-12-MacData"},
- {"pkcs-12-PbeParams", 1610612741, 0},
- {"salt", 1073741831, 0},
- {"iterations", 3, 0},
- {"pkcs-12-MacData", 1610612741, 0},
- {"mac", 1073741826, "pkcs-7-DigestInfo"},
- {"macSalt", 1073741831, 0},
- {"iterations", 536903683, 0},
- {0, 9, "1"},
- {"pkcs-12-AuthenticatedSafe", 1610612747, 0},
- {0, 2, "pkcs-7-ContentInfo"},
- {"pkcs-12-SafeContents", 1610612747, 0},
- {0, 2, "pkcs-12-SafeBag"},
- {"pkcs-12-SafeBag", 1610612741, 0},
- {"bagId", 1073741836, 0},
- {"bagValue", 1614815245, 0},
- {0, 1073743880, "0"},
- {"badId", 1, 0},
- {"bagAttributes", 536887311, 0},
- {0, 2, "pkcs-12-PKCS12Attribute"},
- {"pkcs-12-bagtypes", 1879048204, 0},
- {0, 1073741825, "pkcs-12"},
- {0, 1073741825, "10"},
- {0, 1, "1"},
- {"pkcs-12-keyBag", 1879048204, 0},
- {0, 1073741825, "pkcs-12-bagtypes"},
- {0, 1, "1"},
- {"pkcs-12-pkcs8ShroudedKeyBag", 1879048204, 0},
- {0, 1073741825, "pkcs-12-bagtypes"},
- {0, 1, "2"},
- {"pkcs-12-certBag", 1879048204, 0},
- {0, 1073741825, "pkcs-12-bagtypes"},
- {0, 1, "3"},
- {"pkcs-12-crlBag", 1879048204, 0},
- {0, 1073741825, "pkcs-12-bagtypes"},
- {0, 1, "4"},
- {"pkcs-12-KeyBag", 1073741826, "pkcs-8-PrivateKeyInfo"},
- {"pkcs-12-PKCS8ShroudedKeyBag", 1073741826, "pkcs-8-EncryptedPrivateKeyInfo"},
- {"pkcs-12-CertBag", 1610612741, 0},
- {"certId", 1073741836, 0},
- {"certValue", 541073421, 0},
- {0, 1073743880, "0"},
- {"certId", 1, 0},
- {"pkcs-12-CRLBag", 1610612741, 0},
- {"crlId", 1073741836, 0},
- {"crlValue", 541073421, 0},
- {0, 1073743880, "0"},
- {"crlId", 1, 0},
- {"pkcs-12-PKCS12Attribute", 1073741826, "Attribute"},
- {"pkcs-7-data", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"rsadsi", 1073741825, "113549"},
- {"pkcs", 1073741825, "1"},
- {"pkcs7", 1073741825, "7"},
- {0, 1, "1"},
- {"pkcs-7-encryptedData", 1879048204, 0},
- {"iso", 1073741825, "1"},
- {"member-body", 1073741825, "2"},
- {"us", 1073741825, "840"},
- {"rsadsi", 1073741825, "113549"},
- {"pkcs", 1073741825, "1"},
- {"pkcs7", 1073741825, "7"},
- {0, 1, "6"},
- {"pkcs-7-Data", 1073741831, 0},
- {"pkcs-7-EncryptedData", 1610612741, 0},
- {"version", 1073741826, "pkcs-7-CMSVersion"},
- {"encryptedContentInfo", 1073741826, "pkcs-7-EncryptedContentInfo"},
- {"unprotectedAttrs", 536895490, "pkcs-7-UnprotectedAttributes"},
- {0, 4104, "1"},
- {"pkcs-7-EncryptedContentInfo", 1610612741, 0},
- {"contentType", 1073741826, "pkcs-7-ContentType"},
- {"contentEncryptionAlgorithm", 1073741826,
- "pkcs-7-ContentEncryptionAlgorithmIdentifier"},
- {"encryptedContent", 536895490, "pkcs-7-EncryptedContent"},
- {0, 4104, "0"},
- {"pkcs-7-ContentEncryptionAlgorithmIdentifier", 1073741826,
- "AlgorithmIdentifier"},
- {"pkcs-7-EncryptedContent", 1073741831, 0},
- {"pkcs-7-UnprotectedAttributes", 1612709903, 0},
- {"MAX", 1074266122, "1"},
- {0, 2, "Attribute"},
- {"id-at-ldap-DC", 1880096780, "AttributeType"},
- {0, 1073741825, "0"},
- {0, 1073741825, "9"},
- {0, 1073741825, "2342"},
- {0, 1073741825, "19200300"},
- {0, 1073741825, "100"},
- {0, 1073741825, "1"},
- {0, 1, "25"},
- {"ldap-DC", 1073741826, "IA5String"},
- {"id-at-ldap-UID", 1880096780, "AttributeType"},
- {0, 1073741825, "0"},
- {0, 1073741825, "9"},
- {0, 1073741825, "2342"},
- {0, 1073741825, "19200300"},
- {0, 1073741825, "100"},
- {0, 1073741825, "1"},
- {0, 1, "1"},
- {"ldap-UID", 1073741826, "DirectoryString"},
- {"id-pda", 1879048204, 0},
- {0, 1073741825, "id-pkix"},
- {0, 1, "9"},
- {"id-pda-dateOfBirth", 1880096780, "AttributeType"},
- {0, 1073741825, "id-pda"},
- {0, 1, "1"},
- {"DateOfBirth", 1082130449, 0},
- {"id-pda-placeOfBirth", 1880096780, "AttributeType"},
- {0, 1073741825, "id-pda"},
- {0, 1, "2"},
- {"PlaceOfBirth", 1073741826, "DirectoryString"},
- {"id-pda-gender", 1880096780, "AttributeType"},
- {0, 1073741825, "id-pda"},
- {0, 1, "3"},
- {"Gender", 1612709890, "PrintableString"},
- {0, 1048586, "1"},
- {"id-pda-countryOfCitizenship", 1880096780, "AttributeType"},
- {0, 1073741825, "id-pda"},
- {0, 1, "4"},
- {"CountryOfCitizenship", 1612709890, "PrintableString"},
- {0, 1048586, "2"},
- {"id-pda-countryOfResidence", 1880096780, "AttributeType"},
- {0, 1073741825, "id-pda"},
- {0, 1, "5"},
- {"CountryOfResidence", 538968066, "PrintableString"},
- {0, 1048586, "2"},
- {0, 0, 0}
-};
diff --git a/tests/unit/pkix_asn1_tab.c.inc b/tests/unit/pkix_asn1_tab.c.inc
new file mode 100644
index 0000000..fe29c41
--- /dev/null
+++ b/tests/unit/pkix_asn1_tab.c.inc
@@ -0,0 +1,1102 @@
+/*
+ * This file is taken from gnutls 1.6.3 under the GPLv2+
+ * and is under copyright of various GNUTLS contributors.
+ */
+
+static const asn1_static_node pkix_asn1_tab[] = {
+ {"PKIX1", 536875024, 0},
+ {0, 1073741836, 0},
+ {"id-ce", 1879048204, 0},
+ {"joint-iso-ccitt", 1073741825, "2"},
+ {"ds", 1073741825, "5"},
+ {0, 1, "29"},
+ {"id-ce-authorityKeyIdentifier", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "35"},
+ {"AuthorityKeyIdentifier", 1610612741, 0},
+ {"keyIdentifier", 1610637314, "KeyIdentifier"},
+ {0, 4104, "0"},
+ {"authorityCertIssuer", 1610637314, "GeneralNames"},
+ {0, 4104, "1"},
+ {"authorityCertSerialNumber", 536895490, "CertificateSerialNumber"},
+ {0, 4104, "2"},
+ {"KeyIdentifier", 1073741831, 0},
+ {"id-ce-subjectKeyIdentifier", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "14"},
+ {"SubjectKeyIdentifier", 1073741826, "KeyIdentifier"},
+ {"id-ce-keyUsage", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "15"},
+ {"KeyUsage", 1610874886, 0},
+ {"digitalSignature", 1073741825, "0"},
+ {"nonRepudiation", 1073741825, "1"},
+ {"keyEncipherment", 1073741825, "2"},
+ {"dataEncipherment", 1073741825, "3"},
+ {"keyAgreement", 1073741825, "4"},
+ {"keyCertSign", 1073741825, "5"},
+ {"cRLSign", 1073741825, "6"},
+ {"encipherOnly", 1073741825, "7"},
+ {"decipherOnly", 1, "8"},
+ {"id-ce-privateKeyUsagePeriod", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "16"},
+ {"PrivateKeyUsagePeriod", 1610612741, 0},
+ {"notBefore", 1619025937, 0},
+ {0, 4104, "0"},
+ {"notAfter", 545284113, 0},
+ {0, 4104, "1"},
+ {"id-ce-certificatePolicies", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "32"},
+ {"CertificatePolicies", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "PolicyInformation"},
+ {"PolicyInformation", 1610612741, 0},
+ {"policyIdentifier", 1073741826, "CertPolicyId"},
+ {"policyQualifiers", 538984459, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "PolicyQualifierInfo"},
+ {"CertPolicyId", 1073741836, 0},
+ {"PolicyQualifierInfo", 1610612741, 0},
+ {"policyQualifierId", 1073741826, "PolicyQualifierId"},
+ {"qualifier", 541065229, 0},
+ {"policyQualifierId", 1, 0},
+ {"PolicyQualifierId", 1073741836, 0},
+ {"CPSuri", 1073741826, "IA5String"},
+ {"UserNotice", 1610612741, 0},
+ {"noticeRef", 1073758210, "NoticeReference"},
+ {"explicitText", 16386, "DisplayText"},
+ {"NoticeReference", 1610612741, 0},
+ {"organization", 1073741826, "DisplayText"},
+ {"noticeNumbers", 536870923, 0},
+ {0, 3, 0},
+ {"DisplayText", 1610612754, 0},
+ {"visibleString", 1612709890, "VisibleString"},
+ {"200", 524298, "1"},
+ {"bmpString", 1612709890, "BMPString"},
+ {"200", 524298, "1"},
+ {"utf8String", 538968066, "UTF8String"},
+ {"200", 524298, "1"},
+ {"id-ce-policyMappings", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "33"},
+ {"PolicyMappings", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 536870917, 0},
+ {"issuerDomainPolicy", 1073741826, "CertPolicyId"},
+ {"subjectDomainPolicy", 2, "CertPolicyId"},
+ {"DirectoryString", 1610612754, 0},
+ {"teletexString", 1612709890, "TeletexString"},
+ {"MAX", 524298, "1"},
+ {"printableString", 1612709890, "PrintableString"},
+ {"MAX", 524298, "1"},
+ {"universalString", 1612709890, "UniversalString"},
+ {"MAX", 524298, "1"},
+ {"utf8String", 1612709890, "UTF8String"},
+ {"MAX", 524298, "1"},
+ {"bmpString", 1612709890, "BMPString"},
+ {"MAX", 524298, "1"},
+ {"ia5String", 538968066, "IA5String"},
+ {"MAX", 524298, "1"},
+ {"id-ce-subjectAltName", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "17"},
+ {"SubjectAltName", 1073741826, "GeneralNames"},
+ {"GeneralNames", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "GeneralName"},
+ {"GeneralName", 1610612754, 0},
+ {"otherName", 1610620930, "AnotherName"},
+ {0, 4104, "0"},
+ {"rfc822Name", 1610620930, "IA5String"},
+ {0, 4104, "1"},
+ {"dNSName", 1610620930, "IA5String"},
+ {0, 4104, "2"},
+ {"x400Address", 1610620930, "ORAddress"},
+ {0, 4104, "3"},
+ {"directoryName", 1610620930, "RDNSequence"},
+ {0, 2056, "4"},
+ {"ediPartyName", 1610620930, "EDIPartyName"},
+ {0, 4104, "5"},
+ {"uniformResourceIdentifier", 1610620930, "IA5String"},
+ {0, 4104, "6"},
+ {"iPAddress", 1610620935, 0},
+ {0, 4104, "7"},
+ {"registeredID", 536879116, 0},
+ {0, 4104, "8"},
+ {"AnotherName", 1610612741, 0},
+ {"type-id", 1073741836, 0},
+ {"value", 541073421, 0},
+ {0, 1073743880, "0"},
+ {"type-id", 1, 0},
+ {"EDIPartyName", 1610612741, 0},
+ {"nameAssigner", 1610637314, "DirectoryString"},
+ {0, 4104, "0"},
+ {"partyName", 536879106, "DirectoryString"},
+ {0, 4104, "1"},
+ {"id-ce-issuerAltName", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "18"},
+ {"IssuerAltName", 1073741826, "GeneralNames"},
+ {"id-ce-subjectDirectoryAttributes", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "9"},
+ {"SubjectDirectoryAttributes", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "Attribute"},
+ {"id-ce-basicConstraints", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "19"},
+ {"BasicConstraints", 1610612741, 0},
+ {"cA", 1610645508, 0},
+ {0, 131081, 0},
+ {"pathLenConstraint", 537411587, 0},
+ {"0", 10, "MAX"},
+ {"id-ce-nameConstraints", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "30"},
+ {"NameConstraints", 1610612741, 0},
+ {"permittedSubtrees", 1610637314, "GeneralSubtrees"},
+ {0, 4104, "0"},
+ {"excludedSubtrees", 536895490, "GeneralSubtrees"},
+ {0, 4104, "1"},
+ {"GeneralSubtrees", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "GeneralSubtree"},
+ {"GeneralSubtree", 1610612741, 0},
+ {"base", 1073741826, "GeneralName"},
+ {"minimum", 1610653698, "BaseDistance"},
+ {0, 1073741833, "0"},
+ {0, 4104, "0"},
+ {"maximum", 536895490, "BaseDistance"},
+ {0, 4104, "1"},
+ {"BaseDistance", 1611137027, 0},
+ {"0", 10, "MAX"},
+ {"id-ce-policyConstraints", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "36"},
+ {"PolicyConstraints", 1610612741, 0},
+ {"requireExplicitPolicy", 1610637314, "SkipCerts"},
+ {0, 4104, "0"},
+ {"inhibitPolicyMapping", 536895490, "SkipCerts"},
+ {0, 4104, "1"},
+ {"SkipCerts", 1611137027, 0},
+ {"0", 10, "MAX"},
+ {"id-ce-cRLDistributionPoints", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "31"},
+ {"CRLDistributionPoints", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "DistributionPoint"},
+ {"DistributionPoint", 1610612741, 0},
+ {"distributionPoint", 1610637314, "DistributionPointName"},
+ {0, 2056, "0"},
+ {"reasons", 1610637314, "ReasonFlags"},
+ {0, 4104, "1"},
+ {"cRLIssuer", 536895490, "GeneralNames"},
+ {0, 4104, "2"},
+ {"DistributionPointName", 1610612754, 0},
+ {"fullName", 1610620930, "GeneralNames"},
+ {0, 4104, "0"},
+ {"nameRelativeToCRLIssuer", 536879106, "RelativeDistinguishedName"},
+ {0, 4104, "1"},
+ {"ReasonFlags", 1610874886, 0},
+ {"unused", 1073741825, "0"},
+ {"keyCompromise", 1073741825, "1"},
+ {"cACompromise", 1073741825, "2"},
+ {"affiliationChanged", 1073741825, "3"},
+ {"superseded", 1073741825, "4"},
+ {"cessationOfOperation", 1073741825, "5"},
+ {"certificateHold", 1073741825, "6"},
+ {"privilegeWithdrawn", 1073741825, "7"},
+ {"aACompromise", 1, "8"},
+ {"id-ce-extKeyUsage", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "37"},
+ {"ExtKeyUsageSyntax", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "KeyPurposeId"},
+ {"KeyPurposeId", 1073741836, 0},
+ {"id-kp-serverAuth", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "1"},
+ {"id-kp-clientAuth", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "2"},
+ {"id-kp-codeSigning", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "3"},
+ {"id-kp-emailProtection", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "4"},
+ {"id-kp-ipsecEndSystem", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "5"},
+ {"id-kp-ipsecTunnel", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "6"},
+ {"id-kp-ipsecUser", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "7"},
+ {"id-kp-timeStamping", 1879048204, 0},
+ {0, 1073741825, "id-kp"},
+ {0, 1, "8"},
+ {"id-pe-authorityInfoAccess", 1879048204, 0},
+ {0, 1073741825, "id-pe"},
+ {0, 1, "1"},
+ {"AuthorityInfoAccessSyntax", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "AccessDescription"},
+ {"AccessDescription", 1610612741, 0},
+ {"accessMethod", 1073741836, 0},
+ {"accessLocation", 2, "GeneralName"},
+ {"id-ce-cRLNumber", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "20"},
+ {"CRLNumber", 1611137027, 0},
+ {"0", 10, "MAX"},
+ {"id-ce-issuingDistributionPoint", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "28"},
+ {"IssuingDistributionPoint", 1610612741, 0},
+ {"distributionPoint", 1610637314, "DistributionPointName"},
+ {0, 4104, "0"},
+ {"onlyContainsUserCerts", 1610653700, 0},
+ {0, 1073872905, 0},
+ {0, 4104, "1"},
+ {"onlyContainsCACerts", 1610653700, 0},
+ {0, 1073872905, 0},
+ {0, 4104, "2"},
+ {"onlySomeReasons", 1610637314, "ReasonFlags"},
+ {0, 4104, "3"},
+ {"indirectCRL", 536911876, 0},
+ {0, 1073872905, 0},
+ {0, 4104, "4"},
+ {"id-ce-deltaCRLIndicator", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "27"},
+ {"BaseCRLNumber", 1073741826, "CRLNumber"},
+ {"id-ce-cRLReasons", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "21"},
+ {"CRLReason", 1610874901, 0},
+ {"unspecified", 1073741825, "0"},
+ {"keyCompromise", 1073741825, "1"},
+ {"cACompromise", 1073741825, "2"},
+ {"affiliationChanged", 1073741825, "3"},
+ {"superseded", 1073741825, "4"},
+ {"cessationOfOperation", 1073741825, "5"},
+ {"certificateHold", 1073741825, "6"},
+ {"removeFromCRL", 1, "8"},
+ {"id-ce-certificateIssuer", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "29"},
+ {"CertificateIssuer", 1073741826, "GeneralNames"},
+ {"id-ce-holdInstructionCode", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "23"},
+ {"HoldInstructionCode", 1073741836, 0},
+ {"holdInstruction", 1879048204, 0},
+ {"joint-iso-itu-t", 1073741825, "2"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"x9cm", 1073741825, "10040"},
+ {0, 1, "2"},
+ {"id-holdinstruction-none", 1879048204, 0},
+ {0, 1073741825, "holdInstruction"},
+ {0, 1, "1"},
+ {"id-holdinstruction-callissuer", 1879048204, 0},
+ {0, 1073741825, "holdInstruction"},
+ {0, 1, "2"},
+ {"id-holdinstruction-reject", 1879048204, 0},
+ {0, 1073741825, "holdInstruction"},
+ {0, 1, "3"},
+ {"id-ce-invalidityDate", 1879048204, 0},
+ {0, 1073741825, "id-ce"},
+ {0, 1, "24"},
+ {"InvalidityDate", 1082130449, 0},
+ {"VisibleString", 1610620935, 0},
+ {0, 4360, "26"},
+ {"NumericString", 1610620935, 0},
+ {0, 4360, "18"},
+ {"IA5String", 1610620935, 0},
+ {0, 4360, "22"},
+ {"TeletexString", 1610620935, 0},
+ {0, 4360, "20"},
+ {"PrintableString", 1610620935, 0},
+ {0, 4360, "19"},
+ {"UniversalString", 1610620935, 0},
+ {0, 4360, "28"},
+ {"BMPString", 1610620935, 0},
+ {0, 4360, "30"},
+ {"UTF8String", 1610620935, 0},
+ {0, 4360, "12"},
+ {"id-pkix", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"identified-organization", 1073741825, "3"},
+ {"dod", 1073741825, "6"},
+ {"internet", 1073741825, "1"},
+ {"security", 1073741825, "5"},
+ {"mechanisms", 1073741825, "5"},
+ {"pkix", 1, "7"},
+ {"id-pe", 1879048204, 0},
+ {0, 1073741825, "id-pkix"},
+ {0, 1, "1"},
+ {"id-qt", 1879048204, 0},
+ {0, 1073741825, "id-pkix"},
+ {0, 1, "2"},
+ {"id-kp", 1879048204, 0},
+ {0, 1073741825, "id-pkix"},
+ {0, 1, "3"},
+ {"id-ad", 1879048204, 0},
+ {0, 1073741825, "id-pkix"},
+ {0, 1, "48"},
+ {"id-qt-cps", 1879048204, 0},
+ {0, 1073741825, "id-qt"},
+ {0, 1, "1"},
+ {"id-qt-unotice", 1879048204, 0},
+ {0, 1073741825, "id-qt"},
+ {0, 1, "2"},
+ {"id-ad-ocsp", 1879048204, 0},
+ {0, 1073741825, "id-ad"},
+ {0, 1, "1"},
+ {"id-ad-caIssuers", 1879048204, 0},
+ {0, 1073741825, "id-ad"},
+ {0, 1, "2"},
+ {"Attribute", 1610612741, 0},
+ {"type", 1073741826, "AttributeType"},
+ {"values", 536870927, 0},
+ {0, 2, "AttributeValue"},
+ {"AttributeType", 1073741836, 0},
+ {"AttributeValue", 1614807053, 0},
+ {"type", 1, 0},
+ {"AttributeTypeAndValue", 1610612741, 0},
+ {"type", 1073741826, "AttributeType"},
+ {"value", 2, "AttributeValue"},
+ {"id-at", 1879048204, 0},
+ {"joint-iso-ccitt", 1073741825, "2"},
+ {"ds", 1073741825, "5"},
+ {0, 1, "4"},
+ {"id-at-initials", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "43"},
+ {"X520initials", 1073741826, "DirectoryString"},
+ {"id-at-generationQualifier", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "44"},
+ {"X520generationQualifier", 1073741826, "DirectoryString"},
+ {"id-at-surname", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "4"},
+ {"X520surName", 1073741826, "DirectoryString"},
+ {"id-at-givenName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "42"},
+ {"X520givenName", 1073741826, "DirectoryString"},
+ {"id-at-name", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "41"},
+ {"X520name", 1073741826, "DirectoryString"},
+ {"id-at-commonName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "3"},
+ {"X520CommonName", 1073741826, "DirectoryString"},
+ {"id-at-localityName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "7"},
+ {"X520LocalityName", 1073741826, "DirectoryString"},
+ {"id-at-stateOrProvinceName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "8"},
+ {"X520StateOrProvinceName", 1073741826, "DirectoryString"},
+ {"id-at-organizationName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "10"},
+ {"X520OrganizationName", 1073741826, "DirectoryString"},
+ {"id-at-organizationalUnitName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "11"},
+ {"X520OrganizationalUnitName", 1073741826, "DirectoryString"},
+ {"id-at-title", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "12"},
+ {"X520Title", 1073741826, "DirectoryString"},
+ {"id-at-description", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "13"},
+ {"X520Description", 1073741826, "DirectoryString"},
+ {"id-at-dnQualifier", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "46"},
+ {"X520dnQualifier", 1073741826, "PrintableString"},
+ {"id-at-countryName", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "6"},
+ {"X520countryName", 1612709890, "PrintableString"},
+ {0, 1048586, "2"},
+ {"id-at-serialNumber", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "5"},
+ {"X520serialNumber", 1073741826, "PrintableString"},
+ {"id-at-telephoneNumber", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "20"},
+ {"X520telephoneNumber", 1073741826, "PrintableString"},
+ {"id-at-facsimileTelephoneNumber", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "23"},
+ {"X520facsimileTelephoneNumber", 1073741826, "PrintableString"},
+ {"id-at-pseudonym", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "65"},
+ {"X520pseudonym", 1073741826, "DirectoryString"},
+ {"id-at-name", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "41"},
+ {"X520name", 1073741826, "DirectoryString"},
+ {"id-at-streetAddress", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "9"},
+ {"X520streetAddress", 1073741826, "DirectoryString"},
+ {"id-at-postalAddress", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-at"},
+ {0, 1, "16"},
+ {"X520postalAddress", 1073741826, "PostalAddress"},
+ {"PostalAddress", 1610612747, 0},
+ {0, 2, "DirectoryString"},
+ {"pkcs", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"rsadsi", 1073741825, "113549"},
+ {"pkcs", 1, "1"},
+ {"pkcs-9", 1879048204, 0},
+ {0, 1073741825, "pkcs"},
+ {0, 1, "9"},
+ {"emailAddress", 1880096780, "AttributeType"},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "1"},
+ {"Pkcs9email", 1612709890, "IA5String"},
+ {"ub-emailaddress-length", 524298, "1"},
+ {"Name", 1610612754, 0},
+ {"rdnSequence", 2, "RDNSequence"},
+ {"RDNSequence", 1610612747, 0},
+ {0, 2, "RelativeDistinguishedName"},
+ {"DistinguishedName", 1073741826, "RDNSequence"},
+ {"RelativeDistinguishedName", 1612709903, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "AttributeTypeAndValue"},
+ {"Certificate", 1610612741, 0},
+ {"tbsCertificate", 1073741826, "TBSCertificate"},
+ {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
+ {"signature", 6, 0},
+ {"TBSCertificate", 1610612741, 0},
+ {"version", 1610653698, "Version"},
+ {0, 1073741833, "v1"},
+ {0, 2056, "0"},
+ {"serialNumber", 1073741826, "CertificateSerialNumber"},
+ {"signature", 1073741826, "AlgorithmIdentifier"},
+ {"issuer", 1073741826, "Name"},
+ {"validity", 1073741826, "Validity"},
+ {"subject", 1073741826, "Name"},
+ {"subjectPublicKeyInfo", 1073741826, "SubjectPublicKeyInfo"},
+ {"issuerUniqueID", 1610637314, "UniqueIdentifier"},
+ {0, 4104, "1"},
+ {"subjectUniqueID", 1610637314, "UniqueIdentifier"},
+ {0, 4104, "2"},
+ {"extensions", 536895490, "Extensions"},
+ {0, 2056, "3"},
+ {"Version", 1610874883, 0},
+ {"v1", 1073741825, "0"},
+ {"v2", 1073741825, "1"},
+ {"v3", 1, "2"},
+ {"CertificateSerialNumber", 1073741827, 0},
+ {"Validity", 1610612741, 0},
+ {"notBefore", 1073741826, "Time"},
+ {"notAfter", 2, "Time"},
+ {"Time", 1610612754, 0},
+ {"utcTime", 1090519057, 0},
+ {"generalTime", 8388625, 0},
+ {"UniqueIdentifier", 1073741830, 0},
+ {"SubjectPublicKeyInfo", 1610612741, 0},
+ {"algorithm", 1073741826, "AlgorithmIdentifier"},
+ {"subjectPublicKey", 6, 0},
+ {"Extensions", 1612709899, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "Extension"},
+ {"Extension", 1610612741, 0},
+ {"extnID", 1073741836, 0},
+ {"critical", 1610645508, 0},
+ {0, 131081, 0},
+ {"extnValue", 7, 0},
+ {"CertificateList", 1610612741, 0},
+ {"tbsCertList", 1073741826, "TBSCertList"},
+ {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
+ {"signature", 6, 0},
+ {"TBSCertList", 1610612741, 0},
+ {"version", 1073758210, "Version"},
+ {"signature", 1073741826, "AlgorithmIdentifier"},
+ {"issuer", 1073741826, "Name"},
+ {"thisUpdate", 1073741826, "Time"},
+ {"nextUpdate", 1073758210, "Time"},
+ {"revokedCertificates", 1610629131, 0},
+ {0, 536870917, 0},
+ {"userCertificate", 1073741826, "CertificateSerialNumber"},
+ {"revocationDate", 1073741826, "Time"},
+ {"crlEntryExtensions", 16386, "Extensions"},
+ {"crlExtensions", 536895490, "Extensions"},
+ {0, 2056, "0"},
+ {"AlgorithmIdentifier", 1610612741, 0},
+ {"algorithm", 1073741836, 0},
+ {"parameters", 541081613, 0},
+ {"algorithm", 1, 0},
+ {"pkcs-1", 1879048204, 0},
+ {0, 1073741825, "pkcs"},
+ {0, 1, "1"},
+ {"rsaEncryption", 1879048204, 0},
+ {0, 1073741825, "pkcs-1"},
+ {0, 1, "1"},
+ {"md2WithRSAEncryption", 1879048204, 0},
+ {0, 1073741825, "pkcs-1"},
+ {0, 1, "2"},
+ {"md5WithRSAEncryption", 1879048204, 0},
+ {0, 1073741825, "pkcs-1"},
+ {0, 1, "4"},
+ {"sha1WithRSAEncryption", 1879048204, 0},
+ {0, 1073741825, "pkcs-1"},
+ {0, 1, "5"},
+ {"id-dsa-with-sha1", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"x9-57", 1073741825, "10040"},
+ {"x9algorithm", 1073741825, "4"},
+ {0, 1, "3"},
+ {"Dss-Sig-Value", 1610612741, 0},
+ {"r", 1073741827, 0},
+ {"s", 3, 0},
+ {"dhpublicnumber", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"ansi-x942", 1073741825, "10046"},
+ {"number-type", 1073741825, "2"},
+ {0, 1, "1"},
+ {"DomainParameters", 1610612741, 0},
+ {"p", 1073741827, 0},
+ {"g", 1073741827, 0},
+ {"q", 1073741827, 0},
+ {"j", 1073758211, 0},
+ {"validationParms", 16386, "ValidationParms"},
+ {"ValidationParms", 1610612741, 0},
+ {"seed", 1073741830, 0},
+ {"pgenCounter", 3, 0},
+ {"id-dsa", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"x9-57", 1073741825, "10040"},
+ {"x9algorithm", 1073741825, "4"},
+ {0, 1, "1"},
+ {"Dss-Parms", 1610612741, 0},
+ {"p", 1073741827, 0},
+ {"q", 1073741827, 0},
+ {"g", 3, 0},
+ {"ORAddress", 1610612741, 0},
+ {"built-in-standard-attributes", 1073741826, "BuiltInStandardAttributes"},
+ {"built-in-domain-defined-attributes", 1073758210,
+ "BuiltInDomainDefinedAttributes"},
+ {"extension-attributes", 16386, "ExtensionAttributes"},
+ {"BuiltInStandardAttributes", 1610612741, 0},
+ {"country-name", 1073758210, "CountryName"},
+ {"administration-domain-name", 1073758210, "AdministrationDomainName"},
+ {"network-address", 1610637314, "NetworkAddress"},
+ {0, 2056, "0"},
+ {"terminal-identifier", 1610637314, "TerminalIdentifier"},
+ {0, 2056, "1"},
+ {"private-domain-name", 1610637314, "PrivateDomainName"},
+ {0, 2056, "2"},
+ {"organization-name", 1610637314, "OrganizationName"},
+ {0, 2056, "3"},
+ {"numeric-user-identifier", 1610637314, "NumericUserIdentifier"},
+ {0, 2056, "4"},
+ {"personal-name", 1610637314, "PersonalName"},
+ {0, 2056, "5"},
+ {"organizational-unit-names", 536895490, "OrganizationalUnitNames"},
+ {0, 2056, "6"},
+ {"CountryName", 1610620946, 0},
+ {0, 1073746952, "1"},
+ {"x121-dcc-code", 1612709890, "NumericString"},
+ {0, 1048586, "ub-country-name-numeric-length"},
+ {"iso-3166-alpha2-code", 538968066, "PrintableString"},
+ {0, 1048586, "ub-country-name-alpha-length"},
+ {"AdministrationDomainName", 1610620946, 0},
+ {0, 1073744904, "2"},
+ {"numeric", 1612709890, "NumericString"},
+ {"ub-domain-name-length", 524298, "0"},
+ {"printable", 538968066, "PrintableString"},
+ {"ub-domain-name-length", 524298, "0"},
+ {"NetworkAddress", 1073741826, "X121Address"},
+ {"X121Address", 1612709890, "NumericString"},
+ {"ub-x121-address-length", 524298, "1"},
+ {"TerminalIdentifier", 1612709890, "PrintableString"},
+ {"ub-terminal-id-length", 524298, "1"},
+ {"PrivateDomainName", 1610612754, 0},
+ {"numeric", 1612709890, "NumericString"},
+ {"ub-domain-name-length", 524298, "1"},
+ {"printable", 538968066, "PrintableString"},
+ {"ub-domain-name-length", 524298, "1"},
+ {"OrganizationName", 1612709890, "PrintableString"},
+ {"ub-organization-name-length", 524298, "1"},
+ {"NumericUserIdentifier", 1612709890, "NumericString"},
+ {"ub-numeric-user-id-length", 524298, "1"},
+ {"PersonalName", 1610612750, 0},
+ {"surname", 1814044674, "PrintableString"},
+ {0, 1073745928, "0"},
+ {"ub-surname-length", 524298, "1"},
+ {"given-name", 1814061058, "PrintableString"},
+ {0, 1073745928, "1"},
+ {"ub-given-name-length", 524298, "1"},
+ {"initials", 1814061058, "PrintableString"},
+ {0, 1073745928, "2"},
+ {"ub-initials-length", 524298, "1"},
+ {"generation-qualifier", 740319234, "PrintableString"},
+ {0, 1073745928, "3"},
+ {"ub-generation-qualifier-length", 524298, "1"},
+ {"OrganizationalUnitNames", 1612709899, 0},
+ {"ub-organizational-units", 1074266122, "1"},
+ {0, 2, "OrganizationalUnitName"},
+ {"OrganizationalUnitName", 1612709890, "PrintableString"},
+ {"ub-organizational-unit-name-length", 524298, "1"},
+ {"BuiltInDomainDefinedAttributes", 1612709899, 0},
+ {"ub-domain-defined-attributes", 1074266122, "1"},
+ {0, 2, "BuiltInDomainDefinedAttribute"},
+ {"BuiltInDomainDefinedAttribute", 1610612741, 0},
+ {"type", 1612709890, "PrintableString"},
+ {"ub-domain-defined-attribute-type-length", 524298, "1"},
+ {"value", 538968066, "PrintableString"},
+ {"ub-domain-defined-attribute-value-length", 524298, "1"},
+ {"ExtensionAttributes", 1612709903, 0},
+ {"ub-extension-attributes", 1074266122, "1"},
+ {0, 2, "ExtensionAttribute"},
+ {"ExtensionAttribute", 1610612741, 0},
+ {"extension-attribute-type", 1611145219, 0},
+ {0, 1073743880, "0"},
+ {"0", 10, "ub-extension-attributes"},
+ {"extension-attribute-value", 541073421, 0},
+ {0, 1073743880, "1"},
+ {"extension-attribute-type", 1, 0},
+ {"common-name", 1342177283, "1"},
+ {"CommonName", 1612709890, "PrintableString"},
+ {"ub-common-name-length", 524298, "1"},
+ {"teletex-common-name", 1342177283, "2"},
+ {"TeletexCommonName", 1612709890, "TeletexString"},
+ {"ub-common-name-length", 524298, "1"},
+ {"teletex-organization-name", 1342177283, "3"},
+ {"TeletexOrganizationName", 1612709890, "TeletexString"},
+ {"ub-organization-name-length", 524298, "1"},
+ {"teletex-personal-name", 1342177283, "4"},
+ {"TeletexPersonalName", 1610612750, 0},
+ {"surname", 1814044674, "TeletexString"},
+ {0, 1073743880, "0"},
+ {"ub-surname-length", 524298, "1"},
+ {"given-name", 1814061058, "TeletexString"},
+ {0, 1073743880, "1"},
+ {"ub-given-name-length", 524298, "1"},
+ {"initials", 1814061058, "TeletexString"},
+ {0, 1073743880, "2"},
+ {"ub-initials-length", 524298, "1"},
+ {"generation-qualifier", 740319234, "TeletexString"},
+ {0, 1073743880, "3"},
+ {"ub-generation-qualifier-length", 524298, "1"},
+ {"teletex-organizational-unit-names", 1342177283, "5"},
+ {"TeletexOrganizationalUnitNames", 1612709899, 0},
+ {"ub-organizational-units", 1074266122, "1"},
+ {0, 2, "TeletexOrganizationalUnitName"},
+ {"TeletexOrganizationalUnitName", 1612709890, "TeletexString"},
+ {"ub-organizational-unit-name-length", 524298, "1"},
+ {"pds-name", 1342177283, "7"},
+ {"PDSName", 1612709890, "PrintableString"},
+ {"ub-pds-name-length", 524298, "1"},
+ {"physical-delivery-country-name", 1342177283, "8"},
+ {"PhysicalDeliveryCountryName", 1610612754, 0},
+ {"x121-dcc-code", 1612709890, "NumericString"},
+ {0, 1048586, "ub-country-name-numeric-length"},
+ {"iso-3166-alpha2-code", 538968066, "PrintableString"},
+ {0, 1048586, "ub-country-name-alpha-length"},
+ {"postal-code", 1342177283, "9"},
+ {"PostalCode", 1610612754, 0},
+ {"numeric-code", 1612709890, "NumericString"},
+ {"ub-postal-code-length", 524298, "1"},
+ {"printable-code", 538968066, "PrintableString"},
+ {"ub-postal-code-length", 524298, "1"},
+ {"physical-delivery-office-name", 1342177283, "10"},
+ {"PhysicalDeliveryOfficeName", 1073741826, "PDSParameter"},
+ {"physical-delivery-office-number", 1342177283, "11"},
+ {"PhysicalDeliveryOfficeNumber", 1073741826, "PDSParameter"},
+ {"extension-OR-address-components", 1342177283, "12"},
+ {"ExtensionORAddressComponents", 1073741826, "PDSParameter"},
+ {"physical-delivery-personal-name", 1342177283, "13"},
+ {"PhysicalDeliveryPersonalName", 1073741826, "PDSParameter"},
+ {"physical-delivery-organization-name", 1342177283, "14"},
+ {"PhysicalDeliveryOrganizationName", 1073741826, "PDSParameter"},
+ {"extension-physical-delivery-address-components", 1342177283, "15"},
+ {"ExtensionPhysicalDeliveryAddressComponents", 1073741826, "PDSParameter"},
+ {"unformatted-postal-address", 1342177283, "16"},
+ {"UnformattedPostalAddress", 1610612750, 0},
+ {"printable-address", 1814052875, 0},
+ {"ub-pds-physical-address-lines", 1074266122, "1"},
+ {0, 538968066, "PrintableString"},
+ {"ub-pds-parameter-length", 524298, "1"},
+ {"teletex-string", 740311042, "TeletexString"},
+ {"ub-unformatted-address-length", 524298, "1"},
+ {"street-address", 1342177283, "17"},
+ {"StreetAddress", 1073741826, "PDSParameter"},
+ {"post-office-box-address", 1342177283, "18"},
+ {"PostOfficeBoxAddress", 1073741826, "PDSParameter"},
+ {"poste-restante-address", 1342177283, "19"},
+ {"PosteRestanteAddress", 1073741826, "PDSParameter"},
+ {"unique-postal-name", 1342177283, "20"},
+ {"UniquePostalName", 1073741826, "PDSParameter"},
+ {"local-postal-attributes", 1342177283, "21"},
+ {"LocalPostalAttributes", 1073741826, "PDSParameter"},
+ {"PDSParameter", 1610612750, 0},
+ {"printable-string", 1814052866, "PrintableString"},
+ {"ub-pds-parameter-length", 524298, "1"},
+ {"teletex-string", 740311042, "TeletexString"},
+ {"ub-pds-parameter-length", 524298, "1"},
+ {"extended-network-address", 1342177283, "22"},
+ {"ExtendedNetworkAddress", 1610612754, 0},
+ {"e163-4-address", 1610612741, 0},
+ {"number", 1612718082, "NumericString"},
+ {0, 1073743880, "0"},
+ {"ub-e163-4-number-length", 524298, "1"},
+ {"sub-address", 538992642, "NumericString"},
+ {0, 1073743880, "1"},
+ {"ub-e163-4-sub-address-length", 524298, "1"},
+ {"psap-address", 536879106, "PresentationAddress"},
+ {0, 2056, "0"},
+ {"PresentationAddress", 1610612741, 0},
+ {"pSelector", 1610637319, 0},
+ {0, 2056, "0"},
+ {"sSelector", 1610637319, 0},
+ {0, 2056, "1"},
+ {"tSelector", 1610637319, 0},
+ {0, 2056, "2"},
+ {"nAddresses", 538976271, 0},
+ {0, 1073743880, "3"},
+ {"MAX", 1074266122, "1"},
+ {0, 7, 0},
+ {"terminal-type", 1342177283, "23"},
+ {"TerminalType", 1610874883, 0},
+ {"telex", 1073741825, "3"},
+ {"teletex", 1073741825, "4"},
+ {"g3-facsimile", 1073741825, "5"},
+ {"g4-facsimile", 1073741825, "6"},
+ {"ia5-terminal", 1073741825, "7"},
+ {"videotex", 1, "8"},
+ {"teletex-domain-defined-attributes", 1342177283, "6"},
+ {"TeletexDomainDefinedAttributes", 1612709899, 0},
+ {"ub-domain-defined-attributes", 1074266122, "1"},
+ {0, 2, "TeletexDomainDefinedAttribute"},
+ {"TeletexDomainDefinedAttribute", 1610612741, 0},
+ {"type", 1612709890, "TeletexString"},
+ {"ub-domain-defined-attribute-type-length", 524298, "1"},
+ {"value", 538968066, "TeletexString"},
+ {"ub-domain-defined-attribute-value-length", 524298, "1"},
+ {"ub-name", 1342177283, "32768"},
+ {"ub-common-name", 1342177283, "64"},
+ {"ub-locality-name", 1342177283, "128"},
+ {"ub-state-name", 1342177283, "128"},
+ {"ub-organization-name", 1342177283, "64"},
+ {"ub-organizational-unit-name", 1342177283, "64"},
+ {"ub-title", 1342177283, "64"},
+ {"ub-match", 1342177283, "128"},
+ {"ub-emailaddress-length", 1342177283, "128"},
+ {"ub-common-name-length", 1342177283, "64"},
+ {"ub-country-name-alpha-length", 1342177283, "2"},
+ {"ub-country-name-numeric-length", 1342177283, "3"},
+ {"ub-domain-defined-attributes", 1342177283, "4"},
+ {"ub-domain-defined-attribute-type-length", 1342177283, "8"},
+ {"ub-domain-defined-attribute-value-length", 1342177283, "128"},
+ {"ub-domain-name-length", 1342177283, "16"},
+ {"ub-extension-attributes", 1342177283, "256"},
+ {"ub-e163-4-number-length", 1342177283, "15"},
+ {"ub-e163-4-sub-address-length", 1342177283, "40"},
+ {"ub-generation-qualifier-length", 1342177283, "3"},
+ {"ub-given-name-length", 1342177283, "16"},
+ {"ub-initials-length", 1342177283, "5"},
+ {"ub-integer-options", 1342177283, "256"},
+ {"ub-numeric-user-id-length", 1342177283, "32"},
+ {"ub-organization-name-length", 1342177283, "64"},
+ {"ub-organizational-unit-name-length", 1342177283, "32"},
+ {"ub-organizational-units", 1342177283, "4"},
+ {"ub-pds-name-length", 1342177283, "16"},
+ {"ub-pds-parameter-length", 1342177283, "30"},
+ {"ub-pds-physical-address-lines", 1342177283, "6"},
+ {"ub-postal-code-length", 1342177283, "16"},
+ {"ub-surname-length", 1342177283, "40"},
+ {"ub-terminal-id-length", 1342177283, "24"},
+ {"ub-unformatted-address-length", 1342177283, "180"},
+ {"ub-x121-address-length", 1342177283, "16"},
+ {"pkcs-7-ContentInfo", 1610612741, 0},
+ {"contentType", 1073741826, "pkcs-7-ContentType"},
+ {"content", 541073421, 0},
+ {0, 1073743880, "0"},
+ {"contentType", 1, 0},
+ {"pkcs-7-DigestInfo", 1610612741, 0},
+ {"digestAlgorithm", 1073741826, "pkcs-7-DigestAlgorithmIdentifier"},
+ {"digest", 2, "pkcs-7-Digest"},
+ {"pkcs-7-Digest", 1073741831, 0},
+ {"pkcs-7-ContentType", 1073741836, 0},
+ {"pkcs-7-SignedData", 1610612741, 0},
+ {"version", 1073741826, "pkcs-7-CMSVersion"},
+ {"digestAlgorithms", 1073741826, "pkcs-7-DigestAlgorithmIdentifiers"},
+ {"encapContentInfo", 1073741826, "pkcs-7-EncapsulatedContentInfo"},
+ {"certificates", 1610637314, "pkcs-7-CertificateSet"},
+ {0, 4104, "0"},
+ {"crls", 1610637314, "pkcs-7-CertificateRevocationLists"},
+ {0, 4104, "1"},
+ {"signerInfos", 2, "pkcs-7-SignerInfos"},
+ {"pkcs-7-CMSVersion", 1610874883, 0},
+ {"v0", 1073741825, "0"},
+ {"v1", 1073741825, "1"},
+ {"v2", 1073741825, "2"},
+ {"v3", 1073741825, "3"},
+ {"v4", 1, "4"},
+ {"pkcs-7-DigestAlgorithmIdentifiers", 1610612751, 0},
+ {0, 2, "pkcs-7-DigestAlgorithmIdentifier"},
+ {"pkcs-7-DigestAlgorithmIdentifier", 1073741826, "AlgorithmIdentifier"},
+ {"pkcs-7-EncapsulatedContentInfo", 1610612741, 0},
+ {"eContentType", 1073741826, "pkcs-7-ContentType"},
+ {"eContent", 536895495, 0},
+ {0, 2056, "0"},
+ {"pkcs-7-CertificateRevocationLists", 1610612751, 0},
+ {0, 13, 0},
+ {"pkcs-7-CertificateChoices", 1610612754, 0},
+ {"certificate", 13, 0},
+ {"pkcs-7-CertificateSet", 1610612751, 0},
+ {0, 2, "pkcs-7-CertificateChoices"},
+ {"pkcs-7-SignerInfos", 1610612751, 0},
+ {0, 13, 0},
+ {"pkcs-10-CertificationRequestInfo", 1610612741, 0},
+ {"version", 1610874883, 0},
+ {"v1", 1, "0"},
+ {"subject", 1073741826, "Name"},
+ {"subjectPKInfo", 1073741826, "SubjectPublicKeyInfo"},
+ {"attributes", 536879106, "Attributes"},
+ {0, 4104, "0"},
+ {"Attributes", 1610612751, 0},
+ {0, 2, "Attribute"},
+ {"pkcs-10-CertificationRequest", 1610612741, 0},
+ {"certificationRequestInfo", 1073741826, "pkcs-10-CertificationRequestInfo"},
+ {"signatureAlgorithm", 1073741826, "AlgorithmIdentifier"},
+ {"signature", 6, 0},
+ {"pkcs-9-ub-challengePassword", 1342177283, "255"},
+ {"pkcs-9-certTypes", 1879048204, 0},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "22"},
+ {"pkcs-9-crlTypes", 1879048204, 0},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "23"},
+ {"pkcs-9-at-challengePassword", 1879048204, 0},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "7"},
+ {"pkcs-9-challengePassword", 1610612754, 0},
+ {"printableString", 1612709890, "PrintableString"},
+ {"pkcs-9-ub-challengePassword", 524298, "1"},
+ {"utf8String", 538968066, "UTF8String"},
+ {"pkcs-9-ub-challengePassword", 524298, "1"},
+ {"pkcs-9-at-localKeyId", 1879048204, 0},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "21"},
+ {"pkcs-9-localKeyId", 1073741831, 0},
+ {"pkcs-9-at-friendlyName", 1879048204, 0},
+ {0, 1073741825, "pkcs-9"},
+ {0, 1, "20"},
+ {"pkcs-9-friendlyName", 1612709890, "BMPString"},
+ {"255", 524298, "1"},
+ {"pkcs-8-PrivateKeyInfo", 1610612741, 0},
+ {"version", 1073741826, "pkcs-8-Version"},
+ {"privateKeyAlgorithm", 1073741826, "AlgorithmIdentifier"},
+ {"privateKey", 1073741826, "pkcs-8-PrivateKey"},
+ {"attributes", 536895490, "Attributes"},
+ {0, 4104, "0"},
+ {"pkcs-8-Version", 1610874883, 0},
+ {"v1", 1, "0"},
+ {"pkcs-8-PrivateKey", 1073741831, 0},
+ {"pkcs-8-Attributes", 1610612751, 0},
+ {0, 2, "Attribute"},
+ {"pkcs-8-EncryptedPrivateKeyInfo", 1610612741, 0},
+ {"encryptionAlgorithm", 1073741826, "AlgorithmIdentifier"},
+ {"encryptedData", 2, "pkcs-8-EncryptedData"},
+ {"pkcs-8-EncryptedData", 1073741831, 0},
+ {"pkcs-5", 1879048204, 0},
+ {0, 1073741825, "pkcs"},
+ {0, 1, "5"},
+ {"pkcs-5-encryptionAlgorithm", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"rsadsi", 1073741825, "113549"},
+ {0, 1, "3"},
+ {"pkcs-5-des-EDE3-CBC", 1879048204, 0},
+ {0, 1073741825, "pkcs-5-encryptionAlgorithm"},
+ {0, 1, "7"},
+ {"pkcs-5-des-EDE3-CBC-params", 1612709895, 0},
+ {0, 1048586, "8"},
+ {"pkcs-5-id-PBES2", 1879048204, 0},
+ {0, 1073741825, "pkcs-5"},
+ {0, 1, "13"},
+ {"pkcs-5-PBES2-params", 1610612741, 0},
+ {"keyDerivationFunc", 1073741826, "AlgorithmIdentifier"},
+ {"encryptionScheme", 2, "AlgorithmIdentifier"},
+ {"pkcs-5-id-PBKDF2", 1879048204, 0},
+ {0, 1073741825, "pkcs-5"},
+ {0, 1, "12"},
+ {"pkcs-5-PBKDF2-params", 1610612741, 0},
+ {"salt", 1610612754, 0},
+ {"specified", 1073741831, 0},
+ {"otherSource", 2, "AlgorithmIdentifier"},
+ {"iterationCount", 1611137027, 0},
+ {"1", 10, "MAX"},
+ {"keyLength", 1611153411, 0},
+ {"1", 10, "MAX"},
+ {"prf", 16386, "AlgorithmIdentifier"},
+ {"pkcs-12", 1879048204, 0},
+ {0, 1073741825, "pkcs"},
+ {0, 1, "12"},
+ {"pkcs-12-PFX", 1610612741, 0},
+ {"version", 1610874883, 0},
+ {"v3", 1, "3"},
+ {"authSafe", 1073741826, "pkcs-7-ContentInfo"},
+ {"macData", 16386, "pkcs-12-MacData"},
+ {"pkcs-12-PbeParams", 1610612741, 0},
+ {"salt", 1073741831, 0},
+ {"iterations", 3, 0},
+ {"pkcs-12-MacData", 1610612741, 0},
+ {"mac", 1073741826, "pkcs-7-DigestInfo"},
+ {"macSalt", 1073741831, 0},
+ {"iterations", 536903683, 0},
+ {0, 9, "1"},
+ {"pkcs-12-AuthenticatedSafe", 1610612747, 0},
+ {0, 2, "pkcs-7-ContentInfo"},
+ {"pkcs-12-SafeContents", 1610612747, 0},
+ {0, 2, "pkcs-12-SafeBag"},
+ {"pkcs-12-SafeBag", 1610612741, 0},
+ {"bagId", 1073741836, 0},
+ {"bagValue", 1614815245, 0},
+ {0, 1073743880, "0"},
+ {"badId", 1, 0},
+ {"bagAttributes", 536887311, 0},
+ {0, 2, "pkcs-12-PKCS12Attribute"},
+ {"pkcs-12-bagtypes", 1879048204, 0},
+ {0, 1073741825, "pkcs-12"},
+ {0, 1073741825, "10"},
+ {0, 1, "1"},
+ {"pkcs-12-keyBag", 1879048204, 0},
+ {0, 1073741825, "pkcs-12-bagtypes"},
+ {0, 1, "1"},
+ {"pkcs-12-pkcs8ShroudedKeyBag", 1879048204, 0},
+ {0, 1073741825, "pkcs-12-bagtypes"},
+ {0, 1, "2"},
+ {"pkcs-12-certBag", 1879048204, 0},
+ {0, 1073741825, "pkcs-12-bagtypes"},
+ {0, 1, "3"},
+ {"pkcs-12-crlBag", 1879048204, 0},
+ {0, 1073741825, "pkcs-12-bagtypes"},
+ {0, 1, "4"},
+ {"pkcs-12-KeyBag", 1073741826, "pkcs-8-PrivateKeyInfo"},
+ {"pkcs-12-PKCS8ShroudedKeyBag", 1073741826, "pkcs-8-EncryptedPrivateKeyInfo"},
+ {"pkcs-12-CertBag", 1610612741, 0},
+ {"certId", 1073741836, 0},
+ {"certValue", 541073421, 0},
+ {0, 1073743880, "0"},
+ {"certId", 1, 0},
+ {"pkcs-12-CRLBag", 1610612741, 0},
+ {"crlId", 1073741836, 0},
+ {"crlValue", 541073421, 0},
+ {0, 1073743880, "0"},
+ {"crlId", 1, 0},
+ {"pkcs-12-PKCS12Attribute", 1073741826, "Attribute"},
+ {"pkcs-7-data", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"rsadsi", 1073741825, "113549"},
+ {"pkcs", 1073741825, "1"},
+ {"pkcs7", 1073741825, "7"},
+ {0, 1, "1"},
+ {"pkcs-7-encryptedData", 1879048204, 0},
+ {"iso", 1073741825, "1"},
+ {"member-body", 1073741825, "2"},
+ {"us", 1073741825, "840"},
+ {"rsadsi", 1073741825, "113549"},
+ {"pkcs", 1073741825, "1"},
+ {"pkcs7", 1073741825, "7"},
+ {0, 1, "6"},
+ {"pkcs-7-Data", 1073741831, 0},
+ {"pkcs-7-EncryptedData", 1610612741, 0},
+ {"version", 1073741826, "pkcs-7-CMSVersion"},
+ {"encryptedContentInfo", 1073741826, "pkcs-7-EncryptedContentInfo"},
+ {"unprotectedAttrs", 536895490, "pkcs-7-UnprotectedAttributes"},
+ {0, 4104, "1"},
+ {"pkcs-7-EncryptedContentInfo", 1610612741, 0},
+ {"contentType", 1073741826, "pkcs-7-ContentType"},
+ {"contentEncryptionAlgorithm", 1073741826,
+ "pkcs-7-ContentEncryptionAlgorithmIdentifier"},
+ {"encryptedContent", 536895490, "pkcs-7-EncryptedContent"},
+ {0, 4104, "0"},
+ {"pkcs-7-ContentEncryptionAlgorithmIdentifier", 1073741826,
+ "AlgorithmIdentifier"},
+ {"pkcs-7-EncryptedContent", 1073741831, 0},
+ {"pkcs-7-UnprotectedAttributes", 1612709903, 0},
+ {"MAX", 1074266122, "1"},
+ {0, 2, "Attribute"},
+ {"id-at-ldap-DC", 1880096780, "AttributeType"},
+ {0, 1073741825, "0"},
+ {0, 1073741825, "9"},
+ {0, 1073741825, "2342"},
+ {0, 1073741825, "19200300"},
+ {0, 1073741825, "100"},
+ {0, 1073741825, "1"},
+ {0, 1, "25"},
+ {"ldap-DC", 1073741826, "IA5String"},
+ {"id-at-ldap-UID", 1880096780, "AttributeType"},
+ {0, 1073741825, "0"},
+ {0, 1073741825, "9"},
+ {0, 1073741825, "2342"},
+ {0, 1073741825, "19200300"},
+ {0, 1073741825, "100"},
+ {0, 1073741825, "1"},
+ {0, 1, "1"},
+ {"ldap-UID", 1073741826, "DirectoryString"},
+ {"id-pda", 1879048204, 0},
+ {0, 1073741825, "id-pkix"},
+ {0, 1, "9"},
+ {"id-pda-dateOfBirth", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-pda"},
+ {0, 1, "1"},
+ {"DateOfBirth", 1082130449, 0},
+ {"id-pda-placeOfBirth", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-pda"},
+ {0, 1, "2"},
+ {"PlaceOfBirth", 1073741826, "DirectoryString"},
+ {"id-pda-gender", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-pda"},
+ {0, 1, "3"},
+ {"Gender", 1612709890, "PrintableString"},
+ {0, 1048586, "1"},
+ {"id-pda-countryOfCitizenship", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-pda"},
+ {0, 1, "4"},
+ {"CountryOfCitizenship", 1612709890, "PrintableString"},
+ {0, 1048586, "2"},
+ {"id-pda-countryOfResidence", 1880096780, "AttributeType"},
+ {0, 1073741825, "id-pda"},
+ {0, 1, "5"},
+ {"CountryOfResidence", 538968066, "PrintableString"},
+ {0, 1048586, "2"},
+ {0, 0, 0}
+};
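Each entry in the table above is a libtasn1 static node of the form {name, type/flags, value}; the integer constants encode libtasn1's type and flag bits, which is why the file is carried over verbatim from gnutls rather than written by hand. As a rough illustration of how such a table is normally consumed, a hypothetical standalone loader using the public libtasn1 API (not code from this patch) could look like:

#include <stdio.h>
#include <libtasn1.h>

/* The table defined above. */
extern const asn1_static_node pkix_asn1_tab[];

static asn1_node load_pkix_definitions(void)
{
    asn1_node defs = NULL;
    char errdesc[ASN1_MAX_ERROR_DESCRIPTION_SIZE];

    /* Expand the flat static-node array into a definitions tree. */
    int rc = asn1_array2tree(pkix_asn1_tab, &defs, errdesc);
    if (rc != ASN1_SUCCESS) {
        fprintf(stderr, "asn1_array2tree: %s (%s)\n",
                asn1_strerror(rc), errdesc);
        return NULL;
    }
    return defs; /* released later with asn1_delete_structure(&defs) */
}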
diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c
index 04b5f4e..0824059 100644
--- a/tests/unit/ptimer-test.c
+++ b/tests/unit/ptimer-test.c
@@ -763,6 +763,33 @@ static void check_oneshot_with_load_0(gconstpointer arg)
ptimer_free(ptimer);
}
+static void check_freq_more_than_1000M(gconstpointer arg)
+{
+ const uint8_t *policy = arg;
+ ptimer_state *ptimer = ptimer_init(ptimer_trigger, NULL, *policy);
+ bool no_round_down = (*policy & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN);
+
+ triggered = false;
+
+ ptimer_transaction_begin(ptimer);
+ ptimer_set_freq(ptimer, 2000000000);
+ ptimer_set_limit(ptimer, 8, 1);
+ ptimer_run(ptimer, 1);
+ ptimer_transaction_commit(ptimer);
+
+ qemu_clock_step(3);
+
+ g_assert_cmpuint(ptimer_get_count(ptimer), ==, no_round_down ? 3 : 2);
+ g_assert_false(triggered);
+
+ qemu_clock_step(1);
+
+ g_assert_cmpuint(ptimer_get_count(ptimer), ==, 0);
+ g_assert_true(triggered);
+
+ ptimer_free(ptimer);
+}
+
static void add_ptimer_tests(uint8_t policy)
{
char policy_name[256] = "";
@@ -857,6 +884,12 @@ static void add_ptimer_tests(uint8_t policy)
policy_name),
g_memdup2(&policy, 1), check_oneshot_with_load_0, g_free);
g_free(tmp);
+
+ g_test_add_data_func_full(
+ tmp = g_strdup_printf("/ptimer/freq_more_than_1000M policy=%s",
+ policy_name),
+ g_memdup2(&policy, 1), check_freq_more_than_1000M, g_free);
+ g_free(tmp);
}
static void add_all_ptimer_policies_comb_tests(void)
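The new check_freq_more_than_1000M() case pins down the arithmetic for frequencies above 1 GHz, where one tick is a fraction of a nanosecond: at 2 GHz the period is 0.5 ns, so a limit of 8 expires after 4 ns of virtual time, and after qemu_clock_step(3) six of the eight ticks have elapsed. The one-count difference between the policies (3 vs. 2) comes from how ptimer rounds the fractional period, which is exactly what the assertions lock in. A hypothetical standalone restatement of the plain arithmetic (not QEMU code):

#include <assert.h>
#include <stdint.h>

/* Ticks elapsed after `ns` nanoseconds at `freq_hz` (exact for 2 GHz). */
static uint64_t ticks_elapsed(uint64_t ns, uint64_t freq_hz)
{
    return ns * freq_hz / 1000000000ull;
}

int main(void)
{
    const uint64_t freq = 2000000000ull, limit = 8;

    assert(ticks_elapsed(3, freq) == 6);          /* after qemu_clock_step(3) */
    assert(limit - ticks_elapsed(3, freq) == 2);  /* default-policy count */
    assert(ticks_elapsed(4, freq) == limit);      /* expiry one ns later */
    return 0;
}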
diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c
index f3439cc..37db24f 100644
--- a/tests/unit/socket-helpers.c
+++ b/tests/unit/socket-helpers.c
@@ -170,5 +170,4 @@ void socket_check_afunix_support(bool *has_afunix)
if (*has_afunix) {
close(fd);
}
- return;
}
diff --git a/tests/unit/test-aio-multithread.c b/tests/unit/test-aio-multithread.c
index 08d4570..0ead6bf 100644
--- a/tests/unit/test-aio-multithread.c
+++ b/tests/unit/test-aio-multithread.c
@@ -305,7 +305,9 @@ static void mcs_mutex_lock(void)
prev = qatomic_xchg(&mutex_head, id);
if (prev != -1) {
qatomic_set(&nodes[prev].next, id);
- qemu_futex_wait(&nodes[id].locked, 1);
+ while (qatomic_read(&nodes[id].locked) == 1) {
+ qemu_futex_wait(&nodes[id].locked, 1);
+ }
}
}
@@ -328,7 +330,7 @@ static void mcs_mutex_unlock(void)
/* Wake up the next in line. */
next = qatomic_read(&nodes[id].next);
nodes[next].locked = 0;
- qemu_futex_wake(&nodes[next].locked, 1);
+ qemu_futex_wake_single(&nodes[next].locked);
}
static void test_multi_fair_mutex_entry(void *opaque)
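The mcs_mutex_lock() hunk above wraps qemu_futex_wait() in a loop that re-reads nodes[id].locked, the usual guard against the wait returning while the flag is still set (spurious wakeups, EINTR); the unlock side switches to qemu_futex_wake_single(). The same wait-side pattern, sketched standalone with the raw Linux futex(2) syscall instead of QEMU's wrapper (Linux-only, illustration only):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static void futex_wait(atomic_int *addr, int expected)
{
    /* Returns immediately if *addr != expected; may also wake spuriously. */
    syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

/* Block until *locked drops to 0, tolerating early or spurious wakeups. */
static void wait_for_unlock(atomic_int *locked)
{
    while (atomic_load(locked) == 1) {
        futex_wait(locked, 1);  /* the loop re-checks after every return */
    }
}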
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 6668804..59c2793 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"
@@ -632,6 +632,8 @@ typedef struct TestBlockJob {
BlockDriverState *bs;
int run_ret;
int prepare_ret;
+
+ /* Accessed with atomics */
bool running;
bool should_complete;
} TestBlockJob;
@@ -667,10 +669,10 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
/* We are running the actual job code past the pause point in
* job_co_entry(). */
- s->running = true;
+ qatomic_set(&s->running, true);
job_transition_to_ready(&s->common.job);
- while (!s->should_complete) {
+ while (!qatomic_read(&s->should_complete)) {
/* Avoid job_sleep_ns() because it marks the job as !busy. We want to
* emulate some actual activity (probably some I/O) here so that drain
* has to wait for this activity to stop. */
@@ -685,7 +687,7 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
static void test_job_complete(Job *job, Error **errp)
{
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
- s->should_complete = true;
+ qatomic_set(&s->should_complete, true);
}
BlockJobDriver test_job_driver = {
@@ -722,7 +724,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
BlockJob *job;
TestBlockJob *tjob;
IOThread *iothread = NULL;
- int ret;
+ int ret = -1;
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
&error_abort);
@@ -770,9 +772,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
tjob->bs = src;
job = &tjob->common;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
switch (result) {
case TEST_JOB_SUCCESS:
@@ -791,7 +795,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
/* job_co_entry() is run in the I/O thread, wait for the actual job
* code to start (we don't want to catch the job in the pause point in
* job_co_entry(). */
- while (!tjob->running) {
+ while (!qatomic_read(&tjob->running)) {
aio_poll(qemu_get_aio_context(), false);
}
}
@@ -799,7 +803,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
WITH_JOB_LOCK_GUARD() {
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
- g_assert_true(tjob->running);
+ g_assert_true(qatomic_read(&tjob->running));
g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
}
@@ -825,7 +829,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
*
* paused is reset in the I/O thread, wait for it
*/
- while (job->job.paused) {
+ while (job_is_paused(&job->job)) {
aio_poll(qemu_get_aio_context(), false);
}
}
@@ -858,7 +862,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
*
* paused is reset in the I/O thread, wait for it
*/
- while (job->job.paused) {
+ while (job_is_paused(&job->job)) {
aio_poll(qemu_get_aio_context(), false);
}
}
@@ -951,11 +955,13 @@ static void bdrv_test_top_close(BlockDriverState *bs)
{
BdrvChild *c, *next_c;
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_unref_child(bs, c);
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
static int coroutine_fn GRAPH_RDLOCK
@@ -1012,7 +1018,9 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
bdrv_graph_co_rdlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_graph_co_rdunlock();
+ bdrv_drain_all_begin();
bdrv_co_unref_child(bs, c);
+ bdrv_drain_all_end();
bdrv_graph_co_rdlock();
}
bdrv_graph_co_rdunlock();
@@ -1045,10 +1053,12 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
/* This child will be the one to pass to requests through to, and
* it will stall until a drain occurs */
@@ -1056,21 +1066,25 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
&error_abort);
child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
/* Takes our reference to child_bs */
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
&child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
/* This child is just there to be deleted
* (for detach_instead_of_delete == true) */
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk, bs, &error_abort);
@@ -1153,6 +1167,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
bdrv_dec_in_flight(data->child_b->bs);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_unref_child(data->parent_b, data->child_b);
@@ -1161,6 +1176,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
&child_of_bds, BDRV_CHILD_DATA,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
}
static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
@@ -1258,6 +1274,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
/* Set child relationships */
bdrv_ref(b);
bdrv_ref(a);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
@@ -1269,6 +1286,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
g_assert_cmpint(parent_a->refcnt, ==, 1);
g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1394,14 +1412,10 @@ static void test_set_aio_context(void)
bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
&error_abort);
- bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
- bdrv_drained_end(bs);
- bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
- bdrv_drained_end(bs);
bdrv_unref(bs);
iothread_join(a);
@@ -1411,10 +1425,12 @@ static void test_set_aio_context(void)
typedef struct TestDropBackingBlockJob {
BlockJob common;
- bool should_complete;
bool *did_complete;
BlockDriverState *detach_also;
BlockDriverState *bs;
+
+ /* Accessed with atomics */
+ bool should_complete;
} TestDropBackingBlockJob;
static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
@@ -1422,7 +1438,7 @@ static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
TestDropBackingBlockJob *s =
container_of(job, TestDropBackingBlockJob, common.job);
- while (!s->should_complete) {
+ while (!qatomic_read(&s->should_complete)) {
job_sleep_ns(job, 0);
}
@@ -1541,7 +1557,7 @@ static void test_blockjob_commit_by_drained_end(void)
job_start(&job->common.job);
- job->should_complete = true;
+ qatomic_set(&job->should_complete, true);
bdrv_drained_begin(bs_child);
g_assert(!job_has_completed);
bdrv_drained_end(bs_child);
@@ -1557,15 +1573,17 @@ static void test_blockjob_commit_by_drained_end(void)
typedef struct TestSimpleBlockJob {
BlockJob common;
- bool should_complete;
bool *did_complete;
+
+ /* Accessed with atomics */
+ bool should_complete;
} TestSimpleBlockJob;
static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
{
TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
- while (!s->should_complete) {
+ while (!qatomic_read(&s->should_complete)) {
job_sleep_ns(job, 0);
}
@@ -1681,6 +1699,7 @@ static void test_drop_intermediate_poll(void)
* Establish the chain last, so the chain links are the first
* elements in the BDS.parents lists
*/
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
for (i = 0; i < 3; i++) {
if (i) {
@@ -1690,6 +1709,7 @@ static void test_drop_intermediate_poll(void)
}
}
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
@@ -1700,7 +1720,7 @@ static void test_drop_intermediate_poll(void)
job->did_complete = &job_has_completed;
job_start(&job->common.job);
- job->should_complete = true;
+ qatomic_set(&job->should_complete, true);
g_assert(!job_has_completed);
ret = bdrv_drop_intermediate(chain[1], chain[0], NULL, false);
@@ -1936,10 +1956,12 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
new_child_bs->total_sectors = 1;
bdrv_ref(old_child_bs);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
BDRV_CHILD_COW, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
parent_s->setup_completed = true;
for (i = 0; i < old_drain_count; i++) {
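Several structs in this file (TestBlockJob, TestDropBackingBlockJob, TestSimpleBlockJob) now mark their flags "Accessed with atomics" and switch readers and writers to qatomic_read()/qatomic_set(), because one thread sets should_complete or running while another polls it. A generic C11 sketch of that publish-and-poll pattern (plain stdatomic, not QEMU's qatomic macros; relaxed ordering is assumed to be enough here because the flags carry no other data):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    /* Each flag has a single writer; the other thread only polls it. */
    atomic_bool running;
    atomic_bool should_complete;
} JobFlags;

static void job_run(JobFlags *f)
{
    atomic_store_explicit(&f->running, true, memory_order_relaxed);
    while (!atomic_load_explicit(&f->should_complete, memory_order_relaxed)) {
        /* ... emulate some activity, then poll again ... */
    }
}

static void job_complete(JobFlags *f)
{
    atomic_store_explicit(&f->should_complete, true, memory_order_relaxed);
}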
diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c
index cafc023..7b03ebe 100644
--- a/tests/unit/test-bdrv-graph-mod.c
+++ b/tests/unit/test-bdrv-graph-mod.c
@@ -22,7 +22,7 @@
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
static BlockDriver bdrv_pass_through = {
.format_name = "pass-through",
@@ -137,10 +137,12 @@ static void test_update_perm_tree(void)
blk_insert_bs(root, bs, &error_abort);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(filter, bs, "child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
ret = bdrv_append(filter, bs, NULL);
g_assert_cmpint(ret, <, 0);
@@ -204,11 +206,13 @@ static void test_should_update_child(void)
bdrv_set_backing_hd(target, bs, &error_abort);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
g_assert(target->backing->bs == bs);
bdrv_attach_child(filter, target, "target", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_append(filter, bs, &error_abort);
bdrv_graph_rdlock_main_loop();
@@ -244,6 +248,7 @@ static void test_parallel_exclusive_write(void)
bdrv_ref(base);
bdrv_ref(fl1);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(top, fl1, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
@@ -257,6 +262,7 @@ static void test_parallel_exclusive_write(void)
bdrv_replace_node(fl1, fl2, &error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_drained_end(fl2);
bdrv_drained_end(fl1);
@@ -363,6 +369,7 @@ static void test_parallel_perm_update(void)
*/
bdrv_ref(base);
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
@@ -377,6 +384,7 @@ static void test_parallel_perm_update(void)
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
/* Select fl1 as first child to be active */
s->selected = c_fl1;
@@ -430,11 +438,13 @@ static void test_append_greedy_filter(void)
BlockDriverState *base = no_perm_node("base");
BlockDriverState *fl = exclusive_writer_node("fl1");
+ bdrv_drain_all_begin();
bdrv_graph_wrlock();
bdrv_attach_child(top, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock();
+ bdrv_drain_all_end();
bdrv_append(fl, base, &error_abort);
bdrv_unref(fl);
diff --git a/tests/unit/test-block-backend.c b/tests/unit/test-block-backend.c
index 2fb1a44..4257b3f 100644
--- a/tests/unit/test-block-backend.c
+++ b/tests/unit/test-block-backend.c
@@ -24,7 +24,7 @@
#include "qemu/osdep.h"
#include "block/block.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 3766d5d..e26b3be 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -26,9 +26,9 @@
#include "block/block.h"
#include "block/block_int-global-state.h"
#include "block/blockjob_int.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/main-loop.h"
#include "iothread.h"
@@ -63,7 +63,7 @@ bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
}
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
- bool want_zero,
+ unsigned int mode,
int64_t offset, int64_t count,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
@@ -745,7 +745,7 @@ static void test_propagate_mirror(void)
AioContext *main_ctx = qemu_get_aio_context();
BlockDriverState *src, *target, *filter;
BlockBackend *blk;
- Job *job;
+ Job *job = NULL;
Error *local_err = NULL;
/* Create src and target*/
diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c
index d3b0bb2..118503a 100644
--- a/tests/unit/test-blockjob-txn.c
+++ b/tests/unit/test-blockjob-txn.c
@@ -14,8 +14,8 @@
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
-#include "sysemu/block-backend.h"
-#include "qapi/qmp/qdict.h"
+#include "system/block-backend.h"
+#include "qobject/qdict.h"
typedef struct {
BlockJob common;
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index fe3e0d2..abdbe4b 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -14,8 +14,8 @@
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
-#include "sysemu/block-backend.h"
-#include "qapi/qmp/qdict.h"
+#include "system/block-backend.h"
+#include "qobject/qdict.h"
#include "iothread.h"
static const BlockJobDriver test_block_job_driver = {
diff --git a/tests/unit/test-char.c b/tests/unit/test-char.c
index f273ce5..f30a39f 100644
--- a/tests/unit/test-char.c
+++ b/tests/unit/test-char.c
@@ -1,15 +1,16 @@
#include "qemu/osdep.h"
#include <glib/gstdio.h>
+#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/sockets.h"
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-char.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qom/qom-qobject.h"
#include "io/channel-socket.h"
#include "qapi/qobject-input-visitor.h"
@@ -184,6 +185,21 @@ static void char_mux_test(void)
char *data;
FeHandler h1 = { 0, false, 0, false, }, h2 = { 0, false, 0, false, };
CharBackend chr_be1, chr_be2;
+ Error *error = NULL;
+
+ /* Create mux and chardev to be immediately removed */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "mux-label",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", "128", &error_abort);
+ qemu_opt_set(opts, "mux", "on", &error_abort);
+ chr = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr);
+ qemu_opts_del(opts);
+
+ /* Remove the just-created mux and chardev */
+ qmp_chardev_remove("mux-label", &error_abort);
+ qmp_chardev_remove("mux-label-base", &error_abort);
opts = qemu_opts_create(qemu_find_opts("chardev"), "mux-label",
1, &error_abort);
@@ -334,9 +350,412 @@ static void char_mux_test(void)
g_free(data);
qemu_chr_fe_deinit(&chr_be1, false);
- qemu_chr_fe_deinit(&chr_be2, true);
+
+ qmp_chardev_remove("mux-label", &error);
+ g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'mux-label' is busy");
+ error_free(error);
+
+ qemu_chr_fe_deinit(&chr_be2, false);
+ qmp_chardev_remove("mux-label", &error_abort);
}
+static void char_hub_test(void)
+{
+ QemuOpts *opts;
+ Chardev *hub, *chr1, *chr2, *base;
+ char *data;
+ FeHandler h = { 0, false, 0, false, };
+ Error *error = NULL;
+ CharBackend chr_be;
+ int ret, i;
+
+#define RB_SIZE 128
+
+ /*
+ * Create invalid hub
+ * 1. Create hub without a 'chardevs.N' defined (expect error)
+ */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "hub", &error_abort);
+ hub = qemu_chr_new_from_opts(opts, NULL, &error);
+ g_assert_cmpstr(error_get_pretty(error), ==,
+ "hub: 'chardevs' list is not defined");
+ error_free(error);
+ error = NULL;
+ qemu_opts_del(opts);
+
+ /*
+ * Create invalid hub
+ * 1. Create chardev with embedded mux: 'mux=on'
+ * 2. Create hub which refers to the mux
+ * 3. Create hub which refers to a chardev already attached
+ * to the mux (already in use, expect error)
+ */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "mux", "on", &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort);
+ base = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(base);
+ qemu_opts_del(opts);
+
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "hub", &error_abort);
+ qemu_opt_set(opts, "chardevs.0", "chr0", &error_abort);
+ hub = qemu_chr_new_from_opts(opts, NULL, &error);
+ g_assert_cmpstr(error_get_pretty(error), ==,
+ "hub: multiplexers and hub devices can't be "
+ "stacked, check chardev 'chr0', chardev should "
+ "not be a hub device or have 'mux=on' enabled");
+ error_free(error);
+ error = NULL;
+ qemu_opts_del(opts);
+
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "hub", &error_abort);
+ qemu_opt_set(opts, "chardevs.0", "chr0-base", &error_abort);
+ hub = qemu_chr_new_from_opts(opts, NULL, &error);
+ g_assert_cmpstr(error_get_pretty(error), ==,
+ "chardev 'chr0-base' is already in use");
+ error_free(error);
+ error = NULL;
+ qemu_opts_del(opts);
+
+ /* Finalize chr0 */
+ qmp_chardev_remove("chr0", &error_abort);
+
+ /*
+ * Create invalid hub with more than the maximum allowed backends
+ * 1. Create more than the maximum allowed 'chardevs.%d' options for
+ * the hub (expect error)
+ */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ for (i = 0; i < 10; i++) {
+ char key[32], val[32];
+
+ snprintf(key, sizeof(key), "chardevs.%d", i);
+ snprintf(val, sizeof(val), "chr%d", i);
+ qemu_opt_set(opts, key, val, &error);
+ if (error) {
+ char buf[64];
+
+ snprintf(buf, sizeof(buf), "Invalid parameter 'chardevs.%d'", i);
+ g_assert_cmpstr(error_get_pretty(error), ==, buf);
+ error_free(error);
+ break;
+ }
+ }
+ g_assert_nonnull(error);
+ error = NULL;
+ qemu_opts_del(opts);
+
+ /*
+ * Create hub with 2 backend chardevs and 1 frontend and perform
+ * data aggregation
+ * 1. Create 2 ringbuf backend chardevs
+ * 2. Create 1 frontend
+ * 3. Create hub which refers to 2 backend chardevs
+ * 4. Attach hub to a frontend
+ * 5. Attach hub to a frontend a second time (expect error)
+ * 6. Perform data aggregation
+ * 7. Remove chr1 ("chr1 is busy", expect error)
+ * 8. Remove hub0 ("hub0 is busy", expect error)
+ * 9. Finalize frontend, hub and backend chardevs in correct order
+ */
+
+ /* Create first chardev */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr1",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort);
+ chr1 = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr1);
+ qemu_opts_del(opts);
+
+ /* Create second chardev */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr2",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort);
+ chr2 = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr2);
+ qemu_opts_del(opts);
+
+ /* Create hub0 and refer to 2 backend chardevs */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "hub", &error_abort);
+ qemu_opt_set(opts, "chardevs.0", "chr1", &error_abort);
+ qemu_opt_set(opts, "chardevs.1", "chr2", &error_abort);
+ hub = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(hub);
+ qemu_opts_del(opts);
+
+ /* Attach hub to a frontend */
+ qemu_chr_fe_init(&chr_be, hub, &error_abort);
+ qemu_chr_fe_set_handlers(&chr_be,
+ fe_can_read,
+ fe_read,
+ fe_event,
+ NULL,
+ &h,
+ NULL, true);
+
+ /* Fails second time */
+ qemu_chr_fe_init(&chr_be, hub, &error);
+ g_assert_cmpstr(error_get_pretty(error), ==, "chardev 'hub0' is already in use");
+ error_free(error);
+ error = NULL;
+
+ /* Write to backend, chr1 */
+ base = qemu_chr_find("chr1");
+ g_assert_cmpint(qemu_chr_be_can_write(base), !=, 0);
+
+ qemu_chr_be_write(base, (void *)"hello", 6);
+ g_assert_cmpint(h.read_count, ==, 6);
+ g_assert_cmpstr(h.read_buf, ==, "hello");
+ h.read_count = 0;
+
+ /* Write to backend, chr2 */
+ base = qemu_chr_find("chr2");
+ g_assert_cmpint(qemu_chr_be_can_write(base), !=, 0);
+
+ qemu_chr_be_write(base, (void *)"olleh", 6);
+ g_assert_cmpint(h.read_count, ==, 6);
+ g_assert_cmpstr(h.read_buf, ==, "olleh");
+ h.read_count = 0;
+
+ /* Write to frontend, chr_be */
+ ret = qemu_chr_fe_write(&chr_be, (void *)"heyhey", 6);
+ g_assert_cmpint(ret, ==, 6);
+
+ data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 6);
+ g_assert_cmpstr(data, ==, "heyhey");
+ g_free(data);
+
+ data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 6);
+ g_assert_cmpstr(data, ==, "heyhey");
+ g_free(data);
+
+ /* Can't be removed, depends on hub0 */
+ qmp_chardev_remove("chr1", &error);
+ g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'chr1' is busy");
+ error_free(error);
+ error = NULL;
+
+ /* Can't be removed, depends on frontend chr_be */
+ qmp_chardev_remove("hub0", &error);
+ g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'hub0' is busy");
+ error_free(error);
+ error = NULL;
+
+ /* Finalize frontend */
+ qemu_chr_fe_deinit(&chr_be, false);
+
+ /* Finalize hub0 */
+ qmp_chardev_remove("hub0", &error_abort);
+
+ /* Finalize backend chardevs */
+ qmp_chardev_remove("chr1", &error_abort);
+ qmp_chardev_remove("chr2", &error_abort);
+
+#ifndef _WIN32
+ /*
+ * Create 3 backend chardevs to simulate EAGAIN and watcher.
+ * Mainly copied from char_pipe_test().
+ * 1. Create 2 ringbuf backend chardevs
+ * 2. Create 1 pipe backend chardev
+ * 3. Create 1 frontend
+ * 4. Create hub which refers to 3 backend chardevs
+ * 5. Attach hub to a frontend
+ * 6. Perform data aggregation and check watcher
+ * 7. Finalize frontend, hub and backend chardevs in correct order
+ */
+ {
+ gchar *tmp_path = g_dir_make_tmp("qemu-test-char.XXXXXX", NULL);
+ gchar *in, *out, *pipe = g_build_filename(tmp_path, "pipe", NULL);
+ Chardev *chr3;
+ int fd, len;
+ char buf[128];
+
+ in = g_strdup_printf("%s.in", pipe);
+ if (mkfifo(in, 0600) < 0) {
+ abort();
+ }
+ out = g_strdup_printf("%s.out", pipe);
+ if (mkfifo(out, 0600) < 0) {
+ abort();
+ }
+
+ /* Create first chardev */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr1",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort);
+ chr1 = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr1);
+ qemu_opts_del(opts);
+
+ /* Create second chardev */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr2",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
+ qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort);
+ chr2 = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr2);
+ qemu_opts_del(opts);
+
+ /* Create third chardev */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "chr3",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "pipe", &error_abort);
+ qemu_opt_set(opts, "path", pipe, &error_abort);
+ chr3 = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(chr3);
+
+ /* Create hub0 and refer to 3 backend chardevs */
+ opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0",
+ 1, &error_abort);
+ qemu_opt_set(opts, "backend", "hub", &error_abort);
+ qemu_opt_set(opts, "chardevs.0", "chr1", &error_abort);
+ qemu_opt_set(opts, "chardevs.1", "chr2", &error_abort);
+ qemu_opt_set(opts, "chardevs.2", "chr3", &error_abort);
+ hub = qemu_chr_new_from_opts(opts, NULL, &error_abort);
+ g_assert_nonnull(hub);
+ qemu_opts_del(opts);
+
+ /* Attach hub to a frontend */
+ qemu_chr_fe_init(&chr_be, hub, &error_abort);
+ qemu_chr_fe_set_handlers(&chr_be,
+ fe_can_read,
+ fe_read,
+ fe_event,
+ NULL,
+ &h,
+ NULL, true);
+
+ /* Write to frontend, chr_be */
+ ret = qemu_chr_fe_write(&chr_be, (void *)"thisis", 6);
+ g_assert_cmpint(ret, ==, 6);
+
+ data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 6);
+ g_assert_cmpstr(data, ==, "thisis");
+ g_free(data);
+
+ data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 6);
+ g_assert_cmpstr(data, ==, "thisis");
+ g_free(data);
+
+ fd = open(out, O_RDWR);
+ ret = read(fd, buf, sizeof(buf));
+ g_assert_cmpint(ret, ==, 6);
+ buf[ret] = 0;
+ g_assert_cmpstr(buf, ==, "thisis");
+ close(fd);
+
+ /* Add watch. Zero indicates no watch was added, as there is nothing to wait for */
+ ret = qemu_chr_fe_add_watch(&chr_be, G_IO_OUT | G_IO_HUP,
+ NULL, NULL);
+ g_assert_cmpint(ret, ==, 0);
+
+ /*
+ * Write to frontend, chr_be, until EAGAIN. Make sure the length is
+ * a power of two so it fits the whole pipe buffer nicely.
+ */
+ len = 0;
+ while ((ret = qemu_chr_fe_write(&chr_be, (void *)"thisisit", 8))
+ != -1) {
+ len += ret;
+ }
+ g_assert_cmpint(errno, ==, EAGAIN);
+
+ /* All further writes should cause EAGAIN */
+ ret = qemu_chr_fe_write(&chr_be, (void *)"b", 1);
+ g_assert_cmpint(ret, ==, -1);
+ g_assert_cmpint(errno, ==, EAGAIN);
+
+ /*
+ * Add watch. A non-zero value indicates we have a blocked chardev,
+ * which can wake us up when writing becomes possible again.
+ */
+ ret = qemu_chr_fe_add_watch(&chr_be, G_IO_OUT | G_IO_HUP,
+ NULL, NULL);
+ g_assert_cmpint(ret, !=, 0);
+ g_source_remove(ret);
+
+ /* Drain pipe and ring buffers */
+ fd = open(out, O_RDWR);
+ while ((ret = read(fd, buf, MIN(sizeof(buf), len))) != -1 && len > 0) {
+ len -= ret;
+ }
+ close(fd);
+
+ data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 128);
+ g_free(data);
+
+ data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 128);
+ g_free(data);
+
+ /*
+ * Now we are good to go. First repeat the "lost" sequence, which
+ * was already consumed and drained by the ring buffers, but which
+ * the pipe has not received yet.
+ */
+ ret = qemu_chr_fe_write(&chr_be, (void *)"thisisit", 8);
+ g_assert_cmpint(ret, ==, 8);
+
+ ret = qemu_chr_fe_write(&chr_be, (void *)"streamisrestored", 16);
+ g_assert_cmpint(ret, ==, 16);
+
+ data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 16);
+ /* Only last 16 bytes, see big comment above */
+ g_assert_cmpstr(data, ==, "streamisrestored");
+ g_free(data);
+
+ data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort);
+ g_assert_cmpint(strlen(data), ==, 16);
+ /* Only last 16 bytes, see big comment above */
+ g_assert_cmpstr(data, ==, "streamisrestored");
+ g_free(data);
+
+ fd = open(out, O_RDWR);
+ ret = read(fd, buf, sizeof(buf));
+ g_assert_cmpint(ret, ==, 24);
+ buf[ret] = 0;
+ /* Both 8 and 16 bytes */
+ g_assert_cmpstr(buf, ==, "thisisitstreamisrestored");
+ close(fd);
+
+ g_free(in);
+ g_free(out);
+ g_free(tmp_path);
+ g_free(pipe);
+
+ /* Finalize frontend */
+ qemu_chr_fe_deinit(&chr_be, false);
+
+ /* Finalize hub0 */
+ qmp_chardev_remove("hub0", &error_abort);
+
+ /* Finalize backend chardevs */
+ qmp_chardev_remove("chr1", &error_abort);
+ qmp_chardev_remove("chr2", &error_abort);
+ qmp_chardev_remove("chr3", &error_abort);
+ }
+#endif
+}
static void websock_server_read(void *opaque, const uint8_t *buf, int size)
{
@@ -574,7 +993,7 @@ static void char_udp_test_internal(Chardev *reuse_chr, int sock)
struct sockaddr_in other;
SocketIdleData d = { 0, };
Chardev *chr;
- CharBackend *be;
+ CharBackend stack_be, *be = &stack_be;
socklen_t alen = sizeof(other);
int ret;
char buf[10];
@@ -590,7 +1009,6 @@ static void char_udp_test_internal(Chardev *reuse_chr, int sock)
chr = qemu_chr_new("client", tmp, NULL);
g_assert_nonnull(chr);
- be = g_alloca(sizeof(CharBackend));
qemu_chr_fe_init(be, chr, &error_abort);
}
@@ -1485,6 +1903,7 @@ int main(int argc, char **argv)
g_test_add_func("/char/invalid", char_invalid_test);
g_test_add_func("/char/ringbuf", char_ringbuf_test);
g_test_add_func("/char/mux", char_mux_test);
+ g_test_add_func("/char/hub", char_hub_test);
#ifdef _WIN32
g_test_add_func("/char/console/subprocess", char_console_test_subprocess);
g_test_add_func("/char/console", char_console_test);
@@ -1523,18 +1942,18 @@ int main(int argc, char **argv)
static CharSocketClientTestConfig client2 ## name = \
{ addr, NULL, true, false, char_socket_event }; \
static CharSocketClientTestConfig client3 ## name = \
- { addr, ",reconnect=1", false, false, char_socket_event }; \
+ { addr, ",reconnect-ms=1000", false, false, char_socket_event }; \
static CharSocketClientTestConfig client4 ## name = \
- { addr, ",reconnect=1", true, false, char_socket_event }; \
+ { addr, ",reconnect-ms=1000", true, false, char_socket_event }; \
static CharSocketClientTestConfig client5 ## name = \
{ addr, NULL, false, true, char_socket_event }; \
static CharSocketClientTestConfig client6 ## name = \
{ addr, NULL, true, true, char_socket_event }; \
static CharSocketClientTestConfig client7 ## name = \
- { addr, ",reconnect=1", true, false, \
+ { addr, ",reconnect-ms=1000", true, false, \
char_socket_event_with_error }; \
static CharSocketClientTestConfig client8 ## name = \
- { addr, ",reconnect=1", false, false, char_socket_event }; \
+ { addr, ",reconnect-ms=1000", false, false, char_socket_event };\
g_test_add_data_func("/char/socket/client/mainloop/" # name, \
&client1 ##name, char_socket_client_test); \
g_test_add_data_func("/char/socket/client/wait-conn/" # name, \
diff --git a/tests/unit/test-crypto-afsplit.c b/tests/unit/test-crypto-afsplit.c
index 00a7c18..45e9046 100644
--- a/tests/unit/test-crypto-afsplit.c
+++ b/tests/unit/test-crypto-afsplit.c
@@ -26,7 +26,7 @@
typedef struct QCryptoAFSplitTestData QCryptoAFSplitTestData;
struct QCryptoAFSplitTestData {
const char *path;
- QCryptoHashAlgorithm hash;
+ QCryptoHashAlgo hash;
uint32_t stripes;
size_t blocklen;
const uint8_t *key;
@@ -36,7 +36,7 @@ struct QCryptoAFSplitTestData {
static QCryptoAFSplitTestData test_data[] = {
{
.path = "/crypto/afsplit/sha256/5",
- .hash = QCRYPTO_HASH_ALG_SHA256,
+ .hash = QCRYPTO_HASH_ALGO_SHA256,
.stripes = 5,
.blocklen = 32,
.key = (const uint8_t *)
@@ -68,7 +68,7 @@ static QCryptoAFSplitTestData test_data[] = {
},
{
.path = "/crypto/afsplit/sha256/5000",
- .hash = QCRYPTO_HASH_ALG_SHA256,
+ .hash = QCRYPTO_HASH_ALGO_SHA256,
.stripes = 5000,
.blocklen = 16,
.key = (const uint8_t *)
@@ -77,7 +77,7 @@ static QCryptoAFSplitTestData test_data[] = {
},
{
.path = "/crypto/afsplit/sha1/1000",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.stripes = 1000,
.blocklen = 32,
.key = (const uint8_t *)
@@ -88,7 +88,7 @@ static QCryptoAFSplitTestData test_data[] = {
},
{
.path = "/crypto/afsplit/sha256/big",
- .hash = QCRYPTO_HASH_ALG_SHA256,
+ .hash = QCRYPTO_HASH_ALGO_SHA256,
.stripes = 1000,
.blocklen = 64,
.key = (const uint8_t *)
diff --git a/tests/unit/test-crypto-akcipher.c b/tests/unit/test-crypto-akcipher.c
index 4f1f421..53c2211 100644
--- a/tests/unit/test-crypto-akcipher.c
+++ b/tests/unit/test-crypto-akcipher.c
@@ -692,7 +692,7 @@ struct QCryptoAkCipherTestData {
static QCryptoRSAKeyTestData rsakey_test_data[] = {
{
.path = "/crypto/akcipher/rsakey-1024-public",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa1024_public_key,
.keylen = sizeof(rsa1024_public_key),
.is_valid_key = true,
@@ -700,7 +700,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = {
},
{
.path = "/crypto/akcipher/rsakey-1024-private",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
.key = rsa1024_private_key,
.keylen = sizeof(rsa1024_private_key),
.is_valid_key = true,
@@ -708,7 +708,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = {
},
{
.path = "/crypto/akcipher/rsakey-2048-public",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa2048_public_key,
.keylen = sizeof(rsa2048_public_key),
.is_valid_key = true,
@@ -716,7 +716,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = {
},
{
.path = "/crypto/akcipher/rsakey-2048-private",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
.key = rsa2048_private_key,
.keylen = sizeof(rsa2048_private_key),
.is_valid_key = true,
@@ -724,56 +724,56 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = {
},
{
.path = "/crypto/akcipher/rsakey-public-lack-elem",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa_public_key_lack_element,
.keylen = sizeof(rsa_public_key_lack_element),
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-private-lack-elem",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
.key = rsa_private_key_lack_element,
.keylen = sizeof(rsa_private_key_lack_element),
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-public-empty-elem",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa_public_key_empty_element,
.keylen = sizeof(rsa_public_key_empty_element),
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-private-empty-elem",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
.key = rsa_private_key_empty_element,
.keylen = sizeof(rsa_private_key_empty_element),
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-public-empty-key",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = NULL,
.keylen = 0,
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-private-empty-key",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
.key = NULL,
.keylen = 0,
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-public-invalid-length-val",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa_public_key_invalid_length_val,
.keylen = sizeof(rsa_public_key_invalid_length_val),
.is_valid_key = false,
},
{
.path = "/crypto/akcipher/rsakey-public-extra-elem",
- .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
.key = rsa_public_key_extra_elem,
.keylen = sizeof(rsa_public_key_extra_elem),
.is_valid_key = false,
@@ -785,9 +785,9 @@ static QCryptoAkCipherTestData akcipher_test_data[] = {
{
.path = "/crypto/akcipher/rsa1024-raw",
.opt = {
- .alg = QCRYPTO_AKCIPHER_ALG_RSA,
+ .alg = QCRYPTO_AK_CIPHER_ALGO_RSA,
.u.rsa = {
- .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW,
+ .padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW,
},
},
.pub_key = rsa1024_public_key,
@@ -805,10 +805,10 @@ static QCryptoAkCipherTestData akcipher_test_data[] = {
{
.path = "/crypto/akcipher/rsa1024-pkcs1",
.opt = {
- .alg = QCRYPTO_AKCIPHER_ALG_RSA,
+ .alg = QCRYPTO_AK_CIPHER_ALGO_RSA,
.u.rsa = {
- .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1,
- .hash_alg = QCRYPTO_HASH_ALG_SHA1,
+ .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA1,
},
},
.pub_key = rsa1024_public_key,
@@ -830,9 +830,9 @@ static QCryptoAkCipherTestData akcipher_test_data[] = {
{
.path = "/crypto/akcipher/rsa2048-raw",
.opt = {
- .alg = QCRYPTO_AKCIPHER_ALG_RSA,
+ .alg = QCRYPTO_AK_CIPHER_ALGO_RSA,
.u.rsa = {
- .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW,
+ .padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW,
},
},
.pub_key = rsa2048_public_key,
@@ -850,10 +850,10 @@ static QCryptoAkCipherTestData akcipher_test_data[] = {
{
.path = "/crypto/akcipher/rsa2048-pkcs1",
.opt = {
- .alg = QCRYPTO_AKCIPHER_ALG_RSA,
+ .alg = QCRYPTO_AK_CIPHER_ALGO_RSA,
.u.rsa = {
- .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1,
- .hash_alg = QCRYPTO_HASH_ALG_SHA1,
+ .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA1,
},
},
.pub_key = rsa2048_public_key,
@@ -885,12 +885,12 @@ static void test_akcipher(const void *opaque)
return;
}
pub_key = qcrypto_akcipher_new(&data->opt,
- QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC,
data->pub_key, data->pub_key_len,
&error_abort);
g_assert(pub_key != NULL);
priv_key = qcrypto_akcipher_new(&data->opt,
- QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
+ QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE,
data->priv_key, data->priv_key_len,
&error_abort);
g_assert(priv_key != NULL);
@@ -944,10 +944,10 @@ static void test_rsakey(const void *opaque)
{
const QCryptoRSAKeyTestData *data = (const QCryptoRSAKeyTestData *)opaque;
QCryptoAkCipherOptions opt = {
- .alg = QCRYPTO_AKCIPHER_ALG_RSA,
+ .alg = QCRYPTO_AK_CIPHER_ALGO_RSA,
.u.rsa = {
- .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1,
- .hash_alg = QCRYPTO_HASH_ALG_SHA1,
+ .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA1,
}
};
g_autoptr(QCryptoAkCipher) key = qcrypto_akcipher_new(
diff --git a/tests/unit/test-crypto-block.c b/tests/unit/test-crypto-block.c
index 42cfab6..3ac7f17 100644
--- a/tests/unit/test-crypto-block.c
+++ b/tests/unit/test-crypto-block.c
@@ -39,14 +39,14 @@
#endif
static QCryptoBlockCreateOptions qcow_create_opts = {
- .format = Q_CRYPTO_BLOCK_FORMAT_QCOW,
+ .format = QCRYPTO_BLOCK_FORMAT_QCOW,
.u.qcow = {
.key_secret = (char *)"sec0",
},
};
static QCryptoBlockOpenOptions qcow_open_opts = {
- .format = Q_CRYPTO_BLOCK_FORMAT_QCOW,
+ .format = QCRYPTO_BLOCK_FORMAT_QCOW,
.u.qcow = {
.key_secret = (char *)"sec0",
},
@@ -55,7 +55,7 @@ static QCryptoBlockOpenOptions qcow_open_opts = {
#ifdef TEST_LUKS
static QCryptoBlockOpenOptions luks_open_opts = {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = {
.key_secret = (char *)"sec0",
},
@@ -64,7 +64,7 @@ static QCryptoBlockOpenOptions luks_open_opts = {
/* Creation with all default values */
static QCryptoBlockCreateOptions luks_create_opts_default = {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = {
.key_secret = (char *)"sec0",
},
@@ -73,33 +73,33 @@ static QCryptoBlockCreateOptions luks_create_opts_default = {
/* ...and with explicit values */
static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_plain64 = {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = {
.key_secret = (char *)"sec0",
.has_cipher_alg = true,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256,
.has_cipher_mode = true,
.cipher_mode = QCRYPTO_CIPHER_MODE_CBC,
.has_ivgen_alg = true,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
},
};
static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_essiv = {
- .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
+ .format = QCRYPTO_BLOCK_FORMAT_LUKS,
.u.luks = {
.key_secret = (char *)"sec0",
.has_cipher_alg = true,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256,
.has_cipher_mode = true,
.cipher_mode = QCRYPTO_CIPHER_MODE_CBC,
.has_ivgen_alg = true,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_ESSIV,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_ESSIV,
.has_ivgen_hash_alg = true,
- .ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256,
+ .ivgen_hash_alg = QCRYPTO_HASH_ALGO_SHA256,
.has_hash_alg = true,
- .hash_alg = QCRYPTO_HASH_ALG_SHA1,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA1,
},
};
#endif /* TEST_LUKS */
@@ -112,12 +112,12 @@ static struct QCryptoBlockTestData {
bool expect_header;
- QCryptoCipherAlgorithm cipher_alg;
+ QCryptoCipherAlgo cipher_alg;
QCryptoCipherMode cipher_mode;
- QCryptoHashAlgorithm hash_alg;
+ QCryptoHashAlgo hash_alg;
- QCryptoIVGenAlgorithm ivgen_alg;
- QCryptoHashAlgorithm ivgen_hash;
+ QCryptoIVGenAlgo ivgen_alg;
+ QCryptoHashAlgo ivgen_hash;
bool slow;
} test_data[] = {
@@ -128,10 +128,10 @@ static struct QCryptoBlockTestData {
.expect_header = false,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_128,
.cipher_mode = QCRYPTO_CIPHER_MODE_CBC,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
},
#ifdef TEST_LUKS
{
@@ -141,11 +141,11 @@ static struct QCryptoBlockTestData {
.expect_header = true,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256,
.cipher_mode = QCRYPTO_CIPHER_MODE_XTS,
- .hash_alg = QCRYPTO_HASH_ALG_SHA256,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA256,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
.slow = true,
},
@@ -156,11 +156,11 @@ static struct QCryptoBlockTestData {
.expect_header = true,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256,
.cipher_mode = QCRYPTO_CIPHER_MODE_CBC,
- .hash_alg = QCRYPTO_HASH_ALG_SHA256,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA256,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
.slow = true,
},
@@ -171,12 +171,12 @@ static struct QCryptoBlockTestData {
.expect_header = true,
- .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256,
.cipher_mode = QCRYPTO_CIPHER_MODE_CBC,
- .hash_alg = QCRYPTO_HASH_ALG_SHA1,
+ .hash_alg = QCRYPTO_HASH_ALGO_SHA1,
- .ivgen_alg = QCRYPTO_IVGEN_ALG_ESSIV,
- .ivgen_hash = QCRYPTO_HASH_ALG_SHA256,
+ .ivgen_alg = QCRYPTO_IV_GEN_ALGO_ESSIV,
+ .ivgen_hash = QCRYPTO_HASH_ALGO_SHA256,
.slow = true,
},
@@ -572,8 +572,15 @@ int main(int argc, char **argv)
g_assert(qcrypto_init(NULL) == 0);
for (i = 0; i < G_N_ELEMENTS(test_data); i++) {
- if (test_data[i].open_opts->format == Q_CRYPTO_BLOCK_FORMAT_LUKS &&
+ if (test_data[i].open_opts->format == QCRYPTO_BLOCK_FORMAT_LUKS &&
!qcrypto_hash_supports(test_data[i].hash_alg)) {
+ g_printerr("# skip unsupported %s\n",
+ QCryptoHashAlgo_str(test_data[i].hash_alg));
+ continue;
+ }
+ if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128,
+ QCRYPTO_CIPHER_MODE_CBC)) {
+ g_printerr("# skip unsupported aes-128:cbc\n");
continue;
}
if (!test_data[i].slow ||
diff --git a/tests/unit/test-crypto-cipher.c b/tests/unit/test-crypto-cipher.c
index f5152e5..1331d55 100644
--- a/tests/unit/test-crypto-cipher.c
+++ b/tests/unit/test-crypto-cipher.c
@@ -27,7 +27,7 @@
typedef struct QCryptoCipherTestData QCryptoCipherTestData;
struct QCryptoCipherTestData {
const char *path;
- QCryptoCipherAlgorithm alg;
+ QCryptoCipherAlgo alg;
QCryptoCipherMode mode;
const char *key;
const char *plaintext;
@@ -43,7 +43,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.1.1 ECB-AES128.Encrypt */
.path = "/crypto/cipher/aes-ecb-128",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "2b7e151628aed2a6abf7158809cf4f3c",
.plaintext =
@@ -60,7 +60,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.1.3 ECB-AES192.Encrypt */
.path = "/crypto/cipher/aes-ecb-192",
- .alg = QCRYPTO_CIPHER_ALG_AES_192,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_192,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
.plaintext =
@@ -77,7 +77,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.1.5 ECB-AES256.Encrypt */
.path = "/crypto/cipher/aes-ecb-256",
- .alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_256,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key =
"603deb1015ca71be2b73aef0857d7781"
@@ -96,7 +96,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.2.1 CBC-AES128.Encrypt */
.path = "/crypto/cipher/aes-cbc-128",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_CBC,
.key = "2b7e151628aed2a6abf7158809cf4f3c",
.iv = "000102030405060708090a0b0c0d0e0f",
@@ -114,7 +114,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.2.3 CBC-AES128.Encrypt */
.path = "/crypto/cipher/aes-cbc-192",
- .alg = QCRYPTO_CIPHER_ALG_AES_192,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_192,
.mode = QCRYPTO_CIPHER_MODE_CBC,
.key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
.iv = "000102030405060708090a0b0c0d0e0f",
@@ -132,7 +132,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.2.5 CBC-AES128.Encrypt */
.path = "/crypto/cipher/aes-cbc-256",
- .alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_256,
.mode = QCRYPTO_CIPHER_MODE_CBC,
.key =
"603deb1015ca71be2b73aef0857d7781"
@@ -156,7 +156,7 @@ static QCryptoCipherTestData test_data[] = {
* ciphertext in ECB and CBC modes
*/
.path = "/crypto/cipher/des-ecb-56-one-block",
- .alg = QCRYPTO_CIPHER_ALG_DES,
+ .alg = QCRYPTO_CIPHER_ALGO_DES,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "80c4a2e691d5b3f7",
.plaintext = "70617373776f7264",
@@ -165,7 +165,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* See previous comment */
.path = "/crypto/cipher/des-cbc-56-one-block",
- .alg = QCRYPTO_CIPHER_ALG_DES,
+ .alg = QCRYPTO_CIPHER_ALGO_DES,
.mode = QCRYPTO_CIPHER_MODE_CBC,
.key = "80c4a2e691d5b3f7",
.iv = "0000000000000000",
@@ -174,7 +174,7 @@ static QCryptoCipherTestData test_data[] = {
},
{
.path = "/crypto/cipher/des-ecb-56",
- .alg = QCRYPTO_CIPHER_ALG_DES,
+ .alg = QCRYPTO_CIPHER_ALGO_DES,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "80c4a2e691d5b3f7",
.plaintext =
@@ -191,7 +191,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Borrowed from linux-kernel crypto/testmgr.h */
.path = "/crypto/cipher/3des-cbc",
- .alg = QCRYPTO_CIPHER_ALG_3DES,
+ .alg = QCRYPTO_CIPHER_ALGO_3DES,
.mode = QCRYPTO_CIPHER_MODE_CBC,
.key =
"e9c0ff2e760b6424444d995a12d640c0"
@@ -220,7 +220,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Borrowed from linux-kernel crypto/testmgr.h */
.path = "/crypto/cipher/3des-ecb",
- .alg = QCRYPTO_CIPHER_ALG_3DES,
+ .alg = QCRYPTO_CIPHER_ALGO_3DES,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key =
"0123456789abcdef5555555555555555"
@@ -233,7 +233,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Borrowed from linux-kernel crypto/testmgr.h */
.path = "/crypto/cipher/3des-ctr",
- .alg = QCRYPTO_CIPHER_ALG_3DES,
+ .alg = QCRYPTO_CIPHER_ALGO_3DES,
.mode = QCRYPTO_CIPHER_MODE_CTR,
.key =
"9cd6f39cb95a67005a67002dceeb2dce"
@@ -308,7 +308,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* RFC 2144, Appendix B.1 */
.path = "/crypto/cipher/cast5-128",
- .alg = QCRYPTO_CIPHER_ALG_CAST5_128,
+ .alg = QCRYPTO_CIPHER_ALGO_CAST5_128,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "0123456712345678234567893456789A",
.plaintext = "0123456789abcdef",
@@ -317,7 +317,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* libgcrypt serpent.c */
.path = "/crypto/cipher/serpent-128",
- .alg = QCRYPTO_CIPHER_ALG_SERPENT_128,
+ .alg = QCRYPTO_CIPHER_ALGO_SERPENT_128,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "00000000000000000000000000000000",
.plaintext = "d29d576fcea3a3a7ed9099f29273d78e",
@@ -326,7 +326,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* libgcrypt serpent.c */
.path = "/crypto/cipher/serpent-192",
- .alg = QCRYPTO_CIPHER_ALG_SERPENT_192,
+ .alg = QCRYPTO_CIPHER_ALGO_SERPENT_192,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "00000000000000000000000000000000"
"0000000000000000",
@@ -336,7 +336,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* libgcrypt serpent.c */
.path = "/crypto/cipher/serpent-256a",
- .alg = QCRYPTO_CIPHER_ALG_SERPENT_256,
+ .alg = QCRYPTO_CIPHER_ALGO_SERPENT_256,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "00000000000000000000000000000000"
"00000000000000000000000000000000",
@@ -346,7 +346,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* libgcrypt serpent.c */
.path = "/crypto/cipher/serpent-256b",
- .alg = QCRYPTO_CIPHER_ALG_SERPENT_256,
+ .alg = QCRYPTO_CIPHER_ALGO_SERPENT_256,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "00000000000000000000000000000000"
"00000000000000000000000000000000",
@@ -356,7 +356,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Twofish paper "Known Answer Test" */
.path = "/crypto/cipher/twofish-128",
- .alg = QCRYPTO_CIPHER_ALG_TWOFISH_128,
+ .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_128,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "d491db16e7b1c39e86cb086b789f5419",
.plaintext = "019f9809de1711858faac3a3ba20fbc3",
@@ -365,7 +365,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Twofish paper "Known Answer Test", I=3 */
.path = "/crypto/cipher/twofish-192",
- .alg = QCRYPTO_CIPHER_ALG_TWOFISH_192,
+ .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_192,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "88b2b2706b105e36b446bb6d731a1e88"
"efa71f788965bd44",
@@ -375,7 +375,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* Twofish paper "Known Answer Test", I=4 */
.path = "/crypto/cipher/twofish-256",
- .alg = QCRYPTO_CIPHER_ALG_TWOFISH_256,
+ .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_256,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "d43bb7556ea32e46f2a282b7d45b4e0d"
"57ff739d4dc92c1bd7fc01700cc8216f",
@@ -386,7 +386,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* SM4, GB/T 32907-2016, Appendix A.1 */
.path = "/crypto/cipher/sm4",
- .alg = QCRYPTO_CIPHER_ALG_SM4,
+ .alg = QCRYPTO_CIPHER_ALGO_SM4,
.mode = QCRYPTO_CIPHER_MODE_ECB,
.key = "0123456789abcdeffedcba9876543210",
.plaintext =
@@ -398,7 +398,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* #1 32 byte key, 32 byte PTX */
.path = "/crypto/cipher/aes-xts-128-1",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_XTS,
.key =
"00000000000000000000000000000000"
@@ -415,7 +415,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* #2, 32 byte key, 32 byte PTX */
.path = "/crypto/cipher/aes-xts-128-2",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_XTS,
.key =
"11111111111111111111111111111111"
@@ -432,7 +432,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* #5 from xts.7, 32 byte key, 32 byte PTX */
.path = "/crypto/cipher/aes-xts-128-3",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_XTS,
.key =
"fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0"
@@ -449,7 +449,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* #4, 32 byte key, 512 byte PTX */
.path = "/crypto/cipher/aes-xts-128-4",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_XTS,
.key =
"27182818284590452353602874713526"
@@ -528,7 +528,7 @@ static QCryptoCipherTestData test_data[] = {
* which is incompatible with XTS
*/
.path = "/crypto/cipher/cast5-xts-128",
- .alg = QCRYPTO_CIPHER_ALG_CAST5_128,
+ .alg = QCRYPTO_CIPHER_ALGO_CAST5_128,
.mode = QCRYPTO_CIPHER_MODE_XTS,
.key =
"27182818284590452353602874713526"
@@ -537,7 +537,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.5.1 CTR-AES128.Encrypt */
.path = "/crypto/cipher/aes-ctr-128",
- .alg = QCRYPTO_CIPHER_ALG_AES_128,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_128,
.mode = QCRYPTO_CIPHER_MODE_CTR,
.key = "2b7e151628aed2a6abf7158809cf4f3c",
.iv = "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
@@ -555,7 +555,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.5.3 CTR-AES192.Encrypt */
.path = "/crypto/cipher/aes-ctr-192",
- .alg = QCRYPTO_CIPHER_ALG_AES_192,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_192,
.mode = QCRYPTO_CIPHER_MODE_CTR,
.key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
.iv = "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
@@ -573,7 +573,7 @@ static QCryptoCipherTestData test_data[] = {
{
/* NIST F.5.5 CTR-AES256.Encrypt */
.path = "/crypto/cipher/aes-ctr-256",
- .alg = QCRYPTO_CIPHER_ALG_AES_256,
+ .alg = QCRYPTO_CIPHER_ALGO_AES_256,
.mode = QCRYPTO_CIPHER_MODE_CTR,
.key = "603deb1015ca71be2b73aef0857d7781"
"1f352c073b6108d72d9810a30914dff4",
@@ -750,7 +750,7 @@ static void test_cipher_null_iv(void)
uint8_t ciphertext[32] = { 0 };
cipher = qcrypto_cipher_new(
- QCRYPTO_CIPHER_ALG_AES_256,
+ QCRYPTO_CIPHER_ALGO_AES_256,
QCRYPTO_CIPHER_MODE_CBC,
key, sizeof(key),
&error_abort);
@@ -779,7 +779,7 @@ static void test_cipher_short_plaintext(void)
int ret;
cipher = qcrypto_cipher_new(
- QCRYPTO_CIPHER_ALG_AES_256,
+ QCRYPTO_CIPHER_ALGO_AES_256,
QCRYPTO_CIPHER_MODE_CBC,
key, sizeof(key),
&error_abort);
@@ -823,16 +823,21 @@ int main(int argc, char **argv)
g_test_add_data_func(test_data[i].path, &test_data[i], test_cipher);
} else {
g_printerr("# skip unsupported %s:%s\n",
- QCryptoCipherAlgorithm_str(test_data[i].alg),
+ QCryptoCipherAlgo_str(test_data[i].alg),
QCryptoCipherMode_str(test_data[i].mode));
}
}
- g_test_add_func("/crypto/cipher/null-iv",
- test_cipher_null_iv);
+ if (qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_256,
+ QCRYPTO_CIPHER_MODE_CBC)) {
+ g_test_add_func("/crypto/cipher/null-iv",
+ test_cipher_null_iv);
- g_test_add_func("/crypto/cipher/short-plaintext",
- test_cipher_short_plaintext);
+ g_test_add_func("/crypto/cipher/short-plaintext",
+ test_cipher_short_plaintext);
+ } else {
+ g_printerr("# skip unsupported aes-256:cbc\n");
+ }
return g_test_run();
}
diff --git a/tests/unit/test-crypto-hash.c b/tests/unit/test-crypto-hash.c
index 1f4abb8..8fee159 100644
--- a/tests/unit/test-crypto-hash.c
+++ b/tests/unit/test-crypto-hash.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto hash algorithms
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2015 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
@@ -42,6 +43,9 @@
"63b54e4cb2d2032b393994aa263c0dbb" \
"e00a9f2fe9ef6037352232a1eec55ee7"
#define OUTPUT_RIPEMD160 "f3d658fad3fdfb2b52c9369cf0d441249ddfa8a0"
+#ifdef CONFIG_CRYPTO_SM3
+#define OUTPUT_SM3 "d4a97db105b477b84c4f20ec9c31a6c814e2705a0b83a5a89748d75f0ef456a1"
+#endif
#define OUTPUT_MD5_B64 "Yo0gY3FWMDWrjvYvSSveyQ=="
#define OUTPUT_SHA1_B64 "sudPJnWKOkIeUJzuBFJEt4dTzAI="
@@ -54,32 +58,45 @@
"7sVe5w=="
#define OUTPUT_RIPEMD160_B64 "89ZY+tP9+ytSyTac8NRBJJ3fqKA="
+#ifdef CONFIG_CRYPTO_SM3
+#define OUTPUT_SM3_B64 "1Kl9sQW0d7hMTyDsnDGmyBTicFoLg6Wol0jXXw70VqE="
+#endif
+
static const char *expected_outputs[] = {
- [QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1,
- [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224,
- [QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256,
- [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384,
- [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512,
- [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160,
+ [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5,
+ [QCRYPTO_HASH_ALGO_SHA1] = OUTPUT_SHA1,
+ [QCRYPTO_HASH_ALGO_SHA224] = OUTPUT_SHA224,
+ [QCRYPTO_HASH_ALGO_SHA256] = OUTPUT_SHA256,
+ [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384,
+ [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3,
+#endif
};
static const char *expected_outputs_b64[] = {
- [QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5_B64,
- [QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1_B64,
- [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224_B64,
- [QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256_B64,
- [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384_B64,
- [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512_B64,
- [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160_B64,
+ [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5_B64,
+ [QCRYPTO_HASH_ALGO_SHA1] = OUTPUT_SHA1_B64,
+ [QCRYPTO_HASH_ALGO_SHA224] = OUTPUT_SHA224_B64,
+ [QCRYPTO_HASH_ALGO_SHA256] = OUTPUT_SHA256_B64,
+ [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384_B64,
+ [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512_B64,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160_B64,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3_B64,
+#endif
};
static const int expected_lens[] = {
- [QCRYPTO_HASH_ALG_MD5] = 16,
- [QCRYPTO_HASH_ALG_SHA1] = 20,
- [QCRYPTO_HASH_ALG_SHA224] = 28,
- [QCRYPTO_HASH_ALG_SHA256] = 32,
- [QCRYPTO_HASH_ALG_SHA384] = 48,
- [QCRYPTO_HASH_ALG_SHA512] = 64,
- [QCRYPTO_HASH_ALG_RIPEMD160] = 20,
+ [QCRYPTO_HASH_ALGO_MD5] = 16,
+ [QCRYPTO_HASH_ALGO_SHA1] = 20,
+ [QCRYPTO_HASH_ALGO_SHA224] = 28,
+ [QCRYPTO_HASH_ALGO_SHA256] = 32,
+ [QCRYPTO_HASH_ALGO_SHA384] = 48,
+ [QCRYPTO_HASH_ALGO_SHA512] = 64,
+ [QCRYPTO_HASH_ALGO_RIPEMD160] = 20,
+#ifdef CONFIG_CRYPTO_SM3
+ [QCRYPTO_HASH_ALGO_SM3] = 32,
+#endif
};
static const char hex[] = "0123456789abcdef";
@@ -122,7 +139,7 @@ static void test_hash_prealloc(void)
size_t i;
for (i = 0; i < G_N_ELEMENTS(expected_outputs) ; i++) {
- uint8_t *result;
+ uint8_t *result, *origresult;
size_t resultlen;
int ret;
size_t j;
@@ -132,7 +149,7 @@ static void test_hash_prealloc(void)
}
resultlen = expected_lens[i];
- result = g_new0(uint8_t, resultlen);
+ origresult = result = g_new0(uint8_t, resultlen);
ret = qcrypto_hash_bytes(i,
INPUT_TEXT,
@@ -141,7 +158,8 @@ static void test_hash_prealloc(void)
&resultlen,
&error_fatal);
g_assert(ret == 0);
-
+ /* Validate that our pre-allocated pointer was not replaced */
+ g_assert(result == origresult);
g_assert(resultlen == expected_lens[i]);
for (j = 0; j < resultlen; j++) {
g_assert(expected_outputs[i][j * 2] == hex[(result[j] >> 4) & 0xf]);
@@ -241,6 +259,50 @@ static void test_hash_base64(void)
}
}
+static void test_hash_accumulate(void)
+{
+ size_t i;
+
+ for (i = 0; i < G_N_ELEMENTS(expected_outputs) ; i++) {
+ g_autoptr(QCryptoHash) hash = NULL;
+ struct iovec iov[] = {
+ { .iov_base = (char *)INPUT_TEXT1, .iov_len = strlen(INPUT_TEXT1) },
+ { .iov_base = (char *)INPUT_TEXT2, .iov_len = strlen(INPUT_TEXT2) },
+ { .iov_base = (char *)INPUT_TEXT3, .iov_len = strlen(INPUT_TEXT3) },
+ };
+ g_autofree uint8_t *result = NULL;
+ size_t resultlen = 0;
+ int ret;
+ size_t j;
+
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
+ hash = qcrypto_hash_new(i, &error_fatal);
+ g_assert(hash != NULL);
+
+ /* Add each iovec to the hash context separately */
+ for (j = 0; j < G_N_ELEMENTS(iov); j++) {
+ ret = qcrypto_hash_updatev(hash,
+ &iov[j], 1,
+ &error_fatal);
+
+ g_assert(ret == 0);
+ }
+
+ ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen,
+ &error_fatal);
+
+ g_assert(ret == 0);
+ g_assert(resultlen == expected_lens[i]);
+ for (j = 0; j < resultlen; j++) {
+ g_assert(expected_outputs[i][j * 2] == hex[(result[j] >> 4) & 0xf]);
+ g_assert(expected_outputs[i][j * 2 + 1] == hex[result[j] & 0xf]);
+ }
+ }
+}
+
int main(int argc, char **argv)
{
int ret = qcrypto_init(&error_fatal);
@@ -252,5 +314,6 @@ int main(int argc, char **argv)
g_test_add_func("/crypto/hash/prealloc", test_hash_prealloc);
g_test_add_func("/crypto/hash/digest", test_hash_digest);
g_test_add_func("/crypto/hash/base64", test_hash_base64);
+ g_test_add_func("/crypto/hash/accumulate", test_hash_accumulate);
return g_test_run();
}
diff --git a/tests/unit/test-crypto-hmac.c b/tests/unit/test-crypto-hmac.c
index 23eb724..20c60eb 100644
--- a/tests/unit/test-crypto-hmac.c
+++ b/tests/unit/test-crypto-hmac.c
@@ -27,43 +27,43 @@
typedef struct QCryptoHmacTestData QCryptoHmacTestData;
struct QCryptoHmacTestData {
- QCryptoHashAlgorithm alg;
+ QCryptoHashAlgo alg;
const char *hex_digest;
};
static QCryptoHmacTestData test_data[] = {
{
- .alg = QCRYPTO_HASH_ALG_MD5,
+ .alg = QCRYPTO_HASH_ALGO_MD5,
.hex_digest =
"ede9cb83679ba82d88fbeae865b3f8fc",
},
{
- .alg = QCRYPTO_HASH_ALG_SHA1,
+ .alg = QCRYPTO_HASH_ALGO_SHA1,
.hex_digest =
"c7b5a631e3aac975c4ededfcd346e469"
"dbc5f2d1",
},
{
- .alg = QCRYPTO_HASH_ALG_SHA224,
+ .alg = QCRYPTO_HASH_ALGO_SHA224,
.hex_digest =
"5f768179dbb29ca722875d0f461a2e2f"
"597d0210340a84df1a8e9c63",
},
{
- .alg = QCRYPTO_HASH_ALG_SHA256,
+ .alg = QCRYPTO_HASH_ALGO_SHA256,
.hex_digest =
"3798f363c57afa6edaffe39016ca7bad"
"efd1e670afb0e3987194307dec3197db",
},
{
- .alg = QCRYPTO_HASH_ALG_SHA384,
+ .alg = QCRYPTO_HASH_ALGO_SHA384,
.hex_digest =
"d218680a6032d33dccd9882d6a6a7164"
"64f26623be257a9b2919b185294f4a49"
"9e54b190bfd6bc5cedd2cd05c7e65e82",
},
{
- .alg = QCRYPTO_HASH_ALG_SHA512,
+ .alg = QCRYPTO_HASH_ALGO_SHA512,
.hex_digest =
"835a4f5b3750b4c1fccfa88da2f746a4"
"900160c9f18964309bb736c13b59491b"
@@ -71,11 +71,19 @@ static QCryptoHmacTestData test_data[] = {
"94c4ba26862b2dadb59b7ede1d08d53e",
},
{
- .alg = QCRYPTO_HASH_ALG_RIPEMD160,
+ .alg = QCRYPTO_HASH_ALGO_RIPEMD160,
.hex_digest =
"94964ed4c1155b62b668c241d67279e5"
"8a711676",
},
+#ifdef CONFIG_CRYPTO_SM3
+ {
+ .alg = QCRYPTO_HASH_ALGO_SM3,
+ .hex_digest =
+ "760e3799332bc913819b930085360ddb"
+ "c05529261313d5b15b75bab4fd7ae91e",
+ },
+#endif
};
static const char hex[] = "0123456789abcdef";
@@ -126,7 +134,7 @@ static void test_hmac_prealloc(void)
for (i = 0; i < G_N_ELEMENTS(test_data); i++) {
QCryptoHmacTestData *data = &test_data[i];
QCryptoHmac *hmac = NULL;
- uint8_t *result = NULL;
+ uint8_t *result = NULL, *origresult = NULL;
size_t resultlen = 0;
const char *exp_output = NULL;
int ret;
@@ -139,7 +147,7 @@ static void test_hmac_prealloc(void)
exp_output = data->hex_digest;
resultlen = strlen(exp_output) / 2;
- result = g_new0(uint8_t, resultlen);
+ origresult = result = g_new0(uint8_t, resultlen);
hmac = qcrypto_hmac_new(data->alg, (const uint8_t *)KEY,
strlen(KEY), &error_fatal);
@@ -149,6 +157,8 @@ static void test_hmac_prealloc(void)
strlen(INPUT_TEXT), &result,
&resultlen, &error_fatal);
g_assert(ret == 0);
+ /* Validate that our pre-allocated pointer was not replaced */
+ g_assert(result == origresult);
exp_output = data->hex_digest;
for (j = 0; j < resultlen; j++) {
diff --git a/tests/unit/test-crypto-ivgen.c b/tests/unit/test-crypto-ivgen.c
index 29630ed..bc9ffe3 100644
--- a/tests/unit/test-crypto-ivgen.c
+++ b/tests/unit/test-crypto-ivgen.c
@@ -26,9 +26,9 @@
struct QCryptoIVGenTestData {
const char *path;
uint64_t sector;
- QCryptoIVGenAlgorithm ivalg;
- QCryptoHashAlgorithm hashalg;
- QCryptoCipherAlgorithm cipheralg;
+ QCryptoIVGenAlgo ivalg;
+ QCryptoHashAlgo hashalg;
+ QCryptoCipherAlgo cipheralg;
const uint8_t *key;
size_t nkey;
const uint8_t *iv;
@@ -38,7 +38,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain/1",
.sector = 0x1,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN,
.iv = (const uint8_t *)"\x01\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -47,7 +47,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain/1f2e3d4c",
.sector = 0x1f2e3d4cULL,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN,
.iv = (const uint8_t *)"\x4c\x3d\x2e\x1f\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -56,7 +56,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain/1f2e3d4c5b6a7988",
.sector = 0x1f2e3d4c5b6a7988ULL,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN,
.iv = (const uint8_t *)"\x88\x79\x6a\x5b\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -65,7 +65,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain64/1",
.sector = 0x1,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
.iv = (const uint8_t *)"\x01\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -74,7 +74,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain64/1f2e3d4c",
.sector = 0x1f2e3d4cULL,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
.iv = (const uint8_t *)"\x4c\x3d\x2e\x1f\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -83,7 +83,7 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/plain64/1f2e3d4c5b6a7988",
.sector = 0x1f2e3d4c5b6a7988ULL,
- .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64,
.iv = (const uint8_t *)"\x88\x79\x6a\x5b\x4c\x3d\x2e\x1f"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.niv = 16,
@@ -92,9 +92,9 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/essiv/1",
.sector = 0x1,
- .ivalg = QCRYPTO_IVGEN_ALG_ESSIV,
- .cipheralg = QCRYPTO_CIPHER_ALG_AES_128,
- .hashalg = QCRYPTO_HASH_ALG_SHA256,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV,
+ .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128,
+ .hashalg = QCRYPTO_HASH_ALGO_SHA256,
.key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.nkey = 16,
@@ -106,9 +106,9 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/essiv/1f2e3d4c",
.sector = 0x1f2e3d4cULL,
- .ivalg = QCRYPTO_IVGEN_ALG_ESSIV,
- .cipheralg = QCRYPTO_CIPHER_ALG_AES_128,
- .hashalg = QCRYPTO_HASH_ALG_SHA256,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV,
+ .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128,
+ .hashalg = QCRYPTO_HASH_ALGO_SHA256,
.key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.nkey = 16,
@@ -120,9 +120,9 @@ struct QCryptoIVGenTestData {
{
"/crypto/ivgen/essiv/1f2e3d4c5b6a7988",
.sector = 0x1f2e3d4c5b6a7988ULL,
- .ivalg = QCRYPTO_IVGEN_ALG_ESSIV,
- .cipheralg = QCRYPTO_CIPHER_ALG_AES_128,
- .hashalg = QCRYPTO_HASH_ALG_SHA256,
+ .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV,
+ .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128,
+ .hashalg = QCRYPTO_HASH_ALGO_SHA256,
.key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.nkey = 16,
@@ -166,7 +166,7 @@ int main(int argc, char **argv)
size_t i;
g_test_init(&argc, &argv, NULL);
for (i = 0; i < G_N_ELEMENTS(test_data); i++) {
- if (test_data[i].ivalg == QCRYPTO_IVGEN_ALG_ESSIV &&
+ if (test_data[i].ivalg == QCRYPTO_IV_GEN_ALGO_ESSIV &&
!qcrypto_hash_supports(test_data[i].hashalg)) {
continue;
}
diff --git a/tests/unit/test-crypto-pbkdf.c b/tests/unit/test-crypto-pbkdf.c
index 43c417f..ddb7244 100644
--- a/tests/unit/test-crypto-pbkdf.c
+++ b/tests/unit/test-crypto-pbkdf.c
@@ -25,14 +25,13 @@
#include <sys/resource.h>
#endif
-#if ((defined(CONFIG_NETTLE) || defined(CONFIG_GCRYPT)) && \
- (defined(_WIN32) || defined(RUSAGE_THREAD)))
+#if defined(_WIN32) || defined(RUSAGE_THREAD) || defined(CONFIG_DARWIN)
#include "crypto/pbkdf.h"
typedef struct QCryptoPbkdfTestData QCryptoPbkdfTestData;
struct QCryptoPbkdfTestData {
const char *path;
- QCryptoHashAlgorithm hash;
+ QCryptoHashAlgo hash;
unsigned int iterations;
const char *key;
size_t nkey;
@@ -53,7 +52,7 @@ static QCryptoPbkdfTestData test_data[] = {
/* RFC 3962 test data */
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter1",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 1,
.key = "password",
.nkey = 8,
@@ -67,7 +66,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter2",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 2,
.key = "password",
.nkey = 8,
@@ -81,7 +80,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter1200a",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 1200,
.key = "password",
.nkey = 8,
@@ -95,7 +94,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter5",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 5,
.key = "password",
.nkey = 8,
@@ -109,7 +108,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter1200b",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
@@ -124,7 +123,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter1200c",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
@@ -139,7 +138,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc3962/sha1/iter50",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 50,
.key = "\360\235\204\236", /* g-clef ("\xf09d849e) */
.nkey = 4,
@@ -155,7 +154,7 @@ static QCryptoPbkdfTestData test_data[] = {
/* RFC-6070 test data */
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter1",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 1,
.key = "password",
.nkey = 8,
@@ -167,7 +166,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter2",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 2,
.key = "password",
.nkey = 8,
@@ -179,7 +178,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter4096",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 4096,
.key = "password",
.nkey = 8,
@@ -191,7 +190,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter16777216",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 16777216,
.key = "password",
.nkey = 8,
@@ -204,7 +203,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter4096a",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 4096,
.key = "passwordPASSWORDpassword",
.nkey = 24,
@@ -217,7 +216,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/rfc6070/sha1/iter4096b",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 4096,
.key = "pass\0word",
.nkey = 9,
@@ -232,7 +231,7 @@ static QCryptoPbkdfTestData test_data[] = {
{
/* empty password test. */
.path = "/crypto/pbkdf/nonrfc/sha1/iter2",
- .hash = QCRYPTO_HASH_ALG_SHA1,
+ .hash = QCRYPTO_HASH_ALGO_SHA1,
.iterations = 2,
.key = "",
.nkey = 0,
@@ -245,7 +244,7 @@ static QCryptoPbkdfTestData test_data[] = {
{
/* Password exceeds block size test */
.path = "/crypto/pbkdf/nonrfc/sha256/iter1200",
- .hash = QCRYPTO_HASH_ALG_SHA256,
+ .hash = QCRYPTO_HASH_ALGO_SHA256,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
@@ -260,7 +259,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/nonrfc/sha512/iter1200",
- .hash = QCRYPTO_HASH_ALG_SHA512,
+ .hash = QCRYPTO_HASH_ALGO_SHA512,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
@@ -277,7 +276,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/nonrfc/sha224/iter1200",
- .hash = QCRYPTO_HASH_ALG_SHA224,
+ .hash = QCRYPTO_HASH_ALGO_SHA224,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
@@ -294,7 +293,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/nonrfc/sha384/iter1200",
- .hash = QCRYPTO_HASH_ALG_SHA384,
+ .hash = QCRYPTO_HASH_ALGO_SHA384,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
@@ -311,7 +310,7 @@ static QCryptoPbkdfTestData test_data[] = {
},
{
.path = "/crypto/pbkdf/nonrfc/ripemd160/iter1200",
- .hash = QCRYPTO_HASH_ALG_RIPEMD160,
+ .hash = QCRYPTO_HASH_ALGO_RIPEMD160,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
@@ -326,10 +325,26 @@ static QCryptoPbkdfTestData test_data[] = {
"\xce\xbf\x91\x14\x8b\x5c\x48\x41",
.nout = 32
},
+#ifdef CONFIG_CRYPTO_SM3
+ {
+ .path = "/crypto/pbkdf/nonrfc/sm3/iter2",
+ .hash = QCRYPTO_HASH_ALGO_SM3,
+ .iterations = 2,
+ .key = "password",
+ .nkey = 8,
+ .salt = "ATHENA.MIT.EDUraeburn",
+ .nsalt = 21,
+ .out = "\x48\x71\x1b\x58\xa3\xcb\xce\x06"
+ "\xba\xad\x77\xa8\xb5\xb9\xd8\x07"
+ "\x6a\xe2\xb3\x5b\x95\xce\xc8\xce"
+ "\xe7\xb1\xcb\xee\x61\xdf\x04\xea",
+ .nout = 32
+ },
+#endif
#if 0
{
.path = "/crypto/pbkdf/nonrfc/whirlpool/iter1200",
- .hash = QCRYPTO_HASH_ALG_WHIRLPOOL,
+ .hash = QCRYPTO_HASH_ALGO_WHIRLPOOL,
.iterations = 1200,
.key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
@@ -394,7 +409,7 @@ static void test_pbkdf(const void *opaque)
}
-static void test_pbkdf_timing(void)
+static void test_pbkdf_timing_sha256(void)
{
uint8_t key[32];
uint8_t salt[32];
@@ -403,7 +418,7 @@ static void test_pbkdf_timing(void)
memset(key, 0x5d, sizeof(key));
memset(salt, 0x7c, sizeof(salt));
- iters = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALG_SHA256,
+ iters = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALGO_SHA256,
key, sizeof(key),
salt, sizeof(salt),
32,
@@ -422,14 +437,18 @@ int main(int argc, char **argv)
g_assert(qcrypto_init(NULL) == 0);
for (i = 0; i < G_N_ELEMENTS(test_data); i++) {
+ if (!qcrypto_pbkdf2_supports(test_data[i].hash)) {
+ continue;
+ }
+
if (!test_data[i].slow ||
g_test_slow()) {
g_test_add_data_func(test_data[i].path, &test_data[i], test_pbkdf);
}
}
- if (g_test_slow()) {
- g_test_add_func("/crypt0/pbkdf/timing", test_pbkdf_timing);
+ if (g_test_slow() && qcrypto_pbkdf2_supports(QCRYPTO_HASH_ALGO_SHA256)) {
+ g_test_add_func("/crypt0/pbkdf/timing/sha256", test_pbkdf_timing_sha256);
}
return g_test_run();
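The hunk above registers each PBKDF vector only when qcrypto_pbkdf2_supports() reports that the digest is available, so builds whose crypto backend lacks, say, SM3 skip those entries instead of failing. A minimal sketch of the same guard outside the test harness, assuming the qcrypto_pbkdf2() prototype from crypto/pbkdf.h; derive_key32() is a hypothetical helper, not part of this patch:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/pbkdf.h"

    /* Hypothetical helper: derive a 32-byte key, refusing unsupported digests. */
    static int derive_key32(QCryptoHashAlgo hash, const char *password,
                            const uint8_t *salt, size_t nsalt,
                            uint8_t out[32], Error **errp)
    {
        if (!qcrypto_pbkdf2_supports(hash)) {
            error_setg(errp, "PBKDF2 with the requested hash is not supported");
            return -1;
        }
        /* Iteration count picked arbitrarily for the example */
        return qcrypto_pbkdf2(hash,
                              (const uint8_t *)password, strlen(password),
                              salt, nsalt,
                              10000,
                              out, 32,
                              errp);
    }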
diff --git a/tests/unit/test-crypto-secret.c b/tests/unit/test-crypto-secret.c
index ffd13ff..fc32a01 100644
--- a/tests/unit/test-crypto-secret.c
+++ b/tests/unit/test-crypto-secret.c
@@ -22,6 +22,7 @@
#include "crypto/init.h"
#include "crypto/secret.h"
+#include "crypto/cipher.h"
#include "qapi/error.h"
#include "qemu/module.h"
#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING)
@@ -597,18 +598,21 @@ int main(int argc, char **argv)
g_test_add_func("/crypto/secret/conv/utf8/base64",
test_secret_conv_utf8_base64);
- g_test_add_func("/crypto/secret/crypt/raw",
- test_secret_crypt_raw);
- g_test_add_func("/crypto/secret/crypt/base64",
- test_secret_crypt_base64);
- g_test_add_func("/crypto/secret/crypt/shortkey",
- test_secret_crypt_short_key);
- g_test_add_func("/crypto/secret/crypt/shortiv",
- test_secret_crypt_short_iv);
- g_test_add_func("/crypto/secret/crypt/missingiv",
- test_secret_crypt_missing_iv);
- g_test_add_func("/crypto/secret/crypt/badiv",
- test_secret_crypt_bad_iv);
+ if (qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128,
+ QCRYPTO_CIPHER_MODE_CBC)) {
+ g_test_add_func("/crypto/secret/crypt/raw",
+ test_secret_crypt_raw);
+ g_test_add_func("/crypto/secret/crypt/base64",
+ test_secret_crypt_base64);
+ g_test_add_func("/crypto/secret/crypt/shortkey",
+ test_secret_crypt_short_key);
+ g_test_add_func("/crypto/secret/crypt/shortiv",
+ test_secret_crypt_short_iv);
+ g_test_add_func("/crypto/secret/crypt/missingiv",
+ test_secret_crypt_missing_iv);
+ g_test_add_func("/crypto/secret/crypt/badiv",
+ test_secret_crypt_bad_iv);
+ }
return g_test_run();
}
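The secret "crypt" tests decrypt their payload with AES-128-CBC, which is why the hunk registers them only when qcrypto_cipher_supports() reports that algorithm/mode pair. A short sketch of the same probe-then-create pattern, assuming the qcrypto_cipher_new() prototype from crypto/cipher.h; new_aes128_cbc() is a hypothetical helper:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/cipher.h"

    /* Hypothetical helper: only build the cipher when the backend supports it. */
    static QCryptoCipher *new_aes128_cbc(const uint8_t *key, size_t nkey,
                                         Error **errp)
    {
        if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128,
                                     QCRYPTO_CIPHER_MODE_CBC)) {
            error_setg(errp, "AES-128-CBC is not available in this build");
            return NULL;
        }
        return qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128,
                                  QCRYPTO_CIPHER_MODE_CBC,
                                  key, nkey, errp);
    }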
diff --git a/tests/unit/test-crypto-tlssession.c b/tests/unit/test-crypto-tlssession.c
index b12e7b6..554054e 100644
--- a/tests/unit/test-crypto-tlssession.c
+++ b/tests/unit/test-crypto-tlssession.c
@@ -35,18 +35,40 @@
#define PSKFILE WORKDIR "keys.psk"
#define KEYFILE WORKDIR "key-ctx.pem"
-static ssize_t testWrite(const char *buf, size_t len, void *opaque)
+static ssize_t
+testWrite(const char *buf, size_t len, void *opaque, Error **errp)
{
int *fd = opaque;
+ int ret;
- return write(*fd, buf, len);
+ ret = write(*fd, buf, len);
+ if (ret < 0) {
+ if (errno == EAGAIN) {
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
+ } else {
+ error_setg_errno(errp, errno, "unable to write");
+ return -1;
+ }
+ }
+ return ret;
}
-static ssize_t testRead(char *buf, size_t len, void *opaque)
+static ssize_t
+testRead(char *buf, size_t len, void *opaque, Error **errp)
{
int *fd = opaque;
+ int ret;
- return read(*fd, buf, len);
+ ret = read(*fd, buf, len);
+ if (ret < 0) {
+ if (errno == EAGAIN) {
+ return QCRYPTO_TLS_SESSION_ERR_BLOCK;
+ } else {
+ error_setg_errno(errp, errno, "unable to read");
+ return -1;
+ }
+ }
+ return ret;
}
static QCryptoTLSCreds *test_tls_creds_psk_create(
@@ -136,8 +158,7 @@ static void test_crypto_tls_session_psk(void)
rv = qcrypto_tls_session_handshake(serverSess,
&error_abort);
g_assert(rv >= 0);
- if (qcrypto_tls_session_get_handshake_status(serverSess) ==
- QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
+ if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
serverShake = true;
}
}
@@ -145,8 +166,7 @@ static void test_crypto_tls_session_psk(void)
rv = qcrypto_tls_session_handshake(clientSess,
&error_abort);
g_assert(rv >= 0);
- if (qcrypto_tls_session_get_handshake_status(clientSess) ==
- QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
+ if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
clientShake = true;
}
}
@@ -330,8 +350,7 @@ static void test_crypto_tls_session_x509(const void *opaque)
rv = qcrypto_tls_session_handshake(serverSess,
&error_abort);
g_assert(rv >= 0);
- if (qcrypto_tls_session_get_handshake_status(serverSess) ==
- QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
+ if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
serverShake = true;
}
}
@@ -339,8 +358,7 @@ static void test_crypto_tls_session_x509(const void *opaque)
rv = qcrypto_tls_session_handshake(clientSess,
&error_abort);
g_assert(rv >= 0);
- if (qcrypto_tls_session_get_handshake_status(clientSess) ==
- QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
+ if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) {
clientShake = true;
}
}
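Two API changes drive the hunks above: the read/write callbacks now take an Error ** and signal a would-block condition with QCRYPTO_TLS_SESSION_ERR_BLOCK instead of leaking errno, and qcrypto_tls_session_handshake() now returns the handshake status directly, so callers no longer need qcrypto_tls_session_get_handshake_status(). A sketch of a handshake loop written against those assumptions (a real caller would wait for I/O readiness instead of spinning):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/tlssession.h"

    /* Sketch only: drive the handshake to completion. */
    static bool handshake_until_complete(QCryptoTLSSession *sess, Error **errp)
    {
        int rv;

        do {
            rv = qcrypto_tls_session_handshake(sess, errp);
            if (rv < 0) {
                return false;   /* hard error, *errp is set */
            }
            /* rv is QCRYPTO_TLS_HANDSHAKE_SENDING or _RECVING while the
             * underlying channel still has data to move. */
        } while (rv != QCRYPTO_TLS_HANDSHAKE_COMPLETE);

        return true;
    }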
diff --git a/tests/unit/test-fifo.c b/tests/unit/test-fifo.c
new file mode 100644
index 0000000..14153c4
--- /dev/null
+++ b/tests/unit/test-fifo.c
@@ -0,0 +1,449 @@
+/*
+ * Fifo8 tests
+ *
+ * Copyright 2024 Mark Cave-Ayland
+ *
+ * Authors:
+ * Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "migration/vmstate.h"
+#include "qemu/fifo8.h"
+
+const VMStateInfo vmstate_info_uint32;
+const VMStateInfo vmstate_info_buffer;
+
+
+static void test_fifo8_pop_bufptr_wrap(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 };
+ uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa };
+ const uint8_t *buf;
+ uint32_t count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: [ . . . . . . . . ]
+ */
+
+ fifo8_push_all(&fifo, data_in1, sizeof(data_in1));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: [ 1 2 3 4 . . . . ]
+ */
+ buf = fifo8_pop_bufptr(&fifo, 2, &count);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: [ 1 2 3 4 . . . . ]
+ * buf --^ count = 2
+ */
+ g_assert(count == 2);
+ g_assert(buf[0] == 0x1 && buf[1] == 0x2);
+
+ fifo8_push_all(&fifo, data_in2, sizeof(data_in2));
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: [ 9 a 3 4 5 6 7 8 ]
+ */
+ buf = fifo8_pop_bufptr(&fifo, 8, &count);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: [ 9 a 3 4 5 6 7 8 ]
+ * buf --^ count = 6
+ */
+ g_assert(count == 6);
+ g_assert(buf[0] == 0x3 && buf[1] == 0x4 && buf[2] == 0x5 &&
+ buf[3] == 0x6 && buf[4] == 0x7 && buf[5] == 0x8);
+
+ g_assert(fifo8_num_used(&fifo) == 2);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_pop_bufptr(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 };
+ const uint8_t *buf;
+ uint32_t count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: [ . . . . . . . . ]
+ */
+
+ fifo8_push_all(&fifo, data_in, sizeof(data_in));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: [ 1 2 3 4 . . . . ]
+ */
+ buf = fifo8_pop_bufptr(&fifo, 2, &count);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: [ 1 2 3 4 . . . . ]
+ * buf --^ count = 2
+ */
+ g_assert(count == 2);
+ g_assert(buf[0] == 0x1 && buf[1] == 0x2);
+
+ g_assert(fifo8_num_used(&fifo) == 2);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_peek_bufptr_wrap(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 };
+ uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa };
+ const uint8_t *buf;
+ uint32_t count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in1, sizeof(data_in1));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+ buf = fifo8_peek_bufptr(&fifo, 2, &count);
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ * buf: [ 1 2 ] count = 2
+ */
+ g_assert(count == 2);
+ g_assert(buf[0] == 0x1 && buf[1] == 0x2);
+
+ buf = fifo8_pop_bufptr(&fifo, 2, &count);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: { 1 2 3 4 . . . . }
+ * buf: [ 1 2 ] count = 2
+ */
+ g_assert(count == 2);
+ g_assert(buf[0] == 0x1 && buf[1] == 0x2);
+ fifo8_push_all(&fifo, data_in2, sizeof(data_in2));
+ /*
+ * tail ---]v-- head used = 8
+ * FIFO: { 9 a 3 4 5 6 7 8 }
+ */
+
+ buf = fifo8_peek_bufptr(&fifo, 8, &count);
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: { 9 a 3 4 5 6 7 8 }
+ * buf: [ 3 4 5 6 7 8 ] count = 6
+ */
+ g_assert(count == 6);
+ g_assert(buf[0] == 0x3 && buf[1] == 0x4 && buf[2] == 0x5 &&
+ buf[3] == 0x6 && buf[4] == 0x7 && buf[5] == 0x8);
+
+ g_assert(fifo8_num_used(&fifo) == 8);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_peek_bufptr(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 };
+ const uint8_t *buf;
+ uint32_t count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in, sizeof(data_in));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+ buf = fifo8_peek_bufptr(&fifo, 2, &count);
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ * buf: [ 1 2 ] count = 2
+ */
+ g_assert(count == 2);
+ g_assert(buf[0] == 0x1 && buf[1] == 0x2);
+
+ g_assert(fifo8_num_used(&fifo) == 4);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_pop_buf_wrap(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 };
+ uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc };
+ uint8_t data_out[4];
+ int count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in1, sizeof(data_in1));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+ fifo8_pop_buf(&fifo, NULL, 4);
+ /*
+ * tail --]v-- head used = 0
+ * FIFO: [ 1 2 3 4 . . . . ]
+ */
+
+ fifo8_push_all(&fifo, data_in2, sizeof(data_in2));
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: { 9 a b c 5 6 7 8 }
+ */
+ count = fifo8_pop_buf(&fifo, NULL, 4);
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 9 a b c 5 6 7 8 }
+ */
+ g_assert(count == 4);
+ count = fifo8_pop_buf(&fifo, data_out, 4);
+ /*
+ * tail --]v-- head used = 0
+ * FIFO: { 9 a b c 5 6 7 8 }
+ */
+ g_assert(count == 4);
+ g_assert(data_out[0] == 0x9 && data_out[1] == 0xa &&
+ data_out[2] == 0xb && data_out[3] == 0xc);
+
+ g_assert(fifo8_num_used(&fifo) == 0);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_pop_buf(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8 };
+ uint8_t data_out[] = { 0xff, 0xff, 0xff, 0xff };
+ int count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in, sizeof(data_in));
+ /*
+ * head --v ]-- tail used = 8
+ * FIFO: { 1 2 3 4 5 6 7 8 }
+ */
+ count = fifo8_pop_buf(&fifo, NULL, 4);
+ /*
+ * tail --]v-- head used = 4
+ * FIFO: { 1 2 3 4 5 6 7 8 }
+ */
+ g_assert(count == 4);
+ count = fifo8_pop_buf(&fifo, data_out, 4);
+ g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 &&
+ data_out[2] == 0x7 && data_out[3] == 0x8);
+
+ g_assert(fifo8_num_used(&fifo) == 0);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_peek_buf_wrap(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 };
+ uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc };
+ uint8_t data_out[8];
+ int count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in1, sizeof(data_in1));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+ fifo8_pop_buf(&fifo, NULL, 4);
+ /*
+ * tail --]v-- head used = 0
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in2, sizeof(data_in2));
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: { 9 a b c 5 6 7 8 }
+ */
+ count = fifo8_peek_buf(&fifo, NULL, 4);
+ g_assert(count == 4);
+ count = fifo8_peek_buf(&fifo, data_out, 4);
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: { 9 a b c 5 6 7 8 }
+ * buf: [ 5 6 7 8 ] count = 4
+ */
+ g_assert(count == 4);
+ g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 &&
+ data_out[2] == 0x7 && data_out[3] == 0x8);
+
+ count = fifo8_peek_buf(&fifo, data_out, 8);
+ /*
+ * tail --]v-- head used = 8
+ * FIFO: { 9 a b c 5 6 7 8 }
+ * buf: [ 5 6 7 8 9 a b c ] count = 8
+ */
+ g_assert(count == 8);
+ g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 &&
+ data_out[2] == 0x7 && data_out[3] == 0x8);
+ g_assert(data_out[4] == 0x9 && data_out[5] == 0xa &&
+ data_out[6] == 0xb && data_out[7] == 0xc);
+
+ g_assert(fifo8_num_used(&fifo) == 8);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_peek_buf(void)
+{
+ Fifo8 fifo;
+ uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 };
+ uint8_t data_out[] = { 0xff, 0xff, 0xff, 0xff };
+ int count;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+
+ fifo8_push_all(&fifo, data_in, sizeof(data_in));
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ */
+ count = fifo8_peek_buf(&fifo, NULL, 4);
+ g_assert(count == 4);
+
+ g_assert(data_out[0] == 0xff && data_out[1] == 0xff &&
+ data_out[2] == 0xff && data_out[3] == 0xff);
+ count = fifo8_peek_buf(&fifo, data_out, 4);
+ /*
+ * head --v ]-- tail used = 4
+ * FIFO: { 1 2 3 4 . . . . }
+ * buf: [ 1 2 3 4 ] count = 4
+ */
+ g_assert(count == 4);
+ g_assert(data_out[0] == 0x1 && data_out[1] == 0x2 &&
+ data_out[2] == 0x3 && data_out[3] == 0x4);
+
+ g_assert(fifo8_num_used(&fifo) == 4);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_peek(void)
+{
+ Fifo8 fifo;
+ uint8_t c;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+ fifo8_push(&fifo, 0x1);
+ /*
+ * head --v]-- tail used = 1
+ * FIFO: { 1 . . . . . . . }
+ */
+ fifo8_push(&fifo, 0x2);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: { 1 2 . . . . . . }
+ */
+
+ c = fifo8_peek(&fifo);
+ g_assert(c == 0x1);
+ fifo8_pop(&fifo);
+ /*
+ * head --v]-- tail used = 1
+ * FIFO: { 1 2 . . . . . . }
+ */
+ c = fifo8_peek(&fifo);
+ g_assert(c == 0x2);
+
+ g_assert(fifo8_num_used(&fifo) == 1);
+ fifo8_destroy(&fifo);
+}
+
+static void test_fifo8_pushpop(void)
+{
+ Fifo8 fifo;
+ uint8_t c;
+
+ fifo8_create(&fifo, 8);
+ /*
+ * head --v-- tail used = 0
+ * FIFO: { . . . . . . . . }
+ */
+ fifo8_push(&fifo, 0x1);
+ /*
+ * head --v]-- tail used = 1
+ * FIFO: { 1 . . . . . . . }
+ */
+ fifo8_push(&fifo, 0x2);
+ /*
+ * head --v ]-- tail used = 2
+ * FIFO: { 1 2 . . . . . . }
+ */
+
+ c = fifo8_pop(&fifo);
+ /*
+ * head --v]-- tail used = 1
+ * FIFO: { 1 2 . . . . . . }
+ */
+ g_assert(c == 0x1);
+ c = fifo8_pop(&fifo);
+ /*
+ * tail --]v-- head used = 0
+ * FIFO: { 1 2 . . . . . . }
+ */
+ g_assert(c == 0x2);
+
+ g_assert(fifo8_num_used(&fifo) == 0);
+ fifo8_destroy(&fifo);
+}
+
+int main(int argc, char *argv[])
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/fifo8/pushpop", test_fifo8_pushpop);
+ g_test_add_func("/fifo8/peek", test_fifo8_peek);
+ g_test_add_func("/fifo8/peek_buf", test_fifo8_peek_buf);
+ g_test_add_func("/fifo8/peek_buf_wrap", test_fifo8_peek_buf_wrap);
+ g_test_add_func("/fifo8/pop_buf", test_fifo8_pop_buf);
+ g_test_add_func("/fifo8/pop_buf_wrap", test_fifo8_pop_buf_wrap);
+ g_test_add_func("/fifo8/peek_bufptr", test_fifo8_peek_bufptr);
+ g_test_add_func("/fifo8/peek_bufptr_wrap", test_fifo8_peek_bufptr_wrap);
+ g_test_add_func("/fifo8/pop_bufptr", test_fifo8_pop_bufptr);
+ g_test_add_func("/fifo8/pop_bufptr_wrap", test_fifo8_pop_bufptr_wrap);
+ return g_test_run();
+}
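A detail the *_bufptr tests above depend on: fifo8_peek_bufptr() and fifo8_pop_bufptr() hand back a pointer into the ring buffer itself, so when the stored data wraps past the end of the buffer the returned chunk is clamped to the contiguous part and *count can be smaller than the number of bytes requested. A sketch of a consumer written against that contract; drain_fifo() is a hypothetical helper, not part of this patch:

    #include "qemu/osdep.h"
    #include "qemu/fifo8.h"

    /* Hypothetical consumer: drain the FIFO one contiguous chunk at a time. */
    static void drain_fifo(Fifo8 *fifo,
                           void (*consume)(const uint8_t *buf, uint32_t len))
    {
        while (!fifo8_is_empty(fifo)) {
            uint32_t count;
            /* Ask for everything; the ring may return less if it wraps. */
            const uint8_t *buf = fifo8_pop_bufptr(fifo, fifo8_num_used(fifo),
                                                  &count);
            consume(buf, count);
        }
    }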
diff --git a/tests/unit/test-forward-visitor.c b/tests/unit/test-forward-visitor.c
index eea8ffc..aad1c89 100644
--- a/tests/unit/test-forward-visitor.c
+++ b/tests/unit/test-forward-visitor.c
@@ -12,8 +12,8 @@
#include "qapi/forward-visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/error.h"
-#include "qapi/qmp/qobject.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qobject.h"
+#include "qobject/qdict.h"
#include "test-qapi-visit.h"
#include "qemu/keyval.h"
diff --git a/tests/unit/test-image-locking.c b/tests/unit/test-image-locking.c
index 2624cec..019195f 100644
--- a/tests/unit/test-image-locking.c
+++ b/tests/unit/test-image-locking.c
@@ -26,9 +26,9 @@
#include "qemu/osdep.h"
#include "block/block.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/main-loop.h"
static BlockBackend *open_image(const char *path,
diff --git a/tests/unit/test-io-channel-socket.c b/tests/unit/test-io-channel-socket.c
index b964bb2..dc7be96 100644
--- a/tests/unit/test-io-channel-socket.c
+++ b/tests/unit/test-io-channel-socket.c
@@ -506,7 +506,7 @@ static void test_io_channel_unix_listen_cleanup(void)
{
QIOChannelSocket *ioc;
struct sockaddr_un un;
- int sock;
+ int sock, ret = 0;
#define TEST_SOCKET "test-io-channel-socket.sock"
@@ -519,7 +519,9 @@ static void test_io_channel_unix_listen_cleanup(void)
un.sun_family = AF_UNIX;
snprintf(un.sun_path, sizeof(un.sun_path), "%s", TEST_SOCKET);
unlink(TEST_SOCKET);
- bind(sock, (struct sockaddr *)&un, sizeof(un));
+ ret = bind(sock, (struct sockaddr *)&un, sizeof(un));
+ g_assert_cmpint(ret, ==, 0);
+
ioc->fd = sock;
ioc->localAddrLen = sizeof(ioc->localAddr);
getsockname(sock, (struct sockaddr *)&ioc->localAddr,
diff --git a/tests/unit/test-keyval.c b/tests/unit/test-keyval.c
index 4dc52c7..c6e8f4f 100644
--- a/tests/unit/test-keyval.c
+++ b/tests/unit/test-keyval.c
@@ -13,9 +13,9 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "test-qapi-visit.h"
#include "qemu/cutils.h"
diff --git a/tests/unit/test-qdev-global-props.c b/tests/unit/test-qdev-global-props.c
index c8862ca..3306276 100644
--- a/tests/unit/test-qdev-global-props.c
+++ b/tests/unit/test-qdev-global-props.c
@@ -46,13 +46,12 @@ struct MyType {
uint32_t prop2;
};
-static Property static_props[] = {
+static const Property static_props[] = {
DEFINE_PROP_UINT32("prop1", MyType, prop1, PROP_DEFAULT),
DEFINE_PROP_UINT32("prop2", MyType, prop2, PROP_DEFAULT),
- DEFINE_PROP_END_OF_LIST()
};
-static void static_prop_class_init(ObjectClass *oc, void *data)
+static void static_prop_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -72,6 +71,26 @@ static const TypeInfo subclass_type = {
.parent = TYPE_STATIC_PROPS,
};
+/*
+ * Initialize a fake machine, preparing for the tests that follow.
+ *
+ * All the tests below (even those run in subprocesses, which inherit the
+ * global state of the parent process) will try to create a qdev and
+ * realize the device.
+ *
+ * Realization of such an anonymous qdev (with no parent object) requires
+ * both the machine object and its "unattached" container to be present.
+ */
+static void test_init_machine(void)
+{
+ /* This is a fake machine - it doesn't need to be a machine object */
+ Object *machine = object_property_add_new_container(
+ object_get_root(), "machine");
+
+ /* This container must exist for anonymous qdevs to realize() */
+ object_property_add_new_container(machine, "unattached");
+}
+
/* Test simple static property setting to default value */
static void test_static_prop_subprocess(void)
{
@@ -158,7 +177,7 @@ static void dynamic_instance_init(Object *obj)
NULL, NULL);
}
-static void dynamic_class_init(ObjectClass *oc, void *data)
+static void dynamic_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -174,7 +193,7 @@ static const TypeInfo dynamic_prop_type = {
.class_init = dynamic_class_init,
};
-static void hotplug_class_init(ObjectClass *oc, void *data)
+static void hotplug_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -190,7 +209,7 @@ static const TypeInfo hotplug_type = {
.class_init = hotplug_class_init,
};
-static void nohotplug_class_init(ObjectClass *oc, void *data)
+static void nohotplug_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -295,6 +314,8 @@ int main(int argc, char **argv)
type_register_static(&nohotplug_type);
type_register_static(&nondevice_type);
+ test_init_machine();
+
g_test_add_func("/qdev/properties/static/default/subprocess",
test_static_prop_subprocess);
g_test_add_func("/qdev/properties/static/default",
diff --git a/tests/unit/test-qemu-opts.c b/tests/unit/test-qemu-opts.c
index 828d40e..8d03a69 100644
--- a/tests/unit/test-qemu-opts.c
+++ b/tests/unit/test-qemu-opts.c
@@ -12,8 +12,8 @@
#include "qemu/option.h"
#include "qemu/option_int.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qstring.h"
#include "qemu/config-file.h"
diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c
index 8cddf5d..587e30c 100644
--- a/tests/unit/test-qga.c
+++ b/tests/unit/test-qga.c
@@ -5,8 +5,8 @@
#include <sys/un.h>
#include "../qtest/libqtest.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
typedef struct {
char *test_dir;
@@ -332,6 +332,22 @@ static void test_qga_get_fsinfo(gconstpointer fix)
}
}
+static void test_qga_get_load(gconstpointer fix)
+{
+ const TestFixture *fixture = fix;
+ g_autoptr(QDict) ret = NULL;
+ QDict *load;
+
+ ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-load'}");
+ g_assert_nonnull(ret);
+ qmp_assert_no_error(ret);
+
+ load = qdict_get_qdict(ret, "return");
+ g_assert(qdict_haskey(load, "load1m"));
+ g_assert(qdict_haskey(load, "load5m"));
+ g_assert(qdict_haskey(load, "load15m"));
+}
+
static void test_qga_get_memory_block_info(gconstpointer fix)
{
const TestFixture *fixture = fix;
@@ -1105,6 +1121,7 @@ int main(int argc, char **argv)
g_test_add_data_func("/qga/get-vcpus", &fix, test_qga_get_vcpus);
}
g_test_add_data_func("/qga/get-fsinfo", &fix, test_qga_get_fsinfo);
+ g_test_add_data_func("/qga/get-load", &fix, test_qga_get_load);
g_test_add_data_func("/qga/get-memory-block-info", &fix,
test_qga_get_memory_block_info);
g_test_add_data_func("/qga/get-memory-blocks", &fix,
diff --git a/tests/unit/test-qgraph.c b/tests/unit/test-qgraph.c
index 334c76c..ca1d60f 100644
--- a/tests/unit/test-qgraph.c
+++ b/tests/unit/test-qgraph.c
@@ -44,7 +44,6 @@ static void *driverfunct(void *obj, QGuestAllocator *machine, void *arg)
static void testfunct(void *obj, void *arg, QGuestAllocator *alloc)
{
- return;
}
static void check_interface(const char *interface)
diff --git a/tests/unit/test-qmp-cmds.c b/tests/unit/test-qmp-cmds.c
index 6d52b4e..ad53886 100644
--- a/tests/unit/test-qmp-cmds.c
+++ b/tests/unit/test-qmp-cmds.c
@@ -1,9 +1,9 @@
#include "qemu/osdep.h"
#include "qapi/compat-policy.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qapi/error.h"
#include "qapi/qobject-input-visitor.h"
#include "tests/test-qapi-types.h"
diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c
index 08e95a3..2aac271 100644
--- a/tests/unit/test-qmp-event.c
+++ b/tests/unit/test-qmp-event.c
@@ -15,11 +15,11 @@
#include "qapi/compat-policy.h"
#include "qapi/error.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qjson.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qapi/qmp-event.h"
#include "test-qapi-events.h"
#include "test-qapi-emit-events.h"
diff --git a/tests/unit/test-qobject-input-visitor.c b/tests/unit/test-qobject-input-visitor.c
index 024e26c..84bdcdf 100644
--- a/tests/unit/test-qobject-input-visitor.c
+++ b/tests/unit/test-qobject-input-visitor.c
@@ -17,12 +17,12 @@
#include "qapi/qapi-visit-introspect.h"
#include "qapi/qobject-input-visitor.h"
#include "test-qapi-visit.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
-#include "qapi/qmp/qjson.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
+#include "qobject/qjson.h"
#include "test-qapi-introspect.h"
#include "qapi/qapi-introspect.h"
@@ -720,7 +720,7 @@ static void test_visitor_in_union_in_union(TestInputVisitorData *data,
visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort);
g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A);
- g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A1);
+ g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUM_A_VALUE_A1);
g_assert_cmpint(tmp->u.value_a.u.value_a1.integer, ==, 2);
g_assert_cmpint(strcmp(tmp->u.value_a.u.value_a1.name, "fish"), ==, 0);
@@ -734,7 +734,7 @@ static void test_visitor_in_union_in_union(TestInputVisitorData *data,
visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort);
g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A);
- g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A2);
+ g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUM_A_VALUE_A2);
g_assert_cmpint(tmp->u.value_a.u.value_a2.integer, ==, 1729);
g_assert_cmpint(tmp->u.value_a.u.value_a2.size, ==, 87539319);
diff --git a/tests/unit/test-qobject-output-visitor.c b/tests/unit/test-qobject-output-visitor.c
index 1535b3a..407ab9e 100644
--- a/tests/unit/test-qobject-output-visitor.c
+++ b/tests/unit/test-qobject-output-visitor.c
@@ -15,12 +15,12 @@
#include "qapi/error.h"
#include "qapi/qobject-output-visitor.h"
#include "test-qapi-visit.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qnull.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qnull.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
typedef struct TestOutputVisitorData {
Visitor *ov;
@@ -359,7 +359,7 @@ static void test_visitor_out_union_in_union(TestOutputVisitorData *data,
TestUnionInUnion *tmp = g_new0(TestUnionInUnion, 1);
tmp->type = TEST_UNION_ENUM_VALUE_A;
- tmp->u.value_a.type_a = TEST_UNION_ENUMA_VALUE_A1;
+ tmp->u.value_a.type_a = TEST_UNION_ENUM_A_VALUE_A1;
tmp->u.value_a.u.value_a1.integer = 42;
tmp->u.value_a.u.value_a1.name = g_strdup("fish");
@@ -377,7 +377,7 @@ static void test_visitor_out_union_in_union(TestOutputVisitorData *data,
visitor_reset(data);
tmp = g_new0(TestUnionInUnion, 1);
tmp->type = TEST_UNION_ENUM_VALUE_A;
- tmp->u.value_a.type_a = TEST_UNION_ENUMA_VALUE_A2;
+ tmp->u.value_a.type_a = TEST_UNION_ENUM_A_VALUE_A2;
tmp->u.value_a.u.value_a2.integer = 1729;
tmp->u.value_a.u.value_a2.size = 87539319;
diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c
index 5d2003b..3aa98e6 100644
--- a/tests/unit/test-replication.c
+++ b/tests/unit/test-replication.c
@@ -11,13 +11,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/option.h"
#include "qemu/main-loop.h"
#include "block/replication.h"
#include "block/block_int.h"
#include "block/qdict.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#define IMG_SIZE (64 * 1024 * 1024)
diff --git a/tests/unit/test-resv-mem.c b/tests/unit/test-resv-mem.c
index cd8f731..4de2d04 100644
--- a/tests/unit/test-resv-mem.c
+++ b/tests/unit/test-resv-mem.c
@@ -10,7 +10,7 @@
#include "qemu/osdep.h"
#include "qemu/range.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qemu/reserved-region.h"
#define DEBUG 0
diff --git a/tests/unit/test-seccomp.c b/tests/unit/test-seccomp.c
index bab93fd..71d4083 100644
--- a/tests/unit/test-seccomp.c
+++ b/tests/unit/test-seccomp.c
@@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
-#include "sysemu/seccomp.h"
+#include "system/seccomp.h"
#include "qapi/error.h"
#include "qemu/module.h"
diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c
index f9bccb5..326045e 100644
--- a/tests/unit/test-smp-parse.c
+++ b/tests/unit/test-smp-parse.c
@@ -924,7 +924,7 @@ static void unsupported_params_init(const MachineClass *mc, SMPTestData *data)
}
}
-static void machine_base_class_init(ObjectClass *oc, void *data)
+static void machine_base_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -934,7 +934,8 @@ static void machine_base_class_init(ObjectClass *oc, void *data)
mc->name = g_strdup(SMP_MACHINE_NAME);
}
-static void machine_generic_invalid_class_init(ObjectClass *oc, void *data)
+static void machine_generic_invalid_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -943,21 +944,22 @@ static void machine_generic_invalid_class_init(ObjectClass *oc, void *data)
mc->max_cpus = MAX_CPUS - 1;
}
-static void machine_with_modules_class_init(ObjectClass *oc, void *data)
+static void machine_with_modules_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->smp_props.modules_supported = true;
}
-static void machine_with_dies_class_init(ObjectClass *oc, void *data)
+static void machine_with_dies_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->smp_props.dies_supported = true;
}
-static void machine_with_modules_dies_class_init(ObjectClass *oc, void *data)
+static void machine_with_modules_dies_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -965,28 +967,29 @@ static void machine_with_modules_dies_class_init(ObjectClass *oc, void *data)
mc->smp_props.dies_supported = true;
}
-static void machine_with_clusters_class_init(ObjectClass *oc, void *data)
+static void machine_with_clusters_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->smp_props.clusters_supported = true;
}
-static void machine_with_books_class_init(ObjectClass *oc, void *data)
+static void machine_with_books_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->smp_props.books_supported = true;
}
-static void machine_with_drawers_class_init(ObjectClass *oc, void *data)
+static void machine_with_drawers_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->smp_props.drawers_supported = true;
}
-static void machine_with_drawers_books_class_init(ObjectClass *oc, void *data)
+static void machine_with_drawers_books_class_init(ObjectClass *oc,
+ const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -994,7 +997,7 @@ static void machine_with_drawers_books_class_init(ObjectClass *oc, void *data)
mc->smp_props.books_supported = true;
}
-static void machine_full_topo_class_init(ObjectClass *oc, void *data)
+static void machine_full_topo_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c
index 1483e53..33407b5 100644
--- a/tests/unit/test-thread-pool.c
+++ b/tests/unit/test-thread-pool.c
@@ -43,10 +43,10 @@ static void done_cb(void *opaque, int ret)
active--;
}
-static void test_submit(void)
+static void test_submit_no_complete(void)
{
WorkerTestData data = { .n = 0 };
- thread_pool_submit(worker_cb, &data);
+ thread_pool_submit_aio(worker_cb, &data, NULL, NULL);
while (data.n == 0) {
aio_poll(ctx, true);
}
@@ -236,7 +236,7 @@ int main(int argc, char **argv)
ctx = qemu_get_current_aio_context();
g_test_init(&argc, &argv, NULL);
- g_test_add_func("/thread-pool/submit", test_submit);
+ g_test_add_func("/thread-pool/submit-no-complete", test_submit_no_complete);
g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
g_test_add_func("/thread-pool/submit-co", test_submit_co);
g_test_add_func("/thread-pool/submit-many", test_submit_many);
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
index 24032a0..dfa61c7 100644
--- a/tests/unit/test-throttle.c
+++ b/tests/unit/test-throttle.c
@@ -21,7 +21,7 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "block/throttle-groups.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
static AioContext *ctx;
static LeakyBucket bkt;
diff --git a/tests/unit/test-timed-average.c b/tests/unit/test-timed-average.c
index 82c9250..747ed1e 100644
--- a/tests/unit/test-timed-average.c
+++ b/tests/unit/test-timed-average.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/cpu-timers.h"
+#include "system/cpu-timers.h"
#include "qemu/timed-average.h"
/* This is the clock for QEMU_CLOCK_VIRTUAL */
diff --git a/tests/unit/test-util-sockets.c b/tests/unit/test-util-sockets.c
index 4c9dd0b..ee66d72 100644
--- a/tests/unit/test-util-sockets.c
+++ b/tests/unit/test-util-sockets.c
@@ -332,6 +332,220 @@ static void test_socket_unix_abstract(void)
#endif /* CONFIG_LINUX */
+static void inet_parse_test_helper(const char *str,
+ InetSocketAddress *exp_addr, bool success)
+{
+ InetSocketAddress addr;
+ Error *error = NULL;
+
+ int rc = inet_parse(&addr, str, &error);
+
+ if (success) {
+ if (error) {
+ error_report_err(error);
+ }
+ g_assert_cmpint(rc, ==, 0);
+ } else {
+ error_free(error);
+ g_assert_cmpint(rc, <, 0);
+ }
+ if (exp_addr != NULL) {
+ g_assert_cmpstr(addr.host, ==, exp_addr->host);
+ g_assert_cmpstr(addr.port, ==, exp_addr->port);
+ /* Own members: */
+ g_assert_cmpint(addr.has_numeric, ==, exp_addr->has_numeric);
+ g_assert_cmpint(addr.numeric, ==, exp_addr->numeric);
+ g_assert_cmpint(addr.has_to, ==, exp_addr->has_to);
+ g_assert_cmpint(addr.to, ==, exp_addr->to);
+ g_assert_cmpint(addr.has_ipv4, ==, exp_addr->has_ipv4);
+ g_assert_cmpint(addr.ipv4, ==, exp_addr->ipv4);
+ g_assert_cmpint(addr.has_ipv6, ==, exp_addr->has_ipv6);
+ g_assert_cmpint(addr.ipv6, ==, exp_addr->ipv6);
+ g_assert_cmpint(addr.has_keep_alive, ==, exp_addr->has_keep_alive);
+ g_assert_cmpint(addr.keep_alive, ==, exp_addr->keep_alive);
+#ifdef HAVE_TCP_KEEPCNT
+ g_assert_cmpint(addr.has_keep_alive_count, ==,
+ exp_addr->has_keep_alive_count);
+ g_assert_cmpint(addr.keep_alive_count, ==,
+ exp_addr->keep_alive_count);
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ g_assert_cmpint(addr.has_keep_alive_idle, ==,
+ exp_addr->has_keep_alive_idle);
+ g_assert_cmpint(addr.keep_alive_idle, ==,
+ exp_addr->keep_alive_idle);
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ g_assert_cmpint(addr.has_keep_alive_interval, ==,
+ exp_addr->has_keep_alive_interval);
+ g_assert_cmpint(addr.keep_alive_interval, ==,
+ exp_addr->keep_alive_interval);
+#endif
+#ifdef HAVE_IPPROTO_MPTCP
+ g_assert_cmpint(addr.has_mptcp, ==, exp_addr->has_mptcp);
+ g_assert_cmpint(addr.mptcp, ==, exp_addr->mptcp);
+#endif
+ }
+
+ g_free(addr.host);
+ g_free(addr.port);
+}
+
+static void test_inet_parse_nohost_good(void)
+{
+ char host[] = "";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ };
+ inet_parse_test_helper(":5000", &exp_addr, true);
+}
+
+static void test_inet_parse_empty_bad(void)
+{
+ inet_parse_test_helper("", NULL, false);
+}
+
+static void test_inet_parse_only_colon_bad(void)
+{
+ inet_parse_test_helper(":", NULL, false);
+}
+
+static void test_inet_parse_ipv4_good(void)
+{
+ char host[] = "127.0.0.1";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ };
+ inet_parse_test_helper("127.0.0.1:5000", &exp_addr, true);
+}
+
+static void test_inet_parse_ipv4_noport_bad(void)
+{
+ inet_parse_test_helper("127.0.0.1", NULL, false);
+}
+
+static void test_inet_parse_ipv6_good(void)
+{
+ char host[] = "::1";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ };
+ inet_parse_test_helper("[::1]:5000", &exp_addr, true);
+}
+
+static void test_inet_parse_ipv6_noend_bad(void)
+{
+ inet_parse_test_helper("[::1", NULL, false);
+}
+
+static void test_inet_parse_ipv6_noport_bad(void)
+{
+ inet_parse_test_helper("[::1]:", NULL, false);
+}
+
+static void test_inet_parse_ipv6_empty_bad(void)
+{
+ inet_parse_test_helper("[]:5000", NULL, false);
+}
+
+static void test_inet_parse_hostname_good(void)
+{
+ char host[] = "localhost";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ };
+ inet_parse_test_helper("localhost:5000", &exp_addr, true);
+}
+
+static void test_inet_parse_all_options_good(void)
+{
+ char host[] = "::1";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ .has_numeric = true,
+ .numeric = true,
+ .has_to = true,
+ .to = 5006,
+ .has_ipv4 = true,
+ .ipv4 = false,
+ .has_ipv6 = true,
+ .ipv6 = true,
+ .has_keep_alive = true,
+ .keep_alive = true,
+#ifdef HAVE_TCP_KEEPCNT
+ .has_keep_alive_count = true,
+ .keep_alive_count = 10,
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ .has_keep_alive_idle = true,
+ .keep_alive_idle = 60,
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ .has_keep_alive_interval = true,
+ .keep_alive_interval = 30,
+#endif
+#ifdef HAVE_IPPROTO_MPTCP
+ .has_mptcp = true,
+ .mptcp = false,
+#endif
+ };
+ inet_parse_test_helper(
+ "[::1]:5000,numeric=on,to=5006,ipv4=off,ipv6=on,keep-alive=on"
+#ifdef HAVE_TCP_KEEPCNT
+ ",keep-alive-count=10"
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ ",keep-alive-idle=60"
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ ",keep-alive-interval=30"
+#endif
+#ifdef HAVE_IPPROTO_MPTCP
+ ",mptcp=off"
+#endif
+ , &exp_addr, true);
+}
+
+static void test_inet_parse_all_implicit_bool_good(void)
+{
+ char host[] = "::1";
+ char port[] = "5000";
+ InetSocketAddress exp_addr = {
+ .host = host,
+ .port = port,
+ .has_numeric = true,
+ .numeric = true,
+ .has_to = true,
+ .to = 5006,
+ .has_ipv4 = true,
+ .ipv4 = true,
+ .has_ipv6 = true,
+ .ipv6 = true,
+ .has_keep_alive = true,
+ .keep_alive = true,
+#ifdef HAVE_IPPROTO_MPTCP
+ .has_mptcp = true,
+ .mptcp = true,
+#endif
+ };
+ inet_parse_test_helper(
+ "[::1]:5000,numeric,to=5006,ipv4,ipv6,keep-alive"
+#ifdef HAVE_IPPROTO_MPTCP
+ ",mptcp"
+#endif
+ , &exp_addr, true);
+}
+
int main(int argc, char **argv)
{
bool has_ipv4, has_ipv6;
@@ -377,6 +591,31 @@ int main(int argc, char **argv)
test_socket_unix_abstract);
#endif
+ g_test_add_func("/util/socket/inet-parse/nohost-good",
+ test_inet_parse_nohost_good);
+ g_test_add_func("/util/socket/inet-parse/empty-bad",
+ test_inet_parse_empty_bad);
+ g_test_add_func("/util/socket/inet-parse/only-colon-bad",
+ test_inet_parse_only_colon_bad);
+ g_test_add_func("/util/socket/inet-parse/ipv4-good",
+ test_inet_parse_ipv4_good);
+ g_test_add_func("/util/socket/inet-parse/ipv4-noport-bad",
+ test_inet_parse_ipv4_noport_bad);
+ g_test_add_func("/util/socket/inet-parse/ipv6-good",
+ test_inet_parse_ipv6_good);
+ g_test_add_func("/util/socket/inet-parse/ipv6-noend-bad",
+ test_inet_parse_ipv6_noend_bad);
+ g_test_add_func("/util/socket/inet-parse/ipv6-noport-bad",
+ test_inet_parse_ipv6_noport_bad);
+ g_test_add_func("/util/socket/inet-parse/ipv6-empty-bad",
+ test_inet_parse_ipv6_empty_bad);
+ g_test_add_func("/util/socket/inet-parse/hostname-good",
+ test_inet_parse_hostname_good);
+ g_test_add_func("/util/socket/inet-parse/all-options-good",
+ test_inet_parse_all_options_good);
+ g_test_add_func("/util/socket/inet-parse/all-bare-bool-good",
+ test_inet_parse_all_implicit_bool_good);
+
end:
return g_test_run();
}
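The new helper exercises inet_parse() with strings in the host:port[,option=value,...] syntax and compares every field of the resulting InetSocketAddress. A minimal sketch of calling it directly, assuming the prototype from include/qemu/sockets.h; note that on success the caller owns addr.host and addr.port:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qemu/error-report.h"
    #include "qemu/sockets.h"

    /* Sketch: parse an address string like the ones exercised above. */
    static bool parse_listen_addr(const char *spec)
    {
        InetSocketAddress addr = { 0 };
        Error *err = NULL;

        if (inet_parse(&addr, spec, &err) < 0) {
            error_report_err(err);
            return false;
        }
        /* e.g. spec "[::1]:5000,ipv6=on" yields host "::1", port "5000" */
        g_free(addr.host);
        g_free(addr.port);
        return true;
    }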
diff --git a/tests/unit/test-visitor-serialization.c b/tests/unit/test-visitor-serialization.c
index c2056c3..2d36599 100644
--- a/tests/unit/test-visitor-serialization.c
+++ b/tests/unit/test-visitor-serialization.c
@@ -16,8 +16,8 @@
#include "test-qapi-visit.h"
#include "qapi/error.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qjson.h"
+#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/string-input-visitor.h"
diff --git a/tests/unit/test-xs-node.c b/tests/unit/test-xs-node.c
index ac94e7e..2f447a7 100644
--- a/tests/unit/test-xs-node.c
+++ b/tests/unit/test-xs-node.c
@@ -212,7 +212,7 @@ static void compare_tx(gpointer key, gpointer val, gpointer opaque)
printf("Comparison failure in TX %u after serdes:\n", tx_id);
dump_ref("Original", t1->root, 0);
dump_ref("Deserialised", t2->root, 0);
- g_assert(0);
+ g_assert_not_reached();
}
g_assert(t1->nr_nodes == t2->nr_nodes);
}
@@ -257,7 +257,7 @@ static void check_serdes(XenstoreImplState *s)
printf("Comparison failure in main tree after serdes:\n");
dump_ref("Original", s->root, 0);
dump_ref("Deserialised", s2->root, 0);
- g_assert(0);
+ g_assert_not_reached();
}
nr_transactions1 = g_hash_table_size(s->transactions);
diff --git a/tests/unit/test-yank.c b/tests/unit/test-yank.c
index e6c036a..4acfb2f 100644
--- a/tests/unit/test-yank.c
+++ b/tests/unit/test-yank.c
@@ -14,7 +14,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-char.h"
#include "qapi/qapi-types-char.h"
diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include
index 13ed80f..14188bb 100644
--- a/tests/vm/Makefile.include
+++ b/tests/vm/Makefile.include
@@ -64,23 +64,24 @@ endif
@echo " vm-boot-ssh-<guest> - Boot guest and login via ssh"
@echo
@echo "Special variables:"
- @echo " BUILD_TARGET=foo - Override the build target"
- @echo " DEBUG=1 - Enable verbose output on host and interactive debugging"
- @echo ' EXTRA_CONFIGURE_OPTS="..." - Pass to configure step'
- @echo " J=[0..9]* - Override the -jN parameter for make commands"
- @echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm "
- @echo " USE_TCG=1 - Use TCG for cross-arch images"
- @echo " QEMU=/path/to/qemu - Change path to QEMU binary"
+ @echo " BUILD_TARGET=foo - Override the build target"
+ @echo " DEBUG=1 - Enable verbose output on host and interactive debugging"
+ @echo " ROOT_USER=1 - Login as root user for interactive shell"
+ @echo ' EXTRA_CONFIGURE_OPTS="..." - Pass to configure step'
+ @echo " J=[0..9]* - Override the -jN parameter for make commands"
+ @echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm "
+ @echo " USE_TCG=1 - Use TCG for cross-arch images"
+ @echo " QEMU=/path/to/qemu - Change path to QEMU binary"
ifeq ($(HAVE_PYTHON_YAML),yes)
- @echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file."
+ @echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file."
else
@echo " (install python3-yaml to enable support for yaml file to configure a VM.)"
endif
- @echo " See conf_example_*.yml for file format details."
- @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool"
- @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build."
- @echo " TARGET_LIST=a,b,c - Override target list in builds"
- @echo " V=1 - Enable verbose output on host and guest commands"
+ @echo " See conf_example_*.yml for file format details."
+ @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool"
+ @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build."
+ @echo " TARGET_LIST=a,b,c - Override target list in builds"
+ @echo " V=1 - Enable verbose output on host and guest commands"
vm-build-all: $(addprefix vm-build-, $(IMAGES))
@@ -141,6 +142,6 @@ vm-boot-ssh-%: $(IMAGES_DIR)/%.img $(VM_VENV)
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
$(if $(LOG_CONSOLE),--log-console) \
--image "$<" \
- --interactive \
+ $(if $(ROOT_USER),--interactive-root,--interactive) \
false, \
" VM-BOOT-SSH $*") || true
diff --git a/tests/vm/README b/tests/vm/README
index f9c04cc..14ac323 100644
--- a/tests/vm/README
+++ b/tests/vm/README
@@ -1 +1 @@
-See docs/devel/testing.rst for help.
+See docs/devel/testing/main.rst for help.
diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py
index 4a1af04..9e879e9 100644
--- a/tests/vm/basevm.py
+++ b/tests/vm/basevm.py
@@ -83,7 +83,7 @@ class BaseVM(object):
# command to halt the guest, can be overridden by subclasses
poweroff = "poweroff"
# Time to wait for shutdown to finish.
- shutdown_timeout_default = 30
+ shutdown_timeout_default = 90
# enable IPv6 networking
ipv6 = True
# This is the timeout on the wait for console bytes.
@@ -520,8 +520,7 @@ def get_qemu_path(arch, build_path=None):
if "QEMU" in os.environ:
qemu_path = os.environ["QEMU"]
elif build_path:
- qemu_path = os.path.join(build_path, arch + "-softmmu")
- qemu_path = os.path.join(qemu_path, "qemu-system-" + arch)
+ qemu_path = os.path.join(build_path, "qemu-system-" + arch)
else:
# Default is to use system path for qemu.
qemu_path = "qemu-system-" + arch
@@ -613,8 +612,11 @@ def parse_args(vmcls):
parser.add_argument("--source-path", default=None,
help="Path of source directory, "\
"for finding additional files. ")
- parser.add_argument("--interactive", "-I", action="store_true",
- help="Interactively run command")
+ int_ops = parser.add_mutually_exclusive_group()
+ int_ops.add_argument("--interactive", "-I", action="store_true",
+ help="Interactively run command")
+ int_ops.add_argument("--interactive-root", action="store_true",
+ help="Interactively run command as root")
parser.add_argument("--snapshot", "-s", action="store_true",
help="run tests with a snapshot")
parser.add_argument("--genisoimage", default="genisoimage",
@@ -676,6 +678,8 @@ def main(vmcls, config=None):
exitcode = 3
if args.interactive:
vm.ssh()
+ elif args.interactive_root:
+ vm.ssh_root()
if not args.snapshot:
vm.graceful_shutdown()
diff --git a/tests/vm/freebsd b/tests/vm/freebsd
index 1247f40..74b3b1e 100755
--- a/tests/vm/freebsd
+++ b/tests/vm/freebsd
@@ -28,8 +28,8 @@ class FreeBSDVM(basevm.BaseVM):
name = "freebsd"
arch = "x86_64"
- link = "https://download.freebsd.org/releases/CI-IMAGES/13.2-RELEASE/amd64/Latest/FreeBSD-13.2-RELEASE-amd64-BASIC-CI.raw.xz"
- csum = "a4fb3b6c7b75dd4d58fb0d75e4caf72844bffe0ca00e66459c028b198ffb3c0e"
+ link = "https://download.freebsd.org/releases/CI-IMAGES/14.1-RELEASE/amd64/Latest/FreeBSD-14.1-RELEASE-amd64-BASIC-CI.raw.xz"
+ csum = "202fe27a05427f0a86d3ebb97712745186f2776ccc4f70d95466dd99a0238ba5"
size = "20G"
BUILD_SCRIPT = """
@@ -39,7 +39,7 @@ class FreeBSDVM(basevm.BaseVM):
mkdir src build; cd src;
tar -xf /dev/vtbd1;
cd ../build;
- ../src/configure --python=python3.9 --extra-ldflags=-L/usr/local/lib \
+ ../src/configure --extra-ldflags=-L/usr/local/lib \
--extra-cflags=-I/usr/local/include {configure_opts};
gmake --output-sync -j{jobs} {target} {verbose};
"""
diff --git a/tests/vm/generated/freebsd.json b/tests/vm/generated/freebsd.json
index 2d5895e..c03e1cd 100644
--- a/tests/vm/generated/freebsd.json
+++ b/tests/vm/generated/freebsd.json
@@ -5,7 +5,7 @@
"make": "/usr/local/bin/gmake",
"ninja": "/usr/local/bin/ninja",
"packaging_command": "pkg",
- "pip3": "/usr/local/bin/pip-3.8",
+ "pip3": "/usr/local/bin/pip",
"pkgs": [
"alsa-lib",
"bash",
@@ -13,7 +13,7 @@
"bzip2",
"ca_root_nss",
"capstone4",
- "ccache",
+ "ccache4",
"cmocka",
"ctags",
"curl",
@@ -29,6 +29,7 @@
"gmake",
"gnutls",
"gsed",
+ "gtk-vnc",
"gtk3",
"json-c",
"libepoxy",
@@ -51,15 +52,17 @@
"pixman",
"pkgconf",
"png",
- "py39-numpy",
- "py39-pillow",
- "py39-pip",
- "py39-sphinx",
- "py39-sphinx_rtd_theme",
- "py39-tomli",
- "py39-yaml",
+ "py311-numpy",
+ "py311-pillow",
+ "py311-pip",
+ "py311-pyyaml",
+ "py311-sphinx",
+ "py311-sphinx_rtd_theme",
+ "py311-tomli",
"python3",
"rpm2cpio",
+ "rust",
+ "rust-bindgen-cli",
"sdl2",
"sdl2_image",
"snappy",
@@ -70,6 +73,7 @@
"usbredir",
"virglrenderer",
"vte3",
+ "vulkan-tools",
"xorriso",
"zstd"
],
diff --git a/tests/vm/openbsd b/tests/vm/openbsd
index 5e646f7..2ea86a0 100755
--- a/tests/vm/openbsd
+++ b/tests/vm/openbsd
@@ -22,8 +22,8 @@ class OpenBSDVM(basevm.BaseVM):
name = "openbsd"
arch = "x86_64"
- link = "https://cdn.openbsd.org/pub/OpenBSD/7.5/amd64/install75.iso"
- csum = "034435c6e27405d5a7fafb058162943c194eb793dafdc412c08d49bb56b3892a"
+ link = "https://cdn.openbsd.org/pub/OpenBSD/7.7/amd64/install77.iso"
+ csum = "da0106e39463f015524dca806f407c37a9bdd17e6dfffe533b06a2dd2edd8a27"
size = "20G"
pkgs = [
# tools
diff --git a/tools/i386/qemu-vmsr-helper.c b/tools/i386/qemu-vmsr-helper.c
new file mode 100644
index 0000000..5f19a48
--- /dev/null
+++ b/tools/i386/qemu-vmsr-helper.c
@@ -0,0 +1,537 @@
+/*
+ * Privileged RAPL MSR helper commands for QEMU
+ *
+ * Copyright (C) 2024 Red Hat, Inc. <aharivel@redhat.com>
+ *
+ * Author: Anthony Harivel <aharivel@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <getopt.h>
+#include <stdbool.h>
+#include <sys/ioctl.h>
+#ifdef CONFIG_LIBCAP_NG
+#include <cap-ng.h>
+#endif
+#include <pwd.h>
+#include <grp.h>
+
+#include "qemu/help-texts.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/error-report.h"
+#include "qemu/config-file.h"
+#include "qemu-version.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "qemu/systemd.h"
+#include "io/channel.h"
+#include "io/channel-socket.h"
+#include "trace/control.h"
+#include "qemu-version.h"
+#include "rapl-msr-index.h"
+
+#define MSR_PATH_TEMPLATE "/dev/cpu/%u/msr"
+
+static char *socket_path;
+static char *pidfile;
+static enum { RUNNING, TERMINATE, TERMINATING } state;
+static QIOChannelSocket *server_ioc;
+static int server_watch;
+static int num_active_sockets = 1;
+static bool verbose;
+
+#ifdef CONFIG_LIBCAP_NG
+static int uid = -1;
+static int gid = -1;
+#endif
+
+static void compute_default_paths(void)
+{
+ g_autofree char *state = qemu_get_local_state_dir();
+
+ socket_path = g_build_filename(state, "run", "qemu-vmsr-helper.sock", NULL);
+ pidfile = g_build_filename(state, "run", "qemu-vmsr-helper.pid", NULL);
+}
+
+static int is_intel_processor(void)
+{
+ int ebx, ecx, edx;
+
+ /* Execute CPUID instruction with eax=0 (basic identification) */
+ asm volatile (
+ "cpuid"
+ : "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (0)
+ );
+
+ /*
+ * Check if processor is "GenuineIntel"
+ * 0x756e6547 = "Genu"
+ * 0x49656e69 = "ineI"
+ * 0x6c65746e = "ntel"
+ */
+ return (ebx == 0x756e6547) && (edx == 0x49656e69) && (ecx == 0x6c65746e);
+}
+
+static int is_rapl_enabled(void)
+{
+ const char *path = "/sys/class/powercap/intel-rapl/enabled";
+ FILE *file = fopen(path, "r");
+ int value = 0;
+
+ if (file != NULL) {
+ if (fscanf(file, "%d", &value) != 1) {
+ error_report("INTEL RAPL not enabled");
+ }
+ fclose(file);
+ } else {
+ error_report("Error opening %s", path);
+ }
+
+ return value;
+}
+
+/*
+ * Check if the TID that requested the MSR read
+ * belongs to the peer. It should be the TID of a vCPU.
+ */
+static bool is_tid_present(pid_t pid, pid_t tid)
+{
+ g_autofree char *tidPath = g_strdup_printf("/proc/%d/task/%d", pid, tid);
+
+ /* Check if the TID directory exists within the PID directory */
+ if (access(tidPath, F_OK) == 0) {
+ return true;
+ }
+
+ error_report("Failed to open /proc at %s", tidPath);
+ return false;
+}
+
+/*
+ * Only the RAPL MSRs in target/i386/cpu.h are allowed
+ */
+static bool is_msr_allowed(uint32_t reg)
+{
+ switch (reg) {
+ case MSR_RAPL_POWER_UNIT:
+ case MSR_PKG_POWER_LIMIT:
+ case MSR_PKG_ENERGY_STATUS:
+ case MSR_PKG_POWER_INFO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static uint64_t vmsr_read_msr(uint32_t msr_register, unsigned int cpu_id)
+{
+ int fd;
+ uint64_t result = 0;
+
+ g_autofree char *path = g_strdup_printf(MSR_PATH_TEMPLATE, cpu_id);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ error_report("Failed to open MSR file at %s", path);
+ return result;
+ }
+
+ if (pread(fd, &result, sizeof(result), msr_register) != sizeof(result)) {
+ error_report("Failed to read MSR");
+ result = 0;
+ }
+
+ close(fd);
+ return result;
+}
+
+static void usage(const char *name)
+{
+ (printf) (
+"Usage: %s [OPTIONS] FILE\n"
+"Virtual RAPL MSR helper program for QEMU\n"
+"\n"
+" -h, --help display this help and exit\n"
+" -V, --version output version information and exit\n"
+"\n"
+" -d, --daemon run in the background\n"
+" -f, --pidfile=PATH PID file when running as a daemon\n"
+" (default '%s')\n"
+" -k, --socket=PATH path to the unix socket\n"
+" (default '%s')\n"
+" -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
+" specify tracing options\n"
+#ifdef CONFIG_LIBCAP_NG
+" -u, --user=USER user to drop privileges to\n"
+" -g, --group=GROUP group to drop privileges to\n"
+#endif
+"\n"
+QEMU_HELP_BOTTOM "\n"
+ , name, pidfile, socket_path);
+}
+
+static void version(const char *name)
+{
+ printf(
+"%s " QEMU_FULL_VERSION "\n"
+"Written by Anthony Harivel.\n"
+"\n"
+QEMU_COPYRIGHT "\n"
+"This is free software; see the source for copying conditions. There is NO\n"
+"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
+ , name);
+}
+
+typedef struct VMSRHelperClient {
+ QIOChannelSocket *ioc;
+ Coroutine *co;
+} VMSRHelperClient;
+
+static void coroutine_fn vh_co_entry(void *opaque)
+{
+ VMSRHelperClient *client = opaque;
+ Error *local_err = NULL;
+ unsigned int peer_pid;
+ uint32_t request[3];
+ uint64_t vmsr;
+ int r;
+
+ qio_channel_set_blocking(QIO_CHANNEL(client->ioc),
+ false, NULL);
+
+ qio_channel_set_follow_coroutine_ctx(QIO_CHANNEL(client->ioc), true);
+
+ /*
+ * Check peer credentials
+ */
+ r = qio_channel_get_peerpid(QIO_CHANNEL(client->ioc),
+ &peer_pid,
+ &local_err);
+ if (r < 0) {
+ goto out;
+ }
+
+ for (;;) {
+ /*
+ * Read the requested MSR
+ * Only RAPL MSR in rapl-msr-index.h is allowed
+ */
+ r = qio_channel_read_all_eof(QIO_CHANNEL(client->ioc),
+ (char *) &request, sizeof(request), &local_err);
+ if (r <= 0) {
+ break;
+ }
+
+ if (!is_msr_allowed(request[0])) {
+ error_report("Requested unallowed msr: %d", request[0]);
+ break;
+ }
+
+ vmsr = vmsr_read_msr(request[0], request[1]);
+
+ if (!is_tid_present(peer_pid, request[2])) {
+ error_report("Requested TID not in peer PID: %d %d",
+ peer_pid, request[2]);
+ vmsr = 0;
+ }
+
+ r = qio_channel_write_all(QIO_CHANNEL(client->ioc),
+ (char *) &vmsr,
+ sizeof(vmsr),
+ &local_err);
+ if (r < 0) {
+ break;
+ }
+ }
+
+out:
+ if (local_err) {
+ if (!verbose) {
+ error_free(local_err);
+ } else {
+ error_report_err(local_err);
+ }
+ }
+
+ object_unref(OBJECT(client->ioc));
+ g_free(client);
+}
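/*
 * Illustrative aside, not part of this helper: the protocol served by
 * vh_co_entry() above is three host-endian uint32 values from the client
 * (MSR index, CPU index, requesting TID) answered with one uint64 MSR
 * value, 0 being returned on refusal. A hypothetical blocking client
 * could look like this:
 *
 *     static uint64_t vmsr_helper_request(int sockfd, uint32_t msr,
 *                                         uint32_t cpu, uint32_t tid)
 *     {
 *         uint32_t request[3] = { msr, cpu, tid };
 *         uint64_t value = 0;
 *
 *         if (write(sockfd, request, sizeof(request)) != sizeof(request) ||
 *             read(sockfd, &value, sizeof(value)) != sizeof(value)) {
 *             value = 0;   // treat any short I/O as failure
 *         }
 *         return value;
 *     }
 */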
+
+static gboolean accept_client(QIOChannel *ioc,
+ GIOCondition cond,
+ gpointer opaque)
+{
+ QIOChannelSocket *cioc;
+ VMSRHelperClient *vmsrh;
+
+ cioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
+ NULL);
+ if (!cioc) {
+ return TRUE;
+ }
+
+ vmsrh = g_new(VMSRHelperClient, 1);
+ vmsrh->ioc = cioc;
+ vmsrh->co = qemu_coroutine_create(vh_co_entry, vmsrh);
+ qemu_coroutine_enter(vmsrh->co);
+
+ return TRUE;
+}
+
+static void termsig_handler(int signum)
+{
+ qatomic_cmpxchg(&state, RUNNING, TERMINATE);
+ qemu_notify_event();
+}
+
+static void close_server_socket(void)
+{
+ assert(server_ioc);
+
+ g_source_remove(server_watch);
+ server_watch = -1;
+ object_unref(OBJECT(server_ioc));
+ num_active_sockets--;
+}
+
+#ifdef CONFIG_LIBCAP_NG
+static int drop_privileges(void)
+{
+ /* clear all capabilities */
+ capng_clear(CAPNG_SELECT_BOTH);
+
+ if (capng_update(CAPNG_ADD, CAPNG_EFFECTIVE | CAPNG_PERMITTED,
+ CAP_SYS_RAWIO) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+ const char *sopt = "hVk:f:dT:u:g:vq";
+ struct option lopt[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, 'V' },
+ { "socket", required_argument, NULL, 'k' },
+ { "pidfile", required_argument, NULL, 'f' },
+ { "daemon", no_argument, NULL, 'd' },
+ { "trace", required_argument, NULL, 'T' },
+ { "verbose", no_argument, NULL, 'v' },
+ { NULL, 0, NULL, 0 }
+ };
+ int opt_ind = 0;
+ int ch;
+ Error *local_err = NULL;
+ bool daemonize = false;
+ bool pidfile_specified = false;
+ bool socket_path_specified = false;
+ unsigned socket_activation;
+
+ struct sigaction sa_sigterm;
+ memset(&sa_sigterm, 0, sizeof(sa_sigterm));
+ sa_sigterm.sa_handler = termsig_handler;
+ sigaction(SIGTERM, &sa_sigterm, NULL);
+ sigaction(SIGINT, &sa_sigterm, NULL);
+ sigaction(SIGHUP, &sa_sigterm, NULL);
+
+ signal(SIGPIPE, SIG_IGN);
+
+ error_init(argv[0]);
+ module_call_init(MODULE_INIT_TRACE);
+ module_call_init(MODULE_INIT_QOM);
+ qemu_add_opts(&qemu_trace_opts);
+ qemu_init_exec_dir(argv[0]);
+
+ compute_default_paths();
+
+ /*
+ * Sanity check
+ * 1. the CPU must be an Intel CPU
+ * 2. RAPL must be enabled
+ */
+ if (!is_intel_processor()) {
+ error_report("error: CPU is not INTEL cpu");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!is_rapl_enabled()) {
+ error_report("error: RAPL driver not enable");
+ exit(EXIT_FAILURE);
+ }
+
+ while ((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) {
+ switch (ch) {
+ case 'k':
+ g_free(socket_path);
+ socket_path = g_strdup(optarg);
+ socket_path_specified = true;
+ if (socket_path[0] != '/') {
+ error_report("socket path must be absolute");
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case 'f':
+ g_free(pidfile);
+ pidfile = g_strdup(optarg);
+ pidfile_specified = true;
+ break;
+#ifdef CONFIG_LIBCAP_NG
+ case 'u': {
+ unsigned long res;
+ struct passwd *userinfo = getpwnam(optarg);
+ if (userinfo) {
+ uid = userinfo->pw_uid;
+ } else if (qemu_strtoul(optarg, NULL, 10, &res) == 0 &&
+ (uid_t)res == res) {
+ uid = res;
+ } else {
+ error_report("invalid user '%s'", optarg);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ }
+ case 'g': {
+ unsigned long res;
+ struct group *groupinfo = getgrnam(optarg);
+ if (groupinfo) {
+ gid = groupinfo->gr_gid;
+ } else if (qemu_strtoul(optarg, NULL, 10, &res) == 0 &&
+ (gid_t)res == res) {
+ gid = res;
+ } else {
+ error_report("invalid group '%s'", optarg);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ }
+#else
+ case 'u':
+ case 'g':
+ error_report("-%c not supported by this %s", ch, argv[0]);
+ exit(1);
+#endif
+ case 'd':
+ daemonize = true;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ case 'T':
+ trace_opt_parse(optarg);
+ break;
+ case 'V':
+ version(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ case '?':
+ error_report("Try `%s --help' for more information.", argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (!trace_init_backends()) {
+ exit(EXIT_FAILURE);
+ }
+ trace_init_file();
+ qemu_set_log(LOG_TRACE, &error_fatal);
+
+ socket_activation = check_socket_activation();
+ if (socket_activation == 0) {
+ SocketAddress saddr;
+ saddr = (SocketAddress){
+ .type = SOCKET_ADDRESS_TYPE_UNIX,
+ .u.q_unix.path = socket_path,
+ };
+ server_ioc = qio_channel_socket_new();
+ if (qio_channel_socket_listen_sync(server_ioc, &saddr,
+ 1, &local_err) < 0) {
+ object_unref(OBJECT(server_ioc));
+ error_report_err(local_err);
+ return 1;
+ }
+ } else {
+ /* Using socket activation - check the user didn't also use -k etc. */
+ if (socket_path_specified) {
+ error_report("Unix socket can't be set when"
+ "using socket activation");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Can only listen on a single socket. */
+ if (socket_activation > 1) {
+ error_report("%s does not support socket activation"
+ "with LISTEN_FDS > 1",
+ argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ server_ioc = qio_channel_socket_new_fd(FIRST_SOCKET_ACTIVATION_FD,
+ &local_err);
+ if (server_ioc == NULL) {
+ error_reportf_err(local_err,
+ "Failed to use socket activation: ");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ qemu_init_main_loop(&error_fatal);
+
+ server_watch = qio_channel_add_watch(QIO_CHANNEL(server_ioc),
+ G_IO_IN,
+ accept_client,
+ NULL, NULL);
+
+ if (daemonize) {
+ if (daemon(0, 0) < 0) {
+ error_report("Failed to daemonize: %s", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (daemonize || pidfile_specified) {
+ qemu_write_pidfile(pidfile, &error_fatal);
+ }
+
+#ifdef CONFIG_LIBCAP_NG
+ if (drop_privileges() < 0) {
+ error_report("Failed to drop privileges: %s", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+#endif
+
+ info_report("Listening on %s", socket_path);
+
+ state = RUNNING;
+ do {
+ main_loop_wait(false);
+ if (state == TERMINATE) {
+ state = TERMINATING;
+ close_server_socket();
+ }
+ } while (num_active_sockets > 0);
+
+ exit(EXIT_SUCCESS);
+}
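(Illustrative sketch, not part of the patch.) The wire protocol served by
vh_co_entry() above is three uint32_t values per request -- MSR number, CPU index
and the TID of the requesting thread -- answered by a single uint64_t, which is 0
when the request is refused. A minimal client, assuming `fd` is an
already-connected AF_UNIX socket to this helper (short reads/writes and error
handling omitted):

    #include <stdint.h>
    #include <unistd.h>

    static int vmsr_request(int fd, uint32_t msr, uint32_t cpu, uint32_t tid,
                            uint64_t *value)
    {
        uint32_t req[3] = { msr, cpu, tid };

        if (write(fd, req, sizeof(req)) != sizeof(req)) {
            return -1;
        }
        if (read(fd, value, sizeof(*value)) != sizeof(*value)) {
            return -1;
        }
        return 0;
    }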
diff --git a/tools/i386/rapl-msr-index.h b/tools/i386/rapl-msr-index.h
new file mode 100644
index 0000000..9a71186
--- /dev/null
+++ b/tools/i386/rapl-msr-index.h
@@ -0,0 +1,28 @@
+/*
+ * Allowed list of MSR for Privileged RAPL MSR helper commands for QEMU
+ *
+ * Copyright (C) 2023 Red Hat, Inc. <aharivel@redhat.com>
+ *
+ * Author: Anthony Harivel <aharivel@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Should stay in sync with the RAPL MSR definitions
+ * in target/i386/cpu.h
+ */
+#define MSR_RAPL_POWER_UNIT 0x00000606
+#define MSR_PKG_POWER_LIMIT 0x00000610
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_POWER_INFO 0x00000614
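(Illustrative sketch, not part of the patch.) The is_msr_allowed() check used in
vh_co_entry() is defined earlier in the helper source and is not shown in this
excerpt; an implementation consistent with this allow-list would look roughly like:

    static bool is_msr_allowed(uint32_t msr)
    {
        switch (msr) {
        case MSR_RAPL_POWER_UNIT:
        case MSR_PKG_POWER_LIMIT:
        case MSR_PKG_ENERGY_STATUS:
        case MSR_PKG_POWER_INFO:
            return true;
        default:
            return false;
        }
    }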
diff --git a/trace-events b/trace-events
index 9cb96f6..3ec8a6c 100644
--- a/trace-events
+++ b/trace-events
@@ -30,13 +30,6 @@ breakpoint_insert(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64
breakpoint_remove(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64 " flags=0x%x"
breakpoint_singlestep(int cpu_index, int enabled) "cpu=%d enable=%d"
-# dma-helpers.c
-dma_blk_io(void *dbs, void *bs, int64_t offset, bool to_dev) "dbs=%p bs=%p offset=%" PRId64 " to_dev=%d"
-dma_aio_cancel(void *dbs) "dbs=%p"
-dma_complete(void *dbs, int ret, void *cb) "dbs=%p ret=%d cb=%p"
-dma_blk_cb(void *dbs, int ret) "dbs=%p ret=%d"
-dma_map_wait(void *dbs) "dbs=%p"
-
# job.c
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
diff --git a/trace/control-target.c b/trace/control-target.c
index 97f21e4..57ceac2 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -8,9 +8,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu/lockable.h"
-#include "cpu.h"
-#include "trace/trace-root.h"
#include "trace/control.h"
diff --git a/trace/control.c b/trace/control.c
index ef10782..1c8c500 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -27,7 +27,6 @@
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "monitor/monitor.h"
-#include "trace/trace-root.h"
int trace_events_enabled_count;
diff --git a/trace/meson.build b/trace/meson.build
index c3412dc..9c42a57 100644
--- a/trace/meson.build
+++ b/trace/meson.build
@@ -1,12 +1,10 @@
-system_ss.add(files('trace-hmp-cmds.c'))
-
-specific_ss.add(files('control-target.c'))
+system_ss.add(files('control-target.c', 'trace-hmp-cmds.c'))
trace_events_files = []
foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events
if item in qapi_trace_events
trace_events_file = item
- group_name = item.full_path().split('/')[-1].underscorify()
+ group_name = fs.name(item).underscorify()
else
trace_events_file = meson.project_source_root() / item / 'trace-events'
group_name = item == '.' ? 'root' : item.underscorify()
@@ -59,10 +57,11 @@ foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events
endif
endforeach
+cat = [ python, '-c', 'import fileinput; [print(line, end="") for line in fileinput.input()]', '@INPUT@' ]
trace_events_all = custom_target('trace-events-all',
output: 'trace-events-all',
input: trace_events_files,
- command: [ 'cat', '@INPUT@' ],
+ command: cat,
capture: true,
install: get_option('trace_backends') != [ 'nop' ],
install_dir: qemu_datadir)
diff --git a/trace/simple.c b/trace/simple.c
index 18af590..c0aba00 100644
--- a/trace/simple.c
+++ b/trace/simple.c
@@ -366,7 +366,7 @@ void st_set_trace_file(const char *file)
/* Type cast needed for Windows where getpid() returns an int. */
trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE "-" FMT_pid, (pid_t)getpid());
} else {
- trace_file_name = g_strdup_printf("%s", file);
+ trace_file_name = g_strdup(file);
}
st_set_trace_file_enabled(saved_enable);
diff --git a/trace/trace-hmp-cmds.c b/trace/trace-hmp-cmds.c
index d38dd60..45f4335 100644
--- a/trace/trace-hmp-cmds.c
+++ b/trace/trace-hmp-cmds.c
@@ -27,7 +27,7 @@
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-trace.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "trace/control.h"
#ifdef CONFIG_TRACE_SIMPLE
#include "trace/simple.h"
diff --git a/ui/clipboard.c b/ui/clipboard.c
index 4264884..ec00a0b 100644
--- a/ui/clipboard.c
+++ b/ui/clipboard.c
@@ -1,4 +1,5 @@
#include "qemu/osdep.h"
+#include "system/runstate.h"
#include "ui/clipboard.h"
#include "trace.h"
@@ -7,8 +8,62 @@ static NotifierList clipboard_notifiers =
static QemuClipboardInfo *cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT];
+static VMChangeStateEntry *cb_change_state_entry = NULL;
+
+static bool cb_reset_serial_on_resume = false;
+
+static const VMStateDescription vmstate_cbcontent = {
+ .name = "clipboard/content",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(available, QemuClipboardContent),
+ VMSTATE_BOOL(requested, QemuClipboardContent),
+ VMSTATE_UINT32(size, QemuClipboardContent),
+ VMSTATE_VBUFFER_ALLOC_UINT32(data, QemuClipboardContent, 0, 0, size),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cbinfo = {
+ .name = "clipboard",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_INT32(selection, QemuClipboardInfo),
+ VMSTATE_BOOL(has_serial, QemuClipboardInfo),
+ VMSTATE_UINT32(serial, QemuClipboardInfo),
+ VMSTATE_STRUCT_ARRAY(types, QemuClipboardInfo, QEMU_CLIPBOARD_TYPE__COUNT, 0, vmstate_cbcontent, QemuClipboardContent),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void qemu_clipboard_change_state(void *opaque, bool running, RunState state)
+{
+ int i;
+
+ if (!running) {
+ return;
+ }
+
+ if (cb_reset_serial_on_resume) {
+ qemu_clipboard_reset_serial();
+ }
+
+ for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) {
+ if (cbinfo[i]) {
+ qemu_clipboard_update(cbinfo[i]);
+ }
+ }
+
+}
+
void qemu_clipboard_peer_register(QemuClipboardPeer *peer)
{
+ if (cb_change_state_entry == NULL) {
+ cb_change_state_entry = qemu_add_vm_change_state_handler(qemu_clipboard_change_state, NULL);
+ }
+
notifier_list_add(&clipboard_notifiers, &peer->notifier);
}
@@ -83,7 +138,9 @@ void qemu_clipboard_update(QemuClipboardInfo *info)
}
}
- notifier_list_notify(&clipboard_notifiers, &notify);
+ if (runstate_is_running() || runstate_check(RUN_STATE_SUSPENDED)) {
+ notifier_list_notify(&clipboard_notifiers, &notify);
+ }
if (cbinfo[info->selection] != info) {
qemu_clipboard_info_unref(cbinfo[info->selection]);
@@ -155,13 +212,20 @@ void qemu_clipboard_reset_serial(void)
QemuClipboardNotify notify = { .type = QEMU_CLIPBOARD_RESET_SERIAL };
int i;
+ trace_clipboard_reset_serial();
+
for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) {
QemuClipboardInfo *info = qemu_clipboard_info(i);
if (info) {
info->serial = 0;
}
}
- notifier_list_notify(&clipboard_notifiers, &notify);
+
+ if (runstate_is_running() || runstate_check(RUN_STATE_SUSPENDED)) {
+ notifier_list_notify(&clipboard_notifiers, &notify);
+ } else {
+ cb_reset_serial_on_resume = true;
+ }
}
void qemu_clipboard_set_data(QemuClipboardPeer *peer,
diff --git a/ui/cocoa.m b/ui/cocoa.m
index 4c2dd33..23b7a73 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -34,15 +34,15 @@
#include "ui/console.h"
#include "ui/input.h"
#include "ui/kbd-state.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/cpu-throttle.h"
+#include "system/system.h"
+#include "system/runstate.h"
+#include "system/runstate-action.h"
+#include "system/cpu-throttle.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
-#include "sysemu/blockdev.h"
+#include "system/blockdev.h"
#include "qemu-version.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
@@ -73,6 +73,8 @@ typedef struct {
int height;
} QEMUScreen;
+@class QemuCocoaPasteboardTypeOwner;
+
static void cocoa_update(DisplayChangeListener *dcl,
int x, int y, int w, int h);
@@ -107,6 +109,7 @@ static bool allow_events;
static NSInteger cbchangecount = -1;
static QemuClipboardInfo *cbinfo;
static QemuEvent cbevent;
+static QemuCocoaPasteboardTypeOwner *cbowner;
// Utility functions to run specified code block with the BQL held
typedef void (^CodeBlock)(void);
@@ -639,6 +642,9 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven
[self setBoundsSize:NSMakeSize(screen.width, screen.height)];
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
- (void) updateUIInfoLocked
{
/* Must be called with the BQL, i.e. via updateUIInfo */
@@ -685,6 +691,8 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven
dpy_set_ui_info(dcl.con, &info, TRUE);
}
+#pragma clang diagnostic pop
+
- (void) updateUIInfo
{
if (!allow_events) {
@@ -1321,8 +1329,10 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven
{
COCOA_DEBUG("QemuCocoaAppController: dealloc\n");
- if (cocoaView)
- [cocoaView release];
+ [cocoaView release];
+ [cbowner release];
+ cbowner = nil;
+
[super dealloc];
}
@@ -1938,8 +1948,6 @@ static Notifier mouse_mode_change_notifier = {
@end
-static QemuCocoaPasteboardTypeOwner *cbowner;
-
static void cocoa_clipboard_notify(Notifier *notifier, void *data);
static void cocoa_clipboard_request(QemuClipboardInfo *info,
QemuClipboardType type);
@@ -2002,43 +2010,8 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info,
}
}
-/*
- * The startup process for the OSX/Cocoa UI is complicated, because
- * OSX insists that the UI runs on the initial main thread, and so we
- * need to start a second thread which runs the qemu_default_main():
- * in main():
- * in cocoa_display_init():
- * assign cocoa_main to qemu_main
- * create application, menus, etc
- * in cocoa_main():
- * create qemu-main thread
- * enter OSX run loop
- */
-
-static void *call_qemu_main(void *opaque)
-{
- int status;
-
- COCOA_DEBUG("Second thread: calling qemu_default_main()\n");
- bql_lock();
- status = qemu_default_main();
- bql_unlock();
- COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n");
- [cbowner release];
- exit(status);
-}
-
static int cocoa_main(void)
{
- QemuThread thread;
-
- COCOA_DEBUG("Entered %s()\n", __func__);
-
- bql_unlock();
- qemu_thread_create(&thread, "qemu_main", call_qemu_main,
- NULL, QEMU_THREAD_DETACHED);
-
- // Start the main event loop
COCOA_DEBUG("Main thread: entering OSX run loop\n");
[NSApp run];
COCOA_DEBUG("Main thread: left OSX run loop, which should never happen\n");
@@ -2120,8 +2093,6 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n");
- qemu_main = cocoa_main;
-
// Pull this console process up to being a fully-fledged graphical
// app with a menubar and Dock icon
ProcessSerialNumber psn = { 0, kCurrentProcess };
@@ -2185,6 +2156,12 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
qemu_clipboard_peer_register(&cbpeer);
[pool release];
+
+ /*
+ * The Cocoa UI will run the NSApplication runloop on the main thread
+ * rather than the default Core Foundation one.
+ */
+ qemu_main = cocoa_main;
}
static QemuDisplay qemu_display_cocoa = {
diff --git a/ui/console-vc.c b/ui/console-vc.c
index 899fa11..8308420 100644
--- a/ui/console-vc.c
+++ b/ui/console-vc.c
@@ -42,6 +42,8 @@ enum TTYState {
TTY_STATE_NORM,
TTY_STATE_ESC,
TTY_STATE_CSI,
+ TTY_STATE_G0,
+ TTY_STATE_G1,
};
typedef struct QemuTextConsole {
@@ -88,6 +90,7 @@ struct VCChardev {
int esc_params[MAX_ESC_PARAMS];
int nb_esc_params;
TextAttributes t_attrib; /* currently active text attributes */
+ TextAttributes t_attrib_saved;
int x_saved, y_saved;
};
typedef struct VCChardev VCChardev;
@@ -287,7 +290,7 @@ static void kbd_send_chars(QemuTextConsole *s)
const uint8_t *buf;
uint32_t size;
- buf = fifo8_pop_buf(&s->out_fifo, MIN(len, avail), &size);
+ buf = fifo8_pop_bufptr(&s->out_fifo, MIN(len, avail), &size);
qemu_chr_be_write(s->chr, buf, size);
len = qemu_chr_be_can_write(s->chr);
avail -= size;
@@ -615,10 +618,9 @@ static void vc_put_one(VCChardev *vc, int ch)
static void vc_respond_str(VCChardev *vc, const char *buf)
{
- while (*buf) {
- vc_put_one(vc, *buf);
- buf++;
- }
+ QemuTextConsole *s = vc->console;
+
+ qemu_chr_be_write(s->chr, (const uint8_t *)buf, strlen(buf));
}
/* set cursor, checking bounds */
@@ -643,12 +645,119 @@ static void vc_set_cursor(VCChardev *vc, int x, int y)
s->y = y;
}
+/**
+ * vc_csi_P() - (DCH) deletes one or more characters from the cursor
+ * position to the right. As characters are deleted, the remaining
+ * characters between the cursor and right margin move to the
+ * left. Character attributes move with the characters.
+ */
+static void vc_csi_P(struct VCChardev *vc, unsigned int nr)
+{
+ QemuTextConsole *s = vc->console;
+ TextCell *c1, *c2;
+ unsigned int x1, x2, y;
+ unsigned int end, len;
+
+ if (!nr) {
+ nr = 1;
+ }
+ if (nr > s->width - s->x) {
+ nr = s->width - s->x;
+ if (!nr) {
+ return;
+ }
+ }
+
+ x1 = s->x;
+ x2 = s->x + nr;
+ len = s->width - x2;
+ if (len) {
+ y = (s->y_base + s->y) % s->total_height;
+ c1 = &s->cells[y * s->width + x1];
+ c2 = &s->cells[y * s->width + x2];
+ memmove(c1, c2, len * sizeof(*c1));
+ for (end = x1 + len; x1 < end; x1++) {
+ vc_update_xy(vc, x1, s->y);
+ }
+ }
+ /* Clear the rest */
+ for (; x1 < s->width; x1++) {
+ vc_clear_xy(vc, x1, s->y);
+ }
+}
+
+/**
+ * vc_csi_at() - (ICH) inserts `nr` blank characters with the default
+ * character attribute. The cursor remains at the beginning of the
+ * blank characters. Text between the cursor and right margin moves to
+ * the right. Characters scrolled past the right margin are lost.
+ */
+static void vc_csi_at(struct VCChardev *vc, unsigned int nr)
+{
+ QemuTextConsole *s = vc->console;
+ TextCell *c1, *c2;
+ unsigned int x1, x2, y;
+ unsigned int end, len;
+
+ if (!nr) {
+ nr = 1;
+ }
+ if (nr > s->width - s->x) {
+ nr = s->width - s->x;
+ if (!nr) {
+ return;
+ }
+ }
+
+ x1 = s->x + nr;
+ x2 = s->x;
+ len = s->width - x1;
+ if (len) {
+ y = (s->y_base + s->y) % s->total_height;
+ c1 = &s->cells[y * s->width + x1];
+ c2 = &s->cells[y * s->width + x2];
+ memmove(c1, c2, len * sizeof(*c1));
+ for (end = x1 + len; x1 < end; x1++) {
+ vc_update_xy(vc, x1, s->y);
+ }
+ }
+ /* Insert blanks */
+ for (x1 = s->x; x1 < s->x + nr; x1++) {
+ vc_clear_xy(vc, x1, s->y);
+ }
+}
+
+/**
+ * vc_save_cursor() - saves cursor position and character attributes.
+ */
+static void vc_save_cursor(VCChardev *vc)
+{
+ QemuTextConsole *s = vc->console;
+
+ vc->x_saved = s->x;
+ vc->y_saved = s->y;
+ vc->t_attrib_saved = vc->t_attrib;
+}
+
+/**
+ * vc_restore_cursor() - restores cursor position and character
+ * attributes from saved state.
+ */
+static void vc_restore_cursor(VCChardev *vc)
+{
+ QemuTextConsole *s = vc->console;
+
+ s->x = vc->x_saved;
+ s->y = vc->y_saved;
+ vc->t_attrib = vc->t_attrib_saved;
+}
+
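(Illustrative only, not part of the patch.) The handlers added above implement the
DCH, ICH, DECSC and DECRC control functions; the byte sequences a guest would emit
to exercise them are, for example:

    static const char demo_dch[]     = "\033[3P"; /* DCH: delete 3 chars at cursor */
    static const char demo_ich[]     = "\033[2@"; /* ICH: insert 2 blank chars     */
    static const char demo_save[]    = "\0337";   /* DECSC: save cursor and attrs  */
    static const char demo_restore[] = "\0338";   /* DECRC: restore cursor, attrs  */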
static void vc_putchar(VCChardev *vc, int ch)
{
QemuTextConsole *s = vc->console;
int i;
int x, y;
- char response[40];
+ g_autofree char *response = NULL;
switch(vc->state) {
case TTY_STATE_NORM:
@@ -694,6 +803,16 @@ static void vc_putchar(VCChardev *vc, int ch)
vc->esc_params[i] = 0;
vc->nb_esc_params = 0;
vc->state = TTY_STATE_CSI;
+ } else if (ch == '(') {
+ vc->state = TTY_STATE_G0;
+ } else if (ch == ')') {
+ vc->state = TTY_STATE_G1;
+ } else if (ch == '7') {
+ vc_save_cursor(vc);
+ vc->state = TTY_STATE_NORM;
+ } else if (ch == '8') {
+ vc_restore_cursor(vc);
+ vc->state = TTY_STATE_NORM;
} else {
vc->state = TTY_STATE_NORM;
}
@@ -810,6 +929,9 @@ static void vc_putchar(VCChardev *vc, int ch)
break;
}
break;
+ case 'P':
+ vc_csi_P(vc, vc->esc_params[0]);
+ break;
case 'm':
vc_handle_escape(vc);
break;
@@ -821,22 +943,20 @@ static void vc_putchar(VCChardev *vc, int ch)
break;
case 6:
/* report cursor position */
- sprintf(response, "\033[%d;%dR",
- (s->y_base + s->y) % s->total_height + 1,
- s->x + 1);
+ response = g_strdup_printf("\033[%d;%dR",
+ s->y + 1, s->x + 1);
vc_respond_str(vc, response);
break;
}
break;
case 's':
- /* save cursor position */
- vc->x_saved = s->x;
- vc->y_saved = s->y;
+ vc_save_cursor(vc);
break;
case 'u':
- /* restore cursor position */
- s->x = vc->x_saved;
- s->y = vc->y_saved;
+ vc_restore_cursor(vc);
+ break;
+ case '@':
+ vc_csi_at(vc, vc->esc_params[0]);
break;
default:
trace_console_putchar_unhandled(ch);
@@ -844,6 +964,16 @@ static void vc_putchar(VCChardev *vc, int ch)
}
break;
}
+ break;
+ case TTY_STATE_G0: /* set character sets */
+ case TTY_STATE_G1: /* set character sets */
+ switch (ch) {
+ case 'B':
+ /* Latin-1 map */
+ break;
+ }
+ vc->state = TTY_STATE_NORM;
+ break;
}
}
@@ -906,7 +1036,7 @@ qemu_text_console_finalize(Object *obj)
}
static void
-qemu_text_console_class_init(ObjectClass *oc, void *data)
+qemu_text_console_class_init(ObjectClass *oc, const void *data)
{
if (!cursor_timer) {
cursor_timer = timer_new_ms(QEMU_CLOCK_REALTIME, cursor_timer_cb, NULL);
@@ -935,7 +1065,7 @@ qemu_fixed_text_console_finalize(Object *obj)
}
static void
-qemu_fixed_text_console_class_init(ObjectClass *oc, void *data)
+qemu_fixed_text_console_class_init(ObjectClass *oc, const void *data)
{
}
@@ -1051,7 +1181,7 @@ static void vc_chr_parse(QemuOpts *opts, ChardevBackend *backend, Error **errp)
}
}
-static void char_vc_class_init(ObjectClass *oc, void *data)
+static void char_vc_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -1073,6 +1203,6 @@ void qemu_console_early_init(void)
{
/* set the default vc driver */
if (!object_class_by_name(TYPE_CHARDEV_VC)) {
- type_register(&char_vc_type_info);
+ type_register_static(&char_vc_type_info);
}
}
diff --git a/ui/console.c b/ui/console.c
index e8f0083..2d00828 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -35,8 +35,9 @@
#include "qemu/option.h"
#include "chardev/char.h"
#include "trace.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "qom/object.h"
+#include "qemu/memfd.h"
#include "console-priv.h"
@@ -400,7 +401,7 @@ qemu_console_finalize(Object *obj)
}
static void
-qemu_console_class_init(ObjectClass *oc, void *data)
+qemu_console_class_init(ObjectClass *oc, const void *data)
{
}
@@ -436,7 +437,7 @@ qemu_graphic_console_prop_get_head(Object *obj, Visitor *v, const char *name,
}
static void
-qemu_graphic_console_class_init(ObjectClass *oc, void *data)
+qemu_graphic_console_class_init(ObjectClass *oc, const void *data)
{
object_class_property_add_link(oc, "device", TYPE_DEVICE,
offsetof(QemuGraphicConsole, device),
@@ -452,60 +453,26 @@ qemu_graphic_console_init(Object *obj)
{
}
-#ifdef WIN32
-void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
- HANDLE h, uint32_t offset)
+void qemu_displaysurface_set_share_handle(DisplaySurface *surface,
+ qemu_pixman_shareable handle,
+ uint32_t offset)
{
- assert(!surface->handle);
+ assert(surface->share_handle == SHAREABLE_NONE);
- surface->handle = h;
- surface->handle_offset = offset;
-}
-
-static void
-win32_pixman_image_destroy(pixman_image_t *image, void *data)
-{
- DisplaySurface *surface = data;
-
- if (!surface->handle) {
- return;
- }
+ surface->share_handle = handle;
+ surface->share_handle_offset = offset;
- assert(surface->handle_offset == 0);
-
- qemu_win32_map_free(
- pixman_image_get_data(surface->image),
- surface->handle,
- &error_warn
- );
}
-#endif
DisplaySurface *qemu_create_displaysurface(int width, int height)
{
- DisplaySurface *surface;
- void *bits = NULL;
-#ifdef WIN32
- HANDLE handle = NULL;
-#endif
-
trace_displaysurface_create(width, height);
-#ifdef WIN32
- bits = qemu_win32_map_alloc(width * height * 4, &handle, &error_abort);
-#endif
-
- surface = qemu_create_displaysurface_from(
+ return qemu_create_displaysurface_from(
width, height,
PIXMAN_x8r8g8b8,
- width * 4, bits
+ width * 4, NULL
);
- surface->flags = QEMU_ALLOCATED_FLAG;
-
-#ifdef WIN32
- qemu_displaysurface_win32_set_handle(surface, handle, 0);
-#endif
- return surface;
}
DisplaySurface *qemu_create_displaysurface_from(int width, int height,
@@ -515,15 +482,25 @@ DisplaySurface *qemu_create_displaysurface_from(int width, int height,
DisplaySurface *surface = g_new0(DisplaySurface, 1);
trace_displaysurface_create_from(surface, width, height, format);
- surface->image = pixman_image_create_bits(format,
- width, height,
- (void *)data, linesize);
- assert(surface->image != NULL);
-#ifdef WIN32
- pixman_image_set_destroy_function(surface->image,
- win32_pixman_image_destroy, surface);
-#endif
+ surface->share_handle = SHAREABLE_NONE;
+
+ if (data) {
+ surface->image = pixman_image_create_bits(format,
+ width, height,
+ (void *)data, linesize);
+ } else {
+ qemu_pixman_image_new_shareable(&surface->image,
+ &surface->share_handle,
+ "displaysurface",
+ format,
+ width,
+ height,
+ linesize,
+ &error_abort);
+ surface->flags = QEMU_ALLOCATED_FLAG;
+ }
+ assert(surface->image != NULL);
return surface;
}
@@ -532,6 +509,7 @@ DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image)
DisplaySurface *surface = g_new0(DisplaySurface, 1);
trace_displaysurface_create_pixman(surface);
+ surface->share_handle = SHAREABLE_NONE;
surface->image = pixman_image_ref(image);
return surface;
@@ -1182,7 +1160,7 @@ DisplayState *init_displaystate(void)
* all QemuConsoles are created and the order / numbering
* doesn't change any more */
name = g_strdup_printf("console[%d]", con->index);
- object_property_add_child(container_get(object_get_root(), "/backend"),
+ object_property_add_child(object_get_container("backend"),
name, OBJECT(con));
g_free(name);
}
@@ -1408,9 +1386,7 @@ char *qemu_console_get_label(QemuConsole *con)
object_get_typename(c->device),
c->head);
} else {
- return g_strdup_printf("%s", dev->id ?
- dev->id :
- object_get_typename(c->device));
+ return g_strdup(dev->id ? : object_get_typename(c->device));
}
}
return g_strdup("VGA");
@@ -1632,4 +1608,9 @@ void qemu_display_help(void)
printf("%s\n", DisplayType_str(dpys[idx]->type));
}
}
+ printf("\n"
+ "Some display backends support suboptions, which can be set with\n"
+ " -display backend,option=value,option=value...\n"
+ "For a short list of the suboptions for each display, see the "
+ "top-level -help output; more detail is in the documentation.\n");
}
diff --git a/ui/curses.c b/ui/curses.c
index ec61615..a39aee8 100644
--- a/ui/curses.c
+++ b/ui/curses.c
@@ -36,9 +36,9 @@
#include "qemu/module.h"
#include "ui/console.h"
#include "ui/input.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
-#if defined(__APPLE__) || defined(__OpenBSD__)
+#ifdef __APPLE__
#define _XOPEN_SOURCE_EXTENDED 1
#endif
diff --git a/ui/cursor.c b/ui/cursor.c
index 29717b3e..6e23244 100644
--- a/ui/cursor.c
+++ b/ui/cursor.c
@@ -197,30 +197,6 @@ void cursor_set_mono(QEMUCursor *c,
}
}
-void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *image)
-{
- uint32_t *data = c->data;
- uint8_t bit;
- int x,y,bpl;
-
- bpl = cursor_get_mono_bpl(c);
- memset(image, 0, bpl * c->height);
- for (y = 0; y < c->height; y++) {
- bit = 0x80;
- for (x = 0; x < c->width; x++, data++) {
- if (((*data & 0xff000000) == 0xff000000) &&
- ((*data & 0x00ffffff) == foreground)) {
- image[x/8] |= bit;
- }
- bit >>= 1;
- if (bit == 0) {
- bit = 0x80;
- }
- }
- image += bpl;
- }
-}
-
void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask)
{
uint32_t *data = c->data;
@@ -232,7 +208,7 @@ void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask)
for (y = 0; y < c->height; y++) {
bit = 0x80;
for (x = 0; x < c->width; x++, data++) {
- if ((*data & 0xff000000) != 0xff000000) {
+ if ((*data & 0x80000000) == 0x0) { /* Alpha < 0x80 (128) */
if (transparent != 0) {
mask[x/8] |= bit;
}
diff --git a/ui/dbus-chardev.c b/ui/dbus-chardev.c
index 1d3a712..d05ddda 100644
--- a/ui/dbus-chardev.c
+++ b/ui/dbus-chardev.c
@@ -106,7 +106,7 @@ dbus_chardev_init(DBusDisplay *dpy)
dpy->notifier.notify = dbus_display_on_notify;
dbus_display_notifier_add(&dpy->notifier);
- object_child_foreach(container_get(object_get_root(), "/chardevs"),
+ object_child_foreach(object_get_container("chardevs"),
dbus_display_chardev_foreach, dpy);
}
@@ -269,7 +269,7 @@ dbus_chr_parse(QemuOpts *opts, ChardevBackend *backend,
}
static void
-char_dbus_class_init(ObjectClass *oc, void *data)
+char_dbus_class_init(ObjectClass *oc, const void *data)
{
DBusChardevClass *klass = DBUS_CHARDEV_CLASS(oc);
ChardevClass *cc = CHARDEV_CLASS(oc);
diff --git a/ui/dbus-clipboard.c b/ui/dbus-clipboard.c
index fe7fcde..6787a77 100644
--- a/ui/dbus-clipboard.c
+++ b/ui/dbus-clipboard.c
@@ -26,7 +26,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object_interfaces.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "trace.h"
@@ -141,6 +141,8 @@ dbus_clipboard_qemu_request(QemuClipboardInfo *info,
const char *mimes[] = { MIME_TEXT_PLAIN_UTF8, NULL };
size_t n;
+ trace_dbus_clipboard_qemu_request(type);
+
if (type != QEMU_CLIPBOARD_TYPE_TEXT) {
/* unsupported atm */
return;
@@ -305,6 +307,8 @@ dbus_clipboard_grab(
return DBUS_METHOD_INVOCATION_HANDLED;
}
+ trace_dbus_clipboard_grab(arg_selection, arg_serial);
+
if (s >= QEMU_CLIPBOARD_SELECTION__COUNT) {
g_dbus_method_invocation_return_error(
invocation,
diff --git a/ui/dbus-console.c b/ui/dbus-console.c
index 578b67f..85e215e 100644
--- a/ui/dbus-console.c
+++ b/ui/dbus-console.c
@@ -41,7 +41,7 @@ struct _DBusDisplayConsole {
DisplayChangeListener dcl;
DBusDisplay *display;
- GHashTable *listeners;
+ GPtrArray *listeners;
QemuDBusDisplay1Console *iface;
QemuDBusDisplay1Keyboard *iface_kbd;
@@ -142,8 +142,7 @@ dbus_display_console_init(DBusDisplayConsole *object)
{
DBusDisplayConsole *ddc = DBUS_DISPLAY_CONSOLE(object);
- ddc->listeners = g_hash_table_new_full(g_str_hash, g_str_equal,
- NULL, g_object_unref);
+ ddc->listeners = g_ptr_array_new_with_free_func(g_object_unref);
ddc->dcl.ops = &dbus_console_dcl_ops;
}
@@ -157,7 +156,7 @@ dbus_display_console_dispose(GObject *object)
g_clear_object(&ddc->iface_mouse);
g_clear_object(&ddc->iface_kbd);
g_clear_object(&ddc->iface);
- g_clear_pointer(&ddc->listeners, g_hash_table_unref);
+ g_clear_pointer(&ddc->listeners, g_ptr_array_unref);
g_clear_pointer(&ddc->kbd, qkbd_state_free);
G_OBJECT_CLASS(dbus_display_console_parent_class)->dispose(object);
@@ -179,7 +178,7 @@ listener_vanished_cb(DBusDisplayListener *listener)
trace_dbus_listener_vanished(name);
- g_hash_table_remove(ddc->listeners, name);
+ g_ptr_array_remove_fast(ddc->listeners, listener);
qkbd_state_lift_all_keys(ddc->kbd);
}
@@ -267,16 +266,6 @@ dbus_console_register_listener(DBusDisplayConsole *ddc,
DBusDisplayListener *listener;
int fd;
- if (sender && g_hash_table_contains(ddc->listeners, sender)) {
- g_dbus_method_invocation_return_error(
- invocation,
- DBUS_DISPLAY_ERROR,
- DBUS_DISPLAY_ERROR_INVALID,
- "`%s` is already registered!",
- sender);
- return DBUS_METHOD_INVOCATION_HANDLED;
- }
-
#ifdef G_OS_WIN32
if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
return DBUS_METHOD_INVOCATION_HANDLED;
@@ -316,10 +305,16 @@ dbus_console_register_listener(DBusDisplayConsole *ddc,
#endif
);
+ GDBusConnectionFlags flags =
+ G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER;
+#ifdef WIN32
+ flags |= G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_ALLOW_ANONYMOUS;
+#endif
+
listener_conn = g_dbus_connection_new_sync(
G_IO_STREAM(socket_conn),
guid,
- G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER,
+ flags,
NULL, NULL, &err);
if (err) {
error_report("Failed to setup peer connection: %s", err->message);
@@ -331,9 +326,7 @@ dbus_console_register_listener(DBusDisplayConsole *ddc,
return DBUS_METHOD_INVOCATION_HANDLED;
}
- g_hash_table_insert(ddc->listeners,
- (gpointer)dbus_display_listener_get_bus_name(listener),
- listener);
+ g_ptr_array_add(ddc->listeners, listener);
g_object_connect(listener_conn,
"swapped-signal::closed", listener_vanished_cb, listener,
NULL);
diff --git a/ui/dbus-display1.xml b/ui/dbus-display1.xml
index ce35d64..4a41a7e 100644
--- a/ui/dbus-display1.xml
+++ b/ui/dbus-display1.xml
@@ -470,23 +470,71 @@
</interface>
<!--
+ org.qemu.Display1.Listener.Unix.Map:
+
+ This optional client-side interface can complement
+ org.qemu.Display1.Listener on ``/org/qemu/Display1/Listener`` for
+ Unix-specific shared memory scanouts.
+ -->
+ <?if $(env.HOST_OS) != windows?>
+ <interface name="org.qemu.Display1.Listener.Unix.Map">
+ <!--
+ ScanoutMap:
+ @handle: the shared map FD.
+ @offset: mapping offset, in bytes.
+ @width: display width, in pixels.
+ @height: display height, in pixels.
+ @stride: stride, in bytes.
+ @pixman_format: image format (ex: ``PIXMAN_X8R8G8B8``).
+
+ Resize and update the display content with a shared map.
+ -->
+ <method name="ScanoutMap">
+ <arg type="h" name="handle" direction="in"/>
+ <arg type="u" name="offset" direction="in"/>
+ <arg type="u" name="width" direction="in"/>
+ <arg type="u" name="height" direction="in"/>
+ <arg type="u" name="stride" direction="in"/>
+ <arg type="u" name="pixman_format" direction="in"/>
+ </method>
+
+ <!--
+ UpdateMap:
+ @x: the X update position, in pixels.
+ @y: the Y update position, in pixels.
+ @width: the update width, in pixels.
+ @height: the update height, in pixels.
+
+ Update the display content with the current shared map and the given region.
+ -->
+ <method name="UpdateMap">
+ <arg type="i" name="x" direction="in"/>
+ <arg type="i" name="y" direction="in"/>
+ <arg type="i" name="width" direction="in"/>
+ <arg type="i" name="height" direction="in"/>
+ </method>
+ </interface>
+ <?endif?>
+
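(Illustrative sketch, not part of the patch; error handling omitted.) On the client
side the ScanoutMap @handle argument arrives as an index into the message's Unix FD
list; a receiving implementation would typically dup the FD out of the list and
mmap() it, roughly:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* fd comes from the GUnixFDList; offset/stride/height are the call arguments */
    static void *map_scanout(int fd, uint32_t offset, uint32_t stride,
                             uint32_t height)
    {
        size_t len = (size_t)offset + (size_t)stride * height;
        void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);

        return base == MAP_FAILED ? NULL : (uint8_t *)base + offset;
    }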
+ <!--
org.qemu.Display1.Listener.Win32.Map:
This optional client-side interface can complement
org.qemu.Display1.Listener on ``/org/qemu/Display1/Listener`` for Windows
specific shared memory scanouts.
-->
+ <?if $(env.HOST_OS) == windows?>
<interface name="org.qemu.Display1.Listener.Win32.Map">
<!--
ScanoutMap:
- @handle: the shared map handle value.
+ @handle: the shared file mapping handle value (not a file handle).
@offset: mapping offset.
@width: display width, in pixels.
@height: display height, in pixels.
@stride: stride, in bytes.
@pixman_format: image format (ex: ``PIXMAN_X8R8G8B8``).
- Resize and update the display content with a shared map.
+ Resize and update the display content with a shared file mapping object.
-->
<method name="ScanoutMap">
<arg type="t" name="handle" direction="in"/>
@@ -513,6 +561,7 @@
<arg type="i" name="height" direction="in"/>
</method>
</interface>
+ <?endif?>
<!--
org.qemu.Display1.Listener.Win32.D3d11:
@@ -566,6 +615,51 @@
</interface>
<!--
+ org.qemu.Display1.Listener.Unix.ScanoutDMABUF2:
+
+ This optional client-side interface can complement
+ org.qemu.Display1.Listener on ``/org/qemu/Display1/Listener`` for
+ Unix-specific DMABUF scanout setup with multi-plane support.
+ -->
+ <?if $(env.HOST_OS) != windows?>
+ <interface name="org.qemu.Display1.Listener.Unix.ScanoutDMABUF2">
+ <!--
+ ScanoutDMABUF2:
+ @dmabuf: DMABUF file descriptor of each plane.
+ @x: display x offset, in pixels.
+ @y: display y offset, in pixels.
+ @width: display width, in pixels.
+ @height: display height, in pixels.
+ @offset: offset of each plane, in bytes.
+ @stride: stride of each plane, in bytes.
+ @num_planes: the number of planes.
+ @fourcc: DMABUF fourcc.
+ @backing_width: backing framebuffer width, in pixels.
+ @backing_height: backing framebuffer height, in pixels.
+ @modifier: DMABUF modifier.
+ @y0_top: whether Y position 0 is the top or not.
+
+ Resize and update the display content with DMABUF.
+ -->
+ <method name="ScanoutDMABUF2">
+ <arg type="ah" name="dmabuf" direction="in"/>
+ <arg type="u" name="x" direction="in"/>
+ <arg type="u" name="y" direction="in"/>
+ <arg type="u" name="width" direction="in"/>
+ <arg type="u" name="height" direction="in"/>
+ <arg type="au" name="offset" direction="in"/>
+ <arg type="au" name="stride" direction="in"/>
+ <arg type="u" name="num_planes" direction="in"/>
+ <arg type="u" name="fourcc" direction="in"/>
+ <arg type="u" name="backing_width" direction="in"/>
+ <arg type="u" name="backing_height" direction="in"/>
+ <arg type="t" name="modifier" direction="in"/>
+ <arg type="b" name="y0_top" direction="in"/>
+ </method>
+ </interface>
+ <?endif?>
+
+ <!--
org.qemu.Display1.Clipboard:
This interface must be implemented by both the client and the server on
@@ -725,6 +819,18 @@
</method>
<!--
+ NSamples:
+
+ The number of samples per read/write frame (for example, the default
+ is 480, i.e. 10 ms at 48 kHz).
+
+ (earlier versions of the display interface do not provide this property)
+ -->
+ <property name="NSamples" type="u" access="read">
+ <annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const"/>
+ </property>
+
+ <!--
Interfaces:
This property lists extra interfaces provided by the
diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c
index a54123a..42875b8 100644
--- a/ui/dbus-listener.c
+++ b/ui/dbus-listener.c
@@ -24,8 +24,9 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "dbus.h"
+#include "glib.h"
#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
#endif
@@ -82,10 +83,14 @@ struct _DBusDisplayListener {
#ifdef CONFIG_OPENGL
egl_fb fb;
#endif
+#else /* !WIN32 */
+ QemuDBusDisplay1ListenerUnixMap *map_proxy;
+ QemuDBusDisplay1ListenerUnixScanoutDMABUF2 *scanout_dmabuf_v2_proxy;
#endif
guint dbus_filter;
- guint32 out_serial_to_discard;
+ guint32 display_serial_to_discard;
+ guint32 cursor_serial_to_discard;
};
G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT)
@@ -93,10 +98,20 @@ G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT)
static void dbus_gfx_update(DisplayChangeListener *dcl,
int x, int y, int w, int h);
-static void ddl_discard_pending_messages(DBusDisplayListener *ddl)
+static void ddl_discard_display_messages(DBusDisplayListener *ddl)
{
- ddl->out_serial_to_discard = g_dbus_connection_get_last_serial(
+ guint32 serial = g_dbus_connection_get_last_serial(
g_dbus_proxy_get_connection(G_DBUS_PROXY(ddl->proxy)));
+
+ g_atomic_int_set(&ddl->display_serial_to_discard, serial);
+}
+
+static void ddl_discard_cursor_messages(DBusDisplayListener *ddl)
+{
+ guint32 serial = g_dbus_connection_get_last_serial(
+ g_dbus_proxy_get_connection(G_DBUS_PROXY(ddl->proxy)));
+
+ g_atomic_int_set(&ddl->cursor_serial_to_discard, serial);
}
#ifdef CONFIG_OPENGL
@@ -104,6 +119,8 @@ static void dbus_scanout_disable(DisplayChangeListener *dcl)
{
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+ ddl_discard_display_messages(ddl);
+
qemu_dbus_display1_listener_call_disable(
ddl->proxy, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL);
}
@@ -272,10 +289,9 @@ static void dbus_call_update_gl(DisplayChangeListener *dcl,
}
#ifdef CONFIG_GBM
-static void dbus_scanout_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf)
+static void dbus_scanout_dmabuf_v1(DBusDisplayListener *ddl,
+ QemuDmaBuf *dmabuf)
{
- DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
g_autoptr(GError) err = NULL;
g_autoptr(GUnixFDList) fd_list = NULL;
int fd;
@@ -283,18 +299,18 @@ static void dbus_scanout_dmabuf(DisplayChangeListener *dcl,
uint64_t modifier;
bool y0_top;
- fd = qemu_dmabuf_get_fd(dmabuf);
+ fd = qemu_dmabuf_get_fds(dmabuf, NULL)[0];
fd_list = g_unix_fd_list_new();
if (g_unix_fd_list_append(fd_list, fd, &err) != 0) {
error_report("Failed to setup dmabuf fdlist: %s", err->message);
return;
}
- ddl_discard_pending_messages(ddl);
+ ddl_discard_display_messages(ddl);
width = qemu_dmabuf_get_width(dmabuf);
height = qemu_dmabuf_get_height(dmabuf);
- stride = qemu_dmabuf_get_stride(dmabuf);
+ stride = qemu_dmabuf_get_strides(dmabuf, NULL)[0];
fourcc = qemu_dmabuf_get_fourcc(dmabuf);
modifier = qemu_dmabuf_get_modifier(dmabuf);
y0_top = qemu_dmabuf_get_y0_top(dmabuf);
@@ -306,6 +322,87 @@ static void dbus_scanout_dmabuf(DisplayChangeListener *dcl,
y0_top, G_DBUS_CALL_FLAGS_NONE,
-1, fd_list, NULL, NULL, NULL);
}
+
+static void dbus_scanout_dmabuf_v2(DBusDisplayListener *ddl,
+ QemuDmaBuf *dmabuf)
+{
+ g_autoptr(GError) err = NULL;
+ g_autoptr(GUnixFDList) fd_list = NULL;
+ int i, fd_index[DMABUF_MAX_PLANES], num_fds;
+ uint32_t x, y, width, height, fourcc, backing_width, backing_height;
+ GVariant *fd, *offset, *stride, *fd_handles[DMABUF_MAX_PLANES];
+ uint64_t modifier;
+ bool y0_top;
+ int nfds, noffsets, nstrides;
+ const int *fds = qemu_dmabuf_get_fds(dmabuf, &nfds);
+ const uint32_t *offsets = qemu_dmabuf_get_offsets(dmabuf, &noffsets);
+ const uint32_t *strides = qemu_dmabuf_get_strides(dmabuf, &nstrides);
+ uint32_t num_planes = qemu_dmabuf_get_num_planes(dmabuf);
+
+ assert(nfds >= num_planes);
+ assert(noffsets >= num_planes);
+ assert(nstrides >= num_planes);
+
+ fd_list = g_unix_fd_list_new();
+
+ for (num_fds = 0; num_fds < num_planes; num_fds++) {
+ int plane_fd = fds[num_fds];
+
+ if (plane_fd < 0) {
+ break;
+ }
+
+ fd_index[num_fds] = g_unix_fd_list_append(fd_list, plane_fd, &err);
+ if (fd_index[num_fds] < 0) {
+ error_report("Failed to setup dmabuf fdlist: %s", err->message);
+ return;
+ }
+ }
+
+ ddl_discard_display_messages(ddl);
+
+ x = qemu_dmabuf_get_x(dmabuf);
+ y = qemu_dmabuf_get_y(dmabuf);
+ width = qemu_dmabuf_get_width(dmabuf);
+ height = qemu_dmabuf_get_height(dmabuf);
+ fourcc = qemu_dmabuf_get_fourcc(dmabuf);
+ backing_width = qemu_dmabuf_get_backing_width(dmabuf);
+ backing_height = qemu_dmabuf_get_backing_height(dmabuf);
+ modifier = qemu_dmabuf_get_modifier(dmabuf);
+ y0_top = qemu_dmabuf_get_y0_top(dmabuf);
+
+ offset = g_variant_new_fixed_array(G_VARIANT_TYPE_UINT32,
+ offsets, num_planes, sizeof(uint32_t));
+ stride = g_variant_new_fixed_array(G_VARIANT_TYPE_UINT32,
+ strides, num_planes, sizeof(uint32_t));
+
+ for (i = 0; i < num_fds; i++) {
+ fd_handles[i] = g_variant_new_handle(fd_index[i]);
+ }
+ fd = g_variant_new_array(G_VARIANT_TYPE_HANDLE, fd_handles, num_fds);
+
+ qemu_dbus_display1_listener_unix_scanout_dmabuf2_call_scanout_dmabuf2(
+ ddl->scanout_dmabuf_v2_proxy, fd, x, y, width, height, offset, stride,
+ num_planes, fourcc, backing_width, backing_height, modifier, y0_top,
+ G_DBUS_CALL_FLAGS_NONE, -1, fd_list, NULL, NULL, NULL);
+}
+
+static void dbus_scanout_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf)
+{
+ DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
+
+ if (ddl->scanout_dmabuf_v2_proxy) {
+ dbus_scanout_dmabuf_v2(ddl, dmabuf);
+ } else {
+ if (qemu_dmabuf_get_num_planes(dmabuf) > 1) {
+ g_debug("org.qemu.Display1.Listener.ScanoutDMABUF "
+ "does not support mutli plane");
+ return;
+ }
+ dbus_scanout_dmabuf_v1(ddl, dmabuf);
+ }
+}
#endif /* GBM */
#endif /* OPENGL */
@@ -320,13 +417,13 @@ static bool dbus_scanout_map(DBusDisplayListener *ddl)
return true;
}
- if (!ddl->can_share_map || !ddl->ds->handle) {
+ if (!ddl->can_share_map || !ddl->ds->share_handle) {
return false;
}
success = DuplicateHandle(
GetCurrentProcess(),
- ddl->ds->handle,
+ ddl->ds->share_handle,
ddl->peer_process,
&target_handle,
FILE_MAP_READ | SECTION_QUERY,
@@ -338,12 +435,12 @@ static bool dbus_scanout_map(DBusDisplayListener *ddl)
return false;
}
- ddl_discard_pending_messages(ddl);
+ ddl_discard_display_messages(ddl);
if (!qemu_dbus_display1_listener_win32_map_call_scanout_map_sync(
ddl->map_proxy,
GPOINTER_TO_UINT(target_handle),
- ddl->ds->handle_offset,
+ ddl->ds->share_handle_offset,
surface_width(ddl->ds),
surface_height(ddl->ds),
surface_stride(ddl->ds),
@@ -401,7 +498,7 @@ dbus_scanout_share_d3d_texture(
return false;
}
- ddl_discard_pending_messages(ddl);
+ ddl_discard_display_messages(ddl);
qemu_dbus_display1_listener_win32_d3d11_call_scanout_texture2d(
ddl->d3d11_proxy,
@@ -427,6 +524,51 @@ dbus_scanout_share_d3d_texture(
return true;
}
#endif /* CONFIG_OPENGL */
+#else /* !WIN32 */
+static bool dbus_scanout_map(DBusDisplayListener *ddl)
+{
+ g_autoptr(GError) err = NULL;
+ g_autoptr(GUnixFDList) fd_list = NULL;
+
+ if (ddl->ds_share == SHARE_KIND_MAPPED) {
+ return true;
+ }
+
+ if (!ddl->can_share_map || ddl->ds->share_handle == SHAREABLE_NONE) {
+ return false;
+ }
+
+ ddl_discard_display_messages(ddl);
+ fd_list = g_unix_fd_list_new();
+ if (g_unix_fd_list_append(fd_list, ddl->ds->share_handle, &err) != 0) {
+ g_debug("Failed to setup scanout map fdlist: %s", err->message);
+ ddl->can_share_map = false;
+ return false;
+ }
+
+ if (!qemu_dbus_display1_listener_unix_map_call_scanout_map_sync(
+ ddl->map_proxy,
+ g_variant_new_handle(0),
+ ddl->ds->share_handle_offset,
+ surface_width(ddl->ds),
+ surface_height(ddl->ds),
+ surface_stride(ddl->ds),
+ surface_format(ddl->ds),
+ G_DBUS_CALL_FLAGS_NONE,
+ DBUS_DEFAULT_TIMEOUT,
+ fd_list,
+ NULL,
+ NULL,
+ &err)) {
+ g_debug("Failed to call ScanoutMap: %s", err->message);
+ ddl->can_share_map = false;
+ return false;
+ }
+
+ ddl->ds_share = SHARE_KIND_MAPPED;
+
+ return true;
+}
#endif /* WIN32 */
#ifdef CONFIG_OPENGL
@@ -443,19 +585,18 @@ static void dbus_scanout_texture(DisplayChangeListener *dcl,
backing_width, backing_height, x, y, w, h);
#ifdef CONFIG_GBM
g_autoptr(QemuDmaBuf) dmabuf = NULL;
- int fd;
- uint32_t stride, fourcc;
+ int fd[DMABUF_MAX_PLANES], num_planes;
+ uint32_t offset[DMABUF_MAX_PLANES], stride[DMABUF_MAX_PLANES], fourcc;
uint64_t modifier;
assert(tex_id);
- fd = egl_get_fd_for_texture(tex_id, (EGLint *)&stride, (EGLint *)&fourcc,
- &modifier);
- if (fd < 0) {
- error_report("%s: failed to get fd for texture", __func__);
+ if (!egl_dmabuf_export_texture(tex_id, fd, (EGLint *)offset, (EGLint *)stride,
+ (EGLint *)&fourcc, &num_planes, &modifier)) {
+ error_report("%s: failed to export dmabuf for texture", __func__);
return;
}
- dmabuf = qemu_dmabuf_new(w, h, stride, x, y, backing_width,
- backing_height, fourcc, modifier, fd,
+ dmabuf = qemu_dmabuf_new(w, h, offset, stride, x, y, backing_width,
+ backing_height, fourcc, modifier, fd, num_planes,
false, backing_y_0_top);
dbus_scanout_dmabuf(dcl, dmabuf);
@@ -497,6 +638,8 @@ static void dbus_cursor_dmabuf(DisplayChangeListener *dcl,
return;
}
+ ddl_discard_cursor_messages(ddl);
+
egl_dmabuf_import_texture(dmabuf);
texture = qemu_dmabuf_get_texture(dmabuf);
if (!texture) {
@@ -659,7 +802,7 @@ static void ddl_scanout(DBusDisplayListener *ddl)
surface_stride(ddl->ds) * surface_height(ddl->ds), TRUE,
(GDestroyNotify)pixman_image_unref, pixman_image_ref(ddl->ds->image));
- ddl_discard_pending_messages(ddl);
+ ddl_discard_display_messages(ddl);
qemu_dbus_display1_listener_call_scanout(
ddl->proxy, surface_width(ddl->ds), surface_height(ddl->ds),
@@ -677,16 +820,22 @@ static void dbus_gfx_update(DisplayChangeListener *dcl,
trace_dbus_update(x, y, w, h);
-#ifdef WIN32
if (dbus_scanout_map(ddl)) {
+#ifdef WIN32
qemu_dbus_display1_listener_win32_map_call_update_map(
ddl->map_proxy,
x, y, w, h,
G_DBUS_CALL_FLAGS_NONE,
DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL);
+#else
+ qemu_dbus_display1_listener_unix_map_call_update_map(
+ ddl->map_proxy,
+ x, y, w, h,
+ G_DBUS_CALL_FLAGS_NONE,
+ DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL);
+#endif
return;
}
-#endif
if (x == 0 && y == 0 && w == surface_width(ddl->ds) && h == surface_height(ddl->ds)) {
return ddl_scanout(ddl);
@@ -740,6 +889,8 @@ static void dbus_cursor_define(DisplayChangeListener *dcl,
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
GVariant *v_data = NULL;
+ ddl_discard_cursor_messages(ddl);
+
v_data = g_variant_new_from_data(
G_VARIANT_TYPE("ay"),
c->data,
@@ -812,6 +963,8 @@ dbus_display_listener_dispose(GObject *object)
#ifdef CONFIG_OPENGL
egl_fb_destroy(&ddl->fb);
#endif
+#else /* !WIN32 */
+ g_clear_object(&ddl->scanout_dmabuf_v2_proxy);
#endif
G_OBJECT_CLASS(dbus_display_listener_parent_class)->dispose(object);
@@ -861,7 +1014,6 @@ dbus_display_listener_get_console(DBusDisplayListener *ddl)
return ddl->console;
}
-#ifdef WIN32
static bool
dbus_display_listener_implements(DBusDisplayListener *ddl, const char *iface)
{
@@ -876,6 +1028,7 @@ dbus_display_listener_implements(DBusDisplayListener *ddl, const char *iface)
return implements;
}
+#ifdef WIN32
static bool
dbus_display_listener_setup_peer_process(DBusDisplayListener *ddl)
{
@@ -958,10 +1111,11 @@ dbus_display_listener_setup_d3d11(DBusDisplayListener *ddl)
static void
dbus_display_listener_setup_shared_map(DBusDisplayListener *ddl)
{
-#ifdef WIN32
g_autoptr(GError) err = NULL;
- if (!dbus_display_listener_implements(ddl, "org.qemu.Display1.Listener.Win32.Map")) {
+#ifdef WIN32
+ if (!dbus_display_listener_implements(
+ ddl, "org.qemu.Display1.Listener.Win32.Map")) {
return;
}
@@ -982,6 +1136,40 @@ dbus_display_listener_setup_shared_map(DBusDisplayListener *ddl)
}
ddl->can_share_map = true;
+#else /* !WIN32 */
+ if (!dbus_display_listener_implements(
+ ddl, "org.qemu.Display1.Listener.Unix.Map")) {
+ return;
+ }
+ ddl->map_proxy = qemu_dbus_display1_listener_unix_map_proxy_new_sync(
+ ddl->conn, G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, NULL,
+ "/org/qemu/Display1/Listener", NULL, &err);
+ if (!ddl->map_proxy) {
+ g_debug("Failed to setup Unix map proxy: %s", err->message);
+ return;
+ }
+
+ ddl->can_share_map = true;
+#endif
+}
+
+static void dbus_display_listener_setup_scanout_dmabuf_v2(DBusDisplayListener *ddl)
+{
+#ifndef WIN32
+ g_autoptr(GError) err = NULL;
+
+ if (!dbus_display_listener_implements(
+ ddl, "org.qemu.Display1.Listener.Unix.ScanoutDMABUF2")) {
+ return;
+ }
+ ddl->scanout_dmabuf_v2_proxy =
+ qemu_dbus_display1_listener_unix_scanout_dmabuf2_proxy_new_sync(
+ ddl->conn, G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, NULL,
+ "/org/qemu/Display1/Listener", NULL, &err);
+ if (!ddl->scanout_dmabuf_v2_proxy) {
+ g_debug("Failed to setup Unix scanout dmabuf v2 proxy: %s", err->message);
+ return;
+ }
#endif
}
@@ -992,16 +1180,50 @@ dbus_filter(GDBusConnection *connection,
gpointer user_data)
{
DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(user_data);
- guint32 serial;
+ guint32 serial, discard_serial;
if (incoming) {
return message;
}
serial = g_dbus_message_get_serial(message);
- if (serial <= ddl->out_serial_to_discard) {
- trace_dbus_filter(serial, ddl->out_serial_to_discard);
- return NULL;
+
+ discard_serial = g_atomic_int_get(&ddl->display_serial_to_discard);
+ if (serial <= discard_serial) {
+ const char *member = g_dbus_message_get_member(message);
+ static const char *const display_messages[] = {
+ "Scanout",
+ "Update",
+#ifdef CONFIG_GBM
+ "ScanoutDMABUF",
+ "UpdateDMABUF",
+#endif
+ "ScanoutMap",
+ "UpdateMap",
+ "Disable",
+ NULL,
+ };
+
+ if (g_strv_contains(display_messages, member)) {
+ trace_dbus_filter(serial, discard_serial);
+ g_object_unref(message);
+ return NULL;
+ }
+ }
+
+ discard_serial = g_atomic_int_get(&ddl->cursor_serial_to_discard);
+ if (serial <= discard_serial) {
+ const gchar *member = g_dbus_message_get_member(message);
+ static const char *const cursor_messages[] = {
+ "CursorDefine",
+ NULL
+ };
+
+ if (g_strv_contains(cursor_messages, member)) {
+ trace_dbus_filter(serial, discard_serial);
+ g_object_unref(message);
+ return NULL;
+ }
}
return message;
@@ -1037,7 +1259,9 @@ dbus_display_listener_new(const char *bus_name,
ddl->console = console;
dbus_display_listener_setup_shared_map(ddl);
+ trace_dbus_can_share_map(ddl->can_share_map);
dbus_display_listener_setup_d3d11(ddl);
+ dbus_display_listener_setup_scanout_dmabuf_v2(ddl);
con = qemu_console_lookup_by_index(dbus_display_console_get_index(console));
assert(con);
diff --git a/ui/dbus.c b/ui/dbus.c
index e08b5de..dd03367 100644
--- a/ui/dbus.c
+++ b/ui/dbus.c
@@ -28,7 +28,7 @@
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qom/object_interfaces.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "ui/dbus-module.h"
#ifdef CONFIG_OPENGL
#include "ui/egl-helpers.h"
@@ -176,7 +176,7 @@ dbus_display_add_console(DBusDisplay *dd, int idx, Error **errp)
assert(con);
if (qemu_console_is_graphic(con) &&
- dd->gl_mode != DISPLAYGL_MODE_OFF) {
+ dd->gl_mode != DISPLAY_GL_MODE_OFF) {
qemu_console_set_display_gl_ctx(con, &dd->glctx);
}
@@ -317,11 +317,17 @@ dbus_display_add_client(int csock, Error **errp)
conn = g_socket_connection_factory_create_connection(socket);
dbus_display->add_client_cancellable = g_cancellable_new();
+ GDBusConnectionFlags flags =
+ G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER |
+ G_DBUS_CONNECTION_FLAGS_DELAY_MESSAGE_PROCESSING;
+
+#ifdef WIN32
+ flags |= G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_ALLOW_ANONYMOUS;
+#endif
g_dbus_connection_new(G_IO_STREAM(conn),
guid,
- G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER |
- G_DBUS_CONNECTION_FLAGS_DELAY_MESSAGE_PROCESSING,
+ flags,
NULL,
dbus_display->add_client_cancellable,
dbus_display_add_client_ready,
@@ -398,7 +404,7 @@ set_gl_mode(Object *o, int val, Error **errp)
}
static void
-dbus_display_class_init(ObjectClass *oc, void *data)
+dbus_display_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -447,7 +453,7 @@ dbus_vc_parse(QemuOpts *opts, ChardevBackend *backend,
}
static void
-dbus_vc_class_init(ObjectClass *oc, void *data)
+dbus_vc_class_init(ObjectClass *oc, const void *data)
{
DBusVCClass *klass = DBUS_VC_CLASS(oc);
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -466,9 +472,9 @@ static const TypeInfo dbus_vc_type_info = {
static void
early_dbus_init(DisplayOptions *opts)
{
- DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF;
+ DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAY_GL_MODE_OFF;
- if (mode != DISPLAYGL_MODE_OFF) {
+ if (mode != DISPLAY_GL_MODE_OFF) {
#ifdef CONFIG_OPENGL
egl_init(opts->u.dbus.rendernode, mode, &error_fatal);
#else
@@ -476,13 +482,13 @@ early_dbus_init(DisplayOptions *opts)
#endif
}
- type_register(&dbus_vc_type_info);
+ type_register_static(&dbus_vc_type_info);
}
static void
dbus_init(DisplayState *ds, DisplayOptions *opts)
{
- DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF;
+ DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAY_GL_MODE_OFF;
if (opts->u.dbus.addr && opts->u.dbus.p2p) {
error_report("dbus: can't accept both addr=X and p2p=yes options");
@@ -508,7 +514,7 @@ static const TypeInfo dbus_display_info = {
.instance_init = dbus_display_init,
.instance_finalize = dbus_display_finalize,
.class_init = dbus_display_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/ui/dmabuf.c b/ui/dmabuf.c
index df7a097..7433a26 100644
--- a/ui/dmabuf.c
+++ b/ui/dmabuf.c
@@ -11,10 +11,12 @@
#include "ui/dmabuf.h"
struct QemuDmaBuf {
- int fd;
+ int fd[DMABUF_MAX_PLANES];
uint32_t width;
uint32_t height;
- uint32_t stride;
+ uint32_t offset[DMABUF_MAX_PLANES];
+ uint32_t stride[DMABUF_MAX_PLANES];
+ uint32_t num_planes;
uint32_t fourcc;
uint64_t modifier;
uint32_t texture;
@@ -30,28 +32,33 @@ struct QemuDmaBuf {
};
QemuDmaBuf *qemu_dmabuf_new(uint32_t width, uint32_t height,
- uint32_t stride, uint32_t x,
- uint32_t y, uint32_t backing_width,
- uint32_t backing_height, uint32_t fourcc,
- uint64_t modifier, int32_t dmabuf_fd,
+ const uint32_t *offset, const uint32_t *stride,
+ uint32_t x, uint32_t y,
+ uint32_t backing_width, uint32_t backing_height,
+ uint32_t fourcc, uint64_t modifier,
+ const int32_t *dmabuf_fd, uint32_t num_planes,
bool allow_fences, bool y0_top) {
QemuDmaBuf *dmabuf;
+ assert(num_planes > 0 && num_planes <= DMABUF_MAX_PLANES);
+
dmabuf = g_new0(QemuDmaBuf, 1);
dmabuf->width = width;
dmabuf->height = height;
- dmabuf->stride = stride;
+ memcpy(dmabuf->offset, offset, num_planes * sizeof(*offset));
+ memcpy(dmabuf->stride, stride, num_planes * sizeof(*stride));
dmabuf->x = x;
dmabuf->y = y;
dmabuf->backing_width = backing_width;
dmabuf->backing_height = backing_height;
dmabuf->fourcc = fourcc;
dmabuf->modifier = modifier;
- dmabuf->fd = dmabuf_fd;
+ memcpy(dmabuf->fd, dmabuf_fd, num_planes * sizeof(*dmabuf_fd));
dmabuf->allow_fences = allow_fences;
dmabuf->y0_top = y0_top;
dmabuf->fence_fd = -1;
+ dmabuf->num_planes = num_planes;
return dmabuf;
}
@@ -65,31 +72,40 @@ void qemu_dmabuf_free(QemuDmaBuf *dmabuf)
g_free(dmabuf);
}
-int qemu_dmabuf_get_fd(QemuDmaBuf *dmabuf)
+const int *qemu_dmabuf_get_fds(QemuDmaBuf *dmabuf, int *nfds)
{
assert(dmabuf != NULL);
+ if (nfds) {
+ *nfds = ARRAY_SIZE(dmabuf->fd);
+ }
+
return dmabuf->fd;
}
-int qemu_dmabuf_dup_fd(QemuDmaBuf *dmabuf)
+void qemu_dmabuf_dup_fds(QemuDmaBuf *dmabuf, int *fds, int nfds)
{
+ int i;
+
assert(dmabuf != NULL);
+ assert(nfds >= dmabuf->num_planes);
- if (dmabuf->fd >= 0) {
- return dup(dmabuf->fd);
- } else {
- return -1;
+ for (i = 0; i < dmabuf->num_planes; i++) {
+ fds[i] = dmabuf->fd[i] >= 0 ? dup(dmabuf->fd[i]) : -1;
}
}
void qemu_dmabuf_close(QemuDmaBuf *dmabuf)
{
+ int i;
+
assert(dmabuf != NULL);
- if (dmabuf->fd >= 0) {
- close(dmabuf->fd);
- dmabuf->fd = -1;
+ for (i = 0; i < dmabuf->num_planes; i++) {
+ if (dmabuf->fd[i] >= 0) {
+ close(dmabuf->fd[i]);
+ dmabuf->fd[i] = -1;
+ }
}
}
@@ -107,13 +123,35 @@ uint32_t qemu_dmabuf_get_height(QemuDmaBuf *dmabuf)
return dmabuf->height;
}
-uint32_t qemu_dmabuf_get_stride(QemuDmaBuf *dmabuf)
+const uint32_t *qemu_dmabuf_get_offsets(QemuDmaBuf *dmabuf, int *noffsets)
+{
+ assert(dmabuf != NULL);
+
+ if (noffsets) {
+ *noffsets = ARRAY_SIZE(dmabuf->offset);
+ }
+
+ return dmabuf->offset;
+}
+
+const uint32_t *qemu_dmabuf_get_strides(QemuDmaBuf *dmabuf, int *nstrides)
{
assert(dmabuf != NULL);
+ if (nstrides) {
+ *nstrides = ARRAY_SIZE(dmabuf->stride);
+ }
+
return dmabuf->stride;
}
+uint32_t qemu_dmabuf_get_num_planes(QemuDmaBuf *dmabuf)
+{
+ assert(dmabuf != NULL);
+
+ return dmabuf->num_planes;
+}
+
uint32_t qemu_dmabuf_get_fourcc(QemuDmaBuf *dmabuf)
{
assert(dmabuf != NULL);
@@ -221,9 +259,3 @@ void qemu_dmabuf_set_draw_submitted(QemuDmaBuf *dmabuf, bool draw_submitted)
assert(dmabuf != NULL);
dmabuf->draw_submitted = draw_submitted;
}
-
-void qemu_dmabuf_set_fd(QemuDmaBuf *dmabuf, int32_t fd)
-{
- assert(dmabuf != NULL);
- dmabuf->fd = fd;
-}
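
The ui/dmabuf.c hunks above replace the single fd/stride fields of QemuDmaBuf with per-plane arrays of size DMABUF_MAX_PLANES plus a num_planes count. As an illustration only (not part of the patch), a caller wrapping a single-plane buffer with the new array-based constructor could look like the sketch below; the wrap_single_plane name, the DRM_FORMAT_XRGB8888/DRM_FORMAT_MOD_LINEAR values, and the assumption that this builds inside the QEMU tree are all placeholders.

/*
 * Sketch: wrap one dmabuf plane with the new qemu_dmabuf_new() signature.
 * With num_planes == 1 only the first entry of each array is copied.
 */
#include "qemu/osdep.h"
#include "ui/dmabuf.h"
#include "standard-headers/drm/drm_fourcc.h"

static QemuDmaBuf *wrap_single_plane(int32_t fd, uint32_t width,
                                     uint32_t height, uint32_t stride)
{
    const uint32_t offsets[DMABUF_MAX_PLANES] = { 0 };
    const uint32_t strides[DMABUF_MAX_PLANES] = { stride };
    const int32_t fds[DMABUF_MAX_PLANES] = { fd, -1, -1, -1 };

    /* format and modifier are placeholders; real callers pass what the
     * producer of the buffer actually used */
    return qemu_dmabuf_new(width, height, offsets, strides,
                           0, 0, width, height,
                           DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
                           fds, 1, false, false);
}

Teardown mirrors the loop added above: qemu_dmabuf_close() closes every still-open plane fd, and qemu_dmabuf_free() releases the struct.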
diff --git a/ui/egl-context.c b/ui/egl-context.c
index 9e0df46..aed3e3b 100644
--- a/ui/egl-context.c
+++ b/ui/egl-context.c
@@ -17,7 +17,7 @@ QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc,
EGL_CONTEXT_MINOR_VERSION_KHR, params->minor_ver,
EGL_NONE
};
- bool gles = (qemu_egl_mode == DISPLAYGL_MODE_ES);
+ bool gles = (qemu_egl_mode == DISPLAY_GL_MODE_ES);
ctx = eglCreateContext(qemu_egl_display, qemu_egl_config,
eglGetCurrentContext(),
diff --git a/ui/egl-headless.c b/ui/egl-headless.c
index 6187249..1f6b845 100644
--- a/ui/egl-headless.c
+++ b/ui/egl-headless.c
@@ -207,7 +207,7 @@ static const DisplayGLCtxOps eglctx_ops = {
static void early_egl_headless_init(DisplayOptions *opts)
{
- DisplayGLMode mode = DISPLAYGL_MODE_ON;
+ DisplayGLMode mode = DISPLAY_GL_MODE_ON;
if (opts->has_gl) {
mode = opts->gl;
diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c
index 99b2ebbe..5503a79 100644
--- a/ui/egl-helpers.c
+++ b/ui/egl-helpers.c
@@ -20,9 +20,10 @@
#include "qemu/error-report.h"
#include "ui/console.h"
#include "ui/egl-helpers.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "trace.h"
+#include "standard-headers/drm/drm_fourcc.h"
EGLDisplay *qemu_egl_display;
EGLConfig qemu_egl_config;
@@ -92,14 +93,18 @@ void egl_fb_destroy(egl_fb *fb)
fb->width = 0;
fb->height = 0;
+ fb->x = 0;
+ fb->y = 0;
fb->texture = 0;
fb->framebuffer = 0;
}
-void egl_fb_setup_default(egl_fb *fb, int width, int height)
+void egl_fb_setup_default(egl_fb *fb, int width, int height, int x, int y)
{
fb->width = width;
fb->height = height;
+ fb->x = x;
+ fb->y = y;
fb->framebuffer = 0; /* default framebuffer */
}
@@ -144,6 +149,7 @@ void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip)
glBindFramebuffer(GL_READ_FRAMEBUFFER, src->framebuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dst->framebuffer);
glViewport(0, 0, dst->width, dst->height);
+ glClear(GL_COLOR_BUFFER_BIT);
if (src->dmabuf) {
x1 = qemu_dmabuf_get_x(src->dmabuf);
@@ -160,7 +166,8 @@ void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip)
x2 = x1 + w;
glBlitFramebuffer(x1, y1, x2, y2,
- 0, 0, dst->width, dst->height,
+ dst->x, dst->y,
+ dst->x + dst->width, dst->y + dst->height,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
}
@@ -257,6 +264,11 @@ int egl_rendernode_init(const char *rendernode, DisplayGLMode mode)
error_report("egl: EGL_MESA_image_dma_buf_export not supported");
goto err;
}
+ if (!epoxy_has_egl_extension(qemu_egl_display,
+ "EGL_EXT_image_dma_buf_import_modifiers")) {
+ error_report("egl: EGL_EXT_image_dma_buf_import_modifiers not supported");
+ goto err;
+ }
qemu_egl_rn_ctx = qemu_egl_init_ctx();
if (!qemu_egl_rn_ctx) {
@@ -277,44 +289,86 @@ err:
return -1;
}
-int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
- EGLuint64KHR *modifier)
+bool egl_dmabuf_export_texture(uint32_t tex_id, int *fd, EGLint *offset,
+ EGLint *stride, EGLint *fourcc, int *num_planes,
+ EGLuint64KHR *modifier)
{
EGLImageKHR image;
- EGLint num_planes, fd;
+ EGLuint64KHR modifiers[DMABUF_MAX_PLANES];
image = eglCreateImageKHR(qemu_egl_display, eglGetCurrentContext(),
EGL_GL_TEXTURE_2D_KHR,
(EGLClientBuffer)(unsigned long)tex_id,
NULL);
if (!image) {
- return -1;
+ return false;
}
eglExportDMABUFImageQueryMESA(qemu_egl_display, image, fourcc,
- &num_planes, modifier);
- if (num_planes != 1) {
- eglDestroyImageKHR(qemu_egl_display, image);
- return -1;
- }
- eglExportDMABUFImageMESA(qemu_egl_display, image, &fd, stride, NULL);
+ num_planes, modifiers);
+ eglExportDMABUFImageMESA(qemu_egl_display, image, fd, stride, offset);
eglDestroyImageKHR(qemu_egl_display, image);
- return fd;
+ /* Only first modifier matters. */
+ if (modifier) {
+ *modifier = modifiers[0];
+ }
+
+ return true;
}
void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf)
{
EGLImageKHR image = EGL_NO_IMAGE_KHR;
EGLint attrs[64];
- int i = 0;
- uint64_t modifier;
+ int i = 0, j;
+ uint64_t modifier = qemu_dmabuf_get_modifier(dmabuf);
uint32_t texture = qemu_dmabuf_get_texture(dmabuf);
+ int nfds, noffsets, nstrides;
+ const int *fds = qemu_dmabuf_get_fds(dmabuf, &nfds);
+ const uint32_t *offsets = qemu_dmabuf_get_offsets(dmabuf, &noffsets);
+ const uint32_t *strides = qemu_dmabuf_get_strides(dmabuf, &nstrides);
+ uint32_t num_planes = qemu_dmabuf_get_num_planes(dmabuf);
+
+ EGLint fd_attrs[] = {
+ EGL_DMA_BUF_PLANE0_FD_EXT,
+ EGL_DMA_BUF_PLANE1_FD_EXT,
+ EGL_DMA_BUF_PLANE2_FD_EXT,
+ EGL_DMA_BUF_PLANE3_FD_EXT,
+ };
+ EGLint offset_attrs[] = {
+ EGL_DMA_BUF_PLANE0_OFFSET_EXT,
+ EGL_DMA_BUF_PLANE1_OFFSET_EXT,
+ EGL_DMA_BUF_PLANE2_OFFSET_EXT,
+ EGL_DMA_BUF_PLANE3_OFFSET_EXT,
+ };
+ EGLint stride_attrs[] = {
+ EGL_DMA_BUF_PLANE0_PITCH_EXT,
+ EGL_DMA_BUF_PLANE1_PITCH_EXT,
+ EGL_DMA_BUF_PLANE2_PITCH_EXT,
+ EGL_DMA_BUF_PLANE3_PITCH_EXT,
+ };
+ EGLint modifier_lo_attrs[] = {
+ EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT,
+ EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT,
+ EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT,
+ EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT,
+ };
+ EGLint modifier_hi_attrs[] = {
+ EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT,
+ EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT,
+ EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT,
+ EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT,
+ };
if (texture != 0) {
return;
}
+ assert(nfds >= num_planes);
+ assert(noffsets >= num_planes);
+ assert(nstrides >= num_planes);
+
attrs[i++] = EGL_WIDTH;
attrs[i++] = qemu_dmabuf_get_backing_width(dmabuf);
attrs[i++] = EGL_HEIGHT;
@@ -322,21 +376,22 @@ void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf)
attrs[i++] = EGL_LINUX_DRM_FOURCC_EXT;
attrs[i++] = qemu_dmabuf_get_fourcc(dmabuf);
- attrs[i++] = EGL_DMA_BUF_PLANE0_FD_EXT;
- attrs[i++] = qemu_dmabuf_get_fd(dmabuf);
- attrs[i++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
- attrs[i++] = qemu_dmabuf_get_stride(dmabuf);
- attrs[i++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
- attrs[i++] = 0;
-#ifdef EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT
- modifier = qemu_dmabuf_get_modifier(dmabuf);
- if (modifier) {
- attrs[i++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- attrs[i++] = (modifier >> 0) & 0xffffffff;
- attrs[i++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- attrs[i++] = (modifier >> 32) & 0xffffffff;
+ for (j = 0; j < num_planes; j++) {
+ attrs[i++] = fd_attrs[j];
+ /* fd[1-3] may be -1 if using a joint buffer for all planes */
+ attrs[i++] = fds[j] >= 0 ? fds[j] : fds[0];
+ attrs[i++] = stride_attrs[j];
+ attrs[i++] = strides[j];
+ attrs[i++] = offset_attrs[j];
+ attrs[i++] = offsets[j];
+ if (modifier != DRM_FORMAT_MOD_INVALID) {
+ attrs[i++] = modifier_lo_attrs[j];
+ attrs[i++] = (modifier >> 0) & 0xffffffff;
+ attrs[i++] = modifier_hi_attrs[j];
+ attrs[i++] = (modifier >> 32) & 0xffffffff;
+ }
}
-#endif
+
attrs[i++] = EGL_NONE;
image = eglCreateImageKHR(qemu_egl_display,
@@ -503,7 +558,7 @@ static int qemu_egl_init_dpy(EGLNativeDisplayType dpy,
EGLint major, minor;
EGLBoolean b;
EGLint n;
- bool gles = (mode == DISPLAYGL_MODE_ES);
+ bool gles = (mode == DISPLAY_GL_MODE_ES);
qemu_egl_display = qemu_egl_get_display(dpy, platform);
if (qemu_egl_display == EGL_NO_DISPLAY) {
@@ -533,7 +588,7 @@ static int qemu_egl_init_dpy(EGLNativeDisplayType dpy,
return -1;
}
- qemu_egl_mode = gles ? DISPLAYGL_MODE_ES : DISPLAYGL_MODE_CORE;
+ qemu_egl_mode = gles ? DISPLAY_GL_MODE_ES : DISPLAY_GL_MODE_CORE;
return 0;
}
@@ -564,8 +619,8 @@ int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy, DisplayGLMode mode)
int qemu_egl_init_dpy_win32(EGLNativeDisplayType dpy, DisplayGLMode mode)
{
/* prefer GL ES, as that's what ANGLE supports */
- if (mode == DISPLAYGL_MODE_ON) {
- mode = DISPLAYGL_MODE_ES;
+ if (mode == DISPLAY_GL_MODE_ON) {
+ mode = DISPLAY_GL_MODE_ES;
}
if (qemu_egl_init_dpy(dpy, 0, mode) < 0) {
@@ -618,7 +673,7 @@ EGLContext qemu_egl_init_ctx(void)
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE
};
- bool gles = (qemu_egl_mode == DISPLAYGL_MODE_ES);
+ bool gles = (qemu_egl_mode == DISPLAY_GL_MODE_ES);
EGLContext ectx;
EGLBoolean b;
@@ -642,7 +697,7 @@ bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp)
{
ERRP_GUARD();
- if (mode == DISPLAYGL_MODE_OFF) {
+ if (mode == DISPLAY_GL_MODE_OFF) {
error_setg(errp, "egl: turning off GL doesn't make sense");
return false;
}
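
In ui/egl-helpers.c above, egl_get_fd_for_texture() is replaced by egl_dmabuf_export_texture(), which fills per-plane fd/offset/stride arrays and reports the plane count and the (first) modifier. A minimal caller sketch follows; it is illustrative only, assumes DMABUF_MAX_PLANES is visible via ui/dmabuf.h, and the export_example name and tex argument are placeholders.

/*
 * Sketch: export a GL texture as a (possibly multi-plane) dmabuf using the
 * new helper; the exported fds are then owned by whoever closes them.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "ui/dmabuf.h"
#include "ui/egl-helpers.h"

static void export_example(uint32_t tex)
{
    int fd[DMABUF_MAX_PLANES], num_planes;
    EGLint offset[DMABUF_MAX_PLANES], stride[DMABUF_MAX_PLANES], fourcc;
    EGLuint64KHR modifier;

    if (!egl_dmabuf_export_texture(tex, fd, offset, stride,
                                   &fourcc, &num_planes, &modifier)) {
        error_report("dmabuf export failed for texture %u", tex);
        return;
    }
    /* fd[0] .. fd[num_planes - 1] are valid; close them when done */
}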
diff --git a/ui/gtk-clipboard.c b/ui/gtk-clipboard.c
index 8d8a636..65d89ec 100644
--- a/ui/gtk-clipboard.c
+++ b/ui/gtk-clipboard.c
@@ -19,6 +19,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "ui/gtk.h"
@@ -95,11 +96,13 @@ static void gd_clipboard_update_info(GtkDisplayState *gd,
gtk_clipboard_clear(gd->gtkcb[s]);
if (targets) {
gd->cbowner[s] = true;
- gtk_clipboard_set_with_data(gd->gtkcb[s],
- targets, n_targets,
- gd_clipboard_get_data,
- gd_clipboard_clear,
- gd);
+ if (!gtk_clipboard_set_with_data(gd->gtkcb[s],
+ targets, n_targets,
+ gd_clipboard_get_data,
+ gd_clipboard_clear,
+ gd)) {
+ warn_report("Failed to set GTK clipboard");
+ }
gtk_target_table_free(targets, n_targets);
}
diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c
index 9831c10..0b787be 100644
--- a/ui/gtk-egl.c
+++ b/ui/gtk-egl.c
@@ -22,7 +22,7 @@
#include "ui/egl-helpers.h"
#include "ui/shader.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static void gtk_egl_set_scanout_mode(VirtualConsole *vc, bool scanout)
{
@@ -70,16 +70,18 @@ void gd_egl_draw(VirtualConsole *vc)
QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf;
int fence_fd;
#endif
- int ww, wh, ws;
+ int ww, wh, pw, ph, gs;
if (!vc->gfx.gls) {
return;
}
window = gtk_widget_get_window(vc->gfx.drawing_area);
- ws = gdk_window_get_scale_factor(window);
- ww = gdk_window_get_width(window) * ws;
- wh = gdk_window_get_height(window) * ws;
+ gs = gdk_window_get_scale_factor(window);
+ ww = gdk_window_get_width(window);
+ wh = gdk_window_get_height(window);
+ pw = ww * gs;
+ ph = wh * gs;
if (vc->gfx.scanout_mode) {
#ifdef CONFIG_GBM
@@ -93,8 +95,9 @@ void gd_egl_draw(VirtualConsole *vc)
#endif
gd_egl_scanout_flush(&vc->gfx.dcl, 0, 0, vc->gfx.w, vc->gfx.h);
- vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds);
- vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds);
+ gd_update_scale(vc, ww, wh,
+ surface_width(vc->gfx.ds),
+ surface_height(vc->gfx.ds));
glFlush();
#ifdef CONFIG_GBM
@@ -115,13 +118,14 @@ void gd_egl_draw(VirtualConsole *vc)
eglMakeCurrent(qemu_egl_display, vc->gfx.esurface,
vc->gfx.esurface, vc->gfx.ectx);
- surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, ww, wh);
+ surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, pw, ph);
surface_gl_render_texture(vc->gfx.gls, vc->gfx.ds);
eglSwapBuffers(qemu_egl_display, vc->gfx.esurface);
- vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds);
- vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds);
+ gd_update_scale(vc, ww, wh,
+ surface_width(vc->gfx.ds),
+ surface_height(vc->gfx.ds));
glFlush();
}
@@ -336,7 +340,11 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl,
{
VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
GdkWindow *window;
- int ww, wh, ws;
+ int px_offset, py_offset;
+ int gs;
+ int pw_widget, ph_widget, pw_surface, ph_surface;
+ int ww_widget, wh_widget, ww_surface, wh_surface;
+ int fbw, fbh;
if (!vc->gfx.scanout_mode) {
return;
@@ -349,10 +357,32 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl,
vc->gfx.esurface, vc->gfx.ectx);
window = gtk_widget_get_window(vc->gfx.drawing_area);
- ws = gdk_window_get_scale_factor(window);
- ww = gdk_window_get_width(window) * ws;
- wh = gdk_window_get_height(window) * ws;
- egl_fb_setup_default(&vc->gfx.win_fb, ww, wh);
+ gs = gdk_window_get_scale_factor(window);
+ ww_widget = gdk_window_get_width(window);
+ wh_widget = gdk_window_get_height(window);
+ fbw = surface_width(vc->gfx.ds);
+ fbh = surface_height(vc->gfx.ds);
+
+ gd_update_scale(vc, ww_widget, wh_widget, fbw, fbh);
+
+ ww_surface = fbw * vc->gfx.scale_x;
+ wh_surface = fbh * vc->gfx.scale_y;
+ pw_widget = ww_widget * gs;
+ ph_widget = wh_widget * gs;
+ pw_surface = ww_surface * gs;
+ ph_surface = wh_surface * gs;
+
+ px_offset = 0;
+ py_offset = 0;
+ if (pw_widget > pw_surface) {
+ px_offset = (pw_widget - pw_surface) / 2;
+ }
+ if (ph_widget > ph_surface) {
+ py_offset = (ph_widget - ph_surface) / 2;
+ }
+
+ egl_fb_setup_default(&vc->gfx.win_fb, pw_surface, ph_surface,
+ px_offset, py_offset);
if (vc->gfx.cursor_fb.texture) {
egl_texture_blit(vc->gfx.gls, &vc->gfx.win_fb, &vc->gfx.guest_fb,
vc->gfx.y0_top);
diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c
index b628b35..8151cc4 100644
--- a/ui/gtk-gl-area.c
+++ b/ui/gtk-gl-area.c
@@ -16,7 +16,7 @@
#include "ui/gtk.h"
#include "ui/egl-helpers.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
static void gtk_gl_area_set_scanout_mode(VirtualConsole *vc, bool scanout)
{
@@ -42,16 +42,37 @@ void gd_gl_area_draw(VirtualConsole *vc)
#ifdef CONFIG_GBM
QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf;
#endif
- int ww, wh, ws, y1, y2;
+ int pw, ph, gs, y1, y2;
+ int ww, wh;
+ int ww_surface, wh_surface;
+ int fbw, fbh;
+ int wx_offset, wy_offset;
if (!vc->gfx.gls) {
return;
}
gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area));
- ws = gdk_window_get_scale_factor(gtk_widget_get_window(vc->gfx.drawing_area));
- ww = gtk_widget_get_allocated_width(vc->gfx.drawing_area) * ws;
- wh = gtk_widget_get_allocated_height(vc->gfx.drawing_area) * ws;
+ gs = gdk_window_get_scale_factor(gtk_widget_get_window(vc->gfx.drawing_area));
+ fbw = surface_width(vc->gfx.ds);
+ fbh = surface_height(vc->gfx.ds);
+ ww = gtk_widget_get_allocated_width(vc->gfx.drawing_area);
+ wh = gtk_widget_get_allocated_height(vc->gfx.drawing_area);
+ pw = ww * gs;
+ ph = wh * gs;
+
+ gd_update_scale(vc, ww, wh, fbw, fbh);
+
+ ww_surface = fbw * vc->gfx.scale_x;
+ wh_surface = fbh * vc->gfx.scale_y;
+
+ wx_offset = wy_offset = 0;
+ if (ww > ww_surface) {
+ wx_offset = (ww - ww_surface) / 2;
+ }
+ if (wh > wh_surface) {
+ wy_offset = (wh - wh_surface) / 2;
+ }
if (vc->gfx.scanout_mode) {
if (!vc->gfx.guest_fb.framebuffer) {
@@ -71,11 +92,29 @@ void gd_gl_area_draw(VirtualConsole *vc)
glBindFramebuffer(GL_READ_FRAMEBUFFER, vc->gfx.guest_fb.framebuffer);
/* GtkGLArea sets GL_DRAW_FRAMEBUFFER for us */
- glViewport(0, 0, ww, wh);
+ if (wx_offset > 0) {
+ glEnable(GL_SCISSOR_TEST);
+ glScissor(0, 0, wx_offset * gs, wh * gs);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glScissor((ww - wx_offset) * gs, 0, wx_offset * gs, wh * gs);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDisable(GL_SCISSOR_TEST);
+ }
+ if (wy_offset > 0) {
+ glEnable(GL_SCISSOR_TEST);
+ glScissor(0, 0, ww * gs, wy_offset * gs);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glScissor(0, (wh - wy_offset) * gs, ww * gs, wy_offset * gs);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glDisable(GL_SCISSOR_TEST);
+ }
+
+ glViewport(0, 0, pw, ph);
y1 = vc->gfx.y0_top ? 0 : vc->gfx.h;
y2 = vc->gfx.y0_top ? vc->gfx.h : 0;
glBlitFramebuffer(0, y1, vc->gfx.w, y2,
- 0, 0, ww, wh,
+ wx_offset * gs, wy_offset * gs,
+ (ww - wx_offset) * gs, (wh - wy_offset) * gs,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
#ifdef CONFIG_GBM
if (dmabuf) {
@@ -101,7 +140,7 @@ void gd_gl_area_draw(VirtualConsole *vc)
}
gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area));
- surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, ww, wh);
+ surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, pw, ph);
surface_gl_render_texture(vc->gfx.gls, vc->gfx.ds);
}
}
diff --git a/ui/gtk.c b/ui/gtk.c
index bc29f7a..8c4a94c 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -38,6 +38,7 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "qemu-main.h"
#include "ui/console.h"
#include "ui/gtk.h"
@@ -55,8 +56,8 @@
#include "trace.h"
#include "ui/input.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
+#include "system/runstate.h"
+#include "system/system.h"
#include "keymaps.h"
#include "chardev/char.h"
#include "qom/object.h"
@@ -386,16 +387,16 @@ static void *gd_win32_get_hwnd(VirtualConsole *vc)
/** DisplayState Callbacks **/
static void gd_update(DisplayChangeListener *dcl,
- int x, int y, int w, int h)
+ int fbx, int fby, int fbw, int fbh)
{
VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
GdkWindow *win;
- int x1, x2, y1, y2;
- int mx, my;
- int fbw, fbh;
- int ww, wh;
+ int wx1, wx2, wy1, wy2;
+ int wx_offset, wy_offset;
+ int ww_surface, wh_surface;
+ int ww_widget, wh_widget;
- trace_gd_update(vc->label, x, y, w, h);
+ trace_gd_update(vc->label, fbx, fby, fbw, fbh);
if (!gtk_widget_get_realized(vc->gfx.drawing_area)) {
return;
@@ -404,35 +405,36 @@ static void gd_update(DisplayChangeListener *dcl,
if (vc->gfx.convert) {
pixman_image_composite(PIXMAN_OP_SRC, vc->gfx.ds->image,
NULL, vc->gfx.convert,
- x, y, 0, 0, x, y, w, h);
+ fbx, fby, 0, 0, fbx, fby, fbw, fbh);
}
- x1 = floor(x * vc->gfx.scale_x);
- y1 = floor(y * vc->gfx.scale_y);
+ wx1 = floor(fbx * vc->gfx.scale_x);
+ wy1 = floor(fby * vc->gfx.scale_y);
- x2 = ceil(x * vc->gfx.scale_x + w * vc->gfx.scale_x);
- y2 = ceil(y * vc->gfx.scale_y + h * vc->gfx.scale_y);
+ wx2 = ceil(fbx * vc->gfx.scale_x + fbw * vc->gfx.scale_x);
+ wy2 = ceil(fby * vc->gfx.scale_y + fbh * vc->gfx.scale_y);
- fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
- fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
+ ww_surface = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
+ wh_surface = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
win = gtk_widget_get_window(vc->gfx.drawing_area);
if (!win) {
return;
}
- ww = gdk_window_get_width(win);
- wh = gdk_window_get_height(win);
+ ww_widget = gdk_window_get_width(win);
+ wh_widget = gdk_window_get_height(win);
- mx = my = 0;
- if (ww > fbw) {
- mx = (ww - fbw) / 2;
+ wx_offset = wy_offset = 0;
+ if (ww_widget > ww_surface) {
+ wx_offset = (ww_widget - ww_surface) / 2;
}
- if (wh > fbh) {
- my = (wh - fbh) / 2;
+ if (wh_widget > wh_surface) {
+ wy_offset = (wh_widget - wh_surface) / 2;
}
gtk_widget_queue_draw_area(vc->gfx.drawing_area,
- mx + x1, my + y1, (x2 - x1), (y2 - y1));
+ wx_offset + wx1, wy_offset + wy1,
+ (wx2 - wx1), (wy2 - wy1));
}
static void gd_refresh(DisplayChangeListener *dcl)
@@ -770,8 +772,21 @@ static void gd_resize_event(GtkGLArea *area,
gint width, gint height, gpointer *opaque)
{
VirtualConsole *vc = (void *)opaque;
+ double pw = width, ph = height;
+ double sx = vc->gfx.scale_x, sy = vc->gfx.scale_y;
+ GdkWindow *window = gtk_widget_get_window(GTK_WIDGET(area));
+ const int gs = gdk_window_get_scale_factor(window);
- gd_set_ui_size(vc, width, height);
+ if (!vc->s->free_scale && !vc->s->full_screen) {
+ pw /= sx;
+ ph /= sy;
+ }
+
+ /**
+     * width and height here are in pixel coordinates, so we must divide
+     * them by the global window scale (gs)
+ */
+ gd_set_ui_size(vc, pw / gs, ph / gs);
}
#endif
@@ -799,12 +814,95 @@ void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget)
#endif
}
+void gd_update_scale(VirtualConsole *vc, int ww, int wh, int fbw, int fbh)
+{
+ if (!vc) {
+ return;
+ }
+
+ if (vc->s->full_screen) {
+ vc->gfx.scale_x = (double)ww / fbw;
+ vc->gfx.scale_y = (double)wh / fbh;
+ } else if (vc->s->free_scale) {
+ double sx, sy;
+
+ sx = (double)ww / fbw;
+ sy = (double)wh / fbh;
+
+ vc->gfx.scale_x = vc->gfx.scale_y = MIN(sx, sy);
+ }
+}
+/**
+ * DOC: Coordinate handling.
+ *
+ * We deal with sizes and positions in several different coordinate
+ * systems, and it is easy to mix them up. The definitions below spell
+ * these coordinate systems out explicitly. In addition, the same naming
+ * convention is used for variables representing values in the different
+ * coordinate systems.
+ *
+ * I. Definitions
+ *
+ * - (guest) buffer coordinate: these are the coordinates that the guest
+ *   sees. The x/y offsets and width/height specified in commands sent by
+ *   the guest are in buffer coordinates.
+ *
+ * - (host) pixel coordinate: this is the pixel-level coordinate on the
+ *   host desktop. A window/widget of width 300 in pixel coordinates
+ *   occupies 300 pixels horizontally.
+ *
+ * - (host) logical window coordinate: the desktop-level global scaling
+ *   factor is what makes this coordinate system necessary. It
+ * always holds that (logical window size) * (global scale factor) =
+ * (pixel size).
+ *
+ * - global scale factor: this is specified at the desktop level and is
+ *   typically invariant for the lifetime of the process. Users with
+ * high-DPI monitors might set this scale, for example, to 2, in order to
+ * make the UI look larger.
+ *
+ * - zooming scale: this can be freely controlled by the QEMU user to
+ *   zoom the guest content in or out.
+ *
+ * II. Representation
+ *
+ * We use a consistent naming scheme for variables in the different
+ * coordinate systems:
+ * - buffer coordinate: prefix fb
+ * - pixel coordinate: prefix p
+ * - logical window coordinate: prefix w
+ *
+ * For scales:
+ * - global scale factor: prefix gs
+ * - zooming scale: prefix scale/s
+ *
+ * Example: fbw, pw, ww for width in different coordinates
+ *
+ * III. Equation
+ *
+ * - fbw * gs * scale_x = pw
+ * - pw = gs * ww
+ *
+ * Consequently we have
+ *
+ * - fbw * scale_x = ww
+ *
+ * Example: assuming we are running QEMU on a 3840x2160 screen and have set
+ * global scaling factor to 2, if the guest buffer size is 1920x1080 and the
+ * zooming scale is 0.5, then we have:
+ * - fbw = 1920, fbh = 1080
+ * - pw = 1920, ph = 1080
+ * - ww = 960, wh = 540
+ * A bonus of this configuration is that we can achieve pixel to pixel
+ * presentation of the guest content.
+ */
+
static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque)
{
VirtualConsole *vc = opaque;
GtkDisplayState *s = vc->s;
- int mx, my;
- int ww, wh;
+ int wx_offset, wy_offset;
+ int ww_widget, wh_widget, ww_surface, wh_surface;
int fbw, fbh;
#if defined(CONFIG_OPENGL)
@@ -838,46 +936,37 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque)
fbw = surface_width(vc->gfx.ds);
fbh = surface_height(vc->gfx.ds);
- ww = gdk_window_get_width(gtk_widget_get_window(widget));
- wh = gdk_window_get_height(gtk_widget_get_window(widget));
-
- if (s->full_screen) {
- vc->gfx.scale_x = (double)ww / fbw;
- vc->gfx.scale_y = (double)wh / fbh;
- } else if (s->free_scale) {
- double sx, sy;
-
- sx = (double)ww / fbw;
- sy = (double)wh / fbh;
+ ww_widget = gdk_window_get_width(gtk_widget_get_window(widget));
+ wh_widget = gdk_window_get_height(gtk_widget_get_window(widget));
- vc->gfx.scale_x = vc->gfx.scale_y = MIN(sx, sy);
- }
+ gd_update_scale(vc, ww_widget, wh_widget, fbw, fbh);
- fbw *= vc->gfx.scale_x;
- fbh *= vc->gfx.scale_y;
+ ww_surface = fbw * vc->gfx.scale_x;
+ wh_surface = fbh * vc->gfx.scale_y;
- mx = my = 0;
- if (ww > fbw) {
- mx = (ww - fbw) / 2;
+ wx_offset = wy_offset = 0;
+ if (ww_widget > ww_surface) {
+ wx_offset = (ww_widget - ww_surface) / 2;
}
- if (wh > fbh) {
- my = (wh - fbh) / 2;
+ if (wh_widget > wh_surface) {
+ wy_offset = (wh_widget - wh_surface) / 2;
}
- cairo_rectangle(cr, 0, 0, ww, wh);
+ cairo_rectangle(cr, 0, 0, ww_widget, wh_widget);
/* Optionally cut out the inner area where the pixmap
will be drawn. This avoids 'flashing' since we're
not double-buffering. Note we're using the undocumented
behaviour of drawing the rectangle from right to left
to cut out the whole */
- cairo_rectangle(cr, mx + fbw, my,
- -1 * fbw, fbh);
+ cairo_rectangle(cr, wx_offset + ww_surface, wy_offset,
+ -1 * ww_surface, wh_surface);
cairo_fill(cr);
cairo_scale(cr, vc->gfx.scale_x, vc->gfx.scale_y);
cairo_set_source_surface(cr, vc->gfx.surface,
- mx / vc->gfx.scale_x, my / vc->gfx.scale_y);
+ wx_offset / vc->gfx.scale_x,
+ wy_offset / vc->gfx.scale_y);
cairo_paint(cr);
return TRUE;
@@ -888,19 +977,19 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
{
VirtualConsole *vc = opaque;
GtkDisplayState *s = vc->s;
- int x, y;
- int mx, my;
- int fbh, fbw;
- int ww, wh;
+ int fbx, fby;
+ int wx_offset, wy_offset;
+ int wh_surface, ww_surface;
+ int ww_widget, wh_widget;
if (!vc->gfx.ds) {
return TRUE;
}
- fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
- fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
- ww = gtk_widget_get_allocated_width(widget);
- wh = gtk_widget_get_allocated_height(widget);
+ ww_surface = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
+ wh_surface = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
+ ww_widget = gtk_widget_get_allocated_width(widget);
+ wh_widget = gtk_widget_get_allocated_height(widget);
/*
* `widget` may not have the same size with the frame buffer.
@@ -908,41 +997,42 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
* To achieve that, `vc` will be displayed at (mx, my)
* so that it is displayed at the center of the widget.
*/
- mx = my = 0;
- if (ww > fbw) {
- mx = (ww - fbw) / 2;
+ wx_offset = wy_offset = 0;
+ if (ww_widget > ww_surface) {
+ wx_offset = (ww_widget - ww_surface) / 2;
}
- if (wh > fbh) {
- my = (wh - fbh) / 2;
+ if (wh_widget > wh_surface) {
+ wy_offset = (wh_widget - wh_surface) / 2;
}
/*
* `motion` is reported in `widget` coordinates
* so translating it to the coordinates in `vc`.
*/
- x = (motion->x - mx) / vc->gfx.scale_x;
- y = (motion->y - my) / vc->gfx.scale_y;
+ fbx = (motion->x - wx_offset) / vc->gfx.scale_x;
+ fby = (motion->y - wy_offset) / vc->gfx.scale_y;
- trace_gd_motion_event(ww, wh, gtk_widget_get_scale_factor(widget), x, y);
+ trace_gd_motion_event(ww_widget, wh_widget,
+ gtk_widget_get_scale_factor(widget), fbx, fby);
if (qemu_input_is_absolute(vc->gfx.dcl.con)) {
- if (x < 0 || y < 0 ||
- x >= surface_width(vc->gfx.ds) ||
- y >= surface_height(vc->gfx.ds)) {
+ if (fbx < 0 || fby < 0 ||
+ fbx >= surface_width(vc->gfx.ds) ||
+ fby >= surface_height(vc->gfx.ds)) {
return TRUE;
}
- qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_X, x,
+ qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_X, fbx,
0, surface_width(vc->gfx.ds));
- qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_Y, y,
+ qemu_input_queue_abs(vc->gfx.dcl.con, INPUT_AXIS_Y, fby,
0, surface_height(vc->gfx.ds));
qemu_input_event_sync();
} else if (s->last_set && s->ptr_owner == vc) {
- qemu_input_queue_rel(vc->gfx.dcl.con, INPUT_AXIS_X, x - s->last_x);
- qemu_input_queue_rel(vc->gfx.dcl.con, INPUT_AXIS_Y, y - s->last_y);
+ qemu_input_queue_rel(vc->gfx.dcl.con, INPUT_AXIS_X, fbx - s->last_x);
+ qemu_input_queue_rel(vc->gfx.dcl.con, INPUT_AXIS_Y, fby - s->last_y);
qemu_input_event_sync();
}
- s->last_x = x;
- s->last_y = y;
+ s->last_x = fbx;
+ s->last_y = fby;
s->last_set = TRUE;
if (!qemu_input_is_absolute(vc->gfx.dcl.con) && s->ptr_owner == vc) {
@@ -1759,8 +1849,16 @@ static gboolean gd_configure(GtkWidget *widget,
GdkEventConfigure *cfg, gpointer opaque)
{
VirtualConsole *vc = opaque;
+ const double sx = vc->gfx.scale_x, sy = vc->gfx.scale_y;
+ double width = cfg->width, height = cfg->height;
+
+ if (!vc->s->free_scale && !vc->s->full_screen) {
+ width /= sx;
+ height /= sy;
+ }
+
+ gd_set_ui_size(vc, width, height);
- gd_set_ui_size(vc, cfg->width, cfg->height);
return FALSE;
}
@@ -1820,7 +1918,7 @@ static void gd_vc_send_chars(VirtualConsole *vc)
const uint8_t *buf;
uint32_t size;
- buf = fifo8_pop_buf(&vc->vte.out_fifo, MIN(len, avail), &size);
+ buf = fifo8_pop_bufptr(&vc->vte.out_fifo, MIN(len, avail), &size);
qemu_chr_be_write(vc->vte.chr, buf, size);
len = qemu_chr_be_can_write(vc->vte.chr);
avail -= size;
@@ -1878,7 +1976,7 @@ static void gd_vc_open(Chardev *chr,
*be_opened = false;
}
-static void char_gd_vc_class_init(ObjectClass *oc, void *data)
+static void char_gd_vc_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -1943,8 +2041,7 @@ static GSList *gd_vc_vte_init(GtkDisplayState *s, VirtualConsole *vc,
vcd->console = vc;
snprintf(buffer, sizeof(buffer), "vc%d", idx);
- vc->label = g_strdup_printf("%s", vc->vte.chr->label
- ? vc->vte.chr->label : buffer);
+ vc->label = g_strdup(vc->vte.chr->label ? : buffer);
group = gd_vc_menu_init(s, vc, idx, group, view_menu);
vc->vte.terminal = vte_terminal_new();
@@ -2485,6 +2582,9 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts)
#ifdef CONFIG_GTK_CLIPBOARD
gd_clipboard_init(s);
#endif /* CONFIG_GTK_CLIPBOARD */
+
+ /* GTK's event polling must happen on the main thread. */
+ qemu_main = NULL;
}
static void early_gtk_display_init(DisplayOptions *opts)
@@ -2514,7 +2614,7 @@ static void early_gtk_display_init(DisplayOptions *opts)
}
assert(opts->type == DISPLAY_TYPE_GTK);
- if (opts->has_gl && opts->gl != DISPLAYGL_MODE_OFF) {
+ if (opts->has_gl && opts->gl != DISPLAY_GL_MODE_OFF) {
#if defined(CONFIG_OPENGL)
#if defined(GDK_WINDOWING_WAYLAND)
if (GDK_IS_WAYLAND_DISPLAY(gdk_display_get_default())) {
@@ -2530,7 +2630,7 @@ static void early_gtk_display_init(DisplayOptions *opts)
#endif
{
#ifdef CONFIG_X11
- DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_ON;
+ DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAY_GL_MODE_ON;
gtk_egl_init(mode);
#endif
}
@@ -2540,7 +2640,7 @@ static void early_gtk_display_init(DisplayOptions *opts)
keycode_map = gd_get_keymap(&keycode_maplen);
#if defined(CONFIG_VTE)
- type_register(&char_gd_vc_type_info);
+ type_register_static(&char_gd_vc_type_info);
#endif
}
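
The DOC comment added to ui/gtk.c above defines three coordinate systems and two scales, tied together by fbw * gs * scale_x = pw and pw = gs * ww. The throwaway program below (plain C, not QEMU code) simply reproduces the documented example numbers, which can help when sanity-checking the gd_update_scale() and centering-offset arithmetic in the hunks above.

/* Worked example of the coordinate equations; gs, scale and the guest size
 * match the example given in the DOC comment. */
#include <stdio.h>

int main(void)
{
    const int fbw = 1920, fbh = 1080;  /* guest buffer size */
    const int gs = 2;                  /* desktop global scale factor */
    const double scale = 0.5;          /* QEMU zooming scale */

    const double ww = fbw * scale, wh = fbh * scale;  /* logical window size */
    const double pw = ww * gs, ph = wh * gs;          /* pixel size */

    printf("ww=%g wh=%g pw=%g ph=%g\n", ww, wh, pw, ph);
    /* prints ww=960 wh=540 pw=1920 ph=1080: pixel-to-pixel presentation */
    return 0;
}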
diff --git a/ui/input-barrier.c b/ui/input-barrier.c
index 2d57ca7..9793258 100644
--- a/ui/input-barrier.c
+++ b/ui/input-barrier.c
@@ -11,7 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
@@ -696,7 +696,7 @@ static void input_barrier_instance_init(Object *obj)
ib->height = 1080;
}
-static void input_barrier_class_init(ObjectClass *oc, void *data)
+static void input_barrier_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -732,7 +732,7 @@ static const TypeInfo input_barrier_info = {
.instance_size = sizeof(InputBarrier),
.instance_init = input_barrier_instance_init,
.instance_finalize = input_barrier_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/ui/input-legacy.c b/ui/input-legacy.c
index 210ae5e..ca4bccb 100644
--- a/ui/input-legacy.c
+++ b/ui/input-legacy.c
@@ -109,43 +109,6 @@ void qmp_send_key(KeyValueList *keys, bool has_hold_time, int64_t hold_time,
g_free(up);
}
-static void legacy_kbd_event(DeviceState *dev, QemuConsole *src,
- InputEvent *evt)
-{
- QEMUPutKbdEntry *entry = (QEMUPutKbdEntry *)dev;
- int scancodes[3], i, count;
- InputKeyEvent *key = evt->u.key.data;
-
- if (!entry || !entry->put_kbd) {
- return;
- }
- count = qemu_input_key_value_to_scancode(key->key,
- key->down,
- scancodes);
- for (i = 0; i < count; i++) {
- entry->put_kbd(entry->opaque, scancodes[i]);
- }
-}
-
-static const QemuInputHandler legacy_kbd_handler = {
- .name = "legacy-kbd",
- .mask = INPUT_EVENT_MASK_KEY,
- .event = legacy_kbd_event,
-};
-
-QEMUPutKbdEntry *qemu_add_kbd_event_handler(QEMUPutKBDEvent *func, void *opaque)
-{
- QEMUPutKbdEntry *entry;
-
- entry = g_new0(QEMUPutKbdEntry, 1);
- entry->put_kbd = func;
- entry->opaque = opaque;
- entry->s = qemu_input_handler_register((DeviceState *)entry,
- &legacy_kbd_handler);
- qemu_input_handler_activate(entry->s);
- return entry;
-}
-
static void legacy_mouse_event(DeviceState *dev, QemuConsole *src,
InputEvent *evt)
{
diff --git a/ui/input-linux.c b/ui/input-linux.c
index e572a2e..92e1a1a 100644
--- a/ui/input-linux.c
+++ b/ui/input-linux.c
@@ -12,7 +12,7 @@
#include "qemu/sockets.h"
#include "ui/input.h"
#include "qom/object_interfaces.h"
-#include "sysemu/iothread.h"
+#include "system/iothread.h"
#include "block/aio.h"
#include <sys/ioctl.h>
@@ -412,7 +412,6 @@ err_read_event_bits:
err_close:
close(il->fd);
- return;
}
static void input_linux_instance_finalize(Object *obj)
@@ -495,7 +494,7 @@ static void input_linux_instance_init(Object *obj)
{
}
-static void input_linux_class_init(ObjectClass *oc, void *data)
+static void input_linux_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -523,7 +522,7 @@ static const TypeInfo input_linux_info = {
.instance_size = sizeof(InputLinux),
.instance_init = input_linux_instance_init,
.instance_finalize = input_linux_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/ui/input.c b/ui/input.c
index dc74586..147e69c 100644
--- a/ui/input.c
+++ b/ui/input.c
@@ -1,12 +1,12 @@
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-ui.h"
#include "trace.h"
#include "ui/input.h"
#include "ui/console.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
+#include "system/replay.h"
+#include "system/runstate.h"
struct QemuInputHandlerState {
DeviceState *dev;
@@ -174,37 +174,6 @@ void qmp_input_send_event(const char *device,
qemu_input_event_sync();
}
-static int qemu_input_transform_invert_abs_value(int value)
-{
- return (int64_t)INPUT_EVENT_ABS_MAX - value + INPUT_EVENT_ABS_MIN;
-}
-
-static void qemu_input_transform_abs_rotate(InputEvent *evt)
-{
- InputMoveEvent *move = evt->u.abs.data;
- switch (graphic_rotate) {
- case 90:
- if (move->axis == INPUT_AXIS_X) {
- move->axis = INPUT_AXIS_Y;
- } else if (move->axis == INPUT_AXIS_Y) {
- move->axis = INPUT_AXIS_X;
- move->value = qemu_input_transform_invert_abs_value(move->value);
- }
- break;
- case 180:
- move->value = qemu_input_transform_invert_abs_value(move->value);
- break;
- case 270:
- if (move->axis == INPUT_AXIS_X) {
- move->axis = INPUT_AXIS_Y;
- move->value = qemu_input_transform_invert_abs_value(move->value);
- } else if (move->axis == INPUT_AXIS_Y) {
- move->axis = INPUT_AXIS_X;
- }
- break;
- }
-}
-
static void qemu_input_event_trace(QemuConsole *src, InputEvent *evt)
{
const char *name;
@@ -340,11 +309,6 @@ void qemu_input_event_send_impl(QemuConsole *src, InputEvent *evt)
qemu_input_event_trace(src, evt);
- /* pre processing */
- if (graphic_rotate && (evt->type == INPUT_EVENT_KIND_ABS)) {
- qemu_input_transform_abs_rotate(evt);
- }
-
/* send event */
s = qemu_input_find_handler(1 << evt->type, src);
if (!s) {
diff --git a/ui/meson.build b/ui/meson.build
index 28c7381..6371422 100644
--- a/ui/meson.build
+++ b/ui/meson.build
@@ -1,7 +1,4 @@
system_ss.add(pixman)
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: pixman) # for the include path
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: opengl) # for the include path
-
system_ss.add(png)
system_ss.add(files(
'clipboard.c',
@@ -120,10 +117,6 @@ if gtk.found()
endif
if sdl.found()
- if host_os == 'windows'
- system_ss.add(files('win32-kbd-hook.c'))
- endif
-
sdl_ss = ss.source_set()
sdl_ss.add(sdl, sdl_image, pixman, glib, files(
'sdl2-2d.c',
diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c
index 5ca55dd..ef4e71d 100644
--- a/ui/qemu-pixman.c
+++ b/ui/qemu-pixman.c
@@ -4,7 +4,9 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "ui/console.h"
+#include "qemu/memfd.h"
#include "standard-headers/drm/drm_fourcc.h"
#include "trace.h"
@@ -49,7 +51,6 @@ PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format)
break;
default:
g_assert_not_reached();
- break;
}
pf.amax = (1 << pf.abits) - 1;
@@ -125,33 +126,34 @@ uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman_format)
return 0;
}
-int qemu_pixman_get_type(int rshift, int gshift, int bshift)
+int qemu_pixman_get_type(int rshift, int gshift, int bshift, int endian)
{
int type = PIXMAN_TYPE_OTHER;
+ bool native_endian = (endian == G_BYTE_ORDER);
if (rshift > gshift && gshift > bshift) {
if (bshift == 0) {
- type = PIXMAN_TYPE_ARGB;
+ type = native_endian ? PIXMAN_TYPE_ARGB : PIXMAN_TYPE_BGRA;
} else {
- type = PIXMAN_TYPE_RGBA;
+ type = native_endian ? PIXMAN_TYPE_RGBA : PIXMAN_TYPE_ABGR;
}
} else if (rshift < gshift && gshift < bshift) {
if (rshift == 0) {
- type = PIXMAN_TYPE_ABGR;
+ type = native_endian ? PIXMAN_TYPE_ABGR : PIXMAN_TYPE_RGBA;
} else {
- type = PIXMAN_TYPE_BGRA;
+ type = native_endian ? PIXMAN_TYPE_BGRA : PIXMAN_TYPE_ARGB;
}
}
return type;
}
#ifdef CONFIG_PIXMAN
-pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf)
+pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf, int endian)
{
pixman_format_code_t format;
int type;
- type = qemu_pixman_get_type(pf->rshift, pf->gshift, pf->bshift);
+ type = qemu_pixman_get_type(pf->rshift, pf->gshift, pf->bshift, endian);
format = PIXMAN_FORMAT(pf->bits_per_pixel, type,
pf->abits, pf->rbits, pf->gbits, pf->bbits);
if (!pixman_format_supported_source(format)) {
@@ -268,3 +270,72 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph,
pixman_image_unref(ibg);
}
#endif /* CONFIG_PIXMAN */
+
+static void *
+qemu_pixman_shareable_alloc(const char *name, size_t size,
+ qemu_pixman_shareable *handle,
+ Error **errp)
+{
+#ifdef WIN32
+ return qemu_win32_map_alloc(size, handle, errp);
+#else
+ return qemu_memfd_alloc(name, size, 0, handle, errp);
+#endif
+}
+
+static void
+qemu_pixman_shareable_free(qemu_pixman_shareable handle,
+ void *ptr, size_t size)
+{
+#ifdef WIN32
+ qemu_win32_map_free(ptr, handle, &error_warn);
+#else
+ qemu_memfd_free(ptr, size, handle);
+#endif
+}
+
+static void
+qemu_pixman_shared_image_destroy(pixman_image_t *image, void *data)
+{
+ qemu_pixman_shareable handle = PTR_TO_SHAREABLE(data);
+ void *ptr = pixman_image_get_data(image);
+ size_t size = pixman_image_get_height(image) * pixman_image_get_stride(image);
+
+ qemu_pixman_shareable_free(handle, ptr, size);
+}
+
+bool
+qemu_pixman_image_new_shareable(pixman_image_t **image,
+ qemu_pixman_shareable *handle,
+ const char *name,
+ pixman_format_code_t format,
+ int width,
+ int height,
+ int rowstride_bytes,
+ Error **errp)
+{
+ ERRP_GUARD();
+ size_t size = height * rowstride_bytes;
+ void *bits = NULL;
+
+ g_return_val_if_fail(image != NULL, false);
+ g_return_val_if_fail(handle != NULL, false);
+
+ bits = qemu_pixman_shareable_alloc(name, size, handle, errp);
+ if (!bits) {
+ return false;
+ }
+
+ *image = pixman_image_create_bits(format, width, height, bits, rowstride_bytes);
+ if (!*image) {
+ error_setg(errp, "Failed to allocate image");
+ qemu_pixman_shareable_free(*handle, bits, size);
+ return false;
+ }
+
+ pixman_image_set_destroy_function(*image,
+ qemu_pixman_shared_image_destroy,
+ SHAREABLE_TO_PTR(*handle));
+
+ return true;
+}
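
ui/qemu-pixman.c above gains qemu_pixman_image_new_shareable(), which backs a pixman image with a memfd (or a Win32 mapping) and installs a destroy function that frees the shared memory. A hypothetical caller might look like the sketch below; the "scanout" name, the dimensions and the format are placeholders.

/* Sketch: allocate a shareable 640x480 XRGB image; a later
 * pixman_image_unref() releases the shared memory through the destroy
 * function installed by the helper. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "ui/qemu-pixman.h"

static pixman_image_t *alloc_shared_image(void)
{
    pixman_image_t *image = NULL;
    qemu_pixman_shareable handle;

    if (!qemu_pixman_image_new_shareable(&image, &handle, "scanout",
                                         PIXMAN_x8r8g8b8, 640, 480,
                                         640 * 4, &error_warn)) {
        return NULL;
    }
    /* handle can be shared with another process; image owns the memory */
    return image;
}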
diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c
index 91b7ee2..3be17d1 100644
--- a/ui/sdl2-gl.c
+++ b/ui/sdl2-gl.c
@@ -147,11 +147,11 @@ QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc,
SDL_GL_MakeCurrent(scon->real_window, scon->winctx);
SDL_GL_SetAttribute(SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1);
- if (scon->opts->gl == DISPLAYGL_MODE_ON ||
- scon->opts->gl == DISPLAYGL_MODE_CORE) {
+ if (scon->opts->gl == DISPLAY_GL_MODE_ON ||
+ scon->opts->gl == DISPLAY_GL_MODE_CORE) {
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK,
SDL_GL_CONTEXT_PROFILE_CORE);
- } else if (scon->opts->gl == DISPLAYGL_MODE_ES) {
+ } else if (scon->opts->gl == DISPLAY_GL_MODE_ES) {
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK,
SDL_GL_CONTEXT_PROFILE_ES);
}
@@ -163,7 +163,7 @@ QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc,
/* If SDL fail to create a GL context and we use the "on" flag,
* then try to fallback to GLES.
*/
- if (!ctx && scon->opts->gl == DISPLAYGL_MODE_ON) {
+ if (!ctx && scon->opts->gl == DISPLAY_GL_MODE_ON) {
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK,
SDL_GL_CONTEXT_PROFILE_ES);
ctx = SDL_GL_CreateContext(scon->real_window);
@@ -241,7 +241,7 @@ void sdl2_gl_scanout_flush(DisplayChangeListener *dcl,
SDL_GL_MakeCurrent(scon->real_window, scon->winctx);
SDL_GetWindowSize(scon->real_window, &ww, &wh);
- egl_fb_setup_default(&scon->win_fb, ww, wh);
+ egl_fb_setup_default(&scon->win_fb, ww, wh, 0, 0);
egl_fb_blit(&scon->win_fb, &scon->guest_fb, !scon->y0_top);
SDL_GL_SwapWindow(scon->real_window);
diff --git a/ui/sdl2-input.c b/ui/sdl2-input.c
index b02a89e..2286df4 100644
--- a/ui/sdl2-input.c
+++ b/ui/sdl2-input.c
@@ -58,3 +58,8 @@ void sdl2_process_key(struct sdl2_console *scon,
}
}
}
+
+void sdl2_release_modifiers(struct sdl2_console *scon)
+{
+ qkbd_state_lift_all_keys(scon->kbd);
+}
diff --git a/ui/sdl2.c b/ui/sdl2.c
index 98ed974..b00e421 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -29,11 +29,11 @@
#include "ui/console.h"
#include "ui/input.h"
#include "ui/sdl2.h"
-#include "sysemu/runstate.h"
-#include "sysemu/runstate-action.h"
-#include "sysemu/sysemu.h"
-#include "ui/win32-kbd-hook.h"
+#include "system/runstate.h"
+#include "system/runstate-action.h"
+#include "system/system.h"
#include "qemu/log.h"
+#include "qemu-main.h"
static int sdl2_num_outputs;
static struct sdl2_console *sdl2_console;
@@ -107,7 +107,7 @@ void sdl2_window_create(struct sdl2_console *scon)
if (scon->opengl) {
const char *driver = "opengl";
- if (scon->opts->gl == DISPLAYGL_MODE_ES) {
+ if (scon->opts->gl == DISPLAY_GL_MODE_ES) {
driver = "opengles2";
}
@@ -115,6 +115,7 @@ void sdl2_window_create(struct sdl2_console *scon)
SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1");
scon->winctx = SDL_GL_CreateContext(scon->real_window);
+ SDL_GL_SetSwapInterval(0);
} else {
/* The SDL renderer is only used by sdl2-2D, when OpenGL is disabled */
scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0);
@@ -261,7 +262,6 @@ static void sdl_grab_start(struct sdl2_console *scon)
}
SDL_SetWindowGrab(scon->real_window, SDL_TRUE);
gui_grab = 1;
- win32_kbd_set_grab(true);
sdl_update_caption(scon);
}
@@ -269,7 +269,6 @@ static void sdl_grab_end(struct sdl2_console *scon)
{
SDL_SetWindowGrab(scon->real_window, SDL_FALSE);
gui_grab = 0;
- win32_kbd_set_grab(false);
sdl_show_cursor(scon);
sdl_update_caption(scon);
}
@@ -370,30 +369,18 @@ static int get_mod_state(void)
}
}
-static void *sdl2_win32_get_hwnd(struct sdl2_console *scon)
-{
-#ifdef CONFIG_WIN32
- SDL_SysWMinfo info;
-
- SDL_VERSION(&info.version);
- if (SDL_GetWindowWMInfo(scon->real_window, &info)) {
- return info.info.win.window;
- }
-#endif
- return NULL;
-}
-
static void handle_keydown(SDL_Event *ev)
{
int win;
struct sdl2_console *scon = get_scon_from_window(ev->key.windowID);
int gui_key_modifier_pressed = get_mod_state();
- int gui_keysym = 0;
if (!scon) {
return;
}
+ scon->gui_keysym = false;
+
if (!scon->ignore_hotkeys && gui_key_modifier_pressed && !ev->key.repeat) {
switch (ev->key.keysym.scancode) {
case SDL_SCANCODE_2:
@@ -418,15 +405,16 @@ static void handle_keydown(SDL_Event *ev)
SDL_ShowWindow(sdl2_console[win].real_window);
}
}
- gui_keysym = 1;
+ sdl2_release_modifiers(scon);
+ scon->gui_keysym = true;
}
break;
case SDL_SCANCODE_F:
toggle_full_screen(scon);
- gui_keysym = 1;
+ scon->gui_keysym = true;
break;
case SDL_SCANCODE_G:
- gui_keysym = 1;
+ scon->gui_keysym = true;
if (!gui_grab) {
sdl_grab_start(scon);
} else if (!gui_fullscreen) {
@@ -439,7 +427,7 @@ static void handle_keydown(SDL_Event *ev)
/* re-create scon->texture */
sdl2_2d_switch(&scon->dcl, scon->surface);
}
- gui_keysym = 1;
+ scon->gui_keysym = true;
break;
#if 0
case SDL_SCANCODE_KP_PLUS:
@@ -458,14 +446,14 @@ static void handle_keydown(SDL_Event *ev)
__func__, width, height);
sdl_scale(scon, width, height);
sdl2_redraw(scon);
- gui_keysym = 1;
+ scon->gui_keysym = true;
}
#endif
default:
break;
}
}
- if (!gui_keysym) {
+ if (!scon->gui_keysym) {
sdl2_process_key(scon, &ev->key);
}
}
@@ -491,7 +479,7 @@ static void handle_textinput(SDL_Event *ev)
return;
}
- if (QEMU_IS_TEXT_CONSOLE(con)) {
+ if (!scon->gui_keysym && QEMU_IS_TEXT_CONSOLE(con)) {
qemu_text_console_put_string(QEMU_TEXT_CONSOLE(con), ev->text.text, strlen(ev->text.text));
}
}
@@ -500,14 +488,14 @@ static void handle_mousemotion(SDL_Event *ev)
{
int max_x, max_y;
struct sdl2_console *scon = get_scon_from_window(ev->motion.windowID);
+ int scr_w, scr_h, surf_w, surf_h, x, y, dx, dy;
if (!scon || !qemu_console_is_graphic(scon->dcl.con)) {
return;
}
+ SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h);
if (qemu_input_is_absolute(scon->dcl.con) || absolute_enabled) {
- int scr_w, scr_h;
- SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h);
max_x = scr_w - 1;
max_y = scr_h - 1;
if (gui_grab && !gui_fullscreen
@@ -521,9 +509,14 @@ static void handle_mousemotion(SDL_Event *ev)
sdl_grab_start(scon);
}
}
+ surf_w = surface_width(scon->surface);
+ surf_h = surface_height(scon->surface);
+ x = (int64_t)ev->motion.x * surf_w / scr_w;
+ y = (int64_t)ev->motion.y * surf_h / scr_h;
+ dx = (int64_t)ev->motion.xrel * surf_w / scr_w;
+ dy = (int64_t)ev->motion.yrel * surf_h / scr_h;
if (gui_grab || qemu_input_is_absolute(scon->dcl.con) || absolute_enabled) {
- sdl_send_mouse_event(scon, ev->motion.xrel, ev->motion.yrel,
- ev->motion.x, ev->motion.y, ev->motion.state);
+ sdl_send_mouse_event(scon, dx, dy, x, y, ev->motion.state);
}
}
@@ -532,12 +525,17 @@ static void handle_mousebutton(SDL_Event *ev)
int buttonstate = SDL_GetMouseState(NULL, NULL);
SDL_MouseButtonEvent *bev;
struct sdl2_console *scon = get_scon_from_window(ev->button.windowID);
+ int scr_w, scr_h, x, y;
if (!scon || !qemu_console_is_graphic(scon->dcl.con)) {
return;
}
bev = &ev->button;
+ SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h);
+ x = (int64_t)bev->x * surface_width(scon->surface) / scr_w;
+ y = (int64_t)bev->y * surface_height(scon->surface) / scr_h;
+
if (!gui_grab && !qemu_input_is_absolute(scon->dcl.con)) {
if (ev->type == SDL_MOUSEBUTTONUP && bev->button == SDL_BUTTON_LEFT) {
/* start grabbing all events */
@@ -549,7 +547,7 @@ static void handle_mousebutton(SDL_Event *ev)
} else {
buttonstate &= ~SDL_BUTTON(bev->button);
}
- sdl_send_mouse_event(scon, 0, 0, bev->x, bev->y, buttonstate);
+ sdl_send_mouse_event(scon, 0, 0, x, y, buttonstate);
}
}
@@ -605,10 +603,6 @@ static void handle_windowevent(SDL_Event *ev)
sdl2_redraw(scon);
break;
case SDL_WINDOWEVENT_FOCUS_GAINED:
- win32_kbd_set_grab(gui_grab);
- if (qemu_console_is_graphic(scon->dcl.con)) {
- win32_kbd_set_window(sdl2_win32_get_hwnd(scon));
- }
/* fall through */
case SDL_WINDOWEVENT_ENTER:
if (!gui_grab && (qemu_input_is_absolute(scon->dcl.con) || absolute_enabled)) {
@@ -624,9 +618,6 @@ static void handle_windowevent(SDL_Event *ev)
scon->ignore_hotkeys = get_mod_state();
break;
case SDL_WINDOWEVENT_FOCUS_LOST:
- if (qemu_console_is_graphic(scon->dcl.con)) {
- win32_kbd_set_window(NULL);
- }
if (gui_grab && !gui_fullscreen) {
sdl_grab_end(scon);
}
@@ -866,10 +857,7 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o)
#ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR /* only available since SDL 2.0.8 */
SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
#endif
-#ifndef CONFIG_WIN32
- /* QEMU uses its own low level keyboard hook procedure on Windows */
SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1");
-#endif
#ifdef SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED
SDL_SetHint(SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED, "0");
#endif
@@ -962,6 +950,9 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o)
}
atexit(sdl_cleanup);
+
+ /* SDL's event polling (in dpy_refresh) must happen on the main thread. */
+ qemu_main = NULL;
}
static QemuDisplay qemu_display_sdl2 = {
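
The sdl2.c hunks above map window coordinates to guest surface coordinates with a 64-bit intermediate, since the SDL window size and the guest surface size can differ. A small standalone sketch of that mapping (not QEMU code, names are placeholders):

#include <stdint.h>

/* Scale a window coordinate to the guest surface, as handle_mousemotion()
 * and handle_mousebutton() now do; the 64-bit cast avoids overflow. */
static inline int win_to_surface(int win_coord, int win_size, int surf_size)
{
    return (int)((int64_t)win_coord * surf_size / win_size);
}

/* e.g. win_to_surface(400, 800, 1920) == 960 */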
diff --git a/ui/spice-app.c b/ui/spice-app.c
index a10b4a5..24f78f3 100644
--- a/ui/spice-app.c
+++ b/ui/spice-app.c
@@ -36,7 +36,7 @@
#include "qapi/error.h"
#include "io/channel-command.h"
#include "chardev/spice.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "qom/object.h"
static const char *tmp_dir;
@@ -101,7 +101,7 @@ static void vc_chr_parse(QemuOpts *opts, ChardevBackend *backend, Error **errp)
/* fqdn is dealt with in vc_chr_open() */
}
-static void char_vc_class_init(ObjectClass *oc, void *data)
+static void char_vc_class_init(ObjectClass *oc, const void *data)
{
VCChardevClass *vc = CHARDEV_VC_CLASS(oc);
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -173,7 +173,7 @@ static void spice_app_display_early_init(DisplayOptions *opts)
exit(1);
}
- type_register(&char_vc_type_info);
+ type_register_static(&char_vc_type_info);
sock_path = g_strjoin("", app_dir, "/", "spice.sock", NULL);
qopts = qemu_opts_create(list, NULL, 0, &error_abort);
diff --git a/ui/spice-core.c b/ui/spice-core.c
index 15be640..0326c63 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -18,8 +18,8 @@
#include "qemu/osdep.h"
#include <spice.h>
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "ui/qemu-spice.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
@@ -840,7 +840,7 @@ static void qemu_spice_init(void)
"incompatible with -spice port/tls-port");
exit(1);
}
- egl_init(qemu_opt_get(opts, "rendernode"), DISPLAYGL_MODE_ON, &error_fatal);
+ egl_init(qemu_opt_get(opts, "rendernode"), DISPLAY_GL_MODE_ON, &error_fatal);
spice_opengl = 1;
}
#endif
diff --git a/ui/spice-display.c b/ui/spice-display.c
index c794ae0..9c39d2c 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -28,6 +28,8 @@
#include "ui/spice-display.h"
+#include "standard-headers/drm/drm_fourcc.h"
+
bool spice_opengl;
int qemu_spice_rect_is_empty(const QXLRect* r)
@@ -872,23 +874,48 @@ static void spice_gl_update(DisplayChangeListener *dcl,
ssd->gl_updates++;
}
+static void spice_server_gl_scanout(QXLInstance *qxl,
+ const int *fd,
+ uint32_t width, uint32_t height,
+ const uint32_t *offset,
+ const uint32_t *stride,
+ uint32_t num_planes, uint32_t format,
+ uint64_t modifier, int y_0_top)
+{
+#ifdef HAVE_SPICE_QXL_GL_SCANOUT2
+ spice_qxl_gl_scanout2(qxl, fd, width, height, offset, stride,
+ num_planes, format, modifier, y_0_top);
+#else
+ if (num_planes <= 1) {
+ spice_qxl_gl_scanout(qxl, fd[0], width, height, stride[0], format, y_0_top);
+ } else {
+ error_report("SPICE server does not support multi plane GL scanout");
+ }
+#endif
+}
+
static void spice_gl_switch(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface)
{
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
- EGLint stride, fourcc;
- int fd;
if (ssd->ds) {
surface_gl_destroy_texture(ssd->gls, ssd->ds);
}
ssd->ds = new_surface;
if (ssd->ds) {
+ uint32_t offset[DMABUF_MAX_PLANES], stride[DMABUF_MAX_PLANES];
+ int fd[DMABUF_MAX_PLANES], num_planes, fourcc;
+ uint64_t modifier;
+
surface_gl_create_texture(ssd->gls, ssd->ds);
- fd = egl_get_fd_for_texture(ssd->ds->texture,
- &stride, &fourcc,
- NULL);
- if (fd < 0) {
+ if (!egl_dmabuf_export_texture(ssd->ds->texture,
+ fd,
+ (EGLint *)offset,
+ (EGLint *)stride,
+ &fourcc,
+ &num_planes,
+ &modifier)) {
surface_gl_destroy_texture(ssd->gls, ssd->ds);
return;
}
@@ -899,10 +926,11 @@ static void spice_gl_switch(DisplayChangeListener *dcl,
fourcc);
/* note: spice server will close the fd */
- spice_qxl_gl_scanout(&ssd->qxl, fd,
- surface_width(ssd->ds),
- surface_height(ssd->ds),
- stride, fourcc, false);
+ spice_server_gl_scanout(&ssd->qxl, fd,
+ surface_width(ssd->ds),
+ surface_height(ssd->ds),
+ offset, stride, num_planes,
+ fourcc, modifier, false);
ssd->have_surface = true;
ssd->have_scanout = false;
@@ -925,7 +953,8 @@ static void qemu_spice_gl_scanout_disable(DisplayChangeListener *dcl)
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
trace_qemu_spice_gl_scanout_disable(ssd->qxl.id);
- spice_qxl_gl_scanout(&ssd->qxl, -1, 0, 0, 0, 0, false);
+ spice_server_gl_scanout(&ssd->qxl, NULL, 0, 0, NULL, NULL, 0, DRM_FORMAT_INVALID,
+ DRM_FORMAT_MOD_INVALID, false);
qemu_spice_gl_monitor_config(ssd, 0, 0, 0, 0);
ssd->have_surface = false;
ssd->have_scanout = false;
@@ -941,20 +970,23 @@ static void qemu_spice_gl_scanout_texture(DisplayChangeListener *dcl,
void *d3d_tex2d)
{
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
- EGLint stride = 0, fourcc = 0;
- int fd = -1;
+ EGLint offset[DMABUF_MAX_PLANES], stride[DMABUF_MAX_PLANES], fourcc = 0;
+ int fd[DMABUF_MAX_PLANES], num_planes;
+ uint64_t modifier;
assert(tex_id);
- fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc, NULL);
- if (fd < 0) {
- fprintf(stderr, "%s: failed to get fd for texture\n", __func__);
+ if (!egl_dmabuf_export_texture(tex_id, fd, offset, stride, &fourcc,
+ &num_planes, &modifier)) {
+ fprintf(stderr, "%s: failed to export dmabuf for texture\n", __func__);
return;
}
+
trace_qemu_spice_gl_scanout_texture(ssd->qxl.id, w, h, fourcc);
/* note: spice server will close the fd */
- spice_qxl_gl_scanout(&ssd->qxl, fd, backing_width, backing_height,
- stride, fourcc, y_0_top);
+ spice_server_gl_scanout(&ssd->qxl, fd, backing_width, backing_height,
+ (uint32_t *)offset, (uint32_t *)stride, num_planes,
+ fourcc, modifier, y_0_top);
qemu_spice_gl_monitor_config(ssd, x, y, w, h);
ssd->have_surface = false;
ssd->have_scanout = true;
@@ -1025,11 +1057,10 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
- EGLint stride = 0, fourcc = 0;
+ EGLint fourcc = 0;
bool render_cursor = false;
bool y_0_top = false; /* FIXME */
uint64_t cookie;
- int fd;
uint32_t width, height, texture;
if (!ssd->have_scanout) {
@@ -1064,26 +1095,47 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl,
/* dest framebuffer */
if (ssd->blit_fb.width != width ||
ssd->blit_fb.height != height) {
+ int fds[DMABUF_MAX_PLANES], num_planes;
+ uint32_t offsets[DMABUF_MAX_PLANES], strides[DMABUF_MAX_PLANES];
+ uint64_t modifier;
+
trace_qemu_spice_gl_render_dmabuf(ssd->qxl.id, width,
height);
egl_fb_destroy(&ssd->blit_fb);
egl_fb_setup_new_tex(&ssd->blit_fb,
width, height);
- fd = egl_get_fd_for_texture(ssd->blit_fb.texture,
- &stride, &fourcc, NULL);
- spice_qxl_gl_scanout(&ssd->qxl, fd, width, height,
- stride, fourcc, false);
+ if (!egl_dmabuf_export_texture(ssd->blit_fb.texture, fds,
+ (EGLint *)offsets, (EGLint *)strides,
+ &fourcc, &num_planes, &modifier)) {
+ fprintf(stderr,
+ "%s: failed to export dmabuf for texture\n", __func__);
+ return;
+ }
+
+ spice_server_gl_scanout(&ssd->qxl, fds, width, height, offsets, strides,
+ num_planes, fourcc, modifier, false);
}
} else {
- stride = qemu_dmabuf_get_stride(dmabuf);
+ int fds[DMABUF_MAX_PLANES];
+ int noffsets, nstrides;
+ const uint32_t *offsets = qemu_dmabuf_get_offsets(dmabuf, &noffsets);
+ const uint32_t *strides = qemu_dmabuf_get_strides(dmabuf, &nstrides);
+ uint32_t num_planes = qemu_dmabuf_get_num_planes(dmabuf);
+
+ assert(noffsets >= num_planes);
+ assert(nstrides >= num_planes);
+
fourcc = qemu_dmabuf_get_fourcc(dmabuf);
y_0_top = qemu_dmabuf_get_y0_top(dmabuf);
- fd = qemu_dmabuf_dup_fd(dmabuf);
+ qemu_dmabuf_dup_fds(dmabuf, fds, DMABUF_MAX_PLANES);
trace_qemu_spice_gl_forward_dmabuf(ssd->qxl.id, width, height);
/* note: spice server will close the fd, so hand over a dup */
- spice_qxl_gl_scanout(&ssd->qxl, fd, width, height,
- stride, fourcc, y_0_top);
+ spice_server_gl_scanout(&ssd->qxl, fds, width, height,
+ offsets, strides, num_planes,
+ fourcc,
+ qemu_dmabuf_get_modifier(dmabuf),
+ y_0_top);
}
qemu_spice_gl_monitor_config(ssd, 0, 0, width, height);
ssd->guest_dmabuf_refresh = false;
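The spice-display changes above replace the single fd/stride scanout call with per-plane arrays plus a DRM format modifier. A minimal standalone sketch of that kind of multi-plane descriptor follows; DMABUF_MAX_PLANES, the struct and the helper name are assumptions for illustration only, not SPICE or QEMU API.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define DMABUF_MAX_PLANES 4

struct scanout_desc {
    int fds[DMABUF_MAX_PLANES];          /* one dmabuf fd per plane */
    uint32_t offsets[DMABUF_MAX_PLANES]; /* byte offset of each plane */
    uint32_t strides[DMABUF_MAX_PLANES]; /* pitch of each plane */
    uint32_t num_planes;
    uint32_t fourcc;                     /* DRM fourcc format code */
    uint64_t modifier;                   /* DRM format modifier */
};

/* Hand over duplicated fds so the receiver may close them independently,
 * mirroring the "spice server will close the fd" convention above. */
static bool scanout_desc_dup_fds(const struct scanout_desc *desc,
                                 int out_fds[DMABUF_MAX_PLANES])
{
    for (uint32_t i = 0; i < desc->num_planes; i++) {
        out_fds[i] = dup(desc->fds[i]);
        if (out_fds[i] < 0) {
            while (i--) {        /* undo partial duplication on failure */
                close(out_fds[i]);
            }
            return false;
        }
    }
    return true;
}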
diff --git a/ui/trace-events b/ui/trace-events
index 69ff229..3da0d5e 100644
--- a/ui/trace-events
+++ b/ui/trace-events
@@ -130,9 +130,10 @@ xkeymap_keymap(const char *name) "keymap '%s'"
# clipboard.c
clipboard_check_serial(int cur, int recv, bool ok) "cur:%d recv:%d %d"
+clipboard_reset_serial(void) ""
# vdagent.c
-vdagent_open(void) ""
+vdagent_fe_open(bool fe_open) "fe_open=%d"
vdagent_close(void) ""
vdagent_disconnect(void) ""
vdagent_send(const char *name) "msg %s"
@@ -157,12 +158,15 @@ dbus_mouse_rel_motion(int dx, int dy) "dx=%d, dy=%d"
dbus_touch_send_event(unsigned int kind, uint32_t num_slot, uint32_t x, uint32_t y) "kind=%u, num_slot=%u, x=%d, y=%d"
dbus_update(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d"
dbus_update_gl(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d"
+dbus_clipboard_grab(int selection, unsigned int serial) "selection=%d serial=%u"
dbus_clipboard_grab_failed(void) ""
+dbus_clipboard_qemu_request(int type) "type=%d"
dbus_clipboard_register(const char *bus_name) "peer %s"
dbus_clipboard_unregister(const char *bus_name) "peer %s"
dbus_scanout_texture(uint32_t tex_id, bool backing_y_0_top, uint32_t backing_width, uint32_t backing_height, uint32_t x, uint32_t y, uint32_t w, uint32_t h) "tex_id:%u y0top:%d back:%ux%u %u+%u-%ux%u"
dbus_gl_gfx_switch(void *p) "surf: %p"
dbus_filter(unsigned int serial, unsigned int filter) "serial=%u (<= %u)"
+dbus_can_share_map(bool share) "can_share_map: %d"
# egl-helpers.c
egl_init_d3d11_device(void *p) "d3d device: %p"
diff --git a/ui/ui-hmp-cmds.c b/ui/ui-hmp-cmds.c
index 26c8ced..980a8bb 100644
--- a/ui/ui-hmp-cmds.c
+++ b/ui/ui-hmp-cmds.c
@@ -21,7 +21,7 @@
#include "monitor/monitor-internal.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-ui.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
#include "qemu/cutils.h"
#include "ui/console.h"
#include "ui/input.h"
diff --git a/ui/vdagent.c b/ui/vdagent.c
index 64d7ab2..c0746fe 100644
--- a/ui/vdagent.c
+++ b/ui/vdagent.c
@@ -6,10 +6,10 @@
#include "qemu/option.h"
#include "qemu/units.h"
#include "hw/qdev-core.h"
-#include "migration/blocker.h"
#include "ui/clipboard.h"
#include "ui/console.h"
#include "ui/input.h"
+#include "migration/vmstate.h"
#include "trace.h"
#include "qapi/qapi-types-char.h"
@@ -32,14 +32,12 @@
struct VDAgentChardev {
Chardev parent;
- /* TODO: migration isn't yet supported */
- Error *migration_blocker;
-
/* config */
bool mouse;
bool clipboard;
/* guest vdagent */
+ bool connected;
uint32_t caps;
VDIChunkHeader chunk;
uint32_t chunksize;
@@ -47,7 +45,7 @@ struct VDAgentChardev {
uint32_t msgsize;
uint8_t *xbuf;
uint32_t xoff, xsize;
- Buffer outbuf;
+ GByteArray *outbuf;
/* mouse */
DeviceState mouse_dev;
@@ -142,16 +140,16 @@ static void vdagent_send_buf(VDAgentChardev *vd)
{
uint32_t len;
- while (!buffer_empty(&vd->outbuf)) {
+ while (vd->outbuf->len) {
len = qemu_chr_be_can_write(CHARDEV(vd));
if (len == 0) {
return;
}
- if (len > vd->outbuf.offset) {
- len = vd->outbuf.offset;
+ if (len > vd->outbuf->len) {
+ len = vd->outbuf->len;
}
- qemu_chr_be_write(CHARDEV(vd), vd->outbuf.buffer, len);
- buffer_advance(&vd->outbuf, len);
+ qemu_chr_be_write(CHARDEV(vd), vd->outbuf->data, len);
+ g_byte_array_remove_range(vd->outbuf, 0, len);
}
}
@@ -166,7 +164,7 @@ static void vdagent_send_msg(VDAgentChardev *vd, VDAgentMessage *msg)
msg->protocol = VD_AGENT_PROTOCOL;
- if (vd->outbuf.offset + msgsize > VDAGENT_BUFFER_LIMIT) {
+ if (vd->outbuf->len + msgsize > VDAGENT_BUFFER_LIMIT) {
error_report("buffer full, dropping message");
return;
}
@@ -177,15 +175,14 @@ static void vdagent_send_msg(VDAgentChardev *vd, VDAgentMessage *msg)
if (chunk.size > 1024) {
chunk.size = 1024;
}
- buffer_reserve(&vd->outbuf, sizeof(chunk) + chunk.size);
- buffer_append(&vd->outbuf, &chunk, sizeof(chunk));
- buffer_append(&vd->outbuf, msgbuf + msgoff, chunk.size);
+ g_byte_array_append(vd->outbuf, (void *)&chunk, sizeof(chunk));
+ g_byte_array_append(vd->outbuf, msgbuf + msgoff, chunk.size);
msgoff += chunk.size;
}
vdagent_send_buf(vd);
}
-static void vdagent_send_caps(VDAgentChardev *vd)
+static void vdagent_send_caps(VDAgentChardev *vd, bool request)
{
g_autofree VDAgentMessage *msg = g_malloc0(sizeof(VDAgentMessage) +
sizeof(VDAgentAnnounceCapabilities) +
@@ -205,6 +202,7 @@ static void vdagent_send_caps(VDAgentChardev *vd)
#endif
}
+ caps->request = request;
vdagent_send_msg(vd, msg);
}
@@ -671,10 +669,6 @@ static void vdagent_chr_open(Chardev *chr,
return;
#endif
- if (migrate_add_blocker(&vd->migration_blocker, errp) != 0) {
- return;
- }
-
vd->mouse = VDAGENT_MOUSE_DEFAULT;
if (cfg->has_mouse) {
vd->mouse = cfg->mouse;
@@ -693,6 +687,18 @@ static void vdagent_chr_open(Chardev *chr,
*be_opened = true;
}
+static void vdagent_clipboard_peer_register(VDAgentChardev *vd)
+{
+ if (vd->cbpeer.notifier.notify != NULL) {
+ return;
+ }
+
+ vd->cbpeer.name = "vdagent";
+ vd->cbpeer.notifier.notify = vdagent_clipboard_notify;
+ vd->cbpeer.request = vdagent_clipboard_request;
+ qemu_clipboard_peer_register(&vd->cbpeer);
+}
+
static void vdagent_chr_recv_caps(VDAgentChardev *vd, VDAgentMessage *msg)
{
VDAgentAnnounceCapabilities *caps = (void *)msg->data;
@@ -711,7 +717,7 @@ static void vdagent_chr_recv_caps(VDAgentChardev *vd, VDAgentMessage *msg)
vd->caps = caps->caps[0];
if (caps->request) {
- vdagent_send_caps(vd);
+ vdagent_send_caps(vd, false);
}
if (have_mouse(vd) && vd->mouse_hs) {
qemu_input_handler_activate(vd->mouse_hs);
@@ -719,11 +725,9 @@ static void vdagent_chr_recv_caps(VDAgentChardev *vd, VDAgentMessage *msg)
memset(vd->last_serial, 0, sizeof(vd->last_serial));
- if (have_clipboard(vd) && vd->cbpeer.notifier.notify == NULL) {
- vd->cbpeer.name = "vdagent";
- vd->cbpeer.notifier.notify = vdagent_clipboard_notify;
- vd->cbpeer.request = vdagent_clipboard_request;
- qemu_clipboard_peer_register(&vd->cbpeer);
+ if (have_clipboard(vd)) {
+ qemu_clipboard_reset_serial();
+ vdagent_clipboard_peer_register(vd);
}
}
@@ -856,7 +860,8 @@ static void vdagent_disconnect(VDAgentChardev *vd)
{
trace_vdagent_disconnect();
- buffer_reset(&vd->outbuf);
+ vd->connected = false;
+ g_byte_array_set_size(vd->outbuf, 0);
vdagent_reset_bufs(vd);
vd->caps = 0;
if (vd->mouse_hs) {
@@ -872,6 +877,12 @@ static void vdagent_chr_set_fe_open(struct Chardev *chr, int fe_open)
{
VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(chr);
+ trace_vdagent_fe_open(fe_open);
+
+ if (vd->connected == fe_open) {
+ return;
+ }
+
if (!fe_open) {
trace_vdagent_close();
vdagent_disconnect(vd);
@@ -881,7 +892,8 @@ static void vdagent_chr_set_fe_open(struct Chardev *chr, int fe_open)
return;
}
- trace_vdagent_open();
+ vd->connected = true;
+ vdagent_send_caps(vd, true);
}
static void vdagent_chr_parse(QemuOpts *opts, ChardevBackend *backend,
@@ -900,7 +912,7 @@ static void vdagent_chr_parse(QemuOpts *opts, ChardevBackend *backend,
/* ------------------------------------------------------------------ */
-static void vdagent_chr_class_init(ObjectClass *oc, void *data)
+static void vdagent_chr_class_init(ObjectClass *oc, const void *data)
{
ChardevClass *cc = CHARDEV_CLASS(oc);
@@ -911,25 +923,163 @@ static void vdagent_chr_class_init(ObjectClass *oc, void *data)
cc->chr_accept_input = vdagent_chr_accept_input;
}
+static int post_load(void *opaque, int version_id)
+{
+ VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(opaque);
+
+ if (have_mouse(vd) && vd->mouse_hs) {
+ qemu_input_handler_activate(vd->mouse_hs);
+ }
+
+ if (have_clipboard(vd)) {
+ vdagent_clipboard_peer_register(vd);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_chunk = {
+ .name = "vdagent/chunk",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(port, VDIChunkHeader),
+ VMSTATE_UINT32(size, VDIChunkHeader),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_vdba = {
+ .name = "vdagent/bytearray",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(len, GByteArray),
+ VMSTATE_VBUFFER_ALLOC_UINT32(data, GByteArray, 0, 0, len),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+struct CBInfoArray {
+ uint32_t n;
+ QemuClipboardInfo cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT];
+};
+
+static const VMStateDescription vmstate_cbinfo_array = {
+ .name = "cbinfoarray",
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(n, struct CBInfoArray),
+ VMSTATE_STRUCT_VARRAY_UINT32(cbinfo, struct CBInfoArray, n,
+ 0, vmstate_cbinfo, QemuClipboardInfo),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int put_cbinfo(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(pv);
+ struct CBInfoArray cbinfo = {};
+ int i;
+
+ if (!have_clipboard(vd)) {
+ return 0;
+ }
+
+ for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) {
+ if (qemu_clipboard_peer_owns(&vd->cbpeer, i)) {
+ cbinfo.cbinfo[cbinfo.n++] = *qemu_clipboard_info(i);
+ }
+ }
+
+ return vmstate_save_state(f, &vmstate_cbinfo_array, &cbinfo, vmdesc);
+}
+
+static int get_cbinfo(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(pv);
+ struct CBInfoArray cbinfo = {};
+ int i, ret;
+
+ if (!have_clipboard(vd)) {
+ return 0;
+ }
+
+ vdagent_clipboard_peer_register(vd);
+
+ ret = vmstate_load_state(f, &vmstate_cbinfo_array, &cbinfo, 0);
+ if (ret) {
+ return ret;
+ }
+
+ for (i = 0; i < cbinfo.n; i++) {
+ g_autoptr(QemuClipboardInfo) info =
+ qemu_clipboard_info_new(&vd->cbpeer, cbinfo.cbinfo[i].selection);
+ /* this will steal clipboard data pointer from cbinfo.types */
+ memcpy(info->types, cbinfo.cbinfo[i].types, sizeof(cbinfo.cbinfo[i].types));
+ qemu_clipboard_update(info);
+ }
+
+ return 0;
+}
+
+static const VMStateInfo vmstate_cbinfos = {
+ .name = "vdagent/cbinfos",
+ .get = get_cbinfo,
+ .put = put_cbinfo,
+};
+
+static const VMStateDescription vmstate_vdagent = {
+ .name = "vdagent",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .post_load = post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(connected, VDAgentChardev),
+ VMSTATE_UINT32(caps, VDAgentChardev),
+ VMSTATE_STRUCT(chunk, VDAgentChardev, 0, vmstate_chunk, VDIChunkHeader),
+ VMSTATE_UINT32(chunksize, VDAgentChardev),
+ VMSTATE_UINT32(msgsize, VDAgentChardev),
+ VMSTATE_VBUFFER_ALLOC_UINT32(msgbuf, VDAgentChardev, 0, 0, msgsize),
+ VMSTATE_UINT32(xsize, VDAgentChardev),
+ VMSTATE_UINT32(xoff, VDAgentChardev),
+ VMSTATE_VBUFFER_ALLOC_UINT32(xbuf, VDAgentChardev, 0, 0, xsize),
+ VMSTATE_STRUCT_POINTER(outbuf, VDAgentChardev, vmstate_vdba, GByteArray),
+ VMSTATE_UINT32(mouse_x, VDAgentChardev),
+ VMSTATE_UINT32(mouse_y, VDAgentChardev),
+ VMSTATE_UINT32(mouse_btn, VDAgentChardev),
+ VMSTATE_UINT32(mouse_display, VDAgentChardev),
+ VMSTATE_UINT32_ARRAY(last_serial, VDAgentChardev,
+ QEMU_CLIPBOARD_SELECTION__COUNT),
+ VMSTATE_UINT32_ARRAY(cbpending, VDAgentChardev,
+ QEMU_CLIPBOARD_SELECTION__COUNT),
+ {
+ .name = "cbinfos",
+ .info = &vmstate_cbinfos,
+ .flags = VMS_SINGLE,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void vdagent_chr_init(Object *obj)
{
VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj);
- buffer_init(&vd->outbuf, "vdagent-outbuf");
- error_setg(&vd->migration_blocker,
- "The vdagent chardev doesn't yet support migration");
+ vd->outbuf = g_byte_array_new();
+ vmstate_register_any(NULL, &vmstate_vdagent, vd);
}
static void vdagent_chr_fini(Object *obj)
{
VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj);
- migrate_del_blocker(&vd->migration_blocker);
vdagent_disconnect(vd);
if (vd->mouse_hs) {
qemu_input_handler_unregister(vd->mouse_hs);
}
- buffer_free(&vd->outbuf);
+ g_clear_pointer(&vd->outbuf, g_byte_array_unref);
}
static const TypeInfo vdagent_chr_type_info = {
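The vdagent change swaps the old Buffer type for a GLib GByteArray as the outgoing queue. A small self-contained sketch of that pattern, queue then drain, is below; the function names are illustrative, only the GByteArray calls are real GLib API. Build with `pkg-config --cflags --libs glib-2.0`.

#include <glib.h>
#include <stdio.h>

static void outbuf_queue(GByteArray *outbuf, const void *data, guint len)
{
    g_byte_array_append(outbuf, (const guint8 *)data, len);
}

/* Drain at most 'max' bytes; returns how many were consumed. */
static guint outbuf_drain(GByteArray *outbuf, guint max)
{
    guint len = MIN(outbuf->len, max);
    /* a real backend would write outbuf->data[0..len) somewhere here */
    g_byte_array_remove_range(outbuf, 0, len);
    return len;
}

int main(void)
{
    GByteArray *outbuf = g_byte_array_new();
    outbuf_queue(outbuf, "hello", 5);
    printf("queued 5, drained %u, left %u\n",
           outbuf_drain(outbuf, 3), outbuf->len);
    g_byte_array_unref(outbuf);
    return 0;
}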
diff --git a/ui/vnc-auth-sasl.c b/ui/vnc-auth-sasl.c
index 47fdae5..3f4cfc4 100644
--- a/ui/vnc-auth-sasl.c
+++ b/ui/vnc-auth-sasl.c
@@ -263,8 +263,14 @@ static int protocol_client_auth_sasl_step(VncState *vs, uint8_t *data, size_t le
/* NB, distinction of NULL vs "" is *critical* in SASL */
if (datalen) {
clientdata = (char*)data;
- clientdata[datalen-1] = '\0'; /* Wire includes '\0', but make sure */
- datalen--; /* Don't count NULL byte when passing to _start() */
+ if (clientdata[datalen - 1] != '\0') {
+ trace_vnc_auth_fail(vs, vs->auth, "Malformed SASL client data",
+ "Missing SASL NUL padding byte");
+ sasl_dispose(&vs->sasl.conn);
+ vs->sasl.conn = NULL;
+ goto authabort;
+ }
+ datalen--; /* Discard the extra NUL padding byte */
}
err = sasl_server_step(vs->sasl.conn,
@@ -289,9 +295,10 @@ static int protocol_client_auth_sasl_step(VncState *vs, uint8_t *data, size_t le
goto authabort;
}
- if (serveroutlen) {
+ if (serverout) {
vnc_write_u32(vs, serveroutlen + 1);
- vnc_write(vs, serverout, serveroutlen + 1);
+ vnc_write(vs, serverout, serveroutlen);
+ vnc_write_u8(vs, '\0');
} else {
vnc_write_u32(vs, 0);
}
@@ -384,8 +391,14 @@ static int protocol_client_auth_sasl_start(VncState *vs, uint8_t *data, size_t l
/* NB, distinction of NULL vs "" is *critical* in SASL */
if (datalen) {
clientdata = (char*)data;
- clientdata[datalen-1] = '\0'; /* Should be on wire, but make sure */
- datalen--; /* Don't count NULL byte when passing to _start() */
+ if (clientdata[datalen - 1] != '\0') {
+ trace_vnc_auth_fail(vs, vs->auth, "Malformed SASL client data",
+ "Missing SASL NUL padding byte");
+ sasl_dispose(&vs->sasl.conn);
+ vs->sasl.conn = NULL;
+ goto authabort;
+ }
+ datalen--; /* Discard the extra NUL padding byte */
}
err = sasl_server_start(vs->sasl.conn,
@@ -410,9 +423,10 @@ static int protocol_client_auth_sasl_start(VncState *vs, uint8_t *data, size_t l
goto authabort;
}
- if (serveroutlen) {
+ if (serverout) {
vnc_write_u32(vs, serveroutlen + 1);
- vnc_write(vs, serverout, serveroutlen + 1);
+ vnc_write(vs, serverout, serveroutlen);
+ vnc_write_u8(vs, '\0');
} else {
vnc_write_u32(vs, 0);
}
@@ -524,13 +538,13 @@ static int protocol_client_auth_sasl_mechname_len(VncState *vs, uint8_t *data, s
return 0;
}
-static char *
+static int
vnc_socket_ip_addr_string(QIOChannelSocket *ioc,
bool local,
+ char **addrstr,
Error **errp)
{
SocketAddress *addr;
- char *ret;
if (local) {
addr = qio_channel_socket_get_local_address(ioc, errp);
@@ -538,17 +552,24 @@ vnc_socket_ip_addr_string(QIOChannelSocket *ioc,
addr = qio_channel_socket_get_remote_address(ioc, errp);
}
if (!addr) {
- return NULL;
+ return -1;
}
if (addr->type != SOCKET_ADDRESS_TYPE_INET) {
- error_setg(errp, "Not an inet socket type");
+ *addrstr = NULL;
qapi_free_SocketAddress(addr);
- return NULL;
+ return 0;
}
- ret = g_strdup_printf("%s;%s", addr->u.inet.host, addr->u.inet.port);
+ *addrstr = g_strdup_printf("%s;%s", addr->u.inet.host, addr->u.inet.port);
qapi_free_SocketAddress(addr);
- return ret;
+ return 0;
+}
+
+static bool
+vnc_socket_is_unix(QIOChannelSocket *ioc)
+{
+ SocketAddress *addr = qio_channel_socket_get_local_address(ioc, NULL);
+ return addr && addr->type == SOCKET_ADDRESS_TYPE_UNIX;
}
void start_auth_sasl(VncState *vs)
@@ -561,15 +582,15 @@ void start_auth_sasl(VncState *vs)
int mechlistlen;
/* Get local & remote client addresses in form IPADDR;PORT */
- localAddr = vnc_socket_ip_addr_string(vs->sioc, true, &local_err);
- if (!localAddr) {
+ if (vnc_socket_ip_addr_string(vs->sioc, true,
+ &localAddr, &local_err) < 0) {
trace_vnc_auth_fail(vs, vs->auth, "Cannot format local IP",
error_get_pretty(local_err));
goto authabort;
}
- remoteAddr = vnc_socket_ip_addr_string(vs->sioc, false, &local_err);
- if (!remoteAddr) {
+ if (vnc_socket_ip_addr_string(vs->sioc, false,
+ &remoteAddr, &local_err) < 0) {
trace_vnc_auth_fail(vs, vs->auth, "Cannot format remote IP",
error_get_pretty(local_err));
g_free(localAddr);
@@ -621,16 +642,17 @@ void start_auth_sasl(VncState *vs)
goto authabort;
}
} else {
- vs->sasl.wantSSF = 1;
+ vs->sasl.wantSSF = !vnc_socket_is_unix(vs->sioc);
}
memset (&secprops, 0, sizeof secprops);
/* Inform SASL that we've got an external SSF layer from TLS.
*
- * Disable SSF, if using TLS+x509+SASL only. TLS without x509
- * is not sufficiently strong
+ * Disable SSF, if using TLS+x509+SASL only, or UNIX sockets.
+ * TLS without x509 is not sufficiently strong, nor is plain
+ * TCP
*/
- if (vs->vd->is_unix ||
+ if (vnc_socket_is_unix(vs->sioc) ||
(vs->auth == VNC_AUTH_VENCRYPT &&
vs->subauth == VNC_AUTH_VENCRYPT_X509SASL)) {
/* If we've got TLS or UNIX domain sock, we don't care about SSF */
@@ -674,6 +696,13 @@ void start_auth_sasl(VncState *vs)
}
trace_vnc_auth_sasl_mech_list(vs, mechlist);
+ if (g_str_equal(mechlist, "")) {
+ trace_vnc_auth_fail(vs, vs->auth, "no available SASL mechanisms", "");
+ sasl_dispose(&vs->sasl.conn);
+ vs->sasl.conn = NULL;
+ goto authabort;
+ }
+
vs->sasl.mechlist = g_strdup(mechlist);
mechlistlen = strlen(mechlist);
vnc_write_u32(vs, mechlistlen);
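The SASL hunks above stop overwriting the last byte of client data and instead reject tokens whose mandatory trailing NUL is missing. A minimal sketch of that validation step, with illustrative names rather than the VNC server's own:

#include <stddef.h>
#include <stdint.h>

/* Returns the payload length without the NUL terminator, or -1 if the
 * terminator the protocol requires is absent. */
static long sasl_strip_nul(const uint8_t *data, size_t datalen)
{
    if (datalen == 0) {
        return 0;                 /* empty token: nothing to strip */
    }
    if (data[datalen - 1] != '\0') {
        return -1;                /* malformed: missing NUL padding byte */
    }
    return (long)(datalen - 1);   /* don't count the NUL toward the token */
}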
diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c
index 41f559e..25c7b2c 100644
--- a/ui/vnc-enc-tight.c
+++ b/ui/vnc-enc-tight.c
@@ -150,7 +150,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h)
* If client is big-endian, color samples begin from the second
* byte (offset 1) of a 32-bit pixel value.
*/
- off = vs->client_be;
+ off = vs->client_endian == G_BIG_ENDIAN ? 1 : 0;
memset(stats, 0, sizeof (stats));
@@ -891,7 +891,7 @@ static void tight_pack24(VncState *vs, uint8_t *buf, size_t count, size_t *ret)
buf8 = buf;
- if (1 /* FIXME */) {
+ if (vs->client_endian == G_BYTE_ORDER) {
rshift = vs->client_pf.rshift;
gshift = vs->client_pf.gshift;
bshift = vs->client_pf.bshift;
@@ -1001,16 +1001,24 @@ static int send_mono_rect(VncState *vs, int x, int y,
break;
}
case 2:
- vnc_write(vs, &bg, 2);
- vnc_write(vs, &fg, 2);
+ {
+ uint16_t bg16 = bg;
+ uint16_t fg16 = fg;
+ vnc_write(vs, &bg16, 2);
+ vnc_write(vs, &fg16, 2);
tight_encode_mono_rect16(vs->tight->tight.buffer, w, h, bg, fg);
break;
+ }
default:
- vnc_write_u8(vs, bg);
- vnc_write_u8(vs, fg);
+ {
+ uint8_t bg8 = bg;
+ uint8_t fg8 = fg;
+ vnc_write_u8(vs, bg8);
+ vnc_write_u8(vs, fg8);
tight_encode_mono_rect8(vs->tight->tight.buffer, w, h, bg, fg);
break;
}
+ }
vs->tight->tight.offset = bytes;
bytes = tight_compress_data(vs, stream, bytes, level, Z_DEFAULT_STRATEGY);
diff --git a/ui/vnc-enc-zrle.c b/ui/vnc-enc-zrle.c
index bd33b89..97ec6c7 100644
--- a/ui/vnc-enc-zrle.c
+++ b/ui/vnc-enc-zrle.c
@@ -255,7 +255,7 @@ static void zrle_write_u8(VncState *vs, uint8_t value)
static int zrle_send_framebuffer_update(VncState *vs, int x, int y,
int w, int h)
{
- bool be = vs->client_be;
+ bool be = vs->client_endian == G_BIG_ENDIAN;
size_t bytes;
int zywrle_level;
diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c
index fcca7ec..d3486af 100644
--- a/ui/vnc-jobs.c
+++ b/ui/vnc-jobs.c
@@ -188,7 +188,7 @@ static void vnc_async_encoding_start(VncState *orig, VncState *local)
local->lossy_rect = orig->lossy_rect;
local->write_pixels = orig->write_pixels;
local->client_pf = orig->client_pf;
- local->client_be = orig->client_be;
+ local->client_endian = orig->client_endian;
local->tight = orig->tight;
local->zlib = orig->zlib;
local->hextile = orig->hextile;
diff --git a/ui/vnc.c b/ui/vnc.c
index dae5d51..e9c30aa 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -29,8 +29,8 @@
#include "vnc-jobs.h"
#include "trace.h"
#include "hw/qdev-core.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -146,8 +146,6 @@ static void vnc_init_basic_info(SocketAddress *addr,
default:
abort();
}
-
- return;
}
static void vnc_init_basic_info_from_server_addr(QIOChannelSocket *ioc,
@@ -893,7 +891,7 @@ void vnc_convert_pixel(VncState *vs, uint8_t *buf, uint32_t v)
buf[0] = v;
break;
case 2:
- if (vs->client_be) {
+ if (vs->client_endian == G_BIG_ENDIAN) {
buf[0] = v >> 8;
buf[1] = v;
} else {
@@ -903,7 +901,7 @@ void vnc_convert_pixel(VncState *vs, uint8_t *buf, uint32_t v)
break;
default:
case 4:
- if (vs->client_be) {
+ if (vs->client_endian == G_BIG_ENDIAN) {
buf[0] = v >> 24;
buf[1] = v >> 16;
buf[2] = v >> 8;
@@ -1935,7 +1933,7 @@ static void do_key_event(VncState *vs, int down, int keycode, int sym)
}
qkbd_state_key_event(vs->vd->kbd, qcode, down);
- if (!qemu_console_is_graphic(vs->vd->dcl.con)) {
+ if (QEMU_IS_TEXT_CONSOLE(vs->vd->dcl.con)) {
QemuTextConsole *con = QEMU_TEXT_CONSOLE(vs->vd->dcl.con);
bool numlock = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_NUMLOCK);
bool control = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_CTRL);
@@ -2242,7 +2240,8 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings)
static void set_pixel_conversion(VncState *vs)
{
- pixman_format_code_t fmt = qemu_pixman_get_format(&vs->client_pf);
+ pixman_format_code_t fmt = qemu_pixman_get_format(&vs->client_pf,
+ vs->client_endian);
if (fmt == VNC_SERVER_FB_FORMAT) {
vs->write_pixels = vnc_write_pixels_copy;
@@ -2314,7 +2313,7 @@ static void set_pixel_format(VncState *vs, int bits_per_pixel,
vs->client_pf.bits_per_pixel = bits_per_pixel;
vs->client_pf.bytes_per_pixel = bits_per_pixel / 8;
vs->client_pf.depth = bits_per_pixel == 32 ? 24 : bits_per_pixel;
- vs->client_be = big_endian_flag;
+ vs->client_endian = big_endian_flag ? G_BIG_ENDIAN : G_LITTLE_ENDIAN;
if (!true_color_flag) {
send_color_map(vs);
@@ -2783,7 +2782,7 @@ static int protocol_client_auth_vnc(VncState *vs, uint8_t *data, size_t len)
vnc_munge_des_rfb_key(key, sizeof(key));
cipher = qcrypto_cipher_new(
- QCRYPTO_CIPHER_ALG_DES,
+ QCRYPTO_CIPHER_ALGO_DES,
QCRYPTO_CIPHER_MODE_ECB,
key, G_N_ELEMENTS(key),
&err);
@@ -3386,6 +3385,16 @@ static const DisplayChangeListenerOps dcl_ops = {
.dpy_cursor_define = vnc_dpy_cursor_define,
};
+static void vmstate_change_handler(void *opaque, bool running, RunState state)
+{
+ VncDisplay *vd = opaque;
+
+ if (state != RUN_STATE_RUNNING) {
+ return;
+ }
+ update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE);
+}
+
void vnc_display_init(const char *id, Error **errp)
{
VncDisplay *vd;
@@ -3422,6 +3431,8 @@ void vnc_display_init(const char *id, Error **errp)
vd->dcl.ops = &dcl_ops;
register_displaychangelistener(&vd->dcl);
vd->kbd = qkbd_state_init(vd->dcl.con);
+ vd->vmstate_handler_entry = qemu_add_vm_change_state_handler(
+ &vmstate_change_handler, vd);
}
@@ -3430,7 +3441,6 @@ static void vnc_display_close(VncDisplay *vd)
if (!vd) {
return;
}
- vd->is_unix = false;
if (vd->listener) {
qio_net_listener_disconnect(vd->listener);
@@ -3852,7 +3862,7 @@ static int vnc_display_get_addresses(QemuOpts *opts,
return 0;
}
if (qemu_opt_get(opts, "websocket") &&
- !qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA1)) {
+ !qcrypto_hash_supports(QCRYPTO_HASH_ALGO_SHA1)) {
error_setg(errp,
"SHA1 hash support is required for websockets");
return -1;
@@ -3932,8 +3942,6 @@ static int vnc_display_connect(VncDisplay *vd,
error_setg(errp, "Expected a single address in reverse mode");
return -1;
}
- /* TODO SOCKET_ADDRESS_TYPE_FD when fd has AF_UNIX */
- vd->is_unix = saddr_list->value->type == SOCKET_ADDRESS_TYPE_UNIX;
sioc = qio_channel_socket_new();
qio_channel_set_name(QIO_CHANNEL(sioc), "vnc-reverse");
if (qio_channel_socket_connect_sync(sioc, saddr_list->value, errp) < 0) {
@@ -4064,7 +4072,7 @@ void vnc_display_open(const char *id, Error **errp)
}
if (password) {
if (!qcrypto_cipher_supports(
- QCRYPTO_CIPHER_ALG_DES, QCRYPTO_CIPHER_MODE_ECB)) {
+ QCRYPTO_CIPHER_ALGO_DES, QCRYPTO_CIPHER_MODE_ECB)) {
error_setg(errp,
"Cipher backend does not support DES algorithm");
goto fail;
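The vnc.c and encoder hunks replace the client_be boolean with a client_endian value holding G_LITTLE_ENDIAN or G_BIG_ENDIAN, compared against G_BYTE_ORDER where the host order matters. A short sketch of serialising a 32-bit pixel under that convention (illustrative only; the GLib endian macros are real):

#include <glib.h>
#include <stdint.h>

static void put_pixel32(uint8_t buf[4], uint32_t v, int client_endian)
{
    if (client_endian == G_BIG_ENDIAN) {
        buf[0] = v >> 24; buf[1] = v >> 16; buf[2] = v >> 8; buf[3] = v;
    } else {
        buf[3] = v >> 24; buf[2] = v >> 16; buf[1] = v >> 8; buf[0] = v;
    }
}

/* When the client's order matches the host (client_endian == G_BYTE_ORDER),
 * copying the native 32-bit value yields the same byte sequence. */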
diff --git a/ui/vnc.h b/ui/vnc.h
index 4521dc8..b3e0726 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -81,8 +81,8 @@ typedef void VncSendHextileTile(VncState *vs,
/* VNC_MAX_WIDTH must be a multiple of VNC_DIRTY_PIXELS_PER_BIT. */
-#define VNC_MAX_WIDTH ROUND_UP(2560, VNC_DIRTY_PIXELS_PER_BIT)
-#define VNC_MAX_HEIGHT 2048
+#define VNC_MAX_WIDTH ROUND_UP(5120, VNC_DIRTY_PIXELS_PER_BIT)
+#define VNC_MAX_HEIGHT 2160
/* VNC_DIRTY_BITS is the number of bits in the dirty bitmap. */
#define VNC_DIRTY_BITS (VNC_MAX_WIDTH / VNC_DIRTY_PIXELS_PER_BIT)
@@ -168,7 +168,6 @@ struct VncDisplay
const char *id;
QTAILQ_ENTRY(VncDisplay) next;
- bool is_unix;
char *password;
time_t expires;
int auth;
@@ -186,6 +185,8 @@ struct VncDisplay
#endif
AudioState *audio_state;
+
+ VMChangeStateEntry *vmstate_handler_entry;
};
typedef struct VncTight {
@@ -324,7 +325,7 @@ struct VncState
VncWritePixels *write_pixels;
PixelFormat client_pf;
pixman_format_code_t client_format;
- bool client_be;
+ int client_endian; /* G_LITTLE_ENDIAN or G_BIG_ENDIAN */
CaptureVoiceOut *audio_cap;
struct audsettings as;
diff --git a/ui/win32-kbd-hook.c b/ui/win32-kbd-hook.c
index 1ac237d..f448247 100644
--- a/ui/win32-kbd-hook.c
+++ b/ui/win32-kbd-hook.c
@@ -7,7 +7,7 @@
*/
#include "qemu/osdep.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "ui/win32-kbd-hook.h"
static Notifier win32_unhook_notifier;
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 266c9dd..2e0a5da 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -17,6 +17,7 @@
#include "block/block.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
+#include "qemu/lockcnt.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
@@ -27,6 +28,9 @@
/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
+static void adjust_polling_time(AioContext *ctx, AioPolledEvent *poll,
+ int64_t block_ns);
+
bool aio_poll_disabled(AioContext *ctx)
{
return qatomic_read(&ctx->poll_disable_cnt);
@@ -391,7 +395,8 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
* scanning all handlers with aio_dispatch_handlers().
*/
static bool aio_dispatch_ready_handlers(AioContext *ctx,
- AioHandlerList *ready_list)
+ AioHandlerList *ready_list,
+ int64_t block_ns)
{
bool progress = false;
AioHandler *node;
@@ -399,6 +404,14 @@ static bool aio_dispatch_ready_handlers(AioContext *ctx,
while ((node = QLIST_FIRST(ready_list))) {
QLIST_REMOVE(node, node_ready);
progress = aio_dispatch_handler(ctx, node) || progress;
+
+ /*
+ * Adjust polling time only after aio_dispatch_handler(), which can
+ * add the handler to ctx->poll_aio_handlers.
+ */
+ if (ctx->poll_max_ns && QLIST_IS_INSERTED(node, node_poll)) {
+ adjust_polling_time(ctx, &node->poll, block_ns);
+ }
}
return progress;
@@ -578,13 +591,19 @@ static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
int64_t *timeout)
{
+ AioHandler *node;
int64_t max_ns;
if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
return false;
}
- max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+ max_ns = 0;
+ QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
+ max_ns = MAX(max_ns, node->poll.ns);
+ }
+ max_ns = qemu_soonest_timeout(*timeout, max_ns);
+
if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
/*
* Enable poll mode. It pairs with the poll_set_started() in
@@ -599,6 +618,46 @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
return false;
}
+static void adjust_polling_time(AioContext *ctx, AioPolledEvent *poll,
+ int64_t block_ns)
+{
+ if (block_ns <= poll->ns) {
+ /* This is the sweet spot, no adjustment needed */
+ } else if (block_ns > ctx->poll_max_ns) {
+ /* We'd have to poll for too long, poll less */
+ int64_t old = poll->ns;
+
+ if (ctx->poll_shrink) {
+ poll->ns /= ctx->poll_shrink;
+ } else {
+ poll->ns = 0;
+ }
+
+ trace_poll_shrink(ctx, old, poll->ns);
+ } else if (poll->ns < ctx->poll_max_ns &&
+ block_ns < ctx->poll_max_ns) {
+ /* There is room to grow, poll longer */
+ int64_t old = poll->ns;
+ int64_t grow = ctx->poll_grow;
+
+ if (grow == 0) {
+ grow = 2;
+ }
+
+ if (poll->ns) {
+ poll->ns *= grow;
+ } else {
+ poll->ns = 4000; /* start polling at 4 microseconds */
+ }
+
+ if (poll->ns > ctx->poll_max_ns) {
+ poll->ns = ctx->poll_max_ns;
+ }
+
+ trace_poll_grow(ctx, old, poll->ns);
+ }
+}
+
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
@@ -606,6 +665,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
bool use_notify_me;
int64_t timeout;
int64_t start = 0;
+ int64_t block_ns = 0;
/*
* There cannot be two concurrent aio_poll calls for the same AioContext (or
@@ -678,49 +738,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
aio_notify_accept(ctx);
- /* Adjust polling time */
+ /* Calculate blocked time for adaptive polling */
if (ctx->poll_max_ns) {
- int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
-
- if (block_ns <= ctx->poll_ns) {
- /* This is the sweet spot, no adjustment needed */
- } else if (block_ns > ctx->poll_max_ns) {
- /* We'd have to poll for too long, poll less */
- int64_t old = ctx->poll_ns;
-
- if (ctx->poll_shrink) {
- ctx->poll_ns /= ctx->poll_shrink;
- } else {
- ctx->poll_ns = 0;
- }
-
- trace_poll_shrink(ctx, old, ctx->poll_ns);
- } else if (ctx->poll_ns < ctx->poll_max_ns &&
- block_ns < ctx->poll_max_ns) {
- /* There is room to grow, poll longer */
- int64_t old = ctx->poll_ns;
- int64_t grow = ctx->poll_grow;
-
- if (grow == 0) {
- grow = 2;
- }
-
- if (ctx->poll_ns) {
- ctx->poll_ns *= grow;
- } else {
- ctx->poll_ns = 4000; /* start polling at 4 microseconds */
- }
-
- if (ctx->poll_ns > ctx->poll_max_ns) {
- ctx->poll_ns = ctx->poll_max_ns;
- }
-
- trace_poll_grow(ctx, old, ctx->poll_ns);
- }
+ block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
}
progress |= aio_bh_poll(ctx);
- progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
+ progress |= aio_dispatch_ready_handlers(ctx, &ready_list, block_ns);
aio_free_deleted_handlers(ctx);
@@ -766,11 +790,18 @@ void aio_context_use_g_source(AioContext *ctx)
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink, Error **errp)
{
+ AioHandler *node;
+
+ qemu_lockcnt_inc(&ctx->list_lock);
+ QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ node->poll.ns = 0;
+ }
+ qemu_lockcnt_dec(&ctx->list_lock);
+
/* No thread synchronization here, it doesn't matter if an incorrect value
* is used once.
*/
ctx->poll_max_ns = max_ns;
- ctx->poll_ns = 0;
ctx->poll_grow = grow;
ctx->poll_shrink = shrink;
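The aio-posix change moves the adaptive polling window from the AioContext to each handler. A standalone sketch of the grow/shrink heuristic, with an illustrative struct and the same constants as the patch:

#include <stdint.h>
#include <stdio.h>

struct poll_window {
    int64_t ns;   /* current polling window for one handler */
};

static void adjust_poll_window(struct poll_window *w, int64_t block_ns,
                               int64_t max_ns, int64_t grow, int64_t shrink)
{
    if (block_ns <= w->ns) {
        /* sweet spot, leave the window alone */
    } else if (block_ns > max_ns) {
        w->ns = shrink ? w->ns / shrink : 0;   /* polling would take too long */
    } else if (w->ns < max_ns && block_ns < max_ns) {
        w->ns = w->ns ? w->ns * (grow ? grow : 2) : 4000;
        if (w->ns > max_ns) {
            w->ns = max_ns;
        }
    }
}

int main(void)
{
    struct poll_window w = { .ns = 0 };
    adjust_poll_window(&w, 10000, 32000, 0, 0);   /* grows from 0 to 4000 ns */
    adjust_poll_window(&w, 10000, 32000, 0, 0);   /* grows to 8000 ns */
    adjust_poll_window(&w, 64000, 32000, 0, 0);   /* blocked too long: reset */
    printf("window now %lld ns\n", (long long)w.ns);
    return 0;
}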
diff --git a/util/aio-posix.h b/util/aio-posix.h
index 4264c51..82a0201 100644
--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -38,6 +38,7 @@ struct AioHandler {
#endif
int64_t poll_idle_timeout; /* when to stop userspace polling */
bool poll_ready; /* has polling detected an event? */
+ AioPolledEvent poll;
};
/* Add a handler to a ready list */
diff --git a/util/aio-win32.c b/util/aio-win32.c
index d144f93..6583d5c 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/main-loop.h"
+#include "qemu/lockcnt.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
diff --git a/util/async.c b/util/async.c
index 0467890..2719c62 100644
--- a/util/async.c
+++ b/util/async.c
@@ -30,11 +30,12 @@
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
+#include "qemu/lockcnt.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "trace.h"
/***********************************************************/
@@ -368,7 +369,7 @@ aio_ctx_finalize(GSource *source)
QEMUBH *bh;
unsigned flags;
- thread_pool_free(ctx->thread_pool);
+ thread_pool_free_aio(ctx->thread_pool);
#ifdef CONFIG_LINUX_AIO
if (ctx->linux_aio) {
@@ -434,10 +435,10 @@ GSource *aio_get_g_source(AioContext *ctx)
return &ctx->source;
}
-ThreadPool *aio_get_thread_pool(AioContext *ctx)
+ThreadPoolAio *aio_get_thread_pool(AioContext *ctx)
{
if (!ctx->thread_pool) {
- ctx->thread_pool = thread_pool_new(ctx);
+ ctx->thread_pool = thread_pool_new_aio(ctx);
}
return ctx->thread_pool;
}
@@ -608,7 +609,6 @@ AioContext *aio_context_new(Error **errp)
qemu_rec_mutex_init(&ctx->lock);
timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
- ctx->poll_ns = 0;
ctx->poll_max_ns = 0;
ctx->poll_grow = 0;
ctx->poll_shrink = 0;
@@ -746,7 +746,7 @@ void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
int64_t max, Error **errp)
{
- if (min > max || !max || min > INT_MAX || max > INT_MAX) {
+ if (min > max || max <= 0 || min < 0 || min > INT_MAX || max > INT_MAX) {
error_setg(errp, "bad thread-pool-min/thread-pool-max values");
return;
}
diff --git a/util/block-helpers.c b/util/block-helpers.c
index c485143..052b4e1 100644
--- a/util/block-helpers.c
+++ b/util/block-helpers.c
@@ -10,12 +10,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qerror.h"
#include "block-helpers.h"
/**
* check_block_size:
- * @id: The unique ID of the object
* @name: The name of the property being validated
* @value: The block size in bytes
* @errp: A pointer to an area to store an error
@@ -24,23 +22,23 @@
* 1. At least MIN_BLOCK_SIZE
* 2. No larger than MAX_BLOCK_SIZE
* 3. A power of 2
+ *
+ * Returns: true on success, false on failure
*/
-void check_block_size(const char *id, const char *name, int64_t value,
- Error **errp)
+bool check_block_size(const char *name, int64_t value, Error **errp)
{
- /* value of 0 means "unset" */
- if (value && (value < MIN_BLOCK_SIZE || value > MAX_BLOCK_SIZE)) {
- error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
- id, name, value, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
- return;
+ if (!value) {
+ /* unset */
+ return true;
}
- /* We rely on power-of-2 blocksizes for bitmasks */
- if ((value & (value - 1)) != 0) {
+ if (value < MIN_BLOCK_SIZE || value > MAX_BLOCK_SIZE
+ || (value & (value - 1))) {
error_setg(errp,
- "Property %s.%s doesn't take value '%" PRId64
- "', it's not a power of 2",
- id, name, value);
- return;
+ "parameter %s must be a power of 2 between %" PRId64
+ " and %" PRId64,
+ name, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
+ return false;
}
+ return true;
}
diff --git a/util/block-helpers.h b/util/block-helpers.h
index b53295a..838b082 100644
--- a/util/block-helpers.h
+++ b/util/block-helpers.h
@@ -13,7 +13,6 @@
#define MAX_BLOCK_SIZE (2 * MiB)
#define MAX_BLOCK_SIZE_STR "2 MiB"
-void check_block_size(const char *id, const char *name, int64_t value,
- Error **errp);
+bool check_block_size(const char *name, int64_t value, Error **errp);
#endif /* BLOCK_HELPERS_H */
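check_block_size() now returns a bool and folds the range and power-of-two checks into one condition. A minimal sketch of that rule, with placeholder limits rather than the real MIN/MAX_BLOCK_SIZE:

#include <stdbool.h>
#include <stdint.h>

static bool block_size_ok(int64_t value, int64_t min, int64_t max)
{
    if (value == 0) {
        return true;                       /* 0 means "unset", always fine */
    }
    return value >= min && value <= max &&
           (value & (value - 1)) == 0;     /* power of two */
}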
diff --git a/util/cacheflush.c b/util/cacheflush.c
index a089061..17c5891 100644
--- a/util/cacheflush.c
+++ b/util/cacheflush.c
@@ -229,6 +229,10 @@ static void __attribute__((constructor)) init_cache_info(void)
/* Caches are coherent and do not require flushing; symbol inline. */
+#elif defined(EMSCRIPTEN)
+
+/* Wasm doesn't have executable region of memory. */
+
#elif defined(__aarch64__) && !defined(CONFIG_WIN32)
/*
* For Windows, we use generic implementation of flush_idcache_range, that
@@ -279,9 +283,11 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
}
- asm volatile("dsb\tish" : : : "memory");
}
+ /* DSB unconditionally to ensure any outstanding writes are committed. */
+ asm volatile("dsb\tish" : : : "memory");
+
/*
* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
* of Unification is not required for instruction to data coherence.
diff --git a/util/coroutine-wasm.c b/util/coroutine-wasm.c
new file mode 100644
index 0000000..cb1ec92
--- /dev/null
+++ b/util/coroutine-wasm.c
@@ -0,0 +1,127 @@
+/*
+ * emscripten fiber coroutine initialization code
+ * based on coroutine-ucontext.c
+ *
+ * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
+ * Copyright (C) 2011 Kevin Wolf <kwolf@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.0 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/coroutine_int.h"
+#include "qemu/coroutine-tls.h"
+
+#include <emscripten/fiber.h>
+
+typedef struct {
+ Coroutine base;
+ void *stack;
+ size_t stack_size;
+
+ void *asyncify_stack;
+ size_t asyncify_stack_size;
+
+ CoroutineAction action;
+
+ emscripten_fiber_t fiber;
+} CoroutineEmscripten;
+
+/**
+ * Per-thread coroutine bookkeeping
+ */
+QEMU_DEFINE_STATIC_CO_TLS(Coroutine *, current);
+QEMU_DEFINE_STATIC_CO_TLS(CoroutineEmscripten *, leader);
+size_t leader_asyncify_stack_size = COROUTINE_STACK_SIZE;
+
+static void coroutine_trampoline(void *co_)
+{
+ Coroutine *co = co_;
+
+ while (true) {
+ co->entry(co->entry_arg);
+ qemu_coroutine_switch(co, co->caller, COROUTINE_TERMINATE);
+ }
+}
+
+Coroutine *qemu_coroutine_new(void)
+{
+ CoroutineEmscripten *co;
+
+ co = g_malloc0(sizeof(*co));
+
+ co->stack_size = COROUTINE_STACK_SIZE;
+ co->stack = qemu_alloc_stack(&co->stack_size);
+
+ co->asyncify_stack_size = COROUTINE_STACK_SIZE;
+ co->asyncify_stack = g_malloc0(co->asyncify_stack_size);
+ emscripten_fiber_init(&co->fiber, coroutine_trampoline, &co->base,
+ co->stack, co->stack_size, co->asyncify_stack,
+ co->asyncify_stack_size);
+
+ return &co->base;
+}
+
+void qemu_coroutine_delete(Coroutine *co_)
+{
+ CoroutineEmscripten *co = DO_UPCAST(CoroutineEmscripten, base, co_);
+
+ qemu_free_stack(co->stack, co->stack_size);
+ g_free(co->asyncify_stack);
+ g_free(co);
+}
+
+CoroutineAction qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
+ CoroutineAction action)
+{
+ CoroutineEmscripten *from = DO_UPCAST(CoroutineEmscripten, base, from_);
+ CoroutineEmscripten *to = DO_UPCAST(CoroutineEmscripten, base, to_);
+
+ set_current(to_);
+ to->action = action;
+ emscripten_fiber_swap(&from->fiber, &to->fiber);
+ return from->action;
+}
+
+Coroutine *qemu_coroutine_self(void)
+{
+ Coroutine *self = get_current();
+
+ if (!self) {
+ CoroutineEmscripten *leaderp = get_leader();
+ if (!leaderp) {
+ leaderp = g_malloc0(sizeof(*leaderp));
+ leaderp->asyncify_stack = g_malloc0(leader_asyncify_stack_size);
+ leaderp->asyncify_stack_size = leader_asyncify_stack_size;
+ emscripten_fiber_init_from_current_context(
+ &leaderp->fiber,
+ leaderp->asyncify_stack,
+ leaderp->asyncify_stack_size);
+ leaderp->stack = leaderp->fiber.stack_limit;
+ leaderp->stack_size =
+ leaderp->fiber.stack_base - leaderp->fiber.stack_limit;
+ set_leader(leaderp);
+ }
+ self = &leaderp->base;
+ set_current(self);
+ }
+ return self;
+}
+
+bool qemu_in_coroutine(void)
+{
+ Coroutine *self = get_current();
+
+ return self && self->caller;
+}
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index 8ca775a..5746889 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -17,10 +17,13 @@
# define HWCAP2_BTI 0 /* added in glibc 2.32 */
# endif
#endif
+#ifdef CONFIG_ELF_AUX_INFO
+#include <sys/auxv.h>
+#endif
#ifdef CONFIG_DARWIN
# include <sys/sysctl.h>
#endif
-#ifdef __OpenBSD__
+#if defined(__OpenBSD__) && !defined(CONFIG_ELF_AUX_INFO)
# include <machine/armreg.h>
# include <machine/cpu.h>
# include <sys/types.h>
@@ -61,7 +64,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
info = CPUINFO_ALWAYS;
-#ifdef CONFIG_LINUX
+#if defined(CONFIG_LINUX) || defined(CONFIG_ELF_AUX_INFO)
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
@@ -78,7 +81,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
info |= sysctl_for_bool("hw.optional.arm.FEAT_PMULL") * CPUINFO_PMULL;
info |= sysctl_for_bool("hw.optional.arm.FEAT_BTI") * CPUINFO_BTI;
#endif
-#ifdef __OpenBSD__
+#if defined(__OpenBSD__) && !defined(CONFIG_ELF_AUX_INFO)
int mib[2];
uint64_t isar0;
uint64_t pfr1;
diff --git a/util/cpuinfo-i386.c b/util/cpuinfo-i386.c
index 90f92a4..c8c8a1b 100644
--- a/util/cpuinfo-i386.c
+++ b/util/cpuinfo-i386.c
@@ -35,6 +35,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
__cpuid(1, a, b, c, d);
info |= (d & bit_SSE2 ? CPUINFO_SSE2 : 0);
+ info |= (c & bit_OSXSAVE ? CPUINFO_OSXSAVE : 0);
info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);
info |= (c & bit_PCLMUL ? CPUINFO_PCLMUL : 0);
diff --git a/util/cpuinfo-ppc.c b/util/cpuinfo-ppc.c
index 1304f9a..4d3d3aa 100644
--- a/util/cpuinfo-ppc.c
+++ b/util/cpuinfo-ppc.c
@@ -14,7 +14,8 @@
# include "elf.h"
# endif
#endif
-#ifdef __FreeBSD__
+#if defined(CONFIG_ELF_AUX_INFO)
+# include <sys/auxv.h>
# include <machine/cpu.h>
# ifndef PPC_FEATURE2_ARCH_3_1
# define PPC_FEATURE2_ARCH_3_1 0
@@ -35,7 +36,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
info = CPUINFO_ALWAYS;
-#if defined(CONFIG_LINUX) || defined(__FreeBSD__)
+#if defined(CONFIG_LINUX) || defined(CONFIG_ELF_AUX_INFO)
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
diff --git a/util/cpuinfo-riscv.c b/util/cpuinfo-riscv.c
index 497ce12..0291b72 100644
--- a/util/cpuinfo-riscv.c
+++ b/util/cpuinfo-riscv.c
@@ -4,14 +4,17 @@
*/
#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
#include "host/cpuinfo.h"
#ifdef CONFIG_ASM_HWPROBE_H
#include <asm/hwprobe.h>
#include <sys/syscall.h>
+#include <asm/unistd.h>
#endif
unsigned cpuinfo;
+unsigned riscv_lg2_vlenb;
static volatile sig_atomic_t got_sigill;
static void sigill_handler(int signo, siginfo_t *si, void *data)
@@ -33,7 +36,8 @@ static void sigill_handler(int signo, siginfo_t *si, void *data)
/* Called both as constructor and (possibly) via other constructors. */
unsigned __attribute__((constructor)) cpuinfo_init(void)
{
- unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND;
+ unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZBS
+ | CPUINFO_ZICOND | CPUINFO_ZVE64X;
unsigned info = cpuinfo;
if (info) {
@@ -47,9 +51,16 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
#if defined(__riscv_arch_test) && defined(__riscv_zbb)
info |= CPUINFO_ZBB;
#endif
+#if defined(__riscv_arch_test) && defined(__riscv_zbs)
+ info |= CPUINFO_ZBS;
+#endif
#if defined(__riscv_arch_test) && defined(__riscv_zicond)
info |= CPUINFO_ZICOND;
#endif
+#if defined(__riscv_arch_test) && \
+ (defined(__riscv_vector) || defined(__riscv_zve64x))
+ info |= CPUINFO_ZVE64X;
+#endif
left &= ~info;
#ifdef CONFIG_ASM_HWPROBE_H
@@ -64,15 +75,27 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
&& pair.key >= 0) {
info |= pair.value & RISCV_HWPROBE_EXT_ZBA ? CPUINFO_ZBA : 0;
info |= pair.value & RISCV_HWPROBE_EXT_ZBB ? CPUINFO_ZBB : 0;
- left &= ~(CPUINFO_ZBA | CPUINFO_ZBB);
+ info |= pair.value & RISCV_HWPROBE_EXT_ZBS ? CPUINFO_ZBS : 0;
+ left &= ~(CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZBS);
#ifdef RISCV_HWPROBE_EXT_ZICOND
info |= pair.value & RISCV_HWPROBE_EXT_ZICOND ? CPUINFO_ZICOND : 0;
left &= ~CPUINFO_ZICOND;
#endif
+ /* For rv64, V is Zve64d, a superset of Zve64x. */
+ info |= pair.value & RISCV_HWPROBE_IMA_V ? CPUINFO_ZVE64X : 0;
+#ifdef RISCV_HWPROBE_EXT_ZVE64X
+ info |= pair.value & RISCV_HWPROBE_EXT_ZVE64X ? CPUINFO_ZVE64X : 0;
+#endif
}
}
#endif /* CONFIG_ASM_HWPROBE_H */
+ /*
+ * We only detect support for vectors with hwprobe. All kernels with
+ * support for vectors in userspace also support the hwprobe syscall.
+ */
+ left &= ~CPUINFO_ZVE64X;
+
if (left) {
struct sigaction sa_old, sa_new;
@@ -99,6 +122,15 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
left &= ~CPUINFO_ZBB;
}
+ if (left & CPUINFO_ZBS) {
+ /* Probe for Zbs: bext zero,zero,zero. */
+ got_sigill = 0;
+ asm volatile(".insn r 0x33, 5, 0x24, zero, zero, zero"
+ : : : "memory");
+ info |= got_sigill ? 0 : CPUINFO_ZBS;
+ left &= ~CPUINFO_ZBS;
+ }
+
if (left & CPUINFO_ZICOND) {
/* Probe for Zicond: czero.eqz zero,zero,zero. */
got_sigill = 0;
@@ -112,6 +144,21 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
assert(left == 0);
}
+ if (info & CPUINFO_ZVE64X) {
+ /*
+ * We are guaranteed by RVV-1.0 that VLEN is a power of 2.
+ * We are guaranteed by Zve64x that VLEN >= 64, and that
+ * EEW of {8,16,32,64} are supported.
+ */
+ unsigned long vlenb;
+ /* csrr %0, vlenb */
+ asm volatile(".insn i 0x73, 0x2, %0, zero, -990" : "=r"(vlenb));
+ assert(vlenb >= 8);
+ assert(is_power_of_2(vlenb));
+ /* Cache VLEN in a convenient form. */
+ riscv_lg2_vlenb = ctz32(vlenb);
+ }
+
info |= CPUINFO_ALWAYS;
cpuinfo = info;
return info;
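The RISC-V probe above caches the base-2 log of VLENB, relying on the RVV guarantee that it is a power of two. A tiny illustrative sketch of that caching step (GCC/Clang builtin):

#include <assert.h>
#include <stdint.h>

static unsigned lg2_of_pow2(uint32_t v)
{
    assert(v != 0 && (v & (v - 1)) == 0);  /* must be a power of two */
    return (unsigned)__builtin_ctz(v);     /* e.g. 16 -> 4 */
}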
diff --git a/util/cutils.c b/util/cutils.c
index 4236403..9803f11 100644
--- a/util/cutils.c
+++ b/util/cutils.c
@@ -1144,11 +1144,6 @@ void qemu_init_exec_dir(const char *argv0)
#endif
}
-const char *qemu_get_exec_dir(void)
-{
- return exec_dir;
-}
-
char *get_relocated_path(const char *dir)
{
size_t prefix_len = strlen(CONFIG_PREFIX);
diff --git a/util/envlist.c b/util/envlist.c
index db937c0..15fdbb1 100644
--- a/util/envlist.c
+++ b/util/envlist.c
@@ -12,9 +12,6 @@ struct envlist {
size_t el_count; /* number of entries */
};
-static int envlist_parse(envlist_t *envlist,
- const char *env, int (*)(envlist_t *, const char *));
-
/*
* Allocates new envlist and returns pointer to it.
*/
@@ -52,72 +49,6 @@ envlist_free(envlist_t *envlist)
}
/*
- * Parses comma separated list of set/modify environment
- * variable entries and updates given enlist accordingly.
- *
- * For example:
- * envlist_parse(el, "HOME=foo,SHELL=/bin/sh");
- *
- * inserts/sets environment variables HOME and SHELL.
- *
- * Returns 0 on success, errno otherwise.
- */
-int
-envlist_parse_set(envlist_t *envlist, const char *env)
-{
- return (envlist_parse(envlist, env, &envlist_setenv));
-}
-
-/*
- * Parses comma separated list of unset environment variable
- * entries and removes given variables from given envlist.
- *
- * Returns 0 on success, errno otherwise.
- */
-int
-envlist_parse_unset(envlist_t *envlist, const char *env)
-{
- return (envlist_parse(envlist, env, &envlist_unsetenv));
-}
-
-/*
- * Parses comma separated list of set, modify or unset entries
- * and calls given callback for each entry.
- *
- * Returns 0 in case of success, errno otherwise.
- */
-static int
-envlist_parse(envlist_t *envlist, const char *env,
- int (*callback)(envlist_t *, const char *))
-{
- char *tmpenv, *envvar;
- char *envsave = NULL;
- int ret = 0;
- assert(callback != NULL);
-
- if ((envlist == NULL) || (env == NULL))
- return (EINVAL);
-
- tmpenv = g_strdup(env);
- envsave = tmpenv;
-
- do {
- envvar = strchr(tmpenv, ',');
- if (envvar != NULL) {
- *envvar = '\0';
- }
- if ((*callback)(envlist, tmpenv) != 0) {
- ret = errno;
- break;
- }
- tmpenv = envvar + 1;
- } while (envvar != NULL);
-
- g_free(envsave);
- return ret;
-}
-
-/*
* Sets environment value to envlist in similar manner
* than putenv(3).
*
diff --git a/util/error.c b/util/error.c
index e5e2472..daea214 100644
--- a/util/error.c
+++ b/util/error.c
@@ -15,15 +15,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-
-struct Error
-{
- char *msg;
- ErrorClass err_class;
- const char *src, *func;
- int line;
- GString *hint;
-};
+#include "qapi/error-internal.h"
Error *error_abort;
Error *error_fatal;
@@ -32,8 +24,13 @@ Error *error_warn;
static void error_handle(Error **errp, Error *err)
{
if (errp == &error_abort) {
- fprintf(stderr, "Unexpected error in %s() at %s:%d:\n",
- err->func, err->src, err->line);
+ if (err->func) {
+ fprintf(stderr, "Unexpected error in %s() at %.*s:%d:\n",
+ err->func, err->src_len, err->src, err->line);
+ } else {
+ fprintf(stderr, "Unexpected error at %.*s:%d:\n",
+ err->src_len, err->src, err->line);
+ }
error_report("%s", error_get_pretty(err));
if (err->hint) {
error_printf("%s", err->hint->str);
@@ -75,6 +72,7 @@ static void error_setv(Error **errp,
g_free(msg);
}
err->err_class = err_class;
+ err->src_len = -1;
err->src = src;
err->line = line;
err->func = func;
@@ -247,6 +245,17 @@ void warn_report_err(Error *err)
error_free(err);
}
+bool warn_report_err_once_cond(bool *printed, Error *err)
+{
+ if (*printed) {
+ error_free(err);
+ return false;
+ }
+ *printed = true;
+ warn_report_err(err);
+ return true;
+}
+
void error_reportf_err(Error *err, const char *fmt, ...)
{
va_list ap;
diff --git a/util/event.c b/util/event.c
new file mode 100644
index 0000000..5a8141c
--- /dev/null
+++ b/util/event.c
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+
+/*
+ * Valid transitions:
+ * - FREE -> SET (qemu_event_set)
+ * - BUSY -> SET (qemu_event_set)
+ * - SET -> FREE (qemu_event_reset)
+ * - FREE -> BUSY (qemu_event_wait)
+ *
+ * With futex, the waking and blocking operations follow
+ * BUSY -> SET and FREE -> BUSY, respectively.
+ *
+ * Without futex, BUSY -> SET and FREE -> BUSY never happen. Instead, the waking
+ * operation follows FREE -> SET and the blocking operation will happen in
+ * qemu_event_wait() if the event is not SET.
+ *
+ * SET->BUSY does not happen (it can be observed from the outside but
+ * it really is SET->FREE->BUSY).
+ *
+ * busy->free provably cannot happen; to enforce it, the set->free transition
+ * is done with an OR, which becomes a no-op if the event has concurrently
+ * transitioned to free or busy.
+ */
+
+#define EV_SET 0
+#define EV_FREE 1
+#define EV_BUSY -1
+
+void qemu_event_init(QemuEvent *ev, bool init)
+{
+#ifndef HAVE_FUTEX
+ pthread_mutex_init(&ev->lock, NULL);
+ pthread_cond_init(&ev->cond, NULL);
+#endif
+
+ ev->value = (init ? EV_SET : EV_FREE);
+ ev->initialized = true;
+}
+
+void qemu_event_destroy(QemuEvent *ev)
+{
+ assert(ev->initialized);
+ ev->initialized = false;
+#ifndef HAVE_FUTEX
+ pthread_mutex_destroy(&ev->lock);
+ pthread_cond_destroy(&ev->cond);
+#endif
+}
+
+void qemu_event_set(QemuEvent *ev)
+{
+ assert(ev->initialized);
+
+#ifdef HAVE_FUTEX
+ /*
+ * Pairs with both qemu_event_reset() and qemu_event_wait().
+ *
+ * qemu_event_set has release semantics, but because it *loads*
+ * ev->value we need a full memory barrier here.
+ */
+ smp_mb();
+ if (qatomic_read(&ev->value) != EV_SET) {
+ int old = qatomic_xchg(&ev->value, EV_SET);
+
+ /* Pairs with memory barrier in kernel futex_wait system call. */
+ smp_mb__after_rmw();
+ if (old == EV_BUSY) {
+ /* There were waiters, wake them up. */
+ qemu_futex_wake_all(ev);
+ }
+ }
+#else
+ pthread_mutex_lock(&ev->lock);
+ /* Pairs with qemu_event_reset()'s load acquire. */
+ qatomic_store_release(&ev->value, EV_SET);
+ pthread_cond_broadcast(&ev->cond);
+ pthread_mutex_unlock(&ev->lock);
+#endif
+}
+
+void qemu_event_reset(QemuEvent *ev)
+{
+ assert(ev->initialized);
+
+#ifdef HAVE_FUTEX
+ /*
+ * If there was a concurrent reset (or even reset+wait),
+ * do nothing. Otherwise change EV_SET->EV_FREE.
+ */
+ qatomic_or(&ev->value, EV_FREE);
+
+ /*
+ * Order reset before checking the condition in the caller.
+ * Pairs with the first memory barrier in qemu_event_set().
+ */
+ smp_mb__after_rmw();
+#else
+ /*
+ * If futexes are not available, there are no EV_FREE->EV_BUSY
+ * transitions because wakeups are done entirely through the
+ * condition variable. Since qatomic_set() only writes EV_FREE,
+ * the load seems useless but in reality, the acquire synchronizes
+ * with qemu_event_set()'s store release: if qemu_event_reset()
+ * sees EV_SET here, then the caller will certainly see a
+ * successful condition and skip qemu_event_wait():
+ *
+ * done = 1; if (done == 0)
+ * qemu_event_set() { qemu_event_reset() {
+ * lock();
+ * ev->value = EV_SET -----> load ev->value
+ * ev->value = old value | EV_FREE
+ * cond_broadcast()
+ * unlock(); }
+ * } if (done == 0)
+ * // qemu_event_wait() not called
+ */
+ qatomic_set(&ev->value, qatomic_load_acquire(&ev->value) | EV_FREE);
+#endif
+}
+
+void qemu_event_wait(QemuEvent *ev)
+{
+ assert(ev->initialized);
+
+#ifdef HAVE_FUTEX
+ while (true) {
+ /*
+ * qemu_event_wait must synchronize with qemu_event_set even if it does
+ * not go down the slow path, so this load-acquire is needed that
+ * synchronizes with the first memory barrier in qemu_event_set().
+ */
+ unsigned value = qatomic_load_acquire(&ev->value);
+ if (value == EV_SET) {
+ break;
+ }
+
+ if (value == EV_FREE) {
+ /*
+ * Leave the event reset and tell qemu_event_set that there are
+ * waiters. No need to retry, because there cannot be a concurrent
+ * busy->free transition. After the CAS, the event will be either
+ * set or busy.
+ *
+ * This cmpxchg doesn't have particular ordering requirements if it
+ * succeeds (moving the store earlier can only cause
+ * qemu_event_set() to issue _more_ wakeups), the failing case needs
+ * acquire semantics like the load above.
+ */
+ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
+ break;
+ }
+ }
+
+ /*
+ * This is the final check for a concurrent set, so it does need
+ * a smp_mb() pairing with the second barrier of qemu_event_set().
+ * The barrier is inside the FUTEX_WAIT system call.
+ */
+ qemu_futex_wait(ev, EV_BUSY);
+ }
+#else
+ pthread_mutex_lock(&ev->lock);
+ while (qatomic_read(&ev->value) != EV_SET) {
+ pthread_cond_wait(&ev->cond, &ev->lock);
+ }
+ pthread_mutex_unlock(&ev->lock);
+#endif
+}
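The new util/event.c keeps a futex fast path and a mutex/condvar fallback. A standalone sketch of the portable fallback path follows; it collapses the SET/FREE/BUSY states into a single flag and uses illustrative names, so it is a simplification of the code above, not a drop-in equivalent.

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool set;
} event_t;

static void event_init(event_t *ev, bool init)
{
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
    ev->set = init;
}

static void event_set(event_t *ev)
{
    pthread_mutex_lock(&ev->lock);
    ev->set = true;
    pthread_cond_broadcast(&ev->cond);   /* wake every waiter */
    pthread_mutex_unlock(&ev->lock);
}

static void event_reset(event_t *ev)
{
    pthread_mutex_lock(&ev->lock);
    ev->set = false;
    pthread_mutex_unlock(&ev->lock);
}

static void event_wait(event_t *ev)
{
    pthread_mutex_lock(&ev->lock);
    while (!ev->set) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}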
diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c
index c6413cb..9fb8800 100644
--- a/util/fdmon-epoll.c
+++ b/util/fdmon-epoll.c
@@ -5,6 +5,7 @@
#include "qemu/osdep.h"
#include <sys/epoll.h>
+#include "qemu/lockcnt.h"
#include "qemu/rcu_queue.h"
#include "aio-posix.h"
diff --git a/util/fifo8.c b/util/fifo8.c
index 4e01b53..a26da66 100644
--- a/util/fifo8.c
+++ b/util/fifo8.c
@@ -16,12 +16,17 @@
#include "migration/vmstate.h"
#include "qemu/fifo8.h"
+void fifo8_reset(Fifo8 *fifo)
+{
+ fifo->num = 0;
+ fifo->head = 0;
+}
+
void fifo8_create(Fifo8 *fifo, uint32_t capacity)
{
fifo->data = g_new(uint8_t, capacity);
fifo->capacity = capacity;
- fifo->head = 0;
- fifo->num = 0;
+ fifo8_reset(fifo);
}
void fifo8_destroy(Fifo8 *fifo)
@@ -66,18 +71,27 @@ uint8_t fifo8_pop(Fifo8 *fifo)
return ret;
}
-static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max,
- uint32_t *numptr, bool do_pop)
+uint8_t fifo8_peek(Fifo8 *fifo)
+{
+ assert(fifo->num > 0);
+ return fifo->data[fifo->head];
+}
+
+static const uint8_t *fifo8_peekpop_bufptr(Fifo8 *fifo, uint32_t max,
+ uint32_t skip, uint32_t *numptr,
+ bool do_pop)
{
uint8_t *ret;
- uint32_t num;
+ uint32_t num, head;
assert(max > 0 && max <= fifo->num);
- num = MIN(fifo->capacity - fifo->head, max);
- ret = &fifo->data[fifo->head];
+ assert(skip <= fifo->num);
+ head = (fifo->head + skip) % fifo->capacity;
+ num = MIN(fifo->capacity - head, max);
+ ret = &fifo->data[head];
if (do_pop) {
- fifo->head += num;
+ fifo->head = head + num;
fifo->head %= fifo->capacity;
fifo->num -= num;
}
@@ -87,20 +101,60 @@ static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max,
return ret;
}
-const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
+const uint8_t *fifo8_peek_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
{
- return fifo8_peekpop_buf(fifo, max, numptr, false);
+ return fifo8_peekpop_bufptr(fifo, max, 0, numptr, false);
}
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
+const uint8_t *fifo8_pop_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
{
- return fifo8_peekpop_buf(fifo, max, numptr, true);
+ return fifo8_peekpop_bufptr(fifo, max, 0, numptr, true);
}
-void fifo8_reset(Fifo8 *fifo)
+static uint32_t fifo8_peekpop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen,
+ bool do_pop)
{
- fifo->num = 0;
- fifo->head = 0;
+ const uint8_t *buf;
+ uint32_t n1, n2 = 0;
+ uint32_t len;
+
+ if (destlen == 0) {
+ return 0;
+ }
+
+ len = destlen;
+ buf = fifo8_peekpop_bufptr(fifo, len, 0, &n1, do_pop);
+ if (dest) {
+ memcpy(dest, buf, n1);
+ }
+
+ /* Add FIFO wraparound if needed */
+ len -= n1;
+ len = MIN(len, fifo8_num_used(fifo));
+ if (len) {
+ buf = fifo8_peekpop_bufptr(fifo, len, do_pop ? 0 : n1, &n2, do_pop);
+ if (dest) {
+ memcpy(&dest[n1], buf, n2);
+ }
+ }
+
+ return n1 + n2;
+}
+
+uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen)
+{
+ return fifo8_peekpop_buf(fifo, dest, destlen, true);
+}
+
+uint32_t fifo8_peek_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen)
+{
+ return fifo8_peekpop_buf(fifo, dest, destlen, false);
+}
+
+void fifo8_drop(Fifo8 *fifo, uint32_t len)
+{
+ len -= fifo8_pop_buf(fifo, NULL, len);
+ assert(len == 0);
}
bool fifo8_is_empty(Fifo8 *fifo)
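A hypothetical caller sketch (not part of the diff) showing what the copying fifo8_pop_buf() above buys: it hides the wraparound that the renamed *_bufptr() variants still expose. fifo8_push() is the pre-existing single-byte helper from the same header.

#include "qemu/osdep.h"
#include "qemu/fifo8.h"

static void demo_fifo8_wraparound(void)
{
    Fifo8 fifo;
    uint8_t out[8];
    uint32_t n;

    fifo8_create(&fifo, 8);

    /* Fill, drop half, refill: the live data now wraps around the end. */
    for (uint8_t i = 0; i < 8; i++) {
        fifo8_push(&fifo, i);
    }
    fifo8_drop(&fifo, 4);               /* head moves to index 4 */
    for (uint8_t i = 8; i < 12; i++) {
        fifo8_push(&fifo, i);           /* stored at indexes 0..3 */
    }

    /*
     * fifo8_pop_bufptr() could only return the contiguous tail (4 bytes);
     * the copying variant returns all 8 bytes in order in one call.
     */
    n = fifo8_pop_buf(&fifo, out, sizeof(out));
    assert(n == 8 && out[0] == 4 && out[7] == 11);

    fifo8_destroy(&fifo);
}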
diff --git a/util/getauxval.c b/util/getauxval.c
index b124107..0735cd8 100644
--- a/util/getauxval.c
+++ b/util/getauxval.c
@@ -95,16 +95,20 @@ unsigned long qemu_getauxval(unsigned long type)
}
}
+ errno = ENOENT;
return 0;
}
-#elif defined(__FreeBSD__)
+#elif defined(CONFIG_ELF_AUX_INFO)
#include <sys/auxv.h>
unsigned long qemu_getauxval(unsigned long type)
{
unsigned long aux = 0;
- elf_aux_info(type, &aux, sizeof(aux));
+ int ret = elf_aux_info(type, &aux, sizeof(aux));
+ if (ret != 0) {
+ errno = ret;
+ }
return aux;
}
@@ -112,6 +116,7 @@ unsigned long qemu_getauxval(unsigned long type)
unsigned long qemu_getauxval(unsigned long type)
{
+ errno = ENOSYS;
return 0;
}
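A hypothetical caller sketch (not part of the diff): with errno now set on failure, a legitimate zero value can be told apart from a missing auxv entry. AT_HWCAP and the headers named below are assumptions.

#include "qemu/osdep.h"   /* assumed to declare qemu_getauxval() */
#include "elf.h"          /* assumed to provide AT_HWCAP */

static bool host_has_hwcap_entry(void)
{
    unsigned long hwcap;

    errno = 0;
    hwcap = qemu_getauxval(AT_HWCAP);
    if (hwcap == 0 && errno != 0) {
        /* ENOENT: entry absent; ENOSYS: no auxv support on this host */
        return false;
    }
    return true;
}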
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 6d6e1b5..d9a1dab 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -949,7 +949,7 @@ char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
char *hash = NULL;
- qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);
+ qcrypto_hash_digest(QCRYPTO_HASH_ALGO_SHA256, data, size, &hash, errp);
return hash;
}
diff --git a/util/hexdump.c b/util/hexdump.c
index ae0d499..f29ffce 100644
--- a/util/hexdump.c
+++ b/util/hexdump.c
@@ -15,6 +15,7 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
+#include "qemu/host-utils.h"
static inline char hexdump_nibble(unsigned x)
{
@@ -97,3 +98,20 @@ void qemu_hexdump(FILE *fp, const char *prefix,
}
}
+
+void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size,
+ const uint8_t *restrict data, size_t data_size)
+{
+ size_t i;
+ uint64_t required_buffer_size;
+ bool overflow = umul64_overflow(data_size, 2, &required_buffer_size);
+ overflow |= uadd64_overflow(required_buffer_size, 1, &required_buffer_size);
+ assert(!overflow && buffer_size >= required_buffer_size);
+
+ for (i = 0; i < data_size; i++) {
+ uint8_t val = data[i];
+ *(buffer++) = hexdump_nibble(val >> 4);
+ *(buffer++) = hexdump_nibble(val & 0xf);
+ }
+ *buffer = '\0';
+}
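A hypothetical caller sketch (not part of the diff) illustrating the buffer-size contract that the overflow-checked assertion above enforces: two hex digits per input byte plus a trailing NUL.

#include "qemu/osdep.h"
#include "qemu/cutils.h"   /* assumed to declare qemu_hexdump_to_buffer() */

static void demo_hexdump_to_buffer(void)
{
    const uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };
    char hex[sizeof(data) * 2 + 1];   /* 2 * data_size + 1, as asserted */

    qemu_hexdump_to_buffer(hex, sizeof(hex), data, sizeof(data));
    /* hex now holds "deadbeef" */
}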
diff --git a/util/iov.c b/util/iov.c
index 7e73948..f8536f0 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -3,6 +3,7 @@
*
* Copyright IBM, Corp. 2007, 2008
* Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
*
* Author(s):
* Anthony Liguori <aliguori@us.ibm.com>
@@ -36,7 +37,6 @@ size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
offset -= iov[i].iov_len;
}
}
- assert(offset == 0);
return done;
}
@@ -55,7 +55,6 @@ size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt,
offset -= iov[i].iov_len;
}
}
- assert(offset == 0);
return done;
}
@@ -74,7 +73,6 @@ size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
offset -= iov[i].iov_len;
}
}
- assert(offset == 0);
return done;
}
@@ -92,7 +90,8 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
/* helper function for iov_send_recv() */
static ssize_t
-do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
+do_send_recv(int sockfd, int flags, struct iovec *iov, unsigned iov_cnt,
+ bool do_send)
{
#ifdef CONFIG_POSIX
ssize_t ret;
@@ -102,8 +101,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
msg.msg_iovlen = iov_cnt;
do {
ret = do_send
- ? sendmsg(sockfd, &msg, 0)
- : recvmsg(sockfd, &msg, 0);
+ ? sendmsg(sockfd, &msg, flags)
+ : recvmsg(sockfd, &msg, flags);
} while (ret < 0 && errno == EINTR);
return ret;
#else
@@ -114,8 +113,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
ssize_t off = 0;
while (i < iov_cnt) {
ssize_t r = do_send
- ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0)
- : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0);
+ ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags)
+ : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags);
if (r > 0) {
ret += r;
off += r;
@@ -145,6 +144,15 @@ ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt,
size_t offset, size_t bytes,
bool do_send)
{
+ return iov_send_recv_with_flags(sockfd, 0, _iov, iov_cnt, offset, bytes,
+ do_send);
+}
+
+ssize_t iov_send_recv_with_flags(int sockfd, int sockflags,
+ const struct iovec *_iov,
+ unsigned iov_cnt, size_t offset,
+ size_t bytes, bool do_send)
+{
ssize_t total = 0;
ssize_t ret;
size_t orig_len, tail;
@@ -192,11 +200,11 @@ ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt,
assert(iov[niov].iov_len > tail);
orig_len = iov[niov].iov_len;
iov[niov++].iov_len = tail;
- ret = do_send_recv(sockfd, iov, niov, do_send);
+ ret = do_send_recv(sockfd, sockflags, iov, niov, do_send);
/* Undo the changes above before checking for errors */
iov[niov-1].iov_len = orig_len;
} else {
- ret = do_send_recv(sockfd, iov, niov, do_send);
+ ret = do_send_recv(sockfd, sockflags, iov, niov, do_send);
}
if (offset) {
iov[0].iov_base -= offset;
@@ -266,7 +274,6 @@ unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
bytes -= len;
offset = 0;
}
- assert(offset == 0);
return j;
}
@@ -337,7 +344,6 @@ size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
soffset -= src_iov[i].iov_len;
}
}
- assert(soffset == 0); /* offset beyond end of src */
return done;
}
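A hypothetical caller sketch (not part of the diff): the new _with_flags variant forwards its flags argument straight to sendmsg()/recvmsg() (or send()/recv() on the non-POSIX path), so a caller can, for example, peek at queued data without consuming it. The fd is assumed to be a connected socket and MSG_PEEK to come from the usual socket headers.

#include "qemu/osdep.h"
#include "qemu/iov.h"

static ssize_t peek_bytes(int fd, void *buf, size_t len)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };

    /* do_send = false; MSG_PEEK leaves the data queued for a later read */
    return iov_send_recv_with_flags(fd, MSG_PEEK, &iov, 1, 0, len, false);
}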
diff --git a/util/iova-tree.c b/util/iova-tree.c
index 5367897..5b0c95f 100644
--- a/util/iova-tree.c
+++ b/util/iova-tree.c
@@ -115,13 +115,6 @@ const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map)
return args.result;
}
-const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova)
-{
- const DMAMap map = { .iova = iova, .size = 0 };
-
- return iova_tree_find(tree, &map);
-}
-
static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range)
{
/* Key and value are sharing the same range data */
@@ -148,22 +141,6 @@ int iova_tree_insert(IOVATree *tree, const DMAMap *map)
return IOVA_OK;
}
-static gboolean iova_tree_traverse(gpointer key, gpointer value,
- gpointer data)
-{
- iova_tree_iterator iterator = data;
- DMAMap *map = key;
-
- g_assert(key == value);
-
- return iterator(map);
-}
-
-void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator)
-{
- g_tree_foreach(tree->tree, iova_tree_traverse, iterator);
-}
-
void iova_tree_remove(IOVATree *tree, DMAMap map)
{
const DMAMap *overlap;
@@ -280,3 +257,49 @@ void iova_tree_destroy(IOVATree *tree)
g_tree_destroy(tree->tree);
g_free(tree);
}
+
+static int gpa_tree_compare(gconstpointer a, gconstpointer b, gpointer data)
+{
+ const DMAMap *m1 = a, *m2 = b;
+
+ if (m1->translated_addr > m2->translated_addr + m2->size) {
+ return 1;
+ }
+
+ if (m1->translated_addr + m1->size < m2->translated_addr) {
+ return -1;
+ }
+
+ /* Overlapped */
+ return 0;
+}
+
+IOVATree *gpa_tree_new(void)
+{
+ IOVATree *gpa_tree = g_new0(IOVATree, 1);
+
+ gpa_tree->tree = g_tree_new_full(gpa_tree_compare, NULL, g_free, NULL);
+
+ return gpa_tree;
+}
+
+int gpa_tree_insert(IOVATree *tree, const DMAMap *map)
+{
+ DMAMap *new;
+
+ if (map->translated_addr + map->size < map->translated_addr ||
+ map->perm == IOMMU_NONE) {
+ return IOVA_ERR_INVALID;
+ }
+
+ /* We don't allow inserting ranges that overlap with existing ones */
+ if (iova_tree_find(tree, map)) {
+ return IOVA_ERR_OVERLAP;
+ }
+
+ new = g_new0(DMAMap, 1);
+ memcpy(new, map, sizeof(*new));
+ iova_tree_insert_internal(tree->tree, new);
+
+ return IOVA_OK;
+}
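A hypothetical caller sketch (not part of the diff) for the new GPA-keyed tree, which orders and overlap-checks on translated_addr rather than iova. DMAMap sizes are inclusive, as elsewhere in the IOVATree API; IOMMU_RW is the usual access-flag value from the memory API.

#include "qemu/osdep.h"
#include "qemu/iova-tree.h"

static void demo_gpa_tree(void)
{
    IOVATree *gpa_tree = gpa_tree_new();
    DMAMap map = {
        .translated_addr = 0x100000,
        .size = 0xfff,          /* inclusive: covers 0x1000 bytes */
        .perm = IOMMU_RW,       /* gpa_tree_insert() rejects IOMMU_NONE */
    };

    if (gpa_tree_insert(gpa_tree, &map) != IOVA_OK) {
        /* invalid or overlapping guest-physical range */
    }
    iova_tree_destroy(gpa_tree);
}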
diff --git a/util/keyval.c b/util/keyval.c
index 66a5b47..a70629a 100644
--- a/util/keyval.c
+++ b/util/keyval.c
@@ -91,9 +91,9 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
+#include "qobject/qstring.h"
#include "qemu/cutils.h"
#include "qemu/keyval.h"
#include "qemu/help_option.h"
diff --git a/util/lockcnt.c b/util/lockcnt.c
index 5da3694..92c9f8c 100644
--- a/util/lockcnt.c
+++ b/util/lockcnt.c
@@ -7,14 +7,16 @@
* Paolo Bonzini <pbonzini@redhat.com>
*/
#include "qemu/osdep.h"
+#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "trace.h"
-#ifdef CONFIG_LINUX
-#include "qemu/futex.h"
+#ifdef HAVE_FUTEX
-/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
+/*
+ * When futex is available, bits 0-1 are a futex-based lock, bits 2-31 are the
+ * counter.
* For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
* this is not the most relaxing citation I could make...). It is similar
* to mutex2 in the paper.
@@ -105,7 +107,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
static void lockcnt_wake(QemuLockCnt *lockcnt)
{
trace_lockcnt_futex_wake(lockcnt);
- qemu_futex_wake(&lockcnt->count, 1);
+ qemu_futex_wake_single(&lockcnt->count);
}
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
diff --git a/util/log.c b/util/log.c
index 6219819..b87d399 100644
--- a/util/log.c
+++ b/util/log.c
@@ -503,6 +503,8 @@ const QEMULogItem qemu_log_items[] = {
"open a separate log file per thread; filename must contain '%d'" },
{ CPU_LOG_TB_VPU, "vpu",
"include VPU registers in the 'cpu' logging" },
+ { LOG_INVALID_MEM, "invalid_mem",
+ "log invalid memory accesses" },
{ 0, NULL, NULL },
};
diff --git a/util/main-loop.c b/util/main-loop.c
index a0386cf..51aeb24 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -26,8 +26,9 @@
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/timer.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/replay.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
+#include "system/replay.h"
#include "qemu/main-loop.h"
#include "block/aio.h"
#include "block/thread-pool.h"
@@ -212,7 +213,6 @@ static void main_loop_init(EventLoopBase *base, Error **errp)
main_loop_update_params(base, errp);
mloop = m;
- return;
}
static bool main_loop_can_be_deleted(EventLoopBase *base)
@@ -220,7 +220,7 @@ static bool main_loop_can_be_deleted(EventLoopBase *base)
return false;
}
-static void main_loop_class_init(ObjectClass *oc, void *class_data)
+static void main_loop_class_init(ObjectClass *oc, const void *class_data)
{
EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
diff --git a/util/memfd.c b/util/memfd.c
index 4a3c07e..07beab1 100644
--- a/util/memfd.c
+++ b/util/memfd.c
@@ -28,6 +28,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "qemu/host-utils.h"
@@ -149,11 +150,15 @@ err:
void qemu_memfd_free(void *ptr, size_t size, int fd)
{
if (ptr) {
- munmap(ptr, size);
+ if (munmap(ptr, size) != 0) {
+ error_report("memfd munmap() failed: %s", strerror(errno));
+ }
}
if (fd != -1) {
- close(fd);
+ if (close(fd) != 0) {
+ error_report("memfd close() failed: %s", strerror(errno));
+ }
}
}
@@ -189,17 +194,27 @@ bool qemu_memfd_alloc_check(void)
/**
* qemu_memfd_check():
*
- * Check if host supports memfd.
+ * Check if host supports memfd. Cache the answer for the common case flags=0.
*/
bool qemu_memfd_check(unsigned int flags)
{
#ifdef CONFIG_LINUX
- int mfd = memfd_create("test", flags | MFD_CLOEXEC);
+ int mfd;
+ static int memfd_check = MEMFD_TODO;
+ if (!flags && memfd_check != MEMFD_TODO) {
+ return memfd_check;
+ }
+
+ mfd = memfd_create("test", flags | MFD_CLOEXEC);
if (mfd >= 0) {
close(mfd);
- return true;
}
+ if (!flags) {
+ memfd_check = (mfd >= 0) ? MEMFD_OK : MEMFD_KO;
+ }
+ return (mfd >= 0);
+
#endif
return false;
diff --git a/util/meson.build b/util/meson.build
index 5d8bef9..3502938 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -11,7 +11,9 @@ if host_os != 'windows'
endif
util_ss.add(files('compatfd.c'))
util_ss.add(files('event_notifier-posix.c'))
- util_ss.add(files('mmap-alloc.c'))
+ if host_os != 'emscripten'
+ util_ss.add(files('mmap-alloc.c'))
+ endif
freebsd_dep = []
if host_os == 'freebsd'
freebsd_dep = util
@@ -25,7 +27,7 @@ else
util_ss.add(files('event_notifier-win32.c'))
util_ss.add(files('oslib-win32.c'))
util_ss.add(files('qemu-thread-win32.c'))
- util_ss.add(winmm, pathcch)
+ util_ss.add(winmm, pathcch, synchronization)
endif
util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c'))
if glib_has_gslice
@@ -33,6 +35,7 @@ if glib_has_gslice
endif
util_ss.add(files('defer-call.c'))
util_ss.add(files('envlist.c', 'path.c', 'module.c'))
+util_ss.add(files('event.c'))
util_ss.add(files('host-utils.c'))
util_ss.add(files('bitmap.c', 'bitops.c'))
util_ss.add(files('fifo8.c'))
@@ -84,6 +87,8 @@ if have_block or have_ga
util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c'))
util_ss.add(files(f'coroutine-@coroutine_backend@.c'))
util_ss.add(files('thread-pool.c', 'qemu-timer.c'))
+endif
+if have_block or have_ga or have_user
util_ss.add(files('qemu-sockets.c'))
endif
if have_block
@@ -129,4 +134,6 @@ elif cpu in ['ppc', 'ppc64']
util_ss.add(files('cpuinfo-ppc.c'))
elif cpu in ['riscv32', 'riscv64']
util_ss.add(files('cpuinfo-riscv.c'))
+elif cpu == 's390x'
+ util_ss.add(files('s390x_pci_mmio.c'))
endif
diff --git a/util/module.c b/util/module.c
index 32e2631..1aa2079 100644
--- a/util/module.c
+++ b/util/module.c
@@ -234,7 +234,7 @@ int module_load(const char *prefix, const char *name, Error **errp)
search_dir = getenv("QEMU_MODULE_DIR");
if (search_dir != NULL) {
- dirs[n_dirs++] = g_strdup_printf("%s", search_dir);
+ dirs[n_dirs++] = g_strdup(search_dir);
}
dirs[n_dirs++] = get_relocated_path(CONFIG_QEMU_MODDIR);
@@ -354,13 +354,13 @@ int module_load_qom(const char *type, Error **errp)
void module_load_qom_all(void)
{
const QemuModinfo *modinfo;
- Error *local_err = NULL;
if (module_loaded_qom_all) {
return;
}
for (modinfo = module_info; modinfo->name != NULL; modinfo++) {
+ Error *local_err = NULL;
if (!modinfo->objs) {
continue;
}
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index b090fe0..4ff577e 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -31,7 +31,7 @@
#include <glib/gprintf.h>
-#include "sysemu/sysemu.h"
+#include "system/system.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -58,6 +58,7 @@
#include <lwp.h>
#endif
+#include "qemu/memalign.h"
#include "qemu/mmap-alloc.h"
#define MAX_MEM_PREALLOC_THREAD_COUNT 16
@@ -111,6 +112,21 @@ int qemu_get_thread_id(void)
#endif
}
+int qemu_kill_thread(int tid, int sig)
+{
+#if defined(__linux__)
+ return syscall(__NR_tgkill, getpid(), tid, sig);
+#elif defined(__FreeBSD__)
+ return thr_kill2(getpid(), tid, sig);
+#elif defined(__NetBSD__)
+ return _lwp_kill(tid, sig);
+#elif defined(__OpenBSD__)
+ return thrkill(tid, sig, NULL);
+#else
+ return kill(tid, sig);
+#endif
+}
+
int qemu_daemon(int nochdir, int noclose)
{
return daemon(nochdir, noclose);
@@ -195,11 +211,21 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared,
const uint32_t qemu_map_flags = (shared ? QEMU_MAP_SHARED : 0) |
(noreserve ? QEMU_MAP_NORESERVE : 0);
size_t align = QEMU_VMALLOC_ALIGN;
+#ifndef EMSCRIPTEN
void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0);
if (ptr == MAP_FAILED) {
return NULL;
}
+#else
+ /*
+ * qemu_ram_mmap is not implemented for Emscripten. Use qemu_memalign
+ * for the anonymous allocation. noreserve is ignored as there is no swap
+ * space on Emscripten, and shared is ignored as there is no other
+ * processes on Emscripten.
+ */
+ void *ptr = qemu_memalign(align, size);
+#endif
if (alignment) {
*alignment = align;
@@ -212,7 +238,16 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared,
void qemu_anon_ram_free(void *ptr, size_t size)
{
trace_qemu_anon_ram_free(ptr, size);
+#ifndef EMSCRIPTEN
qemu_ram_munmap(-1, ptr, size);
+#else
+ /*
+ * qemu_ram_munmap is not implemented for Emscripten and qemu_memalign
+ * was used for the allocation. Use the corresponding freeing function
+ * here.
+ */
+ qemu_vfree(ptr);
+#endif
}
void qemu_socket_set_block(int fd)
@@ -573,7 +608,15 @@ bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
{
static gsize initialized;
int ret;
+#ifndef EMSCRIPTEN
size_t hpagesize = qemu_fd_getpagesize(fd);
+#else
+ /*
+ * mmap-alloc.c is excluded from the Emscripten build, so qemu_fd_getpagesize
+ * is unavailable. Fall back to the lower-level implementation.
+ */
+ size_t hpagesize = qemu_real_host_page_size();
+#endif
size_t numpages = DIV_ROUND_UP(sz, hpagesize);
bool use_madv_populate_write;
struct sigaction act;
@@ -807,3 +850,179 @@ int qemu_msync(void *addr, size_t length, int fd)
return msync(addr, length, MS_SYNC);
}
+
+static bool qemu_close_all_open_fd_proc(const int *skip, unsigned int nskip)
+{
+ struct dirent *de;
+ int fd, dfd;
+ DIR *dir;
+ unsigned int skip_start = 0, skip_end = nskip;
+
+ dir = opendir("/proc/self/fd");
+ if (!dir) {
+ /* If /proc is not mounted, there is nothing that can be done. */
+ return false;
+ }
+ /* Avoid closing the directory. */
+ dfd = dirfd(dir);
+
+ for (de = readdir(dir); de; de = readdir(dir)) {
+ bool close_fd = true;
+
+ if (de->d_name[0] == '.') {
+ continue;
+ }
+ fd = atoi(de->d_name);
+ if (fd == dfd) {
+ continue;
+ }
+
+ for (unsigned int i = skip_start; i < skip_end; i++) {
+ if (fd < skip[i]) {
+ /* We are below the next skipped fd, break */
+ break;
+ } else if (fd == skip[i]) {
+ close_fd = false;
+ /* Restrict the range as we found fds matching start/end */
+ if (i == skip_start) {
+ skip_start++;
+ } else if (i == skip_end) {
+ skip_end--;
+ }
+ break;
+ }
+ }
+
+ if (close_fd) {
+ close(fd);
+ }
+ }
+ closedir(dir);
+
+ return true;
+}
+
+static bool qemu_close_all_open_fd_close_range(const int *skip,
+ unsigned int nskip,
+ int open_max)
+{
+#ifdef CONFIG_CLOSE_RANGE
+ int max_fd = open_max - 1;
+ int first = 0, last;
+ unsigned int cur_skip = 0;
+ int ret;
+
+ do {
+ /* Find the start boundary of the range to close */
+ while (cur_skip < nskip && first == skip[cur_skip]) {
+ cur_skip++;
+ first++;
+ }
+
+ /* Find the upper boundary of the range to close */
+ last = max_fd;
+ if (cur_skip < nskip) {
+ last = skip[cur_skip] - 1;
+ last = MIN(last, max_fd);
+ }
+
+ /* With the adjustments to the range, we might be done. */
+ if (first > last) {
+ break;
+ }
+
+ ret = close_range(first, last, 0);
+ if (ret < 0) {
+ return false;
+ }
+
+ first = last + 1;
+ } while (last < max_fd);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+static void qemu_close_all_open_fd_fallback(const int *skip, unsigned int nskip,
+ int open_max)
+{
+ unsigned int cur_skip = 0;
+
+ /* Fallback */
+ for (int i = 0; i < open_max; i++) {
+ if (cur_skip < nskip && i == skip[cur_skip]) {
+ cur_skip++;
+ continue;
+ }
+ close(i);
+ }
+}
+
+/*
+ * Close all open file descriptors.
+ */
+void qemu_close_all_open_fd(const int *skip, unsigned int nskip)
+{
+ int open_max = sysconf(_SC_OPEN_MAX);
+
+ assert(skip != NULL || nskip == 0);
+
+ if (!qemu_close_all_open_fd_close_range(skip, nskip, open_max) &&
+ !qemu_close_all_open_fd_proc(skip, nskip)) {
+ qemu_close_all_open_fd_fallback(skip, nskip, open_max);
+ }
+}
+
+int qemu_shm_alloc(size_t size, Error **errp)
+{
+ g_autoptr(GString) shm_name = g_string_new(NULL);
+ int fd, oflag, cur_sequence;
+ static int sequence;
+ mode_t mode;
+
+ cur_sequence = qatomic_fetch_inc(&sequence);
+
+ /*
+ * Let's use `mode = 0` because we don't want other processes to open our
+ * memory unless we share the file descriptor with them.
+ */
+ mode = 0;
+ oflag = O_RDWR | O_CREAT | O_EXCL;
+
+ /*
+ * Some operating systems allow creating anonymous POSIX shared memory
+ * objects (e.g. FreeBSD provides the SHM_ANON constant), but this is not
+ * defined by POSIX, so let's create a unique name.
+ *
+ * From Linux's shm_open(3) man-page:
+ * For portable use, a shared memory object should be identified
+ * by a name of the form /somename;
+ */
+ g_string_printf(shm_name, "/qemu-" FMT_pid "-shm-%d", getpid(),
+ cur_sequence);
+
+ fd = shm_open(shm_name->str, oflag, mode);
+ if (fd < 0) {
+ error_setg_errno(errp, errno,
+ "failed to create POSIX shared memory");
+ return -1;
+ }
+
+ /*
+ * We have the file descriptor, so we no longer need to expose the
+ * POSIX shared memory object. However it will remain allocated as long as
+ * there are file descriptors pointing to it.
+ */
+ shm_unlink(shm_name->str);
+
+ if (ftruncate(fd, size) == -1) {
+ error_setg_errno(errp, errno,
+ "failed to resize POSIX shared memory to %zu", size);
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
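A hypothetical caller sketch (not part of the diff) for qemu_close_all_open_fd(): the skip list is assumed to be sorted in ascending order, which the range-narrowing logic in the helpers above relies on.

#include "qemu/osdep.h"   /* assumed to declare qemu_close_all_open_fd() */

/* Close everything this process has open except stderr and one kept fd. */
static void drop_fds_except(int keep_fd)
{
    const int skip[] = { STDERR_FILENO, keep_fd };   /* sorted ascending */

    assert(keep_fd > STDERR_FILENO);
    qemu_close_all_open_fd(skip, G_N_ELEMENTS(skip));
}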
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index b623830..b735163 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -877,3 +877,9 @@ void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp)
}
CloseHandle(h);
}
+
+int qemu_shm_alloc(size_t size, Error **errp)
+{
+ error_setg(errp, "Shared memory is not supported.");
+ return -1;
+}
diff --git a/util/qemu-co-shared-resource.c b/util/qemu-co-shared-resource.c
index a66cc07..752eb5a 100644
--- a/util/qemu-co-shared-resource.c
+++ b/util/qemu-co-shared-resource.c
@@ -66,12 +66,6 @@ static bool co_try_get_from_shres_locked(SharedResource *s, uint64_t n)
return false;
}
-bool co_try_get_from_shres(SharedResource *s, uint64_t n)
-{
- QEMU_LOCK_GUARD(&s->lock);
- return co_try_get_from_shres_locked(s, n);
-}
-
void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
{
assert(n <= s->total);
diff --git a/util/qemu-config.c b/util/qemu-config.c
index a90c18d..d1fc49c 100644
--- a/util/qemu-config.c
+++ b/util/qemu-config.c
@@ -1,8 +1,8 @@
#include "qemu/osdep.h"
#include "block/qdict.h" /* for qdict_extract_subqdict() */
#include "qapi/error.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qlist.h"
+#include "qobject/qdict.h"
+#include "qobject/qlist.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index eb4eebe..64d6264 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -136,7 +136,7 @@ static Coroutine *coroutine_pool_get_local(void)
static void coroutine_pool_refill_local(void)
{
CoroutinePool *local_pool = get_ptr_local_pool();
- CoroutinePoolBatch *batch;
+ CoroutinePoolBatch *batch = NULL;
WITH_QEMU_LOCK_GUARD(&global_pool_lock) {
batch = QSLIST_FIRST(&global_pool);
diff --git a/util/qemu-option.c b/util/qemu-option.c
index 201f7a8..770300df 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -27,10 +27,10 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi/qmp/qnum.h"
-#include "qapi/qmp/qstring.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
+#include "qobject/qnum.h"
+#include "qobject/qstring.h"
#include "qapi/qmp/qerror.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 60c44b2..4773755 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -30,6 +30,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qemu/cutils.h"
+#include "qemu/option.h"
#include "trace.h"
#ifndef AI_ADDRCONFIG
@@ -44,6 +45,14 @@
# define AI_NUMERICSERV 0
#endif
+/*
+ * On macOS TCP_KEEPIDLE is available under a different name, TCP_KEEPALIVE.
+ * https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/bsd/man/man4/tcp.4#L172
+ */
+#if defined(TCP_KEEPALIVE) && !defined(TCP_KEEPIDLE)
+# define TCP_KEEPIDLE TCP_KEEPALIVE
+#endif
+
static int inet_getport(struct addrinfo *e)
{
@@ -205,6 +214,58 @@ static int try_bind(int socket, InetSocketAddress *saddr, struct addrinfo *e)
#endif
}
+static int inet_set_sockopts(int sock, InetSocketAddress *saddr, Error **errp)
+{
+ if (saddr->keep_alive) {
+ int keep_alive = 1;
+ int ret = setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+ &keep_alive, sizeof(keep_alive));
+
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Unable to set keep-alive option on socket");
+ return -1;
+ }
+#ifdef HAVE_TCP_KEEPCNT
+ if (saddr->has_keep_alive_count && saddr->keep_alive_count) {
+ int keep_count = saddr->keep_alive_count;
+ ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, &keep_count,
+ sizeof(keep_count));
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Unable to set TCP keep-alive count option on socket");
+ return -1;
+ }
+ }
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ if (saddr->has_keep_alive_idle && saddr->keep_alive_idle) {
+ int keep_idle = saddr->keep_alive_idle;
+ ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &keep_idle,
+ sizeof(keep_idle));
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Unable to set TCP keep-alive idle option on socket");
+ return -1;
+ }
+ }
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ if (saddr->has_keep_alive_interval && saddr->keep_alive_interval) {
+ int keep_interval = saddr->keep_alive_interval;
+ ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, &keep_interval,
+ sizeof(keep_interval));
+ if (ret < 0) {
+ error_setg_errno(errp, errno,
+ "Unable to set TCP keep-alive interval option on socket");
+ return -1;
+ }
+ }
+#endif
+ }
+ return 0;
+}
+
static int inet_listen_saddr(InetSocketAddress *saddr,
int port_offset,
int num,
@@ -220,12 +281,6 @@ static int inet_listen_saddr(InetSocketAddress *saddr,
int saved_errno = 0;
bool socket_created = false;
- if (saddr->keep_alive) {
- error_setg(errp, "keep-alive option is not supported for passive "
- "sockets");
- return -1;
- }
-
memset(&ai,0, sizeof(ai));
ai.ai_flags = AI_PASSIVE;
if (saddr->has_numeric && saddr->numeric) {
@@ -287,11 +342,20 @@ static int inet_listen_saddr(InetSocketAddress *saddr,
port_min = inet_getport(e);
port_max = saddr->has_to ? saddr->to + port_offset : port_min;
for (p = port_min; p <= port_max; p++) {
+ if (slisten >= 0) {
+ /*
+ * We have a socket left over from the previous port attempt. It
+ * cannot be rebound, so we need to close it and create a new one.
+ */
+ close(slisten);
+ slisten = -1;
+ }
inet_setport(e, p);
slisten = create_fast_reuse_socket(e);
if (slisten < 0) {
- /* First time we expect we might fail to create the socket
+ /*
+ * First time we expect we might fail to create the socket
* eg if 'e' has AF_INET6 but ipv6 kmod is not loaded.
* Later iterations should always succeed if first iteration
* worked though, so treat that as fatal.
@@ -301,40 +365,41 @@ static int inet_listen_saddr(InetSocketAddress *saddr,
} else {
error_setg_errno(errp, errno,
"Failed to recreate failed listening socket");
- goto listen_failed;
+ goto fail;
}
}
socket_created = true;
rc = try_bind(slisten, saddr, e);
if (rc < 0) {
- if (errno != EADDRINUSE) {
- error_setg_errno(errp, errno, "Failed to bind socket");
- goto listen_failed;
- }
- } else {
- if (!listen(slisten, num)) {
- goto listen_ok;
+ if (errno == EADDRINUSE) {
+ /* This port is already used, try the next one */
+ continue;
}
- if (errno != EADDRINUSE) {
- error_setg_errno(errp, errno, "Failed to listen on socket");
- goto listen_failed;
+ error_setg_errno(errp, errno, "Failed to bind socket");
+ goto fail;
+ }
+ if (listen(slisten, num)) {
+ if (errno == EADDRINUSE) {
+ /* This port is already used, try the next one */
+ continue;
}
+ error_setg_errno(errp, errno, "Failed to listen on socket");
+ goto fail;
+ }
+ /* We have a listening socket */
+ if (inet_set_sockopts(slisten, saddr, errp) < 0) {
+ goto fail;
}
- /* Someone else managed to bind to the same port and beat us
- * to listen on it! Socket semantics does not allow us to
- * recover from this situation, so we need to recreate the
- * socket to allow bind attempts for subsequent ports:
- */
- close(slisten);
- slisten = -1;
+ freeaddrinfo(res);
+ return slisten;
}
}
error_setg_errno(errp, errno,
socket_created ?
"Failed to find an available port" :
"Failed to create a socket");
-listen_failed:
+fail:
saved_errno = errno;
if (slisten >= 0) {
close(slisten);
@@ -342,10 +407,6 @@ listen_failed:
freeaddrinfo(res);
errno = saved_errno;
return -1;
-
-listen_ok:
- freeaddrinfo(res);
- return slisten;
}
#ifdef _WIN32
@@ -367,7 +428,6 @@ static int inet_connect_addr(const InetSocketAddress *saddr,
addr->ai_family);
return -1;
}
- socket_set_fast_reuse(sock);
/* connect to peer */
do {
@@ -476,16 +536,9 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp)
return sock;
}
- if (saddr->keep_alive) {
- int val = 1;
- int ret = setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
- &val, sizeof(val));
-
- if (ret < 0) {
- error_setg_errno(errp, errno, "Unable to set KEEPALIVE");
- close(sock);
- return -1;
- }
+ if (inet_set_sockopts(sock, saddr, errp) < 0) {
+ close(sock);
+ return -1;
}
return sock;
@@ -592,141 +645,146 @@ err:
return -1;
}
-/* compatibility wrapper */
-static int inet_parse_flag(const char *flagname, const char *optstr, bool *val,
- Error **errp)
-{
- char *end;
- size_t len;
-
- end = strstr(optstr, ",");
- if (end) {
- if (end[1] == ',') { /* Reject 'ipv6=on,,foo' */
- error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr);
- return -1;
- }
- len = end - optstr;
- } else {
- len = strlen(optstr);
- }
- if (len == 0 || (len == 3 && strncmp(optstr, "=on", len) == 0)) {
- *val = true;
- } else if (len == 4 && strncmp(optstr, "=off", len) == 0) {
- *val = false;
- } else {
- error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr);
- return -1;
- }
- return 0;
-}
+static QemuOptsList inet_opts = {
+ .name = "InetSocketAddress",
+ .head = QTAILQ_HEAD_INITIALIZER(inet_opts.head),
+ .implied_opt_name = "addr",
+ .desc = {
+ {
+ .name = "addr",
+ .type = QEMU_OPT_STRING,
+ },
+ {
+ .name = "numeric",
+ .type = QEMU_OPT_BOOL,
+ },
+ {
+ .name = "to",
+ .type = QEMU_OPT_NUMBER,
+ },
+ {
+ .name = "ipv4",
+ .type = QEMU_OPT_BOOL,
+ },
+ {
+ .name = "ipv6",
+ .type = QEMU_OPT_BOOL,
+ },
+ {
+ .name = "keep-alive",
+ .type = QEMU_OPT_BOOL,
+ },
+#ifdef HAVE_TCP_KEEPCNT
+ {
+ .name = "keep-alive-count",
+ .type = QEMU_OPT_NUMBER,
+ },
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ {
+ .name = "keep-alive-idle",
+ .type = QEMU_OPT_NUMBER,
+ },
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ {
+ .name = "keep-alive-interval",
+ .type = QEMU_OPT_NUMBER,
+ },
+#endif
+#ifdef HAVE_IPPROTO_MPTCP
+ {
+ .name = "mptcp",
+ .type = QEMU_OPT_BOOL,
+ },
+#endif
+ { /* end of list */ }
+ },
+};
int inet_parse(InetSocketAddress *addr, const char *str, Error **errp)
{
- const char *optstr, *h;
- char host[65];
- char port[33];
- int to;
- int pos;
- char *begin;
-
+ QemuOpts *opts = qemu_opts_parse(&inet_opts, str, true, errp);
+ if (!opts) {
+ return -1;
+ }
memset(addr, 0, sizeof(*addr));
/* parse address */
- if (str[0] == ':') {
- /* no host given */
- host[0] = '\0';
- if (sscanf(str, ":%32[^,]%n", port, &pos) != 1) {
- error_setg(errp, "error parsing port in address '%s'", str);
- return -1;
- }
- } else if (str[0] == '[') {
+ const char *addr_str = qemu_opt_get(opts, "addr");
+ if (!addr_str) {
+ error_setg(errp, "error parsing address ''");
+ return -1;
+ }
+ if (str[0] == '[') {
/* IPv6 addr */
- if (sscanf(str, "[%64[^]]]:%32[^,]%n", host, port, &pos) != 2) {
- error_setg(errp, "error parsing IPv6 address '%s'", str);
+ const char *ip_end = strstr(addr_str, "]:");
+ if (!ip_end || ip_end - addr_str < 2 || strlen(ip_end) < 3) {
+ error_setg(errp, "error parsing IPv6 address '%s'", addr_str);
return -1;
}
+ addr->host = g_strndup(addr_str + 1, ip_end - addr_str - 1);
+ addr->port = g_strdup(ip_end + 2);
} else {
- /* hostname or IPv4 addr */
- if (sscanf(str, "%64[^:]:%32[^,]%n", host, port, &pos) != 2) {
- error_setg(errp, "error parsing address '%s'", str);
+ /* no host, hostname or IPv4 addr */
+ const char *port = strchr(addr_str, ':');
+ if (!port || strlen(port) < 2) {
+ error_setg(errp, "error parsing address '%s'", addr_str);
return -1;
}
+ addr->host = g_strndup(addr_str, port - addr_str);
+ addr->port = g_strdup(port + 1);
}
- addr->host = g_strdup(host);
- addr->port = g_strdup(port);
-
/* parse options */
- optstr = str + pos;
- h = strstr(optstr, ",to=");
- if (h) {
- h += 4;
- if (sscanf(h, "%d%n", &to, &pos) != 1 ||
- (h[pos] != '\0' && h[pos] != ',')) {
- error_setg(errp, "error parsing to= argument");
- return -1;
- }
+ if (qemu_opt_find(opts, "numeric")) {
+ addr->has_numeric = true;
+ addr->numeric = qemu_opt_get_bool(opts, "numeric", false);
+ }
+ if (qemu_opt_find(opts, "to")) {
addr->has_to = true;
- addr->to = to;
+ addr->to = qemu_opt_get_number(opts, "to", 0);
}
- begin = strstr(optstr, ",ipv4");
- if (begin) {
- if (inet_parse_flag("ipv4", begin + 5, &addr->ipv4, errp) < 0) {
- return -1;
- }
+ if (qemu_opt_find(opts, "ipv4")) {
addr->has_ipv4 = true;
+ addr->ipv4 = qemu_opt_get_bool(opts, "ipv4", false);
}
- begin = strstr(optstr, ",ipv6");
- if (begin) {
- if (inet_parse_flag("ipv6", begin + 5, &addr->ipv6, errp) < 0) {
- return -1;
- }
+ if (qemu_opt_find(opts, "ipv6")) {
addr->has_ipv6 = true;
+ addr->ipv6 = qemu_opt_get_bool(opts, "ipv6", false);
}
- begin = strstr(optstr, ",keep-alive");
- if (begin) {
- if (inet_parse_flag("keep-alive", begin + strlen(",keep-alive"),
- &addr->keep_alive, errp) < 0)
- {
- return -1;
- }
+ if (qemu_opt_find(opts, "keep-alive")) {
addr->has_keep_alive = true;
+ addr->keep_alive = qemu_opt_get_bool(opts, "keep-alive", false);
+ }
+#ifdef HAVE_TCP_KEEPCNT
+ if (qemu_opt_find(opts, "keep-alive-count")) {
+ addr->has_keep_alive_count = true;
+ addr->keep_alive_count = qemu_opt_get_number(opts, "keep-alive-count", 0);
+ }
+#endif
+#ifdef HAVE_TCP_KEEPIDLE
+ if (qemu_opt_find(opts, "keep-alive-idle")) {
+ addr->has_keep_alive_idle = true;
+ addr->keep_alive_idle = qemu_opt_get_number(opts, "keep-alive-idle", 0);
}
+#endif
+#ifdef HAVE_TCP_KEEPINTVL
+ if (qemu_opt_find(opts, "keep-alive-interval")) {
+ addr->has_keep_alive_interval = true;
+ addr->keep_alive_interval = qemu_opt_get_number(opts, "keep-alive-interval", 0);
+ }
+#endif
#ifdef HAVE_IPPROTO_MPTCP
- begin = strstr(optstr, ",mptcp");
- if (begin) {
- if (inet_parse_flag("mptcp", begin + strlen(",mptcp"),
- &addr->mptcp, errp) < 0)
- {
- return -1;
- }
+ if (qemu_opt_find(opts, "mptcp")) {
addr->has_mptcp = true;
+ addr->mptcp = qemu_opt_get_bool(opts, "mptcp", 0);
}
#endif
return 0;
}
-/**
- * Create a blocking socket and connect it to an address.
- *
- * @str: address string
- * @errp: set in case of an error
- *
- * Returns -1 in case of error, file descriptor on success
- **/
-int inet_connect(const char *str, Error **errp)
-{
- int sock = -1;
- InetSocketAddress *addr = g_new(InetSocketAddress, 1);
-
- if (!inet_parse(addr, str, errp)) {
- sock = inet_connect_saddr(addr, errp);
- }
- qapi_free_InetSocketAddress(addr);
- return sock;
-}
-
#ifdef CONFIG_AF_VSOCK
static bool vsock_parse_vaddr_to_sockaddr(const VsockSocketAddress *vaddr,
struct sockaddr_vm *svm,
@@ -1421,21 +1479,6 @@ SocketAddress *socket_local_address(int fd, Error **errp)
}
-SocketAddress *socket_remote_address(int fd, Error **errp)
-{
- struct sockaddr_storage ss;
- socklen_t sslen = sizeof(ss);
-
- if (getpeername(fd, (struct sockaddr *)&ss, &sslen) < 0) {
- error_setg_errno(errp, errno, "%s",
- "Unable to query remote socket address");
- return NULL;
- }
-
- return socket_sockaddr_to_address(&ss, sslen, errp);
-}
-
-
SocketAddress *socket_address_flatten(SocketAddressLegacy *addr_legacy)
{
SocketAddress *addr;
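A hypothetical caller sketch (not part of the diff) with address strings the QemuOpts-based parser above accepts; the keep-alive-* tunables are only recognised when the corresponding HAVE_TCP_KEEP* macros are defined, and the g_strdup()ed host/port fields are leaked here for brevity.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"

static void demo_inet_parse(void)
{
    InetSocketAddress addr;
    Error *err = NULL;

    /* hostname/IPv4 form with the new keep-alive tunables */
    if (inet_parse(&addr, "example.org:5900,keep-alive=on,keep-alive-idle=60",
                   &err)) {
        error_report_err(err);
        return;
    }

    /* IPv6 form: address in brackets, options after the port */
    if (inet_parse(&addr, "[::1]:5900,ipv6=on,to=5910", &err)) {
        error_report_err(err);
    }
}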
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index b2e26e2..ba72544 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -317,154 +317,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
qemu_mutex_unlock(&sem->mutex);
}
-#ifdef __linux__
-#include "qemu/futex.h"
-#else
-static inline void qemu_futex_wake(QemuEvent *ev, int n)
-{
- assert(ev->initialized);
- pthread_mutex_lock(&ev->lock);
- if (n == 1) {
- pthread_cond_signal(&ev->cond);
- } else {
- pthread_cond_broadcast(&ev->cond);
- }
- pthread_mutex_unlock(&ev->lock);
-}
-
-static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
-{
- assert(ev->initialized);
- pthread_mutex_lock(&ev->lock);
- if (ev->value == val) {
- pthread_cond_wait(&ev->cond, &ev->lock);
- }
- pthread_mutex_unlock(&ev->lock);
-}
-#endif
-
-/* Valid transitions:
- * - free->set, when setting the event
- * - busy->set, when setting the event, followed by qemu_futex_wake
- * - set->free, when resetting the event
- * - free->busy, when waiting
- *
- * set->busy does not happen (it can be observed from the outside but
- * it really is set->free->busy).
- *
- * busy->free provably cannot happen; to enforce it, the set->free transition
- * is done with an OR, which becomes a no-op if the event has concurrently
- * transitioned to free or busy.
- */
-
-#define EV_SET 0
-#define EV_FREE 1
-#define EV_BUSY -1
-
-void qemu_event_init(QemuEvent *ev, bool init)
-{
-#ifndef __linux__
- pthread_mutex_init(&ev->lock, NULL);
- pthread_cond_init(&ev->cond, NULL);
-#endif
-
- ev->value = (init ? EV_SET : EV_FREE);
- ev->initialized = true;
-}
-
-void qemu_event_destroy(QemuEvent *ev)
-{
- assert(ev->initialized);
- ev->initialized = false;
-#ifndef __linux__
- pthread_mutex_destroy(&ev->lock);
- pthread_cond_destroy(&ev->cond);
-#endif
-}
-
-void qemu_event_set(QemuEvent *ev)
-{
- assert(ev->initialized);
-
- /*
- * Pairs with both qemu_event_reset() and qemu_event_wait().
- *
- * qemu_event_set has release semantics, but because it *loads*
- * ev->value we need a full memory barrier here.
- */
- smp_mb();
- if (qatomic_read(&ev->value) != EV_SET) {
- int old = qatomic_xchg(&ev->value, EV_SET);
-
- /* Pairs with memory barrier in kernel futex_wait system call. */
- smp_mb__after_rmw();
- if (old == EV_BUSY) {
- /* There were waiters, wake them up. */
- qemu_futex_wake(ev, INT_MAX);
- }
- }
-}
-
-void qemu_event_reset(QemuEvent *ev)
-{
- assert(ev->initialized);
-
- /*
- * If there was a concurrent reset (or even reset+wait),
- * do nothing. Otherwise change EV_SET->EV_FREE.
- */
- qatomic_or(&ev->value, EV_FREE);
-
- /*
- * Order reset before checking the condition in the caller.
- * Pairs with the first memory barrier in qemu_event_set().
- */
- smp_mb__after_rmw();
-}
-
-void qemu_event_wait(QemuEvent *ev)
-{
- unsigned value;
-
- assert(ev->initialized);
-
- /*
- * qemu_event_wait must synchronize with qemu_event_set even if it does
- * not go down the slow path, so this load-acquire is needed that
- * synchronizes with the first memory barrier in qemu_event_set().
- *
- * If we do go down the slow path, there is no requirement at all: we
- * might miss a qemu_event_set() here but ultimately the memory barrier in
- * qemu_futex_wait() will ensure the check is done correctly.
- */
- value = qatomic_load_acquire(&ev->value);
- if (value != EV_SET) {
- if (value == EV_FREE) {
- /*
- * Leave the event reset and tell qemu_event_set that there are
- * waiters. No need to retry, because there cannot be a concurrent
- * busy->free transition. After the CAS, the event will be either
- * set or busy.
- *
- * This cmpxchg doesn't have particular ordering requirements if it
- * succeeds (moving the store earlier can only cause qemu_event_set()
- * to issue _more_ wakeups), the failing case needs acquire semantics
- * like the load above.
- */
- if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
- return;
- }
- }
-
- /*
- * This is the final check for a concurrent set, so it does need
- * a smp_mb() pairing with the second barrier of qemu_event_set().
- * The barrier is inside the FUTEX_WAIT system call.
- */
- qemu_futex_wait(ev, EV_BUSY);
- }
-}
-
static __thread NotifierList thread_exit;
/*
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index a7fe3cc..ca2e0b5 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -231,135 +231,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
}
}
-/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
- * is to reset the Win32 event lazily, as part of a test-reset-test-wait
- * sequence. Such a sequence is, indeed, how QemuEvents are used by
- * RCU and other subsystems!
- *
- * Valid transitions:
- * - free->set, when setting the event
- * - busy->set, when setting the event, followed by SetEvent
- * - set->free, when resetting the event
- * - free->busy, when waiting
- *
- * set->busy does not happen (it can be observed from the outside but
- * it really is set->free->busy).
- *
- * busy->free provably cannot happen; to enforce it, the set->free transition
- * is done with an OR, which becomes a no-op if the event has concurrently
- * transitioned to free or busy (and is faster than cmpxchg).
- */
-
-#define EV_SET 0
-#define EV_FREE 1
-#define EV_BUSY -1
-
-void qemu_event_init(QemuEvent *ev, bool init)
-{
- /* Manual reset. */
- ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
- ev->value = (init ? EV_SET : EV_FREE);
- ev->initialized = true;
-}
-
-void qemu_event_destroy(QemuEvent *ev)
-{
- assert(ev->initialized);
- ev->initialized = false;
- CloseHandle(ev->event);
-}
-
-void qemu_event_set(QemuEvent *ev)
-{
- assert(ev->initialized);
-
- /*
- * Pairs with both qemu_event_reset() and qemu_event_wait().
- *
- * qemu_event_set has release semantics, but because it *loads*
- * ev->value we need a full memory barrier here.
- */
- smp_mb();
- if (qatomic_read(&ev->value) != EV_SET) {
- int old = qatomic_xchg(&ev->value, EV_SET);
-
- /* Pairs with memory barrier after ResetEvent. */
- smp_mb__after_rmw();
- if (old == EV_BUSY) {
- /* There were waiters, wake them up. */
- SetEvent(ev->event);
- }
- }
-}
-
-void qemu_event_reset(QemuEvent *ev)
-{
- assert(ev->initialized);
-
- /*
- * If there was a concurrent reset (or even reset+wait),
- * do nothing. Otherwise change EV_SET->EV_FREE.
- */
- qatomic_or(&ev->value, EV_FREE);
-
- /*
- * Order reset before checking the condition in the caller.
- * Pairs with the first memory barrier in qemu_event_set().
- */
- smp_mb__after_rmw();
-}
-
-void qemu_event_wait(QemuEvent *ev)
-{
- unsigned value;
-
- assert(ev->initialized);
-
- /*
- * qemu_event_wait must synchronize with qemu_event_set even if it does
- * not go down the slow path, so this load-acquire is needed that
- * synchronizes with the first memory barrier in qemu_event_set().
- *
- * If we do go down the slow path, there is no requirement at all: we
- * might miss a qemu_event_set() here but ultimately the memory barrier in
- * qemu_futex_wait() will ensure the check is done correctly.
- */
- value = qatomic_load_acquire(&ev->value);
- if (value != EV_SET) {
- if (value == EV_FREE) {
- /*
- * Here the underlying kernel event is reset, but qemu_event_set is
- * not yet going to call SetEvent. However, there will be another
- * check for EV_SET below when setting EV_BUSY. At that point it
- * is safe to call WaitForSingleObject.
- */
- ResetEvent(ev->event);
-
- /*
- * It is not clear whether ResetEvent provides this barrier; kernel
- * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
- */
- smp_mb();
-
- /*
- * Leave the event reset and tell qemu_event_set that there are
- * waiters. No need to retry, because there cannot be a concurrent
- * busy->free transition. After the CAS, the event will be either
- * set or busy.
- */
- if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
- return;
- }
- }
-
- /*
- * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
- * qemu_event_set() must observe EV_BUSY and call SetEvent().
- */
- WaitForSingleObject(ev->event, INFINITE);
- }
-}
-
struct QemuThreadData {
/* Passed to win32_start_routine. */
void *(*start_routine)(void *);
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index 213114b..1fb48be 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -26,9 +26,10 @@
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/lockable.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/replay.h"
-#include "sysemu/cpus.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
+#include "system/replay.h"
+#include "system/cpus.h"
#ifdef CONFIG_POSIX
#include <pthread.h>
@@ -182,7 +183,7 @@ bool qemu_clock_has_timers(QEMUClockType type)
bool timerlist_expired(QEMUTimerList *timer_list)
{
- int64_t expire_time;
+ int64_t expire_time = 0;
if (!qatomic_read(&timer_list->active_timers)) {
return false;
@@ -212,7 +213,7 @@ bool qemu_clock_expired(QEMUClockType type)
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
{
int64_t delta;
- int64_t expire_time;
+ int64_t expire_time = 0;
if (!qatomic_read(&timer_list->active_timers)) {
return -1;
@@ -286,16 +287,6 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
return deadline;
}
-QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
-{
- return timer_list->clock->type;
-}
-
-QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
-{
- return main_loop_tlg.tl[type];
-}
-
void timerlist_notify(QEMUTimerList *timer_list)
{
if (timer_list->notify_cb) {
@@ -419,10 +410,6 @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list,
static void timerlist_rearm(QEMUTimerList *timer_list)
{
- /* Interrupt execution to force deadline recalculation. */
- if (icount_enabled() && timer_list->clock->type == QEMU_CLOCK_VIRTUAL) {
- icount_start_warp_timer();
- }
timerlist_notify(timer_list);
}
@@ -461,7 +448,7 @@ void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
{
QEMUTimerList *timer_list = ts->timer_list;
- bool rearm;
+ bool rearm = false;
WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
if (ts->expire_time == -1 || ts->expire_time > expire_time) {
diff --git a/util/qht.c b/util/qht.c
index 92c6b78..208c2f4 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -367,7 +367,6 @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
qht_map_lock_buckets(map);
qht_unlock(ht);
*pmap = map;
- return;
}
/*
diff --git a/util/rcu.c b/util/rcu.c
index fa32c94..b703c86 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -20,8 +20,8 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * License along with this library; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
diff --git a/util/s390x_pci_mmio.c b/util/s390x_pci_mmio.c
new file mode 100644
index 0000000..5ab24fa
--- /dev/null
+++ b/util/s390x_pci_mmio.c
@@ -0,0 +1,146 @@
+/*
+ * s390x PCI MMIO definitions
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include <sys/syscall.h>
+#include "qemu/s390x_pci_mmio.h"
+#include "elf.h"
+
+union register_pair {
+ unsigned __int128 pair;
+ struct {
+ uint64_t even;
+ uint64_t odd;
+ };
+};
+
+static bool is_mio_supported;
+
+static __attribute__((constructor)) void check_is_mio_supported(void)
+{
+ is_mio_supported = !!(qemu_getauxval(AT_HWCAP) & HWCAP_S390_PCI_MIO);
+}
+
+static uint64_t s390x_pcilgi(const void *ioaddr, size_t len)
+{
+ union register_pair ioaddr_len = { .even = (uint64_t)ioaddr,
+ .odd = len };
+ uint64_t val;
+ int cc;
+
+ asm volatile(
+ /* pcilgi */
+ ".insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
+ "ipm %[cc]\n"
+ "srl %[cc],28\n"
+ : [cc] "=d"(cc), [val] "=d"(val),
+ [ioaddr_len] "+d"(ioaddr_len.pair) :: "cc");
+
+ if (cc) {
+ val = -1ULL;
+ }
+
+ return val;
+}
+
+static void s390x_pcistgi(void *ioaddr, uint64_t val, size_t len)
+{
+ union register_pair ioaddr_len = {.even = (uint64_t)ioaddr, .odd = len};
+
+ asm volatile (
+ /* pcistgi */
+ ".insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
+ : [ioaddr_len] "+d" (ioaddr_len.pair)
+ : [val] "d" (val)
+ : "cc", "memory");
+}
+
+uint8_t s390x_pci_mmio_read_8(const void *ioaddr)
+{
+ uint8_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint16_t s390x_pci_mmio_read_16(const void *ioaddr)
+{
+ uint16_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint32_t s390x_pci_mmio_read_32(const void *ioaddr)
+{
+ uint32_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint64_t s390x_pci_mmio_read_64(const void *ioaddr)
+{
+ uint64_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
diff --git a/util/thread-context.c b/util/thread-context.c
index 2bc7883..0146154 100644
--- a/util/thread-context.c
+++ b/util/thread-context.c
@@ -273,7 +273,7 @@ static void thread_context_instance_complete(UserCreatable *uc, Error **errp)
}
}
-static void thread_context_class_init(ObjectClass *oc, void *data)
+static void thread_context_class_init(ObjectClass *oc, const void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
@@ -319,7 +319,7 @@ static const TypeInfo thread_context_info = {
.instance_size = sizeof(ThreadContext),
.instance_init = thread_context_instance_init,
.instance_finalize = thread_context_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 27eb777..d2ead6b 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -23,9 +23,9 @@
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
-static void do_spawn_thread(ThreadPool *pool);
+static void do_spawn_thread(ThreadPoolAio *pool);
-typedef struct ThreadPoolElement ThreadPoolElement;
+typedef struct ThreadPoolElementAio ThreadPoolElementAio;
enum ThreadState {
THREAD_QUEUED,
@@ -33,9 +33,9 @@ enum ThreadState {
THREAD_DONE,
};
-struct ThreadPoolElement {
+struct ThreadPoolElementAio {
BlockAIOCB common;
- ThreadPool *pool;
+ ThreadPoolAio *pool;
ThreadPoolFunc *func;
void *arg;
@@ -47,13 +47,13 @@ struct ThreadPoolElement {
int ret;
/* Access to this list is protected by lock. */
- QTAILQ_ENTRY(ThreadPoolElement) reqs;
+ QTAILQ_ENTRY(ThreadPoolElementAio) reqs;
/* This list is only written by the thread pool's mother thread. */
- QLIST_ENTRY(ThreadPoolElement) all;
+ QLIST_ENTRY(ThreadPoolElementAio) all;
};
-struct ThreadPool {
+struct ThreadPoolAio {
AioContext *ctx;
QEMUBH *completion_bh;
QemuMutex lock;
@@ -62,10 +62,10 @@ struct ThreadPool {
QEMUBH *new_thread_bh;
/* The following variables are only accessed from one AioContext. */
- QLIST_HEAD(, ThreadPoolElement) head;
+ QLIST_HEAD(, ThreadPoolElementAio) head;
/* The following variables are protected by lock. */
- QTAILQ_HEAD(, ThreadPoolElement) request_list;
+ QTAILQ_HEAD(, ThreadPoolElementAio) request_list;
int cur_threads;
int idle_threads;
int new_threads; /* backlog of threads we need to create */
@@ -76,14 +76,14 @@ struct ThreadPool {
static void *worker_thread(void *opaque)
{
- ThreadPool *pool = opaque;
+ ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock);
pool->pending_threads--;
do_spawn_thread(pool);
while (pool->cur_threads <= pool->max_threads) {
- ThreadPoolElement *req;
+ ThreadPoolElementAio *req;
int ret;
if (QTAILQ_EMPTY(&pool->request_list)) {
@@ -131,7 +131,7 @@ static void *worker_thread(void *opaque)
return NULL;
}
-static void do_spawn_thread(ThreadPool *pool)
+static void do_spawn_thread(ThreadPoolAio *pool)
{
QemuThread t;
@@ -148,14 +148,14 @@ static void do_spawn_thread(ThreadPool *pool)
static void spawn_thread_bh_fn(void *opaque)
{
- ThreadPool *pool = opaque;
+ ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock);
do_spawn_thread(pool);
qemu_mutex_unlock(&pool->lock);
}
-static void spawn_thread(ThreadPool *pool)
+static void spawn_thread(ThreadPoolAio *pool)
{
pool->cur_threads++;
pool->new_threads++;
@@ -173,8 +173,8 @@ static void spawn_thread(ThreadPool *pool)
static void thread_pool_completion_bh(void *opaque)
{
- ThreadPool *pool = opaque;
- ThreadPoolElement *elem, *next;
+ ThreadPoolAio *pool = opaque;
+ ThreadPoolElementAio *elem, *next;
defer_call_begin(); /* cb() may use defer_call() to coalesce work */
@@ -184,8 +184,8 @@ restart:
continue;
}
- trace_thread_pool_complete(pool, elem, elem->common.opaque,
- elem->ret);
+ trace_thread_pool_complete_aio(pool, elem, elem->common.opaque,
+ elem->ret);
QLIST_REMOVE(elem, all);
if (elem->common.cb) {
@@ -217,10 +217,10 @@ restart:
static void thread_pool_cancel(BlockAIOCB *acb)
{
- ThreadPoolElement *elem = (ThreadPoolElement *)acb;
- ThreadPool *pool = elem->pool;
+ ThreadPoolElementAio *elem = (ThreadPoolElementAio *)acb;
+ ThreadPoolAio *pool = elem->pool;
- trace_thread_pool_cancel(elem, elem->common.opaque);
+ trace_thread_pool_cancel_aio(elem, elem->common.opaque);
QEMU_LOCK_GUARD(&pool->lock);
if (elem->state == THREAD_QUEUED) {
@@ -234,16 +234,16 @@ static void thread_pool_cancel(BlockAIOCB *acb)
}
static const AIOCBInfo thread_pool_aiocb_info = {
- .aiocb_size = sizeof(ThreadPoolElement),
+ .aiocb_size = sizeof(ThreadPoolElementAio),
.cancel_async = thread_pool_cancel,
};
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque)
{
- ThreadPoolElement *req;
+ ThreadPoolElementAio *req;
AioContext *ctx = qemu_get_current_aio_context();
- ThreadPool *pool = aio_get_thread_pool(ctx);
+ ThreadPoolAio *pool = aio_get_thread_pool(ctx);
/* Assert that the thread submitting work is the same running the pool */
assert(pool->ctx == qemu_get_current_aio_context());
@@ -256,7 +256,7 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
QLIST_INSERT_HEAD(&pool->head, req, all);
- trace_thread_pool_submit(pool, req, arg);
+ trace_thread_pool_submit_aio(pool, req, arg);
qemu_mutex_lock(&pool->lock);
if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
@@ -290,12 +290,7 @@ int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
return tpc.ret;
}
-void thread_pool_submit(ThreadPoolFunc *func, void *arg)
-{
- thread_pool_submit_aio(func, arg, NULL, NULL);
-}
-
-void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
+void thread_pool_update_params(ThreadPoolAio *pool, AioContext *ctx)
{
qemu_mutex_lock(&pool->lock);
@@ -322,7 +317,7 @@ void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
qemu_mutex_unlock(&pool->lock);
}
-static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
+static void thread_pool_init_one(ThreadPoolAio *pool, AioContext *ctx)
{
if (!ctx) {
ctx = qemu_get_aio_context();
@@ -342,14 +337,14 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
thread_pool_update_params(pool, ctx);
}
-ThreadPool *thread_pool_new(AioContext *ctx)
+ThreadPoolAio *thread_pool_new_aio(AioContext *ctx)
{
- ThreadPool *pool = g_new(ThreadPool, 1);
+ ThreadPoolAio *pool = g_new(ThreadPoolAio, 1);
thread_pool_init_one(pool, ctx);
return pool;
}
-void thread_pool_free(ThreadPool *pool)
+void thread_pool_free_aio(ThreadPoolAio *pool)
{
if (!pool) {
return;
@@ -379,3 +374,122 @@ void thread_pool_free(ThreadPool *pool)
qemu_mutex_destroy(&pool->lock);
g_free(pool);
}
+
+struct ThreadPool {
+ GThreadPool *t;
+ size_t cur_work;
+ QemuMutex cur_work_lock;
+ QemuCond all_finished_cond;
+};
+
+typedef struct {
+ ThreadPoolFunc *func;
+ void *opaque;
+ GDestroyNotify opaque_destroy;
+} ThreadPoolElement;
+
+static void thread_pool_func(gpointer data, gpointer user_data)
+{
+ ThreadPool *pool = user_data;
+ g_autofree ThreadPoolElement *el = data;
+
+ el->func(el->opaque);
+
+ if (el->opaque_destroy) {
+ el->opaque_destroy(el->opaque);
+ }
+
+ QEMU_LOCK_GUARD(&pool->cur_work_lock);
+
+ assert(pool->cur_work > 0);
+ pool->cur_work--;
+
+ if (pool->cur_work == 0) {
+ qemu_cond_signal(&pool->all_finished_cond);
+ }
+}
+
+ThreadPool *thread_pool_new(void)
+{
+ ThreadPool *pool = g_new(ThreadPool, 1);
+
+ pool->cur_work = 0;
+ qemu_mutex_init(&pool->cur_work_lock);
+ qemu_cond_init(&pool->all_finished_cond);
+
+ pool->t = g_thread_pool_new(thread_pool_func, pool, 0, TRUE, NULL);
+ /*
+ * g_thread_pool_new() can only fail if creation of the initial thread(s)
+ * fails, but we ask for 0 initial threads above.
+ */
+ assert(pool->t);
+
+ return pool;
+}
+
+void thread_pool_free(ThreadPool *pool)
+{
+ /*
+ * With wait_ = TRUE this effectively waits for all
+ * previously submitted work to complete first.
+ */
+ g_thread_pool_free(pool->t, FALSE, TRUE);
+
+ qemu_cond_destroy(&pool->all_finished_cond);
+ qemu_mutex_destroy(&pool->cur_work_lock);
+
+ g_free(pool);
+}
+
+void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func,
+ void *opaque, GDestroyNotify opaque_destroy)
+{
+ ThreadPoolElement *el = g_new(ThreadPoolElement, 1);
+
+ el->func = func;
+ el->opaque = opaque;
+ el->opaque_destroy = opaque_destroy;
+
+ WITH_QEMU_LOCK_GUARD(&pool->cur_work_lock) {
+ pool->cur_work++;
+ }
+
+ /*
+ * Ignore the return value since this function can only report an error
+ * if creation of an additional thread fails, and even then the provided
+ * work is still queued (just for the existing threads).
+ */
+ g_thread_pool_push(pool->t, el, NULL);
+}
+
+void thread_pool_submit_immediate(ThreadPool *pool, ThreadPoolFunc *func,
+ void *opaque, GDestroyNotify opaque_destroy)
+{
+ thread_pool_submit(pool, func, opaque, opaque_destroy);
+ thread_pool_adjust_max_threads_to_work(pool);
+}
+
+void thread_pool_wait(ThreadPool *pool)
+{
+ QEMU_LOCK_GUARD(&pool->cur_work_lock);
+
+ while (pool->cur_work > 0) {
+ qemu_cond_wait(&pool->all_finished_cond,
+ &pool->cur_work_lock);
+ }
+}
+
+bool thread_pool_set_max_threads(ThreadPool *pool,
+ int max_threads)
+{
+ assert(max_threads > 0);
+
+ return g_thread_pool_set_max_threads(pool->t, max_threads, NULL);
+}
+
+bool thread_pool_adjust_max_threads_to_work(ThreadPool *pool)
+{
+ QEMU_LOCK_GUARD(&pool->cur_work_lock);
+
+ return thread_pool_set_max_threads(pool, pool->cur_work);
+}
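
A minimal usage sketch of the generic (non-AIO) ThreadPool API added above. It assumes QEMU's build environment, that the existing ThreadPoolFunc typedef (int (*)(void *)) is shared with the AIO pool, and that the declarations live in block/thread-pool.h; these are assumptions, not part of the patch:

    #include "qemu/osdep.h"
    #include "block/thread-pool.h"   /* assumed location of the new declarations */

    /* ThreadPoolFunc is assumed to keep its int return; the generic pool ignores it. */
    static int double_value(void *opaque)
    {
        int *value = opaque;
        *value *= 2;
        return 0;
    }

    static void example_generic_pool(void)
    {
        ThreadPool *pool = thread_pool_new();
        int a = 1, b = 2;

        /* Queue work; nothing runs yet, the pool starts with 0 threads. */
        thread_pool_submit(pool, double_value, &a, NULL);
        thread_pool_submit(pool, double_value, &b, NULL);

        /* Size the pool to the amount of queued work so it starts executing. */
        thread_pool_adjust_max_threads_to_work(pool);

        thread_pool_wait(pool);   /* returns once cur_work drops back to 0 */
        thread_pool_free(pool);   /* waits for any remaining work, then destroys */
    }

For a single job, thread_pool_submit_immediate() combines the submit and adjust steps shown above.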
diff --git a/util/timed-average.c b/util/timed-average.c
index 2b49d53..5b5c22a 100644
--- a/util/timed-average.c
+++ b/util/timed-average.c
@@ -8,10 +8,12 @@
* Benoît Canet <benoit.canet@nodalink.com>
* Alberto Garcia <berto@igalia.com>
*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
- * (at your option) version 3 or any later version.
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/util/trace-events b/util/trace-events
index 49a4962..bd8f25f 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -14,9 +14,9 @@ aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
# thread-pool.c
-thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
-thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
-thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
+thread_pool_submit_aio(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
+thread_pool_complete_aio(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
+thread_pool_cancel_aio(void *req, void *opaque) "req %p opaque %p"
# buffer.c
buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"
diff --git a/util/userfaultfd.c b/util/userfaultfd.c
index 1b2fa94..2396104 100644
--- a/util/userfaultfd.c
+++ b/util/userfaultfd.c
@@ -240,7 +240,7 @@ int uffd_change_protection(int uffd_fd, void *addr, uint64_t length,
* Copy range of source pages to the destination to resolve
* missing page fault somewhere in the destination range.
*
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
*
* @uffd_fd: UFFD file descriptor
* @dst_addr: destination base address
@@ -259,10 +259,11 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
uffd_copy.mode = dont_wake ? UFFDIO_COPY_MODE_DONTWAKE : 0;
if (ioctl(uffd_fd, UFFDIO_COPY, &uffd_copy)) {
+ int e = errno;
error_report("uffd_copy_page() failed: dst_addr=%p src_addr=%p length=%" PRIu64
" mode=%" PRIx64 " errno=%i", dst_addr, src_addr,
- length, (uint64_t) uffd_copy.mode, errno);
- return -1;
+ length, (uint64_t) uffd_copy.mode, e);
+ return -e;
}
return 0;
@@ -273,7 +274,7 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
*
* Fill range pages with zeroes to resolve missing page fault within the range.
*
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
*
* @uffd_fd: UFFD file descriptor
* @addr: base address
@@ -289,10 +290,11 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake)
uffd_zeropage.mode = dont_wake ? UFFDIO_ZEROPAGE_MODE_DONTWAKE : 0;
if (ioctl(uffd_fd, UFFDIO_ZEROPAGE, &uffd_zeropage)) {
+ int e = errno;
error_report("uffd_zero_page() failed: addr=%p length=%" PRIu64
" mode=%" PRIx64 " errno=%i", addr, length,
- (uint64_t) uffd_zeropage.mode, errno);
- return -1;
+ (uint64_t) uffd_zeropage.mode, e);
+ return -e;
}
return 0;
@@ -306,7 +308,7 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake)
* via UFFD-IO IOCTLs with MODE_DONTWAKE flag set, then after that all waits
* for the whole memory range are satisfied in a single call to uffd_wakeup().
*
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
*
* @uffd_fd: UFFD file descriptor
* @addr: base address
@@ -320,9 +322,10 @@ int uffd_wakeup(int uffd_fd, void *addr, uint64_t length)
uffd_range.len = length;
if (ioctl(uffd_fd, UFFDIO_WAKE, &uffd_range)) {
+ int e = errno;
error_report("uffd_wakeup() failed: addr=%p length=%" PRIu64 " errno=%i",
- addr, length, errno);
- return -1;
+ addr, length, e);
+ return -e;
}
return 0;
@@ -355,31 +358,3 @@ int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count)
return (int) (res / sizeof(struct uffd_msg));
}
-
-/**
- * uffd_poll_events: poll UFFD file descriptor for read
- *
- * Returns true if events are available for read, false otherwise
- *
- * @uffd_fd: UFFD file descriptor
- * @tmo: timeout value
- */
-bool uffd_poll_events(int uffd_fd, int tmo)
-{
- int res;
- struct pollfd poll_fd = { .fd = uffd_fd, .events = POLLIN, .revents = 0 };
-
- do {
- res = poll(&poll_fd, 1, tmo);
- } while (res < 0 && errno == EINTR);
-
- if (res == 0) {
- return false;
- }
- if (res < 0) {
- error_report("uffd_poll_events() failed: errno=%i", errno);
- return false;
- }
-
- return (poll_fd.revents & POLLIN) != 0;
-}
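
A caller-side sketch of the new -errno return convention for the uffd_*() helpers. The header path, the trailing length/dont_wake parameters, and the EAGAIN retry policy are illustrative assumptions only:

    #include "qemu/osdep.h"
    #include "qemu/userfaultfd.h"   /* assumed header for the uffd_*() helpers */

    static int resolve_missing_fault(int uffd_fd, void *dst, void *src,
                                     uint64_t length)
    {
        int ret = uffd_copy_page(uffd_fd, dst, src, length, false);

        if (ret == -EAGAIN) {
            /* With -errno returns the caller can tell transient conditions
             * apart from hard failures and, for example, retry once. */
            ret = uffd_copy_page(uffd_fd, dst, src, length, false);
        }
        return ret;   /* 0 on success, other negative errno values are fatal */
    }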
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index f8bab46..fdff042 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -16,7 +16,7 @@
#include "qapi/error.h"
#include "exec/ramlist.h"
#include "exec/cpu-common.h"
-#include "exec/memory.h"
+#include "system/memory.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "standard-headers/linux/pci_regs.h"